path: root/tools/perf/scripts/python/parallel-perf.py
author:    Jiri Slaby (SUSE) <jirislaby@kernel.org>  2025-03-19 10:29:13 +0100
committer: Thomas Gleixner <tglx@linutronix.de>  2025-05-16 21:06:09 +0200
commit:    7f68126a8766773a403ff754bd2cbce0df2306f6 (patch)
tree:      17cad2e7dc6e0217c70aa1851b375bd0c1ce0c93 /tools/perf/scripts/python/parallel-perf.py
parent:    3fd83ff1d9232efb0953b720086e6a9e02ecb166 (diff)
iio: Switch to irq_domain_create_simple()
irq_domain_add_simple() is going away as it is now obsolete. Switch to the preferred irq_domain_create_simple(). That differs in the first parameter: it takes the more generic struct fwnode_handle instead of struct device_node. Therefore, of_fwnode_handle() is added around the parameter.

Note that some of the users can likely use dev->fwnode directly instead of the indirect of_fwnode_handle(dev->of_node). But dev->fwnode is not guaranteed to be set for all of them, so this has to be investigated on a case-by-case basis (by people who can actually test with the HW).

[ tglx: Fix up subject prefix ]

Signed-off-by: Jiri Slaby (SUSE) <jirislaby@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/all/20250319092951.37667-21-jirislaby@kernel.org
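For readers unfamiliar with the two APIs, the sketch below illustrates the mechanical conversion the message describes. The surrounding function, the driver state struct my_state, and the ops table my_irq_domain_ops are hypothetical placeholders; only the irq_domain_add_simple() -> irq_domain_create_simple() switch and the of_fwnode_handle() wrapper come from the patch.

#include <linux/device.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

/* Hypothetical driver state; real drivers keep the domain in their own
 * private structure. */
struct my_state {
	struct irq_domain *domain;
};

static const struct irq_domain_ops my_irq_domain_ops; /* placeholder */

static int my_setup_irq_domain(struct device *dev, struct my_state *st,
			       unsigned int num_irqs)
{
	/* Old style: the first argument is a struct device_node *:
	 *
	 *   st->domain = irq_domain_add_simple(dev->of_node, num_irqs, 0,
	 *                                      &my_irq_domain_ops, st);
	 */

	/* New style: irq_domain_create_simple() takes a struct
	 * fwnode_handle *, so the of_node is wrapped with
	 * of_fwnode_handle(). */
	st->domain = irq_domain_create_simple(of_fwnode_handle(dev->of_node),
					      num_irqs, 0,
					      &my_irq_domain_ops, st);

	return st->domain ? 0 : -ENOMEM;
}

The only functional difference is the type of the first argument; behaviour is otherwise unchanged, which is why the conversion can be done mechanically per driver.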
Diffstat (limited to 'tools/perf/scripts/python/parallel-perf.py')
0 files changed, 0 insertions, 0 deletions
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h21
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lib.c708
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_main.c169
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_mem.h8
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_ptp.c1021
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_ptp.h379
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c572
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.c3219
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.h969
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_vf_dev.c62
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl.c1860
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl.h125
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c673
-rw-r--r--drivers/net/ethernet/intel/idpf/virtchnl2.h614
-rw-r--r--drivers/net/ethernet/intel/idpf/xdp.c486
-rw-r--r--drivers/net/ethernet/intel/idpf/xdp.h175
-rw-r--r--drivers/net/ethernet/intel/idpf/xsk.c633
-rw-r--r--drivers/net/ethernet/intel/idpf/xsk.h33
-rw-r--r--drivers/net/ethernet/intel/igb/Makefile6
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c4
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.c2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.h1
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.c4
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.h1
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h72
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c72
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c463
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c70
-rw-r--r--drivers/net/ethernet/intel/igb/igb_xsk.c562
-rw-r--r--drivers/net/ethernet/intel/igbvf/Makefile6
-rw-r--r--drivers/net/ethernet/intel/igbvf/ethtool.c6
-rw-r--r--drivers/net/ethernet/intel/igbvf/igbvf.h31
-rw-r--r--drivers/net/ethernet/intel/igbvf/mbx.h1
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c32
-rw-r--r--drivers/net/ethernet/intel/igc/Makefile6
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h157
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.c6
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.h9
-rw-r--r--drivers/net/ethernet/intel/igc/igc_defines.h94
-rw-r--r--drivers/net/ethernet/intel/igc/igc_diag.c3
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c255
-rw-r--r--drivers/net/ethernet/intel/igc/igc_hw.h6
-rw-r--r--drivers/net/ethernet/intel/igc/igc_i225.c2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_leds.c38
-rw-r--r--drivers/net/ethernet/intel/igc/igc_mac.c318
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c838
-rw-r--r--drivers/net/ethernet/intel/igc/igc_nvm.c54
-rw-r--r--drivers/net/ethernet/intel/igc/igc_nvm.h2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_phy.c28
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ptp.c222
-rw-r--r--drivers/net/ethernet/intel/igc/igc_regs.h28
-rw-r--r--drivers/net/ethernet/intel/igc/igc_tsn.c414
-rw-r--r--drivers/net/ethernet/intel/igc/igc_tsn.h58
-rw-r--r--drivers/net/ethernet/intel/igc/igc_xdp.c21
-rw-r--r--drivers/net/ethernet/intel/ixgbe/Makefile12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/devlink/devlink.c558
-rw-r--r--drivers/net/ethernet/intel/ixgbe/devlink/devlink.h12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/devlink/region.c290
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h57
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c6
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c34
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c61
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c4043
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h102
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c323
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c18
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.c707
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.h12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c72
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c14
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c1065
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h33
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c11
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c61
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c155
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h144
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h1032
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c20
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h7
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c251
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h25
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c11
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/Makefile6
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/defines.h6
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c20
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ipsec.c53
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h35
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c88
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/mbx.c12
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/mbx.h8
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c196
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.h5
-rw-r--r--drivers/net/ethernet/intel/libeth/Kconfig15
-rw-r--r--drivers/net/ethernet/intel/libeth/Makefile12
-rw-r--r--drivers/net/ethernet/intel/libeth/priv.h37
-rw-r--r--drivers/net/ethernet/intel/libeth/rx.c273
-rw-r--r--drivers/net/ethernet/intel/libeth/tx.c41
-rw-r--r--drivers/net/ethernet/intel/libeth/xdp.c451
-rw-r--r--drivers/net/ethernet/intel/libeth/xsk.c271
-rw-r--r--drivers/net/ethernet/intel/libie/Kconfig25
-rw-r--r--drivers/net/ethernet/intel/libie/Makefile14
-rw-r--r--drivers/net/ethernet/intel/libie/adminq.c52
-rw-r--r--drivers/net/ethernet/intel/libie/fwlog.c1115
-rw-r--r--drivers/net/ethernet/intel/libie/rx.c126
-rw-r--r--drivers/net/ethernet/jme.c12
-rw-r--r--drivers/net/ethernet/korina.c7
-rw-r--r--drivers/net/ethernet/lantiq_etop.c39
-rw-r--r--drivers/net/ethernet/lantiq_xrx200.c6
-rw-r--r--drivers/net/ethernet/litex/litex_liteeth.c2
-rw-r--r--drivers/net/ethernet/marvell/Kconfig1
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c74
-rw-r--r--drivers/net/ethernet/marvell/mvmdio.c15
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c194
-rw-r--r--drivers/net/ethernet/marvell/mvneta_bm.c2
-rw-r--r--drivers/net/ethernet/marvell/mvneta_bm.h2
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2.h10
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c26
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h8
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c428
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c201
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c80
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_main.c88
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_main.h7
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c27
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h6
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_rx.c91
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_rx.h4
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_tx.c7
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_tx.h4
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c70
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c33
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h8
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.c1
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c9
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h2
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c7
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/Kconfig8
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/Makefile4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c209
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.h11
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h33
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cn20k/api.h32
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cn20k/debugfs.c218
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cn20k/debugfs.h28
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cn20k/mbox_init.c424
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cn20k/nix.c20
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cn20k/npa.c21
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cn20k/reg.h81
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cn20k/struct.h380
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/common.h4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h8
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.c123
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h200
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c11
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/npc.h8
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/ptp.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rpm.c104
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rpm.h21
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c332
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h189
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c176
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c26
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c148
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c732
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c94
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c335
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c29
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c232
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c64
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c16
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h11
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c469
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c10
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h69
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c28
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h100
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/Makefile10
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c53
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c1042
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h265
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c7
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c450
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn20k.h17
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c403
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h226
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c15
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c123
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c9
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c331
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c39
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c692
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h48
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c49
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c320
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h18
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c126
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c245
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.h24
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/qos.c96
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c29
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/rep.c879
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/rep.h55
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_counter.c3
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_flower.c4
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_hw.c83
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_main.c13
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_pci.c8
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_rxtx.c15
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c28
-rw-r--r--drivers/net/ethernet/marvell/skge.c16
-rw-r--r--drivers/net/ethernet/marvell/sky2.c18
-rw-r--r--drivers/net/ethernet/mediatek/Kconfig3
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_path.c45
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c1021
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h163
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe.c12
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe.h2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c9
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe_offload.c45
-rw-r--r--drivers/net/ethernet/mediatek/mtk_star_emac.c31
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed.c76
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed.h2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_mcu.c38
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_wo.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/alloc.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/catas.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_clock.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c81
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c145
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c126
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h23
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile75
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c255
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cq.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/debugfs.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dev.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.c208
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/cmd_tracepoint.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c63
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dpll.c81
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h166
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/channels.c83
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/channels.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/dim.h45
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs.h34
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs_ethtool.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/health.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.c199
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.h15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/pcie_cong_event.c376
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/pcie_cong_event.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port.c119
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c62
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/qos.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c145
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c51
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rss.c120
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rss.h39
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c100
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c43
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_dmfs.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c294
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c107
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c365
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c43
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tir.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tir.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/trap.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/trap.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h99
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c69
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h58
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c349
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h49
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c838
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c50
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.c1155
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.h77
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.c201
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.h121
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_common.c101
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c60
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dim.c95
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c857
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c1448
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c157
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c688
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c793
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h40
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c209
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c63
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c128
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/adj_vport.c202
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c73
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/diag/qos_tracepoint.h94
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c40
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c2126
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/vporttbl.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c329
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h131
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c731
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/events.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c124
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c914
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h132
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c673
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_pool.c195
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_pool.h55
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c265
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c79
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/hwmon.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/hwmon.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c45
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c54
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c51
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c545
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c77
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c69
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c799
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c97
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c652
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.h15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c116
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.c799
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c40
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/st.c185
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c264
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h138
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c179
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c237
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qos.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/rdma.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/rdma.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/rl.c62
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c48
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/diag/dev_tracepoint.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c115
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/diag/sf_tracepoint.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/diag/vhca_tracepoint.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c61
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c69
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h97
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/Makefile2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c2651
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h316
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.c467
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.h69
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/buddy.c149
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/buddy.h21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c1226
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h117
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c1101
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h88
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c1219
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h347
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c257
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h73
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c487
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.h42
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c2329
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h831
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c1641
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h120
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c427
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.h73
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h59
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c1293
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h114
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h941
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c582
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h102
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c394
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.h132
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/prm.h472
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c846
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h87
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c1358
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h265
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c494
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h74
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/vport.c86
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/vport.h13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_action.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_arg.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_arg.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_buddy.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_buddy.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_cmd.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c)42
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_definer.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_definer.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c)38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_fw.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_icm_pool.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_matcher.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ptrn.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_rule.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c)2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c)66
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c)6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h)23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v0.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c)8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c)263
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h240
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c81
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c)76
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c263
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_table.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_types.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h)6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c)125
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h)10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h)40
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr_ste_v1.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h)9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c139
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wc.c464
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c46
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c50
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_env.c77
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_env.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_linecards.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_thermal.c217
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/i2c.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/item.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/minimal.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c927
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci_hw.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/port.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c347
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h28
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c72
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c121
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c83
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c82
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c75
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h72
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c66
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c66
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/trap.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/txheader.h63
-rw-r--r--drivers/net/ethernet/meta/Kconfig38
-rw-r--r--drivers/net/ethernet/meta/Makefile6
-rw-r--r--drivers/net/ethernet/meta/fbnic/Makefile28
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic.h227
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_csr.c149
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_csr.h1201
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c271
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_devlink.c668
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_drvinfo.h5
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c1924
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw.c1920
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw.h326
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw_log.c123
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw_log.h45
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c601
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h163
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_hwmon.c81
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_irq.c313
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_mac.c934
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_mac.h122
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_mdio.c195
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_netdev.c857
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_netdev.h117
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_pci.c655
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_phylink.c311
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_rpc.c1246
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_rpc.h232
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_time.c303
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_tlv.c560
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_tlv.h158
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_txrx.c2930
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_txrx.h201
-rw-r--r--drivers/net/ethernet/micrel/ks8842.c4
-rw-r--r--drivers/net/ethernet/micrel/ks8851_common.c64
-rw-r--r--drivers/net/ethernet/micrel/ks8851_par.c2
-rw-r--r--drivers/net/ethernet/micrel/ks8851_spi.c6
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c10
-rw-r--r--drivers/net/ethernet/microchip/Kconfig7
-rw-r--r--drivers/net/ethernet/microchip/Makefile2
-rw-r--r--drivers/net/ethernet/microchip/encx24j600-regmap.c10
-rw-r--r--drivers/net/ethernet/microchip/encx24j600.c7
-rw-r--r--drivers/net/ethernet/microchip/encx24j600_hw.h2
-rw-r--r--drivers/net/ethernet/microchip/fdma/Kconfig18
-rw-r--r--drivers/net/ethernet/microchip/fdma/Makefile7
-rw-r--r--drivers/net/ethernet/microchip/fdma/fdma_api.c146
-rw-r--r--drivers/net/ethernet/microchip/fdma/fdma_api.h243
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.c220
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c776
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h33
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.c95
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.h12
-rw-r--r--drivers/net/ethernet/microchip/lan865x/Kconfig19
-rw-r--r--drivers/net/ethernet/microchip/lan865x/Makefile6
-rw-r--r--drivers/net/ethernet/microchip/lan865x/lan865x.c455
-rw-r--r--drivers/net/ethernet/microchip/lan966x/Kconfig1
-rw-r--r--drivers/net/ethernet/microchip/lan966x/Makefile1
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c39
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c409
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h2
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.c32
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.h72
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c3
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_port.c6
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c68
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c1
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c14
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c10
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c23
-rw-r--r--drivers/net/ethernet/microchip/sparx5/Kconfig10
-rw-r--r--drivers/net/ethernet/microchip/sparx5/Makefile12
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.c357
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.h82
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_calendar.c191
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_fdma.c406
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_regs.c222
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_rgmii.c224
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_vcap_ag_api.c3843
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_vcap_impl.c85
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c126
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c5
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c65
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c456
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c10
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.c373
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.h293
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h4774
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c240
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c39
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_packet.c47
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_pgid.c15
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c16
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_police.c3
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_port.c183
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_port.h28
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_psfp.c49
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c58
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_qos.c11
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_qos.h2
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_regs.c222
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_regs.h247
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_sdlb.c25
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c47
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_tc.c8
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c97
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_tc_matchall.c125
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.h2
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c50
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h21
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c45
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_ag_api.h2
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api.c16
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api.h2
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api_client.h4
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c2
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c33
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api_private.h2
-rw-r--r--drivers/net/ethernet/microsoft/Kconfig4
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma_main.c864
-rw-r--r--drivers/net/ethernet/microsoft/mana/hw_channel.c135
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_bpf.c48
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c1228
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_ethtool.c354
-rw-r--r--drivers/net/ethernet/microsoft/mana/shm_channel.c13
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c2
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c285
-rw-r--r--drivers/net/ethernet/mscc/ocelot_fdma.c3
-rw-r--r--drivers/net/ethernet/mscc/ocelot_flower.c61
-rw-r--r--drivers/net/ethernet/mscc/ocelot_net.c53
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ptp.c280
-rw-r--r--drivers/net/ethernet/mscc/ocelot_stats.c39
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vcap.c1
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vsc7514.c8
-rw-r--r--drivers/net/ethernet/mucse/Kconfig33
-rw-r--r--drivers/net/ethernet/mucse/Makefile7
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/Makefile11
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h71
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c143
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h17
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c320
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.c406
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.h20
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c191
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h88
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c16
-rw-r--r--drivers/net/ethernet/natsemi/jazzsonic.c2
-rw-r--r--drivers/net/ethernet/natsemi/macsonic.c2
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c10
-rw-r--r--drivers/net/ethernet/natsemi/ns83820.c19
-rw-r--r--drivers/net/ethernet/natsemi/xtsonic.c2
-rw-r--r--drivers/net/ethernet/neterion/s2io.c13
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/cmsg.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/jit.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/offload.c3
-rw-r--r--drivers/net/ethernet/netronome/nfp/crypto/ipsec.c24
-rw-r--r--drivers/net/ethernet/netronome/nfp/crypto/tls.c9
-rw-r--r--drivers/net/ethernet/netronome/nfp/devlink_param.c6
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/action.c27
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/metadata.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c10
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/dp.c22
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/xsk.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfdk/dp.c22
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_devlink.c1
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_hwmon.c40
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c33
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c43
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c15
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c19
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.c5
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c2
-rw-r--r--drivers/net/ethernet/ni/nixge.c4
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c46
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c2
-rw-r--r--drivers/net/ethernet/oa_tc6.c1369
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c46
-rw-r--r--drivers/net/ethernet/packetengines/hamachi.c6
-rw-r--r--drivers/net/ethernet/packetengines/yellowfin.c6
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c12
-rw-r--r--drivers/net/ethernet/pensando/Kconfig2
-rw-r--r--drivers/net/ethernet/pensando/ionic/Makefile2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic.h13
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_api.h131
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_aux.c102
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_aux.h10
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c19
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_debugfs.c4
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.c408
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.h61
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_ethtool.c152
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_if.h390
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c406
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.h35
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_main.c12
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_phc.c63
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c4
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c556
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.h4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c7
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c14
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c2
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c15
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h31
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.c33
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c21
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_devlink.c15
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h52
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hw.c12
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hw.h9
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c138
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c17
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c50
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c8
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ooo.c9
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ptp.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c8
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c60
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_filter.c149
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c5
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c24
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ptp.c89
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ptp.h8
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c13
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h10
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c60
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c9
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c15
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c35
-rw-r--r--drivers/net/ethernet/qualcomm/Kconfig16
-rw-r--r--drivers/net/ethernet/qualcomm/Makefile1
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-sgmii.c24
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c4
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/Makefile7
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe.c239
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe.h39
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe_config.c2034
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe_config.h317
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.c847
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.h16
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe_regs.h591
-rw-r--r--drivers/net/ethernet/qualcomm/qca_debug.c6
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c60
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.h4
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c9
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c4
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c4
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c4
-rw-r--r--drivers/net/ethernet/realtek/8139too.c4
-rw-r--r--drivers/net/ethernet/realtek/Kconfig24
-rw-r--r--drivers/net/ethernet/realtek/Makefile1
-rw-r--r--drivers/net/ethernet/realtek/atp.c4
-rw-r--r--drivers/net/ethernet/realtek/r8169.h11
-rw-r--r--drivers/net/ethernet/realtek/r8169_firmware.c6
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c1083
-rw-r--r--drivers/net/ethernet/realtek/r8169_phy_config.c264
-rw-r--r--drivers/net/ethernet/realtek/rtase/Makefile10
-rw-r--r--drivers/net/ethernet/realtek/rtase/rtase.h362
-rw-r--r--drivers/net/ethernet/realtek/rtase/rtase_main.c2400
-rw-r--r--drivers/net/ethernet/renesas/Kconfig11
-rw-r--r--drivers/net/ethernet/renesas/Makefile3
-rw-r--r--drivers/net/ethernet/renesas/ravb.h38
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c859
-rw-r--r--drivers/net/ethernet/renesas/ravb_ptp.c12
-rw-r--r--drivers/net/ethernet/renesas/rcar_gen4_ptp.c78
-rw-r--r--drivers/net/ethernet/renesas/rcar_gen4_ptp.h46
-rw-r--r--drivers/net/ethernet/renesas/rswitch.h121
-rw-r--r--drivers/net/ethernet/renesas/rswitch_l2.c316
-rw-r--r--drivers/net/ethernet/renesas/rswitch_l2.h15
-rw-r--r--drivers/net/ethernet/renesas/rswitch_main.c (renamed from drivers/net/ethernet/renesas/rswitch.c)464
-rw-r--r--drivers/net/ethernet/renesas/rtsn.c1371
-rw-r--r--drivers/net/ethernet/renesas/rtsn.h464
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c42
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c7
-rw-r--r--drivers/net/ethernet/rocker/rocker_ofdpa.c6
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c45
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c17
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c2
-rw-r--r--drivers/net/ethernet/seeq/ether3.c8
-rw-r--r--drivers/net/ethernet/seeq/sgiseeq.c2
-rw-r--r--drivers/net/ethernet/sfc/Kconfig5
-rw-r--r--drivers/net/ethernet/sfc/Makefile2
-rw-r--r--drivers/net/ethernet/sfc/ef10.c140
-rw-r--r--drivers/net/ethernet/sfc/ef100_ethtool.c7
-rw-r--r--drivers/net/ethernet/sfc/ef100_netdev.c7
-rw-r--r--drivers/net/ethernet/sfc/ef100_nic.c49
-rw-r--r--drivers/net/ethernet/sfc/ef100_rep.c4
-rw-r--r--drivers/net/ethernet/sfc/ef100_rx.c5
-rw-r--r--drivers/net/ethernet/sfc/ef100_tx.c17
-rw-r--r--drivers/net/ethernet/sfc/efx.c147
-rw-r--r--drivers/net/ethernet/sfc/efx.h3
-rw-r--r--drivers/net/ethernet/sfc/efx_channels.c15
-rw-r--r--drivers/net/ethernet/sfc/efx_channels.h7
-rw-r--r--drivers/net/ethernet/sfc/efx_common.c32
-rw-r--r--drivers/net/ethernet/sfc/efx_common.h1
-rw-r--r--drivers/net/ethernet/sfc/efx_devlink.c13
-rw-r--r--drivers/net/ethernet/sfc/efx_reflash.c522
-rw-r--r--drivers/net/ethernet/sfc/efx_reflash.h20
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c19
-rw-r--r--drivers/net/ethernet/sfc/ethtool_common.c310
-rw-r--r--drivers/net/ethernet/sfc/ethtool_common.h14
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.c18
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.h1
-rw-r--r--drivers/net/ethernet/sfc/falcon/ethtool.c89
-rw-r--r--drivers/net/ethernet/sfc/falcon/falcon.c10
-rw-r--r--drivers/net/ethernet/sfc/falcon/farch.c22
-rw-r--r--drivers/net/ethernet/sfc/falcon/net_driver.h2
-rw-r--r--drivers/net/ethernet/sfc/falcon/nic.c20
-rw-r--r--drivers/net/ethernet/sfc/falcon/nic.h9
-rw-r--r--drivers/net/ethernet/sfc/falcon/rx.c5
-rw-r--r--drivers/net/ethernet/sfc/falcon/tx.c8
-rw-r--r--drivers/net/ethernet/sfc/falcon/tx.h3
-rw-r--r--drivers/net/ethernet/sfc/fw_formats.h114
-rw-r--r--drivers/net/ethernet/sfc/io.h24
-rw-r--r--drivers/net/ethernet/sfc/mae.c17
-rw-r--r--drivers/net/ethernet/sfc/mae.h1
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c197
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h32
-rw-r--r--drivers/net/ethernet/sfc/mcdi_filters.c135
-rw-r--r--drivers/net/ethernet/sfc/mcdi_filters.h8
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h13844
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port.c59
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port_common.c11
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h92
-rw-r--r--drivers/net/ethernet/sfc/nic.c9
-rw-r--r--drivers/net/ethernet/sfc/nic.h2
-rw-r--r--drivers/net/ethernet/sfc/nic_common.h3
-rw-r--r--drivers/net/ethernet/sfc/ptp.c11
-rw-r--r--drivers/net/ethernet/sfc/ptp.h8
-rw-r--r--drivers/net/ethernet/sfc/rx.c5
-rw-r--r--drivers/net/ethernet/sfc/rx_common.c83
-rw-r--r--drivers/net/ethernet/sfc/rx_common.h8
-rw-r--r--drivers/net/ethernet/sfc/siena/efx_channels.c9
-rw-r--r--drivers/net/ethernet/sfc/siena/efx_common.c12
-rw-r--r--drivers/net/ethernet/sfc/siena/ethtool.c12
-rw-r--r--drivers/net/ethernet/sfc/siena/ethtool_common.c238
-rw-r--r--drivers/net/ethernet/sfc/siena/ethtool_common.h2
-rw-r--r--drivers/net/ethernet/sfc/siena/farch.c2
-rw-r--r--drivers/net/ethernet/sfc/siena/mcdi.c6
-rw-r--r--drivers/net/ethernet/sfc/siena/mcdi_pcol.h12
-rw-r--r--drivers/net/ethernet/sfc/siena/net_driver.h32
-rw-r--r--drivers/net/ethernet/sfc/siena/nic.c14
-rw-r--r--drivers/net/ethernet/sfc/siena/nic_common.h5
-rw-r--r--drivers/net/ethernet/sfc/siena/ptp.c6
-rw-r--r--drivers/net/ethernet/sfc/siena/ptp.h6
-rw-r--r--drivers/net/ethernet/sfc/siena/rx_common.c72
-rw-r--r--drivers/net/ethernet/sfc/siena/rx_common.h4
-rw-r--r--drivers/net/ethernet/sfc/siena/siena.c2
-rw-r--r--drivers/net/ethernet/sfc/tc.c18
-rw-r--r--drivers/net/ethernet/sfc/tc_conntrack.c2
-rw-r--r--drivers/net/ethernet/sfc/tc_counters.c2
-rw-r--r--drivers/net/ethernet/sfc/tc_encap_actions.c6
-rw-r--r--drivers/net/ethernet/sfc/tx.c14
-rw-r--r--drivers/net/ethernet/sfc/tx.h3
-rw-r--r--drivers/net/ethernet/sfc/tx_common.c33
-rw-r--r--drivers/net/ethernet/sfc/tx_common.h4
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c12
-rw-r--r--drivers/net/ethernet/sgi/meth.c2
-rw-r--r--drivers/net/ethernet/sis/Kconfig4
-rw-r--r--drivers/net/ethernet/sis/sis190.c6
-rw-r--r--drivers/net/ethernet/sis/sis900.c13
-rw-r--r--drivers/net/ethernet/smsc/Kconfig2
-rw-r--r--drivers/net/ethernet/smsc/epic100.c4
-rw-r--r--drivers/net/ethernet/smsc/smc9194.c1
-rw-r--r--drivers/net/ethernet/smsc/smc91c92_cs.c6
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c8
-rw-r--r--drivers/net/ethernet/smsc/smc91x.h8
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c19
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c2
-rw-r--r--drivers/net/ethernet/socionext/netsec.c9
-rw-r--r--drivers/net/ethernet/socionext/sni_ave.c2
-rw-r--r--drivers/net/ethernet/spacemit/Kconfig29
-rw-r--r--drivers/net/ethernet/spacemit/Makefile6
-rw-r--r--drivers/net/ethernet/spacemit/k1_emac.c2162
-rw-r--r--drivers/net/ethernet/spacemit/k1_emac.h416
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig93
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/chain_mode.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h91
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c46
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c230
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-eic7700.c235
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c202
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c198
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c113
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c445
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h30
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c43
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c656
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c99
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c46
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c102
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c38
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c396
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c235
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c1675
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rzn1.c93
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c186
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c389
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c93
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c68
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c167
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c402
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun55i.c159
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c104
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c70
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c32
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c286
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c172
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h32
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c244
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c72
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4.h69
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c532
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c51
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac5.c66
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac5.h16
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h41
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c37
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h61
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c293
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c24
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c68
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.c280
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h167
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/ring_mode.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h97
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_est.c18
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_est.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c305
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c314
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.h28
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c62
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_libpci.c48
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_libpci.h12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c2013
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c541
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c121
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.c67
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h74
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c295
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c124
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c271
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c375
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.h64
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h1
-rw-r--r--drivers/net/ethernet/sun/cassini.c7
-rw-r--r--drivers/net/ethernet/sun/ldmvsw.c6
-rw-r--r--drivers/net/ethernet/sun/niu.c96
-rw-r--r--drivers/net/ethernet/sun/niu.h8
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c6
-rw-r--r--drivers/net/ethernet/sun/sungem.c26
-rw-r--r--drivers/net/ethernet/sun/sunhme.c10
-rw-r--r--drivers/net/ethernet/sun/sunqe.c2
-rw-r--r--drivers/net/ethernet/sun/sunqe.h2
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c36
-rw-r--r--drivers/net/ethernet/sun/sunvnet_common.c8
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_driver.c4
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-common.c7
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-net.c7
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c5
-rw-r--r--drivers/net/ethernet/tehuti/Kconfig15
-rw-r--r--drivers/net/ethernet/tehuti/Makefile3
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c8
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.h2
-rw-r--r--drivers/net/ethernet/tehuti/tn40.c1857
-rw-r--r--drivers/net/ethernet/tehuti/tn40.h266
-rw-r--r--drivers/net/ethernet/tehuti/tn40_mdio.c220
-rw-r--r--drivers/net/ethernet/tehuti/tn40_phy.c76
-rw-r--r--drivers/net/ethernet/tehuti/tn40_regs.h245
-rw-r--r--drivers/net/ethernet/ti/Kconfig33
-rw-r--r--drivers/net/ethernet/ti/Makefile25
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-ethtool.c95
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c1536
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.h69
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-qos.c70
-rw-r--r--drivers/net/ethernet/ti/am65-cpts.c171
-rw-r--r--drivers/net/ethernet/ti/am65-cpts.h11
-rw-r--r--drivers/net/ethernet/ti/cpsw.c39
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c353
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.h63
-rw-r--r--drivers/net/ethernet/ti/cpsw_ethtool.c23
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c33
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.c73
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.h9
-rw-r--r--drivers/net/ethernet/ti/cpts.c2
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c2
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c23
-rw-r--r--drivers/net/ethernet/ti/icssg/icss_iep.c428
-rw-r--r--drivers/net/ethernet/ti/icssg/icss_iep.h73
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_classifier.c122
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_common.c1853
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_config.c489
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_config.h151
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_ethtool.c138
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_mii_cfg.c4
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.c2504
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.h254
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c1250
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_queues.c2
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_stats.c49
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_stats.h172
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_switch_map.h36
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_switchdev.c477
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_switchdev.h13
-rw-r--r--drivers/net/ethernet/ti/icssm/icssm_prueth.c1746
-rw-r--r--drivers/net/ethernet/ti/icssm/icssm_prueth.h262
-rw-r--r--drivers/net/ethernet/ti/icssm/icssm_prueth_ptp.h85
-rw-r--r--drivers/net/ethernet/ti/icssm/icssm_switch.h257
-rw-r--r--drivers/net/ethernet/ti/k3-cppi-desc-pool.c46
-rw-r--r--drivers/net/ethernet/ti/k3-cppi-desc-pool.h6
-rw-r--r--drivers/net/ethernet/ti/netcp.h5
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c70
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c87
-rw-r--r--drivers/net/ethernet/ti/tlan.c8
-rw-r--r--drivers/net/ethernet/toshiba/Kconfig11
-rw-r--r--drivers/net/ethernet/toshiba/Makefile2
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c58
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.h1
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_wireless.c1
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_wireless.h1
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c2555
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.h475
-rw-r--r--drivers/net/ethernet/toshiba/spider_net_ethtool.c174
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c6
-rw-r--r--drivers/net/ethernet/vertexcom/mse102x.c139
-rw-r--r--drivers/net/ethernet/via/Kconfig1
-rw-r--r--drivers/net/ethernet/via/via-rhine.c13
-rw-r--r--drivers/net/ethernet/via/via-velocity.c12
-rw-r--r--drivers/net/ethernet/wangxun/Kconfig46
-rw-r--r--drivers/net/ethernet/wangxun/Makefile2
-rw-r--r--drivers/net/ethernet/wangxun/libwx/Makefile3
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ethtool.c381
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ethtool.h17
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.c810
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.h15
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.c779
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.h11
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_mbx.c419
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_mbx.h99
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ptp.c905
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ptp.h20
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_sriov.c929
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_sriov.h18
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_type.h429
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_vf.c599
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_vf.h129
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_vf_common.c414
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_vf_common.h22
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_vf_lib.c292
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_vf_lib.h15
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c33
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_main.c120
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c24
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_type.h10
-rw-r--r--drivers/net/ethernet/wangxun/ngbevf/Makefile9
-rw-r--r--drivers/net/ethernet/wangxun/ngbevf/ngbevf_main.c266
-rw-r--r--drivers/net/ethernet/wangxun/ngbevf/ngbevf_type.h29
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/Makefile4
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c530
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h18
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c518
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h2
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.c648
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.h20
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c21
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c201
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_irq.h2
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_main.c327
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c248
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h4
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_type.h312
-rw-r--r--drivers/net/ethernet/wangxun/txgbevf/Makefile9
-rw-r--r--drivers/net/ethernet/wangxun/txgbevf/txgbevf_main.c331
-rw-r--r--drivers/net/ethernet/wangxun/txgbevf/txgbevf_type.h26
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c4
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c2
-rw-r--r--drivers/net/ethernet/xilinx/Kconfig1
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c6
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet.h176
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c839
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c23
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c25
-rw-r--r--drivers/net/ethernet/xircom/Kconfig2
-rw-r--r--drivers/net/ethernet/xircom/xirc2ps_cs.c6
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c71
-rw-r--r--drivers/net/fddi/defxx.c2
-rw-r--r--drivers/net/fddi/defza.c12
-rw-r--r--drivers/net/fjes/fjes_ethtool.c64
-rw-r--r--drivers/net/fjes/fjes_main.c14
-rw-r--r--drivers/net/fjes/fjes_trace.h12
-rw-r--r--drivers/net/geneve.c176
-rw-r--r--drivers/net/gtp.c957
-rw-r--r--drivers/net/hamradio/6pack.c127
-rw-r--r--drivers/net/hamradio/Kconfig6
-rw-r--r--drivers/net/hamradio/baycom_epp.c6
-rw-r--r--drivers/net/hamradio/baycom_par.c5
-rw-r--r--drivers/net/hamradio/baycom_ser_fdx.c3
-rw-r--r--drivers/net/hamradio/baycom_ser_hdx.c4
-rw-r--r--drivers/net/hamradio/bpqether.c29
-rw-r--r--drivers/net/hamradio/scc.c44
-rw-r--r--drivers/net/hamradio/yam.c2
-rw-r--r--drivers/net/hippi/rrunner.c4
-rw-r--r--drivers/net/hyperv/Kconfig2
-rw-r--r--drivers/net/hyperv/hyperv_net.h22
-rw-r--r--drivers/net/hyperv/netvsc.c78
-rw-r--r--drivers/net/hyperv/netvsc_bpf.c2
-rw-r--r--drivers/net/hyperv/netvsc_drv.c202
-rw-r--r--drivers/net/hyperv/netvsc_trace.h8
-rw-r--r--drivers/net/hyperv/rndis_filter.c57
-rw-r--r--drivers/net/ieee802154/Kconfig1
-rw-r--r--drivers/net/ieee802154/at86rf230.c4
-rw-r--r--drivers/net/ieee802154/ca8210.c84
-rw-r--r--drivers/net/ieee802154/cc2520.c2
-rw-r--r--drivers/net/ieee802154/fakelb.c2
-rw-r--r--drivers/net/ieee802154/mac802154_hwsim.c2
-rw-r--r--drivers/net/ieee802154/mcr20a.c5
-rw-r--r--drivers/net/ifb.c18
-rw-r--r--drivers/net/ipa/Kconfig2
-rw-r--r--drivers/net/ipa/data/ipa_data-v3.1.c6
-rw-r--r--drivers/net/ipa/data/ipa_data-v3.5.1.c6
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.11.c6
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.2.c6
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.5.c6
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.7.c24
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.9.c6
-rw-r--r--drivers/net/ipa/data/ipa_data-v5.0.c6
-rw-r--r--drivers/net/ipa/data/ipa_data-v5.5.c6
-rw-r--r--drivers/net/ipa/gsi.c30
-rw-r--r--drivers/net/ipa/gsi.h12
-rw-r--r--drivers/net/ipa/gsi_private.h7
-rw-r--r--drivers/net/ipa/gsi_reg.c6
-rw-r--r--drivers/net/ipa/gsi_trans.c12
-rw-r--r--drivers/net/ipa/gsi_trans.h9
-rw-r--r--drivers/net/ipa/ipa.h15
-rw-r--r--drivers/net/ipa/ipa_cmd.c13
-rw-r--r--drivers/net/ipa/ipa_cmd.h18
-rw-r--r--drivers/net/ipa/ipa_data.h6
-rw-r--r--drivers/net/ipa/ipa_endpoint.c19
-rw-r--r--drivers/net/ipa/ipa_endpoint.h10
-rw-r--r--drivers/net/ipa/ipa_gsi.c7
-rw-r--r--drivers/net/ipa/ipa_interrupt.c55
-rw-r--r--drivers/net/ipa/ipa_interrupt.h6
-rw-r--r--drivers/net/ipa/ipa_main.c58
-rw-r--r--drivers/net/ipa/ipa_mem.c36
-rw-r--r--drivers/net/ipa/ipa_mem.h4
-rw-r--r--drivers/net/ipa/ipa_modem.c18
-rw-r--r--drivers/net/ipa/ipa_modem.h5
-rw-r--r--drivers/net/ipa/ipa_power.c34
-rw-r--r--drivers/net/ipa/ipa_power.h19
-rw-r--r--drivers/net/ipa/ipa_qmi.c10
-rw-r--r--drivers/net/ipa/ipa_qmi.h4
-rw-r--r--drivers/net/ipa/ipa_qmi_msg.c3
-rw-r--r--drivers/net/ipa/ipa_qmi_msg.h3
-rw-r--r--drivers/net/ipa/ipa_reg.c4
-rw-r--r--drivers/net/ipa/ipa_reg.h6
-rw-r--r--drivers/net/ipa/ipa_resource.c3
-rw-r--r--drivers/net/ipa/ipa_smp2p.c12
-rw-r--r--drivers/net/ipa/ipa_sysfs.c13
-rw-r--r--drivers/net/ipa/ipa_sysfs.h4
-rw-r--r--drivers/net/ipa/ipa_table.c27
-rw-r--r--drivers/net/ipa/ipa_table.h7
-rw-r--r--drivers/net/ipa/ipa_uc.c12
-rw-r--r--drivers/net/ipa/ipa_uc.h3
-rw-r--r--drivers/net/ipa/ipa_version.h22
-rw-r--r--drivers/net/ipa/reg.h8
-rw-r--r--drivers/net/ipa/reg/gsi_reg-v3.1.c8
-rw-r--r--drivers/net/ipa/reg/gsi_reg-v3.5.1.c8
-rw-r--r--drivers/net/ipa/reg/gsi_reg-v4.0.c8
-rw-r--r--drivers/net/ipa/reg/gsi_reg-v4.11.c8
-rw-r--r--drivers/net/ipa/reg/gsi_reg-v4.5.c8
-rw-r--r--drivers/net/ipa/reg/gsi_reg-v4.9.c8
-rw-r--r--drivers/net/ipa/reg/gsi_reg-v5.0.c8
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v3.1.c20
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v3.5.1.c20
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v4.11.c20
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v4.2.c6
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v4.5.c20
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v4.7.c20
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v4.9.c20
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v5.0.c6
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v5.5.c6
-rw-r--r--drivers/net/ipvlan/ipvlan.h3
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c34
-rw-r--r--drivers/net/ipvlan/ipvlan_l3s.c7
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c26
-rw-r--r--drivers/net/ipvlan/ipvtap.c6
-rw-r--r--drivers/net/loopback.c29
-rw-r--r--drivers/net/macsec.c377
-rw-r--r--drivers/net/macvlan.c60
-rw-r--r--drivers/net/macvtap.c6
-rw-r--r--drivers/net/mctp/Kconfig15
-rw-r--r--drivers/net/mctp/Makefile1
-rw-r--r--drivers/net/mctp/mctp-i2c.c59
-rw-r--r--drivers/net/mctp/mctp-i3c.c24
-rw-r--r--drivers/net/mctp/mctp-serial.c141
-rw-r--r--drivers/net/mctp/mctp-usb.c390
-rw-r--r--drivers/net/mdio.c172
-rw-r--r--drivers/net/mdio/Kconfig52
-rw-r--r--drivers/net/mdio/Makefile2
-rw-r--r--drivers/net/mdio/fwnode_mdio.c47
-rw-r--r--drivers/net/mdio/mdio-airoha.c278
-rw-r--r--drivers/net/mdio/mdio-aspeed.c2
-rw-r--r--drivers/net/mdio/mdio-bcm-iproc.c2
-rw-r--r--drivers/net/mdio/mdio-bcm-unimac.c14
-rw-r--r--drivers/net/mdio/mdio-gpio.c5
-rw-r--r--drivers/net/mdio/mdio-hisi-femac.c2
-rw-r--r--drivers/net/mdio/mdio-i2c.c90
-rw-r--r--drivers/net/mdio/mdio-ipq4019.c7
-rw-r--r--drivers/net/mdio/mdio-ipq8064.c2
-rw-r--r--drivers/net/mdio/mdio-moxart.c2
-rw-r--r--drivers/net/mdio/mdio-mscc-miim.c10
-rw-r--r--drivers/net/mdio/mdio-mux-bcm-iproc.c2
-rw-r--r--drivers/net/mdio/mdio-mux-bcm6368.c2
-rw-r--r--drivers/net/mdio/mdio-mux-gpio.c5
-rw-r--r--drivers/net/mdio/mdio-mux-meson-g12a.c2
-rw-r--r--drivers/net/mdio/mdio-mux-meson-gxl.c5
-rw-r--r--drivers/net/mdio/mdio-mux-mmioreg.c56
-rw-r--r--drivers/net/mdio/mdio-mux-multiplexer.c2
-rw-r--r--drivers/net/mdio/mdio-octeon.c27
-rw-r--r--drivers/net/mdio/mdio-realtek-rtl9300.c522
-rw-r--r--drivers/net/mdio/mdio-sun4i.c2
-rw-r--r--drivers/net/mdio/mdio-thunder.c14
-rw-r--r--drivers/net/mdio/mdio-xgene.c2
-rw-r--r--drivers/net/mdio/of_mdio.c15
-rw-r--r--drivers/net/mii.c3
-rw-r--r--drivers/net/net_failover.c6
-rw-r--r--drivers/net/netconsole.c1178
-rw-r--r--drivers/net/netdevsim/Makefile4
-rw-r--r--drivers/net/netdevsim/bpf.c3
-rw-r--r--drivers/net/netdevsim/bus.c29
-rw-r--r--drivers/net/netdevsim/dev.c137
-rw-r--r--drivers/net/netdevsim/ethtool.c67
-rw-r--r--drivers/net/netdevsim/fib.c5
-rw-r--r--drivers/net/netdevsim/health.c6
-rw-r--r--drivers/net/netdevsim/hwstats.c34
-rw-r--r--drivers/net/netdevsim/ipsec.c50
-rw-r--r--drivers/net/netdevsim/macsec.c56
-rw-r--r--drivers/net/netdevsim/netdev.c813
-rw-r--r--drivers/net/netdevsim/netdevsim.h71
-rw-r--r--drivers/net/netdevsim/psp.c252
-rw-r--r--drivers/net/netdevsim/udp_tunnels.c35
-rw-r--r--drivers/net/netkit.c215
-rw-r--r--drivers/net/nlmon.c4
-rw-r--r--drivers/net/ntb_netdev.c10
-rw-r--r--drivers/net/ovpn/Makefile22
-rw-r--r--drivers/net/ovpn/bind.c55
-rw-r--r--drivers/net/ovpn/bind.h101
-rw-r--r--drivers/net/ovpn/crypto.c210
-rw-r--r--drivers/net/ovpn/crypto.h145
-rw-r--r--drivers/net/ovpn/crypto_aead.c389
-rw-r--r--drivers/net/ovpn/crypto_aead.h29
-rw-r--r--drivers/net/ovpn/io.c465
-rw-r--r--drivers/net/ovpn/io.h34
-rw-r--r--drivers/net/ovpn/main.c279
-rw-r--r--drivers/net/ovpn/main.h14
-rw-r--r--drivers/net/ovpn/netlink-gen.c263
-rw-r--r--drivers/net/ovpn/netlink-gen.h48
-rw-r--r--drivers/net/ovpn/netlink.c1293
-rw-r--r--drivers/net/ovpn/netlink.h18
-rw-r--r--drivers/net/ovpn/ovpnpriv.h55
-rw-r--r--drivers/net/ovpn/peer.c1364
-rw-r--r--drivers/net/ovpn/peer.h163
-rw-r--r--drivers/net/ovpn/pktid.c129
-rw-r--r--drivers/net/ovpn/pktid.h86
-rw-r--r--drivers/net/ovpn/proto.h118
-rw-r--r--drivers/net/ovpn/skb.h61
-rw-r--r--drivers/net/ovpn/socket.c241
-rw-r--r--drivers/net/ovpn/socket.h49
-rw-r--r--drivers/net/ovpn/stats.c21
-rw-r--r--drivers/net/ovpn/stats.h47
-rw-r--r--drivers/net/ovpn/tcp.c619
-rw-r--r--drivers/net/ovpn/tcp.h37
-rw-r--r--drivers/net/ovpn/udp.c448
-rw-r--r--drivers/net/ovpn/udp.h25
-rw-r--r--drivers/net/pcs/Kconfig17
-rw-r--r--drivers/net/pcs/Makefile3
-rw-r--r--drivers/net/pcs/pcs-lynx.c127
-rw-r--r--drivers/net/pcs/pcs-mtk-lynxi.c26
-rw-r--r--drivers/net/pcs/pcs-rzn1-miic.c363
-rw-r--r--drivers/net/pcs/pcs-xpcs-nxp.c24
-rw-r--r--drivers/net/pcs/pcs-xpcs-plat.c457
-rw-r--r--drivers/net/pcs/pcs-xpcs-wx.c56
-rw-r--r--drivers/net/pcs/pcs-xpcs.c1087
-rw-r--r--drivers/net/pcs/pcs-xpcs.h71
-rw-r--r--drivers/net/pfcp.c298
-rw-r--r--drivers/net/phy/Kconfig97
-rw-r--r--drivers/net/phy/Makefile32
-rw-r--r--drivers/net/phy/adin.c8
-rw-r--r--drivers/net/phy/adin1100.c14
-rw-r--r--drivers/net/phy/air_en8811h.c1224
-rw-r--r--drivers/net/phy/amd.c2
-rw-r--r--drivers/net/phy/aquantia/Makefile2
-rw-r--r--drivers/net/phy/aquantia/aquantia.h137
-rw-r--r--drivers/net/phy/aquantia/aquantia_firmware.c47
-rw-r--r--drivers/net/phy/aquantia/aquantia_hwmon.c32
-rw-r--r--drivers/net/phy/aquantia/aquantia_leds.c160
-rw-r--r--drivers/net/phy/aquantia/aquantia_main.c1031
-rw-r--r--drivers/net/phy/as21xxx.c1088
-rw-r--r--drivers/net/phy/ax88796b.c7
-rw-r--r--drivers/net/phy/ax88796b_rust.rs9
-rw-r--r--drivers/net/phy/bcm-cygnus.c2
-rw-r--r--drivers/net/phy/bcm-phy-lib.c118
-rw-r--r--drivers/net/phy/bcm-phy-lib.h4
-rw-r--r--drivers/net/phy/bcm-phy-ptp.c33
-rw-r--r--drivers/net/phy/bcm54140.c3
-rw-r--r--drivers/net/phy/bcm63xx.c2
-rw-r--r--drivers/net/phy/bcm7xxx.c2
-rw-r--r--drivers/net/phy/bcm84881.c16
-rw-r--r--drivers/net/phy/bcm87xx.c14
-rw-r--r--drivers/net/phy/broadcom.c605
-rw-r--r--drivers/net/phy/cicada.c2
-rw-r--r--drivers/net/phy/cortina.c2
-rw-r--r--drivers/net/phy/davicom.c2
-rw-r--r--drivers/net/phy/dp83640.c106
-rw-r--r--drivers/net/phy/dp83822.c570
-rw-r--r--drivers/net/phy/dp83848.c4
-rw-r--r--drivers/net/phy/dp83867.c125
-rw-r--r--drivers/net/phy/dp83869.c37
-rw-r--r--drivers/net/phy/dp83tc811.c2
-rw-r--r--drivers/net/phy/dp83td510.c734
-rw-r--r--drivers/net/phy/dp83tg720.c518
-rw-r--r--drivers/net/phy/et1011c.c2
-rw-r--r--drivers/net/phy/fixed_phy.c282
-rw-r--r--drivers/net/phy/icplus.c11
-rw-r--r--drivers/net/phy/intel-xway.c262
-rw-r--r--drivers/net/phy/lxt.c2
-rw-r--r--drivers/net/phy/marvell-88q2xxx.c507
-rw-r--r--drivers/net/phy/marvell-88x2222.c17
-rw-r--r--drivers/net/phy/marvell.c610
-rw-r--r--drivers/net/phy/marvell10g.c47
-rw-r--r--drivers/net/phy/mdio-boardinfo.c80
-rw-r--r--drivers/net/phy/mdio-boardinfo.h23
-rw-r--r--drivers/net/phy/mdio-open-alliance.h49
-rw-r--r--drivers/net/phy/mdio-private.h11
-rw-r--r--drivers/net/phy/mdio_bus.c570
-rw-r--r--drivers/net/phy/mdio_bus_provider.c442
-rw-r--r--drivers/net/phy/mdio_device.c66
-rw-r--r--drivers/net/phy/mediatek-ge.c114
-rw-r--r--drivers/net/phy/mediatek/Kconfig40
-rw-r--r--drivers/net/phy/mediatek/Makefile5
-rw-r--r--drivers/net/phy/mediatek/mtk-2p5ge.c413
-rw-r--r--drivers/net/phy/mediatek/mtk-ge-soc.c (renamed from drivers/net/phy/mediatek-ge-soc.c)806
-rw-r--r--drivers/net/phy/mediatek/mtk-ge.c142
-rw-r--r--drivers/net/phy/mediatek/mtk-phy-lib.c347
-rw-r--r--drivers/net/phy/mediatek/mtk.h104
-rw-r--r--drivers/net/phy/meson-gxl.c2
-rw-r--r--drivers/net/phy/micrel.c2295
-rw-r--r--drivers/net/phy/microchip.c200
-rw-r--r--drivers/net/phy/microchip_rds_ptp.c1306
-rw-r--r--drivers/net/phy/microchip_rds_ptp.h247
-rw-r--r--drivers/net/phy/microchip_t1.c1272
-rw-r--r--drivers/net/phy/microchip_t1s.c430
-rw-r--r--drivers/net/phy/motorcomm.c804
-rw-r--r--drivers/net/phy/mscc/mscc.h31
-rw-r--r--drivers/net/phy/mscc/mscc_main.c528
-rw-r--r--drivers/net/phy/mscc/mscc_ptp.c162
-rw-r--r--drivers/net/phy/mscc/mscc_ptp.h1
-rw-r--r--drivers/net/phy/mxl-86110.c978
-rw-r--r--drivers/net/phy/mxl-gpy.c439
-rw-r--r--drivers/net/phy/national.c2
-rw-r--r--drivers/net/phy/ncn26000.c2
-rw-r--r--drivers/net/phy/nxp-c45-tja11xx-macsec.c8
-rw-r--r--drivers/net/phy/nxp-c45-tja11xx.c240
-rw-r--r--drivers/net/phy/nxp-c45-tja11xx.h1
-rw-r--r--drivers/net/phy/nxp-cbtx.c4
-rw-r--r--drivers/net/phy/nxp-tja11xx.c65
-rw-r--r--drivers/net/phy/open_alliance_helpers.c77
-rw-r--r--drivers/net/phy/open_alliance_helpers.h47
-rw-r--r--drivers/net/phy/phy-c45.c369
-rw-r--r--drivers/net/phy/phy-caps.h64
-rw-r--r--drivers/net/phy/phy-core.c492
-rw-r--r--drivers/net/phy/phy.c426
-rw-r--r--drivers/net/phy/phy_caps.c380
-rw-r--r--drivers/net/phy/phy_device.c1035
-rw-r--r--drivers/net/phy/phy_led_triggers.c25
-rw-r--r--drivers/net/phy/phy_link_topology.c105
-rw-r--r--drivers/net/phy/phy_package.c419
-rw-r--r--drivers/net/phy/phylib-internal.h31
-rw-r--r--drivers/net/phy/phylib.h34
-rw-r--r--drivers/net/phy/phylink.c1751
-rw-r--r--drivers/net/phy/qcom/Kconfig3
-rw-r--r--drivers/net/phy/qcom/at803x.c210
-rw-r--r--drivers/net/phy/qcom/qca807x.c79
-rw-r--r--drivers/net/phy/qcom/qca808x.c27
-rw-r--r--drivers/net/phy/qcom/qca83xx.c18
-rw-r--r--drivers/net/phy/qcom/qcom-phy-lib.c100
-rw-r--r--drivers/net/phy/qcom/qcom.h28
-rw-r--r--drivers/net/phy/qsemi.c2
-rw-r--r--drivers/net/phy/qt2025.rs111
-rw-r--r--drivers/net/phy/realtek.c1092
-rw-r--r--drivers/net/phy/realtek/Kconfig15
-rw-r--r--drivers/net/phy/realtek/Makefile4
-rw-r--r--drivers/net/phy/realtek/realtek.h10
-rw-r--r--drivers/net/phy/realtek/realtek_hwmon.c74
-rw-r--r--drivers/net/phy/realtek/realtek_main.c2277
-rw-r--r--drivers/net/phy/rockchip.c2
-rw-r--r--drivers/net/phy/sfp-bus.c138
-rw-r--r--drivers/net/phy/sfp.c225
-rw-r--r--drivers/net/phy/sfp.h4
-rw-r--r--drivers/net/phy/smsc.c65
-rw-r--r--drivers/net/phy/spi_ks8995.c506
-rw-r--r--drivers/net/phy/ste10Xp.c2
-rw-r--r--drivers/net/phy/teranetics.c5
-rw-r--r--drivers/net/phy/uPD60620.c2
-rw-r--r--drivers/net/phy/vitesse.c181
-rw-r--r--drivers/net/phy/xilinx_gmii2rgmii.c14
-rw-r--r--drivers/net/plip/plip.c3
-rw-r--r--drivers/net/ppp/Kconfig3
-rw-r--r--drivers/net/ppp/bsd_comp.c4
-rw-r--r--drivers/net/ppp/ppp_async.c4
-rw-r--r--drivers/net/ppp/ppp_deflate.c2
-rw-r--r--drivers/net/ppp/ppp_generic.c315
-rw-r--r--drivers/net/ppp/ppp_mppe.c110
-rw-r--r--drivers/net/ppp/ppp_synctty.c7
-rw-r--r--drivers/net/ppp/pppoe.c140
-rw-r--r--drivers/net/ppp/pptp.c27
-rw-r--r--drivers/net/pse-pd/Kconfig34
-rw-r--r--drivers/net/pse-pd/Makefile3
-rw-r--r--drivers/net/pse-pd/pd692x0.c1883
-rw-r--r--drivers/net/pse-pd/pse_core.c1774
-rw-r--r--drivers/net/pse-pd/pse_regulator.c64
-rw-r--r--drivers/net/pse-pd/si3474.c578
-rw-r--r--drivers/net/pse-pd/tps23881.c1563
-rw-r--r--drivers/net/rionet.c2
-rw-r--r--drivers/net/sb1000.c1179
-rw-r--r--drivers/net/slip/slhc.c59
-rw-r--r--drivers/net/slip/slip.c20
-rw-r--r--drivers/net/sungem_phy.c37
-rw-r--r--drivers/net/tap.c200
-rw-r--r--drivers/net/team/Makefile1
-rw-r--r--drivers/net/team/team_core.c (renamed from drivers/net/team/team.c)296
-rw-r--r--drivers/net/team/team_mode_activebackup.c3
-rw-r--r--drivers/net/team/team_mode_loadbalance.c13
-rw-r--r--drivers/net/team/team_nl.c60
-rw-r--r--drivers/net/team/team_nl.h30
-rw-r--r--drivers/net/thunderbolt/main.c21
-rw-r--r--drivers/net/tun.c338
-rw-r--r--drivers/net/tun_vnet.h269
-rw-r--r--drivers/net/usb/Kconfig8
-rw-r--r--drivers/net/usb/aqc111.c20
-rw-r--r--drivers/net/usb/asix.h1
-rw-r--r--drivers/net/usb/asix_common.c22
-rw-r--r--drivers/net/usb/asix_devices.c78
-rw-r--r--drivers/net/usb/ax88172a.c12
-rw-r--r--drivers/net/usb/ax88179_178a.c70
-rw-r--r--drivers/net/usb/catc.c4
-rw-r--r--drivers/net/usb/cdc_ether.c10
-rw-r--r--drivers/net/usb/cdc_mbim.c4
-rw-r--r--drivers/net/usb/cdc_ncm.c77
-rw-r--r--drivers/net/usb/ch9200.c7
-rw-r--r--drivers/net/usb/gl620a.c4
-rw-r--r--drivers/net/usb/ipheth.c89
-rw-r--r--drivers/net/usb/lan78xx.c2103
-rw-r--r--drivers/net/usb/net1080.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c42
-rw-r--r--drivers/net/usb/r8152.c159
-rw-r--r--drivers/net/usb/r8153_ecm.c6
-rw-r--r--drivers/net/usb/rtl8150.c38
-rw-r--r--drivers/net/usb/sierra_net.c10
-rw-r--r--drivers/net/usb/smsc75xx.c17
-rw-r--r--drivers/net/usb/smsc95xx.c98
-rw-r--r--drivers/net/usb/sr9700.c27
-rw-r--r--drivers/net/usb/usbnet.c416
-rw-r--r--drivers/net/veth.c113
-rw-r--r--drivers/net/virtio_net.c3563
-rw-r--r--drivers/net/vmxnet3/Makefile2
-rw-r--r--drivers/net/vmxnet3/vmxnet3_defs.h61
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c272
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c94
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h33
-rw-r--r--drivers/net/vmxnet3/vmxnet3_xdp.c18
-rw-r--r--drivers/net/vrf.c102
-rw-r--r--drivers/net/vsockmon.c6
-rw-r--r--drivers/net/vxlan/vxlan_core.c1080
-rw-r--r--drivers/net/vxlan/vxlan_mdb.c4
-rw-r--r--drivers/net/vxlan/vxlan_private.h21
-rw-r--r--drivers/net/vxlan/vxlan_vnifilter.c69
-rw-r--r--drivers/net/wan/Kconfig2
-rw-r--r--drivers/net/wan/framer/framer-core.c23
-rw-r--r--drivers/net/wan/framer/pef2256/pef2256.c37
-rw-r--r--drivers/net/wan/fsl_qmc_hdlc.c35
-rw-r--r--drivers/net/wan/fsl_ucc_hdlc.c2
-rw-r--r--drivers/net/wan/hdlc_cisco.c4
-rw-r--r--drivers/net/wan/hdlc_fr.c4
-rw-r--r--drivers/net/wan/hdlc_ppp.c8
-rw-r--r--drivers/net/wan/ixp4xx_hss.c2
-rw-r--r--drivers/net/wan/lapbether.c4
-rw-r--r--drivers/net/wireguard/Makefile2
-rw-r--r--drivers/net/wireguard/allowedips.c106
-rw-r--r--drivers/net/wireguard/allowedips.h4
-rw-r--r--drivers/net/wireguard/cookie.c22
-rw-r--r--drivers/net/wireguard/device.c21
-rw-r--r--drivers/net/wireguard/generated/netlink.c73
-rw-r--r--drivers/net/wireguard/generated/netlink.h30
-rw-r--r--drivers/net/wireguard/main.c2
-rw-r--r--drivers/net/wireguard/netlink.c107
-rw-r--r--drivers/net/wireguard/noise.c36
-rw-r--r--drivers/net/wireguard/peer.h2
-rw-r--r--drivers/net/wireguard/queueing.h17
-rw-r--r--drivers/net/wireguard/selftest/allowedips.c49
-rw-r--r--drivers/net/wireguard/send.c2
-rw-r--r--drivers/net/wireguard/socket.c4
-rw-r--r--drivers/net/wireguard/timers.c25
-rw-r--r--drivers/net/wireless/admtek/adm8211.c4
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c27
-rw-r--r--drivers/net/wireless/ath/ath.h6
-rw-r--r--drivers/net/wireless/ath/ath10k/Kconfig7
-rw-r--r--drivers/net/wireless/ath/ath10k/Makefile1
-rw-r--r--drivers/net/wireless/ath/ath10k/ahb.c28
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.c8
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c34
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c163
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h31
-rw-r--r--drivers/net/wireless/ath/ath10k/coredump.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c14
-rw-r--r--drivers/net/wireless/ath/ath10k/debugfs_sta.c9
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c9
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c18
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.c63
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h59
-rw-r--r--drivers/net/wireless/ath/ath10k/leds.c89
-rw-r--r--drivers/net/wireless/ath/ath10k/leds.h34
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c214
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c62
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h6
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi.c13
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.c36
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.c42
-rw-r--r--drivers/net/wireless/ath/ath10k/targaddrs.h3
-rw-r--r--drivers/net/wireless/ath/ath10k/testmode.c255
-rw-r--r--drivers/net/wireless/ath/ath10k/testmode_i.h15
-rw-r--r--drivers/net/wireless/ath/ath10k/thermal.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/trace.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/trace.h64
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/usb.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-ops.h32
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c9
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c95
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h54
-rw-r--r--drivers/net/wireless/ath/ath11k/Kconfig1
-rw-r--r--drivers/net/wireless/ath/ath11k/Makefile4
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.c121
-rw-r--r--drivers/net/wireless/ath/ath11k/ce.c17
-rw-r--r--drivers/net/wireless/ath/ath11k/ce.h6
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c596
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h72
-rw-r--r--drivers/net/wireless/ath/ath11k/coredump.c54
-rw-r--r--drivers/net/wireless/ath/ath11k/coredump.h79
-rw-r--r--drivers/net/wireless/ath/ath11k/dbring.c3
-rw-r--r--drivers/net/wireless/ath/ath11k/debug.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.c196
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.h10
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c15
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_sta.c11
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.c58
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.h30
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c295
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.h3
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.c37
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.h4
-rw-r--r--drivers/net/wireless/ath/ath11k/fw.c5
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.c72
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.h49
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_rx.c3
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_tx.h4
-rw-r--r--drivers/net/wireless/ath/ath11k/hif.h21
-rw-r--r--drivers/net/wireless/ath/ath11k/htc.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.h5
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c1217
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.h4
-rw-r--r--drivers/net/wireless/ath/ath11k/mhi.c37
-rw-r--r--drivers/net/wireless/ath/ath11k/mhi.h5
-rw-r--r--drivers/net/wireless/ath/ath11k/p2p.c149
-rw-r--r--drivers/net/wireless/ath/ath11k/p2p.h22
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.c268
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.h19
-rw-r--r--drivers/net/wireless/ath/ath11k/pcic.c49
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.c86
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.h10
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.c123
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.h5
-rw-r--r--drivers/net/wireless/ath/ath11k/spectral.c3
-rw-r--r--drivers/net/wireless/ath/ath11k/testmode.c82
-rw-r--r--drivers/net/wireless/ath/ath11k/thermal.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/trace.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/trace.h44
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c205
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.h103
-rw-r--r--drivers/net/wireless/ath/ath11k/wow.c45
-rw-r--r--drivers/net/wireless/ath/ath12k/Kconfig28
-rw-r--r--drivers/net/wireless/ath/ath12k/Makefile6
-rw-r--r--drivers/net/wireless/ath/ath12k/acpi.c510
-rw-r--r--drivers/net/wireless/ath/ath12k/acpi.h114
-rw-r--r--drivers/net/wireless/ath/ath12k/ahb.c1156
-rw-r--r--drivers/net/wireless/ath/ath12k/ahb.h80
-rw-r--r--drivers/net/wireless/ath/ath12k/ce.c109
-rw-r--r--drivers/net/wireless/ath/ath12k/ce.h24
-rw-r--r--drivers/net/wireless/ath/ath12k/core.c1467
-rw-r--r--drivers/net/wireless/ath/ath12k/core.h740
-rw-r--r--drivers/net/wireless/ath/ath12k/coredump.c54
-rw-r--r--drivers/net/wireless/ath/ath12k/coredump.h81
-rw-r--r--drivers/net/wireless/ath/ath12k/dbring.c3
-rw-r--r--drivers/net/wireless/ath/ath12k/debug.c10
-rw-r--r--drivers/net/wireless/ath/ath12k/debug.h17
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs.c1515
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs.h147
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c6178
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h2076
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs_sta.c337
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs_sta.h24
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.c530
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.h247
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_mon.c2819
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_mon.h17
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.c1702
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.h82
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_tx.c832
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_tx.h10
-rw-r--r--drivers/net/wireless/ath/ath12k/fw.c9
-rw-r--r--drivers/net/wireless/ath/ath12k/fw.h6
-rw-r--r--drivers/net/wireless/ath/ath12k/hal.c206
-rw-r--r--drivers/net/wireless/ath/ath12k/hal.h109
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_desc.h96
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_rx.c144
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_rx.h534
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_tx.h12
-rw-r--r--drivers/net/wireless/ath/ath12k/hif.h29
-rw-r--r--drivers/net/wireless/ath/ath12k/htc.c10
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.c623
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.h79
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.c10100
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.h144
-rw-r--r--drivers/net/wireless/ath/ath12k/mhi.c117
-rw-r--r--drivers/net/wireless/ath/ath12k/mhi.h7
-rw-r--r--drivers/net/wireless/ath/ath12k/p2p.c21
-rw-r--r--drivers/net/wireless/ath/ath12k/p2p.h3
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.c410
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.h12
-rw-r--r--drivers/net/wireless/ath/ath12k/peer.c244
-rw-r--r--drivers/net/wireless/ath/ath12k/peer.h59
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.c806
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.h55
-rw-r--r--drivers/net/wireless/ath/ath12k/reg.c646
-rw-r--r--drivers/net/wireless/ath/ath12k/reg.h28
-rw-r--r--drivers/net/wireless/ath/ath12k/rx_desc.h100
-rw-r--r--drivers/net/wireless/ath/ath12k/testmode.c395
-rw-r--r--drivers/net/wireless/ath/ath12k/testmode.h40
-rw-r--r--drivers/net/wireless/ath/ath12k/trace.h16
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.c4583
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.h1730
-rw-r--r--drivers/net/wireless/ath/ath12k/wow.c1029
-rw-r--r--drivers/net/wireless/ath/ath12k/wow.h62
-rw-r--r--drivers/net/wireless/ath/ath5k/ahb.c8
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/base.h2
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c14
-rw-r--r--drivers/net/wireless/ath/ath5k/pci.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c14
-rw-r--r--drivers/net/wireless/ath/ath5k/reg.h2
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/bmi.c4
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c14
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc.h6
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_mbox.c7
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_pipe.c5
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c6
-rw-r--r--drivers/net/wireless/ath/ath6kl/main.c6
-rw-r--r--drivers/net/wireless/ath/ath6kl/recovery.c6
-rw-r--r--drivers/net/wireless/ath/ath6kl/sdio.c22
-rw-r--r--drivers/net/wireless/ath/ath6kl/trace.h4
-rw-r--r--drivers/net/wireless/ath/ath6kl/txrx.c5
-rw-r--r--drivers/net/wireless/ath/ath6kl/usb.c6
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c4
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.h18
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c81
-rw-r--r--drivers/net/wireless/ath/ath9k/antenna.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_aic.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h12
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.c16
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c29
-rw-r--r--drivers/net/wireless/ath/ath9k/channel.c37
-rw-r--r--drivers/net/wireless/ath/ath9k/common-beacon.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/common-debug.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/common-init.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/common-spectral.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/common.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_debug.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/dynack.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.c12
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c13
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_debug.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c24
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c40
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c58
-rw-r--r--drivers/net/wireless/ath/ath9k/link.c37
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c40
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/wow.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c73
-rw-r--r--drivers/net/wireless/ath/carl9170/debug.c28
-rw-r--r--drivers/net/wireless/ath/carl9170/fw.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/mac.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c4
-rw-r--r--drivers/net/wireless/ath/carl9170/rx.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c6
-rw-r--r--drivers/net/wireless/ath/carl9170/usb.c51
-rw-r--r--drivers/net/wireless/ath/hw.c2
-rw-r--r--drivers/net/wireless/ath/key.c2
-rw-r--r--drivers/net/wireless/ath/main.c1
-rw-r--r--drivers/net/wireless/ath/testmode_i.h (renamed from drivers/net/wireless/ath/ath11k/testmode_i.h)54
-rw-r--r--drivers/net/wireless/ath/trace.h4
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.c6
-rw-r--r--drivers/net/wireless/ath/wcn36xx/hal.h74
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c24
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c60
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.h1
-rw-r--r--drivers/net/wireless/ath/wcn36xx/testmode.c2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/txrx.c4
-rw-r--r--drivers/net/wireless/ath/wcn36xx/wcn36xx.h9
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c32
-rw-r--r--drivers/net/wireless/ath/wil6210/fw.h1
-rw-r--r--drivers/net/wireless/ath/wil6210/fw_inc.c4
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c26
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c6
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c34
-rw-r--r--drivers/net/wireless/ath/wil6210/p2p.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/pm.c1
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.h4
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h2
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c27
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h8
-rw-r--r--drivers/net/wireless/atmel/at76c50x-usb.c64
-rw-r--r--drivers/net/wireless/atmel/at76c50x-usb.h2
-rw-r--r--drivers/net/wireless/broadcom/b43/debugfs.c27
-rw-r--r--drivers/net/wireless/broadcom/b43/main.c12
-rw-r--r--drivers/net/wireless/broadcom/b43/sysfs.c13
-rw-r--r--drivers/net/wireless/broadcom/b43/tables_lpphy.c20
-rw-r--r--drivers/net/wireless/broadcom/b43/tables_lpphy.h2
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/debugfs.c26
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/main.c6
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/sysfs.c16
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/Kconfig1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/Makefile6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/module.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c39
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c19
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c223
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h27
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c12
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c19
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h8
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/Makefile6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c314
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/fwil_types.h87
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/module.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c14
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c14
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c7
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h10
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h42
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c10
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h29
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c42
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.h9
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c32
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c131
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c41
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c50
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/Makefile6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/module.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.c8
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c34
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/antsel.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac_msg.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac_tx.h6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.h1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.h1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c37
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c33
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c447
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_hal.h27
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h11
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c26
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c28
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/pmu.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmutil/Makefile2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h5
-rw-r--r--drivers/net/wireless/intel/ipw2x00/Kconfig11
-rw-r--r--drivers/net/wireless/intel/ipw2x00/Makefile7
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2100.c17
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2100.h2
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.c29
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.h6
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw.h162
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_crypto.c246
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_crypto_ccmp.c438
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_crypto_tkip.c728
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_crypto_wep.c247
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_module.c38
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_rx.c102
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_spy.c233
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_tx.c7
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_wx.c43
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-mac.c42
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-rs.c5
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945.c4
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945.h7
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c17
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-rs.c21
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965.h2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/commands.h305
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.c52
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.h19
-rw-r--r--drivers/net/wireless/intel/iwlegacy/iwl-spectrum.h24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Kconfig16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Makefile33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/1000.c51
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/2000.c90
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c398
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/5000.c83
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/6000.c227
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/7000.c173
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/8000.c91
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/9000.c167
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/ax210.c296
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/bz.c198
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/dr.c89
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-fm.c52
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-gf.c85
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-hr.c89
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-jf.c111
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-pe.c17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-wh.c39
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/sc.c202
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/Makefile4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/agn.h25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/commands.h176
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/dev.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/devices.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c (renamed from drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c)486
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/led.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/lib.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c37
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/main.c185
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/power.c36
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/power.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rs.c23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rx.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rxon.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tt.c29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tt.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tx.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/ucode.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c307
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.h77
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/alive.h35
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/binding.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/cmdhdr.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/coex.h100
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/commands.h33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/config.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/context.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/d3.h268
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h46
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h70
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/debug.h51
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/dhc.h226
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/location.h322
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h408
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac.h21
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h209
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/offload.h16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/phy.h27
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/power.h221
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rs.h172
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rx.h350
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/scan.h169
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/sta.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/stats.h56
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h62
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h124
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c399
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.h20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/debugfs.c53
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dhc-utils.h75
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dump.c98
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/error-dump.h18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h112
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.h24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/init.c29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/paging.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/pnvm.c154
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/pnvm.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.c327
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.h104
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/rs.c137
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h55
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/smem.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.c249
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.h115
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h553
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h52
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c39
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-debug.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c395
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.h18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c394
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fh.h60
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.c123
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.h25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-modparams.h23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c334
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h113
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-utils.c119
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-utils.h (renamed from drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h)17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h122
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.c837
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h1028
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-utils.c195
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-utils.h58
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h21
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/main.c58
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/sap.h32
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/Makefile12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/agg.c677
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/agg.h127
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ap.c363
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ap.h45
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/coex.c40
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/coex.h15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/constants.h81
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/d3.c2065
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/d3.h51
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/debugfs.c1113
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/debugfs.h244
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ftm-initiator.c451
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ftm-initiator.h29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/fw.c548
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/hcmd.h56
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/iface.c703
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/iface.h250
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/key.c408
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/key.h46
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/led.c100
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/led.h29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/link.c909
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/link.h131
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/low_latency.c336
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/low_latency.h68
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mac80211.c2702
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mac80211.h13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mcc.c285
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mcc.h17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mld.c776
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mld.h615
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mlo.c1199
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mlo.h171
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/notif.c719
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/notif.h35
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/phy.c198
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/phy.h60
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/power.c391
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/power.h33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ptp.c321
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ptp.h45
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/regulatory.c383
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/regulatory.h23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/roc.c261
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/roc.h20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/rx.c2264
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/rx.h73
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/scan.c2179
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/scan.h173
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/session-protect.c222
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/session-protect.h102
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/sta.c1319
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/sta.h273
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/stats.c521
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/stats.h22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/Makefile5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/agg.c663
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/hcmd.c62
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/link-selection.c339
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/link.c110
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/module.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/rx.c353
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/utils.c503
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/utils.h140
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/thermal.c467
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/thermal.h36
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/time_sync.c240
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/time_sync.h26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tlc.c739
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tlc.h23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tx.c1394
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tx.h77
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/Makefile5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/binding.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/coex.c161
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/constants.h15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c937
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c108
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c357
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c382
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c101
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c481
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/led.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/link.c134
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c226
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c1149
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c55
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c580
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c134
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h460
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/nvm.c55
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/offloading.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c490
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c32
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/power.c153
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ptp.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/quota.c43
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c53
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c42
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.h29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c84
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c304
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c506
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c284
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tdls.c54
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tests/hcmd.c38
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tests/module.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c286
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tt.c159
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c443
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c290
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-v2.c (renamed from drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c)243
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c2479
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/internal.h (renamed from drivers/net/wireless/intel/iwlwifi/pcie/internal.h)460
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c (renamed from drivers/net/wireless/intel/iwlwifi/pcie/rx.c)235
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans-gen2.c (renamed from drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c)220
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c (renamed from drivers/net/wireless/intel/iwlwifi/pcie/trans.c)1580
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx-gen2.c1434
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c (renamed from drivers/net/wireless/intel/iwlwifi/pcie/tx.c)1366
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/iwl-context-info-v2.h (renamed from drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h)112
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/iwl-context-info.h (renamed from drivers/net/wireless/intel/iwlwifi/iwl-context-info.h)44
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c262
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/utils.c104
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/utils.h40
-rw-r--r--drivers/net/wireless/intel/iwlwifi/queue/tx.c1900
-rw-r--r--drivers/net/wireless/intel/iwlwifi/queue/tx.h191
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/Makefile4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/devinfo.c282
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/nvm_parse.c72
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/utils.c109
-rw-r--r--drivers/net/wireless/intersil/p54/fwio.c8
-rw-r--r--drivers/net/wireless/intersil/p54/main.c5
-rw-r--r--drivers/net/wireless/intersil/p54/p54.h1
-rw-r--r--drivers/net/wireless/intersil/p54/p54pci.c8
-rw-r--r--drivers/net/wireless/intersil/p54/p54spi.c18
-rw-r--r--drivers/net/wireless/intersil/p54/txrx.c15
-rw-r--r--drivers/net/wireless/marvell/libertas/Kconfig1
-rw-r--r--drivers/net/wireless/marvell/libertas/cfg.c16
-rw-r--r--drivers/net/wireless/marvell/libertas/cmd.c143
-rw-r--r--drivers/net/wireless/marvell/libertas/cmd.h15
-rw-r--r--drivers/net/wireless/marvell/libertas/cmdresp.c5
-rw-r--r--drivers/net/wireless/marvell/libertas/decl.h4
-rw-r--r--drivers/net/wireless/marvell/libertas/dev.h4
-rw-r--r--drivers/net/wireless/marvell/libertas/if_sdio.c3
-rw-r--r--drivers/net/wireless/marvell/libertas/if_spi.c3
-rw-r--r--drivers/net/wireless/marvell/libertas/if_usb.c4
-rw-r--r--drivers/net/wireless/marvell/libertas/main.c105
-rw-r--r--drivers/net/wireless/marvell/libertas/mesh.h1
-rw-r--r--drivers/net/wireless/marvell/libertas/radiotap.h4
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/cmd.c2
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/if_usb.c4
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/libertas_tf.h3
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/main.c12
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11h.c11
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n.h4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c27
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c534
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfp.c6
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cmdevt.c112
-rw-r--r--drivers/net/wireless/marvell/mwifiex/decl.h23
-rw-r--r--drivers/net/wireless/marvell/mwifiex/fw.h77
-rw-r--r--drivers/net/wireless/marvell/mwifiex/init.c99
-rw-r--r--drivers/net/wireless/marvell/mwifiex/ioctl.h7
-rw-r--r--drivers/net/wireless/marvell/mwifiex/join.c80
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c143
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h72
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c10
-rw-r--r--drivers/net/wireless/marvell/mwifiex/scan.c11
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c21
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.h2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmd.c182
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_event.c46
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_ioctl.c67
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_tx.c9
-rw-r--r--drivers/net/wireless/marvell/mwifiex/tdls.c11
-rw-r--r--drivers/net/wireless/marvell/mwifiex/txrx.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_cmd.c202
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_event.c16
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.c19
-rw-r--r--drivers/net/wireless/marvell/mwifiex/util.c128
-rw-r--r--drivers/net/wireless/marvell/mwifiex/wmm.c19
-rw-r--r--drivers/net/wireless/marvell/mwl8k.c198
-rw-r--r--drivers/net/wireless/mediatek/mt76/Kconfig6
-rw-r--r--drivers/net/wireless/mediatek/mt76/Makefile5
-rw-r--r--drivers/net/wireless/mediatek/mt76/agg-rx.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/channel.c416
-rw-r--r--drivers/net/wireless/mediatek/mt76/debugfs.c12
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c360
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.h107
-rw-r--r--drivers/net/wireless/mediatek/mt76/eeprom.c90
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c427
-rw-r--r--drivers/net/wireless/mediatek/mt76/mcu.c29
-rw-r--r--drivers/net/wireless/mediatek/mt76/mmio.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h477
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/Kconfig2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/Makefile2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/beacon.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/core.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/dma.c52
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/eeprom.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/init.c11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mac.c31
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mac.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/main.c54
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mcu.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/pci.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/regs.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/soc.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/Kconfig2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/Makefile2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/dma.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/init.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.c23
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/main.c63
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c40
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mmio.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mt7615_trace.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci.c16
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c16
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/regs.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/soc.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/testmode.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/trace.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb.c11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac.h23
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.c88
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h46
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c72
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c259
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h112
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/main.c23
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci.c16
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/phy.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02.h11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_dfs.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_dma.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c17
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_phy.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_phy.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_regs.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_trace.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_trace.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_util.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/Makefile2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/init.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mac.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mcu.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci.c13
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c35
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/phy.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c15
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c31
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/Kconfig2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/Makefile2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/coredump.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/coredump.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c166
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/dma.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c89
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h50
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/init.c64
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.c167
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/main.c253
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c495
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.h30
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mmio.c25
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h49
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/pci.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/regs.h9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/soc.c26
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/testmode.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/testmode.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/Kconfig2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/Makefile2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/init.c20
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mac.c84
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/main.c364
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.c157
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.h7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h18
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci.c46
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c15
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/regs.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio.c13
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c62
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/testmode.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/usb.c15
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/Kconfig2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/Makefile5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/debugfs.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/init.c56
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mac.c181
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mac.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/main.c1247
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mcu.c1549
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mcu.h171
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h104
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/pci.c85
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/pci_mac.c15
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/pci_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/regd.c265
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/regd.h19
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/regs.h6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/testmode.c201
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/usb.c27
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x.h156
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c125
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.h20
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_core.c201
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_debugfs.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_dma.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_mac.c15
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_regs.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_trace.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_trace.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_usb.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/Kconfig9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/Makefile3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/coredump.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/coredump.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c294
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/dma.c537
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c261
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/eeprom.h4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/init.c877
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mac.c1347
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mac.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/main.c1789
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.c1653
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.h78
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mmio.c320
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h417
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/npu.c352
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/pci.c31
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/regs.h97
-rw-r--r--drivers/net/wireless/mediatek/mt76/npu.c501
-rw-r--r--drivers/net/wireless/mediatek/mt76/pci.c25
-rw-r--r--drivers/net/wireless/mediatek/mt76/scan.c174
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio.c19
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio_txrx.c12
-rw-r--r--drivers/net/wireless/mediatek/mt76/testmode.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/testmode.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/trace.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/trace.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c118
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb_trace.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb_trace.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/util.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/util.h3
-rw-r--r--drivers/net/wireless/mediatek/mt76/wed.c26
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/dma.h2
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/eeprom.c2
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/main.c7
-rw-r--r--drivers/net/wireless/microchip/wilc1000/cfg80211.c156
-rw-r--r--drivers/net/wireless/microchip/wilc1000/cfg80211.h2
-rw-r--r--drivers/net/wireless/microchip/wilc1000/fw.h13
-rw-r--r--drivers/net/wireless/microchip/wilc1000/hif.c42
-rw-r--r--drivers/net/wireless/microchip/wilc1000/hif.h2
-rw-r--r--drivers/net/wireless/microchip/wilc1000/mon.c4
-rw-r--r--drivers/net/wireless/microchip/wilc1000/netdev.c99
-rw-r--r--drivers/net/wireless/microchip/wilc1000/netdev.h10
-rw-r--r--drivers/net/wireless/microchip/wilc1000/sdio.c248
-rw-r--r--drivers/net/wireless/microchip/wilc1000/spi.c35
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan.c476
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan.h55
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan_cfg.c37
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan_cfg.h5
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/mac.c19
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/mac.h4
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/usb.c47
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/bus.h2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c10
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.c2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.c19
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.h3
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/event.c12
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c14
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c6
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c6
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink.h21
-rw-r--r--drivers/net/wireless/ralink/rt2x00/Kconfig11
-rw-r--r--drivers/net/wireless/ralink/rt2x00/Makefile1
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c49
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.h5
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800mmio.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800pci.c3
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800soc.c116
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800usb.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00.h22
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00dev.c20
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00mac.c10
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00queue.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00soc.c153
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00soc.h29
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00usb.c2
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8180/Makefile2
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c13
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8187/Makefile2
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c34
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/8188e.c (renamed from drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188e.c)20
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/8188f.c (renamed from drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c)33
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/8192c.c (renamed from drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c)149
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/8192e.c (renamed from drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c)18
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/8192f.c (renamed from drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192f.c)18
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/8710b.c (renamed from drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8710b.c)18
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/8723a.c (renamed from drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c)160
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/8723b.c (renamed from drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c)41
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/Kconfig5
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/Makefile6
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/core.c (renamed from drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c)356
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/regs.h (renamed from drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h)1
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h14
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/Kconfig16
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/Makefile2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c22
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c79
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/cam.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/cam.h6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c32
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/efuse.c13
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/efuse.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c94
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/ps.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c25
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c218
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/Makefile11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/def.h (renamed from drivers/net/wireless/realtek/rtlwifi/rtl8192de/def.h)0
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/dm_common.c1061
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/dm_common.h79
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/fw_common.c370
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/fw_common.h49
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/hw_common.c1225
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/hw_common.h24
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/main.c9
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/phy_common.c856
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/phy_common.h111
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/reg.h (renamed from drivers/net/wireless/realtek/rtlwifi/rtl8192de/reg.h)162
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/rf_common.c359
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/rf_common.h13
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/trx_common.c516
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/trx_common.h405
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c1072
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.h91
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c375
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h37
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c1170
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c929
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h60
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c377
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.h5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c36
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c517
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h433
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/Makefile13
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/dm.c120
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/dm.h10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/fw.c63
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/fw.h9
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/hw.c1212
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/hw.h22
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/led.c10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/led.h9
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.c3118
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.h31
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/rf.c240
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/rf.h11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/sw.c394
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/table.c1675
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/table.h29
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/trx.c372
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/trx.h60
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c7
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c21
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c7
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c24
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c45
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c38
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c7
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.c63
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h57
-rw-r--r--drivers/net/wireless/realtek/rtw88/Kconfig86
-rw-r--r--drivers/net/wireless/realtek/rtw88/Makefile35
-rw-r--r--drivers/net/wireless/realtek/rtw88/bf.c8
-rw-r--r--drivers/net/wireless/realtek/rtw88/bf.h7
-rw-r--r--drivers/net/wireless/realtek/rtw88/coex.c103
-rw-r--r--drivers/net/wireless/realtek/rtw88/coex.h11
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.c362
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.h4
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c111
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.h20
-rw-r--r--drivers/net/wireless/realtek/rtw88/hci.h15
-rw-r--r--drivers/net/wireless/realtek/rtw88/led.c74
-rw-r--r--drivers/net/wireless/realtek/rtw88/led.h25
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.c85
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.h6
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c34
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c215
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h151
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.c83
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.h3
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.c297
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.h20
-rw-r--r--drivers/net/wireless/realtek/rtw88/reg.h281
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8703b.c2003
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8703b.h102
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8703b_tables.c902
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8703b_tables.h14
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723cs.c34
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723d.c752
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723d.h269
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723de.c3
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723ds.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723du.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723x.c788
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723x.h524
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8812a.c1125
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8812a.h10
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8812a_table.c2812
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8812a_table.h26
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8812au.c94
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814a.c2270
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814a.h62
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814a_table.c23930
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814a_table.h40
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814ae.c31
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814au.c54
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821a.c1226
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821a.h10
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821a_table.c2350
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821a_table.h21
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821au.c78
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.c128
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.h33
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821ce.c3
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821cs.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821cu.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.c116
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.h25
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822be.c3
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822bs.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822bu.c16
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.c131
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.h33
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822ce.c3
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822cs.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822cu.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw88xxa.c1989
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw88xxa.h175
-rw-r--r--drivers/net/wireless/realtek/rtw88/rx.c129
-rw-r--r--drivers/net/wireless/realtek/rtw88/rx.h75
-rw-r--r--drivers/net/wireless/realtek/rtw88/sar.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/sdio.c56
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.c20
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.h5
-rw-r--r--drivers/net/wireless/realtek/rtw88/usb.c538
-rw-r--r--drivers/net/wireless/realtek/rtw88/usb.h3
-rw-r--r--drivers/net/wireless/realtek/rtw88/util.c3
-rw-r--r--drivers/net/wireless/realtek/rtw89/Kconfig87
-rw-r--r--drivers/net/wireless/realtek/rtw89/Makefile41
-rw-r--r--drivers/net/wireless/realtek/rtw89/acpi.c1178
-rw-r--r--drivers/net/wireless/realtek/rtw89/acpi.h249
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.c668
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.h564
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.c1528
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.h138
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.c6025
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.h151
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.c3360
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.h2047
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.c2843
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.h3
-rw-r--r--drivers/net/wireless/realtek/rtw89/efuse.c150
-rw-r--r--drivers/net/wireless/realtek/rtw89/efuse.h2
-rw-r--r--drivers/net/wireless/realtek/rtw89/efuse_be.c52
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.c3758
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.h1412
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.c1675
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.h371
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac80211.c1380
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac_be.c136
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.c938
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.h278
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci_be.c98
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.c2743
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.h133
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy_be.c43
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.c340
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.h18
-rw-r--r--drivers/net/wireless/realtek/rtw89/reg.h333
-rw-r--r--drivers/net/wireless/realtek/rtw89/regd.c1140
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b.c275
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c461
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.h18
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_rfk_table.c77
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_rfk_table.h2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_table.c501
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851be.c11
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851bu.c66
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.c227
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.h4
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c308
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.h17
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ae.c11
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852au.c79
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b.c2060
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b.h122
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_common.c2106
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_common.h400
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c324
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.h23
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852be.c11
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt.c890
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt.h15
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c4279
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.h34
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk_table.c490
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk_table.h38
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bte.c108
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bu.c81
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.c362
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.h2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c379
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h17
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_table.c2706
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ce.c33
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852cu.c69
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a.c614
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a.h1
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c123
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922ae.c32
-rw-r--r--drivers/net/wireless/realtek/rtw89/sar.c755
-rw-r--r--drivers/net/wireless/realtek/rtw89/sar.h32
-rw-r--r--drivers/net/wireless/realtek/rtw89/ser.c83
-rw-r--r--drivers/net/wireless/realtek/rtw89/txrx.h119
-rw-r--r--drivers/net/wireless/realtek/rtw89/usb.c1071
-rw-r--r--drivers/net/wireless/realtek/rtw89/usb.h77
-rw-r--r--drivers/net/wireless/realtek/rtw89/util.c162
-rw-r--r--drivers/net/wireless/realtek/rtw89/util.h26
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.c1220
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.h126
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_hal.c6
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c20
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio.c31
-rw-r--r--drivers/net/wireless/rsi/rsi_debugfs.h1
-rw-r--r--drivers/net/wireless/silabs/wfx/bus.h1
-rw-r--r--drivers/net/wireless/silabs/wfx/bus_sdio.c55
-rw-r--r--drivers/net/wireless/silabs/wfx/bus_spi.c45
-rw-r--r--drivers/net/wireless/silabs/wfx/main.c33
-rw-r--r--drivers/net/wireless/silabs/wfx/sta.c36
-rw-r--r--drivers/net/wireless/silabs/wfx/sta.h9
-rw-r--r--drivers/net/wireless/st/cw1200/bh.c11
-rw-r--r--drivers/net/wireless/st/cw1200/cw1200_spi.c4
-rw-r--r--drivers/net/wireless/st/cw1200/main.c2
-rw-r--r--drivers/net/wireless/st/cw1200/pm.c2
-rw-r--r--drivers/net/wireless/st/cw1200/queue.c31
-rw-r--r--drivers/net/wireless/st/cw1200/queue.h1
-rw-r--r--drivers/net/wireless/st/cw1200/sta.c18
-rw-r--r--drivers/net/wireless/st/cw1200/sta.h7
-rw-r--r--drivers/net/wireless/ti/wl1251/acx.c35
-rw-r--r--drivers/net/wireless/ti/wl1251/acx.h3
-rw-r--r--drivers/net/wireless/ti/wl1251/cmd.c79
-rw-r--r--drivers/net/wireless/ti/wl1251/cmd.h5
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c7
-rw-r--r--drivers/net/wireless/ti/wl1251/reg.h6
-rw-r--r--drivers/net/wireless/ti/wl1251/sdio.c24
-rw-r--r--drivers/net/wireless/ti/wl1251/tx.c4
-rw-r--r--drivers/net/wireless/ti/wl1251/wl12xx_80211.h1
-rw-r--r--drivers/net/wireless/ti/wl12xx/main.c2
-rw-r--r--drivers/net/wireless/ti/wl12xx/reg.h6
-rw-r--r--drivers/net/wireless/ti/wl18xx/debugfs.c3
-rw-r--r--drivers/net/wireless/ti/wl18xx/event.c2
-rw-r--r--drivers/net/wireless/ti/wl18xx/main.c75
-rw-r--r--drivers/net/wireless/ti/wl18xx/tx.c13
-rw-r--r--drivers/net/wireless/ti/wl18xx/wl18xx.h62
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c43
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.h3
-rw-r--r--drivers/net/wireless/ti/wlcore/debugfs.c11
-rw-r--r--drivers/net/wireless/ti/wlcore/event.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c186
-rw-r--r--drivers/net/wireless/ti/wlcore/scan.c1
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c13
-rw-r--r--drivers/net/wireless/ti/wlcore/sysfs.c14
-rw-r--r--drivers/net/wireless/ti/wlcore/testmode.c4
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.c8
-rw-r--r--drivers/net/wireless/ti/wlcore/vendor_cmd.c3
-rw-r--r--drivers/net/wireless/ti/wlcore/wl12xx_80211.h1
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore_i.h10
-rw-r--r--drivers/net/wireless/virtual/mac80211_hwsim.c526
-rw-r--r--drivers/net/wireless/virtual/mac80211_hwsim.h26
-rw-r--r--drivers/net/wireless/virtual/virt_wifi.c34
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_mac.c12
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_mac.h2
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_usb.c15
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_devlink.c8
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem.c24
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_mmio.c2
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_pcie.c58
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_trace.c3
-rw-r--r--drivers/net/wwan/mhi_wwan_mbim.c33
-rw-r--r--drivers/net/wwan/qcom_bam_dmux.c15
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_cldma.c5
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_cldma.h2
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c8
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c2
-rw-r--r--drivers/net/wwan/t7xx/t7xx_modem_ops.c48
-rw-r--r--drivers/net/wwan/t7xx/t7xx_modem_ops.h9
-rw-r--r--drivers/net/wwan/t7xx/t7xx_netdev.c31
-rw-r--r--drivers/net/wwan/t7xx/t7xx_netdev.h2
-rw-r--r--drivers/net/wwan/t7xx/t7xx_pci.c139
-rw-r--r--drivers/net/wwan/t7xx/t7xx_pci.h4
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port.h3
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_proxy.c52
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_proxy.h1
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_trace.c3
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_wwan.c8
-rw-r--r--drivers/net/wwan/t7xx/t7xx_state_monitor.c60
-rw-r--r--drivers/net/wwan/t7xx/t7xx_state_monitor.h5
-rw-r--r--drivers/net/wwan/wwan_core.c28
-rw-r--r--drivers/net/wwan/wwan_hwsim.c2
-rw-r--r--drivers/net/xen-netback/common.h5
-rw-r--r--drivers/net/xen-netback/hash.c5
-rw-r--r--drivers/net/xen-netback/interface.c6
-rw-r--r--drivers/net/xen-netback/netback.c15
-rw-r--r--drivers/net/xen-netfront.c39
3946 files changed, 552411 insertions, 165819 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 8ca0bc223b30..ac12eaf11755 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -76,24 +76,11 @@ config WIREGUARD
tristate "WireGuard secure network tunnel"
depends on NET && INET
depends on IPV6 || !IPV6
- depends on !KMSAN # KMSAN doesn't support the crypto configs below
select NET_UDP_TUNNEL
select DST_CACHE
- select CRYPTO
select CRYPTO_LIB_CURVE25519
select CRYPTO_LIB_CHACHA20POLY1305
- select CRYPTO_CHACHA20_X86_64 if X86 && 64BIT
- select CRYPTO_POLY1305_X86_64 if X86 && 64BIT
- select CRYPTO_BLAKE2S_X86 if X86 && 64BIT
- select CRYPTO_CURVE25519_X86 if X86 && 64BIT
- select CRYPTO_CHACHA20_NEON if ARM || (ARM64 && KERNEL_MODE_NEON)
- select CRYPTO_POLY1305_NEON if ARM64 && KERNEL_MODE_NEON
- select CRYPTO_POLY1305_ARM if ARM
- select CRYPTO_BLAKE2S_ARM if ARM
- select CRYPTO_CURVE25519_NEON if ARM && KERNEL_MODE_NEON
- select CRYPTO_CHACHA_MIPS if CPU_MIPS32_R2
- select CRYPTO_POLY1305_MIPS if MIPS
- select CRYPTO_CHACHA_S390 if S390
+ select CRYPTO_LIB_UTILS
help
WireGuard is a secure, fast, and easy to use replacement for IPSec
that uses modern cryptography and clever networking tricks. It's
@@ -115,6 +102,21 @@ config WIREGUARD_DEBUG
Say N here unless you know what you're doing.
+config OVPN
+ tristate "OpenVPN data channel offload"
+ depends on NET && INET
+ depends on IPV6 || !IPV6
+ select DST_CACHE
+ select NET_UDP_TUNNEL
+ select CRYPTO
+ select CRYPTO_AES
+ select CRYPTO_GCM
+ select CRYPTO_CHACHA20POLY1305
+ select STREAM_PARSER
+ help
+ This module enhances the performance of the OpenVPN userspace software
+ by offloading the data channel processing to kernel space.
+
config EQUALIZER
tristate "EQL (serial line load balancing) support"
help
@@ -290,6 +292,19 @@ config GTP
 To compile this driver as a module, choose M here: the module
will be called gtp.
+config PFCP
+ tristate "Packet Forwarding Control Protocol (PFCP)"
+ depends on INET
+ select NET_UDP_TUNNEL
+ help
+ This allows one to create PFCP virtual interfaces that can be used to
+ set up software and hardware offload of PFCP packets.
+ Note that this module does not support the PFCP protocol in kernel space.
+ There is no support for parsing any PFCP messages.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pfcp.
+
config AMT
tristate "Automatic Multicast Tunneling (AMT)"
depends on INET && IP_MULTICAST
@@ -505,30 +520,6 @@ source "drivers/net/hippi/Kconfig"
source "drivers/net/ipa/Kconfig"
-config NET_SB1000
- tristate "General Instruments Surfboard 1000"
- depends on PNP
- help
- This is a driver for the General Instrument (also known as
- NextLevel) SURFboard 1000 internal
- cable modem. This is an ISA card which is used by a number of cable
- TV companies to provide cable modem access. It's a one-way
- downstream-only cable modem, meaning that your upstream net link is
- provided by your regular phone modem.
-
- At present this driver only compiles as a module, so say M here if
- you have this card. The module will be called sb1000. Then read
- <file:Documentation/networking/device_drivers/cable/sb1000.rst> for
- information on how to use this module, as it needs special ppp
- scripts for establishing a connection. Further documentation
- and the necessary scripts can be found at:
-
- <http://www.jacksonville.net/~fventuri/>
- <http://home.adelphia.net/~siglercm/sb1000.html>
- <http://linuxpower.cx/~cable/>
-
- If you don't have this card, of course say N.
-
source "drivers/net/phy/Kconfig"
source "drivers/net/pse-pd/Kconfig"
@@ -627,6 +618,8 @@ config NETDEVSIM
depends on PSAMPLE || PSAMPLE=n
depends on PTP_1588_CLOCK_MOCK || PTP_1588_CLOCK_MOCK=n
select NET_DEVLINK
+ select PAGE_POOL
+ select NET_SHAPER
help
This driver is a developer testing tool and software model that can
be used to test various control path networking APIs, especially
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 7cab36f94782..73bc63ecd65f 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_IPVLAN) += ipvlan/
obj-$(CONFIG_IPVTAP) += ipvlan/
obj-$(CONFIG_DUMMY) += dummy.o
obj-$(CONFIG_WIREGUARD) += wireguard/
+obj-$(CONFIG_OVPN) += ovpn/
obj-$(CONFIG_EQUALIZER) += eql.o
obj-$(CONFIG_IFB) += ifb.o
obj-$(CONFIG_MACSEC) += macsec.o
@@ -38,6 +39,7 @@ obj-$(CONFIG_GENEVE) += geneve.o
obj-$(CONFIG_BAREUDP) += bareudp.o
obj-$(CONFIG_GTP) += gtp.o
obj-$(CONFIG_NLMON) += nlmon.o
+obj-$(CONFIG_PFCP) += pfcp.o
obj-$(CONFIG_NET_VRF) += vrf.o
obj-$(CONFIG_VSOCKMON) += vsockmon.o
obj-$(CONFIG_MHI_NET) += mhi_net.o
@@ -48,7 +50,9 @@ obj-$(CONFIG_MHI_NET) += mhi_net.o
obj-$(CONFIG_ARCNET) += arcnet/
obj-$(CONFIG_CAIF) += caif/
obj-$(CONFIG_CAN) += can/
-obj-$(CONFIG_NET_DSA) += dsa/
+ifdef CONFIG_NET_DSA
+obj-y += dsa/
+endif
obj-$(CONFIG_ETHERNET) += ethernet/
obj-$(CONFIG_FDDI) += fddi/
obj-$(CONFIG_HIPPI) += hippi/
@@ -66,7 +70,6 @@ obj-$(CONFIG_PPPOL2TP) += ppp/
obj-$(CONFIG_PPTP) += ppp/
obj-$(CONFIG_SLIP) += slip/
obj-$(CONFIG_SLHC) += slip/
-obj-$(CONFIG_NET_SB1000) += sb1000.o
obj-$(CONFIG_SUNGEM_PHY) += sungem_phy.o
obj-$(CONFIG_WAN) += wan/
obj-$(CONFIG_WLAN) += wireless/
diff --git a/drivers/net/Space.c b/drivers/net/Space.c
index dc50797a2ed0..c01e2c2f7d6c 100644
--- a/drivers/net/Space.c
+++ b/drivers/net/Space.c
@@ -67,8 +67,7 @@ static int netdev_boot_setup_add(char *name, struct ifmap *map)
s = dev_boot_setup;
for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
- memset(s[i].name, 0, sizeof(s[i].name));
- strscpy(s[i].name, name, IFNAMSIZ);
+ strscpy_pad(s[i].name, name);
memcpy(&s[i].map, map, sizeof(s[i].map));
break;
}
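
The strscpy_pad() conversion above collapses the memset()+strscpy() pair into
a single call. A minimal userspace-style sketch of the behaviour it relies on,
assuming a fixed-size IFNAMSIZ destination; this only illustrates the
semantics and is not the in-tree implementation:

	#include <string.h>

	#define IFNAMSIZ 16

	/* Copy with truncation, NUL-terminate, and zero-fill the tail. */
	static void example_copy_padded(char dst[IFNAMSIZ], const char *src)
	{
		size_t len = strnlen(src, IFNAMSIZ - 1);

		memcpy(dst, src, len);
		memset(dst + len, 0, IFNAMSIZ - len);
	}
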
diff --git a/drivers/net/amt.c b/drivers/net/amt.c
index 6d15ab3bfbbc..902c817a0dea 100644
--- a/drivers/net/amt.c
+++ b/drivers/net/amt.c
@@ -11,6 +11,7 @@
#include <linux/net.h>
#include <linux/igmp.h>
#include <linux/workqueue.h>
+#include <net/flow.h>
#include <net/pkt_sched.h>
#include <net/net_namespace.h>
#include <net/ip.h>
@@ -28,6 +29,7 @@
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/inet_common.h>
+#include <net/inet_dscp.h>
#include <net/ip6_checksum.h>
static struct workqueue_struct *amt_wq;
@@ -979,7 +981,7 @@ static void amt_event_send_request(struct amt_dev *amt)
amt->req_cnt++;
out:
exp = min_t(u32, (1 * (1 << amt->req_cnt)), AMT_MAX_REQ_TIMEOUT);
- mod_delayed_work(amt_wq, &amt->req_wq, msecs_to_jiffies(exp * 1000));
+ mod_delayed_work(amt_wq, &amt->req_wq, secs_to_jiffies(exp));
}
static void amt_req_work(struct work_struct *work)
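
The timer conversion above replaces msecs_to_jiffies(exp * 1000) with
secs_to_jiffies(exp). A hedged sketch of the intent, assuming the usual kernel
jiffies helpers; illustrative only:

	/* Both forms are meant to yield the same delay; the seconds-based
	 * helper simply avoids the intermediate millisecond scaling. */
	static unsigned long example_request_delay(u32 exp_secs)
	{
		return secs_to_jiffies(exp_secs);
	}
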
@@ -1018,7 +1020,7 @@ static bool amt_send_membership_update(struct amt_dev *amt,
fl4.flowi4_oif = amt->stream_dev->ifindex;
fl4.daddr = amt->remote_ip;
fl4.saddr = amt->local_ip;
- fl4.flowi4_tos = AMT_TOS;
+ fl4.flowi4_dscp = inet_dsfield_to_dscp(AMT_TOS);
fl4.flowi4_proto = IPPROTO_UDP;
rt = ip_route_output_key(amt->net, &fl4);
if (IS_ERR(rt)) {
@@ -1046,7 +1048,8 @@ static bool amt_send_membership_update(struct amt_dev *amt,
amt->gw_port,
amt->relay_port,
false,
- false);
+ false,
+ 0);
amt_update_gw_status(amt, AMT_STATUS_SENT_UPDATE, true);
return false;
}
@@ -1103,7 +1106,8 @@ static void amt_send_multicast_data(struct amt_dev *amt,
amt->relay_port,
tunnel->source_port,
false,
- false);
+ false,
+ 0);
}
static bool amt_send_membership_query(struct amt_dev *amt,
@@ -1131,7 +1135,7 @@ static bool amt_send_membership_query(struct amt_dev *amt,
fl4.flowi4_oif = amt->stream_dev->ifindex;
fl4.daddr = tunnel->ip4;
fl4.saddr = amt->local_ip;
- fl4.flowi4_tos = AMT_TOS;
+ fl4.flowi4_dscp = inet_dsfield_to_dscp(AMT_TOS);
fl4.flowi4_proto = IPPROTO_UDP;
rt = ip_route_output_key(amt->net, &fl4);
if (IS_ERR(rt)) {
@@ -1161,7 +1165,8 @@ static bool amt_send_membership_query(struct amt_dev *amt,
amt->relay_port,
tunnel->source_port,
false,
- false);
+ false,
+ 0);
amt_update_relay_status(tunnel, AMT_STATUS_SENT_QUERY, true);
return false;
}
@@ -3098,9 +3103,9 @@ static void amt_link_setup(struct net_device *dev)
dev->hard_header_len = 0;
dev->addr_len = 0;
dev->priv_flags |= IFF_NO_QUEUE;
- dev->features |= NETIF_F_LLTX;
+ dev->lltx = true;
+ dev->netns_immutable = true;
dev->features |= NETIF_F_GSO_SOFTWARE;
- dev->features |= NETIF_F_NETNS_LOCAL;
dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM;
dev->hw_features |= NETIF_F_FRAGLIST | NETIF_F_RXCSUM;
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
@@ -3161,14 +3166,17 @@ static int amt_validate(struct nlattr *tb[], struct nlattr *data[],
return 0;
}
-static int amt_newlink(struct net *net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int amt_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *link_net = rtnl_newlink_link_net(params);
struct amt_dev *amt = netdev_priv(dev);
+ struct nlattr **data = params->data;
+ struct nlattr **tb = params->tb;
int err = -EINVAL;
- amt->net = net;
+ amt->net = link_net;
amt->mode = nla_get_u32(data[IFLA_AMT_MODE]);
if (data[IFLA_AMT_MAX_TUNNELS] &&
@@ -3183,7 +3191,7 @@ static int amt_newlink(struct net *net, struct net_device *dev,
amt->hash_buckets = AMT_HSIZE;
amt->nr_tunnels = 0;
get_random_bytes(&amt->hash_seed, sizeof(amt->hash_seed));
- amt->stream_dev = dev_get_by_index(net,
+ amt->stream_dev = dev_get_by_index(link_net,
nla_get_u32(data[IFLA_AMT_LINK]));
if (!amt->stream_dev) {
NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_LINK],
@@ -3206,15 +3214,11 @@ static int amt_newlink(struct net *net, struct net_device *dev,
goto err;
}
- if (data[IFLA_AMT_RELAY_PORT])
- amt->relay_port = nla_get_be16(data[IFLA_AMT_RELAY_PORT]);
- else
- amt->relay_port = htons(IANA_AMT_UDP_PORT);
+ amt->relay_port = nla_get_be16_default(data[IFLA_AMT_RELAY_PORT],
+ htons(IANA_AMT_UDP_PORT));
- if (data[IFLA_AMT_GATEWAY_PORT])
- amt->gw_port = nla_get_be16(data[IFLA_AMT_GATEWAY_PORT]);
- else
- amt->gw_port = htons(IANA_AMT_UDP_PORT);
+ amt->gw_port = nla_get_be16_default(data[IFLA_AMT_GATEWAY_PORT],
+ htons(IANA_AMT_UDP_PORT));
if (!amt->relay_port) {
NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_DISCOVERY_IP],
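
The nla_get_be16_default() conversions above fold the "use the attribute if
present, otherwise fall back to IANA_AMT_UDP_PORT" pattern into one call. A
hedged sketch of those semantics using the netlink attribute API; illustrative
only, not the in-tree definition:

	/* Return the attribute's value when supplied, else the default. */
	static __be16 example_be16_or_default(const struct nlattr *attr,
					      __be16 defvalue)
	{
		return attr ? nla_get_be16(attr) : defvalue;
	}
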
diff --git a/drivers/net/arcnet/Kconfig b/drivers/net/arcnet/Kconfig
index a51b9dab6d3a..d1d07a1d4fbc 100644
--- a/drivers/net/arcnet/Kconfig
+++ b/drivers/net/arcnet/Kconfig
@@ -4,7 +4,7 @@
#
menuconfig ARCNET
- depends on NETDEVICES && (ISA || PCI || PCMCIA)
+ depends on NETDEVICES && (ISA || PCI || PCMCIA) && HAS_IOPORT
tristate "ARCnet support"
help
If you have a network card of this type, say Y and check out the
diff --git a/drivers/net/arcnet/arcdevice.h b/drivers/net/arcnet/arcdevice.h
index b54275389f8a..bee60b377d7c 100644
--- a/drivers/net/arcnet/arcdevice.h
+++ b/drivers/net/arcnet/arcdevice.h
@@ -16,6 +16,7 @@
#ifdef __KERNEL__
#include <linux/interrupt.h>
+#include <linux/workqueue.h>
/*
* RECON_THRESHOLD is the maximum number of RECON messages to receive
@@ -268,7 +269,7 @@ struct arcnet_local {
struct net_device *dev;
int reply_status;
- struct tasklet_struct reply_tasklet;
+ struct work_struct reply_work;
/*
* Buffer management: an ARCnet card has 4 x 512-byte buffers, each of
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index 166bfc3c8e6c..882972604c82 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -54,6 +54,7 @@
#include <linux/errqueue.h>
#include <linux/leds.h>
+#include <linux/workqueue.h>
#include "arcdevice.h"
#include "com9026.h"
@@ -381,7 +382,7 @@ static void arcdev_setup(struct net_device *dev)
static void arcnet_timer(struct timer_list *t)
{
- struct arcnet_local *lp = from_timer(lp, t, timer);
+ struct arcnet_local *lp = timer_container_of(lp, t, timer);
struct net_device *dev = lp->dev;
spin_lock_irq(&lp->lock);
@@ -424,9 +425,9 @@ out:
rtnl_unlock();
}
-static void arcnet_reply_tasklet(struct tasklet_struct *t)
+static void arcnet_reply_work(struct work_struct *t)
{
- struct arcnet_local *lp = from_tasklet(lp, t, reply_tasklet);
+ struct arcnet_local *lp = from_work(lp, t, reply_work);
struct sk_buff *ackskb, *skb;
struct sock_exterr_skb *serr;
@@ -527,7 +528,7 @@ int arcnet_open(struct net_device *dev)
arc_cont(D_PROTO, "\n");
}
- tasklet_setup(&lp->reply_tasklet, arcnet_reply_tasklet);
+ INIT_WORK(&lp->reply_work, arcnet_reply_work);
arc_printk(D_INIT, dev, "arcnet_open: resetting card.\n");
@@ -615,12 +616,12 @@ int arcnet_close(struct net_device *dev)
struct arcnet_local *lp = netdev_priv(dev);
arcnet_led_event(dev, ARCNET_LED_EVENT_STOP);
- del_timer_sync(&lp->timer);
+ timer_delete_sync(&lp->timer);
netif_stop_queue(dev);
netif_carrier_off(dev);
- tasklet_kill(&lp->reply_tasklet);
+ cancel_work_sync(&lp->reply_work);
/* flush TX and disable RX */
lp->hw.intmask(dev, 0);
@@ -984,7 +985,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
->ack_tx(dev, ackstatus);
}
lp->reply_status = ackstatus;
- tasklet_hi_schedule(&lp->reply_tasklet);
+ queue_work(system_bh_highpri_wq, &lp->reply_work);
}
if (lp->cur_tx != -1)
release_arcbuf(dev, lp->cur_tx);
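
The arcnet changes above follow the common tasklet-to-workqueue conversion
pattern: the handler takes a struct work_struct, the containing object is
recovered with from_work(), and scheduling/teardown move to queue_work() and
cancel_work_sync(). A hedged sketch of that pattern with made-up names;
illustrative only:

	struct example_dev {
		struct work_struct reply_work;
		int reply_status;
	};

	static void example_reply_work(struct work_struct *t)
	{
		struct example_dev *ed = from_work(ed, t, reply_work);

		/* process ed->reply_status outside hard-IRQ context */
	}

	static void example_init(struct example_dev *ed)
	{
		INIT_WORK(&ed->reply_work, example_reply_work);
	}

	static void example_irq_path(struct example_dev *ed, int status)
	{
		ed->reply_status = status;
		queue_work(system_bh_highpri_wq, &ed->reply_work);
	}
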
diff --git a/drivers/net/arcnet/com20020-isa.c b/drivers/net/arcnet/com20020-isa.c
index 293a621e654c..fef2ac2852a8 100644
--- a/drivers/net/arcnet/com20020-isa.c
+++ b/drivers/net/arcnet/com20020-isa.c
@@ -137,6 +137,7 @@ module_param(backplane, int, 0);
module_param(clockp, int, 0);
module_param(clockm, int, 0);
+MODULE_DESCRIPTION("ARCnet COM20020 chipset ISA driver");
MODULE_LICENSE("GPL");
static struct net_device *my_dev;
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index c5e571ec94c9..0472bcdff130 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -251,18 +251,33 @@ static int com20020pci_probe(struct pci_dev *pdev,
card->tx_led.default_trigger = devm_kasprintf(&pdev->dev,
GFP_KERNEL, "arc%d-%d-tx",
dev->dev_id, i);
+ if (!card->tx_led.default_trigger) {
+ ret = -ENOMEM;
+ goto err_free_arcdev;
+ }
card->tx_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
"pci:green:tx:%d-%d",
dev->dev_id, i);
-
+ if (!card->tx_led.name) {
+ ret = -ENOMEM;
+ goto err_free_arcdev;
+ }
card->tx_led.dev = &dev->dev;
card->recon_led.brightness_set = led_recon_set;
card->recon_led.default_trigger = devm_kasprintf(&pdev->dev,
GFP_KERNEL, "arc%d-%d-recon",
dev->dev_id, i);
+ if (!card->recon_led.default_trigger) {
+ ret = -ENOMEM;
+ goto err_free_arcdev;
+ }
card->recon_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
"pci:red:recon:%d-%d",
dev->dev_id, i);
+ if (!card->recon_led.name) {
+ ret = -ENOMEM;
+ goto err_free_arcdev;
+ }
card->recon_led.dev = &dev->dev;
ret = devm_led_classdev_register(&pdev->dev, &card->tx_led);
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
index 339db6e4a1d5..0df3208783ad 100644
--- a/drivers/net/bareudp.c
+++ b/drivers/net/bareudp.c
@@ -61,12 +61,14 @@ struct bareudp_dev {
static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
struct metadata_dst *tun_dst = NULL;
+ IP_TUNNEL_DECLARE_FLAGS(key) = { };
struct bareudp_dev *bareudp;
unsigned short family;
unsigned int len;
__be16 proto;
void *oiph;
int err;
+ int nh;
bareudp = rcu_dereference_sk_user_data(sk);
if (!bareudp)
@@ -82,7 +84,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
if (skb_copy_bits(skb, BAREUDP_BASE_HLEN, &ipversion,
sizeof(ipversion))) {
- bareudp->dev->stats.rx_dropped++;
+ dev_dstats_rx_dropped(bareudp->dev);
goto drop;
}
ipversion >>= 4;
@@ -92,7 +94,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
} else if (ipversion == 6 && bareudp->multi_proto_mode) {
proto = htons(ETH_P_IPV6);
} else {
- bareudp->dev->stats.rx_dropped++;
+ dev_dstats_rx_dropped(bareudp->dev);
goto drop;
}
} else if (bareudp->ethertype == htons(ETH_P_MPLS_UC)) {
@@ -106,7 +108,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
ipv4_is_multicast(tunnel_hdr->daddr)) {
proto = htons(ETH_P_MPLS_MC);
} else {
- bareudp->dev->stats.rx_dropped++;
+ dev_dstats_rx_dropped(bareudp->dev);
goto drop;
}
} else {
@@ -122,7 +124,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
(addr_type & IPV6_ADDR_MULTICAST)) {
proto = htons(ETH_P_MPLS_MC);
} else {
- bareudp->dev->stats.rx_dropped++;
+ dev_dstats_rx_dropped(bareudp->dev);
goto drop;
}
}
@@ -134,20 +136,38 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
proto,
!net_eq(bareudp->net,
dev_net(bareudp->dev)))) {
- bareudp->dev->stats.rx_dropped++;
+ dev_dstats_rx_dropped(bareudp->dev);
goto drop;
}
- tun_dst = udp_tun_rx_dst(skb, family, TUNNEL_KEY, 0, 0);
+
+ __set_bit(IP_TUNNEL_KEY_BIT, key);
+
+ tun_dst = udp_tun_rx_dst(skb, family, key, 0, 0);
if (!tun_dst) {
- bareudp->dev->stats.rx_dropped++;
+ dev_dstats_rx_dropped(bareudp->dev);
goto drop;
}
skb_dst_set(skb, &tun_dst->dst);
skb->dev = bareudp->dev;
- oiph = skb_network_header(skb);
- skb_reset_network_header(skb);
skb_reset_mac_header(skb);
+ /* Save offset of outer header relative to skb->head,
+ * because we are going to reset the network header to the inner header
+ * and might change skb->head.
+ */
+ nh = skb_network_header(skb) - skb->head;
+
+ skb_reset_network_header(skb);
+
+ if (!pskb_inet_may_pull(skb)) {
+ DEV_STATS_INC(bareudp->dev, rx_length_errors);
+ DEV_STATS_INC(bareudp->dev, rx_errors);
+ goto drop;
+ }
+
+ /* Get the outer header. */
+ oiph = skb->head + nh;
+
if (!ipv6_mod_enabled() || family == AF_INET)
err = IP_ECN_decapsulate(oiph, skb);
else
@@ -165,8 +185,8 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
&((struct ipv6hdr *)oiph)->saddr);
}
if (err > 1) {
- ++bareudp->dev->stats.rx_frame_errors;
- ++bareudp->dev->stats.rx_errors;
+ DEV_STATS_INC(bareudp->dev, rx_frame_errors);
+ DEV_STATS_INC(bareudp->dev, rx_errors);
goto drop;
}
}
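
The bareudp receive path above remembers the outer header as an offset rather
than a pointer because pskb_inet_may_pull() may reallocate skb->head, which
would invalidate any raw pointer into the buffer. A hedged sketch of that
revalidation pattern; illustrative only:

	static void *example_outer_header(struct sk_buff *skb)
	{
		int nh = skb_network_header(skb) - skb->head; /* save offset */

		if (!pskb_inet_may_pull(skb))	/* may move skb->head */
			return NULL;

		return skb->head + nh;		/* recompute the pointer */
	}
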
@@ -174,7 +194,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
len = skb->len;
err = gro_cells_receive(&bareudp->gro_cells, skb);
if (likely(err == NET_RX_SUCCESS))
- dev_sw_netstats_rx_add(bareudp->dev, len);
+ dev_dstats_rx_add(bareudp->dev, len);
return 0;
drop:
@@ -285,10 +305,10 @@ static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev,
struct bareudp_dev *bareudp,
const struct ip_tunnel_info *info)
{
+ bool udp_sum = test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags);
bool xnet = !net_eq(bareudp->net, dev_net(bareudp->dev));
bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
struct socket *sock = rcu_dereference(bareudp->sock);
- bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
const struct ip_tunnel_key *key = &info->key;
struct rtable *rt;
__be16 sport, df;
@@ -297,6 +317,9 @@ static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be32 saddr;
int err;
+ if (skb_vlan_inet_prepare(skb, skb->protocol != htons(ETH_P_TEB)))
+ return -EINVAL;
+
if (!sock)
return -ESHUTDOWN;
@@ -316,7 +339,8 @@ static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev,
tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
ttl = key->ttl;
- df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
+ df = test_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, key->tun_flags) ?
+ htons(IP_DF) : 0;
skb_scrub_packet(skb, xnet);
err = -ENOSPC;
@@ -338,7 +362,8 @@ static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev,
udp_tunnel_xmit_skb(rt, sock->sk, skb, saddr, info->key.u.ipv4.dst,
tos, ttl, df, sport, bareudp->port,
!net_eq(bareudp->net, dev_net(bareudp->dev)),
- !(info->key.tun_flags & TUNNEL_CSUM));
+ !test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags),
+ 0);
return 0;
free_dst:
@@ -350,10 +375,10 @@ static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
struct bareudp_dev *bareudp,
const struct ip_tunnel_info *info)
{
+ bool udp_sum = test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags);
bool xnet = !net_eq(bareudp->net, dev_net(bareudp->dev));
bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
struct socket *sock = rcu_dereference(bareudp->sock);
- bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
const struct ip_tunnel_key *key = &info->key;
struct dst_entry *dst = NULL;
struct in6_addr saddr, daddr;
@@ -362,6 +387,9 @@ static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be16 sport;
int err;
+ if (skb_vlan_inet_prepare(skb, skb->protocol != htons(ETH_P_TEB)))
+ return -EINVAL;
+
if (!sock)
return -ESHUTDOWN;
@@ -402,7 +430,9 @@ static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
udp_tunnel6_xmit_skb(dst, sock->sk, skb, dev,
&saddr, &daddr, prio, ttl,
info->key.label, sport, bareudp->port,
- !(info->key.tun_flags & TUNNEL_CSUM));
+ !test_bit(IP_TUNNEL_CSUM_BIT,
+ info->key.tun_flags),
+ 0);
return 0;
free_dst:
@@ -460,11 +490,11 @@ tx_error:
dev_kfree_skb(skb);
if (err == -ELOOP)
- dev->stats.collisions++;
+ DEV_STATS_INC(dev, collisions);
else if (err == -ENETUNREACH)
- dev->stats.tx_carrier_errors++;
+ DEV_STATS_INC(dev, tx_carrier_errors);
- dev->stats.tx_errors++;
+ DEV_STATS_INC(dev, tx_errors);
return NETDEV_TX_OK;
}
@@ -546,7 +576,6 @@ static void bareudp_setup(struct net_device *dev)
SET_NETDEV_DEVTYPE(dev, &bareudp_type);
dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST;
dev->features |= NETIF_F_RXCSUM;
- dev->features |= NETIF_F_LLTX;
dev->features |= NETIF_F_GSO_SOFTWARE;
dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST;
dev->hw_features |= NETIF_F_RXCSUM;
@@ -559,8 +588,9 @@ static void bareudp_setup(struct net_device *dev)
dev->type = ARPHRD_NONE;
netif_keep_dst(dev);
dev->priv_flags |= IFF_NO_QUEUE;
+ dev->lltx = true;
dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
- dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
}
static int bareudp_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -669,10 +699,13 @@ static void bareudp_dellink(struct net_device *dev, struct list_head *head)
unregister_netdevice_queue(dev, head);
}
-static int bareudp_newlink(struct net *net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int bareudp_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *link_net = rtnl_newlink_link_net(params);
+ struct nlattr **data = params->data;
+ struct nlattr **tb = params->tb;
struct bareudp_conf conf;
int err;
@@ -680,7 +713,7 @@ static int bareudp_newlink(struct net *net, struct net_device *dev,
if (err)
return err;
- err = bareudp_configure(net, dev, &conf, extack);
+ err = bareudp_configure(link_net, dev, &conf, extack);
if (err)
return err;
@@ -745,27 +778,19 @@ static __net_init int bareudp_init_net(struct net *net)
return 0;
}
-static void bareudp_destroy_tunnels(struct net *net, struct list_head *head)
+static void __net_exit bareudp_exit_rtnl_net(struct net *net,
+ struct list_head *dev_kill_list)
{
struct bareudp_net *bn = net_generic(net, bareudp_net_id);
struct bareudp_dev *bareudp, *next;
list_for_each_entry_safe(bareudp, next, &bn->bareudp_list, next)
- unregister_netdevice_queue(bareudp->dev, head);
-}
-
-static void __net_exit bareudp_exit_batch_rtnl(struct list_head *net_list,
- struct list_head *dev_kill_list)
-{
- struct net *net;
-
- list_for_each_entry(net, net_list, exit_list)
- bareudp_destroy_tunnels(net, dev_kill_list);
+ bareudp_dellink(bareudp->dev, dev_kill_list);
}
static struct pernet_operations bareudp_net_ops = {
.init = bareudp_init_net,
- .exit_batch_rtnl = bareudp_exit_batch_rtnl,
+ .exit_rtnl = bareudp_exit_rtnl_net,
.id = &bareudp_net_id,
.size = sizeof(struct bareudp_net),
};
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index c6807e473ab7..1a8de2bf8655 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -76,6 +76,7 @@ enum ad_link_speed_type {
AD_LINK_SPEED_200000MBPS,
AD_LINK_SPEED_400000MBPS,
AD_LINK_SPEED_800000MBPS,
+ AD_LINK_SPEED_1600000MBPS,
};
/* compare MAC addresses */
@@ -95,13 +96,13 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker);
static void ad_mux_machine(struct port *port, bool *update_slave_arr);
static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port);
static void ad_tx_machine(struct port *port);
-static void ad_periodic_machine(struct port *port, struct bond_params *bond_params);
+static void ad_periodic_machine(struct port *port);
static void ad_port_selection_logic(struct port *port, bool *update_slave_arr);
static void ad_agg_selection_logic(struct aggregator *aggregator,
bool *update_slave_arr);
static void ad_clear_agg(struct aggregator *aggregator);
static void ad_initialize_agg(struct aggregator *aggregator);
-static void ad_initialize_port(struct port *port, int lacp_fast);
+static void ad_initialize_port(struct port *port, const struct bond_params *bond_params);
static void ad_enable_collecting(struct port *port);
static void ad_disable_distributing(struct port *port,
bool *update_slave_arr);
@@ -300,6 +301,7 @@ static inline int __check_agg_selection_timer(struct port *port)
* %AD_LINK_SPEED_200000MBPS
* %AD_LINK_SPEED_400000MBPS
* %AD_LINK_SPEED_800000MBPS
+ * %AD_LINK_SPEED_1600000MBPS
*/
static u16 __get_link_speed(struct port *port)
{
@@ -379,6 +381,10 @@ static u16 __get_link_speed(struct port *port)
speed = AD_LINK_SPEED_800000MBPS;
break;
+ case SPEED_1600000:
+ speed = AD_LINK_SPEED_1600000MBPS;
+ break;
+
default:
/* unknown speed value from ethtool. shouldn't happen */
if (slave->speed != SPEED_UNKNOWN)
@@ -436,6 +442,7 @@ static void __ad_actor_update_port(struct port *port)
port->actor_system = BOND_AD_INFO(bond).system.sys_mac_addr;
port->actor_system_priority = BOND_AD_INFO(bond).system.sys_priority;
+ port->actor_port_priority = SLAVE_AD_INFO(port->slave)->port_priority;
}
/* Conversions */
@@ -746,6 +753,18 @@ static int __agg_active_ports(struct aggregator *agg)
return active;
}
+static unsigned int __agg_ports_priority(const struct aggregator *agg)
+{
+ struct port *port = agg->lag_ports;
+ unsigned int prio = 0;
+
+ for (; port; port = port->next_port_in_aggregator)
+ if (port->is_enabled)
+ prio += port->actor_port_priority;
+
+ return prio;
+}
+
/**
* __get_agg_bandwidth - get the total bandwidth of an aggregator
* @aggregator: the aggregator we're looking at
@@ -809,6 +828,9 @@ static u32 __get_agg_bandwidth(struct aggregator *aggregator)
case AD_LINK_SPEED_800000MBPS:
bandwidth = nports * 800000;
break;
+ case AD_LINK_SPEED_1600000MBPS:
+ bandwidth = nports * 1600000;
+ break;
default:
bandwidth = 0; /* to silence the compiler */
}
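
For the new case above the arithmetic is direct: an aggregator with two active
1600000 Mbit/s (1.6 Tbit/s) ports reports 2 * 1600000 = 3200000 Mbit/s of
aggregate bandwidth.
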
@@ -982,6 +1004,17 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker)
return 0;
}
+static void ad_cond_set_peer_notif(struct port *port)
+{
+ struct bonding *bond = port->slave->bond;
+
+ if (bond->params.broadcast_neighbor && rtnl_trylock()) {
+ bond->send_peer_notif = bond->params.num_peer_notif *
+ max(1, bond->params.peer_notif_delay);
+ rtnl_unlock();
+ }
+}
+
/**
* ad_mux_machine - handle a port's mux state machine
* @port: the port we're looking at
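
ad_cond_set_peer_notif() above arms the same counter used by active-backup
mode: the counter ticks down once per monitor interval and a notification is
sent once per peer_notif_delay ticks, so it is initialised to the product of
the two parameters. A hedged sketch of that budget calculation; illustrative
only:

	static unsigned int example_peer_notif_budget(unsigned int num_peer_notif,
						      unsigned int peer_notif_delay)
	{
		return num_peer_notif * max(1U, peer_notif_delay);
	}
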
@@ -1296,10 +1329,16 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
* case of EXPIRED even if LINK_DOWN didn't arrive for
* the port.
*/
- port->partner_oper.port_state &= ~LACP_STATE_SYNCHRONIZATION;
port->sm_vars &= ~AD_PORT_MATCHED;
+ /* Based on IEEE 802.1AX-2014, Figure 6-18 - Receive
+ * machine state diagram, the state should be
+ * Partner_Oper_Port_State.Synchronization = FALSE;
+ * Partner_Oper_Port_State.LACP_Timeout = Short Timeout;
+ * start current_while_timer(Short Timeout);
+ * Actor_Oper_Port_State.Expired = TRUE;
+ */
+ port->partner_oper.port_state &= ~LACP_STATE_SYNCHRONIZATION;
port->partner_oper.port_state |= LACP_STATE_LACP_TIMEOUT;
- port->partner_oper.port_state |= LACP_STATE_LACP_ACTIVITY;
port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(AD_SHORT_TIMEOUT));
port->actor_oper_port_state |= LACP_STATE_EXPIRED;
port->sm_vars |= AD_PORT_CHURNED;
@@ -1378,7 +1417,7 @@ static void ad_tx_machine(struct port *port)
/* check if tx timer expired, to verify that we do not send more than
* 3 packets per second
*/
- if (port->sm_tx_timer_counter && !(--port->sm_tx_timer_counter)) {
+ if (!port->sm_tx_timer_counter || !(--port->sm_tx_timer_counter)) {
/* check if there is something to send */
if (port->ntt && (port->sm_vars & AD_PORT_LACP_ENABLED)) {
__update_lacpdu_from_port(port);
@@ -1393,23 +1432,23 @@ static void ad_tx_machine(struct port *port)
* again until demanded
*/
port->ntt = false;
+
+ /* restart tx timer (to verify that we will not
+ * exceed AD_MAX_TX_IN_SECOND)
+ */
+ port->sm_tx_timer_counter = ad_ticks_per_sec / AD_MAX_TX_IN_SECOND;
}
}
- /* restart tx timer(to verify that we will not exceed
- * AD_MAX_TX_IN_SECOND
- */
- port->sm_tx_timer_counter = ad_ticks_per_sec/AD_MAX_TX_IN_SECOND;
}
}
/**
* ad_periodic_machine - handle a port's periodic state machine
* @port: the port we're looking at
- * @bond_params: bond parameters we will use
*
 * Turn ntt flag on periodically to perform periodic transmission of lacpdu's.
*/
-static void ad_periodic_machine(struct port *port, struct bond_params *bond_params)
+static void ad_periodic_machine(struct port *port)
{
periodic_states_t last_state;
@@ -1418,8 +1457,7 @@ static void ad_periodic_machine(struct port *port, struct bond_params *bond_para
/* check if port was reinitialized */
if (((port->sm_vars & AD_PORT_BEGIN) || !(port->sm_vars & AD_PORT_LACP_ENABLED) || !port->is_enabled) ||
- (!(port->actor_oper_port_state & LACP_STATE_LACP_ACTIVITY) && !(port->partner_oper.port_state & LACP_STATE_LACP_ACTIVITY)) ||
- !bond_params->lacp_active) {
+ (!(port->actor_oper_port_state & LACP_STATE_LACP_ACTIVITY) && !(port->partner_oper.port_state & LACP_STATE_LACP_ACTIVITY))) {
port->sm_periodic_state = AD_NO_PERIODIC;
}
/* check if state machine should change state */
@@ -1691,6 +1729,9 @@ static struct aggregator *ad_agg_selection_test(struct aggregator *best,
* 4. Therefore, current and best both have partner replies or
* both do not, so perform selection policy:
*
+ * BOND_AD_PRIO: Select by total priority of ports. If priority
+ * is equal, select by count.
+ *
* BOND_AD_COUNT: Select by count of ports. If count is equal,
* select by bandwidth.
*
@@ -1712,6 +1753,14 @@ static struct aggregator *ad_agg_selection_test(struct aggregator *best,
return best;
switch (__get_agg_selection_mode(curr->lag_ports)) {
+ case BOND_AD_PRIO:
+ if (__agg_ports_priority(curr) > __agg_ports_priority(best))
+ return curr;
+
+ if (__agg_ports_priority(curr) < __agg_ports_priority(best))
+ return best;
+
+ fallthrough;
case BOND_AD_COUNT:
if (__agg_active_ports(curr) > __agg_active_ports(best))
return curr;
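
With ad_select=prio the selection above degrades gracefully: equal total port
priority falls through to the existing count comparison, which in turn falls
back to bandwidth. A hedged sketch of that tie-break cascade, reusing the
helper names from this file; illustrative only:

	static struct aggregator *example_pick(struct aggregator *a,
					       struct aggregator *b)
	{
		if (__agg_ports_priority(a) != __agg_ports_priority(b))
			return __agg_ports_priority(a) >
			       __agg_ports_priority(b) ? a : b;

		if (__agg_active_ports(a) != __agg_active_ports(b))
			return __agg_active_ports(a) >
			       __agg_active_ports(b) ? a : b;

		return __get_agg_bandwidth(a) >=
		       __get_agg_bandwidth(b) ? a : b;
	}
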
@@ -1777,6 +1826,10 @@ static int agg_device_up(const struct aggregator *agg)
* (slaves), and reselect whenever a link state change takes place or the
* set of slaves in the bond changes.
*
+ * BOND_AD_PRIO: select the aggregator with highest total priority of ports
+ * (slaves), and reselect whenever a link state change takes place or the
+ * set of slaves in the bond changes.
+ *
* FIXME: this function MUST be called with the first agg in the bond, or
* __get_active_agg() won't work correctly. This function should be better
* called with the bond itself, and retrieve the first agg from it.
@@ -1943,16 +1996,16 @@ static void ad_initialize_agg(struct aggregator *aggregator)
/**
* ad_initialize_port - initialize a given port's parameters
* @port: the port we're looking at
- * @lacp_fast: boolean. whether fast periodic should be used
+ * @bond_params: bond parameters we will use
*/
-static void ad_initialize_port(struct port *port, int lacp_fast)
+static void ad_initialize_port(struct port *port, const struct bond_params *bond_params)
{
static const struct port_params tmpl = {
.system_priority = 0xffff,
.key = 1,
.port_number = 1,
.port_priority = 0xff,
- .port_state = 1,
+ .port_state = 0,
};
static const struct lacpdu lacpdu = {
.subtype = 0x01,
@@ -1970,12 +2023,14 @@ static void ad_initialize_port(struct port *port, int lacp_fast)
port->actor_port_priority = 0xff;
port->actor_port_aggregator_identifier = 0;
port->ntt = false;
- port->actor_admin_port_state = LACP_STATE_AGGREGATION |
- LACP_STATE_LACP_ACTIVITY;
- port->actor_oper_port_state = LACP_STATE_AGGREGATION |
- LACP_STATE_LACP_ACTIVITY;
+ port->actor_admin_port_state = LACP_STATE_AGGREGATION;
+ port->actor_oper_port_state = LACP_STATE_AGGREGATION;
+ if (bond_params->lacp_active) {
+ port->actor_admin_port_state |= LACP_STATE_LACP_ACTIVITY;
+ port->actor_oper_port_state |= LACP_STATE_LACP_ACTIVITY;
+ }
- if (lacp_fast)
+ if (bond_params->lacp_fast)
port->actor_oper_port_state |= LACP_STATE_LACP_TIMEOUT;
memcpy(&port->partner_admin, &tmpl, sizeof(tmpl));
@@ -2061,6 +2116,8 @@ static void ad_enable_collecting_distributing(struct port *port,
__enable_port(port);
/* Slave array needs update */
*update_slave_arr = true;
+ /* Should notify peers if possible */
+ ad_cond_set_peer_notif(port);
}
}
@@ -2187,7 +2244,10 @@ void bond_3ad_bind_slave(struct slave *slave)
/* port initialization */
port = &(SLAVE_AD_INFO(slave)->port);
- ad_initialize_port(port, bond->params.lacp_fast);
+ ad_initialize_port(port, &bond->params);
+
+ /* Port priority is initialized. Update it to slave's ad info */
+ SLAVE_AD_INFO(slave)->port_priority = port->actor_port_priority;
port->slave = slave;
port->actor_port_number = SLAVE_AD_INFO(slave)->id;
@@ -2499,7 +2559,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
}
ad_rx_machine(NULL, port);
- ad_periodic_machine(port, &bond->params);
+ ad_periodic_machine(port);
ad_port_selection_logic(port, &update_slave_arr);
ad_mux_machine(port, &update_slave_arr);
ad_tx_machine(port);
@@ -2869,6 +2929,31 @@ void bond_3ad_update_lacp_rate(struct bonding *bond)
spin_unlock_bh(&bond->mode_lock);
}
+/**
+ * bond_3ad_update_lacp_active - change the lacp active
+ * @bond: bonding struct
+ *
+ * Update actor_oper_port_state when lacp_active is modified.
+ */
+void bond_3ad_update_lacp_active(struct bonding *bond)
+{
+ struct port *port = NULL;
+ struct list_head *iter;
+ struct slave *slave;
+ int lacp_active;
+
+ lacp_active = bond->params.lacp_active;
+ spin_lock_bh(&bond->mode_lock);
+ bond_for_each_slave(bond, slave, iter) {
+ port = &(SLAVE_AD_INFO(slave)->port);
+ if (lacp_active)
+ port->actor_oper_port_state |= LACP_STATE_LACP_ACTIVITY;
+ else
+ port->actor_oper_port_state &= ~LACP_STATE_LACP_ACTIVITY;
+ }
+ spin_unlock_bh(&bond->mode_lock);
+}
+
size_t bond_3ad_stats_size(void)
{
return nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_LACPDU_RX */
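
bond_3ad_update_lacp_active() above lets a runtime change of the lacp_active
option propagate to every port's oper state without re-initialising the ports.
A hedged sketch of how the option path is expected to drive it (the setter
name here is assumed, not taken from this diff); illustrative only:

	static int example_set_lacp_active(struct bonding *bond, bool active)
	{
		bond->params.lacp_active = active;
		bond_3ad_update_lacp_active(bond);
		return 0;
	}
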
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 7edf0fd58c34..2d37b07c8215 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -1035,7 +1035,7 @@ static int alb_set_slave_mac_addr(struct slave *slave, const u8 addr[],
*/
memcpy(ss.__data, addr, len);
ss.ss_family = dev->type;
- if (dev_set_mac_address(dev, (struct sockaddr *)&ss, NULL)) {
+ if (dev_set_mac_address(dev, &ss, NULL)) {
slave_err(slave->bond->dev, dev, "dev_set_mac_address on slave failed! ALB mode requires that the base driver support setting the hw address also when the network device's interface is open\n");
return -EOPNOTSUPP;
}
@@ -1273,8 +1273,7 @@ unwind:
break;
bond_hw_addr_copy(tmp_addr, rollback_slave->dev->dev_addr,
rollback_slave->dev->addr_len);
- dev_set_mac_address(rollback_slave->dev,
- (struct sockaddr *)&ss, NULL);
+ dev_set_mac_address(rollback_slave->dev, &ss, NULL);
dev_addr_set(rollback_slave->dev, tmp_addr);
}
@@ -1763,8 +1762,7 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
bond->dev->addr_len);
ss.ss_family = bond->dev->type;
/* we don't care if it can't change its mac, best effort */
- dev_set_mac_address(new_slave->dev, (struct sockaddr *)&ss,
- NULL);
+ dev_set_mac_address(new_slave->dev, &ss, NULL);
dev_addr_set(new_slave->dev, tmp_addr);
}
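
The bond_alb changes above track the dev_set_mac_address() signature change:
the function now takes a struct sockaddr_storage pointer directly, so the
(struct sockaddr *) casts disappear. A hedged sketch of a caller under the new
signature; illustrative only:

	static int example_set_mac(struct net_device *dev, const u8 *addr,
				   unsigned int len)
	{
		struct sockaddr_storage ss;

		memcpy(ss.__data, addr, len);
		ss.ss_family = dev->type;

		return dev_set_mac_address(dev, &ss, NULL);
	}
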
diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c
index b19492a7f6ad..8adbec7c5084 100644
--- a/drivers/net/bonding/bond_debugfs.c
+++ b/drivers/net/bonding/bond_debugfs.c
@@ -63,13 +63,8 @@ void bond_debug_unregister(struct bonding *bond)
void bond_debug_reregister(struct bonding *bond)
{
- struct dentry *d;
-
- d = debugfs_rename(bonding_debug_root, bond->debug_dir,
- bonding_debug_root, bond->dev->name);
- if (!IS_ERR(d)) {
- bond->debug_dir = d;
- } else {
+ int err = debugfs_change_name(bond->debug_dir, "%s", bond->dev->name);
+ if (err) {
netdev_warn(bond->dev, "failed to reregister, so just unregister old one\n");
bond_debug_unregister(bond);
}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 2c5ed0a7cb18..3d56339a8a10 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -90,6 +90,7 @@
#include <net/tls.h>
#endif
#include <net/ip6_route.h>
+#include <net/netdev_lock.h>
#include <net/xdp.h>
#include "bonding_priv.h"
@@ -141,8 +142,7 @@ module_param(downdelay, int, 0);
MODULE_PARM_DESC(downdelay, "Delay before considering link down, "
"in milliseconds");
module_param(use_carrier, int, 0);
-MODULE_PARM_DESC(use_carrier, "Use netif_carrier_ok (vs MII ioctls) in miimon; "
- "0 for off, 1 for on (default)");
+MODULE_PARM_DESC(use_carrier, "option obsolete, use_carrier cannot be disabled");
module_param(mode, charp, 0);
MODULE_PARM_DESC(mode, "Mode of operation; 0 for balance-rr, "
"1 for active-backup, 2 for balance-xor, "
@@ -211,6 +211,8 @@ atomic_t netpoll_block_tx = ATOMIC_INIT(0);
unsigned int bond_net_id __read_mostly;
+DEFINE_STATIC_KEY_FALSE(bond_bcast_neigh_enabled);
+
static const struct flow_dissector_key flow_keys_bonding_keys[] = {
{
.key_id = FLOW_DISSECTOR_KEY_CONTROL,
@@ -322,9 +324,9 @@ static bool bond_sk_check(struct bonding *bond)
}
}
-static bool bond_xdp_check(struct bonding *bond)
+bool bond_xdp_check(struct bonding *bond, int mode)
{
- switch (BOND_MODE(bond)) {
+ switch (mode) {
case BOND_MODE_ROUNDROBIN:
case BOND_MODE_ACTIVEBACKUP:
return true;
@@ -419,14 +421,49 @@ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
#ifdef CONFIG_XFRM_OFFLOAD
/**
+ * bond_ipsec_dev - Get active device for IPsec offload
+ * @xs: pointer to transformer state struct
+ *
+ * Context: caller must hold rcu_read_lock.
+ *
+ * Return: the device for ipsec offload, or NULL if not exist.
+ **/
+static struct net_device *bond_ipsec_dev(struct xfrm_state *xs)
+{
+ struct net_device *bond_dev = xs->xso.dev;
+ struct bonding *bond;
+ struct slave *slave;
+
+ bond = netdev_priv(bond_dev);
+ if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)
+ return NULL;
+
+ slave = rcu_dereference(bond->curr_active_slave);
+ if (!slave)
+ return NULL;
+
+ if (!xs->xso.real_dev)
+ return NULL;
+
+ if (xs->xso.real_dev != slave->dev)
+ pr_warn_ratelimited("%s: (slave %s): not same with IPsec offload real dev %s\n",
+ bond_dev->name, slave->dev->name, xs->xso.real_dev->name);
+
+ return slave->dev;
+}
+
+/**
* bond_ipsec_add_sa - program device with a security association
+ * @bond_dev: pointer to the bond net device
* @xs: pointer to transformer state struct
* @extack: extack point to fill failure reason
**/
-static int bond_ipsec_add_sa(struct xfrm_state *xs,
+static int bond_ipsec_add_sa(struct net_device *bond_dev,
+ struct xfrm_state *xs,
struct netlink_ext_ack *extack)
{
- struct net_device *bond_dev = xs->xso.dev;
+ struct net_device *real_dev;
+ netdevice_tracker tracker;
struct bond_ipsec *ipsec;
struct bonding *bond;
struct slave *slave;
@@ -438,112 +475,185 @@ static int bond_ipsec_add_sa(struct xfrm_state *xs,
rcu_read_lock();
bond = netdev_priv(bond_dev);
slave = rcu_dereference(bond->curr_active_slave);
- if (!slave) {
- rcu_read_unlock();
- return -ENODEV;
+ real_dev = slave ? slave->dev : NULL;
+ netdev_hold(real_dev, &tracker, GFP_ATOMIC);
+ rcu_read_unlock();
+ if (!real_dev) {
+ err = -ENODEV;
+ goto out;
}
- if (!slave->dev->xfrmdev_ops ||
- !slave->dev->xfrmdev_ops->xdo_dev_state_add ||
- netif_is_bond_master(slave->dev)) {
+ if (!real_dev->xfrmdev_ops ||
+ !real_dev->xfrmdev_ops->xdo_dev_state_add ||
+ netif_is_bond_master(real_dev)) {
NL_SET_ERR_MSG_MOD(extack, "Slave does not support ipsec offload");
- rcu_read_unlock();
- return -EINVAL;
+ err = -EINVAL;
+ goto out;
}
- ipsec = kmalloc(sizeof(*ipsec), GFP_ATOMIC);
+ ipsec = kmalloc(sizeof(*ipsec), GFP_KERNEL);
if (!ipsec) {
- rcu_read_unlock();
- return -ENOMEM;
+ err = -ENOMEM;
+ goto out;
}
- xs->xso.real_dev = slave->dev;
- err = slave->dev->xfrmdev_ops->xdo_dev_state_add(xs, extack);
+ err = real_dev->xfrmdev_ops->xdo_dev_state_add(real_dev, xs, extack);
if (!err) {
+ xs->xso.real_dev = real_dev;
ipsec->xs = xs;
INIT_LIST_HEAD(&ipsec->list);
- spin_lock_bh(&bond->ipsec_lock);
+ mutex_lock(&bond->ipsec_lock);
list_add(&ipsec->list, &bond->ipsec_list);
- spin_unlock_bh(&bond->ipsec_lock);
+ mutex_unlock(&bond->ipsec_lock);
} else {
kfree(ipsec);
}
- rcu_read_unlock();
+out:
+ netdev_put(real_dev, &tracker);
return err;
}
static void bond_ipsec_add_sa_all(struct bonding *bond)
{
struct net_device *bond_dev = bond->dev;
+ struct net_device *real_dev;
struct bond_ipsec *ipsec;
struct slave *slave;
- rcu_read_lock();
- slave = rcu_dereference(bond->curr_active_slave);
- if (!slave)
- goto out;
+ slave = rtnl_dereference(bond->curr_active_slave);
+ real_dev = slave ? slave->dev : NULL;
+ if (!real_dev)
+ return;
- if (!slave->dev->xfrmdev_ops ||
- !slave->dev->xfrmdev_ops->xdo_dev_state_add ||
- netif_is_bond_master(slave->dev)) {
- spin_lock_bh(&bond->ipsec_lock);
+ mutex_lock(&bond->ipsec_lock);
+ if (!real_dev->xfrmdev_ops ||
+ !real_dev->xfrmdev_ops->xdo_dev_state_add ||
+ netif_is_bond_master(real_dev)) {
if (!list_empty(&bond->ipsec_list))
- slave_warn(bond_dev, slave->dev,
+ slave_warn(bond_dev, real_dev,
"%s: no slave xdo_dev_state_add\n",
__func__);
- spin_unlock_bh(&bond->ipsec_lock);
goto out;
}
- spin_lock_bh(&bond->ipsec_lock);
list_for_each_entry(ipsec, &bond->ipsec_list, list) {
- ipsec->xs->xso.real_dev = slave->dev;
- if (slave->dev->xfrmdev_ops->xdo_dev_state_add(ipsec->xs, NULL)) {
- slave_warn(bond_dev, slave->dev, "%s: failed to add SA\n", __func__);
- ipsec->xs->xso.real_dev = NULL;
+ /* If new state is added before ipsec_lock acquired */
+ if (ipsec->xs->xso.real_dev == real_dev)
+ continue;
+
+ if (real_dev->xfrmdev_ops->xdo_dev_state_add(real_dev,
+ ipsec->xs, NULL)) {
+ slave_warn(bond_dev, real_dev, "%s: failed to add SA\n", __func__);
+ continue;
}
+
+ spin_lock_bh(&ipsec->xs->lock);
+ /* xs might have been killed by the user during the migration
+ * to the new dev, but bond_ipsec_del_sa() should have done
+ * nothing, as xso.real_dev is NULL.
+ * Delete it from the device we just added it to. The pending
+ * bond_ipsec_free_sa() call will do the rest of the cleanup.
+ */
+ if (ipsec->xs->km.state == XFRM_STATE_DEAD &&
+ real_dev->xfrmdev_ops->xdo_dev_state_delete)
+ real_dev->xfrmdev_ops->xdo_dev_state_delete(real_dev,
+ ipsec->xs);
+ ipsec->xs->xso.real_dev = real_dev;
+ spin_unlock_bh(&ipsec->xs->lock);
}
- spin_unlock_bh(&bond->ipsec_lock);
out:
- rcu_read_unlock();
+ mutex_unlock(&bond->ipsec_lock);
}
/**
* bond_ipsec_del_sa - clear out this specific SA
+ * @bond_dev: pointer to the bond net device
* @xs: pointer to transformer state struct
**/
-static void bond_ipsec_del_sa(struct xfrm_state *xs)
+static void bond_ipsec_del_sa(struct net_device *bond_dev,
+ struct xfrm_state *xs)
{
- struct net_device *bond_dev = xs->xso.dev;
+ struct net_device *real_dev;
+
+ if (!bond_dev || !xs->xso.real_dev)
+ return;
+
+ real_dev = xs->xso.real_dev;
+
+ if (!real_dev->xfrmdev_ops ||
+ !real_dev->xfrmdev_ops->xdo_dev_state_delete ||
+ netif_is_bond_master(real_dev)) {
+ slave_warn(bond_dev, real_dev, "%s: no slave xdo_dev_state_delete\n", __func__);
+ return;
+ }
+
+ real_dev->xfrmdev_ops->xdo_dev_state_delete(real_dev, xs);
+}
+
+static void bond_ipsec_del_sa_all(struct bonding *bond)
+{
+ struct net_device *bond_dev = bond->dev;
+ struct net_device *real_dev;
struct bond_ipsec *ipsec;
- struct bonding *bond;
struct slave *slave;
+ slave = rtnl_dereference(bond->curr_active_slave);
+ real_dev = slave ? slave->dev : NULL;
+ if (!real_dev)
+ return;
+
+ mutex_lock(&bond->ipsec_lock);
+ list_for_each_entry(ipsec, &bond->ipsec_list, list) {
+ if (!ipsec->xs->xso.real_dev)
+ continue;
+
+ if (!real_dev->xfrmdev_ops ||
+ !real_dev->xfrmdev_ops->xdo_dev_state_delete ||
+ netif_is_bond_master(real_dev)) {
+ slave_warn(bond_dev, real_dev,
+ "%s: no slave xdo_dev_state_delete\n",
+ __func__);
+ continue;
+ }
+
+ spin_lock_bh(&ipsec->xs->lock);
+ ipsec->xs->xso.real_dev = NULL;
+ /* Don't double delete states killed by the user. */
+ if (ipsec->xs->km.state != XFRM_STATE_DEAD)
+ real_dev->xfrmdev_ops->xdo_dev_state_delete(real_dev,
+ ipsec->xs);
+ spin_unlock_bh(&ipsec->xs->lock);
+
+ if (real_dev->xfrmdev_ops->xdo_dev_state_free)
+ real_dev->xfrmdev_ops->xdo_dev_state_free(real_dev,
+ ipsec->xs);
+ }
+ mutex_unlock(&bond->ipsec_lock);
+}
+
+static void bond_ipsec_free_sa(struct net_device *bond_dev,
+ struct xfrm_state *xs)
+{
+ struct net_device *real_dev;
+ struct bond_ipsec *ipsec;
+ struct bonding *bond;
+
if (!bond_dev)
return;
- rcu_read_lock();
bond = netdev_priv(bond_dev);
- slave = rcu_dereference(bond->curr_active_slave);
-
- if (!slave)
- goto out;
+ mutex_lock(&bond->ipsec_lock);
if (!xs->xso.real_dev)
goto out;
- WARN_ON(xs->xso.real_dev != slave->dev);
+ real_dev = xs->xso.real_dev;
- if (!slave->dev->xfrmdev_ops ||
- !slave->dev->xfrmdev_ops->xdo_dev_state_delete ||
- netif_is_bond_master(slave->dev)) {
- slave_warn(bond_dev, slave->dev, "%s: no slave xdo_dev_state_delete\n", __func__);
- goto out;
- }
-
- slave->dev->xfrmdev_ops->xdo_dev_state_delete(xs);
+ xs->xso.real_dev = NULL;
+ if (real_dev->xfrmdev_ops &&
+ real_dev->xfrmdev_ops->xdo_dev_state_free)
+ real_dev->xfrmdev_ops->xdo_dev_state_free(real_dev, xs);
out:
- spin_lock_bh(&bond->ipsec_lock);
list_for_each_entry(ipsec, &bond->ipsec_list, list) {
if (ipsec->xs == xs) {
list_del(&ipsec->list);
@@ -551,88 +661,84 @@ out:
break;
}
}
- spin_unlock_bh(&bond->ipsec_lock);
- rcu_read_unlock();
+ mutex_unlock(&bond->ipsec_lock);
}
-static void bond_ipsec_del_sa_all(struct bonding *bond)
+/**
+ * bond_ipsec_offload_ok - can this packet use the xfrm hw offload
+ * @skb: current data packet
+ * @xs: pointer to transformer state struct
+ **/
+static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
{
- struct net_device *bond_dev = bond->dev;
- struct bond_ipsec *ipsec;
- struct slave *slave;
+ struct net_device *real_dev;
rcu_read_lock();
- slave = rcu_dereference(bond->curr_active_slave);
- if (!slave) {
+ real_dev = bond_ipsec_dev(xs);
+ if (!real_dev || netif_is_bond_master(real_dev)) {
rcu_read_unlock();
- return;
+ return false;
}
- spin_lock_bh(&bond->ipsec_lock);
- list_for_each_entry(ipsec, &bond->ipsec_list, list) {
- if (!ipsec->xs->xso.real_dev)
- continue;
-
- if (!slave->dev->xfrmdev_ops ||
- !slave->dev->xfrmdev_ops->xdo_dev_state_delete ||
- netif_is_bond_master(slave->dev)) {
- slave_warn(bond_dev, slave->dev,
- "%s: no slave xdo_dev_state_delete\n",
- __func__);
- } else {
- slave->dev->xfrmdev_ops->xdo_dev_state_delete(ipsec->xs);
- }
- ipsec->xs->xso.real_dev = NULL;
- }
- spin_unlock_bh(&bond->ipsec_lock);
rcu_read_unlock();
+ return true;
}
/**
- * bond_ipsec_offload_ok - can this packet use the xfrm hw offload
- * @skb: current data packet
+ * bond_advance_esn_state - ESN support for IPSec HW offload
* @xs: pointer to transformer state struct
**/
-static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
+static void bond_advance_esn_state(struct xfrm_state *xs)
{
- struct net_device *bond_dev = xs->xso.dev;
struct net_device *real_dev;
- struct slave *curr_active;
- struct bonding *bond;
- int err;
- bond = netdev_priv(bond_dev);
rcu_read_lock();
- curr_active = rcu_dereference(bond->curr_active_slave);
- real_dev = curr_active->dev;
+ real_dev = bond_ipsec_dev(xs);
+ if (!real_dev)
+ goto out;
- if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
- err = false;
+ if (!real_dev->xfrmdev_ops ||
+ !real_dev->xfrmdev_ops->xdo_dev_state_advance_esn) {
+ pr_warn_ratelimited("%s: %s doesn't support xdo_dev_state_advance_esn\n", __func__, real_dev->name);
goto out;
}
- if (!xs->xso.real_dev) {
- err = false;
+ real_dev->xfrmdev_ops->xdo_dev_state_advance_esn(xs);
+out:
+ rcu_read_unlock();
+}
+
+/**
+ * bond_xfrm_update_stats - Update xfrm state
+ * @xs: pointer to transformer state struct
+ **/
+static void bond_xfrm_update_stats(struct xfrm_state *xs)
+{
+ struct net_device *real_dev;
+
+ rcu_read_lock();
+ real_dev = bond_ipsec_dev(xs);
+ if (!real_dev)
goto out;
- }
if (!real_dev->xfrmdev_ops ||
- !real_dev->xfrmdev_ops->xdo_dev_offload_ok ||
- netif_is_bond_master(real_dev)) {
- err = false;
+ !real_dev->xfrmdev_ops->xdo_dev_state_update_stats) {
+ pr_warn_ratelimited("%s: %s doesn't support xdo_dev_state_update_stats\n", __func__, real_dev->name);
goto out;
}
- err = real_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs);
+ real_dev->xfrmdev_ops->xdo_dev_state_update_stats(xs);
out:
rcu_read_unlock();
- return err;
}
static const struct xfrmdev_ops bond_xfrmdev_ops = {
.xdo_dev_state_add = bond_ipsec_add_sa,
.xdo_dev_state_delete = bond_ipsec_del_sa,
+ .xdo_dev_state_free = bond_ipsec_free_sa,
.xdo_dev_offload_ok = bond_ipsec_offload_ok,
+ .xdo_dev_state_advance_esn = bond_advance_esn_state,
+ .xdo_dev_state_update_stats = bond_xfrm_update_stats,
};
#endif /* CONFIG_XFRM_OFFLOAD */
@@ -723,73 +829,6 @@ const char *bond_slave_link_status(s8 link)
}
}
-/* if <dev> supports MII link status reporting, check its link status.
- *
- * We either do MII/ETHTOOL ioctls, or check netif_carrier_ok(),
- * depending upon the setting of the use_carrier parameter.
- *
- * Return either BMSR_LSTATUS, meaning that the link is up (or we
- * can't tell and just pretend it is), or 0, meaning that the link is
- * down.
- *
- * If reporting is non-zero, instead of faking link up, return -1 if
- * both ETHTOOL and MII ioctls fail (meaning the device does not
- * support them). If use_carrier is set, return whatever it says.
- * It'd be nice if there was a good way to tell if a driver supports
- * netif_carrier, but there really isn't.
- */
-static int bond_check_dev_link(struct bonding *bond,
- struct net_device *slave_dev, int reporting)
-{
- const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
- int (*ioctl)(struct net_device *, struct ifreq *, int);
- struct ifreq ifr;
- struct mii_ioctl_data *mii;
-
- if (!reporting && !netif_running(slave_dev))
- return 0;
-
- if (bond->params.use_carrier)
- return netif_carrier_ok(slave_dev) ? BMSR_LSTATUS : 0;
-
- /* Try to get link status using Ethtool first. */
- if (slave_dev->ethtool_ops->get_link)
- return slave_dev->ethtool_ops->get_link(slave_dev) ?
- BMSR_LSTATUS : 0;
-
- /* Ethtool can't be used, fallback to MII ioctls. */
- ioctl = slave_ops->ndo_eth_ioctl;
- if (ioctl) {
- /* TODO: set pointer to correct ioctl on a per team member
- * bases to make this more efficient. that is, once
- * we determine the correct ioctl, we will always
- * call it and not the others for that team
- * member.
- */
-
- /* We cannot assume that SIOCGMIIPHY will also read a
- * register; not all network drivers (e.g., e100)
- * support that.
- */
-
- /* Yes, the mii is overlaid on the ifreq.ifr_ifru */
- strscpy_pad(ifr.ifr_name, slave_dev->name, IFNAMSIZ);
- mii = if_mii(&ifr);
- if (ioctl(slave_dev, &ifr, SIOCGMIIPHY) == 0) {
- mii->reg_num = MII_BMSR;
- if (ioctl(slave_dev, &ifr, SIOCGMIIREG) == 0)
- return mii->val_out & BMSR_LSTATUS;
- }
- }
-
- /* If reporting, report that either there's no ndo_eth_ioctl,
- * or both SIOCGMIIREG and get_link failed (meaning that we
- * cannot report link status). If not reporting, pretend
- * we're ok.
- */
- return reporting ? -1 : BMSR_LSTATUS;
-}
-
/*----------------------------- Multicast list ------------------------------*/
/* Push the promiscuity flag down to appropriate slaves */
@@ -892,6 +931,8 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
if (bond->dev->flags & IFF_UP)
bond_hw_addr_flush(bond->dev, old_active->dev);
+
+ bond_slave_ns_maddrs_add(bond, old_active);
}
if (new_active) {
@@ -908,6 +949,8 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
dev_mc_sync(new_active->dev, bond->dev);
netif_addr_unlock_bh(bond->dev);
}
+
+ bond_slave_ns_maddrs_del(bond, new_active);
}
}
@@ -925,7 +968,7 @@ static int bond_set_dev_addr(struct net_device *bond_dev,
slave_dbg(bond_dev, slave_dev, "bond_dev=%p slave_dev=%p slave_dev->addr_len=%d\n",
bond_dev, slave_dev, slave_dev->addr_len);
- err = dev_pre_changeaddr_notify(bond_dev, slave_dev->dev_addr, NULL);
+ err = netif_pre_changeaddr_notify(bond_dev, slave_dev->dev_addr, NULL);
if (err)
return err;
@@ -999,8 +1042,7 @@ static void bond_do_fail_over_mac(struct bonding *bond,
ss.ss_family = bond->dev->type;
}
- rv = dev_set_mac_address(new_active->dev,
- (struct sockaddr *)&ss, NULL);
+ rv = dev_set_mac_address(new_active->dev, &ss, NULL);
if (rv) {
slave_err(bond->dev, new_active->dev, "Error %d setting MAC of new active slave\n",
-rv);
@@ -1014,8 +1056,7 @@ static void bond_do_fail_over_mac(struct bonding *bond,
new_active->dev->addr_len);
ss.ss_family = old_active->dev->type;
- rv = dev_set_mac_address(old_active->dev,
- (struct sockaddr *)&ss, NULL);
+ rv = dev_set_mac_address(old_active->dev, &ss, NULL);
if (rv)
slave_err(bond->dev, old_active->dev, "Error %d setting MAC of old active slave\n",
-rv);
@@ -1121,23 +1162,35 @@ static struct slave *bond_find_best_slave(struct bonding *bond)
return bestslave;
}
+/* must be called in RCU critical section or with RTNL held */
static bool bond_should_notify_peers(struct bonding *bond)
{
- struct slave *slave;
-
- rcu_read_lock();
- slave = rcu_dereference(bond->curr_active_slave);
- rcu_read_unlock();
+ struct bond_up_slave *usable;
+ struct slave *slave = NULL;
- if (!slave || !bond->send_peer_notif ||
+ if (!bond->send_peer_notif ||
bond->send_peer_notif %
max(1, bond->params.peer_notif_delay) != 0 ||
- !netif_carrier_ok(bond->dev) ||
- test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
+ !netif_carrier_ok(bond->dev))
return false;
+ /* The send_peer_notif is set by active-backup or 8023ad
+ * mode, and cleared in bond_close() when changing mode.
+ * It is safe to only check bond mode here.
+ */
+ if (BOND_MODE(bond) == BOND_MODE_8023AD) {
+ usable = rcu_dereference_rtnl(bond->usable_slaves);
+ if (!usable || !READ_ONCE(usable->count))
+ return false;
+ } else {
+ slave = rcu_dereference_rtnl(bond->curr_active_slave);
+ if (!slave || test_bit(__LINK_STATE_LINKWATCH_PENDING,
+ &slave->dev->state))
+ return false;
+ }
+
netdev_dbg(bond->dev, "bond_should_notify_peers: slave %s\n",
- slave ? slave->dev->name : "NULL");
+ slave ? slave->dev->name : "all");
return true;
}
@@ -1363,7 +1416,7 @@ static void bond_netpoll_cleanup(struct net_device *bond_dev)
slave_disable_netpoll(slave);
}
-static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
+static int bond_netpoll_setup(struct net_device *dev)
{
struct bonding *bond = netdev_priv(dev);
struct list_head *iter;
@@ -1403,9 +1456,7 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
struct slave *slave;
mask = features;
-
- features &= ~NETIF_F_ONE_FOR_ALL;
- features |= NETIF_F_ALL_FOR_ALL;
+ features = netdev_base_features(features);
bond_for_each_slave(bond, slave, iter) {
features = netdev_increment_features(features,
@@ -1417,86 +1468,6 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
return features;
}
-#define BOND_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
- NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | \
- NETIF_F_HIGHDMA | NETIF_F_LRO)
-
-#define BOND_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
- NETIF_F_RXCSUM | NETIF_F_GSO_SOFTWARE)
-
-#define BOND_MPLS_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
- NETIF_F_GSO_SOFTWARE)
-
-
-static void bond_compute_features(struct bonding *bond)
-{
- unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
- IFF_XMIT_DST_RELEASE_PERM;
- netdev_features_t vlan_features = BOND_VLAN_FEATURES;
- netdev_features_t enc_features = BOND_ENC_FEATURES;
-#ifdef CONFIG_XFRM_OFFLOAD
- netdev_features_t xfrm_features = BOND_XFRM_FEATURES;
-#endif /* CONFIG_XFRM_OFFLOAD */
- netdev_features_t mpls_features = BOND_MPLS_FEATURES;
- struct net_device *bond_dev = bond->dev;
- struct list_head *iter;
- struct slave *slave;
- unsigned short max_hard_header_len = ETH_HLEN;
- unsigned int tso_max_size = TSO_MAX_SIZE;
- u16 tso_max_segs = TSO_MAX_SEGS;
-
- if (!bond_has_slaves(bond))
- goto done;
- vlan_features &= NETIF_F_ALL_FOR_ALL;
- mpls_features &= NETIF_F_ALL_FOR_ALL;
-
- bond_for_each_slave(bond, slave, iter) {
- vlan_features = netdev_increment_features(vlan_features,
- slave->dev->vlan_features, BOND_VLAN_FEATURES);
-
- enc_features = netdev_increment_features(enc_features,
- slave->dev->hw_enc_features,
- BOND_ENC_FEATURES);
-
-#ifdef CONFIG_XFRM_OFFLOAD
- xfrm_features = netdev_increment_features(xfrm_features,
- slave->dev->hw_enc_features,
- BOND_XFRM_FEATURES);
-#endif /* CONFIG_XFRM_OFFLOAD */
-
- mpls_features = netdev_increment_features(mpls_features,
- slave->dev->mpls_features,
- BOND_MPLS_FEATURES);
-
- dst_release_flag &= slave->dev->priv_flags;
- if (slave->dev->hard_header_len > max_hard_header_len)
- max_hard_header_len = slave->dev->hard_header_len;
-
- tso_max_size = min(tso_max_size, slave->dev->tso_max_size);
- tso_max_segs = min(tso_max_segs, slave->dev->tso_max_segs);
- }
- bond_dev->hard_header_len = max_hard_header_len;
-
-done:
- bond_dev->vlan_features = vlan_features;
- bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_STAG_TX;
-#ifdef CONFIG_XFRM_OFFLOAD
- bond_dev->hw_enc_features |= xfrm_features;
-#endif /* CONFIG_XFRM_OFFLOAD */
- bond_dev->mpls_features = mpls_features;
- netif_set_tso_max_segs(bond_dev, tso_max_segs);
- netif_set_tso_max_size(bond_dev, tso_max_size);
-
- bond_dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
- if ((bond_dev->priv_flags & IFF_XMIT_DST_RELEASE_PERM) &&
- dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
- bond_dev->priv_flags |= IFF_XMIT_DST_RELEASE;
-
- netdev_change_features(bond_dev);
-}
-
static void bond_setup_by_slave(struct net_device *bond_dev,
struct net_device *slave_dev)
{
@@ -1811,7 +1782,7 @@ void bond_xdp_set_features(struct net_device *bond_dev)
ASSERT_RTNL();
- if (!bond_xdp_check(bond) || !bond_has_slaves(bond)) {
+ if (!bond_xdp_check(bond, BOND_MODE(bond)) || !bond_has_slaves(bond)) {
xdp_clear_features_flag(bond_dev);
return;
}
@@ -1832,7 +1803,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
struct slave *new_slave = NULL, *prev_slave;
struct sockaddr_storage ss;
- int link_reporting;
int res = 0, i;
if (slave_dev->flags & IFF_MASTER &&
@@ -1842,12 +1812,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
return -EPERM;
}
- if (!bond->params.use_carrier &&
- slave_dev->ethtool_ops->get_link == NULL &&
- slave_ops->ndo_eth_ioctl == NULL) {
- slave_warn(bond_dev, slave_dev, "no link monitoring support\n");
- }
-
/* already in-use? */
if (netdev_is_rx_handler_busy(slave_dev)) {
SLAVE_NL_ERR(bond_dev, slave_dev, extack,
@@ -1996,15 +1960,27 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
* set the master's mac address to that of the first slave
*/
memcpy(ss.__data, bond_dev->dev_addr, bond_dev->addr_len);
- ss.ss_family = slave_dev->type;
- res = dev_set_mac_address(slave_dev, (struct sockaddr *)&ss,
- extack);
- if (res) {
- slave_err(bond_dev, slave_dev, "Error %d calling set_mac_address\n", res);
- goto err_restore_mtu;
- }
+ } else if (bond->params.fail_over_mac == BOND_FOM_FOLLOW &&
+ BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
+ bond_has_slaves(bond) &&
+ memcmp(slave_dev->dev_addr, bond_dev->dev_addr, bond_dev->addr_len) == 0) {
+ /* Set slave to random address to avoid duplicate mac
+ * address in later fail over.
+ */
+ eth_random_addr(ss.__data);
+ } else {
+ goto skip_mac_set;
+ }
+
+ ss.ss_family = slave_dev->type;
+ res = dev_set_mac_address(slave_dev, &ss, extack);
+ if (res) {
+ slave_err(bond_dev, slave_dev, "Error %d calling set_mac_address\n", res);
+ goto err_restore_mtu;
}
+skip_mac_set:
+
/* set no_addrconf flag before open to prevent IPv6 addrconf */
slave_dev->priv_flags |= IFF_NO_ADDRCONF;
@@ -2050,29 +2026,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
new_slave->last_tx = new_slave->last_rx;
- if (bond->params.miimon && !bond->params.use_carrier) {
- link_reporting = bond_check_dev_link(bond, slave_dev, 1);
-
- if ((link_reporting == -1) && !bond->params.arp_interval) {
- /* miimon is set but a bonded network driver
- * does not support ETHTOOL/MII and
- * arp_interval is not set. Note: if
- * use_carrier is enabled, we will never go
- * here (because netif_carrier is always
- * supported); thus, we don't need to change
- * the messages for netif_carrier.
- */
- slave_warn(bond_dev, slave_dev, "MII and ETHTOOL support not available for slave, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! see bonding.txt for details\n");
- } else if (link_reporting == -1) {
- /* unable get link status using mii/ethtool */
- slave_warn(bond_dev, slave_dev, "can't get link status from slave; the network driver associated with this interface does not support MII or ETHTOOL link status reporting, thus miimon has no effect on this interface\n");
- }
- }
-
/* check for initial state */
new_slave->link = BOND_LINK_NOCHANGE;
if (bond->params.miimon) {
- if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) {
+ if (netif_running(slave_dev) && netif_carrier_ok(slave_dev)) {
if (bond->params.updelay) {
bond_set_slave_link_state(new_slave,
BOND_LINK_BACK,
@@ -2225,19 +2182,25 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
}
bond->slave_cnt++;
- bond_compute_features(bond);
+ netdev_compute_master_upper_features(bond->dev, true);
bond_set_carrier(bond);
+ /* Needs to be called before bond_select_active_slave(), which will
+ * remove the maddrs if the slave is selected as active slave.
+ */
+ bond_slave_ns_maddrs_add(bond, new_slave);
+
if (bond_uses_primary(bond)) {
block_netpoll_tx();
bond_select_active_slave(bond);
unblock_netpoll_tx();
}
- if (bond_mode_can_use_xmit_hash(bond))
+ /* broadcast mode uses the all_slaves array to loop through slaves. */
+ if (bond_mode_can_use_xmit_hash(bond) ||
+ BOND_MODE(bond) == BOND_MODE_BROADCAST)
bond_update_slave_arr(bond, NULL);
-
if (!slave_dev->netdev_ops->ndo_bpf ||
!slave_dev->netdev_ops->ndo_xdp_xmit) {
if (bond->xdp_prog) {
@@ -2261,7 +2224,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
goto err_sysfs_del;
}
- res = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp);
+ res = dev_xdp_propagate(slave_dev, &xdp);
if (res < 0) {
/* ndo_bpf() sets extack error message */
slave_dbg(bond_dev, slave_dev, "Error %d calling ndo_bpf\n", res);
@@ -2321,7 +2284,7 @@ err_restore_mac:
bond_hw_addr_copy(ss.__data, new_slave->perm_hwaddr,
new_slave->dev->addr_len);
ss.ss_family = slave_dev->type;
- dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, NULL);
+ dev_set_mac_address(slave_dev, &ss, NULL);
}
err_restore_mtu:
@@ -2397,7 +2360,7 @@ static int __bond_release_one(struct net_device *bond_dev,
.prog = NULL,
.extack = NULL,
};
- if (slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp))
+ if (dev_xdp_propagate(slave_dev, &xdp))
slave_warn(bond_dev, slave_dev, "failed to unload XDP program\n");
}
@@ -2411,7 +2374,8 @@ static int __bond_release_one(struct net_device *bond_dev,
bond_upper_dev_unlink(bond, slave);
- if (bond_mode_can_use_xmit_hash(bond))
+ if (bond_mode_can_use_xmit_hash(bond) ||
+ BOND_MODE(bond) == BOND_MODE_BROADCAST)
bond_update_slave_arr(bond, slave);
slave_info(bond_dev, slave_dev, "Releasing %s interface\n",
@@ -2421,7 +2385,7 @@ static int __bond_release_one(struct net_device *bond_dev,
RCU_INIT_POINTER(bond->current_arp_slave, NULL);
- if (!all && (!bond->params.fail_over_mac ||
+ if (!all && (bond->params.fail_over_mac != BOND_FOM_ACTIVE ||
BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)) {
if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
bond_has_slaves(bond))
@@ -2435,6 +2399,12 @@ static int __bond_release_one(struct net_device *bond_dev,
if (oldcurrent == slave)
bond_change_active_slave(bond, NULL);
+ /* Must be called after bond_change_active_slave () as the slave
+ * might change from an active slave to a backup slave. Then it is
+ * necessary to clear the maddrs on the backup slave.
+ */
+ bond_slave_ns_maddrs_del(bond, slave);
+
if (bond_is_lb(bond)) {
/* Must be called only after the slave has been
* detached from the list and the curr_active_slave
@@ -2467,7 +2437,7 @@ static int __bond_release_one(struct net_device *bond_dev,
call_netdevice_notifiers(NETDEV_RELEASE, bond->dev);
}
- bond_compute_features(bond);
+ netdev_compute_master_upper_features(bond->dev, true);
if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
(old_features & NETIF_F_VLAN_CHALLENGED))
slave_info(bond_dev, slave_dev, "last VLAN challenged slave left bond - VLAN blocking is removed\n");
@@ -2509,13 +2479,16 @@ static int __bond_release_one(struct net_device *bond_dev,
bond_hw_addr_copy(ss.__data, slave->perm_hwaddr,
slave->dev->addr_len);
ss.ss_family = slave_dev->type;
- dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, NULL);
+ dev_set_mac_address(slave_dev, &ss, NULL);
}
- if (unregister)
- __dev_set_mtu(slave_dev, slave->original_mtu);
- else
+ if (unregister) {
+ netdev_lock_ops(slave_dev);
+ __netif_set_mtu(slave_dev, slave->original_mtu);
+ netdev_unlock_ops(slave_dev);
+ } else {
dev_set_mtu(slave_dev, slave->original_mtu);
+ }
if (!netif_is_bond_master(slave_dev))
slave_dev->priv_flags &= ~IFF_BONDING;
@@ -2601,7 +2574,8 @@ static int bond_miimon_inspect(struct bonding *bond)
bond_for_each_slave_rcu(bond, slave, iter) {
bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
- link_state = bond_check_dev_link(bond, slave->dev, 0);
+ link_state = netif_running(slave->dev) &&
+ netif_carrier_ok(slave->dev);
switch (slave->link) {
case BOND_LINK_UP:
@@ -2810,7 +2784,7 @@ static void bond_mii_monitor(struct work_struct *work)
{
struct bonding *bond = container_of(work, struct bonding,
mii_work.work);
- bool should_notify_peers = false;
+ bool should_notify_peers;
bool commit;
unsigned long delay;
struct slave *slave;
@@ -2822,30 +2796,33 @@ static void bond_mii_monitor(struct work_struct *work)
goto re_arm;
rcu_read_lock();
+
should_notify_peers = bond_should_notify_peers(bond);
commit = !!bond_miimon_inspect(bond);
- if (bond->send_peer_notif) {
- rcu_read_unlock();
- if (rtnl_trylock()) {
- bond->send_peer_notif--;
- rtnl_unlock();
- }
- } else {
- rcu_read_unlock();
- }
- if (commit) {
+ rcu_read_unlock();
+
+ if (commit || bond->send_peer_notif) {
/* Race avoidance with bond_close cancel of workqueue */
if (!rtnl_trylock()) {
delay = 1;
- should_notify_peers = false;
goto re_arm;
}
- bond_for_each_slave(bond, slave, iter) {
- bond_commit_link_state(slave, BOND_SLAVE_NOTIFY_LATER);
+ if (commit) {
+ bond_for_each_slave(bond, slave, iter) {
+ bond_commit_link_state(slave,
+ BOND_SLAVE_NOTIFY_LATER);
+ }
+ bond_miimon_commit(bond);
+ }
+
+ if (bond->send_peer_notif) {
+ bond->send_peer_notif--;
+ if (should_notify_peers)
+ call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
+ bond->dev);
}
- bond_miimon_commit(bond);
rtnl_unlock(); /* might sleep, hold no other locks */
}
@@ -2853,13 +2830,6 @@ static void bond_mii_monitor(struct work_struct *work)
re_arm:
if (bond->params.miimon)
queue_delayed_work(bond->wq, &bond->mii_work, delay);
-
- if (should_notify_peers) {
- if (!rtnl_trylock())
- return;
- call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
- rtnl_unlock();
- }
}
static int bond_upper_dev_walk(struct net_device *upper,
@@ -3014,8 +2984,8 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
tags = NULL;
/* Find out through which dev should the packet go */
- rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
- RTO_ONLINK, 0);
+ rt = ip_route_output(dev_net(bond->dev), targets[i], 0, 0, 0,
+ RT_SCOPE_LINK);
if (IS_ERR(rt)) {
/* there's no route to target - try to send arp
* probe to generate any traffic (arp_validate=0)
@@ -3197,7 +3167,6 @@ static void bond_ns_send_all(struct bonding *bond, struct slave *slave)
/* Find out through which dev should the packet go */
memset(&fl6, 0, sizeof(struct flowi6));
fl6.daddr = targets[i];
- fl6.flowi6_oif = bond->dev->ifindex;
dst = ip6_route_output(dev_net(bond->dev), NULL, &fl6);
if (dst->error) {
@@ -3968,7 +3937,7 @@ static int bond_slave_netdev_event(unsigned long event,
case NETDEV_FEAT_CHANGE:
if (!bond->notifier_ctx) {
bond->notifier_ctx = true;
- bond_compute_features(bond);
+ netdev_compute_master_upper_features(bond->dev, true);
bond->notifier_ctx = false;
}
break;
@@ -4081,7 +4050,7 @@ static bool bond_flow_ip(struct sk_buff *skb, struct flow_keys *fk, const void *
}
if (l34 && *ip_proto >= 0)
- fk->ports.ports = __skb_flow_get_ports(skb, *nhoff, *ip_proto, data, hlen);
+ fk->ports.ports = skb_flow_get_ports(skb, *nhoff, *ip_proto, data, hlen);
return true;
}
@@ -4253,7 +4222,7 @@ void bond_work_init_all(struct bonding *bond)
INIT_DELAYED_WORK(&bond->slave_arr_work, bond_slave_arr_handler);
}
-static void bond_work_cancel_all(struct bonding *bond)
+void bond_work_cancel_all(struct bonding *bond)
{
cancel_delayed_work_sync(&bond->mii_work);
cancel_delayed_work_sync(&bond->arp_work);
@@ -4315,6 +4284,9 @@ static int bond_open(struct net_device *bond_dev)
bond_for_each_slave(bond, slave, iter)
dev_mc_add(slave->dev, lacpdu_mcast_addr);
+
+ if (bond->params.broadcast_neighbor)
+ static_branch_inc(&bond_bcast_neigh_enabled);
}
if (bond_mode_can_use_xmit_hash(bond))
@@ -4334,6 +4306,10 @@ static int bond_close(struct net_device *bond_dev)
bond_alb_deinitialize(bond);
bond->recv_probe = NULL;
+ if (BOND_MODE(bond) == BOND_MODE_8023AD &&
+ bond->params.broadcast_neighbor)
+ static_branch_dec(&bond_bcast_neigh_enabled);
+
if (bond_uses_primary(bond)) {
rcu_read_lock();
slave = rcu_dereference(bond->curr_active_slave);
@@ -4710,7 +4686,7 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
}
}
- bond_dev->mtu = new_mtu;
+ WRITE_ONCE(bond_dev->mtu, new_mtu);
return 0;
@@ -4793,8 +4769,7 @@ unwind:
if (rollback_slave == slave)
break;
- tmp_res = dev_set_mac_address(rollback_slave->dev,
- (struct sockaddr *)&tmp_ss, NULL);
+ tmp_res = dev_set_mac_address(rollback_slave->dev, &tmp_ss, NULL);
if (tmp_res) {
slave_dbg(bond_dev, rollback_slave->dev, "%s: unwind err %d\n",
__func__, tmp_res);
@@ -5170,6 +5145,37 @@ static struct slave *bond_xdp_xmit_3ad_xor_slave_get(struct bonding *bond,
return slaves->arr[hash % count];
}
+static bool bond_should_broadcast_neighbor(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct bonding *bond = netdev_priv(dev);
+ struct {
+ struct ipv6hdr ip6;
+ struct icmp6hdr icmp6;
+ } *combined, _combined;
+
+ if (!static_branch_unlikely(&bond_bcast_neigh_enabled))
+ return false;
+
+ if (!bond->params.broadcast_neighbor)
+ return false;
+
+ if (skb->protocol == htons(ETH_P_ARP))
+ return true;
+
+ if (skb->protocol == htons(ETH_P_IPV6)) {
+ combined = skb_header_pointer(skb, skb_mac_header_len(skb),
+ sizeof(_combined),
+ &_combined);
+ if (combined && combined->ip6.nexthdr == NEXTHDR_ICMP &&
+ (combined->icmp6.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
+ combined->icmp6.icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT))
+ return true;
+ }
+
+ return false;
+}
+
/* Use this Xmit function for 3AD as well as XOR modes. The current
* usable slave array is formed in the control path. The xmit function
* just calculates hash and sends the packet out.
@@ -5189,17 +5195,27 @@ static netdev_tx_t bond_3ad_xor_xmit(struct sk_buff *skb,
return bond_tx_drop(dev, skb);
}
-/* in broadcast mode, we send everything to all usable interfaces. */
+/* In broadcast mode, we send everything to either all slaves or only the
+ * usable slaves. Called under rcu_read_lock.
+ */
static netdev_tx_t bond_xmit_broadcast(struct sk_buff *skb,
- struct net_device *bond_dev)
+ struct net_device *bond_dev,
+ bool all_slaves)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct slave *slave = NULL;
- struct list_head *iter;
+ struct bond_up_slave *slaves;
bool xmit_suc = false;
bool skb_used = false;
+ int slaves_count, i;
- bond_for_each_slave_rcu(bond, slave, iter) {
+ if (all_slaves)
+ slaves = rcu_dereference(bond->all_slaves);
+ else
+ slaves = rcu_dereference(bond->usable_slaves);
+
+ slaves_count = slaves ? READ_ONCE(slaves->count) : 0;
+ for (i = 0; i < slaves_count; i++) {
+ struct slave *slave = slaves->arr[i];
struct sk_buff *skb2;
if (!(bond_slave_is_up(slave) && slave->link == BOND_LINK_UP))
@@ -5245,7 +5261,7 @@ static inline int bond_slave_override(struct bonding *bond,
/* Find out if any slaves have the same mapping as this skb. */
bond_for_each_slave_rcu(bond, slave, iter) {
- if (slave->queue_id == skb_get_queue_mapping(skb)) {
+ if (READ_ONCE(slave->queue_id) == skb_get_queue_mapping(skb)) {
if (bond_slave_is_up(slave) &&
slave->link == BOND_LINK_UP) {
bond_dev_queue_xmit(bond, skb, slave->dev);
@@ -5437,10 +5453,13 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev
case BOND_MODE_ACTIVEBACKUP:
return bond_xmit_activebackup(skb, dev);
case BOND_MODE_8023AD:
+ if (bond_should_broadcast_neighbor(skb, dev))
+ return bond_xmit_broadcast(skb, dev, false);
+ fallthrough;
case BOND_MODE_XOR:
return bond_3ad_xor_xmit(skb, dev);
case BOND_MODE_BROADCAST:
- return bond_xmit_broadcast(skb, dev);
+ return bond_xmit_broadcast(skb, dev, true);
case BOND_MODE_ALB:
return bond_alb_xmit(skb, dev);
case BOND_MODE_TLB:
@@ -5497,9 +5516,9 @@ bond_xdp_get_xmit_slave(struct net_device *bond_dev, struct xdp_buff *xdp)
break;
default:
- /* Should never happen. Mode guarded by bond_xdp_check() */
- netdev_err(bond_dev, "Unknown bonding mode %d for xdp xmit\n", BOND_MODE(bond));
- WARN_ON_ONCE(1);
+ if (net_ratelimit())
+ netdev_err(bond_dev, "Unknown bonding mode %d for xdp xmit\n",
+ BOND_MODE(bond));
return NULL;
}
@@ -5563,8 +5582,11 @@ static int bond_xdp_set(struct net_device *dev, struct bpf_prog *prog,
ASSERT_RTNL();
- if (!bond_xdp_check(bond))
+ if (!bond_xdp_check(bond, BOND_MODE(bond))) {
+ BOND_NL_ERR(dev, extack,
+ "No native XDP support for the current bonding mode");
return -EOPNOTSUPP;
+ }
old_prog = bond->xdp_prog;
bond->xdp_prog = prog;
@@ -5587,7 +5609,7 @@ static int bond_xdp_set(struct net_device *dev, struct bpf_prog *prog,
goto err;
}
- err = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp);
+ err = dev_xdp_propagate(slave_dev, &xdp);
if (err < 0) {
/* ndo_bpf() sets extack error message */
slave_err(dev, slave_dev, "Error %d calling ndo_bpf\n", err);
@@ -5619,7 +5641,7 @@ err:
if (slave == rollback_slave)
break;
- err_unwind = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp);
+ err_unwind = dev_xdp_propagate(slave_dev, &xdp);
if (err_unwind < 0)
slave_err(dev, slave_dev,
"Error %d when unwinding XDP program change\n", err_unwind);
@@ -5755,10 +5777,10 @@ static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
}
static int bond_ethtool_get_ts_info(struct net_device *bond_dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct ethtool_ts_info ts_info;
+ struct kernel_ethtool_ts_info ts_info;
struct net_device *real_dev;
bool sw_tx_support = false;
struct list_head *iter;
@@ -5882,11 +5904,14 @@ void bond_setup(struct net_device *bond_dev)
/* set up xfrm device ops (only supported in active-backup right now) */
bond_dev->xfrmdev_ops = &bond_xfrmdev_ops;
INIT_LIST_HEAD(&bond->ipsec_list);
- spin_lock_init(&bond->ipsec_lock);
+ mutex_init(&bond->ipsec_lock);
#endif /* CONFIG_XFRM_OFFLOAD */
/* don't acquire bond device's netif_tx_lock when transmitting */
- bond_dev->features |= NETIF_F_LLTX;
+ bond_dev->lltx = true;
+
+ /* Don't allow bond devices to change network namespaces. */
+ bond_dev->netns_immutable = true;
/* By default, we declare the bond to be fully
* VLAN hardware accelerated capable. Special
@@ -5895,10 +5920,7 @@ void bond_setup(struct net_device *bond_dev)
* capable
*/
- /* Don't allow bond devices to change network namespaces. */
- bond_dev->features |= NETIF_F_NETNS_LOCAL;
-
- bond_dev->hw_features = BOND_VLAN_FEATURES |
+ bond_dev->hw_features = MASTER_UPPER_DEV_VLAN_FEATURES |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER |
NETIF_F_HW_VLAN_STAG_RX |
@@ -5907,6 +5929,7 @@ void bond_setup(struct net_device *bond_dev)
bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
bond_dev->features |= bond_dev->hw_features;
bond_dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
+ bond_dev->features |= NETIF_F_GSO_PARTIAL;
#ifdef CONFIG_XFRM_OFFLOAD
bond_dev->hw_features |= BOND_XFRM_FEATURES;
/* Only enable XFRM features if this is an active-backup config */
@@ -5931,9 +5954,13 @@ static void bond_uninit(struct net_device *bond_dev)
__bond_release_one(bond_dev, slave->dev, true, true);
netdev_info(bond_dev, "Released all slaves\n");
+#ifdef CONFIG_XFRM_OFFLOAD
+ mutex_destroy(&bond->ipsec_lock);
+#endif /* CONFIG_XFRM_OFFLOAD */
+
bond_set_slave_arr(bond, NULL, NULL);
- list_del(&bond->bond_list);
+ list_del_rcu(&bond->bond_list);
bond_debug_unregister(bond);
}
@@ -6041,10 +6068,10 @@ static int __init bond_check_params(struct bond_params *params)
downdelay = 0;
}
- if ((use_carrier != 0) && (use_carrier != 1)) {
- pr_warn("Warning: use_carrier module parameter (%d), not of valid value (0/1), so it was set to 1\n",
- use_carrier);
- use_carrier = 1;
+ if (use_carrier != 1) {
+ pr_err("Error: invalid use_carrier parameter (%d)\n",
+ use_carrier);
+ return -EINVAL;
}
if (num_peer_notif < 0 || num_peer_notif > 255) {
@@ -6291,7 +6318,6 @@ static int __init bond_check_params(struct bond_params *params)
params->updelay = updelay;
params->downdelay = downdelay;
params->peer_notif_delay = 0;
- params->use_carrier = use_carrier;
params->lacp_active = 1;
params->lacp_fast = lacp_fast;
params->primary[0] = 0;
@@ -6308,6 +6334,7 @@ static int __init bond_check_params(struct bond_params *params)
eth_zero_addr(params->ad_actor_system);
params->ad_user_port_key = ad_user_port_key;
params->coupled_control = 1;
+ params->broadcast_neighbor = 0;
if (packets_per_slave > 0) {
params->reciprocal_packets_per_slave =
reciprocal_value(packets_per_slave);
@@ -6338,7 +6365,8 @@ static int bond_init(struct net_device *bond_dev)
netdev_dbg(bond_dev, "Begin bond_init\n");
- bond->wq = alloc_ordered_workqueue(bond_dev->name, WQ_MEM_RECLAIM);
+ bond->wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
+ bond_dev->name);
if (!bond->wq)
return -ENOMEM;
@@ -6347,7 +6375,7 @@ static int bond_init(struct net_device *bond_dev)
spin_lock_init(&bond->stats_lock);
netdev_lockdep_set_classes(bond_dev);
- list_add_tail(&bond->bond_list, &bn->dev_list);
+ list_add_tail_rcu(&bond->bond_list, &bn->dev_list);
bond_prepare_sysfs_group(bond);
@@ -6419,7 +6447,7 @@ static int __net_init bond_net_init(struct net *net)
/* According to commit 69b0216ac255 ("bonding: fix bonding_masters
* race condition in bond unloading") we need to remove sysfs files
- * before we remove our devices (done later in bond_net_exit_batch_rtnl())
+ * before we remove our devices (done later in bond_net_exit_rtnl())
*/
static void __net_exit bond_net_pre_exit(struct net *net)
{
@@ -6428,25 +6456,20 @@ static void __net_exit bond_net_pre_exit(struct net *net)
bond_destroy_sysfs(bn);
}
-static void __net_exit bond_net_exit_batch_rtnl(struct list_head *net_list,
- struct list_head *dev_kill_list)
+static void __net_exit bond_net_exit_rtnl(struct net *net,
+ struct list_head *dev_kill_list)
{
- struct bond_net *bn;
- struct net *net;
+ struct bond_net *bn = net_generic(net, bond_net_id);
+ struct bonding *bond, *tmp_bond;
/* Kill off any bonds created after unregistering bond rtnl ops */
- list_for_each_entry(net, net_list, exit_list) {
- struct bonding *bond, *tmp_bond;
-
- bn = net_generic(net, bond_net_id);
- list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
- unregister_netdevice_queue(bond->dev, dev_kill_list);
- }
+ list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
+ unregister_netdevice_queue(bond->dev, dev_kill_list);
}
/* According to commit 23fa5c2caae0 ("bonding: destroy proc directory
* only after all bonds are gone") bond_destroy_proc_dir() is called
- * after bond_net_exit_batch_rtnl() has completed.
+ * after bond_net_exit_rtnl() has completed.
*/
static void __net_exit bond_net_exit_batch(struct list_head *net_list)
{
@@ -6462,7 +6485,7 @@ static void __net_exit bond_net_exit_batch(struct list_head *net_list)
static struct pernet_operations bond_net_ops = {
.init = bond_net_init,
.pre_exit = bond_net_pre_exit,
- .exit_batch_rtnl = bond_net_exit_batch_rtnl,
+ .exit_rtnl = bond_net_exit_rtnl,
.exit_batch = bond_net_exit_batch,
.id = &bond_net_id,
.size = sizeof(struct bond_net),
@@ -6477,16 +6500,16 @@ static int __init bonding_init(void)
if (res)
goto out;
+ bond_create_debugfs();
+
res = register_pernet_subsys(&bond_net_ops);
if (res)
- goto out;
+ goto err_net_ops;
res = bond_netlink_init();
if (res)
goto err_link;
- bond_create_debugfs();
-
for (i = 0; i < max_bonds; i++) {
res = bond_create(&init_net, NULL);
if (res)
@@ -6501,10 +6524,11 @@ static int __init bonding_init(void)
out:
return res;
err:
- bond_destroy_debugfs();
bond_netlink_fini();
err_link:
unregister_pernet_subsys(&bond_net_ops);
+err_net_ops:
+ bond_destroy_debugfs();
goto out;
}
@@ -6513,11 +6537,11 @@ static void __exit bonding_exit(void)
{
unregister_netdevice_notifier(&bond_netdev_notifier);
- bond_destroy_debugfs();
-
bond_netlink_fini();
unregister_pernet_subsys(&bond_net_ops);
+ bond_destroy_debugfs();
+
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Make sure we don't have an imbalance on our netpoll blocking */
WARN_ON(atomic_read(&netpoll_block_tx));
@@ -6529,3 +6553,4 @@ module_exit(bonding_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Thomas Davis, tadavis@lbl.gov and many others");
+MODULE_IMPORT_NS("NETDEV_INTERNAL");
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index 29b4c3d1b9b6..286f11c517f7 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -28,6 +28,7 @@ static size_t bond_get_slave_size(const struct net_device *bond_dev,
nla_total_size(sizeof(u8)) + /* IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE */
nla_total_size(sizeof(u16)) + /* IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE */
nla_total_size(sizeof(s32)) + /* IFLA_BOND_SLAVE_PRIO */
+ nla_total_size(sizeof(u16)) + /* IFLA_BOND_SLAVE_ACTOR_PORT_PRIO */
0;
}
@@ -51,7 +52,8 @@ static int bond_fill_slave_info(struct sk_buff *skb,
slave_dev->addr_len, slave->perm_hwaddr))
goto nla_put_failure;
- if (nla_put_u16(skb, IFLA_BOND_SLAVE_QUEUE_ID, slave->queue_id))
+ if (nla_put_u16(skb, IFLA_BOND_SLAVE_QUEUE_ID,
+ READ_ONCE(slave->queue_id)))
goto nla_put_failure;
if (nla_put_s32(skb, IFLA_BOND_SLAVE_PRIO, slave->prio))
@@ -76,6 +78,10 @@ static int bond_fill_slave_info(struct sk_buff *skb,
ad_port->partner_oper.port_state))
goto nla_put_failure;
}
+
+ if (nla_put_u16(skb, IFLA_BOND_SLAVE_ACTOR_PORT_PRIO,
+ SLAVE_AD_INFO(slave)->port_priority))
+ goto nla_put_failure;
}
return 0;
@@ -123,11 +129,13 @@ static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
[IFLA_BOND_MISSED_MAX] = { .type = NLA_U8 },
[IFLA_BOND_NS_IP6_TARGET] = { .type = NLA_NESTED },
[IFLA_BOND_COUPLED_CONTROL] = { .type = NLA_U8 },
+ [IFLA_BOND_BROADCAST_NEIGH] = { .type = NLA_U8 },
};
static const struct nla_policy bond_slave_policy[IFLA_BOND_SLAVE_MAX + 1] = {
[IFLA_BOND_SLAVE_QUEUE_ID] = { .type = NLA_U16 },
[IFLA_BOND_SLAVE_PRIO] = { .type = NLA_S32 },
+ [IFLA_BOND_SLAVE_ACTOR_PORT_PRIO] = { .type = NLA_U16 },
};
static int bond_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -178,6 +186,16 @@ static int bond_slave_changelink(struct net_device *bond_dev,
return err;
}
+ if (data[IFLA_BOND_SLAVE_ACTOR_PORT_PRIO]) {
+ u16 ad_prio = nla_get_u16(data[IFLA_BOND_SLAVE_ACTOR_PORT_PRIO]);
+
+ bond_opt_slave_initval(&newval, &slave_dev, ad_prio);
+ err = __bond_opt_set(bond, BOND_OPT_ACTOR_PORT_PRIO, &newval,
+ data[IFLA_BOND_SLAVE_ACTOR_PORT_PRIO], extack);
+ if (err)
+ return err;
+ }
+
return 0;
}
@@ -257,13 +275,11 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
return err;
}
if (data[IFLA_BOND_USE_CARRIER]) {
- int use_carrier = nla_get_u8(data[IFLA_BOND_USE_CARRIER]);
-
- bond_opt_initval(&newval, use_carrier);
- err = __bond_opt_set(bond, BOND_OPT_USE_CARRIER, &newval,
- data[IFLA_BOND_USE_CARRIER], extack);
- if (err)
- return err;
+ if (nla_get_u8(data[IFLA_BOND_USE_CARRIER]) != 1) {
+ NL_SET_ERR_MSG_ATTR(extack, data[IFLA_BOND_USE_CARRIER],
+ "option obsolete, use_carrier cannot be disabled");
+ return -EINVAL;
+ }
}
if (data[IFLA_BOND_ARP_INTERVAL]) {
int arp_interval = nla_get_u32(data[IFLA_BOND_ARP_INTERVAL]);
@@ -560,25 +576,39 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
return err;
}
+ if (data[IFLA_BOND_BROADCAST_NEIGH]) {
+ int broadcast_neigh = nla_get_u8(data[IFLA_BOND_BROADCAST_NEIGH]);
+
+ bond_opt_initval(&newval, broadcast_neigh);
+ err = __bond_opt_set(bond, BOND_OPT_BROADCAST_NEIGH, &newval,
+ data[IFLA_BOND_BROADCAST_NEIGH], extack);
+ if (err)
+ return err;
+ }
+
return 0;
}
-static int bond_newlink(struct net *src_net, struct net_device *bond_dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int bond_newlink(struct net_device *bond_dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct bonding *bond = netdev_priv(bond_dev);
+ struct nlattr **data = params->data;
+ struct nlattr **tb = params->tb;
int err;
- err = bond_changelink(bond_dev, tb, data, extack);
- if (err < 0)
+ err = register_netdevice(bond_dev);
+ if (err)
return err;
- err = register_netdevice(bond_dev);
- if (!err) {
- struct bonding *bond = netdev_priv(bond_dev);
+ netif_carrier_off(bond_dev);
+ bond_work_init_all(bond);
- netif_carrier_off(bond_dev);
- bond_work_init_all(bond);
+ err = bond_changelink(bond_dev, tb, data, extack);
+ if (err) {
+ bond_work_cancel_all(bond);
+ unregister_netdevice(bond_dev);
}
return err;
@@ -627,6 +657,7 @@ static size_t bond_get_size(const struct net_device *bond_dev)
nla_total_size(sizeof(struct nlattr)) +
nla_total_size(sizeof(struct in6_addr)) * BOND_MAX_NS_TARGETS +
nla_total_size(sizeof(u8)) + /* IFLA_BOND_COUPLED_CONTROL */
+ nla_total_size(sizeof(u8)) + /* IFLA_BOND_BROADCAST_NEIGH */
0;
}
@@ -673,7 +704,7 @@ static int bond_fill_info(struct sk_buff *skb,
bond->params.peer_notif_delay * bond->params.miimon))
goto nla_put_failure;
- if (nla_put_u8(skb, IFLA_BOND_USE_CARRIER, bond->params.use_carrier))
+ if (nla_put_u8(skb, IFLA_BOND_USE_CARRIER, 1))
goto nla_put_failure;
if (nla_put_u32(skb, IFLA_BOND_ARP_INTERVAL, bond->params.arp_interval))
@@ -790,6 +821,10 @@ static int bond_fill_info(struct sk_buff *skb,
bond->params.coupled_control))
goto nla_put_failure;
+ if (nla_put_u8(skb, IFLA_BOND_BROADCAST_NEIGH,
+ bond->params.broadcast_neighbor))
+ goto nla_put_failure;
+
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct ad_info info;
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 4cdbc7e084f4..384499c869b8 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -15,6 +15,7 @@
#include <linux/sched/signal.h>
#include <net/bonding.h>
+#include <net/ndisc.h>
static int bond_option_active_slave_set(struct bonding *bond,
const struct bond_opt_value *newval);
@@ -78,6 +79,8 @@ static int bond_option_tlb_dynamic_lb_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_ad_actor_sys_prio_set(struct bonding *bond,
const struct bond_opt_value *newval);
+static int bond_option_actor_port_prio_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
static int bond_option_ad_actor_system_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_ad_user_port_key_set(struct bonding *bond,
@@ -86,6 +89,8 @@ static int bond_option_missed_max_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_coupled_control_set(struct bonding *bond,
const struct bond_opt_value *newval);
+static int bond_option_broadcast_neigh_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
static const struct bond_opt_value bond_mode_tbl[] = {
{ "balance-rr", BOND_MODE_ROUNDROBIN, BOND_VALFLAG_DEFAULT},
@@ -157,10 +162,11 @@ static const struct bond_opt_value bond_lacp_rate_tbl[] = {
};
static const struct bond_opt_value bond_ad_select_tbl[] = {
- { "stable", BOND_AD_STABLE, BOND_VALFLAG_DEFAULT},
- { "bandwidth", BOND_AD_BANDWIDTH, 0},
- { "count", BOND_AD_COUNT, 0},
- { NULL, -1, 0},
+ { "stable", BOND_AD_STABLE, BOND_VALFLAG_DEFAULT},
+ { "bandwidth", BOND_AD_BANDWIDTH, 0},
+ { "count", BOND_AD_COUNT, 0},
+ { "actor_port_prio", BOND_AD_PRIO, 0},
+ { NULL, -1, 0},
};
static const struct bond_opt_value bond_num_peer_notif_tbl[] = {
@@ -184,7 +190,6 @@ static const struct bond_opt_value bond_primary_reselect_tbl[] = {
};
static const struct bond_opt_value bond_use_carrier_tbl[] = {
- { "off", 0, 0},
{ "on", 1, BOND_VALFLAG_DEFAULT},
{ NULL, -1, 0}
};
@@ -239,6 +244,12 @@ static const struct bond_opt_value bond_coupled_control_tbl[] = {
{ NULL, -1, 0},
};
+static const struct bond_opt_value bond_broadcast_neigh_tbl[] = {
+ { "off", 0, BOND_VALFLAG_DEFAULT},
+ { "on", 1, 0},
+ { NULL, -1, 0}
+};
+
static const struct bond_option bond_opts[BOND_OPT_LAST] = {
[BOND_OPT_MODE] = {
.id = BOND_OPT_MODE,
@@ -361,7 +372,7 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
[BOND_OPT_AD_SELECT] = {
.id = BOND_OPT_AD_SELECT,
.name = "ad_select",
- .desc = "803.ad aggregation selection logic",
+ .desc = "802.3ad aggregation selection logic",
.flags = BOND_OPTFLAG_IFDOWN,
.values = bond_ad_select_tbl,
.set = bond_option_ad_select_set
@@ -410,7 +421,7 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
[BOND_OPT_USE_CARRIER] = {
.id = BOND_OPT_USE_CARRIER,
.name = "use_carrier",
- .desc = "Use netif_carrier_ok (vs MII ioctls) in miimon",
+ .desc = "option obsolete, use_carrier cannot be disabled",
.values = bond_use_carrier_tbl,
.set = bond_option_use_carrier_set
},
@@ -475,6 +486,13 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
.values = bond_ad_actor_sys_prio_tbl,
.set = bond_option_ad_actor_sys_prio_set,
},
+ [BOND_OPT_ACTOR_PORT_PRIO] = {
+ .id = BOND_OPT_ACTOR_PORT_PRIO,
+ .name = "actor_port_prio",
+ .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_8023AD)),
+ .flags = BOND_OPTFLAG_RAWVAL,
+ .set = bond_option_actor_port_prio_set,
+ },
[BOND_OPT_AD_ACTOR_SYSTEM] = {
.id = BOND_OPT_AD_ACTOR_SYSTEM,
.name = "ad_actor_system",
@@ -512,6 +530,14 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
.flags = BOND_OPTFLAG_IFDOWN,
.values = bond_coupled_control_tbl,
.set = bond_option_coupled_control_set,
+ },
+ [BOND_OPT_BROADCAST_NEIGH] = {
+ .id = BOND_OPT_BROADCAST_NEIGH,
+ .name = "broadcast_neighbor",
+ .desc = "Broadcast neighbor packets to all active slaves",
+ .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_8023AD)),
+ .values = bond_broadcast_neigh_tbl,
+ .set = bond_option_broadcast_neigh_set,
}
};
@@ -867,6 +893,9 @@ static bool bond_set_xfrm_features(struct bonding *bond)
static int bond_option_mode_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
+ if (bond->xdp_prog && !bond_xdp_check(bond, newval->value))
+ return -EOPNOTSUPP;
+
if (!bond_mode_uses_arp(newval->value)) {
if (bond->params.arp_interval) {
netdev_dbg(bond->dev, "%s mode is incompatible with arp monitoring, start mii monitoring\n",
@@ -890,6 +919,13 @@ static int bond_option_mode_set(struct bonding *bond,
bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
bond->params.mode = newval->value;
+ /* When changing mode, the bond device is down, so bond_close() may have
+ * already decremented bond_bcast_neigh_enabled if broadcast_neighbor was
+ * enabled in 8023ad mode. Therefore, only clear broadcast_neighbor to 0
+ * here.
+ */
+ bond->params.broadcast_neighbor = 0;
+
if (bond->dev->reg_state == NETREG_REGISTERED) {
bool update = false;
@@ -936,7 +972,7 @@ static int bond_option_active_slave_set(struct bonding *bond,
/* check to see if we are clearing active */
if (!slave_dev) {
netdev_dbg(bond->dev, "Clearing current active slave\n");
- RCU_INIT_POINTER(bond->curr_active_slave, NULL);
+ bond_change_active_slave(bond, NULL);
bond_select_active_slave(bond);
} else {
struct slave *old_active = rtnl_dereference(bond->curr_active_slave);
@@ -1064,10 +1100,6 @@ static int bond_option_peer_notif_delay_set(struct bonding *bond,
static int bond_option_use_carrier_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
- netdev_dbg(bond->dev, "Setting use_carrier to %llu\n",
- newval->value);
- bond->params.use_carrier = newval->value;
-
return 0;
}
@@ -1214,9 +1246,9 @@ static int bond_option_arp_ip_targets_set(struct bonding *bond,
__be32 target;
if (newval->string) {
- if (!in4_pton(newval->string+1, -1, (u8 *)&target, -1, NULL)) {
- netdev_err(bond->dev, "invalid ARP target %pI4 specified\n",
- &target);
+ if (strlen(newval->string) < 1 ||
+ !in4_pton(newval->string + 1, -1, (u8 *)&target, -1, NULL)) {
+ netdev_err(bond->dev, "invalid ARP target specified\n");
return ret;
}
if (newval->string[0] == '+')
@@ -1234,6 +1266,107 @@ static int bond_option_arp_ip_targets_set(struct bonding *bond,
}
#if IS_ENABLED(CONFIG_IPV6)
+static bool slave_can_set_ns_maddr(const struct bonding *bond, struct slave *slave)
+{
+ return BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
+ !bond_is_active_slave(slave) &&
+ slave->dev->flags & IFF_MULTICAST;
+}
+
+/**
+ * slave_set_ns_maddrs - add/del all NS mac addresses for slave
+ * @bond: bond device
+ * @slave: slave device
+ * @add: add or remove all the NS mac addresses
+ *
+ * This function tries to add or delete all the NS mac addresses on the slave
+ *
+ * Note, the IPv6 NS target address is the unicast address in Neighbor
+ * Solicitation (NS) message. The dest address of NS message should be
+ * solicited-node multicast address of the target. The dest mac of NS message
+ * is converted from the solicited-node multicast address.
+ *
+ * This function is called when
+ * * arp_validate changes
+ * * enslaving, releasing new slaves
+ */
+static void slave_set_ns_maddrs(struct bonding *bond, struct slave *slave, bool add)
+{
+ struct in6_addr *targets = bond->params.ns_targets;
+ char slot_maddr[MAX_ADDR_LEN];
+ struct in6_addr mcaddr;
+ int i;
+
+ if (!slave_can_set_ns_maddr(bond, slave))
+ return;
+
+ for (i = 0; i < BOND_MAX_NS_TARGETS; i++) {
+ if (ipv6_addr_any(&targets[i]))
+ break;
+
+ addrconf_addr_solict_mult(&targets[i], &mcaddr);
+ if (!ndisc_mc_map(&mcaddr, slot_maddr, slave->dev, 0)) {
+ if (add)
+ dev_mc_add(slave->dev, slot_maddr);
+ else
+ dev_mc_del(slave->dev, slot_maddr);
+ }
+ }
+}
+
+void bond_slave_ns_maddrs_add(struct bonding *bond, struct slave *slave)
+{
+ if (!bond->params.arp_validate)
+ return;
+ slave_set_ns_maddrs(bond, slave, true);
+}
+
+void bond_slave_ns_maddrs_del(struct bonding *bond, struct slave *slave)
+{
+ if (!bond->params.arp_validate)
+ return;
+ slave_set_ns_maddrs(bond, slave, false);
+}
+
+/**
+ * slave_set_ns_maddr - set new NS mac address for slave
+ * @bond: bond device
+ * @slave: slave device
+ * @target: the new IPv6 target
+ * @slot: the old IPv6 target in the slot
+ *
+ * This function tries to replace the old mac address with the new one on the slave.
+ *
+ * Note, the target/slot IPv6 address is the unicast address in Neighbor
+ * Solicitation (NS) message. The dest address of NS message should be
+ * solicited-node multicast address of the target. The dest mac of NS message
+ * is converted from the solicited-node multicast address.
+ *
+ * This function is called when
+ * * An IPv6 NS target is added or removed.
+ */
+static void slave_set_ns_maddr(struct bonding *bond, struct slave *slave,
+ struct in6_addr *target, struct in6_addr *slot)
+{
+ char mac_addr[MAX_ADDR_LEN];
+ struct in6_addr mcast_addr;
+
+ if (!bond->params.arp_validate || !slave_can_set_ns_maddr(bond, slave))
+ return;
+
+ /* remove the previous mac addr from slave */
+ addrconf_addr_solict_mult(slot, &mcast_addr);
+ if (!ipv6_addr_any(slot) &&
+ !ndisc_mc_map(&mcast_addr, mac_addr, slave->dev, 0))
+ dev_mc_del(slave->dev, mac_addr);
+
+ /* add new mac addr on slave if target is set */
+ addrconf_addr_solict_mult(target, &mcast_addr);
+ if (!ipv6_addr_any(target) &&
+ !ndisc_mc_map(&mcast_addr, mac_addr, slave->dev, 0))
+ dev_mc_add(slave->dev, mac_addr);
+}
+
static void _bond_options_ns_ip6_target_set(struct bonding *bond, int slot,
struct in6_addr *target,
unsigned long last_rx)
@@ -1243,8 +1376,10 @@ static void _bond_options_ns_ip6_target_set(struct bonding *bond, int slot,
struct slave *slave;
if (slot >= 0 && slot < BOND_MAX_NS_TARGETS) {
- bond_for_each_slave(bond, slave, iter)
+ bond_for_each_slave(bond, slave, iter) {
slave->target_last_arp_rx[slot] = last_rx;
+ slave_set_ns_maddr(bond, slave, target, &targets[slot]);
+ }
targets[slot] = *target;
}
}
@@ -1296,15 +1431,30 @@ static int bond_option_ns_ip6_targets_set(struct bonding *bond,
{
return -EPERM;
}
+
+static void slave_set_ns_maddrs(struct bonding *bond, struct slave *slave, bool add) {}
+
+void bond_slave_ns_maddrs_add(struct bonding *bond, struct slave *slave) {}
+
+void bond_slave_ns_maddrs_del(struct bonding *bond, struct slave *slave) {}
#endif
static int bond_option_arp_validate_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
+ bool changed = !!bond->params.arp_validate != !!newval->value;
+ struct list_head *iter;
+ struct slave *slave;
+
netdev_dbg(bond->dev, "Setting arp_validate to %s (%llu)\n",
newval->string, newval->value);
bond->params.arp_validate = newval->value;
+ if (changed) {
+ bond_for_each_slave(bond, slave, iter)
+ slave_set_ns_maddrs(bond, slave, !!bond->params.arp_validate);
+ }
+
return 0;
}
@@ -1515,6 +1665,7 @@ static int bond_option_lacp_active_set(struct bonding *bond,
netdev_dbg(bond->dev, "Setting LACP active to %s (%llu)\n",
newval->string, newval->value);
bond->params.lacp_active = newval->value;
+ bond_3ad_update_lacp_active(bond);
return 0;
}
@@ -1589,7 +1740,7 @@ static int bond_option_queue_id_set(struct bonding *bond,
goto err_no_cmd;
/* Actually set the qids for the slave */
- update_slave->queue_id = qid;
+ WRITE_ONCE(update_slave->queue_id, qid);
out:
return ret;
@@ -1671,6 +1822,26 @@ static int bond_option_ad_actor_sys_prio_set(struct bonding *bond,
return 0;
}
+static int bond_option_actor_port_prio_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
+{
+ struct slave *slave;
+
+ slave = bond_slave_get_rtnl(newval->slave_dev);
+ if (!slave) {
+ netdev_dbg(bond->dev, "%s called on NULL slave\n", __func__);
+ return -ENODEV;
+ }
+
+ netdev_dbg(newval->slave_dev, "Setting actor_port_prio to %llu\n",
+ newval->value);
+
+ SLAVE_AD_INFO(slave)->port_priority = newval->value;
+ bond_3ad_update_ad_actor_settings(bond);
+
+ return 0;
+}
+
static int bond_option_ad_actor_system_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
@@ -1718,3 +1889,22 @@ static int bond_option_coupled_control_set(struct bonding *bond,
bond->params.coupled_control = newval->value;
return 0;
}
+
+static int bond_option_broadcast_neigh_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
+{
+ if (bond->params.broadcast_neighbor == newval->value)
+ return 0;
+
+ bond->params.broadcast_neighbor = newval->value;
+ if (bond->dev->flags & IFF_UP) {
+ if (bond->params.broadcast_neighbor)
+ static_branch_inc(&bond_bcast_neigh_enabled);
+ else
+ static_branch_dec(&bond_bcast_neigh_enabled);
+ }
+
+ netdev_dbg(bond->dev, "Setting broadcast_neighbor to %s (%llu)\n",
+ newval->string, newval->value);
+ return 0;
+}
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index 43be458422b3..7edf72ec816a 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -209,7 +209,7 @@ static void bond_info_show_slave(struct seq_file *seq,
seq_printf(seq, "Permanent HW addr: %*phC\n",
slave->dev->addr_len, slave->perm_hwaddr);
- seq_printf(seq, "Slave queue ID: %d\n", slave->queue_id);
+ seq_printf(seq, "Slave queue ID: %d\n", READ_ONCE(slave->queue_id));
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
const struct port *port = &SLAVE_AD_INFO(slave)->port;
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 2805135a7205..9a75ad3181ab 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -37,12 +37,12 @@ static ssize_t bonding_show_bonds(const struct class *cls,
{
const struct bond_net *bn =
container_of_const(attr, struct bond_net, class_attr_bonding_masters);
- int res = 0;
struct bonding *bond;
+ int res = 0;
- rtnl_lock();
+ rcu_read_lock();
- list_for_each_entry(bond, &bn->dev_list, bond_list) {
+ list_for_each_entry_rcu(bond, &bn->dev_list, bond_list) {
if (res > (PAGE_SIZE - IFNAMSIZ)) {
/* not enough space for another interface name */
if ((PAGE_SIZE - res) > 10)
@@ -55,7 +55,7 @@ static ssize_t bonding_show_bonds(const struct class *cls,
if (res)
buf[res-1] = '\n'; /* eat the leftover space */
- rtnl_unlock();
+ rcu_read_unlock();
return res;
}
@@ -170,10 +170,9 @@ static ssize_t bonding_show_slaves(struct device *d,
struct slave *slave;
int res = 0;
- if (!rtnl_trylock())
- return restart_syscall();
+ rcu_read_lock();
- bond_for_each_slave(bond, slave, iter) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
if (res > (PAGE_SIZE - IFNAMSIZ)) {
/* not enough space for another interface name */
if ((PAGE_SIZE - res) > 10)
@@ -184,7 +183,7 @@ static ssize_t bonding_show_slaves(struct device *d,
res += sysfs_emit_at(buf, res, "%s ", slave->dev->name);
}
- rtnl_unlock();
+ rcu_read_unlock();
if (res)
buf[res-1] = '\n'; /* eat the leftover space */
@@ -468,14 +467,12 @@ static ssize_t bonding_show_primary_reselect(struct device *d,
static DEVICE_ATTR(primary_reselect, 0644,
bonding_show_primary_reselect, bonding_sysfs_store_option);
-/* Show the use_carrier flag. */
+/* use_carrier is obsolete, but print value for compatibility */
static ssize_t bonding_show_carrier(struct device *d,
struct device_attribute *attr,
char *buf)
{
- struct bonding *bond = to_bond(d);
-
- return sysfs_emit(buf, "%d\n", bond->params.use_carrier);
+ return sysfs_emit(buf, "1\n");
}
static DEVICE_ATTR(use_carrier, 0644,
bonding_show_carrier, bonding_sysfs_store_option);
@@ -626,10 +623,9 @@ static ssize_t bonding_show_queue_id(struct device *d,
struct slave *slave;
int res = 0;
- if (!rtnl_trylock())
- return restart_syscall();
+ rcu_read_lock();
- bond_for_each_slave(bond, slave, iter) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
if (res > (PAGE_SIZE - IFNAMSIZ - 6)) {
/* not enough space for another interface_name:queue_id pair */
if ((PAGE_SIZE - res) > 10)
@@ -638,12 +634,13 @@ static ssize_t bonding_show_queue_id(struct device *d,
break;
}
res += sysfs_emit_at(buf, res, "%s:%d ",
- slave->dev->name, slave->queue_id);
+ slave->dev->name,
+ READ_ONCE(slave->queue_id));
}
if (res)
buf[res-1] = '\n'; /* eat the leftover space */
- rtnl_unlock();
+ rcu_read_unlock();
return res;
}
diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c
index 313866f2c0e4..36d0e8440b5b 100644
--- a/drivers/net/bonding/bond_sysfs_slave.c
+++ b/drivers/net/bonding/bond_sysfs_slave.c
@@ -53,7 +53,7 @@ static SLAVE_ATTR_RO(perm_hwaddr);
static ssize_t queue_id_show(struct slave *slave, char *buf)
{
- return sysfs_emit(buf, "%d\n", slave->queue_id);
+ return sysfs_emit(buf, "%d\n", READ_ONCE(slave->queue_id));
}
static SLAVE_ATTR_RO(queue_id);
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index ed3a589def6b..c398ac42eae9 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -126,15 +126,6 @@ static inline void debugfs_rx(struct ser_device *ser, const u8 *data, int size)
ser->rx_blob.data = ser->rx_data;
ser->rx_blob.size = size;
}
-
-static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
-{
- if (size > sizeof(ser->tx_data))
- size = sizeof(ser->tx_data);
- memcpy(ser->tx_data, data, size);
- ser->tx_blob.data = ser->tx_data;
- ser->tx_blob.size = size;
-}
#else
static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
{
@@ -151,11 +142,6 @@ static inline void update_tty_status(struct ser_device *ser)
static inline void debugfs_rx(struct ser_device *ser, const u8 *data, int size)
{
}
-
-static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
-{
-}
-
#endif
static void ldisc_receive(struct tty_struct *tty, const u8 *data,
@@ -344,7 +330,7 @@ static int ldisc_open(struct tty_struct *tty)
ser->tty = tty_kref_get(tty);
ser->dev = dev;
debugfs_init(ser, tty);
- tty->receive_room = N_TTY_BUF_SIZE;
+ tty->receive_room = 4096;
tty->disc_data = ser;
set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
rtnl_lock();
diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c
index 0b0f234b0b50..c60386bf2d1a 100644
--- a/drivers/net/caif/caif_virtio.c
+++ b/drivers/net/caif/caif_virtio.c
@@ -646,9 +646,7 @@ static inline void debugfs_init(struct cfv_info *cfv)
/* Setup CAIF for the a virtio device */
static int cfv_probe(struct virtio_device *vdev)
{
- vq_callback_t *vq_cbs = cfv_release_cb;
vrh_callback_t *vrh_cbs = cfv_recv;
- const char *names = "output";
const char *cfv_netdev_name = "cfvrt";
struct net_device *netdev;
struct cfv_info *cfv;
@@ -675,9 +673,11 @@ static int cfv_probe(struct virtio_device *vdev)
goto err;
/* Get the TX virtio ring. This is a "guest side vring". */
- err = virtio_find_vqs(vdev, 1, &cfv->vq_tx, &vq_cbs, &names, NULL);
- if (err)
+ cfv->vq_tx = virtio_find_single_vq(vdev, cfv_release_cb, "output");
+ if (IS_ERR(cfv->vq_tx)) {
+ err = PTR_ERR(cfv->vq_tx);
goto err;
+ }
/* Get the CAIF configuration from virtio config space, if available */
if (vdev->config->get) {
@@ -745,7 +745,7 @@ err:
if (cfv->vr_rx)
vdev->vringh_config->del_vrhs(cfv->vdev);
- if (cfv->vdev)
+ if (cfv->vq_tx)
vdev->config->del_vqs(cfv->vdev);
free_netdev(netdev);
return err;
@@ -782,7 +782,6 @@ static struct virtio_driver caif_virtio_driver = {
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),
.driver.name = KBUILD_MODNAME,
- .driver.owner = THIS_MODULE,
.id_table = id_table,
.probe = cfv_probe,
.remove = cfv_remove,
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 2e31db55d927..e15e320db476 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -124,6 +124,23 @@ config CAN_CAN327
If this driver is built as a module, it will be called can327.
+config CAN_DUMMY
+ tristate "Dummy CAN"
+ help
+ A dummy CAN module supporting Classical CAN, CAN FD and CAN XL. It
+ exposes bittiming values which can be configured through the netlink
+ interface.
+
+ The module will simply echo any frame sent to it. If debug messages
+ are activated, it prints all the CAN bittiming information in the
+ kernel log. Aside from that it does nothing.
+
+ This is convenient for testing the CAN netlink interface. Most
+ users will never need this. If unsure, say N.
+
+ To compile this driver as a module, choose M here: the module will be
+ called dummy-can.
+
config CAN_FLEXCAN
tristate "Support for Freescale FLEXCAN based chips"
depends on OF || COLDFIRE || COMPILE_TEST
@@ -154,6 +171,7 @@ config CAN_JANZ_ICAN3
config CAN_KVASER_PCIEFD
depends on PCI
tristate "Kvaser PCIe FD cards"
+ select NET_DEVLINK
help
This is a driver for the Kvaser PCI Express CAN FD family.
@@ -187,9 +205,8 @@ config CAN_SLCAN
slcand) can be found in the can-utils at the linux-can project, see
https://github.com/linux-can/can-utils for details.
- The slcan driver supports up to 10 CAN netdevices by default which
- can be changed by the 'maxdev=xx' module option. This driver can
- also be built as a module. If so, the module will be called slcan.
+ This driver can also be built as a module. If so, the module
+ will be called slcan.
config CAN_SUN4I
tristate "Allwinner A10 CAN controller"
@@ -202,7 +219,7 @@ config CAN_SUN4I
be called sun4i_can.
config CAN_TI_HECC
- depends on ARM
+ depends on ARM || COMPILE_TEST
tristate "TI High End CAN Controller"
select CAN_RX_OFFLOAD
help
@@ -226,6 +243,7 @@ source "drivers/net/can/m_can/Kconfig"
source "drivers/net/can/mscan/Kconfig"
source "drivers/net/can/peak_canfd/Kconfig"
source "drivers/net/can/rcar/Kconfig"
+source "drivers/net/can/rockchip/Kconfig"
source "drivers/net/can/sja1000/Kconfig"
source "drivers/net/can/softing/Kconfig"
source "drivers/net/can/spi/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 4669cd51e7bf..d7bc10a6b8ea 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_CAN_SLCAN) += slcan/
obj-y += dev/
obj-y += esd/
obj-y += rcar/
+obj-y += rockchip/
obj-y += spi/
obj-y += usb/
obj-y += softing/
@@ -20,11 +21,12 @@ obj-$(CONFIG_CAN_CAN327) += can327.o
obj-$(CONFIG_CAN_CC770) += cc770/
obj-$(CONFIG_CAN_C_CAN) += c_can/
obj-$(CONFIG_CAN_CTUCANFD) += ctucanfd/
+obj-$(CONFIG_CAN_DUMMY) += dummy_can.o
obj-$(CONFIG_CAN_FLEXCAN) += flexcan/
obj-$(CONFIG_CAN_GRCAN) += grcan.o
obj-$(CONFIG_CAN_IFI_CANFD) += ifi_canfd/
obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o
-obj-$(CONFIG_CAN_KVASER_PCIEFD) += kvaser_pciefd.o
+obj-$(CONFIG_CAN_KVASER_PCIEFD) += kvaser_pciefd/
obj-$(CONFIG_CAN_MSCAN) += mscan/
obj-$(CONFIG_CAN_M_CAN) += m_can/
obj-$(CONFIG_CAN_PEAK_PCIEFD) += peak_canfd/
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 11f434d708b3..c2a3a4eef5b2 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -948,7 +948,6 @@ static const struct net_device_ops at91_netdev_ops = {
.ndo_open = at91_open,
.ndo_stop = at91_close,
.ndo_start_xmit = at91_start_xmit,
- .ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops at91_ethtool_ops = {
@@ -1191,7 +1190,7 @@ MODULE_DEVICE_TABLE(platform, at91_can_id_table);
static struct platform_driver at91_can_driver = {
.probe = at91_can_probe,
- .remove_new = at91_can_remove,
+ .remove = at91_can_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = of_match_ptr(at91_can_dt_ids),
diff --git a/drivers/net/can/bxcan.c b/drivers/net/can/bxcan.c
index 49cf9682b925..baf494d20bef 100644
--- a/drivers/net/can/bxcan.c
+++ b/drivers/net/can/bxcan.c
@@ -227,7 +227,7 @@ static void bxcan_enable_filters(struct bxcan_priv *priv, enum bxcan_cfg cfg)
* mask mode with 32 bits width.
*/
- /* Enter filter initialization mode and assing filters to CAN
+ /* Enter filter initialization mode and assign filters to CAN
* controllers.
*/
regmap_update_bits(priv->gcan, BXCAN_FMR_REG,
@@ -842,7 +842,7 @@ static netdev_tx_t bxcan_start_xmit(struct sk_buff *skb,
u32 id;
int i, j;
- if (can_dropped_invalid_skb(ndev, skb))
+ if (can_dev_dropped_skb(ndev, skb))
return NETDEV_TX_OK;
if (bxcan_tx_busy(priv))
@@ -881,7 +881,6 @@ static const struct net_device_ops bxcan_netdev_ops = {
.ndo_open = bxcan_open,
.ndo_stop = bxcan_stop,
.ndo_start_xmit = bxcan_start_xmit,
- .ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops bxcan_ethtool_ops = {
@@ -1092,7 +1091,7 @@ static struct platform_driver bxcan_driver = {
.of_match_table = bxcan_of_match,
},
.probe = bxcan_probe,
- .remove_new = bxcan_remove,
+ .remove = bxcan_remove,
};
module_platform_driver(bxcan_driver);
diff --git a/drivers/net/can/c_can/c_can_main.c b/drivers/net/can/c_can/c_can_main.c
index c63f7fc1e691..3702cac7fbf0 100644
--- a/drivers/net/can/c_can/c_can_main.c
+++ b/drivers/net/can/c_can/c_can_main.c
@@ -1011,47 +1011,60 @@ static int c_can_handle_bus_err(struct net_device *dev,
/* common for all type of bus errors */
priv->can.can_stats.bus_error++;
- stats->rx_errors++;
/* propagate the error condition to the CAN stack */
skb = alloc_can_err_skb(dev, &cf);
- if (unlikely(!skb))
- return 0;
/* check for 'last error code' which tells us the
* type of the last error to occur on the CAN bus
*/
- cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ if (likely(skb))
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
switch (lec_type) {
case LEC_STUFF_ERROR:
netdev_dbg(dev, "stuff error\n");
- cf->data[2] |= CAN_ERR_PROT_STUFF;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ stats->rx_errors++;
break;
case LEC_FORM_ERROR:
netdev_dbg(dev, "form error\n");
- cf->data[2] |= CAN_ERR_PROT_FORM;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ stats->rx_errors++;
break;
case LEC_ACK_ERROR:
netdev_dbg(dev, "ack error\n");
- cf->data[3] = CAN_ERR_PROT_LOC_ACK;
+ if (likely(skb))
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK;
+ stats->tx_errors++;
break;
case LEC_BIT1_ERROR:
netdev_dbg(dev, "bit1 error\n");
- cf->data[2] |= CAN_ERR_PROT_BIT1;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_BIT1;
+ stats->tx_errors++;
break;
case LEC_BIT0_ERROR:
netdev_dbg(dev, "bit0 error\n");
- cf->data[2] |= CAN_ERR_PROT_BIT0;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_BIT0;
+ stats->tx_errors++;
break;
case LEC_CRC_ERROR:
netdev_dbg(dev, "CRC error\n");
- cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+ if (likely(skb))
+ cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+ stats->rx_errors++;
break;
default:
break;
}
+ if (unlikely(!skb))
+ return 0;
+
netif_receive_skb(skb);
return 1;
}
@@ -1349,7 +1362,6 @@ static const struct net_device_ops c_can_netdev_ops = {
.ndo_open = c_can_open,
.ndo_stop = c_can_close,
.ndo_start_xmit = c_can_start_xmit,
- .ndo_change_mtu = can_change_mtu,
};
int register_c_can_dev(struct net_device *dev)
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index e2ec69aa46e5..19c86b94a40e 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -269,30 +269,22 @@ static int c_can_plat_probe(struct platform_device *pdev)
/* get the appropriate clk */
clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- goto exit;
- }
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
/* get the platform data */
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- ret = irq;
- goto exit;
- }
+ if (irq < 0)
+ return irq;
addr = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
- if (IS_ERR(addr)) {
- ret = PTR_ERR(addr);
- goto exit;
- }
+ if (IS_ERR(addr))
+ return PTR_ERR(addr);
/* allocate the c_can device */
dev = alloc_c_can_dev(drvdata->msg_obj_num);
- if (!dev) {
- ret = -ENOMEM;
- goto exit;
- }
+ if (!dev)
+ return -ENOMEM;
priv = netdev_priv(dev);
switch (drvdata->id) {
@@ -324,33 +316,22 @@ static int c_can_plat_probe(struct platform_device *pdev)
/* Check if we need custom RAMINIT via syscon. Mostly for TI
* platforms. Only supported with DT boot.
*/
- if (np && of_property_read_bool(np, "syscon-raminit")) {
+ if (np && of_property_present(np, "syscon-raminit")) {
+ unsigned int args[2];
u32 id;
struct c_can_raminit *raminit = &priv->raminit_sys;
ret = -EINVAL;
- raminit->syscon = syscon_regmap_lookup_by_phandle(np,
- "syscon-raminit");
+ raminit->syscon = syscon_regmap_lookup_by_phandle_args(np,
+ "syscon-raminit",
+ 2, args);
if (IS_ERR(raminit->syscon)) {
- /* can fail with -EPROBE_DEFER */
ret = PTR_ERR(raminit->syscon);
- free_c_can_dev(dev);
- return ret;
- }
-
- if (of_property_read_u32_index(np, "syscon-raminit", 1,
- &raminit->reg)) {
- dev_err(&pdev->dev,
- "couldn't get the RAMINIT reg. offset!\n");
goto exit_free_device;
}
- if (of_property_read_u32_index(np, "syscon-raminit", 2,
- &id)) {
- dev_err(&pdev->dev,
- "couldn't get the CAN instance ID\n");
- goto exit_free_device;
- }
+ raminit->reg = args[0];
+ id = args[1];
if (id >= drvdata->raminit_num) {
dev_err(&pdev->dev,
@@ -385,18 +366,17 @@ static int c_can_plat_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
KBUILD_MODNAME, ret);
- goto exit_free_device;
+ goto exit_pm_runtime;
}
dev_info(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n",
KBUILD_MODNAME, priv->base, dev->irq);
return 0;
-exit_free_device:
+exit_pm_runtime:
pm_runtime_disable(priv->device);
+exit_free_device:
free_c_can_dev(dev);
-exit:
- dev_err(&pdev->dev, "probe failed\n");
return ret;
}
@@ -476,7 +456,7 @@ static struct platform_driver c_can_plat_driver = {
.of_match_table = c_can_of_table,
},
.probe = c_can_plat_probe,
- .remove_new = c_can_plat_remove,
+ .remove = c_can_plat_remove,
.suspend = c_can_suspend,
.resume = c_can_resume,
.id_table = c_can_id_table,
diff --git a/drivers/net/can/can327.c b/drivers/net/can/can327.c
index 24af63961030..b66fc16aedd2 100644
--- a/drivers/net/can/can327.c
+++ b/drivers/net/can/can327.c
@@ -849,7 +849,6 @@ static const struct net_device_ops can327_netdev_ops = {
.ndo_open = can327_netdev_open,
.ndo_stop = can327_netdev_close,
.ndo_start_xmit = can327_netdev_start_xmit,
- .ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops can327_ethtool_ops = {
diff --git a/drivers/net/can/cc770/Kconfig b/drivers/net/can/cc770/Kconfig
index 9ef1359319f0..aae25c2f849e 100644
--- a/drivers/net/can/cc770/Kconfig
+++ b/drivers/net/can/cc770/Kconfig
@@ -7,6 +7,7 @@ if CAN_CC770
config CAN_CC770_ISA
tristate "ISA Bus based legacy CC770 driver"
+ depends on HAS_IOPORT
help
This driver adds legacy support for CC770 and AN82527 chips
connected to the ISA bus using I/O port, memory mapped or
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index 30909f3aab57..8d5abd643c06 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -834,7 +834,6 @@ static const struct net_device_ops cc770_netdev_ops = {
.ndo_open = cc770_open,
.ndo_stop = cc770_close,
.ndo_start_xmit = cc770_start_xmit,
- .ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops cc770_ethtool_ops = {
diff --git a/drivers/net/can/cc770/cc770_isa.c b/drivers/net/can/cc770/cc770_isa.c
index 22009440a983..d06762817153 100644
--- a/drivers/net/can/cc770/cc770_isa.c
+++ b/drivers/net/can/cc770/cc770_isa.c
@@ -307,7 +307,7 @@ static void cc770_isa_remove(struct platform_device *pdev)
static struct platform_driver cc770_isa_driver = {
.probe = cc770_isa_probe,
- .remove_new = cc770_isa_remove,
+ .remove = cc770_isa_remove,
.driver = {
.name = KBUILD_MODNAME,
},
diff --git a/drivers/net/can/cc770/cc770_platform.c b/drivers/net/can/cc770/cc770_platform.c
index 13bcfba05f18..b6c4f02ffb97 100644
--- a/drivers/net/can/cc770/cc770_platform.c
+++ b/drivers/net/can/cc770/cc770_platform.c
@@ -70,17 +70,10 @@ static void cc770_platform_write_reg(const struct cc770_priv *priv, int reg,
static int cc770_get_of_node_data(struct platform_device *pdev,
struct cc770_priv *priv)
{
+ u32 clkext = CC770_PLATFORM_CAN_CLOCK, clkout = 0;
struct device_node *np = pdev->dev.of_node;
- const u32 *prop;
- int prop_size;
- u32 clkext;
-
- prop = of_get_property(np, "bosch,external-clock-frequency",
- &prop_size);
- if (prop && (prop_size == sizeof(u32)))
- clkext = *prop;
- else
- clkext = CC770_PLATFORM_CAN_CLOCK; /* default */
+
+ of_property_read_u32(np, "bosch,external-clock-frequency", &clkext);
priv->can.clock.freq = clkext;
/* The system clock may not exceed 10 MHz */
@@ -98,7 +91,7 @@ static int cc770_get_of_node_data(struct platform_device *pdev,
if (of_property_read_bool(np, "bosch,iso-low-speed-mux"))
priv->cpu_interface |= CPUIF_MUX;
- if (!of_get_property(np, "bosch,no-comperator-bypass", NULL))
+ if (!of_property_read_bool(np, "bosch,no-comperator-bypass"))
priv->bus_config |= BUSCFG_CBY;
if (of_property_read_bool(np, "bosch,disconnect-rx0-input"))
priv->bus_config |= BUSCFG_DR0;
@@ -109,25 +102,22 @@ static int cc770_get_of_node_data(struct platform_device *pdev,
if (of_property_read_bool(np, "bosch,polarity-dominant"))
priv->bus_config |= BUSCFG_POL;
- prop = of_get_property(np, "bosch,clock-out-frequency", &prop_size);
- if (prop && (prop_size == sizeof(u32)) && *prop > 0) {
- u32 cdv = clkext / *prop;
- int slew;
+ of_property_read_u32(np, "bosch,clock-out-frequency", &clkout);
+ if (clkout > 0) {
+ u32 cdv = clkext / clkout;
if (cdv > 0 && cdv < 16) {
+ u32 slew;
+
priv->cpu_interface |= CPUIF_CEN;
priv->clkout |= (cdv - 1) & CLKOUT_CD_MASK;
- prop = of_get_property(np, "bosch,slew-rate",
- &prop_size);
- if (prop && (prop_size == sizeof(u32))) {
- slew = *prop;
- } else {
+ if (of_property_read_u32(np, "bosch,slew-rate", &slew)) {
/* Determine default slew rate */
slew = (CLKOUT_SL_MASK >>
CLKOUT_SL_SHIFT) -
((cdv * clkext - 1) / 8000000);
- if (slew < 0)
+ if (slew > (CLKOUT_SL_MASK >> CLKOUT_SL_SHIFT))
slew = 0;
}
priv->clkout |= (slew << CLKOUT_SL_SHIFT) &
@@ -257,7 +247,7 @@ static struct platform_driver cc770_platform_driver = {
.of_match_table = cc770_platform_table,
},
.probe = cc770_platform_probe,
- .remove_new = cc770_platform_remove,
+ .remove = cc770_platform_remove,
};
module_platform_driver(cc770_platform_driver);
diff --git a/drivers/net/can/ctucanfd/ctucanfd_base.c b/drivers/net/can/ctucanfd/ctucanfd_base.c
index 64c349fd4600..1e6b9e3dc2fe 100644
--- a/drivers/net/can/ctucanfd/ctucanfd_base.c
+++ b/drivers/net/can/ctucanfd/ctucanfd_base.c
@@ -275,7 +275,7 @@ static int ctucan_set_bittiming(struct net_device *ndev)
static int ctucan_set_data_bittiming(struct net_device *ndev)
{
struct ctucan_priv *priv = netdev_priv(ndev);
- struct can_bittiming *dbt = &priv->can.data_bittiming;
+ struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
/* Note that dbt may be modified here */
return ctucan_set_btr(ndev, dbt, false);
@@ -290,7 +290,7 @@ static int ctucan_set_data_bittiming(struct net_device *ndev)
static int ctucan_set_secondary_sample_point(struct net_device *ndev)
{
struct ctucan_priv *priv = netdev_priv(ndev);
- struct can_bittiming *dbt = &priv->can.data_bittiming;
+ struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
int ssp_offset = 0;
u32 ssp_cfg = 0; /* No SSP by default */
@@ -506,11 +506,12 @@ static bool ctucan_is_txt_buf_writable(struct ctucan_priv *priv, u8 buf)
* @buf: TXT Buffer index to which frame is inserted (0-based)
* @isfdf: True - CAN FD Frame, False - CAN 2.0 Frame
*
- * Return: True - Frame inserted successfully
- * False - Frame was not inserted due to one of:
- * 1. TXT Buffer is not writable (it is in wrong state)
- * 2. Invalid TXT buffer index
- * 3. Invalid frame length
+ * Return:
+ * * True - Frame inserted successfully
+ * * False - Frame was not inserted due to one of:
+ * 1. TXT Buffer is not writable (it is in wrong state)
+ * 2. Invalid TXT buffer index
+ * 3. Invalid frame length
*/
static bool ctucan_insert_frame(struct ctucan_priv *priv, const struct canfd_frame *cf, u8 buf,
bool isfdf)
@@ -867,10 +868,12 @@ static void ctucan_err_interrupt(struct net_device *ndev, u32 isr)
}
break;
case CAN_STATE_ERROR_ACTIVE:
- cf->can_id |= CAN_ERR_CNT;
- cf->data[1] = CAN_ERR_CRTL_ACTIVE;
- cf->data[6] = bec.txerr;
- cf->data[7] = bec.rxerr;
+ if (skb) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[1] = CAN_ERR_CRTL_ACTIVE;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ }
break;
default:
netdev_warn(ndev, "unhandled error state (%d:%s)!\n",
@@ -1298,7 +1301,6 @@ static const struct net_device_ops ctucan_netdev_ops = {
.ndo_open = ctucan_open,
.ndo_stop = ctucan_close,
.ndo_start_xmit = ctucan_start_xmit,
- .ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops ctucan_ethtool_ops = {
@@ -1356,12 +1358,12 @@ int ctucan_probe_common(struct device *dev, void __iomem *addr, int irq, unsigne
priv->ntxbufs = ntxbufs;
priv->dev = dev;
priv->can.bittiming_const = &ctu_can_fd_bit_timing_max;
- priv->can.data_bittiming_const = &ctu_can_fd_bit_timing_data_max;
+ priv->can.fd.data_bittiming_const = &ctu_can_fd_bit_timing_data_max;
priv->can.do_set_mode = ctucan_do_set_mode;
/* Needed for timing adjustment to be performed as soon as possible */
priv->can.do_set_bittiming = ctucan_set_bittiming;
- priv->can.do_set_data_bittiming = ctucan_set_data_bittiming;
+ priv->can.fd.do_set_data_bittiming = ctucan_set_data_bittiming;
priv->can.do_get_berr_counter = ctucan_get_berr_counter;
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK
diff --git a/drivers/net/can/ctucanfd/ctucanfd_platform.c b/drivers/net/can/ctucanfd/ctucanfd_platform.c
index 55bb10b157b4..70e2577c8541 100644
--- a/drivers/net/can/ctucanfd/ctucanfd_platform.c
+++ b/drivers/net/can/ctucanfd/ctucanfd_platform.c
@@ -111,7 +111,7 @@ MODULE_DEVICE_TABLE(of, ctucan_of_match);
static struct platform_driver ctucanfd_driver = {
.probe = ctucan_platform_probe,
- .remove_new = ctucan_platform_remove,
+ .remove = ctucan_platform_remove,
.driver = {
.name = DRV_NAME,
.pm = &ctucan_platform_pm_ops,
diff --git a/drivers/net/can/dev/bittiming.c b/drivers/net/can/dev/bittiming.c
index 0b93900b1dfa..8f82418230ce 100644
--- a/drivers/net/can/dev/bittiming.c
+++ b/drivers/net/can/dev/bittiming.c
@@ -2,6 +2,7 @@
/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
* Copyright (C) 2006 Andrey Volkov, Varma Electronics
* Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
+ * Copyright (c) 2025 Vincent Mailhol <mailhol@kernel.org>
*/
#include <linux/can/dev.h>
@@ -151,3 +152,65 @@ int can_get_bittiming(const struct net_device *dev, struct can_bittiming *bt,
return -EINVAL;
}
+
+int can_validate_pwm_bittiming(const struct net_device *dev,
+ const struct can_pwm *pwm,
+ struct netlink_ext_ack *extack)
+{
+ const struct can_priv *priv = netdev_priv(dev);
+ u32 xl_bit_time_tqmin = can_bit_time_tqmin(&priv->xl.data_bittiming);
+ u32 nom_bit_time_tqmin = can_bit_time_tqmin(&priv->bittiming);
+ u32 pwms_ns = can_tqmin_to_ns(pwm->pwms, priv->clock.freq);
+ u32 pwml_ns = can_tqmin_to_ns(pwm->pwml, priv->clock.freq);
+
+ if (pwms_ns + pwml_ns > CAN_PWM_NS_MAX) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "The PWM symbol duration: %u ns may not exceed %u ns",
+ pwms_ns + pwml_ns, CAN_PWM_NS_MAX);
+ return -EINVAL;
+ }
+
+ if (pwms_ns < CAN_PWM_DECODE_NS) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "PWMS: %u ns shall be at least %u ns",
+ pwms_ns, CAN_PWM_DECODE_NS);
+ return -EINVAL;
+ }
+
+ if (pwm->pwms >= pwm->pwml) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "PWMS: %u tqmin shall be smaller than PWML: %u tqmin",
+ pwm->pwms, pwm->pwml);
+ return -EINVAL;
+ }
+
+ if (pwml_ns - pwms_ns < 2 * CAN_PWM_DECODE_NS) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "At least %u ns shall separate PWMS: %u ns from PMWL: %u ns",
+ 2 * CAN_PWM_DECODE_NS, pwms_ns, pwml_ns);
+ return -EINVAL;
+ }
+
+ if (xl_bit_time_tqmin % (pwm->pwms + pwm->pwml) != 0) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "PWM duration: %u tqmin does not divide XL's bit time: %u tqmin",
+ pwm->pwms + pwm->pwml, xl_bit_time_tqmin);
+ return -EINVAL;
+ }
+
+ if (pwm->pwmo >= pwm->pwms + pwm->pwml) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "PWMO: %u tqmin can not be greater than PWMS + PWML: %u tqmin",
+ pwm->pwmo, pwm->pwms + pwm->pwml);
+ return -EINVAL;
+ }
+
+ if (nom_bit_time_tqmin % (pwm->pwms + pwm->pwml) != pwm->pwmo) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "Can not assemble nominal bit time: %u tqmin out of PWMS + PMWL and PWMO",
+ nom_bit_time_tqmin);
+ return -EINVAL;
+ }
+
+ return 0;
+}
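
For a feel of what these checks enforce, here is a standalone sketch (not part of
the patch) that re-evaluates the same constraints with hypothetical numbers: an
80 MHz CAN clock (one tqmin = 12.5 ns), a 5 Mbit/s XL data phase (16 tqmin per
bit) and a 500 kbit/s nominal phase (160 tqmin per bit). The two CAN_PWM_*
constants below are placeholders, not the kernel's values.

#include <assert.h>

#define CLOCK_HZ	80000000u	/* assumed 80 MHz CAN clock */
#define PWM_NS_MAX	1000u		/* placeholder for CAN_PWM_NS_MAX */
#define PWM_DECODE_NS	25u		/* placeholder for CAN_PWM_DECODE_NS */

int main(void)
{
	unsigned int xl_bit_tqmin = 16;		/* 5 Mbit/s XL data phase */
	unsigned int nom_bit_tqmin = 160;	/* 500 kbit/s nominal phase */
	unsigned int pwms = 4, pwml = 12, pwmo;
	unsigned int ns_per_tqmin = 1000000000u / CLOCK_HZ;	/* 12 ns, truncated */

	pwmo = nom_bit_tqmin % (pwms + pwml);	/* 160 % 16 = 0 */

	assert((pwms + pwml) * ns_per_tqmin <= PWM_NS_MAX);	/* total PWM symbol duration */
	assert(pwms * ns_per_tqmin >= PWM_DECODE_NS);		/* PWMS long enough to decode */
	assert(pwms < pwml);					/* short phase < long phase */
	assert((pwml - pwms) * ns_per_tqmin >= 2 * PWM_DECODE_NS);
	assert(xl_bit_tqmin % (pwms + pwml) == 0);	/* whole PWM symbols per XL bit */
	assert(pwmo < pwms + pwml);			/* offset stays within one symbol */
	assert(nom_bit_tqmin % (pwms + pwml) == pwmo);	/* nominal phase alignment */

	return 0;
}
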
diff --git a/drivers/net/can/dev/calc_bittiming.c b/drivers/net/can/dev/calc_bittiming.c
index 3809c148fb88..cc4022241553 100644
--- a/drivers/net/can/dev/calc_bittiming.c
+++ b/drivers/net/can/dev/calc_bittiming.c
@@ -2,6 +2,7 @@
/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
* Copyright (C) 2006 Andrey Volkov, Varma Electronics
* Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
+ * Copyright (C) 2021-2025 Vincent Mailhol <mailhol@kernel.org>
*/
#include <linux/units.h>
@@ -9,6 +10,33 @@
#define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */
+/* CiA recommended sample points for Non Return to Zero encoding. */
+static int can_calc_sample_point_nrz(const struct can_bittiming *bt)
+{
+ if (bt->bitrate > 800 * KILO /* BPS */)
+ return 750;
+
+ if (bt->bitrate > 500 * KILO /* BPS */)
+ return 800;
+
+ return 875;
+}
+
+/* Sample points for Pulse-Width Modulation encoding. */
+static int can_calc_sample_point_pwm(const struct can_bittiming *bt)
+{
+ if (bt->bitrate > 15 * MEGA /* BPS */)
+ return 625;
+
+ if (bt->bitrate > 9 * MEGA /* BPS */)
+ return 600;
+
+ if (bt->bitrate > 4 * MEGA /* BPS */)
+ return 560;
+
+ return 520;
+}
+
/* Bit-timing calculation derived from:
*
* Code based on LinCAN sources and H8S2638 project
@@ -23,7 +51,7 @@
*/
static int
can_update_sample_point(const struct can_bittiming_const *btc,
- const unsigned int sample_point_nominal, const unsigned int tseg,
+ const unsigned int sample_point_reference, const unsigned int tseg,
unsigned int *tseg1_ptr, unsigned int *tseg2_ptr,
unsigned int *sample_point_error_ptr)
{
@@ -34,7 +62,7 @@ can_update_sample_point(const struct can_bittiming_const *btc,
for (i = 0; i <= 1; i++) {
tseg2 = tseg + CAN_SYNC_SEG -
- (sample_point_nominal * (tseg + CAN_SYNC_SEG)) /
+ (sample_point_reference * (tseg + CAN_SYNC_SEG)) /
1000 - i;
tseg2 = clamp(tseg2, btc->tseg2_min, btc->tseg2_max);
tseg1 = tseg - tseg2;
@@ -45,9 +73,9 @@ can_update_sample_point(const struct can_bittiming_const *btc,
sample_point = 1000 * (tseg + CAN_SYNC_SEG - tseg2) /
(tseg + CAN_SYNC_SEG);
- sample_point_error = abs(sample_point_nominal - sample_point);
+ sample_point_error = abs(sample_point_reference - sample_point);
- if (sample_point <= sample_point_nominal &&
+ if (sample_point <= sample_point_reference &&
sample_point_error < best_sample_point_error) {
best_sample_point = sample_point;
best_sample_point_error = sample_point_error;
@@ -67,28 +95,24 @@ int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
{
struct can_priv *priv = netdev_priv(dev);
unsigned int bitrate; /* current bitrate */
- unsigned int bitrate_error; /* difference between current and nominal value */
+ unsigned int bitrate_error; /* diff between calculated and reference value */
unsigned int best_bitrate_error = UINT_MAX;
- unsigned int sample_point_error; /* difference between current and nominal value */
+ unsigned int sample_point_error; /* diff between calculated and reference value */
unsigned int best_sample_point_error = UINT_MAX;
- unsigned int sample_point_nominal; /* nominal sample point */
+ unsigned int sample_point_reference; /* reference sample point */
unsigned int best_tseg = 0; /* current best value for tseg */
unsigned int best_brp = 0; /* current best value for brp */
unsigned int brp, tsegall, tseg, tseg1 = 0, tseg2 = 0;
u64 v64;
int err;
- /* Use CiA recommended sample points */
- if (bt->sample_point) {
- sample_point_nominal = bt->sample_point;
- } else {
- if (bt->bitrate > 800 * KILO /* BPS */)
- sample_point_nominal = 750;
- else if (bt->bitrate > 500 * KILO /* BPS */)
- sample_point_nominal = 800;
- else
- sample_point_nominal = 875;
- }
+ if (bt->sample_point)
+ sample_point_reference = bt->sample_point;
+ else if (btc == priv->xl.data_bittiming_const &&
+ (priv->ctrlmode & CAN_CTRLMODE_XL_TMS))
+ sample_point_reference = can_calc_sample_point_pwm(bt);
+ else
+ sample_point_reference = can_calc_sample_point_nrz(bt);
/* tseg even = round down, odd = round up */
for (tseg = (btc->tseg1_max + btc->tseg2_max) * 2 + 1;
@@ -114,7 +138,7 @@ int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
if (bitrate_error < best_bitrate_error)
best_sample_point_error = UINT_MAX;
- can_update_sample_point(btc, sample_point_nominal, tseg / 2,
+ can_update_sample_point(btc, sample_point_reference, tseg / 2,
&tseg1, &tseg2, &sample_point_error);
if (sample_point_error >= best_sample_point_error)
continue;
@@ -129,23 +153,26 @@ int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
}
if (best_bitrate_error) {
- /* Error in one-tenth of a percent */
- v64 = (u64)best_bitrate_error * 1000;
+ /* Error in one-hundredth of a percent */
+ v64 = (u64)best_bitrate_error * 10000;
do_div(v64, bt->bitrate);
bitrate_error = (u32)v64;
+ /* print at least 0.01% if the error is smaller */
+ bitrate_error = max(bitrate_error, 1U);
if (bitrate_error > CAN_CALC_MAX_ERROR) {
NL_SET_ERR_MSG_FMT(extack,
- "bitrate error: %u.%u%% too high",
- bitrate_error / 10, bitrate_error % 10);
+ "bitrate error: %u.%02u%% too high",
+ bitrate_error / 100,
+ bitrate_error % 100);
return -EINVAL;
}
NL_SET_ERR_MSG_FMT(extack,
- "bitrate error: %u.%u%%",
- bitrate_error / 10, bitrate_error % 10);
+ "bitrate error: %u.%02u%%",
+ bitrate_error / 100, bitrate_error % 100);
}
/* real sample point */
- bt->sample_point = can_update_sample_point(btc, sample_point_nominal,
+ bt->sample_point = can_update_sample_point(btc, sample_point_reference,
best_tseg, &tseg1, &tseg2,
NULL);
@@ -173,13 +200,15 @@ int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
const struct can_bittiming *dbt,
- u32 *ctrlmode, u32 ctrlmode_supported)
+ u32 tdc_mask, u32 *ctrlmode, u32 ctrlmode_supported)
{
- if (!tdc_const || !(ctrlmode_supported & CAN_CTRLMODE_TDC_AUTO))
+ u32 tdc_auto = tdc_mask & CAN_CTRLMODE_TDC_AUTO_MASK;
+
+ if (!tdc_const || !(ctrlmode_supported & tdc_auto))
return;
- *ctrlmode &= ~CAN_CTRLMODE_TDC_MASK;
+ *ctrlmode &= ~tdc_mask;
/* As specified in ISO 11898-1 section 11.3.3 "Transmitter
* delay compensation" (TDC) is only applicable if data BRP is
@@ -193,6 +222,41 @@ void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
if (sample_point_in_tc < tdc_const->tdco_min)
return;
tdc->tdco = min(sample_point_in_tc, tdc_const->tdco_max);
- *ctrlmode |= CAN_CTRLMODE_TDC_AUTO;
+ *ctrlmode |= tdc_auto;
}
}
+
+int can_calc_pwm(struct net_device *dev, struct netlink_ext_ack *extack)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ const struct can_pwm_const *pwm_const = priv->xl.pwm_const;
+ struct can_pwm *pwm = &priv->xl.pwm;
+ u32 xl_tqmin = can_bit_time_tqmin(&priv->xl.data_bittiming);
+ u32 xl_ns = can_tqmin_to_ns(xl_tqmin, priv->clock.freq);
+ u32 nom_tqmin = can_bit_time_tqmin(&priv->bittiming);
+ int pwm_per_bit_max = xl_tqmin / (pwm_const->pwms_min + pwm_const->pwml_min);
+ int pwm_per_bit;
+ u32 pwm_tqmin;
+
+ /* For a data bitrate of 5 MB/s or greater, xl_ns < CAN_PWM_NS_MAX,
+ * giving us a pwm_per_bit of 1 and the loop breaks immediately.
+ */
+ for (pwm_per_bit = DIV_ROUND_UP(xl_ns, CAN_PWM_NS_MAX);
+ pwm_per_bit <= pwm_per_bit_max; pwm_per_bit++)
+ if (xl_tqmin % pwm_per_bit == 0)
+ break;
+
+ if (pwm_per_bit > pwm_per_bit_max) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "Can not divide the XL data phase's bit time: %u tqmin into multiple PWM symbols",
+ xl_tqmin);
+ return -EINVAL;
+ }
+
+ pwm_tqmin = xl_tqmin / pwm_per_bit;
+ pwm->pwms = DIV_ROUND_UP_POW2(pwm_tqmin, 4);
+ pwm->pwml = pwm_tqmin - pwm->pwms;
+ pwm->pwmo = nom_tqmin % pwm_tqmin;
+
+ return 0;
+}
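
The switch from tenths to hundredths of a percent is easiest to see with made-up
numbers: a requested bitrate of 500000 bit/s that the hardware can only
approximate to within 250 bit/s now reports as "0.05%" instead of rounding down
to "0.0%". A minimal standalone sketch of the same arithmetic (values are
illustrative only):

#include <stdio.h>

int main(void)
{
	unsigned int bitrate = 500000;		/* requested bitrate */
	unsigned int best_bitrate_error = 250;	/* |achievable - requested| */
	unsigned long long v64;
	unsigned int bitrate_error;

	v64 = (unsigned long long)best_bitrate_error * 10000;	/* hundredths of a percent */
	bitrate_error = (unsigned int)(v64 / bitrate);		/* 5 */
	if (bitrate_error < 1)
		bitrate_error = 1;	/* print at least 0.01% */

	/* prints "bitrate error: 0.05%" */
	printf("bitrate error: %u.%02u%%\n",
	       bitrate_error / 100, bitrate_error % 100);
	return 0;
}
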
diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
index 3a3be5cdfc1f..091f30e94c61 100644
--- a/drivers/net/can/dev/dev.c
+++ b/drivers/net/can/dev/dev.c
@@ -4,17 +4,17 @@
* Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
*/
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <linux/workqueue.h>
#include <linux/can.h>
#include <linux/can/can-ml.h>
#include <linux/can/dev.h>
#include <linux/can/skb.h>
#include <linux/gpio/consumer.h>
+#include <linux/if_arp.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
static void can_update_state_error_stats(struct net_device *dev,
enum can_state new_state)
@@ -85,11 +85,52 @@ const char *can_get_state_str(const enum can_state state)
default:
return "<unknown>";
}
-
- return "<unknown>";
}
EXPORT_SYMBOL_GPL(can_get_state_str);
+const char *can_get_ctrlmode_str(u32 ctrlmode)
+{
+ switch (ctrlmode & ~(ctrlmode - 1)) {
+ case 0:
+ return "(none)";
+ case CAN_CTRLMODE_LOOPBACK:
+ return "LOOPBACK";
+ case CAN_CTRLMODE_LISTENONLY:
+ return "LISTEN-ONLY";
+ case CAN_CTRLMODE_3_SAMPLES:
+ return "TRIPLE-SAMPLING";
+ case CAN_CTRLMODE_ONE_SHOT:
+ return "ONE-SHOT";
+ case CAN_CTRLMODE_BERR_REPORTING:
+ return "BERR-REPORTING";
+ case CAN_CTRLMODE_FD:
+ return "FD";
+ case CAN_CTRLMODE_PRESUME_ACK:
+ return "PRESUME-ACK";
+ case CAN_CTRLMODE_FD_NON_ISO:
+ return "FD-NON-ISO";
+ case CAN_CTRLMODE_CC_LEN8_DLC:
+ return "CC-LEN8-DLC";
+ case CAN_CTRLMODE_TDC_AUTO:
+ return "TDC-AUTO";
+ case CAN_CTRLMODE_TDC_MANUAL:
+ return "TDC-MANUAL";
+ case CAN_CTRLMODE_RESTRICTED:
+ return "RESTRICTED";
+ case CAN_CTRLMODE_XL:
+ return "XL";
+ case CAN_CTRLMODE_XL_TDC_AUTO:
+ return "XL-TDC-AUTO";
+ case CAN_CTRLMODE_XL_TDC_MANUAL:
+ return "XL-TDC-MANUAL";
+ case CAN_CTRLMODE_XL_TMS:
+ return "TMS";
+ default:
+ return "<unknown>";
+ }
+}
+EXPORT_SYMBOL_GPL(can_get_ctrlmode_str);
+
static enum can_state can_state_err_to_state(u16 err)
{
if (err < CAN_ERROR_WARNING_THRESHOLD)
@@ -147,13 +188,16 @@ void can_change_state(struct net_device *dev, struct can_frame *cf,
EXPORT_SYMBOL_GPL(can_change_state);
/* CAN device restart for bus-off recovery */
-static void can_restart(struct net_device *dev)
+static int can_restart(struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
struct sk_buff *skb;
struct can_frame *cf;
int err;
+ if (!priv->do_set_mode)
+ return -EOPNOTSUPP;
+
if (netif_carrier_ok(dev))
netdev_err(dev, "Attempt to restart for bus-off recovery, but carrier is OK?\n");
@@ -175,10 +219,14 @@ static void can_restart(struct net_device *dev)
if (err) {
netdev_err(dev, "Restart failed, error %pe\n", ERR_PTR(err));
netif_carrier_off(dev);
+
+ return err;
} else {
netdev_dbg(dev, "Restarted\n");
priv->can_stats.restarts++;
}
+
+ return 0;
}
static void can_restart_work(struct work_struct *work)
@@ -203,9 +251,8 @@ int can_restart_now(struct net_device *dev)
return -EBUSY;
cancel_delayed_work_sync(&priv->restart_work);
- can_restart(dev);
- return 0;
+ return can_restart(dev);
}
/* CAN bus-off
@@ -236,6 +283,8 @@ void can_setup(struct net_device *dev)
{
dev->type = ARPHRD_CAN;
dev->mtu = CAN_MTU;
+ dev->min_mtu = CAN_MTU;
+ dev->max_mtu = CAN_MTU;
dev->hard_header_len = 0;
dev->addr_len = 0;
dev->tx_queue_len = 10;
@@ -305,87 +354,86 @@ void free_candev(struct net_device *dev)
}
EXPORT_SYMBOL_GPL(free_candev);
-/* changing MTU and control mode for CAN/CANFD devices */
-int can_change_mtu(struct net_device *dev, int new_mtu)
+void can_set_default_mtu(struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
- u32 ctrlmode_static = can_get_static_ctrlmode(priv);
- /* Do not allow changing the MTU while running */
- if (dev->flags & IFF_UP)
- return -EBUSY;
+ if (priv->ctrlmode & CAN_CTRLMODE_XL) {
+ if (can_is_canxl_dev_mtu(dev->mtu))
+ return;
+ dev->mtu = CANXL_MTU;
+ dev->min_mtu = CANXL_MIN_MTU;
+ dev->max_mtu = CANXL_MAX_MTU;
+ } else if (priv->ctrlmode & CAN_CTRLMODE_FD) {
+ dev->mtu = CANFD_MTU;
+ dev->min_mtu = CANFD_MTU;
+ dev->max_mtu = CANFD_MTU;
+ } else {
+ dev->mtu = CAN_MTU;
+ dev->min_mtu = CAN_MTU;
+ dev->max_mtu = CAN_MTU;
+ }
+}
- /* allow change of MTU according to the CANFD ability of the device */
- switch (new_mtu) {
- case CAN_MTU:
- /* 'CANFD-only' controllers can not switch to CAN_MTU */
- if (ctrlmode_static & CAN_CTRLMODE_FD)
- return -EINVAL;
+/* helper to define static CAN controller features at device creation time */
+int can_set_static_ctrlmode(struct net_device *dev, u32 static_mode)
+{
+ struct can_priv *priv = netdev_priv(dev);
- priv->ctrlmode &= ~CAN_CTRLMODE_FD;
- break;
+ /* alloc_candev() succeeded => netdev_priv() is valid at this point */
+ if (priv->ctrlmode_supported & static_mode) {
+ netdev_warn(dev,
+ "Controller features can not be supported and static at the same time\n");
+ return -EINVAL;
+ }
+ priv->ctrlmode = static_mode;
- case CANFD_MTU:
- /* check for potential CANFD ability */
- if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD) &&
- !(ctrlmode_static & CAN_CTRLMODE_FD))
- return -EINVAL;
+ /* override MTU which was set by default in can_setup()? */
+ can_set_default_mtu(dev);
- priv->ctrlmode |= CAN_CTRLMODE_FD;
- break;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(can_set_static_ctrlmode);
- default:
- return -EINVAL;
- }
+/* generic implementation of netdev_ops::ndo_hwtstamp_get for CAN devices
+ * supporting hardware timestamps
+ */
+int can_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *cfg)
+{
+ cfg->tx_type = HWTSTAMP_TX_ON;
+ cfg->rx_filter = HWTSTAMP_FILTER_ALL;
- dev->mtu = new_mtu;
return 0;
}
-EXPORT_SYMBOL_GPL(can_change_mtu);
+EXPORT_SYMBOL(can_hwtstamp_get);
-/* generic implementation of netdev_ops::ndo_eth_ioctl for CAN devices
+/* generic implementation of netdev_ops::ndo_hwtstamp_set for CAN devices
* supporting hardware timestamps
*/
-int can_eth_ioctl_hwts(struct net_device *netdev, struct ifreq *ifr, int cmd)
+int can_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config hwts_cfg = { 0 };
-
- switch (cmd) {
- case SIOCSHWTSTAMP: /* set */
- if (copy_from_user(&hwts_cfg, ifr->ifr_data, sizeof(hwts_cfg)))
- return -EFAULT;
- if (hwts_cfg.tx_type == HWTSTAMP_TX_ON &&
- hwts_cfg.rx_filter == HWTSTAMP_FILTER_ALL)
- return 0;
- return -ERANGE;
-
- case SIOCGHWTSTAMP: /* get */
- hwts_cfg.tx_type = HWTSTAMP_TX_ON;
- hwts_cfg.rx_filter = HWTSTAMP_FILTER_ALL;
- if (copy_to_user(ifr->ifr_data, &hwts_cfg, sizeof(hwts_cfg)))
- return -EFAULT;
+ if (cfg->tx_type == HWTSTAMP_TX_ON &&
+ cfg->rx_filter == HWTSTAMP_FILTER_ALL)
return 0;
-
- default:
- return -EOPNOTSUPP;
- }
+ NL_SET_ERR_MSG_MOD(extack, "Only TX on and RX all packets filter supported");
+ return -ERANGE;
}
-EXPORT_SYMBOL(can_eth_ioctl_hwts);
+EXPORT_SYMBOL(can_hwtstamp_set);
/* generic implementation of ethtool_ops::get_ts_info for CAN devices
* supporting hardware timestamps
*/
int can_ethtool_op_get_ts_info_hwts(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
info->so_timestamping =
SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
- info->phc_index = -1;
info->tx_types = BIT(HWTSTAMP_TX_ON);
info->rx_filters = BIT(HWTSTAMP_FILTER_ALL);
@@ -409,8 +457,8 @@ int open_candev(struct net_device *dev)
/* For CAN FD the data bitrate has to be >= the arbitration bitrate */
if ((priv->ctrlmode & CAN_CTRLMODE_FD) &&
- (!priv->data_bittiming.bitrate ||
- priv->data_bittiming.bitrate < priv->bittiming.bitrate)) {
+ (!priv->fd.data_bittiming.bitrate ||
+ priv->fd.data_bittiming.bitrate < priv->bittiming.bitrate)) {
netdev_err(dev, "incorrect/missing data bit-timing\n");
return -EINVAL;
}
@@ -471,7 +519,7 @@ static int can_set_termination(struct net_device *ndev, u16 term)
else
set = 0;
- gpiod_set_value(priv->termination_gpio, set);
+ gpiod_set_value_cansleep(priv->termination_gpio, set);
return 0;
}
@@ -548,16 +596,16 @@ int register_candev(struct net_device *dev)
if (!priv->bitrate_const != !priv->bitrate_const_cnt)
return -EINVAL;
- if (!priv->data_bitrate_const != !priv->data_bitrate_const_cnt)
+ if (!priv->fd.data_bitrate_const != !priv->fd.data_bitrate_const_cnt)
return -EINVAL;
/* We only support either fixed bit rates or bit timing const. */
- if ((priv->bitrate_const || priv->data_bitrate_const) &&
- (priv->bittiming_const || priv->data_bittiming_const))
+ if ((priv->bitrate_const || priv->fd.data_bitrate_const) &&
+ (priv->bittiming_const || priv->fd.data_bittiming_const))
return -EINVAL;
if (!can_bittiming_const_valid(priv->bittiming_const) ||
- !can_bittiming_const_valid(priv->data_bittiming_const))
+ !can_bittiming_const_valid(priv->fd.data_bittiming_const))
return -EINVAL;
if (!priv->termination_const) {
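
Two smaller points in this file are worth spelling out. Drivers that previously
hooked up .ndo_eth_ioctl = can_eth_ioctl_hwts are expected to point the new
.ndo_hwtstamp_get and .ndo_hwtstamp_set callbacks at can_hwtstamp_get() and
can_hwtstamp_set() instead. And can_get_ctrlmode_str() names only one flag per
call: the switch key "ctrlmode & ~(ctrlmode - 1)" isolates the lowest set bit, so
a composite mask resolves to the name of its lowest flag. A tiny standalone
sketch of that bit trick (the flag value is made up for illustration):

#include <stdio.h>

int main(void)
{
	unsigned int flags = 0x28;			/* two bits set: 0x20 | 0x8 */
	unsigned int lowest = flags & ~(flags - 1);	/* isolate lowest set bit */

	/* prints "lowest set bit of 0x28 is 0x8" */
	printf("lowest set bit of 0x%x is 0x%x\n", flags, lowest);
	return 0;
}
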
diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c
index dfdc039d92a6..d6b0e686fb11 100644
--- a/drivers/net/can/dev/netlink.c
+++ b/drivers/net/can/dev/netlink.c
@@ -2,7 +2,7 @@
/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
* Copyright (C) 2006 Andrey Volkov, Varma Electronics
* Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
- * Copyright (C) 2021 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
+ * Copyright (C) 2021-2025 Vincent Mailhol <mailhol@kernel.org>
*/
#include <linux/can/dev.h>
@@ -18,10 +18,14 @@ static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
[IFLA_CAN_CLOCK] = { .len = sizeof(struct can_clock) },
[IFLA_CAN_BERR_COUNTER] = { .len = sizeof(struct can_berr_counter) },
[IFLA_CAN_DATA_BITTIMING] = { .len = sizeof(struct can_bittiming) },
- [IFLA_CAN_DATA_BITTIMING_CONST] = { .len = sizeof(struct can_bittiming_const) },
+ [IFLA_CAN_DATA_BITTIMING_CONST] = { .len = sizeof(struct can_bittiming_const) },
[IFLA_CAN_TERMINATION] = { .type = NLA_U16 },
[IFLA_CAN_TDC] = { .type = NLA_NESTED },
[IFLA_CAN_CTRLMODE_EXT] = { .type = NLA_NESTED },
+ [IFLA_CAN_XL_DATA_BITTIMING] = { .len = sizeof(struct can_bittiming) },
+ [IFLA_CAN_XL_DATA_BITTIMING_CONST] = { .len = sizeof(struct can_bittiming_const) },
+ [IFLA_CAN_XL_TDC] = { .type = NLA_NESTED },
+ [IFLA_CAN_XL_PWM] = { .type = NLA_NESTED },
};
static const struct nla_policy can_tdc_policy[IFLA_CAN_TDC_MAX + 1] = {
@@ -36,116 +40,360 @@ static const struct nla_policy can_tdc_policy[IFLA_CAN_TDC_MAX + 1] = {
[IFLA_CAN_TDC_TDCF] = { .type = NLA_U32 },
};
-static int can_validate_bittiming(const struct can_bittiming *bt,
- struct netlink_ext_ack *extack)
+static const struct nla_policy can_pwm_policy[IFLA_CAN_PWM_MAX + 1] = {
+ [IFLA_CAN_PWM_PWMS_MIN] = { .type = NLA_U32 },
+ [IFLA_CAN_PWM_PWMS_MAX] = { .type = NLA_U32 },
+ [IFLA_CAN_PWM_PWML_MIN] = { .type = NLA_U32 },
+ [IFLA_CAN_PWM_PWML_MAX] = { .type = NLA_U32 },
+ [IFLA_CAN_PWM_PWMO_MIN] = { .type = NLA_U32 },
+ [IFLA_CAN_PWM_PWMO_MAX] = { .type = NLA_U32 },
+ [IFLA_CAN_PWM_PWMS] = { .type = NLA_U32 },
+ [IFLA_CAN_PWM_PWML] = { .type = NLA_U32 },
+ [IFLA_CAN_PWM_PWMO] = { .type = NLA_U32 },
+};
+
+static int can_validate_bittiming(struct nlattr *data[],
+ struct netlink_ext_ack *extack,
+ int ifla_can_bittiming)
{
+ struct can_bittiming *bt;
+
+ if (!data[ifla_can_bittiming])
+ return 0;
+
+ static_assert(__alignof__(*bt) <= NLA_ALIGNTO);
+ bt = nla_data(data[ifla_can_bittiming]);
+
/* sample point is in one-tenth of a percent */
if (bt->sample_point >= 1000) {
NL_SET_ERR_MSG(extack, "sample point must be between 0 and 100%");
-
return -EINVAL;
}
return 0;
}
-static int can_validate(struct nlattr *tb[], struct nlattr *data[],
- struct netlink_ext_ack *extack)
+static int can_validate_tdc(struct nlattr *data_tdc,
+ struct netlink_ext_ack *extack, u32 tdc_flags)
{
- bool is_can_fd = false;
+ bool tdc_manual = tdc_flags & CAN_CTRLMODE_TDC_MANUAL_MASK;
+ bool tdc_auto = tdc_flags & CAN_CTRLMODE_TDC_AUTO_MASK;
int err;
- /* Make sure that valid CAN FD configurations always consist of
- * - nominal/arbitration bittiming
- * - data bittiming
- * - control mode with CAN_CTRLMODE_FD set
- * - TDC parameters are coherent (details below)
- */
+ if (tdc_auto && tdc_manual) {
+ NL_SET_ERR_MSG(extack,
+ "TDC manual and auto modes are mutually exclusive");
+ return -EOPNOTSUPP;
+ }
- if (!data)
- return 0;
+ /* If one of the CAN_CTRLMODE_{,XL}_TDC_* flags is set then TDC
+ * must be set and vice-versa
+ */
+ if ((tdc_auto || tdc_manual) && !data_tdc) {
+ NL_SET_ERR_MSG(extack, "TDC parameters are missing");
+ return -EOPNOTSUPP;
+ }
+ if (!(tdc_auto || tdc_manual) && data_tdc) {
+ NL_SET_ERR_MSG(extack, "TDC mode (auto or manual) is missing");
+ return -EOPNOTSUPP;
+ }
- if (data[IFLA_CAN_BITTIMING]) {
- struct can_bittiming bt;
+ /* If providing TDC parameters, at least TDCO is needed. TDCV is
+ * needed if and only if CAN_CTRLMODE_{,XL}_TDC_MANUAL is set
+ */
+ if (data_tdc) {
+ struct nlattr *tb_tdc[IFLA_CAN_TDC_MAX + 1];
- memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt));
- err = can_validate_bittiming(&bt, extack);
+ err = nla_parse_nested(tb_tdc, IFLA_CAN_TDC_MAX,
+ data_tdc, can_tdc_policy, extack);
if (err)
return err;
+
+ if (tb_tdc[IFLA_CAN_TDC_TDCV]) {
+ if (tdc_auto) {
+ NL_SET_ERR_MSG(extack,
+ "TDCV is incompatible with TDC auto mode");
+ return -EOPNOTSUPP;
+ }
+ } else {
+ if (tdc_manual) {
+ NL_SET_ERR_MSG(extack,
+ "TDC manual mode requires TDCV");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ if (!tb_tdc[IFLA_CAN_TDC_TDCO]) {
+ NL_SET_ERR_MSG(extack, "TDCO is missing");
+ return -EOPNOTSUPP;
+ }
}
- if (data[IFLA_CAN_CTRLMODE]) {
- struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
- u32 tdc_flags = cm->flags & CAN_CTRLMODE_TDC_MASK;
+ return 0;
+}
+
+static int can_validate_pwm(struct nlattr *data[],
+ struct netlink_ext_ack *extack, u32 flags)
+{
+ struct nlattr *tb_pwm[IFLA_CAN_PWM_MAX + 1];
+ int err;
+
+ if (!data[IFLA_CAN_XL_PWM])
+ return 0;
+
+ if (!(flags & CAN_CTRLMODE_XL_TMS)) {
+ NL_SET_ERR_MSG(extack, "PWM requires TMS");
+ return -EOPNOTSUPP;
+ }
- is_can_fd = cm->flags & cm->mask & CAN_CTRLMODE_FD;
+ err = nla_parse_nested(tb_pwm, IFLA_CAN_PWM_MAX, data[IFLA_CAN_XL_PWM],
+ can_pwm_policy, extack);
+ if (err)
+ return err;
+
+ if (!tb_pwm[IFLA_CAN_PWM_PWMS] != !tb_pwm[IFLA_CAN_PWM_PWML]) {
+ NL_SET_ERR_MSG(extack,
+ "Provide either both PWMS and PWML, or none for automatic calculation");
+ return -EOPNOTSUPP;
+ }
- /* CAN_CTRLMODE_TDC_{AUTO,MANUAL} are mutually exclusive */
- if (tdc_flags == CAN_CTRLMODE_TDC_MASK)
+ if (tb_pwm[IFLA_CAN_PWM_PWMO] &&
+ (!tb_pwm[IFLA_CAN_PWM_PWMS] || !tb_pwm[IFLA_CAN_PWM_PWML])) {
+ NL_SET_ERR_MSG(extack, "PWMO requires both PWMS and PWML");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int can_validate_databittiming(struct nlattr *data[],
+ struct netlink_ext_ack *extack,
+ int ifla_can_data_bittiming, u32 flags)
+{
+ struct nlattr *data_tdc;
+ const char *type;
+ u32 tdc_flags;
+ bool is_on;
+ int err;
+
+ /* Make sure that valid CAN FD/XL configurations always consist of
+ * - nominal/arbitration bittiming
+ * - data bittiming
+ * - control mode with CAN_CTRLMODE_{FD,XL} set
+ * - TDC parameters are coherent (details in can_validate_tdc())
+ */
+
+ if (ifla_can_data_bittiming == IFLA_CAN_DATA_BITTIMING) {
+ data_tdc = data[IFLA_CAN_TDC];
+ tdc_flags = flags & CAN_CTRLMODE_FD_TDC_MASK;
+ is_on = flags & CAN_CTRLMODE_FD;
+ type = "FD";
+ } else {
+ data_tdc = data[IFLA_CAN_XL_TDC];
+ tdc_flags = flags & CAN_CTRLMODE_XL_TDC_MASK;
+ is_on = flags & CAN_CTRLMODE_XL;
+ type = "XL";
+ }
+
+ if (is_on) {
+ if (!data[IFLA_CAN_BITTIMING] || !data[ifla_can_data_bittiming]) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "Provide both nominal and %s data bittiming",
+ type);
return -EOPNOTSUPP;
- /* If one of the CAN_CTRLMODE_TDC_* flag is set then
- * TDC must be set and vice-versa
- */
- if (!!tdc_flags != !!data[IFLA_CAN_TDC])
+ }
+ } else {
+ if (data[ifla_can_data_bittiming]) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "%s data bittiming requires CAN %s",
+ type, type);
return -EOPNOTSUPP;
- /* If providing TDC parameters, at least TDCO is
- * needed. TDCV is needed if and only if
- * CAN_CTRLMODE_TDC_MANUAL is set
- */
- if (data[IFLA_CAN_TDC]) {
- struct nlattr *tb_tdc[IFLA_CAN_TDC_MAX + 1];
+ }
+ if (data_tdc) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "%s TDC requires CAN %s",
+ type, type);
+ return -EOPNOTSUPP;
+ }
+ }
- err = nla_parse_nested(tb_tdc, IFLA_CAN_TDC_MAX,
- data[IFLA_CAN_TDC],
- can_tdc_policy, extack);
- if (err)
- return err;
+ err = can_validate_bittiming(data, extack, ifla_can_data_bittiming);
+ if (err)
+ return err;
- if (tb_tdc[IFLA_CAN_TDC_TDCV]) {
- if (tdc_flags & CAN_CTRLMODE_TDC_AUTO)
- return -EOPNOTSUPP;
- } else {
- if (tdc_flags & CAN_CTRLMODE_TDC_MANUAL)
- return -EOPNOTSUPP;
- }
+ err = can_validate_tdc(data_tdc, extack, tdc_flags);
+ if (err)
+ return err;
+
+ return 0;
+}
- if (!tb_tdc[IFLA_CAN_TDC_TDCO])
+static int can_validate_xl_flags(struct netlink_ext_ack *extack,
+ u32 masked_flags, u32 mask)
+{
+ if (masked_flags & CAN_CTRLMODE_XL) {
+ if (masked_flags & CAN_CTRLMODE_XL_TMS) {
+ const u32 tms_conflicts_mask = CAN_CTRLMODE_FD |
+ CAN_CTRLMODE_XL_TDC_MASK;
+ u32 tms_conflicts = masked_flags & tms_conflicts_mask;
+
+ if (tms_conflicts) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "TMS and %s are mutually exclusive",
+ can_get_ctrlmode_str(tms_conflicts));
return -EOPNOTSUPP;
+ }
+ }
+ } else {
+ if (mask & CAN_CTRLMODE_XL_TMS) {
+ NL_SET_ERR_MSG(extack, "TMS requires CAN XL");
+ return -EOPNOTSUPP;
}
}
- if (is_can_fd) {
- if (!data[IFLA_CAN_BITTIMING] || !data[IFLA_CAN_DATA_BITTIMING])
+ return 0;
+}
+
+static int can_validate(struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ u32 flags = 0;
+ int err;
+
+ if (!data)
+ return 0;
+
+ if (data[IFLA_CAN_CTRLMODE]) {
+ struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
+
+ flags = cm->flags & cm->mask;
+
+ if ((flags & CAN_CTRLMODE_LISTENONLY) &&
+ (flags & CAN_CTRLMODE_RESTRICTED)) {
+ NL_SET_ERR_MSG(extack,
+ "LISTEN-ONLY and RESTRICTED modes are mutually exclusive");
return -EOPNOTSUPP;
+ }
+
+ err = can_validate_xl_flags(extack, flags, cm->mask);
+ if (err)
+ return err;
}
- if (data[IFLA_CAN_DATA_BITTIMING] || data[IFLA_CAN_TDC]) {
- if (!is_can_fd)
- return -EOPNOTSUPP;
+ err = can_validate_bittiming(data, extack, IFLA_CAN_BITTIMING);
+ if (err)
+ return err;
+
+ err = can_validate_databittiming(data, extack,
+ IFLA_CAN_DATA_BITTIMING, flags);
+ if (err)
+ return err;
+
+ err = can_validate_databittiming(data, extack,
+ IFLA_CAN_XL_DATA_BITTIMING, flags);
+ if (err)
+ return err;
+
+ err = can_validate_pwm(data, extack, flags);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int can_ctrlmode_changelink(struct net_device *dev,
+ struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ struct can_ctrlmode *cm;
+ u32 ctrlstatic, maskedflags, deactivated, notsupp, ctrlstatic_missing;
+
+ if (!data[IFLA_CAN_CTRLMODE])
+ return 0;
+
+ /* Do not allow changing controller mode while running */
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+
+ cm = nla_data(data[IFLA_CAN_CTRLMODE]);
+ ctrlstatic = can_get_static_ctrlmode(priv);
+ maskedflags = cm->flags & cm->mask;
+ deactivated = ~cm->flags & cm->mask;
+ notsupp = maskedflags & ~(priv->ctrlmode_supported | ctrlstatic);
+ ctrlstatic_missing = (maskedflags & ctrlstatic) ^ ctrlstatic;
+
+ if (notsupp) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "requested control mode %s not supported",
+ can_get_ctrlmode_str(notsupp));
+ return -EOPNOTSUPP;
}
- if (data[IFLA_CAN_DATA_BITTIMING]) {
- struct can_bittiming bt;
+ /* do not check for static fd-non-iso if 'fd' is disabled */
+ if (!(maskedflags & CAN_CTRLMODE_FD))
+ ctrlstatic &= ~CAN_CTRLMODE_FD_NON_ISO;
- memcpy(&bt, nla_data(data[IFLA_CAN_DATA_BITTIMING]), sizeof(bt));
- err = can_validate_bittiming(&bt, extack);
- if (err)
- return err;
+ if (ctrlstatic_missing) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "missing required %s static control mode",
+ can_get_ctrlmode_str(ctrlstatic_missing));
+ return -EOPNOTSUPP;
+ }
+
+ /* If FD was active and is not turned off, check for XL conflicts */
+ if (priv->ctrlmode & CAN_CTRLMODE_FD & ~deactivated) {
+ if (maskedflags & CAN_CTRLMODE_XL_TMS) {
+ NL_SET_ERR_MSG(extack,
+ "TMS can not be activated while CAN FD is on");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ /* If a top dependency flag is provided, reset all its dependencies */
+ if (cm->mask & CAN_CTRLMODE_FD)
+ priv->ctrlmode &= ~CAN_CTRLMODE_FD_TDC_MASK;
+ if (cm->mask & CAN_CTRLMODE_XL)
+ priv->ctrlmode &= ~(CAN_CTRLMODE_XL_TDC_MASK |
+ CAN_CTRLMODE_XL_TMS);
+
+ /* clear bits to be modified and copy the flag values */
+ priv->ctrlmode &= ~cm->mask;
+ priv->ctrlmode |= maskedflags;
+
+ /* Wipe potential leftovers from previous CAN FD/XL config */
+ if (!(priv->ctrlmode & CAN_CTRLMODE_FD)) {
+ memset(&priv->fd.data_bittiming, 0,
+ sizeof(priv->fd.data_bittiming));
+ priv->ctrlmode &= ~CAN_CTRLMODE_FD_TDC_MASK;
+ memset(&priv->fd.tdc, 0, sizeof(priv->fd.tdc));
}
+ if (!(priv->ctrlmode & CAN_CTRLMODE_XL)) {
+ memset(&priv->xl.data_bittiming, 0,
+ sizeof(priv->xl.data_bittiming));
+ priv->ctrlmode &= ~CAN_CTRLMODE_XL_TDC_MASK;
+ memset(&priv->xl.tdc, 0, sizeof(priv->xl.tdc));
+ memset(&priv->xl.pwm, 0, sizeof(priv->xl.pwm));
+ }
+
+ can_set_default_mtu(dev);
return 0;
}
-static int can_tdc_changelink(struct can_priv *priv, const struct nlattr *nla,
+static int can_tdc_changelink(struct data_bittiming_params *dbt_params,
+ const struct nlattr *nla,
struct netlink_ext_ack *extack)
{
struct nlattr *tb_tdc[IFLA_CAN_TDC_MAX + 1];
struct can_tdc tdc = { 0 };
- const struct can_tdc_const *tdc_const = priv->tdc_const;
+ const struct can_tdc_const *tdc_const = dbt_params->tdc_const;
int err;
- if (!tdc_const || !can_tdc_is_enabled(priv))
+ if (!tdc_const) {
+ NL_SET_ERR_MSG(extack, "The device does not support TDC");
return -EOPNOTSUPP;
+ }
err = nla_parse_nested(tb_tdc, IFLA_CAN_TDC_MAX, nla,
can_tdc_policy, extack);
@@ -179,8 +427,166 @@ static int can_tdc_changelink(struct can_priv *priv, const struct nlattr *nla,
tdc.tdcf = tdcf;
}
- priv->tdc = tdc;
+ dbt_params->tdc = tdc;
+
+ return 0;
+}
+
+static int can_dbt_changelink(struct net_device *dev, struct nlattr *data[],
+ bool fd, struct netlink_ext_ack *extack)
+{
+ struct nlattr *data_bittiming, *data_tdc;
+ struct can_priv *priv = netdev_priv(dev);
+ struct data_bittiming_params *dbt_params;
+ struct can_bittiming dbt;
+ bool need_tdc_calc = false;
+ u32 tdc_mask;
+ int err;
+
+ if (fd) {
+ data_bittiming = data[IFLA_CAN_DATA_BITTIMING];
+ data_tdc = data[IFLA_CAN_TDC];
+ dbt_params = &priv->fd;
+ tdc_mask = CAN_CTRLMODE_FD_TDC_MASK;
+ } else {
+ data_bittiming = data[IFLA_CAN_XL_DATA_BITTIMING];
+ data_tdc = data[IFLA_CAN_XL_TDC];
+ dbt_params = &priv->xl;
+ tdc_mask = CAN_CTRLMODE_XL_TDC_MASK;
+ }
+
+ if (!data_bittiming)
+ return 0;
+
+ /* Do not allow changing bittiming while running */
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+
+ /* Calculate bittiming parameters based on data_bittiming_const
+ * if set, otherwise pass bitrate directly via do_set_bitrate().
+ * Bail out if neither is given.
+ */
+ if (!dbt_params->data_bittiming_const && !dbt_params->do_set_data_bittiming &&
+ !dbt_params->data_bitrate_const)
+ return -EOPNOTSUPP;
+
+ memcpy(&dbt, nla_data(data_bittiming), sizeof(dbt));
+ err = can_get_bittiming(dev, &dbt, dbt_params->data_bittiming_const,
+ dbt_params->data_bitrate_const,
+ dbt_params->data_bitrate_const_cnt, extack);
+ if (err)
+ return err;
+
+ if (priv->bitrate_max && dbt.bitrate > priv->bitrate_max) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "CAN data bitrate %u bps surpasses transceiver capabilities of %u bps",
+ dbt.bitrate, priv->bitrate_max);
+ return -EINVAL;
+ }
+
+ memset(&dbt_params->tdc, 0, sizeof(dbt_params->tdc));
+ if (data[IFLA_CAN_CTRLMODE]) {
+ struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
+
+ if (fd || !(priv->ctrlmode & CAN_CTRLMODE_XL_TMS))
+ need_tdc_calc = !(cm->mask & tdc_mask);
+ }
+ if (data_tdc) {
+ /* TDC parameters are provided: use them */
+ err = can_tdc_changelink(dbt_params, data_tdc, extack);
+ if (err) {
+ priv->ctrlmode &= ~tdc_mask;
+ return err;
+ }
+ } else if (need_tdc_calc) {
+ /* Neither of TDC parameters nor TDC flags are provided:
+ * do calculation
+ */
+ can_calc_tdco(&dbt_params->tdc, dbt_params->tdc_const, &dbt,
+ tdc_mask, &priv->ctrlmode, priv->ctrlmode_supported);
+ } /* else: both CAN_CTRLMODE_{,XL}_TDC_{AUTO,MANUAL} are explicitly
+ * turned off. TDC is disabled: do nothing
+ */
+
+ memcpy(&dbt_params->data_bittiming, &dbt, sizeof(dbt));
+
+ if (dbt_params->do_set_data_bittiming) {
+ /* Finally, set the bit-timing registers */
+ err = dbt_params->do_set_data_bittiming(dev);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int can_pwm_changelink(struct net_device *dev,
+ const struct nlattr *pwm_nla,
+ struct netlink_ext_ack *extack)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ const struct can_pwm_const *pwm_const = priv->xl.pwm_const;
+ struct nlattr *tb_pwm[IFLA_CAN_PWM_MAX + 1];
+ struct can_pwm pwm = { 0 };
+ int err;
+
+ if (!(priv->ctrlmode & CAN_CTRLMODE_XL_TMS))
+ return 0;
+
+ if (!pwm_const) {
+ NL_SET_ERR_MSG(extack, "The device does not support PWM");
+ return -EOPNOTSUPP;
+ }
+
+ if (!pwm_nla)
+ return can_calc_pwm(dev, extack);
+
+ err = nla_parse_nested(tb_pwm, IFLA_CAN_PWM_MAX, pwm_nla,
+ can_pwm_policy, extack);
+ if (err)
+ return err;
+
+ if (tb_pwm[IFLA_CAN_PWM_PWMS]) {
+ pwm.pwms = nla_get_u32(tb_pwm[IFLA_CAN_PWM_PWMS]);
+ if (pwm.pwms < pwm_const->pwms_min ||
+ pwm.pwms > pwm_const->pwms_max) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "PWMS: %u tqmin is out of range: %u...%u",
+ pwm.pwms, pwm_const->pwms_min,
+ pwm_const->pwms_max);
+ return -EINVAL;
+ }
+ }
+
+ if (tb_pwm[IFLA_CAN_PWM_PWML]) {
+ pwm.pwml = nla_get_u32(tb_pwm[IFLA_CAN_PWM_PWML]);
+ if (pwm.pwml < pwm_const->pwml_min ||
+ pwm.pwml > pwm_const->pwml_max) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "PWML: %u tqmin is out of range: %u...%u",
+ pwm.pwml, pwm_const->pwml_min,
+ pwm_const->pwml_max);
+ return -EINVAL;
+ }
+ }
+
+ if (tb_pwm[IFLA_CAN_PWM_PWMO]) {
+ pwm.pwmo = nla_get_u32(tb_pwm[IFLA_CAN_PWM_PWMO]);
+ if (pwm.pwmo < pwm_const->pwmo_min ||
+ pwm.pwmo > pwm_const->pwmo_max) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "PWMO: %u tqmin is out of range: %u...%u",
+ pwm.pwmo, pwm_const->pwmo_min,
+ pwm_const->pwmo_max);
+ return -EINVAL;
+ }
+ }
+ err = can_validate_pwm_bittiming(dev, &pwm, extack);
+ if (err)
+ return err;
+
+ priv->xl.pwm = pwm;
return 0;
}
@@ -189,12 +595,13 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
struct netlink_ext_ack *extack)
{
struct can_priv *priv = netdev_priv(dev);
- u32 tdc_mask = 0;
int err;
/* We need synchronization with dev->stop() */
ASSERT_RTNL();
+ err = can_ctrlmode_changelink(dev, data, extack);
+ if (err)
+ return err;
+
if (data[IFLA_CAN_BITTIMING]) {
struct can_bittiming bt;
@@ -237,61 +644,28 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
}
}
- if (data[IFLA_CAN_CTRLMODE]) {
- struct can_ctrlmode *cm;
- u32 ctrlstatic;
- u32 maskedflags;
-
- /* Do not allow changing controller mode while running */
- if (dev->flags & IFF_UP)
- return -EBUSY;
- cm = nla_data(data[IFLA_CAN_CTRLMODE]);
- ctrlstatic = can_get_static_ctrlmode(priv);
- maskedflags = cm->flags & cm->mask;
-
- /* check whether provided bits are allowed to be passed */
- if (maskedflags & ~(priv->ctrlmode_supported | ctrlstatic))
- return -EOPNOTSUPP;
-
- /* do not check for static fd-non-iso if 'fd' is disabled */
- if (!(maskedflags & CAN_CTRLMODE_FD))
- ctrlstatic &= ~CAN_CTRLMODE_FD_NON_ISO;
+ if (data[IFLA_CAN_RESTART_MS]) {
+ unsigned int restart_ms = nla_get_u32(data[IFLA_CAN_RESTART_MS]);
- /* make sure static options are provided by configuration */
- if ((maskedflags & ctrlstatic) != ctrlstatic)
+ if (restart_ms != 0 && !priv->do_set_mode) {
+ NL_SET_ERR_MSG(extack,
+ "Device doesn't support restart from Bus Off");
return -EOPNOTSUPP;
-
- /* clear bits to be modified and copy the flag values */
- priv->ctrlmode &= ~cm->mask;
- priv->ctrlmode |= maskedflags;
-
- /* CAN_CTRLMODE_FD can only be set when driver supports FD */
- if (priv->ctrlmode & CAN_CTRLMODE_FD) {
- dev->mtu = CANFD_MTU;
- } else {
- dev->mtu = CAN_MTU;
- memset(&priv->data_bittiming, 0,
- sizeof(priv->data_bittiming));
- priv->ctrlmode &= ~CAN_CTRLMODE_TDC_MASK;
- memset(&priv->tdc, 0, sizeof(priv->tdc));
}
- tdc_mask = cm->mask & CAN_CTRLMODE_TDC_MASK;
- /* CAN_CTRLMODE_TDC_{AUTO,MANUAL} are mutually
- * exclusive: make sure to turn the other one off
- */
- if (tdc_mask)
- priv->ctrlmode &= cm->flags | ~CAN_CTRLMODE_TDC_MASK;
- }
-
- if (data[IFLA_CAN_RESTART_MS]) {
/* Do not allow changing restart delay while running */
if (dev->flags & IFF_UP)
return -EBUSY;
- priv->restart_ms = nla_get_u32(data[IFLA_CAN_RESTART_MS]);
+ priv->restart_ms = restart_ms;
}
if (data[IFLA_CAN_RESTART]) {
+ if (!priv->do_set_mode) {
+ NL_SET_ERR_MSG(extack,
+ "Device doesn't support restart from Bus Off");
+ return -EOPNOTSUPP;
+ }
+
/* Do not allow a restart while not running */
if (!(dev->flags & IFF_UP))
return -EINVAL;
@@ -300,75 +674,29 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
return err;
}
- if (data[IFLA_CAN_DATA_BITTIMING]) {
- struct can_bittiming dbt;
-
- /* Do not allow changing bittiming while running */
- if (dev->flags & IFF_UP)
- return -EBUSY;
-
- /* Calculate bittiming parameters based on
- * data_bittiming_const if set, otherwise pass bitrate
- * directly via do_set_bitrate(). Bail out if neither
- * is given.
- */
- if (!priv->data_bittiming_const && !priv->do_set_data_bittiming &&
- !priv->data_bitrate_const)
- return -EOPNOTSUPP;
-
- memcpy(&dbt, nla_data(data[IFLA_CAN_DATA_BITTIMING]),
- sizeof(dbt));
- err = can_get_bittiming(dev, &dbt,
- priv->data_bittiming_const,
- priv->data_bitrate_const,
- priv->data_bitrate_const_cnt,
- extack);
- if (err)
- return err;
-
- if (priv->bitrate_max && dbt.bitrate > priv->bitrate_max) {
- NL_SET_ERR_MSG_FMT(extack,
- "CANFD data bitrate %u bps surpasses transceiver capabilities of %u bps",
- dbt.bitrate, priv->bitrate_max);
- return -EINVAL;
- }
+ /* CAN FD */
+ err = can_dbt_changelink(dev, data, true, extack);
+ if (err)
+ return err;
- memset(&priv->tdc, 0, sizeof(priv->tdc));
- if (data[IFLA_CAN_TDC]) {
- /* TDC parameters are provided: use them */
- err = can_tdc_changelink(priv, data[IFLA_CAN_TDC],
- extack);
- if (err) {
- priv->ctrlmode &= ~CAN_CTRLMODE_TDC_MASK;
- return err;
- }
- } else if (!tdc_mask) {
- /* Neither of TDC parameters nor TDC flags are
- * provided: do calculation
- */
- can_calc_tdco(&priv->tdc, priv->tdc_const, &dbt,
- &priv->ctrlmode, priv->ctrlmode_supported);
- } /* else: both CAN_CTRLMODE_TDC_{AUTO,MANUAL} are explicitly
- * turned off. TDC is disabled: do nothing
- */
-
- memcpy(&priv->data_bittiming, &dbt, sizeof(dbt));
-
- if (priv->do_set_data_bittiming) {
- /* Finally, set the bit-timing registers */
- err = priv->do_set_data_bittiming(dev);
- if (err)
- return err;
- }
- }
+ /* CAN XL */
+ err = can_dbt_changelink(dev, data, false, extack);
+ if (err)
+ return err;
+ err = can_pwm_changelink(dev, data[IFLA_CAN_XL_PWM], extack);
+ if (err)
+ return err;
if (data[IFLA_CAN_TERMINATION]) {
const u16 termval = nla_get_u16(data[IFLA_CAN_TERMINATION]);
const unsigned int num_term = priv->termination_const_cnt;
unsigned int i;
- if (!priv->do_set_termination)
+ if (!priv->do_set_termination) {
+ NL_SET_ERR_MSG(extack,
+ "Termination is not configurable on this device");
return -EOPNOTSUPP;
+ }
/* check whether given value is supported by the interface */
for (i = 0; i < num_term; i++) {
@@ -389,44 +717,85 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
return 0;
}
-static size_t can_tdc_get_size(const struct net_device *dev)
+static size_t can_tdc_get_size(struct data_bittiming_params *dbt_params,
+ u32 tdc_flags)
{
- struct can_priv *priv = netdev_priv(dev);
+ bool tdc_manual = tdc_flags & CAN_CTRLMODE_TDC_MANUAL_MASK;
size_t size;
- if (!priv->tdc_const)
+ if (!dbt_params->tdc_const)
return 0;
size = nla_total_size(0); /* nest IFLA_CAN_TDC */
- if (priv->ctrlmode_supported & CAN_CTRLMODE_TDC_MANUAL) {
+ if (tdc_manual) {
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCV_MIN */
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCV_MAX */
}
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCO_MIN */
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCO_MAX */
- if (priv->tdc_const->tdcf_max) {
+ if (dbt_params->tdc_const->tdcf_max) {
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCF_MIN */
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCF_MAX */
}
- if (can_tdc_is_enabled(priv)) {
- if (priv->ctrlmode & CAN_CTRLMODE_TDC_MANUAL ||
- priv->do_get_auto_tdcv)
+ if (tdc_flags) {
+ if (tdc_manual || dbt_params->do_get_auto_tdcv)
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCV */
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCO */
- if (priv->tdc_const->tdcf_max)
+ if (dbt_params->tdc_const->tdcf_max)
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCF */
}
return size;
}
+static size_t can_data_bittiming_get_size(struct data_bittiming_params *dbt_params,
+ u32 tdc_flags)
+{
+ size_t size = 0;
+
+ if (dbt_params->data_bittiming.bitrate) /* IFLA_CAN_{,XL}_DATA_BITTIMING */
+ size += nla_total_size(sizeof(dbt_params->data_bittiming));
+ if (dbt_params->data_bittiming_const) /* IFLA_CAN_{,XL}_DATA_BITTIMING_CONST */
+ size += nla_total_size(sizeof(*dbt_params->data_bittiming_const));
+ if (dbt_params->data_bitrate_const) /* IFLA_CAN_{,XL}_DATA_BITRATE_CONST */
+ size += nla_total_size(sizeof(*dbt_params->data_bitrate_const) *
+ dbt_params->data_bitrate_const_cnt);
+ size += can_tdc_get_size(dbt_params, tdc_flags);/* IFLA_CAN_{,XL}_TDC */
+
+ return size;
+}
+
static size_t can_ctrlmode_ext_get_size(void)
{
return nla_total_size(0) + /* nest IFLA_CAN_CTRLMODE_EXT */
nla_total_size(sizeof(u32)); /* IFLA_CAN_CTRLMODE_SUPPORTED */
}
+static size_t can_pwm_get_size(const struct can_pwm_const *pwm_const,
+ bool pwm_on)
+{
+ size_t size;
+
+ if (!pwm_const || !pwm_on)
+ return 0;
+
+ size = nla_total_size(0); /* nest IFLA_CAN_PWM */
+
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWMS_MIN */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWMS_MAX */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWML_MIN */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWML_MAX */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWMO_MIN */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWMO_MAX */
+
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWMS */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWML */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWMO */
+
+ return size;
+}
+
static size_t can_get_size(const struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
@@ -442,10 +811,6 @@ static size_t can_get_size(const struct net_device *dev)
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */
if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */
size += nla_total_size(sizeof(struct can_berr_counter));
- if (priv->data_bittiming.bitrate) /* IFLA_CAN_DATA_BITTIMING */
- size += nla_total_size(sizeof(struct can_bittiming));
- if (priv->data_bittiming_const) /* IFLA_CAN_DATA_BITTIMING_CONST */
- size += nla_total_size(sizeof(struct can_bittiming_const));
if (priv->termination_const) {
size += nla_total_size(sizeof(priv->termination)); /* IFLA_CAN_TERMINATION */
size += nla_total_size(sizeof(*priv->termination_const) * /* IFLA_CAN_TERMINATION_CONST */
@@ -454,31 +819,76 @@ static size_t can_get_size(const struct net_device *dev)
if (priv->bitrate_const) /* IFLA_CAN_BITRATE_CONST */
size += nla_total_size(sizeof(*priv->bitrate_const) *
priv->bitrate_const_cnt);
- if (priv->data_bitrate_const) /* IFLA_CAN_DATA_BITRATE_CONST */
- size += nla_total_size(sizeof(*priv->data_bitrate_const) *
- priv->data_bitrate_const_cnt);
size += sizeof(priv->bitrate_max); /* IFLA_CAN_BITRATE_MAX */
- size += can_tdc_get_size(dev); /* IFLA_CAN_TDC */
size += can_ctrlmode_ext_get_size(); /* IFLA_CAN_CTRLMODE_EXT */
+ size += can_data_bittiming_get_size(&priv->fd,
+ priv->ctrlmode & CAN_CTRLMODE_FD_TDC_MASK);
+
+ size += can_data_bittiming_get_size(&priv->xl,
+ priv->ctrlmode & CAN_CTRLMODE_XL_TDC_MASK);
+ size += can_pwm_get_size(priv->xl.pwm_const, /* IFLA_CAN_XL_PWM */
+ priv->ctrlmode & CAN_CTRLMODE_XL_TMS);
+
return size;
}
-static int can_tdc_fill_info(struct sk_buff *skb, const struct net_device *dev)
+static int can_bittiming_fill_info(struct sk_buff *skb, int ifla_can_bittiming,
+ struct can_bittiming *bittiming)
+{
+ return bittiming->bitrate != CAN_BITRATE_UNSET &&
+ bittiming->bitrate != CAN_BITRATE_UNKNOWN &&
+ nla_put(skb, ifla_can_bittiming, sizeof(*bittiming), bittiming);
+}
+
+static int can_bittiming_const_fill_info(struct sk_buff *skb,
+ int ifla_can_bittiming_const,
+ const struct can_bittiming_const *bittiming_const)
+{
+ return bittiming_const &&
+ nla_put(skb, ifla_can_bittiming_const,
+ sizeof(*bittiming_const), bittiming_const);
+}
+
+static int can_bitrate_const_fill_info(struct sk_buff *skb,
+ int ifla_can_bitrate_const,
+ const u32 *bitrate_const, unsigned int cnt)
+{
+ return bitrate_const &&
+ nla_put(skb, ifla_can_bitrate_const,
+ sizeof(*bitrate_const) * cnt, bitrate_const);
+}
+
+static int can_tdc_fill_info(struct sk_buff *skb, const struct net_device *dev,
+ int ifla_can_tdc)
{
- struct nlattr *nest;
struct can_priv *priv = netdev_priv(dev);
- struct can_tdc *tdc = &priv->tdc;
- const struct can_tdc_const *tdc_const = priv->tdc_const;
+ struct data_bittiming_params *dbt_params;
+ const struct can_tdc_const *tdc_const;
+ struct can_tdc *tdc;
+ struct nlattr *nest;
+ bool tdc_is_enabled, tdc_manual;
+
+ if (ifla_can_tdc == IFLA_CAN_TDC) {
+ dbt_params = &priv->fd;
+ tdc_is_enabled = can_fd_tdc_is_enabled(priv);
+ tdc_manual = priv->ctrlmode & CAN_CTRLMODE_TDC_MANUAL;
+ } else {
+ dbt_params = &priv->xl;
+ tdc_is_enabled = can_xl_tdc_is_enabled(priv);
+ tdc_manual = priv->ctrlmode & CAN_CTRLMODE_XL_TDC_MANUAL;
+ }
+ tdc_const = dbt_params->tdc_const;
+ tdc = &dbt_params->tdc;
if (!tdc_const)
return 0;
- nest = nla_nest_start(skb, IFLA_CAN_TDC);
+ nest = nla_nest_start(skb, ifla_can_tdc);
if (!nest)
return -EMSGSIZE;
- if (priv->ctrlmode_supported & CAN_CTRLMODE_TDC_MANUAL &&
+ if (tdc_manual &&
(nla_put_u32(skb, IFLA_CAN_TDC_TDCV_MIN, tdc_const->tdcv_min) ||
nla_put_u32(skb, IFLA_CAN_TDC_TDCV_MAX, tdc_const->tdcv_max)))
goto err_cancel;
@@ -490,15 +900,15 @@ static int can_tdc_fill_info(struct sk_buff *skb, const struct net_device *dev)
nla_put_u32(skb, IFLA_CAN_TDC_TDCF_MAX, tdc_const->tdcf_max)))
goto err_cancel;
- if (can_tdc_is_enabled(priv)) {
+ if (tdc_is_enabled) {
u32 tdcv;
int err = -EINVAL;
- if (priv->ctrlmode & CAN_CTRLMODE_TDC_MANUAL) {
+ if (tdc_manual) {
tdcv = tdc->tdcv;
err = 0;
- } else if (priv->do_get_auto_tdcv) {
- err = priv->do_get_auto_tdcv(dev, &tdcv);
+ } else if (dbt_params->do_get_auto_tdcv) {
+ err = dbt_params->do_get_auto_tdcv(dev, &tdcv);
}
if (!err && nla_put_u32(skb, IFLA_CAN_TDC_TDCV, tdcv))
goto err_cancel;
@@ -517,6 +927,42 @@ err_cancel:
return -EMSGSIZE;
}
+static int can_pwm_fill_info(struct sk_buff *skb, const struct can_priv *priv)
+{
+ const struct can_pwm_const *pwm_const = priv->xl.pwm_const;
+ const struct can_pwm *pwm = &priv->xl.pwm;
+ struct nlattr *nest;
+
+ if (!pwm_const)
+ return 0;
+
+ nest = nla_nest_start(skb, IFLA_CAN_XL_PWM);
+ if (!nest)
+ return -EMSGSIZE;
+
+ if (nla_put_u32(skb, IFLA_CAN_PWM_PWMS_MIN, pwm_const->pwms_min) ||
+ nla_put_u32(skb, IFLA_CAN_PWM_PWMS_MAX, pwm_const->pwms_max) ||
+ nla_put_u32(skb, IFLA_CAN_PWM_PWML_MIN, pwm_const->pwml_min) ||
+ nla_put_u32(skb, IFLA_CAN_PWM_PWML_MAX, pwm_const->pwml_max) ||
+ nla_put_u32(skb, IFLA_CAN_PWM_PWMO_MIN, pwm_const->pwmo_min) ||
+ nla_put_u32(skb, IFLA_CAN_PWM_PWMO_MAX, pwm_const->pwmo_max))
+ goto err_cancel;
+
+ if (priv->ctrlmode & CAN_CTRLMODE_XL_TMS) {
+ if (nla_put_u32(skb, IFLA_CAN_PWM_PWMS, pwm->pwms) ||
+ nla_put_u32(skb, IFLA_CAN_PWM_PWML, pwm->pwml) ||
+ nla_put_u32(skb, IFLA_CAN_PWM_PWMO, pwm->pwmo))
+ goto err_cancel;
+ }
+
+ nla_nest_end(skb, nest);
+ return 0;
+
+err_cancel:
+ nla_nest_cancel(skb, nest);
+ return -EMSGSIZE;
+}
+
static int can_ctrlmode_ext_fill_info(struct sk_buff *skb,
const struct can_priv *priv)
{
@@ -546,14 +992,11 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
if (priv->do_get_state)
priv->do_get_state(dev, &state);
- if ((priv->bittiming.bitrate != CAN_BITRATE_UNSET &&
- priv->bittiming.bitrate != CAN_BITRATE_UNKNOWN &&
- nla_put(skb, IFLA_CAN_BITTIMING,
- sizeof(priv->bittiming), &priv->bittiming)) ||
+ if (can_bittiming_fill_info(skb, IFLA_CAN_BITTIMING,
+ &priv->bittiming) ||
- (priv->bittiming_const &&
- nla_put(skb, IFLA_CAN_BITTIMING_CONST,
- sizeof(*priv->bittiming_const), priv->bittiming_const)) ||
+ can_bittiming_const_fill_info(skb, IFLA_CAN_BITTIMING_CONST,
+ priv->bittiming_const) ||
nla_put(skb, IFLA_CAN_CLOCK, sizeof(priv->clock), &priv->clock) ||
nla_put_u32(skb, IFLA_CAN_STATE, state) ||
@@ -564,14 +1007,11 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
!priv->do_get_berr_counter(dev, &bec) &&
nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec)) ||
- (priv->data_bittiming.bitrate &&
- nla_put(skb, IFLA_CAN_DATA_BITTIMING,
- sizeof(priv->data_bittiming), &priv->data_bittiming)) ||
+ can_bittiming_fill_info(skb, IFLA_CAN_DATA_BITTIMING,
+ &priv->fd.data_bittiming) ||
- (priv->data_bittiming_const &&
- nla_put(skb, IFLA_CAN_DATA_BITTIMING_CONST,
- sizeof(*priv->data_bittiming_const),
- priv->data_bittiming_const)) ||
+ can_bittiming_const_fill_info(skb, IFLA_CAN_DATA_BITTIMING_CONST,
+ priv->fd.data_bittiming_const) ||
(priv->termination_const &&
(nla_put_u16(skb, IFLA_CAN_TERMINATION, priv->termination) ||
@@ -580,27 +1020,36 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
priv->termination_const_cnt,
priv->termination_const))) ||
- (priv->bitrate_const &&
- nla_put(skb, IFLA_CAN_BITRATE_CONST,
- sizeof(*priv->bitrate_const) *
- priv->bitrate_const_cnt,
- priv->bitrate_const)) ||
+ can_bitrate_const_fill_info(skb, IFLA_CAN_BITRATE_CONST,
+ priv->bitrate_const,
+ priv->bitrate_const_cnt) ||
- (priv->data_bitrate_const &&
- nla_put(skb, IFLA_CAN_DATA_BITRATE_CONST,
- sizeof(*priv->data_bitrate_const) *
- priv->data_bitrate_const_cnt,
- priv->data_bitrate_const)) ||
+ can_bitrate_const_fill_info(skb, IFLA_CAN_DATA_BITRATE_CONST,
+ priv->fd.data_bitrate_const,
+ priv->fd.data_bitrate_const_cnt) ||
(nla_put(skb, IFLA_CAN_BITRATE_MAX,
sizeof(priv->bitrate_max),
&priv->bitrate_max)) ||
- can_tdc_fill_info(skb, dev) ||
+ can_tdc_fill_info(skb, dev, IFLA_CAN_TDC) ||
- can_ctrlmode_ext_fill_info(skb, priv)
- )
+ can_ctrlmode_ext_fill_info(skb, priv) ||
+ can_bittiming_fill_info(skb, IFLA_CAN_XL_DATA_BITTIMING,
+ &priv->xl.data_bittiming) ||
+
+ can_bittiming_const_fill_info(skb, IFLA_CAN_XL_DATA_BITTIMING_CONST,
+ priv->xl.data_bittiming_const) ||
+
+ can_bitrate_const_fill_info(skb, IFLA_CAN_XL_DATA_BITRATE_CONST,
+ priv->xl.data_bitrate_const,
+ priv->xl.data_bitrate_const_cnt) ||
+
+ can_tdc_fill_info(skb, dev, IFLA_CAN_XL_TDC) ||
+
+ can_pwm_fill_info(skb, priv)
+ )
return -EMSGSIZE;
return 0;
@@ -624,8 +1073,8 @@ nla_put_failure:
return -EMSGSIZE;
}
-static int can_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int can_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
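
The three fill helpers introduced above return zero both when the attribute is skipped and when it was written successfully, and non-zero only when nla_put() overflows the message, so callers can chain them with ||. Below is a minimal sketch of such a caller, assuming the IFLA_CAN_XL_* attributes and the priv->xl fields from the hunks above; the function name itself is hypothetical and the sketch only compiles in the context of netlink.c, where the helpers are defined.

#include <linux/can/dev.h>
#include <linux/netdevice.h>
#include <net/netlink.h>

/* Hypothetical helper, sketched for illustration only: groups the CAN XL
 * attributes the same way can_fill_info() chains them in the hunk above.
 */
static int can_xl_fill_info_sketch(struct sk_buff *skb,
				   const struct net_device *dev)
{
	struct can_priv *priv = netdev_priv(dev);

	/* Each helper is a no-op (returns 0) when the parameter is unset. */
	if (can_bittiming_fill_info(skb, IFLA_CAN_XL_DATA_BITTIMING,
				    &priv->xl.data_bittiming) ||
	    can_bittiming_const_fill_info(skb, IFLA_CAN_XL_DATA_BITTIMING_CONST,
					  priv->xl.data_bittiming_const) ||
	    can_bitrate_const_fill_info(skb, IFLA_CAN_XL_DATA_BITRATE_CONST,
					priv->xl.data_bitrate_const,
					priv->xl.data_bitrate_const_cnt))
		return -EMSGSIZE;

	return 0;
}
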
diff --git a/drivers/net/can/dummy_can.c b/drivers/net/can/dummy_can.c
new file mode 100644
index 000000000000..41953655e3d3
--- /dev/null
+++ b/drivers/net/can/dummy_can.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2025 Vincent Mailhol <mailhol@kernel.org> */
+
+#include <linux/array_size.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/units.h>
+#include <linux/string_choices.h>
+
+#include <linux/can.h>
+#include <linux/can/bittiming.h>
+#include <linux/can/dev.h>
+#include <linux/can/skb.h>
+
+struct dummy_can {
+ struct can_priv can;
+ struct net_device *dev;
+};
+
+static struct dummy_can *dummy_can;
+
+static const struct can_bittiming_const dummy_can_bittiming_const = {
+ .name = "dummy_can CC",
+ .tseg1_min = 2,
+ .tseg1_max = 256,
+ .tseg2_min = 2,
+ .tseg2_max = 128,
+ .sjw_max = 128,
+ .brp_min = 1,
+ .brp_max = 512,
+ .brp_inc = 1
+};
+
+static const struct can_bittiming_const dummy_can_fd_databittiming_const = {
+ .name = "dummy_can FD",
+ .tseg1_min = 2,
+ .tseg1_max = 256,
+ .tseg2_min = 2,
+ .tseg2_max = 128,
+ .sjw_max = 128,
+ .brp_min = 1,
+ .brp_max = 512,
+ .brp_inc = 1
+};
+
+static const struct can_tdc_const dummy_can_fd_tdc_const = {
+ .tdcv_min = 0,
+ .tdcv_max = 0, /* Manual mode not supported. */
+ .tdco_min = 0,
+ .tdco_max = 127,
+ .tdcf_min = 0,
+ .tdcf_max = 127
+};
+
+static const struct can_bittiming_const dummy_can_xl_databittiming_const = {
+ .name = "dummy_can XL",
+ .tseg1_min = 2,
+ .tseg1_max = 256,
+ .tseg2_min = 2,
+ .tseg2_max = 128,
+ .sjw_max = 128,
+ .brp_min = 1,
+ .brp_max = 512,
+ .brp_inc = 1
+};
+
+static const struct can_tdc_const dummy_can_xl_tdc_const = {
+ .tdcv_min = 0,
+ .tdcv_max = 0, /* Manual mode not supported. */
+ .tdco_min = 0,
+ .tdco_max = 127,
+ .tdcf_min = 0,
+ .tdcf_max = 127
+};
+
+static const struct can_pwm_const dummy_can_pwm_const = {
+ .pwms_min = 1,
+ .pwms_max = 8,
+ .pwml_min = 2,
+ .pwml_max = 24,
+ .pwmo_min = 0,
+ .pwmo_max = 16,
+};
+
+static void dummy_can_print_bittiming(struct net_device *dev,
+ struct can_bittiming *bt)
+{
+ netdev_dbg(dev, "\tbitrate: %u\n", bt->bitrate);
+ netdev_dbg(dev, "\tsample_point: %u\n", bt->sample_point);
+ netdev_dbg(dev, "\ttq: %u\n", bt->tq);
+ netdev_dbg(dev, "\tprop_seg: %u\n", bt->prop_seg);
+ netdev_dbg(dev, "\tphase_seg1: %u\n", bt->phase_seg1);
+ netdev_dbg(dev, "\tphase_seg2: %u\n", bt->phase_seg2);
+ netdev_dbg(dev, "\tsjw: %u\n", bt->sjw);
+ netdev_dbg(dev, "\tbrp: %u\n", bt->brp);
+}
+
+static void dummy_can_print_tdc(struct net_device *dev, struct can_tdc *tdc)
+{
+ netdev_dbg(dev, "\t\ttdcv: %u\n", tdc->tdcv);
+ netdev_dbg(dev, "\t\ttdco: %u\n", tdc->tdco);
+ netdev_dbg(dev, "\t\ttdcf: %u\n", tdc->tdcf);
+}
+
+static void dummy_can_print_pwm(struct net_device *dev, struct can_pwm *pwm,
+ struct can_bittiming *dbt)
+{
+ netdev_dbg(dev, "\t\tpwms: %u\n", pwm->pwms);
+ netdev_dbg(dev, "\t\tpwml: %u\n", pwm->pwml);
+ netdev_dbg(dev, "\t\tpwmo: %u\n", pwm->pwmo);
+}
+
+static void dummy_can_print_ctrlmode(struct net_device *dev)
+{
+ struct dummy_can *priv = netdev_priv(dev);
+ struct can_priv *can_priv = &priv->can;
+ unsigned long supported = can_priv->ctrlmode_supported;
+ u32 enabled = can_priv->ctrlmode;
+
+ netdev_dbg(dev, "Control modes:\n");
+ netdev_dbg(dev, "\tsupported: 0x%08x\n", (u32)supported);
+ netdev_dbg(dev, "\tenabled: 0x%08x\n", enabled);
+
+ if (supported) {
+ int idx;
+
+ netdev_dbg(dev, "\tlist:");
+ for_each_set_bit(idx, &supported, BITS_PER_TYPE(u32))
+ netdev_dbg(dev, "\t\t%s: %s\n",
+ can_get_ctrlmode_str(BIT(idx)),
+ enabled & BIT(idx) ? "on" : "off");
+ }
+}
+
+static void dummy_can_print_bittiming_info(struct net_device *dev)
+{
+ struct dummy_can *priv = netdev_priv(dev);
+ struct can_priv *can_priv = &priv->can;
+
+ netdev_dbg(dev, "Clock frequency: %u\n", can_priv->clock.freq);
+ netdev_dbg(dev, "Maximum bitrate: %u\n", can_priv->bitrate_max);
+ netdev_dbg(dev, "MTU: %u\n", dev->mtu);
+ netdev_dbg(dev, "\n");
+
+ dummy_can_print_ctrlmode(dev);
+ netdev_dbg(dev, "\n");
+
+ netdev_dbg(dev, "Classical CAN nominal bittiming:\n");
+ dummy_can_print_bittiming(dev, &can_priv->bittiming);
+ netdev_dbg(dev, "\n");
+
+ if (can_priv->ctrlmode & CAN_CTRLMODE_FD) {
+ netdev_dbg(dev, "CAN FD databittiming:\n");
+ dummy_can_print_bittiming(dev, &can_priv->fd.data_bittiming);
+ if (can_fd_tdc_is_enabled(can_priv)) {
+ netdev_dbg(dev, "\tCAN FD TDC:\n");
+ dummy_can_print_tdc(dev, &can_priv->fd.tdc);
+ }
+ }
+ netdev_dbg(dev, "\n");
+
+ if (can_priv->ctrlmode & CAN_CTRLMODE_XL) {
+ netdev_dbg(dev, "CAN XL databittiming:\n");
+ dummy_can_print_bittiming(dev, &can_priv->xl.data_bittiming);
+ if (can_xl_tdc_is_enabled(can_priv)) {
+ netdev_dbg(dev, "\tCAN XL TDC:\n");
+ dummy_can_print_tdc(dev, &can_priv->xl.tdc);
+ }
+ if (can_priv->ctrlmode & CAN_CTRLMODE_XL_TMS) {
+ netdev_dbg(dev, "\tCAN XL PWM:\n");
+ dummy_can_print_pwm(dev, &can_priv->xl.pwm,
+ &can_priv->xl.data_bittiming);
+ }
+ }
+ netdev_dbg(dev, "\n");
+}
+
+static int dummy_can_netdev_open(struct net_device *dev)
+{
+ int ret;
+ struct can_priv *priv = netdev_priv(dev);
+
+ dummy_can_print_bittiming_info(dev);
+ netdev_dbg(dev, "error-signalling is %s\n",
+ str_enabled_disabled(!can_dev_in_xl_only_mode(priv)));
+
+ ret = open_candev(dev);
+ if (ret)
+ return ret;
+ netif_start_queue(dev);
+ netdev_dbg(dev, "dummy-can is up\n");
+
+ return 0;
+}
+
+static int dummy_can_netdev_close(struct net_device *dev)
+{
+ netif_stop_queue(dev);
+ close_candev(dev);
+ netdev_dbg(dev, "dummy-can is down\n");
+
+ return 0;
+}
+
+static netdev_tx_t dummy_can_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ if (can_dev_dropped_skb(dev, skb))
+ return NETDEV_TX_OK;
+
+ can_put_echo_skb(skb, dev, 0, 0);
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += can_get_echo_skb(dev, 0, NULL);
+
+ return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops dummy_can_netdev_ops = {
+ .ndo_open = dummy_can_netdev_open,
+ .ndo_stop = dummy_can_netdev_close,
+ .ndo_start_xmit = dummy_can_start_xmit,
+};
+
+static const struct ethtool_ops dummy_can_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
+static int __init dummy_can_init(void)
+{
+ struct net_device *dev;
+ struct dummy_can *priv;
+ int ret;
+
+ dev = alloc_candev(sizeof(*priv), 1);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->netdev_ops = &dummy_can_netdev_ops;
+ dev->ethtool_ops = &dummy_can_ethtool_ops;
+ priv = netdev_priv(dev);
+ priv->can.bittiming_const = &dummy_can_bittiming_const;
+ priv->can.bitrate_max = 20 * MEGA /* BPS */;
+ priv->can.clock.freq = 160 * MEGA /* Hz */;
+ priv->can.fd.data_bittiming_const = &dummy_can_fd_databittiming_const;
+ priv->can.fd.tdc_const = &dummy_can_fd_tdc_const;
+ priv->can.xl.data_bittiming_const = &dummy_can_xl_databittiming_const;
+ priv->can.xl.tdc_const = &dummy_can_xl_tdc_const;
+ priv->can.xl.pwm_const = &dummy_can_pwm_const;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
+ CAN_CTRLMODE_FD | CAN_CTRLMODE_TDC_AUTO |
+ CAN_CTRLMODE_RESTRICTED | CAN_CTRLMODE_XL |
+ CAN_CTRLMODE_XL_TDC_AUTO | CAN_CTRLMODE_XL_TMS;
+ priv->dev = dev;
+
+ ret = register_candev(priv->dev);
+ if (ret) {
+ free_candev(priv->dev);
+ return ret;
+ }
+
+ dummy_can = priv;
+ netdev_dbg(dev, "dummy-can ready\n");
+
+ return 0;
+}
+
+static void __exit dummy_can_exit(void)
+{
+ struct net_device *dev = dummy_can->dev;
+
+ netdev_dbg(dev, "dummy-can bye bye\n");
+ unregister_candev(dev);
+ free_candev(dev);
+}
+
+module_init(dummy_can_init);
+module_exit(dummy_can_exit);
+
+MODULE_DESCRIPTION("A dummy CAN driver, mainly to test the netlink interface");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Vincent Mailhol <mailhol@kernel.org>");
diff --git a/drivers/net/can/esd/esd_402_pci-core.c b/drivers/net/can/esd/esd_402_pci-core.c
index b7cdcffd0e45..c826f00c551b 100644
--- a/drivers/net/can/esd/esd_402_pci-core.c
+++ b/drivers/net/can/esd/esd_402_pci-core.c
@@ -86,8 +86,8 @@ static const struct net_device_ops pci402_acc_netdev_ops = {
.ndo_open = acc_open,
.ndo_stop = acc_close,
.ndo_start_xmit = acc_start_xmit,
- .ndo_change_mtu = can_change_mtu,
- .ndo_eth_ioctl = can_eth_ioctl_hwts,
+ .ndo_hwtstamp_get = can_hwtstamp_get,
+ .ndo_hwtstamp_set = can_hwtstamp_set,
};
static const struct ethtool_ops pci402_acc_ethtool_ops = {
@@ -369,12 +369,13 @@ static int pci402_init_cores(struct pci_dev *pdev)
SET_NETDEV_DEV(netdev, &pdev->dev);
priv = netdev_priv(netdev);
+ priv->can.clock.freq = card->ov.core_frequency;
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_BERR_REPORTING |
CAN_CTRLMODE_CC_LEN8_DLC;
-
- priv->can.clock.freq = card->ov.core_frequency;
+ if (card->ov.features & ACC_OV_REG_FEAT_MASK_DAR)
+ priv->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;
if (card->ov.features & ACC_OV_REG_FEAT_MASK_CANFD)
priv->can.bittiming_const = &pci402_bittiming_const_canfd;
else
diff --git a/drivers/net/can/esd/esdacc.c b/drivers/net/can/esd/esdacc.c
index 121cbbf81458..73e66f9a3781 100644
--- a/drivers/net/can/esd/esdacc.c
+++ b/drivers/net/can/esd/esdacc.c
@@ -17,6 +17,9 @@
/* esdACC DLC register layout */
#define ACC_DLC_DLC_MASK GENMASK(3, 0)
#define ACC_DLC_RTR_FLAG BIT(4)
+#define ACC_DLC_SSTX_FLAG BIT(24) /* Single Shot TX */
+
+/* esdACC DLC in struct acc_bmmsg_rxtxdone::acc_dlc.len only! */
#define ACC_DLC_TXD_FLAG BIT(5)
/* ecc value of esdACC equals SJA1000's ECC register */
@@ -43,8 +46,8 @@
static void acc_resetmode_enter(struct acc_core *core)
{
- acc_set_bits(core, ACC_CORE_OF_CTRL_MODE,
- ACC_REG_CONTROL_MASK_MODE_RESETMODE);
+ acc_set_bits(core, ACC_CORE_OF_CTRL,
+ ACC_REG_CTRL_MASK_RESETMODE);
/* Read back reset mode bit to flush PCI write posting */
acc_resetmode_entered(core);
@@ -52,14 +55,14 @@ static void acc_resetmode_enter(struct acc_core *core)
static void acc_resetmode_leave(struct acc_core *core)
{
- acc_clear_bits(core, ACC_CORE_OF_CTRL_MODE,
- ACC_REG_CONTROL_MASK_MODE_RESETMODE);
+ acc_clear_bits(core, ACC_CORE_OF_CTRL,
+ ACC_REG_CTRL_MASK_RESETMODE);
/* Read back reset mode bit to flush PCI write posting */
acc_resetmode_entered(core);
}
-static void acc_txq_put(struct acc_core *core, u32 acc_id, u8 acc_dlc,
+static void acc_txq_put(struct acc_core *core, u32 acc_id, u32 acc_dlc,
const void *data)
{
acc_write32_noswap(core, ACC_CORE_OF_TXFIFO_DATA_1,
@@ -172,7 +175,7 @@ int acc_open(struct net_device *netdev)
struct acc_net_priv *priv = netdev_priv(netdev);
struct acc_core *core = priv->core;
u32 tx_fifo_status;
- u32 ctrl_mode;
+ u32 ctrl;
int err;
/* Retry to enter RESET mode if out of sync. */
@@ -187,19 +190,19 @@ int acc_open(struct net_device *netdev)
if (err)
return err;
- ctrl_mode = ACC_REG_CONTROL_MASK_IE_RXTX |
- ACC_REG_CONTROL_MASK_IE_TXERROR |
- ACC_REG_CONTROL_MASK_IE_ERRWARN |
- ACC_REG_CONTROL_MASK_IE_OVERRUN |
- ACC_REG_CONTROL_MASK_IE_ERRPASS;
+ ctrl = ACC_REG_CTRL_MASK_IE_RXTX |
+ ACC_REG_CTRL_MASK_IE_TXERROR |
+ ACC_REG_CTRL_MASK_IE_ERRWARN |
+ ACC_REG_CTRL_MASK_IE_OVERRUN |
+ ACC_REG_CTRL_MASK_IE_ERRPASS;
if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
- ctrl_mode |= ACC_REG_CONTROL_MASK_IE_BUSERR;
+ ctrl |= ACC_REG_CTRL_MASK_IE_BUSERR;
if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
- ctrl_mode |= ACC_REG_CONTROL_MASK_MODE_LOM;
+ ctrl |= ACC_REG_CTRL_MASK_LOM;
- acc_set_bits(core, ACC_CORE_OF_CTRL_MODE, ctrl_mode);
+ acc_set_bits(core, ACC_CORE_OF_CTRL, ctrl);
acc_resetmode_leave(core);
priv->can.state = CAN_STATE_ERROR_ACTIVE;
@@ -218,13 +221,13 @@ int acc_close(struct net_device *netdev)
struct acc_net_priv *priv = netdev_priv(netdev);
struct acc_core *core = priv->core;
- acc_clear_bits(core, ACC_CORE_OF_CTRL_MODE,
- ACC_REG_CONTROL_MASK_IE_RXTX |
- ACC_REG_CONTROL_MASK_IE_TXERROR |
- ACC_REG_CONTROL_MASK_IE_ERRWARN |
- ACC_REG_CONTROL_MASK_IE_OVERRUN |
- ACC_REG_CONTROL_MASK_IE_ERRPASS |
- ACC_REG_CONTROL_MASK_IE_BUSERR);
+ acc_clear_bits(core, ACC_CORE_OF_CTRL,
+ ACC_REG_CTRL_MASK_IE_RXTX |
+ ACC_REG_CTRL_MASK_IE_TXERROR |
+ ACC_REG_CTRL_MASK_IE_ERRWARN |
+ ACC_REG_CTRL_MASK_IE_OVERRUN |
+ ACC_REG_CTRL_MASK_IE_ERRPASS |
+ ACC_REG_CTRL_MASK_IE_BUSERR);
netif_stop_queue(netdev);
acc_resetmode_enter(core);
@@ -233,9 +236,9 @@ int acc_close(struct net_device *netdev)
/* Mark pending TX requests to be aborted after controller restart. */
acc_write32(core, ACC_CORE_OF_TX_ABORT_MASK, 0xffff);
- /* ACC_REG_CONTROL_MASK_MODE_LOM is only accessible in RESET mode */
- acc_clear_bits(core, ACC_CORE_OF_CTRL_MODE,
- ACC_REG_CONTROL_MASK_MODE_LOM);
+ /* ACC_REG_CTRL_MASK_LOM is only accessible in RESET mode */
+ acc_clear_bits(core, ACC_CORE_OF_CTRL,
+ ACC_REG_CTRL_MASK_LOM);
close_candev(netdev);
return 0;
@@ -249,9 +252,9 @@ netdev_tx_t acc_start_xmit(struct sk_buff *skb, struct net_device *netdev)
u8 tx_fifo_head = core->tx_fifo_head;
int fifo_usage;
u32 acc_id;
- u8 acc_dlc;
+ u32 acc_dlc;
- if (can_dropped_invalid_skb(netdev, skb))
+ if (can_dev_dropped_skb(netdev, skb))
return NETDEV_TX_OK;
/* Access core->tx_fifo_tail only once because it may be changed
@@ -274,6 +277,8 @@ netdev_tx_t acc_start_xmit(struct sk_buff *skb, struct net_device *netdev)
acc_dlc = can_get_cc_dlc(cf, priv->can.ctrlmode);
if (cf->can_id & CAN_RTR_FLAG)
acc_dlc |= ACC_DLC_RTR_FLAG;
+ if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+ acc_dlc |= ACC_DLC_SSTX_FLAG;
if (cf->can_id & CAN_EFF_FLAG) {
acc_id = cf->can_id & CAN_EFF_MASK;
diff --git a/drivers/net/can/esd/esdacc.h b/drivers/net/can/esd/esdacc.h
index a70488b25d39..6b7ebd8c91b2 100644
--- a/drivers/net/can/esd/esdacc.h
+++ b/drivers/net/can/esd/esdacc.h
@@ -35,6 +35,7 @@
*/
#define ACC_OV_REG_FEAT_MASK_CANFD BIT(27 - 16)
#define ACC_OV_REG_FEAT_MASK_NEW_PSC BIT(28 - 16)
+#define ACC_OV_REG_FEAT_MASK_DAR BIT(30 - 16)
#define ACC_OV_REG_MODE_MASK_ENDIAN_LITTLE BIT(0)
#define ACC_OV_REG_MODE_MASK_BM_ENABLE BIT(1)
@@ -50,7 +51,7 @@
#define ACC_OV_REG_MODE_MASK_FPGA_RESET BIT(31)
/* esdACC CAN Core Module */
-#define ACC_CORE_OF_CTRL_MODE 0x0000
+#define ACC_CORE_OF_CTRL 0x0000
#define ACC_CORE_OF_STATUS_IRQ 0x0008
#define ACC_CORE_OF_BRP 0x000c
#define ACC_CORE_OF_BTR 0x0010
@@ -66,21 +67,22 @@
#define ACC_CORE_OF_TXFIFO_DATA_0 0x00c8
#define ACC_CORE_OF_TXFIFO_DATA_1 0x00cc
-#define ACC_REG_CONTROL_MASK_MODE_RESETMODE BIT(0)
-#define ACC_REG_CONTROL_MASK_MODE_LOM BIT(1)
-#define ACC_REG_CONTROL_MASK_MODE_STM BIT(2)
-#define ACC_REG_CONTROL_MASK_MODE_TRANSEN BIT(5)
-#define ACC_REG_CONTROL_MASK_MODE_TS BIT(6)
-#define ACC_REG_CONTROL_MASK_MODE_SCHEDULE BIT(7)
-
-#define ACC_REG_CONTROL_MASK_IE_RXTX BIT(8)
-#define ACC_REG_CONTROL_MASK_IE_TXERROR BIT(9)
-#define ACC_REG_CONTROL_MASK_IE_ERRWARN BIT(10)
-#define ACC_REG_CONTROL_MASK_IE_OVERRUN BIT(11)
-#define ACC_REG_CONTROL_MASK_IE_TSI BIT(12)
-#define ACC_REG_CONTROL_MASK_IE_ERRPASS BIT(13)
-#define ACC_REG_CONTROL_MASK_IE_ALI BIT(14)
-#define ACC_REG_CONTROL_MASK_IE_BUSERR BIT(15)
+/* CTRL register layout */
+#define ACC_REG_CTRL_MASK_RESETMODE BIT(0)
+#define ACC_REG_CTRL_MASK_LOM BIT(1)
+#define ACC_REG_CTRL_MASK_STM BIT(2)
+#define ACC_REG_CTRL_MASK_TRANSEN BIT(5)
+#define ACC_REG_CTRL_MASK_TS BIT(6)
+#define ACC_REG_CTRL_MASK_SCHEDULE BIT(7)
+
+#define ACC_REG_CTRL_MASK_IE_RXTX BIT(8)
+#define ACC_REG_CTRL_MASK_IE_TXERROR BIT(9)
+#define ACC_REG_CTRL_MASK_IE_ERRWARN BIT(10)
+#define ACC_REG_CTRL_MASK_IE_OVERRUN BIT(11)
+#define ACC_REG_CTRL_MASK_IE_TSI BIT(12)
+#define ACC_REG_CTRL_MASK_IE_ERRPASS BIT(13)
+#define ACC_REG_CTRL_MASK_IE_ALI BIT(14)
+#define ACC_REG_CTRL_MASK_IE_BUSERR BIT(15)
/* BRP and BTR register layout for CAN-Classic version */
#define ACC_REG_BRP_CL_MASK_BRP GENMASK(8, 0)
@@ -300,9 +302,9 @@ static inline void acc_clear_bits(struct acc_core *core,
static inline int acc_resetmode_entered(struct acc_core *core)
{
- u32 ctrl = acc_read32(core, ACC_CORE_OF_CTRL_MODE);
+ u32 ctrl = acc_read32(core, ACC_CORE_OF_CTRL);
- return (ctrl & ACC_REG_CONTROL_MASK_MODE_RESETMODE) != 0;
+ return (ctrl & ACC_REG_CTRL_MASK_RESETMODE) != 0;
}
static inline u32 acc_ov_read32(struct acc_ov *ov, unsigned short offs)
diff --git a/drivers/net/can/flexcan/flexcan-core.c b/drivers/net/can/flexcan/flexcan-core.c
index 8ea7f2795551..f5d22c61503f 100644
--- a/drivers/net/can/flexcan/flexcan-core.c
+++ b/drivers/net/can/flexcan/flexcan-core.c
@@ -26,6 +26,7 @@
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/can/platform/flexcan.h>
+#include <linux/phy/phy.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/regmap.h>
@@ -354,6 +355,14 @@ static struct flexcan_devtype_data fsl_imx93_devtype_data = {
FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
};
+static const struct flexcan_devtype_data fsl_imx95_devtype_data = {
+ .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
+ FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_RX_MAILBOX |
+ FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SUPPORT_FD |
+ FLEXCAN_QUIRK_SUPPORT_ECC | FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR | FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI,
+};
+
static const struct flexcan_devtype_data fsl_vf610_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_RX_MAILBOX |
@@ -378,6 +387,16 @@ static const struct flexcan_devtype_data fsl_lx2160a_r1_devtype_data = {
FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
};
+static const struct flexcan_devtype_data nxp_s32g2_devtype_data = {
+ .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
+ FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
+ FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_SUPPORT_FD |
+ FLEXCAN_QUIRK_SUPPORT_ECC | FLEXCAN_QUIRK_NR_IRQ_3 |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR |
+ FLEXCAN_QUIRK_SECONDARY_MB_IRQ,
+};
+
static const struct can_bittiming_const flexcan_bittiming_const = {
.name = DRV_NAME,
.tseg1_min = 4,
@@ -544,6 +563,13 @@ static inline int flexcan_enter_stop_mode(struct flexcan_priv *priv)
} else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR) {
regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
1 << priv->stm.req_bit, 1 << priv->stm.req_bit);
+ } else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI) {
+ /* For SCMI mode the driver does nothing here: after Linux suspends,
+ * the ATF sends a request to the SM (System Manager, M33 core) via the
+ * SCMI protocol. Once the SM receives the request, it asserts the
+ * IPG_STOP signal to FlexCAN, putting the controller into STOP mode.
+ */
+ return 0;
}
return flexcan_low_power_enter_ack(priv);
@@ -555,7 +581,11 @@ static inline int flexcan_exit_stop_mode(struct flexcan_priv *priv)
u32 reg_mcr;
int ret;
- /* remove stop request */
+ /* Remove the stop request. For FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI,
+ * do nothing here: the ATF already sent the request to the SM before
+ * Linux resumed. Once the SM receives the request, it deasserts the
+ * IPG_STOP signal to FlexCAN.
+ */
if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW) {
ret = flexcan_stop_mode_enable_scfw(priv, false);
if (ret < 0)
@@ -615,18 +645,22 @@ static void flexcan_clks_disable(const struct flexcan_priv *priv)
static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv)
{
- if (!priv->reg_xceiver)
- return 0;
+ if (priv->reg_xceiver)
+ return regulator_enable(priv->reg_xceiver);
+ else if (priv->transceiver)
+ return phy_power_on(priv->transceiver);
- return regulator_enable(priv->reg_xceiver);
+ return 0;
}
static inline int flexcan_transceiver_disable(const struct flexcan_priv *priv)
{
- if (!priv->reg_xceiver)
- return 0;
+ if (priv->reg_xceiver)
+ return regulator_disable(priv->reg_xceiver);
+ else if (priv->transceiver)
+ return phy_power_off(priv->transceiver);
- return regulator_disable(priv->reg_xceiver);
+ return 0;
}
static int flexcan_chip_enable(struct flexcan_priv *priv)
@@ -1192,7 +1226,7 @@ static void flexcan_set_bittiming_cbt(const struct net_device *dev)
{
struct flexcan_priv *priv = netdev_priv(dev);
struct can_bittiming *bt = &priv->can.bittiming;
- struct can_bittiming *dbt = &priv->can.data_bittiming;
+ struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
struct flexcan_regs __iomem *regs = priv->regs;
u32 reg_cbt, reg_fdctrl;
@@ -1743,14 +1777,25 @@ static int flexcan_open(struct net_device *dev)
goto out_free_irq_boff;
}
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SECONDARY_MB_IRQ) {
+ err = request_irq(priv->irq_secondary_mb,
+ flexcan_irq, IRQF_SHARED, dev->name, dev);
+ if (err)
+ goto out_free_irq_err;
+ }
+
flexcan_chip_interrupts_enable(dev);
netif_start_queue(dev);
return 0;
+ out_free_irq_err:
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3)
+ free_irq(priv->irq_err, dev);
out_free_irq_boff:
- free_irq(priv->irq_boff, dev);
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3)
+ free_irq(priv->irq_boff, dev);
out_free_irq:
free_irq(dev->irq, dev);
out_can_rx_offload_disable:
@@ -1775,6 +1820,9 @@ static int flexcan_close(struct net_device *dev)
netif_stop_queue(dev);
flexcan_chip_interrupts_disable(dev);
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SECONDARY_MB_IRQ)
+ free_irq(priv->irq_secondary_mb, dev);
+
if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3) {
free_irq(priv->irq_err, dev);
free_irq(priv->irq_boff, dev);
@@ -1819,7 +1867,6 @@ static const struct net_device_ops flexcan_netdev_ops = {
.ndo_open = flexcan_open,
.ndo_stop = flexcan_close,
.ndo_start_xmit = flexcan_start_xmit,
- .ndo_change_mtu = can_change_mtu,
};
static int register_flexcandev(struct net_device *dev)
@@ -1983,6 +2030,9 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev)
ret = flexcan_setup_stop_mode_scfw(pdev);
else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR)
ret = flexcan_setup_stop_mode_gpr(pdev);
+ else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI)
+ /* The ATF handles all IPG_STOP related work */
+ ret = 0;
else
/* return 0 directly if doesn't support stop mode feature */
return 0;
@@ -2009,6 +2059,7 @@ static const struct of_device_id flexcan_of_match[] = {
{ .compatible = "fsl,imx8qm-flexcan", .data = &fsl_imx8qm_devtype_data, },
{ .compatible = "fsl,imx8mp-flexcan", .data = &fsl_imx8mp_devtype_data, },
{ .compatible = "fsl,imx93-flexcan", .data = &fsl_imx93_devtype_data, },
+ { .compatible = "fsl,imx95-flexcan", .data = &fsl_imx95_devtype_data, },
{ .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
{ .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
{ .compatible = "fsl,imx53-flexcan", .data = &fsl_imx25_devtype_data, },
@@ -2018,6 +2069,7 @@ static const struct of_device_id flexcan_of_match[] = {
{ .compatible = "fsl,vf610-flexcan", .data = &fsl_vf610_devtype_data, },
{ .compatible = "fsl,ls1021ar2-flexcan", .data = &fsl_ls1021a_r2_devtype_data, },
{ .compatible = "fsl,lx2160ar1-flexcan", .data = &fsl_lx2160a_r1_devtype_data, },
+ { .compatible = "nxp,s32g2-flexcan", .data = &nxp_s32g2_devtype_data, },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, flexcan_of_match);
@@ -2038,6 +2090,7 @@ static int flexcan_probe(struct platform_device *pdev)
struct net_device *dev;
struct flexcan_priv *priv;
struct regulator *reg_xceiver;
+ struct phy *transceiver;
struct clk *clk_ipg = NULL, *clk_per = NULL;
struct flexcan_regs __iomem *regs;
struct flexcan_platform_data *pdata;
@@ -2053,6 +2106,11 @@ static int flexcan_probe(struct platform_device *pdev)
else if (IS_ERR(reg_xceiver))
return PTR_ERR(reg_xceiver);
+ transceiver = devm_phy_optional_get(&pdev->dev, NULL);
+ if (IS_ERR(transceiver))
+ return dev_err_probe(&pdev->dev, PTR_ERR(transceiver),
+ "failed to get phy\n");
+
if (pdev->dev.of_node) {
of_property_read_u32(pdev->dev.of_node,
"clock-frequency", &clock_freq);
@@ -2150,6 +2208,10 @@ static int flexcan_probe(struct platform_device *pdev)
priv->clk_per = clk_per;
priv->clk_src = clk_src;
priv->reg_xceiver = reg_xceiver;
+ priv->transceiver = transceiver;
+
+ if (transceiver)
+ priv->can.bitrate_max = transceiver->attrs.max_link_rate;
if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3) {
priv->irq_boff = platform_get_irq(pdev, 1);
@@ -2164,11 +2226,19 @@ static int flexcan_probe(struct platform_device *pdev)
}
}
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SECONDARY_MB_IRQ) {
+ priv->irq_secondary_mb = platform_get_irq_byname(pdev, "mb-1");
+ if (priv->irq_secondary_mb < 0) {
+ err = priv->irq_secondary_mb;
+ goto failed_platform_get_irq;
+ }
+ }
+
if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SUPPORT_FD) {
priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD |
CAN_CTRLMODE_FD_NON_ISO;
priv->can.bittiming_const = &flexcan_fd_bittiming_const;
- priv->can.data_bittiming_const =
+ priv->can.fd.data_bittiming_const =
&flexcan_fd_data_bittiming_const;
} else {
priv->can.bittiming_const = &flexcan_bittiming_const;
@@ -2237,14 +2307,19 @@ static int __maybe_unused flexcan_suspend(struct device *device)
flexcan_chip_interrupts_disable(dev);
+ err = flexcan_transceiver_disable(priv);
+ if (err)
+ return err;
+
err = pinctrl_pm_select_sleep_state(device);
if (err)
return err;
}
netif_stop_queue(dev);
netif_device_detach(dev);
+
+ priv->can.state = CAN_STATE_SLEEPING;
}
- priv->can.state = CAN_STATE_SLEEPING;
return 0;
}
@@ -2255,7 +2330,6 @@ static int __maybe_unused flexcan_resume(struct device *device)
struct flexcan_priv *priv = netdev_priv(dev);
int err;
- priv->can.state = CAN_STATE_ERROR_ACTIVE;
if (netif_running(dev)) {
netif_device_attach(dev);
netif_start_queue(dev);
@@ -2269,12 +2343,20 @@ static int __maybe_unused flexcan_resume(struct device *device)
if (err)
return err;
- err = flexcan_chip_start(dev);
+ err = flexcan_transceiver_enable(priv);
if (err)
return err;
+ err = flexcan_chip_start(dev);
+ if (err) {
+ flexcan_transceiver_disable(priv);
+ return err;
+ }
+
flexcan_chip_interrupts_enable(dev);
}
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
}
return 0;
@@ -2309,9 +2391,19 @@ static int __maybe_unused flexcan_noirq_suspend(struct device *device)
if (device_may_wakeup(device))
flexcan_enable_wakeup_irq(priv, true);
- err = pm_runtime_force_suspend(device);
- if (err)
- return err;
+ /* For FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI, the ATF must send a
+ * request to the SM through the SCMI protocol so that the SM
+ * asserts the IPG_STOP signal, and all of this requires the CAN
+ * clocks to stay on. Once the CAN module has seen IPG_STOP and
+ * switched to STOP mode, whether the CAN clocks stay on or are
+ * gated off depends on the hardware design.
+ */
+ if (!(device_may_wakeup(device) &&
+ priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI)) {
+ err = pm_runtime_force_suspend(device);
+ if (err)
+ return err;
+ }
}
return 0;
@@ -2325,9 +2417,12 @@ static int __maybe_unused flexcan_noirq_resume(struct device *device)
if (netif_running(dev)) {
int err;
- err = pm_runtime_force_resume(device);
- if (err)
- return err;
+ if (!(device_may_wakeup(device) &&
+ priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI)) {
+ err = pm_runtime_force_resume(device);
+ if (err)
+ return err;
+ }
if (device_may_wakeup(device))
flexcan_enable_wakeup_irq(priv, false);
@@ -2349,7 +2444,7 @@ static struct platform_driver flexcan_driver = {
.of_match_table = flexcan_of_match,
},
.probe = flexcan_probe,
- .remove_new = flexcan_remove,
+ .remove = flexcan_remove,
.id_table = flexcan_id_table,
};
diff --git a/drivers/net/can/flexcan/flexcan.h b/drivers/net/can/flexcan/flexcan.h
index 025c3417031f..16692a2502eb 100644
--- a/drivers/net/can/flexcan/flexcan.h
+++ b/drivers/net/can/flexcan/flexcan.h
@@ -68,6 +68,12 @@
#define FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR BIT(15)
/* Device supports RX via FIFO */
#define FLEXCAN_QUIRK_SUPPORT_RX_FIFO BIT(16)
+/* Setup stop mode with ATF SCMI protocol to support wakeup */
+#define FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI BIT(17)
+/* Device has two separate interrupt lines for two mailbox ranges, which
+ * both need to have an interrupt handler registered.
+ */
+#define FLEXCAN_QUIRK_SECONDARY_MB_IRQ BIT(18)
struct flexcan_devtype_data {
u32 quirks; /* quirks needed for different IP cores */
@@ -101,10 +107,12 @@ struct flexcan_priv {
struct clk *clk_per;
struct flexcan_devtype_data devtype_data;
struct regulator *reg_xceiver;
+ struct phy *transceiver;
struct flexcan_stop_mode stm;
int irq_boff;
int irq_err;
+ int irq_secondary_mb;
/* IPC handle when setup stop mode by System Controller firmware(scfw) */
struct imx_sc_ipc *sc_ipc_handle;
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index 6d3ba71a6a73..3b1b09943436 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -778,7 +778,7 @@ static irqreturn_t grcan_interrupt(int irq, void *dev_id)
*/
if (priv->need_txbug_workaround &&
(sources & (GRCAN_IRQ_TX | GRCAN_IRQ_TXLOSS))) {
- del_timer(&priv->hang_timer);
+ timer_delete(&priv->hang_timer);
}
/* Frame(s) received or transmitted */
@@ -806,7 +806,7 @@ static irqreturn_t grcan_interrupt(int irq, void *dev_id)
*/
static void grcan_running_reset(struct timer_list *t)
{
- struct grcan_priv *priv = from_timer(priv, t, rr_timer);
+ struct grcan_priv *priv = timer_container_of(priv, t, rr_timer);
struct net_device *dev = priv->dev;
struct grcan_registers __iomem *regs = priv->regs;
unsigned long flags;
@@ -817,8 +817,8 @@ static void grcan_running_reset(struct timer_list *t)
spin_lock_irqsave(&priv->lock, flags);
priv->resetting = false;
- del_timer(&priv->hang_timer);
- del_timer(&priv->rr_timer);
+ timer_delete(&priv->hang_timer);
+ timer_delete(&priv->rr_timer);
if (!priv->closing) {
/* Save and reset - config register preserved by grcan_reset */
@@ -897,7 +897,7 @@ static inline void grcan_reset_timer(struct timer_list *timer, __u32 bitrate)
/* Disable channels and schedule a running reset */
static void grcan_initiate_running_reset(struct timer_list *t)
{
- struct grcan_priv *priv = from_timer(priv, t, hang_timer);
+ struct grcan_priv *priv = timer_container_of(priv, t, hang_timer);
struct net_device *dev = priv->dev;
struct grcan_registers __iomem *regs = priv->regs;
unsigned long flags;
@@ -1073,9 +1073,10 @@ static int grcan_open(struct net_device *dev)
if (err)
goto exit_close_candev;
+ napi_enable(&priv->napi);
+
spin_lock_irqsave(&priv->lock, flags);
- napi_enable(&priv->napi);
grcan_start(dev);
if (!(priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
netif_start_queue(dev);
@@ -1107,8 +1108,8 @@ static int grcan_close(struct net_device *dev)
priv->closing = true;
if (priv->need_txbug_workaround) {
spin_unlock_irqrestore(&priv->lock, flags);
- del_timer_sync(&priv->hang_timer);
- del_timer_sync(&priv->rr_timer);
+ timer_delete_sync(&priv->hang_timer);
+ timer_delete_sync(&priv->rr_timer);
spin_lock_irqsave(&priv->lock, flags);
}
netif_stop_queue(dev);
@@ -1146,7 +1147,7 @@ static void grcan_transmit_catch_up(struct net_device *dev)
* so prevent a running reset while catching up
*/
if (priv->need_txbug_workaround)
- del_timer(&priv->hang_timer);
+ timer_delete(&priv->hang_timer);
}
spin_unlock_irqrestore(&priv->lock, flags);
@@ -1560,7 +1561,6 @@ static const struct net_device_ops grcan_netdev_ops = {
.ndo_open = grcan_open,
.ndo_stop = grcan_close,
.ndo_start_xmit = grcan_start_xmit,
- .ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops grcan_ethtool_ops = {
@@ -1725,7 +1725,7 @@ static struct platform_driver grcan_driver = {
.of_match_table = grcan_match,
},
.probe = grcan_probe,
- .remove_new = grcan_remove,
+ .remove = grcan_remove,
};
module_platform_driver(grcan_driver);
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index 72307297d75e..0f83335e4d07 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -390,36 +390,55 @@ static int ifi_canfd_handle_lec_err(struct net_device *ndev)
return 0;
priv->can.can_stats.bus_error++;
- stats->rx_errors++;
/* Propagate the error condition to the CAN stack. */
skb = alloc_can_err_skb(ndev, &cf);
- if (unlikely(!skb))
- return 0;
/* Read the error counter register and check for new errors. */
- cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ if (likely(skb))
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
- if (errctr & IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST)
- cf->data[2] |= CAN_ERR_PROT_OVERLOAD;
+ if (errctr & IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST) {
+ stats->rx_errors++;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_OVERLOAD;
+ }
- if (errctr & IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST)
- cf->data[3] = CAN_ERR_PROT_LOC_ACK;
+ if (errctr & IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST) {
+ stats->tx_errors++;
+ if (likely(skb))
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK;
+ }
- if (errctr & IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST)
- cf->data[2] |= CAN_ERR_PROT_BIT0;
+ if (errctr & IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST) {
+ stats->tx_errors++;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_BIT0;
+ }
- if (errctr & IFI_CANFD_ERROR_CTR_BIT1_ERROR_FIRST)
- cf->data[2] |= CAN_ERR_PROT_BIT1;
+ if (errctr & IFI_CANFD_ERROR_CTR_BIT1_ERROR_FIRST) {
+ stats->tx_errors++;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_BIT1;
+ }
- if (errctr & IFI_CANFD_ERROR_CTR_STUFF_ERROR_FIRST)
- cf->data[2] |= CAN_ERR_PROT_STUFF;
+ if (errctr & IFI_CANFD_ERROR_CTR_STUFF_ERROR_FIRST) {
+ stats->rx_errors++;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ }
- if (errctr & IFI_CANFD_ERROR_CTR_CRC_ERROR_FIRST)
- cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+ if (errctr & IFI_CANFD_ERROR_CTR_CRC_ERROR_FIRST) {
+ stats->rx_errors++;
+ if (likely(skb))
+ cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+ }
- if (errctr & IFI_CANFD_ERROR_CTR_FORM_ERROR_FIRST)
- cf->data[2] |= CAN_ERR_PROT_FORM;
+ if (errctr & IFI_CANFD_ERROR_CTR_FORM_ERROR_FIRST) {
+ stats->rx_errors++;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ }
/* Reset the error counter, ack the IRQ and re-enable the counter. */
writel(IFI_CANFD_ERROR_CTR_ER_RESET, priv->base + IFI_CANFD_ERROR_CTR);
@@ -427,6 +446,9 @@ static int ifi_canfd_handle_lec_err(struct net_device *ndev)
priv->base + IFI_CANFD_INTERRUPT);
writel(IFI_CANFD_ERROR_CTR_ER_ENABLE, priv->base + IFI_CANFD_ERROR_CTR);
+ if (unlikely(!skb))
+ return 0;
+
netif_receive_skb(skb);
return 1;
@@ -647,7 +669,7 @@ static void ifi_canfd_set_bittiming(struct net_device *ndev)
{
struct ifi_canfd_priv *priv = netdev_priv(ndev);
const struct can_bittiming *bt = &priv->can.bittiming;
- const struct can_bittiming *dbt = &priv->can.data_bittiming;
+ const struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
u16 brp, sjw, tseg1, tseg2, tdc;
/* Configure bit timing */
@@ -922,7 +944,6 @@ static const struct net_device_ops ifi_canfd_netdev_ops = {
.ndo_open = ifi_canfd_open,
.ndo_stop = ifi_canfd_close,
.ndo_start_xmit = ifi_canfd_start_xmit,
- .ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops ifi_canfd_ethtool_ops = {
@@ -978,10 +999,10 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev)
priv->can.clock.freq = readl(addr + IFI_CANFD_CANCLOCK);
- priv->can.bittiming_const = &ifi_canfd_bittiming_const;
- priv->can.data_bittiming_const = &ifi_canfd_bittiming_const;
- priv->can.do_set_mode = ifi_canfd_set_mode;
- priv->can.do_get_berr_counter = ifi_canfd_get_berr_counter;
+ priv->can.bittiming_const = &ifi_canfd_bittiming_const;
+ priv->can.fd.data_bittiming_const = &ifi_canfd_bittiming_const;
+ priv->can.do_set_mode = ifi_canfd_set_mode;
+ priv->can.do_get_berr_counter = ifi_canfd_get_berr_counter;
/* IFI CANFD can do both Bosch FD and ISO FD */
priv->can.ctrlmode = CAN_CTRLMODE_FD;
@@ -1033,7 +1054,7 @@ static struct platform_driver ifi_canfd_plat_driver = {
.of_match_table = ifi_canfd_of_table,
},
.probe = ifi_canfd_plat_probe,
- .remove_new = ifi_canfd_plat_remove,
+ .remove = ifi_canfd_plat_remove,
};
module_platform_driver(ifi_canfd_plat_driver);
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index d048ea565b89..1efdd1fd8caa 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1752,7 +1752,6 @@ static const struct net_device_ops ican3_netdev_ops = {
.ndo_open = ican3_open,
.ndo_stop = ican3_stop,
.ndo_start_xmit = ican3_xmit,
- .ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops ican3_ethtool_ops = {
@@ -1867,7 +1866,7 @@ static ssize_t fwinfo_show(struct device *dev,
{
struct ican3_dev *mod = netdev_priv(to_net_dev(dev));
- return scnprintf(buf, PAGE_SIZE, "%s\n", mod->fwinfo);
+ return sysfs_emit(buf, "%s\n", mod->fwinfo);
}
static DEVICE_ATTR_RW(termination);
@@ -2049,7 +2048,7 @@ static struct platform_driver ican3_driver = {
.name = DRV_NAME,
},
.probe = ican3_probe,
- .remove_new = ican3_remove,
+ .remove = ican3_remove,
};
module_platform_driver(ican3_driver);
diff --git a/drivers/net/can/kvaser_pciefd/Makefile b/drivers/net/can/kvaser_pciefd/Makefile
new file mode 100644
index 000000000000..8c5b8cdc6b5f
--- /dev/null
+++ b/drivers/net/can/kvaser_pciefd/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CAN_KVASER_PCIEFD) += kvaser_pciefd.o
+kvaser_pciefd-y = kvaser_pciefd_core.o kvaser_pciefd_devlink.o
diff --git a/drivers/net/can/kvaser_pciefd/kvaser_pciefd.h b/drivers/net/can/kvaser_pciefd/kvaser_pciefd.h
new file mode 100644
index 000000000000..08c9ddc1ee85
--- /dev/null
+++ b/drivers/net/can/kvaser_pciefd/kvaser_pciefd.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/* kvaser_pciefd common definitions and declarations
+ *
+ * Copyright (C) 2025 KVASER AB, Sweden. All rights reserved.
+ */
+
+#ifndef _KVASER_PCIEFD_H
+#define _KVASER_PCIEFD_H
+
+#include <linux/can/dev.h>
+#include <linux/completion.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <net/devlink.h>
+
+#define KVASER_PCIEFD_MAX_CAN_CHANNELS 8UL
+#define KVASER_PCIEFD_DMA_COUNT 2U
+#define KVASER_PCIEFD_DMA_SIZE (4U * 1024U)
+#define KVASER_PCIEFD_CAN_TX_MAX_COUNT 17U
+
+struct kvaser_pciefd;
+
+struct kvaser_pciefd_address_offset {
+ u32 serdes;
+ u32 pci_ien;
+ u32 pci_irq;
+ u32 sysid;
+ u32 loopback;
+ u32 kcan_srb_fifo;
+ u32 kcan_srb;
+ u32 kcan_ch0;
+ u32 kcan_ch1;
+};
+
+struct kvaser_pciefd_irq_mask {
+ u32 kcan_rx0;
+ u32 kcan_tx[KVASER_PCIEFD_MAX_CAN_CHANNELS];
+ u32 all;
+};
+
+struct kvaser_pciefd_dev_ops {
+ void (*kvaser_pciefd_write_dma_map)(struct kvaser_pciefd *pcie,
+ dma_addr_t addr, int index);
+};
+
+struct kvaser_pciefd_driver_data {
+ const struct kvaser_pciefd_address_offset *address_offset;
+ const struct kvaser_pciefd_irq_mask *irq_mask;
+ const struct kvaser_pciefd_dev_ops *ops;
+};
+
+struct kvaser_pciefd_fw_version {
+ u8 major;
+ u8 minor;
+ u16 build;
+};
+
+struct kvaser_pciefd_can {
+ struct can_priv can;
+ struct devlink_port devlink_port;
+ struct kvaser_pciefd *kv_pcie;
+ void __iomem *reg_base;
+ struct can_berr_counter bec;
+ u32 ioc;
+ u8 cmd_seq;
+ u8 tx_max_count;
+ u8 tx_idx;
+ u8 ack_idx;
+ int err_rep_cnt;
+ unsigned int completed_tx_pkts;
+ unsigned int completed_tx_bytes;
+ spinlock_t lock; /* Locks sensitive registers (e.g. MODE) */
+ struct timer_list bec_poll_timer;
+ struct completion start_comp, flush_comp;
+};
+
+struct kvaser_pciefd {
+ struct pci_dev *pci;
+ void __iomem *reg_base;
+ struct kvaser_pciefd_can *can[KVASER_PCIEFD_MAX_CAN_CHANNELS];
+ const struct kvaser_pciefd_driver_data *driver_data;
+ void *dma_data[KVASER_PCIEFD_DMA_COUNT];
+ u8 nr_channels;
+ u32 bus_freq;
+ u32 freq;
+ u32 freq_to_ticks_div;
+ struct kvaser_pciefd_fw_version fw_version;
+};
+
+extern const struct devlink_ops kvaser_pciefd_devlink_ops;
+
+int kvaser_pciefd_devlink_port_register(struct kvaser_pciefd_can *can);
+void kvaser_pciefd_devlink_port_unregister(struct kvaser_pciefd_can *can);
+#endif /* _KVASER_PCIEFD_H */
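
The tx_max_count/tx_idx/ack_idx trio added in this header replaces the echo_lock and echo_idx fields that the core file below drops: the xmit path only advances tx_idx, the ack path only advances ack_idx, and the number of free FIFO slots is tx_max_count minus the in-flight delta, as kvaser_pciefd_tx_avail() computes further down. The echo slot is the masked producer index, which is why alloc_candev() now rounds the echo count up to a power of two. The following is a minimal stand-alone model of that arithmetic, not driver code; the counters are widened to unsigned int here for clarity.

/* Simplified model of the lock-free TX accounting introduced above:
 * tx_idx and ack_idx are free-running counters, so the in-flight count
 * and the masked echo slot stay correct across wrap-around.
 */
#include <stdio.h>

#define TX_MAX_COUNT	17u	/* KVASER_PCIEFD_CAN_TX_MAX_COUNT */
#define ECHO_SKB_MAX	32u	/* roundup_pow_of_two(TX_MAX_COUNT) */

static unsigned int tx_avail(unsigned int tx_idx, unsigned int ack_idx)
{
	return TX_MAX_COUNT - (tx_idx - ack_idx); /* wrap-safe for unsigned */
}

int main(void)
{
	unsigned int tx_idx = 4294967290u, ack_idx = 4294967281u; /* near wrap */

	printf("in flight:  %u\n", tx_idx - ack_idx);		/* 9 */
	printf("free slots: %u\n", tx_avail(tx_idx, ack_idx));	/* 8 */
	printf("echo slot:  %u\n", tx_idx & (ECHO_SKB_MAX - 1)); /* 26 */
	return 0;
}
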
diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd/kvaser_pciefd_core.c
index 7b5028b67cd5..d8c9bfb20230 100644
--- a/drivers/net/can/kvaser_pciefd.c
+++ b/drivers/net/can/kvaser_pciefd/kvaser_pciefd_core.c
@@ -5,6 +5,8 @@
* - PEAK linux canfd driver
*/
+#include "kvaser_pciefd.h"
+
#include <linux/bitfield.h>
#include <linux/can/dev.h>
#include <linux/device.h>
@@ -16,6 +18,7 @@
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/timer.h>
+#include <net/netdev_queues.h>
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Kvaser AB <support@kvaser.com>");
@@ -26,13 +29,9 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
#define KVASER_PCIEFD_WAIT_TIMEOUT msecs_to_jiffies(1000)
#define KVASER_PCIEFD_BEC_POLL_FREQ (jiffies + msecs_to_jiffies(200))
#define KVASER_PCIEFD_MAX_ERR_REP 256U
-#define KVASER_PCIEFD_CAN_TX_MAX_COUNT 17U
-#define KVASER_PCIEFD_MAX_CAN_CHANNELS 8UL
-#define KVASER_PCIEFD_DMA_COUNT 2U
-
-#define KVASER_PCIEFD_DMA_SIZE (4U * 1024U)
#define KVASER_PCIEFD_VENDOR 0x1a07
+
/* Altera based devices */
#define KVASER_PCIEFD_4HS_DEVICE_ID 0x000d
#define KVASER_PCIEFD_2HS_V2_DEVICE_ID 0x000e
@@ -65,6 +64,7 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
#define KVASER_PCIEFD_KCAN_FIFO_LAST_REG 0x180
#define KVASER_PCIEFD_KCAN_CTRL_REG 0x2c0
#define KVASER_PCIEFD_KCAN_CMD_REG 0x400
+#define KVASER_PCIEFD_KCAN_IOC_REG 0x404
#define KVASER_PCIEFD_KCAN_IEN_REG 0x408
#define KVASER_PCIEFD_KCAN_IRQ_REG 0x410
#define KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG 0x414
@@ -135,6 +135,9 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
/* Request status packet */
#define KVASER_PCIEFD_KCAN_CMD_SRQ BIT(0)
+/* Control CAN LED, active low */
+#define KVASER_PCIEFD_KCAN_IOC_LED BIT(0)
+
/* Transmitter unaligned */
#define KVASER_PCIEFD_KCAN_IRQ_TAL BIT(17)
/* Tx FIFO empty */
@@ -291,35 +294,6 @@ static void kvaser_pciefd_write_dma_map_sf2(struct kvaser_pciefd *pcie,
static void kvaser_pciefd_write_dma_map_xilinx(struct kvaser_pciefd *pcie,
dma_addr_t addr, int index);
-struct kvaser_pciefd_address_offset {
- u32 serdes;
- u32 pci_ien;
- u32 pci_irq;
- u32 sysid;
- u32 loopback;
- u32 kcan_srb_fifo;
- u32 kcan_srb;
- u32 kcan_ch0;
- u32 kcan_ch1;
-};
-
-struct kvaser_pciefd_dev_ops {
- void (*kvaser_pciefd_write_dma_map)(struct kvaser_pciefd *pcie,
- dma_addr_t addr, int index);
-};
-
-struct kvaser_pciefd_irq_mask {
- u32 kcan_rx0;
- u32 kcan_tx[KVASER_PCIEFD_MAX_CAN_CHANNELS];
- u32 all;
-};
-
-struct kvaser_pciefd_driver_data {
- const struct kvaser_pciefd_address_offset *address_offset;
- const struct kvaser_pciefd_irq_mask *irq_mask;
- const struct kvaser_pciefd_dev_ops *ops;
-};
-
static const struct kvaser_pciefd_address_offset kvaser_pciefd_altera_address_offset = {
.serdes = 0x1000,
.pci_ien = 0x50,
@@ -404,32 +378,6 @@ static const struct kvaser_pciefd_driver_data kvaser_pciefd_xilinx_driver_data =
.ops = &kvaser_pciefd_xilinx_dev_ops,
};
-struct kvaser_pciefd_can {
- struct can_priv can;
- struct kvaser_pciefd *kv_pcie;
- void __iomem *reg_base;
- struct can_berr_counter bec;
- u8 cmd_seq;
- int err_rep_cnt;
- int echo_idx;
- spinlock_t lock; /* Locks sensitive registers (e.g. MODE) */
- spinlock_t echo_lock; /* Locks the message echo buffer */
- struct timer_list bec_poll_timer;
- struct completion start_comp, flush_comp;
-};
-
-struct kvaser_pciefd {
- struct pci_dev *pci;
- void __iomem *reg_base;
- struct kvaser_pciefd_can *can[KVASER_PCIEFD_MAX_CAN_CHANNELS];
- const struct kvaser_pciefd_driver_data *driver_data;
- void *dma_data[KVASER_PCIEFD_DMA_COUNT];
- u8 nr_channels;
- u32 bus_freq;
- u32 freq;
- u32 freq_to_ticks_div;
-};
-
struct kvaser_pciefd_rx_packet {
u32 header[2];
u64 timestamp;
@@ -524,6 +472,16 @@ static inline void kvaser_pciefd_abort_flush_reset(struct kvaser_pciefd_can *can
kvaser_pciefd_send_kcan_cmd(can, KVASER_PCIEFD_KCAN_CMD_AT);
}
+static inline void kvaser_pciefd_set_led(struct kvaser_pciefd_can *can, bool on)
+{
+ if (on)
+ can->ioc &= ~KVASER_PCIEFD_KCAN_IOC_LED;
+ else
+ can->ioc |= KVASER_PCIEFD_KCAN_IOC_LED;
+
+ iowrite32(can->ioc, can->reg_base + KVASER_PCIEFD_KCAN_IOC_REG);
+}
+
static void kvaser_pciefd_enable_err_gen(struct kvaser_pciefd_can *can)
{
u32 mode;
@@ -550,7 +508,7 @@ static void kvaser_pciefd_disable_err_gen(struct kvaser_pciefd_can *can)
spin_unlock_irqrestore(&can->lock, irq);
}
-static void kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can)
+static inline void kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can)
{
u32 msk;
@@ -631,7 +589,7 @@ static int kvaser_pciefd_bus_on(struct kvaser_pciefd_can *can)
u32 mode;
unsigned long irq;
- del_timer(&can->bec_poll_timer);
+ timer_delete(&can->bec_poll_timer);
if (!completion_done(&can->flush_comp))
kvaser_pciefd_start_controller_flush(can);
@@ -711,17 +669,20 @@ static void kvaser_pciefd_pwm_start(struct kvaser_pciefd_can *can)
static int kvaser_pciefd_open(struct net_device *netdev)
{
- int err;
+ int ret;
struct kvaser_pciefd_can *can = netdev_priv(netdev);
- err = open_candev(netdev);
- if (err)
- return err;
+ can->tx_idx = 0;
+ can->ack_idx = 0;
- err = kvaser_pciefd_bus_on(can);
- if (err) {
+ ret = open_candev(netdev);
+ if (ret)
+ return ret;
+
+ ret = kvaser_pciefd_bus_on(can);
+ if (ret) {
close_candev(netdev);
- return err;
+ return ret;
}
return 0;
@@ -742,24 +703,29 @@ static int kvaser_pciefd_stop(struct net_device *netdev)
ret = -ETIMEDOUT;
} else {
iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
- del_timer(&can->bec_poll_timer);
+ timer_delete(&can->bec_poll_timer);
}
can->can.state = CAN_STATE_STOPPED;
+ netdev_reset_queue(netdev);
close_candev(netdev);
return ret;
}
+static unsigned int kvaser_pciefd_tx_avail(const struct kvaser_pciefd_can *can)
+{
+ return can->tx_max_count - (READ_ONCE(can->tx_idx) - READ_ONCE(can->ack_idx));
+}
+
static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p,
- struct kvaser_pciefd_can *can,
+ struct can_priv *can, u8 seq,
struct sk_buff *skb)
{
struct canfd_frame *cf = (struct canfd_frame *)skb->data;
int packet_size;
- int seq = can->echo_idx;
memset(p, 0, sizeof(*p));
- if (can->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+ if (can->ctrlmode & CAN_CTRLMODE_ONE_SHOT)
p->header[1] |= KVASER_PCIEFD_TPACKET_SMS;
if (cf->can_id & CAN_RTR_FLAG)
@@ -782,7 +748,7 @@ static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p,
} else {
p->header[1] |=
FIELD_PREP(KVASER_PCIEFD_RPACKET_DLC_MASK,
- can_get_cc_dlc((struct can_frame *)cf, can->can.ctrlmode));
+ can_get_cc_dlc((struct can_frame *)cf, can->ctrlmode));
}
p->header[1] |= FIELD_PREP(KVASER_PCIEFD_PACKET_SEQ_MASK, seq);
@@ -797,22 +763,24 @@ static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct kvaser_pciefd_can *can = netdev_priv(netdev);
- unsigned long irq_flags;
struct kvaser_pciefd_tx_packet packet;
+ unsigned int seq = can->tx_idx & (can->can.echo_skb_max - 1);
+ unsigned int frame_len;
int nr_words;
- u8 count;
if (can_dev_dropped_skb(netdev, skb))
return NETDEV_TX_OK;
+ if (!netif_subqueue_maybe_stop(netdev, 0, kvaser_pciefd_tx_avail(can), 1, 1))
+ return NETDEV_TX_BUSY;
- nr_words = kvaser_pciefd_prepare_tx_packet(&packet, can, skb);
+ nr_words = kvaser_pciefd_prepare_tx_packet(&packet, &can->can, seq, skb);
- spin_lock_irqsave(&can->echo_lock, irq_flags);
/* Prepare and save echo skb in internal slot */
- can_put_echo_skb(skb, netdev, can->echo_idx, 0);
-
- /* Move echo index to the next slot */
- can->echo_idx = (can->echo_idx + 1) % can->can.echo_skb_max;
+ WRITE_ONCE(can->can.echo_skb[seq], NULL);
+ frame_len = can_skb_get_frame_len(skb);
+ can_put_echo_skb(skb, netdev, seq, frame_len);
+ netdev_sent_queue(netdev, frame_len);
+ WRITE_ONCE(can->tx_idx, can->tx_idx + 1);
/* Write header to fifo */
iowrite32(packet.header[0],
@@ -836,14 +804,7 @@ static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
}
- count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK,
- ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
- /* No room for a new message, stop the queue until at least one
- * successful transmit
- */
- if (count >= can->can.echo_skb_max || can->can.echo_skb[can->echo_idx])
- netif_stop_queue(netdev);
- spin_unlock_irqrestore(&can->echo_lock, irq_flags);
+ netif_subqueue_maybe_stop(netdev, 0, kvaser_pciefd_tx_avail(can), 1, 1);
return NETDEV_TX_OK;
}
@@ -856,7 +817,7 @@ static int kvaser_pciefd_set_bittiming(struct kvaser_pciefd_can *can, bool data)
struct can_bittiming *bt;
if (data)
- bt = &can->can.data_bittiming;
+ bt = &can->can.fd.data_bittiming;
else
bt = &can->can.bittiming;
@@ -930,7 +891,8 @@ static int kvaser_pciefd_get_berr_counter(const struct net_device *ndev,
static void kvaser_pciefd_bec_poll_timer(struct timer_list *data)
{
- struct kvaser_pciefd_can *can = from_timer(can, data, bec_poll_timer);
+ struct kvaser_pciefd_can *can = timer_container_of(can, data,
+ bec_poll_timer);
kvaser_pciefd_enable_err_gen(can);
kvaser_pciefd_request_status(can);
@@ -940,13 +902,37 @@ static void kvaser_pciefd_bec_poll_timer(struct timer_list *data)
static const struct net_device_ops kvaser_pciefd_netdev_ops = {
.ndo_open = kvaser_pciefd_open,
.ndo_stop = kvaser_pciefd_stop,
- .ndo_eth_ioctl = can_eth_ioctl_hwts,
.ndo_start_xmit = kvaser_pciefd_start_xmit,
- .ndo_change_mtu = can_change_mtu,
+ .ndo_hwtstamp_get = can_hwtstamp_get,
+ .ndo_hwtstamp_set = can_hwtstamp_set,
};
+static int kvaser_pciefd_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct kvaser_pciefd_can *can = netdev_priv(netdev);
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ return 3; /* 3 On/Off cycles per second */
+
+ case ETHTOOL_ID_ON:
+ kvaser_pciefd_set_led(can, true);
+ return 0;
+
+ case ETHTOOL_ID_OFF:
+ case ETHTOOL_ID_INACTIVE:
+ kvaser_pciefd_set_led(can, false);
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
static const struct ethtool_ops kvaser_pciefd_ethtool_ops = {
.get_ts_info = can_ethtool_op_get_ts_info_hwts,
+ .set_phys_id = kvaser_pciefd_set_phys_id,
};
static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
@@ -957,9 +943,10 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
struct net_device *netdev;
struct kvaser_pciefd_can *can;
u32 status, tx_nr_packets_max;
+ int ret;
netdev = alloc_candev(sizeof(struct kvaser_pciefd_can),
- KVASER_PCIEFD_CAN_TX_MAX_COUNT);
+ roundup_pow_of_two(KVASER_PCIEFD_CAN_TX_MAX_COUNT));
if (!netdev)
return -ENOMEM;
@@ -970,8 +957,11 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
can->kv_pcie = pcie;
can->cmd_seq = 0;
can->err_rep_cnt = 0;
+ can->completed_tx_pkts = 0;
+ can->completed_tx_bytes = 0;
can->bec.txerr = 0;
can->bec.rxerr = 0;
+ can->can.dev->dev_port = i;
init_completion(&can->start_comp);
init_completion(&can->flush_comp);
@@ -980,26 +970,28 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
/* Disable Bus load reporting */
iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_BUS_LOAD_REG);
+ can->ioc = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_IOC_REG);
+ kvaser_pciefd_set_led(can, false);
+
tx_nr_packets_max =
FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_MAX_MASK,
ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
+ can->tx_max_count = min(KVASER_PCIEFD_CAN_TX_MAX_COUNT, tx_nr_packets_max - 1);
can->can.clock.freq = pcie->freq;
- can->can.echo_skb_max = min(KVASER_PCIEFD_CAN_TX_MAX_COUNT, tx_nr_packets_max - 1);
- can->echo_idx = 0;
- spin_lock_init(&can->echo_lock);
spin_lock_init(&can->lock);
can->can.bittiming_const = &kvaser_pciefd_bittiming_const;
- can->can.data_bittiming_const = &kvaser_pciefd_bittiming_const;
+ can->can.fd.data_bittiming_const = &kvaser_pciefd_bittiming_const;
can->can.do_set_bittiming = kvaser_pciefd_set_nominal_bittiming;
- can->can.do_set_data_bittiming = kvaser_pciefd_set_data_bittiming;
+ can->can.fd.do_set_data_bittiming = kvaser_pciefd_set_data_bittiming;
can->can.do_set_mode = kvaser_pciefd_set_mode;
can->can.do_get_berr_counter = kvaser_pciefd_get_berr_counter;
can->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_FD |
CAN_CTRLMODE_FD_NON_ISO |
- CAN_CTRLMODE_CC_LEN8_DLC;
+ CAN_CTRLMODE_CC_LEN8_DLC |
+ CAN_CTRLMODE_BERR_REPORTING;
status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
if (!(status & KVASER_PCIEFD_KCAN_STAT_FD)) {
@@ -1022,6 +1014,11 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
pcie->can[i] = can;
kvaser_pciefd_pwm_start(can);
+ ret = kvaser_pciefd_devlink_port_register(can);
+ if (ret) {
+ dev_err(&pcie->pci->dev, "Failed to register devlink port\n");
+ return ret;
+ }
}
return 0;
@@ -1032,15 +1029,15 @@ static int kvaser_pciefd_reg_candev(struct kvaser_pciefd *pcie)
int i;
for (i = 0; i < pcie->nr_channels; i++) {
- int err = register_candev(pcie->can[i]->can.dev);
+ int ret = register_candev(pcie->can[i]->can.dev);
- if (err) {
+ if (ret) {
int j;
/* Unregister all successfully registered devices. */
for (j = 0; j < i; j++)
unregister_candev(pcie->can[j]->can.dev);
- return err;
+ return ret;
}
}
@@ -1053,13 +1050,13 @@ static void kvaser_pciefd_write_dma_map_altera(struct kvaser_pciefd *pcie,
void __iomem *serdes_base;
u32 word1, word2;
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- word1 = addr | KVASER_PCIEFD_ALTERA_DMA_64BIT;
- word2 = addr >> 32;
-#else
- word1 = addr;
- word2 = 0;
-#endif
+ if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)) {
+ word1 = lower_32_bits(addr) | KVASER_PCIEFD_ALTERA_DMA_64BIT;
+ word2 = upper_32_bits(addr);
+ } else {
+ word1 = addr;
+ word2 = 0;
+ }
serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x8 * index;
iowrite32(word1, serdes_base);
iowrite32(word2, serdes_base + 0x4);
@@ -1072,9 +1069,9 @@ static void kvaser_pciefd_write_dma_map_sf2(struct kvaser_pciefd *pcie,
u32 lsb = addr & KVASER_PCIEFD_SF2_DMA_LSB_MASK;
u32 msb = 0x0;
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- msb = addr >> 32;
-#endif
+ if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
+ msb = upper_32_bits(addr);
+
serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x10 * index;
iowrite32(lsb, serdes_base);
iowrite32(msb, serdes_base + 0x4);
@@ -1087,9 +1084,9 @@ static void kvaser_pciefd_write_dma_map_xilinx(struct kvaser_pciefd *pcie,
u32 lsb = addr & KVASER_PCIEFD_XILINX_DMA_LSB_MASK;
u32 msb = 0x0;
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- msb = addr >> 32;
-#endif
+ if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
+ msb = upper_32_bits(addr);
+
serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x8 * index;
iowrite32(msb, serdes_base);
iowrite32(lsb, serdes_base + 0x4);
@@ -1104,6 +1101,9 @@ static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
/* Disable the DMA */
iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG);
+
+ dma_set_mask_and_coherent(&pcie->pci->dev, DMA_BIT_MASK(64));
+
for (i = 0; i < KVASER_PCIEFD_DMA_COUNT; i++) {
pcie->dma_data[i] = dmam_alloc_coherent(&pcie->pci->dev,
KVASER_PCIEFD_DMA_SIZE,
@@ -1151,14 +1151,12 @@ static int kvaser_pciefd_setup_board(struct kvaser_pciefd *pcie)
u32 version, srb_status, build;
version = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_VERSION_REG);
+ build = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_BUILD_REG);
pcie->nr_channels = min(KVASER_PCIEFD_MAX_CAN_CHANNELS,
FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_NR_CHAN_MASK, version));
-
- build = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_BUILD_REG);
- dev_dbg(&pcie->pci->dev, "Version %lu.%lu.%lu\n",
- FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_MAJOR_MASK, version),
- FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_MINOR_MASK, version),
- FIELD_GET(KVASER_PCIEFD_SYSID_BUILD_SEQ_MASK, build));
+ pcie->fw_version.major = FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_MAJOR_MASK, version);
+ pcie->fw_version.minor = FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_MINOR_MASK, version);
+ pcie->fw_version.build = FIELD_GET(KVASER_PCIEFD_SYSID_BUILD_SEQ_MASK, build);
srb_status = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_STAT_REG);
if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DMA)) {
@@ -1197,7 +1195,7 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
skb = alloc_canfd_skb(priv->dev, &cf);
if (!skb) {
priv->dev->stats.rx_dropped++;
- return -ENOMEM;
+ return 0;
}
cf->len = can_fd_dlc2len(dlc);
@@ -1209,7 +1207,7 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
skb = alloc_can_skb(priv->dev, (struct can_frame **)&cf);
if (!skb) {
priv->dev->stats.rx_dropped++;
- return -ENOMEM;
+ return 0;
}
can_frame_set_cc_len((struct can_frame *)cf, dlc, priv->ctrlmode);
}
@@ -1227,15 +1225,21 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
priv->dev->stats.rx_packets++;
kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);
- return netif_rx(skb);
+ netif_rx(skb);
+
+ return 0;
}
static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can,
+ const struct can_berr_counter *bec,
struct can_frame *cf,
enum can_state new_state,
enum can_state tx_state,
enum can_state rx_state)
{
+ enum can_state old_state;
+
+ old_state = can->can.state;
can_change_state(can->can.dev, cf, tx_state, rx_state);
if (new_state == CAN_STATE_BUS_OFF) {
@@ -1251,6 +1255,18 @@ static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can,
can_bus_off(ndev);
}
}
+ if (old_state == CAN_STATE_BUS_OFF &&
+ new_state == CAN_STATE_ERROR_ACTIVE &&
+ can->can.restart_ms) {
+ can->can.can_stats.restarts++;
+ if (cf)
+ cf->can_id |= CAN_ERR_RESTARTED;
+ }
+ if (cf && new_state != CAN_STATE_BUS_OFF) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = bec->txerr;
+ cf->data[7] = bec->rxerr;
+ }
}
static void kvaser_pciefd_packet_to_state(struct kvaser_pciefd_rx_packet *p,
@@ -1285,7 +1301,7 @@ static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
struct can_berr_counter bec;
enum can_state old_state, new_state, tx_state, rx_state;
struct net_device *ndev = can->can.dev;
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
struct can_frame *cf = NULL;
old_state = can->can.state;
@@ -1294,16 +1310,10 @@ static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
bec.rxerr = FIELD_GET(KVASER_PCIEFD_SPACK_RXERR_MASK, p->header[0]);
kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state, &rx_state);
- skb = alloc_can_err_skb(ndev, &cf);
+ if (can->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+ skb = alloc_can_err_skb(ndev, &cf);
if (new_state != old_state) {
- kvaser_pciefd_change_state(can, cf, new_state, tx_state, rx_state);
- if (old_state == CAN_STATE_BUS_OFF &&
- new_state == CAN_STATE_ERROR_ACTIVE &&
- can->can.restart_ms) {
- can->can.can_stats.restarts++;
- if (skb)
- cf->can_id |= CAN_ERR_RESTARTED;
- }
+ kvaser_pciefd_change_state(can, &bec, cf, new_state, tx_state, rx_state);
}
can->err_rep_cnt++;
@@ -1316,18 +1326,19 @@ static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
can->bec.txerr = bec.txerr;
can->bec.rxerr = bec.rxerr;
- if (!skb) {
- ndev->stats.rx_dropped++;
- return -ENOMEM;
+ if (can->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
+ if (!skb) {
+ netdev_warn(ndev, "No memory left for err_skb\n");
+ ndev->stats.rx_dropped++;
+ return -ENOMEM;
+ }
+ kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);
+ cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_CNT;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ netif_rx(skb);
}
- kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);
- cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_CNT;
- cf->data[6] = bec.txerr;
- cf->data[7] = bec.rxerr;
-
- netif_rx(skb);
-
return 0;
}
@@ -1356,6 +1367,7 @@ static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can,
{
struct can_berr_counter bec;
enum can_state old_state, new_state, tx_state, rx_state;
+ int ret = 0;
old_state = can->can.state;
@@ -1369,25 +1381,15 @@ static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can,
struct can_frame *cf;
skb = alloc_can_err_skb(ndev, &cf);
- if (!skb) {
+ kvaser_pciefd_change_state(can, &bec, cf, new_state, tx_state, rx_state);
+ if (skb) {
+ kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);
+ netif_rx(skb);
+ } else {
ndev->stats.rx_dropped++;
- return -ENOMEM;
- }
-
- kvaser_pciefd_change_state(can, cf, new_state, tx_state, rx_state);
- if (old_state == CAN_STATE_BUS_OFF &&
- new_state == CAN_STATE_ERROR_ACTIVE &&
- can->can.restart_ms) {
- can->can.can_stats.restarts++;
- cf->can_id |= CAN_ERR_RESTARTED;
+ netdev_warn(ndev, "No memory left for err_skb\n");
+ ret = -ENOMEM;
}
-
- kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);
-
- cf->data[6] = bec.txerr;
- cf->data[7] = bec.rxerr;
-
- netif_rx(skb);
}
can->bec.txerr = bec.txerr;
can->bec.rxerr = bec.rxerr;
@@ -1395,7 +1397,7 @@ static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can,
if (bec.txerr || bec.rxerr)
mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);
- return 0;
+ return ret;
}
static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie,
@@ -1504,19 +1506,21 @@ static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie,
netdev_dbg(can->can.dev, "Packet was flushed\n");
} else {
int echo_idx = FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[0]);
- int len;
- u8 count;
+ unsigned int len, frame_len = 0;
struct sk_buff *skb;
+ if (echo_idx != (can->ack_idx & (can->can.echo_skb_max - 1)))
+ return 0;
skb = can->can.echo_skb[echo_idx];
- if (skb)
- kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);
- len = can_get_echo_skb(can->can.dev, echo_idx, NULL);
- count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK,
- ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
+ if (!skb)
+ return 0;
+ kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);
+ len = can_get_echo_skb(can->can.dev, echo_idx, &frame_len);
- if (count < can->can.echo_skb_max && netif_queue_stopped(can->can.dev))
- netif_wake_queue(can->can.dev);
+ /* Pairs with barrier in kvaser_pciefd_start_xmit() */
+ smp_store_release(&can->ack_idx, can->ack_idx + 1);
+ can->completed_tx_pkts++;
+ can->completed_tx_bytes += frame_len;
if (!one_shot_fail) {
can->can.dev->stats.tx_bytes += len;
@@ -1619,7 +1623,7 @@ static int kvaser_pciefd_read_packet(struct kvaser_pciefd *pcie, int *start_pos,
/* Position does not point to the end of the package,
* corrupted packet size?
*/
- if ((*start_pos + size) != pos)
+ if (unlikely((*start_pos + size) != pos))
return -EIO;
/* Point to the next packet header, if any */
@@ -1632,39 +1636,51 @@ static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf)
{
int pos = 0;
int res = 0;
+ unsigned int i;
do {
res = kvaser_pciefd_read_packet(pcie, &pos, dma_buf);
} while (!res && pos > 0 && pos < KVASER_PCIEFD_DMA_SIZE);
+ /* Report ACKs in this buffer to BQL en masse for correct periods */
+ for (i = 0; i < pcie->nr_channels; ++i) {
+ struct kvaser_pciefd_can *can = pcie->can[i];
+
+ if (!can->completed_tx_pkts)
+ continue;
+ netif_subqueue_completed_wake(can->can.dev, 0,
+ can->completed_tx_pkts,
+ can->completed_tx_bytes,
+ kvaser_pciefd_tx_avail(can), 1);
+ can->completed_tx_pkts = 0;
+ can->completed_tx_bytes = 0;
+ }
+
return res;
}
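The loop above batches the BQL bookkeeping: bytes are accounted on the transmit path (netdev_sent_queue() together with netif_subqueue_maybe_stop() in kvaser_pciefd_start_xmit()) and reported back once per DMA buffer rather than per frame, with netif_subqueue_completed_wake() also restarting a stopped queue. A hedged sketch of the same pairing for a hypothetical single-queue driver, written with the plain netdev_sent_queue()/netdev_completed_queue() helpers; my_priv, my_xmit() and my_tx_completion() are illustrative names only, not part of this patch:

/* Sketch: pair every netdev_sent_queue() with a (possibly batched)
 * netdev_completed_queue(); netdev_reset_queue() on stop clears the state.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct my_priv {
	unsigned int completed_pkts;
	unsigned int completed_bytes;
};

static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int len = skb->len;

	/* ... hand the frame to the hardware ... */
	netdev_sent_queue(dev, len);		/* tell BQL what went in */
	return NETDEV_TX_OK;
}

/* Called once per completion batch (e.g. per DMA buffer), not per frame,
 * so BQL sees a single completion event covering the whole batch.
 */
static void my_tx_completion(struct net_device *dev, struct my_priv *p)
{
	netdev_completed_queue(dev, p->completed_pkts, p->completed_bytes);
	p->completed_pkts = 0;
	p->completed_bytes = 0;
}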
static void kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
{
+ void __iomem *srb_cmd_reg = KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG;
u32 irq = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
+ iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
+
if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) {
kvaser_pciefd_read_buffer(pcie, 0);
- /* Reset DMA buffer 0 */
- iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
- KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
+ iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0, srb_cmd_reg); /* Rearm buffer */
}
if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) {
kvaser_pciefd_read_buffer(pcie, 1);
- /* Reset DMA buffer 1 */
- iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
- KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
+ iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1, srb_cmd_reg); /* Rearm buffer */
}
- if (irq & KVASER_PCIEFD_SRB_IRQ_DOF0 ||
- irq & KVASER_PCIEFD_SRB_IRQ_DOF1 ||
- irq & KVASER_PCIEFD_SRB_IRQ_DUF0 ||
- irq & KVASER_PCIEFD_SRB_IRQ_DUF1)
+ if (unlikely(irq & KVASER_PCIEFD_SRB_IRQ_DOF0 ||
+ irq & KVASER_PCIEFD_SRB_IRQ_DOF1 ||
+ irq & KVASER_PCIEFD_SRB_IRQ_DUF0 ||
+ irq & KVASER_PCIEFD_SRB_IRQ_DUF1))
dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq);
-
- iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
}
static void kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
@@ -1691,27 +1707,24 @@ static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
{
struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev;
const struct kvaser_pciefd_irq_mask *irq_mask = pcie->driver_data->irq_mask;
- u32 board_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie));
+ u32 pci_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie));
int i;
- if (!(board_irq & irq_mask->all))
+ if (!(pci_irq & irq_mask->all))
return IRQ_NONE;
- if (board_irq & irq_mask->kcan_rx0)
+ iowrite32(0, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));
+
+ if (pci_irq & irq_mask->kcan_rx0)
kvaser_pciefd_receive_irq(pcie);
for (i = 0; i < pcie->nr_channels; i++) {
- if (!pcie->can[i]) {
- dev_err(&pcie->pci->dev,
- "IRQ mask points to unallocated controller\n");
- break;
- }
-
- /* Check that mask matches channel (i) IRQ mask */
- if (board_irq & irq_mask->kcan_tx[i])
+ if (pci_irq & irq_mask->kcan_tx[i])
kvaser_pciefd_transmit_irq(pcie->can[i]);
}
+ iowrite32(irq_mask->all, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));
+
return IRQ_HANDLED;
}
@@ -1725,61 +1738,87 @@ static void kvaser_pciefd_teardown_can_ctrls(struct kvaser_pciefd *pcie)
if (can) {
iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
kvaser_pciefd_pwm_stop(can);
+ kvaser_pciefd_devlink_port_unregister(can);
free_candev(can->can.dev);
}
}
}
+static void kvaser_pciefd_disable_irq_srcs(struct kvaser_pciefd *pcie)
+{
+ unsigned int i;
+
+	/* Masking PCI_IRQ is insufficient as a running ISR will unmask it */
+ iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IEN_REG);
+ for (i = 0; i < pcie->nr_channels; ++i)
+ iowrite32(0, pcie->can[i]->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
+}
+
static int kvaser_pciefd_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
- int err;
+ int ret;
+ struct devlink *devlink;
+ struct device *dev = &pdev->dev;
struct kvaser_pciefd *pcie;
const struct kvaser_pciefd_irq_mask *irq_mask;
- void __iomem *irq_en_base;
- pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
- if (!pcie)
+ devlink = devlink_alloc(&kvaser_pciefd_devlink_ops, sizeof(*pcie), dev);
+ if (!devlink)
return -ENOMEM;
+ pcie = devlink_priv(devlink);
pci_set_drvdata(pdev, pcie);
pcie->pci = pdev;
pcie->driver_data = (const struct kvaser_pciefd_driver_data *)id->driver_data;
irq_mask = pcie->driver_data->irq_mask;
- err = pci_enable_device(pdev);
- if (err)
- return err;
+ ret = pci_enable_device(pdev);
+ if (ret)
+ goto err_free_devlink;
- err = pci_request_regions(pdev, KVASER_PCIEFD_DRV_NAME);
- if (err)
+ ret = pci_request_regions(pdev, KVASER_PCIEFD_DRV_NAME);
+ if (ret)
goto err_disable_pci;
pcie->reg_base = pci_iomap(pdev, 0, 0);
if (!pcie->reg_base) {
- err = -ENOMEM;
+ ret = -ENOMEM;
goto err_release_regions;
}
- err = kvaser_pciefd_setup_board(pcie);
- if (err)
+ ret = kvaser_pciefd_setup_board(pcie);
+ if (ret)
goto err_pci_iounmap;
- err = kvaser_pciefd_setup_dma(pcie);
- if (err)
+ ret = kvaser_pciefd_setup_dma(pcie);
+ if (ret)
goto err_pci_iounmap;
pci_set_master(pdev);
- err = kvaser_pciefd_setup_can_ctrls(pcie);
- if (err)
+ ret = kvaser_pciefd_setup_can_ctrls(pcie);
+ if (ret)
goto err_teardown_can_ctrls;
- err = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler,
- IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie);
- if (err)
+ ret = pci_alloc_irq_vectors(pcie->pci, 1, 1, PCI_IRQ_INTX | PCI_IRQ_MSI);
+ if (ret < 0) {
+ dev_err(dev, "Failed to allocate IRQ vectors.\n");
goto err_teardown_can_ctrls;
+ }
+
+ ret = pci_irq_vector(pcie->pci, 0);
+ if (ret < 0)
+ goto err_pci_free_irq_vectors;
+ pcie->pci->irq = ret;
+
+ ret = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler,
+ IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie);
+ if (ret) {
+ dev_err(dev, "Failed to request IRQ %d\n", pcie->pci->irq);
+ goto err_pci_free_irq_vectors;
+ }
iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1,
KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
@@ -1789,25 +1828,28 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IEN_REG);
/* Enable PCI interrupts */
- irq_en_base = KVASER_PCIEFD_PCI_IEN_ADDR(pcie);
- iowrite32(irq_mask->all, irq_en_base);
+ iowrite32(irq_mask->all, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));
/* Ready the DMA buffers */
iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
- err = kvaser_pciefd_reg_candev(pcie);
- if (err)
+ ret = kvaser_pciefd_reg_candev(pcie);
+ if (ret)
goto err_free_irq;
+ devlink_register(devlink);
+
return 0;
err_free_irq:
- /* Disable PCI interrupts */
- iowrite32(0, irq_en_base);
+ kvaser_pciefd_disable_irq_srcs(pcie);
free_irq(pcie->pci->irq, pcie);
+err_pci_free_irq_vectors:
+ pci_free_irq_vectors(pcie->pci);
+
err_teardown_can_ctrls:
kvaser_pciefd_teardown_can_ctrls(pcie);
iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG);
@@ -1822,41 +1864,38 @@ err_release_regions:
err_disable_pci:
pci_disable_device(pdev);
- return err;
-}
-
-static void kvaser_pciefd_remove_all_ctrls(struct kvaser_pciefd *pcie)
-{
- int i;
-
- for (i = 0; i < pcie->nr_channels; i++) {
- struct kvaser_pciefd_can *can = pcie->can[i];
+err_free_devlink:
+ devlink_free(devlink);
- if (can) {
- iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
- unregister_candev(can->can.dev);
- del_timer(&can->bec_poll_timer);
- kvaser_pciefd_pwm_stop(can);
- free_candev(can->can.dev);
- }
- }
+ return ret;
}
static void kvaser_pciefd_remove(struct pci_dev *pdev)
{
struct kvaser_pciefd *pcie = pci_get_drvdata(pdev);
+ unsigned int i;
- kvaser_pciefd_remove_all_ctrls(pcie);
+ for (i = 0; i < pcie->nr_channels; ++i) {
+ struct kvaser_pciefd_can *can = pcie->can[i];
- /* Disable interrupts */
- iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG);
- iowrite32(0, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));
+ unregister_candev(can->can.dev);
+ timer_delete(&can->bec_poll_timer);
+ kvaser_pciefd_pwm_stop(can);
+ kvaser_pciefd_devlink_port_unregister(can);
+ }
+ kvaser_pciefd_disable_irq_srcs(pcie);
free_irq(pcie->pci->irq, pcie);
+ pci_free_irq_vectors(pcie->pci);
+
+ for (i = 0; i < pcie->nr_channels; ++i)
+ free_candev(pcie->can[i]->can.dev);
+ devlink_unregister(priv_to_devlink(pcie));
pci_iounmap(pdev, pcie->reg_base);
pci_release_regions(pdev);
pci_disable_device(pdev);
+ devlink_free(priv_to_devlink(pcie));
}
static struct pci_driver kvaser_pciefd = {
diff --git a/drivers/net/can/kvaser_pciefd/kvaser_pciefd_devlink.c b/drivers/net/can/kvaser_pciefd/kvaser_pciefd_devlink.c
new file mode 100644
index 000000000000..1d61a8b0eeba
--- /dev/null
+++ b/drivers/net/can/kvaser_pciefd/kvaser_pciefd_devlink.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/* kvaser_pciefd devlink functions
+ *
+ * Copyright (C) 2025 KVASER AB, Sweden. All rights reserved.
+ */
+#include "kvaser_pciefd.h"
+
+#include <linux/netdevice.h>
+#include <net/devlink.h>
+
+static int kvaser_pciefd_devlink_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct kvaser_pciefd *pcie = devlink_priv(devlink);
+ char buf[] = "xxx.xxx.xxxxx";
+ int ret;
+
+ if (pcie->fw_version.major) {
+ snprintf(buf, sizeof(buf), "%u.%u.%u",
+ pcie->fw_version.major,
+ pcie->fw_version.minor,
+ pcie->fw_version.build);
+ ret = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ buf);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+const struct devlink_ops kvaser_pciefd_devlink_ops = {
+ .info_get = kvaser_pciefd_devlink_info_get,
+};
+
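A small aside on the buffer declaration in kvaser_pciefd_devlink_info_get() above: the literal "xxx.xxx.xxxxx" sizes the array, so sizeof(buf) already covers the widest version string plus the terminating NUL. A stand-alone illustration, assuming the major/minor/build fields fit those widths (as the register masks imply):

#include <stdio.h>

int main(void)
{
	/* The literal acts as a worst-case template: sizeof(buf) includes
	 * the NUL, so snprintf() never truncates a "%u.%u.%u" of up to
	 * 3+3+5 digits.
	 */
	char buf[] = "xxx.xxx.xxxxx";
	unsigned int major = 4, minor = 2, build = 12345;

	snprintf(buf, sizeof(buf), "%u.%u.%u", major, minor, build);
	printf("%s\n", buf);	/* prints "4.2.12345" */
	return 0;
}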
+int kvaser_pciefd_devlink_port_register(struct kvaser_pciefd_can *can)
+{
+ int ret;
+ struct devlink_port_attrs attrs = {
+ .flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL,
+ .phys.port_number = can->can.dev->dev_port,
+ };
+ devlink_port_attrs_set(&can->devlink_port, &attrs);
+
+ ret = devlink_port_register(priv_to_devlink(can->kv_pcie),
+ &can->devlink_port, can->can.dev->dev_port);
+ if (ret)
+ return ret;
+
+ SET_NETDEV_DEVLINK_PORT(can->can.dev, &can->devlink_port);
+
+ return 0;
+}
+
+void kvaser_pciefd_devlink_port_unregister(struct kvaser_pciefd_can *can)
+{
+ devlink_port_unregister(&can->devlink_port);
+}
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 14b231c4d7ec..eb856547ae7d 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
// CAN bus driver for Bosch M_CAN controller
// Copyright (C) 2014 Freescale Semiconductor, Inc.
-// Dong Aisheng <b29396@freescale.com>
+// Dong Aisheng <aisheng.dong@nxp.com>
// Copyright (C) 2018-19 Texas Instruments Incorporated - http://www.ti.com/
/* Bosch M_CAN user manual can be obtained from:
@@ -23,6 +23,7 @@
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#include "m_can.h"
@@ -379,45 +380,79 @@ m_can_txe_fifo_read(struct m_can_classdev *cdev, u32 fgi, u32 offset, u32 *val)
return cdev->ops->read_fifo(cdev, addr_offset, val, 1);
}
-static void m_can_config_endisable(struct m_can_classdev *cdev, bool enable)
+static int m_can_cccr_update_bits(struct m_can_classdev *cdev, u32 mask, u32 val)
{
- u32 cccr = m_can_read(cdev, M_CAN_CCCR);
- u32 timeout = 10;
- u32 val = 0;
+ u32 val_before = m_can_read(cdev, M_CAN_CCCR);
+ u32 val_after = (val_before & ~mask) | val;
+ size_t tries = 10;
- /* Clear the Clock stop request if it was set */
- if (cccr & CCCR_CSR)
- cccr &= ~CCCR_CSR;
-
- if (enable) {
- /* enable m_can configuration */
- m_can_write(cdev, M_CAN_CCCR, cccr | CCCR_INIT);
- udelay(5);
- /* CCCR.CCE can only be set/reset while CCCR.INIT = '1' */
- m_can_write(cdev, M_CAN_CCCR, cccr | CCCR_INIT | CCCR_CCE);
- } else {
- m_can_write(cdev, M_CAN_CCCR, cccr & ~(CCCR_INIT | CCCR_CCE));
+ if (!(mask & CCCR_INIT) && !(val_before & CCCR_INIT)) {
+ netdev_err(cdev->net,
+ "refusing to configure device when in normal mode\n");
+ return -EBUSY;
}
- /* there's a delay for module initialization */
- if (enable)
- val = CCCR_INIT | CCCR_CCE;
+ /* The chip should be in standby mode when changing the CCCR register,
+ * and some chips set the CSR and CSA bits when in standby. Furthermore,
+ * the CSR and CSA bits should be written as zeros, even when they read
+ * ones.
+ */
+ val_after &= ~(CCCR_CSR | CCCR_CSA);
- while ((m_can_read(cdev, M_CAN_CCCR) & (CCCR_INIT | CCCR_CCE)) != val) {
- if (timeout == 0) {
- netdev_warn(cdev->net, "Failed to init module\n");
- return;
- }
- timeout--;
- udelay(1);
+ while (tries--) {
+ u32 val_read;
+
+ /* Write the desired value in each try, as setting some bits in
+	 * the CCCR register requires other bits to be set first. E.g.
+ * setting the NISO bit requires setting the CCE bit first.
+ */
+ m_can_write(cdev, M_CAN_CCCR, val_after);
+
+ val_read = m_can_read(cdev, M_CAN_CCCR) & ~(CCCR_CSR | CCCR_CSA);
+
+ if (val_read == val_after)
+ return 0;
+
+ usleep_range(1, 5);
}
+
+ return -ETIMEDOUT;
+}
+
+static int m_can_config_enable(struct m_can_classdev *cdev)
+{
+ int err;
+
+ /* CCCR_INIT must be set in order to set CCCR_CCE, but access to
+ * configuration registers should only be enabled when in standby mode,
+ * where CCCR_INIT is always set.
+ */
+ err = m_can_cccr_update_bits(cdev, CCCR_CCE, CCCR_CCE);
+ if (err)
+ netdev_err(cdev->net, "failed to enable configuration mode\n");
+
+ return err;
+}
+
+static int m_can_config_disable(struct m_can_classdev *cdev)
+{
+ int err;
+
+ /* Only clear CCCR_CCE, since CCCR_INIT cannot be cleared while in
+ * standby mode
+ */
+ err = m_can_cccr_update_bits(cdev, CCCR_CCE, 0);
+ if (err)
+ netdev_err(cdev->net, "failed to disable configuration registers\n");
+
+ return err;
}
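m_can_config_enable()/m_can_config_disable() are meant to bracket writes to the CCE-protected configuration registers while the core sits in standby (CCCR.INIT set). A hedged sketch of the intended call pattern, mirroring how m_can_chip_config() uses it later in this patch; example_configure() and the register write in the middle are placeholders only:

/* Sketch: bracket protected register writes with the CCE helpers. */
static int example_configure(struct m_can_classdev *cdev)
{
	int err;

	err = m_can_config_enable(cdev);	/* sets CCCR.CCE, needs CCCR.INIT */
	if (err)
		return err;

	/* ... write CCE-protected registers here, e.g. M_CAN_RXESC ... */

	return m_can_config_disable(cdev);	/* clears CCCR.CCE again */
}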
static void m_can_interrupt_enable(struct m_can_classdev *cdev, u32 interrupts)
{
if (cdev->active_interrupts == interrupts)
return;
- cdev->ops->write_reg(cdev, M_CAN_IE, interrupts);
+ m_can_write(cdev, M_CAN_IE, interrupts);
cdev->active_interrupts = interrupts;
}
@@ -435,7 +470,7 @@ static void m_can_coalescing_disable(struct m_can_classdev *cdev)
static inline void m_can_enable_all_interrupts(struct m_can_classdev *cdev)
{
if (!cdev->net->irq) {
- dev_dbg(cdev->dev, "Start hrtimer\n");
+ netdev_dbg(cdev->net, "Start hrtimer\n");
hrtimer_start(&cdev->hrtimer,
ms_to_ktime(HRTIMER_POLL_INTERVAL_MS),
HRTIMER_MODE_REL_PINNED);
@@ -449,11 +484,10 @@ static inline void m_can_disable_all_interrupts(struct m_can_classdev *cdev)
{
m_can_coalescing_disable(cdev);
m_can_write(cdev, M_CAN_ILE, 0x0);
- cdev->active_interrupts = 0x0;
if (!cdev->net->irq) {
- dev_dbg(cdev->dev, "Stop hrtimer\n");
- hrtimer_cancel(&cdev->hrtimer);
+ netdev_dbg(cdev->net, "Stop hrtimer\n");
+ hrtimer_try_to_cancel(&cdev->hrtimer);
}
}
@@ -632,7 +666,7 @@ static int m_can_handle_lost_msg(struct net_device *dev)
struct can_frame *frame;
u32 timestamp = 0;
- netdev_err(dev, "msg lost in rxf0\n");
+ netdev_dbg(dev, "msg lost in rxf0\n");
stats->rx_errors++;
stats->rx_over_errors++;
@@ -662,47 +696,60 @@ static int m_can_handle_lec_err(struct net_device *dev,
u32 timestamp = 0;
cdev->can.can_stats.bus_error++;
- stats->rx_errors++;
/* propagate the error condition to the CAN stack */
skb = alloc_can_err_skb(dev, &cf);
- if (unlikely(!skb))
- return 0;
/* check for 'last error code' which tells us the
* type of the last error to occur on the CAN bus
*/
- cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ if (likely(skb))
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
switch (lec_type) {
case LEC_STUFF_ERROR:
netdev_dbg(dev, "stuff error\n");
- cf->data[2] |= CAN_ERR_PROT_STUFF;
+ stats->rx_errors++;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
break;
case LEC_FORM_ERROR:
netdev_dbg(dev, "form error\n");
- cf->data[2] |= CAN_ERR_PROT_FORM;
+ stats->rx_errors++;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_FORM;
break;
case LEC_ACK_ERROR:
netdev_dbg(dev, "ack error\n");
- cf->data[3] = CAN_ERR_PROT_LOC_ACK;
+ stats->tx_errors++;
+ if (likely(skb))
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK;
break;
case LEC_BIT1_ERROR:
netdev_dbg(dev, "bit1 error\n");
- cf->data[2] |= CAN_ERR_PROT_BIT1;
+ stats->tx_errors++;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_BIT1;
break;
case LEC_BIT0_ERROR:
netdev_dbg(dev, "bit0 error\n");
- cf->data[2] |= CAN_ERR_PROT_BIT0;
+ stats->tx_errors++;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_BIT0;
break;
case LEC_CRC_ERROR:
netdev_dbg(dev, "CRC error\n");
- cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+ stats->rx_errors++;
+ if (likely(skb))
+ cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
break;
default:
break;
}
+ if (unlikely(!skb))
+ return 0;
+
if (cdev->is_peripheral)
timestamp = m_can_get_timestamp(cdev);
@@ -744,6 +791,10 @@ static int m_can_get_berr_counter(const struct net_device *dev,
struct m_can_classdev *cdev = netdev_priv(dev);
int err;
+ /* Avoid waking up the controller if the interface is down */
+ if (!(dev->flags & IFF_UP))
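The helper above works because tx_idx and ack_idx are free-running counters: their unsigned difference is the number of frames in flight even across wraparound, and masking with a power-of-two ring size (which is why the echo buffer count is rounded up with roundup_pow_of_two() further down) yields the slot index. A minimal user-space sketch of the same idiom; ring_avail, head and tail are illustrative names only:

#include <assert.h>

#define RING_SIZE 16U			/* must be a power of two */

/* Free-running producer/consumer counters; never reset, never masked. */
static unsigned int head, tail;

static unsigned int ring_avail(void)
{
	return RING_SIZE - (head - tail);	/* wrap-safe unsigned difference */
}

int main(void)
{
	head = tail = 4294967290U;		/* close to UINT_MAX on purpose */
	head += 10;				/* produce 10 entries across the wrap */
	assert(ring_avail() == RING_SIZE - 10);
	tail += 10;				/* consume them all */
	assert(ring_avail() == RING_SIZE);
	assert((head & (RING_SIZE - 1)) == 4);	/* slot index, cf. tx_idx masking */
	return 0;
}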
+ return 0;
+
err = m_can_clk_start(cdev);
if (err)
return err;
@@ -766,6 +817,9 @@ static int m_can_handle_state_change(struct net_device *dev,
u32 timestamp = 0;
switch (new_state) {
+ case CAN_STATE_ERROR_ACTIVE:
+ cdev->can.state = CAN_STATE_ERROR_ACTIVE;
+ break;
case CAN_STATE_ERROR_WARNING:
/* error warning state */
cdev->can.can_stats.error_warning++;
@@ -795,6 +849,12 @@ static int m_can_handle_state_change(struct net_device *dev,
__m_can_get_berr_counter(dev, &bec);
switch (new_state) {
+ case CAN_STATE_ERROR_ACTIVE:
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
+ cf->data[1] = CAN_ERR_CRTL_ACTIVE;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ break;
case CAN_STATE_ERROR_WARNING:
/* error warning state */
cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
@@ -831,30 +891,33 @@ static int m_can_handle_state_change(struct net_device *dev,
return 1;
}
-static int m_can_handle_state_errors(struct net_device *dev, u32 psr)
+static enum can_state
+m_can_state_get_by_psr(struct m_can_classdev *cdev)
{
- struct m_can_classdev *cdev = netdev_priv(dev);
- int work_done = 0;
+ u32 reg_psr;
- if (psr & PSR_EW && cdev->can.state != CAN_STATE_ERROR_WARNING) {
- netdev_dbg(dev, "entered error warning state\n");
- work_done += m_can_handle_state_change(dev,
- CAN_STATE_ERROR_WARNING);
- }
+ reg_psr = m_can_read(cdev, M_CAN_PSR);
- if (psr & PSR_EP && cdev->can.state != CAN_STATE_ERROR_PASSIVE) {
- netdev_dbg(dev, "entered error passive state\n");
- work_done += m_can_handle_state_change(dev,
- CAN_STATE_ERROR_PASSIVE);
- }
+ if (reg_psr & PSR_BO)
+ return CAN_STATE_BUS_OFF;
+ if (reg_psr & PSR_EP)
+ return CAN_STATE_ERROR_PASSIVE;
+ if (reg_psr & PSR_EW)
+ return CAN_STATE_ERROR_WARNING;
- if (psr & PSR_BO && cdev->can.state != CAN_STATE_BUS_OFF) {
- netdev_dbg(dev, "entered error bus off state\n");
- work_done += m_can_handle_state_change(dev,
- CAN_STATE_BUS_OFF);
- }
+ return CAN_STATE_ERROR_ACTIVE;
+}
- return work_done;
+static int m_can_handle_state_errors(struct net_device *dev)
+{
+ struct m_can_classdev *cdev = netdev_priv(dev);
+ enum can_state new_state;
+
+ new_state = m_can_state_get_by_psr(cdev);
+ if (new_state == cdev->can.state)
+ return 0;
+
+ return m_can_handle_state_change(dev, new_state);
}
static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus)
@@ -985,8 +1048,7 @@ static int m_can_rx_handler(struct net_device *dev, int quota, u32 irqstatus)
}
if (irqstatus & IR_ERR_STATE)
- work_done += m_can_handle_state_errors(dev,
- m_can_read(cdev, M_CAN_PSR));
+ work_done += m_can_handle_state_errors(dev);
if (irqstatus & IR_ERR_BUS_30X)
work_done += m_can_handle_bus_errors(dev, irqstatus,
@@ -1003,22 +1065,6 @@ end:
return work_done;
}
-static int m_can_rx_peripheral(struct net_device *dev, u32 irqstatus)
-{
- struct m_can_classdev *cdev = netdev_priv(dev);
- int work_done;
-
- work_done = m_can_rx_handler(dev, NAPI_POLL_WEIGHT, irqstatus);
-
- /* Don't re-enable interrupts if the driver had a fatal error
- * (e.g., FIFO read failure).
- */
- if (work_done < 0)
- m_can_disable_all_interrupts(cdev);
-
- return work_done;
-}
-
static int m_can_poll(struct napi_struct *napi, int quota)
{
struct net_device *dev = napi->dev;
@@ -1183,25 +1229,39 @@ static void m_can_coalescing_update(struct m_can_classdev *cdev, u32 ir)
HRTIMER_MODE_REL);
}
-static irqreturn_t m_can_isr(int irq, void *dev_id)
+/* This interrupt handler is called either from the interrupt thread or a
+ * hrtimer. One implication is that a timer cannot be cancelled in a
+ * blocking fashion from this context.
+ */
+static int m_can_interrupt_handler(struct m_can_classdev *cdev)
{
- struct net_device *dev = (struct net_device *)dev_id;
- struct m_can_classdev *cdev = netdev_priv(dev);
- u32 ir;
+ struct net_device *dev = cdev->net;
+ u32 ir = 0, ir_read;
+ int ret;
- if (pm_runtime_suspended(cdev->dev)) {
- m_can_coalescing_disable(cdev);
+ if (pm_runtime_suspended(cdev->dev))
return IRQ_NONE;
+
+ /* The m_can controller signals its interrupt status as a level, but
+	 * depending on the integration the CPU may interpret the signal as
+ * edge-triggered (for example with m_can_pci). For these
+ * edge-triggered integrations, we must observe that IR is 0 at least
+ * once to be sure that the next interrupt will generate an edge.
+ */
+ while ((ir_read = m_can_read(cdev, M_CAN_IR)) != 0) {
+ ir |= ir_read;
+
+ /* ACK all irqs */
+ m_can_write(cdev, M_CAN_IR, ir);
+
+ if (!cdev->irq_edge_triggered)
+ break;
}
- ir = m_can_read(cdev, M_CAN_IR);
m_can_coalescing_update(cdev, ir);
if (!ir)
return IRQ_NONE;
- /* ACK all irqs */
- m_can_write(cdev, M_CAN_IR, ir);
-
if (cdev->ops->clear_interrupts)
cdev->ops->clear_interrupts(cdev);
@@ -1216,11 +1276,9 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
m_can_disable_all_interrupts(cdev);
napi_schedule(&cdev->napi);
} else {
- int pkts;
-
- pkts = m_can_rx_peripheral(dev, ir);
- if (pkts < 0)
- goto out_fail;
+ ret = m_can_rx_handler(dev, NAPI_POLL_WEIGHT, ir);
+ if (ret < 0)
+ return ret;
}
}
@@ -1238,8 +1296,9 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
} else {
if (ir & (IR_TEFN | IR_TEFW)) {
/* New TX FIFO Element arrived */
- if (m_can_echo_tx_event(dev) != 0)
- goto out_fail;
+ ret = m_can_echo_tx_event(dev);
+ if (ret != 0)
+ return ret;
}
}
@@ -1247,16 +1306,31 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
can_rx_offload_threaded_irq_finish(&cdev->offload);
return IRQ_HANDLED;
+}
-out_fail:
- m_can_disable_all_interrupts(cdev);
- return IRQ_HANDLED;
+static irqreturn_t m_can_isr(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct m_can_classdev *cdev = netdev_priv(dev);
+ int ret;
+
+ ret = m_can_interrupt_handler(cdev);
+ if (ret < 0) {
+ m_can_disable_all_interrupts(cdev);
+ return IRQ_HANDLED;
+ }
+
+ return ret;
}
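The IR drain loop in m_can_interrupt_handler() exists because the controller reports its interrupt status as a level while some integrations latch it as an edge: unless IR is observed at 0 at least once, no new edge is ever generated and interrupts stall. A small self-contained sketch of that pattern, using hypothetical read_status()/ack_status() accessors rather than the M_CAN registers:

#include <stdbool.h>
#include <stdio.h>

static unsigned int fake_reg = 0x5;		/* stands in for the IR register */

static unsigned int read_status(void)  { return fake_reg; }
static void ack_status(unsigned int b) { fake_reg &= ~b; }	/* write-1-to-clear */

static unsigned int drain_status(bool edge_triggered)
{
	unsigned int pending = 0, now;

	while ((now = read_status()) != 0) {
		pending |= now;
		ack_status(now);
		if (!edge_triggered)
			break;		/* level-triggered line: one pass is enough */
	}
	/* For an edge-triggered line the register has now been seen at 0,
	 * so the next event produces a fresh 0->1 transition.
	 */
	return pending;
}

int main(void)
{
	printf("pending 0x%x, reg now 0x%x\n", drain_status(true), fake_reg);
	return 0;
}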
static enum hrtimer_restart m_can_coalescing_timer(struct hrtimer *timer)
{
struct m_can_classdev *cdev = container_of(timer, struct m_can_classdev, hrtimer);
+ if (cdev->can.state == CAN_STATE_BUS_OFF ||
+ cdev->can.state == CAN_STATE_STOPPED)
+ return HRTIMER_NORESTART;
+
irq_wake_thread(cdev->net->irq, cdev->net);
return HRTIMER_NORESTART;
@@ -1310,11 +1384,32 @@ static const struct can_bittiming_const m_can_data_bittiming_const_31X = {
.brp_inc = 1,
};
+static int m_can_init_ram(struct m_can_classdev *cdev)
+{
+ int end, i, start;
+ int err = 0;
+
+ /* initialize the entire Message RAM in use to avoid possible
+ * ECC/parity checksum errors when reading an uninitialized buffer
+ */
+ start = cdev->mcfg[MRAM_SIDF].off;
+ end = cdev->mcfg[MRAM_TXB].off +
+ cdev->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE;
+
+ for (i = start; i < end; i += 4) {
+ err = m_can_fifo_write_no_off(cdev, i, 0x0);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
static int m_can_set_bittiming(struct net_device *dev)
{
struct m_can_classdev *cdev = netdev_priv(dev);
const struct can_bittiming *bt = &cdev->can.bittiming;
- const struct can_bittiming *dbt = &cdev->can.data_bittiming;
+ const struct can_bittiming *dbt = &cdev->can.fd.data_bittiming;
u16 brp, sjw, tseg1, tseg2;
u32 reg_btp;
@@ -1395,15 +1490,18 @@ static int m_can_chip_config(struct net_device *dev)
err = m_can_init_ram(cdev);
if (err) {
- dev_err(cdev->dev, "Message RAM configuration failed\n");
+ netdev_err(dev, "Message RAM configuration failed\n");
return err;
}
/* Disable unused interrupts */
interrupts &= ~(IR_ARA | IR_ELO | IR_DRX | IR_TEFF | IR_TFE | IR_TCF |
- IR_HPM | IR_RF1F | IR_RF1W | IR_RF1N | IR_RF0F);
+ IR_HPM | IR_RF1F | IR_RF1W | IR_RF1N | IR_RF0F |
+ IR_TSW);
- m_can_config_endisable(cdev, true);
+ err = m_can_config_enable(cdev);
+ if (err)
+ return err;
/* RX Buffer/FIFO Element Size 64 bytes data field */
m_can_write(cdev, M_CAN_RXESC,
@@ -1506,6 +1604,7 @@ static int m_can_chip_config(struct net_device *dev)
else
interrupts &= ~(IR_ERR_LEC_31X);
}
+ cdev->active_interrupts = 0;
m_can_interrupt_enable(cdev, interrupts);
/* route all interrupts to INT0 */
@@ -1521,7 +1620,9 @@ static int m_can_chip_config(struct net_device *dev)
FIELD_PREP(TSCC_TCP_MASK, 0xf) |
FIELD_PREP(TSCC_TSS_MASK, TSCC_TSS_INTERNAL));
- m_can_config_endisable(cdev, false);
+ err = m_can_config_disable(cdev);
+ if (err)
+ return err;
if (cdev->ops->init)
cdev->ops->init(cdev);
@@ -1542,7 +1643,7 @@ static int m_can_start(struct net_device *dev)
netdev_queue_set_dql_min_limit(netdev_get_tx_queue(cdev->net, 0),
cdev->tx_max_coalesced_frames);
- cdev->can.state = CAN_STATE_ERROR_ACTIVE;
+ cdev->can.state = m_can_state_get_by_psr(cdev);
m_can_enable_all_interrupts(cdev);
@@ -1550,7 +1651,11 @@ static int m_can_start(struct net_device *dev)
cdev->tx_fifo_putidx = FIELD_GET(TXFQS_TFQPI_MASK,
m_can_read(cdev, M_CAN_TXFQS));
- return 0;
+ ret = m_can_cccr_update_bits(cdev, CCCR_INIT, 0);
+ if (ret)
+ netdev_err(dev, "failed to enter normal mode\n");
+
+ return ret;
}
static int m_can_set_mode(struct net_device *dev, enum can_mode mode)
@@ -1599,52 +1704,54 @@ static int m_can_check_core_release(struct m_can_classdev *cdev)
}
/* Selectable Non ISO support only in version 3.2.x
- * This function checks if the bit is writable.
+ * Return 1 if the bit is writable, 0 if it is not, or negative on error.
*/
-static bool m_can_niso_supported(struct m_can_classdev *cdev)
+static int m_can_niso_supported(struct m_can_classdev *cdev)
{
- u32 cccr_reg, cccr_poll = 0;
- int niso_timeout = -ETIMEDOUT;
- int i;
+ int ret, niso;
- m_can_config_endisable(cdev, true);
- cccr_reg = m_can_read(cdev, M_CAN_CCCR);
- cccr_reg |= CCCR_NISO;
- m_can_write(cdev, M_CAN_CCCR, cccr_reg);
+ ret = m_can_config_enable(cdev);
+ if (ret)
+ return ret;
- for (i = 0; i <= 10; i++) {
- cccr_poll = m_can_read(cdev, M_CAN_CCCR);
- if (cccr_poll == cccr_reg) {
- niso_timeout = 0;
- break;
- }
+ /* First try to set the NISO bit. */
+ niso = m_can_cccr_update_bits(cdev, CCCR_NISO, CCCR_NISO);
- usleep_range(1, 5);
+	/* Then clear it again. */
+ ret = m_can_cccr_update_bits(cdev, CCCR_NISO, 0);
+ if (ret) {
+ netdev_err(cdev->net, "failed to revert the NON-ISO bit in CCCR\n");
+ return ret;
}
- /* Clear NISO */
- cccr_reg &= ~(CCCR_NISO);
- m_can_write(cdev, M_CAN_CCCR, cccr_reg);
-
- m_can_config_endisable(cdev, false);
+ ret = m_can_config_disable(cdev);
+ if (ret)
+ return ret;
- /* return false if time out (-ETIMEDOUT), else return true */
- return !niso_timeout;
+ return niso == 0;
}
static int m_can_dev_setup(struct m_can_classdev *cdev)
{
struct net_device *dev = cdev->net;
- int m_can_version, err;
+ int m_can_version, err, niso;
m_can_version = m_can_check_core_release(cdev);
/* return if unsupported version */
if (!m_can_version) {
- dev_err(cdev->dev, "Unsupported version number: %2d",
- m_can_version);
+ netdev_err(cdev->net, "Unsupported version number: %2d",
+ m_can_version);
return -EINVAL;
}
+ /* Write the INIT bit, in case no hardware reset has happened before
+ * the probe (for example, it was observed that the Intel Elkhart Lake
+ * SoCs do not properly reset the CAN controllers on reboot)
+ */
+ err = m_can_cccr_update_bits(cdev, CCCR_INIT, CCCR_INIT);
+ if (err)
+ return err;
+
if (!cdev->is_peripheral)
netif_napi_add(dev, &cdev->napi, m_can_poll);
@@ -1668,7 +1775,7 @@ static int m_can_dev_setup(struct m_can_classdev *cdev)
if (err)
return err;
cdev->can.bittiming_const = &m_can_bittiming_const_30X;
- cdev->can.data_bittiming_const = &m_can_data_bittiming_const_30X;
+ cdev->can.fd.data_bittiming_const = &m_can_data_bittiming_const_30X;
break;
case 31:
/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.1.x */
@@ -1676,42 +1783,52 @@ static int m_can_dev_setup(struct m_can_classdev *cdev)
if (err)
return err;
cdev->can.bittiming_const = &m_can_bittiming_const_31X;
- cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X;
+ cdev->can.fd.data_bittiming_const = &m_can_data_bittiming_const_31X;
break;
case 32:
case 33:
/* Support both MCAN version v3.2.x and v3.3.0 */
cdev->can.bittiming_const = &m_can_bittiming_const_31X;
- cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X;
+ cdev->can.fd.data_bittiming_const = &m_can_data_bittiming_const_31X;
- cdev->can.ctrlmode_supported |=
- (m_can_niso_supported(cdev) ?
- CAN_CTRLMODE_FD_NON_ISO : 0);
+ niso = m_can_niso_supported(cdev);
+ if (niso < 0)
+ return niso;
+ if (niso)
+ cdev->can.ctrlmode_supported |= CAN_CTRLMODE_FD_NON_ISO;
break;
default:
- dev_err(cdev->dev, "Unsupported version number: %2d",
- cdev->version);
+ netdev_err(cdev->net, "Unsupported version number: %2d",
+ cdev->version);
return -EINVAL;
}
- if (cdev->ops->init)
- cdev->ops->init(cdev);
-
return 0;
}
static void m_can_stop(struct net_device *dev)
{
struct m_can_classdev *cdev = netdev_priv(dev);
+ int ret;
/* disable all interrupts */
m_can_disable_all_interrupts(cdev);
/* Set init mode to disengage from the network */
- m_can_config_endisable(cdev, true);
+ ret = m_can_cccr_update_bits(cdev, CCCR_INIT, CCCR_INIT);
+ if (ret)
+ netdev_err(dev, "failed to enter standby mode: %pe\n",
+ ERR_PTR(ret));
/* set the state as STOPPED */
cdev->can.state = CAN_STATE_STOPPED;
+
+ if (cdev->ops->deinit) {
+ ret = cdev->ops->deinit(cdev);
+ if (ret)
+ netdev_err(dev, "failed to deinitialize: %pe\n",
+ ERR_PTR(ret));
+ }
}
static int m_can_close(struct net_device *dev)
@@ -1720,12 +1837,9 @@ static int m_can_close(struct net_device *dev)
netif_stop_queue(dev);
- if (!cdev->is_peripheral)
- napi_disable(&cdev->napi);
-
m_can_stop(dev);
- m_can_clk_stop(cdev);
- free_irq(dev->irq, dev);
+ if (dev->irq)
+ free_irq(dev->irq, dev);
m_can_clean(dev);
@@ -1733,10 +1847,14 @@ static int m_can_close(struct net_device *dev)
destroy_workqueue(cdev->tx_wq);
cdev->tx_wq = NULL;
can_rx_offload_disable(&cdev->offload);
+ } else {
+ napi_disable(&cdev->napi);
}
close_candev(dev);
+ reset_control_assert(cdev->rst);
+ m_can_clk_stop(cdev);
phy_power_off(cdev->transceiver);
return 0;
@@ -1859,11 +1977,6 @@ out_fail:
static void m_can_tx_submit(struct m_can_classdev *cdev)
{
- if (cdev->version == 30)
- return;
- if (!cdev->is_peripheral)
- return;
-
m_can_write(cdev, M_CAN_TXBAR, cdev->tx_peripheral_submit);
cdev->tx_peripheral_submit = 0;
}
@@ -1944,12 +2057,21 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
return ret;
}
-static enum hrtimer_restart hrtimer_callback(struct hrtimer *timer)
+static enum hrtimer_restart m_can_polling_timer(struct hrtimer *timer)
{
struct m_can_classdev *cdev = container_of(timer, struct
m_can_classdev, hrtimer);
+ int ret;
+
+ if (cdev->can.state == CAN_STATE_BUS_OFF ||
+ cdev->can.state == CAN_STATE_STOPPED)
+ return HRTIMER_NORESTART;
- m_can_isr(0, cdev->net);
+ ret = m_can_interrupt_handler(cdev);
+
+ /* On error or if napi is scheduled to read, stop the timer */
+ if (ret < 0 || napi_is_scheduled(&cdev->napi))
+ return HRTIMER_NORESTART;
hrtimer_forward_now(timer, ms_to_ktime(HRTIMER_POLL_INTERVAL_MS));
@@ -1969,15 +2091,21 @@ static int m_can_open(struct net_device *dev)
if (err)
goto out_phy_power_off;
+ err = reset_control_deassert(cdev->rst);
+ if (err)
+ goto exit_disable_clks;
+
/* open the can device */
err = open_candev(dev);
if (err) {
netdev_err(dev, "failed to open can device\n");
- goto exit_disable_clks;
+ goto out_reset_control_assert;
}
if (cdev->is_peripheral)
can_rx_offload_enable(&cdev->offload);
+ else
+ napi_enable(&cdev->napi);
/* register interrupt handler */
if (cdev->is_peripheral) {
@@ -2009,22 +2137,26 @@ static int m_can_open(struct net_device *dev)
/* start the m_can controller */
err = m_can_start(dev);
if (err)
- goto exit_irq_fail;
-
- if (!cdev->is_peripheral)
- napi_enable(&cdev->napi);
+ goto exit_start_fail;
netif_start_queue(dev);
return 0;
+exit_start_fail:
+ if (cdev->is_peripheral || dev->irq)
+ free_irq(dev->irq, dev);
exit_irq_fail:
if (cdev->is_peripheral)
destroy_workqueue(cdev->tx_wq);
out_wq_fail:
if (cdev->is_peripheral)
can_rx_offload_disable(&cdev->offload);
+ else
+ napi_disable(&cdev->napi);
close_candev(dev);
+out_reset_control_assert:
+ reset_control_assert(cdev->rst);
exit_disable_clks:
m_can_clk_stop(cdev);
out_phy_power_off:
@@ -2036,7 +2168,6 @@ static const struct net_device_ops m_can_netdev_ops = {
.ndo_open = m_can_open,
.ndo_stop = m_can_close,
.ndo_start_xmit = m_can_start_xmit,
- .ndo_change_mtu = can_change_mtu,
};
static int m_can_get_coalesce(struct net_device *dev,
@@ -2120,16 +2251,63 @@ static int m_can_set_coalesce(struct net_device *dev,
cdev->tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
if (cdev->rx_coalesce_usecs_irq)
- cdev->irq_timer_wait =
- ns_to_ktime(cdev->rx_coalesce_usecs_irq * NSEC_PER_USEC);
+ cdev->irq_timer_wait = us_to_ktime(cdev->rx_coalesce_usecs_irq);
else
- cdev->irq_timer_wait =
- ns_to_ktime(cdev->tx_coalesce_usecs_irq * NSEC_PER_USEC);
+ cdev->irq_timer_wait = us_to_ktime(cdev->tx_coalesce_usecs_irq);
return 0;
}
-static const struct ethtool_ops m_can_ethtool_ops = {
+static void m_can_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct m_can_classdev *cdev = netdev_priv(dev);
+
+ wol->supported = device_can_wakeup(cdev->dev) ? WAKE_PHY : 0;
+ wol->wolopts = device_may_wakeup(cdev->dev) ? WAKE_PHY : 0;
+}
+
+static int m_can_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct m_can_classdev *cdev = netdev_priv(dev);
+ bool wol_enable = !!(wol->wolopts & WAKE_PHY);
+ int ret;
+
+ if (wol->wolopts & ~WAKE_PHY)
+ return -EINVAL;
+
+ if (wol_enable == device_may_wakeup(cdev->dev))
+ return 0;
+
+ ret = device_set_wakeup_enable(cdev->dev, wol_enable);
+ if (ret) {
+ netdev_err(cdev->net, "Failed to set wakeup enable %pE\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+
+ if (!IS_ERR_OR_NULL(cdev->pinctrl_state_wakeup)) {
+ if (wol_enable)
+ ret = pinctrl_select_state(cdev->pinctrl, cdev->pinctrl_state_wakeup);
+ else
+ ret = pinctrl_pm_select_default_state(cdev->dev);
+
+ if (ret) {
+ netdev_err(cdev->net, "Failed to select pinctrl state %pE\n",
+ ERR_PTR(ret));
+ goto err_wakeup_enable;
+ }
+ }
+
+ return 0;
+
+err_wakeup_enable:
+ /* Revert wakeup enable */
+ device_set_wakeup_enable(cdev->dev, !wol_enable);
+
+ return ret;
+}
+
+static const struct ethtool_ops m_can_ethtool_ops_coalescing = {
.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS_IRQ |
ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ |
ETHTOOL_COALESCE_TX_USECS_IRQ |
@@ -2138,20 +2316,26 @@ static const struct ethtool_ops m_can_ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info,
.get_coalesce = m_can_get_coalesce,
.set_coalesce = m_can_set_coalesce,
+ .get_wol = m_can_get_wol,
+ .set_wol = m_can_set_wol,
};
-static const struct ethtool_ops m_can_ethtool_ops_polling = {
+static const struct ethtool_ops m_can_ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info,
+ .get_wol = m_can_get_wol,
+ .set_wol = m_can_set_wol,
};
-static int register_m_can_dev(struct net_device *dev)
+static int register_m_can_dev(struct m_can_classdev *cdev)
{
+ struct net_device *dev = cdev->net;
+
dev->flags |= IFF_ECHO; /* we support local echo */
dev->netdev_ops = &m_can_netdev_ops;
- if (dev->irq)
- dev->ethtool_ops = &m_can_ethtool_ops;
+ if (dev->irq && cdev->is_peripheral)
+ dev->ethtool_ops = &m_can_ethtool_ops_coalescing;
else
- dev->ethtool_ops = &m_can_ethtool_ops_polling;
+ dev->ethtool_ops = &m_can_ethtool_ops;
return register_candev(dev);
}
@@ -2163,8 +2347,8 @@ int m_can_check_mram_cfg(struct m_can_classdev *cdev, u32 mram_max_size)
total_size = cdev->mcfg[MRAM_TXB].off - cdev->mcfg[MRAM_SIDF].off +
cdev->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE;
if (total_size > mram_max_size) {
- dev_err(cdev->dev, "Total size of mram config(%u) exceeds mram(%u)\n",
- total_size, mram_max_size);
+ netdev_err(cdev->net, "Total size of mram config(%u) exceeds mram(%u)\n",
+ total_size, mram_max_size);
return -EINVAL;
}
@@ -2199,39 +2383,17 @@ static void m_can_of_parse_mram(struct m_can_classdev *cdev,
cdev->mcfg[MRAM_TXB].num = mram_config_vals[7] &
FIELD_MAX(TXBC_NDTB_MASK);
- dev_dbg(cdev->dev,
- "sidf 0x%x %d xidf 0x%x %d rxf0 0x%x %d rxf1 0x%x %d rxb 0x%x %d txe 0x%x %d txb 0x%x %d\n",
- cdev->mcfg[MRAM_SIDF].off, cdev->mcfg[MRAM_SIDF].num,
- cdev->mcfg[MRAM_XIDF].off, cdev->mcfg[MRAM_XIDF].num,
- cdev->mcfg[MRAM_RXF0].off, cdev->mcfg[MRAM_RXF0].num,
- cdev->mcfg[MRAM_RXF1].off, cdev->mcfg[MRAM_RXF1].num,
- cdev->mcfg[MRAM_RXB].off, cdev->mcfg[MRAM_RXB].num,
- cdev->mcfg[MRAM_TXE].off, cdev->mcfg[MRAM_TXE].num,
- cdev->mcfg[MRAM_TXB].off, cdev->mcfg[MRAM_TXB].num);
+ netdev_dbg(cdev->net,
+ "sidf 0x%x %d xidf 0x%x %d rxf0 0x%x %d rxf1 0x%x %d rxb 0x%x %d txe 0x%x %d txb 0x%x %d\n",
+ cdev->mcfg[MRAM_SIDF].off, cdev->mcfg[MRAM_SIDF].num,
+ cdev->mcfg[MRAM_XIDF].off, cdev->mcfg[MRAM_XIDF].num,
+ cdev->mcfg[MRAM_RXF0].off, cdev->mcfg[MRAM_RXF0].num,
+ cdev->mcfg[MRAM_RXF1].off, cdev->mcfg[MRAM_RXF1].num,
+ cdev->mcfg[MRAM_RXB].off, cdev->mcfg[MRAM_RXB].num,
+ cdev->mcfg[MRAM_TXE].off, cdev->mcfg[MRAM_TXE].num,
+ cdev->mcfg[MRAM_TXB].off, cdev->mcfg[MRAM_TXB].num);
}
-int m_can_init_ram(struct m_can_classdev *cdev)
-{
- int end, i, start;
- int err = 0;
-
- /* initialize the entire Message RAM in use to avoid possible
- * ECC/parity checksum errors when reading an uninitialized buffer
- */
- start = cdev->mcfg[MRAM_SIDF].off;
- end = cdev->mcfg[MRAM_TXB].off +
- cdev->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE;
-
- for (i = start; i < end; i += 4) {
- err = m_can_fifo_write_no_off(cdev, i, 0x0);
- if (err)
- break;
- }
-
- return err;
-}
-EXPORT_SYMBOL_GPL(m_can_init_ram);
-
int m_can_class_get_clocks(struct m_can_classdev *cdev)
{
int ret = 0;
@@ -2240,7 +2402,7 @@ int m_can_class_get_clocks(struct m_can_classdev *cdev)
cdev->cclk = devm_clk_get(cdev->dev, "cclk");
if (IS_ERR(cdev->hclk) || IS_ERR(cdev->cclk)) {
- dev_err(cdev->dev, "no clock found\n");
+ netdev_err(cdev->net, "no clock found\n");
ret = -ENODEV;
}
@@ -2248,6 +2410,42 @@ int m_can_class_get_clocks(struct m_can_classdev *cdev)
}
EXPORT_SYMBOL_GPL(m_can_class_get_clocks);
+static bool m_can_class_wakeup_pinctrl_enabled(struct m_can_classdev *class_dev)
+{
+ return device_may_wakeup(class_dev->dev) && class_dev->pinctrl_state_wakeup;
+}
+
+static int m_can_class_parse_pinctrl(struct m_can_classdev *class_dev)
+{
+ struct device *dev = class_dev->dev;
+ int ret;
+
+ class_dev->pinctrl = devm_pinctrl_get(dev);
+ if (IS_ERR(class_dev->pinctrl)) {
+ ret = PTR_ERR(class_dev->pinctrl);
+ class_dev->pinctrl = NULL;
+
+ if (ret == -ENODEV)
+ return 0;
+
+ return dev_err_probe(dev, ret, "Failed to get pinctrl\n");
+ }
+
+ class_dev->pinctrl_state_wakeup =
+ pinctrl_lookup_state(class_dev->pinctrl, "wakeup");
+ if (IS_ERR(class_dev->pinctrl_state_wakeup)) {
+ ret = PTR_ERR(class_dev->pinctrl_state_wakeup);
+ class_dev->pinctrl_state_wakeup = NULL;
+
+ if (ret == -ENODEV)
+ return 0;
+
+ return dev_err_probe(dev, ret, "Failed to lookup pinctrl wakeup state\n");
+ }
+
+ return 0;
+}
+
struct m_can_classdev *m_can_class_allocate_dev(struct device *dev,
int sizeof_priv)
{
@@ -2263,9 +2461,12 @@ struct m_can_classdev *m_can_class_allocate_dev(struct device *dev,
sizeof(mram_config_vals) / 4);
if (ret) {
dev_err(dev, "Could not get Message RAM configuration.");
- goto out;
+ return ERR_PTR(ret);
}
+ if (dev->of_node && of_property_read_bool(dev->of_node, "wakeup-source"))
+ device_set_wakeup_capable(dev, true);
+
/* Get TX FIFO size
* Defines the total amount of echo buffers for loopback
*/
@@ -2275,7 +2476,7 @@ struct m_can_classdev *m_can_class_allocate_dev(struct device *dev,
net_dev = alloc_candev(sizeof_priv, tx_fifo_size);
if (!net_dev) {
dev_err(dev, "Failed to allocate CAN device");
- goto out;
+ return ERR_PTR(-ENOMEM);
}
class_dev = netdev_priv(net_dev);
@@ -2284,8 +2485,17 @@ struct m_can_classdev *m_can_class_allocate_dev(struct device *dev,
SET_NETDEV_DEV(net_dev, dev);
m_can_of_parse_mram(class_dev, mram_config_vals);
-out:
+ spin_lock_init(&class_dev->tx_handling_spinlock);
+
+ ret = m_can_class_parse_pinctrl(class_dev);
+ if (ret)
+ goto err_free_candev;
+
return class_dev;
+
+err_free_candev:
+ free_candev(net_dev);
+ return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(m_can_class_allocate_dev);
@@ -2306,52 +2516,60 @@ int m_can_class_register(struct m_can_classdev *cdev)
devm_kzalloc(cdev->dev,
cdev->tx_fifo_size * sizeof(*cdev->tx_ops),
GFP_KERNEL);
- if (!cdev->tx_ops) {
- dev_err(cdev->dev, "Failed to allocate tx_ops for workqueue\n");
+ if (!cdev->tx_ops)
return -ENOMEM;
- }
}
+ cdev->rst = devm_reset_control_get_optional_shared(cdev->dev, NULL);
+ if (IS_ERR(cdev->rst))
+ return dev_err_probe(cdev->dev, PTR_ERR(cdev->rst),
+ "Failed to get reset line\n");
+
ret = m_can_clk_start(cdev);
if (ret)
return ret;
+ ret = reset_control_deassert(cdev->rst);
+ if (ret)
+ goto clk_disable;
+
if (cdev->is_peripheral) {
ret = can_rx_offload_add_manual(cdev->net, &cdev->offload,
NAPI_POLL_WEIGHT);
if (ret)
- goto clk_disable;
+ goto out_reset_control_assert;
}
if (!cdev->net->irq) {
- dev_dbg(cdev->dev, "Polling enabled, initialize hrtimer");
- hrtimer_init(&cdev->hrtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED);
- cdev->hrtimer.function = &hrtimer_callback;
+ netdev_dbg(cdev->net, "Polling enabled, initialize hrtimer");
+ hrtimer_setup(&cdev->hrtimer, m_can_polling_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
} else {
- hrtimer_init(&cdev->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- cdev->hrtimer.function = m_can_coalescing_timer;
+ hrtimer_setup(&cdev->hrtimer, m_can_coalescing_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
ret = m_can_dev_setup(cdev);
if (ret)
goto rx_offload_del;
- ret = register_m_can_dev(cdev->net);
+ ret = register_m_can_dev(cdev);
if (ret) {
- dev_err(cdev->dev, "registering %s failed (err=%d)\n",
- cdev->net->name, ret);
+ netdev_err(cdev->net, "registering %s failed (err=%d)\n",
+ cdev->net->name, ret);
goto rx_offload_del;
}
of_can_transceiver(cdev->net);
- dev_info(cdev->dev, "%s device registered (irq=%d, version=%d)\n",
- KBUILD_MODNAME, cdev->net->irq, cdev->version);
+ netdev_info(cdev->net, "device registered (irq=%d, version=%d)\n",
+ cdev->net->irq, cdev->version);
/* Probe finished
- * Stop clocks. They will be reactivated once the M_CAN device is opened
+ * Assert reset and stop clocks.
+ * They will be reactivated once the M_CAN device is opened
*/
+ reset_control_assert(cdev->rst);
m_can_clk_stop(cdev);
return 0;
@@ -2359,6 +2577,8 @@ int m_can_class_register(struct m_can_classdev *cdev)
rx_offload_del:
if (cdev->is_peripheral)
can_rx_offload_del(&cdev->offload);
+out_reset_control_assert:
+ reset_control_assert(cdev->rst);
clk_disable:
m_can_clk_stop(cdev);
@@ -2368,9 +2588,9 @@ EXPORT_SYMBOL_GPL(m_can_class_register);
void m_can_class_unregister(struct m_can_classdev *cdev)
{
+ unregister_candev(cdev->net);
if (cdev->is_peripheral)
can_rx_offload_del(&cdev->offload);
- unregister_candev(cdev->net);
}
EXPORT_SYMBOL_GPL(m_can_class_unregister);
@@ -2378,27 +2598,34 @@ int m_can_class_suspend(struct device *dev)
{
struct m_can_classdev *cdev = dev_get_drvdata(dev);
struct net_device *ndev = cdev->net;
+ int ret = 0;
if (netif_running(ndev)) {
netif_stop_queue(ndev);
netif_device_detach(ndev);
/* leave the chip running with rx interrupt enabled if it is
- * used as a wake-up source.
+ * used as a wake-up source. Coalescing needs to be reset then,
+ * the timer is cancelled here, interrupts are done in resume.
*/
- if (cdev->pm_wake_source)
+ if (cdev->pm_wake_source) {
+ hrtimer_cancel(&cdev->hrtimer);
m_can_write(cdev, M_CAN_IE, IR_RF0N);
- else
+
+ if (cdev->ops->deinit)
+ ret = cdev->ops->deinit(cdev);
+ } else {
m_can_stop(ndev);
+ }
m_can_clk_stop(cdev);
+ cdev->can.state = CAN_STATE_SLEEPING;
}
- pinctrl_pm_select_sleep_state(dev);
+ if (!m_can_class_wakeup_pinctrl_enabled(cdev))
+ pinctrl_pm_select_sleep_state(dev);
- cdev->can.state = CAN_STATE_SLEEPING;
-
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(m_can_class_suspend);
@@ -2406,19 +2633,30 @@ int m_can_class_resume(struct device *dev)
{
struct m_can_classdev *cdev = dev_get_drvdata(dev);
struct net_device *ndev = cdev->net;
+ int ret = 0;
- pinctrl_pm_select_default_state(dev);
-
- cdev->can.state = CAN_STATE_ERROR_ACTIVE;
+ if (!m_can_class_wakeup_pinctrl_enabled(cdev))
+ pinctrl_pm_select_default_state(dev);
if (netif_running(ndev)) {
- int ret;
-
ret = m_can_clk_start(cdev);
if (ret)
return ret;
if (cdev->pm_wake_source) {
+ /* Restore active interrupts but disable coalescing as
+ * we may have missed important waterlevel interrupts
+ * between suspend and resume. Timers are already
+ * stopped in suspend. Here we enable all interrupts
+ * again.
+ */
+ cdev->active_interrupts |= IR_RF0N | IR_TEFN;
+
+ if (cdev->ops->init)
+ ret = cdev->ops->init(cdev);
+
+ cdev->can.state = m_can_state_get_by_psr(cdev);
+
m_can_write(cdev, M_CAN_IE, cdev->active_interrupts);
} else {
ret = m_can_start(ndev);
@@ -2432,11 +2670,11 @@ int m_can_class_resume(struct device *dev)
netif_start_queue(ndev);
}
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(m_can_class_resume);
-MODULE_AUTHOR("Dong Aisheng <b29396@freescale.com>");
+MODULE_AUTHOR("Dong Aisheng <aisheng.dong@nxp.com>");
MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CAN bus driver for Bosch M_CAN controller");
diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h
index 3a9edc292593..4743342b2fba 100644
--- a/drivers/net/can/m_can/m_can.h
+++ b/drivers/net/can/m_can/m_can.h
@@ -68,6 +68,7 @@ struct m_can_ops {
int (*write_fifo)(struct m_can_classdev *cdev, int addr_offset,
const void *val, size_t val_count);
int (*init)(struct m_can_classdev *cdev);
+ int (*deinit)(struct m_can_classdev *cdev);
};
struct m_can_tx_op {
@@ -85,13 +86,14 @@ struct m_can_classdev {
struct device *dev;
struct clk *hclk;
struct clk *cclk;
+ struct reset_control *rst;
struct workqueue_struct *tx_wq;
struct phy *transceiver;
ktime_t irq_timer_wait;
- struct m_can_ops *ops;
+ const struct m_can_ops *ops;
int version;
u32 irqstatus;
@@ -99,6 +101,7 @@ struct m_can_classdev {
int pm_clock_support;
int pm_wake_source;
int is_peripheral;
+ bool irq_edge_triggered;
// Cached M_CAN_IE register content
u32 active_interrupts;
@@ -126,6 +129,9 @@ struct m_can_classdev {
struct mram_cfg mcfg[MRAM_CFG_NUM];
struct hrtimer hrtimer;
+
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pinctrl_state_wakeup;
};
struct m_can_classdev *m_can_class_allocate_dev(struct device *dev, int sizeof_priv);
@@ -133,7 +139,6 @@ void m_can_class_free_dev(struct net_device *net);
int m_can_class_register(struct m_can_classdev *cdev);
void m_can_class_unregister(struct m_can_classdev *cdev);
int m_can_class_get_clocks(struct m_can_classdev *cdev);
-int m_can_init_ram(struct m_can_classdev *priv);
int m_can_check_mram_cfg(struct m_can_classdev *cdev, u32 mram_max_size);
int m_can_class_suspend(struct device *dev);
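
The header now marks the ops table const and adds an optional ->deinit hook as the counterpart to ->init; the m_can.c suspend path above calls it when the device stays powered as a wakeup source. A sketch of how a hypothetical glue driver would fill the table (only the field names come from the header above):

#include "m_can.h"	/* assumes the sketch sits next to this header */

static int example_glue_init(struct m_can_classdev *cdev)
{
	/* e.g. bring an external companion chip into normal mode */
	return 0;
}

static int example_glue_deinit(struct m_can_classdev *cdev)
{
	/* e.g. drop the companion chip back into standby */
	return 0;
}

static const struct m_can_ops example_glue_ops = {
	.init	= example_glue_init,
	.deinit	= example_glue_deinit,	/* optional; leave NULL if nothing to undo */
	/* .read_reg, .write_reg, .write_fifo as in the glue drivers below */
};
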
diff --git a/drivers/net/can/m_can/m_can_pci.c b/drivers/net/can/m_can/m_can_pci.c
index 45400de4163d..eb31ed1f9644 100644
--- a/drivers/net/can/m_can/m_can_pci.c
+++ b/drivers/net/can/m_can/m_can_pci.c
@@ -77,7 +77,7 @@ static int iomap_write_fifo(struct m_can_classdev *cdev, int offset,
return 0;
}
-static struct m_can_ops m_can_pci_ops = {
+static const struct m_can_ops m_can_pci_ops = {
.read_reg = iomap_read_reg,
.write_reg = iomap_write_reg,
.write_fifo = iomap_write_fifo,
@@ -111,8 +111,8 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
mcan_class = m_can_class_allocate_dev(&pci->dev,
sizeof(struct m_can_pci_priv));
- if (!mcan_class)
- return -ENOMEM;
+ if (IS_ERR(mcan_class))
+ return PTR_ERR(mcan_class);
priv = cdev_to_priv(mcan_class);
@@ -127,6 +127,7 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
mcan_class->pm_clock_support = 1;
mcan_class->pm_wake_source = 0;
mcan_class->can.clock.freq = id->driver_data;
+ mcan_class->irq_edge_triggered = true;
mcan_class->ops = &m_can_pci_ops;
pci_set_drvdata(pci, mcan_class);
diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c
index df0367124b4c..56da411878af 100644
--- a/drivers/net/can/m_can/m_can_platform.c
+++ b/drivers/net/can/m_can/m_can_platform.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
// IOMapped CAN bus driver for Bosch M_CAN controller
// Copyright (C) 2014 Freescale Semiconductor, Inc.
-// Dong Aisheng <b29396@freescale.com>
+// Dong Aisheng <aisheng.dong@nxp.com>
//
// Copyright (C) 2018-19 Texas Instruments Incorporated - http://www.ti.com/
@@ -68,7 +68,7 @@ static int iomap_write_fifo(struct m_can_classdev *cdev, int offset,
return 0;
}
-static struct m_can_ops m_can_plat_ops = {
+static const struct m_can_ops m_can_plat_ops = {
.read_reg = iomap_read_reg,
.write_reg = iomap_write_reg,
.write_fifo = iomap_write_fifo,
@@ -87,8 +87,8 @@ static int m_can_plat_probe(struct platform_device *pdev)
mcan_class = m_can_class_allocate_dev(&pdev->dev,
sizeof(struct m_can_plat_priv));
- if (!mcan_class)
- return -ENOMEM;
+ if (IS_ERR(mcan_class))
+ return PTR_ERR(mcan_class);
priv = cdev_to_priv(mcan_class);
@@ -180,7 +180,7 @@ static void m_can_plat_remove(struct platform_device *pdev)
struct m_can_classdev *mcan_class = &priv->cdev;
m_can_class_unregister(mcan_class);
-
+ pm_runtime_disable(mcan_class->dev);
m_can_class_free_dev(mcan_class->net);
}
@@ -231,12 +231,12 @@ static struct platform_driver m_can_plat_driver = {
.pm = &m_can_pmops,
},
.probe = m_can_plat_probe,
- .remove_new = m_can_plat_remove,
+ .remove = m_can_plat_remove,
};
module_platform_driver(m_can_plat_driver);
-MODULE_AUTHOR("Dong Aisheng <b29396@freescale.com>");
+MODULE_AUTHOR("Dong Aisheng <aisheng.dong@nxp.com>");
MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("M_CAN driver for IO Mapped Bosch controllers");
diff --git a/drivers/net/can/m_can/tcan4x5x-core.c b/drivers/net/can/m_can/tcan4x5x-core.c
index a42600dac70d..31cc9d0abd45 100644
--- a/drivers/net/can/m_can/tcan4x5x-core.c
+++ b/drivers/net/can/m_can/tcan4x5x-core.c
@@ -92,6 +92,8 @@
#define TCAN4X5X_MODE_STANDBY BIT(6)
#define TCAN4X5X_MODE_NORMAL BIT(7)
+#define TCAN4X5X_NWKRQ_VOLTAGE_VIO BIT(19)
+
#define TCAN4X5X_DISABLE_WAKE_MSK (BIT(31) | BIT(30))
#define TCAN4X5X_DISABLE_INH_MSK BIT(9)
@@ -267,9 +269,24 @@ static int tcan4x5x_init(struct m_can_classdev *cdev)
if (ret)
return ret;
+ if (tcan4x5x->nwkrq_voltage_vio) {
+ ret = regmap_set_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG,
+ TCAN4X5X_NWKRQ_VOLTAGE_VIO);
+ if (ret)
+ return ret;
+ }
+
return ret;
}
+static int tcan4x5x_deinit(struct m_can_classdev *cdev)
+{
+ struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev);
+
+ return regmap_update_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG,
+ TCAN4X5X_MODE_SEL_MASK, TCAN4X5X_MODE_STANDBY);
+}
+
static int tcan4x5x_disable_wake(struct m_can_classdev *cdev)
{
struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev);
@@ -318,21 +335,27 @@ static const struct tcan4x5x_version_info
return &tcan4x5x_versions[TCAN4X5X];
}
-static int tcan4x5x_get_gpios(struct m_can_classdev *cdev,
- const struct tcan4x5x_version_info *version_info)
+static void tcan4x5x_get_dt_data(struct m_can_classdev *cdev)
+{
+ struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev);
+
+ tcan4x5x->nwkrq_voltage_vio =
+ of_property_read_bool(cdev->dev->of_node, "ti,nwkrq-voltage-vio");
+}
+
+static int tcan4x5x_get_gpios(struct m_can_classdev *cdev)
{
struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev);
int ret;
- if (version_info->has_wake_pin) {
- tcan4x5x->device_wake_gpio = devm_gpiod_get(cdev->dev, "device-wake",
- GPIOD_OUT_HIGH);
- if (IS_ERR(tcan4x5x->device_wake_gpio)) {
- if (PTR_ERR(tcan4x5x->device_wake_gpio) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
+ tcan4x5x->device_wake_gpio = devm_gpiod_get_optional(cdev->dev,
+ "device-wake",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(tcan4x5x->device_wake_gpio)) {
+ if (PTR_ERR(tcan4x5x->device_wake_gpio) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
- tcan4x5x_disable_wake(cdev);
- }
+ tcan4x5x->device_wake_gpio = NULL;
}
tcan4x5x->reset_gpio = devm_gpiod_get_optional(cdev->dev, "reset",
@@ -344,21 +367,39 @@ static int tcan4x5x_get_gpios(struct m_can_classdev *cdev,
if (ret)
return ret;
- if (version_info->has_state_pin) {
- tcan4x5x->device_state_gpio = devm_gpiod_get_optional(cdev->dev,
- "device-state",
- GPIOD_IN);
- if (IS_ERR(tcan4x5x->device_state_gpio)) {
- tcan4x5x->device_state_gpio = NULL;
- tcan4x5x_disable_state(cdev);
- }
+ tcan4x5x->device_state_gpio = devm_gpiod_get_optional(cdev->dev,
+ "device-state",
+ GPIOD_IN);
+ if (IS_ERR(tcan4x5x->device_state_gpio))
+ tcan4x5x->device_state_gpio = NULL;
+
+ return 0;
+}
+
+static int tcan4x5x_check_gpios(struct m_can_classdev *cdev,
+ const struct tcan4x5x_version_info *version_info)
+{
+ struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev);
+ int ret;
+
+ if (version_info->has_wake_pin && !tcan4x5x->device_wake_gpio) {
+ ret = tcan4x5x_disable_wake(cdev);
+ if (ret)
+ return ret;
+ }
+
+ if (version_info->has_state_pin && !tcan4x5x->device_state_gpio) {
+ ret = tcan4x5x_disable_state(cdev);
+ if (ret)
+ return ret;
}
return 0;
}
-static struct m_can_ops tcan4x5x_ops = {
+static const struct m_can_ops tcan4x5x_ops = {
.init = tcan4x5x_init,
+ .deinit = tcan4x5x_deinit,
.read_reg = tcan4x5x_read_reg,
.write_reg = tcan4x5x_write_reg,
.write_fifo = tcan4x5x_write_fifo,
@@ -375,8 +416,8 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
mcan_class = m_can_class_allocate_dev(&spi->dev,
sizeof(struct tcan4x5x_priv));
- if (!mcan_class)
- return -ENOMEM;
+ if (IS_ERR(mcan_class))
+ return PTR_ERR(mcan_class);
ret = m_can_check_mram_cfg(mcan_class, TCAN4X5X_MRAM_SIZE);
if (ret)
@@ -385,14 +426,15 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
priv = cdev_to_priv(mcan_class);
priv->power = devm_regulator_get_optional(&spi->dev, "vsup");
- if (PTR_ERR(priv->power) == -EPROBE_DEFER) {
- ret = -EPROBE_DEFER;
- goto out_m_can_class_free_dev;
- } else {
+ if (IS_ERR(priv->power)) {
+ if (PTR_ERR(priv->power) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto out_m_can_class_free_dev;
+ }
priv->power = NULL;
}
- m_can_class_get_clocks(mcan_class);
+ mcan_class->cclk = devm_clk_get(mcan_class->dev, "cclk");
if (IS_ERR(mcan_class->cclk)) {
dev_err(&spi->dev, "no CAN clock source defined\n");
freq = TCAN4X5X_EXT_CLK_DEF;
@@ -441,22 +483,37 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
goto out_m_can_class_free_dev;
}
+ ret = tcan4x5x_get_gpios(mcan_class);
+ if (ret) {
+ dev_err(&spi->dev, "Getting gpios failed %pe\n", ERR_PTR(ret));
+ goto out_power;
+ }
+
version_info = tcan4x5x_find_version(priv);
if (IS_ERR(version_info)) {
ret = PTR_ERR(version_info);
goto out_power;
}
- ret = tcan4x5x_get_gpios(mcan_class, version_info);
+ ret = tcan4x5x_check_gpios(mcan_class, version_info);
if (ret) {
- dev_err(&spi->dev, "Getting gpios failed %pe\n", ERR_PTR(ret));
+ dev_err(&spi->dev, "Checking gpios failed %pe\n", ERR_PTR(ret));
+ goto out_power;
+ }
+
+ tcan4x5x_get_dt_data(mcan_class);
+
+ tcan4x5x_check_wake(priv);
+
+ ret = tcan4x5x_write_tcan_reg(mcan_class, TCAN4X5X_INT_EN, 0);
+ if (ret) {
+ dev_err(&spi->dev, "Disabling interrupts failed %pe\n", ERR_PTR(ret));
goto out_power;
}
- ret = tcan4x5x_init(mcan_class);
+ ret = tcan4x5x_clear_interrupts(mcan_class);
if (ret) {
- dev_err(&spi->dev, "tcan initialization failed %pe\n",
- ERR_PTR(ret));
+ dev_err(&spi->dev, "Clearing interrupts failed %pe\n", ERR_PTR(ret));
goto out_power;
}
diff --git a/drivers/net/can/m_can/tcan4x5x.h b/drivers/net/can/m_can/tcan4x5x.h
index e62c030d3e1e..203399d5e8cc 100644
--- a/drivers/net/can/m_can/tcan4x5x.h
+++ b/drivers/net/can/m_can/tcan4x5x.h
@@ -42,6 +42,8 @@ struct tcan4x5x_priv {
struct tcan4x5x_map_buf map_buf_rx;
struct tcan4x5x_map_buf map_buf_tx;
+
+ bool nwkrq_voltage_vio;
};
static inline void
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index 5b3d69c3b6b6..0080c39ee182 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -435,7 +435,7 @@ static struct platform_driver mpc5xxx_can_driver = {
.of_match_table = mpc5xxx_can_table,
},
.probe = mpc5xxx_can_probe,
- .remove_new = mpc5xxx_can_remove,
+ .remove = mpc5xxx_can_remove,
#ifdef CONFIG_PM
.suspend = mpc5xxx_can_suspend,
.resume = mpc5xxx_can_resume,
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index a6829cdc0e81..39c7aa2a0b2f 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -34,12 +34,6 @@ static const struct can_bittiming_const mscan_bittiming_const = {
.brp_inc = 1,
};
-struct mscan_state {
- u8 mode;
- u8 canrier;
- u8 cantier;
-};
-
static enum can_state state_map[] = {
CAN_STATE_ERROR_ACTIVE,
CAN_STATE_ERROR_WARNING,
@@ -613,7 +607,6 @@ static const struct net_device_ops mscan_netdev_ops = {
.ndo_open = mscan_open,
.ndo_stop = mscan_close,
.ndo_start_xmit = mscan_start_xmit,
- .ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops mscan_ethtool_ops = {
diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c
index 31c9c127e24b..06cb2629f66a 100644
--- a/drivers/net/can/peak_canfd/peak_canfd.c
+++ b/drivers/net/can/peak_canfd/peak_canfd.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2007, 2011 Wolfgang Grandegger <wg@grandegger.com>
- * Copyright (C) 2012 Stephane Grosjean <s.grosjean@peak-system.com>
*
- * Copyright (C) 2016 PEAK System-Technik GmbH
+ * Copyright (C) 2016-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*/
#include <linux/can.h>
@@ -624,7 +624,7 @@ static int peak_canfd_set_data_bittiming(struct net_device *ndev)
{
struct peak_canfd_priv *priv = netdev_priv(ndev);
- return pucan_set_timing_fast(priv, &priv->can.data_bittiming);
+ return pucan_set_timing_fast(priv, &priv->can.fd.data_bittiming);
}
static int peak_canfd_close(struct net_device *ndev)
@@ -743,49 +743,42 @@ static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
-static int peak_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+static int peak_eth_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
{
- struct hwtstamp_config hwts_cfg = { 0 };
+ config->tx_type = HWTSTAMP_TX_OFF;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
- switch (cmd) {
- case SIOCSHWTSTAMP: /* set */
- if (copy_from_user(&hwts_cfg, ifr->ifr_data, sizeof(hwts_cfg)))
- return -EFAULT;
- if (hwts_cfg.tx_type == HWTSTAMP_TX_OFF &&
- hwts_cfg.rx_filter == HWTSTAMP_FILTER_ALL)
- return 0;
- return -ERANGE;
+ return 0;
+}
- case SIOCGHWTSTAMP: /* get */
- hwts_cfg.tx_type = HWTSTAMP_TX_OFF;
- hwts_cfg.rx_filter = HWTSTAMP_FILTER_ALL;
- if (copy_to_user(ifr->ifr_data, &hwts_cfg, sizeof(hwts_cfg)))
- return -EFAULT;
+static int peak_eth_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ if (config->tx_type == HWTSTAMP_TX_OFF &&
+ config->rx_filter == HWTSTAMP_FILTER_ALL)
return 0;
- default:
- return -EOPNOTSUPP;
- }
+ NL_SET_ERR_MSG_MOD(extack, "Only RX HWTSTAMP_FILTER_ALL is supported");
+ return -ERANGE;
}
static const struct net_device_ops peak_canfd_netdev_ops = {
.ndo_open = peak_canfd_open,
.ndo_stop = peak_canfd_close,
- .ndo_eth_ioctl = peak_eth_ioctl,
.ndo_start_xmit = peak_canfd_start_xmit,
- .ndo_change_mtu = can_change_mtu,
+ .ndo_hwtstamp_get = peak_eth_hwtstamp_get,
+ .ndo_hwtstamp_set = peak_eth_hwtstamp_set,
};
static int peak_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
info->so_timestamping =
SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
- info->phc_index = -1;
info->tx_types = BIT(HWTSTAMP_TX_OFF);
info->rx_filters = BIT(HWTSTAMP_FILTER_ALL);
@@ -816,12 +809,12 @@ struct net_device *alloc_peak_canfd_dev(int sizeof_priv, int index,
/* complete now socket-can initialization side */
priv->can.state = CAN_STATE_STOPPED;
priv->can.bittiming_const = &peak_canfd_nominal_const;
- priv->can.data_bittiming_const = &peak_canfd_data_const;
+ priv->can.fd.data_bittiming_const = &peak_canfd_data_const;
priv->can.do_set_mode = peak_canfd_set_mode;
priv->can.do_get_berr_counter = peak_canfd_get_berr_counter;
priv->can.do_set_bittiming = peak_canfd_set_bittiming;
- priv->can.do_set_data_bittiming = peak_canfd_set_data_bittiming;
+ priv->can.fd.do_set_data_bittiming = peak_canfd_set_data_bittiming;
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_3_SAMPLES |
diff --git a/drivers/net/can/peak_canfd/peak_canfd_user.h b/drivers/net/can/peak_canfd/peak_canfd_user.h
index a72719dc3b74..60c6542028cf 100644
--- a/drivers/net/can/peak_canfd/peak_canfd_user.h
+++ b/drivers/net/can/peak_canfd/peak_canfd_user.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* CAN driver for PEAK System micro-CAN based adapters
*
- * Copyright (C) 2003-2011 PEAK System-Technik GmbH
- * Copyright (C) 2011-2013 Stephane Grosjean <s.grosjean@peak-system.com>
+ * Copyright (C) 2003-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*/
#ifndef PEAK_CANFD_USER_H
#define PEAK_CANFD_USER_H
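
The peak_canfd.c hunks above replace the SIOC[GS]HWTSTAMP ioctl handler with ndo_hwtstamp_get()/ndo_hwtstamp_set() callbacks, so the networking core performs the user-space copies and hands the driver a struct kernel_hwtstamp_config directly. A consolidated sketch of that fixed-capability pattern (example_* names are illustrative; the semantics mirror the hunks above):

#include <linux/errno.h>
#include <linux/net_tstamp.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>

static int example_hwtstamp_get(struct net_device *ndev,
				struct kernel_hwtstamp_config *cfg)
{
	/* RX timestamping is always on, TX timestamping never is */
	cfg->tx_type = HWTSTAMP_TX_OFF;
	cfg->rx_filter = HWTSTAMP_FILTER_ALL;

	return 0;
}

static int example_hwtstamp_set(struct net_device *ndev,
				struct kernel_hwtstamp_config *cfg,
				struct netlink_ext_ack *extack)
{
	if (cfg->tx_type == HWTSTAMP_TX_OFF &&
	    cfg->rx_filter == HWTSTAMP_FILTER_ALL)
		return 0;

	NL_SET_ERR_MSG_MOD(extack, "only HWTSTAMP_FILTER_ALL is supported");
	return -ERANGE;
}

static const struct net_device_ops example_netdev_ops = {
	.ndo_hwtstamp_get = example_hwtstamp_get,
	.ndo_hwtstamp_set = example_hwtstamp_set,
};
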
diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c
index 1df3c4b54f03..93558e33bc02 100644
--- a/drivers/net/can/peak_canfd/peak_pciefd_main.c
+++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c
@@ -1,10 +1,10 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2007, 2011 Wolfgang Grandegger <wg@grandegger.com>
- * Copyright (C) 2012 Stephane Grosjean <s.grosjean@peak-system.com>
*
* Derived from the PCAN project file driver/src/pcan_pci.c:
*
- * Copyright (C) 2001-2006 PEAK System-Technik GmbH
+ * Copyright (C) 2001-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*/
#include <linux/kernel.h>
@@ -19,7 +19,7 @@
#include "peak_canfd_user.h"
-MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>");
+MODULE_AUTHOR("Stéphane Grosjean <stephane.grosjean@hms-networks.com>");
MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCIe/M.2 FD family cards");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c
index f5aa5dbacaf2..fc3df328e877 100644
--- a/drivers/net/can/rcar/rcar_can.c
+++ b/drivers/net/can/rcar/rcar_can.c
@@ -5,6 +5,8 @@
* Copyright (C) 2013 Renesas Solutions Corp.
*/
+#include <linux/bitfield.h>
+#include <linux/bits.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
@@ -16,6 +18,7 @@
#include <linux/can/dev.h>
#include <linux/clk.h>
#include <linux/of.h>
+#include <linux/pm_runtime.h>
#define RCAR_CAN_DRV_NAME "rcar_can"
@@ -92,7 +95,6 @@ struct rcar_can_priv {
struct net_device *ndev;
struct napi_struct napi;
struct rcar_can_regs __iomem *regs;
- struct clk *clk;
struct clk *can_clk;
u32 tx_head;
u32 tx_tail;
@@ -113,100 +115,102 @@ static const struct can_bittiming_const rcar_can_bittiming_const = {
};
/* Control Register bits */
-#define RCAR_CAN_CTLR_BOM (3 << 11) /* Bus-Off Recovery Mode Bits */
-#define RCAR_CAN_CTLR_BOM_ENT (1 << 11) /* Entry to halt mode */
- /* at bus-off entry */
-#define RCAR_CAN_CTLR_SLPM (1 << 10)
-#define RCAR_CAN_CTLR_CANM (3 << 8) /* Operating Mode Select Bit */
-#define RCAR_CAN_CTLR_CANM_HALT (1 << 9)
-#define RCAR_CAN_CTLR_CANM_RESET (1 << 8)
-#define RCAR_CAN_CTLR_CANM_FORCE_RESET (3 << 8)
-#define RCAR_CAN_CTLR_MLM (1 << 3) /* Message Lost Mode Select */
-#define RCAR_CAN_CTLR_IDFM (3 << 1) /* ID Format Mode Select Bits */
-#define RCAR_CAN_CTLR_IDFM_MIXED (1 << 2) /* Mixed ID mode */
-#define RCAR_CAN_CTLR_MBM (1 << 0) /* Mailbox Mode select */
+#define RCAR_CAN_CTLR_BOM GENMASK(12, 11) /* Bus-Off Recovery Mode Bits */
+#define RCAR_CAN_CTLR_BOM_ENT 1 /* Entry to halt mode */
+ /* at bus-off entry */
+#define RCAR_CAN_CTLR_SLPM BIT(10) /* Sleep Mode */
+#define RCAR_CAN_CTLR_CANM GENMASK(9, 8) /* Operating Mode Select Bit */
+#define RCAR_CAN_CTLR_CANM_OPER 0 /* Operation Mode */
+#define RCAR_CAN_CTLR_CANM_RESET 1 /* Reset Mode */
+#define RCAR_CAN_CTLR_CANM_HALT 2 /* Halt Mode */
+#define RCAR_CAN_CTLR_CANM_FORCE_RESET 3 /* Reset Mode (forcible) */
+#define RCAR_CAN_CTLR_MLM BIT(3) /* Message Lost Mode Select */
+#define RCAR_CAN_CTLR_IDFM GENMASK(2, 1) /* ID Format Mode Select Bits */
+#define RCAR_CAN_CTLR_IDFM_STD 0 /* Standard ID mode */
+#define RCAR_CAN_CTLR_IDFM_EXT 1 /* Extended ID mode */
+#define RCAR_CAN_CTLR_IDFM_MIXED 2 /* Mixed ID mode */
+#define RCAR_CAN_CTLR_MBM BIT(0) /* Mailbox Mode select */
/* Status Register bits */
-#define RCAR_CAN_STR_RSTST (1 << 8) /* Reset Status Bit */
+#define RCAR_CAN_STR_RSTST BIT(8) /* Reset Status Bit */
/* FIFO Received ID Compare Registers 0 and 1 bits */
-#define RCAR_CAN_FIDCR_IDE (1 << 31) /* ID Extension Bit */
-#define RCAR_CAN_FIDCR_RTR (1 << 30) /* Remote Transmission Request Bit */
+#define RCAR_CAN_FIDCR_IDE BIT(31) /* ID Extension Bit */
+#define RCAR_CAN_FIDCR_RTR BIT(30) /* Remote Transmission Request Bit */
/* Receive FIFO Control Register bits */
-#define RCAR_CAN_RFCR_RFEST (1 << 7) /* Receive FIFO Empty Status Flag */
-#define RCAR_CAN_RFCR_RFE (1 << 0) /* Receive FIFO Enable */
+#define RCAR_CAN_RFCR_RFEST BIT(7) /* Receive FIFO Empty Status Flag */
+#define RCAR_CAN_RFCR_RFE BIT(0) /* Receive FIFO Enable */
/* Transmit FIFO Control Register bits */
-#define RCAR_CAN_TFCR_TFUST (7 << 1) /* Transmit FIFO Unsent Message */
- /* Number Status Bits */
-#define RCAR_CAN_TFCR_TFUST_SHIFT 1 /* Offset of Transmit FIFO Unsent */
- /* Message Number Status Bits */
-#define RCAR_CAN_TFCR_TFE (1 << 0) /* Transmit FIFO Enable */
-
-#define RCAR_CAN_N_RX_MKREGS1 2 /* Number of mask registers */
- /* for Rx mailboxes 0-31 */
+#define RCAR_CAN_TFCR_TFUST GENMASK(3, 1) /* Transmit FIFO Unsent Message */
+ /* Number Status Bits */
+#define RCAR_CAN_TFCR_TFE BIT(0) /* Transmit FIFO Enable */
+
+#define RCAR_CAN_N_RX_MKREGS1 2 /* Number of mask registers */
+ /* for Rx mailboxes 0-31 */
#define RCAR_CAN_N_RX_MKREGS2 8
/* Bit Configuration Register settings */
-#define RCAR_CAN_BCR_TSEG1(x) (((x) & 0x0f) << 20)
-#define RCAR_CAN_BCR_BPR(x) (((x) & 0x3ff) << 8)
-#define RCAR_CAN_BCR_SJW(x) (((x) & 0x3) << 4)
-#define RCAR_CAN_BCR_TSEG2(x) ((x) & 0x07)
+#define RCAR_CAN_BCR_TSEG1 GENMASK(23, 20)
+#define RCAR_CAN_BCR_BRP GENMASK(17, 8)
+#define RCAR_CAN_BCR_SJW GENMASK(5, 4)
+#define RCAR_CAN_BCR_TSEG2 GENMASK(2, 0)
/* Mailbox and Mask Registers bits */
-#define RCAR_CAN_IDE (1 << 31)
-#define RCAR_CAN_RTR (1 << 30)
-#define RCAR_CAN_SID_SHIFT 18
+#define RCAR_CAN_IDE BIT(31) /* ID Extension */
+#define RCAR_CAN_RTR BIT(30) /* Remote Transmission Request */
+#define RCAR_CAN_SID GENMASK(28, 18) /* Standard ID */
+#define RCAR_CAN_EID GENMASK(28, 0) /* Extended ID */
/* Mailbox Interrupt Enable Register 1 bits */
-#define RCAR_CAN_MIER1_RXFIE (1 << 28) /* Receive FIFO Interrupt Enable */
-#define RCAR_CAN_MIER1_TXFIE (1 << 24) /* Transmit FIFO Interrupt Enable */
+#define RCAR_CAN_MIER1_RXFIE BIT(28) /* Receive FIFO Interrupt Enable */
+#define RCAR_CAN_MIER1_TXFIE BIT(24) /* Transmit FIFO Interrupt Enable */
/* Interrupt Enable Register bits */
-#define RCAR_CAN_IER_ERSIE (1 << 5) /* Error (ERS) Interrupt Enable Bit */
-#define RCAR_CAN_IER_RXFIE (1 << 4) /* Reception FIFO Interrupt */
- /* Enable Bit */
-#define RCAR_CAN_IER_TXFIE (1 << 3) /* Transmission FIFO Interrupt */
- /* Enable Bit */
+#define RCAR_CAN_IER_ERSIE BIT(5) /* Error (ERS) Interrupt Enable Bit */
+#define RCAR_CAN_IER_RXFIE BIT(4) /* Reception FIFO Interrupt */
+ /* Enable Bit */
+#define RCAR_CAN_IER_TXFIE BIT(3) /* Transmission FIFO Interrupt */
+ /* Enable Bit */
/* Interrupt Status Register bits */
-#define RCAR_CAN_ISR_ERSF (1 << 5) /* Error (ERS) Interrupt Status Bit */
-#define RCAR_CAN_ISR_RXFF (1 << 4) /* Reception FIFO Interrupt */
- /* Status Bit */
-#define RCAR_CAN_ISR_TXFF (1 << 3) /* Transmission FIFO Interrupt */
- /* Status Bit */
+#define RCAR_CAN_ISR_ERSF BIT(5) /* Error (ERS) Interrupt Status Bit */
+#define RCAR_CAN_ISR_RXFF BIT(4) /* Reception FIFO Interrupt */
+ /* Status Bit */
+#define RCAR_CAN_ISR_TXFF BIT(3) /* Transmission FIFO Interrupt */
+ /* Status Bit */
/* Error Interrupt Enable Register bits */
-#define RCAR_CAN_EIER_BLIE (1 << 7) /* Bus Lock Interrupt Enable */
-#define RCAR_CAN_EIER_OLIE (1 << 6) /* Overload Frame Transmit */
- /* Interrupt Enable */
-#define RCAR_CAN_EIER_ORIE (1 << 5) /* Receive Overrun Interrupt Enable */
-#define RCAR_CAN_EIER_BORIE (1 << 4) /* Bus-Off Recovery Interrupt Enable */
-#define RCAR_CAN_EIER_BOEIE (1 << 3) /* Bus-Off Entry Interrupt Enable */
-#define RCAR_CAN_EIER_EPIE (1 << 2) /* Error Passive Interrupt Enable */
-#define RCAR_CAN_EIER_EWIE (1 << 1) /* Error Warning Interrupt Enable */
-#define RCAR_CAN_EIER_BEIE (1 << 0) /* Bus Error Interrupt Enable */
+#define RCAR_CAN_EIER_BLIE BIT(7) /* Bus Lock Interrupt Enable */
+#define RCAR_CAN_EIER_OLIE BIT(6) /* Overload Frame Transmit */
+ /* Interrupt Enable */
+#define RCAR_CAN_EIER_ORIE BIT(5) /* Receive Overrun Interrupt Enable */
+#define RCAR_CAN_EIER_BORIE BIT(4) /* Bus-Off Recovery Interrupt Enable */
+#define RCAR_CAN_EIER_BOEIE BIT(3) /* Bus-Off Entry Interrupt Enable */
+#define RCAR_CAN_EIER_EPIE BIT(2) /* Error Passive Interrupt Enable */
+#define RCAR_CAN_EIER_EWIE BIT(1) /* Error Warning Interrupt Enable */
+#define RCAR_CAN_EIER_BEIE BIT(0) /* Bus Error Interrupt Enable */
/* Error Interrupt Factor Judge Register bits */
-#define RCAR_CAN_EIFR_BLIF (1 << 7) /* Bus Lock Detect Flag */
-#define RCAR_CAN_EIFR_OLIF (1 << 6) /* Overload Frame Transmission */
- /* Detect Flag */
-#define RCAR_CAN_EIFR_ORIF (1 << 5) /* Receive Overrun Detect Flag */
-#define RCAR_CAN_EIFR_BORIF (1 << 4) /* Bus-Off Recovery Detect Flag */
-#define RCAR_CAN_EIFR_BOEIF (1 << 3) /* Bus-Off Entry Detect Flag */
-#define RCAR_CAN_EIFR_EPIF (1 << 2) /* Error Passive Detect Flag */
-#define RCAR_CAN_EIFR_EWIF (1 << 1) /* Error Warning Detect Flag */
-#define RCAR_CAN_EIFR_BEIF (1 << 0) /* Bus Error Detect Flag */
+#define RCAR_CAN_EIFR_BLIF BIT(7) /* Bus Lock Detect Flag */
+#define RCAR_CAN_EIFR_OLIF BIT(6) /* Overload Frame Transmission */
+ /* Detect Flag */
+#define RCAR_CAN_EIFR_ORIF BIT(5) /* Receive Overrun Detect Flag */
+#define RCAR_CAN_EIFR_BORIF BIT(4) /* Bus-Off Recovery Detect Flag */
+#define RCAR_CAN_EIFR_BOEIF BIT(3) /* Bus-Off Entry Detect Flag */
+#define RCAR_CAN_EIFR_EPIF BIT(2) /* Error Passive Detect Flag */
+#define RCAR_CAN_EIFR_EWIF BIT(1) /* Error Warning Detect Flag */
+#define RCAR_CAN_EIFR_BEIF BIT(0) /* Bus Error Detect Flag */
/* Error Code Store Register bits */
-#define RCAR_CAN_ECSR_EDPM (1 << 7) /* Error Display Mode Select Bit */
-#define RCAR_CAN_ECSR_ADEF (1 << 6) /* ACK Delimiter Error Flag */
-#define RCAR_CAN_ECSR_BE0F (1 << 5) /* Bit Error (dominant) Flag */
-#define RCAR_CAN_ECSR_BE1F (1 << 4) /* Bit Error (recessive) Flag */
-#define RCAR_CAN_ECSR_CEF (1 << 3) /* CRC Error Flag */
-#define RCAR_CAN_ECSR_AEF (1 << 2) /* ACK Error Flag */
-#define RCAR_CAN_ECSR_FEF (1 << 1) /* Form Error Flag */
-#define RCAR_CAN_ECSR_SEF (1 << 0) /* Stuff Error Flag */
+#define RCAR_CAN_ECSR_EDPM BIT(7) /* Error Display Mode Select Bit */
+#define RCAR_CAN_ECSR_ADEF BIT(6) /* ACK Delimiter Error Flag */
+#define RCAR_CAN_ECSR_BE0F BIT(5) /* Bit Error (dominant) Flag */
+#define RCAR_CAN_ECSR_BE1F BIT(4) /* Bit Error (recessive) Flag */
+#define RCAR_CAN_ECSR_CEF BIT(3) /* CRC Error Flag */
+#define RCAR_CAN_ECSR_AEF BIT(2) /* ACK Error Flag */
+#define RCAR_CAN_ECSR_FEF BIT(1) /* Form Error Flag */
+#define RCAR_CAN_ECSR_SEF BIT(0) /* Stuff Error Flag */
#define RCAR_CAN_NAPI_WEIGHT 4
#define MAX_STR_READS 0x100
@@ -248,35 +252,35 @@ static void rcar_can_error(struct net_device *ndev)
if (ecsr & RCAR_CAN_ECSR_ADEF) {
netdev_dbg(priv->ndev, "ACK Delimiter Error\n");
tx_errors++;
- writeb(~RCAR_CAN_ECSR_ADEF, &priv->regs->ecsr);
+ writeb((u8)~RCAR_CAN_ECSR_ADEF, &priv->regs->ecsr);
if (skb)
cf->data[3] = CAN_ERR_PROT_LOC_ACK_DEL;
}
if (ecsr & RCAR_CAN_ECSR_BE0F) {
netdev_dbg(priv->ndev, "Bit Error (dominant)\n");
tx_errors++;
- writeb(~RCAR_CAN_ECSR_BE0F, &priv->regs->ecsr);
+ writeb((u8)~RCAR_CAN_ECSR_BE0F, &priv->regs->ecsr);
if (skb)
cf->data[2] |= CAN_ERR_PROT_BIT0;
}
if (ecsr & RCAR_CAN_ECSR_BE1F) {
netdev_dbg(priv->ndev, "Bit Error (recessive)\n");
tx_errors++;
- writeb(~RCAR_CAN_ECSR_BE1F, &priv->regs->ecsr);
+ writeb((u8)~RCAR_CAN_ECSR_BE1F, &priv->regs->ecsr);
if (skb)
cf->data[2] |= CAN_ERR_PROT_BIT1;
}
if (ecsr & RCAR_CAN_ECSR_CEF) {
netdev_dbg(priv->ndev, "CRC Error\n");
rx_errors++;
- writeb(~RCAR_CAN_ECSR_CEF, &priv->regs->ecsr);
+ writeb((u8)~RCAR_CAN_ECSR_CEF, &priv->regs->ecsr);
if (skb)
cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
}
if (ecsr & RCAR_CAN_ECSR_AEF) {
netdev_dbg(priv->ndev, "ACK Error\n");
tx_errors++;
- writeb(~RCAR_CAN_ECSR_AEF, &priv->regs->ecsr);
+ writeb((u8)~RCAR_CAN_ECSR_AEF, &priv->regs->ecsr);
if (skb) {
cf->can_id |= CAN_ERR_ACK;
cf->data[3] = CAN_ERR_PROT_LOC_ACK;
@@ -285,14 +289,14 @@ static void rcar_can_error(struct net_device *ndev)
if (ecsr & RCAR_CAN_ECSR_FEF) {
netdev_dbg(priv->ndev, "Form Error\n");
rx_errors++;
- writeb(~RCAR_CAN_ECSR_FEF, &priv->regs->ecsr);
+ writeb((u8)~RCAR_CAN_ECSR_FEF, &priv->regs->ecsr);
if (skb)
cf->data[2] |= CAN_ERR_PROT_FORM;
}
if (ecsr & RCAR_CAN_ECSR_SEF) {
netdev_dbg(priv->ndev, "Stuff Error\n");
rx_errors++;
- writeb(~RCAR_CAN_ECSR_SEF, &priv->regs->ecsr);
+ writeb((u8)~RCAR_CAN_ECSR_SEF, &priv->regs->ecsr);
if (skb)
cf->data[2] |= CAN_ERR_PROT_STUFF;
}
@@ -300,14 +304,14 @@ static void rcar_can_error(struct net_device *ndev)
priv->can.can_stats.bus_error++;
ndev->stats.rx_errors += rx_errors;
ndev->stats.tx_errors += tx_errors;
- writeb(~RCAR_CAN_EIFR_BEIF, &priv->regs->eifr);
+ writeb((u8)~RCAR_CAN_EIFR_BEIF, &priv->regs->eifr);
}
if (eifr & RCAR_CAN_EIFR_EWIF) {
netdev_dbg(priv->ndev, "Error warning interrupt\n");
priv->can.state = CAN_STATE_ERROR_WARNING;
priv->can.can_stats.error_warning++;
/* Clear interrupt condition */
- writeb(~RCAR_CAN_EIFR_EWIF, &priv->regs->eifr);
+ writeb((u8)~RCAR_CAN_EIFR_EWIF, &priv->regs->eifr);
if (skb)
cf->data[1] = txerr > rxerr ? CAN_ERR_CRTL_TX_WARNING :
CAN_ERR_CRTL_RX_WARNING;
@@ -317,7 +321,7 @@ static void rcar_can_error(struct net_device *ndev)
priv->can.state = CAN_STATE_ERROR_PASSIVE;
priv->can.can_stats.error_passive++;
/* Clear interrupt condition */
- writeb(~RCAR_CAN_EIFR_EPIF, &priv->regs->eifr);
+ writeb((u8)~RCAR_CAN_EIFR_EPIF, &priv->regs->eifr);
if (skb)
cf->data[1] = txerr > rxerr ? CAN_ERR_CRTL_TX_PASSIVE :
CAN_ERR_CRTL_RX_PASSIVE;
@@ -329,7 +333,7 @@ static void rcar_can_error(struct net_device *ndev)
writeb(priv->ier, &priv->regs->ier);
priv->can.state = CAN_STATE_BUS_OFF;
/* Clear interrupt condition */
- writeb(~RCAR_CAN_EIFR_BOEIF, &priv->regs->eifr);
+ writeb((u8)~RCAR_CAN_EIFR_BOEIF, &priv->regs->eifr);
priv->can.can_stats.bus_off++;
can_bus_off(ndev);
if (skb)
@@ -343,7 +347,7 @@ static void rcar_can_error(struct net_device *ndev)
netdev_dbg(priv->ndev, "Receive overrun error interrupt\n");
ndev->stats.rx_over_errors++;
ndev->stats.rx_errors++;
- writeb(~RCAR_CAN_EIFR_ORIF, &priv->regs->eifr);
+ writeb((u8)~RCAR_CAN_EIFR_ORIF, &priv->regs->eifr);
if (skb) {
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
@@ -354,7 +358,7 @@ static void rcar_can_error(struct net_device *ndev)
"Overload Frame Transmission error interrupt\n");
ndev->stats.rx_over_errors++;
ndev->stats.rx_errors++;
- writeb(~RCAR_CAN_EIFR_OLIF, &priv->regs->eifr);
+ writeb((u8)~RCAR_CAN_EIFR_OLIF, &priv->regs->eifr);
if (skb) {
cf->can_id |= CAN_ERR_PROT;
cf->data[2] |= CAN_ERR_PROT_OVERLOAD;
@@ -372,10 +376,9 @@ static void rcar_can_tx_done(struct net_device *ndev)
u8 isr;
while (1) {
- u8 unsent = readb(&priv->regs->tfcr);
+ u8 unsent = FIELD_GET(RCAR_CAN_TFCR_TFUST,
+ readb(&priv->regs->tfcr));
- unsent = (unsent & RCAR_CAN_TFCR_TFUST) >>
- RCAR_CAN_TFCR_TFUST_SHIFT;
if (priv->tx_head - priv->tx_tail <= unsent)
break;
stats->tx_packets++;
@@ -420,15 +423,16 @@ static irqreturn_t rcar_can_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void rcar_can_set_bittiming(struct net_device *dev)
+static void rcar_can_set_bittiming(struct net_device *ndev)
{
- struct rcar_can_priv *priv = netdev_priv(dev);
+ struct rcar_can_priv *priv = netdev_priv(ndev);
struct can_bittiming *bt = &priv->can.bittiming;
u32 bcr;
- bcr = RCAR_CAN_BCR_TSEG1(bt->phase_seg1 + bt->prop_seg - 1) |
- RCAR_CAN_BCR_BPR(bt->brp - 1) | RCAR_CAN_BCR_SJW(bt->sjw - 1) |
- RCAR_CAN_BCR_TSEG2(bt->phase_seg2 - 1);
+ bcr = FIELD_PREP(RCAR_CAN_BCR_TSEG1, bt->phase_seg1 + bt->prop_seg - 1) |
+ FIELD_PREP(RCAR_CAN_BCR_BRP, bt->brp - 1) |
+ FIELD_PREP(RCAR_CAN_BCR_SJW, bt->sjw - 1) |
+ FIELD_PREP(RCAR_CAN_BCR_TSEG2, bt->phase_seg2 - 1);
/* Don't overwrite CLKR with 32-bit BCR access; CLKR has 8-bit access.
* All the registers are big-endian but they get byte-swapped on 32-bit
* read/write (but not on 8-bit, contrary to the manuals)...
@@ -452,16 +456,17 @@ static void rcar_can_start(struct net_device *ndev)
ctlr &= ~RCAR_CAN_CTLR_SLPM;
writew(ctlr, &priv->regs->ctlr);
/* Go to reset mode */
- ctlr |= RCAR_CAN_CTLR_CANM_FORCE_RESET;
+ ctlr |= FIELD_PREP(RCAR_CAN_CTLR_CANM, RCAR_CAN_CTLR_CANM_FORCE_RESET);
writew(ctlr, &priv->regs->ctlr);
for (i = 0; i < MAX_STR_READS; i++) {
if (readw(&priv->regs->str) & RCAR_CAN_STR_RSTST)
break;
}
rcar_can_set_bittiming(ndev);
- ctlr |= RCAR_CAN_CTLR_IDFM_MIXED; /* Select mixed ID mode */
- ctlr |= RCAR_CAN_CTLR_BOM_ENT; /* Entry to halt mode automatically */
- /* at bus-off */
+ /* Select mixed ID mode */
+ ctlr |= FIELD_PREP(RCAR_CAN_CTLR_IDFM, RCAR_CAN_CTLR_IDFM_MIXED);
+ /* Entry to halt mode automatically at bus-off */
+ ctlr |= FIELD_PREP(RCAR_CAN_CTLR_BOM, RCAR_CAN_CTLR_BOM_ENT);
ctlr |= RCAR_CAN_CTLR_MBM; /* Select FIFO mailbox mode */
ctlr |= RCAR_CAN_CTLR_MLM; /* Overrun mode */
writew(ctlr, &priv->regs->ctlr);
@@ -491,7 +496,9 @@ static void rcar_can_start(struct net_device *ndev)
priv->can.state = CAN_STATE_ERROR_ACTIVE;
/* Go to operation mode */
- writew(ctlr & ~RCAR_CAN_CTLR_CANM, &priv->regs->ctlr);
+ ctlr &= ~RCAR_CAN_CTLR_CANM;
+ ctlr |= FIELD_PREP(RCAR_CAN_CTLR_CANM, RCAR_CAN_CTLR_CANM_OPER);
+ writew(ctlr, &priv->regs->ctlr);
for (i = 0; i < MAX_STR_READS; i++) {
if (!(readw(&priv->regs->str) & RCAR_CAN_STR_RSTST))
break;
@@ -506,29 +513,28 @@ static int rcar_can_open(struct net_device *ndev)
struct rcar_can_priv *priv = netdev_priv(ndev);
int err;
- err = clk_prepare_enable(priv->clk);
+ err = pm_runtime_resume_and_get(ndev->dev.parent);
if (err) {
- netdev_err(ndev,
- "failed to enable peripheral clock, error %d\n",
- err);
+ netdev_err(ndev, "pm_runtime_resume_and_get() failed %pe\n",
+ ERR_PTR(err));
goto out;
}
err = clk_prepare_enable(priv->can_clk);
if (err) {
- netdev_err(ndev, "failed to enable CAN clock, error %d\n",
- err);
- goto out_clock;
+ netdev_err(ndev, "failed to enable CAN clock: %pe\n",
+ ERR_PTR(err));
+ goto out_rpm;
}
err = open_candev(ndev);
if (err) {
- netdev_err(ndev, "open_candev() failed, error %d\n", err);
+ netdev_err(ndev, "open_candev() failed %pe\n", ERR_PTR(err));
goto out_can_clock;
}
napi_enable(&priv->napi);
err = request_irq(ndev->irq, rcar_can_interrupt, 0, ndev->name, ndev);
if (err) {
- netdev_err(ndev, "request_irq(%d) failed, error %d\n",
- ndev->irq, err);
+ netdev_err(ndev, "request_irq(%d) failed %pe\n", ndev->irq,
+ ERR_PTR(err));
goto out_close;
}
rcar_can_start(ndev);
@@ -539,8 +545,8 @@ out_close:
close_candev(ndev);
out_can_clock:
clk_disable_unprepare(priv->can_clk);
-out_clock:
- clk_disable_unprepare(priv->clk);
+out_rpm:
+ pm_runtime_put(ndev->dev.parent);
out:
return err;
}
@@ -553,7 +559,7 @@ static void rcar_can_stop(struct net_device *ndev)
/* Go to (force) reset mode */
ctlr = readw(&priv->regs->ctlr);
- ctlr |= RCAR_CAN_CTLR_CANM_FORCE_RESET;
+ ctlr |= FIELD_PREP(RCAR_CAN_CTLR_CANM, RCAR_CAN_CTLR_CANM_FORCE_RESET);
writew(ctlr, &priv->regs->ctlr);
for (i = 0; i < MAX_STR_READS; i++) {
if (readw(&priv->regs->str) & RCAR_CAN_STR_RSTST)
@@ -578,7 +584,7 @@ static int rcar_can_close(struct net_device *ndev)
free_irq(ndev->irq, ndev);
napi_disable(&priv->napi);
clk_disable_unprepare(priv->can_clk);
- clk_disable_unprepare(priv->clk);
+ pm_runtime_put(ndev->dev.parent);
close_candev(ndev);
return 0;
}
@@ -594,9 +600,10 @@ static netdev_tx_t rcar_can_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
if (cf->can_id & CAN_EFF_FLAG) /* Extended frame format */
- data = (cf->can_id & CAN_EFF_MASK) | RCAR_CAN_IDE;
+ data = FIELD_PREP(RCAR_CAN_EID, cf->can_id & CAN_EFF_MASK) |
+ RCAR_CAN_IDE;
else /* Standard frame format */
- data = (cf->can_id & CAN_SFF_MASK) << RCAR_CAN_SID_SHIFT;
+ data = FIELD_PREP(RCAR_CAN_SID, cf->can_id & CAN_SFF_MASK);
if (cf->can_id & CAN_RTR_FLAG) { /* Remote transmission request */
data |= RCAR_CAN_RTR;
@@ -628,7 +635,6 @@ static const struct net_device_ops rcar_can_netdev_ops = {
.ndo_open = rcar_can_open,
.ndo_stop = rcar_can_close,
.ndo_start_xmit = rcar_can_start_xmit,
- .ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops rcar_can_ethtool_ops = {
@@ -651,9 +657,9 @@ static void rcar_can_rx_pkt(struct rcar_can_priv *priv)
data = readl(&priv->regs->mb[RCAR_CAN_RX_FIFO_MBX].id);
if (data & RCAR_CAN_IDE)
- cf->can_id = (data & CAN_EFF_MASK) | CAN_EFF_FLAG;
+ cf->can_id = FIELD_GET(RCAR_CAN_EID, data) | CAN_EFF_FLAG;
else
- cf->can_id = (data >> RCAR_CAN_SID_SHIFT) & CAN_SFF_MASK;
+ cf->can_id = FIELD_GET(RCAR_CAN_SID, data);
dlc = readb(&priv->regs->mb[RCAR_CAN_RX_FIFO_MBX].dlc);
cf->len = can_cc_dlc2len(dlc);
@@ -715,18 +721,21 @@ static int rcar_can_do_set_mode(struct net_device *ndev, enum can_mode mode)
}
}
-static int rcar_can_get_berr_counter(const struct net_device *dev,
+static int rcar_can_get_berr_counter(const struct net_device *ndev,
struct can_berr_counter *bec)
{
- struct rcar_can_priv *priv = netdev_priv(dev);
+ struct rcar_can_priv *priv = netdev_priv(ndev);
int err;
- err = clk_prepare_enable(priv->clk);
+ err = pm_runtime_resume_and_get(ndev->dev.parent);
if (err)
return err;
+
bec->txerr = readb(&priv->regs->tecr);
bec->rxerr = readb(&priv->regs->recr);
- clk_disable_unprepare(priv->clk);
+
+ pm_runtime_put(ndev->dev.parent);
+
return 0;
}
@@ -738,6 +747,7 @@ static const char * const clock_names[] = {
static int rcar_can_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct rcar_can_priv *priv;
struct net_device *ndev;
void __iomem *addr;
@@ -745,7 +755,7 @@ static int rcar_can_probe(struct platform_device *pdev)
int err = -ENODEV;
int irq;
- of_property_read_u32(pdev->dev.of_node, "renesas,can-clock-select",
+ of_property_read_u32(dev->of_node, "renesas,can-clock-select",
&clock_select);
irq = platform_get_irq(pdev, 0);
@@ -762,30 +772,21 @@ static int rcar_can_probe(struct platform_device *pdev)
ndev = alloc_candev(sizeof(struct rcar_can_priv), RCAR_CAN_FIFO_DEPTH);
if (!ndev) {
- dev_err(&pdev->dev, "alloc_candev() failed\n");
err = -ENOMEM;
goto fail;
}
priv = netdev_priv(ndev);
- priv->clk = devm_clk_get(&pdev->dev, "clkp1");
- if (IS_ERR(priv->clk)) {
- err = PTR_ERR(priv->clk);
- dev_err(&pdev->dev, "cannot get peripheral clock, error %d\n",
- err);
- goto fail_clk;
- }
-
if (!(BIT(clock_select) & RCAR_SUPPORTED_CLOCKS)) {
err = -EINVAL;
- dev_err(&pdev->dev, "invalid CAN clock selected\n");
+ dev_err(dev, "invalid CAN clock selected\n");
goto fail_clk;
}
- priv->can_clk = devm_clk_get(&pdev->dev, clock_names[clock_select]);
+ priv->can_clk = devm_clk_get(dev, clock_names[clock_select]);
if (IS_ERR(priv->can_clk)) {
+ dev_err(dev, "cannot get CAN clock: %pe\n", priv->can_clk);
err = PTR_ERR(priv->can_clk);
- dev_err(&pdev->dev, "cannot get CAN clock, error %d\n", err);
goto fail_clk;
}
@@ -802,21 +803,24 @@ static int rcar_can_probe(struct platform_device *pdev)
priv->can.do_get_berr_counter = rcar_can_get_berr_counter;
priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING;
platform_set_drvdata(pdev, ndev);
- SET_NETDEV_DEV(ndev, &pdev->dev);
+ SET_NETDEV_DEV(ndev, dev);
netif_napi_add_weight(ndev, &priv->napi, rcar_can_rx_poll,
RCAR_CAN_NAPI_WEIGHT);
+
+ pm_runtime_enable(dev);
+
err = register_candev(ndev);
if (err) {
- dev_err(&pdev->dev, "register_candev() failed, error %d\n",
- err);
- goto fail_candev;
+ dev_err(dev, "register_candev() failed %pe\n", ERR_PTR(err));
+ goto fail_rpm;
}
- dev_info(&pdev->dev, "device registered (IRQ%d)\n", ndev->irq);
+ dev_info(dev, "device registered (IRQ%d)\n", ndev->irq);
return 0;
-fail_candev:
+fail_rpm:
+ pm_runtime_disable(dev);
netif_napi_del(&priv->napi);
fail_clk:
free_candev(ndev);
@@ -830,11 +834,12 @@ static void rcar_can_remove(struct platform_device *pdev)
struct rcar_can_priv *priv = netdev_priv(ndev);
unregister_candev(ndev);
+ pm_runtime_disable(&pdev->dev);
netif_napi_del(&priv->napi);
free_candev(ndev);
}
-static int __maybe_unused rcar_can_suspend(struct device *dev)
+static int rcar_can_suspend(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct rcar_can_priv *priv = netdev_priv(ndev);
@@ -847,38 +852,32 @@ static int __maybe_unused rcar_can_suspend(struct device *dev)
netif_device_detach(ndev);
ctlr = readw(&priv->regs->ctlr);
- ctlr |= RCAR_CAN_CTLR_CANM_HALT;
+ ctlr |= FIELD_PREP(RCAR_CAN_CTLR_CANM, RCAR_CAN_CTLR_CANM_HALT);
writew(ctlr, &priv->regs->ctlr);
ctlr |= RCAR_CAN_CTLR_SLPM;
writew(ctlr, &priv->regs->ctlr);
priv->can.state = CAN_STATE_SLEEPING;
- clk_disable(priv->clk);
+ pm_runtime_put(dev);
return 0;
}
-static int __maybe_unused rcar_can_resume(struct device *dev)
+static int rcar_can_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
- struct rcar_can_priv *priv = netdev_priv(ndev);
- u16 ctlr;
int err;
if (!netif_running(ndev))
return 0;
- err = clk_enable(priv->clk);
+ err = pm_runtime_resume_and_get(dev);
if (err) {
- netdev_err(ndev, "clk_enable() failed, error %d\n", err);
+ netdev_err(ndev, "pm_runtime_resume_and_get() failed %pe\n",
+ ERR_PTR(err));
return err;
}
- ctlr = readw(&priv->regs->ctlr);
- ctlr &= ~RCAR_CAN_CTLR_SLPM;
- writew(ctlr, &priv->regs->ctlr);
- ctlr &= ~RCAR_CAN_CTLR_CANM;
- writew(ctlr, &priv->regs->ctlr);
- priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ rcar_can_start(ndev);
netif_device_attach(ndev);
netif_start_queue(ndev);
@@ -886,7 +885,8 @@ static int __maybe_unused rcar_can_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(rcar_can_pm_ops, rcar_can_suspend, rcar_can_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(rcar_can_pm_ops, rcar_can_suspend,
+ rcar_can_resume);
static const struct of_device_id rcar_can_of_table[] __maybe_unused = {
{ .compatible = "renesas,can-r8a7778" },
@@ -904,10 +904,10 @@ static struct platform_driver rcar_can_driver = {
.driver = {
.name = RCAR_CAN_DRV_NAME,
.of_match_table = of_match_ptr(rcar_can_of_table),
- .pm = &rcar_can_pm_ops,
+ .pm = pm_sleep_ptr(&rcar_can_pm_ops),
},
.probe = rcar_can_probe,
- .remove_new = rcar_can_remove,
+ .remove = rcar_can_remove,
};
module_platform_driver(rcar_can_driver);
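
Most of the rcar_can.c churn above is mechanical: open-coded shift/mask macros become GENMASK()/BIT() definitions accessed through FIELD_PREP()/FIELD_GET(). A small equivalence sketch for the standard CAN ID field (EXAMPLE_SID is a stand-in mirroring the RCAR_CAN_SID layout from the hunk above):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_SID	GENMASK(28, 18)	/* 11-bit standard ID, bits 28:18 */

static inline u32 example_pack_sid(u32 can_id)
{
	/* before: (can_id & CAN_SFF_MASK) << RCAR_CAN_SID_SHIFT */
	return FIELD_PREP(EXAMPLE_SID, can_id & 0x7ff);
}

static inline u32 example_unpack_sid(u32 mbx_id)
{
	/* before: (mbx_id >> RCAR_CAN_SID_SHIFT) & CAN_SFF_MASK */
	return FIELD_GET(EXAMPLE_SID, mbx_id);
}

The FIELD_PREP()/FIELD_GET() forms derive the shift from the mask at compile time, which is why the separate shift constants such as RCAR_CAN_SID_SHIFT and RCAR_CAN_TFCR_TFUST_SHIFT can be dropped, as the patch does.
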
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index b82842718735..7895e1fdea1c 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -21,6 +21,7 @@
* wherever it is modified to a readable name.
*/
+#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/can/dev.h>
@@ -74,33 +75,24 @@
#define RCANFD_GSTS_GNOPM (BIT(0) | BIT(1) | BIT(2) | BIT(3))
/* RSCFDnCFDGERFL / RSCFDnGERFL */
-#define RCANFD_GERFL_EEF0_7 GENMASK(23, 16)
-#define RCANFD_GERFL_EEF(ch) BIT(16 + (ch))
+#define RCANFD_GERFL_EEF GENMASK(23, 16)
#define RCANFD_GERFL_CMPOF BIT(3) /* CAN FD only */
#define RCANFD_GERFL_THLES BIT(2)
#define RCANFD_GERFL_MES BIT(1)
#define RCANFD_GERFL_DEF BIT(0)
#define RCANFD_GERFL_ERR(gpriv, x) \
- ((x) & (reg_gen4(gpriv, RCANFD_GERFL_EEF0_7, \
- RCANFD_GERFL_EEF(0) | RCANFD_GERFL_EEF(1)) | \
- RCANFD_GERFL_MES | \
- ((gpriv)->fdmode ? RCANFD_GERFL_CMPOF : 0)))
+({\
+ typeof(gpriv) (_gpriv) = (gpriv); \
+ ((x) & ((FIELD_PREP(RCANFD_GERFL_EEF, (_gpriv)->channels_mask)) | \
+ RCANFD_GERFL_MES | ((_gpriv)->fdmode ? RCANFD_GERFL_CMPOF : 0))); \
+})
/* AFL Rx rules registers */
-/* RSCFDnCFDGAFLCFG0 / RSCFDnGAFLCFG0 */
-#define RCANFD_GAFLCFG_SETRNC(gpriv, n, x) \
- (((x) & reg_gen4(gpriv, 0x1ff, 0xff)) << \
- (reg_gen4(gpriv, 16, 24) - ((n) & 1) * reg_gen4(gpriv, 16, 8)))
-
-#define RCANFD_GAFLCFG_GETRNC(gpriv, n, x) \
- (((x) >> (reg_gen4(gpriv, 16, 24) - ((n) & 1) * reg_gen4(gpriv, 16, 8))) & \
- reg_gen4(gpriv, 0x1ff, 0xff))
-
/* RSCFDnCFDGAFLECTR / RSCFDnGAFLECTR */
#define RCANFD_GAFLECTR_AFLDAE BIT(8)
-#define RCANFD_GAFLECTR_AFLPN(gpriv, x) ((x) & reg_gen4(gpriv, 0x7f, 0x1f))
+#define RCANFD_GAFLECTR_AFLPN(gpriv, page_num) ((page_num) & (gpriv)->info->max_aflpn)
/* RSCFDnCFDGAFLIDj / RSCFDnGAFLIDj */
#define RCANFD_GAFLID_GAFLLB BIT(29)
@@ -111,22 +103,13 @@
/* Channel register bits */
/* RSCFDnCmCFG - Classical CAN only */
-#define RCANFD_CFG_SJW(x) (((x) & 0x3) << 24)
-#define RCANFD_CFG_TSEG2(x) (((x) & 0x7) << 20)
-#define RCANFD_CFG_TSEG1(x) (((x) & 0xf) << 16)
-#define RCANFD_CFG_BRP(x) (((x) & 0x3ff) << 0)
+#define RCANFD_CFG_SJW GENMASK(25, 24)
+#define RCANFD_CFG_TSEG2 GENMASK(22, 20)
+#define RCANFD_CFG_TSEG1 GENMASK(19, 16)
+#define RCANFD_CFG_BRP GENMASK(9, 0)
/* RSCFDnCFDCmNCFG - CAN FD only */
-#define RCANFD_NCFG_NTSEG2(gpriv, x) \
- (((x) & reg_gen4(gpriv, 0x7f, 0x1f)) << reg_gen4(gpriv, 25, 24))
-
-#define RCANFD_NCFG_NTSEG1(gpriv, x) \
- (((x) & reg_gen4(gpriv, 0xff, 0x7f)) << reg_gen4(gpriv, 17, 16))
-
-#define RCANFD_NCFG_NSJW(gpriv, x) \
- (((x) & reg_gen4(gpriv, 0x7f, 0x1f)) << reg_gen4(gpriv, 10, 11))
-
-#define RCANFD_NCFG_NBRP(x) (((x) & 0x3ff) << 0)
+#define RCANFD_NCFG_NBRP GENMASK(9, 0)
/* RSCFDnCFDCmCTR / RSCFDnCmCTR */
#define RCANFD_CCTR_CTME BIT(24)
@@ -186,22 +169,24 @@
#define RCANFD_CERFL_ERR(x) ((x) & (0x7fff)) /* above bits 14:0 */
/* RSCFDnCFDCmDCFG */
-#define RCANFD_DCFG_DSJW(gpriv, x) (((x) & reg_gen4(gpriv, 0xf, 0x7)) << 24)
-
-#define RCANFD_DCFG_DTSEG2(gpriv, x) \
- (((x) & reg_gen4(gpriv, 0x0f, 0x7)) << reg_gen4(gpriv, 16, 20))
-
-#define RCANFD_DCFG_DTSEG1(gpriv, x) \
- (((x) & reg_gen4(gpriv, 0x1f, 0xf)) << reg_gen4(gpriv, 8, 16))
-
-#define RCANFD_DCFG_DBRP(x) (((x) & 0xff) << 0)
+#define RCANFD_DCFG_DBRP GENMASK(7, 0)
/* RSCFDnCFDCmFDCFG */
#define RCANFD_GEN4_FDCFG_CLOE BIT(30)
#define RCANFD_GEN4_FDCFG_FDOE BIT(28)
+#define RCANFD_FDCFG_TDCO GENMASK(23, 16)
#define RCANFD_FDCFG_TDCE BIT(9)
#define RCANFD_FDCFG_TDCOC BIT(8)
-#define RCANFD_FDCFG_TDCO(x) (((x) & 0x7f) >> 16)
+
+/* RSCFDnCFDCmFDSTS */
+#define RCANFD_FDSTS_SOC GENMASK(31, 24)
+#define RCANFD_FDSTS_EOC GENMASK(23, 16)
+#define RCANFD_GEN4_FDSTS_TDCVF BIT(15)
+#define RCANFD_GEN4_FDSTS_PNSTS GENMASK(13, 12)
+#define RCANFD_FDSTS_SOCO BIT(9)
+#define RCANFD_FDSTS_EOCO BIT(8)
+#define RCANFD_FDSTS_TDCVF BIT(7)
+#define RCANFD_FDSTS_TDCR GENMASK(7, 0)
/* RSCFDnCFDRFCCx */
#define RCANFD_RFCC_RFIM BIT(12)
@@ -222,8 +207,6 @@
/* RSCFDnCFDRFPTRx */
#define RCANFD_RFPTR_RFDLC(x) (((x) >> 28) & 0xf)
-#define RCANFD_RFPTR_RFPTR(x) (((x) >> 16) & 0xfff)
-#define RCANFD_RFPTR_RFTS(x) (((x) >> 0) & 0xffff)
/* RSCFDnCFDRFFDSTSx */
#define RCANFD_RFFDSTS_RFFDF BIT(2)
@@ -233,11 +216,14 @@
/* Common FIFO bits */
/* RSCFDnCFDCFCCk */
-#define RCANFD_CFCC_CFTML(gpriv, x) \
- (((x) & reg_gen4(gpriv, 0x1f, 0xf)) << reg_gen4(gpriv, 16, 20))
-#define RCANFD_CFCC_CFM(gpriv, x) (((x) & 0x3) << reg_gen4(gpriv, 8, 16))
+#define RCANFD_CFCC_CFTML(gpriv, cftml) \
+({\
+ typeof(gpriv) (_gpriv) = (gpriv); \
+ (((cftml) & (_gpriv)->info->max_cftml) << (_gpriv)->info->sh->cftml); \
+})
+#define RCANFD_CFCC_CFM(gpriv, x) (((x) & 0x3) << (gpriv)->info->sh->cfm)
#define RCANFD_CFCC_CFIM BIT(12)
-#define RCANFD_CFCC_CFDC(gpriv, x) (((x) & 0x7) << reg_gen4(gpriv, 21, 8))
+#define RCANFD_CFCC_CFDC(gpriv, x) (((x) & 0x7) << (gpriv)->info->sh->cfdc)
#define RCANFD_CFCC_CFPLS(x) (((x) & 0x7) << 4)
#define RCANFD_CFCC_CFTXIE BIT(2)
#define RCANFD_CFCC_CFE BIT(0)
@@ -252,12 +238,9 @@
/* RSCFDnCFDCFIDk */
#define RCANFD_CFID_CFIDE BIT(31)
#define RCANFD_CFID_CFRTR BIT(30)
-#define RCANFD_CFID_CFID_MASK(x) ((x) & 0x1fffffff)
/* RSCFDnCFDCFPTRk */
#define RCANFD_CFPTR_CFDLC(x) (((x) & 0xf) << 28)
-#define RCANFD_CFPTR_CFPTR(x) (((x) & 0xfff) << 16)
-#define RCANFD_CFPTR_CFTS(x) (((x) & 0xff) << 0)
/* RSCFDnCFDCFFDCSTSk */
#define RCANFD_CFFDCSTS_CFFDF BIT(2)
@@ -298,14 +281,14 @@
/* RSCFDnCFDGAFLECTR / RSCFDnGAFLECTR */
#define RCANFD_GAFLECTR (0x0098)
/* RSCFDnCFDGAFLCFG / RSCFDnGAFLCFG */
-#define RCANFD_GAFLCFG(ch) (0x009c + (0x04 * ((ch) / 2)))
+#define RCANFD_GAFLCFG(w) (0x009c + (0x04 * (w)))
/* RSCFDnCFDRMNB / RSCFDnRMNB */
#define RCANFD_RMNB (0x00a4)
/* RSCFDnCFDRMND / RSCFDnRMND */
#define RCANFD_RMND(y) (0x00a8 + (0x04 * (y)))
/* RSCFDnCFDRFCCx / RSCFDnRFCCx */
-#define RCANFD_RFCC(gpriv, x) (reg_gen4(gpriv, 0x00c0, 0x00b8) + (0x04 * (x)))
+#define RCANFD_RFCC(gpriv, x) ((gpriv)->info->regs->rfcc + (0x04 * (x)))
/* RSCFDnCFDRFSTSx / RSCFDnRFSTSx */
#define RCANFD_RFSTS(gpriv, x) (RCANFD_RFCC(gpriv, x) + 0x20)
/* RSCFDnCFDRFPCTRx / RSCFDnRFPCTRx */
@@ -315,67 +298,14 @@
/* RSCFDnCFDCFCCx / RSCFDnCFCCx */
#define RCANFD_CFCC(gpriv, ch, idx) \
- (reg_gen4(gpriv, 0x0120, 0x0118) + (0x0c * (ch)) + (0x04 * (idx)))
+ ((gpriv)->info->regs->cfcc + (0x0c * (ch)) + (0x04 * (idx)))
/* RSCFDnCFDCFSTSx / RSCFDnCFSTSx */
#define RCANFD_CFSTS(gpriv, ch, idx) \
- (reg_gen4(gpriv, 0x01e0, 0x0178) + (0x0c * (ch)) + (0x04 * (idx)))
+ ((gpriv)->info->regs->cfsts + (0x0c * (ch)) + (0x04 * (idx)))
/* RSCFDnCFDCFPCTRx / RSCFDnCFPCTRx */
#define RCANFD_CFPCTR(gpriv, ch, idx) \
- (reg_gen4(gpriv, 0x0240, 0x01d8) + (0x0c * (ch)) + (0x04 * (idx)))
-
-/* RSCFDnCFDFESTS / RSCFDnFESTS */
-#define RCANFD_FESTS (0x0238)
-/* RSCFDnCFDFFSTS / RSCFDnFFSTS */
-#define RCANFD_FFSTS (0x023c)
-/* RSCFDnCFDFMSTS / RSCFDnFMSTS */
-#define RCANFD_FMSTS (0x0240)
-/* RSCFDnCFDRFISTS / RSCFDnRFISTS */
-#define RCANFD_RFISTS (0x0244)
-/* RSCFDnCFDCFRISTS / RSCFDnCFRISTS */
-#define RCANFD_CFRISTS (0x0248)
-/* RSCFDnCFDCFTISTS / RSCFDnCFTISTS */
-#define RCANFD_CFTISTS (0x024c)
-
-/* RSCFDnCFDTMCp / RSCFDnTMCp */
-#define RCANFD_TMC(p) (0x0250 + (0x01 * (p)))
-/* RSCFDnCFDTMSTSp / RSCFDnTMSTSp */
-#define RCANFD_TMSTS(p) (0x02d0 + (0x01 * (p)))
-
-/* RSCFDnCFDTMTRSTSp / RSCFDnTMTRSTSp */
-#define RCANFD_TMTRSTS(y) (0x0350 + (0x04 * (y)))
-/* RSCFDnCFDTMTARSTSp / RSCFDnTMTARSTSp */
-#define RCANFD_TMTARSTS(y) (0x0360 + (0x04 * (y)))
-/* RSCFDnCFDTMTCSTSp / RSCFDnTMTCSTSp */
-#define RCANFD_TMTCSTS(y) (0x0370 + (0x04 * (y)))
-/* RSCFDnCFDTMTASTSp / RSCFDnTMTASTSp */
-#define RCANFD_TMTASTS(y) (0x0380 + (0x04 * (y)))
-/* RSCFDnCFDTMIECy / RSCFDnTMIECy */
-#define RCANFD_TMIEC(y) (0x0390 + (0x04 * (y)))
-
-/* RSCFDnCFDTXQCCm / RSCFDnTXQCCm */
-#define RCANFD_TXQCC(m) (0x03a0 + (0x04 * (m)))
-/* RSCFDnCFDTXQSTSm / RSCFDnTXQSTSm */
-#define RCANFD_TXQSTS(m) (0x03c0 + (0x04 * (m)))
-/* RSCFDnCFDTXQPCTRm / RSCFDnTXQPCTRm */
-#define RCANFD_TXQPCTR(m) (0x03e0 + (0x04 * (m)))
-
-/* RSCFDnCFDTHLCCm / RSCFDnTHLCCm */
-#define RCANFD_THLCC(m) (0x0400 + (0x04 * (m)))
-/* RSCFDnCFDTHLSTSm / RSCFDnTHLSTSm */
-#define RCANFD_THLSTS(m) (0x0420 + (0x04 * (m)))
-/* RSCFDnCFDTHLPCTRm / RSCFDnTHLPCTRm */
-#define RCANFD_THLPCTR(m) (0x0440 + (0x04 * (m)))
-
-/* RSCFDnCFDGTINTSTS0 / RSCFDnGTINTSTS0 */
-#define RCANFD_GTINTSTS0 (0x0460)
-/* RSCFDnCFDGTINTSTS1 / RSCFDnGTINTSTS1 */
-#define RCANFD_GTINTSTS1 (0x0464)
-/* RSCFDnCFDGTSTCFG / RSCFDnGTSTCFG */
-#define RCANFD_GTSTCFG (0x0468)
-/* RSCFDnCFDGTSTCTR / RSCFDnGTSTCTR */
-#define RCANFD_GTSTCTR (0x046c)
-/* RSCFDnCFDGLOCKK / RSCFDnGLOCKK */
-#define RCANFD_GLOCKK (0x047c)
+ ((gpriv)->info->regs->cfpctr + (0x0c * (ch)) + (0x04 * (idx)))
+
/* RSCFDnCFDGRMCFG */
#define RCANFD_GRMCFG (0x04fc)
@@ -393,12 +323,6 @@
/* RSCFDnGAFLXXXj offset */
#define RCANFD_C_GAFL_OFFSET (0x0500)
-/* RSCFDnRMXXXq -> RCANFD_C_RMXXX(q) */
-#define RCANFD_C_RMID(q) (0x0600 + (0x10 * (q)))
-#define RCANFD_C_RMPTR(q) (0x0604 + (0x10 * (q)))
-#define RCANFD_C_RMDF0(q) (0x0608 + (0x10 * (q)))
-#define RCANFD_C_RMDF1(q) (0x060c + (0x10 * (q)))
-
/* RSCFDnRFXXx -> RCANFD_C_RFXX(x) */
#define RCANFD_C_RFOFFSET (0x0e00)
#define RCANFD_C_RFID(x) (RCANFD_C_RFOFFSET + (0x10 * (x)))
@@ -418,42 +342,26 @@
#define RCANFD_C_CFDF(ch, idx, df) \
(RCANFD_C_CFOFFSET + 0x08 + (0x30 * (ch)) + (0x10 * (idx)) + (0x04 * (df)))
-/* RSCFDnTMXXp -> RCANFD_C_TMXX(p) */
-#define RCANFD_C_TMID(p) (0x1000 + (0x10 * (p)))
-#define RCANFD_C_TMPTR(p) (0x1004 + (0x10 * (p)))
-#define RCANFD_C_TMDF0(p) (0x1008 + (0x10 * (p)))
-#define RCANFD_C_TMDF1(p) (0x100c + (0x10 * (p)))
-
-/* RSCFDnTHLACCm */
-#define RCANFD_C_THLACC(m) (0x1800 + (0x04 * (m)))
-/* RSCFDnRPGACCr */
-#define RCANFD_C_RPGACC(r) (0x1900 + (0x04 * (r)))
-
/* R-Car Gen4 Classical and CAN FD mode specific register map */
-#define RCANFD_GEN4_FDCFG(m) (0x1404 + (0x20 * (m)))
-
#define RCANFD_GEN4_GAFL_OFFSET (0x1800)
/* CAN FD mode specific register map */
-/* RSCFDnCFDCmXXX -> RCANFD_F_XXX(m) */
-#define RCANFD_F_DCFG(gpriv, m) (reg_gen4(gpriv, 0x1400, 0x0500) + (0x20 * (m)))
-#define RCANFD_F_CFDCFG(m) (0x0504 + (0x20 * (m)))
-#define RCANFD_F_CFDCTR(m) (0x0508 + (0x20 * (m)))
-#define RCANFD_F_CFDSTS(m) (0x050c + (0x20 * (m)))
-#define RCANFD_F_CFDCRC(m) (0x0510 + (0x20 * (m)))
+/* RSCFDnCFDCmXXX -> gpriv->fcbase[m].xxx */
+struct rcar_canfd_f_c {
+ u32 dcfg;
+ u32 cfdcfg;
+ u32 cfdctr;
+ u32 cfdsts;
+ u32 cfdcrc;
+ u32 pad[3];
+};
/* RSCFDnCFDGAFLXXXj offset */
#define RCANFD_F_GAFL_OFFSET (0x1000)
-/* RSCFDnCFDRMXXXq -> RCANFD_F_RMXXX(q) */
-#define RCANFD_F_RMID(q) (0x2000 + (0x20 * (q)))
-#define RCANFD_F_RMPTR(q) (0x2004 + (0x20 * (q)))
-#define RCANFD_F_RMFDSTS(q) (0x2008 + (0x20 * (q)))
-#define RCANFD_F_RMDF(q, b) (0x200c + (0x04 * (b)) + (0x20 * (q)))
-
/* RSCFDnCFDRFXXx -> RCANFD_F_RFXX(x) */
-#define RCANFD_F_RFOFFSET(gpriv) reg_gen4(gpriv, 0x6000, 0x3000)
+#define RCANFD_F_RFOFFSET(gpriv) ((gpriv)->info->regs->rfoffset)
#define RCANFD_F_RFID(gpriv, x) (RCANFD_F_RFOFFSET(gpriv) + (0x80 * (x)))
#define RCANFD_F_RFPTR(gpriv, x) (RCANFD_F_RFOFFSET(gpriv) + 0x04 + (0x80 * (x)))
#define RCANFD_F_RFFDSTS(gpriv, x) (RCANFD_F_RFOFFSET(gpriv) + 0x08 + (0x80 * (x)))
@@ -461,7 +369,7 @@
(RCANFD_F_RFOFFSET(gpriv) + 0x0c + (0x80 * (x)) + (0x04 * (df)))
/* RSCFDnCFDCFXXk -> RCANFD_F_CFXX(ch, k) */
-#define RCANFD_F_CFOFFSET(gpriv) reg_gen4(gpriv, 0x6400, 0x3400)
+#define RCANFD_F_CFOFFSET(gpriv) ((gpriv)->info->regs->cfoffset)
#define RCANFD_F_CFID(gpriv, ch, idx) \
(RCANFD_F_CFOFFSET(gpriv) + (0x180 * (ch)) + (0x80 * (idx)))
@@ -476,23 +384,11 @@
(RCANFD_F_CFOFFSET(gpriv) + 0x0c + (0x180 * (ch)) + (0x80 * (idx)) + \
(0x04 * (df)))
-/* RSCFDnCFDTMXXp -> RCANFD_F_TMXX(p) */
-#define RCANFD_F_TMID(p) (0x4000 + (0x20 * (p)))
-#define RCANFD_F_TMPTR(p) (0x4004 + (0x20 * (p)))
-#define RCANFD_F_TMFDCTR(p) (0x4008 + (0x20 * (p)))
-#define RCANFD_F_TMDF(p, b) (0x400c + (0x20 * (p)) + (0x04 * (b)))
-
-/* RSCFDnCFDTHLACCm */
-#define RCANFD_F_THLACC(m) (0x6000 + (0x04 * (m)))
-/* RSCFDnCFDRPGACCr */
-#define RCANFD_F_RPGACC(r) (0x6400 + (0x04 * (r)))
-
/* Constants */
#define RCANFD_FIFO_DEPTH 8 /* Tx FIFO depth */
#define RCANFD_NAPI_WEIGHT 8 /* Rx poll quota */
#define RCANFD_NUM_CHANNELS 8 /* Eight channels max */
-#define RCANFD_CHANNELS_MASK BIT((RCANFD_NUM_CHANNELS) - 1)
#define RCANFD_GAFL_PAGENUM(entry) ((entry) / 16)
#define RCANFD_CHANNEL_NUMRULES 1 /* only one rule per channel */
@@ -508,20 +404,46 @@
*/
#define RCANFD_CFFIFO_IDX 0
-/* fCAN clock select register settings */
-enum rcar_canfd_fcanclk {
- RCANFD_CANFDCLK = 0, /* CANFD clock */
- RCANFD_EXTCLK, /* Externally input clock */
+struct rcar_canfd_global;
+
+struct rcar_canfd_regs {
+ u16 rfcc; /* RX FIFO Configuration/Control Register */
+ u16 cfcc; /* Common FIFO Configuration/Control Register */
+ u16 cfsts; /* Common FIFO Status Register */
+ u16 cfpctr; /* Common FIFO Pointer Control Register */
+ u16 coffset; /* Channel Data Bitrate Configuration Register */
+ u16 rfoffset; /* Receive FIFO buffer access ID register */
+ u16 cfoffset; /* Transmit/receive FIFO buffer access ID register */
};
-struct rcar_canfd_global;
+struct rcar_canfd_shift_data {
+ u8 ntseg2; /* Nominal Bit Rate Time Segment 2 Control */
+ u8 ntseg1; /* Nominal Bit Rate Time Segment 1 Control */
+ u8 nsjw; /* Nominal Bit Rate Resynchronization Jump Width Control */
+ u8 dtseg2; /* Data Bit Rate Time Segment 2 Control */
+ u8 dtseg1; /* Data Bit Rate Time Segment 1 Control */
+ u8 cftml; /* Common FIFO TX Message Buffer Link */
+ u8 cfm; /* Common FIFO Mode */
+ u8 cfdc; /* Common FIFO Depth Configuration */
+};
struct rcar_canfd_hw_info {
+ const struct can_bittiming_const *nom_bittiming;
+ const struct can_bittiming_const *data_bittiming;
+ const struct can_tdc_const *tdc_const;
+ const struct rcar_canfd_regs *regs;
+ const struct rcar_canfd_shift_data *sh;
+ u8 rnc_field_width;
+ u8 max_aflpn;
+ u8 max_cftml;
u8 max_channels;
u8 postdiv;
/* hardware features */
unsigned shared_global_irqs:1; /* Has shared global irqs */
unsigned multi_channel_irqs:1; /* Has multiple channel irqs */
+ unsigned ch_interface_mode:1; /* Has channel interface mode */
+ unsigned shared_can_regs:1; /* Has shared classical can registers */
+ unsigned external_clk:1; /* Has external clock */
};
/* Channel priv data */
@@ -542,11 +464,13 @@ struct rcar_canfd_channel {
struct rcar_canfd_global {
struct rcar_canfd_channel *ch[RCANFD_NUM_CHANNELS];
void __iomem *base; /* Register base address */
+ struct rcar_canfd_f_c __iomem *fcbase;
struct platform_device *pdev; /* Respective platform device */
struct clk *clkp; /* Peripheral clock */
struct clk *can_clk; /* fCAN clock */
- enum rcar_canfd_fcanclk fcan; /* CANFD or Ext clock */
+ struct clk *clk_ram; /* Clock RAM */
unsigned long channels_mask; /* Enabled channels mask */
+ bool extclk; /* CANFD or Ext clock */
bool fdmode; /* CAN FD or Classical CAN only mode */
struct reset_control *rstc1;
struct reset_control *rstc2;
@@ -554,7 +478,7 @@ struct rcar_canfd_global {
};
/* CAN FD mode nominal rate constants */
-static const struct can_bittiming_const rcar_canfd_nom_bittiming_const = {
+static const struct can_bittiming_const rcar_canfd_gen3_nom_bittiming_const = {
.name = RCANFD_DRV_NAME,
.tseg1_min = 2,
.tseg1_max = 128,
@@ -566,8 +490,20 @@ static const struct can_bittiming_const rcar_canfd_nom_bittiming_const = {
.brp_inc = 1,
};
+static const struct can_bittiming_const rcar_canfd_gen4_nom_bittiming_const = {
+ .name = RCANFD_DRV_NAME,
+ .tseg1_min = 2,
+ .tseg1_max = 256,
+ .tseg2_min = 2,
+ .tseg2_max = 128,
+ .sjw_max = 128,
+ .brp_min = 1,
+ .brp_max = 1024,
+ .brp_inc = 1,
+};
+
/* CAN FD mode data rate constants */
-static const struct can_bittiming_const rcar_canfd_data_bittiming_const = {
+static const struct can_bittiming_const rcar_canfd_gen3_data_bittiming_const = {
.name = RCANFD_DRV_NAME,
.tseg1_min = 2,
.tseg1_max = 16,
@@ -579,6 +515,18 @@ static const struct can_bittiming_const rcar_canfd_data_bittiming_const = {
.brp_inc = 1,
};
+static const struct can_bittiming_const rcar_canfd_gen4_data_bittiming_const = {
+ .name = RCANFD_DRV_NAME,
+ .tseg1_min = 2,
+ .tseg1_max = 32,
+ .tseg2_min = 2,
+ .tseg2_max = 16,
+ .sjw_max = 16,
+ .brp_min = 1,
+ .brp_max = 256,
+ .brp_inc = 1,
+};
+
/* Classical CAN mode bitrate constants */
static const struct can_bittiming_const rcar_canfd_bittiming_const = {
.name = RCANFD_DRV_NAME,
@@ -592,36 +540,136 @@ static const struct can_bittiming_const rcar_canfd_bittiming_const = {
.brp_inc = 1,
};
+/* CAN FD Transceiver Delay Compensation constants */
+static const struct can_tdc_const rcar_canfd_gen3_tdc_const = {
+ .tdcv_min = 1,
+ .tdcv_max = 128,
+ .tdco_min = 1,
+ .tdco_max = 128,
+ .tdcf_min = 0, /* Filter window not supported */
+ .tdcf_max = 0,
+};
+
+static const struct can_tdc_const rcar_canfd_gen4_tdc_const = {
+ .tdcv_min = 1,
+ .tdcv_max = 256,
+ .tdco_min = 1,
+ .tdco_max = 256,
+ .tdcf_min = 0, /* Filter window not supported */
+ .tdcf_max = 0,
+};
+
+static const struct rcar_canfd_regs rcar_gen3_regs = {
+ .rfcc = 0x00b8,
+ .cfcc = 0x0118,
+ .cfsts = 0x0178,
+ .cfpctr = 0x01d8,
+ .coffset = 0x0500,
+ .rfoffset = 0x3000,
+ .cfoffset = 0x3400,
+};
+
+static const struct rcar_canfd_regs rcar_gen4_regs = {
+ .rfcc = 0x00c0,
+ .cfcc = 0x0120,
+ .cfsts = 0x01e0,
+ .cfpctr = 0x0240,
+ .coffset = 0x1400,
+ .rfoffset = 0x6000,
+ .cfoffset = 0x6400,
+};
+
+static const struct rcar_canfd_shift_data rcar_gen3_shift_data = {
+ .ntseg2 = 24,
+ .ntseg1 = 16,
+ .nsjw = 11,
+ .dtseg2 = 20,
+ .dtseg1 = 16,
+ .cftml = 20,
+ .cfm = 16,
+ .cfdc = 8,
+};
+
+static const struct rcar_canfd_shift_data rcar_gen4_shift_data = {
+ .ntseg2 = 25,
+ .ntseg1 = 17,
+ .nsjw = 10,
+ .dtseg2 = 16,
+ .dtseg1 = 8,
+ .cftml = 16,
+ .cfm = 8,
+ .cfdc = 21,
+};
+
static const struct rcar_canfd_hw_info rcar_gen3_hw_info = {
+ .nom_bittiming = &rcar_canfd_gen3_nom_bittiming_const,
+ .data_bittiming = &rcar_canfd_gen3_data_bittiming_const,
+ .tdc_const = &rcar_canfd_gen3_tdc_const,
+ .regs = &rcar_gen3_regs,
+ .sh = &rcar_gen3_shift_data,
+ .rnc_field_width = 8,
+ .max_aflpn = 31,
+ .max_cftml = 15,
.max_channels = 2,
.postdiv = 2,
.shared_global_irqs = 1,
+ .ch_interface_mode = 0,
+ .shared_can_regs = 0,
+ .external_clk = 1,
};
static const struct rcar_canfd_hw_info rcar_gen4_hw_info = {
+ .nom_bittiming = &rcar_canfd_gen4_nom_bittiming_const,
+ .data_bittiming = &rcar_canfd_gen4_data_bittiming_const,
+ .tdc_const = &rcar_canfd_gen4_tdc_const,
+ .regs = &rcar_gen4_regs,
+ .sh = &rcar_gen4_shift_data,
+ .rnc_field_width = 16,
+ .max_aflpn = 127,
+ .max_cftml = 31,
.max_channels = 8,
.postdiv = 2,
.shared_global_irqs = 1,
+ .ch_interface_mode = 1,
+ .shared_can_regs = 1,
+ .external_clk = 1,
};
static const struct rcar_canfd_hw_info rzg2l_hw_info = {
+ .nom_bittiming = &rcar_canfd_gen3_nom_bittiming_const,
+ .data_bittiming = &rcar_canfd_gen3_data_bittiming_const,
+ .tdc_const = &rcar_canfd_gen3_tdc_const,
+ .regs = &rcar_gen3_regs,
+ .sh = &rcar_gen3_shift_data,
+ .rnc_field_width = 8,
+ .max_aflpn = 31,
+ .max_cftml = 15,
.max_channels = 2,
.postdiv = 1,
.multi_channel_irqs = 1,
+ .ch_interface_mode = 0,
+ .shared_can_regs = 0,
+ .external_clk = 1,
};
-/* Helper functions */
-static inline bool is_gen4(struct rcar_canfd_global *gpriv)
-{
- return gpriv->info == &rcar_gen4_hw_info;
-}
-
-static inline u32 reg_gen4(struct rcar_canfd_global *gpriv,
- u32 gen4, u32 not_gen4)
-{
- return is_gen4(gpriv) ? gen4 : not_gen4;
-}
+static const struct rcar_canfd_hw_info r9a09g047_hw_info = {
+ .nom_bittiming = &rcar_canfd_gen4_nom_bittiming_const,
+ .data_bittiming = &rcar_canfd_gen4_data_bittiming_const,
+ .tdc_const = &rcar_canfd_gen4_tdc_const,
+ .regs = &rcar_gen4_regs,
+ .sh = &rcar_gen4_shift_data,
+ .rnc_field_width = 16,
+ .max_aflpn = 63,
+ .max_cftml = 31,
+ .max_channels = 6,
+ .postdiv = 1,
+ .multi_channel_irqs = 1,
+ .ch_interface_mode = 1,
+ .shared_can_regs = 1,
+ .external_clk = 0,
+};
+/* Helper functions */
static inline void rcar_canfd_update(u32 mask, u32 val, u32 __iomem *reg)
{
u32 data = readl(reg);
@@ -633,50 +681,65 @@ static inline void rcar_canfd_update(u32 mask, u32 val, u32 __iomem *reg)
static inline u32 rcar_canfd_read(void __iomem *base, u32 offset)
{
- return readl(base + (offset));
+ return readl(base + offset);
}
static inline void rcar_canfd_write(void __iomem *base, u32 offset, u32 val)
{
- writel(val, base + (offset));
+ writel(val, base + offset);
}
static void rcar_canfd_set_bit(void __iomem *base, u32 reg, u32 val)
{
- rcar_canfd_update(val, val, base + (reg));
+ rcar_canfd_update(val, val, base + reg);
}
static void rcar_canfd_clear_bit(void __iomem *base, u32 reg, u32 val)
{
- rcar_canfd_update(val, 0, base + (reg));
+ rcar_canfd_update(val, 0, base + reg);
}
static void rcar_canfd_update_bit(void __iomem *base, u32 reg,
u32 mask, u32 val)
{
- rcar_canfd_update(mask, val, base + (reg));
+ rcar_canfd_update(mask, val, base + reg);
+}
+
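+/* Same as the helpers above, but taking a fully computed register address */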
+static void rcar_canfd_set_bit_reg(void __iomem *addr, u32 val)
+{
+ rcar_canfd_update(val, val, addr);
+}
+
+static void rcar_canfd_clear_bit_reg(void __iomem *addr, u32 val)
+{
+ rcar_canfd_update(val, 0, addr);
+}
+
+static void rcar_canfd_update_bit_reg(void __iomem *addr, u32 mask, u32 val)
+{
+ rcar_canfd_update(mask, val, addr);
}
static void rcar_canfd_get_data(struct rcar_canfd_channel *priv,
struct canfd_frame *cf, u32 off)
{
+ u32 *data = (u32 *)cf->data;
u32 i, lwords;
lwords = DIV_ROUND_UP(cf->len, sizeof(u32));
for (i = 0; i < lwords; i++)
- *((u32 *)cf->data + i) =
- rcar_canfd_read(priv->base, off + (i * sizeof(u32)));
+ data[i] = rcar_canfd_read(priv->base, off + i * sizeof(u32));
}
static void rcar_canfd_put_data(struct rcar_canfd_channel *priv,
struct canfd_frame *cf, u32 off)
{
+ const u32 *data = (u32 *)cf->data;
u32 i, lwords;
lwords = DIV_ROUND_UP(cf->len, sizeof(u32));
for (i = 0; i < lwords; i++)
- rcar_canfd_write(priv->base, off + (i * sizeof(u32)),
- *((u32 *)cf->data + i));
+ rcar_canfd_write(priv->base, off + i * sizeof(u32), data[i]);
}
static void rcar_canfd_tx_failure_cleanup(struct net_device *ndev)
@@ -687,28 +750,20 @@ static void rcar_canfd_tx_failure_cleanup(struct net_device *ndev)
can_free_echo_skb(ndev, i, NULL);
}
-static void rcar_canfd_set_mode(struct rcar_canfd_global *gpriv)
+static void rcar_canfd_set_rnc(struct rcar_canfd_global *gpriv, unsigned int ch,
+ unsigned int num_rules)
{
- if (is_gen4(gpriv)) {
- u32 ch, val = gpriv->fdmode ? RCANFD_GEN4_FDCFG_FDOE
- : RCANFD_GEN4_FDCFG_CLOE;
-
- for_each_set_bit(ch, &gpriv->channels_mask,
- gpriv->info->max_channels)
- rcar_canfd_set_bit(gpriv->base, RCANFD_GEN4_FDCFG(ch),
- val);
- } else {
- if (gpriv->fdmode)
- rcar_canfd_set_bit(gpriv->base, RCANFD_GRMCFG,
- RCANFD_GRMCFG_RCMC);
- else
- rcar_canfd_clear_bit(gpriv->base, RCANFD_GRMCFG,
- RCANFD_GRMCFG_RCMC);
- }
+ unsigned int rnc_stride = 32 / gpriv->info->rnc_field_width;
+ unsigned int shift = 32 - (ch % rnc_stride + 1) * gpriv->info->rnc_field_width;
+ unsigned int w = ch / rnc_stride;
+ u32 rnc = num_rules << shift;
+
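+	/* Rule counts are packed MSB-first into 32-bit GAFLCFG words, with
+	 * rnc_field_width bits per channel: four channels per word on Gen3
+	 * (8-bit fields), two per word on Gen4 (16-bit fields).
+	 */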
+ rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLCFG(w), rnc);
}
static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
{
+ struct device *dev = &gpriv->pdev->dev;
u32 sts, ch;
int err;
@@ -718,7 +773,7 @@ static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
err = readl_poll_timeout((gpriv->base + RCANFD_GSTS), sts,
!(sts & RCANFD_GSTS_GRAMINIT), 2, 500000);
if (err) {
- dev_dbg(&gpriv->pdev->dev, "global raminit failed\n");
+ dev_dbg(dev, "global raminit failed\n");
return err;
}
@@ -731,7 +786,7 @@ static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
err = readl_poll_timeout((gpriv->base + RCANFD_GSTS), sts,
(sts & RCANFD_GSTS_GRSTSTS), 2, 500000);
if (err) {
- dev_dbg(&gpriv->pdev->dev, "global reset failed\n");
+ dev_dbg(dev, "global reset failed\n");
return err;
}
@@ -739,7 +794,14 @@ static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
rcar_canfd_write(gpriv->base, RCANFD_GERFL, 0x0);
/* Set the controller into appropriate mode */
- rcar_canfd_set_mode(gpriv);
+ if (!gpriv->info->ch_interface_mode) {
+ if (gpriv->fdmode)
+ rcar_canfd_set_bit(gpriv->base, RCANFD_GRMCFG,
+ RCANFD_GRMCFG_RCMC);
+ else
+ rcar_canfd_clear_bit(gpriv->base, RCANFD_GRMCFG,
+ RCANFD_GRMCFG_RCMC);
+ }
/* Transition all Channels to reset mode */
for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels) {
@@ -755,11 +817,27 @@ static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
(sts & RCANFD_CSTS_CRSTSTS),
2, 500000);
if (err) {
- dev_dbg(&gpriv->pdev->dev,
- "channel %u reset failed\n", ch);
+ dev_dbg(dev, "channel %u reset failed\n", ch);
return err;
}
+
+ /* Set the controller into appropriate mode */
+ if (gpriv->info->ch_interface_mode) {
+ /* Do not set CLOE and FDOE simultaneously */
+ if (!gpriv->fdmode) {
+ rcar_canfd_clear_bit_reg(&gpriv->fcbase[ch].cfdcfg,
+ RCANFD_GEN4_FDCFG_FDOE);
+ rcar_canfd_set_bit_reg(&gpriv->fcbase[ch].cfdcfg,
+ RCANFD_GEN4_FDCFG_CLOE);
+ } else {
+ rcar_canfd_clear_bit_reg(&gpriv->fcbase[ch].cfdcfg,
+ RCANFD_GEN4_FDCFG_FDOE);
+ rcar_canfd_clear_bit_reg(&gpriv->fcbase[ch].cfdcfg,
+ RCANFD_GEN4_FDCFG_CLOE);
+ }
+ }
}
+
return 0;
}
@@ -777,7 +855,7 @@ static void rcar_canfd_configure_controller(struct rcar_canfd_global *gpriv)
cfg |= RCANFD_GCFG_CMPOC;
/* Set External Clock if selected */
- if (gpriv->fcan != RCANFD_CANFDCLK)
+ if (gpriv->extclk)
cfg |= RCANFD_GCFG_DCS;
rcar_canfd_set_bit(gpriv->base, RCANFD_GCFG, cfg);
@@ -793,30 +871,21 @@ static void rcar_canfd_configure_controller(struct rcar_canfd_global *gpriv)
}
static void rcar_canfd_configure_afl_rules(struct rcar_canfd_global *gpriv,
- u32 ch)
+ u32 ch, u32 rule_entry)
{
- u32 cfg;
- int offset, start, page, num_rules = RCANFD_CHANNEL_NUMRULES;
+ unsigned int offset, page, num_rules = RCANFD_CHANNEL_NUMRULES;
+ u32 rule_entry_index = rule_entry % 16;
u32 ridx = ch + RCANFD_RFFIFO_IDX;
- if (ch == 0) {
- start = 0; /* Channel 0 always starts from 0th rule */
- } else {
- /* Get number of Channel 0 rules and adjust */
- cfg = rcar_canfd_read(gpriv->base, RCANFD_GAFLCFG(ch));
- start = RCANFD_GAFLCFG_GETRNC(gpriv, 0, cfg);
- }
-
/* Enable write access to entry */
- page = RCANFD_GAFL_PAGENUM(start);
+ page = RCANFD_GAFL_PAGENUM(rule_entry);
rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLECTR,
(RCANFD_GAFLECTR_AFLPN(gpriv, page) |
RCANFD_GAFLECTR_AFLDAE));
/* Write number of rules for channel */
- rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLCFG(ch),
- RCANFD_GAFLCFG_SETRNC(gpriv, ch, num_rules));
- if (is_gen4(gpriv))
+ rcar_canfd_set_rnc(gpriv, ch, num_rules);
+ if (gpriv->info->shared_can_regs)
offset = RCANFD_GEN4_GAFL_OFFSET;
else if (gpriv->fdmode)
offset = RCANFD_F_GAFL_OFFSET;
@@ -824,13 +893,13 @@ static void rcar_canfd_configure_afl_rules(struct rcar_canfd_global *gpriv,
offset = RCANFD_C_GAFL_OFFSET;
/* Accept all IDs */
- rcar_canfd_write(gpriv->base, RCANFD_GAFLID(offset, start), 0);
+ rcar_canfd_write(gpriv->base, RCANFD_GAFLID(offset, rule_entry_index), 0);
/* IDE or RTR is not considered for matching */
- rcar_canfd_write(gpriv->base, RCANFD_GAFLM(offset, start), 0);
+ rcar_canfd_write(gpriv->base, RCANFD_GAFLM(offset, rule_entry_index), 0);
/* Any data length accepted */
- rcar_canfd_write(gpriv->base, RCANFD_GAFLP0(offset, start), 0);
+ rcar_canfd_write(gpriv->base, RCANFD_GAFLP0(offset, rule_entry_index), 0);
/* Place the msg in corresponding Rx FIFO entry */
- rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLP1(offset, start),
+ rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLP1(offset, rule_entry_index),
RCANFD_GAFLP1_GAFLFDP(ridx));
/* Disable write access to page */
@@ -956,7 +1025,7 @@ static void rcar_canfd_global_error(struct net_device *ndev)
u32 ridx = ch + RCANFD_RFFIFO_IDX;
gerfl = rcar_canfd_read(priv->base, RCANFD_GERFL);
- if (gerfl & RCANFD_GERFL_EEF(ch)) {
+ if (gerfl & FIELD_PREP(RCANFD_GERFL_EEF, BIT(ch))) {
netdev_dbg(ndev, "Ch%u: ECC Error flag\n", ch);
stats->tx_dropped++;
}
@@ -1313,14 +1382,52 @@ static irqreturn_t rcar_canfd_channel_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void rcar_canfd_set_bittiming(struct net_device *dev)
+static inline u32 rcar_canfd_compute_nominal_bit_rate_cfg(struct rcar_canfd_channel *priv,
+ u16 tseg1, u16 tseg2, u16 sjw, u16 brp)
+{
+ struct rcar_canfd_global *gpriv = priv->gpriv;
+ const struct rcar_canfd_hw_info *info = gpriv->info;
+ u32 ntseg1, ntseg2, nsjw, nbrp;
+
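+	/* With CAN FD or shared classical/FD registers, the timing fields sit
+	 * at SoC-specific bit positions; their widths follow from the
+	 * bittiming maxima, which are powers of two.
+	 */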
+ if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || gpriv->info->shared_can_regs) {
+ ntseg1 = (tseg1 & (info->nom_bittiming->tseg1_max - 1)) << info->sh->ntseg1;
+ ntseg2 = (tseg2 & (info->nom_bittiming->tseg2_max - 1)) << info->sh->ntseg2;
+ nsjw = (sjw & (info->nom_bittiming->sjw_max - 1)) << info->sh->nsjw;
+ nbrp = FIELD_PREP(RCANFD_NCFG_NBRP, brp);
+ } else {
+ ntseg1 = FIELD_PREP(RCANFD_CFG_TSEG1, tseg1);
+ ntseg2 = FIELD_PREP(RCANFD_CFG_TSEG2, tseg2);
+ nsjw = FIELD_PREP(RCANFD_CFG_SJW, sjw);
+ nbrp = FIELD_PREP(RCANFD_CFG_BRP, brp);
+ }
+
+ return (ntseg1 | ntseg2 | nsjw | nbrp);
+}
+
+static inline u32 rcar_canfd_compute_data_bit_rate_cfg(const struct rcar_canfd_hw_info *info,
+ u16 tseg1, u16 tseg2, u16 sjw, u16 brp)
{
- struct rcar_canfd_channel *priv = netdev_priv(dev);
+ u32 dtseg1, dtseg2, dsjw, dbrp;
+
+ dtseg1 = (tseg1 & (info->data_bittiming->tseg1_max - 1)) << info->sh->dtseg1;
+ dtseg2 = (tseg2 & (info->data_bittiming->tseg2_max - 1)) << info->sh->dtseg2;
+ dsjw = (sjw & (info->data_bittiming->sjw_max - 1)) << 24;
+ dbrp = FIELD_PREP(RCANFD_DCFG_DBRP, brp);
+
+ return (dtseg1 | dtseg2 | dsjw | dbrp);
+}
+
+static void rcar_canfd_set_bittiming(struct net_device *ndev)
+{
+ u32 mask = RCANFD_FDCFG_TDCO | RCANFD_FDCFG_TDCE | RCANFD_FDCFG_TDCOC;
+ struct rcar_canfd_channel *priv = netdev_priv(ndev);
struct rcar_canfd_global *gpriv = priv->gpriv;
const struct can_bittiming *bt = &priv->can.bittiming;
- const struct can_bittiming *dbt = &priv->can.data_bittiming;
+ const struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
+ const struct can_tdc_const *tdc_const = priv->can.fd.tdc_const;
+ const struct can_tdc *tdc = &priv->can.fd.tdc;
+ u32 cfg, tdcmode = 0, tdco = 0;
u16 brp, sjw, tseg1, tseg2;
- u32 cfg;
u32 ch = priv->channel;
/* Nominal bit timing settings */
@@ -1328,47 +1435,33 @@ static void rcar_canfd_set_bittiming(struct net_device *dev)
sjw = bt->sjw - 1;
tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
tseg2 = bt->phase_seg2 - 1;
+ cfg = rcar_canfd_compute_nominal_bit_rate_cfg(priv, tseg1, tseg2, sjw, brp);
+ rcar_canfd_write(priv->base, RCANFD_CCFG(ch), cfg);
- if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
- /* CAN FD only mode */
- cfg = (RCANFD_NCFG_NTSEG1(gpriv, tseg1) | RCANFD_NCFG_NBRP(brp) |
- RCANFD_NCFG_NSJW(gpriv, sjw) | RCANFD_NCFG_NTSEG2(gpriv, tseg2));
-
- rcar_canfd_write(priv->base, RCANFD_CCFG(ch), cfg);
- netdev_dbg(priv->ndev, "nrate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n",
- brp, sjw, tseg1, tseg2);
-
- /* Data bit timing settings */
- brp = dbt->brp - 1;
- sjw = dbt->sjw - 1;
- tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1;
- tseg2 = dbt->phase_seg2 - 1;
-
- cfg = (RCANFD_DCFG_DTSEG1(gpriv, tseg1) | RCANFD_DCFG_DBRP(brp) |
- RCANFD_DCFG_DSJW(gpriv, sjw) | RCANFD_DCFG_DTSEG2(gpriv, tseg2));
-
- rcar_canfd_write(priv->base, RCANFD_F_DCFG(gpriv, ch), cfg);
- netdev_dbg(priv->ndev, "drate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n",
- brp, sjw, tseg1, tseg2);
- } else {
- /* Classical CAN only mode */
- if (is_gen4(gpriv)) {
- cfg = (RCANFD_NCFG_NTSEG1(gpriv, tseg1) |
- RCANFD_NCFG_NBRP(brp) |
- RCANFD_NCFG_NSJW(gpriv, sjw) |
- RCANFD_NCFG_NTSEG2(gpriv, tseg2));
- } else {
- cfg = (RCANFD_CFG_TSEG1(tseg1) |
- RCANFD_CFG_BRP(brp) |
- RCANFD_CFG_SJW(sjw) |
- RCANFD_CFG_TSEG2(tseg2));
- }
+ if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD))
+ return;
- rcar_canfd_write(priv->base, RCANFD_CCFG(ch), cfg);
- netdev_dbg(priv->ndev,
- "rate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n",
- brp, sjw, tseg1, tseg2);
+ /* Data bit timing settings */
+ brp = dbt->brp - 1;
+ sjw = dbt->sjw - 1;
+ tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1;
+ tseg2 = dbt->phase_seg2 - 1;
+ cfg = rcar_canfd_compute_data_bit_rate_cfg(gpriv->info, tseg1, tseg2, sjw, brp);
+ writel(cfg, &gpriv->fcbase[ch].dcfg);
+
+ /* Transceiver Delay Compensation */
+ if (priv->can.ctrlmode & CAN_CTRLMODE_TDC_AUTO) {
+ /* TDC enabled, measured + offset */
+ tdcmode = RCANFD_FDCFG_TDCE;
+ tdco = tdc->tdco - 1;
+ } else if (priv->can.ctrlmode & CAN_CTRLMODE_TDC_MANUAL) {
+ /* TDC enabled, offset only */
+ tdcmode = RCANFD_FDCFG_TDCE | RCANFD_FDCFG_TDCOC;
+ tdco = min(tdc->tdcv + tdc->tdco, tdc_const->tdco_max) - 1;
}
+
+ rcar_canfd_update_bit_reg(&gpriv->fcbase[ch].cfdcfg, mask,
+ tdcmode | FIELD_PREP(RCANFD_FDCFG_TDCO, tdco));
}
static int rcar_canfd_start(struct net_device *ndev)
@@ -1486,8 +1579,8 @@ static int rcar_canfd_close(struct net_device *ndev)
netif_stop_queue(ndev);
rcar_canfd_stop(ndev);
napi_disable(&priv->napi);
- clk_disable_unprepare(gpriv->can_clk);
close_candev(ndev);
+ clk_disable_unprepare(gpriv->can_clk);
phy_power_off(priv->transceiver);
return 0;
}
@@ -1517,7 +1610,7 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
dlc = RCANFD_CFPTR_CFDLC(can_fd_len2dlc(cf->len));
- if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || is_gen4(gpriv)) {
+ if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || gpriv->info->shared_can_regs) {
rcar_canfd_write(priv->base,
RCANFD_F_CFID(gpriv, ch, RCANFD_CFFIFO_IDX), id);
rcar_canfd_write(priv->base,
@@ -1568,7 +1661,8 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
{
- struct net_device_stats *stats = &priv->ndev->stats;
+ struct net_device *ndev = priv->ndev;
+ struct net_device_stats *stats = &ndev->stats;
struct rcar_canfd_global *gpriv = priv->gpriv;
struct canfd_frame *cf;
struct sk_buff *skb;
@@ -1576,7 +1670,7 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
u32 ch = priv->channel;
u32 ridx = ch + RCANFD_RFFIFO_IDX;
- if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || is_gen4(gpriv)) {
+ if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || gpriv->info->shared_can_regs) {
id = rcar_canfd_read(priv->base, RCANFD_F_RFID(gpriv, ridx));
dlc = rcar_canfd_read(priv->base, RCANFD_F_RFPTR(gpriv, ridx));
@@ -1584,14 +1678,13 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) &&
sts & RCANFD_RFFDSTS_RFFDF)
- skb = alloc_canfd_skb(priv->ndev, &cf);
+ skb = alloc_canfd_skb(ndev, &cf);
else
- skb = alloc_can_skb(priv->ndev,
- (struct can_frame **)&cf);
+ skb = alloc_can_skb(ndev, (struct can_frame **)&cf);
} else {
id = rcar_canfd_read(priv->base, RCANFD_C_RFID(ridx));
dlc = rcar_canfd_read(priv->base, RCANFD_C_RFPTR(ridx));
- skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cf);
+ skb = alloc_can_skb(ndev, (struct can_frame **)&cf);
}
if (!skb) {
@@ -1612,7 +1705,7 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
if (sts & RCANFD_RFFDSTS_RFESI) {
cf->flags |= CANFD_ESI;
- netdev_dbg(priv->ndev, "ESI Error\n");
+ netdev_dbg(ndev, "ESI Error\n");
}
if (!(sts & RCANFD_RFFDSTS_RFFDF) && (id & RCANFD_RFID_RFRTR)) {
@@ -1627,7 +1720,7 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
cf->len = can_cc_dlc2len(RCANFD_RFPTR_RFDLC(dlc));
if (id & RCANFD_RFID_RFRTR)
cf->can_id |= CAN_RTR_FLAG;
- else if (is_gen4(gpriv))
+ else if (gpriv->info->shared_can_regs)
rcar_canfd_get_data(priv, cf, RCANFD_F_RFDF(gpriv, ridx, 0));
else
rcar_canfd_get_data(priv, cf, RCANFD_C_RFDF(ridx, 0));
@@ -1679,6 +1772,29 @@ static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota)
return num_pkts;
}
+static unsigned int rcar_canfd_get_tdcr(struct rcar_canfd_global *gpriv,
+ unsigned int ch)
+{
+ u32 sts = readl(&gpriv->fcbase[ch].cfdsts);
+ u32 tdcr = FIELD_GET(RCANFD_FDSTS_TDCR, sts);
+
+ return tdcr & (gpriv->info->tdc_const->tdcv_max - 1);
+}
+
+static int rcar_canfd_get_auto_tdcv(const struct net_device *ndev, u32 *tdcv)
+{
+ struct rcar_canfd_channel *priv = netdev_priv(ndev);
+ u32 tdco = priv->can.fd.tdc.tdco;
+ u32 tdcr;
+
+ /* Transceiver Delay Compensation Result */
+ tdcr = rcar_canfd_get_tdcr(priv->gpriv, priv->channel) + 1;
+
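+	/* The result read back includes the configured offset, so subtract
+	 * TDCO to recover the measured transceiver delay (TDCV).
+	 */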
+ *tdcv = tdcr < tdco ? 0 : tdcr - tdco;
+
+ return 0;
+}
+
static int rcar_canfd_do_set_mode(struct net_device *ndev, enum can_mode mode)
{
int err;
@@ -1695,10 +1811,10 @@ static int rcar_canfd_do_set_mode(struct net_device *ndev, enum can_mode mode)
}
}
-static int rcar_canfd_get_berr_counter(const struct net_device *dev,
+static int rcar_canfd_get_berr_counter(const struct net_device *ndev,
struct can_berr_counter *bec)
{
- struct rcar_canfd_channel *priv = netdev_priv(dev);
+ struct rcar_canfd_channel *priv = netdev_priv(ndev);
u32 val, ch = priv->channel;
/* Peripheral clock is already enabled in probe */
@@ -1712,7 +1828,6 @@ static const struct net_device_ops rcar_canfd_netdev_ops = {
.ndo_open = rcar_canfd_open,
.ndo_stop = rcar_canfd_close,
.ndo_start_xmit = rcar_canfd_start_xmit,
- .ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops rcar_canfd_ethtool_ops = {
@@ -1750,16 +1865,19 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
if (info->multi_channel_irqs) {
char *irq_name;
+ char name[10];
int err_irq;
int tx_irq;
- err_irq = platform_get_irq_byname(pdev, ch == 0 ? "ch0_err" : "ch1_err");
+ scnprintf(name, sizeof(name), "ch%u_err", ch);
+ err_irq = platform_get_irq_byname(pdev, name);
if (err_irq < 0) {
err = err_irq;
goto fail;
}
- tx_irq = platform_get_irq_byname(pdev, ch == 0 ? "ch0_trx" : "ch1_trx");
+ scnprintf(name, sizeof(name), "ch%u_trx", ch);
+ tx_irq = platform_get_irq_byname(pdev, name);
if (tx_irq < 0) {
err = tx_irq;
goto fail;
@@ -1796,18 +1914,25 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
}
if (gpriv->fdmode) {
- priv->can.bittiming_const = &rcar_canfd_nom_bittiming_const;
- priv->can.data_bittiming_const =
- &rcar_canfd_data_bittiming_const;
+ priv->can.bittiming_const = gpriv->info->nom_bittiming;
+ priv->can.fd.data_bittiming_const = gpriv->info->data_bittiming;
+ priv->can.fd.tdc_const = gpriv->info->tdc_const;
/* Controller starts in CAN FD only mode */
err = can_set_static_ctrlmode(ndev, CAN_CTRLMODE_FD);
if (err)
goto fail;
- priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING;
+
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING |
+ CAN_CTRLMODE_TDC_AUTO |
+ CAN_CTRLMODE_TDC_MANUAL;
+ priv->can.fd.do_get_auto_tdcv = rcar_canfd_get_auto_tdcv;
} else {
/* Controller starts in Classical CAN only mode */
- priv->can.bittiming_const = &rcar_canfd_bittiming_const;
+ if (gpriv->info->shared_can_regs)
+ priv->can.bittiming_const = gpriv->info->nom_bittiming;
+ else
+ priv->can.bittiming_const = &rcar_canfd_bittiming_const;
priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING;
}
@@ -1845,13 +1970,112 @@ static void rcar_canfd_channel_remove(struct rcar_canfd_global *gpriv, u32 ch)
}
}
+static int rcar_canfd_global_init(struct rcar_canfd_global *gpriv)
+{
+ struct device *dev = &gpriv->pdev->dev;
+ u32 rule_entry = 0;
+ u32 ch, sts;
+ int err;
+
+ err = reset_control_reset(gpriv->rstc1);
+ if (err)
+ return err;
+
+ err = reset_control_reset(gpriv->rstc2);
+ if (err)
+ goto fail_reset1;
+
+ /* Enable peripheral clock for register access */
+ err = clk_prepare_enable(gpriv->clkp);
+ if (err) {
+ dev_err(dev, "failed to enable peripheral clock: %pe\n",
+ ERR_PTR(err));
+ goto fail_reset2;
+ }
+
+ /* Enable RAM clock */
+ err = clk_prepare_enable(gpriv->clk_ram);
+ if (err) {
+		dev_err(dev, "failed to enable RAM clock, error %d\n", err);
+ goto fail_clk;
+ }
+
+ err = rcar_canfd_reset_controller(gpriv);
+ if (err) {
+ dev_err(dev, "reset controller failed: %pe\n", ERR_PTR(err));
+ goto fail_ram_clk;
+ }
+
+ /* Controller in Global reset & Channel reset mode */
+ rcar_canfd_configure_controller(gpriv);
+
+ /* Configure per channel attributes */
+ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels) {
+ /* Configure Channel's Rx fifo */
+ rcar_canfd_configure_rx(gpriv, ch);
+
+ /* Configure Channel's Tx (Common) fifo */
+ rcar_canfd_configure_tx(gpriv, ch);
+
+ /* Configure receive rules */
+ rcar_canfd_configure_afl_rules(gpriv, ch, rule_entry);
+ rule_entry += RCANFD_CHANNEL_NUMRULES;
+ }
+
+ /* Configure common interrupts */
+ rcar_canfd_enable_global_interrupts(gpriv);
+
+ /* Start Global operation mode */
+ rcar_canfd_update_bit(gpriv->base, RCANFD_GCTR, RCANFD_GCTR_GMDC_MASK,
+ RCANFD_GCTR_GMDC_GOPM);
+
+ /* Verify mode change */
+ err = readl_poll_timeout((gpriv->base + RCANFD_GSTS), sts,
+ !(sts & RCANFD_GSTS_GNOPM), 2, 500000);
+ if (err) {
+ dev_err(dev, "global operational mode failed\n");
+ goto fail_mode;
+ }
+
+ return 0;
+
+fail_mode:
+ rcar_canfd_disable_global_interrupts(gpriv);
+fail_ram_clk:
+ clk_disable_unprepare(gpriv->clk_ram);
+fail_clk:
+ clk_disable_unprepare(gpriv->clkp);
+fail_reset2:
+ reset_control_assert(gpriv->rstc2);
+fail_reset1:
+ reset_control_assert(gpriv->rstc1);
+ return err;
+}
+
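+/* @full: also reset the controller and enter global sleep (only on remove) */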
+static void rcar_canfd_global_deinit(struct rcar_canfd_global *gpriv, bool full)
+{
+ rcar_canfd_disable_global_interrupts(gpriv);
+
+ if (full) {
+ rcar_canfd_reset_controller(gpriv);
+
+ /* Enter global sleep mode */
+ rcar_canfd_set_bit(gpriv->base, RCANFD_GCTR, RCANFD_GCTR_GSLPR);
+ }
+
+ clk_disable_unprepare(gpriv->clk_ram);
+ clk_disable_unprepare(gpriv->clkp);
+ reset_control_assert(gpriv->rstc2);
+ reset_control_assert(gpriv->rstc1);
+}
+
static int rcar_canfd_probe(struct platform_device *pdev)
{
struct phy *transceivers[RCANFD_NUM_CHANNELS] = { NULL, };
const struct rcar_canfd_hw_info *info;
struct device *dev = &pdev->dev;
void __iomem *addr;
- u32 sts, ch, fcan_freq;
struct rcar_canfd_global *gpriv;
struct device_node *of_child;
unsigned long channels_mask = 0;
@@ -1859,6 +2083,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
int g_err_irq, g_recc_irq;
bool fdmode = true; /* CAN FD only mode - default */
char name[9] = "channelX";
+ u32 ch, fcan_freq;
int i;
info = of_device_get_match_data(dev);
@@ -1868,13 +2093,13 @@ static int rcar_canfd_probe(struct platform_device *pdev)
for (i = 0; i < info->max_channels; ++i) {
name[7] = '0' + i;
- of_child = of_get_child_by_name(dev->of_node, name);
- if (of_child && of_device_is_available(of_child)) {
+ of_child = of_get_available_child_by_name(dev->of_node, name);
+ if (of_child) {
channels_mask |= BIT(i);
transceivers[i] = devm_of_phy_optional_get(dev,
of_child, NULL);
+ of_node_put(of_child);
}
- of_node_put(of_child);
if (IS_ERR(transceivers[i]))
return PTR_ERR(transceivers[i]);
}
@@ -1941,16 +2166,17 @@ static int rcar_canfd_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(gpriv->can_clk),
"cannot get canfd clock\n");
- gpriv->fcan = RCANFD_CANFDCLK;
-
+ /* CANFD clock may be further divided within the IP */
+ fcan_freq = clk_get_rate(gpriv->can_clk) / info->postdiv;
} else {
- gpriv->fcan = RCANFD_EXTCLK;
+ fcan_freq = clk_get_rate(gpriv->can_clk);
+ gpriv->extclk = gpriv->info->external_clk;
}
- fcan_freq = clk_get_rate(gpriv->can_clk);
- if (gpriv->fcan == RCANFD_CANFDCLK)
- /* CANFD clock is further divided by (1/2) within the IP */
- fcan_freq /= info->postdiv;
+ gpriv->clk_ram = devm_clk_get_optional(dev, "ram_clk");
+ if (IS_ERR(gpriv->clk_ram))
+ return dev_err_probe(dev, PTR_ERR(gpriv->clk_ram),
+ "cannot get ram clock\n");
addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(addr)) {
@@ -1958,6 +2184,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
goto fail_dev;
}
gpriv->base = addr;
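+	/* Per-channel CAN FD configuration registers start at coffset */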
+ gpriv->fcbase = addr + gpriv->info->regs->coffset;
/* Request IRQ that's common for both channels */
if (info->shared_global_irqs) {
@@ -1998,58 +2225,9 @@ static int rcar_canfd_probe(struct platform_device *pdev)
}
}
- err = reset_control_reset(gpriv->rstc1);
+ err = rcar_canfd_global_init(gpriv);
if (err)
- goto fail_dev;
- err = reset_control_reset(gpriv->rstc2);
- if (err) {
- reset_control_assert(gpriv->rstc1);
- goto fail_dev;
- }
-
- /* Enable peripheral clock for register access */
- err = clk_prepare_enable(gpriv->clkp);
- if (err) {
- dev_err(dev, "failed to enable peripheral clock: %pe\n",
- ERR_PTR(err));
- goto fail_reset;
- }
-
- err = rcar_canfd_reset_controller(gpriv);
- if (err) {
- dev_err(dev, "reset controller failed: %pe\n", ERR_PTR(err));
- goto fail_clk;
- }
-
- /* Controller in Global reset & Channel reset mode */
- rcar_canfd_configure_controller(gpriv);
-
- /* Configure per channel attributes */
- for_each_set_bit(ch, &gpriv->channels_mask, info->max_channels) {
- /* Configure Channel's Rx fifo */
- rcar_canfd_configure_rx(gpriv, ch);
-
- /* Configure Channel's Tx (Common) fifo */
- rcar_canfd_configure_tx(gpriv, ch);
-
- /* Configure receive rules */
- rcar_canfd_configure_afl_rules(gpriv, ch);
- }
-
- /* Configure common interrupts */
- rcar_canfd_enable_global_interrupts(gpriv);
-
- /* Start Global operation mode */
- rcar_canfd_update_bit(gpriv->base, RCANFD_GCTR, RCANFD_GCTR_GMDC_MASK,
- RCANFD_GCTR_GMDC_GOPM);
-
- /* Verify mode change */
- err = readl_poll_timeout((gpriv->base + RCANFD_GSTS), sts,
- !(sts & RCANFD_GSTS_GNOPM), 2, 500000);
- if (err) {
- dev_err(dev, "global operational mode failed\n");
goto fail_mode;
- }
for_each_set_bit(ch, &gpriv->channels_mask, info->max_channels) {
err = rcar_canfd_channel_probe(gpriv, ch, fcan_freq,
@@ -2059,20 +2237,16 @@ static int rcar_canfd_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, gpriv);
- dev_info(dev, "global operational state (clk %d, fdmode %d)\n",
- gpriv->fcan, gpriv->fdmode);
+ dev_info(dev, "global operational state (%s clk, %s mode)\n",
+ gpriv->extclk ? "ext" : "canfd",
+ gpriv->fdmode ? "fd" : "classical");
return 0;
fail_channel:
for_each_set_bit(ch, &gpriv->channels_mask, info->max_channels)
rcar_canfd_channel_remove(gpriv, ch);
fail_mode:
- rcar_canfd_disable_global_interrupts(gpriv);
-fail_clk:
- clk_disable_unprepare(gpriv->clkp);
-fail_reset:
- reset_control_assert(gpriv->rstc1);
- reset_control_assert(gpriv->rstc2);
+ rcar_canfd_global_deinit(gpriv, false);
fail_dev:
return err;
}
@@ -2082,36 +2256,83 @@ static void rcar_canfd_remove(struct platform_device *pdev)
struct rcar_canfd_global *gpriv = platform_get_drvdata(pdev);
u32 ch;
- rcar_canfd_reset_controller(gpriv);
- rcar_canfd_disable_global_interrupts(gpriv);
-
for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels) {
rcar_canfd_disable_channel_interrupts(gpriv->ch[ch]);
rcar_canfd_channel_remove(gpriv, ch);
}
- /* Enter global sleep mode */
- rcar_canfd_set_bit(gpriv->base, RCANFD_GCTR, RCANFD_GCTR_GSLPR);
- clk_disable_unprepare(gpriv->clkp);
- reset_control_assert(gpriv->rstc1);
- reset_control_assert(gpriv->rstc2);
+ rcar_canfd_global_deinit(gpriv, true);
}
-static int __maybe_unused rcar_canfd_suspend(struct device *dev)
+static int rcar_canfd_suspend(struct device *dev)
{
+ struct rcar_canfd_global *gpriv = dev_get_drvdata(dev);
+ int err;
+ u32 ch;
+
+ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels) {
+ struct rcar_canfd_channel *priv = gpriv->ch[ch];
+ struct net_device *ndev = priv->ndev;
+
+ if (!netif_running(ndev))
+ continue;
+
+ netif_device_detach(ndev);
+
+ err = rcar_canfd_close(ndev);
+ if (err) {
+ netdev_err(ndev, "rcar_canfd_close() failed %pe\n",
+ ERR_PTR(err));
+ return err;
+ }
+
+ priv->can.state = CAN_STATE_SLEEPING;
+ }
+
+	/* TODO: Skip this if wake-up is enabled (wake-up is not yet supported) */
+ rcar_canfd_global_deinit(gpriv, false);
+
return 0;
}
-static int __maybe_unused rcar_canfd_resume(struct device *dev)
+static int rcar_canfd_resume(struct device *dev)
{
+ struct rcar_canfd_global *gpriv = dev_get_drvdata(dev);
+ int err;
+ u32 ch;
+
+ err = rcar_canfd_global_init(gpriv);
+ if (err) {
+ dev_err(dev, "rcar_canfd_global_init() failed %pe\n", ERR_PTR(err));
+ return err;
+ }
+
+ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels) {
+ struct rcar_canfd_channel *priv = gpriv->ch[ch];
+ struct net_device *ndev = priv->ndev;
+
+ if (!netif_running(ndev))
+ continue;
+
+ err = rcar_canfd_open(ndev);
+ if (err) {
+ netdev_err(ndev, "rcar_canfd_open() failed %pe\n",
+ ERR_PTR(err));
+ return err;
+ }
+
+ netif_device_attach(ndev);
+ }
+
return 0;
}
-static SIMPLE_DEV_PM_OPS(rcar_canfd_pm_ops, rcar_canfd_suspend,
- rcar_canfd_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(rcar_canfd_pm_ops, rcar_canfd_suspend,
+ rcar_canfd_resume);
static const __maybe_unused struct of_device_id rcar_canfd_of_table[] = {
{ .compatible = "renesas,r8a779a0-canfd", .data = &rcar_gen4_hw_info },
+ { .compatible = "renesas,r9a09g047-canfd", .data = &r9a09g047_hw_info },
{ .compatible = "renesas,rcar-gen3-canfd", .data = &rcar_gen3_hw_info },
{ .compatible = "renesas,rcar-gen4-canfd", .data = &rcar_gen4_hw_info },
{ .compatible = "renesas,rzg2l-canfd", .data = &rzg2l_hw_info },
@@ -2124,10 +2345,10 @@ static struct platform_driver rcar_canfd_driver = {
.driver = {
.name = RCANFD_DRV_NAME,
.of_match_table = of_match_ptr(rcar_canfd_of_table),
- .pm = &rcar_canfd_pm_ops,
+ .pm = pm_sleep_ptr(&rcar_canfd_pm_ops),
},
.probe = rcar_canfd_probe,
- .remove_new = rcar_canfd_remove,
+ .remove = rcar_canfd_remove,
};
module_platform_driver(rcar_canfd_driver);
diff --git a/drivers/net/can/rockchip/Kconfig b/drivers/net/can/rockchip/Kconfig
new file mode 100644
index 000000000000..d203c530551f
--- /dev/null
+++ b/drivers/net/can/rockchip/Kconfig
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config CAN_ROCKCHIP_CANFD
+ tristate "Rockchip CAN-FD controller"
+ depends on OF
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
+ select CAN_RX_OFFLOAD
+ help
+	  Say Y here if you want to use the CAN-FD controller found on
+	  Rockchip SoCs.
diff --git a/drivers/net/can/rockchip/Makefile b/drivers/net/can/rockchip/Makefile
new file mode 100644
index 000000000000..3760d3e1baa3
--- /dev/null
+++ b/drivers/net/can/rockchip/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_CAN_ROCKCHIP_CANFD) += rockchip_canfd.o
+
+rockchip_canfd-objs :=
+rockchip_canfd-objs += rockchip_canfd-core.o
+rockchip_canfd-objs += rockchip_canfd-ethtool.o
+rockchip_canfd-objs += rockchip_canfd-rx.o
+rockchip_canfd-objs += rockchip_canfd-timestamp.o
+rockchip_canfd-objs += rockchip_canfd-tx.o
diff --git a/drivers/net/can/rockchip/rockchip_canfd-core.c b/drivers/net/can/rockchip/rockchip_canfd-core.c
new file mode 100644
index 000000000000..29de0c01e4ed
--- /dev/null
+++ b/drivers/net/can/rockchip/rockchip_canfd-core.c
@@ -0,0 +1,962 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2023, 2024 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+// Based on:
+//
+// Rockchip CANFD driver
+//
+// Copyright (c) 2020 Rockchip Electronics Co. Ltd.
+//
+
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/string.h>
+
+#include "rockchip_canfd.h"
+
+static const struct rkcanfd_devtype_data rkcanfd_devtype_data_rk3568v2 = {
+ .model = RKCANFD_MODEL_RK3568V2,
+ .quirks = RKCANFD_QUIRK_RK3568_ERRATUM_1 | RKCANFD_QUIRK_RK3568_ERRATUM_2 |
+ RKCANFD_QUIRK_RK3568_ERRATUM_3 | RKCANFD_QUIRK_RK3568_ERRATUM_4 |
+ RKCANFD_QUIRK_RK3568_ERRATUM_5 | RKCANFD_QUIRK_RK3568_ERRATUM_6 |
+ RKCANFD_QUIRK_RK3568_ERRATUM_7 | RKCANFD_QUIRK_RK3568_ERRATUM_8 |
+ RKCANFD_QUIRK_RK3568_ERRATUM_9 | RKCANFD_QUIRK_RK3568_ERRATUM_10 |
+ RKCANFD_QUIRK_RK3568_ERRATUM_11 | RKCANFD_QUIRK_RK3568_ERRATUM_12 |
+ RKCANFD_QUIRK_CANFD_BROKEN,
+};
+
+/* The rk3568 CAN-FD errata sheet as of Tue 07 Nov 2023 11:25:31 +08:00
+ * states that only the rk3568v2 is affected by erratum 5, but tests
+ * with the rk3568v2 and rk3568v3 show that the RX_FIFO_CNT is
+ * sometimes too high. In contrast to the errata sheet, mark rk3568v3
+ * as affected by erratum 5, too.
+ */
+static const struct rkcanfd_devtype_data rkcanfd_devtype_data_rk3568v3 = {
+ .model = RKCANFD_MODEL_RK3568V3,
+ .quirks = RKCANFD_QUIRK_RK3568_ERRATUM_1 | RKCANFD_QUIRK_RK3568_ERRATUM_2 |
+ RKCANFD_QUIRK_RK3568_ERRATUM_5 | RKCANFD_QUIRK_RK3568_ERRATUM_7 |
+ RKCANFD_QUIRK_RK3568_ERRATUM_8 | RKCANFD_QUIRK_RK3568_ERRATUM_10 |
+ RKCANFD_QUIRK_RK3568_ERRATUM_11 | RKCANFD_QUIRK_RK3568_ERRATUM_12 |
+ RKCANFD_QUIRK_CANFD_BROKEN,
+};
+
+static const char *__rkcanfd_get_model_str(enum rkcanfd_model model)
+{
+ switch (model) {
+ case RKCANFD_MODEL_RK3568V2:
+ return "rk3568v2";
+ case RKCANFD_MODEL_RK3568V3:
+ return "rk3568v3";
+ }
+
+ return "<unknown>";
+}
+
+static inline const char *
+rkcanfd_get_model_str(const struct rkcanfd_priv *priv)
+{
+ return __rkcanfd_get_model_str(priv->devtype_data.model);
+}
+
+/* Note:
+ *
+ * The formula to calculate the CAN System Clock is:
+ *
+ * Tsclk = 2 x Tclk x (brp + 1)
+ *
+ * Double the data sheet's brp_min, brp_max and brp_inc values (both
+ * for the arbitration and data bit timing) to take the "2 x" into
+ * account.
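+ *
+ * For example (hypothetical numbers): with Tclk = 10 ns (100 MHz) and a
+ * data sheet brp of 1, the framework sees brp = 4, so one time quantum
+ * lasts Tsclk = 2 x 10 ns x (1 + 1) = 40 ns.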
+ */
+static const struct can_bittiming_const rkcanfd_bittiming_const = {
+ .name = DEVICE_NAME,
+ .tseg1_min = 1,
+ .tseg1_max = 256,
+ .tseg2_min = 1,
+ .tseg2_max = 128,
+ .sjw_max = 128,
+ .brp_min = 2, /* value from data sheet x2 */
+ .brp_max = 512, /* value from data sheet x2 */
+ .brp_inc = 2, /* value from data sheet x2 */
+};
+
+static const struct can_bittiming_const rkcanfd_data_bittiming_const = {
+ .name = DEVICE_NAME,
+ .tseg1_min = 1,
+ .tseg1_max = 32,
+ .tseg2_min = 1,
+ .tseg2_max = 16,
+ .sjw_max = 16,
+ .brp_min = 2, /* value from data sheet x2 */
+ .brp_max = 512, /* value from data sheet x2 */
+ .brp_inc = 2, /* value from data sheet x2 */
+};
+
+static void rkcanfd_chip_set_reset_mode(const struct rkcanfd_priv *priv)
+{
+ reset_control_assert(priv->reset);
+ udelay(2);
+ reset_control_deassert(priv->reset);
+
+ rkcanfd_write(priv, RKCANFD_REG_MODE, 0x0);
+}
+
+static void rkcanfd_chip_set_work_mode(const struct rkcanfd_priv *priv)
+{
+ rkcanfd_write(priv, RKCANFD_REG_MODE, priv->reg_mode_default);
+}
+
+static int rkcanfd_set_bittiming(struct rkcanfd_priv *priv)
+{
+ const struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
+ const struct can_bittiming *bt = &priv->can.bittiming;
+ u32 reg_nbt, reg_dbt, reg_tdc;
+ u32 tdco;
+
+ reg_nbt = FIELD_PREP(RKCANFD_REG_FD_NOMINAL_BITTIMING_SJW,
+ bt->sjw - 1) |
+ FIELD_PREP(RKCANFD_REG_FD_NOMINAL_BITTIMING_BRP,
+ (bt->brp / 2) - 1) |
+ FIELD_PREP(RKCANFD_REG_FD_NOMINAL_BITTIMING_TSEG2,
+ bt->phase_seg2 - 1) |
+ FIELD_PREP(RKCANFD_REG_FD_NOMINAL_BITTIMING_TSEG1,
+ bt->prop_seg + bt->phase_seg1 - 1);
+
+ rkcanfd_write(priv, RKCANFD_REG_FD_NOMINAL_BITTIMING, reg_nbt);
+
+ if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD))
+ return 0;
+
+ reg_dbt = FIELD_PREP(RKCANFD_REG_FD_DATA_BITTIMING_SJW,
+ dbt->sjw - 1) |
+ FIELD_PREP(RKCANFD_REG_FD_DATA_BITTIMING_BRP,
+ (dbt->brp / 2) - 1) |
+ FIELD_PREP(RKCANFD_REG_FD_DATA_BITTIMING_TSEG2,
+ dbt->phase_seg2 - 1) |
+ FIELD_PREP(RKCANFD_REG_FD_DATA_BITTIMING_TSEG1,
+ dbt->prop_seg + dbt->phase_seg1 - 1);
+
+ rkcanfd_write(priv, RKCANFD_REG_FD_DATA_BITTIMING, reg_dbt);
+
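+	/* Fixed TDC setup: use roughly 2/3 of a data bit time, in clock
+	 * ticks, as the offset, clamp it to the register maximum and
+	 * enable delay compensation.
+	 */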
+ tdco = (priv->can.clock.freq / dbt->bitrate) * 2 / 3;
+ tdco = min(tdco, FIELD_MAX(RKCANFD_REG_TRANSMIT_DELAY_COMPENSATION_TDC_OFFSET));
+
+ reg_tdc = FIELD_PREP(RKCANFD_REG_TRANSMIT_DELAY_COMPENSATION_TDC_OFFSET, tdco) |
+ RKCANFD_REG_TRANSMIT_DELAY_COMPENSATION_TDC_ENABLE;
+ rkcanfd_write(priv, RKCANFD_REG_TRANSMIT_DELAY_COMPENSATION,
+ reg_tdc);
+
+ return 0;
+}
+
+static void rkcanfd_get_berr_counter_corrected(struct rkcanfd_priv *priv,
+ struct can_berr_counter *bec)
+{
+ struct can_berr_counter bec_raw;
+ u32 reg_state;
+
+ bec->rxerr = rkcanfd_read(priv, RKCANFD_REG_RXERRORCNT);
+ bec->txerr = rkcanfd_read(priv, RKCANFD_REG_TXERRORCNT);
+ bec_raw = *bec;
+
+ /* Tests show that sometimes both CAN bus error counters read
+ * 0x0, even if the controller is in warning mode
+ * (RKCANFD_REG_STATE_ERROR_WARNING_STATE in RKCANFD_REG_STATE
+ * set).
+ *
+ * In case both error counters read 0x0, use the struct
+ * priv->bec, otherwise save the read value to priv->bec.
+ *
+ * rkcanfd_handle_rx_int_one() handles the decrementing of
+ * priv->bec.rxerr for successfully RX'ed CAN frames.
+ *
+ * Luckily the controller doesn't decrement the RX CAN bus
+ * error counter in hardware for self received TX'ed CAN
+ * frames (RKCANFD_REG_MODE_RXSTX_MODE), so RXSTX doesn't
+ * interfere with proper RX CAN bus error counters.
+ *
+ * rkcanfd_handle_tx_done_one() handles the decrementing of
+ * priv->bec.txerr for successfully TX'ed CAN frames.
+ */
+ if (!bec->rxerr && !bec->txerr)
+ *bec = priv->bec;
+ else
+ priv->bec = *bec;
+
+ reg_state = rkcanfd_read(priv, RKCANFD_REG_STATE);
+ netdev_vdbg(priv->ndev,
+ "%s: Raw/Cor: txerr=%3u/%3u rxerr=%3u/%3u Bus Off=%u Warning=%u\n",
+ __func__,
+ bec_raw.txerr, bec->txerr, bec_raw.rxerr, bec->rxerr,
+ !!(reg_state & RKCANFD_REG_STATE_BUS_OFF_STATE),
+ !!(reg_state & RKCANFD_REG_STATE_ERROR_WARNING_STATE));
+}
+
+static int rkcanfd_get_berr_counter(const struct net_device *ndev,
+ struct can_berr_counter *bec)
+{
+ struct rkcanfd_priv *priv = netdev_priv(ndev);
+ int err;
+
+ err = pm_runtime_resume_and_get(ndev->dev.parent);
+ if (err)
+ return err;
+
+ rkcanfd_get_berr_counter_corrected(priv, bec);
+
+ pm_runtime_put(ndev->dev.parent);
+
+ return 0;
+}
+
+static void rkcanfd_chip_interrupts_enable(const struct rkcanfd_priv *priv)
+{
+ rkcanfd_write(priv, RKCANFD_REG_INT_MASK, priv->reg_int_mask_default);
+
+ netdev_dbg(priv->ndev, "%s: reg_int_mask=0x%08x\n", __func__,
+ rkcanfd_read(priv, RKCANFD_REG_INT_MASK));
+}
+
+static void rkcanfd_chip_interrupts_disable(const struct rkcanfd_priv *priv)
+{
+ rkcanfd_write(priv, RKCANFD_REG_INT_MASK, RKCANFD_REG_INT_ALL);
+}
+
+static void rkcanfd_chip_fifo_setup(struct rkcanfd_priv *priv)
+{
+ u32 reg;
+
+ /* RX FIFO */
+ reg = rkcanfd_read(priv, RKCANFD_REG_RX_FIFO_CTRL);
+ reg |= RKCANFD_REG_RX_FIFO_CTRL_RX_FIFO_ENABLE;
+ rkcanfd_write(priv, RKCANFD_REG_RX_FIFO_CTRL, reg);
+
+ WRITE_ONCE(priv->tx_head, 0);
+ WRITE_ONCE(priv->tx_tail, 0);
+ netdev_reset_queue(priv->ndev);
+}
+
+static void rkcanfd_chip_start(struct rkcanfd_priv *priv)
+{
+ u32 reg;
+
+ rkcanfd_chip_set_reset_mode(priv);
+
+ /* Receiving Filter: accept all */
+ rkcanfd_write(priv, RKCANFD_REG_IDCODE, 0x0);
+ rkcanfd_write(priv, RKCANFD_REG_IDMASK, RKCANFD_REG_IDCODE_EXTENDED_FRAME_ID);
+
+ /* enable:
+ * - CAN_FD: enable CAN-FD
+ * - AUTO_RETX_MODE: auto retransmission on TX error
+ * - COVER_MODE: RX-FIFO overwrite mode, do not send OVERLOAD frames
+ * - RXSTX_MODE: Receive Self Transmit data mode
+ * - WORK_MODE: transition from reset to working mode
+ */
+ reg = rkcanfd_read(priv, RKCANFD_REG_MODE);
+ priv->reg_mode_default = reg |
+ RKCANFD_REG_MODE_CAN_FD_MODE_ENABLE |
+ RKCANFD_REG_MODE_AUTO_RETX_MODE |
+ RKCANFD_REG_MODE_COVER_MODE |
+ RKCANFD_REG_MODE_RXSTX_MODE |
+ RKCANFD_REG_MODE_WORK_MODE;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
+ priv->reg_mode_default |= RKCANFD_REG_MODE_LBACK_MODE |
+ RKCANFD_REG_MODE_SILENT_MODE |
+ RKCANFD_REG_MODE_SELF_TEST;
+
+ /* mask, i.e. ignore:
+ * - TIMESTAMP_COUNTER_OVERFLOW_INT - timestamp counter overflow interrupt
+ * - TX_ARBIT_FAIL_INT - TX arbitration fail interrupt
+ * - OVERLOAD_INT - CAN bus overload interrupt
+ * - TX_FINISH_INT - Transmit finish interrupt
+ */
+ priv->reg_int_mask_default =
+ RKCANFD_REG_INT_TIMESTAMP_COUNTER_OVERFLOW_INT |
+ RKCANFD_REG_INT_TX_ARBIT_FAIL_INT |
+ RKCANFD_REG_INT_OVERLOAD_INT |
+ RKCANFD_REG_INT_TX_FINISH_INT;
+
+ /* Do not mask the bus error interrupt if the bus error
+ * reporting is requested.
+ */
+ if (!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
+ priv->reg_int_mask_default |= RKCANFD_REG_INT_ERROR_INT;
+
+ memset(&priv->bec, 0x0, sizeof(priv->bec));
+
+ rkcanfd_chip_fifo_setup(priv);
+ rkcanfd_timestamp_init(priv);
+ rkcanfd_timestamp_start(priv);
+
+ rkcanfd_set_bittiming(priv);
+
+ rkcanfd_chip_interrupts_disable(priv);
+ rkcanfd_chip_set_work_mode(priv);
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ netdev_dbg(priv->ndev, "%s: reg_mode=0x%08x\n", __func__,
+ rkcanfd_read(priv, RKCANFD_REG_MODE));
+}
+
+static void __rkcanfd_chip_stop(struct rkcanfd_priv *priv, const enum can_state state)
+{
+ priv->can.state = state;
+
+ rkcanfd_chip_set_reset_mode(priv);
+ rkcanfd_chip_interrupts_disable(priv);
+}
+
+static void rkcanfd_chip_stop(struct rkcanfd_priv *priv, const enum can_state state)
+{
+ priv->can.state = state;
+
+ rkcanfd_timestamp_stop(priv);
+ __rkcanfd_chip_stop(priv, state);
+}
+
+static void rkcanfd_chip_stop_sync(struct rkcanfd_priv *priv, const enum can_state state)
+{
+ priv->can.state = state;
+
+ rkcanfd_timestamp_stop_sync(priv);
+ __rkcanfd_chip_stop(priv, state);
+}
+
+static int rkcanfd_set_mode(struct net_device *ndev,
+ enum can_mode mode)
+{
+ struct rkcanfd_priv *priv = netdev_priv(ndev);
+
+ switch (mode) {
+ case CAN_MODE_START:
+ rkcanfd_chip_start(priv);
+ rkcanfd_chip_interrupts_enable(priv);
+ netif_wake_queue(ndev);
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static struct sk_buff *
+rkcanfd_alloc_can_err_skb(struct rkcanfd_priv *priv,
+ struct can_frame **cf, u32 *timestamp)
+{
+ struct sk_buff *skb;
+
+ *timestamp = rkcanfd_get_timestamp(priv);
+
+ skb = alloc_can_err_skb(priv->ndev, cf);
+ if (skb)
+ rkcanfd_skb_set_timestamp(priv, skb, *timestamp);
+
+ return skb;
+}
+
+static const char *rkcanfd_get_error_type_str(unsigned int type)
+{
+ switch (type) {
+ case RKCANFD_REG_ERROR_CODE_TYPE_BIT:
+ return "Bit";
+ case RKCANFD_REG_ERROR_CODE_TYPE_STUFF:
+ return "Stuff";
+ case RKCANFD_REG_ERROR_CODE_TYPE_FORM:
+ return "Form";
+ case RKCANFD_REG_ERROR_CODE_TYPE_ACK:
+ return "ACK";
+ case RKCANFD_REG_ERROR_CODE_TYPE_CRC:
+ return "CRC";
+ }
+
+ return "<unknown>";
+}
+
+#define RKCAN_ERROR_CODE(reg_ec, code) \
+ ((reg_ec) & RKCANFD_REG_ERROR_CODE_##code ? __stringify(code) " " : "")
+
+static void
+rkcanfd_handle_error_int_reg_ec(struct rkcanfd_priv *priv, struct can_frame *cf,
+ const u32 reg_ec)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ unsigned int type;
+ u32 reg_state, reg_cmd;
+
+ type = FIELD_GET(RKCANFD_REG_ERROR_CODE_TYPE, reg_ec);
+ reg_cmd = rkcanfd_read(priv, RKCANFD_REG_CMD);
+ reg_state = rkcanfd_read(priv, RKCANFD_REG_STATE);
+
+ netdev_dbg(priv->ndev, "%s Error in %s %s Phase: %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s(0x%08x) CMD=%u RX=%u TX=%u Error-Warning=%u Bus-Off=%u\n",
+ rkcanfd_get_error_type_str(type),
+ reg_ec & RKCANFD_REG_ERROR_CODE_DIRECTION_RX ? "RX" : "TX",
+ reg_ec & RKCANFD_REG_ERROR_CODE_PHASE ? "Data" : "Arbitration",
+ RKCAN_ERROR_CODE(reg_ec, TX_OVERLOAD),
+ RKCAN_ERROR_CODE(reg_ec, TX_ERROR),
+ RKCAN_ERROR_CODE(reg_ec, TX_ACK),
+ RKCAN_ERROR_CODE(reg_ec, TX_ACK_EOF),
+ RKCAN_ERROR_CODE(reg_ec, TX_CRC),
+ RKCAN_ERROR_CODE(reg_ec, TX_STUFF_COUNT),
+ RKCAN_ERROR_CODE(reg_ec, TX_DATA),
+ RKCAN_ERROR_CODE(reg_ec, TX_SOF_DLC),
+ RKCAN_ERROR_CODE(reg_ec, TX_IDLE),
+ RKCAN_ERROR_CODE(reg_ec, RX_BUF_INT),
+ RKCAN_ERROR_CODE(reg_ec, RX_SPACE),
+ RKCAN_ERROR_CODE(reg_ec, RX_EOF),
+ RKCAN_ERROR_CODE(reg_ec, RX_ACK_LIM),
+ RKCAN_ERROR_CODE(reg_ec, RX_ACK),
+ RKCAN_ERROR_CODE(reg_ec, RX_CRC_LIM),
+ RKCAN_ERROR_CODE(reg_ec, RX_CRC),
+ RKCAN_ERROR_CODE(reg_ec, RX_STUFF_COUNT),
+ RKCAN_ERROR_CODE(reg_ec, RX_DATA),
+ RKCAN_ERROR_CODE(reg_ec, RX_DLC),
+ RKCAN_ERROR_CODE(reg_ec, RX_BRS_ESI),
+ RKCAN_ERROR_CODE(reg_ec, RX_RES),
+ RKCAN_ERROR_CODE(reg_ec, RX_FDF),
+ RKCAN_ERROR_CODE(reg_ec, RX_ID2_RTR),
+ RKCAN_ERROR_CODE(reg_ec, RX_SOF_IDE),
+ RKCAN_ERROR_CODE(reg_ec, RX_IDLE),
+ reg_ec, reg_cmd,
+ !!(reg_state & RKCANFD_REG_STATE_RX_PERIOD),
+ !!(reg_state & RKCANFD_REG_STATE_TX_PERIOD),
+ !!(reg_state & RKCANFD_REG_STATE_ERROR_WARNING_STATE),
+ !!(reg_state & RKCANFD_REG_STATE_BUS_OFF_STATE));
+
+ priv->can.can_stats.bus_error++;
+
+ if (reg_ec & RKCANFD_REG_ERROR_CODE_DIRECTION_RX)
+ stats->rx_errors++;
+ else
+ stats->tx_errors++;
+
+ if (!cf)
+ return;
+
+ if (reg_ec & RKCANFD_REG_ERROR_CODE_DIRECTION_RX) {
+ if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_SOF_IDE)
+ cf->data[3] = CAN_ERR_PROT_LOC_SOF;
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_ID2_RTR)
+ cf->data[3] = CAN_ERR_PROT_LOC_RTR;
+ /* RKCANFD_REG_ERROR_CODE_RX_FDF */
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_RES)
+ cf->data[3] = CAN_ERR_PROT_LOC_RES0;
+ /* RKCANFD_REG_ERROR_CODE_RX_BRS_ESI */
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_DLC)
+ cf->data[3] = CAN_ERR_PROT_LOC_DLC;
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_DATA)
+ cf->data[3] = CAN_ERR_PROT_LOC_DATA;
+ /* RKCANFD_REG_ERROR_CODE_RX_STUFF_COUNT */
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_CRC)
+ cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_CRC_LIM)
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK_DEL;
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_ACK)
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK;
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_ACK_LIM)
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK_DEL;
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_EOF)
+ cf->data[3] = CAN_ERR_PROT_LOC_EOF;
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_SPACE)
+ cf->data[3] = CAN_ERR_PROT_LOC_EOF;
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_BUF_INT)
+ cf->data[3] = CAN_ERR_PROT_LOC_INTERM;
+ } else {
+ cf->data[2] |= CAN_ERR_PROT_TX;
+
+ if (reg_ec & RKCANFD_REG_ERROR_CODE_TX_SOF_DLC)
+ cf->data[3] = CAN_ERR_PROT_LOC_SOF;
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_TX_DATA)
+ cf->data[3] = CAN_ERR_PROT_LOC_DATA;
+ /* RKCANFD_REG_ERROR_CODE_TX_STUFF_COUNT */
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_TX_CRC)
+ cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_TX_ACK_EOF)
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK_DEL;
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_TX_ACK)
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK;
+ /* RKCANFD_REG_ERROR_CODE_TX_ERROR */
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_TX_OVERLOAD)
+ cf->data[2] |= CAN_ERR_PROT_OVERLOAD;
+ }
+
+ switch (reg_ec & RKCANFD_REG_ERROR_CODE_TYPE) {
+ case FIELD_PREP_CONST(RKCANFD_REG_ERROR_CODE_TYPE,
+ RKCANFD_REG_ERROR_CODE_TYPE_BIT):
+
+ cf->data[2] |= CAN_ERR_PROT_BIT;
+ break;
+ case FIELD_PREP_CONST(RKCANFD_REG_ERROR_CODE_TYPE,
+ RKCANFD_REG_ERROR_CODE_TYPE_STUFF):
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ break;
+ case FIELD_PREP_CONST(RKCANFD_REG_ERROR_CODE_TYPE,
+ RKCANFD_REG_ERROR_CODE_TYPE_FORM):
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ break;
+ case FIELD_PREP_CONST(RKCANFD_REG_ERROR_CODE_TYPE,
+ RKCANFD_REG_ERROR_CODE_TYPE_ACK):
+ cf->can_id |= CAN_ERR_ACK;
+ break;
+ case FIELD_PREP_CONST(RKCANFD_REG_ERROR_CODE_TYPE,
+ RKCANFD_REG_ERROR_CODE_TYPE_CRC):
+ cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+ break;
+ }
+}
+
+static int rkcanfd_handle_error_int(struct rkcanfd_priv *priv)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ struct can_frame *cf = NULL;
+ u32 reg_ec, timestamp;
+ struct sk_buff *skb;
+ int err;
+
+ reg_ec = rkcanfd_read(priv, RKCANFD_REG_ERROR_CODE);
+
+ if (!reg_ec)
+ return 0;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
+ skb = rkcanfd_alloc_can_err_skb(priv, &cf, &timestamp);
+ if (cf) {
+ struct can_berr_counter bec;
+
+ rkcanfd_get_berr_counter_corrected(priv, &bec);
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR | CAN_ERR_CNT;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ }
+ }
+
+ rkcanfd_handle_error_int_reg_ec(priv, cf, reg_ec);
+
+ if (!cf)
+ return 0;
+
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
+ if (err)
+ stats->rx_fifo_errors++;
+
+ return 0;
+}
+
+static int rkcanfd_handle_state_error_int(struct rkcanfd_priv *priv)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ enum can_state new_state, rx_state, tx_state;
+ struct net_device *ndev = priv->ndev;
+ struct can_berr_counter bec;
+ struct can_frame *cf = NULL;
+ struct sk_buff *skb;
+ u32 timestamp;
+ int err;
+
+ rkcanfd_get_berr_counter_corrected(priv, &bec);
+ can_state_get_by_berr_counter(ndev, &bec, &tx_state, &rx_state);
+
+ new_state = max(tx_state, rx_state);
+ if (new_state == priv->can.state)
+ return 0;
+
+ /* The skb allocation might fail, but can_change_state()
+ * handles cf == NULL.
+ */
+ skb = rkcanfd_alloc_can_err_skb(priv, &cf, &timestamp);
+ can_change_state(ndev, cf, tx_state, rx_state);
+
+ if (new_state == CAN_STATE_BUS_OFF) {
+ rkcanfd_chip_stop(priv, CAN_STATE_BUS_OFF);
+ can_bus_off(ndev);
+ }
+
+ if (!skb)
+ return 0;
+
+ if (new_state != CAN_STATE_BUS_OFF) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ }
+
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
+ if (err)
+ stats->rx_fifo_errors++;
+
+ return 0;
+}
+
+static int
+rkcanfd_handle_rx_fifo_overflow_int(struct rkcanfd_priv *priv)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ struct can_berr_counter bec;
+ struct can_frame *cf = NULL;
+ struct sk_buff *skb;
+ u32 timestamp;
+ int err;
+
+ stats->rx_over_errors++;
+ stats->rx_errors++;
+
+ netdev_dbg(priv->ndev, "RX-FIFO overflow\n");
+
+ skb = rkcanfd_alloc_can_err_skb(priv, &cf, &timestamp);
+ if (!skb)
+ return 0;
+
+ rkcanfd_get_berr_counter_corrected(priv, &bec);
+
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
+ if (err)
+ stats->rx_fifo_errors++;
+
+ return 0;
+}
+
+#define rkcanfd_handle(priv, irq, ...) \
+({ \
+ struct rkcanfd_priv *_priv = (priv); \
+ int err; \
+\
+ err = rkcanfd_handle_##irq(_priv, ## __VA_ARGS__); \
+ if (err) \
+ netdev_err(_priv->ndev, \
+ "IRQ handler rkcanfd_handle_%s() returned error: %pe\n", \
+ __stringify(irq), ERR_PTR(err)); \
+ err; \
+})
+
+static irqreturn_t rkcanfd_irq(int irq, void *dev_id)
+{
+ struct rkcanfd_priv *priv = dev_id;
+ u32 reg_int_unmasked, reg_int;
+
+ reg_int_unmasked = rkcanfd_read(priv, RKCANFD_REG_INT);
+ reg_int = reg_int_unmasked & ~priv->reg_int_mask_default;
+
+ if (!reg_int)
+ return IRQ_NONE;
+
+ /* First ACK then handle, to avoid lost-IRQ race condition on
+ * fast re-occurring interrupts.
+ */
+ rkcanfd_write(priv, RKCANFD_REG_INT, reg_int);
+
+ if (reg_int & RKCANFD_REG_INT_RX_FINISH_INT)
+ rkcanfd_handle(priv, rx_int);
+
+ if (reg_int & RKCANFD_REG_INT_ERROR_INT)
+ rkcanfd_handle(priv, error_int);
+
+ if (reg_int & (RKCANFD_REG_INT_BUS_OFF_INT |
+ RKCANFD_REG_INT_PASSIVE_ERROR_INT |
+ RKCANFD_REG_INT_ERROR_WARNING_INT) ||
+ priv->can.state > CAN_STATE_ERROR_ACTIVE)
+ rkcanfd_handle(priv, state_error_int);
+
+ if (reg_int & RKCANFD_REG_INT_RX_FIFO_OVERFLOW_INT)
+ rkcanfd_handle(priv, rx_fifo_overflow_int);
+
+ if (reg_int & ~(RKCANFD_REG_INT_ALL_ERROR |
+ RKCANFD_REG_INT_RX_FIFO_OVERFLOW_INT |
+ RKCANFD_REG_INT_RX_FINISH_INT))
+ netdev_err(priv->ndev, "%s: int=0x%08x\n", __func__, reg_int);
+
+ if (reg_int & RKCANFD_REG_INT_WAKEUP_INT)
+ netdev_info(priv->ndev, "%s: WAKEUP_INT\n", __func__);
+
+ if (reg_int & RKCANFD_REG_INT_TXE_FIFO_FULL_INT)
+ netdev_info(priv->ndev, "%s: TXE_FIFO_FULL_INT\n", __func__);
+
+ if (reg_int & RKCANFD_REG_INT_TXE_FIFO_OV_INT)
+ netdev_info(priv->ndev, "%s: TXE_FIFO_OV_INT\n", __func__);
+
+ if (reg_int & RKCANFD_REG_INT_BUS_OFF_RECOVERY_INT)
+ netdev_info(priv->ndev, "%s: BUS_OFF_RECOVERY_INT\n", __func__);
+
+ if (reg_int & RKCANFD_REG_INT_RX_FIFO_FULL_INT)
+ netdev_info(priv->ndev, "%s: RX_FIFO_FULL_INT\n", __func__);
+
+ if (reg_int & RKCANFD_REG_INT_OVERLOAD_INT)
+ netdev_info(priv->ndev, "%s: OVERLOAD_INT\n", __func__);
+
+ can_rx_offload_irq_finish(&priv->offload);
+
+ return IRQ_HANDLED;
+}
+
+static int rkcanfd_open(struct net_device *ndev)
+{
+ struct rkcanfd_priv *priv = netdev_priv(ndev);
+ int err;
+
+ err = open_candev(ndev);
+ if (err)
+ return err;
+
+ err = pm_runtime_resume_and_get(ndev->dev.parent);
+ if (err)
+ goto out_close_candev;
+
+ rkcanfd_chip_start(priv);
+ can_rx_offload_enable(&priv->offload);
+
+ err = request_irq(ndev->irq, rkcanfd_irq, IRQF_SHARED, ndev->name, priv);
+ if (err)
+ goto out_rkcanfd_chip_stop;
+
+ rkcanfd_chip_interrupts_enable(priv);
+
+ netif_start_queue(ndev);
+
+ return 0;
+
+out_rkcanfd_chip_stop:
+ rkcanfd_chip_stop_sync(priv, CAN_STATE_STOPPED);
+ pm_runtime_put(ndev->dev.parent);
+out_close_candev:
+ close_candev(ndev);
+ return err;
+}
+
+static int rkcanfd_stop(struct net_device *ndev)
+{
+ struct rkcanfd_priv *priv = netdev_priv(ndev);
+
+ netif_stop_queue(ndev);
+
+ rkcanfd_chip_interrupts_disable(priv);
+ free_irq(ndev->irq, priv);
+ can_rx_offload_disable(&priv->offload);
+ rkcanfd_chip_stop_sync(priv, CAN_STATE_STOPPED);
+ close_candev(ndev);
+
+ pm_runtime_put(ndev->dev.parent);
+
+ return 0;
+}
+
+static const struct net_device_ops rkcanfd_netdev_ops = {
+ .ndo_open = rkcanfd_open,
+ .ndo_stop = rkcanfd_stop,
+ .ndo_start_xmit = rkcanfd_start_xmit,
+};
+
+static int __maybe_unused rkcanfd_runtime_suspend(struct device *dev)
+{
+ struct rkcanfd_priv *priv = dev_get_drvdata(dev);
+
+ clk_bulk_disable_unprepare(priv->clks_num, priv->clks);
+
+ return 0;
+}
+
+static int __maybe_unused rkcanfd_runtime_resume(struct device *dev)
+{
+ struct rkcanfd_priv *priv = dev_get_drvdata(dev);
+
+ return clk_bulk_prepare_enable(priv->clks_num, priv->clks);
+}
+
+static void rkcanfd_register_done(const struct rkcanfd_priv *priv)
+{
+ u32 dev_id;
+
+ dev_id = rkcanfd_read(priv, RKCANFD_REG_RTL_VERSION);
+
+ netdev_info(priv->ndev,
+ "Rockchip-CANFD %s rev%lu.%lu (errata 0x%04x) found\n",
+ rkcanfd_get_model_str(priv),
+ FIELD_GET(RKCANFD_REG_RTL_VERSION_MAJOR, dev_id),
+ FIELD_GET(RKCANFD_REG_RTL_VERSION_MINOR, dev_id),
+ priv->devtype_data.quirks);
+
+ if (priv->devtype_data.quirks & RKCANFD_QUIRK_RK3568_ERRATUM_5 &&
+ priv->can.clock.freq < RKCANFD_ERRATUM_5_SYSCLOCK_HZ_MIN)
+ netdev_info(priv->ndev,
+ "Erratum 5: CAN clock frequency (%luMHz) lower than known good (%luMHz), expect degraded performance\n",
+ priv->can.clock.freq / MEGA,
+ RKCANFD_ERRATUM_5_SYSCLOCK_HZ_MIN / MEGA);
+}
+
+static int rkcanfd_register(struct rkcanfd_priv *priv)
+{
+ struct net_device *ndev = priv->ndev;
+ int err;
+
+ pm_runtime_enable(ndev->dev.parent);
+
+ err = pm_runtime_resume_and_get(ndev->dev.parent);
+ if (err)
+ goto out_pm_runtime_disable;
+
+ rkcanfd_ethtool_init(priv);
+
+ err = register_candev(ndev);
+ if (err)
+ goto out_pm_runtime_put_sync;
+
+ rkcanfd_register_done(priv);
+
+ pm_runtime_put(ndev->dev.parent);
+
+ return 0;
+
+out_pm_runtime_put_sync:
+ pm_runtime_put_sync(ndev->dev.parent);
+out_pm_runtime_disable:
+ pm_runtime_disable(ndev->dev.parent);
+
+ return err;
+}
+
+static inline void rkcanfd_unregister(struct rkcanfd_priv *priv)
+{
+ struct net_device *ndev = priv->ndev;
+
+ unregister_candev(ndev);
+ pm_runtime_disable(ndev->dev.parent);
+}
+
+static const struct of_device_id rkcanfd_of_match[] = {
+ {
+ .compatible = "rockchip,rk3568v2-canfd",
+ .data = &rkcanfd_devtype_data_rk3568v2,
+ }, {
+ .compatible = "rockchip,rk3568v3-canfd",
+ .data = &rkcanfd_devtype_data_rk3568v3,
+ }, {
+ /* sentinel */
+ },
+};
+MODULE_DEVICE_TABLE(of, rkcanfd_of_match);
+
+static int rkcanfd_probe(struct platform_device *pdev)
+{
+ struct rkcanfd_priv *priv;
+ struct net_device *ndev;
+ const void *match;
+ int err;
+
+ ndev = alloc_candev(sizeof(struct rkcanfd_priv), RKCANFD_TXFIFO_DEPTH);
+ if (!ndev)
+ return -ENOMEM;
+
+ priv = netdev_priv(ndev);
+
+ ndev->irq = platform_get_irq(pdev, 0);
+ if (ndev->irq < 0) {
+ err = ndev->irq;
+ goto out_free_candev;
+ }
+
+ priv->clks_num = devm_clk_bulk_get_all(&pdev->dev, &priv->clks);
+ if (priv->clks_num < 0) {
+ err = priv->clks_num;
+ goto out_free_candev;
+ }
+
+ priv->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->regs)) {
+ err = PTR_ERR(priv->regs);
+ goto out_free_candev;
+ }
+
+ priv->reset = devm_reset_control_array_get_exclusive(&pdev->dev);
+ if (IS_ERR(priv->reset)) {
+ err = dev_err_probe(&pdev->dev, PTR_ERR(priv->reset),
+ "Failed to get reset line\n");
+ goto out_free_candev;
+ }
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ ndev->netdev_ops = &rkcanfd_netdev_ops;
+ ndev->flags |= IFF_ECHO;
+
+ platform_set_drvdata(pdev, priv);
+ priv->can.clock.freq = clk_get_rate(priv->clks[0].clk);
+ priv->can.bittiming_const = &rkcanfd_bittiming_const;
+ priv->can.fd.data_bittiming_const = &rkcanfd_data_bittiming_const;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
+ CAN_CTRLMODE_BERR_REPORTING;
+ priv->can.do_set_mode = rkcanfd_set_mode;
+ priv->can.do_get_berr_counter = rkcanfd_get_berr_counter;
+ priv->ndev = ndev;
+
+ match = device_get_match_data(&pdev->dev);
+ if (match) {
+ priv->devtype_data = *(struct rkcanfd_devtype_data *)match;
+ if (!(priv->devtype_data.quirks & RKCANFD_QUIRK_CANFD_BROKEN))
+ priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD;
+ }
+
+ err = can_rx_offload_add_manual(ndev, &priv->offload,
+ RKCANFD_NAPI_WEIGHT);
+ if (err)
+ goto out_free_candev;
+
+ err = rkcanfd_register(priv);
+ if (err)
+ goto out_can_rx_offload_del;
+
+ return 0;
+
+out_can_rx_offload_del:
+ can_rx_offload_del(&priv->offload);
+out_free_candev:
+ free_candev(ndev);
+
+ return err;
+}
+
+static void rkcanfd_remove(struct platform_device *pdev)
+{
+ struct rkcanfd_priv *priv = platform_get_drvdata(pdev);
+ struct net_device *ndev = priv->ndev;
+
+ rkcanfd_unregister(priv);
+ can_rx_offload_del(&priv->offload);
+ free_candev(ndev);
+}
+
+static const struct dev_pm_ops rkcanfd_pm_ops = {
+ SET_RUNTIME_PM_OPS(rkcanfd_runtime_suspend,
+ rkcanfd_runtime_resume, NULL)
+};
+
+static struct platform_driver rkcanfd_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ .pm = &rkcanfd_pm_ops,
+ .of_match_table = rkcanfd_of_match,
+ },
+ .probe = rkcanfd_probe,
+ .remove = rkcanfd_remove,
+};
+module_platform_driver(rkcanfd_driver);
+
+MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>");
+MODULE_DESCRIPTION("Rockchip CAN-FD Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/can/rockchip/rockchip_canfd-ethtool.c b/drivers/net/can/rockchip/rockchip_canfd-ethtool.c
new file mode 100644
index 000000000000..5aeeef64a67a
--- /dev/null
+++ b/drivers/net/can/rockchip/rockchip_canfd-ethtool.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2023, 2024 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+
+#include <linux/ethtool.h>
+
+#include "rockchip_canfd.h"
+
+enum rkcanfd_stats_type {
+ RKCANFD_STATS_TYPE_RX_FIFO_EMPTY_ERRORS,
+ RKCANFD_STATS_TYPE_TX_EXTENDED_AS_STANDARD_ERRORS,
+};
+
+static const char rkcanfd_stats_strings[][ETH_GSTRING_LEN] = {
+ [RKCANFD_STATS_TYPE_RX_FIFO_EMPTY_ERRORS] = "rx_fifo_empty_errors",
+ [RKCANFD_STATS_TYPE_TX_EXTENDED_AS_STANDARD_ERRORS] = "tx_extended_as_standard_errors",
+};
+
+static void
+rkcanfd_ethtool_get_strings(struct net_device *ndev, u32 stringset, u8 *buf)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(buf, rkcanfd_stats_strings,
+ sizeof(rkcanfd_stats_strings));
+ }
+}
+
+static int rkcanfd_ethtool_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(rkcanfd_stats_strings);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void
+rkcanfd_ethtool_get_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct rkcanfd_priv *priv = netdev_priv(ndev);
+ struct rkcanfd_stats *rkcanfd_stats;
+ unsigned int start;
+
+ rkcanfd_stats = &priv->stats;
+
+ do {
+ start = u64_stats_fetch_begin(&rkcanfd_stats->syncp);
+
+ data[RKCANFD_STATS_TYPE_RX_FIFO_EMPTY_ERRORS] =
+ u64_stats_read(&rkcanfd_stats->rx_fifo_empty_errors);
+ data[RKCANFD_STATS_TYPE_TX_EXTENDED_AS_STANDARD_ERRORS] =
+ u64_stats_read(&rkcanfd_stats->tx_extended_as_standard_errors);
+ } while (u64_stats_fetch_retry(&rkcanfd_stats->syncp, start));
+}
+
+static const struct ethtool_ops rkcanfd_ethtool_ops = {
+ .get_ts_info = can_ethtool_op_get_ts_info_hwts,
+ .get_strings = rkcanfd_ethtool_get_strings,
+ .get_sset_count = rkcanfd_ethtool_get_sset_count,
+ .get_ethtool_stats = rkcanfd_ethtool_get_ethtool_stats,
+};
+
+void rkcanfd_ethtool_init(struct rkcanfd_priv *priv)
+{
+ priv->ndev->ethtool_ops = &rkcanfd_ethtool_ops;
+
+ u64_stats_init(&priv->stats.syncp);
+}
diff --git a/drivers/net/can/rockchip/rockchip_canfd-rx.c b/drivers/net/can/rockchip/rockchip_canfd-rx.c
new file mode 100644
index 000000000000..475c0409e215
--- /dev/null
+++ b/drivers/net/can/rockchip/rockchip_canfd-rx.c
@@ -0,0 +1,299 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2023, 2024 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+
+#include <net/netdev_queues.h>
+
+#include "rockchip_canfd.h"
+
+static bool rkcanfd_can_frame_header_equal(const struct canfd_frame *const cfd1,
+ const struct canfd_frame *const cfd2,
+ const bool is_canfd)
+{
+ const u8 mask_flags = CANFD_BRS | CANFD_ESI | CANFD_FDF;
+ canid_t mask = CAN_EFF_FLAG;
+
+ if (canfd_sanitize_len(cfd1->len) != canfd_sanitize_len(cfd2->len))
+ return false;
+
+ if (!is_canfd)
+ mask |= CAN_RTR_FLAG;
+
+ if (cfd1->can_id & CAN_EFF_FLAG)
+ mask |= CAN_EFF_MASK;
+ else
+ mask |= CAN_SFF_MASK;
+
+ if ((cfd1->can_id & mask) != (cfd2->can_id & mask))
+ return false;
+
+ if (is_canfd &&
+ (cfd1->flags & mask_flags) != (cfd2->flags & mask_flags))
+ return false;
+
+ return true;
+}
+
+static bool rkcanfd_can_frame_data_equal(const struct canfd_frame *cfd1,
+ const struct canfd_frame *cfd2,
+ const bool is_canfd)
+{
+ u8 len;
+
+ if (!is_canfd && (cfd1->can_id & CAN_RTR_FLAG))
+ return true;
+
+ len = canfd_sanitize_len(cfd1->len);
+
+ return !memcmp(cfd1->data, cfd2->data, len);
+}
+
+static unsigned int
+rkcanfd_fifo_header_to_cfd_header(const struct rkcanfd_priv *priv,
+ const struct rkcanfd_fifo_header *header,
+ struct canfd_frame *cfd)
+{
+ unsigned int len = sizeof(*cfd) - sizeof(cfd->data);
+ u8 dlc;
+
+ if (header->frameinfo & RKCANFD_REG_FD_FRAMEINFO_FRAME_FORMAT)
+ cfd->can_id = FIELD_GET(RKCANFD_REG_FD_ID_EFF, header->id) |
+ CAN_EFF_FLAG;
+ else
+ cfd->can_id = FIELD_GET(RKCANFD_REG_FD_ID_SFF, header->id);
+
+ dlc = FIELD_GET(RKCANFD_REG_FD_FRAMEINFO_DATA_LENGTH,
+ header->frameinfo);
+
+ /* CAN-FD */
+ if (header->frameinfo & RKCANFD_REG_FD_FRAMEINFO_FDF) {
+ cfd->len = can_fd_dlc2len(dlc);
+
+ /* The cfd is not allocated by alloc_canfd_skb(), so
+ * set CANFD_FDF here.
+ */
+ cfd->flags |= CANFD_FDF;
+
+ if (header->frameinfo & RKCANFD_REG_FD_FRAMEINFO_BRS)
+ cfd->flags |= CANFD_BRS;
+ } else {
+ cfd->len = can_cc_dlc2len(dlc);
+
+ if (header->frameinfo & RKCANFD_REG_FD_FRAMEINFO_RTR) {
+ cfd->can_id |= CAN_RTR_FLAG;
+
+ return len;
+ }
+ }
+
+ return len + cfd->len;
+}
+
+static int rkcanfd_rxstx_filter(struct rkcanfd_priv *priv,
+ const struct canfd_frame *cfd_rx, const u32 ts,
+ bool *tx_done)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ struct rkcanfd_stats *rkcanfd_stats = &priv->stats;
+ const struct canfd_frame *cfd_nominal;
+ const struct sk_buff *skb;
+ unsigned int tx_tail;
+
+ tx_tail = rkcanfd_get_tx_tail(priv);
+ skb = priv->can.echo_skb[tx_tail];
+ if (!skb) {
+ netdev_err(priv->ndev,
+ "%s: echo_skb[%u]=NULL tx_head=0x%08x tx_tail=0x%08x\n",
+ __func__, tx_tail,
+ priv->tx_head, priv->tx_tail);
+
+ return -ENOMSG;
+ }
+ cfd_nominal = (struct canfd_frame *)skb->data;
+
+ /* We RX'ed a frame identical to our pending TX frame. */
+ if (rkcanfd_can_frame_header_equal(cfd_rx, cfd_nominal,
+ cfd_rx->flags & CANFD_FDF) &&
+ rkcanfd_can_frame_data_equal(cfd_rx, cfd_nominal,
+ cfd_rx->flags & CANFD_FDF)) {
+ unsigned int frame_len;
+
+ rkcanfd_handle_tx_done_one(priv, ts, &frame_len);
+
+ WRITE_ONCE(priv->tx_tail, priv->tx_tail + 1);
+ netif_subqueue_completed_wake(priv->ndev, 0, 1, frame_len,
+ rkcanfd_get_effective_tx_free(priv),
+ RKCANFD_TX_START_THRESHOLD);
+
+ *tx_done = true;
+
+ return 0;
+ }
+
+ if (!(priv->devtype_data.quirks & RKCANFD_QUIRK_RK3568_ERRATUM_6))
+ return 0;
+
+ /* Erratum 6: Extended frames may be sent as standard frames.
+ *
+ * Not affected if:
+ * - TX'ed a standard frame -or-
+ * - RX'ed an extended frame
+ */
+ if (!(cfd_nominal->can_id & CAN_EFF_FLAG) ||
+ (cfd_rx->can_id & CAN_EFF_FLAG))
+ return 0;
+
+ /* Not affected if:
+ * - standard part and RTR flag of the TX'ed frame
+ * are not equal to the CAN-ID and RTR flag of the RX'ed frame.
+ */
+ if ((cfd_nominal->can_id & (CAN_RTR_FLAG | CAN_SFF_MASK)) !=
+ (cfd_rx->can_id & (CAN_RTR_FLAG | CAN_SFF_MASK)))
+ return 0;
+
+ /* Not affected if:
+ * - length is not the same
+ */
+ if (cfd_nominal->len != cfd_rx->len)
+ return 0;
+
+ /* Not affected if:
+ * - the data of non RTR frames is different
+ */
+ if (!(cfd_nominal->can_id & CAN_RTR_FLAG) &&
+ memcmp(cfd_nominal->data, cfd_rx->data, cfd_nominal->len))
+ return 0;
+
+ /* Affected by Erratum 6 */
+ u64_stats_update_begin(&rkcanfd_stats->syncp);
+ u64_stats_inc(&rkcanfd_stats->tx_extended_as_standard_errors);
+ u64_stats_update_end(&rkcanfd_stats->syncp);
+
+ /* Manual handling of CAN Bus Error counters. See
+ * rkcanfd_get_corrected_berr_counter() for detailed
+ * explanation.
+ */
+ if (priv->bec.txerr)
+ priv->bec.txerr--;
+
+ *tx_done = true;
+
+ stats->tx_packets++;
+ stats->tx_errors++;
+
+ rkcanfd_xmit_retry(priv);
+
+ return 0;
+}
+
+static inline bool
+rkcanfd_fifo_header_empty(const struct rkcanfd_fifo_header *header)
+{
+ /* Erratum 5: If the FIFO is empty, we read the same value for
+ * all elements.
+ */
+ return header->frameinfo == header->id &&
+ header->frameinfo == header->ts;
+}
+
+static int rkcanfd_handle_rx_int_one(struct rkcanfd_priv *priv)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ struct canfd_frame cfd[1] = { }, *skb_cfd;
+ struct rkcanfd_fifo_header header[1] = { };
+ struct sk_buff *skb;
+ unsigned int len;
+ int err;
+
+ /* read header into separate struct and convert it later */
+ rkcanfd_read_rep(priv, RKCANFD_REG_RX_FIFO_RDATA,
+ header, sizeof(*header));
+ /* read data directly into cfd */
+ rkcanfd_read_rep(priv, RKCANFD_REG_RX_FIFO_RDATA,
+ cfd->data, sizeof(cfd->data));
+
+ /* Erratum 5: Counters for TXEFIFO and RXFIFO may be wrong */
+ if (rkcanfd_fifo_header_empty(header)) {
+ struct rkcanfd_stats *rkcanfd_stats = &priv->stats;
+
+ u64_stats_update_begin(&rkcanfd_stats->syncp);
+ u64_stats_inc(&rkcanfd_stats->rx_fifo_empty_errors);
+ u64_stats_update_end(&rkcanfd_stats->syncp);
+
+ return 0;
+ }
+
+ len = rkcanfd_fifo_header_to_cfd_header(priv, header, cfd);
+
+ /* Drop any received CAN-FD frames if CAN-FD mode is not
+ * requested.
+ */
+ if (header->frameinfo & RKCANFD_REG_FD_FRAMEINFO_FDF &&
+ !(priv->can.ctrlmode & CAN_CTRLMODE_FD)) {
+ stats->rx_dropped++;
+
+ return 0;
+ }
+
+ if (rkcanfd_get_tx_pending(priv)) {
+ bool tx_done = false;
+
+ err = rkcanfd_rxstx_filter(priv, cfd, header->ts, &tx_done);
+ if (err)
+ return err;
+ if (tx_done && !(priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK))
+ return 0;
+ }
+
+ /* Manual handling of CAN Bus Error counters. See
+ * rkcanfd_get_corrected_berr_counter() for detailed
+ * explanation.
+ */
+ if (priv->bec.rxerr)
+ priv->bec.rxerr = min(CAN_ERROR_PASSIVE_THRESHOLD,
+ priv->bec.rxerr) - 1;
+
+ if (header->frameinfo & RKCANFD_REG_FD_FRAMEINFO_FDF)
+ skb = alloc_canfd_skb(priv->ndev, &skb_cfd);
+ else
+ skb = alloc_can_skb(priv->ndev, (struct can_frame **)&skb_cfd);
+
+ if (!skb) {
+ stats->rx_dropped++;
+
+ return 0;
+ }
+
+ memcpy(skb_cfd, cfd, len);
+ rkcanfd_skb_set_timestamp(priv, skb, header->ts);
+
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, header->ts);
+ if (err)
+ stats->rx_fifo_errors++;
+
+ return 0;
+}
+
+static inline unsigned int
+rkcanfd_rx_fifo_get_len(const struct rkcanfd_priv *priv)
+{
+ const u32 reg = rkcanfd_read(priv, RKCANFD_REG_RX_FIFO_CTRL);
+
+ return FIELD_GET(RKCANFD_REG_RX_FIFO_CTRL_RX_FIFO_CNT, reg);
+}
+
+int rkcanfd_handle_rx_int(struct rkcanfd_priv *priv)
+{
+ unsigned int len;
+ int err;
+
+ while ((len = rkcanfd_rx_fifo_get_len(priv))) {
+ err = rkcanfd_handle_rx_int_one(priv);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/can/rockchip/rockchip_canfd-timestamp.c b/drivers/net/can/rockchip/rockchip_canfd-timestamp.c
new file mode 100644
index 000000000000..72774cd2f94b
--- /dev/null
+++ b/drivers/net/can/rockchip/rockchip_canfd-timestamp.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2023, 2024 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+
+#include <linux/clocksource.h>
+
+#include "rockchip_canfd.h"
+
+static u64 rkcanfd_timestamp_read(struct cyclecounter *cc)
+{
+ const struct rkcanfd_priv *priv = container_of(cc, struct rkcanfd_priv, cc);
+
+ return rkcanfd_get_timestamp(priv);
+}
+
+void rkcanfd_skb_set_timestamp(const struct rkcanfd_priv *priv,
+ struct sk_buff *skb, const u32 timestamp)
+{
+ struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
+ u64 ns;
+
+ ns = timecounter_cyc2time(&priv->tc, timestamp);
+
+ hwtstamps->hwtstamp = ns_to_ktime(ns);
+}
+
+static void rkcanfd_timestamp_work(struct work_struct *work)
+{
+ const struct delayed_work *delayed_work = to_delayed_work(work);
+ struct rkcanfd_priv *priv;
+
+ priv = container_of(delayed_work, struct rkcanfd_priv, timestamp);
+ timecounter_read(&priv->tc);
+
+ schedule_delayed_work(&priv->timestamp, priv->work_delay_jiffies);
+}
+
+void rkcanfd_timestamp_init(struct rkcanfd_priv *priv)
+{
+ const struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
+ const struct can_bittiming *bt = &priv->can.bittiming;
+ struct cyclecounter *cc = &priv->cc;
+ u32 bitrate, div, reg, rate;
+ u64 work_delay_ns;
+ u64 max_cycles;
+
+ /* At the standard clock rate of 300 MHz on the rk3568, the 32
+ * bit timer overflows every 14s. This means that we have to
+ * poll it quite often to avoid missing a wrap around.
+ *
+ * Divide it down to a reasonable rate, at least twice the bit
+ * rate.
+ */
+ bitrate = max(bt->bitrate, dbt->bitrate);
+ div = min(DIV_ROUND_UP(priv->can.clock.freq, bitrate * 2),
+ FIELD_MAX(RKCANFD_REG_TIMESTAMP_CTRL_TIME_BASE_COUNTER_PRESCALE) + 1);
+
+ reg = FIELD_PREP(RKCANFD_REG_TIMESTAMP_CTRL_TIME_BASE_COUNTER_PRESCALE,
+ div - 1) |
+ RKCANFD_REG_TIMESTAMP_CTRL_TIME_BASE_COUNTER_ENABLE;
+ rkcanfd_write(priv, RKCANFD_REG_TIMESTAMP_CTRL, reg);
+
+ cc->read = rkcanfd_timestamp_read;
+ cc->mask = CYCLECOUNTER_MASK(32);
+
+ rate = priv->can.clock.freq / div;
+ clocks_calc_mult_shift(&cc->mult, &cc->shift, rate, NSEC_PER_SEC,
+ RKCANFD_TIMESTAMP_WORK_MAX_DELAY_SEC);
+
+ max_cycles = div_u64(ULLONG_MAX, cc->mult);
+ max_cycles = min(max_cycles, cc->mask);
+ work_delay_ns = clocksource_cyc2ns(max_cycles, cc->mult, cc->shift);
+ priv->work_delay_jiffies = div_u64(work_delay_ns, 3u * NSEC_PER_SEC / HZ);
+ INIT_DELAYED_WORK(&priv->timestamp, rkcanfd_timestamp_work);
+
+ netdev_dbg(priv->ndev, "clock=%lu.%02luMHz bitrate=%lu.%02luMBit/s div=%u rate=%lu.%02luMHz mult=%u shift=%u delay=%lus\n",
+ priv->can.clock.freq / MEGA,
+ priv->can.clock.freq % MEGA / KILO / 10,
+ bitrate / MEGA,
+ bitrate % MEGA / KILO / 100,
+ div,
+ rate / MEGA,
+ rate % MEGA / KILO / 10,
+ cc->mult, cc->shift,
+ priv->work_delay_jiffies / HZ);
+}
+
+void rkcanfd_timestamp_start(struct rkcanfd_priv *priv)
+{
+ timecounter_init(&priv->tc, &priv->cc, ktime_get_real_ns());
+
+ schedule_delayed_work(&priv->timestamp, priv->work_delay_jiffies);
+}
+
+void rkcanfd_timestamp_stop(struct rkcanfd_priv *priv)
+{
+ cancel_delayed_work(&priv->timestamp);
+}
+
+void rkcanfd_timestamp_stop_sync(struct rkcanfd_priv *priv)
+{
+ cancel_delayed_work_sync(&priv->timestamp);
+}
diff --git a/drivers/net/can/rockchip/rockchip_canfd-tx.c b/drivers/net/can/rockchip/rockchip_canfd-tx.c
new file mode 100644
index 000000000000..12200dcfd338
--- /dev/null
+++ b/drivers/net/can/rockchip/rockchip_canfd-tx.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2023, 2024 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+
+#include <net/netdev_queues.h>
+
+#include "rockchip_canfd.h"
+
+static bool rkcanfd_tx_tail_is_eff(const struct rkcanfd_priv *priv)
+{
+ const struct canfd_frame *cfd;
+ const struct sk_buff *skb;
+ unsigned int tx_tail;
+
+ if (!rkcanfd_get_tx_pending(priv))
+ return false;
+
+ tx_tail = rkcanfd_get_tx_tail(priv);
+ skb = priv->can.echo_skb[tx_tail];
+ if (!skb) {
+ netdev_err(priv->ndev,
+ "%s: echo_skb[%u]=NULL tx_head=0x%08x tx_tail=0x%08x\n",
+ __func__, tx_tail,
+ priv->tx_head, priv->tx_tail);
+
+ return false;
+ }
+
+ cfd = (struct canfd_frame *)skb->data;
+
+ return cfd->can_id & CAN_EFF_FLAG;
+}
+
+unsigned int rkcanfd_get_effective_tx_free(const struct rkcanfd_priv *priv)
+{
+ if (priv->devtype_data.quirks & RKCANFD_QUIRK_RK3568_ERRATUM_6 &&
+ rkcanfd_tx_tail_is_eff(priv))
+ return 0;
+
+ return rkcanfd_get_tx_free(priv);
+}
+
+static void rkcanfd_start_xmit_write_cmd(const struct rkcanfd_priv *priv,
+ const u32 reg_cmd)
+{
+ if (priv->devtype_data.quirks & RKCANFD_QUIRK_RK3568_ERRATUM_12)
+ rkcanfd_write(priv, RKCANFD_REG_MODE, priv->reg_mode_default |
+ RKCANFD_REG_MODE_SPACE_RX_MODE);
+
+ rkcanfd_write(priv, RKCANFD_REG_CMD, reg_cmd);
+
+ if (priv->devtype_data.quirks & RKCANFD_QUIRK_RK3568_ERRATUM_12)
+ rkcanfd_write(priv, RKCANFD_REG_MODE, priv->reg_mode_default);
+}
+
+void rkcanfd_xmit_retry(struct rkcanfd_priv *priv)
+{
+ const unsigned int tx_head = rkcanfd_get_tx_head(priv);
+ const u32 reg_cmd = RKCANFD_REG_CMD_TX_REQ(tx_head);
+
+ rkcanfd_start_xmit_write_cmd(priv, reg_cmd);
+}
+
+netdev_tx_t rkcanfd_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct rkcanfd_priv *priv = netdev_priv(ndev);
+ u32 reg_frameinfo, reg_id, reg_cmd;
+ unsigned int tx_head, frame_len;
+ const struct canfd_frame *cfd;
+ int err;
+ u8 i;
+
+ if (can_dev_dropped_skb(ndev, skb))
+ return NETDEV_TX_OK;
+
+ if (!netif_subqueue_maybe_stop(priv->ndev, 0,
+ rkcanfd_get_effective_tx_free(priv),
+ RKCANFD_TX_STOP_THRESHOLD,
+ RKCANFD_TX_START_THRESHOLD)) {
+ if (net_ratelimit())
+ netdev_info(priv->ndev,
+ "Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, tx_pending=%d)\n",
+ priv->tx_head, priv->tx_tail,
+ rkcanfd_get_tx_pending(priv));
+
+ return NETDEV_TX_BUSY;
+ }
+
+ cfd = (struct canfd_frame *)skb->data;
+
+ if (cfd->can_id & CAN_EFF_FLAG) {
+ reg_frameinfo = RKCANFD_REG_FD_FRAMEINFO_FRAME_FORMAT;
+ reg_id = FIELD_PREP(RKCANFD_REG_FD_ID_EFF, cfd->can_id);
+ } else {
+ reg_frameinfo = 0;
+ reg_id = FIELD_PREP(RKCANFD_REG_FD_ID_SFF, cfd->can_id);
+ }
+
+ if (cfd->can_id & CAN_RTR_FLAG)
+ reg_frameinfo |= RKCANFD_REG_FD_FRAMEINFO_RTR;
+
+ if (can_is_canfd_skb(skb)) {
+ reg_frameinfo |= RKCANFD_REG_FD_FRAMEINFO_FDF;
+
+ if (cfd->flags & CANFD_BRS)
+ reg_frameinfo |= RKCANFD_REG_FD_FRAMEINFO_BRS;
+
+ reg_frameinfo |= FIELD_PREP(RKCANFD_REG_FD_FRAMEINFO_DATA_LENGTH,
+ can_fd_len2dlc(cfd->len));
+ } else {
+ reg_frameinfo |= FIELD_PREP(RKCANFD_REG_FD_FRAMEINFO_DATA_LENGTH,
+ cfd->len);
+ }
+
+ tx_head = rkcanfd_get_tx_head(priv);
+ reg_cmd = RKCANFD_REG_CMD_TX_REQ(tx_head);
+
+ rkcanfd_write(priv, RKCANFD_REG_FD_TXFRAMEINFO, reg_frameinfo);
+ rkcanfd_write(priv, RKCANFD_REG_FD_TXID, reg_id);
+ for (i = 0; i < cfd->len; i += 4)
+ rkcanfd_write(priv, RKCANFD_REG_FD_TXDATA0 + i,
+ *(u32 *)(cfd->data + i));
+
+ frame_len = can_skb_get_frame_len(skb);
+ err = can_put_echo_skb(skb, ndev, tx_head, frame_len);
+ if (!err)
+ netdev_sent_queue(priv->ndev, frame_len);
+
+ WRITE_ONCE(priv->tx_head, priv->tx_head + 1);
+
+ rkcanfd_start_xmit_write_cmd(priv, reg_cmd);
+
+ netif_subqueue_maybe_stop(priv->ndev, 0,
+ rkcanfd_get_effective_tx_free(priv),
+ RKCANFD_TX_STOP_THRESHOLD,
+ RKCANFD_TX_START_THRESHOLD);
+
+ return NETDEV_TX_OK;
+}
+
+void rkcanfd_handle_tx_done_one(struct rkcanfd_priv *priv, const u32 ts,
+ unsigned int *frame_len_p)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ unsigned int tx_tail;
+ struct sk_buff *skb;
+
+ tx_tail = rkcanfd_get_tx_tail(priv);
+ skb = priv->can.echo_skb[tx_tail];
+
+ /* Manual handling of CAN Bus Error counters. See
+ * rkcanfd_get_corrected_berr_counter() for detailed
+ * explanation.
+ */
+ if (priv->bec.txerr)
+ priv->bec.txerr--;
+
+ if (skb)
+ rkcanfd_skb_set_timestamp(priv, skb, ts);
+ stats->tx_bytes +=
+ can_rx_offload_get_echo_skb_queue_timestamp(&priv->offload,
+ tx_tail, ts,
+ frame_len_p);
+ stats->tx_packets++;
+}
diff --git a/drivers/net/can/rockchip/rockchip_canfd.h b/drivers/net/can/rockchip/rockchip_canfd.h
new file mode 100644
index 000000000000..93131c7d7f54
--- /dev/null
+++ b/drivers/net/can/rockchip/rockchip_canfd.h
@@ -0,0 +1,553 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2023, 2024 Pengutronix,
+ * Marc Kleine-Budde <kernel@pengutronix.de>
+ */
+
+#ifndef _ROCKCHIP_CANFD_H
+#define _ROCKCHIP_CANFD_H
+
+#include <linux/bitfield.h>
+#include <linux/can/dev.h>
+#include <linux/can/rx-offload.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/netdevice.h>
+#include <linux/reset.h>
+#include <linux/skbuff.h>
+#include <linux/timecounter.h>
+#include <linux/types.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/units.h>
+
+#define RKCANFD_REG_MODE 0x000
+#define RKCANFD_REG_MODE_CAN_FD_MODE_ENABLE BIT(15)
+#define RKCANFD_REG_MODE_DPEE BIT(14)
+#define RKCANFD_REG_MODE_BRSD BIT(13)
+#define RKCANFD_REG_MODE_SPACE_RX_MODE BIT(12)
+#define RKCANFD_REG_MODE_AUTO_BUS_ON BIT(11)
+#define RKCANFD_REG_MODE_AUTO_RETX_MODE BIT(10)
+#define RKCANFD_REG_MODE_OVLD_MODE BIT(9)
+#define RKCANFD_REG_MODE_COVER_MODE BIT(8)
+#define RKCANFD_REG_MODE_RXSORT_MODE BIT(7)
+#define RKCANFD_REG_MODE_TXORDER_MODE BIT(6)
+#define RKCANFD_REG_MODE_RXSTX_MODE BIT(5)
+#define RKCANFD_REG_MODE_LBACK_MODE BIT(4)
+#define RKCANFD_REG_MODE_SILENT_MODE BIT(3)
+#define RKCANFD_REG_MODE_SELF_TEST BIT(2)
+#define RKCANFD_REG_MODE_SLEEP_MODE BIT(1)
+#define RKCANFD_REG_MODE_WORK_MODE BIT(0)
+
+#define RKCANFD_REG_CMD 0x004
+#define RKCANFD_REG_CMD_TX1_REQ BIT(1)
+#define RKCANFD_REG_CMD_TX0_REQ BIT(0)
+#define RKCANFD_REG_CMD_TX_REQ(i) (RKCANFD_REG_CMD_TX0_REQ << (i))
+
+#define RKCANFD_REG_STATE 0x008
+#define RKCANFD_REG_STATE_SLEEP_STATE BIT(6)
+#define RKCANFD_REG_STATE_BUS_OFF_STATE BIT(5)
+#define RKCANFD_REG_STATE_ERROR_WARNING_STATE BIT(4)
+#define RKCANFD_REG_STATE_TX_PERIOD BIT(3)
+#define RKCANFD_REG_STATE_RX_PERIOD BIT(2)
+#define RKCANFD_REG_STATE_TX_BUFFER_FULL BIT(1)
+#define RKCANFD_REG_STATE_RX_BUFFER_FULL BIT(0)
+
+#define RKCANFD_REG_INT 0x00c
+#define RKCANFD_REG_INT_WAKEUP_INT BIT(14)
+#define RKCANFD_REG_INT_TXE_FIFO_FULL_INT BIT(13)
+#define RKCANFD_REG_INT_TXE_FIFO_OV_INT BIT(12)
+#define RKCANFD_REG_INT_TIMESTAMP_COUNTER_OVERFLOW_INT BIT(11)
+#define RKCANFD_REG_INT_BUS_OFF_RECOVERY_INT BIT(10)
+#define RKCANFD_REG_INT_BUS_OFF_INT BIT(9)
+#define RKCANFD_REG_INT_RX_FIFO_OVERFLOW_INT BIT(8)
+#define RKCANFD_REG_INT_RX_FIFO_FULL_INT BIT(7)
+#define RKCANFD_REG_INT_ERROR_INT BIT(6)
+#define RKCANFD_REG_INT_TX_ARBIT_FAIL_INT BIT(5)
+#define RKCANFD_REG_INT_PASSIVE_ERROR_INT BIT(4)
+#define RKCANFD_REG_INT_OVERLOAD_INT BIT(3)
+#define RKCANFD_REG_INT_ERROR_WARNING_INT BIT(2)
+#define RKCANFD_REG_INT_TX_FINISH_INT BIT(1)
+#define RKCANFD_REG_INT_RX_FINISH_INT BIT(0)
+
+#define RKCANFD_REG_INT_ALL \
+ (RKCANFD_REG_INT_WAKEUP_INT | \
+ RKCANFD_REG_INT_TXE_FIFO_FULL_INT | \
+ RKCANFD_REG_INT_TXE_FIFO_OV_INT | \
+ RKCANFD_REG_INT_TIMESTAMP_COUNTER_OVERFLOW_INT | \
+ RKCANFD_REG_INT_BUS_OFF_RECOVERY_INT | \
+ RKCANFD_REG_INT_BUS_OFF_INT | \
+ RKCANFD_REG_INT_RX_FIFO_OVERFLOW_INT | \
+ RKCANFD_REG_INT_RX_FIFO_FULL_INT | \
+ RKCANFD_REG_INT_ERROR_INT | \
+ RKCANFD_REG_INT_TX_ARBIT_FAIL_INT | \
+ RKCANFD_REG_INT_PASSIVE_ERROR_INT | \
+ RKCANFD_REG_INT_OVERLOAD_INT | \
+ RKCANFD_REG_INT_ERROR_WARNING_INT | \
+ RKCANFD_REG_INT_TX_FINISH_INT | \
+ RKCANFD_REG_INT_RX_FINISH_INT)
+
+#define RKCANFD_REG_INT_ALL_ERROR \
+ (RKCANFD_REG_INT_BUS_OFF_INT | \
+ RKCANFD_REG_INT_ERROR_INT | \
+ RKCANFD_REG_INT_PASSIVE_ERROR_INT | \
+ RKCANFD_REG_INT_ERROR_WARNING_INT)
+
+#define RKCANFD_REG_INT_MASK 0x010
+
+#define RKCANFD_REG_DMA_CTL 0x014
+#define RKCANFD_REG_DMA_CTL_DMA_RX_MODE BIT(1)
+#define RKCANFD_REG_DMA_CTL_DMA_TX_MODE BIT(9)
+
+#define RKCANFD_REG_BITTIMING 0x018
+#define RKCANFD_REG_BITTIMING_SAMPLE_MODE BIT(16)
+#define RKCANFD_REG_BITTIMING_SJW GENMASK(15, 14)
+#define RKCANFD_REG_BITTIMING_BRP GENMASK(13, 8)
+#define RKCANFD_REG_BITTIMING_TSEG2 GENMASK(6, 4)
+#define RKCANFD_REG_BITTIMING_TSEG1 GENMASK(3, 0)
+
+#define RKCANFD_REG_ARBITFAIL 0x028
+#define RKCANFD_REG_ARBITFAIL_ARBIT_FAIL_CODE GENMASK(6, 0)
+
+/* Register seems to be cleared on read */
+#define RKCANFD_REG_ERROR_CODE 0x02c
+#define RKCANFD_REG_ERROR_CODE_PHASE BIT(29)
+#define RKCANFD_REG_ERROR_CODE_TYPE GENMASK(28, 26)
+#define RKCANFD_REG_ERROR_CODE_TYPE_BIT 0x0
+#define RKCANFD_REG_ERROR_CODE_TYPE_STUFF 0x1
+#define RKCANFD_REG_ERROR_CODE_TYPE_FORM 0x2
+#define RKCANFD_REG_ERROR_CODE_TYPE_ACK 0x3
+#define RKCANFD_REG_ERROR_CODE_TYPE_CRC 0x4
+#define RKCANFD_REG_ERROR_CODE_DIRECTION_RX BIT(25)
+#define RKCANFD_REG_ERROR_CODE_TX GENMASK(24, 16)
+#define RKCANFD_REG_ERROR_CODE_TX_OVERLOAD BIT(24)
+#define RKCANFD_REG_ERROR_CODE_TX_ERROR BIT(23)
+#define RKCANFD_REG_ERROR_CODE_TX_ACK BIT(22)
+#define RKCANFD_REG_ERROR_CODE_TX_ACK_EOF BIT(21)
+#define RKCANFD_REG_ERROR_CODE_TX_CRC BIT(20)
+#define RKCANFD_REG_ERROR_CODE_TX_STUFF_COUNT BIT(19)
+#define RKCANFD_REG_ERROR_CODE_TX_DATA BIT(18)
+#define RKCANFD_REG_ERROR_CODE_TX_SOF_DLC BIT(17)
+#define RKCANFD_REG_ERROR_CODE_TX_IDLE BIT(16)
+#define RKCANFD_REG_ERROR_CODE_RX GENMASK(15, 0)
+#define RKCANFD_REG_ERROR_CODE_RX_BUF_INT BIT(15)
+#define RKCANFD_REG_ERROR_CODE_RX_SPACE BIT(14)
+#define RKCANFD_REG_ERROR_CODE_RX_EOF BIT(13)
+#define RKCANFD_REG_ERROR_CODE_RX_ACK_LIM BIT(12)
+#define RKCANFD_REG_ERROR_CODE_RX_ACK BIT(11)
+#define RKCANFD_REG_ERROR_CODE_RX_CRC_LIM BIT(10)
+#define RKCANFD_REG_ERROR_CODE_RX_CRC BIT(9)
+#define RKCANFD_REG_ERROR_CODE_RX_STUFF_COUNT BIT(8)
+#define RKCANFD_REG_ERROR_CODE_RX_DATA BIT(7)
+#define RKCANFD_REG_ERROR_CODE_RX_DLC BIT(6)
+#define RKCANFD_REG_ERROR_CODE_RX_BRS_ESI BIT(5)
+#define RKCANFD_REG_ERROR_CODE_RX_RES BIT(4)
+#define RKCANFD_REG_ERROR_CODE_RX_FDF BIT(3)
+#define RKCANFD_REG_ERROR_CODE_RX_ID2_RTR BIT(2)
+#define RKCANFD_REG_ERROR_CODE_RX_SOF_IDE BIT(1)
+#define RKCANFD_REG_ERROR_CODE_RX_IDLE BIT(0)
+
+#define RKCANFD_REG_ERROR_CODE_NOACK \
+ (FIELD_PREP(RKCANFD_REG_ERROR_CODE_TYPE, \
+ RKCANFD_REG_ERROR_CODE_TYPE_ACK) | \
+ RKCANFD_REG_ERROR_CODE_TX_ACK_EOF | \
+ RKCANFD_REG_ERROR_CODE_RX_ACK)
+
+#define RKCANFD_REG_RXERRORCNT 0x034
+#define RKCANFD_REG_RXERRORCNT_RX_ERR_CNT GENMASK(7, 0)
+
+#define RKCANFD_REG_TXERRORCNT 0x038
+#define RKCANFD_REG_TXERRORCNT_TX_ERR_CNT GENMASK(8, 0)
+
+#define RKCANFD_REG_IDCODE 0x03c
+#define RKCANFD_REG_IDCODE_STANDARD_FRAME_ID GENMASK(10, 0)
+#define RKCANFD_REG_IDCODE_EXTENDED_FRAME_ID GENMASK(28, 0)
+
+#define RKCANFD_REG_IDMASK 0x040
+
+#define RKCANFD_REG_TXFRAMEINFO 0x050
+#define RKCANFD_REG_FRAMEINFO_FRAME_FORMAT BIT(7)
+#define RKCANFD_REG_FRAMEINFO_RTR BIT(6)
+#define RKCANFD_REG_FRAMEINFO_DATA_LENGTH GENMASK(3, 0)
+
+#define RKCANFD_REG_TXID 0x054
+#define RKCANFD_REG_TXID_TX_ID GENMASK(28, 0)
+
+#define RKCANFD_REG_TXDATA0 0x058
+#define RKCANFD_REG_TXDATA1 0x05C
+#define RKCANFD_REG_RXFRAMEINFO 0x060
+#define RKCANFD_REG_RXID 0x064
+#define RKCANFD_REG_RXDATA0 0x068
+#define RKCANFD_REG_RXDATA1 0x06c
+
+#define RKCANFD_REG_RTL_VERSION 0x070
+#define RKCANFD_REG_RTL_VERSION_MAJOR GENMASK(7, 4)
+#define RKCANFD_REG_RTL_VERSION_MINOR GENMASK(3, 0)
+
+#define RKCANFD_REG_FD_NOMINAL_BITTIMING 0x100
+#define RKCANFD_REG_FD_NOMINAL_BITTIMING_SAMPLE_MODE BIT(31)
+#define RKCANFD_REG_FD_NOMINAL_BITTIMING_SJW GENMASK(30, 24)
+#define RKCANFD_REG_FD_NOMINAL_BITTIMING_BRP GENMASK(23, 16)
+#define RKCANFD_REG_FD_NOMINAL_BITTIMING_TSEG2 GENMASK(14, 8)
+#define RKCANFD_REG_FD_NOMINAL_BITTIMING_TSEG1 GENMASK(7, 0)
+
+#define RKCANFD_REG_FD_DATA_BITTIMING 0x104
+#define RKCANFD_REG_FD_DATA_BITTIMING_SAMPLE_MODE BIT(21)
+#define RKCANFD_REG_FD_DATA_BITTIMING_SJW GENMASK(20, 17)
+#define RKCANFD_REG_FD_DATA_BITTIMING_BRP GENMASK(16, 9)
+#define RKCANFD_REG_FD_DATA_BITTIMING_TSEG2 GENMASK(8, 5)
+#define RKCANFD_REG_FD_DATA_BITTIMING_TSEG1 GENMASK(4, 0)
+
+#define RKCANFD_REG_TRANSMIT_DELAY_COMPENSATION 0x108
+#define RKCANFD_REG_TRANSMIT_DELAY_COMPENSATION_TDC_OFFSET GENMASK(6, 1)
+#define RKCANFD_REG_TRANSMIT_DELAY_COMPENSATION_TDC_ENABLE BIT(0)
+
+#define RKCANFD_REG_TIMESTAMP_CTRL 0x10c
+/* datasheet says 6:1, which is wrong */
+#define RKCANFD_REG_TIMESTAMP_CTRL_TIME_BASE_COUNTER_PRESCALE GENMASK(5, 1)
+#define RKCANFD_REG_TIMESTAMP_CTRL_TIME_BASE_COUNTER_ENABLE BIT(0)
+
+#define RKCANFD_REG_TIMESTAMP 0x110
+
+#define RKCANFD_REG_TXEVENT_FIFO_CTRL 0x114
+#define RKCANFD_REG_TXEVENT_FIFO_CTRL_TXE_FIFO_CNT GENMASK(8, 5)
+#define RKCANFD_REG_TXEVENT_FIFO_CTRL_TXE_FIFO_WATERMARK GENMASK(4, 1)
+#define RKCANFD_REG_TXEVENT_FIFO_CTRL_TXE_FIFO_ENABLE BIT(0)
+
+#define RKCANFD_REG_RX_FIFO_CTRL 0x118
+#define RKCANFD_REG_RX_FIFO_CTRL_RX_FIFO_CNT GENMASK(6, 4)
+#define RKCANFD_REG_RX_FIFO_CTRL_RX_FIFO_FULL_WATERMARK GENMASK(3, 1)
+#define RKCANFD_REG_RX_FIFO_CTRL_RX_FIFO_ENABLE BIT(0)
+
+#define RKCANFD_REG_AFC_CTRL 0x11c
+#define RKCANFD_REG_AFC_CTRL_UAF5 BIT(4)
+#define RKCANFD_REG_AFC_CTRL_UAF4 BIT(3)
+#define RKCANFD_REG_AFC_CTRL_UAF3 BIT(2)
+#define RKCANFD_REG_AFC_CTRL_UAF2 BIT(1)
+#define RKCANFD_REG_AFC_CTRL_UAF1 BIT(0)
+
+#define RKCANFD_REG_IDCODE0 0x120
+#define RKCANFD_REG_IDMASK0 0x124
+#define RKCANFD_REG_IDCODE1 0x128
+#define RKCANFD_REG_IDMASK1 0x12c
+#define RKCANFD_REG_IDCODE2 0x130
+#define RKCANFD_REG_IDMASK2 0x134
+#define RKCANFD_REG_IDCODE3 0x138
+#define RKCANFD_REG_IDMASK3 0x13c
+#define RKCANFD_REG_IDCODE4 0x140
+#define RKCANFD_REG_IDMASK4 0x144
+
+#define RKCANFD_REG_FD_TXFRAMEINFO 0x200
+#define RKCANFD_REG_FD_FRAMEINFO_FRAME_FORMAT BIT(7)
+#define RKCANFD_REG_FD_FRAMEINFO_RTR BIT(6)
+#define RKCANFD_REG_FD_FRAMEINFO_FDF BIT(5)
+#define RKCANFD_REG_FD_FRAMEINFO_BRS BIT(4)
+#define RKCANFD_REG_FD_FRAMEINFO_DATA_LENGTH GENMASK(3, 0)
+
+#define RKCANFD_REG_FD_TXID 0x204
+#define RKCANFD_REG_FD_ID_EFF GENMASK(28, 0)
+#define RKCANFD_REG_FD_ID_SFF GENMASK(11, 0)
+
+#define RKCANFD_REG_FD_TXDATA0 0x208
+#define RKCANFD_REG_FD_TXDATA1 0x20c
+#define RKCANFD_REG_FD_TXDATA2 0x210
+#define RKCANFD_REG_FD_TXDATA3 0x214
+#define RKCANFD_REG_FD_TXDATA4 0x218
+#define RKCANFD_REG_FD_TXDATA5 0x21c
+#define RKCANFD_REG_FD_TXDATA6 0x220
+#define RKCANFD_REG_FD_TXDATA7 0x224
+#define RKCANFD_REG_FD_TXDATA8 0x228
+#define RKCANFD_REG_FD_TXDATA9 0x22c
+#define RKCANFD_REG_FD_TXDATA10 0x230
+#define RKCANFD_REG_FD_TXDATA11 0x234
+#define RKCANFD_REG_FD_TXDATA12 0x238
+#define RKCANFD_REG_FD_TXDATA13 0x23c
+#define RKCANFD_REG_FD_TXDATA14 0x240
+#define RKCANFD_REG_FD_TXDATA15 0x244
+
+#define RKCANFD_REG_FD_RXFRAMEINFO 0x300
+#define RKCANFD_REG_FD_RXID 0x304
+#define RKCANFD_REG_FD_RXTIMESTAMP 0x308
+#define RKCANFD_REG_FD_RXDATA0 0x30c
+#define RKCANFD_REG_FD_RXDATA1 0x310
+#define RKCANFD_REG_FD_RXDATA2 0x314
+#define RKCANFD_REG_FD_RXDATA3 0x318
+#define RKCANFD_REG_FD_RXDATA4 0x31c
+#define RKCANFD_REG_FD_RXDATA5 0x320
+#define RKCANFD_REG_FD_RXDATA6 0x324
+#define RKCANFD_REG_FD_RXDATA7 0x328
+#define RKCANFD_REG_FD_RXDATA8 0x32c
+#define RKCANFD_REG_FD_RXDATA9 0x330
+#define RKCANFD_REG_FD_RXDATA10 0x334
+#define RKCANFD_REG_FD_RXDATA11 0x338
+#define RKCANFD_REG_FD_RXDATA12 0x33c
+#define RKCANFD_REG_FD_RXDATA13 0x340
+#define RKCANFD_REG_FD_RXDATA14 0x344
+#define RKCANFD_REG_FD_RXDATA15 0x348
+
+#define RKCANFD_REG_RX_FIFO_RDATA 0x400
+#define RKCANFD_REG_TXE_FIFO_RDATA 0x500
+
+#define DEVICE_NAME "rockchip_canfd"
+#define RKCANFD_NAPI_WEIGHT 32
+#define RKCANFD_TXFIFO_DEPTH 2
+#define RKCANFD_TX_STOP_THRESHOLD 1
+#define RKCANFD_TX_START_THRESHOLD 1
+
+#define RKCANFD_TIMESTAMP_WORK_MAX_DELAY_SEC 60
+#define RKCANFD_ERRATUM_5_SYSCLOCK_HZ_MIN (300 * MEGA)
+
+/* rk3568 CAN-FD Errata, as of Tue 07 Nov 2023 11:25:31 +08:00 */
+
+/* Erratum 1: The error frame sent by the CAN controller has an
+ * abnormal format.
+ */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_1 BIT(0)
+
+/* Erratum 2: The error frame sent after detecting a CRC error has an
+ * abnormal position.
+ */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_2 BIT(1)
+
+/* Erratum 3: Intermittent CRC calculation errors. */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_3 BIT(2)
+
+/* Erratum 4: Intermittent occurrence of stuffing errors. */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_4 BIT(3)
+
+/* Erratum 5: Counters related to the TXFIFO and RXFIFO exhibit
+ * abnormal counting behavior.
+ *
+ * The rk3568 CAN-FD errata sheet as of Tue 07 Nov 2023 11:25:31 +08:00
+ * states that only the rk3568v2 is affected by this erratum, but
+ * tests with the rk3568v2 and rk3568v3 show that the RX_FIFO_CNT is
+ * sometimes too high. This leads to CAN frames being read from a
+ * FIFO that is already empty.
+ *
+ * Further tests on the rk3568v2 and rk3568v3 show that in this
+ * situation (i.e. empty FIFO) all elements of the FIFO header
+ * (frameinfo, id, ts) contain the same data.
+ *
+ * On the rk3568v2 and rk3568v3, this problem only occurs extremely
+ * rarely with the standard clock of 300 MHz, but almost immediately
+ * at 80 MHz.
+ *
+ * To workaround this problem, check for empty FIFO with
+ * rkcanfd_fifo_header_empty() in rkcanfd_handle_rx_int_one() and exit
+ * early.
+ *
+ * To reproduce:
+ * assigned-clocks = <&cru CLK_CANx>;
+ * assigned-clock-rates = <80000000>;
+ */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_5 BIT(4)
+
+/* Erratum 6: The CAN controller's transmission of extended frames may
+ * intermittently change into standard frames.
+ *
+ * Work around this issue by activating self reception (RXSTX). If we
+ * have pending TX CAN frames, check all RX'ed CAN frames in
+ * rkcanfd_rxstx_filter().
+ *
+ * If it's a frame we've sent and it's OK, call the TX complete
+ * handler: rkcanfd_handle_tx_done_one(). Mask the TX complete IRQ.
+ *
+ * If it's a frame we've sent, but the CAN-ID is mangled, resend the
+ * original extended frame.
+ *
+ * To reproduce:
+ * host:
+ * canfdtest -evx -g can0
+ * candump any,0:80000000 -cexdtA
+ * dut:
+ * canfdtest -evx can0
+ * ethtool -S can0
+ */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_6 BIT(5)
+
+/* Erratum 7: In the passive error state, the CAN controller's
+ * interframe space segment counting is inaccurate.
+ */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_7 BIT(6)
+
+/* Erratum 8: The Format-Error error flag is transmitted one bit
+ * later.
+ */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_8 BIT(7)
+
+/* Erratum 9: In the arbitration segment, the CAN controller will
+ * identify stuffing errors as arbitration failures.
+ */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_9 BIT(8)
+
+/* Erratum 10: Does not support the BUSOFF slow recovery mechanism. */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_10 BIT(9)
+
+/* Erratum 11: Arbitration error. */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_11 BIT(10)
+
+/* Erratum 12: A dominant bit at the third bit of the intermission may
+ * cause a transmission error.
+ */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_12 BIT(11)
+
+/* Tests on the rk3568v2 and rk3568v3 show that receiving certain
+ * CAN-FD frames triggers an Error Interrupt.
+ *
+ * - Form Error in RX Arbitration Phase: TX_IDLE RX_STUFF_COUNT (0x0a010100) CMD=0 RX=0 TX=0
+ * Error-Warning=1 Bus-Off=0
+ * To reproduce:
+ * host:
+ * cansend can0 002##01f
+ * DUT:
+ * candump any,0:0,#FFFFFFFF -cexdHtA
+ *
+ * - Form Error in RX Arbitration Phase: TX_IDLE RX_CRC (0x0a010200) CMD=0 RX=0 TX=0
+ * Error-Warning=1 Bus-Off=0
+ * To reproduce:
+ * host:
+ * cansend can0 002##07217010000000000
+ * DUT:
+ * candump any,0:0,#FFFFFFFF -cexdHtA
+ */
+#define RKCANFD_QUIRK_CANFD_BROKEN BIT(12)
+
+/* known issues with rk3568v3:
+ *
+ * - Overload situation during high bus load
+ * To reproduce:
+ * host:
+ * # add a 2nd CAN adapter to the CAN bus
+ * cangen can0 -I 1 -Li -Di -p10 -g 0.3
+ * cansequence -rve
+ * DUT:
+ * cangen can0 -I2 -L1 -Di -p10 -c10 -g 1 -e
+ * cansequence -rv -i 1
+ *
+ * - TX starvation after repeated Bus-Off
+ * To reproduce:
+ * host:
+ * sleep 3 && cangen can0 -I2 -Li -Di -p10 -g 0.0
+ * DUT:
+ * cangen can0 -I2 -Li -Di -p10 -g 0.05
+ */
+
+enum rkcanfd_model {
+ RKCANFD_MODEL_RK3568V2 = 0x35682,
+ RKCANFD_MODEL_RK3568V3 = 0x35683,
+};
+
+struct rkcanfd_devtype_data {
+ enum rkcanfd_model model;
+ u32 quirks;
+};
+
+struct rkcanfd_fifo_header {
+ u32 frameinfo;
+ u32 id;
+ u32 ts;
+};
+
+struct rkcanfd_stats {
+ struct u64_stats_sync syncp;
+
+ /* Erratum 5 */
+ u64_stats_t rx_fifo_empty_errors;
+
+ /* Erratum 6 */
+ u64_stats_t tx_extended_as_standard_errors;
+};
+
+struct rkcanfd_priv {
+ struct can_priv can;
+ struct can_rx_offload offload;
+ struct net_device *ndev;
+
+ void __iomem *regs;
+ unsigned int tx_head;
+ unsigned int tx_tail;
+
+ u32 reg_mode_default;
+ u32 reg_int_mask_default;
+ struct rkcanfd_devtype_data devtype_data;
+
+ struct cyclecounter cc;
+ struct timecounter tc;
+ struct delayed_work timestamp;
+ unsigned long work_delay_jiffies;
+
+ struct can_berr_counter bec;
+
+ struct rkcanfd_stats stats;
+
+ struct reset_control *reset;
+ struct clk_bulk_data *clks;
+ int clks_num;
+};
+
+static inline u32
+rkcanfd_read(const struct rkcanfd_priv *priv, u32 reg)
+{
+ return readl(priv->regs + reg);
+}
+
+static inline void
+rkcanfd_read_rep(const struct rkcanfd_priv *priv, u32 reg,
+ void *buf, unsigned int len)
+{
+ readsl(priv->regs + reg, buf, len / sizeof(u32));
+}
+
+static inline void
+rkcanfd_write(const struct rkcanfd_priv *priv, u32 reg, u32 val)
+{
+ writel(val, priv->regs + reg);
+}
+
+static inline u32
+rkcanfd_get_timestamp(const struct rkcanfd_priv *priv)
+{
+ return rkcanfd_read(priv, RKCANFD_REG_TIMESTAMP);
+}
+
+static inline unsigned int
+rkcanfd_get_tx_head(const struct rkcanfd_priv *priv)
+{
+ return READ_ONCE(priv->tx_head) & (RKCANFD_TXFIFO_DEPTH - 1);
+}
+
+static inline unsigned int
+rkcanfd_get_tx_tail(const struct rkcanfd_priv *priv)
+{
+ return READ_ONCE(priv->tx_tail) & (RKCANFD_TXFIFO_DEPTH - 1);
+}
+
+static inline unsigned int
+rkcanfd_get_tx_pending(const struct rkcanfd_priv *priv)
+{
+ return READ_ONCE(priv->tx_head) - READ_ONCE(priv->tx_tail);
+}
+
+static inline unsigned int
+rkcanfd_get_tx_free(const struct rkcanfd_priv *priv)
+{
+ return RKCANFD_TXFIFO_DEPTH - rkcanfd_get_tx_pending(priv);
+}
+
+void rkcanfd_ethtool_init(struct rkcanfd_priv *priv);
+
+int rkcanfd_handle_rx_int(struct rkcanfd_priv *priv);
+
+void rkcanfd_skb_set_timestamp(const struct rkcanfd_priv *priv,
+ struct sk_buff *skb, const u32 timestamp);
+void rkcanfd_timestamp_init(struct rkcanfd_priv *priv);
+void rkcanfd_timestamp_start(struct rkcanfd_priv *priv);
+void rkcanfd_timestamp_stop(struct rkcanfd_priv *priv);
+void rkcanfd_timestamp_stop_sync(struct rkcanfd_priv *priv);
+
+unsigned int rkcanfd_get_effective_tx_free(const struct rkcanfd_priv *priv);
+void rkcanfd_xmit_retry(struct rkcanfd_priv *priv);
+netdev_tx_t rkcanfd_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+void rkcanfd_handle_tx_done_one(struct rkcanfd_priv *priv, const u32 ts,
+ unsigned int *frame_len_p);
+
+#endif
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index 4b2f9cb17fc3..e061e35769bf 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -87,6 +87,7 @@ config CAN_PLX_PCI
config CAN_SJA1000_ISA
tristate "ISA Bus based legacy SJA1000 driver"
+ depends on HAS_IOPORT
help
This driver adds legacy support for SJA1000 chips connected to
the ISA bus using I/O port, memory mapped or indirect access.
@@ -104,7 +105,7 @@ config CAN_SJA1000_PLATFORM
config CAN_TSCAN1
tristate "TS-CAN1 PC104 boards"
- depends on ISA
+ depends on (ISA && PC104) || (COMPILE_TEST && HAS_IOPORT)
help
This driver is for Technologic Systems' TSCAN-1 PC104 boards.
https://www.embeddedts.com/products/TS-CAN1
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index da396d641e24..10d88cbda465 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -1,11 +1,11 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2007, 2011 Wolfgang Grandegger <wg@grandegger.com>
- * Copyright (C) 2012 Stephane Grosjean <s.grosjean@peak-system.com>
*
* Derived from the PCAN project file driver/src/pcan_pci.c:
*
- * Copyright (C) 2001-2006 PEAK System-Technik GmbH
+ * Copyright (C) 2001-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*/
#include <linux/kernel.h>
@@ -22,7 +22,7 @@
#include "sja1000.h"
-MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>");
+MODULE_AUTHOR("Stéphane Grosjean <stephane.grosjean@hms-networks.com>");
MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCI family cards");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c
index ebd5941c3f53..e1610b527d13 100644
--- a/drivers/net/can/sja1000/peak_pcmcia.c
+++ b/drivers/net/can/sja1000/peak_pcmcia.c
@@ -1,10 +1,10 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2010-2012 Stephane Grosjean <s.grosjean@peak-system.com>
- *
* CAN driver for PEAK-System PCAN-PC Card
* Derived from the PCAN project file driver/src/pcan_pccard.c
- * Copyright (C) 2006-2010 PEAK System-Technik GmbH
+ *
+ * Copyright (C) 2006-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*/
#include <linux/kernel.h>
#include <linux/module.h>
@@ -19,7 +19,7 @@
#include <linux/can/dev.h>
#include "sja1000.h"
-MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>");
+MODULE_AUTHOR("Stéphane Grosjean <stephane.grosjean@hms-networks.com>");
MODULE_DESCRIPTION("CAN driver for PEAK-System PCAN-PC Cards");
MODULE_LICENSE("GPL v2");
@@ -167,7 +167,7 @@ static void pcan_start_led_timer(struct pcan_pccard *card)
*/
static void pcan_stop_led_timer(struct pcan_pccard *card)
{
- del_timer_sync(&card->led_timer);
+ timer_delete_sync(&card->led_timer);
}
/*
@@ -374,7 +374,7 @@ static inline void pcan_set_can_power(struct pcan_pccard *card, int onoff)
*/
static void pcan_led_timer(struct timer_list *t)
{
- struct pcan_pccard *card = from_timer(card, t, led_timer);
+ struct pcan_pccard *card = timer_container_of(card, t, led_timer);
struct net_device *netdev;
int i, up_count = 0;
u8 ccr;
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
index 5de1ebb0c6f0..67e5316c6372 100644
--- a/drivers/net/can/sja1000/plx_pci.c
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -122,7 +122,6 @@ struct plx_pci_card {
#define TEWS_PCI_VENDOR_ID 0x1498
#define TEWS_PCI_DEVICE_ID_TMPC810 0x032A
-#define CTI_PCI_VENDOR_ID 0x12c4
#define CTI_PCI_DEVICE_ID_CRG001 0x0900
#define MOXA_PCI_VENDOR_ID 0x1393
@@ -358,7 +357,7 @@ static const struct pci_device_id plx_pci_tbl[] = {
{
/* Connect Tech Inc. CANpro/104-Plus Opto (CRG001) card */
PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030,
- CTI_PCI_VENDOR_ID, CTI_PCI_DEVICE_ID_CRG001,
+ PCI_SUBVENDOR_ID_CONNECT_TECH, CTI_PCI_DEVICE_ID_CRG001,
0, 0,
(kernel_ulong_t)&plx_pci_card_info_cti
},
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index ddb3247948ad..a8fa0d6516b9 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -416,8 +416,6 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
int ret = 0;
skb = alloc_can_err_skb(dev, &cf);
- if (skb == NULL)
- return -ENOMEM;
txerr = priv->read_reg(priv, SJA1000_TXERR);
rxerr = priv->read_reg(priv, SJA1000_RXERR);
@@ -425,8 +423,11 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
if (isrc & IRQ_DOI) {
/* data overrun interrupt */
netdev_dbg(dev, "data overrun interrupt\n");
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ }
+
stats->rx_over_errors++;
stats->rx_errors++;
sja1000_write_cmdreg(priv, CMD_CDO); /* clear bit */
@@ -452,7 +453,7 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
else
state = CAN_STATE_ERROR_ACTIVE;
}
- if (state != CAN_STATE_BUS_OFF) {
+ if (state != CAN_STATE_BUS_OFF && skb) {
cf->can_id |= CAN_ERR_CNT;
cf->data[6] = txerr;
cf->data[7] = rxerr;
@@ -460,33 +461,38 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
if (isrc & IRQ_BEI) {
/* bus error interrupt */
priv->can.can_stats.bus_error++;
- stats->rx_errors++;
ecc = priv->read_reg(priv, SJA1000_ECC);
+ if (skb) {
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
- cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
-
- /* set error type */
- switch (ecc & ECC_MASK) {
- case ECC_BIT:
- cf->data[2] |= CAN_ERR_PROT_BIT;
- break;
- case ECC_FORM:
- cf->data[2] |= CAN_ERR_PROT_FORM;
- break;
- case ECC_STUFF:
- cf->data[2] |= CAN_ERR_PROT_STUFF;
- break;
- default:
- break;
- }
+ /* set error type */
+ switch (ecc & ECC_MASK) {
+ case ECC_BIT:
+ cf->data[2] |= CAN_ERR_PROT_BIT;
+ break;
+ case ECC_FORM:
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ break;
+ case ECC_STUFF:
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ break;
+ default:
+ break;
+ }
- /* set error location */
- cf->data[3] = ecc & ECC_SEG;
+ /* set error location */
+ cf->data[3] = ecc & ECC_SEG;
+ }
/* Error occurred during transmission? */
- if ((ecc & ECC_DIR) == 0)
- cf->data[2] |= CAN_ERR_PROT_TX;
+ if ((ecc & ECC_DIR) == 0) {
+ stats->tx_errors++;
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_TX;
+ } else {
+ stats->rx_errors++;
+ }
}
if (isrc & IRQ_EPI) {
/* error passive interrupt */
@@ -502,8 +508,10 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
netdev_dbg(dev, "arbitration lost interrupt\n");
alc = priv->read_reg(priv, SJA1000_ALC);
priv->can.can_stats.arbitration_lost++;
- cf->can_id |= CAN_ERR_LOSTARB;
- cf->data[0] = alc & 0x1f;
+ if (skb) {
+ cf->can_id |= CAN_ERR_LOSTARB;
+ cf->data[0] = alc & 0x1f;
+ }
}
if (state != priv->can.state) {
@@ -516,6 +524,9 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
can_bus_off(dev);
}
+ if (!skb)
+ return -ENOMEM;
+
netif_rx(skb);
return ret;
@@ -537,8 +548,8 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
if (priv->read_reg(priv, SJA1000_IER) == IRQ_OFF)
goto out;
- while ((isrc = priv->read_reg(priv, SJA1000_IR)) &&
- (n < SJA1000_MAX_IRQ)) {
+ while ((n < SJA1000_MAX_IRQ) &&
+ (isrc = priv->read_reg(priv, SJA1000_IR))) {
status = priv->read_reg(priv, SJA1000_SR);
/* check for absent controller due to hw unplug */
@@ -686,7 +697,6 @@ static const struct net_device_ops sja1000_netdev_ops = {
.ndo_open = sja1000_open,
.ndo_stop = sja1000_close,
.ndo_start_xmit = sja1000_start_xmit,
- .ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops sja1000_ethtool_ops = {
diff --git a/drivers/net/can/sja1000/sja1000_isa.c b/drivers/net/can/sja1000/sja1000_isa.c
index fca5a9a1d857..2d1f715459d7 100644
--- a/drivers/net/can/sja1000/sja1000_isa.c
+++ b/drivers/net/can/sja1000/sja1000_isa.c
@@ -245,7 +245,7 @@ static void sja1000_isa_remove(struct platform_device *pdev)
static struct platform_driver sja1000_isa_driver = {
.probe = sja1000_isa_probe,
- .remove_new = sja1000_isa_remove,
+ .remove = sja1000_isa_remove,
.driver = {
.name = DRV_NAME,
},
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index 33f0e46ab1c2..2d555f854008 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -230,18 +230,9 @@ static int sp_probe(struct platform_device *pdev)
return -ENODEV;
}
- res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res_mem)
- return -ENODEV;
-
- if (!devm_request_mem_region(&pdev->dev, res_mem->start,
- resource_size(res_mem), DRV_NAME))
- return -EBUSY;
-
- addr = devm_ioremap(&pdev->dev, res_mem->start,
- resource_size(res_mem));
- if (!addr)
- return -ENOMEM;
+ addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res_mem);
+ if (IS_ERR(addr))
+ return PTR_ERR(addr);
if (of) {
irq = platform_get_irq(pdev, 0);
@@ -329,7 +320,7 @@ static void sp_remove(struct platform_device *pdev)
static struct platform_driver sp_driver = {
.probe = sp_probe,
- .remove_new = sp_remove,
+ .remove = sp_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = sp_of_table,
diff --git a/drivers/net/can/slcan/slcan-core.c b/drivers/net/can/slcan/slcan-core.c
index 24c6622d36bd..cd789e178d34 100644
--- a/drivers/net/can/slcan/slcan-core.c
+++ b/drivers/net/can/slcan/slcan-core.c
@@ -71,12 +71,21 @@ MODULE_AUTHOR("Dario Binacchi <dario.binacchi@amarulasolutions.com>");
#define SLCAN_CMD_LEN 1
#define SLCAN_SFF_ID_LEN 3
#define SLCAN_EFF_ID_LEN 8
+#define SLCAN_DATA_LENGTH_LEN 1
+#define SLCAN_ERROR_LEN 1
#define SLCAN_STATE_LEN 1
#define SLCAN_STATE_BE_RXCNT_LEN 3
#define SLCAN_STATE_BE_TXCNT_LEN 3
-#define SLCAN_STATE_FRAME_LEN (1 + SLCAN_CMD_LEN + \
- SLCAN_STATE_BE_RXCNT_LEN + \
- SLCAN_STATE_BE_TXCNT_LEN)
+#define SLCAN_STATE_MSG_LEN (SLCAN_CMD_LEN + \
+ SLCAN_STATE_LEN + \
+ SLCAN_STATE_BE_RXCNT_LEN + \
+ SLCAN_STATE_BE_TXCNT_LEN)
+#define SLCAN_ERROR_MSG_LEN_MIN (SLCAN_CMD_LEN + \
+ SLCAN_ERROR_LEN + \
+ SLCAN_DATA_LENGTH_LEN)
+#define SLCAN_FRAME_MSG_LEN_MIN (SLCAN_CMD_LEN + \
+ SLCAN_SFF_ID_LEN + \
+ SLCAN_DATA_LENGTH_LEN)
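For example, the shortest data frame the parser now accepts is SLCAN_FRAME_MSG_LEN_MIN = 1 (command byte 't') + 3 (11-bit ID in ASCII hex) + 1 (DLC digit) = 5 characters, e.g. "t1230" for ID 0x123 with DLC 0; shorter input is dropped by the new length checks below. Likewise an error message needs at least 3 characters and a state message exactly SLCAN_STATE_MSG_LEN.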
struct slcan {
struct can_priv can;
@@ -176,6 +185,9 @@ static void slcan_bump_frame(struct slcan *sl)
u32 tmpid;
char *cmd = sl->rbuff;
+ if (sl->rcount < SLCAN_FRAME_MSG_LEN_MIN)
+ return;
+
skb = alloc_can_skb(sl->dev, &cf);
if (unlikely(!skb)) {
sl->dev->stats.rx_dropped++;
@@ -281,7 +293,7 @@ static void slcan_bump_state(struct slcan *sl)
return;
}
- if (state == sl->can.state || sl->rcount < SLCAN_STATE_FRAME_LEN)
+ if (state == sl->can.state || sl->rcount != SLCAN_STATE_MSG_LEN)
return;
cmd += SLCAN_STATE_BE_RXCNT_LEN + SLCAN_CMD_LEN + 1;
@@ -328,6 +340,9 @@ static void slcan_bump_err(struct slcan *sl)
bool rx_errors = false, tx_errors = false, rx_over_errors = false;
int i, len;
+ if (sl->rcount < SLCAN_ERROR_MSG_LEN_MIN)
+ return;
+
/* get len from sanitized ASCII value */
len = cmd[1];
if (len >= '0' && len < '9')
@@ -456,8 +471,7 @@ static void slcan_bump(struct slcan *sl)
static void slcan_unesc(struct slcan *sl, unsigned char s)
{
if ((s == '\r') || (s == '\a')) { /* CR or BEL ends the pdu */
- if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
- sl->rcount > 4)
+ if (!test_and_clear_bit(SLF_ERROR, &sl->flags))
slcan_bump(sl);
sl->rcount = 0;
@@ -760,7 +774,6 @@ static const struct net_device_ops slcan_netdev_ops = {
.ndo_open = slcan_netdev_open,
.ndo_stop = slcan_netdev_close,
.ndo_start_xmit = slcan_netdev_xmit,
- .ndo_change_mtu = can_change_mtu,
};
/******************************************
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index bd25137062c5..79bc64395ac4 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -609,7 +609,6 @@ static const struct net_device_ops softing_netdev_ops = {
.ndo_open = softing_netdev_open,
.ndo_stop = softing_netdev_stop,
.ndo_start_xmit = softing_netdev_start_xmit,
- .ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops softing_ethtool_ops = {
@@ -854,7 +853,7 @@ static struct platform_driver softing_driver = {
.name = KBUILD_MODNAME,
},
.probe = softing_pdev_probe,
- .remove_new = softing_pdev_remove,
+ .remove = softing_pdev_remove,
};
module_platform_driver(softing_driver);
diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c
index e1b8533a602e..e00d3dbc4cf4 100644
--- a/drivers/net/can/spi/hi311x.c
+++ b/drivers/net/can/spi/hi311x.c
@@ -545,8 +545,6 @@ static int hi3110_stop(struct net_device *net)
priv->force_quit = 1;
free_irq(spi->irq, priv);
- destroy_workqueue(priv->wq);
- priv->wq = NULL;
mutex_lock(&priv->hi3110_lock);
@@ -663,27 +661,27 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
u8 rxerr, txerr;
skb = alloc_can_err_skb(net, &cf);
- if (!skb)
- break;
txerr = hi3110_read(spi, HI3110_READ_TEC);
rxerr = hi3110_read(spi, HI3110_READ_REC);
tx_state = txerr >= rxerr ? new_state : 0;
rx_state = txerr <= rxerr ? new_state : 0;
can_change_state(net, cf, tx_state, rx_state);
- netif_rx(skb);
if (new_state == CAN_STATE_BUS_OFF) {
+ if (skb)
+ netif_rx(skb);
can_bus_off(net);
if (priv->can.restart_ms == 0) {
priv->force_quit = 1;
hi3110_hw_sleep(spi);
break;
}
- } else {
+ } else if (skb) {
cf->can_id |= CAN_ERR_CNT;
cf->data[6] = txerr;
cf->data[7] = rxerr;
+ netif_rx(skb);
}
}
@@ -696,27 +694,38 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
/* Check for protocol errors */
if (eflag & HI3110_ERR_PROTOCOL_MASK) {
skb = alloc_can_err_skb(net, &cf);
- if (!skb)
- break;
+ if (skb)
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
- cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
priv->can.can_stats.bus_error++;
- priv->net->stats.rx_errors++;
- if (eflag & HI3110_ERR_BITERR)
- cf->data[2] |= CAN_ERR_PROT_BIT;
- else if (eflag & HI3110_ERR_FRMERR)
- cf->data[2] |= CAN_ERR_PROT_FORM;
- else if (eflag & HI3110_ERR_STUFERR)
- cf->data[2] |= CAN_ERR_PROT_STUFF;
- else if (eflag & HI3110_ERR_CRCERR)
- cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
- else if (eflag & HI3110_ERR_ACKERR)
- cf->data[3] |= CAN_ERR_PROT_LOC_ACK;
-
- cf->data[6] = hi3110_read(spi, HI3110_READ_TEC);
- cf->data[7] = hi3110_read(spi, HI3110_READ_REC);
+ if (eflag & HI3110_ERR_BITERR) {
+ priv->net->stats.tx_errors++;
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_BIT;
+ } else if (eflag & HI3110_ERR_FRMERR) {
+ priv->net->stats.rx_errors++;
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ } else if (eflag & HI3110_ERR_STUFERR) {
+ priv->net->stats.rx_errors++;
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ } else if (eflag & HI3110_ERR_CRCERR) {
+ priv->net->stats.rx_errors++;
+ if (skb)
+ cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
+ } else if (eflag & HI3110_ERR_ACKERR) {
+ priv->net->stats.tx_errors++;
+ if (skb)
+ cf->data[3] |= CAN_ERR_PROT_LOC_ACK;
+ }
+
netdev_dbg(priv->net, "Bus Error\n");
- netif_rx(skb);
+ if (skb) {
+ cf->data[6] = hi3110_read(spi, HI3110_READ_TEC);
+ cf->data[7] = hi3110_read(spi, HI3110_READ_REC);
+ netif_rx(skb);
+ }
}
}
@@ -759,34 +768,23 @@ static int hi3110_open(struct net_device *net)
goto out_close;
}
- priv->wq = alloc_workqueue("hi3110_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM,
- 0);
- if (!priv->wq) {
- ret = -ENOMEM;
- goto out_free_irq;
- }
- INIT_WORK(&priv->tx_work, hi3110_tx_work_handler);
- INIT_WORK(&priv->restart_work, hi3110_restart_work_handler);
-
ret = hi3110_hw_reset(spi);
if (ret)
- goto out_free_wq;
+ goto out_free_irq;
ret = hi3110_setup(net);
if (ret)
- goto out_free_wq;
+ goto out_free_irq;
ret = hi3110_set_normal_mode(spi);
if (ret)
- goto out_free_wq;
+ goto out_free_irq;
netif_wake_queue(net);
mutex_unlock(&priv->hi3110_lock);
return 0;
- out_free_wq:
- destroy_workqueue(priv->wq);
out_free_irq:
free_irq(spi->irq, priv);
hi3110_hw_sleep(spi);
@@ -830,7 +828,6 @@ static int hi3110_can_probe(struct spi_device *spi)
struct device *dev = &spi->dev;
struct net_device *net;
struct hi3110_priv *priv;
- const void *match;
struct clk *clk;
u32 freq;
int ret;
@@ -874,11 +871,7 @@ static int hi3110_can_probe(struct spi_device *spi)
CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_BERR_REPORTING;
- match = device_get_match_data(dev);
- if (match)
- priv->model = (enum hi3110_model)(uintptr_t)match;
- else
- priv->model = spi_get_device_id(spi)->driver_data;
+ priv->model = (enum hi3110_model)(uintptr_t)spi_get_device_match_data(spi);
priv->net = net;
priv->clk = clk;
@@ -902,6 +895,16 @@ static int hi3110_can_probe(struct spi_device *spi)
if (ret)
goto out_clk;
+ priv->wq = alloc_workqueue("hi3110_wq",
+ WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
+ 0);
+ if (!priv->wq) {
+ ret = -ENOMEM;
+ goto out_clk;
+ }
+ INIT_WORK(&priv->tx_work, hi3110_tx_work_handler);
+ INIT_WORK(&priv->restart_work, hi3110_restart_work_handler);
+
priv->spi = spi;
mutex_init(&priv->hi3110_lock);
@@ -937,6 +940,8 @@ static int hi3110_can_probe(struct spi_device *spi)
return 0;
error_probe:
+ destroy_workqueue(priv->wq);
+ priv->wq = NULL;
hi3110_power_enable(priv->power, 0);
out_clk:
@@ -957,6 +962,9 @@ static void hi3110_can_remove(struct spi_device *spi)
hi3110_power_enable(priv->power, 0);
+ destroy_workqueue(priv->wq);
+ priv->wq = NULL;
+
clk_disable_unprepare(priv->clk);
free_candev(net);
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index 79c4bab5f724..fa97adf25b73 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -28,7 +28,6 @@
#include <linux/device.h>
#include <linux/ethtool.h>
#include <linux/freezer.h>
-#include <linux/gpio.h>
#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -389,8 +388,8 @@ static void mcp251x_write_2regs(struct spi_device *spi, u8 reg, u8 v1, u8 v2)
mcp251x_spi_write(spi, 4);
}
-static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
- u8 mask, u8 val)
+static int mcp251x_write_bits(struct spi_device *spi, u8 reg,
+ u8 mask, u8 val)
{
struct mcp251x_priv *priv = spi_get_drvdata(spi);
@@ -399,7 +398,7 @@ static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
priv->spi_tx_buf[2] = mask;
priv->spi_tx_buf[3] = val;
- mcp251x_spi_write(spi, 4);
+ return mcp251x_spi_write(spi, 4);
}
static u8 mcp251x_read_stat(struct spi_device *spi)
@@ -442,6 +441,7 @@ static int mcp251x_gpio_request(struct gpio_chip *chip,
unsigned int offset)
{
struct mcp251x_priv *priv = gpiochip_get_data(chip);
+ int ret;
u8 val;
/* nothing to be done for inputs */
@@ -451,8 +451,10 @@ static int mcp251x_gpio_request(struct gpio_chip *chip,
val = BFPCTRL_BFE(offset - MCP251X_GPIO_RX0BF);
mutex_lock(&priv->mcp_lock);
- mcp251x_write_bits(priv->spi, BFPCTRL, val, val);
+ ret = mcp251x_write_bits(priv->spi, BFPCTRL, val, val);
mutex_unlock(&priv->mcp_lock);
+ if (ret)
+ return ret;
priv->reg_bfpctrl |= val;
@@ -482,9 +484,9 @@ static int mcp251x_gpio_get_direction(struct gpio_chip *chip,
unsigned int offset)
{
if (mcp251x_gpio_is_input(offset))
- return GPIOF_DIR_IN;
+ return GPIO_LINE_DIRECTION_IN;
- return GPIOF_DIR_OUT;
+ return GPIO_LINE_DIRECTION_OUT;
}
static int mcp251x_gpio_get(struct gpio_chip *chip, unsigned int offset)
@@ -531,29 +533,35 @@ static int mcp251x_gpio_get_multiple(struct gpio_chip *chip,
return 0;
}
-static void mcp251x_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int mcp251x_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct mcp251x_priv *priv = gpiochip_get_data(chip);
u8 mask, val;
+ int ret;
mask = BFPCTRL_BFS(offset - MCP251X_GPIO_RX0BF);
val = value ? mask : 0;
mutex_lock(&priv->mcp_lock);
- mcp251x_write_bits(priv->spi, BFPCTRL, mask, val);
+ ret = mcp251x_write_bits(priv->spi, BFPCTRL, mask, val);
mutex_unlock(&priv->mcp_lock);
+ if (ret)
+ return ret;
priv->reg_bfpctrl &= ~mask;
priv->reg_bfpctrl |= val;
+
+ return 0;
}
-static void
+static int
mcp251x_gpio_set_multiple(struct gpio_chip *chip,
unsigned long *maskp, unsigned long *bitsp)
{
struct mcp251x_priv *priv = gpiochip_get_data(chip);
u8 mask, val;
+ int ret;
mask = FIELD_GET(MCP251X_GPIO_OUTPUT_MASK, maskp[0]);
mask = FIELD_PREP(BFPCTRL_BFS_MASK, mask);
@@ -562,14 +570,18 @@ mcp251x_gpio_set_multiple(struct gpio_chip *chip,
val = FIELD_PREP(BFPCTRL_BFS_MASK, val);
if (!mask)
- return;
+ return 0;
mutex_lock(&priv->mcp_lock);
- mcp251x_write_bits(priv->spi, BFPCTRL, mask, val);
+ ret = mcp251x_write_bits(priv->spi, BFPCTRL, mask, val);
mutex_unlock(&priv->mcp_lock);
+ if (ret)
+ return ret;
priv->reg_bfpctrl &= ~mask;
priv->reg_bfpctrl |= val;
+
+ return 0;
}
static void mcp251x_gpio_restore(struct spi_device *spi)
@@ -753,7 +765,7 @@ static int mcp251x_hw_wake(struct spi_device *spi)
int ret;
/* Force wakeup interrupt to wake device, but don't execute IST */
- disable_irq(spi->irq);
+ disable_irq_nosync(spi->irq);
mcp251x_write_2regs(spi, CANINTE, CANINTE_WAKIE, CANINTF_WAKIF);
/* Wait for oscillator startup timer after wake up */
@@ -1258,7 +1270,6 @@ static const struct net_device_ops mcp251x_netdev_ops = {
.ndo_open = mcp251x_open,
.ndo_stop = mcp251x_stop,
.ndo_start_xmit = mcp251x_hard_start_xmit,
- .ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops mcp251x_ethtool_ops = {
@@ -1301,7 +1312,6 @@ MODULE_DEVICE_TABLE(spi, mcp251x_id_table);
static int mcp251x_can_probe(struct spi_device *spi)
{
- const void *match = device_get_match_data(&spi->dev);
struct net_device *net;
struct mcp251x_priv *priv;
struct clk *clk;
@@ -1310,7 +1320,7 @@ static int mcp251x_can_probe(struct spi_device *spi)
clk = devm_clk_get_optional(&spi->dev, NULL);
if (IS_ERR(clk))
- return PTR_ERR(clk);
+ return dev_err_probe(&spi->dev, PTR_ERR(clk), "Cannot get clock\n");
freq = clk_get_rate(clk);
if (freq == 0)
@@ -1318,7 +1328,7 @@ static int mcp251x_can_probe(struct spi_device *spi)
/* Sanity check */
if (freq < 1000000 || freq > 25000000)
- return -ERANGE;
+ return dev_err_probe(&spi->dev, -ERANGE, "clock frequency out of range\n");
/* Allocate can/net device */
net = alloc_candev(sizeof(struct mcp251x_priv), TX_ECHO_SKB_MAX);
@@ -1326,8 +1336,10 @@ static int mcp251x_can_probe(struct spi_device *spi)
return -ENOMEM;
ret = clk_prepare_enable(clk);
- if (ret)
+ if (ret) {
+ dev_err_probe(&spi->dev, ret, "Cannot enable clock\n");
goto out_free;
+ }
net->netdev_ops = &mcp251x_netdev_ops;
net->ethtool_ops = &mcp251x_ethtool_ops;
@@ -1339,10 +1351,7 @@ static int mcp251x_can_probe(struct spi_device *spi)
priv->can.clock.freq = freq / 2;
priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY;
- if (match)
- priv->model = (enum mcp251x_model)(uintptr_t)match;
- else
- priv->model = spi_get_device_id(spi)->driver_data;
+ priv->model = (enum mcp251x_model)(uintptr_t)spi_get_device_match_data(spi);
priv->net = net;
priv->clk = clk;
@@ -1355,22 +1364,28 @@ static int mcp251x_can_probe(struct spi_device *spi)
else
spi->max_speed_hz = spi->max_speed_hz ? : 10 * 1000 * 1000;
ret = spi_setup(spi);
- if (ret)
+ if (ret) {
+ dev_err_probe(&spi->dev, ret, "Cannot set up spi\n");
goto out_clk;
+ }
priv->power = devm_regulator_get_optional(&spi->dev, "vdd");
priv->transceiver = devm_regulator_get_optional(&spi->dev, "xceiver");
if ((PTR_ERR(priv->power) == -EPROBE_DEFER) ||
(PTR_ERR(priv->transceiver) == -EPROBE_DEFER)) {
ret = -EPROBE_DEFER;
+ dev_err_probe(&spi->dev, ret, "supply deferred\n");
goto out_clk;
}
ret = mcp251x_power_enable(priv->power, 1);
- if (ret)
+ if (ret) {
+ dev_err_probe(&spi->dev, ret, "Cannot enable power\n");
goto out_clk;
+ }
- priv->wq = alloc_workqueue("mcp251x_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM,
+ priv->wq = alloc_workqueue("mcp251x_wq",
+ WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
0);
if (!priv->wq) {
ret = -ENOMEM;
@@ -1401,21 +1416,24 @@ static int mcp251x_can_probe(struct spi_device *spi)
/* Here is OK to not lock the MCP, no one knows about it yet */
ret = mcp251x_hw_probe(spi);
if (ret) {
- if (ret == -ENODEV)
- dev_err(&spi->dev, "Cannot initialize MCP%x. Wrong wiring?\n",
- priv->model);
+ dev_err_probe(&spi->dev, ret, "Cannot initialize MCP%x. Wrong wiring?\n",
+ priv->model);
goto error_probe;
}
mcp251x_hw_sleep(spi);
ret = register_candev(net);
- if (ret)
+ if (ret) {
+ dev_err_probe(&spi->dev, ret, "Cannot register CAN device\n");
goto error_probe;
+ }
ret = mcp251x_gpio_setup(priv);
- if (ret)
+ if (ret) {
+ dev_err_probe(&spi->dev, ret, "Cannot set up gpios\n");
goto out_unregister_candev;
+ }
netdev_info(net, "MCP%x successfully initialized.\n", priv->model);
return 0;
@@ -1434,7 +1452,6 @@ out_clk:
out_free:
free_candev(net);
- dev_err(&spi->dev, "Probe failed, err=%d\n", -ret);
return ret;
}
diff --git a/drivers/net/can/spi/mcp251xfd/Kconfig b/drivers/net/can/spi/mcp251xfd/Kconfig
index 877e4356010d..7c29846e6051 100644
--- a/drivers/net/can/spi/mcp251xfd/Kconfig
+++ b/drivers/net/can/spi/mcp251xfd/Kconfig
@@ -5,6 +5,7 @@ config CAN_MCP251XFD
select CAN_RX_OFFLOAD
select REGMAP
select WANT_DEV_COREDUMP
+ select GPIOLIB
help
Driver for the Microchip MCP251XFD SPI FD-CAN controller
family.
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
index 1d9057dc44f2..5134ebb85880 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
@@ -2,7 +2,7 @@
//
// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
//
-// Copyright (c) 2019, 2020, 2021 Pengutronix,
+// Copyright (c) 2019, 2020, 2021, 2023 Pengutronix,
// Marc Kleine-Budde <kernel@pengutronix.de>
//
// Based on:
@@ -12,7 +12,7 @@
// Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
//
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/device.h>
@@ -75,6 +75,24 @@ static const struct can_bittiming_const mcp251xfd_data_bittiming_const = {
.brp_inc = 1,
};
+/* The datasheet of the mcp2518fd (DS20006027B) specifies a range of
+ * [-64,63] for TDCO, indicating a relative TDCO.
+ *
+ * Manual tests have shown that using a relative TDCO configuration
+ * results in bus off, while an absolute configuration works.
+ *
+ * For TDCO use the max value (63) from the datasheet, but 0 as the
+ * minimum.
+ */
+static const struct can_tdc_const mcp251xfd_tdc_const = {
+ .tdcv_min = 0,
+ .tdcv_max = 63,
+ .tdco_min = 0,
+ .tdco_max = 63,
+ .tdcf_min = 0,
+ .tdcf_max = 0,
+};
+
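Purely as an illustration (not part of the patch): an absolute TDCO can be derived from the data bit timing and bounded by the tdco_min/tdco_max limits declared above. The helper name below is hypothetical.

#include <linux/can/dev.h>	/* struct can_bittiming */
#include <linux/minmax.h>	/* clamp_t() */

/* Illustrative sketch: compute the secondary sample point offset from
 * the data bit timing and clamp it to the [0, 63] window above.
 */
static u32 example_absolute_tdco(const struct can_bittiming *dbt)
{
	u32 ssp = dbt->brp * (dbt->prop_seg + dbt->phase_seg1);

	return clamp_t(u32, ssp, 0, 63);
}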
static const char *__mcp251xfd_get_model_str(enum mcp251xfd_model model)
{
switch (model) {
@@ -509,9 +527,8 @@ static int mcp251xfd_chip_timestamp_init(const struct mcp251xfd_priv *priv)
static int mcp251xfd_set_bittiming(const struct mcp251xfd_priv *priv)
{
const struct can_bittiming *bt = &priv->can.bittiming;
- const struct can_bittiming *dbt = &priv->can.data_bittiming;
- u32 val = 0;
- s8 tdco;
+ const struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
+ u32 tdcmod, val = 0;
int err;
/* CAN Control Register
@@ -575,34 +592,37 @@ static int mcp251xfd_set_bittiming(const struct mcp251xfd_priv *priv)
return err;
/* Transmitter Delay Compensation */
- tdco = clamp_t(int, dbt->brp * (dbt->prop_seg + dbt->phase_seg1),
- -64, 63);
- val = FIELD_PREP(MCP251XFD_REG_TDC_TDCMOD_MASK,
- MCP251XFD_REG_TDC_TDCMOD_AUTO) |
- FIELD_PREP(MCP251XFD_REG_TDC_TDCO_MASK, tdco);
+ if (priv->can.ctrlmode & CAN_CTRLMODE_TDC_AUTO)
+ tdcmod = MCP251XFD_REG_TDC_TDCMOD_AUTO;
+ else if (priv->can.ctrlmode & CAN_CTRLMODE_TDC_MANUAL)
+ tdcmod = MCP251XFD_REG_TDC_TDCMOD_MANUAL;
+ else
+ tdcmod = MCP251XFD_REG_TDC_TDCMOD_DISABLED;
+
+ val = FIELD_PREP(MCP251XFD_REG_TDC_TDCMOD_MASK, tdcmod) |
+ FIELD_PREP(MCP251XFD_REG_TDC_TDCV_MASK, priv->can.fd.tdc.tdcv) |
+ FIELD_PREP(MCP251XFD_REG_TDC_TDCO_MASK, priv->can.fd.tdc.tdco);
return regmap_write(priv->map_reg, MCP251XFD_REG_TDC, val);
}
static int mcp251xfd_chip_rx_int_enable(const struct mcp251xfd_priv *priv)
{
- u32 val;
+ u32 val, mask;
if (!priv->rx_int)
return 0;
- /* Configure GPIOs:
- * - PIN0: GPIO Input
- * - PIN1: GPIO Input/RX Interrupt
+ /* Configure PIN1 as RX Interrupt:
*
* PIN1 must be Input, otherwise there is a glitch on the
* rx-INT line. It happens between setting the PIN as output
* (in the first byte of the SPI transfer) and configuring the
* PIN as interrupt (in the last byte of the SPI transfer).
*/
- val = MCP251XFD_REG_IOCON_PM0 | MCP251XFD_REG_IOCON_TRIS1 |
- MCP251XFD_REG_IOCON_TRIS0;
- return regmap_write(priv->map_reg, MCP251XFD_REG_IOCON, val);
+ val = MCP251XFD_REG_IOCON_TRIS(1);
+ mask = MCP251XFD_REG_IOCON_TRIS(1) | MCP251XFD_REG_IOCON_PM(1);
+ return regmap_update_bits(priv->map_reg, MCP251XFD_REG_IOCON, mask, val);
}
static int mcp251xfd_chip_rx_int_disable(const struct mcp251xfd_priv *priv)
@@ -612,13 +632,9 @@ static int mcp251xfd_chip_rx_int_disable(const struct mcp251xfd_priv *priv)
if (!priv->rx_int)
return 0;
- /* Configure GPIOs:
- * - PIN0: GPIO Input
- * - PIN1: GPIO Input
- */
- val = MCP251XFD_REG_IOCON_PM1 | MCP251XFD_REG_IOCON_PM0 |
- MCP251XFD_REG_IOCON_TRIS1 | MCP251XFD_REG_IOCON_TRIS0;
- return regmap_write(priv->map_reg, MCP251XFD_REG_IOCON, val);
+ /* Configure PIN1 as GPIO Input */
+ val = MCP251XFD_REG_IOCON_PM(1) | MCP251XFD_REG_IOCON_TRIS(1);
+ return regmap_update_bits(priv->map_reg, MCP251XFD_REG_IOCON, val, val);
}
static int mcp251xfd_chip_ecc_init(struct mcp251xfd_priv *priv)
@@ -744,25 +760,20 @@ static void mcp251xfd_chip_stop(struct mcp251xfd_priv *priv,
mcp251xfd_chip_interrupts_disable(priv);
mcp251xfd_chip_rx_int_disable(priv);
- mcp251xfd_chip_sleep(priv);
+ mcp251xfd_timestamp_stop(priv);
+ mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_CONFIG);
}
static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv)
{
int err;
- err = mcp251xfd_chip_softreset(priv);
- if (err)
- goto out_chip_stop;
-
- err = mcp251xfd_chip_clock_init(priv);
- if (err)
- goto out_chip_stop;
-
err = mcp251xfd_chip_timestamp_init(priv);
if (err)
goto out_chip_stop;
+ mcp251xfd_timestamp_start(priv);
+
err = mcp251xfd_set_bittiming(priv);
if (err)
goto out_chip_stop;
@@ -791,7 +802,7 @@ static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv)
return 0;
- out_chip_stop:
+out_chip_stop:
mcp251xfd_dump(priv);
mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
@@ -867,18 +878,18 @@ static int mcp251xfd_get_berr_counter(const struct net_device *ndev,
static struct sk_buff *
mcp251xfd_alloc_can_err_skb(struct mcp251xfd_priv *priv,
- struct can_frame **cf, u32 *timestamp)
+ struct can_frame **cf, u32 *ts_raw)
{
struct sk_buff *skb;
int err;
- err = mcp251xfd_get_timestamp(priv, timestamp);
+ err = mcp251xfd_get_timestamp_raw(priv, ts_raw);
if (err)
return NULL;
skb = alloc_can_err_skb(priv->ndev, cf);
if (skb)
- mcp251xfd_skb_set_timestamp(priv, skb, *timestamp);
+ mcp251xfd_skb_set_timestamp_raw(priv, skb, *ts_raw);
return skb;
}
@@ -889,7 +900,7 @@ static int mcp251xfd_handle_rxovif(struct mcp251xfd_priv *priv)
struct mcp251xfd_rx_ring *ring;
struct sk_buff *skb;
struct can_frame *cf;
- u32 timestamp, rxovif;
+ u32 ts_raw, rxovif;
int err, i;
stats->rx_over_errors++;
@@ -924,14 +935,14 @@ static int mcp251xfd_handle_rxovif(struct mcp251xfd_priv *priv)
return err;
}
- skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &timestamp);
+ skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &ts_raw);
if (!skb)
return 0;
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
- err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, ts_raw);
if (err)
stats->rx_fifo_errors++;
@@ -948,12 +959,12 @@ static int mcp251xfd_handle_txatif(struct mcp251xfd_priv *priv)
static int mcp251xfd_handle_ivmif(struct mcp251xfd_priv *priv)
{
struct net_device_stats *stats = &priv->ndev->stats;
- u32 bdiag1, timestamp;
+ u32 bdiag1, ts_raw;
struct sk_buff *skb;
struct can_frame *cf = NULL;
int err;
- err = mcp251xfd_get_timestamp(priv, &timestamp);
+ err = mcp251xfd_get_timestamp_raw(priv, &ts_raw);
if (err)
return err;
@@ -1035,8 +1046,8 @@ static int mcp251xfd_handle_ivmif(struct mcp251xfd_priv *priv)
if (!cf)
return 0;
- mcp251xfd_skb_set_timestamp(priv, skb, timestamp);
- err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
+ mcp251xfd_skb_set_timestamp_raw(priv, skb, ts_raw);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, ts_raw);
if (err)
stats->rx_fifo_errors++;
@@ -1049,7 +1060,7 @@ static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv)
struct sk_buff *skb;
struct can_frame *cf = NULL;
enum can_state new_state, rx_state, tx_state;
- u32 trec, timestamp;
+ u32 trec, ts_raw;
int err;
err = regmap_read(priv->map_reg, MCP251XFD_REG_TREC, &trec);
@@ -1079,7 +1090,7 @@ static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv)
/* The skb allocation might fail, but can_change_state()
* handles cf == NULL.
*/
- skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &timestamp);
+ skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &ts_raw);
can_change_state(priv->ndev, cf, tx_state, rx_state);
if (new_state == CAN_STATE_BUS_OFF) {
@@ -1110,7 +1121,7 @@ static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv)
cf->data[7] = bec.rxerr;
}
- err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, ts_raw);
if (err)
stats->rx_fifo_errors++;
@@ -1135,7 +1146,7 @@ mcp251xfd_handle_modif(const struct mcp251xfd_priv *priv, bool *set_normal_mode)
return 0;
}
- /* According to MCP2517FD errata DS80000792B 1., during a TX
+ /* According to MCP2517FD errata DS80000792C 1., during a TX
* MAB underflow, the controller will transition to Restricted
* Operation Mode or Listen Only Mode (depending on SERR2LOM).
*
@@ -1180,7 +1191,7 @@ static int mcp251xfd_handle_serrif(struct mcp251xfd_priv *priv)
/* TX MAB underflow
*
- * According to MCP2517FD Errata DS80000792B 1. a TX MAB
+ * According to MCP2517FD Errata DS80000792C 1. a TX MAB
* underflow is indicated by SERRIF and MODIF.
*
* In addition to the effects mentioned in the Errata, there
@@ -1224,7 +1235,7 @@ static int mcp251xfd_handle_serrif(struct mcp251xfd_priv *priv)
/* RX MAB overflow
*
- * According to MCP2517FD Errata DS80000792B 1. a RX MAB
+ * According to MCP2517FD Errata DS80000792C 1. a RX MAB
* overflow is indicated by SERRIF.
*
* In addition to the effects mentioned in the Errata, (most
@@ -1331,7 +1342,8 @@ mcp251xfd_handle_eccif(struct mcp251xfd_priv *priv, bool set_normal_mode)
return err;
/* Errata Reference:
- * mcp2517fd: DS80000789B, mcp2518fd: DS80000792C 2.
+ * mcp2517fd: DS80000789C 3., mcp2518fd: DS80000792E 2.,
+ * mcp251863: DS80000984A 2.
*
* ECC single error correction does not work in all cases:
*
@@ -1576,7 +1588,7 @@ static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
handled = IRQ_HANDLED;
} while (1);
- out_fail:
+out_fail:
can_rx_offload_threaded_irq_finish(&priv->offload);
netdev_err(priv->ndev, "IRQ handler returned %d (intf=0x%08x).\n",
@@ -1599,8 +1611,11 @@ static int mcp251xfd_open(struct net_device *ndev)
return err;
err = pm_runtime_resume_and_get(ndev->dev.parent);
- if (err)
+ if (err) {
+ if (err == -ETIMEDOUT || err == -ENODEV)
+ pm_runtime_set_suspended(ndev->dev.parent);
goto out_close_candev;
+ }
err = mcp251xfd_ring_alloc(priv);
if (err)
@@ -1610,19 +1625,29 @@ static int mcp251xfd_open(struct net_device *ndev)
if (err)
goto out_mcp251xfd_ring_free;
+ mcp251xfd_timestamp_init(priv);
+
err = mcp251xfd_chip_start(priv);
if (err)
goto out_transceiver_disable;
- mcp251xfd_timestamp_init(priv);
clear_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
can_rx_offload_enable(&priv->offload);
+ priv->wq = alloc_ordered_workqueue("%s-mcp251xfd_wq",
+ WQ_FREEZABLE | WQ_MEM_RECLAIM,
+ dev_name(&spi->dev));
+ if (!priv->wq) {
+ err = -ENOMEM;
+ goto out_can_rx_offload_disable;
+ }
+ INIT_WORK(&priv->tx_work, mcp251xfd_tx_obj_write_sync);
+
err = request_threaded_irq(spi->irq, NULL, mcp251xfd_irq,
IRQF_SHARED | IRQF_ONESHOT,
dev_name(&spi->dev), priv);
if (err)
- goto out_can_rx_offload_disable;
+ goto out_destroy_workqueue;
err = mcp251xfd_chip_interrupts_enable(priv);
if (err)
@@ -1632,20 +1657,21 @@ static int mcp251xfd_open(struct net_device *ndev)
return 0;
- out_free_irq:
+out_free_irq:
free_irq(spi->irq, priv);
- out_can_rx_offload_disable:
+out_destroy_workqueue:
+ destroy_workqueue(priv->wq);
+out_can_rx_offload_disable:
can_rx_offload_disable(&priv->offload);
set_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
- mcp251xfd_timestamp_stop(priv);
- out_transceiver_disable:
+out_transceiver_disable:
mcp251xfd_transceiver_disable(priv);
- out_mcp251xfd_ring_free:
+out_mcp251xfd_ring_free:
mcp251xfd_ring_free(priv);
- out_pm_runtime_put:
+out_pm_runtime_put:
mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
pm_runtime_put(ndev->dev.parent);
- out_close_candev:
+out_close_candev:
close_candev(ndev);
return err;
@@ -1661,8 +1687,8 @@ static int mcp251xfd_stop(struct net_device *ndev)
hrtimer_cancel(&priv->tx_irq_timer);
mcp251xfd_chip_interrupts_disable(priv);
free_irq(ndev->irq, priv);
+ destroy_workqueue(priv->wq);
can_rx_offload_disable(&priv->offload);
- mcp251xfd_timestamp_stop(priv);
mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
mcp251xfd_transceiver_disable(priv);
mcp251xfd_ring_free(priv);
@@ -1677,8 +1703,8 @@ static const struct net_device_ops mcp251xfd_netdev_ops = {
.ndo_open = mcp251xfd_open,
.ndo_stop = mcp251xfd_stop,
.ndo_start_xmit = mcp251xfd_start_xmit,
- .ndo_eth_ioctl = can_eth_ioctl_hwts,
- .ndo_change_mtu = can_change_mtu,
+ .ndo_hwtstamp_get = can_hwtstamp_get,
+ .ndo_hwtstamp_set = can_hwtstamp_set,
};
static void
@@ -1771,6 +1797,160 @@ static int mcp251xfd_register_check_rx_int(struct mcp251xfd_priv *priv)
return 0;
}
+static const char * const mcp251xfd_gpio_names[] = { "GPIO0", "GPIO1" };
+
+static int mcp251xfd_gpio_request(struct gpio_chip *chip, unsigned int offset)
+{
+ struct mcp251xfd_priv *priv = gpiochip_get_data(chip);
+ u32 pin_mask = MCP251XFD_REG_IOCON_PM(offset);
+ int ret;
+
+ if (priv->rx_int && offset == 1) {
+ netdev_err(priv->ndev, "Can't use GPIO 1 with RX-INT!\n");
+ return -EINVAL;
+ }
+
+ ret = pm_runtime_resume_and_get(priv->ndev->dev.parent);
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(priv->map_reg, MCP251XFD_REG_IOCON, pin_mask, pin_mask);
+}
+
+static void mcp251xfd_gpio_free(struct gpio_chip *chip, unsigned int offset)
+{
+ struct mcp251xfd_priv *priv = gpiochip_get_data(chip);
+
+ pm_runtime_put(priv->ndev->dev.parent);
+}
+
+static int mcp251xfd_gpio_get_direction(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct mcp251xfd_priv *priv = gpiochip_get_data(chip);
+ u32 mask = MCP251XFD_REG_IOCON_TRIS(offset);
+ u32 val;
+ int ret;
+
+ ret = regmap_read(priv->map_reg, MCP251XFD_REG_IOCON, &val);
+ if (ret)
+ return ret;
+
+ if (mask & val)
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
+}
+
+static int mcp251xfd_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct mcp251xfd_priv *priv = gpiochip_get_data(chip);
+ u32 mask = MCP251XFD_REG_IOCON_GPIO(offset);
+ u32 val;
+ int ret;
+
+ ret = regmap_read(priv->map_reg, MCP251XFD_REG_IOCON, &val);
+ if (ret)
+ return ret;
+
+ return !!(mask & val);
+}
+
+static int mcp251xfd_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bit)
+{
+ struct mcp251xfd_priv *priv = gpiochip_get_data(chip);
+ u32 val;
+ int ret;
+
+ ret = regmap_read(priv->map_reg, MCP251XFD_REG_IOCON, &val);
+ if (ret)
+ return ret;
+
+ *bit = FIELD_GET(MCP251XFD_REG_IOCON_GPIO_MASK, val) & *mask;
+
+ return 0;
+}
+
+static int mcp251xfd_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct mcp251xfd_priv *priv = gpiochip_get_data(chip);
+ u32 dir_mask = MCP251XFD_REG_IOCON_TRIS(offset);
+ u32 val_mask = MCP251XFD_REG_IOCON_LAT(offset);
+ u32 val;
+
+ if (value)
+ val = val_mask;
+ else
+ val = 0;
+
+ return regmap_update_bits(priv->map_reg, MCP251XFD_REG_IOCON,
+ dir_mask | val_mask, val);
+}
+
+static int mcp251xfd_gpio_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct mcp251xfd_priv *priv = gpiochip_get_data(chip);
+ u32 dir_mask = MCP251XFD_REG_IOCON_TRIS(offset);
+
+ return regmap_update_bits(priv->map_reg, MCP251XFD_REG_IOCON, dir_mask, dir_mask);
+}
+
+static int mcp251xfd_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+{
+ struct mcp251xfd_priv *priv = gpiochip_get_data(chip);
+ u32 val_mask = MCP251XFD_REG_IOCON_LAT(offset);
+ u32 val;
+
+ if (value)
+ val = val_mask;
+ else
+ val = 0;
+
+ return regmap_update_bits(priv->map_reg, MCP251XFD_REG_IOCON, val_mask, val);
+}
+
+static int mcp251xfd_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct mcp251xfd_priv *priv = gpiochip_get_data(chip);
+ u32 val;
+
+ val = FIELD_PREP(MCP251XFD_REG_IOCON_LAT_MASK, *bits);
+
+ return regmap_update_bits(priv->map_reg, MCP251XFD_REG_IOCON,
+ MCP251XFD_REG_IOCON_LAT_MASK, val);
+}
+
+static int mcp251xfd_gpio_setup(struct mcp251xfd_priv *priv)
+{
+ struct gpio_chip *gc = &priv->gc;
+
+ if (!device_property_present(&priv->spi->dev, "gpio-controller"))
+ return 0;
+
+ gc->label = dev_name(&priv->spi->dev);
+ gc->parent = &priv->spi->dev;
+ gc->owner = THIS_MODULE;
+ gc->request = mcp251xfd_gpio_request;
+ gc->free = mcp251xfd_gpio_free;
+ gc->get_direction = mcp251xfd_gpio_get_direction;
+ gc->direction_output = mcp251xfd_gpio_direction_output;
+ gc->direction_input = mcp251xfd_gpio_direction_input;
+ gc->get = mcp251xfd_gpio_get;
+ gc->get_multiple = mcp251xfd_gpio_get_multiple;
+ gc->set = mcp251xfd_gpio_set;
+ gc->set_multiple = mcp251xfd_gpio_set_multiple;
+ gc->base = -1;
+ gc->can_sleep = true;
+ gc->ngpio = ARRAY_SIZE(mcp251xfd_gpio_names);
+ gc->names = mcp251xfd_gpio_names;
+
+ return devm_gpiochip_add_data(&priv->spi->dev, gc, priv);
+}
+
static int
mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv, u32 *dev_id,
u32 *effective_speed_hz_slow,
@@ -1808,9 +1988,9 @@ mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv, u32 *dev_id,
*effective_speed_hz_slow = xfer[0].effective_speed_hz;
*effective_speed_hz_fast = xfer[1].effective_speed_hz;
- out_kfree_buf_tx:
+out_kfree_buf_tx:
kfree(buf_tx);
- out_kfree_buf_rx:
+out_kfree_buf_rx:
kfree(buf_rx);
return err;
@@ -1870,53 +2050,59 @@ static int mcp251xfd_register(struct mcp251xfd_priv *priv)
struct net_device *ndev = priv->ndev;
int err;
+ mcp251xfd_register_quirks(priv);
+
err = mcp251xfd_clks_and_vdd_enable(priv);
if (err)
return err;
- pm_runtime_get_noresume(ndev->dev.parent);
- err = pm_runtime_set_active(ndev->dev.parent);
- if (err)
- goto out_runtime_put_noidle;
- pm_runtime_enable(ndev->dev.parent);
-
- mcp251xfd_register_quirks(priv);
-
err = mcp251xfd_chip_softreset(priv);
if (err == -ENODEV)
- goto out_runtime_disable;
+ goto out_clks_and_vdd_disable;
if (err)
goto out_chip_sleep;
err = mcp251xfd_chip_clock_init(priv);
if (err == -ENODEV)
- goto out_runtime_disable;
+ goto out_clks_and_vdd_disable;
if (err)
goto out_chip_sleep;
+ pm_runtime_get_noresume(ndev->dev.parent);
+ err = pm_runtime_set_active(ndev->dev.parent);
+ if (err)
+ goto out_runtime_put_noidle;
+ pm_runtime_enable(ndev->dev.parent);
+
err = mcp251xfd_register_chip_detect(priv);
if (err)
- goto out_chip_sleep;
+ goto out_runtime_disable;
err = mcp251xfd_register_check_rx_int(priv);
if (err)
- goto out_chip_sleep;
+ goto out_runtime_disable;
mcp251xfd_ethtool_init(priv);
+ err = mcp251xfd_gpio_setup(priv);
+ if (err) {
+ dev_err_probe(&priv->spi->dev, err, "Failed to register gpio-controller.\n");
+ goto out_runtime_disable;
+ }
+
err = register_candev(ndev);
if (err)
- goto out_chip_sleep;
+ goto out_runtime_disable;
err = mcp251xfd_register_done(priv);
if (err)
goto out_unregister_candev;
- /* Put controller into sleep mode and let pm_runtime_put()
- * disable the clocks and vdd. If CONFIG_PM is not enabled,
- * the clocks and vdd will stay powered.
+ /* Put controller into Config mode and let pm_runtime_put()
+ * put in sleep mode, disable the clocks and vdd. If CONFIG_PM
+ * is not enabled, the clocks and vdd will stay powered.
*/
- err = mcp251xfd_chip_sleep(priv);
+ err = mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_CONFIG);
if (err)
goto out_unregister_candev;
@@ -1924,14 +2110,15 @@ static int mcp251xfd_register(struct mcp251xfd_priv *priv)
return 0;
- out_unregister_candev:
+out_unregister_candev:
unregister_candev(ndev);
- out_chip_sleep:
- mcp251xfd_chip_sleep(priv);
- out_runtime_disable:
+out_runtime_disable:
pm_runtime_disable(ndev->dev.parent);
- out_runtime_put_noidle:
+out_runtime_put_noidle:
pm_runtime_put_noidle(ndev->dev.parent);
+out_chip_sleep:
+ mcp251xfd_chip_sleep(priv);
+out_clks_and_vdd_disable:
mcp251xfd_clks_and_vdd_disable(priv);
return err;
@@ -1943,10 +2130,12 @@ static inline void mcp251xfd_unregister(struct mcp251xfd_priv *priv)
unregister_candev(ndev);
- if (pm_runtime_enabled(ndev->dev.parent))
+ if (pm_runtime_enabled(ndev->dev.parent)) {
pm_runtime_disable(ndev->dev.parent);
- else
+ } else {
+ mcp251xfd_chip_sleep(priv);
mcp251xfd_clks_and_vdd_disable(priv);
+ }
}
static const struct of_device_id mcp251xfd_of_match[] = {
@@ -1989,7 +2178,6 @@ MODULE_DEVICE_TABLE(spi, mcp251xfd_id_table);
static int mcp251xfd_probe(struct spi_device *spi)
{
- const void *match;
struct net_device *ndev;
struct mcp251xfd_priv *priv;
struct gpio_desc *rx_int;
@@ -2068,11 +2256,13 @@ static int mcp251xfd_probe(struct spi_device *spi)
priv->can.do_set_mode = mcp251xfd_set_mode;
priv->can.do_get_berr_counter = mcp251xfd_get_berr_counter;
priv->can.bittiming_const = &mcp251xfd_bittiming_const;
- priv->can.data_bittiming_const = &mcp251xfd_data_bittiming_const;
+ priv->can.fd.data_bittiming_const = &mcp251xfd_data_bittiming_const;
+ priv->can.fd.tdc_const = &mcp251xfd_tdc_const;
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING |
CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO |
- CAN_CTRLMODE_CC_LEN8_DLC;
+ CAN_CTRLMODE_CC_LEN8_DLC | CAN_CTRLMODE_TDC_AUTO |
+ CAN_CTRLMODE_TDC_MANUAL;
set_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
priv->ndev = ndev;
priv->spi = spi;
@@ -2081,16 +2271,11 @@ static int mcp251xfd_probe(struct spi_device *spi)
priv->pll_enable = pll_enable;
priv->reg_vdd = reg_vdd;
priv->reg_xceiver = reg_xceiver;
-
- match = device_get_match_data(&spi->dev);
- if (match)
- priv->devtype_data = *(struct mcp251xfd_devtype_data *)match;
- else
- priv->devtype_data = *(struct mcp251xfd_devtype_data *)
- spi_get_device_id(spi)->driver_data;
+ priv->devtype_data = *(struct mcp251xfd_devtype_data *)spi_get_device_match_data(spi);
/* Errata Reference:
- * mcp2517fd: DS80000792C 5., mcp2518fd: DS80000789C 4.
+ * mcp2517fd: DS80000792C 5., mcp2518fd: DS80000789E 4.,
+ * mcp251863: DS80000984A 4.
*
* The SPI can write corrupted data to the RAM at fast SPI
* speeds:
@@ -2150,9 +2335,9 @@ static int mcp251xfd_probe(struct spi_device *spi)
return 0;
- out_can_rx_offload_del:
+out_can_rx_offload_del:
can_rx_offload_del(&priv->offload);
- out_free_candev:
+out_free_candev:
spi->max_speed_hz = priv->spi_max_speed_hz_orig;
free_candev(ndev);
@@ -2165,24 +2350,49 @@ static void mcp251xfd_remove(struct spi_device *spi)
struct mcp251xfd_priv *priv = spi_get_drvdata(spi);
struct net_device *ndev = priv->ndev;
- can_rx_offload_del(&priv->offload);
mcp251xfd_unregister(priv);
+ can_rx_offload_del(&priv->offload);
spi->max_speed_hz = priv->spi_max_speed_hz_orig;
free_candev(ndev);
}
static int __maybe_unused mcp251xfd_runtime_suspend(struct device *device)
{
- const struct mcp251xfd_priv *priv = dev_get_drvdata(device);
+ struct mcp251xfd_priv *priv = dev_get_drvdata(device);
+ mcp251xfd_chip_sleep(priv);
return mcp251xfd_clks_and_vdd_disable(priv);
}
static int __maybe_unused mcp251xfd_runtime_resume(struct device *device)
{
- const struct mcp251xfd_priv *priv = dev_get_drvdata(device);
+ struct mcp251xfd_priv *priv = dev_get_drvdata(device);
+ int err;
+
+ err = mcp251xfd_clks_and_vdd_enable(priv);
+ if (err)
+ return err;
+
+ err = mcp251xfd_chip_softreset(priv);
+ if (err == -ENODEV)
+ goto out_clks_and_vdd_disable;
+ if (err)
+ goto out_chip_sleep;
- return mcp251xfd_clks_and_vdd_enable(priv);
+ err = mcp251xfd_chip_clock_init(priv);
+ if (err == -ENODEV)
+ goto out_clks_and_vdd_disable;
+ if (err)
+ goto out_chip_sleep;
+
+ return 0;
+
+out_chip_sleep:
+ mcp251xfd_chip_sleep(priv);
+out_clks_and_vdd_disable:
+ mcp251xfd_clks_and_vdd_disable(priv);
+
+ return err;
}
static const struct dev_pm_ops mcp251xfd_pm_ops = {
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
index 004eaf96262b..050321345304 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
@@ -94,7 +94,7 @@ static void mcp251xfd_dump_registers(const struct mcp251xfd_priv *priv,
kfree(buf);
}
- out:
+out:
mcp251xfd_dump_header(iter, MCP251XFD_DUMP_OBJECT_TYPE_REG, reg);
}
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c
index 9e8e82cdba46..61b0d6fa52dd 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c
@@ -97,7 +97,16 @@ void can_ram_get_layout(struct can_ram_layout *layout,
if (ring) {
u8 num_rx_coalesce = 0, num_tx_coalesce = 0;
- num_rx = can_ram_rounddown_pow_of_two(config, &config->rx, 0, ring->rx_pending);
+ /* If the ring parameters have been configured in
+ * CAN-CC mode, but we are in CAN-FD mode now,
+ * they might be too big. Use the default CAN-FD values
+ * in this case.
+ */
+ num_rx = ring->rx_pending;
+ if (num_rx > layout->max_rx)
+ num_rx = layout->default_rx;
+
+ num_rx = can_ram_rounddown_pow_of_two(config, &config->rx, 0, num_rx);
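For example, an RX ring sized while in CAN-CC mode (small objects) may no longer fit once every object grows to full CAN-FD size; in that case the stale user setting is discarded and the CAN-FD default ring size is used instead.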
/* The ethtool doc says:
* To disable coalescing, set usecs = 0 and max_frames = 1.
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
index 92b7bc7f14b9..70d5ff0ae7ac 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
@@ -8,22 +8,14 @@
#include "mcp251xfd.h"
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
static const struct regmap_config mcp251xfd_regmap_crc;
static int
-mcp251xfd_regmap_nocrc_write(void *context, const void *data, size_t count)
-{
- struct spi_device *spi = context;
-
- return spi_write(spi, data, count);
-}
-
-static int
-mcp251xfd_regmap_nocrc_gather_write(void *context,
- const void *reg, size_t reg_len,
- const void *val, size_t val_len)
+_mcp251xfd_regmap_nocrc_gather_write(void *context,
+ const void *reg, size_t reg_len,
+ const void *val, size_t val_len)
{
struct spi_device *spi = context;
struct mcp251xfd_priv *priv = spi_get_drvdata(spi);
@@ -47,6 +39,54 @@ mcp251xfd_regmap_nocrc_gather_write(void *context,
return spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer));
}
+static int
+mcp251xfd_regmap_nocrc_gather_write(void *context,
+ const void *reg_p, size_t reg_len,
+ const void *val, size_t val_len)
+{
+ const u16 byte_exclude = MCP251XFD_REG_IOCON +
+ mcp251xfd_first_byte_set(MCP251XFD_REG_IOCON_GPIO_MASK);
+ u16 reg = be16_to_cpu(*(__be16 *)reg_p) & MCP251XFD_SPI_ADDRESS_MASK;
+ int ret;
+
+ /* Never write to bits 16..23 of the IOCON register to avoid clearing LAT0/LAT1
+ *
+ * According to MCP2518FD Errata DS80000789E 5., writing the IOCON register with a
+ * single SPI write command clears LAT0/LAT1.
+ *
+ * The Errata Fix/Work Around suggests writing registers with single-byte
+ * write instructions. However, it seems that the byte at 0xe06 (IOCON[23:16])
+ * is read-only, and writing to it clears LAT0/LAT1.
+ */
+ if (reg <= byte_exclude && reg + val_len > byte_exclude) {
+ size_t len = byte_exclude - reg;
+
+ /* Write up to 0xe05 */
+ ret = _mcp251xfd_regmap_nocrc_gather_write(context, reg_p, reg_len, val, len);
+ if (ret)
+ return ret;
+
+ /* Write from 0xe07 on */
+ reg += len + 1;
+ reg = (__force unsigned short)cpu_to_be16(MCP251XFD_SPI_INSTRUCTION_WRITE | reg);
+ return _mcp251xfd_regmap_nocrc_gather_write(context, &reg, reg_len,
+ val + len + 1,
+ val_len - len - 1);
+ }
+
+ return _mcp251xfd_regmap_nocrc_gather_write(context, reg_p, reg_len,
+ val, val_len);
+}
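Worked example: a 4-byte write to IOCON (reg = 0xe04, val_len = 4) spans the excluded byte 0xe06, so it is issued as two transfers: bytes 0xe04..0xe05 first, then byte 0xe07, leaving IOCON[23:16] untouched.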
+
+static int
+mcp251xfd_regmap_nocrc_write(void *context, const void *data, size_t count)
+{
+ const size_t data_offset = sizeof(__be16);
+
+ return mcp251xfd_regmap_nocrc_gather_write(context, data, data_offset,
+ data + data_offset, count - data_offset);
+}
+
static inline bool
mcp251xfd_update_bits_read_reg(const struct mcp251xfd_priv *priv,
unsigned int reg)
@@ -64,6 +104,7 @@ mcp251xfd_update_bits_read_reg(const struct mcp251xfd_priv *priv,
case MCP251XFD_REG_CON:
case MCP251XFD_REG_OSC:
case MCP251XFD_REG_ECCCON:
+ case MCP251XFD_REG_IOCON:
return true;
default:
mcp251xfd_for_each_rx_ring(priv, ring, n) {
@@ -139,10 +180,9 @@ mcp251xfd_regmap_nocrc_update_bits(void *context, unsigned int reg,
tmp_le32 = orig_le32 & ~mask_le32;
tmp_le32 |= val_le32 & mask_le32;
- mcp251xfd_spi_cmd_write_nocrc(&buf_tx->cmd, reg + first_byte);
- memcpy(buf_tx->data, &tmp_le32, len);
-
- return spi_write(spi, buf_tx, sizeof(buf_tx->cmd) + len);
+ reg += first_byte;
+ mcp251xfd_spi_cmd_write_nocrc(&buf_tx->cmd, reg);
+ return mcp251xfd_regmap_nocrc_gather_write(context, &buf_tx->cmd, 2, &tmp_le32, len);
}
static int
@@ -196,9 +236,9 @@ mcp251xfd_regmap_nocrc_read(void *context,
}
static int
-mcp251xfd_regmap_crc_gather_write(void *context,
- const void *reg_p, size_t reg_len,
- const void *val, size_t val_len)
+_mcp251xfd_regmap_crc_gather_write(void *context,
+ const void *reg_p, size_t reg_len,
+ const void *val, size_t val_len)
{
struct spi_device *spi = context;
struct mcp251xfd_priv *priv = spi_get_drvdata(spi);
@@ -230,6 +270,44 @@ mcp251xfd_regmap_crc_gather_write(void *context,
}
static int
+mcp251xfd_regmap_crc_gather_write(void *context,
+ const void *reg_p, size_t reg_len,
+ const void *val, size_t val_len)
+{
+ const u16 byte_exclude = MCP251XFD_REG_IOCON +
+ mcp251xfd_first_byte_set(MCP251XFD_REG_IOCON_GPIO_MASK);
+ u16 reg = *(u16 *)reg_p;
+ int ret;
+
+ /* Never write to bits 16..23 of the IOCON register to avoid clearing LAT0/LAT1
+ *
+ * According to MCP2518FD Errata DS80000789E 5., writing the IOCON register with a
+ * single SPI write command clears LAT0/LAT1.
+ *
+ * The Errata Fix/Work Around suggests writing registers with single-byte
+ * write instructions. However, it seems that the byte at 0xe06 (IOCON[23:16])
+ * is read-only, and writing to it clears LAT0/LAT1.
+ */
+ if (reg <= byte_exclude && reg + val_len > byte_exclude) {
+ size_t len = byte_exclude - reg;
+
+ /* Write up to 0xe05 */
+ ret = _mcp251xfd_regmap_crc_gather_write(context, &reg, reg_len, val, len);
+ if (ret)
+ return ret;
+
+ /* Write from 0xe07 on */
+ reg += len + 1;
+ return _mcp251xfd_regmap_crc_gather_write(context, &reg, reg_len,
+ val + len + 1,
+ val_len - len - 1);
+ }
+
+ return _mcp251xfd_regmap_crc_gather_write(context, reg_p, reg_len,
+ val, val_len);
+}
+
+static int
mcp251xfd_regmap_crc_write(void *context,
const void *data, size_t count)
{
@@ -397,7 +475,7 @@ mcp251xfd_regmap_crc_read(void *context,
return err;
}
- out:
+out:
memcpy(val_buf, buf_rx->data, val_len);
return 0;
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
index bfe4caa0c99d..c34f2067a989 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
@@ -2,7 +2,7 @@
//
// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
//
-// Copyright (c) 2019, 2020, 2021 Pengutronix,
+// Copyright (c) 2019, 2020, 2021, 2024 Pengutronix,
// Marc Kleine-Budde <kernel@pengutronix.de>
//
// Based on:
@@ -12,7 +12,7 @@
// Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
//
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "mcp251xfd.h"
#include "mcp251xfd-ram.h"
@@ -206,6 +206,7 @@ mcp251xfd_ring_init_rx(struct mcp251xfd_priv *priv, u16 *base, u8 *fifo_nr)
int i, j;
mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
+ rx_ring->last_valid = timecounter_read(&priv->tc);
rx_ring->head = 0;
rx_ring->tail = 0;
rx_ring->base = *base;
@@ -289,7 +290,7 @@ int mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
const struct mcp251xfd_rx_ring *rx_ring;
u16 base = 0, ram_used;
u8 fifo_nr = 1;
- int i;
+ int err = 0, i;
netdev_reset_queue(priv->ndev);
@@ -385,10 +386,18 @@ int mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
netdev_err(priv->ndev,
"Error during ring configuration, using more RAM (%u bytes) than available (%u bytes).\n",
ram_used, MCP251XFD_RAM_SIZE);
- return -ENOMEM;
+ err = -ENOMEM;
}
- return 0;
+ if (priv->tx_obj_num_coalesce_irq &&
+ priv->tx_obj_num_coalesce_irq * 2 != priv->tx->obj_num) {
+ netdev_err(priv->ndev,
+ "Error during ring configuration, number of TEF coalescing buffers (%u) must be half of TEF buffers (%u).\n",
+ priv->tx_obj_num_coalesce_irq, priv->tx->obj_num);
+ err = -EINVAL;
+ }
+
+ return err;
}
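For example, with 8 TX buffers configured, TX IRQ coalescing (the ethtool tx-frames-irq setting), if enabled at all, must be exactly 4 frames; otherwise ring initialization now fails with -EINVAL.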
void mcp251xfd_ring_free(struct mcp251xfd_priv *priv)
@@ -468,11 +477,27 @@ int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
/* switching from CAN-2.0 to CAN-FD mode or vice versa */
if (fd_mode != test_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags)) {
+ const struct ethtool_ringparam ring = {
+ .rx_pending = priv->rx_obj_num,
+ .tx_pending = priv->tx->obj_num,
+ };
+ const struct ethtool_coalesce ec = {
+ .rx_coalesce_usecs_irq = priv->rx_coalesce_usecs_irq,
+ .rx_max_coalesced_frames_irq = priv->rx_obj_num_coalesce_irq == 0 ?
+ 1 : priv->rx_obj_num_coalesce_irq,
+ .tx_coalesce_usecs_irq = priv->tx_coalesce_usecs_irq,
+ .tx_max_coalesced_frames_irq = priv->tx_obj_num_coalesce_irq == 0 ?
+ 1 : priv->tx_obj_num_coalesce_irq,
+ };
struct can_ram_layout layout;
- can_ram_get_layout(&layout, &mcp251xfd_ram_config, NULL, NULL, fd_mode);
- priv->rx_obj_num = layout.default_rx;
- tx_ring->obj_num = layout.default_tx;
+ can_ram_get_layout(&layout, &mcp251xfd_ram_config, &ring, &ec, fd_mode);
+
+ priv->rx_obj_num = layout.cur_rx;
+ priv->rx_obj_num_coalesce_irq = layout.rx_coalesce;
+
+ tx_ring->obj_num = layout.cur_tx;
+ priv->tx_obj_num_coalesce_irq = layout.tx_coalesce;
}
if (fd_mode) {
@@ -485,6 +510,8 @@ int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
clear_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags);
}
+ tx_ring->obj_num_shift_to_u8 = BITS_PER_TYPE(tx_ring->obj_num) -
+ ilog2(tx_ring->obj_num);
tx_ring->obj_size = tx_obj_size;
rem = priv->rx_obj_num;
@@ -507,16 +534,18 @@ int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
}
rx_ring->obj_num = rx_obj_num;
+ rx_ring->obj_num_shift_to_u8 = BITS_PER_TYPE(rx_ring->obj_num_shift_to_u8) -
+ ilog2(rx_obj_num);
rx_ring->obj_size = rx_obj_size;
priv->rx[i] = rx_ring;
}
priv->rx_ring_num = i;
- hrtimer_init(&priv->rx_irq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- priv->rx_irq_timer.function = mcp251xfd_rx_irq_timer;
+ hrtimer_setup(&priv->rx_irq_timer, mcp251xfd_rx_irq_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
- hrtimer_init(&priv->tx_irq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- priv->tx_irq_timer.function = mcp251xfd_tx_irq_timer;
+ hrtimer_setup(&priv->tx_irq_timer, mcp251xfd_tx_irq_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
return 0;
}
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c
index ced8d9c81f8c..fe897f3e4c12 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c
@@ -2,7 +2,7 @@
//
// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
//
-// Copyright (c) 2019, 2020, 2021 Pengutronix,
+// Copyright (c) 2019, 2020, 2021, 2023 Pengutronix,
// Marc Kleine-Budde <kernel@pengutronix.de>
//
// Based on:
@@ -16,23 +16,14 @@
#include "mcp251xfd.h"
-static inline int
-mcp251xfd_rx_head_get_from_chip(const struct mcp251xfd_priv *priv,
- const struct mcp251xfd_rx_ring *ring,
- u8 *rx_head, bool *fifo_empty)
+static inline bool mcp251xfd_rx_fifo_sta_empty(const u32 fifo_sta)
{
- u32 fifo_sta;
- int err;
-
- err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(ring->fifo_nr),
- &fifo_sta);
- if (err)
- return err;
-
- *rx_head = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
- *fifo_empty = !(fifo_sta & MCP251XFD_REG_FIFOSTA_TFNRFNIF);
+ return !(fifo_sta & MCP251XFD_REG_FIFOSTA_TFNRFNIF);
+}
- return 0;
+static inline bool mcp251xfd_rx_fifo_sta_full(const u32 fifo_sta)
+{
+ return fifo_sta & MCP251XFD_REG_FIFOSTA_TFERFFIF;
}
static inline int
@@ -80,29 +71,49 @@ mcp251xfd_check_rx_tail(const struct mcp251xfd_priv *priv,
}
static int
-mcp251xfd_rx_ring_update(const struct mcp251xfd_priv *priv,
- struct mcp251xfd_rx_ring *ring)
+mcp251xfd_get_rx_len(const struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_rx_ring *ring,
+ u8 *len_p)
{
- u32 new_head;
- u8 chip_rx_head;
- bool fifo_empty;
+ const u8 shift = ring->obj_num_shift_to_u8;
+ u8 chip_head, tail, len;
+ u32 fifo_sta;
int err;
- err = mcp251xfd_rx_head_get_from_chip(priv, ring, &chip_rx_head,
- &fifo_empty);
- if (err || fifo_empty)
+ err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(ring->fifo_nr),
+ &fifo_sta);
+ if (err)
+ return err;
+
+ if (mcp251xfd_rx_fifo_sta_empty(fifo_sta)) {
+ *len_p = 0;
+ return 0;
+ }
+
+ if (mcp251xfd_rx_fifo_sta_full(fifo_sta)) {
+ *len_p = ring->obj_num;
+ return 0;
+ }
+
+ chip_head = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
+
+ err = mcp251xfd_check_rx_tail(priv, ring);
+ if (err)
return err;
+ tail = mcp251xfd_get_rx_tail(ring);
- /* chip_rx_head, is the next RX-Object filled by the HW.
- * The new RX head must be >= the old head.
+ /* First shift to full u8. The subtraction works on signed
+ * values, which keeps the difference stable across the u8
+ * overflow. The right shift acts on len, which is a u8.
*/
- new_head = round_down(ring->head, ring->obj_num) + chip_rx_head;
- if (new_head <= ring->head)
- new_head += ring->obj_num;
+ BUILD_BUG_ON(sizeof(ring->obj_num) != sizeof(chip_head));
+ BUILD_BUG_ON(sizeof(ring->obj_num) != sizeof(tail));
+ BUILD_BUG_ON(sizeof(ring->obj_num) != sizeof(len));
- ring->head = new_head;
+ len = (chip_head << shift) - (tail << shift);
+ *len_p = len >> shift;
- return mcp251xfd_check_rx_tail(priv, ring);
+ return 0;
}
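Worked example: with obj_num = 16 the shift is 8 - ilog2(16) = 4; if tail = 14 and chip_head = 2 (the FIFO has wrapped), then (2 << 4) - (14 << 4) evaluates to 64 in u8 arithmetic, and 64 >> 4 = 4 pending objects, matching 16 - 14 + 2.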
static void
@@ -148,8 +159,6 @@ mcp251xfd_hw_rx_obj_to_skb(const struct mcp251xfd_priv *priv,
if (!(hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR))
memcpy(cfd->data, hw_rx_obj->data, cfd->len);
-
- mcp251xfd_skb_set_timestamp(priv, skb, hw_rx_obj->ts);
}
static int
@@ -160,8 +169,26 @@ mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv,
struct net_device_stats *stats = &priv->ndev->stats;
struct sk_buff *skb;
struct canfd_frame *cfd;
+ u64 timestamp;
int err;
+ /* According to mcp2518fd erratum DS80000789E 6., the FIFOCI
+ * bits of a FIFOSTA register (here: the RX FIFO head index)
+ * might be corrupted, and we might process past the RX FIFO's
+ * head into old CAN frames.
+ *
+ * Compare the timestamp of the currently processed CAN frame
+ * with the last valid frame received. Abort with -EBADMSG if
+ * an old CAN frame is detected.
+ */
+ timestamp = timecounter_cyc2time(&priv->tc, hw_rx_obj->ts);
+ if (timestamp <= ring->last_valid) {
+ stats->rx_fifo_errors++;
+
+ return -EBADMSG;
+ }
+ ring->last_valid = timestamp;
+
if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF)
skb = alloc_canfd_skb(priv->ndev, &cfd);
else
@@ -172,6 +199,7 @@ mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv,
return 0;
}
+ mcp251xfd_skb_set_timestamp(skb, timestamp);
mcp251xfd_hw_rx_obj_to_skb(priv, hw_rx_obj, skb);
err = can_rx_offload_queue_timestamp(&priv->offload, skb, hw_rx_obj->ts);
if (err)
@@ -198,51 +226,80 @@ mcp251xfd_rx_obj_read(const struct mcp251xfd_priv *priv,
}
static int
+mcp251xfd_handle_rxif_ring_uinc(const struct mcp251xfd_priv *priv,
+ struct mcp251xfd_rx_ring *ring,
+ u8 len)
+{
+ int offset;
+ int err;
+
+ if (!len)
+ return 0;
+
+ ring->head += len;
+
+ /* Increment the RX FIFO tail pointer 'len' times in a
+ * single SPI message.
+ *
+ * Note:
+ * Calculate offset, so that the SPI transfer ends on
+ * the last message of the uinc_xfer array, which has
+ * "cs_change == 0", to properly deactivate the chip
+ * select.
+ */
+ offset = ARRAY_SIZE(ring->uinc_xfer) - len;
+ err = spi_sync_transfer(priv->spi,
+ ring->uinc_xfer + offset, len);
+ if (err)
+ return err;
+
+ ring->tail += len;
+
+ return 0;
+}
+
+static int
mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv,
struct mcp251xfd_rx_ring *ring)
{
struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj = ring->obj;
- u8 rx_tail, len;
+ u8 rx_tail, len, l;
int err, i;
- err = mcp251xfd_rx_ring_update(priv, ring);
+ err = mcp251xfd_get_rx_len(priv, ring, &len);
if (err)
return err;
- while ((len = mcp251xfd_get_rx_linear_len(ring))) {
- int offset;
-
+ while ((l = mcp251xfd_get_rx_linear_len(ring, len))) {
rx_tail = mcp251xfd_get_rx_tail(ring);
err = mcp251xfd_rx_obj_read(priv, ring, hw_rx_obj,
- rx_tail, len);
+ rx_tail, l);
if (err)
return err;
- for (i = 0; i < len; i++) {
+ for (i = 0; i < l; i++) {
err = mcp251xfd_handle_rxif_one(priv, ring,
(void *)hw_rx_obj +
i * ring->obj_size);
- if (err)
+
+ /* -EBADMSG means we're affected by mcp2518fd
+ * erratum DS80000789E 6., i.e. the timestamp
+ * in the RX object is older than the last
+ * valid received CAN frame. Don't process any
+ * further and mark processed frames as good.
+ */
+ if (err == -EBADMSG)
+ return mcp251xfd_handle_rxif_ring_uinc(priv, ring, i);
+ else if (err)
return err;
}
- /* Increment the RX FIFO tail pointer 'len' times in a
- * single SPI message.
- *
- * Note:
- * Calculate offset, so that the SPI transfer ends on
- * the last message of the uinc_xfer array, which has
- * "cs_change == 0", to properly deactivate the chip
- * select.
- */
- offset = ARRAY_SIZE(ring->uinc_xfer) - len;
- err = spi_sync_transfer(priv->spi,
- ring->uinc_xfer + offset, len);
+ err = mcp251xfd_handle_rxif_ring_uinc(priv, ring, l);
if (err)
return err;
- ring->tail += len;
+ len -= l;
}
return 0;
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
index e5bd57b65aaf..e94321849fd7 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
@@ -2,7 +2,7 @@
//
// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
//
-// Copyright (c) 2019, 2020, 2021 Pengutronix,
+// Copyright (c) 2019, 2020, 2021, 2023 Pengutronix,
// Marc Kleine-Budde <kernel@pengutronix.de>
//
// Based on:
@@ -16,6 +16,16 @@
#include "mcp251xfd.h"
+static inline bool mcp251xfd_tx_fifo_sta_empty(u32 fifo_sta)
+{
+ return fifo_sta & MCP251XFD_REG_FIFOSTA_TFERFFIF;
+}
+
+static inline bool mcp251xfd_tx_fifo_sta_less_than_half_full(u32 fifo_sta)
+{
+ return fifo_sta & MCP251XFD_REG_FIFOSTA_TFHRFHIF;
+}
+
static inline int
mcp251xfd_tef_tail_get_from_chip(const struct mcp251xfd_priv *priv,
u8 *tef_tail)
@@ -56,60 +66,43 @@ static int mcp251xfd_check_tef_tail(const struct mcp251xfd_priv *priv)
}
static int
-mcp251xfd_handle_tefif_recover(const struct mcp251xfd_priv *priv, const u32 seq)
-{
- const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
- u32 tef_sta;
- int err;
-
- err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFSTA, &tef_sta);
- if (err)
- return err;
-
- if (tef_sta & MCP251XFD_REG_TEFSTA_TEFOVIF) {
- netdev_err(priv->ndev,
- "Transmit Event FIFO buffer overflow.\n");
- return -ENOBUFS;
- }
-
- netdev_info(priv->ndev,
- "Transmit Event FIFO buffer %s. (seq=0x%08x, tef_tail=0x%08x, tef_head=0x%08x, tx_head=0x%08x).\n",
- tef_sta & MCP251XFD_REG_TEFSTA_TEFFIF ?
- "full" : tef_sta & MCP251XFD_REG_TEFSTA_TEFNEIF ?
- "not empty" : "empty",
- seq, priv->tef->tail, priv->tef->head, tx_ring->head);
-
- /* The Sequence Number in the TEF doesn't match our tef_tail. */
- return -EAGAIN;
-}
-
-static int
mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv,
const struct mcp251xfd_hw_tef_obj *hw_tef_obj,
unsigned int *frame_len_ptr)
{
struct net_device_stats *stats = &priv->ndev->stats;
+ u32 seq, tef_tail_masked, tef_tail;
struct sk_buff *skb;
- u32 seq, seq_masked, tef_tail_masked, tef_tail;
- seq = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK,
+ /* Use the MCP2517FD mask on the MCP2518FD, too. We only
+ * compare 7 bits, this is enough to detect old TEF objects.
+ */
+ seq = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK,
hw_tef_obj->flags);
-
- /* Use the MCP2517FD mask on the MCP2518FD, too. We only
- * compare 7 bits, this should be enough to detect
- * net-yet-completed, i.e. old TEF objects.
- */
- seq_masked = seq &
- field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
tef_tail_masked = priv->tef->tail &
field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
- if (seq_masked != tef_tail_masked)
- return mcp251xfd_handle_tefif_recover(priv, seq);
+
+ /* According to mcp2518fd erratum DS80000789E 6., the FIFOCI
+ * bits of a FIFOSTA register (here: the TX FIFO tail index)
+ * might be corrupted, and we might process past the TEF FIFO's
+ * head into old CAN frames.
+ *
+ * Compare the sequence number of the currently processed CAN
+ * frame with the expected sequence number. Abort with
+ * -EBADMSG if an old CAN frame is detected.
+ */
+ if (seq != tef_tail_masked) {
+ netdev_dbg(priv->ndev, "%s: chip=0x%02x ring=0x%02x\n", __func__,
+ seq, tef_tail_masked);
+ stats->tx_fifo_errors++;
+
+ return -EBADMSG;
+ }
tef_tail = mcp251xfd_get_tef_tail(priv);
skb = priv->can.echo_skb[tef_tail];
if (skb)
- mcp251xfd_skb_set_timestamp(priv, skb, hw_tef_obj->ts);
+ mcp251xfd_skb_set_timestamp_raw(priv, skb, hw_tef_obj->ts);
stats->tx_bytes +=
can_rx_offload_get_echo_skb_queue_timestamp(&priv->offload,
tef_tail, hw_tef_obj->ts,
@@ -120,28 +113,70 @@ mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv,
return 0;
}
-static int mcp251xfd_tef_ring_update(struct mcp251xfd_priv *priv)
+static int
+mcp251xfd_get_tef_len(struct mcp251xfd_priv *priv, u8 *len_p)
{
const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
- unsigned int new_head;
- u8 chip_tx_tail;
+ const u8 shift = tx_ring->obj_num_shift_to_u8;
+ u8 chip_tx_tail, tail, len;
+ u32 fifo_sta;
int err;
- err = mcp251xfd_tx_tail_get_from_chip(priv, &chip_tx_tail);
+ err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(priv->tx->fifo_nr),
+ &fifo_sta);
if (err)
return err;
- /* chip_tx_tail, is the next TX-Object send by the HW.
- * The new TEF head must be >= the old head, ...
+ /* If the chip says the TX-FIFO is empty, but there are no TX
+ * buffers free in the ring, we assume all have been sent.
*/
- new_head = round_down(priv->tef->head, tx_ring->obj_num) + chip_tx_tail;
- if (new_head <= priv->tef->head)
- new_head += tx_ring->obj_num;
+ if (mcp251xfd_tx_fifo_sta_empty(fifo_sta) &&
+ mcp251xfd_get_tx_free(tx_ring) == 0) {
+ *len_p = tx_ring->obj_num;
+ return 0;
+ }
- /* ... but it cannot exceed the TX head. */
- priv->tef->head = min(new_head, tx_ring->head);
+ chip_tx_tail = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
- return mcp251xfd_check_tef_tail(priv);
+ err = mcp251xfd_check_tef_tail(priv);
+ if (err)
+ return err;
+ tail = mcp251xfd_get_tef_tail(priv);
+
+ /* First shift to a full u8. The subtraction works on signed
+ * values, which keeps the difference stable across the u8
+ * overflow. The right shift acts on len, which is a u8.
+ */
+ BUILD_BUG_ON(sizeof(tx_ring->obj_num) != sizeof(chip_tx_tail));
+ BUILD_BUG_ON(sizeof(tx_ring->obj_num) != sizeof(tail));
+ BUILD_BUG_ON(sizeof(tx_ring->obj_num) != sizeof(len));
+
+ len = (chip_tx_tail << shift) - (tail << shift);
+ len >>= shift;
+
+ /* According to mcp2518fd erratum DS80000789E 6., the FIFOCI
+ * bits of a FIFOSTA register (here: the TX-FIFO tail index)
+ * might be corrupted.
+ *
+ * However here it seems the bit indicating that the TX-FIFO
+ * is empty (MCP251XFD_REG_FIFOSTA_TFERFFIF) is not correct
+ * while the TX-FIFO tail index is.
+ *
+ * We assume the TX-FIFO is empty, i.e. all pending CAN frames
+ * have been sent, if:
+ * - Chip's head and tail index are equal (len == 0).
+ * - The TX-FIFO is less than half full.
+ * (The TX-FIFO empty case has already been checked at the
+ * beginning of this function.)
+ * - No free buffers in the TX ring.
+ */
+ if (len == 0 && mcp251xfd_tx_fifo_sta_less_than_half_full(fifo_sta) &&
+ mcp251xfd_get_tx_free(tx_ring) == 0)
+ len = tx_ring->obj_num;
+
+ *len_p = len;
+
+ return 0;
}
static inline int
@@ -182,13 +217,12 @@ int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
u8 tef_tail, len, l;
int err, i;
- err = mcp251xfd_tef_ring_update(priv);
+ err = mcp251xfd_get_tef_len(priv, &len);
if (err)
return err;
tef_tail = mcp251xfd_get_tef_tail(priv);
- len = mcp251xfd_get_tef_len(priv);
- l = mcp251xfd_get_tef_linear_len(priv);
+ l = mcp251xfd_get_tef_linear_len(priv, len);
err = mcp251xfd_tef_obj_read(priv, hw_tef_obj, tef_tail, l);
if (err)
return err;
@@ -203,12 +237,12 @@ int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
unsigned int frame_len = 0;
err = mcp251xfd_handle_tefif_one(priv, &hw_tef_obj[i], &frame_len);
- /* -EAGAIN means the Sequence Number in the TEF
- * doesn't match our tef_tail. This can happen if we
- * read the TEF objects too early. Leave loop let the
- * interrupt handler call us again.
+ /* -EBADMSG means we're affected by mcp2518fd erratum
+ * DS80000789E 6., i.e. the Sequence Number in the TEF
+ * doesn't match our tef_tail. Don't process any
+ * further and mark processed frames as good.
*/
- if (err == -EAGAIN)
+ if (err == -EBADMSG)
goto out_netif_wake_queue;
if (err)
return err;
@@ -216,13 +250,15 @@ int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
total_frame_len += frame_len;
}
- out_netif_wake_queue:
+out_netif_wake_queue:
len = i; /* number of handled good TEFs */
if (len) {
struct mcp251xfd_tef_ring *ring = priv->tef;
struct mcp251xfd_tx_ring *tx_ring = priv->tx;
int offset;
+ ring->head += len;
+
/* Increment the TEF FIFO tail pointer 'len' times in
* a single SPI message.
*
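The obj_num_shift_to_u8 arithmetic used by mcp251xfd_get_tef_len() can be reproduced stand-alone: shifting both ring indices up so they occupy the full u8 range makes the unsigned subtraction wrap at the ring size rather than at 256. A hypothetical sketch, assuming a 32-entry ring (shift = 8 - log2(32) = 3):

#include <stdint.h>
#include <stdio.h>

static uint8_t ring_len(uint8_t chip_tail, uint8_t tail, unsigned int shift)
{
	/* the truncation to u8 performs the modulo-ring-size wrap */
	uint8_t len = (uint8_t)((chip_tail << shift) - (tail << shift));

	return len >> shift;
}

int main(void)
{
	/* chip tail has wrapped past 0: objects 30, 31, 0, 1 are pending */
	printf("%u\n", ring_len(2, 30, 3));	/* prints 4 */
	return 0;
}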
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c
index 712e09186987..413a5cb75c13 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c
@@ -2,7 +2,7 @@
//
// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
//
-// Copyright (c) 2021 Pengutronix,
+// Copyright (c) 2021, 2023 Pengutronix,
// Marc Kleine-Budde <kernel@pengutronix.de>
//
@@ -11,20 +11,20 @@
#include "mcp251xfd.h"
-static u64 mcp251xfd_timestamp_read(const struct cyclecounter *cc)
+static u64 mcp251xfd_timestamp_raw_read(struct cyclecounter *cc)
{
const struct mcp251xfd_priv *priv;
- u32 timestamp = 0;
+ u32 ts_raw = 0;
int err;
priv = container_of(cc, struct mcp251xfd_priv, cc);
- err = mcp251xfd_get_timestamp(priv, &timestamp);
+ err = mcp251xfd_get_timestamp_raw(priv, &ts_raw);
if (err)
netdev_err(priv->ndev,
"Error %d while reading timestamp. HW timestamps may be inaccurate.",
err);
- return timestamp;
+ return ts_raw;
}
static void mcp251xfd_timestamp_work(struct work_struct *work)
@@ -39,28 +39,21 @@ static void mcp251xfd_timestamp_work(struct work_struct *work)
MCP251XFD_TIMESTAMP_WORK_DELAY_SEC * HZ);
}
-void mcp251xfd_skb_set_timestamp(const struct mcp251xfd_priv *priv,
- struct sk_buff *skb, u32 timestamp)
-{
- struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
- u64 ns;
-
- ns = timecounter_cyc2time(&priv->tc, timestamp);
- hwtstamps->hwtstamp = ns_to_ktime(ns);
-}
-
void mcp251xfd_timestamp_init(struct mcp251xfd_priv *priv)
{
struct cyclecounter *cc = &priv->cc;
- cc->read = mcp251xfd_timestamp_read;
+ cc->read = mcp251xfd_timestamp_raw_read;
cc->mask = CYCLECOUNTER_MASK(32);
cc->shift = 1;
cc->mult = clocksource_hz2mult(priv->can.clock.freq, cc->shift);
- timecounter_init(&priv->tc, &priv->cc, ktime_get_real_ns());
-
INIT_DELAYED_WORK(&priv->timestamp, mcp251xfd_timestamp_work);
+}
+
+void mcp251xfd_timestamp_start(struct mcp251xfd_priv *priv)
+{
+ timecounter_init(&priv->tc, &priv->cc, ktime_get_real_ns());
schedule_delayed_work(&priv->timestamp,
MCP251XFD_TIMESTAMP_WORK_DELAY_SEC * HZ);
}
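Splitting mcp251xfd_timestamp_init() from the new mcp251xfd_timestamp_start() separates setting up the cyclecounter from arming the timecounter and its periodic refresh work. The refresh work exists because the hardware TBC counter is only 32 bits wide: as long as it is sampled at least once per wrap period, deltas accumulate into a monotonically growing 64-bit nanosecond value. A stand-alone model of that accumulation (the 40 MHz clock is only an example value):

#include <stdint.h>
#include <stdio.h>

struct tc {
	uint32_t last_raw;	/* last sampled 32-bit counter value */
	uint64_t ns;		/* accumulated nanoseconds */
	uint32_t hz;		/* counter frequency */
};

static void tc_update(struct tc *tc, uint32_t raw)
{
	uint32_t delta = raw - tc->last_raw;	/* u32 arithmetic handles the wrap */

	tc->ns += (uint64_t)delta * 1000000000ull / tc->hz;
	tc->last_raw = raw;
}

int main(void)
{
	struct tc tc = { .last_raw = 0xfffffff0, .ns = 0, .hz = 40000000 };

	tc_update(&tc, 0x00000010);	/* counter wrapped; delta = 0x20 ticks */
	printf("%llu ns\n", (unsigned long long)tc.ns);	/* 800 ns */
	return 0;
}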
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c
index 160528d3cc26..747ae3e8a768 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c
@@ -12,7 +12,7 @@
// Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
//
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/bitfield.h>
#include "mcp251xfd.h"
@@ -131,6 +131,39 @@ mcp251xfd_tx_obj_from_skb(const struct mcp251xfd_priv *priv,
tx_obj->xfer[0].len = len;
}
+static void mcp251xfd_tx_failure_drop(const struct mcp251xfd_priv *priv,
+ struct mcp251xfd_tx_ring *tx_ring,
+ int err)
+{
+ struct net_device *ndev = priv->ndev;
+ struct net_device_stats *stats = &ndev->stats;
+ unsigned int frame_len = 0;
+ u8 tx_head;
+
+ tx_ring->head--;
+ stats->tx_dropped++;
+ tx_head = mcp251xfd_get_tx_head(tx_ring);
+ can_free_echo_skb(ndev, tx_head, &frame_len);
+ netdev_completed_queue(ndev, 1, frame_len);
+ netif_wake_queue(ndev);
+
+ if (net_ratelimit())
+ netdev_err(priv->ndev, "ERROR in %s: %d\n", __func__, err);
+}
+
+void mcp251xfd_tx_obj_write_sync(struct work_struct *work)
+{
+ struct mcp251xfd_priv *priv = container_of(work, struct mcp251xfd_priv,
+ tx_work);
+ struct mcp251xfd_tx_obj *tx_obj = priv->tx_work_obj;
+ struct mcp251xfd_tx_ring *tx_ring = priv->tx;
+ int err;
+
+ err = spi_sync(priv->spi, &tx_obj->msg);
+ if (err)
+ mcp251xfd_tx_failure_drop(priv, tx_ring, err);
+}
+
static int mcp251xfd_tx_obj_write(const struct mcp251xfd_priv *priv,
struct mcp251xfd_tx_obj *tx_obj)
{
@@ -162,6 +195,11 @@ static bool mcp251xfd_tx_busy(const struct mcp251xfd_priv *priv,
return false;
}
+static bool mcp251xfd_work_busy(struct work_struct *work)
+{
+ return work_busy(work);
+}
+
netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
@@ -175,7 +213,8 @@ netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
if (can_dev_dropped_skb(ndev, skb))
return NETDEV_TX_OK;
- if (mcp251xfd_tx_busy(priv, tx_ring))
+ if (mcp251xfd_tx_busy(priv, tx_ring) ||
+ mcp251xfd_work_busy(&priv->tx_work))
return NETDEV_TX_BUSY;
tx_obj = mcp251xfd_get_tx_obj_next(tx_ring);
@@ -193,13 +232,13 @@ netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
netdev_sent_queue(priv->ndev, frame_len);
err = mcp251xfd_tx_obj_write(priv, tx_obj);
- if (err)
- goto out_err;
-
- return NETDEV_TX_OK;
-
- out_err:
- netdev_err(priv->ndev, "ERROR in %s: %d\n", __func__, err);
+ if (err == -EBUSY) {
+ netif_stop_queue(ndev);
+ priv->tx_work_obj = tx_obj;
+ queue_work(priv->wq, &priv->tx_work);
+ } else if (err) {
+ mcp251xfd_tx_failure_drop(priv, tx_ring, err);
+ }
return NETDEV_TX_OK;
}
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
index 24510b3b8020..085d7101e595 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
@@ -2,7 +2,7 @@
*
* mcp251xfd - Microchip MCP251xFD Family CAN controller driver
*
- * Copyright (c) 2019, 2020, 2021 Pengutronix,
+ * Copyright (c) 2019, 2020, 2021, 2023 Pengutronix,
* Marc Kleine-Budde <kernel@pengutronix.de>
* Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
*/
@@ -15,6 +15,7 @@
#include <linux/can/dev.h>
#include <linux/can/rx-offload.h>
#include <linux/gpio/consumer.h>
+#include <linux/gpio/driver.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/regmap.h>
@@ -335,13 +336,19 @@
#define MCP251XFD_REG_IOCON_TXCANOD BIT(28)
#define MCP251XFD_REG_IOCON_PM1 BIT(25)
#define MCP251XFD_REG_IOCON_PM0 BIT(24)
+#define MCP251XFD_REG_IOCON_PM(n) (MCP251XFD_REG_IOCON_PM0 << (n))
#define MCP251XFD_REG_IOCON_GPIO1 BIT(17)
#define MCP251XFD_REG_IOCON_GPIO0 BIT(16)
+#define MCP251XFD_REG_IOCON_GPIO(n) (MCP251XFD_REG_IOCON_GPIO0 << (n))
+#define MCP251XFD_REG_IOCON_GPIO_MASK GENMASK(17, 16)
#define MCP251XFD_REG_IOCON_LAT1 BIT(9)
#define MCP251XFD_REG_IOCON_LAT0 BIT(8)
+#define MCP251XFD_REG_IOCON_LAT(n) (MCP251XFD_REG_IOCON_LAT0 << (n))
+#define MCP251XFD_REG_IOCON_LAT_MASK GENMASK(9, 8)
#define MCP251XFD_REG_IOCON_XSTBYEN BIT(6)
#define MCP251XFD_REG_IOCON_TRIS1 BIT(1)
#define MCP251XFD_REG_IOCON_TRIS0 BIT(0)
+#define MCP251XFD_REG_IOCON_TRIS(n) (MCP251XFD_REG_IOCON_TRIS0 << (n))
#define MCP251XFD_REG_CRC 0xe08
#define MCP251XFD_REG_CRC_FERRIE BIT(25)
@@ -524,6 +531,7 @@ struct mcp251xfd_tef_ring {
/* u8 obj_num equals tx_ring->obj_num */
/* u8 obj_size equals sizeof(struct mcp251xfd_hw_tef_obj) */
+ /* u8 obj_num_shift_to_u8 equals tx_ring->obj_num_shift_to_u8 */
union mcp251xfd_write_reg_buf irq_enable_buf;
struct spi_transfer irq_enable_xfer;
@@ -542,6 +550,7 @@ struct mcp251xfd_tx_ring {
u8 nr;
u8 fifo_nr;
u8 obj_num;
+ u8 obj_num_shift_to_u8;
u8 obj_size;
struct mcp251xfd_tx_obj obj[MCP251XFD_TX_OBJ_NUM_MAX];
@@ -552,10 +561,14 @@ struct mcp251xfd_rx_ring {
unsigned int head;
unsigned int tail;
+ /* timestamp of the last valid received CAN frame */
+ u64 last_valid;
+
u16 base;
u8 nr;
u8 fifo_nr;
u8 obj_num;
+ u8 obj_num_shift_to_u8;
u8 obj_size;
union mcp251xfd_write_reg_buf irq_enable_buf;
@@ -633,6 +646,10 @@ struct mcp251xfd_priv {
struct mcp251xfd_rx_ring *rx[MCP251XFD_FIFO_RX_NUM];
struct mcp251xfd_tx_ring tx[MCP251XFD_FIFO_TX_NUM];
+ struct workqueue_struct *wq;
+ struct work_struct tx_work;
+ struct mcp251xfd_tx_obj *tx_work_obj;
+
DECLARE_BITMAP(flags, __MCP251XFD_FLAGS_SIZE__);
u8 rx_ring_num;
@@ -660,6 +677,7 @@ struct mcp251xfd_priv {
struct mcp251xfd_devtype_data devtype_data;
struct can_berr_counter bec;
+ struct gpio_chip gc;
};
#define MCP251XFD_IS(_model) \
@@ -805,10 +823,27 @@ mcp251xfd_spi_cmd_write(const struct mcp251xfd_priv *priv,
return data;
}
-static inline int mcp251xfd_get_timestamp(const struct mcp251xfd_priv *priv,
- u32 *timestamp)
+static inline int mcp251xfd_get_timestamp_raw(const struct mcp251xfd_priv *priv,
+ u32 *ts_raw)
+{
+ return regmap_read(priv->map_reg, MCP251XFD_REG_TBC, ts_raw);
+}
+
+static inline void mcp251xfd_skb_set_timestamp(struct sk_buff *skb, u64 ns)
{
- return regmap_read(priv->map_reg, MCP251XFD_REG_TBC, timestamp);
+ struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
+
+ hwtstamps->hwtstamp = ns_to_ktime(ns);
+}
+
+static inline
+void mcp251xfd_skb_set_timestamp_raw(const struct mcp251xfd_priv *priv,
+ struct sk_buff *skb, u32 ts_raw)
+{
+ u64 ns;
+
+ ns = timecounter_cyc2time(&priv->tc, ts_raw);
+ mcp251xfd_skb_set_timestamp(skb, ns);
}
static inline u16 mcp251xfd_get_tef_obj_addr(u8 n)
@@ -857,17 +892,8 @@ static inline u8 mcp251xfd_get_tef_tail(const struct mcp251xfd_priv *priv)
return priv->tef->tail & (priv->tx->obj_num - 1);
}
-static inline u8 mcp251xfd_get_tef_len(const struct mcp251xfd_priv *priv)
-{
- return priv->tef->head - priv->tef->tail;
-}
-
-static inline u8 mcp251xfd_get_tef_linear_len(const struct mcp251xfd_priv *priv)
+static inline u8 mcp251xfd_get_tef_linear_len(const struct mcp251xfd_priv *priv, u8 len)
{
- u8 len;
-
- len = mcp251xfd_get_tef_len(priv);
-
return min_t(u8, len, priv->tx->obj_num - mcp251xfd_get_tef_tail(priv));
}
@@ -910,18 +936,9 @@ static inline u8 mcp251xfd_get_rx_tail(const struct mcp251xfd_rx_ring *ring)
return ring->tail & (ring->obj_num - 1);
}
-static inline u8 mcp251xfd_get_rx_len(const struct mcp251xfd_rx_ring *ring)
-{
- return ring->head - ring->tail;
-}
-
static inline u8
-mcp251xfd_get_rx_linear_len(const struct mcp251xfd_rx_ring *ring)
+mcp251xfd_get_rx_linear_len(const struct mcp251xfd_rx_ring *ring, u8 len)
{
- u8 len;
-
- len = mcp251xfd_get_rx_len(ring);
-
return min_t(u8, len, ring->obj_num - mcp251xfd_get_rx_tail(ring));
}
@@ -947,11 +964,11 @@ void mcp251xfd_ring_free(struct mcp251xfd_priv *priv);
int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv);
int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv);
int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv);
-void mcp251xfd_skb_set_timestamp(const struct mcp251xfd_priv *priv,
- struct sk_buff *skb, u32 timestamp);
void mcp251xfd_timestamp_init(struct mcp251xfd_priv *priv);
+void mcp251xfd_timestamp_start(struct mcp251xfd_priv *priv);
void mcp251xfd_timestamp_stop(struct mcp251xfd_priv *priv);
+void mcp251xfd_tx_obj_write_sync(struct work_struct *work);
netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
struct net_device *ndev);
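The new IOCON helpers in the header derive the per-pin bit from the pin-0 bit by shifting, e.g. MCP251XFD_REG_IOCON_GPIO(n) = GPIO0 << n. A hypothetical stand-alone illustration of that pattern (register layout reduced to the GPIO bits only):

#include <stdio.h>

#define BIT(n)			(1u << (n))
#define REG_IOCON_GPIO0		BIT(16)
#define REG_IOCON_GPIO(n)	(REG_IOCON_GPIO0 << (n))

int main(void)
{
	printf("GPIO0 mask: 0x%08x\n", REG_IOCON_GPIO(0));	/* 0x00010000 */
	printf("GPIO1 mask: 0x%08x\n", REG_IOCON_GPIO(1));	/* 0x00020000 */
	return 0;
}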
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index ab8d01784686..af52285d5a4e 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -570,7 +570,7 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
else
state = CAN_STATE_ERROR_ACTIVE;
}
- if (skb && state != CAN_STATE_BUS_OFF) {
+ if (likely(skb) && state != CAN_STATE_BUS_OFF) {
cf->can_id |= CAN_ERR_CNT;
cf->data[6] = txerr;
cf->data[7] = rxerr;
@@ -579,11 +579,9 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
/* bus error interrupt */
netdev_dbg(dev, "bus error interrupt\n");
priv->can.can_stats.bus_error++;
- stats->rx_errors++;
+ ecc = readl(priv->base + SUN4I_REG_STA_ADDR);
if (likely(skb)) {
- ecc = readl(priv->base + SUN4I_REG_STA_ADDR);
-
cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
switch (ecc & SUN4I_STA_MASK_ERR) {
@@ -601,9 +599,15 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
>> 16;
break;
}
- /* error occurred during transmission? */
- if ((ecc & SUN4I_STA_ERR_DIR) == 0)
+ }
+
+ /* error occurred during transmission? */
+ if ((ecc & SUN4I_STA_ERR_DIR) == 0) {
+ if (likely(skb))
cf->data[2] |= CAN_ERR_PROT_TX;
+ stats->tx_errors++;
+ } else {
+ stats->rx_errors++;
}
}
if (isrc & SUN4I_INT_ERR_PASSIVE) {
@@ -629,10 +633,10 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
tx_state = txerr >= rxerr ? state : 0;
rx_state = txerr <= rxerr ? state : 0;
- if (likely(skb))
- can_change_state(dev, cf, tx_state, rx_state);
- else
- priv->can.state = state;
+ /* The skb allocation might fail, but can_change_state()
+ * handles cf == NULL.
+ */
+ can_change_state(dev, cf, tx_state, rx_state);
if (state == CAN_STATE_BUS_OFF)
can_bus_off(dev);
}
@@ -653,8 +657,8 @@ static irqreturn_t sun4i_can_interrupt(int irq, void *dev_id)
u8 isrc, status;
int n = 0;
- while ((isrc = readl(priv->base + SUN4I_REG_INT_ADDR)) &&
- (n < SUN4I_CAN_MAX_IRQ)) {
+ while ((n < SUN4I_CAN_MAX_IRQ) &&
+ (isrc = readl(priv->base + SUN4I_REG_INT_ADDR))) {
n++;
status = readl(priv->base + SUN4I_REG_STA_ADDR);
@@ -914,7 +918,7 @@ static struct platform_driver sun4i_can_driver = {
.of_match_table = sun4ican_of_match,
},
.probe = sun4ican_probe,
- .remove_new = sun4ican_remove,
+ .remove = sun4ican_remove,
};
module_platform_driver(sun4i_can_driver);
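The sun4i change (and the similar ems_usb and f81604 changes further down) follows one pattern: account a bus error to tx_errors or rx_errors based on the controller's error-direction bit, independently of whether an error skb could be allocated. A minimal sketch of that accounting (the bit position is illustrative):

#include <stdint.h>
#include <stdio.h>

#define STA_ERR_DIR	(1u << 5)	/* illustrative direction bit */

struct err_stats {
	unsigned long tx_errors;
	unsigned long rx_errors;
};

static void account_bus_error(struct err_stats *stats, uint32_t ecc)
{
	if ((ecc & STA_ERR_DIR) == 0)
		stats->tx_errors++;	/* error occurred during transmission */
	else
		stats->rx_errors++;
}

int main(void)
{
	struct err_stats s = { 0, 0 };

	account_bus_error(&s, 0);		/* TX-side error */
	account_bus_error(&s, STA_ERR_DIR);	/* RX-side error */
	printf("tx=%lu rx=%lu\n", s.tx_errors, s.rx_errors);
	return 0;
}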
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 5aab440074c6..1d3dbf28b105 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -383,7 +383,7 @@ static void ti_hecc_start(struct net_device *ndev)
* overflows instead of the hardware silently dropping the
* messages.
*/
- mbx_mask = ~BIT(HECC_RX_LAST_MBOX);
+ mbx_mask = ~BIT_U32(HECC_RX_LAST_MBOX);
hecc_write(priv, HECC_CANOPC, mbx_mask);
/* Enable interrupts */
@@ -829,7 +829,6 @@ static const struct net_device_ops ti_hecc_netdev_ops = {
.ndo_open = ti_hecc_open,
.ndo_stop = ti_hecc_close,
.ndo_start_xmit = ti_hecc_xmit,
- .ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops ti_hecc_ethtool_ops = {
@@ -1025,7 +1024,7 @@ static struct platform_driver ti_hecc_driver = {
.of_match_table = ti_hecc_dt_ids,
},
.probe = ti_hecc_probe,
- .remove_new = ti_hecc_remove,
+ .remove = ti_hecc_remove,
.suspend = ti_hecc_suspend,
.resume = ti_hecc_resume,
};
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index bd58c636d465..cf65a90816b9 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -17,11 +17,12 @@ config CAN_EMS_USB
config CAN_ESD_USB
tristate "esd electronics gmbh CAN/USB interfaces"
help
- This driver adds supports for several CAN/USB interfaces
+ This driver adds support for several CAN/USB interfaces
from esd electronics gmbh (https://www.esd.eu).
The driver supports the following devices:
- esd CAN-USB/2
+ - esd CAN-USB/3-FD
- esd CAN-USB/Micro
To compile this driver as a module, choose M here: the module
@@ -65,6 +66,7 @@ config CAN_GS_USB
config CAN_KVASER_USB
tristate "Kvaser CAN/USB interface"
+ select NET_DEVLINK
help
This driver adds support for Kvaser CAN/USB devices like Kvaser
Leaf Light, Kvaser USBcan II and Kvaser Memorator Pro 5xHS.
@@ -91,6 +93,7 @@ config CAN_KVASER_USB
- Kvaser Leaf Light R v2
- Kvaser Mini PCI Express HS
- Kvaser Mini PCI Express 2xHS
+ - Kvaser Mini PCIe 1xCAN
- Kvaser USBcan Light 2xHS
- Kvaser USBcan II HS/HS
- Kvaser USBcan II HS/LS
@@ -111,12 +114,14 @@ config CAN_KVASER_USB
- Kvaser USBcan Light 4xHS
- Kvaser USBcan Pro 2xHS v2
- Kvaser USBcan Pro 4xHS
+ - Kvaser USBcan Pro 5xCAN
- Kvaser USBcan Pro 5xHS
- Kvaser U100
- Kvaser U100P
- Kvaser U100S
- ATI Memorator Pro 2xHS v2
- ATI USBcan Pro 2xHS v2
+ - Vining 800
If unsure, say N.
@@ -129,6 +134,17 @@ config CAN_MCBA_USB
This driver supports the CAN BUS Analyzer interface
from Microchip (http://www.microchip.com/development-tools/).
+config CAN_NCT6694
+ tristate "Nuvoton NCT6694 Socket CANfd support"
+ depends on MFD_NCT6694
+ select CAN_RX_OFFLOAD
+ help
+ If you say yes to this option, support will be included for the
+ Nuvoton NCT6694, a USB device that provides a Socket CANfd controller.
+
+ This driver can also be built as a module. If so, the module will
+ be called nct6694_canfd.
+
config CAN_PEAK_USB
tristate "PEAK PCAN-USB/USB Pro interfaces for CAN 2.0b/CAN-FD"
help
diff --git a/drivers/net/can/usb/Makefile b/drivers/net/can/usb/Makefile
index 8b11088e9a59..fcafb1ac262e 100644
--- a/drivers/net/can/usb/Makefile
+++ b/drivers/net/can/usb/Makefile
@@ -11,5 +11,6 @@ obj-$(CONFIG_CAN_F81604) += f81604.o
obj-$(CONFIG_CAN_GS_USB) += gs_usb.o
obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb/
obj-$(CONFIG_CAN_MCBA_USB) += mcba_usb.o
+obj-$(CONFIG_CAN_NCT6694) += nct6694_canfd.o
obj-$(CONFIG_CAN_PEAK_USB) += peak_usb/
obj-$(CONFIG_CAN_UCAN) += ucan.o
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 050c0b49938a..de8e212a1366 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -335,15 +335,14 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
struct net_device_stats *stats = &dev->netdev->stats;
skb = alloc_can_err_skb(dev->netdev, &cf);
- if (skb == NULL)
- return;
if (msg->type == CPC_MSG_TYPE_CAN_STATE) {
u8 state = msg->msg.can_state;
if (state & SJA1000_SR_BS) {
dev->can.state = CAN_STATE_BUS_OFF;
- cf->can_id |= CAN_ERR_BUSOFF;
+ if (skb)
+ cf->can_id |= CAN_ERR_BUSOFF;
dev->can.can_stats.bus_off++;
can_bus_off(dev->netdev);
@@ -361,44 +360,53 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
/* bus error interrupt */
dev->can.can_stats.bus_error++;
- stats->rx_errors++;
- cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ if (skb) {
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
- switch (ecc & SJA1000_ECC_MASK) {
- case SJA1000_ECC_BIT:
- cf->data[2] |= CAN_ERR_PROT_BIT;
- break;
- case SJA1000_ECC_FORM:
- cf->data[2] |= CAN_ERR_PROT_FORM;
- break;
- case SJA1000_ECC_STUFF:
- cf->data[2] |= CAN_ERR_PROT_STUFF;
- break;
- default:
- cf->data[3] = ecc & SJA1000_ECC_SEG;
- break;
+ switch (ecc & SJA1000_ECC_MASK) {
+ case SJA1000_ECC_BIT:
+ cf->data[2] |= CAN_ERR_PROT_BIT;
+ break;
+ case SJA1000_ECC_FORM:
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ break;
+ case SJA1000_ECC_STUFF:
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ break;
+ default:
+ cf->data[3] = ecc & SJA1000_ECC_SEG;
+ break;
+ }
}
/* Error occurred during transmission? */
- if ((ecc & SJA1000_ECC_DIR) == 0)
- cf->data[2] |= CAN_ERR_PROT_TX;
+ if ((ecc & SJA1000_ECC_DIR) == 0) {
+ stats->tx_errors++;
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_TX;
+ } else {
+ stats->rx_errors++;
+ }
- if (dev->can.state == CAN_STATE_ERROR_WARNING ||
- dev->can.state == CAN_STATE_ERROR_PASSIVE) {
+ if (skb && (dev->can.state == CAN_STATE_ERROR_WARNING ||
+ dev->can.state == CAN_STATE_ERROR_PASSIVE)) {
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = (txerr > rxerr) ?
CAN_ERR_CRTL_TX_PASSIVE : CAN_ERR_CRTL_RX_PASSIVE;
}
} else if (msg->type == CPC_MSG_TYPE_OVERRUN) {
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ }
stats->rx_over_errors++;
stats->rx_errors++;
}
- netif_rx(skb);
+ if (skb)
+ netif_rx(skb);
}
/*
@@ -877,7 +885,6 @@ static const struct net_device_ops ems_usb_netdev_ops = {
.ndo_open = ems_usb_open,
.ndo_stop = ems_usb_close,
.ndo_start_xmit = ems_usb_start_xmit,
- .ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops ems_usb_ethtool_ops = {
diff --git a/drivers/net/can/usb/esd_usb.c b/drivers/net/can/usb/esd_usb.c
index 41a0e4261d15..08da507faef4 100644
--- a/drivers/net/can/usb/esd_usb.c
+++ b/drivers/net/can/usb/esd_usb.c
@@ -3,12 +3,13 @@
* CAN driver for esd electronics gmbh CAN-USB/2, CAN-USB/3 and CAN-USB/Micro
*
* Copyright (C) 2010-2012 esd electronic system design gmbh, Matthias Fuchs <socketcan@esd.eu>
- * Copyright (C) 2022-2023 esd electronics gmbh, Frank Jungclaus <frank.jungclaus@esd.eu>
+ * Copyright (C) 2022-2024 esd electronics gmbh, Frank Jungclaus <frank.jungclaus@esd.eu>
*/
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
+#include <linux/err.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/netdevice.h>
@@ -274,6 +275,7 @@ struct esd_usb {
int net_count;
u32 version;
int rxinitdone;
+ int in_usb_disconnect;
void *rxbuf[ESD_USB_MAX_RX_URBS];
dma_addr_t rxbuf_dma[ESD_USB_MAX_RX_URBS];
};
@@ -480,7 +482,7 @@ static void esd_usb_tx_done_msg(struct esd_usb_net_priv *priv,
static void esd_usb_read_bulk_callback(struct urb *urb)
{
struct esd_usb *dev = urb->context;
- int retval;
+ int err;
int pos = 0;
int i;
@@ -496,7 +498,7 @@ static void esd_usb_read_bulk_callback(struct urb *urb)
default:
dev_info(dev->udev->dev.parent,
- "Rx URB aborted (%d)\n", urb->status);
+ "Rx URB aborted (%pe)\n", ERR_PTR(urb->status));
goto resubmit_urb;
}
@@ -539,15 +541,15 @@ resubmit_urb:
urb->transfer_buffer, ESD_USB_RX_BUFFER_SIZE,
esd_usb_read_bulk_callback, dev);
- retval = usb_submit_urb(urb, GFP_ATOMIC);
- if (retval == -ENODEV) {
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err == -ENODEV) {
for (i = 0; i < dev->net_count; i++) {
if (dev->nets[i])
netif_device_detach(dev->nets[i]->netdev);
}
- } else if (retval) {
+ } else if (err) {
dev_err(dev->udev->dev.parent,
- "failed resubmitting read bulk urb: %d\n", retval);
+ "failed resubmitting read bulk urb: %pe\n", ERR_PTR(err));
}
}
@@ -572,7 +574,7 @@ static void esd_usb_write_bulk_callback(struct urb *urb)
return;
if (urb->status)
- netdev_info(netdev, "Tx URB aborted (%d)\n", urb->status);
+ netdev_info(netdev, "Tx URB aborted (%pe)\n", ERR_PTR(urb->status));
netif_trans_update(netdev);
}
@@ -758,7 +760,7 @@ out:
if (err == -ENODEV)
netif_device_detach(netdev);
if (err)
- netdev_err(netdev, "couldn't start device: %d\n", err);
+ netdev_err(netdev, "couldn't start device: %pe\n", ERR_PTR(err));
kfree(msg);
return err;
@@ -800,7 +802,6 @@ static int esd_usb_open(struct net_device *netdev)
/* finally start device */
err = esd_usb_start(priv);
if (err) {
- netdev_warn(netdev, "couldn't start device: %d\n", err);
close_candev(netdev);
return err;
}
@@ -923,7 +924,7 @@ static netdev_tx_t esd_usb_start_xmit(struct sk_buff *skb,
if (err == -ENODEV)
netif_device_detach(netdev);
else
- netdev_warn(netdev, "failed tx_urb %d\n", err);
+ netdev_warn(netdev, "failed tx_urb %pe\n", ERR_PTR(err));
goto releasebuf;
}
@@ -947,10 +948,11 @@ nourbmem:
return ret;
}
-static int esd_usb_close(struct net_device *netdev)
+/* Stop interface */
+static int esd_usb_stop(struct esd_usb_net_priv *priv)
{
- struct esd_usb_net_priv *priv = netdev_priv(netdev);
union esd_usb_msg *msg;
+ int err;
int i;
msg = kmalloc(sizeof(*msg), GFP_KERNEL);
@@ -964,8 +966,11 @@ static int esd_usb_close(struct net_device *netdev)
msg->filter.option = ESD_USB_ID_ENABLE; /* start with segment 0 */
for (i = 0; i <= ESD_USB_MAX_ID_SEGMENT; i++)
msg->filter.mask[i] = 0;
- if (esd_usb_send_msg(priv->usb, msg) < 0)
- netdev_err(netdev, "sending idadd message failed\n");
+ err = esd_usb_send_msg(priv->usb, msg);
+ if (err < 0) {
+ netdev_err(priv->netdev, "sending idadd message failed: %pe\n", ERR_PTR(err));
+ goto bail;
+ }
/* set CAN controller to reset mode */
msg->hdr.len = sizeof(struct esd_usb_set_baudrate_msg) / sizeof(u32); /* # of 32bit words */
@@ -973,8 +978,25 @@ static int esd_usb_close(struct net_device *netdev)
msg->setbaud.net = priv->index;
msg->setbaud.rsvd = 0;
msg->setbaud.baud = cpu_to_le32(ESD_USB_NO_BAUDRATE);
- if (esd_usb_send_msg(priv->usb, msg) < 0)
- netdev_err(netdev, "sending setbaud message failed\n");
+ err = esd_usb_send_msg(priv->usb, msg);
+ if (err < 0)
+ netdev_err(priv->netdev, "sending setbaud message failed: %pe\n", ERR_PTR(err));
+
+bail:
+ kfree(msg);
+
+ return err;
+}
+
+static int esd_usb_close(struct net_device *netdev)
+{
+ struct esd_usb_net_priv *priv = netdev_priv(netdev);
+ int err = 0;
+
+ if (!priv->usb->in_usb_disconnect) {
+ /* It's moot to try this in usb_disconnect()! */
+ err = esd_usb_stop(priv);
+ }
priv->can.state = CAN_STATE_STOPPED;
@@ -982,16 +1004,13 @@ static int esd_usb_close(struct net_device *netdev)
close_candev(netdev);
- kfree(msg);
-
- return 0;
+ return err;
}
static const struct net_device_ops esd_usb_netdev_ops = {
.ndo_open = esd_usb_open,
.ndo_stop = esd_usb_close,
.ndo_start_xmit = esd_usb_start_xmit,
- .ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops esd_usb_ethtool_ops = {
@@ -1098,7 +1117,7 @@ static int esd_usb_3_set_bittiming(struct net_device *netdev)
const struct can_bittiming_const *data_btc = &esd_usb_3_data_bittiming_const;
struct esd_usb_net_priv *priv = netdev_priv(netdev);
struct can_bittiming *nom_bt = &priv->can.bittiming;
- struct can_bittiming *data_bt = &priv->can.data_bittiming;
+ struct can_bittiming *data_bt = &priv->can.fd.data_bittiming;
struct esd_usb_3_set_baudrate_msg_x *baud_x;
union esd_usb_msg *msg;
u16 flags = 0;
@@ -1116,9 +1135,6 @@ static int esd_usb_3_set_bittiming(struct net_device *netdev)
if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
flags |= ESD_USB_3_BAUDRATE_FLAG_LOM;
- if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
- flags |= ESD_USB_3_BAUDRATE_FLAG_TRS;
-
baud_x->nom.brp = cpu_to_le16(nom_bt->brp & (nom_btc->brp_max - 1));
baud_x->nom.sjw = cpu_to_le16(nom_bt->sjw & (nom_btc->sjw_max - 1));
baud_x->nom.tseg1 = cpu_to_le16((nom_bt->prop_seg + nom_bt->phase_seg1)
@@ -1219,12 +1235,11 @@ static int esd_usb_probe_one_net(struct usb_interface *intf, int index)
switch (le16_to_cpu(dev->udev->descriptor.idProduct)) {
case ESD_USB_CANUSB3_PRODUCT_ID:
priv->can.clock.freq = ESD_USB_3_CAN_CLOCK;
- priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD;
priv->can.bittiming_const = &esd_usb_3_nom_bittiming_const;
- priv->can.data_bittiming_const = &esd_usb_3_data_bittiming_const;
+ priv->can.fd.data_bittiming_const = &esd_usb_3_data_bittiming_const;
priv->can.do_set_bittiming = esd_usb_3_set_bittiming;
- priv->can.do_set_data_bittiming = esd_usb_3_set_bittiming;
+ priv->can.fd.do_set_data_bittiming = esd_usb_3_set_bittiming;
break;
case ESD_USB_CANUSBM_PRODUCT_ID:
@@ -1255,14 +1270,14 @@ static int esd_usb_probe_one_net(struct usb_interface *intf, int index)
err = register_candev(netdev);
if (err) {
- dev_err(&intf->dev, "couldn't register CAN device: %d\n", err);
+ dev_err(&intf->dev, "couldn't register CAN device: %pe\n", ERR_PTR(err));
free_candev(netdev);
err = -ENOMEM;
goto done;
}
dev->nets[index] = priv;
- netdev_info(netdev, "device %s registered\n", netdev->name);
+ netdev_info(netdev, "registered\n");
done:
return err;
@@ -1358,9 +1373,11 @@ static void esd_usb_disconnect(struct usb_interface *intf)
usb_set_intfdata(intf, NULL);
if (dev) {
+ dev->in_usb_disconnect = 1;
for (i = 0; i < dev->net_count; i++) {
if (dev->nets[i]) {
netdev = dev->nets[i]->netdev;
+ netdev_info(netdev, "unregister\n");
unregister_netdev(netdev);
free_candev(netdev);
}
diff --git a/drivers/net/can/usb/etas_es58x/es581_4.c b/drivers/net/can/usb/etas_es58x/es581_4.c
index 4151b18fd045..1888ca1de7b6 100644
--- a/drivers/net/can/usb/etas_es58x/es581_4.c
+++ b/drivers/net/can/usb/etas_es58x/es581_4.c
@@ -9,7 +9,7 @@
* Copyright (c) 2020-2022 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/kernel.h>
#include <linux/units.h>
diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.c b/drivers/net/can/usb/etas_es58x/es58x_core.c
index 5e3a72b7c469..f799233c2b72 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_core.c
+++ b/drivers/net/can/usb/etas_es58x/es58x_core.c
@@ -7,10 +7,10 @@
*
* Copyright (c) 2019 Robert Bosch Engineering and Business Solutions. All rights reserved.
* Copyright (c) 2020 ETAS K.K.. All rights reserved.
- * Copyright (c) 2020-2022 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
+ * Copyright (c) 2020-2025 Vincent Mailhol <mailhol@kernel.org>
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/crc16.h>
#include <linux/ethtool.h>
#include <linux/kernel.h>
@@ -1976,7 +1976,8 @@ static const struct net_device_ops es58x_netdev_ops = {
.ndo_open = es58x_open,
.ndo_stop = es58x_stop,
.ndo_start_xmit = es58x_start_xmit,
- .ndo_eth_ioctl = can_eth_ioctl_hwts,
+ .ndo_hwtstamp_get = can_hwtstamp_get,
+ .ndo_hwtstamp_set = can_hwtstamp_set,
};
static const struct ethtool_ops es58x_ethtool_ops = {
@@ -2059,8 +2060,8 @@ static int es58x_init_priv(struct es58x_device *es58x_dev,
can->bittiming_const = param->bittiming_const;
if (param->ctrlmode_supported & CAN_CTRLMODE_FD) {
- can->data_bittiming_const = param->data_bittiming_const;
- can->tdc_const = param->tdc_const;
+ can->fd.data_bittiming_const = param->data_bittiming_const;
+ can->fd.tdc_const = param->tdc_const;
}
can->bitrate_max = param->bitrate_max;
can->clock = param->clock;
diff --git a/drivers/net/can/usb/etas_es58x/es58x_devlink.c b/drivers/net/can/usb/etas_es58x/es58x_devlink.c
index 635edeb8f68c..0d155eb1b9e9 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_devlink.c
+++ b/drivers/net/can/usb/etas_es58x/es58x_devlink.c
@@ -215,7 +215,7 @@ static int es58x_devlink_info_get(struct devlink *devlink,
struct es58x_sw_version *fw_ver = &es58x_dev->firmware_version;
struct es58x_sw_version *bl_ver = &es58x_dev->bootloader_version;
struct es58x_hw_revision *hw_rev = &es58x_dev->hardware_revision;
- char buf[max(sizeof("xx.xx.xx"), sizeof("axxx/xxx"))];
+ char buf[MAX(sizeof("xx.xx.xx"), sizeof("axxx/xxx"))];
int ret = 0;
if (es58x_sw_version_is_valid(fw_ver)) {
@@ -248,7 +248,11 @@ static int es58x_devlink_info_get(struct devlink *devlink,
return ret;
}
- return devlink_info_serial_number_put(req, es58x_dev->udev->serial);
+ if (es58x_dev->udev->serial)
+ ret = devlink_info_serial_number_put(req,
+ es58x_dev->udev->serial);
+
+ return ret;
}
const struct devlink_ops es58x_dl_ops = {
diff --git a/drivers/net/can/usb/etas_es58x/es58x_fd.c b/drivers/net/can/usb/etas_es58x/es58x_fd.c
index fa87b0b78e3e..6476add1c105 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_fd.c
+++ b/drivers/net/can/usb/etas_es58x/es58x_fd.c
@@ -11,7 +11,7 @@
* Copyright (c) 2020-2022 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/kernel.h>
#include <linux/units.h>
@@ -427,12 +427,12 @@ static int es58x_fd_enable_channel(struct es58x_priv *priv)
if (tx_conf_msg.canfd_enabled) {
es58x_fd_convert_bittiming(&tx_conf_msg.data_bittiming,
- &priv->can.data_bittiming);
+ &priv->can.fd.data_bittiming);
- if (can_tdc_is_enabled(&priv->can)) {
+ if (can_fd_tdc_is_enabled(&priv->can)) {
tx_conf_msg.tdc_enabled = 1;
- tx_conf_msg.tdco = cpu_to_le16(priv->can.tdc.tdco);
- tx_conf_msg.tdcf = cpu_to_le16(priv->can.tdc.tdcf);
+ tx_conf_msg.tdco = cpu_to_le16(priv->can.fd.tdc.tdco);
+ tx_conf_msg.tdcf = cpu_to_le16(priv->can.fd.tdc.tdcf);
}
conf_len = ES58X_FD_CANFD_CONF_LEN;
diff --git a/drivers/net/can/usb/f81604.c b/drivers/net/can/usb/f81604.c
index ec8cef7fd2d5..efe61ece79ea 100644
--- a/drivers/net/can/usb/f81604.c
+++ b/drivers/net/can/usb/f81604.c
@@ -13,7 +13,7 @@
#include <linux/can/error.h>
#include <linux/can/platform/sja1000.h>
-#include <asm-generic/unaligned.h>
+#include <linux/unaligned.h>
/* vendor and product id */
#define F81604_VENDOR_ID 0x2c42
@@ -526,7 +526,6 @@ static void f81604_handle_can_bus_errors(struct f81604_port_priv *priv,
netdev_dbg(netdev, "bus error interrupt\n");
priv->can.can_stats.bus_error++;
- stats->rx_errors++;
if (skb) {
cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
@@ -548,10 +547,15 @@ static void f81604_handle_can_bus_errors(struct f81604_port_priv *priv,
/* set error location */
cf->data[3] = data->ecc & F81604_SJA1000_ECC_SEG;
+ }
- /* Error occurred during transmission? */
- if ((data->ecc & F81604_SJA1000_ECC_DIR) == 0)
+ /* Error occurred during transmission? */
+ if ((data->ecc & F81604_SJA1000_ECC_DIR) == 0) {
+ stats->tx_errors++;
+ if (skb)
cf->data[2] |= CAN_ERR_PROT_TX;
+ } else {
+ stats->rx_errors++;
}
set_bit(F81604_CLEAR_ECC, &priv->clear_flags);
@@ -1048,7 +1052,6 @@ static const struct net_device_ops f81604_netdev_ops = {
.ndo_open = f81604_open,
.ndo_stop = f81604_close,
.ndo_start_xmit = f81604_start_xmit,
- .ndo_change_mtu = can_change_mtu,
};
static const struct can_bittiming_const f81604_bittiming_const = {
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 65c962f76898..e29e85b67fd4 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -40,8 +40,11 @@
#define USB_ABE_CANDEBUGGER_FD_VENDOR_ID 0x16d0
#define USB_ABE_CANDEBUGGER_FD_PRODUCT_ID 0x10b8
-#define GS_USB_ENDPOINT_IN 1
-#define GS_USB_ENDPOINT_OUT 2
+#define USB_XYLANTA_SAINT3_VENDOR_ID 0x16d0
+#define USB_XYLANTA_SAINT3_PRODUCT_ID 0x0f30
+
+#define USB_CANNECTIVITY_VENDOR_ID 0x1209
+#define USB_CANNECTIVITY_PRODUCT_ID 0xca01
/* Timestamp 32 bit timer runs at 1 MHz (1 µs tick). Worker accounts
* for timer overflow (will be after ~71 minutes)
@@ -258,14 +261,21 @@ struct canfd_quirk {
u8 quirk;
} __packed;
+/* struct gs_host_frame::echo_id == GS_HOST_FRAME_ECHO_ID_RX indicates
+ * a regular RX'ed CAN frame
+ */
+#define GS_HOST_FRAME_ECHO_ID_RX 0xffffffff
+
struct gs_host_frame {
- u32 echo_id;
- __le32 can_id;
+ struct_group(header,
+ u32 echo_id;
+ __le32 can_id;
- u8 can_dlc;
- u8 channel;
- u8 flags;
- u8 reserved;
+ u8 can_dlc;
+ u8 channel;
+ u8 flags;
+ u8 reserved;
+ );
union {
DECLARE_FLEX_ARRAY(struct classic_can, classic_can);
@@ -286,11 +296,6 @@ struct gs_host_frame {
#define GS_MAX_RX_URBS 30
#define GS_NAPI_WEIGHT 32
-/* Maximum number of interfaces the driver supports per device.
- * Current hardware only supports 3 interfaces. The future may vary.
- */
-#define GS_MAX_INTF 3
-
struct gs_tx_context {
struct gs_can *dev;
unsigned int echo_id;
@@ -321,7 +326,6 @@ struct gs_can {
/* usb interface struct */
struct gs_usb {
- struct gs_can *canch[GS_MAX_INTF];
struct usb_anchor rx_submitted;
struct usb_device *udev;
@@ -333,6 +337,11 @@ struct gs_usb {
unsigned int hf_size_rx;
u8 active_channels;
+ u8 channel_cnt;
+
+ unsigned int pipe_in;
+ unsigned int pipe_out;
+ struct gs_can *canch[] __counted_by(channel_cnt);
};
/* 'allocate' a tx context.
@@ -414,7 +423,7 @@ static inline int gs_usb_get_timestamp(const struct gs_usb *parent,
return 0;
}
-static u64 gs_usb_timestamp_read(const struct cyclecounter *cc) __must_hold(&dev->tc_lock)
+static u64 gs_usb_timestamp_read(struct cyclecounter *cc) __must_hold(&dev->tc_lock)
{
struct gs_usb *parent = container_of(cc, struct gs_usb, cc);
u32 timestamp = 0;
@@ -566,6 +575,37 @@ gs_usb_get_echo_skb(struct gs_can *dev, struct sk_buff *skb,
return len;
}
+static unsigned int
+gs_usb_get_minimum_rx_length(const struct gs_can *dev, const struct gs_host_frame *hf,
+ unsigned int *data_length_p)
+{
+ unsigned int minimum_length, data_length = 0;
+
+ if (hf->flags & GS_CAN_FLAG_FD) {
+ if (hf->echo_id == GS_HOST_FRAME_ECHO_ID_RX)
+ data_length = can_fd_dlc2len(hf->can_dlc);
+
+ if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ /* timestamp follows data field of max size */
+ minimum_length = struct_size(hf, canfd_ts, 1);
+ else
+ minimum_length = sizeof(hf->header) + data_length;
+ } else {
+ if (hf->echo_id == GS_HOST_FRAME_ECHO_ID_RX &&
+ !(hf->can_id & cpu_to_le32(CAN_RTR_FLAG)))
+ data_length = can_cc_dlc2len(hf->can_dlc);
+
+ if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ /* timestamp follows data field of max size */
+ minimum_length = struct_size(hf, classic_can_ts, 1);
+ else
+ minimum_length = sizeof(hf->header) + data_length;
+ }
+
+ *data_length_p = data_length;
+ return minimum_length;
+}
+
static void gs_usb_receive_bulk_callback(struct urb *urb)
{
struct gs_usb *parent = urb->context;
@@ -574,6 +614,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
int rc;
struct net_device_stats *stats;
struct gs_host_frame *hf = urb->transfer_buffer;
+ unsigned int minimum_length, data_length;
struct gs_tx_context *txc;
struct can_frame *cf;
struct canfd_frame *cfd;
@@ -592,8 +633,17 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
return;
}
+ minimum_length = sizeof(hf->header);
+ if (urb->actual_length < minimum_length) {
+ dev_err_ratelimited(&parent->udev->dev,
+ "short read (actual_length=%u, minimum_length=%u)\n",
+ urb->actual_length, minimum_length);
+
+ goto resubmit_urb;
+ }
+
/* device reports out of range channel id */
- if (hf->channel >= GS_MAX_INTF)
+ if (hf->channel >= parent->channel_cnt)
goto device_detach;
dev = parent->canch[hf->channel];
@@ -607,20 +657,33 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
if (!netif_running(netdev))
goto resubmit_urb;
- if (hf->echo_id == -1) { /* normal rx */
+ minimum_length = gs_usb_get_minimum_rx_length(dev, hf, &data_length);
+ if (urb->actual_length < minimum_length) {
+ stats->rx_errors++;
+ stats->rx_length_errors++;
+
+ if (net_ratelimit())
+ netdev_err(netdev,
+ "short read (actual_length=%u, minimum_length=%u)\n",
+ urb->actual_length, minimum_length);
+
+ goto resubmit_urb;
+ }
+
+ if (hf->echo_id == GS_HOST_FRAME_ECHO_ID_RX) { /* normal rx */
if (hf->flags & GS_CAN_FLAG_FD) {
skb = alloc_canfd_skb(netdev, &cfd);
if (!skb)
return;
cfd->can_id = le32_to_cpu(hf->can_id);
- cfd->len = can_fd_dlc2len(hf->can_dlc);
+ cfd->len = data_length;
if (hf->flags & GS_CAN_FLAG_BRS)
cfd->flags |= CANFD_BRS;
if (hf->flags & GS_CAN_FLAG_ESI)
cfd->flags |= CANFD_ESI;
- memcpy(cfd->data, hf->canfd->data, cfd->len);
+ memcpy(cfd->data, hf->canfd->data, data_length);
} else {
skb = alloc_can_skb(netdev, &cf);
if (!skb)
@@ -629,7 +692,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
cf->can_id = le32_to_cpu(hf->can_id);
can_frame_set_cc_len(cf, hf->can_dlc, dev->can.ctrlmode);
- memcpy(cf->data, hf->classic_can->data, 8);
+ memcpy(cf->data, hf->classic_can->data, data_length);
/* ERROR frames tell us information about the controller */
if (le32_to_cpu(hf->can_id) & CAN_ERR_FLAG)
@@ -684,8 +747,8 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
resubmit_urb:
usb_fill_bulk_urb(urb, parent->udev,
- usb_rcvbulkpipe(parent->udev, GS_USB_ENDPOINT_IN),
- hf, dev->parent->hf_size_rx,
+ parent->pipe_in,
+ hf, parent->hf_size_rx,
gs_usb_receive_bulk_callback, parent);
rc = usb_submit_urb(urb, GFP_ATOMIC);
@@ -693,7 +756,7 @@ resubmit_urb:
/* USB failure take down all interfaces */
if (rc == -ENODEV) {
device_detach:
- for (rc = 0; rc < GS_MAX_INTF; rc++) {
+ for (rc = 0; rc < parent->channel_cnt; rc++) {
if (parent->canch[rc])
netif_device_detach(parent->canch[rc]->netdev);
}
@@ -722,7 +785,7 @@ static int gs_usb_set_bittiming(struct net_device *netdev)
static int gs_usb_set_data_bittiming(struct net_device *netdev)
{
struct gs_can *dev = netdev_priv(netdev);
- struct can_bittiming *bt = &dev->can.data_bittiming;
+ struct can_bittiming *bt = &dev->can.fd.data_bittiming;
struct gs_device_bittiming dbt = {
.prop_seg = cpu_to_le32(bt->prop_seg),
.phase_seg1 = cpu_to_le32(bt->phase_seg1),
@@ -748,8 +811,21 @@ static void gs_usb_xmit_callback(struct urb *urb)
struct gs_can *dev = txc->dev;
struct net_device *netdev = dev->netdev;
- if (urb->status)
- netdev_info(netdev, "usb xmit fail %u\n", txc->echo_id);
+ if (!urb->status)
+ return;
+
+ if (urb->status != -ESHUTDOWN && net_ratelimit())
+ netdev_info(netdev, "failed to xmit URB %u: %pe\n",
+ txc->echo_id, ERR_PTR(urb->status));
+
+ netdev->stats.tx_dropped++;
+ netdev->stats.tx_errors++;
+
+ can_free_echo_skb(netdev, txc->echo_id, NULL);
+ gs_free_tx_context(txc);
+ atomic_dec(&dev->active_tx_urbs);
+
+ netif_wake_queue(netdev);
}
static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
@@ -816,7 +892,7 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
}
usb_fill_bulk_urb(urb, dev->udev,
- usb_sndbulkpipe(dev->udev, GS_USB_ENDPOINT_OUT),
+ dev->parent->pipe_out,
hf, dev->hf_size_tx,
gs_usb_xmit_callback, txc);
@@ -922,8 +998,7 @@ static int gs_can_open(struct net_device *netdev)
/* fill, anchor, and submit rx urb */
usb_fill_bulk_urb(urb,
dev->udev,
- usb_rcvbulkpipe(dev->udev,
- GS_USB_ENDPOINT_IN),
+ dev->parent->pipe_in,
buf,
dev->parent->hf_size_rx,
gs_usb_receive_bulk_callback, parent);
@@ -1086,12 +1161,25 @@ static int gs_can_close(struct net_device *netdev)
return 0;
}
-static int gs_can_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+static int gs_can_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *cfg)
{
const struct gs_can *dev = netdev_priv(netdev);
if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
- return can_eth_ioctl_hwts(netdev, ifr, cmd);
+ return can_hwtstamp_get(netdev, cfg);
+
+ return -EOPNOTSUPP;
+}
+
+static int gs_can_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
+{
+ const struct gs_can *dev = netdev_priv(netdev);
+
+ if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ return can_hwtstamp_set(netdev, cfg, extack);
return -EOPNOTSUPP;
}
@@ -1100,8 +1188,8 @@ static const struct net_device_ops gs_usb_netdev_ops = {
.ndo_open = gs_can_open,
.ndo_stop = gs_can_close,
.ndo_start_xmit = gs_can_start_xmit,
- .ndo_change_mtu = can_change_mtu,
- .ndo_eth_ioctl = gs_can_eth_ioctl,
+ .ndo_hwtstamp_get = gs_can_hwtstamp_get,
+ .ndo_hwtstamp_set = gs_can_hwtstamp_set,
};
static int gs_usb_set_identify(struct net_device *netdev, bool do_identify)
@@ -1145,7 +1233,7 @@ static int gs_usb_set_phys_id(struct net_device *netdev,
}
static int gs_usb_get_ts_info(struct net_device *netdev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct gs_can *dev = netdev_priv(netdev);
@@ -1244,6 +1332,7 @@ static struct gs_can *gs_make_candev(unsigned int channel,
netdev->flags |= IFF_ECHO; /* we support full roundtrip echo */
netdev->dev_id = channel;
+ netdev->dev_port = channel;
/* dev setup */
strcpy(dev->bt_const.name, KBUILD_MODNAME);
@@ -1295,8 +1384,8 @@ static struct gs_can *gs_make_candev(unsigned int channel,
/* The data bit timing will be overwritten, if
* GS_CAN_FEATURE_BT_CONST_EXT is set.
*/
- dev->can.data_bittiming_const = &dev->bt_const;
- dev->can.do_set_data_bittiming = gs_usb_set_data_bittiming;
+ dev->can.fd.data_bittiming_const = &dev->bt_const;
+ dev->can.fd.do_set_data_bittiming = gs_usb_set_data_bittiming;
}
if (feature & GS_CAN_FEATURE_TERMINATION) {
@@ -1376,7 +1465,7 @@ static struct gs_can *gs_make_candev(unsigned int channel,
dev->data_bt_const.brp_max = le32_to_cpu(bt_const_extended.dbrp_max);
dev->data_bt_const.brp_inc = le32_to_cpu(bt_const_extended.dbrp_inc);
- dev->can.data_bittiming_const = &dev->data_bt_const;
+ dev->can.fd.data_bittiming_const = &dev->data_bt_const;
}
can_rx_offload_add_manual(netdev, &dev->offload, GS_NAPI_WEIGHT);
@@ -1410,6 +1499,7 @@ static int gs_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(intf);
+ struct usb_endpoint_descriptor *ep_in, *ep_out;
struct gs_host_frame *hf;
struct gs_usb *parent;
struct gs_host_config hconf = {
@@ -1419,6 +1509,13 @@ static int gs_usb_probe(struct usb_interface *intf,
unsigned int icount, i;
int rc;
+ rc = usb_find_common_endpoints(intf->cur_altsetting,
+ &ep_in, &ep_out, NULL, NULL);
+ if (rc) {
+ dev_err(&intf->dev, "Required endpoints not found\n");
+ return rc;
+ }
+
/* send host config */
rc = usb_control_msg_send(udev, 0,
GS_USB_BREQ_HOST_FORMAT,
@@ -1447,22 +1544,28 @@ static int gs_usb_probe(struct usb_interface *intf,
icount = dconf.icount + 1;
dev_info(&intf->dev, "Configuring for %u interfaces\n", icount);
- if (icount > GS_MAX_INTF) {
+ if (icount > type_max(parent->channel_cnt)) {
dev_err(&intf->dev,
"Driver cannot handle more that %u CAN interfaces\n",
- GS_MAX_INTF);
+ type_max(parent->channel_cnt));
return -EINVAL;
}
- parent = kzalloc(sizeof(*parent), GFP_KERNEL);
+ parent = kzalloc(struct_size(parent, canch, icount), GFP_KERNEL);
if (!parent)
return -ENOMEM;
+ parent->channel_cnt = icount;
+
init_usb_anchor(&parent->rx_submitted);
usb_set_intfdata(intf, parent);
parent->udev = udev;
+ /* store the detected endpoints */
+ parent->pipe_in = usb_rcvbulkpipe(parent->udev, ep_in->bEndpointAddress);
+ parent->pipe_out = usb_sndbulkpipe(parent->udev, ep_out->bEndpointAddress);
+
for (i = 0; i < icount; i++) {
unsigned int hf_size_rx = 0;
@@ -1514,7 +1617,7 @@ static void gs_usb_disconnect(struct usb_interface *intf)
return;
}
- for (i = 0; i < GS_MAX_INTF; i++)
+ for (i = 0; i < parent->channel_cnt; i++)
if (parent->canch[i])
gs_destroy_candev(parent->canch[i]);
@@ -1530,6 +1633,10 @@ static const struct usb_device_id gs_usb_table[] = {
USB_CES_CANEXT_FD_PRODUCT_ID, 0) },
{ USB_DEVICE_INTERFACE_NUMBER(USB_ABE_CANDEBUGGER_FD_VENDOR_ID,
USB_ABE_CANDEBUGGER_FD_PRODUCT_ID, 0) },
+ { USB_DEVICE_INTERFACE_NUMBER(USB_XYLANTA_SAINT3_VENDOR_ID,
+ USB_XYLANTA_SAINT3_PRODUCT_ID, 0) },
+ { USB_DEVICE_INTERFACE_NUMBER(USB_CANNECTIVITY_VENDOR_ID,
+ USB_CANNECTIVITY_PRODUCT_ID, 0) },
{} /* Terminating entry */
};
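gs_usb now sizes its parent structure for the number of channels the device actually reports instead of a fixed GS_MAX_INTF, using a flexible array member (struct_size() plus __counted_by() in the kernel). A user-space model of the same allocation pattern:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct parent {
	uint8_t channel_cnt;
	void *canch[];		/* one slot per reported channel */
};

static struct parent *parent_alloc(uint8_t icount)
{
	struct parent *p;

	/* equivalent of kzalloc(struct_size(parent, canch, icount), ...) */
	p = calloc(1, offsetof(struct parent, canch) + (size_t)icount * sizeof(p->canch[0]));
	if (p)
		p->channel_cnt = icount;
	return p;
}

int main(void)
{
	struct parent *p = parent_alloc(5);

	if (!p)
		return 1;
	printf("allocated %u channel slots\n", p->channel_cnt);
	free(p);
	return 0;
}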
diff --git a/drivers/net/can/usb/kvaser_usb/Makefile b/drivers/net/can/usb/kvaser_usb/Makefile
index cf260044f0b9..41b4a11555aa 100644
--- a/drivers/net/can/usb/kvaser_usb/Makefile
+++ b/drivers/net/can/usb/kvaser_usb/Makefile
@@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb.o
-kvaser_usb-y = kvaser_usb_core.o kvaser_usb_leaf.o kvaser_usb_hydra.o
+kvaser_usb-y = kvaser_usb_core.o kvaser_usb_devlink.o kvaser_usb_leaf.o kvaser_usb_hydra.o
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
index ff10b3790d84..46a1b6907a50 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
@@ -22,9 +22,12 @@
*/
#include <linux/completion.h>
+#include <linux/ktime.h>
+#include <linux/math64.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/usb.h>
+#include <net/devlink.h>
#include <linux/can.h>
#include <linux/can/dev.h>
@@ -39,13 +42,16 @@
#define KVASER_USB_QUIRK_HAS_SILENT_MODE BIT(0)
#define KVASER_USB_QUIRK_HAS_TXRX_ERRORS BIT(1)
#define KVASER_USB_QUIRK_IGNORE_CLK_FREQ BIT(2)
-#define KVASER_USB_QUIRK_HAS_HARDWARE_TIMESTAMP BIT(3)
/* Device capabilities */
#define KVASER_USB_CAP_BERR_CAP 0x01
#define KVASER_USB_CAP_EXT_CAP 0x02
#define KVASER_USB_HYDRA_CAP_EXT_CMD 0x04
+#define KVASER_USB_SW_VERSION_MAJOR_MASK GENMASK(31, 24)
+#define KVASER_USB_SW_VERSION_MINOR_MASK GENMASK(23, 16)
+#define KVASER_USB_SW_VERSION_BUILD_MASK GENMASK(15, 0)
+
struct kvaser_usb_dev_cfg;
enum kvaser_usb_leaf_family {
@@ -53,6 +59,11 @@ enum kvaser_usb_leaf_family {
KVASER_USBCAN,
};
+enum kvaser_usb_led_state {
+ KVASER_USB_LED_ON = 0,
+ KVASER_USB_LED_OFF = 1,
+};
+
#define KVASER_USB_HYDRA_MAX_CMD_LEN 128
struct kvaser_usb_dev_card_data_hydra {
u8 channel_to_he[KVASER_USB_MAX_NET_DEVICES];
@@ -68,6 +79,7 @@ struct kvaser_usb_dev_card_data {
u32 ctrlmode_supported;
u32 capabilities;
struct kvaser_usb_dev_card_data_hydra hydra;
+ u32 usbcan_timestamp_msb;
};
/* Context for an outstanding, not yet ACKed, transmission */
@@ -76,6 +88,12 @@ struct kvaser_usb_tx_urb_context {
u32 echo_index;
};
+struct kvaser_usb_fw_version {
+ u8 major;
+ u8 minor;
+ u16 build;
+};
+
struct kvaser_usb_busparams {
__le32 bitrate;
u8 tseg1;
@@ -94,12 +112,15 @@ struct kvaser_usb {
struct usb_endpoint_descriptor *bulk_in, *bulk_out;
struct usb_anchor rx_submitted;
+ u32 ean[2];
+ u32 serial_number;
+ struct kvaser_usb_fw_version fw_version;
+ u8 hw_revision;
+ unsigned int nchannels;
/* @max_tx_urbs: Firmware-reported maximum number of outstanding,
* not yet ACKed, transmissions on this device. This value is
* also used as a sentinel for marking free tx contexts.
*/
- u32 fw_version;
- unsigned int nchannels;
unsigned int max_tx_urbs;
struct kvaser_usb_dev_card_data card_data;
@@ -110,6 +131,7 @@ struct kvaser_usb {
struct kvaser_usb_net_priv {
struct can_priv can;
+ struct devlink_port devlink_port;
struct can_berr_counter bec;
/* subdriver-specific data */
@@ -135,7 +157,7 @@ struct kvaser_usb_net_priv {
* @dev_set_mode: used for can.do_set_mode
* @dev_set_bittiming: used for can.do_set_bittiming
* @dev_get_busparams: readback arbitration busparams
- * @dev_set_data_bittiming: used for can.do_set_data_bittiming
+ * @dev_set_data_bittiming: used for can.fd.do_set_data_bittiming
* @dev_get_data_busparams: readback data busparams
* @dev_get_berr_counter: used for can.do_get_berr_counter
*
@@ -147,6 +169,7 @@ struct kvaser_usb_net_priv {
* @dev_get_software_details: get software details
* @dev_get_card_info: get card info
* @dev_get_capabilities: discover device capabilities
+ * @dev_set_led: turn on/off device LED
*
* @dev_set_opt_mode: set ctrlmode
* @dev_start_chip: start the CAN controller
@@ -174,6 +197,9 @@ struct kvaser_usb_dev_ops {
int (*dev_get_software_details)(struct kvaser_usb *dev);
int (*dev_get_card_info)(struct kvaser_usb *dev);
int (*dev_get_capabilities)(struct kvaser_usb *dev);
+ int (*dev_set_led)(struct kvaser_usb_net_priv *priv,
+ enum kvaser_usb_led_state state,
+ u16 duration_ms);
int (*dev_set_opt_mode)(const struct kvaser_usb_net_priv *priv);
int (*dev_start_chip)(struct kvaser_usb_net_priv *priv);
int (*dev_stop_chip)(struct kvaser_usb_net_priv *priv);
@@ -202,6 +228,11 @@ struct kvaser_usb_dev_cfg {
extern const struct kvaser_usb_dev_ops kvaser_usb_hydra_dev_ops;
extern const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops;
+extern const struct devlink_ops kvaser_usb_devlink_ops;
+
+int kvaser_usb_devlink_port_register(struct kvaser_usb_net_priv *priv);
+void kvaser_usb_devlink_port_unregister(struct kvaser_usb_net_priv *priv);
+
void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv);
int kvaser_usb_recv_cmd(const struct kvaser_usb *dev, void *cmd, int len,
@@ -216,4 +247,26 @@ int kvaser_usb_can_rx_over_error(struct net_device *netdev);
extern const struct can_bittiming_const kvaser_usb_flexc_bittiming_const;
+static inline ktime_t kvaser_usb_ticks_to_ktime(const struct kvaser_usb_dev_cfg *cfg,
+ u64 ticks)
+{
+ return ns_to_ktime(div_u64(ticks * 1000, cfg->timestamp_freq));
+}
+
+static inline ktime_t kvaser_usb_timestamp48_to_ktime(const struct kvaser_usb_dev_cfg *cfg,
+ const __le16 *timestamp)
+{
+ u64 ticks = le16_to_cpu(timestamp[0]) |
+ (u64)(le16_to_cpu(timestamp[1])) << 16 |
+ (u64)(le16_to_cpu(timestamp[2])) << 32;
+
+ return kvaser_usb_ticks_to_ktime(cfg, ticks);
+}
+
+static inline ktime_t kvaser_usb_timestamp64_to_ktime(const struct kvaser_usb_dev_cfg *cfg,
+ __le64 timestamp)
+{
+ return kvaser_usb_ticks_to_ktime(cfg, le64_to_cpu(timestamp));
+}
+
#endif /* KVASER_USB_H */
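
The ticks-to-ktime helpers above only yield nanoseconds if cfg->timestamp_freq is expressed in MHz (ticks * 1000 / freq); the 16/24/32 values assigned to timestamp_freq in the leaf dev_cfg tables further down are consistent with that reading. A minimal user-space sketch of the same conversion under that MHz assumption; ticks_to_ns is an illustrative name, not part of the driver:

#include <stdint.h>
#include <stdio.h>

/* Mirrors kvaser_usb_ticks_to_ktime(): one tick is 1000/freq ns when the
 * timestamp frequency is given in MHz (an assumption, not stated in the header).
 */
static uint64_t ticks_to_ns(uint64_t ticks, uint32_t timestamp_freq_mhz)
{
	return ticks * 1000 / timestamp_freq_mhz;
}

int main(void)
{
	/* 2400 ticks at 24 MHz -> 100000 ns */
	printf("%llu ns\n", (unsigned long long)ticks_to_ns(2400, 24));
	return 0;
}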
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
index 8faf8a462c05..62701ec34272 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -89,9 +89,12 @@
#define USB_HYBRID_CANLIN_PRODUCT_ID 0x0115
#define USB_HYBRID_PRO_CANLIN_PRODUCT_ID 0x0116
#define USB_LEAF_V3_PRODUCT_ID 0x0117
+#define USB_VINING_800_PRODUCT_ID 0x0119
+#define USB_USBCAN_PRO_5XCAN_PRODUCT_ID 0x011A
+#define USB_MINI_PCIE_1XCAN_PRODUCT_ID 0x011B
static const struct kvaser_usb_driver_info kvaser_usb_driver_info_hydra = {
- .quirks = KVASER_USB_QUIRK_HAS_HARDWARE_TIMESTAMP,
+ .quirks = 0,
.ops = &kvaser_usb_hydra_dev_ops,
};
@@ -125,6 +128,7 @@ static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leaf_err_liste
static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leafimx = {
.quirks = 0,
+ .family = KVASER_LEAF,
.ops = &kvaser_usb_leaf_dev_ops,
};
@@ -238,6 +242,12 @@ static const struct usb_device_id kvaser_usb_table[] = {
.driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_V3_PRODUCT_ID),
.driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_VINING_800_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_5XCAN_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_1XCAN_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
{ }
};
MODULE_DEVICE_TABLE(usb, kvaser_usb_table);
@@ -294,7 +304,7 @@ int kvaser_usb_send_cmd_async(struct kvaser_usb_net_priv *priv, void *cmd,
}
usb_free_urb(urb);
- return 0;
+ return err;
}
int kvaser_usb_can_rx_over_error(struct net_device *netdev)
@@ -354,10 +364,13 @@ resubmit_urb:
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err == -ENODEV) {
for (i = 0; i < dev->nchannels; i++) {
- if (!dev->nets[i])
+ struct kvaser_usb_net_priv *priv;
+
+ priv = dev->nets[i];
+ if (!priv)
continue;
- netif_device_detach(dev->nets[i]->netdev);
+ netif_device_detach(priv->netdev);
}
} else if (err) {
dev_err(&dev->intf->dev,
@@ -582,7 +595,7 @@ static int kvaser_usb_set_data_bittiming(struct net_device *netdev)
struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
struct kvaser_usb *dev = priv->dev;
const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
- struct can_bittiming *dbt = &priv->can.data_bittiming;
+ struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
struct kvaser_usb_busparams busparams;
int tseg1 = dbt->prop_seg + dbt->phase_seg1;
int tseg2 = dbt->phase_seg2;
@@ -743,51 +756,70 @@ freeurb:
return ret;
}
-static const struct net_device_ops kvaser_usb_netdev_ops = {
- .ndo_open = kvaser_usb_open,
- .ndo_stop = kvaser_usb_close,
- .ndo_start_xmit = kvaser_usb_start_xmit,
- .ndo_change_mtu = can_change_mtu,
-};
+static int kvaser_usb_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+ const struct kvaser_usb_dev_ops *ops = priv->dev->driver_info->ops;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ return 3; /* 3 On/Off cycles per second */
+
+ case ETHTOOL_ID_ON:
+ return ops->dev_set_led(priv, KVASER_USB_LED_ON, 1000);
+
+ case ETHTOOL_ID_OFF:
+ return ops->dev_set_led(priv, KVASER_USB_LED_OFF, 1000);
+
+ case ETHTOOL_ID_INACTIVE:
+ /* Turn LED off and restore standard function after 1ms */
+ return ops->dev_set_led(priv, KVASER_USB_LED_OFF, 1);
-static const struct net_device_ops kvaser_usb_netdev_ops_hwts = {
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct net_device_ops kvaser_usb_netdev_ops = {
.ndo_open = kvaser_usb_open,
.ndo_stop = kvaser_usb_close,
- .ndo_eth_ioctl = can_eth_ioctl_hwts,
.ndo_start_xmit = kvaser_usb_start_xmit,
- .ndo_change_mtu = can_change_mtu,
+ .ndo_hwtstamp_get = can_hwtstamp_get,
+ .ndo_hwtstamp_set = can_hwtstamp_set,
};
static const struct ethtool_ops kvaser_usb_ethtool_ops = {
- .get_ts_info = ethtool_op_get_ts_info,
-};
-
-static const struct ethtool_ops kvaser_usb_ethtool_ops_hwts = {
.get_ts_info = can_ethtool_op_get_ts_info_hwts,
+ .set_phys_id = kvaser_usb_set_phys_id,
};
static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev)
{
const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
int i;
+ struct kvaser_usb_net_priv *priv;
for (i = 0; i < dev->nchannels; i++) {
- if (!dev->nets[i])
+ priv = dev->nets[i];
+ if (!priv)
continue;
- unregister_candev(dev->nets[i]->netdev);
+ unregister_candev(priv->netdev);
}
kvaser_usb_unlink_all_urbs(dev);
for (i = 0; i < dev->nchannels; i++) {
- if (!dev->nets[i])
+ priv = dev->nets[i];
+ if (!priv)
continue;
if (ops->dev_remove_channel)
- ops->dev_remove_channel(dev->nets[i]);
+ ops->dev_remove_channel(priv);
- free_candev(dev->nets[i]->netdev);
+ kvaser_usb_devlink_port_unregister(priv);
+ free_candev(priv->netdev);
}
}
@@ -819,7 +851,8 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel)
init_completion(&priv->stop_comp);
init_completion(&priv->flush_comp);
init_completion(&priv->get_busparams_comp);
- priv->can.ctrlmode_supported = CAN_CTRLMODE_CC_LEN8_DLC;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_CC_LEN8_DLC |
+ CAN_CTRLMODE_BERR_REPORTING;
priv->dev = dev;
priv->netdev = netdev;
@@ -842,42 +875,45 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel)
priv->can.ctrlmode_supported |= dev->card_data.ctrlmode_supported;
if (priv->can.ctrlmode_supported & CAN_CTRLMODE_FD) {
- priv->can.data_bittiming_const = dev->cfg->data_bittiming_const;
- priv->can.do_set_data_bittiming = kvaser_usb_set_data_bittiming;
+ priv->can.fd.data_bittiming_const = dev->cfg->data_bittiming_const;
+ priv->can.fd.do_set_data_bittiming = kvaser_usb_set_data_bittiming;
}
netdev->flags |= IFF_ECHO;
netdev->netdev_ops = &kvaser_usb_netdev_ops;
- if (driver_info->quirks & KVASER_USB_QUIRK_HAS_HARDWARE_TIMESTAMP) {
- netdev->netdev_ops = &kvaser_usb_netdev_ops_hwts;
- netdev->ethtool_ops = &kvaser_usb_ethtool_ops_hwts;
- } else {
- netdev->netdev_ops = &kvaser_usb_netdev_ops;
- netdev->ethtool_ops = &kvaser_usb_ethtool_ops;
- }
+ netdev->ethtool_ops = &kvaser_usb_ethtool_ops;
SET_NETDEV_DEV(netdev, &dev->intf->dev);
netdev->dev_id = channel;
+ netdev->dev_port = channel;
dev->nets[channel] = priv;
if (ops->dev_init_channel) {
err = ops->dev_init_channel(priv);
if (err)
- goto err;
+ goto candev_free;
+ }
+
+ err = kvaser_usb_devlink_port_register(priv);
+ if (err) {
+ dev_err(&dev->intf->dev, "Failed to register devlink port\n");
+ goto candev_free;
}
err = register_candev(netdev);
if (err) {
dev_err(&dev->intf->dev, "Failed to register CAN device\n");
- goto err;
+ goto unregister_devlink_port;
}
netdev_dbg(netdev, "device registered\n");
return 0;
-err:
+unregister_devlink_port:
+ kvaser_usb_devlink_port_unregister(priv);
+candev_free:
free_candev(netdev);
dev->nets[channel] = NULL;
return err;
@@ -887,6 +923,7 @@ static int kvaser_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct kvaser_usb *dev;
+ struct devlink *devlink;
int err;
int i;
const struct kvaser_usb_driver_info *driver_info;
@@ -896,18 +933,19 @@ static int kvaser_usb_probe(struct usb_interface *intf,
if (!driver_info)
return -ENODEV;
- dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL);
- if (!dev)
+ devlink = devlink_alloc(&kvaser_usb_devlink_ops, sizeof(*dev), &intf->dev);
+ if (!devlink)
return -ENOMEM;
+ dev = devlink_priv(devlink);
dev->intf = intf;
dev->driver_info = driver_info;
ops = driver_info->ops;
err = ops->dev_setup_endpoints(dev);
if (err) {
- dev_err(&intf->dev, "Cannot get usb endpoint(s)");
- return err;
+ dev_err_probe(&intf->dev, err, "Cannot get usb endpoint(s)");
+ goto free_devlink;
}
dev->udev = interface_to_usbdev(intf);
@@ -920,62 +958,65 @@ static int kvaser_usb_probe(struct usb_interface *intf,
dev->card_data.capabilities = 0;
err = ops->dev_init_card(dev);
if (err) {
- dev_err(&intf->dev,
- "Failed to initialize card, error %d\n", err);
- return err;
+ dev_err_probe(&intf->dev, err,
+ "Failed to initialize card\n");
+ goto free_devlink;
}
err = ops->dev_get_software_info(dev);
if (err) {
- dev_err(&intf->dev,
- "Cannot get software info, error %d\n", err);
- return err;
+ dev_err_probe(&intf->dev, err,
+ "Cannot get software info\n");
+ goto free_devlink;
}
if (ops->dev_get_software_details) {
err = ops->dev_get_software_details(dev);
if (err) {
- dev_err(&intf->dev,
- "Cannot get software details, error %d\n", err);
- return err;
+ dev_err_probe(&intf->dev, err,
+ "Cannot get software details\n");
+ goto free_devlink;
}
}
- if (WARN_ON(!dev->cfg))
- return -ENODEV;
-
- dev_dbg(&intf->dev, "Firmware version: %d.%d.%d\n",
- ((dev->fw_version >> 24) & 0xff),
- ((dev->fw_version >> 16) & 0xff),
- (dev->fw_version & 0xffff));
+ if (WARN_ON(!dev->cfg)) {
+ err = -ENODEV;
+ goto free_devlink;
+ }
dev_dbg(&intf->dev, "Max outstanding tx = %d URBs\n", dev->max_tx_urbs);
err = ops->dev_get_card_info(dev);
if (err) {
- dev_err(&intf->dev, "Cannot get card info, error %d\n", err);
- return err;
+ dev_err_probe(&intf->dev, err,
+ "Cannot get card info\n");
+ goto free_devlink;
}
if (ops->dev_get_capabilities) {
err = ops->dev_get_capabilities(dev);
if (err) {
- dev_err(&intf->dev,
- "Cannot get capabilities, error %d\n", err);
- kvaser_usb_remove_interfaces(dev);
- return err;
+ dev_err_probe(&intf->dev, err,
+ "Cannot get capabilities\n");
+ goto remove_interfaces;
}
}
for (i = 0; i < dev->nchannels; i++) {
err = kvaser_usb_init_one(dev, i);
- if (err) {
- kvaser_usb_remove_interfaces(dev);
- return err;
- }
+ if (err)
+ goto remove_interfaces;
}
+ devlink_register(devlink);
return 0;
+
+remove_interfaces:
+ kvaser_usb_remove_interfaces(dev);
+free_devlink:
+ devlink_free(devlink);
+
+ return err;
}
static void kvaser_usb_disconnect(struct usb_interface *intf)
@@ -988,6 +1029,8 @@ static void kvaser_usb_disconnect(struct usb_interface *intf)
return;
kvaser_usb_remove_interfaces(dev);
+ devlink_unregister(priv_to_devlink(dev));
+ devlink_free(priv_to_devlink(dev));
}
static struct usb_driver kvaser_usb_driver = {
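
With ndo_hwtstamp_get/ndo_hwtstamp_set now set unconditionally (the separate non-timestamping netdev_ops/ethtool_ops variants are gone), user space can request hardware timestamps through the generic timestamping API. A hedged sketch of the user-space side, assuming an interface named can0 and CAP_NET_ADMIN; this is the standard SIOCSHWTSTAMP/SO_TIMESTAMPING interface, not anything specific to this driver:

#include <linux/can.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef SO_TIMESTAMPING
#define SO_TIMESTAMPING 37	/* fallback, value from asm-generic/socket.h */
#endif

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;
	int flags = SOF_TIMESTAMPING_RX_HARDWARE |
		    SOF_TIMESTAMPING_TX_HARDWARE |
		    SOF_TIMESTAMPING_RAW_HARDWARE;
	int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);

	if (s < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "can0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;
	if (ioctl(s, SIOCSHWTSTAMP, &ifr) < 0)	/* enable HW timestamping on can0 */
		perror("SIOCSHWTSTAMP");
	if (setsockopt(s, SOL_SOCKET, SO_TIMESTAMPING, &flags, sizeof(flags)) < 0)
		perror("SO_TIMESTAMPING");
	/* received frames now carry hardware timestamps in SCM_TIMESTAMPING cmsgs */
	close(s);
	return 0;
}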
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_devlink.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_devlink.c
new file mode 100644
index 000000000000..e838b82298ae
--- /dev/null
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_devlink.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0
+/* kvaser_usb devlink functions
+ *
+ * Copyright (C) 2025 KVASER AB, Sweden. All rights reserved.
+ */
+#include "kvaser_usb.h"
+
+#include <linux/netdevice.h>
+#include <net/devlink.h>
+
+#define KVASER_USB_EAN_MSB 0x00073301
+
+static int kvaser_usb_devlink_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct kvaser_usb *dev = devlink_priv(devlink);
+ char buf[] = "73301XXXXXXXXXX";
+ int ret;
+
+ if (dev->serial_number) {
+ snprintf(buf, sizeof(buf), "%u", dev->serial_number);
+ ret = devlink_info_serial_number_put(req, buf);
+ if (ret)
+ return ret;
+ }
+
+ if (dev->fw_version.major) {
+ snprintf(buf, sizeof(buf), "%u.%u.%u",
+ dev->fw_version.major,
+ dev->fw_version.minor,
+ dev->fw_version.build);
+ ret = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ buf);
+ if (ret)
+ return ret;
+ }
+
+ if (dev->hw_revision) {
+ snprintf(buf, sizeof(buf), "%u", dev->hw_revision);
+ ret = devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_BOARD_REV,
+ buf);
+ if (ret)
+ return ret;
+ }
+
+ if (dev->ean[1] == KVASER_USB_EAN_MSB) {
+ snprintf(buf, sizeof(buf), "%x%08x", dev->ean[1], dev->ean[0]);
+ ret = devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_BOARD_ID,
+ buf);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+const struct devlink_ops kvaser_usb_devlink_ops = {
+ .info_get = kvaser_usb_devlink_info_get,
+};
+
+int kvaser_usb_devlink_port_register(struct kvaser_usb_net_priv *priv)
+{
+ int ret;
+ struct devlink_port_attrs attrs = {
+ .flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL,
+ .phys.port_number = priv->channel,
+ };
+ devlink_port_attrs_set(&priv->devlink_port, &attrs);
+
+ ret = devlink_port_register(priv_to_devlink(priv->dev),
+ &priv->devlink_port, priv->channel);
+ if (ret)
+ return ret;
+
+ SET_NETDEV_DEVLINK_PORT(priv->netdev, &priv->devlink_port);
+
+ return 0;
+}
+
+void kvaser_usb_devlink_port_unregister(struct kvaser_usb_net_priv *priv)
+{
+ devlink_port_unregister(&priv->devlink_port);
+}
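
With the devlink instance registered in probe and the info_get callback above, the serial number, running firmware version, board.rev and board.id (built from the EAN) should become visible through the standard `devlink dev info` command from iproute2; nothing beyond the generic devlink plumbing appears to be needed on the user-space side.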
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
index c7ba768dfe17..a59f20dad692 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
@@ -10,9 +10,9 @@
* - Transition from CAN_STATE_ERROR_WARNING to CAN_STATE_ERROR_ACTIVE is only
* reported after a call to do_get_berr_counter(), since firmware does not
* distinguish between ERROR_WARNING and ERROR_ACTIVE.
- * - Hardware timestamps are not set for CAN Tx frames.
*/
+#include <linux/bitfield.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/gfp.h>
@@ -68,6 +68,8 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_rt;
#define CMD_SET_BUSPARAMS_RESP 85
#define CMD_GET_CAPABILITIES_REQ 95
#define CMD_GET_CAPABILITIES_RESP 96
+#define CMD_LED_ACTION_REQ 101
+#define CMD_LED_ACTION_RESP 102
#define CMD_RX_MESSAGE 106
#define CMD_MAP_CHANNEL_REQ 200
#define CMD_MAP_CHANNEL_RESP 201
@@ -112,7 +114,7 @@ struct kvaser_cmd_card_info {
__le32 clock_res;
__le32 mfg_date;
__le32 ean[2];
- u8 hw_version;
+ u8 hw_revision;
u8 usb_mode;
u8 hw_type;
u8 reserved0;
@@ -218,6 +220,22 @@ struct kvaser_cmd_get_busparams_res {
u8 reserved[20];
} __packed;
+/* The device has two LEDs per CAN channel.
+ * The LSB of the action field controls the state:
+ * 0 = ON
+ * 1 = OFF
+ * The remaining bits of the action field hold the LED index.
+ */
+#define KVASER_USB_HYDRA_LED_IDX_MASK GENMASK(31, 1)
+#define KVASER_USB_HYDRA_LED_YELLOW_CH0_IDX 3
+#define KVASER_USB_HYDRA_LEDS_PER_CHANNEL 2
+struct kvaser_cmd_led_action_req {
+ u8 action;
+ u8 padding;
+ __le16 duration_ms;
+ u8 reserved[24];
+} __packed;
+
/* Ctrl modes */
#define KVASER_USB_HYDRA_CTRLMODE_NORMAL 0x01
#define KVASER_USB_HYDRA_CTRLMODE_LISTEN 0x02
@@ -261,6 +279,15 @@ struct kvaser_cmd_tx_can {
u8 reserved[11];
} __packed;
+struct kvaser_cmd_tx_ack {
+ __le32 id;
+ u8 data[8];
+ u8 dlc;
+ u8 flags;
+ __le16 timestamp[3];
+ u8 reserved0[8];
+} __packed;
+
struct kvaser_cmd_header {
u8 cmd_no;
/* The destination HE address is stored in 0..5 of he_addr.
@@ -291,12 +318,15 @@ struct kvaser_cmd {
struct kvaser_cmd_get_busparams_req get_busparams_req;
struct kvaser_cmd_get_busparams_res get_busparams_res;
+ struct kvaser_cmd_led_action_req led_action_req;
+
struct kvaser_cmd_chip_state_event chip_state_event;
struct kvaser_cmd_set_ctrlmode set_ctrlmode;
struct kvaser_cmd_rx_can rx_can;
struct kvaser_cmd_tx_can tx_can;
+ struct kvaser_cmd_tx_ack tx_ack;
} __packed;
} __packed;
@@ -522,23 +552,25 @@ kvaser_usb_hydra_net_priv_from_cmd(const struct kvaser_usb *dev,
return priv;
}
-static ktime_t
-kvaser_usb_hydra_ktime_from_rx_cmd(const struct kvaser_usb_dev_cfg *cfg,
- const struct kvaser_cmd *cmd)
+static ktime_t kvaser_usb_hydra_ktime_from_cmd(const struct kvaser_usb_dev_cfg *cfg,
+ const struct kvaser_cmd *cmd)
{
- u64 ticks;
+ ktime_t hwtstamp = 0;
if (cmd->header.cmd_no == CMD_EXTENDED) {
struct kvaser_cmd_ext *cmd_ext = (struct kvaser_cmd_ext *)cmd;
- ticks = le64_to_cpu(cmd_ext->rx_can.timestamp);
- } else {
- ticks = le16_to_cpu(cmd->rx_can.timestamp[0]);
- ticks += (u64)(le16_to_cpu(cmd->rx_can.timestamp[1])) << 16;
- ticks += (u64)(le16_to_cpu(cmd->rx_can.timestamp[2])) << 32;
+ if (cmd_ext->cmd_no_ext == CMD_RX_MESSAGE_FD)
+ hwtstamp = kvaser_usb_timestamp64_to_ktime(cfg, cmd_ext->rx_can.timestamp);
+ else if (cmd_ext->cmd_no_ext == CMD_TX_ACKNOWLEDGE_FD)
+ hwtstamp = kvaser_usb_timestamp64_to_ktime(cfg, cmd_ext->tx_ack.timestamp);
+ } else if (cmd->header.cmd_no == CMD_RX_MESSAGE) {
+ hwtstamp = kvaser_usb_timestamp48_to_ktime(cfg, cmd->rx_can.timestamp);
+ } else if (cmd->header.cmd_no == CMD_TX_ACKNOWLEDGE) {
+ hwtstamp = kvaser_usb_timestamp48_to_ktime(cfg, cmd->tx_ack.timestamp);
}
- return ns_to_ktime(div_u64(ticks * 1000, cfg->timestamp_freq));
+ return hwtstamp;
}
static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev,
@@ -915,6 +947,42 @@ kvaser_usb_hydra_bus_status_to_can_state(const struct kvaser_usb_net_priv *priv,
}
}
+static void kvaser_usb_hydra_change_state(struct kvaser_usb_net_priv *priv,
+ const struct can_berr_counter *bec,
+ struct can_frame *cf,
+ enum can_state new_state)
+{
+ struct net_device *netdev = priv->netdev;
+ enum can_state old_state = priv->can.state;
+ enum can_state tx_state, rx_state;
+
+ tx_state = (bec->txerr >= bec->rxerr) ?
+ new_state : CAN_STATE_ERROR_ACTIVE;
+ rx_state = (bec->txerr <= bec->rxerr) ?
+ new_state : CAN_STATE_ERROR_ACTIVE;
+ can_change_state(netdev, cf, tx_state, rx_state);
+
+ if (new_state == CAN_STATE_BUS_OFF && old_state < CAN_STATE_BUS_OFF) {
+ if (priv->can.restart_ms == 0)
+ kvaser_usb_hydra_send_simple_cmd_async(priv, CMD_STOP_CHIP_REQ);
+
+ can_bus_off(netdev);
+ }
+
+ if (priv->can.restart_ms &&
+ old_state >= CAN_STATE_BUS_OFF &&
+ new_state < CAN_STATE_BUS_OFF) {
+ priv->can.can_stats.restarts++;
+ if (cf)
+ cf->can_id |= CAN_ERR_RESTARTED;
+ }
+ if (cf && new_state != CAN_STATE_BUS_OFF) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = bec->txerr;
+ cf->data[7] = bec->rxerr;
+ }
+}
+
static void kvaser_usb_hydra_update_state(struct kvaser_usb_net_priv *priv,
u8 bus_status,
const struct can_berr_counter *bec)
@@ -940,41 +1008,11 @@ static void kvaser_usb_hydra_update_state(struct kvaser_usb_net_priv *priv,
return;
skb = alloc_can_err_skb(netdev, &cf);
- if (skb) {
- enum can_state tx_state, rx_state;
-
- tx_state = (bec->txerr >= bec->rxerr) ?
- new_state : CAN_STATE_ERROR_ACTIVE;
- rx_state = (bec->txerr <= bec->rxerr) ?
- new_state : CAN_STATE_ERROR_ACTIVE;
- can_change_state(netdev, cf, tx_state, rx_state);
- }
-
- if (new_state == CAN_STATE_BUS_OFF && old_state < CAN_STATE_BUS_OFF) {
- if (!priv->can.restart_ms)
- kvaser_usb_hydra_send_simple_cmd_async
- (priv, CMD_STOP_CHIP_REQ);
-
- can_bus_off(netdev);
- }
-
- if (!skb) {
+ kvaser_usb_hydra_change_state(priv, bec, cf, new_state);
+ if (skb)
+ netif_rx(skb);
+ else
netdev_warn(netdev, "No memory left for err_skb\n");
- return;
- }
-
- if (priv->can.restart_ms &&
- old_state >= CAN_STATE_BUS_OFF &&
- new_state < CAN_STATE_BUS_OFF)
- priv->can.can_stats.restarts++;
-
- if (new_state != CAN_STATE_BUS_OFF) {
- cf->can_id |= CAN_ERR_CNT;
- cf->data[6] = bec->txerr;
- cf->data[7] = bec->rxerr;
- }
-
- netif_rx(skb);
}
static void kvaser_usb_hydra_state_event(const struct kvaser_usb *dev,
@@ -1067,9 +1105,8 @@ kvaser_usb_hydra_error_frame(struct kvaser_usb_net_priv *priv,
{
struct net_device *netdev = priv->netdev;
struct net_device_stats *stats = &netdev->stats;
- struct can_frame *cf;
- struct sk_buff *skb;
- struct skb_shared_hwtstamps *shhwtstamps;
+ struct can_frame *cf = NULL;
+ struct sk_buff *skb = NULL;
struct can_berr_counter bec;
enum can_state new_state, old_state;
u8 bus_status;
@@ -1085,52 +1122,26 @@ kvaser_usb_hydra_error_frame(struct kvaser_usb_net_priv *priv,
kvaser_usb_hydra_bus_status_to_can_state(priv, bus_status, &bec,
&new_state);
- skb = alloc_can_err_skb(netdev, &cf);
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+ skb = alloc_can_err_skb(netdev, &cf);
+ if (new_state != old_state)
+ kvaser_usb_hydra_change_state(priv, &bec, cf, new_state);
- if (new_state != old_state) {
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
if (skb) {
- enum can_state tx_state, rx_state;
-
- tx_state = (bec.txerr >= bec.rxerr) ?
- new_state : CAN_STATE_ERROR_ACTIVE;
- rx_state = (bec.txerr <= bec.rxerr) ?
- new_state : CAN_STATE_ERROR_ACTIVE;
-
- can_change_state(netdev, cf, tx_state, rx_state);
+ struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
- if (priv->can.restart_ms &&
- old_state >= CAN_STATE_BUS_OFF &&
- new_state < CAN_STATE_BUS_OFF)
- cf->can_id |= CAN_ERR_RESTARTED;
- }
-
- if (new_state == CAN_STATE_BUS_OFF) {
- if (!priv->can.restart_ms)
- kvaser_usb_hydra_send_simple_cmd_async
- (priv, CMD_STOP_CHIP_REQ);
-
- can_bus_off(netdev);
+ shhwtstamps->hwtstamp = hwtstamp;
+ cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_CNT;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ netif_rx(skb);
+ } else {
+ stats->rx_dropped++;
+ netdev_warn(netdev, "No memory left for err_skb\n");
}
}
- if (!skb) {
- stats->rx_dropped++;
- netdev_warn(netdev, "No memory left for err_skb\n");
- return;
- }
-
- shhwtstamps = skb_hwtstamps(skb);
- shhwtstamps->hwtstamp = hwtstamp;
-
- cf->can_id |= CAN_ERR_BUSERROR;
- if (new_state != CAN_STATE_BUS_OFF) {
- cf->can_id |= CAN_ERR_CNT;
- cf->data[6] = bec.txerr;
- cf->data[7] = bec.rxerr;
- }
-
- netif_rx(skb);
-
priv->bec.txerr = bec.txerr;
priv->bec.rxerr = bec.rxerr;
}
@@ -1175,6 +1186,7 @@ static void kvaser_usb_hydra_tx_acknowledge(const struct kvaser_usb *dev,
bool one_shot_fail = false;
bool is_err_frame = false;
u16 transid = kvaser_usb_hydra_get_cmd_transid(cmd);
+ struct sk_buff *skb;
priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd);
if (!priv)
@@ -1201,6 +1213,9 @@ static void kvaser_usb_hydra_tx_acknowledge(const struct kvaser_usb *dev,
spin_lock_irqsave(&priv->tx_contexts_lock, irq_flags);
+ skb = priv->can.echo_skb[context->echo_index];
+ if (skb)
+ skb_hwtstamps(skb)->hwtstamp = kvaser_usb_hydra_ktime_from_cmd(dev->cfg, cmd);
len = can_get_echo_skb(priv->netdev, context->echo_index, NULL);
context->echo_index = dev->max_tx_urbs;
--priv->active_tx_contexts;
@@ -1234,7 +1249,7 @@ static void kvaser_usb_hydra_rx_msg_std(const struct kvaser_usb *dev,
stats = &priv->netdev->stats;
flags = cmd->rx_can.flags;
- hwtstamp = kvaser_usb_hydra_ktime_from_rx_cmd(dev->cfg, cmd);
+ hwtstamp = kvaser_usb_hydra_ktime_from_cmd(dev->cfg, cmd);
if (flags & KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME) {
kvaser_usb_hydra_error_frame(priv, &cmd->rx_can.err_frame_data,
@@ -1302,7 +1317,7 @@ static void kvaser_usb_hydra_rx_msg_ext(const struct kvaser_usb *dev,
KVASER_USB_KCAN_DATA_DLC_SHIFT;
flags = le32_to_cpu(cmd->rx_can.flags);
- hwtstamp = kvaser_usb_hydra_ktime_from_rx_cmd(dev->cfg, std_cmd);
+ hwtstamp = kvaser_usb_hydra_ktime_from_cmd(dev->cfg, std_cmd);
if (flags & KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME) {
kvaser_usb_hydra_error_frame(priv, &cmd->rx_can.err_frame_data,
@@ -1396,6 +1411,7 @@ static void kvaser_usb_hydra_handle_cmd_std(const struct kvaser_usb *dev,
/* Ignored commands */
case CMD_SET_BUSPARAMS_RESP:
case CMD_SET_BUSPARAMS_FD_RESP:
+ case CMD_LED_ACTION_RESP:
break;
default:
@@ -1823,6 +1839,7 @@ static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev)
size_t cmd_len;
int err;
u32 flags;
+ u32 fw_version;
struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -1847,7 +1864,10 @@ static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev)
if (err)
goto end;
- dev->fw_version = le32_to_cpu(cmd->sw_detail_res.sw_version);
+ fw_version = le32_to_cpu(cmd->sw_detail_res.sw_version);
+ dev->fw_version.major = FIELD_GET(KVASER_USB_SW_VERSION_MAJOR_MASK, fw_version);
+ dev->fw_version.minor = FIELD_GET(KVASER_USB_SW_VERSION_MINOR_MASK, fw_version);
+ dev->fw_version.build = FIELD_GET(KVASER_USB_SW_VERSION_BUILD_MASK, fw_version);
flags = le32_to_cpu(cmd->sw_detail_res.sw_flags);
if (flags & KVASER_USB_HYDRA_SW_FLAG_FW_BAD) {
@@ -1898,6 +1918,10 @@ static int kvaser_usb_hydra_get_card_info(struct kvaser_usb *dev)
err = kvaser_usb_hydra_wait_cmd(dev, CMD_GET_CARD_INFO_RESP, &cmd);
if (err)
return err;
+ dev->ean[1] = le32_to_cpu(cmd.card_info.ean[1]);
+ dev->ean[0] = le32_to_cpu(cmd.card_info.ean[0]);
+ dev->serial_number = le32_to_cpu(cmd.card_info.serial_number);
+ dev->hw_revision = cmd.card_info.hw_revision;
dev->nchannels = cmd.card_info.nchannels;
if (dev->nchannels > KVASER_USB_MAX_NET_DEVICES)
@@ -1952,6 +1976,36 @@ static int kvaser_usb_hydra_get_capabilities(struct kvaser_usb *dev)
return 0;
}
+static int kvaser_usb_hydra_set_led(struct kvaser_usb_net_priv *priv,
+ enum kvaser_usb_led_state state,
+ u16 duration_ms)
+{
+ struct kvaser_usb *dev = priv->dev;
+ struct kvaser_cmd *cmd;
+ size_t cmd_len;
+ int ret;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->header.cmd_no = CMD_LED_ACTION_REQ;
+ cmd_len = kvaser_usb_hydra_cmd_size(cmd);
+ kvaser_usb_hydra_set_cmd_dest_he(cmd, dev->card_data.hydra.sysdbg_he);
+ kvaser_usb_hydra_set_cmd_transid(cmd, kvaser_usb_hydra_get_next_transid(dev));
+
+ cmd->led_action_req.duration_ms = cpu_to_le16(duration_ms);
+ cmd->led_action_req.action = state |
+ FIELD_PREP(KVASER_USB_HYDRA_LED_IDX_MASK,
+ KVASER_USB_HYDRA_LED_YELLOW_CH0_IDX +
+ KVASER_USB_HYDRA_LEDS_PER_CHANNEL * priv->channel);
+
+ ret = kvaser_usb_send_cmd(dev, cmd, cmd_len);
+ kfree(cmd);
+
+ return ret;
+}
+
static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv)
{
struct kvaser_usb *dev = priv->dev;
@@ -2155,6 +2209,7 @@ const struct kvaser_usb_dev_ops kvaser_usb_hydra_dev_ops = {
.dev_get_software_details = kvaser_usb_hydra_get_software_details,
.dev_get_card_info = kvaser_usb_hydra_get_card_info,
.dev_get_capabilities = kvaser_usb_hydra_get_capabilities,
+ .dev_set_led = kvaser_usb_hydra_set_led,
.dev_set_opt_mode = kvaser_usb_hydra_set_opt_mode,
.dev_start_chip = kvaser_usb_hydra_start_chip,
.dev_stop_chip = kvaser_usb_hydra_stop_chip,
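
The action byte built in kvaser_usb_hydra_set_led() packs the requested state into bit 0 and the LED index into the remaining bits, starting at index 3 for channel 0 with two LEDs per channel. A standalone sketch of the same packing; the ON/OFF numeric values are assumed from the 0 = ON / 1 = OFF comment, and the macro names are local to the sketch rather than taken from the driver:

#include <stdint.h>
#include <stdio.h>

#define LED_STATE_ON		0	/* assumed value of KVASER_USB_LED_ON */
#define LED_STATE_OFF		1	/* assumed value of KVASER_USB_LED_OFF */
#define LED_IDX_SHIFT		1	/* index lives in GENMASK(31, 1) */
#define LED_YELLOW_CH0_IDX	3
#define LEDS_PER_CHANNEL	2

static uint8_t hydra_led_action(unsigned int channel, uint8_t state)
{
	uint8_t idx = LED_YELLOW_CH0_IDX + LEDS_PER_CHANNEL * channel;

	/* state in bit 0, LED index in the remaining bits */
	return state | (idx << LED_IDX_SHIFT);
}

int main(void)
{
	/* channel 1 -> LED index 5 -> action 0x0a when turning the LED on */
	printf("0x%02x\n", hydra_led_action(1, LED_STATE_ON));
	return 0;
}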
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
index 23bd7574b1c7..1167d38344f1 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
@@ -10,6 +10,7 @@
* Copyright (C) 2015 Valeo S.A.
*/
+#include <linux/bitfield.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/gfp.h>
@@ -81,6 +82,8 @@
#define CMD_FLUSH_QUEUE_REPLY 68
#define CMD_GET_CAPABILITIES_REQ 95
#define CMD_GET_CAPABILITIES_RESP 96
+#define CMD_LED_ACTION_REQ 101
+#define CMD_LED_ACTION_RESP 102
#define CMD_LEAF_LOG_MESSAGE 106
@@ -119,6 +122,10 @@
/* Extended CAN identifier flag */
#define KVASER_EXTENDED_FRAME BIT(31)
+/* USBCanII timestamp */
+#define KVASER_USB_USBCAN_CLK_OVERFLOW_MASK GENMASK(31, 16)
+#define KVASER_USB_USBCAN_TIMESTAMP_FACTOR 10
+
struct kvaser_cmd_simple {
u8 tid;
u8 channel;
@@ -131,7 +138,7 @@ struct kvaser_cmd_cardinfo {
__le32 padding0;
__le32 clock_resolution;
__le32 mfgdate;
- u8 ean[8];
+ __le32 ean[2];
u8 hw_revision;
union {
struct {
@@ -169,6 +176,21 @@ struct kvaser_cmd_busparams {
struct kvaser_usb_busparams busparams;
} __packed;
+/* The device has one LED per CAN channel.
+ * The LSB of the action field controls the state:
+ * 0 = ON
+ * 1 = OFF
+ * The remaining bits of the action field hold the LED index.
+ */
+#define KVASER_USB_LEAF_LED_IDX_MASK GENMASK(31, 1)
+#define KVASER_USB_LEAF_LED_YELLOW_CH0_IDX 2
+struct kvaser_cmd_led_action_req {
+ u8 tid;
+ u8 action;
+ __le16 duration_ms;
+ u8 padding[24];
+} __packed;
+
struct kvaser_cmd_tx_can {
u8 channel;
u8 tid;
@@ -235,6 +257,20 @@ struct kvaser_cmd_tx_acknowledge_header {
u8 tid;
} __packed;
+struct leaf_cmd_tx_acknowledge {
+ u8 channel;
+ u8 tid;
+ __le16 time[3];
+ u8 padding[2];
+} __packed;
+
+struct usbcan_cmd_tx_acknowledge {
+ u8 channel;
+ u8 tid;
+ __le16 time;
+ u8 padding[2];
+} __packed;
+
struct leaf_cmd_can_error_event {
u8 tid;
u8 flags;
@@ -281,6 +317,12 @@ struct usbcan_cmd_error_event {
__le16 padding;
} __packed;
+struct usbcan_cmd_clk_overflow_event {
+ u8 tid;
+ u8 padding;
+ __le32 time;
+} __packed;
+
struct kvaser_cmd_ctrl_mode {
u8 tid;
u8 channel;
@@ -335,6 +377,8 @@ struct kvaser_cmd {
struct kvaser_cmd_cardinfo cardinfo;
struct kvaser_cmd_busparams busparams;
+ struct kvaser_cmd_led_action_req led_action_req;
+
struct kvaser_cmd_rx_can_header rx_can_header;
struct kvaser_cmd_tx_acknowledge_header tx_acknowledge_header;
@@ -347,6 +391,7 @@ struct kvaser_cmd {
struct leaf_cmd_error_event error_event;
struct kvaser_cmd_cap_req cap_req;
struct kvaser_cmd_cap_res cap_res;
+ struct leaf_cmd_tx_acknowledge tx_ack;
} __packed leaf;
union {
@@ -355,6 +400,8 @@ struct kvaser_cmd {
struct usbcan_cmd_chip_state_event chip_state_event;
struct usbcan_cmd_can_error_event can_error_event;
struct usbcan_cmd_error_event error_event;
+ struct usbcan_cmd_tx_acknowledge tx_ack;
+ struct usbcan_cmd_clk_overflow_event clk_overflow_event;
} __packed usbcan;
struct kvaser_cmd_tx_can tx_can;
@@ -370,7 +417,7 @@ static const u8 kvaser_usb_leaf_cmd_sizes_leaf[] = {
[CMD_START_CHIP_REPLY] = kvaser_fsize(u.simple),
[CMD_STOP_CHIP_REPLY] = kvaser_fsize(u.simple),
[CMD_GET_CARD_INFO_REPLY] = kvaser_fsize(u.cardinfo),
- [CMD_TX_ACKNOWLEDGE] = kvaser_fsize(u.tx_acknowledge_header),
+ [CMD_TX_ACKNOWLEDGE] = kvaser_fsize(u.leaf.tx_ack),
[CMD_GET_SOFTWARE_INFO_REPLY] = kvaser_fsize(u.leaf.softinfo),
[CMD_RX_STD_MESSAGE] = kvaser_fsize(u.leaf.rx_can),
[CMD_RX_EXT_MESSAGE] = kvaser_fsize(u.leaf.rx_can),
@@ -382,21 +429,23 @@ static const u8 kvaser_usb_leaf_cmd_sizes_leaf[] = {
[CMD_ERROR_EVENT] = kvaser_fsize(u.leaf.error_event),
/* ignored events: */
[CMD_FLUSH_QUEUE_REPLY] = CMD_SIZE_ANY,
+ [CMD_LED_ACTION_RESP] = CMD_SIZE_ANY,
};
static const u8 kvaser_usb_leaf_cmd_sizes_usbcan[] = {
[CMD_START_CHIP_REPLY] = kvaser_fsize(u.simple),
[CMD_STOP_CHIP_REPLY] = kvaser_fsize(u.simple),
[CMD_GET_CARD_INFO_REPLY] = kvaser_fsize(u.cardinfo),
- [CMD_TX_ACKNOWLEDGE] = kvaser_fsize(u.tx_acknowledge_header),
+ [CMD_TX_ACKNOWLEDGE] = kvaser_fsize(u.usbcan.tx_ack),
[CMD_GET_SOFTWARE_INFO_REPLY] = kvaser_fsize(u.usbcan.softinfo),
[CMD_RX_STD_MESSAGE] = kvaser_fsize(u.usbcan.rx_can),
[CMD_RX_EXT_MESSAGE] = kvaser_fsize(u.usbcan.rx_can),
[CMD_CHIP_STATE_EVENT] = kvaser_fsize(u.usbcan.chip_state_event),
[CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.usbcan.can_error_event),
[CMD_ERROR_EVENT] = kvaser_fsize(u.usbcan.error_event),
+ [CMD_USBCAN_CLOCK_OVERFLOW_EVENT] = kvaser_fsize(u.usbcan.clk_overflow_event),
/* ignored events: */
- [CMD_USBCAN_CLOCK_OVERFLOW_EVENT] = CMD_SIZE_ANY,
+ [CMD_LED_ACTION_RESP] = CMD_SIZE_ANY,
};
/* Summary of a kvaser error event, for a unified Leaf/Usbcan error
@@ -463,11 +512,27 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_usbcan_dev_cfg = {
.bittiming_const = &kvaser_usb_leaf_m16c_bittiming_const,
};
-static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_m32c_dev_cfg = {
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_m32c_dev_cfg_16mhz = {
.clock = {
.freq = 16 * MEGA /* Hz */,
},
- .timestamp_freq = 1,
+ .timestamp_freq = 16,
+ .bittiming_const = &kvaser_usb_leaf_m32c_bittiming_const,
+};
+
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_m32c_dev_cfg_24mhz = {
+ .clock = {
+ .freq = 16 * MEGA /* Hz */,
+ },
+ .timestamp_freq = 24,
+ .bittiming_const = &kvaser_usb_leaf_m32c_bittiming_const,
+};
+
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_m32c_dev_cfg_32mhz = {
+ .clock = {
+ .freq = 16 * MEGA /* Hz */,
+ },
+ .timestamp_freq = 32,
.bittiming_const = &kvaser_usb_leaf_m32c_bittiming_const,
};
@@ -475,7 +540,7 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_16mhz = {
.clock = {
.freq = 16 * MEGA /* Hz */,
},
- .timestamp_freq = 1,
+ .timestamp_freq = 16,
.bittiming_const = &kvaser_usb_flexc_bittiming_const,
};
@@ -483,7 +548,7 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_24mhz = {
.clock = {
.freq = 24 * MEGA /* Hz */,
},
- .timestamp_freq = 1,
+ .timestamp_freq = 24,
.bittiming_const = &kvaser_usb_flexc_bittiming_const,
};
@@ -491,10 +556,19 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_32mhz = {
.clock = {
.freq = 32 * MEGA /* Hz */,
},
- .timestamp_freq = 1,
+ .timestamp_freq = 32,
.bittiming_const = &kvaser_usb_flexc_bittiming_const,
};
+static inline ktime_t kvaser_usb_usbcan_timestamp_to_ktime(const struct kvaser_usb *dev,
+ __le16 timestamp)
+{
+ u64 ticks = le16_to_cpu(timestamp) |
+ dev->card_data.usbcan_timestamp_msb;
+
+ return kvaser_usb_ticks_to_ktime(dev->cfg, ticks * KVASER_USB_USBCAN_TIMESTAMP_FACTOR);
+}
+
static int kvaser_usb_leaf_verify_size(const struct kvaser_usb *dev,
const struct kvaser_cmd *cmd)
{
@@ -611,7 +685,7 @@ static int kvaser_usb_leaf_wait_cmd(const struct kvaser_usb *dev, u8 id,
* for further details.
*/
if (tmp->len == 0) {
- pos = round_up(pos,
+ pos = round_up(pos + 1,
le16_to_cpu
(dev->bulk_in->wMaxPacketSize));
continue;
@@ -667,9 +741,13 @@ static int kvaser_usb_leaf_send_simple_cmd(const struct kvaser_usb *dev,
static void kvaser_usb_leaf_get_software_info_leaf(struct kvaser_usb *dev,
const struct leaf_cmd_softinfo *softinfo)
{
+ u32 fw_version;
u32 sw_options = le32_to_cpu(softinfo->sw_options);
- dev->fw_version = le32_to_cpu(softinfo->fw_version);
+ fw_version = le32_to_cpu(softinfo->fw_version);
+ dev->fw_version.major = FIELD_GET(KVASER_USB_SW_VERSION_MAJOR_MASK, fw_version);
+ dev->fw_version.minor = FIELD_GET(KVASER_USB_SW_VERSION_MINOR_MASK, fw_version);
+ dev->fw_version.build = FIELD_GET(KVASER_USB_SW_VERSION_BUILD_MASK, fw_version);
dev->max_tx_urbs = le16_to_cpu(softinfo->max_outstanding_tx);
if (sw_options & KVASER_USB_LEAF_SWOPTION_EXT_CAP)
@@ -678,8 +756,19 @@ static void kvaser_usb_leaf_get_software_info_leaf(struct kvaser_usb *dev,
if (dev->driver_info->quirks & KVASER_USB_QUIRK_IGNORE_CLK_FREQ) {
/* Firmware expects bittiming parameters calculated for 16MHz
* clock, regardless of the actual clock
+ * However, the reported frequency is used for timestamps.
*/
- dev->cfg = &kvaser_usb_leaf_m32c_dev_cfg;
+ switch (sw_options & KVASER_USB_LEAF_SWOPTION_FREQ_MASK) {
+ case KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK:
+ dev->cfg = &kvaser_usb_leaf_m32c_dev_cfg_16mhz;
+ break;
+ case KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK:
+ dev->cfg = &kvaser_usb_leaf_m32c_dev_cfg_24mhz;
+ break;
+ case KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK:
+ dev->cfg = &kvaser_usb_leaf_m32c_dev_cfg_32mhz;
+ break;
+ }
} else {
switch (sw_options & KVASER_USB_LEAF_SWOPTION_FREQ_MASK) {
case KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK:
@@ -699,6 +788,7 @@ static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev)
{
struct kvaser_cmd cmd;
int err;
+ u32 fw_version;
err = kvaser_usb_leaf_send_simple_cmd(dev, CMD_GET_SOFTWARE_INFO, 0);
if (err)
@@ -713,7 +803,13 @@ static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev)
kvaser_usb_leaf_get_software_info_leaf(dev, &cmd.u.leaf.softinfo);
break;
case KVASER_USBCAN:
- dev->fw_version = le32_to_cpu(cmd.u.usbcan.softinfo.fw_version);
+ fw_version = le32_to_cpu(cmd.u.usbcan.softinfo.fw_version);
+ dev->fw_version.major = FIELD_GET(KVASER_USB_SW_VERSION_MAJOR_MASK,
+ fw_version);
+ dev->fw_version.minor = FIELD_GET(KVASER_USB_SW_VERSION_MINOR_MASK,
+ fw_version);
+ dev->fw_version.build = FIELD_GET(KVASER_USB_SW_VERSION_BUILD_MASK,
+ fw_version);
dev->max_tx_urbs =
le16_to_cpu(cmd.u.usbcan.softinfo.max_outstanding_tx);
dev->cfg = &kvaser_usb_leaf_usbcan_dev_cfg;
@@ -758,6 +854,10 @@ static int kvaser_usb_leaf_get_card_info(struct kvaser_usb *dev)
(dev->driver_info->family == KVASER_USBCAN &&
dev->nchannels > MAX_USBCAN_NET_DEVICES))
return -EINVAL;
+ dev->ean[1] = le32_to_cpu(cmd.u.cardinfo.ean[1]);
+ dev->ean[0] = le32_to_cpu(cmd.u.cardinfo.ean[0]);
+ dev->serial_number = le32_to_cpu(cmd.u.cardinfo.serial_number);
+ dev->hw_revision = cmd.u.cardinfo.hw_revision;
return 0;
}
@@ -862,6 +962,34 @@ static int kvaser_usb_leaf_get_capabilities_leaf(struct kvaser_usb *dev)
return 0;
}
+static int kvaser_usb_leaf_set_led(struct kvaser_usb_net_priv *priv,
+ enum kvaser_usb_led_state state,
+ u16 duration_ms)
+{
+ struct kvaser_usb *dev = priv->dev;
+ struct kvaser_cmd *cmd;
+ int ret;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->id = CMD_LED_ACTION_REQ;
+ cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_led_action_req);
+ cmd->u.led_action_req.tid = 0xff;
+
+ cmd->u.led_action_req.duration_ms = cpu_to_le16(duration_ms);
+ cmd->u.led_action_req.action = state |
+ FIELD_PREP(KVASER_USB_LEAF_LED_IDX_MASK,
+ KVASER_USB_LEAF_LED_YELLOW_CH0_IDX +
+ priv->channel);
+
+ ret = kvaser_usb_send_cmd(dev, cmd, cmd->len);
+ kfree(cmd);
+
+ return ret;
+}
+
static int kvaser_usb_leaf_get_capabilities(struct kvaser_usb *dev)
{
int err = 0;
@@ -880,6 +1008,8 @@ static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev,
struct kvaser_usb_net_priv *priv;
unsigned long flags;
u8 channel, tid;
+ struct sk_buff *skb;
+ ktime_t hwtstamp = 0;
channel = cmd->u.tx_acknowledge_header.channel;
tid = cmd->u.tx_acknowledge_header.tid;
@@ -901,14 +1031,14 @@ static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev,
/* Sometimes the state change doesn't come after a bus-off event */
if (priv->can.restart_ms && priv->can.state == CAN_STATE_BUS_OFF) {
- struct sk_buff *skb;
+ struct sk_buff *err_skb;
struct can_frame *cf;
- skb = alloc_can_err_skb(priv->netdev, &cf);
- if (skb) {
+ err_skb = alloc_can_err_skb(priv->netdev, &cf);
+ if (err_skb) {
cf->can_id |= CAN_ERR_RESTARTED;
- netif_rx(skb);
+ netif_rx(err_skb);
} else {
netdev_err(priv->netdev,
"No memory left for err_skb\n");
@@ -919,9 +1049,20 @@ static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev,
priv->can.state = CAN_STATE_ERROR_ACTIVE;
}
+ switch (dev->driver_info->family) {
+ case KVASER_LEAF:
+ hwtstamp = kvaser_usb_timestamp48_to_ktime(dev->cfg, cmd->u.leaf.tx_ack.time);
+ break;
+ case KVASER_USBCAN:
+ hwtstamp = kvaser_usb_usbcan_timestamp_to_ktime(dev, cmd->u.usbcan.tx_ack.time);
+ break;
+ }
spin_lock_irqsave(&priv->tx_contexts_lock, flags);
+ skb = priv->can.echo_skb[context->echo_index];
+ if (skb)
+ skb_hwtstamps(skb)->hwtstamp = hwtstamp;
stats->tx_packets++;
stats->tx_bytes += can_get_echo_skb(priv->netdev,
context->echo_index, NULL);
@@ -1045,10 +1186,8 @@ kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
const struct kvaser_usb_err_summary *es)
{
- struct can_frame *cf;
- struct can_frame tmp_cf = { .can_id = CAN_ERR_FLAG,
- .len = CAN_ERR_DLC };
- struct sk_buff *skb;
+ struct can_frame *cf = NULL;
+ struct sk_buff *skb = NULL;
struct net_device_stats *stats;
struct kvaser_usb_net_priv *priv;
struct kvaser_usb_net_leaf_priv *leaf;
@@ -1068,18 +1207,10 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
if (!netif_running(priv->netdev))
return;
- /* Update all of the CAN interface's state and error counters before
- * trying any memory allocation that can actually fail with -ENOMEM.
- *
- * We send a temporary stack-allocated error CAN frame to
- * can_change_state() for the very same reason.
- *
- * TODO: Split can_change_state() responsibility between updating the
- * CAN interface's state and counters, and the setting up of CAN error
- * frame ID and data to userspace. Remove stack allocation afterwards.
- */
old_state = priv->can.state;
- kvaser_usb_leaf_rx_error_update_can_state(priv, es, &tmp_cf);
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+ skb = alloc_can_err_skb(priv->netdev, &cf);
+ kvaser_usb_leaf_rx_error_update_can_state(priv, es, cf);
new_state = priv->can.state;
/* If there are errors, request status updates periodically as we do
@@ -1093,13 +1224,6 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
schedule_delayed_work(&leaf->chip_state_req_work,
msecs_to_jiffies(500));
- skb = alloc_can_err_skb(priv->netdev, &cf);
- if (!skb) {
- stats->rx_dropped++;
- return;
- }
- memcpy(cf, &tmp_cf, sizeof(*cf));
-
if (new_state != old_state) {
if (es->status &
(M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) {
@@ -1112,11 +1236,20 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
if (priv->can.restart_ms &&
old_state == CAN_STATE_BUS_OFF &&
new_state < CAN_STATE_BUS_OFF) {
- cf->can_id |= CAN_ERR_RESTARTED;
+ if (cf)
+ cf->can_id |= CAN_ERR_RESTARTED;
netif_carrier_on(priv->netdev);
}
}
+ if (!skb) {
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
+ stats->rx_dropped++;
+ netdev_warn(priv->netdev, "No memory left for err_skb\n");
+ }
+ return;
+ }
+
switch (dev->driver_info->family) {
case KVASER_LEAF:
if (es->leaf.error_factor) {
@@ -1299,6 +1432,7 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev,
struct net_device_stats *stats;
u8 channel = cmd->u.rx_can_header.channel;
const u8 *rx_data = NULL; /* GCC */
+ ktime_t hwtstamp = 0;
if (channel >= dev->nchannels) {
dev_err(&dev->intf->dev,
@@ -1329,9 +1463,11 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev,
switch (dev->driver_info->family) {
case KVASER_LEAF:
rx_data = cmd->u.leaf.rx_can.data;
+ hwtstamp = kvaser_usb_timestamp48_to_ktime(dev->cfg, cmd->u.leaf.rx_can.time);
break;
case KVASER_USBCAN:
rx_data = cmd->u.usbcan.rx_can.data;
+ hwtstamp = kvaser_usb_usbcan_timestamp_to_ktime(dev, cmd->u.usbcan.rx_can.time);
break;
}
@@ -1375,6 +1511,7 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev,
memcpy(cf->data, &rx_data[6], cf->len);
}
+ skb_hwtstamps(skb)->hwtstamp = hwtstamp;
stats->rx_packets++;
if (!(cf->can_id & CAN_RTR_FLAG))
stats->rx_bytes += cf->len;
@@ -1508,7 +1645,7 @@ static void kvaser_usb_leaf_get_busparams_reply(const struct kvaser_usb *dev,
complete(&priv->get_busparams_comp);
}
-static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev,
+static void kvaser_usb_leaf_handle_command(struct kvaser_usb *dev,
const struct kvaser_cmd *cmd)
{
if (kvaser_usb_leaf_verify_size(dev, cmd) < 0)
@@ -1554,16 +1691,21 @@ static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev,
kvaser_usb_leaf_get_busparams_reply(dev, cmd);
break;
- /* Ignored commands */
case CMD_USBCAN_CLOCK_OVERFLOW_EVENT:
if (dev->driver_info->family != KVASER_USBCAN)
goto warn;
+ dev->card_data.usbcan_timestamp_msb =
+ le32_to_cpu(cmd->u.usbcan.clk_overflow_event.time) &
+ KVASER_USB_USBCAN_CLK_OVERFLOW_MASK;
break;
+ /* Ignored commands */
case CMD_FLUSH_QUEUE_REPLY:
if (dev->driver_info->family != KVASER_LEAF)
goto warn;
break;
+ case CMD_LED_ACTION_RESP:
+ break;
default:
warn: dev_warn(&dev->intf->dev, "Unhandled command (%d)\n", cmd->id);
@@ -1590,7 +1732,7 @@ static void kvaser_usb_leaf_read_bulk_callback(struct kvaser_usb *dev,
* number of events in case of a heavy rx load on the bus.
*/
if (cmd->len == 0) {
- pos = round_up(pos, le16_to_cpu
+ pos = round_up(pos + 1, le16_to_cpu
(dev->bulk_in->wMaxPacketSize));
continue;
}
@@ -1853,6 +1995,7 @@ const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops = {
.dev_get_software_details = NULL,
.dev_get_card_info = kvaser_usb_leaf_get_card_info,
.dev_get_capabilities = kvaser_usb_leaf_get_capabilities,
+ .dev_set_led = kvaser_usb_leaf_set_led,
.dev_set_opt_mode = kvaser_usb_leaf_set_opt_mode,
.dev_start_chip = kvaser_usb_leaf_start_chip,
.dev_stop_chip = kvaser_usb_leaf_stop_chip,
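
On USBcan devices the per-message time field is only 16 bits wide; the driver now latches the upper 16 bits from CMD_USBCAN_CLOCK_OVERFLOW_EVENT into card_data.usbcan_timestamp_msb and multiplies the combined tick count by KVASER_USB_USBCAN_TIMESTAMP_FACTOR (10) before the usual division by the timestamp frequency. A small sketch of that reconstruction, assuming a 16 MHz timestamp frequency and with the mask and factor copied from the defines above:

#include <stdint.h>
#include <stdio.h>

#define CLK_OVERFLOW_MASK	0xffff0000u	/* GENMASK(31, 16) */
#define TIMESTAMP_FACTOR	10

static uint64_t usbcan_ticks(uint16_t msg_time, uint32_t overflow_event_time)
{
	/* upper 16 bits come from the latched clock-overflow event */
	return (uint64_t)msg_time | (overflow_event_time & CLK_OVERFLOW_MASK);
}

int main(void)
{
	uint64_t ticks = usbcan_ticks(0x1234, 0x00015678);
	uint64_t ns = ticks * TIMESTAMP_FACTOR * 1000 / 16;	/* 16 MHz */

	printf("ticks=0x%llx -> %llu ns\n",
	       (unsigned long long)ticks, (unsigned long long)ns);
	return 0;
}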
diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c
index 47619e9cb005..41c0a1c399bf 100644
--- a/drivers/net/can/usb/mcba_usb.c
+++ b/drivers/net/can/usb/mcba_usb.c
@@ -6,7 +6,7 @@
* This driver is inspired by the 4.6.2 version of net/can/usb/usb_8dev.c
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
diff --git a/drivers/net/can/usb/nct6694_canfd.c b/drivers/net/can/usb/nct6694_canfd.c
new file mode 100644
index 000000000000..dd6df2ec3742
--- /dev/null
+++ b/drivers/net/can/usb/nct6694_canfd.c
@@ -0,0 +1,831 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Nuvoton NCT6694 SocketCAN CAN FD driver based on the USB interface.
+ *
+ * Copyright (C) 2025 Nuvoton Technology Corp.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/can/dev.h>
+#include <linux/can/rx-offload.h>
+#include <linux/ethtool.h>
+#include <linux/idr.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/mfd/nct6694.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+
+#define DEVICE_NAME "nct6694-canfd"
+
+/* USB command module type for NCT6694 CANfd controller.
+ * This defines the module type used for communication with the NCT6694
+ * CANfd controller over the USB interface.
+ */
+#define NCT6694_CANFD_MOD 0x05
+
+/* Command 00h - CAN Setting and Initialization */
+#define NCT6694_CANFD_SETTING 0x00
+#define NCT6694_CANFD_SETTING_ACTIVE_CTRL1 BIT(0)
+#define NCT6694_CANFD_SETTING_ACTIVE_CTRL2 BIT(1)
+#define NCT6694_CANFD_SETTING_ACTIVE_NBTP_DBTP BIT(2)
+#define NCT6694_CANFD_SETTING_CTRL1_MON BIT(0)
+#define NCT6694_CANFD_SETTING_CTRL1_NISO BIT(1)
+#define NCT6694_CANFD_SETTING_CTRL1_LBCK BIT(2)
+#define NCT6694_CANFD_SETTING_NBTP_NTSEG2 GENMASK(6, 0)
+#define NCT6694_CANFD_SETTING_NBTP_NTSEG1 GENMASK(15, 8)
+#define NCT6694_CANFD_SETTING_NBTP_NBRP GENMASK(24, 16)
+#define NCT6694_CANFD_SETTING_NBTP_NSJW GENMASK(31, 25)
+#define NCT6694_CANFD_SETTING_DBTP_DSJW GENMASK(3, 0)
+#define NCT6694_CANFD_SETTING_DBTP_DTSEG2 GENMASK(7, 4)
+#define NCT6694_CANFD_SETTING_DBTP_DTSEG1 GENMASK(12, 8)
+#define NCT6694_CANFD_SETTING_DBTP_DBRP GENMASK(20, 16)
+#define NCT6694_CANFD_SETTING_DBTP_TDC BIT(23)
+
+/* Command 01h - CAN Information */
+#define NCT6694_CANFD_INFORMATION 0x01
+#define NCT6694_CANFD_INFORMATION_SEL 0x00
+
+/* Command 02h - CAN Event */
+#define NCT6694_CANFD_EVENT 0x02
+#define NCT6694_CANFD_EVENT_SEL(idx, mask) \
+ ((idx ? 0x80 : 0x00) | ((mask) & 0x7F))
+
+#define NCT6694_CANFD_EVENT_MASK GENMASK(5, 0)
+#define NCT6694_CANFD_EVT_TX_FIFO_EMPTY BIT(7) /* Read-clear */
+#define NCT6694_CANFD_EVT_RX_DATA_LOST BIT(5) /* Read-clear */
+#define NCT6694_CANFD_EVT_RX_DATA_IN BIT(7) /* Read-clear */
+
+/* Command 10h - CAN Deliver */
+#define NCT6694_CANFD_DELIVER 0x10
+#define NCT6694_CANFD_DELIVER_SEL(buf_cnt) \
+ ((buf_cnt) & 0xFF)
+
+/* Command 11h - CAN Receive */
+#define NCT6694_CANFD_RECEIVE 0x11
+#define NCT6694_CANFD_RECEIVE_SEL(idx, buf_cnt) \
+ ((idx ? 0x80 : 0x00) | ((buf_cnt) & 0x7F))
+
+#define NCT6694_CANFD_FRAME_TAG(idx) (0xC0 | (idx))
+#define NCT6694_CANFD_FRAME_FLAG_EFF BIT(0)
+#define NCT6694_CANFD_FRAME_FLAG_RTR BIT(1)
+#define NCT6694_CANFD_FRAME_FLAG_FD BIT(2)
+#define NCT6694_CANFD_FRAME_FLAG_BRS BIT(3)
+#define NCT6694_CANFD_FRAME_FLAG_ERR BIT(4)
+
+#define NCT6694_NAPI_WEIGHT 32
+
+enum nct6694_event_err {
+ NCT6694_CANFD_EVT_ERR_NO_ERROR = 0,
+ NCT6694_CANFD_EVT_ERR_CRC_ERROR,
+ NCT6694_CANFD_EVT_ERR_STUFF_ERROR,
+ NCT6694_CANFD_EVT_ERR_ACK_ERROR,
+ NCT6694_CANFD_EVT_ERR_FORM_ERROR,
+ NCT6694_CANFD_EVT_ERR_BIT_ERROR,
+ NCT6694_CANFD_EVT_ERR_TIMEOUT_ERROR,
+ NCT6694_CANFD_EVT_ERR_UNKNOWN_ERROR,
+};
+
+enum nct6694_event_status {
+ NCT6694_CANFD_EVT_STS_ERROR_ACTIVE = 0,
+ NCT6694_CANFD_EVT_STS_ERROR_PASSIVE,
+ NCT6694_CANFD_EVT_STS_BUS_OFF,
+ NCT6694_CANFD_EVT_STS_WARNING,
+};
+
+struct __packed nct6694_canfd_setting {
+ __le32 nbr;
+ __le32 dbr;
+ u8 active;
+ u8 reserved[3];
+ __le16 ctrl1;
+ __le16 ctrl2;
+ __le32 nbtp;
+ __le32 dbtp;
+};
+
+struct __packed nct6694_canfd_information {
+ u8 tx_fifo_cnt;
+ u8 rx_fifo_cnt;
+ u8 reserved[2];
+ __le32 can_clk;
+};
+
+struct __packed nct6694_canfd_event {
+ u8 err;
+ u8 status;
+ u8 tx_evt;
+ u8 rx_evt;
+ u8 rec;
+ u8 tec;
+ u8 reserved[2];
+};
+
+struct __packed nct6694_canfd_frame {
+ u8 tag;
+ u8 flag;
+ u8 reserved;
+ u8 length;
+ __le32 id;
+ u8 data[CANFD_MAX_DLEN];
+};
+
+struct nct6694_canfd_priv {
+ struct can_priv can; /* must be the first member */
+ struct can_rx_offload offload;
+ struct net_device *ndev;
+ struct nct6694 *nct6694;
+ struct workqueue_struct *wq;
+ struct work_struct tx_work;
+ struct nct6694_canfd_frame tx;
+ struct nct6694_canfd_frame rx;
+ struct nct6694_canfd_event event[2];
+ struct can_berr_counter bec;
+};
+
+static inline struct nct6694_canfd_priv *rx_offload_to_priv(struct can_rx_offload *offload)
+{
+ return container_of(offload, struct nct6694_canfd_priv, offload);
+}
+
+static const struct can_bittiming_const nct6694_canfd_bittiming_nominal_const = {
+ .name = DEVICE_NAME,
+ .tseg1_min = 1,
+ .tseg1_max = 256,
+ .tseg2_min = 1,
+ .tseg2_max = 128,
+ .sjw_max = 128,
+ .brp_min = 1,
+ .brp_max = 512,
+ .brp_inc = 1,
+};
+
+static const struct can_bittiming_const nct6694_canfd_bittiming_data_const = {
+ .name = DEVICE_NAME,
+ .tseg1_min = 1,
+ .tseg1_max = 32,
+ .tseg2_min = 1,
+ .tseg2_max = 16,
+ .sjw_max = 16,
+ .brp_min = 1,
+ .brp_max = 32,
+ .brp_inc = 1,
+};
+
+static void nct6694_canfd_rx_offload(struct can_rx_offload *offload,
+ struct sk_buff *skb)
+{
+ struct nct6694_canfd_priv *priv = rx_offload_to_priv(offload);
+ int ret;
+
+ ret = can_rx_offload_queue_tail(offload, skb);
+ if (ret)
+ priv->ndev->stats.rx_fifo_errors++;
+}
+
+static void nct6694_canfd_handle_lost_msg(struct net_device *ndev)
+{
+ struct nct6694_canfd_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ netdev_dbg(ndev, "RX FIFO overflow, message(s) lost.\n");
+
+ stats->rx_errors++;
+ stats->rx_over_errors++;
+
+ skb = alloc_can_err_skb(ndev, &cf);
+ if (!skb)
+ return;
+
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+
+ nct6694_canfd_rx_offload(&priv->offload, skb);
+}
+
+static void nct6694_canfd_handle_rx(struct net_device *ndev, u8 rx_evt)
+{
+ struct net_device_stats *stats = &ndev->stats;
+ struct nct6694_canfd_priv *priv = netdev_priv(ndev);
+ struct nct6694_canfd_frame *frame = &priv->rx;
+ const struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_CANFD_MOD,
+ .cmd = NCT6694_CANFD_RECEIVE,
+ .sel = NCT6694_CANFD_RECEIVE_SEL(ndev->dev_port, 1),
+ .len = cpu_to_le16(sizeof(*frame))
+ };
+ struct sk_buff *skb;
+ int ret;
+
+ ret = nct6694_read_msg(priv->nct6694, &cmd_hd, frame);
+ if (ret)
+ return;
+
+ if (frame->flag & NCT6694_CANFD_FRAME_FLAG_FD) {
+ struct canfd_frame *cfd;
+
+ skb = alloc_canfd_skb(priv->ndev, &cfd);
+ if (!skb) {
+ stats->rx_dropped++;
+ return;
+ }
+
+ cfd->can_id = le32_to_cpu(frame->id);
+ cfd->len = canfd_sanitize_len(frame->length);
+ if (frame->flag & NCT6694_CANFD_FRAME_FLAG_EFF)
+ cfd->can_id |= CAN_EFF_FLAG;
+ if (frame->flag & NCT6694_CANFD_FRAME_FLAG_BRS)
+ cfd->flags |= CANFD_BRS;
+ if (frame->flag & NCT6694_CANFD_FRAME_FLAG_ERR)
+ cfd->flags |= CANFD_ESI;
+
+ memcpy(cfd->data, frame->data, cfd->len);
+ } else {
+ struct can_frame *cf;
+
+ skb = alloc_can_skb(priv->ndev, &cf);
+ if (!skb) {
+ stats->rx_dropped++;
+ return;
+ }
+
+ cf->can_id = le32_to_cpu(frame->id);
+ cf->len = can_cc_dlc2len(frame->length);
+ if (frame->flag & NCT6694_CANFD_FRAME_FLAG_EFF)
+ cf->can_id |= CAN_EFF_FLAG;
+
+ if (frame->flag & NCT6694_CANFD_FRAME_FLAG_RTR)
+ cf->can_id |= CAN_RTR_FLAG;
+ else
+ memcpy(cf->data, frame->data, cf->len);
+ }
+
+ nct6694_canfd_rx_offload(&priv->offload, skb);
+}
+
+static int nct6694_canfd_get_berr_counter(const struct net_device *ndev,
+ struct can_berr_counter *bec)
+{
+ struct nct6694_canfd_priv *priv = netdev_priv(ndev);
+
+ *bec = priv->bec;
+
+ return 0;
+}
+
+static void nct6694_canfd_handle_state_change(struct net_device *ndev, u8 status)
+{
+ struct nct6694_canfd_priv *priv = netdev_priv(ndev);
+ enum can_state new_state, rx_state, tx_state;
+ struct can_berr_counter bec;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ nct6694_canfd_get_berr_counter(ndev, &bec);
+ can_state_get_by_berr_counter(ndev, &bec, &tx_state, &rx_state);
+
+ new_state = max(tx_state, rx_state);
+
+ /* state hasn't changed */
+ if (new_state == priv->can.state)
+ return;
+
+ skb = alloc_can_err_skb(ndev, &cf);
+
+ can_change_state(ndev, cf, tx_state, rx_state);
+
+ if (new_state == CAN_STATE_BUS_OFF) {
+ can_bus_off(ndev);
+ } else if (cf) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ }
+
+ if (skb)
+ nct6694_canfd_rx_offload(&priv->offload, skb);
+}
+
+static void nct6694_canfd_handle_bus_err(struct net_device *ndev, u8 bus_err)
+{
+ struct nct6694_canfd_priv *priv = netdev_priv(ndev);
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ priv->can.can_stats.bus_error++;
+
+ skb = alloc_can_err_skb(ndev, &cf);
+ if (cf)
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+
+ switch (bus_err) {
+ case NCT6694_CANFD_EVT_ERR_CRC_ERROR:
+ netdev_dbg(ndev, "CRC error\n");
+ ndev->stats.rx_errors++;
+ if (cf)
+ cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
+ break;
+
+ case NCT6694_CANFD_EVT_ERR_STUFF_ERROR:
+ netdev_dbg(ndev, "Stuff error\n");
+ ndev->stats.rx_errors++;
+ if (cf)
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ break;
+
+ case NCT6694_CANFD_EVT_ERR_ACK_ERROR:
+ netdev_dbg(ndev, "Ack error\n");
+ ndev->stats.tx_errors++;
+ if (cf) {
+ cf->can_id |= CAN_ERR_ACK;
+ cf->data[2] |= CAN_ERR_PROT_TX;
+ }
+ break;
+
+ case NCT6694_CANFD_EVT_ERR_FORM_ERROR:
+ netdev_dbg(ndev, "Form error\n");
+ ndev->stats.rx_errors++;
+ if (cf)
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ break;
+
+ case NCT6694_CANFD_EVT_ERR_BIT_ERROR:
+ netdev_dbg(ndev, "Bit error\n");
+ ndev->stats.tx_errors++;
+ if (cf)
+ cf->data[2] |= CAN_ERR_PROT_TX | CAN_ERR_PROT_BIT;
+ break;
+
+ default:
+ break;
+ }
+
+ if (skb)
+ nct6694_canfd_rx_offload(&priv->offload, skb);
+}
+
+static void nct6694_canfd_handle_tx(struct net_device *ndev)
+{
+ struct nct6694_canfd_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+
+ stats->tx_bytes += can_rx_offload_get_echo_skb_queue_tail(&priv->offload,
+ 0, NULL);
+ stats->tx_packets++;
+ netif_wake_queue(ndev);
+}
+
+static irqreturn_t nct6694_canfd_irq(int irq, void *data)
+{
+ struct net_device *ndev = data;
+ struct nct6694_canfd_priv *priv = netdev_priv(ndev);
+ struct nct6694_canfd_event *event = &priv->event[ndev->dev_port];
+ const struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_CANFD_MOD,
+ .cmd = NCT6694_CANFD_EVENT,
+ .sel = NCT6694_CANFD_EVENT_SEL(ndev->dev_port, NCT6694_CANFD_EVENT_MASK),
+ .len = cpu_to_le16(sizeof(priv->event))
+ };
+ irqreturn_t handled = IRQ_NONE;
+ int ret;
+
+ ret = nct6694_read_msg(priv->nct6694, &cmd_hd, priv->event);
+ if (ret < 0)
+ return handled;
+
+ if (event->rx_evt & NCT6694_CANFD_EVT_RX_DATA_IN) {
+ nct6694_canfd_handle_rx(ndev, event->rx_evt);
+ handled = IRQ_HANDLED;
+ }
+
+ if (event->rx_evt & NCT6694_CANFD_EVT_RX_DATA_LOST) {
+ nct6694_canfd_handle_lost_msg(ndev);
+ handled = IRQ_HANDLED;
+ }
+
+ if (event->status) {
+ nct6694_canfd_handle_state_change(ndev, event->status);
+ handled = IRQ_HANDLED;
+ }
+
+ if (event->err != NCT6694_CANFD_EVT_ERR_NO_ERROR) {
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+ nct6694_canfd_handle_bus_err(ndev, event->err);
+ handled = IRQ_HANDLED;
+ }
+
+ if (event->tx_evt & NCT6694_CANFD_EVT_TX_FIFO_EMPTY) {
+ nct6694_canfd_handle_tx(ndev);
+ handled = IRQ_HANDLED;
+ }
+
+ if (handled)
+ can_rx_offload_threaded_irq_finish(&priv->offload);
+
+ priv->bec.rxerr = event->rec;
+ priv->bec.txerr = event->tec;
+
+ return handled;
+}
+
+static void nct6694_canfd_tx_work(struct work_struct *work)
+{
+ struct nct6694_canfd_priv *priv = container_of(work,
+ struct nct6694_canfd_priv,
+ tx_work);
+ struct nct6694_canfd_frame *frame = &priv->tx;
+ struct net_device *ndev = priv->ndev;
+ struct net_device_stats *stats = &ndev->stats;
+ struct sk_buff *skb = priv->can.echo_skb[0];
+ static const struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_CANFD_MOD,
+ .cmd = NCT6694_CANFD_DELIVER,
+ .sel = NCT6694_CANFD_DELIVER_SEL(1),
+ .len = cpu_to_le16(sizeof(*frame))
+ };
+ u32 txid;
+ int err;
+
+ memset(frame, 0, sizeof(*frame));
+
+ frame->tag = NCT6694_CANFD_FRAME_TAG(ndev->dev_port);
+
+ if (can_is_canfd_skb(skb)) {
+ struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+
+ if (cfd->flags & CANFD_BRS)
+ frame->flag |= NCT6694_CANFD_FRAME_FLAG_BRS;
+
+ if (cfd->can_id & CAN_EFF_FLAG) {
+ txid = cfd->can_id & CAN_EFF_MASK;
+ frame->flag |= NCT6694_CANFD_FRAME_FLAG_EFF;
+ } else {
+ txid = cfd->can_id & CAN_SFF_MASK;
+ }
+ frame->flag |= NCT6694_CANFD_FRAME_FLAG_FD;
+ frame->id = cpu_to_le32(txid);
+ frame->length = canfd_sanitize_len(cfd->len);
+
+ memcpy(frame->data, cfd->data, frame->length);
+ } else {
+ struct can_frame *cf = (struct can_frame *)skb->data;
+
+ if (cf->can_id & CAN_EFF_FLAG) {
+ txid = cf->can_id & CAN_EFF_MASK;
+ frame->flag |= NCT6694_CANFD_FRAME_FLAG_EFF;
+ } else {
+ txid = cf->can_id & CAN_SFF_MASK;
+ }
+
+ if (cf->can_id & CAN_RTR_FLAG)
+ frame->flag |= NCT6694_CANFD_FRAME_FLAG_RTR;
+ else
+ memcpy(frame->data, cf->data, cf->len);
+
+ frame->id = cpu_to_le32(txid);
+ frame->length = cf->len;
+ }
+
+ err = nct6694_write_msg(priv->nct6694, &cmd_hd, frame);
+ if (err) {
+ can_free_echo_skb(ndev, 0, NULL);
+ stats->tx_dropped++;
+ stats->tx_errors++;
+ netif_wake_queue(ndev);
+ }
+}
+
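+/* Only one TX frame is in flight at a time: stop the queue, stash the echo
+ * skb in slot 0 and let the workqueue deliver it over USB.
+ */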
+static netdev_tx_t nct6694_canfd_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct nct6694_canfd_priv *priv = netdev_priv(ndev);
+
+ if (can_dev_dropped_skb(ndev, skb))
+ return NETDEV_TX_OK;
+
+ netif_stop_queue(ndev);
+ can_put_echo_skb(skb, ndev, 0, 0);
+ queue_work(priv->wq, &priv->tx_work);
+
+ return NETDEV_TX_OK;
+}
+
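+/* Apply the configured ctrlmode flags and bit timing to the device and move
+ * the interface to ERROR_ACTIVE. The SJW/BRP/TSEG fields of the NBTP/DBTP
+ * words are programmed as (value - 1), and transmitter delay compensation is
+ * enabled for data bit rate prescalers of 2 or less.
+ */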
+static int nct6694_canfd_start(struct net_device *ndev)
+{
+ struct nct6694_canfd_priv *priv = netdev_priv(ndev);
+ const struct can_bittiming *n_bt = &priv->can.bittiming;
+ const struct can_bittiming *d_bt = &priv->can.fd.data_bittiming;
+ struct nct6694_canfd_setting *setting __free(kfree) = NULL;
+ const struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_CANFD_MOD,
+ .cmd = NCT6694_CANFD_SETTING,
+ .sel = ndev->dev_port,
+ .len = cpu_to_le16(sizeof(*setting))
+ };
+ u32 en_tdc;
+ int ret;
+
+ setting = kzalloc(sizeof(*setting), GFP_KERNEL);
+ if (!setting)
+ return -ENOMEM;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ setting->ctrl1 |= cpu_to_le16(NCT6694_CANFD_SETTING_CTRL1_MON);
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
+ setting->ctrl1 |= cpu_to_le16(NCT6694_CANFD_SETTING_CTRL1_NISO);
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
+ setting->ctrl1 |= cpu_to_le16(NCT6694_CANFD_SETTING_CTRL1_LBCK);
+
+ /* Disable clock divider */
+ setting->ctrl2 = 0;
+
+ setting->nbtp = cpu_to_le32(FIELD_PREP(NCT6694_CANFD_SETTING_NBTP_NSJW,
+ n_bt->sjw - 1) |
+ FIELD_PREP(NCT6694_CANFD_SETTING_NBTP_NBRP,
+ n_bt->brp - 1) |
+ FIELD_PREP(NCT6694_CANFD_SETTING_NBTP_NTSEG2,
+ n_bt->phase_seg2 - 1) |
+ FIELD_PREP(NCT6694_CANFD_SETTING_NBTP_NTSEG1,
+ n_bt->prop_seg + n_bt->phase_seg1 - 1));
+
+ if (d_bt->brp <= 2)
+ en_tdc = NCT6694_CANFD_SETTING_DBTP_TDC;
+ else
+ en_tdc = 0;
+
+ setting->dbtp = cpu_to_le32(FIELD_PREP(NCT6694_CANFD_SETTING_DBTP_DSJW,
+ d_bt->sjw - 1) |
+ FIELD_PREP(NCT6694_CANFD_SETTING_DBTP_DBRP,
+ d_bt->brp - 1) |
+ FIELD_PREP(NCT6694_CANFD_SETTING_DBTP_DTSEG2,
+ d_bt->phase_seg2 - 1) |
+ FIELD_PREP(NCT6694_CANFD_SETTING_DBTP_DTSEG1,
+ d_bt->prop_seg + d_bt->phase_seg1 - 1) |
+ en_tdc);
+
+ setting->active = NCT6694_CANFD_SETTING_ACTIVE_CTRL1 |
+ NCT6694_CANFD_SETTING_ACTIVE_CTRL2 |
+ NCT6694_CANFD_SETTING_ACTIVE_NBTP_DBTP;
+
+ ret = nct6694_write_msg(priv->nct6694, &cmd_hd, setting);
+ if (ret)
+ return ret;
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ return 0;
+}
+
+static void nct6694_canfd_stop(struct net_device *ndev)
+{
+ struct nct6694_canfd_priv *priv = netdev_priv(ndev);
+ struct nct6694_canfd_setting *setting __free(kfree) = NULL;
+ const struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_CANFD_MOD,
+ .cmd = NCT6694_CANFD_SETTING,
+ .sel = ndev->dev_port,
+ .len = cpu_to_le16(sizeof(*setting))
+ };
+
+ /* The NCT6694 cannot be stopped. To ensure safe operation and avoid
+ * interference, the control mode is set to Listen-Only mode. This
+ * mode allows the device to monitor bus activity without actively
+ * participating in communication.
+ */
+ setting = kzalloc(sizeof(*setting), GFP_KERNEL);
+ if (!setting)
+ return;
+
+ nct6694_read_msg(priv->nct6694, &cmd_hd, setting);
+ setting->ctrl1 = cpu_to_le16(NCT6694_CANFD_SETTING_CTRL1_MON);
+ setting->active = NCT6694_CANFD_SETTING_ACTIVE_CTRL1;
+ nct6694_write_msg(priv->nct6694, &cmd_hd, setting);
+
+ priv->can.state = CAN_STATE_STOPPED;
+}
+
+static int nct6694_canfd_close(struct net_device *ndev)
+{
+ struct nct6694_canfd_priv *priv = netdev_priv(ndev);
+
+ netif_stop_queue(ndev);
+ nct6694_canfd_stop(ndev);
+ destroy_workqueue(priv->wq);
+ free_irq(ndev->irq, ndev);
+ can_rx_offload_disable(&priv->offload);
+ close_candev(ndev);
+ return 0;
+}
+
+static int nct6694_canfd_set_mode(struct net_device *ndev, enum can_mode mode)
+{
+ int ret;
+
+ switch (mode) {
+ case CAN_MODE_START:
+ ret = nct6694_canfd_start(ndev);
+ if (ret)
+ return ret;
+
+ netif_wake_queue(ndev);
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int nct6694_canfd_open(struct net_device *ndev)
+{
+ struct nct6694_canfd_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ ret = open_candev(ndev);
+ if (ret)
+ return ret;
+
+ can_rx_offload_enable(&priv->offload);
+
+ ret = request_threaded_irq(ndev->irq, NULL,
+ nct6694_canfd_irq, IRQF_ONESHOT,
+ "nct6694_canfd", ndev);
+ if (ret) {
+ netdev_err(ndev, "Failed to request IRQ\n");
+ goto can_rx_offload_disable;
+ }
+
+ priv->wq = alloc_ordered_workqueue("%s-nct6694_wq",
+ WQ_FREEZABLE | WQ_MEM_RECLAIM,
+ ndev->name);
+ if (!priv->wq) {
+ ret = -ENOMEM;
+ goto free_irq;
+ }
+
+ ret = nct6694_canfd_start(ndev);
+ if (ret)
+ goto destroy_wq;
+
+ netif_start_queue(ndev);
+
+ return 0;
+
+destroy_wq:
+ destroy_workqueue(priv->wq);
+free_irq:
+ free_irq(ndev->irq, ndev);
+can_rx_offload_disable:
+ can_rx_offload_disable(&priv->offload);
+ close_candev(ndev);
+ return ret;
+}
+
+static const struct net_device_ops nct6694_canfd_netdev_ops = {
+ .ndo_open = nct6694_canfd_open,
+ .ndo_stop = nct6694_canfd_close,
+ .ndo_start_xmit = nct6694_canfd_start_xmit,
+};
+
+static const struct ethtool_ops nct6694_canfd_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
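+/* Query the device information block and return the CAN clock frequency in
+ * Hz, or a negative error code.
+ */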
+static int nct6694_canfd_get_clock(struct nct6694_canfd_priv *priv)
+{
+ struct nct6694_canfd_information *info __free(kfree) = NULL;
+ static const struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_CANFD_MOD,
+ .cmd = NCT6694_CANFD_INFORMATION,
+ .sel = NCT6694_CANFD_INFORMATION_SEL,
+ .len = cpu_to_le16(sizeof(*info))
+ };
+ int ret;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ ret = nct6694_read_msg(priv->nct6694, &cmd_hd, info);
+ if (ret)
+ return ret;
+
+ return le32_to_cpu(info->can_clk);
+}
+
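+/* Bind one CAN FD port of the parent NCT6694 device: reserve a port index,
+ * map its interrupt, allocate the candev, query the CAN clock and register
+ * the network device.
+ */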
+static int nct6694_canfd_probe(struct platform_device *pdev)
+{
+ struct nct6694 *nct6694 = dev_get_drvdata(pdev->dev.parent);
+ struct nct6694_canfd_priv *priv;
+ struct net_device *ndev;
+ int port, irq, ret, can_clk;
+
+ port = ida_alloc(&nct6694->canfd_ida, GFP_KERNEL);
+ if (port < 0)
+ return port;
+
+ irq = irq_create_mapping(nct6694->domain,
+ NCT6694_IRQ_CAN0 + port);
+ if (!irq) {
+ ret = -EINVAL;
+ goto free_ida;
+ }
+
+ ndev = alloc_candev(sizeof(struct nct6694_canfd_priv), 1);
+ if (!ndev) {
+ ret = -ENOMEM;
+ goto dispose_irq;
+ }
+
+ ndev->irq = irq;
+ ndev->flags |= IFF_ECHO;
+ ndev->dev_port = port;
+ ndev->netdev_ops = &nct6694_canfd_netdev_ops;
+ ndev->ethtool_ops = &nct6694_canfd_ethtool_ops;
+
+ priv = netdev_priv(ndev);
+ priv->nct6694 = nct6694;
+ priv->ndev = ndev;
+
+ can_clk = nct6694_canfd_get_clock(priv);
+ if (can_clk < 0) {
+ ret = dev_err_probe(&pdev->dev, can_clk,
+ "Failed to get clock\n");
+ goto free_candev;
+ }
+
+ INIT_WORK(&priv->tx_work, nct6694_canfd_tx_work);
+
+ priv->can.clock.freq = can_clk;
+ priv->can.bittiming_const = &nct6694_canfd_bittiming_nominal_const;
+ priv->can.fd.data_bittiming_const = &nct6694_canfd_bittiming_data_const;
+ priv->can.do_set_mode = nct6694_canfd_set_mode;
+ priv->can.do_get_berr_counter = nct6694_canfd_get_berr_counter;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
+ CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING |
+ CAN_CTRLMODE_FD_NON_ISO;
+
+ ret = can_set_static_ctrlmode(ndev, CAN_CTRLMODE_FD);
+ if (ret)
+ goto free_candev;
+
+ ret = can_rx_offload_add_manual(ndev, &priv->offload,
+ NCT6694_NAPI_WEIGHT);
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret, "Failed to add rx_offload\n");
+ goto free_candev;
+ }
+
+ platform_set_drvdata(pdev, priv);
+ SET_NETDEV_DEV(priv->ndev, &pdev->dev);
+
+ ret = register_candev(priv->ndev);
+ if (ret)
+ goto rx_offload_del;
+
+ return 0;
+
+rx_offload_del:
+ can_rx_offload_del(&priv->offload);
+free_candev:
+ free_candev(ndev);
+dispose_irq:
+ irq_dispose_mapping(irq);
+free_ida:
+ ida_free(&nct6694->canfd_ida, port);
+ return ret;
+}
+
+static void nct6694_canfd_remove(struct platform_device *pdev)
+{
+ struct nct6694_canfd_priv *priv = platform_get_drvdata(pdev);
+ struct nct6694 *nct6694 = priv->nct6694;
+ struct net_device *ndev = priv->ndev;
+ int port = ndev->dev_port;
+ int irq = ndev->irq;
+
+ unregister_candev(ndev);
+ can_rx_offload_del(&priv->offload);
+ free_candev(ndev);
+ irq_dispose_mapping(irq);
+ ida_free(&nct6694->canfd_ida, port);
+}
+
+static struct platform_driver nct6694_canfd_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ },
+ .probe = nct6694_canfd_probe,
+ .remove = nct6694_canfd_remove,
+};
+
+module_platform_driver(nct6694_canfd_driver);
+
+MODULE_DESCRIPTION("USB-CAN FD driver for NCT6694");
+MODULE_AUTHOR("Ming Yu <tmyu0@nuvoton.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index b211b6e283a2..9278a1522aae 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -3,12 +3,12 @@
* CAN driver for PEAK System PCAN-USB adapter
* Derived from the PCAN project file driver/src/pcan_usb.c
*
- * Copyright (C) 2003-2010 PEAK System-Technik GmbH
- * Copyright (C) 2011-2012 Stephane Grosjean <s.grosjean@peak-system.com>
+ * Copyright (C) 2003-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*
* Many thanks to Klaus Hitschler <klaus.hitschler@gmx.de>
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/ethtool.h>
#include <linux/module.h>
@@ -319,7 +319,7 @@ static int pcan_usb_write_mode(struct peak_usb_device *dev, u8 onoff)
*/
static void pcan_usb_restart(struct timer_list *t)
{
- struct pcan_usb *pdev = from_timer(pdev, t, restart_timer);
+ struct pcan_usb *pdev = timer_container_of(pdev, t, restart_timer);
struct peak_usb_device *dev = &pdev->dev;
/* notify candev and netdev */
@@ -919,7 +919,7 @@ static int pcan_usb_init(struct peak_usb_device *dev)
CAN_CTRLMODE_LOOPBACK;
} else {
dev_info(dev->netdev->dev.parent,
- "Firmware update available. Please contact support@peak-system.com\n");
+ "Firmware update available. Please contact support.peak@hms-networks.com\n");
}
return 0;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 1efa39e134f4..cf48bb26d46d 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -3,8 +3,8 @@
* CAN driver for PEAK System USB adapters
* Derived from the PCAN project file driver/src/pcan_usb_core.c
*
- * Copyright (C) 2003-2010 PEAK System-Technik GmbH
- * Copyright (C) 2010-2012 Stephane Grosjean <s.grosjean@peak-system.com>
+ * Copyright (C) 2003-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*
* Many thanks to Klaus Hitschler <klaus.hitschler@gmx.de>
*/
@@ -24,7 +24,7 @@
#include "pcan_usb_core.h"
-MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>");
+MODULE_AUTHOR("Stéphane Grosjean <stephane.grosjean@hms-networks.com>");
MODULE_DESCRIPTION("CAN driver for PEAK-System USB adapters");
MODULE_LICENSE("GPL v2");
@@ -111,7 +111,7 @@ void peak_usb_update_ts_now(struct peak_time_ref *time_ref, u32 ts_now)
u32 delta_ts = time_ref->ts_dev_2 - time_ref->ts_dev_1;
if (time_ref->ts_dev_2 < time_ref->ts_dev_1)
- delta_ts &= (1 << time_ref->adapter->ts_used_bits) - 1;
+ delta_ts &= (1ULL << time_ref->adapter->ts_used_bits) - 1;
time_ref->ts_total += delta_ts;
}
@@ -770,7 +770,7 @@ static int peak_usb_set_data_bittiming(struct net_device *netdev)
const struct peak_usb_adapter *pa = dev->adapter;
if (pa->dev_set_data_bittiming) {
- struct can_bittiming *bt = &dev->can.data_bittiming;
+ struct can_bittiming *bt = &dev->can.fd.data_bittiming;
int err = pa->dev_set_data_bittiming(dev, bt);
if (err)
@@ -784,37 +784,33 @@ static int peak_usb_set_data_bittiming(struct net_device *netdev)
return 0;
}
-static int peak_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+static int peak_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
{
- struct hwtstamp_config hwts_cfg = { 0 };
-
- switch (cmd) {
- case SIOCSHWTSTAMP: /* set */
- if (copy_from_user(&hwts_cfg, ifr->ifr_data, sizeof(hwts_cfg)))
- return -EFAULT;
- if (hwts_cfg.tx_type == HWTSTAMP_TX_OFF &&
- hwts_cfg.rx_filter == HWTSTAMP_FILTER_ALL)
- return 0;
- return -ERANGE;
-
- case SIOCGHWTSTAMP: /* get */
- hwts_cfg.tx_type = HWTSTAMP_TX_OFF;
- hwts_cfg.rx_filter = HWTSTAMP_FILTER_ALL;
- if (copy_to_user(ifr->ifr_data, &hwts_cfg, sizeof(hwts_cfg)))
- return -EFAULT;
+ config->tx_type = HWTSTAMP_TX_OFF;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+
+ return 0;
+}
+
+static int peak_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ if (config->tx_type == HWTSTAMP_TX_OFF &&
+ config->rx_filter == HWTSTAMP_FILTER_ALL)
return 0;
- default:
- return -EOPNOTSUPP;
- }
+ NL_SET_ERR_MSG_MOD(extack, "Only RX HWTSTAMP_FILTER_ALL is supported");
+ return -ERANGE;
}
static const struct net_device_ops peak_usb_netdev_ops = {
.ndo_open = peak_usb_ndo_open,
.ndo_stop = peak_usb_ndo_stop,
- .ndo_eth_ioctl = peak_eth_ioctl,
.ndo_start_xmit = peak_usb_ndo_start_xmit,
- .ndo_change_mtu = can_change_mtu,
+ .ndo_hwtstamp_get = peak_hwtstamp_get,
+ .ndo_hwtstamp_set = peak_hwtstamp_set,
};
/* CAN-USB devices generally handle 32-bit CAN channel IDs.
@@ -897,15 +893,12 @@ int peak_usb_set_eeprom(struct net_device *netdev,
return 0;
}
-int pcan_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
+int pcan_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info)
{
info->so_timestamping =
SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
- info->phc_index = -1;
info->tx_types = BIT(HWTSTAMP_TX_OFF);
info->rx_filters = BIT(HWTSTAMP_FILTER_ALL);
@@ -957,8 +950,8 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
dev->can.clock = peak_usb_adapter->clock;
dev->can.bittiming_const = peak_usb_adapter->bittiming_const;
dev->can.do_set_bittiming = peak_usb_set_bittiming;
- dev->can.data_bittiming_const = peak_usb_adapter->data_bittiming_const;
- dev->can.do_set_data_bittiming = peak_usb_set_data_bittiming;
+ dev->can.fd.data_bittiming_const = peak_usb_adapter->data_bittiming_const;
+ dev->can.fd.do_set_data_bittiming = peak_usb_set_data_bittiming;
dev->can.do_set_mode = peak_usb_set_mode;
dev->can.do_get_berr_counter = peak_usb_adapter->do_get_berr_counter;
dev->can.ctrlmode_supported = peak_usb_adapter->ctrlmode_supported;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
index f6cf84bb718f..d1c1897d47b9 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
@@ -3,8 +3,8 @@
* CAN driver for PEAK System USB adapters
* Derived from the PCAN project file driver/src/pcan_usb_core.c
*
- * Copyright (C) 2003-2010 PEAK System-Technik GmbH
- * Copyright (C) 2010-2012 Stephane Grosjean <s.grosjean@peak-system.com>
+ * Copyright (C) 2003-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*
* Many thanks to Klaus Hitschler <klaus.hitschler@gmx.de>
*/
@@ -145,7 +145,7 @@ void peak_usb_get_ts_time(struct peak_time_ref *time_ref, u32 ts, ktime_t *tv);
int peak_usb_netif_rx_64(struct sk_buff *skb, u32 ts_low, u32 ts_high);
void peak_usb_async_complete(struct urb *urb);
void peak_usb_restart_complete(struct peak_usb_device *dev);
-int pcan_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info);
+int pcan_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info);
/* common 32-bit CAN channel ID ethtool management */
int peak_usb_get_eeprom_len(struct net_device *netdev);
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index 4d85b29a17b7..be84191cde56 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -2,7 +2,8 @@
/*
* CAN driver for PEAK System PCAN-USB FD / PCAN-USB Pro FD adapter
*
- * Copyright (C) 2013-2014 Stephane Grosjean <s.grosjean@peak-system.com>
+ * Copyright (C) 2013-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*/
#include <linux/ethtool.h>
#include <linux/module.h>
@@ -49,7 +50,7 @@ struct __packed pcan_ufd_fw_info {
__le32 ser_no; /* S/N */
__le32 flags; /* special functions */
- /* extended data when type == PCAN_USBFD_TYPE_EXT */
+ /* extended data when type >= PCAN_USBFD_TYPE_EXT */
u8 cmd_out_ep; /* ep for cmd */
u8 cmd_in_ep; /* ep for replies */
u8 data_out_ep[2]; /* ep for CANx TX */
@@ -982,10 +983,11 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
dev->can.ctrlmode |= CAN_CTRLMODE_FD_NON_ISO;
}
- /* if vendor rsp is of type 2, then it contains EP numbers to
- * use for cmds pipes. If not, then default EP should be used.
+ /* if vendor rsp type is greater than or equal to 2, then it
+ * contains EP numbers to use for cmds pipes. If not, then
+ * default EP should be used.
*/
- if (fw_info->type != cpu_to_le16(PCAN_USBFD_TYPE_EXT)) {
+ if (le16_to_cpu(fw_info->type) < PCAN_USBFD_TYPE_EXT) {
fw_info->cmd_out_ep = PCAN_USBPRO_EP_CMDOUT;
fw_info->cmd_in_ep = PCAN_USBPRO_EP_CMDIN;
}
@@ -1018,11 +1020,11 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
dev->can_channel_id =
le32_to_cpu(pdev->usb_if->fw_info.dev_id[dev->ctrl_idx]);
- /* if vendor rsp is of type 2, then it contains EP numbers to
- * use for data pipes. If not, then statically defined EP are used
- * (see peak_usb_create_dev()).
+ /* if vendor rsp type is greater than or equal to 2, then it contains EP
+ * numbers to use for data pipes. If not, then statically defined EP are
+ * used (see peak_usb_create_dev()).
*/
- if (fw_info->type == cpu_to_le16(PCAN_USBFD_TYPE_EXT)) {
+ if (le16_to_cpu(fw_info->type) >= PCAN_USBFD_TYPE_EXT) {
dev->ep_msg_in = fw_info->data_in_ep;
dev->ep_msg_out = fw_info->data_out_ep[dev->ctrl_idx];
}
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index f736196383ac..7be286293b1a 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -3,8 +3,8 @@
* CAN driver for PEAK System PCAN-USB Pro adapter
* Derived from the PCAN project file driver/src/pcan_usbpro.c
*
- * Copyright (C) 2003-2011 PEAK System-Technik GmbH
- * Copyright (C) 2011-2012 Stephane Grosjean <s.grosjean@peak-system.com>
+ * Copyright (C) 2003-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*/
#include <linux/ethtool.h>
#include <linux/module.h>
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.h b/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
index 28e740af905d..162c7546d3a8 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
@@ -3,8 +3,8 @@
* CAN driver for PEAK System PCAN-USB Pro adapter
* Derived from the PCAN project file driver/src/pcan_usbpro_fw.h
*
- * Copyright (C) 2003-2011 PEAK System-Technik GmbH
- * Copyright (C) 2011-2012 Stephane Grosjean <s.grosjean@peak-system.com>
+ * Copyright (C) 2003-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*/
#ifndef PCAN_USB_PRO_H
#define PCAN_USB_PRO_H
diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c
index 39a63b7313a4..de61d9da99e3 100644
--- a/drivers/net/can/usb/ucan.c
+++ b/drivers/net/can/usb/ucan.c
@@ -186,7 +186,7 @@ union ucan_ctl_payload {
*/
struct ucan_ctl_cmd_get_protocol_version cmd_get_protocol_version;
- u8 raw[128];
+ u8 fw_str[128];
} __packed;
enum {
@@ -424,18 +424,20 @@ static int ucan_ctrl_command_out(struct ucan_priv *up,
UCAN_USB_CTL_PIPE_TIMEOUT);
}
-static int ucan_device_request_in(struct ucan_priv *up,
- u8 cmd, u16 subcmd, u16 datalen)
+static void ucan_get_fw_str(struct ucan_priv *up, char *fw_str, size_t size)
{
- return usb_control_msg(up->udev,
- usb_rcvctrlpipe(up->udev, 0),
- cmd,
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- subcmd,
- 0,
- up->ctl_msg_buffer,
- datalen,
- UCAN_USB_CTL_PIPE_TIMEOUT);
+ int ret;
+
+ ret = usb_control_msg(up->udev, usb_rcvctrlpipe(up->udev, 0),
+ UCAN_DEVICE_GET_FW_STRING,
+ USB_DIR_IN | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE,
+ 0, 0, fw_str, size - 1,
+ UCAN_USB_CTL_PIPE_TIMEOUT);
+ if (ret > 0)
+ fw_str[ret] = '\0';
+ else
+ strscpy(fw_str, "unknown", size);
}
/* Parse the device information structure reported by the device and
@@ -1231,7 +1233,6 @@ static const struct net_device_ops ucan_netdev_ops = {
.ndo_open = ucan_open,
.ndo_stop = ucan_close,
.ndo_start_xmit = ucan_start_xmit,
- .ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops ucan_ethtool_ops = {
@@ -1314,7 +1315,6 @@ static int ucan_probe(struct usb_interface *intf,
u8 in_ep_addr;
u8 out_ep_addr;
union ucan_ctl_payload *ctl_msg_buffer;
- char firmware_str[sizeof(union ucan_ctl_payload) + 1];
udev = interface_to_usbdev(intf);
@@ -1527,17 +1527,6 @@ static int ucan_probe(struct usb_interface *intf,
*/
ucan_parse_device_info(up, &ctl_msg_buffer->cmd_get_device_info);
- /* just print some device information - if available */
- ret = ucan_device_request_in(up, UCAN_DEVICE_GET_FW_STRING, 0,
- sizeof(union ucan_ctl_payload));
- if (ret > 0) {
- /* copy string while ensuring zero termination */
- strscpy(firmware_str, up->ctl_msg_buffer->raw,
- sizeof(union ucan_ctl_payload) + 1);
- } else {
- strcpy(firmware_str, "unknown");
- }
-
/* device is compatible, reset it */
ret = ucan_ctrl_command_out(up, UCAN_COMMAND_RESET, 0, 0);
if (ret < 0)
@@ -1555,7 +1544,10 @@ static int ucan_probe(struct usb_interface *intf,
/* initialisation complete, log device info */
netdev_info(up->netdev, "registered device\n");
- netdev_info(up->netdev, "firmware string: %s\n", firmware_str);
+ ucan_get_fw_str(up, up->ctl_msg_buffer->fw_str,
+ sizeof(up->ctl_msg_buffer->fw_str));
+ netdev_info(up->netdev, "firmware string: %s\n",
+ up->ctl_msg_buffer->fw_str);
/* success */
return 0;
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index 8a5596ce4e46..7449328f7cd7 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -868,7 +868,6 @@ static const struct net_device_ops usb_8dev_netdev_ops = {
.ndo_open = usb_8dev_open,
.ndo_stop = usb_8dev_close,
.ndo_start_xmit = usb_8dev_start_xmit,
- .ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops usb_8dev_ethtool_ops = {
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index 285635c23443..fdc662aea279 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -140,7 +140,7 @@ static int vcan_change_mtu(struct net_device *dev, int new_mtu)
!can_is_canxl_dev_mtu(new_mtu))
return -EINVAL;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
@@ -156,7 +156,7 @@ static const struct ethtool_ops vcan_ethtool_ops = {
static void vcan_setup(struct net_device *dev)
{
dev->type = ARPHRD_CAN;
- dev->mtu = CANFD_MTU;
+ dev->mtu = CANXL_MTU;
dev->hard_header_len = 0;
dev->addr_len = 0;
dev->tx_queue_len = 0;
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index f7fabba707ea..b2c19f8c5f8e 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -135,7 +135,7 @@ static int vxcan_change_mtu(struct net_device *dev, int new_mtu)
!can_is_canxl_dev_mtu(new_mtu))
return -EINVAL;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
@@ -156,7 +156,7 @@ static void vxcan_setup(struct net_device *dev)
struct can_ml_priv *can_ml;
dev->type = ARPHRD_CAN;
- dev->mtu = CANFD_MTU;
+ dev->mtu = CANXL_MTU;
dev->hard_header_len = 0;
dev->addr_len = 0;
dev->tx_queue_len = 0;
@@ -172,13 +172,15 @@ static void vxcan_setup(struct net_device *dev)
/* forward declaration for rtnl_create_link() */
static struct rtnl_link_ops vxcan_link_ops;
-static int vxcan_newlink(struct net *net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int vxcan_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *peer_net = rtnl_newlink_peer_net(params);
+ struct nlattr **data = params->data;
+ struct nlattr **tb = params->tb;
struct vxcan_priv *priv;
struct net_device *peer;
- struct net *peer_net;
struct nlattr *peer_tb[IFLA_MAX + 1], **tbp = tb;
char ifname[IFNAMSIZ];
@@ -188,14 +190,10 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,
/* register peer device */
if (data && data[VXCAN_INFO_PEER]) {
- struct nlattr *nla_peer;
+ struct nlattr *nla_peer = data[VXCAN_INFO_PEER];
- nla_peer = data[VXCAN_INFO_PEER];
ifmp = nla_data(nla_peer);
- err = rtnl_nla_parse_ifinfomsg(peer_tb, nla_peer, extack);
- if (err < 0)
- return err;
-
+ rtnl_nla_parse_ifinfomsg(peer_tb, nla_peer, extack);
tbp = peer_tb;
}
@@ -207,23 +205,15 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,
name_assign_type = NET_NAME_ENUM;
}
- peer_net = rtnl_link_get_net(net, tbp);
- if (IS_ERR(peer_net))
- return PTR_ERR(peer_net);
-
peer = rtnl_create_link(peer_net, ifname, name_assign_type,
&vxcan_link_ops, tbp, extack);
- if (IS_ERR(peer)) {
- put_net(peer_net);
+ if (IS_ERR(peer))
return PTR_ERR(peer);
- }
if (ifmp && dev->ifindex)
peer->ifindex = ifmp->ifi_index;
err = register_netdevice(peer);
- put_net(peer_net);
- peer_net = NULL;
if (err < 0) {
free_netdev(peer);
return err;
@@ -302,6 +292,7 @@ static struct rtnl_link_ops vxcan_link_ops = {
.newlink = vxcan_newlink,
.dellink = vxcan_dellink,
.policy = vxcan_policy,
+ .peer_type = VXCAN_INFO_PEER,
.maxtype = VXCAN_INFO_MAX,
.get_link_net = vxcan_get_link_net,
};
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index fae0120473f8..43d7f22820b8 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -6,7 +6,7 @@
* Copyright (C) 2017 - 2018 Sandvik Mining and Construction Oy
*
* Description:
- * This driver is developed for Axi CAN IP and for Zynq CANPS Controller.
+ * This driver is developed for AXI CAN IP, AXI CANFD IP, CANPS and CANFD PS Controller.
*/
#include <linux/bitfield.h>
@@ -481,7 +481,7 @@ static int xcan_set_bittiming(struct net_device *ndev)
{
struct xcan_priv *priv = netdev_priv(ndev);
struct can_bittiming *bt = &priv->can.bittiming;
- struct can_bittiming *dbt = &priv->can.data_bittiming;
+ struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
u32 btr0, btr1;
u32 is_config_mode;
@@ -515,12 +515,12 @@ static int xcan_set_bittiming(struct net_device *ndev)
priv->devtype.cantype == XAXI_CANFD_2_0) {
/* Setting Baud Rate prescaler value in F_BRPR Register */
btr0 = dbt->brp - 1;
- if (can_tdc_is_enabled(&priv->can)) {
+ if (can_fd_tdc_is_enabled(&priv->can)) {
if (priv->devtype.cantype == XAXI_CANFD)
- btr0 |= FIELD_PREP(XCAN_BRPR_TDCO_MASK, priv->can.tdc.tdco) |
+ btr0 |= FIELD_PREP(XCAN_BRPR_TDCO_MASK, priv->can.fd.tdc.tdco) |
XCAN_BRPR_TDC_ENABLE;
else
- btr0 |= FIELD_PREP(XCAN_2_BRPR_TDCO_MASK, priv->can.tdc.tdco) |
+ btr0 |= FIELD_PREP(XCAN_2_BRPR_TDCO_MASK, priv->can.fd.tdc.tdco) |
XCAN_BRPR_TDC_ENABLE;
}
@@ -690,14 +690,6 @@ static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb,
dlc |= XCAN_DLCR_EDL_MASK;
}
- if (!(priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) &&
- (priv->devtype.flags & XCAN_FLAG_TXFEMP))
- can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max, 0);
- else
- can_put_echo_skb(skb, ndev, 0, 0);
-
- priv->tx_head++;
-
priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id);
/* If the CAN frame is RTR frame this write triggers transmission
* (not on CAN FD)
@@ -730,6 +722,14 @@ static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb,
data[1]);
}
}
+
+ if (!(priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) &&
+ (priv->devtype.flags & XCAN_FLAG_TXFEMP))
+ can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max, 0);
+ else
+ can_put_echo_skb(skb, ndev, 0, 0);
+
+ priv->tx_head++;
}
/**
@@ -1702,7 +1702,6 @@ static const struct net_device_ops xcan_netdev_ops = {
.ndo_open = xcan_open,
.ndo_stop = xcan_close,
.ndo_start_xmit = xcan_start_xmit,
- .ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops xcan_ethtool_ops = {
@@ -1967,22 +1966,22 @@ static int xcan_probe(struct platform_device *pdev)
goto err_free;
if (devtype->cantype == XAXI_CANFD) {
- priv->can.data_bittiming_const =
+ priv->can.fd.data_bittiming_const =
&xcan_data_bittiming_const_canfd;
- priv->can.tdc_const = &xcan_tdc_const_canfd;
+ priv->can.fd.tdc_const = &xcan_tdc_const_canfd;
}
if (devtype->cantype == XAXI_CANFD_2_0) {
- priv->can.data_bittiming_const =
+ priv->can.fd.data_bittiming_const =
&xcan_data_bittiming_const_canfd2;
- priv->can.tdc_const = &xcan_tdc_const_canfd2;
+ priv->can.fd.tdc_const = &xcan_tdc_const_canfd2;
}
if (devtype->cantype == XAXI_CANFD ||
devtype->cantype == XAXI_CANFD_2_0) {
priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD |
CAN_CTRLMODE_TDC_AUTO;
- priv->can.do_get_auto_tdcv = xcan_get_auto_tdcv;
+ priv->can.fd.do_get_auto_tdcv = xcan_get_auto_tdcv;
}
priv->reg_base = addr;
@@ -2103,7 +2102,7 @@ static void xcan_remove(struct platform_device *pdev)
static struct platform_driver xcan_driver = {
.probe = xcan_probe,
- .remove_new = xcan_remove,
+ .remove = xcan_remove,
.driver = {
.name = DRIVER_NAME,
.pm = &xcan_dev_pm_ops,
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 3092b391031a..7eb301fd987d 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -26,17 +26,12 @@ config NET_DSA_LOOP
source "drivers/net/dsa/hirschmann/Kconfig"
-config NET_DSA_LANTIQ_GSWIP
- tristate "Lantiq / Intel GSWIP"
- depends on HAS_IOMEM
- select NET_DSA_TAG_GSWIP
- help
- This enables support for the Lantiq / Intel GSWIP 2.1 found in
- the xrx200 / VR9 SoC.
+source "drivers/net/dsa/lantiq/Kconfig"
config NET_DSA_MT7530
tristate "MediaTek MT7530 and MT7531 Ethernet switch support"
select NET_DSA_TAG_MTK
+ select REGMAP_IRQ
imply NET_DSA_MT7530_MDIO
imply NET_DSA_MT7530_MMIO
help
@@ -91,17 +86,26 @@ source "drivers/net/dsa/realtek/Kconfig"
config NET_DSA_RZN1_A5PSW
tristate "Renesas RZ/N1 A5PSW Ethernet switch support"
- depends on OF && ARCH_RZN1
+ depends on OF && (ARCH_RZN1 || COMPILE_TEST)
select NET_DSA_TAG_RZN1_A5PSW
select PCS_RZN1_MIIC
help
This driver supports the A5PSW switch, which is embedded in Renesas
RZ/N1 SoC.
+config NET_DSA_KS8995
+	tristate "Micrel KS8995 family 5-port 10/100 Ethernet switches"
+ depends on SPI
+ select NET_DSA_TAG_NONE
+ help
+ This driver supports the Micrel KS8995 family of 10/100 Mbit ethernet
+ switches, managed over SPI.
+
config NET_DSA_SMSC_LAN9303
tristate
select NET_DSA_TAG_LAN9303
select REGMAP
+ imply SMSC_PHY
help
This enables support for the Microchip LAN9303/LAN9354 3 port ethernet
switch chips.
@@ -126,7 +130,7 @@ config NET_DSA_SMSC_LAN9303_MDIO
config NET_DSA_VITESSE_VSC73XX
tristate
- select NET_DSA_TAG_NONE
+ select NET_DSA_TAG_VSC73XX_8021Q
select FIXED_PHY
select VITESSE_PHY
select GPIOLIB
@@ -150,4 +154,11 @@ config NET_DSA_VITESSE_VSC73XX_PLATFORM
This enables support for the Vitesse VSC7385, VSC7388, VSC7395
and VSC7398 SparX integrated ethernet switches, connected over
a CPU-attached address bus and work in memory-mapped I/O mode.
+
+config NET_DSA_YT921X
+ tristate "Motorcomm YT9215 ethernet switch chip support"
+ select NET_DSA_TAG_YT921X
+ help
+ This enables support for the Motorcomm YT9215 ethernet switch
+ chip.
endmenu
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index cb9a97340e58..16de4ba3fa38 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -2,10 +2,7 @@
obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm-sf2.o
bcm-sf2-objs := bcm_sf2.o bcm_sf2_cfp.o
obj-$(CONFIG_NET_DSA_LOOP) += dsa_loop.o
-ifdef CONFIG_NET_DSA_LOOP
-obj-$(CONFIG_FIXED_PHY) += dsa_loop_bdinfo.o
-endif
-obj-$(CONFIG_NET_DSA_LANTIQ_GSWIP) += lantiq_gswip.o
+obj-$(CONFIG_NET_DSA_KS8995) += ks8995.o
obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o
obj-$(CONFIG_NET_DSA_MT7530_MDIO) += mt7530-mdio.o
obj-$(CONFIG_NET_DSA_MT7530_MMIO) += mt7530-mmio.o
@@ -17,8 +14,10 @@ obj-$(CONFIG_NET_DSA_SMSC_LAN9303_MDIO) += lan9303_mdio.o
obj-$(CONFIG_NET_DSA_VITESSE_VSC73XX) += vitesse-vsc73xx-core.o
obj-$(CONFIG_NET_DSA_VITESSE_VSC73XX_PLATFORM) += vitesse-vsc73xx-platform.o
obj-$(CONFIG_NET_DSA_VITESSE_VSC73XX_SPI) += vitesse-vsc73xx-spi.o
+obj-$(CONFIG_NET_DSA_YT921X) += yt921x.o
obj-y += b53/
obj-y += hirschmann/
+obj-y += lantiq/
obj-y += microchip/
obj-y += mv88e6xxx/
obj-y += ocelot/
diff --git a/drivers/net/dsa/b53/Kconfig b/drivers/net/dsa/b53/Kconfig
index ebaa4a80d544..915008e8eff5 100644
--- a/drivers/net/dsa/b53/Kconfig
+++ b/drivers/net/dsa/b53/Kconfig
@@ -5,6 +5,7 @@ menuconfig B53
select NET_DSA_TAG_NONE
select NET_DSA_TAG_BRCM
select NET_DSA_TAG_BRCM_LEGACY
+ select NET_DSA_TAG_BRCM_LEGACY_FCS
select NET_DSA_TAG_BRCM_PREPEND
help
This driver adds support for Broadcom managed switch chips. It supports
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index b2eeff04f4c8..a1a177713d99 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -21,12 +21,15 @@
#include <linux/export.h>
#include <linux/gpio.h>
#include <linux/kernel.h>
+#include <linux/math.h>
+#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/platform_data/b53.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
#include <net/dsa.h>
#include "b53_regs.h"
@@ -224,6 +227,9 @@ static const struct b53_mib_desc b53_mibs_58xx[] = {
#define B53_MIBS_58XX_SIZE ARRAY_SIZE(b53_mibs_58xx)
+#define B53_MAX_MTU_25 (1536 - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN)
+#define B53_MAX_MTU (9720 - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN)
+
static int b53_do_vlan_op(struct b53_device *dev, u8 op)
{
unsigned int i;
@@ -322,6 +328,26 @@ static void b53_get_vlan_entry(struct b53_device *dev, u16 vid,
}
}
+static void b53_set_eap_mode(struct b53_device *dev, int port, int mode)
+{
+ u64 eap_conf;
+
+ if (is5325(dev) || is5365(dev) || dev->chip_id == BCM5389_DEVICE_ID)
+ return;
+
+ b53_read64(dev, B53_EAP_PAGE, B53_PORT_EAP_CONF(port), &eap_conf);
+
+ if (is63xx(dev)) {
+ eap_conf &= ~EAP_MODE_MASK_63XX;
+ eap_conf |= (u64)mode << EAP_MODE_SHIFT_63XX;
+ } else {
+ eap_conf &= ~EAP_MODE_MASK;
+ eap_conf |= (u64)mode << EAP_MODE_SHIFT;
+ }
+
+ b53_write64(dev, B53_EAP_PAGE, B53_PORT_EAP_CONF(port), eap_conf);
+}
+
static void b53_set_forwarding(struct b53_device *dev, int enable)
{
u8 mgmt;
@@ -335,18 +361,23 @@ static void b53_set_forwarding(struct b53_device *dev, int enable)
b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
- /* Include IMP port in dumb forwarding mode
- */
- b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, &mgmt);
- mgmt |= B53_MII_DUMB_FWDG_EN;
- b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt);
+ if (!is5325(dev)) {
+ /* Include IMP port in dumb forwarding mode */
+ b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, &mgmt);
+ mgmt |= B53_MII_DUMB_FWDG_EN;
+ b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt);
- /* Look at B53_UC_FWD_EN and B53_MC_FWD_EN to decide whether
- * frames should be flooded or not.
- */
- b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt);
- mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IPMC_FWD_EN;
- b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt);
+ /* Look at B53_UC_FWD_EN and B53_MC_FWD_EN to decide whether
+ * frames should be flooded or not.
+ */
+ b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt);
+ mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IP_MC;
+ b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt);
+ } else {
+ b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt);
+ mgmt |= B53_IP_MC;
+ b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt);
+ }
}
static void b53_enable_vlan(struct b53_device *dev, int port, bool enable,
@@ -369,15 +400,17 @@ static void b53_enable_vlan(struct b53_device *dev, int port, bool enable,
b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, &vc5);
}
+ vc1 &= ~VC1_RX_MCST_FWD_EN;
+
if (enable) {
vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID;
- vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN;
+ vc1 |= VC1_RX_MCST_UNTAG_EN;
vc4 &= ~VC4_ING_VID_CHECK_MASK;
if (enable_filtering) {
vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S;
vc5 |= VC5_DROP_VTABLE_MISS;
} else {
- vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S;
+ vc4 |= VC4_NO_ING_VID_CHK << VC4_ING_VID_CHECK_S;
vc5 &= ~VC5_DROP_VTABLE_MISS;
}
@@ -389,7 +422,7 @@ static void b53_enable_vlan(struct b53_device *dev, int port, bool enable,
} else {
vc0 &= ~(VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID);
- vc1 &= ~(VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN);
+ vc1 &= ~VC1_RX_MCST_UNTAG_EN;
vc4 &= ~VC4_ING_VID_CHECK_MASK;
vc5 &= ~VC5_DROP_VTABLE_MISS;
@@ -459,6 +492,9 @@ static int b53_flush_arl(struct b53_device *dev, u8 mask)
{
unsigned int i;
+ if (is5325(dev))
+ return 0;
+
b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL,
FAST_AGE_DONE | FAST_AGE_DYNAMIC | mask);
@@ -483,6 +519,9 @@ out:
static int b53_fast_age_port(struct b53_device *dev, int port)
{
+ if (is5325(dev))
+ return 0;
+
b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_PORT_CTRL, port);
return b53_flush_arl(dev, FAST_AGE_PORT);
@@ -490,6 +529,9 @@ static int b53_fast_age_port(struct b53_device *dev, int port)
static int b53_fast_age_vlan(struct b53_device *dev, u16 vid)
{
+ if (is5325(dev))
+ return 0;
+
b53_write16(dev, B53_CTRL_PAGE, B53_FAST_AGE_VID_CTRL, vid);
return b53_flush_arl(dev, FAST_AGE_VLAN);
@@ -501,6 +543,10 @@ void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
unsigned int i;
u16 pvlan;
+ /* BCM5325 CPU port is at 8 */
+ if ((is5325(dev) || is5365(dev)) && cpu_port == B53_CPU_PORT_25)
+ cpu_port = B53_CPU_PORT;
+
/* Enable the IMP port to be in the same VLAN as the other ports
* on a per-port basis such that we only have Port i and IMP in
* the same VLAN.
@@ -518,12 +564,24 @@ static void b53_port_set_ucast_flood(struct b53_device *dev, int port,
{
u16 uc;
- b53_read16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, &uc);
- if (unicast)
- uc |= BIT(port);
- else
- uc &= ~BIT(port);
- b53_write16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, uc);
+ if (is5325(dev)) {
+ if (port == B53_CPU_PORT_25)
+ port = B53_CPU_PORT;
+
+ b53_read16(dev, B53_IEEE_PAGE, B53_IEEE_UCAST_DLF, &uc);
+ if (unicast)
+ uc |= BIT(port) | B53_IEEE_UCAST_DROP_EN;
+ else
+ uc &= ~BIT(port);
+ b53_write16(dev, B53_IEEE_PAGE, B53_IEEE_UCAST_DLF, uc);
+ } else {
+ b53_read16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, &uc);
+ if (unicast)
+ uc |= BIT(port);
+ else
+ uc &= ~BIT(port);
+ b53_write16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, uc);
+ }
}
static void b53_port_set_mcast_flood(struct b53_device *dev, int port,
@@ -531,19 +589,31 @@ static void b53_port_set_mcast_flood(struct b53_device *dev, int port,
{
u16 mc;
- b53_read16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, &mc);
- if (multicast)
- mc |= BIT(port);
- else
- mc &= ~BIT(port);
- b53_write16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, mc);
+ if (is5325(dev)) {
+ if (port == B53_CPU_PORT_25)
+ port = B53_CPU_PORT;
- b53_read16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, &mc);
- if (multicast)
- mc |= BIT(port);
- else
- mc &= ~BIT(port);
- b53_write16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, mc);
+ b53_read16(dev, B53_IEEE_PAGE, B53_IEEE_MCAST_DLF, &mc);
+ if (multicast)
+ mc |= BIT(port) | B53_IEEE_MCAST_DROP_EN;
+ else
+ mc &= ~BIT(port);
+ b53_write16(dev, B53_IEEE_PAGE, B53_IEEE_MCAST_DLF, mc);
+ } else {
+ b53_read16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, &mc);
+ if (multicast)
+ mc |= BIT(port);
+ else
+ mc &= ~BIT(port);
+ b53_write16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, mc);
+
+ b53_read16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, &mc);
+ if (multicast)
+ mc |= BIT(port);
+ else
+ mc &= ~BIT(port);
+ b53_write16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, mc);
+ }
}
static void b53_port_set_learning(struct b53_device *dev, int port,
@@ -551,6 +621,9 @@ static void b53_port_set_learning(struct b53_device *dev, int port,
{
u16 reg;
+ if (is5325(dev))
+ return;
+
b53_read16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, &reg);
if (learning)
reg &= ~BIT(port);
@@ -559,6 +632,25 @@ static void b53_port_set_learning(struct b53_device *dev, int port,
b53_write16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, reg);
}
+static void b53_port_set_isolated(struct b53_device *dev, int port,
+ bool isolated)
+{
+ u8 offset;
+ u16 reg;
+
+ if (is5325(dev))
+ offset = B53_PROTECTED_PORT_SEL_25;
+ else
+ offset = B53_PROTECTED_PORT_SEL;
+
+ b53_read16(dev, B53_CTRL_PAGE, offset, &reg);
+ if (isolated)
+ reg |= BIT(port);
+ else
+ reg &= ~BIT(port);
+ b53_write16(dev, B53_CTRL_PAGE, offset, reg);
+}
+
static void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
{
struct b53_device *dev = ds->priv;
@@ -572,6 +664,39 @@ static void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
b53_write16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, reg);
}
+int b53_setup_port(struct dsa_switch *ds, int port)
+{
+ struct b53_device *dev = ds->priv;
+
+ b53_port_set_ucast_flood(dev, port, true);
+ b53_port_set_mcast_flood(dev, port, true);
+ b53_port_set_learning(dev, port, false);
+ b53_port_set_isolated(dev, port, false);
+
+ /* Force all traffic to go to the CPU port to prevent the ASIC from
+ * trying to forward to bridged ports on matching FDB entries, then
+ * dropping frames because it isn't allowed to forward there.
+ */
+ if (dsa_is_user_port(ds, port))
+ b53_set_eap_mode(dev, port, EAP_MODE_SIMPLIFIED);
+
+ if (is5325(dev) &&
+ in_range(port, 1, 4)) {
+ u8 reg;
+
+ b53_read8(dev, B53_CTRL_PAGE, B53_PD_MODE_CTRL_25, &reg);
+ reg &= ~PD_MODE_POWER_DOWN_PORT(0);
+ if (dsa_is_unused_port(ds, port))
+ reg |= PD_MODE_POWER_DOWN_PORT(port);
+ else
+ reg &= ~PD_MODE_POWER_DOWN_PORT(port);
+ b53_write8(dev, B53_CTRL_PAGE, B53_PD_MODE_CTRL_25, reg);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(b53_setup_port);
+
int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
{
struct b53_device *dev = ds->priv;
@@ -584,9 +709,8 @@ int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
- b53_port_set_ucast_flood(dev, port, true);
- b53_port_set_mcast_flood(dev, port, true);
- b53_port_set_learning(dev, port, false);
+ if (dev->ops->phy_enable)
+ dev->ops->phy_enable(dev, port);
if (dev->ops->irq_enable)
ret = dev->ops->irq_enable(dev, port);
@@ -626,6 +750,9 @@ void b53_disable_port(struct dsa_switch *ds, int port)
reg |= PORT_CTRL_RX_DISABLE | PORT_CTRL_TX_DISABLE;
b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg);
+ if (dev->ops->phy_disable)
+ dev->ops->phy_disable(dev, port);
+
if (dev->ops->irq_disable)
dev->ops->irq_disable(dev, port);
}
@@ -670,6 +797,11 @@ void b53_brcm_hdr_setup(struct dsa_switch *ds, int port)
hdr_ctl |= GC_FRM_MGMT_PORT_M;
b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, hdr_ctl);
+ /* B53_BRCM_HDR not present on devices with legacy tags */
+ if (dev->tag_protocol == DSA_TAG_PROTO_BRCM_LEGACY ||
+ dev->tag_protocol == DSA_TAG_PROTO_BRCM_LEGACY_FCS)
+ return;
+
/* Enable Broadcom tags for IMP port */
b53_read8(dev, B53_MGMT_PAGE, B53_BRCM_HDR, &hdr_ctl);
if (tag_en)
@@ -718,10 +850,6 @@ static void b53_enable_cpu_port(struct b53_device *dev, int port)
b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), port_ctrl);
b53_brcm_hdr_setup(dev->ds, port);
-
- b53_port_set_ucast_flood(dev, port, true);
- b53_port_set_mcast_flood(dev, port, true);
- b53_port_set_learning(dev, port, false);
}
static void b53_enable_mib(struct b53_device *dev)
@@ -733,12 +861,18 @@ static void b53_enable_mib(struct b53_device *dev)
b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc);
}
+static void b53_enable_stp(struct b53_device *dev)
+{
+ u8 gc;
+
+ b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc);
+ gc |= GC_RX_BPDU_EN;
+ b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc);
+}
+
static u16 b53_default_pvid(struct b53_device *dev)
{
- if (is5325(dev) || is5365(dev))
- return 1;
- else
- return 0;
+ return 0;
}
static bool b53_vlan_port_needs_forced_tagged(struct dsa_switch *ds, int port)
@@ -748,6 +882,22 @@ static bool b53_vlan_port_needs_forced_tagged(struct dsa_switch *ds, int port)
return dev->tag_protocol == DSA_TAG_PROTO_NONE && dsa_is_cpu_port(ds, port);
}
+static bool b53_vlan_port_may_join_untagged(struct dsa_switch *ds, int port)
+{
+ struct b53_device *dev = ds->priv;
+ struct dsa_port *dp;
+
+ if (!dev->vlan_filtering)
+ return true;
+
+ dp = dsa_to_port(ds, port);
+
+ if (dsa_port_is_cpu(dp))
+ return true;
+
+ return dp->bridge == NULL;
+}
+
int b53_configure_vlan(struct dsa_switch *ds)
{
struct b53_device *dev = ds->priv;
@@ -766,7 +916,7 @@ int b53_configure_vlan(struct dsa_switch *ds)
b53_do_vlan_op(dev, VTA_CMD_CLEAR);
}
- b53_enable_vlan(dev, -1, dev->vlan_enabled, ds->vlan_filtering);
+ b53_enable_vlan(dev, -1, dev->vlan_enabled, dev->vlan_filtering);
/* Create an untagged VLAN entry for the default PVID in case
* CONFIG_VLAN_8021Q is disabled and there are no calls to
@@ -774,26 +924,39 @@ int b53_configure_vlan(struct dsa_switch *ds)
* entry. Do this only when the tagging protocol is not
* DSA_TAG_PROTO_NONE
*/
+ v = &dev->vlans[def_vid];
b53_for_each_port(dev, i) {
- v = &dev->vlans[def_vid];
- v->members |= BIT(i);
+ if (!b53_vlan_port_may_join_untagged(ds, i))
+ continue;
+
+ vl.members |= BIT(i);
if (!b53_vlan_port_needs_forced_tagged(ds, i))
- v->untag = v->members;
- b53_write16(dev, B53_VLAN_PAGE,
- B53_VLAN_PORT_DEF_TAG(i), def_vid);
+ vl.untag = vl.members;
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(i),
+ def_vid);
}
+ b53_set_vlan_entry(dev, def_vid, &vl);
- /* Upon initial call we have not set-up any VLANs, but upon
- * system resume, we need to restore all VLAN entries.
- */
- for (vid = def_vid; vid < dev->num_vlans; vid++) {
- v = &dev->vlans[vid];
+ if (dev->vlan_filtering) {
+ /* Upon initial call we have not set-up any VLANs, but upon
+ * system resume, we need to restore all VLAN entries.
+ */
+ for (vid = def_vid + 1; vid < dev->num_vlans; vid++) {
+ v = &dev->vlans[vid];
- if (!v->members)
- continue;
+ if (!v->members)
+ continue;
+
+ b53_set_vlan_entry(dev, vid, v);
+ b53_fast_age_vlan(dev, vid);
+ }
- b53_set_vlan_entry(dev, vid, v);
- b53_fast_age_vlan(dev, vid);
+ b53_for_each_port(dev, i) {
+ if (!dsa_is_cpu_port(ds, i))
+ b53_write16(dev, B53_VLAN_PAGE,
+ B53_VLAN_PORT_DEF_TAG(i),
+ dev->ports[i].pvid);
+ }
}
return 0;
@@ -872,6 +1035,7 @@ static int b53_switch_reset(struct b53_device *dev)
}
b53_enable_mib(dev);
+ b53_enable_stp(dev);
return b53_flush_arl(dev, FAST_AGE_STATIC);
}
@@ -985,8 +1149,7 @@ void b53_get_strings(struct dsa_switch *ds, int port, u32 stringset,
if (stringset == ETH_SS_STATS) {
for (i = 0; i < mib_size; i++)
- strscpy(data + i * ETH_GSTRING_LEN,
- mibs[i].name, ETH_GSTRING_LEN);
+ ethtool_puts(&data, mibs[i].name);
} else if (stringset == ETH_SS_PHY_STATS) {
phydev = b53_get_phy_device(ds, port);
if (!phydev)
@@ -1112,7 +1275,9 @@ EXPORT_SYMBOL(b53_setup_devlink_resources);
static int b53_setup(struct dsa_switch *ds)
{
struct b53_device *dev = ds->priv;
+ struct b53_vlan *vl;
unsigned int port;
+ u16 pvid;
int ret;
/* Request bridge PVID untagged when DSA_TAG_PROTO_NONE is set
@@ -1120,12 +1285,36 @@ static int b53_setup(struct dsa_switch *ds)
*/
ds->untag_bridge_pvid = dev->tag_protocol == DSA_TAG_PROTO_NONE;
+ /* The switch does not tell us the original VLAN for untagged
+ * packets, so keep the CPU port always tagged.
+ */
+ ds->untag_vlan_aware_bridge_pvid = true;
+
+ if (dev->chip_id == BCM53101_DEVICE_ID) {
+ /* BCM53101 uses 0.5 second increments */
+ ds->ageing_time_min = 1 * 500;
+ ds->ageing_time_max = AGE_TIME_MAX * 500;
+ } else {
+ /* Everything else uses 1 second increments */
+ ds->ageing_time_min = 1 * 1000;
+ ds->ageing_time_max = AGE_TIME_MAX * 1000;
+ }
+
ret = b53_reset_switch(dev);
if (ret) {
dev_err(ds->dev, "failed to reset switch\n");
return ret;
}
+ /* setup default vlan for filtering mode */
+ pvid = b53_default_pvid(dev);
+ vl = &dev->vlans[pvid];
+ b53_for_each_port(dev, port) {
+ vl->members |= BIT(port);
+ if (!b53_vlan_port_needs_forced_tagged(ds, port))
+ vl->untag |= BIT(port);
+ }
+
b53_reset_mib(dev);
ret = b53_apply_config(dev);
@@ -1160,6 +1349,8 @@ static void b53_force_link(struct b53_device *dev, int port, int link)
if (port == dev->imp_port) {
off = B53_PORT_OVERRIDE_CTRL;
val = PORT_OVERRIDE_EN;
+ } else if (is5325(dev)) {
+ return;
} else {
off = B53_GMII_PORT_OVERRIDE_CTRL(port);
val = GMII_PO_EN;
@@ -1184,6 +1375,8 @@ static void b53_force_port_config(struct b53_device *dev, int port,
if (port == dev->imp_port) {
off = B53_PORT_OVERRIDE_CTRL;
val = PORT_OVERRIDE_EN;
+ } else if (is5325(dev)) {
+ return;
} else {
off = B53_GMII_PORT_OVERRIDE_CTRL(port);
val = GMII_PO_EN;
@@ -1196,6 +1389,10 @@ static void b53_force_port_config(struct b53_device *dev, int port,
else
reg &= ~PORT_OVERRIDE_FULL_DUPLEX;
+ reg &= ~(0x3 << GMII_PO_SPEED_S);
+ if (is5301x(dev) || is58xx(dev))
+ reg &= ~PORT_OVERRIDE_SPEED_2000M;
+
switch (speed) {
case 2000:
reg |= PORT_OVERRIDE_SPEED_2000M;
@@ -1214,10 +1411,24 @@ static void b53_force_port_config(struct b53_device *dev, int port,
return;
}
- if (rx_pause)
- reg |= PORT_OVERRIDE_RX_FLOW;
- if (tx_pause)
- reg |= PORT_OVERRIDE_TX_FLOW;
+ if (is5325(dev))
+ reg &= ~PORT_OVERRIDE_LP_FLOW_25;
+ else
+ reg &= ~(PORT_OVERRIDE_RX_FLOW | PORT_OVERRIDE_TX_FLOW);
+
+ if (rx_pause) {
+ if (is5325(dev))
+ reg |= PORT_OVERRIDE_LP_FLOW_25;
+ else
+ reg |= PORT_OVERRIDE_RX_FLOW;
+ }
+
+ if (tx_pause) {
+ if (is5325(dev))
+ reg |= PORT_OVERRIDE_LP_FLOW_25;
+ else
+ reg |= PORT_OVERRIDE_TX_FLOW;
+ }
b53_write8(dev, B53_CTRL_PAGE, off, reg);
}
@@ -1226,135 +1437,88 @@ static void b53_adjust_63xx_rgmii(struct dsa_switch *ds, int port,
phy_interface_t interface)
{
struct b53_device *dev = ds->priv;
- u8 rgmii_ctrl = 0, off;
-
- if (port == dev->imp_port)
- off = B53_RGMII_CTRL_IMP;
- else
- off = B53_RGMII_CTRL_P(port);
-
- b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl);
+ u8 rgmii_ctrl = 0;
- switch (interface) {
- case PHY_INTERFACE_MODE_RGMII_ID:
- rgmii_ctrl |= (RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC);
- break;
- case PHY_INTERFACE_MODE_RGMII_RXID:
- rgmii_ctrl &= ~(RGMII_CTRL_DLL_TXC);
- rgmii_ctrl |= RGMII_CTRL_DLL_RXC;
- break;
- case PHY_INTERFACE_MODE_RGMII_TXID:
- rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC);
- rgmii_ctrl |= RGMII_CTRL_DLL_TXC;
- break;
- case PHY_INTERFACE_MODE_RGMII:
- default:
- rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC);
- break;
- }
+ b53_read8(dev, B53_CTRL_PAGE, B53_RGMII_CTRL_P(port), &rgmii_ctrl);
+ rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC);
- if (port != dev->imp_port) {
- if (is63268(dev))
- rgmii_ctrl |= RGMII_CTRL_MII_OVERRIDE;
+ if (is6318_268(dev))
+ rgmii_ctrl |= RGMII_CTRL_MII_OVERRIDE;
- rgmii_ctrl |= RGMII_CTRL_ENABLE_GMII;
- }
+ rgmii_ctrl |= RGMII_CTRL_ENABLE_GMII;
- b53_write8(dev, B53_CTRL_PAGE, off, rgmii_ctrl);
+ b53_write8(dev, B53_CTRL_PAGE, B53_RGMII_CTRL_P(port), rgmii_ctrl);
dev_dbg(ds->dev, "Configured port %d for %s\n", port,
phy_modes(interface));
}
-static void b53_adjust_link(struct dsa_switch *ds, int port,
- struct phy_device *phydev)
+static void b53_adjust_531x5_rgmii(struct dsa_switch *ds, int port,
+ phy_interface_t interface)
{
struct b53_device *dev = ds->priv;
- struct ethtool_keee *p = &dev->ports[port].eee;
- u8 rgmii_ctrl = 0, reg = 0, off;
- bool tx_pause = false;
- bool rx_pause = false;
+ u8 rgmii_ctrl = 0, off;
- if (!phy_is_pseudo_fixed_link(phydev))
- return;
+ if (port == dev->imp_port)
+ off = B53_RGMII_CTRL_IMP;
+ else
+ off = B53_RGMII_CTRL_P(port);
- /* Enable flow control on BCM5301x's CPU port */
- if (is5301x(dev) && dsa_is_cpu_port(ds, port))
- tx_pause = rx_pause = true;
+ /* Configure the port RGMII clock delay by DLL disabled and
+ * tx_clk aligned timing (restoring to reset defaults)
+ */
+ b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl);
+ rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC);
- if (phydev->pause) {
- if (phydev->asym_pause)
- tx_pause = true;
- rx_pause = true;
- }
+ /* PHY_INTERFACE_MODE_RGMII_TXID means TX internal delay, make
+ * sure that we enable the port TX clock internal delay to
+ * account for this internal delay that is inserted, otherwise
+ * the switch won't be able to receive correctly.
+ *
+ * PHY_INTERFACE_MODE_RGMII means that we are not introducing
+ * any delay neither on transmission nor reception, so the
+ * BCM53125 must also be configured accordingly to account for
+ * the lack of delay and introduce
+ *
+ * The BCM53125 switch has its RX clock and TX clock control
+ * swapped, hence the reason why we modify the TX clock path in
+ * the "RGMII" case
+ */
+ if (interface == PHY_INTERFACE_MODE_RGMII_TXID)
+ rgmii_ctrl |= RGMII_CTRL_DLL_TXC;
+ if (interface == PHY_INTERFACE_MODE_RGMII)
+ rgmii_ctrl |= RGMII_CTRL_DLL_TXC | RGMII_CTRL_DLL_RXC;
- b53_force_port_config(dev, port, phydev->speed, phydev->duplex,
- tx_pause, rx_pause);
- b53_force_link(dev, port, phydev->link);
+ if (dev->chip_id != BCM53115_DEVICE_ID)
+ rgmii_ctrl |= RGMII_CTRL_TIMING_SEL;
- if (is63xx(dev) && port >= B53_63XX_RGMII0)
- b53_adjust_63xx_rgmii(ds, port, phydev->interface);
+ b53_write8(dev, B53_CTRL_PAGE, off, rgmii_ctrl);
- if (is531x5(dev) && phy_interface_is_rgmii(phydev)) {
- if (port == dev->imp_port)
- off = B53_RGMII_CTRL_IMP;
- else
- off = B53_RGMII_CTRL_P(port);
+ dev_info(ds->dev, "Configured port %d for %s\n", port,
+ phy_modes(interface));
+}
- /* Configure the port RGMII clock delay by DLL disabled and
- * tx_clk aligned timing (restoring to reset defaults)
- */
- b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl);
- rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC |
- RGMII_CTRL_TIMING_SEL);
-
- /* PHY_INTERFACE_MODE_RGMII_TXID means TX internal delay, make
- * sure that we enable the port TX clock internal delay to
- * account for this internal delay that is inserted, otherwise
- * the switch won't be able to receive correctly.
- *
- * PHY_INTERFACE_MODE_RGMII means that we are not introducing
- * any delay neither on transmission nor reception, so the
- * BCM53125 must also be configured accordingly to account for
- * the lack of delay and introduce
- *
- * The BCM53125 switch has its RX clock and TX clock control
- * swapped, hence the reason why we modify the TX clock path in
- * the "RGMII" case
- */
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
- rgmii_ctrl |= RGMII_CTRL_DLL_TXC;
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
- rgmii_ctrl |= RGMII_CTRL_DLL_TXC | RGMII_CTRL_DLL_RXC;
- rgmii_ctrl |= RGMII_CTRL_TIMING_SEL;
- b53_write8(dev, B53_CTRL_PAGE, off, rgmii_ctrl);
+static void b53_adjust_5325_mii(struct dsa_switch *ds, int port)
+{
+ struct b53_device *dev = ds->priv;
+ u8 reg = 0;
- dev_info(ds->dev, "Configured port %d for %s\n", port,
- phy_modes(phydev->interface));
- }
+ b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
+ &reg);
- /* configure MII port if necessary */
- if (is5325(dev)) {
+ /* reverse mii needs to be enabled */
+ if (!(reg & PORT_OVERRIDE_RV_MII_25)) {
+ b53_write8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
+ reg | PORT_OVERRIDE_RV_MII_25);
b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
&reg);
- /* reverse mii needs to be enabled */
if (!(reg & PORT_OVERRIDE_RV_MII_25)) {
- b53_write8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
- reg | PORT_OVERRIDE_RV_MII_25);
- b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
- &reg);
-
- if (!(reg & PORT_OVERRIDE_RV_MII_25)) {
- dev_err(ds->dev,
- "Failed to enable reverse MII mode\n");
- return;
- }
+ dev_err(ds->dev,
+ "Failed to enable reverse MII mode\n");
+ return;
}
}
-
- /* Re-negotiate EEE if it was enabled already */
- p->eee_enabled = b53_eee_init(ds, port, phydev);
}
void b53_port_event(struct dsa_switch *ds, int port)
@@ -1391,6 +1555,10 @@ static void b53_phylink_get_caps(struct dsa_switch *ds, int port,
__set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_REVMII, config->supported_interfaces);
+ /* BCM63xx RGMII ports support RGMII */
+ if (is63xx(dev) && in_range(port, B53_63XX_RGMII0, 4))
+ phy_interface_set_rgmii(config->supported_interfaces);
+
config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100;
@@ -1408,33 +1576,54 @@ static void b53_phylink_get_caps(struct dsa_switch *ds, int port,
dev->ops->phylink_get_caps(dev, port, config);
}
-static struct phylink_pcs *b53_phylink_mac_select_pcs(struct dsa_switch *ds,
- int port,
+static struct phylink_pcs *b53_phylink_mac_select_pcs(struct phylink_config *config,
phy_interface_t interface)
{
- struct b53_device *dev = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct b53_device *dev = dp->ds->priv;
if (!dev->ops->phylink_mac_select_pcs)
return NULL;
- return dev->ops->phylink_mac_select_pcs(dev, port, interface);
+ return dev->ops->phylink_mac_select_pcs(dev, dp->index, interface);
}
-void b53_phylink_mac_config(struct dsa_switch *ds, int port,
- unsigned int mode,
- const struct phylink_link_state *state)
+static void b53_phylink_mac_config(struct phylink_config *config,
+ unsigned int mode,
+ const struct phylink_link_state *state)
{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ phy_interface_t interface = state->interface;
+ struct dsa_switch *ds = dp->ds;
+ struct b53_device *dev = ds->priv;
+ int port = dp->index;
+
+ if (is63xx(dev) && in_range(port, B53_63XX_RGMII0, 4))
+ b53_adjust_63xx_rgmii(ds, port, interface);
+
+ if (mode == MLO_AN_FIXED) {
+ if (is531x5(dev) && phy_interface_mode_is_rgmii(interface))
+ b53_adjust_531x5_rgmii(ds, port, interface);
+
+ /* configure MII port if necessary */
+ if (is5325(dev))
+ b53_adjust_5325_mii(ds, port);
+ }
}
-EXPORT_SYMBOL(b53_phylink_mac_config);
-void b53_phylink_mac_link_down(struct dsa_switch *ds, int port,
- unsigned int mode,
- phy_interface_t interface)
+static void b53_phylink_mac_link_down(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface)
{
- struct b53_device *dev = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct b53_device *dev = dp->ds->priv;
+ int port = dp->index;
- if (mode == MLO_AN_PHY)
+ if (mode == MLO_AN_PHY) {
+ if (is63xx(dev) && in_range(port, B53_63XX_RGMII0, 4))
+ b53_force_link(dev, port, false);
return;
+ }
if (mode == MLO_AN_FIXED) {
b53_force_link(dev, port, false);
@@ -1445,24 +1634,38 @@ void b53_phylink_mac_link_down(struct dsa_switch *ds, int port,
dev->ops->serdes_link_set)
dev->ops->serdes_link_set(dev, port, mode, interface, false);
}
-EXPORT_SYMBOL(b53_phylink_mac_link_down);
-void b53_phylink_mac_link_up(struct dsa_switch *ds, int port,
- unsigned int mode,
- phy_interface_t interface,
- struct phy_device *phydev,
- int speed, int duplex,
- bool tx_pause, bool rx_pause)
+static void b53_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
+ unsigned int mode,
+ phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct dsa_switch *ds = dp->ds;
struct b53_device *dev = ds->priv;
+ struct ethtool_keee *p = &dev->ports[dp->index].eee;
+ int port = dp->index;
- if (is63xx(dev) && port >= B53_63XX_RGMII0)
- b53_adjust_63xx_rgmii(ds, port, interface);
+ if (mode == MLO_AN_PHY) {
+ /* Re-negotiate EEE if it was enabled already */
+ p->eee_enabled = b53_eee_init(ds, port, phydev);
+
+ if (is63xx(dev) && in_range(port, B53_63XX_RGMII0, 4)) {
+ b53_force_port_config(dev, port, speed, duplex,
+ tx_pause, rx_pause);
+ b53_force_link(dev, port, true);
+ }
- if (mode == MLO_AN_PHY)
return;
+ }
if (mode == MLO_AN_FIXED) {
+ /* Force flow control on BCM5301x's CPU port */
+ if (is5301x(dev) && dsa_is_cpu_port(ds, port))
+ tx_pause = rx_pause = true;
+
b53_force_port_config(dev, port, speed, duplex,
tx_pause, rx_pause);
b53_force_link(dev, port, true);
@@ -1473,14 +1676,16 @@ void b53_phylink_mac_link_up(struct dsa_switch *ds, int port,
dev->ops->serdes_link_set)
dev->ops->serdes_link_set(dev, port, mode, interface, true);
}
-EXPORT_SYMBOL(b53_phylink_mac_link_up);
int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
struct netlink_ext_ack *extack)
{
struct b53_device *dev = ds->priv;
- b53_enable_vlan(dev, port, dev->vlan_enabled, vlan_filtering);
+ if (dev->vlan_filtering != vlan_filtering) {
+ dev->vlan_filtering = vlan_filtering;
+ b53_apply_config(dev);
+ }
return 0;
}
@@ -1491,9 +1696,6 @@ static int b53_vlan_prepare(struct dsa_switch *ds, int port,
{
struct b53_device *dev = ds->priv;
- if ((is5325(dev) || is5365(dev)) && vlan->vid == 0)
- return -EOPNOTSUPP;
-
/* Port 7 on 7278 connects to the ASP's UniMAC which is not capable of
* receiving VLAN tagged frames at all, we can still allow the port to
* be configured for egress untagged.
@@ -1505,7 +1707,7 @@ static int b53_vlan_prepare(struct dsa_switch *ds, int port,
if (vlan->vid >= dev->num_vlans)
return -ERANGE;
- b53_enable_vlan(dev, port, true, ds->vlan_filtering);
+ b53_enable_vlan(dev, port, true, dev->vlan_filtering);
return 0;
}
@@ -1518,18 +1720,29 @@ int b53_vlan_add(struct dsa_switch *ds, int port,
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
struct b53_vlan *vl;
+ u16 old_pvid, new_pvid;
int err;
err = b53_vlan_prepare(ds, port, vlan);
if (err)
return err;
- vl = &dev->vlans[vlan->vid];
+ if (vlan->vid == 0)
+ return 0;
- b53_get_vlan_entry(dev, vlan->vid, vl);
+ old_pvid = dev->ports[port].pvid;
+ if (pvid)
+ new_pvid = vlan->vid;
+ else if (!pvid && vlan->vid == old_pvid)
+ new_pvid = b53_default_pvid(dev);
+ else
+ new_pvid = old_pvid;
+ dev->ports[port].pvid = new_pvid;
- if (vlan->vid == 0 && vlan->vid == b53_default_pvid(dev))
- untagged = true;
+ vl = &dev->vlans[vlan->vid];
+
+ if (dsa_is_cpu_port(ds, port))
+ untagged = false;
vl->members |= BIT(port);
if (untagged && !b53_vlan_port_needs_forced_tagged(ds, port))
@@ -1537,13 +1750,16 @@ int b53_vlan_add(struct dsa_switch *ds, int port,
else
vl->untag &= ~BIT(port);
+ if (!dev->vlan_filtering)
+ return 0;
+
b53_set_vlan_entry(dev, vlan->vid, vl);
b53_fast_age_vlan(dev, vlan->vid);
- if (pvid && !dsa_is_cpu_port(ds, port)) {
+ if (!dsa_is_cpu_port(ds, port) && new_pvid != old_pvid) {
b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
- vlan->vid);
- b53_fast_age_vlan(dev, vlan->vid);
+ new_pvid);
+ b53_fast_age_vlan(dev, old_pvid);
}
return 0;
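(Illustrative note, not part of the patch.) The PVID bookkeeping added above reduces to three cases; the comment below spells them out using only names visible in this hunk:

/* PVID selection in b53_vlan_add(), spelled out:
 *   BRIDGE_VLAN_INFO_PVID set      -> new_pvid = vlan->vid
 *   flag clear, vid == old_pvid    -> new_pvid = b53_default_pvid(dev)
 *   flag clear, vid != old_pvid    -> new_pvid = old_pvid (unchanged)
 * The default-tag register is only rewritten when new_pvid actually differs
 * from old_pvid and the port is not the CPU port.
 */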
@@ -1558,20 +1774,25 @@ int b53_vlan_del(struct dsa_switch *ds, int port,
struct b53_vlan *vl;
u16 pvid;
- b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid);
+ if (vlan->vid == 0)
+ return 0;
- vl = &dev->vlans[vlan->vid];
+ pvid = dev->ports[port].pvid;
- b53_get_vlan_entry(dev, vlan->vid, vl);
+ vl = &dev->vlans[vlan->vid];
vl->members &= ~BIT(port);
if (pvid == vlan->vid)
pvid = b53_default_pvid(dev);
+ dev->ports[port].pvid = pvid;
if (untagged && !b53_vlan_port_needs_forced_tagged(ds, port))
vl->untag &= ~(BIT(port));
+ if (!dev->vlan_filtering)
+ return 0;
+
b53_set_vlan_entry(dev, vlan->vid, vl);
b53_fast_age_vlan(dev, vlan->vid);
@@ -1623,7 +1844,82 @@ static int b53_arl_rw_op(struct b53_device *dev, unsigned int op)
return b53_arl_op_wait(dev);
}
-static int b53_arl_read(struct b53_device *dev, u64 mac,
+static void b53_arl_read_entry_25(struct b53_device *dev,
+ struct b53_arl_entry *ent, u8 idx)
+{
+ u8 vid_entry;
+ u64 mac_vid;
+
+ b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_VID_ENTRY_25(idx),
+ &vid_entry);
+ b53_read64(dev, B53_ARLIO_PAGE, B53_ARLTBL_MAC_VID_ENTRY(idx),
+ &mac_vid);
+ b53_arl_to_entry_25(ent, mac_vid, vid_entry);
+}
+
+static void b53_arl_write_entry_25(struct b53_device *dev,
+ const struct b53_arl_entry *ent, u8 idx)
+{
+ u8 vid_entry;
+ u64 mac_vid;
+
+ b53_arl_from_entry_25(&mac_vid, &vid_entry, ent);
+ b53_write8(dev, B53_ARLIO_PAGE, B53_ARLTBL_VID_ENTRY_25(idx), vid_entry);
+ b53_write64(dev, B53_ARLIO_PAGE, B53_ARLTBL_MAC_VID_ENTRY(idx),
+ mac_vid);
+}
+
+static void b53_arl_read_entry_89(struct b53_device *dev,
+ struct b53_arl_entry *ent, u8 idx)
+{
+ u64 mac_vid;
+ u16 fwd_entry;
+
+ b53_read64(dev, B53_ARLIO_PAGE, B53_ARLTBL_MAC_VID_ENTRY(idx),
+ &mac_vid);
+ b53_read16(dev, B53_ARLIO_PAGE, B53_ARLTBL_DATA_ENTRY(idx), &fwd_entry);
+ b53_arl_to_entry_89(ent, mac_vid, fwd_entry);
+}
+
+static void b53_arl_write_entry_89(struct b53_device *dev,
+ const struct b53_arl_entry *ent, u8 idx)
+{
+ u32 fwd_entry;
+ u64 mac_vid;
+
+ b53_arl_from_entry_89(&mac_vid, &fwd_entry, ent);
+ b53_write64(dev, B53_ARLIO_PAGE,
+ B53_ARLTBL_MAC_VID_ENTRY(idx), mac_vid);
+ b53_write16(dev, B53_ARLIO_PAGE,
+ B53_ARLTBL_DATA_ENTRY(idx), fwd_entry);
+}
+
+static void b53_arl_read_entry_95(struct b53_device *dev,
+ struct b53_arl_entry *ent, u8 idx)
+{
+ u32 fwd_entry;
+ u64 mac_vid;
+
+ b53_read64(dev, B53_ARLIO_PAGE, B53_ARLTBL_MAC_VID_ENTRY(idx),
+ &mac_vid);
+ b53_read32(dev, B53_ARLIO_PAGE, B53_ARLTBL_DATA_ENTRY(idx), &fwd_entry);
+ b53_arl_to_entry(ent, mac_vid, fwd_entry);
+}
+
+static void b53_arl_write_entry_95(struct b53_device *dev,
+ const struct b53_arl_entry *ent, u8 idx)
+{
+ u32 fwd_entry;
+ u64 mac_vid;
+
+ b53_arl_from_entry(&mac_vid, &fwd_entry, ent);
+ b53_write64(dev, B53_ARLIO_PAGE, B53_ARLTBL_MAC_VID_ENTRY(idx),
+ mac_vid);
+ b53_write32(dev, B53_ARLIO_PAGE, B53_ARLTBL_DATA_ENTRY(idx),
+ fwd_entry);
+}
+
+static int b53_arl_read(struct b53_device *dev, const u8 *mac,
u16 vid, struct b53_arl_entry *ent, u8 *idx)
{
DECLARE_BITMAP(free_bins, B53_ARLTBL_MAX_BIN_ENTRIES);
@@ -1638,23 +1934,15 @@ static int b53_arl_read(struct b53_device *dev, u64 mac,
/* Read the bins */
for (i = 0; i < dev->num_arl_bins; i++) {
- u64 mac_vid;
- u32 fwd_entry;
+ b53_arl_read_entry(dev, ent, i);
- b53_read64(dev, B53_ARLIO_PAGE,
- B53_ARLTBL_MAC_VID_ENTRY(i), &mac_vid);
- b53_read32(dev, B53_ARLIO_PAGE,
- B53_ARLTBL_DATA_ENTRY(i), &fwd_entry);
- b53_arl_to_entry(ent, mac_vid, fwd_entry);
-
- if (!(fwd_entry & ARLTBL_VALID)) {
+ if (!ent->is_valid) {
set_bit(i, free_bins);
continue;
}
- if ((mac_vid & ARLTBL_MAC_MASK) != mac)
+ if (!ether_addr_equal(ent->mac, mac))
continue;
- if (dev->vlan_enabled &&
- ((mac_vid >> ARLTBL_VID_S) & ARLTBL_VID_MASK) != vid)
+ if (dev->vlan_enabled && ent->vid != vid)
continue;
*idx = i;
return 0;
@@ -1668,9 +1956,8 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
const unsigned char *addr, u16 vid, bool is_valid)
{
struct b53_arl_entry ent;
- u32 fwd_entry;
- u64 mac, mac_vid = 0;
u8 idx = 0;
+ u64 mac;
int ret;
/* Convert the array into a 64-bit MAC */
@@ -1678,14 +1965,19 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
/* Perform a read for the given MAC and VID */
b53_write48(dev, B53_ARLIO_PAGE, B53_MAC_ADDR_IDX, mac);
- b53_write16(dev, B53_ARLIO_PAGE, B53_VLAN_ID_IDX, vid);
+ if (!is5325m(dev)) {
+ if (is5325(dev) || is5365(dev))
+ b53_write8(dev, B53_ARLIO_PAGE, B53_VLAN_ID_IDX, vid);
+ else
+ b53_write16(dev, B53_ARLIO_PAGE, B53_VLAN_ID_IDX, vid);
+ }
/* Issue a read operation for this MAC */
ret = b53_arl_rw_op(dev, 1);
if (ret)
return ret;
- ret = b53_arl_read(dev, mac, vid, &ent, &idx);
+ ret = b53_arl_read(dev, addr, vid, &ent, &idx);
/* If this is a read, just finish now */
if (op)
@@ -1702,7 +1994,6 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
/* We could not find a matching MAC, so reset to a new entry */
dev_dbg(dev->dev, "{%pM,%.4d} not found, using idx: %d\n",
addr, vid, idx);
- fwd_entry = 0;
break;
default:
dev_dbg(dev->dev, "{%pM,%.4d} found, using idx: %d\n",
@@ -1729,12 +2020,7 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
ent.is_static = true;
ent.is_age = false;
memcpy(ent.mac, addr, ETH_ALEN);
- b53_arl_from_entry(&mac_vid, &fwd_entry, &ent);
-
- b53_write64(dev, B53_ARLIO_PAGE,
- B53_ARLTBL_MAC_VID_ENTRY(idx), mac_vid);
- b53_write32(dev, B53_ARLIO_PAGE,
- B53_ARLTBL_DATA_ENTRY(idx), fwd_entry);
+ b53_arl_write_entry(dev, &ent, idx);
return b53_arl_rw_op(dev, 0);
}
@@ -1746,12 +2032,6 @@ int b53_fdb_add(struct dsa_switch *ds, int port,
struct b53_device *priv = ds->priv;
int ret;
- /* 5325 and 5365 require some more massaging, but could
- * be supported eventually
- */
- if (is5325(priv) || is5365(priv))
- return -EOPNOTSUPP;
-
mutex_lock(&priv->arl_mutex);
ret = b53_arl_op(priv, 0, port, addr, vid, true);
mutex_unlock(&priv->arl_mutex);
@@ -1775,15 +2055,55 @@ int b53_fdb_del(struct dsa_switch *ds, int port,
}
EXPORT_SYMBOL(b53_fdb_del);
+static void b53_read_arl_srch_ctl(struct b53_device *dev, u8 *val)
+{
+ u8 offset;
+
+ if (is5325(dev) || is5365(dev))
+ offset = B53_ARL_SRCH_CTL_25;
+ else if (dev->chip_id == BCM5389_DEVICE_ID || is5397_98(dev) ||
+ is63xx(dev))
+ offset = B53_ARL_SRCH_CTL_89;
+ else
+ offset = B53_ARL_SRCH_CTL;
+
+ if (is63xx(dev)) {
+ u16 val16;
+
+ b53_read16(dev, B53_ARLIO_PAGE, offset, &val16);
+ *val = val16 & 0xff;
+ } else {
+ b53_read8(dev, B53_ARLIO_PAGE, offset, val);
+ }
+}
+
+static void b53_write_arl_srch_ctl(struct b53_device *dev, u8 val)
+{
+ u8 offset;
+
+ if (is5325(dev) || is5365(dev))
+ offset = B53_ARL_SRCH_CTL_25;
+ else if (dev->chip_id == BCM5389_DEVICE_ID || is5397_98(dev) ||
+ is63xx(dev))
+ offset = B53_ARL_SRCH_CTL_89;
+ else
+ offset = B53_ARL_SRCH_CTL;
+
+ if (is63xx(dev))
+ b53_write16(dev, B53_ARLIO_PAGE, offset, val);
+ else
+ b53_write8(dev, B53_ARLIO_PAGE, offset, val);
+}
+
static int b53_arl_search_wait(struct b53_device *dev)
{
unsigned int timeout = 1000;
u8 reg;
do {
- b53_read8(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, &reg);
+ b53_read_arl_srch_ctl(dev, &reg);
if (!(reg & ARL_SRCH_STDN))
- return 0;
+ return -ENOENT;
if (reg & ARL_SRCH_VLID)
return 0;
@@ -1794,16 +2114,52 @@ static int b53_arl_search_wait(struct b53_device *dev)
return -ETIMEDOUT;
}
-static void b53_arl_search_rd(struct b53_device *dev, u8 idx,
- struct b53_arl_entry *ent)
+static void b53_arl_search_read_25(struct b53_device *dev, u8 idx,
+ struct b53_arl_entry *ent)
+{
+ u64 mac_vid;
+ u8 ext;
+
+ b53_read8(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSLT_EXT_25, &ext);
+ b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL_0_MACVID_25,
+ &mac_vid);
+ b53_arl_search_to_entry_25(ent, mac_vid, ext);
+}
+
+static void b53_arl_search_read_89(struct b53_device *dev, u8 idx,
+ struct b53_arl_entry *ent)
+{
+ u16 fwd_entry;
+ u64 mac_vid;
+
+ b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSLT_MACVID_89,
+ &mac_vid);
+ b53_read16(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSLT_89, &fwd_entry);
+ b53_arl_to_entry_89(ent, mac_vid, fwd_entry);
+}
+
+static void b53_arl_search_read_63xx(struct b53_device *dev, u8 idx,
+ struct b53_arl_entry *ent)
{
+ u16 fwd_entry;
u64 mac_vid;
+
+ b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSLT_MACVID_63XX,
+ &mac_vid);
+ b53_read16(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSLT_63XX, &fwd_entry);
+ b53_arl_search_to_entry_63xx(ent, mac_vid, fwd_entry);
+}
+
+static void b53_arl_search_read_95(struct b53_device *dev, u8 idx,
+ struct b53_arl_entry *ent)
+{
u32 fwd_entry;
+ u64 mac_vid;
- b53_read64(dev, B53_ARLIO_PAGE,
- B53_ARL_SRCH_RSTL_MACVID(idx), &mac_vid);
- b53_read32(dev, B53_ARLIO_PAGE,
- B53_ARL_SRCH_RSTL(idx), &fwd_entry);
+ b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL_MACVID(idx),
+ &mac_vid);
+ b53_read32(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL(idx),
+ &fwd_entry);
b53_arl_to_entry(ent, mac_vid, fwd_entry);
}
@@ -1822,30 +2178,31 @@ static int b53_fdb_copy(int port, const struct b53_arl_entry *ent,
int b53_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
+ unsigned int count = 0, results_per_hit = 1;
struct b53_device *priv = ds->priv;
struct b53_arl_entry results[2];
- unsigned int count = 0;
int ret;
- u8 reg;
+
+ if (priv->num_arl_bins > 2)
+ results_per_hit = 2;
mutex_lock(&priv->arl_mutex);
/* Start search operation */
- reg = ARL_SRCH_STDN;
- b53_write8(priv, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, reg);
+ b53_write_arl_srch_ctl(priv, ARL_SRCH_STDN);
do {
ret = b53_arl_search_wait(priv);
if (ret)
break;
- b53_arl_search_rd(priv, 0, &results[0]);
+ b53_arl_search_read(priv, 0, &results[0]);
ret = b53_fdb_copy(port, &results[0], cb, data);
if (ret)
break;
- if (priv->num_arl_bins > 2) {
- b53_arl_search_rd(priv, 1, &results[1]);
+ if (results_per_hit == 2) {
+ b53_arl_search_read(priv, 1, &results[1]);
ret = b53_fdb_copy(port, &results[1], cb, data);
if (ret)
break;
@@ -1854,7 +2211,7 @@ int b53_fdb_dump(struct dsa_switch *ds, int port,
break;
}
- } while (count++ < b53_max_arl_entries(priv) / 2);
+ } while (count++ < b53_max_arl_entries(priv) / results_per_hit);
mutex_unlock(&priv->arl_mutex);
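(Illustrative note, not part of the patch; it assumes b53_max_arl_entries() is bins times buckets, as defined elsewhere in b53_priv.h.) The results_per_hit change above keeps the loop bound consistent across ARL layouts:

/* 4 bins x 1024 buckets: 4096 entries, 2 results per search hit
 *   -> at most 4096 / 2 = 2048 iterations (unchanged behaviour);
 * 1 bin x 4096 buckets (63xx, see the chip table below): 1 result per hit
 *   -> up to 4096 iterations, which the old fixed "/ 2" bound would have
 *      halved, stopping the dump early.
 */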
@@ -1904,8 +2261,9 @@ int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge,
bool *tx_fwd_offload, struct netlink_ext_ack *extack)
{
struct b53_device *dev = ds->priv;
+ struct b53_vlan *vl;
s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
- u16 pvlan, reg;
+ u16 pvlan, reg, pvid;
unsigned int i;
/* On 7278, port 7 which connects to the ASP should only receive
@@ -1914,15 +2272,26 @@ int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge,
if (dev->chip_id == BCM7278_DEVICE_ID && port == 7)
return -EINVAL;
- /* Make this port leave the all VLANs join since we will have proper
- * VLAN entries from now on
- */
- if (is58xx(dev)) {
- b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg);
- reg &= ~BIT(port);
- if ((reg & BIT(cpu_port)) == BIT(cpu_port))
- reg &= ~BIT(cpu_port);
- b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
+ pvid = b53_default_pvid(dev);
+ vl = &dev->vlans[pvid];
+
+ if (dev->vlan_filtering) {
+ /* Make this port leave the all VLANs join since we will have
+ * proper VLAN entries from now on
+ */
+ if (is58xx(dev)) {
+ b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN,
+ &reg);
+ reg &= ~BIT(port);
+ if ((reg & BIT(cpu_port)) == BIT(cpu_port))
+ reg &= ~BIT(cpu_port);
+ b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN,
+ reg);
+ }
+
+ b53_get_vlan_entry(dev, pvid, vl);
+ vl->members &= ~BIT(port);
+ b53_set_vlan_entry(dev, pvid, vl);
}
b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
@@ -1942,6 +2311,9 @@ int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge,
pvlan |= BIT(i);
}
+ /* Disable redirection of unknown SA to the CPU port */
+ b53_set_eap_mode(dev, port, EAP_MODE_BASIC);
+
/* Configure the local port VLAN control membership to include
* remote ports and update the local port bitmask
*/
@@ -1955,7 +2327,7 @@ EXPORT_SYMBOL(b53_br_join);
void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge)
{
struct b53_device *dev = ds->priv;
- struct b53_vlan *vl = &dev->vlans[0];
+ struct b53_vlan *vl;
s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
unsigned int i;
u16 pvlan, reg, pvid;
@@ -1977,22 +2349,27 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge)
pvlan &= ~BIT(i);
}
+ /* Enable redirection of unknown SA to the CPU port */
+ b53_set_eap_mode(dev, port, EAP_MODE_SIMPLIFIED);
+
b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
dev->ports[port].vlan_ctl_mask = pvlan;
pvid = b53_default_pvid(dev);
+ vl = &dev->vlans[pvid];
+
+ if (dev->vlan_filtering) {
+ /* Make this port join all VLANs without VLAN entries */
+ if (is58xx(dev)) {
+ b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg);
+ reg |= BIT(port);
+ if (!(reg & BIT(cpu_port)))
+ reg |= BIT(cpu_port);
+ b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
+ }
- /* Make this port join all VLANs without VLAN entries */
- if (is58xx(dev)) {
- b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg);
- reg |= BIT(port);
- if (!(reg & BIT(cpu_port)))
- reg |= BIT(cpu_port);
- b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
- } else {
b53_get_vlan_entry(dev, pvid, vl);
- vl->members |= BIT(port) | BIT(cpu_port);
- vl->untag |= BIT(port) | BIT(cpu_port);
+ vl->members |= BIT(port);
b53_set_vlan_entry(dev, pvid, vl);
}
}
@@ -2045,7 +2422,13 @@ int b53_br_flags_pre(struct dsa_switch *ds, int port,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack)
{
- if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD | BR_LEARNING))
+ struct b53_device *dev = ds->priv;
+ unsigned long mask = (BR_FLOOD | BR_MCAST_FLOOD | BR_ISOLATED);
+
+ if (!is5325(dev))
+ mask |= BR_LEARNING;
+
+ if (flags.mask & ~mask)
return -EINVAL;
return 0;
@@ -2065,6 +2448,9 @@ int b53_br_flags(struct dsa_switch *ds, int port,
if (flags.mask & BR_LEARNING)
b53_port_set_learning(ds->priv, port,
!!(flags.val & BR_LEARNING));
+ if (flags.mask & BR_ISOLATED)
+ b53_port_set_isolated(ds->priv, port,
+ !!(flags.val & BR_ISOLATED));
return 0;
}
@@ -2121,8 +2507,11 @@ enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port,
goto out;
}
- /* Older models require a different 6 byte tag */
- if (is5325(dev) || is5365(dev) || is63xx(dev)) {
+ /* Older models require different 6 byte tags */
+ if (is5325(dev) || is5365(dev)) {
+ dev->tag_protocol = DSA_TAG_PROTO_BRCM_LEGACY_FCS;
+ goto out;
+ } else if (is63xx(dev)) {
dev->tag_protocol = DSA_TAG_PROTO_BRCM_LEGACY;
goto out;
}
@@ -2212,6 +2601,9 @@ int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy)
{
int ret;
+ if (!b53_support_eee(ds, port))
+ return 0;
+
ret = phy_init_eee(phy, false);
if (ret)
return 0;
@@ -2222,25 +2614,19 @@ int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy)
}
EXPORT_SYMBOL(b53_eee_init);
-int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e)
+bool b53_support_eee(struct dsa_switch *ds, int port)
{
struct b53_device *dev = ds->priv;
- if (is5325(dev) || is5365(dev))
- return -EOPNOTSUPP;
-
- return 0;
+ return !is5325(dev) && !is5365(dev) && !is63xx(dev);
}
-EXPORT_SYMBOL(b53_get_mac_eee);
+EXPORT_SYMBOL(b53_support_eee);
int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e)
{
struct b53_device *dev = ds->priv;
struct ethtool_keee *p = &dev->ports[port].eee;
- if (is5325(dev) || is5365(dev))
- return -EOPNOTSUPP;
-
p->eee_enabled = e->eee_enabled;
b53_eee_enable_set(ds, port, e->eee_enabled);
@@ -2255,18 +2641,58 @@ static int b53_change_mtu(struct dsa_switch *ds, int port, int mtu)
bool allow_10_100;
if (is5325(dev) || is5365(dev))
- return -EOPNOTSUPP;
+ return 0;
+
+ if (!dsa_is_cpu_port(ds, port))
+ return 0;
- enable_jumbo = (mtu >= JMS_MIN_SIZE);
- allow_10_100 = (dev->chip_id == BCM583XX_DEVICE_ID);
+ enable_jumbo = (mtu > ETH_DATA_LEN);
+ allow_10_100 = !is63xx(dev);
return b53_set_jumbo(dev, enable_jumbo, allow_10_100);
}
static int b53_get_max_mtu(struct dsa_switch *ds, int port)
{
- return JMS_MAX_SIZE;
+ struct b53_device *dev = ds->priv;
+
+ if (is5325(dev) || is5365(dev))
+ return B53_MAX_MTU_25;
+
+ return B53_MAX_MTU;
+}
+
+int b53_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
+{
+ struct b53_device *dev = ds->priv;
+ u32 atc;
+ int reg;
+
+ if (is63xx(dev))
+ reg = B53_AGING_TIME_CONTROL_63XX;
+ else
+ reg = B53_AGING_TIME_CONTROL;
+
+ if (dev->chip_id == BCM53101_DEVICE_ID)
+ atc = DIV_ROUND_CLOSEST(msecs, 500);
+ else
+ atc = DIV_ROUND_CLOSEST(msecs, 1000);
+
+ if (!is5325(dev) && !is5365(dev))
+ atc |= AGE_CHANGE;
+
+ b53_write32(dev, B53_MGMT_PAGE, reg, atc);
+
+ return 0;
}
+EXPORT_SYMBOL_GPL(b53_set_ageing_time);
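(Worked example, illustration only; the constants are the ones added to b53_regs.h later in this patch.) For an ageing time of 300000 ms:

/* BCM53101 (0.5 s steps, per the divide above):
 *   atc = DIV_ROUND_CLOSEST(300000, 500)  = 600
 * other chips (1 s steps):
 *   atc = DIV_ROUND_CLOSEST(300000, 1000) = 300
 * Everything except 5325/5365 then ORs in AGE_CHANGE (BIT(20)) so the
 * switch picks up the new period.
 */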
+
+static const struct phylink_mac_ops b53_phylink_mac_ops = {
+ .mac_select_pcs = b53_phylink_mac_select_pcs,
+ .mac_config = b53_phylink_mac_config,
+ .mac_link_down = b53_phylink_mac_link_down,
+ .mac_link_up = b53_phylink_mac_link_up,
+};
static const struct dsa_switch_ops b53_switch_ops = {
.get_tag_protocol = b53_get_tag_protocol,
@@ -2278,16 +2704,13 @@ static const struct dsa_switch_ops b53_switch_ops = {
.get_ethtool_phy_stats = b53_get_ethtool_phy_stats,
.phy_read = b53_phy_read16,
.phy_write = b53_phy_write16,
- .adjust_link = b53_adjust_link,
.phylink_get_caps = b53_phylink_get_caps,
- .phylink_mac_select_pcs = b53_phylink_mac_select_pcs,
- .phylink_mac_config = b53_phylink_mac_config,
- .phylink_mac_link_down = b53_phylink_mac_link_down,
- .phylink_mac_link_up = b53_phylink_mac_link_up,
+ .port_setup = b53_setup_port,
.port_enable = b53_enable_port,
.port_disable = b53_disable_port,
- .get_mac_eee = b53_get_mac_eee,
+ .support_eee = b53_support_eee,
.set_mac_eee = b53_set_mac_eee,
+ .set_ageing_time = b53_set_ageing_time,
.port_bridge_join = b53_br_join,
.port_bridge_leave = b53_br_leave,
.port_pre_bridge_flags = b53_br_flags_pre,
@@ -2308,6 +2731,30 @@ static const struct dsa_switch_ops b53_switch_ops = {
.port_change_mtu = b53_change_mtu,
};
+static const struct b53_arl_ops b53_arl_ops_25 = {
+ .arl_read_entry = b53_arl_read_entry_25,
+ .arl_write_entry = b53_arl_write_entry_25,
+ .arl_search_read = b53_arl_search_read_25,
+};
+
+static const struct b53_arl_ops b53_arl_ops_89 = {
+ .arl_read_entry = b53_arl_read_entry_89,
+ .arl_write_entry = b53_arl_write_entry_89,
+ .arl_search_read = b53_arl_search_read_89,
+};
+
+static const struct b53_arl_ops b53_arl_ops_63xx = {
+ .arl_read_entry = b53_arl_read_entry_89,
+ .arl_write_entry = b53_arl_write_entry_89,
+ .arl_search_read = b53_arl_search_read_63xx,
+};
+
+static const struct b53_arl_ops b53_arl_ops_95 = {
+ .arl_read_entry = b53_arl_read_entry_95,
+ .arl_write_entry = b53_arl_write_entry_95,
+ .arl_search_read = b53_arl_search_read_95,
+};
+
struct b53_chip_data {
u32 chip_id;
const char *dev_name;
@@ -2321,6 +2768,7 @@ struct b53_chip_data {
u8 duplex_reg;
u8 jumbo_pm_reg;
u8 jumbo_size_reg;
+ const struct b53_arl_ops *arl_ops;
};
#define B53_VTA_REGS \
@@ -2340,6 +2788,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.arl_buckets = 1024,
.imp_port = 5,
.duplex_reg = B53_DUPLEX_STAT_FE,
+ .arl_ops = &b53_arl_ops_25,
},
{
.chip_id = BCM5365_DEVICE_ID,
@@ -2350,6 +2799,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.arl_buckets = 1024,
.imp_port = 5,
.duplex_reg = B53_DUPLEX_STAT_FE,
+ .arl_ops = &b53_arl_ops_25,
},
{
.chip_id = BCM5389_DEVICE_ID,
@@ -2363,6 +2813,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_89,
},
{
.chip_id = BCM5395_DEVICE_ID,
@@ -2376,6 +2827,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_95,
},
{
.chip_id = BCM5397_DEVICE_ID,
@@ -2389,6 +2841,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_89,
},
{
.chip_id = BCM5398_DEVICE_ID,
@@ -2402,6 +2855,21 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_89,
+ },
+ {
+ .chip_id = BCM53101_DEVICE_ID,
+ .dev_name = "BCM53101",
+ .vlans = 4096,
+ .enabled_ports = 0x11f,
+ .arl_bins = 4,
+ .arl_buckets = 512,
+ .vta_regs = B53_VTA_REGS,
+ .imp_port = 8,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_95,
},
{
.chip_id = BCM53115_DEVICE_ID,
@@ -2415,6 +2883,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_95,
},
{
.chip_id = BCM53125_DEVICE_ID,
@@ -2428,6 +2897,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_95,
},
{
.chip_id = BCM53128_DEVICE_ID,
@@ -2441,32 +2911,21 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_95,
},
{
.chip_id = BCM63XX_DEVICE_ID,
.dev_name = "BCM63xx",
.vlans = 4096,
.enabled_ports = 0, /* pdata must provide them */
- .arl_bins = 4,
- .arl_buckets = 1024,
- .imp_port = 8,
- .vta_regs = B53_VTA_REGS_63XX,
- .duplex_reg = B53_DUPLEX_STAT_63XX,
- .jumbo_pm_reg = B53_JUMBO_PORT_MASK_63XX,
- .jumbo_size_reg = B53_JUMBO_MAX_SIZE_63XX,
- },
- {
- .chip_id = BCM63268_DEVICE_ID,
- .dev_name = "BCM63268",
- .vlans = 4096,
- .enabled_ports = 0, /* pdata must provide them */
- .arl_bins = 4,
- .arl_buckets = 1024,
+ .arl_bins = 1,
+ .arl_buckets = 4096,
.imp_port = 8,
.vta_regs = B53_VTA_REGS_63XX,
.duplex_reg = B53_DUPLEX_STAT_63XX,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK_63XX,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE_63XX,
+ .arl_ops = &b53_arl_ops_63xx,
},
{
.chip_id = BCM53010_DEVICE_ID,
@@ -2480,6 +2939,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_95,
},
{
.chip_id = BCM53011_DEVICE_ID,
@@ -2493,6 +2953,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_95,
},
{
.chip_id = BCM53012_DEVICE_ID,
@@ -2506,6 +2967,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_95,
},
{
.chip_id = BCM53018_DEVICE_ID,
@@ -2519,6 +2981,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_95,
},
{
.chip_id = BCM53019_DEVICE_ID,
@@ -2532,6 +2995,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_95,
},
{
.chip_id = BCM58XX_DEVICE_ID,
@@ -2545,6 +3009,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_95,
},
{
.chip_id = BCM583XX_DEVICE_ID,
@@ -2558,6 +3023,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_95,
},
/* Starfighter 2 */
{
@@ -2572,6 +3038,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_95,
},
{
.chip_id = BCM7445_DEVICE_ID,
@@ -2585,6 +3052,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_95,
},
{
.chip_id = BCM7278_DEVICE_ID,
@@ -2598,6 +3066,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_95,
},
{
.chip_id = BCM53134_DEVICE_ID,
@@ -2612,18 +3081,23 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_95,
},
};
static int b53_switch_init(struct b53_device *dev)
{
+ u32 chip_id = dev->chip_id;
unsigned int i;
int ret;
+ if (is63xx(dev))
+ chip_id = BCM63XX_DEVICE_ID;
+
for (i = 0; i < ARRAY_SIZE(b53_switch_chips); i++) {
const struct b53_chip_data *chip = &b53_switch_chips[i];
- if (chip->chip_id == dev->chip_id) {
+ if (chip->chip_id == chip_id) {
if (!dev->enabled_ports)
dev->enabled_ports = chip->enabled_ports;
dev->name = chip->dev_name;
@@ -2636,6 +3110,7 @@ static int b53_switch_init(struct b53_device *dev)
dev->num_vlans = chip->vlans;
dev->num_arl_bins = chip->arl_bins;
dev->num_arl_buckets = chip->arl_buckets;
+ dev->arl_ops = chip->arl_ops;
break;
}
}
@@ -2666,6 +3141,9 @@ static int b53_switch_init(struct b53_device *dev)
}
}
+ if (is5325e(dev))
+ dev->num_arl_buckets = 512;
+
dev->num_ports = fls(dev->enabled_ports);
dev->ds->num_ports = min_t(unsigned int, dev->num_ports, DSA_MAX_PORTS);
@@ -2726,7 +3204,9 @@ struct b53_device *b53_switch_alloc(struct device *base,
dev->priv = priv;
dev->ops = ops;
ds->ops = &b53_switch_ops;
+ ds->phylink_mac_ops = &b53_phylink_mac_ops;
dev->vlan_enabled = true;
+ dev->vlan_filtering = false;
 	/* Let DSA handle the case where multiple bridges span the same switch
* device and different VLAN awareness settings are requested, which
* would be breaking filtering semantics for any of the other bridge
@@ -2765,10 +3245,24 @@ int b53_switch_detect(struct b53_device *dev)
b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, 0xf);
b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, &tmp);
- if (tmp == 0xf)
+ if (tmp == 0xf) {
+ u32 phy_id;
+ int val;
+
dev->chip_id = BCM5325_DEVICE_ID;
- else
+
+ val = b53_phy_read16(dev->ds, 0, MII_PHYSID1);
+ phy_id = (val & 0xffff) << 16;
+ val = b53_phy_read16(dev->ds, 0, MII_PHYSID2);
+ phy_id |= (val & 0xfff0);
+
+ if (phy_id == 0x00406330)
+ dev->variant_id = B53_VARIANT_5325M;
+ else if (phy_id == 0x0143bc30)
+ dev->variant_id = B53_VARIANT_5325E;
+ } else {
dev->chip_id = BCM5365_DEVICE_ID;
+ }
break;
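(Illustration only.) The composite phy_id built above places MII_PHYSID1 in the upper 16 bits and MII_PHYSID2, with the revision nibble masked off, in the lower bits:

/* MII_PHYSID1 = 0x0040, MII_PHYSID2 = 0x6330..0x633f
 *   -> phy_id = (0x0040 << 16) | 0x6330 = 0x00406330 -> B53_VARIANT_5325M
 * MII_PHYSID1 = 0x0143, MII_PHYSID2 = 0xbc30..0xbc3f
 *   -> phy_id = (0x0143 << 16) | 0xbc30 = 0x0143bc30 -> B53_VARIANT_5325E
 */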
case BCM5389_DEVICE_ID:
case BCM5395_DEVICE_ID:
@@ -2782,6 +3276,7 @@ int b53_switch_detect(struct b53_device *dev)
return ret;
switch (id32) {
+ case BCM53101_DEVICE_ID:
case BCM53115_DEVICE_ID:
case BCM53125_DEVICE_ID:
case BCM53128_DEVICE_ID:
diff --git a/drivers/net/dsa/b53/b53_mdio.c b/drivers/net/dsa/b53/b53_mdio.c
index 897e5e8b3d69..43a3b37b731b 100644
--- a/drivers/net/dsa/b53/b53_mdio.c
+++ b/drivers/net/dsa/b53/b53_mdio.c
@@ -343,10 +343,9 @@ static int b53_mdio_probe(struct mdio_device *mdiodev)
dev_set_drvdata(&mdiodev->dev, dev);
ret = b53_switch_register(dev);
- if (ret) {
- dev_err(&mdiodev->dev, "failed to register switch: %i\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&mdiodev->dev, ret,
+ "failed to register switch\n");
return ret;
}
@@ -375,6 +374,7 @@ static void b53_mdio_shutdown(struct mdio_device *mdiodev)
static const struct of_device_id b53_of_match[] = {
{ .compatible = "brcm,bcm5325" },
+ { .compatible = "brcm,bcm53101" },
{ .compatible = "brcm,bcm53115" },
{ .compatible = "brcm,bcm53125" },
{ .compatible = "brcm,bcm53128" },
diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c
index 3a89349dc918..f4a59d8fbdd6 100644
--- a/drivers/net/dsa/b53/b53_mmap.c
+++ b/drivers/net/dsa/b53/b53_mmap.c
@@ -21,13 +21,60 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/io.h>
+#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
#include <linux/platform_data/b53.h>
+#include <linux/regmap.h>
#include "b53_priv.h"
+#define BCM63XX_EPHY_REG 0x3C
+#define BCM63268_GPHY_REG 0x54
+
+#define GPHY_CTRL_LOW_PWR BIT(3)
+#define GPHY_CTRL_IDDQ_BIAS BIT(0)
+
+struct b53_phy_info {
+ u32 gphy_port_mask;
+ u32 ephy_enable_mask;
+ u32 ephy_port_mask;
+ u32 ephy_bias_bit;
+ const u32 *ephy_offset;
+};
+
struct b53_mmap_priv {
void __iomem *regs;
+ struct regmap *gpio_ctrl;
+ const struct b53_phy_info *phy_info;
+ u32 phys_enabled;
+};
+
+static const u32 bcm6318_ephy_offsets[] = {4, 5, 6, 7};
+
+static const struct b53_phy_info bcm6318_ephy_info = {
+ .ephy_enable_mask = BIT(0) | BIT(4) | BIT(8) | BIT(12) | BIT(16),
+ .ephy_port_mask = GENMASK((ARRAY_SIZE(bcm6318_ephy_offsets) - 1), 0),
+ .ephy_bias_bit = 24,
+ .ephy_offset = bcm6318_ephy_offsets,
+};
+
+static const u32 bcm6368_ephy_offsets[] = {2, 3, 4, 5};
+
+static const struct b53_phy_info bcm6368_ephy_info = {
+ .ephy_enable_mask = BIT(0),
+ .ephy_port_mask = GENMASK((ARRAY_SIZE(bcm6368_ephy_offsets) - 1), 0),
+ .ephy_bias_bit = 0,
+ .ephy_offset = bcm6368_ephy_offsets,
+};
+
+static const u32 bcm63268_ephy_offsets[] = {4, 9, 14};
+
+static const struct b53_phy_info bcm63268_ephy_info = {
+ .gphy_port_mask = BIT(3),
+ .ephy_enable_mask = GENMASK(4, 0),
+ .ephy_port_mask = GENMASK((ARRAY_SIZE(bcm63268_ephy_offsets) - 1), 0),
+ .ephy_bias_bit = 24,
+ .ephy_offset = bcm63268_ephy_offsets,
};
static int b53_mmap_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val)
@@ -229,6 +276,71 @@ static int b53_mmap_phy_write16(struct b53_device *dev, int addr, int reg,
return -EIO;
}
+static int bcm63xx_ephy_set(struct b53_device *dev, int port, bool enable)
+{
+ struct b53_mmap_priv *priv = dev->priv;
+ const struct b53_phy_info *info = priv->phy_info;
+ struct regmap *gpio_ctrl = priv->gpio_ctrl;
+ u32 mask, val;
+
+ if (enable) {
+ mask = (info->ephy_enable_mask << info->ephy_offset[port])
+ | BIT(info->ephy_bias_bit);
+ val = 0;
+ } else {
+ mask = (info->ephy_enable_mask << info->ephy_offset[port]);
+ if (!((priv->phys_enabled & ~BIT(port)) & info->ephy_port_mask))
+ mask |= BIT(info->ephy_bias_bit);
+ val = mask;
+ }
+ return regmap_update_bits(gpio_ctrl, BCM63XX_EPHY_REG, mask, val);
+}
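(Worked example, illustration only; it uses only the bcm6318_ephy_info table above and assumes the masked bits are EPHY power-down/bias controls, which is implied but not spelled out here.) Enabling port 0 on a BCM6318-class chip:

/* ephy_offset[0] = 4, ephy_enable_mask = 0x11111, ephy_bias_bit = 24
 *   mask = (0x11111 << 4) | BIT(24) = 0x01111110, val = 0
 * so the per-port control bits and the shared bias bit are all cleared.
 * On disable, BIT(24) is only added to the mask once no other port in
 * ephy_port_mask is still enabled, so the bias stays up for active PHYs.
 */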
+
+static int bcm63268_gphy_set(struct b53_device *dev, bool enable)
+{
+ struct b53_mmap_priv *priv = dev->priv;
+ struct regmap *gpio_ctrl = priv->gpio_ctrl;
+ u32 mask = GPHY_CTRL_IDDQ_BIAS | GPHY_CTRL_LOW_PWR;
+ u32 val = 0;
+
+ if (!enable)
+ val = mask;
+
+ return regmap_update_bits(gpio_ctrl, BCM63268_GPHY_REG, mask, val);
+}
+
+static void b53_mmap_phy_enable(struct b53_device *dev, int port)
+{
+ struct b53_mmap_priv *priv = dev->priv;
+ int ret = 0;
+
+ if (priv->phy_info) {
+ if (BIT(port) & priv->phy_info->ephy_port_mask)
+ ret = bcm63xx_ephy_set(dev, port, true);
+ else if (BIT(port) & priv->phy_info->gphy_port_mask)
+ ret = bcm63268_gphy_set(dev, true);
+ }
+
+ if (!ret)
+ priv->phys_enabled |= BIT(port);
+}
+
+static void b53_mmap_phy_disable(struct b53_device *dev, int port)
+{
+ struct b53_mmap_priv *priv = dev->priv;
+ int ret = 0;
+
+ if (priv->phy_info) {
+ if (BIT(port) & priv->phy_info->ephy_port_mask)
+ ret = bcm63xx_ephy_set(dev, port, false);
+ else if (BIT(port) & priv->phy_info->gphy_port_mask)
+ ret = bcm63268_gphy_set(dev, false);
+ }
+
+ if (!ret)
+ priv->phys_enabled &= ~BIT(port);
+}
+
static const struct b53_io_ops b53_mmap_ops = {
.read8 = b53_mmap_read8,
.read16 = b53_mmap_read16,
@@ -242,6 +354,8 @@ static const struct b53_io_ops b53_mmap_ops = {
.write64 = b53_mmap_write64,
.phy_read16 = b53_mmap_phy_read16,
.phy_write16 = b53_mmap_phy_write16,
+ .phy_enable = b53_mmap_phy_enable,
+ .phy_disable = b53_mmap_phy_disable,
};
static int b53_mmap_probe_of(struct platform_device *pdev,
@@ -313,6 +427,18 @@ static int b53_mmap_probe(struct platform_device *pdev)
priv->regs = pdata->regs;
+ priv->gpio_ctrl = syscon_regmap_lookup_by_phandle(np, "brcm,gpio-ctrl");
+ if (!IS_ERR(priv->gpio_ctrl)) {
+ if (pdata->chip_id == BCM6318_DEVICE_ID ||
+ pdata->chip_id == BCM6328_DEVICE_ID ||
+ pdata->chip_id == BCM6362_DEVICE_ID)
+ priv->phy_info = &bcm6318_ephy_info;
+ else if (pdata->chip_id == BCM6368_DEVICE_ID)
+ priv->phy_info = &bcm6368_ephy_info;
+ else if (pdata->chip_id == BCM63268_DEVICE_ID)
+ priv->phy_info = &bcm63268_ephy_info;
+ }
+
dev = b53_switch_alloc(&pdev->dev, &b53_mmap_ops, priv);
if (!dev)
return -ENOMEM;
@@ -348,16 +474,16 @@ static const struct of_device_id b53_mmap_of_table[] = {
.data = (void *)BCM63XX_DEVICE_ID,
}, {
.compatible = "brcm,bcm6318-switch",
- .data = (void *)BCM63268_DEVICE_ID,
+ .data = (void *)BCM6318_DEVICE_ID,
}, {
.compatible = "brcm,bcm6328-switch",
- .data = (void *)BCM63XX_DEVICE_ID,
+ .data = (void *)BCM6328_DEVICE_ID,
}, {
.compatible = "brcm,bcm6362-switch",
- .data = (void *)BCM63XX_DEVICE_ID,
+ .data = (void *)BCM6362_DEVICE_ID,
}, {
.compatible = "brcm,bcm6368-switch",
- .data = (void *)BCM63XX_DEVICE_ID,
+ .data = (void *)BCM6368_DEVICE_ID,
}, {
.compatible = "brcm,bcm63268-switch",
.data = (void *)BCM63268_DEVICE_ID,
@@ -370,7 +496,7 @@ MODULE_DEVICE_TABLE(of, b53_mmap_of_table);
static struct platform_driver b53_mmap_driver = {
.probe = b53_mmap_probe,
- .remove_new = b53_mmap_remove,
+ .remove = b53_mmap_remove,
.shutdown = b53_mmap_shutdown,
.driver = {
.name = "b53-switch",
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index c13a907947f1..bd6849e5bb93 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -45,6 +45,8 @@ struct b53_io_ops {
int (*phy_write16)(struct b53_device *dev, int addr, int reg, u16 value);
int (*irq_enable)(struct b53_device *dev, int port);
void (*irq_disable)(struct b53_device *dev, int port);
+ void (*phy_enable)(struct b53_device *dev, int port);
+ void (*phy_disable)(struct b53_device *dev, int port);
void (*phylink_get_caps)(struct b53_device *dev, int port,
struct phylink_config *config);
struct phylink_pcs *(*phylink_mac_select_pcs)(struct b53_device *dev,
@@ -56,6 +58,17 @@ struct b53_io_ops {
bool link_up);
};
+struct b53_arl_entry;
+
+struct b53_arl_ops {
+ void (*arl_read_entry)(struct b53_device *dev,
+ struct b53_arl_entry *ent, u8 idx);
+ void (*arl_write_entry)(struct b53_device *dev,
+ const struct b53_arl_entry *ent, u8 idx);
+ void (*arl_search_read)(struct b53_device *dev, u8 idx,
+ struct b53_arl_entry *ent);
+};
+
#define B53_INVALID_LANE 0xff
enum {
@@ -66,10 +79,15 @@ enum {
BCM5395_DEVICE_ID = 0x95,
BCM5397_DEVICE_ID = 0x97,
BCM5398_DEVICE_ID = 0x98,
+ BCM53101_DEVICE_ID = 0x53101,
BCM53115_DEVICE_ID = 0x53115,
BCM53125_DEVICE_ID = 0x53125,
BCM53128_DEVICE_ID = 0x53128,
BCM63XX_DEVICE_ID = 0x6300,
+ BCM6318_DEVICE_ID = 0x6318,
+ BCM6328_DEVICE_ID = 0x6328,
+ BCM6362_DEVICE_ID = 0x6362,
+ BCM6368_DEVICE_ID = 0x6368,
BCM63268_DEVICE_ID = 0x63268,
BCM53010_DEVICE_ID = 0x53010,
BCM53011_DEVICE_ID = 0x53011,
@@ -83,6 +101,12 @@ enum {
BCM53134_DEVICE_ID = 0x5075,
};
+enum b53_variant_id {
+ B53_VARIANT_NONE = 0,
+ B53_VARIANT_5325E,
+ B53_VARIANT_5325M,
+};
+
struct b53_pcs {
struct phylink_pcs pcs;
struct b53_device *dev;
@@ -95,6 +119,7 @@ struct b53_pcs {
struct b53_port {
u16 vlan_ctl_mask;
+ u16 pvid;
struct ethtool_keee eee;
};
@@ -113,9 +138,11 @@ struct b53_device {
struct mutex stats_mutex;
struct mutex arl_mutex;
const struct b53_io_ops *ops;
+ const struct b53_arl_ops *arl_ops;
/* chip specific data */
u32 chip_id;
+ enum b53_variant_id variant_id;
u8 core_rev;
u8 vta_regs[3];
u8 duplex_reg;
@@ -146,6 +173,7 @@ struct b53_device {
unsigned int num_vlans;
struct b53_vlan *vlans;
bool vlan_enabled;
+ bool vlan_filtering;
unsigned int num_ports;
struct b53_port *ports;
@@ -162,6 +190,18 @@ static inline int is5325(struct b53_device *dev)
return dev->chip_id == BCM5325_DEVICE_ID;
}
+static inline int is5325e(struct b53_device *dev)
+{
+ return is5325(dev) &&
+ dev->variant_id == B53_VARIANT_5325E;
+}
+
+static inline int is5325m(struct b53_device *dev)
+{
+ return is5325(dev) &&
+ dev->variant_id == B53_VARIANT_5325M;
+}
+
static inline int is5365(struct b53_device *dev)
{
#ifdef CONFIG_BCM47XX
@@ -188,6 +228,7 @@ static inline int is531x5(struct b53_device *dev)
{
return dev->chip_id == BCM53115_DEVICE_ID ||
dev->chip_id == BCM53125_DEVICE_ID ||
+ dev->chip_id == BCM53101_DEVICE_ID ||
dev->chip_id == BCM53128_DEVICE_ID ||
dev->chip_id == BCM53134_DEVICE_ID;
}
@@ -195,12 +236,17 @@ static inline int is531x5(struct b53_device *dev)
static inline int is63xx(struct b53_device *dev)
{
return dev->chip_id == BCM63XX_DEVICE_ID ||
+ dev->chip_id == BCM6318_DEVICE_ID ||
+ dev->chip_id == BCM6328_DEVICE_ID ||
+ dev->chip_id == BCM6362_DEVICE_ID ||
+ dev->chip_id == BCM6368_DEVICE_ID ||
dev->chip_id == BCM63268_DEVICE_ID;
}
-static inline int is63268(struct b53_device *dev)
+static inline int is6318_268(struct b53_device *dev)
{
- return dev->chip_id == BCM63268_DEVICE_ID;
+ return dev->chip_id == BCM6318_DEVICE_ID ||
+ dev->chip_id == BCM63268_DEVICE_ID;
}
static inline int is5301x(struct b53_device *dev)
@@ -294,6 +340,33 @@ static inline void b53_arl_to_entry(struct b53_arl_entry *ent,
ent->vid = mac_vid >> ARLTBL_VID_S;
}
+static inline void b53_arl_to_entry_25(struct b53_arl_entry *ent,
+ u64 mac_vid, u8 vid_entry)
+{
+ memset(ent, 0, sizeof(*ent));
+ ent->is_valid = !!(mac_vid & ARLTBL_VALID_25);
+ ent->is_age = !!(mac_vid & ARLTBL_AGE_25);
+ ent->is_static = !!(mac_vid & ARLTBL_STATIC_25);
+ u64_to_ether_addr(mac_vid, ent->mac);
+ ent->port = (mac_vid & ARLTBL_DATA_PORT_ID_MASK_25) >>
+ ARLTBL_DATA_PORT_ID_S_25;
+ if (is_unicast_ether_addr(ent->mac) && ent->port == B53_CPU_PORT)
+ ent->port = B53_CPU_PORT_25;
+ ent->vid = vid_entry;
+}
+
+static inline void b53_arl_to_entry_89(struct b53_arl_entry *ent,
+ u64 mac_vid, u16 fwd_entry)
+{
+ memset(ent, 0, sizeof(*ent));
+ ent->port = fwd_entry & ARLTBL_DATA_PORT_ID_MASK_89;
+ ent->is_valid = !!(fwd_entry & ARLTBL_VALID_89);
+ ent->is_age = !!(fwd_entry & ARLTBL_AGE_89);
+ ent->is_static = !!(fwd_entry & ARLTBL_STATIC_89);
+ u64_to_ether_addr(mac_vid, ent->mac);
+ ent->vid = mac_vid >> ARLTBL_VID_S;
+}
+
static inline void b53_arl_from_entry(u64 *mac_vid, u32 *fwd_entry,
const struct b53_arl_entry *ent)
{
@@ -308,6 +381,89 @@ static inline void b53_arl_from_entry(u64 *mac_vid, u32 *fwd_entry,
*fwd_entry |= ARLTBL_AGE;
}
+static inline void b53_arl_from_entry_25(u64 *mac_vid, u8 *vid_entry,
+ const struct b53_arl_entry *ent)
+{
+ *mac_vid = ether_addr_to_u64(ent->mac);
+ if (is_unicast_ether_addr(ent->mac) && ent->port == B53_CPU_PORT_25)
+ *mac_vid |= (u64)B53_CPU_PORT << ARLTBL_DATA_PORT_ID_S_25;
+ else
+ *mac_vid |= ((u64)ent->port << ARLTBL_DATA_PORT_ID_S_25) &
+ ARLTBL_DATA_PORT_ID_MASK_25;
+ if (ent->is_valid)
+ *mac_vid |= ARLTBL_VALID_25;
+ if (ent->is_static)
+ *mac_vid |= ARLTBL_STATIC_25;
+ if (ent->is_age)
+ *mac_vid |= ARLTBL_AGE_25;
+ *vid_entry = ent->vid;
+}
+
+static inline void b53_arl_from_entry_89(u64 *mac_vid, u32 *fwd_entry,
+ const struct b53_arl_entry *ent)
+{
+ *mac_vid = ether_addr_to_u64(ent->mac);
+ *mac_vid |= (u64)(ent->vid & ARLTBL_VID_MASK) << ARLTBL_VID_S;
+ *fwd_entry = ent->port & ARLTBL_DATA_PORT_ID_MASK_89;
+ if (ent->is_valid)
+ *fwd_entry |= ARLTBL_VALID_89;
+ if (ent->is_static)
+ *fwd_entry |= ARLTBL_STATIC_89;
+ if (ent->is_age)
+ *fwd_entry |= ARLTBL_AGE_89;
+}
+
+static inline void b53_arl_search_to_entry_25(struct b53_arl_entry *ent,
+ u64 mac_vid, u8 ext)
+{
+ memset(ent, 0, sizeof(*ent));
+ ent->is_valid = !!(mac_vid & ARLTBL_VALID_25);
+ ent->is_age = !!(mac_vid & ARLTBL_AGE_25);
+ ent->is_static = !!(mac_vid & ARLTBL_STATIC_25);
+ u64_to_ether_addr(mac_vid, ent->mac);
+ ent->vid = (mac_vid & ARL_SRCH_RSLT_VID_MASK_25) >>
+ ARL_SRCH_RSLT_VID_S_25;
+ ent->port = (mac_vid & ARL_SRCH_RSLT_PORT_ID_MASK_25) >>
+ ARL_SRCH_RSLT_PORT_ID_S_25;
+ if (is_multicast_ether_addr(ent->mac) && (ext & ARL_SRCH_RSLT_EXT_MC_MII))
+ ent->port |= BIT(B53_CPU_PORT_25);
+ else if (!is_multicast_ether_addr(ent->mac) && ent->port == B53_CPU_PORT)
+ ent->port = B53_CPU_PORT_25;
+}
+
+static inline void b53_arl_search_to_entry_63xx(struct b53_arl_entry *ent,
+ u64 mac_vid, u16 fwd_entry)
+{
+ memset(ent, 0, sizeof(*ent));
+ u64_to_ether_addr(mac_vid, ent->mac);
+ ent->vid = mac_vid >> ARLTBL_VID_S;
+
+ ent->port = fwd_entry & ARL_SRST_PORT_ID_MASK_63XX;
+ ent->port >>= 1;
+
+ ent->is_age = !!(fwd_entry & ARL_SRST_AGE_63XX);
+ ent->is_static = !!(fwd_entry & ARL_SRST_STATIC_63XX);
+ ent->is_valid = 1;
+}
+
+static inline void b53_arl_read_entry(struct b53_device *dev,
+ struct b53_arl_entry *ent, u8 idx)
+{
+ dev->arl_ops->arl_read_entry(dev, ent, idx);
+}
+
+static inline void b53_arl_write_entry(struct b53_device *dev,
+ const struct b53_arl_entry *ent, u8 idx)
+{
+ dev->arl_ops->arl_write_entry(dev, ent, idx);
+}
+
+static inline void b53_arl_search_read(struct b53_device *dev, u8 idx,
+ struct b53_arl_entry *ent)
+{
+ dev->arl_ops->arl_search_read(dev, idx, ent);
+}
+
#ifdef CONFIG_BCM47XX
#include <linux/bcm47xx_nvram.h>
@@ -339,6 +495,7 @@ void b53_get_strings(struct dsa_switch *ds, int port, u32 stringset,
void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data);
int b53_get_sset_count(struct dsa_switch *ds, int port, int sset);
void b53_get_ethtool_phy_stats(struct dsa_switch *ds, int port, uint64_t *data);
+int b53_set_ageing_time(struct dsa_switch *ds, unsigned int msecs);
int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge,
bool *tx_fwd_offload, struct netlink_ext_ack *extack);
void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge);
@@ -352,18 +509,6 @@ int b53_br_flags(struct dsa_switch *ds, int port,
struct netlink_ext_ack *extack);
int b53_setup_devlink_resources(struct dsa_switch *ds);
void b53_port_event(struct dsa_switch *ds, int port);
-void b53_phylink_mac_config(struct dsa_switch *ds, int port,
- unsigned int mode,
- const struct phylink_link_state *state);
-void b53_phylink_mac_link_down(struct dsa_switch *ds, int port,
- unsigned int mode,
- phy_interface_t interface);
-void b53_phylink_mac_link_up(struct dsa_switch *ds, int port,
- unsigned int mode,
- phy_interface_t interface,
- struct phy_device *phydev,
- int speed, int duplex,
- bool tx_pause, bool rx_pause);
int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
struct netlink_ext_ack *extack);
int b53_vlan_add(struct dsa_switch *ds, int port,
@@ -392,11 +537,12 @@ enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port,
enum dsa_tag_protocol mprot);
void b53_mirror_del(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror);
+int b53_setup_port(struct dsa_switch *ds, int port);
int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy);
void b53_disable_port(struct dsa_switch *ds, int port);
void b53_brcm_hdr_setup(struct dsa_switch *ds, int port);
int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy);
-int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e);
+bool b53_support_eee(struct dsa_switch *ds, int port);
int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e);
#endif
diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h
index bfbcb66bef66..54a278db67c9 100644
--- a/drivers/net/dsa/b53/b53_regs.h
+++ b/drivers/net/dsa/b53/b53_regs.h
@@ -29,6 +29,7 @@
#define B53_ARLIO_PAGE 0x05 /* ARL Access */
#define B53_FRAMEBUF_PAGE 0x06 /* Management frame access */
#define B53_MEM_ACCESS_PAGE 0x08 /* Memory access */
+#define B53_IEEE_PAGE 0x0a /* IEEE 802.1X */
/* PHY Registers */
#define B53_PORT_MII_PAGE(i) (0x10 + (i)) /* Port i MII Registers */
@@ -50,6 +51,9 @@
/* Jumbo Frame Registers */
#define B53_JUMBO_PAGE 0x40
+/* EAP Registers */
+#define B53_EAP_PAGE 0x42
+
/* EEE Control Registers Page */
#define B53_EEE_PAGE 0x92
@@ -92,18 +96,22 @@
#define PORT_OVERRIDE_SPEED_10M (0 << PORT_OVERRIDE_SPEED_S)
#define PORT_OVERRIDE_SPEED_100M (1 << PORT_OVERRIDE_SPEED_S)
#define PORT_OVERRIDE_SPEED_1000M (2 << PORT_OVERRIDE_SPEED_S)
+#define PORT_OVERRIDE_LP_FLOW_25 BIT(3) /* BCM5325 only */
#define PORT_OVERRIDE_RV_MII_25 BIT(4) /* BCM5325 only */
#define PORT_OVERRIDE_RX_FLOW BIT(4)
#define PORT_OVERRIDE_TX_FLOW BIT(5)
#define PORT_OVERRIDE_SPEED_2000M BIT(6) /* BCM5301X only, requires setting 1000M */
#define PORT_OVERRIDE_EN BIT(7) /* Use the register contents */
-/* Power-down mode control */
+/* Power-down mode control (8 bit) */
#define B53_PD_MODE_CTRL_25 0x0f
+#define PD_MODE_PORT_MASK 0x1f
+/* Bit 0 also powers down the switch. */
+#define PD_MODE_POWER_DOWN_PORT(i) BIT(i)
/* IP Multicast control (8 bit) */
#define B53_IP_MULTICAST_CTRL 0x21
-#define B53_IPMC_FWD_EN BIT(1)
+#define B53_IP_MC BIT(0)
#define B53_UC_FWD_EN BIT(6)
#define B53_MC_FWD_EN BIT(7)
@@ -111,6 +119,10 @@
#define B53_SWITCH_CTRL 0x22
#define B53_MII_DUMB_FWDG_EN BIT(6)
+/* Protected Port Selection (16 bit) */
+#define B53_PROTECTED_PORT_SEL 0x24
+#define B53_PROTECTED_PORT_SEL_25 0x26
+
/* (16 bit) */
#define B53_UC_FLOOD_MASK 0x32
#define B53_MC_FLOOD_MASK 0x34
@@ -217,6 +229,13 @@
#define BRCM_HDR_P5_EN BIT(1) /* Enable tagging on port 5 */
#define BRCM_HDR_P7_EN BIT(2) /* Enable tagging on port 7 */
+/* Aging Time control register (32 bit) */
+#define B53_AGING_TIME_CONTROL 0x06
+#define B53_AGING_TIME_CONTROL_63XX 0x08
+#define AGE_CHANGE BIT(20)
+#define AGE_TIME_MASK 0x7ffff
+#define AGE_TIME_MAX 1048575
+
/* Mirror capture control register (16 bit) */
#define B53_MIR_CAP_CTL 0x10
#define CAP_PORT_MASK 0xf
@@ -310,13 +329,12 @@
#define B53_ARLTBL_MAC_VID_ENTRY(n) ((0x10 * (n)) + 0x10)
#define ARLTBL_MAC_MASK 0xffffffffffffULL
#define ARLTBL_VID_S 48
-#define ARLTBL_VID_MASK_25 0xff
#define ARLTBL_VID_MASK 0xfff
#define ARLTBL_DATA_PORT_ID_S_25 48
-#define ARLTBL_DATA_PORT_ID_MASK_25 0xf
-#define ARLTBL_AGE_25 BIT(61)
-#define ARLTBL_STATIC_25 BIT(62)
-#define ARLTBL_VALID_25 BIT(63)
+#define ARLTBL_DATA_PORT_ID_MASK_25 GENMASK_ULL(53, 48)
+#define ARLTBL_AGE_25 BIT_ULL(61)
+#define ARLTBL_STATIC_25 BIT_ULL(62)
+#define ARLTBL_VALID_25 BIT_ULL(63)
/* ARL Table Data Entry N Registers (32 bit) */
#define B53_ARLTBL_DATA_ENTRY(n) ((0x10 * (n)) + 0x18)
@@ -326,12 +344,23 @@
#define ARLTBL_STATIC BIT(15)
#define ARLTBL_VALID BIT(16)
+/* BCM5389 ARL Table Data Entry N Register format (16 bit) */
+#define ARLTBL_DATA_PORT_ID_MASK_89 GENMASK(8, 0)
+#define ARLTBL_TC_MASK_89 GENMASK(12, 10)
+#define ARLTBL_AGE_89 BIT(13)
+#define ARLTBL_STATIC_89 BIT(14)
+#define ARLTBL_VALID_89 BIT(15)
+
+/* BCM5325/BCM5365 ARL Table VID Entry N Registers (8 bit) */
+#define B53_ARLTBL_VID_ENTRY_25(n) ((0x2 * (n)) + 0x30)
+
/* Maximum number of bin entries in the ARL for all switches */
#define B53_ARLTBL_MAX_BIN_ENTRIES 4
/* ARL Search Control Register (8 bit) */
#define B53_ARL_SRCH_CTL 0x50
#define B53_ARL_SRCH_CTL_25 0x20
+#define B53_ARL_SRCH_CTL_89 0x30
#define ARL_SRCH_VLID BIT(0)
#define ARL_SRCH_STDN BIT(7)
@@ -339,22 +368,54 @@
#define B53_ARL_SRCH_ADDR 0x51
#define B53_ARL_SRCH_ADDR_25 0x22
#define B53_ARL_SRCH_ADDR_65 0x24
+#define B53_ARL_SRCH_ADDR_89 0x31
+#define B53_ARL_SRCH_ADDR_63XX 0x32
#define ARL_ADDR_MASK GENMASK(14, 0)
/* ARL Search MAC/VID Result (64 bit) */
#define B53_ARL_SRCH_RSTL_0_MACVID 0x60
+#define B53_ARL_SRCH_RSLT_MACVID_89 0x33
+#define B53_ARL_SRCH_RSLT_MACVID_63XX 0x34
-/* Single register search result on 5325 */
+/* Single register search result on 5325/5365 */
#define B53_ARL_SRCH_RSTL_0_MACVID_25 0x24
-/* Single register search result on 5365 */
-#define B53_ARL_SRCH_RSTL_0_MACVID_65 0x30
+#define ARL_SRCH_RSLT_PORT_ID_S_25 48
+#define ARL_SRCH_RSLT_PORT_ID_MASK_25 GENMASK_ULL(52, 48)
+#define ARL_SRCH_RSLT_VID_S_25 53
+#define ARL_SRCH_RSLT_VID_MASK_25 GENMASK_ULL(60, 53)
+
+/* BCM5325/5365 Search result extend register (8 bit) */
+#define B53_ARL_SRCH_RSLT_EXT_25 0x2c
+#define ARL_SRCH_RSLT_EXT_MC_MII BIT(2)
/* ARL Search Data Result (32 bit) */
#define B53_ARL_SRCH_RSTL_0 0x68
+/* BCM5389 ARL Search Data Result (16 bit) */
+#define B53_ARL_SRCH_RSLT_89 0x3b
+
#define B53_ARL_SRCH_RSTL_MACVID(x) (B53_ARL_SRCH_RSTL_0_MACVID + ((x) * 0x10))
#define B53_ARL_SRCH_RSTL(x) (B53_ARL_SRCH_RSTL_0 + ((x) * 0x10))
+/* 63XX ARL Search Data Result (16 bit) */
+#define B53_ARL_SRCH_RSLT_63XX 0x3c
+#define ARL_SRST_PORT_ID_MASK_63XX GENMASK(9, 1)
+#define ARL_SRST_TC_MASK_63XX GENMASK(13, 11)
+#define ARL_SRST_AGE_63XX BIT(14)
+#define ARL_SRST_STATIC_63XX BIT(15)
+
+/*************************************************************************
+ * IEEE 802.1X Registers
+ *************************************************************************/
+
+/* Multicast DLF Drop Control register (16 bit) */
+#define B53_IEEE_MCAST_DLF 0x94
+#define B53_IEEE_MCAST_DROP_EN BIT(11)
+
+/* Unicast DLF Drop Control register (16 bit) */
+#define B53_IEEE_UCAST_DLF 0x96
+#define B53_IEEE_UCAST_DROP_EN BIT(11)
+
/*************************************************************************
* Port VLAN Registers
*************************************************************************/
@@ -481,6 +542,17 @@
#define JMS_MAX_SIZE 9724
/*************************************************************************
+ * EAP Page Registers
+ *************************************************************************/
+#define B53_PORT_EAP_CONF(i) (0x20 + 8 * (i))
+#define EAP_MODE_SHIFT 51
+#define EAP_MODE_SHIFT_63XX 50
+#define EAP_MODE_MASK (0x3ull << EAP_MODE_SHIFT)
+#define EAP_MODE_MASK_63XX (0x3ull << EAP_MODE_SHIFT_63XX)
+#define EAP_MODE_BASIC 0
+#define EAP_MODE_SIMPLIFIED 3
+
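(A plausible sketch, not part of the patch.) b53_br_join()/b53_br_leave() earlier in this diff call b53_set_eap_mode() with EAP_MODE_BASIC / EAP_MODE_SIMPLIFIED; the helper itself lives in a part of b53_common.c not shown here, so the following is only an assumption built from the register definitions above and the existing b53_read64()/b53_write64() accessors:

static void b53_set_eap_mode_sketch(struct b53_device *dev, int port, int mode)
{
	u8 shift = is63xx(dev) ? EAP_MODE_SHIFT_63XX : EAP_MODE_SHIFT;
	u64 mask = is63xx(dev) ? EAP_MODE_MASK_63XX : EAP_MODE_MASK;
	u64 eap_conf;

	b53_read64(dev, B53_EAP_PAGE, B53_PORT_EAP_CONF(port), &eap_conf);
	eap_conf &= ~mask;
	eap_conf |= (u64)mode << shift;
	b53_write64(dev, B53_EAP_PAGE, B53_PORT_EAP_CONF(port), eap_conf);
}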
+/*************************************************************************
* EEE Configuration Page Registers
*************************************************************************/
diff --git a/drivers/net/dsa/b53/b53_serdes.c b/drivers/net/dsa/b53/b53_serdes.c
index 3f8a491ce885..7460122f6abc 100644
--- a/drivers/net/dsa/b53/b53_serdes.c
+++ b/drivers/net/dsa/b53/b53_serdes.c
@@ -99,8 +99,8 @@ static void b53_serdes_an_restart(struct phylink_pcs *pcs)
SERDES_MII_BLK, reg);
}
-static void b53_serdes_get_state(struct phylink_pcs *pcs,
- struct phylink_link_state *state)
+static void b53_serdes_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
+ struct phylink_link_state *state)
{
struct b53_device *dev = pcs_to_b53_pcs(pcs)->dev;
u8 lane = pcs_to_b53_pcs(pcs)->lane;
@@ -239,7 +239,6 @@ int b53_serdes_init(struct b53_device *dev, int port)
pcs->dev = dev;
pcs->lane = lane;
pcs->pcs.ops = &b53_pcs_ops;
- pcs->pcs.neg_mode = true;
return 0;
}
diff --git a/drivers/net/dsa/b53/b53_spi.c b/drivers/net/dsa/b53/b53_spi.c
index 308f15d3832e..467da057579e 100644
--- a/drivers/net/dsa/b53/b53_spi.c
+++ b/drivers/net/dsa/b53/b53_spi.c
@@ -16,7 +16,7 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/delay.h>
#include <linux/kernel.h>
diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c
index f3f95332ff17..b9939bbd2cd5 100644
--- a/drivers/net/dsa/b53/b53_srab.c
+++ b/drivers/net/dsa/b53/b53_srab.c
@@ -682,7 +682,7 @@ static void b53_srab_shutdown(struct platform_device *pdev)
static struct platform_driver b53_srab_driver = {
.probe = b53_srab_probe,
- .remove_new = b53_srab_remove,
+ .remove = b53_srab_remove,
.shutdown = b53_srab_shutdown,
.driver = {
.name = "b53-srab-switch",
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index bc77ee9e6d0a..960685596093 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -513,12 +513,12 @@ static void bcm_sf2_crossbar_setup(struct bcm_sf2_priv *priv)
u32 reg;
int i;
- mask = BIT(priv->num_crossbar_int_ports) - 1;
+ mask = BIT(priv->num_crossbar_ext_bits) - 1;
reg = reg_readl(priv, REG_CROSSBAR);
switch (priv->type) {
case BCM4908_DEVICE_ID:
- shift = CROSSBAR_BCM4908_INT_P7 * priv->num_crossbar_int_ports;
+ shift = CROSSBAR_BCM4908_INT_P7 * priv->num_crossbar_ext_bits;
reg &= ~(mask << shift);
if (0) /* FIXME */
reg |= CROSSBAR_BCM4908_EXT_SERDES << shift;
@@ -536,7 +536,7 @@ static void bcm_sf2_crossbar_setup(struct bcm_sf2_priv *priv)
reg = reg_readl(priv, REG_CROSSBAR);
for (i = 0; i < priv->num_crossbar_int_ports; i++) {
- shift = i * priv->num_crossbar_int_ports;
+ shift = i * priv->num_crossbar_ext_bits;
dev_dbg(dev, "crossbar int port #%d - ext port #%d\n", i,
(reg >> shift) & mask);
@@ -675,8 +675,10 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
of_remove_property(child, prop);
phydev = of_phy_find_device(child);
- if (phydev)
+ if (phydev) {
phy_device_remove(phydev);
+ phy_device_free(phydev);
+ }
}
err = mdiobus_register(priv->user_mii_bus);
@@ -740,16 +742,19 @@ static void bcm_sf2_sw_get_caps(struct dsa_switch *ds, int port,
MAC_10 | MAC_100 | MAC_1000;
}
-static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
+static void bcm_sf2_sw_mac_config(struct phylink_config *config,
unsigned int mode,
const struct phylink_link_state *state)
{
- struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ struct dsa_port *dp = dsa_phylink_to_port(config);
u32 id_mode_dis = 0, port_mode;
+ struct bcm_sf2_priv *priv;
u32 reg_rgmii_ctrl;
u32 reg;
- if (port == core_readl(priv, CORE_IMP0_PRT_ID))
+ priv = bcm_sf2_to_priv(dp->ds);
+
+ if (dp->index == core_readl(priv, CORE_IMP0_PRT_ID))
return;
switch (state->interface) {
@@ -770,7 +775,7 @@ static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
return;
}
- reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
+ reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, dp->index);
/* Clear id_mode_dis bit, and the existing port mode, let
* RGMII_MODE_EN be set by mac_link_{up,down}
@@ -809,13 +814,16 @@ static void bcm_sf2_sw_mac_link_set(struct dsa_switch *ds, int port,
reg_writel(priv, reg, reg_rgmii_ctrl);
}
-static void bcm_sf2_sw_mac_link_down(struct dsa_switch *ds, int port,
+static void bcm_sf2_sw_mac_link_down(struct phylink_config *config,
unsigned int mode,
phy_interface_t interface)
{
- struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct bcm_sf2_priv *priv;
+ int port = dp->index;
u32 reg, offset;
+ priv = bcm_sf2_to_priv(dp->ds);
if (priv->wol_ports_mask & BIT(port))
return;
@@ -824,23 +832,26 @@ static void bcm_sf2_sw_mac_link_down(struct dsa_switch *ds, int port,
reg &= ~LINK_STS;
core_writel(priv, reg, offset);
- bcm_sf2_sw_mac_link_set(ds, port, interface, false);
+ bcm_sf2_sw_mac_link_set(dp->ds, port, interface, false);
}
-static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
+static void bcm_sf2_sw_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
unsigned int mode,
phy_interface_t interface,
- struct phy_device *phydev,
int speed, int duplex,
bool tx_pause, bool rx_pause)
{
- struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
- struct ethtool_keee *p = &priv->dev->ports[port].eee;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct bcm_sf2_priv *priv;
u32 reg_rgmii_ctrl = 0;
+ struct ethtool_keee *p;
+ int port = dp->index;
u32 reg, offset;
- bcm_sf2_sw_mac_link_set(ds, port, interface, true);
+ bcm_sf2_sw_mac_link_set(dp->ds, port, interface, true);
+ priv = bcm_sf2_to_priv(dp->ds);
offset = bcm_sf2_port_override_offset(priv, port);
if (phy_interface_mode_is_rgmii(interface) ||
@@ -886,8 +897,10 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
core_writel(priv, reg, offset);
- if (mode == MLO_AN_PHY && phydev)
- p->eee_enabled = b53_eee_init(ds, port, phydev);
+ if (mode == MLO_AN_PHY && phydev) {
+ p = &priv->dev->ports[port].eee;
+ p->eee_enabled = b53_eee_init(dp->ds, port, phydev);
+ }
}
static void bcm_sf2_sw_fixed_state(struct dsa_switch *ds, int port,
@@ -1170,8 +1183,8 @@ static void bcm_sf2_sw_get_strings(struct dsa_switch *ds, int port,
int cnt = b53_get_sset_count(ds, port, stringset);
b53_get_strings(ds, port, stringset, data);
- bcm_sf2_cfp_get_strings(ds, port, stringset,
- data + cnt * ETH_GSTRING_LEN);
+ data += cnt * ETH_GSTRING_LEN;
+ bcm_sf2_cfp_get_strings(ds, port, stringset, &data);
}
static void bcm_sf2_sw_get_ethtool_stats(struct dsa_switch *ds, int port,
@@ -1196,6 +1209,12 @@ static int bcm_sf2_sw_get_sset_count(struct dsa_switch *ds, int port,
return cnt;
}
+static const struct phylink_mac_ops bcm_sf2_phylink_mac_ops = {
+ .mac_config = bcm_sf2_sw_mac_config,
+ .mac_link_down = bcm_sf2_sw_mac_link_down,
+ .mac_link_up = bcm_sf2_sw_mac_link_up,
+};
+
static const struct dsa_switch_ops bcm_sf2_ops = {
.get_tag_protocol = b53_get_tag_protocol,
.setup = bcm_sf2_sw_setup,
@@ -1206,18 +1225,17 @@ static const struct dsa_switch_ops bcm_sf2_ops = {
.get_ethtool_phy_stats = b53_get_ethtool_phy_stats,
.get_phy_flags = bcm_sf2_sw_get_phy_flags,
.phylink_get_caps = bcm_sf2_sw_get_caps,
- .phylink_mac_config = bcm_sf2_sw_mac_config,
- .phylink_mac_link_down = bcm_sf2_sw_mac_link_down,
- .phylink_mac_link_up = bcm_sf2_sw_mac_link_up,
.phylink_fixed_state = bcm_sf2_sw_fixed_state,
.suspend = bcm_sf2_sw_suspend,
.resume = bcm_sf2_sw_resume,
.get_wol = bcm_sf2_sw_get_wol,
.set_wol = bcm_sf2_sw_set_wol,
+ .port_setup = b53_setup_port,
.port_enable = bcm_sf2_port_setup,
.port_disable = bcm_sf2_port_disable,
- .get_mac_eee = b53_get_mac_eee,
+ .support_eee = b53_support_eee,
.set_mac_eee = b53_set_mac_eee,
+ .set_ageing_time = b53_set_ageing_time,
.port_bridge_join = b53_br_join,
.port_bridge_leave = b53_br_leave,
.port_pre_bridge_flags = b53_br_flags_pre,
@@ -1244,6 +1262,7 @@ struct bcm_sf2_of_data {
unsigned int core_reg_align;
unsigned int num_cfp_rules;
unsigned int num_crossbar_int_ports;
+ unsigned int num_crossbar_ext_bits;
};
static const u16 bcm_sf2_4908_reg_offsets[] = {
@@ -1272,6 +1291,7 @@ static const struct bcm_sf2_of_data bcm_sf2_4908_data = {
.reg_offsets = bcm_sf2_4908_reg_offsets,
.num_cfp_rules = 256,
.num_crossbar_int_ports = 2,
+ .num_crossbar_ext_bits = 2,
};
/* Register offsets for the SWITCH_REG_* block */
@@ -1383,6 +1403,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
priv->core_reg_align = data->core_reg_align;
priv->num_cfp_rules = data->num_cfp_rules;
priv->num_crossbar_int_ports = data->num_crossbar_int_ports;
+ priv->num_crossbar_ext_bits = data->num_crossbar_ext_bits;
priv->rcdev = devm_reset_control_get_optional_exclusive(&pdev->dev,
"switch");
@@ -1399,6 +1420,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
priv->dev = dev;
ds = dev->ds;
ds->ops = &bcm_sf2_ops;
+ ds->phylink_mac_ops = &bcm_sf2_phylink_mac_ops;
/* Advertise the 8 egress queues */
ds->num_tx_queues = SF2_NUM_EGRESS_QUEUES;
@@ -1603,7 +1625,7 @@ static SIMPLE_DEV_PM_OPS(bcm_sf2_pm_ops,
static struct platform_driver bcm_sf2_driver = {
.probe = bcm_sf2_sw_probe,
- .remove_new = bcm_sf2_sw_remove,
+ .remove = bcm_sf2_sw_remove,
.shutdown = bcm_sf2_sw_shutdown,
.driver = {
.name = "brcm-sf2",
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
index f95f4880b69e..be9f3b29019f 100644
--- a/drivers/net/dsa/bcm_sf2.h
+++ b/drivers/net/dsa/bcm_sf2.h
@@ -75,6 +75,7 @@ struct bcm_sf2_priv {
unsigned int core_reg_align;
unsigned int num_cfp_rules;
unsigned int num_crossbar_int_ports;
+ unsigned int num_crossbar_ext_bits;
/* spinlock protecting access to the indirect registers */
spinlock_t indir_lock;
@@ -227,8 +228,8 @@ int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv);
void bcm_sf2_cfp_exit(struct dsa_switch *ds);
int bcm_sf2_cfp_resume(struct dsa_switch *ds);
-void bcm_sf2_cfp_get_strings(struct dsa_switch *ds, int port,
- u32 stringset, uint8_t *data);
+void bcm_sf2_cfp_get_strings(struct dsa_switch *ds, int port, u32 stringset,
+ uint8_t **data);
void bcm_sf2_cfp_get_ethtool_stats(struct dsa_switch *ds, int port,
uint64_t *data);
int bcm_sf2_cfp_get_sset_count(struct dsa_switch *ds, int port, int sset);
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index c88ee3dd4299..e22362e6f0cd 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -1279,27 +1279,19 @@ static const struct bcm_sf2_cfp_stat {
},
};
-void bcm_sf2_cfp_get_strings(struct dsa_switch *ds, int port,
- u32 stringset, uint8_t *data)
+void bcm_sf2_cfp_get_strings(struct dsa_switch *ds, int port, u32 stringset,
+ uint8_t **data)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
- unsigned int s = ARRAY_SIZE(bcm_sf2_cfp_stats);
- char buf[ETH_GSTRING_LEN];
- unsigned int i, j, iter;
+ unsigned int i, j;
if (stringset != ETH_SS_STATS)
return;
- for (i = 1; i < priv->num_cfp_rules; i++) {
- for (j = 0; j < s; j++) {
- snprintf(buf, sizeof(buf),
- "CFP%03d_%sCntr",
- i, bcm_sf2_cfp_stats[j].name);
- iter = (i - 1) * s + j;
- strscpy(data + iter * ETH_GSTRING_LEN,
- buf, ETH_GSTRING_LEN);
- }
- }
+ for (i = 1; i < priv->num_cfp_rules; i++)
+ for (j = 0; j < ARRAY_SIZE(bcm_sf2_cfp_stats); j++)
+ ethtool_sprintf(data, "CFP%03d_%sCntr", i,
+ bcm_sf2_cfp_stats[j].name);
}
void bcm_sf2_cfp_get_ethtool_stats(struct dsa_switch *ds, int port,
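A minimal illustrative sketch (not from this patch) of the pattern behind these get_strings conversions: ethtool_puts()/ethtool_sprintf() fill one fixed-size ETH_GSTRING_LEN slot and advance the caller's cursor, so drivers no longer compute "data + i * ETH_GSTRING_LEN" by hand. Assuming only strscpy() and ETH_GSTRING_LEN:

#include <linux/ethtool.h>
#include <linux/string.h>

/* Roughly what ethtool_puts(&data, name) does: copy the name into the
 * current ETH_GSTRING_LEN-sized slot, then move the cursor to the next slot.
 */
static void example_fill_string(u8 **data, const char *name)
{
	strscpy((char *)*data, name, ETH_GSTRING_LEN);
	*data += ETH_GSTRING_LEN;
}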
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
index c70ed67cc188..4a416f2717ba 100644
--- a/drivers/net/dsa/dsa_loop.c
+++ b/drivers/net/dsa/dsa_loop.c
@@ -17,7 +17,19 @@
#include <linux/dsa/loop.h>
#include <net/dsa.h>
-#include "dsa_loop.h"
+#define DSA_LOOP_NUM_PORTS 6
+#define DSA_LOOP_CPU_PORT (DSA_LOOP_NUM_PORTS - 1)
+#define NUM_FIXED_PHYS (DSA_LOOP_NUM_PORTS - 2)
+
+struct dsa_loop_pdata {
+ /* Must be first, such that dsa_register_switch() can access this
+ * without gory pointer manipulations
+ */
+ struct dsa_chip_data cd;
+ const char *name;
+ unsigned int enabled_ports;
+ const char *netdev;
+};
static struct dsa_loop_mib_entry dsa_loop_mibs[] = {
[DSA_LOOP_PHY_READ_OK] = { "phy_read_ok", },
@@ -27,6 +39,7 @@ static struct dsa_loop_mib_entry dsa_loop_mibs[] = {
};
static struct phy_device *phydevs[PHY_MAX_ADDR];
+static struct mdio_device *switch_mdiodev;
enum dsa_loop_devlink_resource_id {
DSA_LOOP_DEVLINK_PARAM_ID_VTU,
@@ -121,8 +134,7 @@ static void dsa_loop_get_strings(struct dsa_switch *ds, int port,
return;
for (i = 0; i < __DSA_LOOP_CNT_MAX; i++)
- memcpy(data + i * ETH_GSTRING_LEN,
- ps->ports[port].mib[i].name, ETH_GSTRING_LEN);
+ ethtool_puts(&data, ps->ports[port].mib[i].name);
}
static void dsa_loop_get_ethtool_stats(struct dsa_switch *ds, int port,
@@ -383,34 +395,68 @@ static struct mdio_driver dsa_loop_drv = {
.shutdown = dsa_loop_drv_shutdown,
};
-#define NUM_FIXED_PHYS (DSA_LOOP_NUM_PORTS - 2)
-
static void dsa_loop_phydevs_unregister(void)
{
- unsigned int i;
-
- for (i = 0; i < NUM_FIXED_PHYS; i++)
- if (!IS_ERR(phydevs[i])) {
+ for (int i = 0; i < NUM_FIXED_PHYS; i++) {
+ if (!IS_ERR(phydevs[i]))
fixed_phy_unregister(phydevs[i]);
- phy_device_free(phydevs[i]);
- }
+ }
}
-static int __init dsa_loop_init(void)
+static int __init dsa_loop_create_switch_mdiodev(void)
{
- struct fixed_phy_status status = {
- .link = 1,
- .speed = SPEED_100,
- .duplex = DUPLEX_FULL,
+ static struct dsa_loop_pdata dsa_loop_pdata = {
+ .cd = {
+ .port_names[0] = "lan1",
+ .port_names[1] = "lan2",
+ .port_names[2] = "lan3",
+ .port_names[3] = "lan4",
+ .port_names[DSA_LOOP_CPU_PORT] = "cpu",
+ },
+ .name = "DSA mockup driver",
+ .enabled_ports = 0x1f,
+ .netdev = "eth0",
};
- unsigned int i, ret;
+ struct mii_bus *bus;
+ int ret = -ENODEV;
+
+ bus = mdio_find_bus("fixed-0");
+ if (WARN_ON(!bus))
+ return ret;
+
+ switch_mdiodev = mdio_device_create(bus, 31);
+ if (IS_ERR(switch_mdiodev))
+ goto out;
+
+ strscpy(switch_mdiodev->modalias, "dsa-loop");
+ switch_mdiodev->dev.platform_data = &dsa_loop_pdata;
+
+ ret = mdio_device_register(switch_mdiodev);
+ if (ret)
+ mdio_device_free(switch_mdiodev);
+out:
+ put_device(&bus->dev);
+ return ret;
+}
+
+static int __init dsa_loop_init(void)
+{
+ unsigned int i;
+ int ret;
+
+ ret = dsa_loop_create_switch_mdiodev();
+ if (ret)
+ return ret;
for (i = 0; i < NUM_FIXED_PHYS; i++)
- phydevs[i] = fixed_phy_register(PHY_POLL, &status, NULL);
+ phydevs[i] = fixed_phy_register_100fd();
ret = mdio_driver_register(&dsa_loop_drv);
- if (ret)
+ if (ret) {
dsa_loop_phydevs_unregister();
+ mdio_device_remove(switch_mdiodev);
+ mdio_device_free(switch_mdiodev);
+ }
return ret;
}
@@ -420,10 +466,11 @@ static void __exit dsa_loop_exit(void)
{
mdio_driver_unregister(&dsa_loop_drv);
dsa_loop_phydevs_unregister();
+ mdio_device_remove(switch_mdiodev);
+ mdio_device_free(switch_mdiodev);
}
module_exit(dsa_loop_exit);
-MODULE_SOFTDEP("pre: dsa_loop_bdinfo");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Florian Fainelli");
MODULE_DESCRIPTION("DSA loopback driver");
diff --git a/drivers/net/dsa/dsa_loop.h b/drivers/net/dsa/dsa_loop.h
deleted file mode 100644
index 93e5c15d0efd..000000000000
--- a/drivers/net/dsa/dsa_loop.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __DSA_LOOP_H
-#define __DSA_LOOP_H
-
-struct dsa_chip_data;
-
-struct dsa_loop_pdata {
- /* Must be first, such that dsa_register_switch() can access this
- * without gory pointer manipulations
- */
- struct dsa_chip_data cd;
- const char *name;
- unsigned int enabled_ports;
- const char *netdev;
-};
-
-#define DSA_LOOP_NUM_PORTS 6
-#define DSA_LOOP_CPU_PORT (DSA_LOOP_NUM_PORTS - 1)
-
-#endif /* __DSA_LOOP_H */
diff --git a/drivers/net/dsa/dsa_loop_bdinfo.c b/drivers/net/dsa/dsa_loop_bdinfo.c
deleted file mode 100644
index 14ca42491512..000000000000
--- a/drivers/net/dsa/dsa_loop_bdinfo.c
+++ /dev/null
@@ -1,36 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/phy.h>
-#include <net/dsa.h>
-
-#include "dsa_loop.h"
-
-static struct dsa_loop_pdata dsa_loop_pdata = {
- .cd = {
- .port_names[0] = "lan1",
- .port_names[1] = "lan2",
- .port_names[2] = "lan3",
- .port_names[3] = "lan4",
- .port_names[DSA_LOOP_CPU_PORT] = "cpu",
- },
- .name = "DSA mockup driver",
- .enabled_ports = 0x1f,
- .netdev = "eth0",
-};
-
-static const struct mdio_board_info bdinfo = {
- .bus_id = "fixed-0",
- .modalias = "dsa-loop",
- .mdio_addr = 31,
- .platform_data = &dsa_loop_pdata,
-};
-
-static int __init dsa_loop_bdinfo_init(void)
-{
- return mdiobus_register_board_info(&bdinfo, 1);
-}
-arch_initcall(dsa_loop_bdinfo_init)
-
-MODULE_DESCRIPTION("DSA mock-up switch driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c
index beda1e9d350f..dd5f263ab984 100644
--- a/drivers/net/dsa/hirschmann/hellcreek.c
+++ b/drivers/net/dsa/hirschmann/hellcreek.c
@@ -294,12 +294,8 @@ static void hellcreek_get_strings(struct dsa_switch *ds, int port,
{
int i;
- for (i = 0; i < ARRAY_SIZE(hellcreek_counter); ++i) {
- const struct hellcreek_counter *counter = &hellcreek_counter[i];
-
- strscpy(data + i * ETH_GSTRING_LEN,
- counter->name, ETH_GSTRING_LEN);
- }
+ for (i = 0; i < ARRAY_SIZE(hellcreek_counter); ++i)
+ ethtool_puts(&data, hellcreek_counter[i].name);
}
static int hellcreek_get_sset_count(struct dsa_switch *ds, int port, int sset)
@@ -1065,7 +1061,7 @@ static void hellcreek_setup_tc_identity_mapping(struct hellcreek *hellcreek)
static int hellcreek_setup_fdb(struct hellcreek *hellcreek)
{
- static struct hellcreek_fdb_entry l2_ptp = {
+ static const struct hellcreek_fdb_entry l2_ptp = {
/* MAC: 01-1B-19-00-00-00 */
.mac = { 0x01, 0x1b, 0x19, 0x00, 0x00, 0x00 },
.portmask = 0x03, /* Management ports */
@@ -1076,7 +1072,7 @@ static int hellcreek_setup_fdb(struct hellcreek *hellcreek)
.reprio_tc = 6, /* TC: 6 as per IEEE 802.1AS */
.reprio_en = 1,
};
- static struct hellcreek_fdb_entry udp4_ptp = {
+ static const struct hellcreek_fdb_entry udp4_ptp = {
/* MAC: 01-00-5E-00-01-81 */
.mac = { 0x01, 0x00, 0x5e, 0x00, 0x01, 0x81 },
.portmask = 0x03, /* Management ports */
@@ -1087,7 +1083,7 @@ static int hellcreek_setup_fdb(struct hellcreek *hellcreek)
.reprio_tc = 6,
.reprio_en = 1,
};
- static struct hellcreek_fdb_entry udp6_ptp = {
+ static const struct hellcreek_fdb_entry udp6_ptp = {
/* MAC: 33-33-00-00-01-81 */
.mac = { 0x33, 0x33, 0x00, 0x00, 0x01, 0x81 },
.portmask = 0x03, /* Management ports */
@@ -1098,7 +1094,7 @@ static int hellcreek_setup_fdb(struct hellcreek *hellcreek)
.reprio_tc = 6,
.reprio_en = 1,
};
- static struct hellcreek_fdb_entry l2_p2p = {
+ static const struct hellcreek_fdb_entry l2_p2p = {
/* MAC: 01-80-C2-00-00-0E */
.mac = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e },
.portmask = 0x03, /* Management ports */
@@ -1109,7 +1105,7 @@ static int hellcreek_setup_fdb(struct hellcreek *hellcreek)
.reprio_tc = 6, /* TC: 6 as per IEEE 802.1AS */
.reprio_en = 1,
};
- static struct hellcreek_fdb_entry udp4_p2p = {
+ static const struct hellcreek_fdb_entry udp4_p2p = {
/* MAC: 01-00-5E-00-00-6B */
.mac = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x6b },
.portmask = 0x03, /* Management ports */
@@ -1120,7 +1116,7 @@ static int hellcreek_setup_fdb(struct hellcreek *hellcreek)
.reprio_tc = 6,
.reprio_en = 1,
};
- static struct hellcreek_fdb_entry udp6_p2p = {
+ static const struct hellcreek_fdb_entry udp6_p2p = {
/* MAC: 33-33-00-00-00-6B */
.mac = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x6b },
.portmask = 0x03, /* Management ports */
@@ -1131,7 +1127,7 @@ static int hellcreek_setup_fdb(struct hellcreek *hellcreek)
.reprio_tc = 6,
.reprio_en = 1,
};
- static struct hellcreek_fdb_entry stp = {
+ static const struct hellcreek_fdb_entry stp = {
/* MAC: 01-80-C2-00-00-00 */
.mac = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 },
.portmask = 0x03, /* Management ports */
@@ -1324,13 +1320,13 @@ static int hellcreek_devlink_region_fdb_snapshot(struct devlink *dl,
return 0;
}
-static struct devlink_region_ops hellcreek_region_vlan_ops = {
+static const struct devlink_region_ops hellcreek_region_vlan_ops = {
.name = "vlan",
.snapshot = hellcreek_devlink_region_vlan_snapshot,
.destructor = kfree,
};
-static struct devlink_region_ops hellcreek_region_fdb_ops = {
+static const struct devlink_region_ops hellcreek_region_fdb_ops = {
.name = "fdb",
.snapshot = hellcreek_devlink_region_fdb_snapshot,
.destructor = kfree,
@@ -1339,7 +1335,7 @@ static struct devlink_region_ops hellcreek_region_fdb_ops = {
static int hellcreek_setup_devlink_regions(struct dsa_switch *ds)
{
struct hellcreek *hellcreek = ds->priv;
- struct devlink_region_ops *ops;
+ const struct devlink_region_ops *ops;
struct devlink_region *region;
u64 size;
int ret;
@@ -1930,6 +1926,8 @@ static const struct dsa_switch_ops hellcreek_ds_ops = {
.port_vlan_filtering = hellcreek_vlan_filtering,
.setup = hellcreek_setup,
.teardown = hellcreek_teardown,
+ .port_hsr_join = dsa_port_simple_hsr_join,
+ .port_hsr_leave = dsa_port_simple_hsr_leave,
};
static int hellcreek_probe(struct platform_device *pdev)
@@ -2105,7 +2103,7 @@ MODULE_DEVICE_TABLE(of, hellcreek_of_match);
static struct platform_driver hellcreek_driver = {
.probe = hellcreek_probe,
- .remove_new = hellcreek_remove,
+ .remove = hellcreek_remove,
.shutdown = hellcreek_shutdown,
.driver = {
.name = "hellcreek",
diff --git a/drivers/net/dsa/hirschmann/hellcreek.h b/drivers/net/dsa/hirschmann/hellcreek.h
index 6874cb9dc361..bebf0d3ff330 100644
--- a/drivers/net/dsa/hirschmann/hellcreek.h
+++ b/drivers/net/dsa/hirschmann/hellcreek.h
@@ -12,14 +12,16 @@
#include <linux/bitmap.h>
#include <linux/bitops.h>
+#include <linux/container_of.h>
#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/mutex.h>
-#include <linux/workqueue.h>
#include <linux/leds.h>
+#include <linux/mutex.h>
#include <linux/platform_data/hirschmann-hellcreek.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
#include <net/dsa.h>
#include <net/pkt_sched.h>
@@ -242,7 +244,7 @@ struct hellcreek_port_hwtstamp {
struct sk_buff *tx_skb;
/* Current timestamp configuration */
- struct hwtstamp_config tstamp_config;
+ struct kernel_hwtstamp_config tstamp_config;
};
struct hellcreek_port {
diff --git a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c
index bd7aacc71a63..99941ff1ebf9 100644
--- a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c
+++ b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c
@@ -16,7 +16,7 @@
#include "hellcreek_ptp.h"
int hellcreek_get_ts_info(struct dsa_switch *ds, int port,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct hellcreek *hellcreek = ds->priv;
@@ -40,7 +40,7 @@ int hellcreek_get_ts_info(struct dsa_switch *ds, int port,
* the user requested what is actually available or not
*/
static int hellcreek_set_hwtstamp_config(struct hellcreek *hellcreek, int port,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
struct hellcreek_port_hwtstamp *ps =
&hellcreek->ports[port].port_hwtstamp;
@@ -110,41 +110,35 @@ static int hellcreek_set_hwtstamp_config(struct hellcreek *hellcreek, int port,
}
int hellcreek_port_hwtstamp_set(struct dsa_switch *ds, int port,
- struct ifreq *ifr)
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct hellcreek *hellcreek = ds->priv;
struct hellcreek_port_hwtstamp *ps;
- struct hwtstamp_config config;
int err;
ps = &hellcreek->ports[port].port_hwtstamp;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- err = hellcreek_set_hwtstamp_config(hellcreek, port, &config);
+ err = hellcreek_set_hwtstamp_config(hellcreek, port, config);
if (err)
return err;
/* Save the chosen configuration to be returned later */
- memcpy(&ps->tstamp_config, &config, sizeof(config));
+ ps->tstamp_config = *config;
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
int hellcreek_port_hwtstamp_get(struct dsa_switch *ds, int port,
- struct ifreq *ifr)
+ struct kernel_hwtstamp_config *config)
{
struct hellcreek *hellcreek = ds->priv;
struct hellcreek_port_hwtstamp *ps;
- struct hwtstamp_config *config;
ps = &hellcreek->ports[port].port_hwtstamp;
- config = &ps->tstamp_config;
+ *config = ps->tstamp_config;
- return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
- -EFAULT : 0;
+ return 0;
}
/* Returns a pointer to the PTP header if the caller should time stamp, or NULL
diff --git a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h
index 71af77efb28b..388821c4aa10 100644
--- a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h
+++ b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h
@@ -38,9 +38,10 @@
#define TX_TSTAMP_TIMEOUT msecs_to_jiffies(40)
int hellcreek_port_hwtstamp_set(struct dsa_switch *ds, int port,
- struct ifreq *ifr);
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
int hellcreek_port_hwtstamp_get(struct dsa_switch *ds, int port,
- struct ifreq *ifr);
+ struct kernel_hwtstamp_config *config);
bool hellcreek_port_rxtstamp(struct dsa_switch *ds, int port,
struct sk_buff *clone, unsigned int type);
@@ -48,7 +49,7 @@ void hellcreek_port_txtstamp(struct dsa_switch *ds, int port,
struct sk_buff *skb);
int hellcreek_get_ts_info(struct dsa_switch *ds, int port,
- struct ethtool_ts_info *info);
+ struct kernel_ethtool_ts_info *info);
long hellcreek_hwtstamp_work(struct ptp_clock_info *ptp);
diff --git a/drivers/net/dsa/hirschmann/hellcreek_ptp.c b/drivers/net/dsa/hirschmann/hellcreek_ptp.c
index 5249a1c2a80b..cb23bea9c21b 100644
--- a/drivers/net/dsa/hirschmann/hellcreek_ptp.c
+++ b/drivers/net/dsa/hirschmann/hellcreek_ptp.c
@@ -27,7 +27,8 @@ void hellcreek_ptp_write(struct hellcreek *hellcreek, u16 data,
}
/* Get nanoseconds from PTP clock */
-static u64 hellcreek_ptp_clock_read(struct hellcreek *hellcreek)
+static u64 hellcreek_ptp_clock_read(struct hellcreek *hellcreek,
+ struct ptp_system_timestamp *sts)
{
u16 nsl, nsh;
@@ -45,16 +46,19 @@ static u64 hellcreek_ptp_clock_read(struct hellcreek *hellcreek)
nsh = hellcreek_ptp_read(hellcreek, PR_SS_SYNC_DATA_C);
nsh = hellcreek_ptp_read(hellcreek, PR_SS_SYNC_DATA_C);
nsh = hellcreek_ptp_read(hellcreek, PR_SS_SYNC_DATA_C);
+ ptp_read_system_prets(sts);
nsl = hellcreek_ptp_read(hellcreek, PR_SS_SYNC_DATA_C);
+ ptp_read_system_postts(sts);
return (u64)nsl | ((u64)nsh << 16);
}
-static u64 __hellcreek_ptp_gettime(struct hellcreek *hellcreek)
+static u64 __hellcreek_ptp_gettime(struct hellcreek *hellcreek,
+ struct ptp_system_timestamp *sts)
{
u64 ns;
- ns = hellcreek_ptp_clock_read(hellcreek);
+ ns = hellcreek_ptp_clock_read(hellcreek, sts);
if (ns < hellcreek->last_ts)
hellcreek->seconds++;
hellcreek->last_ts = ns;
@@ -72,7 +76,7 @@ u64 hellcreek_ptp_gettime_seconds(struct hellcreek *hellcreek, u64 ns)
{
u64 s;
- __hellcreek_ptp_gettime(hellcreek);
+ __hellcreek_ptp_gettime(hellcreek, NULL);
if (hellcreek->last_ts > ns)
s = hellcreek->seconds * NSEC_PER_SEC;
else
@@ -81,14 +85,15 @@ u64 hellcreek_ptp_gettime_seconds(struct hellcreek *hellcreek, u64 ns)
return s;
}
-static int hellcreek_ptp_gettime(struct ptp_clock_info *ptp,
- struct timespec64 *ts)
+static int hellcreek_ptp_gettimex(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
{
struct hellcreek *hellcreek = ptp_to_hellcreek(ptp);
u64 ns;
mutex_lock(&hellcreek->ptp_lock);
- ns = __hellcreek_ptp_gettime(hellcreek);
+ ns = __hellcreek_ptp_gettime(hellcreek, sts);
mutex_unlock(&hellcreek->ptp_lock);
*ts = ns_to_timespec64(ns);
@@ -184,7 +189,7 @@ static int hellcreek_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
if (abs(delta) > MAX_SLOW_OFFSET_ADJ) {
struct timespec64 now, then = ns_to_timespec64(delta);
- hellcreek_ptp_gettime(ptp, &now);
+ hellcreek_ptp_gettimex(ptp, &now, NULL);
now = timespec64_add(now, then);
hellcreek_ptp_settime(ptp, &now);
@@ -233,7 +238,7 @@ static void hellcreek_ptp_overflow_check(struct work_struct *work)
hellcreek = dw_overflow_to_hellcreek(dw);
mutex_lock(&hellcreek->ptp_lock);
- __hellcreek_ptp_gettime(hellcreek);
+ __hellcreek_ptp_gettime(hellcreek, NULL);
mutex_unlock(&hellcreek->ptp_lock);
schedule_delayed_work(&hellcreek->overflow_work,
@@ -371,8 +376,18 @@ static int hellcreek_led_setup(struct hellcreek *hellcreek)
hellcreek_set_brightness(hellcreek, STATUS_OUT_IS_GM, 1);
/* Register both leds */
- led_classdev_register(hellcreek->dev, &hellcreek->led_sync_good);
- led_classdev_register(hellcreek->dev, &hellcreek->led_is_gm);
+ ret = led_classdev_register(hellcreek->dev, &hellcreek->led_sync_good);
+ if (ret) {
+ dev_err(hellcreek->dev, "Failed to register sync_good LED\n");
+ goto out;
+ }
+
+ ret = led_classdev_register(hellcreek->dev, &hellcreek->led_is_gm);
+ if (ret) {
+ dev_err(hellcreek->dev, "Failed to register is_gm LED\n");
+ led_classdev_unregister(&hellcreek->led_sync_good);
+ goto out;
+ }
ret = 0;
@@ -409,7 +424,7 @@ int hellcreek_ptp_setup(struct hellcreek *hellcreek)
hellcreek->ptp_clock_info.pps = 0;
hellcreek->ptp_clock_info.adjfine = hellcreek_ptp_adjfine;
hellcreek->ptp_clock_info.adjtime = hellcreek_ptp_adjtime;
- hellcreek->ptp_clock_info.gettime64 = hellcreek_ptp_gettime;
+ hellcreek->ptp_clock_info.gettimex64 = hellcreek_ptp_gettimex;
hellcreek->ptp_clock_info.settime64 = hellcreek_ptp_settime;
hellcreek->ptp_clock_info.enable = hellcreek_ptp_enable;
hellcreek->ptp_clock_info.do_aux_work = hellcreek_hwtstamp_work;
diff --git a/drivers/net/dsa/ks8995.c b/drivers/net/dsa/ks8995.c
new file mode 100644
index 000000000000..77d8b842693c
--- /dev/null
+++ b/drivers/net/dsa/ks8995.c
@@ -0,0 +1,857 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SPI driver for Micrel/Kendin KS8995M and KSZ8864RMN ethernet switches
+ *
+ * Copyright (C) 2008 Gabor Juhos <juhosg at openwrt.org>
+ * Copyright (C) 2025 Linus Walleij <linus.walleij@linaro.org>
+ *
+ * This file was based on: drivers/spi/at25.c
+ * Copyright (C) 2006 David Brownell
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bits.h>
+#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of.h>
+#include <linux/spi/spi.h>
+#include <net/dsa.h>
+
+#define DRV_VERSION "0.1.1"
+#define DRV_DESC "Micrel KS8995 Ethernet switch SPI driver"
+
+/* ------------------------------------------------------------------------ */
+
+#define KS8995_REG_ID0 0x00 /* Chip ID0 */
+#define KS8995_REG_ID1 0x01 /* Chip ID1 */
+
+#define KS8995_REG_GC0 0x02 /* Global Control 0 */
+
+#define KS8995_GC0_P5_PHY BIT(3) /* Port 5 PHY enabled */
+
+#define KS8995_REG_GC1 0x03 /* Global Control 1 */
+#define KS8995_REG_GC2 0x04 /* Global Control 2 */
+
+#define KS8995_GC2_HUGE BIT(2) /* Huge packet support */
+#define KS8995_GC2_LEGAL BIT(1) /* Legal size override */
+
+#define KS8995_REG_GC3 0x05 /* Global Control 3 */
+#define KS8995_REG_GC4 0x06 /* Global Control 4 */
+
+#define KS8995_GC4_10BT BIT(4) /* Force switch to 10Mbit */
+#define KS8995_GC4_MII_FLOW BIT(5) /* MII full-duplex flow control enable */
+#define KS8995_GC4_MII_HD BIT(6) /* MII half-duplex mode enable */
+
+#define KS8995_REG_GC5 0x07 /* Global Control 5 */
+#define KS8995_REG_GC6 0x08 /* Global Control 6 */
+#define KS8995_REG_GC7 0x09 /* Global Control 7 */
+#define KS8995_REG_GC8 0x0a /* Global Control 8 */
+#define KS8995_REG_GC9 0x0b /* Global Control 9 */
+
+#define KS8995_GC9_SPECIAL BIT(0) /* Special tagging mode (DSA) */
+
+/* In DSA the ports 1-4 are numbered 0-3 and the CPU port is port 4 */
+#define KS8995_REG_PC(p, r) (0x10 + (0x10 * (p)) + (r)) /* Port Control */
+#define KS8995_REG_PS(p, r) (0x1e + (0x10 * (p)) + (r)) /* Port Status */
+
+#define KS8995_REG_PC0 0x00 /* Port Control 0 */
+#define KS8995_REG_PC1 0x01 /* Port Control 1 */
+#define KS8995_REG_PC2 0x02 /* Port Control 2 */
+#define KS8995_REG_PC3 0x03 /* Port Control 3 */
+#define KS8995_REG_PC4 0x04 /* Port Control 4 */
+#define KS8995_REG_PC5 0x05 /* Port Control 5 */
+#define KS8995_REG_PC6 0x06 /* Port Control 6 */
+#define KS8995_REG_PC7 0x07 /* Port Control 7 */
+#define KS8995_REG_PC8 0x08 /* Port Control 8 */
+#define KS8995_REG_PC9 0x09 /* Port Control 9 */
+#define KS8995_REG_PC10 0x0a /* Port Control 10 */
+#define KS8995_REG_PC11 0x0b /* Port Control 11 */
+#define KS8995_REG_PC12 0x0c /* Port Control 12 */
+#define KS8995_REG_PC13 0x0d /* Port Control 13 */
+
+#define KS8995_PC0_TAG_INS BIT(2) /* Enable tag insertion on port */
+#define KS8995_PC0_TAG_REM BIT(1) /* Enable tag removal on port */
+#define KS8995_PC0_PRIO_EN BIT(0) /* Enable priority handling */
+
+#define KS8995_PC2_TXEN BIT(2) /* Enable TX on port */
+#define KS8995_PC2_RXEN BIT(1) /* Enable RX on port */
+#define KS8995_PC2_LEARN_DIS BIT(0) /* Disable learning on port */
+
+#define KS8995_PC13_TXDIS BIT(6) /* Disable transmitter */
+#define KS8995_PC13_PWDN BIT(3) /* Power down */
+
+#define KS8995_REG_TPC0 0x60 /* TOS Priority Control 0 */
+#define KS8995_REG_TPC1 0x61 /* TOS Priority Control 1 */
+#define KS8995_REG_TPC2 0x62 /* TOS Priority Control 2 */
+#define KS8995_REG_TPC3 0x63 /* TOS Priority Control 3 */
+#define KS8995_REG_TPC4 0x64 /* TOS Priority Control 4 */
+#define KS8995_REG_TPC5 0x65 /* TOS Priority Control 5 */
+#define KS8995_REG_TPC6 0x66 /* TOS Priority Control 6 */
+#define KS8995_REG_TPC7 0x67 /* TOS Priority Control 7 */
+
+#define KS8995_REG_MAC0 0x68 /* MAC address 0 */
+#define KS8995_REG_MAC1 0x69 /* MAC address 1 */
+#define KS8995_REG_MAC2 0x6a /* MAC address 2 */
+#define KS8995_REG_MAC3 0x6b /* MAC address 3 */
+#define KS8995_REG_MAC4 0x6c /* MAC address 4 */
+#define KS8995_REG_MAC5 0x6d /* MAC address 5 */
+
+#define KS8995_REG_IAC0 0x6e /* Indirect Access Control 0 */
+#define KS8995_REG_IAC1 0x6f /* Indirect Access Control 1 */
+#define KS8995_REG_IAD7 0x70 /* Indirect Access Data 7 */
+#define KS8995_REG_IAD6 0x71 /* Indirect Access Data 6 */
+#define KS8995_REG_IAD5 0x72 /* Indirect Access Data 5 */
+#define KS8995_REG_IAD4 0x73 /* Indirect Access Data 4 */
+#define KS8995_REG_IAD3 0x74 /* Indirect Access Data 3 */
+#define KS8995_REG_IAD2 0x75 /* Indirect Access Data 2 */
+#define KS8995_REG_IAD1 0x76 /* Indirect Access Data 1 */
+#define KS8995_REG_IAD0 0x77 /* Indirect Access Data 0 */
+
+#define KSZ8864_REG_ID1 0xfe /* Chip ID in bit 7 */
+
+#define KS8995_REGS_SIZE 0x80
+#define KSZ8864_REGS_SIZE 0x100
+#define KSZ8795_REGS_SIZE 0x100
+
+#define ID1_CHIPID_M 0xf
+#define ID1_CHIPID_S 4
+#define ID1_REVISION_M 0x7
+#define ID1_REVISION_S 1
+#define ID1_START_SW 1 /* start the switch */
+
+#define FAMILY_KS8995 0x95
+#define FAMILY_KSZ8795 0x87
+#define CHIPID_M 0
+#define KS8995_CHIP_ID 0x00
+#define KSZ8864_CHIP_ID 0x01
+#define KSZ8795_CHIP_ID 0x09
+
+#define KS8995_CMD_WRITE 0x02U
+#define KS8995_CMD_READ 0x03U
+
+#define KS8995_CPU_PORT 4
+#define KS8995_NUM_PORTS 5 /* 5 ports including the CPU port */
+#define KS8995_RESET_DELAY 10 /* usec */
+
+enum ks8995_chip_variant {
+ ks8995,
+ ksz8864,
+ ksz8795,
+ max_variant
+};
+
+struct ks8995_chip_params {
+ char *name;
+ int family_id;
+ int chip_id;
+ int regs_size;
+ int addr_width;
+ int addr_shift;
+};
+
+static const struct ks8995_chip_params ks8995_chip[] = {
+ [ks8995] = {
+ .name = "KS8995MA",
+ .family_id = FAMILY_KS8995,
+ .chip_id = KS8995_CHIP_ID,
+ .regs_size = KS8995_REGS_SIZE,
+ .addr_width = 8,
+ .addr_shift = 0,
+ },
+ [ksz8864] = {
+ .name = "KSZ8864RMN",
+ .family_id = FAMILY_KS8995,
+ .chip_id = KSZ8864_CHIP_ID,
+ .regs_size = KSZ8864_REGS_SIZE,
+ .addr_width = 8,
+ .addr_shift = 0,
+ },
+ [ksz8795] = {
+ .name = "KSZ8795CLX",
+ .family_id = FAMILY_KSZ8795,
+ .chip_id = KSZ8795_CHIP_ID,
+ .regs_size = KSZ8795_REGS_SIZE,
+ .addr_width = 12,
+ .addr_shift = 1,
+ },
+};
+
+struct ks8995_switch {
+ struct spi_device *spi;
+ struct device *dev;
+ struct dsa_switch *ds;
+ struct mutex lock;
+ struct gpio_desc *reset_gpio;
+ struct bin_attribute regs_attr;
+ const struct ks8995_chip_params *chip;
+ int revision_id;
+ unsigned int max_mtu[KS8995_NUM_PORTS];
+};
+
+static const struct spi_device_id ks8995_id[] = {
+ {"ks8995", ks8995},
+ {"ksz8864", ksz8864},
+ {"ksz8795", ksz8795},
+ { }
+};
+MODULE_DEVICE_TABLE(spi, ks8995_id);
+
+static const struct of_device_id ks8995_spi_of_match[] = {
+ { .compatible = "micrel,ks8995" },
+ { .compatible = "micrel,ksz8864" },
+ { .compatible = "micrel,ksz8795" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ks8995_spi_of_match);
+
+static inline u8 get_chip_id(u8 val)
+{
+ return (val >> ID1_CHIPID_S) & ID1_CHIPID_M;
+}
+
+static inline u8 get_chip_rev(u8 val)
+{
+ return (val >> ID1_REVISION_S) & ID1_REVISION_M;
+}
+
+/* create_spi_cmd - create a chip specific SPI command header
+ * @ks: pointer to switch instance
+ * @cmd: SPI command for switch
+ * @address: register address for command
+ *
+ * Different chip families use different bit pattern to address the switches
+ * registers:
+ *
+ * KS8995: 8bit command + 8bit address
+ * KSZ8795: 3bit command + 12bit address + 1bit TR (?)
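+ *
+ * Worked example (illustrative): a KS8995 read (cmd 0x03) of register 0x10
+ * encodes as (0x03 << 8) | 0x10 = 0x0310, while the same read on a KSZ8795
+ * encodes as (0x03 << 13) | (0x10 << 1) = 0x6020.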
+ */
+static inline __be16 create_spi_cmd(struct ks8995_switch *ks, int cmd,
+ unsigned address)
+{
+ u16 result = cmd;
+
+ /* make room for address (incl. address shift) */
+ result <<= ks->chip->addr_width + ks->chip->addr_shift;
+ /* add address */
+ result |= address << ks->chip->addr_shift;
+ /* SPI protocol needs big endian */
+ return cpu_to_be16(result);
+}
+/* ------------------------------------------------------------------------ */
+static int ks8995_read(struct ks8995_switch *ks, char *buf,
+ unsigned offset, size_t count)
+{
+ __be16 cmd;
+ struct spi_transfer t[2];
+ struct spi_message m;
+ int err;
+
+ cmd = create_spi_cmd(ks, KS8995_CMD_READ, offset);
+ spi_message_init(&m);
+
+ memset(&t, 0, sizeof(t));
+
+ t[0].tx_buf = &cmd;
+ t[0].len = sizeof(cmd);
+ spi_message_add_tail(&t[0], &m);
+
+ t[1].rx_buf = buf;
+ t[1].len = count;
+ spi_message_add_tail(&t[1], &m);
+
+ mutex_lock(&ks->lock);
+ err = spi_sync(ks->spi, &m);
+ mutex_unlock(&ks->lock);
+
+ return err ? err : count;
+}
+
+static int ks8995_write(struct ks8995_switch *ks, char *buf,
+ unsigned offset, size_t count)
+{
+ __be16 cmd;
+ struct spi_transfer t[2];
+ struct spi_message m;
+ int err;
+
+ cmd = create_spi_cmd(ks, KS8995_CMD_WRITE, offset);
+ spi_message_init(&m);
+
+ memset(&t, 0, sizeof(t));
+
+ t[0].tx_buf = &cmd;
+ t[0].len = sizeof(cmd);
+ spi_message_add_tail(&t[0], &m);
+
+ t[1].tx_buf = buf;
+ t[1].len = count;
+ spi_message_add_tail(&t[1], &m);
+
+ mutex_lock(&ks->lock);
+ err = spi_sync(ks->spi, &m);
+ mutex_unlock(&ks->lock);
+
+ return err ? err : count;
+}
+
+static inline int ks8995_read_reg(struct ks8995_switch *ks, u8 addr, u8 *buf)
+{
+ return ks8995_read(ks, buf, addr, 1) != 1;
+}
+
+static inline int ks8995_write_reg(struct ks8995_switch *ks, u8 addr, u8 val)
+{
+ char buf = val;
+
+ return ks8995_write(ks, &buf, addr, 1) != 1;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static int ks8995_stop(struct ks8995_switch *ks)
+{
+ return ks8995_write_reg(ks, KS8995_REG_ID1, 0);
+}
+
+static int ks8995_start(struct ks8995_switch *ks)
+{
+ return ks8995_write_reg(ks, KS8995_REG_ID1, 1);
+}
+
+static int ks8995_reset(struct ks8995_switch *ks)
+{
+ int err;
+
+ err = ks8995_stop(ks);
+ if (err)
+ return err;
+
+ udelay(KS8995_RESET_DELAY);
+
+ return ks8995_start(ks);
+}
+
+/* ks8995_get_revision - get chip revision
+ * @ks: pointer to switch instance
+ *
+ * Verify chip family and id and get chip revision.
+ */
+static int ks8995_get_revision(struct ks8995_switch *ks)
+{
+ int err;
+ u8 id0, id1, ksz8864_id;
+
+ /* read family id */
+ err = ks8995_read_reg(ks, KS8995_REG_ID0, &id0);
+ if (err) {
+ err = -EIO;
+ goto err_out;
+ }
+
+ /* verify family id */
+ if (id0 != ks->chip->family_id) {
+ dev_err(&ks->spi->dev, "chip family id mismatch: expected 0x%02x but 0x%02x read\n",
+ ks->chip->family_id, id0);
+ err = -ENODEV;
+ goto err_out;
+ }
+
+ switch (ks->chip->family_id) {
+ case FAMILY_KS8995:
+ /* try reading chip id at CHIP ID1 */
+ err = ks8995_read_reg(ks, KS8995_REG_ID1, &id1);
+ if (err) {
+ err = -EIO;
+ goto err_out;
+ }
+
+ /* verify chip id */
+ if ((get_chip_id(id1) == CHIPID_M) &&
+ (get_chip_id(id1) == ks->chip->chip_id)) {
+ /* KS8995MA */
+ ks->revision_id = get_chip_rev(id1);
+ } else if (get_chip_id(id1) != CHIPID_M) {
+ /* KSZ8864RMN */
+ err = ks8995_read_reg(ks, KS8995_REG_ID1, &ksz8864_id);
+ if (err) {
+ err = -EIO;
+ goto err_out;
+ }
+
+ if ((ksz8864_id & 0x80) &&
+ (ks->chip->chip_id == KSZ8864_CHIP_ID)) {
+ ks->revision_id = get_chip_rev(id1);
+ }
+
+ } else {
+ dev_err(&ks->spi->dev, "unsupported chip id for KS8995 family: 0x%02x\n",
+ id1);
+ err = -ENODEV;
+ }
+ break;
+ case FAMILY_KSZ8795:
+ /* try reading chip id at CHIP ID1 */
+ err = ks8995_read_reg(ks, KS8995_REG_ID1, &id1);
+ if (err) {
+ err = -EIO;
+ goto err_out;
+ }
+
+ if (get_chip_id(id1) == ks->chip->chip_id) {
+ ks->revision_id = get_chip_rev(id1);
+ } else {
+ dev_err(&ks->spi->dev, "unsupported chip id for KSZ8795 family: 0x%02x\n",
+ id1);
+ err = -ENODEV;
+ }
+ break;
+ default:
+ dev_err(&ks->spi->dev, "unsupported family id: 0x%02x\n", id0);
+ err = -ENODEV;
+ break;
+ }
+err_out:
+ return err;
+}
+
+static int ks8995_check_config(struct ks8995_switch *ks)
+{
+ int ret;
+ u8 val;
+
+ ret = ks8995_read_reg(ks, KS8995_REG_GC0, &val);
+ if (ret) {
+ dev_err(ks->dev, "failed to read KS8995_REG_GC0\n");
+ return ret;
+ }
+
+ dev_dbg(ks->dev, "port 5 PHY %senabled\n",
+ (val & KS8995_GC0_P5_PHY) ? "" : "not ");
+
+ val |= KS8995_GC0_P5_PHY;
+ ret = ks8995_write_reg(ks, KS8995_REG_GC0, val);
+ if (ret)
+ dev_err(ks->dev, "failed to set KS8995_REG_GC0\n");
+
+ dev_dbg(ks->dev, "set KS8995_REG_GC0 to 0x%02x\n", val);
+
+ return 0;
+}
+
+static void
+ks8995_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+}
+
+static void
+ks8995_mac_link_up(struct phylink_config *config, struct phy_device *phydev,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex, bool tx_pause, bool rx_pause)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ks8995_switch *ks = dp->ds->priv;
+ int port = dp->index;
+ int ret;
+ u8 val;
+
+ /* Allow forcing the mode on the fixed CPU port, no autonegotiation.
+ * We assume autonegotiation works on the PHY-facing ports.
+ */
+ if (port != KS8995_CPU_PORT)
+ return;
+
+ dev_dbg(ks->dev, "MAC link up on CPU port (%d)\n", port);
+
+ ret = ks8995_read_reg(ks, KS8995_REG_GC4, &val);
+ if (ret) {
+ dev_err(ks->dev, "failed to read KS8995_REG_GC4\n");
+ return;
+ }
+
+ /* Conjure port config */
+ switch (speed) {
+ case SPEED_10:
+ dev_dbg(ks->dev, "set switch MII to 100Mbit mode\n");
+ val |= KS8995_GC4_10BT;
+ break;
+ case SPEED_100:
+ default:
+ dev_dbg(ks->dev, "set switch MII to 100Mbit mode\n");
+ val &= ~KS8995_GC4_10BT;
+ break;
+ }
+
+ if (duplex == DUPLEX_HALF) {
+ dev_dbg(ks->dev, "set switch MII to half duplex\n");
+ val |= KS8995_GC4_MII_HD;
+ } else {
+ dev_dbg(ks->dev, "set switch MII to full duplex\n");
+ val &= ~KS8995_GC4_MII_HD;
+ }
+
+ dev_dbg(ks->dev, "set KS8995_REG_GC4 to %02x\n", val);
+
+ /* Enable the CPU port */
+ ret = ks8995_write_reg(ks, KS8995_REG_GC4, val);
+ if (ret)
+ dev_err(ks->dev, "failed to set KS8995_REG_GC4\n");
+}
+
+static void
+ks8995_mac_link_down(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ks8995_switch *ks = dp->ds->priv;
+ int port = dp->index;
+
+ if (port != KS8995_CPU_PORT)
+ return;
+
+ dev_dbg(ks->dev, "MAC link down on CPU port (%d)\n", port);
+
+ /* Disable the CPU port */
+}
+
+static const struct phylink_mac_ops ks8995_phylink_mac_ops = {
+ .mac_config = ks8995_mac_config,
+ .mac_link_up = ks8995_mac_link_up,
+ .mac_link_down = ks8995_mac_link_down,
+};
+
+static enum dsa_tag_protocol
+ks8995_get_tag_protocol(struct dsa_switch *ds, int port,
+			enum dsa_tag_protocol mp)
+{
+ /* This switch actually uses the 6 byte KS8995 protocol */
+ return DSA_TAG_PROTO_NONE;
+}
+
+static int ks8995_setup(struct dsa_switch *ds)
+{
+ return 0;
+}
+
+static int ks8995_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct ks8995_switch *ks = ds->priv;
+
+ dev_dbg(ks->dev, "enable port %d\n", port);
+
+ return 0;
+}
+
+static void ks8995_port_disable(struct dsa_switch *ds, int port)
+{
+ struct ks8995_switch *ks = ds->priv;
+
+ dev_dbg(ks->dev, "disable port %d\n", port);
+}
+
+static int ks8995_port_pre_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ /* We support enabling/disabling learning */
+ if (flags.mask & ~(BR_LEARNING))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ks8995_port_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct ks8995_switch *ks = ds->priv;
+ int ret;
+ u8 val;
+
+ if (flags.mask & BR_LEARNING) {
+ ret = ks8995_read_reg(ks, KS8995_REG_PC(port, KS8995_REG_PC2), &val);
+ if (ret) {
+ dev_err(ks->dev, "failed to read KS8995_REG_PC2 on port %d\n", port);
+ return ret;
+ }
+
+ if (flags.val & BR_LEARNING)
+ val &= ~KS8995_PC2_LEARN_DIS;
+ else
+ val |= KS8995_PC2_LEARN_DIS;
+
+ ret = ks8995_write_reg(ks, KS8995_REG_PC(port, KS8995_REG_PC2), val);
+ if (ret) {
+ dev_err(ks->dev, "failed to write KS8995_REG_PC2 on port %d\n", port);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void ks8995_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
+{
+ struct ks8995_switch *ks = ds->priv;
+ int ret;
+ u8 val;
+
+ ret = ks8995_read_reg(ks, KS8995_REG_PC(port, KS8995_REG_PC2), &val);
+ if (ret) {
+ dev_err(ks->dev, "failed to read KS8995_REG_PC2 on port %d\n", port);
+ return;
+ }
+
+ /* Set the bits for the different STP states in accordance with
+ * the datasheet, pages 36-37 "Spanning tree support".
+ */
+ switch (state) {
+ case BR_STATE_DISABLED:
+ case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
+ val &= ~KS8995_PC2_TXEN;
+ val &= ~KS8995_PC2_RXEN;
+ val |= KS8995_PC2_LEARN_DIS;
+ break;
+ case BR_STATE_LEARNING:
+ val &= ~KS8995_PC2_TXEN;
+ val &= ~KS8995_PC2_RXEN;
+ val &= ~KS8995_PC2_LEARN_DIS;
+ break;
+ case BR_STATE_FORWARDING:
+ val |= KS8995_PC2_TXEN;
+ val |= KS8995_PC2_RXEN;
+ val &= ~KS8995_PC2_LEARN_DIS;
+ break;
+ default:
+ dev_err(ks->dev, "unknown bridge state requested\n");
+ return;
+ }
+
+ ret = ks8995_write_reg(ks, KS8995_REG_PC(port, KS8995_REG_PC2), val);
+ if (ret) {
+ dev_err(ks->dev, "failed to write KS8995_REG_PC2 on port %d\n", port);
+ return;
+ }
+
+ dev_dbg(ks->dev, "set KS8995_REG_PC2 for port %d to %02x\n", port, val);
+}
+
+static void ks8995_phylink_get_caps(struct dsa_switch *dsa, int port,
+ struct phylink_config *config)
+{
+ unsigned long *interfaces = config->supported_interfaces;
+
+ if (port == KS8995_CPU_PORT)
+ __set_bit(PHY_INTERFACE_MODE_MII, interfaces);
+
+ if (port <= 3) {
+ /* Internal PHYs */
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL, interfaces);
+ /* phylib default */
+ __set_bit(PHY_INTERFACE_MODE_MII, interfaces);
+ }
+
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100;
+}
+
+/* Huge packet support up to 1916 byte packets "inclusive"
+ * which means that tags are included. If the bit is not set
+ * it is 1536 bytes "inclusive". We present the length without
+ * tags or ethernet headers. The setting affects all ports.
+ */
+static int ks8995_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+ struct ks8995_switch *ks = ds->priv;
+ unsigned int max_mtu;
+ int ret;
+ u8 val;
+ int i;
+
+ ks->max_mtu[port] = new_mtu;
+
+ /* The whole switch shares one MTU setting, so use the largest
+ * MTU requested on any one port as the switch-wide limit.
+ */
+ max_mtu = ETH_DATA_LEN;
+ for (i = 0; i < KS8995_NUM_PORTS; i++) {
+ if (ks->max_mtu[i] > max_mtu)
+ max_mtu = ks->max_mtu[i];
+ }
+
+ /* Translate to layer 2 size.
+ * Add ethernet and (possible) VLAN headers, and checksum to the size.
+ * For ETH_DATA_LEN (1500 bytes) this will add up to 1522 bytes.
+ */
+ max_mtu += VLAN_ETH_HLEN;
+ max_mtu += ETH_FCS_LEN;
+
+ ret = ks8995_read_reg(ks, KS8995_REG_GC2, &val);
+ if (ret) {
+ dev_err(ks->dev, "failed to read KS8995_REG_GC2\n");
+ return ret;
+ }
+
+ if (max_mtu <= 1522) {
+ val &= ~KS8995_GC2_HUGE;
+ val &= ~KS8995_GC2_LEGAL;
+ } else if (max_mtu > 1522 && max_mtu <= 1536) {
+ /* This accepts packets up to 1536 bytes */
+ val &= ~KS8995_GC2_HUGE;
+ val |= KS8995_GC2_LEGAL;
+ } else {
+ /* This accepts packets up to 1916 bytes */
+ val |= KS8995_GC2_HUGE;
+ val |= KS8995_GC2_LEGAL;
+ }
+
+ dev_dbg(ks->dev, "new max MTU %d bytes (inclusive)\n", max_mtu);
+
+ ret = ks8995_write_reg(ks, KS8995_REG_GC2, val);
+ if (ret)
+ dev_err(ks->dev, "failed to set KS8995_REG_GC2\n");
+
+ return ret;
+}
+
+static int ks8995_get_max_mtu(struct dsa_switch *ds, int port)
+{
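+ /* 1916 bytes on the wire minus the 14 byte Ethernet header and the
+ * 4 byte FCS leaves 1898 bytes of payload.
+ */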
+ return 1916 - ETH_HLEN - ETH_FCS_LEN;
+}
+
+static const struct dsa_switch_ops ks8995_ds_ops = {
+ .get_tag_protocol = ks8995_get_tag_protocol,
+ .setup = ks8995_setup,
+ .port_pre_bridge_flags = ks8995_port_pre_bridge_flags,
+ .port_bridge_flags = ks8995_port_bridge_flags,
+ .port_enable = ks8995_port_enable,
+ .port_disable = ks8995_port_disable,
+ .port_stp_state_set = ks8995_port_stp_state_set,
+ .port_change_mtu = ks8995_change_mtu,
+ .port_max_mtu = ks8995_get_max_mtu,
+ .phylink_get_caps = ks8995_phylink_get_caps,
+};
+
+/* ------------------------------------------------------------------------ */
+static int ks8995_probe(struct spi_device *spi)
+{
+ struct ks8995_switch *ks;
+ int err;
+ int variant = spi_get_device_id(spi)->driver_data;
+
+ if (variant >= max_variant) {
+ dev_err(&spi->dev, "bad chip variant %d\n", variant);
+ return -ENODEV;
+ }
+
+ ks = devm_kzalloc(&spi->dev, sizeof(*ks), GFP_KERNEL);
+ if (!ks)
+ return -ENOMEM;
+
+ mutex_init(&ks->lock);
+ ks->spi = spi;
+ ks->dev = &spi->dev;
+ ks->chip = &ks8995_chip[variant];
+
+ ks->reset_gpio = devm_gpiod_get_optional(&spi->dev, "reset",
+ GPIOD_OUT_HIGH);
+ err = PTR_ERR_OR_ZERO(ks->reset_gpio);
+ if (err) {
+ dev_err(&spi->dev,
+ "failed to get reset gpio: %d\n", err);
+ return err;
+ }
+
+ err = gpiod_set_consumer_name(ks->reset_gpio, "switch-reset");
+ if (err)
+ return err;
+
+ if (ks->reset_gpio) {
+ /*
+ * If a reset line was obtained, wait for 100us after
+ * de-asserting RESET before accessing any registers, see
+ * the KS8995MA datasheet, page 44.
+ */
+ gpiod_set_value_cansleep(ks->reset_gpio, 0);
+ udelay(100);
+ }
+
+ spi_set_drvdata(spi, ks);
+
+ spi->mode = SPI_MODE_0;
+ spi->bits_per_word = 8;
+ err = spi_setup(spi);
+ if (err) {
+ dev_err(&spi->dev, "spi_setup failed, err=%d\n", err);
+ return err;
+ }
+
+ err = ks8995_get_revision(ks);
+ if (err)
+ return err;
+
+ err = ks8995_reset(ks);
+ if (err)
+ return err;
+
+ dev_info(&spi->dev, "%s device found, Chip ID:%x, Revision:%x\n",
+ ks->chip->name, ks->chip->chip_id, ks->revision_id);
+
+ err = ks8995_check_config(ks);
+ if (err)
+ return err;
+
+ ks->ds = devm_kzalloc(&spi->dev, sizeof(*ks->ds), GFP_KERNEL);
+ if (!ks->ds)
+ return -ENOMEM;
+
+ ks->ds->dev = &spi->dev;
+ ks->ds->num_ports = KS8995_NUM_PORTS;
+ ks->ds->ops = &ks8995_ds_ops;
+ ks->ds->phylink_mac_ops = &ks8995_phylink_mac_ops;
+ ks->ds->priv = ks;
+
+ err = dsa_register_switch(ks->ds);
+ if (err)
+ return dev_err_probe(&spi->dev, err,
+ "unable to register DSA switch\n");
+
+ return 0;
+}
+
+static void ks8995_remove(struct spi_device *spi)
+{
+ struct ks8995_switch *ks = spi_get_drvdata(spi);
+
+ dsa_unregister_switch(ks->ds);
+ /* assert reset */
+ gpiod_set_value_cansleep(ks->reset_gpio, 1);
+}
+
+/* ------------------------------------------------------------------------ */
+static struct spi_driver ks8995_driver = {
+ .driver = {
+ .name = "spi-ks8995",
+ .of_match_table = ks8995_spi_of_match,
+ },
+ .probe = ks8995_probe,
+ .remove = ks8995_remove,
+ .id_table = ks8995_id,
+};
+
+module_spi_driver(ks8995_driver);
+
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR("Gabor Juhos <juhosg at openwrt.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index fcb20eac332a..d246f95d57ec 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -6,6 +6,7 @@
#include <linux/module.h>
#include <linux/gpio/consumer.h>
#include <linux/regmap.h>
+#include <linux/iopoll.h>
#include <linux/mutex.h>
#include <linux/mii.h>
#include <linux/of.h>
@@ -839,6 +840,8 @@ static void lan9303_handle_reset(struct lan9303 *chip)
if (!chip->reset_gpio)
return;
+ gpiod_set_value_cansleep(chip->reset_gpio, 1);
+
if (chip->reset_duration != 0)
msleep(chip->reset_duration);
@@ -864,8 +867,34 @@ static int lan9303_disable_processing(struct lan9303 *chip)
static int lan9303_check_device(struct lan9303 *chip)
{
int ret;
+ int err;
u32 reg;
+ /* In I2C-managed configurations this polling loop will clash with the
+ * switch's own EEPROM read right after reset, and this behaviour is not
+ * configurable. While lan9303_read() already has a fairly long retry
+ * timeout, not all cases seem to be detected as an arbitration error.
+ *
+ * According to the datasheet, the EEPROM loader has a 30ms timeout (in
+ * case of a missing EEPROM).
+ *
+ * Loading of the largest supported EEPROM is expected to take at least
+ * 5.9s.
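+ *
+ * Poll every 20 ms and allow up to 6 s in total before giving up.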
+ */
+ err = read_poll_timeout(lan9303_read, ret,
+ !ret && reg & LAN9303_HW_CFG_READY,
+ 20000, 6000000, false,
+ chip->regmap, LAN9303_HW_CFG, &reg);
+ if (ret) {
+ dev_err(chip->dev, "failed to read HW_CFG reg: %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+ if (err) {
+ dev_err(chip->dev, "HW_CFG not ready: 0x%08x\n", reg);
+ return err;
+ }
+
ret = lan9303_read(chip->regmap, LAN9303_CHIP_REV, &reg);
if (ret) {
dev_err(chip->dev, "failed to read chip revision register: %d\n",
@@ -1007,15 +1036,14 @@ static const struct lan9303_mib_desc lan9303_mib[] = {
static void lan9303_get_strings(struct dsa_switch *ds, int port,
u32 stringset, uint8_t *data)
{
+ u8 *buf = data;
unsigned int u;
if (stringset != ETH_SS_STATS)
return;
- for (u = 0; u < ARRAY_SIZE(lan9303_mib); u++) {
- strncpy(data + u * ETH_GSTRING_LEN, lan9303_mib[u].name,
- ETH_GSTRING_LEN);
- }
+ for (u = 0; u < ARRAY_SIZE(lan9303_mib); u++)
+ ethtool_puts(&buf, lan9303_mib[u].name);
}
static void lan9303_get_ethtool_stats(struct dsa_switch *ds, int port,
@@ -1048,31 +1076,31 @@ static int lan9303_get_sset_count(struct dsa_switch *ds, int port, int sset)
return ARRAY_SIZE(lan9303_mib);
}
-static int lan9303_phy_read(struct dsa_switch *ds, int phy, int regnum)
+static int lan9303_phy_read(struct dsa_switch *ds, int port, int regnum)
{
struct lan9303 *chip = ds->priv;
int phy_base = chip->phy_addr_base;
- if (phy == phy_base)
+ if (port == 0)
return lan9303_virt_phy_reg_read(chip, regnum);
- if (phy > phy_base + 2)
+ if (port > 2)
return -ENODEV;
- return chip->ops->phy_read(chip, phy, regnum);
+ return chip->ops->phy_read(chip, phy_base + port, regnum);
}
-static int lan9303_phy_write(struct dsa_switch *ds, int phy, int regnum,
+static int lan9303_phy_write(struct dsa_switch *ds, int port, int regnum,
u16 val)
{
struct lan9303 *chip = ds->priv;
int phy_base = chip->phy_addr_base;
- if (phy == phy_base)
+ if (port == 0)
return lan9303_virt_phy_reg_write(chip, regnum, val);
- if (phy > phy_base + 2)
+ if (port > 2)
return -ENODEV;
- return chip->ops->phy_write(chip, phy, regnum, val);
+ return chip->ops->phy_write(chip, phy_base + port, regnum, val);
}
static int lan9303_port_enable(struct dsa_switch *ds, int port,
@@ -1100,7 +1128,7 @@ static void lan9303_port_disable(struct dsa_switch *ds, int port)
vlan_vid_del(dsa_port_to_conduit(dp), htons(ETH_P_8021Q), port);
lan9303_disable_processing_port(chip, port);
- lan9303_phy_write(ds, chip->phy_addr_base + port, MII_BMCR, BMCR_PDOWN);
+ lan9303_phy_write(ds, port, MII_BMCR, BMCR_PDOWN);
}
static int lan9303_port_bridge_join(struct dsa_switch *ds, int port,
@@ -1293,14 +1321,29 @@ static void lan9303_phylink_get_caps(struct dsa_switch *ds, int port,
}
}
-static void lan9303_phylink_mac_link_up(struct dsa_switch *ds, int port,
+static void lan9303_phylink_mac_config(struct phylink_config *config,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
+}
+
+static void lan9303_phylink_mac_link_down(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+}
+
+static void lan9303_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
unsigned int mode,
phy_interface_t interface,
- struct phy_device *phydev, int speed,
- int duplex, bool tx_pause,
+ int speed, int duplex, bool tx_pause,
bool rx_pause)
{
- struct lan9303 *chip = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct lan9303 *chip = dp->ds->priv;
+ struct dsa_switch *ds = dp->ds;
+ int port = dp->index;
u32 ctl;
u32 reg;
@@ -1330,6 +1373,12 @@ static void lan9303_phylink_mac_link_up(struct dsa_switch *ds, int port,
regmap_write(chip->regmap, flow_ctl_reg[port], reg);
}
+static const struct phylink_mac_ops lan9303_phylink_mac_ops = {
+ .mac_config = lan9303_phylink_mac_config,
+ .mac_link_down = lan9303_phylink_mac_link_down,
+ .mac_link_up = lan9303_phylink_mac_link_up,
+};
+
static const struct dsa_switch_ops lan9303_switch_ops = {
.get_tag_protocol = lan9303_get_tag_protocol,
.setup = lan9303_setup,
@@ -1337,7 +1386,6 @@ static const struct dsa_switch_ops lan9303_switch_ops = {
.phy_read = lan9303_phy_read,
.phy_write = lan9303_phy_write,
.phylink_get_caps = lan9303_phylink_get_caps,
- .phylink_mac_link_up = lan9303_phylink_mac_link_up,
.get_ethtool_stats = lan9303_get_ethtool_stats,
.get_sset_count = lan9303_get_sset_count,
.port_enable = lan9303_port_enable,
@@ -1355,8 +1403,6 @@ static const struct dsa_switch_ops lan9303_switch_ops = {
static int lan9303_register_switch(struct lan9303 *chip)
{
- int base;
-
chip->ds = devm_kzalloc(chip->dev, sizeof(*chip->ds), GFP_KERNEL);
if (!chip->ds)
return -ENOMEM;
@@ -1365,8 +1411,8 @@ static int lan9303_register_switch(struct lan9303 *chip)
chip->ds->num_ports = LAN9303_NUM_PORTS;
chip->ds->priv = chip;
chip->ds->ops = &lan9303_switch_ops;
- base = chip->phy_addr_base;
- chip->ds->phys_mii_mask = GENMASK(LAN9303_NUM_PORTS - 1 + base, base);
+ chip->ds->phylink_mac_ops = &lan9303_phylink_mac_ops;
+ chip->ds->phys_mii_mask = GENMASK(LAN9303_NUM_PORTS - 1, 0);
return dsa_register_switch(chip->ds);
}
diff --git a/drivers/net/dsa/lan9303_i2c.c b/drivers/net/dsa/lan9303_i2c.c
index bbbec322bc4f..c62d27cdc117 100644
--- a/drivers/net/dsa/lan9303_i2c.c
+++ b/drivers/net/dsa/lan9303_i2c.c
@@ -89,7 +89,7 @@ static void lan9303_i2c_shutdown(struct i2c_client *client)
/*-------------------------------------------------------------------------*/
static const struct i2c_device_id lan9303_i2c_id[] = {
- { "lan9303", 0 },
+ { "lan9303" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, lan9303_i2c_id);
diff --git a/drivers/net/dsa/lan9303_mdio.c b/drivers/net/dsa/lan9303_mdio.c
index 167a86f39f27..0ac4857e5ee8 100644
--- a/drivers/net/dsa/lan9303_mdio.c
+++ b/drivers/net/dsa/lan9303_mdio.c
@@ -58,19 +58,19 @@ static int lan9303_mdio_read(void *ctx, uint32_t reg, uint32_t *val)
return 0;
}
-static int lan9303_mdio_phy_write(struct lan9303 *chip, int phy, int reg,
+static int lan9303_mdio_phy_write(struct lan9303 *chip, int addr, int reg,
u16 val)
{
struct lan9303_mdio *sw_dev = dev_get_drvdata(chip->dev);
- return mdiobus_write_nested(sw_dev->device->bus, phy, reg, val);
+ return mdiobus_write_nested(sw_dev->device->bus, addr, reg, val);
}
-static int lan9303_mdio_phy_read(struct lan9303 *chip, int phy, int reg)
+static int lan9303_mdio_phy_read(struct lan9303 *chip, int addr, int reg)
{
struct lan9303_mdio *sw_dev = dev_get_drvdata(chip->dev);
- return mdiobus_read_nested(sw_dev->device->bus, phy, reg);
+ return mdiobus_read_nested(sw_dev->device->bus, addr, reg);
}
static const struct lan9303_phy_ops lan9303_mdio_phy_ops = {
diff --git a/drivers/net/dsa/lantiq/Kconfig b/drivers/net/dsa/lantiq/Kconfig
new file mode 100644
index 000000000000..4a9771be5d58
--- /dev/null
+++ b/drivers/net/dsa/lantiq/Kconfig
@@ -0,0 +1,24 @@
+config NET_DSA_LANTIQ_COMMON
+ tristate
+ select REGMAP
+
+config NET_DSA_LANTIQ_GSWIP
+ tristate "Lantiq / Intel GSWIP"
+ depends on HAS_IOMEM
+ select NET_DSA_TAG_GSWIP
+ select NET_DSA_LANTIQ_COMMON
+ help
+ This enables support for the Lantiq / Intel GSWIP 2.1 found in
+ the xrx200 / VR9 SoC.
+
+config NET_DSA_MXL_GSW1XX
+ tristate "MaxLinear GSW1xx Ethernet switch support"
+ select NET_DSA_TAG_MXL_GSW1XX
+ select NET_DSA_LANTIQ_COMMON
+ help
+ This enables support for the MaxLinear GSW1xx family of 1GE switches
+ GSW120 4 port, 2 PHYs, RGMII & SGMII/2500Base-X
+ GSW125 4 port, 2 PHYs, RGMII & SGMII/2500Base-X, industrial temperature
+ GSW140 6 port, 4 PHYs, RGMII & SGMII/2500Base-X
+ GSW141 6 port, 4 PHYs, RGMII & SGMII
+ GSW145 6 port, 4 PHYs, RGMII & SGMII/2500Base-X, industrial temperature
diff --git a/drivers/net/dsa/lantiq/Makefile b/drivers/net/dsa/lantiq/Makefile
new file mode 100644
index 000000000000..85fce605310b
--- /dev/null
+++ b/drivers/net/dsa/lantiq/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_NET_DSA_LANTIQ_GSWIP) += lantiq_gswip.o
+obj-$(CONFIG_NET_DSA_LANTIQ_COMMON) += lantiq_gswip_common.o
+obj-$(CONFIG_NET_DSA_MXL_GSW1XX) += mxl-gsw1xx.o
diff --git a/drivers/net/dsa/lantiq/lantiq_gswip.c b/drivers/net/dsa/lantiq/lantiq_gswip.c
new file mode 100644
index 000000000000..57dd063c0740
--- /dev/null
+++ b/drivers/net/dsa/lantiq/lantiq_gswip.c
@@ -0,0 +1,518 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Lantiq / Intel GSWIP switch driver for VRX200, xRX300 and xRX330 SoCs
+ *
+ * Copyright (C) 2025 Daniel Golle <daniel@makrotopia.org>
+ * Copyright (C) 2017 - 2019 Hauke Mehrtens <hauke@hauke-m.de>
+ * Copyright (C) 2012 John Crispin <john@phrozen.org>
+ * Copyright (C) 2010 Lantiq Deutschland
+ */
+
+#include "lantiq_gswip.h"
+#include "lantiq_pce.h"
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <dt-bindings/mips/lantiq_rcu_gphy.h>
+
+#include <net/dsa.h>
+
+struct xway_gphy_match_data {
+ char *fe_firmware_name;
+ char *ge_firmware_name;
+};
+
+static void gswip_xrx200_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ switch (port) {
+ case 0:
+ case 1:
+ phy_interface_set_rgmii(config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_REVMII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ config->supported_interfaces);
+ break;
+
+ case 2:
+ case 3:
+ case 4:
+ case 6:
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+ break;
+
+ case 5:
+ phy_interface_set_rgmii(config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+ break;
+ }
+
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000;
+}
+
+static void gswip_xrx300_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ switch (port) {
+ case 0:
+ phy_interface_set_rgmii(config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ config->supported_interfaces);
+ break;
+
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 6:
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+ break;
+
+ case 5:
+ phy_interface_set_rgmii(config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ config->supported_interfaces);
+ break;
+ }
+
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000;
+}
+
+static const struct xway_gphy_match_data xrx200a1x_gphy_data = {
+ .fe_firmware_name = "lantiq/xrx200_phy22f_a14.bin",
+ .ge_firmware_name = "lantiq/xrx200_phy11g_a14.bin",
+};
+
+static const struct xway_gphy_match_data xrx200a2x_gphy_data = {
+ .fe_firmware_name = "lantiq/xrx200_phy22f_a22.bin",
+ .ge_firmware_name = "lantiq/xrx200_phy11g_a22.bin",
+};
+
+static const struct xway_gphy_match_data xrx300_gphy_data = {
+ .fe_firmware_name = "lantiq/xrx300_phy22f_a21.bin",
+ .ge_firmware_name = "lantiq/xrx300_phy11g_a21.bin",
+};
+
+static const struct of_device_id xway_gphy_match[] __maybe_unused = {
+ { .compatible = "lantiq,xrx200-gphy-fw", .data = NULL },
+ { .compatible = "lantiq,xrx200a1x-gphy-fw", .data = &xrx200a1x_gphy_data },
+ { .compatible = "lantiq,xrx200a2x-gphy-fw", .data = &xrx200a2x_gphy_data },
+ { .compatible = "lantiq,xrx300-gphy-fw", .data = &xrx300_gphy_data },
+ { .compatible = "lantiq,xrx330-gphy-fw", .data = &xrx300_gphy_data },
+ {},
+};
+
+static int gswip_gphy_fw_load(struct gswip_priv *priv, struct gswip_gphy_fw *gphy_fw)
+{
+ struct device *dev = priv->dev;
+ const struct firmware *fw;
+ void *fw_addr;
+ dma_addr_t dma_addr;
+ dma_addr_t dev_addr;
+ size_t size;
+ int ret;
+
+ ret = clk_prepare_enable(gphy_fw->clk_gate);
+ if (ret)
+ return ret;
+
+ reset_control_assert(gphy_fw->reset);
+
+ /* The vendor BSP uses a 200ms delay after asserting the reset line.
+ * Without this delay some users observe that the PHY does not come up
+ * on the MDIO bus.
+ */
+ msleep(200);
+
+ ret = request_firmware(&fw, gphy_fw->fw_name, dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to load firmware: %s\n",
+ gphy_fw->fw_name);
+
+ /* GPHY cores need the firmware code in a persistent and contiguous
+ * memory area with a 16 kB boundary aligned start address.
+ */
+ size = fw->size + XRX200_GPHY_FW_ALIGN;
+
+ fw_addr = dmam_alloc_coherent(dev, size, &dma_addr, GFP_KERNEL);
+ if (fw_addr) {
+ fw_addr = PTR_ALIGN(fw_addr, XRX200_GPHY_FW_ALIGN);
+ dev_addr = ALIGN(dma_addr, XRX200_GPHY_FW_ALIGN);
+ memcpy(fw_addr, fw->data, fw->size);
+ } else {
+ release_firmware(fw);
+ return -ENOMEM;
+ }
+
+ release_firmware(fw);
+
+ ret = regmap_write(priv->rcu_regmap, gphy_fw->fw_addr_offset, dev_addr);
+ if (ret)
+ return ret;
+
+ reset_control_deassert(gphy_fw->reset);
+
+ return ret;
+}
+
+static int gswip_gphy_fw_probe(struct gswip_priv *priv,
+ struct gswip_gphy_fw *gphy_fw,
+ struct device_node *gphy_fw_np, int i)
+{
+ struct device *dev = priv->dev;
+ u32 gphy_mode;
+ int ret;
+ char gphyname[10];
+
+ snprintf(gphyname, sizeof(gphyname), "gphy%d", i);
+
+ gphy_fw->clk_gate = devm_clk_get(dev, gphyname);
+ if (IS_ERR(gphy_fw->clk_gate)) {
+ return dev_err_probe(dev, PTR_ERR(gphy_fw->clk_gate),
+ "Failed to lookup gate clock\n");
+ }
+
+ ret = of_property_read_u32(gphy_fw_np, "reg", &gphy_fw->fw_addr_offset);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32(gphy_fw_np, "lantiq,gphy-mode", &gphy_mode);
+ /* Default to GE mode */
+ if (ret)
+ gphy_mode = GPHY_MODE_GE;
+
+ switch (gphy_mode) {
+ case GPHY_MODE_FE:
+ gphy_fw->fw_name = priv->gphy_fw_name_cfg->fe_firmware_name;
+ break;
+ case GPHY_MODE_GE:
+ gphy_fw->fw_name = priv->gphy_fw_name_cfg->ge_firmware_name;
+ break;
+ default:
+ return dev_err_probe(dev, -EINVAL, "Unknown GPHY mode %d\n",
+ gphy_mode);
+ }
+
+ gphy_fw->reset = of_reset_control_array_get_exclusive(gphy_fw_np);
+ if (IS_ERR(gphy_fw->reset))
+ return dev_err_probe(dev, PTR_ERR(gphy_fw->reset),
+ "Failed to lookup gphy reset\n");
+
+ return gswip_gphy_fw_load(priv, gphy_fw);
+}
+
+static void gswip_gphy_fw_remove(struct gswip_priv *priv,
+ struct gswip_gphy_fw *gphy_fw)
+{
+ int ret;
+
+ /* check if the device was fully probed */
+ if (!gphy_fw->fw_name)
+ return;
+
+ ret = regmap_write(priv->rcu_regmap, gphy_fw->fw_addr_offset, 0);
+ if (ret)
+ dev_err(priv->dev, "can not reset GPHY FW pointer\n");
+
+ clk_disable_unprepare(gphy_fw->clk_gate);
+
+ reset_control_put(gphy_fw->reset);
+}
+
+static int gswip_gphy_fw_list(struct gswip_priv *priv,
+ struct device_node *gphy_fw_list_np, u32 version)
+{
+ struct device *dev = priv->dev;
+ struct device_node *gphy_fw_np;
+ const struct of_device_id *match;
+ int err;
+ int i = 0;
+
+ /* The VRX200 rev 1.1 uses the GSWIP 2.0 and needs the older
+ * GPHY firmware. The VRX200 rev 1.2 uses the GSWIP 2.1 and also
+ * needs a different GPHY firmware.
+ */
+ if (of_device_is_compatible(gphy_fw_list_np, "lantiq,xrx200-gphy-fw")) {
+ switch (version) {
+ case GSWIP_VERSION_2_0:
+ priv->gphy_fw_name_cfg = &xrx200a1x_gphy_data;
+ break;
+ case GSWIP_VERSION_2_1:
+ priv->gphy_fw_name_cfg = &xrx200a2x_gphy_data;
+ break;
+ default:
+ return dev_err_probe(dev, -ENOENT,
+ "unknown GSWIP version: 0x%x\n",
+ version);
+ }
+ }
+
+ match = of_match_node(xway_gphy_match, gphy_fw_list_np);
+ if (match && match->data)
+ priv->gphy_fw_name_cfg = match->data;
+
+ if (!priv->gphy_fw_name_cfg)
+ return dev_err_probe(dev, -ENOENT,
+ "GPHY compatible type not supported\n");
+
+ priv->num_gphy_fw = of_get_available_child_count(gphy_fw_list_np);
+ if (!priv->num_gphy_fw)
+ return -ENOENT;
+
+ priv->rcu_regmap = syscon_regmap_lookup_by_phandle(gphy_fw_list_np,
+ "lantiq,rcu");
+ if (IS_ERR(priv->rcu_regmap))
+ return PTR_ERR(priv->rcu_regmap);
+
+ priv->gphy_fw = devm_kmalloc_array(dev, priv->num_gphy_fw,
+ sizeof(*priv->gphy_fw),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!priv->gphy_fw)
+ return -ENOMEM;
+
+ for_each_available_child_of_node(gphy_fw_list_np, gphy_fw_np) {
+ err = gswip_gphy_fw_probe(priv, &priv->gphy_fw[i],
+ gphy_fw_np, i);
+ if (err) {
+ of_node_put(gphy_fw_np);
+ goto remove_gphy;
+ }
+ i++;
+ }
+
+ /* The standalone PHY11G requires 300ms to be fully
+ * initialized and ready for any MDIO communication after being
+ * taken out of reset. For the SoC-internal GPHY variant there
+ * is no (known) documentation for the minimum time after a
+ * reset. Use the same value as for the standalone variant as
+ * some users have reported internal PHYs not being detected
+ * without any delay.
+ */
+ msleep(300);
+
+ return 0;
+
+remove_gphy:
+ for (i = 0; i < priv->num_gphy_fw; i++)
+ gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
+ return err;
+}
+
+static const struct regmap_config sw_regmap_config = {
+ .name = "switch",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_shift = REGMAP_UPSHIFT(2),
+ .val_format_endian = REGMAP_ENDIAN_NATIVE,
+ .max_register = GSWIP_SDMA_PCTRLp(6),
+};
+
+static const struct regmap_config mdio_regmap_config = {
+ .name = "mdio",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_shift = REGMAP_UPSHIFT(2),
+ .val_format_endian = REGMAP_ENDIAN_NATIVE,
+ .max_register = GSWIP_MDIO_PHYp(0),
+};
+
+static const struct regmap_config mii_regmap_config = {
+ .name = "mii",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_shift = REGMAP_UPSHIFT(2),
+ .val_format_endian = REGMAP_ENDIAN_NATIVE,
+ .max_register = GSWIP_MII_CFGp(6),
+};
+
+static int gswip_probe(struct platform_device *pdev)
+{
+ struct device_node *np, *gphy_fw_np;
+ __iomem void *gswip, *mdio, *mii;
+ struct device *dev = &pdev->dev;
+ struct gswip_priv *priv;
+ int err;
+ int i;
+ u32 version;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ gswip = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(gswip))
+ return PTR_ERR(gswip);
+
+ mdio = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(mdio))
+ return PTR_ERR(mdio);
+
+ mii = devm_platform_ioremap_resource(pdev, 2);
+ if (IS_ERR(mii))
+ return PTR_ERR(mii);
+
+ priv->gswip = devm_regmap_init_mmio(dev, gswip, &sw_regmap_config);
+ if (IS_ERR(priv->gswip))
+ return PTR_ERR(priv->gswip);
+
+ priv->mdio = devm_regmap_init_mmio(dev, mdio, &mdio_regmap_config);
+ if (IS_ERR(priv->mdio))
+ return PTR_ERR(priv->mdio);
+
+ priv->mii = devm_regmap_init_mmio(dev, mii, &mii_regmap_config);
+ if (IS_ERR(priv->mii))
+ return PTR_ERR(priv->mii);
+
+ priv->hw_info = of_device_get_match_data(dev);
+ if (!priv->hw_info)
+ return -EINVAL;
+
+ priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
+ if (!priv->ds)
+ return -ENOMEM;
+
+ priv->dev = dev;
+
+ regmap_read(priv->gswip, GSWIP_VERSION, &version);
+
+ np = dev->of_node;
+ switch (version) {
+ case GSWIP_VERSION_2_0:
+ case GSWIP_VERSION_2_1:
+ if (!of_device_is_compatible(np, "lantiq,xrx200-gswip"))
+ return -EINVAL;
+ break;
+ case GSWIP_VERSION_2_2:
+ case GSWIP_VERSION_2_2_ETC:
+ if (!of_device_is_compatible(np, "lantiq,xrx300-gswip") &&
+ !of_device_is_compatible(np, "lantiq,xrx330-gswip"))
+ return -EINVAL;
+ break;
+ default:
+ return dev_err_probe(dev, -ENOENT,
+ "unknown GSWIP version: 0x%x\n", version);
+ }
+
+ /* load the GPHY firmware if one is described in the device tree */
+ gphy_fw_np = of_get_compatible_child(dev->of_node, "lantiq,gphy-fw");
+ if (gphy_fw_np) {
+ err = gswip_gphy_fw_list(priv, gphy_fw_np, version);
+ of_node_put(gphy_fw_np);
+ if (err)
+ return dev_err_probe(dev, err,
+ "gphy fw probe failed\n");
+ }
+
+ err = gswip_probe_common(priv, version);
+ if (err)
+ goto gphy_fw_remove;
+
+ platform_set_drvdata(pdev, priv);
+
+ return 0;
+
+gphy_fw_remove:
+ for (i = 0; i < priv->num_gphy_fw; i++)
+ gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
+ return err;
+}
+
+static void gswip_remove(struct platform_device *pdev)
+{
+ struct gswip_priv *priv = platform_get_drvdata(pdev);
+ int i;
+
+ if (!priv)
+ return;
+
+ /* disable the switch */
+ gswip_disable_switch(priv);
+
+ dsa_unregister_switch(priv->ds);
+
+ for (i = 0; i < priv->num_gphy_fw; i++)
+ gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
+}
+
+static void gswip_shutdown(struct platform_device *pdev)
+{
+ struct gswip_priv *priv = platform_get_drvdata(pdev);
+
+ if (!priv)
+ return;
+
+ dsa_switch_shutdown(priv->ds);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
+static const struct gswip_hw_info gswip_xrx200 = {
+ .max_ports = 7,
+ .allowed_cpu_ports = BIT(6),
+ .mii_ports = BIT(0) | BIT(1) | BIT(5),
+ .mii_port_reg_offset = 0,
+ .phylink_get_caps = gswip_xrx200_phylink_get_caps,
+ .pce_microcode = &gswip_pce_microcode,
+ .pce_microcode_size = ARRAY_SIZE(gswip_pce_microcode),
+ .tag_protocol = DSA_TAG_PROTO_GSWIP,
+};
+
+static const struct gswip_hw_info gswip_xrx300 = {
+ .max_ports = 7,
+ .allowed_cpu_ports = BIT(6),
+ .mii_ports = BIT(0) | BIT(5),
+ .mii_port_reg_offset = 0,
+ .phylink_get_caps = gswip_xrx300_phylink_get_caps,
+ .pce_microcode = &gswip_pce_microcode,
+ .pce_microcode_size = ARRAY_SIZE(gswip_pce_microcode),
+ .tag_protocol = DSA_TAG_PROTO_GSWIP,
+};
+
+static const struct of_device_id gswip_of_match[] = {
+ { .compatible = "lantiq,xrx200-gswip", .data = &gswip_xrx200 },
+ { .compatible = "lantiq,xrx300-gswip", .data = &gswip_xrx300 },
+ { .compatible = "lantiq,xrx330-gswip", .data = &gswip_xrx300 },
+ {},
+};
+MODULE_DEVICE_TABLE(of, gswip_of_match);
+
+static struct platform_driver gswip_driver = {
+ .probe = gswip_probe,
+ .remove = gswip_remove,
+ .shutdown = gswip_shutdown,
+ .driver = {
+ .name = "gswip",
+ .of_match_table = gswip_of_match,
+ },
+};
+
+module_platform_driver(gswip_driver);
+
+MODULE_FIRMWARE("lantiq/xrx300_phy11g_a21.bin");
+MODULE_FIRMWARE("lantiq/xrx300_phy22f_a21.bin");
+MODULE_FIRMWARE("lantiq/xrx200_phy11g_a14.bin");
+MODULE_FIRMWARE("lantiq/xrx200_phy11g_a22.bin");
+MODULE_FIRMWARE("lantiq/xrx200_phy22f_a14.bin");
+MODULE_FIRMWARE("lantiq/xrx200_phy22f_a22.bin");
+MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>");
+MODULE_DESCRIPTION("Lantiq / Intel GSWIP driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/lantiq/lantiq_gswip.h b/drivers/net/dsa/lantiq/lantiq_gswip.h
new file mode 100644
index 000000000000..9c38e51a75e8
--- /dev/null
+++ b/drivers/net/dsa/lantiq/lantiq_gswip.h
@@ -0,0 +1,301 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef __LANTIQ_GSWIP_H
+#define __LANTIQ_GSWIP_H
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/mutex.h>
+#include <linux/phylink.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/swab.h>
+#include <net/dsa.h>
+
+/* GSWIP MDIO Registers */
+#define GSWIP_MDIO_GLOB 0x00
+#define GSWIP_MDIO_GLOB_ENABLE BIT(15)
+#define GSWIP_MDIO_CTRL 0x08
+#define GSWIP_MDIO_CTRL_BUSY BIT(12)
+#define GSWIP_MDIO_CTRL_RD BIT(11)
+#define GSWIP_MDIO_CTRL_WR BIT(10)
+#define GSWIP_MDIO_CTRL_PHYAD_MASK 0x1f
+#define GSWIP_MDIO_CTRL_PHYAD_SHIFT 5
+#define GSWIP_MDIO_CTRL_REGAD_MASK 0x1f
+#define GSWIP_MDIO_READ 0x09
+#define GSWIP_MDIO_WRITE 0x0A
+#define GSWIP_MDIO_MDC_CFG0 0x0B
+#define GSWIP_MDIO_MDC_CFG1 0x0C
+#define GSWIP_MDIO_PHYp(p) (0x15 - (p))
+#define GSWIP_MDIO_PHY_LINK_MASK 0x6000
+#define GSWIP_MDIO_PHY_LINK_AUTO 0x0000
+#define GSWIP_MDIO_PHY_LINK_DOWN 0x4000
+#define GSWIP_MDIO_PHY_LINK_UP 0x2000
+#define GSWIP_MDIO_PHY_SPEED_MASK 0x1800
+#define GSWIP_MDIO_PHY_SPEED_AUTO 0x1800
+#define GSWIP_MDIO_PHY_SPEED_M10 0x0000
+#define GSWIP_MDIO_PHY_SPEED_M100 0x0800
+#define GSWIP_MDIO_PHY_SPEED_G1 0x1000
+#define GSWIP_MDIO_PHY_FDUP_MASK 0x0600
+#define GSWIP_MDIO_PHY_FDUP_AUTO 0x0000
+#define GSWIP_MDIO_PHY_FDUP_EN 0x0200
+#define GSWIP_MDIO_PHY_FDUP_DIS 0x0600
+#define GSWIP_MDIO_PHY_FCONTX_MASK 0x0180
+#define GSWIP_MDIO_PHY_FCONTX_AUTO 0x0000
+#define GSWIP_MDIO_PHY_FCONTX_EN 0x0100
+#define GSWIP_MDIO_PHY_FCONTX_DIS 0x0180
+#define GSWIP_MDIO_PHY_FCONRX_MASK 0x0060
+#define GSWIP_MDIO_PHY_FCONRX_AUTO 0x0000
+#define GSWIP_MDIO_PHY_FCONRX_EN 0x0020
+#define GSWIP_MDIO_PHY_FCONRX_DIS 0x0060
+#define GSWIP_MDIO_PHY_ADDR_MASK 0x001f
+#define GSWIP_MDIO_PHY_MASK (GSWIP_MDIO_PHY_ADDR_MASK | \
+ GSWIP_MDIO_PHY_FCONRX_MASK | \
+ GSWIP_MDIO_PHY_FCONTX_MASK | \
+ GSWIP_MDIO_PHY_LINK_MASK | \
+ GSWIP_MDIO_PHY_SPEED_MASK | \
+ GSWIP_MDIO_PHY_FDUP_MASK)
+
+/* GSWIP MII Registers */
+#define GSWIP_MII_CFGp(p) (0x2 * (p))
+#define GSWIP_MII_CFG_RESET BIT(15)
+#define GSWIP_MII_CFG_EN BIT(14)
+#define GSWIP_MII_CFG_ISOLATE BIT(13)
+#define GSWIP_MII_CFG_LDCLKDIS BIT(12)
+#define GSWIP_MII_CFG_RGMII_IBS BIT(8)
+#define GSWIP_MII_CFG_RMII_CLK BIT(7)
+#define GSWIP_MII_CFG_MODE_MIIP 0x0
+#define GSWIP_MII_CFG_MODE_MIIM 0x1
+#define GSWIP_MII_CFG_MODE_RMIIP 0x2
+#define GSWIP_MII_CFG_MODE_RMIIM 0x3
+#define GSWIP_MII_CFG_MODE_RGMII 0x4
+#define GSWIP_MII_CFG_MODE_GMII 0x9
+#define GSWIP_MII_CFG_MODE_MASK 0xf
+#define GSWIP_MII_CFG_RATE_M2P5 0x00
+#define GSWIP_MII_CFG_RATE_M25 0x10
+#define GSWIP_MII_CFG_RATE_M125 0x20
+#define GSWIP_MII_CFG_RATE_M50 0x30
+#define GSWIP_MII_CFG_RATE_AUTO 0x40
+#define GSWIP_MII_CFG_RATE_MASK 0x70
+#define GSWIP_MII_PCDU0 0x01
+#define GSWIP_MII_PCDU1 0x03
+#define GSWIP_MII_PCDU5 0x05
+#define GSWIP_MII_PCDU_TXDLY_MASK GENMASK(2, 0)
+#define GSWIP_MII_PCDU_RXDLY_MASK GENMASK(9, 7)
+#define GSWIP_MII_PCDU_TXDLY(x) u16_encode_bits(((x) / 500), GSWIP_MII_PCDU_TXDLY_MASK)
+#define GSWIP_MII_PCDU_RXDLY(x) u16_encode_bits(((x) / 500), GSWIP_MII_PCDU_RXDLY_MASK)
+#define GSWIP_MII_PCDU_RXDLY_DEFAULT 2000 /* picoseconds */
+#define GSWIP_MII_PCDU_TXDLY_DEFAULT 2000 /* picoseconds */
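+
+/* Illustrative only, derived from the macros above rather than from the
+ * datasheet: the PCDU delay fields hold the delay in 500 ps steps, so the
+ * 2000 ps default expands to a field value of 4, i.e.
+ *   GSWIP_MII_PCDU_TXDLY(2000) == u16_encode_bits(4, GSWIP_MII_PCDU_TXDLY_MASK)
+ * and only delays from 0 to 3500 ps (field values 0..7) fit into the
+ * 3-bit fields.
+ */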
+
+/* GSWIP Core Registers */
+#define GSWIP_SWRES 0x000
+#define GSWIP_SWRES_R1 BIT(1) /* GSWIP Software reset */
+#define GSWIP_SWRES_R0 BIT(0) /* GSWIP Hardware reset */
+#define GSWIP_VERSION 0x013
+#define GSWIP_VERSION_REV_MASK GENMASK(7, 0)
+#define GSWIP_VERSION_MOD_MASK GENMASK(15, 8)
+#define GSWIP_VERSION_REV(v) FIELD_GET(GSWIP_VERSION_REV_MASK, v)
+#define GSWIP_VERSION_MOD(v) FIELD_GET(GSWIP_VERSION_MOD_MASK, v)
+#define GSWIP_VERSION_2_0 0x100
+#define GSWIP_VERSION_2_1 0x021
+#define GSWIP_VERSION_2_2 0x122
+#define GSWIP_VERSION_2_2_ETC 0x022
+/* The hardware has the 'major/minor' version bytes in the wrong order
+ * preventing numerical comparisons. Swap the bytes of the 16-bit value
+ * to end up with REV being the most significant byte and MOD being the
+ * least significant byte, which then allows comparing it with the
+ * value stored in struct gswip_priv.
+ */
+#define GSWIP_VERSION_GE(priv, ver) ((priv)->version >= swab16(ver))
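+/* For example (using the definitions above): comparing the raw register
+ * values would order GSWIP 2.0 (0x100) above GSWIP 2.1 (0x021), but after
+ * swab16() they become 0x0001 and 0x2100 respectively, so
+ * GSWIP_VERSION_GE(priv, GSWIP_VERSION_2_1) is false on a 2.0 device and
+ * true on a 2.2 device (0x122 becomes 0x2201), assuming priv->version holds
+ * the byte-swapped register value.
+ */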
+
+#define GSWIP_BM_RAM_VAL(x) (0x043 - (x))
+#define GSWIP_BM_RAM_ADDR 0x044
+#define GSWIP_BM_RAM_CTRL 0x045
+#define GSWIP_BM_RAM_CTRL_BAS BIT(15)
+#define GSWIP_BM_RAM_CTRL_OPMOD BIT(5)
+#define GSWIP_BM_RAM_CTRL_ADDR_MASK GENMASK(4, 0)
+#define GSWIP_BM_QUEUE_GCTRL 0x04A
+#define GSWIP_BM_QUEUE_GCTRL_GL_MOD BIT(10)
+/* buffer management Port Configuration Register */
+#define GSWIP_BM_PCFGp(p) (0x080 + ((p) * 2))
+#define GSWIP_BM_PCFG_CNTEN BIT(0) /* RMON Counter Enable */
+#define GSWIP_BM_PCFG_IGCNT BIT(1) /* Ingress Special Tag RMON count */
+/* buffer management Port Control Register */
+#define GSWIP_BM_RMON_CTRLp(p) (0x81 + ((p) * 2))
+#define GSWIP_BM_CTRL_RMON_RAM1_RES BIT(0) /* Software Reset for RMON RAM 1 */
+#define GSWIP_BM_CTRL_RMON_RAM2_RES BIT(1) /* Software Reset for RMON RAM 2 */
+
+/* PCE */
+#define GSWIP_PCE_TBL_KEY(x) (0x447 - (x))
+#define GSWIP_PCE_TBL_MASK 0x448
+#define GSWIP_PCE_TBL_VAL(x) (0x44D - (x))
+#define GSWIP_PCE_TBL_ADDR 0x44E
+#define GSWIP_PCE_TBL_CTRL 0x44F
+#define GSWIP_PCE_TBL_CTRL_BAS BIT(15)
+#define GSWIP_PCE_TBL_CTRL_TYPE BIT(13)
+#define GSWIP_PCE_TBL_CTRL_VLD BIT(12)
+#define GSWIP_PCE_TBL_CTRL_KEYFORM BIT(11)
+#define GSWIP_PCE_TBL_CTRL_GMAP_MASK GENMASK(10, 7)
+#define GSWIP_PCE_TBL_CTRL_OPMOD_MASK GENMASK(6, 5)
+#define GSWIP_PCE_TBL_CTRL_OPMOD_ADRD 0x00
+#define GSWIP_PCE_TBL_CTRL_OPMOD_ADWR 0x20
+#define GSWIP_PCE_TBL_CTRL_OPMOD_KSRD 0x40
+#define GSWIP_PCE_TBL_CTRL_OPMOD_KSWR 0x60
+#define GSWIP_PCE_TBL_CTRL_ADDR_MASK GENMASK(4, 0)
+#define GSWIP_PCE_PMAP1 0x453 /* Monitoring port map */
+#define GSWIP_PCE_PMAP2 0x454 /* Default Multicast port map */
+#define GSWIP_PCE_PMAP3 0x455 /* Default Unknown Unicast port map */
+#define GSWIP_PCE_GCTRL_0 0x456
+#define GSWIP_PCE_GCTRL_0_MTFL BIT(0) /* MAC Table Flushing */
+#define GSWIP_PCE_GCTRL_0_MC_VALID BIT(3)
+#define GSWIP_PCE_GCTRL_0_VLAN BIT(14) /* VLAN aware Switching */
+#define GSWIP_PCE_GCTRL_1 0x457
+#define GSWIP_PCE_GCTRL_1_MAC_GLOCK BIT(2) /* MAC Address table lock */
+#define GSWIP_PCE_GCTRL_1_MAC_GLOCK_MOD BIT(3) /* MAC address table lock forwarding mode */
+#define GSWIP_PCE_PCTRL_0p(p) (0x480 + ((p) * 0xA))
+#define GSWIP_PCE_PCTRL_0_TVM BIT(5) /* Transparent VLAN mode */
+#define GSWIP_PCE_PCTRL_0_VREP BIT(6) /* VLAN Replace Mode */
+#define GSWIP_PCE_PCTRL_0_INGRESS BIT(11) /* Accept special tag in ingress */
+#define GSWIP_PCE_PCTRL_0_PSTATE_LISTEN 0x0
+#define GSWIP_PCE_PCTRL_0_PSTATE_RX 0x1
+#define GSWIP_PCE_PCTRL_0_PSTATE_TX 0x2
+#define GSWIP_PCE_PCTRL_0_PSTATE_LEARNING 0x3
+#define GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING 0x7
+#define GSWIP_PCE_PCTRL_0_PSTATE_MASK GENMASK(2, 0)
+/* Ethernet Switch PCE Port Control Register 3 */
+#define GSWIP_PCE_PCTRL_3p(p) (0x483 + ((p) * 0xA))
+#define GSWIP_PCE_PCTRL_3_LNDIS BIT(15) /* Learning Disable */
+#define GSWIP_PCE_VCTRL(p) (0x485 + ((p) * 0xA))
+#define GSWIP_PCE_VCTRL_UVR BIT(0) /* Unknown VLAN Rule */
+#define GSWIP_PCE_VCTRL_VINR GENMASK(2, 1) /* VLAN Ingress Tag Rule */
+#define GSWIP_PCE_VCTRL_VINR_ALL 0 /* Admit tagged and untagged packets */
+#define GSWIP_PCE_VCTRL_VINR_TAGGED 1 /* Admit only tagged packets */
+#define GSWIP_PCE_VCTRL_VINR_UNTAGGED 2 /* Admit only untagged packets */
+#define GSWIP_PCE_VCTRL_VIMR BIT(3) /* VLAN Ingress Member violation rule */
+#define GSWIP_PCE_VCTRL_VEMR BIT(4) /* VLAN Egress Member violation rule */
+#define GSWIP_PCE_VCTRL_VSR BIT(5) /* VLAN Security */
+#define GSWIP_PCE_VCTRL_VID0 BIT(6) /* Priority Tagged Rule */
+#define GSWIP_PCE_DEFPVID(p) (0x486 + ((p) * 0xA))
+
+#define GSWIP_MAC_FLEN 0x8C5
+#define GSWIP_MAC_CTRL_0p(p) (0x903 + ((p) * 0xC))
+#define GSWIP_MAC_CTRL_0_PADEN BIT(8)
+#define GSWIP_MAC_CTRL_0_FCS_EN BIT(7)
+#define GSWIP_MAC_CTRL_0_FCON_MASK 0x0070
+#define GSWIP_MAC_CTRL_0_FCON_AUTO 0x0000
+#define GSWIP_MAC_CTRL_0_FCON_RX 0x0010
+#define GSWIP_MAC_CTRL_0_FCON_TX 0x0020
+#define GSWIP_MAC_CTRL_0_FCON_RXTX 0x0030
+#define GSWIP_MAC_CTRL_0_FCON_NONE 0x0040
+#define GSWIP_MAC_CTRL_0_FDUP_MASK 0x000C
+#define GSWIP_MAC_CTRL_0_FDUP_AUTO 0x0000
+#define GSWIP_MAC_CTRL_0_FDUP_EN 0x0004
+#define GSWIP_MAC_CTRL_0_FDUP_DIS 0x000C
+#define GSWIP_MAC_CTRL_0_GMII_MASK 0x0003
+#define GSWIP_MAC_CTRL_0_GMII_AUTO 0x0000
+#define GSWIP_MAC_CTRL_0_GMII_MII 0x0001
+#define GSWIP_MAC_CTRL_0_GMII_RGMII 0x0002
+#define GSWIP_MAC_CTRL_2p(p) (0x905 + ((p) * 0xC))
+#define GSWIP_MAC_CTRL_2_LCHKL BIT(2) /* Frame Length Check Long Enable */
+#define GSWIP_MAC_CTRL_2_MLEN BIT(3) /* Maximum Untagged Frame Length */
+#define GSWIP_MAC_CTRL_4p(p) (0x907 + ((p) * 0xC))
+#define GSWIP_MAC_CTRL_4_LPIEN BIT(7) /* LPI Mode Enable */
+#define GSWIP_MAC_CTRL_4_GWAIT_MASK GENMASK(14, 8) /* LPI Wait Time 1G */
+#define GSWIP_MAC_CTRL_4_GWAIT(t) u16_encode_bits((t), GSWIP_MAC_CTRL_4_GWAIT_MASK)
+#define GSWIP_MAC_CTRL_4_WAIT_MASK GENMASK(6, 0) /* LPI Wait Time 100M */
+#define GSWIP_MAC_CTRL_4_WAIT(t) u16_encode_bits((t), GSWIP_MAC_CTRL_4_WAIT_MASK)
+
+/* Ethernet Switch Fetch DMA Port Control Register */
+#define GSWIP_FDMA_PCTRLp(p) (0xA80 + ((p) * 0x6))
+#define GSWIP_FDMA_PCTRL_EN BIT(0) /* FDMA Port Enable */
+#define GSWIP_FDMA_PCTRL_STEN BIT(1) /* Special Tag Insertion Enable */
+#define GSWIP_FDMA_PCTRL_VLANMOD_MASK GENMASK(4, 3) /* VLAN Modification Control */
+#define GSWIP_FDMA_PCTRL_VLANMOD_SHIFT 3 /* VLAN Modification Control */
+#define GSWIP_FDMA_PCTRL_VLANMOD_DIS (0x0 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
+#define GSWIP_FDMA_PCTRL_VLANMOD_PRIO (0x1 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
+#define GSWIP_FDMA_PCTRL_VLANMOD_ID (0x2 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
+#define GSWIP_FDMA_PCTRL_VLANMOD_BOTH (0x3 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
+
+/* Ethernet Switch Store DMA Port Control Register */
+#define GSWIP_SDMA_PCTRLp(p) (0xBC0 + ((p) * 0x6))
+#define GSWIP_SDMA_PCTRL_EN BIT(0) /* SDMA Port Enable */
+#define GSWIP_SDMA_PCTRL_FCEN BIT(1) /* Flow Control Enable */
+#define GSWIP_SDMA_PCTRL_PAUFWD BIT(3) /* Pause Frame Forwarding */
+
+#define GSWIP_TABLE_ACTIVE_VLAN 0x01
+#define GSWIP_TABLE_VLAN_MAPPING 0x02
+#define GSWIP_TABLE_MAC_BRIDGE 0x0b
+#define GSWIP_TABLE_MAC_BRIDGE_KEY3_FID GENMASK(5, 0) /* Filtering identifier */
+#define GSWIP_TABLE_MAC_BRIDGE_VAL0_PORT GENMASK(7, 4) /* Port on learned entries */
+#define GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC BIT(0) /* Static, non-aging entry */
+#define GSWIP_TABLE_MAC_BRIDGE_VAL1_VALID BIT(1) /* Valid bit */
+
+#define XRX200_GPHY_FW_ALIGN (16 * 1024)
+
+/* Maximum packet size supported by the switch. In theory this should be 10240,
+ * but long packets currently cause lock-ups with an MTU of over 2526. Medium
+ * packets are sometimes dropped (e.g. TCP over 2477, UDP over 2516-2519, ICMP
+ * over 2526), hence an MTU value of 2400 seems safe. This issue only affects
+ * packet reception. This is probably caused by the PPA engine, which is on the
+ * RX part of the device. Packet transmission works properly up to 10240.
+ */
+#define GSWIP_MAX_PACKET_LENGTH 2400
+
+#define GSWIP_VLAN_UNAWARE_PVID 0
+
+struct gswip_pce_microcode {
+ u16 val_3;
+ u16 val_2;
+ u16 val_1;
+ u16 val_0;
+};
+
+struct gswip_hw_info {
+ int max_ports;
+ unsigned int allowed_cpu_ports;
+ unsigned int mii_ports;
+ int mii_port_reg_offset;
+ bool supports_2500m;
+ const struct gswip_pce_microcode (*pce_microcode)[];
+ size_t pce_microcode_size;
+ enum dsa_tag_protocol tag_protocol;
+ void (*phylink_get_caps)(struct dsa_switch *ds, int port,
+ struct phylink_config *config);
+ struct phylink_pcs *(*mac_select_pcs)(struct phylink_config *config,
+ phy_interface_t interface);
+};
+
+struct gswip_gphy_fw {
+ struct clk *clk_gate;
+ struct reset_control *reset;
+ u32 fw_addr_offset;
+ char *fw_name;
+};
+
+struct gswip_vlan {
+ struct net_device *bridge;
+ u16 vid;
+ u8 fid;
+};
+
+struct gswip_priv {
+ struct regmap *gswip;
+ struct regmap *mdio;
+ struct regmap *mii;
+ const struct gswip_hw_info *hw_info;
+ const struct xway_gphy_match_data *gphy_fw_name_cfg;
+ struct dsa_switch *ds;
+ struct device *dev;
+ struct regmap *rcu_regmap;
+ struct gswip_vlan vlans[64];
+ int num_gphy_fw;
+ struct gswip_gphy_fw *gphy_fw;
+ struct mutex pce_table_lock;
+ u16 version;
+};
+
+void gswip_disable_switch(struct gswip_priv *priv);
+
+int gswip_probe_common(struct gswip_priv *priv, u32 version);
+
+#endif /* __LANTIQ_GSWIP_H */
diff --git a/drivers/net/dsa/lantiq/lantiq_gswip_common.c b/drivers/net/dsa/lantiq/lantiq_gswip_common.c
new file mode 100644
index 000000000000..9da39edf8f57
--- /dev/null
+++ b/drivers/net/dsa/lantiq/lantiq_gswip_common.c
@@ -0,0 +1,1739 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Lantiq / Intel / MaxLinear GSWIP common function library
+ *
+ * Copyright (C) 2025 Daniel Golle <daniel@makrotopia.org>
+ * Copyright (C) 2023 - 2024 MaxLinear Inc.
+ * Copyright (C) 2022 Snap One, LLC. All rights reserved.
+ * Copyright (C) 2017 - 2019 Hauke Mehrtens <hauke@hauke-m.de>
+ * Copyright (C) 2012 John Crispin <john@phrozen.org>
+ * Copyright (C) 2010 Lantiq Deutschland
+ *
+ * The VLAN and bridge model the GSWIP hardware uses does not directly
+ * match the model DSA uses.
+ *
+ * The hardware has 64 possible table entries for bridges with one VLAN
+ * ID, one flow ID and a list of ports for each bridge. All entries which
+ * match the same flow ID are combined in the MAC learning table and
+ * act as one global bridge.
+ * The hardware does not support VLAN filtering per port, only per
+ * bridge; this driver converts the DSA model to the hardware model.
+ *
+ * The CPU gets all the exception frames which do not match any forwarding
+ * rule and the CPU port is also added to all bridges. This makes it possible
+ * to handle all the special cases easily in software.
+ * At the initialization the driver allocates one bridge table entry for
+ * each switch port which is used when the port is used without an
+ * explicit bridge. This prevents the frames from being forwarded
+ * between all LAN ports by default.
+ */
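+
+/* A sketch of how the 64 Active VLAN table entries end up being used,
+ * derived from the code below rather than from documentation: index 0 is
+ * never populated and acts as a "drop" PVID for VLAN-aware ports without a
+ * bridge PVID (see gswip_port_commit_pvid()), each user port gets a single
+ * port bridge entry at index port + 1 (VID 0, fid = port + 1) via
+ * gswip_add_single_port_br(), and entries from max_ports upwards are
+ * allocated on demand by gswip_vlan_active_create() for bridge VLANs.
+ */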
+
+#include "lantiq_gswip.h"
+
+#include <linux/delay.h>
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/phy.h>
+#include <linux/phylink.h>
+#include <linux/regmap.h>
+#include <net/dsa.h>
+
+struct gswip_pce_table_entry {
+ u16 index; // PCE_TBL_ADDR.ADDR = pData->table_index
+ u16 table; // PCE_TBL_CTRL.ADDR = pData->table
+ u16 key[8];
+ u16 val[5];
+ u16 mask;
+ u8 gmap;
+ bool type;
+ bool valid;
+ bool key_mode;
+};
+
+struct gswip_rmon_cnt_desc {
+ unsigned int size;
+ unsigned int offset;
+ const char *name;
+};
+
+#define MIB_DESC(_size, _offset, _name) {.size = _size, .offset = _offset, .name = _name}
+
+static const struct gswip_rmon_cnt_desc gswip_rmon_cnt[] = {
+ /** Receive Packet Count (only packets that are accepted and not discarded). */
+ MIB_DESC(1, 0x1F, "RxGoodPkts"),
+ MIB_DESC(1, 0x23, "RxUnicastPkts"),
+ MIB_DESC(1, 0x22, "RxMulticastPkts"),
+ MIB_DESC(1, 0x21, "RxFCSErrorPkts"),
+ MIB_DESC(1, 0x1D, "RxUnderSizeGoodPkts"),
+ MIB_DESC(1, 0x1E, "RxUnderSizeErrorPkts"),
+ MIB_DESC(1, 0x1B, "RxOversizeGoodPkts"),
+ MIB_DESC(1, 0x1C, "RxOversizeErrorPkts"),
+ MIB_DESC(1, 0x20, "RxGoodPausePkts"),
+ MIB_DESC(1, 0x1A, "RxAlignErrorPkts"),
+ MIB_DESC(1, 0x12, "Rx64BytePkts"),
+ MIB_DESC(1, 0x13, "Rx127BytePkts"),
+ MIB_DESC(1, 0x14, "Rx255BytePkts"),
+ MIB_DESC(1, 0x15, "Rx511BytePkts"),
+ MIB_DESC(1, 0x16, "Rx1023BytePkts"),
+ /** Receive Size 1024-1522 (or more, if configured) Packet Count. */
+ MIB_DESC(1, 0x17, "RxMaxBytePkts"),
+ MIB_DESC(1, 0x18, "RxDroppedPkts"),
+ MIB_DESC(1, 0x19, "RxFilteredPkts"),
+ MIB_DESC(2, 0x24, "RxGoodBytes"),
+ MIB_DESC(2, 0x26, "RxBadBytes"),
+ MIB_DESC(1, 0x11, "TxAcmDroppedPkts"),
+ MIB_DESC(1, 0x0C, "TxGoodPkts"),
+ MIB_DESC(1, 0x06, "TxUnicastPkts"),
+ MIB_DESC(1, 0x07, "TxMulticastPkts"),
+ MIB_DESC(1, 0x00, "Tx64BytePkts"),
+ MIB_DESC(1, 0x01, "Tx127BytePkts"),
+ MIB_DESC(1, 0x02, "Tx255BytePkts"),
+ MIB_DESC(1, 0x03, "Tx511BytePkts"),
+ MIB_DESC(1, 0x04, "Tx1023BytePkts"),
+ /** Transmit Size 1024-1522 (or more, if configured) Packet Count. */
+ MIB_DESC(1, 0x05, "TxMaxBytePkts"),
+ MIB_DESC(1, 0x08, "TxSingleCollCount"),
+ MIB_DESC(1, 0x09, "TxMultCollCount"),
+ MIB_DESC(1, 0x0A, "TxLateCollCount"),
+ MIB_DESC(1, 0x0B, "TxExcessCollCount"),
+ MIB_DESC(1, 0x0D, "TxPauseCount"),
+ MIB_DESC(1, 0x10, "TxDroppedPkts"),
+ MIB_DESC(2, 0x0E, "TxGoodBytes"),
+};
+
+static u32 gswip_switch_r_timeout(struct gswip_priv *priv, u32 offset,
+ u32 cleared)
+{
+ u32 val;
+
+ return regmap_read_poll_timeout(priv->gswip, offset, val,
+ !(val & cleared), 20, 50000);
+}
+
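+/* As an example of the mapping done by gswip_mii_mask_cfg() below (not a
+ * statement about other chips): with the xRX200 hw_info from
+ * lantiq_gswip.c, mii_ports covers ports 0, 1 and 5 and mii_port_reg_offset
+ * is 0, so a call for port 5 programs GSWIP_MII_CFGp(5) while calls for the
+ * internal ports 2-4 and 6 are silently ignored.
+ */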
+static void gswip_mii_mask_cfg(struct gswip_priv *priv, u32 mask, u32 set,
+ int port)
+{
+ int reg_port;
+
+ /* MII_CFG register only exists for MII ports */
+ if (!(priv->hw_info->mii_ports & BIT(port)))
+ return;
+
+ reg_port = port + priv->hw_info->mii_port_reg_offset;
+
+ regmap_write_bits(priv->mii, GSWIP_MII_CFGp(reg_port), mask,
+ set);
+}
+
+static int gswip_mdio_poll(struct gswip_priv *priv)
+{
+ u32 ctrl;
+
+ return regmap_read_poll_timeout(priv->mdio, GSWIP_MDIO_CTRL, ctrl,
+ !(ctrl & GSWIP_MDIO_CTRL_BUSY), 40, 4000);
+}
+
+static int gswip_mdio_wr(struct mii_bus *bus, int addr, int reg, u16 val)
+{
+ struct gswip_priv *priv = bus->priv;
+ int err;
+
+ err = gswip_mdio_poll(priv);
+ if (err) {
+ dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
+ return err;
+ }
+
+ regmap_write(priv->mdio, GSWIP_MDIO_WRITE, val);
+ regmap_write(priv->mdio, GSWIP_MDIO_CTRL,
+ GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_WR |
+ ((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) |
+ (reg & GSWIP_MDIO_CTRL_REGAD_MASK));
+
+ return 0;
+}
+
+static int gswip_mdio_rd(struct mii_bus *bus, int addr, int reg)
+{
+ struct gswip_priv *priv = bus->priv;
+ u32 val;
+ int err;
+
+ err = gswip_mdio_poll(priv);
+ if (err) {
+ dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
+ return err;
+ }
+
+ regmap_write(priv->mdio, GSWIP_MDIO_CTRL,
+ GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_RD |
+ ((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) |
+ (reg & GSWIP_MDIO_CTRL_REGAD_MASK));
+
+ err = gswip_mdio_poll(priv);
+ if (err) {
+ dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
+ return err;
+ }
+
+ err = regmap_read(priv->mdio, GSWIP_MDIO_READ, &val);
+ if (err)
+ return err;
+
+ return val;
+}
+
+static int gswip_mdio(struct gswip_priv *priv)
+{
+ struct device_node *mdio_np, *switch_np = priv->dev->of_node;
+ struct device *dev = priv->dev;
+ struct mii_bus *bus;
+ int err = 0;
+
+ mdio_np = of_get_compatible_child(switch_np, "lantiq,xrx200-mdio");
+ if (!mdio_np)
+ mdio_np = of_get_child_by_name(switch_np, "mdio");
+
+ if (!of_device_is_available(mdio_np))
+ goto out_put_node;
+
+ bus = devm_mdiobus_alloc(dev);
+ if (!bus) {
+ err = -ENOMEM;
+ goto out_put_node;
+ }
+
+ bus->priv = priv;
+ bus->read = gswip_mdio_rd;
+ bus->write = gswip_mdio_wr;
+ bus->name = "lantiq,xrx200-mdio";
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(priv->dev));
+ bus->parent = priv->dev;
+
+ err = devm_of_mdiobus_register(dev, bus, mdio_np);
+
+out_put_node:
+ of_node_put(mdio_np);
+
+ return err;
+}
+
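+/* The two PCE table accessors below share the same handshake (summarised
+ * from the code, not from hardware documentation): wait until a previous
+ * access has finished (BAS cleared), program PCE_TBL_ADDR plus the table
+ * and opmode bits in PCE_TBL_CTRL, then set BAS to start the access and
+ * wait for the hardware to clear it again. For writes the key/val/mask
+ * registers are loaded before BAS is set; for reads they are read back
+ * after BAS has cleared.
+ */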
+static int gswip_pce_table_entry_read(struct gswip_priv *priv,
+ struct gswip_pce_table_entry *tbl)
+{
+ int i;
+ int err;
+ u32 crtl;
+ u32 tmp;
+ u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSRD :
+ GSWIP_PCE_TBL_CTRL_OPMOD_ADRD;
+
+ mutex_lock(&priv->pce_table_lock);
+
+ err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
+ GSWIP_PCE_TBL_CTRL_BAS);
+ if (err)
+ goto out_unlock;
+
+ regmap_write(priv->gswip, GSWIP_PCE_TBL_ADDR, tbl->index);
+ regmap_write_bits(priv->gswip, GSWIP_PCE_TBL_CTRL,
+ GSWIP_PCE_TBL_CTRL_ADDR_MASK |
+ GSWIP_PCE_TBL_CTRL_OPMOD_MASK |
+ GSWIP_PCE_TBL_CTRL_BAS,
+ tbl->table | addr_mode | GSWIP_PCE_TBL_CTRL_BAS);
+
+ err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
+ GSWIP_PCE_TBL_CTRL_BAS);
+ if (err)
+ goto out_unlock;
+
+ for (i = 0; i < ARRAY_SIZE(tbl->key); i++) {
+ err = regmap_read(priv->gswip, GSWIP_PCE_TBL_KEY(i), &tmp);
+ if (err)
+ goto out_unlock;
+ tbl->key[i] = tmp;
+ }
+ for (i = 0; i < ARRAY_SIZE(tbl->val); i++) {
+ err = regmap_read(priv->gswip, GSWIP_PCE_TBL_VAL(i), &tmp);
+ if (err)
+ goto out_unlock;
+ tbl->val[i] = tmp;
+ }
+
+ err = regmap_read(priv->gswip, GSWIP_PCE_TBL_MASK, &tmp);
+ if (err)
+ goto out_unlock;
+
+ tbl->mask = tmp;
+ err = regmap_read(priv->gswip, GSWIP_PCE_TBL_CTRL, &crtl);
+ if (err)
+ goto out_unlock;
+
+ tbl->type = !!(crtl & GSWIP_PCE_TBL_CTRL_TYPE);
+ tbl->valid = !!(crtl & GSWIP_PCE_TBL_CTRL_VLD);
+ tbl->gmap = (crtl & GSWIP_PCE_TBL_CTRL_GMAP_MASK) >> 7;
+
+out_unlock:
+ mutex_unlock(&priv->pce_table_lock);
+
+ return err;
+}
+
+static int gswip_pce_table_entry_write(struct gswip_priv *priv,
+ struct gswip_pce_table_entry *tbl)
+{
+ int i;
+ int err;
+ u32 crtl;
+ u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSWR :
+ GSWIP_PCE_TBL_CTRL_OPMOD_ADWR;
+
+ mutex_lock(&priv->pce_table_lock);
+
+ err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
+ GSWIP_PCE_TBL_CTRL_BAS);
+ if (err) {
+ mutex_unlock(&priv->pce_table_lock);
+ return err;
+ }
+
+ regmap_write(priv->gswip, GSWIP_PCE_TBL_ADDR, tbl->index);
+ regmap_write_bits(priv->gswip, GSWIP_PCE_TBL_CTRL,
+ GSWIP_PCE_TBL_CTRL_ADDR_MASK |
+ GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
+ tbl->table | addr_mode);
+
+ for (i = 0; i < ARRAY_SIZE(tbl->key); i++)
+ regmap_write(priv->gswip, GSWIP_PCE_TBL_KEY(i), tbl->key[i]);
+
+ for (i = 0; i < ARRAY_SIZE(tbl->val); i++)
+ regmap_write(priv->gswip, GSWIP_PCE_TBL_VAL(i), tbl->val[i]);
+
+ regmap_write_bits(priv->gswip, GSWIP_PCE_TBL_CTRL,
+ GSWIP_PCE_TBL_CTRL_ADDR_MASK |
+ GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
+ tbl->table | addr_mode);
+
+ regmap_write(priv->gswip, GSWIP_PCE_TBL_MASK, tbl->mask);
+
+ regmap_read(priv->gswip, GSWIP_PCE_TBL_CTRL, &crtl);
+ crtl &= ~(GSWIP_PCE_TBL_CTRL_TYPE | GSWIP_PCE_TBL_CTRL_VLD |
+ GSWIP_PCE_TBL_CTRL_GMAP_MASK);
+ if (tbl->type)
+ crtl |= GSWIP_PCE_TBL_CTRL_TYPE;
+ if (tbl->valid)
+ crtl |= GSWIP_PCE_TBL_CTRL_VLD;
+ crtl |= (tbl->gmap << 7) & GSWIP_PCE_TBL_CTRL_GMAP_MASK;
+ crtl |= GSWIP_PCE_TBL_CTRL_BAS;
+ regmap_write(priv->gswip, GSWIP_PCE_TBL_CTRL, crtl);
+
+ err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
+ GSWIP_PCE_TBL_CTRL_BAS);
+
+ mutex_unlock(&priv->pce_table_lock);
+
+ return err;
+}
+
+/* Add the LAN port into a bridge with the CPU port by
+ * default. This prevents automatic forwarding of
+ * packets between the LAN ports when no explicit
+ * bridge is configured.
+ */
+static int gswip_add_single_port_br(struct gswip_priv *priv, int port, bool add)
+{
+ struct gswip_pce_table_entry vlan_active = {0,};
+ struct gswip_pce_table_entry vlan_mapping = {0,};
+ int err;
+
+ vlan_active.index = port + 1;
+ vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
+ vlan_active.key[0] = GSWIP_VLAN_UNAWARE_PVID;
+ vlan_active.val[0] = port + 1 /* fid */;
+ vlan_active.valid = add;
+ err = gswip_pce_table_entry_write(priv, &vlan_active);
+ if (err) {
+ dev_err(priv->dev, "failed to write active VLAN: %d\n", err);
+ return err;
+ }
+
+ if (!add)
+ return 0;
+
+ vlan_mapping.index = port + 1;
+ vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
+ vlan_mapping.val[0] = GSWIP_VLAN_UNAWARE_PVID;
+ vlan_mapping.val[1] = BIT(port) | dsa_cpu_ports(priv->ds);
+ vlan_mapping.val[2] = 0;
+ err = gswip_pce_table_entry_write(priv, &vlan_mapping);
+ if (err) {
+ dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int gswip_port_set_learning(struct gswip_priv *priv, int port,
+ bool enable)
+{
+ if (!GSWIP_VERSION_GE(priv, GSWIP_VERSION_2_2))
+ return -EOPNOTSUPP;
+
+ /* learning disable bit */
+ return regmap_update_bits(priv->gswip, GSWIP_PCE_PCTRL_3p(port),
+ GSWIP_PCE_PCTRL_3_LNDIS,
+ enable ? 0 : GSWIP_PCE_PCTRL_3_LNDIS);
+}
+
+static int gswip_port_pre_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct gswip_priv *priv = ds->priv;
+ unsigned long supported = 0;
+
+ if (GSWIP_VERSION_GE(priv, GSWIP_VERSION_2_2))
+ supported |= BR_LEARNING;
+
+ if (flags.mask & ~supported)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int gswip_port_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct gswip_priv *priv = ds->priv;
+
+ if (flags.mask & BR_LEARNING)
+ return gswip_port_set_learning(priv, port,
+ !!(flags.val & BR_LEARNING));
+
+ return 0;
+}
+
+static int gswip_port_setup(struct dsa_switch *ds, int port)
+{
+ struct gswip_priv *priv = ds->priv;
+ int err;
+
+ if (!dsa_is_cpu_port(ds, port)) {
+ err = gswip_add_single_port_br(priv, port, true);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int gswip_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phydev)
+{
+ struct gswip_priv *priv = ds->priv;
+
+ if (!dsa_is_cpu_port(ds, port)) {
+ u32 mdio_phy = 0;
+
+ if (phydev)
+ mdio_phy = phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK;
+
+ regmap_write_bits(priv->mdio, GSWIP_MDIO_PHYp(port),
+ GSWIP_MDIO_PHY_ADDR_MASK,
+ mdio_phy);
+ }
+
+ /* RMON Counter Enable for port */
+ regmap_write(priv->gswip, GSWIP_BM_PCFGp(port), GSWIP_BM_PCFG_CNTEN);
+
+ /* enable port fetch/store dma & VLAN Modification */
+ regmap_set_bits(priv->gswip, GSWIP_FDMA_PCTRLp(port),
+ GSWIP_FDMA_PCTRL_EN | GSWIP_FDMA_PCTRL_VLANMOD_BOTH);
+ regmap_set_bits(priv->gswip, GSWIP_SDMA_PCTRLp(port),
+ GSWIP_SDMA_PCTRL_EN);
+
+ return 0;
+}
+
+static void gswip_port_disable(struct dsa_switch *ds, int port)
+{
+ struct gswip_priv *priv = ds->priv;
+
+ regmap_clear_bits(priv->gswip, GSWIP_FDMA_PCTRLp(port),
+ GSWIP_FDMA_PCTRL_EN);
+ regmap_clear_bits(priv->gswip, GSWIP_SDMA_PCTRLp(port),
+ GSWIP_SDMA_PCTRL_EN);
+}
+
+static int gswip_pce_load_microcode(struct gswip_priv *priv)
+{
+ int i;
+ int err;
+
+ regmap_write_bits(priv->gswip, GSWIP_PCE_TBL_CTRL,
+ GSWIP_PCE_TBL_CTRL_ADDR_MASK |
+ GSWIP_PCE_TBL_CTRL_OPMOD_MASK |
+ GSWIP_PCE_TBL_CTRL_OPMOD_ADWR,
+ GSWIP_PCE_TBL_CTRL_OPMOD_ADWR);
+ regmap_write(priv->gswip, GSWIP_PCE_TBL_MASK, 0);
+
+ for (i = 0; i < priv->hw_info->pce_microcode_size; i++) {
+ regmap_write(priv->gswip, GSWIP_PCE_TBL_ADDR, i);
+ regmap_write(priv->gswip, GSWIP_PCE_TBL_VAL(0),
+ (*priv->hw_info->pce_microcode)[i].val_0);
+ regmap_write(priv->gswip, GSWIP_PCE_TBL_VAL(1),
+ (*priv->hw_info->pce_microcode)[i].val_1);
+ regmap_write(priv->gswip, GSWIP_PCE_TBL_VAL(2),
+ (*priv->hw_info->pce_microcode)[i].val_2);
+ regmap_write(priv->gswip, GSWIP_PCE_TBL_VAL(3),
+ (*priv->hw_info->pce_microcode)[i].val_3);
+
+ /* start the table access: */
+ regmap_set_bits(priv->gswip, GSWIP_PCE_TBL_CTRL,
+ GSWIP_PCE_TBL_CTRL_BAS);
+ err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
+ GSWIP_PCE_TBL_CTRL_BAS);
+ if (err)
+ return err;
+ }
+
+ /* tell the switch that the microcode is loaded */
+ regmap_set_bits(priv->gswip, GSWIP_PCE_GCTRL_0,
+ GSWIP_PCE_GCTRL_0_MC_VALID);
+
+ return 0;
+}
+
+static void gswip_port_commit_pvid(struct gswip_priv *priv, int port)
+{
+ struct dsa_port *dp = dsa_to_port(priv->ds, port);
+ struct net_device *br = dsa_port_bridge_dev_get(dp);
+ u32 vinr;
+ int idx;
+
+ if (!dsa_port_is_user(dp))
+ return;
+
+ if (br) {
+ u16 pvid = GSWIP_VLAN_UNAWARE_PVID;
+
+ if (br_vlan_enabled(br))
+ br_vlan_get_pvid(br, &pvid);
+
+ /* VLAN-aware bridge ports with no PVID will use Active VLAN
+ * index 0. The expectation is that this drops all untagged and
+ * VID-0 tagged ingress traffic.
+ */
+ idx = 0;
+ for (int i = priv->hw_info->max_ports;
+ i < ARRAY_SIZE(priv->vlans); i++) {
+ if (priv->vlans[i].bridge == br &&
+ priv->vlans[i].vid == pvid) {
+ idx = i;
+ break;
+ }
+ }
+ } else {
+ /* The Active VLAN table index as configured by
+ * gswip_add_single_port_br()
+ */
+ idx = port + 1;
+ }
+
+ vinr = idx ? GSWIP_PCE_VCTRL_VINR_ALL : GSWIP_PCE_VCTRL_VINR_TAGGED;
+ regmap_write_bits(priv->gswip, GSWIP_PCE_VCTRL(port),
+ GSWIP_PCE_VCTRL_VINR,
+ FIELD_PREP(GSWIP_PCE_VCTRL_VINR, vinr));
+
+ /* Note that in GSWIP 2.2 VLAN mode the VID needs to be programmed
+ * directly instead of referencing the index in the Active VLAN Table.
+ * However, without the VLANMD bit (9) in PCE_GCTRL_1 (0x457) even
+ * GSWIP 2.2 and newer hardware maintain the GSWIP 2.1 behavior.
+ */
+ regmap_write(priv->gswip, GSWIP_PCE_DEFPVID(port), idx);
+}
+
+static int gswip_port_vlan_filtering(struct dsa_switch *ds, int port,
+ bool vlan_filtering,
+ struct netlink_ext_ack *extack)
+{
+ struct gswip_priv *priv = ds->priv;
+
+ if (vlan_filtering) {
+ /* Use tag based VLAN */
+ regmap_write_bits(priv->gswip, GSWIP_PCE_VCTRL(port),
+ GSWIP_PCE_VCTRL_VSR |
+ GSWIP_PCE_VCTRL_UVR |
+ GSWIP_PCE_VCTRL_VIMR |
+ GSWIP_PCE_VCTRL_VEMR |
+ GSWIP_PCE_VCTRL_VID0,
+ GSWIP_PCE_VCTRL_UVR |
+ GSWIP_PCE_VCTRL_VIMR |
+ GSWIP_PCE_VCTRL_VEMR |
+ GSWIP_PCE_VCTRL_VID0);
+ regmap_clear_bits(priv->gswip, GSWIP_PCE_PCTRL_0p(port),
+ GSWIP_PCE_PCTRL_0_TVM);
+ } else {
+ /* Use port based VLAN */
+ regmap_write_bits(priv->gswip, GSWIP_PCE_VCTRL(port),
+ GSWIP_PCE_VCTRL_UVR |
+ GSWIP_PCE_VCTRL_VIMR |
+ GSWIP_PCE_VCTRL_VEMR |
+ GSWIP_PCE_VCTRL_VID0 |
+ GSWIP_PCE_VCTRL_VSR,
+ GSWIP_PCE_VCTRL_VSR);
+ regmap_set_bits(priv->gswip, GSWIP_PCE_PCTRL_0p(port),
+ GSWIP_PCE_PCTRL_0_TVM);
+ }
+
+ gswip_port_commit_pvid(priv, port);
+
+ return 0;
+}
+
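+/* A minimal, hypothetical device tree fragment exercising the delay
+ * properties consumed by gswip_mii_delay_setup() below (values in
+ * picoseconds, encoded in 500 ps steps by the GSWIP_MII_PCDU_*DLY macros):
+ *
+ *	port@5 {
+ *		reg = <5>;
+ *		phy-mode = "rgmii";
+ *		rx-internal-delay-ps = <2000>;
+ *		tx-internal-delay-ps = <500>;
+ *	};
+ */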
+static void gswip_mii_delay_setup(struct gswip_priv *priv, struct dsa_port *dp,
+ phy_interface_t interface)
+{
+ u32 tx_delay = GSWIP_MII_PCDU_TXDLY_DEFAULT;
+ u32 rx_delay = GSWIP_MII_PCDU_RXDLY_DEFAULT;
+ struct device_node *port_dn = dp->dn;
+ u16 mii_pcdu_reg;
+
+ /* As MII_PCDU registers only exist for MII ports, silently return
+ * unless the port is an MII port
+ */
+ if (!(priv->hw_info->mii_ports & BIT(dp->index)))
+ return;
+
+ switch (dp->index + priv->hw_info->mii_port_reg_offset) {
+ case 0:
+ mii_pcdu_reg = GSWIP_MII_PCDU0;
+ break;
+ case 1:
+ mii_pcdu_reg = GSWIP_MII_PCDU1;
+ break;
+ case 5:
+ mii_pcdu_reg = GSWIP_MII_PCDU5;
+ break;
+ default:
+ return;
+ }
+
+ /* legacy code to set default delays according to the interface mode */
+ switch (interface) {
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ tx_delay = 0;
+ rx_delay = 0;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ rx_delay = 0;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ tx_delay = 0;
+ break;
+ default:
+ break;
+ }
+
+ /* allow setting the delays using device tree properties */
+ of_property_read_u32(port_dn, "rx-internal-delay-ps", &rx_delay);
+ of_property_read_u32(port_dn, "tx-internal-delay-ps", &tx_delay);
+
+ regmap_write_bits(priv->mii, mii_pcdu_reg,
+ GSWIP_MII_PCDU_TXDLY_MASK |
+ GSWIP_MII_PCDU_RXDLY_MASK,
+ GSWIP_MII_PCDU_TXDLY(tx_delay) |
+ GSWIP_MII_PCDU_RXDLY(rx_delay));
+}
+
+static int gswip_setup(struct dsa_switch *ds)
+{
+ unsigned int cpu_ports = dsa_cpu_ports(ds);
+ struct gswip_priv *priv = ds->priv;
+ struct dsa_port *cpu_dp;
+ int err, i;
+
+ regmap_write(priv->gswip, GSWIP_SWRES, GSWIP_SWRES_R0);
+ usleep_range(5000, 10000);
+ regmap_write(priv->gswip, GSWIP_SWRES, 0);
+
+ /* disable port fetch/store dma on all ports */
+ for (i = 0; i < priv->hw_info->max_ports; i++) {
+ gswip_port_disable(ds, i);
+ gswip_port_vlan_filtering(ds, i, false, NULL);
+ }
+
+ /* enable Switch */
+ regmap_set_bits(priv->mdio, GSWIP_MDIO_GLOB, GSWIP_MDIO_GLOB_ENABLE);
+
+ err = gswip_pce_load_microcode(priv);
+ if (err) {
+ dev_err(priv->dev, "writing PCE microcode failed, %i\n", err);
+ return err;
+ }
+
+ /* Default unknown Broadcast/Multicast/Unicast port maps */
+ regmap_write(priv->gswip, GSWIP_PCE_PMAP1, cpu_ports);
+ regmap_write(priv->gswip, GSWIP_PCE_PMAP2, cpu_ports);
+ regmap_write(priv->gswip, GSWIP_PCE_PMAP3, cpu_ports);
+
+ /* Deactivate MDIO PHY auto polling. Some PHYs such as the AR8030 have
+ * an interoperability problem with this auto polling mechanism because
+ * their status registers report a different link state than the one
+ * that is actually active. The AR8030 has the BMSR_ESTATEN bit set as
+ * well as ESTATUS_1000_TFULL and ESTATUS_1000_XFULL. This makes the
+ * auto polling state machine assume the link is being negotiated at
+ * 1 Gbit/s. Since the PHY itself is a Fast Ethernet RMII PHY this leads
+ * to the switch port being completely dead (RX and TX are both not
+ * working).
+ * With various other PHY / port combinations (PHY11G GPHY, PHY22F
+ * GPHY, external RGMII PEF7071/7072) traffic would also stop. Sometimes
+ * it would work fine for a few minutes to hours and then stop; on
+ * other devices no traffic could be sent or received at all.
+ * Testing shows that when PHY auto polling is disabled these problems
+ * go away.
+ */
+ regmap_write(priv->mdio, GSWIP_MDIO_MDC_CFG0, 0x0);
+
+ /* Configure the MDIO Clock 2.5 MHz */
+ regmap_write_bits(priv->mdio, GSWIP_MDIO_MDC_CFG1, 0xff, 0x09);
+
+ /* bring up the mdio bus */
+ err = gswip_mdio(priv);
+ if (err) {
+ dev_err(priv->dev, "mdio bus setup failed\n");
+ return err;
+ }
+
+ /* Disable the xMII interface and clear its isolation bit */
+ for (i = 0; i < priv->hw_info->max_ports; i++)
+ gswip_mii_mask_cfg(priv,
+ GSWIP_MII_CFG_EN | GSWIP_MII_CFG_ISOLATE,
+ 0, i);
+
+ dsa_switch_for_each_cpu_port(cpu_dp, ds) {
+ /* enable special tag insertion on cpu port */
+ regmap_set_bits(priv->gswip, GSWIP_FDMA_PCTRLp(cpu_dp->index),
+ GSWIP_FDMA_PCTRL_STEN);
+
+ /* accept special tag in ingress direction */
+ regmap_set_bits(priv->gswip,
+ GSWIP_PCE_PCTRL_0p(cpu_dp->index),
+ GSWIP_PCE_PCTRL_0_INGRESS);
+ }
+
+ regmap_set_bits(priv->gswip, GSWIP_BM_QUEUE_GCTRL,
+ GSWIP_BM_QUEUE_GCTRL_GL_MOD);
+
+ /* VLAN aware Switching */
+ regmap_set_bits(priv->gswip, GSWIP_PCE_GCTRL_0,
+ GSWIP_PCE_GCTRL_0_VLAN);
+
+ /* Flush MAC Table */
+ regmap_set_bits(priv->gswip, GSWIP_PCE_GCTRL_0,
+ GSWIP_PCE_GCTRL_0_MTFL);
+
+ err = gswip_switch_r_timeout(priv, GSWIP_PCE_GCTRL_0,
+ GSWIP_PCE_GCTRL_0_MTFL);
+ if (err) {
+ dev_err(priv->dev, "MAC flushing didn't finish\n");
+ return err;
+ }
+
+ ds->mtu_enforcement_ingress = true;
+
+ return 0;
+}
+
+static enum dsa_tag_protocol gswip_get_tag_protocol(struct dsa_switch *ds,
+ int port,
+ enum dsa_tag_protocol mp)
+{
+ struct gswip_priv *priv = ds->priv;
+
+ return priv->hw_info->tag_protocol;
+}
+
+static int gswip_vlan_active_create(struct gswip_priv *priv,
+ struct net_device *bridge,
+ int fid, u16 vid)
+{
+ struct gswip_pce_table_entry vlan_active = {0,};
+ unsigned int max_ports = priv->hw_info->max_ports;
+ int idx = -1;
+ int err;
+ int i;
+
+ /* Look for a free slot */
+ for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
+ if (!priv->vlans[i].bridge) {
+ idx = i;
+ break;
+ }
+ }
+
+ if (idx == -1)
+ return -ENOSPC;
+
+ if (fid == -1)
+ fid = idx;
+
+ vlan_active.index = idx;
+ vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
+ vlan_active.key[0] = vid;
+ vlan_active.val[0] = fid;
+ vlan_active.valid = true;
+
+ err = gswip_pce_table_entry_write(priv, &vlan_active);
+ if (err) {
+ dev_err(priv->dev, "failed to write active VLAN: %d\n", err);
+ return err;
+ }
+
+ priv->vlans[idx].bridge = bridge;
+ priv->vlans[idx].vid = vid;
+ priv->vlans[idx].fid = fid;
+
+ return idx;
+}
+
+static int gswip_vlan_active_remove(struct gswip_priv *priv, int idx)
+{
+ struct gswip_pce_table_entry vlan_active = {0,};
+ int err;
+
+ vlan_active.index = idx;
+ vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
+ vlan_active.valid = false;
+ err = gswip_pce_table_entry_write(priv, &vlan_active);
+ if (err)
+ dev_err(priv->dev, "failed to delete active VLAN: %d\n", err);
+ priv->vlans[idx].bridge = NULL;
+
+ return err;
+}
+
+static int gswip_vlan_add(struct gswip_priv *priv, struct net_device *bridge,
+ int port, u16 vid, bool untagged, bool pvid,
+ bool vlan_aware)
+{
+ struct gswip_pce_table_entry vlan_mapping = {0,};
+ unsigned int max_ports = priv->hw_info->max_ports;
+ unsigned int cpu_ports = dsa_cpu_ports(priv->ds);
+ bool active_vlan_created = false;
+ int fid = -1, idx = -1;
+ int i, err;
+
+ /* Check if there is already a page for this bridge */
+ for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
+ if (priv->vlans[i].bridge == bridge) {
+ if (vlan_aware) {
+ if (fid != -1 && fid != priv->vlans[i].fid)
+ dev_err(priv->dev, "one bridge with multiple flow ids\n");
+ fid = priv->vlans[i].fid;
+ }
+ if (priv->vlans[i].vid == vid) {
+ idx = i;
+ break;
+ }
+ }
+ }
+
+ /* If this bridge is not programmed yet, add an Active VLAN table
+ * entry in a free slot and prepare the VLAN mapping table entry.
+ */
+ if (idx == -1) {
+ idx = gswip_vlan_active_create(priv, bridge, fid, vid);
+ if (idx < 0)
+ return idx;
+ active_vlan_created = true;
+
+ vlan_mapping.index = idx;
+ vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
+ } else {
+ /* Read the existing VLAN mapping entry from the switch */
+ vlan_mapping.index = idx;
+ vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
+ err = gswip_pce_table_entry_read(priv, &vlan_mapping);
+ if (err) {
+ dev_err(priv->dev, "failed to read VLAN mapping: %d\n",
+ err);
+ return err;
+ }
+ }
+
+ /* VLAN ID byte, maps to the VLAN ID of vlan active table */
+ vlan_mapping.val[0] = vid;
+ /* Update the VLAN mapping entry and write it to the switch */
+ vlan_mapping.val[1] |= cpu_ports;
+ vlan_mapping.val[1] |= BIT(port);
+ if (vlan_aware)
+ vlan_mapping.val[2] |= cpu_ports;
+ if (untagged)
+ vlan_mapping.val[2] &= ~BIT(port);
+ else
+ vlan_mapping.val[2] |= BIT(port);
+ err = gswip_pce_table_entry_write(priv, &vlan_mapping);
+ if (err) {
+ dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
+ /* In case an Active VLAN was created, delete it again */
+ if (active_vlan_created)
+ gswip_vlan_active_remove(priv, idx);
+ return err;
+ }
+
+ gswip_port_commit_pvid(priv, port);
+
+ return 0;
+}
+
+static int gswip_vlan_remove(struct gswip_priv *priv,
+ struct net_device *bridge, int port,
+ u16 vid)
+{
+ struct gswip_pce_table_entry vlan_mapping = {0,};
+ unsigned int max_ports = priv->hw_info->max_ports;
+ int idx = -1;
+ int i;
+ int err;
+
+ /* Check if there is already a page for this bridge */
+ for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
+ if (priv->vlans[i].bridge == bridge &&
+ priv->vlans[i].vid == vid) {
+ idx = i;
+ break;
+ }
+ }
+
+ if (idx == -1) {
+ dev_err(priv->dev, "Port %d cannot find VID %u of bridge %s\n",
+ port, vid, bridge ? bridge->name : "(null)");
+ return -ENOENT;
+ }
+
+ vlan_mapping.index = idx;
+ vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
+ err = gswip_pce_table_entry_read(priv, &vlan_mapping);
+ if (err) {
+ dev_err(priv->dev, "failed to read VLAN mapping: %d\n", err);
+ return err;
+ }
+
+ vlan_mapping.val[1] &= ~BIT(port);
+ vlan_mapping.val[2] &= ~BIT(port);
+ err = gswip_pce_table_entry_write(priv, &vlan_mapping);
+ if (err) {
+ dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
+ return err;
+ }
+
+	/* In case no user ports are left in this VLAN, remove the Active VLAN entry */
+ if (!(vlan_mapping.val[1] & ~dsa_cpu_ports(priv->ds))) {
+ err = gswip_vlan_active_remove(priv, idx);
+ if (err) {
+			dev_err(priv->dev, "failed to remove active VLAN: %d\n",
+ err);
+ return err;
+ }
+ }
+
+ gswip_port_commit_pvid(priv, port);
+
+ return 0;
+}
+
+static int gswip_port_bridge_join(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *br = bridge.dev;
+ struct gswip_priv *priv = ds->priv;
+ int err;
+
+ /* Set up the VLAN for VLAN-unaware bridging for this port, and remove
+ * it from the "single-port bridge" through which it was operating as
+ * standalone.
+ */
+ err = gswip_vlan_add(priv, br, port, GSWIP_VLAN_UNAWARE_PVID,
+ true, true, false);
+ if (err)
+ return err;
+
+ return gswip_add_single_port_br(priv, port, false);
+}
+
+static void gswip_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge)
+{
+ struct net_device *br = bridge.dev;
+ struct gswip_priv *priv = ds->priv;
+
+ /* Add the port back to the "single-port bridge", and remove it from
+ * the VLAN-unaware PVID created for this bridge.
+ */
+ gswip_add_single_port_br(priv, port, true);
+ gswip_vlan_remove(priv, br, port, GSWIP_VLAN_UNAWARE_PVID);
+}
+
+static int gswip_port_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
+ struct gswip_priv *priv = ds->priv;
+ unsigned int max_ports = priv->hw_info->max_ports;
+ int pos = max_ports;
+ int i, idx = -1;
+
+ /* We only support VLAN filtering on bridges */
+ if (!dsa_is_cpu_port(ds, port) && !bridge)
+ return -EOPNOTSUPP;
+
+ /* Check if there is already a page for this VLAN */
+ for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
+ if (priv->vlans[i].bridge == bridge &&
+ priv->vlans[i].vid == vlan->vid) {
+ idx = i;
+ break;
+ }
+ }
+
+	/* If this VLAN is not programmed yet, we have to reserve
+	 * one entry in the VLAN table and continue the search at
+	 * the next position.
+	 */
+ if (idx == -1) {
+ /* Look for a free slot */
+ for (; pos < ARRAY_SIZE(priv->vlans); pos++) {
+ if (!priv->vlans[pos].bridge) {
+ idx = pos;
+ pos++;
+ break;
+ }
+ }
+
+ if (idx == -1) {
+ NL_SET_ERR_MSG_MOD(extack, "No slot in VLAN table");
+ return -ENOSPC;
+ }
+ }
+
+ return 0;
+}
+
+static int gswip_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
+ struct gswip_priv *priv = ds->priv;
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ int err;
+
+ if (vlan->vid == GSWIP_VLAN_UNAWARE_PVID)
+ return 0;
+
+ err = gswip_port_vlan_prepare(ds, port, vlan, extack);
+ if (err)
+ return err;
+
+	/* We have to receive all packets on the CPU port and should not
+	 * do any VLAN filtering here. This function is also called with
+	 * a NULL bridge, in which case we do not know which bridge to
+	 * configure this for.
+	 */
+ if (dsa_is_cpu_port(ds, port))
+ return 0;
+
+ return gswip_vlan_add(priv, bridge, port, vlan->vid, untagged, pvid,
+ true);
+}
+
+static int gswip_port_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
+ struct gswip_priv *priv = ds->priv;
+
+ if (vlan->vid == GSWIP_VLAN_UNAWARE_PVID)
+ return 0;
+
+	/* We have to receive all packets on the CPU port and should not
+	 * do any VLAN filtering here. This function is also called with
+	 * a NULL bridge, in which case we do not know which bridge to
+	 * configure this for.
+	 */
+ if (dsa_is_cpu_port(ds, port))
+ return 0;
+
+ return gswip_vlan_remove(priv, bridge, port, vlan->vid);
+}
+
+static void gswip_port_fast_age(struct dsa_switch *ds, int port)
+{
+ struct gswip_priv *priv = ds->priv;
+ struct gswip_pce_table_entry mac_bridge = {0,};
+ int i;
+ int err;
+
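+	/* Walk the 2048-entry MAC bridging table and invalidate every
+	 * dynamically learned entry that belongs to this port.
+	 */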
+ for (i = 0; i < 2048; i++) {
+ mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
+ mac_bridge.index = i;
+
+ err = gswip_pce_table_entry_read(priv, &mac_bridge);
+ if (err) {
+ dev_err(priv->dev, "failed to read mac bridge: %d\n",
+ err);
+ return;
+ }
+
+ if (!mac_bridge.valid)
+ continue;
+
+ if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC)
+ continue;
+
+ if (port != FIELD_GET(GSWIP_TABLE_MAC_BRIDGE_VAL0_PORT,
+ mac_bridge.val[0]))
+ continue;
+
+ mac_bridge.valid = false;
+ err = gswip_pce_table_entry_write(priv, &mac_bridge);
+ if (err) {
+ dev_err(priv->dev, "failed to write mac bridge: %d\n",
+ err);
+ return;
+ }
+ }
+}
+
+static void gswip_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
+{
+ struct gswip_priv *priv = ds->priv;
+ u32 stp_state;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ regmap_clear_bits(priv->gswip, GSWIP_SDMA_PCTRLp(port),
+ GSWIP_SDMA_PCTRL_EN);
+ return;
+ case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
+ stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LISTEN;
+ break;
+ case BR_STATE_LEARNING:
+ stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LEARNING;
+ break;
+ case BR_STATE_FORWARDING:
+ stp_state = GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING;
+ break;
+ default:
+ dev_err(priv->dev, "invalid STP state: %d\n", state);
+ return;
+ }
+
+ regmap_set_bits(priv->gswip, GSWIP_SDMA_PCTRLp(port),
+ GSWIP_SDMA_PCTRL_EN);
+ regmap_write_bits(priv->gswip, GSWIP_PCE_PCTRL_0p(port),
+ GSWIP_PCE_PCTRL_0_PSTATE_MASK,
+ stp_state);
+}
+
+static int gswip_port_fdb(struct dsa_switch *ds, int port,
+ struct net_device *bridge, const unsigned char *addr,
+ u16 vid, bool add)
+{
+ struct gswip_priv *priv = ds->priv;
+ struct gswip_pce_table_entry mac_bridge = {0,};
+ unsigned int max_ports = priv->hw_info->max_ports;
+ int fid = -1;
+ int i;
+ int err;
+
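+	/* Find the FID assigned to this bridge by looking for any VLAN
+	 * entry that belongs to it.
+	 */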
+ for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
+ if (priv->vlans[i].bridge == bridge) {
+ fid = priv->vlans[i].fid;
+ break;
+ }
+ }
+
+ if (fid == -1) {
+ dev_err(priv->dev, "no FID found for bridge %s\n",
+ bridge->name);
+ return -EINVAL;
+ }
+
+ mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
+ mac_bridge.key_mode = true;
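+	/* The MAC address is stored in three 16-bit key words, least
+	 * significant octets first; the FID goes into the fourth key word.
+	 */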
+ mac_bridge.key[0] = addr[5] | (addr[4] << 8);
+ mac_bridge.key[1] = addr[3] | (addr[2] << 8);
+ mac_bridge.key[2] = addr[1] | (addr[0] << 8);
+ mac_bridge.key[3] = FIELD_PREP(GSWIP_TABLE_MAC_BRIDGE_KEY3_FID, fid);
+ mac_bridge.val[0] = add ? BIT(port) : 0; /* port map */
+ if (GSWIP_VERSION_GE(priv, GSWIP_VERSION_2_2_ETC))
+ mac_bridge.val[1] = add ? (GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC |
+ GSWIP_TABLE_MAC_BRIDGE_VAL1_VALID) : 0;
+ else
+ mac_bridge.val[1] = GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC;
+
+ mac_bridge.valid = add;
+
+ err = gswip_pce_table_entry_write(priv, &mac_bridge);
+ if (err)
+ dev_err(priv->dev, "failed to write mac bridge: %d\n", err);
+
+ return err;
+}
+
+static int gswip_port_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ if (db.type != DSA_DB_BRIDGE)
+ return -EOPNOTSUPP;
+
+ return gswip_port_fdb(ds, port, db.bridge.dev, addr, vid, true);
+}
+
+static int gswip_port_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ if (db.type != DSA_DB_BRIDGE)
+ return -EOPNOTSUPP;
+
+ return gswip_port_fdb(ds, port, db.bridge.dev, addr, vid, false);
+}
+
+static int gswip_port_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct gswip_priv *priv = ds->priv;
+ struct gswip_pce_table_entry mac_bridge = {0,};
+ unsigned char addr[ETH_ALEN];
+ int i;
+ int err;
+
+ for (i = 0; i < 2048; i++) {
+ mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
+ mac_bridge.index = i;
+
+ err = gswip_pce_table_entry_read(priv, &mac_bridge);
+ if (err) {
+ dev_err(priv->dev,
+ "failed to read mac bridge entry %d: %d\n",
+ i, err);
+ return err;
+ }
+
+ if (!mac_bridge.valid)
+ continue;
+
+ addr[5] = mac_bridge.key[0] & 0xff;
+ addr[4] = (mac_bridge.key[0] >> 8) & 0xff;
+ addr[3] = mac_bridge.key[1] & 0xff;
+ addr[2] = (mac_bridge.key[1] >> 8) & 0xff;
+ addr[1] = mac_bridge.key[2] & 0xff;
+ addr[0] = (mac_bridge.key[2] >> 8) & 0xff;
+ if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC) {
+ if (mac_bridge.val[0] & BIT(port)) {
+ err = cb(addr, 0, true, data);
+ if (err)
+ return err;
+ }
+ } else {
+ if (port == FIELD_GET(GSWIP_TABLE_MAC_BRIDGE_VAL0_PORT,
+ mac_bridge.val[0])) {
+ err = cb(addr, 0, false, data);
+ if (err)
+ return err;
+ }
+ }
+ }
+ return 0;
+}
+
+static int gswip_port_max_mtu(struct dsa_switch *ds, int port)
+{
+ /* Includes 8 bytes for special header. */
+ return GSWIP_MAX_PACKET_LENGTH - VLAN_ETH_HLEN - ETH_FCS_LEN;
+}
+
+static int gswip_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+ struct gswip_priv *priv = ds->priv;
+
+ /* CPU port always has maximum mtu of user ports, so use it to set
+ * switch frame size, including 8 byte special header.
+ */
+ if (dsa_is_cpu_port(ds, port)) {
+ new_mtu += 8;
+ regmap_write(priv->gswip, GSWIP_MAC_FLEN,
+ VLAN_ETH_HLEN + new_mtu + ETH_FCS_LEN);
+ }
+
+ /* Enable MLEN for ports with non-standard MTUs, including the special
+ * header on the CPU port added above.
+ */
+ if (new_mtu != ETH_DATA_LEN)
+ regmap_set_bits(priv->gswip, GSWIP_MAC_CTRL_2p(port),
+ GSWIP_MAC_CTRL_2_MLEN);
+ else
+ regmap_clear_bits(priv->gswip, GSWIP_MAC_CTRL_2p(port),
+ GSWIP_MAC_CTRL_2_MLEN);
+
+ return 0;
+}
+
+static void gswip_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ struct gswip_priv *priv = ds->priv;
+
+ priv->hw_info->phylink_get_caps(ds, port, config);
+}
+
+static void gswip_port_set_link(struct gswip_priv *priv, int port, bool link)
+{
+ u32 mdio_phy;
+
+ if (link)
+ mdio_phy = GSWIP_MDIO_PHY_LINK_UP;
+ else
+ mdio_phy = GSWIP_MDIO_PHY_LINK_DOWN;
+
+ regmap_write_bits(priv->mdio, GSWIP_MDIO_PHYp(port),
+ GSWIP_MDIO_PHY_LINK_MASK, mdio_phy);
+}
+
+static void gswip_port_set_speed(struct gswip_priv *priv, int port, int speed,
+ phy_interface_t interface)
+{
+ u32 mdio_phy = 0, mii_cfg = 0, mac_ctrl_0 = 0;
+
+ switch (speed) {
+ case SPEED_10:
+ mdio_phy = GSWIP_MDIO_PHY_SPEED_M10;
+
+ if (interface == PHY_INTERFACE_MODE_RMII)
+ mii_cfg = GSWIP_MII_CFG_RATE_M50;
+ else
+ mii_cfg = GSWIP_MII_CFG_RATE_M2P5;
+
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII;
+ break;
+
+ case SPEED_100:
+ mdio_phy = GSWIP_MDIO_PHY_SPEED_M100;
+
+ if (interface == PHY_INTERFACE_MODE_RMII)
+ mii_cfg = GSWIP_MII_CFG_RATE_M50;
+ else
+ mii_cfg = GSWIP_MII_CFG_RATE_M25;
+
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII;
+ break;
+
+ case SPEED_1000:
+ mdio_phy = GSWIP_MDIO_PHY_SPEED_G1;
+
+ mii_cfg = GSWIP_MII_CFG_RATE_M125;
+
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_RGMII;
+ break;
+ }
+
+ regmap_write_bits(priv->mdio, GSWIP_MDIO_PHYp(port),
+ GSWIP_MDIO_PHY_SPEED_MASK, mdio_phy);
+ gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_RATE_MASK, mii_cfg, port);
+ regmap_write_bits(priv->gswip, GSWIP_MAC_CTRL_0p(port),
+ GSWIP_MAC_CTRL_0_GMII_MASK, mac_ctrl_0);
+}
+
+static void gswip_port_set_duplex(struct gswip_priv *priv, int port, int duplex)
+{
+ u32 mac_ctrl_0, mdio_phy;
+
+ if (duplex == DUPLEX_FULL) {
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_EN;
+ mdio_phy = GSWIP_MDIO_PHY_FDUP_EN;
+ } else {
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_DIS;
+ mdio_phy = GSWIP_MDIO_PHY_FDUP_DIS;
+ }
+
+ regmap_write_bits(priv->gswip, GSWIP_MAC_CTRL_0p(port),
+ GSWIP_MAC_CTRL_0_FDUP_MASK, mac_ctrl_0);
+ regmap_write_bits(priv->mdio, GSWIP_MDIO_PHYp(port),
+ GSWIP_MDIO_PHY_FDUP_MASK, mdio_phy);
+}
+
+static void gswip_port_set_pause(struct gswip_priv *priv, int port,
+ bool tx_pause, bool rx_pause)
+{
+ u32 mac_ctrl_0, mdio_phy;
+
+ if (tx_pause && rx_pause) {
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RXTX;
+ mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN |
+ GSWIP_MDIO_PHY_FCONRX_EN;
+ } else if (tx_pause) {
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_TX;
+ mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN |
+ GSWIP_MDIO_PHY_FCONRX_DIS;
+ } else if (rx_pause) {
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RX;
+ mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS |
+ GSWIP_MDIO_PHY_FCONRX_EN;
+ } else {
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_NONE;
+ mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS |
+ GSWIP_MDIO_PHY_FCONRX_DIS;
+ }
+
+ regmap_write_bits(priv->gswip, GSWIP_MAC_CTRL_0p(port),
+ GSWIP_MAC_CTRL_0_FCON_MASK, mac_ctrl_0);
+ regmap_write_bits(priv->mdio, GSWIP_MDIO_PHYp(port),
+ GSWIP_MDIO_PHY_FCONTX_MASK | GSWIP_MDIO_PHY_FCONRX_MASK,
+ mdio_phy);
+}
+
+static void gswip_phylink_mac_config(struct phylink_config *config,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct gswip_priv *priv = dp->ds->priv;
+ int port = dp->index;
+ u32 miicfg = 0;
+
+ miicfg |= GSWIP_MII_CFG_LDCLKDIS;
+
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ return;
+ case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_INTERNAL:
+ miicfg |= GSWIP_MII_CFG_MODE_MIIM;
+ break;
+ case PHY_INTERFACE_MODE_REVMII:
+ miicfg |= GSWIP_MII_CFG_MODE_MIIP;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ miicfg |= GSWIP_MII_CFG_MODE_RMIIM;
+ if (of_property_read_bool(dp->dn, "maxlinear,rmii-refclk-out"))
+ miicfg |= GSWIP_MII_CFG_RMII_CLK;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ miicfg |= GSWIP_MII_CFG_MODE_RGMII;
+ break;
+ case PHY_INTERFACE_MODE_GMII:
+ miicfg |= GSWIP_MII_CFG_MODE_GMII;
+ break;
+ default:
+ dev_err(dp->ds->dev,
+ "Unsupported interface: %d\n", state->interface);
+ return;
+ }
+
+ gswip_mii_mask_cfg(priv,
+ GSWIP_MII_CFG_MODE_MASK | GSWIP_MII_CFG_RMII_CLK |
+ GSWIP_MII_CFG_RGMII_IBS | GSWIP_MII_CFG_LDCLKDIS,
+ miicfg, port);
+
+ gswip_mii_delay_setup(priv, dp, state->interface);
+}
+
+static void gswip_phylink_mac_link_down(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct gswip_priv *priv = dp->ds->priv;
+ int port = dp->index;
+
+ gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, port);
+
+ if (!dsa_port_is_cpu(dp))
+ gswip_port_set_link(priv, port, false);
+}
+
+static void gswip_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
+ unsigned int mode,
+ phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct gswip_priv *priv = dp->ds->priv;
+ int port = dp->index;
+
+ if (!dsa_port_is_cpu(dp) || interface != PHY_INTERFACE_MODE_INTERNAL) {
+ gswip_port_set_link(priv, port, true);
+ gswip_port_set_speed(priv, port, speed, interface);
+ gswip_port_set_duplex(priv, port, duplex);
+ gswip_port_set_pause(priv, port, tx_pause, rx_pause);
+ }
+
+ gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, GSWIP_MII_CFG_EN, port);
+}
+
+static void gswip_get_strings(struct dsa_switch *ds, int port, u32 stringset,
+ uint8_t *data)
+{
+ int i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++)
+ ethtool_puts(&data, gswip_rmon_cnt[i].name);
+}
+
+static u32 gswip_bcm_ram_entry_read(struct gswip_priv *priv, u32 table,
+ u32 index)
+{
+ u32 result, val;
+ int err;
+
+ regmap_write(priv->gswip, GSWIP_BM_RAM_ADDR, index);
+ regmap_write_bits(priv->gswip, GSWIP_BM_RAM_CTRL,
+ GSWIP_BM_RAM_CTRL_ADDR_MASK | GSWIP_BM_RAM_CTRL_OPMOD |
+ GSWIP_BM_RAM_CTRL_BAS,
+ table | GSWIP_BM_RAM_CTRL_BAS);
+
+ err = gswip_switch_r_timeout(priv, GSWIP_BM_RAM_CTRL,
+ GSWIP_BM_RAM_CTRL_BAS);
+ if (err) {
+ dev_err(priv->dev, "timeout while reading table: %u, index: %u\n",
+ table, index);
+ return 0;
+ }
+
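+	/* The 32-bit counter is returned as two 16-bit halves; combine the
+	 * low and high RAM value registers.
+	 */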
+ regmap_read(priv->gswip, GSWIP_BM_RAM_VAL(0), &result);
+ regmap_read(priv->gswip, GSWIP_BM_RAM_VAL(1), &val);
+ result |= val << 16;
+
+ return result;
+}
+
+static void gswip_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data)
+{
+ struct gswip_priv *priv = ds->priv;
+ const struct gswip_rmon_cnt_desc *rmon_cnt;
+ int i;
+ u64 high;
+
+ for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++) {
+ rmon_cnt = &gswip_rmon_cnt[i];
+
+ data[i] = gswip_bcm_ram_entry_read(priv, port,
+ rmon_cnt->offset);
+ if (rmon_cnt->size == 2) {
+ high = gswip_bcm_ram_entry_read(priv, port,
+ rmon_cnt->offset + 1);
+ data[i] |= high << 32;
+ }
+ }
+}
+
+static int gswip_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ if (sset != ETH_SS_STATS)
+ return 0;
+
+ return ARRAY_SIZE(gswip_rmon_cnt);
+}
+
+static int gswip_set_mac_eee(struct dsa_switch *ds, int port,
+ struct ethtool_keee *e)
+{
+ if (e->tx_lpi_timer > 0x7f)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void gswip_phylink_mac_disable_tx_lpi(struct phylink_config *config)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct gswip_priv *priv = dp->ds->priv;
+
+ regmap_clear_bits(priv->gswip, GSWIP_MAC_CTRL_4p(dp->index),
+ GSWIP_MAC_CTRL_4_LPIEN);
+}
+
+static int gswip_phylink_mac_enable_tx_lpi(struct phylink_config *config,
+ u32 timer, bool tx_clock_stop)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct gswip_priv *priv = dp->ds->priv;
+
+ return regmap_update_bits(priv->gswip, GSWIP_MAC_CTRL_4p(dp->index),
+ GSWIP_MAC_CTRL_4_LPIEN |
+ GSWIP_MAC_CTRL_4_GWAIT_MASK |
+ GSWIP_MAC_CTRL_4_WAIT_MASK,
+ GSWIP_MAC_CTRL_4_LPIEN |
+ GSWIP_MAC_CTRL_4_GWAIT(timer) |
+ GSWIP_MAC_CTRL_4_WAIT(timer));
+}
+
+static bool gswip_support_eee(struct dsa_switch *ds, int port)
+{
+ struct gswip_priv *priv = ds->priv;
+
+ if (GSWIP_VERSION_GE(priv, GSWIP_VERSION_2_2))
+ return true;
+
+ return false;
+}
+
+static struct phylink_pcs *gswip_phylink_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct gswip_priv *priv = dp->ds->priv;
+
+ if (priv->hw_info->mac_select_pcs)
+ return priv->hw_info->mac_select_pcs(config, interface);
+
+ return NULL;
+}
+
+static const struct phylink_mac_ops gswip_phylink_mac_ops = {
+ .mac_config = gswip_phylink_mac_config,
+ .mac_link_down = gswip_phylink_mac_link_down,
+ .mac_link_up = gswip_phylink_mac_link_up,
+ .mac_disable_tx_lpi = gswip_phylink_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = gswip_phylink_mac_enable_tx_lpi,
+ .mac_select_pcs = gswip_phylink_mac_select_pcs,
+};
+
+static const struct dsa_switch_ops gswip_switch_ops = {
+ .get_tag_protocol = gswip_get_tag_protocol,
+ .setup = gswip_setup,
+ .port_setup = gswip_port_setup,
+ .port_enable = gswip_port_enable,
+ .port_disable = gswip_port_disable,
+ .port_pre_bridge_flags = gswip_port_pre_bridge_flags,
+ .port_bridge_flags = gswip_port_bridge_flags,
+ .port_bridge_join = gswip_port_bridge_join,
+ .port_bridge_leave = gswip_port_bridge_leave,
+ .port_fast_age = gswip_port_fast_age,
+ .port_vlan_filtering = gswip_port_vlan_filtering,
+ .port_vlan_add = gswip_port_vlan_add,
+ .port_vlan_del = gswip_port_vlan_del,
+ .port_stp_state_set = gswip_port_stp_state_set,
+ .port_fdb_add = gswip_port_fdb_add,
+ .port_fdb_del = gswip_port_fdb_del,
+ .port_fdb_dump = gswip_port_fdb_dump,
+ .port_change_mtu = gswip_port_change_mtu,
+ .port_max_mtu = gswip_port_max_mtu,
+ .phylink_get_caps = gswip_phylink_get_caps,
+ .get_strings = gswip_get_strings,
+ .get_ethtool_stats = gswip_get_ethtool_stats,
+ .get_sset_count = gswip_get_sset_count,
+ .set_mac_eee = gswip_set_mac_eee,
+ .support_eee = gswip_support_eee,
+ .port_hsr_join = dsa_port_simple_hsr_join,
+ .port_hsr_leave = dsa_port_simple_hsr_leave,
+};
+
+void gswip_disable_switch(struct gswip_priv *priv)
+{
+ regmap_clear_bits(priv->mdio, GSWIP_MDIO_GLOB, GSWIP_MDIO_GLOB_ENABLE);
+}
+EXPORT_SYMBOL_GPL(gswip_disable_switch);
+
+static int gswip_validate_cpu_port(struct dsa_switch *ds)
+{
+ struct gswip_priv *priv = ds->priv;
+ struct dsa_port *cpu_dp;
+ int cpu_port = -1;
+
+ dsa_switch_for_each_cpu_port(cpu_dp, ds) {
+ if (cpu_port != -1)
+ return dev_err_probe(ds->dev, -EINVAL,
+ "only a single CPU port is supported\n");
+
+ cpu_port = cpu_dp->index;
+ }
+
+ if (cpu_port == -1)
+ return dev_err_probe(ds->dev, -EINVAL, "no CPU port defined\n");
+
+ if (BIT(cpu_port) & ~priv->hw_info->allowed_cpu_ports)
+ return dev_err_probe(ds->dev, -EINVAL,
+ "unsupported CPU port defined\n");
+
+ return 0;
+}
+
+int gswip_probe_common(struct gswip_priv *priv, u32 version)
+{
+ int err;
+
+ mutex_init(&priv->pce_table_lock);
+
+ priv->ds = devm_kzalloc(priv->dev, sizeof(*priv->ds), GFP_KERNEL);
+ if (!priv->ds)
+ return -ENOMEM;
+
+ priv->ds->dev = priv->dev;
+ priv->ds->num_ports = priv->hw_info->max_ports;
+ priv->ds->ops = &gswip_switch_ops;
+ priv->ds->phylink_mac_ops = &gswip_phylink_mac_ops;
+ priv->ds->priv = priv;
+
+	/* The hardware has the 'major/minor' version bytes in the wrong order,
+	 * preventing numerical comparisons. Construct a 16-bit unsigned integer
+	 * with the REV field as the most significant byte and the MOD field as
+	 * the least significant byte. This effectively swaps the two bytes of
+	 * the version value but, unlike swab16(), leaves the source variable
+	 * untouched.
+	 */
+ priv->version = GSWIP_VERSION_REV(version) << 8 |
+ GSWIP_VERSION_MOD(version);
+
+ err = dsa_register_switch(priv->ds);
+ if (err)
+ return dev_err_probe(priv->dev, err, "dsa switch registration failed\n");
+
+ err = gswip_validate_cpu_port(priv->ds);
+ if (err)
+ goto disable_switch;
+
+ dev_info(priv->dev, "probed GSWIP version %lx mod %lx\n",
+ GSWIP_VERSION_REV(version), GSWIP_VERSION_MOD(version));
+
+ return 0;
+
+disable_switch:
+ gswip_disable_switch(priv);
+ dsa_unregister_switch(priv->ds);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(gswip_probe_common);
+
+MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>");
+MODULE_AUTHOR("Daniel Golle <daniel@makrotopia.org>");
+MODULE_DESCRIPTION("Lantiq / Intel / MaxLinear GSWIP common functions");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/lantiq_pce.h b/drivers/net/dsa/lantiq/lantiq_pce.h
index e2be31f3672a..659f9a0638d9 100644
--- a/drivers/net/dsa/lantiq_pce.h
+++ b/drivers/net/dsa/lantiq/lantiq_pce.h
@@ -7,6 +7,8 @@
* Copyright (C) 2017 - 2018 Hauke Mehrtens <hauke@hauke-m.de>
*/
+#include "lantiq_gswip.h"
+
enum {
OUT_MAC0 = 0,
OUT_MAC1,
@@ -74,13 +76,6 @@ enum {
FLAG_NO, /*13*/
};
-struct gswip_pce_microcode {
- u16 val_3;
- u16 val_2;
- u16 val_1;
- u16 val_0;
-};
-
#define MC_ENTRY(val, msk, ns, out, len, type, flags, ipv4_len) \
{ val, msk, ((ns) << 10 | (out) << 4 | (len) >> 1),\
((len) & 1) << 15 | (type) << 13 | (flags) << 9 | (ipv4_len) << 8 }
diff --git a/drivers/net/dsa/lantiq/mxl-gsw1xx.c b/drivers/net/dsa/lantiq/mxl-gsw1xx.c
new file mode 100644
index 000000000000..0816c61a47f1
--- /dev/null
+++ b/drivers/net/dsa/lantiq/mxl-gsw1xx.c
@@ -0,0 +1,733 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* DSA Driver for MaxLinear GSW1xx switch devices
+ *
+ * Copyright (C) 2025 Daniel Golle <daniel@makrotopia.org>
+ * Copyright (C) 2023 - 2024 MaxLinear Inc.
+ * Copyright (C) 2022 Snap One, LLC. All rights reserved.
+ * Copyright (C) 2017 - 2019 Hauke Mehrtens <hauke@hauke-m.de>
+ * Copyright (C) 2012 John Crispin <john@phrozen.org>
+ * Copyright (C) 2010 Lantiq Deutschland
+ */
+
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+#include <linux/regmap.h>
+#include <net/dsa.h>
+
+#include "lantiq_gswip.h"
+#include "mxl-gsw1xx.h"
+#include "mxl-gsw1xx_pce.h"
+
+struct gsw1xx_priv {
+ struct mdio_device *mdio_dev;
+ int smdio_badr;
+ struct regmap *sgmii;
+ struct regmap *gpio;
+ struct regmap *clk;
+ struct regmap *shell;
+ struct phylink_pcs pcs;
+ phy_interface_t tbi_interface;
+ struct gswip_priv gswip;
+};
+
+static int gsw1xx_config_smdio_badr(struct gsw1xx_priv *priv,
+ unsigned int reg)
+{
+ struct mii_bus *bus = priv->mdio_dev->bus;
+ int sw_addr = priv->mdio_dev->addr;
+ int smdio_badr = priv->smdio_badr;
+ int res;
+
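+	/* Switch registers are reached through a small SMDIO window;
+	 * reprogram the base address register whenever the target register
+	 * falls outside the currently mapped window.
+	 */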
+ if (smdio_badr == GSW1XX_SMDIO_BADR_UNKNOWN ||
+ reg - smdio_badr >= GSW1XX_SMDIO_BADR ||
+ smdio_badr > reg) {
+ /* Configure the Switch Base Address */
+ smdio_badr = reg & ~GENMASK(3, 0);
+ res = __mdiobus_write(bus, sw_addr, GSW1XX_SMDIO_BADR, smdio_badr);
+ if (res < 0) {
+ dev_err(&priv->mdio_dev->dev,
+ "%s: Error %d, configuring switch base\n",
+ __func__, res);
+ return res;
+ }
+ priv->smdio_badr = smdio_badr;
+ }
+
+ return smdio_badr;
+}
+
+static int gsw1xx_regmap_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ struct gsw1xx_priv *priv = context;
+ struct mii_bus *bus = priv->mdio_dev->bus;
+ int sw_addr = priv->mdio_dev->addr;
+ int smdio_badr;
+ int res;
+
+ smdio_badr = gsw1xx_config_smdio_badr(priv, reg);
+ if (smdio_badr < 0)
+ return smdio_badr;
+
+ res = __mdiobus_read(bus, sw_addr, reg - smdio_badr);
+ if (res < 0) {
+ dev_err(&priv->mdio_dev->dev, "%s: Error %d reading 0x%x\n",
+ __func__, res, reg);
+ return res;
+ }
+
+ *val = res;
+
+ return 0;
+}
+
+static int gsw1xx_regmap_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct gsw1xx_priv *priv = context;
+ struct mii_bus *bus = priv->mdio_dev->bus;
+ int sw_addr = priv->mdio_dev->addr;
+ int smdio_badr;
+ int res;
+
+ smdio_badr = gsw1xx_config_smdio_badr(priv, reg);
+ if (smdio_badr < 0)
+ return smdio_badr;
+
+ res = __mdiobus_write(bus, sw_addr, reg - smdio_badr, val);
+ if (res < 0)
+ dev_err(&priv->mdio_dev->dev,
+ "%s: Error %d, writing 0x%x:0x%x\n", __func__, res, reg,
+ val);
+
+ return res;
+}
+
+static const struct regmap_bus gsw1xx_regmap_bus = {
+ .reg_write = gsw1xx_regmap_write,
+ .reg_read = gsw1xx_regmap_read,
+};
+
+static void gsw1xx_mdio_regmap_lock(void *mdio_lock)
+{
+ mutex_lock_nested(mdio_lock, MDIO_MUTEX_NESTED);
+}
+
+static void gsw1xx_mdio_regmap_unlock(void *mdio_lock)
+{
+ mutex_unlock(mdio_lock);
+}
+
+static unsigned int gsw1xx_pcs_inband_caps(struct phylink_pcs *pcs,
+ phy_interface_t interface)
+{
+ return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
+}
+
+static struct gsw1xx_priv *pcs_to_gsw1xx(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct gsw1xx_priv, pcs);
+}
+
+static int gsw1xx_pcs_enable(struct phylink_pcs *pcs)
+{
+ struct gsw1xx_priv *priv = pcs_to_gsw1xx(pcs);
+
+ /* Deassert SGMII shell reset */
+ return regmap_clear_bits(priv->shell, GSW1XX_SHELL_RST_REQ,
+ GSW1XX_RST_REQ_SGMII_SHELL);
+}
+
+static void gsw1xx_pcs_disable(struct phylink_pcs *pcs)
+{
+ struct gsw1xx_priv *priv = pcs_to_gsw1xx(pcs);
+
+ /* Assert SGMII shell reset */
+ regmap_set_bits(priv->shell, GSW1XX_SHELL_RST_REQ,
+ GSW1XX_RST_REQ_SGMII_SHELL);
+
+ priv->tbi_interface = PHY_INTERFACE_MODE_NA;
+}
+
+static void gsw1xx_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
+ struct phylink_link_state *state)
+{
+ struct gsw1xx_priv *priv = pcs_to_gsw1xx(pcs);
+ int ret;
+ u32 val;
+
+ ret = regmap_read(priv->sgmii, GSW1XX_SGMII_TBI_TBISTAT, &val);
+ if (ret < 0)
+ return;
+
+ state->link = !!(val & GSW1XX_SGMII_TBI_TBISTAT_LINK);
+ state->an_complete = !!(val & GSW1XX_SGMII_TBI_TBISTAT_AN_COMPLETE);
+
+ ret = regmap_read(priv->sgmii, GSW1XX_SGMII_TBI_LPSTAT, &val);
+ if (ret < 0)
+ return;
+
+ state->duplex = (val & GSW1XX_SGMII_TBI_LPSTAT_DUPLEX) ?
+ DUPLEX_FULL : DUPLEX_HALF;
+ if (val & GSW1XX_SGMII_TBI_LPSTAT_PAUSE_RX)
+ state->pause |= MLO_PAUSE_RX;
+
+ if (val & GSW1XX_SGMII_TBI_LPSTAT_PAUSE_TX)
+ state->pause |= MLO_PAUSE_TX;
+
+ switch (FIELD_GET(GSW1XX_SGMII_TBI_LPSTAT_SPEED, val)) {
+ case GSW1XX_SGMII_TBI_LPSTAT_SPEED_10:
+ state->speed = SPEED_10;
+ break;
+ case GSW1XX_SGMII_TBI_LPSTAT_SPEED_100:
+ state->speed = SPEED_100;
+ break;
+ case GSW1XX_SGMII_TBI_LPSTAT_SPEED_1000:
+ state->speed = SPEED_1000;
+ break;
+ case GSW1XX_SGMII_TBI_LPSTAT_SPEED_NOSGMII:
+ if (state->interface == PHY_INTERFACE_MODE_1000BASEX)
+ state->speed = SPEED_1000;
+ else if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
+ state->speed = SPEED_2500;
+ else
+ state->speed = SPEED_UNKNOWN;
+ break;
+ }
+}
+
+static int gsw1xx_pcs_phy_xaui_write(struct gsw1xx_priv *priv, u16 addr,
+ u16 data)
+{
+ int ret, val;
+
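+	/* Indirect access to the SerDes PHY: latch the data and address
+	 * registers, trigger the write command and poll for completion.
+	 */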
+ ret = regmap_write(priv->sgmii, GSW1XX_SGMII_PHY_D, data);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(priv->sgmii, GSW1XX_SGMII_PHY_A, addr);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(priv->sgmii, GSW1XX_SGMII_PHY_C,
+ GSW1XX_SGMII_PHY_WRITE |
+ GSW1XX_SGMII_PHY_RESET_N);
+ if (ret < 0)
+ return ret;
+
+ return regmap_read_poll_timeout(priv->sgmii, GSW1XX_SGMII_PHY_C,
+ val, val & GSW1XX_SGMII_PHY_STATUS,
+ 1000, 100000);
+}
+
+static int gsw1xx_pcs_reset(struct gsw1xx_priv *priv)
+{
+ int ret;
+ u16 val;
+
+ /* Assert and deassert SGMII shell reset */
+ ret = regmap_set_bits(priv->shell, GSW1XX_SHELL_RST_REQ,
+ GSW1XX_RST_REQ_SGMII_SHELL);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_clear_bits(priv->shell, GSW1XX_SHELL_RST_REQ,
+ GSW1XX_RST_REQ_SGMII_SHELL);
+ if (ret < 0)
+ return ret;
+
+ /* Hardware Bringup FSM Enable */
+ ret = regmap_write(priv->sgmii, GSW1XX_SGMII_PHY_HWBU_CTRL,
+ GSW1XX_SGMII_PHY_HWBU_CTRL_EN_HWBU_FSM |
+ GSW1XX_SGMII_PHY_HWBU_CTRL_HW_FSM_EN);
+ if (ret < 0)
+ return ret;
+
+ /* Configure SGMII PHY Receiver */
+ val = FIELD_PREP(GSW1XX_SGMII_PHY_RX0_CFG2_EQ,
+ GSW1XX_SGMII_PHY_RX0_CFG2_EQ_DEF) |
+ GSW1XX_SGMII_PHY_RX0_CFG2_LOS_EN |
+ GSW1XX_SGMII_PHY_RX0_CFG2_TERM_EN |
+ FIELD_PREP(GSW1XX_SGMII_PHY_RX0_CFG2_FILT_CNT,
+ GSW1XX_SGMII_PHY_RX0_CFG2_FILT_CNT_DEF);
+
+ /* TODO: Take care of inverted RX pair once generic property is
+ * available
+ */
+
+ ret = regmap_write(priv->sgmii, GSW1XX_SGMII_PHY_RX0_CFG2, val);
+ if (ret < 0)
+ return ret;
+
+ val = FIELD_PREP(GSW1XX_SGMII_PHY_TX0_CFG3_VBOOST_LEVEL,
+ GSW1XX_SGMII_PHY_TX0_CFG3_VBOOST_LEVEL_DEF);
+
+ /* TODO: Take care of inverted TX pair once generic property is
+ * available
+ */
+
+ ret = regmap_write(priv->sgmii, GSW1XX_SGMII_PHY_TX0_CFG3, val);
+ if (ret < 0)
+ return ret;
+
+ /* Reset and Release TBI */
+ val = GSW1XX_SGMII_TBI_TBICTL_INITTBI | GSW1XX_SGMII_TBI_TBICTL_ENTBI |
+ GSW1XX_SGMII_TBI_TBICTL_CRSTRR | GSW1XX_SGMII_TBI_TBICTL_CRSOFF;
+ ret = regmap_write(priv->sgmii, GSW1XX_SGMII_TBI_TBICTL, val);
+ if (ret < 0)
+ return ret;
+ val &= ~GSW1XX_SGMII_TBI_TBICTL_INITTBI;
+ ret = regmap_write(priv->sgmii, GSW1XX_SGMII_TBI_TBICTL, val);
+ if (ret < 0)
+ return ret;
+
+ /* Release Tx Data Buffers */
+ ret = regmap_set_bits(priv->sgmii, GSW1XX_SGMII_PCS_TXB_CTL,
+ GSW1XX_SGMII_PCS_TXB_CTL_INIT_TX_TXB);
+ if (ret < 0)
+ return ret;
+ ret = regmap_clear_bits(priv->sgmii, GSW1XX_SGMII_PCS_TXB_CTL,
+ GSW1XX_SGMII_PCS_TXB_CTL_INIT_TX_TXB);
+ if (ret < 0)
+ return ret;
+
+ /* Release Rx Data Buffers */
+ ret = regmap_set_bits(priv->sgmii, GSW1XX_SGMII_PCS_RXB_CTL,
+ GSW1XX_SGMII_PCS_RXB_CTL_INIT_RX_RXB);
+ if (ret < 0)
+ return ret;
+ return regmap_clear_bits(priv->sgmii, GSW1XX_SGMII_PCS_RXB_CTL,
+ GSW1XX_SGMII_PCS_RXB_CTL_INIT_RX_RXB);
+}
+
+static int gsw1xx_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ struct gsw1xx_priv *priv = pcs_to_gsw1xx(pcs);
+ u16 txaneg, anegctl, nco_ctrl;
+ bool reconf = false;
+ int ret = 0;
+
+	/* Do not unnecessarily disrupt the link: skip resetting the hardware
+	 * in case the PCS has previously been successfully configured for
+	 * this interface mode.
+	 */
+ if (priv->tbi_interface == interface)
+ reconf = true;
+
+ /* mark PCS configuration as incomplete */
+ priv->tbi_interface = PHY_INTERFACE_MODE_NA;
+
+ if (!reconf)
+ ret = gsw1xx_pcs_reset(priv);
+
+ if (ret)
+ return ret;
+
+	/* Override the bootstrap pin settings:
+	 * OVRANEG makes the ANEG mode, ANEG enable and ANEG restart be
+	 * taken from the ANMODE, ANEGEN and RANEG bits of the ANEGCTL
+	 * register. OVRABL makes the ability bits in tx_config_reg be
+	 * taken from the TXANEGH and TXANEGL registers.
+	 */
+ anegctl = GSW1XX_SGMII_TBI_ANEGCTL_OVRANEG |
+ GSW1XX_SGMII_TBI_ANEGCTL_OVRABL;
+
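+	/* Map the link timer duration requested by phylink for this
+	 * interface mode to one of the fixed hardware timer settings.
+	 */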
+ switch (phylink_get_link_timer_ns(interface)) {
+ case 10000:
+ anegctl |= FIELD_PREP(GSW1XX_SGMII_TBI_ANEGCTL_LT,
+ GSW1XX_SGMII_TBI_ANEGCTL_LT_10US);
+ break;
+ case 1600000:
+ anegctl |= FIELD_PREP(GSW1XX_SGMII_TBI_ANEGCTL_LT,
+ GSW1XX_SGMII_TBI_ANEGCTL_LT_1_6MS);
+ break;
+ case 5000000:
+ anegctl |= FIELD_PREP(GSW1XX_SGMII_TBI_ANEGCTL_LT,
+ GSW1XX_SGMII_TBI_ANEGCTL_LT_5MS);
+ break;
+ case 10000000:
+ anegctl |= FIELD_PREP(GSW1XX_SGMII_TBI_ANEGCTL_LT,
+ GSW1XX_SGMII_TBI_ANEGCTL_LT_10MS);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (neg_mode & PHYLINK_PCS_NEG_INBAND)
+ anegctl |= GSW1XX_SGMII_TBI_ANEGCTL_ANEGEN;
+
+ txaneg = phylink_mii_c22_pcs_encode_advertisement(interface, advertising);
+
+ if (interface == PHY_INTERFACE_MODE_SGMII) {
+ /* lacking a defined reverse-SGMII interface mode this
+ * driver only supports SGMII (MAC side) for now
+ */
+ anegctl |= FIELD_PREP(GSW1XX_SGMII_TBI_ANEGCTL_ANMODE,
+ GSW1XX_SGMII_TBI_ANEGCTL_ANMODE_SGMII_MAC);
+ txaneg |= ADVERTISE_LPACK;
+ } else if (interface == PHY_INTERFACE_MODE_1000BASEX ||
+ interface == PHY_INTERFACE_MODE_2500BASEX) {
+ anegctl |= FIELD_PREP(GSW1XX_SGMII_TBI_ANEGCTL_ANMODE,
+ GSW1XX_SGMII_TBI_ANEGCTL_ANMODE_1000BASEX);
+ } else {
+ dev_err(priv->gswip.dev, "%s: wrong interface mode %s\n",
+ __func__, phy_modes(interface));
+ return -EINVAL;
+ }
+
+ ret = regmap_write(priv->sgmii, GSW1XX_SGMII_TBI_TXANEGH,
+ FIELD_GET(GENMASK(15, 8), txaneg));
+ if (ret < 0)
+ return ret;
+ ret = regmap_write(priv->sgmii, GSW1XX_SGMII_TBI_TXANEGL,
+ FIELD_GET(GENMASK(7, 0), txaneg));
+ if (ret < 0)
+ return ret;
+ ret = regmap_write(priv->sgmii, GSW1XX_SGMII_TBI_ANEGCTL, anegctl);
+ if (ret < 0)
+ return ret;
+
+ if (!reconf) {
+ /* setup SerDes clock speed */
+ if (interface == PHY_INTERFACE_MODE_2500BASEX)
+ nco_ctrl = GSW1XX_SGMII_2G5 | GSW1XX_SGMII_2G5_NCO2;
+ else
+ nco_ctrl = GSW1XX_SGMII_1G | GSW1XX_SGMII_1G_NCO1;
+
+ ret = regmap_update_bits(priv->clk, GSW1XX_CLK_NCO_CTRL,
+ GSW1XX_SGMII_HSP_MASK |
+ GSW1XX_SGMII_SEL,
+ nco_ctrl);
+ if (ret)
+ return ret;
+
+ ret = gsw1xx_pcs_phy_xaui_write(priv, 0x30, 0x80);
+ if (ret)
+ return ret;
+ }
+
+ /* PCS configuration has now been completed, store mode to prevent
+ * disrupting the link in case of future calls of this function for the
+ * same interface mode.
+ */
+ priv->tbi_interface = interface;
+
+ return 0;
+}
+
+static void gsw1xx_pcs_an_restart(struct phylink_pcs *pcs)
+{
+ struct gsw1xx_priv *priv = pcs_to_gsw1xx(pcs);
+
+ regmap_set_bits(priv->sgmii, GSW1XX_SGMII_TBI_ANEGCTL,
+ GSW1XX_SGMII_TBI_ANEGCTL_RANEG);
+}
+
+static void gsw1xx_pcs_link_up(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
+ phy_interface_t interface, int speed,
+ int duplex)
+{
+ struct gsw1xx_priv *priv = pcs_to_gsw1xx(pcs);
+ u16 lpstat;
+
+ /* When in-band AN is enabled hardware will set lpstat */
+ if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED)
+ return;
+
+ /* Force speed and duplex settings */
+ if (interface == PHY_INTERFACE_MODE_SGMII) {
+ if (speed == SPEED_10)
+ lpstat = FIELD_PREP(GSW1XX_SGMII_TBI_LPSTAT_SPEED,
+ GSW1XX_SGMII_TBI_LPSTAT_SPEED_10);
+ else if (speed == SPEED_100)
+ lpstat = FIELD_PREP(GSW1XX_SGMII_TBI_LPSTAT_SPEED,
+ GSW1XX_SGMII_TBI_LPSTAT_SPEED_100);
+ else
+ lpstat = FIELD_PREP(GSW1XX_SGMII_TBI_LPSTAT_SPEED,
+ GSW1XX_SGMII_TBI_LPSTAT_SPEED_1000);
+ } else {
+ lpstat = FIELD_PREP(GSW1XX_SGMII_TBI_LPSTAT_SPEED,
+ GSW1XX_SGMII_TBI_LPSTAT_SPEED_NOSGMII);
+ }
+
+ if (duplex == DUPLEX_FULL)
+ lpstat |= GSW1XX_SGMII_TBI_LPSTAT_DUPLEX;
+
+ regmap_write(priv->sgmii, GSW1XX_SGMII_TBI_LPSTAT, lpstat);
+}
+
+static const struct phylink_pcs_ops gsw1xx_pcs_ops = {
+ .pcs_inband_caps = gsw1xx_pcs_inband_caps,
+ .pcs_enable = gsw1xx_pcs_enable,
+ .pcs_disable = gsw1xx_pcs_disable,
+ .pcs_get_state = gsw1xx_pcs_get_state,
+ .pcs_config = gsw1xx_pcs_config,
+ .pcs_an_restart = gsw1xx_pcs_an_restart,
+ .pcs_link_up = gsw1xx_pcs_link_up,
+};
+
+static void gsw1xx_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ struct gswip_priv *priv = ds->priv;
+
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000;
+
+ switch (port) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+ break;
+ case 4: /* port 4: SGMII */
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ config->supported_interfaces);
+ if (priv->hw_info->supports_2500m) {
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ config->supported_interfaces);
+ config->mac_capabilities |= MAC_2500FD;
+ }
+ return; /* no support for EEE on SGMII port */
+ case 5: /* port 5: RGMII or RMII */
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ config->supported_interfaces);
+ phy_interface_set_rgmii(config->supported_interfaces);
+ break;
+ }
+
+ config->lpi_capabilities = MAC_100FD | MAC_1000FD;
+ config->lpi_timer_default = 20;
+ memcpy(config->lpi_interfaces, config->supported_interfaces,
+ sizeof(config->lpi_interfaces));
+}
+
+static struct phylink_pcs *gsw1xx_phylink_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct gswip_priv *gswip_priv = dp->ds->priv;
+ struct gsw1xx_priv *gsw1xx_priv = container_of(gswip_priv,
+ struct gsw1xx_priv,
+ gswip);
+
+ switch (dp->index) {
+ case GSW1XX_SGMII_PORT:
+ return &gsw1xx_priv->pcs;
+ default:
+ return NULL;
+ }
+}
+
+static struct regmap *gsw1xx_regmap_init(struct gsw1xx_priv *priv,
+ const char *name,
+ unsigned int reg_base,
+ unsigned int max_register)
+{
+ const struct regmap_config config = {
+ .name = name,
+ .reg_bits = 16,
+ .val_bits = 16,
+ .reg_base = reg_base,
+ .max_register = max_register,
+ .lock = gsw1xx_mdio_regmap_lock,
+ .unlock = gsw1xx_mdio_regmap_unlock,
+ .lock_arg = &priv->mdio_dev->bus->mdio_lock,
+ };
+
+ return devm_regmap_init(&priv->mdio_dev->dev, &gsw1xx_regmap_bus,
+ priv, &config);
+}
+
+static int gsw1xx_probe(struct mdio_device *mdiodev)
+{
+ struct device *dev = &mdiodev->dev;
+ struct gsw1xx_priv *priv;
+ u32 version;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->mdio_dev = mdiodev;
+ priv->smdio_badr = GSW1XX_SMDIO_BADR_UNKNOWN;
+
+ priv->gswip.dev = dev;
+ priv->gswip.hw_info = of_device_get_match_data(dev);
+ if (!priv->gswip.hw_info)
+ return -EINVAL;
+
+ priv->gswip.gswip = gsw1xx_regmap_init(priv, "switch",
+ GSW1XX_SWITCH_BASE, 0xfff);
+ if (IS_ERR(priv->gswip.gswip))
+ return PTR_ERR(priv->gswip.gswip);
+
+ priv->gswip.mdio = gsw1xx_regmap_init(priv, "mdio", GSW1XX_MMDIO_BASE,
+ 0xff);
+ if (IS_ERR(priv->gswip.mdio))
+ return PTR_ERR(priv->gswip.mdio);
+
+ priv->gswip.mii = gsw1xx_regmap_init(priv, "mii", GSW1XX_RGMII_BASE,
+ 0xff);
+ if (IS_ERR(priv->gswip.mii))
+ return PTR_ERR(priv->gswip.mii);
+
+ priv->sgmii = gsw1xx_regmap_init(priv, "sgmii", GSW1XX_SGMII_BASE,
+ 0xfff);
+ if (IS_ERR(priv->sgmii))
+ return PTR_ERR(priv->sgmii);
+
+ priv->gpio = gsw1xx_regmap_init(priv, "gpio", GSW1XX_GPIO_BASE, 0xff);
+ if (IS_ERR(priv->gpio))
+ return PTR_ERR(priv->gpio);
+
+ priv->clk = gsw1xx_regmap_init(priv, "clk", GSW1XX_CLK_BASE, 0xff);
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
+ priv->shell = gsw1xx_regmap_init(priv, "shell", GSW1XX_SHELL_BASE,
+ 0xff);
+ if (IS_ERR(priv->shell))
+ return PTR_ERR(priv->shell);
+
+ priv->pcs.ops = &gsw1xx_pcs_ops;
+ priv->pcs.poll = true;
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ priv->pcs.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ priv->pcs.supported_interfaces);
+ if (priv->gswip.hw_info->supports_2500m)
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ priv->pcs.supported_interfaces);
+ priv->tbi_interface = PHY_INTERFACE_MODE_NA;
+
+ /* assert SGMII reset to power down SGMII unit */
+ ret = regmap_set_bits(priv->shell, GSW1XX_SHELL_RST_REQ,
+ GSW1XX_RST_REQ_SGMII_SHELL);
+ if (ret < 0)
+ return ret;
+
+	/* Configure the GPIO pin-mux for MMDIO in case an external PHY is
+	 * connected to the SGMII or RGMII slave interface
+	 */
+ regmap_set_bits(priv->gpio, GPIO_ALTSEL0, 3);
+ regmap_set_bits(priv->gpio, GPIO_ALTSEL1, 3);
+
+ ret = regmap_read(priv->gswip.gswip, GSWIP_VERSION, &version);
+ if (ret)
+ return ret;
+
+ ret = gswip_probe_common(&priv->gswip, version);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dev, &priv->gswip);
+
+ return 0;
+}
+
+static void gsw1xx_remove(struct mdio_device *mdiodev)
+{
+ struct gswip_priv *priv = dev_get_drvdata(&mdiodev->dev);
+
+ if (!priv)
+ return;
+
+ gswip_disable_switch(priv);
+
+ dsa_unregister_switch(priv->ds);
+}
+
+static void gsw1xx_shutdown(struct mdio_device *mdiodev)
+{
+ struct gswip_priv *priv = dev_get_drvdata(&mdiodev->dev);
+
+ if (!priv)
+ return;
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+
+ gswip_disable_switch(priv);
+}
+
+static const struct gswip_hw_info gsw12x_data = {
+ .max_ports = GSW1XX_PORTS,
+ .allowed_cpu_ports = BIT(GSW1XX_MII_PORT) | BIT(GSW1XX_SGMII_PORT),
+ .mii_ports = BIT(GSW1XX_MII_PORT),
+ .mii_port_reg_offset = -GSW1XX_MII_PORT,
+ .mac_select_pcs = gsw1xx_phylink_mac_select_pcs,
+ .phylink_get_caps = &gsw1xx_phylink_get_caps,
+ .supports_2500m = true,
+ .pce_microcode = &gsw1xx_pce_microcode,
+ .pce_microcode_size = ARRAY_SIZE(gsw1xx_pce_microcode),
+ .tag_protocol = DSA_TAG_PROTO_MXL_GSW1XX,
+};
+
+static const struct gswip_hw_info gsw140_data = {
+ .max_ports = GSW1XX_PORTS,
+ .allowed_cpu_ports = BIT(GSW1XX_MII_PORT) | BIT(GSW1XX_SGMII_PORT),
+ .mii_ports = BIT(GSW1XX_MII_PORT),
+ .mii_port_reg_offset = -GSW1XX_MII_PORT,
+ .mac_select_pcs = gsw1xx_phylink_mac_select_pcs,
+ .phylink_get_caps = &gsw1xx_phylink_get_caps,
+ .supports_2500m = true,
+ .pce_microcode = &gsw1xx_pce_microcode,
+ .pce_microcode_size = ARRAY_SIZE(gsw1xx_pce_microcode),
+ .tag_protocol = DSA_TAG_PROTO_MXL_GSW1XX,
+};
+
+static const struct gswip_hw_info gsw141_data = {
+ .max_ports = GSW1XX_PORTS,
+ .allowed_cpu_ports = BIT(GSW1XX_MII_PORT) | BIT(GSW1XX_SGMII_PORT),
+ .mii_ports = BIT(GSW1XX_MII_PORT),
+ .mii_port_reg_offset = -GSW1XX_MII_PORT,
+ .mac_select_pcs = gsw1xx_phylink_mac_select_pcs,
+ .phylink_get_caps = gsw1xx_phylink_get_caps,
+ .pce_microcode = &gsw1xx_pce_microcode,
+ .pce_microcode_size = ARRAY_SIZE(gsw1xx_pce_microcode),
+ .tag_protocol = DSA_TAG_PROTO_MXL_GSW1XX,
+};
+
+/*
+ * GSW125 is the industrial temperature version of GSW120.
+ * GSW145 is the industrial temperature version of GSW140.
+ */
+static const struct of_device_id gsw1xx_of_match[] = {
+ { .compatible = "maxlinear,gsw120", .data = &gsw12x_data },
+ { .compatible = "maxlinear,gsw125", .data = &gsw12x_data },
+ { .compatible = "maxlinear,gsw140", .data = &gsw140_data },
+ { .compatible = "maxlinear,gsw141", .data = &gsw141_data },
+ { .compatible = "maxlinear,gsw145", .data = &gsw140_data },
+ { /* sentinel */ },
+};
+
+MODULE_DEVICE_TABLE(of, gsw1xx_of_match);
+
+static struct mdio_driver gsw1xx_driver = {
+ .probe = gsw1xx_probe,
+ .remove = gsw1xx_remove,
+ .shutdown = gsw1xx_shutdown,
+ .mdiodrv.driver = {
+ .name = "mxl-gsw1xx",
+ .of_match_table = gsw1xx_of_match,
+ },
+};
+
+mdio_module_driver(gsw1xx_driver);
+
+MODULE_AUTHOR("Daniel Golle <daniel@makrotopia.org>");
+MODULE_DESCRIPTION("Driver for MaxLinear GSW1xx ethernet switch");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/lantiq/mxl-gsw1xx.h b/drivers/net/dsa/lantiq/mxl-gsw1xx.h
new file mode 100644
index 000000000000..38e03c048a26
--- /dev/null
+++ b/drivers/net/dsa/lantiq/mxl-gsw1xx.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Register definitions for MaxLinear GSW1xx series switches
+ *
+ * Copyright (C) 2025 Daniel Golle <daniel@makrotopia.org>
+ * Copyright (C) 2023 - 2024 MaxLinear Inc.
+ */
+#ifndef __MXL_GSW1XX_H
+#define __MXL_GSW1XX_H
+
+#include <linux/bitfield.h>
+
+#define GSW1XX_PORTS 6
+/* Port used for RGMII or optional RMII */
+#define GSW1XX_MII_PORT 5
+/* Port used for SGMII */
+#define GSW1XX_SGMII_PORT 4
+
+#define GSW1XX_SYS_CLK_FREQ 340000000
+
+/* SMDIO switch register base address */
+#define GSW1XX_SMDIO_BADR 0x1f
+#define GSW1XX_SMDIO_BADR_UNKNOWN -1
+
+/* GSW1XX SGMII PCS */
+#define GSW1XX_SGMII_BASE 0xd000
+#define GSW1XX_SGMII_PHY_HWBU_CTRL 0x009
+#define GSW1XX_SGMII_PHY_HWBU_CTRL_EN_HWBU_FSM BIT(0)
+#define GSW1XX_SGMII_PHY_HWBU_CTRL_HW_FSM_EN BIT(3)
+#define GSW1XX_SGMII_TBI_TXANEGH 0x300
+#define GSW1XX_SGMII_TBI_TXANEGL 0x301
+#define GSW1XX_SGMII_TBI_ANEGCTL 0x304
+#define GSW1XX_SGMII_TBI_ANEGCTL_LT GENMASK(1, 0)
+#define GSW1XX_SGMII_TBI_ANEGCTL_LT_10US 0
+#define GSW1XX_SGMII_TBI_ANEGCTL_LT_1_6MS 1
+#define GSW1XX_SGMII_TBI_ANEGCTL_LT_5MS 2
+#define GSW1XX_SGMII_TBI_ANEGCTL_LT_10MS 3
+#define GSW1XX_SGMII_TBI_ANEGCTL_ANEGEN BIT(2)
+#define GSW1XX_SGMII_TBI_ANEGCTL_RANEG BIT(3)
+#define GSW1XX_SGMII_TBI_ANEGCTL_OVRABL BIT(4)
+#define GSW1XX_SGMII_TBI_ANEGCTL_OVRANEG BIT(5)
+#define GSW1XX_SGMII_TBI_ANEGCTL_ANMODE GENMASK(7, 6)
+#define GSW1XX_SGMII_TBI_ANEGCTL_ANMODE_1000BASEX 1
+#define GSW1XX_SGMII_TBI_ANEGCTL_ANMODE_SGMII_PHY 2
+#define GSW1XX_SGMII_TBI_ANEGCTL_ANMODE_SGMII_MAC 3
+#define GSW1XX_SGMII_TBI_ANEGCTL_BCOMP BIT(15)
+
+#define GSW1XX_SGMII_TBI_TBICTL 0x305
+#define GSW1XX_SGMII_TBI_TBICTL_INITTBI BIT(0)
+#define GSW1XX_SGMII_TBI_TBICTL_ENTBI BIT(1)
+#define GSW1XX_SGMII_TBI_TBICTL_CRSTRR BIT(4)
+#define GSW1XX_SGMII_TBI_TBICTL_CRSOFF BIT(5)
+#define GSW1XX_SGMII_TBI_TBISTAT 0x309
+#define GSW1XX_SGMII_TBI_TBISTAT_LINK BIT(0)
+#define GSW1XX_SGMII_TBI_TBISTAT_AN_COMPLETE BIT(1)
+#define GSW1XX_SGMII_TBI_LPSTAT 0x30a
+#define GSW1XX_SGMII_TBI_LPSTAT_DUPLEX BIT(0)
+#define GSW1XX_SGMII_TBI_LPSTAT_PAUSE_RX BIT(1)
+#define GSW1XX_SGMII_TBI_LPSTAT_PAUSE_TX BIT(2)
+#define GSW1XX_SGMII_TBI_LPSTAT_SPEED GENMASK(6, 5)
+#define GSW1XX_SGMII_TBI_LPSTAT_SPEED_10 0
+#define GSW1XX_SGMII_TBI_LPSTAT_SPEED_100 1
+#define GSW1XX_SGMII_TBI_LPSTAT_SPEED_1000 2
+#define GSW1XX_SGMII_TBI_LPSTAT_SPEED_NOSGMII 3
+#define GSW1XX_SGMII_PHY_D 0x100
+#define GSW1XX_SGMII_PHY_A 0x101
+#define GSW1XX_SGMII_PHY_C 0x102
+#define GSW1XX_SGMII_PHY_STATUS BIT(0)
+#define GSW1XX_SGMII_PHY_READ BIT(4)
+#define GSW1XX_SGMII_PHY_WRITE BIT(8)
+#define GSW1XX_SGMII_PHY_RESET_N BIT(12)
+#define GSW1XX_SGMII_PCS_RXB_CTL 0x401
+#define GSW1XX_SGMII_PCS_RXB_CTL_INIT_RX_RXB BIT(1)
+#define GSW1XX_SGMII_PCS_TXB_CTL 0x404
+#define GSW1XX_SGMII_PCS_TXB_CTL_INIT_TX_TXB BIT(1)
+
+#define GSW1XX_SGMII_PHY_RX0_CFG2 0x004
+#define GSW1XX_SGMII_PHY_RX0_CFG2_EQ GENMASK(2, 0)
+#define GSW1XX_SGMII_PHY_RX0_CFG2_EQ_DEF 2
+#define GSW1XX_SGMII_PHY_RX0_CFG2_INVERT BIT(3)
+#define GSW1XX_SGMII_PHY_RX0_CFG2_LOS_EN BIT(4)
+#define GSW1XX_SGMII_PHY_RX0_CFG2_TERM_EN BIT(5)
+#define GSW1XX_SGMII_PHY_RX0_CFG2_FILT_CNT GENMASK(12, 6)
+#define GSW1XX_SGMII_PHY_RX0_CFG2_FILT_CNT_DEF 20
+
+#define GSW1XX_SGMII_PHY_TX0_CFG3 0x007
+#define GSW1XX_SGMII_PHY_TX0_CFG3_VBOOST_EN BIT(12)
+#define GSW1XX_SGMII_PHY_TX0_CFG3_VBOOST_LEVEL GENMASK(11, 9)
+#define GSW1XX_SGMII_PHY_TX0_CFG3_VBOOST_LEVEL_DEF 4
+#define GSW1XX_SGMII_PHY_TX0_CFG3_INVERT BIT(8)
+
+/* GSW1XX PDI Registers */
+#define GSW1XX_SWITCH_BASE 0xe000
+
+/* GSW1XX MII Registers */
+#define GSW1XX_RGMII_BASE 0xf100
+
+/* GSW1XX GPIO Registers */
+#define GSW1XX_GPIO_BASE 0xf300
+#define GPIO_ALTSEL0 0x83
+#define GPIO_ALTSEL0_EXTPHY_MUX_VAL 0x03c3
+#define GPIO_ALTSEL1 0x84
+#define GPIO_ALTSEL1_EXTPHY_MUX_VAL 0x003f
+
+/* MDIO bus controller */
+#define GSW1XX_MMDIO_BASE 0xf400
+
+/* generic IC registers */
+#define GSW1XX_SHELL_BASE 0xfa00
+#define GSW1XX_SHELL_RST_REQ 0x01
+#define GSW1XX_RST_REQ_SGMII_SHELL BIT(5)
+/* RGMII PAD Slew Control Register */
+#define GSW1XX_SHELL_RGMII_SLEW_CFG 0x78
+#define RGMII_SLEW_CFG_RX_2_5_V BIT(4)
+#define RGMII_SLEW_CFG_TX_2_5_V BIT(5)
+
+/* SGMII clock related settings */
+#define GSW1XX_CLK_BASE 0xf900
+#define GSW1XX_CLK_NCO_CTRL 0x68
+#define GSW1XX_SGMII_HSP_MASK GENMASK(3, 2)
+#define GSW1XX_SGMII_SEL BIT(1)
+#define GSW1XX_SGMII_1G 0x0
+#define GSW1XX_SGMII_2G5 0xc
+#define GSW1XX_SGMII_1G_NCO1 0x0
+#define GSW1XX_SGMII_2G5_NCO2 0x2
+
+#endif /* __MXL_GSW1XX_H */
diff --git a/drivers/net/dsa/lantiq/mxl-gsw1xx_pce.h b/drivers/net/dsa/lantiq/mxl-gsw1xx_pce.h
new file mode 100644
index 000000000000..eefcd411a340
--- /dev/null
+++ b/drivers/net/dsa/lantiq/mxl-gsw1xx_pce.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * PCE microcode update for the MaxLinear GSW1xx switch chip driver
+ *
+ * Copyright (C) 2023 - 2024 MaxLinear Inc.
+ * Copyright (C) 2022 Snap One, LLC. All rights reserved.
+ * Copyright (C) 2017 - 2019 Hauke Mehrtens <hauke@hauke-m.de>
+ * Copyright (C) 2012 John Crispin <john@phrozen.org>
+ * Copyright (C) 2010 Lantiq Deutschland
+ */
+
+#include "lantiq_gswip.h"
+
+#define INSTR 0
+#define IPV6 1
+#define LENACCU 2
+
+/* GSWIP_2.X */
+enum {
+ OUT_MAC0 = 0,
+ OUT_MAC1,
+ OUT_MAC2,
+ OUT_MAC3,
+ OUT_MAC4,
+ OUT_MAC5,
+ OUT_ETHTYP,
+ OUT_VTAG0,
+ OUT_VTAG1,
+ OUT_ITAG0,
+ OUT_ITAG1, /* 10 */
+ OUT_ITAG2,
+ OUT_ITAG3,
+ OUT_IP0,
+ OUT_IP1,
+ OUT_IP2,
+ OUT_IP3,
+ OUT_SIP0,
+ OUT_SIP1,
+ OUT_SIP2,
+ OUT_SIP3, /* 20 */
+ OUT_SIP4,
+ OUT_SIP5,
+ OUT_SIP6,
+ OUT_SIP7,
+ OUT_DIP0,
+ OUT_DIP1,
+ OUT_DIP2,
+ OUT_DIP3,
+ OUT_DIP4,
+ OUT_DIP5, /* 30 */
+ OUT_DIP6,
+ OUT_DIP7,
+ OUT_SESID,
+ OUT_PROT,
+ OUT_APP0,
+ OUT_APP1,
+ OUT_IGMP0,
+ OUT_IGMP1,
+ OUT_STAG0 = 61,
+ OUT_STAG1 = 62,
+ OUT_NONE = 63,
+};
+
+/* parser's microcode flag type */
+enum {
+ FLAG_ITAG = 0,
+ FLAG_VLAN,
+ FLAG_SNAP,
+ FLAG_PPPOE,
+ FLAG_IPV6,
+ FLAG_IPV6FL,
+ FLAG_IPV4,
+ FLAG_IGMP,
+ FLAG_TU,
+ FLAG_HOP,
+ FLAG_NN1, /* 10 */
+ FLAG_NN2,
+ FLAG_END,
+ FLAG_NO, /* 13 */
+ FLAG_SVLAN, /* 14 */
+};
+
+#define PCE_MC_M(val, msk, ns, out, len, type, flags, ipv4_len) \
+ { (val), (msk), ((ns) << 10 | (out) << 4 | (len) >> 1),\
+ ((len) & 1) << 15 | (type) << 13 | (flags) << 9 | (ipv4_len) << 8 }
+
+/* V22_2X (IPv6 issue fixed) */
+static const struct gswip_pce_microcode gsw1xx_pce_microcode[] = {
+ /* value mask ns fields L type flags ipv4_len */
+ PCE_MC_M(0x88c3, 0xFFFF, 1, OUT_ITAG0, 4, INSTR, FLAG_ITAG, 0),
+ PCE_MC_M(0x8100, 0xFFFF, 4, OUT_STAG0, 2, INSTR, FLAG_SVLAN, 0),
+ PCE_MC_M(0x88A8, 0xFFFF, 4, OUT_STAG0, 2, INSTR, FLAG_SVLAN, 0),
+ PCE_MC_M(0x9100, 0xFFFF, 4, OUT_STAG0, 2, INSTR, FLAG_SVLAN, 0),
+ PCE_MC_M(0x8100, 0xFFFF, 5, OUT_VTAG0, 2, INSTR, FLAG_VLAN, 0),
+ PCE_MC_M(0x88A8, 0xFFFF, 6, OUT_VTAG0, 2, INSTR, FLAG_VLAN, 0),
+ PCE_MC_M(0x9100, 0xFFFF, 4, OUT_VTAG0, 2, INSTR, FLAG_VLAN, 0),
+ PCE_MC_M(0x8864, 0xFFFF, 20, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0800, 0xFFFF, 24, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x86DD, 0xFFFF, 25, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x8863, 0xFFFF, 19, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0000, 0xF800, 13, OUT_NONE, 0, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0000, 0x0000, 44, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0600, 0x0600, 44, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0000, 0x0000, 15, OUT_NONE, 1, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0xAAAA, 0xFFFF, 17, OUT_NONE, 1, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0300, 0xFF00, 45, OUT_NONE, 0, INSTR, FLAG_SNAP, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_DIP7, 3, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0000, 0x0000, 21, OUT_DIP7, 3, INSTR, FLAG_PPPOE, 0),
+ PCE_MC_M(0x0021, 0xFFFF, 24, OUT_NONE, 1, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0057, 0xFFFF, 25, OUT_NONE, 1, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0000, 0x0000, 44, OUT_NONE, 0, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x4000, 0xF000, 27, OUT_IP0, 4, INSTR, FLAG_IPV4, 1),
+ PCE_MC_M(0x6000, 0xF000, 30, OUT_IP0, 3, INSTR, FLAG_IPV6, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0000, 0x0000, 28, OUT_IP3, 2, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0000, 0x0000, 29, OUT_SIP0, 4, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0000, 0x0000, 44, OUT_NONE, 0, LENACCU, FLAG_NO, 0),
+ PCE_MC_M(0x1100, 0xFF00, 43, OUT_PROT, 1, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0600, 0xFF00, 43, OUT_PROT, 1, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0000, 0xFF00, 36, OUT_IP3, 17, INSTR, FLAG_HOP, 0),
+ PCE_MC_M(0x2B00, 0xFF00, 36, OUT_IP3, 17, INSTR, FLAG_NN1, 0),
+ PCE_MC_M(0x3C00, 0xFF00, 36, OUT_IP3, 17, INSTR, FLAG_NN2, 0),
+ PCE_MC_M(0x0000, 0x0000, 43, OUT_PROT, 1, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0000, 0x00F0, 38, OUT_NONE, 0, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0000, 0x0000, 44, OUT_NONE, 0, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0000, 0xFF00, 36, OUT_NONE, 0, IPV6, FLAG_HOP, 0),
+ PCE_MC_M(0x2B00, 0xFF00, 36, OUT_NONE, 0, IPV6, FLAG_NN1, 0),
+ PCE_MC_M(0x3C00, 0xFF00, 36, OUT_NONE, 0, IPV6, FLAG_NN2, 0),
+ PCE_MC_M(0x0000, 0x00FC, 44, OUT_PROT, 0, IPV6, FLAG_NO, 0),
+ PCE_MC_M(0x0000, 0x0000, 44, OUT_NONE, 0, IPV6, FLAG_NO, 0),
+ PCE_MC_M(0x0000, 0x0000, 44, OUT_SIP0, 16, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_APP0, 4, INSTR, FLAG_IGMP, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+};
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
deleted file mode 100644
index de48b194048f..000000000000
--- a/drivers/net/dsa/lantiq_gswip.c
+++ /dev/null
@@ -1,2276 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Lantiq / Intel GSWIP switch driver for VRX200, xRX300 and xRX330 SoCs
- *
- * Copyright (C) 2010 Lantiq Deutschland
- * Copyright (C) 2012 John Crispin <john@phrozen.org>
- * Copyright (C) 2017 - 2019 Hauke Mehrtens <hauke@hauke-m.de>
- *
- * The VLAN and bridge model the GSWIP hardware uses does not directly
- * matches the model DSA uses.
- *
- * The hardware has 64 possible table entries for bridges with one VLAN
- * ID, one flow id and a list of ports for each bridge. All entries which
- * match the same flow ID are combined in the mac learning table, they
- * act as one global bridge.
- * The hardware does not support VLAN filter on the port, but on the
- * bridge, this driver converts the DSA model to the hardware.
- *
- * The CPU gets all the exception frames which do not match any forwarding
- * rule and the CPU port is also added to all bridges. This makes it possible
- * to handle all the special cases easily in software.
- * At the initialization the driver allocates one bridge table entry for
- * each switch port which is used when the port is used without an
- * explicit bridge. This prevents the frames from being forwarded
- * between all LAN ports by default.
- */
-
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/etherdevice.h>
-#include <linux/firmware.h>
-#include <linux/if_bridge.h>
-#include <linux/if_vlan.h>
-#include <linux/iopoll.h>
-#include <linux/mfd/syscon.h>
-#include <linux/module.h>
-#include <linux/of_mdio.h>
-#include <linux/of_net.h>
-#include <linux/of_platform.h>
-#include <linux/phy.h>
-#include <linux/phylink.h>
-#include <linux/platform_device.h>
-#include <linux/regmap.h>
-#include <linux/reset.h>
-#include <net/dsa.h>
-#include <dt-bindings/mips/lantiq_rcu_gphy.h>
-
-#include "lantiq_pce.h"
-
-/* GSWIP MDIO Registers */
-#define GSWIP_MDIO_GLOB 0x00
-#define GSWIP_MDIO_GLOB_ENABLE BIT(15)
-#define GSWIP_MDIO_CTRL 0x08
-#define GSWIP_MDIO_CTRL_BUSY BIT(12)
-#define GSWIP_MDIO_CTRL_RD BIT(11)
-#define GSWIP_MDIO_CTRL_WR BIT(10)
-#define GSWIP_MDIO_CTRL_PHYAD_MASK 0x1f
-#define GSWIP_MDIO_CTRL_PHYAD_SHIFT 5
-#define GSWIP_MDIO_CTRL_REGAD_MASK 0x1f
-#define GSWIP_MDIO_READ 0x09
-#define GSWIP_MDIO_WRITE 0x0A
-#define GSWIP_MDIO_MDC_CFG0 0x0B
-#define GSWIP_MDIO_MDC_CFG1 0x0C
-#define GSWIP_MDIO_PHYp(p) (0x15 - (p))
-#define GSWIP_MDIO_PHY_LINK_MASK 0x6000
-#define GSWIP_MDIO_PHY_LINK_AUTO 0x0000
-#define GSWIP_MDIO_PHY_LINK_DOWN 0x4000
-#define GSWIP_MDIO_PHY_LINK_UP 0x2000
-#define GSWIP_MDIO_PHY_SPEED_MASK 0x1800
-#define GSWIP_MDIO_PHY_SPEED_AUTO 0x1800
-#define GSWIP_MDIO_PHY_SPEED_M10 0x0000
-#define GSWIP_MDIO_PHY_SPEED_M100 0x0800
-#define GSWIP_MDIO_PHY_SPEED_G1 0x1000
-#define GSWIP_MDIO_PHY_FDUP_MASK 0x0600
-#define GSWIP_MDIO_PHY_FDUP_AUTO 0x0000
-#define GSWIP_MDIO_PHY_FDUP_EN 0x0200
-#define GSWIP_MDIO_PHY_FDUP_DIS 0x0600
-#define GSWIP_MDIO_PHY_FCONTX_MASK 0x0180
-#define GSWIP_MDIO_PHY_FCONTX_AUTO 0x0000
-#define GSWIP_MDIO_PHY_FCONTX_EN 0x0100
-#define GSWIP_MDIO_PHY_FCONTX_DIS 0x0180
-#define GSWIP_MDIO_PHY_FCONRX_MASK 0x0060
-#define GSWIP_MDIO_PHY_FCONRX_AUTO 0x0000
-#define GSWIP_MDIO_PHY_FCONRX_EN 0x0020
-#define GSWIP_MDIO_PHY_FCONRX_DIS 0x0060
-#define GSWIP_MDIO_PHY_ADDR_MASK 0x001f
-#define GSWIP_MDIO_PHY_MASK (GSWIP_MDIO_PHY_ADDR_MASK | \
- GSWIP_MDIO_PHY_FCONRX_MASK | \
- GSWIP_MDIO_PHY_FCONTX_MASK | \
- GSWIP_MDIO_PHY_LINK_MASK | \
- GSWIP_MDIO_PHY_SPEED_MASK | \
- GSWIP_MDIO_PHY_FDUP_MASK)
-
-/* GSWIP MII Registers */
-#define GSWIP_MII_CFGp(p) (0x2 * (p))
-#define GSWIP_MII_CFG_RESET BIT(15)
-#define GSWIP_MII_CFG_EN BIT(14)
-#define GSWIP_MII_CFG_ISOLATE BIT(13)
-#define GSWIP_MII_CFG_LDCLKDIS BIT(12)
-#define GSWIP_MII_CFG_RGMII_IBS BIT(8)
-#define GSWIP_MII_CFG_RMII_CLK BIT(7)
-#define GSWIP_MII_CFG_MODE_MIIP 0x0
-#define GSWIP_MII_CFG_MODE_MIIM 0x1
-#define GSWIP_MII_CFG_MODE_RMIIP 0x2
-#define GSWIP_MII_CFG_MODE_RMIIM 0x3
-#define GSWIP_MII_CFG_MODE_RGMII 0x4
-#define GSWIP_MII_CFG_MODE_GMII 0x9
-#define GSWIP_MII_CFG_MODE_MASK 0xf
-#define GSWIP_MII_CFG_RATE_M2P5 0x00
-#define GSWIP_MII_CFG_RATE_M25 0x10
-#define GSWIP_MII_CFG_RATE_M125 0x20
-#define GSWIP_MII_CFG_RATE_M50 0x30
-#define GSWIP_MII_CFG_RATE_AUTO 0x40
-#define GSWIP_MII_CFG_RATE_MASK 0x70
-#define GSWIP_MII_PCDU0 0x01
-#define GSWIP_MII_PCDU1 0x03
-#define GSWIP_MII_PCDU5 0x05
-#define GSWIP_MII_PCDU_TXDLY_MASK GENMASK(2, 0)
-#define GSWIP_MII_PCDU_RXDLY_MASK GENMASK(9, 7)
-
-/* GSWIP Core Registers */
-#define GSWIP_SWRES 0x000
-#define GSWIP_SWRES_R1 BIT(1) /* GSWIP Software reset */
-#define GSWIP_SWRES_R0 BIT(0) /* GSWIP Hardware reset */
-#define GSWIP_VERSION 0x013
-#define GSWIP_VERSION_REV_SHIFT 0
-#define GSWIP_VERSION_REV_MASK GENMASK(7, 0)
-#define GSWIP_VERSION_MOD_SHIFT 8
-#define GSWIP_VERSION_MOD_MASK GENMASK(15, 8)
-#define GSWIP_VERSION_2_0 0x100
-#define GSWIP_VERSION_2_1 0x021
-#define GSWIP_VERSION_2_2 0x122
-#define GSWIP_VERSION_2_2_ETC 0x022
-
-#define GSWIP_BM_RAM_VAL(x) (0x043 - (x))
-#define GSWIP_BM_RAM_ADDR 0x044
-#define GSWIP_BM_RAM_CTRL 0x045
-#define GSWIP_BM_RAM_CTRL_BAS BIT(15)
-#define GSWIP_BM_RAM_CTRL_OPMOD BIT(5)
-#define GSWIP_BM_RAM_CTRL_ADDR_MASK GENMASK(4, 0)
-#define GSWIP_BM_QUEUE_GCTRL 0x04A
-#define GSWIP_BM_QUEUE_GCTRL_GL_MOD BIT(10)
-/* buffer management Port Configuration Register */
-#define GSWIP_BM_PCFGp(p) (0x080 + ((p) * 2))
-#define GSWIP_BM_PCFG_CNTEN BIT(0) /* RMON Counter Enable */
-#define GSWIP_BM_PCFG_IGCNT BIT(1) /* Ingress Special Tag RMON count */
-/* buffer management Port Control Register */
-#define GSWIP_BM_RMON_CTRLp(p) (0x81 + ((p) * 2))
-#define GSWIP_BM_CTRL_RMON_RAM1_RES BIT(0) /* Software Reset for RMON RAM 1 */
-#define GSWIP_BM_CTRL_RMON_RAM2_RES BIT(1) /* Software Reset for RMON RAM 2 */
-
-/* PCE */
-#define GSWIP_PCE_TBL_KEY(x) (0x447 - (x))
-#define GSWIP_PCE_TBL_MASK 0x448
-#define GSWIP_PCE_TBL_VAL(x) (0x44D - (x))
-#define GSWIP_PCE_TBL_ADDR 0x44E
-#define GSWIP_PCE_TBL_CTRL 0x44F
-#define GSWIP_PCE_TBL_CTRL_BAS BIT(15)
-#define GSWIP_PCE_TBL_CTRL_TYPE BIT(13)
-#define GSWIP_PCE_TBL_CTRL_VLD BIT(12)
-#define GSWIP_PCE_TBL_CTRL_KEYFORM BIT(11)
-#define GSWIP_PCE_TBL_CTRL_GMAP_MASK GENMASK(10, 7)
-#define GSWIP_PCE_TBL_CTRL_OPMOD_MASK GENMASK(6, 5)
-#define GSWIP_PCE_TBL_CTRL_OPMOD_ADRD 0x00
-#define GSWIP_PCE_TBL_CTRL_OPMOD_ADWR 0x20
-#define GSWIP_PCE_TBL_CTRL_OPMOD_KSRD 0x40
-#define GSWIP_PCE_TBL_CTRL_OPMOD_KSWR 0x60
-#define GSWIP_PCE_TBL_CTRL_ADDR_MASK GENMASK(4, 0)
-#define GSWIP_PCE_PMAP1 0x453 /* Monitoring port map */
-#define GSWIP_PCE_PMAP2 0x454 /* Default Multicast port map */
-#define GSWIP_PCE_PMAP3 0x455 /* Default Unknown Unicast port map */
-#define GSWIP_PCE_GCTRL_0 0x456
-#define GSWIP_PCE_GCTRL_0_MTFL BIT(0) /* MAC Table Flushing */
-#define GSWIP_PCE_GCTRL_0_MC_VALID BIT(3)
-#define GSWIP_PCE_GCTRL_0_VLAN BIT(14) /* VLAN aware Switching */
-#define GSWIP_PCE_GCTRL_1 0x457
-#define GSWIP_PCE_GCTRL_1_MAC_GLOCK BIT(2) /* MAC Address table lock */
-#define GSWIP_PCE_GCTRL_1_MAC_GLOCK_MOD BIT(3) /* Mac address table lock forwarding mode */
-#define GSWIP_PCE_PCTRL_0p(p) (0x480 + ((p) * 0xA))
-#define GSWIP_PCE_PCTRL_0_TVM BIT(5) /* Transparent VLAN mode */
-#define GSWIP_PCE_PCTRL_0_VREP BIT(6) /* VLAN Replace Mode */
-#define GSWIP_PCE_PCTRL_0_INGRESS BIT(11) /* Accept special tag in ingress */
-#define GSWIP_PCE_PCTRL_0_PSTATE_LISTEN 0x0
-#define GSWIP_PCE_PCTRL_0_PSTATE_RX 0x1
-#define GSWIP_PCE_PCTRL_0_PSTATE_TX 0x2
-#define GSWIP_PCE_PCTRL_0_PSTATE_LEARNING 0x3
-#define GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING 0x7
-#define GSWIP_PCE_PCTRL_0_PSTATE_MASK GENMASK(2, 0)
-#define GSWIP_PCE_VCTRL(p) (0x485 + ((p) * 0xA))
-#define GSWIP_PCE_VCTRL_UVR BIT(0) /* Unknown VLAN Rule */
-#define GSWIP_PCE_VCTRL_VIMR BIT(3) /* VLAN Ingress Member violation rule */
-#define GSWIP_PCE_VCTRL_VEMR BIT(4) /* VLAN Egress Member violation rule */
-#define GSWIP_PCE_VCTRL_VSR BIT(5) /* VLAN Security */
-#define GSWIP_PCE_VCTRL_VID0 BIT(6) /* Priority Tagged Rule */
-#define GSWIP_PCE_DEFPVID(p) (0x486 + ((p) * 0xA))
-
-#define GSWIP_MAC_FLEN 0x8C5
-#define GSWIP_MAC_CTRL_0p(p) (0x903 + ((p) * 0xC))
-#define GSWIP_MAC_CTRL_0_PADEN BIT(8)
-#define GSWIP_MAC_CTRL_0_FCS_EN BIT(7)
-#define GSWIP_MAC_CTRL_0_FCON_MASK 0x0070
-#define GSWIP_MAC_CTRL_0_FCON_AUTO 0x0000
-#define GSWIP_MAC_CTRL_0_FCON_RX 0x0010
-#define GSWIP_MAC_CTRL_0_FCON_TX 0x0020
-#define GSWIP_MAC_CTRL_0_FCON_RXTX 0x0030
-#define GSWIP_MAC_CTRL_0_FCON_NONE 0x0040
-#define GSWIP_MAC_CTRL_0_FDUP_MASK 0x000C
-#define GSWIP_MAC_CTRL_0_FDUP_AUTO 0x0000
-#define GSWIP_MAC_CTRL_0_FDUP_EN 0x0004
-#define GSWIP_MAC_CTRL_0_FDUP_DIS 0x000C
-#define GSWIP_MAC_CTRL_0_GMII_MASK 0x0003
-#define GSWIP_MAC_CTRL_0_GMII_AUTO 0x0000
-#define GSWIP_MAC_CTRL_0_GMII_MII 0x0001
-#define GSWIP_MAC_CTRL_0_GMII_RGMII 0x0002
-#define GSWIP_MAC_CTRL_2p(p) (0x905 + ((p) * 0xC))
-#define GSWIP_MAC_CTRL_2_LCHKL BIT(2) /* Frame Length Check Long Enable */
-#define GSWIP_MAC_CTRL_2_MLEN BIT(3) /* Maximum Untagged Frame Length */
-
-/* Ethernet Switch Fetch DMA Port Control Register */
-#define GSWIP_FDMA_PCTRLp(p) (0xA80 + ((p) * 0x6))
-#define GSWIP_FDMA_PCTRL_EN BIT(0) /* FDMA Port Enable */
-#define GSWIP_FDMA_PCTRL_STEN BIT(1) /* Special Tag Insertion Enable */
-#define GSWIP_FDMA_PCTRL_VLANMOD_MASK GENMASK(4, 3) /* VLAN Modification Control */
-#define GSWIP_FDMA_PCTRL_VLANMOD_SHIFT 3 /* VLAN Modification Control */
-#define GSWIP_FDMA_PCTRL_VLANMOD_DIS (0x0 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
-#define GSWIP_FDMA_PCTRL_VLANMOD_PRIO (0x1 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
-#define GSWIP_FDMA_PCTRL_VLANMOD_ID (0x2 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
-#define GSWIP_FDMA_PCTRL_VLANMOD_BOTH (0x3 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
-
-/* Ethernet Switch Store DMA Port Control Register */
-#define GSWIP_SDMA_PCTRLp(p) (0xBC0 + ((p) * 0x6))
-#define GSWIP_SDMA_PCTRL_EN BIT(0) /* SDMA Port Enable */
-#define GSWIP_SDMA_PCTRL_FCEN BIT(1) /* Flow Control Enable */
-#define GSWIP_SDMA_PCTRL_PAUFWD BIT(3) /* Pause Frame Forwarding */
-
-#define GSWIP_TABLE_ACTIVE_VLAN 0x01
-#define GSWIP_TABLE_VLAN_MAPPING 0x02
-#define GSWIP_TABLE_MAC_BRIDGE 0x0b
-#define GSWIP_TABLE_MAC_BRIDGE_STATIC 0x01 /* Static, non-aging entry */
-
-#define XRX200_GPHY_FW_ALIGN (16 * 1024)
-
-/* Maximum packet size supported by the switch. In theory this should be 10240,
- * but long packets currently cause lock-ups with an MTU of over 2526. Medium
- * packets are sometimes dropped (e.g. TCP over 2477, UDP over 2516-2519, ICMP
- * over 2526), hence an MTU value of 2400 seems safe. This issue only affects
- * packet reception. This is probably caused by the PPA engine, which is on the
- * RX part of the device. Packet transmission works properly up to 10240.
- */
-#define GSWIP_MAX_PACKET_LENGTH 2400
-
-struct gswip_hw_info {
- int max_ports;
- int cpu_port;
- const struct dsa_switch_ops *ops;
-};
-
-struct xway_gphy_match_data {
- char *fe_firmware_name;
- char *ge_firmware_name;
-};
-
-struct gswip_gphy_fw {
- struct clk *clk_gate;
- struct reset_control *reset;
- u32 fw_addr_offset;
- char *fw_name;
-};
-
-struct gswip_vlan {
- struct net_device *bridge;
- u16 vid;
- u8 fid;
-};
-
-struct gswip_priv {
- __iomem void *gswip;
- __iomem void *mdio;
- __iomem void *mii;
- const struct gswip_hw_info *hw_info;
- const struct xway_gphy_match_data *gphy_fw_name_cfg;
- struct dsa_switch *ds;
- struct device *dev;
- struct regmap *rcu_regmap;
- struct gswip_vlan vlans[64];
- int num_gphy_fw;
- struct gswip_gphy_fw *gphy_fw;
- u32 port_vlan_filter;
- struct mutex pce_table_lock;
-};
-
-struct gswip_pce_table_entry {
- u16 index; // PCE_TBL_ADDR.ADDR = pData->table_index
- u16 table; // PCE_TBL_CTRL.ADDR = pData->table
- u16 key[8];
- u16 val[5];
- u16 mask;
- u8 gmap;
- bool type;
- bool valid;
- bool key_mode;
-};
-
-struct gswip_rmon_cnt_desc {
- unsigned int size;
- unsigned int offset;
- const char *name;
-};
-
-#define MIB_DESC(_size, _offset, _name) {.size = _size, .offset = _offset, .name = _name}
-
-static const struct gswip_rmon_cnt_desc gswip_rmon_cnt[] = {
- /** Receive Packet Count (only packets that are accepted and not discarded). */
- MIB_DESC(1, 0x1F, "RxGoodPkts"),
- MIB_DESC(1, 0x23, "RxUnicastPkts"),
- MIB_DESC(1, 0x22, "RxMulticastPkts"),
- MIB_DESC(1, 0x21, "RxFCSErrorPkts"),
- MIB_DESC(1, 0x1D, "RxUnderSizeGoodPkts"),
- MIB_DESC(1, 0x1E, "RxUnderSizeErrorPkts"),
- MIB_DESC(1, 0x1B, "RxOversizeGoodPkts"),
- MIB_DESC(1, 0x1C, "RxOversizeErrorPkts"),
- MIB_DESC(1, 0x20, "RxGoodPausePkts"),
- MIB_DESC(1, 0x1A, "RxAlignErrorPkts"),
- MIB_DESC(1, 0x12, "Rx64BytePkts"),
- MIB_DESC(1, 0x13, "Rx127BytePkts"),
- MIB_DESC(1, 0x14, "Rx255BytePkts"),
- MIB_DESC(1, 0x15, "Rx511BytePkts"),
- MIB_DESC(1, 0x16, "Rx1023BytePkts"),
- /** Receive Size 1024-1522 (or more, if configured) Packet Count. */
- MIB_DESC(1, 0x17, "RxMaxBytePkts"),
- MIB_DESC(1, 0x18, "RxDroppedPkts"),
- MIB_DESC(1, 0x19, "RxFilteredPkts"),
- MIB_DESC(2, 0x24, "RxGoodBytes"),
- MIB_DESC(2, 0x26, "RxBadBytes"),
- MIB_DESC(1, 0x11, "TxAcmDroppedPkts"),
- MIB_DESC(1, 0x0C, "TxGoodPkts"),
- MIB_DESC(1, 0x06, "TxUnicastPkts"),
- MIB_DESC(1, 0x07, "TxMulticastPkts"),
- MIB_DESC(1, 0x00, "Tx64BytePkts"),
- MIB_DESC(1, 0x01, "Tx127BytePkts"),
- MIB_DESC(1, 0x02, "Tx255BytePkts"),
- MIB_DESC(1, 0x03, "Tx511BytePkts"),
- MIB_DESC(1, 0x04, "Tx1023BytePkts"),
- /** Transmit Size 1024-1522 (or more, if configured) Packet Count. */
- MIB_DESC(1, 0x05, "TxMaxBytePkts"),
- MIB_DESC(1, 0x08, "TxSingleCollCount"),
- MIB_DESC(1, 0x09, "TxMultCollCount"),
- MIB_DESC(1, 0x0A, "TxLateCollCount"),
- MIB_DESC(1, 0x0B, "TxExcessCollCount"),
- MIB_DESC(1, 0x0D, "TxPauseCount"),
- MIB_DESC(1, 0x10, "TxDroppedPkts"),
- MIB_DESC(2, 0x0E, "TxGoodBytes"),
-};
-
-static u32 gswip_switch_r(struct gswip_priv *priv, u32 offset)
-{
- return __raw_readl(priv->gswip + (offset * 4));
-}
-
-static void gswip_switch_w(struct gswip_priv *priv, u32 val, u32 offset)
-{
- __raw_writel(val, priv->gswip + (offset * 4));
-}
-
-static void gswip_switch_mask(struct gswip_priv *priv, u32 clear, u32 set,
- u32 offset)
-{
- u32 val = gswip_switch_r(priv, offset);
-
- val &= ~(clear);
- val |= set;
- gswip_switch_w(priv, val, offset);
-}
-
-static u32 gswip_switch_r_timeout(struct gswip_priv *priv, u32 offset,
- u32 cleared)
-{
- u32 val;
-
- return readx_poll_timeout(__raw_readl, priv->gswip + (offset * 4), val,
- (val & cleared) == 0, 20, 50000);
-}
-
-static u32 gswip_mdio_r(struct gswip_priv *priv, u32 offset)
-{
- return __raw_readl(priv->mdio + (offset * 4));
-}
-
-static void gswip_mdio_w(struct gswip_priv *priv, u32 val, u32 offset)
-{
- __raw_writel(val, priv->mdio + (offset * 4));
-}
-
-static void gswip_mdio_mask(struct gswip_priv *priv, u32 clear, u32 set,
- u32 offset)
-{
- u32 val = gswip_mdio_r(priv, offset);
-
- val &= ~(clear);
- val |= set;
- gswip_mdio_w(priv, val, offset);
-}
-
-static u32 gswip_mii_r(struct gswip_priv *priv, u32 offset)
-{
- return __raw_readl(priv->mii + (offset * 4));
-}
-
-static void gswip_mii_w(struct gswip_priv *priv, u32 val, u32 offset)
-{
- __raw_writel(val, priv->mii + (offset * 4));
-}
-
-static void gswip_mii_mask(struct gswip_priv *priv, u32 clear, u32 set,
- u32 offset)
-{
- u32 val = gswip_mii_r(priv, offset);
-
- val &= ~(clear);
- val |= set;
- gswip_mii_w(priv, val, offset);
-}
-
-static void gswip_mii_mask_cfg(struct gswip_priv *priv, u32 clear, u32 set,
- int port)
-{
- /* There's no MII_CFG register for the CPU port */
- if (!dsa_is_cpu_port(priv->ds, port))
- gswip_mii_mask(priv, clear, set, GSWIP_MII_CFGp(port));
-}
-
-static void gswip_mii_mask_pcdu(struct gswip_priv *priv, u32 clear, u32 set,
- int port)
-{
- switch (port) {
- case 0:
- gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU0);
- break;
- case 1:
- gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU1);
- break;
- case 5:
- gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU5);
- break;
- }
-}
-
-static int gswip_mdio_poll(struct gswip_priv *priv)
-{
- int cnt = 100;
-
- while (likely(cnt--)) {
- u32 ctrl = gswip_mdio_r(priv, GSWIP_MDIO_CTRL);
-
- if ((ctrl & GSWIP_MDIO_CTRL_BUSY) == 0)
- return 0;
- usleep_range(20, 40);
- }
-
- return -ETIMEDOUT;
-}
-
-static int gswip_mdio_wr(struct mii_bus *bus, int addr, int reg, u16 val)
-{
- struct gswip_priv *priv = bus->priv;
- int err;
-
- err = gswip_mdio_poll(priv);
- if (err) {
- dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
- return err;
- }
-
- gswip_mdio_w(priv, val, GSWIP_MDIO_WRITE);
- gswip_mdio_w(priv, GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_WR |
- ((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) |
- (reg & GSWIP_MDIO_CTRL_REGAD_MASK),
- GSWIP_MDIO_CTRL);
-
- return 0;
-}
-
-static int gswip_mdio_rd(struct mii_bus *bus, int addr, int reg)
-{
- struct gswip_priv *priv = bus->priv;
- int err;
-
- err = gswip_mdio_poll(priv);
- if (err) {
- dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
- return err;
- }
-
- gswip_mdio_w(priv, GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_RD |
- ((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) |
- (reg & GSWIP_MDIO_CTRL_REGAD_MASK),
- GSWIP_MDIO_CTRL);
-
- err = gswip_mdio_poll(priv);
- if (err) {
- dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
- return err;
- }
-
- return gswip_mdio_r(priv, GSWIP_MDIO_READ);
-}
-
-static int gswip_mdio(struct gswip_priv *priv)
-{
- struct device_node *mdio_np, *switch_np = priv->dev->of_node;
- struct device *dev = priv->dev;
- struct mii_bus *bus;
- int err = 0;
-
- mdio_np = of_get_compatible_child(switch_np, "lantiq,xrx200-mdio");
- if (!of_device_is_available(mdio_np))
- goto out_put_node;
-
- bus = devm_mdiobus_alloc(dev);
- if (!bus) {
- err = -ENOMEM;
- goto out_put_node;
- }
-
- bus->priv = priv;
- bus->read = gswip_mdio_rd;
- bus->write = gswip_mdio_wr;
- bus->name = "lantiq,xrx200-mdio";
- snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(priv->dev));
- bus->parent = priv->dev;
-
- err = devm_of_mdiobus_register(dev, bus, mdio_np);
-
-out_put_node:
- of_node_put(mdio_np);
-
- return err;
-}
-
-static int gswip_pce_table_entry_read(struct gswip_priv *priv,
- struct gswip_pce_table_entry *tbl)
-{
- int i;
- int err;
- u16 crtl;
- u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSRD :
- GSWIP_PCE_TBL_CTRL_OPMOD_ADRD;
-
- mutex_lock(&priv->pce_table_lock);
-
- err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
- GSWIP_PCE_TBL_CTRL_BAS);
- if (err) {
- mutex_unlock(&priv->pce_table_lock);
- return err;
- }
-
- gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR);
- gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
- GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
- tbl->table | addr_mode | GSWIP_PCE_TBL_CTRL_BAS,
- GSWIP_PCE_TBL_CTRL);
-
- err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
- GSWIP_PCE_TBL_CTRL_BAS);
- if (err) {
- mutex_unlock(&priv->pce_table_lock);
- return err;
- }
-
- for (i = 0; i < ARRAY_SIZE(tbl->key); i++)
- tbl->key[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_KEY(i));
-
- for (i = 0; i < ARRAY_SIZE(tbl->val); i++)
- tbl->val[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_VAL(i));
-
- tbl->mask = gswip_switch_r(priv, GSWIP_PCE_TBL_MASK);
-
- crtl = gswip_switch_r(priv, GSWIP_PCE_TBL_CTRL);
-
- tbl->type = !!(crtl & GSWIP_PCE_TBL_CTRL_TYPE);
- tbl->valid = !!(crtl & GSWIP_PCE_TBL_CTRL_VLD);
- tbl->gmap = (crtl & GSWIP_PCE_TBL_CTRL_GMAP_MASK) >> 7;
-
- mutex_unlock(&priv->pce_table_lock);
-
- return 0;
-}
-
-static int gswip_pce_table_entry_write(struct gswip_priv *priv,
- struct gswip_pce_table_entry *tbl)
-{
- int i;
- int err;
- u16 crtl;
- u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSWR :
- GSWIP_PCE_TBL_CTRL_OPMOD_ADWR;
-
- mutex_lock(&priv->pce_table_lock);
-
- err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
- GSWIP_PCE_TBL_CTRL_BAS);
- if (err) {
- mutex_unlock(&priv->pce_table_lock);
- return err;
- }
-
- gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR);
- gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
- GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
- tbl->table | addr_mode,
- GSWIP_PCE_TBL_CTRL);
-
- for (i = 0; i < ARRAY_SIZE(tbl->key); i++)
- gswip_switch_w(priv, tbl->key[i], GSWIP_PCE_TBL_KEY(i));
-
- for (i = 0; i < ARRAY_SIZE(tbl->val); i++)
- gswip_switch_w(priv, tbl->val[i], GSWIP_PCE_TBL_VAL(i));
-
- gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
- GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
- tbl->table | addr_mode,
- GSWIP_PCE_TBL_CTRL);
-
- gswip_switch_w(priv, tbl->mask, GSWIP_PCE_TBL_MASK);
-
- crtl = gswip_switch_r(priv, GSWIP_PCE_TBL_CTRL);
- crtl &= ~(GSWIP_PCE_TBL_CTRL_TYPE | GSWIP_PCE_TBL_CTRL_VLD |
- GSWIP_PCE_TBL_CTRL_GMAP_MASK);
- if (tbl->type)
- crtl |= GSWIP_PCE_TBL_CTRL_TYPE;
- if (tbl->valid)
- crtl |= GSWIP_PCE_TBL_CTRL_VLD;
- crtl |= (tbl->gmap << 7) & GSWIP_PCE_TBL_CTRL_GMAP_MASK;
- crtl |= GSWIP_PCE_TBL_CTRL_BAS;
- gswip_switch_w(priv, crtl, GSWIP_PCE_TBL_CTRL);
-
- err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
- GSWIP_PCE_TBL_CTRL_BAS);
-
- mutex_unlock(&priv->pce_table_lock);
-
- return err;
-}
-
-/* Add the LAN port into a bridge with the CPU port by
- * default. This prevents automatic forwarding of
- * packets between the LAN ports when no explicit
- * bridge is configured.
- */
-static int gswip_add_single_port_br(struct gswip_priv *priv, int port, bool add)
-{
- struct gswip_pce_table_entry vlan_active = {0,};
- struct gswip_pce_table_entry vlan_mapping = {0,};
- unsigned int cpu_port = priv->hw_info->cpu_port;
- unsigned int max_ports = priv->hw_info->max_ports;
- int err;
-
- if (port >= max_ports) {
- dev_err(priv->dev, "single port for %i supported\n", port);
- return -EIO;
- }
-
- vlan_active.index = port + 1;
- vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
- vlan_active.key[0] = 0; /* vid */
- vlan_active.val[0] = port + 1 /* fid */;
- vlan_active.valid = add;
- err = gswip_pce_table_entry_write(priv, &vlan_active);
- if (err) {
- dev_err(priv->dev, "failed to write active VLAN: %d\n", err);
- return err;
- }
-
- if (!add)
- return 0;
-
- vlan_mapping.index = port + 1;
- vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
- vlan_mapping.val[0] = 0 /* vid */;
- vlan_mapping.val[1] = BIT(port) | BIT(cpu_port);
- vlan_mapping.val[2] = 0;
- err = gswip_pce_table_entry_write(priv, &vlan_mapping);
- if (err) {
- dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
- return err;
- }
-
- return 0;
-}
-
-static int gswip_port_enable(struct dsa_switch *ds, int port,
- struct phy_device *phydev)
-{
- struct gswip_priv *priv = ds->priv;
- int err;
-
- if (!dsa_is_user_port(ds, port))
- return 0;
-
- if (!dsa_is_cpu_port(ds, port)) {
- err = gswip_add_single_port_br(priv, port, true);
- if (err)
- return err;
- }
-
- /* RMON Counter Enable for port */
- gswip_switch_w(priv, GSWIP_BM_PCFG_CNTEN, GSWIP_BM_PCFGp(port));
-
- /* enable port fetch/store dma & VLAN Modification */
- gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_EN |
- GSWIP_FDMA_PCTRL_VLANMOD_BOTH,
- GSWIP_FDMA_PCTRLp(port));
- gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN,
- GSWIP_SDMA_PCTRLp(port));
-
- if (!dsa_is_cpu_port(ds, port)) {
- u32 mdio_phy = 0;
-
- if (phydev)
- mdio_phy = phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK;
-
- gswip_mdio_mask(priv, GSWIP_MDIO_PHY_ADDR_MASK, mdio_phy,
- GSWIP_MDIO_PHYp(port));
- }
-
- return 0;
-}
-
-static void gswip_port_disable(struct dsa_switch *ds, int port)
-{
- struct gswip_priv *priv = ds->priv;
-
- if (!dsa_is_user_port(ds, port))
- return;
-
- gswip_switch_mask(priv, GSWIP_FDMA_PCTRL_EN, 0,
- GSWIP_FDMA_PCTRLp(port));
- gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0,
- GSWIP_SDMA_PCTRLp(port));
-}
-
-static int gswip_pce_load_microcode(struct gswip_priv *priv)
-{
- int i;
- int err;
-
- gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
- GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
- GSWIP_PCE_TBL_CTRL_OPMOD_ADWR, GSWIP_PCE_TBL_CTRL);
- gswip_switch_w(priv, 0, GSWIP_PCE_TBL_MASK);
-
- for (i = 0; i < ARRAY_SIZE(gswip_pce_microcode); i++) {
- gswip_switch_w(priv, i, GSWIP_PCE_TBL_ADDR);
- gswip_switch_w(priv, gswip_pce_microcode[i].val_0,
- GSWIP_PCE_TBL_VAL(0));
- gswip_switch_w(priv, gswip_pce_microcode[i].val_1,
- GSWIP_PCE_TBL_VAL(1));
- gswip_switch_w(priv, gswip_pce_microcode[i].val_2,
- GSWIP_PCE_TBL_VAL(2));
- gswip_switch_w(priv, gswip_pce_microcode[i].val_3,
- GSWIP_PCE_TBL_VAL(3));
-
- /* start the table access: */
- gswip_switch_mask(priv, 0, GSWIP_PCE_TBL_CTRL_BAS,
- GSWIP_PCE_TBL_CTRL);
- err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
- GSWIP_PCE_TBL_CTRL_BAS);
- if (err)
- return err;
- }
-
- /* tell the switch that the microcode is loaded */
- gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_MC_VALID,
- GSWIP_PCE_GCTRL_0);
-
- return 0;
-}
-
-static int gswip_port_vlan_filtering(struct dsa_switch *ds, int port,
- bool vlan_filtering,
- struct netlink_ext_ack *extack)
-{
- struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
- struct gswip_priv *priv = ds->priv;
-
- /* Do not allow changing the VLAN filtering options while in a bridge */
- if (bridge && !!(priv->port_vlan_filter & BIT(port)) != vlan_filtering) {
- NL_SET_ERR_MSG_MOD(extack,
- "Dynamic toggling of vlan_filtering not supported");
- return -EIO;
- }
-
- if (vlan_filtering) {
- /* Use port based VLAN tag */
- gswip_switch_mask(priv,
- GSWIP_PCE_VCTRL_VSR,
- GSWIP_PCE_VCTRL_UVR | GSWIP_PCE_VCTRL_VIMR |
- GSWIP_PCE_VCTRL_VEMR,
- GSWIP_PCE_VCTRL(port));
- gswip_switch_mask(priv, GSWIP_PCE_PCTRL_0_TVM, 0,
- GSWIP_PCE_PCTRL_0p(port));
- } else {
- /* Use port based VLAN tag */
- gswip_switch_mask(priv,
- GSWIP_PCE_VCTRL_UVR | GSWIP_PCE_VCTRL_VIMR |
- GSWIP_PCE_VCTRL_VEMR,
- GSWIP_PCE_VCTRL_VSR,
- GSWIP_PCE_VCTRL(port));
- gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_TVM,
- GSWIP_PCE_PCTRL_0p(port));
- }
-
- return 0;
-}
-
-static int gswip_setup(struct dsa_switch *ds)
-{
- struct gswip_priv *priv = ds->priv;
- unsigned int cpu_port = priv->hw_info->cpu_port;
- int i;
- int err;
-
- gswip_switch_w(priv, GSWIP_SWRES_R0, GSWIP_SWRES);
- usleep_range(5000, 10000);
- gswip_switch_w(priv, 0, GSWIP_SWRES);
-
- /* disable port fetch/store dma on all ports */
- for (i = 0; i < priv->hw_info->max_ports; i++) {
- gswip_port_disable(ds, i);
- gswip_port_vlan_filtering(ds, i, false, NULL);
- }
-
- /* enable Switch */
- gswip_mdio_mask(priv, 0, GSWIP_MDIO_GLOB_ENABLE, GSWIP_MDIO_GLOB);
-
- err = gswip_pce_load_microcode(priv);
- if (err) {
- dev_err(priv->dev, "writing PCE microcode failed, %i", err);
- return err;
- }
-
- /* Default unknown Broadcast/Multicast/Unicast port maps */
- gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP1);
- gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP2);
- gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP3);
-
- /* Deactivate MDIO PHY auto polling. Some PHYs as the AR8030 have an
- * interoperability problem with this auto polling mechanism because
- * their status registers think that the link is in a different state
- * than it actually is. For the AR8030 it has the BMSR_ESTATEN bit set
- * as well as ESTATUS_1000_TFULL and ESTATUS_1000_XFULL. This makes the
- * auto polling state machine consider the link being negotiated with
- * 1Gbit/s. Since the PHY itself is a Fast Ethernet RMII PHY this leads
- * to the switch port being completely dead (RX and TX are both not
- * working).
- * Also with various other PHY / port combinations (PHY11G GPHY, PHY22F
- * GPHY, external RGMII PEF7071/7072) any traffic would stop. Sometimes
- * it would work fine for a few minutes to hours and then stop; on
- * other devices no traffic could be sent or received at all.
- * Testing shows that when PHY auto polling is disabled these problems
- * go away.
- */
- gswip_mdio_w(priv, 0x0, GSWIP_MDIO_MDC_CFG0);
-
- /* Configure the MDIO Clock 2.5 MHz */
- gswip_mdio_mask(priv, 0xff, 0x09, GSWIP_MDIO_MDC_CFG1);
-
- /* Disable the xMII interface and clear its isolation bit */
- for (i = 0; i < priv->hw_info->max_ports; i++)
- gswip_mii_mask_cfg(priv,
- GSWIP_MII_CFG_EN | GSWIP_MII_CFG_ISOLATE,
- 0, i);
-
- /* enable special tag insertion on cpu port */
- gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN,
- GSWIP_FDMA_PCTRLp(cpu_port));
-
- /* accept special tag in ingress direction */
- gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_INGRESS,
- GSWIP_PCE_PCTRL_0p(cpu_port));
-
- gswip_switch_mask(priv, 0, GSWIP_BM_QUEUE_GCTRL_GL_MOD,
- GSWIP_BM_QUEUE_GCTRL);
-
- /* VLAN aware Switching */
- gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_VLAN, GSWIP_PCE_GCTRL_0);
-
- /* Flush MAC Table */
- gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_MTFL, GSWIP_PCE_GCTRL_0);
-
- err = gswip_switch_r_timeout(priv, GSWIP_PCE_GCTRL_0,
- GSWIP_PCE_GCTRL_0_MTFL);
- if (err) {
- dev_err(priv->dev, "MAC flushing didn't finish\n");
- return err;
- }
-
- ds->mtu_enforcement_ingress = true;
-
- gswip_port_enable(ds, cpu_port, NULL);
-
- ds->configure_vlan_while_not_filtering = false;
-
- return 0;
-}
-
-static enum dsa_tag_protocol gswip_get_tag_protocol(struct dsa_switch *ds,
- int port,
- enum dsa_tag_protocol mp)
-{
- return DSA_TAG_PROTO_GSWIP;
-}
-
-static int gswip_vlan_active_create(struct gswip_priv *priv,
- struct net_device *bridge,
- int fid, u16 vid)
-{
- struct gswip_pce_table_entry vlan_active = {0,};
- unsigned int max_ports = priv->hw_info->max_ports;
- int idx = -1;
- int err;
- int i;
-
- /* Look for a free slot */
- for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
- if (!priv->vlans[i].bridge) {
- idx = i;
- break;
- }
- }
-
- if (idx == -1)
- return -ENOSPC;
-
- if (fid == -1)
- fid = idx;
-
- vlan_active.index = idx;
- vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
- vlan_active.key[0] = vid;
- vlan_active.val[0] = fid;
- vlan_active.valid = true;
-
- err = gswip_pce_table_entry_write(priv, &vlan_active);
- if (err) {
- dev_err(priv->dev, "failed to write active VLAN: %d\n", err);
- return err;
- }
-
- priv->vlans[idx].bridge = bridge;
- priv->vlans[idx].vid = vid;
- priv->vlans[idx].fid = fid;
-
- return idx;
-}
-
-static int gswip_vlan_active_remove(struct gswip_priv *priv, int idx)
-{
- struct gswip_pce_table_entry vlan_active = {0,};
- int err;
-
- vlan_active.index = idx;
- vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
- vlan_active.valid = false;
- err = gswip_pce_table_entry_write(priv, &vlan_active);
- if (err)
- dev_err(priv->dev, "failed to delete active VLAN: %d\n", err);
- priv->vlans[idx].bridge = NULL;
-
- return err;
-}
-
-static int gswip_vlan_add_unaware(struct gswip_priv *priv,
- struct net_device *bridge, int port)
-{
- struct gswip_pce_table_entry vlan_mapping = {0,};
- unsigned int max_ports = priv->hw_info->max_ports;
- unsigned int cpu_port = priv->hw_info->cpu_port;
- bool active_vlan_created = false;
- int idx = -1;
- int i;
- int err;
-
- /* Check if there is already a page for this bridge */
- for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
- if (priv->vlans[i].bridge == bridge) {
- idx = i;
- break;
- }
- }
-
- /* If this bridge is not programmed yet, add an Active VLAN table
- * entry in a free slot and prepare the VLAN mapping table entry.
- */
- if (idx == -1) {
- idx = gswip_vlan_active_create(priv, bridge, -1, 0);
- if (idx < 0)
- return idx;
- active_vlan_created = true;
-
- vlan_mapping.index = idx;
- vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
- /* VLAN ID byte, maps to the VLAN ID of vlan active table */
- vlan_mapping.val[0] = 0;
- } else {
- /* Read the existing VLAN mapping entry from the switch */
- vlan_mapping.index = idx;
- vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
- err = gswip_pce_table_entry_read(priv, &vlan_mapping);
- if (err) {
- dev_err(priv->dev, "failed to read VLAN mapping: %d\n",
- err);
- return err;
- }
- }
-
- /* Update the VLAN mapping entry and write it to the switch */
- vlan_mapping.val[1] |= BIT(cpu_port);
- vlan_mapping.val[1] |= BIT(port);
- err = gswip_pce_table_entry_write(priv, &vlan_mapping);
- if (err) {
- dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
- /* In case an Active VLAN was created, delete it again */
- if (active_vlan_created)
- gswip_vlan_active_remove(priv, idx);
- return err;
- }
-
- gswip_switch_w(priv, 0, GSWIP_PCE_DEFPVID(port));
- return 0;
-}
-
-static int gswip_vlan_add_aware(struct gswip_priv *priv,
- struct net_device *bridge, int port,
- u16 vid, bool untagged,
- bool pvid)
-{
- struct gswip_pce_table_entry vlan_mapping = {0,};
- unsigned int max_ports = priv->hw_info->max_ports;
- unsigned int cpu_port = priv->hw_info->cpu_port;
- bool active_vlan_created = false;
- int idx = -1;
- int fid = -1;
- int i;
- int err;
-
- /* Check if there is already a page for this bridge */
- for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
- if (priv->vlans[i].bridge == bridge) {
- if (fid != -1 && fid != priv->vlans[i].fid)
- dev_err(priv->dev, "one bridge with multiple flow ids\n");
- fid = priv->vlans[i].fid;
- if (priv->vlans[i].vid == vid) {
- idx = i;
- break;
- }
- }
- }
-
- /* If this bridge is not programmed yet, add an Active VLAN table
- * entry in a free slot and prepare the VLAN mapping table entry.
- */
- if (idx == -1) {
- idx = gswip_vlan_active_create(priv, bridge, fid, vid);
- if (idx < 0)
- return idx;
- active_vlan_created = true;
-
- vlan_mapping.index = idx;
- vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
- /* VLAN ID byte, maps to the VLAN ID of vlan active table */
- vlan_mapping.val[0] = vid;
- } else {
- /* Read the existing VLAN mapping entry from the switch */
- vlan_mapping.index = idx;
- vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
- err = gswip_pce_table_entry_read(priv, &vlan_mapping);
- if (err) {
- dev_err(priv->dev, "failed to read VLAN mapping: %d\n",
- err);
- return err;
- }
- }
-
- vlan_mapping.val[0] = vid;
- /* Update the VLAN mapping entry and write it to the switch */
- vlan_mapping.val[1] |= BIT(cpu_port);
- vlan_mapping.val[2] |= BIT(cpu_port);
- vlan_mapping.val[1] |= BIT(port);
- if (untagged)
- vlan_mapping.val[2] &= ~BIT(port);
- else
- vlan_mapping.val[2] |= BIT(port);
- err = gswip_pce_table_entry_write(priv, &vlan_mapping);
- if (err) {
- dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
- /* In case an Active VLAN was created, delete it again */
- if (active_vlan_created)
- gswip_vlan_active_remove(priv, idx);
- return err;
- }
-
- if (pvid)
- gswip_switch_w(priv, idx, GSWIP_PCE_DEFPVID(port));
-
- return 0;
-}
-
-static int gswip_vlan_remove(struct gswip_priv *priv,
- struct net_device *bridge, int port,
- u16 vid, bool pvid, bool vlan_aware)
-{
- struct gswip_pce_table_entry vlan_mapping = {0,};
- unsigned int max_ports = priv->hw_info->max_ports;
- unsigned int cpu_port = priv->hw_info->cpu_port;
- int idx = -1;
- int i;
- int err;
-
- /* Check if there is already a page for this bridge */
- for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
- if (priv->vlans[i].bridge == bridge &&
- (!vlan_aware || priv->vlans[i].vid == vid)) {
- idx = i;
- break;
- }
- }
-
- if (idx == -1) {
- dev_err(priv->dev, "bridge to leave does not exist\n");
- return -ENOENT;
- }
-
- vlan_mapping.index = idx;
- vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
- err = gswip_pce_table_entry_read(priv, &vlan_mapping);
- if (err) {
- dev_err(priv->dev, "failed to read VLAN mapping: %d\n", err);
- return err;
- }
-
- vlan_mapping.val[1] &= ~BIT(port);
- vlan_mapping.val[2] &= ~BIT(port);
- err = gswip_pce_table_entry_write(priv, &vlan_mapping);
- if (err) {
- dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
- return err;
- }
-
- /* In case all ports are removed from the bridge, remove the VLAN */
- if ((vlan_mapping.val[1] & ~BIT(cpu_port)) == 0) {
- err = gswip_vlan_active_remove(priv, idx);
- if (err) {
- dev_err(priv->dev, "failed to write active VLAN: %d\n",
- err);
- return err;
- }
- }
-
- /* GSWIP 2.2 (GRX300) and later program here the VID directly. */
- if (pvid)
- gswip_switch_w(priv, 0, GSWIP_PCE_DEFPVID(port));
-
- return 0;
-}
-
-static int gswip_port_bridge_join(struct dsa_switch *ds, int port,
- struct dsa_bridge bridge,
- bool *tx_fwd_offload,
- struct netlink_ext_ack *extack)
-{
- struct net_device *br = bridge.dev;
- struct gswip_priv *priv = ds->priv;
- int err;
-
- /* When the bridge uses VLAN filtering we have to configure VLAN
- * specific bridges. No bridge is configured here.
- */
- if (!br_vlan_enabled(br)) {
- err = gswip_vlan_add_unaware(priv, br, port);
- if (err)
- return err;
- priv->port_vlan_filter &= ~BIT(port);
- } else {
- priv->port_vlan_filter |= BIT(port);
- }
- return gswip_add_single_port_br(priv, port, false);
-}
-
-static void gswip_port_bridge_leave(struct dsa_switch *ds, int port,
- struct dsa_bridge bridge)
-{
- struct net_device *br = bridge.dev;
- struct gswip_priv *priv = ds->priv;
-
- gswip_add_single_port_br(priv, port, true);
-
- /* When the bridge uses VLAN filtering we have to configure VLAN
- * specific bridges. No bridge is configured here.
- */
- if (!br_vlan_enabled(br))
- gswip_vlan_remove(priv, br, port, 0, true, false);
-}
-
-static int gswip_port_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct netlink_ext_ack *extack)
-{
- struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
- struct gswip_priv *priv = ds->priv;
- unsigned int max_ports = priv->hw_info->max_ports;
- int pos = max_ports;
- int i, idx = -1;
-
- /* We only support VLAN filtering on bridges */
- if (!dsa_is_cpu_port(ds, port) && !bridge)
- return -EOPNOTSUPP;
-
- /* Check if there is already a page for this VLAN */
- for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
- if (priv->vlans[i].bridge == bridge &&
- priv->vlans[i].vid == vlan->vid) {
- idx = i;
- break;
- }
- }
-
- /* If this VLAN is not programmed yet, we have to reserve
- * one entry in the VLAN table. Make sure we start at the
- * next position round.
- */
- if (idx == -1) {
- /* Look for a free slot */
- for (; pos < ARRAY_SIZE(priv->vlans); pos++) {
- if (!priv->vlans[pos].bridge) {
- idx = pos;
- pos++;
- break;
- }
- }
-
- if (idx == -1) {
- NL_SET_ERR_MSG_MOD(extack, "No slot in VLAN table");
- return -ENOSPC;
- }
- }
-
- return 0;
-}
-
-static int gswip_port_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct netlink_ext_ack *extack)
-{
- struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
- struct gswip_priv *priv = ds->priv;
- bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
- bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
- int err;
-
- err = gswip_port_vlan_prepare(ds, port, vlan, extack);
- if (err)
- return err;
-
- /* We have to receive all packets on the CPU port and should not
- * do any VLAN filtering here. This is also called with bridge
- * NULL and then we do not know for which bridge to configure
- * this.
- */
- if (dsa_is_cpu_port(ds, port))
- return 0;
-
- return gswip_vlan_add_aware(priv, bridge, port, vlan->vid,
- untagged, pvid);
-}
-
-static int gswip_port_vlan_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
-{
- struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
- struct gswip_priv *priv = ds->priv;
- bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
-
- /* We have to receive all packets on the CPU port and should not
- * do any VLAN filtering here. This is also called with bridge
- * NULL and then we do not know for which bridge to configure
- * this.
- */
- if (dsa_is_cpu_port(ds, port))
- return 0;
-
- return gswip_vlan_remove(priv, bridge, port, vlan->vid, pvid, true);
-}
-
-static void gswip_port_fast_age(struct dsa_switch *ds, int port)
-{
- struct gswip_priv *priv = ds->priv;
- struct gswip_pce_table_entry mac_bridge = {0,};
- int i;
- int err;
-
- for (i = 0; i < 2048; i++) {
- mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
- mac_bridge.index = i;
-
- err = gswip_pce_table_entry_read(priv, &mac_bridge);
- if (err) {
- dev_err(priv->dev, "failed to read mac bridge: %d\n",
- err);
- return;
- }
-
- if (!mac_bridge.valid)
- continue;
-
- if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_STATIC)
- continue;
-
- if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) != port)
- continue;
-
- mac_bridge.valid = false;
- err = gswip_pce_table_entry_write(priv, &mac_bridge);
- if (err) {
- dev_err(priv->dev, "failed to write mac bridge: %d\n",
- err);
- return;
- }
- }
-}
-
-static void gswip_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
-{
- struct gswip_priv *priv = ds->priv;
- u32 stp_state;
-
- switch (state) {
- case BR_STATE_DISABLED:
- gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0,
- GSWIP_SDMA_PCTRLp(port));
- return;
- case BR_STATE_BLOCKING:
- case BR_STATE_LISTENING:
- stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LISTEN;
- break;
- case BR_STATE_LEARNING:
- stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LEARNING;
- break;
- case BR_STATE_FORWARDING:
- stp_state = GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING;
- break;
- default:
- dev_err(priv->dev, "invalid STP state: %d\n", state);
- return;
- }
-
- gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN,
- GSWIP_SDMA_PCTRLp(port));
- gswip_switch_mask(priv, GSWIP_PCE_PCTRL_0_PSTATE_MASK, stp_state,
- GSWIP_PCE_PCTRL_0p(port));
-}
-
-static int gswip_port_fdb(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid, bool add)
-{
- struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
- struct gswip_priv *priv = ds->priv;
- struct gswip_pce_table_entry mac_bridge = {0,};
- unsigned int max_ports = priv->hw_info->max_ports;
- int fid = -1;
- int i;
- int err;
-
- if (!bridge)
- return -EINVAL;
-
- for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
- if (priv->vlans[i].bridge == bridge) {
- fid = priv->vlans[i].fid;
- break;
- }
- }
-
- if (fid == -1) {
- dev_err(priv->dev, "Port not part of a bridge\n");
- return -EINVAL;
- }
-
- mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
- mac_bridge.key_mode = true;
- mac_bridge.key[0] = addr[5] | (addr[4] << 8);
- mac_bridge.key[1] = addr[3] | (addr[2] << 8);
- mac_bridge.key[2] = addr[1] | (addr[0] << 8);
- mac_bridge.key[3] = fid;
- mac_bridge.val[0] = add ? BIT(port) : 0; /* port map */
- mac_bridge.val[1] = GSWIP_TABLE_MAC_BRIDGE_STATIC;
- mac_bridge.valid = add;
-
- err = gswip_pce_table_entry_write(priv, &mac_bridge);
- if (err)
- dev_err(priv->dev, "failed to write mac bridge: %d\n", err);
-
- return err;
-}
-
-static int gswip_port_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid,
- struct dsa_db db)
-{
- return gswip_port_fdb(ds, port, addr, vid, true);
-}
-
-static int gswip_port_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid,
- struct dsa_db db)
-{
- return gswip_port_fdb(ds, port, addr, vid, false);
-}
-
-static int gswip_port_fdb_dump(struct dsa_switch *ds, int port,
- dsa_fdb_dump_cb_t *cb, void *data)
-{
- struct gswip_priv *priv = ds->priv;
- struct gswip_pce_table_entry mac_bridge = {0,};
- unsigned char addr[6];
- int i;
- int err;
-
- for (i = 0; i < 2048; i++) {
- mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
- mac_bridge.index = i;
-
- err = gswip_pce_table_entry_read(priv, &mac_bridge);
- if (err) {
- dev_err(priv->dev,
- "failed to read mac bridge entry %d: %d\n",
- i, err);
- return err;
- }
-
- if (!mac_bridge.valid)
- continue;
-
- addr[5] = mac_bridge.key[0] & 0xff;
- addr[4] = (mac_bridge.key[0] >> 8) & 0xff;
- addr[3] = mac_bridge.key[1] & 0xff;
- addr[2] = (mac_bridge.key[1] >> 8) & 0xff;
- addr[1] = mac_bridge.key[2] & 0xff;
- addr[0] = (mac_bridge.key[2] >> 8) & 0xff;
- if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_STATIC) {
- if (mac_bridge.val[0] & BIT(port)) {
- err = cb(addr, 0, true, data);
- if (err)
- return err;
- }
- } else {
- if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) == port) {
- err = cb(addr, 0, false, data);
- if (err)
- return err;
- }
- }
- }
- return 0;
-}
-
-static int gswip_port_max_mtu(struct dsa_switch *ds, int port)
-{
- /* Includes 8 bytes for special header. */
- return GSWIP_MAX_PACKET_LENGTH - VLAN_ETH_HLEN - ETH_FCS_LEN;
-}
-
-static int gswip_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
-{
- struct gswip_priv *priv = ds->priv;
- int cpu_port = priv->hw_info->cpu_port;
-
- /* The CPU port always has the maximum MTU of the user ports, so use it
- * to set the switch frame size, including the 8 byte special header.
- */
- if (port == cpu_port) {
- new_mtu += 8;
- gswip_switch_w(priv, VLAN_ETH_HLEN + new_mtu + ETH_FCS_LEN,
- GSWIP_MAC_FLEN);
- }
-
- /* Enable MLEN for ports with non-standard MTUs, including the special
- * header on the CPU port added above.
- */
- if (new_mtu != ETH_DATA_LEN)
- gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN,
- GSWIP_MAC_CTRL_2p(port));
- else
- gswip_switch_mask(priv, GSWIP_MAC_CTRL_2_MLEN, 0,
- GSWIP_MAC_CTRL_2p(port));
-
- return 0;
-}
-
-static void gswip_xrx200_phylink_get_caps(struct dsa_switch *ds, int port,
- struct phylink_config *config)
-{
- switch (port) {
- case 0:
- case 1:
- phy_interface_set_rgmii(config->supported_interfaces);
- __set_bit(PHY_INTERFACE_MODE_MII,
- config->supported_interfaces);
- __set_bit(PHY_INTERFACE_MODE_REVMII,
- config->supported_interfaces);
- __set_bit(PHY_INTERFACE_MODE_RMII,
- config->supported_interfaces);
- break;
-
- case 2:
- case 3:
- case 4:
- __set_bit(PHY_INTERFACE_MODE_INTERNAL,
- config->supported_interfaces);
- break;
-
- case 5:
- phy_interface_set_rgmii(config->supported_interfaces);
- __set_bit(PHY_INTERFACE_MODE_INTERNAL,
- config->supported_interfaces);
- break;
- }
-
- config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
- MAC_10 | MAC_100 | MAC_1000;
-}
-
-static void gswip_xrx300_phylink_get_caps(struct dsa_switch *ds, int port,
- struct phylink_config *config)
-{
- switch (port) {
- case 0:
- phy_interface_set_rgmii(config->supported_interfaces);
- __set_bit(PHY_INTERFACE_MODE_GMII,
- config->supported_interfaces);
- __set_bit(PHY_INTERFACE_MODE_RMII,
- config->supported_interfaces);
- break;
-
- case 1:
- case 2:
- case 3:
- case 4:
- __set_bit(PHY_INTERFACE_MODE_INTERNAL,
- config->supported_interfaces);
- break;
-
- case 5:
- phy_interface_set_rgmii(config->supported_interfaces);
- __set_bit(PHY_INTERFACE_MODE_INTERNAL,
- config->supported_interfaces);
- __set_bit(PHY_INTERFACE_MODE_RMII,
- config->supported_interfaces);
- break;
- }
-
- config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
- MAC_10 | MAC_100 | MAC_1000;
-}
-
-static void gswip_port_set_link(struct gswip_priv *priv, int port, bool link)
-{
- u32 mdio_phy;
-
- if (link)
- mdio_phy = GSWIP_MDIO_PHY_LINK_UP;
- else
- mdio_phy = GSWIP_MDIO_PHY_LINK_DOWN;
-
- gswip_mdio_mask(priv, GSWIP_MDIO_PHY_LINK_MASK, mdio_phy,
- GSWIP_MDIO_PHYp(port));
-}
-
-static void gswip_port_set_speed(struct gswip_priv *priv, int port, int speed,
- phy_interface_t interface)
-{
- u32 mdio_phy = 0, mii_cfg = 0, mac_ctrl_0 = 0;
-
- switch (speed) {
- case SPEED_10:
- mdio_phy = GSWIP_MDIO_PHY_SPEED_M10;
-
- if (interface == PHY_INTERFACE_MODE_RMII)
- mii_cfg = GSWIP_MII_CFG_RATE_M50;
- else
- mii_cfg = GSWIP_MII_CFG_RATE_M2P5;
-
- mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII;
- break;
-
- case SPEED_100:
- mdio_phy = GSWIP_MDIO_PHY_SPEED_M100;
-
- if (interface == PHY_INTERFACE_MODE_RMII)
- mii_cfg = GSWIP_MII_CFG_RATE_M50;
- else
- mii_cfg = GSWIP_MII_CFG_RATE_M25;
-
- mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII;
- break;
-
- case SPEED_1000:
- mdio_phy = GSWIP_MDIO_PHY_SPEED_G1;
-
- mii_cfg = GSWIP_MII_CFG_RATE_M125;
-
- mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_RGMII;
- break;
- }
-
- gswip_mdio_mask(priv, GSWIP_MDIO_PHY_SPEED_MASK, mdio_phy,
- GSWIP_MDIO_PHYp(port));
- gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_RATE_MASK, mii_cfg, port);
- gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_GMII_MASK, mac_ctrl_0,
- GSWIP_MAC_CTRL_0p(port));
-}
-
-static void gswip_port_set_duplex(struct gswip_priv *priv, int port, int duplex)
-{
- u32 mac_ctrl_0, mdio_phy;
-
- if (duplex == DUPLEX_FULL) {
- mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_EN;
- mdio_phy = GSWIP_MDIO_PHY_FDUP_EN;
- } else {
- mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_DIS;
- mdio_phy = GSWIP_MDIO_PHY_FDUP_DIS;
- }
-
- gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FDUP_MASK, mac_ctrl_0,
- GSWIP_MAC_CTRL_0p(port));
- gswip_mdio_mask(priv, GSWIP_MDIO_PHY_FDUP_MASK, mdio_phy,
- GSWIP_MDIO_PHYp(port));
-}
-
-static void gswip_port_set_pause(struct gswip_priv *priv, int port,
- bool tx_pause, bool rx_pause)
-{
- u32 mac_ctrl_0, mdio_phy;
-
- if (tx_pause && rx_pause) {
- mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RXTX;
- mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN |
- GSWIP_MDIO_PHY_FCONRX_EN;
- } else if (tx_pause) {
- mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_TX;
- mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN |
- GSWIP_MDIO_PHY_FCONRX_DIS;
- } else if (rx_pause) {
- mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RX;
- mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS |
- GSWIP_MDIO_PHY_FCONRX_EN;
- } else {
- mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_NONE;
- mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS |
- GSWIP_MDIO_PHY_FCONRX_DIS;
- }
-
- gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FCON_MASK,
- mac_ctrl_0, GSWIP_MAC_CTRL_0p(port));
- gswip_mdio_mask(priv,
- GSWIP_MDIO_PHY_FCONTX_MASK |
- GSWIP_MDIO_PHY_FCONRX_MASK,
- mdio_phy, GSWIP_MDIO_PHYp(port));
-}
-
-static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
- unsigned int mode,
- const struct phylink_link_state *state)
-{
- struct gswip_priv *priv = ds->priv;
- u32 miicfg = 0;
-
- miicfg |= GSWIP_MII_CFG_LDCLKDIS;
-
- switch (state->interface) {
- case PHY_INTERFACE_MODE_MII:
- case PHY_INTERFACE_MODE_INTERNAL:
- miicfg |= GSWIP_MII_CFG_MODE_MIIM;
- break;
- case PHY_INTERFACE_MODE_REVMII:
- miicfg |= GSWIP_MII_CFG_MODE_MIIP;
- break;
- case PHY_INTERFACE_MODE_RMII:
- miicfg |= GSWIP_MII_CFG_MODE_RMIIM;
- break;
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- miicfg |= GSWIP_MII_CFG_MODE_RGMII;
- break;
- case PHY_INTERFACE_MODE_GMII:
- miicfg |= GSWIP_MII_CFG_MODE_GMII;
- break;
- default:
- dev_err(ds->dev,
- "Unsupported interface: %d\n", state->interface);
- return;
- }
-
- gswip_mii_mask_cfg(priv,
- GSWIP_MII_CFG_MODE_MASK | GSWIP_MII_CFG_RMII_CLK |
- GSWIP_MII_CFG_RGMII_IBS | GSWIP_MII_CFG_LDCLKDIS,
- miicfg, port);
-
- switch (state->interface) {
- case PHY_INTERFACE_MODE_RGMII_ID:
- gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_TXDLY_MASK |
- GSWIP_MII_PCDU_RXDLY_MASK, 0, port);
- break;
- case PHY_INTERFACE_MODE_RGMII_RXID:
- gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_RXDLY_MASK, 0, port);
- break;
- case PHY_INTERFACE_MODE_RGMII_TXID:
- gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_TXDLY_MASK, 0, port);
- break;
- default:
- break;
- }
-}
-
-static void gswip_phylink_mac_link_down(struct dsa_switch *ds, int port,
- unsigned int mode,
- phy_interface_t interface)
-{
- struct gswip_priv *priv = ds->priv;
-
- gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, port);
-
- if (!dsa_is_cpu_port(ds, port))
- gswip_port_set_link(priv, port, false);
-}
-
-static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port,
- unsigned int mode,
- phy_interface_t interface,
- struct phy_device *phydev,
- int speed, int duplex,
- bool tx_pause, bool rx_pause)
-{
- struct gswip_priv *priv = ds->priv;
-
- if (!dsa_is_cpu_port(ds, port)) {
- gswip_port_set_link(priv, port, true);
- gswip_port_set_speed(priv, port, speed, interface);
- gswip_port_set_duplex(priv, port, duplex);
- gswip_port_set_pause(priv, port, tx_pause, rx_pause);
- }
-
- gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port);
-}
-
-static void gswip_get_strings(struct dsa_switch *ds, int port, u32 stringset,
- uint8_t *data)
-{
- int i;
-
- if (stringset != ETH_SS_STATS)
- return;
-
- for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++)
- ethtool_puts(&data, gswip_rmon_cnt[i].name);
-}
-
-static u32 gswip_bcm_ram_entry_read(struct gswip_priv *priv, u32 table,
- u32 index)
-{
- u32 result;
- int err;
-
- gswip_switch_w(priv, index, GSWIP_BM_RAM_ADDR);
- gswip_switch_mask(priv, GSWIP_BM_RAM_CTRL_ADDR_MASK |
- GSWIP_BM_RAM_CTRL_OPMOD,
- table | GSWIP_BM_RAM_CTRL_BAS,
- GSWIP_BM_RAM_CTRL);
-
- err = gswip_switch_r_timeout(priv, GSWIP_BM_RAM_CTRL,
- GSWIP_BM_RAM_CTRL_BAS);
- if (err) {
- dev_err(priv->dev, "timeout while reading table: %u, index: %u",
- table, index);
- return 0;
- }
-
- result = gswip_switch_r(priv, GSWIP_BM_RAM_VAL(0));
- result |= gswip_switch_r(priv, GSWIP_BM_RAM_VAL(1)) << 16;
-
- return result;
-}
-
-static void gswip_get_ethtool_stats(struct dsa_switch *ds, int port,
- uint64_t *data)
-{
- struct gswip_priv *priv = ds->priv;
- const struct gswip_rmon_cnt_desc *rmon_cnt;
- int i;
- u64 high;
-
- for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++) {
- rmon_cnt = &gswip_rmon_cnt[i];
-
- data[i] = gswip_bcm_ram_entry_read(priv, port,
- rmon_cnt->offset);
- if (rmon_cnt->size == 2) {
- high = gswip_bcm_ram_entry_read(priv, port,
- rmon_cnt->offset + 1);
- data[i] |= high << 32;
- }
- }
-}
-
-static int gswip_get_sset_count(struct dsa_switch *ds, int port, int sset)
-{
- if (sset != ETH_SS_STATS)
- return 0;
-
- return ARRAY_SIZE(gswip_rmon_cnt);
-}
-
-static const struct dsa_switch_ops gswip_xrx200_switch_ops = {
- .get_tag_protocol = gswip_get_tag_protocol,
- .setup = gswip_setup,
- .port_enable = gswip_port_enable,
- .port_disable = gswip_port_disable,
- .port_bridge_join = gswip_port_bridge_join,
- .port_bridge_leave = gswip_port_bridge_leave,
- .port_fast_age = gswip_port_fast_age,
- .port_vlan_filtering = gswip_port_vlan_filtering,
- .port_vlan_add = gswip_port_vlan_add,
- .port_vlan_del = gswip_port_vlan_del,
- .port_stp_state_set = gswip_port_stp_state_set,
- .port_fdb_add = gswip_port_fdb_add,
- .port_fdb_del = gswip_port_fdb_del,
- .port_fdb_dump = gswip_port_fdb_dump,
- .port_change_mtu = gswip_port_change_mtu,
- .port_max_mtu = gswip_port_max_mtu,
- .phylink_get_caps = gswip_xrx200_phylink_get_caps,
- .phylink_mac_config = gswip_phylink_mac_config,
- .phylink_mac_link_down = gswip_phylink_mac_link_down,
- .phylink_mac_link_up = gswip_phylink_mac_link_up,
- .get_strings = gswip_get_strings,
- .get_ethtool_stats = gswip_get_ethtool_stats,
- .get_sset_count = gswip_get_sset_count,
-};
-
-static const struct dsa_switch_ops gswip_xrx300_switch_ops = {
- .get_tag_protocol = gswip_get_tag_protocol,
- .setup = gswip_setup,
- .port_enable = gswip_port_enable,
- .port_disable = gswip_port_disable,
- .port_bridge_join = gswip_port_bridge_join,
- .port_bridge_leave = gswip_port_bridge_leave,
- .port_fast_age = gswip_port_fast_age,
- .port_vlan_filtering = gswip_port_vlan_filtering,
- .port_vlan_add = gswip_port_vlan_add,
- .port_vlan_del = gswip_port_vlan_del,
- .port_stp_state_set = gswip_port_stp_state_set,
- .port_fdb_add = gswip_port_fdb_add,
- .port_fdb_del = gswip_port_fdb_del,
- .port_fdb_dump = gswip_port_fdb_dump,
- .port_change_mtu = gswip_port_change_mtu,
- .port_max_mtu = gswip_port_max_mtu,
- .phylink_get_caps = gswip_xrx300_phylink_get_caps,
- .phylink_mac_config = gswip_phylink_mac_config,
- .phylink_mac_link_down = gswip_phylink_mac_link_down,
- .phylink_mac_link_up = gswip_phylink_mac_link_up,
- .get_strings = gswip_get_strings,
- .get_ethtool_stats = gswip_get_ethtool_stats,
- .get_sset_count = gswip_get_sset_count,
-};
-
-static const struct xway_gphy_match_data xrx200a1x_gphy_data = {
- .fe_firmware_name = "lantiq/xrx200_phy22f_a14.bin",
- .ge_firmware_name = "lantiq/xrx200_phy11g_a14.bin",
-};
-
-static const struct xway_gphy_match_data xrx200a2x_gphy_data = {
- .fe_firmware_name = "lantiq/xrx200_phy22f_a22.bin",
- .ge_firmware_name = "lantiq/xrx200_phy11g_a22.bin",
-};
-
-static const struct xway_gphy_match_data xrx300_gphy_data = {
- .fe_firmware_name = "lantiq/xrx300_phy22f_a21.bin",
- .ge_firmware_name = "lantiq/xrx300_phy11g_a21.bin",
-};
-
-static const struct of_device_id xway_gphy_match[] __maybe_unused = {
- { .compatible = "lantiq,xrx200-gphy-fw", .data = NULL },
- { .compatible = "lantiq,xrx200a1x-gphy-fw", .data = &xrx200a1x_gphy_data },
- { .compatible = "lantiq,xrx200a2x-gphy-fw", .data = &xrx200a2x_gphy_data },
- { .compatible = "lantiq,xrx300-gphy-fw", .data = &xrx300_gphy_data },
- { .compatible = "lantiq,xrx330-gphy-fw", .data = &xrx300_gphy_data },
- {},
-};
-
-static int gswip_gphy_fw_load(struct gswip_priv *priv, struct gswip_gphy_fw *gphy_fw)
-{
- struct device *dev = priv->dev;
- const struct firmware *fw;
- void *fw_addr;
- dma_addr_t dma_addr;
- dma_addr_t dev_addr;
- size_t size;
- int ret;
-
- ret = clk_prepare_enable(gphy_fw->clk_gate);
- if (ret)
- return ret;
-
- reset_control_assert(gphy_fw->reset);
-
- /* The vendor BSP uses a 200ms delay after asserting the reset line.
- * Without this some users are observing that the PHY is not coming up
- * on the MDIO bus.
- */
- msleep(200);
-
- ret = request_firmware(&fw, gphy_fw->fw_name, dev);
- if (ret) {
- dev_err(dev, "failed to load firmware: %s, error: %i\n",
- gphy_fw->fw_name, ret);
- return ret;
- }
-
- /* GPHY cores need the firmware code in a persistent and contiguous
- * memory area with a 16 kB boundary aligned start address.
- */
- size = fw->size + XRX200_GPHY_FW_ALIGN;
-
- fw_addr = dmam_alloc_coherent(dev, size, &dma_addr, GFP_KERNEL);
- if (fw_addr) {
- fw_addr = PTR_ALIGN(fw_addr, XRX200_GPHY_FW_ALIGN);
- dev_addr = ALIGN(dma_addr, XRX200_GPHY_FW_ALIGN);
- memcpy(fw_addr, fw->data, fw->size);
- } else {
- dev_err(dev, "failed to alloc firmware memory\n");
- release_firmware(fw);
- return -ENOMEM;
- }
-
- release_firmware(fw);
-
- ret = regmap_write(priv->rcu_regmap, gphy_fw->fw_addr_offset, dev_addr);
- if (ret)
- return ret;
-
- reset_control_deassert(gphy_fw->reset);
-
- return ret;
-}
-
-static int gswip_gphy_fw_probe(struct gswip_priv *priv,
- struct gswip_gphy_fw *gphy_fw,
- struct device_node *gphy_fw_np, int i)
-{
- struct device *dev = priv->dev;
- u32 gphy_mode;
- int ret;
- char gphyname[10];
-
- snprintf(gphyname, sizeof(gphyname), "gphy%d", i);
-
- gphy_fw->clk_gate = devm_clk_get(dev, gphyname);
- if (IS_ERR(gphy_fw->clk_gate)) {
- dev_err(dev, "Failed to lookup gate clock\n");
- return PTR_ERR(gphy_fw->clk_gate);
- }
-
- ret = of_property_read_u32(gphy_fw_np, "reg", &gphy_fw->fw_addr_offset);
- if (ret)
- return ret;
-
- ret = of_property_read_u32(gphy_fw_np, "lantiq,gphy-mode", &gphy_mode);
- /* Default to GE mode */
- if (ret)
- gphy_mode = GPHY_MODE_GE;
-
- switch (gphy_mode) {
- case GPHY_MODE_FE:
- gphy_fw->fw_name = priv->gphy_fw_name_cfg->fe_firmware_name;
- break;
- case GPHY_MODE_GE:
- gphy_fw->fw_name = priv->gphy_fw_name_cfg->ge_firmware_name;
- break;
- default:
- dev_err(dev, "Unknown GPHY mode %d\n", gphy_mode);
- return -EINVAL;
- }
-
- gphy_fw->reset = of_reset_control_array_get_exclusive(gphy_fw_np);
- if (IS_ERR(gphy_fw->reset))
- return dev_err_probe(dev, PTR_ERR(gphy_fw->reset),
- "Failed to lookup gphy reset\n");
-
- return gswip_gphy_fw_load(priv, gphy_fw);
-}
-
-static void gswip_gphy_fw_remove(struct gswip_priv *priv,
- struct gswip_gphy_fw *gphy_fw)
-{
- int ret;
-
- /* check if the device was fully probed */
- if (!gphy_fw->fw_name)
- return;
-
- ret = regmap_write(priv->rcu_regmap, gphy_fw->fw_addr_offset, 0);
- if (ret)
- dev_err(priv->dev, "can not reset GPHY FW pointer");
-
- clk_disable_unprepare(gphy_fw->clk_gate);
-
- reset_control_put(gphy_fw->reset);
-}
-
-static int gswip_gphy_fw_list(struct gswip_priv *priv,
- struct device_node *gphy_fw_list_np, u32 version)
-{
- struct device *dev = priv->dev;
- struct device_node *gphy_fw_np;
- const struct of_device_id *match;
- int err;
- int i = 0;
-
- /* The VRX200 rev 1.1 uses the GSWIP 2.0 and needs the older
- * GPHY firmware. The VRX200 rev 1.2 uses the GSWIP 2.1 and also
- * needs a different GPHY firmware.
- */
- if (of_device_is_compatible(gphy_fw_list_np, "lantiq,xrx200-gphy-fw")) {
- switch (version) {
- case GSWIP_VERSION_2_0:
- priv->gphy_fw_name_cfg = &xrx200a1x_gphy_data;
- break;
- case GSWIP_VERSION_2_1:
- priv->gphy_fw_name_cfg = &xrx200a2x_gphy_data;
- break;
- default:
- dev_err(dev, "unknown GSWIP version: 0x%x", version);
- return -ENOENT;
- }
- }
-
- match = of_match_node(xway_gphy_match, gphy_fw_list_np);
- if (match && match->data)
- priv->gphy_fw_name_cfg = match->data;
-
- if (!priv->gphy_fw_name_cfg) {
- dev_err(dev, "GPHY compatible type not supported");
- return -ENOENT;
- }
-
- priv->num_gphy_fw = of_get_available_child_count(gphy_fw_list_np);
- if (!priv->num_gphy_fw)
- return -ENOENT;
-
- priv->rcu_regmap = syscon_regmap_lookup_by_phandle(gphy_fw_list_np,
- "lantiq,rcu");
- if (IS_ERR(priv->rcu_regmap))
- return PTR_ERR(priv->rcu_regmap);
-
- priv->gphy_fw = devm_kmalloc_array(dev, priv->num_gphy_fw,
- sizeof(*priv->gphy_fw),
- GFP_KERNEL | __GFP_ZERO);
- if (!priv->gphy_fw)
- return -ENOMEM;
-
- for_each_available_child_of_node(gphy_fw_list_np, gphy_fw_np) {
- err = gswip_gphy_fw_probe(priv, &priv->gphy_fw[i],
- gphy_fw_np, i);
- if (err) {
- of_node_put(gphy_fw_np);
- goto remove_gphy;
- }
- i++;
- }
-
- /* The standalone PHY11G requires 300ms to be fully
- * initialized and ready for any MDIO communication after being
- * taken out of reset. For the SoC-internal GPHY variant there
- * is no (known) documentation for the minimum time after a
- * reset. Use the same value as for the standalone variant as
- * some users have reported internal PHYs not being detected
- * without any delay.
- */
- msleep(300);
-
- return 0;
-
-remove_gphy:
- for (i = 0; i < priv->num_gphy_fw; i++)
- gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
- return err;
-}
-
-static int gswip_probe(struct platform_device *pdev)
-{
- struct device_node *np, *gphy_fw_np;
- struct device *dev = &pdev->dev;
- struct gswip_priv *priv;
- int err;
- int i;
- u32 version;
-
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->gswip = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(priv->gswip))
- return PTR_ERR(priv->gswip);
-
- priv->mdio = devm_platform_ioremap_resource(pdev, 1);
- if (IS_ERR(priv->mdio))
- return PTR_ERR(priv->mdio);
-
- priv->mii = devm_platform_ioremap_resource(pdev, 2);
- if (IS_ERR(priv->mii))
- return PTR_ERR(priv->mii);
-
- priv->hw_info = of_device_get_match_data(dev);
- if (!priv->hw_info)
- return -EINVAL;
-
- priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
- if (!priv->ds)
- return -ENOMEM;
-
- priv->ds->dev = dev;
- priv->ds->num_ports = priv->hw_info->max_ports;
- priv->ds->priv = priv;
- priv->ds->ops = priv->hw_info->ops;
- priv->dev = dev;
- mutex_init(&priv->pce_table_lock);
- version = gswip_switch_r(priv, GSWIP_VERSION);
-
- np = dev->of_node;
- switch (version) {
- case GSWIP_VERSION_2_0:
- case GSWIP_VERSION_2_1:
- if (!of_device_is_compatible(np, "lantiq,xrx200-gswip"))
- return -EINVAL;
- break;
- case GSWIP_VERSION_2_2:
- case GSWIP_VERSION_2_2_ETC:
- if (!of_device_is_compatible(np, "lantiq,xrx300-gswip") &&
- !of_device_is_compatible(np, "lantiq,xrx330-gswip"))
- return -EINVAL;
- break;
- default:
- dev_err(dev, "unknown GSWIP version: 0x%x", version);
- return -ENOENT;
- }
-
- /* bring up the mdio bus */
- gphy_fw_np = of_get_compatible_child(dev->of_node, "lantiq,gphy-fw");
- if (gphy_fw_np) {
- err = gswip_gphy_fw_list(priv, gphy_fw_np, version);
- of_node_put(gphy_fw_np);
- if (err) {
- dev_err(dev, "gphy fw probe failed\n");
- return err;
- }
- }
-
- /* bring up the mdio bus */
- err = gswip_mdio(priv);
- if (err) {
- dev_err(dev, "mdio probe failed\n");
- goto gphy_fw_remove;
- }
-
- err = dsa_register_switch(priv->ds);
- if (err) {
- dev_err(dev, "dsa switch register failed: %i\n", err);
- goto gphy_fw_remove;
- }
- if (!dsa_is_cpu_port(priv->ds, priv->hw_info->cpu_port)) {
- dev_err(dev, "wrong CPU port defined, HW only supports port: %i",
- priv->hw_info->cpu_port);
- err = -EINVAL;
- goto disable_switch;
- }
-
- platform_set_drvdata(pdev, priv);
-
- dev_info(dev, "probed GSWIP version %lx mod %lx\n",
- (version & GSWIP_VERSION_REV_MASK) >> GSWIP_VERSION_REV_SHIFT,
- (version & GSWIP_VERSION_MOD_MASK) >> GSWIP_VERSION_MOD_SHIFT);
- return 0;
-
-disable_switch:
- gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);
- dsa_unregister_switch(priv->ds);
-gphy_fw_remove:
- for (i = 0; i < priv->num_gphy_fw; i++)
- gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
- return err;
-}
-
-static void gswip_remove(struct platform_device *pdev)
-{
- struct gswip_priv *priv = platform_get_drvdata(pdev);
- int i;
-
- if (!priv)
- return;
-
- /* disable the switch */
- gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);
-
- dsa_unregister_switch(priv->ds);
-
- for (i = 0; i < priv->num_gphy_fw; i++)
- gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
-}
-
-static void gswip_shutdown(struct platform_device *pdev)
-{
- struct gswip_priv *priv = platform_get_drvdata(pdev);
-
- if (!priv)
- return;
-
- dsa_switch_shutdown(priv->ds);
-
- platform_set_drvdata(pdev, NULL);
-}
-
-static const struct gswip_hw_info gswip_xrx200 = {
- .max_ports = 7,
- .cpu_port = 6,
- .ops = &gswip_xrx200_switch_ops,
-};
-
-static const struct gswip_hw_info gswip_xrx300 = {
- .max_ports = 7,
- .cpu_port = 6,
- .ops = &gswip_xrx300_switch_ops,
-};
-
-static const struct of_device_id gswip_of_match[] = {
- { .compatible = "lantiq,xrx200-gswip", .data = &gswip_xrx200 },
- { .compatible = "lantiq,xrx300-gswip", .data = &gswip_xrx300 },
- { .compatible = "lantiq,xrx330-gswip", .data = &gswip_xrx300 },
- {},
-};
-MODULE_DEVICE_TABLE(of, gswip_of_match);
-
-static struct platform_driver gswip_driver = {
- .probe = gswip_probe,
- .remove_new = gswip_remove,
- .shutdown = gswip_shutdown,
- .driver = {
- .name = "gswip",
- .of_match_table = gswip_of_match,
- },
-};
-
-module_platform_driver(gswip_driver);
-
-MODULE_FIRMWARE("lantiq/xrx300_phy11g_a21.bin");
-MODULE_FIRMWARE("lantiq/xrx300_phy22f_a21.bin");
-MODULE_FIRMWARE("lantiq/xrx200_phy11g_a14.bin");
-MODULE_FIRMWARE("lantiq/xrx200_phy11g_a22.bin");
-MODULE_FIRMWARE("lantiq/xrx200_phy22f_a14.bin");
-MODULE_FIRMWARE("lantiq/xrx200_phy22f_a22.bin");
-MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>");
-MODULE_DESCRIPTION("Lantiq / Intel GSWIP driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/microchip/Kconfig b/drivers/net/dsa/microchip/Kconfig
index 394ca8678d2b..c71d3fd5dfeb 100644
--- a/drivers/net/dsa/microchip/Kconfig
+++ b/drivers/net/dsa/microchip/Kconfig
@@ -1,12 +1,18 @@
# SPDX-License-Identifier: GPL-2.0-only
menuconfig NET_DSA_MICROCHIP_KSZ_COMMON
- tristate "Microchip KSZ8795/KSZ9477/LAN937x series switch support"
+ tristate "Microchip KSZ8XXX/KSZ9XXX/LAN937X series switch support"
depends on NET_DSA
select NET_DSA_TAG_KSZ
select NET_DSA_TAG_NONE
+ select NET_IEEE8021Q_HELPERS
+ select DCB
+ select PCS_XPCS
help
- This driver adds support for Microchip KSZ9477 series switch and
- KSZ8795/KSZ88x3 switch chips.
+ This driver adds support for Microchip KSZ8, KSZ9 and
+	  LAN937X series switch chips, namely KSZ8863/8873,
+ KSZ8895/8864, KSZ8794/8795/8765,
+ KSZ9477/9897/9896/9567/8567, KSZ9893/9563/8563 and
+ LAN9370/9371/9372/9373/9374.
config NET_DSA_MICROCHIP_KSZ9477_I2C
tristate "KSZ series I2C connected switch driver"
diff --git a/drivers/net/dsa/microchip/Makefile b/drivers/net/dsa/microchip/Makefile
index 49459a50dbc8..9347cfb3d0b5 100644
--- a/drivers/net/dsa/microchip/Makefile
+++ b/drivers/net/dsa/microchip/Makefile
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON) += ksz_switch.o
-ksz_switch-objs := ksz_common.o
+ksz_switch-objs := ksz_common.o ksz_dcb.o
ksz_switch-objs += ksz9477.o ksz9477_acl.o ksz9477_tc_flower.o
-ksz_switch-objs += ksz8795.o
+ksz_switch-objs += ksz8.o
ksz_switch-objs += lan937x_main.o
ifdef CONFIG_NET_DSA_MICROCHIP_KSZ_PTP
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8.c
index 14923535ca7e..c354abdafc1b 100644
--- a/drivers/net/dsa/microchip/ksz8795.c
+++ b/drivers/net/dsa/microchip/ksz8.c
@@ -1,6 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Microchip KSZ8795 switch driver
+ * Microchip KSZ8XXX series switch driver
+ *
+ * It supports the following switches:
+ * - KSZ8463
+ * - KSZ8863, KSZ8873 aka KSZ88X3
+ * - KSZ8895, KSZ8864 aka KSZ8895 family
+ * - KSZ8794, KSZ8795, KSZ8765 aka KSZ87XX
+ * Note that it does NOT support:
+ * - KSZ8563, KSZ8567 - see KSZ9477 driver
*
* Copyright (C) 2017 Microchip Technology Inc.
* Tristram Ha <Tristram.Ha@microchip.com>
@@ -23,21 +31,35 @@
#include <linux/phylink.h>
#include "ksz_common.h"
-#include "ksz8795_reg.h"
+#include "ksz8_reg.h"
#include "ksz8.h"
static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
{
- regmap_update_bits(ksz_regmap_8(dev), addr, bits, set ? bits : 0);
+ ksz_rmw8(dev, addr, bits, set ? bits : 0);
}
static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits,
bool set)
{
- regmap_update_bits(ksz_regmap_8(dev), PORT_CTRL_ADDR(port, offset),
- bits, set ? bits : 0);
+ ksz_rmw8(dev, dev->dev_ops->get_port_addr(port, offset), bits,
+ set ? bits : 0);
}
+/**
+ * ksz8_ind_write8 - EEE/ACL/PME indirect register write
+ * @dev: The device structure.
+ * @table: Function & table select, register 110.
+ * @addr: Indirect access control, register 111.
+ * @data: The data to be written.
+ *
+ * This function performs an indirect register write for EEE, ACL or
+ * PME switch functionalities. Both 8-bit registers 110 and 111 are
+ * written at once with ksz_write16, using the serial multiple write
+ * functionality.
+ *
+ * Return: 0 on success, or an error code on failure.
+ */
static int ksz8_ind_write8(struct ksz_device *dev, u8 table, u16 addr, u8 data)
{
const u16 *regs;
@@ -58,6 +80,59 @@ static int ksz8_ind_write8(struct ksz_device *dev, u8 table, u16 addr, u8 data)
return ret;
}
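
The "written at once" wording in the kernel-doc above refers to packing the table-select byte and the address byte into one 16-bit control word, so a single ksz_write16() updates registers 110 and 111 back to back. A rough sketch of that packing, assuming IND_ACC_TABLE() shifts the table-select into the upper byte as in ksz8_reg.h:

#include <linux/types.h>

/* Illustrative packing only: the upper byte ends up in register 110
 * (function/table select), the lower byte in register 111 (indirect
 * access control). Corresponds to IND_ACC_TABLE(table) | addr.
 */
static u16 ksz8_ind_ctrl_word(u8 table, u8 addr)
{
	return (u16)table << 8 | addr;
}
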
+/**
+ * ksz8_ind_read8 - EEE/ACL/PME indirect register read
+ * @dev: The device structure.
+ * @table: Function & table select, register 110.
+ * @addr: Indirect access control, register 111.
+ * @val: The value read.
+ *
+ * This function performs an indirect register read for EEE, ACL or
+ * PME switch functionalities. Both 8-bit registers 110 and 111 are
+ * written at once with ksz_write16, using the serial multiple write
+ * functionality.
+ *
+ * Return: 0 on success, or an error code on failure.
+ */
+static int ksz8_ind_read8(struct ksz_device *dev, u8 table, u16 addr, u8 *val)
+{
+ const u16 *regs;
+ u16 ctrl_addr;
+ int ret = 0;
+
+ regs = dev->info->regs;
+
+ mutex_lock(&dev->alu_mutex);
+
+ ctrl_addr = IND_ACC_TABLE(table | TABLE_READ) | addr;
+ ret = ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
+ if (!ret)
+ ret = ksz_read8(dev, regs[REG_IND_BYTE], val);
+
+ mutex_unlock(&dev->alu_mutex);
+
+ return ret;
+}
+
+int ksz8_pme_write8(struct ksz_device *dev, u32 reg, u8 value)
+{
+ return ksz8_ind_write8(dev, (u8)(reg >> 8), (u8)(reg), value);
+}
+
+int ksz8_pme_pread8(struct ksz_device *dev, int port, int offset, u8 *data)
+{
+ u8 table = (u8)(offset >> 8 | (port + 1));
+
+ return ksz8_ind_read8(dev, table, (u8)(offset), data);
+}
+
+int ksz8_pme_pwrite8(struct ksz_device *dev, int port, int offset, u8 data)
+{
+ u8 table = (u8)(offset >> 8 | (port + 1));
+
+ return ksz8_ind_write8(dev, table, (u8)(offset), data);
+}
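
The PME helpers above fold the port number into the table-select byte: the upper byte of @offset carries the function-select bits, the lower byte the register address, and port + 1 picks the per-port table. A hedged check of that arithmetic with a made-up offset value (0x0207 is not a real KSZ8 register):

#include <linux/types.h>

/* Hypothetical offset only, to show how table and addr are derived. */
static void ksz8_pme_addr_example(void)
{
	u16 offset = 0x0207;
	int port = 0;
	u8 table = (u8)(offset >> 8 | (port + 1));	/* 0x02 | 0x01 = 0x03 */
	u8 addr = (u8)offset;				/* 0x07 */

	(void)table;
	(void)addr;
}
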
+
int ksz8_reset_switch(struct ksz_device *dev)
{
if (ksz_is_ksz88x3(dev)) {
@@ -66,6 +141,11 @@ int ksz8_reset_switch(struct ksz_device *dev)
KSZ8863_GLOBAL_SOFTWARE_RESET | KSZ8863_PCS_RESET, true);
ksz_cfg(dev, KSZ8863_REG_SW_RESET,
KSZ8863_GLOBAL_SOFTWARE_RESET | KSZ8863_PCS_RESET, false);
+ } else if (ksz_is_ksz8463(dev)) {
+ ksz_cfg(dev, KSZ8463_REG_SW_RESET,
+ KSZ8463_GLOBAL_SOFTWARE_RESET, true);
+ ksz_cfg(dev, KSZ8463_REG_SW_RESET,
+ KSZ8463_GLOBAL_SOFTWARE_RESET, false);
} else {
/* reset switch */
ksz_write8(dev, REG_POWER_MANAGEMENT_1,
@@ -120,44 +200,86 @@ int ksz8_change_mtu(struct ksz_device *dev, int port, int mtu)
case KSZ8794_CHIP_ID:
case KSZ8765_CHIP_ID:
return ksz8795_change_mtu(dev, frame_size);
- case KSZ8830_CHIP_ID:
+ case KSZ8463_CHIP_ID:
+ case KSZ88X3_CHIP_ID:
+ case KSZ8864_CHIP_ID:
+ case KSZ8895_CHIP_ID:
return ksz8863_change_mtu(dev, frame_size);
}
return -EOPNOTSUPP;
}
-static void ksz8795_set_prio_queue(struct ksz_device *dev, int port, int queue)
+static int ksz8_port_queue_split(struct ksz_device *dev, int port, int queues)
{
- u8 hi, lo;
+ u8 mask_4q, mask_2q;
+ u8 reg_4q, reg_2q;
+ u8 data_4q = 0;
+ u8 data_2q = 0;
+ int ret;
- /* Number of queues can only be 1, 2, or 4. */
- switch (queue) {
- case 4:
- case 3:
- queue = PORT_QUEUE_SPLIT_4;
- break;
- case 2:
- queue = PORT_QUEUE_SPLIT_2;
- break;
- default:
- queue = PORT_QUEUE_SPLIT_1;
+ if (ksz_is_ksz88x3(dev)) {
+ mask_4q = KSZ8873_PORT_4QUEUE_SPLIT_EN;
+ mask_2q = KSZ8873_PORT_2QUEUE_SPLIT_EN;
+ reg_4q = REG_PORT_CTRL_0;
+ reg_2q = REG_PORT_CTRL_2;
+
+ /* KSZ8795 family switches have Weighted Fair Queueing (WFQ)
+ * enabled by default. Enable it for KSZ8873 family switches
+		 * too. The KSZ8873 family defaults to strict priority, which
+		 * should only be enabled on request via TC_SETUP_QDISC_ETS,
+		 * not by default.
+ */
+ ret = ksz_rmw8(dev, REG_SW_CTRL_3, WEIGHTED_FAIR_QUEUE_ENABLE,
+ WEIGHTED_FAIR_QUEUE_ENABLE);
+ if (ret)
+ return ret;
+ } else if (ksz_is_ksz8463(dev)) {
+ mask_4q = KSZ8873_PORT_4QUEUE_SPLIT_EN;
+ mask_2q = KSZ8873_PORT_2QUEUE_SPLIT_EN;
+ reg_4q = P1CR1;
+ reg_2q = P1CR1 + 1;
+ } else {
+ mask_4q = KSZ8795_PORT_4QUEUE_SPLIT_EN;
+ mask_2q = KSZ8795_PORT_2QUEUE_SPLIT_EN;
+ reg_4q = REG_PORT_CTRL_13;
+ reg_2q = REG_PORT_CTRL_0;
+
+		/* TODO: this is legacy from the initial KSZ8795 driver and
+		 * should be moved to an appropriate place in the future.
+ */
+ ret = ksz_rmw8(dev, REG_SW_CTRL_19,
+ SW_OUT_RATE_LIMIT_QUEUE_BASED,
+ SW_OUT_RATE_LIMIT_QUEUE_BASED);
+ if (ret)
+ return ret;
+ }
+
+ if (queues == 4)
+ data_4q = mask_4q;
+ else if (queues == 2)
+ data_2q = mask_2q;
+
+ ret = ksz_prmw8(dev, port, reg_4q, mask_4q, data_4q);
+ if (ret)
+ return ret;
+
+ return ksz_prmw8(dev, port, reg_2q, mask_2q, data_2q);
+}
+
+int ksz8_all_queues_split(struct ksz_device *dev, int queues)
+{
+ struct dsa_switch *ds = dev->ds;
+ const struct dsa_port *dp;
+
+ dsa_switch_for_each_port(dp, ds) {
+ int ret = ksz8_port_queue_split(dev, dp->index, queues);
+
+ if (ret)
+ return ret;
}
- ksz_pread8(dev, port, REG_PORT_CTRL_0, &lo);
- ksz_pread8(dev, port, P_DROP_TAG_CTRL, &hi);
- lo &= ~PORT_QUEUE_SPLIT_L;
- if (queue & PORT_QUEUE_SPLIT_2)
- lo |= PORT_QUEUE_SPLIT_L;
- hi &= ~PORT_QUEUE_SPLIT_H;
- if (queue & PORT_QUEUE_SPLIT_4)
- hi |= PORT_QUEUE_SPLIT_H;
- ksz_pwrite8(dev, port, REG_PORT_CTRL_0, lo);
- ksz_pwrite8(dev, port, P_DROP_TAG_CTRL, hi);
-
- /* Default is port based for egress rate limit. */
- if (queue != PORT_QUEUE_SPLIT_1)
- ksz_cfg(dev, REG_SW_CTRL_19, SW_OUT_RATE_LIMIT_QUEUE_BASED,
- true);
+
+ return 0;
}
void ksz8_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt)
@@ -261,6 +383,9 @@ static void ksz8863_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
addr -= dev->info->reg_mib_cnt;
ctrl_addr = addr ? KSZ8863_MIB_PACKET_DROPPED_TX_0 :
KSZ8863_MIB_PACKET_DROPPED_RX_0;
+ if (ksz_is_8895_family(dev) &&
+ ctrl_addr == KSZ8863_MIB_PACKET_DROPPED_RX_0)
+ ctrl_addr = KSZ8895_MIB_PACKET_DROPPED_RX_0;
ctrl_addr += port;
ctrl_addr |= IND_ACC_TABLE(TABLE_MIB | TABLE_READ);
@@ -283,7 +408,7 @@ static void ksz8863_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
void ksz8_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
u64 *dropped, u64 *cnt)
{
- if (ksz_is_ksz88x3(dev))
+ if (is_ksz88xx(dev))
ksz8863_r_mib_pkt(dev, port, addr, dropped, cnt);
else
ksz8795_r_mib_pkt(dev, port, addr, dropped, cnt);
@@ -291,7 +416,7 @@ void ksz8_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
void ksz8_freeze_mib(struct ksz_device *dev, int port, bool freeze)
{
- if (ksz_is_ksz88x3(dev))
+ if (is_ksz88xx(dev))
return;
/* enable the port for flush/freeze function */
@@ -309,7 +434,8 @@ void ksz8_port_init_cnt(struct ksz_device *dev, int port)
struct ksz_port_mib *mib = &dev->ports[port].mib;
u64 *dropped;
- if (!ksz_is_ksz88x3(dev)) {
+ /* For KSZ8795 family. */
+ if (ksz_is_ksz87xx(dev)) {
/* flush all enabled port MIB counters */
ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), true);
ksz_cfg(dev, REG_SW_CTRL_6, SW_MIB_COUNTER_FLUSH, true);
@@ -385,39 +511,39 @@ static int ksz8_valid_dyn_entry(struct ksz_device *dev, u8 *data)
int timeout = 100;
const u32 *masks;
const u16 *regs;
+ int ret;
masks = dev->info->masks;
regs = dev->info->regs;
do {
- ksz_read8(dev, regs[REG_IND_DATA_CHECK], data);
+ ret = ksz_read8(dev, regs[REG_IND_DATA_CHECK], data);
+ if (ret)
+ return ret;
+
timeout--;
} while ((*data & masks[DYNAMIC_MAC_TABLE_NOT_READY]) && timeout);
/* Entry is not ready for accessing. */
- if (*data & masks[DYNAMIC_MAC_TABLE_NOT_READY]) {
- return -EAGAIN;
- /* Entry is ready for accessing. */
- } else {
- ksz_read8(dev, regs[REG_IND_DATA_8], data);
+ if (*data & masks[DYNAMIC_MAC_TABLE_NOT_READY])
+ return -ETIMEDOUT;
- /* There is no valid entry in the table. */
- if (*data & masks[DYNAMIC_MAC_TABLE_MAC_EMPTY])
- return -ENXIO;
- }
- return 0;
+ /* Entry is ready for accessing. */
+ return ksz_read8(dev, regs[REG_IND_DATA_8], data);
}
-int ksz8_r_dyn_mac_table(struct ksz_device *dev, u16 addr, u8 *mac_addr,
- u8 *fid, u8 *src_port, u8 *timestamp, u16 *entries)
+static int ksz8_r_dyn_mac_table(struct ksz_device *dev, u16 addr, u8 *mac_addr,
+ u8 *fid, u8 *src_port, u16 *entries)
{
u32 data_hi, data_lo;
const u8 *shifts;
const u32 *masks;
const u16 *regs;
u16 ctrl_addr;
+ u64 buf = 0;
u8 data;
- int rc;
+ int cnt;
+ int ret;
shifts = dev->info->shifts;
masks = dev->info->masks;
@@ -426,49 +552,50 @@ int ksz8_r_dyn_mac_table(struct ksz_device *dev, u16 addr, u8 *mac_addr,
ctrl_addr = IND_ACC_TABLE(TABLE_DYNAMIC_MAC | TABLE_READ) | addr;
mutex_lock(&dev->alu_mutex);
- ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
+ ret = ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
+ if (ret)
+ goto unlock_alu;
+
+ ret = ksz8_valid_dyn_entry(dev, &data);
+ if (ret)
+ goto unlock_alu;
- rc = ksz8_valid_dyn_entry(dev, &data);
- if (rc == -EAGAIN) {
- if (addr == 0)
- *entries = 0;
- } else if (rc == -ENXIO) {
+ if (data & masks[DYNAMIC_MAC_TABLE_MAC_EMPTY]) {
*entries = 0;
- /* At least one valid entry in the table. */
- } else {
- u64 buf = 0;
- int cnt;
-
- ksz_read64(dev, regs[REG_IND_DATA_HI], &buf);
- data_hi = (u32)(buf >> 32);
- data_lo = (u32)buf;
-
- /* Check out how many valid entry in the table. */
- cnt = data & masks[DYNAMIC_MAC_TABLE_ENTRIES_H];
- cnt <<= shifts[DYNAMIC_MAC_ENTRIES_H];
- cnt |= (data_hi & masks[DYNAMIC_MAC_TABLE_ENTRIES]) >>
- shifts[DYNAMIC_MAC_ENTRIES];
- *entries = cnt + 1;
-
- *fid = (data_hi & masks[DYNAMIC_MAC_TABLE_FID]) >>
- shifts[DYNAMIC_MAC_FID];
- *src_port = (data_hi & masks[DYNAMIC_MAC_TABLE_SRC_PORT]) >>
- shifts[DYNAMIC_MAC_SRC_PORT];
- *timestamp = (data_hi & masks[DYNAMIC_MAC_TABLE_TIMESTAMP]) >>
- shifts[DYNAMIC_MAC_TIMESTAMP];
-
- mac_addr[5] = (u8)data_lo;
- mac_addr[4] = (u8)(data_lo >> 8);
- mac_addr[3] = (u8)(data_lo >> 16);
- mac_addr[2] = (u8)(data_lo >> 24);
-
- mac_addr[1] = (u8)data_hi;
- mac_addr[0] = (u8)(data_hi >> 8);
- rc = 0;
+ goto unlock_alu;
}
+
+ ret = ksz_read64(dev, regs[REG_IND_DATA_HI], &buf);
+ if (ret)
+ goto unlock_alu;
+
+ data_hi = (u32)(buf >> 32);
+ data_lo = (u32)buf;
+
+	/* Check how many valid entries are in the table. */
+ cnt = data & masks[DYNAMIC_MAC_TABLE_ENTRIES_H];
+ cnt <<= shifts[DYNAMIC_MAC_ENTRIES_H];
+ cnt |= (data_hi & masks[DYNAMIC_MAC_TABLE_ENTRIES]) >>
+ shifts[DYNAMIC_MAC_ENTRIES];
+ *entries = cnt + 1;
+
+ *fid = (data_hi & masks[DYNAMIC_MAC_TABLE_FID]) >>
+ shifts[DYNAMIC_MAC_FID];
+ *src_port = (data_hi & masks[DYNAMIC_MAC_TABLE_SRC_PORT]) >>
+ shifts[DYNAMIC_MAC_SRC_PORT];
+
+ mac_addr[5] = (u8)data_lo;
+ mac_addr[4] = (u8)(data_lo >> 8);
+ mac_addr[3] = (u8)(data_lo >> 16);
+ mac_addr[2] = (u8)(data_lo >> 24);
+
+ mac_addr[1] = (u8)data_hi;
+ mac_addr[0] = (u8)(data_hi >> 8);
+
+unlock_alu:
mutex_unlock(&dev->alu_mutex);
- return rc;
+ return ret;
}
static int ksz8_r_sta_mac_table(struct ksz_device *dev, u16 addr,
@@ -507,11 +634,11 @@ static int ksz8_r_sta_mac_table(struct ksz_device *dev, u16 addr,
shifts[STATIC_MAC_FWD_PORTS];
alu->is_override = (data_hi & masks[STATIC_MAC_TABLE_OVERRIDE]) ? 1 : 0;
- /* KSZ8795 family switches have STATIC_MAC_TABLE_USE_FID and
+ /* KSZ8795/KSZ8895 family switches have STATIC_MAC_TABLE_USE_FID and
* STATIC_MAC_TABLE_FID definitions off by 1 when doing read on the
* static MAC table compared to doing write.
*/
- if (ksz_is_ksz87xx(dev))
+ if (ksz_is_ksz87xx(dev) || ksz_is_8895_family(dev))
data_hi >>= 1;
alu->is_static = true;
alu->is_use_fid = (data_hi & masks[STATIC_MAC_TABLE_USE_FID]) ? 1 : 0;
@@ -1153,12 +1280,15 @@ int ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
void ksz8_cfg_port_member(struct ksz_device *dev, int port, u8 member)
{
+ int offset = P_MIRROR_CTRL;
u8 data;
- ksz_pread8(dev, port, P_MIRROR_CTRL, &data);
- data &= ~PORT_VLAN_MEMBERSHIP;
+ if (ksz_is_ksz8463(dev))
+ offset = P1CR2;
+ ksz_pread8(dev, port, offset, &data);
+ data &= ~dev->port_mask;
data |= (member & dev->port_mask);
- ksz_pwrite8(dev, port, P_MIRROR_CTRL, data);
+ ksz_pwrite8(dev, port, offset, data);
}
void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port)
@@ -1166,6 +1296,8 @@ void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port)
u8 learn[DSA_MAX_PORTS];
int first, index, cnt;
const u16 *regs;
+ int reg = S_FLUSH_TABLE_CTRL;
+ int mask = SW_FLUSH_DYN_MAC_TABLE;
regs = dev->info->regs;
@@ -1183,7 +1315,11 @@ void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port)
ksz_pwrite8(dev, index, regs[P_STP_CTRL],
learn[index] | PORT_LEARN_DISABLE);
}
- ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_DYN_MAC_TABLE, true);
+ if (ksz_is_ksz8463(dev)) {
+ reg = KSZ8463_FLUSH_TABLE_CTRL;
+ mask = KSZ8463_FLUSH_DYN_MAC_TABLE;
+ }
+ ksz_cfg(dev, reg, mask, true);
for (index = first; index < cnt; index++) {
if (!(learn[index] & PORT_LEARN_DISABLE))
ksz_pwrite8(dev, index, regs[P_STP_CTRL], learn[index]);
@@ -1193,28 +1329,28 @@ void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port)
int ksz8_fdb_dump(struct ksz_device *dev, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
- int ret = 0;
- u16 i = 0;
- u16 entries = 0;
- u8 timestamp = 0;
- u8 fid;
- u8 src_port;
u8 mac[ETH_ALEN];
+ u8 src_port, fid;
+ u16 entries = 0;
+ int ret, i;
- do {
+ for (i = 0; i < KSZ8_DYN_MAC_ENTRIES; i++) {
ret = ksz8_r_dyn_mac_table(dev, i, mac, &fid, &src_port,
- &timestamp, &entries);
- if (!ret && port == src_port) {
+ &entries);
+ if (ret)
+ return ret;
+
+ if (i >= entries)
+ return 0;
+
+ if (port == src_port) {
ret = cb(mac, fid, false, data);
if (ret)
- break;
+ return ret;
}
- i++;
- } while (i < entries);
- if (i >= entries)
- ret = 0;
+ }
- return ret;
+ return 0;
}
static int ksz8_add_sta_mac(struct ksz_device *dev, int port,
@@ -1322,7 +1458,7 @@ int ksz8_fdb_del(struct ksz_device *dev, int port, const unsigned char *addr,
int ksz8_port_vlan_filtering(struct ksz_device *dev, int port, bool flag,
struct netlink_ext_ack *extack)
{
- if (ksz_is_ksz88x3(dev))
+ if (ksz_is_ksz88x3(dev) || ksz_is_ksz8463(dev))
return -ENOTSUPP;
/* Discard packets with VID not enabled on the switch */
@@ -1338,9 +1474,12 @@ int ksz8_port_vlan_filtering(struct ksz_device *dev, int port, bool flag,
static void ksz8_port_enable_pvid(struct ksz_device *dev, int port, bool state)
{
- if (ksz_is_ksz88x3(dev)) {
- ksz_cfg(dev, REG_SW_INSERT_SRC_PVID,
- 0x03 << (4 - 2 * port), state);
+ if (ksz_is_ksz88x3(dev) || ksz_is_ksz8463(dev)) {
+ int reg = REG_SW_INSERT_SRC_PVID;
+
+ if (ksz_is_ksz8463(dev))
+ reg = KSZ8463_REG_SW_CTRL_9;
+ ksz_cfg(dev, reg, 0x03 << (4 - 2 * port), state);
} else {
ksz_pwrite8(dev, port, REG_PORT_CTRL_12, state ? 0x0f : 0x00);
}
@@ -1355,7 +1494,7 @@ int ksz8_port_vlan_add(struct ksz_device *dev, int port,
u16 data, new_pvid = 0;
u8 fid, member, valid;
- if (ksz_is_ksz88x3(dev))
+ if (ksz_is_ksz88x3(dev) || ksz_is_ksz8463(dev))
return -ENOTSUPP;
/* If a VLAN is added with untagged flag different from the
@@ -1424,7 +1563,7 @@ int ksz8_port_vlan_del(struct ksz_device *dev, int port,
u16 data, pvid;
u8 fid, member, valid;
- if (ksz_is_ksz88x3(dev))
+ if (ksz_is_ksz88x3(dev) || ksz_is_ksz8463(dev))
return -ENOTSUPP;
ksz_pread16(dev, port, REG_PORT_CTRL_VID, &pvid);
@@ -1454,19 +1593,23 @@ int ksz8_port_mirror_add(struct ksz_device *dev, int port,
struct dsa_mall_mirror_tc_entry *mirror,
bool ingress, struct netlink_ext_ack *extack)
{
+ int offset = P_MIRROR_CTRL;
+
+ if (ksz_is_ksz8463(dev))
+ offset = P1CR2;
if (ingress) {
- ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, true);
+ ksz_port_cfg(dev, port, offset, PORT_MIRROR_RX, true);
dev->mirror_rx |= BIT(port);
} else {
- ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, true);
+ ksz_port_cfg(dev, port, offset, PORT_MIRROR_TX, true);
dev->mirror_tx |= BIT(port);
}
- ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_SNIFFER, false);
+ ksz_port_cfg(dev, port, offset, PORT_MIRROR_SNIFFER, false);
/* configure mirror port */
if (dev->mirror_rx || dev->mirror_tx)
- ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
+ ksz_port_cfg(dev, mirror->to_local_port, offset,
PORT_MIRROR_SNIFFER, true);
return 0;
@@ -1475,20 +1618,23 @@ int ksz8_port_mirror_add(struct ksz_device *dev, int port,
void ksz8_port_mirror_del(struct ksz_device *dev, int port,
struct dsa_mall_mirror_tc_entry *mirror)
{
+ int offset = P_MIRROR_CTRL;
u8 data;
+ if (ksz_is_ksz8463(dev))
+ offset = P1CR2;
if (mirror->ingress) {
- ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, false);
+ ksz_port_cfg(dev, port, offset, PORT_MIRROR_RX, false);
dev->mirror_rx &= ~BIT(port);
} else {
- ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, false);
+ ksz_port_cfg(dev, port, offset, PORT_MIRROR_TX, false);
dev->mirror_tx &= ~BIT(port);
}
- ksz_pread8(dev, port, P_MIRROR_CTRL, &data);
+ ksz_pread8(dev, port, offset, &data);
if (!dev->mirror_rx && !dev->mirror_tx)
- ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
+ ksz_port_cfg(dev, mirror->to_local_port, offset,
PORT_MIRROR_SNIFFER, false);
}
@@ -1510,34 +1656,42 @@ static void ksz8795_cpu_interface_select(struct ksz_device *dev, int port)
void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port)
{
+ const u16 *regs = dev->info->regs;
struct dsa_switch *ds = dev->ds;
const u32 *masks;
+ int offset;
u8 member;
masks = dev->info->masks;
/* enable broadcast storm limit */
- ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true);
+ offset = P_BCAST_STORM_CTRL;
+ if (ksz_is_ksz8463(dev))
+ offset = P1CR1;
+ ksz_port_cfg(dev, port, offset, PORT_BROADCAST_STORM, true);
- if (!ksz_is_ksz88x3(dev))
- ksz8795_set_prio_queue(dev, port, 4);
-
- /* disable DiffServ priority */
- ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_DIFFSERV_ENABLE, false);
+ ksz8_port_queue_split(dev, port, dev->info->num_tx_queues);
/* replace priority */
- ksz_port_cfg(dev, port, P_802_1P_CTRL,
+ offset = P_802_1P_CTRL;
+ if (ksz_is_ksz8463(dev))
+ offset = P1CR2;
+ ksz_port_cfg(dev, port, offset,
masks[PORT_802_1P_REMAPPING], false);
- /* enable 802.1p priority */
- ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_ENABLE, true);
-
if (cpu_port)
member = dsa_user_ports(ds);
else
member = BIT(dsa_upstream_port(ds, port));
ksz8_cfg_port_member(dev, port, member);
+
+ /* Disable all WoL options by default. Otherwise
+ * ksz_switch_macaddr_get/put logic will not work properly.
+ * CPU port 4 has no WoL functionality.
+ */
+ if (ksz_is_ksz87xx(dev) && !cpu_port)
+ ksz8_pme_pwrite8(dev, port, regs[REG_PORT_PME_CTRL], 0);
}
static void ksz88x3_config_rmii_clk(struct ksz_device *dev)
@@ -1562,6 +1716,7 @@ void ksz8_config_cpu_port(struct dsa_switch *ds)
const u32 *masks;
const u16 *regs;
u8 remote;
+ u8 fiber_ports = 0;
int i;
masks = dev->info->masks;
@@ -1580,7 +1735,8 @@ void ksz8_config_cpu_port(struct dsa_switch *ds)
for (i = 0; i < dev->phy_port_cnt; i++) {
p = &dev->ports[i];
- if (!ksz_is_ksz88x3(dev)) {
+ /* For KSZ8795 family. */
+ if (ksz_is_ksz87xx(dev)) {
ksz_pread8(dev, i, regs[P_REMOTE_STATUS], &remote);
if (remote & KSZ8_PORT_FIBER_MODE)
p->fiber = 1;
@@ -1591,6 +1747,32 @@ void ksz8_config_cpu_port(struct dsa_switch *ds)
else
ksz_port_cfg(dev, i, regs[P_STP_CTRL],
PORT_FORCE_FLOW_CTRL, false);
+ if (p->fiber)
+ fiber_ports |= (1 << i);
+ }
+ if (ksz_is_ksz8463(dev)) {
+ /* Setup fiber ports. */
+ if (fiber_ports) {
+ fiber_ports &= 3;
+ regmap_update_bits(ksz_regmap_16(dev),
+ KSZ8463_REG_CFG_CTRL,
+ fiber_ports << PORT_COPPER_MODE_S,
+ 0);
+ regmap_update_bits(ksz_regmap_16(dev),
+ KSZ8463_REG_DSP_CTRL_6,
+ COPPER_RECEIVE_ADJUSTMENT, 0);
+ }
+
+		/* Turn off the PTP function as the switch's proprietary way of
+		 * handling timestamps is not supported by the current Linux
+		 * PTP stack implementation.
+ */
+ regmap_update_bits(ksz_regmap_16(dev),
+ KSZ8463_PTP_MSG_CONF1,
+ PTP_ENABLE, 0);
+ regmap_update_bits(ksz_regmap_16(dev),
+ KSZ8463_PTP_CLK_CTRL,
+ PTP_CLK_ENABLE, 0);
}
}
@@ -1701,11 +1883,15 @@ static void ksz8_cpu_port_link_up(struct ksz_device *dev, int speed, int duplex,
SW_10_MBIT, ctrl);
}
-void ksz8_phylink_mac_link_up(struct ksz_device *dev, int port,
- unsigned int mode, phy_interface_t interface,
- struct phy_device *phydev, int speed, int duplex,
+void ksz8_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev, unsigned int mode,
+ phy_interface_t interface, int speed, int duplex,
bool tx_pause, bool rx_pause)
{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ksz_device *dev = dp->ds->priv;
+ int port = dp->index;
+
/* If the port is the CPU port, apply special handling. Only the CPU
* port is configured via global registers.
*/
@@ -1749,7 +1935,8 @@ int ksz8_enable_stp_addr(struct ksz_device *dev)
int ksz8_setup(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
- int i;
+ const u16 *regs = dev->info->regs;
+ int i, ret = 0;
ds->mtu_enforcement_ingress = true;
@@ -1767,28 +1954,45 @@ int ksz8_setup(struct dsa_switch *ds)
ksz_cfg(dev, S_LINK_AGING_CTRL, SW_LINK_AUTO_AGING, true);
/* Enable aggressive back off algorithm in half duplex mode. */
- regmap_update_bits(ksz_regmap_8(dev), REG_SW_CTRL_1,
- SW_AGGR_BACKOFF, SW_AGGR_BACKOFF);
+ ret = ksz_rmw8(dev, REG_SW_CTRL_1, SW_AGGR_BACKOFF, SW_AGGR_BACKOFF);
+ if (ret)
+ return ret;
/*
* Make sure unicast VLAN boundary is set as default and
* enable no excessive collision drop.
*/
- regmap_update_bits(ksz_regmap_8(dev), REG_SW_CTRL_2,
- UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP,
- UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP);
+ ret = ksz_rmw8(dev, REG_SW_CTRL_2,
+ UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP,
+ UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP);
+ if (ret)
+ return ret;
ksz_cfg(dev, S_REPLACE_VID_CTRL, SW_REPLACE_VID, false);
ksz_cfg(dev, S_MIRROR_CTRL, SW_MIRROR_RX_TX, false);
- if (!ksz_is_ksz88x3(dev))
+ if (!ksz_is_ksz88x3(dev) && !ksz_is_ksz8463(dev))
ksz_cfg(dev, REG_SW_CTRL_19, SW_INS_TAG_ENABLE, true);
for (i = 0; i < (dev->info->num_vlans / 4); i++)
ksz8_r_vlan_entries(dev, i);
- return ksz8_handle_global_errata(ds);
+ /* Make sure PME (WoL) is not enabled. If requested, it will
+ * be enabled by ksz_wol_pre_shutdown(). Otherwise, some PMICs
+	 * do not like PME event changes before shutdown. PME is only
+	 * available on the KSZ87xx family.
+ */
+ if (ksz_is_ksz87xx(dev)) {
+ ret = ksz8_pme_write8(dev, regs[REG_SW_PME_CTRL], 0);
+ if (!ret)
+ ret = ksz_rmw8(dev, REG_INT_ENABLE, INT_PME, 0);
+ }
+
+	if (ret)
+		return ret;
+
+	return ksz8_handle_global_errata(ds);
}
void ksz8_get_caps(struct ksz_device *dev, int port,
@@ -1814,6 +2018,84 @@ u32 ksz8_get_port_addr(int port, int offset)
return PORT_CTRL_ADDR(port, offset);
}
+u32 ksz8463_get_port_addr(int port, int offset)
+{
+ return offset + 0x18 * port;
+}
+
+static u16 ksz8463_get_phy_addr(u16 phy, u16 reg, u16 offset)
+{
+ return offset + reg * 2 + phy * (P2MBCR - P1MBCR);
+}
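
The address arithmetic above can be cross-checked against the KSZ8463 register block added to ksz8_reg.h later in this patch: MII registers are 16 bits wide (hence reg * 2) and the two PHY register blocks are P2MBCR - P1MBCR = 0x0C bytes apart. A self-contained sketch with copied constants (prefixed EX_ to mark them as local stand-ins, not new defines):

#include <linux/types.h>

#define EX_P1MBCR	0x4C
#define EX_P2MBCR	0x58

/* Same arithmetic as ksz8463_get_phy_addr(): 2 bytes per 16-bit MII
 * register plus one 0x0C-byte block per PHY. For phy 1 and MII_LPA (0x05)
 * this gives 0x4C + 0x0A + 0x0C = 0x62, i.e. P2ANLPR in ksz8_reg.h.
 */
static u16 ksz8463_phy_addr_example(u16 phy, u16 reg)
{
	return EX_P1MBCR + reg * 2 + phy * (EX_P2MBCR - EX_P1MBCR);
}
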
+
+int ksz8463_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
+{
+ u16 sw_reg = 0;
+ u16 data = 0;
+ int ret;
+
+ if (phy > 1)
+ return -ENOSPC;
+ switch (reg) {
+ case MII_PHYSID1:
+ sw_reg = ksz8463_get_phy_addr(phy, 0, PHY1IHR);
+ break;
+ case MII_PHYSID2:
+ sw_reg = ksz8463_get_phy_addr(phy, 0, PHY1ILR);
+ break;
+ case MII_BMCR:
+ case MII_BMSR:
+ case MII_ADVERTISE:
+ case MII_LPA:
+ sw_reg = ksz8463_get_phy_addr(phy, reg, P1MBCR);
+ break;
+ case MII_TPISTATUS:
+		/* This register holds the PHY interrupt status of the
+		 * simulated Micrel KSZ PHY.
+ */
+ data = 0x0505;
+ break;
+ default:
+ break;
+ }
+ if (sw_reg) {
+ ret = ksz_read16(dev, sw_reg, &data);
+ if (ret)
+ return ret;
+ }
+ *val = data;
+
+ return 0;
+}
+
+int ksz8463_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
+{
+ u16 sw_reg = 0;
+ int ret;
+
+ if (phy > 1)
+ return -ENOSPC;
+
+ /* No write to fiber port. */
+ if (dev->ports[phy].fiber)
+ return 0;
+ switch (reg) {
+ case MII_BMCR:
+ case MII_ADVERTISE:
+ sw_reg = ksz8463_get_phy_addr(phy, reg, P1MBCR);
+ break;
+ default:
+ break;
+ }
+ if (sw_reg) {
+ ret = ksz_write16(dev, sw_reg, val);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
int ksz8_switch_init(struct ksz_device *dev)
{
dev->cpu_port = fls(dev->info->cpu_ports) - 1;
diff --git a/drivers/net/dsa/microchip/ksz8.h b/drivers/net/dsa/microchip/ksz8.h
index 1a5225264e6a..0f2cd1474b44 100644
--- a/drivers/net/dsa/microchip/ksz8.h
+++ b/drivers/net/dsa/microchip/ksz8.h
@@ -19,8 +19,6 @@ void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port);
void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port);
int ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);
int ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val);
-int ksz8_r_dyn_mac_table(struct ksz_device *dev, u16 addr, u8 *mac_addr,
- u8 *fid, u8 *src_port, u8 *timestamp, u16 *entries);
void ksz8_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt);
void ksz8_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
u64 *dropped, u64 *cnt);
@@ -56,9 +54,17 @@ int ksz8_reset_switch(struct ksz_device *dev);
int ksz8_switch_init(struct ksz_device *dev);
void ksz8_switch_exit(struct ksz_device *dev);
int ksz8_change_mtu(struct ksz_device *dev, int port, int mtu);
-void ksz8_phylink_mac_link_up(struct ksz_device *dev, int port,
- unsigned int mode, phy_interface_t interface,
- struct phy_device *phydev, int speed, int duplex,
+int ksz8_pme_write8(struct ksz_device *dev, u32 reg, u8 value);
+int ksz8_pme_pread8(struct ksz_device *dev, int port, int offset, u8 *data);
+int ksz8_pme_pwrite8(struct ksz_device *dev, int port, int offset, u8 data);
+void ksz8_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev, unsigned int mode,
+ phy_interface_t interface, int speed, int duplex,
bool tx_pause, bool rx_pause);
+int ksz8_all_queues_split(struct ksz_device *dev, int queues);
+
+u32 ksz8463_get_port_addr(int port, int offset);
+int ksz8463_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);
+int ksz8463_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val);
#endif
diff --git a/drivers/net/dsa/microchip/ksz8863_smi.c b/drivers/net/dsa/microchip/ksz8863_smi.c
index 5711a59e2ac9..a8bfcd917bf7 100644
--- a/drivers/net/dsa/microchip/ksz8863_smi.c
+++ b/drivers/net/dsa/microchip/ksz8863_smi.c
@@ -199,11 +199,11 @@ static void ksz8863_smi_shutdown(struct mdio_device *mdiodev)
static const struct of_device_id ksz8863_dt_ids[] = {
{
.compatible = "microchip,ksz8863",
- .data = &ksz_switch_chips[KSZ8830]
+ .data = &ksz_switch_chips[KSZ88X3]
},
{
.compatible = "microchip,ksz8873",
- .data = &ksz_switch_chips[KSZ8830]
+ .data = &ksz_switch_chips[KSZ88X3]
},
{ },
};
diff --git a/drivers/net/dsa/microchip/ksz8795_reg.h b/drivers/net/dsa/microchip/ksz8_reg.h
index 7c9341ef73b0..332408567b47 100644
--- a/drivers/net/dsa/microchip/ksz8795_reg.h
+++ b/drivers/net/dsa/microchip/ksz8_reg.h
@@ -1,13 +1,18 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Microchip KSZ8795 register definitions
+ * Microchip KSZ8XXX series register definitions
+ *
+ * The base for these definitions is KSZ8795 but unless indicated
+ * differently by their prefix, they apply to all KSZ8 series
+ * devices. Registers and masks that do change are defined in
+ * dedicated structures in ksz_common.c.
*
* Copyright (c) 2017 Microchip Technology Inc.
* Tristram Ha <Tristram.Ha@microchip.com>
*/
-#ifndef __KSZ8795_REG_H
-#define __KSZ8795_REG_H
+#ifndef __KSZ8_REG_H
+#define __KSZ8_REG_H
#define KS_PORT_M 0x1F
@@ -124,7 +129,8 @@
#define PORT_BASED_PRIO_3 3
#define PORT_INSERT_TAG BIT(2)
#define PORT_REMOVE_TAG BIT(1)
-#define PORT_QUEUE_SPLIT_L BIT(0)
+#define KSZ8795_PORT_2QUEUE_SPLIT_EN BIT(0)
+#define KSZ8873_PORT_4QUEUE_SPLIT_EN BIT(0)
#define REG_PORT_1_CTRL_1 0x11
#define REG_PORT_2_CTRL_1 0x21
@@ -143,6 +149,7 @@
#define REG_PORT_4_CTRL_2 0x42
#define REG_PORT_5_CTRL_2 0x52
+#define KSZ8873_PORT_2QUEUE_SPLIT_EN BIT(7)
#define PORT_INGRESS_FILTER BIT(6)
#define PORT_DISCARD_NON_VID BIT(5)
#define PORT_FORCE_FLOW_CTRL BIT(4)
@@ -357,8 +364,6 @@
#define REG_IND_DATA_1 0x77
#define REG_IND_DATA_0 0x78
-#define REG_IND_DATA_PME_EEE_ACL 0xA0
-
#define REG_INT_STATUS 0x7C
#define REG_INT_ENABLE 0x7D
@@ -463,10 +468,7 @@
#define REG_PORT_4_CTRL_13 0xE1
#define REG_PORT_5_CTRL_13 0xF1
-#define PORT_QUEUE_SPLIT_H BIT(1)
-#define PORT_QUEUE_SPLIT_1 0
-#define PORT_QUEUE_SPLIT_2 1
-#define PORT_QUEUE_SPLIT_4 2
+#define KSZ8795_PORT_4QUEUE_SPLIT_EN BIT(1)
#define PORT_DROP_TAG BIT(0)
#define REG_PORT_1_CTRL_14 0xB2
@@ -705,8 +707,6 @@
#define KSZ8795_ID_LO 0x1550
#define KSZ8863_ID_LO 0x1430
-#define KSZ8795_SW_ID 0x8795
-
#define PHY_REG_LINK_MD 0x1D
#define PHY_START_CABLE_DIAG BIT(15)
@@ -729,6 +729,55 @@
#define PHY_POWER_SAVING_ENABLE BIT(2)
#define PHY_REMOTE_LOOPBACK BIT(1)
+/* KSZ8463 specific registers. */
+#define P1MBCR 0x4C
+#define P1MBSR 0x4E
+#define PHY1ILR 0x50
+#define PHY1IHR 0x52
+#define P1ANAR 0x54
+#define P1ANLPR 0x56
+#define P2MBCR 0x58
+#define P2MBSR 0x5A
+#define PHY2ILR 0x5C
+#define PHY2IHR 0x5E
+#define P2ANAR 0x60
+#define P2ANLPR 0x62
+
+#define P1CR1 0x6C
+#define P1CR2 0x6E
+#define P1CR3 0x72
+#define P1CR4 0x7E
+#define P1SR 0x80
+
+#define KSZ8463_FLUSH_TABLE_CTRL 0xAD
+
+#define KSZ8463_FLUSH_DYN_MAC_TABLE BIT(2)
+#define KSZ8463_FLUSH_STA_MAC_TABLE BIT(1)
+
+#define KSZ8463_REG_SW_CTRL_9 0xAE
+
+#define KSZ8463_REG_CFG_CTRL 0xD8
+
+#define PORT_2_COPPER_MODE BIT(7)
+#define PORT_1_COPPER_MODE BIT(6)
+#define PORT_COPPER_MODE_S 6
+
+#define KSZ8463_REG_SW_RESET 0x126
+
+#define KSZ8463_GLOBAL_SOFTWARE_RESET BIT(0)
+
+#define KSZ8463_PTP_CLK_CTRL 0x600
+
+#define PTP_CLK_ENABLE BIT(1)
+
+#define KSZ8463_PTP_MSG_CONF1 0x620
+
+#define PTP_ENABLE BIT(6)
+
+#define KSZ8463_REG_DSP_CTRL_6 0x734
+
+#define COPPER_RECEIVE_ADJUSTMENT BIT(13)
+
/* Chip resource */
#define PRIO_QUEUES 4
@@ -784,7 +833,9 @@
#define KSZ8795_MIB_TOTAL_TX_1 0x105
#define KSZ8863_MIB_PACKET_DROPPED_TX_0 0x100
-#define KSZ8863_MIB_PACKET_DROPPED_RX_0 0x105
+#define KSZ8863_MIB_PACKET_DROPPED_RX_0 0x103
+
+#define KSZ8895_MIB_PACKET_DROPPED_RX_0 0x105
#define MIB_PACKET_DROPPED 0x0000FFFF
@@ -794,5 +845,6 @@
#define TAIL_TAG_LOOKUP BIT(7)
#define FID_ENTRIES 128
+#define KSZ8_DYN_MAC_ENTRIES 1024
#endif
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index 7f745628c84d..5facffbb9c9a 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -2,7 +2,7 @@
/*
* Microchip KSZ9477 switch driver main logic
*
- * Copyright (C) 2017-2019 Microchip Technology Inc.
+ * Copyright (C) 2017-2025 Microchip Technology Inc.
*/
#include <linux/kernel.h>
@@ -56,187 +56,6 @@ int ksz9477_change_mtu(struct ksz_device *dev, int port, int mtu)
REG_SW_MTU_MASK, frame_size);
}
-/**
- * ksz9477_handle_wake_reason - Handle wake reason on a specified port.
- * @dev: The device structure.
- * @port: The port number.
- *
- * This function reads the PME (Power Management Event) status register of a
- * specified port to determine the wake reason. If there is no wake event, it
- * returns early. Otherwise, it logs the wake reason which could be due to a
- * "Magic Packet", "Link Up", or "Energy Detect" event. The PME status register
- * is then cleared to acknowledge the handling of the wake event.
- *
- * Return: 0 on success, or an error code on failure.
- */
-static int ksz9477_handle_wake_reason(struct ksz_device *dev, int port)
-{
- u8 pme_status;
- int ret;
-
- ret = ksz_pread8(dev, port, REG_PORT_PME_STATUS, &pme_status);
- if (ret)
- return ret;
-
- if (!pme_status)
- return 0;
-
- dev_dbg(dev->dev, "Wake event on port %d due to:%s%s%s\n", port,
- pme_status & PME_WOL_MAGICPKT ? " \"Magic Packet\"" : "",
- pme_status & PME_WOL_LINKUP ? " \"Link Up\"" : "",
- pme_status & PME_WOL_ENERGY ? " \"Energy detect\"" : "");
-
- return ksz_pwrite8(dev, port, REG_PORT_PME_STATUS, pme_status);
-}
-
-/**
- * ksz9477_get_wol - Get Wake-on-LAN settings for a specified port.
- * @dev: The device structure.
- * @port: The port number.
- * @wol: Pointer to ethtool Wake-on-LAN settings structure.
- *
- * This function checks the PME Pin Control Register to see if PME Pin Output
- * Enable is set, indicating PME is enabled. If enabled, it sets the supported
- * and active WoL flags.
- */
-void ksz9477_get_wol(struct ksz_device *dev, int port,
- struct ethtool_wolinfo *wol)
-{
- u8 pme_ctrl;
- int ret;
-
- if (!dev->wakeup_source)
- return;
-
- wol->supported = WAKE_PHY;
-
- /* Check if the current MAC address on this port can be set
- * as global for WAKE_MAGIC support. The result may vary
- * dynamically based on other ports configurations.
- */
- if (ksz_is_port_mac_global_usable(dev->ds, port))
- wol->supported |= WAKE_MAGIC;
-
- ret = ksz_pread8(dev, port, REG_PORT_PME_CTRL, &pme_ctrl);
- if (ret)
- return;
-
- if (pme_ctrl & PME_WOL_MAGICPKT)
- wol->wolopts |= WAKE_MAGIC;
- if (pme_ctrl & (PME_WOL_LINKUP | PME_WOL_ENERGY))
- wol->wolopts |= WAKE_PHY;
-}
-
-/**
- * ksz9477_set_wol - Set Wake-on-LAN settings for a specified port.
- * @dev: The device structure.
- * @port: The port number.
- * @wol: Pointer to ethtool Wake-on-LAN settings structure.
- *
- * This function configures Wake-on-LAN (WoL) settings for a specified port.
- * It validates the provided WoL options, checks if PME is enabled via the
- * switch's PME Pin Control Register, clears any previous wake reasons,
- * and sets the Magic Packet flag in the port's PME control register if
- * specified.
- *
- * Return: 0 on success, or other error codes on failure.
- */
-int ksz9477_set_wol(struct ksz_device *dev, int port,
- struct ethtool_wolinfo *wol)
-{
- u8 pme_ctrl = 0, pme_ctrl_old = 0;
- bool magic_switched_off;
- bool magic_switched_on;
- int ret;
-
- if (wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
- return -EINVAL;
-
- if (!dev->wakeup_source)
- return -EOPNOTSUPP;
-
- ret = ksz9477_handle_wake_reason(dev, port);
- if (ret)
- return ret;
-
- if (wol->wolopts & WAKE_MAGIC)
- pme_ctrl |= PME_WOL_MAGICPKT;
- if (wol->wolopts & WAKE_PHY)
- pme_ctrl |= PME_WOL_LINKUP | PME_WOL_ENERGY;
-
- ret = ksz_pread8(dev, port, REG_PORT_PME_CTRL, &pme_ctrl_old);
- if (ret)
- return ret;
-
- if (pme_ctrl_old == pme_ctrl)
- return 0;
-
- magic_switched_off = (pme_ctrl_old & PME_WOL_MAGICPKT) &&
- !(pme_ctrl & PME_WOL_MAGICPKT);
- magic_switched_on = !(pme_ctrl_old & PME_WOL_MAGICPKT) &&
- (pme_ctrl & PME_WOL_MAGICPKT);
-
- /* To keep reference count of MAC address, we should do this
- * operation only on change of WOL settings.
- */
- if (magic_switched_on) {
- ret = ksz_switch_macaddr_get(dev->ds, port, NULL);
- if (ret)
- return ret;
- } else if (magic_switched_off) {
- ksz_switch_macaddr_put(dev->ds);
- }
-
- ret = ksz_pwrite8(dev, port, REG_PORT_PME_CTRL, pme_ctrl);
- if (ret) {
- if (magic_switched_on)
- ksz_switch_macaddr_put(dev->ds);
- return ret;
- }
-
- return 0;
-}
-
-/**
- * ksz9477_wol_pre_shutdown - Prepares the switch device for shutdown while
- * considering Wake-on-LAN (WoL) settings.
- * @dev: The switch device structure.
- * @wol_enabled: Pointer to a boolean which will be set to true if WoL is
- * enabled on any port.
- *
- * This function prepares the switch device for a safe shutdown while taking
- * into account the Wake-on-LAN (WoL) settings on the user ports. It updates
- * the wol_enabled flag accordingly to reflect whether WoL is active on any
- * port.
- */
-void ksz9477_wol_pre_shutdown(struct ksz_device *dev, bool *wol_enabled)
-{
- struct dsa_port *dp;
- int ret;
-
- *wol_enabled = false;
-
- if (!dev->wakeup_source)
- return;
-
- dsa_switch_for_each_user_port(dp, dev->ds) {
- u8 pme_ctrl = 0;
-
- ret = ksz_pread8(dev, dp->index, REG_PORT_PME_CTRL, &pme_ctrl);
- if (!ret && pme_ctrl)
- *wol_enabled = true;
-
- /* make sure there are no pending wake events which would
- * prevent the device from going to sleep/shutdown.
- */
- ksz9477_handle_wake_reason(dev, dp->index);
- }
-
- /* Now we are save to enable PME pin. */
- if (*wol_enabled)
- ksz_write8(dev, REG_SW_PME_CTRL, PME_ENABLE);
-}
-
static int ksz9477_wait_vlan_ctrl_ready(struct ksz_device *dev)
{
unsigned int val;
@@ -342,6 +161,190 @@ static int ksz9477_wait_alu_sta_ready(struct ksz_device *dev)
10, 1000);
}
+static void port_sgmii_s(struct ksz_device *dev, uint port, u16 devid, u16 reg)
+{
+ u32 data;
+
+ data = (devid & MII_MMD_CTRL_DEVAD_MASK) << 16;
+ data |= reg;
+ ksz_pwrite32(dev, port, REG_PORT_SGMII_ADDR__4, data);
+}
+
+static void port_sgmii_r(struct ksz_device *dev, uint port, u16 devid, u16 reg,
+ u16 *buf)
+{
+ port_sgmii_s(dev, port, devid, reg);
+ ksz_pread16(dev, port, REG_PORT_SGMII_DATA__4 + 2, buf);
+}
+
+static void port_sgmii_w(struct ksz_device *dev, uint port, u16 devid, u16 reg,
+ u16 buf)
+{
+ port_sgmii_s(dev, port, devid, reg);
+ ksz_pwrite32(dev, port, REG_PORT_SGMII_DATA__4, buf);
+}
+
+static int ksz9477_pcs_read(struct mii_bus *bus, int phy, int mmd, int reg)
+{
+ struct ksz_device *dev = bus->priv;
+ int port = ksz_get_sgmii_port(dev);
+ u16 val;
+
+ port_sgmii_r(dev, port, mmd, reg, &val);
+
+ /* Simulate a value to activate special code in the XPCS driver if
+ * supported.
+ */
+ if (mmd == MDIO_MMD_PMAPMD) {
+ if (reg == MDIO_DEVID1)
+ val = 0x9477;
+ else if (reg == MDIO_DEVID2)
+ val = 0x22 << 10;
+ } else if (mmd == MDIO_MMD_VEND2) {
+ struct ksz_port *p = &dev->ports[port];
+
+		/* The MII_BMCR register needs to be updated with the exact
+		 * speed and duplex mode when running in SGMII mode, as it is
+		 * used to detect the connected speed in that mode.
+ */
+ if (reg == MMD_SR_MII_AUTO_NEG_STATUS) {
+ int duplex, speed;
+
+ if (val & SR_MII_STAT_LINK_UP) {
+ speed = (val >> SR_MII_STAT_S) & SR_MII_STAT_M;
+ if (speed == SR_MII_STAT_1000_MBPS)
+ speed = SPEED_1000;
+ else if (speed == SR_MII_STAT_100_MBPS)
+ speed = SPEED_100;
+ else
+ speed = SPEED_10;
+
+ if (val & SR_MII_STAT_FULL_DUPLEX)
+ duplex = DUPLEX_FULL;
+ else
+ duplex = DUPLEX_HALF;
+
+ if (!p->phydev.link ||
+ p->phydev.speed != speed ||
+ p->phydev.duplex != duplex) {
+ u16 ctrl;
+
+ p->phydev.link = 1;
+ p->phydev.speed = speed;
+ p->phydev.duplex = duplex;
+ port_sgmii_r(dev, port, mmd, MII_BMCR,
+ &ctrl);
+ ctrl &= BMCR_ANENABLE;
+ ctrl |= mii_bmcr_encode_fixed(speed,
+ duplex);
+ port_sgmii_w(dev, port, mmd, MII_BMCR,
+ ctrl);
+ }
+ } else {
+ p->phydev.link = 0;
+ }
+ } else if (reg == MII_BMSR) {
+ p->phydev.link = !!(val & BMSR_LSTATUS);
+ }
+ }
+
+ return val;
+}
+
+static int ksz9477_pcs_write(struct mii_bus *bus, int phy, int mmd, int reg,
+ u16 val)
+{
+ struct ksz_device *dev = bus->priv;
+ int port = ksz_get_sgmii_port(dev);
+
+ if (mmd == MDIO_MMD_VEND2) {
+ struct ksz_port *p = &dev->ports[port];
+
+ if (reg == MMD_SR_MII_AUTO_NEG_CTRL) {
+ u16 sgmii_mode = SR_MII_PCS_SGMII << SR_MII_PCS_MODE_S;
+
+ /* Need these bits for 1000BASE-X mode to work with
+ * AN on.
+ */
+ if (!(val & sgmii_mode))
+ val |= SR_MII_SGMII_LINK_UP |
+ SR_MII_TX_CFG_PHY_MASTER;
+
+			/* The SGMII interrupt in the port cannot be masked,
+			 * so make sure the interrupt is not enabled as it is
+			 * not handled.
+ */
+ val &= ~SR_MII_AUTO_NEG_COMPLETE_INTR;
+ } else if (reg == MII_BMCR) {
+			/* The MII_ADVERTISE register needs to be written once
+			 * before auto-negotiation so that the correct
+			 * config_word is sent out after reset.
+ */
+ if ((val & BMCR_ANENABLE) && !p->sgmii_adv_write) {
+ u16 adv;
+
+ /* The SGMII port cannot disable flow control
+ * so it is better to just advertise symmetric
+ * pause.
+ */
+ port_sgmii_r(dev, port, mmd, MII_ADVERTISE,
+ &adv);
+ adv |= ADVERTISE_1000XPAUSE;
+ adv &= ~ADVERTISE_1000XPSE_ASYM;
+ port_sgmii_w(dev, port, mmd, MII_ADVERTISE,
+ adv);
+ p->sgmii_adv_write = 1;
+ } else if (val & BMCR_RESET) {
+ p->sgmii_adv_write = 0;
+ }
+ } else if (reg == MII_ADVERTISE) {
+ /* XPCS driver writes to this register so there is no
+ * need to update it for the errata.
+ */
+ p->sgmii_adv_write = 1;
+ }
+ }
+ port_sgmii_w(dev, port, mmd, reg, val);
+
+ return 0;
+}
+
+int ksz9477_pcs_create(struct ksz_device *dev)
+{
+ /* This chip has a SGMII port. */
+ if (ksz_has_sgmii_port(dev)) {
+ int port = ksz_get_sgmii_port(dev);
+ struct ksz_port *p = &dev->ports[port];
+ struct phylink_pcs *pcs;
+ struct mii_bus *bus;
+ int ret;
+
+ bus = devm_mdiobus_alloc(dev->dev);
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "ksz_pcs_mdio_bus";
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-pcs",
+ dev_name(dev->dev));
+ bus->read_c45 = &ksz9477_pcs_read;
+ bus->write_c45 = &ksz9477_pcs_write;
+ bus->parent = dev->dev;
+ bus->phy_mask = ~0;
+ bus->priv = dev;
+
+ ret = devm_mdiobus_register(dev->dev, bus);
+ if (ret)
+ return ret;
+
+ pcs = xpcs_create_pcs_mdiodev(bus, 0);
+ if (IS_ERR(pcs))
+ return PTR_ERR(pcs);
+ p->pcs = pcs;
+ }
+
+ return 0;
+}
+
int ksz9477_reset_switch(struct ksz_device *dev)
{
u8 data8;
@@ -355,10 +358,8 @@ int ksz9477_reset_switch(struct ksz_device *dev)
SPI_AUTO_EDGE_DETECTION, 0);
/* default configuration */
- ksz_read8(dev, REG_SW_LUE_CTRL_1, &data8);
- data8 = SW_AGING_ENABLE | SW_LINK_AUTO_AGING |
- SW_SRC_ADDR_FILTER | SW_FLUSH_STP_TABLE | SW_FLUSH_MSTP_TABLE;
- ksz_write8(dev, REG_SW_LUE_CTRL_1, data8);
+ ksz_write8(dev, REG_SW_LUE_CTRL_1,
+ SW_AGING_ENABLE | SW_LINK_AUTO_AGING | SW_SRC_ADDR_FILTER);
/* disable interrupts */
ksz_write32(dev, REG_SW_INT_MASK__4, SWITCH_INT_MASK);
@@ -429,6 +430,73 @@ void ksz9477_freeze_mib(struct ksz_device *dev, int port, bool freeze)
mutex_unlock(&p->mib.cnt_mutex);
}
+static int ksz9477_half_duplex_monitor(struct ksz_device *dev, int port,
+ u64 tx_late_col)
+{
+ u8 lue_ctrl;
+ u32 pmavbc;
+ u16 pqm;
+ int ret;
+
+ /* Errata DS80000754 recommends monitoring potential faults in
+ * half-duplex mode. The switch might not be able to communicate anymore
+ * in these states. If you see this message, please read the
+ * errata-sheet for more information:
+ * https://ww1.microchip.com/downloads/aemDocuments/documents/UNG/ProductDocuments/Errata/KSZ9477S-Errata-DS80000754.pdf
+ * To workaround this issue, half-duplex mode should be avoided.
+ * A software reset could be implemented to recover from this state.
+ */
+ dev_warn_once(dev->dev,
+ "Half-duplex detected on port %d, transmission halt may occur\n",
+ port);
+ if (tx_late_col != 0) {
+ /* Transmission halt with late collisions */
+ dev_crit_once(dev->dev,
+ "TX late collisions detected, transmission may be halted on port %d\n",
+ port);
+ }
+ ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &lue_ctrl);
+ if (ret)
+ return ret;
+ if (lue_ctrl & SW_VLAN_ENABLE) {
+ ret = ksz_pread16(dev, port, REG_PORT_QM_TX_CNT_0__4, &pqm);
+ if (ret)
+ return ret;
+
+ ret = ksz_read32(dev, REG_PMAVBC, &pmavbc);
+ if (ret)
+ return ret;
+
+ if ((FIELD_GET(PMAVBC_MASK, pmavbc) <= PMAVBC_MIN) ||
+ (FIELD_GET(PORT_QM_TX_CNT_M, pqm) >= PORT_QM_TX_CNT_MAX)) {
+ /* Transmission halt with Half-Duplex and VLAN */
+ dev_crit_once(dev->dev,
+ "resources out of limits, transmission may be halted\n");
+ }
+ }
+
+ return ret;
+}
+
+int ksz9477_errata_monitor(struct ksz_device *dev, int port,
+ u64 tx_late_col)
+{
+ u8 status;
+ int ret;
+
+ ret = ksz_pread8(dev, port, REG_PORT_STATUS_0, &status);
+ if (ret)
+ return ret;
+
+	if (FIELD_GET(PORT_INTF_SPEED_MASK, status) !=
+	    PORT_INTF_SPEED_NONE &&
+	    !(status & PORT_INTF_FULL_DUPLEX)) {
+ ret = ksz9477_half_duplex_monitor(dev, port, tx_late_col);
+ }
+
+ return ret;
+}
+
void ksz9477_port_init_cnt(struct ksz_device *dev, int port)
{
struct ksz_port_mib *mib = &dev->ports[port].mib;
@@ -1094,31 +1162,64 @@ void ksz9477_get_caps(struct ksz_device *dev, int port,
if (dev->info->gbit_capable[port])
config->mac_capabilities |= MAC_1000FD;
+
+ if (ksz_is_sgmii_port(dev, port)) {
+ struct ksz_port *p = &dev->ports[port];
+
+ phy_interface_or(config->supported_interfaces,
+ config->supported_interfaces,
+ p->pcs->supported_interfaces);
+ }
}
int ksz9477_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
{
u32 secs = msecs / 1000;
- u8 value;
- u8 data;
+ u8 data, mult, value;
+ u32 max_val;
int ret;
- value = FIELD_GET(SW_AGE_PERIOD_7_0_M, secs);
+#define MAX_TIMER_VAL ((1 << 8) - 1)
- ret = ksz_write8(dev, REG_SW_LUE_CTRL_3, value);
- if (ret < 0)
- return ret;
+ /* The aging timer comprises a 3-bit multiplier and an 8-bit second
+	 * value. Neither of them can be zero. The maximum timer is then
+ * 7 * 255 = 1785 seconds.
+ */
+ if (!secs)
+ secs = 1;
- data = FIELD_GET(SW_AGE_PERIOD_10_8_M, secs);
+ /* Return error if too large. */
+ else if (secs > 7 * MAX_TIMER_VAL)
+ return -EINVAL;
ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &value);
if (ret < 0)
return ret;
- value &= ~SW_AGE_CNT_M;
- value |= FIELD_PREP(SW_AGE_CNT_M, data);
+ /* Check whether there is need to update the multiplier. */
+ mult = FIELD_GET(SW_AGE_CNT_M, value);
+ max_val = MAX_TIMER_VAL;
+ if (mult > 0) {
+		/* Try to reuse the multiplier already in the register; e.g.
+		 * the hardware default uses multiplier 4 with 75 seconds to
+		 * get 300 seconds.
+ */
+ max_val = DIV_ROUND_UP(secs, mult);
+ if (max_val > MAX_TIMER_VAL || max_val * mult != secs)
+ max_val = MAX_TIMER_VAL;
+ }
+
+ data = DIV_ROUND_UP(secs, max_val);
+ if (mult != data) {
+ value &= ~SW_AGE_CNT_M;
+ value |= FIELD_PREP(SW_AGE_CNT_M, data);
+ ret = ksz_write8(dev, REG_SW_LUE_CTRL_0, value);
+ if (ret < 0)
+ return ret;
+ }
- return ksz_write8(dev, REG_SW_LUE_CTRL_0, value);
+ value = DIV_ROUND_UP(secs, data);
+ return ksz_write8(dev, REG_SW_LUE_CTRL_3, value);
}
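
The multiplier/seconds split above can be sanity-checked with a couple of worked values. When a usable multiplier is already programmed (the hardware default of 4 for 300 seconds), the function keeps it and only rewrites the seconds value (75). Otherwise it picks the smallest multiplier that fits, as in this standalone sketch (illustrative only; it omits the "reuse the current multiplier" path):

#include <linux/kernel.h>

/* Reproduces the fallback arithmetic of ksz9477_set_ageing_time().
 * Examples: 300 s -> mult 2, value 150; 1785 s -> mult 7, value 255.
 */
static void ksz9477_age_split_example(unsigned int secs,
				      unsigned int *mult, unsigned int *value)
{
	*mult = DIV_ROUND_UP(secs, 255);	/* 3-bit multiplier */
	*value = DIV_ROUND_UP(secs, *mult);	/* 8-bit seconds value */
}
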
void ksz9477_port_queue_split(struct ksz_device *dev, int port)
@@ -1139,6 +1240,7 @@ void ksz9477_port_queue_split(struct ksz_device *dev, int port)
void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
{
+ const u16 *regs = dev->info->regs;
struct dsa_switch *ds = dev->ds;
u16 data16;
u8 member;
@@ -1158,18 +1260,12 @@ void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
/* enable broadcast storm limit */
ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true);
- /* disable DiffServ priority */
- ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_DIFFSERV_PRIO_ENABLE, false);
-
/* replace priority */
ksz_port_cfg(dev, port, REG_PORT_MRI_MAC_CTRL, PORT_USER_PRIO_CEILING,
false);
ksz9477_port_cfg32(dev, port, REG_PORT_MTI_QUEUE_CTRL_0__4,
MTI_PVID_REPLACE, false);
- /* enable 802.1p priority */
- ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_PRIO_ENABLE, true);
-
/* force flow control for non-PHY ports only */
ksz_port_cfg(dev, port, REG_PORT_CTRL_0,
PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL,
@@ -1189,12 +1285,12 @@ void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
ksz9477_port_acl_init(dev, port);
/* clear pending wake flags */
- ksz9477_handle_wake_reason(dev, port);
+ ksz_handle_wake_reason(dev, port);
/* Disable all WoL options by default. Otherwise
* ksz_switch_macaddr_get/put logic will not work properly.
*/
- ksz_pwrite8(dev, port, REG_PORT_PME_CTRL, 0);
+ ksz_pwrite8(dev, port, regs[REG_PORT_PME_CTRL], 0);
}
void ksz9477_config_cpu_port(struct dsa_switch *ds)
@@ -1252,12 +1348,22 @@ void ksz9477_config_cpu_port(struct dsa_switch *ds)
if (i == dev->cpu_port)
continue;
ksz_port_stp_state_set(ds, i, BR_STATE_DISABLED);
+
+ /* Power down the internal PHY if port is unused. */
+ if (dsa_is_unused_port(ds, i) && dev->info->internal_phy[i])
+ ksz_pwrite16(dev, i, 0x100, BMCR_PDOWN);
}
}
+#define RESV_MCAST_CNT 8
+
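+/* Per-entry index used with ALU_STAT_INDEX when reading or updating the
+ * reserved multicast address table through the ALU static table interface.
+ */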
+static u8 reserved_mcast_map[RESV_MCAST_CNT] = { 0, 1, 3, 16, 32, 33, 2, 17 };
+
int ksz9477_enable_stp_addr(struct ksz_device *dev)
{
+ u8 i, ports, update;
const u32 *masks;
+ bool override;
u32 data;
int ret;
@@ -1266,23 +1372,87 @@ int ksz9477_enable_stp_addr(struct ksz_device *dev)
/* Enable Reserved multicast table */
ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_RESV_MCAST_ENABLE, true);
- /* Set the Override bit for forwarding BPDU packet to CPU */
- ret = ksz_write32(dev, REG_SW_ALU_VAL_B,
- ALU_V_OVERRIDE | BIT(dev->cpu_port));
- if (ret < 0)
- return ret;
+ /* The reserved multicast address table has 8 entries. Each entry has
+ * a default value specifying which ports to forward to. It is assumed
+ * the host port is the last port in most switches, but that is not
+ * the case for KSZ9477 or maybe KSZ9897. For the LAN937X family the
+ * default port is port 5, the first RGMII port. That is fine for
+ * LAN9370, a 5-port switch, but may not be correct for the other
+ * 8-port versions. The whole table therefore needs to be updated to
+ * forward to the right ports.
+ * Furthermore, PTP messages can use a reserved multicast address and
+ * the host will not receive them if this table is not correct.
+ */
+ for (i = 0; i < RESV_MCAST_CNT; i++) {
+ data = reserved_mcast_map[i] <<
+ dev->info->shifts[ALU_STAT_INDEX];
+ data |= ALU_STAT_START |
+ masks[ALU_STAT_DIRECT] |
+ masks[ALU_RESV_MCAST_ADDR] |
+ masks[ALU_STAT_READ];
+ ret = ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
+ if (ret < 0)
+ return ret;
- data = ALU_STAT_START | ALU_RESV_MCAST_ADDR | masks[ALU_STAT_WRITE];
+ /* wait to be finished */
+ ret = ksz9477_wait_alu_sta_ready(dev);
+ if (ret < 0)
+ return ret;
- ret = ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
- if (ret < 0)
- return ret;
+ ret = ksz_read32(dev, REG_SW_ALU_VAL_B, &data);
+ if (ret < 0)
+ return ret;
- /* wait to be finished */
- ret = ksz9477_wait_alu_sta_ready(dev);
- if (ret < 0) {
- dev_err(dev->dev, "Failed to update Reserved Multicast table\n");
- return ret;
+ override = false;
+ ports = data & dev->port_mask;
+ switch (i) {
+ case 0:
+ case 6:
+ /* Change the host port. */
+ update = BIT(dev->cpu_port);
+ override = true;
+ break;
+ case 2:
+ /* Change the host port. */
+ update = BIT(dev->cpu_port);
+ break;
+ case 4:
+ case 5:
+ case 7:
+ /* Skip the host port. */
+ update = dev->port_mask & ~BIT(dev->cpu_port);
+ break;
+ default:
+ update = ports;
+ break;
+ }
+ if (update != ports || override) {
+ data &= ~dev->port_mask;
+ data |= update;
+ /* Set the Override bit to receive frames even when the
+ * port is closed.
+ */
+ if (override)
+ data |= ALU_V_OVERRIDE;
+ ret = ksz_write32(dev, REG_SW_ALU_VAL_B, data);
+ if (ret < 0)
+ return ret;
+
+ data = reserved_mcast_map[i] <<
+ dev->info->shifts[ALU_STAT_INDEX];
+ data |= ALU_STAT_START |
+ masks[ALU_STAT_DIRECT] |
+ masks[ALU_RESV_MCAST_ADDR] |
+ masks[ALU_STAT_WRITE];
+ ret = ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
+ if (ret < 0)
+ return ret;
+
+ /* wait to be finished */
+ ret = ksz9477_wait_alu_sta_ready(dev);
+ if (ret < 0)
+ return ret;
+ }
}
return 0;
@@ -1291,6 +1461,7 @@ int ksz9477_enable_stp_addr(struct ksz_device *dev)
int ksz9477_setup(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
+ const u16 *regs = dev->info->regs;
int ret = 0;
ds->mtu_enforcement_ingress = true;
@@ -1305,6 +1476,10 @@ int ksz9477_setup(struct dsa_switch *ds)
/* Enable REG_SW_MTU__2 reg by setting SW_JUMBO_PACKET */
ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_JUMBO_PACKET, true);
+ /* Use collision based back pressure mode. */
+ ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_BACK_PRESSURE,
+ SW_BACK_PRESSURE_COLLISION);
+
/* Now we can configure default MTU value */
ret = regmap_update_bits(ksz_regmap_16(dev), REG_SW_MTU__2, REG_SW_MTU_MASK,
VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);
@@ -1317,13 +1492,11 @@ int ksz9477_setup(struct dsa_switch *ds)
/* enable global MIB counter freeze function */
ksz_cfg(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FREEZE, true);
- /* Make sure PME (WoL) is not enabled. If requested, it will be
- * enabled by ksz9477_wol_pre_shutdown(). Otherwise, some PMICs do not
- * like PME events changes before shutdown.
+ /* Make sure PME (WoL) is not enabled. If requested, it will
+ * be enabled by ksz_wol_pre_shutdown(). Otherwise, some PMICs
+ * do not like PME events changes before shutdown.
*/
- ksz_write8(dev, REG_SW_PME_CTRL, 0);
-
- return 0;
+ return ksz_write8(dev, regs[REG_SW_PME_CTRL], 0);
}
u32 ksz9477_get_port_addr(int port, int offset)
diff --git a/drivers/net/dsa/microchip/ksz9477.h b/drivers/net/dsa/microchip/ksz9477.h
index ce1e656b800b..0d1a6dfda23e 100644
--- a/drivers/net/dsa/microchip/ksz9477.h
+++ b/drivers/net/dsa/microchip/ksz9477.h
@@ -2,7 +2,7 @@
/*
* Microchip KSZ9477 series Header file
*
- * Copyright (C) 2017-2022 Microchip Technology Inc.
+ * Copyright (C) 2017-2025 Microchip Technology Inc.
*/
#ifndef __KSZ9477_H
@@ -36,6 +36,8 @@ int ksz9477_port_mirror_add(struct ksz_device *dev, int port,
bool ingress, struct netlink_ext_ack *extack);
void ksz9477_port_mirror_del(struct ksz_device *dev, int port,
struct dsa_mall_mirror_tc_entry *mirror);
+int ksz9477_errata_monitor(struct ksz_device *dev, int port,
+ u64 tx_late_col);
void ksz9477_get_caps(struct ksz_device *dev, int port,
struct phylink_config *config);
int ksz9477_fdb_dump(struct ksz_device *dev, int port,
@@ -58,11 +60,6 @@ void ksz9477_switch_exit(struct ksz_device *dev);
void ksz9477_port_queue_split(struct ksz_device *dev, int port);
void ksz9477_hsr_join(struct dsa_switch *ds, int port, struct net_device *hsr);
void ksz9477_hsr_leave(struct dsa_switch *ds, int port, struct net_device *hsr);
-void ksz9477_get_wol(struct ksz_device *dev, int port,
- struct ethtool_wolinfo *wol);
-int ksz9477_set_wol(struct ksz_device *dev, int port,
- struct ethtool_wolinfo *wol);
-void ksz9477_wol_pre_shutdown(struct ksz_device *dev, bool *wol_enabled);
int ksz9477_port_acl_init(struct ksz_device *dev, int port);
void ksz9477_port_acl_free(struct ksz_device *dev, int port);
@@ -100,4 +97,6 @@ void ksz9477_acl_match_process_l2(struct ksz_device *dev, int port,
u16 ethtype, u8 *src_mac, u8 *dst_mac,
unsigned long cookie, u32 prio);
+int ksz9477_pcs_create(struct ksz_device *dev);
+
#endif
diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c
index 82bebee4615c..a2beb27459f1 100644
--- a/drivers/net/dsa/microchip/ksz9477_i2c.c
+++ b/drivers/net/dsa/microchip/ksz9477_i2c.c
@@ -2,7 +2,7 @@
/*
* Microchip KSZ9477 series register access through I2C
*
- * Copyright (C) 2018-2019 Microchip Technology Inc.
+ * Copyright (C) 2018-2024 Microchip Technology Inc.
*/
#include <linux/i2c.h>
@@ -16,6 +16,8 @@ KSZ_REGMAP_TABLE(ksz9477, not_used, 16, 0, 0);
static int ksz9477_i2c_probe(struct i2c_client *i2c)
{
+ const struct ksz_chip_data *chip;
+ struct device *ddev = &i2c->dev;
struct regmap_config rc;
struct ksz_device *dev;
int i, ret;
@@ -24,6 +26,12 @@ static int ksz9477_i2c_probe(struct i2c_client *i2c)
if (!dev)
return -ENOMEM;
+ chip = device_get_match_data(ddev);
+ if (!chip)
+ return -EINVAL;
+
+ /* Save chip id to do special initialization when probing. */
+ dev->chip_id = chip->chip_id;
for (i = 0; i < __KSZ_NUM_REGMAPS; i++) {
rc = ksz9477_regmap_config[i];
rc.lock_arg = &dev->regmap_mutex;
@@ -72,8 +80,8 @@ static void ksz9477_i2c_shutdown(struct i2c_client *i2c)
}
static const struct i2c_device_id ksz9477_i2c_id[] = {
- { "ksz9477-switch", 0 },
- {},
+ { "ksz9477-switch" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, ksz9477_i2c_id);
@@ -111,14 +119,22 @@ static const struct of_device_id ksz9477_dt_ids[] = {
.compatible = "microchip,ksz9567",
.data = &ksz_switch_chips[KSZ9567]
},
+ {
+ .compatible = "microchip,lan9646",
+ .data = &ksz_switch_chips[LAN9646]
+ },
{},
};
MODULE_DEVICE_TABLE(of, ksz9477_dt_ids);
+static DEFINE_SIMPLE_DEV_PM_OPS(ksz_i2c_pm_ops,
+ ksz_switch_suspend, ksz_switch_resume);
+
static struct i2c_driver ksz9477_i2c_driver = {
.driver = {
.name = "ksz9477-switch",
.of_match_table = ksz9477_dt_ids,
+ .pm = &ksz_i2c_pm_ops,
},
.probe = ksz9477_i2c_probe,
.remove = ksz9477_i2c_remove,
diff --git a/drivers/net/dsa/microchip/ksz9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h
index f3a205ee483f..61ea11e3338e 100644
--- a/drivers/net/dsa/microchip/ksz9477_reg.h
+++ b/drivers/net/dsa/microchip/ksz9477_reg.h
@@ -2,7 +2,7 @@
/*
* Microchip KSZ9477 register definitions
*
- * Copyright (C) 2017-2018 Microchip Technology Inc.
+ * Copyright (C) 2017-2025 Microchip Technology Inc.
*/
#ifndef __KSZ9477_REGS_H
@@ -38,11 +38,6 @@
#define SWITCH_REVISION_S 4
#define SWITCH_RESET 0x01
-#define REG_SW_PME_CTRL 0x0006
-
-#define PME_ENABLE BIT(1)
-#define PME_POLARITY BIT(0)
-
#define REG_GLOBAL_OPTIONS 0x000F
#define SW_GIGABIT_ABLE BIT(6)
@@ -170,8 +165,6 @@
#define SW_VLAN_ENABLE BIT(7)
#define SW_DROP_INVALID_VID BIT(6)
#define SW_AGE_CNT_M GENMASK(5, 3)
-#define SW_AGE_CNT_S 3
-#define SW_AGE_PERIOD_10_8_M GENMASK(10, 8)
#define SW_RESV_MCAST_ENABLE BIT(2)
#define SW_HASH_OPTION_M 0x03
#define SW_HASH_OPTION_CRC 1
@@ -247,6 +240,7 @@
#define REG_SW_MAC_CTRL_1 0x0331
#define SW_BACK_PRESSURE BIT(5)
+#define SW_BACK_PRESSURE_COLLISION 0
#define FAIR_FLOW_CTRL BIT(4)
#define NO_EXC_COLLISION_DROP BIT(3)
#define SW_JUMBO_PACKET BIT(2)
@@ -403,7 +397,6 @@
#define ALU_RESV_MCAST_INDEX_M (BIT(6) - 1)
#define ALU_STAT_START BIT(7)
-#define ALU_RESV_MCAST_ADDR BIT(1)
#define REG_SW_ALU_VAL_A 0x0420
@@ -806,13 +799,6 @@
#define REG_PORT_AVB_SR_1_TYPE 0x0008
#define REG_PORT_AVB_SR_2_TYPE 0x000A
-#define REG_PORT_PME_STATUS 0x0013
-#define REG_PORT_PME_CTRL 0x0017
-
-#define PME_WOL_MAGICPKT BIT(2)
-#define PME_WOL_LINKUP BIT(1)
-#define PME_WOL_ENERGY BIT(0)
-
#define REG_PORT_INT_STATUS 0x001B
#define REG_PORT_INT_MASK 0x001F
@@ -842,8 +828,8 @@
#define REG_PORT_STATUS_0 0x0030
-#define PORT_INTF_SPEED_M 0x3
-#define PORT_INTF_SPEED_S 3
+#define PORT_INTF_SPEED_MASK GENMASK(4, 3)
+#define PORT_INTF_SPEED_NONE GENMASK(1, 0)
#define PORT_INTF_FULL_DUPLEX BIT(2)
#define PORT_TX_FLOW_CTRL BIT(1)
#define PORT_RX_FLOW_CTRL BIT(0)
@@ -1167,6 +1153,11 @@
#define PORT_RMII_CLK_SEL BIT(7)
#define PORT_MII_SEL_EDGE BIT(5)
+#define REG_PMAVBC 0x03AC
+
+#define PMAVBC_MASK GENMASK(26, 16)
+#define PMAVBC_MIN 0x580
+
/* 4 - MAC */
#define REG_PORT_MAC_CTRL_0 0x0400
@@ -1494,6 +1485,7 @@
#define PORT_QM_TX_CNT_USED_S 0
#define PORT_QM_TX_CNT_M (BIT(11) - 1)
+#define PORT_QM_TX_CNT_MAX 0x200
#define REG_PORT_QM_TX_CNT_1__4 0x0A14
diff --git a/drivers/net/dsa/microchip/ksz9477_tc_flower.c b/drivers/net/dsa/microchip/ksz9477_tc_flower.c
index 8b2f5be667e0..ca7830ab168a 100644
--- a/drivers/net/dsa/microchip/ksz9477_tc_flower.c
+++ b/drivers/net/dsa/microchip/ksz9477_tc_flower.c
@@ -124,6 +124,9 @@ static int ksz9477_flower_parse_key(struct ksz_device *dev, int port,
return -EOPNOTSUPP;
}
+ if (flow_rule_match_has_control_flags(rule, extack))
+ return -EOPNOTSUPP;
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC) ||
flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
ret = ksz9477_flower_parse_key_l2(dev, port, extack, rule,
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 2b510f150dd8..0c10351fe5eb 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -2,7 +2,7 @@
/*
* Microchip switch driver main logic
*
- * Copyright (C) 2017-2019 Microchip Technology Inc.
+ * Copyright (C) 2017-2025 Microchip Technology Inc.
*/
#include <linux/delay.h>
@@ -23,11 +23,14 @@
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/micrel_phy.h>
+#include <linux/pinctrl/consumer.h>
#include <net/dsa.h>
+#include <net/ieee8021q.h>
#include <net/pkt_cls.h>
#include <net/switchdev.h>
#include "ksz_common.h"
+#include "ksz_dcb.h"
#include "ksz_ptp.h"
#include "ksz8.h"
#include "ksz9477.h"
@@ -244,16 +247,124 @@ static const struct ksz_drive_strength ksz9477_drive_strengths[] = {
{ SW_DRIVE_STRENGTH_28MA, 28000 },
};
-/* ksz8830_drive_strengths - Drive strength mapping for KSZ8830, KSZ8873, ..
+/* ksz88x3_drive_strengths - Drive strength mapping for KSZ8863, KSZ8873, ..
* variants.
 * These values are documented in the KSZ8873 and KSZ8863 datasheets.
*/
-static const struct ksz_drive_strength ksz8830_drive_strengths[] = {
+static const struct ksz_drive_strength ksz88x3_drive_strengths[] = {
{ 0, 8000 },
{ KSZ8873_DRIVE_STRENGTH_16MA, 16000 },
};
-static const struct ksz_dev_ops ksz8_dev_ops = {
+static void ksz88x3_phylink_mac_config(struct phylink_config *config,
+ unsigned int mode,
+ const struct phylink_link_state *state);
+static void ksz_phylink_mac_config(struct phylink_config *config,
+ unsigned int mode,
+ const struct phylink_link_state *state);
+static void ksz_phylink_mac_link_down(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface);
+
+/**
+ * ksz_phylink_mac_disable_tx_lpi() - Callback to signal LPI support (Dummy)
+ * @config: phylink config structure
+ *
+ * This function is a dummy handler. See ksz_phylink_mac_enable_tx_lpi() for
+ * a detailed explanation of EEE/LPI handling in KSZ switches.
+ */
+static void ksz_phylink_mac_disable_tx_lpi(struct phylink_config *config)
+{
+}
+
+/**
+ * ksz_phylink_mac_enable_tx_lpi() - Callback to signal LPI support (Dummy)
+ * @config: phylink config structure
+ * @timer: timer value before entering LPI (unused)
+ * @tx_clock_stop: whether to stop the TX clock in LPI mode (unused)
+ *
+ * This function signals to phylink that the driver architecture supports
+ * LPI management, enabling phylink to control EEE advertisement during
+ * negotiation according to IEEE Std 802.3 (Clause 78).
+ *
+ * Hardware Management of EEE/LPI State:
+ * For KSZ switch ports with integrated PHYs (e.g., KSZ9893R ports 1-2),
+ * observation and testing suggest that the actual EEE / Low Power Idle (LPI)
+ * state transitions are managed autonomously by the hardware based on
+ * the auto-negotiation results. (Note: While the datasheet describes EEE
+ * operation based on negotiation, it doesn't explicitly detail the internal
+ * MAC/PHY interaction, so autonomous hardware management of the MAC state
+ * for LPI is inferred from observed behavior).
+ * This hardware control, consistent with the switch's ability to operate
+ * autonomously via strapping, means MAC-level software intervention is not
+ * required or exposed for managing the LPI state once EEE is negotiated.
+ * (Ref: KSZ9893R Data Sheet DS00002420D, primarily Section 4.7.5 explaining
+ * EEE, also Sections 4.1.7 on Auto-Negotiation and 3.2.1 on Configuration
+ * Straps).
+ *
+ * Additionally, ports configured as MAC interfaces (e.g., KSZ9893R port 3)
+ * lack documented MAC-level LPI control.
+ *
+ * Therefore, this callback performs no action and serves primarily to inform
+ * phylink of LPI awareness and to document the inferred hardware behavior.
+ *
+ * Returns: 0 (Always success)
+ */
+static int ksz_phylink_mac_enable_tx_lpi(struct phylink_config *config,
+ u32 timer, bool tx_clock_stop)
+{
+ return 0;
+}
+
+static const struct phylink_mac_ops ksz88x3_phylink_mac_ops = {
+ .mac_config = ksz88x3_phylink_mac_config,
+ .mac_link_down = ksz_phylink_mac_link_down,
+ .mac_link_up = ksz8_phylink_mac_link_up,
+ .mac_disable_tx_lpi = ksz_phylink_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = ksz_phylink_mac_enable_tx_lpi,
+};
+
+static const struct phylink_mac_ops ksz8_phylink_mac_ops = {
+ .mac_config = ksz_phylink_mac_config,
+ .mac_link_down = ksz_phylink_mac_link_down,
+ .mac_link_up = ksz8_phylink_mac_link_up,
+ .mac_disable_tx_lpi = ksz_phylink_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = ksz_phylink_mac_enable_tx_lpi,
+};
+
+static const struct ksz_dev_ops ksz8463_dev_ops = {
+ .setup = ksz8_setup,
+ .get_port_addr = ksz8463_get_port_addr,
+ .cfg_port_member = ksz8_cfg_port_member,
+ .flush_dyn_mac_table = ksz8_flush_dyn_mac_table,
+ .port_setup = ksz8_port_setup,
+ .r_phy = ksz8463_r_phy,
+ .w_phy = ksz8463_w_phy,
+ .r_mib_cnt = ksz8_r_mib_cnt,
+ .r_mib_pkt = ksz8_r_mib_pkt,
+ .r_mib_stat64 = ksz88xx_r_mib_stats64,
+ .freeze_mib = ksz8_freeze_mib,
+ .port_init_cnt = ksz8_port_init_cnt,
+ .fdb_dump = ksz8_fdb_dump,
+ .fdb_add = ksz8_fdb_add,
+ .fdb_del = ksz8_fdb_del,
+ .mdb_add = ksz8_mdb_add,
+ .mdb_del = ksz8_mdb_del,
+ .vlan_filtering = ksz8_port_vlan_filtering,
+ .vlan_add = ksz8_port_vlan_add,
+ .vlan_del = ksz8_port_vlan_del,
+ .mirror_add = ksz8_port_mirror_add,
+ .mirror_del = ksz8_port_mirror_del,
+ .get_caps = ksz8_get_caps,
+ .config_cpu_port = ksz8_config_cpu_port,
+ .enable_stp_addr = ksz8_enable_stp_addr,
+ .reset = ksz8_reset_switch,
+ .init = ksz8_switch_init,
+ .exit = ksz8_switch_exit,
+ .change_mtu = ksz8_change_mtu,
+};
+
+static const struct ksz_dev_ops ksz88xx_dev_ops = {
.setup = ksz8_setup,
.get_port_addr = ksz8_get_port_addr,
.cfg_port_member = ksz8_cfg_port_member,
@@ -277,22 +388,84 @@ static const struct ksz_dev_ops ksz8_dev_ops = {
.mirror_add = ksz8_port_mirror_add,
.mirror_del = ksz8_port_mirror_del,
.get_caps = ksz8_get_caps,
- .phylink_mac_link_up = ksz8_phylink_mac_link_up,
.config_cpu_port = ksz8_config_cpu_port,
.enable_stp_addr = ksz8_enable_stp_addr,
.reset = ksz8_reset_switch,
.init = ksz8_switch_init,
.exit = ksz8_switch_exit,
.change_mtu = ksz8_change_mtu,
+ .pme_write8 = ksz8_pme_write8,
+ .pme_pread8 = ksz8_pme_pread8,
+ .pme_pwrite8 = ksz8_pme_pwrite8,
+};
+
+static const struct ksz_dev_ops ksz87xx_dev_ops = {
+ .setup = ksz8_setup,
+ .get_port_addr = ksz8_get_port_addr,
+ .cfg_port_member = ksz8_cfg_port_member,
+ .flush_dyn_mac_table = ksz8_flush_dyn_mac_table,
+ .port_setup = ksz8_port_setup,
+ .r_phy = ksz8_r_phy,
+ .w_phy = ksz8_w_phy,
+ .r_mib_cnt = ksz8_r_mib_cnt,
+ .r_mib_pkt = ksz8_r_mib_pkt,
+ .r_mib_stat64 = ksz_r_mib_stats64,
+ .freeze_mib = ksz8_freeze_mib,
+ .port_init_cnt = ksz8_port_init_cnt,
+ .fdb_dump = ksz8_fdb_dump,
+ .fdb_add = ksz8_fdb_add,
+ .fdb_del = ksz8_fdb_del,
+ .mdb_add = ksz8_mdb_add,
+ .mdb_del = ksz8_mdb_del,
+ .vlan_filtering = ksz8_port_vlan_filtering,
+ .vlan_add = ksz8_port_vlan_add,
+ .vlan_del = ksz8_port_vlan_del,
+ .mirror_add = ksz8_port_mirror_add,
+ .mirror_del = ksz8_port_mirror_del,
+ .get_caps = ksz8_get_caps,
+ .config_cpu_port = ksz8_config_cpu_port,
+ .enable_stp_addr = ksz8_enable_stp_addr,
+ .reset = ksz8_reset_switch,
+ .init = ksz8_switch_init,
+ .exit = ksz8_switch_exit,
+ .change_mtu = ksz8_change_mtu,
+ .pme_write8 = ksz8_pme_write8,
+ .pme_pread8 = ksz8_pme_pread8,
+ .pme_pwrite8 = ksz8_pme_pwrite8,
};
-static void ksz9477_phylink_mac_link_up(struct ksz_device *dev, int port,
+static void ksz9477_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
unsigned int mode,
phy_interface_t interface,
- struct phy_device *phydev, int speed,
- int duplex, bool tx_pause,
+ int speed, int duplex, bool tx_pause,
bool rx_pause);
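+
+/* Hand phylink the SGMII PCS only for the SGMII-capable port and only for
+ * SGMII or 1000BASE-X modes; all other ports and modes have no separate PCS.
+ */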
+static struct phylink_pcs *
+ksz_phylink_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ksz_device *dev = dp->ds->priv;
+ struct ksz_port *p = &dev->ports[dp->index];
+
+ if (ksz_is_sgmii_port(dev, dp->index) &&
+ (interface == PHY_INTERFACE_MODE_SGMII ||
+ interface == PHY_INTERFACE_MODE_1000BASEX))
+ return p->pcs;
+
+ return NULL;
+}
+
+static const struct phylink_mac_ops ksz9477_phylink_mac_ops = {
+ .mac_config = ksz_phylink_mac_config,
+ .mac_link_down = ksz_phylink_mac_link_down,
+ .mac_link_up = ksz9477_phylink_mac_link_up,
+ .mac_disable_tx_lpi = ksz_phylink_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = ksz_phylink_mac_enable_tx_lpi,
+ .mac_select_pcs = ksz_phylink_mac_select_pcs,
+};
+
static const struct ksz_dev_ops ksz9477_dev_ops = {
.setup = ksz9477_setup,
.get_port_addr = ksz9477_get_port_addr,
@@ -319,16 +492,24 @@ static const struct ksz_dev_ops ksz9477_dev_ops = {
.mdb_add = ksz9477_mdb_add,
.mdb_del = ksz9477_mdb_del,
.change_mtu = ksz9477_change_mtu,
- .phylink_mac_link_up = ksz9477_phylink_mac_link_up,
- .get_wol = ksz9477_get_wol,
- .set_wol = ksz9477_set_wol,
- .wol_pre_shutdown = ksz9477_wol_pre_shutdown,
+ .pme_write8 = ksz_write8,
+ .pme_pread8 = ksz_pread8,
+ .pme_pwrite8 = ksz_pwrite8,
.config_cpu_port = ksz9477_config_cpu_port,
.tc_cbs_set_cinc = ksz9477_tc_cbs_set_cinc,
.enable_stp_addr = ksz9477_enable_stp_addr,
.reset = ksz9477_reset_switch,
.init = ksz9477_switch_init,
.exit = ksz9477_switch_exit,
+ .pcs_create = ksz9477_pcs_create,
+};
+
+static const struct phylink_mac_ops lan937x_phylink_mac_ops = {
+ .mac_config = ksz_phylink_mac_config,
+ .mac_link_down = ksz_phylink_mac_link_down,
+ .mac_link_up = ksz9477_phylink_mac_link_up,
+ .mac_disable_tx_lpi = ksz_phylink_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = ksz_phylink_mac_enable_tx_lpi,
};
static const struct ksz_dev_ops lan937x_dev_ops = {
@@ -339,6 +520,8 @@ static const struct ksz_dev_ops lan937x_dev_ops = {
.flush_dyn_mac_table = ksz9477_flush_dyn_mac_table,
.port_setup = lan937x_port_setup,
.set_ageing_time = lan937x_set_ageing_time,
+ .mdio_bus_preinit = lan937x_mdio_bus_preinit,
+ .create_phy_addr_map = lan937x_create_phy_addr_map,
.r_phy = lan937x_r_phy,
.w_phy = lan937x_w_phy,
.r_mib_cnt = ksz9477_r_mib_cnt,
@@ -359,7 +542,6 @@ static const struct ksz_dev_ops lan937x_dev_ops = {
.mdb_add = ksz9477_mdb_add,
.mdb_del = ksz9477_mdb_del,
.change_mtu = lan937x_change_mtu,
- .phylink_mac_link_up = ksz9477_phylink_mac_link_up,
.config_cpu_port = lan937x_config_cpu_port,
.tc_cbs_set_cinc = lan937x_tc_cbs_set_cinc,
.enable_stp_addr = ksz9477_enable_stp_addr,
@@ -368,6 +550,60 @@ static const struct ksz_dev_ops lan937x_dev_ops = {
.exit = lan937x_switch_exit,
};
+static const u16 ksz8463_regs[] = {
+ [REG_SW_MAC_ADDR] = 0x10,
+ [REG_IND_CTRL_0] = 0x30,
+ [REG_IND_DATA_8] = 0x26,
+ [REG_IND_DATA_CHECK] = 0x26,
+ [REG_IND_DATA_HI] = 0x28,
+ [REG_IND_DATA_LO] = 0x2C,
+ [REG_IND_MIB_CHECK] = 0x2F,
+ [P_FORCE_CTRL] = 0x0C,
+ [P_LINK_STATUS] = 0x0E,
+ [P_LOCAL_CTRL] = 0x0C,
+ [P_NEG_RESTART_CTRL] = 0x0D,
+ [P_REMOTE_STATUS] = 0x0E,
+ [P_SPEED_STATUS] = 0x0F,
+ [S_TAIL_TAG_CTRL] = 0xAD,
+ [P_STP_CTRL] = 0x6F,
+ [S_START_CTRL] = 0x01,
+ [S_BROADCAST_CTRL] = 0x06,
+ [S_MULTICAST_CTRL] = 0x04,
+};
+
+static const u32 ksz8463_masks[] = {
+ [PORT_802_1P_REMAPPING] = BIT(3),
+ [SW_TAIL_TAG_ENABLE] = BIT(0),
+ [MIB_COUNTER_OVERFLOW] = BIT(7),
+ [MIB_COUNTER_VALID] = BIT(6),
+ [VLAN_TABLE_FID] = GENMASK(15, 12),
+ [VLAN_TABLE_MEMBERSHIP] = GENMASK(18, 16),
+ [VLAN_TABLE_VALID] = BIT(19),
+ [STATIC_MAC_TABLE_VALID] = BIT(19),
+ [STATIC_MAC_TABLE_USE_FID] = BIT(21),
+ [STATIC_MAC_TABLE_FID] = GENMASK(25, 22),
+ [STATIC_MAC_TABLE_OVERRIDE] = BIT(20),
+ [STATIC_MAC_TABLE_FWD_PORTS] = GENMASK(18, 16),
+ [DYNAMIC_MAC_TABLE_ENTRIES_H] = GENMASK(1, 0),
+ [DYNAMIC_MAC_TABLE_MAC_EMPTY] = BIT(2),
+ [DYNAMIC_MAC_TABLE_NOT_READY] = BIT(7),
+ [DYNAMIC_MAC_TABLE_ENTRIES] = GENMASK(31, 24),
+ [DYNAMIC_MAC_TABLE_FID] = GENMASK(19, 16),
+ [DYNAMIC_MAC_TABLE_SRC_PORT] = GENMASK(21, 20),
+ [DYNAMIC_MAC_TABLE_TIMESTAMP] = GENMASK(23, 22),
+};
+
+static u8 ksz8463_shifts[] = {
+ [VLAN_TABLE_MEMBERSHIP_S] = 16,
+ [STATIC_MAC_FWD_PORTS] = 16,
+ [STATIC_MAC_FID] = 22,
+ [DYNAMIC_MAC_ENTRIES_H] = 8,
+ [DYNAMIC_MAC_ENTRIES] = 24,
+ [DYNAMIC_MAC_FID] = 16,
+ [DYNAMIC_MAC_TIMESTAMP] = 22,
+ [DYNAMIC_MAC_SRC_PORT] = 20,
+};
+
static const u16 ksz8795_regs[] = {
[REG_SW_MAC_ADDR] = 0x68,
[REG_IND_CTRL_0] = 0x6E,
@@ -390,6 +626,9 @@ static const u16 ksz8795_regs[] = {
[S_MULTICAST_CTRL] = 0x04,
[P_XMII_CTRL_0] = 0x06,
[P_XMII_CTRL_1] = 0x06,
+ [REG_SW_PME_CTRL] = 0x8003,
+ [REG_PORT_PME_STATUS] = 0x8003,
+ [REG_PORT_PME_CTRL] = 0x8007,
};
static const u32 ksz8795_masks[] = {
@@ -498,6 +737,61 @@ static u8 ksz8863_shifts[] = {
[DYNAMIC_MAC_SRC_PORT] = 20,
};
+static const u16 ksz8895_regs[] = {
+ [REG_SW_MAC_ADDR] = 0x68,
+ [REG_IND_CTRL_0] = 0x6E,
+ [REG_IND_DATA_8] = 0x70,
+ [REG_IND_DATA_CHECK] = 0x72,
+ [REG_IND_DATA_HI] = 0x71,
+ [REG_IND_DATA_LO] = 0x75,
+ [REG_IND_MIB_CHECK] = 0x75,
+ [P_FORCE_CTRL] = 0x0C,
+ [P_LINK_STATUS] = 0x0E,
+ [P_LOCAL_CTRL] = 0x0C,
+ [P_NEG_RESTART_CTRL] = 0x0D,
+ [P_REMOTE_STATUS] = 0x0E,
+ [P_SPEED_STATUS] = 0x09,
+ [S_TAIL_TAG_CTRL] = 0x0C,
+ [P_STP_CTRL] = 0x02,
+ [S_START_CTRL] = 0x01,
+ [S_BROADCAST_CTRL] = 0x06,
+ [S_MULTICAST_CTRL] = 0x04,
+};
+
+static const u32 ksz8895_masks[] = {
+ [PORT_802_1P_REMAPPING] = BIT(7),
+ [SW_TAIL_TAG_ENABLE] = BIT(1),
+ [MIB_COUNTER_OVERFLOW] = BIT(7),
+ [MIB_COUNTER_VALID] = BIT(6),
+ [VLAN_TABLE_FID] = GENMASK(6, 0),
+ [VLAN_TABLE_MEMBERSHIP] = GENMASK(11, 7),
+ [VLAN_TABLE_VALID] = BIT(12),
+ [STATIC_MAC_TABLE_VALID] = BIT(21),
+ [STATIC_MAC_TABLE_USE_FID] = BIT(23),
+ [STATIC_MAC_TABLE_FID] = GENMASK(30, 24),
+ [STATIC_MAC_TABLE_OVERRIDE] = BIT(22),
+ [STATIC_MAC_TABLE_FWD_PORTS] = GENMASK(20, 16),
+ [DYNAMIC_MAC_TABLE_ENTRIES_H] = GENMASK(6, 0),
+ [DYNAMIC_MAC_TABLE_MAC_EMPTY] = BIT(7),
+ [DYNAMIC_MAC_TABLE_NOT_READY] = BIT(7),
+ [DYNAMIC_MAC_TABLE_ENTRIES] = GENMASK(31, 29),
+ [DYNAMIC_MAC_TABLE_FID] = GENMASK(22, 16),
+ [DYNAMIC_MAC_TABLE_SRC_PORT] = GENMASK(26, 24),
+ [DYNAMIC_MAC_TABLE_TIMESTAMP] = GENMASK(28, 27),
+};
+
+static const u8 ksz8895_shifts[] = {
+ [VLAN_TABLE_MEMBERSHIP_S] = 7,
+ [VLAN_TABLE] = 13,
+ [STATIC_MAC_FWD_PORTS] = 16,
+ [STATIC_MAC_FID] = 24,
+ [DYNAMIC_MAC_ENTRIES_H] = 3,
+ [DYNAMIC_MAC_ENTRIES] = 29,
+ [DYNAMIC_MAC_FID] = 16,
+ [DYNAMIC_MAC_TIMESTAMP] = 27,
+ [DYNAMIC_MAC_SRC_PORT] = 24,
+};
+
static const u16 ksz9477_regs[] = {
[REG_SW_MAC_ADDR] = 0x0302,
[P_STP_CTRL] = 0x0B04,
@@ -506,11 +800,16 @@ static const u16 ksz9477_regs[] = {
[S_MULTICAST_CTRL] = 0x0331,
[P_XMII_CTRL_0] = 0x0300,
[P_XMII_CTRL_1] = 0x0301,
+ [REG_SW_PME_CTRL] = 0x0006,
+ [REG_PORT_PME_STATUS] = 0x0013,
+ [REG_PORT_PME_CTRL] = 0x0017,
};
static const u32 ksz9477_masks[] = {
[ALU_STAT_WRITE] = 0,
[ALU_STAT_READ] = 1,
+ [ALU_STAT_DIRECT] = 0,
+ [ALU_RESV_MCAST_ADDR] = BIT(1),
[P_MII_TX_FLOW_CTRL] = BIT(5),
[P_MII_RX_FLOW_CTRL] = BIT(3),
};
@@ -538,6 +837,8 @@ static const u8 ksz9477_xmii_ctrl1[] = {
static const u32 lan937x_masks[] = {
[ALU_STAT_WRITE] = 1,
[ALU_STAT_READ] = 2,
+ [ALU_STAT_DIRECT] = BIT(3),
+ [ALU_RESV_MCAST_ADDR] = BIT(2),
[P_MII_TX_FLOW_CTRL] = BIT(5),
[P_MII_RX_FLOW_CTRL] = BIT(3),
};
@@ -901,8 +1202,7 @@ static const struct regmap_range ksz9477_valid_regs[] = {
regmap_reg_range(0x701b, 0x701b),
regmap_reg_range(0x701f, 0x7020),
regmap_reg_range(0x7030, 0x7030),
- regmap_reg_range(0x7200, 0x7203),
- regmap_reg_range(0x7206, 0x7207),
+ regmap_reg_range(0x7200, 0x7207),
regmap_reg_range(0x7300, 0x7301),
regmap_reg_range(0x7400, 0x7401),
regmap_reg_range(0x7403, 0x7403),
@@ -966,10 +1266,9 @@ static const struct regmap_range ksz9896_valid_regs[] = {
regmap_reg_range(0x1030, 0x1030),
regmap_reg_range(0x1100, 0x1115),
regmap_reg_range(0x111a, 0x111f),
- regmap_reg_range(0x1122, 0x1127),
- regmap_reg_range(0x112a, 0x112b),
- regmap_reg_range(0x1136, 0x1139),
- regmap_reg_range(0x113e, 0x113f),
+ regmap_reg_range(0x1120, 0x112b),
+ regmap_reg_range(0x1134, 0x113b),
+ regmap_reg_range(0x113c, 0x113f),
regmap_reg_range(0x1400, 0x1401),
regmap_reg_range(0x1403, 0x1403),
regmap_reg_range(0x1410, 0x1417),
@@ -996,10 +1295,9 @@ static const struct regmap_range ksz9896_valid_regs[] = {
regmap_reg_range(0x2030, 0x2030),
regmap_reg_range(0x2100, 0x2115),
regmap_reg_range(0x211a, 0x211f),
- regmap_reg_range(0x2122, 0x2127),
- regmap_reg_range(0x212a, 0x212b),
- regmap_reg_range(0x2136, 0x2139),
- regmap_reg_range(0x213e, 0x213f),
+ regmap_reg_range(0x2120, 0x212b),
+ regmap_reg_range(0x2134, 0x213b),
+ regmap_reg_range(0x213c, 0x213f),
regmap_reg_range(0x2400, 0x2401),
regmap_reg_range(0x2403, 0x2403),
regmap_reg_range(0x2410, 0x2417),
@@ -1026,10 +1324,9 @@ static const struct regmap_range ksz9896_valid_regs[] = {
regmap_reg_range(0x3030, 0x3030),
regmap_reg_range(0x3100, 0x3115),
regmap_reg_range(0x311a, 0x311f),
- regmap_reg_range(0x3122, 0x3127),
- regmap_reg_range(0x312a, 0x312b),
- regmap_reg_range(0x3136, 0x3139),
- regmap_reg_range(0x313e, 0x313f),
+ regmap_reg_range(0x3120, 0x312b),
+ regmap_reg_range(0x3134, 0x313b),
+ regmap_reg_range(0x313c, 0x313f),
regmap_reg_range(0x3400, 0x3401),
regmap_reg_range(0x3403, 0x3403),
regmap_reg_range(0x3410, 0x3417),
@@ -1056,10 +1353,9 @@ static const struct regmap_range ksz9896_valid_regs[] = {
regmap_reg_range(0x4030, 0x4030),
regmap_reg_range(0x4100, 0x4115),
regmap_reg_range(0x411a, 0x411f),
- regmap_reg_range(0x4122, 0x4127),
- regmap_reg_range(0x412a, 0x412b),
- regmap_reg_range(0x4136, 0x4139),
- regmap_reg_range(0x413e, 0x413f),
+ regmap_reg_range(0x4120, 0x412b),
+ regmap_reg_range(0x4134, 0x413b),
+ regmap_reg_range(0x413c, 0x413f),
regmap_reg_range(0x4400, 0x4401),
regmap_reg_range(0x4403, 0x4403),
regmap_reg_range(0x4410, 0x4417),
@@ -1086,10 +1382,9 @@ static const struct regmap_range ksz9896_valid_regs[] = {
regmap_reg_range(0x5030, 0x5030),
regmap_reg_range(0x5100, 0x5115),
regmap_reg_range(0x511a, 0x511f),
- regmap_reg_range(0x5122, 0x5127),
- regmap_reg_range(0x512a, 0x512b),
- regmap_reg_range(0x5136, 0x5139),
- regmap_reg_range(0x513e, 0x513f),
+ regmap_reg_range(0x5120, 0x512b),
+ regmap_reg_range(0x5134, 0x513b),
+ regmap_reg_range(0x513c, 0x513f),
regmap_reg_range(0x5400, 0x5401),
regmap_reg_range(0x5403, 0x5403),
regmap_reg_range(0x5410, 0x5417),
@@ -1116,10 +1411,9 @@ static const struct regmap_range ksz9896_valid_regs[] = {
regmap_reg_range(0x6030, 0x6030),
regmap_reg_range(0x6100, 0x6115),
regmap_reg_range(0x611a, 0x611f),
- regmap_reg_range(0x6122, 0x6127),
- regmap_reg_range(0x612a, 0x612b),
- regmap_reg_range(0x6136, 0x6139),
- regmap_reg_range(0x613e, 0x613f),
+ regmap_reg_range(0x6120, 0x612b),
+ regmap_reg_range(0x6134, 0x613b),
+ regmap_reg_range(0x613c, 0x613f),
regmap_reg_range(0x6300, 0x6301),
regmap_reg_range(0x6400, 0x6401),
regmap_reg_range(0x6403, 0x6403),
@@ -1158,6 +1452,7 @@ static const struct regmap_range ksz8873_valid_regs[] = {
regmap_reg_range(0x3f, 0x3f),
/* advanced control registers */
+ regmap_reg_range(0x43, 0x43),
regmap_reg_range(0x60, 0x6f),
regmap_reg_range(0x70, 0x75),
regmap_reg_range(0x76, 0x78),
@@ -1184,6 +1479,29 @@ static const struct regmap_access_table ksz8873_register_set = {
};
const struct ksz_chip_data ksz_switch_chips[] = {
+ [KSZ8463] = {
+ .chip_id = KSZ8463_CHIP_ID,
+ .dev_name = "KSZ8463",
+ .num_vlans = 16,
+ .num_alus = 0,
+ .num_statics = 8,
+ .cpu_ports = 0x4, /* can be configured as cpu port */
+ .port_cnt = 3,
+ .num_tx_queues = 4,
+ .num_ipms = 4,
+ .ops = &ksz8463_dev_ops,
+ .phylink_mac_ops = &ksz88x3_phylink_mac_ops,
+ .mib_names = ksz88xx_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz88xx_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz8463_regs,
+ .masks = ksz8463_masks,
+ .shifts = ksz8463_shifts,
+ .supports_mii = {false, false, true},
+ .supports_rmii = {false, false, true},
+ .internal_phy = {true, true, false},
+ },
+
[KSZ8563] = {
.chip_id = KSZ8563_CHIP_ID,
.dev_name = "KSZ8563",
@@ -1194,9 +1512,10 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.port_cnt = 3, /* total port count */
.port_nirqs = 3,
.num_tx_queues = 4,
+ .num_ipms = 8,
.tc_cbs_supported = true,
- .tc_ets_supported = true,
.ops = &ksz9477_dev_ops,
+ .phylink_mac_ops = &ksz9477_phylink_mac_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -1210,6 +1529,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.supports_rgmii = {false, false, true},
.internal_phy = {true, true, false},
.gbit_capable = {false, false, true},
+ .ptp_capable = true,
.wr_table = &ksz8563_register_set,
.rd_table = &ksz8563_register_set,
},
@@ -1219,11 +1539,13 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.dev_name = "KSZ8795",
.num_vlans = 4096,
.num_alus = 0,
- .num_statics = 8,
+ .num_statics = 32,
.cpu_ports = 0x10, /* can be configured as cpu port */
.port_cnt = 5, /* total cpu and user ports */
.num_tx_queues = 4,
- .ops = &ksz8_dev_ops,
+ .num_ipms = 4,
+ .ops = &ksz87xx_dev_ops,
+ .phylink_mac_ops = &ksz8_phylink_mac_ops,
.ksz87xx_eee_link_erratum = true,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
@@ -1258,11 +1580,13 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.dev_name = "KSZ8794",
.num_vlans = 4096,
.num_alus = 0,
- .num_statics = 8,
+ .num_statics = 32,
.cpu_ports = 0x10, /* can be configured as cpu port */
.port_cnt = 5, /* total cpu and user ports */
.num_tx_queues = 4,
- .ops = &ksz8_dev_ops,
+ .num_ipms = 4,
+ .ops = &ksz87xx_dev_ops,
+ .phylink_mac_ops = &ksz8_phylink_mac_ops,
.ksz87xx_eee_link_erratum = true,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
@@ -1283,11 +1607,13 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.dev_name = "KSZ8765",
.num_vlans = 4096,
.num_alus = 0,
- .num_statics = 8,
+ .num_statics = 32,
.cpu_ports = 0x10, /* can be configured as cpu port */
.port_cnt = 5, /* total cpu and user ports */
.num_tx_queues = 4,
- .ops = &ksz8_dev_ops,
+ .num_ipms = 4,
+ .ops = &ksz87xx_dev_ops,
+ .phylink_mac_ops = &ksz8_phylink_mac_ops,
.ksz87xx_eee_link_erratum = true,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
@@ -1303,8 +1629,8 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.internal_phy = {true, true, true, true, false},
},
- [KSZ8830] = {
- .chip_id = KSZ8830_CHIP_ID,
+ [KSZ88X3] = {
+ .chip_id = KSZ88X3_CHIP_ID,
.dev_name = "KSZ8863/KSZ8873",
.num_vlans = 16,
.num_alus = 0,
@@ -1312,7 +1638,9 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.cpu_ports = 0x4, /* can be configured as cpu port */
.port_cnt = 3,
.num_tx_queues = 4,
- .ops = &ksz8_dev_ops,
+ .num_ipms = 4,
+ .ops = &ksz88xx_dev_ops,
+ .phylink_mac_ops = &ksz88x3_phylink_mac_ops,
.mib_names = ksz88xx_mib_names,
.mib_cnt = ARRAY_SIZE(ksz88xx_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -1326,6 +1654,61 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.rd_table = &ksz8873_register_set,
},
+ [KSZ8864] = {
+ /* WARNING
+ * =======
+ * KSZ8864 is similar to KSZ8895, except the first port
+ * does not exist.
+ * external cpu
+ * KSZ8864 1,2,3 4
+ * KSZ8895 0,1,2,3 4
+ * port_cnt is configured as 5, even though it is 4
+ */
+ .chip_id = KSZ8864_CHIP_ID,
+ .dev_name = "KSZ8864",
+ .num_vlans = 4096,
+ .num_alus = 0,
+ .num_statics = 32,
+ .cpu_ports = 0x10, /* can be configured as cpu port */
+ .port_cnt = 5, /* total cpu and user ports */
+ .num_tx_queues = 4,
+ .num_ipms = 4,
+ .ops = &ksz88xx_dev_ops,
+ .phylink_mac_ops = &ksz88x3_phylink_mac_ops,
+ .mib_names = ksz88xx_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz88xx_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz8895_regs,
+ .masks = ksz8895_masks,
+ .shifts = ksz8895_shifts,
+ .supports_mii = {false, false, false, false, true},
+ .supports_rmii = {false, false, false, false, true},
+ .internal_phy = {false, true, true, true, false},
+ },
+
+ [KSZ8895] = {
+ .chip_id = KSZ8895_CHIP_ID,
+ .dev_name = "KSZ8895",
+ .num_vlans = 4096,
+ .num_alus = 0,
+ .num_statics = 32,
+ .cpu_ports = 0x10, /* can be configured as cpu port */
+ .port_cnt = 5, /* total cpu and user ports */
+ .num_tx_queues = 4,
+ .num_ipms = 4,
+ .ops = &ksz88xx_dev_ops,
+ .phylink_mac_ops = &ksz88x3_phylink_mac_ops,
+ .mib_names = ksz88xx_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz88xx_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz8895_regs,
+ .masks = ksz8895_masks,
+ .shifts = ksz8895_shifts,
+ .supports_mii = {false, false, false, false, true},
+ .supports_rmii = {false, false, false, false, true},
+ .internal_phy = {true, true, true, true, false},
+ },
+
[KSZ9477] = {
.chip_id = KSZ9477_CHIP_ID,
.dev_name = "KSZ9477",
@@ -1336,9 +1719,11 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.port_cnt = 7, /* total physical port count */
.port_nirqs = 4,
.num_tx_queues = 4,
+ .num_ipms = 8,
.tc_cbs_supported = true,
- .tc_ets_supported = true,
.ops = &ksz9477_dev_ops,
+ .phylink_mac_ops = &ksz9477_phylink_mac_ops,
+ .phy_errata_9477 = true,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -1356,6 +1741,8 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.internal_phy = {true, true, true, true,
true, false, false},
.gbit_capable = {true, true, true, true, true, true, true},
+ .ptp_capable = true,
+ .sgmii_port = 7,
.wr_table = &ksz9477_register_set,
.rd_table = &ksz9477_register_set,
},
@@ -1370,7 +1757,10 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.port_cnt = 6, /* total physical port count */
.port_nirqs = 2,
.num_tx_queues = 4,
+ .num_ipms = 8,
.ops = &ksz9477_dev_ops,
+ .phylink_mac_ops = &ksz9477_phylink_mac_ops,
+ .phy_errata_9477 = true,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -1402,7 +1792,10 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.port_cnt = 7, /* total physical port count */
.port_nirqs = 2,
.num_tx_queues = 4,
+ .num_ipms = 8,
.ops = &ksz9477_dev_ops,
+ .phylink_mac_ops = &ksz9477_phylink_mac_ops,
+ .phy_errata_9477 = true,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -1432,7 +1825,9 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.port_cnt = 3, /* total port count */
.port_nirqs = 2,
.num_tx_queues = 4,
+ .num_ipms = 8,
.ops = &ksz9477_dev_ops,
+ .phylink_mac_ops = &ksz9477_phylink_mac_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -1458,9 +1853,10 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.port_cnt = 3, /* total port count */
.port_nirqs = 3,
.num_tx_queues = 4,
+ .num_ipms = 8,
.tc_cbs_supported = true,
- .tc_ets_supported = true,
.ops = &ksz9477_dev_ops,
+ .phylink_mac_ops = &ksz9477_phylink_mac_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -1474,6 +1870,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.supports_rgmii = {false, false, true},
.internal_phy = {true, true, false},
.gbit_capable = {true, true, true},
+ .ptp_capable = true,
},
[KSZ8567] = {
@@ -1486,9 +1883,11 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.port_cnt = 7, /* total port count */
.port_nirqs = 3,
.num_tx_queues = 4,
+ .num_ipms = 8,
.tc_cbs_supported = true,
- .tc_ets_supported = true,
.ops = &ksz9477_dev_ops,
+ .phylink_mac_ops = &ksz9477_phylink_mac_ops,
+ .phy_errata_9477 = true,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -1507,6 +1906,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
true, false, false},
.gbit_capable = {false, false, false, false, false,
true, true},
+ .ptp_capable = true,
},
[KSZ9567] = {
@@ -1519,8 +1919,8 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.port_cnt = 7, /* total physical port count */
.port_nirqs = 3,
.num_tx_queues = 4,
+ .num_ipms = 8,
.tc_cbs_supported = true,
- .tc_ets_supported = true,
.ops = &ksz9477_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
@@ -1539,6 +1939,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.internal_phy = {true, true, true, true,
true, false, false},
.gbit_capable = {true, true, true, true, true, true, true},
+ .ptp_capable = true,
},
[LAN9370] = {
@@ -1551,9 +1952,11 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.port_cnt = 5, /* total physical port count */
.port_nirqs = 6,
.num_tx_queues = 8,
+ .num_ipms = 8,
.tc_cbs_supported = true,
- .tc_ets_supported = true,
+ .phy_side_mdio_supported = true,
.ops = &lan937x_dev_ops,
+ .phylink_mac_ops = &lan937x_phylink_mac_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -1566,6 +1969,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.supports_rmii = {false, false, false, false, true},
.supports_rgmii = {false, false, false, false, true},
.internal_phy = {true, true, true, true, false},
+ .ptp_capable = true,
},
[LAN9371] = {
@@ -1578,9 +1982,11 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.port_cnt = 6, /* total physical port count */
.port_nirqs = 6,
.num_tx_queues = 8,
+ .num_ipms = 8,
.tc_cbs_supported = true,
- .tc_ets_supported = true,
+ .phy_side_mdio_supported = true,
.ops = &lan937x_dev_ops,
+ .phylink_mac_ops = &lan937x_phylink_mac_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -1593,6 +1999,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.supports_rmii = {false, false, false, false, true, true},
.supports_rgmii = {false, false, false, false, true, true},
.internal_phy = {true, true, true, true, false, false},
+ .ptp_capable = true,
},
[LAN9372] = {
@@ -1605,9 +2012,11 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.port_cnt = 8, /* total physical port count */
.port_nirqs = 6,
.num_tx_queues = 8,
+ .num_ipms = 8,
.tc_cbs_supported = true,
- .tc_ets_supported = true,
+ .phy_side_mdio_supported = true,
.ops = &lan937x_dev_ops,
+ .phylink_mac_ops = &lan937x_phylink_mac_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -1624,6 +2033,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
true, true, false, false},
.internal_phy = {true, true, true, true,
false, false, true, true},
+ .ptp_capable = true,
},
[LAN9373] = {
@@ -1636,9 +2046,11 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.port_cnt = 5, /* total physical port count */
.port_nirqs = 6,
.num_tx_queues = 8,
+ .num_ipms = 8,
.tc_cbs_supported = true,
- .tc_ets_supported = true,
+ .phy_side_mdio_supported = true,
.ops = &lan937x_dev_ops,
+ .phylink_mac_ops = &lan937x_phylink_mac_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -1655,6 +2067,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
true, true, false, false},
.internal_phy = {true, true, true, false,
false, false, true, true},
+ .ptp_capable = true,
},
[LAN9374] = {
@@ -1667,9 +2080,11 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.port_cnt = 8, /* total physical port count */
.port_nirqs = 6,
.num_tx_queues = 8,
+ .num_ipms = 8,
.tc_cbs_supported = true,
- .tc_ets_supported = true,
+ .phy_side_mdio_supported = true,
.ops = &lan937x_dev_ops,
+ .phylink_mac_ops = &lan937x_phylink_mac_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -1686,6 +2101,43 @@ const struct ksz_chip_data ksz_switch_chips[] = {
true, true, false, false},
.internal_phy = {true, true, true, true,
false, false, true, true},
+ .ptp_capable = true,
+ },
+
+ [LAN9646] = {
+ .chip_id = LAN9646_CHIP_ID,
+ .dev_name = "LAN9646",
+ .num_vlans = 4096,
+ .num_alus = 4096,
+ .num_statics = 16,
+ .cpu_ports = 0x7F, /* can be configured as cpu port */
+ .port_cnt = 7, /* total physical port count */
+ .port_nirqs = 4,
+ .num_tx_queues = 4,
+ .num_ipms = 8,
+ .ops = &ksz9477_dev_ops,
+ .phylink_mac_ops = &ksz9477_phylink_mac_ops,
+ .phy_errata_9477 = true,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = ksz9477_masks,
+ .shifts = ksz9477_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
+ .supports_mii = {false, false, false, false,
+ false, true, true},
+ .supports_rmii = {false, false, false, false,
+ false, true, true},
+ .supports_rgmii = {false, false, false, false,
+ false, true, true},
+ .internal_phy = {true, true, true, true,
+ true, false, false},
+ .gbit_capable = {true, true, true, true, true, true, true},
+ .sgmii_port = 7,
+ .wr_table = &ksz9477_register_set,
+ .rd_table = &ksz9477_register_set,
},
};
EXPORT_SYMBOL_GPL(ksz_switch_chips);
@@ -1756,6 +2208,18 @@ static void ksz_phylink_get_caps(struct dsa_switch *ds, int port,
if (dev->dev_ops->get_caps)
dev->dev_ops->get_caps(dev, port, config);
+
+ if (ds->ops->support_eee && ds->ops->support_eee(ds, port)) {
+ memcpy(config->lpi_interfaces, config->supported_interfaces,
+ sizeof(config->lpi_interfaces));
+
+ config->lpi_capabilities = MAC_100FD;
+ if (dev->info->gbit_capable[port])
+ config->lpi_capabilities |= MAC_1000FD;
+
+ /* EEE is fully operational */
+ config->eee_enabled_default = true;
+ }
}
void ksz_r_mib_stats64(struct ksz_device *dev, int port)
@@ -1764,6 +2228,7 @@ void ksz_r_mib_stats64(struct ksz_device *dev, int port)
struct rtnl_link_stats64 *stats;
struct ksz_stats_raw *raw;
struct ksz_port_mib *mib;
+ int ret;
mib = &dev->ports[port].mib;
stats = &mib->stats64;
@@ -1805,6 +2270,12 @@ void ksz_r_mib_stats64(struct ksz_device *dev, int port)
pstats->rx_pause_frames = raw->rx_pause;
spin_unlock(&mib->stats64_lock);
+
+ if (dev->info->phy_errata_9477 && !ksz_is_sgmii_port(dev, port)) {
+ ret = ksz9477_errata_monitor(dev, port, raw->tx_late_col);
+ if (ret)
+ dev_err(dev->dev, "Failed to monitor transmission halt\n");
+ }
}
void ksz88xx_r_mib_stats64(struct ksz_device *dev, int port)
@@ -1891,10 +2362,8 @@ static void ksz_get_strings(struct dsa_switch *ds, int port,
if (stringset != ETH_SS_STATS)
return;
- for (i = 0; i < dev->info->mib_cnt; i++) {
- memcpy(buf + i * ETH_GSTRING_LEN,
- dev->info->mib_names[i].string, ETH_GSTRING_LEN);
- }
+ for (i = 0; i < dev->info->mib_cnt; i++)
+ ethtool_puts(&buf, dev->info->mib_names[i].string);
}
/**
@@ -1993,6 +2462,12 @@ static void ksz_update_port_member(struct ksz_device *dev, int port)
dev->dev_ops->cfg_port_member(dev, i, val | cpu_port);
}
+ /* HSR ports are set up once, so the assigned membership needs to be
+ * used when the port is enabled.
+ */
+ if (!port_member && p->stp_state == BR_STATE_FORWARDING &&
+ (dev->hsr_ports & BIT(port)))
+ port_member = dev->hsr_ports;
dev->dev_ops->cfg_port_member(dev, port, port_member | cpu_port);
}
@@ -2017,19 +2492,103 @@ static int ksz_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
return dev->dev_ops->w_phy(dev, addr, regnum, val);
}
+/**
+ * ksz_parent_mdio_read - Read data from a PHY register on the parent MDIO bus.
+ * @bus: MDIO bus structure.
+ * @addr: PHY address on the parent MDIO bus.
+ * @regnum: Register number to read.
+ *
+ * This function provides a direct read operation on the parent MDIO bus for
+ * accessing PHY registers. By bypassing SPI or I2C, it uses the parent MDIO bus
+ * to retrieve data from the PHY registers at the specified address and register
+ * number.
+ *
+ * Return: Value of the PHY register, or a negative error code on failure.
+ */
+static int ksz_parent_mdio_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct ksz_device *dev = bus->priv;
+
+ return mdiobus_read_nested(dev->parent_mdio_bus, addr, regnum);
+}
+
+/**
+ * ksz_parent_mdio_write - Write data to a PHY register on the parent MDIO bus.
+ * @bus: MDIO bus structure.
+ * @addr: PHY address on the parent MDIO bus.
+ * @regnum: Register number to write to.
+ * @val: Value to write to the PHY register.
+ *
+ * This function provides a direct write operation on the parent MDIO bus for
+ * accessing PHY registers. Bypassing SPI or I2C, it uses the parent MDIO bus
+ * to modify the PHY register values at the specified address.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int ksz_parent_mdio_write(struct mii_bus *bus, int addr, int regnum,
+ u16 val)
+{
+ struct ksz_device *dev = bus->priv;
+
+ return mdiobus_write_nested(dev->parent_mdio_bus, addr, regnum, val);
+}
+
+/**
+ * ksz_phy_addr_to_port - Map a PHY address to the corresponding switch port.
+ * @dev: Pointer to device structure.
+ * @addr: PHY address to map to a port.
+ *
+ * This function finds the corresponding switch port for a given PHY address by
+ * iterating over all user ports on the device. It checks if a port's PHY
+ * address in `phy_addr_map` matches the specified address and if the port
+ * contains an internal PHY. If a match is found, the index of the port is
+ * returned.
+ *
+ * Return: Port index on success, or -EINVAL if no matching port is found.
+ */
+static int ksz_phy_addr_to_port(struct ksz_device *dev, int addr)
+{
+ struct dsa_switch *ds = dev->ds;
+ struct dsa_port *dp;
+
+ dsa_switch_for_each_user_port(dp, ds) {
+ if (dev->info->internal_phy[dp->index] &&
+ dev->phy_addr_map[dp->index] == addr)
+ return dp->index;
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * ksz_irq_phy_setup - Configure IRQs for PHYs in the KSZ device.
+ * @dev: Pointer to the KSZ device structure.
+ *
+ * Sets up IRQs for each active PHY connected to the KSZ switch by mapping the
+ * appropriate IRQs for each PHY and assigning them to the `user_mii_bus` in
+ * the DSA switch structure. Each IRQ is mapped based on the port's IRQ domain.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
static int ksz_irq_phy_setup(struct ksz_device *dev)
{
struct dsa_switch *ds = dev->ds;
- int phy;
+ int phy, port;
int irq;
int ret;
- for (phy = 0; phy < KSZ_MAX_NUM_PORTS; phy++) {
+ for (phy = 0; phy < PHY_MAX_ADDR; phy++) {
if (BIT(phy) & ds->phys_mii_mask) {
- irq = irq_find_mapping(dev->ports[phy].pirq.domain,
+ port = ksz_phy_addr_to_port(dev, phy);
+ if (port < 0) {
+ ret = port;
+ goto out;
+ }
+
+ irq = irq_find_mapping(dev->ports[port].pirq.domain,
PORT_SRC_PHY_INT);
- if (irq < 0) {
- ret = irq;
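+ /* irq_find_mapping() returns 0 rather than a negative errno
+ * when no mapping exists for this hwirq.
+ */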
+ if (!irq) {
+ ret = -EINVAL;
goto out;
}
ds->user_mii_bus->irq[phy] = irq;
@@ -2044,49 +2603,187 @@ out:
return ret;
}
+/**
+ * ksz_irq_phy_free - Release IRQ mappings for PHYs in the KSZ device.
+ * @dev: Pointer to the KSZ device structure.
+ *
+ * Releases any IRQ mappings previously assigned to active PHYs in the KSZ
+ * switch by disposing of each mapped IRQ in the `user_mii_bus` structure.
+ */
static void ksz_irq_phy_free(struct ksz_device *dev)
{
struct dsa_switch *ds = dev->ds;
int phy;
- for (phy = 0; phy < KSZ_MAX_NUM_PORTS; phy++)
+ for (phy = 0; phy < PHY_MAX_ADDR; phy++)
if (BIT(phy) & ds->phys_mii_mask)
irq_dispose_mapping(ds->user_mii_bus->irq[phy]);
}
+/**
+ * ksz_parse_dt_phy_config - Parse and validate PHY configuration from DT
+ * @dev: pointer to the KSZ device structure
+ * @bus: pointer to the MII bus structure
+ * @mdio_np: pointer to the MDIO node in the device tree
+ *
+ * This function parses and validates PHY configurations for each user port
+ * defined in the device tree for a KSZ switch device. It verifies that the
+ * `phy-handle` properties are correctly set and that the internal PHYs match
+ * expected addresses and parent nodes. Sets up the PHY mask in the MII bus if
+ * all validations pass. Logs error messages for any mismatches or missing data.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int ksz_parse_dt_phy_config(struct ksz_device *dev, struct mii_bus *bus,
+ struct device_node *mdio_np)
+{
+ struct device_node *phy_node, *phy_parent_node;
+ bool phys_are_valid = true;
+ struct dsa_port *dp;
+ u32 phy_addr;
+ int ret;
+
+ dsa_switch_for_each_user_port(dp, dev->ds) {
+ if (!dev->info->internal_phy[dp->index])
+ continue;
+
+ phy_node = of_parse_phandle(dp->dn, "phy-handle", 0);
+ if (!phy_node) {
+ dev_err(dev->dev, "failed to parse phy-handle for port %d.\n",
+ dp->index);
+ phys_are_valid = false;
+ continue;
+ }
+
+ phy_parent_node = of_get_parent(phy_node);
+ if (!phy_parent_node) {
+ dev_err(dev->dev, "failed to get PHY-parent node for port %d\n",
+ dp->index);
+ phys_are_valid = false;
+ } else if (phy_parent_node != mdio_np) {
+ dev_err(dev->dev, "PHY-parent node mismatch for port %d, expected %pOF, got %pOF\n",
+ dp->index, mdio_np, phy_parent_node);
+ phys_are_valid = false;
+ } else {
+ ret = of_property_read_u32(phy_node, "reg", &phy_addr);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to read PHY address for port %d. Error %d\n",
+ dp->index, ret);
+ phys_are_valid = false;
+ } else if (phy_addr != dev->phy_addr_map[dp->index]) {
+ dev_err(dev->dev, "PHY address mismatch for port %d, expected 0x%x, got 0x%x\n",
+ dp->index, dev->phy_addr_map[dp->index],
+ phy_addr);
+ phys_are_valid = false;
+ } else {
+ bus->phy_mask |= BIT(phy_addr);
+ }
+ }
+
+ of_node_put(phy_node);
+ of_node_put(phy_parent_node);
+ }
+
+ if (!phys_are_valid)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * ksz_mdio_register - Register and configure the MDIO bus for the KSZ device.
+ * @dev: Pointer to the KSZ device structure.
+ *
+ * This function sets up and registers an MDIO bus for the KSZ switch device,
+ * allowing access to its internal PHYs. If the device supports side MDIO,
+ * the function will configure the external MDIO controller specified by the
+ * "mdio-parent-bus" device tree property to directly manage internal PHYs.
+ * Otherwise, SPI or I2C access is set up for PHY access.
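+ *
+ * A sketch of the expected binding, following the properties parsed below
+ * (&ext_mdio and phy1 are illustrative labels; the authoritative layout is
+ * in the dt-bindings documentation):
+ *
+ *	mdio {
+ *		mdio-parent-bus = <&ext_mdio>;
+ *		phy1: ethernet-phy@1 {
+ *			reg = <1>;
+ *		};
+ *	};
+ *
+ * Each user port node then references its PHY via a "phy-handle" property,
+ * which ksz_parse_dt_phy_config() validates against the PHY address map.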
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
static int ksz_mdio_register(struct ksz_device *dev)
{
+ struct device_node *parent_bus_node;
+ struct mii_bus *parent_bus = NULL;
struct dsa_switch *ds = dev->ds;
struct device_node *mdio_np;
struct mii_bus *bus;
- int ret;
+ int ret, i;
mdio_np = of_get_child_by_name(dev->dev->of_node, "mdio");
if (!mdio_np)
return 0;
+ parent_bus_node = of_parse_phandle(mdio_np, "mdio-parent-bus", 0);
+ if (parent_bus_node && !dev->info->phy_side_mdio_supported) {
+ dev_err(dev->dev, "Side MDIO bus is not supported for this HW, ignoring 'mdio-parent-bus' property.\n");
+ ret = -EINVAL;
+
+ goto put_mdio_node;
+ } else if (parent_bus_node) {
+ parent_bus = of_mdio_find_bus(parent_bus_node);
+ if (!parent_bus) {
+ ret = -EPROBE_DEFER;
+
+ goto put_mdio_node;
+ }
+
+ dev->parent_mdio_bus = parent_bus;
+ }
+
bus = devm_mdiobus_alloc(ds->dev);
if (!bus) {
- of_node_put(mdio_np);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto put_mdio_node;
+ }
+
+ if (dev->dev_ops->mdio_bus_preinit) {
+ ret = dev->dev_ops->mdio_bus_preinit(dev, !!parent_bus);
+ if (ret)
+ goto put_mdio_node;
+ }
+
+ if (dev->dev_ops->create_phy_addr_map) {
+ ret = dev->dev_ops->create_phy_addr_map(dev, !!parent_bus);
+ if (ret)
+ goto put_mdio_node;
+ } else {
+ for (i = 0; i < dev->info->port_cnt; i++)
+ dev->phy_addr_map[i] = i;
}
bus->priv = dev;
- bus->read = ksz_sw_mdio_read;
- bus->write = ksz_sw_mdio_write;
- bus->name = "ksz user smi";
- snprintf(bus->id, MII_BUS_ID_SIZE, "SMI-%d", ds->index);
+ if (parent_bus) {
+ bus->read = ksz_parent_mdio_read;
+ bus->write = ksz_parent_mdio_write;
+ bus->name = "KSZ side MDIO";
+ snprintf(bus->id, MII_BUS_ID_SIZE, "ksz-side-mdio-%d",
+ ds->index);
+ } else {
+ bus->read = ksz_sw_mdio_read;
+ bus->write = ksz_sw_mdio_write;
+ bus->name = "ksz user smi";
+ if (ds->dst->index != 0) {
+ snprintf(bus->id, MII_BUS_ID_SIZE, "SMI-%d-%d", ds->dst->index, ds->index);
+ } else {
+ snprintf(bus->id, MII_BUS_ID_SIZE, "SMI-%d", ds->index);
+ }
+ }
+
+ ret = ksz_parse_dt_phy_config(dev, bus, mdio_np);
+ if (ret)
+ goto put_mdio_node;
+
+ ds->phys_mii_mask = bus->phy_mask;
bus->parent = ds->dev;
- bus->phy_mask = ~ds->phys_mii_mask;
ds->user_mii_bus = bus;
if (dev->irq > 0) {
ret = ksz_irq_phy_setup(dev);
- if (ret) {
- of_node_put(mdio_np);
- return ret;
- }
+ if (ret)
+ goto put_mdio_node;
}
ret = devm_of_mdiobus_register(ds->dev, bus, mdio_np);
@@ -2097,7 +2794,9 @@ static int ksz_mdio_register(struct ksz_device *dev)
ksz_irq_phy_free(dev);
}
+put_mdio_node:
of_node_put(mdio_np);
+ of_node_put(parent_bus_node);
return ret;
}
@@ -2129,7 +2828,7 @@ static void ksz_irq_bus_sync_unlock(struct irq_data *d)
struct ksz_device *dev = kirq->dev;
int ret;
- ret = ksz_write32(dev, kirq->reg_mask, kirq->masked);
+ ret = ksz_write8(dev, kirq->reg_mask, kirq->masked);
if (ret)
dev_err(dev->dev, "failed to change IRQ mask\n");
@@ -2208,8 +2907,8 @@ static int ksz_irq_common_setup(struct ksz_device *dev, struct ksz_irq *kirq)
kirq->dev = dev;
kirq->masked = ~0;
- kirq->domain = irq_domain_add_simple(dev->dev->of_node, kirq->nirqs, 0,
- &ksz_irq_domain_ops, kirq);
+ kirq->domain = irq_domain_create_simple(dev_fwnode(dev->dev), kirq->nirqs, 0,
+ &ksz_irq_domain_ops, kirq);
if (!kirq->domain)
return -ENOMEM;
@@ -2253,8 +2952,8 @@ static int ksz_pirq_setup(struct ksz_device *dev, u8 p)
snprintf(pirq->name, sizeof(pirq->name), "port_irq-%d", p);
pirq->irq_num = irq_find_mapping(dev->girq.domain, p);
- if (pirq->irq_num < 0)
- return pirq->irq_num;
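+ /* A missing mapping is reported as 0, not as a negative errno. */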
+ if (!pirq->irq_num)
+ return -EINVAL;
return ksz_irq_common_setup(dev, pirq);
}
@@ -2264,6 +2963,7 @@ static int ksz_parse_drive_strength(struct ksz_device *dev);
static int ksz_setup(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
+ u16 storm_mask, storm_rate;
struct dsa_port *dp;
struct ksz_port *p;
const u16 *regs;
@@ -2286,11 +2986,21 @@ static int ksz_setup(struct dsa_switch *ds)
if (ret)
return ret;
+ if (ksz_has_sgmii_port(dev) && dev->dev_ops->pcs_create) {
+ ret = dev->dev_ops->pcs_create(dev);
+ if (ret)
+ return ret;
+ }
+
/* set broadcast storm protection 10% rate */
+ storm_mask = BROADCAST_STORM_RATE;
+ storm_rate = (BROADCAST_STORM_VALUE * BROADCAST_STORM_PROT_RATE) / 100;
+ if (ksz_is_ksz8463(dev)) {
+ storm_mask = swab16(storm_mask);
+ storm_rate = swab16(storm_rate);
+ }
regmap_update_bits(ksz_regmap_16(dev), regs[S_BROADCAST_CTRL],
- BROADCAST_STORM_RATE,
- (BROADCAST_STORM_VALUE *
- BROADCAST_STORM_PROT_RATE) / 100);
+ storm_mask, storm_rate);
dev->dev_ops->config_cpu_port(ds);
@@ -2304,6 +3014,7 @@ static int ksz_setup(struct dsa_switch *ds)
ksz_init_mib_timer(dev);
ds->configure_vlan_while_not_filtering = false;
+ ds->dscp_prio_mapping_is_global = true;
if (dev->dev_ops->setup) {
ret = dev->dev_ops->setup(ds);
@@ -2327,18 +3038,23 @@ static int ksz_setup(struct dsa_switch *ds)
dsa_switch_for_each_user_port(dp, dev->ds) {
ret = ksz_pirq_setup(dev, dp->index);
if (ret)
- goto out_girq;
+ goto port_release;
- ret = ksz_ptp_irq_setup(ds, dp->index);
- if (ret)
- goto out_pirq;
+ if (dev->info->ptp_capable) {
+ ret = ksz_ptp_irq_setup(ds, dp->index);
+ if (ret)
+ goto pirq_release;
+ }
}
}
- ret = ksz_ptp_clock_register(ds);
- if (ret) {
- dev_err(dev->dev, "Failed to register PTP clock: %d\n", ret);
- goto out_ptpirq;
+ if (dev->info->ptp_capable) {
+ ret = ksz_ptp_clock_register(ds);
+ if (ret) {
+ dev_err(dev->dev, "Failed to register PTP clock: %d\n",
+ ret);
+ goto port_release;
+ }
}
ret = ksz_mdio_register(dev);
@@ -2347,6 +3063,10 @@ static int ksz_setup(struct dsa_switch *ds)
goto out_ptp_clock_unregister;
}
+ ret = ksz_dcb_init(dev);
+ if (ret)
+ goto out_ptp_clock_unregister;
+
/* start switch */
regmap_update_bits(ksz_regmap_8(dev), regs[S_START_CTRL],
SW_START, SW_START);
@@ -2354,18 +3074,18 @@ static int ksz_setup(struct dsa_switch *ds)
return 0;
out_ptp_clock_unregister:
- ksz_ptp_clock_unregister(ds);
-out_ptpirq:
- if (dev->irq > 0)
- dsa_switch_for_each_user_port(dp, dev->ds)
- ksz_ptp_irq_free(ds, dp->index);
-out_pirq:
- if (dev->irq > 0)
- dsa_switch_for_each_user_port(dp, dev->ds)
+ if (dev->info->ptp_capable)
+ ksz_ptp_clock_unregister(ds);
+port_release:
+ if (dev->irq > 0) {
+ dsa_switch_for_each_user_port_continue_reverse(dp, dev->ds) {
+ if (dev->info->ptp_capable)
+ ksz_ptp_irq_free(ds, dp->index);
+pirq_release:
ksz_irq_free(&dev->ports[dp->index].pirq);
-out_girq:
- if (dev->irq > 0)
+ }
ksz_irq_free(&dev->girq);
+ }
return ret;
}
@@ -2375,11 +3095,13 @@ static void ksz_teardown(struct dsa_switch *ds)
struct ksz_device *dev = ds->priv;
struct dsa_port *dp;
- ksz_ptp_clock_unregister(ds);
+ if (dev->info->ptp_capable)
+ ksz_ptp_clock_unregister(ds);
if (dev->irq > 0) {
dsa_switch_for_each_user_port(dp, dev->ds) {
- ksz_ptp_irq_free(ds, dp->index);
+ if (dev->info->ptp_capable)
+ ksz_ptp_irq_free(ds, dp->index);
ksz_irq_free(&dev->ports[dp->index].pirq);
}
@@ -2498,7 +3220,7 @@ static u32 ksz_get_phy_flags(struct dsa_switch *ds, int port)
struct ksz_device *dev = ds->priv;
switch (dev->chip_id) {
- case KSZ8830_CHIP_ID:
+ case KSZ88X3_CHIP_ID:
/* Silicon Errata Sheet (DS80000830A):
* Port 1 does not work with LinkMD Cable-Testing.
* Port 1 does not respond to received PAUSE control frames.
@@ -2506,31 +3228,20 @@ static u32 ksz_get_phy_flags(struct dsa_switch *ds, int port)
if (!port)
return MICREL_KSZ8_P1_ERRATA;
break;
- case KSZ9477_CHIP_ID:
- /* KSZ9477 Errata DS80000754C
- *
- * Module 4: Energy Efficient Ethernet (EEE) feature select must
- * be manually disabled
- * The EEE feature is enabled by default, but it is not fully
- * operational. It must be manually disabled through register
- * controls. If not disabled, the PHY ports can auto-negotiate
- * to enable EEE, and this feature can cause link drops when
- * linked to another device supporting EEE.
- */
- return MICREL_NO_EEE;
}
return 0;
}
-static void ksz_mac_link_down(struct dsa_switch *ds, int port,
- unsigned int mode, phy_interface_t interface)
+static void ksz_phylink_mac_link_down(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface)
{
- struct ksz_device *dev = ds->priv;
- struct ksz_port *p = &dev->ports[port];
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ksz_device *dev = dp->ds->priv;
/* Read all MIB counters when the link is going down. */
- p->read = true;
+ dev->ports[dp->index].read = true;
/* timer started */
if (dev->mib_read_interval)
schedule_delayed_work(&dev->mib_read, 0);
@@ -2660,9 +3371,33 @@ static int ksz_port_mdb_del(struct dsa_switch *ds, int port,
return dev->dev_ops->mdb_del(dev, port, mdb, db);
}
+static int ksz9477_set_default_prio_queue_mapping(struct ksz_device *dev,
+ int port)
+{
+ u32 queue_map = 0;
+ int ipm;
+
+ for (ipm = 0; ipm < dev->info->num_ipms; ipm++) {
+ int queue;
+
+ /* Traffic Type (TT) corresponds to the Internal Priority
+ * Map (IPM) in the switch. Traffic Class (TC) corresponds
+ * to the queue in the switch.
+ */
+ queue = ieee8021q_tt_to_tc(ipm, dev->info->num_tx_queues);
+ if (queue < 0)
+ return queue;
+
+ queue_map |= queue << (ipm * KSZ9477_PORT_TC_MAP_S);
+ }
+
+ return ksz_pwrite32(dev, port, KSZ9477_PORT_MRI_TC_MAP__4, queue_map);
+}
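For orientation, KSZ9477_PORT_TC_MAP_S is 4, so the loop above packs one 4-bit queue index per Internal Priority Map (IPM) into the word written to KSZ9477_PORT_MRI_TC_MAP__4. A minimal sketch of the resulting layout, assuming a device with eight IPMs (each q(N) being the value returned by ieee8021q_tt_to_tc()):

	/*
	 * queue_map = q(0)       | q(1) <<  4 | q(2) <<  8 | q(3) << 12 |
	 *             q(4) << 16 | q(5) << 20 | q(6) << 24 | q(7) << 28;
	 */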
+
static int ksz_port_setup(struct dsa_switch *ds, int port)
{
struct ksz_device *dev = ds->priv;
+ int ret;
if (!dsa_is_user_port(ds, port))
return 0;
@@ -2670,11 +3405,17 @@ static int ksz_port_setup(struct dsa_switch *ds, int port)
/* setup user port */
dev->dev_ops->port_setup(dev, port, false);
+ if (!is_ksz8(dev)) {
+ ret = ksz9477_set_default_prio_queue_mapping(dev, port);
+ if (ret)
+ return ret;
+ }
+
/* port_stp_state_set() will be called after to enable the port so
* there is no need to do anything.
*/
- return 0;
+ return ksz_dcb_init_port(dev, port);
}
void ksz_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
@@ -2736,6 +3477,7 @@ static void ksz_port_teardown(struct dsa_switch *ds, int port)
case KSZ9893_CHIP_ID:
case KSZ9896_CHIP_ID:
case KSZ9897_CHIP_ID:
+ case LAN9646_CHIP_ID:
if (dsa_is_user_port(ds, port))
ksz9477_port_acl_free(dev, port);
}
@@ -2779,12 +3521,11 @@ static enum dsa_tag_protocol ksz_get_tag_protocol(struct dsa_switch *ds,
struct ksz_device *dev = ds->priv;
enum dsa_tag_protocol proto = DSA_TAG_PROTO_NONE;
- if (dev->chip_id == KSZ8795_CHIP_ID ||
- dev->chip_id == KSZ8794_CHIP_ID ||
- dev->chip_id == KSZ8765_CHIP_ID)
+ if (ksz_is_ksz87xx(dev) || ksz_is_8895_family(dev))
proto = DSA_TAG_PROTO_KSZ8795;
- if (dev->chip_id == KSZ8830_CHIP_ID ||
+ if (dev->chip_id == KSZ88X3_CHIP_ID ||
+ dev->chip_id == KSZ8463_CHIP_ID ||
dev->chip_id == KSZ8563_CHIP_ID ||
dev->chip_id == KSZ9893_CHIP_ID ||
dev->chip_id == KSZ9563_CHIP_ID)
@@ -2794,7 +3535,8 @@ static enum dsa_tag_protocol ksz_get_tag_protocol(struct dsa_switch *ds,
dev->chip_id == KSZ9477_CHIP_ID ||
dev->chip_id == KSZ9896_CHIP_ID ||
dev->chip_id == KSZ9897_CHIP_ID ||
- dev->chip_id == KSZ9567_CHIP_ID)
+ dev->chip_id == KSZ9567_CHIP_ID ||
+ dev->chip_id == LAN9646_CHIP_ID)
proto = DSA_TAG_PROTO_KSZ9477;
if (is_lan937x(dev))
@@ -2896,7 +3638,10 @@ static int ksz_max_mtu(struct dsa_switch *ds, int port)
case KSZ8794_CHIP_ID:
case KSZ8765_CHIP_ID:
return KSZ8795_HUGE_PACKET_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN;
- case KSZ8830_CHIP_ID:
+ case KSZ8463_CHIP_ID:
+ case KSZ88X3_CHIP_ID:
+ case KSZ8864_CHIP_ID:
+ case KSZ8895_CHIP_ID:
return KSZ8863_HUGE_PACKET_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN;
case KSZ8563_CHIP_ID:
case KSZ8567_CHIP_ID:
@@ -2911,63 +3656,74 @@ static int ksz_max_mtu(struct dsa_switch *ds, int port)
case LAN9372_CHIP_ID:
case LAN9373_CHIP_ID:
case LAN9374_CHIP_ID:
+ case LAN9646_CHIP_ID:
return KSZ9477_MAX_FRAME_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN;
}
return -EOPNOTSUPP;
}
-static int ksz_validate_eee(struct dsa_switch *ds, int port)
+/**
+ * ksz_support_eee - Determine Energy Efficient Ethernet (EEE) support for a
+ * port
+ * @ds: Pointer to the DSA switch structure
+ * @port: Port number to check
+ *
+ * This function also documents devices where EEE was initially advertised but
+ * later withdrawn due to reliability issues, as described in official errata
+ * documents. These devices are explicitly listed to record known limitations,
+ * even if there is no technical necessity for runtime checks.
+ *
+ * Return: true if the internal PHY on the given port supports fully
+ * operational EEE, false otherwise.
+ */
+static bool ksz_support_eee(struct dsa_switch *ds, int port)
{
struct ksz_device *dev = ds->priv;
if (!dev->info->internal_phy[port])
- return -EOPNOTSUPP;
+ return false;
switch (dev->chip_id) {
case KSZ8563_CHIP_ID:
+ case KSZ9563_CHIP_ID:
+ case KSZ9893_CHIP_ID:
+ return true;
case KSZ8567_CHIP_ID:
+ /* KSZ8567R Errata DS80000752C Module 4 */
+ case KSZ8765_CHIP_ID:
+ case KSZ8794_CHIP_ID:
+ case KSZ8795_CHIP_ID:
+ /* KSZ879x/KSZ877x/KSZ876x Errata DS80000687C Module 2 */
case KSZ9477_CHIP_ID:
- case KSZ9563_CHIP_ID:
+ /* KSZ9477S Errata DS80000754A Module 4 */
case KSZ9567_CHIP_ID:
- case KSZ9893_CHIP_ID:
+ /* KSZ9567S Errata DS80000756A Module 4 */
case KSZ9896_CHIP_ID:
+ /* KSZ9896C Errata DS80000757A Module 3 */
case KSZ9897_CHIP_ID:
- return 0;
+ case LAN9646_CHIP_ID:
+ /* KSZ9897R Errata DS80000758C Module 4 */
+ /* Energy Efficient Ethernet (EEE) feature select must be
+ * manually disabled
+ * The EEE feature is enabled by default, but it is not fully
+ * operational. It must be manually disabled through register
+ * controls. If not disabled, the PHY ports can auto-negotiate
+ * to enable EEE, and this feature can cause link drops when
+ * linked to another device supporting EEE.
+ *
+ * The same item appears in the errata for all switches above.
+ */
+ break;
}
- return -EOPNOTSUPP;
-}
-
-static int ksz_get_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_keee *e)
-{
- int ret;
-
- ret = ksz_validate_eee(ds, port);
- if (ret)
- return ret;
-
- /* There is no documented control of Tx LPI configuration. */
- e->tx_lpi_enabled = true;
-
- /* There is no documented control of Tx LPI timer. According to tests
- * Tx LPI timer seems to be set by default to minimal value.
- */
- e->tx_lpi_timer = 0;
-
- return 0;
+ return false;
}
static int ksz_set_mac_eee(struct dsa_switch *ds, int port,
struct ethtool_keee *e)
{
struct ksz_device *dev = ds->priv;
- int ret;
-
- ret = ksz_validate_eee(ds, port);
- if (ret)
- return ret;
if (!e->tx_lpi_enabled) {
dev_err(dev->dev, "Disabling EEE Tx LPI is not supported\n");
@@ -3013,7 +3769,8 @@ static void ksz_set_xmii(struct ksz_device *dev, int port,
/* On KSZ9893, disable RGMII in-band status support */
if (dev->chip_id == KSZ9893_CHIP_ID ||
dev->chip_id == KSZ8563_CHIP_ID ||
- dev->chip_id == KSZ9563_CHIP_ID)
+ dev->chip_id == KSZ9563_CHIP_ID ||
+ is_lan937x(dev))
data8 &= ~P_MII_MAC_MODE;
break;
default:
@@ -3050,7 +3807,7 @@ phy_interface_t ksz_get_xmii(struct ksz_device *dev, int port, bool gbit)
else
interface = PHY_INTERFACE_MODE_MII;
} else if (val == bitval[P_RMII_SEL]) {
- interface = PHY_INTERFACE_MODE_RGMII;
+ interface = PHY_INTERFACE_MODE_RMII;
} else {
interface = PHY_INTERFACE_MODE_RGMII;
if (data8 & P_RGMII_ID_EG_ENABLE)
@@ -3065,21 +3822,32 @@ phy_interface_t ksz_get_xmii(struct ksz_device *dev, int port, bool gbit)
return interface;
}
-static void ksz_phylink_mac_config(struct dsa_switch *ds, int port,
+static void ksz88x3_phylink_mac_config(struct phylink_config *config,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ksz_device *dev = dp->ds->priv;
+
+ dev->ports[dp->index].manual_flow = !(state->pause & MLO_PAUSE_AN);
+}
+
+static void ksz_phylink_mac_config(struct phylink_config *config,
unsigned int mode,
const struct phylink_link_state *state)
{
- struct ksz_device *dev = ds->priv;
-
- if (ksz_is_ksz88x3(dev)) {
- dev->ports[port].manual_flow = !(state->pause & MLO_PAUSE_AN);
- return;
- }
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ksz_device *dev = dp->ds->priv;
+ int port = dp->index;
/* Internal PHYs */
if (dev->info->internal_phy[port])
return;
+ /* No need to configure XMII control register when using SGMII. */
+ if (ksz_is_sgmii_port(dev, port))
+ return;
+
if (phylink_autoneg_inband(mode)) {
dev_err(dev->dev, "In-band AN not supported!\n");
return;
@@ -3087,9 +3855,6 @@ static void ksz_phylink_mac_config(struct dsa_switch *ds, int port,
ksz_set_xmii(dev, port, state->interface);
- if (dev->dev_ops->phylink_mac_config)
- dev->dev_ops->phylink_mac_config(dev, port, mode, state);
-
if (dev->dev_ops->setup_rgmii_delay)
dev->dev_ops->setup_rgmii_delay(dev, port);
}
@@ -3187,13 +3952,16 @@ static void ksz_duplex_flowctrl(struct ksz_device *dev, int port, int duplex,
ksz_prmw8(dev, port, regs[P_XMII_CTRL_0], mask, val);
}
-static void ksz9477_phylink_mac_link_up(struct ksz_device *dev, int port,
+static void ksz9477_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
unsigned int mode,
phy_interface_t interface,
- struct phy_device *phydev, int speed,
- int duplex, bool tx_pause,
+ int speed, int duplex, bool tx_pause,
bool rx_pause)
{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ksz_device *dev = dp->ds->priv;
+ int port = dp->index;
struct ksz_port *p;
p = &dev->ports[port];
@@ -3209,18 +3977,6 @@ static void ksz9477_phylink_mac_link_up(struct ksz_device *dev, int port,
ksz_duplex_flowctrl(dev, port, duplex, tx_pause, rx_pause);
}
-static void ksz_phylink_mac_link_up(struct dsa_switch *ds, int port,
- unsigned int mode,
- phy_interface_t interface,
- struct phy_device *phydev, int speed,
- int duplex, bool tx_pause, bool rx_pause)
-{
- struct ksz_device *dev = ds->priv;
-
- dev->dev_ops->phylink_mac_link_up(dev, port, mode, interface, phydev,
- speed, duplex, tx_pause, rx_pause);
-}
-
static int ksz_switch_detect(struct ksz_device *dev)
{
u8 id1, id2, id4;
@@ -3237,6 +3993,9 @@ static int ksz_switch_detect(struct ksz_device *dev)
id2 = FIELD_GET(SW_CHIP_ID_M, id16);
switch (id1) {
+ case KSZ84_FAMILY_ID:
+ dev->chip_id = KSZ8463_CHIP_ID;
+ break;
case KSZ87_FAMILY_ID:
if (id2 == KSZ87_CHIP_ID_95) {
u8 val;
@@ -3254,10 +4013,22 @@ static int ksz_switch_detect(struct ksz_device *dev)
break;
case KSZ88_FAMILY_ID:
if (id2 == KSZ88_CHIP_ID_63)
- dev->chip_id = KSZ8830_CHIP_ID;
+ dev->chip_id = KSZ88X3_CHIP_ID;
else
return -ENODEV;
break;
+ case KSZ8895_FAMILY_ID:
+ if (id2 == KSZ8895_CHIP_ID_95 ||
+ id2 == KSZ8895_CHIP_ID_95R)
+ dev->chip_id = KSZ8895_CHIP_ID;
+ else
+ return -ENODEV;
+ ret = ksz_read8(dev, REG_KSZ8864_CHIP_ID, &id4);
+ if (ret)
+ return ret;
+ if (id4 & SW_KSZ8864)
+ dev->chip_id = KSZ8864_CHIP_ID;
+ break;
default:
ret = ksz_read32(dev, REG_CHIP_ID0, &id32);
if (ret)
@@ -3277,7 +4048,10 @@ static int ksz_switch_detect(struct ksz_device *dev)
case LAN9372_CHIP_ID:
case LAN9373_CHIP_ID:
case LAN9374_CHIP_ID:
- dev->chip_id = id32;
+
+ /* LAN9646 does not have its own chip id. */
+ if (dev->chip_id != LAN9646_CHIP_ID)
+ dev->chip_id = id32;
break;
case KSZ9893_CHIP_ID:
ret = ksz_read8(dev, REG_CHIP_ID4,
@@ -3316,6 +4090,7 @@ static int ksz_cls_flower_add(struct dsa_switch *ds, int port,
case KSZ9893_CHIP_ID:
case KSZ9896_CHIP_ID:
case KSZ9897_CHIP_ID:
+ case LAN9646_CHIP_ID:
return ksz9477_cls_flower_add(ds, port, cls, ingress);
}
@@ -3336,6 +4111,7 @@ static int ksz_cls_flower_del(struct dsa_switch *ds, int port,
case KSZ9893_CHIP_ID:
case KSZ9896_CHIP_ID:
case KSZ9897_CHIP_ID:
+ case LAN9646_CHIP_ID:
return ksz9477_cls_flower_del(ds, port, cls, ingress);
}
@@ -3461,6 +4237,104 @@ static int ksz_ets_band_to_queue(struct tc_ets_qopt_offload_replace_params *p,
return p->bands - 1 - band;
}
+static u8 ksz8463_tc_ctrl(int port, int queue)
+{
+ u8 reg;
+
+ reg = 0xC8 + port * 4;
+ reg += ((3 - queue) / 2) * 2;
+ reg++;
+ reg -= (queue & 1);
+ return reg;
+}
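Worked example of the address arithmetic above (integer division truncates), assuming port 0 and the four queues the callers iterate over:

	/*
	 * ksz8463_tc_ctrl(0, 0) = 0xCB
	 * ksz8463_tc_ctrl(0, 1) = 0xCA
	 * ksz8463_tc_ctrl(0, 2) = 0xC9
	 * ksz8463_tc_ctrl(0, 3) = 0xC8
	 * ksz8463_tc_ctrl(1, 3) = 0xCC   (next port starts 4 registers higher)
	 */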
+
+/**
+ * ksz88x3_tc_ets_add - Configure ETS (Enhanced Transmission Selection)
+ * for a port on KSZ88x3 switch
+ * @dev: Pointer to the KSZ switch device structure
+ * @port: Port number to configure
+ * @p: Pointer to offload replace parameters describing ETS bands and mapping
+ *
+ * The KSZ88x3 supports two scheduling modes: Strict Priority and
+ * Weighted Fair Queuing (WFQ). Both modes have fixed behavior:
+ * - No configurable queue-to-priority mapping
+ * - No weight adjustment in WFQ mode
+ *
+ * This function configures the switch to use strict priority mode by
+ * clearing the WFQ enable bit for all queues associated with ETS bands.
+ * If strict priority is not explicitly requested, the switch will default
+ * to WFQ mode.
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+static int ksz88x3_tc_ets_add(struct ksz_device *dev, int port,
+ struct tc_ets_qopt_offload_replace_params *p)
+{
+ int ret, band;
+
+ /* Only strict priority mode is supported for now.
+ * WFQ is implicitly enabled when strict mode is disabled.
+ */
+ for (band = 0; band < p->bands; band++) {
+ int queue = ksz_ets_band_to_queue(p, band);
+ u8 reg;
+
+ /* Calculate TXQ Split Control register address for this
+ * port/queue
+ */
+ reg = KSZ8873_TXQ_SPLIT_CTRL_REG(port, queue);
+ if (ksz_is_ksz8463(dev))
+ reg = ksz8463_tc_ctrl(port, queue);
+
+ /* Clear WFQ enable bit to select strict priority scheduling */
+ ret = ksz_rmw8(dev, reg, KSZ8873_TXQ_WFQ_ENABLE, 0);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * ksz88x3_tc_ets_del - Reset ETS (Enhanced Transmission Selection) config
+ * for a port on KSZ88x3 switch
+ * @dev: Pointer to the KSZ switch device structure
+ * @port: Port number to reset
+ *
+ * The KSZ88x3 supports only fixed scheduling modes: Strict Priority or
+ * Weighted Fair Queuing (WFQ), with no reconfiguration of weights or
+ * queue mapping. This function resets the port’s scheduling mode to
+ * the default, which is WFQ, by enabling the WFQ bit for all queues.
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+static int ksz88x3_tc_ets_del(struct ksz_device *dev, int port)
+{
+ int ret, queue;
+
+ /* Iterate over all transmit queues for this port */
+ for (queue = 0; queue < dev->info->num_tx_queues; queue++) {
+ u8 reg;
+
+ /* Calculate TXQ Split Control register address for this
+ * port/queue
+ */
+ reg = KSZ8873_TXQ_SPLIT_CTRL_REG(port, queue);
+ if (ksz_is_ksz8463(dev))
+ reg = ksz8463_tc_ctrl(port, queue);
+
+ /* Set WFQ enable bit to revert back to default scheduling
+ * mode
+ */
+ ret = ksz_rmw8(dev, reg, KSZ8873_TXQ_WFQ_ENABLE,
+ KSZ8873_TXQ_WFQ_ENABLE);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int ksz_queue_set_strict(struct ksz_device *dev, int port, int queue)
{
int ret;
@@ -3522,7 +4396,7 @@ static int ksz_tc_ets_add(struct ksz_device *dev, int port,
for (tc_prio = 0; tc_prio < ARRAY_SIZE(p->priomap); tc_prio++) {
int queue;
- if (tc_prio > KSZ9477_MAX_TC_PRIO)
+ if (tc_prio >= dev->info->num_ipms)
break;
queue = ksz_ets_band_to_queue(p, p->priomap[tc_prio]);
@@ -3534,8 +4408,7 @@ static int ksz_tc_ets_add(struct ksz_device *dev, int port,
static int ksz_tc_ets_del(struct ksz_device *dev, int port)
{
- int ret, queue, tc_prio, s;
- u32 queue_map = 0;
+ int ret, queue;
/* To restore the default chip configuration, set all queues to use the
* WRR scheduler with a weight of 1.
@@ -3543,35 +4416,15 @@ static int ksz_tc_ets_del(struct ksz_device *dev, int port)
for (queue = 0; queue < dev->info->num_tx_queues; queue++) {
ret = ksz_queue_set_wrr(dev, port, queue,
KSZ9477_DEFAULT_WRR_WEIGHT);
+
if (ret)
return ret;
}
- switch (dev->info->num_tx_queues) {
- case 2:
- s = 2;
- break;
- case 4:
- s = 1;
- break;
- case 8:
- s = 0;
- break;
- default:
- return -EINVAL;
- }
-
/* Revert the queue mapping for TC-priority to its default setting on
* the chip.
*/
- for (tc_prio = 0; tc_prio <= KSZ9477_MAX_TC_PRIO; tc_prio++) {
- int queue;
-
- queue = tc_prio >> s;
- queue_map |= queue << (tc_prio * KSZ9477_PORT_TC_MAP_S);
- }
-
- return ksz_pwrite32(dev, port, KSZ9477_PORT_MRI_TC_MAP__4, queue_map);
+ return ksz9477_set_default_prio_queue_mapping(dev, port);
}
static int ksz_tc_ets_validate(struct ksz_device *dev, int port,
@@ -3616,7 +4469,7 @@ static int ksz_tc_setup_qdisc_ets(struct dsa_switch *ds, int port,
struct ksz_device *dev = ds->priv;
int ret;
- if (!dev->info->tc_ets_supported)
+ if (is_ksz8(dev) && !(ksz_is_ksz88x3(dev) || ksz_is_ksz8463(dev)))
return -EOPNOTSUPP;
if (qopt->parent != TC_H_ROOT) {
@@ -3630,9 +4483,16 @@ static int ksz_tc_setup_qdisc_ets(struct dsa_switch *ds, int port,
if (ret)
return ret;
- return ksz_tc_ets_add(dev, port, &qopt->replace_params);
+ if (ksz_is_ksz88x3(dev) || ksz_is_ksz8463(dev))
+ return ksz88x3_tc_ets_add(dev, port,
+ &qopt->replace_params);
+ else
+ return ksz_tc_ets_add(dev, port, &qopt->replace_params);
case TC_ETS_DESTROY:
- return ksz_tc_ets_del(dev, port);
+ if (ksz_is_ksz88x3(dev) || ksz_is_ksz8463(dev))
+ return ksz88x3_tc_ets_del(dev, port);
+ else
+ return ksz_tc_ets_del(dev, port);
case TC_ETS_STATS:
case TC_ETS_GRAFT:
return -EOPNOTSUPP;
@@ -3654,24 +4514,214 @@ static int ksz_setup_tc(struct dsa_switch *ds, int port,
}
}
+/**
+ * ksz_handle_wake_reason - Handle wake reason on a specified port.
+ * @dev: The device structure.
+ * @port: The port number.
+ *
+ * This function reads the PME (Power Management Event) status register of a
+ * specified port to determine the wake reason. If there is no wake event, it
+ * returns early. Otherwise, it logs the wake reason which could be due to a
+ * "Magic Packet", "Link Up", or "Energy Detect" event. The PME status register
+ * is then cleared to acknowledge the handling of the wake event.
+ *
+ * Return: 0 on success, or an error code on failure.
+ */
+int ksz_handle_wake_reason(struct ksz_device *dev, int port)
+{
+ const struct ksz_dev_ops *ops = dev->dev_ops;
+ const u16 *regs = dev->info->regs;
+ u8 pme_status;
+ int ret;
+
+ ret = ops->pme_pread8(dev, port, regs[REG_PORT_PME_STATUS],
+ &pme_status);
+ if (ret)
+ return ret;
+
+ if (!pme_status)
+ return 0;
+
+ dev_dbg(dev->dev, "Wake event on port %d due to:%s%s%s\n", port,
+ pme_status & PME_WOL_MAGICPKT ? " \"Magic Packet\"" : "",
+ pme_status & PME_WOL_LINKUP ? " \"Link Up\"" : "",
+ pme_status & PME_WOL_ENERGY ? " \"Energy detect\"" : "");
+
+ return ops->pme_pwrite8(dev, port, regs[REG_PORT_PME_STATUS],
+ pme_status);
+}
+
+/**
+ * ksz_get_wol - Get Wake-on-LAN settings for a specified port.
+ * @ds: The dsa_switch structure.
+ * @port: The port number.
+ * @wol: Pointer to ethtool Wake-on-LAN settings structure.
+ *
+ * This function checks the device PME wakeup_source flag and chip_id.
+ * If enabled and supported, it sets the supported and active WoL
+ * flags.
+ */
static void ksz_get_wol(struct dsa_switch *ds, int port,
struct ethtool_wolinfo *wol)
{
struct ksz_device *dev = ds->priv;
+ const u16 *regs = dev->info->regs;
+ u8 pme_ctrl;
+ int ret;
+
+ if (!is_ksz9477(dev) && !ksz_is_ksz87xx(dev))
+ return;
+
+ if (!dev->wakeup_source)
+ return;
+
+ wol->supported = WAKE_PHY;
+
+ /* Check if the current MAC address on this port can be set
+ * as global for WAKE_MAGIC support. The result may vary
+ * dynamically based on the other ports' configuration.
+ */
+ if (ksz_is_port_mac_global_usable(dev->ds, port))
+ wol->supported |= WAKE_MAGIC;
+
+ ret = dev->dev_ops->pme_pread8(dev, port, regs[REG_PORT_PME_CTRL],
+ &pme_ctrl);
+ if (ret)
+ return;
- if (dev->dev_ops->get_wol)
- dev->dev_ops->get_wol(dev, port, wol);
+ if (pme_ctrl & PME_WOL_MAGICPKT)
+ wol->wolopts |= WAKE_MAGIC;
+ if (pme_ctrl & (PME_WOL_LINKUP | PME_WOL_ENERGY))
+ wol->wolopts |= WAKE_PHY;
}
+/**
+ * ksz_set_wol - Set Wake-on-LAN settings for a specified port.
+ * @ds: The dsa_switch structure.
+ * @port: The port number.
+ * @wol: Pointer to ethtool Wake-on-LAN settings structure.
+ *
+ * This function configures Wake-on-LAN (WoL) settings for a specified
+ * port. It validates the provided WoL options, checks if PME is
+ * enabled and supported, clears any previous wake reasons, and
+ * programs the Magic Packet and/or Link Up / Energy Detect flags in
+ * the port's PME control register as requested.
+ *
+ * Return: 0 on success, or other error codes on failure.
+ */
static int ksz_set_wol(struct dsa_switch *ds, int port,
struct ethtool_wolinfo *wol)
{
+ u8 pme_ctrl = 0, pme_ctrl_old = 0;
struct ksz_device *dev = ds->priv;
+ const u16 *regs = dev->info->regs;
+ bool magic_switched_off;
+ bool magic_switched_on;
+ int ret;
- if (dev->dev_ops->set_wol)
- return dev->dev_ops->set_wol(dev, port, wol);
+ if (wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
+ return -EINVAL;
- return -EOPNOTSUPP;
+ if (!is_ksz9477(dev) && !ksz_is_ksz87xx(dev))
+ return -EOPNOTSUPP;
+
+ if (!dev->wakeup_source)
+ return -EOPNOTSUPP;
+
+ ret = ksz_handle_wake_reason(dev, port);
+ if (ret)
+ return ret;
+
+ if (wol->wolopts & WAKE_MAGIC)
+ pme_ctrl |= PME_WOL_MAGICPKT;
+ if (wol->wolopts & WAKE_PHY)
+ pme_ctrl |= PME_WOL_LINKUP | PME_WOL_ENERGY;
+
+ ret = dev->dev_ops->pme_pread8(dev, port, regs[REG_PORT_PME_CTRL],
+ &pme_ctrl_old);
+ if (ret)
+ return ret;
+
+ if (pme_ctrl_old == pme_ctrl)
+ return 0;
+
+ magic_switched_off = (pme_ctrl_old & PME_WOL_MAGICPKT) &&
+ !(pme_ctrl & PME_WOL_MAGICPKT);
+ magic_switched_on = !(pme_ctrl_old & PME_WOL_MAGICPKT) &&
+ (pme_ctrl & PME_WOL_MAGICPKT);
+
+ /* To keep the MAC address reference count consistent, do this
+ * operation only when the WoL settings actually change.
+ */
+ if (magic_switched_on) {
+ ret = ksz_switch_macaddr_get(dev->ds, port, NULL);
+ if (ret)
+ return ret;
+ } else if (magic_switched_off) {
+ ksz_switch_macaddr_put(dev->ds);
+ }
+
+ ret = dev->dev_ops->pme_pwrite8(dev, port, regs[REG_PORT_PME_CTRL],
+ pme_ctrl);
+ if (ret) {
+ if (magic_switched_on)
+ ksz_switch_macaddr_put(dev->ds);
+ return ret;
+ }
+
+ return 0;
+}
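As a quick illustration of the bit packing above, with PME_WOL_MAGICPKT = BIT(2), PME_WOL_LINKUP = BIT(1) and PME_WOL_ENERGY = BIT(0) (see ksz_common.h), the value written to the per-port PME control register comes out as:

	/*
	 * wolopts = WAKE_MAGIC            -> pme_ctrl = 0x4
	 * wolopts = WAKE_PHY              -> pme_ctrl = 0x3
	 * wolopts = WAKE_MAGIC | WAKE_PHY -> pme_ctrl = 0x7
	 */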
+
+/**
+ * ksz_wol_pre_shutdown - Prepares the switch device for shutdown while
+ * considering Wake-on-LAN (WoL) settings.
+ * @dev: The switch device structure.
+ * @wol_enabled: Pointer to a boolean which will be set to true if WoL is
+ * enabled on any port.
+ *
+ * This function prepares the switch device for a safe shutdown while taking
+ * into account the Wake-on-LAN (WoL) settings on the user ports. It updates
+ * the wol_enabled flag accordingly to reflect whether WoL is active on any
+ * port.
+ */
+static void ksz_wol_pre_shutdown(struct ksz_device *dev, bool *wol_enabled)
+{
+ const struct ksz_dev_ops *ops = dev->dev_ops;
+ const u16 *regs = dev->info->regs;
+ u8 pme_pin_en = PME_ENABLE;
+ struct dsa_port *dp;
+ int ret;
+
+ *wol_enabled = false;
+
+ if (!is_ksz9477(dev) && !ksz_is_ksz87xx(dev))
+ return;
+
+ if (!dev->wakeup_source)
+ return;
+
+ dsa_switch_for_each_user_port(dp, dev->ds) {
+ u8 pme_ctrl = 0;
+
+ ret = ops->pme_pread8(dev, dp->index,
+ regs[REG_PORT_PME_CTRL], &pme_ctrl);
+ if (!ret && pme_ctrl)
+ *wol_enabled = true;
+
+ /* make sure there are no pending wake events which would
+ * prevent the device from going to sleep/shutdown.
+ */
+ ksz_handle_wake_reason(dev, dp->index);
+ }
+
+ /* Now it is safe to enable the PME pin. */
+ if (*wol_enabled) {
+ if (dev->pme_active_high)
+ pme_pin_en |= PME_POLARITY;
+ ops->pme_write8(dev, regs[REG_SW_PME_CTRL], pme_pin_en);
+ if (ksz_is_ksz87xx(dev))
+ ksz_write8(dev, KSZ87XX_REG_INT_EN, KSZ87XX_INT_PME_MASK);
+ }
}
static int ksz_port_set_mac_address(struct dsa_switch *ds, int port,
@@ -3687,6 +4737,11 @@ static int ksz_port_set_mac_address(struct dsa_switch *ds, int port,
return -EBUSY;
}
+ /* Need to initialize the variable as the code that fills in the
+ * settings may not be executed.
+ */
+ wol.wolopts = 0;
+
ksz_get_wol(ds, dp->index, &wol);
if (wol.wolopts & WAKE_MAGIC) {
dev_err(ds->dev,
@@ -3777,7 +4832,16 @@ int ksz_switch_macaddr_get(struct dsa_switch *ds, int port,
/* Program the switch MAC address to hardware */
for (i = 0; i < ETH_ALEN; i++) {
- ret = ksz_write8(dev, regs[REG_SW_MAC_ADDR] + i, addr[i]);
+ if (ksz_is_ksz8463(dev)) {
+ u16 addr16 = ((u16)addr[i] << 8) | addr[i + 1];
+
+ ret = ksz_write16(dev, regs[REG_SW_MAC_ADDR] + i,
+ addr16);
+ i++;
+ } else {
+ ret = ksz_write8(dev, regs[REG_SW_MAC_ADDR] + i,
+ addr[i]);
+ }
if (ret)
goto macaddr_drop;
}
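For the KSZ8463 branch above, the six MAC address bytes are emitted as three big-endian 16-bit writes. A small illustration, assuming a purely hypothetical address of 00:11:22:33:44:55:

	/*
	 * ksz_write16(dev, regs[REG_SW_MAC_ADDR] + 0, 0x0011);
	 * ksz_write16(dev, regs[REG_SW_MAC_ADDR] + 2, 0x2233);
	 * ksz_write16(dev, regs[REG_SW_MAC_ADDR] + 4, 0x4455);
	 */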
@@ -3841,6 +4905,13 @@ static int ksz_hsr_join(struct dsa_switch *ds, int port, struct net_device *hsr,
return -EOPNOTSUPP;
}
+ /* KSZ9477 can only perform HSR offloading for up to two ports */
+ if (hweight8(dev->hsr_ports) >= 2) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload more than two ports - using software HSR");
+ return -EOPNOTSUPP;
+ }
+
/* Self MAC address filtering, to avoid frames traversing
* the HSR ring more than once.
*/
@@ -3872,6 +4943,23 @@ static int ksz_hsr_leave(struct dsa_switch *ds, int port,
return 0;
}
+static int ksz_suspend(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+
+ cancel_delayed_work_sync(&dev->mib_read);
+ return 0;
+}
+
+static int ksz_resume(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (dev->mib_read_interval)
+ schedule_delayed_work(&dev->mib_read, dev->mib_read_interval);
+ return 0;
+}
+
static const struct dsa_switch_ops ksz_switch_ops = {
.get_tag_protocol = ksz_get_tag_protocol,
.connect_tag_protocol = ksz_connect_tag_protocol,
@@ -3881,9 +4969,6 @@ static const struct dsa_switch_ops ksz_switch_ops = {
.phy_read = ksz_phy_read16,
.phy_write = ksz_phy_write16,
.phylink_get_caps = ksz_phylink_get_caps,
- .phylink_mac_config = ksz_phylink_mac_config,
- .phylink_mac_link_up = ksz_phylink_mac_link_up,
- .phylink_mac_link_down = ksz_mac_link_down,
.port_setup = ksz_port_setup,
.set_ageing_time = ksz_set_ageing_time,
.get_strings = ksz_get_strings,
@@ -3915,6 +5000,8 @@ static const struct dsa_switch_ops ksz_switch_ops = {
.port_max_mtu = ksz_max_mtu,
.get_wol = ksz_get_wol,
.set_wol = ksz_set_wol,
+ .suspend = ksz_suspend,
+ .resume = ksz_resume,
.get_ts_info = ksz_get_ts_info,
.port_hwtstamp_get = ksz_hwtstamp_get,
.port_hwtstamp_set = ksz_hwtstamp_set,
@@ -3923,8 +5010,15 @@ static const struct dsa_switch_ops ksz_switch_ops = {
.cls_flower_add = ksz_cls_flower_add,
.cls_flower_del = ksz_cls_flower_del,
.port_setup_tc = ksz_setup_tc,
- .get_mac_eee = ksz_get_mac_eee,
+ .support_eee = ksz_support_eee,
.set_mac_eee = ksz_set_mac_eee,
+ .port_get_default_prio = ksz_port_get_default_prio,
+ .port_set_default_prio = ksz_port_set_default_prio,
+ .port_get_dscp_prio = ksz_port_get_dscp_prio,
+ .port_add_dscp_prio = ksz_port_add_dscp_prio,
+ .port_del_dscp_prio = ksz_port_del_dscp_prio,
+ .port_get_apptrust = ksz_port_get_apptrust,
+ .port_set_apptrust = ksz_port_set_apptrust,
};
struct ksz_device *ksz_switch_alloc(struct device *base, void *priv)
@@ -3968,8 +5062,7 @@ void ksz_switch_shutdown(struct ksz_device *dev)
{
bool wol_enabled = false;
- if (dev->dev_ops->wol_pre_shutdown)
- dev->dev_ops->wol_pre_shutdown(dev, &wol_enabled);
+ ksz_wol_pre_shutdown(dev, &wol_enabled);
if (dev->dev_ops->reset && !wol_enabled)
dev->dev_ops->reset(dev);
@@ -4133,24 +5226,24 @@ static int ksz9477_drive_strength_write(struct ksz_device *dev,
}
/**
- * ksz8830_drive_strength_write() - Set the drive strength configuration for
- * KSZ8830 compatible chip variants.
+ * ksz88x3_drive_strength_write() - Set the drive strength configuration for
+ * KSZ8863 compatible chip variants.
* @dev: ksz device
* @props: Array of drive strength properties to be set
* @num_props: Number of properties in the array
*
- * This function applies the specified drive strength settings to KSZ8830 chip
+ * This function applies the specified drive strength settings to KSZ88X3 chip
* variants (KSZ8873, KSZ8863).
* It ensures the configurations align with what the chip variant supports and
* warns or errors out on unsupported settings.
*
* Return: 0 on success, error code otherwise
*/
-static int ksz8830_drive_strength_write(struct ksz_device *dev,
+static int ksz88x3_drive_strength_write(struct ksz_device *dev,
struct ksz_driver_strength_prop *props,
int num_props)
{
- size_t array_size = ARRAY_SIZE(ksz8830_drive_strengths);
+ size_t array_size = ARRAY_SIZE(ksz88x3_drive_strengths);
int microamp;
int i, ret;
@@ -4163,10 +5256,10 @@ static int ksz8830_drive_strength_write(struct ksz_device *dev,
}
microamp = props[KSZ_DRIVER_STRENGTH_IO].value;
- ret = ksz_drive_strength_to_reg(ksz8830_drive_strengths, array_size,
+ ret = ksz_drive_strength_to_reg(ksz88x3_drive_strengths, array_size,
microamp);
if (ret < 0) {
- ksz_drive_strength_error(dev, ksz8830_drive_strengths,
+ ksz_drive_strength_error(dev, ksz88x3_drive_strengths,
array_size, microamp);
return ret;
}
@@ -4226,8 +5319,8 @@ static int ksz_parse_drive_strength(struct ksz_device *dev)
return 0;
switch (dev->chip_id) {
- case KSZ8830_CHIP_ID:
- return ksz8830_drive_strength_write(dev, of_props,
+ case KSZ88X3_CHIP_ID:
+ return ksz88x3_drive_strength_write(dev, of_props,
ARRAY_SIZE(of_props));
case KSZ8795_CHIP_ID:
case KSZ8794_CHIP_ID:
@@ -4240,6 +5333,7 @@ static int ksz_parse_drive_strength(struct ksz_device *dev)
case KSZ9893_CHIP_ID:
case KSZ9896_CHIP_ID:
case KSZ9897_CHIP_ID:
+ case LAN9646_CHIP_ID:
return ksz9477_drive_strength_write(dev, of_props,
ARRAY_SIZE(of_props));
default:
@@ -4255,10 +5349,42 @@ static int ksz_parse_drive_strength(struct ksz_device *dev)
return 0;
}
+static int ksz8463_configure_straps_spi(struct ksz_device *dev)
+{
+ struct pinctrl *pinctrl;
+ struct gpio_desc *rxd0;
+ struct gpio_desc *rxd1;
+
+ rxd0 = devm_gpiod_get_index_optional(dev->dev, "straps-rxd", 0, GPIOD_OUT_LOW);
+ if (IS_ERR(rxd0))
+ return PTR_ERR(rxd0);
+
+ rxd1 = devm_gpiod_get_index_optional(dev->dev, "straps-rxd", 1, GPIOD_OUT_HIGH);
+ if (IS_ERR(rxd1))
+ return PTR_ERR(rxd1);
+
+ if (!rxd0 && !rxd1)
+ return 0;
+
+ if ((rxd0 && !rxd1) || (rxd1 && !rxd0))
+ return -EINVAL;
+
+ pinctrl = devm_pinctrl_get_select(dev->dev, "reset");
+ if (IS_ERR(pinctrl))
+ return PTR_ERR(pinctrl);
+
+ return 0;
+}
+
+static int ksz8463_release_straps_spi(struct ksz_device *dev)
+{
+ return pinctrl_select_default_state(dev->dev);
+}
+
int ksz_switch_register(struct ksz_device *dev)
{
const struct ksz_chip_data *info;
- struct device_node *port, *ports;
+ struct device_node *ports;
phy_interface_t interface;
unsigned int port_num;
int ret;
@@ -4270,10 +5396,22 @@ int ksz_switch_register(struct ksz_device *dev)
return PTR_ERR(dev->reset_gpio);
if (dev->reset_gpio) {
+ if (of_device_is_compatible(dev->dev->of_node, "microchip,ksz8463")) {
+ ret = ksz8463_configure_straps_spi(dev);
+ if (ret)
+ return ret;
+ }
+
gpiod_set_value_cansleep(dev->reset_gpio, 1);
usleep_range(10000, 12000);
gpiod_set_value_cansleep(dev->reset_gpio, 0);
msleep(100);
+
+ if (of_device_is_compatible(dev->dev->of_node, "microchip,ksz8463")) {
+ ret = ksz8463_release_straps_spi(dev);
+ if (ret)
+ return ret;
+ }
}
mutex_init(&dev->dev_mutex);
@@ -4328,6 +5466,9 @@ int ksz_switch_register(struct ksz_device *dev)
/* set the real number of ports */
dev->ds->num_ports = dev->info->port_cnt;
+ /* set the phylink ops */
+ dev->ds->phylink_mac_ops = dev->info->phylink_mac_ops;
+
/* Host port interface will be self detected, or specifically set in
* device tree.
*/
@@ -4341,12 +5482,11 @@ int ksz_switch_register(struct ksz_device *dev)
if (!ports)
ports = of_get_child_by_name(dev->dev->of_node, "ports");
if (ports) {
- for_each_available_child_of_node(ports, port) {
+ for_each_available_child_of_node_scoped(ports, port) {
if (of_property_read_u32(port, "reg",
&port_num))
continue;
if (!(dev->port_mask & BIT(port_num))) {
- of_node_put(port);
of_node_put(ports);
return -EINVAL;
}
@@ -4354,6 +5494,9 @@ int ksz_switch_register(struct ksz_device *dev)
&dev->ports[port_num].interface);
ksz_parse_rgmii_delay(dev, port_num, port);
+ dev->ports[port_num].fiber =
+ of_property_read_bool(port,
+ "micrel,fiber-mode");
}
of_node_put(ports);
}
@@ -4368,6 +5511,8 @@ int ksz_switch_register(struct ksz_device *dev)
dev->wakeup_source = of_property_read_bool(dev->dev->of_node,
"wakeup-source");
+ dev->pme_active_high = of_property_read_bool(dev->dev->of_node,
+ "microchip,pme-active-high");
}
ret = dsa_register_switch(dev->ds);
@@ -4403,6 +5548,24 @@ void ksz_switch_remove(struct ksz_device *dev)
}
EXPORT_SYMBOL(ksz_switch_remove);
+#ifdef CONFIG_PM_SLEEP
+int ksz_switch_suspend(struct device *dev)
+{
+ struct ksz_device *priv = dev_get_drvdata(dev);
+
+ return dsa_switch_suspend(priv->ds);
+}
+EXPORT_SYMBOL(ksz_switch_suspend);
+
+int ksz_switch_resume(struct device *dev)
+{
+ struct ksz_device *priv = dev_get_drvdata(dev);
+
+ return dsa_switch_resume(priv->ds);
+}
+EXPORT_SYMBOL(ksz_switch_resume);
+#endif
+
MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
MODULE_DESCRIPTION("Microchip KSZ Series Switch DSA Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index 40c11b0d6b62..c65188cd3c0a 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Microchip switch driver common header
*
- * Copyright (C) 2017-2019 Microchip Technology Inc.
+ * Copyright (C) 2017-2025 Microchip Technology Inc.
*/
#ifndef __KSZ_COMMON_H
@@ -10,6 +10,7 @@
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
+#include <linux/pcs/pcs-xpcs.h>
#include <linux/phy.h>
#include <linux/regmap.h>
#include <net/dsa.h>
@@ -19,9 +20,14 @@
#include "ksz_ptp.h"
#define KSZ_MAX_NUM_PORTS 8
+/* all KSZ switches count ports from 1 */
+#define KSZ_PORT_1 0
+#define KSZ_PORT_2 1
+#define KSZ_PORT_4 3
struct ksz_device;
struct ksz_port;
+struct phylink_mac_ops;
enum ksz_regmap_width {
KSZ_REGMAP_8,
@@ -58,9 +64,17 @@ struct ksz_chip_data {
int port_cnt;
u8 port_nirqs;
u8 num_tx_queues;
+ u8 num_ipms; /* number of Internal Priority Maps */
bool tc_cbs_supported;
- bool tc_ets_supported;
+
+ /**
+ * @phy_side_mdio_supported: Indicates if the chip supports an additional
+ * side MDIO channel for accessing integrated PHYs.
+ */
+ bool phy_side_mdio_supported;
const struct ksz_dev_ops *ops;
+ const struct phylink_mac_ops *phylink_mac_ops;
+ bool phy_errata_9477;
bool ksz87xx_eee_link_erratum;
const struct ksz_mib_names *mib_names;
int mib_cnt;
@@ -79,6 +93,8 @@ struct ksz_chip_data {
bool supports_rgmii[KSZ_MAX_NUM_PORTS];
bool internal_phy[KSZ_MAX_NUM_PORTS];
bool gbit_capable[KSZ_MAX_NUM_PORTS];
+ bool ptp_capable;
+ u8 sgmii_port;
const struct regmap_access_table *wr_table;
const struct regmap_access_table *rd_table;
};
@@ -118,6 +134,7 @@ struct ksz_port {
u32 force:1;
u32 read:1; /* read MIB counters in background */
u32 freeze:1; /* MIB counter freeze is enabled */
+ u32 sgmii_adv_write:1;
struct ksz_port_mib mib;
phy_interface_t interface;
@@ -127,8 +144,9 @@ struct ksz_port {
void *acl_priv;
struct ksz_irq pirq;
u8 num;
+ struct phylink_pcs *pcs;
#if IS_ENABLED(CONFIG_NET_DSA_MICROCHIP_KSZ_PTP)
- struct hwtstamp_config tstamp_config;
+ struct kernel_hwtstamp_config tstamp_config;
bool hwts_tx_en;
bool hwts_rx_en;
struct ksz_irq ptpirq;
@@ -167,6 +185,7 @@ struct ksz_device {
bool synclko_125;
bool synclko_disable;
bool wakeup_source;
+ bool pme_active_high;
struct vlan_table *vlan_cache;
@@ -183,16 +202,35 @@ struct ksz_device {
struct ksz_switch_macaddr *switch_macaddr;
struct net_device *hsr_dev; /* HSR */
u8 hsr_ports;
+
+ /**
+ * @phy_addr_map: Array mapping switch ports to their corresponding PHY
+ * addresses.
+ */
+ u8 phy_addr_map[KSZ_MAX_NUM_PORTS];
+
+ /**
+ * @parent_mdio_bus: Pointer to the external MDIO bus controller.
+ *
+ * This points to an external MDIO bus controller that is used to access
+ * the PHYs integrated within the switch. Unlike an integrated MDIO
+ * bus, this external controller provides a direct path for managing
+ * the switch’s internal PHYs, bypassing the main SPI interface.
+ */
+ struct mii_bus *parent_mdio_bus;
};
/* List of supported models */
enum ksz_model {
+ KSZ8463,
KSZ8563,
KSZ8567,
KSZ8795,
KSZ8794,
KSZ8765,
- KSZ8830,
+ KSZ88X3,
+ KSZ8864,
+ KSZ8895,
KSZ9477,
KSZ9896,
KSZ9897,
@@ -204,6 +242,7 @@ enum ksz_model {
LAN9372,
LAN9373,
LAN9374,
+ LAN9646,
};
enum ksz_regs {
@@ -228,6 +267,9 @@ enum ksz_regs {
S_MULTICAST_CTRL,
P_XMII_CTRL_0,
P_XMII_CTRL_1,
+ REG_SW_PME_CTRL,
+ REG_PORT_PME_STATUS,
+ REG_PORT_PME_CTRL,
};
enum ksz_masks {
@@ -252,6 +294,8 @@ enum ksz_masks {
DYNAMIC_MAC_TABLE_TIMESTAMP,
ALU_STAT_WRITE,
ALU_STAT_READ,
+ ALU_STAT_DIRECT,
+ ALU_RESV_MCAST_ADDR,
P_MII_TX_FLOW_CTRL,
P_MII_RX_FLOW_CTRL,
};
@@ -313,6 +357,43 @@ struct ksz_dev_ops {
void (*port_cleanup)(struct ksz_device *dev, int port);
void (*port_setup)(struct ksz_device *dev, int port, bool cpu_port);
int (*set_ageing_time)(struct ksz_device *dev, unsigned int msecs);
+
+ /**
+ * @mdio_bus_preinit: Function pointer to pre-initialize the MDIO bus
+ * for accessing PHYs.
+ * @dev: Pointer to device structure.
+ * @side_mdio: Boolean indicating if the PHYs are accessed over a side
+ * MDIO bus.
+ *
+ * This function pointer is used to configure the MDIO bus for PHY
+ * access before initiating regular PHY operations. It enables either
+ * SPI/I2C or side MDIO access modes by unlocking necessary registers
+ * and setting up access permissions for the selected mode.
+ *
+ * Return:
+ * - 0 on success.
+ * - Negative error code on failure.
+ */
+ int (*mdio_bus_preinit)(struct ksz_device *dev, bool side_mdio);
+
+ /**
+ * @create_phy_addr_map: Function pointer to create a port-to-PHY
+ * address map.
+ * @dev: Pointer to device structure.
+ * @side_mdio: Boolean indicating if the PHYs are accessed over a side
+ * MDIO bus.
+ *
+ * This function pointer is responsible for mapping switch ports to PHY
+ * addresses according to the configured access mode (SPI or side MDIO)
+ * and the device’s strap configuration. The mapping setup may vary
+ * depending on the chip variant and configuration. Ensures the correct
+ * address mapping for PHY communication.
+ *
+ * Return:
+ * - 0 on success.
+ * - Negative error code on failure (e.g., invalid configuration).
+ */
+ int (*create_phy_addr_map)(struct ksz_device *dev, bool side_mdio);
int (*r_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);
int (*w_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 val);
void (*r_mib_cnt)(struct ksz_device *dev, int port, u16 addr,
@@ -347,11 +428,13 @@ struct ksz_dev_ops {
void (*get_caps)(struct ksz_device *dev, int port,
struct phylink_config *config);
int (*change_mtu)(struct ksz_device *dev, int port, int mtu);
+ int (*pme_write8)(struct ksz_device *dev, u32 reg, u8 value);
+ int (*pme_pread8)(struct ksz_device *dev, int port, int offset,
+ u8 *data);
+ int (*pme_pwrite8)(struct ksz_device *dev, int port, int offset,
+ u8 data);
void (*freeze_mib)(struct ksz_device *dev, int port, bool freeze);
void (*port_init_cnt)(struct ksz_device *dev, int port);
- void (*phylink_mac_config)(struct ksz_device *dev, int port,
- unsigned int mode,
- const struct phylink_link_state *state);
void (*phylink_mac_link_up)(struct ksz_device *dev, int port,
unsigned int mode,
phy_interface_t interface,
@@ -359,21 +442,20 @@ struct ksz_dev_ops {
int duplex, bool tx_pause, bool rx_pause);
void (*setup_rgmii_delay)(struct ksz_device *dev, int port);
int (*tc_cbs_set_cinc)(struct ksz_device *dev, int port, u32 val);
- void (*get_wol)(struct ksz_device *dev, int port,
- struct ethtool_wolinfo *wol);
- int (*set_wol)(struct ksz_device *dev, int port,
- struct ethtool_wolinfo *wol);
- void (*wol_pre_shutdown)(struct ksz_device *dev, bool *wol_enabled);
void (*config_cpu_port)(struct dsa_switch *ds);
int (*enable_stp_addr)(struct ksz_device *dev);
int (*reset)(struct ksz_device *dev);
int (*init)(struct ksz_device *dev);
void (*exit)(struct ksz_device *dev);
+
+ int (*pcs_create)(struct ksz_device *dev);
};
struct ksz_device *ksz_switch_alloc(struct device *base, void *priv);
int ksz_switch_register(struct ksz_device *dev);
void ksz_switch_remove(struct ksz_device *dev);
+int ksz_switch_suspend(struct device *dev);
+int ksz_switch_resume(struct device *dev);
void ksz_init_mib_timer(struct ksz_device *dev);
bool ksz_is_port_mac_global_usable(struct dsa_switch *ds, int port);
@@ -387,6 +469,7 @@ int ksz_switch_macaddr_get(struct dsa_switch *ds, int port,
struct netlink_ext_ack *extack);
void ksz_switch_macaddr_put(struct dsa_switch *ds);
void ksz_switch_shutdown(struct ksz_device *dev);
+int ksz_handle_wake_reason(struct ksz_device *dev, int port);
/* Common register access functions */
static inline struct regmap *ksz_regmap_8(struct ksz_device *dev)
@@ -404,6 +487,11 @@ static inline struct regmap *ksz_regmap_32(struct ksz_device *dev)
return dev->regmap[KSZ_REGMAP_32];
}
+static inline bool ksz_is_ksz8463(struct ksz_device *dev)
+{
+ return dev->chip_id == KSZ8463_CHIP_ID;
+}
+
static inline int ksz_read8(struct ksz_device *dev, u32 reg, u8 *val)
{
unsigned int value;
@@ -617,7 +705,30 @@ static inline bool ksz_is_ksz87xx(struct ksz_device *dev)
static inline bool ksz_is_ksz88x3(struct ksz_device *dev)
{
- return dev->chip_id == KSZ8830_CHIP_ID;
+ return dev->chip_id == KSZ88X3_CHIP_ID;
+}
+
+static inline bool ksz_is_8895_family(struct ksz_device *dev)
+{
+ return dev->chip_id == KSZ8895_CHIP_ID ||
+ dev->chip_id == KSZ8864_CHIP_ID;
+}
+
+static inline bool is_ksz8(struct ksz_device *dev)
+{
+ return ksz_is_ksz87xx(dev) || ksz_is_ksz88x3(dev) ||
+ ksz_is_8895_family(dev) || ksz_is_ksz8463(dev);
+}
+
+static inline bool is_ksz88xx(struct ksz_device *dev)
+{
+ return ksz_is_ksz88x3(dev) || ksz_is_8895_family(dev) ||
+ ksz_is_ksz8463(dev);
+}
+
+static inline bool is_ksz9477(struct ksz_device *dev)
+{
+ return dev->chip_id == KSZ9477_CHIP_ID;
}
static inline int is_lan937x(struct ksz_device *dev)
@@ -629,6 +740,27 @@ static inline int is_lan937x(struct ksz_device *dev)
dev->chip_id == LAN9374_CHIP_ID;
}
+static inline bool is_lan937x_tx_phy(struct ksz_device *dev, int port)
+{
+ return (dev->chip_id == LAN9371_CHIP_ID ||
+ dev->chip_id == LAN9372_CHIP_ID) && port == KSZ_PORT_4;
+}
+
+static inline int ksz_get_sgmii_port(struct ksz_device *dev)
+{
+ return dev->info->sgmii_port - 1;
+}
+
+static inline bool ksz_has_sgmii_port(struct ksz_device *dev)
+{
+ return dev->info->sgmii_port > 0;
+}
+
+static inline bool ksz_is_sgmii_port(struct ksz_device *dev, int port)
+{
+ return dev->info->sgmii_port == port + 1;
+}
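The SGMII helpers above treat info->sgmii_port as 1-based, with 0 meaning "no SGMII port". A short sketch, assuming a hypothetical chip whose SGMII port is DSA port 6:

	/*
	 * info->sgmii_port == 7:
	 *   ksz_has_sgmii_port(dev)   -> true
	 *   ksz_get_sgmii_port(dev)   -> 6
	 *   ksz_is_sgmii_port(dev, 6) -> true
	 * info->sgmii_port == 0: chip has no SGMII port.
	 */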
+
/* STP State Defines */
#define PORT_TX_ENABLE BIT(2)
#define PORT_RX_ENABLE BIT(1)
@@ -638,8 +770,10 @@ static inline int is_lan937x(struct ksz_device *dev)
#define REG_CHIP_ID0 0x00
#define SW_FAMILY_ID_M GENMASK(15, 8)
+#define KSZ84_FAMILY_ID 0x84
#define KSZ87_FAMILY_ID 0x87
#define KSZ88_FAMILY_ID 0x88
+#define KSZ8895_FAMILY_ID 0x95
#define KSZ8_PORT_STATUS_0 0x08
#define KSZ8_PORT_FIBER_MODE BIT(7)
@@ -648,6 +782,12 @@ static inline int is_lan937x(struct ksz_device *dev)
#define KSZ87_CHIP_ID_94 0x6
#define KSZ87_CHIP_ID_95 0x9
#define KSZ88_CHIP_ID_63 0x3
+#define KSZ8895_CHIP_ID_95 0x4
+#define KSZ8895_CHIP_ID_95R 0x6
+
+/* KSZ8895 specific register */
+#define REG_KSZ8864_CHIP_ID 0xFE
+#define SW_KSZ8864 BIT(7)
#define SW_REV_ID_M GENMASK(7, 4)
@@ -680,6 +820,17 @@ static inline int is_lan937x(struct ksz_device *dev)
#define P_MII_MAC_MODE BIT(2)
#define P_MII_SEL_M 0x3
+/* KSZ9477, KSZ87xx Wake-on-LAN (WoL) masks */
+#define PME_WOL_MAGICPKT BIT(2)
+#define PME_WOL_LINKUP BIT(1)
+#define PME_WOL_ENERGY BIT(0)
+
+#define PME_ENABLE BIT(1)
+#define PME_POLARITY BIT(0)
+
+#define KSZ87XX_REG_INT_EN 0x7D
+#define KSZ87XX_INT_PME_MASK BIT(4)
+
/* Interrupt */
#define REG_SW_PORT_INT_STATUS__1 0x001B
#define REG_SW_PORT_INT_MASK__1 0x001F
@@ -716,13 +867,31 @@ static inline int is_lan937x(struct ksz_device *dev)
#define SW_HI_SPEED_DRIVE_STRENGTH_S 4
#define SW_LO_SPEED_DRIVE_STRENGTH_S 0
+/* TXQ Split Control Register for per-port, per-queue configuration.
+ * Register 0xAF is TXQ Split for Q3 on Port 1.
+ * Register offset formula: 0xAF + (port * 4) + (3 - queue)
+ * where: port = 0..2, queue = 0..3
+ */
+#define KSZ8873_TXQ_SPLIT_CTRL_REG(port, queue) \
+ (0xAF + ((port) * 4) + (3 - (queue)))
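Worked example of the offset formula above, using the port/queue ranges stated in the comment:

	/*
	 * KSZ8873_TXQ_SPLIT_CTRL_REG(0, 3) = 0xAF   (Port 1, Q3)
	 * KSZ8873_TXQ_SPLIT_CTRL_REG(0, 0) = 0xB2   (Port 1, Q0)
	 * KSZ8873_TXQ_SPLIT_CTRL_REG(2, 0) = 0xBA   (Port 3, Q0)
	 */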
+
+/* Bit 7 selects between:
+ * 0 = Strict priority mode (highest-priority queue first)
+ * 1 = Weighted Fair Queuing (WFQ) mode:
+ * Queue weights: Q3:Q2:Q1:Q0 = 8:4:2:1
+ * If any queues are empty, weight is redistributed.
+ *
+ * Note: This is referred to as "Weighted Fair Queuing" (WFQ) in KSZ8863/8873
+ * documentation, and as "Weighted Round Robin" (WRR) in KSZ9477 family docs.
+ */
+#define KSZ8873_TXQ_WFQ_ENABLE BIT(7)
+
#define KSZ9477_REG_PORT_OUT_RATE_0 0x0420
#define KSZ9477_OUT_RATE_NO_LIMIT 0
#define KSZ9477_PORT_MRI_TC_MAP__4 0x0808
#define KSZ9477_PORT_TC_MAP_S 4
-#define KSZ9477_MAX_TC_PRIO 7
/* CBS related registers */
#define REG_PORT_MTI_QUEUE_INDEX__4 0x0900
@@ -780,4 +949,29 @@ static inline int is_lan937x(struct ksz_device *dev)
[KSZ_REGMAP_32] = KSZ_REGMAP_ENTRY(32, swp, (regbits), (regpad), (regalign)), \
}
+#define KSZ8463_REGMAP_ENTRY(width, regbits, regpad, regalign) \
+ { \
+ .name = #width, \
+ .val_bits = (width), \
+ .reg_stride = (width / 8), \
+ .reg_bits = (regbits) + (regalign), \
+ .pad_bits = (regpad), \
+ .read = ksz8463_spi_read, \
+ .write = ksz8463_spi_write, \
+ .max_register = BIT(regbits) - 1, \
+ .cache_type = REGCACHE_NONE, \
+ .zero_flag_mask = 1, \
+ .use_single_read = 1, \
+ .use_single_write = 1, \
+ .lock = ksz_regmap_lock, \
+ .unlock = ksz_regmap_unlock, \
+ }
+
+#define KSZ8463_REGMAP_TABLE(ksz, regbits, regpad, regalign) \
+ static const struct regmap_config ksz##_regmap_config[] = { \
+ [KSZ_REGMAP_8] = KSZ8463_REGMAP_ENTRY(8, (regbits), (regpad), (regalign)), \
+ [KSZ_REGMAP_16] = KSZ8463_REGMAP_ENTRY(16, (regbits), (regpad), (regalign)), \
+ [KSZ_REGMAP_32] = KSZ8463_REGMAP_ENTRY(32, (regbits), (regpad), (regalign)), \
+ }
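A hypothetical instantiation of the macro above (the parameter values are illustrative only, not taken from the actual KSZ8463 SPI driver):

	/* KSZ8463_REGMAP_TABLE(ksz8463, 16, 0, 0);
	 *
	 * expands to a static const struct regmap_config
	 * ksz8463_regmap_config[] with 8-, 16- and 32-bit entries that all
	 * go through ksz8463_spi_read()/ksz8463_spi_write().
	 */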
+
#endif
diff --git a/drivers/net/dsa/microchip/ksz_dcb.c b/drivers/net/dsa/microchip/ksz_dcb.c
new file mode 100644
index 000000000000..7131c5caac54
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz_dcb.c
@@ -0,0 +1,612 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2024 Pengutronix, Oleksij Rempel <kernel@pengutronix.de>
+
+#include <linux/dsa/ksz_common.h>
+#include <net/dsa.h>
+#include <net/dscp.h>
+#include <net/ieee8021q.h>
+
+#include "ksz_common.h"
+#include "ksz_dcb.h"
+#include "ksz8.h"
+
+/* Port X Control 0 register.
+ * The datasheet specifies: Port 1 - 0x10, Port 2 - 0x20, Port 3 - 0x30.
+ * However, the driver uses get_port_addr(), which maps Port 1 to offset 0.
+ * Therefore, we define the base offset as 0x00 here to align with that logic.
+ */
+#define KSZ8_REG_PORT_1_CTRL_0 0x00
+#define KSZ8463_REG_PORT_1_CTRL_0 0x6C
+#define KSZ8_PORT_DIFFSERV_ENABLE BIT(6)
+#define KSZ8_PORT_802_1P_ENABLE BIT(5)
+#define KSZ8_PORT_BASED_PRIO_M GENMASK(4, 3)
+
+#define KSZ8463_REG_TOS_DSCP_CTRL 0x16
+#define KSZ88X3_REG_TOS_DSCP_CTRL 0x60
+#define KSZ8765_REG_TOS_DSCP_CTRL 0x90
+
+#define KSZ9477_REG_SW_MAC_TOS_CTRL 0x033e
+#define KSZ9477_SW_TOS_DSCP_REMAP BIT(0)
+#define KSZ9477_SW_TOS_DSCP_DEFAULT_PRIO_M GENMASK(5, 3)
+
+#define KSZ9477_REG_DIFFSERV_PRIO_MAP 0x0340
+
+#define KSZ9477_REG_PORT_MRI_PRIO_CTRL 0x0801
+#define KSZ9477_PORT_HIGHEST_PRIO BIT(7)
+#define KSZ9477_PORT_OR_PRIO BIT(6)
+#define KSZ9477_PORT_MAC_PRIO_ENABLE BIT(4)
+#define KSZ9477_PORT_VLAN_PRIO_ENABLE BIT(3)
+#define KSZ9477_PORT_802_1P_PRIO_ENABLE BIT(2)
+#define KSZ9477_PORT_DIFFSERV_PRIO_ENABLE BIT(1)
+#define KSZ9477_PORT_ACL_PRIO_ENABLE BIT(0)
+
+#define KSZ9477_REG_PORT_MRI_MAC_CTRL 0x0802
+#define KSZ9477_PORT_BASED_PRIO_M GENMASK(2, 0)
+
+struct ksz_apptrust_map {
+ u8 apptrust;
+ u8 bit;
+};
+
+static const struct ksz_apptrust_map ksz8_apptrust_map_to_bit[] = {
+ { DCB_APP_SEL_PCP, KSZ8_PORT_802_1P_ENABLE },
+ { IEEE_8021QAZ_APP_SEL_DSCP, KSZ8_PORT_DIFFSERV_ENABLE },
+};
+
+static const struct ksz_apptrust_map ksz9477_apptrust_map_to_bit[] = {
+ { DCB_APP_SEL_PCP, KSZ9477_PORT_802_1P_PRIO_ENABLE },
+ { IEEE_8021QAZ_APP_SEL_DSCP, KSZ9477_PORT_DIFFSERV_PRIO_ENABLE },
+};
+
+/* ksz_supported_apptrust[] - Supported apptrust selectors and Priority Order
+ * of Internal Priority Map (IPM) sources.
+ *
+ * This array defines the apptrust selectors supported by the hardware, where
+ * the index within the array indicates the priority of the selector - lower
+ * indices correspond to higher priority. This fixed priority scheme is due to
+ * the hardware's design, which does not support configurable priority among
+ * different priority sources.
+ *
+ * The priority sources, including Tail Tag, ACL, VLAN PCP and DSCP are ordered
+ * by the hardware's fixed logic, as detailed below. The order reflects a
+ * non-configurable precedence where certain types of priority information
+ * override others:
+ *
+ * 1. Tail Tag - Highest priority, overrides ACL, VLAN PCP, and DSCP priorities.
+ * 2. ACL - Overrides VLAN PCP and DSCP priorities.
+ * 3. VLAN PCP - Overrides DSCP priority.
+ * 4. DSCP - Lowest priority, does not override any other priority source.
+ *
+ * In this context, the array's lower index (higher priority) for
+ * 'DCB_APP_SEL_PCP' suggests its relative priority over
+ * 'IEEE_8021QAZ_APP_SEL_DSCP' within the system's fixed priority scheme.
+ *
+ * DCB_APP_SEL_PCP - Priority Code Point selector
+ * IEEE_8021QAZ_APP_SEL_DSCP - Differentiated Services Code Point selector
+ */
+static const u8 ksz_supported_apptrust[] = {
+ DCB_APP_SEL_PCP,
+ IEEE_8021QAZ_APP_SEL_DSCP,
+};
+
+static const char * const ksz_supported_apptrust_variants[] = {
+ "empty", "dscp", "pcp", "dscp pcp"
+};
+
+static void ksz_get_default_port_prio_reg(struct ksz_device *dev, int *reg,
+ u8 *mask, int *shift)
+{
+ if (is_ksz8(dev)) {
+ *reg = KSZ8_REG_PORT_1_CTRL_0;
+ *mask = KSZ8_PORT_BASED_PRIO_M;
+ *shift = __bf_shf(KSZ8_PORT_BASED_PRIO_M);
+ if (ksz_is_ksz8463(dev))
+ *reg = KSZ8463_REG_PORT_1_CTRL_0;
+ } else {
+ *reg = KSZ9477_REG_PORT_MRI_MAC_CTRL;
+ *mask = KSZ9477_PORT_BASED_PRIO_M;
+ *shift = __bf_shf(KSZ9477_PORT_BASED_PRIO_M);
+ }
+}
+
+/**
+ * ksz_get_dscp_prio_reg - Retrieves the DSCP-to-priority-mapping register
+ * @dev: Pointer to the KSZ switch device structure
+ * @reg: Pointer to the register address to be set
+ * @per_reg: Pointer to the number of DSCP values per register
+ * @mask: Pointer to the mask to be set
+ *
+ * This function retrieves the DSCP to priority mapping register, the number of
+ * DSCP values per register, and the mask to be set.
+ */
+static void ksz_get_dscp_prio_reg(struct ksz_device *dev, int *reg,
+ int *per_reg, u8 *mask)
+{
+ if (ksz_is_ksz87xx(dev) || ksz_is_8895_family(dev)) {
+ *reg = KSZ8765_REG_TOS_DSCP_CTRL;
+ *per_reg = 4;
+ *mask = GENMASK(1, 0);
+ } else if (ksz_is_ksz88x3(dev) || ksz_is_ksz8463(dev)) {
+ *reg = KSZ88X3_REG_TOS_DSCP_CTRL;
+ *per_reg = 4;
+ *mask = GENMASK(1, 0);
+ if (ksz_is_ksz8463(dev))
+ *reg = KSZ8463_REG_TOS_DSCP_CTRL;
+ } else {
+ *reg = KSZ9477_REG_DIFFSERV_PRIO_MAP;
+ *per_reg = 2;
+ *mask = GENMASK(2, 0);
+ }
+}
+
+/**
+ * ksz_get_apptrust_map_and_reg - Retrieves the apptrust map and register
+ * @dev: Pointer to the KSZ switch device structure
+ * @map: Pointer to the apptrust map to be set
+ * @reg: Pointer to the register address to be set
+ * @mask: Pointer to the mask to be set
+ *
+ * This function retrieves the apptrust map and register address for the
+ * apptrust configuration.
+ */
+static void ksz_get_apptrust_map_and_reg(struct ksz_device *dev,
+ const struct ksz_apptrust_map **map,
+ int *reg, u8 *mask)
+{
+ if (is_ksz8(dev)) {
+ *map = ksz8_apptrust_map_to_bit;
+ *reg = KSZ8_REG_PORT_1_CTRL_0;
+ *mask = KSZ8_PORT_DIFFSERV_ENABLE | KSZ8_PORT_802_1P_ENABLE;
+ if (ksz_is_ksz8463(dev))
+ *reg = KSZ8463_REG_PORT_1_CTRL_0;
+ } else {
+ *map = ksz9477_apptrust_map_to_bit;
+ *reg = KSZ9477_REG_PORT_MRI_PRIO_CTRL;
+ *mask = KSZ9477_PORT_802_1P_PRIO_ENABLE |
+ KSZ9477_PORT_DIFFSERV_PRIO_ENABLE;
+ }
+}
+
+/**
+ * ksz_port_get_default_prio - Retrieves the default priority for a port on a
+ * KSZ switch
+ * @ds: Pointer to the DSA switch structure
+ * @port: Port number from which to get the default priority
+ *
+ * This function fetches the default priority for the specified port on a KSZ
+ * switch.
+ *
+ * Return: The default priority of the port on success, or a negative error
+ * code on failure.
+ */
+int ksz_port_get_default_prio(struct dsa_switch *ds, int port)
+{
+ struct ksz_device *dev = ds->priv;
+ int ret, reg, shift;
+ u8 data, mask;
+
+ ksz_get_default_port_prio_reg(dev, &reg, &mask, &shift);
+
+ ret = ksz_pread8(dev, port, reg, &data);
+ if (ret)
+ return ret;
+
+ return (data & mask) >> shift;
+}
+
+/**
+ * ksz_port_set_default_prio - Sets the default priority for a port on a KSZ
+ * switch
+ * @ds: Pointer to the DSA switch structure
+ * @port: Port number for which to set the default priority
+ * @prio: Priority value to set
+ *
+ * This function sets the default priority for the specified port on a KSZ
+ * switch.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+int ksz_port_set_default_prio(struct dsa_switch *ds, int port, u8 prio)
+{
+ struct ksz_device *dev = ds->priv;
+ int reg, shift;
+ u8 mask;
+
+ if (prio >= dev->info->num_ipms)
+ return -EINVAL;
+
+ ksz_get_default_port_prio_reg(dev, &reg, &mask, &shift);
+
+ return ksz_prmw8(dev, port, reg, mask, (prio << shift) & mask);
+}
+
+/**
+ * ksz_port_get_dscp_prio - Retrieves the priority for a DSCP value on a KSZ
+ * switch
+ * @ds: Pointer to the DSA switch structure
+ * @port: Port number for which to get the priority
+ * @dscp: DSCP value for which to get the priority
+ *
+ * This function fetches the priority value from the switch's global
+ * DSCP-to-priority mapping table for the specified DSCP value.
+ *
+ * Return: The priority value for the DSCP on success, or a negative error
+ * code on failure.
+ */
+int ksz_port_get_dscp_prio(struct dsa_switch *ds, int port, u8 dscp)
+{
+ struct ksz_device *dev = ds->priv;
+ int reg, per_reg, ret, shift;
+ u8 data, mask;
+
+ ksz_get_dscp_prio_reg(dev, &reg, &per_reg, &mask);
+
+ /* On KSZ9477 compatible variants, check first whether DSCP remapping
+ * is enabled before consulting the mapping table.
+ */
+ if (!is_ksz8(dev)) {
+ ret = ksz_read8(dev, KSZ9477_REG_SW_MAC_TOS_CTRL, &data);
+ if (ret)
+ return ret;
+
+ /* If DSCP remapping is disabled, DSCP bits 3-5 are used as
+ * Internal Priority Map (IPM)
+ */
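+ /* e.g. DSCP 46 (0b101110) is then reported as priority 0b101 = 5 */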
+ if (!(data & KSZ9477_SW_TOS_DSCP_REMAP))
+ return FIELD_GET(KSZ9477_SW_TOS_DSCP_DEFAULT_PRIO_M,
+ dscp);
+ }
+
+ /* In case DSCP remapping is enabled, fetch the priority from the
+ * global DSCP-to-priority mapping table.
+ */
+ reg += dscp / per_reg;
+ ret = ksz_read8(dev, reg, &data);
+ if (ret)
+ return ret;
+
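+ /* Each 8-bit register packs 'per_reg' DSCP entries of 8 / per_reg bits
+ * each; e.g. with per_reg == 4, DSCP 6 sits in bits 5:4 of the second
+ * register.
+ */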
+ shift = (dscp % per_reg) * (8 / per_reg);
+
+ return (data >> shift) & mask;
+}
+
+/**
+ * ksz_set_global_dscp_entry - Sets the global DSCP-to-priority mapping entry
+ * @dev: Pointer to the KSZ switch device structure
+ * @dscp: DSCP value for which to set the priority
+ * @ipm: Priority value to set
+ *
+ * This function sets the global DSCP-to-priority mapping entry for the
+ * specified DSCP value.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int ksz_set_global_dscp_entry(struct ksz_device *dev, u8 dscp, u8 ipm)
+{
+ int reg, per_reg, shift;
+ u8 mask;
+
+ ksz_get_dscp_prio_reg(dev, &reg, &per_reg, &mask);
+
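+ /* Same per-register packing of DSCP entries as in
+ * ksz_port_get_dscp_prio().
+ */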
+ shift = (dscp % per_reg) * (8 / per_reg);
+
+ return ksz_rmw8(dev, reg + (dscp / per_reg), mask << shift,
+ ipm << shift);
+}
+
+/**
+ * ksz_init_global_dscp_map - Initializes the global DSCP-to-priority mapping
+ * @dev: Pointer to the KSZ switch device structure
+ *
+ * This function initializes the global DSCP-to-priority mapping table for the
+ * switch.
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+static int ksz_init_global_dscp_map(struct ksz_device *dev)
+{
+ int ret, dscp;
+
+ /* On KSZ9xxx variants, DSCP remapping is disabled by default.
+ * Enable it to get predictable and reproducible behavior across
+ * different devices.
+ */
+ if (!is_ksz8(dev)) {
+ ret = ksz_rmw8(dev, KSZ9477_REG_SW_MAC_TOS_CTRL,
+ KSZ9477_SW_TOS_DSCP_REMAP,
+ KSZ9477_SW_TOS_DSCP_REMAP);
+ if (ret)
+ return ret;
+ }
+
+ for (dscp = 0; dscp < DSCP_MAX; dscp++) {
+ int ipm, tt;
+
+ /* Map DSCP to Traffic Type, which corresponds to the Internal
+ * Priority Map (IPM) in the switch.
+ */
+ if (!is_ksz8(dev)) {
+ ipm = ietf_dscp_to_ieee8021q_tt(dscp);
+ } else {
+ /* On KSZ8xxx variants there is no IPM-to-queue remapping
+ * table. We need to convert the DSCP to a Traffic Type and
+ * then to a queue.
+ */
+ tt = ietf_dscp_to_ieee8021q_tt(dscp);
+ if (tt < 0)
+ return tt;
+
+ ipm = ieee8021q_tt_to_tc(tt, dev->info->num_tx_queues);
+ }
+
+ if (ipm < 0)
+ return ipm;
+
+ ret = ksz_set_global_dscp_entry(dev, dscp, ipm);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * ksz_port_add_dscp_prio - Adds a DSCP-to-priority mapping entry for a port on
+ * a KSZ switch.
+ * @ds: Pointer to the DSA switch structure
+ * @port: Port number for which to add the DSCP-to-priority mapping entry
+ * @dscp: DSCP value for which to add the priority
+ * @prio: Priority value to set
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+int ksz_port_add_dscp_prio(struct dsa_switch *ds, int port, u8 dscp, u8 prio)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (prio >= dev->info->num_ipms)
+ return -ERANGE;
+
+ return ksz_set_global_dscp_entry(dev, dscp, prio);
+}
+
+/**
+ * ksz_port_del_dscp_prio - Deletes a DSCP-to-priority mapping entry for a port
+ * on a KSZ switch.
+ * @ds: Pointer to the DSA switch structure
+ * @port: Port number for which to delete the DSCP-to-priority mapping entry
+ * @dscp: DSCP value for which to delete the priority
+ * @prio: Priority value to delete
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+int ksz_port_del_dscp_prio(struct dsa_switch *ds, int port, u8 dscp, u8 prio)
+{
+ struct ksz_device *dev = ds->priv;
+ int ipm;
+
+ if (ksz_port_get_dscp_prio(ds, port, dscp) != prio)
+ return 0;
+
+ if (is_ksz8(dev)) {
+ ipm = ieee8021q_tt_to_tc(IEEE8021Q_TT_BE,
+ dev->info->num_tx_queues);
+ if (ipm < 0)
+ return ipm;
+ } else {
+ ipm = IEEE8021Q_TT_BE;
+ }
+
+ return ksz_set_global_dscp_entry(dev, dscp, ipm);
+}
+
+/**
+ * ksz_apptrust_error - Prints an error message for an invalid apptrust selector
+ * @dev: Pointer to the KSZ switch device structure
+ *
+ * This function prints an error message when an invalid apptrust selector is
+ * provided.
+ */
+static void ksz_apptrust_error(struct ksz_device *dev)
+{
+ char supported_apptrust_variants[64];
+ int i;
+
+ supported_apptrust_variants[0] = '\0';
+ for (i = 0; i < ARRAY_SIZE(ksz_supported_apptrust_variants); i++) {
+ if (i > 0)
+ strlcat(supported_apptrust_variants, ", ",
+ sizeof(supported_apptrust_variants));
+ strlcat(supported_apptrust_variants,
+ ksz_supported_apptrust_variants[i],
+ sizeof(supported_apptrust_variants));
+ }
+
+ dev_err(dev->dev, "Invalid apptrust selector or priority order. Supported: %s\n",
+ supported_apptrust_variants);
+}
+
+/**
+ * ksz_port_set_apptrust_validate - Validates the apptrust selectors
+ * @dev: Pointer to the KSZ switch device structure
+ * @port: Port number for which to set the apptrust selectors
+ * @sel: Array of apptrust selectors to validate
+ * @nsel: Number of apptrust selectors in the array
+ *
+ * This function validates the apptrust selectors provided and ensures that
+ * they are in the correct order.
+ *
+ * This family of switches supports two apptrust selectors: DCB_APP_SEL_PCP and
+ * IEEE_8021QAZ_APP_SEL_DSCP. The priority order of the selectors is fixed and
+ * cannot be changed. The order is as follows:
+ * 1. DCB_APP_SEL_PCP - Priority Code Point selector (highest priority)
+ * 2. IEEE_8021QAZ_APP_SEL_DSCP - Differentiated Services Code Point selector
+ * (lowest priority)
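+ *
+ * For example, sel = { DCB_APP_SEL_PCP, IEEE_8021QAZ_APP_SEL_DSCP } is
+ * accepted, while the reversed order is rejected.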
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+static int ksz_port_set_apptrust_validate(struct ksz_device *dev, int port,
+ const u8 *sel, int nsel)
+{
+ int i, j, found;
+ int j_prev = 0;
+
+ /* Iterate through the requested selectors */
+ for (i = 0; i < nsel; i++) {
+ found = 0;
+
+ /* Check if the current selector is supported by the hardware */
+ for (j = 0; j < sizeof(ksz_supported_apptrust); j++) {
+ if (sel[i] != ksz_supported_apptrust[j])
+ continue;
+
+ found = 1;
+
+ /* Ensure the selectors are listed in decreasing priority
+ * order with no duplicates
+ */
+ if (i > 0 && j <= j_prev)
+ goto err_sel_not_vaild;
+
+ j_prev = j;
+ break;
+ }
+
+ if (!found)
+ goto err_sel_not_vaild;
+ }
+
+ return 0;
+
+err_sel_not_vaild:
+ ksz_apptrust_error(dev);
+
+ return -EINVAL;
+}
+
+/**
+ * ksz_port_set_apptrust - Sets the apptrust selectors for a port on a KSZ
+ * switch
+ * @ds: Pointer to the DSA switch structure
+ * @port: Port number for which to set the apptrust selectors
+ * @sel: Array of apptrust selectors to set
+ * @nsel: Number of apptrust selectors in the array
+ *
+ * This function sets the apptrust selectors for the specified port on a KSZ
+ * switch.
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+int ksz_port_set_apptrust(struct dsa_switch *ds, int port,
+ const u8 *sel, int nsel)
+{
+ const struct ksz_apptrust_map *map;
+ struct ksz_device *dev = ds->priv;
+ int reg, i, ret;
+ u8 data = 0;
+ u8 mask;
+
+ ret = ksz_port_set_apptrust_validate(dev, port, sel, nsel);
+ if (ret)
+ return ret;
+
+ ksz_get_apptrust_map_and_reg(dev, &map, &reg, &mask);
+
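+ /* Translate each requested selector into its enable bit and OR them
+ * together; bits within the mask that stay zero disable the
+ * corresponding selector when written below.
+ */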
+ for (i = 0; i < nsel; i++) {
+ int j;
+
+ for (j = 0; j < ARRAY_SIZE(ksz_supported_apptrust); j++) {
+ if (sel[i] != ksz_supported_apptrust[j])
+ continue;
+
+ data |= map[j].bit;
+ break;
+ }
+ }
+
+ return ksz_prmw8(dev, port, reg, mask, data);
+}
+
+/**
+ * ksz_port_get_apptrust - Retrieves the apptrust selectors for a port on a KSZ
+ * switch
+ * @ds: Pointer to the DSA switch structure
+ * @port: Port number for which to get the apptrust selectors
+ * @sel: Array to store the apptrust selectors
+ * @nsel: Number of apptrust selectors in the array
+ *
+ * This function fetches the apptrust selectors for the specified port on a KSZ
+ * switch.
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+int ksz_port_get_apptrust(struct dsa_switch *ds, int port, u8 *sel, int *nsel)
+{
+ const struct ksz_apptrust_map *map;
+ struct ksz_device *dev = ds->priv;
+ int reg, i, ret;
+ u8 data;
+ u8 mask;
+
+ ksz_get_apptrust_map_and_reg(dev, &map, &reg, &mask);
+
+ ret = ksz_pread8(dev, port, reg, &data);
+ if (ret)
+ return ret;
+
+ *nsel = 0;
+ for (i = 0; i < ARRAY_SIZE(ksz_supported_apptrust); i++) {
+ if (data & map[i].bit)
+ sel[(*nsel)++] = ksz_supported_apptrust[i];
+ }
+
+ return 0;
+}
+
+/**
+ * ksz_dcb_init_port - Initializes the DCB configuration for a port on a KSZ
+ * switch
+ * @dev: Pointer to the KSZ switch device structure
+ * @port: Port number for which to initialize the DCB configuration
+ *
+ * This function initializes the DCB configuration for the specified port on a
+ * KSZ switch: the default priority and the apptrust selectors are configured
+ * for the port. The default priority is set to Best Effort, and apptrust is
+ * restricted to the PCP selector.
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+int ksz_dcb_init_port(struct ksz_device *dev, int port)
+{
+ const u8 ksz_default_apptrust[] = { DCB_APP_SEL_PCP };
+ int ret, ipm;
+
+ if (is_ksz8(dev)) {
+ ipm = ieee8021q_tt_to_tc(IEEE8021Q_TT_BE,
+ dev->info->num_tx_queues);
+ if (ipm < 0)
+ return ipm;
+ } else {
+ ipm = IEEE8021Q_TT_BE;
+ }
+
+ /* Set the default priority for the port to Best Effort */
+ ret = ksz_port_set_default_prio(dev->ds, port, ipm);
+ if (ret)
+ return ret;
+
+ return ksz_port_set_apptrust(dev->ds, port, ksz_default_apptrust,
+ ARRAY_SIZE(ksz_default_apptrust));
+}
+
+/**
+ * ksz_dcb_init - Initializes the DCB configuration for a KSZ switch
+ * @dev: Pointer to the KSZ switch device structure
+ *
+ * This function initializes the DCB configuration for a KSZ switch. The global
+ * DSCP-to-priority mapping table is initialized.
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+int ksz_dcb_init(struct ksz_device *dev)
+{
+ return ksz_init_global_dscp_map(dev);
+}
diff --git a/drivers/net/dsa/microchip/ksz_dcb.h b/drivers/net/dsa/microchip/ksz_dcb.h
new file mode 100644
index 000000000000..e2065223ba90
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz_dcb.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024 Pengutronix, Oleksij Rempel <kernel@pengutronix.de> */
+
+#ifndef __KSZ_DCB_H
+#define __KSZ_DCB_H
+
+#include <net/dsa.h>
+
+#include "ksz_common.h"
+
+int ksz_port_get_default_prio(struct dsa_switch *ds, int port);
+int ksz_port_set_default_prio(struct dsa_switch *ds, int port, u8 prio);
+int ksz_port_get_dscp_prio(struct dsa_switch *ds, int port, u8 dscp);
+int ksz_port_add_dscp_prio(struct dsa_switch *ds, int port, u8 dscp, u8 prio);
+int ksz_port_del_dscp_prio(struct dsa_switch *ds, int port, u8 dscp, u8 prio);
+int ksz_port_set_apptrust(struct dsa_switch *ds, int port,
+ const unsigned char *sel,
+ int nsel);
+int ksz_port_get_apptrust(struct dsa_switch *ds, int port, u8 *sel, int *nsel);
+int ksz_dcb_init_port(struct ksz_device *dev, int port);
+int ksz_dcb_init(struct ksz_device *dev);
+
+#endif /* __KSZ_DCB_H */
diff --git a/drivers/net/dsa/microchip/ksz_ptp.c b/drivers/net/dsa/microchip/ksz_ptp.c
index 1fe105913c75..997e4a76d0a6 100644
--- a/drivers/net/dsa/microchip/ksz_ptp.c
+++ b/drivers/net/dsa/microchip/ksz_ptp.c
@@ -266,7 +266,6 @@ static int ksz_ptp_enable_mode(struct ksz_device *dev)
struct ksz_port *prt;
struct dsa_port *dp;
bool tag_en = false;
- int ret;
dsa_switch_for_each_user_port(dp, dev->ds) {
prt = &dev->ports[dp->index];
@@ -277,9 +276,7 @@ static int ksz_ptp_enable_mode(struct ksz_device *dev)
}
if (tag_en) {
- ret = ptp_schedule_worker(ptp_data->clock, 0);
- if (ret)
- return ret;
+ ptp_schedule_worker(ptp_data->clock, 0);
} else {
ptp_cancel_worker_sync(ptp_data->clock);
}
@@ -293,7 +290,7 @@ static int ksz_ptp_enable_mode(struct ksz_device *dev)
/* The function is return back the capability of timestamping feature when
* requested through ethtool -T <interface> utility
*/
-int ksz_get_ts_info(struct dsa_switch *ds, int port, struct ethtool_ts_info *ts)
+int ksz_get_ts_info(struct dsa_switch *ds, int port, struct kernel_ethtool_ts_info *ts)
{
struct ksz_device *dev = ds->priv;
struct ksz_ptp_data *ptp_data;
@@ -322,22 +319,21 @@ int ksz_get_ts_info(struct dsa_switch *ds, int port, struct ethtool_ts_info *ts)
return 0;
}
-int ksz_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr)
+int ksz_hwtstamp_get(struct dsa_switch *ds, int port,
+ struct kernel_hwtstamp_config *config)
{
struct ksz_device *dev = ds->priv;
- struct hwtstamp_config *config;
struct ksz_port *prt;
prt = &dev->ports[port];
- config = &prt->tstamp_config;
+ *config = prt->tstamp_config;
- return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
- -EFAULT : 0;
+ return 0;
}
static int ksz_set_hwtstamp_config(struct ksz_device *dev,
struct ksz_port *prt,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
int ret;
@@ -407,26 +403,21 @@ static int ksz_set_hwtstamp_config(struct ksz_device *dev,
return ksz_ptp_enable_mode(dev);
}
-int ksz_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
+int ksz_hwtstamp_set(struct dsa_switch *ds, int port,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct ksz_device *dev = ds->priv;
- struct hwtstamp_config config;
struct ksz_port *prt;
int ret;
prt = &dev->ports[port];
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- ret = ksz_set_hwtstamp_config(dev, prt, &config);
+ ret = ksz_set_hwtstamp_config(dev, prt, config);
if (ret)
return ret;
- memcpy(&prt->tstamp_config, &config, sizeof(config));
-
- if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
- return -EFAULT;
+ prt->tstamp_config = *config;
return 0;
}
@@ -1102,18 +1093,18 @@ static int ksz_ptp_msg_irq_setup(struct ksz_port *port, u8 n)
static const char * const name[] = {"pdresp-msg", "xdreq-msg",
"sync-msg"};
const struct ksz_dev_ops *ops = port->ksz_dev->dev_ops;
+ struct ksz_irq *ptpirq = &port->ptpirq;
struct ksz_ptp_irq *ptpmsg_irq;
ptpmsg_irq = &port->ptpmsg_irq[n];
+ ptpmsg_irq->num = irq_create_mapping(ptpirq->domain, n);
+ if (!ptpmsg_irq->num)
+ return -EINVAL;
ptpmsg_irq->port = port;
ptpmsg_irq->ts_reg = ops->get_port_addr(port->num, ts_reg[n]);
- snprintf(ptpmsg_irq->name, sizeof(ptpmsg_irq->name), name[n]);
-
- ptpmsg_irq->num = irq_find_mapping(port->ptpirq.domain, n);
- if (ptpmsg_irq->num < 0)
- return ptpmsg_irq->num;
+ strscpy(ptpmsg_irq->name, name[n]);
return request_threaded_irq(ptpmsg_irq->num, NULL,
ksz_ptp_msg_thread_fn, IRQF_ONESHOT,
@@ -1139,17 +1130,14 @@ int ksz_ptp_irq_setup(struct dsa_switch *ds, u8 p)
init_completion(&port->tstamp_msg_comp);
- ptpirq->domain = irq_domain_add_linear(dev->dev->of_node, ptpirq->nirqs,
- &ksz_ptp_irq_domain_ops, ptpirq);
+ ptpirq->domain = irq_domain_create_linear(dev_fwnode(dev->dev), ptpirq->nirqs,
+ &ksz_ptp_irq_domain_ops, ptpirq);
if (!ptpirq->domain)
return -ENOMEM;
- for (irq = 0; irq < ptpirq->nirqs; irq++)
- irq_create_mapping(ptpirq->domain, irq);
-
ptpirq->irq_num = irq_find_mapping(port->pirq.domain, PORT_SRC_PTP_INT);
- if (ptpirq->irq_num < 0) {
- ret = ptpirq->irq_num;
+ if (!ptpirq->irq_num) {
+ ret = -EINVAL;
goto out;
}
@@ -1168,12 +1156,11 @@ int ksz_ptp_irq_setup(struct dsa_switch *ds, u8 p)
out_ptp_msg:
free_irq(ptpirq->irq_num, ptpirq);
- while (irq--)
+ while (irq--) {
free_irq(port->ptpmsg_irq[irq].num, &port->ptpmsg_irq[irq]);
-out:
- for (irq = 0; irq < ptpirq->nirqs; irq++)
irq_dispose_mapping(port->ptpmsg_irq[irq].num);
-
+ }
+out:
irq_domain_remove(ptpirq->domain);
return ret;
diff --git a/drivers/net/dsa/microchip/ksz_ptp.h b/drivers/net/dsa/microchip/ksz_ptp.h
index 0ca8ca4f804e..3086e519b1b6 100644
--- a/drivers/net/dsa/microchip/ksz_ptp.h
+++ b/drivers/net/dsa/microchip/ksz_ptp.h
@@ -38,9 +38,12 @@ int ksz_ptp_clock_register(struct dsa_switch *ds);
void ksz_ptp_clock_unregister(struct dsa_switch *ds);
int ksz_get_ts_info(struct dsa_switch *ds, int port,
- struct ethtool_ts_info *ts);
-int ksz_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr);
-int ksz_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr);
+ struct kernel_ethtool_ts_info *ts);
+int ksz_hwtstamp_get(struct dsa_switch *ds, int port,
+ struct kernel_hwtstamp_config *config);
+int ksz_hwtstamp_set(struct dsa_switch *ds, int port,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
void ksz_port_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb);
void ksz_port_deferred_xmit(struct kthread_work *work);
bool ksz_port_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb,
diff --git a/drivers/net/dsa/microchip/ksz_spi.c b/drivers/net/dsa/microchip/ksz_spi.c
index c8166fb440ab..d8001734b057 100644
--- a/drivers/net/dsa/microchip/ksz_spi.c
+++ b/drivers/net/dsa/microchip/ksz_spi.c
@@ -2,11 +2,11 @@
/*
* Microchip ksz series register access through SPI
*
- * Copyright (C) 2017 Microchip Technology Inc.
+ * Copyright (C) 2017-2024 Microchip Technology Inc.
* Tristram Ha <Tristram.Ha@microchip.com>
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/delay.h>
#include <linux/kernel.h>
@@ -16,6 +16,10 @@
#include "ksz_common.h"
+#define KSZ8463_SPI_ADDR_SHIFT 13
+#define KSZ8463_SPI_ADDR_ALIGN 3
+#define KSZ8463_SPI_TURNAROUND_SHIFT 2
+
#define KSZ8795_SPI_ADDR_SHIFT 12
#define KSZ8795_SPI_ADDR_ALIGN 3
#define KSZ8795_SPI_TURNAROUND_SHIFT 1
@@ -37,6 +41,99 @@ KSZ_REGMAP_TABLE(ksz8863, 16, KSZ8863_SPI_ADDR_SHIFT,
KSZ_REGMAP_TABLE(ksz9477, 32, KSZ9477_SPI_ADDR_SHIFT,
KSZ9477_SPI_TURNAROUND_SHIFT, KSZ9477_SPI_ADDR_ALIGN);
+static u16 ksz8463_reg(u16 reg, size_t size)
+{
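+ /* Place the dword-aligned register address in the upper bits and build
+ * a byte-enable nibble in the low bits that selects which byte lanes of
+ * the 32-bit register are accessed; the result is then shifted to make
+ * room for the turnaround bits.
+ */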
+ switch (size) {
+ case 1:
+ reg = ((reg >> 2) << 4) | (1 << (reg & 3));
+ break;
+ case 2:
+ reg = ((reg >> 2) << 4) | (reg & 2 ? 0x0c : 0x03);
+ break;
+ default:
+ reg = ((reg >> 2) << 4) | 0xf;
+ break;
+ }
+ reg <<= KSZ8463_SPI_TURNAROUND_SHIFT;
+ return reg;
+}
+
+static int ksz8463_spi_read(void *context,
+ const void *reg, size_t reg_size,
+ void *val, size_t val_size)
+{
+ struct device *dev = context;
+ struct spi_device *spi = to_spi_device(dev);
+ u8 bytes[2];
+ u16 cmd;
+ int rc;
+
+ if (reg_size > 2 || val_size > 4)
+ return -EINVAL;
+ memcpy(&cmd, reg, sizeof(u16));
+ cmd = ksz8463_reg(cmd, val_size);
+ /* SPI command uses big-endian format. */
+ put_unaligned_be16(cmd, bytes);
+ rc = spi_write_then_read(spi, bytes, reg_size, val, val_size);
+#if defined(__BIG_ENDIAN)
+ /* The register value is little-endian, so it needs to be converted
+ * when running on a big-endian system.
+ */
+ if (!rc && val_size > 1) {
+ if (val_size == 2) {
+ u16 v = get_unaligned_le16(val);
+
+ memcpy(val, &v, sizeof(v));
+ } else if (val_size == 4) {
+ u32 v = get_unaligned_le32(val);
+
+ memcpy(val, &v, sizeof(v));
+ }
+ }
+#endif
+ return rc;
+}
+
+static int ksz8463_spi_write(void *context, const void *data, size_t count)
+{
+ struct device *dev = context;
+ struct spi_device *spi = to_spi_device(dev);
+ size_t val_size = count - 2;
+ u8 bytes[6];
+ u16 cmd;
+
+ if (count <= 2 || count > 6)
+ return -EINVAL;
+ memcpy(bytes, data, count);
+ memcpy(&cmd, data, sizeof(u16));
+ cmd = ksz8463_reg(cmd, val_size);
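+ /* Set the bit that marks this SPI command as a write; the read path
+ * in ksz8463_spi_read() leaves it clear.
+ */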
+ cmd |= (1 << (KSZ8463_SPI_ADDR_SHIFT + KSZ8463_SPI_TURNAROUND_SHIFT));
+ /* SPI command uses big-endian format. */
+ put_unaligned_be16(cmd, bytes);
+#if defined(__BIG_ENDIAN)
+ /* The register value is little-endian, so it needs to be converted
+ * when running on a big-endian system.
+ */
+ if (val_size == 2) {
+ u8 *val = &bytes[2];
+ u16 v;
+
+ memcpy(&v, val, sizeof(v));
+ put_unaligned_le16(v, val);
+ } else if (val_size == 4) {
+ u8 *val = &bytes[2];
+ u32 v;
+
+ memcpy(&v, val, sizeof(v));
+ put_unaligned_le32(v, val);
+ }
+#endif
+ return spi_write(spi, bytes, count);
+}
+
+KSZ8463_REGMAP_TABLE(ksz8463, KSZ8463_SPI_ADDR_SHIFT, 0,
+ KSZ8463_SPI_ADDR_ALIGN);
+
static int ksz_spi_probe(struct spi_device *spi)
{
const struct regmap_config *regmap_config;
@@ -54,12 +151,19 @@ static int ksz_spi_probe(struct spi_device *spi)
if (!chip)
return -EINVAL;
- if (chip->chip_id == KSZ8830_CHIP_ID)
+ /* Save chip id to do special initialization when probing. */
+ dev->chip_id = chip->chip_id;
+ if (chip->chip_id == KSZ88X3_CHIP_ID)
regmap_config = ksz8863_regmap_config;
+ else if (chip->chip_id == KSZ8463_CHIP_ID)
+ regmap_config = ksz8463_regmap_config;
else if (chip->chip_id == KSZ8795_CHIP_ID ||
chip->chip_id == KSZ8794_CHIP_ID ||
chip->chip_id == KSZ8765_CHIP_ID)
regmap_config = ksz8795_regmap_config;
+ else if (chip->chip_id == KSZ8895_CHIP_ID ||
+ chip->chip_id == KSZ8864_CHIP_ID)
+ regmap_config = ksz8863_regmap_config;
else
regmap_config = ksz9477_regmap_config;
@@ -121,6 +225,10 @@ static void ksz_spi_shutdown(struct spi_device *spi)
static const struct of_device_id ksz_dt_ids[] = {
{
+ .compatible = "microchip,ksz8463",
+ .data = &ksz_switch_chips[KSZ8463]
+ },
+ {
.compatible = "microchip,ksz8765",
.data = &ksz_switch_chips[KSZ8765]
},
@@ -134,11 +242,19 @@ static const struct of_device_id ksz_dt_ids[] = {
},
{
.compatible = "microchip,ksz8863",
- .data = &ksz_switch_chips[KSZ8830]
+ .data = &ksz_switch_chips[KSZ88X3]
+ },
+ {
+ .compatible = "microchip,ksz8864",
+ .data = &ksz_switch_chips[KSZ8864]
},
{
.compatible = "microchip,ksz8873",
- .data = &ksz_switch_chips[KSZ8830]
+ .data = &ksz_switch_chips[KSZ88X3]
+ },
+ {
+ .compatible = "microchip,ksz8895",
+ .data = &ksz_switch_chips[KSZ8895]
},
{
.compatible = "microchip,ksz9477",
@@ -192,16 +308,23 @@ static const struct of_device_id ksz_dt_ids[] = {
.compatible = "microchip,lan9374",
.data = &ksz_switch_chips[LAN9374]
},
+ {
+ .compatible = "microchip,lan9646",
+ .data = &ksz_switch_chips[LAN9646]
+ },
{},
};
MODULE_DEVICE_TABLE(of, ksz_dt_ids);
static const struct spi_device_id ksz_spi_ids[] = {
+ { "ksz8463" },
{ "ksz8765" },
{ "ksz8794" },
{ "ksz8795" },
{ "ksz8863" },
+ { "ksz8864" },
{ "ksz8873" },
+ { "ksz8895" },
{ "ksz9477" },
{ "ksz9896" },
{ "ksz9897" },
@@ -215,15 +338,19 @@ static const struct spi_device_id ksz_spi_ids[] = {
{ "lan9372" },
{ "lan9373" },
{ "lan9374" },
+ { "lan9646" },
{ },
};
MODULE_DEVICE_TABLE(spi, ksz_spi_ids);
+static DEFINE_SIMPLE_DEV_PM_OPS(ksz_spi_pm_ops,
+ ksz_switch_suspend, ksz_switch_resume);
+
static struct spi_driver ksz_spi_driver = {
.driver = {
.name = "ksz-switch",
- .owner = THIS_MODULE,
.of_match_table = ksz_dt_ids,
+ .pm = &ksz_spi_pm_ops,
},
.id_table = ksz_spi_ids,
.probe = ksz_spi_probe,
@@ -233,13 +360,6 @@ static struct spi_driver ksz_spi_driver = {
module_spi_driver(ksz_spi_driver);
-MODULE_ALIAS("spi:ksz9477");
-MODULE_ALIAS("spi:ksz9896");
-MODULE_ALIAS("spi:ksz9897");
-MODULE_ALIAS("spi:ksz9893");
-MODULE_ALIAS("spi:ksz9563");
-MODULE_ALIAS("spi:ksz8563");
-MODULE_ALIAS("spi:ksz9567");
MODULE_ALIAS("spi:lan937x");
MODULE_AUTHOR("Tristram Ha <Tristram.Ha@microchip.com>");
MODULE_DESCRIPTION("Microchip ksz Series Switch SPI Driver");
diff --git a/drivers/net/dsa/microchip/lan937x.h b/drivers/net/dsa/microchip/lan937x.h
index 3388d91dbc44..df13ebbd356f 100644
--- a/drivers/net/dsa/microchip/lan937x.h
+++ b/drivers/net/dsa/microchip/lan937x.h
@@ -13,6 +13,8 @@ void lan937x_port_setup(struct ksz_device *dev, int port, bool cpu_port);
void lan937x_config_cpu_port(struct dsa_switch *ds);
int lan937x_switch_init(struct ksz_device *dev);
void lan937x_switch_exit(struct ksz_device *dev);
+int lan937x_mdio_bus_preinit(struct ksz_device *dev, bool side_mdio);
+int lan937x_create_phy_addr_map(struct ksz_device *dev, bool side_mdio);
int lan937x_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data);
int lan937x_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val);
int lan937x_change_mtu(struct ksz_device *dev, int port, int new_mtu);
diff --git a/drivers/net/dsa/microchip/lan937x_main.c b/drivers/net/dsa/microchip/lan937x_main.c
index b479a628b1ae..5a1496fff445 100644
--- a/drivers/net/dsa/microchip/lan937x_main.c
+++ b/drivers/net/dsa/microchip/lan937x_main.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Microchip LAN937X switch driver main logic
- * Copyright (C) 2019-2022 Microchip Technology Inc.
+ * Copyright (C) 2019-2024 Microchip Technology Inc.
*/
#include <linux/kernel.h>
#include <linux/module.h>
@@ -18,6 +18,87 @@
#include "ksz9477.h"
#include "lan937x.h"
+/* marker for ports without built-in PHY */
+#define LAN937X_NO_PHY U8_MAX
+
+/*
+ * lan9370_phy_addr - Mapping of LAN9370 switch ports to PHY addresses.
+ *
+ * Each entry corresponds to a specific port on the LAN9370 switch,
+ * where ports 1-4 are connected to integrated 100BASE-T1 PHYs, and
+ * Port 5 is connected to an RGMII interface without a PHY. The values
+ * are based on the documentation (DS00003108E, section 3.3).
+ */
+static const u8 lan9370_phy_addr[] = {
+ [0] = 2, /* Port 1, T1 AFE0 */
+ [1] = 3, /* Port 2, T1 AFE1 */
+ [2] = 5, /* Port 3, T1 AFE3 */
+ [3] = 6, /* Port 4, T1 AFE4 */
+ [4] = LAN937X_NO_PHY, /* Port 5, RGMII 2 */
+};
+
+/*
+ * lan9371_phy_addr - Mapping of LAN9371 switch ports to PHY addresses.
+ *
+ * The values are based on the documentation (DS00003109E, section 3.3).
+ */
+static const u8 lan9371_phy_addr[] = {
+ [0] = 2, /* Port 1, T1 AFE0 */
+ [1] = 3, /* Port 2, T1 AFE1 */
+ [2] = 5, /* Port 3, T1 AFE3 */
+ [3] = 8, /* Port 4, TX PHY */
+ [4] = LAN937X_NO_PHY, /* Port 5, RGMII 2 */
+ [5] = LAN937X_NO_PHY, /* Port 6, RGMII 1 */
+};
+
+/*
+ * lan9372_phy_addr - Mapping of LAN9372 switch ports to PHY addresses.
+ *
+ * The values are based on the documentation (DS00003110F, section 3.3).
+ */
+static const u8 lan9372_phy_addr[] = {
+ [0] = 2, /* Port 1, T1 AFE0 */
+ [1] = 3, /* Port 2, T1 AFE1 */
+ [2] = 5, /* Port 3, T1 AFE3 */
+ [3] = 8, /* Port 4, TX PHY */
+ [4] = LAN937X_NO_PHY, /* Port 5, RGMII 2 */
+ [5] = LAN937X_NO_PHY, /* Port 6, RGMII 1 */
+ [6] = 6, /* Port 7, T1 AFE4 */
+ [7] = 4, /* Port 8, T1 AFE2 */
+};
+
+/*
+ * lan9373_phy_addr - Mapping of LAN9373 switch ports to PHY addresses.
+ *
+ * The values are based on the documentation (DS00003110F, section 3.3).
+ */
+static const u8 lan9373_phy_addr[] = {
+ [0] = 2, /* Port 1, T1 AFE0 */
+ [1] = 3, /* Port 2, T1 AFE1 */
+ [2] = 5, /* Port 3, T1 AFE3 */
+ [3] = LAN937X_NO_PHY, /* Port 4, SGMII */
+ [4] = LAN937X_NO_PHY, /* Port 5, RGMII 2 */
+ [5] = LAN937X_NO_PHY, /* Port 6, RGMII 1 */
+ [6] = 6, /* Port 7, T1 AFE4 */
+ [7] = 4, /* Port 8, T1 AFE2 */
+};
+
+/*
+ * lan9374_phy_addr - Mapping of LAN9374 switch ports to PHY addresses.
+ *
+ * The values are based on the documentation (DS00003110F, section 3.3).
+ */
+static const u8 lan9374_phy_addr[] = {
+ [0] = 2, /* Port 1, T1 AFE0 */
+ [1] = 3, /* Port 2, T1 AFE1 */
+ [2] = 5, /* Port 3, T1 AFE3 */
+ [3] = 7, /* Port 4, T1 AFE5 */
+ [4] = LAN937X_NO_PHY, /* Port 5, RGMII 2 */
+ [5] = LAN937X_NO_PHY, /* Port 6, RGMII 1 */
+ [6] = 6, /* Port 7, T1 AFE4 */
+ [7] = 4, /* Port 8, T1 AFE2 */
+};
+
static int lan937x_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
{
return regmap_update_bits(ksz_regmap_8(dev), addr, bits, set ? bits : 0);
@@ -30,24 +111,144 @@ static int lan937x_port_cfg(struct ksz_device *dev, int port, int offset,
bits, set ? bits : 0);
}
-static int lan937x_enable_spi_indirect_access(struct ksz_device *dev)
+/**
+ * lan937x_create_phy_addr_map - Create port-to-PHY address map for MDIO bus.
+ * @dev: Pointer to device structure.
+ * @side_mdio: Boolean indicating if the PHYs are accessed over a side MDIO bus.
+ *
+ * This function sets up the PHY address mapping for the LAN937x switches,
+ * which support two access modes for internal PHYs:
+ * 1. **SPI Access**: A straightforward one-to-one port-to-PHY address
+ * mapping is applied.
+ * 2. **MDIO Access**: The PHY address mapping varies based on chip variant
+ * and strap configuration. An offset is calculated based on strap settings
+ * to ensure correct PHY addresses are assigned. The offset calculation logic
+ * is based on Microchip's Article Number 000015828, available at:
+ * https://microchip.my.site.com/s/article/LAN9374-Virtual-PHY-PHY-Address-Mapping
+ *
+ * The function first checks if side MDIO access is disabled, in which case a
+ * simple direct mapping (port number = PHY address) is applied. If side MDIO
+ * access is enabled, it reads the strap configuration to determine the correct
+ * offset for PHY addresses.
+ *
+ * The appropriate mapping table is selected based on the chip ID, and the
+ * `phy_addr_map` is populated with the correct addresses for each port. Any
+ * port with no PHY is assigned a `LAN937X_NO_PHY` marker.
+ *
+ * Return: 0 on success, error code on failure.
+ */
+int lan937x_create_phy_addr_map(struct ksz_device *dev, bool side_mdio)
+{
+ static const u8 *phy_addr_map;
+ u32 strap_val;
+ u8 offset = 0;
+ size_t size;
+ int ret, i;
+
+ if (!side_mdio) {
+ /* simple direct mapping */
+ for (i = 0; i < dev->info->port_cnt; i++)
+ dev->phy_addr_map[i] = i;
+
+ return 0;
+ }
+
+ ret = ksz_read32(dev, REG_SW_CFG_STRAP_VAL, &strap_val);
+ if (ret < 0)
+ return ret;
+
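+ /* Select the PHY address offset from the two strap pins; the resulting
+ * values (0, 7, 15, 22) follow Microchip's Article Number 000015828
+ * referenced above.
+ */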
+ if (!(strap_val & SW_CASCADE_ID_CFG) && !(strap_val & SW_VPHY_ADD_CFG))
+ offset = 0;
+ else if (!(strap_val & SW_CASCADE_ID_CFG) && (strap_val & SW_VPHY_ADD_CFG))
+ offset = 7;
+ else if ((strap_val & SW_CASCADE_ID_CFG) && !(strap_val & SW_VPHY_ADD_CFG))
+ offset = 15;
+ else
+ offset = 22;
+
+ switch (dev->info->chip_id) {
+ case LAN9370_CHIP_ID:
+ phy_addr_map = lan9370_phy_addr;
+ size = ARRAY_SIZE(lan9370_phy_addr);
+ break;
+ case LAN9371_CHIP_ID:
+ phy_addr_map = lan9371_phy_addr;
+ size = ARRAY_SIZE(lan9371_phy_addr);
+ break;
+ case LAN9372_CHIP_ID:
+ phy_addr_map = lan9372_phy_addr;
+ size = ARRAY_SIZE(lan9372_phy_addr);
+ break;
+ case LAN9373_CHIP_ID:
+ phy_addr_map = lan9373_phy_addr;
+ size = ARRAY_SIZE(lan9373_phy_addr);
+ break;
+ case LAN9374_CHIP_ID:
+ phy_addr_map = lan9374_phy_addr;
+ size = ARRAY_SIZE(lan9374_phy_addr);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (size < dev->info->port_cnt)
+ return -EINVAL;
+
+ for (i = 0; i < dev->info->port_cnt; i++) {
+ if (phy_addr_map[i] == LAN937X_NO_PHY)
+ dev->phy_addr_map[i] = phy_addr_map[i];
+ else
+ dev->phy_addr_map[i] = phy_addr_map[i] + offset;
+ }
+
+ return 0;
+}
+
+/**
+ * lan937x_mdio_bus_preinit - Pre-initialize MDIO bus for accessing PHYs.
+ * @dev: Pointer to device structure.
+ * @side_mdio: Boolean indicating if the PHYs are accessed over a side MDIO bus.
+ *
+ * This function configures the LAN937x switch for PHY access either through
+ * SPI or the side MDIO bus, unlocking the necessary registers for each access
+ * mode.
+ *
+ * Operation Modes:
+ * 1. **SPI Access**: Enables SPI indirect access to address clock domain
+ * crossing issues when SPI is used for PHY access.
+ * 2. **MDIO Access**: Grants access to internal PHYs over the side MDIO bus,
+ * required when using the MDIO bus for PHY management.
+ *
+ * Return: 0 on success, error code on failure.
+ */
+int lan937x_mdio_bus_preinit(struct ksz_device *dev, bool side_mdio)
{
u16 data16;
int ret;
- /* Enable Phy access through SPI */
+ /* Unlock access to the PHYs, needed for SPI and side MDIO access */
ret = lan937x_cfg(dev, REG_GLOBAL_CTRL_0, SW_PHY_REG_BLOCK, false);
if (ret < 0)
- return ret;
+ goto print_error;
- ret = ksz_read16(dev, REG_VPHY_SPECIAL_CTRL__2, &data16);
- if (ret < 0)
- return ret;
+ if (side_mdio)
+ /* Allow access to internal PHYs over MDIO bus */
+ data16 = VPHY_MDIO_INTERNAL_ENABLE;
+ else
+ /* Enable SPI indirect access to address the clock domain
+ * crossing issue
+ */
+ data16 = VPHY_SPI_INDIRECT_ENABLE;
- /* Allow SPI access */
- data16 |= VPHY_SPI_INDIRECT_ENABLE;
+ ret = ksz_rmw16(dev, REG_VPHY_SPECIAL_CTRL__2,
+ VPHY_SPI_INDIRECT_ENABLE | VPHY_MDIO_INTERNAL_ENABLE,
+ data16);
- return ksz_write16(dev, REG_VPHY_SPECIAL_CTRL__2, data16);
+print_error:
+ if (ret < 0)
+ dev_err(dev->dev, "failed to preinit the MDIO bus\n");
+
+ return ret;
}
static int lan937x_vphy_ind_addr_wr(struct ksz_device *dev, int addr, int reg)
@@ -55,6 +256,9 @@ static int lan937x_vphy_ind_addr_wr(struct ksz_device *dev, int addr, int reg)
u16 addr_base = REG_PORT_T1_PHY_CTRL_BASE;
u16 temp;
+ if (is_lan937x_tx_phy(dev, addr))
+ addr_base = REG_PORT_TX_PHY_CTRL_BASE;
+
/* get register address based on the logical port */
temp = PORT_CTRL_ADDR(addr, (addr_base + (reg << 2)));
@@ -257,10 +461,66 @@ int lan937x_change_mtu(struct ksz_device *dev, int port, int new_mtu)
int lan937x_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
{
- u32 secs = msecs / 1000;
- u32 value;
+ u8 data, mult, value8;
+ bool in_msec = false;
+ u32 max_val, value;
+ u32 secs = msecs;
int ret;
+#define MAX_TIMER_VAL ((1 << 20) - 1)
+
+ /* The aging timer comprises a 3-bit multiplier and a 20-bit second
+ * value. Neither of them can be zero. The maximum timer is then
+ * 7 * 1048575 = 7340025 seconds. As this value is too large for
+ * practical use it can be interpreted as microseconds, making the
+ * maximum timer 7340 seconds with finer control. This allows for a
+ * maximum of 122 minutes, compared to 29 minutes on the KSZ9477
+ * switch.
+ */
+ if (msecs % 1000)
+ in_msec = true;
+ else
+ secs /= 1000;
+ if (!secs)
+ secs = 1;
+
+ /* Return error if too large. */
+ else if (secs > 7 * MAX_TIMER_VAL)
+ return -EINVAL;
+
+ /* Configure how to interpret the number value. */
+ ret = ksz_rmw8(dev, REG_SW_LUE_CTRL_2, SW_AGE_CNT_IN_MICROSEC,
+ in_msec ? SW_AGE_CNT_IN_MICROSEC : 0);
+ if (ret < 0)
+ return ret;
+
+ ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &value8);
+ if (ret < 0)
+ return ret;
+
+ /* Check whether there is need to update the multiplier. */
+ mult = FIELD_GET(SW_AGE_CNT_M, value8);
+ max_val = MAX_TIMER_VAL;
+ if (mult > 0) {
+ /* Try to use the same multiplier already in the register as
+ * the hardware default uses multiplier 4 and 75 seconds for
+ * 300 seconds.
+ */
+ max_val = DIV_ROUND_UP(secs, mult);
+ if (max_val > MAX_TIMER_VAL || max_val * mult != secs)
+ max_val = MAX_TIMER_VAL;
+ }
+
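+ /* Split the requested time into a multiplier (data) and a period
+ * count (secs) such that data * secs covers the requested value.
+ */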
+ data = DIV_ROUND_UP(secs, max_val);
+ if (mult != data) {
+ value8 &= ~SW_AGE_CNT_M;
+ value8 |= FIELD_PREP(SW_AGE_CNT_M, data);
+ ret = ksz_write8(dev, REG_SW_LUE_CTRL_0, value8);
+ if (ret < 0)
+ return ret;
+ }
+
+ secs = DIV_ROUND_UP(secs, data);
+
value = FIELD_GET(SW_AGE_PERIOD_7_0_M, secs);
ret = ksz_write8(dev, REG_SW_AGE_PERIOD__1, value);
@@ -280,6 +540,7 @@ static void lan937x_set_tune_adj(struct ksz_device *dev, int port,
ksz_pread16(dev, port, reg, &data16);
/* Update tune Adjust */
+ data16 &= ~PORT_TUNE_ADJ;
data16 |= FIELD_PREP(PORT_TUNE_ADJ, val);
ksz_pwrite16(dev, port, reg, data16);
@@ -320,6 +581,9 @@ void lan937x_phylink_get_caps(struct ksz_device *dev, int port,
/* MII/RMII/RGMII ports */
config->mac_capabilities |= MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_100HD | MAC_10 | MAC_1000FD;
+ } else if (is_lan937x_tx_phy(dev, port)) {
+ config->mac_capabilities |= MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_100HD | MAC_10;
}
}
@@ -357,36 +621,39 @@ int lan937x_setup(struct dsa_switch *ds)
struct ksz_device *dev = ds->priv;
int ret;
- /* enable Indirect Access from SPI to the VPHY registers */
- ret = lan937x_enable_spi_indirect_access(dev);
- if (ret < 0) {
- dev_err(dev->dev, "failed to enable spi indirect access");
- return ret;
- }
-
/* The VLAN aware is a global setting. Mixed vlan
* filterings are not supported.
*/
ds->vlan_filtering_is_global = true;
/* Enable aggressive back off for half duplex & UNH mode */
- lan937x_cfg(dev, REG_SW_MAC_CTRL_0,
- (SW_PAUSE_UNH_MODE | SW_NEW_BACKOFF | SW_AGGR_BACKOFF),
- true);
+ ret = lan937x_cfg(dev, REG_SW_MAC_CTRL_0, (SW_PAUSE_UNH_MODE |
+ SW_NEW_BACKOFF |
+ SW_AGGR_BACKOFF), true);
+ if (ret < 0)
+ return ret;
/* If NO_EXC_COLLISION_DROP bit is set, the switch will not drop
* packets when 16 or more collisions occur
*/
- lan937x_cfg(dev, REG_SW_MAC_CTRL_1, NO_EXC_COLLISION_DROP, true);
+ ret = lan937x_cfg(dev, REG_SW_MAC_CTRL_1, NO_EXC_COLLISION_DROP, true);
+ if (ret < 0)
+ return ret;
/* enable global MIB counter freeze function */
- lan937x_cfg(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FREEZE, true);
+ ret = lan937x_cfg(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FREEZE, true);
+ if (ret < 0)
+ return ret;
/* disable CLK125 & CLK25, 1: disable, 0: enable */
- lan937x_cfg(dev, REG_SW_GLOBAL_OUTPUT_CTRL__1,
- (SW_CLK125_ENB | SW_CLK25_ENB), true);
+ ret = lan937x_cfg(dev, REG_SW_GLOBAL_OUTPUT_CTRL__1,
+ (SW_CLK125_ENB | SW_CLK25_ENB), true);
+ if (ret < 0)
+ return ret;
- return 0;
+ /* Disable global VPHY support. Related to CPU interface only? */
+ return ksz_rmw32(dev, REG_SW_CFG_STRAP_OVR, SW_VPHY_DISABLE,
+ SW_VPHY_DISABLE);
}
void lan937x_teardown(struct dsa_switch *ds)
diff --git a/drivers/net/dsa/microchip/lan937x_reg.h b/drivers/net/dsa/microchip/lan937x_reg.h
index 45b606b6429f..72042fd64e5b 100644
--- a/drivers/net/dsa/microchip/lan937x_reg.h
+++ b/drivers/net/dsa/microchip/lan937x_reg.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Microchip LAN937X switch register definitions
- * Copyright (C) 2019-2021 Microchip Technology Inc.
+ * Copyright (C) 2019-2024 Microchip Technology Inc.
*/
#ifndef __LAN937X_REG_H
#define __LAN937X_REG_H
@@ -37,6 +37,14 @@
#define SW_CLK125_ENB BIT(1)
#define SW_CLK25_ENB BIT(0)
+#define REG_SW_CFG_STRAP_VAL 0x0200
+#define SW_CASCADE_ID_CFG BIT(15)
+#define SW_VPHY_ADD_CFG BIT(0)
+
+/* 2 - PHY Control */
+#define REG_SW_CFG_STRAP_OVR 0x0214
+#define SW_VPHY_DISABLE BIT(31)
+
/* 3 - Operation Control */
#define REG_SW_OPERATION 0x0300
@@ -48,8 +56,7 @@
#define SW_VLAN_ENABLE BIT(7)
#define SW_DROP_INVALID_VID BIT(6)
-#define SW_AGE_CNT_M 0x7
-#define SW_AGE_CNT_S 3
+#define SW_AGE_CNT_M GENMASK(5, 3)
#define SW_RESV_MCAST_ENABLE BIT(2)
#define REG_SW_LUE_CTRL_1 0x0311
@@ -62,6 +69,10 @@
#define SW_FAST_AGING BIT(1)
#define SW_LINK_AUTO_AGING BIT(0)
+#define REG_SW_LUE_CTRL_2 0x0312
+
+#define SW_AGE_CNT_IN_MICROSEC BIT(7)
+
#define REG_SW_AGE_PERIOD__1 0x0313
#define SW_AGE_PERIOD_7_0_M GENMASK(7, 0)
@@ -147,6 +158,7 @@
/* 1 - Phy */
#define REG_PORT_T1_PHY_CTRL_BASE 0x0100
+#define REG_PORT_TX_PHY_CTRL_BASE 0x0280
/* 3 - xMII */
#define PORT_SGMII_SEL BIT(7)
diff --git a/drivers/net/dsa/mt7530-mdio.c b/drivers/net/dsa/mt7530-mdio.c
index fa3ee85a99c1..0286a6cecb6f 100644
--- a/drivers/net/dsa/mt7530-mdio.c
+++ b/drivers/net/dsa/mt7530-mdio.c
@@ -18,7 +18,8 @@
static int
mt7530_regmap_write(void *context, unsigned int reg, unsigned int val)
{
- struct mii_bus *bus = context;
+ struct mt7530_priv *priv = context;
+ struct mii_bus *bus = priv->bus;
u16 page, r, lo, hi;
int ret;
@@ -27,36 +28,35 @@ mt7530_regmap_write(void *context, unsigned int reg, unsigned int val)
lo = val & 0xffff;
hi = val >> 16;
- /* MT7530 uses 31 as the pseudo port */
- ret = bus->write(bus, 0x1f, 0x1f, page);
+ ret = bus->write(bus, priv->mdiodev->addr, 0x1f, page);
if (ret < 0)
return ret;
- ret = bus->write(bus, 0x1f, r, lo);
+ ret = bus->write(bus, priv->mdiodev->addr, r, lo);
if (ret < 0)
return ret;
- ret = bus->write(bus, 0x1f, 0x10, hi);
+ ret = bus->write(bus, priv->mdiodev->addr, 0x10, hi);
return ret;
}
static int
mt7530_regmap_read(void *context, unsigned int reg, unsigned int *val)
{
- struct mii_bus *bus = context;
+ struct mt7530_priv *priv = context;
+ struct mii_bus *bus = priv->bus;
u16 page, r, lo, hi;
int ret;
page = (reg >> 6) & 0x3ff;
r = (reg >> 2) & 0xf;
- /* MT7530 uses 31 as the pseudo port */
- ret = bus->write(bus, 0x1f, 0x1f, page);
+ ret = bus->write(bus, priv->mdiodev->addr, 0x1f, page);
if (ret < 0)
return ret;
- lo = bus->read(bus, 0x1f, r);
- hi = bus->read(bus, 0x1f, 0x10);
+ lo = bus->read(bus, priv->mdiodev->addr, r);
+ hi = bus->read(bus, priv->mdiodev->addr, 0x10);
*val = (hi << 16) | (lo & 0xffff);
@@ -107,8 +107,7 @@ mt7531_create_sgmii(struct mt7530_priv *priv)
mt7531_pcs_config[i]->unlock = mt7530_mdio_regmap_unlock;
mt7531_pcs_config[i]->lock_arg = &priv->bus->mdio_lock;
- regmap = devm_regmap_init(priv->dev,
- &mt7530_regmap_bus, priv->bus,
+ regmap = devm_regmap_init(priv->dev, &mt7530_regmap_bus, priv,
mt7531_pcs_config[i]);
if (IS_ERR(regmap)) {
ret = PTR_ERR(regmap);
@@ -137,10 +136,17 @@ static const struct of_device_id mt7530_of_match[] = {
};
MODULE_DEVICE_TABLE(of, mt7530_of_match);
+static const struct regmap_config regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = MT7530_CREV,
+ .disable_locking = true,
+};
+
static int
mt7530_probe(struct mdio_device *mdiodev)
{
- static struct regmap_config *regmap_config;
struct mt7530_priv *priv;
struct device_node *dn;
int ret;
@@ -153,6 +159,7 @@ mt7530_probe(struct mdio_device *mdiodev)
priv->bus = mdiodev->bus;
priv->dev = &mdiodev->dev;
+ priv->mdiodev = mdiodev;
ret = mt7530_probe_common(priv);
if (ret)
@@ -193,18 +200,8 @@ mt7530_probe(struct mdio_device *mdiodev)
return PTR_ERR(priv->io_pwr);
}
- regmap_config = devm_kzalloc(&mdiodev->dev, sizeof(*regmap_config),
- GFP_KERNEL);
- if (!regmap_config)
- return -ENOMEM;
-
- regmap_config->reg_bits = 16;
- regmap_config->val_bits = 32;
- regmap_config->reg_stride = 4;
- regmap_config->max_register = MT7530_CREV;
- regmap_config->disable_locking = true;
- priv->regmap = devm_regmap_init(priv->dev, &mt7530_regmap_bus,
- priv->bus, regmap_config);
+ priv->regmap = devm_regmap_init(priv->dev, &mt7530_regmap_bus, priv,
+ &regmap_config);
if (IS_ERR(priv->regmap))
return PTR_ERR(priv->regmap);
diff --git a/drivers/net/dsa/mt7530-mmio.c b/drivers/net/dsa/mt7530-mmio.c
index b74a230a3f13..1dc8b93fb51a 100644
--- a/drivers/net/dsa/mt7530-mmio.c
+++ b/drivers/net/dsa/mt7530-mmio.c
@@ -11,15 +11,24 @@
#include "mt7530.h"
static const struct of_device_id mt7988_of_match[] = {
+ { .compatible = "airoha,an7583-switch", .data = &mt753x_table[ID_AN7583], },
+ { .compatible = "airoha,en7581-switch", .data = &mt753x_table[ID_EN7581], },
{ .compatible = "mediatek,mt7988-switch", .data = &mt753x_table[ID_MT7988], },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, mt7988_of_match);
+static const struct regmap_config sw_regmap_config = {
+ .name = "switch",
+ .reg_bits = 16,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = MT7530_CREV,
+};
+
static int
mt7988_probe(struct platform_device *pdev)
{
- static struct regmap_config *sw_regmap_config;
struct mt7530_priv *priv;
void __iomem *base_addr;
int ret;
@@ -47,16 +56,8 @@ mt7988_probe(struct platform_device *pdev)
return -ENXIO;
}
- sw_regmap_config = devm_kzalloc(&pdev->dev, sizeof(*sw_regmap_config), GFP_KERNEL);
- if (!sw_regmap_config)
- return -ENOMEM;
-
- sw_regmap_config->name = "switch";
- sw_regmap_config->reg_bits = 16;
- sw_regmap_config->val_bits = 32;
- sw_regmap_config->reg_stride = 4;
- sw_regmap_config->max_register = MT7530_CREV;
- priv->regmap = devm_regmap_init_mmio(&pdev->dev, base_addr, sw_regmap_config);
+ priv->regmap = devm_regmap_init_mmio(&pdev->dev, base_addr,
+ &sw_regmap_config);
if (IS_ERR(priv->regmap))
return PTR_ERR(priv->regmap);
@@ -85,7 +86,7 @@ static void mt7988_shutdown(struct platform_device *pdev)
static struct platform_driver mt7988_platform_driver = {
.probe = mt7988_probe,
- .remove_new = mt7988_remove,
+ .remove = mt7988_remove,
.shutdown = mt7988_shutdown,
.driver = {
.name = "mt7530-mmio",
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 8090390edaf9..b9423389c2ef 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -21,6 +21,7 @@
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
#include <net/dsa.h>
+#include <net/pkt_cls.h>
#include "mt7530.h"
@@ -31,151 +32,105 @@ static struct mt753x_pcs *pcs_to_mt753x_pcs(struct phylink_pcs *pcs)
/* String, offset, and register size in bytes if different from 4 bytes */
static const struct mt7530_mib_desc mt7530_mib[] = {
- MIB_DESC(1, 0x00, "TxDrop"),
- MIB_DESC(1, 0x04, "TxCrcErr"),
- MIB_DESC(1, 0x08, "TxUnicast"),
- MIB_DESC(1, 0x0c, "TxMulticast"),
- MIB_DESC(1, 0x10, "TxBroadcast"),
- MIB_DESC(1, 0x14, "TxCollision"),
- MIB_DESC(1, 0x18, "TxSingleCollision"),
- MIB_DESC(1, 0x1c, "TxMultipleCollision"),
- MIB_DESC(1, 0x20, "TxDeferred"),
- MIB_DESC(1, 0x24, "TxLateCollision"),
- MIB_DESC(1, 0x28, "TxExcessiveCollistion"),
- MIB_DESC(1, 0x2c, "TxPause"),
- MIB_DESC(1, 0x30, "TxPktSz64"),
- MIB_DESC(1, 0x34, "TxPktSz65To127"),
- MIB_DESC(1, 0x38, "TxPktSz128To255"),
- MIB_DESC(1, 0x3c, "TxPktSz256To511"),
- MIB_DESC(1, 0x40, "TxPktSz512To1023"),
- MIB_DESC(1, 0x44, "Tx1024ToMax"),
- MIB_DESC(2, 0x48, "TxBytes"),
- MIB_DESC(1, 0x60, "RxDrop"),
- MIB_DESC(1, 0x64, "RxFiltering"),
- MIB_DESC(1, 0x68, "RxUnicast"),
- MIB_DESC(1, 0x6c, "RxMulticast"),
- MIB_DESC(1, 0x70, "RxBroadcast"),
- MIB_DESC(1, 0x74, "RxAlignErr"),
- MIB_DESC(1, 0x78, "RxCrcErr"),
- MIB_DESC(1, 0x7c, "RxUnderSizeErr"),
- MIB_DESC(1, 0x80, "RxFragErr"),
- MIB_DESC(1, 0x84, "RxOverSzErr"),
- MIB_DESC(1, 0x88, "RxJabberErr"),
- MIB_DESC(1, 0x8c, "RxPause"),
- MIB_DESC(1, 0x90, "RxPktSz64"),
- MIB_DESC(1, 0x94, "RxPktSz65To127"),
- MIB_DESC(1, 0x98, "RxPktSz128To255"),
- MIB_DESC(1, 0x9c, "RxPktSz256To511"),
- MIB_DESC(1, 0xa0, "RxPktSz512To1023"),
- MIB_DESC(1, 0xa4, "RxPktSz1024ToMax"),
- MIB_DESC(2, 0xa8, "RxBytes"),
- MIB_DESC(1, 0xb0, "RxCtrlDrop"),
- MIB_DESC(1, 0xb4, "RxIngressDrop"),
- MIB_DESC(1, 0xb8, "RxArlDrop"),
+ MIB_DESC(1, MT7530_PORT_MIB_TX_DROP, "TxDrop"),
+ MIB_DESC(1, MT7530_PORT_MIB_TX_CRC_ERR, "TxCrcErr"),
+ MIB_DESC(1, MT7530_PORT_MIB_TX_COLLISION, "TxCollision"),
+ MIB_DESC(1, MT7530_PORT_MIB_RX_DROP, "RxDrop"),
+ MIB_DESC(1, MT7530_PORT_MIB_RX_FILTERING, "RxFiltering"),
+ MIB_DESC(1, MT7530_PORT_MIB_RX_CRC_ERR, "RxCrcErr"),
+ MIB_DESC(1, MT7530_PORT_MIB_RX_CTRL_DROP, "RxCtrlDrop"),
+ MIB_DESC(1, MT7530_PORT_MIB_RX_INGRESS_DROP, "RxIngressDrop"),
+ MIB_DESC(1, MT7530_PORT_MIB_RX_ARL_DROP, "RxArlDrop"),
};
-/* Since phy_device has not yet been created and
- * phy_{read,write}_mmd_indirect is not available, we provide our own
- * core_{read,write}_mmd_indirect with core_{clear,write,set} wrappers
- * to complete this function.
- */
-static int
-core_read_mmd_indirect(struct mt7530_priv *priv, int prtad, int devad)
+static void
+mt7530_mutex_lock(struct mt7530_priv *priv)
+{
+ if (priv->bus)
+ mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED);
+}
+
+static void
+mt7530_mutex_unlock(struct mt7530_priv *priv)
+{
+ if (priv->bus)
+ mutex_unlock(&priv->bus->mdio_lock);
+}
+
+static void
+core_write(struct mt7530_priv *priv, u32 reg, u32 val)
{
struct mii_bus *bus = priv->bus;
- int value, ret;
+ int ret;
+
+ mt7530_mutex_lock(priv);
/* Write the desired MMD Devad */
- ret = bus->write(bus, 0, MII_MMD_CTRL, devad);
+ ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
+ MII_MMD_CTRL, MDIO_MMD_VEND2);
if (ret < 0)
goto err;
/* Write the desired MMD register address */
- ret = bus->write(bus, 0, MII_MMD_DATA, prtad);
+ ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
+ MII_MMD_DATA, reg);
if (ret < 0)
goto err;
/* Select the Function : DATA with no post increment */
- ret = bus->write(bus, 0, MII_MMD_CTRL, (devad | MII_MMD_CTRL_NOINCR));
+ ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
+ MII_MMD_CTRL, MDIO_MMD_VEND2 | MII_MMD_CTRL_NOINCR);
if (ret < 0)
goto err;
- /* Read the content of the MMD's selected register */
- value = bus->read(bus, 0, MII_MMD_DATA);
-
- return value;
+ /* Write the data into MMD's selected register */
+ ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
+ MII_MMD_DATA, val);
err:
- dev_err(&bus->dev, "failed to read mmd register\n");
+ if (ret < 0)
+ dev_err(&bus->dev, "failed to write mmd register\n");
- return ret;
+ mt7530_mutex_unlock(priv);
}
-static int
-core_write_mmd_indirect(struct mt7530_priv *priv, int prtad,
- int devad, u32 data)
+static void
+core_rmw(struct mt7530_priv *priv, u32 reg, u32 mask, u32 set)
{
struct mii_bus *bus = priv->bus;
+ u32 val;
int ret;
+ mt7530_mutex_lock(priv);
+
/* Write the desired MMD Devad */
- ret = bus->write(bus, 0, MII_MMD_CTRL, devad);
+ ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
+ MII_MMD_CTRL, MDIO_MMD_VEND2);
if (ret < 0)
goto err;
/* Write the desired MMD register address */
- ret = bus->write(bus, 0, MII_MMD_DATA, prtad);
+ ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
+ MII_MMD_DATA, reg);
if (ret < 0)
goto err;
/* Select the Function : DATA with no post increment */
- ret = bus->write(bus, 0, MII_MMD_CTRL, (devad | MII_MMD_CTRL_NOINCR));
+ ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
+ MII_MMD_CTRL, MDIO_MMD_VEND2 | MII_MMD_CTRL_NOINCR);
if (ret < 0)
goto err;
+ /* Read the content of the MMD's selected register */
+ val = bus->read(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
+ MII_MMD_DATA);
+ val &= ~mask;
+ val |= set;
/* Write the data into MMD's selected register */
- ret = bus->write(bus, 0, MII_MMD_DATA, data);
+ ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
+ MII_MMD_DATA, val);
err:
if (ret < 0)
- dev_err(&bus->dev,
- "failed to write mmd register\n");
- return ret;
-}
-
-static void
-mt7530_mutex_lock(struct mt7530_priv *priv)
-{
- if (priv->bus)
- mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED);
-}
-
-static void
-mt7530_mutex_unlock(struct mt7530_priv *priv)
-{
- if (priv->bus)
- mutex_unlock(&priv->bus->mdio_lock);
-}
-
-static void
-core_write(struct mt7530_priv *priv, u32 reg, u32 val)
-{
- mt7530_mutex_lock(priv);
-
- core_write_mmd_indirect(priv, reg, MDIO_MMD_VEND2, val);
-
- mt7530_mutex_unlock(priv);
-}
-
-static void
-core_rmw(struct mt7530_priv *priv, u32 reg, u32 mask, u32 set)
-{
- u32 val;
-
- mt7530_mutex_lock(priv);
-
- val = core_read_mmd_indirect(priv, reg, MDIO_MMD_VEND2);
- val &= ~mask;
- val |= set;
- core_write_mmd_indirect(priv, reg, MDIO_MMD_VEND2, val);
+ dev_err(&bus->dev, "failed to write mmd register\n");
mt7530_mutex_unlock(priv);
}
@@ -431,23 +386,23 @@ mt7530_setup_port6(struct dsa_switch *ds, phy_interface_t interface)
mt7530_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_MASK, P6_INTF_MODE(1));
- xtal = mt7530_read(priv, MT7530_MHWTRAP) & HWTRAP_XTAL_MASK;
+ xtal = mt7530_read(priv, MT753X_MTRAP) & MT7530_XTAL_MASK;
- if (xtal == HWTRAP_XTAL_25MHZ)
+ if (xtal == MT7530_XTAL_25MHZ)
ssc_delta = 0x57;
else
ssc_delta = 0x87;
if (priv->id == ID_MT7621) {
/* PLL frequency: 125MHz: 1.0GBit */
- if (xtal == HWTRAP_XTAL_40MHZ)
+ if (xtal == MT7530_XTAL_40MHZ)
ncpo1 = 0x0640;
- if (xtal == HWTRAP_XTAL_25MHZ)
+ if (xtal == MT7530_XTAL_25MHZ)
ncpo1 = 0x0a00;
} else { /* PLL frequency: 250MHz: 2.0Gbit */
- if (xtal == HWTRAP_XTAL_40MHZ)
+ if (xtal == MT7530_XTAL_40MHZ)
ncpo1 = 0x0c80;
- if (xtal == HWTRAP_XTAL_25MHZ)
+ if (xtal == MT7530_XTAL_25MHZ)
ncpo1 = 0x1400;
}
@@ -470,19 +425,20 @@ mt7530_setup_port6(struct dsa_switch *ds, phy_interface_t interface)
static void
mt7531_pll_setup(struct mt7530_priv *priv)
{
+ enum mt7531_xtal_fsel xtal;
u32 top_sig;
u32 hwstrap;
- u32 xtal;
u32 val;
val = mt7530_read(priv, MT7531_CREV);
top_sig = mt7530_read(priv, MT7531_TOP_SIG_SR);
- hwstrap = mt7530_read(priv, MT7531_HWTRAP);
+ hwstrap = mt7530_read(priv, MT753X_TRAP);
if ((val & CHIP_REV_M) > 0)
- xtal = (top_sig & PAD_MCM_SMI_EN) ? HWTRAP_XTAL_FSEL_40MHZ :
- HWTRAP_XTAL_FSEL_25MHZ;
+ xtal = (top_sig & PAD_MCM_SMI_EN) ? MT7531_XTAL_FSEL_40MHZ :
+ MT7531_XTAL_FSEL_25MHZ;
else
- xtal = hwstrap & HWTRAP_XTAL_FSEL_MASK;
+ xtal = (hwstrap & MT7531_XTAL25) ? MT7531_XTAL_FSEL_25MHZ :
+ MT7531_XTAL_FSEL_40MHZ;
/* Step 1 : Disable MT7531 COREPLL */
val = mt7530_read(priv, MT7531_PLLGP_EN);
@@ -511,13 +467,13 @@ mt7531_pll_setup(struct mt7530_priv *priv)
usleep_range(25, 35);
switch (xtal) {
- case HWTRAP_XTAL_FSEL_25MHZ:
+ case MT7531_XTAL_FSEL_25MHZ:
val = mt7530_read(priv, MT7531_PLLGP_CR0);
val &= ~RG_COREPLL_SDM_PCW_M;
val |= 0x140000 << RG_COREPLL_SDM_PCW_S;
mt7530_write(priv, MT7531_PLLGP_CR0, val);
break;
- case HWTRAP_XTAL_FSEL_40MHZ:
+ case MT7531_XTAL_FSEL_40MHZ:
val = mt7530_read(priv, MT7531_PLLGP_CR0);
val &= ~RG_COREPLL_SDM_PCW_M;
val |= 0x190000 << RG_COREPLL_SDM_PCW_S;
@@ -802,23 +758,33 @@ mt7530_get_strings(struct dsa_switch *ds, int port, u32 stringset,
}
static void
+mt7530_read_port_stats(struct mt7530_priv *priv, int port,
+ u32 offset, u8 size, uint64_t *data)
+{
+ u32 val, reg = MT7530_PORT_MIB_COUNTER(port) + offset;
+
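+ /* 64-bit MIB counters (size == 2) span two consecutive 32-bit
+ * registers, low word first.
+ */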
+ val = mt7530_read(priv, reg);
+ *data = val;
+
+ if (size == 2) {
+ val = mt7530_read(priv, reg + 4);
+ *data |= (u64)val << 32;
+ }
+}
+
+static void
mt7530_get_ethtool_stats(struct dsa_switch *ds, int port,
uint64_t *data)
{
struct mt7530_priv *priv = ds->priv;
const struct mt7530_mib_desc *mib;
- u32 reg, i;
- u64 hi;
+ int i;
for (i = 0; i < ARRAY_SIZE(mt7530_mib); i++) {
mib = &mt7530_mib[i];
- reg = MT7530_PORT_MIB_COUNTER(port) + mib->offset;
- data[i] = mt7530_read(priv, reg);
- if (mib->size == 2) {
- hi = mt7530_read(priv, reg + 4);
- data[i] |= hi << 32;
- }
+ mt7530_read_port_stats(priv, port, mib->offset, mib->size,
+ data + i);
}
}
@@ -831,6 +797,172 @@ mt7530_get_sset_count(struct dsa_switch *ds, int port, int sset)
return ARRAY_SIZE(mt7530_mib);
}
+static void mt7530_get_eth_mac_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ /* The MIB counters don't provide FramesTransmittedOK directly but
+ * instead count Unicast, Broadcast and Multicast frames separately.
+ * To simulate a global frame counter, read Unicast here and add
+ * Multicast and Broadcast later.
+ */
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_UNICAST, 1,
+ &mac_stats->FramesTransmittedOK);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_SINGLE_COLLISION, 1,
+ &mac_stats->SingleCollisionFrames);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_MULTIPLE_COLLISION, 1,
+ &mac_stats->MultipleCollisionFrames);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_UNICAST, 1,
+ &mac_stats->FramesReceivedOK);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_BYTES, 2,
+ &mac_stats->OctetsTransmittedOK);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_ALIGN_ERR, 1,
+ &mac_stats->AlignmentErrors);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_DEFERRED, 1,
+ &mac_stats->FramesWithDeferredXmissions);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_LATE_COLLISION, 1,
+ &mac_stats->LateCollisions);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_EXCESSIVE_COLLISION, 1,
+ &mac_stats->FramesAbortedDueToXSColls);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_BYTES, 2,
+ &mac_stats->OctetsReceivedOK);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_MULTICAST, 1,
+ &mac_stats->MulticastFramesXmittedOK);
+ mac_stats->FramesTransmittedOK += mac_stats->MulticastFramesXmittedOK;
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_BROADCAST, 1,
+ &mac_stats->BroadcastFramesXmittedOK);
+ mac_stats->FramesTransmittedOK += mac_stats->BroadcastFramesXmittedOK;
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_MULTICAST, 1,
+ &mac_stats->MulticastFramesReceivedOK);
+ mac_stats->FramesReceivedOK += mac_stats->MulticastFramesReceivedOK;
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_BROADCAST, 1,
+ &mac_stats->BroadcastFramesReceivedOK);
+ mac_stats->FramesReceivedOK += mac_stats->BroadcastFramesReceivedOK;
+}
+
+static const struct ethtool_rmon_hist_range mt7530_rmon_ranges[] = {
+ { 0, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, MT7530_MAX_MTU },
+ {}
+};
+
+static void mt7530_get_rmon_stats(struct dsa_switch *ds, int port,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_UNDER_SIZE_ERR, 1,
+ &rmon_stats->undersize_pkts);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_OVER_SZ_ERR, 1,
+ &rmon_stats->oversize_pkts);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_FRAG_ERR, 1,
+ &rmon_stats->fragments);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_JABBER_ERR, 1,
+ &rmon_stats->jabbers);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_PKT_SZ_64, 1,
+ &rmon_stats->hist[0]);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_PKT_SZ_65_TO_127, 1,
+ &rmon_stats->hist[1]);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_PKT_SZ_128_TO_255, 1,
+ &rmon_stats->hist[2]);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_PKT_SZ_256_TO_511, 1,
+ &rmon_stats->hist[3]);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_PKT_SZ_512_TO_1023, 1,
+ &rmon_stats->hist[4]);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_PKT_SZ_1024_TO_MAX, 1,
+ &rmon_stats->hist[5]);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_PKT_SZ_64, 1,
+ &rmon_stats->hist_tx[0]);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_PKT_SZ_65_TO_127, 1,
+ &rmon_stats->hist_tx[1]);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_PKT_SZ_128_TO_255, 1,
+ &rmon_stats->hist_tx[2]);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_PKT_SZ_256_TO_511, 1,
+ &rmon_stats->hist_tx[3]);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_PKT_SZ_512_TO_1023, 1,
+ &rmon_stats->hist_tx[4]);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_PKT_SZ_1024_TO_MAX, 1,
+ &rmon_stats->hist_tx[5]);
+
+ *ranges = mt7530_rmon_ranges;
+}
+
+static void mt7530_get_stats64(struct dsa_switch *ds, int port,
+ struct rtnl_link_stats64 *storage)
+{
+ struct mt7530_priv *priv = ds->priv;
+ uint64_t data;
+
+ /* The MIB counters don't provide global packet counters but instead
+ * provide separate stats for Unicast, Broadcast and Multicast frames.
+ * To simulate global packet counters, read the Unicast counters here
+ * and add the Multicast and Broadcast counters below.
+ */
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_UNICAST, 1,
+ &storage->rx_packets);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_MULTICAST, 1,
+ &storage->multicast);
+ storage->rx_packets += storage->multicast;
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_BROADCAST, 1,
+ &data);
+ storage->rx_packets += data;
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_UNICAST, 1,
+ &storage->tx_packets);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_MULTICAST, 1,
+ &data);
+ storage->tx_packets += data;
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_BROADCAST, 1,
+ &data);
+ storage->tx_packets += data;
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_BYTES, 2,
+ &storage->rx_bytes);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_BYTES, 2,
+ &storage->tx_bytes);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_DROP, 1,
+ &storage->rx_dropped);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_DROP, 1,
+ &storage->tx_dropped);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_CRC_ERR, 1,
+ &storage->rx_crc_errors);
+}
+
+static void mt7530_get_eth_ctrl_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_PAUSE, 1,
+ &ctrl_stats->MACControlFramesTransmitted);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_PAUSE, 1,
+ &ctrl_stats->MACControlFramesReceived);
+}
+
static int
mt7530_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
{
@@ -871,19 +1003,15 @@ mt7530_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
return 0;
}
-static const char *p5_intf_modes(unsigned int p5_interface)
+static const char *mt7530_p5_mode_str(unsigned int mode)
{
- switch (p5_interface) {
- case P5_DISABLED:
- return "DISABLED";
- case P5_INTF_SEL_PHY_P0:
- return "PHY P0";
- case P5_INTF_SEL_PHY_P4:
- return "PHY P4";
- case P5_INTF_SEL_GMAC5:
- return "GMAC5";
+ switch (mode) {
+ case MUX_PHY_P0:
+ return "MUX PHY P0";
+ case MUX_PHY_P4:
+ return "MUX PHY P4";
default:
- return "unknown";
+ return "GMAC5";
}
}
@@ -895,34 +1023,31 @@ static void mt7530_setup_port5(struct dsa_switch *ds, phy_interface_t interface)
mutex_lock(&priv->reg_mutex);
- val = mt7530_read(priv, MT7530_MHWTRAP);
+ val = mt7530_read(priv, MT753X_MTRAP);
- val |= MHWTRAP_MANUAL | MHWTRAP_P5_MAC_SEL | MHWTRAP_P5_DIS;
- val &= ~MHWTRAP_P5_RGMII_MODE & ~MHWTRAP_PHY0_SEL;
+ val &= ~MT7530_P5_PHY0_SEL & ~MT7530_P5_MAC_SEL & ~MT7530_P5_RGMII_MODE;
- switch (priv->p5_intf_sel) {
- case P5_INTF_SEL_PHY_P0:
- /* MT7530_P5_MODE_GPHY_P0: 2nd GMAC -> P5 -> P0 */
- val |= MHWTRAP_PHY0_SEL;
+ switch (priv->p5_mode) {
+ /* MUX_PHY_P0: P0 -> P5 -> SoC MAC */
+ case MUX_PHY_P0:
+ val |= MT7530_P5_PHY0_SEL;
fallthrough;
- case P5_INTF_SEL_PHY_P4:
- /* MT7530_P5_MODE_GPHY_P4: 2nd GMAC -> P5 -> P4 */
- val &= ~MHWTRAP_P5_MAC_SEL & ~MHWTRAP_P5_DIS;
+ /* MUX_PHY_P4: P4 -> P5 -> SoC MAC */
+ case MUX_PHY_P4:
/* Setup the MAC by default for the cpu port */
- mt7530_write(priv, MT7530_PMCR_P(5), 0x56300);
- break;
- case P5_INTF_SEL_GMAC5:
- /* MT7530_P5_MODE_GMAC: P5 -> External phy or 2nd GMAC */
- val &= ~MHWTRAP_P5_DIS;
+ mt7530_write(priv, MT753X_PMCR_P(5), 0x56300);
break;
+
+ /* GMAC5: P5 -> SoC MAC or external PHY */
default:
+ val |= MT7530_P5_MAC_SEL;
break;
}
/* Setup RGMII settings */
if (phy_interface_mode_is_rgmii(interface)) {
- val |= MHWTRAP_P5_RGMII_MODE;
+ val |= MT7530_P5_RGMII_MODE;
/* P5 RGMII RX Clock Control: delay setting for 1000M */
mt7530_write(priv, MT7530_P5RGMIIRXCR, CSR_RGMII_EDGE_ALIGN);
@@ -942,10 +1067,10 @@ static void mt7530_setup_port5(struct dsa_switch *ds, phy_interface_t interface)
P5_IO_CLK_DRV(1) | P5_IO_DATA_DRV(1));
}
- mt7530_write(priv, MT7530_MHWTRAP, val);
+ mt7530_write(priv, MT753X_MTRAP, val);
- dev_dbg(ds->dev, "Setup P5, HWTRAP=0x%x, intf_sel=%s, phy-mode=%s\n",
- val, p5_intf_modes(priv->p5_intf_sel), phy_modes(interface));
+ dev_dbg(ds->dev, "Setup P5, HWTRAP=0x%x, mode=%s, phy-mode=%s\n", val,
+ mt7530_p5_mode_str(priv->p5_mode), phy_modes(interface));
mutex_unlock(&priv->reg_mutex);
}
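For reference, a stand-alone sketch of which MT753X_MTRAP selector bits each p5_mode ends up with (the PMCR write and RGMII handling are omitted); the bit positions are taken from the old MHWTRAP defines further down in the header diff and are illustrative only:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative bit positions; the authoritative values live in mt7530.h. */
    #define P5_PHY0_SEL (1u << 20)
    #define P5_MAC_SEL  (1u << 13)

    enum p5_mode { MUX_PHY_P0, MUX_PHY_P4, GMAC5 };

    static uint32_t p5_trap_bits(enum p5_mode mode)
    {
        uint32_t val = 0;  /* PHY0_SEL, MAC_SEL and RGMII_MODE start cleared */

        switch (mode) {
        case MUX_PHY_P0:
            val |= P5_PHY0_SEL;  /* P0 -> P5 -> SoC MAC */
            /* fall through */
        case MUX_PHY_P4:         /* P4 -> P5 -> SoC MAC */
            break;
        default:                 /* GMAC5: P5 -> SoC MAC or external PHY */
            val |= P5_MAC_SEL;
            break;
        }
        return val;
    }

    int main(void)
    {
        printf("%#x %#x %#x\n", p5_trap_bits(MUX_PHY_P0),
               p5_trap_bits(MUX_PHY_P4), p5_trap_bits(GMAC5));
        return 0;
    }

The driver then sets MT7530_P5_RGMII_MODE independently whenever the phy-mode is an RGMII variant.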
@@ -1125,42 +1250,34 @@ mt753x_trap_frames(struct mt7530_priv *priv)
* VLAN-untagged.
*/
mt7530_rmw(priv, MT753X_BPC,
- MT753X_PAE_BPDU_FR | MT753X_PAE_EG_TAG_MASK |
- MT753X_PAE_PORT_FW_MASK | MT753X_BPDU_EG_TAG_MASK |
- MT753X_BPDU_PORT_FW_MASK,
- MT753X_PAE_BPDU_FR |
- MT753X_PAE_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
- MT753X_PAE_PORT_FW(MT753X_BPDU_CPU_ONLY) |
- MT753X_BPDU_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
- MT753X_BPDU_CPU_ONLY);
+ PAE_BPDU_FR | PAE_EG_TAG_MASK | PAE_PORT_FW_MASK |
+ BPDU_EG_TAG_MASK | BPDU_PORT_FW_MASK,
+ PAE_BPDU_FR | PAE_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ PAE_PORT_FW(TO_CPU_FW_CPU_ONLY) |
+ BPDU_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ TO_CPU_FW_CPU_ONLY);
/* Trap frames with :01 and :02 MAC DAs to the CPU port(s) and egress
* them VLAN-untagged.
*/
mt7530_rmw(priv, MT753X_RGAC1,
- MT753X_R02_BPDU_FR | MT753X_R02_EG_TAG_MASK |
- MT753X_R02_PORT_FW_MASK | MT753X_R01_BPDU_FR |
- MT753X_R01_EG_TAG_MASK | MT753X_R01_PORT_FW_MASK,
- MT753X_R02_BPDU_FR |
- MT753X_R02_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
- MT753X_R02_PORT_FW(MT753X_BPDU_CPU_ONLY) |
- MT753X_R01_BPDU_FR |
- MT753X_R01_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
- MT753X_BPDU_CPU_ONLY);
+ R02_BPDU_FR | R02_EG_TAG_MASK | R02_PORT_FW_MASK |
+ R01_BPDU_FR | R01_EG_TAG_MASK | R01_PORT_FW_MASK,
+ R02_BPDU_FR | R02_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ R02_PORT_FW(TO_CPU_FW_CPU_ONLY) | R01_BPDU_FR |
+ R01_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ TO_CPU_FW_CPU_ONLY);
/* Trap frames with :03 and :0E MAC DAs to the CPU port(s) and egress
* them VLAN-untagged.
*/
mt7530_rmw(priv, MT753X_RGAC2,
- MT753X_R0E_BPDU_FR | MT753X_R0E_EG_TAG_MASK |
- MT753X_R0E_PORT_FW_MASK | MT753X_R03_BPDU_FR |
- MT753X_R03_EG_TAG_MASK | MT753X_R03_PORT_FW_MASK,
- MT753X_R0E_BPDU_FR |
- MT753X_R0E_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
- MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY) |
- MT753X_R03_BPDU_FR |
- MT753X_R03_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
- MT753X_BPDU_CPU_ONLY);
+ R0E_BPDU_FR | R0E_EG_TAG_MASK | R0E_PORT_FW_MASK |
+ R03_BPDU_FR | R03_EG_TAG_MASK | R03_PORT_FW_MASK,
+ R0E_BPDU_FR | R0E_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ R0E_PORT_FW(TO_CPU_FW_CPU_ONLY) | R03_BPDU_FR |
+ R03_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ TO_CPU_FW_CPU_ONLY);
}
static void
@@ -1173,14 +1290,15 @@ mt753x_cpu_port_enable(struct dsa_switch *ds, int port)
PORT_SPEC_TAG);
/* Enable flooding on the CPU port */
- mt7530_set(priv, MT7530_MFC, BC_FFP(BIT(port)) | UNM_FFP(BIT(port)) |
+ mt7530_set(priv, MT753X_MFC, BC_FFP(BIT(port)) | UNM_FFP(BIT(port)) |
UNU_FFP(BIT(port)));
/* Add the CPU port to the CPU port bitmap for MT7531 and the switch on
* the MT7988 SoC. Trapped frames will be forwarded to the CPU port that
* is affine to the inbound user port.
*/
- if (priv->id == ID_MT7531 || priv->id == ID_MT7988)
+ if (priv->id == ID_MT7531 || priv->id == ID_MT7988 ||
+ priv->id == ID_EN7581 || priv->id == ID_AN7583)
mt7530_set(priv, MT7531_CFC, MT7531_CPU_PMAP(BIT(port)));
/* CPU port gets connected to all user ports of
@@ -1218,6 +1336,14 @@ mt7530_port_enable(struct dsa_switch *ds, int port,
mutex_unlock(&priv->reg_mutex);
+ if (priv->id != ID_MT7530 && priv->id != ID_MT7621)
+ return 0;
+
+ if (port == 5)
+ mt7530_clear(priv, MT753X_MTRAP, MT7530_P5_DIS);
+ else if (port == 6)
+ mt7530_clear(priv, MT753X_MTRAP, MT7530_P6_DIS);
+
return 0;
}
@@ -1236,6 +1362,15 @@ mt7530_port_disable(struct dsa_switch *ds, int port)
PCR_MATRIX_CLR);
mutex_unlock(&priv->reg_mutex);
+
+ if (priv->id != ID_MT7530 && priv->id != ID_MT7621)
+ return;
+
+ /* Do not set MT7530_P5_DIS when port 5 is being used for PHY muxing. */
+ if (port == 5 && priv->p5_mode == GMAC5)
+ mt7530_set(priv, MT753X_MTRAP, MT7530_P5_DIS);
+ else if (port == 6)
+ mt7530_set(priv, MT753X_MTRAP, MT7530_P6_DIS);
}
static int
@@ -1313,13 +1448,62 @@ mt7530_stp_state_set(struct dsa_switch *ds, int port, u8 state)
FID_PST(FID_BRIDGED, stp_state));
}
+static void mt7530_update_port_member(struct mt7530_priv *priv, int port,
+ const struct net_device *bridge_dev,
+ bool join) __must_hold(&priv->reg_mutex)
+{
+ struct dsa_port *dp = dsa_to_port(priv->ds, port), *other_dp;
+ struct mt7530_port *p = &priv->ports[port], *other_p;
+ struct dsa_port *cpu_dp = dp->cpu_dp;
+ u32 port_bitmap = BIT(cpu_dp->index);
+ int other_port;
+ bool isolated;
+
+ dsa_switch_for_each_user_port(other_dp, priv->ds) {
+ other_port = other_dp->index;
+ other_p = &priv->ports[other_port];
+
+ if (dp == other_dp)
+ continue;
+
+ /* Add/remove this port to/from the port matrix of the other
+ * ports in the same bridge. If a port is disabled, its port
+ * matrix is kept but not applied until the port becomes
+ * enabled.
+ */
+ if (!dsa_port_offloads_bridge_dev(other_dp, bridge_dev))
+ continue;
+
+ isolated = p->isolated && other_p->isolated;
+
+ if (join && !isolated) {
+ other_p->pm |= PCR_MATRIX(BIT(port));
+ port_bitmap |= BIT(other_port);
+ } else {
+ other_p->pm &= ~PCR_MATRIX(BIT(port));
+ }
+
+ if (other_p->enable)
+ mt7530_rmw(priv, MT7530_PCR_P(other_port),
+ PCR_MATRIX_MASK, other_p->pm);
+ }
+
+ /* Add/remove all the other ports to/from this port's matrix. For !join
+ * (leaving the bridge), only the CPU port will remain in this port's
+ * matrix.
+ */
+ p->pm = PCR_MATRIX(port_bitmap);
+ if (priv->ports[port].enable)
+ mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK, p->pm);
+}
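A minimal, self-contained sketch of the bookkeeping mt7530_update_port_member() performs, assuming a hypothetical bridge of user ports 1-3 behind CPU port 6 where ports 2 and 3 carry BR_ISOLATED; PCR_MATRIX() is modeled here as a plain shift into bits 23:16, matching PCR_MATRIX_MASK:

    #include <stdint.h>
    #include <stdio.h>

    #define PCR_MATRIX(x) ((uint32_t)(x) << 16)  /* port member field, bits 23:16 */

    int main(void)
    {
        int cpu = 6, ports[] = { 1, 2, 3 };
        int isolated[] = { 0, 1, 1 };  /* ports 2 and 3 are isolated */
        unsigned int i, j;

        for (i = 0; i < 3; i++) {
            uint32_t bitmap = 1u << cpu;  /* CPU port is always a member */

            for (j = 0; j < 3; j++) {
                if (i == j)
                    continue;
                /* Two isolated ports may not reach each other directly. */
                if (!(isolated[i] && isolated[j]))
                    bitmap |= 1u << ports[j];
            }
            printf("port %d: PCR_MATRIX=0x%08x\n", ports[i], PCR_MATRIX(bitmap));
        }
        return 0;
    }

Running this prints 0x004c0000 for port 1 and 0x00420000 for ports 2 and 3: the isolated ports keep only the CPU port and the non-isolated port in their matrices, which is exactly the relationship the driver programs into the PCR registers.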
+
static int
mt7530_port_pre_bridge_flags(struct dsa_switch *ds, int port,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack)
{
if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
- BR_BCAST_FLOOD))
+ BR_BCAST_FLOOD | BR_ISOLATED))
return -EINVAL;
return 0;
@@ -1337,17 +1521,28 @@ mt7530_port_bridge_flags(struct dsa_switch *ds, int port,
flags.val & BR_LEARNING ? 0 : SA_DIS);
if (flags.mask & BR_FLOOD)
- mt7530_rmw(priv, MT7530_MFC, UNU_FFP(BIT(port)),
+ mt7530_rmw(priv, MT753X_MFC, UNU_FFP(BIT(port)),
flags.val & BR_FLOOD ? UNU_FFP(BIT(port)) : 0);
if (flags.mask & BR_MCAST_FLOOD)
- mt7530_rmw(priv, MT7530_MFC, UNM_FFP(BIT(port)),
+ mt7530_rmw(priv, MT753X_MFC, UNM_FFP(BIT(port)),
flags.val & BR_MCAST_FLOOD ? UNM_FFP(BIT(port)) : 0);
if (flags.mask & BR_BCAST_FLOOD)
- mt7530_rmw(priv, MT7530_MFC, BC_FFP(BIT(port)),
+ mt7530_rmw(priv, MT753X_MFC, BC_FFP(BIT(port)),
flags.val & BR_BCAST_FLOOD ? BC_FFP(BIT(port)) : 0);
+ if (flags.mask & BR_ISOLATED) {
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct net_device *bridge_dev = dsa_port_bridge_dev_get(dp);
+
+ priv->ports[port].isolated = !!(flags.val & BR_ISOLATED);
+
+ mutex_lock(&priv->reg_mutex);
+ mt7530_update_port_member(priv, port, bridge_dev, true);
+ mutex_unlock(&priv->reg_mutex);
+ }
+
return 0;
}
@@ -1356,39 +1551,11 @@ mt7530_port_bridge_join(struct dsa_switch *ds, int port,
struct dsa_bridge bridge, bool *tx_fwd_offload,
struct netlink_ext_ack *extack)
{
- struct dsa_port *dp = dsa_to_port(ds, port), *other_dp;
- struct dsa_port *cpu_dp = dp->cpu_dp;
- u32 port_bitmap = BIT(cpu_dp->index);
struct mt7530_priv *priv = ds->priv;
mutex_lock(&priv->reg_mutex);
- dsa_switch_for_each_user_port(other_dp, ds) {
- int other_port = other_dp->index;
-
- if (dp == other_dp)
- continue;
-
- /* Add this port to the port matrix of the other ports in the
- * same bridge. If the port is disabled, port matrix is kept
- * and not being setup until the port becomes enabled.
- */
- if (!dsa_port_offloads_bridge(other_dp, &bridge))
- continue;
-
- if (priv->ports[other_port].enable)
- mt7530_set(priv, MT7530_PCR_P(other_port),
- PCR_MATRIX(BIT(port)));
- priv->ports[other_port].pm |= PCR_MATRIX(BIT(port));
-
- port_bitmap |= BIT(other_port);
- }
-
- /* Add the all other ports to this port matrix. */
- if (priv->ports[port].enable)
- mt7530_rmw(priv, MT7530_PCR_P(port),
- PCR_MATRIX_MASK, PCR_MATRIX(port_bitmap));
- priv->ports[port].pm |= PCR_MATRIX(port_bitmap);
+ mt7530_update_port_member(priv, port, bridge.dev, true);
/* Set to fallback mode for independent VLAN learning */
mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
@@ -1423,7 +1590,7 @@ mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port)
mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK,
G0_PORT_VID_DEF);
- for (i = 0; i < MT7530_NUM_PORTS; i++) {
+ for (i = 0; i < priv->ds->num_ports; i++) {
if (dsa_is_user_port(ds, i) &&
dsa_port_is_vlan_filtering(dsa_to_port(ds, i))) {
all_user_ports_removed = false;
@@ -1489,38 +1656,11 @@ static void
mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
struct dsa_bridge bridge)
{
- struct dsa_port *dp = dsa_to_port(ds, port), *other_dp;
- struct dsa_port *cpu_dp = dp->cpu_dp;
struct mt7530_priv *priv = ds->priv;
mutex_lock(&priv->reg_mutex);
- dsa_switch_for_each_user_port(other_dp, ds) {
- int other_port = other_dp->index;
-
- if (dp == other_dp)
- continue;
-
- /* Remove this port from the port matrix of the other ports
- * in the same bridge. If the port is disabled, port matrix
- * is kept and not being setup until the port becomes enabled.
- */
- if (!dsa_port_offloads_bridge(other_dp, &bridge))
- continue;
-
- if (priv->ports[other_port].enable)
- mt7530_clear(priv, MT7530_PCR_P(other_port),
- PCR_MATRIX(BIT(port)));
- priv->ports[other_port].pm &= ~PCR_MATRIX(BIT(port));
- }
-
- /* Set the cpu port to be the only one in the port matrix of
- * this port.
- */
- if (priv->ports[port].enable)
- mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK,
- PCR_MATRIX(BIT(cpu_dp->index)));
- priv->ports[port].pm = PCR_MATRIX(BIT(cpu_dp->index));
+ mt7530_update_port_member(priv, port, bridge.dev, false);
/* When a port is removed from the bridge, the port would be set up
* back to the default as is at initial boot which is a VLAN-unaware
@@ -1881,20 +2021,6 @@ mt7530_port_vlan_del(struct dsa_switch *ds, int port,
return 0;
}
-static int mt753x_mirror_port_get(unsigned int id, u32 val)
-{
- return (id == ID_MT7531 || id == ID_MT7988) ?
- MT7531_MIRROR_PORT_GET(val) :
- MIRROR_PORT(val);
-}
-
-static int mt753x_mirror_port_set(unsigned int id, u32 val)
-{
- return (id == ID_MT7531 || id == ID_MT7988) ?
- MT7531_MIRROR_PORT_SET(val) :
- MIRROR_PORT(val);
-}
-
static int mt753x_port_mirror_add(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror,
bool ingress, struct netlink_ext_ack *extack)
@@ -1910,14 +2036,14 @@ static int mt753x_port_mirror_add(struct dsa_switch *ds, int port,
val = mt7530_read(priv, MT753X_MIRROR_REG(priv->id));
/* MT7530 only supports one monitor port */
- monitor_port = mt753x_mirror_port_get(priv->id, val);
+ monitor_port = MT753X_MIRROR_PORT_GET(priv->id, val);
if (val & MT753X_MIRROR_EN(priv->id) &&
monitor_port != mirror->to_local_port)
return -EEXIST;
val |= MT753X_MIRROR_EN(priv->id);
- val &= ~MT753X_MIRROR_MASK(priv->id);
- val |= mt753x_mirror_port_set(priv->id, mirror->to_local_port);
+ val &= ~MT753X_MIRROR_PORT_MASK(priv->id);
+ val |= MT753X_MIRROR_PORT_SET(priv->id, mirror->to_local_port);
mt7530_write(priv, MT753X_MIRROR_REG(priv->id), val);
val = mt7530_read(priv, MT7530_PCR_P(port));
@@ -1986,7 +2112,7 @@ mt7530_gpio_get(struct gpio_chip *gc, unsigned int offset)
return !!(mt7530_read(priv, MT7530_LED_GPIO_DATA) & bit);
}
-static void
+static int
mt7530_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
struct mt7530_priv *priv = gpiochip_get_data(gc);
@@ -1996,6 +2122,8 @@ mt7530_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
mt7530_set(priv, MT7530_LED_GPIO_DATA, bit);
else
mt7530_clear(priv, MT7530_LED_GPIO_DATA, bit);
+
+ return 0;
}
static int
@@ -2068,131 +2196,6 @@ mt7530_setup_gpio(struct mt7530_priv *priv)
}
#endif /* CONFIG_GPIOLIB */
-static irqreturn_t
-mt7530_irq_thread_fn(int irq, void *dev_id)
-{
- struct mt7530_priv *priv = dev_id;
- bool handled = false;
- u32 val;
- int p;
-
- mt7530_mutex_lock(priv);
- val = mt7530_mii_read(priv, MT7530_SYS_INT_STS);
- mt7530_mii_write(priv, MT7530_SYS_INT_STS, val);
- mt7530_mutex_unlock(priv);
-
- for (p = 0; p < MT7530_NUM_PHYS; p++) {
- if (BIT(p) & val) {
- unsigned int irq;
-
- irq = irq_find_mapping(priv->irq_domain, p);
- handle_nested_irq(irq);
- handled = true;
- }
- }
-
- return IRQ_RETVAL(handled);
-}
-
-static void
-mt7530_irq_mask(struct irq_data *d)
-{
- struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
-
- priv->irq_enable &= ~BIT(d->hwirq);
-}
-
-static void
-mt7530_irq_unmask(struct irq_data *d)
-{
- struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
-
- priv->irq_enable |= BIT(d->hwirq);
-}
-
-static void
-mt7530_irq_bus_lock(struct irq_data *d)
-{
- struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
-
- mt7530_mutex_lock(priv);
-}
-
-static void
-mt7530_irq_bus_sync_unlock(struct irq_data *d)
-{
- struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
-
- mt7530_mii_write(priv, MT7530_SYS_INT_EN, priv->irq_enable);
- mt7530_mutex_unlock(priv);
-}
-
-static struct irq_chip mt7530_irq_chip = {
- .name = KBUILD_MODNAME,
- .irq_mask = mt7530_irq_mask,
- .irq_unmask = mt7530_irq_unmask,
- .irq_bus_lock = mt7530_irq_bus_lock,
- .irq_bus_sync_unlock = mt7530_irq_bus_sync_unlock,
-};
-
-static int
-mt7530_irq_map(struct irq_domain *domain, unsigned int irq,
- irq_hw_number_t hwirq)
-{
- irq_set_chip_data(irq, domain->host_data);
- irq_set_chip_and_handler(irq, &mt7530_irq_chip, handle_simple_irq);
- irq_set_nested_thread(irq, true);
- irq_set_noprobe(irq);
-
- return 0;
-}
-
-static const struct irq_domain_ops mt7530_irq_domain_ops = {
- .map = mt7530_irq_map,
- .xlate = irq_domain_xlate_onecell,
-};
-
-static void
-mt7988_irq_mask(struct irq_data *d)
-{
- struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
-
- priv->irq_enable &= ~BIT(d->hwirq);
- mt7530_mii_write(priv, MT7530_SYS_INT_EN, priv->irq_enable);
-}
-
-static void
-mt7988_irq_unmask(struct irq_data *d)
-{
- struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
-
- priv->irq_enable |= BIT(d->hwirq);
- mt7530_mii_write(priv, MT7530_SYS_INT_EN, priv->irq_enable);
-}
-
-static struct irq_chip mt7988_irq_chip = {
- .name = KBUILD_MODNAME,
- .irq_mask = mt7988_irq_mask,
- .irq_unmask = mt7988_irq_unmask,
-};
-
-static int
-mt7988_irq_map(struct irq_domain *domain, unsigned int irq,
- irq_hw_number_t hwirq)
-{
- irq_set_chip_data(irq, domain->host_data);
- irq_set_chip_and_handler(irq, &mt7988_irq_chip, handle_simple_irq);
- irq_set_nested_thread(irq, true);
- irq_set_noprobe(irq);
-
- return 0;
-}
-
-static const struct irq_domain_ops mt7988_irq_domain_ops = {
- .map = mt7988_irq_map,
- .xlate = irq_domain_xlate_onecell,
-};
-
static void
mt7530_setup_mdio_irq(struct mt7530_priv *priv)
{
@@ -2209,49 +2212,72 @@ mt7530_setup_mdio_irq(struct mt7530_priv *priv)
}
}
+static const struct regmap_irq mt7530_irqs[] = {
+ REGMAP_IRQ_REG_LINE(0, 32), /* PHY0_LC */
+ REGMAP_IRQ_REG_LINE(1, 32), /* PHY1_LC */
+ REGMAP_IRQ_REG_LINE(2, 32), /* PHY2_LC */
+ REGMAP_IRQ_REG_LINE(3, 32), /* PHY3_LC */
+ REGMAP_IRQ_REG_LINE(4, 32), /* PHY4_LC */
+ REGMAP_IRQ_REG_LINE(5, 32), /* PHY5_LC */
+ REGMAP_IRQ_REG_LINE(6, 32), /* PHY6_LC */
+ REGMAP_IRQ_REG_LINE(16, 32), /* MAC_PC */
+ REGMAP_IRQ_REG_LINE(17, 32), /* BMU */
+ REGMAP_IRQ_REG_LINE(18, 32), /* MIB */
+ REGMAP_IRQ_REG_LINE(22, 32), /* ARL_COL_FULL_COL */
+ REGMAP_IRQ_REG_LINE(23, 32), /* ARL_COL_FULL */
+ REGMAP_IRQ_REG_LINE(24, 32), /* ARL_TBL_ERR */
+ REGMAP_IRQ_REG_LINE(25, 32), /* ARL_PKT_QERR */
+ REGMAP_IRQ_REG_LINE(26, 32), /* ARL_EQ_ERR */
+ REGMAP_IRQ_REG_LINE(27, 32), /* ARL_PKT_BC */
+ REGMAP_IRQ_REG_LINE(28, 32), /* ARL_SEC_IG1X */
+ REGMAP_IRQ_REG_LINE(29, 32), /* ARL_SEC_VLAN */
+ REGMAP_IRQ_REG_LINE(30, 32), /* ARL_SEC_TAG */
+ REGMAP_IRQ_REG_LINE(31, 32), /* ACL */
+};
+
+static const struct regmap_irq_chip mt7530_regmap_irq_chip = {
+ .name = KBUILD_MODNAME,
+ .status_base = MT7530_SYS_INT_STS,
+ .unmask_base = MT7530_SYS_INT_EN,
+ .ack_base = MT7530_SYS_INT_STS,
+ .init_ack_masked = true,
+ .irqs = mt7530_irqs,
+ .num_irqs = ARRAY_SIZE(mt7530_irqs),
+ .num_regs = 1,
+};
+
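For readers unfamiliar with the regmap-irq helpers: each REGMAP_IRQ_REG_LINE(n, 32) entry describes one interrupt source as bit n % 32 of status/mask register n / 32, so with num_regs = 1 every listed source lives in the single 32-bit MT7530_SYS_INT_STS / MT7530_SYS_INT_EN pair (the MIB interrupt on line 18, for instance, is bit 18). Compared with the removed hand-rolled irq_chip, which only exposed the seven PHY link-change lines, this table also describes the MAC, BMU, MIB, ARL and ACL sources.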
static int
mt7530_setup_irq(struct mt7530_priv *priv)
{
+ struct regmap_irq_chip_data *irq_data;
struct device *dev = priv->dev;
struct device_node *np = dev->of_node;
- int ret;
+ int irq, ret;
if (!of_property_read_bool(np, "interrupt-controller")) {
dev_info(dev, "no interrupt support\n");
return 0;
}
- priv->irq = of_irq_get(np, 0);
- if (priv->irq <= 0) {
- dev_err(dev, "failed to get parent IRQ: %d\n", priv->irq);
- return priv->irq ? : -EINVAL;
- }
-
- if (priv->id == ID_MT7988)
- priv->irq_domain = irq_domain_add_linear(np, MT7530_NUM_PHYS,
- &mt7988_irq_domain_ops,
- priv);
- else
- priv->irq_domain = irq_domain_add_linear(np, MT7530_NUM_PHYS,
- &mt7530_irq_domain_ops,
- priv);
-
- if (!priv->irq_domain) {
- dev_err(dev, "failed to create IRQ domain\n");
- return -ENOMEM;
+ irq = of_irq_get(np, 0);
+ if (irq <= 0) {
+ dev_err(dev, "failed to get parent IRQ: %d\n", irq);
+ return irq ? : -EINVAL;
}
/* This register must be set for MT7530 to properly fire interrupts */
if (priv->id == ID_MT7530 || priv->id == ID_MT7621)
mt7530_set(priv, MT7530_TOP_SIG_CTRL, TOP_SIG_CTRL_NORMAL);
- ret = request_threaded_irq(priv->irq, NULL, mt7530_irq_thread_fn,
- IRQF_ONESHOT, KBUILD_MODNAME, priv);
- if (ret) {
- irq_domain_remove(priv->irq_domain);
- dev_err(dev, "failed to request IRQ: %d\n", ret);
+ ret = devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(dev),
+ priv->regmap, irq,
+ IRQF_ONESHOT,
+ 0, &mt7530_regmap_irq_chip,
+ &irq_data);
+ if (ret)
return ret;
- }
+
+ priv->irq_domain = regmap_irq_get_domain(irq_data);
return 0;
}
@@ -2271,26 +2297,6 @@ mt7530_free_mdio_irq(struct mt7530_priv *priv)
}
}
-static void
-mt7530_free_irq_common(struct mt7530_priv *priv)
-{
- free_irq(priv->irq, priv);
- irq_domain_remove(priv->irq_domain);
-}
-
-static void
-mt7530_free_irq(struct mt7530_priv *priv)
-{
- struct device_node *mnp, *np = priv->dev->of_node;
-
- mnp = of_get_child_by_name(np, "mdio");
- if (!mnp)
- mt7530_free_mdio_irq(priv);
- of_node_put(mnp);
-
- mt7530_free_irq_common(priv);
-}
-
static int
mt7530_setup_mdio(struct mt7530_priv *priv)
{
@@ -2325,13 +2331,13 @@ mt7530_setup_mdio(struct mt7530_priv *priv)
bus->parent = dev;
bus->phy_mask = ~ds->phys_mii_mask;
- if (priv->irq && !mnp)
+ if (priv->irq_domain && !mnp)
mt7530_setup_mdio_irq(priv);
ret = devm_of_mdiobus_register(dev, bus, mnp);
if (ret) {
dev_err(dev, "failed to register MDIO bus: %d\n", ret);
- if (priv->irq && !mnp)
+ if (priv->irq_domain && !mnp)
mt7530_free_mdio_irq(priv);
}
@@ -2405,7 +2411,7 @@ mt7530_setup(struct dsa_switch *ds)
}
/* Waiting for MT7530 got to stable */
- INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_HWTRAP);
+ INIT_MT7530_DUMMY_POLL(&p, priv, MT753X_TRAP);
ret = readx_poll_timeout(_mt7530_read, &p, val, val != 0,
20, 1000000);
if (ret < 0) {
@@ -2420,7 +2426,7 @@ mt7530_setup(struct dsa_switch *ds)
return -ENODEV;
}
- if ((val & HWTRAP_XTAL_MASK) == HWTRAP_XTAL_20MHZ) {
+ if ((val & MT7530_XTAL_MASK) == MT7530_XTAL_20MHZ) {
dev_err(priv->dev,
"MT7530 with a 20MHz XTAL is not supported!\n");
return -EINVAL;
@@ -2440,13 +2446,13 @@ mt7530_setup(struct dsa_switch *ds)
mt7530_rmw(priv, MT7530_TRGMII_RD(i),
RD_TAP_MASK, RD_TAP(16));
- /* Enable port 6 */
- val = mt7530_read(priv, MT7530_MHWTRAP);
- val &= ~MHWTRAP_P6_DIS & ~MHWTRAP_PHY_ACCESS;
- val |= MHWTRAP_MANUAL;
- mt7530_write(priv, MT7530_MHWTRAP, val);
+ /* Allow modifying the trap and directly accessing PHY registers via
+ * the MDIO bus the switch is on.
+ */
+ mt7530_rmw(priv, MT753X_MTRAP, MT7530_CHG_TRAP |
+ MT7530_PHY_INDIRECT_ACCESS, MT7530_CHG_TRAP);
- if ((val & HWTRAP_XTAL_MASK) == HWTRAP_XTAL_40MHZ)
+ if ((val & MT7530_XTAL_MASK) == MT7530_XTAL_40MHZ)
mt7530_pll_setup(priv);
mt753x_trap_frames(priv);
@@ -2454,12 +2460,14 @@ mt7530_setup(struct dsa_switch *ds)
/* Enable and reset MIB counters */
mt7530_mib_reset(ds);
- for (i = 0; i < MT7530_NUM_PORTS; i++) {
+ for (i = 0; i < priv->ds->num_ports; i++) {
/* Clear link settings and enable force mode to force link down
* on all ports until they're enabled later.
*/
- mt7530_rmw(priv, MT7530_PMCR_P(i), PMCR_LINK_SETTINGS_MASK |
- PMCR_FORCE_MODE, PMCR_FORCE_MODE);
+ mt7530_rmw(priv, MT753X_PMCR_P(i),
+ PMCR_LINK_SETTINGS_MASK |
+ MT753X_FORCE_MODE(priv->id),
+ MT753X_FORCE_MODE(priv->id));
/* Disable forwarding by default on all ports */
mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
@@ -2490,13 +2498,11 @@ mt7530_setup(struct dsa_switch *ds)
if (ret)
return ret;
- /* Setup port 5 */
- if (!dsa_is_unused_port(ds, 5)) {
- priv->p5_intf_sel = P5_INTF_SEL_GMAC5;
- } else {
+ /* Check for PHY muxing on port 5 */
+ if (dsa_is_unused_port(ds, 5)) {
/* Scan the ethernet nodes. Look for GMAC1, lookup the used PHY.
- * Set priv->p5_intf_sel to the appropriate value if PHY muxing
- * is detected.
+ * Set priv->p5_mode to the appropriate value if PHY muxing is
+ * detected.
*/
for_each_child_of_node(dn, mac_np) {
if (!of_device_is_compatible(mac_np,
@@ -2511,7 +2517,8 @@ mt7530_setup(struct dsa_switch *ds)
if (!phy_node)
continue;
- if (phy_node->parent == priv->dev->of_node->parent) {
+ if (phy_node->parent == priv->dev->of_node->parent ||
+ phy_node->parent->parent == priv->dev->of_node) {
ret = of_get_phy_mode(mac_np, &interface);
if (ret && ret != -ENODEV) {
of_node_put(mac_np);
@@ -2520,18 +2527,20 @@ mt7530_setup(struct dsa_switch *ds)
}
id = of_mdio_parse_addr(ds->dev, phy_node);
if (id == 0)
- priv->p5_intf_sel = P5_INTF_SEL_PHY_P0;
+ priv->p5_mode = MUX_PHY_P0;
if (id == 4)
- priv->p5_intf_sel = P5_INTF_SEL_PHY_P4;
+ priv->p5_mode = MUX_PHY_P4;
}
of_node_put(mac_np);
of_node_put(phy_node);
break;
}
- if (priv->p5_intf_sel == P5_INTF_SEL_PHY_P0 ||
- priv->p5_intf_sel == P5_INTF_SEL_PHY_P4)
+ if (priv->p5_mode == MUX_PHY_P0 ||
+ priv->p5_mode == MUX_PHY_P4) {
+ mt7530_clear(priv, MT753X_MTRAP, MT7530_P5_DIS);
mt7530_setup_port5(ds, interface);
+ }
}
#ifdef CONFIG_GPIOLIB
@@ -2556,21 +2565,26 @@ mt7531_setup_common(struct dsa_switch *ds)
struct mt7530_priv *priv = ds->priv;
int ret, i;
+ ds->assisted_learning_on_cpu_port = true;
+ ds->mtu_enforcement_ingress = true;
+
mt753x_trap_frames(priv);
/* Enable and reset MIB counters */
mt7530_mib_reset(ds);
/* Disable flooding on all ports */
- mt7530_clear(priv, MT7530_MFC, BC_FFP_MASK | UNM_FFP_MASK |
+ mt7530_clear(priv, MT753X_MFC, BC_FFP_MASK | UNM_FFP_MASK |
UNU_FFP_MASK);
- for (i = 0; i < MT7530_NUM_PORTS; i++) {
+ for (i = 0; i < priv->ds->num_ports; i++) {
/* Clear link settings and enable force mode to force link down
* on all ports until they're enabled later.
*/
- mt7530_rmw(priv, MT7530_PMCR_P(i), PMCR_LINK_SETTINGS_MASK |
- MT7531_FORCE_MODE, MT7531_FORCE_MODE);
+ mt7530_rmw(priv, MT753X_PMCR_P(i),
+ PMCR_LINK_SETTINGS_MASK |
+ MT753X_FORCE_MODE(priv->id),
+ MT753X_FORCE_MODE(priv->id));
/* Disable forwarding by default on all ports */
mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
@@ -2599,12 +2613,18 @@ mt7531_setup_common(struct dsa_switch *ds)
/* Allow mirroring frames received on the local port (monitor port). */
mt7530_set(priv, MT753X_AGC, LOCAL_EN);
+ /* Enable Special Tag for rx frames */
+ if (priv->id == ID_EN7581 || priv->id == ID_AN7583)
+ mt7530_write(priv, MT753X_CPORT_SPTAG_CFG,
+ CPORT_SW2FE_STAG_EN | CPORT_FE2SW_STAG_EN);
+
/* Flush the FDB table */
ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL);
if (ret < 0)
return ret;
- return 0;
+ /* Setup VLAN ID 0 for VLAN-unaware bridges */
+ return mt7530_setup_vlan0(priv);
}
static int
@@ -2629,7 +2649,7 @@ mt7531_setup(struct dsa_switch *ds)
}
/* Waiting for MT7530 got to stable */
- INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_HWTRAP);
+ INIT_MT7530_DUMMY_POLL(&p, priv, MT753X_TRAP);
ret = readx_poll_timeout(_mt7530_read, &p, val, val != 0,
20, 1000000);
if (ret < 0) {
@@ -2652,8 +2672,8 @@ mt7531_setup(struct dsa_switch *ds)
priv->p5_sgmii = !!(val & PAD_DUAL_SGMII_EN);
/* Force link down on all ports before internal reset */
- for (i = 0; i < MT7530_NUM_PORTS; i++)
- mt7530_write(priv, MT7530_PMCR_P(i), MT7531_FORCE_LNK);
+ for (i = 0; i < priv->ds->num_ports; i++)
+ mt7530_write(priv, MT753X_PMCR_P(i), MT7531_FORCE_MODE_LNK);
/* Reset the switch through internal reset */
mt7530_write(priv, MT7530_SYS_CTRL, SYS_CTRL_SW_RST | SYS_CTRL_REG_RST);
@@ -2661,16 +2681,16 @@ mt7531_setup(struct dsa_switch *ds)
if (!priv->p5_sgmii) {
mt7531_pll_setup(priv);
} else {
- /* Let ds->user_mii_bus be able to access external phy. */
+ /* Unlike MT7531BE, the GPIO 6-12 pins are not used for RGMII on
+ * MT7531AE. Set the GPIO 11-12 pins to function as MDC and MDIO
+ * to expose the MDIO bus of the switch.
+ */
mt7530_rmw(priv, MT7531_GPIO_MODE1, MT7531_GPIO11_RG_RXD2_MASK,
MT7531_EXT_P_MDC_11);
mt7530_rmw(priv, MT7531_GPIO_MODE1, MT7531_GPIO12_RG_RXD3_MASK,
MT7531_EXT_P_MDIO_12);
}
- if (!dsa_is_unused_port(ds, 5))
- priv->p5_intf_sel = P5_INTF_SEL_GMAC5;
-
mt7530_rmw(priv, MT7531_GPIO_MODE0, MT7531_GPIO0_MASK,
MT7531_GPIO0_INTERRUPT);
@@ -2679,36 +2699,35 @@ mt7531_setup(struct dsa_switch *ds)
* phy_[read,write]_mmd_indirect is called, we provide our own
* mt7531_ind_mmd_phy_[read,write] to complete this function.
*/
- val = mt7531_ind_c45_phy_read(priv, MT753X_CTRL_PHY_ADDR,
+ val = mt7531_ind_c45_phy_read(priv,
+ MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
MDIO_MMD_VEND2, CORE_PLL_GROUP4);
val |= MT7531_RG_SYSPLL_DMY2 | MT7531_PHY_PLL_BYPASS_MODE;
val &= ~MT7531_PHY_PLL_OFF;
- mt7531_ind_c45_phy_write(priv, MT753X_CTRL_PHY_ADDR, MDIO_MMD_VEND2,
- CORE_PLL_GROUP4, val);
+ mt7531_ind_c45_phy_write(priv,
+ MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
+ MDIO_MMD_VEND2, CORE_PLL_GROUP4, val);
/* Disable EEE advertisement on the switch PHYs. */
- for (i = MT753X_CTRL_PHY_ADDR;
- i < MT753X_CTRL_PHY_ADDR + MT7530_NUM_PHYS; i++) {
+ for (i = MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr);
+ i < MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr) + MT7530_NUM_PHYS;
+ i++) {
mt7531_ind_c45_phy_write(priv, i, MDIO_MMD_AN, MDIO_AN_EEE_ADV,
0);
}
- mt7531_setup_common(ds);
-
- /* Setup VLAN ID 0 for VLAN-unaware bridges */
- ret = mt7530_setup_vlan0(priv);
+ ret = mt7531_setup_common(ds);
if (ret)
return ret;
- ds->assisted_learning_on_cpu_port = true;
- ds->mtu_enforcement_ingress = true;
-
return 0;
}
static void mt7530_mac_port_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
+ config->mac_capabilities |= MAC_10 | MAC_100 | MAC_1000FD;
+
switch (port) {
/* Ports which are connected to switch PHYs. There is no MII pinout. */
case 0 ... 4:
@@ -2740,6 +2759,8 @@ static void mt7531_mac_port_get_caps(struct dsa_switch *ds, int port,
{
struct mt7530_priv *priv = ds->priv;
+ config->mac_capabilities |= MAC_10 | MAC_100 | MAC_1000FD;
+
switch (port) {
/* Ports which are connected to switch PHYs. There is no MII pinout. */
case 0 ... 4:
@@ -2779,14 +2800,39 @@ static void mt7988_mac_port_get_caps(struct dsa_switch *ds, int port,
case 0 ... 3:
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
config->supported_interfaces);
+
+ config->mac_capabilities |= MAC_10 | MAC_100 | MAC_1000FD;
break;
/* Port 6 is connected to SoC's XGMII MAC. There is no MII pinout. */
case 6:
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
config->supported_interfaces);
- config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
- MAC_10000FD;
+
+ config->mac_capabilities |= MAC_10000FD;
+ break;
+ }
+}
+
+static void en7581_mac_port_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ switch (port) {
+ /* Ports which are connected to switch PHYs. There is no MII pinout. */
+ case 0 ... 4:
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+
+ config->mac_capabilities |= MAC_10 | MAC_100 | MAC_1000FD;
+ break;
+
+ /* Port 6 is connected to SoC's XGMII MAC. There is no MII pinout. */
+ case 6:
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+
+ config->mac_capabilities |= MAC_10000FD;
+ break;
}
}
@@ -2802,7 +2848,7 @@ mt7530_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
mt7530_setup_port6(priv->ds, interface);
}
-static void mt7531_rgmii_setup(struct mt7530_priv *priv, u32 port,
+static void mt7531_rgmii_setup(struct mt7530_priv *priv,
phy_interface_t interface,
struct phy_device *phydev)
{
@@ -2853,62 +2899,70 @@ mt7531_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
if (phy_interface_mode_is_rgmii(interface)) {
dp = dsa_to_port(ds, port);
phydev = dp->user->phydev;
- mt7531_rgmii_setup(priv, port, interface, phydev);
+ mt7531_rgmii_setup(priv, interface, phydev);
}
}
static struct phylink_pcs *
-mt753x_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
+mt753x_phylink_mac_select_pcs(struct phylink_config *config,
phy_interface_t interface)
{
- struct mt7530_priv *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct mt7530_priv *priv = dp->ds->priv;
switch (interface) {
case PHY_INTERFACE_MODE_TRGMII:
- return &priv->pcs[port].pcs;
+ return &priv->pcs[dp->index].pcs;
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_1000BASEX:
case PHY_INTERFACE_MODE_2500BASEX:
- return priv->ports[port].sgmii_pcs;
+ return priv->ports[dp->index].sgmii_pcs;
default:
return NULL;
}
}
static void
-mt753x_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
+mt753x_phylink_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
- struct mt7530_priv *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct dsa_switch *ds = dp->ds;
+ struct mt7530_priv *priv;
+ int port = dp->index;
+
+ priv = ds->priv;
if ((port == 5 || port == 6) && priv->info->mac_port_config)
priv->info->mac_port_config(ds, port, mode, state->interface);
/* Are we connected to external phy */
if (port == 5 && dsa_is_user_port(ds, 5))
- mt7530_set(priv, MT7530_PMCR_P(port), PMCR_EXT_PHY);
+ mt7530_set(priv, MT753X_PMCR_P(port), PMCR_EXT_PHY);
}
-static void mt753x_phylink_mac_link_down(struct dsa_switch *ds, int port,
+static void mt753x_phylink_mac_link_down(struct phylink_config *config,
unsigned int mode,
phy_interface_t interface)
{
- struct mt7530_priv *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct mt7530_priv *priv = dp->ds->priv;
- mt7530_clear(priv, MT7530_PMCR_P(port), PMCR_LINK_SETTINGS_MASK);
+ mt7530_clear(priv, MT753X_PMCR_P(dp->index), PMCR_LINK_SETTINGS_MASK);
}
-static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port,
+static void mt753x_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
unsigned int mode,
phy_interface_t interface,
- struct phy_device *phydev,
int speed, int duplex,
bool tx_pause, bool rx_pause)
{
- struct mt7530_priv *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct mt7530_priv *priv = dp->ds->priv;
u32 mcr;
- mcr = PMCR_RX_EN | PMCR_TX_EN | PMCR_FORCE_LNK;
+ mcr = PMCR_MAC_RX_EN | PMCR_MAC_TX_EN | PMCR_FORCE_LNK;
switch (speed) {
case SPEED_1000:
@@ -2923,34 +2977,65 @@ static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port,
if (duplex == DUPLEX_FULL) {
mcr |= PMCR_FORCE_FDX;
if (tx_pause)
- mcr |= PMCR_TX_FC_EN;
+ mcr |= PMCR_FORCE_TX_FC_EN;
if (rx_pause)
- mcr |= PMCR_RX_FC_EN;
+ mcr |= PMCR_FORCE_RX_FC_EN;
}
- if (mode == MLO_AN_PHY && phydev && phy_init_eee(phydev, false) >= 0) {
- switch (speed) {
- case SPEED_1000:
- case SPEED_2500:
- mcr |= PMCR_FORCE_EEE1G;
- break;
- case SPEED_100:
- mcr |= PMCR_FORCE_EEE100;
- break;
- }
- }
+ mt7530_set(priv, MT753X_PMCR_P(dp->index), mcr);
+}
+
+static void mt753x_phylink_mac_disable_tx_lpi(struct phylink_config *config)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct mt7530_priv *priv = dp->ds->priv;
+
+ mt7530_clear(priv, MT753X_PMCR_P(dp->index),
+ PMCR_FORCE_EEE1G | PMCR_FORCE_EEE100);
+}
+
+static int mt753x_phylink_mac_enable_tx_lpi(struct phylink_config *config,
+ u32 timer, bool tx_clock_stop)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct mt7530_priv *priv = dp->ds->priv;
+ u32 val;
+
+ /* If the timer is zero, then set LPI_MODE_EN, which allows the
+ * system to enter LPI mode immediately rather than waiting for
+ * the LPI threshold.
+ */
+ if (!timer)
+ val = LPI_MODE_EN;
+ else if (FIELD_FIT(LPI_THRESH_MASK, timer))
+ val = FIELD_PREP(LPI_THRESH_MASK, timer);
+ else
+ val = LPI_THRESH_MASK;
+
+ mt7530_rmw(priv, MT753X_PMEEECR_P(dp->index),
+ LPI_THRESH_MASK | LPI_MODE_EN, val);
+
+ mt7530_set(priv, MT753X_PMCR_P(dp->index),
+ PMCR_FORCE_EEE1G | PMCR_FORCE_EEE100);
- mt7530_set(priv, MT7530_PMCR_P(port), mcr);
+ return 0;
}
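A minimal user-space sketch of the threshold encoding done in mt753x_phylink_mac_enable_tx_lpi(), with FIELD_FIT()/FIELD_PREP() specialized by hand for LPI_THRESH_MASK = GENMASK(15, 4); the timer values are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for the kernel helpers, specialized for GENMASK(15, 4). */
    #define LPI_THRESH_MASK 0xfff0u
    #define LPI_MODE_EN     0x1u

    static uint32_t lpi_field(uint32_t timer)
    {
        if (!timer)
            return LPI_MODE_EN;              /* enter LPI immediately */
        if (timer <= 0xfff)                  /* FIELD_FIT(LPI_THRESH_MASK, timer) */
            return (timer << 4) & LPI_THRESH_MASK;  /* FIELD_PREP() */
        return LPI_THRESH_MASK;              /* clamp to the maximum threshold */
    }

    int main(void)
    {
        printf("0x%04x 0x%04x 0x%04x\n",
               lpi_field(0), lpi_field(100), lpi_field(100000));
        /* prints 0x0001 0x0640 0xfff0 */
        return 0;
    }

The three cases correspond to the zero-timer shortcut, a timer that fits the 12-bit field, and a timer that has to be clamped to the largest programmable threshold.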
static void mt753x_phylink_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
struct mt7530_priv *priv = ds->priv;
+ u32 eeecr;
+
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE;
- /* This switch only supports full-duplex at 1Gbps */
- config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
- MAC_10 | MAC_100 | MAC_1000FD;
+ config->lpi_capabilities = MAC_100FD | MAC_1000FD | MAC_2500FD;
+
+ eeecr = mt7530_read(priv, MT753X_PMEEECR_P(port));
+ /* tx_lpi_timer should be in microseconds. The time units for
+ * LPI threshold are unspecified.
+ */
+ config->lpi_timer_default = FIELD_GET(LPI_THRESH_MASK, eeecr);
priv->info->mac_port_get_caps(ds, port, config);
}
@@ -2967,7 +3052,7 @@ static int mt753x_pcs_validate(struct phylink_pcs *pcs,
return 0;
}
-static void mt7530_pcs_get_state(struct phylink_pcs *pcs,
+static void mt7530_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mt7530_priv *priv = pcs_to_mt753x_pcs(pcs)->priv;
@@ -3036,53 +3121,31 @@ mt753x_setup(struct dsa_switch *ds)
return ret;
ret = mt7530_setup_mdio(priv);
- if (ret && priv->irq)
- mt7530_free_irq_common(priv);
+ if (ret)
+ return ret;
/* Initialise the PCS devices */
for (i = 0; i < priv->ds->num_ports; i++) {
priv->pcs[i].pcs.ops = priv->info->pcs_ops;
- priv->pcs[i].pcs.neg_mode = true;
priv->pcs[i].priv = priv;
priv->pcs[i].port = i;
}
- if (priv->create_sgmii) {
+ if (priv->create_sgmii)
ret = priv->create_sgmii(priv);
- if (ret && priv->irq)
- mt7530_free_irq(priv);
- }
-
- return ret;
-}
-static int mt753x_get_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_keee *e)
-{
- struct mt7530_priv *priv = ds->priv;
- u32 eeecr = mt7530_read(priv, MT7530_PMEEECR_P(port));
-
- e->tx_lpi_enabled = !(eeecr & LPI_MODE_EN);
- e->tx_lpi_timer = GET_LPI_THRESH(eeecr);
+ if (ret && priv->irq_domain)
+ mt7530_free_mdio_irq(priv);
- return 0;
+ return ret;
}
static int mt753x_set_mac_eee(struct dsa_switch *ds, int port,
struct ethtool_keee *e)
{
- struct mt7530_priv *priv = ds->priv;
- u32 set, mask = LPI_THRESH_MASK | LPI_MODE_EN;
-
if (e->tx_lpi_timer > 0xFFF)
return -EINVAL;
- set = SET_LPI_THRESH(e->tx_lpi_timer);
- if (!e->tx_lpi_enabled)
- /* Force LPI Mode without a delay */
- set |= LPI_MODE_EN;
- mt7530_rmw(priv, MT7530_PMEEECR_P(port), mask, set);
-
return 0;
}
@@ -3110,10 +3173,59 @@ mt753x_conduit_state_change(struct dsa_switch *ds,
else
priv->active_cpu_ports &= ~mask;
- if (priv->active_cpu_ports)
- val = CPU_EN | CPU_PORT(__ffs(priv->active_cpu_ports));
+ if (priv->active_cpu_ports) {
+ val = MT7530_CPU_EN |
+ MT7530_CPU_PORT(__ffs(priv->active_cpu_ports));
+ }
+
+ mt7530_rmw(priv, MT753X_MFC, MT7530_CPU_EN | MT7530_CPU_PORT_MASK, val);
+}
- mt7530_rmw(priv, MT7530_MFC, CPU_EN | CPU_PORT_MASK, val);
+static int mt753x_tc_setup_qdisc_tbf(struct dsa_switch *ds, int port,
+ struct tc_tbf_qopt_offload *qopt)
+{
+ struct tc_tbf_qopt_offload_replace_params *p = &qopt->replace_params;
+ struct mt7530_priv *priv = ds->priv;
+ u32 rate = 0;
+
+ switch (qopt->command) {
+ case TC_TBF_REPLACE:
+ rate = div_u64(p->rate.rate_bytes_ps, 1000) << 3; /* kbps */
+ fallthrough;
+ case TC_TBF_DESTROY: {
+ u32 val, tick;
+
+ mt7530_rmw(priv, MT753X_GERLCR, EGR_BC_MASK,
+ EGR_BC_CRC_IPG_PREAMBLE);
+
+ /* If the rate is greater than 10 Mbps the tick is 1/32 ms,
+ * otherwise it is 1 ms.
+ */
+ tick = rate > 10000 ? 2 : 7;
+ val = FIELD_PREP(ERLCR_CIR_MASK, (rate >> 5)) |
+ FIELD_PREP(ERLCR_EN_MASK, !!rate) |
+ FIELD_PREP(ERLCR_EXP_MASK, tick) |
+ ERLCR_TBF_MODE_MASK |
+ FIELD_PREP(ERLCR_MANT_MASK, 0xf);
+ mt7530_write(priv, MT753X_ERLCR_P(port), val);
+ break;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
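To make the TC_TBF_REPLACE arithmetic concrete, here is an illustrative re-creation of the rate maths only (no register access), assuming a 100 Mbit/s token-bucket policy as the input:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t rate_bytes_ps = 12500000;                /* 100 Mbit/s */
        uint32_t rate = (rate_bytes_ps / 1000) << 3;      /* 100000 kbps */
        uint32_t tick = rate > 10000 ? 2 : 7;             /* 1/32 ms vs 1 ms */
        uint32_t cir  = rate >> 5;                        /* 3125 */

        printf("rate=%u kbps tick=%u cir=%u\n", rate, tick, cir);
        return 0;
    }

With these numbers the driver would program ERLCR with CIR = 3125, the 1/32 ms tick exponent and the rate limiter enabled, while TC_TBF_DESTROY takes the same path with rate = 0 and therefore clears the enable bit.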
+
+static int mt753x_setup_tc(struct dsa_switch *ds, int port,
+ enum tc_setup_type type, void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_QDISC_TBF:
+ return mt753x_tc_setup_qdisc_tbf(ds, port, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
}
static int mt7988_setup(struct dsa_switch *ds)
@@ -3126,19 +3238,33 @@ static int mt7988_setup(struct dsa_switch *ds)
reset_control_deassert(priv->rstc);
usleep_range(20, 50);
+ /* AN7583 requires an additional tweak to CONN_CFG */
+ if (priv->id == ID_AN7583)
+ mt7530_rmw(priv, AN7583_GEPHY_CONN_CFG,
+ AN7583_CSR_DPHY_CKIN_SEL |
+ AN7583_CSR_PHY_CORE_REG_CLK_SEL |
+ AN7583_CSR_ETHER_AFE_PWD,
+ AN7583_CSR_DPHY_CKIN_SEL |
+ AN7583_CSR_PHY_CORE_REG_CLK_SEL |
+ FIELD_PREP(AN7583_CSR_ETHER_AFE_PWD, 0));
+
/* Reset the switch PHYs */
mt7530_write(priv, MT7530_SYS_CTRL, SYS_CTRL_PHY_RST);
return mt7531_setup_common(ds);
}
-const struct dsa_switch_ops mt7530_switch_ops = {
+static const struct dsa_switch_ops mt7530_switch_ops = {
.get_tag_protocol = mtk_get_tag_protocol,
.setup = mt753x_setup,
.preferred_default_local_cpu_port = mt753x_preferred_default_local_cpu_port,
.get_strings = mt7530_get_strings,
.get_ethtool_stats = mt7530_get_ethtool_stats,
.get_sset_count = mt7530_get_sset_count,
+ .get_eth_mac_stats = mt7530_get_eth_mac_stats,
+ .get_rmon_stats = mt7530_get_rmon_stats,
+ .get_eth_ctrl_stats = mt7530_get_eth_ctrl_stats,
+ .get_stats64 = mt7530_get_stats64,
.set_ageing_time = mt7530_set_ageing_time,
.port_enable = mt7530_port_enable,
.port_disable = mt7530_port_disable,
@@ -3160,15 +3286,22 @@ const struct dsa_switch_ops mt7530_switch_ops = {
.port_mirror_add = mt753x_port_mirror_add,
.port_mirror_del = mt753x_port_mirror_del,
.phylink_get_caps = mt753x_phylink_get_caps,
- .phylink_mac_select_pcs = mt753x_phylink_mac_select_pcs,
- .phylink_mac_config = mt753x_phylink_mac_config,
- .phylink_mac_link_down = mt753x_phylink_mac_link_down,
- .phylink_mac_link_up = mt753x_phylink_mac_link_up,
- .get_mac_eee = mt753x_get_mac_eee,
+ .support_eee = dsa_supports_eee,
.set_mac_eee = mt753x_set_mac_eee,
.conduit_state_change = mt753x_conduit_state_change,
+ .port_setup_tc = mt753x_setup_tc,
+ .port_hsr_join = dsa_port_simple_hsr_join,
+ .port_hsr_leave = dsa_port_simple_hsr_leave,
+};
+
+static const struct phylink_mac_ops mt753x_phylink_mac_ops = {
+ .mac_select_pcs = mt753x_phylink_mac_select_pcs,
+ .mac_config = mt753x_phylink_mac_config,
+ .mac_link_down = mt753x_phylink_mac_link_down,
+ .mac_link_up = mt753x_phylink_mac_link_up,
+ .mac_disable_tx_lpi = mt753x_phylink_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = mt753x_phylink_mac_enable_tx_lpi,
};
-EXPORT_SYMBOL_GPL(mt7530_switch_ops);
const struct mt753x_info mt753x_table[] = {
[ID_MT7621] = {
@@ -3214,6 +3347,26 @@ const struct mt753x_info mt753x_table[] = {
.phy_write_c45 = mt7531_ind_c45_phy_write,
.mac_port_get_caps = mt7988_mac_port_get_caps,
},
+ [ID_EN7581] = {
+ .id = ID_EN7581,
+ .pcs_ops = &mt7530_pcs_ops,
+ .sw_setup = mt7988_setup,
+ .phy_read_c22 = mt7531_ind_c22_phy_read,
+ .phy_write_c22 = mt7531_ind_c22_phy_write,
+ .phy_read_c45 = mt7531_ind_c45_phy_read,
+ .phy_write_c45 = mt7531_ind_c45_phy_write,
+ .mac_port_get_caps = en7581_mac_port_get_caps,
+ },
+ [ID_AN7583] = {
+ .id = ID_AN7583,
+ .pcs_ops = &mt7530_pcs_ops,
+ .sw_setup = mt7988_setup,
+ .phy_read_c22 = mt7531_ind_c22_phy_read,
+ .phy_write_c22 = mt7531_ind_c22_phy_write,
+ .phy_read_c45 = mt7531_ind_c45_phy_read,
+ .phy_write_c45 = mt7531_ind_c45_phy_write,
+ .mac_port_get_caps = en7581_mac_port_get_caps,
+ },
};
EXPORT_SYMBOL_GPL(mt753x_table);
@@ -3236,17 +3389,11 @@ mt7530_probe_common(struct mt7530_priv *priv)
if (!priv->info)
return -EINVAL;
- /* Sanity check if these required device operations are filled
- * properly.
- */
- if (!priv->info->sw_setup || !priv->info->phy_read_c22 ||
- !priv->info->phy_write_c22 || !priv->info->mac_port_get_caps)
- return -EINVAL;
-
priv->id = priv->info->id;
priv->dev = dev;
priv->ds->priv = priv;
priv->ds->ops = &mt7530_switch_ops;
+ priv->ds->phylink_mac_ops = &mt753x_phylink_mac_ops;
mutex_init(&priv->reg_mutex);
dev_set_drvdata(dev, priv);
@@ -3257,8 +3404,8 @@ EXPORT_SYMBOL_GPL(mt7530_probe_common);
void
mt7530_remove_common(struct mt7530_priv *priv)
{
- if (priv->irq)
- mt7530_free_irq(priv);
+ if (priv->irq_domain)
+ mt7530_free_mdio_irq(priv);
dsa_unregister_switch(priv->ds);
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index a08053390b28..3e0090bed298 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -19,6 +19,8 @@ enum mt753x_id {
ID_MT7621 = 1,
ID_MT7531 = 2,
ID_MT7988 = 3,
+ ID_EN7581 = 4,
+ ID_AN7583 = 5,
};
#define NUM_TRGMII_CTRL 5
@@ -36,78 +38,106 @@ enum mt753x_id {
#define MT753X_AGC 0xc
#define LOCAL_EN BIT(7)
-/* Registers to mac forward control for unknown frames */
-#define MT7530_MFC 0x10
-#define BC_FFP(x) (((x) & 0xff) << 24)
-#define BC_FFP_MASK BC_FFP(~0)
-#define UNM_FFP(x) (((x) & 0xff) << 16)
-#define UNM_FFP_MASK UNM_FFP(~0)
-#define UNU_FFP(x) (((x) & 0xff) << 8)
-#define UNU_FFP_MASK UNU_FFP(~0)
-#define CPU_EN BIT(7)
-#define CPU_PORT_MASK GENMASK(6, 4)
-#define CPU_PORT(x) FIELD_PREP(CPU_PORT_MASK, x)
-#define MIRROR_EN BIT(3)
-#define MIRROR_PORT(x) ((x) & 0x7)
-#define MIRROR_MASK 0x7
-
-/* Registers for CPU forward control */
+/* Register for MAC forward control */
+#define MT753X_MFC 0x10
+#define BC_FFP_MASK GENMASK(31, 24)
+#define BC_FFP(x) FIELD_PREP(BC_FFP_MASK, x)
+#define UNM_FFP_MASK GENMASK(23, 16)
+#define UNM_FFP(x) FIELD_PREP(UNM_FFP_MASK, x)
+#define UNU_FFP_MASK GENMASK(15, 8)
+#define UNU_FFP(x) FIELD_PREP(UNU_FFP_MASK, x)
+#define MT7530_CPU_EN BIT(7)
+#define MT7530_CPU_PORT_MASK GENMASK(6, 4)
+#define MT7530_CPU_PORT(x) FIELD_PREP(MT7530_CPU_PORT_MASK, x)
+#define MT7530_MIRROR_EN BIT(3)
+#define MT7530_MIRROR_PORT_MASK GENMASK(2, 0)
+#define MT7530_MIRROR_PORT_GET(x) FIELD_GET(MT7530_MIRROR_PORT_MASK, x)
+#define MT7530_MIRROR_PORT_SET(x) FIELD_PREP(MT7530_MIRROR_PORT_MASK, x)
+#define MT7531_QRY_FFP_MASK GENMASK(7, 0)
+#define MT7531_QRY_FFP(x) FIELD_PREP(MT7531_QRY_FFP_MASK, x)
+
+/* Register for CPU forward control */
#define MT7531_CFC 0x4
#define MT7531_MIRROR_EN BIT(19)
-#define MT7531_MIRROR_MASK (MIRROR_MASK << 16)
-#define MT7531_MIRROR_PORT_GET(x) (((x) >> 16) & MIRROR_MASK)
-#define MT7531_MIRROR_PORT_SET(x) (((x) & MIRROR_MASK) << 16)
+#define MT7531_MIRROR_PORT_MASK GENMASK(18, 16)
+#define MT7531_MIRROR_PORT_GET(x) FIELD_GET(MT7531_MIRROR_PORT_MASK, x)
+#define MT7531_MIRROR_PORT_SET(x) FIELD_PREP(MT7531_MIRROR_PORT_MASK, x)
#define MT7531_CPU_PMAP_MASK GENMASK(7, 0)
#define MT7531_CPU_PMAP(x) FIELD_PREP(MT7531_CPU_PMAP_MASK, x)
-#define MT753X_MIRROR_REG(id) ((((id) == ID_MT7531) || ((id) == ID_MT7988)) ? \
- MT7531_CFC : MT7530_MFC)
-#define MT753X_MIRROR_EN(id) ((((id) == ID_MT7531) || ((id) == ID_MT7988)) ? \
- MT7531_MIRROR_EN : MIRROR_EN)
-#define MT753X_MIRROR_MASK(id) ((((id) == ID_MT7531) || ((id) == ID_MT7988)) ? \
- MT7531_MIRROR_MASK : MIRROR_MASK)
-
-/* Registers for BPDU and PAE frame control*/
+#define MT753X_MIRROR_REG(id) ((id == ID_MT7531 || \
+ id == ID_MT7988 || \
+ id == ID_EN7581 || \
+ id == ID_AN7583) ? \
+ MT7531_CFC : MT753X_MFC)
+
+#define MT753X_MIRROR_EN(id) ((id == ID_MT7531 || \
+ id == ID_MT7988 || \
+ id == ID_EN7581) ? \
+ MT7531_MIRROR_EN : MT7530_MIRROR_EN)
+
+#define MT753X_MIRROR_PORT_MASK(id) ((id == ID_MT7531 || \
+ id == ID_MT7988 || \
+ id == ID_EN7581 || \
+ id == ID_AN7583) ? \
+ MT7531_MIRROR_PORT_MASK : \
+ MT7530_MIRROR_PORT_MASK)
+
+#define MT753X_MIRROR_PORT_GET(id, val) ((id == ID_MT7531 || \
+ id == ID_MT7988 || \
+ id == ID_EN7581 || \
+ id == ID_AN7583) ? \
+ MT7531_MIRROR_PORT_GET(val) : \
+ MT7530_MIRROR_PORT_GET(val))
+
+#define MT753X_MIRROR_PORT_SET(id, val) ((id == ID_MT7531 || \
+ id == ID_MT7988 || \
+ id == ID_EN7581 || \
+ id == ID_AN7583) ? \
+ MT7531_MIRROR_PORT_SET(val) : \
+ MT7530_MIRROR_PORT_SET(val))
+
+/* Register for BPDU and PAE frame control */
#define MT753X_BPC 0x24
-#define MT753X_PAE_BPDU_FR BIT(25)
-#define MT753X_PAE_EG_TAG_MASK GENMASK(24, 22)
-#define MT753X_PAE_EG_TAG(x) FIELD_PREP(MT753X_PAE_EG_TAG_MASK, x)
-#define MT753X_PAE_PORT_FW_MASK GENMASK(18, 16)
-#define MT753X_PAE_PORT_FW(x) FIELD_PREP(MT753X_PAE_PORT_FW_MASK, x)
-#define MT753X_BPDU_EG_TAG_MASK GENMASK(8, 6)
-#define MT753X_BPDU_EG_TAG(x) FIELD_PREP(MT753X_BPDU_EG_TAG_MASK, x)
-#define MT753X_BPDU_PORT_FW_MASK GENMASK(2, 0)
-
-/* Register for :01 and :02 MAC DA frame control */
+#define PAE_BPDU_FR BIT(25)
+#define PAE_EG_TAG_MASK GENMASK(24, 22)
+#define PAE_EG_TAG(x) FIELD_PREP(PAE_EG_TAG_MASK, x)
+#define PAE_PORT_FW_MASK GENMASK(18, 16)
+#define PAE_PORT_FW(x) FIELD_PREP(PAE_PORT_FW_MASK, x)
+#define BPDU_EG_TAG_MASK GENMASK(8, 6)
+#define BPDU_EG_TAG(x) FIELD_PREP(BPDU_EG_TAG_MASK, x)
+#define BPDU_PORT_FW_MASK GENMASK(2, 0)
+
+/* Register for 01-80-C2-00-00-[01,02] MAC DA frame control */
#define MT753X_RGAC1 0x28
-#define MT753X_R02_BPDU_FR BIT(25)
-#define MT753X_R02_EG_TAG_MASK GENMASK(24, 22)
-#define MT753X_R02_EG_TAG(x) FIELD_PREP(MT753X_R02_EG_TAG_MASK, x)
-#define MT753X_R02_PORT_FW_MASK GENMASK(18, 16)
-#define MT753X_R02_PORT_FW(x) FIELD_PREP(MT753X_R02_PORT_FW_MASK, x)
-#define MT753X_R01_BPDU_FR BIT(9)
-#define MT753X_R01_EG_TAG_MASK GENMASK(8, 6)
-#define MT753X_R01_EG_TAG(x) FIELD_PREP(MT753X_R01_EG_TAG_MASK, x)
-#define MT753X_R01_PORT_FW_MASK GENMASK(2, 0)
-
-/* Register for :03 and :0E MAC DA frame control */
+#define R02_BPDU_FR BIT(25)
+#define R02_EG_TAG_MASK GENMASK(24, 22)
+#define R02_EG_TAG(x) FIELD_PREP(R02_EG_TAG_MASK, x)
+#define R02_PORT_FW_MASK GENMASK(18, 16)
+#define R02_PORT_FW(x) FIELD_PREP(R02_PORT_FW_MASK, x)
+#define R01_BPDU_FR BIT(9)
+#define R01_EG_TAG_MASK GENMASK(8, 6)
+#define R01_EG_TAG(x) FIELD_PREP(R01_EG_TAG_MASK, x)
+#define R01_PORT_FW_MASK GENMASK(2, 0)
+
+/* Register for 01-80-C2-00-00-[03,0E] MAC DA frame control */
#define MT753X_RGAC2 0x2c
-#define MT753X_R0E_BPDU_FR BIT(25)
-#define MT753X_R0E_EG_TAG_MASK GENMASK(24, 22)
-#define MT753X_R0E_EG_TAG(x) FIELD_PREP(MT753X_R0E_EG_TAG_MASK, x)
-#define MT753X_R0E_PORT_FW_MASK GENMASK(18, 16)
-#define MT753X_R0E_PORT_FW(x) FIELD_PREP(MT753X_R0E_PORT_FW_MASK, x)
-#define MT753X_R03_BPDU_FR BIT(9)
-#define MT753X_R03_EG_TAG_MASK GENMASK(8, 6)
-#define MT753X_R03_EG_TAG(x) FIELD_PREP(MT753X_R03_EG_TAG_MASK, x)
-#define MT753X_R03_PORT_FW_MASK GENMASK(2, 0)
-
-enum mt753x_bpdu_port_fw {
- MT753X_BPDU_FOLLOW_MFC,
- MT753X_BPDU_CPU_EXCLUDE = 4,
- MT753X_BPDU_CPU_INCLUDE = 5,
- MT753X_BPDU_CPU_ONLY = 6,
- MT753X_BPDU_DROP = 7,
+#define R0E_BPDU_FR BIT(25)
+#define R0E_EG_TAG_MASK GENMASK(24, 22)
+#define R0E_EG_TAG(x) FIELD_PREP(R0E_EG_TAG_MASK, x)
+#define R0E_PORT_FW_MASK GENMASK(18, 16)
+#define R0E_PORT_FW(x) FIELD_PREP(R0E_PORT_FW_MASK, x)
+#define R03_BPDU_FR BIT(9)
+#define R03_EG_TAG_MASK GENMASK(8, 6)
+#define R03_EG_TAG(x) FIELD_PREP(R03_EG_TAG_MASK, x)
+#define R03_PORT_FW_MASK GENMASK(2, 0)
+
+enum mt753x_to_cpu_fw {
+ TO_CPU_FW_SYSTEM_DEFAULT,
+ TO_CPU_FW_CPU_EXCLUDE = 4,
+ TO_CPU_FW_CPU_INCLUDE = 5,
+ TO_CPU_FW_CPU_ONLY = 6,
+ TO_CPU_FW_DROP = 7,
};
/* Registers for address table access */
@@ -223,6 +253,18 @@ enum mt7530_vlan_egress_attr {
#define AGE_UNIT_MAX 0xfff
#define AGE_UNIT(x) (AGE_UNIT_MASK & (x))
+#define MT753X_ERLCR_P(x) (0x1040 + ((x) * 0x100))
+#define ERLCR_CIR_MASK GENMASK(31, 16)
+#define ERLCR_EN_MASK BIT(15)
+#define ERLCR_EXP_MASK GENMASK(11, 8)
+#define ERLCR_TBF_MODE_MASK BIT(7)
+#define ERLCR_MANT_MASK GENMASK(6, 0)
+
+#define MT753X_GERLCR 0x10e0
+#define EGR_BC_MASK GENMASK(7, 0)
+#define EGR_BC_CRC 0x4 /* crc */
+#define EGR_BC_CRC_IPG_PREAMBLE 0x18 /* crc + ipg + preamble */
+
/* Register for port STP state control */
#define MT7530_SSP_P(x) (0x2000 + ((x) * 0x100))
#define FID_PST(fid, state) (((state) & 0x3) << ((fid) * 2))
@@ -304,48 +346,59 @@ enum mt7530_vlan_port_acc_frm {
#define G0_PORT_VID_DEF G0_PORT_VID(0)
/* Register for port MAC control register */
-#define MT7530_PMCR_P(x) (0x3000 + ((x) * 0x100))
-#define PMCR_IFG_XMIT(x) (((x) & 0x3) << 18)
+#define MT753X_PMCR_P(x) (0x3000 + ((x) * 0x100))
+#define PMCR_IFG_XMIT_MASK GENMASK(19, 18)
+#define PMCR_IFG_XMIT(x) FIELD_PREP(PMCR_IFG_XMIT_MASK, x)
#define PMCR_EXT_PHY BIT(17)
#define PMCR_MAC_MODE BIT(16)
-#define PMCR_FORCE_MODE BIT(15)
-#define PMCR_TX_EN BIT(14)
-#define PMCR_RX_EN BIT(13)
+#define MT7530_FORCE_MODE BIT(15)
+#define PMCR_MAC_TX_EN BIT(14)
+#define PMCR_MAC_RX_EN BIT(13)
#define PMCR_BACKOFF_EN BIT(9)
#define PMCR_BACKPR_EN BIT(8)
#define PMCR_FORCE_EEE1G BIT(7)
#define PMCR_FORCE_EEE100 BIT(6)
-#define PMCR_TX_FC_EN BIT(5)
-#define PMCR_RX_FC_EN BIT(4)
+#define PMCR_FORCE_RX_FC_EN BIT(5)
+#define PMCR_FORCE_TX_FC_EN BIT(4)
#define PMCR_FORCE_SPEED_1000 BIT(3)
#define PMCR_FORCE_SPEED_100 BIT(2)
#define PMCR_FORCE_FDX BIT(1)
#define PMCR_FORCE_LNK BIT(0)
-#define PMCR_SPEED_MASK (PMCR_FORCE_SPEED_100 | \
- PMCR_FORCE_SPEED_1000)
-#define MT7531_FORCE_LNK BIT(31)
-#define MT7531_FORCE_SPD BIT(30)
-#define MT7531_FORCE_DPX BIT(29)
-#define MT7531_FORCE_RX_FC BIT(28)
-#define MT7531_FORCE_TX_FC BIT(27)
-#define MT7531_FORCE_MODE (MT7531_FORCE_LNK | \
- MT7531_FORCE_SPD | \
- MT7531_FORCE_DPX | \
- MT7531_FORCE_RX_FC | \
- MT7531_FORCE_TX_FC)
-#define PMCR_LINK_SETTINGS_MASK (PMCR_TX_EN | PMCR_FORCE_SPEED_1000 | \
- PMCR_RX_EN | PMCR_FORCE_SPEED_100 | \
- PMCR_TX_FC_EN | PMCR_RX_FC_EN | \
- PMCR_FORCE_FDX | PMCR_FORCE_LNK | \
- PMCR_FORCE_EEE1G | PMCR_FORCE_EEE100)
-
-#define MT7530_PMEEECR_P(x) (0x3004 + (x) * 0x100)
-#define WAKEUP_TIME_1000(x) (((x) & 0xFF) << 24)
-#define WAKEUP_TIME_100(x) (((x) & 0xFF) << 16)
+#define MT7531_FORCE_MODE_LNK BIT(31)
+#define MT7531_FORCE_MODE_SPD BIT(30)
+#define MT7531_FORCE_MODE_DPX BIT(29)
+#define MT7531_FORCE_MODE_RX_FC BIT(28)
+#define MT7531_FORCE_MODE_TX_FC BIT(27)
+#define MT7531_FORCE_MODE_EEE100 BIT(26)
+#define MT7531_FORCE_MODE_EEE1G BIT(25)
+#define MT7531_FORCE_MODE_MASK (MT7531_FORCE_MODE_LNK | \
+ MT7531_FORCE_MODE_SPD | \
+ MT7531_FORCE_MODE_DPX | \
+ MT7531_FORCE_MODE_RX_FC | \
+ MT7531_FORCE_MODE_TX_FC | \
+ MT7531_FORCE_MODE_EEE100 | \
+ MT7531_FORCE_MODE_EEE1G)
+#define MT753X_FORCE_MODE(id) ((id == ID_MT7531 || \
+ id == ID_MT7988) ? \
+ MT7531_FORCE_MODE_MASK : \
+ MT7530_FORCE_MODE)
+#define PMCR_LINK_SETTINGS_MASK (PMCR_MAC_TX_EN | PMCR_MAC_RX_EN | \
+ PMCR_FORCE_EEE1G | \
+ PMCR_FORCE_EEE100 | \
+ PMCR_FORCE_RX_FC_EN | \
+ PMCR_FORCE_TX_FC_EN | \
+ PMCR_FORCE_SPEED_1000 | \
+ PMCR_FORCE_SPEED_100 | \
+ PMCR_FORCE_FDX | PMCR_FORCE_LNK)
+
+#define MT753X_PMEEECR_P(x) (0x3004 + (x) * 0x100)
+#define WAKEUP_TIME_1000_MASK GENMASK(31, 24)
+#define WAKEUP_TIME_1000(x) FIELD_PREP(WAKEUP_TIME_1000_MASK, x)
+#define WAKEUP_TIME_100_MASK GENMASK(23, 16)
+#define WAKEUP_TIME_100(x) FIELD_PREP(WAKEUP_TIME_100_MASK, x)
#define LPI_THRESH_MASK GENMASK(15, 4)
-#define LPI_THRESH_SHT 4
-#define SET_LPI_THRESH(x) (((x) << LPI_THRESH_SHT) & LPI_THRESH_MASK)
-#define GET_LPI_THRESH(x) (((x) & LPI_THRESH_MASK) >> LPI_THRESH_SHT)
+#define LPI_THRESH_GET(x) FIELD_GET(LPI_THRESH_MASK, x)
+#define LPI_THRESH_SET(x) FIELD_PREP(LPI_THRESH_MASK, x)
#define LPI_MODE_EN BIT(0)
#define MT7530_PMSR_P(x) (0x3008 + (x) * 0x100)
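[Editorial note between hunks: LPI_THRESH_GET()/LPI_THRESH_SET() replace the open-coded shift macros with FIELD_GET()/FIELD_PREP(). A hedged sketch of the read-modify-write they enable; mt7530_rmw() is assumed to be the driver's existing masked-write accessor.]

/* Illustrative only: update the LPI threshold field of PMEEECR while
 * leaving the other bits of the register intact.
 */
static void example_set_lpi_thresh(struct mt7530_priv *priv, int port,
				   u16 thresh)
{
	mt7530_rmw(priv, MT753X_PMEEECR_P(port), LPI_THRESH_MASK,
		   LPI_THRESH_SET(thresh));
}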
@@ -375,6 +428,48 @@ enum mt7530_vlan_port_acc_frm {
/* Register for MIB */
#define MT7530_PORT_MIB_COUNTER(x) (0x4000 + (x) * 0x100)
+/* Each define is an offset of MT7530_PORT_MIB_COUNTER */
+#define MT7530_PORT_MIB_TX_DROP 0x00
+#define MT7530_PORT_MIB_TX_CRC_ERR 0x04
+#define MT7530_PORT_MIB_TX_UNICAST 0x08
+#define MT7530_PORT_MIB_TX_MULTICAST 0x0c
+#define MT7530_PORT_MIB_TX_BROADCAST 0x10
+#define MT7530_PORT_MIB_TX_COLLISION 0x14
+#define MT7530_PORT_MIB_TX_SINGLE_COLLISION 0x18
+#define MT7530_PORT_MIB_TX_MULTIPLE_COLLISION 0x1c
+#define MT7530_PORT_MIB_TX_DEFERRED 0x20
+#define MT7530_PORT_MIB_TX_LATE_COLLISION 0x24
+#define MT7530_PORT_MIB_TX_EXCESSIVE_COLLISION 0x28
+#define MT7530_PORT_MIB_TX_PAUSE 0x2c
+#define MT7530_PORT_MIB_TX_PKT_SZ_64 0x30
+#define MT7530_PORT_MIB_TX_PKT_SZ_65_TO_127 0x34
+#define MT7530_PORT_MIB_TX_PKT_SZ_128_TO_255 0x38
+#define MT7530_PORT_MIB_TX_PKT_SZ_256_TO_511 0x3c
+#define MT7530_PORT_MIB_TX_PKT_SZ_512_TO_1023 0x40
+#define MT7530_PORT_MIB_TX_PKT_SZ_1024_TO_MAX 0x44
+#define MT7530_PORT_MIB_TX_BYTES 0x48 /* 64 bytes */
+#define MT7530_PORT_MIB_RX_DROP 0x60
+#define MT7530_PORT_MIB_RX_FILTERING 0x64
+#define MT7530_PORT_MIB_RX_UNICAST 0x68
+#define MT7530_PORT_MIB_RX_MULTICAST 0x6c
+#define MT7530_PORT_MIB_RX_BROADCAST 0x70
+#define MT7530_PORT_MIB_RX_ALIGN_ERR 0x74
+#define MT7530_PORT_MIB_RX_CRC_ERR 0x78
+#define MT7530_PORT_MIB_RX_UNDER_SIZE_ERR 0x7c
+#define MT7530_PORT_MIB_RX_FRAG_ERR 0x80
+#define MT7530_PORT_MIB_RX_OVER_SZ_ERR 0x84
+#define MT7530_PORT_MIB_RX_JABBER_ERR 0x88
+#define MT7530_PORT_MIB_RX_PAUSE 0x8c
+#define MT7530_PORT_MIB_RX_PKT_SZ_64 0x90
+#define MT7530_PORT_MIB_RX_PKT_SZ_65_TO_127 0x94
+#define MT7530_PORT_MIB_RX_PKT_SZ_128_TO_255 0x98
+#define MT7530_PORT_MIB_RX_PKT_SZ_256_TO_511 0x9c
+#define MT7530_PORT_MIB_RX_PKT_SZ_512_TO_1023 0xa0
+#define MT7530_PORT_MIB_RX_PKT_SZ_1024_TO_MAX 0xa4
+#define MT7530_PORT_MIB_RX_BYTES 0xa8 /* 64 bytes */
+#define MT7530_PORT_MIB_RX_CTRL_DROP 0xb0
+#define MT7530_PORT_MIB_RX_INGRESS_DROP 0xb4
+#define MT7530_PORT_MIB_RX_ARL_DROP 0xb8
#define MT7530_MIB_CCR 0x4fe0
#define CCR_MIB_ENABLE BIT(31)
#define CCR_RX_OCT_CNT_GOOD BIT(7)
@@ -470,32 +565,30 @@ enum mt7531_clk_skew {
MT7531_CLK_SKEW_REVERSE = 3,
};
-/* Register for hw trap status */
-#define MT7530_HWTRAP 0x7800
-#define HWTRAP_XTAL_MASK (BIT(10) | BIT(9))
-#define HWTRAP_XTAL_25MHZ (BIT(10) | BIT(9))
-#define HWTRAP_XTAL_40MHZ (BIT(10))
-#define HWTRAP_XTAL_20MHZ (BIT(9))
-
-#define MT7531_HWTRAP 0x7800
-#define HWTRAP_XTAL_FSEL_MASK BIT(7)
-#define HWTRAP_XTAL_FSEL_25MHZ BIT(7)
-#define HWTRAP_XTAL_FSEL_40MHZ 0
-/* Unique fields of (M)HWSTRAP for MT7531 */
-#define XTAL_FSEL_S 7
-#define XTAL_FSEL_M BIT(7)
-#define PHY_EN BIT(6)
-#define CHG_STRAP BIT(8)
-
-/* Register for hw trap modification */
-#define MT7530_MHWTRAP 0x7804
-#define MHWTRAP_PHY0_SEL BIT(20)
-#define MHWTRAP_MANUAL BIT(16)
-#define MHWTRAP_P5_MAC_SEL BIT(13)
-#define MHWTRAP_P6_DIS BIT(8)
-#define MHWTRAP_P5_RGMII_MODE BIT(7)
-#define MHWTRAP_P5_DIS BIT(6)
-#define MHWTRAP_PHY_ACCESS BIT(5)
+/* Register for trap status */
+#define MT753X_TRAP 0x7800
+#define MT7530_XTAL_MASK (BIT(10) | BIT(9))
+#define MT7530_XTAL_25MHZ (BIT(10) | BIT(9))
+#define MT7530_XTAL_40MHZ BIT(10)
+#define MT7530_XTAL_20MHZ BIT(9)
+#define MT7531_XTAL25 BIT(7)
+
+/* Register for trap modification */
+#define MT753X_MTRAP 0x7804
+#define MT7530_P5_PHY0_SEL BIT(20)
+#define MT7530_CHG_TRAP BIT(16)
+#define MT7530_P5_MAC_SEL BIT(13)
+#define MT7530_P6_DIS BIT(8)
+#define MT7530_P5_RGMII_MODE BIT(7)
+#define MT7530_P5_DIS BIT(6)
+#define MT7530_PHY_INDIRECT_ACCESS BIT(5)
+#define MT7531_CHG_STRAP BIT(8)
+#define MT7531_PHY_EN BIT(6)
+
+enum mt7531_xtal_fsel {
+ MT7531_XTAL_FSEL_25MHZ,
+ MT7531_XTAL_FSEL_40MHZ,
+};
/* Register for TOP signal control */
#define MT7530_TOP_SIG_CTRL 0x7808
@@ -581,6 +674,15 @@ enum mt7531_clk_skew {
#define MT7531_GPIO12_RG_RXD3_MASK GENMASK(19, 16)
#define MT7531_EXT_P_MDIO_12 (2 << 16)
+#define MT753X_CPORT_SPTAG_CFG 0x7c10
+#define CPORT_SW2FE_STAG_EN BIT(1)
+#define CPORT_FE2SW_STAG_EN BIT(0)
+
+#define AN7583_GEPHY_CONN_CFG 0x7c14
+#define AN7583_CSR_DPHY_CKIN_SEL BIT(31)
+#define AN7583_CSR_PHY_CORE_REG_CLK_SEL BIT(30)
+#define AN7583_CSR_ETHER_AFE_PWD GENMASK(28, 24)
+
/* Registers for LED GPIO control (MT7530 only)
* All registers follow this pattern:
* [ 2: 0] port 0
@@ -629,7 +731,7 @@ enum mt7531_clk_skew {
#define MT7531_PHY_PLL_OFF BIT(5)
#define MT7531_PHY_PLL_BYPASS_MODE BIT(4)
-#define MT753X_CTRL_PHY_ADDR 0
+#define MT753X_CTRL_PHY_ADDR(addr) ((addr + 1) & 0x1f)
#define CORE_PLL_GROUP5 0x404
#define RG_LCDDS_PCW_NCPO1(x) ((x) & 0xffff)
@@ -697,17 +799,17 @@ struct mt7530_fdb {
*/
struct mt7530_port {
bool enable;
+ bool isolated;
u32 pm;
u16 pvid;
struct phylink_pcs *sgmii_pcs;
};
-/* Port 5 interface select definitions */
-enum p5_interface_select {
- P5_DISABLED,
- P5_INTF_SEL_PHY_P0,
- P5_INTF_SEL_PHY_P4,
- P5_INTF_SEL_GMAC5,
+/* Port 5 mode definitions of the MT7530 switch */
+enum mt7530_p5_mode {
+ GMAC5,
+ MUX_PHY_P0,
+ MUX_PHY_P4,
};
struct mt7530_priv;
@@ -720,15 +822,14 @@ struct mt753x_pcs {
/* struct mt753x_info - This is the main data structure for holding the specific
* part for each supported device
+ * @id: Holding the identifier to a switch model
+ * @pcs_ops: Holding the pointer to the MAC PCS operations structure
* @sw_setup: Holding the handler to a device initialization
* @phy_read_c22: Holding the way reading PHY port using C22
* @phy_write_c22: Holding the way writing PHY port using C22
* @phy_read_c45: Holding the way reading PHY port using C45
* @phy_write_c45: Holding the way writing PHY port using C45
- * @phy_mode_supported: Check if the PHY type is being supported on a certain
- * port
- * @mac_port_validate: Holding the way to set addition validate type for a
- * certan MAC port
+ * @mac_port_get_caps: Holding the handler that provides MAC capabilities
* @mac_port_config: Holding the way setting up the PHY attribute to a
* certain MAC port
*/
@@ -747,9 +848,6 @@ struct mt753x_info {
int regnum, u16 val);
void (*mac_port_get_caps)(struct dsa_switch *ds, int port,
struct phylink_config *config);
- void (*mac_port_validate)(struct dsa_switch *ds, int port,
- phy_interface_t interface,
- unsigned long *supported);
void (*mac_port_config)(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface);
@@ -770,14 +868,13 @@ struct mt753x_info {
* @ports: Holding the state among ports
* @reg_mutex: The lock for protecting among process accessing
* registers
- * @p5_intf_sel: Holding the current port 5 interface select
+ * @p5_mode: Holding the current mode of port 5 of the MT7530 switch
* @p5_sgmii: Flag for distinguishing if port 5 of the MT7531 switch
* has got SGMII
- * @irq: IRQ number of the switch
* @irq_domain: IRQ domain of the switch irq_chip
- * @irq_enable: IRQ enable bits, synced to SYS_INT_EN
* @create_sgmii: Pointer to function creating SGMII PCS instance(s)
* @active_cpu_ports: Holding the active CPU ports
+ * @mdiodev: The pointer to the MDIO device structure
*/
struct mt7530_priv {
struct device *dev;
@@ -791,7 +888,7 @@ struct mt7530_priv {
const struct mt753x_info *info;
unsigned int id;
bool mcm;
- enum p5_interface_select p5_intf_sel;
+ enum mt7530_p5_mode p5_mode;
bool p5_sgmii;
u8 mirror_rx;
u8 mirror_tx;
@@ -799,11 +896,10 @@ struct mt7530_priv {
struct mt753x_pcs pcs[MT7530_NUM_PORTS];
/* protect among processes for registers access*/
struct mutex reg_mutex;
- int irq;
struct irq_domain *irq_domain;
- u32 irq_enable;
int (*create_sgmii)(struct mt7530_priv *priv);
u8 active_cpu_ports;
+ struct mdio_device *mdiodev;
};
struct mt7530_hw_vlan_entry {
@@ -843,7 +939,6 @@ static inline void INIT_MT7530_DUMMY_POLL(struct mt7530_dummy_poll *p,
int mt7530_probe_common(struct mt7530_priv *priv);
void mt7530_remove_common(struct mt7530_priv *priv);
-extern const struct dsa_switch_ops mt7530_switch_ops;
extern const struct mt753x_info mt753x_table[];
#endif /* __MT7530_H */
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index 294312b58e4f..9c8ac14cd4f5 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -297,6 +297,8 @@ static const struct dsa_switch_ops mv88e6060_switch_ops = {
.phy_read = mv88e6060_phy_read,
.phy_write = mv88e6060_phy_write,
.phylink_get_caps = mv88e6060_phylink_get_caps,
+ .port_hsr_join = dsa_port_simple_hsr_join,
+ .port_hsr_leave = dsa_port_simple_hsr_leave,
};
static int mv88e6060_probe(struct mdio_device *mdiodev)
diff --git a/drivers/net/dsa/mv88e6xxx/Kconfig b/drivers/net/dsa/mv88e6xxx/Kconfig
index e3181d5471df..64ae3882d17c 100644
--- a/drivers/net/dsa/mv88e6xxx/Kconfig
+++ b/drivers/net/dsa/mv88e6xxx/Kconfig
@@ -17,3 +17,13 @@ config NET_DSA_MV88E6XXX_PTP
help
Say Y to enable PTP hardware timestamping on Marvell 88E6xxx switch
chips that support it.
+
+config NET_DSA_MV88E6XXX_LEDS
+ bool "LED support for Marvell 88E6xxx"
+ default y
+ depends on NET_DSA_MV88E6XXX
+ depends on LEDS_CLASS=y || LEDS_CLASS=NET_DSA_MV88E6XXX
+ depends on LEDS_TRIGGERS
+ help
+ This enables support for controlling the LEDs attached to the
+ Marvell 88E6xxx switch chips.
diff --git a/drivers/net/dsa/mv88e6xxx/Makefile b/drivers/net/dsa/mv88e6xxx/Makefile
index a9a9651187db..dd961081d631 100644
--- a/drivers/net/dsa/mv88e6xxx/Makefile
+++ b/drivers/net/dsa/mv88e6xxx/Makefile
@@ -9,6 +9,7 @@ mv88e6xxx-objs += global2.o
mv88e6xxx-objs += global2_avb.o
mv88e6xxx-objs += global2_scratch.o
mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_PTP) += hwtstamp.o
+mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_LEDS) += leds.o
mv88e6xxx-objs += pcs-6185.o
mv88e6xxx-objs += pcs-6352.o
mv88e6xxx-objs += pcs-639x.o
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index c95787cb9086..b4d48997bf46 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -27,6 +27,7 @@
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/platform_data/mv88e6xxx.h>
+#include <linux/property.h>
#include <linux/netdevice.h>
#include <linux/gpio/consumer.h>
#include <linux/phylink.h>
@@ -131,8 +132,8 @@ struct mii_bus *mv88e6xxx_default_mdio_bus(struct mv88e6xxx_chip *chip)
{
struct mv88e6xxx_mdio_bus *mdio_bus;
- mdio_bus = list_first_entry(&chip->mdios, struct mv88e6xxx_mdio_bus,
- list);
+ mdio_bus = list_first_entry_or_null(&chip->mdios,
+ struct mv88e6xxx_mdio_bus, list);
if (!mdio_bus)
return NULL;
@@ -296,7 +297,7 @@ static int mv88e6xxx_g1_irq_setup_common(struct mv88e6xxx_chip *chip)
u16 reg, mask;
chip->g1_irq.nirqs = chip->info->g1_irqs;
- chip->g1_irq.domain = irq_domain_add_simple(
+ chip->g1_irq.domain = irq_domain_create_simple(
NULL, chip->g1_irq.nirqs, 0,
&mv88e6xxx_g1_irq_domain_ops, chip);
if (!chip->g1_irq.domain)
@@ -393,7 +394,7 @@ static int mv88e6xxx_irq_poll_setup(struct mv88e6xxx_chip *chip)
kthread_init_delayed_work(&chip->irq_poll_work,
mv88e6xxx_irq_poll);
- chip->kworker = kthread_create_worker(0, "%s", dev_name(chip->dev));
+ chip->kworker = kthread_run_worker(0, "%s", dev_name(chip->dev));
if (IS_ERR(chip->kworker))
return PTR_ERR(chip->kworker);
@@ -566,13 +567,61 @@ static void mv88e6xxx_translate_cmode(u8 cmode, unsigned long *supported)
phy_interface_set_rgmii(supported);
}
-static void mv88e6250_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
- struct phylink_config *config)
+static void
+mv88e6250_setup_supported_interfaces(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
{
unsigned long *supported = config->supported_interfaces;
+ int err;
+ u16 reg;
- /* Translate the default cmode */
- mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
+ if (err) {
+ dev_err(chip->dev, "p%d: failed to read port status\n", port);
+ return;
+ }
+
+ switch (reg & MV88E6250_PORT_STS_PORTMODE_MASK) {
+ case MV88E6250_PORT_STS_PORTMODE_MII_10_HALF_PHY:
+ case MV88E6250_PORT_STS_PORTMODE_MII_100_HALF_PHY:
+ case MV88E6250_PORT_STS_PORTMODE_MII_10_FULL_PHY:
+ case MV88E6250_PORT_STS_PORTMODE_MII_100_FULL_PHY:
+ __set_bit(PHY_INTERFACE_MODE_REVMII, supported);
+ break;
+
+ case MV88E6250_PORT_STS_PORTMODE_MII_HALF:
+ case MV88E6250_PORT_STS_PORTMODE_MII_FULL:
+ __set_bit(PHY_INTERFACE_MODE_MII, supported);
+ break;
+
+ case MV88E6250_PORT_STS_PORTMODE_MII_DUAL_100_RMII_FULL_PHY:
+ case MV88E6250_PORT_STS_PORTMODE_MII_200_RMII_FULL_PHY:
+ case MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_HALF_PHY:
+ case MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_FULL_PHY:
+ __set_bit(PHY_INTERFACE_MODE_REVRMII, supported);
+ break;
+
+ case MV88E6250_PORT_STS_PORTMODE_MII_DUAL_100_RMII_FULL:
+ case MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_FULL:
+ __set_bit(PHY_INTERFACE_MODE_RMII, supported);
+ break;
+
+ case MV88E6250_PORT_STS_PORTMODE_MII_100_RGMII:
+ __set_bit(PHY_INTERFACE_MODE_RGMII, supported);
+ break;
+
+ default:
+ dev_err(chip->dev,
+ "p%d: invalid port mode in status register: %04x\n",
+ port, reg);
+ }
+}
+
+static void mv88e6250_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
+{
+ if (!mv88e6xxx_phy_is_internal(chip, port))
+ mv88e6250_setup_supported_interfaces(chip, port, config);
config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100;
}
@@ -589,12 +638,12 @@ static void mv88e6351_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
MAC_1000FD;
}
-static int mv88e6352_get_port4_serdes_cmode(struct mv88e6xxx_chip *chip)
+static int mv88e63xx_get_port_serdes_cmode(struct mv88e6xxx_chip *chip, int port)
{
u16 reg, val;
int err;
- err = mv88e6xxx_port_read(chip, 4, MV88E6XXX_PORT_STS, &reg);
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
if (err)
return err;
@@ -603,16 +652,16 @@ static int mv88e6352_get_port4_serdes_cmode(struct mv88e6xxx_chip *chip)
return 0xf;
val = reg & ~MV88E6XXX_PORT_STS_PHY_DETECT;
- err = mv88e6xxx_port_write(chip, 4, MV88E6XXX_PORT_STS, val);
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_STS, val);
if (err)
return err;
- err = mv88e6xxx_port_read(chip, 4, MV88E6XXX_PORT_STS, &val);
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &val);
if (err)
return err;
/* Restore PHY_DETECT value */
- err = mv88e6xxx_port_write(chip, 4, MV88E6XXX_PORT_STS, reg);
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_STS, reg);
if (err)
return err;
@@ -640,7 +689,30 @@ static void mv88e6352_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
if (err <= 0)
return;
- cmode = mv88e6352_get_port4_serdes_cmode(chip);
+ cmode = mv88e63xx_get_port_serdes_cmode(chip, port);
+ if (cmode < 0)
+ dev_err(chip->dev, "p%d: failed to read serdes cmode\n",
+ port);
+ else
+ mv88e6xxx_translate_cmode(cmode, supported);
+ }
+}
+
+static void mv88e632x_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
+{
+ unsigned long *supported = config->supported_interfaces;
+ int cmode;
+
+ /* Translate the default cmode */
+ mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
+
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
+ MAC_1000FD;
+
+ /* Port 0/1 are serdes only ports */
+ if (port == 0 || port == 1) {
+ cmode = mv88e63xx_get_port_serdes_cmode(chip, port);
if (cmode < 0)
dev_err(chip->dev, "p%d: failed to read serdes cmode\n",
port);
@@ -790,24 +862,27 @@ static void mv88e6xxx_get_caps(struct dsa_switch *ds, int port,
}
}
-static struct phylink_pcs *mv88e6xxx_mac_select_pcs(struct dsa_switch *ds,
- int port,
- phy_interface_t interface)
+static struct phylink_pcs *
+mv88e6xxx_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
{
- struct mv88e6xxx_chip *chip = ds->priv;
- struct phylink_pcs *pcs = ERR_PTR(-EOPNOTSUPP);
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct mv88e6xxx_chip *chip = dp->ds->priv;
+ struct phylink_pcs *pcs = NULL;
if (chip->info->ops->pcs_ops)
- pcs = chip->info->ops->pcs_ops->pcs_select(chip, port,
+ pcs = chip->info->ops->pcs_ops->pcs_select(chip, dp->index,
interface);
return pcs;
}
-static int mv88e6xxx_mac_prepare(struct dsa_switch *ds, int port,
+static int mv88e6xxx_mac_prepare(struct phylink_config *config,
unsigned int mode, phy_interface_t interface)
{
- struct mv88e6xxx_chip *chip = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct mv88e6xxx_chip *chip = dp->ds->priv;
+ int port = dp->index;
int err = 0;
/* In inband mode, the link may come up at any time while the link
@@ -826,11 +901,13 @@ static int mv88e6xxx_mac_prepare(struct dsa_switch *ds, int port,
return err;
}
-static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port,
+static void mv88e6xxx_mac_config(struct phylink_config *config,
unsigned int mode,
const struct phylink_link_state *state)
{
- struct mv88e6xxx_chip *chip = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct mv88e6xxx_chip *chip = dp->ds->priv;
+ int port = dp->index;
int err = 0;
mv88e6xxx_reg_lock(chip);
@@ -846,13 +923,15 @@ err_unlock:
mv88e6xxx_reg_unlock(chip);
if (err && err != -EOPNOTSUPP)
- dev_err(ds->dev, "p%d: failed to configure MAC/PCS\n", port);
+ dev_err(chip->dev, "p%d: failed to configure MAC/PCS\n", port);
}
-static int mv88e6xxx_mac_finish(struct dsa_switch *ds, int port,
+static int mv88e6xxx_mac_finish(struct phylink_config *config,
unsigned int mode, phy_interface_t interface)
{
- struct mv88e6xxx_chip *chip = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct mv88e6xxx_chip *chip = dp->ds->priv;
+ int port = dp->index;
int err = 0;
/* Undo the forced down state above after completing configuration
@@ -876,12 +955,14 @@ static int mv88e6xxx_mac_finish(struct dsa_switch *ds, int port,
return err;
}
-static void mv88e6xxx_mac_link_down(struct dsa_switch *ds, int port,
+static void mv88e6xxx_mac_link_down(struct phylink_config *config,
unsigned int mode,
phy_interface_t interface)
{
- struct mv88e6xxx_chip *chip = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct mv88e6xxx_chip *chip = dp->ds->priv;
const struct mv88e6xxx_ops *ops;
+ int port = dp->index;
int err = 0;
ops = chip->info->ops;
@@ -904,14 +985,16 @@ static void mv88e6xxx_mac_link_down(struct dsa_switch *ds, int port,
"p%d: failed to force MAC link down\n", port);
}
-static void mv88e6xxx_mac_link_up(struct dsa_switch *ds, int port,
- unsigned int mode, phy_interface_t interface,
+static void mv88e6xxx_mac_link_up(struct phylink_config *config,
struct phy_device *phydev,
+ unsigned int mode, phy_interface_t interface,
int speed, int duplex,
bool tx_pause, bool rx_pause)
{
- struct mv88e6xxx_chip *chip = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct mv88e6xxx_chip *chip = dp->ds->priv;
const struct mv88e6xxx_ops *ops;
+ int port = dp->index;
int err = 0;
ops = chip->info->ops;
@@ -937,7 +1020,7 @@ error:
mv88e6xxx_reg_unlock(chip);
if (err && err != -EOPNOTSUPP)
- dev_err(ds->dev,
+ dev_err(chip->dev,
"p%d: failed to configure MAC link up\n", port);
}
@@ -1070,42 +1153,37 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip,
return value;
}
-static int mv88e6xxx_stats_get_strings(struct mv88e6xxx_chip *chip,
- uint8_t *data, int types)
+static void mv88e6xxx_stats_get_strings(struct mv88e6xxx_chip *chip,
+ uint8_t **data, int types)
{
const struct mv88e6xxx_hw_stat *stat;
- int i, j;
+ int i;
- for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
+ for (i = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
stat = &mv88e6xxx_hw_stats[i];
- if (stat->type & types) {
- memcpy(data + j * ETH_GSTRING_LEN, stat->string,
- ETH_GSTRING_LEN);
- j++;
- }
+ if (stat->type & types)
+ ethtool_puts(data, stat->string);
}
-
- return j;
}
-static int mv88e6095_stats_get_strings(struct mv88e6xxx_chip *chip,
- uint8_t *data)
+static void mv88e6095_stats_get_strings(struct mv88e6xxx_chip *chip,
+ uint8_t **data)
{
- return mv88e6xxx_stats_get_strings(chip, data,
- STATS_TYPE_BANK0 | STATS_TYPE_PORT);
+ mv88e6xxx_stats_get_strings(chip, data,
+ STATS_TYPE_BANK0 | STATS_TYPE_PORT);
}
-static int mv88e6250_stats_get_strings(struct mv88e6xxx_chip *chip,
- uint8_t *data)
+static void mv88e6250_stats_get_strings(struct mv88e6xxx_chip *chip,
+ uint8_t **data)
{
- return mv88e6xxx_stats_get_strings(chip, data, STATS_TYPE_BANK0);
+ mv88e6xxx_stats_get_strings(chip, data, STATS_TYPE_BANK0);
}
-static int mv88e6320_stats_get_strings(struct mv88e6xxx_chip *chip,
- uint8_t *data)
+static void mv88e6320_stats_get_strings(struct mv88e6xxx_chip *chip,
+ uint8_t **data)
{
- return mv88e6xxx_stats_get_strings(chip, data,
- STATS_TYPE_BANK0 | STATS_TYPE_BANK1);
+ mv88e6xxx_stats_get_strings(chip, data,
+ STATS_TYPE_BANK0 | STATS_TYPE_BANK1);
}
static const uint8_t *mv88e6xxx_atu_vtu_stats_strings[] = {
@@ -1116,21 +1194,18 @@ static const uint8_t *mv88e6xxx_atu_vtu_stats_strings[] = {
"vtu_miss_violation",
};
-static void mv88e6xxx_atu_vtu_get_strings(uint8_t *data)
+static void mv88e6xxx_atu_vtu_get_strings(uint8_t **data)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(mv88e6xxx_atu_vtu_stats_strings); i++)
- strscpy(data + i * ETH_GSTRING_LEN,
- mv88e6xxx_atu_vtu_stats_strings[i],
- ETH_GSTRING_LEN);
+ ethtool_puts(data, mv88e6xxx_atu_vtu_stats_strings[i]);
}
static void mv88e6xxx_get_strings(struct dsa_switch *ds, int port,
u32 stringset, uint8_t *data)
{
struct mv88e6xxx_chip *chip = ds->priv;
- int count = 0;
if (stringset != ETH_SS_STATS)
return;
@@ -1138,15 +1213,12 @@ static void mv88e6xxx_get_strings(struct dsa_switch *ds, int port,
mv88e6xxx_reg_lock(chip);
if (chip->info->ops->stats_get_strings)
- count = chip->info->ops->stats_get_strings(chip, data);
+ chip->info->ops->stats_get_strings(chip, &data);
- if (chip->info->ops->serdes_get_strings) {
- data += count * ETH_GSTRING_LEN;
- count = chip->info->ops->serdes_get_strings(chip, port, data);
- }
+ if (chip->info->ops->serdes_get_strings)
+ chip->info->ops->serdes_get_strings(chip, port, &data);
- data += count * ETH_GSTRING_LEN;
- mv88e6xxx_atu_vtu_get_strings(data);
+ mv88e6xxx_atu_vtu_get_strings(&data);
mv88e6xxx_reg_unlock(chip);
}
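[Editorial note between hunks: the string helpers above now advance the output buffer through ethtool_puts() instead of tracking string indices by hand. A minimal sketch of that pattern with hypothetical names, outside this driver.]

#include <linux/ethtool.h>

/* ethtool_puts() copies each string into an ETH_GSTRING_LEN slot and
 * advances *data, so no "data + i * ETH_GSTRING_LEN" arithmetic and no
 * returned count are needed any more.
 */
static const char * const example_stats[] = { "rx_ok", "tx_ok", "drops" };

static void example_get_strings(u8 **data)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(example_stats); i++)
		ethtool_puts(data, example_stats[i]);
}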
@@ -1217,9 +1289,6 @@ static size_t mv88e6095_stats_get_stat(struct mv88e6xxx_chip *chip, int port,
const struct mv88e6xxx_hw_stat *stat,
uint64_t *data)
{
- if (!(stat->type & (STATS_TYPE_BANK0 | STATS_TYPE_PORT)))
- return 0;
-
*data = _mv88e6xxx_get_ethtool_stat(chip, stat, port, 0,
MV88E6XXX_G1_STATS_OP_HIST_RX);
return 1;
@@ -1229,9 +1298,6 @@ static size_t mv88e6250_stats_get_stat(struct mv88e6xxx_chip *chip, int port,
const struct mv88e6xxx_hw_stat *stat,
uint64_t *data)
{
- if (!(stat->type & STATS_TYPE_BANK0))
- return 0;
-
*data = _mv88e6xxx_get_ethtool_stat(chip, stat, port, 0,
MV88E6XXX_G1_STATS_OP_HIST_RX);
return 1;
@@ -1241,9 +1307,6 @@ static size_t mv88e6320_stats_get_stat(struct mv88e6xxx_chip *chip, int port,
const struct mv88e6xxx_hw_stat *stat,
uint64_t *data)
{
- if (!(stat->type & (STATS_TYPE_BANK0 | STATS_TYPE_BANK1)))
- return 0;
-
*data = _mv88e6xxx_get_ethtool_stat(chip, stat, port,
MV88E6XXX_G1_STATS_OP_BANK_1_BIT_9,
MV88E6XXX_G1_STATS_OP_HIST_RX);
@@ -1254,9 +1317,6 @@ static size_t mv88e6390_stats_get_stat(struct mv88e6xxx_chip *chip, int port,
const struct mv88e6xxx_hw_stat *stat,
uint64_t *data)
{
- if (!(stat->type & (STATS_TYPE_BANK0 | STATS_TYPE_BANK1)))
- return 0;
-
*data = _mv88e6xxx_get_ethtool_stat(chip, stat, port,
MV88E6XXX_G1_STATS_OP_BANK_1_BIT_10,
0);
@@ -1269,6 +1329,9 @@ static size_t mv88e6xxx_stats_get_stat(struct mv88e6xxx_chip *chip, int port,
{
int ret = 0;
+ if (!(stat->type & chip->info->stats_type))
+ return 0;
+
if (chip->info->ops->stats_get_stat) {
mv88e6xxx_reg_lock(chip);
ret = chip->info->ops->stats_get_stat(chip, port, stat, data);
@@ -1450,13 +1513,6 @@ static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
mv88e6xxx_reg_unlock(chip);
}
-static int mv88e6xxx_get_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_keee *e)
-{
- /* Nothing to do on the port's MAC */
- return 0;
-}
-
static int mv88e6xxx_set_mac_eee(struct dsa_switch *ds, int port,
struct ethtool_keee *e)
{
@@ -1796,6 +1852,8 @@ static int mv88e6xxx_vtu_get(struct mv88e6xxx_chip *chip, u16 vid,
if (!chip->info->ops->vtu_getnext)
return -EOPNOTSUPP;
+ memset(entry, 0, sizeof(*entry));
+
entry->vid = vid ? vid - 1 : mv88e6xxx_max_vid(chip);
entry->valid = false;
@@ -1847,36 +1905,9 @@ static int mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_chip *chip,
return chip->info->ops->vtu_loadpurge(chip, entry);
}
-static int mv88e6xxx_fid_map_vlan(struct mv88e6xxx_chip *chip,
- const struct mv88e6xxx_vtu_entry *entry,
- void *_fid_bitmap)
-{
- unsigned long *fid_bitmap = _fid_bitmap;
-
- set_bit(entry->fid, fid_bitmap);
- return 0;
-}
-
-int mv88e6xxx_fid_map(struct mv88e6xxx_chip *chip, unsigned long *fid_bitmap)
-{
- bitmap_zero(fid_bitmap, MV88E6XXX_N_FID);
-
- /* Every FID has an associated VID, so walking the VTU
- * will discover the full set of FIDs in use.
- */
- return mv88e6xxx_vtu_walk(chip, mv88e6xxx_fid_map_vlan, fid_bitmap);
-}
-
static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid)
{
- DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID);
- int err;
-
- err = mv88e6xxx_fid_map(chip, fid_bitmap);
- if (err)
- return err;
-
- *fid = find_first_zero_bit(fid_bitmap, MV88E6XXX_N_FID);
+ *fid = find_first_zero_bit(chip->fid_bitmap, MV88E6XXX_N_FID);
if (unlikely(*fid >= mv88e6xxx_num_databases(chip)))
return -ENOSPC;
@@ -1931,7 +1962,16 @@ static int mv88e6xxx_mst_put(struct mv88e6xxx_chip *chip, u8 sid)
struct mv88e6xxx_mst *mst, *tmp;
int err;
- if (!sid)
+ /* If the SID is zero, it is for a VLAN mapped to the default MSTI,
+ * and mv88e6xxx_stu_setup() made sure it is always present, and thus,
+ * should not be removed here.
+ *
+ * If the chip lacks STU support, numerically the "sid" variable will
+ * happen to also be zero, but we don't want to rely on that fact, so
+ * we explicitly test that first. In that case, there is also nothing
+ * to do here.
+ */
+ if (!mv88e6xxx_has_stu(chip) || !sid)
return 0;
list_for_each_entry_safe(mst, tmp, &chip->msts, node) {
@@ -2179,13 +2219,11 @@ mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
return err;
}
-static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
- const unsigned char *addr, u16 vid,
- u8 state)
+static int mv88e6xxx_port_db_get(struct mv88e6xxx_chip *chip,
+ const unsigned char *addr, u16 vid,
+ u16 *fid, struct mv88e6xxx_atu_entry *entry)
{
- struct mv88e6xxx_atu_entry entry;
struct mv88e6xxx_vtu_entry vlan;
- u16 fid;
int err;
/* Ports have two private address databases: one for when the port is
@@ -2196,7 +2234,7 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
* VLAN ID into the port's database used for VLAN-unaware bridging.
*/
if (vid == 0) {
- fid = MV88E6XXX_FID_BRIDGED;
+ *fid = MV88E6XXX_FID_BRIDGED;
} else {
err = mv88e6xxx_vtu_get(chip, vid, &vlan);
if (err)
@@ -2206,14 +2244,39 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
if (!vlan.valid)
return -EOPNOTSUPP;
- fid = vlan.fid;
+ *fid = vlan.fid;
}
- entry.state = 0;
- ether_addr_copy(entry.mac, addr);
- eth_addr_dec(entry.mac);
+ entry->state = 0;
+ ether_addr_copy(entry->mac, addr);
+ eth_addr_dec(entry->mac);
+
+ return mv88e6xxx_g1_atu_getnext(chip, *fid, entry);
+}
+
+static bool mv88e6xxx_port_db_find(struct mv88e6xxx_chip *chip,
+ const unsigned char *addr, u16 vid)
+{
+ struct mv88e6xxx_atu_entry entry;
+ u16 fid;
+ int err;
- err = mv88e6xxx_g1_atu_getnext(chip, fid, &entry);
+ err = mv88e6xxx_port_db_get(chip, addr, vid, &fid, &entry);
+ if (err)
+ return false;
+
+ return entry.state && ether_addr_equal(entry.mac, addr);
+}
+
+static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
+ const unsigned char *addr, u16 vid,
+ u8 state)
+{
+ struct mv88e6xxx_atu_entry entry;
+ u16 fid;
+ int err;
+
+ err = mv88e6xxx_port_db_get(chip, addr, vid, &fid, &entry);
if (err)
return err;
@@ -2583,6 +2646,9 @@ static int mv88e6xxx_port_vlan_join(struct mv88e6xxx_chip *chip, int port,
port, vid);
}
+ /* Record FID used in SW FID map */
+ bitmap_set(chip->fid_bitmap, vlan.fid, 1);
+
return 0;
}
@@ -2688,6 +2754,9 @@ static int mv88e6xxx_port_vlan_leave(struct mv88e6xxx_chip *chip,
err = mv88e6xxx_mst_put(chip, vlan.sid);
if (err)
return err;
+
+ /* Record FID freed in SW FID map */
+ bitmap_clear(chip->fid_bitmap, vlan.fid, 1);
}
return mv88e6xxx_g1_atu_remove(chip, vlan.fid, port, false);
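[Editorial note between hunks: with the FID map now kept in chip->fid_bitmap, allocation and release reduce to bitmap operations instead of walking the VTU. A hedged sketch of the allocate/record/release cycle implied by the hunks above; error handling around the VTU loadpurge is omitted.]

/* Illustrative only. */
static int example_fid_alloc(struct mv88e6xxx_chip *chip, u16 *fid)
{
	*fid = find_first_zero_bit(chip->fid_bitmap, MV88E6XXX_N_FID);
	if (*fid >= mv88e6xxx_num_databases(chip))
		return -ENOSPC;

	bitmap_set(chip->fid_bitmap, *fid, 1);	/* record FID as in use */
	return 0;
}

static void example_fid_free(struct mv88e6xxx_chip *chip, u16 fid)
{
	bitmap_clear(chip->fid_bitmap, fid, 1);	/* FID may be reused */
}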
@@ -2811,6 +2880,13 @@ static int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_db_load_purge(chip, port, addr, vid,
MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC);
+ if (err)
+ goto out;
+
+ if (!mv88e6xxx_port_db_find(chip, addr, vid))
+ err = -ENOSPC;
+
+out:
mv88e6xxx_reg_unlock(chip);
return err;
@@ -3075,6 +3151,7 @@ static int mv88e6xxx_software_reset(struct mv88e6xxx_chip *chip)
static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip)
{
struct gpio_desc *gpiod = chip->reset;
+ int err;
/* If there is a GPIO connected to the reset pin, toggle it */
if (gpiod) {
@@ -3083,17 +3160,26 @@ static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip)
* mid-byte, causing the first EEPROM read after the reset
* from the wrong location resulting in the switch booting
* to wrong mode and inoperable.
+ * For this reason, switch families with EEPROM support
+ * generally wait for EEPROM loads to complete as their pre-
+ * and post-reset handlers.
*/
- if (chip->info->ops->get_eeprom)
- mv88e6xxx_g2_eeprom_wait(chip);
+ if (chip->info->ops->hardware_reset_pre) {
+ err = chip->info->ops->hardware_reset_pre(chip);
+ if (err)
+ dev_err(chip->dev, "pre-reset error: %d\n", err);
+ }
gpiod_set_value_cansleep(gpiod, 1);
usleep_range(10000, 20000);
gpiod_set_value_cansleep(gpiod, 0);
usleep_range(10000, 20000);
- if (chip->info->ops->get_eeprom)
- mv88e6xxx_g2_eeprom_wait(chip);
+ if (chip->info->ops->hardware_reset_post) {
+ err = chip->info->ops->hardware_reset_post(chip);
+ if (err)
+ dev_err(chip->dev, "post-reset error: %d\n", err);
+ }
}
}
@@ -3279,14 +3365,44 @@ static int mv88e6xxx_setup_upstream_port(struct mv88e6xxx_chip *chip, int port)
static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
{
struct device_node *phy_handle = NULL;
+ struct fwnode_handle *ports_fwnode;
+ struct fwnode_handle *port_fwnode;
struct dsa_switch *ds = chip->ds;
+ struct mv88e6xxx_port *p;
struct dsa_port *dp;
int tx_amp;
int err;
u16 reg;
+ u32 val;
+
+ p = &chip->ports[port];
+ p->chip = chip;
+ p->port = port;
+
+ /* Look up corresponding fwnode if any */
+ ports_fwnode = device_get_named_child_node(chip->dev, "ethernet-ports");
+ if (!ports_fwnode)
+ ports_fwnode = device_get_named_child_node(chip->dev, "ports");
+ if (ports_fwnode) {
+ fwnode_for_each_child_node(ports_fwnode, port_fwnode) {
+ if (fwnode_property_read_u32(port_fwnode, "reg", &val))
+ continue;
+ if (val == port) {
+ p->fwnode = port_fwnode;
+ p->fiber = fwnode_property_present(port_fwnode, "sfp");
+ break;
+ }
+ }
+ fwnode_handle_put(ports_fwnode);
+ } else {
+ dev_dbg(chip->dev, "no ethernet ports node defined for the device\n");
+ }
- chip->ports[port].chip = chip;
- chip->ports[port].port = port;
+ if (chip->info->ops->port_setup_leds) {
+ err = chip->info->ops->port_setup_leds(chip, port);
+ if (err && err != -EOPNOTSUPP)
+ return err;
+ }
err = mv88e6xxx_port_setup_mac(chip, port, LINK_UNFORCED,
SPEED_UNFORCED, DUPLEX_UNFORCED,
@@ -3534,7 +3650,8 @@ static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
mv88e6xxx_reg_lock(chip);
if (chip->info->ops->port_set_jumbo_size)
ret = chip->info->ops->port_set_jumbo_size(chip, port, new_mtu);
- else if (chip->info->ops->set_max_frame_size)
+ else if (chip->info->ops->set_max_frame_size &&
+ dsa_is_cpu_port(ds, port))
ret = chip->info->ops->set_max_frame_size(chip, new_mtu);
mv88e6xxx_reg_unlock(chip);
@@ -3568,6 +3685,21 @@ static int mv88e6xxx_stats_setup(struct mv88e6xxx_chip *chip)
return mv88e6xxx_g1_stats_clear(chip);
}
+static int mv88e6320_setup_errata(struct mv88e6xxx_chip *chip)
+{
+ u16 dummy;
+ int err;
+
+ /* Workaround for erratum
+ * 3.3 RGMII timing may be out of spec when transmit delay is enabled
+ */
+ err = mv88e6xxx_port_hidden_write(chip, 0, 0xf, 0x7, 0xe000);
+ if (err)
+ return err;
+
+ return mv88e6xxx_port_hidden_read(chip, 0, 0xf, 0x7, &dummy);
+}
+
/* Check if the errata has already been applied. */
static bool mv88e6390_setup_errata_applied(struct mv88e6xxx_chip *chip)
{
@@ -3833,6 +3965,8 @@ static void mv88e6xxx_teardown(struct dsa_switch *ds)
mv88e6xxx_teardown_devlink_params(ds);
dsa_devlink_resources_unregister(ds);
mv88e6xxx_teardown_devlink_regions_global(ds);
+ mv88e6xxx_hwtstamp_free(chip);
+ mv88e6xxx_ptp_free(chip);
mv88e6xxx_mdios_unregister(chip);
}
@@ -3973,7 +4107,7 @@ unlock:
mv88e6xxx_reg_unlock(chip);
if (err)
- goto out_mdios;
+ goto out_hwtstamp;
/* Have to be called without holding the register lock, since
* they take the devlink lock, and we later take the locks in
@@ -3982,7 +4116,7 @@ unlock:
*/
err = mv88e6xxx_setup_devlink_resources(ds);
if (err)
- goto out_mdios;
+ goto out_hwtstamp;
err = mv88e6xxx_setup_devlink_params(ds);
if (err)
@@ -3998,7 +4132,9 @@ out_params:
mv88e6xxx_teardown_devlink_params(ds);
out_resources:
dsa_devlink_resources_unregister(ds);
-out_mdios:
+out_hwtstamp:
+ mv88e6xxx_hwtstamp_free(chip);
+ mv88e6xxx_ptp_free(chip);
mv88e6xxx_mdios_unregister(chip);
return err;
@@ -4323,6 +4459,8 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
@@ -4502,6 +4640,7 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_get_cmode = mv88e6352_port_get_cmode,
+ .port_setup_leds = mv88e6xxx_port_setup_leds,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
@@ -4513,6 +4652,8 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6352_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
@@ -4602,6 +4743,7 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_get_cmode = mv88e6352_port_get_cmode,
+ .port_setup_leds = mv88e6xxx_port_setup_leds,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
@@ -4613,6 +4755,8 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6352_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
@@ -4707,6 +4851,8 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
@@ -4765,6 +4911,8 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
@@ -4821,6 +4969,8 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
@@ -4869,6 +5019,7 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_get_cmode = mv88e6352_port_get_cmode,
+ .port_setup_leds = mv88e6xxx_port_setup_leds,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
@@ -4880,6 +5031,8 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6352_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
@@ -4933,11 +5086,13 @@ static const struct mv88e6xxx_ops mv88e6250_ops = {
.watchdog_ops = &mv88e6250_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6250_g1_wait_eeprom_done_prereset,
+ .hardware_reset_post = mv88e6xxx_g1_wait_eeprom_done,
.reset = mv88e6250_g1_reset,
.vtu_getnext = mv88e6185_g1_vtu_getnext,
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
.avb_ops = &mv88e6352_avb_ops,
- .ptp_ops = &mv88e6250_ptp_ops,
+ .ptp_ops = &mv88e6352_ptp_ops,
.phylink_get_caps = mv88e6250_phylink_get_caps,
.set_max_frame_size = mv88e6185_g1_set_max_frame_size,
};
@@ -4980,6 +5135,8 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
@@ -5003,6 +5160,7 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
static const struct mv88e6xxx_ops mv88e6320_ops = {
/* MV88E6XXX_FAMILY_6320 */
+ .setup_errata = mv88e6320_setup_errata,
.ieee_pri_map = mv88e6085_g1_ieee_pri_map,
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
@@ -5018,6 +5176,7 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
.port_set_rgmii_delay = mv88e6320_port_set_rgmii_delay,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
@@ -5039,17 +5198,22 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
- .vtu_getnext = mv88e6185_g1_vtu_getnext,
- .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
+ .vtu_getnext = mv88e6352_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
- .phylink_get_caps = mv88e6185_phylink_get_caps,
+ .phylink_get_caps = mv88e632x_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6321_ops = {
/* MV88E6XXX_FAMILY_6320 */
+ .setup_errata = mv88e6320_setup_errata,
.ieee_pri_map = mv88e6085_g1_ieee_pri_map,
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
@@ -5065,6 +5229,7 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
.port_set_rgmii_delay = mv88e6320_port_set_rgmii_delay,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
@@ -5085,13 +5250,17 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
- .vtu_getnext = mv88e6185_g1_vtu_getnext,
- .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
+ .vtu_getnext = mv88e6352_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
- .phylink_get_caps = mv88e6185_phylink_get_caps,
+ .phylink_get_caps = mv88e632x_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6341_ops = {
@@ -5135,6 +5304,8 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
@@ -5279,6 +5450,7 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_get_cmode = mv88e6352_port_get_cmode,
+ .port_setup_leds = mv88e6xxx_port_setup_leds,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
@@ -5290,6 +5462,8 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6352_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
@@ -5352,6 +5526,8 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
@@ -5414,6 +5590,8 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
@@ -5479,6 +5657,8 @@ static const struct mv88e6xxx_ops mv88e6393x_ops = {
.watchdog_ops = &mv88e6393x_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6393x_port_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
@@ -5517,6 +5697,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 5,
+ .stats_type = STATS_TYPE_BANK0,
.atu_move_port_mask = 0xf,
.dual_chip = true,
.ops = &mv88e6250_ops,
@@ -5537,6 +5718,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 5,
+ .stats_type = STATS_TYPE_BANK0,
.atu_move_port_mask = 0xf,
.dual_chip = true,
.ops = &mv88e6250_ops,
@@ -5559,6 +5741,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 8,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5580,6 +5763,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.global2_addr = 0x1c,
.age_time_coeff = 15000,
.g1_irqs = 8,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.multi_chip = true,
.ops = &mv88e6095_ops,
@@ -5602,6 +5786,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 8,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5626,6 +5811,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5648,6 +5834,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.global2_addr = 0x1c,
.age_time_coeff = 15000,
.g1_irqs = 9,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.multi_chip = true,
.ops = &mv88e6131_ops,
@@ -5657,7 +5844,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6141,
.family = MV88E6XXX_FAMILY_6341,
.name = "Marvell 88E6141",
- .num_databases = 4096,
+ .num_databases = 256,
.num_macs = 2048,
.num_ports = 6,
.num_internal_phys = 5,
@@ -5669,9 +5856,10 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 3750,
- .atu_move_port_mask = 0x1f,
+ .atu_move_port_mask = 0xf,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.pvt = true,
.multi_chip = true,
.edsa_support = MV88E6XXX_EDSA_SUPPORTED,
@@ -5695,6 +5883,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5720,6 +5909,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5744,6 +5934,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5769,6 +5960,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5793,6 +5985,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5818,6 +6011,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5840,6 +6034,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.global2_addr = 0x1c,
.age_time_coeff = 15000,
.g1_irqs = 8,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.multi_chip = true,
.edsa_support = MV88E6XXX_EDSA_SUPPORTED,
@@ -5864,6 +6059,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 9,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.pvt = true,
.multi_chip = true,
.atu_move_port_mask = 0x1f,
@@ -5888,6 +6084,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 9,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
@@ -5911,6 +6108,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 9,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
@@ -5935,6 +6133,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 10,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
@@ -5959,6 +6158,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 10,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
@@ -5986,6 +6186,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0,
.atu_move_port_mask = 0xf,
.dual_chip = true,
.ptp_support = true,
@@ -6010,6 +6211,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -6033,6 +6235,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0,
.atu_move_port_mask = 0xf,
.dual_chip = true,
.ptp_support = true,
@@ -6056,6 +6259,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 9,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
@@ -6070,9 +6274,11 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_databases = 4096,
.num_macs = 8192,
.num_ports = 7,
- .num_internal_phys = 5,
+ .num_internal_phys = 2,
+ .internal_phys_offset = 3,
.num_gpio = 15,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -6080,6 +6286,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 8,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -6095,9 +6302,11 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_databases = 4096,
.num_macs = 8192,
.num_ports = 7,
- .num_internal_phys = 5,
+ .num_internal_phys = 2,
+ .internal_phys_offset = 3,
.num_gpio = 15,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -6105,7 +6314,9 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 8,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0xf,
+ .pvt = true,
.multi_chip = true,
.edsa_support = MV88E6XXX_EDSA_SUPPORTED,
.ptp_support = true,
@@ -6116,7 +6327,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6341,
.family = MV88E6XXX_FAMILY_6341,
.name = "Marvell 88E6341",
- .num_databases = 4096,
+ .num_databases = 256,
.num_macs = 2048,
.num_internal_phys = 5,
.num_ports = 6,
@@ -6128,9 +6339,10 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 3750,
- .atu_move_port_mask = 0x1f,
+ .atu_move_port_mask = 0xf,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.pvt = true,
.multi_chip = true,
.edsa_support = MV88E6XXX_EDSA_SUPPORTED,
@@ -6155,6 +6367,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -6179,6 +6392,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -6204,6 +6418,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -6222,7 +6437,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.invalid_port_mask = BIT(1) | BIT(2) | BIT(8),
.num_internal_phys = 5,
.internal_phys_offset = 3,
- .max_vid = 4095,
+ .max_vid = 8191,
.max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
@@ -6231,6 +6446,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 10,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
@@ -6255,6 +6471,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 9,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
@@ -6280,6 +6497,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 9,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
@@ -6305,6 +6523,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 10,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
@@ -6468,6 +6687,13 @@ static int mv88e6xxx_port_mdb_add(struct dsa_switch *ds, int port,
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_db_load_purge(chip, port, mdb->addr, mdb->vid,
MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC);
+ if (err)
+ goto out;
+
+ if (!mv88e6xxx_port_db_find(chip, mdb->addr, mdb->vid))
+ err = -ENOSPC;
+
+out:
mv88e6xxx_reg_unlock(chip);
return err;
@@ -6922,6 +7148,15 @@ static int mv88e6xxx_crosschip_lag_leave(struct dsa_switch *ds, int sw_index,
return err_sync ? : err_pvt;
}
+static const struct phylink_mac_ops mv88e6xxx_phylink_mac_ops = {
+ .mac_select_pcs = mv88e6xxx_mac_select_pcs,
+ .mac_prepare = mv88e6xxx_mac_prepare,
+ .mac_config = mv88e6xxx_mac_config,
+ .mac_finish = mv88e6xxx_mac_finish,
+ .mac_link_down = mv88e6xxx_mac_link_down,
+ .mac_link_up = mv88e6xxx_mac_link_up,
+};
+
static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.get_tag_protocol = mv88e6xxx_get_tag_protocol,
.change_tag_protocol = mv88e6xxx_change_tag_protocol,
@@ -6930,12 +7165,6 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.port_setup = mv88e6xxx_port_setup,
.port_teardown = mv88e6xxx_port_teardown,
.phylink_get_caps = mv88e6xxx_get_caps,
- .phylink_mac_select_pcs = mv88e6xxx_mac_select_pcs,
- .phylink_mac_prepare = mv88e6xxx_mac_prepare,
- .phylink_mac_config = mv88e6xxx_mac_config,
- .phylink_mac_finish = mv88e6xxx_mac_finish,
- .phylink_mac_link_down = mv88e6xxx_mac_link_down,
- .phylink_mac_link_up = mv88e6xxx_mac_link_up,
.get_strings = mv88e6xxx_get_strings,
.get_ethtool_stats = mv88e6xxx_get_ethtool_stats,
.get_eth_mac_stats = mv88e6xxx_get_eth_mac_stats,
@@ -6943,7 +7172,7 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.get_sset_count = mv88e6xxx_get_sset_count,
.port_max_mtu = mv88e6xxx_get_max_mtu,
.port_change_mtu = mv88e6xxx_change_mtu,
- .get_mac_eee = mv88e6xxx_get_mac_eee,
+ .support_eee = dsa_supports_eee,
.set_mac_eee = mv88e6xxx_set_mac_eee,
.get_eeprom_len = mv88e6xxx_get_eeprom_len,
.get_eeprom = mv88e6xxx_get_eeprom,
@@ -7004,6 +7233,7 @@ static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip)
ds->priv = chip;
ds->dev = dev;
ds->ops = &mv88e6xxx_switch_ops;
+ ds->phylink_mac_ops = &mv88e6xxx_phylink_mac_ops;
ds->ageing_time_min = chip->info->age_time_coeff;
ds->ageing_time_max = chip->info->age_time_coeff * U8_MAX;
@@ -7135,13 +7365,13 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
err = mv88e6xxx_switch_reset(chip);
mv88e6xxx_reg_unlock(chip);
if (err)
- goto out;
+ goto out_phy;
if (np) {
chip->irq = of_irq_get(np, 0);
if (chip->irq == -EPROBE_DEFER) {
err = chip->irq;
- goto out;
+ goto out_phy;
}
}
@@ -7160,7 +7390,7 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
mv88e6xxx_reg_unlock(chip);
if (err)
- goto out;
+ goto out_phy;
if (chip->info->g2_irqs > 0) {
err = mv88e6xxx_g2_irq_setup(chip);
@@ -7194,6 +7424,8 @@ out_g1_irq:
mv88e6xxx_g1_irq_free(chip);
else
mv88e6xxx_irq_poll_free(chip);
+out_phy:
+ mv88e6xxx_phy_destroy(chip);
out:
if (pdata)
dev_put(pdata->netdev);
@@ -7211,12 +7443,6 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev)
chip = ds->priv;
- if (chip->info->ptp_support) {
- mv88e6xxx_hwtstamp_free(chip);
- mv88e6xxx_ptp_free(chip);
- }
-
- mv88e6xxx_phy_destroy(chip);
mv88e6xxx_unregister_switch(chip);
mv88e6xxx_g1_vtu_prob_irq_free(chip);
@@ -7229,6 +7455,8 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev)
mv88e6xxx_g1_irq_free(chip);
else
mv88e6xxx_irq_poll_free(chip);
+
+ mv88e6xxx_phy_destroy(chip);
}
static void mv88e6xxx_shutdown(struct mdio_device *mdiodev)
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index 85eb293381a7..2f211e55cb47 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -13,7 +13,9 @@
#include <linux/irq.h>
#include <linux/gpio/consumer.h>
#include <linux/kthread.h>
+#include <linux/leds.h>
#include <linux/phy.h>
+#include <linux/property.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
#include <net/dsa.h>
@@ -142,6 +144,7 @@ struct mv88e6xxx_info {
unsigned int age_time_coeff;
unsigned int g1_irqs;
unsigned int g2_irqs;
+ int stats_type;
bool pvt;
/* Mark certain ports as invalid. This is required for example for the
@@ -206,6 +209,7 @@ struct mv88e6xxx_gpio_ops;
struct mv88e6xxx_avb_ops;
struct mv88e6xxx_ptp_ops;
struct mv88e6xxx_pcs_ops;
+struct mv88e6xxx_cc_coeffs;
struct mv88e6xxx_irq {
u16 masked;
@@ -237,7 +241,7 @@ struct mv88e6xxx_port_hwtstamp {
u16 tx_seq_id;
/* Current timestamp configuration */
- struct hwtstamp_config tstamp_config;
+ struct kernel_hwtstamp_config tstamp_config;
};
enum mv88e6xxx_policy_mapping {
@@ -275,6 +279,7 @@ struct mv88e6xxx_vlan {
struct mv88e6xxx_port {
struct mv88e6xxx_chip *chip;
int port;
+ struct fwnode_handle *fwnode;
struct mv88e6xxx_vlan bridge_pvid;
u64 serdes_stats[2];
u64 atu_member_violation;
@@ -289,6 +294,11 @@ struct mv88e6xxx_port {
struct devlink_region *region;
void *pcs_private;
+ /* LED related information */
+ bool fiber;
+ struct led_classdev led0;
+ struct led_classdev led1;
+
/* MacAuth Bypass control flag */
bool mab;
};
@@ -408,13 +418,12 @@ struct mv88e6xxx_chip {
struct cyclecounter tstamp_cc;
struct timecounter tstamp_tc;
struct delayed_work overflow_work;
+ const struct mv88e6xxx_cc_coeffs *cc_coeffs;
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_clock_info;
struct delayed_work tai_event_work;
struct ptp_pin_desc pin_config[MV88E6XXX_MAX_GPIO];
- u16 trig_config;
- u16 evcap_config;
u16 enable_count;
/* Current ingress and egress monitor ports */
@@ -432,6 +441,9 @@ struct mv88e6xxx_chip {
/* Bridge MST to SID mappings */
struct list_head msts;
+
+ /* FID map */
+ DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID);
};
struct mv88e6xxx_bus_ops {
@@ -487,6 +499,12 @@ struct mv88e6xxx_ops {
int (*ppu_enable)(struct mv88e6xxx_chip *chip);
int (*ppu_disable)(struct mv88e6xxx_chip *chip);
+ /* Additional handlers to run before and after hard reset, to make sure
+ * that the switch and EEPROM are in a good state.
+ */
+ int (*hardware_reset_pre)(struct mv88e6xxx_chip *chip);
+ int (*hardware_reset_post)(struct mv88e6xxx_chip *chip);
+
/* Switch Software Reset */
int (*reset)(struct mv88e6xxx_chip *chip);
@@ -566,6 +584,9 @@ struct mv88e6xxx_ops {
phy_interface_t mode);
int (*port_get_cmode)(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
+ /* LED control */
+ int (*port_setup_leds)(struct mv88e6xxx_chip *chip, int port);
+
/* Some devices have a per port register indicating what is
* the upstream port this port should forward to.
*/
@@ -584,7 +605,7 @@ struct mv88e6xxx_ops {
/* Return the number of strings describing statistics */
int (*stats_get_sset_count)(struct mv88e6xxx_chip *chip);
- int (*stats_get_strings)(struct mv88e6xxx_chip *chip, uint8_t *data);
+ void (*stats_get_strings)(struct mv88e6xxx_chip *chip, uint8_t **data);
size_t (*stats_get_stat)(struct mv88e6xxx_chip *chip, int port,
const struct mv88e6xxx_hw_stat *stat,
uint64_t *data);
@@ -611,8 +632,8 @@ struct mv88e6xxx_ops {
/* Statistics from the SERDES interface */
int (*serdes_get_sset_count)(struct mv88e6xxx_chip *chip, int port);
- int (*serdes_get_strings)(struct mv88e6xxx_chip *chip, int port,
- uint8_t *data);
+ int (*serdes_get_strings)(struct mv88e6xxx_chip *chip, int port,
+ uint8_t **data);
size_t (*serdes_get_stats)(struct mv88e6xxx_chip *chip, int port,
uint64_t *data);
@@ -709,7 +730,7 @@ struct mv88e6xxx_avb_ops {
};
struct mv88e6xxx_ptp_ops {
- u64 (*clock_read)(const struct cyclecounter *cc);
+ u64 (*clock_read)(struct cyclecounter *cc);
int (*ptp_enable)(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on);
int (*ptp_verify)(struct ptp_clock_info *ptp, unsigned int pin,
@@ -725,10 +746,6 @@ struct mv88e6xxx_ptp_ops {
int arr1_sts_reg;
int dep_sts_reg;
u32 rx_filters;
- u32 cc_shift;
- u32 cc_mult;
- u32 cc_mult_num;
- u32 cc_mult_dem;
};
struct mv88e6xxx_pcs_ops {
@@ -826,6 +843,4 @@ int mv88e6xxx_vtu_walk(struct mv88e6xxx_chip *chip,
void *priv),
void *priv);
-int mv88e6xxx_fid_map(struct mv88e6xxx_chip *chip, unsigned long *bitmap);
-
#endif /* _MV88E6XXX_CHIP_H */
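For readers following the ops-signature change above, here is a minimal sketch (not part of this patch; the function name and the example_hw_stats table are invented for illustration) of how a stats_get_strings implementation can use the new by-reference data pointer together with the generic ethtool_puts() helper:

	static void example_stats_get_strings(struct mv88e6xxx_chip *chip,
					      uint8_t **data)
	{
		int i;

		/* ethtool_puts() copies the string and advances *data */
		for (i = 0; i < ARRAY_SIZE(example_hw_stats); i++)
			ethtool_puts(data, example_hw_stats[i].string);
	}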
diff --git a/drivers/net/dsa/mv88e6xxx/devlink.c b/drivers/net/dsa/mv88e6xxx/devlink.c
index a08dab75e0c0..da69e0b85879 100644
--- a/drivers/net/dsa/mv88e6xxx/devlink.c
+++ b/drivers/net/dsa/mv88e6xxx/devlink.c
@@ -374,32 +374,20 @@ static int mv88e6xxx_region_atu_snapshot(struct devlink *dl,
u8 **data)
{
struct dsa_switch *ds = dsa_devlink_to_ds(dl);
- DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID);
struct mv88e6xxx_devlink_atu_entry *table;
struct mv88e6xxx_chip *chip = ds->priv;
- int fid = -1, count, err;
+ int fid = -1, err = 0, count = 0;
- table = kmalloc_array(mv88e6xxx_num_databases(chip),
- sizeof(struct mv88e6xxx_devlink_atu_entry),
- GFP_KERNEL);
+ table = kcalloc(mv88e6xxx_num_databases(chip),
+ sizeof(struct mv88e6xxx_devlink_atu_entry),
+ GFP_KERNEL);
if (!table)
return -ENOMEM;
- memset(table, 0, mv88e6xxx_num_databases(chip) *
- sizeof(struct mv88e6xxx_devlink_atu_entry));
-
- count = 0;
-
mv88e6xxx_reg_lock(chip);
- err = mv88e6xxx_fid_map(chip, fid_bitmap);
- if (err) {
- kfree(table);
- goto out;
- }
-
while (1) {
- fid = find_next_bit(fid_bitmap, MV88E6XXX_N_FID, fid + 1);
+ fid = find_next_bit(chip->fid_bitmap, MV88E6XXX_N_FID, fid + 1);
if (fid == MV88E6XXX_N_FID)
break;
@@ -654,7 +642,7 @@ static struct mv88e6xxx_region_priv mv88e6xxx_region_global1_priv = {
.id = MV88E6XXX_REGION_GLOBAL1,
};
-static struct devlink_region_ops mv88e6xxx_region_global1_ops = {
+static const struct devlink_region_ops mv88e6xxx_region_global1_ops = {
.name = "global1",
.snapshot = mv88e6xxx_region_global_snapshot,
.destructor = kfree,
@@ -665,32 +653,32 @@ static struct mv88e6xxx_region_priv mv88e6xxx_region_global2_priv = {
.id = MV88E6XXX_REGION_GLOBAL2,
};
-static struct devlink_region_ops mv88e6xxx_region_global2_ops = {
+static const struct devlink_region_ops mv88e6xxx_region_global2_ops = {
.name = "global2",
.snapshot = mv88e6xxx_region_global_snapshot,
.destructor = kfree,
.priv = &mv88e6xxx_region_global2_priv,
};
-static struct devlink_region_ops mv88e6xxx_region_atu_ops = {
+static const struct devlink_region_ops mv88e6xxx_region_atu_ops = {
.name = "atu",
.snapshot = mv88e6xxx_region_atu_snapshot,
.destructor = kfree,
};
-static struct devlink_region_ops mv88e6xxx_region_vtu_ops = {
+static const struct devlink_region_ops mv88e6xxx_region_vtu_ops = {
.name = "vtu",
.snapshot = mv88e6xxx_region_vtu_snapshot,
.destructor = kfree,
};
-static struct devlink_region_ops mv88e6xxx_region_stu_ops = {
+static const struct devlink_region_ops mv88e6xxx_region_stu_ops = {
.name = "stu",
.snapshot = mv88e6xxx_region_stu_snapshot,
.destructor = kfree,
};
-static struct devlink_region_ops mv88e6xxx_region_pvt_ops = {
+static const struct devlink_region_ops mv88e6xxx_region_pvt_ops = {
.name = "pvt",
.snapshot = mv88e6xxx_region_pvt_snapshot,
.destructor = kfree,
@@ -703,13 +691,13 @@ static const struct devlink_port_region_ops mv88e6xxx_region_port_ops = {
};
struct mv88e6xxx_region {
- struct devlink_region_ops *ops;
+ const struct devlink_region_ops *ops;
u64 size;
bool (*cond)(struct mv88e6xxx_chip *chip);
};
-static struct mv88e6xxx_region mv88e6xxx_regions[] = {
+static const struct mv88e6xxx_region mv88e6xxx_regions[] = {
[MV88E6XXX_REGION_GLOBAL1] = {
.ops = &mv88e6xxx_region_global1_ops,
.size = 32 * sizeof(u16)
@@ -743,7 +731,8 @@ void mv88e6xxx_teardown_devlink_regions_global(struct dsa_switch *ds)
int i;
for (i = 0; i < ARRAY_SIZE(mv88e6xxx_regions); i++)
- dsa_devlink_region_destroy(chip->regions[i]);
+ if (chip->regions[i])
+ dsa_devlink_region_destroy(chip->regions[i]);
}
void mv88e6xxx_teardown_devlink_regions_port(struct dsa_switch *ds, int port)
@@ -774,7 +763,7 @@ int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds)
{
bool (*cond)(struct mv88e6xxx_chip *chip);
struct mv88e6xxx_chip *chip = ds->priv;
- struct devlink_region_ops *ops;
+ const struct devlink_region_ops *ops;
struct devlink_region *region;
u64 size;
int i, j;
diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
index 49444a72ff09..9820cd596757 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.c
+++ b/drivers/net/dsa/mv88e6xxx/global1.c
@@ -75,6 +75,95 @@ static int mv88e6xxx_g1_wait_init_ready(struct mv88e6xxx_chip *chip)
return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_STS, bit, 1);
}
+static int mv88e6250_g1_eeprom_reload(struct mv88e6xxx_chip *chip)
+{
+ /* MV88E6185_G1_CTL1_RELOAD_EEPROM is also valid for 88E6250 */
+ int bit = __bf_shf(MV88E6185_G1_CTL1_RELOAD_EEPROM);
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &val);
+ if (err)
+ return err;
+
+ val |= MV88E6185_G1_CTL1_RELOAD_EEPROM;
+
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, val);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_CTL1, bit, 0);
+}
+
+/* Returns 0 when done, -EBUSY when waiting, other negative codes on error */
+static int mv88e6xxx_g1_is_eeprom_done(struct mv88e6xxx_chip *chip)
+{
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &val);
+ if (err < 0) {
+ dev_err(chip->dev, "Error reading status");
+ return err;
+ }
+
+ /* If the switch is still resetting, it may not
+ * respond on the bus, and so MDIO read returns
+ * 0xffff. Differentiate between that, and waiting for
+ * the EEPROM to be done by bit 0 being set.
+ */
+ if (val == 0xffff || !(val & BIT(MV88E6XXX_G1_STS_IRQ_EEPROM_DONE)))
+ return -EBUSY;
+
+ return 0;
+}
+
+/* As the EEInt (EEPROM done) flag clears on read of the status register, this
+ * function must be called directly after a hard reset or EEPROM ReLoad request,
+ * or the done condition may have been missed
+ */
+int mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip)
+{
+ const unsigned long timeout = jiffies + 1 * HZ;
+ int ret;
+
+ /* Wait up to 1 second for the switch to finish reading the
+ * EEPROM.
+ */
+ while (time_before(jiffies, timeout)) {
+ ret = mv88e6xxx_g1_is_eeprom_done(chip);
+ if (ret != -EBUSY)
+ return ret;
+ }
+
+ dev_err(chip->dev, "Timeout waiting for EEPROM done");
+ return -ETIMEDOUT;
+}
+
+int mv88e6250_g1_wait_eeprom_done_prereset(struct mv88e6xxx_chip *chip)
+{
+ int ret;
+
+ ret = mv88e6xxx_g1_is_eeprom_done(chip);
+ if (ret != -EBUSY)
+ return ret;
+
+ /* Pre-reset, we don't know the state of the switch - when
+ * mv88e6xxx_g1_is_eeprom_done() returns -EBUSY, that may be because
+ * the switch is actually busy reading the EEPROM, or because
+ * MV88E6XXX_G1_STS_IRQ_EEPROM_DONE has been cleared by an unrelated
+ * status register read already.
+ *
+ * To account for the latter case, trigger another EEPROM reload for
+ * another chance at seeing the done flag.
+ */
+ ret = mv88e6250_g1_eeprom_reload(chip);
+ if (ret)
+ return ret;
+
+ return mv88e6xxx_g1_wait_eeprom_done(chip);
+}
+
/* Offset 0x01: Switch MAC Address Register Bytes 0 & 1
* Offset 0x02: Switch MAC Address Register Bytes 2 & 3
* Offset 0x03: Switch MAC Address Register Bytes 4 & 5
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
index 1095261f5b49..3dbb7a1b8fe1 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.h
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -282,6 +282,8 @@ int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr);
int mv88e6185_g1_reset(struct mv88e6xxx_chip *chip);
int mv88e6352_g1_reset(struct mv88e6xxx_chip *chip);
int mv88e6250_g1_reset(struct mv88e6xxx_chip *chip);
+int mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip);
+int mv88e6250_g1_wait_eeprom_done_prereset(struct mv88e6xxx_chip *chip);
int mv88e6185_g1_ppu_enable(struct mv88e6xxx_chip *chip);
int mv88e6185_g1_ppu_disable(struct mv88e6xxx_chip *chip);
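A hedged sketch of how the hardware_reset_pre/hardware_reset_post hooks and the EEPROM-done helpers declared above are expected to cooperate around a GPIO hard reset. The chip.c wiring is outside this excerpt, so the function name and exact flow below are assumptions, not the patch's code:

	static void example_hardware_reset(struct mv88e6xxx_chip *chip)
	{
		int err;

		/* e.g. mv88e6250_g1_wait_eeprom_done_prereset() on 88E6250 */
		if (chip->info->ops->hardware_reset_pre) {
			err = chip->info->ops->hardware_reset_pre(chip);
			if (err)
				dev_err(chip->dev, "pre-reset error: %d\n", err);
		}

		gpiod_set_value_cansleep(chip->reset, 1);
		usleep_range(10000, 20000);
		gpiod_set_value_cansleep(chip->reset, 0);
		usleep_range(10000, 20000);

		/* e.g. mv88e6xxx_g1_wait_eeprom_done() to catch EEInt */
		if (chip->info->ops->hardware_reset_post) {
			err = chip->info->ops->hardware_reset_post(chip);
			if (err)
				dev_err(chip->dev, "post-reset error: %d\n", err);
		}
	}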
diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c
index ce3b3690c3c0..c47f068f56b3 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_atu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c
@@ -457,7 +457,8 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
trace_mv88e6xxx_atu_full_violation(chip->dev, spid,
entry.portvec, entry.mac,
fid);
- chip->ports[spid].atu_full_violation++;
+ if (spid < ARRAY_SIZE(chip->ports))
+ chip->ports[spid].atu_full_violation++;
}
return IRQ_HANDLED;
diff --git a/drivers/net/dsa/mv88e6xxx/global1_vtu.c b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
index bcfb4a812055..b524f27a2f0d 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_vtu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
@@ -471,6 +471,9 @@ int mv88e6xxx_g1_vtu_flush(struct mv88e6xxx_chip *chip)
{
int err;
+ /* As part of the VTU flush, refresh FID map */
+ bitmap_zero(chip->fid_bitmap, MV88E6XXX_N_FID);
+
err = mv88e6xxx_g1_vtu_op_wait(chip);
if (err)
return err;
diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c
index b2b5f6ba438f..30a6ffa7817b 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.c
+++ b/drivers/net/dsa/mv88e6xxx/global2.c
@@ -1154,8 +1154,8 @@ int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip)
if (err)
return err;
- chip->g2_irq.domain = irq_domain_add_simple(
- chip->dev->of_node, 16, 0, &mv88e6xxx_g2_irq_domain_ops, chip);
+ chip->g2_irq.domain = irq_domain_create_simple(dev_fwnode(chip->dev), 16, 0,
+ &mv88e6xxx_g2_irq_domain_ops, chip);
if (!chip->g2_irq.domain)
return -ENOMEM;
diff --git a/drivers/net/dsa/mv88e6xxx/global2_scratch.c b/drivers/net/dsa/mv88e6xxx/global2_scratch.c
index 61ab6cc4fbfc..53a6d3ed63b3 100644
--- a/drivers/net/dsa/mv88e6xxx/global2_scratch.c
+++ b/drivers/net/dsa/mv88e6xxx/global2_scratch.c
@@ -146,7 +146,7 @@ static int mv88e6352_g2_scratch_gpio_set_data(struct mv88e6xxx_chip *chip,
* @chip: chip private data
* @pin: gpio index
*
- * Return: 0 for output, 1 for input (same as GPIOF_DIR_XXX).
+ * Return: 0 for output, 1 for input.
*/
static int mv88e6352_g2_scratch_gpio_get_dir(struct mv88e6xxx_chip *chip,
unsigned int pin)
diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.c b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
index 331b4ca089ff..6e6472a3b75a 100644
--- a/drivers/net/dsa/mv88e6xxx/hwtstamp.c
+++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
@@ -64,7 +64,7 @@ static int mv88e6xxx_ptp_read(struct mv88e6xxx_chip *chip, int addr,
#define TX_TSTAMP_TIMEOUT msecs_to_jiffies(40)
int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
const struct mv88e6xxx_ptp_ops *ptp_ops;
struct mv88e6xxx_chip *chip;
@@ -89,7 +89,7 @@ int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port,
}
static int mv88e6xxx_set_hwtstamp_config(struct mv88e6xxx_chip *chip, int port,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops;
struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port];
@@ -169,42 +169,38 @@ static int mv88e6xxx_set_hwtstamp_config(struct mv88e6xxx_chip *chip, int port,
}
int mv88e6xxx_port_hwtstamp_set(struct dsa_switch *ds, int port,
- struct ifreq *ifr)
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port];
- struct hwtstamp_config config;
int err;
if (!chip->info->ptp_support)
return -EOPNOTSUPP;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- err = mv88e6xxx_set_hwtstamp_config(chip, port, &config);
+ err = mv88e6xxx_set_hwtstamp_config(chip, port, config);
if (err)
return err;
/* Save the chosen configuration to be returned later. */
- memcpy(&ps->tstamp_config, &config, sizeof(config));
+ ps->tstamp_config = *config;
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
int mv88e6xxx_port_hwtstamp_get(struct dsa_switch *ds, int port,
- struct ifreq *ifr)
+ struct kernel_hwtstamp_config *config)
{
struct mv88e6xxx_chip *chip = ds->priv;
struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port];
- struct hwtstamp_config *config = &ps->tstamp_config;
if (!chip->info->ptp_support)
return -EOPNOTSUPP;
- return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
- -EFAULT : 0;
+ *config = ps->tstamp_config;
+
+ return 0;
}
/* Returns a pointer to the PTP header if the caller should time stamp,
@@ -574,7 +570,7 @@ int mv88e6xxx_hwtstamp_setup(struct mv88e6xxx_chip *chip)
}
/* Set the ethertype of L2 PTP messages */
- err = mv88e6xxx_ptp_write(chip, MV88E6XXX_PTP_GC_ETYPE, ETH_P_1588);
+ err = mv88e6xxx_ptp_write(chip, MV88E6XXX_PTP_ETHERTYPE, ETH_P_1588);
if (err)
return err;
diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.h b/drivers/net/dsa/mv88e6xxx/hwtstamp.h
index cf7fb6d660b1..c359821d5a6e 100644
--- a/drivers/net/dsa/mv88e6xxx/hwtstamp.h
+++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.h
@@ -111,9 +111,10 @@
#ifdef CONFIG_NET_DSA_MV88E6XXX_PTP
int mv88e6xxx_port_hwtstamp_set(struct dsa_switch *ds, int port,
- struct ifreq *ifr);
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack);
int mv88e6xxx_port_hwtstamp_get(struct dsa_switch *ds, int port,
- struct ifreq *ifr);
+ struct kernel_hwtstamp_config *cfg);
bool mv88e6xxx_port_rxtstamp(struct dsa_switch *ds, int port,
struct sk_buff *clone, unsigned int type);
@@ -121,8 +122,9 @@ void mv88e6xxx_port_txtstamp(struct dsa_switch *ds, int port,
struct sk_buff *skb);
int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port,
- struct ethtool_ts_info *info);
+ struct kernel_ethtool_ts_info *info);
+long mv88e6xxx_hwtstamp_work(struct ptp_clock_info *ptp);
int mv88e6xxx_hwtstamp_setup(struct mv88e6xxx_chip *chip);
void mv88e6xxx_hwtstamp_free(struct mv88e6xxx_chip *chip);
int mv88e6352_hwtstamp_port_enable(struct mv88e6xxx_chip *chip, int port);
@@ -132,14 +134,17 @@ int mv88e6165_global_disable(struct mv88e6xxx_chip *chip);
#else /* !CONFIG_NET_DSA_MV88E6XXX_PTP */
-static inline int mv88e6xxx_port_hwtstamp_set(struct dsa_switch *ds,
- int port, struct ifreq *ifr)
+static inline int
+mv88e6xxx_port_hwtstamp_set(struct dsa_switch *ds, int port,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
}
-static inline int mv88e6xxx_port_hwtstamp_get(struct dsa_switch *ds,
- int port, struct ifreq *ifr)
+static inline int
+mv88e6xxx_port_hwtstamp_get(struct dsa_switch *ds, int port,
+ struct kernel_hwtstamp_config *config)
{
return -EOPNOTSUPP;
}
@@ -157,7 +162,7 @@ static inline void mv88e6xxx_port_txtstamp(struct dsa_switch *ds, int port,
}
static inline int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
return -EOPNOTSUPP;
}
diff --git a/drivers/net/dsa/mv88e6xxx/leds.c b/drivers/net/dsa/mv88e6xxx/leds.c
new file mode 100644
index 000000000000..ab3bc645da56
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/leds.c
@@ -0,0 +1,848 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/bitfield.h>
+#include <linux/leds.h>
+#include <linux/property.h>
+
+#include "chip.h"
+#include "global2.h"
+#include "port.h"
+
+/* Offset 0x16: LED control */
+
+static int mv88e6xxx_port_led_write(struct mv88e6xxx_chip *chip, int port, u16 reg)
+{
+ reg |= MV88E6XXX_PORT_LED_CONTROL_UPDATE;
+
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_LED_CONTROL, reg);
+}
+
+static int mv88e6xxx_port_led_read(struct mv88e6xxx_chip *chip, int port,
+ u16 ptr, u16 *val)
+{
+ int err;
+
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_LED_CONTROL, ptr);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_LED_CONTROL, val);
+ *val &= 0x3ff;
+
+ return err;
+}
+
+static int mv88e6xxx_led_brightness_set(struct mv88e6xxx_port *p, int led,
+ int brightness)
+{
+ u16 reg;
+ int err;
+
+ err = mv88e6xxx_port_led_read(p->chip, p->port,
+ MV88E6XXX_PORT_LED_CONTROL_POINTER_LED01_CTRL,
+ &reg);
+ if (err)
+ return err;
+
+ if (led == 1)
+ reg &= ~MV88E6XXX_PORT_LED_CONTROL_LED1_SEL_MASK;
+ else
+ reg &= ~MV88E6XXX_PORT_LED_CONTROL_LED0_SEL_MASK;
+
+ if (brightness) {
+ /* Selector 0x0f == Force LED ON */
+ if (led == 1)
+ reg |= MV88E6XXX_PORT_LED_CONTROL_LED1_SELF;
+ else
+ reg |= MV88E6XXX_PORT_LED_CONTROL_LED0_SELF;
+ } else {
+ /* Selector 0x0e == Force LED OFF */
+ if (led == 1)
+ reg |= MV88E6XXX_PORT_LED_CONTROL_LED1_SELE;
+ else
+ reg |= MV88E6XXX_PORT_LED_CONTROL_LED0_SELE;
+ }
+
+ reg |= MV88E6XXX_PORT_LED_CONTROL_POINTER_LED01_CTRL;
+
+ return mv88e6xxx_port_led_write(p->chip, p->port, reg);
+}
+
+static int mv88e6xxx_led0_brightness_set_blocking(struct led_classdev *ldev,
+ enum led_brightness brightness)
+{
+ struct mv88e6xxx_port *p = container_of(ldev, struct mv88e6xxx_port, led0);
+ int err;
+
+ mv88e6xxx_reg_lock(p->chip);
+ err = mv88e6xxx_led_brightness_set(p, 0, brightness);
+ mv88e6xxx_reg_unlock(p->chip);
+
+ return err;
+}
+
+static int mv88e6xxx_led1_brightness_set_blocking(struct led_classdev *ldev,
+ enum led_brightness brightness)
+{
+ struct mv88e6xxx_port *p = container_of(ldev, struct mv88e6xxx_port, led1);
+ int err;
+
+ mv88e6xxx_reg_lock(p->chip);
+ err = mv88e6xxx_led_brightness_set(p, 1, brightness);
+ mv88e6xxx_reg_unlock(p->chip);
+
+ return err;
+}
+
+struct mv88e6xxx_led_hwconfig {
+ int led;
+ u8 portmask;
+ unsigned long rules;
+ bool fiber;
+ bool blink_activity;
+ u16 selector;
+};
+
+/* The following is a lookup table to check which rules we can support on a
+ * certain LED, given restrictions such as some rules only working with fiber
+ * (SFP) connections and some blinking on activity by default.
+ */
+#define MV88E6XXX_PORTS_0_3 (BIT(0) | BIT(1) | BIT(2) | BIT(3))
+#define MV88E6XXX_PORTS_4_5 (BIT(4) | BIT(5))
+#define MV88E6XXX_PORT_4 BIT(4)
+#define MV88E6XXX_PORT_5 BIT(5)
+
+/* Entries are listed in selector order.
+ *
+ * These configurations vary across different switch families; list
+ * different tables per family here.
+ */
+static const struct mv88e6xxx_led_hwconfig mv88e6352_led_hwconfigs[] = {
+ {
+ .led = 0,
+ .portmask = MV88E6XXX_PORT_4,
+ .rules = BIT(TRIGGER_NETDEV_LINK),
+ .blink_activity = true,
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED0_SEL0,
+ },
+ {
+ .led = 1,
+ .portmask = MV88E6XXX_PORT_5,
+ .rules = BIT(TRIGGER_NETDEV_LINK_1000),
+ .blink_activity = true,
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED1_SEL0,
+ },
+ {
+ .led = 0,
+ .portmask = MV88E6XXX_PORTS_0_3,
+ .rules = BIT(TRIGGER_NETDEV_LINK_100) | BIT(TRIGGER_NETDEV_LINK_1000),
+ .blink_activity = true,
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED0_SEL1,
+ },
+ {
+ .led = 1,
+ .portmask = MV88E6XXX_PORTS_0_3,
+ .rules = BIT(TRIGGER_NETDEV_LINK_10) | BIT(TRIGGER_NETDEV_LINK_100),
+ .blink_activity = true,
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED1_SEL1,
+ },
+ {
+ .led = 0,
+ .portmask = MV88E6XXX_PORTS_4_5,
+ .rules = BIT(TRIGGER_NETDEV_LINK_100),
+ .blink_activity = true,
+ .fiber = true,
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED0_SEL1,
+ },
+ {
+ .led = 1,
+ .portmask = MV88E6XXX_PORTS_4_5,
+ .rules = BIT(TRIGGER_NETDEV_LINK_1000),
+ .blink_activity = true,
+ .fiber = true,
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED1_SEL1,
+ },
+ {
+ .led = 0,
+ .portmask = MV88E6XXX_PORTS_0_3,
+ .rules = BIT(TRIGGER_NETDEV_LINK_1000),
+ .blink_activity = true,
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED0_SEL2,
+ },
+ {
+ .led = 1,
+ .portmask = MV88E6XXX_PORTS_0_3,
+ .rules = BIT(TRIGGER_NETDEV_LINK_10) | BIT(TRIGGER_NETDEV_LINK_100),
+ .blink_activity = true,
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED1_SEL2,
+ },
+ {
+ .led = 0,
+ .portmask = MV88E6XXX_PORTS_4_5,
+ .rules = BIT(TRIGGER_NETDEV_LINK_1000),
+ .blink_activity = true,
+ .fiber = true,
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED0_SEL2,
+ },
+ {
+ .led = 1,
+ .portmask = MV88E6XXX_PORTS_4_5,
+ .rules = BIT(TRIGGER_NETDEV_LINK_100),
+ .blink_activity = true,
+ .fiber = true,
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED1_SEL2,
+ },
+ {
+ .led = 0,
+ .portmask = MV88E6XXX_PORTS_0_3,
+ .rules = BIT(TRIGGER_NETDEV_LINK),
+ .blink_activity = true,
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED0_SEL3,
+ },
+ {
+ .led = 1,
+ .portmask = MV88E6XXX_PORTS_0_3,
+ .rules = BIT(TRIGGER_NETDEV_LINK_1000),
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED1_SEL3,
+ },
+ {
+ .led = 1,
+ .portmask = MV88E6XXX_PORTS_4_5,
+ .rules = BIT(TRIGGER_NETDEV_LINK),
+ .fiber = true,
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED1_SEL3,
+ },
+ {
+ .led = 1,
+ .portmask = MV88E6XXX_PORT_4,
+ .rules = BIT(TRIGGER_NETDEV_LINK),
+ .blink_activity = true,
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED1_SEL4,
+ },
+ {
+ .led = 1,
+ .portmask = MV88E6XXX_PORT_5,
+ .rules = BIT(TRIGGER_NETDEV_LINK),
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED1_SEL5,
+ },
+ {
+ .led = 0,
+ .portmask = MV88E6XXX_PORTS_0_3,
+ .rules = BIT(TRIGGER_NETDEV_FULL_DUPLEX),
+ .blink_activity = true,
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED0_SEL6,
+ },
+ {
+ .led = 1,
+ .portmask = MV88E6XXX_PORTS_0_3,
+ .rules = BIT(TRIGGER_NETDEV_LINK_10) | BIT(TRIGGER_NETDEV_LINK_1000),
+ .blink_activity = true,
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED1_SEL6,
+ },
+ {
+ .led = 0,
+ .portmask = MV88E6XXX_PORT_4,
+ .rules = BIT(TRIGGER_NETDEV_FULL_DUPLEX),
+ .blink_activity = true,
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED0_SEL6,
+ },
+ {
+ .led = 1,
+ .portmask = MV88E6XXX_PORT_5,
+ .rules = BIT(TRIGGER_NETDEV_FULL_DUPLEX),
+ .blink_activity = true,
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED1_SEL6,
+ },
+ {
+ .led = 0,
+ .portmask = MV88E6XXX_PORTS_0_3,
+ .rules = BIT(TRIGGER_NETDEV_LINK_10) | BIT(TRIGGER_NETDEV_LINK_1000),
+ .blink_activity = true,
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED0_SEL7,
+ },
+ {
+ .led = 1,
+ .portmask = MV88E6XXX_PORTS_0_3,
+ .rules = BIT(TRIGGER_NETDEV_LINK_10) | BIT(TRIGGER_NETDEV_LINK_1000),
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED1_SEL7,
+ },
+ {
+ .led = 0,
+ .portmask = MV88E6XXX_PORTS_0_3,
+ .rules = BIT(TRIGGER_NETDEV_LINK),
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED0_SEL8,
+ },
+ {
+ .led = 1,
+ .portmask = MV88E6XXX_PORTS_0_3,
+ .rules = BIT(TRIGGER_NETDEV_LINK),
+ .blink_activity = true,
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED1_SEL8,
+ },
+ {
+ .led = 0,
+ .portmask = MV88E6XXX_PORT_5,
+ .rules = BIT(TRIGGER_NETDEV_LINK),
+ .blink_activity = true,
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED0_SEL8,
+ },
+ {
+ .led = 0,
+ .portmask = MV88E6XXX_PORTS_0_3,
+ .rules = BIT(TRIGGER_NETDEV_LINK_10),
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED0_SEL9,
+ },
+ {
+ .led = 1,
+ .portmask = MV88E6XXX_PORTS_0_3,
+ .rules = BIT(TRIGGER_NETDEV_LINK_100),
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED1_SEL9,
+ },
+ {
+ .led = 0,
+ .portmask = MV88E6XXX_PORTS_0_3,
+ .rules = BIT(TRIGGER_NETDEV_LINK_10),
+ .blink_activity = true,
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED0_SELA,
+ },
+ {
+ .led = 1,
+ .portmask = MV88E6XXX_PORTS_0_3,
+ .rules = BIT(TRIGGER_NETDEV_LINK_100),
+ .blink_activity = true,
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED1_SELA,
+ },
+ {
+ .led = 0,
+ .portmask = MV88E6XXX_PORTS_0_3,
+ .rules = BIT(TRIGGER_NETDEV_LINK_100) | BIT(TRIGGER_NETDEV_LINK_1000),
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED0_SELB,
+ },
+ {
+ .led = 1,
+ .portmask = MV88E6XXX_PORTS_0_3,
+ .rules = BIT(TRIGGER_NETDEV_LINK_100) | BIT(TRIGGER_NETDEV_LINK_1000),
+ .blink_activity = true,
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED1_SELB,
+ },
+};
+
+/* mv88e6xxx_led_match_selector() - look up the appropriate LED mode selector
+ * @p: port state container
+ * @led: LED number, 0 or 1
+ * @blink_activity: blink the LED (usually blink on indicated activity)
+ * @fiber: the link is connected to fiber such as SFP
+ * @rules: LED status flags from the LED classdev core
+ * @selector: fill in the selector in this parameter with an OR operation
+ */
+static int mv88e6xxx_led_match_selector(struct mv88e6xxx_port *p, int led, bool blink_activity,
+ bool fiber, unsigned long rules, u16 *selector)
+{
+ const struct mv88e6xxx_led_hwconfig *conf;
+ int i;
+
+ /* No rules means we turn the LED off */
+ if (!rules) {
+ if (led == 1)
+ *selector |= MV88E6XXX_PORT_LED_CONTROL_LED1_SELE;
+ else
+ *selector |= MV88E6XXX_PORT_LED_CONTROL_LED0_SELE;
+ return 0;
+ }
+
+	/* TODO: these rules are for the MV88E6352; when adding other families,
+	 * make sure to select the table that matches the specific switch
+	 * family.
+ */
+ for (i = 0; i < ARRAY_SIZE(mv88e6352_led_hwconfigs); i++) {
+ conf = &mv88e6352_led_hwconfigs[i];
+
+ if (conf->led != led)
+ continue;
+
+ if (!(conf->portmask & BIT(p->port)))
+ continue;
+
+ if (conf->blink_activity != blink_activity)
+ continue;
+
+ if (conf->fiber != fiber)
+ continue;
+
+ if (conf->rules == rules) {
+ dev_dbg(p->chip->dev, "port%d LED %d set selector %04x for rules %08lx\n",
+ p->port, led, conf->selector, rules);
+ *selector |= conf->selector;
+ return 0;
+ }
+ }
+
+ return -EOPNOTSUPP;
+}
+
+/* mv88e6xxx_led_match_rule() - find Linux netdev rules from a selector value
+ * @p: port state container
+ * @selector: the selector value from the LED activity register
+ * @led: LED number, 0 or 1
+ * @rules: Linux netdev activity rules found from the selector
+ */
+static int
+mv88e6xxx_led_match_rule(struct mv88e6xxx_port *p, u16 selector, int led, unsigned long *rules)
+{
+ const struct mv88e6xxx_led_hwconfig *conf;
+ int i;
+
+ /* Find the selector in the table, we just look for the right selector
+ * and ignore if the activity has special properties such as blinking
+ * or is fiber-only.
+ */
+ for (i = 0; i < ARRAY_SIZE(mv88e6352_led_hwconfigs); i++) {
+ conf = &mv88e6352_led_hwconfigs[i];
+
+ if (conf->led != led)
+ continue;
+
+ if (!(conf->portmask & BIT(p->port)))
+ continue;
+
+ if (conf->selector == selector) {
+ dev_dbg(p->chip->dev, "port%d LED %d has selector %04x, rules %08lx\n",
+ p->port, led, selector, conf->rules);
+ *rules = conf->rules;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+/* mv88e6xxx_led_get_selector() - get the appropriate LED mode selector
+ * @p: port state container
+ * @led: LED number, 0 or 1
+ * @fiber: the link is connected to fiber such as SFP
+ * @rules: LED status flags from the LED classdev core
+ * @selector: fill in the selector in this parameter with an OR operation
+ */
+static int mv88e6xxx_led_get_selector(struct mv88e6xxx_port *p, int led,
+ bool fiber, unsigned long rules, u16 *selector)
+{
+ int err;
+
+	/* First try to locate a trigger with a solid indicator (such as LED
+	 * on for a 1000 link); if that fails, do a second sweep to find
+	 * something suitable with a trigger that will blink on activity.
+ */
+ err = mv88e6xxx_led_match_selector(p, led, false, fiber, rules, selector);
+ if (err)
+ return mv88e6xxx_led_match_selector(p, led, true, fiber, rules, selector);
+
+ return 0;
+}
+
+/* Sets up the hardware blinking period */
+static int mv88e6xxx_led_set_blinking_period(struct mv88e6xxx_port *p, int led,
+ unsigned long delay_on, unsigned long delay_off)
+{
+ unsigned long period;
+ u16 reg;
+
+ period = delay_on + delay_off;
+
+ reg = 0;
+
+ switch (period) {
+ case 21:
+ reg |= MV88E6XXX_PORT_LED_CONTROL_0x06_BLINK_RATE_21MS;
+ break;
+ case 42:
+ reg |= MV88E6XXX_PORT_LED_CONTROL_0x06_BLINK_RATE_42MS;
+ break;
+ case 84:
+ reg |= MV88E6XXX_PORT_LED_CONTROL_0x06_BLINK_RATE_84MS;
+ break;
+ case 168:
+ reg |= MV88E6XXX_PORT_LED_CONTROL_0x06_BLINK_RATE_168MS;
+ break;
+ case 336:
+ reg |= MV88E6XXX_PORT_LED_CONTROL_0x06_BLINK_RATE_336MS;
+ break;
+ case 672:
+ reg |= MV88E6XXX_PORT_LED_CONTROL_0x06_BLINK_RATE_672MS;
+ break;
+ default:
+ /* Fall back to software blinking */
+ return -EINVAL;
+ }
+
+	/* This is essentially the PWM duty cycle: how much of the period
+	 * the LED will be on. Zero isn't great in most cases.
+ */
+ switch (delay_on) {
+ case 0:
+ /* This is usually pretty useless and will make the LED look OFF */
+ reg |= MV88E6XXX_PORT_LED_CONTROL_0x06_PULSE_STRETCH_NONE;
+ break;
+ case 21:
+ reg |= MV88E6XXX_PORT_LED_CONTROL_0x06_PULSE_STRETCH_21MS;
+ break;
+ case 42:
+ reg |= MV88E6XXX_PORT_LED_CONTROL_0x06_PULSE_STRETCH_42MS;
+ break;
+ case 84:
+ reg |= MV88E6XXX_PORT_LED_CONTROL_0x06_PULSE_STRETCH_84MS;
+ break;
+ case 168:
+ reg |= MV88E6XXX_PORT_LED_CONTROL_0x06_PULSE_STRETCH_168MS;
+ break;
+ default:
+ /* Just use something non-zero */
+ reg |= MV88E6XXX_PORT_LED_CONTROL_0x06_PULSE_STRETCH_21MS;
+ break;
+ }
+
+ /* Set up blink rate */
+ reg |= MV88E6XXX_PORT_LED_CONTROL_POINTER_STRETCH_BLINK;
+
+ return mv88e6xxx_port_led_write(p->chip, p->port, reg);
+}
+
+static int mv88e6xxx_led_blink_set(struct mv88e6xxx_port *p, int led,
+ unsigned long *delay_on, unsigned long *delay_off)
+{
+ u16 reg;
+ int err;
+
+	/* Choose a sensible default of 336 ms (~3 Hz) */
+ if ((*delay_on == 0) && (*delay_off == 0)) {
+ *delay_on = 168;
+ *delay_off = 168;
+ }
+
+ /* No off delay is just on */
+ if (*delay_off == 0)
+ return mv88e6xxx_led_brightness_set(p, led, 1);
+
+ err = mv88e6xxx_led_set_blinking_period(p, led, *delay_on, *delay_off);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_port_led_read(p->chip, p->port,
+ MV88E6XXX_PORT_LED_CONTROL_POINTER_LED01_CTRL,
+ &reg);
+ if (err)
+ return err;
+
+ if (led == 1)
+ reg &= ~MV88E6XXX_PORT_LED_CONTROL_LED1_SEL_MASK;
+ else
+ reg &= ~MV88E6XXX_PORT_LED_CONTROL_LED0_SEL_MASK;
+
+ /* This will select the forced blinking status */
+ if (led == 1)
+ reg |= MV88E6XXX_PORT_LED_CONTROL_LED1_SELD;
+ else
+ reg |= MV88E6XXX_PORT_LED_CONTROL_LED0_SELD;
+
+ reg |= MV88E6XXX_PORT_LED_CONTROL_POINTER_LED01_CTRL;
+
+ return mv88e6xxx_port_led_write(p->chip, p->port, reg);
+}
+
+static int mv88e6xxx_led0_blink_set(struct led_classdev *ldev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct mv88e6xxx_port *p = container_of(ldev, struct mv88e6xxx_port, led0);
+ int err;
+
+ mv88e6xxx_reg_lock(p->chip);
+ err = mv88e6xxx_led_blink_set(p, 0, delay_on, delay_off);
+ mv88e6xxx_reg_unlock(p->chip);
+
+ return err;
+}
+
+static int mv88e6xxx_led1_blink_set(struct led_classdev *ldev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct mv88e6xxx_port *p = container_of(ldev, struct mv88e6xxx_port, led1);
+ int err;
+
+ mv88e6xxx_reg_lock(p->chip);
+ err = mv88e6xxx_led_blink_set(p, 1, delay_on, delay_off);
+ mv88e6xxx_reg_unlock(p->chip);
+
+ return err;
+}
+
+static int
+mv88e6xxx_led0_hw_control_is_supported(struct led_classdev *ldev, unsigned long rules)
+{
+ struct mv88e6xxx_port *p = container_of(ldev, struct mv88e6xxx_port, led0);
+ u16 selector = 0;
+
+ return mv88e6xxx_led_get_selector(p, 0, p->fiber, rules, &selector);
+}
+
+static int
+mv88e6xxx_led1_hw_control_is_supported(struct led_classdev *ldev, unsigned long rules)
+{
+ struct mv88e6xxx_port *p = container_of(ldev, struct mv88e6xxx_port, led1);
+ u16 selector = 0;
+
+ return mv88e6xxx_led_get_selector(p, 1, p->fiber, rules, &selector);
+}
+
+static int mv88e6xxx_led_hw_control_set(struct mv88e6xxx_port *p,
+ int led, unsigned long rules)
+{
+ u16 reg;
+ int err;
+
+ err = mv88e6xxx_port_led_read(p->chip, p->port,
+ MV88E6XXX_PORT_LED_CONTROL_POINTER_LED01_CTRL,
+ &reg);
+ if (err)
+ return err;
+
+ if (led == 1)
+ reg &= ~MV88E6XXX_PORT_LED_CONTROL_LED1_SEL_MASK;
+ else
+ reg &= ~MV88E6XXX_PORT_LED_CONTROL_LED0_SEL_MASK;
+
+ err = mv88e6xxx_led_get_selector(p, led, p->fiber, rules, &reg);
+ if (err)
+ return err;
+
+ reg |= MV88E6XXX_PORT_LED_CONTROL_POINTER_LED01_CTRL;
+
+ if (led == 0)
+ dev_dbg(p->chip->dev, "LED 0 hw control on port %d trigger selector 0x%02x\n",
+ p->port,
+ (unsigned int)(reg & MV88E6XXX_PORT_LED_CONTROL_LED0_SEL_MASK));
+ else
+ dev_dbg(p->chip->dev, "LED 1 hw control on port %d trigger selector 0x%02x\n",
+ p->port,
+ (unsigned int)(reg & MV88E6XXX_PORT_LED_CONTROL_LED1_SEL_MASK) >> 4);
+
+ return mv88e6xxx_port_led_write(p->chip, p->port, reg);
+}
+
+static int
+mv88e6xxx_led_hw_control_get(struct mv88e6xxx_port *p, int led, unsigned long *rules)
+{
+ u16 val;
+ int err;
+
+ mv88e6xxx_reg_lock(p->chip);
+ err = mv88e6xxx_port_led_read(p->chip, p->port,
+ MV88E6XXX_PORT_LED_CONTROL_POINTER_LED01_CTRL, &val);
+ mv88e6xxx_reg_unlock(p->chip);
+ if (err)
+ return err;
+
+ /* Mask out the selector bits for this port */
+ if (led == 1) {
+ val &= MV88E6XXX_PORT_LED_CONTROL_LED1_SEL_MASK;
+ /* It's forced blinking/OFF/ON */
+ if (val == MV88E6XXX_PORT_LED_CONTROL_LED1_SELD ||
+ val == MV88E6XXX_PORT_LED_CONTROL_LED1_SELE ||
+ val == MV88E6XXX_PORT_LED_CONTROL_LED1_SELF) {
+ *rules = 0;
+ return 0;
+ }
+ } else {
+ val &= MV88E6XXX_PORT_LED_CONTROL_LED0_SEL_MASK;
+ /* It's forced blinking/OFF/ON */
+ if (val == MV88E6XXX_PORT_LED_CONTROL_LED0_SELD ||
+ val == MV88E6XXX_PORT_LED_CONTROL_LED0_SELE ||
+ val == MV88E6XXX_PORT_LED_CONTROL_LED0_SELF) {
+ *rules = 0;
+ return 0;
+ }
+ }
+
+ err = mv88e6xxx_led_match_rule(p, val, led, rules);
+ if (!err)
+ return 0;
+
+ dev_dbg(p->chip->dev, "couldn't find matching selector for %04x\n", val);
+ *rules = 0;
+ return 0;
+}
+
+static int
+mv88e6xxx_led0_hw_control_set(struct led_classdev *ldev, unsigned long rules)
+{
+ struct mv88e6xxx_port *p = container_of(ldev, struct mv88e6xxx_port, led0);
+ int err;
+
+ mv88e6xxx_reg_lock(p->chip);
+ err = mv88e6xxx_led_hw_control_set(p, 0, rules);
+ mv88e6xxx_reg_unlock(p->chip);
+
+ return err;
+}
+
+static int
+mv88e6xxx_led1_hw_control_set(struct led_classdev *ldev, unsigned long rules)
+{
+ struct mv88e6xxx_port *p = container_of(ldev, struct mv88e6xxx_port, led1);
+ int err;
+
+ mv88e6xxx_reg_lock(p->chip);
+ err = mv88e6xxx_led_hw_control_set(p, 1, rules);
+ mv88e6xxx_reg_unlock(p->chip);
+
+ return err;
+}
+
+static int
+mv88e6xxx_led0_hw_control_get(struct led_classdev *ldev, unsigned long *rules)
+{
+ struct mv88e6xxx_port *p = container_of(ldev, struct mv88e6xxx_port, led0);
+
+ return mv88e6xxx_led_hw_control_get(p, 0, rules);
+}
+
+static int
+mv88e6xxx_led1_hw_control_get(struct led_classdev *ldev, unsigned long *rules)
+{
+ struct mv88e6xxx_port *p = container_of(ldev, struct mv88e6xxx_port, led1);
+
+ return mv88e6xxx_led_hw_control_get(p, 1, rules);
+}
+
+static struct device *mv88e6xxx_led_hw_control_get_device(struct mv88e6xxx_port *p)
+{
+ struct dsa_port *dp;
+
+ dp = dsa_to_port(p->chip->ds, p->port);
+ if (!dp)
+ return NULL;
+ if (dp->user)
+ return &dp->user->dev;
+ return NULL;
+}
+
+static struct device *
+mv88e6xxx_led0_hw_control_get_device(struct led_classdev *ldev)
+{
+ struct mv88e6xxx_port *p = container_of(ldev, struct mv88e6xxx_port, led0);
+
+ return mv88e6xxx_led_hw_control_get_device(p);
+}
+
+static struct device *
+mv88e6xxx_led1_hw_control_get_device(struct led_classdev *ldev)
+{
+ struct mv88e6xxx_port *p = container_of(ldev, struct mv88e6xxx_port, led1);
+
+ return mv88e6xxx_led_hw_control_get_device(p);
+}
+
+int mv88e6xxx_port_setup_leds(struct mv88e6xxx_chip *chip, int port)
+{
+ struct fwnode_handle *led = NULL, *leds = NULL;
+ struct led_init_data init_data = { };
+ enum led_default_state state;
+ struct mv88e6xxx_port *p;
+ struct led_classdev *l;
+ struct device *dev;
+ u32 led_num;
+ int ret;
+
+	/* LEDs exist only on ports 1, 2, 3, 4, 5 and 6 (index 0..5) */
+ if (port > 5)
+ return -EOPNOTSUPP;
+
+ p = &chip->ports[port];
+ if (!p->fwnode)
+ return 0;
+
+ dev = chip->dev;
+
+ leds = fwnode_get_named_child_node(p->fwnode, "leds");
+ if (!leds) {
+		dev_dbg(dev, "No leds node specified in device tree for port %d!\n",
+ port);
+ return 0;
+ }
+
+ fwnode_for_each_child_node(leds, led) {
+		/* Reg represents the LED number on the port; at most 2
+		 * LEDs can be connected to each port, and in some designs
+		 * only one LED is connected.
+ */
+ if (fwnode_property_read_u32(led, "reg", &led_num))
+ continue;
+ if (led_num > 1) {
+ dev_err(dev, "invalid LED specified port %d\n", port);
+ ret = -EINVAL;
+ goto err_put_led;
+ }
+
+ if (led_num == 0)
+ l = &p->led0;
+ else
+ l = &p->led1;
+
+ state = led_init_default_state_get(led);
+ switch (state) {
+ case LEDS_DEFSTATE_ON:
+ l->brightness = 1;
+ mv88e6xxx_led_brightness_set(p, led_num, 1);
+ break;
+ case LEDS_DEFSTATE_KEEP:
+ break;
+ default:
+ l->brightness = 0;
+ mv88e6xxx_led_brightness_set(p, led_num, 0);
+ }
+
+ l->max_brightness = 1;
+ if (led_num == 0) {
+ l->brightness_set_blocking = mv88e6xxx_led0_brightness_set_blocking;
+ l->blink_set = mv88e6xxx_led0_blink_set;
+ l->hw_control_is_supported = mv88e6xxx_led0_hw_control_is_supported;
+ l->hw_control_set = mv88e6xxx_led0_hw_control_set;
+ l->hw_control_get = mv88e6xxx_led0_hw_control_get;
+ l->hw_control_get_device = mv88e6xxx_led0_hw_control_get_device;
+ } else {
+ l->brightness_set_blocking = mv88e6xxx_led1_brightness_set_blocking;
+ l->blink_set = mv88e6xxx_led1_blink_set;
+ l->hw_control_is_supported = mv88e6xxx_led1_hw_control_is_supported;
+ l->hw_control_set = mv88e6xxx_led1_hw_control_set;
+ l->hw_control_get = mv88e6xxx_led1_hw_control_get;
+ l->hw_control_get_device = mv88e6xxx_led1_hw_control_get_device;
+ }
+ l->hw_control_trigger = "netdev";
+
+ init_data.default_label = ":port";
+ init_data.fwnode = led;
+ init_data.devname_mandatory = true;
+ init_data.devicename = kasprintf(GFP_KERNEL, "%s:0%d:0%d", chip->info->name,
+ port, led_num);
+ if (!init_data.devicename) {
+ ret = -ENOMEM;
+ goto err_put_led;
+ }
+
+ ret = devm_led_classdev_register_ext(dev, l, &init_data);
+ kfree(init_data.devicename);
+
+ if (ret) {
+ dev_err(dev, "Failed to init LED %d for port %d", led_num, port);
+ goto err_put_led;
+ }
+ }
+
+ fwnode_handle_put(leds);
+ return 0;
+
+err_put_led:
+ fwnode_handle_put(led);
+ fwnode_handle_put(leds);
+ return ret;
+}
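As a worked example of how the selector table and the matcher above interact (illustration only; `p` is assumed to point at the mv88e6xxx_port of a copper port in the 0-3 range):

	unsigned long rules = BIT(TRIGGER_NETDEV_LINK_100) |
			      BIT(TRIGGER_NETDEV_LINK_1000);
	u16 selector = 0;
	int err;

	/* Matches the entry: LED 0, ports 0-3, blink on activity, no fiber */
	err = mv88e6xxx_led_match_selector(p, 0, true, false, rules, &selector);
	/* On success, selector == MV88E6XXX_PORT_LED_CONTROL_LED0_SEL1 */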
diff --git a/drivers/net/dsa/mv88e6xxx/pcs-6185.c b/drivers/net/dsa/mv88e6xxx/pcs-6185.c
index 5a27d047a38e..af7e06d265f7 100644
--- a/drivers/net/dsa/mv88e6xxx/pcs-6185.c
+++ b/drivers/net/dsa/mv88e6xxx/pcs-6185.c
@@ -55,6 +55,7 @@ static irqreturn_t mv88e6185_pcs_handle_irq(int irq, void *dev_id)
}
static void mv88e6185_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mv88e6185_pcs *mpcs = pcs_to_mv88e6185_pcs(pcs);
@@ -137,7 +138,6 @@ static int mv88e6185_pcs_init(struct mv88e6xxx_chip *chip, int port)
mpcs->chip = chip;
mpcs->port = port;
mpcs->phylink_pcs.ops = &mv88e6185_phylink_pcs_ops;
- mpcs->phylink_pcs.neg_mode = true;
irq = mv88e6xxx_serdes_irq_mapping(chip, port);
if (irq) {
diff --git a/drivers/net/dsa/mv88e6xxx/pcs-6352.c b/drivers/net/dsa/mv88e6xxx/pcs-6352.c
index 88f624b65470..36993400837e 100644
--- a/drivers/net/dsa/mv88e6xxx/pcs-6352.c
+++ b/drivers/net/dsa/mv88e6xxx/pcs-6352.c
@@ -158,6 +158,7 @@ static void marvell_c22_pcs_disable(struct phylink_pcs *pcs)
}
static void marvell_c22_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct marvell_c22_pcs *mpcs = pcs_to_marvell_c22_pcs(pcs);
@@ -274,7 +275,6 @@ static struct marvell_c22_pcs *marvell_c22_pcs_alloc(struct device *dev,
mpcs->mdio.bus = bus;
mpcs->mdio.addr = addr;
mpcs->phylink_pcs.ops = &marvell_c22_pcs_ops;
- mpcs->phylink_pcs.neg_mode = true;
return mpcs;
}
diff --git a/drivers/net/dsa/mv88e6xxx/pcs-639x.c b/drivers/net/dsa/mv88e6xxx/pcs-639x.c
index d758a6c1b226..5db17c0b77f5 100644
--- a/drivers/net/dsa/mv88e6xxx/pcs-639x.c
+++ b/drivers/net/dsa/mv88e6xxx/pcs-639x.c
@@ -9,6 +9,7 @@
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/mii.h>
+#include <linux/string_choices.h>
#include "chip.h"
#include "global2.h"
@@ -257,6 +258,7 @@ static int mv88e639x_sgmii_pcs_post_config(struct phylink_pcs *pcs,
}
static void mv88e639x_sgmii_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mv88e639x_pcs *mpcs = sgmii_pcs_to_mv88e639x_pcs(pcs);
@@ -395,6 +397,7 @@ static void mv88e639x_xg_pcs_disable(struct mv88e639x_pcs *mpcs)
}
static void mv88e639x_xg_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mv88e639x_pcs *mpcs = xg_pcs_to_mv88e639x_pcs(pcs);
@@ -562,9 +565,7 @@ static int mv88e6390_pcs_init(struct mv88e6xxx_chip *chip, int port)
return -ENOMEM;
mpcs->sgmii_pcs.ops = &mv88e639x_sgmii_pcs_ops;
- mpcs->sgmii_pcs.neg_mode = true;
mpcs->xg_pcs.ops = &mv88e6390_xg_pcs_ops;
- mpcs->xg_pcs.neg_mode = true;
if (chip->info->prod_num == MV88E6XXX_PORT_SWITCH_ID_PROD_6190X ||
chip->info->prod_num == MV88E6XXX_PORT_SWITCH_ID_PROD_6390X)
@@ -748,7 +749,7 @@ static int mv88e6393x_sgmii_apply_2500basex_an(struct mv88e639x_pcs *mpcs,
if (err)
dev_err(mpcs->mdio.dev.parent,
"failed to %s 2500basex fix: %pe\n",
- enable ? "enable" : "disable", ERR_PTR(err));
+ str_enable_disable(enable), ERR_PTR(err));
return err;
}
@@ -889,6 +890,7 @@ static int mv88e6393x_xg_pcs_post_config(struct phylink_pcs *pcs,
}
static void mv88e6393x_xg_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mv88e639x_pcs *mpcs = xg_pcs_to_mv88e639x_pcs(pcs);
@@ -896,7 +898,7 @@ static void mv88e6393x_xg_pcs_get_state(struct phylink_pcs *pcs,
int err;
if (state->interface != PHY_INTERFACE_MODE_USXGMII)
- return mv88e639x_xg_pcs_get_state(pcs, state);
+ return mv88e639x_xg_pcs_get_state(pcs, neg_mode, state);
state->link = false;
@@ -941,9 +943,7 @@ static int mv88e6393x_pcs_init(struct mv88e6xxx_chip *chip, int port)
return -ENOMEM;
mpcs->sgmii_pcs.ops = &mv88e6393x_sgmii_pcs_ops;
- mpcs->sgmii_pcs.neg_mode = true;
mpcs->xg_pcs.ops = &mv88e6393x_xg_pcs_ops;
- mpcs->xg_pcs.neg_mode = true;
mpcs->supports_5g = true;
err = mv88e6393x_erratum_4_6(mpcs);
diff --git a/drivers/net/dsa/mv88e6xxx/phy.c b/drivers/net/dsa/mv88e6xxx/phy.c
index 8bb88b3d900d..4e7827ee684a 100644
--- a/drivers/net/dsa/mv88e6xxx/phy.c
+++ b/drivers/net/dsa/mv88e6xxx/phy.c
@@ -182,7 +182,7 @@ static void mv88e6xxx_phy_ppu_reenable_work(struct work_struct *ugly)
static void mv88e6xxx_phy_ppu_reenable_timer(struct timer_list *t)
{
- struct mv88e6xxx_chip *chip = from_timer(chip, t, ppu_timer);
+ struct mv88e6xxx_chip *chip = timer_container_of(chip, t, ppu_timer);
schedule_work(&chip->ppu_work);
}
@@ -206,7 +206,7 @@ static int mv88e6xxx_phy_ppu_access_get(struct mv88e6xxx_chip *chip)
}
chip->ppu_disabled = 1;
} else {
- del_timer(&chip->ppu_timer);
+ timer_delete(&chip->ppu_timer);
ret = 0;
}
@@ -229,7 +229,10 @@ static void mv88e6xxx_phy_ppu_state_init(struct mv88e6xxx_chip *chip)
static void mv88e6xxx_phy_ppu_state_destroy(struct mv88e6xxx_chip *chip)
{
- del_timer_sync(&chip->ppu_timer);
+ mutex_lock(&chip->ppu_mutex);
+ timer_delete_sync(&chip->ppu_timer);
+ cancel_work_sync(&chip->ppu_work);
+ mutex_unlock(&chip->ppu_mutex);
}
int mv88e6185_phy_ppu_read(struct mv88e6xxx_chip *chip, struct mii_bus *bus,
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index 5394a8cf7bf1..66b1b7277281 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -12,6 +12,8 @@
#include <linux/if_bridge.h>
#include <linux/phy.h>
#include <linux/phylink.h>
+#include <linux/property.h>
+#include <linux/string_choices.h>
#include "chip.h"
#include "global2.h"
@@ -175,7 +177,7 @@ int mv88e6xxx_port_set_link(struct mv88e6xxx_chip *chip, int port, int link)
dev_dbg(chip->dev, "p%d: %s link %s\n", port,
reg & MV88E6XXX_PORT_MAC_CTL_FORCE_LINK ? "Force" : "Unforce",
- reg & MV88E6XXX_PORT_MAC_CTL_LINK_UP ? "up" : "down");
+ str_up_down(reg & MV88E6XXX_PORT_MAC_CTL_LINK_UP));
return 0;
}
@@ -1713,6 +1715,7 @@ int mv88e6393x_port_set_policy(struct mv88e6xxx_chip *chip, int port,
ptr = shift / 8;
shift %= 8;
mask >>= ptr * 8;
+ ptr <<= 8;
err = mv88e6393x_port_policy_read(chip, port, ptr, &reg);
if (err)
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index 86deeb347cbc..c1d2f99efb1c 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -25,10 +25,25 @@
#define MV88E6250_PORT_STS_PORTMODE_PHY_100_HALF 0x0900
#define MV88E6250_PORT_STS_PORTMODE_PHY_10_FULL 0x0a00
#define MV88E6250_PORT_STS_PORTMODE_PHY_100_FULL 0x0b00
-#define MV88E6250_PORT_STS_PORTMODE_MII_10_HALF 0x0c00
-#define MV88E6250_PORT_STS_PORTMODE_MII_100_HALF 0x0d00
-#define MV88E6250_PORT_STS_PORTMODE_MII_10_FULL 0x0e00
-#define MV88E6250_PORT_STS_PORTMODE_MII_100_FULL 0x0f00
+/* - Modes with PHY suffix use output instead of input clock
+ * - Modes without RMII or RGMII use MII
+ * - Modes without speed do not have a fixed speed specified in the manual
+ * ("DC to x MHz" - variable clock support?)
+ */
+#define MV88E6250_PORT_STS_PORTMODE_MII_DISABLED 0x0000
+#define MV88E6250_PORT_STS_PORTMODE_MII_100_RGMII 0x0100
+#define MV88E6250_PORT_STS_PORTMODE_MII_DUAL_100_RMII_FULL_PHY 0x0200
+#define MV88E6250_PORT_STS_PORTMODE_MII_200_RMII_FULL_PHY 0x0400
+#define MV88E6250_PORT_STS_PORTMODE_MII_DUAL_100_RMII_FULL 0x0600
+#define MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_FULL 0x0700
+#define MV88E6250_PORT_STS_PORTMODE_MII_HALF 0x0800
+#define MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_HALF_PHY 0x0900
+#define MV88E6250_PORT_STS_PORTMODE_MII_FULL 0x0a00
+#define MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_FULL_PHY 0x0b00
+#define MV88E6250_PORT_STS_PORTMODE_MII_10_HALF_PHY 0x0c00
+#define MV88E6250_PORT_STS_PORTMODE_MII_100_HALF_PHY 0x0d00
+#define MV88E6250_PORT_STS_PORTMODE_MII_10_FULL_PHY 0x0e00
+#define MV88E6250_PORT_STS_PORTMODE_MII_100_FULL_PHY 0x0f00
#define MV88E6XXX_PORT_STS_LINK 0x0800
#define MV88E6XXX_PORT_STS_DUPLEX 0x0400
#define MV88E6XXX_PORT_STS_SPEED_MASK 0x0300
@@ -294,6 +309,130 @@
/* Offset 0x13: OutFiltered Counter */
#define MV88E6XXX_PORT_OUT_FILTERED 0x13
+/* Offset 0x16: LED Control */
+#define MV88E6XXX_PORT_LED_CONTROL 0x16
+#define MV88E6XXX_PORT_LED_CONTROL_UPDATE BIT(15)
+#define MV88E6XXX_PORT_LED_CONTROL_POINTER_MASK GENMASK(14, 12)
+#define MV88E6XXX_PORT_LED_CONTROL_POINTER_LED01_CTRL (0x00 << 12) /* Control for LED 0 and 1 */
+#define MV88E6XXX_PORT_LED_CONTROL_POINTER_STRETCH_BLINK	(0x06 << 12) /* Stretch and Blink Rate */
+#define MV88E6XXX_PORT_LED_CONTROL_POINTER_CNTL_SPECIAL (0x07 << 12) /* Control for the Port's Special LED */
+#define MV88E6XXX_PORT_LED_CONTROL_DATA_MASK GENMASK(10, 0)
+/* Selection masks valid for either port 1,2,3,4 or 5 */
+#define MV88E6XXX_PORT_LED_CONTROL_LED0_SEL_MASK GENMASK(3, 0)
+#define MV88E6XXX_PORT_LED_CONTROL_LED1_SEL_MASK GENMASK(7, 4)
+/* Selection control for LED 0 and 1; ports 5 and 6 only have LED 0
+ * Bits Function
+ * 0..3 LED 0 control selector on ports 1-5
+ * 4..7 LED 1 control selector on ports 1-4; on port 5 this controls LED 0 of port 6
+ *
+ * Sel Port LED Function for the 6352 family:
+ * 0 1-4 0 Link/Act/Speed by Blink Rate (off=no link, on=link, blink=activity, blink speed=link speed)
+ * 1-4 1 Port 2's Special LED
+ * 5-6 0 Port 5 Link/Act (off=no link, on=link, blink=activity)
+ * 5-6 1 Port 6 Link/Act (off=no link, on=link 1000, blink=activity)
+ * 1 1-4 0 100/1000 Link/Act (off=no link, on=100 or 1000 link, blink=activity)
+ * 1-4 1 10/100 Link Act (off=no link, on=10 or 100 link, blink=activity)
+ * 5-6 0 Fiber 100 Link/Act (off=no link, on=link 100, blink=activity)
+ * 5-6 1 Fiber 1000 Link/Act (off=no link, on=link 1000, blink=activity)
+ * 2 1-4 0 1000 Link/Act (off=no link, on=link 1000, blink=activity)
+ * 1-4 1 10/100 Link/Act (off=no link, on=10 or 100 link, blink=activity)
+ * 5-6 0 Fiber 1000 Link/Act (off=no link, on=link 1000, blink=activity)
+ * 5-6 1 Fiber 100 Link/Act (off=no link, on=link 100, blink=activity)
+ * 3 1-4 0 Link/Act (off=no link, on=link, blink=activity)
+ * 1-4 1 1000 Link (off=no link, on=1000 link)
+ * 5-6 0 Port 0's Special LED
+ * 5-6 1 Fiber Link (off=no link, on=link)
+ * 4 1-4 0 Port 0's Special LED
+ * 1-4 1 Port 1's Special LED
+ * 5-6 0 Port 1's Special LED
+ * 5-6 1 Port 5 Link/Act (off=no link, on=link, blink=activity)
+ * 5 1-4 0 Reserved
+ * 1-4 1 Reserved
+ * 5-6 0 Port 2's Special LED
+ * 5-6 1 Port 6 Link (off=no link, on=link)
+ * 6 1-4 0 Duplex/Collision (off=half-duplex,on=full-duplex,blink=collision)
+ * 1-4 1 10/1000 Link/Act (off=no link, on=10 or 1000 link, blink=activity)
+ * 5-6 0 Port 5 Duplex/Collision (off=half-duplex, on=full-duplex, blink=col)
+ * 5-6 1 Port 6 Duplex/Collision (off=half-duplex, on=full-duplex, blink=col)
+ * 7 1-4 0 10/1000 Link/Act (off=no link, on=10 or 1000 link, blink=activity)
+ * 1-4 1 10/1000 Link (off=no link, on=10 or 1000 link)
+ * 5-6 0 Port 5 Link/Act/Speed by Blink rate (off=no link, on=link, blink=activity, blink speed=link speed)
+ * 5-6 1 Port 6 Link/Act/Speed by Blink rate (off=no link, on=link, blink=activity, blink speed=link speed)
+ * 8 1-4 0 Link (off=no link, on=link)
+ * 1-4 1 Activity (off=no link, blink on=activity)
+ * 5-6 0 Port 6 Link/Act (off=no link, on=link, blink=activity)
+ * 5-6 1 Port 0's Special LED
+ * 9 1-4 0 10 Link (off=no link, on=10 link)
+ * 1-4 1 100 Link (off=no link, on=100 link)
+ * 5-6 0 Reserved
+ * 5-6 1 Port 1's Special LED
+ * a 1-4 0 10 Link/Act (off=no link, on=10 link, blink=activity)
+ * 1-4 1 100 Link/Act (off=no link, on=100 link, blink=activity)
+ * 5-6 0 Reserved
+ * 5-6 1 Port 2's Special LED
+ * b 1-4 0 100/1000 Link (off=no link, on=100 or 1000 link)
+ * 1-4 1 10/100 Link (off=no link, on=100 link, blink=activity)
+ * 5-6 0 Reserved
+ * 5-6 1 Reserved
+ * c * * PTP Act (blink on=PTP activity)
+ * d * * Force Blink
+ * e * * Force Off
+ * f * * Force On
+ */
+/* Select LED0 output */
+#define MV88E6XXX_PORT_LED_CONTROL_LED0_SEL0 0x0
+#define MV88E6XXX_PORT_LED_CONTROL_LED0_SEL1 0x1
+#define MV88E6XXX_PORT_LED_CONTROL_LED0_SEL2 0x2
+#define MV88E6XXX_PORT_LED_CONTROL_LED0_SEL3 0x3
+#define MV88E6XXX_PORT_LED_CONTROL_LED0_SEL4 0x4
+#define MV88E6XXX_PORT_LED_CONTROL_LED0_SEL5 0x5
+#define MV88E6XXX_PORT_LED_CONTROL_LED0_SEL6 0x6
+#define MV88E6XXX_PORT_LED_CONTROL_LED0_SEL7 0x7
+#define MV88E6XXX_PORT_LED_CONTROL_LED0_SEL8 0x8
+#define MV88E6XXX_PORT_LED_CONTROL_LED0_SEL9 0x9
+#define MV88E6XXX_PORT_LED_CONTROL_LED0_SELA 0xa
+#define MV88E6XXX_PORT_LED_CONTROL_LED0_SELB 0xb
+#define MV88E6XXX_PORT_LED_CONTROL_LED0_SELC 0xc
+#define MV88E6XXX_PORT_LED_CONTROL_LED0_SELD 0xd
+#define MV88E6XXX_PORT_LED_CONTROL_LED0_SELE 0xe
+#define MV88E6XXX_PORT_LED_CONTROL_LED0_SELF 0xf
+#define MV88E6XXX_PORT_LED_CONTROL_LED1_SEL0 (0x0 << 4)
+#define MV88E6XXX_PORT_LED_CONTROL_LED1_SEL1 (0x1 << 4)
+#define MV88E6XXX_PORT_LED_CONTROL_LED1_SEL2 (0x2 << 4)
+#define MV88E6XXX_PORT_LED_CONTROL_LED1_SEL3 (0x3 << 4)
+#define MV88E6XXX_PORT_LED_CONTROL_LED1_SEL4 (0x4 << 4)
+#define MV88E6XXX_PORT_LED_CONTROL_LED1_SEL5 (0x5 << 4)
+#define MV88E6XXX_PORT_LED_CONTROL_LED1_SEL6 (0x6 << 4)
+#define MV88E6XXX_PORT_LED_CONTROL_LED1_SEL7 (0x7 << 4)
+#define MV88E6XXX_PORT_LED_CONTROL_LED1_SEL8 (0x8 << 4)
+#define MV88E6XXX_PORT_LED_CONTROL_LED1_SEL9 (0x9 << 4)
+#define MV88E6XXX_PORT_LED_CONTROL_LED1_SELA (0xa << 4)
+#define MV88E6XXX_PORT_LED_CONTROL_LED1_SELB (0xb << 4)
+#define MV88E6XXX_PORT_LED_CONTROL_LED1_SELC (0xc << 4)
+#define MV88E6XXX_PORT_LED_CONTROL_LED1_SELD (0xd << 4)
+#define MV88E6XXX_PORT_LED_CONTROL_LED1_SELE (0xe << 4)
+#define MV88E6XXX_PORT_LED_CONTROL_LED1_SELF (0xf << 4)
+/* Stretch and Blink Rate Control (Index 0x06 of LED Control) */
+/* Pulse Stretch Selection for all LEDs on this port */
+#define MV88E6XXX_PORT_LED_CONTROL_0x06_PULSE_STRETCH_NONE (0 << 4)
+#define MV88E6XXX_PORT_LED_CONTROL_0x06_PULSE_STRETCH_21MS (1 << 4)
+#define MV88E6XXX_PORT_LED_CONTROL_0x06_PULSE_STRETCH_42MS (2 << 4)
+#define MV88E6XXX_PORT_LED_CONTROL_0x06_PULSE_STRETCH_84MS (3 << 4)
+#define MV88E6XXX_PORT_LED_CONTROL_0x06_PULSE_STRETCH_168MS (4 << 4)
+/* Blink Rate Selection for all LEDs on this port */
+#define MV88E6XXX_PORT_LED_CONTROL_0x06_BLINK_RATE_21MS 0
+#define MV88E6XXX_PORT_LED_CONTROL_0x06_BLINK_RATE_42MS 1
+#define MV88E6XXX_PORT_LED_CONTROL_0x06_BLINK_RATE_84MS 2
+#define MV88E6XXX_PORT_LED_CONTROL_0x06_BLINK_RATE_168MS 3
+#define MV88E6XXX_PORT_LED_CONTROL_0x06_BLINK_RATE_336MS 4
+#define MV88E6XXX_PORT_LED_CONTROL_0x06_BLINK_RATE_672MS 5
+/* Control for Special LED (Index 0x7 of LED Control on Port 0) */
+#define MV88E6XXX_PORT_LED_CONTROL_0x07_P0_LAN_LINKACT_SHIFT 0 /* bits 6:0 LAN Link Activity LED */
+/* Control for Special LED (Index 0x7 of LED Control on Port 1) */
+#define MV88E6XXX_PORT_LED_CONTROL_0x07_P1_WAN_LINKACT_SHIFT 0 /* bits 6:0 WAN Link Activity LED */
+/* Control for Special LED (Index 0x7 of LED Control on Port 2) */
+#define MV88E6XXX_PORT_LED_CONTROL_0x07_P2_PTP_ACT 0 /* bits 6:0 PTP Activity */
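/* Editor's sketch, not part of the patch: the LED0 selector occupies bits
 * 3:0 and the LED1 selector bits 7:4 of the LED Control data word, so one
 * _LED0_SELx value and one _LED1_SELx value are simply OR'ed together.
 * Using row 3 of the table above on a copper port, LED0 becomes Link/Act
 * and LED1 becomes 1000 Link; the function name below is purely
 * illustrative and is not part of the driver.
 */
static u16 mv88e6xxx_example_led_data(void)
{
	/* LED0 = selection 3, LED1 = selection 3; the two nibbles combine
	 * into the single 8-bit data field of the LED Control register.
	 */
	return MV88E6XXX_PORT_LED_CONTROL_LED0_SEL3 |
	       MV88E6XXX_PORT_LED_CONTROL_LED1_SEL3;
}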
+
/* Offset 0x18: IEEE Priority Mapping Table */
#define MV88E6390_PORT_IEEE_PRIO_MAP_TABLE 0x18
#define MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_UPDATE 0x8000
@@ -442,6 +581,15 @@ int mv88e6393x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
phy_interface_t mode);
int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
int mv88e6352_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
+#ifdef CONFIG_NET_DSA_MV88E6XXX_LEDS
+int mv88e6xxx_port_setup_leds(struct mv88e6xxx_chip *chip, int port);
+#else
+static inline int mv88e6xxx_port_setup_leds(struct mv88e6xxx_chip *chip,
+ int port)
+{
+ return 0;
+}
+#endif
int mv88e6xxx_port_drop_untagged(struct mv88e6xxx_chip *chip, int port,
bool drop_untagged);
int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port, bool map);
diff --git a/drivers/net/dsa/mv88e6xxx/ptp.c b/drivers/net/dsa/mv88e6xxx/ptp.c
index 56391e09b325..f7603573d3a9 100644
--- a/drivers/net/dsa/mv88e6xxx/ptp.c
+++ b/drivers/net/dsa/mv88e6xxx/ptp.c
@@ -18,6 +18,13 @@
#define MV88E6XXX_MAX_ADJ_PPB 1000000
+struct mv88e6xxx_cc_coeffs {
+ u32 cc_shift;
+ u32 cc_mult;
+ u32 cc_mult_num;
+ u32 cc_mult_dem;
+};
+
/* Family MV88E6250:
* Raw timestamps are in units of 10-ns clock periods.
*
@@ -25,22 +32,43 @@
* simplifies to
* clkadj = scaled_ppm * 2^7 / 5^5
*/
-#define MV88E6250_CC_SHIFT 28
-#define MV88E6250_CC_MULT (10 << MV88E6250_CC_SHIFT)
-#define MV88E6250_CC_MULT_NUM (1 << 7)
-#define MV88E6250_CC_MULT_DEM 3125ULL
+#define MV88E6XXX_CC_10NS_SHIFT 28
+static const struct mv88e6xxx_cc_coeffs mv88e6xxx_cc_10ns_coeffs = {
+ .cc_shift = MV88E6XXX_CC_10NS_SHIFT,
+ .cc_mult = 10 << MV88E6XXX_CC_10NS_SHIFT,
+ .cc_mult_num = 1 << 7,
+ .cc_mult_dem = 3125ULL,
+};
-/* Other families:
+/* Other families except MV88E6393X in internal clock mode:
* Raw timestamps are in units of 8-ns clock periods.
*
* clkadj = scaled_ppm * 8*2^28 / (10^6 * 2^16)
* simplifies to
* clkadj = scaled_ppm * 2^9 / 5^6
*/
-#define MV88E6XXX_CC_SHIFT 28
-#define MV88E6XXX_CC_MULT (8 << MV88E6XXX_CC_SHIFT)
-#define MV88E6XXX_CC_MULT_NUM (1 << 9)
-#define MV88E6XXX_CC_MULT_DEM 15625ULL
+#define MV88E6XXX_CC_8NS_SHIFT 28
+static const struct mv88e6xxx_cc_coeffs mv88e6xxx_cc_8ns_coeffs = {
+ .cc_shift = MV88E6XXX_CC_8NS_SHIFT,
+ .cc_mult = 8 << MV88E6XXX_CC_8NS_SHIFT,
+ .cc_mult_num = 1 << 9,
+ .cc_mult_dem = 15625ULL
+};
+
+/* Family MV88E6393X using internal clock:
+ * Raw timestamps are in units of 4-ns clock periods.
+ *
+ * clkadj = scaled_ppm * 4*2^28 / (10^6 * 2^16)
+ * simplifies to
+ * clkadj = scaled_ppm * 2^8 / 5^6
+ */
+#define MV88E6XXX_CC_4NS_SHIFT 28
+static const struct mv88e6xxx_cc_coeffs mv88e6xxx_cc_4ns_coeffs = {
+ .cc_shift = MV88E6XXX_CC_4NS_SHIFT,
+ .cc_mult = 4 << MV88E6XXX_CC_4NS_SHIFT,
+ .cc_mult_num = 1 << 8,
+ .cc_mult_dem = 15625ULL
+};
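/* Editor's worked example, not part of the patch: with the 4-ns coefficients
 * above, clkadj = scaled_ppm * 4 * 2^28 / (10^6 * 2^16)
 *              = scaled_ppm * 2^30 / (2^22 * 5^6)
 *              = scaled_ppm * 2^8 / 15625,
 * which is exactly cc_mult_num / cc_mult_dem. For scaled_ppm = 65536 (1 ppm)
 * this gives 65536 * 256 / 15625 ~= 1074, i.e. roughly 1 ppm of cc_mult
 * (4 << 28). The sketch below mirrors the positive-scaled_ppm half of
 * mv88e6xxx_ptp_adjfine() and assumes scaled_ppm >= 0 (the caller negates
 * first, as adjfine does); it is illustrative only.
 */
static u32 example_cc_diff(const struct mv88e6xxx_cc_coeffs *cc,
			   long scaled_ppm)
{
	u64 adj = (u64)cc->cc_mult_num * scaled_ppm;

	return div_u64(adj, cc->cc_mult_dem);
}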
#define TAI_EVENT_WORK_INTERVAL msecs_to_jiffies(100)
@@ -83,13 +111,40 @@ static int mv88e6352_set_gpio_func(struct mv88e6xxx_chip *chip, int pin,
return chip->info->ops->gpio_ops->set_pctl(chip, pin, func);
}
-static u64 mv88e6352_ptp_clock_read(const struct cyclecounter *cc)
+static const struct mv88e6xxx_cc_coeffs *
+mv88e6xxx_cc_coeff_get(struct mv88e6xxx_chip *chip)
+{
+ u16 period_ps;
+ int err;
+
+ err = mv88e6xxx_tai_read(chip, MV88E6XXX_TAI_CLOCK_PERIOD, &period_ps, 1);
+ if (err) {
+ dev_err(chip->dev, "failed to read cycle counter period: %d\n",
+ err);
+ return ERR_PTR(err);
+ }
+
+ switch (period_ps) {
+ case 4000:
+ return &mv88e6xxx_cc_4ns_coeffs;
+ case 8000:
+ return &mv88e6xxx_cc_8ns_coeffs;
+ case 10000:
+ return &mv88e6xxx_cc_10ns_coeffs;
+ default:
+ dev_err(chip->dev, "unexpected cycle counter period of %u ps\n",
+ period_ps);
+ return ERR_PTR(-ENODEV);
+ }
+}
+
+static u64 mv88e6352_ptp_clock_read(struct cyclecounter *cc)
{
struct mv88e6xxx_chip *chip = cc_to_chip(cc);
u16 phc_time[2];
int err;
- err = mv88e6xxx_tai_read(chip, MV88E6XXX_TAI_TIME_LO, phc_time,
+ err = mv88e6xxx_tai_read(chip, MV88E6352_TAI_TIME_LO, phc_time,
ARRAY_SIZE(phc_time));
if (err)
return 0;
@@ -97,13 +152,13 @@ static u64 mv88e6352_ptp_clock_read(const struct cyclecounter *cc)
return ((u32)phc_time[1] << 16) | phc_time[0];
}
-static u64 mv88e6165_ptp_clock_read(const struct cyclecounter *cc)
+static u64 mv88e6165_ptp_clock_read(struct cyclecounter *cc)
{
struct mv88e6xxx_chip *chip = cc_to_chip(cc);
u16 phc_time[2];
int err;
- err = mv88e6xxx_tai_read(chip, MV88E6XXX_PTP_GC_TIME_LO, phc_time,
+ err = mv88e6xxx_tai_read(chip, MV88E6165_PTP_GC_TIME_LO, phc_time,
ARRAY_SIZE(phc_time));
if (err)
return 0;
@@ -112,42 +167,26 @@ static u64 mv88e6165_ptp_clock_read(const struct cyclecounter *cc)
}
/* mv88e6352_config_eventcap - configure TAI event capture
- * @event: PTP_CLOCK_PPS (internal) or PTP_CLOCK_EXTTS (external)
* @rising: zero for falling-edge trigger, else rising-edge trigger
*
* This will also reset the capture sequence counter.
*/
-static int mv88e6352_config_eventcap(struct mv88e6xxx_chip *chip, int event,
- int rising)
+static int mv88e6352_config_eventcap(struct mv88e6xxx_chip *chip, int rising)
{
- u16 global_config;
- u16 cap_config;
+ u16 evcap_config;
int err;
- chip->evcap_config = MV88E6XXX_TAI_CFG_CAP_OVERWRITE |
- MV88E6XXX_TAI_CFG_CAP_CTR_START;
+ evcap_config = MV88E6352_TAI_CFG_CAP_OVERWRITE |
+ MV88E6352_TAI_CFG_CAP_CTR_START;
if (!rising)
- chip->evcap_config |= MV88E6XXX_TAI_CFG_EVREQ_FALLING;
+ evcap_config |= MV88E6352_TAI_CFG_EVREQ_FALLING;
- global_config = (chip->evcap_config | chip->trig_config);
- err = mv88e6xxx_tai_write(chip, MV88E6XXX_TAI_CFG, global_config);
+ err = mv88e6xxx_tai_write(chip, MV88E6352_TAI_CFG, evcap_config);
if (err)
return err;
- if (event == PTP_CLOCK_PPS) {
- cap_config = MV88E6XXX_TAI_EVENT_STATUS_CAP_TRIG;
- } else if (event == PTP_CLOCK_EXTTS) {
- /* if STATUS_CAP_TRIG is unset we capture PTP_EVREQ events */
- cap_config = 0;
- } else {
- return -EINVAL;
- }
-
/* Write the capture config; this also clears the capture counter */
- err = mv88e6xxx_tai_write(chip, MV88E6XXX_TAI_EVENT_STATUS,
- cap_config);
-
- return err;
+ return mv88e6xxx_tai_write(chip, MV88E6352_TAI_EVENT_STATUS, 0);
}
static void mv88e6352_tai_event_work(struct work_struct *ugly)
@@ -160,7 +199,7 @@ static void mv88e6352_tai_event_work(struct work_struct *ugly)
int err;
mv88e6xxx_reg_lock(chip);
- err = mv88e6xxx_tai_read(chip, MV88E6XXX_TAI_EVENT_STATUS,
+ err = mv88e6xxx_tai_read(chip, MV88E6352_TAI_EVENT_STATUS,
status, ARRAY_SIZE(status));
mv88e6xxx_reg_unlock(chip);
@@ -168,19 +207,19 @@ static void mv88e6352_tai_event_work(struct work_struct *ugly)
dev_err(chip->dev, "failed to read TAI status register\n");
return;
}
- if (status[0] & MV88E6XXX_TAI_EVENT_STATUS_ERROR) {
+ if (status[0] & MV88E6352_TAI_EVENT_STATUS_ERROR) {
dev_warn(chip->dev, "missed event capture\n");
return;
}
- if (!(status[0] & MV88E6XXX_TAI_EVENT_STATUS_VALID))
+ if (!(status[0] & MV88E6352_TAI_EVENT_STATUS_VALID))
goto out;
raw_ts = ((u32)status[2] << 16) | status[1];
/* Clear the valid bit so the next timestamp can come in */
- status[0] &= ~MV88E6XXX_TAI_EVENT_STATUS_VALID;
+ status[0] &= ~MV88E6352_TAI_EVENT_STATUS_VALID;
mv88e6xxx_reg_lock(chip);
- err = mv88e6xxx_tai_write(chip, MV88E6XXX_TAI_EVENT_STATUS, status[0]);
+ err = mv88e6xxx_tai_write(chip, MV88E6352_TAI_EVENT_STATUS, status[0]);
mv88e6xxx_reg_unlock(chip);
if (err) {
dev_err(chip->dev, "failed to write TAI status register\n");
@@ -204,7 +243,6 @@ out:
static int mv88e6xxx_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct mv88e6xxx_chip *chip = ptp_to_chip(ptp);
- const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops;
int neg_adj = 0;
u32 diff, mult;
u64 adj;
@@ -214,10 +252,10 @@ static int mv88e6xxx_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
scaled_ppm = -scaled_ppm;
}
- mult = ptp_ops->cc_mult;
- adj = ptp_ops->cc_mult_num;
+ mult = chip->cc_coeffs->cc_mult;
+ adj = chip->cc_coeffs->cc_mult_num;
adj *= scaled_ppm;
- diff = div_u64(adj, ptp_ops->cc_mult_dem);
+ diff = div_u64(adj, chip->cc_coeffs->cc_mult_dem);
mv88e6xxx_reg_lock(chip);
@@ -278,13 +316,6 @@ static int mv88e6352_ptp_enable_extts(struct mv88e6xxx_chip *chip,
int pin;
int err;
- /* Reject requests with unsupported flags */
- if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_FALLING_EDGE |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
/* Reject requests to enable time stamping on both edges. */
if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
(rq->extts.flags & PTP_ENABLE_FEATURE) &&
@@ -308,7 +339,7 @@ static int mv88e6352_ptp_enable_extts(struct mv88e6xxx_chip *chip,
schedule_delayed_work(&chip->tai_event_work,
TAI_EVENT_WORK_INTERVAL);
- err = mv88e6352_config_eventcap(chip, PTP_CLOCK_EXTTS, rising);
+ err = mv88e6352_config_eventcap(chip, rising);
} else {
func = MV88E6352_G2_SCRATCH_GPIO_PCTL_GPIO;
@@ -364,37 +395,6 @@ const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops = {
(1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
(1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ),
- .cc_shift = MV88E6XXX_CC_SHIFT,
- .cc_mult = MV88E6XXX_CC_MULT,
- .cc_mult_num = MV88E6XXX_CC_MULT_NUM,
- .cc_mult_dem = MV88E6XXX_CC_MULT_DEM,
-};
-
-const struct mv88e6xxx_ptp_ops mv88e6250_ptp_ops = {
- .clock_read = mv88e6352_ptp_clock_read,
- .ptp_enable = mv88e6352_ptp_enable,
- .ptp_verify = mv88e6352_ptp_verify,
- .event_work = mv88e6352_tai_event_work,
- .port_enable = mv88e6352_hwtstamp_port_enable,
- .port_disable = mv88e6352_hwtstamp_port_disable,
- .n_ext_ts = 1,
- .arr0_sts_reg = MV88E6XXX_PORT_PTP_ARR0_STS,
- .arr1_sts_reg = MV88E6XXX_PORT_PTP_ARR1_STS,
- .dep_sts_reg = MV88E6XXX_PORT_PTP_DEP_STS,
- .rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ),
- .cc_shift = MV88E6250_CC_SHIFT,
- .cc_mult = MV88E6250_CC_MULT,
- .cc_mult_num = MV88E6250_CC_MULT_NUM,
- .cc_mult_dem = MV88E6250_CC_MULT_DEM,
};
const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {
@@ -418,10 +418,6 @@ const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {
(1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
(1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ),
- .cc_shift = MV88E6XXX_CC_SHIFT,
- .cc_mult = MV88E6XXX_CC_MULT,
- .cc_mult_num = MV88E6XXX_CC_MULT_NUM,
- .cc_mult_dem = MV88E6XXX_CC_MULT_DEM,
};
const struct mv88e6xxx_ptp_ops mv88e6390_ptp_ops = {
@@ -446,13 +442,9 @@ const struct mv88e6xxx_ptp_ops mv88e6390_ptp_ops = {
(1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
(1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ),
- .cc_shift = MV88E6XXX_CC_SHIFT,
- .cc_mult = MV88E6XXX_CC_MULT,
- .cc_mult_num = MV88E6XXX_CC_MULT_NUM,
- .cc_mult_dem = MV88E6XXX_CC_MULT_DEM,
};
-static u64 mv88e6xxx_ptp_clock_read(const struct cyclecounter *cc)
+static u64 mv88e6xxx_ptp_clock_read(struct cyclecounter *cc)
{
struct mv88e6xxx_chip *chip = cc_to_chip(cc);
@@ -462,10 +454,10 @@ static u64 mv88e6xxx_ptp_clock_read(const struct cyclecounter *cc)
return 0;
}
-/* With a 125MHz input clock, the 32-bit timestamp counter overflows in ~34.3
+/* With a 250MHz input clock, the 32-bit timestamp counter overflows in ~17.2
* seconds; this task forces periodic reads so that we don't miss any.
*/
-#define MV88E6XXX_TAI_OVERFLOW_PERIOD (HZ * 16)
+#define MV88E6XXX_TAI_OVERFLOW_PERIOD (HZ * 8)
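/* Editor's worked example, not part of the patch: a 32-bit counter of 4-ns
 * periods wraps after 2^32 * 4 ns ~= 17.18 s, so polling every HZ * 8
 * jiffies (8 s) leaves well under one wrap between reads. The previous
 * HZ * 16 (16 s) interval was only safe for the 8-ns (~34.3 s) counters.
 */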
static void mv88e6xxx_ptp_overflow_check(struct work_struct *work)
{
struct delayed_work *dw = to_delayed_work(work);
@@ -484,11 +476,15 @@ int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip)
int i;
/* Set up the cycle counter */
+ chip->cc_coeffs = mv88e6xxx_cc_coeff_get(chip);
+ if (IS_ERR(chip->cc_coeffs))
+ return PTR_ERR(chip->cc_coeffs);
+
memset(&chip->tstamp_cc, 0, sizeof(chip->tstamp_cc));
chip->tstamp_cc.read = mv88e6xxx_ptp_clock_read;
chip->tstamp_cc.mask = CYCLECOUNTER_MASK(32);
- chip->tstamp_cc.mult = ptp_ops->cc_mult;
- chip->tstamp_cc.shift = ptp_ops->cc_shift;
+ chip->tstamp_cc.mult = chip->cc_coeffs->cc_mult;
+ chip->tstamp_cc.shift = chip->cc_coeffs->cc_shift;
timecounter_init(&chip->tstamp_tc, &chip->tstamp_cc,
ktime_to_ns(ktime_get_real()));
@@ -524,6 +520,10 @@ int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip)
chip->ptp_clock_info.verify = ptp_ops->ptp_verify;
chip->ptp_clock_info.do_aux_work = mv88e6xxx_hwtstamp_work;
+ chip->ptp_clock_info.supported_extts_flags = PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS;
+
if (ptp_ops->set_ptp_cpu_port) {
struct dsa_port *dp;
int upstream = 0;
@@ -551,6 +551,7 @@ int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip)
return 0;
}
+/* This must never be called holding the register lock */
void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip)
{
if (chip->ptp_clock) {
diff --git a/drivers/net/dsa/mv88e6xxx/ptp.h b/drivers/net/dsa/mv88e6xxx/ptp.h
index 6c4d09adc93c..95bdddb0bf39 100644
--- a/drivers/net/dsa/mv88e6xxx/ptp.h
+++ b/drivers/net/dsa/mv88e6xxx/ptp.h
@@ -16,132 +16,56 @@
#include "chip.h"
/* Offset 0x00: TAI Global Config */
-#define MV88E6XXX_TAI_CFG 0x00
-#define MV88E6XXX_TAI_CFG_CAP_OVERWRITE 0x8000
-#define MV88E6XXX_TAI_CFG_CAP_CTR_START 0x4000
-#define MV88E6XXX_TAI_CFG_EVREQ_FALLING 0x2000
-#define MV88E6XXX_TAI_CFG_TRIG_ACTIVE_LO 0x1000
-#define MV88E6XXX_TAI_CFG_IRL_ENABLE 0x0400
-#define MV88E6XXX_TAI_CFG_TRIG_IRQ_EN 0x0200
-#define MV88E6XXX_TAI_CFG_EVREQ_IRQ_EN 0x0100
-#define MV88E6XXX_TAI_CFG_TRIG_LOCK 0x0080
-#define MV88E6XXX_TAI_CFG_BLOCK_UPDATE 0x0008
-#define MV88E6XXX_TAI_CFG_MULTI_PTP 0x0004
-#define MV88E6XXX_TAI_CFG_TRIG_MODE_ONESHOT 0x0002
-#define MV88E6XXX_TAI_CFG_TRIG_ENABLE 0x0001
+#define MV88E6352_TAI_CFG 0x00
+#define MV88E6352_TAI_CFG_CAP_OVERWRITE 0x8000
+#define MV88E6352_TAI_CFG_CAP_CTR_START 0x4000
+#define MV88E6352_TAI_CFG_EVREQ_FALLING 0x2000
+#define MV88E6352_TAI_CFG_TRIG_ACTIVE_LO 0x1000
+#define MV88E6352_TAI_CFG_IRL_ENABLE 0x0400
+#define MV88E6352_TAI_CFG_TRIG_IRQ_EN 0x0200
+#define MV88E6352_TAI_CFG_EVREQ_IRQ_EN 0x0100
+#define MV88E6352_TAI_CFG_TRIG_LOCK 0x0080
+#define MV88E6352_TAI_CFG_BLOCK_UPDATE 0x0008
+#define MV88E6352_TAI_CFG_MULTI_PTP 0x0004
+#define MV88E6352_TAI_CFG_TRIG_MODE_ONESHOT 0x0002
+#define MV88E6352_TAI_CFG_TRIG_ENABLE 0x0001
/* Offset 0x01: Timestamp Clock Period (ps) */
#define MV88E6XXX_TAI_CLOCK_PERIOD 0x01
-/* Offset 0x02/0x03: Trigger Generation Amount */
-#define MV88E6XXX_TAI_TRIG_GEN_AMOUNT_LO 0x02
-#define MV88E6XXX_TAI_TRIG_GEN_AMOUNT_HI 0x03
-
-/* Offset 0x04: Clock Compensation */
-#define MV88E6XXX_TAI_TRIG_CLOCK_COMP 0x04
-
-/* Offset 0x05: Trigger Configuration */
-#define MV88E6XXX_TAI_TRIG_CFG 0x05
-
-/* Offset 0x06: Ingress Rate Limiter Clock Generation Amount */
-#define MV88E6XXX_TAI_IRL_AMOUNT 0x06
-
-/* Offset 0x07: Ingress Rate Limiter Compensation */
-#define MV88E6XXX_TAI_IRL_COMP 0x07
-
-/* Offset 0x08: Ingress Rate Limiter Compensation */
-#define MV88E6XXX_TAI_IRL_COMP_PS 0x08
-
/* Offset 0x09: Event Status */
-#define MV88E6XXX_TAI_EVENT_STATUS 0x09
-#define MV88E6XXX_TAI_EVENT_STATUS_CAP_TRIG 0x4000
-#define MV88E6XXX_TAI_EVENT_STATUS_ERROR 0x0200
-#define MV88E6XXX_TAI_EVENT_STATUS_VALID 0x0100
-#define MV88E6XXX_TAI_EVENT_STATUS_CTR_MASK 0x00ff
-
-/* Offset 0x0A/0x0B: Event Time */
-#define MV88E6XXX_TAI_EVENT_TIME_LO 0x0a
-#define MV88E6XXX_TAI_EVENT_TYPE_HI 0x0b
+#define MV88E6352_TAI_EVENT_STATUS 0x09
+#define MV88E6352_TAI_EVENT_STATUS_ERROR 0x0200
+#define MV88E6352_TAI_EVENT_STATUS_VALID 0x0100
+#define MV88E6352_TAI_EVENT_STATUS_CTR_MASK 0x00ff
+/* Offset 0x0A/0x0B: Event Time Lo/Hi. Always read with Event Status. */
/* Offset 0x0E/0x0F: PTP Global Time */
-#define MV88E6XXX_TAI_TIME_LO 0x0e
-#define MV88E6XXX_TAI_TIME_HI 0x0f
-
-/* Offset 0x10/0x11: Trig Generation Time */
-#define MV88E6XXX_TAI_TRIG_TIME_LO 0x10
-#define MV88E6XXX_TAI_TRIG_TIME_HI 0x11
-
-/* Offset 0x12: Lock Status */
-#define MV88E6XXX_TAI_LOCK_STATUS 0x12
-
-/* Offset 0x00: Ether Type */
-#define MV88E6XXX_PTP_GC_ETYPE 0x00
+#define MV88E6352_TAI_TIME_LO 0x0e
+#define MV88E6352_TAI_TIME_HI 0x0f
/* 6165 Global Control Registers */
-/* Offset 0x00: Ether Type */
-#define MV88E6XXX_PTP_GC_ETYPE 0x00
-
-/* Offset 0x01: Message ID */
-#define MV88E6XXX_PTP_GC_MESSAGE_ID 0x01
-
-/* Offset 0x02: Time Stamp Arrive Time */
-#define MV88E6XXX_PTP_GC_TS_ARR_PTR 0x02
-
-/* Offset 0x03: Port Arrival Interrupt Enable */
-#define MV88E6XXX_PTP_GC_PORT_ARR_INT_EN 0x03
-
-/* Offset 0x04: Port Departure Interrupt Enable */
-#define MV88E6XXX_PTP_GC_PORT_DEP_INT_EN 0x04
-
-/* Offset 0x05: Configuration */
-#define MV88E6XXX_PTP_GC_CONFIG 0x05
-#define MV88E6XXX_PTP_GC_CONFIG_DIS_OVERWRITE BIT(1)
-#define MV88E6XXX_PTP_GC_CONFIG_DIS_TS BIT(0)
-
-/* Offset 0x8: Interrupt Status */
-#define MV88E6XXX_PTP_GC_INT_STATUS 0x08
-
/* Offset 0x9/0xa: Global Time */
-#define MV88E6XXX_PTP_GC_TIME_LO 0x09
-#define MV88E6XXX_PTP_GC_TIME_HI 0x0A
+#define MV88E6165_PTP_GC_TIME_LO 0x09
+#define MV88E6165_PTP_GC_TIME_HI 0x0A
-/* 6165 Per Port Registers */
+/* 6165 Per Port Registers. The arrival and departure registers are a
+ * common block consisting of status, two time registers and the sequence ID.
+ */
/* Offset 0: Arrival Time 0 Status */
#define MV88E6165_PORT_PTP_ARR0_STS 0x00
-/* Offset 0x01/0x02: PTP Arrival 0 Time */
-#define MV88E6165_PORT_PTP_ARR0_TIME_LO 0x01
-#define MV88E6165_PORT_PTP_ARR0_TIME_HI 0x02
-
-/* Offset 0x03: PTP Arrival 0 Sequence ID */
-#define MV88E6165_PORT_PTP_ARR0_SEQID 0x03
-
/* Offset 0x04: PTP Arrival 1 Status */
#define MV88E6165_PORT_PTP_ARR1_STS 0x04
-/* Offset 0x05/0x6E: PTP Arrival 1 Time */
-#define MV88E6165_PORT_PTP_ARR1_TIME_LO 0x05
-#define MV88E6165_PORT_PTP_ARR1_TIME_HI 0x06
-
-/* Offset 0x07: PTP Arrival 1 Sequence ID */
-#define MV88E6165_PORT_PTP_ARR1_SEQID 0x07
-
/* Offset 0x08: PTP Departure Status */
#define MV88E6165_PORT_PTP_DEP_STS 0x08
-/* Offset 0x09/0x0a: PTP Deperture Time */
-#define MV88E6165_PORT_PTP_DEP_TIME_LO 0x09
-#define MV88E6165_PORT_PTP_DEP_TIME_HI 0x0a
-
-/* Offset 0x0b: PTP Departure Sequence ID */
-#define MV88E6165_PORT_PTP_DEP_SEQID 0x0b
-
/* Offset 0x0d: Port Status */
#define MV88E6164_PORT_STATUS 0x0d
#ifdef CONFIG_NET_DSA_MV88E6XXX_PTP
-long mv88e6xxx_hwtstamp_work(struct ptp_clock_info *ptp);
int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip);
void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip);
@@ -149,17 +73,11 @@ void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip);
ptp_clock_info)
extern const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops;
-extern const struct mv88e6xxx_ptp_ops mv88e6250_ptp_ops;
extern const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops;
extern const struct mv88e6xxx_ptp_ops mv88e6390_ptp_ops;
#else /* !CONFIG_NET_DSA_MV88E6XXX_PTP */
-static inline long mv88e6xxx_hwtstamp_work(struct ptp_clock_info *ptp)
-{
- return -1;
-}
-
static inline int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip)
{
return 0;
@@ -170,7 +88,6 @@ static inline void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip)
}
static const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops = {};
-static const struct mv88e6xxx_ptp_ops mv88e6250_ptp_ops = {};
static const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {};
static const struct mv88e6xxx_ptp_ops mv88e6390_ptp_ops = {};
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
index 01ea53940786..b3330211edbc 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.c
+++ b/drivers/net/dsa/mv88e6xxx/serdes.c
@@ -132,8 +132,8 @@ int mv88e6352_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port)
return ARRAY_SIZE(mv88e6352_serdes_hw_stats);
}
-int mv88e6352_serdes_get_strings(struct mv88e6xxx_chip *chip,
- int port, uint8_t *data)
+int mv88e6352_serdes_get_strings(struct mv88e6xxx_chip *chip, int port,
+ uint8_t **data)
{
struct mv88e6352_serdes_hw_stat *stat;
int err, i;
@@ -144,8 +144,7 @@ int mv88e6352_serdes_get_strings(struct mv88e6xxx_chip *chip,
for (i = 0; i < ARRAY_SIZE(mv88e6352_serdes_hw_stats); i++) {
stat = &mv88e6352_serdes_hw_stats[i];
- memcpy(data + i * ETH_GSTRING_LEN, stat->string,
- ETH_GSTRING_LEN);
+ ethtool_puts(data, stat->string);
}
return ARRAY_SIZE(mv88e6352_serdes_hw_stats);
}
@@ -394,8 +393,8 @@ int mv88e6390_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port)
return ARRAY_SIZE(mv88e6390_serdes_hw_stats);
}
-int mv88e6390_serdes_get_strings(struct mv88e6xxx_chip *chip,
- int port, uint8_t *data)
+int mv88e6390_serdes_get_strings(struct mv88e6xxx_chip *chip, int port,
+ uint8_t **data)
{
struct mv88e6390_serdes_hw_stat *stat;
int i;
@@ -405,8 +404,7 @@ int mv88e6390_serdes_get_strings(struct mv88e6xxx_chip *chip,
for (i = 0; i < ARRAY_SIZE(mv88e6390_serdes_hw_stats); i++) {
stat = &mv88e6390_serdes_hw_stats[i];
- memcpy(data + i * ETH_GSTRING_LEN, stat->string,
- ETH_GSTRING_LEN);
+ ethtool_puts(data, stat->string);
}
return ARRAY_SIZE(mv88e6390_serdes_hw_stats);
}
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h
index ff5c3ab31e15..ad887d8601bc 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.h
+++ b/drivers/net/dsa/mv88e6xxx/serdes.h
@@ -125,13 +125,13 @@ unsigned int mv88e6352_serdes_irq_mapping(struct mv88e6xxx_chip *chip,
unsigned int mv88e6390_serdes_irq_mapping(struct mv88e6xxx_chip *chip,
int port);
int mv88e6352_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port);
-int mv88e6352_serdes_get_strings(struct mv88e6xxx_chip *chip,
- int port, uint8_t *data);
+int mv88e6352_serdes_get_strings(struct mv88e6xxx_chip *chip, int port,
+ uint8_t **data);
size_t mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
uint64_t *data);
int mv88e6390_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port);
-int mv88e6390_serdes_get_strings(struct mv88e6xxx_chip *chip,
- int port, uint8_t *data);
+int mv88e6390_serdes_get_strings(struct mv88e6xxx_chip *chip, int port,
+ uint8_t **data);
size_t mv88e6390_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
uint64_t *data);
diff --git a/drivers/net/dsa/mv88e6xxx/trace.h b/drivers/net/dsa/mv88e6xxx/trace.h
index f59ca04768e7..5bd015b2b97a 100644
--- a/drivers/net/dsa/mv88e6xxx/trace.h
+++ b/drivers/net/dsa/mv88e6xxx/trace.h
@@ -28,7 +28,7 @@ DECLARE_EVENT_CLASS(mv88e6xxx_atu_violation,
),
TP_fast_assign(
- __assign_str(name, dev_name(dev));
+ __assign_str(name);
__entry->spid = spid;
__entry->portvec = portvec;
memcpy(__entry->addr, addr, ETH_ALEN);
@@ -68,7 +68,7 @@ DECLARE_EVENT_CLASS(mv88e6xxx_vtu_violation,
),
TP_fast_assign(
- __assign_str(name, dev_name(dev));
+ __assign_str(name);
__entry->spid = spid;
__entry->vid = vid;
),
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 61e95487732d..9e5ede932b42 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -61,11 +61,46 @@ static int felix_cpu_port_for_conduit(struct dsa_switch *ds,
return cpu_dp->index;
}
+/**
+ * felix_update_tag_8021q_rx_rule - Update VCAP ES0 tag_8021q rule after
+ * vlan_filtering change
+ * @outer_tagging_rule: Pointer to VCAP filter on which the update is performed
+ * @vlan_filtering: Current bridge VLAN filtering setting
+ *
+ * Source port identification for tag_8021q is done using VCAP ES0 rules on the
+ * CPU port(s). The ES0 tag B (inner tag from the packet) can be configured as
+ * either:
+ * - push_inner_tag=0: the inner tag is never pushed into the frame
+ * (and we lose info about the classified VLAN). This is
+ * good when the classified VLAN is a discardable quantity
+ * for the software RX path: it is either set to
+ * OCELOT_STANDALONE_PVID, or to
+ * ocelot_vlan_unaware_pvid(bridge).
+ * - push_inner_tag=1: the inner tag is always pushed. This is good when the
+ * classified VLAN is not a discardable quantity (the port
+ * is under a VLAN-aware bridge, and software needs to
+ * continue processing the packet in the same VLAN as the
+ * hardware).
+ * The point is that what is good for a VLAN-unaware port is not good for a
+ * VLAN-aware port, and vice versa. Thus, the RX tagging rules must be kept in
+ * sync with the VLAN filtering state of the port.
+ */
+static void
+felix_update_tag_8021q_rx_rule(struct ocelot_vcap_filter *outer_tagging_rule,
+ bool vlan_filtering)
+{
+ if (vlan_filtering)
+ outer_tagging_rule->action.push_inner_tag = OCELOT_ES0_TAG;
+ else
+ outer_tagging_rule->action.push_inner_tag = OCELOT_NO_ES0_TAG;
+}
+
/* Set up VCAP ES0 rules for pushing a tag_8021q VLAN towards the CPU such that
* the tagger can perform RX source port identification.
*/
static int felix_tag_8021q_vlan_add_rx(struct dsa_switch *ds, int port,
- int upstream, u16 vid)
+ int upstream, u16 vid,
+ bool vlan_filtering)
{
struct ocelot_vcap_filter *outer_tagging_rule;
struct ocelot *ocelot = ds->priv;
@@ -96,6 +131,14 @@ static int felix_tag_8021q_vlan_add_rx(struct dsa_switch *ds, int port,
outer_tagging_rule->action.tag_a_tpid_sel = OCELOT_TAG_TPID_SEL_8021AD;
outer_tagging_rule->action.tag_a_vid_sel = 1;
outer_tagging_rule->action.vid_a_val = vid;
+ felix_update_tag_8021q_rx_rule(outer_tagging_rule, vlan_filtering);
+ outer_tagging_rule->action.tag_b_tpid_sel = OCELOT_TAG_TPID_SEL_8021Q;
+ /* Leave TAG_B_VID_SEL at 0 (Classified VID + VID_B_VAL). Since we also
+ * leave VID_B_VAL at 0, this makes ES0 tag B (the inner tag) equal to
+ * the classified VID, which we need to see in the DSA tagger's receive
+ * path. Note: the inner tag is only visible in the packet when pushed
+ * (push_inner_tag == OCELOT_ES0_TAG).
+ */
err = ocelot_vcap_filter_add(ocelot, outer_tagging_rule, NULL);
if (err)
@@ -227,6 +270,7 @@ static int felix_tag_8021q_vlan_del_tx(struct dsa_switch *ds, int port, u16 vid)
static int felix_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
u16 flags)
{
+ struct dsa_port *dp = dsa_to_port(ds, port);
struct dsa_port *cpu_dp;
int err;
@@ -234,11 +278,12 @@ static int felix_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
* membership, which we aren't. So we don't need to add any VCAP filter
* for the CPU port.
*/
- if (!dsa_is_user_port(ds, port))
+ if (!dsa_port_is_user(dp))
return 0;
dsa_switch_for_each_cpu_port(cpu_dp, ds) {
- err = felix_tag_8021q_vlan_add_rx(ds, port, cpu_dp->index, vid);
+ err = felix_tag_8021q_vlan_add_rx(ds, port, cpu_dp->index, vid,
+ dsa_port_is_vlan_filtering(dp));
if (err)
return err;
}
@@ -258,10 +303,11 @@ add_tx_failed:
static int felix_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
{
+ struct dsa_port *dp = dsa_to_port(ds, port);
struct dsa_port *cpu_dp;
int err;
- if (!dsa_is_user_port(ds, port))
+ if (!dsa_port_is_user(dp))
return 0;
dsa_switch_for_each_cpu_port(cpu_dp, ds) {
@@ -278,11 +324,41 @@ static int felix_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
del_tx_failed:
dsa_switch_for_each_cpu_port(cpu_dp, ds)
- felix_tag_8021q_vlan_add_rx(ds, port, cpu_dp->index, vid);
+ felix_tag_8021q_vlan_add_rx(ds, port, cpu_dp->index, vid,
+ dsa_port_is_vlan_filtering(dp));
return err;
}
+static int felix_update_tag_8021q_rx_rules(struct dsa_switch *ds, int port,
+ bool vlan_filtering)
+{
+ struct ocelot_vcap_filter *outer_tagging_rule;
+ struct ocelot_vcap_block *block_vcap_es0;
+ struct ocelot *ocelot = ds->priv;
+ struct dsa_port *cpu_dp;
+ unsigned long cookie;
+ int err;
+
+ block_vcap_es0 = &ocelot->block[VCAP_ES0];
+
+ dsa_switch_for_each_cpu_port(cpu_dp, ds) {
+ cookie = OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port,
+ cpu_dp->index);
+
+ outer_tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_es0,
+ cookie, false);
+
+ felix_update_tag_8021q_rx_rule(outer_tagging_rule, vlan_filtering);
+
+ err = ocelot_vcap_filter_replace(ocelot, outer_tagging_rule);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static int felix_trap_get_cpu_port(struct dsa_switch *ds,
const struct ocelot_vcap_filter *trap)
{
@@ -528,7 +604,19 @@ static int felix_tag_8021q_setup(struct dsa_switch *ds)
* so we need to be careful that there are no extra frames to be
* dequeued over MMIO, since we would never know to discard them.
*/
+ ocelot_lock_xtr_grp_bh(ocelot, 0);
ocelot_drain_cpu_queue(ocelot, 0);
+ ocelot_unlock_xtr_grp_bh(ocelot, 0);
+
+ /* Problem: when using push_inner_tag=1 for ES0 tag B, we lose info
+ * about whether the received packets were VLAN-tagged on the wire,
+ * since they are always tagged on egress towards the CPU port.
+ *
+ * Since using push_inner_tag=1 is unavoidable for VLAN-aware bridges,
+ * we must work around the fallout by untagging in software to make
+ * untagged reception work more or less as expected.
+ */
+ ds->untag_vlan_aware_bridge_pvid = true;
return 0;
}
@@ -554,6 +642,8 @@ static void felix_tag_8021q_teardown(struct dsa_switch *ds)
ocelot_port_teardown_dsa_8021q_cpu(ocelot, dp->index);
dsa_tag_8021q_unregister(ds);
+
+ ds->untag_vlan_aware_bridge_pvid = false;
}
static unsigned long felix_tag_8021q_get_host_fwd_mask(struct dsa_switch *ds)
@@ -1008,8 +1098,23 @@ static int felix_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
struct netlink_ext_ack *extack)
{
struct ocelot *ocelot = ds->priv;
+ bool using_tag_8021q;
+ struct felix *felix;
+ int err;
+
+ err = ocelot_port_vlan_filtering(ocelot, port, enabled, extack);
+ if (err)
+ return err;
- return ocelot_port_vlan_filtering(ocelot, port, enabled, extack);
+ felix = ocelot_to_felix(ocelot);
+ using_tag_8021q = felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q;
+ if (using_tag_8021q) {
+ err = felix_update_tag_8021q_rx_rules(ds, port, enabled);
+ if (err)
+ return err;
+ }
+
+ return 0;
}
static int felix_vlan_add(struct dsa_switch *ds, int port,
@@ -1048,26 +1153,37 @@ static void felix_phylink_get_caps(struct dsa_switch *ds, int port,
__set_bit(ocelot->ports[port]->phy_mode,
config->supported_interfaces);
+ if (ocelot->ports[port]->phy_mode == PHY_INTERFACE_MODE_USXGMII)
+ __set_bit(PHY_INTERFACE_MODE_10G_QXGMII,
+ config->supported_interfaces);
}
-static void felix_phylink_mac_config(struct dsa_switch *ds, int port,
+static void felix_phylink_mac_config(struct phylink_config *config,
unsigned int mode,
const struct phylink_link_state *state)
{
- struct ocelot *ocelot = ds->priv;
- struct felix *felix = ocelot_to_felix(ocelot);
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ocelot *ocelot = dp->ds->priv;
+ int port = dp->index;
+ struct felix *felix;
+
+ felix = ocelot_to_felix(ocelot);
if (felix->info->phylink_mac_config)
felix->info->phylink_mac_config(ocelot, port, mode, state);
}
-static struct phylink_pcs *felix_phylink_mac_select_pcs(struct dsa_switch *ds,
- int port,
- phy_interface_t iface)
+static struct phylink_pcs *
+felix_phylink_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t iface)
{
- struct ocelot *ocelot = ds->priv;
- struct felix *felix = ocelot_to_felix(ocelot);
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ocelot *ocelot = dp->ds->priv;
struct phylink_pcs *pcs = NULL;
+ int port = dp->index;
+ struct felix *felix;
+
+ felix = ocelot_to_felix(ocelot);
if (felix->pcs && felix->pcs[port])
pcs = felix->pcs[port];
@@ -1075,11 +1191,13 @@ static struct phylink_pcs *felix_phylink_mac_select_pcs(struct dsa_switch *ds,
return pcs;
}
-static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port,
+static void felix_phylink_mac_link_down(struct phylink_config *config,
unsigned int link_an_mode,
phy_interface_t interface)
{
- struct ocelot *ocelot = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ocelot *ocelot = dp->ds->priv;
+ int port = dp->index;
struct felix *felix;
felix = ocelot_to_felix(ocelot);
@@ -1088,15 +1206,19 @@ static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port,
felix->info->quirks);
}
-static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,
+static void felix_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
unsigned int link_an_mode,
phy_interface_t interface,
- struct phy_device *phydev,
int speed, int duplex,
bool tx_pause, bool rx_pause)
{
- struct ocelot *ocelot = ds->priv;
- struct felix *felix = ocelot_to_felix(ocelot);
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ocelot *ocelot = dp->ds->priv;
+ int port = dp->index;
+ struct felix *felix;
+
+ felix = ocelot_to_felix(ocelot);
ocelot_phylink_mac_link_up(ocelot, port, phydev, link_an_mode,
interface, speed, duplex, tx_pause, rx_pause,
@@ -1111,6 +1233,7 @@ static int felix_port_enable(struct dsa_switch *ds, int port,
{
struct dsa_port *dp = dsa_to_port(ds, port);
struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
if (!dsa_port_is_user(dp))
return 0;
@@ -1124,7 +1247,25 @@ static int felix_port_enable(struct dsa_switch *ds, int port,
}
}
- return 0;
+ if (!dp->hsr_dev || felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q)
+ return 0;
+
+ return dsa_port_simple_hsr_join(ds, port, dp->hsr_dev, NULL);
+}
+
+static void felix_port_disable(struct dsa_switch *ds, int port)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+
+ if (!dsa_port_is_user(dp))
+ return;
+
+ if (!dp->hsr_dev || felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q)
+ return;
+
+ dsa_port_simple_hsr_leave(ds, port, dp->hsr_dev);
}
static void felix_port_qos_map_init(struct ocelot *ocelot, int port)
@@ -1197,6 +1338,14 @@ static void felix_get_eth_phy_stats(struct dsa_switch *ds, int port,
ocelot_port_get_eth_phy_stats(ocelot, port, phy_stats);
}
+static void felix_get_ts_stats(struct dsa_switch *ds, int port,
+ struct ethtool_ts_stats *ts_stats)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_get_ts_stats(ocelot, port, ts_stats);
+}
+
static void felix_get_strings(struct dsa_switch *ds, int port,
u32 stringset, u8 *data)
{
@@ -1220,7 +1369,7 @@ static int felix_get_sset_count(struct dsa_switch *ds, int port, int sset)
}
static int felix_get_ts_info(struct dsa_switch *ds, int port,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct ocelot *ocelot = ds->priv;
@@ -1232,6 +1381,7 @@ static const u32 felix_phy_match_table[PHY_INTERFACE_MODE_MAX] = {
[PHY_INTERFACE_MODE_SGMII] = OCELOT_PORT_MODE_SGMII,
[PHY_INTERFACE_MODE_QSGMII] = OCELOT_PORT_MODE_QSGMII,
[PHY_INTERFACE_MODE_USXGMII] = OCELOT_PORT_MODE_USXGMII,
+ [PHY_INTERFACE_MODE_10G_QXGMII] = OCELOT_PORT_MODE_10G_QXGMII,
[PHY_INTERFACE_MODE_1000BASEX] = OCELOT_PORT_MODE_1000BASEX,
[PHY_INTERFACE_MODE_2500BASEX] = OCELOT_PORT_MODE_2500BASEX,
};
@@ -1251,9 +1401,8 @@ static int felix_parse_ports_node(struct felix *felix,
phy_interface_t *port_phy_modes)
{
struct device *dev = felix->ocelot.dev;
- struct device_node *child;
- for_each_available_child_of_node(ports_node, child) {
+ for_each_available_child_of_node_scoped(ports_node, child) {
phy_interface_t phy_mode;
u32 port;
int err;
@@ -1262,7 +1411,6 @@ static int felix_parse_ports_node(struct felix *felix,
if (of_property_read_u32(child, "reg", &port) < 0) {
dev_err(dev, "Port number not defined in device tree "
"(property \"reg\")\n");
- of_node_put(child);
return -ENODEV;
}
@@ -1272,7 +1420,6 @@ static int felix_parse_ports_node(struct felix *felix,
dev_err(dev, "Failed to read phy-mode or "
"phy-interface-type property for port %d\n",
port);
- of_node_put(child);
return -ENODEV;
}
@@ -1504,6 +1651,8 @@ static void felix_port_deferred_xmit(struct kthread_work *work)
int port = xmit_work->dp->index;
int retries = 10;
+ ocelot_lock_inj_grp(ocelot, 0);
+
do {
if (ocelot_can_inject(ocelot, 0))
break;
@@ -1512,6 +1661,7 @@ static void felix_port_deferred_xmit(struct kthread_work *work)
} while (--retries);
if (!retries) {
+ ocelot_unlock_inj_grp(ocelot, 0);
dev_err(ocelot->dev, "port %d failed to inject skb\n",
port);
ocelot_port_purge_txtstamp_skb(ocelot, port, skb);
@@ -1521,6 +1671,8 @@ static void felix_port_deferred_xmit(struct kthread_work *work)
ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb);
+ ocelot_unlock_inj_grp(ocelot, 0);
+
consume_skb(skb);
kfree(xmit_work);
}
@@ -1583,6 +1735,15 @@ static int felix_setup(struct dsa_switch *ds)
felix_port_qos_map_init(ocelot, dp->index);
}
+ if (felix->info->request_irq) {
+ err = felix->info->request_irq(ocelot);
+ if (err) {
+ dev_err(ocelot->dev, "Failed to request IRQ: %pe\n",
+ ERR_PTR(err));
+ goto out_deinit_ports;
+ }
+ }
+
err = ocelot_devlink_sb_register(ocelot);
if (err)
goto out_deinit_ports;
@@ -1636,22 +1797,25 @@ static void felix_teardown(struct dsa_switch *ds)
}
static int felix_hwtstamp_get(struct dsa_switch *ds, int port,
- struct ifreq *ifr)
+ struct kernel_hwtstamp_config *config)
{
struct ocelot *ocelot = ds->priv;
- return ocelot_hwstamp_get(ocelot, port, ifr);
+ ocelot_hwstamp_get(ocelot, port, config);
+
+ return 0;
}
static int felix_hwtstamp_set(struct dsa_switch *ds, int port,
- struct ifreq *ifr)
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
bool using_tag_8021q;
int err;
- err = ocelot_hwstamp_set(ocelot, port, ifr);
+ err = ocelot_hwstamp_set(ocelot, port, config, extack);
if (err)
return err;
@@ -1671,6 +1835,8 @@ static bool felix_check_xtr_pkt(struct ocelot *ocelot)
if (!felix->info->quirk_no_xtr_irq)
return false;
+ ocelot_lock_xtr_grp(ocelot, grp);
+
while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) {
struct sk_buff *skb;
unsigned int type;
@@ -1707,6 +1873,8 @@ out:
ocelot_drain_cpu_queue(ocelot, 0);
}
+ ocelot_unlock_xtr_grp(ocelot, grp);
+
return true;
}
@@ -2083,7 +2251,60 @@ static void felix_get_mm_stats(struct dsa_switch *ds, int port,
ocelot_port_get_mm_stats(ocelot, port, stats);
}
-const struct dsa_switch_ops felix_switch_ops = {
+/* Depending on port type, we may be able to support the offload later (with
+ * the "ocelot"/"seville" tagging protocols), or never.
+ * If we return 0, the dp->hsr_dev reference is kept for later; if we return
+ * -EOPNOTSUPP, it is cleared (which helps to not bother
+ * dsa_port_simple_hsr_leave() with an offload that didn't pass validation).
+ */
+static int felix_port_hsr_join(struct dsa_switch *ds, int port,
+ struct net_device *hsr,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+
+ if (felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q) {
+ int err;
+
+ err = dsa_port_simple_hsr_validate(ds, port, hsr, extack);
+ if (err)
+ return err;
+
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offloading not supported with \"ocelot-8021q\"");
+ return 0;
+ }
+
+ if (!(dsa_to_port(ds, port)->user->flags & IFF_UP))
+ return 0;
+
+ return dsa_port_simple_hsr_join(ds, port, hsr, extack);
+}
+
+static int felix_port_hsr_leave(struct dsa_switch *ds, int port,
+ struct net_device *hsr)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+
+ if (felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q)
+ return 0;
+
+ if (!(dsa_to_port(ds, port)->user->flags & IFF_UP))
+ return 0;
+
+ return dsa_port_simple_hsr_leave(ds, port, hsr);
+}
+
+static const struct phylink_mac_ops felix_phylink_mac_ops = {
+ .mac_select_pcs = felix_phylink_mac_select_pcs,
+ .mac_config = felix_phylink_mac_config,
+ .mac_link_down = felix_phylink_mac_link_down,
+ .mac_link_up = felix_phylink_mac_link_up,
+};
+
+static const struct dsa_switch_ops felix_switch_ops = {
.get_tag_protocol = felix_get_tag_protocol,
.change_tag_protocol = felix_change_tag_protocol,
.connect_tag_protocol = felix_connect_tag_protocol,
@@ -2096,6 +2317,7 @@ const struct dsa_switch_ops felix_switch_ops = {
.get_stats64 = felix_get_stats64,
.get_pause_stats = felix_get_pause_stats,
.get_rmon_stats = felix_get_rmon_stats,
+ .get_ts_stats = felix_get_ts_stats,
.get_eth_ctrl_stats = felix_get_eth_ctrl_stats,
.get_eth_mac_stats = felix_get_eth_mac_stats,
.get_eth_phy_stats = felix_get_eth_phy_stats,
@@ -2104,11 +2326,8 @@ const struct dsa_switch_ops felix_switch_ops = {
.get_sset_count = felix_get_sset_count,
.get_ts_info = felix_get_ts_info,
.phylink_get_caps = felix_phylink_get_caps,
- .phylink_mac_config = felix_phylink_mac_config,
- .phylink_mac_select_pcs = felix_phylink_mac_select_pcs,
- .phylink_mac_link_down = felix_phylink_mac_link_down,
- .phylink_mac_link_up = felix_phylink_mac_link_up,
.port_enable = felix_port_enable,
+ .port_disable = felix_port_disable,
.port_fast_age = felix_port_fast_age,
.port_fdb_dump = felix_fdb_dump,
.port_fdb_add = felix_fdb_add,
@@ -2165,8 +2384,56 @@ const struct dsa_switch_ops felix_switch_ops = {
.port_del_dscp_prio = felix_port_del_dscp_prio,
.port_set_host_flood = felix_port_set_host_flood,
.port_change_conduit = felix_port_change_conduit,
+ .port_hsr_join = felix_port_hsr_join,
+ .port_hsr_leave = felix_port_hsr_leave,
};
-EXPORT_SYMBOL_GPL(felix_switch_ops);
+
+int felix_register_switch(struct device *dev, resource_size_t switch_base,
+ int num_flooding_pgids, bool ptp,
+ bool mm_supported,
+ enum dsa_tag_protocol init_tag_proto,
+ const struct felix_info *info)
+{
+ struct dsa_switch *ds;
+ struct ocelot *ocelot;
+ struct felix *felix;
+ int err;
+
+ felix = devm_kzalloc(dev, sizeof(*felix), GFP_KERNEL);
+ if (!felix)
+ return -ENOMEM;
+
+ ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
+ if (!ds)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, felix);
+
+ ocelot = &felix->ocelot;
+ ocelot->dev = dev;
+ ocelot->num_flooding_pgids = num_flooding_pgids;
+ ocelot->ptp = ptp;
+ ocelot->mm_supported = mm_supported;
+
+ felix->info = info;
+ felix->switch_base = switch_base;
+ felix->ds = ds;
+ felix->tag_proto = init_tag_proto;
+
+ ds->dev = dev;
+ ds->num_ports = info->num_ports;
+ ds->num_tx_queues = OCELOT_NUM_TC;
+ ds->ops = &felix_switch_ops;
+ ds->phylink_mac_ops = &felix_phylink_mac_ops;
+ ds->priv = ocelot;
+
+ err = dsa_register_switch(ds);
+ if (err)
+ dev_err_probe(dev, err, "Failed to register DSA switch\n");
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(felix_register_switch);
struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port)
{
diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h
index dbf5872fe367..a657b190c5d7 100644
--- a/drivers/net/dsa/ocelot/felix.h
+++ b/drivers/net/dsa/ocelot/felix.h
@@ -12,8 +12,9 @@
#define OCELOT_PORT_MODE_SGMII BIT(1)
#define OCELOT_PORT_MODE_QSGMII BIT(2)
#define OCELOT_PORT_MODE_2500BASEX BIT(3)
-#define OCELOT_PORT_MODE_USXGMII BIT(4)
+#define OCELOT_PORT_MODE_USXGMII BIT(4) /* compatibility */
#define OCELOT_PORT_MODE_1000BASEX BIT(5)
+#define OCELOT_PORT_MODE_10G_QXGMII BIT(6)
struct device_node;
@@ -32,7 +33,6 @@ struct felix_info {
const u32 *port_modes;
int num_mact_rows;
int num_ports;
- int num_tx_queues;
struct vcap_props *vcap;
u16 vcap_pol_base;
u16 vcap_pol_max;
@@ -64,6 +64,7 @@ struct felix_info {
const struct phylink_link_state *state);
int (*configure_serdes)(struct ocelot *ocelot, int port,
struct device_node *portnp);
+ int (*request_irq)(struct ocelot *ocelot);
};
/* Methods for initializing the hardware resources specific to a tagging
@@ -82,8 +83,6 @@ struct felix_tag_proto_ops {
struct netlink_ext_ack *extack);
};
-extern const struct dsa_switch_ops felix_switch_ops;
-
/* DSA glue / front-end for struct ocelot */
struct felix {
struct dsa_switch *ds;
@@ -99,6 +98,11 @@ struct felix {
unsigned long host_flood_mc_mask;
};
+int felix_register_switch(struct device *dev, resource_size_t switch_base,
+ int num_flooding_pgids, bool ptp,
+ bool mm_supported,
+ enum dsa_tag_protocol init_tag_proto,
+ const struct felix_info *info);
struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port);
int felix_netdev_to_port(struct net_device *dev);
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 3c5509e75a54..8cf4c8986587 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -24,7 +24,7 @@
#define VSC9959_NUM_PORTS 6
#define VSC9959_TAS_GCL_ENTRY_MAX 63
-#define VSC9959_TAS_MIN_GATE_LEN_NS 33
+#define VSC9959_TAS_MIN_GATE_LEN_NS 35
#define VSC9959_VCAP_POLICER_BASE 63
#define VSC9959_VCAP_POLICER_MAX 383
#define VSC9959_SWITCH_PCI_BAR 4
@@ -34,7 +34,8 @@
OCELOT_PORT_MODE_QSGMII | \
OCELOT_PORT_MODE_1000BASEX | \
OCELOT_PORT_MODE_2500BASEX | \
- OCELOT_PORT_MODE_USXGMII)
+ OCELOT_PORT_MODE_USXGMII | \
+ OCELOT_PORT_MODE_10G_QXGMII)
static const u32 vsc9959_port_modes[VSC9959_NUM_PORTS] = {
VSC9959_PORT_MODE_SERDES,
@@ -1056,11 +1057,15 @@ static void vsc9959_mdio_bus_free(struct ocelot *ocelot)
mdiobus_free(felix->imdio);
}
-/* The switch considers any frame (regardless of size) as eligible for
- * transmission if the traffic class gate is open for at least 33 ns.
+/* The switch considers any frame (regardless of size) as eligible
+ * for transmission if the traffic class gate is open for at least
+ * VSC9959_TAS_MIN_GATE_LEN_NS.
+ *
* Overruns are prevented by cropping an interval at the end of the gate time
- * slot for which egress scheduling is blocked, but we need to still keep 33 ns
- * available for one packet to be transmitted, otherwise the port tc will hang.
+ * slot for which egress scheduling is blocked, but we need to still keep
+ * VSC9959_TAS_MIN_GATE_LEN_NS available for one packet to be transmitted,
+ * otherwise the port tc will hang.
+ *
* This function returns the size of a gate interval that remains available for
* setting the guard band, after reserving the space for one egress frame.
*/
@@ -1303,7 +1308,8 @@ static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
* per-tc static guard band lengths, so it reduces the
* useful gate interval length. Therefore, be careful
* to calculate a guard band (and therefore max_sdu)
- * that still leaves 33 ns available in the time slot.
+ * that still leaves VSC9959_TAS_MIN_GATE_LEN_NS
+ * available in the time slot.
*/
max_sdu = div_u64(remaining_gate_len_ps, picos_per_byte);
/* A TC gate may be completely closed, which is a
@@ -1474,10 +1480,13 @@ static int vsc9959_qos_port_tas_set(struct ocelot *ocelot, int port,
/* Hardware errata - Admin config could not be overwritten if
* config is pending, need reset the TAS module
*/
- val = ocelot_read(ocelot, QSYS_PARAM_STATUS_REG_8);
- if (val & QSYS_PARAM_STATUS_REG_8_CONFIG_PENDING) {
- ret = -EBUSY;
- goto err_reset_tc;
+ val = ocelot_read_rix(ocelot, QSYS_TAG_CONFIG, port);
+ if (val & QSYS_TAG_CONFIG_ENABLE) {
+ val = ocelot_read(ocelot, QSYS_PARAM_STATUS_REG_8);
+ if (val & QSYS_PARAM_STATUS_REG_8_CONFIG_PENDING) {
+ ret = -EBUSY;
+ goto err_reset_tc;
+ }
}
ocelot_rmw_rix(ocelot,
@@ -1535,7 +1544,7 @@ static void vsc9959_tas_clock_adjust(struct ocelot *ocelot)
struct tc_taprio_qopt_offload *taprio;
struct ocelot_port *ocelot_port;
struct timespec64 base_ts;
- int port;
+ int i, port;
u32 val;
mutex_lock(&ocelot->fwd_domain_lock);
@@ -1567,6 +1576,9 @@ static void vsc9959_tas_clock_adjust(struct ocelot *ocelot)
QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB_M,
QSYS_PARAM_CFG_REG_3);
+ for (i = 0; i < taprio->num_entries; i++)
+ vsc9959_tas_gcl_set(ocelot, i, &taprio->entries[i]);
+
ocelot_rmw(ocelot, QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE,
QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE,
QSYS_TAS_PARAM_CFG_CTRL);
@@ -1733,7 +1745,7 @@ struct felix_stream_gate {
u64 cycletime;
u64 cycletime_ext;
u32 num_entries;
- struct action_gate_entry entries[];
+ struct action_gate_entry entries[] __counted_by(num_entries);
};
struct felix_stream_gate_entry {
@@ -1755,6 +1767,9 @@ static int vsc9959_stream_identify(struct flow_cls_offload *f,
BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS)))
return -EOPNOTSUPP;
+ if (flow_rule_match_has_control_flags(rule, f->common.extack))
+ return -EOPNOTSUPP;
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
struct flow_match_eth_addrs match;
@@ -2602,6 +2617,28 @@ set:
}
}
+/* The INTB interrupt is shared between PTP TX timestamp availability
+ * notification and MAC Merge status change on each port.
+ */
+static irqreturn_t vsc9959_irq_handler(int irq, void *data)
+{
+ struct ocelot *ocelot = data;
+
+ ocelot_get_txtstamp(ocelot);
+ ocelot_mm_irq(ocelot);
+
+ return IRQ_HANDLED;
+}
+
+static int vsc9959_request_irq(struct ocelot *ocelot)
+{
+ struct pci_dev *pdev = to_pci_dev(ocelot->dev);
+
+ return devm_request_threaded_irq(ocelot->dev, pdev->irq, NULL,
+ &vsc9959_irq_handler, IRQF_ONESHOT,
+ "felix-intb", ocelot);
+}
+
static const struct ocelot_ops vsc9959_ops = {
.reset = vsc9959_reset,
.wm_enc = vsc9959_wm_enc,
@@ -2633,7 +2670,6 @@ static const struct felix_info felix_info_vsc9959 = {
.vcap_pol_max2 = 0,
.num_mact_rows = 2048,
.num_ports = VSC9959_NUM_PORTS,
- .num_tx_queues = OCELOT_NUM_TC,
.quirks = FELIX_MAC_QUIRKS,
.quirk_no_xtr_irq = true,
.ptp_caps = &vsc9959_ptp_caps,
@@ -2642,98 +2678,36 @@ static const struct felix_info felix_info_vsc9959 = {
.port_modes = vsc9959_port_modes,
.port_setup_tc = vsc9959_port_setup_tc,
.port_sched_speed_set = vsc9959_sched_speed_set,
+ .request_irq = vsc9959_request_irq,
};
-/* The INTB interrupt is shared between for PTP TX timestamp availability
- * notification and MAC Merge status change on each port.
- */
-static irqreturn_t felix_irq_handler(int irq, void *data)
-{
- struct ocelot *ocelot = (struct ocelot *)data;
-
- ocelot_get_txtstamp(ocelot);
- ocelot_mm_irq(ocelot);
-
- return IRQ_HANDLED;
-}
-
static int felix_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
- struct dsa_switch *ds;
- struct ocelot *ocelot;
- struct felix *felix;
+ struct device *dev = &pdev->dev;
+ resource_size_t switch_base;
int err;
- if (pdev->dev.of_node && !of_device_is_available(pdev->dev.of_node)) {
- dev_info(&pdev->dev, "device is disabled, skipping\n");
- return -ENODEV;
- }
-
err = pci_enable_device(pdev);
if (err) {
- dev_err(&pdev->dev, "device enable failed\n");
- goto err_pci_enable;
- }
-
- felix = kzalloc(sizeof(struct felix), GFP_KERNEL);
- if (!felix) {
- err = -ENOMEM;
- dev_err(&pdev->dev, "Failed to allocate driver memory\n");
- goto err_alloc_felix;
+ dev_err(dev, "device enable failed: %pe\n", ERR_PTR(err));
+ return err;
}
- pci_set_drvdata(pdev, felix);
- ocelot = &felix->ocelot;
- ocelot->dev = &pdev->dev;
- ocelot->num_flooding_pgids = OCELOT_NUM_TC;
- felix->info = &felix_info_vsc9959;
- felix->switch_base = pci_resource_start(pdev, VSC9959_SWITCH_PCI_BAR);
-
pci_set_master(pdev);
- err = devm_request_threaded_irq(&pdev->dev, pdev->irq, NULL,
- &felix_irq_handler, IRQF_ONESHOT,
- "felix-intb", ocelot);
- if (err) {
- dev_err(&pdev->dev, "Failed to request irq\n");
- goto err_alloc_irq;
- }
-
- ocelot->ptp = 1;
- ocelot->mm_supported = true;
+ switch_base = pci_resource_start(pdev, VSC9959_SWITCH_PCI_BAR);
- ds = kzalloc(sizeof(struct dsa_switch), GFP_KERNEL);
- if (!ds) {
- err = -ENOMEM;
- dev_err(&pdev->dev, "Failed to allocate DSA switch\n");
- goto err_alloc_ds;
- }
-
- ds->dev = &pdev->dev;
- ds->num_ports = felix->info->num_ports;
- ds->num_tx_queues = felix->info->num_tx_queues;
- ds->ops = &felix_switch_ops;
- ds->priv = ocelot;
- felix->ds = ds;
- felix->tag_proto = DSA_TAG_PROTO_OCELOT;
-
- err = dsa_register_switch(ds);
- if (err) {
- dev_err_probe(&pdev->dev, err, "Failed to register DSA switch\n");
- goto err_register_ds;
- }
+ err = felix_register_switch(dev, switch_base, OCELOT_NUM_TC,
+ true, true, DSA_TAG_PROTO_OCELOT,
+ &felix_info_vsc9959);
+ if (err)
+ goto out_disable;
return 0;
-err_register_ds:
- kfree(ds);
-err_alloc_ds:
-err_alloc_irq:
- kfree(felix);
-err_alloc_felix:
+out_disable:
pci_disable_device(pdev);
-err_pci_enable:
return err;
}
@@ -2746,9 +2720,6 @@ static void felix_pci_remove(struct pci_dev *pdev)
dsa_unregister_switch(felix->ds);
- kfree(felix->ds);
- kfree(felix);
-
pci_disable_device(pdev);
}
diff --git a/drivers/net/dsa/ocelot/ocelot_ext.c b/drivers/net/dsa/ocelot/ocelot_ext.c
index 22187d831c4b..d5c557a20292 100644
--- a/drivers/net/dsa/ocelot/ocelot_ext.c
+++ b/drivers/net/dsa/ocelot/ocelot_ext.c
@@ -57,7 +57,6 @@ static const struct felix_info vsc7512_info = {
.vcap = vsc7514_vcap_props,
.num_mact_rows = 1024,
.num_ports = VSC7514_NUM_PORTS,
- .num_tx_queues = OCELOT_NUM_TC,
.port_modes = vsc7512_port_modes,
.phylink_mac_config = ocelot_phylink_mac_config,
.configure_serdes = ocelot_port_configure_serdes,
@@ -65,54 +64,8 @@ static const struct felix_info vsc7512_info = {
static int ocelot_ext_probe(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
- struct dsa_switch *ds;
- struct ocelot *ocelot;
- struct felix *felix;
- int err;
-
- felix = kzalloc(sizeof(*felix), GFP_KERNEL);
- if (!felix)
- return -ENOMEM;
-
- dev_set_drvdata(dev, felix);
-
- ocelot = &felix->ocelot;
- ocelot->dev = dev;
-
- ocelot->num_flooding_pgids = 1;
-
- felix->info = &vsc7512_info;
-
- ds = kzalloc(sizeof(*ds), GFP_KERNEL);
- if (!ds) {
- err = -ENOMEM;
- dev_err_probe(dev, err, "Failed to allocate DSA switch\n");
- goto err_free_felix;
- }
-
- ds->dev = dev;
- ds->num_ports = felix->info->num_ports;
- ds->num_tx_queues = felix->info->num_tx_queues;
-
- ds->ops = &felix_switch_ops;
- ds->priv = ocelot;
- felix->ds = ds;
- felix->tag_proto = DSA_TAG_PROTO_OCELOT;
-
- err = dsa_register_switch(ds);
- if (err) {
- dev_err_probe(dev, err, "Failed to register DSA switch\n");
- goto err_free_ds;
- }
-
- return 0;
-
-err_free_ds:
- kfree(ds);
-err_free_felix:
- kfree(felix);
- return err;
+ return felix_register_switch(&pdev->dev, 0, 1, false, false,
+ DSA_TAG_PROTO_OCELOT, &vsc7512_info);
}
static void ocelot_ext_remove(struct platform_device *pdev)
@@ -123,9 +76,6 @@ static void ocelot_ext_remove(struct platform_device *pdev)
return;
dsa_unregister_switch(felix->ds);
-
- kfree(felix->ds);
- kfree(felix);
}
static void ocelot_ext_shutdown(struct platform_device *pdev)
@@ -152,11 +102,11 @@ static struct platform_driver ocelot_ext_switch_driver = {
.of_match_table = ocelot_ext_switch_of_match,
},
.probe = ocelot_ext_probe,
- .remove_new = ocelot_ext_remove,
+ .remove = ocelot_ext_remove,
.shutdown = ocelot_ext_shutdown,
};
module_platform_driver(ocelot_ext_switch_driver);
MODULE_DESCRIPTION("External Ocelot Switch driver");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(MFD_OCELOT);
+MODULE_IMPORT_NS("MFD_OCELOT");
diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
index 049930da0521..eb3944ba2a72 100644
--- a/drivers/net/dsa/ocelot/seville_vsc9953.c
+++ b/drivers/net/dsa/ocelot/seville_vsc9953.c
@@ -963,7 +963,6 @@ static const struct felix_info seville_info_vsc9953 = {
.quirks = FELIX_MAC_QUIRKS,
.num_mact_rows = 2048,
.num_ports = VSC9953_NUM_PORTS,
- .num_tx_queues = OCELOT_NUM_TC,
.mdio_bus_alloc = vsc9953_mdio_bus_alloc,
.mdio_bus_free = vsc9953_mdio_bus_free,
.port_modes = vsc9953_port_modes,
@@ -971,62 +970,18 @@ static const struct felix_info seville_info_vsc9953 = {
static int seville_probe(struct platform_device *pdev)
{
- struct dsa_switch *ds;
- struct ocelot *ocelot;
+ struct device *dev = &pdev->dev;
struct resource *res;
- struct felix *felix;
- int err;
-
- felix = kzalloc(sizeof(struct felix), GFP_KERNEL);
- if (!felix) {
- err = -ENOMEM;
- dev_err(&pdev->dev, "Failed to allocate driver memory\n");
- goto err_alloc_felix;
- }
-
- platform_set_drvdata(pdev, felix);
-
- ocelot = &felix->ocelot;
- ocelot->dev = &pdev->dev;
- ocelot->num_flooding_pgids = 1;
- felix->info = &seville_info_vsc9953;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
- err = -EINVAL;
- dev_err(&pdev->dev, "Invalid resource\n");
- goto err_alloc_felix;
- }
- felix->switch_base = res->start;
-
- ds = kzalloc(sizeof(struct dsa_switch), GFP_KERNEL);
- if (!ds) {
- err = -ENOMEM;
- dev_err(&pdev->dev, "Failed to allocate DSA switch\n");
- goto err_alloc_ds;
- }
-
- ds->dev = &pdev->dev;
- ds->num_ports = felix->info->num_ports;
- ds->ops = &felix_switch_ops;
- ds->priv = ocelot;
- felix->ds = ds;
- felix->tag_proto = DSA_TAG_PROTO_SEVILLE;
-
- err = dsa_register_switch(ds);
- if (err) {
- dev_err(&pdev->dev, "Failed to register DSA switch: %d\n", err);
- goto err_register_ds;
+ dev_err(dev, "Invalid resource\n");
+ return -EINVAL;
}
- return 0;
-
-err_register_ds:
- kfree(ds);
-err_alloc_ds:
-err_alloc_felix:
- kfree(felix);
- return err;
+ return felix_register_switch(dev, res->start, 1, false, false,
+ DSA_TAG_PROTO_SEVILLE,
+ &seville_info_vsc9953);
}
static void seville_remove(struct platform_device *pdev)
@@ -1037,9 +992,6 @@ static void seville_remove(struct platform_device *pdev)
return;
dsa_unregister_switch(felix->ds);
-
- kfree(felix->ds);
- kfree(felix);
}
static void seville_shutdown(struct platform_device *pdev)
@@ -1062,7 +1014,7 @@ MODULE_DEVICE_TABLE(of, seville_of_match);
static struct platform_driver seville_vsc9953_driver = {
.probe = seville_probe,
- .remove_new = seville_remove,
+ .remove = seville_remove,
.shutdown = seville_shutdown,
.driver = {
.name = "mscc_seville",
diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c
index 8d9d271ac3af..0526aa96146e 100644
--- a/drivers/net/dsa/qca/ar9331.c
+++ b/drivers/net/dsa/qca/ar9331.c
@@ -523,28 +523,30 @@ static void ar9331_sw_phylink_get_caps(struct dsa_switch *ds, int port,
}
}
-static void ar9331_sw_phylink_mac_config(struct dsa_switch *ds, int port,
+static void ar9331_sw_phylink_mac_config(struct phylink_config *config,
unsigned int mode,
const struct phylink_link_state *state)
{
- struct ar9331_sw_priv *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ar9331_sw_priv *priv = dp->ds->priv;
struct regmap *regmap = priv->regmap;
int ret;
- ret = regmap_update_bits(regmap, AR9331_SW_REG_PORT_STATUS(port),
+ ret = regmap_update_bits(regmap, AR9331_SW_REG_PORT_STATUS(dp->index),
AR9331_SW_PORT_STATUS_LINK_EN |
AR9331_SW_PORT_STATUS_FLOW_LINK_EN, 0);
if (ret)
dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
}
-static void ar9331_sw_phylink_mac_link_down(struct dsa_switch *ds, int port,
+static void ar9331_sw_phylink_mac_link_down(struct phylink_config *config,
unsigned int mode,
phy_interface_t interface)
{
- struct ar9331_sw_priv *priv = ds->priv;
- struct ar9331_sw_port *p = &priv->port[port];
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ar9331_sw_priv *priv = dp->ds->priv;
struct regmap *regmap = priv->regmap;
+ int port = dp->index;
int ret;
ret = regmap_update_bits(regmap, AR9331_SW_REG_PORT_STATUS(port),
@@ -552,23 +554,24 @@ static void ar9331_sw_phylink_mac_link_down(struct dsa_switch *ds, int port,
if (ret)
dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
- cancel_delayed_work_sync(&p->mib_read);
+ cancel_delayed_work_sync(&priv->port[port].mib_read);
}
-static void ar9331_sw_phylink_mac_link_up(struct dsa_switch *ds, int port,
+static void ar9331_sw_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
unsigned int mode,
phy_interface_t interface,
- struct phy_device *phydev,
int speed, int duplex,
bool tx_pause, bool rx_pause)
{
- struct ar9331_sw_priv *priv = ds->priv;
- struct ar9331_sw_port *p = &priv->port[port];
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ar9331_sw_priv *priv = dp->ds->priv;
struct regmap *regmap = priv->regmap;
+ int port = dp->index;
u32 val;
int ret;
- schedule_delayed_work(&p->mib_read, 0);
+ schedule_delayed_work(&priv->port[port].mib_read, 0);
val = AR9331_SW_PORT_STATUS_MAC_MASK;
switch (speed) {
@@ -684,14 +687,17 @@ static void ar9331_get_pause_stats(struct dsa_switch *ds, int port,
spin_unlock(&p->stats_lock);
}
+static const struct phylink_mac_ops ar9331_phylink_mac_ops = {
+ .mac_config = ar9331_sw_phylink_mac_config,
+ .mac_link_down = ar9331_sw_phylink_mac_link_down,
+ .mac_link_up = ar9331_sw_phylink_mac_link_up,
+};
+
static const struct dsa_switch_ops ar9331_sw_ops = {
.get_tag_protocol = ar9331_sw_get_tag_protocol,
.setup = ar9331_sw_setup,
.port_disable = ar9331_sw_port_disable,
.phylink_get_caps = ar9331_sw_phylink_get_caps,
- .phylink_mac_config = ar9331_sw_phylink_mac_config,
- .phylink_mac_link_down = ar9331_sw_phylink_mac_link_down,
- .phylink_mac_link_up = ar9331_sw_phylink_mac_link_up,
.get_stats64 = ar9331_get_stats64,
.get_pause_stats = ar9331_get_pause_stats,
};
@@ -815,8 +821,8 @@ static int ar9331_sw_irq_init(struct ar9331_sw_priv *priv)
return ret;
}
- priv->irqdomain = irq_domain_add_linear(np, 1, &ar9331_sw_irqdomain_ops,
- priv);
+ priv->irqdomain = irq_domain_create_linear(dev_fwnode(dev), 1, &ar9331_sw_irqdomain_ops,
+ priv);
if (!priv->irqdomain) {
dev_err(dev, "failed to create IRQ domain\n");
return -EINVAL;
@@ -1015,7 +1021,7 @@ static const struct regmap_config ar9331_mdio_regmap_config = {
.cache_type = REGCACHE_MAPLE,
};
-static struct regmap_bus ar9331_sw_bus = {
+static const struct regmap_bus ar9331_sw_bus = {
.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
.val_format_endian_default = REGMAP_ENDIAN_NATIVE,
.read = ar9331_mdio_read,
@@ -1059,6 +1065,7 @@ static int ar9331_sw_probe(struct mdio_device *mdiodev)
ds->priv = priv;
priv->ops = ar9331_sw_ops;
ds->ops = &priv->ops;
+ ds->phylink_mac_ops = &ar9331_phylink_mac_ops;
dev_set_drvdata(&mdiodev->dev, priv);
for (i = 0; i < ARRAY_SIZE(priv->port); i++) {
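The ar9331 hunks above follow the generic DSA phylink conversion used throughout this series: the MAC callbacks move out of dsa_switch_ops into a dedicated phylink_mac_ops table, take a phylink_config instead of a (ds, port) pair, and recover the DSA port with dsa_phylink_to_port(). A minimal skeleton of that pattern, with hypothetical example_* names, is sketched below.

#include <linux/phy.h>
#include <linux/phylink.h>
#include <net/dsa.h>

static void example_mac_config(struct phylink_config *config, unsigned int mode,
			       const struct phylink_link_state *state)
{
	struct dsa_port *dp = dsa_phylink_to_port(config);

	/* program the MAC of port dp->index on dp->ds for @state */
	dev_dbg(dp->ds->dev, "port %d: mac_config\n", dp->index);
}

static void example_mac_link_down(struct phylink_config *config,
				  unsigned int mode, phy_interface_t interface)
{
	struct dsa_port *dp = dsa_phylink_to_port(config);

	dev_dbg(dp->ds->dev, "port %d: link down\n", dp->index);
}

static void example_mac_link_up(struct phylink_config *config,
				struct phy_device *phydev, unsigned int mode,
				phy_interface_t interface, int speed,
				int duplex, bool tx_pause, bool rx_pause)
{
	struct dsa_port *dp = dsa_phylink_to_port(config);

	/* apply speed/duplex/pause to the port's MAC and enable it */
	dev_dbg(dp->ds->dev, "port %d: link up, %d Mb/s\n", dp->index, speed);
}

static const struct phylink_mac_ops example_phylink_mac_ops = {
	.mac_config	= example_mac_config,
	.mac_link_down	= example_mac_link_down,
	.mac_link_up	= example_mac_link_up,
};
/* hooked up from probe with: ds->phylink_mac_ops = &example_phylink_mac_ops; */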
diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
index dab66c0c6f64..a36b8b07030e 100644
--- a/drivers/net/dsa/qca/qca8k-8xxx.c
+++ b/drivers/net/dsa/qca/qca8k-8xxx.c
@@ -342,7 +342,7 @@ static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
dev_queue_xmit(skb);
ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
- msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
+ QCA8K_ETHERNET_TIMEOUT);
*val = mgmt_eth_data->data[0];
if (len > QCA_HDR_MGMT_DATA1_LEN)
@@ -394,7 +394,7 @@ static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
dev_queue_xmit(skb);
ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
- msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
+ QCA8K_ETHERNET_TIMEOUT);
ack = mgmt_eth_data->ack;
@@ -565,7 +565,7 @@ qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_
return qca8k_regmap_update_bits_mii(priv, reg, mask, write_val);
}
-static struct regmap_config qca8k_regmap_config = {
+static const struct regmap_config qca8k_regmap_config = {
.reg_bits = 16,
.val_bits = 32,
.reg_stride = 4,
@@ -673,7 +673,7 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
* We therefore need to lock the MDIO bus onto which the switch is
* connected.
*/
- mutex_lock(&priv->bus->mdio_lock);
+ mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED);
/* Actually start the request:
* 1. Send mdio master packet
@@ -1019,7 +1019,7 @@ qca8k_setup_mdio_bus(struct qca8k_priv *priv)
of_get_phy_mode(port, &mode);
- if (of_property_read_bool(port, "phy-handle") &&
+ if (of_property_present(port, "phy-handle") &&
mode != PHY_INTERFACE_MODE_INTERNAL)
external_mdio_mask |= BIT(reg);
else
@@ -1283,11 +1283,13 @@ qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_inde
}
static struct phylink_pcs *
-qca8k_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
+qca8k_phylink_mac_select_pcs(struct phylink_config *config,
phy_interface_t interface)
{
- struct qca8k_priv *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct qca8k_priv *priv = dp->ds->priv;
struct phylink_pcs *pcs = NULL;
+ int port = dp->index;
switch (interface) {
case PHY_INTERFACE_MODE_SGMII:
@@ -1311,13 +1313,18 @@ qca8k_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
}
static void
-qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
+qca8k_phylink_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
- struct qca8k_priv *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct dsa_switch *ds = dp->ds;
+ struct qca8k_priv *priv;
+ int port = dp->index;
int cpu_port_index;
u32 reg;
+ priv = ds->priv;
+
switch (port) {
case 0: /* 1st CPU port */
if (state->interface != PHY_INTERFACE_MODE_RGMII &&
@@ -1426,20 +1433,24 @@ static void qca8k_phylink_get_caps(struct dsa_switch *ds, int port,
}
static void
-qca8k_phylink_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
+qca8k_phylink_mac_link_down(struct phylink_config *config, unsigned int mode,
phy_interface_t interface)
{
- struct qca8k_priv *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct qca8k_priv *priv = dp->ds->priv;
- qca8k_port_set_status(priv, port, 0);
+ qca8k_port_set_status(priv, dp->index, 0);
}
static void
-qca8k_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
- phy_interface_t interface, struct phy_device *phydev,
- int speed, int duplex, bool tx_pause, bool rx_pause)
+qca8k_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev, unsigned int mode,
+ phy_interface_t interface, int speed, int duplex,
+ bool tx_pause, bool rx_pause)
{
- struct qca8k_priv *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct qca8k_priv *priv = dp->ds->priv;
+ int port = dp->index;
u32 reg;
if (phylink_autoneg_inband(mode)) {
@@ -1463,10 +1474,10 @@ qca8k_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
if (duplex == DUPLEX_FULL)
reg |= QCA8K_PORT_STATUS_DUPLEX;
- if (rx_pause || dsa_is_cpu_port(ds, port))
+ if (rx_pause || dsa_port_is_cpu(dp))
reg |= QCA8K_PORT_STATUS_RXFLOW;
- if (tx_pause || dsa_is_cpu_port(ds, port))
+ if (tx_pause || dsa_port_is_cpu(dp))
reg |= QCA8K_PORT_STATUS_TXFLOW;
}
@@ -1480,7 +1491,7 @@ static struct qca8k_pcs *pcs_to_qca8k_pcs(struct phylink_pcs *pcs)
return container_of(pcs, struct qca8k_pcs, pcs);
}
-static void qca8k_pcs_get_state(struct phylink_pcs *pcs,
+static void qca8k_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state)
{
struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
@@ -1623,7 +1634,6 @@ static void qca8k_setup_pcs(struct qca8k_priv *priv, struct qca8k_pcs *qpcs,
int port)
{
qpcs->pcs.ops = &qca8k_pcs_ops;
- qpcs->pcs.neg_mode = true;
/* We don't have interrupts for link changes, so we need to poll */
qpcs->pcs.poll = true;
@@ -1991,6 +2001,13 @@ qca8k_setup(struct dsa_switch *ds)
return 0;
}
+static const struct phylink_mac_ops qca8k_phylink_mac_ops = {
+ .mac_select_pcs = qca8k_phylink_mac_select_pcs,
+ .mac_config = qca8k_phylink_mac_config,
+ .mac_link_down = qca8k_phylink_mac_link_down,
+ .mac_link_up = qca8k_phylink_mac_link_up,
+};
+
static const struct dsa_switch_ops qca8k_switch_ops = {
.get_tag_protocol = qca8k_get_tag_protocol,
.setup = qca8k_setup,
@@ -1998,7 +2015,7 @@ static const struct dsa_switch_ops qca8k_switch_ops = {
.get_ethtool_stats = qca8k_get_ethtool_stats,
.get_sset_count = qca8k_get_sset_count,
.set_ageing_time = qca8k_set_ageing_time,
- .get_mac_eee = qca8k_get_mac_eee,
+ .support_eee = dsa_supports_eee,
.set_mac_eee = qca8k_set_mac_eee,
.port_enable = qca8k_port_enable,
.port_disable = qca8k_port_disable,
@@ -2021,10 +2038,6 @@ static const struct dsa_switch_ops qca8k_switch_ops = {
.port_vlan_add = qca8k_port_vlan_add,
.port_vlan_del = qca8k_port_vlan_del,
.phylink_get_caps = qca8k_phylink_get_caps,
- .phylink_mac_select_pcs = qca8k_phylink_mac_select_pcs,
- .phylink_mac_config = qca8k_phylink_mac_config,
- .phylink_mac_link_down = qca8k_phylink_mac_link_down,
- .phylink_mac_link_up = qca8k_phylink_mac_link_up,
.get_phy_flags = qca8k_get_phy_flags,
.port_lag_join = qca8k_port_lag_join,
.port_lag_leave = qca8k_port_lag_leave,
@@ -2091,6 +2104,7 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
priv->ds->num_ports = QCA8K_NUM_PORTS;
priv->ds->priv = priv;
priv->ds->ops = &qca8k_switch_ops;
+ priv->ds->phylink_mac_ops = &qca8k_phylink_mac_ops;
mutex_init(&priv->reg_mutex);
dev_set_drvdata(&mdiodev->dev, priv);
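The mutex_lock() to mutex_lock_nested() change in qca8k_phy_eth_command() above is a lockdep annotation: the management-Ethernet path takes the parent MDIO bus lock while the MDIO core may already hold another bus's mdio_lock, and the nested class keeps lockdep from flagging that as a deadlock. It does not change runtime locking. A hedged sketch of the pattern (example_* is a made-up helper):

#include <linux/mdio.h>
#include <linux/mutex.h>
#include <linux/phy.h>

static void example_lock_parent_mdio(struct mii_bus *bus)
{
	/* tell lockdep this mdio_lock legitimately nests under another one */
	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);

	/* ... issue switch management frames over the conduit here ... */

	mutex_unlock(&bus->mdio_lock);
}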
diff --git a/drivers/net/dsa/qca/qca8k-common.c b/drivers/net/dsa/qca/qca8k-common.c
index 7f80035c5441..13005f10edb7 100644
--- a/drivers/net/dsa/qca/qca8k-common.c
+++ b/drivers/net/dsa/qca/qca8k-common.c
@@ -557,13 +557,6 @@ exit:
return ret;
}
-int qca8k_get_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_keee *e)
-{
- /* Nothing to do on the port's MAC */
- return 0;
-}
-
static int qca8k_port_configure_learning(struct dsa_switch *ds, int port,
bool learning)
{
@@ -614,11 +607,57 @@ void qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
qca8k_port_configure_learning(ds, port, learning);
}
+static int qca8k_update_port_member(struct qca8k_priv *priv, int port,
+ const struct net_device *bridge_dev,
+ bool join)
+{
+ bool isolated = !!(priv->port_isolated_map & BIT(port)), other_isolated;
+ struct dsa_port *dp = dsa_to_port(priv->ds, port), *other_dp;
+ u32 port_mask = BIT(dp->cpu_dp->index);
+ int i, ret;
+
+ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ if (i == port)
+ continue;
+ if (dsa_is_cpu_port(priv->ds, i))
+ continue;
+
+ other_dp = dsa_to_port(priv->ds, i);
+ if (!dsa_port_offloads_bridge_dev(other_dp, bridge_dev))
+ continue;
+
+ other_isolated = !!(priv->port_isolated_map & BIT(i));
+
+ /* Add/remove this port to/from the portvlan mask of the other
+ * ports in the bridge
+ */
+ if (join && !(isolated && other_isolated)) {
+ port_mask |= BIT(i);
+ ret = regmap_set_bits(priv->regmap,
+ QCA8K_PORT_LOOKUP_CTRL(i),
+ BIT(port));
+ } else {
+ ret = regmap_clear_bits(priv->regmap,
+ QCA8K_PORT_LOOKUP_CTRL(i),
+ BIT(port));
+ }
+
+ if (ret)
+ return ret;
+ }
+
+ /* Add/remove all other ports to/from this port's portvlan mask */
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_MEMBER, port_mask);
+
+ return ret;
+}
+
int qca8k_port_pre_bridge_flags(struct dsa_switch *ds, int port,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack)
{
- if (flags.mask & ~BR_LEARNING)
+ if (flags.mask & ~(BR_LEARNING | BR_ISOLATED))
return -EINVAL;
return 0;
@@ -628,6 +667,7 @@ int qca8k_port_bridge_flags(struct dsa_switch *ds, int port,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack)
{
+ struct qca8k_priv *priv = ds->priv;
int ret;
if (flags.mask & BR_LEARNING) {
@@ -637,6 +677,20 @@ int qca8k_port_bridge_flags(struct dsa_switch *ds, int port,
return ret;
}
+ if (flags.mask & BR_ISOLATED) {
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct net_device *bridge_dev = dsa_port_bridge_dev_get(dp);
+
+ if (flags.val & BR_ISOLATED)
+ priv->port_isolated_map |= BIT(port);
+ else
+ priv->port_isolated_map &= ~BIT(port);
+
+ ret = qca8k_update_port_member(priv, port, bridge_dev, true);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -646,62 +700,21 @@ int qca8k_port_bridge_join(struct dsa_switch *ds, int port,
struct netlink_ext_ack *extack)
{
struct qca8k_priv *priv = ds->priv;
- int port_mask, cpu_port;
- int i, ret;
-
- cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
- port_mask = BIT(cpu_port);
-
- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
- if (dsa_is_cpu_port(ds, i))
- continue;
- if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
- continue;
- /* Add this port to the portvlan mask of the other ports
- * in the bridge
- */
- ret = regmap_set_bits(priv->regmap,
- QCA8K_PORT_LOOKUP_CTRL(i),
- BIT(port));
- if (ret)
- return ret;
- if (i != port)
- port_mask |= BIT(i);
- }
-
- /* Add all other ports to this ports portvlan mask */
- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
- QCA8K_PORT_LOOKUP_MEMBER, port_mask);
- return ret;
+ return qca8k_update_port_member(priv, port, bridge.dev, true);
}
void qca8k_port_bridge_leave(struct dsa_switch *ds, int port,
struct dsa_bridge bridge)
{
struct qca8k_priv *priv = ds->priv;
- int cpu_port, i;
-
- cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
+ int err;
- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
- if (dsa_is_cpu_port(ds, i))
- continue;
- if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
- continue;
- /* Remove this port to the portvlan mask of the other ports
- * in the bridge
- */
- regmap_clear_bits(priv->regmap,
- QCA8K_PORT_LOOKUP_CTRL(i),
- BIT(port));
- }
-
- /* Set the cpu port to be the only one in the portvlan mask of
- * this port
- */
- qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
- QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port));
+ err = qca8k_update_port_member(priv, port, bridge.dev, false);
+ if (err)
+ dev_err(priv->dev,
+ "Failed to update switch config for bridge leave: %d\n",
+ err);
}
void qca8k_port_fast_age(struct dsa_switch *ds, int port)
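For reference, the membership rule that the new qca8k_update_port_member() implements can be reduced to a pure bitmask computation: a port always keeps its CPU port in its portvlan mask, and gains another bridged port only if the two are not both BR_ISOLATED. The standalone helper below is hypothetical and only mirrors that rule; it is not part of the patch.

#include <linux/bits.h>
#include <linux/types.h>

static u32 example_portvlan_member(u32 bridged_ports, u32 isolated_ports,
				   int port, int cpu_port)
{
	u32 mask = BIT(cpu_port);
	int i;

	for (i = 0; i < 32; i++) {
		if (i == port || i == cpu_port || !(bridged_ports & BIT(i)))
			continue;
		/* skip the pair only when both ends are isolated */
		if ((isolated_ports & BIT(port)) && (isolated_ports & BIT(i)))
			continue;
		mask |= BIT(i);
	}

	return mask;
}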
diff --git a/drivers/net/dsa/qca/qca8k-leds.c b/drivers/net/dsa/qca/qca8k-leds.c
index 811ebeeff4ed..43ac68052baf 100644
--- a/drivers/net/dsa/qca/qca8k-leds.c
+++ b/drivers/net/dsa/qca/qca8k-leds.c
@@ -431,8 +431,11 @@ qca8k_parse_port_leds(struct qca8k_priv *priv, struct fwnode_handle *port, int p
init_data.devicename = kasprintf(GFP_KERNEL, "%s:0%d",
priv->internal_mdio_bus->id,
port_num);
- if (!init_data.devicename)
+ if (!init_data.devicename) {
+ fwnode_handle_put(led);
+ fwnode_handle_put(leds);
return -ENOMEM;
+ }
ret = devm_led_classdev_register_ext(priv->dev, &port_led->cdev, &init_data);
if (ret)
@@ -441,6 +444,7 @@ qca8k_parse_port_leds(struct qca8k_priv *priv, struct fwnode_handle *port, int p
kfree(init_data.devicename);
}
+ fwnode_handle_put(leds);
return 0;
}
@@ -471,9 +475,13 @@ qca8k_setup_led_ctrl(struct qca8k_priv *priv)
* the correct port for LED setup.
*/
ret = qca8k_parse_port_leds(priv, port, qca8k_port_to_phy(port_num));
- if (ret)
+ if (ret) {
+ fwnode_handle_put(port);
+ fwnode_handle_put(ports);
return ret;
+ }
}
+ fwnode_handle_put(ports);
return 0;
}
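The qca8k-leds.c hunks above all address the same leak: fwnode references taken while walking the ports/port/leds nodes were not dropped on early error returns. A minimal sketch of the reference discipline being enforced (the example_* helper and the "leds" node name are illustrative):

#include <linux/device.h>
#include <linux/property.h>

static int example_parse_leds(struct device *dev)
{
	struct fwnode_handle *leds, *led;
	int ret = 0;

	leds = device_get_named_child_node(dev, "leds");
	if (!leds)
		return 0;

	fwnode_for_each_child_node(leds, led) {
		/* ... per-LED setup that may set ret goes here ... */
		if (ret) {
			fwnode_handle_put(led);	/* drop the iterator's ref */
			break;
		}
	}

	fwnode_handle_put(leds);		/* always drop the parent ref */
	return ret;
}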
diff --git a/drivers/net/dsa/qca/qca8k.h b/drivers/net/dsa/qca/qca8k.h
index 2184d8d2d5a9..d046679265fa 100644
--- a/drivers/net/dsa/qca/qca8k.h
+++ b/drivers/net/dsa/qca/qca8k.h
@@ -16,7 +16,7 @@
#define QCA8K_ETHERNET_MDIO_PRIORITY 7
#define QCA8K_ETHERNET_PHY_PRIORITY 6
-#define QCA8K_ETHERNET_TIMEOUT 5
+#define QCA8K_ETHERNET_TIMEOUT msecs_to_jiffies(5)
#define QCA8K_NUM_PORTS 7
#define QCA8K_NUM_CPU_PORTS 2
@@ -451,6 +451,7 @@ struct qca8k_priv {
* Bit 1: port enabled. Bit 0: port disabled.
*/
u8 port_enabled_map;
+ u8 port_isolated_map;
struct qca8k_ports_config ports_config;
struct regmap *regmap;
struct mii_bus *bus;
@@ -519,7 +520,6 @@ int qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset);
/* Common eee function */
int qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *eee);
-int qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e);
/* Common bridge function */
void qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state);
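The QCA8K_ETHERNET_TIMEOUT change above is a unit fix: wait_for_completion_timeout() expects a timeout in jiffies, so the previous bare 5 meant five scheduler ticks rather than five milliseconds. A small sketch of the corrected usage (hypothetical helper name):

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int example_wait_for_reply(struct completion *done)
{
	unsigned long left;

	left = wait_for_completion_timeout(done, msecs_to_jiffies(5));
	if (!left)
		return -ETIMEDOUT;	/* no reply within 5 ms */

	return 0;
}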
diff --git a/drivers/net/dsa/realtek/Kconfig b/drivers/net/dsa/realtek/Kconfig
index 6989972eebc3..d6eb6713e5f6 100644
--- a/drivers/net/dsa/realtek/Kconfig
+++ b/drivers/net/dsa/realtek/Kconfig
@@ -43,4 +43,10 @@ config NET_DSA_REALTEK_RTL8366RB
help
Select to enable support for Realtek RTL8366RB.
+config NET_DSA_REALTEK_RTL8366RB_LEDS
+ bool
+ depends on (LEDS_CLASS=y || LEDS_CLASS=NET_DSA_REALTEK_RTL8366RB)
+ depends on NET_DSA_REALTEK_RTL8366RB
+ default NET_DSA_REALTEK_RTL8366RB
+
endif
diff --git a/drivers/net/dsa/realtek/Makefile b/drivers/net/dsa/realtek/Makefile
index 35491dc20d6d..17367bcba496 100644
--- a/drivers/net/dsa/realtek/Makefile
+++ b/drivers/net/dsa/realtek/Makefile
@@ -12,4 +12,7 @@ endif
obj-$(CONFIG_NET_DSA_REALTEK_RTL8366RB) += rtl8366.o
rtl8366-objs := rtl8366-core.o rtl8366rb.o
+ifdef CONFIG_NET_DSA_REALTEK_RTL8366RB_LEDS
+rtl8366-objs += rtl8366rb-leds.o
+endif
obj-$(CONFIG_NET_DSA_REALTEK_RTL8365MB) += rtl8365mb.o
diff --git a/drivers/net/dsa/realtek/realtek-mdio.c b/drivers/net/dsa/realtek/realtek-mdio.c
index 04b758e5a680..a5e7dff96e91 100644
--- a/drivers/net/dsa/realtek/realtek-mdio.c
+++ b/drivers/net/dsa/realtek/realtek-mdio.c
@@ -140,13 +140,13 @@ int realtek_mdio_probe(struct mdio_device *mdiodev)
return 0;
}
-EXPORT_SYMBOL_NS_GPL(realtek_mdio_probe, REALTEK_DSA);
+EXPORT_SYMBOL_NS_GPL(realtek_mdio_probe, "REALTEK_DSA");
/**
* realtek_mdio_remove() - Remove the driver of an MDIO-connected switch
* @mdiodev: mdio_device to be removed.
*
- * This function should be used as the .remove_new in an mdio_driver. First
+ * This function should be used as the .remove in an mdio_driver. First
* it unregisters the DSA switch and then it calls the common remove function.
*
* Context: Can sleep.
@@ -163,7 +163,7 @@ void realtek_mdio_remove(struct mdio_device *mdiodev)
rtl83xx_remove(priv);
}
-EXPORT_SYMBOL_NS_GPL(realtek_mdio_remove, REALTEK_DSA);
+EXPORT_SYMBOL_NS_GPL(realtek_mdio_remove, "REALTEK_DSA");
/**
* realtek_mdio_shutdown() - Shutdown the driver of a MDIO-connected switch
@@ -184,4 +184,4 @@ void realtek_mdio_shutdown(struct mdio_device *mdiodev)
rtl83xx_shutdown(priv);
}
-EXPORT_SYMBOL_NS_GPL(realtek_mdio_shutdown, REALTEK_DSA);
+EXPORT_SYMBOL_NS_GPL(realtek_mdio_shutdown, "REALTEK_DSA");
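The kernel-doc updates above track the platform/mdio drivers moving from .remove_new back to .remove. As a hedged sketch, a variant driver wires these shared helpers into an mdio_driver roughly as below; the driver name and OF match entry are placeholders, and the header name assumes the in-tree realtek-mdio.h that declares the helpers.

#include <linux/mdio.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>

#include "realtek-mdio.h"	/* declares realtek_mdio_probe/remove/shutdown */

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example-switch" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, example_of_match);

static struct mdio_driver example_mdio_driver = {
	.mdiodrv.driver = {
		.name = "example-realtek-mdio",
		.of_match_table = example_of_match,
	},
	.probe = realtek_mdio_probe,
	.remove = realtek_mdio_remove,
	.shutdown = realtek_mdio_shutdown,
};
mdio_module_driver(example_mdio_driver);

MODULE_IMPORT_NS("REALTEK_DSA");
MODULE_LICENSE("GPL");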
diff --git a/drivers/net/dsa/realtek/realtek-smi.c b/drivers/net/dsa/realtek/realtek-smi.c
index 88590ae95a75..972e22218418 100644
--- a/drivers/net/dsa/realtek/realtek-smi.c
+++ b/drivers/net/dsa/realtek/realtek-smi.c
@@ -361,13 +361,13 @@ int realtek_smi_probe(struct platform_device *pdev)
return 0;
}
-EXPORT_SYMBOL_NS_GPL(realtek_smi_probe, REALTEK_DSA);
+EXPORT_SYMBOL_NS_GPL(realtek_smi_probe, "REALTEK_DSA");
/**
* realtek_smi_remove() - Remove the driver of a SMI-connected switch
* @pdev: platform_device to be removed.
*
- * This function should be used as the .remove_new in a platform_driver. First
+ * This function should be used as the .remove in a platform_driver. First
* it unregisters the DSA switch and then it calls the common remove function.
*
* Context: Can sleep.
@@ -384,7 +384,7 @@ void realtek_smi_remove(struct platform_device *pdev)
rtl83xx_remove(priv);
}
-EXPORT_SYMBOL_NS_GPL(realtek_smi_remove, REALTEK_DSA);
+EXPORT_SYMBOL_NS_GPL(realtek_smi_remove, "REALTEK_DSA");
/**
* realtek_smi_shutdown() - Shutdown the driver of a SMI-connected switch
@@ -405,4 +405,4 @@ void realtek_smi_shutdown(struct platform_device *pdev)
rtl83xx_shutdown(priv);
}
-EXPORT_SYMBOL_NS_GPL(realtek_smi_shutdown, REALTEK_DSA);
+EXPORT_SYMBOL_NS_GPL(realtek_smi_shutdown, "REALTEK_DSA");
diff --git a/drivers/net/dsa/realtek/realtek.h b/drivers/net/dsa/realtek/realtek.h
index e0b1aa01337b..c03485a80d93 100644
--- a/drivers/net/dsa/realtek/realtek.h
+++ b/drivers/net/dsa/realtek/realtek.h
@@ -17,10 +17,8 @@
#define REALTEK_HW_STOP_DELAY 25 /* msecs */
#define REALTEK_HW_START_DELAY 100 /* msecs */
+struct phylink_mac_ops;
struct realtek_ops;
-struct dentry;
-struct inode;
-struct file;
struct rtl8366_mib_counter {
unsigned int base;
@@ -117,6 +115,7 @@ struct realtek_ops {
struct realtek_variant {
const struct dsa_switch_ops *ds_ops;
const struct realtek_ops *ops;
+ const struct phylink_mac_ops *phylink_mac_ops;
unsigned int clk_delay;
u8 cmd_read;
u8 cmd_write;
diff --git a/drivers/net/dsa/realtek/rtl8365mb.c b/drivers/net/dsa/realtek/rtl8365mb.c
index 12665a8a3412..c575e164368c 100644
--- a/drivers/net/dsa/realtek/rtl8365mb.c
+++ b/drivers/net/dsa/realtek/rtl8365mb.c
@@ -1048,11 +1048,13 @@ static void rtl8365mb_phylink_get_caps(struct dsa_switch *ds, int port,
phy_interface_set_rgmii(config->supported_interfaces);
}
-static void rtl8365mb_phylink_mac_config(struct dsa_switch *ds, int port,
+static void rtl8365mb_phylink_mac_config(struct phylink_config *config,
unsigned int mode,
const struct phylink_link_state *state)
{
- struct realtek_priv *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct realtek_priv *priv = dp->ds->priv;
+ u8 port = dp->index;
int ret;
if (mode != MLO_AN_PHY && mode != MLO_AN_FIXED) {
@@ -1076,13 +1078,15 @@ static void rtl8365mb_phylink_mac_config(struct dsa_switch *ds, int port,
*/
}
-static void rtl8365mb_phylink_mac_link_down(struct dsa_switch *ds, int port,
+static void rtl8365mb_phylink_mac_link_down(struct phylink_config *config,
unsigned int mode,
phy_interface_t interface)
{
- struct realtek_priv *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct realtek_priv *priv = dp->ds->priv;
struct rtl8365mb_port *p;
struct rtl8365mb *mb;
+ u8 port = dp->index;
int ret;
mb = priv->chip_data;
@@ -1101,16 +1105,18 @@ static void rtl8365mb_phylink_mac_link_down(struct dsa_switch *ds, int port,
}
}
-static void rtl8365mb_phylink_mac_link_up(struct dsa_switch *ds, int port,
+static void rtl8365mb_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
unsigned int mode,
phy_interface_t interface,
- struct phy_device *phydev, int speed,
- int duplex, bool tx_pause,
+ int speed, int duplex, bool tx_pause,
bool rx_pause)
{
- struct realtek_priv *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct realtek_priv *priv = dp->ds->priv;
struct rtl8365mb_port *p;
struct rtl8365mb *mb;
+ u8 port = dp->index;
int ret;
mb = priv->chip_data;
@@ -1713,8 +1719,8 @@ static int rtl8365mb_irq_setup(struct realtek_priv *priv)
goto out_put_node;
}
- priv->irqdomain = irq_domain_add_linear(intc, priv->num_ports,
- &rtl8365mb_irqdomain_ops, priv);
+ priv->irqdomain = irq_domain_create_linear(of_fwnode_handle(intc), priv->num_ports,
+ &rtl8365mb_irqdomain_ops, priv);
if (!priv->irqdomain) {
dev_err(priv->dev, "failed to add irq domain\n");
ret = -ENOMEM;
@@ -1734,7 +1740,7 @@ static int rtl8365mb_irq_setup(struct realtek_priv *priv)
}
/* Configure chip interrupt signal polarity */
- irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
+ irq_trig = irq_get_trigger_type(irq);
switch (irq_trig) {
case IRQF_TRIGGER_RISING:
case IRQF_TRIGGER_HIGH:
@@ -2106,15 +2112,18 @@ static int rtl8365mb_detect(struct realtek_priv *priv)
return 0;
}
+static const struct phylink_mac_ops rtl8365mb_phylink_mac_ops = {
+ .mac_config = rtl8365mb_phylink_mac_config,
+ .mac_link_down = rtl8365mb_phylink_mac_link_down,
+ .mac_link_up = rtl8365mb_phylink_mac_link_up,
+};
+
static const struct dsa_switch_ops rtl8365mb_switch_ops = {
.get_tag_protocol = rtl8365mb_get_tag_protocol,
.change_tag_protocol = rtl8365mb_change_tag_protocol,
.setup = rtl8365mb_setup,
.teardown = rtl8365mb_teardown,
.phylink_get_caps = rtl8365mb_phylink_get_caps,
- .phylink_mac_config = rtl8365mb_phylink_mac_config,
- .phylink_mac_link_down = rtl8365mb_phylink_mac_link_down,
- .phylink_mac_link_up = rtl8365mb_phylink_mac_link_up,
.port_stp_state_set = rtl8365mb_port_stp_state_set,
.get_strings = rtl8365mb_get_strings,
.get_ethtool_stats = rtl8365mb_get_ethtool_stats,
@@ -2125,6 +2134,8 @@ static const struct dsa_switch_ops rtl8365mb_switch_ops = {
.get_stats64 = rtl8365mb_get_stats64,
.port_change_mtu = rtl8365mb_port_change_mtu,
.port_max_mtu = rtl8365mb_port_max_mtu,
+ .port_hsr_join = dsa_port_simple_hsr_join,
+ .port_hsr_leave = dsa_port_simple_hsr_leave,
};
static const struct realtek_ops rtl8365mb_ops = {
@@ -2136,6 +2147,7 @@ static const struct realtek_ops rtl8365mb_ops = {
const struct realtek_variant rtl8365mb_variant = {
.ds_ops = &rtl8365mb_switch_ops,
.ops = &rtl8365mb_ops,
+ .phylink_mac_ops = &rtl8365mb_phylink_mac_ops,
.clk_delay = 10,
.cmd_read = 0xb9,
.cmd_write = 0xb8,
@@ -2154,7 +2166,7 @@ static struct platform_driver rtl8365mb_smi_driver = {
.of_match_table = rtl8365mb_of_match,
},
.probe = realtek_smi_probe,
- .remove_new = realtek_smi_remove,
+ .remove = realtek_smi_remove,
.shutdown = realtek_smi_shutdown,
};
@@ -2196,4 +2208,4 @@ module_exit(rtl8365mb_exit);
MODULE_AUTHOR("Alvin Å ipraga <alsi@bang-olufsen.dk>");
MODULE_DESCRIPTION("Driver for RTL8365MB-VC ethernet switch");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(REALTEK_DSA);
+MODULE_IMPORT_NS("REALTEK_DSA");
diff --git a/drivers/net/dsa/realtek/rtl8366-core.c b/drivers/net/dsa/realtek/rtl8366-core.c
index 7c6520ba3a26..047feeed96a2 100644
--- a/drivers/net/dsa/realtek/rtl8366-core.c
+++ b/drivers/net/dsa/realtek/rtl8366-core.c
@@ -34,7 +34,7 @@ int rtl8366_mc_is_used(struct realtek_priv *priv, int mc_index, int *used)
return 0;
}
-EXPORT_SYMBOL_NS_GPL(rtl8366_mc_is_used, REALTEK_DSA);
+EXPORT_SYMBOL_NS_GPL(rtl8366_mc_is_used, "REALTEK_DSA");
/**
* rtl8366_obtain_mc() - retrieve or allocate a VLAN member configuration
@@ -187,7 +187,7 @@ int rtl8366_set_vlan(struct realtek_priv *priv, int vid, u32 member,
return ret;
}
-EXPORT_SYMBOL_NS_GPL(rtl8366_set_vlan, REALTEK_DSA);
+EXPORT_SYMBOL_NS_GPL(rtl8366_set_vlan, "REALTEK_DSA");
int rtl8366_set_pvid(struct realtek_priv *priv, unsigned int port,
unsigned int vid)
@@ -217,7 +217,7 @@ int rtl8366_set_pvid(struct realtek_priv *priv, unsigned int port,
return 0;
}
-EXPORT_SYMBOL_NS_GPL(rtl8366_set_pvid, REALTEK_DSA);
+EXPORT_SYMBOL_NS_GPL(rtl8366_set_pvid, "REALTEK_DSA");
int rtl8366_enable_vlan4k(struct realtek_priv *priv, bool enable)
{
@@ -243,7 +243,7 @@ int rtl8366_enable_vlan4k(struct realtek_priv *priv, bool enable)
priv->vlan4k_enabled = enable;
return 0;
}
-EXPORT_SYMBOL_NS_GPL(rtl8366_enable_vlan4k, REALTEK_DSA);
+EXPORT_SYMBOL_NS_GPL(rtl8366_enable_vlan4k, "REALTEK_DSA");
int rtl8366_enable_vlan(struct realtek_priv *priv, bool enable)
{
@@ -265,7 +265,7 @@ int rtl8366_enable_vlan(struct realtek_priv *priv, bool enable)
return ret;
}
-EXPORT_SYMBOL_NS_GPL(rtl8366_enable_vlan, REALTEK_DSA);
+EXPORT_SYMBOL_NS_GPL(rtl8366_enable_vlan, "REALTEK_DSA");
int rtl8366_reset_vlan(struct realtek_priv *priv)
{
@@ -290,7 +290,7 @@ int rtl8366_reset_vlan(struct realtek_priv *priv)
return 0;
}
-EXPORT_SYMBOL_NS_GPL(rtl8366_reset_vlan, REALTEK_DSA);
+EXPORT_SYMBOL_NS_GPL(rtl8366_reset_vlan, "REALTEK_DSA");
int rtl8366_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
@@ -345,7 +345,7 @@ int rtl8366_vlan_add(struct dsa_switch *ds, int port,
return 0;
}
-EXPORT_SYMBOL_NS_GPL(rtl8366_vlan_add, REALTEK_DSA);
+EXPORT_SYMBOL_NS_GPL(rtl8366_vlan_add, "REALTEK_DSA");
int rtl8366_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
@@ -389,7 +389,7 @@ int rtl8366_vlan_del(struct dsa_switch *ds, int port,
return 0;
}
-EXPORT_SYMBOL_NS_GPL(rtl8366_vlan_del, REALTEK_DSA);
+EXPORT_SYMBOL_NS_GPL(rtl8366_vlan_del, "REALTEK_DSA");
void rtl8366_get_strings(struct dsa_switch *ds, int port, u32 stringset,
uint8_t *data)
@@ -403,7 +403,7 @@ void rtl8366_get_strings(struct dsa_switch *ds, int port, u32 stringset,
for (i = 0; i < priv->num_mib_counters; i++)
ethtool_puts(&data, priv->mib_counters[i].name);
}
-EXPORT_SYMBOL_NS_GPL(rtl8366_get_strings, REALTEK_DSA);
+EXPORT_SYMBOL_NS_GPL(rtl8366_get_strings, "REALTEK_DSA");
int rtl8366_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
@@ -417,7 +417,7 @@ int rtl8366_get_sset_count(struct dsa_switch *ds, int port, int sset)
return priv->num_mib_counters;
}
-EXPORT_SYMBOL_NS_GPL(rtl8366_get_sset_count, REALTEK_DSA);
+EXPORT_SYMBOL_NS_GPL(rtl8366_get_sset_count, "REALTEK_DSA");
void rtl8366_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
{
@@ -441,4 +441,4 @@ void rtl8366_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
data[i] = mibvalue;
}
}
-EXPORT_SYMBOL_NS_GPL(rtl8366_get_ethtool_stats, REALTEK_DSA);
+EXPORT_SYMBOL_NS_GPL(rtl8366_get_ethtool_stats, "REALTEK_DSA");
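The EXPORT_SYMBOL_NS_GPL() and MODULE_IMPORT_NS() changes throughout these files switch the symbol namespace from a bare token to a string literal, matching the newer kernel convention. A tiny illustration with made-up names, export side and the matching consumer declaration:

#include <linux/export.h>
#include <linux/module.h>

int example_helper(void)
{
	return 0;
}
EXPORT_SYMBOL_NS_GPL(example_helper, "EXAMPLE_NS");

/* a consumer module declares: MODULE_IMPORT_NS("EXAMPLE_NS"); */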
diff --git a/drivers/net/dsa/realtek/rtl8366rb-leds.c b/drivers/net/dsa/realtek/rtl8366rb-leds.c
new file mode 100644
index 000000000000..99c890681ae6
--- /dev/null
+++ b/drivers/net/dsa/realtek/rtl8366rb-leds.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bitops.h>
+#include <linux/regmap.h>
+#include <net/dsa.h>
+#include "rtl83xx.h"
+#include "rtl8366rb.h"
+
+static inline u32 rtl8366rb_led_group_port_mask(u8 led_group, u8 port)
+{
+ switch (led_group) {
+ case 0:
+ return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
+	case 1:
+		return FIELD_PREP(RTL8366RB_LED_X_1_CTRL_MASK, BIT(port));
+	case 2:
+		return FIELD_PREP(RTL8366RB_LED_2_X_CTRL_MASK, BIT(port));
+	case 3:
+		return FIELD_PREP(RTL8366RB_LED_X_3_CTRL_MASK, BIT(port));
+ default:
+ return 0;
+ }
+}
+
+static int rb8366rb_get_port_led(struct rtl8366rb_led *led)
+{
+ struct realtek_priv *priv = led->priv;
+ u8 led_group = led->led_group;
+ u8 port_num = led->port_num;
+ int ret;
+ u32 val;
+
+ ret = regmap_read(priv->map, RTL8366RB_LED_X_X_CTRL_REG(led_group),
+ &val);
+ if (ret) {
+ dev_err(priv->dev, "error reading LED on port %d group %d\n",
+ led_group, port_num);
+ return ret;
+ }
+
+ return !!(val & rtl8366rb_led_group_port_mask(led_group, port_num));
+}
+
+static int rb8366rb_set_port_led(struct rtl8366rb_led *led, bool enable)
+{
+ struct realtek_priv *priv = led->priv;
+ u8 led_group = led->led_group;
+ u8 port_num = led->port_num;
+ int ret;
+
+ ret = regmap_update_bits(priv->map,
+ RTL8366RB_LED_X_X_CTRL_REG(led_group),
+ rtl8366rb_led_group_port_mask(led_group,
+ port_num),
+ enable ? 0xffff : 0);
+ if (ret) {
+ dev_err(priv->dev, "error updating LED on port %d group %d\n",
+ led_group, port_num);
+ return ret;
+ }
+
+	/* Change the LED group to manually controlled LEDs if required */
+ ret = rb8366rb_set_ledgroup_mode(priv, led_group,
+ RTL8366RB_LEDGROUP_FORCE);
+
+ if (ret) {
+ dev_err(priv->dev, "error updating LED GROUP group %d\n",
+ led_group);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+rtl8366rb_cled_brightness_set_blocking(struct led_classdev *ldev,
+ enum led_brightness brightness)
+{
+ struct rtl8366rb_led *led = container_of(ldev, struct rtl8366rb_led,
+ cdev);
+
+ return rb8366rb_set_port_led(led, brightness == LED_ON);
+}
+
+static int rtl8366rb_setup_led(struct realtek_priv *priv, struct dsa_port *dp,
+ struct fwnode_handle *led_fwnode)
+{
+ struct rtl8366rb *rb = priv->chip_data;
+ struct led_init_data init_data = { };
+ enum led_default_state state;
+ struct rtl8366rb_led *led;
+ u32 led_group;
+ int ret;
+
+ ret = fwnode_property_read_u32(led_fwnode, "reg", &led_group);
+ if (ret)
+ return ret;
+
+ if (led_group >= RTL8366RB_NUM_LEDGROUPS) {
+ dev_warn(priv->dev, "Invalid LED reg %d defined for port %d",
+ led_group, dp->index);
+ return -EINVAL;
+ }
+
+ led = &rb->leds[dp->index][led_group];
+ led->port_num = dp->index;
+ led->led_group = led_group;
+ led->priv = priv;
+
+ state = led_init_default_state_get(led_fwnode);
+ switch (state) {
+ case LEDS_DEFSTATE_ON:
+ led->cdev.brightness = 1;
+ rb8366rb_set_port_led(led, 1);
+ break;
+ case LEDS_DEFSTATE_KEEP:
+ led->cdev.brightness =
+ rb8366rb_get_port_led(led);
+ break;
+ case LEDS_DEFSTATE_OFF:
+ default:
+ led->cdev.brightness = 0;
+ rb8366rb_set_port_led(led, 0);
+ }
+
+ led->cdev.max_brightness = 1;
+ led->cdev.brightness_set_blocking =
+ rtl8366rb_cled_brightness_set_blocking;
+ init_data.fwnode = led_fwnode;
+ init_data.devname_mandatory = true;
+
+ init_data.devicename = kasprintf(GFP_KERNEL, "Realtek-%d:0%d:%d",
+ dp->ds->index, dp->index, led_group);
+ if (!init_data.devicename)
+ return -ENOMEM;
+
+ ret = devm_led_classdev_register_ext(priv->dev, &led->cdev, &init_data);
+ if (ret) {
+ dev_warn(priv->dev, "Failed to init LED %d for port %d",
+ led_group, dp->index);
+ return ret;
+ }
+
+ return 0;
+}
+
+int rtl8366rb_setup_leds(struct realtek_priv *priv)
+{
+ struct dsa_switch *ds = &priv->ds;
+ struct device_node *leds_np;
+ struct dsa_port *dp;
+ int ret = 0;
+
+ dsa_switch_for_each_port(dp, ds) {
+ if (!dp->dn)
+ continue;
+
+ leds_np = of_get_child_by_name(dp->dn, "leds");
+ if (!leds_np) {
+ dev_dbg(priv->dev, "No leds defined for port %d",
+ dp->index);
+ continue;
+ }
+
+ for_each_child_of_node_scoped(leds_np, led_np) {
+ ret = rtl8366rb_setup_led(priv, dp,
+ of_fwnode_handle(led_np));
+ if (ret)
+ break;
+ }
+
+ of_node_put(leds_np);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
diff --git a/drivers/net/dsa/realtek/rtl8366rb.c b/drivers/net/dsa/realtek/rtl8366rb.c
index e10ae94cf771..d96ae72b0a5c 100644
--- a/drivers/net/dsa/realtek/rtl8366rb.c
+++ b/drivers/net/dsa/realtek/rtl8366rb.c
@@ -21,16 +21,13 @@
#include <linux/irqchip/chained_irq.h>
#include <linux/of_irq.h>
#include <linux/regmap.h>
+#include <linux/string_choices.h>
#include "realtek.h"
#include "realtek-smi.h"
#include "realtek-mdio.h"
#include "rtl83xx.h"
-
-#define RTL8366RB_PORT_NUM_CPU 5
-#define RTL8366RB_NUM_PORTS 6
-#define RTL8366RB_PHY_NO_MAX 4
-#define RTL8366RB_PHY_ADDR_MAX 31
+#include "rtl8366rb.h"
/* Switch Global Configuration register */
#define RTL8366RB_SGCR 0x0000
@@ -175,38 +172,6 @@
*/
#define RTL8366RB_VLAN_INGRESS_CTRL2_REG 0x037f
-/* LED control registers */
-#define RTL8366RB_LED_BLINKRATE_REG 0x0430
-#define RTL8366RB_LED_BLINKRATE_MASK 0x0007
-#define RTL8366RB_LED_BLINKRATE_28MS 0x0000
-#define RTL8366RB_LED_BLINKRATE_56MS 0x0001
-#define RTL8366RB_LED_BLINKRATE_84MS 0x0002
-#define RTL8366RB_LED_BLINKRATE_111MS 0x0003
-#define RTL8366RB_LED_BLINKRATE_222MS 0x0004
-#define RTL8366RB_LED_BLINKRATE_446MS 0x0005
-
-#define RTL8366RB_LED_CTRL_REG 0x0431
-#define RTL8366RB_LED_OFF 0x0
-#define RTL8366RB_LED_DUP_COL 0x1
-#define RTL8366RB_LED_LINK_ACT 0x2
-#define RTL8366RB_LED_SPD1000 0x3
-#define RTL8366RB_LED_SPD100 0x4
-#define RTL8366RB_LED_SPD10 0x5
-#define RTL8366RB_LED_SPD1000_ACT 0x6
-#define RTL8366RB_LED_SPD100_ACT 0x7
-#define RTL8366RB_LED_SPD10_ACT 0x8
-#define RTL8366RB_LED_SPD100_10_ACT 0x9
-#define RTL8366RB_LED_FIBER 0xa
-#define RTL8366RB_LED_AN_FAULT 0xb
-#define RTL8366RB_LED_LINK_RX 0xc
-#define RTL8366RB_LED_LINK_TX 0xd
-#define RTL8366RB_LED_MASTER 0xe
-#define RTL8366RB_LED_FORCE 0xf
-#define RTL8366RB_LED_0_1_CTRL_REG 0x0432
-#define RTL8366RB_LED_1_OFFSET 6
-#define RTL8366RB_LED_2_3_CTRL_REG 0x0433
-#define RTL8366RB_LED_3_OFFSET 6
-
#define RTL8366RB_MIB_COUNT 33
#define RTL8366RB_GLOBAL_MIB_COUNT 1
#define RTL8366RB_MIB_COUNTER_PORT_OFFSET 0x0050
@@ -242,7 +207,6 @@
#define RTL8366RB_PORT_STATUS_AN_MASK 0x0080
#define RTL8366RB_NUM_VLANS 16
-#define RTL8366RB_NUM_LEDGROUPS 4
#define RTL8366RB_NUM_VIDS 4096
#define RTL8366RB_PRIORITYMAX 7
#define RTL8366RB_NUM_FIDS 8
@@ -349,16 +313,6 @@
#define RTL8366RB_GREEN_FEATURE_TX BIT(0)
#define RTL8366RB_GREEN_FEATURE_RX BIT(2)
-/**
- * struct rtl8366rb - RTL8366RB-specific data
- * @max_mtu: per-port max MTU setting
- * @pvid_enabled: if PVID is set for respective port
- */
-struct rtl8366rb {
- unsigned int max_mtu[RTL8366RB_NUM_PORTS];
- bool pvid_enabled[RTL8366RB_NUM_PORTS];
-};
-
static struct rtl8366_mib_counter rtl8366rb_mib_counters[] = {
{ 0, 0, 4, "IfInOctets" },
{ 0, 4, 4, "EtherStatsOctets" },
@@ -568,7 +522,7 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_priv *priv)
}
/* Fetch IRQ edge information from the descriptor */
- irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
+ irq_trig = irq_get_trigger_type(irq);
switch (irq_trig) {
case IRQF_TRIGGER_RISING:
case IRQF_TRIGGER_HIGH:
@@ -596,10 +550,8 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_priv *priv)
dev_err(priv->dev, "unable to request irq: %d\n", ret);
goto out_put_node;
}
- priv->irqdomain = irq_domain_add_linear(intc,
- RTL8366RB_NUM_INTERRUPT,
- &rtl8366rb_irqdomain_ops,
- priv);
+ priv->irqdomain = irq_domain_create_linear(of_fwnode_handle(intc), RTL8366RB_NUM_INTERRUPT,
+ &rtl8366rb_irqdomain_ops, priv);
if (!priv->irqdomain) {
dev_err(priv->dev, "failed to create IRQ domain\n");
ret = -EINVAL;
@@ -799,6 +751,47 @@ static int rtl8366rb_jam_table(const struct rtl8366rb_jam_tbl_entry *jam_table,
return 0;
}
+/* This code is used also with LEDs disabled */
+int rb8366rb_set_ledgroup_mode(struct realtek_priv *priv,
+ u8 led_group,
+ enum rtl8366_ledgroup_mode mode)
+{
+ int ret;
+ u32 val;
+
+ val = mode << RTL8366RB_LED_CTRL_OFFSET(led_group);
+
+ ret = regmap_update_bits(priv->map,
+ RTL8366RB_LED_CTRL_REG,
+ RTL8366RB_LED_CTRL_MASK(led_group),
+ val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/* This code is used also with LEDs disabled */
+static int rtl8366rb_setup_all_leds_off(struct realtek_priv *priv)
+{
+ int ret = 0;
+ int i;
+
+ regmap_update_bits(priv->map,
+ RTL8366RB_INTERRUPT_CONTROL_REG,
+ RTL8366RB_P4_RGMII_LED,
+ 0);
+
+ for (i = 0; i < RTL8366RB_NUM_LEDGROUPS; i++) {
+ ret = rb8366rb_set_ledgroup_mode(priv, i,
+ RTL8366RB_LEDGROUP_OFF);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
static int rtl8366rb_setup(struct dsa_switch *ds)
{
struct realtek_priv *priv = ds->priv;
@@ -807,7 +800,6 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
u32 chip_ver = 0;
u32 chip_id = 0;
int jam_size;
- u32 val;
int ret;
int i;
@@ -987,7 +979,9 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
if (ret)
return ret;
- /* Set blinking, TODO: make this configurable */
+ /* Set blinking, used by all LED groups using HW triggers.
+ * TODO: make this configurable
+ */
ret = regmap_update_bits(priv->map, RTL8366RB_LED_BLINKRATE_REG,
RTL8366RB_LED_BLINKRATE_MASK,
RTL8366RB_LED_BLINKRATE_56MS);
@@ -995,32 +989,17 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
return ret;
/* Set up LED activity:
- * Each port has 4 LEDs, we configure all ports to the same
- * behaviour (no individual config) but we can set up each
- * LED separately.
+ * Each port has 4 LEDs on fixed groups. Each group shares the same
+	 * hardware trigger across all ports. LEDs can only be individually
+	 * controlled by setting the LED group to fixed mode and using the
+	 * driver to toggle the LEDs on/off.
*/
if (priv->leds_disabled) {
- /* Turn everything off */
- regmap_update_bits(priv->map,
- RTL8366RB_LED_0_1_CTRL_REG,
- 0x0FFF, 0);
- regmap_update_bits(priv->map,
- RTL8366RB_LED_2_3_CTRL_REG,
- 0x0FFF, 0);
- regmap_update_bits(priv->map,
- RTL8366RB_INTERRUPT_CONTROL_REG,
- RTL8366RB_P4_RGMII_LED,
- 0);
- val = RTL8366RB_LED_OFF;
+ ret = rtl8366rb_setup_all_leds_off(priv);
+ if (ret)
+ return ret;
} else {
- /* TODO: make this configurable per LED */
- val = RTL8366RB_LED_FORCE;
- }
- for (i = 0; i < 4; i++) {
- ret = regmap_update_bits(priv->map,
- RTL8366RB_LED_CTRL_REG,
- 0xf << (i * 4),
- val << (i * 4));
+ ret = rtl8366rb_setup_leds(priv);
if (ret)
return ret;
}
@@ -1077,11 +1056,19 @@ static void rtl8366rb_phylink_get_caps(struct dsa_switch *ds, int port,
}
static void
-rtl8366rb_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
- phy_interface_t interface, struct phy_device *phydev,
+rtl8366rb_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+}
+
+static void
+rtl8366rb_mac_link_up(struct phylink_config *config, struct phy_device *phydev,
+ unsigned int mode, phy_interface_t interface,
int speed, int duplex, bool tx_pause, bool rx_pause)
{
- struct realtek_priv *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct realtek_priv *priv = dp->ds->priv;
+ int port = dp->index;
unsigned int val;
int ret;
@@ -1147,10 +1134,12 @@ rtl8366rb_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
}
static void
-rtl8366rb_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
+rtl8366rb_mac_link_down(struct phylink_config *config, unsigned int mode,
phy_interface_t interface)
{
- struct realtek_priv *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct realtek_priv *priv = dp->ds->priv;
+ int port = dp->index;
int ret;
if (port != priv->cpu_port)
@@ -1167,52 +1156,6 @@ rtl8366rb_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
}
}
-static void rb8366rb_set_port_led(struct realtek_priv *priv,
- int port, bool enable)
-{
- u16 val = enable ? 0x3f : 0;
- int ret;
-
- if (priv->leds_disabled)
- return;
-
- switch (port) {
- case 0:
- ret = regmap_update_bits(priv->map,
- RTL8366RB_LED_0_1_CTRL_REG,
- 0x3F, val);
- break;
- case 1:
- ret = regmap_update_bits(priv->map,
- RTL8366RB_LED_0_1_CTRL_REG,
- 0x3F << RTL8366RB_LED_1_OFFSET,
- val << RTL8366RB_LED_1_OFFSET);
- break;
- case 2:
- ret = regmap_update_bits(priv->map,
- RTL8366RB_LED_2_3_CTRL_REG,
- 0x3F, val);
- break;
- case 3:
- ret = regmap_update_bits(priv->map,
- RTL8366RB_LED_2_3_CTRL_REG,
- 0x3F << RTL8366RB_LED_3_OFFSET,
- val << RTL8366RB_LED_3_OFFSET);
- break;
- case 4:
- ret = regmap_update_bits(priv->map,
- RTL8366RB_INTERRUPT_CONTROL_REG,
- RTL8366RB_P4_RGMII_LED,
- enable ? RTL8366RB_P4_RGMII_LED : 0);
- break;
- default:
- dev_err(priv->dev, "no LED for port %d\n", port);
- return;
- }
- if (ret)
- dev_err(priv->dev, "error updating LED on port %d\n", port);
-}
-
static int
rtl8366rb_port_enable(struct dsa_switch *ds, int port,
struct phy_device *phy)
@@ -1226,7 +1169,6 @@ rtl8366rb_port_enable(struct dsa_switch *ds, int port,
if (ret)
return ret;
- rb8366rb_set_port_led(priv, port, true);
return 0;
}
@@ -1241,8 +1183,6 @@ rtl8366rb_port_disable(struct dsa_switch *ds, int port)
BIT(port));
if (ret)
return;
-
- rb8366rb_set_port_led(priv, port, false);
}
static int
@@ -1335,7 +1275,7 @@ static int rtl8366rb_vlan_filtering(struct dsa_switch *ds, int port,
rb = priv->chip_data;
dev_dbg(priv->dev, "port %d: %s VLAN filtering\n", port,
- vlan_filtering ? "enable" : "disable");
+ str_enable_disable(vlan_filtering));
/* If the port is not in the member set, the frame will be dropped */
ret = regmap_update_bits(priv->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG,
@@ -1697,7 +1637,7 @@ static bool rtl8366rb_is_vlan_valid(struct realtek_priv *priv, unsigned int vlan
static int rtl8366rb_enable_vlan(struct realtek_priv *priv, bool enable)
{
- dev_dbg(priv->dev, "%s VLAN\n", enable ? "enable" : "disable");
+ dev_dbg(priv->dev, "%s VLAN\n", str_enable_disable(enable));
return regmap_update_bits(priv->map,
RTL8366RB_SGCR, RTL8366RB_SGCR_EN_VLAN,
enable ? RTL8366RB_SGCR_EN_VLAN : 0);
@@ -1705,7 +1645,7 @@ static int rtl8366rb_enable_vlan(struct realtek_priv *priv, bool enable)
static int rtl8366rb_enable_vlan4k(struct realtek_priv *priv, bool enable)
{
- dev_dbg(priv->dev, "%s VLAN 4k\n", enable ? "enable" : "disable");
+ dev_dbg(priv->dev, "%s VLAN 4k\n", str_enable_disable(enable));
return regmap_update_bits(priv->map, RTL8366RB_SGCR,
RTL8366RB_SGCR_EN_VLAN_4KTB,
enable ? RTL8366RB_SGCR_EN_VLAN_4KTB : 0);
@@ -1849,12 +1789,16 @@ static int rtl8366rb_detect(struct realtek_priv *priv)
return 0;
}
+static const struct phylink_mac_ops rtl8366rb_phylink_mac_ops = {
+ .mac_config = rtl8366rb_mac_config,
+ .mac_link_down = rtl8366rb_mac_link_down,
+ .mac_link_up = rtl8366rb_mac_link_up,
+};
+
static const struct dsa_switch_ops rtl8366rb_switch_ops = {
.get_tag_protocol = rtl8366_get_tag_protocol,
.setup = rtl8366rb_setup,
.phylink_get_caps = rtl8366rb_phylink_get_caps,
- .phylink_mac_link_up = rtl8366rb_mac_link_up,
- .phylink_mac_link_down = rtl8366rb_mac_link_down,
.get_strings = rtl8366_get_strings,
.get_ethtool_stats = rtl8366_get_ethtool_stats,
.get_sset_count = rtl8366_get_sset_count,
@@ -1871,6 +1815,8 @@ static const struct dsa_switch_ops rtl8366rb_switch_ops = {
.port_fast_age = rtl8366rb_port_fast_age,
.port_change_mtu = rtl8366rb_change_mtu,
.port_max_mtu = rtl8366rb_max_mtu,
+ .port_hsr_join = dsa_port_simple_hsr_join,
+ .port_hsr_leave = dsa_port_simple_hsr_leave,
};
static const struct realtek_ops rtl8366rb_ops = {
@@ -1892,6 +1838,7 @@ static const struct realtek_ops rtl8366rb_ops = {
const struct realtek_variant rtl8366rb_variant = {
.ds_ops = &rtl8366rb_switch_ops,
.ops = &rtl8366rb_ops,
+ .phylink_mac_ops = &rtl8366rb_phylink_mac_ops,
.clk_delay = 10,
.cmd_read = 0xa9,
.cmd_write = 0xa8,
@@ -1910,7 +1857,7 @@ static struct platform_driver rtl8366rb_smi_driver = {
.of_match_table = rtl8366rb_of_match,
},
.probe = realtek_smi_probe,
- .remove_new = realtek_smi_remove,
+ .remove = realtek_smi_remove,
.shutdown = realtek_smi_shutdown,
};
@@ -1952,4 +1899,4 @@ module_exit(rtl8366rb_exit);
MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
MODULE_DESCRIPTION("Driver for RTL8366RB ethernet switch");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(REALTEK_DSA);
+MODULE_IMPORT_NS("REALTEK_DSA");
diff --git a/drivers/net/dsa/realtek/rtl8366rb.h b/drivers/net/dsa/realtek/rtl8366rb.h
new file mode 100644
index 000000000000..685ff3275faa
--- /dev/null
+++ b/drivers/net/dsa/realtek/rtl8366rb.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _RTL8366RB_H
+#define _RTL8366RB_H
+
+#include "realtek.h"
+
+#define RTL8366RB_PORT_NUM_CPU 5
+#define RTL8366RB_NUM_PORTS 6
+#define RTL8366RB_PHY_NO_MAX 4
+#define RTL8366RB_NUM_LEDGROUPS 4
+#define RTL8366RB_PHY_ADDR_MAX 31
+
+/* LED control registers */
+/* The LED blink rate is global; it is used by all triggers in all groups. */
+#define RTL8366RB_LED_BLINKRATE_REG 0x0430
+#define RTL8366RB_LED_BLINKRATE_MASK 0x0007
+#define RTL8366RB_LED_BLINKRATE_28MS 0x0000
+#define RTL8366RB_LED_BLINKRATE_56MS 0x0001
+#define RTL8366RB_LED_BLINKRATE_84MS 0x0002
+#define RTL8366RB_LED_BLINKRATE_111MS 0x0003
+#define RTL8366RB_LED_BLINKRATE_222MS 0x0004
+#define RTL8366RB_LED_BLINKRATE_446MS 0x0005
+
+/* LED trigger event for each group */
+#define RTL8366RB_LED_CTRL_REG 0x0431
+#define RTL8366RB_LED_CTRL_OFFSET(led_group) \
+ (4 * (led_group))
+#define RTL8366RB_LED_CTRL_MASK(led_group) \
+ (0xf << RTL8366RB_LED_CTRL_OFFSET(led_group))
+
+/* The RTL8366RB_LED_X_X registers are used to manually set the LED state only
+ * when the corresponding LED group in RTL8366RB_LED_CTRL_REG is
+ * RTL8366RB_LEDGROUP_FORCE. Otherwise, they are ignored.
+ */
+#define RTL8366RB_LED_0_1_CTRL_REG 0x0432
+#define RTL8366RB_LED_2_3_CTRL_REG 0x0433
+#define RTL8366RB_LED_X_X_CTRL_REG(led_group) \
+ ((led_group) <= 1 ? \
+ RTL8366RB_LED_0_1_CTRL_REG : \
+ RTL8366RB_LED_2_3_CTRL_REG)
+#define RTL8366RB_LED_0_X_CTRL_MASK GENMASK(5, 0)
+#define RTL8366RB_LED_X_1_CTRL_MASK GENMASK(11, 6)
+#define RTL8366RB_LED_2_X_CTRL_MASK GENMASK(5, 0)
+#define RTL8366RB_LED_X_3_CTRL_MASK GENMASK(11, 6)
+
+enum rtl8366_ledgroup_mode {
+ RTL8366RB_LEDGROUP_OFF = 0x0,
+ RTL8366RB_LEDGROUP_DUP_COL = 0x1,
+ RTL8366RB_LEDGROUP_LINK_ACT = 0x2,
+ RTL8366RB_LEDGROUP_SPD1000 = 0x3,
+ RTL8366RB_LEDGROUP_SPD100 = 0x4,
+ RTL8366RB_LEDGROUP_SPD10 = 0x5,
+ RTL8366RB_LEDGROUP_SPD1000_ACT = 0x6,
+ RTL8366RB_LEDGROUP_SPD100_ACT = 0x7,
+ RTL8366RB_LEDGROUP_SPD10_ACT = 0x8,
+ RTL8366RB_LEDGROUP_SPD100_10_ACT = 0x9,
+ RTL8366RB_LEDGROUP_FIBER = 0xa,
+ RTL8366RB_LEDGROUP_AN_FAULT = 0xb,
+ RTL8366RB_LEDGROUP_LINK_RX = 0xc,
+ RTL8366RB_LEDGROUP_LINK_TX = 0xd,
+ RTL8366RB_LEDGROUP_MASTER = 0xe,
+ RTL8366RB_LEDGROUP_FORCE = 0xf,
+
+ __RTL8366RB_LEDGROUP_MODE_MAX
+};
+
+#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB_LEDS)
+
+struct rtl8366rb_led {
+ u8 port_num;
+ u8 led_group;
+ struct realtek_priv *priv;
+ struct led_classdev cdev;
+};
+
+int rtl8366rb_setup_leds(struct realtek_priv *priv);
+
+#else
+
+static inline int rtl8366rb_setup_leds(struct realtek_priv *priv)
+{
+ return 0;
+}
+
+#endif /* IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB_LEDS) */
+
+/**
+ * struct rtl8366rb - RTL8366RB-specific data
+ * @max_mtu: per-port max MTU setting
+ * @pvid_enabled: if PVID is set for respective port
+ * @leds: per-port and per-ledgroup led info
+ */
+struct rtl8366rb {
+ unsigned int max_mtu[RTL8366RB_NUM_PORTS];
+ bool pvid_enabled[RTL8366RB_NUM_PORTS];
+#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB_LEDS)
+ struct rtl8366rb_led leds[RTL8366RB_NUM_PORTS][RTL8366RB_NUM_LEDGROUPS];
+#endif
+};
+
+/* This code is used also with LEDs disabled */
+int rb8366rb_set_ledgroup_mode(struct realtek_priv *priv,
+ u8 led_group,
+ enum rtl8366_ledgroup_mode mode);
+
+#endif /* _RTL8366RB_H */
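The RTL8366RB_LED_CTRL_OFFSET()/_MASK() macros in the new header encode that each LED group owns a 4-bit trigger nibble of RTL8366RB_LED_CTRL_REG, so group N is programmed through bits [4N+3:4N]. A standalone check of that arithmetic (hypothetical helper, not part of the patch):

#include <linux/bits.h>
#include <linux/types.h>

static u32 example_ledgroup_ctrl(u8 led_group, u8 mode)
{
	u32 offset = 4 * led_group;
	u32 mask = GENMASK(offset + 3, offset);

	/* value to OR into the register after clearing @mask */
	return ((u32)mode << offset) & mask;
}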
diff --git a/drivers/net/dsa/realtek/rtl83xx.c b/drivers/net/dsa/realtek/rtl83xx.c
index d2e876805393..2b9bd4462714 100644
--- a/drivers/net/dsa/realtek/rtl83xx.c
+++ b/drivers/net/dsa/realtek/rtl83xx.c
@@ -25,7 +25,7 @@ void rtl83xx_lock(void *ctx)
mutex_lock(&priv->map_lock);
}
-EXPORT_SYMBOL_NS_GPL(rtl83xx_lock, REALTEK_DSA);
+EXPORT_SYMBOL_NS_GPL(rtl83xx_lock, "REALTEK_DSA");
/**
* rtl83xx_unlock() - Unlocks the mutex used by regmaps
@@ -42,7 +42,7 @@ void rtl83xx_unlock(void *ctx)
mutex_unlock(&priv->map_lock);
}
-EXPORT_SYMBOL_NS_GPL(rtl83xx_unlock, REALTEK_DSA);
+EXPORT_SYMBOL_NS_GPL(rtl83xx_unlock, "REALTEK_DSA");
static int rtl83xx_user_mdio_read(struct mii_bus *bus, int addr, int regnum)
{
@@ -109,7 +109,7 @@ err_put_node:
return ret;
}
-EXPORT_SYMBOL_NS_GPL(rtl83xx_setup_user_mdio, REALTEK_DSA);
+EXPORT_SYMBOL_NS_GPL(rtl83xx_setup_user_mdio, "REALTEK_DSA");
/**
* rtl83xx_probe() - probe a Realtek switch
@@ -185,11 +185,9 @@ rtl83xx_probe(struct device *dev,
/* TODO: if power is software controlled, set up any regulators here */
priv->reset_ctl = devm_reset_control_get_optional(dev, NULL);
- if (IS_ERR(priv->reset_ctl)) {
- ret = PTR_ERR(priv->reset_ctl);
- dev_err_probe(dev, ret, "failed to get reset control\n");
- return ERR_CAST(priv->reset_ctl);
- }
+ if (IS_ERR(priv->reset_ctl))
+ return dev_err_cast_probe(dev, priv->reset_ctl,
+ "failed to get reset control\n");
priv->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(priv->reset)) {
@@ -210,7 +208,7 @@ rtl83xx_probe(struct device *dev,
return priv;
}
-EXPORT_SYMBOL_NS_GPL(rtl83xx_probe, REALTEK_DSA);
+EXPORT_SYMBOL_NS_GPL(rtl83xx_probe, "REALTEK_DSA");
/**
* rtl83xx_register_switch() - detects and register a switch
@@ -236,6 +234,7 @@ int rtl83xx_register_switch(struct realtek_priv *priv)
ds->priv = priv;
ds->dev = priv->dev;
ds->ops = priv->variant->ds_ops;
+ ds->phylink_mac_ops = priv->variant->phylink_mac_ops;
ds->num_ports = priv->num_ports;
ret = dsa_register_switch(ds);
@@ -246,7 +245,7 @@ int rtl83xx_register_switch(struct realtek_priv *priv)
return 0;
}
-EXPORT_SYMBOL_NS_GPL(rtl83xx_register_switch, REALTEK_DSA);
+EXPORT_SYMBOL_NS_GPL(rtl83xx_register_switch, "REALTEK_DSA");
/**
* rtl83xx_unregister_switch() - unregister a switch
@@ -263,7 +262,7 @@ void rtl83xx_unregister_switch(struct realtek_priv *priv)
dsa_unregister_switch(ds);
}
-EXPORT_SYMBOL_NS_GPL(rtl83xx_unregister_switch, REALTEK_DSA);
+EXPORT_SYMBOL_NS_GPL(rtl83xx_unregister_switch, "REALTEK_DSA");
/**
* rtl83xx_shutdown() - shutdown a switch
@@ -284,24 +283,21 @@ void rtl83xx_shutdown(struct realtek_priv *priv)
dev_set_drvdata(priv->dev, NULL);
}
-EXPORT_SYMBOL_NS_GPL(rtl83xx_shutdown, REALTEK_DSA);
+EXPORT_SYMBOL_NS_GPL(rtl83xx_shutdown, "REALTEK_DSA");
/**
* rtl83xx_remove() - Cleanup a realtek switch driver
* @priv: realtek_priv pointer
*
- * If a method is provided, this function asserts the hard reset of the switch
- * in order to avoid leaking traffic when the driver is gone.
+ * Placeholder for common cleanup procedures.
*
- * Context: Might sleep if priv->gdev->chip->can_sleep.
+ * Context: Any
* Return: nothing
*/
void rtl83xx_remove(struct realtek_priv *priv)
{
- /* leave the device reset asserted */
- rtl83xx_reset_assert(priv);
}
-EXPORT_SYMBOL_NS_GPL(rtl83xx_remove, REALTEK_DSA);
+EXPORT_SYMBOL_NS_GPL(rtl83xx_remove, "REALTEK_DSA");
void rtl83xx_reset_assert(struct realtek_priv *priv)
{
diff --git a/drivers/net/dsa/rzn1_a5psw.c b/drivers/net/dsa/rzn1_a5psw.c
index 10092ea85e46..4d857e3be10b 100644
--- a/drivers/net/dsa/rzn1_a5psw.c
+++ b/drivers/net/dsa/rzn1_a5psw.c
@@ -239,23 +239,31 @@ static void a5psw_phylink_get_caps(struct dsa_switch *ds, int port,
}
static struct phylink_pcs *
-a5psw_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
+a5psw_phylink_mac_select_pcs(struct phylink_config *config,
phy_interface_t interface)
{
- struct dsa_port *dp = dsa_to_port(ds, port);
- struct a5psw *a5psw = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct a5psw *a5psw = dp->ds->priv;
- if (!dsa_port_is_cpu(dp) && a5psw->pcs[port])
- return a5psw->pcs[port];
+ if (dsa_port_is_cpu(dp))
+ return NULL;
- return NULL;
+ return a5psw->pcs[dp->index];
+}
+
+static void a5psw_phylink_mac_config(struct phylink_config *config,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
}
-static void a5psw_phylink_mac_link_down(struct dsa_switch *ds, int port,
+static void a5psw_phylink_mac_link_down(struct phylink_config *config,
unsigned int mode,
phy_interface_t interface)
{
- struct a5psw *a5psw = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct a5psw *a5psw = dp->ds->priv;
+ int port = dp->index;
u32 cmd_cfg;
cmd_cfg = a5psw_reg_readl(a5psw, A5PSW_CMD_CFG(port));
@@ -263,15 +271,17 @@ static void a5psw_phylink_mac_link_down(struct dsa_switch *ds, int port,
a5psw_reg_writel(a5psw, A5PSW_CMD_CFG(port), cmd_cfg);
}
-static void a5psw_phylink_mac_link_up(struct dsa_switch *ds, int port,
+static void a5psw_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
unsigned int mode,
phy_interface_t interface,
- struct phy_device *phydev, int speed,
- int duplex, bool tx_pause, bool rx_pause)
+ int speed, int duplex, bool tx_pause,
+ bool rx_pause)
{
u32 cmd_cfg = A5PSW_CMD_CFG_RX_ENA | A5PSW_CMD_CFG_TX_ENA |
A5PSW_CMD_CFG_TX_CRC_APPEND;
- struct a5psw *a5psw = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct a5psw *a5psw = dp->ds->priv;
if (speed == SPEED_1000)
cmd_cfg |= A5PSW_CMD_CFG_ETH_SPEED;
@@ -284,7 +294,7 @@ static void a5psw_phylink_mac_link_up(struct dsa_switch *ds, int port,
if (!rx_pause)
cmd_cfg &= ~A5PSW_CMD_CFG_PAUSE_IGNORE;
- a5psw_reg_writel(a5psw, A5PSW_CMD_CFG(port), cmd_cfg);
+ a5psw_reg_writel(a5psw, A5PSW_CMD_CFG(dp->index), cmd_cfg);
}
static int a5psw_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
@@ -327,8 +337,9 @@ static void a5psw_port_rx_block_set(struct a5psw *a5psw, int port, bool block)
static void a5psw_flooding_set_resolution(struct a5psw *a5psw, int port,
bool set)
{
- u8 offsets[] = {A5PSW_UCAST_DEF_MASK, A5PSW_BCAST_DEF_MASK,
- A5PSW_MCAST_DEF_MASK};
+ static const u8 offsets[] = {
+ A5PSW_UCAST_DEF_MASK, A5PSW_BCAST_DEF_MASK, A5PSW_MCAST_DEF_MASK
+ };
int i;
for (i = 0; i < ARRAY_SIZE(offsets); i++)
@@ -792,10 +803,8 @@ static void a5psw_get_strings(struct dsa_switch *ds, int port, u32 stringset,
if (stringset != ETH_SS_STATS)
return;
- for (u = 0; u < ARRAY_SIZE(a5psw_stats); u++) {
- memcpy(data + u * ETH_GSTRING_LEN, a5psw_stats[u].name,
- ETH_GSTRING_LEN);
- }
+ for (u = 0; u < ARRAY_SIZE(a5psw_stats); u++)
+ ethtool_puts(&data, a5psw_stats[u].name);
}
static void a5psw_get_ethtool_stats(struct dsa_switch *ds, int port,
@@ -992,15 +1001,19 @@ static int a5psw_setup(struct dsa_switch *ds)
return 0;
}
+static const struct phylink_mac_ops a5psw_phylink_mac_ops = {
+ .mac_select_pcs = a5psw_phylink_mac_select_pcs,
+ .mac_config = a5psw_phylink_mac_config,
+ .mac_link_down = a5psw_phylink_mac_link_down,
+ .mac_link_up = a5psw_phylink_mac_link_up,
+};
+
static const struct dsa_switch_ops a5psw_switch_ops = {
.get_tag_protocol = a5psw_get_tag_protocol,
.setup = a5psw_setup,
.port_disable = a5psw_port_disable,
.port_enable = a5psw_port_enable,
.phylink_get_caps = a5psw_phylink_get_caps,
- .phylink_mac_select_pcs = a5psw_phylink_mac_select_pcs,
- .phylink_mac_link_down = a5psw_phylink_mac_link_down,
- .phylink_mac_link_up = a5psw_phylink_mac_link_up,
.port_change_mtu = a5psw_port_change_mtu,
.port_max_mtu = a5psw_port_max_mtu,
.get_sset_count = a5psw_get_sset_count,
@@ -1022,6 +1035,8 @@ static const struct dsa_switch_ops a5psw_switch_ops = {
.port_fdb_add = a5psw_port_fdb_add,
.port_fdb_del = a5psw_port_fdb_del,
.port_fdb_dump = a5psw_port_fdb_dump,
+ .port_hsr_join = dsa_port_simple_hsr_join,
+ .port_hsr_leave = dsa_port_simple_hsr_leave,
};
static int a5psw_mdio_wait_busy(struct a5psw *a5psw)
@@ -1214,58 +1229,45 @@ static int a5psw_probe(struct platform_device *pdev)
if (ret)
return ret;
- a5psw->hclk = devm_clk_get(dev, "hclk");
+ a5psw->hclk = devm_clk_get_enabled(dev, "hclk");
if (IS_ERR(a5psw->hclk)) {
dev_err(dev, "failed get hclk clock\n");
ret = PTR_ERR(a5psw->hclk);
goto free_pcs;
}
- a5psw->clk = devm_clk_get(dev, "clk");
+ a5psw->clk = devm_clk_get_enabled(dev, "clk");
if (IS_ERR(a5psw->clk)) {
dev_err(dev, "failed get clk_switch clock\n");
ret = PTR_ERR(a5psw->clk);
goto free_pcs;
}
- ret = clk_prepare_enable(a5psw->clk);
- if (ret)
- goto free_pcs;
-
- ret = clk_prepare_enable(a5psw->hclk);
- if (ret)
- goto clk_disable;
-
- mdio = of_get_child_by_name(dev->of_node, "mdio");
- if (of_device_is_available(mdio)) {
+ mdio = of_get_available_child_by_name(dev->of_node, "mdio");
+ if (mdio) {
ret = a5psw_probe_mdio(a5psw, mdio);
+ of_node_put(mdio);
if (ret) {
- of_node_put(mdio);
dev_err(dev, "Failed to register MDIO: %d\n", ret);
- goto hclk_disable;
+ goto free_pcs;
}
}
- of_node_put(mdio);
-
ds = &a5psw->ds;
ds->dev = dev;
ds->num_ports = A5PSW_PORTS_NUM;
ds->ops = &a5psw_switch_ops;
+ ds->phylink_mac_ops = &a5psw_phylink_mac_ops;
ds->priv = a5psw;
ret = dsa_register_switch(ds);
if (ret) {
dev_err(dev, "Failed to register DSA switch: %d\n", ret);
- goto hclk_disable;
+ goto free_pcs;
}
return 0;
-hclk_disable:
- clk_disable_unprepare(a5psw->hclk);
-clk_disable:
- clk_disable_unprepare(a5psw->clk);
free_pcs:
a5psw_pcs_free(a5psw);
@@ -1281,8 +1283,6 @@ static void a5psw_remove(struct platform_device *pdev)
dsa_unregister_switch(&a5psw->ds);
a5psw_pcs_free(a5psw);
- clk_disable_unprepare(a5psw->hclk);
- clk_disable_unprepare(a5psw->clk);
}
static void a5psw_shutdown(struct platform_device *pdev)
@@ -1309,7 +1309,7 @@ static struct platform_driver a5psw_driver = {
.of_match_table = a5psw_of_mtable,
},
.probe = a5psw_probe,
- .remove_new = a5psw_remove,
+ .remove = a5psw_remove,
.shutdown = a5psw_shutdown,
};
module_platform_driver(a5psw_driver);
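The a5psw conversion above is part of moving the phylink MAC callbacks out of struct dsa_switch_ops and into a dedicated struct phylink_mac_ops attached via ds->phylink_mac_ops; the callbacks now receive a struct phylink_config and recover the port through dsa_phylink_to_port(). A minimal sketch of that callback shape (all "foo" identifiers are hypothetical, not from this patch):

#include <linux/phylink.h>
#include <net/dsa.h>

static void foo_mac_config(struct phylink_config *config, unsigned int mode,
			   const struct phylink_link_state *state)
{
	/* nothing to do for this hypothetical switch */
}

static void foo_mac_link_down(struct phylink_config *config,
			      unsigned int mode, phy_interface_t interface)
{
	struct dsa_port *dp = dsa_phylink_to_port(config);

	/* quiesce the MAC of this port; dp->ds->priv holds the driver data */
	dev_dbg(dp->ds->dev, "link down on port %d\n", dp->index);
}

static void foo_mac_link_up(struct phylink_config *config,
			    struct phy_device *phydev, unsigned int mode,
			    phy_interface_t interface, int speed, int duplex,
			    bool tx_pause, bool rx_pause)
{
	struct dsa_port *dp = dsa_phylink_to_port(config);

	/* program speed/duplex/pause for this port here */
	dev_dbg(dp->ds->dev, "link up on port %d, %d Mbps\n", dp->index, speed);
}

static const struct phylink_mac_ops foo_phylink_mac_ops = {
	.mac_config	= foo_mac_config,
	.mac_link_down	= foo_mac_link_down,
	.mac_link_up	= foo_mac_link_up,
};

/* in probe, before dsa_register_switch(ds):
 *	ds->phylink_mac_ops = &foo_phylink_mac_ops;
 */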
diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h
index 8c66d3bf61f0..dceb96ae9c83 100644
--- a/drivers/net/dsa/sja1105/sja1105.h
+++ b/drivers/net/dsa/sja1105/sja1105.h
@@ -278,7 +278,7 @@ struct sja1105_private {
struct mii_bus *mdio_base_t1;
struct mii_bus *mdio_base_tx;
struct mii_bus *mdio_pcs;
- struct dw_xpcs *xpcs[SJA1105_MAX_NUM_PORTS];
+ struct phylink_pcs *pcs[SJA1105_MAX_NUM_PORTS];
struct sja1105_ptp_data ptp_data;
struct sja1105_tas_data tas_data;
};
diff --git a/drivers/net/dsa/sja1105/sja1105_ethtool.c b/drivers/net/dsa/sja1105/sja1105_ethtool.c
index decc6c931dc1..84d7d3f66bd0 100644
--- a/drivers/net/dsa/sja1105/sja1105_ethtool.c
+++ b/drivers/net/dsa/sja1105/sja1105_ethtool.c
@@ -571,6 +571,9 @@ void sja1105_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data)
max_ctr = __MAX_SJA1105PQRS_PORT_COUNTER;
for (i = 0; i < max_ctr; i++) {
+ if (!strlen(sja1105_port_counters[i].name))
+ continue;
+
rc = sja1105_port_counter_read(priv, port, i, &data[k++]);
if (rc) {
dev_err(ds->dev,
@@ -586,7 +589,6 @@ void sja1105_get_strings(struct dsa_switch *ds, int port,
{
struct sja1105_private *priv = ds->priv;
enum sja1105_counter_index max_ctr, i;
- char *p = data;
if (stringset != ETH_SS_STATS)
return;
@@ -598,8 +600,10 @@ void sja1105_get_strings(struct dsa_switch *ds, int port,
max_ctr = __MAX_SJA1105PQRS_PORT_COUNTER;
for (i = 0; i < max_ctr; i++) {
- strscpy(p, sja1105_port_counters[i].name, ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
+ if (!strlen(sja1105_port_counters[i].name))
+ continue;
+
+ ethtool_puts(&data, sja1105_port_counters[i].name);
}
}
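Both get_strings() conversions above replace the strscpy()-plus-pointer-arithmetic idiom with ethtool_puts(), which copies one ETH_GSTRING_LEN-sized string and advances the data pointer in a single call. A small usage sketch with a made-up stats array:

#include <linux/kernel.h>
#include <linux/ethtool.h>
#include <net/dsa.h>

static const char * const foo_stat_names[] = { "rx_frames_ok", "tx_frames_ok" };

static void foo_get_strings(struct dsa_switch *ds, int port, u32 stringset,
			    u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	/* each call fills one ETH_GSTRING_LEN slot and advances data */
	for (i = 0; i < ARRAY_SIZE(foo_stat_names); i++)
		ethtool_puts(&data, foo_stat_names[i]);
}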
diff --git a/drivers/net/dsa/sja1105/sja1105_flower.c b/drivers/net/dsa/sja1105/sja1105_flower.c
index 9e8ca182c722..05d8ed3121e7 100644
--- a/drivers/net/dsa/sja1105/sja1105_flower.c
+++ b/drivers/net/dsa/sja1105/sja1105_flower.c
@@ -214,6 +214,9 @@ static int sja1105_flower_parse_key(struct sja1105_private *priv,
return -EOPNOTSUPP;
}
+ if (flow_rule_match_has_control_flags(rule, extack))
+ return -EOPNOTSUPP;
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic match;
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 6646f7fb0f90..aa2145cf29a6 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -15,7 +15,6 @@
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
-#include <linux/pcs/pcs-xpcs.h>
#include <linux/netdev_features.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
@@ -1188,9 +1187,8 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
struct device_node *ports_node)
{
struct device *dev = &priv->spidev->dev;
- struct device_node *child;
- for_each_available_child_of_node(ports_node, child) {
+ for_each_available_child_of_node_scoped(ports_node, child) {
struct device_node *phy_node;
phy_interface_t phy_mode;
u32 index;
@@ -1200,7 +1198,6 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
if (of_property_read_u32(child, "reg", &index) < 0) {
dev_err(dev, "Port number not defined in device tree "
"(property \"reg\")\n");
- of_node_put(child);
return -ENODEV;
}
@@ -1210,7 +1207,6 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
dev_err(dev, "Failed to read phy-mode or "
"phy-interface-type property for port %d\n",
index);
- of_node_put(child);
return -ENODEV;
}
@@ -1219,7 +1215,6 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
if (!of_phy_is_fixed_link(child)) {
dev_err(dev, "phy-handle or fixed-link "
"properties missing!\n");
- of_node_put(child);
return -ENODEV;
}
/* phy-handle is missing, but fixed-link isn't.
@@ -1233,10 +1228,8 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
priv->phy_mode[index] = phy_mode;
err = sja1105_parse_rgmii_delays(priv, index, child);
- if (err) {
- of_node_put(child);
+ if (err)
return err;
- }
}
return 0;
@@ -1263,29 +1256,11 @@ static int sja1105_parse_dt(struct sja1105_private *priv)
return rc;
}
-/* Convert link speed from SJA1105 to ethtool encoding */
-static int sja1105_port_speed_to_ethtool(struct sja1105_private *priv,
- u64 speed)
-{
- if (speed == priv->info->port_speed[SJA1105_SPEED_10MBPS])
- return SPEED_10;
- if (speed == priv->info->port_speed[SJA1105_SPEED_100MBPS])
- return SPEED_100;
- if (speed == priv->info->port_speed[SJA1105_SPEED_1000MBPS])
- return SPEED_1000;
- if (speed == priv->info->port_speed[SJA1105_SPEED_2500MBPS])
- return SPEED_2500;
- return SPEED_UNKNOWN;
-}
-
-/* Set link speed in the MAC configuration for a specific port. */
-static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
- int speed_mbps)
+static int sja1105_set_port_speed(struct sja1105_private *priv, int port,
+ int speed_mbps)
{
struct sja1105_mac_config_entry *mac;
- struct device *dev = priv->ds->dev;
u64 speed;
- int rc;
/* On P/Q/R/S, one can read from the device via the MAC reconfiguration
* tables. On E/T, MAC reconfig tables are not readable, only writable.
@@ -1319,7 +1294,7 @@ static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
speed = priv->info->port_speed[SJA1105_SPEED_2500MBPS];
break;
default:
- dev_err(dev, "Invalid speed %iMbps\n", speed_mbps);
+ dev_err(priv->ds->dev, "Invalid speed %iMbps\n", speed_mbps);
return -EINVAL;
}
@@ -1327,15 +1302,28 @@ static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
* table, since this will be used for the clocking setup, and we no
* longer need to store it in the static config (already told hardware
* we want auto during upload phase).
- * Actually for the SGMII port, the MAC is fixed at 1 Gbps and
- * we need to configure the PCS only (if even that).
*/
- if (priv->phy_mode[port] == PHY_INTERFACE_MODE_SGMII)
- mac[port].speed = priv->info->port_speed[SJA1105_SPEED_1000MBPS];
- else if (priv->phy_mode[port] == PHY_INTERFACE_MODE_2500BASEX)
- mac[port].speed = priv->info->port_speed[SJA1105_SPEED_2500MBPS];
- else
- mac[port].speed = speed;
+ mac[port].speed = speed;
+
+ return 0;
+}
+
+/* Write the MAC Configuration Table entry and, if necessary, the CGU settings,
+ * after a link speed change for this port.
+ */
+static int sja1105_set_port_config(struct sja1105_private *priv, int port)
+{
+ struct sja1105_mac_config_entry *mac;
+ struct device *dev = priv->ds->dev;
+ int rc;
+
+ /* On P/Q/R/S, one can read from the device via the MAC reconfiguration
+ * tables. On E/T, MAC reconfig tables are not readable, only writable.
+ * We have to *know* what the MAC looks like. For the sake of keeping
+ * the code common, we'll use the static configuration tables as a
+ * reasonable approximation for both E/T and P/Q/R/S.
+ */
+ mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
/* Write to the dynamic reconfiguration tables */
rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
@@ -1358,34 +1346,42 @@ static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
}
static struct phylink_pcs *
-sja1105_mac_select_pcs(struct dsa_switch *ds, int port, phy_interface_t iface)
+sja1105_mac_select_pcs(struct phylink_config *config, phy_interface_t iface)
{
- struct sja1105_private *priv = ds->priv;
- struct dw_xpcs *xpcs = priv->xpcs[port];
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct sja1105_private *priv = dp->ds->priv;
- if (xpcs)
- return &xpcs->pcs;
+ return priv->pcs[dp->index];
+}
- return NULL;
+static void sja1105_mac_config(struct phylink_config *config,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
}
-static void sja1105_mac_link_down(struct dsa_switch *ds, int port,
+static void sja1105_mac_link_down(struct phylink_config *config,
unsigned int mode,
phy_interface_t interface)
{
- sja1105_inhibit_tx(ds->priv, BIT(port), true);
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+
+ sja1105_inhibit_tx(dp->ds->priv, BIT(dp->index), true);
}
-static void sja1105_mac_link_up(struct dsa_switch *ds, int port,
+static void sja1105_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
unsigned int mode,
phy_interface_t interface,
- struct phy_device *phydev,
int speed, int duplex,
bool tx_pause, bool rx_pause)
{
- struct sja1105_private *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct sja1105_private *priv = dp->ds->priv;
+ int port = dp->index;
- sja1105_adjust_port_config(priv, port, speed);
+ if (!sja1105_set_port_speed(priv, port, speed))
+ sja1105_set_port_config(priv, port);
sja1105_inhibit_tx(priv, BIT(port), false);
}
@@ -2078,6 +2074,7 @@ static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
switch (state) {
case BR_STATE_DISABLED:
case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
/* From UM10944 description of DRPDTAG (why put this there?):
* "Management traffic flows to the port regardless of the state
	 * of the INGRESS flag". So BPDUs are still allowed to pass.
@@ -2087,11 +2084,6 @@ static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
mac[port].egress = false;
mac[port].dyn_learn = false;
break;
- case BR_STATE_LISTENING:
- mac[port].ingress = true;
- mac[port].egress = false;
- mac[port].dyn_learn = false;
- break;
case BR_STATE_LEARNING:
mac[port].ingress = true;
mac[port].egress = false;
@@ -2122,14 +2114,13 @@ static int sja1105_bridge_join(struct dsa_switch *ds, int port,
if (rc)
return rc;
- rc = dsa_tag_8021q_bridge_join(ds, port, bridge);
+ rc = dsa_tag_8021q_bridge_join(ds, port, bridge, tx_fwd_offload,
+ extack);
if (rc) {
sja1105_bridge_member(ds, port, bridge, false);
return rc;
}
- *tx_fwd_offload = true;
-
return 0;
}
@@ -2289,8 +2280,8 @@ int sja1105_static_config_reload(struct sja1105_private *priv,
{
struct ptp_system_timestamp ptp_sts_before;
struct ptp_system_timestamp ptp_sts_after;
- int speed_mbps[SJA1105_MAX_NUM_PORTS];
u16 bmcr[SJA1105_MAX_NUM_PORTS] = {0};
+ u64 mac_speed[SJA1105_MAX_NUM_PORTS];
struct sja1105_mac_config_entry *mac;
struct dsa_switch *ds = priv->ds;
s64 t1, t2, t3, t4;
@@ -2303,17 +2294,16 @@ int sja1105_static_config_reload(struct sja1105_private *priv,
mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
- /* Back up the dynamic link speed changed by sja1105_adjust_port_config
+ /* Back up the dynamic link speed changed by sja1105_set_port_speed()
* in order to temporarily restore it to SJA1105_SPEED_AUTO - which the
* switch wants to see in the static config in order to allow us to
* change it through the dynamic interface later.
*/
for (i = 0; i < ds->num_ports; i++) {
- speed_mbps[i] = sja1105_port_speed_to_ethtool(priv,
- mac[i].speed);
+ mac_speed[i] = mac[i].speed;
mac[i].speed = priv->info->port_speed[SJA1105_SPEED_AUTO];
- if (priv->xpcs[i])
+ if (priv->pcs[i])
bmcr[i] = mdiobus_c45_read(priv->mdio_pcs, i,
MDIO_MMD_VEND2, MDIO_CTRL1);
}
@@ -2370,14 +2360,15 @@ int sja1105_static_config_reload(struct sja1105_private *priv,
}
for (i = 0; i < ds->num_ports; i++) {
- struct dw_xpcs *xpcs = priv->xpcs[i];
+ struct phylink_pcs *pcs = priv->pcs[i];
unsigned int neg_mode;
- rc = sja1105_adjust_port_config(priv, i, speed_mbps[i]);
+ mac[i].speed = mac_speed[i];
+ rc = sja1105_set_port_config(priv, i);
if (rc < 0)
goto out;
- if (!xpcs)
+ if (!pcs)
continue;
if (bmcr[i] & BMCR_ANENABLE)
@@ -2385,7 +2376,8 @@ int sja1105_static_config_reload(struct sja1105_private *priv,
else
neg_mode = PHYLINK_PCS_NEG_OUTBAND;
- rc = xpcs_do_config(xpcs, priv->phy_mode[i], NULL, neg_mode);
+ rc = pcs->ops->pcs_config(pcs, neg_mode, priv->phy_mode[i],
+ NULL, true);
if (rc < 0)
goto out;
@@ -2401,8 +2393,8 @@ int sja1105_static_config_reload(struct sja1105_private *priv,
else
speed = SPEED_10;
- xpcs_link_up(&xpcs->pcs, neg_mode, priv->phy_mode[i],
- speed, DUPLEX_FULL);
+ pcs->ops->pcs_link_up(pcs, neg_mode, priv->phy_mode[i],
+ speed, DUPLEX_FULL);
}
}
@@ -3154,10 +3146,8 @@ static int sja1105_setup(struct dsa_switch *ds)
* TPID is ETH_P_SJA1105, and the VLAN ID is the port pvid.
*/
ds->vlan_filtering_is_global = true;
- ds->untag_bridge_pvid = true;
ds->fdb_isolation = true;
- /* tag_8021q has 3 bits for the VBID, and the value 0 is reserved */
- ds->max_num_bridges = 7;
+ ds->max_num_bridges = DSA_TAG_8021Q_MAX_NUM_BRIDGES;
/* Advertise the 8 egress queues */
ds->num_tx_queues = SJA1105_NUM_TC;
@@ -3198,6 +3188,13 @@ static void sja1105_teardown(struct dsa_switch *ds)
sja1105_static_config_free(&priv->static_config);
}
+static const struct phylink_mac_ops sja1105_phylink_mac_ops = {
+ .mac_select_pcs = sja1105_mac_select_pcs,
+ .mac_config = sja1105_mac_config,
+ .mac_link_up = sja1105_mac_link_up,
+ .mac_link_down = sja1105_mac_link_down,
+};
+
static const struct dsa_switch_ops sja1105_switch_ops = {
.get_tag_protocol = sja1105_get_tag_protocol,
.connect_tag_protocol = sja1105_connect_tag_protocol,
@@ -3207,9 +3204,6 @@ static const struct dsa_switch_ops sja1105_switch_ops = {
.port_change_mtu = sja1105_change_mtu,
.port_max_mtu = sja1105_get_max_mtu,
.phylink_get_caps = sja1105_phylink_get_caps,
- .phylink_mac_select_pcs = sja1105_mac_select_pcs,
- .phylink_mac_link_up = sja1105_mac_link_up,
- .phylink_mac_link_down = sja1105_mac_link_down,
.get_strings = sja1105_get_strings,
.get_ethtool_stats = sja1105_get_ethtool_stats,
.get_sset_count = sja1105_get_sset_count,
@@ -3375,6 +3369,7 @@ static int sja1105_probe(struct spi_device *spi)
ds->dev = dev;
ds->num_ports = priv->info->num_ports;
ds->ops = &sja1105_switch_ops;
+ ds->phylink_mac_ops = &sja1105_phylink_mac_ops;
ds->priv = priv;
priv->ds = ds;
@@ -3456,7 +3451,6 @@ MODULE_DEVICE_TABLE(spi, sja1105_spi_ids);
static struct spi_driver sja1105_driver = {
.driver = {
.name = "sja1105",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(sja1105_dt_ids),
},
.id_table = sja1105_spi_ids,
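The sja1105_parse_ports_node() hunks above switch to for_each_available_child_of_node_scoped(), which drops the child node reference automatically when the loop variable goes out of scope, so the explicit of_node_put() calls on every error path can be removed. A minimal sketch of the pattern (the node layout and property are illustrative only):

#include <linux/of.h>

static int foo_count_ports(struct device_node *ports_node)
{
	int nports = 0;

	for_each_available_child_of_node_scoped(ports_node, child) {
		u32 index;

		/* early return is safe: "child" is put automatically */
		if (of_property_read_u32(child, "reg", &index))
			return -ENODEV;

		nports++;
	}

	return nports;
}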
diff --git a/drivers/net/dsa/sja1105/sja1105_mdio.c b/drivers/net/dsa/sja1105/sja1105_mdio.c
index 52ddb4ef259e..8d535c033cef 100644
--- a/drivers/net/dsa/sja1105/sja1105_mdio.c
+++ b/drivers/net/dsa/sja1105/sja1105_mdio.c
@@ -400,7 +400,7 @@ static int sja1105_mdiobus_pcs_register(struct sja1105_private *priv)
}
for (port = 0; port < ds->num_ports; port++) {
- struct dw_xpcs *xpcs;
+ struct phylink_pcs *pcs;
if (dsa_is_unused_port(ds, port))
continue;
@@ -409,13 +409,13 @@ static int sja1105_mdiobus_pcs_register(struct sja1105_private *priv)
priv->phy_mode[port] != PHY_INTERFACE_MODE_2500BASEX)
continue;
- xpcs = xpcs_create_mdiodev(bus, port, priv->phy_mode[port]);
- if (IS_ERR(xpcs)) {
- rc = PTR_ERR(xpcs);
+ pcs = xpcs_create_pcs_mdiodev(bus, port);
+ if (IS_ERR(pcs)) {
+ rc = PTR_ERR(pcs);
goto out_pcs_free;
}
- priv->xpcs[port] = xpcs;
+ priv->pcs[port] = pcs;
}
priv->mdio_pcs = bus;
@@ -424,11 +424,10 @@ static int sja1105_mdiobus_pcs_register(struct sja1105_private *priv)
out_pcs_free:
for (port = 0; port < ds->num_ports; port++) {
- if (!priv->xpcs[port])
- continue;
-
- xpcs_destroy(priv->xpcs[port]);
- priv->xpcs[port] = NULL;
+ if (priv->pcs[port]) {
+ xpcs_destroy_pcs(priv->pcs[port]);
+ priv->pcs[port] = NULL;
+ }
}
mdiobus_unregister(bus);
@@ -446,11 +445,10 @@ static void sja1105_mdiobus_pcs_unregister(struct sja1105_private *priv)
return;
for (port = 0; port < ds->num_ports; port++) {
- if (!priv->xpcs[port])
- continue;
-
- xpcs_destroy(priv->xpcs[port]);
- priv->xpcs[port] = NULL;
+ if (priv->pcs[port]) {
+ xpcs_destroy_pcs(priv->pcs[port]);
+ priv->pcs[port] = NULL;
+ }
}
mdiobus_unregister(priv->mdio_pcs);
@@ -470,13 +468,10 @@ int sja1105_mdiobus_register(struct dsa_switch *ds)
if (rc)
return rc;
- mdio_node = of_get_child_by_name(switch_node, "mdios");
+ mdio_node = of_get_available_child_by_name(switch_node, "mdios");
if (!mdio_node)
return 0;
- if (!of_device_is_available(mdio_node))
- goto out_put_mdio_node;
-
if (regs->mdio_100base_tx != SJA1105_RSV_ADDR) {
rc = sja1105_mdiobus_base_tx_register(priv, mdio_node);
if (rc)
@@ -489,7 +484,6 @@ int sja1105_mdiobus_register(struct dsa_switch *ds)
goto err_free_base_tx_mdiobus;
}
-out_put_mdio_node:
of_node_put(mdio_node);
return 0;
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c
index a7d41e781398..fefe46e2a5e6 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.c
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.c
@@ -58,60 +58,64 @@ enum sja1105_ptp_clk_mode {
#define ptp_data_to_sja1105(d) \
container_of((d), struct sja1105_private, ptp_data)
-int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
+int sja1105_hwtstamp_set(struct dsa_switch *ds, int port,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct sja1105_private *priv = ds->priv;
- struct hwtstamp_config config;
+ unsigned long hwts_tx_en, hwts_rx_en;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
+ hwts_tx_en = priv->hwts_tx_en;
+ hwts_rx_en = priv->hwts_rx_en;
- switch (config.tx_type) {
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
- priv->hwts_tx_en &= ~BIT(port);
+ hwts_tx_en &= ~BIT(port);
break;
case HWTSTAMP_TX_ON:
- priv->hwts_tx_en |= BIT(port);
+ hwts_tx_en |= BIT(port);
break;
default:
return -ERANGE;
}
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
- priv->hwts_rx_en &= ~BIT(port);
+ hwts_rx_en &= ~BIT(port);
break;
- default:
- priv->hwts_rx_en |= BIT(port);
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ hwts_rx_en |= BIT(port);
break;
+ default:
+ return -ERANGE;
}
- if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
- return -EFAULT;
+ priv->hwts_tx_en = hwts_tx_en;
+ priv->hwts_rx_en = hwts_rx_en;
+
return 0;
}
-int sja1105_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr)
+int sja1105_hwtstamp_get(struct dsa_switch *ds, int port,
+ struct kernel_hwtstamp_config *config)
{
struct sja1105_private *priv = ds->priv;
- struct hwtstamp_config config;
- config.flags = 0;
+ config->flags = 0;
if (priv->hwts_tx_en & BIT(port))
- config.tx_type = HWTSTAMP_TX_ON;
+ config->tx_type = HWTSTAMP_TX_ON;
else
- config.tx_type = HWTSTAMP_TX_OFF;
+ config->tx_type = HWTSTAMP_TX_OFF;
if (priv->hwts_rx_en & BIT(port))
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
else
- config.rx_filter = HWTSTAMP_FILTER_NONE;
+ config->rx_filter = HWTSTAMP_FILTER_NONE;
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
int sja1105_get_ts_info(struct dsa_switch *ds, int port,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct sja1105_private *priv = ds->priv;
struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
@@ -727,10 +731,6 @@ static int sja1105_per_out_enable(struct sja1105_private *priv,
if (perout->index != 0)
return -EOPNOTSUPP;
- /* Reject requests with unsupported flags */
- if (perout->flags)
- return -EOPNOTSUPP;
-
mutex_lock(&ptp_data->lock);
rc = sja1105_change_ptp_clk_pin_func(priv, PTP_PF_PEROUT);
@@ -810,13 +810,6 @@ static int sja1105_extts_enable(struct sja1105_private *priv,
if (extts->index != 0)
return -EOPNOTSUPP;
- /* Reject requests with unsupported flags */
- if (extts->flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_FALLING_EDGE |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
/* We can only enable time stamping on both edges, sadly. */
if ((extts->flags & PTP_STRICT_FLAGS) &&
(extts->flags & PTP_ENABLE_FEATURE) &&
@@ -832,7 +825,7 @@ static int sja1105_extts_enable(struct sja1105_private *priv,
if (on)
sja1105_ptp_extts_setup_timer(&priv->ptp_data);
else
- del_timer_sync(&priv->ptp_data.extts_timer);
+ timer_delete_sync(&priv->ptp_data.extts_timer);
return 0;
}
@@ -902,6 +895,9 @@ int sja1105_ptp_clock_register(struct dsa_switch *ds)
.n_pins = 1,
.n_ext_ts = 1,
.n_per_out = 1,
+ .supported_extts_flags = PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS,
};
/* Only used on SJA1105 */
@@ -929,7 +925,7 @@ void sja1105_ptp_clock_unregister(struct dsa_switch *ds)
if (IS_ERR_OR_NULL(ptp_data->clock))
return;
- del_timer_sync(&ptp_data->extts_timer);
+ timer_delete_sync(&ptp_data->extts_timer);
ptp_cancel_worker_sync(ptp_data->clock);
skb_queue_purge(&ptp_data->skb_txtstamp_queue);
skb_queue_purge(&ptp_data->skb_rxtstamp_queue);
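The sja1105_hwtstamp_get()/sja1105_hwtstamp_set() rewrite above follows the timestamping API conversion: drivers no longer copy a struct hwtstamp_config to and from struct ifreq themselves; they receive a kernel-space struct kernel_hwtstamp_config (plus an extack on set), and the core handles the user-space copies. A rough sketch of the new callback shape ("foo" names are hypothetical):

#include <linux/net_tstamp.h>
#include <net/dsa.h>

static int foo_hwtstamp_set(struct dsa_switch *ds, int port,
			    struct kernel_hwtstamp_config *config,
			    struct netlink_ext_ack *extack)
{
	/* config already lives in kernel space; just validate and latch it */
	switch (config->tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	/* stash the requested state in ds->priv; the core copies the
	 * (possibly adjusted) config back to user space itself
	 */
	return 0;
}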
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.h b/drivers/net/dsa/sja1105/sja1105_ptp.h
index 416461ee95d2..325e3777ea07 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.h
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.h
@@ -101,7 +101,7 @@ void sja1105pqrs_ptp_cmd_packing(u8 *buf, struct sja1105_ptp_cmd *cmd,
enum packing_op op);
int sja1105_get_ts_info(struct dsa_switch *ds, int port,
- struct ethtool_ts_info *ts);
+ struct kernel_ethtool_ts_info *ts);
void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int slot,
struct sk_buff *clone);
@@ -112,9 +112,12 @@ bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
void sja1105_port_txtstamp(struct dsa_switch *ds, int port,
struct sk_buff *skb);
-int sja1105_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr);
+int sja1105_hwtstamp_get(struct dsa_switch *ds, int port,
+ struct kernel_hwtstamp_config *config);
-int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr);
+int sja1105_hwtstamp_set(struct dsa_switch *ds, int port,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
int __sja1105_ptp_gettimex(struct dsa_switch *ds, u64 *ns,
struct ptp_system_timestamp *sts);
diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.c b/drivers/net/dsa/sja1105/sja1105_static_config.c
index baba204ad62f..ffece8a400a6 100644
--- a/drivers/net/dsa/sja1105/sja1105_static_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_static_config.c
@@ -26,12 +26,8 @@ void sja1105_pack(void *buf, const u64 *val, int start, int end, size_t len)
pr_err("Start bit (%d) expected to be larger than end (%d)\n",
start, end);
} else if (rc == -ERANGE) {
- if ((start - end + 1) > 64)
- pr_err("Field %d-%d too large for 64 bits!\n",
- start, end);
- else
- pr_err("Cannot store %llx inside bits %d-%d (would truncate)\n",
- *val, start, end);
+ pr_err("Field %d-%d too large for 64 bits!\n",
+ start, end);
}
dump_stack();
}
@@ -1921,8 +1917,10 @@ int sja1105_table_delete_entry(struct sja1105_table *table, int i)
if (i > table->entry_count)
return -ERANGE;
- memmove(entries + i * entry_size, entries + (i + 1) * entry_size,
- (table->entry_count - i) * entry_size);
+ if (i + 1 < table->entry_count) {
+ memmove(entries + i * entry_size, entries + (i + 1) * entry_size,
+ (table->entry_count - i - 1) * entry_size);
+ }
table->entry_count--;
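The sja1105_table_delete_entry() fix above tightens the memmove() bounds: deleting entry i from a table of entry_count entries should shift only the (entry_count - i - 1) entries that follow it, and skip the move entirely when the last entry is deleted; the old code shifted (entry_count - i) entries and read one entry past the end of the array. A generic sketch of the corrected shift (illustrative, not driver code):

#include <linux/string.h>
#include <linux/types.h>

/* Remove entry i by shifting the tail down; mirrors the corrected bounds. */
static void foo_table_delete(u8 *entries, size_t entry_size,
			     size_t entry_count, size_t i)
{
	if (i + 1 < entry_count)
		memmove(entries + i * entry_size,
			entries + (i + 1) * entry_size,
			(entry_count - i - 1) * entry_size);
	/* e.g. entry_count = 4, i = 1: entries 2 and 3 move down (2 entries);
	 * i = 3 (last entry): nothing to move, the count simply drops to 3.
	 */
}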
diff --git a/drivers/net/dsa/sja1105/sja1105_tas.c b/drivers/net/dsa/sja1105/sja1105_tas.c
index d7818710bc02..d5949d2c3e71 100644
--- a/drivers/net/dsa/sja1105/sja1105_tas.c
+++ b/drivers/net/dsa/sja1105/sja1105_tas.c
@@ -775,9 +775,8 @@ static void sja1105_tas_state_machine(struct work_struct *work)
base_time_ts = ns_to_timespec64(base_time);
now_ts = ns_to_timespec64(now);
- dev_dbg(ds->dev, "OPER base time %lld.%09ld (now %lld.%09ld)\n",
- base_time_ts.tv_sec, base_time_ts.tv_nsec,
- now_ts.tv_sec, now_ts.tv_nsec);
+ dev_dbg(ds->dev, "OPER base time %ptSp (now %ptSp)\n",
+ &base_time_ts, &now_ts);
break;
@@ -798,8 +797,7 @@ static void sja1105_tas_state_machine(struct work_struct *work)
if (now < tas_data->oper_base_time) {
/* TAS has not started yet */
diff = ns_to_timespec64(tas_data->oper_base_time - now);
- dev_dbg(ds->dev, "time to start: [%lld.%09ld]",
- diff.tv_sec, diff.tv_nsec);
+ dev_dbg(ds->dev, "time to start: [%ptSp]", &diff);
break;
}
diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c
index ae70eac3be28..9d31b8258268 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-core.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-core.c
@@ -17,13 +17,17 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
+#include <linux/iopoll.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/bitops.h>
+#include <linux/bitfield.h>
#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
+#include <linux/dsa/8021q.h>
#include <linux/random.h>
#include <net/dsa.h>
@@ -33,11 +37,17 @@
#define VSC73XX_BLOCK_ANALYZER 0x2 /* Only subblock 0 */
#define VSC73XX_BLOCK_MII 0x3 /* Subblocks 0 and 1 */
#define VSC73XX_BLOCK_MEMINIT 0x3 /* Only subblock 2 */
-#define VSC73XX_BLOCK_CAPTURE 0x4 /* Only subblock 2 */
+#define VSC73XX_BLOCK_CAPTURE 0x4 /* Subblocks 0-4, 6, 7 */
#define VSC73XX_BLOCK_ARBITER 0x5 /* Only subblock 0 */
#define VSC73XX_BLOCK_SYSTEM 0x7 /* Only subblock 0 */
+/* MII Block subblock */
+#define VSC73XX_BLOCK_MII_INTERNAL 0x0 /* Internal MDIO subblock */
+#define VSC73XX_BLOCK_MII_EXTERNAL 0x1 /* External MDIO subblock */
+
#define CPU_PORT 6 /* CPU port */
+#define VSC73XX_NUM_FDB_ROWS 2048
+#define VSC73XX_NUM_BUCKETS 4
/* MAC Block registers */
#define VSC73XX_MAC_CFG 0x00
@@ -61,6 +71,8 @@
#define VSC73XX_CAT_DROP 0x6e
#define VSC73XX_CAT_PR_MISC_L2 0x6f
#define VSC73XX_CAT_PR_USR_PRIO 0x75
+#define VSC73XX_CAT_VLAN_MISC 0x79
+#define VSC73XX_CAT_PORT_VLAN 0x7a
#define VSC73XX_Q_MISC_CONF 0xdf
/* MAC_CFG register bits */
@@ -121,6 +133,17 @@
#define VSC73XX_ADVPORTM_IO_LOOPBACK BIT(1)
#define VSC73XX_ADVPORTM_HOST_LOOPBACK BIT(0)
+/* TXUPDCFG transmit modify setup bits */
+#define VSC73XX_TXUPDCFG_DSCP_REWR_MODE GENMASK(20, 19)
+#define VSC73XX_TXUPDCFG_DSCP_REWR_ENA BIT(18)
+#define VSC73XX_TXUPDCFG_TX_INT_TO_USRPRIO_ENA BIT(17)
+#define VSC73XX_TXUPDCFG_TX_UNTAGGED_VID GENMASK(15, 4)
+#define VSC73XX_TXUPDCFG_TX_UNTAGGED_VID_ENA BIT(3)
+#define VSC73XX_TXUPDCFG_TX_UPDATE_CRC_CPU_ENA BIT(1)
+#define VSC73XX_TXUPDCFG_TX_INSERT_TAG BIT(0)
+
+#define VSC73XX_TXUPDCFG_TX_UNTAGGED_VID_SHIFT 4
+
/* CAT_DROP categorizer frame dropping register bits */
#define VSC73XX_CAT_DROP_DROP_MC_SMAC_ENA BIT(6)
#define VSC73XX_CAT_DROP_FWD_CTRL_ENA BIT(4)
@@ -134,6 +157,15 @@
#define VSC73XX_Q_MISC_CONF_EARLY_TX_512 (1 << 1)
#define VSC73XX_Q_MISC_CONF_MAC_PAUSE_MODE BIT(0)
+/* CAT_VLAN_MISC categorizer VLAN miscellaneous bits */
+#define VSC73XX_CAT_VLAN_MISC_VLAN_TCI_IGNORE_ENA BIT(8)
+#define VSC73XX_CAT_VLAN_MISC_VLAN_KEEP_TAG_ENA BIT(7)
+
+/* CAT_PORT_VLAN categorizer port VLAN */
+#define VSC73XX_CAT_PORT_VLAN_VLAN_CFI BIT(15)
+#define VSC73XX_CAT_PORT_VLAN_VLAN_USR_PRIO GENMASK(14, 12)
+#define VSC73XX_CAT_PORT_VLAN_VLAN_VID GENMASK(11, 0)
+
/* Frame analyzer block 2 registers */
#define VSC73XX_STORMLIMIT 0x02
#define VSC73XX_ADVLEARN 0x03
@@ -163,6 +195,44 @@
#define VSC73XX_AGENCTRL 0xf0
#define VSC73XX_CAPRST 0xff
+#define VSC73XX_SRCMASKS_CPU_COPY BIT(27)
+#define VSC73XX_SRCMASKS_MIRROR BIT(26)
+#define VSC73XX_SRCMASKS_PORTS_MASK GENMASK(7, 0)
+
+#define VSC73XX_MACHDATA_VID GENMASK(27, 16)
+#define VSC73XX_MACHDATA_MAC0 GENMASK(15, 8)
+#define VSC73XX_MACHDATA_MAC1 GENMASK(7, 0)
+#define VSC73XX_MACLDATA_MAC2 GENMASK(31, 24)
+#define VSC73XX_MACLDATA_MAC3 GENMASK(23, 16)
+#define VSC73XX_MACLDATA_MAC4 GENMASK(15, 8)
+#define VSC73XX_MACLDATA_MAC5 GENMASK(7, 0)
+
+#define VSC73XX_HASH0_VID_FROM_MASK GENMASK(5, 0)
+#define VSC73XX_HASH0_MAC0_FROM_MASK GENMASK(7, 4)
+#define VSC73XX_HASH1_MAC0_FROM_MASK GENMASK(3, 0)
+#define VSC73XX_HASH1_MAC1_FROM_MASK GENMASK(7, 1)
+#define VSC73XX_HASH2_MAC1_FROM_MASK BIT(0)
+#define VSC73XX_HASH2_MAC2_FROM_MASK GENMASK(7, 0)
+#define VSC73XX_HASH2_MAC3_FROM_MASK GENMASK(7, 6)
+#define VSC73XX_HASH3_MAC3_FROM_MASK GENMASK(5, 0)
+#define VSC73XX_HASH3_MAC4_FROM_MASK GENMASK(7, 3)
+#define VSC73XX_HASH4_MAC4_FROM_MASK GENMASK(2, 0)
+
+#define VSC73XX_HASH0_VID_TO_MASK GENMASK(9, 4)
+#define VSC73XX_HASH0_MAC0_TO_MASK GENMASK(3, 0)
+#define VSC73XX_HASH1_MAC0_TO_MASK GENMASK(10, 7)
+#define VSC73XX_HASH1_MAC1_TO_MASK GENMASK(6, 0)
+#define VSC73XX_HASH2_MAC1_TO_MASK BIT(10)
+#define VSC73XX_HASH2_MAC2_TO_MASK GENMASK(9, 2)
+#define VSC73XX_HASH2_MAC3_TO_MASK GENMASK(1, 0)
+#define VSC73XX_HASH3_MAC3_TO_MASK GENMASK(10, 5)
+#define VSC73XX_HASH3_MAC4_TO_MASK GENMASK(4, 0)
+#define VSC73XX_HASH4_MAC4_TO_MASK GENMASK(10, 8)
+
+#define VSC73XX_MACTINDX_SHADOW BIT(13)
+#define VSC73XX_MACTINDX_BUCKET_MSK GENMASK(12, 11)
+#define VSC73XX_MACTINDX_INDEX_MSK GENMASK(10, 0)
+
#define VSC73XX_MACACCESS_CPU_COPY BIT(14)
#define VSC73XX_MACACCESS_FWD_KILL BIT(13)
#define VSC73XX_MACACCESS_IGNORE_VLAN BIT(12)
@@ -184,16 +254,37 @@
#define VSC73XX_VLANACCESS_VLAN_MIRROR BIT(29)
#define VSC73XX_VLANACCESS_VLAN_SRC_CHECK BIT(28)
#define VSC73XX_VLANACCESS_VLAN_PORT_MASK GENMASK(9, 2)
-#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_MASK GENMASK(2, 0)
+#define VSC73XX_VLANACCESS_VLAN_PORT_MASK_SHIFT 2
+#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_MASK GENMASK(1, 0)
#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_IDLE 0
#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_READ_ENTRY 1
#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_WRITE_ENTRY 2
#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_CLEAR_TABLE 3
/* MII block 3 registers */
-#define VSC73XX_MII_STAT 0x0
-#define VSC73XX_MII_CMD 0x1
-#define VSC73XX_MII_DATA 0x2
+#define VSC73XX_MII_STAT 0x0
+#define VSC73XX_MII_CMD 0x1
+#define VSC73XX_MII_DATA 0x2
+#define VSC73XX_MII_MPRES 0x3
+
+#define VSC73XX_MII_STAT_BUSY BIT(3)
+#define VSC73XX_MII_STAT_READ BIT(2)
+#define VSC73XX_MII_STAT_WRITE BIT(1)
+
+#define VSC73XX_MII_CMD_SCAN BIT(27)
+#define VSC73XX_MII_CMD_OPERATION BIT(26)
+#define VSC73XX_MII_CMD_PHY_ADDR GENMASK(25, 21)
+#define VSC73XX_MII_CMD_PHY_REG GENMASK(20, 16)
+#define VSC73XX_MII_CMD_WRITE_DATA GENMASK(15, 0)
+
+#define VSC73XX_MII_DATA_FAILURE BIT(16)
+#define VSC73XX_MII_DATA_READ_DATA GENMASK(15, 0)
+
+#define VSC73XX_MII_MPRES_NOPREAMBLE BIT(6)
+#define VSC73XX_MII_MPRES_PRESCALEVAL GENMASK(5, 0)
+#define VSC73XX_MII_PRESCALEVAL_MIN 3 /* min allowed mdio clock prescaler */
+
+#define VSC73XX_MII_STAT_BUSY BIT(3)
/* Arbiter block 5 registers */
#define VSC73XX_ARBEMPTY 0x0c
@@ -268,11 +359,22 @@
#define IS_7398(a) ((a)->chipid == VSC73XX_CHIPID_ID_7398)
#define IS_739X(a) (IS_7395(a) || IS_7398(a))
+#define VSC73XX_POLL_SLEEP_US 1000
+#define VSC73XX_MDIO_POLL_SLEEP_US 5
+#define VSC73XX_POLL_TIMEOUT_US 10000
+
struct vsc73xx_counter {
u8 counter;
const char *name;
};
+struct vsc73xx_fdb {
+ u16 vid;
+ u8 port;
+ u8 mac[ETH_ALEN];
+ bool valid;
+};
+
/* Counters are named according to the MIB standards where applicable.
* Some counters are custom, non-standard. The standard counters are
* named in accordance with RFC2819, RFC2021 and IEEE Std 802.3-2002 Annex
@@ -339,6 +441,17 @@ static const struct vsc73xx_counter vsc73xx_tx_counters[] = {
{ 29, "TxQoSClass3" }, /* non-standard counter */
};
+struct vsc73xx_vlan_summary {
+ size_t num_tagged;
+ size_t num_untagged;
+};
+
+enum vsc73xx_port_vlan_conf {
+ VSC73XX_VLAN_FILTER,
+ VSC73XX_VLAN_FILTER_UNTAG_ALL,
+ VSC73XX_VLAN_IGNORE,
+};
+
int vsc73xx_is_addr_valid(u8 block, u8 subblock)
{
switch (block) {
@@ -359,13 +472,19 @@ int vsc73xx_is_addr_valid(u8 block, u8 subblock)
break;
case VSC73XX_BLOCK_MII:
- case VSC73XX_BLOCK_CAPTURE:
case VSC73XX_BLOCK_ARBITER:
switch (subblock) {
case 0 ... 1:
return 1;
}
break;
+ case VSC73XX_BLOCK_CAPTURE:
+ switch (subblock) {
+ case 0 ... 4:
+ case 6 ... 7:
+ return 1;
+ }
+ break;
}
return 0;
@@ -483,6 +602,22 @@ static int vsc73xx_detect(struct vsc73xx *vsc)
return 0;
}
+static int vsc73xx_mdio_busy_check(struct vsc73xx *vsc)
+{
+ int ret, err;
+ u32 val;
+
+ ret = read_poll_timeout(vsc73xx_read, err,
+ err < 0 || !(val & VSC73XX_MII_STAT_BUSY),
+ VSC73XX_MDIO_POLL_SLEEP_US,
+ VSC73XX_POLL_TIMEOUT_US, false, vsc,
+ VSC73XX_BLOCK_MII, VSC73XX_BLOCK_MII_INTERNAL,
+ VSC73XX_MII_STAT, &val);
+ if (ret)
+ return ret;
+ return err;
+}
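vsc73xx_mdio_busy_check() above (and the VLAN-table variant further down) relies on the read_poll_timeout() helper from <linux/iopoll.h>: the second argument captures the accessor's return value, the condition may test both that value and the register contents, and the macro returns -ETIMEDOUT if the condition never becomes true. A standalone sketch of the pattern, with hypothetical "foo" names:

#include <linux/bits.h>
#include <linux/iopoll.h>
#include <linux/types.h>

#define FOO_BUSY	BIT(3)			/* hypothetical busy bit */

struct foo_chip;				/* hypothetical device handle */

/* hypothetical register accessor: returns <0 on bus error, 0 otherwise */
int foo_read_status(struct foo_chip *chip, u32 *status);

static int foo_wait_ready(struct foo_chip *chip)
{
	u32 status;
	int ret, err;

	/* err captures foo_read_status()'s return; the poll also stops once
	 * the busy bit clears in the value read into status
	 */
	ret = read_poll_timeout(foo_read_status, err,
				err < 0 || !(status & FOO_BUSY),
				1000, 10000, false, chip, &status);
	if (ret)
		return ret;	/* -ETIMEDOUT */

	return err;		/* 0, or the accessor's error */
}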
+
static int vsc73xx_phy_read(struct dsa_switch *ds, int phy, int regnum)
{
struct vsc73xx *vsc = ds->priv;
@@ -490,21 +625,33 @@ static int vsc73xx_phy_read(struct dsa_switch *ds, int phy, int regnum)
u32 val;
int ret;
+ ret = vsc73xx_mdio_busy_check(vsc);
+ if (ret)
+ return ret;
+
/* Setting bit 26 means "read" */
- cmd = BIT(26) | (phy << 21) | (regnum << 16);
- ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, 0, 1, cmd);
+ cmd = VSC73XX_MII_CMD_OPERATION |
+ FIELD_PREP(VSC73XX_MII_CMD_PHY_ADDR, phy) |
+ FIELD_PREP(VSC73XX_MII_CMD_PHY_REG, regnum);
+ ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, VSC73XX_BLOCK_MII_INTERNAL,
+ VSC73XX_MII_CMD, cmd);
+ if (ret)
+ return ret;
+
+ ret = vsc73xx_mdio_busy_check(vsc);
if (ret)
return ret;
- msleep(2);
- ret = vsc73xx_read(vsc, VSC73XX_BLOCK_MII, 0, 2, &val);
+
+ ret = vsc73xx_read(vsc, VSC73XX_BLOCK_MII, VSC73XX_BLOCK_MII_INTERNAL,
+ VSC73XX_MII_DATA, &val);
if (ret)
return ret;
- if (val & BIT(16)) {
+ if (val & VSC73XX_MII_DATA_FAILURE) {
dev_err(vsc->dev, "reading reg %02x from phy%d failed\n",
regnum, phy);
return -EIO;
}
- val &= 0xFFFFU;
+ val &= VSC73XX_MII_DATA_READ_DATA;
dev_dbg(vsc->dev, "read reg %02x from phy%d = %04x\n",
regnum, phy, val);
@@ -519,19 +666,15 @@ static int vsc73xx_phy_write(struct dsa_switch *ds, int phy, int regnum,
u32 cmd;
int ret;
- /* It was found through tedious experiments that this router
- * chip really hates to have it's PHYs reset. They
- * never recover if that happens: autonegotiation stops
- * working after a reset. Just filter out this command.
- * (Resetting the whole chip is OK.)
- */
- if (regnum == 0 && (val & BIT(15))) {
- dev_info(vsc->dev, "reset PHY - disallowed\n");
- return 0;
- }
+ ret = vsc73xx_mdio_busy_check(vsc);
+ if (ret)
+ return ret;
- cmd = (phy << 21) | (regnum << 16);
- ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, 0, 1, cmd);
+ cmd = FIELD_PREP(VSC73XX_MII_CMD_PHY_ADDR, phy) |
+ FIELD_PREP(VSC73XX_MII_CMD_PHY_REG, regnum) |
+ FIELD_PREP(VSC73XX_MII_CMD_WRITE_DATA, val);
+ ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, VSC73XX_BLOCK_MII_INTERNAL,
+ VSC73XX_MII_CMD, cmd);
if (ret)
return ret;
@@ -553,16 +696,164 @@ static enum dsa_tag_protocol vsc73xx_get_tag_protocol(struct dsa_switch *ds,
* cannot access the tag. (See "Internal frame header" section
* 3.9.1 in the manual.)
*/
- return DSA_TAG_PROTO_NONE;
+ return DSA_TAG_PROTO_VSC73XX_8021Q;
+}
+
+static int vsc73xx_wait_for_vlan_table_cmd(struct vsc73xx *vsc)
+{
+ int ret, err;
+ u32 val;
+
+ ret = read_poll_timeout(vsc73xx_read, err,
+ err < 0 ||
+ ((val & VSC73XX_VLANACCESS_VLAN_TBL_CMD_MASK) ==
+ VSC73XX_VLANACCESS_VLAN_TBL_CMD_IDLE),
+ VSC73XX_POLL_SLEEP_US, VSC73XX_POLL_TIMEOUT_US,
+ false, vsc, VSC73XX_BLOCK_ANALYZER,
+ 0, VSC73XX_VLANACCESS, &val);
+ if (ret)
+ return ret;
+ return err;
+}
+
+static int
+vsc73xx_read_vlan_table_entry(struct vsc73xx *vsc, u16 vid, u8 *portmap)
+{
+ u32 val;
+ int ret;
+
+ vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_VLANTIDX, vid);
+
+ ret = vsc73xx_wait_for_vlan_table_cmd(vsc);
+ if (ret)
+ return ret;
+
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_VLANACCESS,
+ VSC73XX_VLANACCESS_VLAN_TBL_CMD_MASK,
+ VSC73XX_VLANACCESS_VLAN_TBL_CMD_READ_ENTRY);
+
+ ret = vsc73xx_wait_for_vlan_table_cmd(vsc);
+ if (ret)
+ return ret;
+
+ vsc73xx_read(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_VLANACCESS, &val);
+ *portmap = (val & VSC73XX_VLANACCESS_VLAN_PORT_MASK) >>
+ VSC73XX_VLANACCESS_VLAN_PORT_MASK_SHIFT;
+
+ return 0;
+}
+
+static int
+vsc73xx_write_vlan_table_entry(struct vsc73xx *vsc, u16 vid, u8 portmap)
+{
+ int ret;
+
+ vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_VLANTIDX, vid);
+
+ ret = vsc73xx_wait_for_vlan_table_cmd(vsc);
+ if (ret)
+ return ret;
+
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_VLANACCESS,
+ VSC73XX_VLANACCESS_VLAN_TBL_CMD_MASK |
+ VSC73XX_VLANACCESS_VLAN_SRC_CHECK |
+ VSC73XX_VLANACCESS_VLAN_PORT_MASK,
+ VSC73XX_VLANACCESS_VLAN_TBL_CMD_WRITE_ENTRY |
+ VSC73XX_VLANACCESS_VLAN_SRC_CHECK |
+ (portmap << VSC73XX_VLANACCESS_VLAN_PORT_MASK_SHIFT));
+
+ return vsc73xx_wait_for_vlan_table_cmd(vsc);
+}
+
+static int
+vsc73xx_update_vlan_table(struct vsc73xx *vsc, int port, u16 vid, bool set)
+{
+ u8 portmap;
+ int ret;
+
+ ret = vsc73xx_read_vlan_table_entry(vsc, vid, &portmap);
+ if (ret)
+ return ret;
+
+ if (set)
+ portmap |= BIT(port);
+ else
+ portmap &= ~BIT(port);
+
+ return vsc73xx_write_vlan_table_entry(vsc, vid, portmap);
+}
+
+static int vsc73xx_configure_rgmii_port_delay(struct dsa_switch *ds)
+{
+	/* Keep 2.0 ns delay for backward compatibility */
+ u32 tx_delay = VSC73XX_GMIIDELAY_GMII0_GTXDELAY_2_0_NS;
+ u32 rx_delay = VSC73XX_GMIIDELAY_GMII0_RXDELAY_2_0_NS;
+ struct dsa_port *dp = dsa_to_port(ds, CPU_PORT);
+ struct device_node *port_dn = dp->dn;
+ struct vsc73xx *vsc = ds->priv;
+ u32 delay;
+
+ if (!of_property_read_u32(port_dn, "tx-internal-delay-ps", &delay)) {
+ switch (delay) {
+ case 0:
+ tx_delay = VSC73XX_GMIIDELAY_GMII0_GTXDELAY_NONE;
+ break;
+ case 1400:
+ tx_delay = VSC73XX_GMIIDELAY_GMII0_GTXDELAY_1_4_NS;
+ break;
+ case 1700:
+ tx_delay = VSC73XX_GMIIDELAY_GMII0_GTXDELAY_1_7_NS;
+ break;
+ case 2000:
+ break;
+ default:
+ dev_err(vsc->dev,
+ "Unsupported RGMII Transmit Clock Delay\n");
+ return -EINVAL;
+ }
+ } else {
+ dev_dbg(vsc->dev,
+ "RGMII Transmit Clock Delay isn't configured, set to 2.0 ns\n");
+ }
+
+ if (!of_property_read_u32(port_dn, "rx-internal-delay-ps", &delay)) {
+ switch (delay) {
+ case 0:
+ rx_delay = VSC73XX_GMIIDELAY_GMII0_RXDELAY_NONE;
+ break;
+ case 1400:
+ rx_delay = VSC73XX_GMIIDELAY_GMII0_RXDELAY_1_4_NS;
+ break;
+ case 1700:
+ rx_delay = VSC73XX_GMIIDELAY_GMII0_RXDELAY_1_7_NS;
+ break;
+ case 2000:
+ break;
+ default:
+ dev_err(vsc->dev,
+ "Unsupported RGMII Receive Clock Delay value\n");
+ return -EINVAL;
+ }
+ } else {
+ dev_dbg(vsc->dev,
+ "RGMII Receive Clock Delay isn't configured, set to 2.0 ns\n");
+ }
+
+ /* MII delay, set both GTX and RX delay */
+ return vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GMIIDELAY,
+ tx_delay | rx_delay);
}
static int vsc73xx_setup(struct dsa_switch *ds)
{
struct vsc73xx *vsc = ds->priv;
- int i;
+ int i, ret, val;
dev_info(vsc->dev, "set up the switch\n");
+ ds->max_num_bridges = DSA_TAG_8021Q_MAX_NUM_BRIDGES;
+ ds->fdb_isolation = true;
+
/* Issue RESET */
vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GLORESET,
VSC73XX_GLORESET_MASTER_RESET);
@@ -590,7 +881,7 @@ static int vsc73xx_setup(struct dsa_switch *ds)
VSC73XX_MACACCESS,
VSC73XX_MACACCESS_CMD_CLEAR_TABLE);
- /* Clear VLAN table */
+ /* Set VLAN table to default values */
vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0,
VSC73XX_VLANACCESS,
VSC73XX_VLANACCESS_VLAN_TBL_CMD_CLEAR_TABLE);
@@ -615,26 +906,53 @@ static int vsc73xx_setup(struct dsa_switch *ds)
VSC73XX_MAC_CFG, VSC73XX_MAC_CFG_RESET);
}
- /* MII delay, set both GTX and RX delay to 2 ns */
- vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GMIIDELAY,
- VSC73XX_GMIIDELAY_GMII0_GTXDELAY_2_0_NS |
- VSC73XX_GMIIDELAY_GMII0_RXDELAY_2_0_NS);
- /* Enable reception of frames on all ports */
- vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_RECVMASK,
- 0x5f);
+ /* Configure RGMII delay */
+ ret = vsc73xx_configure_rgmii_port_delay(ds);
+ if (ret)
+ return ret;
+
+	/* Ingress VLAN reception mask (table 145) */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_VLANMASK,
+ 0xff);
/* IP multicast flood mask (table 144) */
vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_IFLODMSK,
0xff);
mdelay(50);
+ /* Disable preamble and use maximum allowed clock for the internal
+ * mdio bus, used for communication with internal PHYs only.
+ */
+ val = VSC73XX_MII_MPRES_NOPREAMBLE |
+ FIELD_PREP(VSC73XX_MII_MPRES_PRESCALEVAL,
+ VSC73XX_MII_PRESCALEVAL_MIN);
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MII, VSC73XX_BLOCK_MII_INTERNAL,
+ VSC73XX_MII_MPRES, val);
+
/* Release reset from the internal PHYs */
vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GLORESET,
VSC73XX_GLORESET_PHY_RESET);
udelay(4);
- return 0;
+ /* Clear VLAN table */
+ for (i = 0; i < VLAN_N_VID; i++)
+ vsc73xx_write_vlan_table_entry(vsc, i, 0);
+
+ INIT_LIST_HEAD(&vsc->vlans);
+
+ rtnl_lock();
+ ret = dsa_tag_8021q_register(ds, htons(ETH_P_8021Q));
+ rtnl_unlock();
+
+ return ret;
+}
+
+static void vsc73xx_teardown(struct dsa_switch *ds)
+{
+ rtnl_lock();
+ dsa_tag_8021q_unregister(ds);
+ rtnl_unlock();
}
static void vsc73xx_init_port(struct vsc73xx *vsc, int port)
@@ -713,22 +1031,123 @@ static void vsc73xx_init_port(struct vsc73xx *vsc, int port)
port, VSC73XX_C_RX0, 0);
}
-static void vsc73xx_adjust_enable_port(struct vsc73xx *vsc,
- int port, struct phy_device *phydev,
- u32 initval)
+static void vsc73xx_reset_port(struct vsc73xx *vsc, int port, u32 initval)
+{
+ int ret, err;
+ u32 val;
+
+ /* Disable RX on this port */
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port,
+ VSC73XX_MAC_CFG,
+ VSC73XX_MAC_CFG_RX_EN, 0);
+
+ /* Discard packets */
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
+ VSC73XX_ARBDISC, BIT(port), BIT(port));
+
+ /* Wait until queue is empty */
+ ret = read_poll_timeout(vsc73xx_read, err,
+ err < 0 || (val & BIT(port)),
+ VSC73XX_POLL_SLEEP_US,
+ VSC73XX_POLL_TIMEOUT_US, false,
+ vsc, VSC73XX_BLOCK_ARBITER, 0,
+ VSC73XX_ARBEMPTY, &val);
+ if (ret)
+ dev_err(vsc->dev,
+ "timeout waiting for block arbiter\n");
+ else if (err < 0)
+ dev_err(vsc->dev, "error reading arbiter\n");
+
+ /* Put this port into reset */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG,
+ VSC73XX_MAC_CFG_RESET | initval);
+}
+
+static void vsc73xx_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct vsc73xx *vsc = dp->ds->priv;
+ int port = dp->index;
+
+ /* Special handling of the CPU-facing port */
+ if (port == CPU_PORT) {
+ /* Other ports are already initialized but not this one */
+ vsc73xx_init_port(vsc, CPU_PORT);
+ /* Select the external port for this interface (EXT_PORT)
+ * Enable the GMII GTX external clock
+ * Use double data rate (DDR mode)
+ */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
+ CPU_PORT,
+ VSC73XX_ADVPORTM,
+ VSC73XX_ADVPORTM_EXT_PORT |
+ VSC73XX_ADVPORTM_ENA_GTX |
+ VSC73XX_ADVPORTM_DDR_MODE);
+ }
+}
+
+static void vsc73xx_mac_link_down(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
{
- u32 val = initval;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct vsc73xx *vsc = dp->ds->priv;
+ int port = dp->index;
+
+ /* This routine is described in the datasheet (below ARBDISC register
+ * description)
+ */
+ vsc73xx_reset_port(vsc, port, 0);
+
+ /* Allow backward dropping of frames from this port */
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
+ VSC73XX_SBACKWDROP, BIT(port), BIT(port));
+}
+
+static void vsc73xx_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy, unsigned int mode,
+ phy_interface_t interface, int speed,
+ int duplex, bool tx_pause, bool rx_pause)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct vsc73xx *vsc = dp->ds->priv;
+ int port = dp->index;
+ u32 val;
u8 seed;
- /* Reset this port FIXME: break out subroutine */
- val |= VSC73XX_MAC_CFG_RESET;
- vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG, val);
+ if (speed == SPEED_1000)
+ val = VSC73XX_MAC_CFG_GIGA_MODE | VSC73XX_MAC_CFG_TX_IPG_1000M;
+ else
+ val = VSC73XX_MAC_CFG_TX_IPG_100_10M;
+
+ if (phy_interface_mode_is_rgmii(interface))
+ val |= VSC73XX_MAC_CFG_CLK_SEL_1000M;
+ else
+ val |= VSC73XX_MAC_CFG_CLK_SEL_EXT;
+
+ if (duplex == DUPLEX_FULL)
+ val |= VSC73XX_MAC_CFG_FDX;
+ else
+ /* In datasheet description ("Port Mode Procedure" in 5.6.2)
+ * this bit is configured only for half duplex.
+ */
+ val |= VSC73XX_MAC_CFG_WEXC_DIS;
+
+ /* This routine is described in the datasheet (below ARBDISC register
+ * description)
+ */
+ vsc73xx_reset_port(vsc, port, val);
/* Seed the port randomness with randomness */
get_random_bytes(&seed, 1);
val |= seed << VSC73XX_MAC_CFG_SEED_OFFSET;
val |= VSC73XX_MAC_CFG_SEED_LOAD;
- val |= VSC73XX_MAC_CFG_WEXC_DIS;
+
+	/* Those bits are responsible for MTU only. The kernel takes care of the
+	 * MTU, so let's enable the +8 byte frame length unconditionally.
+ */
+ val |= VSC73XX_MAC_CFG_VLAN_AWR | VSC73XX_MAC_CFG_VLAN_DBLAWR;
+
vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG, val);
/* Flow control for the PHY facing ports:
@@ -741,6 +1160,10 @@ static void vsc73xx_adjust_enable_port(struct vsc73xx *vsc,
VSC73XX_FCCONF_FLOW_CTRL_OBEY |
0xff);
+ /* Accept packets again */
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
+ VSC73XX_ARBDISC, BIT(port), 0);
+
/* Disallow backward dropping of frames from this port */
vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
VSC73XX_SBACKWDROP, BIT(port), 0);
@@ -753,125 +1176,255 @@ static void vsc73xx_adjust_enable_port(struct vsc73xx *vsc,
VSC73XX_MAC_CFG_TX_EN | VSC73XX_MAC_CFG_RX_EN);
}
-static void vsc73xx_adjust_link(struct dsa_switch *ds, int port,
- struct phy_device *phydev)
+static bool vsc73xx_tag_8021q_active(struct dsa_port *dp)
{
- struct vsc73xx *vsc = ds->priv;
- u32 val;
+ return !dsa_port_is_vlan_filtering(dp);
+}
- /* Special handling of the CPU-facing port */
- if (port == CPU_PORT) {
- /* Other ports are already initialized but not this one */
- vsc73xx_init_port(vsc, CPU_PORT);
- /* Select the external port for this interface (EXT_PORT)
- * Enable the GMII GTX external clock
- * Use double data rate (DDR mode)
- */
- vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
- CPU_PORT,
- VSC73XX_ADVPORTM,
- VSC73XX_ADVPORTM_EXT_PORT |
- VSC73XX_ADVPORTM_ENA_GTX |
- VSC73XX_ADVPORTM_DDR_MODE);
+static struct vsc73xx_bridge_vlan *
+vsc73xx_bridge_vlan_find(struct vsc73xx *vsc, u16 vid)
+{
+ struct vsc73xx_bridge_vlan *vlan;
+
+ list_for_each_entry(vlan, &vsc->vlans, list)
+ if (vlan->vid == vid)
+ return vlan;
+
+ return NULL;
+}
+
+static void
+vsc73xx_bridge_vlan_remove_port(struct vsc73xx_bridge_vlan *vsc73xx_vlan,
+ int port)
+{
+ vsc73xx_vlan->portmask &= ~BIT(port);
+
+ if (vsc73xx_vlan->portmask)
+ return;
+
+ list_del(&vsc73xx_vlan->list);
+ kfree(vsc73xx_vlan);
+}
+
+static void vsc73xx_bridge_vlan_summary(struct vsc73xx *vsc, int port,
+ struct vsc73xx_vlan_summary *summary,
+ u16 ignored_vid)
+{
+ size_t num_tagged = 0, num_untagged = 0;
+ struct vsc73xx_bridge_vlan *vlan;
+
+ list_for_each_entry(vlan, &vsc->vlans, list) {
+ if (!(vlan->portmask & BIT(port)) || vlan->vid == ignored_vid)
+ continue;
+
+ if (vlan->untagged & BIT(port))
+ num_untagged++;
+ else
+ num_tagged++;
}
- /* This is the MAC confiuration that always need to happen
- * after a PHY or the CPU port comes up or down.
- */
- if (!phydev->link) {
- int maxloop = 10;
-
- dev_dbg(vsc->dev, "port %d: went down\n",
- port);
-
- /* Disable RX on this port */
- vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port,
- VSC73XX_MAC_CFG,
- VSC73XX_MAC_CFG_RX_EN, 0);
-
- /* Discard packets */
- vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
- VSC73XX_ARBDISC, BIT(port), BIT(port));
-
- /* Wait until queue is empty */
- vsc73xx_read(vsc, VSC73XX_BLOCK_ARBITER, 0,
- VSC73XX_ARBEMPTY, &val);
- while (!(val & BIT(port))) {
- msleep(1);
- vsc73xx_read(vsc, VSC73XX_BLOCK_ARBITER, 0,
- VSC73XX_ARBEMPTY, &val);
- if (--maxloop == 0) {
- dev_err(vsc->dev,
- "timeout waiting for block arbiter\n");
- /* Continue anyway */
- break;
- }
- }
+ summary->num_untagged = num_untagged;
+ summary->num_tagged = num_tagged;
+}
- /* Put this port into reset */
- vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG,
- VSC73XX_MAC_CFG_RESET);
+static u16 vsc73xx_find_first_vlan_untagged(struct vsc73xx *vsc, int port)
+{
+ struct vsc73xx_bridge_vlan *vlan;
- /* Accept packets again */
- vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
- VSC73XX_ARBDISC, BIT(port), 0);
+ list_for_each_entry(vlan, &vsc->vlans, list)
+ if ((vlan->portmask & BIT(port)) &&
+ (vlan->untagged & BIT(port)))
+ return vlan->vid;
- /* Allow backward dropping of frames from this port */
- vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
- VSC73XX_SBACKWDROP, BIT(port), BIT(port));
+ return VLAN_N_VID;
+}
- /* Receive mask (disable forwarding) */
- vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
- VSC73XX_RECVMASK, BIT(port), 0);
+static int vsc73xx_set_vlan_conf(struct vsc73xx *vsc, int port,
+ enum vsc73xx_port_vlan_conf port_vlan_conf)
+{
+ u32 val = 0;
+ int ret;
- return;
+ if (port_vlan_conf == VSC73XX_VLAN_IGNORE)
+ val = VSC73XX_CAT_VLAN_MISC_VLAN_TCI_IGNORE_ENA |
+ VSC73XX_CAT_VLAN_MISC_VLAN_KEEP_TAG_ENA;
+
+ ret = vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port,
+ VSC73XX_CAT_VLAN_MISC,
+ VSC73XX_CAT_VLAN_MISC_VLAN_TCI_IGNORE_ENA |
+ VSC73XX_CAT_VLAN_MISC_VLAN_KEEP_TAG_ENA, val);
+ if (ret)
+ return ret;
+
+ val = (port_vlan_conf == VSC73XX_VLAN_FILTER) ?
+ VSC73XX_TXUPDCFG_TX_INSERT_TAG : 0;
+
+ return vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port,
+ VSC73XX_TXUPDCFG,
+ VSC73XX_TXUPDCFG_TX_INSERT_TAG, val);
+}
+
+/**
+ * vsc73xx_vlan_commit_conf - Update VLAN configuration of a port
+ * @vsc: Switch private data structure
+ * @port: Port index on which to operate
+ *
+ * Update the VLAN behavior of a port to make sure that when it is under
+ * a VLAN filtering bridge, the port is either filtering with tag
+ * preservation, or filtering with all VLANs egress-untagged. Otherwise,
+ * the port ignores VLAN tags from packets and applies the port-based
+ * VID.
+ *
+ * Must be called when changes are made to:
+ * - the bridge VLAN filtering state of the port
+ * - the number or attributes of VLANs from the bridge VLAN table,
+ * while the port is currently VLAN-aware
+ *
+ * Return: 0 on success, or negative errno on error.
+ */
+static int vsc73xx_vlan_commit_conf(struct vsc73xx *vsc, int port)
+{
+ enum vsc73xx_port_vlan_conf port_vlan_conf = VSC73XX_VLAN_IGNORE;
+ struct dsa_port *dp = dsa_to_port(vsc->ds, port);
+
+ if (port == CPU_PORT) {
+ port_vlan_conf = VSC73XX_VLAN_FILTER;
+ } else if (dsa_port_is_vlan_filtering(dp)) {
+ struct vsc73xx_vlan_summary summary;
+
+ port_vlan_conf = VSC73XX_VLAN_FILTER;
+
+ vsc73xx_bridge_vlan_summary(vsc, port, &summary, VLAN_N_VID);
+ if (summary.num_tagged == 0)
+ port_vlan_conf = VSC73XX_VLAN_FILTER_UNTAG_ALL;
}
- /* Figure out what speed was negotiated */
- if (phydev->speed == SPEED_1000) {
- dev_dbg(vsc->dev, "port %d: 1000 Mbit mode full duplex\n",
- port);
+ return vsc73xx_set_vlan_conf(vsc, port, port_vlan_conf);
+}
- /* Set up default for internal port or external RGMII */
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
- val = VSC73XX_MAC_CFG_1000M_F_RGMII;
- else
- val = VSC73XX_MAC_CFG_1000M_F_PHY;
- vsc73xx_adjust_enable_port(vsc, port, phydev, val);
- } else if (phydev->speed == SPEED_100) {
- if (phydev->duplex == DUPLEX_FULL) {
- val = VSC73XX_MAC_CFG_100_10M_F_PHY;
- dev_dbg(vsc->dev,
- "port %d: 100 Mbit full duplex mode\n",
- port);
- } else {
- val = VSC73XX_MAC_CFG_100_10M_H_PHY;
- dev_dbg(vsc->dev,
- "port %d: 100 Mbit half duplex mode\n",
- port);
- }
- vsc73xx_adjust_enable_port(vsc, port, phydev, val);
- } else if (phydev->speed == SPEED_10) {
- if (phydev->duplex == DUPLEX_FULL) {
- val = VSC73XX_MAC_CFG_100_10M_F_PHY;
- dev_dbg(vsc->dev,
- "port %d: 10 Mbit full duplex mode\n",
- port);
- } else {
- val = VSC73XX_MAC_CFG_100_10M_H_PHY;
- dev_dbg(vsc->dev,
- "port %d: 10 Mbit half duplex mode\n",
- port);
- }
- vsc73xx_adjust_enable_port(vsc, port, phydev, val);
- } else {
- dev_err(vsc->dev,
- "could not adjust link: unknown speed\n");
+static int
+vsc73xx_vlan_change_untagged(struct vsc73xx *vsc, int port, u16 vid, bool set)
+{
+ u32 val = 0;
+
+ if (set)
+ val = VSC73XX_TXUPDCFG_TX_UNTAGGED_VID_ENA |
+ ((vid << VSC73XX_TXUPDCFG_TX_UNTAGGED_VID_SHIFT) &
+ VSC73XX_TXUPDCFG_TX_UNTAGGED_VID);
+
+ return vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port,
+ VSC73XX_TXUPDCFG,
+ VSC73XX_TXUPDCFG_TX_UNTAGGED_VID_ENA |
+ VSC73XX_TXUPDCFG_TX_UNTAGGED_VID, val);
+}
+
+/**
+ * vsc73xx_vlan_commit_untagged - Update native VLAN of a port
+ * @vsc: Switch private data structure
+ * @port: Port index on which to operate
+ *
+ * Update the native VLAN of a port (the one VLAN which is transmitted
+ * as egress-untagged on a trunk port) when the port is in VLAN filtering
+ * mode and only one untagged VID is configured.
+ * In other cases there is no need to configure it, because the switch can
+ * untag all VLANs on the port.
+ *
+ * Return: 0 on success, or negative errno on error.
+ */
+static int vsc73xx_vlan_commit_untagged(struct vsc73xx *vsc, int port)
+{
+ struct dsa_port *dp = dsa_to_port(vsc->ds, port);
+ struct vsc73xx_vlan_summary summary;
+ u16 vid = 0;
+ bool valid;
+
+ if (!dsa_port_is_vlan_filtering(dp))
+ /* Port is configured to untag all vlans in that case.
+ * No need to commit untagged config change.
+ */
+ return 0;
+
+ vsc73xx_bridge_vlan_summary(vsc, port, &summary, VLAN_N_VID);
+
+ if (summary.num_untagged > 1)
+ /* Port must untag all vlans in that case.
+ * No need to commit untagged config change.
+ */
+ return 0;
+
+ valid = (summary.num_untagged == 1);
+ if (valid)
+ vid = vsc73xx_find_first_vlan_untagged(vsc, port);
+
+ return vsc73xx_vlan_change_untagged(vsc, port, vid, valid);
+}
+
+static int
+vsc73xx_vlan_change_pvid(struct vsc73xx *vsc, int port, u16 vid, bool set)
+{
+ u32 val = 0;
+ int ret;
+
+ val = set ? 0 : VSC73XX_CAT_DROP_UNTAGGED_ENA;
+
+ ret = vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port,
+ VSC73XX_CAT_DROP,
+ VSC73XX_CAT_DROP_UNTAGGED_ENA, val);
+ if (!set || ret)
+ return ret;
+
+ return vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port,
+ VSC73XX_CAT_PORT_VLAN,
+ VSC73XX_CAT_PORT_VLAN_VLAN_VID,
+ vid & VSC73XX_CAT_PORT_VLAN_VLAN_VID);
+}
+
+/**
+ * vsc73xx_vlan_commit_pvid - Update port-based default VLAN of a port
+ * @vsc: Switch private data structure
+ * @port: Port index on which to operate
+ *
+ * Update the PVID of a port so that it follows either the bridge PVID
+ * configuration, when the bridge is currently VLAN-aware, or the PVID
+ * from tag_8021q, when the port is standalone or under a VLAN-unaware
+ * bridge. A port with no PVID drops all untagged and VID 0 tagged
+ * traffic.
+ *
+ * Must be called when changes are made to:
+ * - the bridge VLAN filtering state of the port
+ * - the number or attributes of VLANs from the bridge VLAN table,
+ * while the port is currently VLAN-aware
+ *
+ * Return: 0 on success, or negative errno on error.
+ */
+static int vsc73xx_vlan_commit_pvid(struct vsc73xx *vsc, int port)
+{
+ struct vsc73xx_portinfo *portinfo = &vsc->portinfo[port];
+ bool valid = portinfo->pvid_tag_8021q_configured;
+ struct dsa_port *dp = dsa_to_port(vsc->ds, port);
+ u16 vid = portinfo->pvid_tag_8021q;
+
+ if (dsa_port_is_vlan_filtering(dp)) {
+ vid = portinfo->pvid_vlan_filtering;
+ valid = portinfo->pvid_vlan_filtering_configured;
}
- /* Enable port (forwarding) in the receieve mask */
- vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
- VSC73XX_RECVMASK, BIT(port), BIT(port));
+ return vsc73xx_vlan_change_pvid(vsc, port, vid, valid);
+}
+
+static int vsc73xx_vlan_commit_settings(struct vsc73xx *vsc, int port)
+{
+ int ret;
+
+ ret = vsc73xx_vlan_commit_untagged(vsc, port);
+ if (ret)
+ return ret;
+
+ ret = vsc73xx_vlan_commit_pvid(vsc, port);
+ if (ret)
+ return ret;
+
+ return vsc73xx_vlan_commit_conf(vsc, port);
}
static int vsc73xx_port_enable(struct dsa_switch *ds, int port,
@@ -882,7 +1435,7 @@ static int vsc73xx_port_enable(struct dsa_switch *ds, int port,
dev_info(vsc->dev, "enable port %d\n", port);
vsc73xx_init_port(vsc, port);
- return 0;
+ return vsc73xx_vlan_commit_settings(vsc, port);
}
static void vsc73xx_port_disable(struct dsa_switch *ds, int port)
@@ -1053,20 +1606,642 @@ static void vsc73xx_phylink_get_caps(struct dsa_switch *dsa, int port,
config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000;
}
+static int
+vsc73xx_port_vlan_filtering(struct dsa_switch *ds, int port,
+ bool vlan_filtering, struct netlink_ext_ack *extack)
+{
+ struct vsc73xx *vsc = ds->priv;
+
+	/* The commit to hardware below is required because vsc73xx uses
+	 * tag_8021q. When vlan_filtering is disabled, tag_8021q uses
+	 * pvid/untagged VLANs for port recognition. The values configured for
+	 * VLANs and the pvid/untagged states are stored in the portinfo
+	 * structure. When vlan_filtering is enabled, we need to restore
+	 * pvid/untagged from the portinfo structure. The analogous path runs
+	 * when vlan_filtering is disabled, but then the values used for
+	 * tag_8021q are restored.
+	 */
+
+ return vsc73xx_vlan_commit_settings(vsc, port);
+}
+
+static int vsc73xx_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
+{
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct vsc73xx_bridge_vlan *vsc73xx_vlan;
+ struct vsc73xx_vlan_summary summary;
+ struct vsc73xx_portinfo *portinfo;
+ struct vsc73xx *vsc = ds->priv;
+ bool commit_to_hardware;
+ int ret = 0;
+
+ /* Be sure to deny alterations to the configuration done by tag_8021q.
+ */
+ if (vid_is_dsa_8021q(vlan->vid)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Range 3072-4095 reserved for dsa_8021q operation");
+ return -EBUSY;
+ }
+
+ /* The processed vlan->vid is excluded from the search because the VLAN
+ * can be re-added with a different set of flags, so it's easiest to
+ * ignore its old flags from the VLAN database software copy.
+ */
+ vsc73xx_bridge_vlan_summary(vsc, port, &summary, vlan->vid);
+
+ /* VSC73XX allows only three untagged states: none, one or all */
+ if ((untagged && summary.num_tagged > 0 && summary.num_untagged > 0) ||
+ (!untagged && summary.num_untagged > 1)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Port can have only none, one or all untagged vlan");
+ return -EBUSY;
+ }
+
+ vsc73xx_vlan = vsc73xx_bridge_vlan_find(vsc, vlan->vid);
+
+ if (!vsc73xx_vlan) {
+ vsc73xx_vlan = kzalloc(sizeof(*vsc73xx_vlan), GFP_KERNEL);
+ if (!vsc73xx_vlan)
+ return -ENOMEM;
+
+ vsc73xx_vlan->vid = vlan->vid;
+
+ list_add_tail(&vsc73xx_vlan->list, &vsc->vlans);
+ }
+
+ vsc73xx_vlan->portmask |= BIT(port);
+
+ /* CPU port must be always tagged because source port identification is
+ * based on tag_8021q.
+ */
+ if (port == CPU_PORT)
+ goto update_vlan_table;
+
+ if (untagged)
+ vsc73xx_vlan->untagged |= BIT(port);
+ else
+ vsc73xx_vlan->untagged &= ~BIT(port);
+
+ portinfo = &vsc->portinfo[port];
+
+ if (pvid) {
+ portinfo->pvid_vlan_filtering_configured = true;
+ portinfo->pvid_vlan_filtering = vlan->vid;
+ } else if (portinfo->pvid_vlan_filtering_configured &&
+ portinfo->pvid_vlan_filtering == vlan->vid) {
+ portinfo->pvid_vlan_filtering_configured = false;
+ }
+
+ commit_to_hardware = !vsc73xx_tag_8021q_active(dp);
+ if (commit_to_hardware) {
+ ret = vsc73xx_vlan_commit_settings(vsc, port);
+ if (ret)
+ goto err;
+ }
+
+update_vlan_table:
+ ret = vsc73xx_update_vlan_table(vsc, port, vlan->vid, true);
+ if (!ret)
+ return 0;
+err:
+ vsc73xx_bridge_vlan_remove_port(vsc73xx_vlan, port);
+ return ret;
+}
+
+static int vsc73xx_port_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct vsc73xx_bridge_vlan *vsc73xx_vlan;
+ struct vsc73xx_portinfo *portinfo;
+ struct vsc73xx *vsc = ds->priv;
+ bool commit_to_hardware;
+ int ret;
+
+ ret = vsc73xx_update_vlan_table(vsc, port, vlan->vid, false);
+ if (ret)
+ return ret;
+
+ portinfo = &vsc->portinfo[port];
+
+ if (portinfo->pvid_vlan_filtering_configured &&
+ portinfo->pvid_vlan_filtering == vlan->vid)
+ portinfo->pvid_vlan_filtering_configured = false;
+
+ vsc73xx_vlan = vsc73xx_bridge_vlan_find(vsc, vlan->vid);
+
+ if (vsc73xx_vlan)
+ vsc73xx_bridge_vlan_remove_port(vsc73xx_vlan, port);
+
+ commit_to_hardware = !vsc73xx_tag_8021q_active(dsa_to_port(ds, port));
+
+ if (commit_to_hardware)
+ return vsc73xx_vlan_commit_settings(vsc, port);
+
+ return 0;
+}
+
+static int vsc73xx_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
+ u16 flags)
+{
+ bool pvid = flags & BRIDGE_VLAN_INFO_PVID;
+ struct vsc73xx_portinfo *portinfo;
+ struct vsc73xx *vsc = ds->priv;
+ bool commit_to_hardware;
+ int ret;
+
+ portinfo = &vsc->portinfo[port];
+
+ if (pvid) {
+ portinfo->pvid_tag_8021q_configured = true;
+ portinfo->pvid_tag_8021q = vid;
+ }
+
+ commit_to_hardware = vsc73xx_tag_8021q_active(dsa_to_port(ds, port));
+ if (commit_to_hardware) {
+ ret = vsc73xx_vlan_commit_settings(vsc, port);
+ if (ret)
+ return ret;
+ }
+
+ return vsc73xx_update_vlan_table(vsc, port, vid, true);
+}
+
+static int vsc73xx_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
+{
+ struct vsc73xx_portinfo *portinfo;
+ struct vsc73xx *vsc = ds->priv;
+
+ portinfo = &vsc->portinfo[port];
+
+ if (portinfo->pvid_tag_8021q_configured &&
+ portinfo->pvid_tag_8021q == vid) {
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ bool commit_to_hardware;
+ int err;
+
+ portinfo->pvid_tag_8021q_configured = false;
+
+ commit_to_hardware = vsc73xx_tag_8021q_active(dp);
+ if (commit_to_hardware) {
+ err = vsc73xx_vlan_commit_settings(vsc, port);
+ if (err)
+ return err;
+ }
+ }
+
+ return vsc73xx_update_vlan_table(vsc, port, vid, false);
+}
+
+static int vsc73xx_port_pre_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ if (flags.mask & ~BR_LEARNING)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int vsc73xx_port_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ if (flags.mask & BR_LEARNING) {
+ u32 val = flags.val & BR_LEARNING ? BIT(port) : 0;
+ struct vsc73xx *vsc = ds->priv;
+
+ return vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_LEARNMASK, BIT(port), val);
+ }
+
+ return 0;
+}
+
+static void vsc73xx_refresh_fwd_map(struct dsa_switch *ds, int port, u8 state)
+{
+ struct dsa_port *other_dp, *dp = dsa_to_port(ds, port);
+ struct vsc73xx *vsc = ds->priv;
+ u16 mask;
+
+ if (state != BR_STATE_FORWARDING) {
+ /* Ports that aren't in the forwarding state must not
+ * forward packets anywhere.
+ */
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_SRCMASKS + port,
+ VSC73XX_SRCMASKS_PORTS_MASK, 0);
+
+ dsa_switch_for_each_available_port(other_dp, ds) {
+ if (other_dp == dp)
+ continue;
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_SRCMASKS + other_dp->index,
+ BIT(port), 0);
+ }
+
+ return;
+ }
+
+ /* Forwarding ports must forward to the CPU and to other ports
+ * in the same bridge
+ */
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_SRCMASKS + CPU_PORT, BIT(port), BIT(port));
+
+ mask = BIT(CPU_PORT);
+
+ dsa_switch_for_each_user_port(other_dp, ds) {
+ int other_port = other_dp->index;
+
+ if (port == other_port || !dsa_port_bridge_same(dp, other_dp) ||
+ other_dp->stp_state != BR_STATE_FORWARDING)
+ continue;
+
+ mask |= BIT(other_port);
+
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_SRCMASKS + other_port,
+ BIT(port), BIT(port));
+ }
+
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_SRCMASKS + port,
+ VSC73XX_SRCMASKS_PORTS_MASK, mask);
+}
+
+/* FIXME: STP frames aren't forwarded at the moment. BPDU frames are
+ * forwarded only from and to the PI/SI interface. For more info see chapter
+ * 2.7.1 (CPU Forwarding) in the datasheet.
+ * This function is required for tag_8021q operations.
+ */
+static void vsc73xx_port_stp_state_set(struct dsa_switch *ds, int port,
+ u8 state)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct vsc73xx *vsc = ds->priv;
+ u32 val = 0;
+
+ if (state == BR_STATE_LEARNING || state == BR_STATE_FORWARDING)
+ val = dp->learning ? BIT(port) : 0;
+
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_LEARNMASK, BIT(port), val);
+
+ val = (state == BR_STATE_BLOCKING || state == BR_STATE_DISABLED) ?
+ 0 : BIT(port);
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_RECVMASK, BIT(port), val);
+
+	/* The CPU port should always forward packets while user ports are
+	 * forwarding, so only configure it from the other ports.
+	 */
+ if (port != CPU_PORT)
+ vsc73xx_refresh_fwd_map(ds, port, state);
+}
+
+static u16 vsc73xx_calc_hash(const unsigned char *addr, u16 vid)
+{
+ /* VID 5-0, MAC 47-44 */
+ u16 hash = FIELD_PREP(VSC73XX_HASH0_VID_TO_MASK,
+ FIELD_GET(VSC73XX_HASH0_VID_FROM_MASK, vid)) |
+ FIELD_PREP(VSC73XX_HASH0_MAC0_TO_MASK,
+ FIELD_GET(VSC73XX_HASH0_MAC0_FROM_MASK, addr[0]));
+ /* MAC 43-33 */
+ hash ^= FIELD_PREP(VSC73XX_HASH1_MAC0_TO_MASK,
+ FIELD_GET(VSC73XX_HASH1_MAC0_FROM_MASK, addr[0])) |
+ FIELD_PREP(VSC73XX_HASH1_MAC1_TO_MASK,
+ FIELD_GET(VSC73XX_HASH1_MAC1_FROM_MASK, addr[1]));
+ /* MAC 32-22 */
+ hash ^= FIELD_PREP(VSC73XX_HASH2_MAC1_TO_MASK,
+ FIELD_GET(VSC73XX_HASH2_MAC1_FROM_MASK, addr[1])) |
+ FIELD_PREP(VSC73XX_HASH2_MAC2_TO_MASK,
+ FIELD_GET(VSC73XX_HASH2_MAC2_FROM_MASK, addr[2])) |
+ FIELD_PREP(VSC73XX_HASH2_MAC3_TO_MASK,
+ FIELD_GET(VSC73XX_HASH2_MAC3_FROM_MASK, addr[3]));
+ /* MAC 21-11 */
+ hash ^= FIELD_PREP(VSC73XX_HASH3_MAC3_TO_MASK,
+ FIELD_GET(VSC73XX_HASH3_MAC3_FROM_MASK, addr[3])) |
+ FIELD_PREP(VSC73XX_HASH3_MAC4_TO_MASK,
+ FIELD_GET(VSC73XX_HASH3_MAC4_FROM_MASK, addr[4]));
+ /* MAC 10-0 */
+ hash ^= FIELD_PREP(VSC73XX_HASH4_MAC4_TO_MASK,
+ FIELD_GET(VSC73XX_HASH4_MAC4_FROM_MASK, addr[4])) |
+ addr[5];
+
+ return hash;
+}
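For reference, the function above XOR-folds the 54-bit {VID[5:0], MAC[47:0]} key into an 11-bit MAC table row index. A minimal standalone sketch of the same fold (not part of this patch, and assuming the HASH*_TO/FROM masks slice the key into the chunks described by the comments):

/* Illustrative only: equivalent XOR fold of the FDB hash key, assuming the
 * chunk layout documented in vsc73xx_calc_hash() above.
 */
static u16 example_vsc73xx_hash(const u8 *mac, u16 vid)
{
	u64 key = ((u64)(vid & 0x3f) << 48) |
		  ((u64)mac[0] << 40) | ((u64)mac[1] << 32) |
		  ((u64)mac[2] << 24) | ((u64)mac[3] << 16) |
		  ((u64)mac[4] << 8) | mac[5];
	u16 hash = 0;
	int i;

	/* Fold five 11-bit chunks; the topmost chunk is only 10 bits wide */
	for (i = 0; i < 5; i++) {
		hash ^= key & GENMASK(10, 0);
		key >>= 11;
	}

	return hash;
}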
+
+static int
+vsc73xx_port_wait_for_mac_table_cmd(struct vsc73xx *vsc)
+{
+ int ret, err;
+ u32 val;
+
+ ret = read_poll_timeout(vsc73xx_read, err,
+ err < 0 ||
+ ((val & VSC73XX_MACACCESS_CMD_MASK) ==
+ VSC73XX_MACACCESS_CMD_IDLE),
+ VSC73XX_POLL_SLEEP_US, VSC73XX_POLL_TIMEOUT_US,
+ false, vsc, VSC73XX_BLOCK_ANALYZER,
+ 0, VSC73XX_MACACCESS, &val);
+ if (ret)
+ return ret;
+ return err;
+}
+
+static int vsc73xx_port_read_mac_table_row(struct vsc73xx *vsc, u16 index,
+ struct vsc73xx_fdb *fdb)
+{
+ int ret, i;
+ u32 val;
+
+ if (!fdb)
+ return -EINVAL;
+ if (index >= VSC73XX_NUM_FDB_ROWS)
+ return -EINVAL;
+
+ for (i = 0; i < VSC73XX_NUM_BUCKETS; i++) {
+ ret = vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_MACTINDX,
+ (i ? 0 : VSC73XX_MACTINDX_SHADOW) |
+ FIELD_PREP(VSC73XX_MACTINDX_BUCKET_MSK, i) |
+ index);
+ if (ret)
+ return ret;
+
+ ret = vsc73xx_port_wait_for_mac_table_cmd(vsc);
+ if (ret)
+ return ret;
+
+ ret = vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_MACACCESS,
+ VSC73XX_MACACCESS_CMD_MASK,
+ VSC73XX_MACACCESS_CMD_READ_ENTRY);
+ if (ret)
+ return ret;
+
+ ret = vsc73xx_port_wait_for_mac_table_cmd(vsc);
+ if (ret)
+ return ret;
+
+ ret = vsc73xx_read(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_MACACCESS, &val);
+ if (ret)
+ return ret;
+
+ fdb[i].valid = FIELD_GET(VSC73XX_MACACCESS_VALID, val);
+ if (!fdb[i].valid)
+ continue;
+
+ fdb[i].port = FIELD_GET(VSC73XX_MACACCESS_DEST_IDX_MASK, val);
+
+ ret = vsc73xx_read(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_MACHDATA, &val);
+ if (ret)
+ return ret;
+
+ fdb[i].vid = FIELD_GET(VSC73XX_MACHDATA_VID, val);
+ fdb[i].mac[0] = FIELD_GET(VSC73XX_MACHDATA_MAC0, val);
+ fdb[i].mac[1] = FIELD_GET(VSC73XX_MACHDATA_MAC1, val);
+
+ ret = vsc73xx_read(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_MACLDATA, &val);
+ if (ret)
+ return ret;
+
+ fdb[i].mac[2] = FIELD_GET(VSC73XX_MACLDATA_MAC2, val);
+ fdb[i].mac[3] = FIELD_GET(VSC73XX_MACLDATA_MAC3, val);
+ fdb[i].mac[4] = FIELD_GET(VSC73XX_MACLDATA_MAC4, val);
+ fdb[i].mac[5] = FIELD_GET(VSC73XX_MACLDATA_MAC5, val);
+ }
+
+ return ret;
+}
+
+static int
+vsc73xx_fdb_operation(struct vsc73xx *vsc, const unsigned char *addr, u16 vid,
+ u16 hash, u16 cmd_mask, u16 cmd_val)
+{
+ int ret;
+ u32 val;
+
+ val = FIELD_PREP(VSC73XX_MACHDATA_VID, vid) |
+ FIELD_PREP(VSC73XX_MACHDATA_MAC0, addr[0]) |
+ FIELD_PREP(VSC73XX_MACHDATA_MAC1, addr[1]);
+ ret = vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_MACHDATA,
+ val);
+ if (ret)
+ return ret;
+
+ val = FIELD_PREP(VSC73XX_MACLDATA_MAC2, addr[2]) |
+ FIELD_PREP(VSC73XX_MACLDATA_MAC3, addr[3]) |
+ FIELD_PREP(VSC73XX_MACLDATA_MAC4, addr[4]) |
+ FIELD_PREP(VSC73XX_MACLDATA_MAC5, addr[5]);
+ ret = vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_MACLDATA,
+ val);
+ if (ret)
+ return ret;
+
+ ret = vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_MACTINDX,
+ hash);
+ if (ret)
+ return ret;
+
+ ret = vsc73xx_port_wait_for_mac_table_cmd(vsc);
+ if (ret)
+ return ret;
+
+ ret = vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_MACACCESS, cmd_mask, cmd_val);
+ if (ret)
+ return ret;
+
+ return vsc73xx_port_wait_for_mac_table_cmd(vsc);
+}
+
+static int vsc73xx_fdb_del_entry(struct vsc73xx *vsc, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct vsc73xx_fdb fdb[VSC73XX_NUM_BUCKETS];
+ u16 hash = vsc73xx_calc_hash(addr, vid);
+ int bucket, ret;
+
+ mutex_lock(&vsc->fdb_lock);
+
+ ret = vsc73xx_port_read_mac_table_row(vsc, hash, fdb);
+ if (ret)
+ goto err;
+
+ for (bucket = 0; bucket < VSC73XX_NUM_BUCKETS; bucket++) {
+ if (fdb[bucket].valid && fdb[bucket].port == port &&
+ ether_addr_equal(addr, fdb[bucket].mac))
+ break;
+ }
+
+ if (bucket == VSC73XX_NUM_BUCKETS) {
+ /* Can't find MAC in MAC table */
+ ret = -ENODATA;
+ goto err;
+ }
+
+ ret = vsc73xx_fdb_operation(vsc, addr, vid, hash,
+ VSC73XX_MACACCESS_CMD_MASK,
+ VSC73XX_MACACCESS_CMD_FORGET);
+err:
+ mutex_unlock(&vsc->fdb_lock);
+ return ret;
+}
+
+static int vsc73xx_fdb_add_entry(struct vsc73xx *vsc, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct vsc73xx_fdb fdb[VSC73XX_NUM_BUCKETS];
+ u16 hash = vsc73xx_calc_hash(addr, vid);
+ int bucket, ret;
+ u32 val;
+
+ mutex_lock(&vsc->fdb_lock);
+
+ ret = vsc73xx_port_read_mac_table_row(vsc, hash, fdb);
+ if (ret)
+ goto err;
+
+ for (bucket = 0; bucket < VSC73XX_NUM_BUCKETS; bucket++) {
+ if (!fdb[bucket].valid)
+ break;
+ }
+
+ if (bucket == VSC73XX_NUM_BUCKETS) {
+ /* Bucket is full */
+ ret = -EOVERFLOW;
+ goto err;
+ }
+
+ val = VSC73XX_MACACCESS_VALID | VSC73XX_MACACCESS_LOCKED |
+ FIELD_PREP(VSC73XX_MACACCESS_DEST_IDX_MASK, port) |
+ VSC73XX_MACACCESS_CMD_LEARN;
+ ret = vsc73xx_fdb_operation(vsc, addr, vid, hash,
+ VSC73XX_MACACCESS_VALID |
+ VSC73XX_MACACCESS_LOCKED |
+ VSC73XX_MACACCESS_DEST_IDX_MASK |
+ VSC73XX_MACACCESS_CMD_MASK, val);
+err:
+ mutex_unlock(&vsc->fdb_lock);
+ return ret;
+}
+
+static int vsc73xx_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid, struct dsa_db db)
+{
+ struct vsc73xx *vsc = ds->priv;
+
+ if (!vid) {
+ switch (db.type) {
+ case DSA_DB_PORT:
+ vid = dsa_tag_8021q_standalone_vid(db.dp);
+ break;
+ case DSA_DB_BRIDGE:
+ vid = dsa_tag_8021q_bridge_vid(db.bridge.num);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return vsc73xx_fdb_add_entry(vsc, port, addr, vid);
+}
+
+static int vsc73xx_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid, struct dsa_db db)
+{
+ struct vsc73xx *vsc = ds->priv;
+
+ if (!vid) {
+ switch (db.type) {
+ case DSA_DB_PORT:
+ vid = dsa_tag_8021q_standalone_vid(db.dp);
+ break;
+ case DSA_DB_BRIDGE:
+ vid = dsa_tag_8021q_bridge_vid(db.bridge.num);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return vsc73xx_fdb_del_entry(vsc, port, addr, vid);
+}
+
+static int vsc73xx_port_fdb_dump(struct dsa_switch *ds,
+ int port, dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct vsc73xx_fdb fdb[VSC73XX_NUM_BUCKETS];
+ struct vsc73xx *vsc = ds->priv;
+ u16 i, bucket;
+ int err = 0;
+
+ mutex_lock(&vsc->fdb_lock);
+
+ for (i = 0; i < VSC73XX_NUM_FDB_ROWS; i++) {
+ err = vsc73xx_port_read_mac_table_row(vsc, i, fdb);
+ if (err)
+ goto unlock;
+
+ for (bucket = 0; bucket < VSC73XX_NUM_BUCKETS; bucket++) {
+ if (!fdb[bucket].valid || fdb[bucket].port != port)
+ continue;
+
+ /* We need to hide dsa_8021q VLANs from the user */
+ if (vid_is_dsa_8021q(fdb[bucket].vid))
+ fdb[bucket].vid = 0;
+
+ err = cb(fdb[bucket].mac, fdb[bucket].vid, false, data);
+ if (err)
+ goto unlock;
+ }
+ }
+unlock:
+ mutex_unlock(&vsc->fdb_lock);
+ return err;
+}
+
+static const struct phylink_mac_ops vsc73xx_phylink_mac_ops = {
+ .mac_config = vsc73xx_mac_config,
+ .mac_link_down = vsc73xx_mac_link_down,
+ .mac_link_up = vsc73xx_mac_link_up,
+};
+
static const struct dsa_switch_ops vsc73xx_ds_ops = {
.get_tag_protocol = vsc73xx_get_tag_protocol,
.setup = vsc73xx_setup,
+ .teardown = vsc73xx_teardown,
.phy_read = vsc73xx_phy_read,
.phy_write = vsc73xx_phy_write,
- .adjust_link = vsc73xx_adjust_link,
.get_strings = vsc73xx_get_strings,
.get_ethtool_stats = vsc73xx_get_ethtool_stats,
.get_sset_count = vsc73xx_get_sset_count,
.port_enable = vsc73xx_port_enable,
.port_disable = vsc73xx_port_disable,
+ .port_pre_bridge_flags = vsc73xx_port_pre_bridge_flags,
+ .port_bridge_flags = vsc73xx_port_bridge_flags,
+ .port_bridge_join = dsa_tag_8021q_bridge_join,
+ .port_bridge_leave = dsa_tag_8021q_bridge_leave,
.port_change_mtu = vsc73xx_change_mtu,
+ .port_fdb_add = vsc73xx_fdb_add,
+ .port_fdb_del = vsc73xx_fdb_del,
+ .port_fdb_dump = vsc73xx_port_fdb_dump,
.port_max_mtu = vsc73xx_get_max_mtu,
+ .port_stp_state_set = vsc73xx_port_stp_state_set,
+ .port_vlan_filtering = vsc73xx_port_vlan_filtering,
+ .port_vlan_add = vsc73xx_port_vlan_add,
+ .port_vlan_del = vsc73xx_port_vlan_del,
.phylink_get_caps = vsc73xx_phylink_get_caps,
+ .tag_8021q_vlan_add = vsc73xx_tag_8021q_vlan_add,
+ .tag_8021q_vlan_del = vsc73xx_tag_8021q_vlan_del,
};
static int vsc73xx_gpio_get(struct gpio_chip *chip, unsigned int offset)
@@ -1083,14 +2258,14 @@ static int vsc73xx_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!(val & BIT(offset));
}
-static void vsc73xx_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int val)
+static int vsc73xx_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int val)
{
struct vsc73xx *vsc = gpiochip_get_data(chip);
u32 tmp = val ? BIT(offset) : 0;
- vsc73xx_update_bits(vsc, VSC73XX_BLOCK_SYSTEM, 0,
- VSC73XX_GPIO, BIT(offset), tmp);
+ return vsc73xx_update_bits(vsc, VSC73XX_BLOCK_SYSTEM, 0,
+ VSC73XX_GPIO, BIT(offset), tmp);
}
static int vsc73xx_gpio_direction_output(struct gpio_chip *chip,
@@ -1189,32 +2364,24 @@ int vsc73xx_probe(struct vsc73xx *vsc)
return -ENODEV;
}
+ mutex_init(&vsc->fdb_lock);
+
eth_random_addr(vsc->addr);
dev_info(vsc->dev,
"MAC for control frames: %02X:%02X:%02X:%02X:%02X:%02X\n",
vsc->addr[0], vsc->addr[1], vsc->addr[2],
vsc->addr[3], vsc->addr[4], vsc->addr[5]);
- /* The VSC7395 switch chips have 5+1 ports which means 5
- * ordinary ports and a sixth CPU port facing the processor
- * with an RGMII interface. These ports are numbered 0..4
- * and 6, so they leave a "hole" in the port map for port 5,
- * which is invalid.
- *
- * The VSC7398 has 8 ports, port 7 is again the CPU port.
- *
- * We allocate 8 ports and avoid access to the nonexistant
- * ports.
- */
vsc->ds = devm_kzalloc(dev, sizeof(*vsc->ds), GFP_KERNEL);
if (!vsc->ds)
return -ENOMEM;
vsc->ds->dev = dev;
- vsc->ds->num_ports = 8;
+ vsc->ds->num_ports = VSC73XX_MAX_NUM_PORTS;
vsc->ds->priv = vsc;
vsc->ds->ops = &vsc73xx_ds_ops;
+ vsc->ds->phylink_mac_ops = &vsc73xx_phylink_mac_ops;
ret = dsa_register_switch(vsc->ds);
if (ret) {
dev_err(dev, "unable to register switch (%d)\n", ret);
diff --git a/drivers/net/dsa/vitesse-vsc73xx-platform.c b/drivers/net/dsa/vitesse-vsc73xx-platform.c
index 755b7895a15a..7a2e0a619b85 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-platform.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-platform.c
@@ -158,7 +158,7 @@ MODULE_DEVICE_TABLE(of, vsc73xx_of_match);
static struct platform_driver vsc73xx_platform_driver = {
.probe = vsc73xx_platform_probe,
- .remove_new = vsc73xx_platform_remove,
+ .remove = vsc73xx_platform_remove,
.shutdown = vsc73xx_platform_shutdown,
.driver = {
.name = "vsc73xx-platform",
diff --git a/drivers/net/dsa/vitesse-vsc73xx.h b/drivers/net/dsa/vitesse-vsc73xx.h
index 30b1f0a36566..3c30e143c14f 100644
--- a/drivers/net/dsa/vitesse-vsc73xx.h
+++ b/drivers/net/dsa/vitesse-vsc73xx.h
@@ -3,8 +3,49 @@
#include <linux/etherdevice.h>
#include <linux/gpio/driver.h>
+/* The VSC7395 switch chips have 5+1 ports which means 5 ordinary ports and
+ * a sixth CPU port facing the processor with an RGMII interface. These ports
+ * are numbered 0..4 and 6, so they leave a "hole" in the port map for port 5,
+ * which is invalid.
+ *
+ * The VSC7398 has 8 ports, port 7 is again the CPU port.
+ *
+ * We allocate 8 ports and avoid access to the nonexistent ports.
+ */
+#define VSC73XX_MAX_NUM_PORTS 8
+
+/**
+ * struct vsc73xx_portinfo - port data structure: contains storage data
+ * @pvid_vlan_filtering: pvid VLAN number used in VLAN filtering mode
+ * @pvid_tag_8021q: pvid VLAN number used in tag_8021q mode
+ * @pvid_vlan_filtering_configured: informs if the port has a configured pvid
+ *	in VLAN filtering mode
+ * @pvid_tag_8021q_configured: informs if the port has a configured pvid in
+ *	tag_8021q mode
+ */
+struct vsc73xx_portinfo {
+ u16 pvid_vlan_filtering;
+ u16 pvid_tag_8021q;
+ bool pvid_vlan_filtering_configured;
+ bool pvid_tag_8021q_configured;
+};
+
/**
- * struct vsc73xx - VSC73xx state container
+ * struct vsc73xx - VSC73xx state container: main data structure
+ * @dev: The device pointer
+ * @reset: The descriptor for the GPIO line tied to the reset pin
+ * @ds: Pointer to the DSA core structure
+ * @gc: Main structure of the GPIO controller
+ * @chipid: Storage for the Chip ID value read from the CHIPID register of the
+ * switch
+ * @addr: MAC address used in flow control frames
+ * @ops: Structure with hardware-dependent operations
+ * @priv: Pointer to the configuration interface structure
+ * @portinfo: Storage table of portinfo structures
+ * @vlans: List of configured VLANs. Contains the port mask and untagged status
+ * of every VLAN configured by the port VLAN operations. It doesn't cover
+ * tag_8021q VLANs.
+ * @fdb_lock: Mutex protecting FDB access
*/
struct vsc73xx {
struct device *dev;
@@ -15,8 +56,16 @@ struct vsc73xx {
u8 addr[ETH_ALEN];
const struct vsc73xx_ops *ops;
void *priv;
+ struct vsc73xx_portinfo portinfo[VSC73XX_MAX_NUM_PORTS];
+ struct list_head vlans;
+ struct mutex fdb_lock;
};
+/**
+ * struct vsc73xx_ops - VSC73xx methods container
+ * @read: Method for register reading over the hardware-dependent interface
+ * @write: Method for register writing over the hardware-dependent interface
+ */
struct vsc73xx_ops {
int (*read)(struct vsc73xx *vsc, u8 block, u8 subblock, u8 reg,
u32 *val);
@@ -24,6 +73,21 @@ struct vsc73xx_ops {
u32 val);
};
+/**
+ * struct vsc73xx_bridge_vlan - VSC73xx driver structure which keeps vlan
+ * database copy
+ * @vid: VLAN number
+ * @portmask: each bit represents one port
+ * @untagged: each bit represents one port configured with @vid untagged
+ * @list: list structure
+ */
+struct vsc73xx_bridge_vlan {
+ u16 vid;
+ u8 portmask;
+ u8 untagged;
+ struct list_head list;
+};
+
int vsc73xx_is_addr_valid(u8 block, u8 subblock);
int vsc73xx_probe(struct vsc73xx *vsc);
void vsc73xx_remove(struct vsc73xx *vsc);
diff --git a/drivers/net/dsa/xrs700x/xrs700x.c b/drivers/net/dsa/xrs700x/xrs700x.c
index 96db032b478f..0a05f4156ef4 100644
--- a/drivers/net/dsa/xrs700x/xrs700x.c
+++ b/drivers/net/dsa/xrs700x/xrs700x.c
@@ -91,10 +91,8 @@ static void xrs700x_get_strings(struct dsa_switch *ds, int port,
if (stringset != ETH_SS_STATS)
return;
- for (i = 0; i < ARRAY_SIZE(xrs700x_mibs); i++) {
- strscpy(data, xrs700x_mibs[i].name, ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < ARRAY_SIZE(xrs700x_mibs); i++)
+ ethtool_puts(&data, xrs700x_mibs[i].name);
}
static int xrs700x_get_sset_count(struct dsa_switch *ds, int port, int sset)
@@ -466,13 +464,25 @@ static void xrs700x_phylink_get_caps(struct dsa_switch *ds, int port,
}
}
-static void xrs700x_mac_link_up(struct dsa_switch *ds, int port,
- unsigned int mode, phy_interface_t interface,
+static void xrs700x_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+}
+
+static void xrs700x_mac_link_down(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
+{
+}
+
+static void xrs700x_mac_link_up(struct phylink_config *config,
struct phy_device *phydev,
+ unsigned int mode, phy_interface_t interface,
int speed, int duplex,
bool tx_pause, bool rx_pause)
{
- struct xrs700x *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct xrs700x *priv = dp->ds->priv;
+ int port = dp->index;
unsigned int val;
switch (speed) {
@@ -556,6 +566,7 @@ static int xrs700x_hsr_join(struct dsa_switch *ds, int port,
struct xrs700x *priv = ds->priv;
struct net_device *user;
int ret, i, hsr_pair[2];
+ enum hsr_port_type type;
enum hsr_version ver;
bool fwd = false;
@@ -579,6 +590,16 @@ static int xrs700x_hsr_join(struct dsa_switch *ds, int port,
return -EOPNOTSUPP;
}
+ ret = hsr_get_port_type(hsr, dsa_to_port(ds, port)->user, &type);
+ if (ret)
+ return ret;
+
+ if (type != HSR_PT_SLAVE_A && type != HSR_PT_SLAVE_B) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only HSR slave ports can be offloaded");
+ return -EOPNOTSUPP;
+ }
+
dsa_hsr_foreach_port(dp, ds, hsr) {
if (dp->index != port) {
partner = dp;
@@ -699,13 +720,18 @@ static int xrs700x_hsr_leave(struct dsa_switch *ds, int port,
return 0;
}
+static const struct phylink_mac_ops xrs700x_phylink_mac_ops = {
+ .mac_config = xrs700x_mac_config,
+ .mac_link_down = xrs700x_mac_link_down,
+ .mac_link_up = xrs700x_mac_link_up,
+};
+
static const struct dsa_switch_ops xrs700x_ops = {
.get_tag_protocol = xrs700x_get_tag_protocol,
.setup = xrs700x_setup,
.teardown = xrs700x_teardown,
.port_stp_state_set = xrs700x_port_stp_state_set,
.phylink_get_caps = xrs700x_phylink_get_caps,
- .phylink_mac_link_up = xrs700x_mac_link_up,
.get_strings = xrs700x_get_strings,
.get_sset_count = xrs700x_get_sset_count,
.get_ethtool_stats = xrs700x_get_ethtool_stats,
@@ -763,6 +789,7 @@ struct xrs700x *xrs700x_switch_alloc(struct device *base, void *devpriv)
INIT_DELAYED_WORK(&priv->mib_work, xrs700x_mib_work);
ds->ops = &xrs700x_ops;
+ ds->phylink_mac_ops = &xrs700x_phylink_mac_ops;
ds->priv = priv;
priv->dev = base;
diff --git a/drivers/net/dsa/xrs700x/xrs700x_i2c.c b/drivers/net/dsa/xrs700x/xrs700x_i2c.c
index c1179d7311f7..9b731dea78c1 100644
--- a/drivers/net/dsa/xrs700x/xrs700x_i2c.c
+++ b/drivers/net/dsa/xrs700x/xrs700x_i2c.c
@@ -127,8 +127,8 @@ static void xrs700x_i2c_shutdown(struct i2c_client *i2c)
}
static const struct i2c_device_id xrs700x_i2c_id[] = {
- { "xrs700x-switch", 0 },
- {},
+ { "xrs700x-switch" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, xrs700x_i2c_id);
diff --git a/drivers/net/dsa/yt921x.c b/drivers/net/dsa/yt921x.c
new file mode 100644
index 000000000000..1c511f5dc6ab
--- /dev/null
+++ b/drivers/net/dsa/yt921x.c
@@ -0,0 +1,3006 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for Motorcomm YT921x Switch
+ *
+ * Should work on YT9213/YT9214/YT9215/YT9218, but only tested on YT9215+SGMII;
+ * be sure to do your own checks before porting to another chip.
+ *
+ * Copyright (c) 2025 David Yang
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <linux/if_hsr.h>
+#include <linux/if_vlan.h>
+#include <linux/iopoll.h>
+#include <linux/mdio.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+
+#include <net/dsa.h>
+
+#include "yt921x.h"
+
+struct yt921x_mib_desc {
+ unsigned int size;
+ unsigned int offset;
+ const char *name;
+};
+
+#define MIB_DESC(_size, _offset, _name) \
+ {_size, _offset, _name}
+
+/* Must agree with struct yt921x_mib
+ *
+ * Unstructured fields (name != NULL) appear in get_ethtool_stats(); the
+ * structured ones go to their *_stats() methods, but we still need their
+ * sizes and offsets here to perform 32bit MIB overflow wraparound.
+ */
+static const struct yt921x_mib_desc yt921x_mib_descs[] = {
+ MIB_DESC(1, YT921X_MIB_DATA_RX_BROADCAST, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_RX_PAUSE, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_RX_MULTICAST, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_RX_CRC_ERR, NULL),
+
+ MIB_DESC(1, YT921X_MIB_DATA_RX_ALIGN_ERR, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_RX_UNDERSIZE_ERR, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_RX_FRAG_ERR, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_RX_PKT_SZ_64, NULL),
+
+ MIB_DESC(1, YT921X_MIB_DATA_RX_PKT_SZ_65_TO_127, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_RX_PKT_SZ_128_TO_255, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_RX_PKT_SZ_256_TO_511, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_RX_PKT_SZ_512_TO_1023, NULL),
+
+ MIB_DESC(1, YT921X_MIB_DATA_RX_PKT_SZ_1024_TO_1518, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_RX_PKT_SZ_1519_TO_MAX, NULL),
+ MIB_DESC(2, YT921X_MIB_DATA_RX_GOOD_BYTES, NULL),
+
+ MIB_DESC(2, YT921X_MIB_DATA_RX_BAD_BYTES, "RxBadBytes"),
+ MIB_DESC(1, YT921X_MIB_DATA_RX_OVERSIZE_ERR, NULL),
+
+ MIB_DESC(1, YT921X_MIB_DATA_RX_DROPPED, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_BROADCAST, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_PAUSE, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_MULTICAST, NULL),
+
+ MIB_DESC(1, YT921X_MIB_DATA_TX_UNDERSIZE_ERR, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_PKT_SZ_64, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_PKT_SZ_65_TO_127, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_PKT_SZ_128_TO_255, NULL),
+
+ MIB_DESC(1, YT921X_MIB_DATA_TX_PKT_SZ_256_TO_511, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_PKT_SZ_512_TO_1023, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_PKT_SZ_1024_TO_1518, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_PKT_SZ_1519_TO_MAX, NULL),
+
+ MIB_DESC(2, YT921X_MIB_DATA_TX_GOOD_BYTES, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_COLLISION, NULL),
+
+ MIB_DESC(1, YT921X_MIB_DATA_TX_EXCESSIVE_COLLISION, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_MULTIPLE_COLLISION, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_SINGLE_COLLISION, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_PKT, NULL),
+
+ MIB_DESC(1, YT921X_MIB_DATA_TX_DEFERRED, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_LATE_COLLISION, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_RX_OAM, "RxOAM"),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_OAM, "TxOAM"),
+};
+
+struct yt921x_info {
+ const char *name;
+ u16 major;
+ /* Unknown, seems to be plain enumeration */
+ u8 mode;
+ u8 extmode;
+ /* Ports with integral GbE PHYs, not including MCU Port 10 */
+ u16 internal_mask;
+ /* TODO: see comments in yt921x_dsa_phylink_get_caps() */
+ u16 external_mask;
+};
+
+#define YT921X_PORT_MASK_INTn(port) BIT(port)
+#define YT921X_PORT_MASK_INT0_n(n) GENMASK((n) - 1, 0)
+#define YT921X_PORT_MASK_EXT0 BIT(8)
+#define YT921X_PORT_MASK_EXT1 BIT(9)
+
+static const struct yt921x_info yt921x_infos[] = {
+ {
+ "YT9215SC", YT9215_MAJOR, 1, 0,
+ YT921X_PORT_MASK_INT0_n(5),
+ YT921X_PORT_MASK_EXT0 | YT921X_PORT_MASK_EXT1,
+ },
+ {
+ "YT9215S", YT9215_MAJOR, 2, 0,
+ YT921X_PORT_MASK_INT0_n(5),
+ YT921X_PORT_MASK_EXT0 | YT921X_PORT_MASK_EXT1,
+ },
+ {
+ "YT9215RB", YT9215_MAJOR, 3, 0,
+ YT921X_PORT_MASK_INT0_n(5),
+ YT921X_PORT_MASK_EXT0 | YT921X_PORT_MASK_EXT1,
+ },
+ {
+ "YT9214NB", YT9215_MAJOR, 3, 2,
+ YT921X_PORT_MASK_INTn(1) | YT921X_PORT_MASK_INTn(3),
+ YT921X_PORT_MASK_EXT0 | YT921X_PORT_MASK_EXT1,
+ },
+ {
+ "YT9213NB", YT9215_MAJOR, 3, 3,
+ YT921X_PORT_MASK_INTn(1) | YT921X_PORT_MASK_INTn(3),
+ YT921X_PORT_MASK_EXT1,
+ },
+ {
+ "YT9218N", YT9218_MAJOR, 0, 0,
+ YT921X_PORT_MASK_INT0_n(8),
+ 0,
+ },
+ {
+ "YT9218MB", YT9218_MAJOR, 1, 0,
+ YT921X_PORT_MASK_INT0_n(8),
+ YT921X_PORT_MASK_EXT0 | YT921X_PORT_MASK_EXT1,
+ },
+ {}
+};
+
+#define YT921X_NAME "yt921x"
+
+#define YT921X_VID_UNWARE 4095
+
+#define YT921X_POLL_SLEEP_US 10000
+#define YT921X_POLL_TIMEOUT_US 100000
+
+/* The interval should be small enough to avoid overflow of 32bit MIBs.
+ *
+ * Until we can read MIBs from the stats64 call directly (i.e. sleep
+ * there), we have to poll stats more frequently than is actually needed.
+ * For overflow protection alone, a 100 sec interval would normally be OK.
+ */
+#define YT921X_STATS_INTERVAL_JIFFIES (3 * HZ)
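As a rough sanity check on these intervals (back-of-the-envelope figures, not from the datasheet): the single-word 32bit MIBs here are packet counters, and even at gigabit line rate they take far longer than either interval to wrap:

/* Worst case: 1 Gbit/s of minimum-size frames is about 1.488 Mpps, so a
 * 32bit packet counter wraps after 2^32 / 1488095 ~= 2886 s (~48 min);
 * both 3 s and a 100 s polling interval leave ample margin.
 */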
+
+struct yt921x_reg_mdio {
+ struct mii_bus *bus;
+ int addr;
+ /* SWITCH_ID_1 / SWITCH_ID_0 of the device
+ *
+ * This is a way to multiplex multiple devices on the same MII phyaddr
+ * and should be configurable in DT. However, MDIO core simply doesn't
+ * allow multiple devices over one reg addr, so this is a fixed value
+ * for now until a solution is found.
+ *
+ * Keep this because we need switchid to form MII regaddrs anyway.
+ */
+ unsigned char switchid;
+};
+
+/* TODO: SPI/I2C */
+
+#define to_yt921x_priv(_ds) container_of_const(_ds, struct yt921x_priv, ds)
+#define to_device(priv) ((priv)->ds.dev)
+
+static int yt921x_reg_read(struct yt921x_priv *priv, u32 reg, u32 *valp)
+{
+ WARN_ON(!mutex_is_locked(&priv->reg_lock));
+
+ return priv->reg_ops->read(priv->reg_ctx, reg, valp);
+}
+
+static int yt921x_reg_write(struct yt921x_priv *priv, u32 reg, u32 val)
+{
+ WARN_ON(!mutex_is_locked(&priv->reg_lock));
+
+ return priv->reg_ops->write(priv->reg_ctx, reg, val);
+}
+
+static int
+yt921x_reg_wait(struct yt921x_priv *priv, u32 reg, u32 mask, u32 *valp)
+{
+ u32 val;
+ int res;
+ int ret;
+
+ ret = read_poll_timeout(yt921x_reg_read, res,
+ res || (val & mask) == *valp,
+ YT921X_POLL_SLEEP_US, YT921X_POLL_TIMEOUT_US,
+ false, priv, reg, &val);
+ if (ret)
+ return ret;
+ if (res)
+ return res;
+
+ *valp = val;
+ return 0;
+}
+
+static int
+yt921x_reg_update_bits(struct yt921x_priv *priv, u32 reg, u32 mask, u32 val)
+{
+ int res;
+ u32 v;
+ u32 u;
+
+ res = yt921x_reg_read(priv, reg, &v);
+ if (res)
+ return res;
+
+ u = v;
+ u &= ~mask;
+ u |= val;
+ if (u == v)
+ return 0;
+
+ return yt921x_reg_write(priv, reg, u);
+}
+
+static int yt921x_reg_set_bits(struct yt921x_priv *priv, u32 reg, u32 mask)
+{
+ return yt921x_reg_update_bits(priv, reg, 0, mask);
+}
+
+static int yt921x_reg_clear_bits(struct yt921x_priv *priv, u32 reg, u32 mask)
+{
+ return yt921x_reg_update_bits(priv, reg, mask, 0);
+}
+
+static int
+yt921x_reg_toggle_bits(struct yt921x_priv *priv, u32 reg, u32 mask, bool set)
+{
+ return yt921x_reg_update_bits(priv, reg, mask, !set ? 0 : mask);
+}
+
+/* Some registers, like VLANn_CTRL, should always be written in 64-bit, even if
+ * you are to write only the lower / upper 32 bits.
+ *
+ * There is no such restriction for reading, but we still provide 64-bit read
+ * wrappers so that we always handle u64 values.
+ */
+
+static int yt921x_reg64_read(struct yt921x_priv *priv, u32 reg, u64 *valp)
+{
+ u32 lo;
+ u32 hi;
+ int res;
+
+ res = yt921x_reg_read(priv, reg, &lo);
+ if (res)
+ return res;
+ res = yt921x_reg_read(priv, reg + 4, &hi);
+ if (res)
+ return res;
+
+ *valp = ((u64)hi << 32) | lo;
+ return 0;
+}
+
+static int yt921x_reg64_write(struct yt921x_priv *priv, u32 reg, u64 val)
+{
+ int res;
+
+ res = yt921x_reg_write(priv, reg, (u32)val);
+ if (res)
+ return res;
+ return yt921x_reg_write(priv, reg + 4, (u32)(val >> 32));
+}
+
+static int
+yt921x_reg64_update_bits(struct yt921x_priv *priv, u32 reg, u64 mask, u64 val)
+{
+ int res;
+ u64 v;
+ u64 u;
+
+ res = yt921x_reg64_read(priv, reg, &v);
+ if (res)
+ return res;
+
+ u = v;
+ u &= ~mask;
+ u |= val;
+ if (u == v)
+ return 0;
+
+ return yt921x_reg64_write(priv, reg, u);
+}
+
+static int yt921x_reg64_clear_bits(struct yt921x_priv *priv, u32 reg, u64 mask)
+{
+ return yt921x_reg64_update_bits(priv, reg, mask, 0);
+}
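Callers are expected to drive these 64-bit helpers under reg_lock, just like the 32-bit accessors above. A hypothetical usage sketch (YT921X_EXAMPLE_CTRL64 is an invented register name, not defined by this patch):

/* Hypothetical sketch only; YT921X_EXAMPLE_CTRL64 does not exist in yt921x.h */
static int example_set_port_bit64(struct yt921x_priv *priv, int port)
{
	int res;

	mutex_lock(&priv->reg_lock);
	/* Read-modify-write a register pair as a single 64-bit value */
	res = yt921x_reg64_update_bits(priv, YT921X_EXAMPLE_CTRL64,
				       BIT_ULL(port), BIT_ULL(port));
	mutex_unlock(&priv->reg_lock);

	return res;
}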
+
+static int yt921x_reg_mdio_read(void *context, u32 reg, u32 *valp)
+{
+ struct yt921x_reg_mdio *mdio = context;
+ struct mii_bus *bus = mdio->bus;
+ int addr = mdio->addr;
+ u32 reg_addr;
+ u32 reg_data;
+ u32 val;
+ int res;
+
+	/* Hold the mdio bus lock to avoid (un)locking it 4 times */
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ reg_addr = YT921X_SMI_SWITCHID(mdio->switchid) | YT921X_SMI_ADDR |
+ YT921X_SMI_READ;
+ res = __mdiobus_write(bus, addr, reg_addr, (u16)(reg >> 16));
+ if (res)
+ goto end;
+ res = __mdiobus_write(bus, addr, reg_addr, (u16)reg);
+ if (res)
+ goto end;
+
+ reg_data = YT921X_SMI_SWITCHID(mdio->switchid) | YT921X_SMI_DATA |
+ YT921X_SMI_READ;
+ res = __mdiobus_read(bus, addr, reg_data);
+ if (res < 0)
+ goto end;
+ val = (u16)res;
+ res = __mdiobus_read(bus, addr, reg_data);
+ if (res < 0)
+ goto end;
+ val = (val << 16) | (u16)res;
+
+ *valp = val;
+ res = 0;
+
+end:
+ mutex_unlock(&bus->mdio_lock);
+ return res;
+}
+
+static int yt921x_reg_mdio_write(void *context, u32 reg, u32 val)
+{
+ struct yt921x_reg_mdio *mdio = context;
+ struct mii_bus *bus = mdio->bus;
+ int addr = mdio->addr;
+ u32 reg_addr;
+ u32 reg_data;
+ int res;
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ reg_addr = YT921X_SMI_SWITCHID(mdio->switchid) | YT921X_SMI_ADDR |
+ YT921X_SMI_WRITE;
+ res = __mdiobus_write(bus, addr, reg_addr, (u16)(reg >> 16));
+ if (res)
+ goto end;
+ res = __mdiobus_write(bus, addr, reg_addr, (u16)reg);
+ if (res)
+ goto end;
+
+ reg_data = YT921X_SMI_SWITCHID(mdio->switchid) | YT921X_SMI_DATA |
+ YT921X_SMI_WRITE;
+ res = __mdiobus_write(bus, addr, reg_data, (u16)(val >> 16));
+ if (res)
+ goto end;
+ res = __mdiobus_write(bus, addr, reg_data, (u16)val);
+ if (res)
+ goto end;
+
+ res = 0;
+
+end:
+ mutex_unlock(&bus->mdio_lock);
+ return res;
+}
+
+static const struct yt921x_reg_ops yt921x_reg_ops_mdio = {
+ .read = yt921x_reg_mdio_read,
+ .write = yt921x_reg_mdio_write,
+};
+
+/* TODO: SPI/I2C */
+
+static int yt921x_intif_wait(struct yt921x_priv *priv)
+{
+ u32 val = 0;
+
+ return yt921x_reg_wait(priv, YT921X_INT_MBUS_OP, YT921X_MBUS_OP_START,
+ &val);
+}
+
+static int
+yt921x_intif_read(struct yt921x_priv *priv, int port, int reg, u16 *valp)
+{
+ struct device *dev = to_device(priv);
+ u32 mask;
+ u32 ctrl;
+ u32 val;
+ int res;
+
+ res = yt921x_intif_wait(priv);
+ if (res)
+ return res;
+
+ mask = YT921X_MBUS_CTRL_PORT_M | YT921X_MBUS_CTRL_REG_M |
+ YT921X_MBUS_CTRL_OP_M;
+ ctrl = YT921X_MBUS_CTRL_PORT(port) | YT921X_MBUS_CTRL_REG(reg) |
+ YT921X_MBUS_CTRL_READ;
+ res = yt921x_reg_update_bits(priv, YT921X_INT_MBUS_CTRL, mask, ctrl);
+ if (res)
+ return res;
+ res = yt921x_reg_write(priv, YT921X_INT_MBUS_OP, YT921X_MBUS_OP_START);
+ if (res)
+ return res;
+
+ res = yt921x_intif_wait(priv);
+ if (res)
+ return res;
+ res = yt921x_reg_read(priv, YT921X_INT_MBUS_DIN, &val);
+ if (res)
+ return res;
+
+ if ((u16)val != val)
+ dev_info(dev,
+ "%s: port %d, reg 0x%x: Expected u16, got 0x%08x\n",
+ __func__, port, reg, val);
+ *valp = (u16)val;
+ return 0;
+}
+
+static int
+yt921x_intif_write(struct yt921x_priv *priv, int port, int reg, u16 val)
+{
+ u32 mask;
+ u32 ctrl;
+ int res;
+
+ res = yt921x_intif_wait(priv);
+ if (res)
+ return res;
+
+ mask = YT921X_MBUS_CTRL_PORT_M | YT921X_MBUS_CTRL_REG_M |
+ YT921X_MBUS_CTRL_OP_M;
+ ctrl = YT921X_MBUS_CTRL_PORT(port) | YT921X_MBUS_CTRL_REG(reg) |
+ YT921X_MBUS_CTRL_WRITE;
+ res = yt921x_reg_update_bits(priv, YT921X_INT_MBUS_CTRL, mask, ctrl);
+ if (res)
+ return res;
+ res = yt921x_reg_write(priv, YT921X_INT_MBUS_DOUT, val);
+ if (res)
+ return res;
+ res = yt921x_reg_write(priv, YT921X_INT_MBUS_OP, YT921X_MBUS_OP_START);
+ if (res)
+ return res;
+
+ return yt921x_intif_wait(priv);
+}
+
+static int yt921x_mbus_int_read(struct mii_bus *mbus, int port, int reg)
+{
+ struct yt921x_priv *priv = mbus->priv;
+ u16 val;
+ int res;
+
+ if (port >= YT921X_PORT_NUM)
+ return U16_MAX;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_intif_read(priv, port, reg, &val);
+ mutex_unlock(&priv->reg_lock);
+
+ if (res)
+ return res;
+ return val;
+}
+
+static int
+yt921x_mbus_int_write(struct mii_bus *mbus, int port, int reg, u16 data)
+{
+ struct yt921x_priv *priv = mbus->priv;
+ int res;
+
+ if (port >= YT921X_PORT_NUM)
+ return -ENODEV;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_intif_write(priv, port, reg, data);
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static int
+yt921x_mbus_int_init(struct yt921x_priv *priv, struct device_node *mnp)
+{
+ struct device *dev = to_device(priv);
+ struct mii_bus *mbus;
+ int res;
+
+ mbus = devm_mdiobus_alloc(dev);
+ if (!mbus)
+ return -ENOMEM;
+
+ mbus->name = "YT921x internal MDIO bus";
+ snprintf(mbus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
+ mbus->priv = priv;
+ mbus->read = yt921x_mbus_int_read;
+ mbus->write = yt921x_mbus_int_write;
+ mbus->parent = dev;
+ mbus->phy_mask = (u32)~GENMASK(YT921X_PORT_NUM - 1, 0);
+
+ res = devm_of_mdiobus_register(dev, mbus, mnp);
+ if (res)
+ return res;
+
+ priv->mbus_int = mbus;
+
+ return 0;
+}
+
+static int yt921x_extif_wait(struct yt921x_priv *priv)
+{
+ u32 val = 0;
+
+ return yt921x_reg_wait(priv, YT921X_EXT_MBUS_OP, YT921X_MBUS_OP_START,
+ &val);
+}
+
+static int
+yt921x_extif_read(struct yt921x_priv *priv, int port, int reg, u16 *valp)
+{
+ struct device *dev = to_device(priv);
+ u32 mask;
+ u32 ctrl;
+ u32 val;
+ int res;
+
+ res = yt921x_extif_wait(priv);
+ if (res)
+ return res;
+
+ mask = YT921X_MBUS_CTRL_PORT_M | YT921X_MBUS_CTRL_REG_M |
+ YT921X_MBUS_CTRL_TYPE_M | YT921X_MBUS_CTRL_OP_M;
+ ctrl = YT921X_MBUS_CTRL_PORT(port) | YT921X_MBUS_CTRL_REG(reg) |
+ YT921X_MBUS_CTRL_TYPE_C22 | YT921X_MBUS_CTRL_READ;
+ res = yt921x_reg_update_bits(priv, YT921X_EXT_MBUS_CTRL, mask, ctrl);
+ if (res)
+ return res;
+ res = yt921x_reg_write(priv, YT921X_EXT_MBUS_OP, YT921X_MBUS_OP_START);
+ if (res)
+ return res;
+
+ res = yt921x_extif_wait(priv);
+ if (res)
+ return res;
+ res = yt921x_reg_read(priv, YT921X_EXT_MBUS_DIN, &val);
+ if (res)
+ return res;
+
+ if ((u16)val != val)
+ dev_info(dev,
+ "%s: port %d, reg 0x%x: Expected u16, got 0x%08x\n",
+ __func__, port, reg, val);
+ *valp = (u16)val;
+ return 0;
+}
+
+static int
+yt921x_extif_write(struct yt921x_priv *priv, int port, int reg, u16 val)
+{
+ u32 mask;
+ u32 ctrl;
+ int res;
+
+ res = yt921x_extif_wait(priv);
+ if (res)
+ return res;
+
+ mask = YT921X_MBUS_CTRL_PORT_M | YT921X_MBUS_CTRL_REG_M |
+ YT921X_MBUS_CTRL_TYPE_M | YT921X_MBUS_CTRL_OP_M;
+ ctrl = YT921X_MBUS_CTRL_PORT(port) | YT921X_MBUS_CTRL_REG(reg) |
+ YT921X_MBUS_CTRL_TYPE_C22 | YT921X_MBUS_CTRL_WRITE;
+ res = yt921x_reg_update_bits(priv, YT921X_EXT_MBUS_CTRL, mask, ctrl);
+ if (res)
+ return res;
+ res = yt921x_reg_write(priv, YT921X_EXT_MBUS_DOUT, val);
+ if (res)
+ return res;
+ res = yt921x_reg_write(priv, YT921X_EXT_MBUS_OP, YT921X_MBUS_OP_START);
+ if (res)
+ return res;
+
+ return yt921x_extif_wait(priv);
+}
+
+static int yt921x_mbus_ext_read(struct mii_bus *mbus, int port, int reg)
+{
+ struct yt921x_priv *priv = mbus->priv;
+ u16 val;
+ int res;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_extif_read(priv, port, reg, &val);
+ mutex_unlock(&priv->reg_lock);
+
+ if (res)
+ return res;
+ return val;
+}
+
+static int
+yt921x_mbus_ext_write(struct mii_bus *mbus, int port, int reg, u16 data)
+{
+ struct yt921x_priv *priv = mbus->priv;
+ int res;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_extif_write(priv, port, reg, data);
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static int
+yt921x_mbus_ext_init(struct yt921x_priv *priv, struct device_node *mnp)
+{
+ struct device *dev = to_device(priv);
+ struct mii_bus *mbus;
+ int res;
+
+ mbus = devm_mdiobus_alloc(dev);
+ if (!mbus)
+ return -ENOMEM;
+
+ mbus->name = "YT921x external MDIO bus";
+ snprintf(mbus->id, MII_BUS_ID_SIZE, "%s@ext", dev_name(dev));
+ mbus->priv = priv;
+ /* TODO: c45? */
+ mbus->read = yt921x_mbus_ext_read;
+ mbus->write = yt921x_mbus_ext_write;
+ mbus->parent = dev;
+
+ res = devm_of_mdiobus_register(dev, mbus, mnp);
+ if (res)
+ return res;
+
+ priv->mbus_ext = mbus;
+
+ return 0;
+}
+
+/* Read and handle overflow of 32bit MIBs. MIB buffer must be zeroed before. */
+static int yt921x_read_mib(struct yt921x_priv *priv, int port)
+{
+ struct yt921x_port *pp = &priv->ports[port];
+ struct device *dev = to_device(priv);
+ struct yt921x_mib *mib = &pp->mib;
+ int res = 0;
+
+	/* Reading of yt921x_port::mib is not protected by a lock and it is
+	 * pointless to try to keep it consistent, since we have to read the
+	 * registers one by one and there is no way to take a snapshot of the
+	 * MIB stats.
+	 *
+	 * Writing (by this function only) is and should be protected by
+	 * reg_lock.
+	 */
+
+ for (size_t i = 0; i < ARRAY_SIZE(yt921x_mib_descs); i++) {
+ const struct yt921x_mib_desc *desc = &yt921x_mib_descs[i];
+ u32 reg = YT921X_MIBn_DATA0(port) + desc->offset;
+ u64 *valp = &((u64 *)mib)[i];
+ u64 val = *valp;
+ u32 val0;
+ u32 val1;
+
+ res = yt921x_reg_read(priv, reg, &val0);
+ if (res)
+ break;
+
+ if (desc->size <= 1) {
+			if (val0 < (u32)val)
+				/* 32bit counter wrapped since last read */
+				val += (u64)U32_MAX + 1;
+			val &= ~(u64)U32_MAX;
+ val |= val0;
+ } else {
+ res = yt921x_reg_read(priv, reg + 4, &val1);
+ if (res)
+ break;
+ val = ((u64)val1 << 32) | val0;
+ }
+
+ WRITE_ONCE(*valp, val);
+ }
+
+ pp->rx_frames = mib->rx_64byte + mib->rx_65_127byte +
+ mib->rx_128_255byte + mib->rx_256_511byte +
+ mib->rx_512_1023byte + mib->rx_1024_1518byte +
+ mib->rx_jumbo;
+ pp->tx_frames = mib->tx_64byte + mib->tx_65_127byte +
+ mib->tx_128_255byte + mib->tx_256_511byte +
+ mib->tx_512_1023byte + mib->tx_1024_1518byte +
+ mib->tx_jumbo;
+
+ if (res)
+ dev_err(dev, "Failed to %s port %d: %i\n", "read stats for",
+ port, res);
+ return res;
+}
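The inline wraparound handling above reduces to the following self-contained helper (illustrative only, not part of the patch):

/* Keep a running 64-bit total whose low word mirrors the 32bit hardware
 * counter; bump the high word whenever the hardware value went backwards
 * since the last poll.
 */
static u64 example_accumulate_mib32(u64 total, u32 hw)
{
	if (hw < (u32)total)	/* counter wrapped */
		total += (u64)U32_MAX + 1;

	return (total & ~(u64)U32_MAX) | hw;
}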
+
+static void yt921x_poll_mib(struct work_struct *work)
+{
+ struct yt921x_port *pp = container_of_const(work, struct yt921x_port,
+ mib_read.work);
+ struct yt921x_priv *priv = (void *)(pp - pp->index) -
+ offsetof(struct yt921x_priv, ports);
+ unsigned long delay = YT921X_STATS_INTERVAL_JIFFIES;
+ int port = pp->index;
+ int res;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_read_mib(priv, port);
+ mutex_unlock(&priv->reg_lock);
+ if (res)
+ delay *= 4;
+
+ schedule_delayed_work(&pp->mib_read, delay);
+}
+
+static void
+yt921x_dsa_get_strings(struct dsa_switch *ds, int port, u32 stringset,
+ uint8_t *data)
+{
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (size_t i = 0; i < ARRAY_SIZE(yt921x_mib_descs); i++) {
+ const struct yt921x_mib_desc *desc = &yt921x_mib_descs[i];
+
+ if (desc->name)
+ ethtool_puts(&data, desc->name);
+ }
+}
+
+static void
+yt921x_dsa_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ struct yt921x_port *pp = &priv->ports[port];
+ struct yt921x_mib *mib = &pp->mib;
+ size_t j;
+
+ mutex_lock(&priv->reg_lock);
+ yt921x_read_mib(priv, port);
+ mutex_unlock(&priv->reg_lock);
+
+ j = 0;
+ for (size_t i = 0; i < ARRAY_SIZE(yt921x_mib_descs); i++) {
+ const struct yt921x_mib_desc *desc = &yt921x_mib_descs[i];
+
+ if (!desc->name)
+ continue;
+
+ data[j] = ((u64 *)mib)[i];
+ j++;
+ }
+}
+
+static int yt921x_dsa_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ int cnt = 0;
+
+ if (sset != ETH_SS_STATS)
+ return 0;
+
+ for (size_t i = 0; i < ARRAY_SIZE(yt921x_mib_descs); i++) {
+ const struct yt921x_mib_desc *desc = &yt921x_mib_descs[i];
+
+ if (desc->name)
+ cnt++;
+ }
+
+ return cnt;
+}
+
+static void
+yt921x_dsa_get_eth_mac_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ struct yt921x_port *pp = &priv->ports[port];
+ struct yt921x_mib *mib = &pp->mib;
+
+ mutex_lock(&priv->reg_lock);
+ yt921x_read_mib(priv, port);
+ mutex_unlock(&priv->reg_lock);
+
+ mac_stats->FramesTransmittedOK = pp->tx_frames;
+ mac_stats->SingleCollisionFrames = mib->tx_single_collisions;
+ mac_stats->MultipleCollisionFrames = mib->tx_multiple_collisions;
+ mac_stats->FramesReceivedOK = pp->rx_frames;
+ mac_stats->FrameCheckSequenceErrors = mib->rx_crc_errors;
+ mac_stats->AlignmentErrors = mib->rx_alignment_errors;
+ mac_stats->OctetsTransmittedOK = mib->tx_good_bytes;
+ mac_stats->FramesWithDeferredXmissions = mib->tx_deferred;
+ mac_stats->LateCollisions = mib->tx_late_collisions;
+ mac_stats->FramesAbortedDueToXSColls = mib->tx_aborted_errors;
+ /* mac_stats->FramesLostDueToIntMACXmitError */
+ /* mac_stats->CarrierSenseErrors */
+ mac_stats->OctetsReceivedOK = mib->rx_good_bytes;
+ /* mac_stats->FramesLostDueToIntMACRcvError */
+ mac_stats->MulticastFramesXmittedOK = mib->tx_multicast;
+ mac_stats->BroadcastFramesXmittedOK = mib->tx_broadcast;
+ /* mac_stats->FramesWithExcessiveDeferral */
+ mac_stats->MulticastFramesReceivedOK = mib->rx_multicast;
+ mac_stats->BroadcastFramesReceivedOK = mib->rx_broadcast;
+ /* mac_stats->InRangeLengthErrors */
+ /* mac_stats->OutOfRangeLengthField */
+ mac_stats->FrameTooLongErrors = mib->rx_oversize_errors;
+}
+
+static void
+yt921x_dsa_get_eth_ctrl_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ struct yt921x_port *pp = &priv->ports[port];
+ struct yt921x_mib *mib = &pp->mib;
+
+ mutex_lock(&priv->reg_lock);
+ yt921x_read_mib(priv, port);
+ mutex_unlock(&priv->reg_lock);
+
+ ctrl_stats->MACControlFramesTransmitted = mib->tx_pause;
+ ctrl_stats->MACControlFramesReceived = mib->rx_pause;
+ /* ctrl_stats->UnsupportedOpcodesReceived */
+}
+
+static const struct ethtool_rmon_hist_range yt921x_rmon_ranges[] = {
+ { 0, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, YT921X_FRAME_SIZE_MAX },
+ {}
+};
+
+static void
+yt921x_dsa_get_rmon_stats(struct dsa_switch *ds, int port,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ struct yt921x_port *pp = &priv->ports[port];
+ struct yt921x_mib *mib = &pp->mib;
+
+ mutex_lock(&priv->reg_lock);
+ yt921x_read_mib(priv, port);
+ mutex_unlock(&priv->reg_lock);
+
+ *ranges = yt921x_rmon_ranges;
+
+ rmon_stats->undersize_pkts = mib->rx_undersize_errors;
+ rmon_stats->oversize_pkts = mib->rx_oversize_errors;
+ rmon_stats->fragments = mib->rx_alignment_errors;
+ /* rmon_stats->jabbers */
+
+ rmon_stats->hist[0] = mib->rx_64byte;
+ rmon_stats->hist[1] = mib->rx_65_127byte;
+ rmon_stats->hist[2] = mib->rx_128_255byte;
+ rmon_stats->hist[3] = mib->rx_256_511byte;
+ rmon_stats->hist[4] = mib->rx_512_1023byte;
+ rmon_stats->hist[5] = mib->rx_1024_1518byte;
+ rmon_stats->hist[6] = mib->rx_jumbo;
+
+ rmon_stats->hist_tx[0] = mib->tx_64byte;
+ rmon_stats->hist_tx[1] = mib->tx_65_127byte;
+ rmon_stats->hist_tx[2] = mib->tx_128_255byte;
+ rmon_stats->hist_tx[3] = mib->tx_256_511byte;
+ rmon_stats->hist_tx[4] = mib->tx_512_1023byte;
+ rmon_stats->hist_tx[5] = mib->tx_1024_1518byte;
+ rmon_stats->hist_tx[6] = mib->tx_jumbo;
+}
+
+static void
+yt921x_dsa_get_stats64(struct dsa_switch *ds, int port,
+ struct rtnl_link_stats64 *stats)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ struct yt921x_port *pp = &priv->ports[port];
+ struct yt921x_mib *mib = &pp->mib;
+
+ stats->rx_length_errors = mib->rx_undersize_errors +
+ mib->rx_fragment_errors;
+ stats->rx_over_errors = mib->rx_oversize_errors;
+ stats->rx_crc_errors = mib->rx_crc_errors;
+ stats->rx_frame_errors = mib->rx_alignment_errors;
+ /* stats->rx_fifo_errors */
+ /* stats->rx_missed_errors */
+
+ stats->tx_aborted_errors = mib->tx_aborted_errors;
+ /* stats->tx_carrier_errors */
+ stats->tx_fifo_errors = mib->tx_undersize_errors;
+ /* stats->tx_heartbeat_errors */
+ stats->tx_window_errors = mib->tx_late_collisions;
+
+ stats->rx_packets = pp->rx_frames;
+ stats->tx_packets = pp->tx_frames;
+ stats->rx_bytes = mib->rx_good_bytes - ETH_FCS_LEN * stats->rx_packets;
+ stats->tx_bytes = mib->tx_good_bytes - ETH_FCS_LEN * stats->tx_packets;
+ stats->rx_errors = stats->rx_length_errors + stats->rx_over_errors +
+ stats->rx_crc_errors + stats->rx_frame_errors;
+ stats->tx_errors = stats->tx_aborted_errors + stats->tx_fifo_errors +
+ stats->tx_window_errors;
+ stats->rx_dropped = mib->rx_dropped;
+ /* stats->tx_dropped */
+ stats->multicast = mib->rx_multicast;
+ stats->collisions = mib->tx_collisions;
+}
+
+static void
+yt921x_dsa_get_pause_stats(struct dsa_switch *ds, int port,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ struct yt921x_port *pp = &priv->ports[port];
+ struct yt921x_mib *mib = &pp->mib;
+
+ mutex_lock(&priv->reg_lock);
+ yt921x_read_mib(priv, port);
+ mutex_unlock(&priv->reg_lock);
+
+ pause_stats->tx_pause_frames = mib->tx_pause;
+ pause_stats->rx_pause_frames = mib->rx_pause;
+}
+
+static int
+yt921x_set_eee(struct yt921x_priv *priv, int port, struct ethtool_keee *e)
+{
+ /* Poor datasheet for EEE operations; don't ask if you are confused */
+
+ bool enable = e->eee_enabled;
+ u16 new_mask;
+ int res;
+
+ /* Enable / disable global EEE */
+ new_mask = priv->eee_ports_mask;
+ new_mask &= ~BIT(port);
+ new_mask |= !enable ? 0 : BIT(port);
+
+ if (!!new_mask != !!priv->eee_ports_mask) {
+ res = yt921x_reg_toggle_bits(priv, YT921X_PON_STRAP_FUNC,
+ YT921X_PON_STRAP_EEE, !!new_mask);
+ if (res)
+ return res;
+ res = yt921x_reg_toggle_bits(priv, YT921X_PON_STRAP_VAL,
+ YT921X_PON_STRAP_EEE, !!new_mask);
+ if (res)
+ return res;
+ }
+
+ priv->eee_ports_mask = new_mask;
+
+ /* Enable / disable port EEE */
+ res = yt921x_reg_toggle_bits(priv, YT921X_EEE_CTRL,
+ YT921X_EEE_CTRL_ENn(port), enable);
+ if (res)
+ return res;
+ res = yt921x_reg_toggle_bits(priv, YT921X_EEEn_VAL(port),
+ YT921X_EEE_VAL_DATA, enable);
+ if (res)
+ return res;
+
+ return 0;
+}
+
+static int
+yt921x_dsa_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ int res;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_set_eee(priv, port, e);
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static int
+yt921x_dsa_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+	/* Only serves as a packet filter, since the frame size is always set
+	 * to the maximum after reset
+	 */
+
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ int frame_size;
+ int res;
+
+ frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+ if (dsa_port_is_cpu(dp))
+ frame_size += YT921X_TAG_LEN;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_reg_update_bits(priv, YT921X_MACn_FRAME(port),
+ YT921X_MAC_FRAME_SIZE_M,
+ YT921X_MAC_FRAME_SIZE(frame_size));
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static int yt921x_dsa_port_max_mtu(struct dsa_switch *ds, int port)
+{
+	/* Only called for user ports, so exclude the tag length here */
+ return YT921X_FRAME_SIZE_MAX - ETH_HLEN - ETH_FCS_LEN - YT921X_TAG_LEN;
+}
+
+static int
+yt921x_mirror_del(struct yt921x_priv *priv, int port, bool ingress)
+{
+ u32 mask;
+
+ if (ingress)
+ mask = YT921X_MIRROR_IGR_PORTn(port);
+ else
+ mask = YT921X_MIRROR_EGR_PORTn(port);
+ return yt921x_reg_clear_bits(priv, YT921X_MIRROR, mask);
+}
+
+static int
+yt921x_mirror_add(struct yt921x_priv *priv, int port, bool ingress,
+ int to_local_port, struct netlink_ext_ack *extack)
+{
+ u32 srcs;
+ u32 ctrl;
+ u32 val;
+ u32 dst;
+ int res;
+
+ if (ingress)
+ srcs = YT921X_MIRROR_IGR_PORTn(port);
+ else
+ srcs = YT921X_MIRROR_EGR_PORTn(port);
+ dst = YT921X_MIRROR_PORT(to_local_port);
+
+ res = yt921x_reg_read(priv, YT921X_MIRROR, &val);
+ if (res)
+ return res;
+
+ /* other mirror tasks & different dst port -> conflict */
+ if ((val & ~srcs & (YT921X_MIRROR_EGR_PORTS_M |
+ YT921X_MIRROR_IGR_PORTS_M)) &&
+ (val & YT921X_MIRROR_PORT_M) != dst) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Sniffer port is already configured, delete existing rules & retry");
+ return -EBUSY;
+ }
+
+ ctrl = val & ~YT921X_MIRROR_PORT_M;
+ ctrl |= srcs;
+ ctrl |= dst;
+
+ if (ctrl == val)
+ return 0;
+
+ return yt921x_reg_write(priv, YT921X_MIRROR, ctrl);
+}
+
+static void
+yt921x_dsa_port_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ struct device *dev = to_device(priv);
+ int res;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_mirror_del(priv, port, mirror->ingress);
+ mutex_unlock(&priv->reg_lock);
+
+ if (res)
+ dev_err(dev, "Failed to %s port %d: %i\n", "unmirror",
+ port, res);
+}
+
+static int
+yt921x_dsa_port_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ int res;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_mirror_add(priv, port, ingress,
+ mirror->to_local_port, extack);
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
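+/* Wait for the pending FDB operation to finish and return the RESULT register */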
+static int yt921x_fdb_wait(struct yt921x_priv *priv, u32 *valp)
+{
+ struct device *dev = to_device(priv);
+ u32 val = YT921X_FDB_RESULT_DONE;
+ int res;
+
+ res = yt921x_reg_wait(priv, YT921X_FDB_RESULT, YT921X_FDB_RESULT_DONE,
+ &val);
+ if (res) {
+ dev_err(dev, "FDB probably stuck\n");
+ return res;
+ }
+
+ *valp = val;
+ return 0;
+}
+
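+/* Load the MAC address and FID (plus @ctrl1 flags) into FDB_IN0/FDB_IN1 */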
+static int
+yt921x_fdb_in01(struct yt921x_priv *priv, const unsigned char *addr,
+ u16 vid, u32 ctrl1)
+{
+ u32 ctrl;
+ int res;
+
+ ctrl = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
+ res = yt921x_reg_write(priv, YT921X_FDB_IN0, ctrl);
+ if (res)
+ return res;
+
+ ctrl = ctrl1 | YT921X_FDB_IO1_FID(vid) | (addr[4] << 8) | addr[5];
+ return yt921x_reg_write(priv, YT921X_FDB_IN1, ctrl);
+}
+
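+/* Look up an entry by MAC + FID; *indexp is YT921X_FDB_NUM if not found */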
+static int
+yt921x_fdb_has(struct yt921x_priv *priv, const unsigned char *addr, u16 vid,
+ u16 *indexp)
+{
+ u32 ctrl;
+ u32 val;
+ int res;
+
+ res = yt921x_fdb_in01(priv, addr, vid, 0);
+ if (res)
+ return res;
+
+ ctrl = 0;
+ res = yt921x_reg_write(priv, YT921X_FDB_IN2, ctrl);
+ if (res)
+ return res;
+
+ ctrl = YT921X_FDB_OP_OP_GET_ONE | YT921X_FDB_OP_START;
+ res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl);
+ if (res)
+ return res;
+
+ res = yt921x_fdb_wait(priv, &val);
+ if (res)
+ return res;
+ if (val & YT921X_FDB_RESULT_NOTFOUND) {
+ *indexp = YT921X_FDB_NUM;
+ return 0;
+ }
+
+ *indexp = FIELD_GET(YT921X_FDB_RESULT_INDEX_M, val);
+ return 0;
+}
+
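+/* Read back the entry from the last FDB operation; *ports_maskp is 0 if
+ * no valid entry was found
+ */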
+static int
+yt921x_fdb_read(struct yt921x_priv *priv, unsigned char *addr, u16 *vidp,
+ u16 *ports_maskp, u16 *indexp, u8 *statusp)
+{
+ struct device *dev = to_device(priv);
+ u16 index;
+ u32 data0;
+ u32 data1;
+ u32 data2;
+ u32 val;
+ int res;
+
+ res = yt921x_fdb_wait(priv, &val);
+ if (res)
+ return res;
+ if (val & YT921X_FDB_RESULT_NOTFOUND) {
+ *ports_maskp = 0;
+ return 0;
+ }
+ index = FIELD_GET(YT921X_FDB_RESULT_INDEX_M, val);
+
+ res = yt921x_reg_read(priv, YT921X_FDB_OUT1, &data1);
+ if (res)
+ return res;
+ if ((data1 & YT921X_FDB_IO1_STATUS_M) ==
+ YT921X_FDB_IO1_STATUS_INVALID) {
+ *ports_maskp = 0;
+ return 0;
+ }
+
+ res = yt921x_reg_read(priv, YT921X_FDB_OUT0, &data0);
+ if (res)
+ return res;
+ res = yt921x_reg_read(priv, YT921X_FDB_OUT2, &data2);
+ if (res)
+ return res;
+
+ addr[0] = data0 >> 24;
+ addr[1] = data0 >> 16;
+ addr[2] = data0 >> 8;
+ addr[3] = data0;
+ addr[4] = data1 >> 8;
+ addr[5] = data1;
+ *vidp = FIELD_GET(YT921X_FDB_IO1_FID_M, data1);
+ *indexp = index;
+ *ports_maskp = FIELD_GET(YT921X_FDB_IO2_EGR_PORTS_M, data2);
+ *statusp = FIELD_GET(YT921X_FDB_IO1_STATUS_M, data1);
+
+ dev_dbg(dev,
+ "%s: index 0x%x, mac %02x:%02x:%02x:%02x:%02x:%02x, vid %d, ports 0x%x, status %d\n",
+ __func__, *indexp, addr[0], addr[1], addr[2], addr[3],
+ addr[4], addr[5], *vidp, *ports_maskp, *statusp);
+ return 0;
+}
+
+static int
+yt921x_fdb_dump(struct yt921x_priv *priv, u16 ports_mask,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ unsigned char addr[ETH_ALEN];
+ u8 status;
+ u16 pmask;
+ u16 index;
+ u32 ctrl;
+ u16 vid;
+ int res;
+
+ ctrl = YT921X_FDB_OP_INDEX(0) | YT921X_FDB_OP_MODE_INDEX |
+ YT921X_FDB_OP_OP_GET_ONE | YT921X_FDB_OP_START;
+ res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl);
+ if (res)
+ return res;
+ res = yt921x_fdb_read(priv, addr, &vid, &pmask, &index, &status);
+ if (res)
+ return res;
+ if ((pmask & ports_mask) && !is_multicast_ether_addr(addr)) {
+ res = cb(addr, vid,
+ status == YT921X_FDB_ENTRY_STATUS_STATIC, data);
+ if (res)
+ return res;
+ }
+
+ ctrl = YT921X_FDB_IO2_EGR_PORTS(ports_mask);
+ res = yt921x_reg_write(priv, YT921X_FDB_IN2, ctrl);
+ if (res)
+ return res;
+
+ index = 0;
+ do {
+ ctrl = YT921X_FDB_OP_INDEX(index) | YT921X_FDB_OP_MODE_INDEX |
+ YT921X_FDB_OP_NEXT_TYPE_UCAST_PORT |
+ YT921X_FDB_OP_OP_GET_NEXT | YT921X_FDB_OP_START;
+ res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl);
+ if (res)
+ return res;
+
+ res = yt921x_fdb_read(priv, addr, &vid, &pmask, &index,
+ &status);
+ if (res)
+ return res;
+ if (!pmask)
+ break;
+
+ if ((pmask & ports_mask) && !is_multicast_ether_addr(addr)) {
+ res = cb(addr, vid,
+ status == YT921X_FDB_ENTRY_STATUS_STATIC,
+ data);
+ if (res)
+ return res;
+ }
+
+ /* Never call GET_NEXT with 4095, otherwise it will hang
+ * forever until a reset!
+ */
+ } while (index < YT921X_FDB_NUM - 1);
+
+ return 0;
+}
+
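+/* Flush FDB entries for @ports_mask; a @vid >= 4096 flushes regardless of VID */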
+static int
+yt921x_fdb_flush_raw(struct yt921x_priv *priv, u16 ports_mask, u16 vid,
+ bool flush_static)
+{
+ u32 ctrl;
+ u32 val;
+ int res;
+
+ if (vid < 4096) {
+ ctrl = YT921X_FDB_IO1_FID(vid);
+ res = yt921x_reg_write(priv, YT921X_FDB_IN1, ctrl);
+ if (res)
+ return res;
+ }
+
+ ctrl = YT921X_FDB_IO2_EGR_PORTS(ports_mask);
+ res = yt921x_reg_write(priv, YT921X_FDB_IN2, ctrl);
+ if (res)
+ return res;
+
+ ctrl = YT921X_FDB_OP_OP_FLUSH | YT921X_FDB_OP_START;
+ if (vid >= 4096)
+ ctrl |= YT921X_FDB_OP_FLUSH_PORT;
+ else
+ ctrl |= YT921X_FDB_OP_FLUSH_PORT_VID;
+ if (flush_static)
+ ctrl |= YT921X_FDB_OP_FLUSH_STATIC;
+ res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl);
+ if (res)
+ return res;
+
+ res = yt921x_fdb_wait(priv, &val);
+ if (res)
+ return res;
+
+ return 0;
+}
+
+static int
+yt921x_fdb_flush_port(struct yt921x_priv *priv, int port, bool flush_static)
+{
+ return yt921x_fdb_flush_raw(priv, BIT(port), 4096, flush_static);
+}
+
+static int
+yt921x_fdb_add_index_in12(struct yt921x_priv *priv, u16 index, u16 ctrl1,
+ u16 ctrl2)
+{
+ u32 ctrl;
+ u32 val;
+ int res;
+
+ res = yt921x_reg_write(priv, YT921X_FDB_IN1, ctrl1);
+ if (res)
+ return res;
+ res = yt921x_reg_write(priv, YT921X_FDB_IN2, ctrl2);
+ if (res)
+ return res;
+
+ ctrl = YT921X_FDB_OP_INDEX(index) | YT921X_FDB_OP_MODE_INDEX |
+ YT921X_FDB_OP_OP_ADD | YT921X_FDB_OP_START;
+ res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl);
+ if (res)
+ return res;
+
+ return yt921x_fdb_wait(priv, &val);
+}
+
+static int
+yt921x_fdb_add(struct yt921x_priv *priv, const unsigned char *addr, u16 vid,
+ u16 ports_mask)
+{
+ u32 ctrl;
+ u32 val;
+ int res;
+
+ ctrl = YT921X_FDB_IO1_STATUS_STATIC;
+ res = yt921x_fdb_in01(priv, addr, vid, ctrl);
+ if (res)
+ return res;
+
+ ctrl = YT921X_FDB_IO2_EGR_PORTS(ports_mask);
+ res = yt921x_reg_write(priv, YT921X_FDB_IN2, ctrl);
+ if (res)
+ return res;
+
+ ctrl = YT921X_FDB_OP_OP_ADD | YT921X_FDB_OP_START;
+ res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl);
+ if (res)
+ return res;
+
+ return yt921x_fdb_wait(priv, &val);
+}
+
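+/* Remove @ports_mask from an existing entry; delete the entry once no
+ * egress ports remain
+ */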
+static int
+yt921x_fdb_leave(struct yt921x_priv *priv, const unsigned char *addr,
+ u16 vid, u16 ports_mask)
+{
+ u16 index;
+ u32 ctrl1;
+ u32 ctrl2;
+ u32 ctrl;
+ u32 val2;
+ u32 val;
+ int res;
+
+ /* Check for presence */
+ res = yt921x_fdb_has(priv, addr, vid, &index);
+ if (res)
+ return res;
+ if (index >= YT921X_FDB_NUM)
+ return 0;
+
+ /* Check if action required */
+ res = yt921x_reg_read(priv, YT921X_FDB_OUT2, &val2);
+ if (res)
+ return res;
+
+ ctrl2 = val2 & ~YT921X_FDB_IO2_EGR_PORTS(ports_mask);
+ if (ctrl2 == val2)
+ return 0;
+ if (!(ctrl2 & YT921X_FDB_IO2_EGR_PORTS_M)) {
+ ctrl = YT921X_FDB_OP_OP_DEL | YT921X_FDB_OP_START;
+ res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl);
+ if (res)
+ return res;
+
+ return yt921x_fdb_wait(priv, &val);
+ }
+
+ res = yt921x_reg_read(priv, YT921X_FDB_OUT1, &ctrl1);
+ if (res)
+ return res;
+
+ return yt921x_fdb_add_index_in12(priv, index, ctrl1, ctrl2);
+}
+
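+/* Add @ports_mask to the entry for @addr/@vid, creating a static entry if
+ * it does not exist yet
+ */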
+static int
+yt921x_fdb_join(struct yt921x_priv *priv, const unsigned char *addr, u16 vid,
+ u16 ports_mask)
+{
+ u16 index;
+ u32 ctrl1;
+ u32 ctrl2;
+ u32 val1;
+ u32 val2;
+ int res;
+
+ /* Check for presence */
+ res = yt921x_fdb_has(priv, addr, vid, &index);
+ if (res)
+ return res;
+ if (index >= YT921X_FDB_NUM)
+ return yt921x_fdb_add(priv, addr, vid, ports_mask);
+
+ /* Check if action required */
+ res = yt921x_reg_read(priv, YT921X_FDB_OUT1, &val1);
+ if (res)
+ return res;
+ res = yt921x_reg_read(priv, YT921X_FDB_OUT2, &val2);
+ if (res)
+ return res;
+
+ ctrl1 = val1 & ~YT921X_FDB_IO1_STATUS_M;
+ ctrl1 |= YT921X_FDB_IO1_STATUS_STATIC;
+ ctrl2 = val2 | YT921X_FDB_IO2_EGR_PORTS(ports_mask);
+ if (ctrl1 == val1 && ctrl2 == val2)
+ return 0;
+
+ return yt921x_fdb_add_index_in12(priv, index, ctrl1, ctrl2);
+}
+
+static int
+yt921x_dsa_port_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ int res;
+
+ mutex_lock(&priv->reg_lock);
+	/* The hardware FDB is shared between fdb and mdb; "bridge fdb show"
+	 * only wants to see unicast entries
+	 */
+ res = yt921x_fdb_dump(priv, BIT(port), cb, data);
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static void yt921x_dsa_port_fast_age(struct dsa_switch *ds, int port)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ struct device *dev = to_device(priv);
+ int res;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_fdb_flush_port(priv, port, false);
+ mutex_unlock(&priv->reg_lock);
+
+ if (res)
+ dev_err(dev, "Failed to %s port %d: %i\n", "clear FDB for",
+ port, res);
+}
+
+static int
+yt921x_dsa_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ u32 ctrl;
+ int res;
+
+	/* The AGEING reg is set in 5s steps */
+ ctrl = clamp(msecs / 5000, 1, U16_MAX);
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_reg_write(priv, YT921X_AGEING, ctrl);
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static int
+yt921x_dsa_port_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid, struct dsa_db db)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ int res;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_fdb_leave(priv, addr, vid, BIT(port));
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static int
+yt921x_dsa_port_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid, struct dsa_db db)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ int res;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_fdb_join(priv, addr, vid, BIT(port));
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static int
+yt921x_dsa_port_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ const unsigned char *addr = mdb->addr;
+ u16 vid = mdb->vid;
+ int res;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_fdb_leave(priv, addr, vid, BIT(port));
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static int
+yt921x_dsa_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ const unsigned char *addr = mdb->addr;
+ u16 vid = mdb->vid;
+ int res;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_fdb_join(priv, addr, vid, BIT(port));
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static int
+yt921x_port_set_pvid(struct yt921x_priv *priv, int port, u16 vid)
+{
+ u32 mask;
+ u32 ctrl;
+
+ mask = YT921X_PORT_VLAN_CTRL_CVID_M;
+ ctrl = YT921X_PORT_VLAN_CTRL_CVID(vid);
+ return yt921x_reg_update_bits(priv, YT921X_PORTn_VLAN_CTRL(port),
+ mask, ctrl);
+}
+
+static int
+yt921x_vlan_filtering(struct yt921x_priv *priv, int port, bool vlan_filtering)
+{
+ struct dsa_port *dp = dsa_to_port(&priv->ds, port);
+ struct net_device *bdev;
+ u16 pvid;
+ u32 mask;
+ u32 ctrl;
+ int res;
+
+ bdev = dsa_port_bridge_dev_get(dp);
+
+ if (!bdev || !vlan_filtering)
+ pvid = YT921X_VID_UNWARE;
+ else
+ br_vlan_get_pvid(bdev, &pvid);
+ res = yt921x_port_set_pvid(priv, port, pvid);
+ if (res)
+ return res;
+
+ mask = YT921X_PORT_VLAN_CTRL1_CVLAN_DROP_TAGGED |
+ YT921X_PORT_VLAN_CTRL1_CVLAN_DROP_UNTAGGED;
+ ctrl = 0;
+ /* Do not drop tagged frames here; let VLAN_IGR_FILTER do it */
+ if (vlan_filtering && !pvid)
+ ctrl |= YT921X_PORT_VLAN_CTRL1_CVLAN_DROP_UNTAGGED;
+ res = yt921x_reg_update_bits(priv, YT921X_PORTn_VLAN_CTRL1(port),
+ mask, ctrl);
+ if (res)
+ return res;
+
+ res = yt921x_reg_toggle_bits(priv, YT921X_VLAN_IGR_FILTER,
+ YT921X_VLAN_IGR_FILTER_PORTn(port),
+ vlan_filtering);
+ if (res)
+ return res;
+
+ /* Turn on / off VLAN awareness */
+ mask = YT921X_PORT_IGR_TPIDn_CTAG_M;
+ if (!vlan_filtering)
+ ctrl = 0;
+ else
+ ctrl = YT921X_PORT_IGR_TPIDn_CTAG(0);
+ res = yt921x_reg_update_bits(priv, YT921X_PORTn_IGR_TPID(port),
+ mask, ctrl);
+ if (res)
+ return res;
+
+ return 0;
+}
+
+static int
+yt921x_vlan_del(struct yt921x_priv *priv, int port, u16 vid)
+{
+ u64 mask64;
+
+ mask64 = YT921X_VLAN_CTRL_PORTS(port) |
+ YT921X_VLAN_CTRL_UNTAG_PORTn(port);
+
+ return yt921x_reg64_clear_bits(priv, YT921X_VLANn_CTRL(vid), mask64);
+}
+
+static int
+yt921x_vlan_add(struct yt921x_priv *priv, int port, u16 vid, bool untagged)
+{
+ u64 mask64;
+ u64 ctrl64;
+
+ mask64 = YT921X_VLAN_CTRL_PORTn(port) |
+ YT921X_VLAN_CTRL_PORTS(priv->cpu_ports_mask);
+ ctrl64 = mask64;
+
+ mask64 |= YT921X_VLAN_CTRL_UNTAG_PORTn(port);
+ if (untagged)
+ ctrl64 |= YT921X_VLAN_CTRL_UNTAG_PORTn(port);
+
+ return yt921x_reg64_update_bits(priv, YT921X_VLANn_CTRL(vid),
+ mask64, ctrl64);
+}
+
+static int
+yt921x_pvid_clear(struct yt921x_priv *priv, int port)
+{
+ struct dsa_port *dp = dsa_to_port(&priv->ds, port);
+ bool vlan_filtering;
+ u32 mask;
+ int res;
+
+ vlan_filtering = dsa_port_is_vlan_filtering(dp);
+
+ res = yt921x_port_set_pvid(priv, port,
+ vlan_filtering ? 0 : YT921X_VID_UNWARE);
+ if (res)
+ return res;
+
+ if (vlan_filtering) {
+ mask = YT921X_PORT_VLAN_CTRL1_CVLAN_DROP_UNTAGGED;
+ res = yt921x_reg_set_bits(priv, YT921X_PORTn_VLAN_CTRL1(port),
+ mask);
+ if (res)
+ return res;
+ }
+
+ return 0;
+}
+
+static int
+yt921x_pvid_set(struct yt921x_priv *priv, int port, u16 vid)
+{
+ struct dsa_port *dp = dsa_to_port(&priv->ds, port);
+ bool vlan_filtering;
+ u32 mask;
+ int res;
+
+ vlan_filtering = dsa_port_is_vlan_filtering(dp);
+
+ if (vlan_filtering) {
+ res = yt921x_port_set_pvid(priv, port, vid);
+ if (res)
+ return res;
+ }
+
+ mask = YT921X_PORT_VLAN_CTRL1_CVLAN_DROP_UNTAGGED;
+ res = yt921x_reg_clear_bits(priv, YT921X_PORTn_VLAN_CTRL1(port), mask);
+ if (res)
+ return res;
+
+ return 0;
+}
+
+static int
+yt921x_dsa_port_vlan_filtering(struct dsa_switch *ds, int port,
+ bool vlan_filtering,
+ struct netlink_ext_ack *extack)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ int res;
+
+ if (dsa_is_cpu_port(ds, port))
+ return 0;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_vlan_filtering(priv, port, vlan_filtering);
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static int
+yt921x_dsa_port_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ u16 vid = vlan->vid;
+ u16 pvid;
+ int res;
+
+ if (dsa_is_cpu_port(ds, port))
+ return 0;
+
+ mutex_lock(&priv->reg_lock);
+ do {
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct net_device *bdev;
+
+ res = yt921x_vlan_del(priv, port, vid);
+ if (res)
+ break;
+
+ bdev = dsa_port_bridge_dev_get(dp);
+ if (bdev) {
+ br_vlan_get_pvid(bdev, &pvid);
+ if (pvid == vid)
+ res = yt921x_pvid_clear(priv, port);
+ }
+ } while (0);
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static int
+yt921x_dsa_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ u16 vid = vlan->vid;
+ u16 pvid;
+ int res;
+
+ /* CPU port is supposed to be a member of every VLAN; see
+ * yt921x_vlan_add() and yt921x_port_setup()
+ */
+ if (dsa_is_cpu_port(ds, port))
+ return 0;
+
+ mutex_lock(&priv->reg_lock);
+ do {
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct net_device *bdev;
+
+ res = yt921x_vlan_add(priv, port, vid,
+ vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED);
+ if (res)
+ break;
+
+ bdev = dsa_port_bridge_dev_get(dp);
+ if (bdev) {
+ if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
+ res = yt921x_pvid_set(priv, port, vid);
+ } else {
+ br_vlan_get_pvid(bdev, &pvid);
+ if (pvid == vid)
+ res = yt921x_pvid_clear(priv, port);
+ }
+ }
+ } while (0);
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static int yt921x_userport_standalone(struct yt921x_priv *priv, int port)
+{
+ u32 mask;
+ u32 ctrl;
+ int res;
+
+ ctrl = ~priv->cpu_ports_mask;
+ res = yt921x_reg_write(priv, YT921X_PORTn_ISOLATION(port), ctrl);
+ if (res)
+ return res;
+
+ /* Turn off FDB learning to prevent FDB pollution */
+ mask = YT921X_PORT_LEARN_DIS;
+ res = yt921x_reg_set_bits(priv, YT921X_PORTn_LEARN(port), mask);
+ if (res)
+ return res;
+
+ /* Turn off VLAN awareness */
+ mask = YT921X_PORT_IGR_TPIDn_CTAG_M;
+ res = yt921x_reg_clear_bits(priv, YT921X_PORTn_IGR_TPID(port), mask);
+ if (res)
+ return res;
+
+	/* Irrelevant since learning is off and all packets are trapped;
+	 * set it anyway
+	 */
+ res = yt921x_port_set_pvid(priv, port, YT921X_VID_UNWARE);
+ if (res)
+ return res;
+
+ return 0;
+}
+
+static int yt921x_userport_bridge(struct yt921x_priv *priv, int port)
+{
+ u32 mask;
+ int res;
+
+ mask = YT921X_PORT_LEARN_DIS;
+ res = yt921x_reg_clear_bits(priv, YT921X_PORTn_LEARN(port), mask);
+ if (res)
+ return res;
+
+ return 0;
+}
+
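+/* Block traffic from every other user port towards @port */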
+static int yt921x_isolate(struct yt921x_priv *priv, int port)
+{
+ u32 mask;
+ int res;
+
+ mask = BIT(port);
+ for (int i = 0; i < YT921X_PORT_NUM; i++) {
+ if ((BIT(i) & priv->cpu_ports_mask) || i == port)
+ continue;
+
+ res = yt921x_reg_set_bits(priv, YT921X_PORTn_ISOLATION(i),
+ mask);
+ if (res)
+ return res;
+ }
+
+ return 0;
+}
+
+/* Make sure to include the CPU port in ports_mask, or your bridge will
+ * not have it.
+ */
+static int yt921x_bridge(struct yt921x_priv *priv, u16 ports_mask)
+{
+ unsigned long targets_mask = ports_mask & ~priv->cpu_ports_mask;
+ u32 isolated_mask;
+ u32 ctrl;
+ int port;
+ int res;
+
+ isolated_mask = 0;
+ for_each_set_bit(port, &targets_mask, YT921X_PORT_NUM) {
+ struct yt921x_port *pp = &priv->ports[port];
+
+ if (pp->isolated)
+ isolated_mask |= BIT(port);
+ }
+
+ /* Block from non-cpu bridge ports ... */
+ for_each_set_bit(port, &targets_mask, YT921X_PORT_NUM) {
+ struct yt921x_port *pp = &priv->ports[port];
+
+ /* to non-bridge ports */
+ ctrl = ~ports_mask;
+ /* to isolated ports when isolated */
+ if (pp->isolated)
+ ctrl |= isolated_mask;
+ /* to itself when non-hairpin */
+ if (!pp->hairpin)
+ ctrl |= BIT(port);
+ else
+ ctrl &= ~BIT(port);
+
+ res = yt921x_reg_write(priv, YT921X_PORTn_ISOLATION(port),
+ ctrl);
+ if (res)
+ return res;
+ }
+
+ return 0;
+}
+
+static int yt921x_bridge_leave(struct yt921x_priv *priv, int port)
+{
+ int res;
+
+ res = yt921x_userport_standalone(priv, port);
+ if (res)
+ return res;
+
+ res = yt921x_isolate(priv, port);
+ if (res)
+ return res;
+
+ return 0;
+}
+
+static int
+yt921x_bridge_join(struct yt921x_priv *priv, int port, u16 ports_mask)
+{
+ int res;
+
+ res = yt921x_userport_bridge(priv, port);
+ if (res)
+ return res;
+
+ res = yt921x_bridge(priv, ports_mask);
+ if (res)
+ return res;
+
+ return 0;
+}
+
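+/* Collect the mask of user ports offloading bridge @bdev */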
+static u32
+dsa_bridge_ports(struct dsa_switch *ds, const struct net_device *bdev)
+{
+ struct dsa_port *dp;
+ u32 mask = 0;
+
+ dsa_switch_for_each_user_port(dp, ds)
+ if (dsa_port_offloads_bridge_dev(dp, bdev))
+ mask |= BIT(dp->index);
+
+ return mask;
+}
+
+static int
+yt921x_bridge_flags(struct yt921x_priv *priv, int port,
+ struct switchdev_brport_flags flags)
+{
+ struct yt921x_port *pp = &priv->ports[port];
+ bool do_flush;
+ u32 mask;
+ int res;
+
+ if (flags.mask & BR_LEARNING) {
+ bool learning = flags.val & BR_LEARNING;
+
+ mask = YT921X_PORT_LEARN_DIS;
+ res = yt921x_reg_toggle_bits(priv, YT921X_PORTn_LEARN(port),
+ mask, !learning);
+ if (res)
+ return res;
+ }
+
+ /* BR_FLOOD, BR_MCAST_FLOOD: see the comment where ACT_UNK_ACTn_TRAP
+ * is set
+ */
+
+	/* BR_BCAST_FLOOD: we can filter broadcast frames, but cannot trap them */
+
+ do_flush = false;
+ if (flags.mask & BR_HAIRPIN_MODE) {
+ pp->hairpin = flags.val & BR_HAIRPIN_MODE;
+ do_flush = true;
+ }
+ if (flags.mask & BR_ISOLATED) {
+ pp->isolated = flags.val & BR_ISOLATED;
+ do_flush = true;
+ }
+ if (do_flush) {
+ struct dsa_switch *ds = &priv->ds;
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct net_device *bdev;
+
+ bdev = dsa_port_bridge_dev_get(dp);
+ if (bdev) {
+ u32 ports_mask;
+
+ ports_mask = dsa_bridge_ports(ds, bdev);
+ ports_mask |= priv->cpu_ports_mask;
+ res = yt921x_bridge(priv, ports_mask);
+ if (res)
+ return res;
+ }
+ }
+
+ return 0;
+}
+
+static int
+yt921x_dsa_port_pre_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ if (flags.mask & ~(BR_HAIRPIN_MODE | BR_LEARNING | BR_FLOOD |
+ BR_MCAST_FLOOD | BR_ISOLATED))
+ return -EINVAL;
+ return 0;
+}
+
+static int
+yt921x_dsa_port_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ int res;
+
+ if (dsa_is_cpu_port(ds, port))
+ return 0;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_bridge_flags(priv, port, flags);
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static void
+yt921x_dsa_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ struct device *dev = to_device(priv);
+ int res;
+
+ if (dsa_is_cpu_port(ds, port))
+ return;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_bridge_leave(priv, port);
+ mutex_unlock(&priv->reg_lock);
+
+ if (res)
+ dev_err(dev, "Failed to %s port %d: %i\n", "unbridge",
+ port, res);
+}
+
+static int
+yt921x_dsa_port_bridge_join(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge, bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ u16 ports_mask;
+ int res;
+
+ if (dsa_is_cpu_port(ds, port))
+ return 0;
+
+ ports_mask = dsa_bridge_ports(ds, bridge.dev);
+ ports_mask |= priv->cpu_ports_mask;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_bridge_join(priv, port, ports_mask);
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static int
+yt921x_dsa_port_mst_state_set(struct dsa_switch *ds, int port,
+ const struct switchdev_mst_state *st)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ u32 mask;
+ u32 ctrl;
+ int res;
+
+ mask = YT921X_STP_PORTn_M(port);
+ switch (st->state) {
+ case BR_STATE_DISABLED:
+ ctrl = YT921X_STP_PORTn_DISABLED(port);
+ break;
+ case BR_STATE_LISTENING:
+ case BR_STATE_LEARNING:
+ ctrl = YT921X_STP_PORTn_LEARNING(port);
+ break;
+ case BR_STATE_FORWARDING:
+ default:
+ ctrl = YT921X_STP_PORTn_FORWARD(port);
+ break;
+ case BR_STATE_BLOCKING:
+ ctrl = YT921X_STP_PORTn_BLOCKING(port);
+ break;
+ }
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_reg_update_bits(priv, YT921X_STPn(st->msti), mask, ctrl);
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static int
+yt921x_dsa_vlan_msti_set(struct dsa_switch *ds, struct dsa_bridge bridge,
+ const struct switchdev_vlan_msti *msti)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ u64 mask64;
+ u64 ctrl64;
+ int res;
+
+ if (!msti->vid)
+ return -EINVAL;
+ if (!msti->msti || msti->msti >= YT921X_MSTI_NUM)
+ return -EINVAL;
+
+ mask64 = YT921X_VLAN_CTRL_STP_ID_M;
+ ctrl64 = YT921X_VLAN_CTRL_STP_ID(msti->msti);
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_reg64_update_bits(priv, YT921X_VLANn_CTRL(msti->vid),
+ mask64, ctrl64);
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static void
+yt921x_dsa_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct device *dev = to_device(priv);
+ bool learning;
+ u32 mask;
+ u32 ctrl;
+ int res;
+
+ mask = YT921X_STP_PORTn_M(port);
+ learning = false;
+ switch (state) {
+ case BR_STATE_DISABLED:
+ ctrl = YT921X_STP_PORTn_DISABLED(port);
+ break;
+ case BR_STATE_LISTENING:
+ ctrl = YT921X_STP_PORTn_LEARNING(port);
+ break;
+ case BR_STATE_LEARNING:
+ ctrl = YT921X_STP_PORTn_LEARNING(port);
+ learning = dp->learning;
+ break;
+ case BR_STATE_FORWARDING:
+ default:
+ ctrl = YT921X_STP_PORTn_FORWARD(port);
+ learning = dp->learning;
+ break;
+ case BR_STATE_BLOCKING:
+ ctrl = YT921X_STP_PORTn_BLOCKING(port);
+ break;
+ }
+
+ mutex_lock(&priv->reg_lock);
+ do {
+ res = yt921x_reg_update_bits(priv, YT921X_STPn(0), mask, ctrl);
+ if (res)
+ break;
+
+ mask = YT921X_PORT_LEARN_DIS;
+ ctrl = !learning ? YT921X_PORT_LEARN_DIS : 0;
+ res = yt921x_reg_update_bits(priv, YT921X_PORTn_LEARN(port),
+ mask, ctrl);
+ } while (0);
+ mutex_unlock(&priv->reg_lock);
+
+ if (res)
+ dev_err(dev, "Failed to %s port %d: %i\n", "set STP state for",
+ port, res);
+}
+
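+/* Force the port link down: disable MAC RX/TX, and drop the forced
+ * SerDes/xMII link on external ports
+ */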
+static int yt921x_port_down(struct yt921x_priv *priv, int port)
+{
+ u32 mask;
+ int res;
+
+ mask = YT921X_PORT_LINK | YT921X_PORT_RX_MAC_EN | YT921X_PORT_TX_MAC_EN;
+ res = yt921x_reg_clear_bits(priv, YT921X_PORTn_CTRL(port), mask);
+ if (res)
+ return res;
+
+ if (yt921x_port_is_external(port)) {
+ mask = YT921X_SERDES_LINK;
+ res = yt921x_reg_clear_bits(priv, YT921X_SERDESn(port), mask);
+ if (res)
+ return res;
+
+ mask = YT921X_XMII_LINK;
+ res = yt921x_reg_clear_bits(priv, YT921X_XMIIn(port), mask);
+ if (res)
+ return res;
+ }
+
+ return 0;
+}
+
+static int
+yt921x_port_up(struct yt921x_priv *priv, int port, unsigned int mode,
+ phy_interface_t interface, int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ u32 mask;
+ u32 ctrl;
+ int res;
+
+ switch (speed) {
+ case SPEED_10:
+ ctrl = YT921X_PORT_SPEED_10;
+ break;
+ case SPEED_100:
+ ctrl = YT921X_PORT_SPEED_100;
+ break;
+ case SPEED_1000:
+ ctrl = YT921X_PORT_SPEED_1000;
+ break;
+ case SPEED_2500:
+ ctrl = YT921X_PORT_SPEED_2500;
+ break;
+ case SPEED_10000:
+ ctrl = YT921X_PORT_SPEED_10000;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (duplex == DUPLEX_FULL)
+ ctrl |= YT921X_PORT_DUPLEX_FULL;
+ if (tx_pause)
+ ctrl |= YT921X_PORT_TX_PAUSE;
+ if (rx_pause)
+ ctrl |= YT921X_PORT_RX_PAUSE;
+ ctrl |= YT921X_PORT_RX_MAC_EN | YT921X_PORT_TX_MAC_EN;
+ res = yt921x_reg_write(priv, YT921X_PORTn_CTRL(port), ctrl);
+ if (res)
+ return res;
+
+ if (yt921x_port_is_external(port)) {
+ mask = YT921X_SERDES_SPEED_M;
+ switch (speed) {
+ case SPEED_10:
+ ctrl = YT921X_SERDES_SPEED_10;
+ break;
+ case SPEED_100:
+ ctrl = YT921X_SERDES_SPEED_100;
+ break;
+ case SPEED_1000:
+ ctrl = YT921X_SERDES_SPEED_1000;
+ break;
+ case SPEED_2500:
+ ctrl = YT921X_SERDES_SPEED_2500;
+ break;
+ case SPEED_10000:
+ ctrl = YT921X_SERDES_SPEED_10000;
+ break;
+ default:
+ return -EINVAL;
+ }
+ mask |= YT921X_SERDES_DUPLEX_FULL;
+ if (duplex == DUPLEX_FULL)
+ ctrl |= YT921X_SERDES_DUPLEX_FULL;
+ mask |= YT921X_SERDES_TX_PAUSE;
+ if (tx_pause)
+ ctrl |= YT921X_SERDES_TX_PAUSE;
+ mask |= YT921X_SERDES_RX_PAUSE;
+ if (rx_pause)
+ ctrl |= YT921X_SERDES_RX_PAUSE;
+ mask |= YT921X_SERDES_LINK;
+ ctrl |= YT921X_SERDES_LINK;
+ res = yt921x_reg_update_bits(priv, YT921X_SERDESn(port),
+ mask, ctrl);
+ if (res)
+ return res;
+
+ mask = YT921X_XMII_LINK;
+ res = yt921x_reg_set_bits(priv, YT921X_XMIIn(port), mask);
+ if (res)
+ return res;
+
+ switch (speed) {
+ case SPEED_10:
+ ctrl = YT921X_MDIO_POLLING_SPEED_10;
+ break;
+ case SPEED_100:
+ ctrl = YT921X_MDIO_POLLING_SPEED_100;
+ break;
+ case SPEED_1000:
+ ctrl = YT921X_MDIO_POLLING_SPEED_1000;
+ break;
+ case SPEED_2500:
+ ctrl = YT921X_MDIO_POLLING_SPEED_2500;
+ break;
+ case SPEED_10000:
+ ctrl = YT921X_MDIO_POLLING_SPEED_10000;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (duplex == DUPLEX_FULL)
+ ctrl |= YT921X_MDIO_POLLING_DUPLEX_FULL;
+ ctrl |= YT921X_MDIO_POLLING_LINK;
+ res = yt921x_reg_write(priv, YT921X_MDIO_POLLINGn(port), ctrl);
+ if (res)
+ return res;
+ }
+
+ return 0;
+}
+
+static int
+yt921x_port_config(struct yt921x_priv *priv, int port, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct device *dev = to_device(priv);
+ u32 mask;
+ u32 ctrl;
+ int res;
+
+ if (!yt921x_port_is_external(port)) {
+ if (interface != PHY_INTERFACE_MODE_INTERNAL) {
+ dev_err(dev, "Wrong mode %d on port %d\n",
+ interface, port);
+ return -EINVAL;
+ }
+ return 0;
+ }
+
+ switch (interface) {
+ /* SERDES */
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_100BASEX:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ mask = YT921X_SERDES_CTRL_PORTn(port);
+ res = yt921x_reg_set_bits(priv, YT921X_SERDES_CTRL, mask);
+ if (res)
+ return res;
+
+ mask = YT921X_XMII_CTRL_PORTn(port);
+ res = yt921x_reg_clear_bits(priv, YT921X_XMII_CTRL, mask);
+ if (res)
+ return res;
+
+ mask = YT921X_SERDES_MODE_M;
+ switch (interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ ctrl = YT921X_SERDES_MODE_SGMII;
+ break;
+ case PHY_INTERFACE_MODE_100BASEX:
+ ctrl = YT921X_SERDES_MODE_100BASEX;
+ break;
+ case PHY_INTERFACE_MODE_1000BASEX:
+ ctrl = YT921X_SERDES_MODE_1000BASEX;
+ break;
+ case PHY_INTERFACE_MODE_2500BASEX:
+ ctrl = YT921X_SERDES_MODE_2500BASEX;
+ break;
+ default:
+ return -EINVAL;
+ }
+ res = yt921x_reg_update_bits(priv, YT921X_SERDESn(port),
+ mask, ctrl);
+ if (res)
+ return res;
+
+ break;
+ /* add XMII support here */
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+yt921x_phylink_mac_link_down(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct yt921x_priv *priv = to_yt921x_priv(dp->ds);
+ int port = dp->index;
+ int res;
+
+	/* No need to sync; port control block is held until device removal */
+ cancel_delayed_work(&priv->ports[port].mib_read);
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_port_down(priv, port);
+ mutex_unlock(&priv->reg_lock);
+
+ if (res)
+ dev_err(dp->ds->dev, "Failed to %s port %d: %i\n", "bring down",
+ port, res);
+}
+
+static void
+yt921x_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev, unsigned int mode,
+ phy_interface_t interface, int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct yt921x_priv *priv = to_yt921x_priv(dp->ds);
+ int port = dp->index;
+ int res;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_port_up(priv, port, mode, interface, speed, duplex,
+ tx_pause, rx_pause);
+ mutex_unlock(&priv->reg_lock);
+
+ if (res)
+ dev_err(dp->ds->dev, "Failed to %s port %d: %i\n", "bring up",
+ port, res);
+
+ schedule_delayed_work(&priv->ports[port].mib_read, 0);
+}
+
+static void
+yt921x_phylink_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct yt921x_priv *priv = to_yt921x_priv(dp->ds);
+ int port = dp->index;
+ int res;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_port_config(priv, port, mode, state->interface);
+ mutex_unlock(&priv->reg_lock);
+
+ if (res)
+ dev_err(dp->ds->dev, "Failed to %s port %d: %i\n", "config",
+ port, res);
+}
+
+static void
+yt921x_dsa_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ const struct yt921x_info *info = priv->info;
+
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000;
+
+ if (info->internal_mask & BIT(port)) {
+		/* Port 10 for the MCU should probably go here too. But since
+		 * it is still untested, leave it disabled for the moment by
+		 * letting it fall through to the default branch.
+		 */
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+ } else if (info->external_mask & BIT(port)) {
+		/* TODO: external ports may support SERDES only, XMII only, or
+		 * SERDES + XMII depending on the chip. However, we can't get
+		 * an accurate config table due to lack of documentation, so we
+		 * simply declare SERDES + XMII and rely on the correctness of
+		 * the devicetree for now.
+		 */
+
+ /* SERDES */
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ config->supported_interfaces);
+ /* REVSGMII (SGMII in PHY role) should go here, once
+ * PHY_INTERFACE_MODE_REVSGMII is introduced.
+ */
+ __set_bit(PHY_INTERFACE_MODE_100BASEX,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ config->supported_interfaces);
+ config->mac_capabilities |= MAC_2500FD;
+
+ /* XMII */
+
+ /* Not tested. To add support for XMII:
+ * - Add proper interface modes below
+ * - Handle them in yt921x_port_config()
+ */
+ }
+	/* No such port: an empty supported_interfaces causes phylink to
+	 * reject it
+	 */
+}
+
+static int yt921x_port_setup(struct yt921x_priv *priv, int port)
+{
+ struct dsa_switch *ds = &priv->ds;
+ u32 ctrl;
+ int res;
+
+ res = yt921x_userport_standalone(priv, port);
+ if (res)
+ return res;
+
+ if (dsa_is_cpu_port(ds, port)) {
+		/* Egress of the CPU port is supposed to be completely
+		 * controlled via tagging, so set it to one-way isolation
+		 * (drop all packets without a tag).
+		 */
+ ctrl = ~(u32)0;
+ res = yt921x_reg_write(priv, YT921X_PORTn_ISOLATION(port),
+ ctrl);
+ if (res)
+ return res;
+
+		/* To simplify FDB "isolation" simulation, we also disable
+		 * learning on the CPU port, and let software identify packets
+		 * towards the CPU (either trapped or matching a static FDB
+		 * entry, no matter which bridge that entry is for), which is
+		 * already done by yt921x_userport_standalone(). As a result,
+		 * VLAN awareness becomes irrelevant on the CPU port (set to
+		 * VLAN-unaware by the way).
+		 */
+ }
+
+ return 0;
+}
+
+static enum dsa_tag_protocol
+yt921x_dsa_get_tag_protocol(struct dsa_switch *ds, int port,
+ enum dsa_tag_protocol m)
+{
+ return DSA_TAG_PROTO_YT921X;
+}
+
+static int yt921x_dsa_port_setup(struct dsa_switch *ds, int port)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ int res;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_port_setup(priv, port);
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
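+/* Wait for the EDATA interface to report idle and return the DATA register */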
+static int yt921x_edata_wait(struct yt921x_priv *priv, u32 *valp)
+{
+ u32 val = YT921X_EDATA_DATA_IDLE;
+ int res;
+
+ res = yt921x_reg_wait(priv, YT921X_EDATA_DATA,
+ YT921X_EDATA_DATA_STATUS_M, &val);
+ if (res)
+ return res;
+
+ *valp = val;
+ return 0;
+}
+
+static int
+yt921x_edata_read_cont(struct yt921x_priv *priv, u8 addr, u8 *valp)
+{
+ u32 ctrl;
+ u32 val;
+ int res;
+
+ ctrl = YT921X_EDATA_CTRL_ADDR(addr) | YT921X_EDATA_CTRL_READ;
+ res = yt921x_reg_write(priv, YT921X_EDATA_CTRL, ctrl);
+ if (res)
+ return res;
+ res = yt921x_edata_wait(priv, &val);
+ if (res)
+ return res;
+
+ *valp = FIELD_GET(YT921X_EDATA_DATA_DATA_M, val);
+ return 0;
+}
+
+static int yt921x_edata_read(struct yt921x_priv *priv, u8 addr, u8 *valp)
+{
+ u32 val;
+ int res;
+
+ res = yt921x_edata_wait(priv, &val);
+ if (res)
+ return res;
+ return yt921x_edata_read_cont(priv, addr, valp);
+}
+
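+/* Identify the chip from CHIP_ID, CHIP_MODE and the EDATA extmode, then
+ * select the matching yt921x_info
+ */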
+static int yt921x_chip_detect(struct yt921x_priv *priv)
+{
+ struct device *dev = to_device(priv);
+ const struct yt921x_info *info;
+ u8 extmode;
+ u32 chipid;
+ u32 major;
+ u32 mode;
+ int res;
+
+ res = yt921x_reg_read(priv, YT921X_CHIP_ID, &chipid);
+ if (res)
+ return res;
+
+ major = FIELD_GET(YT921X_CHIP_ID_MAJOR, chipid);
+
+ for (info = yt921x_infos; info->name; info++)
+ if (info->major == major)
+ break;
+ if (!info->name) {
+ dev_err(dev, "Unexpected chipid 0x%x\n", chipid);
+ return -ENODEV;
+ }
+
+ res = yt921x_reg_read(priv, YT921X_CHIP_MODE, &mode);
+ if (res)
+ return res;
+ res = yt921x_edata_read(priv, YT921X_EDATA_EXTMODE, &extmode);
+ if (res)
+ return res;
+
+ for (; info->name; info++)
+ if (info->major == major && info->mode == mode &&
+ info->extmode == extmode)
+ break;
+ if (!info->name) {
+ dev_err(dev,
+ "Unsupported chipid 0x%x with chipmode 0x%x 0x%x\n",
+ chipid, mode, extmode);
+ return -ENODEV;
+ }
+
+	/* Print the chipid here since we are interested in the lower 16 bits */
+ dev_info(dev,
+ "Motorcomm %s ethernet switch, chipid: 0x%x, chipmode: 0x%x 0x%x\n",
+ info->name, chipid, mode, extmode);
+
+ priv->info = info;
+ return 0;
+}
+
+static int yt921x_chip_reset(struct yt921x_priv *priv)
+{
+ struct device *dev = to_device(priv);
+ u16 eth_p_tag;
+ u32 val;
+ int res;
+
+ res = yt921x_chip_detect(priv);
+ if (res)
+ return res;
+
+ /* Reset */
+ res = yt921x_reg_write(priv, YT921X_RST, YT921X_RST_HW);
+ if (res)
+ return res;
+
+	/* RST_HW is almost the same as a GPIO hard reset, so we need this delay. */
+ fsleep(YT921X_RST_DELAY_US);
+
+ val = 0;
+ res = yt921x_reg_wait(priv, YT921X_RST, ~0, &val);
+ if (res)
+ return res;
+
+	/* Check the tag EtherType; do it after reset in case it was messed up
+	 * before.
+	 */
+ res = yt921x_reg_read(priv, YT921X_CPU_TAG_TPID, &val);
+ if (res)
+ return res;
+ eth_p_tag = FIELD_GET(YT921X_CPU_TAG_TPID_TPID_M, val);
+ if (eth_p_tag != ETH_P_YT921X) {
+ dev_err(dev, "Tag type 0x%x != 0x%x\n", eth_p_tag,
+ ETH_P_YT921X);
+		/* Although possible, we choose not to set CPU_TAG_TPID, since
+		 * there is no way it can be different unless you have the
+		 * wrong chip.
+		 */
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int yt921x_chip_setup(struct yt921x_priv *priv)
+{
+ struct dsa_switch *ds = &priv->ds;
+ unsigned long cpu_ports_mask;
+ u64 ctrl64;
+ u32 ctrl;
+ int port;
+ int res;
+
+ /* Enable DSA */
+ priv->cpu_ports_mask = dsa_cpu_ports(ds);
+
+ ctrl = YT921X_EXT_CPU_PORT_TAG_EN | YT921X_EXT_CPU_PORT_PORT_EN |
+ YT921X_EXT_CPU_PORT_PORT(__ffs(priv->cpu_ports_mask));
+ res = yt921x_reg_write(priv, YT921X_EXT_CPU_PORT, ctrl);
+ if (res)
+ return res;
+
+ /* Enable and clear MIB */
+ res = yt921x_reg_set_bits(priv, YT921X_FUNC, YT921X_FUNC_MIB);
+ if (res)
+ return res;
+
+ ctrl = YT921X_MIB_CTRL_CLEAN | YT921X_MIB_CTRL_ALL_PORT;
+ res = yt921x_reg_write(priv, YT921X_MIB_CTRL, ctrl);
+ if (res)
+ return res;
+
+ /* Setup software switch */
+ ctrl = YT921X_CPU_COPY_TO_EXT_CPU;
+ res = yt921x_reg_write(priv, YT921X_CPU_COPY, ctrl);
+ if (res)
+ return res;
+
+ ctrl = GENMASK(10, 0);
+ res = yt921x_reg_write(priv, YT921X_FILTER_UNK_UCAST, ctrl);
+ if (res)
+ return res;
+ res = yt921x_reg_write(priv, YT921X_FILTER_UNK_MCAST, ctrl);
+ if (res)
+ return res;
+
+	/* YT921x does not support native DSA port bridging, so we use port
+	 * isolation to emulate it. However, be especially careful that port
+	 * isolation takes effect _after_ FDB lookups, i.e. if an FDB entry
+	 * (from another bridge) is matched and the destination port (in
+	 * another bridge) is blocked, the packet will be dropped instead of
+	 * being flooded to the "bridged" ports, so we need to trap and handle
+	 * those packets in software.
+	 *
+	 * If there is no more than one bridge, we might be able to drop them
+	 * directly provided some conditions are met, but we trap them in all
+	 * cases for now.
+	 */
+ ctrl = 0;
+ for (int i = 0; i < YT921X_PORT_NUM; i++)
+ ctrl |= YT921X_ACT_UNK_ACTn_TRAP(i);
+	/* Except for CPU ports: if any packets are sent via a CPU port
+	 * without a tag, they should be dropped.
+	 */
+ cpu_ports_mask = priv->cpu_ports_mask;
+ for_each_set_bit(port, &cpu_ports_mask, YT921X_PORT_NUM) {
+ ctrl &= ~YT921X_ACT_UNK_ACTn_M(port);
+ ctrl |= YT921X_ACT_UNK_ACTn_DROP(port);
+ }
+ res = yt921x_reg_write(priv, YT921X_ACT_UNK_UCAST, ctrl);
+ if (res)
+ return res;
+ res = yt921x_reg_write(priv, YT921X_ACT_UNK_MCAST, ctrl);
+ if (res)
+ return res;
+
+ /* Tagged VID 0 should be treated as untagged, which confuses the
+ * hardware a lot
+ */
+ ctrl64 = YT921X_VLAN_CTRL_LEARN_DIS | YT921X_VLAN_CTRL_PORTS_M;
+ res = yt921x_reg64_write(priv, YT921X_VLANn_CTRL(0), ctrl64);
+ if (res)
+ return res;
+
+ /* Miscellaneous */
+ res = yt921x_reg_set_bits(priv, YT921X_SENSOR, YT921X_SENSOR_TEMP);
+ if (res)
+ return res;
+
+ return 0;
+}
+
+static int yt921x_dsa_setup(struct dsa_switch *ds)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ struct device *dev = to_device(priv);
+ struct device_node *np = dev->of_node;
+ struct device_node *child;
+ int res;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_chip_reset(priv);
+ mutex_unlock(&priv->reg_lock);
+
+ if (res)
+ return res;
+
+	/* Register the internal mdio bus. Nodes for internal ports should
+	 * have a proper phy-handle pointing to their PHYs. Not enabling the
+	 * internal bus is possible, though pretty weird, if the internal
+	 * ports are not used.
+	 */
+ child = of_get_child_by_name(np, "mdio");
+ if (child) {
+ res = yt921x_mbus_int_init(priv, child);
+ of_node_put(child);
+ if (res)
+ return res;
+ }
+
+ /* External mdio bus is optional */
+ child = of_get_child_by_name(np, "mdio-external");
+ if (child) {
+ res = yt921x_mbus_ext_init(priv, child);
+ of_node_put(child);
+ if (res)
+ return res;
+
+ dev_err(dev, "Untested external mdio bus\n");
+ return -ENODEV;
+ }
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_chip_setup(priv);
+ mutex_unlock(&priv->reg_lock);
+
+ if (res)
+ return res;
+
+ return 0;
+}
+
+static const struct phylink_mac_ops yt921x_phylink_mac_ops = {
+ .mac_link_down = yt921x_phylink_mac_link_down,
+ .mac_link_up = yt921x_phylink_mac_link_up,
+ .mac_config = yt921x_phylink_mac_config,
+};
+
+static const struct dsa_switch_ops yt921x_dsa_switch_ops = {
+ /* mib */
+ .get_strings = yt921x_dsa_get_strings,
+ .get_ethtool_stats = yt921x_dsa_get_ethtool_stats,
+ .get_sset_count = yt921x_dsa_get_sset_count,
+ .get_eth_mac_stats = yt921x_dsa_get_eth_mac_stats,
+ .get_eth_ctrl_stats = yt921x_dsa_get_eth_ctrl_stats,
+ .get_rmon_stats = yt921x_dsa_get_rmon_stats,
+ .get_stats64 = yt921x_dsa_get_stats64,
+ .get_pause_stats = yt921x_dsa_get_pause_stats,
+ /* eee */
+ .support_eee = dsa_supports_eee,
+ .set_mac_eee = yt921x_dsa_set_mac_eee,
+ /* mtu */
+ .port_change_mtu = yt921x_dsa_port_change_mtu,
+ .port_max_mtu = yt921x_dsa_port_max_mtu,
+ /* hsr */
+ .port_hsr_leave = dsa_port_simple_hsr_leave,
+ .port_hsr_join = dsa_port_simple_hsr_join,
+ /* mirror */
+ .port_mirror_del = yt921x_dsa_port_mirror_del,
+ .port_mirror_add = yt921x_dsa_port_mirror_add,
+ /* fdb */
+ .port_fdb_dump = yt921x_dsa_port_fdb_dump,
+ .port_fast_age = yt921x_dsa_port_fast_age,
+ .set_ageing_time = yt921x_dsa_set_ageing_time,
+ .port_fdb_del = yt921x_dsa_port_fdb_del,
+ .port_fdb_add = yt921x_dsa_port_fdb_add,
+ .port_mdb_del = yt921x_dsa_port_mdb_del,
+ .port_mdb_add = yt921x_dsa_port_mdb_add,
+ /* vlan */
+ .port_vlan_filtering = yt921x_dsa_port_vlan_filtering,
+ .port_vlan_del = yt921x_dsa_port_vlan_del,
+ .port_vlan_add = yt921x_dsa_port_vlan_add,
+ /* bridge */
+ .port_pre_bridge_flags = yt921x_dsa_port_pre_bridge_flags,
+ .port_bridge_flags = yt921x_dsa_port_bridge_flags,
+ .port_bridge_leave = yt921x_dsa_port_bridge_leave,
+ .port_bridge_join = yt921x_dsa_port_bridge_join,
+ /* mst */
+ .port_mst_state_set = yt921x_dsa_port_mst_state_set,
+ .vlan_msti_set = yt921x_dsa_vlan_msti_set,
+ .port_stp_state_set = yt921x_dsa_port_stp_state_set,
+ /* port */
+ .get_tag_protocol = yt921x_dsa_get_tag_protocol,
+ .phylink_get_caps = yt921x_dsa_phylink_get_caps,
+ .port_setup = yt921x_dsa_port_setup,
+ /* chip */
+ .setup = yt921x_dsa_setup,
+};
+
+static void yt921x_mdio_shutdown(struct mdio_device *mdiodev)
+{
+ struct yt921x_priv *priv = mdiodev_get_drvdata(mdiodev);
+
+ if (!priv)
+ return;
+
+ dsa_switch_shutdown(&priv->ds);
+}
+
+static void yt921x_mdio_remove(struct mdio_device *mdiodev)
+{
+ struct yt921x_priv *priv = mdiodev_get_drvdata(mdiodev);
+
+ if (!priv)
+ return;
+
+ for (size_t i = ARRAY_SIZE(priv->ports); i-- > 0; ) {
+ struct yt921x_port *pp = &priv->ports[i];
+
+ disable_delayed_work_sync(&pp->mib_read);
+ }
+
+ dsa_unregister_switch(&priv->ds);
+
+ mutex_destroy(&priv->reg_lock);
+}
+
+static int yt921x_mdio_probe(struct mdio_device *mdiodev)
+{
+ struct device *dev = &mdiodev->dev;
+ struct yt921x_reg_mdio *mdio;
+ struct yt921x_priv *priv;
+ struct dsa_switch *ds;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mdio = devm_kzalloc(dev, sizeof(*mdio), GFP_KERNEL);
+ if (!mdio)
+ return -ENOMEM;
+
+ mdio->bus = mdiodev->bus;
+ mdio->addr = mdiodev->addr;
+ mdio->switchid = 0;
+
+ mutex_init(&priv->reg_lock);
+
+ priv->reg_ops = &yt921x_reg_ops_mdio;
+ priv->reg_ctx = mdio;
+
+ for (size_t i = 0; i < ARRAY_SIZE(priv->ports); i++) {
+ struct yt921x_port *pp = &priv->ports[i];
+
+ pp->index = i;
+ INIT_DELAYED_WORK(&pp->mib_read, yt921x_poll_mib);
+ }
+
+ ds = &priv->ds;
+ ds->dev = dev;
+ ds->assisted_learning_on_cpu_port = true;
+ ds->priv = priv;
+ ds->ops = &yt921x_dsa_switch_ops;
+ ds->ageing_time_min = 1 * 5000;
+ ds->ageing_time_max = U16_MAX * 5000;
+ ds->phylink_mac_ops = &yt921x_phylink_mac_ops;
+ ds->num_ports = YT921X_PORT_NUM;
+
+ mdiodev_set_drvdata(mdiodev, priv);
+
+ return dsa_register_switch(ds);
+}
+
+static const struct of_device_id yt921x_of_match[] = {
+ { .compatible = "motorcomm,yt9215" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, yt921x_of_match);
+
+static struct mdio_driver yt921x_mdio_driver = {
+ .probe = yt921x_mdio_probe,
+ .remove = yt921x_mdio_remove,
+ .shutdown = yt921x_mdio_shutdown,
+ .mdiodrv.driver = {
+ .name = YT921X_NAME,
+ .of_match_table = yt921x_of_match,
+ },
+};
+
+mdio_module_driver(yt921x_mdio_driver);
+
+MODULE_AUTHOR("David Yang <mmyangfl@gmail.com>");
+MODULE_DESCRIPTION("Driver for Motorcomm YT921x Switch");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/yt921x.h b/drivers/net/dsa/yt921x.h
new file mode 100644
index 000000000000..61bb0ab3b09a
--- /dev/null
+++ b/drivers/net/dsa/yt921x.h
@@ -0,0 +1,567 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2025 David Yang
+ */
+
+#ifndef __YT921X_H
+#define __YT921X_H
+
+#include <net/dsa.h>
+
+#define YT921X_SMI_SWITCHID_M GENMASK(3, 2)
+#define YT921X_SMI_SWITCHID(x) FIELD_PREP(YT921X_SMI_SWITCHID_M, (x))
+#define YT921X_SMI_AD BIT(1)
+#define YT921X_SMI_ADDR 0
+#define YT921X_SMI_DATA YT921X_SMI_AD
+#define YT921X_SMI_RW BIT(0)
+#define YT921X_SMI_WRITE 0
+#define YT921X_SMI_READ YT921X_SMI_RW
+
+#define YT921X_SWITCHID_NUM 4
+
+#define YT921X_RST 0x80000
+#define YT921X_RST_HW BIT(31)
+#define YT921X_RST_SW BIT(1)
+#define YT921X_FUNC 0x80004
+#define YT921X_FUNC_MIB BIT(1)
+#define YT921X_CHIP_ID 0x80008
+#define YT921X_CHIP_ID_MAJOR GENMASK(31, 16)
+#define YT921X_EXT_CPU_PORT 0x8000c
+#define YT921X_EXT_CPU_PORT_TAG_EN BIT(15)
+#define YT921X_EXT_CPU_PORT_PORT_EN BIT(14)
+#define YT921X_EXT_CPU_PORT_PORT_M GENMASK(3, 0)
+#define YT921X_EXT_CPU_PORT_PORT(x) FIELD_PREP(YT921X_EXT_CPU_PORT_PORT_M, (x))
+#define YT921X_CPU_TAG_TPID 0x80010
+#define YT921X_CPU_TAG_TPID_TPID_M GENMASK(15, 0)
+/* Same as ETH_P_YT921X, but this represents the true HW default, while the
+ * former is a local convention chosen by us.
+ */
+#define YT921X_CPU_TAG_TPID_TPID_DEFAULT 0x9988
+#define YT921X_PVID_SEL 0x80014
+#define YT921X_PVID_SEL_SVID_PORTn(port) BIT(port)
+#define YT921X_SERDES_CTRL 0x80028
+#define YT921X_SERDES_CTRL_PORTn_TEST(port) BIT((port) - 3)
+#define YT921X_SERDES_CTRL_PORTn(port) BIT((port) - 8)
+#define YT921X_IO_LEVEL 0x80030
+#define YT9215_IO_LEVEL_NORMAL_M GENMASK(5, 4)
+#define YT9215_IO_LEVEL_NORMAL(x) FIELD_PREP(YT9215_IO_LEVEL_NORMAL_M, (x))
+#define YT9215_IO_LEVEL_NORMAL_3V3 YT9215_IO_LEVEL_NORMAL(0)
+#define YT9215_IO_LEVEL_NORMAL_1V8 YT9215_IO_LEVEL_NORMAL(3)
+#define YT9215_IO_LEVEL_RGMII1_M GENMASK(3, 2)
+#define YT9215_IO_LEVEL_RGMII1(x) FIELD_PREP(YT9215_IO_LEVEL_RGMII1_M, (x))
+#define YT9215_IO_LEVEL_RGMII1_3V3 YT9215_IO_LEVEL_RGMII1(0)
+#define YT9215_IO_LEVEL_RGMII1_2V5 YT9215_IO_LEVEL_RGMII1(1)
+#define YT9215_IO_LEVEL_RGMII1_1V8 YT9215_IO_LEVEL_RGMII1(2)
+#define YT9215_IO_LEVEL_RGMII0_M GENMASK(1, 0)
+#define YT9215_IO_LEVEL_RGMII0(x) FIELD_PREP(YT9215_IO_LEVEL_RGMII0_M, (x))
+#define YT9215_IO_LEVEL_RGMII0_3V3 YT9215_IO_LEVEL_RGMII0(0)
+#define YT9215_IO_LEVEL_RGMII0_2V5 YT9215_IO_LEVEL_RGMII0(1)
+#define YT9215_IO_LEVEL_RGMII0_1V8 YT9215_IO_LEVEL_RGMII0(2)
+#define YT9218_IO_LEVEL_RGMII1_M GENMASK(5, 4)
+#define YT9218_IO_LEVEL_RGMII1(x) FIELD_PREP(YT9218_IO_LEVEL_RGMII1_M, (x))
+#define YT9218_IO_LEVEL_RGMII1_3V3 YT9218_IO_LEVEL_RGMII1(0)
+#define YT9218_IO_LEVEL_RGMII1_2V5 YT9218_IO_LEVEL_RGMII1(1)
+#define YT9218_IO_LEVEL_RGMII1_1V8 YT9218_IO_LEVEL_RGMII1(2)
+#define YT9218_IO_LEVEL_RGMII0_M GENMASK(3, 2)
+#define YT9218_IO_LEVEL_RGMII0(x) FIELD_PREP(YT9218_IO_LEVEL_RGMII0_M, (x))
+#define YT9218_IO_LEVEL_RGMII0_3V3 YT9218_IO_LEVEL_RGMII0(0)
+#define YT9218_IO_LEVEL_RGMII0_2V5 YT9218_IO_LEVEL_RGMII0(1)
+#define YT9218_IO_LEVEL_RGMII0_1V8 YT9218_IO_LEVEL_RGMII0(2)
+#define YT9218_IO_LEVEL_NORMAL_M GENMASK(1, 0)
+#define YT9218_IO_LEVEL_NORMAL(x) FIELD_PREP(YT9218_IO_LEVEL_NORMAL_M, (x))
+#define YT9218_IO_LEVEL_NORMAL_3V3 YT9218_IO_LEVEL_NORMAL(0)
+#define YT9218_IO_LEVEL_NORMAL_1V8 YT9218_IO_LEVEL_NORMAL(3)
+#define YT921X_MAC_ADDR_HI2 0x80080
+#define YT921X_MAC_ADDR_LO4 0x80084
+#define YT921X_SERDESn(port) (0x8008c + 4 * ((port) - 8))
+#define YT921X_SERDES_MODE_M GENMASK(9, 7)
+#define YT921X_SERDES_MODE(x) FIELD_PREP(YT921X_SERDES_MODE_M, (x))
+#define YT921X_SERDES_MODE_SGMII YT921X_SERDES_MODE(0)
+#define YT921X_SERDES_MODE_REVSGMII YT921X_SERDES_MODE(1)
+#define YT921X_SERDES_MODE_1000BASEX YT921X_SERDES_MODE(2)
+#define YT921X_SERDES_MODE_100BASEX YT921X_SERDES_MODE(3)
+#define YT921X_SERDES_MODE_2500BASEX YT921X_SERDES_MODE(4)
+#define YT921X_SERDES_RX_PAUSE BIT(6)
+#define YT921X_SERDES_TX_PAUSE BIT(5)
+#define YT921X_SERDES_LINK BIT(4) /* force link */
+#define YT921X_SERDES_DUPLEX_FULL BIT(3)
+#define YT921X_SERDES_SPEED_M GENMASK(2, 0)
+#define YT921X_SERDES_SPEED(x) FIELD_PREP(YT921X_SERDES_SPEED_M, (x))
+#define YT921X_SERDES_SPEED_10 YT921X_SERDES_SPEED(0)
+#define YT921X_SERDES_SPEED_100 YT921X_SERDES_SPEED(1)
+#define YT921X_SERDES_SPEED_1000 YT921X_SERDES_SPEED(2)
+#define YT921X_SERDES_SPEED_10000 YT921X_SERDES_SPEED(3)
+#define YT921X_SERDES_SPEED_2500 YT921X_SERDES_SPEED(4)
+#define YT921X_PORTn_CTRL(port) (0x80100 + 4 * (port))
+#define YT921X_PORT_CTRL_PAUSE_AN BIT(10)
+#define YT921X_PORTn_STATUS(port) (0x80200 + 4 * (port))
+#define YT921X_PORT_LINK BIT(9) /* CTRL: auto negotiation */
+#define YT921X_PORT_HALF_PAUSE BIT(8) /* Half-duplex back pressure mode */
+#define YT921X_PORT_DUPLEX_FULL BIT(7)
+#define YT921X_PORT_RX_PAUSE BIT(6)
+#define YT921X_PORT_TX_PAUSE BIT(5)
+#define YT921X_PORT_RX_MAC_EN BIT(4)
+#define YT921X_PORT_TX_MAC_EN BIT(3)
+#define YT921X_PORT_SPEED_M GENMASK(2, 0)
+#define YT921X_PORT_SPEED(x) FIELD_PREP(YT921X_PORT_SPEED_M, (x))
+#define YT921X_PORT_SPEED_10 YT921X_PORT_SPEED(0)
+#define YT921X_PORT_SPEED_100 YT921X_PORT_SPEED(1)
+#define YT921X_PORT_SPEED_1000 YT921X_PORT_SPEED(2)
+#define YT921X_PORT_SPEED_10000 YT921X_PORT_SPEED(3)
+#define YT921X_PORT_SPEED_2500 YT921X_PORT_SPEED(4)
+#define YT921X_PON_STRAP_FUNC 0x80320
+#define YT921X_PON_STRAP_VAL 0x80324
+#define YT921X_PON_STRAP_CAP 0x80328
+#define YT921X_PON_STRAP_EEE BIT(16)
+#define YT921X_PON_STRAP_LOOP_DETECT BIT(7)
+#define YT921X_MDIO_POLLINGn(port) (0x80364 + 4 * ((port) - 8))
+#define YT921X_MDIO_POLLING_DUPLEX_FULL BIT(4)
+#define YT921X_MDIO_POLLING_LINK BIT(3)
+#define YT921X_MDIO_POLLING_SPEED_M GENMASK(2, 0)
+#define YT921X_MDIO_POLLING_SPEED(x) FIELD_PREP(YT921X_MDIO_POLLING_SPEED_M, (x))
+#define YT921X_MDIO_POLLING_SPEED_10 YT921X_MDIO_POLLING_SPEED(0)
+#define YT921X_MDIO_POLLING_SPEED_100 YT921X_MDIO_POLLING_SPEED(1)
+#define YT921X_MDIO_POLLING_SPEED_1000 YT921X_MDIO_POLLING_SPEED(2)
+#define YT921X_MDIO_POLLING_SPEED_10000 YT921X_MDIO_POLLING_SPEED(3)
+#define YT921X_MDIO_POLLING_SPEED_2500 YT921X_MDIO_POLLING_SPEED(4)
+#define YT921X_SENSOR 0x8036c
+#define YT921X_SENSOR_TEMP BIT(18)
+#define YT921X_TEMP 0x80374
+#define YT921X_CHIP_MODE 0x80388
+#define YT921X_CHIP_MODE_MODE GENMASK(1, 0)
+#define YT921X_XMII_CTRL 0x80394
+#define YT921X_XMII_CTRL_PORTn(port) BIT(9 - (port)) /* Yes, it's reversed */
+#define YT921X_XMIIn(port) (0x80400 + 8 * ((port) - 8))
+#define YT921X_XMII_MODE_M GENMASK(31, 29)
+#define YT921X_XMII_MODE(x) FIELD_PREP(YT921X_XMII_MODE_M, (x))
+#define YT921X_XMII_MODE_MII YT921X_XMII_MODE(0)
+#define YT921X_XMII_MODE_REVMII YT921X_XMII_MODE(1)
+#define YT921X_XMII_MODE_RMII YT921X_XMII_MODE(2)
+#define YT921X_XMII_MODE_REVRMII YT921X_XMII_MODE(3)
+#define YT921X_XMII_MODE_RGMII YT921X_XMII_MODE(4)
+#define YT921X_XMII_MODE_DISABLE YT921X_XMII_MODE(5)
+#define YT921X_XMII_LINK BIT(19) /* force link */
+#define YT921X_XMII_EN BIT(18)
+#define YT921X_XMII_SOFT_RST BIT(17)
+#define YT921X_XMII_RGMII_TX_DELAY_150PS_M GENMASK(16, 13)
+#define YT921X_XMII_RGMII_TX_DELAY_150PS(x) FIELD_PREP(YT921X_XMII_RGMII_TX_DELAY_150PS_M, (x))
+#define YT921X_XMII_TX_CLK_IN BIT(11)
+#define YT921X_XMII_RX_CLK_IN BIT(10)
+#define YT921X_XMII_RGMII_TX_DELAY_2NS BIT(8)
+#define YT921X_XMII_RGMII_TX_CLK_OUT BIT(7)
+#define YT921X_XMII_RGMII_RX_DELAY_150PS_M GENMASK(6, 3)
+#define YT921X_XMII_RGMII_RX_DELAY_150PS(x) FIELD_PREP(YT921X_XMII_RGMII_RX_DELAY_150PS_M, (x))
+#define YT921X_XMII_RMII_PHY_TX_CLK_OUT BIT(2)
+#define YT921X_XMII_REVMII_TX_CLK_OUT BIT(1)
+#define YT921X_XMII_REVMII_RX_CLK_OUT BIT(0)
+
+#define YT921X_MACn_FRAME(port) (0x81008 + 0x1000 * (port))
+#define YT921X_MAC_FRAME_SIZE_M GENMASK(21, 8)
+#define YT921X_MAC_FRAME_SIZE(x) FIELD_PREP(YT921X_MAC_FRAME_SIZE_M, (x))
+
+#define YT921X_EEEn_VAL(port) (0xa0000 + 0x40 * (port))
+#define YT921X_EEE_VAL_DATA BIT(1)
+
+#define YT921X_EEE_CTRL 0xb0000
+#define YT921X_EEE_CTRL_ENn(port) BIT(port)
+
+#define YT921X_MIB_CTRL 0xc0004
+#define YT921X_MIB_CTRL_CLEAN BIT(30)
+#define YT921X_MIB_CTRL_PORT_M GENMASK(6, 3)
+#define YT921X_MIB_CTRL_PORT(x) FIELD_PREP(YT921X_MIB_CTRL_PORT_M, (x))
+#define YT921X_MIB_CTRL_ONE_PORT BIT(1)
+#define YT921X_MIB_CTRL_ALL_PORT BIT(0)
+#define YT921X_MIBn_DATA0(port) (0xc0100 + 0x100 * (port))
+#define YT921X_MIBn_DATAm(port, x) (YT921X_MIBn_DATA0(port) + 4 * (x))
+#define YT921X_MIB_DATA_RX_BROADCAST 0x00
+#define YT921X_MIB_DATA_RX_PAUSE 0x04
+#define YT921X_MIB_DATA_RX_MULTICAST 0x08
+#define YT921X_MIB_DATA_RX_CRC_ERR 0x0c
+
+#define YT921X_MIB_DATA_RX_ALIGN_ERR 0x10
+#define YT921X_MIB_DATA_RX_UNDERSIZE_ERR 0x14
+#define YT921X_MIB_DATA_RX_FRAG_ERR 0x18
+#define YT921X_MIB_DATA_RX_PKT_SZ_64 0x1c
+
+#define YT921X_MIB_DATA_RX_PKT_SZ_65_TO_127 0x20
+#define YT921X_MIB_DATA_RX_PKT_SZ_128_TO_255 0x24
+#define YT921X_MIB_DATA_RX_PKT_SZ_256_TO_511 0x28
+#define YT921X_MIB_DATA_RX_PKT_SZ_512_TO_1023 0x2c
+
+#define YT921X_MIB_DATA_RX_PKT_SZ_1024_TO_1518 0x30
+#define YT921X_MIB_DATA_RX_PKT_SZ_1519_TO_MAX 0x34
+/* 0x38: unused */
+#define YT921X_MIB_DATA_RX_GOOD_BYTES 0x3c
+
+/* 0x40: 64 bytes */
+#define YT921X_MIB_DATA_RX_BAD_BYTES 0x44
+/* 0x48: 64 bytes */
+#define YT921X_MIB_DATA_RX_OVERSIZE_ERR 0x4c
+
+#define YT921X_MIB_DATA_RX_DROPPED 0x50
+#define YT921X_MIB_DATA_TX_BROADCAST 0x54
+#define YT921X_MIB_DATA_TX_PAUSE 0x58
+#define YT921X_MIB_DATA_TX_MULTICAST 0x5c
+
+#define YT921X_MIB_DATA_TX_UNDERSIZE_ERR 0x60
+#define YT921X_MIB_DATA_TX_PKT_SZ_64 0x64
+#define YT921X_MIB_DATA_TX_PKT_SZ_65_TO_127 0x68
+#define YT921X_MIB_DATA_TX_PKT_SZ_128_TO_255 0x6c
+
+#define YT921X_MIB_DATA_TX_PKT_SZ_256_TO_511 0x70
+#define YT921X_MIB_DATA_TX_PKT_SZ_512_TO_1023 0x74
+#define YT921X_MIB_DATA_TX_PKT_SZ_1024_TO_1518 0x78
+#define YT921X_MIB_DATA_TX_PKT_SZ_1519_TO_MAX 0x7c
+
+/* 0x80: unused */
+#define YT921X_MIB_DATA_TX_GOOD_BYTES 0x84
+/* 0x88: 64 bytes */
+#define YT921X_MIB_DATA_TX_COLLISION 0x8c
+
+#define YT921X_MIB_DATA_TX_EXCESSIVE_COLLISION 0x90
+#define YT921X_MIB_DATA_TX_MULTIPLE_COLLISION 0x94
+#define YT921X_MIB_DATA_TX_SINGLE_COLLISION 0x98
+#define YT921X_MIB_DATA_TX_PKT 0x9c
+
+#define YT921X_MIB_DATA_TX_DEFERRED 0xa0
+#define YT921X_MIB_DATA_TX_LATE_COLLISION 0xa4
+#define YT921X_MIB_DATA_RX_OAM 0xa8
+#define YT921X_MIB_DATA_TX_OAM 0xac
+
+#define YT921X_EDATA_CTRL 0xe0000
+#define YT921X_EDATA_CTRL_ADDR_M GENMASK(15, 8)
+#define YT921X_EDATA_CTRL_ADDR(x) FIELD_PREP(YT921X_EDATA_CTRL_ADDR_M, (x))
+#define YT921X_EDATA_CTRL_OP_M GENMASK(3, 0)
+#define YT921X_EDATA_CTRL_OP(x) FIELD_PREP(YT921X_EDATA_CTRL_OP_M, (x))
+#define YT921X_EDATA_CTRL_READ YT921X_EDATA_CTRL_OP(5)
+#define YT921X_EDATA_DATA 0xe0004
+#define YT921X_EDATA_DATA_DATA_M GENMASK(31, 24)
+#define YT921X_EDATA_DATA_STATUS_M GENMASK(3, 0)
+#define YT921X_EDATA_DATA_STATUS(x) FIELD_PREP(YT921X_EDATA_DATA_STATUS_M, (x))
+#define YT921X_EDATA_DATA_IDLE YT921X_EDATA_DATA_STATUS(3)
+
+#define YT921X_EXT_MBUS_OP 0x6a000
+#define YT921X_INT_MBUS_OP 0xf0000
+#define YT921X_MBUS_OP_START BIT(0)
+#define YT921X_EXT_MBUS_CTRL 0x6a004
+#define YT921X_INT_MBUS_CTRL 0xf0004
+#define YT921X_MBUS_CTRL_PORT_M GENMASK(25, 21)
+#define YT921X_MBUS_CTRL_PORT(x) FIELD_PREP(YT921X_MBUS_CTRL_PORT_M, (x))
+#define YT921X_MBUS_CTRL_REG_M GENMASK(20, 16)
+#define YT921X_MBUS_CTRL_REG(x) FIELD_PREP(YT921X_MBUS_CTRL_REG_M, (x))
+#define YT921X_MBUS_CTRL_TYPE_M GENMASK(11, 8) /* wild guess */
+#define YT921X_MBUS_CTRL_TYPE(x) FIELD_PREP(YT921X_MBUS_CTRL_TYPE_M, (x))
+#define YT921X_MBUS_CTRL_TYPE_C22 YT921X_MBUS_CTRL_TYPE(4)
+#define YT921X_MBUS_CTRL_OP_M GENMASK(3, 2) /* wild guess */
+#define YT921X_MBUS_CTRL_OP(x) FIELD_PREP(YT921X_MBUS_CTRL_OP_M, (x))
+#define YT921X_MBUS_CTRL_WRITE YT921X_MBUS_CTRL_OP(1)
+#define YT921X_MBUS_CTRL_READ YT921X_MBUS_CTRL_OP(2)
+#define YT921X_EXT_MBUS_DOUT 0x6a008
+#define YT921X_INT_MBUS_DOUT 0xf0008
+#define YT921X_EXT_MBUS_DIN 0x6a00c
+#define YT921X_INT_MBUS_DIN 0xf000c
+
+#define YT921X_PORTn_EGR(port) (0x100000 + 4 * (port))
+#define YT921X_PORT_EGR_TPID_CTAG_M GENMASK(5, 4)
+#define YT921X_PORT_EGR_TPID_CTAG(x) FIELD_PREP(YT921X_PORT_EGR_TPID_CTAG_M, (x))
+#define YT921X_PORT_EGR_TPID_STAG_M GENMASK(3, 2)
+#define YT921X_PORT_EGR_TPID_STAG(x) FIELD_PREP(YT921X_PORT_EGR_TPID_STAG_M, (x))
+#define YT921X_TPID_EGRn(x) (0x100300 + 4 * (x)) /* [0, 3] */
+#define YT921X_TPID_EGR_TPID_M GENMASK(15, 0)
+
+#define YT921X_VLAN_IGR_FILTER 0x180280
+#define YT921X_VLAN_IGR_FILTER_PORTn_BYPASS_IGMP(port) BIT((port) + 11)
+#define YT921X_VLAN_IGR_FILTER_PORTn(port) BIT(port)
+#define YT921X_PORTn_ISOLATION(port) (0x180294 + 4 * (port))
+#define YT921X_PORT_ISOLATION_BLOCKn(port) BIT(port)
+#define YT921X_STPn(n) (0x18038c + 4 * (n))
+#define YT921X_STP_PORTn_M(port) GENMASK(2 * (port) + 1, 2 * (port))
+#define YT921X_STP_PORTn(port, x) ((x) << (2 * (port)))
+#define YT921X_STP_PORTn_DISABLED(port) YT921X_STP_PORTn(port, 0)
+#define YT921X_STP_PORTn_LEARNING(port) YT921X_STP_PORTn(port, 1)
+#define YT921X_STP_PORTn_BLOCKING(port) YT921X_STP_PORTn(port, 2)
+#define YT921X_STP_PORTn_FORWARD(port) YT921X_STP_PORTn(port, 3)
+#define YT921X_PORTn_LEARN(port) (0x1803d0 + 4 * (port))
+#define YT921X_PORT_LEARN_VID_LEARN_MULTI_EN BIT(22)
+#define YT921X_PORT_LEARN_VID_LEARN_MODE BIT(21)
+#define YT921X_PORT_LEARN_VID_LEARN_EN BIT(20)
+#define YT921X_PORT_LEARN_SUSPEND_COPY_EN BIT(19)
+#define YT921X_PORT_LEARN_SUSPEND_DROP_EN BIT(18)
+#define YT921X_PORT_LEARN_DIS BIT(17)
+#define YT921X_PORT_LEARN_LIMIT_EN BIT(16)
+#define YT921X_PORT_LEARN_LIMIT_M GENMASK(15, 8)
+#define YT921X_PORT_LEARN_LIMIT(x) FIELD_PREP(YT921X_PORT_LEARN_LIMIT_M, (x))
+#define YT921X_PORT_LEARN_DROP_ON_EXCEEDED BIT(2)
+#define YT921X_PORT_LEARN_MODE_M GENMASK(1, 0)
+#define YT921X_PORT_LEARN_MODE(x) FIELD_PREP(YT921X_PORT_LEARN_MODE_M, (x))
+#define YT921X_PORT_LEARN_MODE_AUTO YT921X_PORT_LEARN_MODE(0)
+#define YT921X_PORT_LEARN_MODE_AUTO_AND_COPY YT921X_PORT_LEARN_MODE(1)
+#define YT921X_PORT_LEARN_MODE_CPU_CONTROL YT921X_PORT_LEARN_MODE(2)
+#define YT921X_AGEING 0x180440
+#define YT921X_AGEING_INTERVAL_M GENMASK(15, 0)
+#define YT921X_FDB_IN0 0x180454
+#define YT921X_FDB_IN1 0x180458
+#define YT921X_FDB_IN2 0x18045c
+#define YT921X_FDB_OP 0x180460
+#define YT921X_FDB_OP_INDEX_M GENMASK(22, 11)
+#define YT921X_FDB_OP_INDEX(x) FIELD_PREP(YT921X_FDB_OP_INDEX_M, (x))
+#define YT921X_FDB_OP_MODE_INDEX BIT(10) /* mac+fid / index */
+#define YT921X_FDB_OP_FLUSH_MCAST BIT(9) /* ucast / mcast */
+#define YT921X_FDB_OP_FLUSH_M GENMASK(8, 7)
+#define YT921X_FDB_OP_FLUSH(x) FIELD_PREP(YT921X_FDB_OP_FLUSH_M, (x))
+#define YT921X_FDB_OP_FLUSH_ALL YT921X_FDB_OP_FLUSH(0)
+#define YT921X_FDB_OP_FLUSH_PORT YT921X_FDB_OP_FLUSH(1)
+#define YT921X_FDB_OP_FLUSH_PORT_VID YT921X_FDB_OP_FLUSH(2)
+#define YT921X_FDB_OP_FLUSH_VID YT921X_FDB_OP_FLUSH(3)
+#define YT921X_FDB_OP_FLUSH_STATIC BIT(6)
+#define YT921X_FDB_OP_NEXT_TYPE_M GENMASK(5, 4)
+#define YT921X_FDB_OP_NEXT_TYPE(x) FIELD_PREP(YT921X_FDB_OP_NEXT_TYPE_M, (x))
+#define YT921X_FDB_OP_NEXT_TYPE_UCAST_PORT YT921X_FDB_OP_NEXT_TYPE(0)
+#define YT921X_FDB_OP_NEXT_TYPE_UCAST_VID YT921X_FDB_OP_NEXT_TYPE(1)
+#define YT921X_FDB_OP_NEXT_TYPE_UCAST YT921X_FDB_OP_NEXT_TYPE(2)
+#define YT921X_FDB_OP_NEXT_TYPE_MCAST YT921X_FDB_OP_NEXT_TYPE(3)
+#define YT921X_FDB_OP_OP_M GENMASK(3, 1)
+#define YT921X_FDB_OP_OP(x) FIELD_PREP(YT921X_FDB_OP_OP_M, (x))
+#define YT921X_FDB_OP_OP_ADD YT921X_FDB_OP_OP(0)
+#define YT921X_FDB_OP_OP_DEL YT921X_FDB_OP_OP(1)
+#define YT921X_FDB_OP_OP_GET_ONE YT921X_FDB_OP_OP(2)
+#define YT921X_FDB_OP_OP_GET_NEXT YT921X_FDB_OP_OP(3)
+#define YT921X_FDB_OP_OP_FLUSH YT921X_FDB_OP_OP(4)
+#define YT921X_FDB_OP_START BIT(0)
+#define YT921X_FDB_RESULT 0x180464
+#define YT921X_FDB_RESULT_DONE BIT(15)
+#define YT921X_FDB_RESULT_NOTFOUND BIT(14)
+#define YT921X_FDB_RESULT_OVERWRITED BIT(13)
+#define YT921X_FDB_RESULT_INDEX_M GENMASK(11, 0)
+#define YT921X_FDB_RESULT_INDEX(x) FIELD_PREP(YT921X_FDB_RESULT_INDEX_M, (x))
+#define YT921X_FDB_OUT0 0x1804b0
+#define YT921X_FDB_IO0_ADDR_HI4_M GENMASK(31, 0)
+#define YT921X_FDB_OUT1 0x1804b4
+#define YT921X_FDB_IO1_EGR_INT_PRI_EN BIT(31)
+#define YT921X_FDB_IO1_STATUS_M GENMASK(30, 28)
+#define YT921X_FDB_IO1_STATUS(x) FIELD_PREP(YT921X_FDB_IO1_STATUS_M, (x))
+#define YT921X_FDB_IO1_STATUS_INVALID YT921X_FDB_IO1_STATUS(0)
+#define YT921X_FDB_IO1_STATUS_MIN_TIME YT921X_FDB_IO1_STATUS(1)
+#define YT921X_FDB_IO1_STATUS_MOVE_AGING_MAX_TIME YT921X_FDB_IO1_STATUS(3)
+#define YT921X_FDB_IO1_STATUS_MAX_TIME YT921X_FDB_IO1_STATUS(5)
+#define YT921X_FDB_IO1_STATUS_PENDING YT921X_FDB_IO1_STATUS(6)
+#define YT921X_FDB_IO1_STATUS_STATIC YT921X_FDB_IO1_STATUS(7)
+#define YT921X_FDB_IO1_FID_M GENMASK(27, 16) /* filtering ID (VID) */
+#define YT921X_FDB_IO1_FID(x) FIELD_PREP(YT921X_FDB_IO1_FID_M, (x))
+#define YT921X_FDB_IO1_ADDR_LO2_M GENMASK(15, 0)
+#define YT921X_FDB_OUT2 0x1804b8
+#define YT921X_FDB_IO2_MOVE_AGING_STATUS_M GENMASK(31, 30)
+#define YT921X_FDB_IO2_IGR_DROP BIT(29)
+#define YT921X_FDB_IO2_EGR_PORTS_M GENMASK(28, 18)
+#define YT921X_FDB_IO2_EGR_PORTS(x) FIELD_PREP(YT921X_FDB_IO2_EGR_PORTS_M, (x))
+#define YT921X_FDB_IO2_EGR_DROP BIT(17)
+#define YT921X_FDB_IO2_COPY_TO_CPU BIT(16)
+#define YT921X_FDB_IO2_IGR_INT_PRI_EN BIT(15)
+#define YT921X_FDB_IO2_INT_PRI_M GENMASK(14, 12)
+#define YT921X_FDB_IO2_INT_PRI(x) FIELD_PREP(YT921X_FDB_IO2_INT_PRI_M, (x))
+#define YT921X_FDB_IO2_NEW_VID_M GENMASK(11, 0)
+#define YT921X_FDB_IO2_NEW_VID(x) FIELD_PREP(YT921X_FDB_IO2_NEW_VID_M, (x))
+#define YT921X_FILTER_UNK_UCAST 0x180508
+#define YT921X_FILTER_UNK_MCAST 0x18050c
+#define YT921X_FILTER_MCAST 0x180510
+#define YT921X_FILTER_BCAST 0x180514
+#define YT921X_FILTER_PORTS_M GENMASK(10, 0)
+#define YT921X_FILTER_PORTS(x) FIELD_PREP(YT921X_FILTER_PORTS_M, (x))
+#define YT921X_FILTER_PORTn(port) BIT(port)
+#define YT921X_VLAN_EGR_FILTER 0x180598
+#define YT921X_VLAN_EGR_FILTER_PORTn(port) BIT(port)
+#define YT921X_CPU_COPY 0x180690
+#define YT921X_CPU_COPY_FORCE_INT_PORT BIT(2)
+#define YT921X_CPU_COPY_TO_INT_CPU BIT(1)
+#define YT921X_CPU_COPY_TO_EXT_CPU BIT(0)
+#define YT921X_ACT_UNK_UCAST 0x180734
+#define YT921X_ACT_UNK_MCAST 0x180738
+#define YT921X_ACT_UNK_MCAST_BYPASS_DROP_RMA BIT(23)
+#define YT921X_ACT_UNK_MCAST_BYPASS_DROP_IGMP BIT(22)
+#define YT921X_ACT_UNK_ACTn_M(port) GENMASK(2 * (port) + 1, 2 * (port))
+#define YT921X_ACT_UNK_ACTn(port, x) ((x) << (2 * (port)))
+#define YT921X_ACT_UNK_ACTn_FORWARD(port) YT921X_ACT_UNK_ACTn(port, 0) /* flood */
+#define YT921X_ACT_UNK_ACTn_TRAP(port) YT921X_ACT_UNK_ACTn(port, 1) /* steer to CPU */
+#define YT921X_ACT_UNK_ACTn_DROP(port) YT921X_ACT_UNK_ACTn(port, 2) /* discard */
+/* NEVER use this action; see comments in the tag driver */
+#define YT921X_ACT_UNK_ACTn_COPY(port) YT921X_ACT_UNK_ACTn(port, 3) /* flood and copy */
+#define YT921X_FDB_HW_FLUSH 0x180958
+#define YT921X_FDB_HW_FLUSH_ON_LINKDOWN BIT(0)
+
+#define YT921X_VLANn_CTRL(vlan) (0x188000 + 8 * (vlan))
+#define YT921X_VLAN_CTRL_UNTAG_PORTS_M GENMASK_ULL(50, 40)
+#define YT921X_VLAN_CTRL_UNTAG_PORTS(x) FIELD_PREP(YT921X_VLAN_CTRL_UNTAG_PORTS_M, (x))
+#define YT921X_VLAN_CTRL_UNTAG_PORTn(port) BIT_ULL((port) + 40)
+#define YT921X_VLAN_CTRL_STP_ID_M GENMASK_ULL(39, 36)
+#define YT921X_VLAN_CTRL_STP_ID(x) FIELD_PREP(YT921X_VLAN_CTRL_STP_ID_M, (x))
+#define YT921X_VLAN_CTRL_SVLAN_EN BIT_ULL(35)
+#define YT921X_VLAN_CTRL_FID_M GENMASK_ULL(34, 23)
+#define YT921X_VLAN_CTRL_FID(x) FIELD_PREP(YT921X_VLAN_CTRL_FID_M, (x))
+#define YT921X_VLAN_CTRL_LEARN_DIS BIT_ULL(22)
+#define YT921X_VLAN_CTRL_INT_PRI_EN BIT_ULL(21)
+#define YT921X_VLAN_CTRL_INT_PRI_M GENMASK_ULL(20, 18)
+#define YT921X_VLAN_CTRL_PORTS_M GENMASK_ULL(17, 7)
+#define YT921X_VLAN_CTRL_PORTS(x) FIELD_PREP(YT921X_VLAN_CTRL_PORTS_M, (x))
+#define YT921X_VLAN_CTRL_PORTn(port) BIT_ULL((port) + 7)
+#define YT921X_VLAN_CTRL_BYPASS_1X_AC BIT_ULL(6)
+#define YT921X_VLAN_CTRL_METER_EN BIT_ULL(5)
+#define YT921X_VLAN_CTRL_METER_ID_M GENMASK_ULL(4, 0)
+
+#define YT921X_TPID_IGRn(x) (0x210000 + 4 * (x)) /* [0, 3] */
+#define YT921X_TPID_IGR_TPID_M GENMASK(15, 0)
+#define YT921X_PORTn_IGR_TPID(port) (0x210010 + 4 * (port))
+#define YT921X_PORT_IGR_TPIDn_STAG_M GENMASK(7, 4)
+#define YT921X_PORT_IGR_TPIDn_STAG(x) BIT((x) + 4)
+#define YT921X_PORT_IGR_TPIDn_CTAG_M GENMASK(3, 0)
+#define YT921X_PORT_IGR_TPIDn_CTAG(x) BIT(x)
+
+#define YT921X_PORTn_VLAN_CTRL(port) (0x230010 + 4 * (port))
+#define YT921X_PORT_VLAN_CTRL_SVLAN_PRI_EN BIT(31)
+#define YT921X_PORT_VLAN_CTRL_CVLAN_PRI_EN BIT(30)
+#define YT921X_PORT_VLAN_CTRL_SVID_M GENMASK(29, 18)
+#define YT921X_PORT_VLAN_CTRL_SVID(x) FIELD_PREP(YT921X_PORT_VLAN_CTRL_SVID_M, (x))
+#define YT921X_PORT_VLAN_CTRL_CVID_M GENMASK(17, 6)
+#define YT921X_PORT_VLAN_CTRL_CVID(x) FIELD_PREP(YT921X_PORT_VLAN_CTRL_CVID_M, (x))
+#define YT921X_PORT_VLAN_CTRL_SVLAN_PRI_M GENMASK(5, 3)
+#define YT921X_PORT_VLAN_CTRL_CVLAN_PRI_M GENMASK(2, 0)
+#define YT921X_PORTn_VLAN_CTRL1(port) (0x230080 + 4 * (port))
+#define YT921X_PORT_VLAN_CTRL1_VLAN_RANGE_EN BIT(8)
+#define YT921X_PORT_VLAN_CTRL1_VLAN_RANGE_PROFILE_ID_M GENMASK(7, 4)
+#define YT921X_PORT_VLAN_CTRL1_SVLAN_DROP_TAGGED BIT(3)
+#define YT921X_PORT_VLAN_CTRL1_SVLAN_DROP_UNTAGGED BIT(2)
+#define YT921X_PORT_VLAN_CTRL1_CVLAN_DROP_TAGGED BIT(1)
+#define YT921X_PORT_VLAN_CTRL1_CVLAN_DROP_UNTAGGED BIT(0)
+
+#define YT921X_MIRROR 0x300300
+#define YT921X_MIRROR_IGR_PORTS_M GENMASK(26, 16)
+#define YT921X_MIRROR_IGR_PORTS(x) FIELD_PREP(YT921X_MIRROR_IGR_PORTS_M, (x))
+#define YT921X_MIRROR_IGR_PORTn(port) BIT((port) + 16)
+#define YT921X_MIRROR_EGR_PORTS_M GENMASK(14, 4)
+#define YT921X_MIRROR_EGR_PORTS(x) FIELD_PREP(YT921X_MIRROR_EGR_PORTS_M, (x))
+#define YT921X_MIRROR_EGR_PORTn(port) BIT((port) + 4)
+#define YT921X_MIRROR_PORT_M GENMASK(3, 0)
+#define YT921X_MIRROR_PORT(x) FIELD_PREP(YT921X_MIRROR_PORT_M, (x))
+
+#define YT921X_EDATA_EXTMODE 0xfb
+#define YT921X_EDATA_LEN 0x100
+
+#define YT921X_FDB_NUM 4096
+
+enum yt921x_fdb_entry_status {
+ YT921X_FDB_ENTRY_STATUS_INVALID = 0,
+ YT921X_FDB_ENTRY_STATUS_MIN_TIME = 1,
+ YT921X_FDB_ENTRY_STATUS_MOVE_AGING_MAX_TIME = 3,
+ YT921X_FDB_ENTRY_STATUS_MAX_TIME = 5,
+ YT921X_FDB_ENTRY_STATUS_PENDING = 6,
+ YT921X_FDB_ENTRY_STATUS_STATIC = 7,
+};
+
+#define YT921X_MSTI_NUM 16
+
+#define YT9215_MAJOR 0x9002
+#define YT9218_MAJOR 0x9001
+
+/* required for a hard reset */
+#define YT921X_RST_DELAY_US 10000
+
+#define YT921X_FRAME_SIZE_MAX 0x2400 /* 9216 */
+
+#define YT921X_TAG_LEN 8
+
+/* 8 internal + 2 external + 1 mcu */
+#define YT921X_PORT_NUM 11
+
+#define yt921x_port_is_internal(port) ((port) < 8)
+#define yt921x_port_is_external(port) (8 <= (port) && (port) < 9)
+
+struct yt921x_mib {
+ u64 rx_broadcast;
+ u64 rx_pause;
+ u64 rx_multicast;
+ u64 rx_crc_errors;
+
+ u64 rx_alignment_errors;
+ u64 rx_undersize_errors;
+ u64 rx_fragment_errors;
+ u64 rx_64byte;
+
+ u64 rx_65_127byte;
+ u64 rx_128_255byte;
+ u64 rx_256_511byte;
+ u64 rx_512_1023byte;
+
+ u64 rx_1024_1518byte;
+ u64 rx_jumbo;
+ u64 rx_good_bytes;
+
+ u64 rx_bad_bytes;
+ u64 rx_oversize_errors;
+
+ u64 rx_dropped;
+ u64 tx_broadcast;
+ u64 tx_pause;
+ u64 tx_multicast;
+
+ u64 tx_undersize_errors;
+ u64 tx_64byte;
+ u64 tx_65_127byte;
+ u64 tx_128_255byte;
+
+ u64 tx_256_511byte;
+ u64 tx_512_1023byte;
+ u64 tx_1024_1518byte;
+ u64 tx_jumbo;
+
+ u64 tx_good_bytes;
+ u64 tx_collisions;
+
+ u64 tx_aborted_errors;
+ u64 tx_multiple_collisions;
+ u64 tx_single_collisions;
+ u64 tx_good;
+
+ u64 tx_deferred;
+ u64 tx_late_collisions;
+ u64 rx_oam;
+ u64 tx_oam;
+};
+
+struct yt921x_port {
+ unsigned char index;
+
+ bool hairpin;
+ bool isolated;
+
+ struct delayed_work mib_read;
+ struct yt921x_mib mib;
+ u64 rx_frames;
+ u64 tx_frames;
+};
+
+struct yt921x_reg_ops {
+ int (*read)(void *context, u32 reg, u32 *valp);
+ int (*write)(void *context, u32 reg, u32 val);
+};
+
+struct yt921x_priv {
+ struct dsa_switch ds;
+
+ const struct yt921x_info *info;
+ /* cache of dsa_cpu_ports(ds) */
+ u16 cpu_ports_mask;
+
+ /* protect the access to the switch registers */
+ struct mutex reg_lock;
+ const struct yt921x_reg_ops *reg_ops;
+ void *reg_ctx;
+
+ /* mdio master bus */
+ struct mii_bus *mbus_int;
+ struct mii_bus *mbus_ext;
+
+ struct yt921x_port ports[YT921X_PORT_NUM];
+
+ u16 eee_ports_mask;
+};
+
+#endif
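For orientation, the *_M masks and their FIELD_PREP() wrappers above are meant to be used in a read-modify-write through struct yt921x_reg_ops. A minimal sketch under that assumption — the helper name is illustrative and not part of this patch, and the caller is assumed to hold priv->reg_lock:

	static int yt921x_port_set_learn_limit(struct yt921x_priv *priv,
					       int port, u8 limit)
	{
		u32 val;
		int err;

		/* Illustrative only: RMW of the per-port learn register */
		err = priv->reg_ops->read(priv->reg_ctx,
					  YT921X_PORTn_LEARN(port), &val);
		if (err)
			return err;

		val &= ~YT921X_PORT_LEARN_LIMIT_M;
		val |= YT921X_PORT_LEARN_LIMIT_EN | YT921X_PORT_LEARN_LIMIT(limit);

		return priv->reg_ops->write(priv->reg_ctx,
					    YT921X_PORTn_LEARN(port), val);
	}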
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index d29b5d7af0d7..d6bdad4baadd 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -38,6 +38,7 @@
#include <linux/moduleparam.h>
#include <linux/rtnetlink.h>
#include <linux/net_tstamp.h>
+#include <net/netdev_lock.h>
#include <net/rtnetlink.h>
#include <linux/u64_stats_sync.h>
@@ -104,14 +105,16 @@ static void dummy_setup(struct net_device *dev)
dev->netdev_ops = &dummy_netdev_ops;
dev->ethtool_ops = &dummy_ethtool_ops;
dev->needs_free_netdev = true;
+ dev->request_ops_lock = true;
/* Fill in device structure with ethernet-generic values. */
dev->flags |= IFF_NOARP;
dev->flags &= ~IFF_MULTICAST;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
+ dev->lltx = true;
dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST;
dev->features |= NETIF_F_GSO_SOFTWARE;
- dev->features |= NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX;
+ dev->features |= NETIF_F_HW_CSUM | NETIF_F_HIGHDMA;
dev->features |= NETIF_F_GSO_ENCAP_ALL;
dev->hw_features |= dev->features;
dev->hw_enc_features |= dev->features;
@@ -167,22 +170,21 @@ static int __init dummy_init_module(void)
{
int i, err = 0;
- down_write(&pernet_ops_rwsem);
- rtnl_lock();
- err = __rtnl_link_register(&dummy_link_ops);
+ err = rtnl_link_register(&dummy_link_ops);
if (err < 0)
- goto out;
+ return err;
+
+ rtnl_net_lock(&init_net);
for (i = 0; i < numdummies && !err; i++) {
err = dummy_init_one();
cond_resched();
}
- if (err < 0)
- __rtnl_link_unregister(&dummy_link_ops);
-out:
- rtnl_unlock();
- up_write(&pernet_ops_rwsem);
+ rtnl_net_unlock(&init_net);
+
+ if (err < 0)
+ rtnl_link_unregister(&dummy_link_ops);
return err;
}
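The reworked init path drops the open-coded pernet_ops_rwsem/rtnl_lock() sequence in favour of plain rtnl_link_register() plus the per-netns RTNL lock, unregistering the link ops again only if device creation fails. Stripped of the dummy-specific details, the resulting shape is roughly (illustrative skeleton; example_link_ops, example_init_one() and numdevs are hypothetical stand-ins):

	static int __init example_init_module(void)
	{
		int i, err;

		err = rtnl_link_register(&example_link_ops);
		if (err < 0)
			return err;

		rtnl_net_lock(&init_net);
		for (i = 0; i < numdevs && !err; i++)
			err = example_init_one();	/* create one initial device */
		rtnl_net_unlock(&init_net);

		if (err < 0)
			rtnl_link_unregister(&example_link_ops);

		return err;
	}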
diff --git a/drivers/net/eql.c b/drivers/net/eql.c
index 3c2efda916f1..9ba10efd3794 100644
--- a/drivers/net/eql.c
+++ b/drivers/net/eql.c
@@ -143,7 +143,7 @@ static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave);
static void eql_timer(struct timer_list *t)
{
- equalizer_t *eql = from_timer(eql, t, timer);
+ equalizer_t *eql = timer_container_of(eql, t, timer);
struct list_head *this, *tmp, *head;
spin_lock(&eql->queue.lock);
@@ -254,7 +254,7 @@ static int eql_close(struct net_device *dev)
* at the data structure it scans every so often...
*/
- del_timer_sync(&eql->timer);
+ timer_delete_sync(&eql->timer);
eql_kill_slave_queue(&eql->queue);
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index ba3e7aa1a28f..2227c83a4862 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -31,9 +31,6 @@
Setting to > 1512 effectively disables this feature. */
static int rx_copybreak = 200;
-/* Allow setting MTU to a larger size, bypassing the normal ethernet setup. */
-static const int mtu = 1500;
-
/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static int max_interrupt_work = 20;
@@ -862,7 +859,7 @@ static int corkscrew_open(struct net_device *dev)
static void corkscrew_timer(struct timer_list *t)
{
#ifdef AUTOMEDIA
- struct corkscrew_private *vp = from_timer(vp, t, timer);
+ struct corkscrew_private *vp = timer_container_of(vp, t, timer);
struct net_device *dev = vp->our_dev;
int ioaddr = dev->base_addr;
unsigned long flags;
@@ -1417,7 +1414,7 @@ static int corkscrew_close(struct net_device *dev)
dev->name, rx_nocopy, rx_copy, queued_packet);
}
- del_timer_sync(&vp->timer);
+ timer_delete_sync(&vp->timer);
/* Turn off statistics ASAP. We update lp->stats below. */
outw(StatsDisable, ioaddr + EL3_CMD);
@@ -1550,9 +1547,8 @@ static const struct ethtool_ops netdev_ethtool_ops = {
.set_msglevel = netdev_set_msglevel,
};
-
#ifdef MODULE
-void cleanup_module(void)
+static void __exit corkscrew_exit_module(void)
{
while (!list_empty(&root_corkscrew_dev)) {
struct net_device *dev;
@@ -1566,4 +1562,5 @@ void cleanup_module(void)
free_netdev(dev);
}
}
+module_exit(corkscrew_exit_module);
#endif /* MODULE */
diff --git a/drivers/net/ethernet/3com/3c574_cs.c b/drivers/net/ethernet/3com/3c574_cs.c
index dc3b7c960611..1f2070497a75 100644
--- a/drivers/net/ethernet/3com/3c574_cs.c
+++ b/drivers/net/ethernet/3com/3c574_cs.c
@@ -858,7 +858,7 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
*/
static void media_check(struct timer_list *t)
{
- struct el3_private *lp = from_timer(lp, t, media);
+ struct el3_private *lp = timer_container_of(lp, t, media);
struct net_device *dev = lp->p_dev->priv;
unsigned int ioaddr = dev->base_addr;
unsigned long flags;
@@ -1140,7 +1140,7 @@ static int el3_close(struct net_device *dev)
link->open--;
netif_stop_queue(dev);
- del_timer_sync(&lp->media);
+ timer_delete_sync(&lp->media);
return 0;
}
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index 5267e9dcd87e..ea49be43b8c3 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -502,7 +502,7 @@ static int el3_config(struct net_device *dev, struct ifmap *map)
{
if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
if (map->port <= 3) {
- dev->if_port = map->port;
+ WRITE_ONCE(dev->if_port, map->port);
netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
tc589_set_xcvr(dev, dev->if_port);
} else {
@@ -685,7 +685,7 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
static void media_check(struct timer_list *t)
{
- struct el3_private *lp = from_timer(lp, t, media);
+ struct el3_private *lp = timer_container_of(lp, t, media);
struct net_device *dev = lp->p_dev->priv;
unsigned int ioaddr = dev->base_addr;
u16 media, errs;
@@ -946,7 +946,7 @@ static int el3_close(struct net_device *dev)
link->open--;
netif_stop_queue(dev);
- del_timer_sync(&lp->media);
+ timer_delete_sync(&lp->media);
return 0;
}
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 082388bb6169..8c9cc97efd4e 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1302,7 +1302,7 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
if (print_info)
pr_cont(", IRQ %d\n", dev->irq);
/* Tell them about an invalid IRQ. */
- if (dev->irq <= 0 || dev->irq >= nr_irqs)
+ if (dev->irq <= 0 || dev->irq >= irq_get_nr_irqs())
pr_warn(" *** Warning: IRQ %d is unlikely to work! ***\n",
dev->irq);
@@ -1783,7 +1783,7 @@ out:
static void
vortex_timer(struct timer_list *t)
{
- struct vortex_private *vp = from_timer(vp, t, timer);
+ struct vortex_private *vp = timer_container_of(vp, t, timer);
struct net_device *dev = vp->mii.dev;
void __iomem *ioaddr = vp->ioaddr;
int next_tick = 60*HZ;
@@ -2691,7 +2691,7 @@ vortex_down(struct net_device *dev, int final_down)
netdev_reset_queue(dev);
netif_stop_queue(dev);
- del_timer_sync(&vp->timer);
+ timer_delete_sync(&vp->timer);
/* Turn off statistics ASAP. We update dev->stats below. */
iowrite16(StatsDisable, ioaddr + EL3_CMD);
diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig
index 706bd59bf645..1fbab79e2be4 100644
--- a/drivers/net/ethernet/3com/Kconfig
+++ b/drivers/net/ethernet/3com/Kconfig
@@ -44,7 +44,7 @@ config 3C515
config PCMCIA_3C574
tristate "3Com 3c574 PCMCIA support"
- depends on PCMCIA
+ depends on PCMCIA && HAS_IOPORT
help
Say Y here if you intend to attach a 3Com 3c574 or compatible PCMCIA
(PC-card) Fast Ethernet card to your computer.
@@ -54,7 +54,7 @@ config PCMCIA_3C574
config PCMCIA_3C589
tristate "3Com 3c589 PCMCIA support"
- depends on PCMCIA
+ depends on PCMCIA && HAS_IOPORT
help
Say Y here if you intend to attach a 3Com 3c589 or compatible PCMCIA
(PC-card) Ethernet card to your computer.
diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig
index a4130e643342..345f250781c6 100644
--- a/drivers/net/ethernet/8390/Kconfig
+++ b/drivers/net/ethernet/8390/Kconfig
@@ -19,7 +19,7 @@ if NET_VENDOR_8390
config PCMCIA_AXNET
tristate "Asix AX88190 PCMCIA support"
- depends on PCMCIA
+ depends on PCMCIA && HAS_IOPORT
help
Say Y here if you intend to attach an Asix AX88190-based PCMCIA
(PC-card) Fast Ethernet card to your computer. These cards are
@@ -117,7 +117,7 @@ config NE2000
config NE2K_PCI
tristate "PCI NE2000 and clones support (see help)"
- depends on PCI
+ depends on PCI && HAS_IOPORT
select CRC32
help
This driver is for NE2000 compatible PCI cards. It will not work
@@ -146,7 +146,7 @@ config APNE
config PCMCIA_PCNET
tristate "NE2000 compatible PCMCIA support"
- depends on PCMCIA
+ depends on PCMCIA && HAS_IOPORT
select CRC32
help
Say Y here if you intend to attach an NE2000 compatible PCMCIA
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index 2874680ef24d..e1695d0fbd8b 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -1009,7 +1009,7 @@ static struct platform_driver axdrv = {
.name = "ax88796",
},
.probe = ax_probe,
- .remove_new = ax_remove,
+ .remove = ax_remove,
.suspend = ax_suspend,
.resume = ax_resume,
};
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
index fea489af72fb..7c8213011b5c 100644
--- a/drivers/net/ethernet/8390/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -504,7 +504,7 @@ static int axnet_close(struct net_device *dev)
link->open--;
netif_stop_queue(dev);
- del_timer_sync(&info->watchdog);
+ timer_delete_sync(&info->watchdog);
return 0;
} /* axnet_close */
@@ -550,7 +550,7 @@ static irqreturn_t ei_irq_wrapper(int irq, void *dev_id)
static void ei_watchdog(struct timer_list *t)
{
- struct axnet_dev *info = from_timer(info, t, watchdog);
+ struct axnet_dev *info = timer_container_of(info, t, watchdog);
struct net_device *dev = info->p_dev->priv;
unsigned int nic_base = dev->base_addr;
unsigned int mii_addr = nic_base + AXNET_MII_EEP;
diff --git a/drivers/net/ethernet/8390/etherh.c b/drivers/net/ethernet/8390/etherh.c
index 05d39ecb97ff..e876fe52399a 100644
--- a/drivers/net/ethernet/8390/etherh.c
+++ b/drivers/net/ethernet/8390/etherh.c
@@ -258,7 +258,7 @@ static int etherh_set_config(struct net_device *dev, struct ifmap *map)
* media type, turn off automedia detection.
*/
dev->flags &= ~IFF_AUTOMEDIA;
- dev->if_port = map->port;
+ WRITE_ONCE(dev->if_port, map->port);
break;
default:
diff --git a/drivers/net/ethernet/8390/mcf8390.c b/drivers/net/ethernet/8390/mcf8390.c
index 5a0fa995e643..94ff8364cdf0 100644
--- a/drivers/net/ethernet/8390/mcf8390.c
+++ b/drivers/net/ethernet/8390/mcf8390.c
@@ -457,7 +457,7 @@ static struct platform_driver mcf8390_drv = {
.name = "mcf8390",
},
.probe = mcf8390_probe,
- .remove_new = mcf8390_remove,
+ .remove = mcf8390_remove,
};
module_platform_driver(mcf8390_drv);
diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c
index 350683a09d2e..961019c32842 100644
--- a/drivers/net/ethernet/8390/ne.c
+++ b/drivers/net/ethernet/8390/ne.c
@@ -894,7 +894,7 @@ static int ne_drv_resume(struct platform_device *pdev)
#endif
static struct platform_driver ne_driver = {
- .remove_new = ne_drv_remove,
+ .remove = ne_drv_remove,
.suspend = ne_drv_suspend,
.resume = ne_drv_resume,
.driver = {
diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c
index 65f56a98c0a0..1a34da07c0db 100644
--- a/drivers/net/ethernet/8390/ne2k-pci.c
+++ b/drivers/net/ethernet/8390/ne2k-pci.c
@@ -186,17 +186,6 @@ static void ne2k_pci_block_output(struct net_device *dev, const int count,
static const struct ethtool_ops ne2k_pci_ethtool_ops;
-
-/* There is no room in the standard 8390 structure for extra info we need,
- * so we build a meta/outer-wrapper structure..
- */
-struct ne2k_pci_card {
- struct net_device *dev;
- struct pci_dev *pci_dev;
-};
-
-
-
/* NEx000-clone boards have a Station Address (SA) PROM (SAPROM) in the packet
* buffer memory space. By-the-spec NE2000 clones have 0x57,0x57 in bytes
* 0x0e,0x0f of the SAPROM, while other supposed NE2000 clones must be
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
index 9bd5e991f1e5..19f9c5db3f3b 100644
--- a/drivers/net/ethernet/8390/pcnet_cs.c
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@ -947,7 +947,7 @@ static int pcnet_close(struct net_device *dev)
link->open--;
netif_stop_queue(dev);
- del_timer_sync(&info->watchdog);
+ timer_delete_sync(&info->watchdog);
return 0;
} /* pcnet_close */
@@ -994,7 +994,7 @@ static int set_config(struct net_device *dev, struct ifmap *map)
return -EOPNOTSUPP;
else if ((map->port < 1) || (map->port > 2))
return -EINVAL;
- dev->if_port = map->port;
+ WRITE_ONCE(dev->if_port, map->port);
netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
NS8390_init(dev, 1);
}
@@ -1018,7 +1018,7 @@ static irqreturn_t ei_irq_wrapper(int irq, void *dev_id)
static void ei_watchdog(struct timer_list *t)
{
- struct pcnet_dev *info = from_timer(info, t, watchdog);
+ struct pcnet_dev *info = timer_container_of(info, t, watchdog);
struct net_device *dev = info->p_dev->priv;
unsigned int nic_base = dev->base_addr;
unsigned int mii_addr = nic_base + DLINK_GPIO;
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 6a19b5393ed1..4a1b368ca7e6 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -20,6 +20,7 @@ source "drivers/net/ethernet/actions/Kconfig"
source "drivers/net/ethernet/adaptec/Kconfig"
source "drivers/net/ethernet/aeroflex/Kconfig"
source "drivers/net/ethernet/agere/Kconfig"
+source "drivers/net/ethernet/airoha/Kconfig"
source "drivers/net/ethernet/alacritech/Kconfig"
source "drivers/net/ethernet/allwinner/Kconfig"
source "drivers/net/ethernet/alteon/Kconfig"
@@ -122,11 +123,13 @@ source "drivers/net/ethernet/litex/Kconfig"
source "drivers/net/ethernet/marvell/Kconfig"
source "drivers/net/ethernet/mediatek/Kconfig"
source "drivers/net/ethernet/mellanox/Kconfig"
+source "drivers/net/ethernet/meta/Kconfig"
source "drivers/net/ethernet/micrel/Kconfig"
source "drivers/net/ethernet/microchip/Kconfig"
source "drivers/net/ethernet/mscc/Kconfig"
source "drivers/net/ethernet/microsoft/Kconfig"
source "drivers/net/ethernet/moxa/Kconfig"
+source "drivers/net/ethernet/mucse/Kconfig"
source "drivers/net/ethernet/myricom/Kconfig"
config FEALNX
@@ -157,6 +160,17 @@ config ETHOC
help
Say Y here if you want to use the OpenCores 10/100 Mbps Ethernet MAC.
+config OA_TC6
+ tristate "OPEN Alliance TC6 10BASE-T1x MAC-PHY support" if COMPILE_TEST
+ depends on SPI
+ select PHYLIB
+ help
+ This library implements the OPEN Alliance TC6 10BASE-T1x MAC-PHY
+ Serial Interface protocol for 10BASE-T1x MAC-PHYs.
+
+ For implementation details, refer to the documentation in
+ <file:Documentation/networking/oa-tc6-framework.rst>.
+
source "drivers/net/ethernet/packetengines/Kconfig"
source "drivers/net/ethernet/pasemi/Kconfig"
source "drivers/net/ethernet/pensando/Kconfig"
@@ -175,6 +189,7 @@ source "drivers/net/ethernet/sis/Kconfig"
source "drivers/net/ethernet/sfc/Kconfig"
source "drivers/net/ethernet/smsc/Kconfig"
source "drivers/net/ethernet/socionext/Kconfig"
+source "drivers/net/ethernet/spacemit/Kconfig"
source "drivers/net/ethernet/stmicro/Kconfig"
source "drivers/net/ethernet/sun/Kconfig"
source "drivers/net/ethernet/sunplus/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 0d872d4efcd1..2e18df8ca8ec 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_NET_VENDOR_ADAPTEC) += adaptec/
obj-$(CONFIG_GRETH) += aeroflex/
obj-$(CONFIG_NET_VENDOR_ADI) += adi/
obj-$(CONFIG_NET_VENDOR_AGERE) += agere/
+obj-$(CONFIG_NET_VENDOR_AIROHA) += airoha/
obj-$(CONFIG_NET_VENDOR_ALACRITECH) += alacritech/
obj-$(CONFIG_NET_VENDOR_ALLWINNER) += allwinner/
obj-$(CONFIG_NET_VENDOR_ALTEON) += alteon/
@@ -59,10 +60,12 @@ obj-$(CONFIG_NET_VENDOR_LITEX) += litex/
obj-$(CONFIG_NET_VENDOR_MARVELL) += marvell/
obj-$(CONFIG_NET_VENDOR_MEDIATEK) += mediatek/
obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/
+obj-$(CONFIG_NET_VENDOR_META) += meta/
obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/
obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/
obj-$(CONFIG_NET_VENDOR_MICROSEMI) += mscc/
obj-$(CONFIG_NET_VENDOR_MOXART) += moxa/
+obj-$(CONFIG_NET_VENDOR_MUCSE) += mucse/
obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/
obj-$(CONFIG_FEALNX) += fealnx.o
obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/
@@ -89,6 +92,7 @@ obj-$(CONFIG_NET_VENDOR_SOLARFLARE) += sfc/
obj-$(CONFIG_NET_VENDOR_SGI) += sgi/
obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/
obj-$(CONFIG_NET_VENDOR_SOCIONEXT) += socionext/
+obj-$(CONFIG_NET_VENDOR_SPACEMIT) += spacemit/
obj-$(CONFIG_NET_VENDOR_STMICRO) += stmicro/
obj-$(CONFIG_NET_VENDOR_SUN) += sun/
obj-$(CONFIG_NET_VENDOR_SUNPLUS) += sunplus/
@@ -104,3 +108,4 @@ obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/
obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/
obj-$(CONFIG_NET_VENDOR_SYNOPSYS) += synopsys/
obj-$(CONFIG_NET_VENDOR_PENSANDO) += pensando/
+obj-$(CONFIG_OA_TC6) += oa_tc6.o
diff --git a/drivers/net/ethernet/actions/owl-emac.c b/drivers/net/ethernet/actions/owl-emac.c
index e03193da5874..0a08da799255 100644
--- a/drivers/net/ethernet/actions/owl-emac.c
+++ b/drivers/net/ethernet/actions/owl-emac.c
@@ -1325,15 +1325,10 @@ static int owl_emac_mdio_init(struct net_device *netdev)
struct device_node *mdio_node;
int ret;
- mdio_node = of_get_child_by_name(dev->of_node, "mdio");
+ mdio_node = of_get_available_child_by_name(dev->of_node, "mdio");
if (!mdio_node)
return -ENODEV;
- if (!of_device_is_available(mdio_node)) {
- ret = -ENODEV;
- goto err_put_node;
- }
-
priv->mii = devm_mdiobus_alloc(dev);
if (!priv->mii) {
ret = -ENOMEM;
@@ -1607,7 +1602,7 @@ static struct platform_driver owl_emac_driver = {
.pm = &owl_emac_pm_ops,
},
.probe = owl_emac_probe,
- .remove_new = owl_emac_remove,
+ .remove = owl_emac_remove,
};
module_platform_driver(owl_emac_driver);
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 857361c74f5d..e1b8794b14c9 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -441,14 +441,6 @@ enum rx_desc_bits {
};
/* Completion queue entry. */
-struct short_rx_done_desc {
- __le32 status; /* Low 16 bits is length. */
-};
-struct basic_rx_done_desc {
- __le32 status; /* Low 16 bits is length. */
- __le16 vlanid;
- __le16 status2;
-};
struct csum_rx_done_desc {
__le32 status; /* Low 16 bits is length. */
__le16 csum; /* Partial checksum */
diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c
index 8b4ef5121308..30f9d271e595 100644
--- a/drivers/net/ethernet/adi/adin1110.c
+++ b/drivers/net/ethernet/adi/adin1110.c
@@ -11,10 +11,10 @@
#include <linux/crc8.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
+#include <linux/gpio/consumer.h>
#include <linux/if_bridge.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
-#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/mii.h>
#include <linux/module.h>
@@ -26,7 +26,7 @@
#include <net/switchdev.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#define ADIN1110_PHY_ID 0x1
@@ -318,11 +318,11 @@ static int adin1110_read_fifo(struct adin1110_port_priv *port_priv)
* from the ADIN1110 frame header.
*/
if (frame_size < ADIN1110_FRAME_HEADER_LEN + ADIN1110_FEC_LEN)
- return ret;
+ return -EINVAL;
round_len = adin1110_round_len(frame_size);
if (round_len < 0)
- return ret;
+ return -EINVAL;
frame_size_no_fcs = frame_size - ADIN1110_FRAME_HEADER_LEN - ADIN1110_FEC_LEN;
memset(priv->data, 0, ADIN1110_RD_HEADER_LEN);
@@ -1599,7 +1599,7 @@ static int adin1110_probe_netdevs(struct adin1110_priv *priv)
netdev->netdev_ops = &adin1110_netdev_ops;
netdev->ethtool_ops = &adin1110_ethtool_ops;
netdev->priv_flags |= IFF_UNICAST_FLT;
- netdev->features |= NETIF_F_NETNS_LOCAL;
+ netdev->netns_immutable = true;
port_priv->phydev = get_phy_device(priv->mii_bus, i + 1, false);
if (IS_ERR(port_priv->phydev)) {
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 27af7746d645..a593adc16c78 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -484,7 +484,7 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(skb->len > MAX_FRAME_SIZE)) {
dev->stats.tx_errors++;
- goto out;
+ goto len_error;
}
/* Save skb pointer. */
@@ -575,6 +575,7 @@ frag_map_error:
map_error:
if (net_ratelimit())
dev_warn(greth->dev, "Could not create TX DMA mapping\n");
+len_error:
dev_kfree_skb(skb);
out:
return err;
@@ -1564,7 +1565,7 @@ static struct platform_driver greth_of_driver = {
.of_match_table = greth_of_match,
},
.probe = greth_of_probe,
- .remove_new = greth_of_remove,
+ .remove = greth_of_remove,
};
module_platform_driver(greth_of_driver);
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index 3d9220f9c9fe..5c8217638dda 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -2459,6 +2459,10 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
skb->data,
skb_headlen(skb),
DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev,
+ dma_addr))
+ return -ENOMEM;
+
desc[frag].addr_lo = lower_32_bits(dma_addr);
desc[frag].addr_hi = upper_32_bits(dma_addr);
frag++;
@@ -2468,6 +2472,10 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
skb->data,
skb_headlen(skb) / 2,
DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev,
+ dma_addr))
+ return -ENOMEM;
+
desc[frag].addr_lo = lower_32_bits(dma_addr);
desc[frag].addr_hi = upper_32_bits(dma_addr);
frag++;
@@ -2478,6 +2486,10 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
skb_headlen(skb) / 2,
skb_headlen(skb) / 2,
DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev,
+ dma_addr))
+ goto unmap_first_out;
+
desc[frag].addr_lo = lower_32_bits(dma_addr);
desc[frag].addr_hi = upper_32_bits(dma_addr);
frag++;
@@ -2489,6 +2501,9 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
0,
desc[frag].len_vlan,
DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev, dma_addr))
+ goto unmap_out;
+
desc[frag].addr_lo = lower_32_bits(dma_addr);
desc[frag].addr_hi = upper_32_bits(dma_addr);
frag++;
@@ -2578,6 +2593,27 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
&adapter->regs->global.watchdog_timer);
}
return 0;
+
+unmap_out:
+ // Unmap the body of the packet with map_page
+ while (--i) {
+ frag--;
+ dma_addr = desc[frag].addr_lo;
+ dma_addr |= (u64)desc[frag].addr_hi << 32;
+ dma_unmap_page(&adapter->pdev->dev, dma_addr,
+ desc[frag].len_vlan, DMA_TO_DEVICE);
+ }
+
+unmap_first_out:
+ // Unmap the header with map_single
+ while (frag--) {
+ dma_addr = desc[frag].addr_lo;
+ dma_addr |= (u64)desc[frag].addr_hi << 32;
+ dma_unmap_single(&adapter->pdev->dev, dma_addr,
+ desc[frag].len_vlan, DMA_TO_DEVICE);
+ }
+
+ return -ENOMEM;
}
static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter)
@@ -3076,7 +3112,8 @@ err_out:
*/
static void et131x_error_timer_handler(struct timer_list *t)
{
- struct et131x_adapter *adapter = from_timer(adapter, t, error_timer);
+ struct et131x_adapter *adapter = timer_container_of(adapter, t,
+ error_timer);
struct phy_device *phydev = adapter->netdev->phydev;
if (et1310_in_phy_coma(adapter)) {
@@ -3639,7 +3676,7 @@ static int et131x_close(struct net_device *netdev)
free_irq(adapter->pdev->irq, netdev);
/* Stop the error timer */
- return del_timer_sync(&adapter->error_timer);
+ return timer_delete_sync(&adapter->error_timer);
}
/* et131x_set_packet_filter - Configures the Rx Packet filtering */
@@ -3852,7 +3889,7 @@ static int et131x_change_mtu(struct net_device *netdev, int new_mtu)
et131x_disable_txrx(netdev);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
et131x_adapter_memory_free(adapter);
diff --git a/drivers/net/ethernet/airoha/Kconfig b/drivers/net/ethernet/airoha/Kconfig
new file mode 100644
index 000000000000..ad3ce501e7a5
--- /dev/null
+++ b/drivers/net/ethernet/airoha/Kconfig
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config NET_VENDOR_AIROHA
+ bool "Airoha devices"
+ depends on ARCH_AIROHA || COMPILE_TEST
+ help
+ If you have an Airoha SoC with ethernet, say Y.
+
+if NET_VENDOR_AIROHA
+
+config NET_AIROHA_NPU
+ tristate "Airoha NPU support"
+ select WANT_DEV_COREDUMP
+ select REGMAP_MMIO
+ help
+ This driver supports the Airoha Network Processor (NPU) available
+ on the Airoha SoC family.
+
+config NET_AIROHA
+ tristate "Airoha SoC Gigabit Ethernet support"
+ depends on NET_DSA || !NET_DSA
+ select NET_AIROHA_NPU
+ select PAGE_POOL
+ help
+ This driver supports the gigabit ethernet MACs in the
+ Airoha SoC family.
+
+config NET_AIROHA_FLOW_STATS
+ default y
+ bool "Airoha flow stats"
+ depends on NET_AIROHA && NET_AIROHA_NPU
+ help
+ Enable Airoha flowtable statistics counters.
+
+endif #NET_VENDOR_AIROHA
diff --git a/drivers/net/ethernet/airoha/Makefile b/drivers/net/ethernet/airoha/Makefile
new file mode 100644
index 000000000000..94468053e34b
--- /dev/null
+++ b/drivers/net/ethernet/airoha/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the Airoha SoC built-in ethernet MACs
+#
+
+obj-$(CONFIG_NET_AIROHA) += airoha-eth.o
+airoha-eth-y := airoha_eth.o airoha_ppe.o
+airoha-eth-$(CONFIG_DEBUG_FS) += airoha_ppe_debugfs.o
+obj-$(CONFIG_NET_AIROHA_NPU) += airoha_npu.o
diff --git a/drivers/net/ethernet/airoha/airoha_eth.c b/drivers/net/ethernet/airoha/airoha_eth.c
new file mode 100644
index 000000000000..75893c90a0a1
--- /dev/null
+++ b/drivers/net/ethernet/airoha/airoha_eth.c
@@ -0,0 +1,3180 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
+#include <linux/tcp.h>
+#include <linux/u64_stats_sync.h>
+#include <net/dst_metadata.h>
+#include <net/page_pool/helpers.h>
+#include <net/pkt_cls.h>
+#include <uapi/linux/ppp_defs.h>
+
+#include "airoha_regs.h"
+#include "airoha_eth.h"
+
+u32 airoha_rr(void __iomem *base, u32 offset)
+{
+ return readl(base + offset);
+}
+
+void airoha_wr(void __iomem *base, u32 offset, u32 val)
+{
+ writel(val, base + offset);
+}
+
+u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
+{
+ val |= (airoha_rr(base, offset) & ~mask);
+ airoha_wr(base, offset, val);
+
+ return val;
+}
+
+static void airoha_qdma_set_irqmask(struct airoha_irq_bank *irq_bank,
+ int index, u32 clear, u32 set)
+{
+ struct airoha_qdma *qdma = irq_bank->qdma;
+ int bank = irq_bank - &qdma->irq_banks[0];
+ unsigned long flags;
+
+ if (WARN_ON_ONCE(index >= ARRAY_SIZE(irq_bank->irqmask)))
+ return;
+
+ spin_lock_irqsave(&irq_bank->irq_lock, flags);
+
+ irq_bank->irqmask[index] &= ~clear;
+ irq_bank->irqmask[index] |= set;
+ airoha_qdma_wr(qdma, REG_INT_ENABLE(bank, index),
+ irq_bank->irqmask[index]);
+ /* Read irq_enable register in order to guarantee the update above
+ * completes in the spinlock critical section.
+ */
+ airoha_qdma_rr(qdma, REG_INT_ENABLE(bank, index));
+
+ spin_unlock_irqrestore(&irq_bank->irq_lock, flags);
+}
+
+static void airoha_qdma_irq_enable(struct airoha_irq_bank *irq_bank,
+ int index, u32 mask)
+{
+ airoha_qdma_set_irqmask(irq_bank, index, 0, mask);
+}
+
+static void airoha_qdma_irq_disable(struct airoha_irq_bank *irq_bank,
+ int index, u32 mask)
+{
+ airoha_qdma_set_irqmask(irq_bank, index, mask, 0);
+}
+
+static void airoha_set_macaddr(struct airoha_gdm_port *port, const u8 *addr)
+{
+ struct airoha_eth *eth = port->qdma->eth;
+ u32 val, reg;
+
+ reg = airhoa_is_lan_gdm_port(port) ? REG_FE_LAN_MAC_H
+ : REG_FE_WAN_MAC_H;
+ val = (addr[0] << 16) | (addr[1] << 8) | addr[2];
+ airoha_fe_wr(eth, reg, val);
+
+ val = (addr[3] << 16) | (addr[4] << 8) | addr[5];
+ airoha_fe_wr(eth, REG_FE_MAC_LMIN(reg), val);
+ airoha_fe_wr(eth, REG_FE_MAC_LMAX(reg), val);
+
+ airoha_ppe_init_upd_mem(port);
+}
+
+static void airoha_set_gdm_port_fwd_cfg(struct airoha_eth *eth, u32 addr,
+ u32 val)
+{
+ airoha_fe_rmw(eth, addr, GDM_OCFQ_MASK,
+ FIELD_PREP(GDM_OCFQ_MASK, val));
+ airoha_fe_rmw(eth, addr, GDM_MCFQ_MASK,
+ FIELD_PREP(GDM_MCFQ_MASK, val));
+ airoha_fe_rmw(eth, addr, GDM_BCFQ_MASK,
+ FIELD_PREP(GDM_BCFQ_MASK, val));
+ airoha_fe_rmw(eth, addr, GDM_UCFQ_MASK,
+ FIELD_PREP(GDM_UCFQ_MASK, val));
+}
+
+static int airoha_set_vip_for_gdm_port(struct airoha_gdm_port *port,
+ bool enable)
+{
+ struct airoha_eth *eth = port->qdma->eth;
+ u32 vip_port;
+
+ switch (port->id) {
+ case 3:
+ /* FIXME: handle XSI_PCIE1_PORT */
+ vip_port = XSI_PCIE0_VIP_PORT_MASK;
+ break;
+ case 4:
+ /* FIXME: handle XSI_USB_PORT */
+ vip_port = XSI_ETH_VIP_PORT_MASK;
+ break;
+ default:
+ return 0;
+ }
+
+ if (enable) {
+ airoha_fe_set(eth, REG_FE_VIP_PORT_EN, vip_port);
+ airoha_fe_set(eth, REG_FE_IFC_PORT_EN, vip_port);
+ } else {
+ airoha_fe_clear(eth, REG_FE_VIP_PORT_EN, vip_port);
+ airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, vip_port);
+ }
+
+ return 0;
+}
+
+static void airoha_fe_maccr_init(struct airoha_eth *eth)
+{
+ int p;
+
+ for (p = 1; p <= ARRAY_SIZE(eth->ports); p++)
+ airoha_fe_set(eth, REG_GDM_FWD_CFG(p),
+ GDM_TCP_CKSUM_MASK | GDM_UDP_CKSUM_MASK |
+ GDM_IP4_CKSUM_MASK | GDM_DROP_CRC_ERR_MASK);
+
+ airoha_fe_rmw(eth, REG_CDM_VLAN_CTRL(1), CDM_VLAN_MASK,
+ FIELD_PREP(CDM_VLAN_MASK, 0x8100));
+
+ airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PAD);
+}
+
+static void airoha_fe_vip_setup(struct airoha_eth *eth)
+{
+ airoha_fe_wr(eth, REG_FE_VIP_PATN(3), ETH_P_PPP_DISC);
+ airoha_fe_wr(eth, REG_FE_VIP_EN(3), PATN_FCPU_EN_MASK | PATN_EN_MASK);
+
+ airoha_fe_wr(eth, REG_FE_VIP_PATN(4), PPP_LCP);
+ airoha_fe_wr(eth, REG_FE_VIP_EN(4),
+ PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
+ PATN_EN_MASK);
+
+ airoha_fe_wr(eth, REG_FE_VIP_PATN(6), PPP_IPCP);
+ airoha_fe_wr(eth, REG_FE_VIP_EN(6),
+ PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
+ PATN_EN_MASK);
+
+ airoha_fe_wr(eth, REG_FE_VIP_PATN(7), PPP_CHAP);
+ airoha_fe_wr(eth, REG_FE_VIP_EN(7),
+ PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
+ PATN_EN_MASK);
+
+ /* BOOTP (0x43) */
+ airoha_fe_wr(eth, REG_FE_VIP_PATN(8), 0x43);
+ airoha_fe_wr(eth, REG_FE_VIP_EN(8),
+ PATN_FCPU_EN_MASK | PATN_SP_EN_MASK |
+ FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);
+
+ /* BOOTP (0x44) */
+ airoha_fe_wr(eth, REG_FE_VIP_PATN(9), 0x44);
+ airoha_fe_wr(eth, REG_FE_VIP_EN(9),
+ PATN_FCPU_EN_MASK | PATN_SP_EN_MASK |
+ FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);
+
+ /* ISAKMP */
+ airoha_fe_wr(eth, REG_FE_VIP_PATN(10), 0x1f401f4);
+ airoha_fe_wr(eth, REG_FE_VIP_EN(10),
+ PATN_FCPU_EN_MASK | PATN_DP_EN_MASK | PATN_SP_EN_MASK |
+ FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);
+
+ airoha_fe_wr(eth, REG_FE_VIP_PATN(11), PPP_IPV6CP);
+ airoha_fe_wr(eth, REG_FE_VIP_EN(11),
+ PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
+ PATN_EN_MASK);
+
+ /* DHCPv6 */
+ airoha_fe_wr(eth, REG_FE_VIP_PATN(12), 0x2220223);
+ airoha_fe_wr(eth, REG_FE_VIP_EN(12),
+ PATN_FCPU_EN_MASK | PATN_DP_EN_MASK | PATN_SP_EN_MASK |
+ FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);
+
+ airoha_fe_wr(eth, REG_FE_VIP_PATN(19), PPP_PAP);
+ airoha_fe_wr(eth, REG_FE_VIP_EN(19),
+ PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
+ PATN_EN_MASK);
+
+ /* ETH->ETH_P_1905 (0x893a) */
+ airoha_fe_wr(eth, REG_FE_VIP_PATN(20), 0x893a);
+ airoha_fe_wr(eth, REG_FE_VIP_EN(20),
+ PATN_FCPU_EN_MASK | PATN_EN_MASK);
+
+ airoha_fe_wr(eth, REG_FE_VIP_PATN(21), ETH_P_LLDP);
+ airoha_fe_wr(eth, REG_FE_VIP_EN(21),
+ PATN_FCPU_EN_MASK | PATN_EN_MASK);
+}
+
+static u32 airoha_fe_get_pse_queue_rsv_pages(struct airoha_eth *eth,
+ u32 port, u32 queue)
+{
+ u32 val;
+
+ airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_WR,
+ PSE_CFG_PORT_ID_MASK | PSE_CFG_QUEUE_ID_MASK,
+ FIELD_PREP(PSE_CFG_PORT_ID_MASK, port) |
+ FIELD_PREP(PSE_CFG_QUEUE_ID_MASK, queue));
+ val = airoha_fe_rr(eth, REG_FE_PSE_QUEUE_CFG_VAL);
+
+ return FIELD_GET(PSE_CFG_OQ_RSV_MASK, val);
+}
+
+static void airoha_fe_set_pse_queue_rsv_pages(struct airoha_eth *eth,
+ u32 port, u32 queue, u32 val)
+{
+ airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_VAL, PSE_CFG_OQ_RSV_MASK,
+ FIELD_PREP(PSE_CFG_OQ_RSV_MASK, val));
+ airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_WR,
+ PSE_CFG_PORT_ID_MASK | PSE_CFG_QUEUE_ID_MASK |
+ PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK,
+ FIELD_PREP(PSE_CFG_PORT_ID_MASK, port) |
+ FIELD_PREP(PSE_CFG_QUEUE_ID_MASK, queue) |
+ PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK);
+}
+
+static u32 airoha_fe_get_pse_all_rsv(struct airoha_eth *eth)
+{
+ u32 val = airoha_fe_rr(eth, REG_FE_PSE_BUF_SET);
+
+ return FIELD_GET(PSE_ALLRSV_MASK, val);
+}
+
+static int airoha_fe_set_pse_oq_rsv(struct airoha_eth *eth,
+ u32 port, u32 queue, u32 val)
+{
+ u32 orig_val = airoha_fe_get_pse_queue_rsv_pages(eth, port, queue);
+ u32 tmp, all_rsv, fq_limit;
+
+ airoha_fe_set_pse_queue_rsv_pages(eth, port, queue, val);
+
+ /* modify all rsv */
+ all_rsv = airoha_fe_get_pse_all_rsv(eth);
+ all_rsv += (val - orig_val);
+ airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET, PSE_ALLRSV_MASK,
+ FIELD_PREP(PSE_ALLRSV_MASK, all_rsv));
+
+ /* modify hthd */
+ tmp = airoha_fe_rr(eth, PSE_FQ_CFG);
+ fq_limit = FIELD_GET(PSE_FQ_LIMIT_MASK, tmp);
+ tmp = fq_limit - all_rsv - 0x20;
+ airoha_fe_rmw(eth, REG_PSE_SHARE_USED_THD,
+ PSE_SHARE_USED_HTHD_MASK,
+ FIELD_PREP(PSE_SHARE_USED_HTHD_MASK, tmp));
+
+ tmp = fq_limit - all_rsv - 0x100;
+ airoha_fe_rmw(eth, REG_PSE_SHARE_USED_THD,
+ PSE_SHARE_USED_MTHD_MASK,
+ FIELD_PREP(PSE_SHARE_USED_MTHD_MASK, tmp));
+ tmp = (3 * tmp) >> 2;
+ airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET,
+ PSE_SHARE_USED_LTHD_MASK,
+ FIELD_PREP(PSE_SHARE_USED_LTHD_MASK, tmp));
+
+ return 0;
+}
+
+static void airoha_fe_pse_ports_init(struct airoha_eth *eth)
+{
+ const u32 pse_port_num_queues[] = {
+ [FE_PSE_PORT_CDM1] = 6,
+ [FE_PSE_PORT_GDM1] = 6,
+ [FE_PSE_PORT_GDM2] = 32,
+ [FE_PSE_PORT_GDM3] = 6,
+ [FE_PSE_PORT_PPE1] = 4,
+ [FE_PSE_PORT_CDM2] = 6,
+ [FE_PSE_PORT_CDM3] = 8,
+ [FE_PSE_PORT_CDM4] = 10,
+ [FE_PSE_PORT_PPE2] = 4,
+ [FE_PSE_PORT_GDM4] = 2,
+ [FE_PSE_PORT_CDM5] = 2,
+ };
+ u32 all_rsv;
+ int q;
+
+ all_rsv = airoha_fe_get_pse_all_rsv(eth);
+ if (airoha_ppe_is_enabled(eth, 1)) {
+ /* hw misses PPE2 oq rsv */
+ all_rsv += PSE_RSV_PAGES *
+ pse_port_num_queues[FE_PSE_PORT_PPE2];
+ }
+ airoha_fe_set(eth, REG_FE_PSE_BUF_SET, all_rsv);
+
+ /* CDM1 */
+ for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM1]; q++)
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM1, q,
+ PSE_QUEUE_RSV_PAGES);
+ /* GDM1 */
+ for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM1]; q++)
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM1, q,
+ PSE_QUEUE_RSV_PAGES);
+ /* GDM2 */
+ for (q = 6; q < pse_port_num_queues[FE_PSE_PORT_GDM2]; q++)
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM2, q, 0);
+ /* GDM3 */
+ for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM3]; q++)
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM3, q,
+ PSE_QUEUE_RSV_PAGES);
+ /* PPE1 */
+ for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE1]; q++) {
+ if (q < pse_port_num_queues[FE_PSE_PORT_PPE1])
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE1, q,
+ PSE_QUEUE_RSV_PAGES);
+ else
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE1, q, 0);
+ }
+ /* CDM2 */
+ for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM2]; q++)
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM2, q,
+ PSE_QUEUE_RSV_PAGES);
+ /* CDM3 */
+ for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM3] - 1; q++)
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM3, q, 0);
+ /* CDM4 */
+ for (q = 4; q < pse_port_num_queues[FE_PSE_PORT_CDM4]; q++)
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM4, q,
+ PSE_QUEUE_RSV_PAGES);
+ if (airoha_ppe_is_enabled(eth, 1)) {
+ /* PPE2 */
+ for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE2]; q++) {
+ if (q < pse_port_num_queues[FE_PSE_PORT_PPE2] / 2)
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2,
+ q,
+ PSE_QUEUE_RSV_PAGES);
+ else
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2,
+ q, 0);
+ }
+ }
+ /* GDM4 */
+ for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM4]; q++)
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM4, q,
+ PSE_QUEUE_RSV_PAGES);
+ /* CDM5 */
+ for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM5]; q++)
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM5, q,
+ PSE_QUEUE_RSV_PAGES);
+}
+
+static int airoha_fe_mc_vlan_clear(struct airoha_eth *eth)
+{
+ int i;
+
+ for (i = 0; i < AIROHA_FE_MC_MAX_VLAN_TABLE; i++) {
+ int err, j;
+ u32 val;
+
+ airoha_fe_wr(eth, REG_MC_VLAN_DATA, 0x0);
+
+ val = FIELD_PREP(MC_VLAN_CFG_TABLE_ID_MASK, i) |
+ MC_VLAN_CFG_TABLE_SEL_MASK | MC_VLAN_CFG_RW_MASK;
+ airoha_fe_wr(eth, REG_MC_VLAN_CFG, val);
+ err = read_poll_timeout(airoha_fe_rr, val,
+ val & MC_VLAN_CFG_CMD_DONE_MASK,
+ USEC_PER_MSEC, 5 * USEC_PER_MSEC,
+ false, eth, REG_MC_VLAN_CFG);
+ if (err)
+ return err;
+
+ for (j = 0; j < AIROHA_FE_MC_MAX_VLAN_PORT; j++) {
+ airoha_fe_wr(eth, REG_MC_VLAN_DATA, 0x0);
+
+ val = FIELD_PREP(MC_VLAN_CFG_TABLE_ID_MASK, i) |
+ FIELD_PREP(MC_VLAN_CFG_PORT_ID_MASK, j) |
+ MC_VLAN_CFG_RW_MASK;
+ airoha_fe_wr(eth, REG_MC_VLAN_CFG, val);
+ err = read_poll_timeout(airoha_fe_rr, val,
+ val & MC_VLAN_CFG_CMD_DONE_MASK,
+ USEC_PER_MSEC,
+ 5 * USEC_PER_MSEC, false, eth,
+ REG_MC_VLAN_CFG);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void airoha_fe_crsn_qsel_init(struct airoha_eth *eth)
+{
+ /* CDM1_CRSN_QSEL */
+ airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(1, CRSN_22 >> 2),
+ CDM_CRSN_QSEL_REASON_MASK(CRSN_22),
+ FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_22),
+ CDM_CRSN_QSEL_Q1));
+ airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(1, CRSN_08 >> 2),
+ CDM_CRSN_QSEL_REASON_MASK(CRSN_08),
+ FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_08),
+ CDM_CRSN_QSEL_Q1));
+ airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(1, CRSN_21 >> 2),
+ CDM_CRSN_QSEL_REASON_MASK(CRSN_21),
+ FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_21),
+ CDM_CRSN_QSEL_Q1));
+ airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(1, CRSN_24 >> 2),
+ CDM_CRSN_QSEL_REASON_MASK(CRSN_24),
+ FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_24),
+ CDM_CRSN_QSEL_Q6));
+ airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(1, CRSN_25 >> 2),
+ CDM_CRSN_QSEL_REASON_MASK(CRSN_25),
+ FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_25),
+ CDM_CRSN_QSEL_Q1));
+ /* CDM2_CRSN_QSEL */
+ airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(2, CRSN_08 >> 2),
+ CDM_CRSN_QSEL_REASON_MASK(CRSN_08),
+ FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_08),
+ CDM_CRSN_QSEL_Q1));
+ airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(2, CRSN_21 >> 2),
+ CDM_CRSN_QSEL_REASON_MASK(CRSN_21),
+ FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_21),
+ CDM_CRSN_QSEL_Q1));
+ airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(2, CRSN_22 >> 2),
+ CDM_CRSN_QSEL_REASON_MASK(CRSN_22),
+ FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_22),
+ CDM_CRSN_QSEL_Q1));
+ airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(2, CRSN_24 >> 2),
+ CDM_CRSN_QSEL_REASON_MASK(CRSN_24),
+ FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_24),
+ CDM_CRSN_QSEL_Q6));
+ airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(2, CRSN_25 >> 2),
+ CDM_CRSN_QSEL_REASON_MASK(CRSN_25),
+ FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_25),
+ CDM_CRSN_QSEL_Q1));
+}
+
+static int airoha_fe_init(struct airoha_eth *eth)
+{
+ airoha_fe_maccr_init(eth);
+
+ /* PSE IQ reserve */
+ airoha_fe_rmw(eth, REG_PSE_IQ_REV1, PSE_IQ_RES1_P2_MASK,
+ FIELD_PREP(PSE_IQ_RES1_P2_MASK, 0x10));
+ airoha_fe_rmw(eth, REG_PSE_IQ_REV2,
+ PSE_IQ_RES2_P5_MASK | PSE_IQ_RES2_P4_MASK,
+ FIELD_PREP(PSE_IQ_RES2_P5_MASK, 0x40) |
+ FIELD_PREP(PSE_IQ_RES2_P4_MASK, 0x34));
+
+ /* enable FE copy engine for MC/KA/DPI */
+ airoha_fe_wr(eth, REG_FE_PCE_CFG,
+ PCE_DPI_EN_MASK | PCE_KA_EN_MASK | PCE_MC_EN_MASK);
+ /* set vip queue selection to ring 1 */
+ airoha_fe_rmw(eth, REG_CDM_FWD_CFG(1), CDM_VIP_QSEL_MASK,
+ FIELD_PREP(CDM_VIP_QSEL_MASK, 0x4));
+ airoha_fe_rmw(eth, REG_CDM_FWD_CFG(2), CDM_VIP_QSEL_MASK,
+ FIELD_PREP(CDM_VIP_QSEL_MASK, 0x4));
+ /* set GDM4 source interface offset to 8 */
+ airoha_fe_rmw(eth, REG_GDM_SRC_PORT_SET(4),
+ GDM_SPORT_OFF2_MASK |
+ GDM_SPORT_OFF1_MASK |
+ GDM_SPORT_OFF0_MASK,
+ FIELD_PREP(GDM_SPORT_OFF2_MASK, 8) |
+ FIELD_PREP(GDM_SPORT_OFF1_MASK, 8) |
+ FIELD_PREP(GDM_SPORT_OFF0_MASK, 8));
+
+ /* set PSE Page as 128B */
+ airoha_fe_rmw(eth, REG_FE_DMA_GLO_CFG,
+ FE_DMA_GLO_L2_SPACE_MASK | FE_DMA_GLO_PG_SZ_MASK,
+ FIELD_PREP(FE_DMA_GLO_L2_SPACE_MASK, 2) |
+ FE_DMA_GLO_PG_SZ_MASK);
+ airoha_fe_wr(eth, REG_FE_RST_GLO_CFG,
+ FE_RST_CORE_MASK | FE_RST_GDM3_MBI_ARB_MASK |
+ FE_RST_GDM4_MBI_ARB_MASK);
+ usleep_range(1000, 2000);
+
+ /* connect RxRing1 and RxRing15 to PSE Port0 OQ-1
+ * connect other rings to PSE Port0 OQ-0
+ */
+ airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP0, BIT(4));
+ airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP1, BIT(28));
+ airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP2, BIT(4));
+ airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP3, BIT(28));
+
+ airoha_fe_vip_setup(eth);
+ airoha_fe_pse_ports_init(eth);
+
+ airoha_fe_set(eth, REG_GDM_MISC_CFG,
+ GDM2_RDM_ACK_WAIT_PREF_MASK |
+ GDM2_CHN_VLD_MODE_MASK);
+ airoha_fe_rmw(eth, REG_CDM_FWD_CFG(2), CDM_OAM_QSEL_MASK,
+ FIELD_PREP(CDM_OAM_QSEL_MASK, 15));
+
+ /* init fragment and assemble Force Port */
+ /* NPU Core-3, NPU Bridge Channel-3 */
+ airoha_fe_rmw(eth, REG_IP_FRAG_FP,
+ IP_FRAGMENT_PORT_MASK | IP_FRAGMENT_NBQ_MASK,
+ FIELD_PREP(IP_FRAGMENT_PORT_MASK, 6) |
+ FIELD_PREP(IP_FRAGMENT_NBQ_MASK, 3));
+ /* QDMA LAN, RX Ring-22 */
+ airoha_fe_rmw(eth, REG_IP_FRAG_FP,
+ IP_ASSEMBLE_PORT_MASK | IP_ASSEMBLE_NBQ_MASK,
+ FIELD_PREP(IP_ASSEMBLE_PORT_MASK, 0) |
+ FIELD_PREP(IP_ASSEMBLE_NBQ_MASK, 22));
+
+ airoha_fe_set(eth, REG_GDM_FWD_CFG(3), GDM_PAD_EN_MASK);
+ airoha_fe_set(eth, REG_GDM_FWD_CFG(4), GDM_PAD_EN_MASK);
+
+ airoha_fe_crsn_qsel_init(eth);
+
+ airoha_fe_clear(eth, REG_FE_CPORT_CFG, FE_CPORT_QUEUE_XFC_MASK);
+ airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PORT_XFC_MASK);
+
+ /* default aging mode for mbi unlock issue */
+ airoha_fe_rmw(eth, REG_GDM_CHN_RLS(2),
+ MBI_RX_AGE_SEL_MASK | MBI_TX_AGE_SEL_MASK,
+ FIELD_PREP(MBI_RX_AGE_SEL_MASK, 3) |
+ FIELD_PREP(MBI_TX_AGE_SEL_MASK, 3));
+
+ /* disable IFC by default */
+ airoha_fe_clear(eth, REG_FE_CSR_IFC_CFG, FE_IFC_EN_MASK);
+
+ /* enable 1:N vlan action, init vlan table */
+ airoha_fe_set(eth, REG_MC_VLAN_EN, MC_VLAN_EN_MASK);
+
+ return airoha_fe_mc_vlan_clear(eth);
+}
+
+static int airoha_qdma_fill_rx_queue(struct airoha_queue *q)
+{
+ struct airoha_qdma *qdma = q->qdma;
+ int qid = q - &qdma->q_rx[0];
+ int nframes = 0;
+
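+ /* Refill the ring with page_pool fragments up to ndesc - 1 entries
+ * (one descriptor is always left unused) and advance the ring CPU
+ * index for each buffer handed to the hw.
+ */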
+ while (q->queued < q->ndesc - 1) {
+ struct airoha_queue_entry *e = &q->entry[q->head];
+ struct airoha_qdma_desc *desc = &q->desc[q->head];
+ struct page *page;
+ int offset;
+ u32 val;
+
+ page = page_pool_dev_alloc_frag(q->page_pool, &offset,
+ q->buf_size);
+ if (!page)
+ break;
+
+ q->head = (q->head + 1) % q->ndesc;
+ q->queued++;
+ nframes++;
+
+ e->buf = page_address(page) + offset;
+ e->dma_addr = page_pool_get_dma_addr(page) + offset;
+ e->dma_len = SKB_WITH_OVERHEAD(q->buf_size);
+
+ val = FIELD_PREP(QDMA_DESC_LEN_MASK, e->dma_len);
+ WRITE_ONCE(desc->ctrl, cpu_to_le32(val));
+ WRITE_ONCE(desc->addr, cpu_to_le32(e->dma_addr));
+ val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, q->head);
+ WRITE_ONCE(desc->data, cpu_to_le32(val));
+ WRITE_ONCE(desc->msg0, 0);
+ WRITE_ONCE(desc->msg1, 0);
+ WRITE_ONCE(desc->msg2, 0);
+ WRITE_ONCE(desc->msg3, 0);
+
+ airoha_qdma_rmw(qdma, REG_RX_CPU_IDX(qid),
+ RX_RING_CPU_IDX_MASK,
+ FIELD_PREP(RX_RING_CPU_IDX_MASK, q->head));
+ }
+
+ return nframes;
+}
+
+static int airoha_qdma_get_gdm_port(struct airoha_eth *eth,
+ struct airoha_qdma_desc *desc)
+{
+ u32 port, sport, msg1 = le32_to_cpu(desc->msg1);
+
+ sport = FIELD_GET(QDMA_ETH_RXMSG_SPORT_MASK, msg1);
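+ /* msg1 carries the rx source port: 0x2-0x4 map to GDM2-GDM4
+ * (ports 1-3) while 0x10-0x14 collapse to port 0.
+ */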
+ switch (sport) {
+ case 0x10 ... 0x14:
+ port = 0;
+ break;
+ case 0x2 ... 0x4:
+ port = sport - 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return port >= ARRAY_SIZE(eth->ports) ? -EINVAL : port;
+}
+
+static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
+{
+ enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
+ struct airoha_qdma *qdma = q->qdma;
+ struct airoha_eth *eth = qdma->eth;
+ int qid = q - &qdma->q_rx[0];
+ int done = 0;
+
+ while (done < budget) {
+ struct airoha_queue_entry *e = &q->entry[q->tail];
+ struct airoha_qdma_desc *desc = &q->desc[q->tail];
+ u32 hash, reason, msg1 = le32_to_cpu(desc->msg1);
+ struct page *page = virt_to_head_page(e->buf);
+ u32 desc_ctrl = le32_to_cpu(desc->ctrl);
+ struct airoha_gdm_port *port;
+ int data_len, len, p;
+
+ if (!(desc_ctrl & QDMA_DESC_DONE_MASK))
+ break;
+
+ q->tail = (q->tail + 1) % q->ndesc;
+ q->queued--;
+
+ dma_sync_single_for_cpu(eth->dev, e->dma_addr,
+ SKB_WITH_OVERHEAD(q->buf_size), dir);
+
+ len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl);
+ data_len = q->skb ? q->buf_size
+ : SKB_WITH_OVERHEAD(q->buf_size);
+ if (!len || data_len < len)
+ goto free_frag;
+
+ p = airoha_qdma_get_gdm_port(eth, desc);
+ if (p < 0 || !eth->ports[p])
+ goto free_frag;
+
+ port = eth->ports[p];
+ if (!q->skb) { /* first buffer */
+ q->skb = napi_build_skb(e->buf, q->buf_size);
+ if (!q->skb)
+ goto free_frag;
+
+ __skb_put(q->skb, len);
+ skb_mark_for_recycle(q->skb);
+ q->skb->dev = port->dev;
+ q->skb->protocol = eth_type_trans(q->skb, port->dev);
+ q->skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb_record_rx_queue(q->skb, qid);
+ } else { /* scattered frame */
+ struct skb_shared_info *shinfo = skb_shinfo(q->skb);
+ int nr_frags = shinfo->nr_frags;
+
+ if (nr_frags >= ARRAY_SIZE(shinfo->frags))
+ goto free_frag;
+
+ skb_add_rx_frag(q->skb, nr_frags, page,
+ e->buf - page_address(page), len,
+ q->buf_size);
+ }
+
+ if (FIELD_GET(QDMA_DESC_MORE_MASK, desc_ctrl))
+ continue;
+
+ if (netdev_uses_dsa(port->dev)) {
+ /* The PPE module requires untagged packets to work properly
+ * and provides the DSA port index via the DMA descriptor.
+ * Report the DSA tag to the DSA stack via the skb dst info.
+ */
+ u32 sptag = FIELD_GET(QDMA_ETH_RXMSG_SPTAG,
+ le32_to_cpu(desc->msg0));
+
+ if (sptag < ARRAY_SIZE(port->dsa_meta) &&
+ port->dsa_meta[sptag])
+ skb_dst_set_noref(q->skb,
+ &port->dsa_meta[sptag]->dst);
+ }
+
+ hash = FIELD_GET(AIROHA_RXD4_FOE_ENTRY, msg1);
+ if (hash != AIROHA_RXD4_FOE_ENTRY)
+ skb_set_hash(q->skb, jhash_1word(hash, 0),
+ PKT_HASH_TYPE_L4);
+
+ reason = FIELD_GET(AIROHA_RXD4_PPE_CPU_REASON, msg1);
+ if (reason == PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
+ airoha_ppe_check_skb(&eth->ppe->dev, q->skb, hash,
+ false);
+
+ done++;
+ napi_gro_receive(&q->napi, q->skb);
+ q->skb = NULL;
+ continue;
+free_frag:
+ if (q->skb) {
+ dev_kfree_skb(q->skb);
+ q->skb = NULL;
+ } else {
+ page_pool_put_full_page(q->page_pool, page, true);
+ }
+ }
+ airoha_qdma_fill_rx_queue(q);
+
+ return done;
+}
+
+static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct airoha_queue *q = container_of(napi, struct airoha_queue, napi);
+ int cur, done = 0;
+
+ do {
+ cur = airoha_qdma_rx_process(q, budget - done);
+ done += cur;
+ } while (cur && done < budget);
+
+ if (done < budget && napi_complete(napi)) {
+ struct airoha_qdma *qdma = q->qdma;
+ int i, qid = q - &qdma->q_rx[0];
+ int intr_reg = qid < RX_DONE_HIGH_OFFSET ? QDMA_INT_REG_IDX1
+ : QDMA_INT_REG_IDX2;
+
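+ /* Re-enable the rx-done interrupt only on the irq banks this
+ * queue is pinned to.
+ */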
+ for (i = 0; i < ARRAY_SIZE(qdma->irq_banks); i++) {
+ if (!(BIT(qid) & RX_IRQ_BANK_PIN_MASK(i)))
+ continue;
+
+ airoha_qdma_irq_enable(&qdma->irq_banks[i], intr_reg,
+ BIT(qid % RX_DONE_HIGH_OFFSET));
+ }
+ }
+
+ return done;
+}
+
+static int airoha_qdma_init_rx_queue(struct airoha_queue *q,
+ struct airoha_qdma *qdma, int ndesc)
+{
+ const struct page_pool_params pp_params = {
+ .order = 0,
+ .pool_size = 256,
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .dma_dir = DMA_FROM_DEVICE,
+ .max_len = PAGE_SIZE,
+ .nid = NUMA_NO_NODE,
+ .dev = qdma->eth->dev,
+ .napi = &q->napi,
+ };
+ struct airoha_eth *eth = qdma->eth;
+ int qid = q - &qdma->q_rx[0], thr;
+ dma_addr_t dma_addr;
+
+ q->buf_size = PAGE_SIZE / 2;
+ q->ndesc = ndesc;
+ q->qdma = qdma;
+
+ q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
+ GFP_KERNEL);
+ if (!q->entry)
+ return -ENOMEM;
+
+ q->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(q->page_pool)) {
+ int err = PTR_ERR(q->page_pool);
+
+ q->page_pool = NULL;
+ return err;
+ }
+
+ q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc),
+ &dma_addr, GFP_KERNEL);
+ if (!q->desc)
+ return -ENOMEM;
+
+ netif_napi_add(eth->napi_dev, &q->napi, airoha_qdma_rx_napi_poll);
+
+ airoha_qdma_wr(qdma, REG_RX_RING_BASE(qid), dma_addr);
+ airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid),
+ RX_RING_SIZE_MASK,
+ FIELD_PREP(RX_RING_SIZE_MASK, ndesc));
+
+ thr = clamp(ndesc >> 3, 1, 32);
+ airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid), RX_RING_THR_MASK,
+ FIELD_PREP(RX_RING_THR_MASK, thr));
+ airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
+ FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head));
+ airoha_qdma_set(qdma, REG_RX_SCATTER_CFG(qid), RX_RING_SG_EN_MASK);
+
+ airoha_qdma_fill_rx_queue(q);
+
+ return 0;
+}
+
+static void airoha_qdma_cleanup_rx_queue(struct airoha_queue *q)
+{
+ struct airoha_eth *eth = q->qdma->eth;
+
+ while (q->queued) {
+ struct airoha_queue_entry *e = &q->entry[q->tail];
+ struct page *page = virt_to_head_page(e->buf);
+
+ dma_sync_single_for_cpu(eth->dev, e->dma_addr, e->dma_len,
+ page_pool_get_dma_dir(q->page_pool));
+ page_pool_put_full_page(q->page_pool, page, false);
+ q->tail = (q->tail + 1) % q->ndesc;
+ q->queued--;
+ }
+}
+
+static int airoha_qdma_init_rx(struct airoha_qdma *qdma)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+ int err;
+
+ if (!(RX_DONE_INT_MASK & BIT(i))) {
+ /* rx queue not bound to an irq */
+ continue;
+ }
+
+ err = airoha_qdma_init_rx_queue(&qdma->q_rx[i], qdma,
+ RX_DSCP_NUM(i));
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct airoha_tx_irq_queue *irq_q;
+ int id, done = 0, irq_queued;
+ struct airoha_qdma *qdma;
+ struct airoha_eth *eth;
+ u32 status, head;
+
+ irq_q = container_of(napi, struct airoha_tx_irq_queue, napi);
+ qdma = irq_q->qdma;
+ id = irq_q - &qdma->q_tx_irq[0];
+ eth = qdma->eth;
+
+ status = airoha_qdma_rr(qdma, REG_IRQ_STATUS(id));
+ head = FIELD_GET(IRQ_HEAD_IDX_MASK, status);
+ head = head % irq_q->size;
+ irq_queued = FIELD_GET(IRQ_ENTRY_LEN_MASK, status);
+
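+ /* Each completion entry encodes the tx ring and descriptor index;
+ * a value of 0xff marks a slot the hw has not filled yet.
+ */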
+ while (irq_queued > 0 && done < budget) {
+ u32 qid, val = irq_q->q[head];
+ struct airoha_qdma_desc *desc;
+ struct airoha_queue_entry *e;
+ struct airoha_queue *q;
+ u32 index, desc_ctrl;
+ struct sk_buff *skb;
+
+ if (val == 0xff)
+ break;
+
+ irq_q->q[head] = 0xff; /* mark as done */
+ head = (head + 1) % irq_q->size;
+ irq_queued--;
+ done++;
+
+ qid = FIELD_GET(IRQ_RING_IDX_MASK, val);
+ if (qid >= ARRAY_SIZE(qdma->q_tx))
+ continue;
+
+ q = &qdma->q_tx[qid];
+ if (!q->ndesc)
+ continue;
+
+ index = FIELD_GET(IRQ_DESC_IDX_MASK, val);
+ if (index >= q->ndesc)
+ continue;
+
+ spin_lock_bh(&q->lock);
+
+ if (!q->queued)
+ goto unlock;
+
+ desc = &q->desc[index];
+ desc_ctrl = le32_to_cpu(desc->ctrl);
+
+ if (!(desc_ctrl & QDMA_DESC_DONE_MASK) &&
+ !(desc_ctrl & QDMA_DESC_DROP_MASK))
+ goto unlock;
+
+ e = &q->entry[index];
+ skb = e->skb;
+
+ dma_unmap_single(eth->dev, e->dma_addr, e->dma_len,
+ DMA_TO_DEVICE);
+ e->dma_addr = 0;
+ list_add_tail(&e->list, &q->tx_list);
+
+ WRITE_ONCE(desc->msg0, 0);
+ WRITE_ONCE(desc->msg1, 0);
+ q->queued--;
+
+ if (skb) {
+ u16 queue = skb_get_queue_mapping(skb);
+ struct netdev_queue *txq;
+
+ txq = netdev_get_tx_queue(skb->dev, queue);
+ netdev_tx_completed_queue(txq, 1, skb->len);
+ if (netif_tx_queue_stopped(txq) &&
+ q->ndesc - q->queued >= q->free_thr)
+ netif_tx_wake_queue(txq);
+
+ dev_kfree_skb_any(skb);
+ }
+unlock:
+ spin_unlock_bh(&q->lock);
+ }
+
+ if (done) {
+ int i, len = done >> 7;
+
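+ /* Return the serviced completion entries to the hw, 128 at a
+ * time plus the remainder.
+ */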
+ for (i = 0; i < len; i++)
+ airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id),
+ IRQ_CLEAR_LEN_MASK, 0x80);
+ airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id),
+ IRQ_CLEAR_LEN_MASK, (done & 0x7f));
+ }
+
+ if (done < budget && napi_complete(napi))
+ airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX0,
+ TX_DONE_INT_MASK(id));
+
+ return done;
+}
+
+static int airoha_qdma_init_tx_queue(struct airoha_queue *q,
+ struct airoha_qdma *qdma, int size)
+{
+ struct airoha_eth *eth = qdma->eth;
+ int i, qid = q - &qdma->q_tx[0];
+ dma_addr_t dma_addr;
+
+ spin_lock_init(&q->lock);
+ q->ndesc = size;
+ q->qdma = qdma;
+ q->free_thr = 1 + MAX_SKB_FRAGS;
+ INIT_LIST_HEAD(&q->tx_list);
+
+ q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
+ GFP_KERNEL);
+ if (!q->entry)
+ return -ENOMEM;
+
+ q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc),
+ &dma_addr, GFP_KERNEL);
+ if (!q->desc)
+ return -ENOMEM;
+
+ for (i = 0; i < q->ndesc; i++) {
+ u32 val = FIELD_PREP(QDMA_DESC_DONE_MASK, 1);
+
+ list_add_tail(&q->entry[i].list, &q->tx_list);
+ WRITE_ONCE(q->desc[i].ctrl, cpu_to_le32(val));
+ }
+
+ /* xmit ring drop default setting */
+ airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(qid),
+ TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK);
+
+ airoha_qdma_wr(qdma, REG_TX_RING_BASE(qid), dma_addr);
+ airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
+ FIELD_PREP(TX_RING_CPU_IDX_MASK, 0));
+ airoha_qdma_rmw(qdma, REG_TX_DMA_IDX(qid), TX_RING_DMA_IDX_MASK,
+ FIELD_PREP(TX_RING_DMA_IDX_MASK, 0));
+
+ return 0;
+}
+
+static int airoha_qdma_tx_irq_init(struct airoha_tx_irq_queue *irq_q,
+ struct airoha_qdma *qdma, int size)
+{
+ int id = irq_q - &qdma->q_tx_irq[0];
+ struct airoha_eth *eth = qdma->eth;
+ dma_addr_t dma_addr;
+
+ netif_napi_add_tx(eth->napi_dev, &irq_q->napi,
+ airoha_qdma_tx_napi_poll);
+ irq_q->q = dmam_alloc_coherent(eth->dev, size * sizeof(u32),
+ &dma_addr, GFP_KERNEL);
+ if (!irq_q->q)
+ return -ENOMEM;
+
+ memset(irq_q->q, 0xff, size * sizeof(u32));
+ irq_q->size = size;
+ irq_q->qdma = qdma;
+
+ airoha_qdma_wr(qdma, REG_TX_IRQ_BASE(id), dma_addr);
+ airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_DEPTH_MASK,
+ FIELD_PREP(TX_IRQ_DEPTH_MASK, size));
+ airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_THR_MASK,
+ FIELD_PREP(TX_IRQ_THR_MASK, 1));
+
+ return 0;
+}
+
+static int airoha_qdma_init_tx(struct airoha_qdma *qdma)
+{
+ int i, err;
+
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
+ err = airoha_qdma_tx_irq_init(&qdma->q_tx_irq[i], qdma,
+ IRQ_QUEUE_LEN(i));
+ if (err)
+ return err;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
+ err = airoha_qdma_init_tx_queue(&qdma->q_tx[i], qdma,
+ TX_DSCP_NUM);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q)
+{
+ struct airoha_eth *eth = q->qdma->eth;
+ int i;
+
+ spin_lock_bh(&q->lock);
+ for (i = 0; i < q->ndesc; i++) {
+ struct airoha_queue_entry *e = &q->entry[i];
+
+ if (!e->dma_addr)
+ continue;
+
+ dma_unmap_single(eth->dev, e->dma_addr, e->dma_len,
+ DMA_TO_DEVICE);
+ dev_kfree_skb_any(e->skb);
+ e->dma_addr = 0;
+ e->skb = NULL;
+ list_add_tail(&e->list, &q->tx_list);
+ q->queued--;
+ }
+ spin_unlock_bh(&q->lock);
+}
+
+static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma)
+{
+ int size, index, num_desc = HW_DSCP_NUM;
+ struct airoha_eth *eth = qdma->eth;
+ int id = qdma - &eth->qdma[0];
+ u32 status, buf_size;
+ dma_addr_t dma_addr;
+ const char *name;
+
+ name = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d-buf", id);
+ if (!name)
+ return -ENOMEM;
+
+ buf_size = id ? AIROHA_MAX_PACKET_SIZE / 2 : AIROHA_MAX_PACKET_SIZE;
+ index = of_property_match_string(eth->dev->of_node,
+ "memory-region-names", name);
+ if (index >= 0) {
+ struct reserved_mem *rmem;
+ struct device_node *np;
+
+ /* Consume the reserved memory for the hw forwarding buffer queue
+ * if it is available in the DTS
+ */
+ np = of_parse_phandle(eth->dev->of_node, "memory-region",
+ index);
+ if (!np)
+ return -ENODEV;
+
+ rmem = of_reserved_mem_lookup(np);
+ of_node_put(np);
+ dma_addr = rmem->base;
+ /* Compute the number of hw descriptors according to the
+ * reserved memory size and the payload buffer size
+ */
+ num_desc = div_u64(rmem->size, buf_size);
+ } else {
+ size = buf_size * num_desc;
+ if (!dmam_alloc_coherent(eth->dev, size, &dma_addr,
+ GFP_KERNEL))
+ return -ENOMEM;
+ }
+
+ airoha_qdma_wr(qdma, REG_FWD_BUF_BASE, dma_addr);
+
+ size = num_desc * sizeof(struct airoha_qdma_fwd_desc);
+ if (!dmam_alloc_coherent(eth->dev, size, &dma_addr, GFP_KERNEL))
+ return -ENOMEM;
+
+ airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);
+ /* QDMA0: 2KB. QDMA1: 1KB */
+ airoha_qdma_rmw(qdma, REG_HW_FWD_DSCP_CFG,
+ HW_FWD_DSCP_PAYLOAD_SIZE_MASK,
+ FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, !!id));
+ airoha_qdma_rmw(qdma, REG_FWD_DSCP_LOW_THR, FWD_DSCP_LOW_THR_MASK,
+ FIELD_PREP(FWD_DSCP_LOW_THR_MASK, 128));
+ airoha_qdma_rmw(qdma, REG_LMGR_INIT_CFG,
+ LMGR_INIT_START | LMGR_SRAM_MODE_MASK |
+ HW_FWD_DESC_NUM_MASK,
+ FIELD_PREP(HW_FWD_DESC_NUM_MASK, num_desc) |
+ LMGR_INIT_START | LMGR_SRAM_MODE_MASK);
+
+ return read_poll_timeout(airoha_qdma_rr, status,
+ !(status & LMGR_INIT_START), USEC_PER_MSEC,
+ 30 * USEC_PER_MSEC, true, qdma,
+ REG_LMGR_INIT_CFG);
+}
+
+static void airoha_qdma_init_qos(struct airoha_qdma *qdma)
+{
+ airoha_qdma_clear(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_SCALE_MASK);
+ airoha_qdma_set(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_BASE_MASK);
+
+ airoha_qdma_clear(qdma, REG_PSE_BUF_USAGE_CFG,
+ PSE_BUF_ESTIMATE_EN_MASK);
+
+ airoha_qdma_set(qdma, REG_EGRESS_RATE_METER_CFG,
+ EGRESS_RATE_METER_EN_MASK |
+ EGRESS_RATE_METER_EQ_RATE_EN_MASK);
+ /* 2047us x 31 = 63.457ms */
+ airoha_qdma_rmw(qdma, REG_EGRESS_RATE_METER_CFG,
+ EGRESS_RATE_METER_WINDOW_SZ_MASK,
+ FIELD_PREP(EGRESS_RATE_METER_WINDOW_SZ_MASK, 0x1f));
+ airoha_qdma_rmw(qdma, REG_EGRESS_RATE_METER_CFG,
+ EGRESS_RATE_METER_TIMESLICE_MASK,
+ FIELD_PREP(EGRESS_RATE_METER_TIMESLICE_MASK, 0x7ff));
+
+ /* ratelimit init */
+ airoha_qdma_set(qdma, REG_GLB_TRTCM_CFG, GLB_TRTCM_EN_MASK);
+ /* fast-tick 25us */
+ airoha_qdma_rmw(qdma, REG_GLB_TRTCM_CFG, GLB_FAST_TICK_MASK,
+ FIELD_PREP(GLB_FAST_TICK_MASK, 25));
+ airoha_qdma_rmw(qdma, REG_GLB_TRTCM_CFG, GLB_SLOW_TICK_RATIO_MASK,
+ FIELD_PREP(GLB_SLOW_TICK_RATIO_MASK, 40));
+
+ airoha_qdma_set(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_TRTCM_EN_MASK);
+ airoha_qdma_rmw(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_FAST_TICK_MASK,
+ FIELD_PREP(EGRESS_FAST_TICK_MASK, 25));
+ airoha_qdma_rmw(qdma, REG_EGRESS_TRTCM_CFG,
+ EGRESS_SLOW_TICK_RATIO_MASK,
+ FIELD_PREP(EGRESS_SLOW_TICK_RATIO_MASK, 40));
+
+ airoha_qdma_set(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_TRTCM_EN_MASK);
+ airoha_qdma_clear(qdma, REG_INGRESS_TRTCM_CFG,
+ INGRESS_TRTCM_MODE_MASK);
+ airoha_qdma_rmw(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_FAST_TICK_MASK,
+ FIELD_PREP(INGRESS_FAST_TICK_MASK, 125));
+ airoha_qdma_rmw(qdma, REG_INGRESS_TRTCM_CFG,
+ INGRESS_SLOW_TICK_RATIO_MASK,
+ FIELD_PREP(INGRESS_SLOW_TICK_RATIO_MASK, 8));
+
+ airoha_qdma_set(qdma, REG_SLA_TRTCM_CFG, SLA_TRTCM_EN_MASK);
+ airoha_qdma_rmw(qdma, REG_SLA_TRTCM_CFG, SLA_FAST_TICK_MASK,
+ FIELD_PREP(SLA_FAST_TICK_MASK, 25));
+ airoha_qdma_rmw(qdma, REG_SLA_TRTCM_CFG, SLA_SLOW_TICK_RATIO_MASK,
+ FIELD_PREP(SLA_SLOW_TICK_RATIO_MASK, 40));
+}
+
+static void airoha_qdma_init_qos_stats(struct airoha_qdma *qdma)
+{
+ int i;
+
+ for (i = 0; i < AIROHA_NUM_QOS_CHANNELS; i++) {
+ /* Tx-cpu transferred count */
+ airoha_qdma_wr(qdma, REG_CNTR_VAL(i << 1), 0);
+ airoha_qdma_wr(qdma, REG_CNTR_CFG(i << 1),
+ CNTR_EN_MASK | CNTR_ALL_QUEUE_EN_MASK |
+ CNTR_ALL_DSCP_RING_EN_MASK |
+ FIELD_PREP(CNTR_CHAN_MASK, i));
+ /* Tx-fwd transferred count */
+ airoha_qdma_wr(qdma, REG_CNTR_VAL((i << 1) + 1), 0);
+ airoha_qdma_wr(qdma, REG_CNTR_CFG((i << 1) + 1),
+ CNTR_EN_MASK | CNTR_ALL_QUEUE_EN_MASK |
+ CNTR_ALL_DSCP_RING_EN_MASK |
+ FIELD_PREP(CNTR_SRC_MASK, 1) |
+ FIELD_PREP(CNTR_CHAN_MASK, i));
+ }
+}
+
+static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qdma->irq_banks); i++) {
+ /* clear pending irqs */
+ airoha_qdma_wr(qdma, REG_INT_STATUS(i), 0xffffffff);
+ /* setup rx irqs */
+ airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX0,
+ INT_RX0_MASK(RX_IRQ_BANK_PIN_MASK(i)));
+ airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX1,
+ INT_RX1_MASK(RX_IRQ_BANK_PIN_MASK(i)));
+ airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX2,
+ INT_RX2_MASK(RX_IRQ_BANK_PIN_MASK(i)));
+ airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX3,
+ INT_RX3_MASK(RX_IRQ_BANK_PIN_MASK(i)));
+ }
+ /* setup tx irqs */
+ airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX0,
+ TX_COHERENT_LOW_INT_MASK | INT_TX_MASK);
+ airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX4,
+ TX_COHERENT_HIGH_INT_MASK);
+
+ /* setup irq binding */
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
+ if (!qdma->q_tx[i].ndesc)
+ continue;
+
+ if (TX_RING_IRQ_BLOCKING_MAP_MASK & BIT(i))
+ airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(i),
+ TX_RING_IRQ_BLOCKING_CFG_MASK);
+ else
+ airoha_qdma_clear(qdma, REG_TX_RING_BLOCKING(i),
+ TX_RING_IRQ_BLOCKING_CFG_MASK);
+ }
+
+ airoha_qdma_wr(qdma, REG_QDMA_GLOBAL_CFG,
+ FIELD_PREP(GLOBAL_CFG_DMA_PREFERENCE_MASK, 3) |
+ GLOBAL_CFG_CPU_TXR_RR_MASK |
+ GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK |
+ GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK |
+ GLOBAL_CFG_MULTICAST_EN_MASK |
+ GLOBAL_CFG_IRQ0_EN_MASK | GLOBAL_CFG_IRQ1_EN_MASK |
+ GLOBAL_CFG_TX_WB_DONE_MASK |
+ FIELD_PREP(GLOBAL_CFG_MAX_ISSUE_NUM_MASK, 2));
+
+ airoha_qdma_init_qos(qdma);
+
+ /* disable qdma rx delay interrupt */
+ for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+ if (!qdma->q_rx[i].ndesc)
+ continue;
+
+ airoha_qdma_clear(qdma, REG_RX_DELAY_INT_IDX(i),
+ RX_DELAY_INT_MASK);
+ }
+
+ airoha_qdma_set(qdma, REG_TXQ_CNGST_CFG,
+ TXQ_CNGST_DROP_EN | TXQ_CNGST_DEI_DROP_EN);
+ airoha_qdma_init_qos_stats(qdma);
+
+ return 0;
+}
+
+static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
+{
+ struct airoha_irq_bank *irq_bank = dev_instance;
+ struct airoha_qdma *qdma = irq_bank->qdma;
+ u32 rx_intr_mask = 0, rx_intr1, rx_intr2;
+ u32 intr[ARRAY_SIZE(irq_bank->irqmask)];
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(intr); i++) {
+ intr[i] = airoha_qdma_rr(qdma, REG_INT_STATUS(i));
+ intr[i] &= irq_bank->irqmask[i];
+ airoha_qdma_wr(qdma, REG_INT_STATUS(i), intr[i]);
+ }
+
+ if (!test_bit(DEV_STATE_INITIALIZED, &qdma->eth->state))
+ return IRQ_NONE;
+
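+ /* rx-done bits for queues 0-15 live in INT_IDX1 and for queues
+ * 16-31 in INT_IDX2; merge them into a single per-queue bitmap.
+ */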
+ rx_intr1 = intr[1] & RX_DONE_LOW_INT_MASK;
+ if (rx_intr1) {
+ airoha_qdma_irq_disable(irq_bank, QDMA_INT_REG_IDX1, rx_intr1);
+ rx_intr_mask |= rx_intr1;
+ }
+
+ rx_intr2 = intr[2] & RX_DONE_HIGH_INT_MASK;
+ if (rx_intr2) {
+ airoha_qdma_irq_disable(irq_bank, QDMA_INT_REG_IDX2, rx_intr2);
+ rx_intr_mask |= (rx_intr2 << 16);
+ }
+
+ for (i = 0; rx_intr_mask && i < ARRAY_SIZE(qdma->q_rx); i++) {
+ if (!qdma->q_rx[i].ndesc)
+ continue;
+
+ if (rx_intr_mask & BIT(i))
+ napi_schedule(&qdma->q_rx[i].napi);
+ }
+
+ if (intr[0] & INT_TX_MASK) {
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
+ if (!(intr[0] & TX_DONE_INT_MASK(i)))
+ continue;
+
+ airoha_qdma_irq_disable(irq_bank, QDMA_INT_REG_IDX0,
+ TX_DONE_INT_MASK(i));
+ napi_schedule(&qdma->q_tx_irq[i].napi);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int airoha_qdma_init_irq_banks(struct platform_device *pdev,
+ struct airoha_qdma *qdma)
+{
+ struct airoha_eth *eth = qdma->eth;
+ int i, id = qdma - &eth->qdma[0];
+
+ for (i = 0; i < ARRAY_SIZE(qdma->irq_banks); i++) {
+ struct airoha_irq_bank *irq_bank = &qdma->irq_banks[i];
+ int err, irq_index = 4 * id + i;
+ const char *name;
+
+ spin_lock_init(&irq_bank->irq_lock);
+ irq_bank->qdma = qdma;
+
+ irq_bank->irq = platform_get_irq(pdev, irq_index);
+ if (irq_bank->irq < 0)
+ return irq_bank->irq;
+
+ name = devm_kasprintf(eth->dev, GFP_KERNEL,
+ KBUILD_MODNAME ".%d", irq_index);
+ if (!name)
+ return -ENOMEM;
+
+ err = devm_request_irq(eth->dev, irq_bank->irq,
+ airoha_irq_handler, IRQF_SHARED, name,
+ irq_bank);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int airoha_qdma_init(struct platform_device *pdev,
+ struct airoha_eth *eth,
+ struct airoha_qdma *qdma)
+{
+ int err, id = qdma - &eth->qdma[0];
+ const char *res;
+
+ qdma->eth = eth;
+ res = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d", id);
+ if (!res)
+ return -ENOMEM;
+
+ qdma->regs = devm_platform_ioremap_resource_byname(pdev, res);
+ if (IS_ERR(qdma->regs))
+ return dev_err_probe(eth->dev, PTR_ERR(qdma->regs),
+ "failed to iomap qdma%d regs\n", id);
+
+ err = airoha_qdma_init_irq_banks(pdev, qdma);
+ if (err)
+ return err;
+
+ err = airoha_qdma_init_rx(qdma);
+ if (err)
+ return err;
+
+ err = airoha_qdma_init_tx(qdma);
+ if (err)
+ return err;
+
+ err = airoha_qdma_init_hfwd_queues(qdma);
+ if (err)
+ return err;
+
+ return airoha_qdma_hw_init(qdma);
+}
+
+static int airoha_hw_init(struct platform_device *pdev,
+ struct airoha_eth *eth)
+{
+ int err, i;
+
+ /* disable xsi */
+ err = reset_control_bulk_assert(eth->soc->num_xsi_rsts, eth->xsi_rsts);
+ if (err)
+ return err;
+
+ err = reset_control_bulk_assert(ARRAY_SIZE(eth->rsts), eth->rsts);
+ if (err)
+ return err;
+
+ msleep(20);
+ err = reset_control_bulk_deassert(ARRAY_SIZE(eth->rsts), eth->rsts);
+ if (err)
+ return err;
+
+ msleep(20);
+ err = airoha_fe_init(eth);
+ if (err)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) {
+ err = airoha_qdma_init(pdev, eth, &eth->qdma[i]);
+ if (err)
+ return err;
+ }
+
+ err = airoha_ppe_init(eth);
+ if (err)
+ return err;
+
+ set_bit(DEV_STATE_INITIALIZED, &eth->state);
+
+ return 0;
+}
+
+static void airoha_hw_cleanup(struct airoha_qdma *qdma)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+ if (!qdma->q_rx[i].ndesc)
+ continue;
+
+ netif_napi_del(&qdma->q_rx[i].napi);
+ airoha_qdma_cleanup_rx_queue(&qdma->q_rx[i]);
+ if (qdma->q_rx[i].page_pool)
+ page_pool_destroy(qdma->q_rx[i].page_pool);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
+ netif_napi_del(&qdma->q_tx_irq[i].napi);
+
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
+ if (!qdma->q_tx[i].ndesc)
+ continue;
+
+ airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
+ }
+}
+
+static void airoha_qdma_start_napi(struct airoha_qdma *qdma)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
+ napi_enable(&qdma->q_tx_irq[i].napi);
+
+ for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+ if (!qdma->q_rx[i].ndesc)
+ continue;
+
+ napi_enable(&qdma->q_rx[i].napi);
+ }
+}
+
+static void airoha_qdma_stop_napi(struct airoha_qdma *qdma)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
+ napi_disable(&qdma->q_tx_irq[i].napi);
+
+ for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+ if (!qdma->q_rx[i].ndesc)
+ continue;
+
+ napi_disable(&qdma->q_rx[i].napi);
+ }
+}
+
+static void airoha_update_hw_stats(struct airoha_gdm_port *port)
+{
+ struct airoha_eth *eth = port->qdma->eth;
+ u32 val, i = 0;
+
+ spin_lock(&port->stats.lock);
+ u64_stats_update_begin(&port->stats.syncp);
+
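+ /* The GDM MIB counters are cleared at the end of this function,
+ * so every read returns the delta since the previous update and
+ * is accumulated into the software counters.
+ */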
+ /* TX */
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_H(port->id));
+ port->stats.tx_ok_pkts += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_L(port->id));
+ port->stats.tx_ok_pkts += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_H(port->id));
+ port->stats.tx_ok_bytes += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_L(port->id));
+ port->stats.tx_ok_bytes += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_DROP_CNT(port->id));
+ port->stats.tx_drops += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_BC_CNT(port->id));
+ port->stats.tx_broadcast += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_MC_CNT(port->id));
+ port->stats.tx_multicast += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_RUNT_CNT(port->id));
+ port->stats.tx_len[i] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_H(port->id));
+ port->stats.tx_len[i] += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_L(port->id));
+ port->stats.tx_len[i++] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_H(port->id));
+ port->stats.tx_len[i] += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_L(port->id));
+ port->stats.tx_len[i++] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_H(port->id));
+ port->stats.tx_len[i] += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_L(port->id));
+ port->stats.tx_len[i++] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_H(port->id));
+ port->stats.tx_len[i] += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_L(port->id));
+ port->stats.tx_len[i++] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_H(port->id));
+ port->stats.tx_len[i] += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_L(port->id));
+ port->stats.tx_len[i++] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_H(port->id));
+ port->stats.tx_len[i] += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_L(port->id));
+ port->stats.tx_len[i++] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_LONG_CNT(port->id));
+ port->stats.tx_len[i++] += val;
+
+ /* RX */
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_H(port->id));
+ port->stats.rx_ok_pkts += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_L(port->id));
+ port->stats.rx_ok_pkts += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_H(port->id));
+ port->stats.rx_ok_bytes += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_L(port->id));
+ port->stats.rx_ok_bytes += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_DROP_CNT(port->id));
+ port->stats.rx_drops += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_BC_CNT(port->id));
+ port->stats.rx_broadcast += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_MC_CNT(port->id));
+ port->stats.rx_multicast += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ERROR_DROP_CNT(port->id));
+ port->stats.rx_errors += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_CRC_ERR_CNT(port->id));
+ port->stats.rx_crc_error += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_OVERFLOW_DROP_CNT(port->id));
+ port->stats.rx_over_errors += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_FRAG_CNT(port->id));
+ port->stats.rx_fragment += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_JABBER_CNT(port->id));
+ port->stats.rx_jabber += val;
+
+ i = 0;
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_RUNT_CNT(port->id));
+ port->stats.rx_len[i] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_H(port->id));
+ port->stats.rx_len[i] += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_L(port->id));
+ port->stats.rx_len[i++] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_H(port->id));
+ port->stats.rx_len[i] += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_L(port->id));
+ port->stats.rx_len[i++] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_H(port->id));
+ port->stats.rx_len[i] += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_L(port->id));
+ port->stats.rx_len[i++] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_H(port->id));
+ port->stats.rx_len[i] += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_L(port->id));
+ port->stats.rx_len[i++] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_H(port->id));
+ port->stats.rx_len[i] += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_L(port->id));
+ port->stats.rx_len[i++] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_H(port->id));
+ port->stats.rx_len[i] += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_L(port->id));
+ port->stats.rx_len[i++] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_LONG_CNT(port->id));
+ port->stats.rx_len[i++] += val;
+
+ /* reset mib counters */
+ airoha_fe_set(eth, REG_FE_GDM_MIB_CLEAR(port->id),
+ FE_GDM_MIB_RX_CLEAR_MASK | FE_GDM_MIB_TX_CLEAR_MASK);
+
+ u64_stats_update_end(&port->stats.syncp);
+ spin_unlock(&port->stats.lock);
+}
+
+static int airoha_dev_open(struct net_device *dev)
+{
+ int err, len = ETH_HLEN + dev->mtu + ETH_FCS_LEN;
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ struct airoha_qdma *qdma = port->qdma;
+
+ netif_tx_start_all_queues(dev);
+ err = airoha_set_vip_for_gdm_port(port, true);
+ if (err)
+ return err;
+
+ if (netdev_uses_dsa(dev))
+ airoha_fe_set(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
+ GDM_STAG_EN_MASK);
+ else
+ airoha_fe_clear(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
+ GDM_STAG_EN_MASK);
+
+ airoha_fe_rmw(qdma->eth, REG_GDM_LEN_CFG(port->id),
+ GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
+ FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
+ FIELD_PREP(GDM_LONG_LEN_MASK, len));
+
+ airoha_qdma_set(qdma, REG_QDMA_GLOBAL_CFG,
+ GLOBAL_CFG_TX_DMA_EN_MASK |
+ GLOBAL_CFG_RX_DMA_EN_MASK);
+ atomic_inc(&qdma->users);
+
+ return 0;
+}
+
+static int airoha_dev_stop(struct net_device *dev)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ struct airoha_qdma *qdma = port->qdma;
+ int i, err;
+
+ netif_tx_disable(dev);
+ err = airoha_set_vip_for_gdm_port(port, false);
+ if (err)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++)
+ netdev_tx_reset_subqueue(dev, i);
+
+ if (atomic_dec_and_test(&qdma->users)) {
+ airoha_qdma_clear(qdma, REG_QDMA_GLOBAL_CFG,
+ GLOBAL_CFG_TX_DMA_EN_MASK |
+ GLOBAL_CFG_RX_DMA_EN_MASK);
+
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
+ if (!qdma->q_tx[i].ndesc)
+ continue;
+
+ airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
+ }
+ }
+
+ return 0;
+}
+
+static int airoha_dev_set_macaddr(struct net_device *dev, void *p)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ int err;
+
+ err = eth_mac_addr(dev, p);
+ if (err)
+ return err;
+
+ airoha_set_macaddr(port, dev->dev_addr);
+
+ return 0;
+}
+
+static int airoha_set_gdm2_loopback(struct airoha_gdm_port *port)
+{
+ struct airoha_eth *eth = port->qdma->eth;
+ u32 val, pse_port, chan, nbq;
+ int src_port;
+
+ /* Forward the traffic to the proper GDM port */
+ pse_port = port->id == AIROHA_GDM3_IDX ? FE_PSE_PORT_GDM3
+ : FE_PSE_PORT_GDM4;
+ airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(2), pse_port);
+ airoha_fe_clear(eth, REG_GDM_FWD_CFG(2), GDM_STRIP_CRC_MASK);
+
+ /* Enable GDM2 loopback */
+ airoha_fe_wr(eth, REG_GDM_TXCHN_EN(2), 0xffffffff);
+ airoha_fe_wr(eth, REG_GDM_RXCHN_EN(2), 0xffff);
+
+ chan = port->id == AIROHA_GDM3_IDX ? airoha_is_7581(eth) ? 4 : 3 : 0;
+ airoha_fe_rmw(eth, REG_GDM_LPBK_CFG(2),
+ LPBK_CHAN_MASK | LPBK_MODE_MASK | LPBK_EN_MASK,
+ FIELD_PREP(LPBK_CHAN_MASK, chan) |
+ LBK_GAP_MODE_MASK | LBK_LEN_MODE_MASK |
+ LBK_CHAN_MODE_MASK | LPBK_EN_MASK);
+ airoha_fe_rmw(eth, REG_GDM_LEN_CFG(2),
+ GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
+ FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
+ FIELD_PREP(GDM_LONG_LEN_MASK, AIROHA_MAX_MTU));
+
+ /* Disable VIP and IFC for GDM2 */
+ airoha_fe_clear(eth, REG_FE_VIP_PORT_EN, BIT(2));
+ airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, BIT(2));
+
+ /* XXX: handle XSI_USB_PORT and XSI_PCE1_PORT */
+ nbq = port->id == AIROHA_GDM3_IDX && airoha_is_7581(eth) ? 4 : 0;
+ src_port = eth->soc->ops.get_src_port_id(port, nbq);
+ if (src_port < 0)
+ return src_port;
+
+ airoha_fe_rmw(eth, REG_FE_WAN_PORT,
+ WAN1_EN_MASK | WAN1_MASK | WAN0_MASK,
+ FIELD_PREP(WAN0_MASK, src_port));
+ val = src_port & SP_CPORT_DFT_MASK;
+ airoha_fe_rmw(eth,
+ REG_SP_DFT_CPORT(src_port >> fls(SP_CPORT_DFT_MASK)),
+ SP_CPORT_MASK(val),
+ FE_PSE_PORT_CDM2 << __ffs(SP_CPORT_MASK(val)));
+
+ if (port->id != AIROHA_GDM3_IDX && airoha_is_7581(eth))
+ airoha_fe_rmw(eth, REG_SRC_PORT_FC_MAP6,
+ FC_ID_OF_SRC_PORT24_MASK,
+ FIELD_PREP(FC_ID_OF_SRC_PORT24_MASK, 2));
+
+ return 0;
+}
+
+static int airoha_dev_init(struct net_device *dev)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ struct airoha_qdma *qdma = port->qdma;
+ struct airoha_eth *eth = qdma->eth;
+ u32 pse_port, fe_cpu_port;
+ u8 ppe_id;
+
+ airoha_set_macaddr(port, dev->dev_addr);
+
+ switch (port->id) {
+ case 3:
+ case 4:
+ /* If GDM2 is active we can't enable loopback */
+ if (!eth->ports[1]) {
+ int err;
+
+ err = airoha_set_gdm2_loopback(port);
+ if (err)
+ return err;
+ }
+ fallthrough;
+ case 2:
+ if (airoha_ppe_is_enabled(eth, 1)) {
+ /* For PPE2 always use secondary cpu port. */
+ fe_cpu_port = FE_PSE_PORT_CDM2;
+ pse_port = FE_PSE_PORT_PPE2;
+ break;
+ }
+ fallthrough;
+ default: {
+ u8 qdma_id = qdma - &eth->qdma[0];
+
+ /* For PPE1 select cpu port according to the running QDMA. */
+ fe_cpu_port = qdma_id ? FE_PSE_PORT_CDM2 : FE_PSE_PORT_CDM1;
+ pse_port = FE_PSE_PORT_PPE1;
+ break;
+ }
+ }
+
+ airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(port->id), pse_port);
+ ppe_id = pse_port == FE_PSE_PORT_PPE2 ? 1 : 0;
+ airoha_fe_rmw(eth, REG_PPE_DFT_CPORT0(ppe_id),
+ DFT_CPORT_MASK(port->id),
+ fe_cpu_port << __ffs(DFT_CPORT_MASK(port->id)));
+
+ return 0;
+}
+
+static void airoha_dev_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *storage)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ unsigned int start;
+
+ airoha_update_hw_stats(port);
+ do {
+ start = u64_stats_fetch_begin(&port->stats.syncp);
+ storage->rx_packets = port->stats.rx_ok_pkts;
+ storage->tx_packets = port->stats.tx_ok_pkts;
+ storage->rx_bytes = port->stats.rx_ok_bytes;
+ storage->tx_bytes = port->stats.tx_ok_bytes;
+ storage->multicast = port->stats.rx_multicast;
+ storage->rx_errors = port->stats.rx_errors;
+ storage->rx_dropped = port->stats.rx_drops;
+ storage->tx_dropped = port->stats.tx_drops;
+ storage->rx_crc_errors = port->stats.rx_crc_error;
+ storage->rx_over_errors = port->stats.rx_over_errors;
+ } while (u64_stats_fetch_retry(&port->stats.syncp, start));
+}
+
+static int airoha_dev_change_mtu(struct net_device *dev, int mtu)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ struct airoha_eth *eth = port->qdma->eth;
+ u32 len = ETH_HLEN + mtu + ETH_FCS_LEN;
+
+ airoha_fe_rmw(eth, REG_GDM_LEN_CFG(port->id),
+ GDM_LONG_LEN_MASK,
+ FIELD_PREP(GDM_LONG_LEN_MASK, len));
+ WRITE_ONCE(dev->mtu, mtu);
+
+ return 0;
+}
+
+static u16 airoha_dev_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ int queue, channel;
+
+ /* For DSA devices select the QoS channel according to the DSA user
+ * port index; rely on the port id otherwise. Select the QoS queue
+ * based on the skb priority.
+ */
+ channel = netdev_uses_dsa(dev) ? skb_get_queue_mapping(skb) : port->id;
+ channel = channel % AIROHA_NUM_QOS_CHANNELS;
+ queue = (skb->priority - 1) % AIROHA_NUM_QOS_QUEUES; /* QoS queue */
+ queue = channel * AIROHA_NUM_QOS_QUEUES + queue;
+
+ return queue < dev->num_tx_queues ? queue : 0;
+}
+
+static u32 airoha_get_dsa_tag(struct sk_buff *skb, struct net_device *dev)
+{
+#if IS_ENABLED(CONFIG_NET_DSA)
+ struct ethhdr *ehdr;
+ u8 xmit_tpid;
+ u16 tag;
+
+ if (!netdev_uses_dsa(dev))
+ return 0;
+
+ if (dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
+ return 0;
+
+ if (skb_cow_head(skb, 0))
+ return 0;
+
+ ehdr = (struct ethhdr *)skb->data;
+ tag = be16_to_cpu(ehdr->h_proto);
+ xmit_tpid = tag >> 8;
+
+ switch (xmit_tpid) {
+ case MTK_HDR_XMIT_TAGGED_TPID_8100:
+ ehdr->h_proto = cpu_to_be16(ETH_P_8021Q);
+ tag &= ~(MTK_HDR_XMIT_TAGGED_TPID_8100 << 8);
+ break;
+ case MTK_HDR_XMIT_TAGGED_TPID_88A8:
+ ehdr->h_proto = cpu_to_be16(ETH_P_8021AD);
+ tag &= ~(MTK_HDR_XMIT_TAGGED_TPID_88A8 << 8);
+ break;
+ default:
+ /* The PPE module requires untagged DSA packets to work properly,
+ * so move the DSA tag into the DMA descriptor.
+ */
+ memmove(skb->data + MTK_HDR_LEN, skb->data, 2 * ETH_ALEN);
+ __skb_pull(skb, MTK_HDR_LEN);
+ break;
+ }
+
+ return tag;
+#else
+ return 0;
+#endif
+}
+
+static int airoha_get_fe_port(struct airoha_gdm_port *port)
+{
+ struct airoha_qdma *qdma = port->qdma;
+ struct airoha_eth *eth = qdma->eth;
+
+ switch (eth->soc->version) {
+ case 0x7583:
+ return port->id == AIROHA_GDM3_IDX ? FE_PSE_PORT_GDM3
+ : port->id;
+ case 0x7581:
+ default:
+ return port->id == AIROHA_GDM4_IDX ? FE_PSE_PORT_GDM4
+ : port->id;
+ }
+}
+
+static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ struct airoha_qdma *qdma = port->qdma;
+ u32 nr_frags, tag, msg0, msg1, len;
+ struct airoha_queue_entry *e;
+ struct netdev_queue *txq;
+ struct airoha_queue *q;
+ LIST_HEAD(tx_list);
+ void *data;
+ int i, qid;
+ u16 index;
+ u8 fport;
+
+ qid = skb_get_queue_mapping(skb) % ARRAY_SIZE(qdma->q_tx);
+ tag = airoha_get_dsa_tag(skb, dev);
+
+ msg0 = FIELD_PREP(QDMA_ETH_TXMSG_CHAN_MASK,
+ qid / AIROHA_NUM_QOS_QUEUES) |
+ FIELD_PREP(QDMA_ETH_TXMSG_QUEUE_MASK,
+ qid % AIROHA_NUM_QOS_QUEUES) |
+ FIELD_PREP(QDMA_ETH_TXMSG_SP_TAG_MASK, tag);
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TCO_MASK, 1) |
+ FIELD_PREP(QDMA_ETH_TXMSG_UCO_MASK, 1) |
+ FIELD_PREP(QDMA_ETH_TXMSG_ICO_MASK, 1);
+
+ /* TSO: fill MSS info in tcp checksum field */
+ if (skb_is_gso(skb)) {
+ if (skb_cow_head(skb, 0))
+ goto error;
+
+ if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 |
+ SKB_GSO_TCPV6)) {
+ __be16 csum = cpu_to_be16(skb_shinfo(skb)->gso_size);
+
+ tcp_hdr(skb)->check = (__force __sum16)csum;
+ msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TSO_MASK, 1);
+ }
+ }
+
+ fport = airoha_get_fe_port(port);
+ msg1 = FIELD_PREP(QDMA_ETH_TXMSG_FPORT_MASK, fport) |
+ FIELD_PREP(QDMA_ETH_TXMSG_METER_MASK, 0x7f);
+
+ q = &qdma->q_tx[qid];
+ if (WARN_ON_ONCE(!q->ndesc))
+ goto error;
+
+ spin_lock_bh(&q->lock);
+
+ txq = netdev_get_tx_queue(dev, qid);
+ nr_frags = 1 + skb_shinfo(skb)->nr_frags;
+
+ if (q->queued + nr_frags >= q->ndesc) {
+ /* not enough space in the queue */
+ netif_tx_stop_queue(txq);
+ spin_unlock_bh(&q->lock);
+ return NETDEV_TX_BUSY;
+ }
+
+ len = skb_headlen(skb);
+ data = skb->data;
+
+ e = list_first_entry(&q->tx_list, struct airoha_queue_entry,
+ list);
+ index = e - q->entry;
+
+ for (i = 0; i < nr_frags; i++) {
+ struct airoha_qdma_desc *desc = &q->desc[index];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ dma_addr_t addr;
+ u32 val;
+
+ addr = dma_map_single(dev->dev.parent, data, len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev->dev.parent, addr)))
+ goto error_unmap;
+
+ list_move_tail(&e->list, &tx_list);
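+ /* Only the head descriptor keeps a reference to the skb so the
+ * completion path frees it exactly once.
+ */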
+ e->skb = i ? NULL : skb;
+ e->dma_addr = addr;
+ e->dma_len = len;
+
+ e = list_first_entry(&q->tx_list, struct airoha_queue_entry,
+ list);
+ index = e - q->entry;
+
+ val = FIELD_PREP(QDMA_DESC_LEN_MASK, len);
+ if (i < nr_frags - 1)
+ val |= FIELD_PREP(QDMA_DESC_MORE_MASK, 1);
+ WRITE_ONCE(desc->ctrl, cpu_to_le32(val));
+ WRITE_ONCE(desc->addr, cpu_to_le32(addr));
+ val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, index);
+ WRITE_ONCE(desc->data, cpu_to_le32(val));
+ WRITE_ONCE(desc->msg0, cpu_to_le32(msg0));
+ WRITE_ONCE(desc->msg1, cpu_to_le32(msg1));
+ WRITE_ONCE(desc->msg2, cpu_to_le32(0xffff));
+
+ data = skb_frag_address(frag);
+ len = skb_frag_size(frag);
+ }
+ q->queued += i;
+
+ skb_tx_timestamp(skb);
+ netdev_tx_sent_queue(txq, skb->len);
+
+ if (netif_xmit_stopped(txq) || !netdev_xmit_more())
+ airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid),
+ TX_RING_CPU_IDX_MASK,
+ FIELD_PREP(TX_RING_CPU_IDX_MASK, index));
+
+ if (q->ndesc - q->queued < q->free_thr)
+ netif_tx_stop_queue(txq);
+
+ spin_unlock_bh(&q->lock);
+
+ return NETDEV_TX_OK;
+
+error_unmap:
+ while (!list_empty(&tx_list)) {
+ e = list_first_entry(&tx_list, struct airoha_queue_entry,
+ list);
+ dma_unmap_single(dev->dev.parent, e->dma_addr, e->dma_len,
+ DMA_TO_DEVICE);
+ e->dma_addr = 0;
+ list_move_tail(&e->list, &q->tx_list);
+ }
+
+ spin_unlock_bh(&q->lock);
+error:
+ dev_kfree_skb_any(skb);
+ dev->stats.tx_dropped++;
+
+ return NETDEV_TX_OK;
+}
+
+static void airoha_ethtool_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ struct airoha_eth *eth = port->qdma->eth;
+
+ strscpy(info->driver, eth->dev->driver->name, sizeof(info->driver));
+ strscpy(info->bus_info, dev_name(eth->dev), sizeof(info->bus_info));
+}
+
+static void airoha_ethtool_get_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *stats)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ unsigned int start;
+
+ airoha_update_hw_stats(port);
+ do {
+ start = u64_stats_fetch_begin(&port->stats.syncp);
+ stats->FramesTransmittedOK = port->stats.tx_ok_pkts;
+ stats->OctetsTransmittedOK = port->stats.tx_ok_bytes;
+ stats->MulticastFramesXmittedOK = port->stats.tx_multicast;
+ stats->BroadcastFramesXmittedOK = port->stats.tx_broadcast;
+ stats->FramesReceivedOK = port->stats.rx_ok_pkts;
+ stats->OctetsReceivedOK = port->stats.rx_ok_bytes;
+ stats->BroadcastFramesReceivedOK = port->stats.rx_broadcast;
+ } while (u64_stats_fetch_retry(&port->stats.syncp, start));
+}
+
+static const struct ethtool_rmon_hist_range airoha_ethtool_rmon_ranges[] = {
+ { 0, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, 10239 },
+ {},
+};
+
+static void
+airoha_ethtool_get_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ struct airoha_hw_stats *hw_stats = &port->stats;
+ unsigned int start;
+
+ BUILD_BUG_ON(ARRAY_SIZE(airoha_ethtool_rmon_ranges) !=
+ ARRAY_SIZE(hw_stats->tx_len) + 1);
+ BUILD_BUG_ON(ARRAY_SIZE(airoha_ethtool_rmon_ranges) !=
+ ARRAY_SIZE(hw_stats->rx_len) + 1);
+
+ *ranges = airoha_ethtool_rmon_ranges;
+ airoha_update_hw_stats(port);
+ do {
+ int i;
+
+ start = u64_stats_fetch_begin(&port->stats.syncp);
+ stats->fragments = hw_stats->rx_fragment;
+ stats->jabbers = hw_stats->rx_jabber;
+ for (i = 0; i < ARRAY_SIZE(airoha_ethtool_rmon_ranges) - 1;
+ i++) {
+ stats->hist[i] = hw_stats->rx_len[i];
+ stats->hist_tx[i] = hw_stats->tx_len[i];
+ }
+ } while (u64_stats_fetch_retry(&port->stats.syncp, start));
+}
+
+static int airoha_qdma_set_chan_tx_sched(struct airoha_gdm_port *port,
+ int channel, enum tx_sched_mode mode,
+ const u16 *weights, u8 n_weights)
+{
+ int i;
+
+ for (i = 0; i < AIROHA_NUM_TX_RING; i++)
+ airoha_qdma_clear(port->qdma, REG_QUEUE_CLOSE_CFG(channel),
+ TXQ_DISABLE_CHAN_QUEUE_MASK(channel, i));
+
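+ /* Program the per-queue weights one at a time through the
+ * indirect TXWRR command register, polling for completion of
+ * each write.
+ */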
+ for (i = 0; i < n_weights; i++) {
+ u32 status;
+ int err;
+
+ airoha_qdma_wr(port->qdma, REG_TXWRR_WEIGHT_CFG,
+ TWRR_RW_CMD_MASK |
+ FIELD_PREP(TWRR_CHAN_IDX_MASK, channel) |
+ FIELD_PREP(TWRR_QUEUE_IDX_MASK, i) |
+ FIELD_PREP(TWRR_VALUE_MASK, weights[i]));
+ err = read_poll_timeout(airoha_qdma_rr, status,
+ status & TWRR_RW_CMD_DONE,
+ USEC_PER_MSEC, 10 * USEC_PER_MSEC,
+ true, port->qdma,
+ REG_TXWRR_WEIGHT_CFG);
+ if (err)
+ return err;
+ }
+
+ airoha_qdma_rmw(port->qdma, REG_CHAN_QOS_MODE(channel >> 3),
+ CHAN_QOS_MODE_MASK(channel),
+ mode << __ffs(CHAN_QOS_MODE_MASK(channel)));
+
+ return 0;
+}
+
+static int airoha_qdma_set_tx_prio_sched(struct airoha_gdm_port *port,
+ int channel)
+{
+ static const u16 w[AIROHA_NUM_QOS_QUEUES] = {};
+
+ return airoha_qdma_set_chan_tx_sched(port, channel, TC_SCH_SP, w,
+ ARRAY_SIZE(w));
+}
+
+static int airoha_qdma_set_tx_ets_sched(struct airoha_gdm_port *port,
+ int channel,
+ struct tc_ets_qopt_offload *opt)
+{
+ struct tc_ets_qopt_offload_replace_params *p = &opt->replace_params;
+ enum tx_sched_mode mode = TC_SCH_SP;
+ u16 w[AIROHA_NUM_QOS_QUEUES] = {};
+ int i, nstrict = 0;
+
+ if (p->bands > AIROHA_NUM_QOS_QUEUES)
+ return -EINVAL;
+
+ for (i = 0; i < p->bands; i++) {
+ if (!p->quanta[i])
+ nstrict++;
+ }
+
+ /* this configuration is not supported by the hw */
+ if (nstrict == AIROHA_NUM_QOS_QUEUES - 1)
+ return -EINVAL;
+
+ /* The EN7581 SoC supports a fixed QoS band priority where WRR queues
+ * have the lowest priority with respect to SP ones,
+ * e.g.: WRR0, WRR1, .., WRRm, SP0, SP1, .., SPn
+ */
+ for (i = 0; i < nstrict; i++) {
+ if (p->priomap[p->bands - i - 1] != i)
+ return -EINVAL;
+ }
+
+ for (i = 0; i < p->bands - nstrict; i++) {
+ if (p->priomap[i] != nstrict + i)
+ return -EINVAL;
+
+ w[i] = p->weights[nstrict + i];
+ }
+
+ if (!nstrict)
+ mode = TC_SCH_WRR8;
+ else if (nstrict < AIROHA_NUM_QOS_QUEUES - 1)
+ mode = nstrict + 1;
+
+ return airoha_qdma_set_chan_tx_sched(port, channel, mode, w,
+ ARRAY_SIZE(w));
+}
+
+static int airoha_qdma_get_tx_ets_stats(struct airoha_gdm_port *port,
+ int channel,
+ struct tc_ets_qopt_offload *opt)
+{
+ u64 cpu_tx_packets = airoha_qdma_rr(port->qdma,
+ REG_CNTR_VAL(channel << 1));
+ u64 fwd_tx_packets = airoha_qdma_rr(port->qdma,
+ REG_CNTR_VAL((channel << 1) + 1));
+ u64 tx_packets = (cpu_tx_packets - port->cpu_tx_packets) +
+ (fwd_tx_packets - port->fwd_tx_packets);
+ _bstats_update(opt->stats.bstats, 0, tx_packets);
+
+ port->cpu_tx_packets = cpu_tx_packets;
+ port->fwd_tx_packets = fwd_tx_packets;
+
+ return 0;
+}
+
+static int airoha_tc_setup_qdisc_ets(struct airoha_gdm_port *port,
+ struct tc_ets_qopt_offload *opt)
+{
+ int channel;
+
+ if (opt->parent == TC_H_ROOT)
+ return -EINVAL;
+
+ channel = TC_H_MAJ(opt->handle) >> 16;
+ channel = channel % AIROHA_NUM_QOS_CHANNELS;
+
+ switch (opt->command) {
+ case TC_ETS_REPLACE:
+ return airoha_qdma_set_tx_ets_sched(port, channel, opt);
+ case TC_ETS_DESTROY:
+ /* PRIO is default qdisc scheduler */
+ return airoha_qdma_set_tx_prio_sched(port, channel);
+ case TC_ETS_STATS:
+ return airoha_qdma_get_tx_ets_stats(port, channel, opt);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int airoha_qdma_get_rl_param(struct airoha_qdma *qdma, int queue_id,
+ u32 addr, enum trtcm_param_type param,
+ u32 *val_low, u32 *val_high)
+{
+ u32 idx = QDMA_METER_IDX(queue_id), group = QDMA_METER_GROUP(queue_id);
+ u32 val, config = FIELD_PREP(RATE_LIMIT_PARAM_TYPE_MASK, param) |
+ FIELD_PREP(RATE_LIMIT_METER_GROUP_MASK, group) |
+ FIELD_PREP(RATE_LIMIT_PARAM_INDEX_MASK, idx);
+
+ airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
+ if (read_poll_timeout(airoha_qdma_rr, val,
+ val & RATE_LIMIT_PARAM_RW_DONE_MASK,
+ USEC_PER_MSEC, 10 * USEC_PER_MSEC, true, qdma,
+ REG_TRTCM_CFG_PARAM(addr)))
+ return -ETIMEDOUT;
+
+ *val_low = airoha_qdma_rr(qdma, REG_TRTCM_DATA_LOW(addr));
+ if (val_high)
+ *val_high = airoha_qdma_rr(qdma, REG_TRTCM_DATA_HIGH(addr));
+
+ return 0;
+}
+
+static int airoha_qdma_set_rl_param(struct airoha_qdma *qdma, int queue_id,
+ u32 addr, enum trtcm_param_type param,
+ u32 val)
+{
+ u32 idx = QDMA_METER_IDX(queue_id), group = QDMA_METER_GROUP(queue_id);
+ u32 config = RATE_LIMIT_PARAM_RW_MASK |
+ FIELD_PREP(RATE_LIMIT_PARAM_TYPE_MASK, param) |
+ FIELD_PREP(RATE_LIMIT_METER_GROUP_MASK, group) |
+ FIELD_PREP(RATE_LIMIT_PARAM_INDEX_MASK, idx);
+
+ airoha_qdma_wr(qdma, REG_TRTCM_DATA_LOW(addr), val);
+ airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
+
+ return read_poll_timeout(airoha_qdma_rr, val,
+ val & RATE_LIMIT_PARAM_RW_DONE_MASK,
+ USEC_PER_MSEC, 10 * USEC_PER_MSEC, true,
+ qdma, REG_TRTCM_CFG_PARAM(addr));
+}
+
+static int airoha_qdma_set_rl_config(struct airoha_qdma *qdma, int queue_id,
+ u32 addr, bool enable, u32 enable_mask)
+{
+ u32 val;
+ int err;
+
+ err = airoha_qdma_get_rl_param(qdma, queue_id, addr, TRTCM_MISC_MODE,
+ &val, NULL);
+ if (err)
+ return err;
+
+ val = enable ? val | enable_mask : val & ~enable_mask;
+
+ return airoha_qdma_set_rl_param(qdma, queue_id, addr, TRTCM_MISC_MODE,
+ val);
+}
+
+static int airoha_qdma_set_rl_token_bucket(struct airoha_qdma *qdma,
+ int queue_id, u32 rate_val,
+ u32 bucket_size)
+{
+ u32 val, config, tick, unit, rate, rate_frac;
+ int err;
+
+ err = airoha_qdma_get_rl_param(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
+ TRTCM_MISC_MODE, &config, NULL);
+ if (err)
+ return err;
+
+ val = airoha_qdma_rr(qdma, REG_INGRESS_TRTCM_CFG);
+ tick = FIELD_GET(INGRESS_FAST_TICK_MASK, val);
+ if (config & TRTCM_TICK_SEL)
+ tick *= FIELD_GET(INGRESS_SLOW_TICK_RATIO_MASK, val);
+ if (!tick)
+ return -EINVAL;
+
+ unit = (config & TRTCM_PKT_MODE) ? 1000000 / tick : 8000 / tick;
+ if (!unit)
+ return -EINVAL;
+
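+ /* Split the requested rate into an integer number of tokens per
+ * tick plus a fractional part scaled to the hw fraction field.
+ */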
+ rate = rate_val / unit;
+ rate_frac = rate_val % unit;
+ rate_frac = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate_frac) / unit;
+ rate = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate) |
+ FIELD_PREP(TRTCM_TOKEN_RATE_FRACTION_MASK, rate_frac);
+
+ err = airoha_qdma_set_rl_param(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
+ TRTCM_TOKEN_RATE_MODE, rate);
+ if (err)
+ return err;
+
+ val = bucket_size;
+ if (!(config & TRTCM_PKT_MODE))
+ val = max_t(u32, val, MIN_TOKEN_SIZE);
+ val = min_t(u32, __fls(val), MAX_TOKEN_SIZE_OFFSET);
+
+ return airoha_qdma_set_rl_param(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
+ TRTCM_BUCKETSIZE_SHIFT_MODE, val);
+}
+
+static int airoha_qdma_init_rl_config(struct airoha_qdma *qdma, int queue_id,
+ bool enable, enum trtcm_unit_type unit)
+{
+ bool tick_sel = queue_id == 0 || queue_id == 2 || queue_id == 8;
+ enum trtcm_param mode = TRTCM_METER_MODE;
+ int err;
+
+ mode |= unit == TRTCM_PACKET_UNIT ? TRTCM_PKT_MODE : 0;
+ err = airoha_qdma_set_rl_config(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
+ enable, mode);
+ if (err)
+ return err;
+
+ return airoha_qdma_set_rl_config(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
+ tick_sel, TRTCM_TICK_SEL);
+}
+
+static int airoha_qdma_get_trtcm_param(struct airoha_qdma *qdma, int channel,
+ u32 addr, enum trtcm_param_type param,
+ enum trtcm_mode_type mode,
+ u32 *val_low, u32 *val_high)
+{
+ u32 idx = QDMA_METER_IDX(channel), group = QDMA_METER_GROUP(channel);
+ u32 val, config = FIELD_PREP(TRTCM_PARAM_TYPE_MASK, param) |
+ FIELD_PREP(TRTCM_METER_GROUP_MASK, group) |
+ FIELD_PREP(TRTCM_PARAM_INDEX_MASK, idx) |
+ FIELD_PREP(TRTCM_PARAM_RATE_TYPE_MASK, mode);
+
+ airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
+ if (read_poll_timeout(airoha_qdma_rr, val,
+ val & TRTCM_PARAM_RW_DONE_MASK,
+ USEC_PER_MSEC, 10 * USEC_PER_MSEC, true,
+ qdma, REG_TRTCM_CFG_PARAM(addr)))
+ return -ETIMEDOUT;
+
+ *val_low = airoha_qdma_rr(qdma, REG_TRTCM_DATA_LOW(addr));
+ if (val_high)
+ *val_high = airoha_qdma_rr(qdma, REG_TRTCM_DATA_HIGH(addr));
+
+ return 0;
+}
+
+static int airoha_qdma_set_trtcm_param(struct airoha_qdma *qdma, int channel,
+ u32 addr, enum trtcm_param_type param,
+ enum trtcm_mode_type mode, u32 val)
+{
+ u32 idx = QDMA_METER_IDX(channel), group = QDMA_METER_GROUP(channel);
+ u32 config = TRTCM_PARAM_RW_MASK |
+ FIELD_PREP(TRTCM_PARAM_TYPE_MASK, param) |
+ FIELD_PREP(TRTCM_METER_GROUP_MASK, group) |
+ FIELD_PREP(TRTCM_PARAM_INDEX_MASK, idx) |
+ FIELD_PREP(TRTCM_PARAM_RATE_TYPE_MASK, mode);
+
+ airoha_qdma_wr(qdma, REG_TRTCM_DATA_LOW(addr), val);
+ airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
+
+ return read_poll_timeout(airoha_qdma_rr, val,
+ val & TRTCM_PARAM_RW_DONE_MASK,
+ USEC_PER_MSEC, 10 * USEC_PER_MSEC, true,
+ qdma, REG_TRTCM_CFG_PARAM(addr));
+}
+
+static int airoha_qdma_set_trtcm_config(struct airoha_qdma *qdma, int channel,
+ u32 addr, enum trtcm_mode_type mode,
+ bool enable, u32 enable_mask)
+{
+ u32 val;
+
+ if (airoha_qdma_get_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
+ mode, &val, NULL))
+ return -EINVAL;
+
+ val = enable ? val | enable_mask : val & ~enable_mask;
+
+ return airoha_qdma_set_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
+ mode, val);
+}
+
+static int airoha_qdma_set_trtcm_token_bucket(struct airoha_qdma *qdma,
+ int channel, u32 addr,
+ enum trtcm_mode_type mode,
+ u32 rate_val, u32 bucket_size)
+{
+ u32 val, config, tick, unit, rate, rate_frac;
+ int err;
+
+ if (airoha_qdma_get_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
+ mode, &config, NULL))
+ return -EINVAL;
+
+ val = airoha_qdma_rr(qdma, addr);
+ tick = FIELD_GET(INGRESS_FAST_TICK_MASK, val);
+ if (config & TRTCM_TICK_SEL)
+ tick *= FIELD_GET(INGRESS_SLOW_TICK_RATIO_MASK, val);
+ if (!tick)
+ return -EINVAL;
+
+ unit = (config & TRTCM_PKT_MODE) ? 1000000 / tick : 8000 / tick;
+ if (!unit)
+ return -EINVAL;
+
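+ /* Same rate split as airoha_qdma_set_rl_token_bucket(): integer
+ * tokens per tick plus a scaled fractional remainder.
+ */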
+ rate = rate_val / unit;
+ rate_frac = rate_val % unit;
+ rate_frac = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate_frac) / unit;
+ rate = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate) |
+ FIELD_PREP(TRTCM_TOKEN_RATE_FRACTION_MASK, rate_frac);
+
+ err = airoha_qdma_set_trtcm_param(qdma, channel, addr,
+ TRTCM_TOKEN_RATE_MODE, mode, rate);
+ if (err)
+ return err;
+
+ val = max_t(u32, bucket_size, MIN_TOKEN_SIZE);
+ val = min_t(u32, __fls(val), MAX_TOKEN_SIZE_OFFSET);
+
+ return airoha_qdma_set_trtcm_param(qdma, channel, addr,
+ TRTCM_BUCKETSIZE_SHIFT_MODE,
+ mode, val);
+}
+
+static int airoha_qdma_set_tx_rate_limit(struct airoha_gdm_port *port,
+ int channel, u32 rate,
+ u32 bucket_size)
+{
+ int i, err;
+
+ for (i = 0; i <= TRTCM_PEAK_MODE; i++) {
+ err = airoha_qdma_set_trtcm_config(port->qdma, channel,
+ REG_EGRESS_TRTCM_CFG, i,
+ !!rate, TRTCM_METER_MODE);
+ if (err)
+ return err;
+
+ err = airoha_qdma_set_trtcm_token_bucket(port->qdma, channel,
+ REG_EGRESS_TRTCM_CFG,
+ i, rate, bucket_size);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int airoha_tc_htb_alloc_leaf_queue(struct airoha_gdm_port *port,
+ struct tc_htb_qopt_offload *opt)
+{
+ u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
+ u32 rate = div_u64(opt->rate, 1000) << 3; /* kbps */
+ struct net_device *dev = port->dev;
+ int num_tx_queues = dev->real_num_tx_queues;
+ int err;
+
+ if (opt->parent_classid != TC_HTB_CLASSID_ROOT) {
+ NL_SET_ERR_MSG_MOD(opt->extack, "invalid parent classid");
+ return -EINVAL;
+ }
+
+ err = airoha_qdma_set_tx_rate_limit(port, channel, rate, opt->quantum);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(opt->extack,
+ "failed configuring htb offload");
+ return err;
+ }
+
+ if (opt->command == TC_HTB_NODE_MODIFY)
+ return 0;
+
+ err = netif_set_real_num_tx_queues(dev, num_tx_queues + 1);
+ if (err) {
+ airoha_qdma_set_tx_rate_limit(port, channel, 0, opt->quantum);
+ NL_SET_ERR_MSG_MOD(opt->extack,
+ "failed setting real_num_tx_queues");
+ return err;
+ }
+
+ set_bit(channel, port->qos_sq_bmap);
+ opt->qid = AIROHA_NUM_TX_RING + channel;
+
+ return 0;
+}
+
+static int airoha_qdma_set_rx_meter(struct airoha_gdm_port *port,
+ u32 rate, u32 bucket_size,
+ enum trtcm_unit_type unit_type)
+{
+ struct airoha_qdma *qdma = port->qdma;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+ int err;
+
+ if (!qdma->q_rx[i].ndesc)
+ continue;
+
+ err = airoha_qdma_init_rl_config(qdma, i, !!rate, unit_type);
+ if (err)
+ return err;
+
+ err = airoha_qdma_set_rl_token_bucket(qdma, i, rate,
+ bucket_size);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int airoha_tc_matchall_act_validate(struct tc_cls_matchall_offload *f)
+{
+ const struct flow_action *actions = &f->rule->action;
+ const struct flow_action_entry *act;
+
+ if (!flow_action_has_entries(actions)) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "filter run with no actions");
+ return -EINVAL;
+ }
+
+ if (!flow_offload_has_one_action(actions)) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+				   "only one action per filter is supported");
+ return -EOPNOTSUPP;
+ }
+
+ act = &actions->entries[0];
+ if (act->id != FLOW_ACTION_POLICE) {
+ NL_SET_ERR_MSG_MOD(f->common.extack, "unsupported action");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "invalid exceed action id");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "invalid notexceed action id");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(actions, act)) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "action accept must be last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.peakrate_bytes_ps || act->police.avrate ||
+ act->police.overhead || act->police.mtu) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "peakrate/avrate/overhead/mtu unsupported");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int airoha_dev_tc_matchall(struct net_device *dev,
+ struct tc_cls_matchall_offload *f)
+{
+ enum trtcm_unit_type unit_type = TRTCM_BYTE_UNIT;
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ u32 rate = 0, bucket_size = 0;
+
+ switch (f->command) {
+ case TC_CLSMATCHALL_REPLACE: {
+ const struct flow_action_entry *act;
+ int err;
+
+ err = airoha_tc_matchall_act_validate(f);
+ if (err)
+ return err;
+
+ act = &f->rule->action.entries[0];
+ if (act->police.rate_pkt_ps) {
+ rate = act->police.rate_pkt_ps;
+ bucket_size = act->police.burst_pkt;
+ unit_type = TRTCM_PACKET_UNIT;
+ } else {
+ rate = div_u64(act->police.rate_bytes_ps, 1000);
+			rate = rate << 3; /* kbps */
+ bucket_size = act->police.burst;
+ }
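+		/* fall through and program the meter with the rate computed
+		 * above; TC_CLSMATCHALL_DESTROY resets it (rate and bucket
+		 * size set to 0).
+		 */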
+ fallthrough;
+ }
+ case TC_CLSMATCHALL_DESTROY:
+ return airoha_qdma_set_rx_meter(port, rate, bucket_size,
+ unit_type);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int airoha_dev_setup_tc_block_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ struct net_device *dev = cb_priv;
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ struct airoha_eth *eth = port->qdma->eth;
+
+ if (!tc_can_offload(dev))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return airoha_ppe_setup_tc_block_cb(&eth->ppe->dev, type_data);
+ case TC_SETUP_CLSMATCHALL:
+ return airoha_dev_tc_matchall(dev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int airoha_dev_setup_tc_block(struct airoha_gdm_port *port,
+ struct flow_block_offload *f)
+{
+ flow_setup_cb_t *cb = airoha_dev_setup_tc_block_cb;
+ static LIST_HEAD(block_cb_list);
+ struct flow_block_cb *block_cb;
+
+ if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ f->driver_block_list = &block_cb_list;
+ switch (f->command) {
+ case FLOW_BLOCK_BIND:
+ block_cb = flow_block_cb_lookup(f->block, cb, port->dev);
+ if (block_cb) {
+ flow_block_cb_incref(block_cb);
+ return 0;
+ }
+ block_cb = flow_block_cb_alloc(cb, port->dev, port->dev, NULL);
+ if (IS_ERR(block_cb))
+ return PTR_ERR(block_cb);
+
+ flow_block_cb_incref(block_cb);
+ flow_block_cb_add(block_cb, f);
+ list_add_tail(&block_cb->driver_list, &block_cb_list);
+ return 0;
+ case FLOW_BLOCK_UNBIND:
+ block_cb = flow_block_cb_lookup(f->block, cb, port->dev);
+ if (!block_cb)
+ return -ENOENT;
+
+ if (!flow_block_cb_decref(block_cb)) {
+ flow_block_cb_remove(block_cb, f);
+ list_del(&block_cb->driver_list);
+ }
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void airoha_tc_remove_htb_queue(struct airoha_gdm_port *port, int queue)
+{
+ struct net_device *dev = port->dev;
+
+ netif_set_real_num_tx_queues(dev, dev->real_num_tx_queues - 1);
+ airoha_qdma_set_tx_rate_limit(port, queue + 1, 0, 0);
+ clear_bit(queue, port->qos_sq_bmap);
+}
+
+static int airoha_tc_htb_delete_leaf_queue(struct airoha_gdm_port *port,
+ struct tc_htb_qopt_offload *opt)
+{
+ u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
+
+ if (!test_bit(channel, port->qos_sq_bmap)) {
+ NL_SET_ERR_MSG_MOD(opt->extack, "invalid queue id");
+ return -EINVAL;
+ }
+
+ airoha_tc_remove_htb_queue(port, channel);
+
+ return 0;
+}
+
+static int airoha_tc_htb_destroy(struct airoha_gdm_port *port)
+{
+ int q;
+
+ for_each_set_bit(q, port->qos_sq_bmap, AIROHA_NUM_QOS_CHANNELS)
+ airoha_tc_remove_htb_queue(port, q);
+
+ return 0;
+}
+
+static int airoha_tc_get_htb_get_leaf_queue(struct airoha_gdm_port *port,
+ struct tc_htb_qopt_offload *opt)
+{
+ u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
+
+ if (!test_bit(channel, port->qos_sq_bmap)) {
+ NL_SET_ERR_MSG_MOD(opt->extack, "invalid queue id");
+ return -EINVAL;
+ }
+
+ opt->qid = AIROHA_NUM_TX_RING + channel;
+
+ return 0;
+}
+
+static int airoha_tc_setup_qdisc_htb(struct airoha_gdm_port *port,
+ struct tc_htb_qopt_offload *opt)
+{
+ switch (opt->command) {
+ case TC_HTB_CREATE:
+ break;
+ case TC_HTB_DESTROY:
+ return airoha_tc_htb_destroy(port);
+ case TC_HTB_NODE_MODIFY:
+ case TC_HTB_LEAF_ALLOC_QUEUE:
+ return airoha_tc_htb_alloc_leaf_queue(port, opt);
+ case TC_HTB_LEAF_DEL:
+ case TC_HTB_LEAF_DEL_LAST:
+ case TC_HTB_LEAF_DEL_LAST_FORCE:
+ return airoha_tc_htb_delete_leaf_queue(port, opt);
+ case TC_HTB_LEAF_QUERY_QUEUE:
+ return airoha_tc_get_htb_get_leaf_queue(port, opt);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int airoha_dev_tc_setup(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+
+ switch (type) {
+ case TC_SETUP_QDISC_ETS:
+ return airoha_tc_setup_qdisc_ets(port, type_data);
+ case TC_SETUP_QDISC_HTB:
+ return airoha_tc_setup_qdisc_htb(port, type_data);
+ case TC_SETUP_BLOCK:
+ case TC_SETUP_FT:
+ return airoha_dev_setup_tc_block(port, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct net_device_ops airoha_netdev_ops = {
+ .ndo_init = airoha_dev_init,
+ .ndo_open = airoha_dev_open,
+ .ndo_stop = airoha_dev_stop,
+ .ndo_change_mtu = airoha_dev_change_mtu,
+ .ndo_select_queue = airoha_dev_select_queue,
+ .ndo_start_xmit = airoha_dev_xmit,
+ .ndo_get_stats64 = airoha_dev_get_stats64,
+ .ndo_set_mac_address = airoha_dev_set_macaddr,
+ .ndo_setup_tc = airoha_dev_tc_setup,
+};
+
+static const struct ethtool_ops airoha_ethtool_ops = {
+ .get_drvinfo = airoha_ethtool_get_drvinfo,
+ .get_eth_mac_stats = airoha_ethtool_get_mac_stats,
+ .get_rmon_stats = airoha_ethtool_get_rmon_stats,
+ .get_link = ethtool_op_get_link,
+};
+
+static int airoha_metadata_dst_alloc(struct airoha_gdm_port *port)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(port->dsa_meta); i++) {
+ struct metadata_dst *md_dst;
+
+ md_dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
+ GFP_KERNEL);
+ if (!md_dst)
+ return -ENOMEM;
+
+ md_dst->u.port_info.port_id = i;
+ port->dsa_meta[i] = md_dst;
+ }
+
+ return 0;
+}
+
+static void airoha_metadata_dst_free(struct airoha_gdm_port *port)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(port->dsa_meta); i++) {
+ if (!port->dsa_meta[i])
+ continue;
+
+ metadata_dst_free(port->dsa_meta[i]);
+ }
+}
+
+bool airoha_is_valid_gdm_port(struct airoha_eth *eth,
+ struct airoha_gdm_port *port)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
+ if (eth->ports[i] == port)
+ return true;
+ }
+
+ return false;
+}
+
+static int airoha_alloc_gdm_port(struct airoha_eth *eth,
+ struct device_node *np, int index)
+{
+ const __be32 *id_ptr = of_get_property(np, "reg", NULL);
+ struct airoha_gdm_port *port;
+ struct airoha_qdma *qdma;
+ struct net_device *dev;
+ int err, p;
+ u32 id;
+
+ if (!id_ptr) {
+ dev_err(eth->dev, "missing gdm port id\n");
+ return -EINVAL;
+ }
+
+ id = be32_to_cpup(id_ptr);
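+	/* DT "reg" port ids are 1-based, eth->ports[] is 0-indexed */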
+ p = id - 1;
+
+ if (!id || id > ARRAY_SIZE(eth->ports)) {
+ dev_err(eth->dev, "invalid gdm port id: %d\n", id);
+ return -EINVAL;
+ }
+
+ if (eth->ports[p]) {
+ dev_err(eth->dev, "duplicate gdm port id: %d\n", id);
+ return -EINVAL;
+ }
+
+ dev = devm_alloc_etherdev_mqs(eth->dev, sizeof(*port),
+ AIROHA_NUM_NETDEV_TX_RINGS,
+ AIROHA_NUM_RX_RING);
+ if (!dev) {
+ dev_err(eth->dev, "alloc_etherdev failed\n");
+ return -ENOMEM;
+ }
+
+ qdma = &eth->qdma[index % AIROHA_MAX_NUM_QDMA];
+ dev->netdev_ops = &airoha_netdev_ops;
+ dev->ethtool_ops = &airoha_ethtool_ops;
+ dev->max_mtu = AIROHA_MAX_MTU;
+ dev->watchdog_timeo = 5 * HZ;
+ dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_TSO6 | NETIF_F_IPV6_CSUM |
+ NETIF_F_SG | NETIF_F_TSO |
+ NETIF_F_HW_TC;
+ dev->features |= dev->hw_features;
+ dev->vlan_features = dev->hw_features;
+ dev->dev.of_node = np;
+ dev->irq = qdma->irq_banks[0].irq;
+ SET_NETDEV_DEV(dev, eth->dev);
+
+ /* reserve hw queues for HTB offloading */
+ err = netif_set_real_num_tx_queues(dev, AIROHA_NUM_TX_RING);
+ if (err)
+ return err;
+
+ err = of_get_ethdev_address(np, dev);
+ if (err) {
+ if (err == -EPROBE_DEFER)
+ return err;
+
+ eth_hw_addr_random(dev);
+ dev_info(eth->dev, "generated random MAC address %pM\n",
+ dev->dev_addr);
+ }
+
+ port = netdev_priv(dev);
+ u64_stats_init(&port->stats.syncp);
+ spin_lock_init(&port->stats.lock);
+ port->qdma = qdma;
+ port->dev = dev;
+ port->id = id;
+ eth->ports[p] = port;
+
+ err = airoha_metadata_dst_alloc(port);
+ if (err)
+ return err;
+
+ err = register_netdev(dev);
+ if (err)
+ goto free_metadata_dst;
+
+ return 0;
+
+free_metadata_dst:
+ airoha_metadata_dst_free(port);
+ return err;
+}
+
+static int airoha_probe(struct platform_device *pdev)
+{
+ struct reset_control_bulk_data *xsi_rsts;
+ struct device_node *np;
+ struct airoha_eth *eth;
+ int i, err;
+
+ eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
+ if (!eth)
+ return -ENOMEM;
+
+ eth->soc = of_device_get_match_data(&pdev->dev);
+ if (!eth->soc)
+ return -EINVAL;
+
+ eth->dev = &pdev->dev;
+
+ err = dma_set_mask_and_coherent(eth->dev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(eth->dev, "failed configuring DMA mask\n");
+ return err;
+ }
+
+ eth->fe_regs = devm_platform_ioremap_resource_byname(pdev, "fe");
+ if (IS_ERR(eth->fe_regs))
+ return dev_err_probe(eth->dev, PTR_ERR(eth->fe_regs),
+ "failed to iomap fe regs\n");
+
+ eth->rsts[0].id = "fe";
+ eth->rsts[1].id = "pdma";
+ eth->rsts[2].id = "qdma";
+ err = devm_reset_control_bulk_get_exclusive(eth->dev,
+ ARRAY_SIZE(eth->rsts),
+ eth->rsts);
+ if (err) {
+ dev_err(eth->dev, "failed to get bulk reset lines\n");
+ return err;
+ }
+
+ xsi_rsts = devm_kcalloc(eth->dev,
+ eth->soc->num_xsi_rsts, sizeof(*xsi_rsts),
+ GFP_KERNEL);
+ if (!xsi_rsts)
+ return -ENOMEM;
+
+ eth->xsi_rsts = xsi_rsts;
+ for (i = 0; i < eth->soc->num_xsi_rsts; i++)
+ eth->xsi_rsts[i].id = eth->soc->xsi_rsts_names[i];
+
+ err = devm_reset_control_bulk_get_exclusive(eth->dev,
+ eth->soc->num_xsi_rsts,
+ eth->xsi_rsts);
+ if (err) {
+ dev_err(eth->dev, "failed to get bulk xsi reset lines\n");
+ return err;
+ }
+
+ eth->napi_dev = alloc_netdev_dummy(0);
+ if (!eth->napi_dev)
+ return -ENOMEM;
+
+ /* Enable threaded NAPI by default */
+ eth->napi_dev->threaded = true;
+ strscpy(eth->napi_dev->name, "qdma_eth", sizeof(eth->napi_dev->name));
+ platform_set_drvdata(pdev, eth);
+
+ err = airoha_hw_init(pdev, eth);
+ if (err)
+ goto error_hw_cleanup;
+
+ for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
+ airoha_qdma_start_napi(&eth->qdma[i]);
+
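+	/* register a GDM port for each enabled "airoha,eth-mac" child node */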
+ i = 0;
+ for_each_child_of_node(pdev->dev.of_node, np) {
+ if (!of_device_is_compatible(np, "airoha,eth-mac"))
+ continue;
+
+ if (!of_device_is_available(np))
+ continue;
+
+ err = airoha_alloc_gdm_port(eth, np, i++);
+ if (err) {
+ of_node_put(np);
+ goto error_napi_stop;
+ }
+ }
+
+ return 0;
+
+error_napi_stop:
+ for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
+ airoha_qdma_stop_napi(&eth->qdma[i]);
+ airoha_ppe_deinit(eth);
+error_hw_cleanup:
+ for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
+ airoha_hw_cleanup(&eth->qdma[i]);
+
+ for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
+ struct airoha_gdm_port *port = eth->ports[i];
+
+ if (port && port->dev->reg_state == NETREG_REGISTERED) {
+ unregister_netdev(port->dev);
+ airoha_metadata_dst_free(port);
+ }
+ }
+ free_netdev(eth->napi_dev);
+ platform_set_drvdata(pdev, NULL);
+
+ return err;
+}
+
+static void airoha_remove(struct platform_device *pdev)
+{
+ struct airoha_eth *eth = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) {
+ airoha_qdma_stop_napi(&eth->qdma[i]);
+ airoha_hw_cleanup(&eth->qdma[i]);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
+ struct airoha_gdm_port *port = eth->ports[i];
+
+ if (!port)
+ continue;
+
+ airoha_dev_stop(port->dev);
+ unregister_netdev(port->dev);
+ airoha_metadata_dst_free(port);
+ }
+ free_netdev(eth->napi_dev);
+
+ airoha_ppe_deinit(eth);
+ platform_set_drvdata(pdev, NULL);
+}
+
+static const char * const en7581_xsi_rsts_names[] = {
+ "xsi-mac",
+ "hsi0-mac",
+ "hsi1-mac",
+ "hsi-mac",
+ "xfp-mac",
+};
+
+static int airoha_en7581_get_src_port_id(struct airoha_gdm_port *port, int nbq)
+{
+ switch (port->id) {
+ case 3:
+ /* 7581 SoC supports PCIe serdes on GDM3 port */
+ if (nbq == 4)
+ return HSGMII_LAN_7581_PCIE0_SRCPORT;
+ if (nbq == 5)
+ return HSGMII_LAN_7581_PCIE1_SRCPORT;
+ break;
+ case 4:
+ /* 7581 SoC supports eth and usb serdes on GDM4 port */
+ if (!nbq)
+ return HSGMII_LAN_7581_ETH_SRCPORT;
+ if (nbq == 1)
+ return HSGMII_LAN_7581_USB_SRCPORT;
+ break;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static const char * const an7583_xsi_rsts_names[] = {
+ "xsi-mac",
+ "hsi0-mac",
+ "hsi1-mac",
+ "xfp-mac",
+};
+
+static int airoha_an7583_get_src_port_id(struct airoha_gdm_port *port, int nbq)
+{
+ switch (port->id) {
+ case 3:
+ /* 7583 SoC supports eth serdes on GDM3 port */
+ if (!nbq)
+ return HSGMII_LAN_7583_ETH_SRCPORT;
+ break;
+ case 4:
+ /* 7583 SoC supports PCIe and USB serdes on GDM4 port */
+ if (!nbq)
+ return HSGMII_LAN_7583_PCIE_SRCPORT;
+ if (nbq == 1)
+ return HSGMII_LAN_7583_USB_SRCPORT;
+ break;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static const struct airoha_eth_soc_data en7581_soc_data = {
+ .version = 0x7581,
+ .xsi_rsts_names = en7581_xsi_rsts_names,
+ .num_xsi_rsts = ARRAY_SIZE(en7581_xsi_rsts_names),
+ .num_ppe = 2,
+ .ops = {
+ .get_src_port_id = airoha_en7581_get_src_port_id,
+ },
+};
+
+static const struct airoha_eth_soc_data an7583_soc_data = {
+ .version = 0x7583,
+ .xsi_rsts_names = an7583_xsi_rsts_names,
+ .num_xsi_rsts = ARRAY_SIZE(an7583_xsi_rsts_names),
+ .num_ppe = 1,
+ .ops = {
+ .get_src_port_id = airoha_an7583_get_src_port_id,
+ },
+};
+
+static const struct of_device_id of_airoha_match[] = {
+ { .compatible = "airoha,en7581-eth", .data = &en7581_soc_data },
+ { .compatible = "airoha,an7583-eth", .data = &an7583_soc_data },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_airoha_match);
+
+static struct platform_driver airoha_driver = {
+ .probe = airoha_probe,
+ .remove = airoha_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = of_airoha_match,
+ },
+};
+module_platform_driver(airoha_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_DESCRIPTION("Ethernet driver for Airoha SoC");
diff --git a/drivers/net/ethernet/airoha/airoha_eth.h b/drivers/net/ethernet/airoha/airoha_eth.h
new file mode 100644
index 000000000000..fbbc58133364
--- /dev/null
+++ b/drivers/net/ethernet/airoha/airoha_eth.h
@@ -0,0 +1,673 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#ifndef AIROHA_ETH_H
+#define AIROHA_ETH_H
+
+#include <linux/debugfs.h>
+#include <linux/etherdevice.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/reset.h>
+#include <linux/soc/airoha/airoha_offload.h>
+#include <net/dsa.h>
+
+#define AIROHA_MAX_NUM_GDM_PORTS 4
+#define AIROHA_MAX_NUM_QDMA 2
+#define AIROHA_MAX_NUM_IRQ_BANKS 4
+#define AIROHA_MAX_DSA_PORTS 7
+#define AIROHA_MAX_NUM_RSTS 3
+#define AIROHA_MAX_MTU 9216
+#define AIROHA_MAX_PACKET_SIZE 2048
+#define AIROHA_NUM_QOS_CHANNELS 4
+#define AIROHA_NUM_QOS_QUEUES 8
+#define AIROHA_NUM_TX_RING 32
+#define AIROHA_NUM_RX_RING 32
+#define AIROHA_NUM_NETDEV_TX_RINGS (AIROHA_NUM_TX_RING + \
+ AIROHA_NUM_QOS_CHANNELS)
+#define AIROHA_FE_MC_MAX_VLAN_TABLE 64
+#define AIROHA_FE_MC_MAX_VLAN_PORT 16
+#define AIROHA_NUM_TX_IRQ 2
+#define HW_DSCP_NUM 2048
+#define IRQ_QUEUE_LEN(_n) ((_n) ? 1024 : 2048)
+#define TX_DSCP_NUM 1024
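+/* Per-ring rx descriptor count: 1024 for ring 0, 128 for rings 2, 11 and 15,
+ * 16 for all the other rings.
+ */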
+#define RX_DSCP_NUM(_n) \
+ ((_n) == 2 ? 128 : \
+ (_n) == 11 ? 128 : \
+ (_n) == 15 ? 128 : \
+ (_n) == 0 ? 1024 : 16)
+
+#define PSE_RSV_PAGES 128
+#define PSE_QUEUE_RSV_PAGES 64
+
+#define QDMA_METER_IDX(_n) ((_n) & 0xff)
+#define QDMA_METER_GROUP(_n) (((_n) >> 8) & 0x3)
+
+#define PPE_SRAM_NUM_ENTRIES (8 * 1024)
+#define PPE_STATS_NUM_ENTRIES (4 * 1024)
+#define PPE_DRAM_NUM_ENTRIES (16 * 1024)
+#define PPE_ENTRY_SIZE 80
+#define PPE_RAM_NUM_ENTRIES_SHIFT(_n) (__ffs((_n) >> 10))
+
+#define MTK_HDR_LEN 4
+#define MTK_HDR_XMIT_TAGGED_TPID_8100 1
+#define MTK_HDR_XMIT_TAGGED_TPID_88A8 2
+
+enum {
+ QDMA_INT_REG_IDX0,
+ QDMA_INT_REG_IDX1,
+ QDMA_INT_REG_IDX2,
+ QDMA_INT_REG_IDX3,
+ QDMA_INT_REG_IDX4,
+ QDMA_INT_REG_MAX
+};
+
+enum {
+ HSGMII_LAN_7581_PCIE0_SRCPORT = 0x16,
+ HSGMII_LAN_7581_PCIE1_SRCPORT,
+ HSGMII_LAN_7581_ETH_SRCPORT,
+ HSGMII_LAN_7581_USB_SRCPORT,
+};
+
+enum {
+ HSGMII_LAN_7583_ETH_SRCPORT = 0x16,
+ HSGMII_LAN_7583_PCIE_SRCPORT = 0x18,
+ HSGMII_LAN_7583_USB_SRCPORT,
+};
+
+enum {
+ XSI_PCIE0_VIP_PORT_MASK = BIT(22),
+ XSI_PCIE1_VIP_PORT_MASK = BIT(23),
+ XSI_USB_VIP_PORT_MASK = BIT(25),
+ XSI_ETH_VIP_PORT_MASK = BIT(24),
+};
+
+enum {
+ DEV_STATE_INITIALIZED,
+};
+
+enum {
+ CDM_CRSN_QSEL_Q1 = 1,
+ CDM_CRSN_QSEL_Q5 = 5,
+ CDM_CRSN_QSEL_Q6 = 6,
+ CDM_CRSN_QSEL_Q15 = 15,
+};
+
+enum {
+ CRSN_08 = 0x8,
+	CRSN_21 = 0x15, /* KA (keepalive) */
+ CRSN_22 = 0x16, /* hit bind and force route to CPU */
+ CRSN_24 = 0x18,
+ CRSN_25 = 0x19,
+};
+
+enum airoha_gdm_index {
+ AIROHA_GDM1_IDX = 1,
+ AIROHA_GDM2_IDX = 2,
+ AIROHA_GDM3_IDX = 3,
+ AIROHA_GDM4_IDX = 4,
+};
+
+enum {
+ FE_PSE_PORT_CDM1,
+ FE_PSE_PORT_GDM1,
+ FE_PSE_PORT_GDM2,
+ FE_PSE_PORT_GDM3,
+ FE_PSE_PORT_PPE1,
+ FE_PSE_PORT_CDM2,
+ FE_PSE_PORT_CDM3,
+ FE_PSE_PORT_CDM4,
+ FE_PSE_PORT_PPE2,
+ FE_PSE_PORT_GDM4,
+ FE_PSE_PORT_CDM5,
+ FE_PSE_PORT_DROP = 0xf,
+};
+
+enum tx_sched_mode {
+ TC_SCH_WRR8,
+ TC_SCH_SP,
+ TC_SCH_WRR7,
+ TC_SCH_WRR6,
+ TC_SCH_WRR5,
+ TC_SCH_WRR4,
+ TC_SCH_WRR3,
+ TC_SCH_WRR2,
+};
+
+enum trtcm_unit_type {
+ TRTCM_BYTE_UNIT,
+ TRTCM_PACKET_UNIT,
+};
+
+enum trtcm_param_type {
+ TRTCM_MISC_MODE, /* meter_en, pps_mode, tick_sel */
+ TRTCM_TOKEN_RATE_MODE,
+ TRTCM_BUCKETSIZE_SHIFT_MODE,
+ TRTCM_BUCKET_COUNTER_MODE,
+};
+
+enum trtcm_mode_type {
+ TRTCM_COMMIT_MODE,
+ TRTCM_PEAK_MODE,
+};
+
+enum trtcm_param {
+ TRTCM_TICK_SEL = BIT(0),
+ TRTCM_PKT_MODE = BIT(1),
+ TRTCM_METER_MODE = BIT(2),
+};
+
+#define MIN_TOKEN_SIZE 4096
+#define MAX_TOKEN_SIZE_OFFSET 17
+#define TRTCM_TOKEN_RATE_MASK GENMASK(23, 6)
+#define TRTCM_TOKEN_RATE_FRACTION_MASK GENMASK(5, 0)
+
+struct airoha_queue_entry {
+ union {
+ void *buf;
+ struct {
+ struct list_head list;
+ struct sk_buff *skb;
+ };
+ };
+ dma_addr_t dma_addr;
+ u16 dma_len;
+};
+
+struct airoha_queue {
+ struct airoha_qdma *qdma;
+
+ /* protect concurrent queue accesses */
+ spinlock_t lock;
+ struct airoha_queue_entry *entry;
+ struct airoha_qdma_desc *desc;
+ u16 head;
+ u16 tail;
+
+ int queued;
+ int ndesc;
+ int free_thr;
+ int buf_size;
+
+ struct napi_struct napi;
+ struct page_pool *page_pool;
+ struct sk_buff *skb;
+
+ struct list_head tx_list;
+};
+
+struct airoha_tx_irq_queue {
+ struct airoha_qdma *qdma;
+
+ struct napi_struct napi;
+
+ int size;
+ u32 *q;
+};
+
+struct airoha_hw_stats {
+ /* protect concurrent hw_stats accesses */
+ spinlock_t lock;
+ struct u64_stats_sync syncp;
+
+ /* get_stats64 */
+ u64 rx_ok_pkts;
+ u64 tx_ok_pkts;
+ u64 rx_ok_bytes;
+ u64 tx_ok_bytes;
+ u64 rx_multicast;
+ u64 rx_errors;
+ u64 rx_drops;
+ u64 tx_drops;
+ u64 rx_crc_error;
+ u64 rx_over_errors;
+ /* ethtool stats */
+ u64 tx_broadcast;
+ u64 tx_multicast;
+ u64 tx_len[7];
+ u64 rx_broadcast;
+ u64 rx_fragment;
+ u64 rx_jabber;
+ u64 rx_len[7];
+};
+
+enum {
+ AIROHA_FOE_STATE_INVALID,
+ AIROHA_FOE_STATE_UNBIND,
+ AIROHA_FOE_STATE_BIND,
+ AIROHA_FOE_STATE_FIN
+};
+
+enum {
+ PPE_PKT_TYPE_IPV4_HNAPT = 0,
+ PPE_PKT_TYPE_IPV4_ROUTE = 1,
+ PPE_PKT_TYPE_BRIDGE = 2,
+ PPE_PKT_TYPE_IPV4_DSLITE = 3,
+ PPE_PKT_TYPE_IPV6_ROUTE_3T = 4,
+ PPE_PKT_TYPE_IPV6_ROUTE_5T = 5,
+ PPE_PKT_TYPE_IPV6_6RD = 7,
+};
+
+#define AIROHA_FOE_MAC_SMAC_ID GENMASK(20, 16)
+#define AIROHA_FOE_MAC_PPPOE_ID GENMASK(15, 0)
+
+#define AIROHA_FOE_MAC_WDMA_QOS GENMASK(15, 12)
+#define AIROHA_FOE_MAC_WDMA_BAND BIT(11)
+#define AIROHA_FOE_MAC_WDMA_WCID GENMASK(10, 0)
+
+struct airoha_foe_mac_info_common {
+ u16 vlan1;
+ u16 etype;
+
+ u32 dest_mac_hi;
+
+ u16 vlan2;
+ u16 dest_mac_lo;
+
+ u32 src_mac_hi;
+};
+
+struct airoha_foe_mac_info {
+ struct airoha_foe_mac_info_common common;
+
+ u16 pppoe_id;
+ u16 src_mac_lo;
+
+ u32 meter;
+};
+
+#define AIROHA_FOE_IB1_UNBIND_PREBIND BIT(24)
+#define AIROHA_FOE_IB1_UNBIND_PACKETS GENMASK(23, 8)
+#define AIROHA_FOE_IB1_UNBIND_TIMESTAMP GENMASK(7, 0)
+
+#define AIROHA_FOE_IB1_BIND_STATIC BIT(31)
+#define AIROHA_FOE_IB1_BIND_UDP BIT(30)
+#define AIROHA_FOE_IB1_BIND_STATE GENMASK(29, 28)
+#define AIROHA_FOE_IB1_BIND_PACKET_TYPE GENMASK(27, 25)
+#define AIROHA_FOE_IB1_BIND_TTL BIT(24)
+#define AIROHA_FOE_IB1_BIND_TUNNEL_DECAP BIT(23)
+#define AIROHA_FOE_IB1_BIND_PPPOE BIT(22)
+#define AIROHA_FOE_IB1_BIND_VPM GENMASK(21, 20)
+#define AIROHA_FOE_IB1_BIND_VLAN_LAYER GENMASK(19, 16)
+#define AIROHA_FOE_IB1_BIND_KEEPALIVE BIT(15)
+#define AIROHA_FOE_IB1_BIND_TIMESTAMP GENMASK(14, 0)
+
+#define AIROHA_FOE_IB2_DSCP GENMASK(31, 24)
+#define AIROHA_FOE_IB2_PORT_AG GENMASK(23, 13)
+#define AIROHA_FOE_IB2_PCP BIT(12)
+#define AIROHA_FOE_IB2_MULTICAST BIT(11)
+#define AIROHA_FOE_IB2_FAST_PATH BIT(10)
+#define AIROHA_FOE_IB2_PSE_QOS BIT(9)
+#define AIROHA_FOE_IB2_PSE_PORT GENMASK(8, 5)
+#define AIROHA_FOE_IB2_NBQ GENMASK(4, 0)
+
+#define AIROHA_FOE_ACTDP GENMASK(31, 24)
+#define AIROHA_FOE_SHAPER_ID GENMASK(23, 16)
+#define AIROHA_FOE_CHANNEL GENMASK(15, 11)
+#define AIROHA_FOE_QID GENMASK(10, 8)
+#define AIROHA_FOE_DPI BIT(7)
+#define AIROHA_FOE_TUNNEL BIT(6)
+#define AIROHA_FOE_TUNNEL_ID GENMASK(5, 0)
+
+#define AIROHA_FOE_TUNNEL_MTU GENMASK(31, 16)
+#define AIROHA_FOE_ACNT_GRP3 GENMASK(15, 9)
+#define AIROHA_FOE_METER_GRP3 GENMASK(8, 5)
+#define AIROHA_FOE_METER_GRP2 GENMASK(4, 0)
+
+struct airoha_foe_bridge {
+ u32 dest_mac_hi;
+
+ u16 src_mac_hi;
+ u16 dest_mac_lo;
+
+ u32 src_mac_lo;
+
+ u32 ib2;
+
+ u32 rsv[5];
+
+ u32 data;
+
+ struct airoha_foe_mac_info l2;
+};
+
+struct airoha_foe_ipv4_tuple {
+ u32 src_ip;
+ u32 dest_ip;
+ union {
+ struct {
+ u16 dest_port;
+ u16 src_port;
+ };
+ struct {
+ u8 protocol;
+ u8 _pad[3]; /* fill with 0xa5a5a5 */
+ };
+ u32 ports;
+ };
+};
+
+struct airoha_foe_ipv4 {
+ struct airoha_foe_ipv4_tuple orig_tuple;
+
+ u32 ib2;
+
+ struct airoha_foe_ipv4_tuple new_tuple;
+
+ u32 rsv[2];
+
+ u32 data;
+
+ struct airoha_foe_mac_info l2;
+};
+
+struct airoha_foe_ipv4_dslite {
+ struct airoha_foe_ipv4_tuple ip4;
+
+ u32 ib2;
+
+ u8 flow_label[3];
+ u8 priority;
+
+ u32 rsv[4];
+
+ u32 data;
+
+ struct airoha_foe_mac_info l2;
+};
+
+struct airoha_foe_ipv6 {
+ u32 src_ip[4];
+ u32 dest_ip[4];
+
+ union {
+ struct {
+ u16 dest_port;
+ u16 src_port;
+ };
+ struct {
+ u8 protocol;
+ u8 pad[3];
+ };
+ u32 ports;
+ };
+
+ u32 data;
+
+ u32 ib2;
+
+ struct airoha_foe_mac_info_common l2;
+
+ u32 meter;
+};
+
+struct airoha_foe_entry {
+ union {
+ struct {
+ u32 ib1;
+ union {
+ struct airoha_foe_bridge bridge;
+ struct airoha_foe_ipv4 ipv4;
+ struct airoha_foe_ipv4_dslite dslite;
+ struct airoha_foe_ipv6 ipv6;
+ DECLARE_FLEX_ARRAY(u32, d);
+ };
+ };
+ u8 data[PPE_ENTRY_SIZE];
+ };
+};
+
+struct airoha_foe_stats {
+ u32 bytes;
+ u32 packets;
+};
+
+struct airoha_foe_stats64 {
+ u64 bytes;
+ u64 packets;
+};
+
+struct airoha_flow_data {
+ struct ethhdr eth;
+
+ union {
+ struct {
+ __be32 src_addr;
+ __be32 dst_addr;
+ } v4;
+
+ struct {
+ struct in6_addr src_addr;
+ struct in6_addr dst_addr;
+ } v6;
+ };
+
+ __be16 src_port;
+ __be16 dst_port;
+
+ struct {
+ struct {
+ u16 id;
+ __be16 proto;
+ } hdr[2];
+ u8 num;
+ } vlan;
+ struct {
+ u16 sid;
+ u8 num;
+ } pppoe;
+};
+
+enum airoha_flow_entry_type {
+ FLOW_TYPE_L4,
+ FLOW_TYPE_L2,
+ FLOW_TYPE_L2_SUBFLOW,
+};
+
+struct airoha_flow_table_entry {
+ union {
+ struct hlist_node list; /* PPE L3 flow entry */
+ struct {
+ struct rhash_head l2_node; /* L2 flow entry */
+ struct hlist_head l2_flows; /* PPE L2 subflows list */
+ };
+ };
+
+ struct hlist_node l2_subflow_node; /* PPE L2 subflow entry */
+ u32 hash;
+
+ struct airoha_foe_stats64 stats;
+ enum airoha_flow_entry_type type;
+
+ struct rhash_head node;
+ unsigned long cookie;
+
+	/* Must be last -- ends in a flexible-array member. */
+ struct airoha_foe_entry data;
+};
+
+struct airoha_wdma_info {
+ u8 idx;
+ u8 queue;
+ u16 wcid;
+ u8 bss;
+};
+
+/* RX queue to IRQ mapping: BIT(q) in IRQ(n) */
+#define RX_IRQ0_BANK_PIN_MASK 0x839f
+#define RX_IRQ1_BANK_PIN_MASK 0x7fe00000
+#define RX_IRQ2_BANK_PIN_MASK 0x20
+#define RX_IRQ3_BANK_PIN_MASK 0x40
+#define RX_IRQ_BANK_PIN_MASK(_n) \
+ (((_n) == 3) ? RX_IRQ3_BANK_PIN_MASK : \
+ ((_n) == 2) ? RX_IRQ2_BANK_PIN_MASK : \
+ ((_n) == 1) ? RX_IRQ1_BANK_PIN_MASK : \
+ RX_IRQ0_BANK_PIN_MASK)
+
+struct airoha_irq_bank {
+ struct airoha_qdma *qdma;
+
+ /* protect concurrent irqmask accesses */
+ spinlock_t irq_lock;
+ u32 irqmask[QDMA_INT_REG_MAX];
+ int irq;
+};
+
+struct airoha_qdma {
+ struct airoha_eth *eth;
+ void __iomem *regs;
+
+ atomic_t users;
+
+ struct airoha_irq_bank irq_banks[AIROHA_MAX_NUM_IRQ_BANKS];
+
+ struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
+
+ struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
+ struct airoha_queue q_rx[AIROHA_NUM_RX_RING];
+};
+
+struct airoha_gdm_port {
+ struct airoha_qdma *qdma;
+ struct net_device *dev;
+ int id;
+
+ struct airoha_hw_stats stats;
+
+ DECLARE_BITMAP(qos_sq_bmap, AIROHA_NUM_QOS_CHANNELS);
+
+ /* qos stats counters */
+ u64 cpu_tx_packets;
+ u64 fwd_tx_packets;
+
+ struct metadata_dst *dsa_meta[AIROHA_MAX_DSA_PORTS];
+};
+
+#define AIROHA_RXD4_PPE_CPU_REASON GENMASK(20, 16)
+#define AIROHA_RXD4_FOE_ENTRY GENMASK(15, 0)
+
+struct airoha_ppe {
+ struct airoha_ppe_dev dev;
+ struct airoha_eth *eth;
+
+ void *foe;
+ dma_addr_t foe_dma;
+
+ struct rhashtable l2_flows;
+
+ struct hlist_head *foe_flow;
+ u16 *foe_check_time;
+
+ struct airoha_foe_stats *foe_stats;
+ dma_addr_t foe_stats_dma;
+
+ struct dentry *debugfs_dir;
+};
+
+struct airoha_eth_soc_data {
+ u16 version;
+ const char * const *xsi_rsts_names;
+ int num_xsi_rsts;
+ int num_ppe;
+ struct {
+ int (*get_src_port_id)(struct airoha_gdm_port *port, int nbq);
+ } ops;
+};
+
+struct airoha_eth {
+ struct device *dev;
+
+ const struct airoha_eth_soc_data *soc;
+
+ unsigned long state;
+ void __iomem *fe_regs;
+
+ struct airoha_npu __rcu *npu;
+
+ struct airoha_ppe *ppe;
+ struct rhashtable flow_table;
+
+ struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS];
+ struct reset_control_bulk_data *xsi_rsts;
+
+ struct net_device *napi_dev;
+
+ struct airoha_qdma qdma[AIROHA_MAX_NUM_QDMA];
+ struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS];
+};
+
+u32 airoha_rr(void __iomem *base, u32 offset);
+void airoha_wr(void __iomem *base, u32 offset, u32 val);
+u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val);
+
+#define airoha_fe_rr(eth, offset) \
+ airoha_rr((eth)->fe_regs, (offset))
+#define airoha_fe_wr(eth, offset, val) \
+ airoha_wr((eth)->fe_regs, (offset), (val))
+#define airoha_fe_rmw(eth, offset, mask, val) \
+ airoha_rmw((eth)->fe_regs, (offset), (mask), (val))
+#define airoha_fe_set(eth, offset, val) \
+ airoha_rmw((eth)->fe_regs, (offset), 0, (val))
+#define airoha_fe_clear(eth, offset, val) \
+ airoha_rmw((eth)->fe_regs, (offset), (val), 0)
+
+#define airoha_qdma_rr(qdma, offset) \
+ airoha_rr((qdma)->regs, (offset))
+#define airoha_qdma_wr(qdma, offset, val) \
+ airoha_wr((qdma)->regs, (offset), (val))
+#define airoha_qdma_rmw(qdma, offset, mask, val) \
+ airoha_rmw((qdma)->regs, (offset), (mask), (val))
+#define airoha_qdma_set(qdma, offset, val) \
+ airoha_rmw((qdma)->regs, (offset), 0, (val))
+#define airoha_qdma_clear(qdma, offset, val) \
+ airoha_rmw((qdma)->regs, (offset), (val), 0)
+
+static inline bool airhoa_is_lan_gdm_port(struct airoha_gdm_port *port)
+{
+	/* The GDM1 port on the EN7581 SoC is connected to the LAN DSA switch.
+	 * GDM{2,3,4} can be used as WAN ports connected to an external
+	 * PHY module.
+ */
+ return port->id == 1;
+}
+
+static inline bool airoha_is_7581(struct airoha_eth *eth)
+{
+ return eth->soc->version == 0x7581;
+}
+
+static inline bool airoha_is_7583(struct airoha_eth *eth)
+{
+ return eth->soc->version == 0x7583;
+}
+
+bool airoha_is_valid_gdm_port(struct airoha_eth *eth,
+ struct airoha_gdm_port *port);
+
+bool airoha_ppe_is_enabled(struct airoha_eth *eth, int index);
+void airoha_ppe_check_skb(struct airoha_ppe_dev *dev, struct sk_buff *skb,
+ u16 hash, bool rx_wlan);
+int airoha_ppe_setup_tc_block_cb(struct airoha_ppe_dev *dev, void *type_data);
+int airoha_ppe_init(struct airoha_eth *eth);
+void airoha_ppe_deinit(struct airoha_eth *eth);
+void airoha_ppe_init_upd_mem(struct airoha_gdm_port *port);
+u32 airoha_ppe_get_total_num_entries(struct airoha_ppe *ppe);
+struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
+ u32 hash);
+void airoha_ppe_foe_entry_get_stats(struct airoha_ppe *ppe, u32 hash,
+ struct airoha_foe_stats64 *stats);
+
+#ifdef CONFIG_DEBUG_FS
+int airoha_ppe_debugfs_init(struct airoha_ppe *ppe);
+#else
+static inline int airoha_ppe_debugfs_init(struct airoha_ppe *ppe)
+{
+ return 0;
+}
+#endif
+
+#endif /* AIROHA_ETH_H */
diff --git a/drivers/net/ethernet/airoha/airoha_npu.c b/drivers/net/ethernet/airoha/airoha_npu.c
new file mode 100644
index 000000000000..68b7f9684dc7
--- /dev/null
+++ b/drivers/net/ethernet/airoha/airoha_npu.c
@@ -0,0 +1,783 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include <linux/devcoredump.h>
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/regmap.h>
+
+#include "airoha_eth.h"
+
+#define NPU_EN7581_FIRMWARE_DATA "airoha/en7581_npu_data.bin"
+#define NPU_EN7581_FIRMWARE_RV32 "airoha/en7581_npu_rv32.bin"
+#define NPU_AN7583_FIRMWARE_DATA "airoha/an7583_npu_data.bin"
+#define NPU_AN7583_FIRMWARE_RV32 "airoha/an7583_npu_rv32.bin"
+#define NPU_EN7581_FIRMWARE_RV32_MAX_SIZE 0x200000
+#define NPU_EN7581_FIRMWARE_DATA_MAX_SIZE 0x10000
+#define NPU_DUMP_SIZE 512
+
+#define REG_NPU_LOCAL_SRAM 0x0
+
+#define NPU_PC_BASE_ADDR 0x305000
+#define REG_PC_DBG(_n)			(NPU_PC_BASE_ADDR + ((_n) * 0x100))
+
+#define NPU_CLUSTER_BASE_ADDR 0x306000
+
+#define REG_CR_BOOT_TRIGGER (NPU_CLUSTER_BASE_ADDR + 0x000)
+#define REG_CR_BOOT_CONFIG (NPU_CLUSTER_BASE_ADDR + 0x004)
+#define REG_CR_BOOT_BASE(_n) (NPU_CLUSTER_BASE_ADDR + 0x020 + ((_n) << 2))
+
+#define NPU_MBOX_BASE_ADDR 0x30c000
+
+#define REG_CR_MBOX_INT_STATUS (NPU_MBOX_BASE_ADDR + 0x000)
+#define MBOX_INT_STATUS_MASK BIT(8)
+
+#define REG_CR_MBOX_INT_MASK(_n) (NPU_MBOX_BASE_ADDR + 0x004 + ((_n) << 2))
+#define REG_CR_MBQ0_CTRL(_n) (NPU_MBOX_BASE_ADDR + 0x030 + ((_n) << 2))
+#define REG_CR_MBQ8_CTRL(_n) (NPU_MBOX_BASE_ADDR + 0x0b0 + ((_n) << 2))
+#define REG_CR_NPU_MIB(_n) (NPU_MBOX_BASE_ADDR + 0x140 + ((_n) << 2))
+
+#define NPU_WLAN_BASE_ADDR 0x30d000
+
+#define REG_IRQ_STATUS (NPU_WLAN_BASE_ADDR + 0x030)
+#define REG_IRQ_RXDONE(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 2) + 0x034)
+#define NPU_IRQ_RX_MASK(_n) ((_n) == 1 ? BIT(17) : BIT(16))
+
+#define REG_TX_BASE(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x080)
+#define REG_TX_DSCP_NUM(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x084)
+#define REG_TX_CPU_IDX(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x088)
+#define REG_TX_DMA_IDX(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x08c)
+
+#define REG_RX_BASE(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x180)
+#define REG_RX_DSCP_NUM(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x184)
+#define REG_RX_CPU_IDX(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x188)
+#define REG_RX_DMA_IDX(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x18c)
+
+#define NPU_TIMER_BASE_ADDR 0x310100
+#define REG_WDT_TIMER_CTRL(_n) (NPU_TIMER_BASE_ADDR + ((_n) * 0x100))
+#define WDT_EN_MASK BIT(25)
+#define WDT_INTR_MASK BIT(21)
+
+enum {
+ NPU_OP_SET = 1,
+ NPU_OP_SET_NO_WAIT,
+ NPU_OP_GET,
+ NPU_OP_GET_NO_WAIT,
+};
+
+enum {
+ NPU_FUNC_WIFI,
+ NPU_FUNC_TUNNEL,
+ NPU_FUNC_NOTIFY,
+ NPU_FUNC_DBA,
+ NPU_FUNC_TR471,
+ NPU_FUNC_PPE,
+};
+
+enum {
+ NPU_MBOX_ERROR,
+ NPU_MBOX_SUCCESS,
+};
+
+enum {
+ PPE_FUNC_SET_WAIT,
+ PPE_FUNC_SET_WAIT_HWNAT_INIT,
+ PPE_FUNC_SET_WAIT_HWNAT_DEINIT,
+ PPE_FUNC_SET_WAIT_API,
+ PPE_FUNC_SET_WAIT_FLOW_STATS_SETUP,
+};
+
+enum {
+ PPE2_SRAM_SET_ENTRY,
+ PPE_SRAM_SET_ENTRY,
+ PPE_SRAM_SET_VAL,
+ PPE_SRAM_RESET_VAL,
+};
+
+enum {
+ QDMA_WAN_ETHER = 1,
+ QDMA_WAN_PON_XDSL,
+};
+
+struct airoha_npu_fw {
+ const char *name;
+ int max_size;
+};
+
+struct airoha_npu_soc_data {
+ struct airoha_npu_fw fw_rv32;
+ struct airoha_npu_fw fw_data;
+};
+
+#define MBOX_MSG_FUNC_ID GENMASK(14, 11)
+#define MBOX_MSG_STATIC_BUF BIT(5)
+#define MBOX_MSG_STATUS GENMASK(4, 2)
+#define MBOX_MSG_DONE BIT(1)
+#define MBOX_MSG_WAIT_RSP BIT(0)
+
+#define PPE_TYPE_L2B_IPV4 2
+#define PPE_TYPE_L2B_IPV4_IPV6 3
+
+struct ppe_mbox_data {
+ u32 func_type;
+ u32 func_id;
+ union {
+ struct {
+ u8 cds;
+ u8 xpon_hal_api;
+ u8 wan_xsi;
+ u8 ct_joyme4;
+ u8 max_packet;
+ u8 rsv[3];
+ u32 ppe_type;
+ u32 wan_mode;
+ u32 wan_sel;
+ } init_info;
+ struct {
+ u32 func_id;
+ u32 size;
+ u32 data;
+ } set_info;
+ struct {
+ u32 npu_stats_addr;
+ u32 foe_stats_addr;
+ } stats_info;
+ };
+};
+
+struct wlan_mbox_data {
+ u32 ifindex:4;
+ u32 func_type:4;
+ u32 func_id;
+ DECLARE_FLEX_ARRAY(u8, d);
+};
+
+static int airoha_npu_send_msg(struct airoha_npu *npu, int func_id,
+ void *p, int size)
+{
+ u16 core = 0; /* FIXME */
+ u32 val, offset = core << 4;
+ dma_addr_t dma_addr;
+ int ret;
+
+ dma_addr = dma_map_single(npu->dev, p, size, DMA_TO_DEVICE);
+ ret = dma_mapping_error(npu->dev, dma_addr);
+ if (ret)
+ return ret;
+
+ spin_lock_bh(&npu->cores[core].lock);
+
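+	/* Mailbox request: write the DMA address and length, bump the
+	 * sequence counter, then set the function id with the
+	 * wait-for-response flag and poll for completion.
+	 */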
+ regmap_write(npu->regmap, REG_CR_MBQ0_CTRL(0) + offset, dma_addr);
+ regmap_write(npu->regmap, REG_CR_MBQ0_CTRL(1) + offset, size);
+ regmap_read(npu->regmap, REG_CR_MBQ0_CTRL(2) + offset, &val);
+ regmap_write(npu->regmap, REG_CR_MBQ0_CTRL(2) + offset, val + 1);
+ val = FIELD_PREP(MBOX_MSG_FUNC_ID, func_id) | MBOX_MSG_WAIT_RSP;
+ regmap_write(npu->regmap, REG_CR_MBQ0_CTRL(3) + offset, val);
+
+ ret = regmap_read_poll_timeout_atomic(npu->regmap,
+ REG_CR_MBQ0_CTRL(3) + offset,
+ val, (val & MBOX_MSG_DONE),
+ 100, 100 * MSEC_PER_SEC);
+ if (!ret && FIELD_GET(MBOX_MSG_STATUS, val) != NPU_MBOX_SUCCESS)
+ ret = -EINVAL;
+
+ spin_unlock_bh(&npu->cores[core].lock);
+
+ dma_unmap_single(npu->dev, dma_addr, size, DMA_TO_DEVICE);
+
+ return ret;
+}
+
+static int airoha_npu_load_firmware(struct device *dev, void __iomem *addr,
+ const struct airoha_npu_fw *fw_info)
+{
+ const struct firmware *fw;
+ int ret;
+
+ ret = request_firmware(&fw, fw_info->name, dev);
+ if (ret)
+ return ret == -ENOENT ? -EPROBE_DEFER : ret;
+
+ if (fw->size > fw_info->max_size) {
+		dev_err(dev, "%s: fw size too big (%zu)\n",
+ fw_info->name, fw->size);
+ ret = -E2BIG;
+ goto out;
+ }
+
+ memcpy_toio(addr, fw->data, fw->size);
+out:
+ release_firmware(fw);
+
+ return ret;
+}
+
+static int airoha_npu_run_firmware(struct device *dev, void __iomem *base,
+ struct resource *res)
+{
+ const struct airoha_npu_soc_data *soc;
+ void __iomem *addr;
+ int ret;
+
+ soc = of_device_get_match_data(dev);
+ if (!soc)
+ return -EINVAL;
+
+ addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(addr))
+ return PTR_ERR(addr);
+
+ /* Load rv32 npu firmware */
+ ret = airoha_npu_load_firmware(dev, addr, &soc->fw_rv32);
+ if (ret)
+ return ret;
+
+ /* Load data npu firmware */
+ return airoha_npu_load_firmware(dev, base + REG_NPU_LOCAL_SRAM,
+ &soc->fw_data);
+}
+
+static irqreturn_t airoha_npu_mbox_handler(int irq, void *npu_instance)
+{
+ struct airoha_npu *npu = npu_instance;
+
+ /* clear mbox interrupt status */
+ regmap_write(npu->regmap, REG_CR_MBOX_INT_STATUS,
+ MBOX_INT_STATUS_MASK);
+
+ /* acknowledge npu */
+ regmap_update_bits(npu->regmap, REG_CR_MBQ8_CTRL(3),
+ MBOX_MSG_STATUS | MBOX_MSG_DONE, MBOX_MSG_DONE);
+
+ return IRQ_HANDLED;
+}
+
+static void airoha_npu_wdt_work(struct work_struct *work)
+{
+ struct airoha_npu_core *core;
+ struct airoha_npu *npu;
+ void *dump;
+ u32 val[3];
+ int c;
+
+ core = container_of(work, struct airoha_npu_core, wdt_work);
+ npu = core->npu;
+
+ dump = vzalloc(NPU_DUMP_SIZE);
+ if (!dump)
+ return;
+
+ c = core - &npu->cores[0];
+ regmap_bulk_read(npu->regmap, REG_PC_DBG(c), val, ARRAY_SIZE(val));
+ snprintf(dump, NPU_DUMP_SIZE, "PC: %08x SP: %08x LR: %08x\n",
+ val[0], val[1], val[2]);
+
+ dev_coredumpv(npu->dev, dump, NPU_DUMP_SIZE, GFP_KERNEL);
+}
+
+static irqreturn_t airoha_npu_wdt_handler(int irq, void *core_instance)
+{
+ struct airoha_npu_core *core = core_instance;
+ struct airoha_npu *npu = core->npu;
+ int c = core - &npu->cores[0];
+ u32 val;
+
+ regmap_set_bits(npu->regmap, REG_WDT_TIMER_CTRL(c), WDT_INTR_MASK);
+ if (!regmap_read(npu->regmap, REG_WDT_TIMER_CTRL(c), &val) &&
+ FIELD_GET(WDT_EN_MASK, val))
+ schedule_work(&core->wdt_work);
+
+ return IRQ_HANDLED;
+}
+
+static int airoha_npu_ppe_init(struct airoha_npu *npu)
+{
+ struct ppe_mbox_data *ppe_data;
+ int err;
+
+ ppe_data = kzalloc(sizeof(*ppe_data), GFP_KERNEL);
+ if (!ppe_data)
+ return -ENOMEM;
+
+ ppe_data->func_type = NPU_OP_SET;
+ ppe_data->func_id = PPE_FUNC_SET_WAIT_HWNAT_INIT;
+ ppe_data->init_info.ppe_type = PPE_TYPE_L2B_IPV4_IPV6;
+ ppe_data->init_info.wan_mode = QDMA_WAN_ETHER;
+
+ err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
+ sizeof(*ppe_data));
+ kfree(ppe_data);
+
+ return err;
+}
+
+static int airoha_npu_ppe_deinit(struct airoha_npu *npu)
+{
+ struct ppe_mbox_data *ppe_data;
+ int err;
+
+ ppe_data = kzalloc(sizeof(*ppe_data), GFP_KERNEL);
+ if (!ppe_data)
+ return -ENOMEM;
+
+ ppe_data->func_type = NPU_OP_SET;
+ ppe_data->func_id = PPE_FUNC_SET_WAIT_HWNAT_DEINIT;
+
+ err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
+ sizeof(*ppe_data));
+ kfree(ppe_data);
+
+ return err;
+}
+
+static int airoha_npu_ppe_flush_sram_entries(struct airoha_npu *npu,
+ dma_addr_t foe_addr,
+ int sram_num_entries)
+{
+ struct ppe_mbox_data *ppe_data;
+ int err;
+
+ ppe_data = kzalloc(sizeof(*ppe_data), GFP_KERNEL);
+ if (!ppe_data)
+ return -ENOMEM;
+
+ ppe_data->func_type = NPU_OP_SET;
+ ppe_data->func_id = PPE_FUNC_SET_WAIT_API;
+ ppe_data->set_info.func_id = PPE_SRAM_RESET_VAL;
+ ppe_data->set_info.data = foe_addr;
+ ppe_data->set_info.size = sram_num_entries;
+
+ err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
+ sizeof(*ppe_data));
+ kfree(ppe_data);
+
+ return err;
+}
+
+static int airoha_npu_foe_commit_entry(struct airoha_npu *npu,
+ dma_addr_t foe_addr,
+ u32 entry_size, u32 hash, bool ppe2)
+{
+ struct ppe_mbox_data *ppe_data;
+ int err;
+
+ ppe_data = kzalloc(sizeof(*ppe_data), GFP_ATOMIC);
+ if (!ppe_data)
+ return -ENOMEM;
+
+ ppe_data->func_type = NPU_OP_SET;
+ ppe_data->func_id = PPE_FUNC_SET_WAIT_API;
+ ppe_data->set_info.data = foe_addr;
+ ppe_data->set_info.size = entry_size;
+ ppe_data->set_info.func_id = ppe2 ? PPE2_SRAM_SET_ENTRY
+ : PPE_SRAM_SET_ENTRY;
+
+ err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
+ sizeof(*ppe_data));
+ if (err)
+ goto out;
+
+ ppe_data->set_info.func_id = PPE_SRAM_SET_VAL;
+ ppe_data->set_info.data = hash;
+ ppe_data->set_info.size = sizeof(u32);
+
+ err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
+ sizeof(*ppe_data));
+out:
+ kfree(ppe_data);
+
+ return err;
+}
+
+static int airoha_npu_ppe_stats_setup(struct airoha_npu *npu,
+ dma_addr_t foe_stats_addr,
+ u32 num_stats_entries)
+{
+ int err, size = num_stats_entries * sizeof(*npu->stats);
+ struct ppe_mbox_data *ppe_data;
+
+ ppe_data = kzalloc(sizeof(*ppe_data), GFP_ATOMIC);
+ if (!ppe_data)
+ return -ENOMEM;
+
+ ppe_data->func_type = NPU_OP_SET;
+ ppe_data->func_id = PPE_FUNC_SET_WAIT_FLOW_STATS_SETUP;
+ ppe_data->stats_info.foe_stats_addr = foe_stats_addr;
+
+ err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
+ sizeof(*ppe_data));
+ if (err)
+ goto out;
+
+ npu->stats = devm_ioremap(npu->dev,
+ ppe_data->stats_info.npu_stats_addr,
+ size);
+ if (!npu->stats)
+ err = -ENOMEM;
+out:
+ kfree(ppe_data);
+
+ return err;
+}
+
+static int airoha_npu_wlan_msg_send(struct airoha_npu *npu, int ifindex,
+ enum airoha_npu_wlan_set_cmd func_id,
+ void *data, int data_len, gfp_t gfp)
+{
+ struct wlan_mbox_data *wlan_data;
+ int err, len;
+
+ len = sizeof(*wlan_data) + data_len;
+ wlan_data = kzalloc(len, gfp);
+ if (!wlan_data)
+ return -ENOMEM;
+
+ wlan_data->ifindex = ifindex;
+ wlan_data->func_type = NPU_OP_SET;
+ wlan_data->func_id = func_id;
+ memcpy(wlan_data->d, data, data_len);
+
+ err = airoha_npu_send_msg(npu, NPU_FUNC_WIFI, wlan_data, len);
+ kfree(wlan_data);
+
+ return err;
+}
+
+static int airoha_npu_wlan_msg_get(struct airoha_npu *npu, int ifindex,
+ enum airoha_npu_wlan_get_cmd func_id,
+ void *data, int data_len, gfp_t gfp)
+{
+ struct wlan_mbox_data *wlan_data;
+ int err, len;
+
+ len = sizeof(*wlan_data) + data_len;
+ wlan_data = kzalloc(len, gfp);
+ if (!wlan_data)
+ return -ENOMEM;
+
+ wlan_data->ifindex = ifindex;
+ wlan_data->func_type = NPU_OP_GET;
+ wlan_data->func_id = func_id;
+
+ err = airoha_npu_send_msg(npu, NPU_FUNC_WIFI, wlan_data, len);
+ if (!err)
+ memcpy(data, wlan_data->d, data_len);
+ kfree(wlan_data);
+
+ return err;
+}
+
+static int
+airoha_npu_wlan_set_reserved_memory(struct airoha_npu *npu,
+ int ifindex, const char *name,
+ enum airoha_npu_wlan_set_cmd func_id)
+{
+ struct device *dev = npu->dev;
+ struct resource res;
+ int err;
+ u32 val;
+
+ err = of_reserved_mem_region_to_resource_byname(dev->of_node, name,
+ &res);
+ if (err)
+ return err;
+
+ val = res.start;
+ return airoha_npu_wlan_msg_send(npu, ifindex, func_id, &val,
+ sizeof(val), GFP_KERNEL);
+}
+
+static int airoha_npu_wlan_init_memory(struct airoha_npu *npu)
+{
+ enum airoha_npu_wlan_set_cmd cmd = WLAN_FUNC_SET_WAIT_NPU_BAND0_ONCPU;
+ u32 val = 0;
+ int err;
+
+ err = airoha_npu_wlan_msg_send(npu, 1, cmd, &val, sizeof(val),
+ GFP_KERNEL);
+ if (err)
+ return err;
+
+ cmd = WLAN_FUNC_SET_WAIT_TX_BUF_CHECK_ADDR;
+ err = airoha_npu_wlan_set_reserved_memory(npu, 0, "tx-bufid", cmd);
+ if (err)
+ return err;
+
+ cmd = WLAN_FUNC_SET_WAIT_PKT_BUF_ADDR;
+ err = airoha_npu_wlan_set_reserved_memory(npu, 0, "pkt", cmd);
+ if (err)
+ return err;
+
+ cmd = WLAN_FUNC_SET_WAIT_TX_PKT_BUF_ADDR;
+ err = airoha_npu_wlan_set_reserved_memory(npu, 0, "tx-pkt", cmd);
+ if (err)
+ return err;
+
+ cmd = WLAN_FUNC_SET_WAIT_IS_FORCE_TO_CPU;
+ return airoha_npu_wlan_msg_send(npu, 0, cmd, &val, sizeof(val),
+ GFP_KERNEL);
+}
+
+static u32 airoha_npu_wlan_queue_addr_get(struct airoha_npu *npu, int qid,
+ bool xmit)
+{
+ if (xmit)
+ return REG_TX_BASE(qid + 2);
+
+ return REG_RX_BASE(qid);
+}
+
+static void airoha_npu_wlan_irq_status_set(struct airoha_npu *npu, u32 val)
+{
+ regmap_write(npu->regmap, REG_IRQ_STATUS, val);
+}
+
+static u32 airoha_npu_wlan_irq_status_get(struct airoha_npu *npu, int q)
+{
+ u32 val;
+
+ regmap_read(npu->regmap, REG_IRQ_STATUS, &val);
+ return val;
+}
+
+static void airoha_npu_wlan_irq_enable(struct airoha_npu *npu, int q)
+{
+ regmap_set_bits(npu->regmap, REG_IRQ_RXDONE(q), NPU_IRQ_RX_MASK(q));
+}
+
+static void airoha_npu_wlan_irq_disable(struct airoha_npu *npu, int q)
+{
+ regmap_clear_bits(npu->regmap, REG_IRQ_RXDONE(q), NPU_IRQ_RX_MASK(q));
+}
+
+struct airoha_npu *airoha_npu_get(struct device *dev)
+{
+ struct platform_device *pdev;
+ struct device_node *np;
+ struct airoha_npu *npu;
+
+ np = of_parse_phandle(dev->of_node, "airoha,npu", 0);
+ if (!np)
+ return ERR_PTR(-ENODEV);
+
+ pdev = of_find_device_by_node(np);
+
+ if (!pdev) {
+ dev_err(dev, "cannot find device node %s\n", np->name);
+ of_node_put(np);
+ return ERR_PTR(-ENODEV);
+ }
+ of_node_put(np);
+
+ if (!try_module_get(THIS_MODULE)) {
+ dev_err(dev, "failed to get the device driver module\n");
+ npu = ERR_PTR(-ENODEV);
+ goto error_pdev_put;
+ }
+
+ npu = platform_get_drvdata(pdev);
+ if (!npu) {
+ npu = ERR_PTR(-ENODEV);
+ goto error_module_put;
+ }
+
+ if (!device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER)) {
+ dev_err(&pdev->dev,
+ "failed to create device link to consumer %s\n",
+ dev_name(dev));
+ npu = ERR_PTR(-EINVAL);
+ goto error_module_put;
+ }
+
+ return npu;
+
+error_module_put:
+ module_put(THIS_MODULE);
+error_pdev_put:
+ platform_device_put(pdev);
+
+ return npu;
+}
+EXPORT_SYMBOL_GPL(airoha_npu_get);
+
+void airoha_npu_put(struct airoha_npu *npu)
+{
+ module_put(THIS_MODULE);
+ put_device(npu->dev);
+}
+EXPORT_SYMBOL_GPL(airoha_npu_put);
+
+static const struct airoha_npu_soc_data en7581_npu_soc_data = {
+ .fw_rv32 = {
+ .name = NPU_EN7581_FIRMWARE_RV32,
+ .max_size = NPU_EN7581_FIRMWARE_RV32_MAX_SIZE,
+ },
+ .fw_data = {
+ .name = NPU_EN7581_FIRMWARE_DATA,
+ .max_size = NPU_EN7581_FIRMWARE_DATA_MAX_SIZE,
+ },
+};
+
+static const struct airoha_npu_soc_data an7583_npu_soc_data = {
+ .fw_rv32 = {
+ .name = NPU_AN7583_FIRMWARE_RV32,
+ .max_size = NPU_EN7581_FIRMWARE_RV32_MAX_SIZE,
+ },
+ .fw_data = {
+ .name = NPU_AN7583_FIRMWARE_DATA,
+ .max_size = NPU_EN7581_FIRMWARE_DATA_MAX_SIZE,
+ },
+};
+
+static const struct of_device_id of_airoha_npu_match[] = {
+ { .compatible = "airoha,en7581-npu", .data = &en7581_npu_soc_data },
+ { .compatible = "airoha,an7583-npu", .data = &an7583_npu_soc_data },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_airoha_npu_match);
+
+static const struct regmap_config regmap_config = {
+ .name = "npu",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .disable_locking = true,
+};
+
+static int airoha_npu_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct airoha_npu *npu;
+ struct resource res;
+ void __iomem *base;
+ int i, irq, err;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ npu = devm_kzalloc(dev, sizeof(*npu), GFP_KERNEL);
+ if (!npu)
+ return -ENOMEM;
+
+ npu->dev = dev;
+ npu->ops.ppe_init = airoha_npu_ppe_init;
+ npu->ops.ppe_deinit = airoha_npu_ppe_deinit;
+ npu->ops.ppe_init_stats = airoha_npu_ppe_stats_setup;
+ npu->ops.ppe_flush_sram_entries = airoha_npu_ppe_flush_sram_entries;
+ npu->ops.ppe_foe_commit_entry = airoha_npu_foe_commit_entry;
+ npu->ops.wlan_init_reserved_memory = airoha_npu_wlan_init_memory;
+ npu->ops.wlan_send_msg = airoha_npu_wlan_msg_send;
+ npu->ops.wlan_get_msg = airoha_npu_wlan_msg_get;
+ npu->ops.wlan_get_queue_addr = airoha_npu_wlan_queue_addr_get;
+ npu->ops.wlan_set_irq_status = airoha_npu_wlan_irq_status_set;
+ npu->ops.wlan_get_irq_status = airoha_npu_wlan_irq_status_get;
+ npu->ops.wlan_enable_irq = airoha_npu_wlan_irq_enable;
+ npu->ops.wlan_disable_irq = airoha_npu_wlan_irq_disable;
+
+ npu->regmap = devm_regmap_init_mmio(dev, base, &regmap_config);
+ if (IS_ERR(npu->regmap))
+ return PTR_ERR(npu->regmap);
+
+ err = of_reserved_mem_region_to_resource(dev->of_node, 0, &res);
+ if (err)
+ return err;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ err = devm_request_irq(dev, irq, airoha_npu_mbox_handler,
+ IRQF_SHARED, "airoha-npu-mbox", npu);
+ if (err)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(npu->cores); i++) {
+ struct airoha_npu_core *core = &npu->cores[i];
+
+ spin_lock_init(&core->lock);
+ core->npu = npu;
+
+ irq = platform_get_irq(pdev, i + 1);
+ if (irq < 0)
+ return irq;
+
+ err = devm_request_irq(dev, irq, airoha_npu_wdt_handler,
+ IRQF_SHARED, "airoha-npu-wdt", core);
+ if (err)
+ return err;
+
+ INIT_WORK(&core->wdt_work, airoha_npu_wdt_work);
+ }
+
+ /* wlan IRQ lines */
+ for (i = 0; i < ARRAY_SIZE(npu->irqs); i++) {
+ irq = platform_get_irq(pdev, i + ARRAY_SIZE(npu->cores) + 1);
+ if (irq < 0)
+ return irq;
+
+ npu->irqs[i] = irq;
+ }
+
+ err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
+
+ err = airoha_npu_run_firmware(dev, base, &res);
+ if (err)
+ return dev_err_probe(dev, err, "failed to run npu firmware\n");
+
+ regmap_write(npu->regmap, REG_CR_NPU_MIB(10),
+ res.start + NPU_EN7581_FIRMWARE_RV32_MAX_SIZE);
+ regmap_write(npu->regmap, REG_CR_NPU_MIB(11), 0x40000); /* SRAM 256K */
+ regmap_write(npu->regmap, REG_CR_NPU_MIB(12), 0);
+ regmap_write(npu->regmap, REG_CR_NPU_MIB(21), 1);
+ msleep(100);
+
+	/* set the boot address for each NPU core */
+ for (i = 0; i < NPU_NUM_CORES; i++)
+ regmap_write(npu->regmap, REG_CR_BOOT_BASE(i), res.start);
+ usleep_range(1000, 2000);
+
+ /* enable NPU cores */
+ regmap_write(npu->regmap, REG_CR_BOOT_CONFIG, 0xff);
+ regmap_write(npu->regmap, REG_CR_BOOT_TRIGGER, 0x1);
+ msleep(100);
+
+ platform_set_drvdata(pdev, npu);
+
+ return 0;
+}
+
+static void airoha_npu_remove(struct platform_device *pdev)
+{
+ struct airoha_npu *npu = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(npu->cores); i++)
+ cancel_work_sync(&npu->cores[i].wdt_work);
+}
+
+static struct platform_driver airoha_npu_driver = {
+ .probe = airoha_npu_probe,
+ .remove = airoha_npu_remove,
+ .driver = {
+ .name = "airoha-npu",
+ .of_match_table = of_airoha_npu_match,
+ },
+};
+module_platform_driver(airoha_npu_driver);
+
+MODULE_FIRMWARE(NPU_EN7581_FIRMWARE_DATA);
+MODULE_FIRMWARE(NPU_EN7581_FIRMWARE_RV32);
+MODULE_FIRMWARE(NPU_AN7583_FIRMWARE_DATA);
+MODULE_FIRMWARE(NPU_AN7583_FIRMWARE_RV32);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_DESCRIPTION("Airoha Network Processor Unit driver");
diff --git a/drivers/net/ethernet/airoha/airoha_ppe.c b/drivers/net/ethernet/airoha/airoha_ppe.c
new file mode 100644
index 000000000000..0caabb0c3aa0
--- /dev/null
+++ b/drivers/net/ethernet/airoha/airoha_ppe.c
@@ -0,0 +1,1561 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/rhashtable.h>
+#include <net/ipv6.h>
+#include <net/pkt_cls.h>
+
+#include "airoha_regs.h"
+#include "airoha_eth.h"
+
+static DEFINE_MUTEX(flow_offload_mutex);
+static DEFINE_SPINLOCK(ppe_lock);
+
+static const struct rhashtable_params airoha_flow_table_params = {
+ .head_offset = offsetof(struct airoha_flow_table_entry, node),
+ .key_offset = offsetof(struct airoha_flow_table_entry, cookie),
+ .key_len = sizeof(unsigned long),
+ .automatic_shrinking = true,
+};
+
+static const struct rhashtable_params airoha_l2_flow_table_params = {
+ .head_offset = offsetof(struct airoha_flow_table_entry, l2_node),
+ .key_offset = offsetof(struct airoha_flow_table_entry, data.bridge),
+ .key_len = 2 * ETH_ALEN,
+ .automatic_shrinking = true,
+};
+
+static int airoha_ppe_get_num_stats_entries(struct airoha_ppe *ppe)
+{
+ if (!IS_ENABLED(CONFIG_NET_AIROHA_FLOW_STATS))
+ return -EOPNOTSUPP;
+
+ if (airoha_is_7583(ppe->eth))
+ return -EOPNOTSUPP;
+
+ return PPE_STATS_NUM_ENTRIES;
+}
+
+static int airoha_ppe_get_total_num_stats_entries(struct airoha_ppe *ppe)
+{
+ int num_stats = airoha_ppe_get_num_stats_entries(ppe);
+
+ if (num_stats > 0) {
+ struct airoha_eth *eth = ppe->eth;
+
+ num_stats = num_stats * eth->soc->num_ppe;
+ }
+
+ return num_stats;
+}
+
+static u32 airoha_ppe_get_total_sram_num_entries(struct airoha_ppe *ppe)
+{
+ struct airoha_eth *eth = ppe->eth;
+
+ return PPE_SRAM_NUM_ENTRIES * eth->soc->num_ppe;
+}
+
+u32 airoha_ppe_get_total_num_entries(struct airoha_ppe *ppe)
+{
+ u32 sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe);
+
+ return sram_num_entries + PPE_DRAM_NUM_ENTRIES;
+}
+
+bool airoha_ppe_is_enabled(struct airoha_eth *eth, int index)
+{
+ if (index >= eth->soc->num_ppe)
+ return false;
+
+ return airoha_fe_rr(eth, REG_PPE_GLO_CFG(index)) & PPE_GLO_CFG_EN_MASK;
+}
+
+static u32 airoha_ppe_get_timestamp(struct airoha_ppe *ppe)
+{
+ u16 timestamp = airoha_fe_rr(ppe->eth, REG_FE_FOE_TS);
+
+ return FIELD_GET(AIROHA_FOE_IB1_BIND_TIMESTAMP, timestamp);
+}
+
+static void airoha_ppe_hw_init(struct airoha_ppe *ppe)
+{
+ u32 sram_ppe_num_data_entries = PPE_SRAM_NUM_ENTRIES, sram_num_entries;
+ u32 sram_tb_size, dram_num_entries;
+ struct airoha_eth *eth = ppe->eth;
+ int i, sram_num_stats_entries;
+
+ sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe);
+ sram_tb_size = sram_num_entries * sizeof(struct airoha_foe_entry);
+ dram_num_entries = PPE_RAM_NUM_ENTRIES_SHIFT(PPE_DRAM_NUM_ENTRIES);
+
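+	/* When flow stats are supported, part of the PPE SRAM is reserved
+	 * for the stats entries and subtracted from the SRAM data table.
+	 */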
+ sram_num_stats_entries = airoha_ppe_get_num_stats_entries(ppe);
+ if (sram_num_stats_entries > 0)
+ sram_ppe_num_data_entries -= sram_num_stats_entries;
+ sram_ppe_num_data_entries =
+ PPE_RAM_NUM_ENTRIES_SHIFT(sram_ppe_num_data_entries);
+
+ for (i = 0; i < eth->soc->num_ppe; i++) {
+ int p;
+
+ airoha_fe_wr(eth, REG_PPE_TB_BASE(i),
+ ppe->foe_dma + sram_tb_size);
+
+ airoha_fe_rmw(eth, REG_PPE_BND_AGE0(i),
+ PPE_BIND_AGE0_DELTA_NON_L4 |
+ PPE_BIND_AGE0_DELTA_UDP,
+ FIELD_PREP(PPE_BIND_AGE0_DELTA_NON_L4, 1) |
+ FIELD_PREP(PPE_BIND_AGE0_DELTA_UDP, 12));
+ airoha_fe_rmw(eth, REG_PPE_BND_AGE1(i),
+ PPE_BIND_AGE1_DELTA_TCP_FIN |
+ PPE_BIND_AGE1_DELTA_TCP,
+ FIELD_PREP(PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
+ FIELD_PREP(PPE_BIND_AGE1_DELTA_TCP, 7));
+
+ airoha_fe_rmw(eth, REG_PPE_TB_HASH_CFG(i),
+ PPE_SRAM_TABLE_EN_MASK |
+ PPE_SRAM_HASH1_EN_MASK |
+ PPE_DRAM_TABLE_EN_MASK |
+ PPE_SRAM_HASH0_MODE_MASK |
+ PPE_SRAM_HASH1_MODE_MASK |
+ PPE_DRAM_HASH0_MODE_MASK |
+ PPE_DRAM_HASH1_MODE_MASK,
+ FIELD_PREP(PPE_SRAM_TABLE_EN_MASK, 1) |
+ FIELD_PREP(PPE_SRAM_HASH1_EN_MASK, 1) |
+ FIELD_PREP(PPE_SRAM_HASH1_MODE_MASK, 1) |
+ FIELD_PREP(PPE_DRAM_HASH1_MODE_MASK, 3));
+
+ airoha_fe_rmw(eth, REG_PPE_TB_CFG(i),
+ PPE_TB_CFG_SEARCH_MISS_MASK |
+ PPE_SRAM_TB_NUM_ENTRY_MASK |
+ PPE_DRAM_TB_NUM_ENTRY_MASK |
+ PPE_TB_CFG_KEEPALIVE_MASK |
+ PPE_TB_ENTRY_SIZE_MASK,
+ FIELD_PREP(PPE_TB_CFG_SEARCH_MISS_MASK, 3) |
+ FIELD_PREP(PPE_TB_ENTRY_SIZE_MASK, 0) |
+ FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
+ sram_ppe_num_data_entries) |
+ FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
+ dram_num_entries));
+
+ airoha_fe_wr(eth, REG_PPE_HASH_SEED(i), PPE_HASH_SEED);
+
+ for (p = 0; p < ARRAY_SIZE(eth->ports); p++)
+ airoha_fe_rmw(eth, REG_PPE_MTU(i, p),
+ FP0_EGRESS_MTU_MASK |
+ FP1_EGRESS_MTU_MASK,
+ FIELD_PREP(FP0_EGRESS_MTU_MASK,
+ AIROHA_MAX_MTU) |
+ FIELD_PREP(FP1_EGRESS_MTU_MASK,
+ AIROHA_MAX_MTU));
+ }
+}
+
+static void airoha_ppe_flow_mangle_eth(const struct flow_action_entry *act, void *eth)
+{
+ void *dest = eth + act->mangle.offset;
+ const void *src = &act->mangle.val;
+
+ if (act->mangle.offset > 8)
+ return;
+
+ if (act->mangle.mask == 0xffff) {
+ src += 2;
+ dest += 2;
+ }
+
+ memcpy(dest, src, act->mangle.mask ? 2 : 4);
+}
+
+static int airoha_ppe_flow_mangle_ports(const struct flow_action_entry *act,
+ struct airoha_flow_data *data)
+{
+ u32 val = be32_to_cpu((__force __be32)act->mangle.val);
+
+ switch (act->mangle.offset) {
+ case 0:
+ if ((__force __be32)act->mangle.mask == ~cpu_to_be32(0xffff))
+ data->dst_port = cpu_to_be16(val);
+ else
+ data->src_port = cpu_to_be16(val >> 16);
+ break;
+ case 2:
+ data->dst_port = cpu_to_be16(val);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int airoha_ppe_flow_mangle_ipv4(const struct flow_action_entry *act,
+ struct airoha_flow_data *data)
+{
+ __be32 *dest;
+
+ switch (act->mangle.offset) {
+ case offsetof(struct iphdr, saddr):
+ dest = &data->v4.src_addr;
+ break;
+ case offsetof(struct iphdr, daddr):
+ dest = &data->v4.dst_addr;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ memcpy(dest, &act->mangle.val, sizeof(u32));
+
+ return 0;
+}
+
+static int airoha_ppe_get_wdma_info(struct net_device *dev, const u8 *addr,
+ struct airoha_wdma_info *info)
+{
+ struct net_device_path_stack stack;
+ struct net_device_path *path;
+ int err;
+
+ if (!dev)
+ return -ENODEV;
+
+ err = dev_fill_forward_path(dev, addr, &stack);
+ if (err)
+ return err;
+
+ path = &stack.path[stack.num_paths - 1];
+ if (path->type != DEV_PATH_MTK_WDMA)
+ return -1;
+
+ info->idx = path->mtk_wdma.wdma_idx;
+ info->bss = path->mtk_wdma.bss;
+ info->wcid = path->mtk_wdma.wcid;
+
+ return 0;
+}
+
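+/* Return the DSA user port index and replace *dev with the DSA conduit
+ * device, or -ENODEV if @dev is not a DSA user port.
+ */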
+static int airoha_get_dsa_port(struct net_device **dev)
+{
+#if IS_ENABLED(CONFIG_NET_DSA)
+ struct dsa_port *dp = dsa_port_from_netdev(*dev);
+
+ if (IS_ERR(dp))
+ return -ENODEV;
+
+ *dev = dsa_port_to_conduit(dp);
+ return dp->index;
+#else
+ return -ENODEV;
+#endif
+}
+
+static void airoha_ppe_foe_set_bridge_addrs(struct airoha_foe_bridge *br,
+ struct ethhdr *eh)
+{
+ br->dest_mac_hi = get_unaligned_be32(eh->h_dest);
+ br->dest_mac_lo = get_unaligned_be16(eh->h_dest + 4);
+ br->src_mac_hi = get_unaligned_be16(eh->h_source);
+ br->src_mac_lo = get_unaligned_be32(eh->h_source + 2);
+}
+
+static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
+ struct airoha_foe_entry *hwe,
+ struct net_device *dev, int type,
+ struct airoha_flow_data *data,
+ int l4proto)
+{
+ u32 qdata = FIELD_PREP(AIROHA_FOE_SHAPER_ID, 0x7f), ports_pad, val;
+ int wlan_etype = -EINVAL, dsa_port = airoha_get_dsa_port(&dev);
+ struct airoha_foe_mac_info_common *l2;
+ u8 smac_id = 0xf;
+
+ memset(hwe, 0, sizeof(*hwe));
+
+ val = FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE, AIROHA_FOE_STATE_BIND) |
+ FIELD_PREP(AIROHA_FOE_IB1_BIND_PACKET_TYPE, type) |
+ FIELD_PREP(AIROHA_FOE_IB1_BIND_UDP, l4proto == IPPROTO_UDP) |
+ FIELD_PREP(AIROHA_FOE_IB1_BIND_VLAN_LAYER, data->vlan.num) |
+ FIELD_PREP(AIROHA_FOE_IB1_BIND_VPM, data->vlan.num) |
+ FIELD_PREP(AIROHA_FOE_IB1_BIND_PPPOE, data->pppoe.num) |
+ AIROHA_FOE_IB1_BIND_TTL;
+ hwe->ib1 = val;
+
+ val = FIELD_PREP(AIROHA_FOE_IB2_PORT_AG, 0x1f);
+ if (dev) {
+ struct airoha_wdma_info info = {};
+
+ if (!airoha_ppe_get_wdma_info(dev, data->eth.h_dest, &info)) {
+ val |= FIELD_PREP(AIROHA_FOE_IB2_NBQ, info.idx) |
+ FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT,
+ FE_PSE_PORT_CDM4);
+ qdata |= FIELD_PREP(AIROHA_FOE_ACTDP, info.bss);
+ wlan_etype = FIELD_PREP(AIROHA_FOE_MAC_WDMA_BAND,
+ info.idx) |
+ FIELD_PREP(AIROHA_FOE_MAC_WDMA_WCID,
+ info.wcid);
+ } else {
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ u8 pse_port;
+
+ if (!airoha_is_valid_gdm_port(eth, port))
+ return -EINVAL;
+
+ if (dsa_port >= 0 || eth->ports[1])
+ pse_port = port->id == 4 ? FE_PSE_PORT_GDM4
+ : port->id;
+ else
+ pse_port = 2; /* uplink relies on GDM2
+ * loopback
+ */
+
+ val |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, pse_port) |
+ AIROHA_FOE_IB2_PSE_QOS;
+			/* For downlink traffic, consume SRAM memory for the
+			 * hw forwarding descriptor queue.
+			 */
+ if (airhoa_is_lan_gdm_port(port))
+ val |= AIROHA_FOE_IB2_FAST_PATH;
+ if (dsa_port >= 0)
+ val |= FIELD_PREP(AIROHA_FOE_IB2_NBQ,
+ dsa_port);
+
+ smac_id = port->id;
+ }
+ }
+
+ if (is_multicast_ether_addr(data->eth.h_dest))
+ val |= AIROHA_FOE_IB2_MULTICAST;
+
+ ports_pad = 0xa5a5a500 | (l4proto & 0xff);
+ if (type == PPE_PKT_TYPE_IPV4_ROUTE)
+ hwe->ipv4.orig_tuple.ports = ports_pad;
+ if (type == PPE_PKT_TYPE_IPV6_ROUTE_3T)
+ hwe->ipv6.ports = ports_pad;
+
+ if (type == PPE_PKT_TYPE_BRIDGE) {
+ airoha_ppe_foe_set_bridge_addrs(&hwe->bridge, &data->eth);
+ hwe->bridge.data = qdata;
+ hwe->bridge.ib2 = val;
+ l2 = &hwe->bridge.l2.common;
+ } else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
+ hwe->ipv6.data = qdata;
+ hwe->ipv6.ib2 = val;
+ l2 = &hwe->ipv6.l2;
+ l2->etype = ETH_P_IPV6;
+ } else {
+ hwe->ipv4.data = qdata;
+ hwe->ipv4.ib2 = val;
+ l2 = &hwe->ipv4.l2.common;
+ l2->etype = ETH_P_IP;
+ }
+
+ l2->dest_mac_hi = get_unaligned_be32(data->eth.h_dest);
+ l2->dest_mac_lo = get_unaligned_be16(data->eth.h_dest + 4);
+ if (type <= PPE_PKT_TYPE_IPV4_DSLITE) {
+ struct airoha_foe_mac_info *mac_info;
+
+ l2->src_mac_hi = get_unaligned_be32(data->eth.h_source);
+ hwe->ipv4.l2.src_mac_lo =
+ get_unaligned_be16(data->eth.h_source + 4);
+
+ mac_info = (struct airoha_foe_mac_info *)l2;
+ mac_info->pppoe_id = data->pppoe.sid;
+ } else {
+ l2->src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID, smac_id) |
+ FIELD_PREP(AIROHA_FOE_MAC_PPPOE_ID,
+ data->pppoe.sid);
+ }
+
+ if (data->vlan.num) {
+ l2->vlan1 = data->vlan.hdr[0].id;
+ if (data->vlan.num == 2)
+ l2->vlan2 = data->vlan.hdr[1].id;
+ }
+
+ if (wlan_etype >= 0) {
+ l2->etype = wlan_etype;
+ } else if (dsa_port >= 0) {
+ l2->etype = BIT(dsa_port);
+ l2->etype |= !data->vlan.num ? BIT(15) : 0;
+ } else if (data->pppoe.num) {
+ l2->etype = ETH_P_PPP_SES;
+ }
+
+ return 0;
+}
+
+static int airoha_ppe_foe_entry_set_ipv4_tuple(struct airoha_foe_entry *hwe,
+ struct airoha_flow_data *data,
+ bool egress)
+{
+ int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
+ struct airoha_foe_ipv4_tuple *t;
+
+ switch (type) {
+ case PPE_PKT_TYPE_IPV4_HNAPT:
+ if (egress) {
+ t = &hwe->ipv4.new_tuple;
+ break;
+ }
+ fallthrough;
+ case PPE_PKT_TYPE_IPV4_DSLITE:
+ case PPE_PKT_TYPE_IPV4_ROUTE:
+ t = &hwe->ipv4.orig_tuple;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+
+ t->src_ip = be32_to_cpu(data->v4.src_addr);
+ t->dest_ip = be32_to_cpu(data->v4.dst_addr);
+
+ if (type != PPE_PKT_TYPE_IPV4_ROUTE) {
+ t->src_port = be16_to_cpu(data->src_port);
+ t->dest_port = be16_to_cpu(data->dst_port);
+ }
+
+ return 0;
+}
+
+static int airoha_ppe_foe_entry_set_ipv6_tuple(struct airoha_foe_entry *hwe,
+ struct airoha_flow_data *data)
+
+{
+ int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
+ u32 *src, *dest;
+
+ switch (type) {
+ case PPE_PKT_TYPE_IPV6_ROUTE_5T:
+ case PPE_PKT_TYPE_IPV6_6RD:
+ hwe->ipv6.src_port = be16_to_cpu(data->src_port);
+ hwe->ipv6.dest_port = be16_to_cpu(data->dst_port);
+ fallthrough;
+ case PPE_PKT_TYPE_IPV6_ROUTE_3T:
+ src = hwe->ipv6.src_ip;
+ dest = hwe->ipv6.dest_ip;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+
+ ipv6_addr_be32_to_cpu(src, data->v6.src_addr.s6_addr32);
+ ipv6_addr_be32_to_cpu(dest, data->v6.dst_addr.s6_addr32);
+
+ return 0;
+}
+
+static u32 airoha_ppe_foe_get_entry_hash(struct airoha_ppe *ppe,
+ struct airoha_foe_entry *hwe)
+{
+ int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
+ u32 ppe_hash_mask = airoha_ppe_get_total_num_entries(ppe) - 1;
+ u32 hash, hv1, hv2, hv3;
+
+ switch (type) {
+ case PPE_PKT_TYPE_IPV4_ROUTE:
+ case PPE_PKT_TYPE_IPV4_HNAPT:
+ hv1 = hwe->ipv4.orig_tuple.ports;
+ hv2 = hwe->ipv4.orig_tuple.dest_ip;
+ hv3 = hwe->ipv4.orig_tuple.src_ip;
+ break;
+ case PPE_PKT_TYPE_IPV6_ROUTE_3T:
+ case PPE_PKT_TYPE_IPV6_ROUTE_5T:
+ hv1 = hwe->ipv6.src_ip[3] ^ hwe->ipv6.dest_ip[3];
+ hv1 ^= hwe->ipv6.ports;
+
+ hv2 = hwe->ipv6.src_ip[2] ^ hwe->ipv6.dest_ip[2];
+ hv2 ^= hwe->ipv6.dest_ip[0];
+
+ hv3 = hwe->ipv6.src_ip[1] ^ hwe->ipv6.dest_ip[1];
+ hv3 ^= hwe->ipv6.src_ip[0];
+ break;
+ case PPE_PKT_TYPE_BRIDGE: {
+ struct airoha_foe_mac_info *l2 = &hwe->bridge.l2;
+
+ hv1 = l2->common.src_mac_hi & 0xffff;
+ hv1 = hv1 << 16 | l2->src_mac_lo;
+
+ hv2 = l2->common.dest_mac_lo;
+ hv2 = hv2 << 16;
+ hv2 = hv2 | ((l2->common.src_mac_hi & 0xffff0000) >> 16);
+
+ hv3 = l2->common.dest_mac_hi;
+ break;
+ }
+ case PPE_PKT_TYPE_IPV4_DSLITE:
+ case PPE_PKT_TYPE_IPV6_6RD:
+ default:
+ WARN_ON_ONCE(1);
+ return ppe_hash_mask;
+ }
+
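+	/* Mix the three tuple words and fold the result into the table
+	 * index range.
+	 */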
+ hash = (hv1 & hv2) | ((~hv1) & hv3);
+ hash = (hash >> 24) | ((hash & 0xffffff) << 8);
+ hash ^= hv1 ^ hv2 ^ hv3;
+ hash ^= hash >> 16;
+ hash &= ppe_hash_mask;
+
+ return hash;
+}
+
+static int airoha_ppe_foe_get_flow_stats_index(struct airoha_ppe *ppe,
+ u32 hash, u32 *index)
+{
+ int ppe_num_stats_entries;
+
+ ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe);
+ if (ppe_num_stats_entries < 0)
+ return ppe_num_stats_entries;
+
+ *index = hash >= ppe_num_stats_entries ? hash - PPE_STATS_NUM_ENTRIES
+ : hash;
+
+ return 0;
+}
+
+static void airoha_ppe_foe_flow_stat_entry_reset(struct airoha_ppe *ppe,
+ struct airoha_npu *npu,
+ int index)
+{
+ memset_io(&npu->stats[index], 0, sizeof(*npu->stats));
+ memset(&ppe->foe_stats[index], 0, sizeof(*ppe->foe_stats));
+}
+
+static void airoha_ppe_foe_flow_stats_reset(struct airoha_ppe *ppe,
+ struct airoha_npu *npu)
+{
+ int i, ppe_num_stats_entries;
+
+ ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe);
+ if (ppe_num_stats_entries < 0)
+ return;
+
+ for (i = 0; i < ppe_num_stats_entries; i++)
+ airoha_ppe_foe_flow_stat_entry_reset(ppe, npu, i);
+}
+
+static void airoha_ppe_foe_flow_stats_update(struct airoha_ppe *ppe,
+ struct airoha_npu *npu,
+ struct airoha_foe_entry *hwe,
+ u32 hash)
+{
+ int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
+ u32 index, pse_port, val, *data, *ib2, *meter;
+ int ppe_num_stats_entries;
+ u8 nbq;
+
+ ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe);
+ if (ppe_num_stats_entries < 0)
+ return;
+
+ if (airoha_ppe_foe_get_flow_stats_index(ppe, hash, &index))
+ return;
+
+ if (index >= ppe_num_stats_entries)
+ return;
+
+ if (type == PPE_PKT_TYPE_BRIDGE) {
+ data = &hwe->bridge.data;
+ ib2 = &hwe->bridge.ib2;
+ meter = &hwe->bridge.l2.meter;
+ } else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
+ data = &hwe->ipv6.data;
+ ib2 = &hwe->ipv6.ib2;
+ meter = &hwe->ipv6.meter;
+ } else {
+ data = &hwe->ipv4.data;
+ ib2 = &hwe->ipv4.ib2;
+ meter = &hwe->ipv4.l2.meter;
+ }
+
+ pse_port = FIELD_GET(AIROHA_FOE_IB2_PSE_PORT, *ib2);
+ if (pse_port == FE_PSE_PORT_CDM4)
+ return;
+
+ airoha_ppe_foe_flow_stat_entry_reset(ppe, npu, index);
+
+ val = FIELD_GET(AIROHA_FOE_CHANNEL | AIROHA_FOE_QID, *data);
+ *data = (*data & ~AIROHA_FOE_ACTDP) |
+ FIELD_PREP(AIROHA_FOE_ACTDP, val);
+
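+	/* Preserve the original IB2 forwarding bits in the meter field,
+	 * then steer the flow to PSE port 6, selecting the NBQ from the
+	 * original port.
+	 */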
+ val = *ib2 & (AIROHA_FOE_IB2_NBQ | AIROHA_FOE_IB2_PSE_PORT |
+ AIROHA_FOE_IB2_PSE_QOS | AIROHA_FOE_IB2_FAST_PATH);
+ *meter |= FIELD_PREP(AIROHA_FOE_TUNNEL_MTU, val);
+
+ nbq = pse_port == 1 ? 6 : 5;
+ *ib2 &= ~(AIROHA_FOE_IB2_NBQ | AIROHA_FOE_IB2_PSE_PORT |
+ AIROHA_FOE_IB2_PSE_QOS);
+ *ib2 |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, 6) |
+ FIELD_PREP(AIROHA_FOE_IB2_NBQ, nbq);
+}
+
+static struct airoha_foe_entry *
+airoha_ppe_foe_get_entry_locked(struct airoha_ppe *ppe, u32 hash)
+{
+ u32 sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe);
+
+ lockdep_assert_held(&ppe_lock);
+
+ if (hash < sram_num_entries) {
+ u32 *hwe = ppe->foe + hash * sizeof(struct airoha_foe_entry);
+ bool ppe2 = hash >= PPE_SRAM_NUM_ENTRIES;
+ struct airoha_eth *eth = ppe->eth;
+ u32 val;
+ int i;
+
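+		/* Trigger a hardware SRAM read for this entry and wait for
+		 * the ACK before copying the words into the FOE table.
+		 */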
+ airoha_fe_wr(ppe->eth, REG_PPE_RAM_CTRL(ppe2),
+ FIELD_PREP(PPE_SRAM_CTRL_ENTRY_MASK, hash) |
+ PPE_SRAM_CTRL_REQ_MASK);
+ if (read_poll_timeout_atomic(airoha_fe_rr, val,
+ val & PPE_SRAM_CTRL_ACK_MASK,
+ 10, 100, false, eth,
+ REG_PPE_RAM_CTRL(ppe2)))
+ return NULL;
+
+ for (i = 0; i < sizeof(struct airoha_foe_entry) / sizeof(*hwe);
+ i++)
+ hwe[i] = airoha_fe_rr(eth,
+ REG_PPE_RAM_ENTRY(ppe2, i));
+ }
+
+ return ppe->foe + hash * sizeof(struct airoha_foe_entry);
+}
+
+struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
+ u32 hash)
+{
+ struct airoha_foe_entry *hwe;
+
+ spin_lock_bh(&ppe_lock);
+ hwe = airoha_ppe_foe_get_entry_locked(ppe, hash);
+ spin_unlock_bh(&ppe_lock);
+
+ return hwe;
+}
+
+static bool airoha_ppe_foe_compare_entry(struct airoha_flow_table_entry *e,
+ struct airoha_foe_entry *hwe)
+{
+ int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, e->data.ib1);
+ int len;
+
+ if ((hwe->ib1 ^ e->data.ib1) & AIROHA_FOE_IB1_BIND_UDP)
+ return false;
+
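+	/* Compare the entry contents from the field following ib1 up to
+	 * the data/ib2 words.
+	 */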
+ if (type > PPE_PKT_TYPE_IPV4_DSLITE)
+ len = offsetof(struct airoha_foe_entry, ipv6.data);
+ else
+ len = offsetof(struct airoha_foe_entry, ipv4.ib2);
+
+ return !memcmp(&e->data.d, &hwe->d, len - sizeof(hwe->ib1));
+}
+
+static int airoha_ppe_foe_commit_sram_entry(struct airoha_ppe *ppe, u32 hash)
+{
+ struct airoha_foe_entry *hwe = ppe->foe + hash * sizeof(*hwe);
+ bool ppe2 = hash >= PPE_SRAM_NUM_ENTRIES;
+ u32 *ptr = (u32 *)hwe, val;
+ int i;
+
+ for (i = 0; i < sizeof(*hwe) / sizeof(*ptr); i++)
+ airoha_fe_wr(ppe->eth, REG_PPE_RAM_ENTRY(ppe2, i), ptr[i]);
+
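+	/* Make sure the entry words hit memory before the SRAM write
+	 * request is issued.
+	 */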
+ wmb();
+ airoha_fe_wr(ppe->eth, REG_PPE_RAM_CTRL(ppe2),
+ FIELD_PREP(PPE_SRAM_CTRL_ENTRY_MASK, hash) |
+ PPE_SRAM_CTRL_WR_MASK | PPE_SRAM_CTRL_REQ_MASK);
+
+ return read_poll_timeout_atomic(airoha_fe_rr, val,
+ val & PPE_SRAM_CTRL_ACK_MASK,
+ 10, 100, false, ppe->eth,
+ REG_PPE_RAM_CTRL(ppe2));
+}
+
+static int airoha_ppe_foe_commit_entry(struct airoha_ppe *ppe,
+ struct airoha_foe_entry *e,
+ u32 hash, bool rx_wlan)
+{
+ u32 sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe);
+ struct airoha_foe_entry *hwe = ppe->foe + hash * sizeof(*hwe);
+ u32 ts = airoha_ppe_get_timestamp(ppe);
+ struct airoha_eth *eth = ppe->eth;
+ struct airoha_npu *npu;
+ int err = 0;
+
+ memcpy(&hwe->d, &e->d, sizeof(*hwe) - sizeof(hwe->ib1));
+ wmb();
+
+ e->ib1 &= ~AIROHA_FOE_IB1_BIND_TIMESTAMP;
+ e->ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_TIMESTAMP, ts);
+ hwe->ib1 = e->ib1;
+
+ rcu_read_lock();
+
+ npu = rcu_dereference(eth->npu);
+ if (!npu) {
+ err = -ENODEV;
+ goto unlock;
+ }
+
+ if (!rx_wlan)
+ airoha_ppe_foe_flow_stats_update(ppe, npu, hwe, hash);
+
+ if (hash < sram_num_entries)
+ err = airoha_ppe_foe_commit_sram_entry(ppe, hash);
+unlock:
+ rcu_read_unlock();
+
+ return err;
+}
+
+static void airoha_ppe_foe_remove_flow(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e)
+{
+ lockdep_assert_held(&ppe_lock);
+
+ hlist_del_init(&e->list);
+ if (e->hash != 0xffff) {
+ e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_STATE;
+ e->data.ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE,
+ AIROHA_FOE_STATE_INVALID);
+ airoha_ppe_foe_commit_entry(ppe, &e->data, e->hash, false);
+ e->hash = 0xffff;
+ }
+ if (e->type == FLOW_TYPE_L2_SUBFLOW) {
+ hlist_del_init(&e->l2_subflow_node);
+ kfree(e);
+ }
+}
+
+static void airoha_ppe_foe_remove_l2_flow(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e)
+{
+ struct hlist_head *head = &e->l2_flows;
+ struct hlist_node *n;
+
+ lockdep_assert_held(&ppe_lock);
+
+ rhashtable_remove_fast(&ppe->l2_flows, &e->l2_node,
+ airoha_l2_flow_table_params);
+ hlist_for_each_entry_safe(e, n, head, l2_subflow_node)
+ airoha_ppe_foe_remove_flow(ppe, e);
+}
+
+static void airoha_ppe_foe_flow_remove_entry(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e)
+{
+ spin_lock_bh(&ppe_lock);
+
+ if (e->type == FLOW_TYPE_L2)
+ airoha_ppe_foe_remove_l2_flow(ppe, e);
+ else
+ airoha_ppe_foe_remove_flow(ppe, e);
+
+ spin_unlock_bh(&ppe_lock);
+}
+
+static int
+airoha_ppe_foe_commit_subflow_entry(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e,
+ u32 hash, bool rx_wlan)
+{
+ u32 mask = AIROHA_FOE_IB1_BIND_PACKET_TYPE | AIROHA_FOE_IB1_BIND_UDP;
+ struct airoha_foe_entry *hwe_p, hwe;
+ struct airoha_flow_table_entry *f;
+ int type;
+
+ hwe_p = airoha_ppe_foe_get_entry_locked(ppe, hash);
+ if (!hwe_p)
+ return -EINVAL;
+
+ f = kzalloc(sizeof(*f), GFP_ATOMIC);
+ if (!f)
+ return -ENOMEM;
+
+ hlist_add_head(&f->l2_subflow_node, &e->l2_flows);
+ f->type = FLOW_TYPE_L2_SUBFLOW;
+ f->hash = hash;
+
+ memcpy(&hwe, hwe_p, sizeof(*hwe_p));
+ hwe.ib1 = (hwe.ib1 & mask) | (e->data.ib1 & ~mask);
+
+ type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe.ib1);
+ if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
+ memcpy(&hwe.ipv6.l2, &e->data.bridge.l2, sizeof(hwe.ipv6.l2));
+ hwe.ipv6.ib2 = e->data.bridge.ib2;
+		/* Setting smac_id to 0xf instructs the hw to keep the
+		 * original source mac address.
+		 */
+ hwe.ipv6.l2.src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID,
+ 0xf);
+ } else {
+ memcpy(&hwe.bridge.l2, &e->data.bridge.l2,
+ sizeof(hwe.bridge.l2));
+ hwe.bridge.ib2 = e->data.bridge.ib2;
+ if (type == PPE_PKT_TYPE_IPV4_HNAPT)
+ memcpy(&hwe.ipv4.new_tuple, &hwe.ipv4.orig_tuple,
+ sizeof(hwe.ipv4.new_tuple));
+ }
+
+ hwe.bridge.data = e->data.bridge.data;
+ airoha_ppe_foe_commit_entry(ppe, &hwe, hash, rx_wlan);
+
+ return 0;
+}
+
+static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe,
+ struct sk_buff *skb,
+ u32 hash, bool rx_wlan)
+{
+ struct airoha_flow_table_entry *e;
+ struct airoha_foe_bridge br = {};
+ struct airoha_foe_entry *hwe;
+ bool commit_done = false;
+ struct hlist_node *n;
+ u32 index, state;
+
+ spin_lock_bh(&ppe_lock);
+
+ hwe = airoha_ppe_foe_get_entry_locked(ppe, hash);
+ if (!hwe)
+ goto unlock;
+
+ state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1);
+ if (state == AIROHA_FOE_STATE_BIND)
+ goto unlock;
+
+ index = airoha_ppe_foe_get_entry_hash(ppe, hwe);
+ hlist_for_each_entry_safe(e, n, &ppe->foe_flow[index], list) {
+ if (e->type == FLOW_TYPE_L2_SUBFLOW) {
+ state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1);
+ if (state != AIROHA_FOE_STATE_BIND) {
+ e->hash = 0xffff;
+ airoha_ppe_foe_remove_flow(ppe, e);
+ }
+ continue;
+ }
+
+ if (!airoha_ppe_foe_compare_entry(e, hwe))
+ continue;
+
+ airoha_ppe_foe_commit_entry(ppe, &e->data, hash, rx_wlan);
+ commit_done = true;
+ e->hash = hash;
+ }
+
+ if (commit_done)
+ goto unlock;
+
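+	/* No matching flow entry: fall back to the L2 flow table keyed on
+	 * the packet MAC addresses.
+	 */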
+ airoha_ppe_foe_set_bridge_addrs(&br, eth_hdr(skb));
+ e = rhashtable_lookup_fast(&ppe->l2_flows, &br,
+ airoha_l2_flow_table_params);
+ if (e)
+ airoha_ppe_foe_commit_subflow_entry(ppe, e, hash, rx_wlan);
+unlock:
+ spin_unlock_bh(&ppe_lock);
+}
+
+static int
+airoha_ppe_foe_l2_flow_commit_entry(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e)
+{
+ struct airoha_flow_table_entry *prev;
+
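+	/* Insert the entry in the L2 flow table; an existing entry with the
+	 * same MAC key is replaced below.
+	 */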
+ e->type = FLOW_TYPE_L2;
+ prev = rhashtable_lookup_get_insert_fast(&ppe->l2_flows, &e->l2_node,
+ airoha_l2_flow_table_params);
+ if (!prev)
+ return 0;
+
+ if (IS_ERR(prev))
+ return PTR_ERR(prev);
+
+ return rhashtable_replace_fast(&ppe->l2_flows, &prev->l2_node,
+ &e->l2_node,
+ airoha_l2_flow_table_params);
+}
+
+static int airoha_ppe_foe_flow_commit_entry(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e)
+{
+ int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, e->data.ib1);
+ u32 hash;
+
+ if (type == PPE_PKT_TYPE_BRIDGE)
+ return airoha_ppe_foe_l2_flow_commit_entry(ppe, e);
+
+ hash = airoha_ppe_foe_get_entry_hash(ppe, &e->data);
+ e->type = FLOW_TYPE_L4;
+ e->hash = 0xffff;
+
+ spin_lock_bh(&ppe_lock);
+ hlist_add_head(&e->list, &ppe->foe_flow[hash]);
+ spin_unlock_bh(&ppe_lock);
+
+ return 0;
+}
+
+static int airoha_ppe_get_entry_idle_time(struct airoha_ppe *ppe, u32 ib1)
+{
+ u32 state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, ib1);
+ u32 ts, ts_mask, now = airoha_ppe_get_timestamp(ppe);
+ int idle;
+
+ if (state == AIROHA_FOE_STATE_BIND) {
+ ts = FIELD_GET(AIROHA_FOE_IB1_BIND_TIMESTAMP, ib1);
+ ts_mask = AIROHA_FOE_IB1_BIND_TIMESTAMP;
+ } else {
+ ts = FIELD_GET(AIROHA_FOE_IB1_UNBIND_TIMESTAMP, ib1);
+ now = FIELD_GET(AIROHA_FOE_IB1_UNBIND_TIMESTAMP, now);
+ ts_mask = AIROHA_FOE_IB1_UNBIND_TIMESTAMP;
+ }
+ idle = now - ts;
+
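+	/* Account for timestamp counter wraparound. */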
+ return idle < 0 ? idle + ts_mask + 1 : idle;
+}
+
+static void
+airoha_ppe_foe_flow_l2_entry_update(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e)
+{
+ int min_idle = airoha_ppe_get_entry_idle_time(ppe, e->data.ib1);
+ struct airoha_flow_table_entry *iter;
+ struct hlist_node *n;
+
+ lockdep_assert_held(&ppe_lock);
+
+ hlist_for_each_entry_safe(iter, n, &e->l2_flows, l2_subflow_node) {
+ struct airoha_foe_entry *hwe;
+ u32 ib1, state;
+ int idle;
+
+ hwe = airoha_ppe_foe_get_entry_locked(ppe, iter->hash);
+ if (!hwe)
+ continue;
+
+ ib1 = READ_ONCE(hwe->ib1);
+ state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, ib1);
+ if (state != AIROHA_FOE_STATE_BIND) {
+ iter->hash = 0xffff;
+ airoha_ppe_foe_remove_flow(ppe, iter);
+ continue;
+ }
+
+ idle = airoha_ppe_get_entry_idle_time(ppe, ib1);
+ if (idle >= min_idle)
+ continue;
+
+ min_idle = idle;
+ e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_TIMESTAMP;
+ e->data.ib1 |= ib1 & AIROHA_FOE_IB1_BIND_TIMESTAMP;
+ }
+}
+
+static void airoha_ppe_foe_flow_entry_update(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e)
+{
+ struct airoha_foe_entry *hwe_p, hwe = {};
+
+ spin_lock_bh(&ppe_lock);
+
+ if (e->type == FLOW_TYPE_L2) {
+ airoha_ppe_foe_flow_l2_entry_update(ppe, e);
+ goto unlock;
+ }
+
+ if (e->hash == 0xffff)
+ goto unlock;
+
+ hwe_p = airoha_ppe_foe_get_entry_locked(ppe, e->hash);
+ if (!hwe_p)
+ goto unlock;
+
+ memcpy(&hwe, hwe_p, sizeof(*hwe_p));
+ if (!airoha_ppe_foe_compare_entry(e, &hwe)) {
+ e->hash = 0xffff;
+ goto unlock;
+ }
+
+ e->data.ib1 = hwe.ib1;
+unlock:
+ spin_unlock_bh(&ppe_lock);
+}
+
+static int airoha_ppe_entry_idle_time(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e)
+{
+ airoha_ppe_foe_flow_entry_update(ppe, e);
+
+ return airoha_ppe_get_entry_idle_time(ppe, e->data.ib1);
+}
+
+static int airoha_ppe_flow_offload_replace(struct airoha_eth *eth,
+ struct flow_cls_offload *f)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct airoha_flow_table_entry *e;
+ struct airoha_flow_data data = {};
+ struct net_device *odev = NULL;
+ struct flow_action_entry *act;
+ struct airoha_foe_entry hwe;
+ int err, i, offload_type;
+ u16 addr_type = 0;
+ u8 l4proto = 0;
+
+ if (rhashtable_lookup(&eth->flow_table, &f->cookie,
+ airoha_flow_table_params))
+ return -EEXIST;
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
+ return -EOPNOTSUPP;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match;
+
+ flow_rule_match_control(rule, &match);
+ addr_type = match.key->addr_type;
+ if (flow_rule_has_control_flags(match.mask->flags,
+ f->common.extack))
+ return -EOPNOTSUPP;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ flow_rule_match_basic(rule, &match);
+ l4proto = match.key->ip_proto;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ switch (addr_type) {
+ case 0:
+ offload_type = PPE_PKT_TYPE_BRIDGE;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(rule, &match);
+ memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN);
+ memcpy(data.eth.h_source, match.key->src, ETH_ALEN);
+ } else {
+ return -EOPNOTSUPP;
+ }
+ break;
+ case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
+ offload_type = PPE_PKT_TYPE_IPV4_HNAPT;
+ break;
+ case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
+ offload_type = PPE_PKT_TYPE_IPV6_ROUTE_5T;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ flow_action_for_each(i, act, &rule->action) {
+ switch (act->id) {
+ case FLOW_ACTION_MANGLE:
+ if (offload_type == PPE_PKT_TYPE_BRIDGE)
+ return -EOPNOTSUPP;
+
+ if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
+ airoha_ppe_flow_mangle_eth(act, &data.eth);
+ break;
+ case FLOW_ACTION_REDIRECT:
+ odev = act->dev;
+ break;
+ case FLOW_ACTION_CSUM:
+ break;
+ case FLOW_ACTION_VLAN_PUSH:
+ if (data.vlan.num == 2 ||
+ act->vlan.proto != htons(ETH_P_8021Q))
+ return -EOPNOTSUPP;
+
+ data.vlan.hdr[data.vlan.num].id = act->vlan.vid;
+ data.vlan.hdr[data.vlan.num].proto = act->vlan.proto;
+ data.vlan.num++;
+ break;
+ case FLOW_ACTION_VLAN_POP:
+ break;
+ case FLOW_ACTION_PPPOE_PUSH:
+ if (data.pppoe.num == 1 || data.vlan.num == 2)
+ return -EOPNOTSUPP;
+
+ data.pppoe.sid = act->pppoe.sid;
+ data.pppoe.num++;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ if (!is_valid_ether_addr(data.eth.h_source) ||
+ !is_valid_ether_addr(data.eth.h_dest))
+ return -EINVAL;
+
+ err = airoha_ppe_foe_entry_prepare(eth, &hwe, odev, offload_type,
+ &data, l4proto);
+ if (err)
+ return err;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports ports;
+
+ if (offload_type == PPE_PKT_TYPE_BRIDGE)
+ return -EOPNOTSUPP;
+
+ flow_rule_match_ports(rule, &ports);
+ data.src_port = ports.key->src;
+ data.dst_port = ports.key->dst;
+ } else if (offload_type != PPE_PKT_TYPE_BRIDGE) {
+ return -EOPNOTSUPP;
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ struct flow_match_ipv4_addrs addrs;
+
+ flow_rule_match_ipv4_addrs(rule, &addrs);
+ data.v4.src_addr = addrs.key->src;
+ data.v4.dst_addr = addrs.key->dst;
+ airoha_ppe_foe_entry_set_ipv4_tuple(&hwe, &data, false);
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+ struct flow_match_ipv6_addrs addrs;
+
+ flow_rule_match_ipv6_addrs(rule, &addrs);
+
+ data.v6.src_addr = addrs.key->src;
+ data.v6.dst_addr = addrs.key->dst;
+ airoha_ppe_foe_entry_set_ipv6_tuple(&hwe, &data);
+ }
+
+ flow_action_for_each(i, act, &rule->action) {
+ if (act->id != FLOW_ACTION_MANGLE)
+ continue;
+
+ if (offload_type == PPE_PKT_TYPE_BRIDGE)
+ return -EOPNOTSUPP;
+
+ switch (act->mangle.htype) {
+ case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
+ case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
+ err = airoha_ppe_flow_mangle_ports(act, &data);
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
+ err = airoha_ppe_flow_mangle_ipv4(act, &data);
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
+ /* handled earlier */
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (err)
+ return err;
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ err = airoha_ppe_foe_entry_set_ipv4_tuple(&hwe, &data, true);
+ if (err)
+ return err;
+ }
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (!e)
+ return -ENOMEM;
+
+ e->cookie = f->cookie;
+ memcpy(&e->data, &hwe, sizeof(e->data));
+
+ err = airoha_ppe_foe_flow_commit_entry(eth->ppe, e);
+ if (err)
+ goto free_entry;
+
+ err = rhashtable_insert_fast(&eth->flow_table, &e->node,
+ airoha_flow_table_params);
+ if (err < 0)
+ goto remove_foe_entry;
+
+ return 0;
+
+remove_foe_entry:
+ airoha_ppe_foe_flow_remove_entry(eth->ppe, e);
+free_entry:
+ kfree(e);
+
+ return err;
+}
+
+static int airoha_ppe_flow_offload_destroy(struct airoha_eth *eth,
+ struct flow_cls_offload *f)
+{
+ struct airoha_flow_table_entry *e;
+
+ e = rhashtable_lookup(&eth->flow_table, &f->cookie,
+ airoha_flow_table_params);
+ if (!e)
+ return -ENOENT;
+
+ airoha_ppe_foe_flow_remove_entry(eth->ppe, e);
+ rhashtable_remove_fast(&eth->flow_table, &e->node,
+ airoha_flow_table_params);
+ kfree(e);
+
+ return 0;
+}
+
+void airoha_ppe_foe_entry_get_stats(struct airoha_ppe *ppe, u32 hash,
+ struct airoha_foe_stats64 *stats)
+{
+ struct airoha_eth *eth = ppe->eth;
+ int ppe_num_stats_entries;
+ struct airoha_npu *npu;
+ u32 index;
+
+ ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe);
+ if (ppe_num_stats_entries < 0)
+ return;
+
+ if (airoha_ppe_foe_get_flow_stats_index(ppe, hash, &index))
+ return;
+
+ if (index >= ppe_num_stats_entries)
+ return;
+
+ rcu_read_lock();
+
+ npu = rcu_dereference(eth->npu);
+ if (npu) {
+ u64 packets = ppe->foe_stats[index].packets;
+ u64 bytes = ppe->foe_stats[index].bytes;
+ struct airoha_foe_stats npu_stats;
+
+ memcpy_fromio(&npu_stats, &npu->stats[index],
+ sizeof(*npu->stats));
+ stats->packets = packets << 32 | npu_stats.packets;
+ stats->bytes = bytes << 32 | npu_stats.bytes;
+ }
+
+ rcu_read_unlock();
+}
+
+static int airoha_ppe_flow_offload_stats(struct airoha_eth *eth,
+ struct flow_cls_offload *f)
+{
+ struct airoha_flow_table_entry *e;
+ u32 idle;
+
+ e = rhashtable_lookup(&eth->flow_table, &f->cookie,
+ airoha_flow_table_params);
+ if (!e)
+ return -ENOENT;
+
+ idle = airoha_ppe_entry_idle_time(eth->ppe, e);
+ f->stats.lastused = jiffies - idle * HZ;
+
+ if (e->hash != 0xffff) {
+ struct airoha_foe_stats64 stats = {};
+
+ airoha_ppe_foe_entry_get_stats(eth->ppe, e->hash, &stats);
+ f->stats.pkts += (stats.packets - e->stats.packets);
+ f->stats.bytes += (stats.bytes - e->stats.bytes);
+ e->stats = stats;
+ }
+
+ return 0;
+}
+
+static int airoha_ppe_flow_offload_cmd(struct airoha_eth *eth,
+ struct flow_cls_offload *f)
+{
+ switch (f->command) {
+ case FLOW_CLS_REPLACE:
+ return airoha_ppe_flow_offload_replace(eth, f);
+ case FLOW_CLS_DESTROY:
+ return airoha_ppe_flow_offload_destroy(eth, f);
+ case FLOW_CLS_STATS:
+ return airoha_ppe_flow_offload_stats(eth, f);
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int airoha_ppe_flush_sram_entries(struct airoha_ppe *ppe)
+{
+ u32 sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe);
+ struct airoha_foe_entry *hwe = ppe->foe;
+ int i, err = 0;
+
+ for (i = 0; i < sram_num_entries; i++) {
+ memset(&hwe[i], 0, sizeof(*hwe));
+ err = airoha_ppe_foe_commit_sram_entry(ppe, i);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+static struct airoha_npu *airoha_ppe_npu_get(struct airoha_eth *eth)
+{
+ struct airoha_npu *npu = airoha_npu_get(eth->dev);
+
+ if (IS_ERR(npu)) {
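+		/* The NPU driver may not be loaded yet: request the module
+		 * and retry.
+		 */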
+ request_module("airoha-npu");
+ npu = airoha_npu_get(eth->dev);
+ }
+
+ return npu;
+}
+
+static int airoha_ppe_offload_setup(struct airoha_eth *eth)
+{
+ struct airoha_npu *npu = airoha_ppe_npu_get(eth);
+ struct airoha_ppe *ppe = eth->ppe;
+ int err, ppe_num_stats_entries;
+
+ if (IS_ERR(npu))
+ return PTR_ERR(npu);
+
+ err = npu->ops.ppe_init(npu);
+ if (err)
+ goto error_npu_put;
+
+ ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe);
+ if (ppe_num_stats_entries > 0) {
+ err = npu->ops.ppe_init_stats(npu, ppe->foe_stats_dma,
+ ppe_num_stats_entries);
+ if (err)
+ goto error_npu_put;
+ }
+
+ airoha_ppe_hw_init(ppe);
+ airoha_ppe_foe_flow_stats_reset(ppe, npu);
+
+ rcu_assign_pointer(eth->npu, npu);
+ synchronize_rcu();
+
+ return 0;
+
+error_npu_put:
+ airoha_npu_put(npu);
+
+ return err;
+}
+
+int airoha_ppe_setup_tc_block_cb(struct airoha_ppe_dev *dev, void *type_data)
+{
+ struct airoha_ppe *ppe = dev->priv;
+ struct airoha_eth *eth = ppe->eth;
+ int err = 0;
+
+ mutex_lock(&flow_offload_mutex);
+
+ if (!eth->npu)
+ err = airoha_ppe_offload_setup(eth);
+ if (!err)
+ err = airoha_ppe_flow_offload_cmd(eth, type_data);
+
+ mutex_unlock(&flow_offload_mutex);
+
+ return err;
+}
+
+void airoha_ppe_check_skb(struct airoha_ppe_dev *dev, struct sk_buff *skb,
+ u16 hash, bool rx_wlan)
+{
+ struct airoha_ppe *ppe = dev->priv;
+ u32 ppe_hash_mask = airoha_ppe_get_total_num_entries(ppe) - 1;
+ u16 now, diff;
+
+ if (hash > ppe_hash_mask)
+ return;
+
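+	/* Rate-limit binding checks for this entry to once every
+	 * HZ / 10 jiffies.
+	 */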
+ now = (u16)jiffies;
+ diff = now - ppe->foe_check_time[hash];
+ if (diff < HZ / 10)
+ return;
+
+ ppe->foe_check_time[hash] = now;
+ airoha_ppe_foe_insert_entry(ppe, skb, hash, rx_wlan);
+}
+
+void airoha_ppe_init_upd_mem(struct airoha_gdm_port *port)
+{
+ struct airoha_eth *eth = port->qdma->eth;
+ struct net_device *dev = port->dev;
+ const u8 *addr = dev->dev_addr;
+ u32 val;
+
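+	/* Program the port source MAC into the update memory: the lower
+	 * four bytes first, then the upper two.
+	 */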
+ val = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5];
+ airoha_fe_wr(eth, REG_UPDMEM_DATA(0), val);
+ airoha_fe_wr(eth, REG_UPDMEM_CTRL(0),
+ FIELD_PREP(PPE_UPDMEM_ADDR_MASK, port->id) |
+ PPE_UPDMEM_WR_MASK | PPE_UPDMEM_REQ_MASK);
+
+ val = (addr[0] << 8) | addr[1];
+ airoha_fe_wr(eth, REG_UPDMEM_DATA(0), val);
+ airoha_fe_wr(eth, REG_UPDMEM_CTRL(0),
+ FIELD_PREP(PPE_UPDMEM_ADDR_MASK, port->id) |
+ FIELD_PREP(PPE_UPDMEM_OFFSET_MASK, 1) |
+ PPE_UPDMEM_WR_MASK | PPE_UPDMEM_REQ_MASK);
+}
+
+struct airoha_ppe_dev *airoha_ppe_get_dev(struct device *dev)
+{
+ struct platform_device *pdev;
+ struct device_node *np;
+ struct airoha_eth *eth;
+
+ np = of_parse_phandle(dev->of_node, "airoha,eth", 0);
+ if (!np)
+ return ERR_PTR(-ENODEV);
+
+ pdev = of_find_device_by_node(np);
+ if (!pdev) {
+ dev_err(dev, "cannot find device node %s\n", np->name);
+ of_node_put(np);
+ return ERR_PTR(-ENODEV);
+ }
+ of_node_put(np);
+
+ if (!try_module_get(THIS_MODULE)) {
+ dev_err(dev, "failed to get the device driver module\n");
+ goto error_pdev_put;
+ }
+
+ eth = platform_get_drvdata(pdev);
+ if (!eth)
+ goto error_module_put;
+
+ if (!device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER)) {
+ dev_err(&pdev->dev,
+ "failed to create device link to consumer %s\n",
+ dev_name(dev));
+ goto error_module_put;
+ }
+
+ return &eth->ppe->dev;
+
+error_module_put:
+ module_put(THIS_MODULE);
+error_pdev_put:
+ platform_device_put(pdev);
+
+ return ERR_PTR(-ENODEV);
+}
+EXPORT_SYMBOL_GPL(airoha_ppe_get_dev);
+
+void airoha_ppe_put_dev(struct airoha_ppe_dev *dev)
+{
+ struct airoha_ppe *ppe = dev->priv;
+ struct airoha_eth *eth = ppe->eth;
+
+ module_put(THIS_MODULE);
+ put_device(eth->dev);
+}
+EXPORT_SYMBOL_GPL(airoha_ppe_put_dev);
+
+int airoha_ppe_init(struct airoha_eth *eth)
+{
+ int foe_size, err, ppe_num_stats_entries;
+ u32 ppe_num_entries;
+ struct airoha_ppe *ppe;
+
+ ppe = devm_kzalloc(eth->dev, sizeof(*ppe), GFP_KERNEL);
+ if (!ppe)
+ return -ENOMEM;
+
+ ppe->dev.ops.setup_tc_block_cb = airoha_ppe_setup_tc_block_cb;
+ ppe->dev.ops.check_skb = airoha_ppe_check_skb;
+ ppe->dev.priv = ppe;
+ ppe->eth = eth;
+ eth->ppe = ppe;
+
+ ppe_num_entries = airoha_ppe_get_total_num_entries(ppe);
+ foe_size = ppe_num_entries * sizeof(struct airoha_foe_entry);
+ ppe->foe = dmam_alloc_coherent(eth->dev, foe_size, &ppe->foe_dma,
+ GFP_KERNEL);
+ if (!ppe->foe)
+ return -ENOMEM;
+
+ ppe->foe_flow = devm_kzalloc(eth->dev,
+ ppe_num_entries * sizeof(*ppe->foe_flow),
+ GFP_KERNEL);
+ if (!ppe->foe_flow)
+ return -ENOMEM;
+
+ ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe);
+ if (ppe_num_stats_entries > 0) {
+ foe_size = ppe_num_stats_entries * sizeof(*ppe->foe_stats);
+ ppe->foe_stats = dmam_alloc_coherent(eth->dev, foe_size,
+ &ppe->foe_stats_dma,
+ GFP_KERNEL);
+ if (!ppe->foe_stats)
+ return -ENOMEM;
+ }
+
+	ppe->foe_check_time = devm_kzalloc(eth->dev,
+					   ppe_num_entries *
+					   sizeof(*ppe->foe_check_time),
+					   GFP_KERNEL);
+ if (!ppe->foe_check_time)
+ return -ENOMEM;
+
+ err = airoha_ppe_flush_sram_entries(ppe);
+ if (err)
+ return err;
+
+ err = rhashtable_init(&eth->flow_table, &airoha_flow_table_params);
+ if (err)
+ return err;
+
+ err = rhashtable_init(&ppe->l2_flows, &airoha_l2_flow_table_params);
+ if (err)
+ goto error_flow_table_destroy;
+
+ err = airoha_ppe_debugfs_init(ppe);
+ if (err)
+ goto error_l2_flow_table_destroy;
+
+ return 0;
+
+error_l2_flow_table_destroy:
+ rhashtable_destroy(&ppe->l2_flows);
+error_flow_table_destroy:
+ rhashtable_destroy(&eth->flow_table);
+
+ return err;
+}
+
+void airoha_ppe_deinit(struct airoha_eth *eth)
+{
+ struct airoha_npu *npu;
+
+ rcu_read_lock();
+ npu = rcu_dereference(eth->npu);
+ if (npu) {
+ npu->ops.ppe_deinit(npu);
+ airoha_npu_put(npu);
+ }
+ rcu_read_unlock();
+
+ rhashtable_destroy(&eth->ppe->l2_flows);
+ rhashtable_destroy(&eth->flow_table);
+ debugfs_remove(eth->ppe->debugfs_dir);
+}
diff --git a/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c b/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c
new file mode 100644
index 000000000000..0112c41150bb
--- /dev/null
+++ b/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c
@@ -0,0 +1,187 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include "airoha_eth.h"
+
+static void airoha_debugfs_ppe_print_tuple(struct seq_file *m,
+ void *src_addr, void *dest_addr,
+ u16 *src_port, u16 *dest_port,
+ bool ipv6)
+{
+ __be32 n_addr[IPV6_ADDR_WORDS];
+
+ if (ipv6) {
+ ipv6_addr_cpu_to_be32(n_addr, src_addr);
+ seq_printf(m, "%pI6", n_addr);
+ } else {
+ seq_printf(m, "%pI4h", src_addr);
+ }
+ if (src_port)
+ seq_printf(m, ":%d", *src_port);
+
+ seq_puts(m, "->");
+
+ if (ipv6) {
+ ipv6_addr_cpu_to_be32(n_addr, dest_addr);
+ seq_printf(m, "%pI6", n_addr);
+ } else {
+ seq_printf(m, "%pI4h", dest_addr);
+ }
+ if (dest_port)
+ seq_printf(m, ":%d", *dest_port);
+}
+
+static int airoha_ppe_debugfs_foe_show(struct seq_file *m, void *private,
+ bool bind)
+{
+ static const char *const ppe_type_str[] = {
+ [PPE_PKT_TYPE_IPV4_HNAPT] = "IPv4 5T",
+ [PPE_PKT_TYPE_IPV4_ROUTE] = "IPv4 3T",
+ [PPE_PKT_TYPE_BRIDGE] = "L2B",
+ [PPE_PKT_TYPE_IPV4_DSLITE] = "DS-LITE",
+ [PPE_PKT_TYPE_IPV6_ROUTE_3T] = "IPv6 3T",
+ [PPE_PKT_TYPE_IPV6_ROUTE_5T] = "IPv6 5T",
+ [PPE_PKT_TYPE_IPV6_6RD] = "6RD",
+ };
+ static const char *const ppe_state_str[] = {
+ [AIROHA_FOE_STATE_INVALID] = "INV",
+ [AIROHA_FOE_STATE_UNBIND] = "UNB",
+ [AIROHA_FOE_STATE_BIND] = "BND",
+ [AIROHA_FOE_STATE_FIN] = "FIN",
+ };
+ struct airoha_ppe *ppe = m->private;
+ u32 ppe_num_entries = airoha_ppe_get_total_num_entries(ppe);
+ int i;
+
+ for (i = 0; i < ppe_num_entries; i++) {
+ const char *state_str, *type_str = "UNKNOWN";
+ void *src_addr = NULL, *dest_addr = NULL;
+ u16 *src_port = NULL, *dest_port = NULL;
+ struct airoha_foe_mac_info_common *l2;
+ unsigned char h_source[ETH_ALEN] = {};
+ struct airoha_foe_stats64 stats = {};
+ unsigned char h_dest[ETH_ALEN];
+ struct airoha_foe_entry *hwe;
+ u32 type, state, ib2, data;
+ bool ipv6 = false;
+
+ hwe = airoha_ppe_foe_get_entry(ppe, i);
+ if (!hwe)
+ continue;
+
+ state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1);
+ if (!state)
+ continue;
+
+ if (bind && state != AIROHA_FOE_STATE_BIND)
+ continue;
+
+ state_str = ppe_state_str[state % ARRAY_SIZE(ppe_state_str)];
+ type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
+ if (type < ARRAY_SIZE(ppe_type_str) && ppe_type_str[type])
+ type_str = ppe_type_str[type];
+
+ seq_printf(m, "%05x %s %7s", i, state_str, type_str);
+
+ switch (type) {
+ case PPE_PKT_TYPE_IPV4_HNAPT:
+ case PPE_PKT_TYPE_IPV4_DSLITE:
+ src_port = &hwe->ipv4.orig_tuple.src_port;
+ dest_port = &hwe->ipv4.orig_tuple.dest_port;
+ fallthrough;
+ case PPE_PKT_TYPE_IPV4_ROUTE:
+ src_addr = &hwe->ipv4.orig_tuple.src_ip;
+ dest_addr = &hwe->ipv4.orig_tuple.dest_ip;
+ break;
+ case PPE_PKT_TYPE_IPV6_ROUTE_5T:
+ src_port = &hwe->ipv6.src_port;
+ dest_port = &hwe->ipv6.dest_port;
+ fallthrough;
+ case PPE_PKT_TYPE_IPV6_ROUTE_3T:
+ case PPE_PKT_TYPE_IPV6_6RD:
+ src_addr = &hwe->ipv6.src_ip;
+ dest_addr = &hwe->ipv6.dest_ip;
+ ipv6 = true;
+ break;
+ default:
+ break;
+ }
+
+ if (src_addr && dest_addr) {
+ seq_puts(m, " orig=");
+ airoha_debugfs_ppe_print_tuple(m, src_addr, dest_addr,
+ src_port, dest_port, ipv6);
+ }
+
+ switch (type) {
+ case PPE_PKT_TYPE_IPV4_HNAPT:
+ case PPE_PKT_TYPE_IPV4_DSLITE:
+ src_port = &hwe->ipv4.new_tuple.src_port;
+ dest_port = &hwe->ipv4.new_tuple.dest_port;
+ fallthrough;
+ case PPE_PKT_TYPE_IPV4_ROUTE:
+ src_addr = &hwe->ipv4.new_tuple.src_ip;
+ dest_addr = &hwe->ipv4.new_tuple.dest_ip;
+ seq_puts(m, " new=");
+ airoha_debugfs_ppe_print_tuple(m, src_addr, dest_addr,
+ src_port, dest_port,
+ ipv6);
+ break;
+ default:
+ break;
+ }
+
+ if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
+ data = hwe->ipv6.data;
+ ib2 = hwe->ipv6.ib2;
+ l2 = &hwe->ipv6.l2;
+ } else {
+ data = hwe->ipv4.data;
+ ib2 = hwe->ipv4.ib2;
+ l2 = &hwe->ipv4.l2.common;
+ *((__be16 *)&h_source[4]) =
+ cpu_to_be16(hwe->ipv4.l2.src_mac_lo);
+ }
+
+ airoha_ppe_foe_entry_get_stats(ppe, i, &stats);
+
+ *((__be32 *)h_dest) = cpu_to_be32(l2->dest_mac_hi);
+ *((__be16 *)&h_dest[4]) = cpu_to_be16(l2->dest_mac_lo);
+ *((__be32 *)h_source) = cpu_to_be32(l2->src_mac_hi);
+
+ seq_printf(m, " eth=%pM->%pM etype=%04x data=%08x"
+ " vlan=%d,%d ib1=%08x ib2=%08x"
+ " packets=%llu bytes=%llu\n",
+ h_source, h_dest, l2->etype, data,
+ l2->vlan1, l2->vlan2, hwe->ib1, ib2,
+ stats.packets, stats.bytes);
+ }
+
+ return 0;
+}
+
+static int airoha_ppe_debugfs_foe_all_show(struct seq_file *m, void *private)
+{
+ return airoha_ppe_debugfs_foe_show(m, private, false);
+}
+DEFINE_SHOW_ATTRIBUTE(airoha_ppe_debugfs_foe_all);
+
+static int airoha_ppe_debugfs_foe_bind_show(struct seq_file *m, void *private)
+{
+ return airoha_ppe_debugfs_foe_show(m, private, true);
+}
+DEFINE_SHOW_ATTRIBUTE(airoha_ppe_debugfs_foe_bind);
+
+int airoha_ppe_debugfs_init(struct airoha_ppe *ppe)
+{
+ ppe->debugfs_dir = debugfs_create_dir("ppe", NULL);
+ debugfs_create_file("entries", 0444, ppe->debugfs_dir, ppe,
+ &airoha_ppe_debugfs_foe_all_fops);
+ debugfs_create_file("bind", 0444, ppe->debugfs_dir, ppe,
+ &airoha_ppe_debugfs_foe_bind_fops);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/airoha/airoha_regs.h b/drivers/net/ethernet/airoha/airoha_regs.h
new file mode 100644
index 000000000000..ed4e3407f4a0
--- /dev/null
+++ b/drivers/net/ethernet/airoha/airoha_regs.h
@@ -0,0 +1,923 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#ifndef AIROHA_REGS_H
+#define AIROHA_REGS_H
+
+#include <linux/types.h>
+
+/* FE */
+#define PSE_BASE 0x0100
+#define CSR_IFC_BASE 0x0200
+#define CDM1_BASE 0x0400
+#define GDM1_BASE 0x0500
+#define PPE1_BASE 0x0c00
+#define PPE2_BASE 0x1c00
+
+#define CDM2_BASE 0x1400
+#define GDM2_BASE 0x1500
+
+#define GDM3_BASE 0x1100
+#define GDM4_BASE 0x2500
+
+#define CDM_BASE(_n) \
+ ((_n) == 2 ? CDM2_BASE : CDM1_BASE)
+#define GDM_BASE(_n) \
+ ((_n) == 4 ? GDM4_BASE : \
+ (_n) == 3 ? GDM3_BASE : \
+ (_n) == 2 ? GDM2_BASE : GDM1_BASE)
+
+#define REG_FE_DMA_GLO_CFG 0x0000
+#define FE_DMA_GLO_L2_SPACE_MASK GENMASK(7, 4)
+#define FE_DMA_GLO_PG_SZ_MASK BIT(3)
+
+#define REG_FE_RST_GLO_CFG 0x0004
+#define FE_RST_GDM4_MBI_ARB_MASK BIT(3)
+#define FE_RST_GDM3_MBI_ARB_MASK BIT(2)
+#define FE_RST_CORE_MASK BIT(0)
+
+#define REG_FE_FOE_TS 0x0010
+
+#define REG_FE_WAN_PORT 0x0024
+#define WAN1_EN_MASK BIT(16)
+#define WAN1_MASK GENMASK(12, 8)
+#define WAN0_MASK GENMASK(4, 0)
+
+#define REG_FE_WAN_MAC_H 0x0030
+#define REG_FE_LAN_MAC_H 0x0040
+
+#define REG_FE_MAC_LMIN(_n) ((_n) + 0x04)
+#define REG_FE_MAC_LMAX(_n) ((_n) + 0x08)
+
+#define REG_FE_CDM1_OQ_MAP0 0x0050
+#define REG_FE_CDM1_OQ_MAP1 0x0054
+#define REG_FE_CDM1_OQ_MAP2 0x0058
+#define REG_FE_CDM1_OQ_MAP3 0x005c
+
+#define REG_FE_PCE_CFG 0x0070
+#define PCE_DPI_EN_MASK BIT(2)
+#define PCE_KA_EN_MASK BIT(1)
+#define PCE_MC_EN_MASK BIT(0)
+
+#define REG_FE_PSE_QUEUE_CFG_WR 0x0080
+#define PSE_CFG_PORT_ID_MASK GENMASK(27, 24)
+#define PSE_CFG_QUEUE_ID_MASK GENMASK(20, 16)
+#define PSE_CFG_WR_EN_MASK BIT(8)
+#define PSE_CFG_OQRSV_SEL_MASK BIT(0)
+
+#define REG_FE_PSE_QUEUE_CFG_VAL 0x0084
+#define PSE_CFG_OQ_RSV_MASK GENMASK(13, 0)
+
+#define PSE_FQ_CFG 0x008c
+#define PSE_FQ_LIMIT_MASK GENMASK(14, 0)
+
+#define REG_FE_PSE_BUF_SET 0x0090
+#define PSE_SHARE_USED_LTHD_MASK GENMASK(31, 16)
+#define PSE_ALLRSV_MASK GENMASK(14, 0)
+
+#define REG_PSE_SHARE_USED_THD 0x0094
+#define PSE_SHARE_USED_MTHD_MASK GENMASK(31, 16)
+#define PSE_SHARE_USED_HTHD_MASK GENMASK(15, 0)
+
+#define REG_GDM_MISC_CFG 0x0148
+#define GDM2_RDM_ACK_WAIT_PREF_MASK BIT(9)
+#define GDM2_CHN_VLD_MODE_MASK BIT(5)
+
+#define REG_FE_CSR_IFC_CFG CSR_IFC_BASE
+#define FE_IFC_EN_MASK BIT(0)
+
+#define REG_FE_VIP_PORT_EN 0x01f0
+#define REG_FE_IFC_PORT_EN 0x01f4
+
+#define REG_PSE_IQ_REV1 (PSE_BASE + 0x08)
+#define PSE_IQ_RES1_P2_MASK GENMASK(23, 16)
+
+#define REG_PSE_IQ_REV2 (PSE_BASE + 0x0c)
+#define PSE_IQ_RES2_P5_MASK GENMASK(15, 8)
+#define PSE_IQ_RES2_P4_MASK GENMASK(7, 0)
+
+#define REG_FE_VIP_EN(_n) (0x0300 + ((_n) << 3))
+#define PATN_FCPU_EN_MASK BIT(7)
+#define PATN_SWP_EN_MASK BIT(6)
+#define PATN_DP_EN_MASK BIT(5)
+#define PATN_SP_EN_MASK BIT(4)
+#define PATN_TYPE_MASK GENMASK(3, 1)
+#define PATN_EN_MASK BIT(0)
+
+#define REG_FE_VIP_PATN(_n) (0x0304 + ((_n) << 3))
+#define PATN_DP_MASK GENMASK(31, 16)
+#define PATN_SP_MASK GENMASK(15, 0)
+
+#define REG_CDM_VLAN_CTRL(_n) CDM_BASE(_n)
+#define CDM_VLAN_MASK GENMASK(31, 16)
+
+#define REG_CDM_FWD_CFG(_n) (CDM_BASE(_n) + 0x08)
+#define CDM_OAM_QSEL_MASK GENMASK(31, 27)
+#define CDM_VIP_QSEL_MASK GENMASK(24, 20)
+
+#define REG_CDM_CRSN_QSEL(_n, _m) (CDM_BASE(_n) + 0x10 + ((_m) << 2))
+#define CDM_CRSN_QSEL_REASON_MASK(_n) \
+ GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3))
+
+#define REG_GDM_FWD_CFG(_n) GDM_BASE(_n)
+#define GDM_PAD_EN_MASK BIT(28)
+#define GDM_DROP_CRC_ERR_MASK BIT(23)
+#define GDM_IP4_CKSUM_MASK BIT(22)
+#define GDM_TCP_CKSUM_MASK BIT(21)
+#define GDM_UDP_CKSUM_MASK BIT(20)
+#define GDM_STRIP_CRC_MASK BIT(16)
+#define GDM_UCFQ_MASK GENMASK(15, 12)
+#define GDM_BCFQ_MASK GENMASK(11, 8)
+#define GDM_MCFQ_MASK GENMASK(7, 4)
+#define GDM_OCFQ_MASK GENMASK(3, 0)
+
+#define REG_GDM_INGRESS_CFG(_n) (GDM_BASE(_n) + 0x10)
+#define GDM_INGRESS_FC_EN_MASK BIT(1)
+#define GDM_STAG_EN_MASK BIT(0)
+
+#define REG_GDM_LEN_CFG(_n) (GDM_BASE(_n) + 0x14)
+#define GDM_SHORT_LEN_MASK GENMASK(13, 0)
+#define GDM_LONG_LEN_MASK GENMASK(29, 16)
+
+#define REG_GDM_LPBK_CFG(_n) (GDM_BASE(_n) + 0x1c)
+#define LPBK_GAP_MASK GENMASK(31, 24)
+#define LPBK_LEN_MASK GENMASK(23, 10)
+#define LPBK_CHAN_MASK GENMASK(8, 4)
+#define LPBK_MODE_MASK GENMASK(3, 1)
+#define LBK_GAP_MODE_MASK BIT(3)
+#define LBK_LEN_MODE_MASK BIT(2)
+#define LBK_CHAN_MODE_MASK BIT(1)
+#define LPBK_EN_MASK BIT(0)
+
+#define REG_GDM_CHN_RLS(_n) (GDM_BASE(_n) + 0x20)
+#define MBI_RX_AGE_SEL_MASK GENMASK(26, 25)
+#define MBI_TX_AGE_SEL_MASK GENMASK(18, 17)
+
+#define REG_GDM_TXCHN_EN(_n) (GDM_BASE(_n) + 0x24)
+#define REG_GDM_RXCHN_EN(_n) (GDM_BASE(_n) + 0x28)
+
+#define REG_FE_CPORT_CFG (GDM1_BASE + 0x40)
+#define FE_CPORT_PAD BIT(26)
+#define FE_CPORT_PORT_XFC_MASK BIT(25)
+#define FE_CPORT_QUEUE_XFC_MASK BIT(24)
+
+#define REG_FE_GDM_MIB_CLEAR(_n) (GDM_BASE(_n) + 0xf0)
+#define FE_GDM_MIB_RX_CLEAR_MASK BIT(1)
+#define FE_GDM_MIB_TX_CLEAR_MASK BIT(0)
+
+#define REG_FE_GDM_MIB_CFG(_n) (GDM_BASE(_n) + 0xf4)
+#define FE_STRICT_RFC2819_MODE_MASK BIT(31)
+#define FE_GDM_TX_MIB_SPLIT_EN_MASK BIT(17)
+#define FE_GDM_RX_MIB_SPLIT_EN_MASK BIT(16)
+#define FE_TX_MIB_ID_MASK GENMASK(15, 8)
+#define FE_RX_MIB_ID_MASK GENMASK(7, 0)
+
+#define REG_FE_GDM_TX_OK_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x104)
+#define REG_FE_GDM_TX_OK_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x10c)
+#define REG_FE_GDM_TX_ETH_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x110)
+#define REG_FE_GDM_TX_ETH_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x114)
+#define REG_FE_GDM_TX_ETH_DROP_CNT(_n) (GDM_BASE(_n) + 0x118)
+#define REG_FE_GDM_TX_ETH_BC_CNT(_n) (GDM_BASE(_n) + 0x11c)
+#define REG_FE_GDM_TX_ETH_MC_CNT(_n) (GDM_BASE(_n) + 0x120)
+#define REG_FE_GDM_TX_ETH_RUNT_CNT(_n) (GDM_BASE(_n) + 0x124)
+#define REG_FE_GDM_TX_ETH_LONG_CNT(_n) (GDM_BASE(_n) + 0x128)
+#define REG_FE_GDM_TX_ETH_E64_CNT_L(_n) (GDM_BASE(_n) + 0x12c)
+#define REG_FE_GDM_TX_ETH_L64_CNT_L(_n) (GDM_BASE(_n) + 0x130)
+#define REG_FE_GDM_TX_ETH_L127_CNT_L(_n) (GDM_BASE(_n) + 0x134)
+#define REG_FE_GDM_TX_ETH_L255_CNT_L(_n) (GDM_BASE(_n) + 0x138)
+#define REG_FE_GDM_TX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x13c)
+#define REG_FE_GDM_TX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x140)
+
+#define REG_FE_GDM_RX_OK_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x148)
+#define REG_FE_GDM_RX_FC_DROP_CNT(_n) (GDM_BASE(_n) + 0x14c)
+#define REG_FE_GDM_RX_RC_DROP_CNT(_n) (GDM_BASE(_n) + 0x150)
+#define REG_FE_GDM_RX_OVERFLOW_DROP_CNT(_n) (GDM_BASE(_n) + 0x154)
+#define REG_FE_GDM_RX_ERROR_DROP_CNT(_n) (GDM_BASE(_n) + 0x158)
+#define REG_FE_GDM_RX_OK_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x15c)
+#define REG_FE_GDM_RX_ETH_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x160)
+#define REG_FE_GDM_RX_ETH_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x164)
+#define REG_FE_GDM_RX_ETH_DROP_CNT(_n) (GDM_BASE(_n) + 0x168)
+#define REG_FE_GDM_RX_ETH_BC_CNT(_n) (GDM_BASE(_n) + 0x16c)
+#define REG_FE_GDM_RX_ETH_MC_CNT(_n) (GDM_BASE(_n) + 0x170)
+#define REG_FE_GDM_RX_ETH_CRC_ERR_CNT(_n) (GDM_BASE(_n) + 0x174)
+#define REG_FE_GDM_RX_ETH_FRAG_CNT(_n) (GDM_BASE(_n) + 0x178)
+#define REG_FE_GDM_RX_ETH_JABBER_CNT(_n) (GDM_BASE(_n) + 0x17c)
+#define REG_FE_GDM_RX_ETH_RUNT_CNT(_n) (GDM_BASE(_n) + 0x180)
+#define REG_FE_GDM_RX_ETH_LONG_CNT(_n) (GDM_BASE(_n) + 0x184)
+#define REG_FE_GDM_RX_ETH_E64_CNT_L(_n) (GDM_BASE(_n) + 0x188)
+#define REG_FE_GDM_RX_ETH_L64_CNT_L(_n) (GDM_BASE(_n) + 0x18c)
+#define REG_FE_GDM_RX_ETH_L127_CNT_L(_n) (GDM_BASE(_n) + 0x190)
+#define REG_FE_GDM_RX_ETH_L255_CNT_L(_n) (GDM_BASE(_n) + 0x194)
+#define REG_FE_GDM_RX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x198)
+#define REG_FE_GDM_RX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x19c)
+
+#define REG_GDM_SRC_PORT_SET(_n) (GDM_BASE(_n) + 0x23c)
+#define GDM_SPORT_OFF2_MASK GENMASK(19, 16)
+#define GDM_SPORT_OFF1_MASK GENMASK(15, 12)
+#define GDM_SPORT_OFF0_MASK GENMASK(11, 8)
+
+#define REG_FE_GDM_TX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x280)
+#define REG_FE_GDM_TX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x284)
+#define REG_FE_GDM_TX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x288)
+#define REG_FE_GDM_TX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x28c)
+
+#define REG_FE_GDM_RX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x290)
+#define REG_FE_GDM_RX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x294)
+#define REG_FE_GDM_RX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x298)
+#define REG_FE_GDM_RX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x29c)
+#define REG_FE_GDM_TX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2b8)
+#define REG_FE_GDM_TX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2bc)
+#define REG_FE_GDM_TX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2c0)
+#define REG_FE_GDM_TX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2c4)
+#define REG_FE_GDM_TX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2c8)
+#define REG_FE_GDM_TX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2cc)
+#define REG_FE_GDM_RX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2e8)
+#define REG_FE_GDM_RX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2ec)
+#define REG_FE_GDM_RX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2f0)
+#define REG_FE_GDM_RX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2f4)
+#define REG_FE_GDM_RX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2f8)
+#define REG_FE_GDM_RX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2fc)
+
+#define REG_PPE_GLO_CFG(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x200)
+#define PPE_GLO_CFG_BUSY_MASK BIT(31)
+#define PPE_GLO_CFG_FLOW_DROP_UPDATE_MASK BIT(9)
+#define PPE_GLO_CFG_PSE_HASH_OFS_MASK BIT(6)
+#define PPE_GLO_CFG_PPE_BSWAP_MASK BIT(5)
+#define PPE_GLO_CFG_TTL_DROP_MASK BIT(4)
+#define PPE_GLO_CFG_IP4_CS_DROP_MASK BIT(3)
+#define PPE_GLO_CFG_IP4_L4_CS_DROP_MASK BIT(2)
+#define PPE_GLO_CFG_EN_MASK BIT(0)
+
+#define REG_PPE_PPE_FLOW_CFG(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x204)
+#define PPE_FLOW_CFG_IP6_HASH_GRE_KEY_MASK BIT(20)
+#define PPE_FLOW_CFG_IP4_HASH_GRE_KEY_MASK BIT(19)
+#define PPE_FLOW_CFG_IP4_HASH_FLOW_LABEL_MASK BIT(18)
+#define PPE_FLOW_CFG_IP4_NAT_FRAG_MASK BIT(17)
+#define PPE_FLOW_CFG_IP_PROTO_BLACKLIST_MASK BIT(16)
+#define PPE_FLOW_CFG_IP4_DSLITE_MASK BIT(14)
+#define PPE_FLOW_CFG_IP4_NAPT_MASK BIT(13)
+#define PPE_FLOW_CFG_IP4_NAT_MASK BIT(12)
+#define PPE_FLOW_CFG_IP6_6RD_MASK BIT(10)
+#define PPE_FLOW_CFG_IP6_5T_ROUTE_MASK BIT(9)
+#define PPE_FLOW_CFG_IP6_3T_ROUTE_MASK BIT(8)
+#define PPE_FLOW_CFG_IP4_UDP_FRAG_MASK BIT(7)
+#define PPE_FLOW_CFG_IP4_TCP_FRAG_MASK BIT(6)
+
+#define REG_PPE_IP_PROTO_CHK(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x208)
+#define PPE_IP_PROTO_CHK_IPV4_MASK GENMASK(31, 16)
+#define PPE_IP_PROTO_CHK_IPV6_MASK GENMASK(15, 0)
+
+#define REG_PPE_TB_CFG(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x21c)
+#define PPE_SRAM_TB_NUM_ENTRY_MASK GENMASK(26, 24)
+#define PPE_TB_CFG_KEEPALIVE_MASK GENMASK(13, 12)
+#define PPE_TB_CFG_AGE_TCP_FIN_MASK BIT(11)
+#define PPE_TB_CFG_AGE_UDP_MASK BIT(10)
+#define PPE_TB_CFG_AGE_TCP_MASK BIT(9)
+#define PPE_TB_CFG_AGE_UNBIND_MASK BIT(8)
+#define PPE_TB_CFG_AGE_NON_L4_MASK BIT(7)
+#define PPE_TB_CFG_AGE_PREBIND_MASK BIT(6)
+#define PPE_TB_CFG_SEARCH_MISS_MASK GENMASK(5, 4)
+#define PPE_TB_ENTRY_SIZE_MASK BIT(3)
+#define PPE_DRAM_TB_NUM_ENTRY_MASK GENMASK(2, 0)
+
+#define REG_PPE_TB_BASE(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x220)
+
+#define REG_PPE_BIND_RATE(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x228)
+#define PPE_BIND_RATE_L2B_BIND_MASK GENMASK(31, 16)
+#define PPE_BIND_RATE_BIND_MASK GENMASK(15, 0)
+
+#define REG_PPE_BIND_LIMIT0(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x22c)
+#define PPE_BIND_LIMIT0_HALF_MASK GENMASK(29, 16)
+#define PPE_BIND_LIMIT0_QUARTER_MASK GENMASK(13, 0)
+
+#define REG_PPE_BIND_LIMIT1(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x230)
+#define PPE_BIND_LIMIT1_NON_L4_MASK GENMASK(23, 16)
+#define PPE_BIND_LIMIT1_FULL_MASK GENMASK(13, 0)
+
+#define REG_PPE_BND_AGE0(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x23c)
+#define PPE_BIND_AGE0_DELTA_NON_L4 GENMASK(30, 16)
+#define PPE_BIND_AGE0_DELTA_UDP GENMASK(14, 0)
+
+#define REG_PPE_UNBIND_AGE(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x238)
+#define PPE_UNBIND_AGE_MIN_PACKETS_MASK GENMASK(31, 16)
+#define PPE_UNBIND_AGE_DELTA_MASK GENMASK(7, 0)
+
+#define REG_PPE_BND_AGE1(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x240)
+#define PPE_BIND_AGE1_DELTA_TCP_FIN GENMASK(30, 16)
+#define PPE_BIND_AGE1_DELTA_TCP GENMASK(14, 0)
+
+#define REG_PPE_HASH_SEED(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x244)
+#define PPE_HASH_SEED 0x12345678
+
+#define REG_PPE_DFT_CPORT0(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x248)
+#define DFT_CPORT_MASK(_n) GENMASK(3 + ((_n) << 2), ((_n) << 2))
+
+#define REG_PPE_DFT_CPORT1(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x24c)
+
+#define REG_PPE_TB_HASH_CFG(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x250)
+#define PPE_DRAM_HASH1_MODE_MASK GENMASK(31, 28)
+#define PPE_DRAM_HASH1_EN_MASK BIT(24)
+#define PPE_DRAM_HASH0_MODE_MASK GENMASK(23, 20)
+#define PPE_DRAM_TABLE_EN_MASK BIT(16)
+#define PPE_SRAM_HASH1_MODE_MASK GENMASK(15, 12)
+#define PPE_SRAM_HASH1_EN_MASK BIT(8)
+#define PPE_SRAM_HASH0_MODE_MASK GENMASK(7, 4)
+#define PPE_SRAM_TABLE_EN_MASK BIT(0)
+
+#define REG_PPE_MTU_BASE(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x304)
+#define REG_PPE_MTU(_m, _n) (REG_PPE_MTU_BASE(_m) + ((_n) << 2))
+#define FP1_EGRESS_MTU_MASK GENMASK(29, 16)
+#define FP0_EGRESS_MTU_MASK GENMASK(13, 0)
+
+#define REG_PPE_RAM_CTRL(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x31c)
+#define PPE_SRAM_CTRL_ACK_MASK BIT(31)
+#define PPE_SRAM_CTRL_DUAL_SUCESS_MASK BIT(30)
+#define PPE_SRAM_CTRL_ENTRY_MASK GENMASK(23, 8)
+#define PPE_SRAM_WR_DUAL_DIRECTION_MASK BIT(2)
+#define PPE_SRAM_CTRL_WR_MASK BIT(1)
+#define PPE_SRAM_CTRL_REQ_MASK BIT(0)
+
+#define REG_PPE_RAM_BASE(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x320)
+#define REG_PPE_RAM_ENTRY(_m, _n) (REG_PPE_RAM_BASE(_m) + ((_n) << 2))
+
+#define REG_UPDMEM_CTRL(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x370)
+#define PPE_UPDMEM_ACK_MASK BIT(31)
+#define PPE_UPDMEM_ADDR_MASK GENMASK(11, 8)
+#define PPE_UPDMEM_OFFSET_MASK GENMASK(7, 4)
+#define PPE_UPDMEM_SEL_MASK GENMASK(3, 2)
+#define PPE_UPDMEM_WR_MASK BIT(1)
+#define PPE_UPDMEM_REQ_MASK BIT(0)
+
+#define REG_UPDMEM_DATA(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x374)
+
+#define REG_IP_FRAG_FP 0x2010
+#define IP_ASSEMBLE_PORT_MASK GENMASK(24, 21)
+#define IP_ASSEMBLE_NBQ_MASK GENMASK(20, 16)
+#define IP_FRAGMENT_PORT_MASK GENMASK(8, 5)
+#define IP_FRAGMENT_NBQ_MASK GENMASK(4, 0)
+
+#define REG_MC_VLAN_EN 0x2100
+#define MC_VLAN_EN_MASK BIT(0)
+
+#define REG_MC_VLAN_CFG 0x2104
+#define MC_VLAN_CFG_CMD_DONE_MASK BIT(31)
+#define MC_VLAN_CFG_TABLE_ID_MASK GENMASK(21, 16)
+#define MC_VLAN_CFG_PORT_ID_MASK GENMASK(11, 8)
+#define MC_VLAN_CFG_TABLE_SEL_MASK BIT(4)
+#define MC_VLAN_CFG_RW_MASK BIT(0)
+
+#define REG_MC_VLAN_DATA 0x2108
+
+#define REG_SP_DFT_CPORT(_n) (0x20e0 + ((_n) << 2))
+#define SP_CPORT_DFT_MASK GENMASK(2, 0)
+#define SP_CPORT_MASK(_n) GENMASK(3 + ((_n) << 2), ((_n) << 2))
+
+#define REG_SRC_PORT_FC_MAP6 0x2298
+#define FC_ID_OF_SRC_PORT27_MASK GENMASK(28, 24)
+#define FC_ID_OF_SRC_PORT26_MASK GENMASK(20, 16)
+#define FC_ID_OF_SRC_PORT25_MASK GENMASK(12, 8)
+#define FC_ID_OF_SRC_PORT24_MASK GENMASK(4, 0)
+
+#define REG_CDM5_RX_OQ1_DROP_CNT 0x29d4
+
+/* QDMA */
+#define REG_QDMA_GLOBAL_CFG 0x0004
+#define GLOBAL_CFG_RX_2B_OFFSET_MASK BIT(31)
+#define GLOBAL_CFG_DMA_PREFERENCE_MASK GENMASK(30, 29)
+#define GLOBAL_CFG_CPU_TXR_RR_MASK BIT(28)
+#define GLOBAL_CFG_DSCP_BYTE_SWAP_MASK BIT(27)
+#define GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK BIT(26)
+#define GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK BIT(25)
+#define GLOBAL_CFG_OAM_MODIFY_MASK BIT(24)
+#define GLOBAL_CFG_RESET_MASK BIT(23)
+#define GLOBAL_CFG_RESET_DONE_MASK BIT(22)
+#define GLOBAL_CFG_MULTICAST_EN_MASK BIT(21)
+#define GLOBAL_CFG_IRQ1_EN_MASK BIT(20)
+#define GLOBAL_CFG_IRQ0_EN_MASK BIT(19)
+#define GLOBAL_CFG_LOOPCNT_EN_MASK BIT(18)
+#define GLOBAL_CFG_RD_BYPASS_WR_MASK BIT(17)
+#define GLOBAL_CFG_QDMA_LOOPBACK_MASK BIT(16)
+#define GLOBAL_CFG_LPBK_RXQ_SEL_MASK GENMASK(13, 8)
+#define GLOBAL_CFG_CHECK_DONE_MASK BIT(7)
+#define GLOBAL_CFG_TX_WB_DONE_MASK BIT(6)
+#define GLOBAL_CFG_MAX_ISSUE_NUM_MASK GENMASK(5, 4)
+#define GLOBAL_CFG_RX_DMA_BUSY_MASK BIT(3)
+#define GLOBAL_CFG_RX_DMA_EN_MASK BIT(2)
+#define GLOBAL_CFG_TX_DMA_BUSY_MASK BIT(1)
+#define GLOBAL_CFG_TX_DMA_EN_MASK BIT(0)
+
+#define REG_FWD_DSCP_BASE 0x0010
+#define REG_FWD_BUF_BASE 0x0014
+
+#define REG_HW_FWD_DSCP_CFG 0x0018
+#define HW_FWD_DSCP_PAYLOAD_SIZE_MASK GENMASK(29, 28)
+#define HW_FWD_DSCP_SCATTER_LEN_MASK GENMASK(17, 16)
+#define HW_FWD_DSCP_MIN_SCATTER_LEN_MASK GENMASK(15, 0)
+
+#define REG_INT_STATUS(_n) \
+ (((_n) == 4) ? 0x0730 : \
+ ((_n) == 3) ? 0x0724 : \
+ ((_n) == 2) ? 0x0720 : \
+ ((_n) == 1) ? 0x0024 : 0x0020)
+
+#define REG_INT_ENABLE(_b, _n) \
+ (((_n) == 4) ? 0x0750 + ((_b) << 5) : \
+ ((_n) == 3) ? 0x0744 + ((_b) << 5) : \
+ ((_n) == 2) ? 0x0740 + ((_b) << 5) : \
+ ((_n) == 1) ? 0x002c + ((_b) << 3) : \
+ 0x0028 + ((_b) << 3))
+
+/* QDMA_CSR_INT_ENABLE1 */
+#define RX15_COHERENT_INT_MASK BIT(31)
+#define RX14_COHERENT_INT_MASK BIT(30)
+#define RX13_COHERENT_INT_MASK BIT(29)
+#define RX12_COHERENT_INT_MASK BIT(28)
+#define RX11_COHERENT_INT_MASK BIT(27)
+#define RX10_COHERENT_INT_MASK BIT(26)
+#define RX9_COHERENT_INT_MASK BIT(25)
+#define RX8_COHERENT_INT_MASK BIT(24)
+#define RX7_COHERENT_INT_MASK BIT(23)
+#define RX6_COHERENT_INT_MASK BIT(22)
+#define RX5_COHERENT_INT_MASK BIT(21)
+#define RX4_COHERENT_INT_MASK BIT(20)
+#define RX3_COHERENT_INT_MASK BIT(19)
+#define RX2_COHERENT_INT_MASK BIT(18)
+#define RX1_COHERENT_INT_MASK BIT(17)
+#define RX0_COHERENT_INT_MASK BIT(16)
+#define TX7_COHERENT_INT_MASK BIT(15)
+#define TX6_COHERENT_INT_MASK BIT(14)
+#define TX5_COHERENT_INT_MASK BIT(13)
+#define TX4_COHERENT_INT_MASK BIT(12)
+#define TX3_COHERENT_INT_MASK BIT(11)
+#define TX2_COHERENT_INT_MASK BIT(10)
+#define TX1_COHERENT_INT_MASK BIT(9)
+#define TX0_COHERENT_INT_MASK BIT(8)
+#define CNT_OVER_FLOW_INT_MASK BIT(7)
+#define IRQ1_FULL_INT_MASK BIT(5)
+#define IRQ1_INT_MASK BIT(4)
+#define HWFWD_DSCP_LOW_INT_MASK BIT(3)
+#define HWFWD_DSCP_EMPTY_INT_MASK BIT(2)
+#define IRQ0_FULL_INT_MASK BIT(1)
+#define IRQ0_INT_MASK BIT(0)
+
+#define RX_COHERENT_LOW_INT_MASK \
+ (RX15_COHERENT_INT_MASK | RX14_COHERENT_INT_MASK | \
+ RX13_COHERENT_INT_MASK | RX12_COHERENT_INT_MASK | \
+ RX11_COHERENT_INT_MASK | RX10_COHERENT_INT_MASK | \
+ RX9_COHERENT_INT_MASK | RX8_COHERENT_INT_MASK | \
+ RX7_COHERENT_INT_MASK | RX6_COHERENT_INT_MASK | \
+ RX5_COHERENT_INT_MASK | RX4_COHERENT_INT_MASK | \
+ RX3_COHERENT_INT_MASK | RX2_COHERENT_INT_MASK | \
+ RX1_COHERENT_INT_MASK | RX0_COHERENT_INT_MASK)
+
+#define RX_COHERENT_LOW_OFFSET __ffs(RX_COHERENT_LOW_INT_MASK)
+#define INT_RX0_MASK(_n) \
+ (((_n) << RX_COHERENT_LOW_OFFSET) & RX_COHERENT_LOW_INT_MASK)
+
+#define TX_COHERENT_LOW_INT_MASK \
+ (TX7_COHERENT_INT_MASK | TX6_COHERENT_INT_MASK | \
+ TX5_COHERENT_INT_MASK | TX4_COHERENT_INT_MASK | \
+ TX3_COHERENT_INT_MASK | TX2_COHERENT_INT_MASK | \
+ TX1_COHERENT_INT_MASK | TX0_COHERENT_INT_MASK)
+
+#define TX_DONE_INT_MASK(_n) \
+ ((_n) ? IRQ1_INT_MASK | IRQ1_FULL_INT_MASK \
+ : IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
+
+#define INT_TX_MASK \
+ (IRQ1_INT_MASK | IRQ1_FULL_INT_MASK | \
+ IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
+
+/* QDMA_CSR_INT_ENABLE2 */
+#define RX15_NO_CPU_DSCP_INT_MASK BIT(31)
+#define RX14_NO_CPU_DSCP_INT_MASK BIT(30)
+#define RX13_NO_CPU_DSCP_INT_MASK BIT(29)
+#define RX12_NO_CPU_DSCP_INT_MASK BIT(28)
+#define RX11_NO_CPU_DSCP_INT_MASK BIT(27)
+#define RX10_NO_CPU_DSCP_INT_MASK BIT(26)
+#define RX9_NO_CPU_DSCP_INT_MASK BIT(25)
+#define RX8_NO_CPU_DSCP_INT_MASK BIT(24)
+#define RX7_NO_CPU_DSCP_INT_MASK BIT(23)
+#define RX6_NO_CPU_DSCP_INT_MASK BIT(22)
+#define RX5_NO_CPU_DSCP_INT_MASK BIT(21)
+#define RX4_NO_CPU_DSCP_INT_MASK BIT(20)
+#define RX3_NO_CPU_DSCP_INT_MASK BIT(19)
+#define RX2_NO_CPU_DSCP_INT_MASK BIT(18)
+#define RX1_NO_CPU_DSCP_INT_MASK BIT(17)
+#define RX0_NO_CPU_DSCP_INT_MASK BIT(16)
+#define RX15_DONE_INT_MASK BIT(15)
+#define RX14_DONE_INT_MASK BIT(14)
+#define RX13_DONE_INT_MASK BIT(13)
+#define RX12_DONE_INT_MASK BIT(12)
+#define RX11_DONE_INT_MASK BIT(11)
+#define RX10_DONE_INT_MASK BIT(10)
+#define RX9_DONE_INT_MASK BIT(9)
+#define RX8_DONE_INT_MASK BIT(8)
+#define RX7_DONE_INT_MASK BIT(7)
+#define RX6_DONE_INT_MASK BIT(6)
+#define RX5_DONE_INT_MASK BIT(5)
+#define RX4_DONE_INT_MASK BIT(4)
+#define RX3_DONE_INT_MASK BIT(3)
+#define RX2_DONE_INT_MASK BIT(2)
+#define RX1_DONE_INT_MASK BIT(1)
+#define RX0_DONE_INT_MASK BIT(0)
+
+#define RX_NO_CPU_DSCP_LOW_INT_MASK \
+ (RX15_NO_CPU_DSCP_INT_MASK | RX14_NO_CPU_DSCP_INT_MASK | \
+ RX13_NO_CPU_DSCP_INT_MASK | RX12_NO_CPU_DSCP_INT_MASK | \
+ RX11_NO_CPU_DSCP_INT_MASK | RX10_NO_CPU_DSCP_INT_MASK | \
+ RX9_NO_CPU_DSCP_INT_MASK | RX8_NO_CPU_DSCP_INT_MASK | \
+ RX7_NO_CPU_DSCP_INT_MASK | RX6_NO_CPU_DSCP_INT_MASK | \
+ RX5_NO_CPU_DSCP_INT_MASK | RX4_NO_CPU_DSCP_INT_MASK | \
+ RX3_NO_CPU_DSCP_INT_MASK | RX2_NO_CPU_DSCP_INT_MASK | \
+ RX1_NO_CPU_DSCP_INT_MASK | RX0_NO_CPU_DSCP_INT_MASK)
+
+#define RX_DONE_LOW_INT_MASK \
+ (RX15_DONE_INT_MASK | RX14_DONE_INT_MASK | \
+ RX13_DONE_INT_MASK | RX12_DONE_INT_MASK | \
+ RX11_DONE_INT_MASK | RX10_DONE_INT_MASK | \
+ RX9_DONE_INT_MASK | RX8_DONE_INT_MASK | \
+ RX7_DONE_INT_MASK | RX6_DONE_INT_MASK | \
+ RX5_DONE_INT_MASK | RX4_DONE_INT_MASK | \
+ RX3_DONE_INT_MASK | RX2_DONE_INT_MASK | \
+ RX1_DONE_INT_MASK | RX0_DONE_INT_MASK)
+
+#define RX_NO_CPU_DSCP_LOW_OFFSET __ffs(RX_NO_CPU_DSCP_LOW_INT_MASK)
+#define INT_RX1_MASK(_n) \
+ ((((_n) << RX_NO_CPU_DSCP_LOW_OFFSET) & RX_NO_CPU_DSCP_LOW_INT_MASK) | \
+ (RX_DONE_LOW_INT_MASK & (_n)))
+
+/* QDMA_CSR_INT_ENABLE3 */
+#define RX31_NO_CPU_DSCP_INT_MASK BIT(31)
+#define RX30_NO_CPU_DSCP_INT_MASK BIT(30)
+#define RX29_NO_CPU_DSCP_INT_MASK BIT(29)
+#define RX28_NO_CPU_DSCP_INT_MASK BIT(28)
+#define RX27_NO_CPU_DSCP_INT_MASK BIT(27)
+#define RX26_NO_CPU_DSCP_INT_MASK BIT(26)
+#define RX25_NO_CPU_DSCP_INT_MASK BIT(25)
+#define RX24_NO_CPU_DSCP_INT_MASK BIT(24)
+#define RX23_NO_CPU_DSCP_INT_MASK BIT(23)
+#define RX22_NO_CPU_DSCP_INT_MASK BIT(22)
+#define RX21_NO_CPU_DSCP_INT_MASK BIT(21)
+#define RX20_NO_CPU_DSCP_INT_MASK BIT(20)
+#define RX19_NO_CPU_DSCP_INT_MASK BIT(19)
+#define RX18_NO_CPU_DSCP_INT_MASK BIT(18)
+#define RX17_NO_CPU_DSCP_INT_MASK BIT(17)
+#define RX16_NO_CPU_DSCP_INT_MASK BIT(16)
+#define RX31_DONE_INT_MASK BIT(15)
+#define RX30_DONE_INT_MASK BIT(14)
+#define RX29_DONE_INT_MASK BIT(13)
+#define RX28_DONE_INT_MASK BIT(12)
+#define RX27_DONE_INT_MASK BIT(11)
+#define RX26_DONE_INT_MASK BIT(10)
+#define RX25_DONE_INT_MASK BIT(9)
+#define RX24_DONE_INT_MASK BIT(8)
+#define RX23_DONE_INT_MASK BIT(7)
+#define RX22_DONE_INT_MASK BIT(6)
+#define RX21_DONE_INT_MASK BIT(5)
+#define RX20_DONE_INT_MASK BIT(4)
+#define RX19_DONE_INT_MASK BIT(3)
+#define RX18_DONE_INT_MASK BIT(2)
+#define RX17_DONE_INT_MASK BIT(1)
+#define RX16_DONE_INT_MASK BIT(0)
+
+#define RX_NO_CPU_DSCP_HIGH_INT_MASK \
+ (RX31_NO_CPU_DSCP_INT_MASK | RX30_NO_CPU_DSCP_INT_MASK | \
+ RX29_NO_CPU_DSCP_INT_MASK | RX28_NO_CPU_DSCP_INT_MASK | \
+ RX27_NO_CPU_DSCP_INT_MASK | RX26_NO_CPU_DSCP_INT_MASK | \
+ RX25_NO_CPU_DSCP_INT_MASK | RX24_NO_CPU_DSCP_INT_MASK | \
+ RX23_NO_CPU_DSCP_INT_MASK | RX22_NO_CPU_DSCP_INT_MASK | \
+ RX21_NO_CPU_DSCP_INT_MASK | RX20_NO_CPU_DSCP_INT_MASK | \
+ RX19_NO_CPU_DSCP_INT_MASK | RX18_NO_CPU_DSCP_INT_MASK | \
+ RX17_NO_CPU_DSCP_INT_MASK | RX16_NO_CPU_DSCP_INT_MASK)
+
+#define RX_DONE_HIGH_INT_MASK \
+ (RX31_DONE_INT_MASK | RX30_DONE_INT_MASK | \
+ RX29_DONE_INT_MASK | RX28_DONE_INT_MASK | \
+ RX27_DONE_INT_MASK | RX26_DONE_INT_MASK | \
+ RX25_DONE_INT_MASK | RX24_DONE_INT_MASK | \
+ RX23_DONE_INT_MASK | RX22_DONE_INT_MASK | \
+ RX21_DONE_INT_MASK | RX20_DONE_INT_MASK | \
+ RX19_DONE_INT_MASK | RX18_DONE_INT_MASK | \
+ RX17_DONE_INT_MASK | RX16_DONE_INT_MASK)
+
+#define RX_DONE_HIGH_OFFSET fls(RX_DONE_HIGH_INT_MASK)
+#define RX_DONE_INT_MASK \
+ ((RX_DONE_HIGH_INT_MASK << RX_DONE_HIGH_OFFSET) | RX_DONE_LOW_INT_MASK)
+
+#define INT_RX2_MASK(_n) \
+ ((RX_NO_CPU_DSCP_HIGH_INT_MASK & (_n)) | \
+ (((_n) >> RX_DONE_HIGH_OFFSET) & RX_DONE_HIGH_INT_MASK))
+
+/* QDMA_CSR_INT_ENABLE4 */
+#define RX31_COHERENT_INT_MASK BIT(31)
+#define RX30_COHERENT_INT_MASK BIT(30)
+#define RX29_COHERENT_INT_MASK BIT(29)
+#define RX28_COHERENT_INT_MASK BIT(28)
+#define RX27_COHERENT_INT_MASK BIT(27)
+#define RX26_COHERENT_INT_MASK BIT(26)
+#define RX25_COHERENT_INT_MASK BIT(25)
+#define RX24_COHERENT_INT_MASK BIT(24)
+#define RX23_COHERENT_INT_MASK BIT(23)
+#define RX22_COHERENT_INT_MASK BIT(22)
+#define RX21_COHERENT_INT_MASK BIT(21)
+#define RX20_COHERENT_INT_MASK BIT(20)
+#define RX19_COHERENT_INT_MASK BIT(19)
+#define RX18_COHERENT_INT_MASK BIT(18)
+#define RX17_COHERENT_INT_MASK BIT(17)
+#define RX16_COHERENT_INT_MASK BIT(16)
+
+#define RX_COHERENT_HIGH_INT_MASK \
+ (RX31_COHERENT_INT_MASK | RX30_COHERENT_INT_MASK | \
+ RX29_COHERENT_INT_MASK | RX28_COHERENT_INT_MASK | \
+ RX27_COHERENT_INT_MASK | RX26_COHERENT_INT_MASK | \
+ RX25_COHERENT_INT_MASK | RX24_COHERENT_INT_MASK | \
+ RX23_COHERENT_INT_MASK | RX22_COHERENT_INT_MASK | \
+ RX21_COHERENT_INT_MASK | RX20_COHERENT_INT_MASK | \
+ RX19_COHERENT_INT_MASK | RX18_COHERENT_INT_MASK | \
+ RX17_COHERENT_INT_MASK | RX16_COHERENT_INT_MASK)
+
+#define INT_RX3_MASK(_n) (RX_COHERENT_HIGH_INT_MASK & (_n))
+
+/* QDMA_CSR_INT_ENABLE5 */
+#define TX31_COHERENT_INT_MASK BIT(31)
+#define TX30_COHERENT_INT_MASK BIT(30)
+#define TX29_COHERENT_INT_MASK BIT(29)
+#define TX28_COHERENT_INT_MASK BIT(28)
+#define TX27_COHERENT_INT_MASK BIT(27)
+#define TX26_COHERENT_INT_MASK BIT(26)
+#define TX25_COHERENT_INT_MASK BIT(25)
+#define TX24_COHERENT_INT_MASK BIT(24)
+#define TX23_COHERENT_INT_MASK BIT(23)
+#define TX22_COHERENT_INT_MASK BIT(22)
+#define TX21_COHERENT_INT_MASK BIT(21)
+#define TX20_COHERENT_INT_MASK BIT(20)
+#define TX19_COHERENT_INT_MASK BIT(19)
+#define TX18_COHERENT_INT_MASK BIT(18)
+#define TX17_COHERENT_INT_MASK BIT(17)
+#define TX16_COHERENT_INT_MASK BIT(16)
+#define TX15_COHERENT_INT_MASK BIT(15)
+#define TX14_COHERENT_INT_MASK BIT(14)
+#define TX13_COHERENT_INT_MASK BIT(13)
+#define TX12_COHERENT_INT_MASK BIT(12)
+#define TX11_COHERENT_INT_MASK BIT(11)
+#define TX10_COHERENT_INT_MASK BIT(10)
+#define TX9_COHERENT_INT_MASK BIT(9)
+#define TX8_COHERENT_INT_MASK BIT(8)
+
+#define TX_COHERENT_HIGH_INT_MASK \
+ (TX31_COHERENT_INT_MASK | TX30_COHERENT_INT_MASK | \
+ TX29_COHERENT_INT_MASK | TX28_COHERENT_INT_MASK | \
+ TX27_COHERENT_INT_MASK | TX26_COHERENT_INT_MASK | \
+ TX25_COHERENT_INT_MASK | TX24_COHERENT_INT_MASK | \
+ TX23_COHERENT_INT_MASK | TX22_COHERENT_INT_MASK | \
+ TX21_COHERENT_INT_MASK | TX20_COHERENT_INT_MASK | \
+ TX19_COHERENT_INT_MASK | TX18_COHERENT_INT_MASK | \
+ TX17_COHERENT_INT_MASK | TX16_COHERENT_INT_MASK | \
+ TX15_COHERENT_INT_MASK | TX14_COHERENT_INT_MASK | \
+ TX13_COHERENT_INT_MASK | TX12_COHERENT_INT_MASK | \
+ TX11_COHERENT_INT_MASK | TX10_COHERENT_INT_MASK | \
+ TX9_COHERENT_INT_MASK | TX8_COHERENT_INT_MASK)
+
+#define REG_TX_IRQ_BASE(_n) ((_n) ? 0x0048 : 0x0050)
+
+#define REG_TX_IRQ_CFG(_n) ((_n) ? 0x004c : 0x0054)
+#define TX_IRQ_THR_MASK GENMASK(27, 16)
+#define TX_IRQ_DEPTH_MASK GENMASK(11, 0)
+
+#define REG_IRQ_CLEAR_LEN(_n) ((_n) ? 0x0064 : 0x0058)
+#define IRQ_CLEAR_LEN_MASK GENMASK(7, 0)
+
+#define REG_IRQ_STATUS(_n) ((_n) ? 0x0068 : 0x005c)
+#define IRQ_ENTRY_LEN_MASK GENMASK(27, 16)
+#define IRQ_HEAD_IDX_MASK GENMASK(11, 0)
+
+#define REG_TX_RING_BASE(_n) \
+ (((_n) < 8) ? 0x0100 + ((_n) << 5) : 0x0b00 + (((_n) - 8) << 5))
+
+#define REG_TX_RING_BLOCKING(_n) \
+ (((_n) < 8) ? 0x0104 + ((_n) << 5) : 0x0b04 + (((_n) - 8) << 5))
+
+#define TX_RING_IRQ_BLOCKING_MAP_MASK BIT(6)
+#define TX_RING_IRQ_BLOCKING_CFG_MASK BIT(4)
+#define TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK BIT(2)
+#define TX_RING_IRQ_BLOCKING_MAX_TH_TXRING_EN_MASK BIT(1)
+#define TX_RING_IRQ_BLOCKING_MIN_TH_TXRING_EN_MASK BIT(0)
+
+#define REG_TX_CPU_IDX(_n) \
+ (((_n) < 8) ? 0x0108 + ((_n) << 5) : 0x0b08 + (((_n) - 8) << 5))
+
+#define TX_RING_CPU_IDX_MASK GENMASK(15, 0)
+
+#define REG_TX_DMA_IDX(_n) \
+ (((_n) < 8) ? 0x010c + ((_n) << 5) : 0x0b0c + (((_n) - 8) << 5))
+
+#define TX_RING_DMA_IDX_MASK GENMASK(15, 0)
+
+#define IRQ_RING_IDX_MASK GENMASK(20, 16)
+#define IRQ_DESC_IDX_MASK GENMASK(15, 0)
+
+#define REG_RX_RING_BASE(_n) \
+ (((_n) < 16) ? 0x0200 + ((_n) << 5) : 0x0e00 + (((_n) - 16) << 5))
+
+#define REG_RX_RING_SIZE(_n) \
+ (((_n) < 16) ? 0x0204 + ((_n) << 5) : 0x0e04 + (((_n) - 16) << 5))
+
+#define RX_RING_THR_MASK GENMASK(31, 16)
+#define RX_RING_SIZE_MASK GENMASK(15, 0)
+
+#define REG_RX_CPU_IDX(_n) \
+ (((_n) < 16) ? 0x0208 + ((_n) << 5) : 0x0e08 + (((_n) - 16) << 5))
+
+#define RX_RING_CPU_IDX_MASK GENMASK(15, 0)
+
+#define REG_RX_DMA_IDX(_n) \
+ (((_n) < 16) ? 0x020c + ((_n) << 5) : 0x0e0c + (((_n) - 16) << 5))
+
+#define REG_RX_DELAY_INT_IDX(_n) \
+ (((_n) < 16) ? 0x0210 + ((_n) << 5) : 0x0e10 + (((_n) - 16) << 5))
+
+#define REG_RX_SCATTER_CFG(_n) \
+ (((_n) < 16) ? 0x0214 + ((_n) << 5) : 0x0e14 + (((_n) - 16) << 5))
+
+#define RX_DELAY_INT_MASK GENMASK(15, 0)
+
+#define RX_RING_DMA_IDX_MASK GENMASK(15, 0)
+
+#define RX_RING_SG_EN_MASK BIT(0)
+
+#define REG_INGRESS_TRTCM_CFG 0x0070
+#define INGRESS_TRTCM_EN_MASK BIT(31)
+#define INGRESS_TRTCM_MODE_MASK BIT(30)
+#define INGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
+#define INGRESS_FAST_TICK_MASK GENMASK(15, 0)
+
+#define REG_QUEUE_CLOSE_CFG(_n) (0x00a0 + ((_n) & 0xfc))
+#define TXQ_DISABLE_CHAN_QUEUE_MASK(_n, _m) BIT((_m) + (((_n) & 0x3) << 3))
+
+#define REG_TXQ_DIS_CFG_BASE(_n) ((_n) ? 0x20a0 : 0x00a0)
+#define REG_TXQ_DIS_CFG(_n, _m) (REG_TXQ_DIS_CFG_BASE((_n)) + ((_m) << 2))
+
+#define REG_CNTR_CFG(_n) (0x0400 + ((_n) << 3))
+#define CNTR_EN_MASK BIT(31)
+#define CNTR_ALL_CHAN_EN_MASK BIT(30)
+#define CNTR_ALL_QUEUE_EN_MASK BIT(29)
+#define CNTR_ALL_DSCP_RING_EN_MASK BIT(28)
+#define CNTR_SRC_MASK GENMASK(27, 24)
+#define CNTR_DSCP_RING_MASK GENMASK(20, 16)
+#define CNTR_CHAN_MASK GENMASK(7, 3)
+#define CNTR_QUEUE_MASK GENMASK(2, 0)
+
+#define REG_CNTR_VAL(_n) (0x0404 + ((_n) << 3))
+
+#define REG_LMGR_INIT_CFG 0x1000
+#define LMGR_INIT_START BIT(31)
+#define LMGR_SRAM_MODE_MASK BIT(30)
+#define HW_FWD_PKTSIZE_OVERHEAD_MASK GENMASK(27, 20)
+#define HW_FWD_DESC_NUM_MASK GENMASK(16, 0)
+
+#define REG_FWD_DSCP_LOW_THR 0x1004
+#define FWD_DSCP_LOW_THR_MASK GENMASK(17, 0)
+
+#define REG_EGRESS_RATE_METER_CFG 0x100c
+#define EGRESS_RATE_METER_EN_MASK BIT(31)
+#define EGRESS_RATE_METER_EQ_RATE_EN_MASK BIT(17)
+#define EGRESS_RATE_METER_WINDOW_SZ_MASK GENMASK(16, 12)
+#define EGRESS_RATE_METER_TIMESLICE_MASK GENMASK(10, 0)
+
+#define REG_EGRESS_TRTCM_CFG 0x1010
+#define EGRESS_TRTCM_EN_MASK BIT(31)
+#define EGRESS_TRTCM_MODE_MASK BIT(30)
+#define EGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
+#define EGRESS_FAST_TICK_MASK GENMASK(15, 0)
+
+#define TRTCM_PARAM_RW_MASK BIT(31)
+#define TRTCM_PARAM_RW_DONE_MASK BIT(30)
+#define TRTCM_PARAM_TYPE_MASK GENMASK(29, 28)
+#define TRTCM_METER_GROUP_MASK GENMASK(27, 26)
+#define TRTCM_PARAM_INDEX_MASK GENMASK(23, 17)
+#define TRTCM_PARAM_RATE_TYPE_MASK BIT(16)
+
+#define REG_TRTCM_CFG_PARAM(_n) ((_n) + 0x4)
+#define REG_TRTCM_DATA_LOW(_n) ((_n) + 0x8)
+#define REG_TRTCM_DATA_HIGH(_n) ((_n) + 0xc)
+
+#define RATE_LIMIT_PARAM_RW_MASK BIT(31)
+#define RATE_LIMIT_PARAM_RW_DONE_MASK BIT(30)
+#define RATE_LIMIT_PARAM_TYPE_MASK GENMASK(29, 28)
+#define RATE_LIMIT_METER_GROUP_MASK GENMASK(27, 26)
+#define RATE_LIMIT_PARAM_INDEX_MASK GENMASK(23, 16)
+
+#define REG_TXWRR_MODE_CFG 0x1020
+#define TWRR_WEIGHT_SCALE_MASK BIT(31)
+#define TWRR_WEIGHT_BASE_MASK BIT(3)
+
+#define REG_TXWRR_WEIGHT_CFG 0x1024
+#define TWRR_RW_CMD_MASK BIT(31)
+#define TWRR_RW_CMD_DONE BIT(30)
+#define TWRR_CHAN_IDX_MASK GENMASK(23, 19)
+#define TWRR_QUEUE_IDX_MASK GENMASK(18, 16)
+#define TWRR_VALUE_MASK GENMASK(15, 0)
+
+#define REG_PSE_BUF_USAGE_CFG 0x1028
+#define PSE_BUF_ESTIMATE_EN_MASK BIT(29)
+
+#define REG_CHAN_QOS_MODE(_n) (0x1040 + ((_n) << 2))
+#define CHAN_QOS_MODE_MASK(_n) GENMASK(2 + ((_n) << 2), (_n) << 2)
+
+#define REG_GLB_TRTCM_CFG 0x1080
+#define GLB_TRTCM_EN_MASK BIT(31)
+#define GLB_TRTCM_MODE_MASK BIT(30)
+#define GLB_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
+#define GLB_FAST_TICK_MASK GENMASK(15, 0)
+
+#define REG_TXQ_CNGST_CFG 0x10a0
+#define TXQ_CNGST_DROP_EN BIT(31)
+#define TXQ_CNGST_DEI_DROP_EN BIT(30)
+
+#define REG_SLA_TRTCM_CFG 0x1150
+#define SLA_TRTCM_EN_MASK BIT(31)
+#define SLA_TRTCM_MODE_MASK BIT(30)
+#define SLA_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
+#define SLA_FAST_TICK_MASK GENMASK(15, 0)
+
+/* CTRL */
+#define QDMA_DESC_DONE_MASK BIT(31)
+#define QDMA_DESC_DROP_MASK BIT(30) /* tx: drop - rx: overflow */
+#define QDMA_DESC_MORE_MASK BIT(29) /* more SG elements */
+#define QDMA_DESC_DEI_MASK BIT(25)
+#define QDMA_DESC_NO_DROP_MASK BIT(24)
+#define QDMA_DESC_LEN_MASK GENMASK(15, 0)
+/* DATA */
+#define QDMA_DESC_NEXT_ID_MASK GENMASK(15, 0)
+/* TX MSG0 */
+#define QDMA_ETH_TXMSG_MIC_IDX_MASK BIT(30)
+#define QDMA_ETH_TXMSG_SP_TAG_MASK GENMASK(29, 14)
+#define QDMA_ETH_TXMSG_ICO_MASK BIT(13)
+#define QDMA_ETH_TXMSG_UCO_MASK BIT(12)
+#define QDMA_ETH_TXMSG_TCO_MASK BIT(11)
+#define QDMA_ETH_TXMSG_TSO_MASK BIT(10)
+#define QDMA_ETH_TXMSG_FAST_MASK BIT(9)
+#define QDMA_ETH_TXMSG_OAM_MASK BIT(8)
+#define QDMA_ETH_TXMSG_CHAN_MASK GENMASK(7, 3)
+#define QDMA_ETH_TXMSG_QUEUE_MASK GENMASK(2, 0)
+/* TX MSG1 */
+#define QDMA_ETH_TXMSG_NO_DROP BIT(31)
+#define QDMA_ETH_TXMSG_METER_MASK GENMASK(30, 24) /* 0x7f no meters */
+#define QDMA_ETH_TXMSG_FPORT_MASK GENMASK(23, 20)
+#define QDMA_ETH_TXMSG_NBOQ_MASK GENMASK(19, 15)
+#define QDMA_ETH_TXMSG_HWF_MASK BIT(14)
+#define QDMA_ETH_TXMSG_HOP_MASK BIT(13)
+#define QDMA_ETH_TXMSG_PTP_MASK BIT(12)
+#define QDMA_ETH_TXMSG_ACNT_G1_MASK GENMASK(10, 6) /* 0x1f do not count */
+#define QDMA_ETH_TXMSG_ACNT_G0_MASK GENMASK(5, 0) /* 0x3f do not count */
+
+/* RX MSG0 */
+#define QDMA_ETH_RXMSG_SPTAG GENMASK(21, 14)
+/* RX MSG1 */
+#define QDMA_ETH_RXMSG_DEI_MASK BIT(31)
+#define QDMA_ETH_RXMSG_IP6_MASK BIT(30)
+#define QDMA_ETH_RXMSG_IP4_MASK BIT(29)
+#define QDMA_ETH_RXMSG_IP4F_MASK BIT(28)
+#define QDMA_ETH_RXMSG_L4_VALID_MASK BIT(27)
+#define QDMA_ETH_RXMSG_L4F_MASK BIT(26)
+#define QDMA_ETH_RXMSG_SPORT_MASK GENMASK(25, 21)
+#define QDMA_ETH_RXMSG_CRSN_MASK GENMASK(20, 16)
+#define QDMA_ETH_RXMSG_PPE_ENTRY_MASK GENMASK(15, 0)
+
+struct airoha_qdma_desc {
+ __le32 rsv;
+ __le32 ctrl;
+ __le32 addr;
+ __le32 data;
+ __le32 msg0;
+ __le32 msg1;
+ __le32 msg2;
+ __le32 msg3;
+};
+
+/* CTRL0 */
+#define QDMA_FWD_DESC_CTX_MASK BIT(31)
+#define QDMA_FWD_DESC_RING_MASK GENMASK(30, 28)
+#define QDMA_FWD_DESC_IDX_MASK GENMASK(27, 16)
+#define QDMA_FWD_DESC_LEN_MASK GENMASK(15, 0)
+/* CTRL1 */
+#define QDMA_FWD_DESC_FIRST_IDX_MASK GENMASK(15, 0)
+/* CTRL2 */
+#define QDMA_FWD_DESC_MORE_PKT_NUM_MASK GENMASK(2, 0)
+
+struct airoha_qdma_fwd_desc {
+ __le32 addr;
+ __le32 ctrl0;
+ __le32 ctrl1;
+ __le32 ctrl2;
+ __le32 msg0;
+ __le32 msg1;
+ __le32 rsv0;
+ __le32 rsv1;
+};
+
+#endif /* AIROHA_REGS_H */
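The GENMASK()/BIT() register and descriptor fields defined in this header are meant to be packed and unpacked with the standard <linux/bitfield.h> helpers. A minimal illustrative sketch follows; the helper names (example_build_tx_msg0, example_rx_ppe_entry) are placeholders and are not part of the patch.

#include <linux/types.h>
#include <linux/bitfield.h>

/* Pack QDMA TX MSG0 fields into their GENMASK()-defined positions. */
static u32 example_build_tx_msg0(u8 chan, u8 queue, bool tso)
{
	u32 msg0 = 0;

	msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_CHAN_MASK, chan);
	msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_QUEUE_MASK, queue);
	msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TSO_MASK, tso);

	return msg0;
}

/* Extract the PPE entry index from a received QDMA RX MSG1 word. */
static u16 example_rx_ppe_entry(u32 msg1)
{
	return FIELD_GET(QDMA_ETH_RXMSG_PPE_ENTRY_MASK, msg1);
}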
diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c
index 78231c85234d..f62851708d4f 100644
--- a/drivers/net/ethernet/alacritech/slicoss.c
+++ b/drivers/net/ethernet/alacritech/slicoss.c
@@ -1678,17 +1678,15 @@ static int slic_init(struct slic_device *sdev)
slic_card_reset(sdev);
err = slic_load_firmware(sdev);
- if (err) {
- dev_err(&sdev->pdev->dev, "failed to load firmware\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&sdev->pdev->dev, err,
+ "failed to load firmware\n");
/* we need the shared memory to read EEPROM so set it up temporarily */
err = slic_init_shmem(sdev);
- if (err) {
- dev_err(&sdev->pdev->dev, "failed to init shared memory\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&sdev->pdev->dev, err,
+ "failed to init shared memory\n");
err = slic_read_eeprom(sdev);
if (err) {
@@ -1741,10 +1739,9 @@ static int slic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int err;
err = pci_enable_device(pdev);
- if (err) {
- dev_err(&pdev->dev, "failed to enable PCI device\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&pdev->dev, err,
+ "failed to enable PCI device\n");
pci_set_master(pdev);
pci_try_set_mwi(pdev);
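The slicoss hunks above replace the open-coded "dev_err(); return err;" pairs with dev_err_probe(). A minimal sketch of that pattern, with placeholder names (example_hw_init, example_probe_step) that are not part of the driver:

#include <linux/device.h>

static int example_hw_init(struct device *dev)
{
	return 0;	/* placeholder for a real init step */
}

static int example_probe_step(struct device *dev)
{
	int err = example_hw_init(dev);

	/*
	 * dev_err_probe() logs the message, records the deferral reason
	 * when err is -EPROBE_DEFER, and returns err, so the old two-line
	 * error path collapses into a single statement.
	 */
	if (err)
		return dev_err_probe(dev, err, "hardware init failed\n");

	return 0;
}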
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index d761c08fe5c1..2f516b950f4e 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -1142,7 +1142,7 @@ static struct platform_driver emac_driver = {
.of_match_table = emac_of_match,
},
.probe = emac_probe,
- .remove_new = emac_remove,
+ .remove = emac_remove,
.suspend = emac_suspend,
.resume = emac_resume,
};
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index eafef84fe3be..9e6f91df2ba0 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -1560,9 +1560,9 @@ static void ace_watchdog(struct net_device *data, unsigned int txqueue)
}
-static void ace_tasklet(struct tasklet_struct *t)
+static void ace_bh_work(struct work_struct *work)
{
- struct ace_private *ap = from_tasklet(ap, t, ace_tasklet);
+ struct ace_private *ap = from_work(ap, work, ace_bh_work);
struct net_device *dev = ap->ndev;
int cur_size;
@@ -1595,7 +1595,7 @@ static void ace_tasklet(struct tasklet_struct *t)
#endif
ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE - cur_size);
}
- ap->tasklet_pending = 0;
+ ap->bh_work_pending = 0;
}
@@ -1617,7 +1617,7 @@ static void ace_dump_trace(struct ace_private *ap)
*
* Loading rings is safe without holding the spin lock since this is
* done only before the device is enabled, thus no interrupts are
- * generated and by the interrupt handler/tasklet handler.
+ * generated and by the interrupt handler/bh handler.
*/
static void ace_load_std_rx_ring(struct net_device *dev, int nr_bufs)
{
@@ -2160,7 +2160,7 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id)
*/
if (netif_running(dev)) {
int cur_size;
- int run_tasklet = 0;
+ int run_bh_work = 0;
cur_size = atomic_read(&ap->cur_rx_bufs);
if (cur_size < RX_LOW_STD_THRES) {
@@ -2172,7 +2172,7 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id)
ace_load_std_rx_ring(dev,
RX_RING_SIZE - cur_size);
} else
- run_tasklet = 1;
+ run_bh_work = 1;
}
if (!ACE_IS_TIGON_I(ap)) {
@@ -2188,7 +2188,7 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id)
ace_load_mini_rx_ring(dev,
RX_MINI_SIZE - cur_size);
} else
- run_tasklet = 1;
+ run_bh_work = 1;
}
}
@@ -2205,12 +2205,12 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id)
ace_load_jumbo_rx_ring(dev,
RX_JUMBO_SIZE - cur_size);
} else
- run_tasklet = 1;
+ run_bh_work = 1;
}
}
- if (run_tasklet && !ap->tasklet_pending) {
- ap->tasklet_pending = 1;
- tasklet_schedule(&ap->ace_tasklet);
+ if (run_bh_work && !ap->bh_work_pending) {
+ ap->bh_work_pending = 1;
+ queue_work(system_bh_wq, &ap->ace_bh_work);
}
}
@@ -2267,7 +2267,7 @@ static int ace_open(struct net_device *dev)
/*
* Setup the bottom half rx ring refill handler
*/
- tasklet_setup(&ap->ace_tasklet, ace_tasklet);
+ INIT_WORK(&ap->ace_bh_work, ace_bh_work);
return 0;
}
@@ -2301,7 +2301,7 @@ static int ace_close(struct net_device *dev)
cmd.idx = 0;
ace_issue_cmd(regs, &cmd);
- tasklet_kill(&ap->ace_tasklet);
+ cancel_work_sync(&ap->ace_bh_work);
/*
* Make sure one CPU is not processing packets while
@@ -2539,7 +2539,7 @@ static int ace_change_mtu(struct net_device *dev, int new_mtu)
struct ace_regs __iomem *regs = ap->regs;
writel(new_mtu + ETH_HLEN + 4, &regs->IfMtu);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
if (new_mtu > ACE_STD_MTU) {
if (!(ap->jumbo)) {
diff --git a/drivers/net/ethernet/alteon/acenic.h b/drivers/net/ethernet/alteon/acenic.h
index ca5ce0cbbad1..0e45a97b9c9b 100644
--- a/drivers/net/ethernet/alteon/acenic.h
+++ b/drivers/net/ethernet/alteon/acenic.h
@@ -2,7 +2,7 @@
#ifndef _ACENIC_H_
#define _ACENIC_H_
#include <linux/interrupt.h>
-
+#include <linux/workqueue.h>
/*
* Generate TX index update each time, when TX ring is closed.
@@ -667,8 +667,8 @@ struct ace_private
struct rx_desc *rx_mini_ring;
struct rx_desc *rx_return_ring;
- int tasklet_pending, jumbo;
- struct tasklet_struct ace_tasklet;
+ int bh_work_pending, jumbo;
+ struct work_struct ace_bh_work;
struct event *evt_ring;
@@ -776,7 +776,7 @@ static int ace_open(struct net_device *dev);
static netdev_tx_t ace_start_xmit(struct sk_buff *skb,
struct net_device *dev);
static int ace_close(struct net_device *dev);
-static void ace_tasklet(struct tasklet_struct *t);
+static void ace_bh_work(struct work_struct *work);
static void ace_dump_trace(struct ace_private *ap);
static void ace_set_multicast_list(struct net_device *dev);
static int ace_change_mtu(struct net_device *dev, int new_mtu);
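The acenic hunks above convert the RX refill tasklet to a work item executed on the BH workqueue. The general shape of that conversion, sketched here with placeholder names (example_priv, example_refill); this is not acenic code:

#include <linux/workqueue.h>

struct example_priv {
	struct work_struct refill_work;
	int work_pending;
};

static void example_refill(struct work_struct *work)
{
	struct example_priv *p = from_work(p, work, refill_work);

	/* ... refill the RX rings, as ace_bh_work() does ... */
	p->work_pending = 0;
}

static void example_open(struct example_priv *p)
{
	INIT_WORK(&p->refill_work, example_refill);	/* was tasklet_setup() */
}

static void example_irq(struct example_priv *p)
{
	if (!p->work_pending) {
		p->work_pending = 1;
		/* was tasklet_schedule(); runs from the BH workqueue */
		queue_work(system_bh_wq, &p->refill_work);
	}
}

static void example_close(struct example_priv *p)
{
	cancel_work_sync(&p->refill_work);	/* was tasklet_kill() */
}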
diff --git a/drivers/net/ethernet/altera/altera_tse.h b/drivers/net/ethernet/altera/altera_tse.h
index 82f2363a45cd..e5a56bb989da 100644
--- a/drivers/net/ethernet/altera/altera_tse.h
+++ b/drivers/net/ethernet/altera/altera_tse.h
@@ -401,9 +401,6 @@ struct altera_tse_private {
/* MAC address space */
struct altera_tse_mac __iomem *mac_dev;
- /* TSE Revision */
- u32 revision;
-
/* mSGDMA Rx Dispatcher address space */
void __iomem *rx_dma_csr;
void __iomem *rx_dma_desc;
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 1c8763be0e4b..ca55c5fd11df 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -788,7 +788,7 @@ static int tse_change_mtu(struct net_device *dev, int new_mtu)
return -EBUSY;
}
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
netdev_update_features(dev);
return 0;
@@ -892,9 +892,6 @@ static int tse_open(struct net_device *dev)
netdev_warn(dev, "device MAC address %pM\n",
dev->dev_addr);
- if ((priv->revision < 0xd00) || (priv->revision > 0xe00))
- netdev_warn(dev, "TSE revision %x\n", priv->revision);
-
spin_lock(&priv->mac_cfg_lock);
ret = reset_mac(priv);
@@ -1142,6 +1139,7 @@ static int altera_tse_probe(struct platform_device *pdev)
struct net_device *ndev;
void __iomem *descmap;
int ret = -ENODEV;
+ u32 revision;
ndev = alloc_etherdev(sizeof(struct altera_tse_private));
if (!ndev) {
@@ -1150,6 +1148,7 @@ static int altera_tse_probe(struct platform_device *pdev)
}
SET_NETDEV_DEV(ndev, &pdev->dev);
+ platform_set_drvdata(pdev, ndev);
priv = netdev_priv(ndev);
priv->device = &pdev->dev;
@@ -1387,25 +1386,7 @@ static int altera_tse_probe(struct platform_device *pdev)
spin_lock_init(&priv->tx_lock);
spin_lock_init(&priv->rxdma_irq_lock);
- netif_carrier_off(ndev);
- ret = register_netdev(ndev);
- if (ret) {
- dev_err(&pdev->dev, "failed to register TSE net device\n");
- goto err_register_netdev;
- }
-
- platform_set_drvdata(pdev, ndev);
-
- priv->revision = ioread32(&priv->mac_dev->megacore_revision);
-
- if (netif_msg_probe(priv))
- dev_info(&pdev->dev, "Altera TSE MAC version %d.%d at 0x%08lx irq %d/%d\n",
- (priv->revision >> 8) & 0xff,
- priv->revision & 0xff,
- (unsigned long) control_port->start, priv->rx_irq,
- priv->tx_irq);
-
- snprintf(mrc.name, MII_BUS_ID_SIZE, "%s-pcs-mii", ndev->name);
+ snprintf(mrc.name, MII_BUS_ID_SIZE, "%s-pcs-mii", dev_name(&pdev->dev));
pcs_bus = devm_mdio_regmap_register(&pdev->dev, &mrc);
if (IS_ERR(pcs_bus)) {
ret = PTR_ERR(pcs_bus);
@@ -1442,12 +1423,30 @@ static int altera_tse_probe(struct platform_device *pdev)
goto err_init_phylink;
}
+ ret = register_netdev(ndev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register TSE net device\n");
+ goto err_register_netdev;
+ }
+
+ revision = ioread32(&priv->mac_dev->megacore_revision);
+
+ if (revision < 0xd00 || revision > 0xe00)
+ netdev_warn(ndev, "TSE revision %x\n", revision);
+
+ if (netif_msg_probe(priv))
+ dev_info(&pdev->dev, "Altera TSE MAC version %d.%d at 0x%08lx irq %d/%d\n",
+ (revision >> 8) & 0xff, revision & 0xff,
+ (unsigned long)control_port->start, priv->rx_irq,
+ priv->tx_irq);
+
return 0;
+
+err_register_netdev:
+ phylink_destroy(priv->phylink);
err_init_phylink:
lynx_pcs_destroy(priv->pcs);
err_init_pcs:
- unregister_netdev(ndev);
-err_register_netdev:
netif_napi_del(&priv->napi);
altera_tse_mdio_destroy(ndev);
err_free_netdev:
@@ -1519,7 +1518,7 @@ MODULE_DEVICE_TABLE(of, altera_tse_ids);
static struct platform_driver altera_tse_driver = {
.probe = altera_tse_probe,
- .remove_new = altera_tse_remove,
+ .remove = altera_tse_remove,
.suspend = NULL,
.resume = NULL,
.driver = {
diff --git a/drivers/net/ethernet/amazon/Kconfig b/drivers/net/ethernet/amazon/Kconfig
index c37fa393b99e..95dcc3969f0c 100644
--- a/drivers/net/ethernet/amazon/Kconfig
+++ b/drivers/net/ethernet/amazon/Kconfig
@@ -19,7 +19,9 @@ if NET_VENDOR_AMAZON
config ENA_ETHERNET
tristate "Elastic Network Adapter (ENA) support"
depends on PCI_MSI && !CPU_BIG_ENDIAN
+ depends on PTP_1588_CLOCK_OPTIONAL
select DIMLIB
+ select NET_DEVLINK
help
This driver supports Elastic Network Adapter (ENA)"
diff --git a/drivers/net/ethernet/amazon/ena/Makefile b/drivers/net/ethernet/amazon/ena/Makefile
index 6ab615365172..6d8036bc1823 100644
--- a/drivers/net/ethernet/amazon/ena/Makefile
+++ b/drivers/net/ethernet/amazon/ena/Makefile
@@ -5,4 +5,4 @@
obj-$(CONFIG_ENA_ETHERNET) += ena.o
-ena-y := ena_netdev.o ena_com.o ena_eth_com.o ena_ethtool.o ena_xdp.o
+ena-y := ena_netdev.o ena_com.o ena_eth_com.o ena_ethtool.o ena_xdp.o ena_phc.o ena_devlink.o ena_debugfs.o
diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
index 6de0d590be34..898ecd96b96a 100644
--- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
@@ -7,6 +7,21 @@
#define ENA_ADMIN_RSS_KEY_PARTS 10
+#define ENA_ADMIN_CUSTOMER_METRICS_SUPPORT_MASK 0x3F
+#define ENA_ADMIN_CUSTOMER_METRICS_MIN_SUPPORT_MASK 0x1F
+
+ /* customer metrics - in correlation with
+ * ENA_ADMIN_CUSTOMER_METRICS_SUPPORT_MASK
+ */
+enum ena_admin_customer_metrics_id {
+ ENA_ADMIN_BW_IN_ALLOWANCE_EXCEEDED = 0,
+ ENA_ADMIN_BW_OUT_ALLOWANCE_EXCEEDED = 1,
+ ENA_ADMIN_PPS_ALLOWANCE_EXCEEDED = 2,
+ ENA_ADMIN_CONNTRACK_ALLOWANCE_EXCEEDED = 3,
+ ENA_ADMIN_LINKLOCAL_ALLOWANCE_EXCEEDED = 4,
+ ENA_ADMIN_CONNTRACK_ALLOWANCE_AVAILABLE = 5,
+};
+
enum ena_admin_aq_opcode {
ENA_ADMIN_CREATE_SQ = 1,
ENA_ADMIN_DESTROY_SQ = 2,
@@ -45,12 +60,16 @@ enum ena_admin_aq_feature_id {
ENA_ADMIN_AENQ_CONFIG = 26,
ENA_ADMIN_LINK_CONFIG = 27,
ENA_ADMIN_HOST_ATTR_CONFIG = 28,
+ ENA_ADMIN_PHC_CONFIG = 29,
ENA_ADMIN_FEATURES_OPCODE_NUM = 32,
};
/* device capabilities */
enum ena_admin_aq_caps_id {
ENA_ADMIN_ENI_STATS = 0,
+ /* ENA SRD customer metrics */
+ ENA_ADMIN_ENA_SRD_INFO = 1,
+ ENA_ADMIN_CUSTOMER_METRICS = 2,
};
enum ena_admin_placement_policy_type {
@@ -99,6 +118,9 @@ enum ena_admin_get_stats_type {
ENA_ADMIN_GET_STATS_TYPE_EXTENDED = 1,
/* extra HW stats for specific network interface */
ENA_ADMIN_GET_STATS_TYPE_ENI = 2,
+ /* extra HW stats for ENA SRD */
+ ENA_ADMIN_GET_STATS_TYPE_ENA_SRD = 3,
+ ENA_ADMIN_GET_STATS_TYPE_CUSTOMER_METRICS = 4,
};
enum ena_admin_get_stats_scope {
@@ -106,6 +128,24 @@ enum ena_admin_get_stats_scope {
ENA_ADMIN_ETH_TRAFFIC = 1,
};
+enum ena_admin_phc_type {
+ ENA_ADMIN_PHC_TYPE_READLESS = 0,
+};
+
+enum ena_admin_phc_error_flags {
+ ENA_ADMIN_PHC_ERROR_FLAG_TIMESTAMP = BIT(0),
+};
+
+/* ENA SRD configuration for ENI */
+enum ena_admin_ena_srd_flags {
+ /* Feature enabled */
+ ENA_ADMIN_ENA_SRD_ENABLED = BIT(0),
+ /* UDP support enabled */
+ ENA_ADMIN_ENA_SRD_UDP_ENABLED = BIT(1),
+ /* Bypass Rx UDP ordering */
+ ENA_ADMIN_ENA_SRD_UDP_ORDERING_BYPASS_ENABLED = BIT(2),
+};
+
struct ena_admin_aq_common_desc {
/* 11:0 : command_id
* 15:12 : reserved12
@@ -363,6 +403,9 @@ struct ena_admin_aq_get_stats_cmd {
* stats of other device
*/
u16 device_id;
+
+ /* a bitmap representing the requested metric values */
+ u64 requested_metrics;
};
/* Basic Statistics Command. */
@@ -419,6 +462,40 @@ struct ena_admin_eni_stats {
u64 linklocal_allowance_exceeded;
};
+struct ena_admin_ena_srd_stats {
+ /* Number of packets transmitted over ENA SRD */
+ u64 ena_srd_tx_pkts;
+
+ /* Number of packets transmitted or could have been
+ * transmitted over ENA SRD
+ */
+ u64 ena_srd_eligible_tx_pkts;
+
+ /* Number of packets received over ENA SRD */
+ u64 ena_srd_rx_pkts;
+
+ /* Percentage of the ENA SRD resources that is in use */
+ u64 ena_srd_resource_utilization;
+};
+
+/* ENA SRD Statistics Command */
+struct ena_admin_ena_srd_info {
+ /* ENA SRD configuration bitmap. See ena_admin_ena_srd_flags for
+ * details
+ */
+ u64 flags;
+
+ struct ena_admin_ena_srd_stats ena_srd_stats;
+};
+
+/* Customer Metrics Command. */
+struct ena_admin_customer_metrics {
+ /* A bitmap representing the reported customer metrics according to
+ * the order they are reported
+ */
+ u64 reported_metrics;
+};
+
struct ena_admin_acq_get_stats_resp {
struct ena_admin_acq_common_desc acq_common_desc;
@@ -428,6 +505,10 @@ struct ena_admin_acq_get_stats_resp {
struct ena_admin_basic_stats basic_stats;
struct ena_admin_eni_stats eni_stats;
+
+ struct ena_admin_ena_srd_info ena_srd_info;
+
+ struct ena_admin_customer_metrics customer_metrics;
} u;
};
@@ -871,7 +952,9 @@ struct ena_admin_host_info {
* 4 : rss_configurable_function_key
* 5 : reserved
* 6 : rx_page_reuse
- * 31:7 : reserved
+ * 7 : reserved
+ * 8 : phc
+ * 31:9 : reserved
*/
u32 driver_supported_features;
};
@@ -903,7 +986,7 @@ struct ena_admin_feature_rss_ind_table {
struct ena_admin_rss_ind_table_entry inline_entry;
};
-/* When hint value is 0, driver should use it's own predefined value */
+/* When hint value is 0, driver should use its own predefined value */
struct ena_admin_ena_hw_hints {
/* value in ms */
u16 mmio_read_timeout;
@@ -951,6 +1034,43 @@ struct ena_admin_queue_ext_feature_desc {
};
};
+struct ena_admin_feature_phc_desc {
+ /* PHC type as defined in enum ena_admin_phc_type,
+ * used only for GET command.
+ */
+ u8 type;
+
+ /* Reserved - MBZ */
+ u8 reserved1[3];
+
+ /* PHC doorbell address as an offset to PCIe MMIO REG BAR,
+ * used only for GET command.
+ */
+ u32 doorbell_offset;
+
+ /* Max time for valid PHC retrieval, passing this threshold will
+ * fail the get-time request and block PHC requests for
+ * block_timeout_usec, used only for GET command.
+ */
+ u32 expire_timeout_usec;
+
+ /* PHC requests block period, blocking starts if PHC request expired
+ * in order to prevent floods on busy device,
+ * used only for GET command.
+ */
+ u32 block_timeout_usec;
+
+ /* Shared PHC physical address (ena_admin_phc_resp),
+ * used only for SET command.
+ */
+ struct ena_common_mem_addr output_address;
+
+ /* Shared PHC Size (ena_admin_phc_resp),
+ * used only for SET command.
+ */
+ u32 output_length;
+};
+
struct ena_admin_get_feat_resp {
struct ena_admin_acq_common_desc acq_common_desc;
@@ -980,6 +1100,8 @@ struct ena_admin_get_feat_resp {
struct ena_admin_feature_intr_moder_desc intr_moderation;
struct ena_admin_ena_hw_hints hw_hints;
+
+ struct ena_admin_feature_phc_desc phc;
} u;
};
@@ -1013,6 +1135,9 @@ struct ena_admin_set_feat_cmd {
/* LLQ configuration */
struct ena_admin_feature_llq_desc llq;
+
+ /* PHC configuration */
+ struct ena_admin_feature_phc_desc phc;
} u;
};
@@ -1090,6 +1215,23 @@ struct ena_admin_ena_mmio_req_read_less_resp {
u32 reg_val;
};
+struct ena_admin_phc_resp {
+ /* Request Id, received from DB register */
+ u16 req_id;
+
+ u8 reserved1[6];
+
+ /* PHC timestamp (nsec) */
+ u64 timestamp;
+
+ u8 reserved2[12];
+
+ /* Bit field of enum ena_admin_phc_error_flags */
+ u32 error_flags;
+
+ u8 reserved3[32];
+};
+
/* aq_common_desc */
#define ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0)
#define ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK BIT(0)
@@ -1188,6 +1330,8 @@ struct ena_admin_ena_mmio_req_read_less_resp {
#define ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK BIT(4)
#define ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_SHIFT 6
#define ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_MASK BIT(6)
+#define ENA_ADMIN_HOST_INFO_PHC_SHIFT 8
+#define ENA_ADMIN_HOST_INFO_PHC_MASK BIT(8)
/* aenq_common_desc */
#define ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK BIT(0)
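The new customer-metrics definitions above tie the bit positions in ENA_ADMIN_CUSTOMER_METRICS_SUPPORT_MASK to enum ena_admin_customer_metrics_id. A minimal illustrative check (example_metric_reported is a placeholder, not driver code):

#include <linux/types.h>
#include <linux/bits.h>
#include "ena_admin_defs.h"

/* Bit N of the supported/reported bitmap corresponds to metric id N. */
static bool example_metric_reported(u64 reported_metrics,
				    enum ena_admin_customer_metrics_id id)
{
	return reported_metrics & BIT_ULL(id);
}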
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 2d8a66ea82fa..e67b592e5697 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -41,6 +41,12 @@
#define ENA_MAX_ADMIN_POLL_US 5000
+/* PHC definitions */
+#define ENA_PHC_DEFAULT_EXPIRE_TIMEOUT_USEC 10
+#define ENA_PHC_DEFAULT_BLOCK_TIMEOUT_USEC 1000
+#define ENA_PHC_REQ_ID_OFFSET 0xDEAD
+#define ENA_PHC_ERROR_FLAGS (ENA_ADMIN_PHC_ERROR_FLAG_TIMESTAMP)
+
/*****************************************************************************/
/*****************************************************************************/
/*****************************************************************************/
@@ -312,7 +318,6 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
struct ena_com_io_sq *io_sq)
{
size_t size;
- int dev_node = 0;
memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
@@ -325,12 +330,9 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
size = io_sq->desc_entry_size * io_sq->q_depth;
if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
- dev_node = dev_to_node(ena_dev->dmadev);
- set_dev_node(ena_dev->dmadev, ctx->numa_node);
io_sq->desc_addr.virt_addr =
dma_alloc_coherent(ena_dev->dmadev, size, &io_sq->desc_addr.phys_addr,
GFP_KERNEL);
- set_dev_node(ena_dev->dmadev, dev_node);
if (!io_sq->desc_addr.virt_addr) {
io_sq->desc_addr.virt_addr =
dma_alloc_coherent(ena_dev->dmadev, size,
@@ -354,10 +356,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
size = (size_t)io_sq->bounce_buf_ctrl.buffer_size *
io_sq->bounce_buf_ctrl.buffers_num;
- dev_node = dev_to_node(ena_dev->dmadev);
- set_dev_node(ena_dev->dmadev, ctx->numa_node);
io_sq->bounce_buf_ctrl.base_buffer = devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
- set_dev_node(ena_dev->dmadev, dev_node);
if (!io_sq->bounce_buf_ctrl.base_buffer)
io_sq->bounce_buf_ctrl.base_buffer =
devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
@@ -397,7 +396,6 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
struct ena_com_io_cq *io_cq)
{
size_t size;
- int prev_node = 0;
memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));
@@ -409,11 +407,8 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
- prev_node = dev_to_node(ena_dev->dmadev);
- set_dev_node(ena_dev->dmadev, ctx->numa_node);
io_cq->cdesc_addr.virt_addr =
dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
- set_dev_node(ena_dev->dmadev, prev_node);
if (!io_cq->cdesc_addr.virt_addr) {
io_cq->cdesc_addr.virt_addr =
dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr,
@@ -774,25 +769,16 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com
if (comp_ctx->status == ENA_CMD_COMPLETED) {
netdev_err(admin_queue->ena_dev->net_device,
- "The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
- comp_ctx->cmd_opcode, admin_queue->auto_polling ? "ON" : "OFF");
- /* Check if fallback to polling is enabled */
- if (admin_queue->auto_polling)
- admin_queue->polling = true;
+ "The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d)\n",
+ comp_ctx->cmd_opcode);
} else {
netdev_err(admin_queue->ena_dev->net_device,
"The ena device didn't send a completion for the admin cmd %d status %d\n",
comp_ctx->cmd_opcode, comp_ctx->status);
}
- /* Check if shifted to polling mode.
- * This will happen if there is a completion without an interrupt
- * and autopolling mode is enabled. Continuing normal execution in such case
- */
- if (!admin_queue->polling) {
- admin_queue->running_state = false;
- ret = -ETIME;
- goto err;
- }
+ admin_queue->running_state = false;
+ ret = -ETIME;
+ goto err;
}
ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
@@ -1661,10 +1647,265 @@ void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
ena_dev->admin_queue.polling = polling;
}
-void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
- bool polling)
+bool ena_com_phc_supported(struct ena_com_dev *ena_dev)
+{
+ return ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_PHC_CONFIG);
+}
+
+int ena_com_phc_init(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_phc_info *phc = &ena_dev->phc;
+
+ memset(phc, 0x0, sizeof(*phc));
+
+ /* Allocate shared mem used PHC timestamp retrieved from device */
+ phc->virt_addr = dma_alloc_coherent(ena_dev->dmadev,
+ sizeof(*phc->virt_addr),
+ &phc->phys_addr,
+ GFP_KERNEL);
+ if (unlikely(!phc->virt_addr))
+ return -ENOMEM;
+
+ spin_lock_init(&phc->lock);
+
+ phc->virt_addr->req_id = 0;
+ phc->virt_addr->timestamp = 0;
+
+ return 0;
+}
+
+int ena_com_phc_config(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_phc_info *phc = &ena_dev->phc;
+ struct ena_admin_get_feat_resp get_feat_resp;
+ struct ena_admin_set_feat_resp set_feat_resp;
+ struct ena_admin_set_feat_cmd set_feat_cmd;
+ int ret = 0;
+
+ /* Get device PHC default configuration */
+ ret = ena_com_get_feature(ena_dev,
+ &get_feat_resp,
+ ENA_ADMIN_PHC_CONFIG,
+ 0);
+ if (unlikely(ret)) {
+ netdev_err(ena_dev->net_device,
+ "Failed to get PHC feature configuration, error: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* Supporting only readless PHC retrieval */
+ if (get_feat_resp.u.phc.type != ENA_ADMIN_PHC_TYPE_READLESS) {
+ netdev_err(ena_dev->net_device,
+ "Unsupported PHC type, error: %d\n",
+ -EOPNOTSUPP);
+ return -EOPNOTSUPP;
+ }
+
+ /* Update PHC doorbell offset according to device value,
+ * used to write req_id to PHC bar
+ */
+ phc->doorbell_offset = get_feat_resp.u.phc.doorbell_offset;
+
+ /* Update PHC expire timeout according to device
+ * or default driver value
+ */
+ phc->expire_timeout_usec = (get_feat_resp.u.phc.expire_timeout_usec) ?
+ get_feat_resp.u.phc.expire_timeout_usec :
+ ENA_PHC_DEFAULT_EXPIRE_TIMEOUT_USEC;
+
+ /* Update PHC block timeout according to device
+ * or default driver value
+ */
+ phc->block_timeout_usec = (get_feat_resp.u.phc.block_timeout_usec) ?
+ get_feat_resp.u.phc.block_timeout_usec :
+ ENA_PHC_DEFAULT_BLOCK_TIMEOUT_USEC;
+
+ /* Sanity check - expire timeout must not exceed block timeout */
+ if (phc->expire_timeout_usec > phc->block_timeout_usec)
+ phc->expire_timeout_usec = phc->block_timeout_usec;
+
+ /* Prepare PHC feature command */
+ memset(&set_feat_cmd, 0x0, sizeof(set_feat_cmd));
+ set_feat_cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
+ set_feat_cmd.feat_common.feature_id = ENA_ADMIN_PHC_CONFIG;
+ set_feat_cmd.u.phc.output_length = sizeof(*phc->virt_addr);
+ ret = ena_com_mem_addr_set(ena_dev,
+ &set_feat_cmd.u.phc.output_address,
+ phc->phys_addr);
+ if (unlikely(ret)) {
+ netdev_err(ena_dev->net_device,
+ "Failed setting PHC output address, error: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* Send PHC feature command to the device */
+ ret = ena_com_execute_admin_command(&ena_dev->admin_queue,
+ (struct ena_admin_aq_entry *)&set_feat_cmd,
+ sizeof(set_feat_cmd),
+ (struct ena_admin_acq_entry *)&set_feat_resp,
+ sizeof(set_feat_resp));
+
+ if (unlikely(ret)) {
+ netdev_err(ena_dev->net_device,
+ "Failed to enable PHC, error: %d\n",
+ ret);
+ return ret;
+ }
+
+ phc->active = true;
+ netdev_dbg(ena_dev->net_device, "PHC is active in the device\n");
+
+ return ret;
+}
+
+void ena_com_phc_destroy(struct ena_com_dev *ena_dev)
{
- ena_dev->admin_queue.auto_polling = polling;
+ struct ena_com_phc_info *phc = &ena_dev->phc;
+ unsigned long flags = 0;
+
+ /* In case PHC is not supported by the device, silently exiting */
+ if (!phc->virt_addr)
+ return;
+
+ spin_lock_irqsave(&phc->lock, flags);
+ phc->active = false;
+ spin_unlock_irqrestore(&phc->lock, flags);
+
+ dma_free_coherent(ena_dev->dmadev,
+ sizeof(*phc->virt_addr),
+ phc->virt_addr,
+ phc->phys_addr);
+ phc->virt_addr = NULL;
+}
+
+int ena_com_phc_get_timestamp(struct ena_com_dev *ena_dev, u64 *timestamp)
+{
+ volatile struct ena_admin_phc_resp *resp = ena_dev->phc.virt_addr;
+ const ktime_t zero_system_time = ktime_set(0, 0);
+ struct ena_com_phc_info *phc = &ena_dev->phc;
+ ktime_t expire_time;
+ ktime_t block_time;
+ unsigned long flags = 0;
+ int ret = 0;
+
+ if (!phc->active) {
+ netdev_err(ena_dev->net_device, "PHC feature is not active in the device\n");
+ return -EOPNOTSUPP;
+ }
+
+ spin_lock_irqsave(&phc->lock, flags);
+
+ /* Check if PHC is in blocked state */
+ if (unlikely(ktime_compare(phc->system_time, zero_system_time))) {
+ /* Check if blocking time expired */
+ block_time = ktime_add_us(phc->system_time, phc->block_timeout_usec);
+ if (!ktime_after(ktime_get(), block_time)) {
+ /* PHC is still in blocked state, skip PHC request */
+ phc->stats.phc_skp++;
+ ret = -EBUSY;
+ goto skip;
+ }
+
+ /* PHC is in active state, update statistics according
+ * to req_id and error_flags
+ */
+ if (READ_ONCE(resp->req_id) != phc->req_id) {
+ /* Device didn't update req_id during blocking time,
+ * this indicates a device error
+ */
+ netdev_err(ena_dev->net_device,
+ "PHC get time request 0x%x failed (device error)\n",
+ phc->req_id);
+ phc->stats.phc_err_dv++;
+ } else if (resp->error_flags & ENA_PHC_ERROR_FLAGS) {
+ /* Device updated req_id during blocking time but got
+ * a PHC error, this occurs if device:
+ * - exceeded the get time request limit
+ * - received an invalid timestamp
+ */
+ netdev_err(ena_dev->net_device,
+ "PHC get time request 0x%x failed (error 0x%x)\n",
+ phc->req_id,
+ resp->error_flags);
+ phc->stats.phc_err_ts += !!(resp->error_flags &
+ ENA_ADMIN_PHC_ERROR_FLAG_TIMESTAMP);
+ } else {
+ /* Device updated req_id during blocking time
+ * with valid timestamp
+ */
+ phc->stats.phc_exp++;
+ }
+ }
+
+ /* Setting relative timeouts */
+ phc->system_time = ktime_get();
+ block_time = ktime_add_us(phc->system_time, phc->block_timeout_usec);
+ expire_time = ktime_add_us(phc->system_time, phc->expire_timeout_usec);
+
+ /* We expect the device to return this req_id once
+ * the new PHC timestamp is updated
+ */
+ phc->req_id++;
+
+ /* Initialize PHC shared memory with different req_id value
+ * to be able to identify once the device changes it to req_id
+ */
+ resp->req_id = phc->req_id + ENA_PHC_REQ_ID_OFFSET;
+
+ /* Writing req_id to PHC bar */
+ writel(phc->req_id, ena_dev->reg_bar + phc->doorbell_offset);
+
+ /* Stalling until the device updates req_id */
+ while (1) {
+ if (unlikely(ktime_after(ktime_get(), expire_time))) {
+ /* Gave up waiting for updated req_id, PHC enters into
+ * blocked state until passing blocking time,
+ * during this time any get PHC timestamp will fail with
+ * device busy error
+ */
+ ret = -EBUSY;
+ break;
+ }
+
+ /* Check if req_id was updated by the device */
+ if (READ_ONCE(resp->req_id) != phc->req_id) {
+ /* req_id was not updated by the device yet,
+ * check again on next loop
+ */
+ continue;
+ }
+
+ /* req_id was updated by the device which indicates that
+ * PHC timestamp and error_flags are updated too,
+ * checking errors before retrieving timestamp
+ */
+ if (unlikely(resp->error_flags & ENA_PHC_ERROR_FLAGS)) {
+ /* Retrieved invalid PHC timestamp, PHC enters into
+ * blocked state until passing blocking time,
+ * during this time any get PHC timestamp requests
+ * will fail with device busy error
+ */
+ ret = -EBUSY;
+ break;
+ }
+
+ /* PHC timestamp value is returned to the caller */
+ *timestamp = resp->timestamp;
+
+ /* Update statistic on valid PHC timestamp retrieval */
+ phc->stats.phc_cnt++;
+
+ /* This indicates PHC state is active */
+ phc->system_time = zero_system_time;
+ break;
+ }
+
+skip:
+ spin_unlock_irqrestore(&phc->lock, flags);
+
+ return ret;
}
int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
@@ -1892,6 +2133,56 @@ int ena_com_get_link_params(struct ena_com_dev *ena_dev,
return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0);
}
+static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
+ struct ena_com_stats_ctx *ctx,
+ enum ena_admin_get_stats_type type)
+{
+ struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
+ struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
+ struct ena_com_admin_queue *admin_queue;
+ int ret;
+
+ admin_queue = &ena_dev->admin_queue;
+
+ get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
+ get_cmd->aq_common_descriptor.flags = 0;
+ get_cmd->type = type;
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)get_cmd,
+ sizeof(*get_cmd),
+ (struct ena_admin_acq_entry *)get_resp,
+ sizeof(*get_resp));
+
+ if (unlikely(ret))
+ netdev_err(ena_dev->net_device, "Failed to get stats. error: %d\n", ret);
+
+ return ret;
+}
+
+static void ena_com_set_supported_customer_metrics(struct ena_com_dev *ena_dev)
+{
+ struct ena_customer_metrics *customer_metrics;
+ struct ena_com_stats_ctx ctx;
+ int ret;
+
+ customer_metrics = &ena_dev->customer_metrics;
+ if (!ena_com_get_cap(ena_dev, ENA_ADMIN_CUSTOMER_METRICS)) {
+ customer_metrics->supported_metrics = ENA_ADMIN_CUSTOMER_METRICS_MIN_SUPPORT_MASK;
+ return;
+ }
+
+ memset(&ctx, 0x0, sizeof(ctx));
+ ctx.get_cmd.requested_metrics = ENA_ADMIN_CUSTOMER_METRICS_SUPPORT_MASK;
+ ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_CUSTOMER_METRICS);
+ if (likely(ret == 0))
+ customer_metrics->supported_metrics =
+ ctx.get_resp.u.customer_metrics.reported_metrics;
+ else
+ netdev_err(ena_dev->net_device,
+ "Failed to query customer metrics support. error: %d\n", ret);
+}
+
int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
@@ -1971,6 +2262,8 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
else
return rc;
+ ena_com_set_supported_customer_metrics(ena_dev);
+
return 0;
}
@@ -2115,65 +2408,88 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
return 0;
}
-static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
- struct ena_com_stats_ctx *ctx,
- enum ena_admin_get_stats_type type)
+int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
+ struct ena_admin_eni_stats *stats)
{
- struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
- struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
- struct ena_com_admin_queue *admin_queue;
+ struct ena_com_stats_ctx ctx;
int ret;
- admin_queue = &ena_dev->admin_queue;
-
- get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
- get_cmd->aq_common_descriptor.flags = 0;
- get_cmd->type = type;
-
- ret = ena_com_execute_admin_command(admin_queue,
- (struct ena_admin_aq_entry *)get_cmd,
- sizeof(*get_cmd),
- (struct ena_admin_acq_entry *)get_resp,
- sizeof(*get_resp));
+ if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS)) {
+ netdev_err(ena_dev->net_device, "Capability %d isn't supported\n",
+ ENA_ADMIN_ENI_STATS);
+ return -EOPNOTSUPP;
+ }
- if (unlikely(ret))
- netdev_err(ena_dev->net_device, "Failed to get stats. error: %d\n", ret);
+ memset(&ctx, 0x0, sizeof(ctx));
+ ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENI);
+ if (likely(ret == 0))
+ memcpy(stats, &ctx.get_resp.u.eni_stats,
+ sizeof(ctx.get_resp.u.eni_stats));
return ret;
}
-int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
- struct ena_admin_eni_stats *stats)
+int ena_com_get_ena_srd_info(struct ena_com_dev *ena_dev,
+ struct ena_admin_ena_srd_info *info)
{
struct ena_com_stats_ctx ctx;
int ret;
- if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS)) {
+ if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENA_SRD_INFO)) {
netdev_err(ena_dev->net_device, "Capability %d isn't supported\n",
- ENA_ADMIN_ENI_STATS);
+ ENA_ADMIN_ENA_SRD_INFO);
return -EOPNOTSUPP;
}
memset(&ctx, 0x0, sizeof(ctx));
- ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENI);
+ ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENA_SRD);
if (likely(ret == 0))
- memcpy(stats, &ctx.get_resp.u.eni_stats,
- sizeof(ctx.get_resp.u.eni_stats));
+ memcpy(info, &ctx.get_resp.u.ena_srd_info,
+ sizeof(ctx.get_resp.u.ena_srd_info));
return ret;
}
-int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
- struct ena_admin_basic_stats *stats)
+int ena_com_get_customer_metrics(struct ena_com_dev *ena_dev, char *buffer, u32 len)
{
+ struct ena_admin_aq_get_stats_cmd *get_cmd;
struct ena_com_stats_ctx ctx;
int ret;
+ if (unlikely(len > ena_dev->customer_metrics.buffer_len)) {
+ netdev_err(ena_dev->net_device,
+ "Invalid buffer size %u. The given buffer is too big.\n", len);
+ return -EINVAL;
+ }
+
+ if (!ena_com_get_cap(ena_dev, ENA_ADMIN_CUSTOMER_METRICS)) {
+ netdev_err(ena_dev->net_device, "Capability %d not supported.\n",
+ ENA_ADMIN_CUSTOMER_METRICS);
+ return -EOPNOTSUPP;
+ }
+
+ if (!ena_dev->customer_metrics.supported_metrics) {
+ netdev_err(ena_dev->net_device, "No supported customer metrics.\n");
+ return -EOPNOTSUPP;
+ }
+
+ get_cmd = &ctx.get_cmd;
memset(&ctx, 0x0, sizeof(ctx));
- ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
+ ret = ena_com_mem_addr_set(ena_dev,
+ &get_cmd->u.control_buffer.address,
+ ena_dev->customer_metrics.buffer_dma_addr);
+ if (unlikely(ret)) {
+ netdev_err(ena_dev->net_device, "Memory address set failed.\n");
+ return ret;
+ }
+
+ get_cmd->u.control_buffer.length = ena_dev->customer_metrics.buffer_len;
+ get_cmd->requested_metrics = ena_dev->customer_metrics.supported_metrics;
+ ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_CUSTOMER_METRICS);
if (likely(ret == 0))
- memcpy(stats, &ctx.get_resp.u.basic_stats,
- sizeof(ctx.get_resp.u.basic_stats));
+ memcpy(buffer, ena_dev->customer_metrics.buffer_virt_addr, len);
+ else
+ netdev_err(ena_dev->net_device, "Failed to get customer metrics. error: %d\n", ret);
return ret;
}
@@ -2210,24 +2526,6 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu)
return ret;
}
-int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
- struct ena_admin_feature_offload_desc *offload)
-{
- int ret;
- struct ena_admin_get_feat_resp resp;
-
- ret = ena_com_get_feature(ena_dev, &resp,
- ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
- if (unlikely(ret)) {
- netdev_err(ena_dev->net_device, "Failed to get offload capabilities %d\n", ret);
- return ret;
- }
-
- memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));
-
- return 0;
-}
-
int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
{
struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
@@ -2717,6 +3015,24 @@ int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
return 0;
}
+int ena_com_allocate_customer_metrics_buffer(struct ena_com_dev *ena_dev)
+{
+ struct ena_customer_metrics *customer_metrics = &ena_dev->customer_metrics;
+
+ customer_metrics->buffer_len = ENA_CUSTOMER_METRICS_BUFFER_SIZE;
+ customer_metrics->buffer_virt_addr = NULL;
+
+ customer_metrics->buffer_virt_addr =
+ dma_alloc_coherent(ena_dev->dmadev, customer_metrics->buffer_len,
+ &customer_metrics->buffer_dma_addr, GFP_KERNEL);
+ if (!customer_metrics->buffer_virt_addr) {
+ customer_metrics->buffer_len = 0;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
{
struct ena_host_attribute *host_attr = &ena_dev->host_attr;
@@ -2739,6 +3055,19 @@ void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
}
}
+void ena_com_delete_customer_metrics_buffer(struct ena_com_dev *ena_dev)
+{
+ struct ena_customer_metrics *customer_metrics = &ena_dev->customer_metrics;
+
+ if (customer_metrics->buffer_virt_addr) {
+ dma_free_coherent(ena_dev->dmadev, customer_metrics->buffer_len,
+ customer_metrics->buffer_virt_addr,
+ customer_metrics->buffer_dma_addr);
+ customer_metrics->buffer_virt_addr = NULL;
+ customer_metrics->buffer_len = 0;
+ }
+}
+
int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
{
struct ena_host_attribute *host_attr = &ena_dev->host_attr;
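The readless PHC flow added to ena_com.c above is consumed by the new ena_phc.c introduced in this series (not shown in this excerpt). A hedged sketch of how a PTP ->gettime64() callback could wrap ena_com_phc_get_timestamp(); the example_phc structure and callback are placeholders, not the actual ena_phc.c implementation:

#include <linux/ptp_clock_kernel.h>
#include "ena_com.h"

struct example_phc {
	struct ptp_clock_info clock_info;
	struct ena_com_dev *ena_dev;
};

static int example_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
	struct example_phc *phc = container_of(ptp, struct example_phc, clock_info);
	u64 timestamp;
	int err;

	/* One readless round trip; returns -EBUSY while the PHC is blocked */
	err = ena_com_phc_get_timestamp(phc->ena_dev, &timestamp);
	if (err)
		return err;

	*ts = ns_to_timespec64(timestamp);

	return 0;
}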
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
index fea57eb8e58b..64df2c48c9a6 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_com.h
@@ -42,12 +42,14 @@
#define ADMIN_CQ_SIZE(depth) ((depth) * sizeof(struct ena_admin_acq_entry))
#define ADMIN_AENQ_SIZE(depth) ((depth) * sizeof(struct ena_admin_aenq_entry))
+#define ENA_CUSTOMER_METRICS_BUFFER_SIZE 512
+
/*****************************************************************************/
/*****************************************************************************/
/* ENA adaptive interrupt moderation settings */
#define ENA_INTR_INITIAL_TX_INTERVAL_USECS 64
-#define ENA_INTR_INITIAL_RX_INTERVAL_USECS 0
+#define ENA_INTR_INITIAL_RX_INTERVAL_USECS 20
#define ENA_DEFAULT_INTR_DELAY_RESOLUTION 1
#define ENA_HASH_KEY_SIZE 40
@@ -208,6 +210,14 @@ struct ena_com_stats_admin {
u64 no_completion;
};
+struct ena_com_stats_phc {
+ u64 phc_cnt;
+ u64 phc_exp;
+ u64 phc_skp;
+ u64 phc_err_dv;
+ u64 phc_err_ts;
+};
+
struct ena_com_admin_queue {
void *q_dmadev;
struct ena_com_dev *ena_dev;
@@ -222,9 +232,6 @@ struct ena_com_admin_queue {
/* Indicate if the admin queue should poll for completion */
bool polling;
- /* Define if fallback to polling mode should occur */
- bool auto_polling;
-
u16 curr_cmd_id;
/* Indicate that the ena was initialized and can
@@ -259,6 +266,47 @@ struct ena_com_mmio_read {
spinlock_t lock;
};
+/* PTP hardware clock (PHC) MMIO read data info */
+struct ena_com_phc_info {
+ /* Internal PHC statistics */
+ struct ena_com_stats_phc stats;
+
+ /* PHC shared memory - virtual address */
+ struct ena_admin_phc_resp *virt_addr;
+
+ /* System time of last PHC request */
+ ktime_t system_time;
+
+ /* Spin lock to ensure a single outstanding PHC read */
+ spinlock_t lock;
+
+ /* PHC doorbell address as an offset to PCIe MMIO REG BAR */
+ u32 doorbell_offset;
+
+ /* Shared memory read expire timeout (usec)
+ * Max time for valid PHC retrieval, passing this threshold will fail
+ * the get time request and block new PHC requests for block_timeout_usec
+ * in order to prevent floods on busy device
+ */
+ u32 expire_timeout_usec;
+
+ /* Shared memory read abort timeout (usec)
+ * PHC requests block period, blocking starts once PHC request expired
+ * in order to prevent floods on busy device,
+ * any PHC requests during block period will be skipped
+ */
+ u32 block_timeout_usec;
+
+ /* PHC shared memory - physical address */
+ dma_addr_t phys_addr;
+
+ /* Request id sent to the device */
+ u16 req_id;
+
+ /* True if PHC is active in the device */
+ bool active;
+};
+
struct ena_rss {
/* Indirect table */
u16 *host_rss_ind_tbl;
@@ -278,6 +326,16 @@ struct ena_rss {
};
+struct ena_customer_metrics {
+ /* in correlation with ENA_ADMIN_CUSTOMER_METRICS_SUPPORT_MASK
+ * and ena_admin_customer_metrics_id
+ */
+ u64 supported_metrics;
+ dma_addr_t buffer_dma_addr;
+ void *buffer_virt_addr;
+ u32 buffer_len;
+};
+
struct ena_host_attribute {
/* Debug area */
u8 *debug_area_virt_addr;
@@ -305,7 +363,10 @@ struct ena_com_dev {
u16 stats_func; /* Selected function for extended statistic dump */
u16 stats_queue; /* Selected queue for extended statistic dump */
+ u32 ena_min_poll_delay_us;
+
struct ena_com_mmio_read mmio_read;
+ struct ena_com_phc_info phc;
struct ena_rss rss;
u32 supported_features;
@@ -326,7 +387,7 @@ struct ena_com_dev {
struct ena_com_llq_info llq_info;
- u32 ena_min_poll_delay_us;
+ struct ena_customer_metrics customer_metrics;
};
struct ena_com_dev_get_features_ctx {
@@ -371,6 +432,40 @@ struct ena_aenq_handlers {
*/
int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev);
+/* ena_com_phc_init - Allocate and initialize PHC feature
+ * @ena_dev: ENA communication layer struct
+ * @note: This method assumes PHC is supported by the device
+ * @return - 0 on success, negative value on failure
+ */
+int ena_com_phc_init(struct ena_com_dev *ena_dev);
+
+/* ena_com_phc_supported - Return if PHC feature is supported by the device
+ * @ena_dev: ENA communication layer struct
+ * @note: This method must be called after getting supported features
+ * @return - supported or not
+ */
+bool ena_com_phc_supported(struct ena_com_dev *ena_dev);
+
+/* ena_com_phc_config - Configure PHC feature
+ * @ena_dev: ENA communication layer struct
+ * Configure PHC feature in driver and device
+ * @note: This method assumes PHC is supported by the device
+ * @return - 0 on success, negative value on failure
+ */
+int ena_com_phc_config(struct ena_com_dev *ena_dev);
+
+/* ena_com_phc_destroy - Destroy PHC feature
+ * @ena_dev: ENA communication layer struct
+ */
+void ena_com_phc_destroy(struct ena_com_dev *ena_dev);
+
+/* ena_com_phc_get_timestamp - Retrieve PHC timestamp
+ * @ena_dev: ENA communication layer struct
+ * @timestamp: Retrieved PHC timestamp
+ * @return - 0 on success, negative value on failure
+ */
+int ena_com_phc_get_timestamp(struct ena_com_dev *ena_dev, u64 *timestamp);
+
/* ena_com_set_mmio_read_mode - Enable/disable the indirect mmio reg read mechanism
* @ena_dev: ENA communication layer struct
* @readless_supported: readless mode (enable/disable)
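For reference, a minimal sketch of the call order these new PHC entry points expect, as exercised later in this patch by ena_phc.c (the wrapper example_phc_bringup and its error label are hypothetical, for illustration only):

static int example_phc_bringup(struct ena_com_dev *ena_dev, u64 *timestamp)
{
	int rc;

	/* Only attempt PHC setup on devices that advertise support */
	if (!ena_com_phc_supported(ena_dev))
		return -EOPNOTSUPP;

	/* Allocate and initialize the PHC shared memory and state */
	rc = ena_com_phc_init(ena_dev);
	if (rc)
		return rc;

	/* Push the PHC configuration to the device */
	rc = ena_com_phc_config(ena_dev);
	if (rc)
		goto err_destroy;

	/* Retrieve a single timestamp (in nanoseconds) */
	rc = ena_com_phc_get_timestamp(ena_dev, timestamp);
	if (rc)
		goto err_destroy;

	return 0;

err_destroy:
	ena_com_phc_destroy(ena_dev);
	return rc;
}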
@@ -479,17 +574,6 @@ bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev);
*/
void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling);
-/* ena_com_set_admin_auto_polling_mode - Enable autoswitch to polling mode
- * @ena_dev: ENA communication layer struct
- * @polling: Enable/Disable polling mode
- *
- * Set the autopolling mode.
- * If autopolling is on:
- * In case of missing interrupt when data is available switch to polling.
- */
-void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
- bool polling);
-
/* ena_com_admin_q_comp_intr_handler - admin queue interrupt handler
* @ena_dev: ENA communication layer struct
*
@@ -577,40 +661,40 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag);
int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
struct ena_com_dev_get_features_ctx *get_feat_ctx);
-/* ena_com_get_dev_basic_stats - Get device basic statistics
+/* ena_com_get_eni_stats - Get extended network interface statistics
* @ena_dev: ENA communication layer struct
* @stats: stats return value
*
* @return: 0 on Success and negative value otherwise.
*/
-int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
- struct ena_admin_basic_stats *stats);
+int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
+ struct ena_admin_eni_stats *stats);
-/* ena_com_get_eni_stats - Get extended network interface statistics
+/* ena_com_get_ena_srd_info - Get ENA SRD network interface statistics
* @ena_dev: ENA communication layer struct
- * @stats: stats return value
+ * @info: ena srd stats and flags
*
* @return: 0 on Success and negative value otherwise.
*/
-int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
- struct ena_admin_eni_stats *stats);
+int ena_com_get_ena_srd_info(struct ena_com_dev *ena_dev,
+ struct ena_admin_ena_srd_info *info);
-/* ena_com_set_dev_mtu - Configure the device mtu.
+/* ena_com_get_customer_metrics - Get customer metrics for network interface
* @ena_dev: ENA communication layer struct
- * @mtu: mtu value
+ * @buffer: buffer for returned customer metrics
+ * @len: size of the buffer
*
* @return: 0 on Success and negative value otherwise.
*/
-int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu);
+int ena_com_get_customer_metrics(struct ena_com_dev *ena_dev, char *buffer, u32 len);
-/* ena_com_get_offload_settings - Retrieve the device offloads capabilities
+/* ena_com_set_dev_mtu - Configure the device mtu.
* @ena_dev: ENA communication layer struct
- * @offlad: offload return value
+ * @mtu: mtu value
*
* @return: 0 on Success and negative value otherwise.
*/
-int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
- struct ena_admin_feature_offload_desc *offload);
+int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu);
/* ena_com_rss_init - Init RSS
* @ena_dev: ENA communication layer struct
@@ -805,6 +889,13 @@ int ena_com_allocate_host_info(struct ena_com_dev *ena_dev);
int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
u32 debug_area_size);
+/* ena_com_allocate_customer_metrics_buffer - Allocate customer metrics resources.
+ * @ena_dev: ENA communication layer struct
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_allocate_customer_metrics_buffer(struct ena_com_dev *ena_dev);
+
/* ena_com_delete_debug_area - Free the debug area resources.
* @ena_dev: ENA communication layer struct
*
@@ -819,6 +910,13 @@ void ena_com_delete_debug_area(struct ena_com_dev *ena_dev);
*/
void ena_com_delete_host_info(struct ena_com_dev *ena_dev);
+/* ena_com_delete_customer_metrics_buffer - Free the customer metrics resources.
+ * @ena_dev: ENA communication layer struct
+ *
+ * Free the allocated customer metrics area.
+ */
+void ena_com_delete_customer_metrics_buffer(struct ena_com_dev *ena_dev);
+
/* ena_com_set_host_attributes - Update the device with the host
* attributes (debug area and host info) base address.
* @ena_dev: ENA communication layer struct
@@ -975,6 +1073,28 @@ static inline bool ena_com_get_cap(struct ena_com_dev *ena_dev,
return !!(ena_dev->capabilities & BIT(cap_id));
}
+/* ena_com_get_customer_metric_support - query whether device supports a given customer metric.
+ * @ena_dev: ENA communication layer struct
+ * @metric_id: enum value representing the customer metric
+ *
+ * @return - true if customer metric is supported or false otherwise
+ */
+static inline bool ena_com_get_customer_metric_support(struct ena_com_dev *ena_dev,
+ enum ena_admin_customer_metrics_id metric_id)
+{
+ return !!(ena_dev->customer_metrics.supported_metrics & BIT(metric_id));
+}
+
+/* ena_com_get_customer_metric_count - return the number of supported customer metrics.
+ * @ena_dev: ENA communication layer struct
+ *
+ * @return - the number of supported customer metrics
+ */
+static inline int ena_com_get_customer_metric_count(struct ena_com_dev *ena_dev)
+{
+ return hweight64(ena_dev->customer_metrics.supported_metrics);
+}
+
/* ena_com_update_intr_reg - Prepare interrupt register
* @intr_reg: interrupt register to update.
* @rx_delay_interval: Rx interval in usecs
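The two inline helpers above are all that is needed to enumerate what the device exposes; a short illustrative sketch (the function name and pr_info output are hypothetical, and the loop bound simply covers the 64-bit supported_metrics mask):

static void example_dump_supported_metrics(struct ena_com_dev *ena_dev)
{
	int metric_id;

	pr_info("device supports %d customer metrics\n",
		ena_com_get_customer_metric_count(ena_dev));

	/* supported_metrics is a u64 bitmask, so at most 64 metric ids */
	for (metric_id = 0; metric_id < 64; metric_id++)
		if (ena_com_get_customer_metric_support(ena_dev, metric_id))
			pr_info("customer metric id %d is supported\n", metric_id);
}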
diff --git a/drivers/net/ethernet/amazon/ena/ena_debugfs.c b/drivers/net/ethernet/amazon/ena/ena_debugfs.c
new file mode 100644
index 000000000000..46ed80986724
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_debugfs.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ */
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/seq_file.h>
+#include <linux/pci.h>
+#include "ena_debugfs.h"
+#include "ena_phc.h"
+
+static int phc_stats_show(struct seq_file *file, void *priv)
+{
+ struct ena_adapter *adapter = file->private;
+
+ if (!ena_phc_is_active(adapter))
+ return 0;
+
+ seq_printf(file,
+ "phc_cnt: %llu\n",
+ adapter->ena_dev->phc.stats.phc_cnt);
+ seq_printf(file,
+ "phc_exp: %llu\n",
+ adapter->ena_dev->phc.stats.phc_exp);
+ seq_printf(file,
+ "phc_skp: %llu\n",
+ adapter->ena_dev->phc.stats.phc_skp);
+ seq_printf(file,
+ "phc_err_dv: %llu\n",
+ adapter->ena_dev->phc.stats.phc_err_dv);
+ seq_printf(file,
+ "phc_err_ts: %llu\n",
+ adapter->ena_dev->phc.stats.phc_err_ts);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(phc_stats);
+
+void ena_debugfs_init(struct net_device *dev)
+{
+ struct ena_adapter *adapter = netdev_priv(dev);
+
+ adapter->debugfs_base =
+ debugfs_create_dir(dev_name(&adapter->pdev->dev), NULL);
+
+ debugfs_create_file("phc_stats",
+ 0400,
+ adapter->debugfs_base,
+ adapter,
+ &phc_stats_fops);
+}
+
+void ena_debugfs_terminate(struct net_device *dev)
+{
+ struct ena_adapter *adapter = netdev_priv(dev);
+
+ debugfs_remove_recursive(adapter->debugfs_base);
+}
+
+#endif /* CONFIG_DEBUG_FS */
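When PHC is active, the node above appears as phc_stats under a debugfs directory named after the PCI device (the parent passed to debugfs_create_dir() is NULL, so it sits at the debugfs root). Reading it yields one counter per line; the values below are made up, only the format follows the seq_printf() calls:

phc_cnt: 1024
phc_exp: 2
phc_skp: 7
phc_err_dv: 0
phc_err_ts: 0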
diff --git a/drivers/net/ethernet/amazon/ena/ena_debugfs.h b/drivers/net/ethernet/amazon/ena/ena_debugfs.h
new file mode 100644
index 000000000000..dc61dd998867
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_debugfs.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ */
+
+#ifndef __ENA_DEBUGFS_H__
+#define __ENA_DEBUGFS_H__
+
+#include <linux/debugfs.h>
+#include <linux/netdevice.h>
+#include "ena_netdev.h"
+
+#ifdef CONFIG_DEBUG_FS
+
+void ena_debugfs_init(struct net_device *dev);
+
+void ena_debugfs_terminate(struct net_device *dev);
+
+#else /* CONFIG_DEBUG_FS */
+
+static inline void ena_debugfs_init(struct net_device *dev) {}
+
+static inline void ena_debugfs_terminate(struct net_device *dev) {}
+
+#endif /* CONFIG_DEBUG_FS */
+
+#endif /* __ENA_DEBUGFS_H__ */
diff --git a/drivers/net/ethernet/amazon/ena/ena_devlink.c b/drivers/net/ethernet/amazon/ena/ena_devlink.c
new file mode 100644
index 000000000000..ac81c24016dd
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_devlink.c
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ */
+
+#include <linux/pci.h>
+#include "ena_devlink.h"
+#include "ena_phc.h"
+
+static int ena_devlink_enable_phc_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct ena_adapter *adapter = ENA_DEVLINK_PRIV(devlink);
+
+ if (!val.vbool)
+ return 0;
+
+ if (!ena_com_phc_supported(adapter->ena_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Device doesn't support PHC");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static const struct devlink_param ena_devlink_params[] = {
+ DEVLINK_PARAM_GENERIC(ENABLE_PHC,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL,
+ NULL,
+ ena_devlink_enable_phc_validate),
+};
+
+void ena_devlink_params_get(struct devlink *devlink)
+{
+ struct ena_adapter *adapter = ENA_DEVLINK_PRIV(devlink);
+ union devlink_param_value val;
+ int err;
+
+ err = devl_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_PHC,
+ &val);
+ if (err) {
+ netdev_err(adapter->netdev, "Failed to query PHC param\n");
+ return;
+ }
+
+ ena_phc_enable(adapter, val.vbool);
+}
+
+void ena_devlink_disable_phc_param(struct devlink *devlink)
+{
+ union devlink_param_value value;
+
+ value.vbool = false;
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_PHC,
+ value);
+}
+
+static void ena_devlink_port_register(struct devlink *devlink)
+{
+ struct ena_adapter *adapter = ENA_DEVLINK_PRIV(devlink);
+ struct devlink_port_attrs attrs = {};
+
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ devlink_port_attrs_set(&adapter->devlink_port, &attrs);
+ devl_port_register(devlink, &adapter->devlink_port, 0);
+}
+
+static void ena_devlink_port_unregister(struct devlink *devlink)
+{
+ struct ena_adapter *adapter = ENA_DEVLINK_PRIV(devlink);
+
+ devl_port_unregister(&adapter->devlink_port);
+}
+
+static int ena_devlink_reload_down(struct devlink *devlink,
+ bool netns_change,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ struct netlink_ext_ack *extack)
+{
+ struct ena_adapter *adapter = ENA_DEVLINK_PRIV(devlink);
+
+ if (netns_change) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Namespace change is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ ena_devlink_port_unregister(devlink);
+
+ rtnl_lock();
+ ena_destroy_device(adapter, false);
+ rtnl_unlock();
+
+ return 0;
+}
+
+static int ena_devlink_reload_up(struct devlink *devlink,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ u32 *actions_performed,
+ struct netlink_ext_ack *extack)
+{
+ struct ena_adapter *adapter = ENA_DEVLINK_PRIV(devlink);
+ int err = 0;
+
+ rtnl_lock();
+ /* Check that no other routine initialized the device (e.g.
+ * ena_fw_reset_device()). Also we're under devlink_mutex here,
+ * so devlink isn't freed under our feet.
+ */
+ if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
+ err = ena_restore_device(adapter);
+
+ rtnl_unlock();
+
+ ena_devlink_port_register(devlink);
+
+ if (!err)
+ *actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
+
+ return err;
+}
+
+static const struct devlink_ops ena_devlink_ops = {
+ .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT),
+ .reload_down = ena_devlink_reload_down,
+ .reload_up = ena_devlink_reload_up,
+};
+
+static int ena_devlink_configure_params(struct devlink *devlink)
+{
+ struct ena_adapter *adapter = ENA_DEVLINK_PRIV(devlink);
+ union devlink_param_value value;
+ int rc;
+
+ rc = devlink_params_register(devlink, ena_devlink_params,
+ ARRAY_SIZE(ena_devlink_params));
+ if (rc) {
+ netdev_err(adapter->netdev, "Failed to register devlink params\n");
+ return rc;
+ }
+
+ value.vbool = ena_phc_is_enabled(adapter);
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_PHC,
+ value);
+
+ return 0;
+}
+
+struct devlink *ena_devlink_alloc(struct ena_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+ struct devlink *devlink;
+
+ devlink = devlink_alloc(&ena_devlink_ops,
+ sizeof(struct ena_adapter *),
+ dev);
+ if (!devlink) {
+ netdev_err(adapter->netdev,
+ "Failed to allocate devlink struct\n");
+ return NULL;
+ }
+
+ ENA_DEVLINK_PRIV(devlink) = adapter;
+ adapter->devlink = devlink;
+
+ if (ena_devlink_configure_params(devlink))
+ goto free_devlink;
+
+ return devlink;
+
+free_devlink:
+ devlink_free(devlink);
+ return NULL;
+}
+
+static void ena_devlink_configure_params_clean(struct devlink *devlink)
+{
+ devlink_params_unregister(devlink, ena_devlink_params,
+ ARRAY_SIZE(ena_devlink_params));
+}
+
+void ena_devlink_free(struct devlink *devlink)
+{
+ ena_devlink_configure_params_clean(devlink);
+
+ devlink_free(devlink);
+}
+
+void ena_devlink_register(struct devlink *devlink, struct device *dev)
+{
+ devl_lock(devlink);
+ ena_devlink_port_register(devlink);
+ devl_register(devlink);
+ devl_unlock(devlink);
+}
+
+void ena_devlink_unregister(struct devlink *devlink)
+{
+ devl_lock(devlink);
+ ena_devlink_port_unregister(devlink);
+ devl_unregister(devlink);
+ devl_unlock(devlink);
+}
diff --git a/drivers/net/ethernet/amazon/ena/ena_devlink.h b/drivers/net/ethernet/amazon/ena/ena_devlink.h
new file mode 100644
index 000000000000..7a19ce4830d9
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_devlink.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ */
+#ifndef DEVLINK_H
+#define DEVLINK_H
+
+#include "ena_netdev.h"
+#include <net/devlink.h>
+
+#define ENA_DEVLINK_PRIV(devlink) \
+ (*(struct ena_adapter **)devlink_priv(devlink))
+
+struct devlink *ena_devlink_alloc(struct ena_adapter *adapter);
+void ena_devlink_free(struct devlink *devlink);
+void ena_devlink_register(struct devlink *devlink, struct device *dev);
+void ena_devlink_unregister(struct devlink *devlink);
+void ena_devlink_params_get(struct devlink *devlink);
+void ena_devlink_disable_phc_param(struct devlink *devlink);
+
+#endif /* DEVLINK_H */
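Because the ENABLE_PHC parameter is registered with the driverinit cmode only, toggling it from user space takes effect the next time ena_devlink_params_get() runs during device init. Assuming the generic parameter is exposed under the name enable_phc (the lowercase form of the generic id used above, not spelled out in this patch), the expected flow would be "devlink dev param set pci/<bdf> name enable_phc value true cmode driverinit" followed by "devlink dev reload pci/<bdf>", which re-runs the destroy/restore path implemented in ena_devlink_reload_down()/_up().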
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
index 933e619b3a31..4c6e07aa4bbb 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -229,30 +229,43 @@ static struct ena_eth_io_rx_cdesc_base *
idx * io_cq->cdesc_entry_size_in_bytes);
}
-static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
- u16 *first_cdesc_idx)
+static int ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
+ u16 *first_cdesc_idx,
+ u16 *num_descs)
{
+ u16 count = io_cq->cur_rx_pkt_cdesc_count, head_masked;
struct ena_eth_io_rx_cdesc_base *cdesc;
- u16 count = 0, head_masked;
u32 last = 0;
do {
+ u32 status;
+
cdesc = ena_com_get_next_rx_cdesc(io_cq);
if (!cdesc)
break;
+ status = READ_ONCE(cdesc->status);
ena_com_cq_inc_head(io_cq);
+ if (unlikely((status & ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT && count != 0)) {
+ struct ena_com_dev *dev = ena_com_io_cq_to_ena_dev(io_cq);
+
+ netdev_err(dev->net_device,
+ "First bit is on in descriptor #%d on q_id: %d, req_id: %u\n",
+ count, io_cq->qid, cdesc->req_id);
+ return -EFAULT;
+ }
count++;
- last = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
- ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
+ last = (status & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
} while (!last);
if (last) {
*first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx;
- count += io_cq->cur_rx_pkt_cdesc_count;
head_masked = io_cq->head & (io_cq->q_depth - 1);
+ *num_descs = count;
io_cq->cur_rx_pkt_cdesc_count = 0;
io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;
@@ -260,11 +273,11 @@ static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
"ENA q_id: %d packets were completed. first desc idx %u descs# %d\n",
io_cq->qid, *first_cdesc_idx, count);
} else {
- io_cq->cur_rx_pkt_cdesc_count += count;
- count = 0;
+ io_cq->cur_rx_pkt_cdesc_count = count;
+ *num_descs = 0;
}
- return count;
+ return 0;
}
static int ena_com_create_meta(struct ena_com_io_sq *io_sq,
@@ -539,10 +552,14 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
u16 cdesc_idx = 0;
u16 nb_hw_desc;
u16 i = 0;
+ int rc;
WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");
- nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx);
+ rc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx, &nb_hw_desc);
+ if (unlikely(rc != 0))
+ return -EFAULT;
+
if (nb_hw_desc == 0) {
ena_rx_ctx->descs = nb_hw_desc;
return 0;
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
index 72b019758caa..449bc4960ccc 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
@@ -47,7 +47,7 @@ struct ena_com_rx_ctx {
bool frag;
u32 hash;
u16 descs;
- int max_bufs;
+ u16 max_bufs;
u8 pkt_offset;
};
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 0cb6cc1cef56..fe3479b84a1f 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -5,15 +5,21 @@
#include <linux/ethtool.h>
#include <linux/pci.h>
+#include <linux/net_tstamp.h>
#include "ena_netdev.h"
#include "ena_xdp.h"
+#include "ena_phc.h"
struct ena_stats {
char name[ETH_GSTRING_LEN];
int stat_offset;
};
+struct ena_hw_metrics {
+ char name[ETH_GSTRING_LEN];
+};
+
#define ENA_STAT_ENA_COM_ENTRY(stat) { \
.name = #stat, \
.stat_offset = offsetof(struct ena_com_stats_admin, stat) / sizeof(u64) \
@@ -41,6 +47,18 @@ struct ena_stats {
#define ENA_STAT_ENI_ENTRY(stat) \
ENA_STAT_HW_ENTRY(stat, eni_stats)
+#define ENA_STAT_ENA_SRD_ENTRY(stat) \
+ ENA_STAT_HW_ENTRY(stat, ena_srd_stats)
+
+#define ENA_STAT_ENA_SRD_MODE_ENTRY(stat) { \
+ .name = #stat, \
+ .stat_offset = offsetof(struct ena_admin_ena_srd_info, flags) / sizeof(u64) \
+}
+
+#define ENA_METRIC_ENI_ENTRY(stat) { \
+ .name = #stat \
+}
+
static const struct ena_stats ena_stats_global_strings[] = {
ENA_STAT_GLOBAL_ENTRY(tx_timeout),
ENA_STAT_GLOBAL_ENTRY(suspend),
@@ -49,8 +67,12 @@ static const struct ena_stats ena_stats_global_strings[] = {
ENA_STAT_GLOBAL_ENTRY(interface_up),
ENA_STAT_GLOBAL_ENTRY(interface_down),
ENA_STAT_GLOBAL_ENTRY(admin_q_pause),
+ ENA_STAT_GLOBAL_ENTRY(reset_fail),
};
+/* A partial list of hw stats. Used when the admin command
+ * of type ENA_ADMIN_GET_STATS_TYPE_CUSTOMER_METRICS is not supported
+ */
static const struct ena_stats ena_stats_eni_strings[] = {
ENA_STAT_ENI_ENTRY(bw_in_allowance_exceeded),
ENA_STAT_ENI_ENTRY(bw_out_allowance_exceeded),
@@ -59,6 +81,23 @@ static const struct ena_stats ena_stats_eni_strings[] = {
ENA_STAT_ENI_ENTRY(linklocal_allowance_exceeded),
};
+static const struct ena_hw_metrics ena_hw_stats_strings[] = {
+ ENA_METRIC_ENI_ENTRY(bw_in_allowance_exceeded),
+ ENA_METRIC_ENI_ENTRY(bw_out_allowance_exceeded),
+ ENA_METRIC_ENI_ENTRY(pps_allowance_exceeded),
+ ENA_METRIC_ENI_ENTRY(conntrack_allowance_exceeded),
+ ENA_METRIC_ENI_ENTRY(linklocal_allowance_exceeded),
+ ENA_METRIC_ENI_ENTRY(conntrack_allowance_available),
+};
+
+static const struct ena_stats ena_srd_info_strings[] = {
+ ENA_STAT_ENA_SRD_MODE_ENTRY(ena_srd_mode),
+ ENA_STAT_ENA_SRD_ENTRY(ena_srd_tx_pkts),
+ ENA_STAT_ENA_SRD_ENTRY(ena_srd_eligible_tx_pkts),
+ ENA_STAT_ENA_SRD_ENTRY(ena_srd_rx_pkts),
+ ENA_STAT_ENA_SRD_ENTRY(ena_srd_resource_utilization)
+};
+
static const struct ena_stats ena_stats_tx_strings[] = {
ENA_STAT_TX_ENTRY(cnt),
ENA_STAT_TX_ENTRY(bytes),
@@ -111,7 +150,9 @@ static const struct ena_stats ena_stats_ena_com_strings[] = {
#define ENA_STATS_ARRAY_TX ARRAY_SIZE(ena_stats_tx_strings)
#define ENA_STATS_ARRAY_RX ARRAY_SIZE(ena_stats_rx_strings)
#define ENA_STATS_ARRAY_ENA_COM ARRAY_SIZE(ena_stats_ena_com_strings)
-#define ENA_STATS_ARRAY_ENI(adapter) ARRAY_SIZE(ena_stats_eni_strings)
+#define ENA_STATS_ARRAY_ENI ARRAY_SIZE(ena_stats_eni_strings)
+#define ENA_STATS_ARRAY_ENA_SRD ARRAY_SIZE(ena_srd_info_strings)
+#define ENA_METRICS_ARRAY_ENI ARRAY_SIZE(ena_hw_stats_strings)
static void ena_safe_update_stat(u64 *src, u64 *dst,
struct u64_stats_sync *syncp)
@@ -124,6 +165,57 @@ static void ena_safe_update_stat(u64 *src, u64 *dst,
} while (u64_stats_fetch_retry(syncp, start));
}
+static void ena_metrics_stats(struct ena_adapter *adapter, u64 **data)
+{
+ struct ena_com_dev *dev = adapter->ena_dev;
+ const struct ena_stats *ena_stats;
+ u64 *ptr;
+ int i;
+
+ if (ena_com_get_cap(dev, ENA_ADMIN_CUSTOMER_METRICS)) {
+ u32 supported_metrics_count;
+ int len;
+
+ supported_metrics_count = ena_com_get_customer_metric_count(dev);
+ len = supported_metrics_count * sizeof(u64);
+
+ /* Fill the data buffer, and advance its pointer */
+ ena_com_get_customer_metrics(dev, (char *)(*data), len);
+ (*data) += supported_metrics_count;
+
+ } else if (ena_com_get_cap(dev, ENA_ADMIN_ENI_STATS)) {
+ ena_com_get_eni_stats(dev, &adapter->eni_stats);
+ /* Updating regardless of rc - once we told ethtool how many stats we have
+ * it will print that many stats. We can't leave holes in the stats
+ */
+ for (i = 0; i < ENA_STATS_ARRAY_ENI; i++) {
+ ena_stats = &ena_stats_eni_strings[i];
+
+ ptr = (u64 *)&adapter->eni_stats +
+ ena_stats->stat_offset;
+
+ ena_safe_update_stat(ptr, (*data)++, &adapter->syncp);
+ }
+ }
+
+ if (ena_com_get_cap(dev, ENA_ADMIN_ENA_SRD_INFO)) {
+ ena_com_get_ena_srd_info(dev, &adapter->ena_srd_info);
+ /* Get ENA SRD mode */
+ ptr = (u64 *)&adapter->ena_srd_info;
+ ena_safe_update_stat(ptr, (*data)++, &adapter->syncp);
+ for (i = 1; i < ENA_STATS_ARRAY_ENA_SRD; i++) {
+ ena_stats = &ena_srd_info_strings[i];
+ /* These fields are wrapped within an outer struct - account for
+ * the additional offset of the ENA SRD mode field that was already processed
+ */
+ ptr = (u64 *)&adapter->ena_srd_info +
+ ena_stats->stat_offset + 1;
+
+ ena_safe_update_stat(ptr, (*data)++, &adapter->syncp);
+ }
+ }
+}
+
static void ena_queue_stats(struct ena_adapter *adapter, u64 **data)
{
const struct ena_stats *ena_stats;
@@ -178,7 +270,7 @@ static void ena_dev_admin_queue_stats(struct ena_adapter *adapter, u64 **data)
static void ena_get_stats(struct ena_adapter *adapter,
u64 *data,
- bool eni_stats_needed)
+ bool hw_stats_needed)
{
const struct ena_stats *ena_stats;
u64 *ptr;
@@ -192,17 +284,8 @@ static void ena_get_stats(struct ena_adapter *adapter,
ena_safe_update_stat(ptr, data++, &adapter->syncp);
}
- if (eni_stats_needed) {
- ena_update_hw_stats(adapter);
- for (i = 0; i < ENA_STATS_ARRAY_ENI(adapter); i++) {
- ena_stats = &ena_stats_eni_strings[i];
-
- ptr = (u64 *)&adapter->eni_stats +
- ena_stats->stat_offset;
-
- ena_safe_update_stat(ptr, data++, &adapter->syncp);
- }
- }
+ if (hw_stats_needed)
+ ena_metrics_stats(adapter, &data);
ena_queue_stats(adapter, &data);
ena_dev_admin_queue_stats(adapter, &data);
@@ -213,9 +296,20 @@ static void ena_get_ethtool_stats(struct net_device *netdev,
u64 *data)
{
struct ena_adapter *adapter = netdev_priv(netdev);
- struct ena_com_dev *dev = adapter->ena_dev;
- ena_get_stats(adapter, data, ena_com_get_cap(dev, ENA_ADMIN_ENI_STATS));
+ ena_get_stats(adapter, data, true);
+}
+
+static int ena_get_ts_info(struct net_device *netdev,
+ struct kernel_ethtool_ts_info *info)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
+
+ info->phc_index = ena_phc_get_index(adapter);
+
+ return 0;
}
static int ena_get_sw_stats_count(struct ena_adapter *adapter)
@@ -227,9 +321,17 @@ static int ena_get_sw_stats_count(struct ena_adapter *adapter)
static int ena_get_hw_stats_count(struct ena_adapter *adapter)
{
- bool supported = ena_com_get_cap(adapter->ena_dev, ENA_ADMIN_ENI_STATS);
+ struct ena_com_dev *dev = adapter->ena_dev;
+ int count;
+
+ count = ENA_STATS_ARRAY_ENA_SRD * ena_com_get_cap(dev, ENA_ADMIN_ENA_SRD_INFO);
+
+ if (ena_com_get_cap(dev, ENA_ADMIN_CUSTOMER_METRICS))
+ count += ena_com_get_customer_metric_count(dev);
+ else if (ena_com_get_cap(dev, ENA_ADMIN_ENI_STATS))
+ count += ENA_STATS_ARRAY_ENI;
- return ENA_STATS_ARRAY_ENI(adapter) * supported;
+ return count;
}
int ena_get_sset_count(struct net_device *netdev, int sset)
@@ -245,6 +347,35 @@ int ena_get_sset_count(struct net_device *netdev, int sset)
return -EOPNOTSUPP;
}
+static void ena_metrics_stats_strings(struct ena_adapter *adapter, u8 **data)
+{
+ struct ena_com_dev *dev = adapter->ena_dev;
+ const struct ena_hw_metrics *ena_metrics;
+ const struct ena_stats *ena_stats;
+ int i;
+
+ if (ena_com_get_cap(dev, ENA_ADMIN_CUSTOMER_METRICS)) {
+ for (i = 0; i < ENA_METRICS_ARRAY_ENI; i++) {
+ if (ena_com_get_customer_metric_support(dev, i)) {
+ ena_metrics = &ena_hw_stats_strings[i];
+ ethtool_puts(data, ena_metrics->name);
+ }
+ }
+ } else if (ena_com_get_cap(dev, ENA_ADMIN_ENI_STATS)) {
+ for (i = 0; i < ENA_STATS_ARRAY_ENI; i++) {
+ ena_stats = &ena_stats_eni_strings[i];
+ ethtool_puts(data, ena_stats->name);
+ }
+ }
+
+ if (ena_com_get_cap(dev, ENA_ADMIN_ENA_SRD_INFO)) {
+ for (i = 0; i < ENA_STATS_ARRAY_ENA_SRD; i++) {
+ ena_stats = &ena_srd_info_strings[i];
+ ethtool_puts(data, ena_stats->name);
+ }
+ }
+}
+
static void ena_queue_strings(struct ena_adapter *adapter, u8 **data)
{
const struct ena_stats *ena_stats;
@@ -290,7 +421,7 @@ static void ena_com_dev_strings(u8 **data)
static void ena_get_strings(struct ena_adapter *adapter,
u8 *data,
- bool eni_stats_needed)
+ bool hw_stats_needed)
{
const struct ena_stats *ena_stats;
int i;
@@ -300,12 +431,8 @@ static void ena_get_strings(struct ena_adapter *adapter,
ethtool_puts(&data, ena_stats->name);
}
- if (eni_stats_needed) {
- for (i = 0; i < ENA_STATS_ARRAY_ENI(adapter); i++) {
- ena_stats = &ena_stats_eni_strings[i];
- ethtool_puts(&data, ena_stats->name);
- }
- }
+ if (hw_stats_needed)
+ ena_metrics_stats_strings(adapter, &data);
ena_queue_strings(adapter, &data);
ena_com_dev_strings(&data);
@@ -316,11 +443,10 @@ static void ena_get_ethtool_strings(struct net_device *netdev,
u8 *data)
{
struct ena_adapter *adapter = netdev_priv(netdev);
- struct ena_com_dev *dev = adapter->ena_dev;
switch (sset) {
case ETH_SS_STATS:
- ena_get_strings(adapter, data, ena_com_get_cap(dev, ENA_ADMIN_ENI_STATS));
+ ena_get_strings(adapter, data, true);
break;
}
}
@@ -459,10 +585,18 @@ static void ena_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct ena_adapter *adapter = netdev_priv(dev);
-
- strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strscpy(info->bus_info, pci_name(adapter->pdev),
- sizeof(info->bus_info));
+ ssize_t ret = 0;
+
+ ret = strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ if (ret < 0)
+ netif_dbg(adapter, drv, dev,
+ "module name will be truncated, status = %zd\n", ret);
+
+ ret = strscpy(info->bus_info, pci_name(adapter->pdev),
+ sizeof(info->bus_info));
+ if (ret < 0)
+ netif_dbg(adapter, drv, dev,
+ "bus info will be truncated, status = %zd\n", ret);
}
static void ena_get_ringparam(struct net_device *netdev,
@@ -601,9 +735,11 @@ static u16 ena_flow_data_to_flow_hash(u32 hash_fields)
return data;
}
-static int ena_get_rss_hash(struct ena_com_dev *ena_dev,
- struct ethtool_rxnfc *cmd)
+static int ena_get_rxfh_fields(struct net_device *netdev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
enum ena_admin_flow_hash_proto proto;
u16 hash_fields;
int rc;
@@ -652,9 +788,12 @@ static int ena_get_rss_hash(struct ena_com_dev *ena_dev,
return 0;
}
-static int ena_set_rss_hash(struct ena_com_dev *ena_dev,
- struct ethtool_rxnfc *cmd)
+static int ena_set_rxfh_fields(struct net_device *netdev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
enum ena_admin_flow_hash_proto proto;
u16 hash_fields;
@@ -696,26 +835,6 @@ static int ena_set_rss_hash(struct ena_com_dev *ena_dev,
return ena_com_fill_hash_ctrl(ena_dev, proto, hash_fields);
}
-static int ena_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info)
-{
- struct ena_adapter *adapter = netdev_priv(netdev);
- int rc = 0;
-
- switch (info->cmd) {
- case ETHTOOL_SRXFH:
- rc = ena_set_rss_hash(adapter->ena_dev, info);
- break;
- case ETHTOOL_SRXCLSRLDEL:
- case ETHTOOL_SRXCLSRLINS:
- default:
- netif_err(adapter, drv, netdev,
- "Command parameter %d is not supported\n", info->cmd);
- rc = -EOPNOTSUPP;
- }
-
- return rc;
-}
-
static int ena_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
u32 *rules)
{
@@ -727,9 +846,6 @@ static int ena_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
info->data = adapter->num_io_queues;
rc = 0;
break;
- case ETHTOOL_GRXFH:
- rc = ena_get_rss_hash(adapter->ena_dev, info);
- break;
case ETHTOOL_GRXCLSRLCNT:
case ETHTOOL_GRXCLSRULE:
case ETHTOOL_GRXCLSRLALL:
@@ -749,7 +865,10 @@ static u32 ena_get_rxfh_indir_size(struct net_device *netdev)
static u32 ena_get_rxfh_key_size(struct net_device *netdev)
{
- return ENA_HASH_KEY_SIZE;
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ struct ena_rss *rss = &adapter->ena_dev->rss;
+
+ return rss->hash_key ? ENA_HASH_KEY_SIZE : 0;
}
static int ena_indirection_table_set(struct ena_adapter *adapter,
@@ -978,16 +1097,17 @@ static const struct ethtool_ops ena_ethtool_ops = {
.get_strings = ena_get_ethtool_strings,
.get_ethtool_stats = ena_get_ethtool_stats,
.get_rxnfc = ena_get_rxnfc,
- .set_rxnfc = ena_set_rxnfc,
.get_rxfh_indir_size = ena_get_rxfh_indir_size,
.get_rxfh_key_size = ena_get_rxfh_key_size,
.get_rxfh = ena_get_rxfh,
.set_rxfh = ena_set_rxfh,
+ .get_rxfh_fields = ena_get_rxfh_fields,
+ .set_rxfh_fields = ena_set_rxfh_fields,
.get_channels = ena_get_channels,
.set_channels = ena_set_channels,
.get_tunable = ena_get_tunable,
.set_tunable = ena_set_tunable,
- .get_ts_info = ethtool_op_get_ts_info,
+ .get_ts_info = ena_get_ts_info,
};
void ena_set_ethtool_ops(struct net_device *netdev)
@@ -1009,22 +1129,18 @@ static void ena_dump_stats_ex(struct ena_adapter *adapter, u8 *buf)
return;
}
- strings_buf = devm_kcalloc(&adapter->pdev->dev,
- ETH_GSTRING_LEN, strings_num,
- GFP_ATOMIC);
+ strings_buf = kcalloc(strings_num, ETH_GSTRING_LEN, GFP_ATOMIC);
if (!strings_buf) {
netif_err(adapter, drv, netdev,
"Failed to allocate strings_buf\n");
return;
}
- data_buf = devm_kcalloc(&adapter->pdev->dev,
- strings_num, sizeof(u64),
- GFP_ATOMIC);
+ data_buf = kcalloc(strings_num, sizeof(u64), GFP_ATOMIC);
if (!data_buf) {
netif_err(adapter, drv, netdev,
"Failed to allocate data buf\n");
- devm_kfree(&adapter->pdev->dev, strings_buf);
+ kfree(strings_buf);
return;
}
@@ -1046,8 +1162,8 @@ static void ena_dump_stats_ex(struct ena_adapter *adapter, u8 *buf)
strings_buf + i * ETH_GSTRING_LEN,
data_buf[i]);
- devm_kfree(&adapter->pdev->dev, strings_buf);
- devm_kfree(&adapter->pdev->dev, data_buf);
+ kfree(strings_buf);
+ kfree(data_buf);
}
void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf)
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index be5acfa41ee0..92d149d4f091 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -5,9 +5,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#ifdef CONFIG_RFS_ACCEL
-#include <linux/cpu_rmap.h>
-#endif /* CONFIG_RFS_ACCEL */
#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -22,6 +19,12 @@
#include "ena_pci_id_tbl.h"
#include "ena_xdp.h"
+#include "ena_phc.h"
+
+#include "ena_devlink.h"
+
+#include "ena_debugfs.h"
+
MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
MODULE_DESCRIPTION(DEVICE_NAME);
MODULE_LICENSE("GPL");
@@ -42,8 +45,6 @@ MODULE_DEVICE_TABLE(pci, ena_pci_tbl);
static int ena_rss_init_default(struct ena_adapter *adapter);
static void check_for_admin_com_state(struct ena_adapter *adapter);
-static void ena_destroy_device(struct ena_adapter *adapter, bool graceful);
-static int ena_restore_device(struct ena_adapter *adapter);
static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
@@ -74,7 +75,7 @@ static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
if (threshold < time_since_last_napi && napi_scheduled) {
netdev_err(dev,
"napi handler hasn't been called for a long time but is scheduled\n");
- reset_reason = ENA_REGS_RESET_SUSPECTED_POLL_STARVATION;
+ reset_reason = ENA_REGS_RESET_SUSPECTED_POLL_STARVATION;
}
schedule_reset:
/* Change the state of the device to trigger reset
@@ -104,7 +105,7 @@ static int ena_change_mtu(struct net_device *dev, int new_mtu)
if (!ret) {
netif_dbg(adapter, drv, dev, "Set MTU to %d\n", new_mtu);
update_rx_ring_mtu(adapter, new_mtu);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
} else {
netif_err(adapter, drv, dev, "Failed to set MTU to %d\n",
new_mtu);
@@ -162,30 +163,6 @@ int ena_xmit_common(struct ena_adapter *adapter,
return 0;
}
-static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
-{
-#ifdef CONFIG_RFS_ACCEL
- u32 i;
- int rc;
-
- adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_io_queues);
- if (!adapter->netdev->rx_cpu_rmap)
- return -ENOMEM;
- for (i = 0; i < adapter->num_io_queues; i++) {
- int irq_idx = ENA_IO_IRQ_IDX(i);
-
- rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
- pci_irq_vector(adapter->pdev, irq_idx));
- if (rc) {
- free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
- adapter->netdev->rx_cpu_rmap = NULL;
- return rc;
- }
- }
-#endif /* CONFIG_RFS_ACCEL */
- return 0;
-}
-
static void ena_init_io_rings_common(struct ena_adapter *adapter,
struct ena_ring *ring, u16 qid)
{
@@ -1347,6 +1324,8 @@ error:
if (rc == -ENOSPC) {
ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1, &rx_ring->syncp);
ena_reset_device(adapter, ENA_REGS_RESET_TOO_MANY_RX_DESCS);
+ } else if (rc == -EFAULT) {
+ ena_reset_device(adapter, ENA_REGS_RESET_RX_DESCRIPTOR_MALFORMED);
} else {
ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1,
&rx_ring->syncp);
@@ -1381,7 +1360,7 @@ static void ena_adjust_adaptive_rx_intr_moderation(struct ena_napi *ena_napi)
rx_ring->rx_stats.bytes,
&dim_sample);
- net_dim(&ena_napi->dim, dim_sample);
+ net_dim(&ena_napi->dim, &dim_sample);
rx_ring->per_napi_packets = 0;
}
@@ -1594,7 +1573,7 @@ static int ena_enable_msix(struct ena_adapter *adapter)
adapter->num_io_queues = irq_cnt - ENA_ADMIN_MSIX_VEC;
}
- if (ena_init_rx_cpu_rmap(adapter))
+ if (netif_enable_cpu_rmap(adapter->netdev, adapter->num_io_queues))
netif_warn(adapter, probe, adapter->netdev,
"Failed to map IRQs to CPUs\n");
@@ -1675,9 +1654,9 @@ static int ena_request_mgmnt_irq(struct ena_adapter *adapter)
static int ena_request_io_irq(struct ena_adapter *adapter)
{
u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
+ int rc = 0, i, k, irq_idx;
unsigned long flags = 0;
struct ena_irq *irq;
- int rc = 0, i, k;
if (!test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
netif_err(adapter, ifup, adapter->netdev,
@@ -1703,6 +1682,16 @@ static int ena_request_io_irq(struct ena_adapter *adapter)
irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
}
+ /* Now that IO IRQs have been successfully allocated, map them to the
+ * corresponding IO NAPI instance. Note that the mgmnt IRQ does not
+ * have a NAPI, so care must be taken to correctly map IRQs to NAPIs.
+ */
+ for (i = 0; i < io_queue_count; i++) {
+ irq_idx = ENA_IO_IRQ_IDX(i);
+ irq = &adapter->irq_tbl[irq_idx];
+ netif_napi_set_irq(&adapter->ena_napi[i].napi, irq->vector);
+ }
+
return rc;
err:
@@ -1730,16 +1719,13 @@ static void ena_free_io_irq(struct ena_adapter *adapter)
struct ena_irq *irq;
int i;
-#ifdef CONFIG_RFS_ACCEL
- if (adapter->msix_vecs >= 1) {
- free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
- adapter->netdev->rx_cpu_rmap = NULL;
- }
-#endif /* CONFIG_RFS_ACCEL */
-
for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) {
+ struct ena_napi *ena_napi;
+
irq = &adapter->irq_tbl[i];
irq_set_affinity_hint(irq->vector, NULL);
+ ena_napi = irq->data;
+ netif_napi_set_irq(&ena_napi->napi, -1);
free_irq(irq->vector, irq->data);
}
}
@@ -1795,7 +1781,7 @@ static void ena_init_napi_in_range(struct ena_adapter *adapter,
if (ENA_IS_XDP_INDEX(adapter, i))
napi_handler = ena_xdp_io_poll;
- netif_napi_add(adapter->netdev, &napi->napi, napi_handler);
+ netif_napi_add_config(adapter->netdev, &napi->napi, napi_handler, i);
if (!ENA_IS_XDP_INDEX(adapter, i))
napi->rx_ring = rx_ring;
@@ -1809,20 +1795,40 @@ static void ena_napi_disable_in_range(struct ena_adapter *adapter,
int first_index,
int count)
{
+ struct napi_struct *napi;
int i;
- for (i = first_index; i < first_index + count; i++)
- napi_disable(&adapter->ena_napi[i].napi);
+ for (i = first_index; i < first_index + count; i++) {
+ napi = &adapter->ena_napi[i].napi;
+ if (!ENA_IS_XDP_INDEX(adapter, i)) {
+ /* This API is supported for non-XDP queues only */
+ netif_queue_set_napi(adapter->netdev, i,
+ NETDEV_QUEUE_TYPE_TX, NULL);
+ netif_queue_set_napi(adapter->netdev, i,
+ NETDEV_QUEUE_TYPE_RX, NULL);
+ }
+ napi_disable(napi);
+ }
}
static void ena_napi_enable_in_range(struct ena_adapter *adapter,
int first_index,
int count)
{
+ struct napi_struct *napi;
int i;
- for (i = first_index; i < first_index + count; i++)
- napi_enable(&adapter->ena_napi[i].napi);
+ for (i = first_index; i < first_index + count; i++) {
+ napi = &adapter->ena_napi[i].napi;
+ napi_enable(napi);
+ if (!ENA_IS_XDP_INDEX(adapter, i)) {
+ /* This API is supported for non-XDP queues only */
+ netif_queue_set_napi(adapter->netdev, i,
+ NETDEV_QUEUE_TYPE_RX, napi);
+ netif_queue_set_napi(adapter->netdev, i,
+ NETDEV_QUEUE_TYPE_TX, napi);
+ }
+ }
}
/* Configure the Rx forwarding */
@@ -2701,6 +2707,7 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pd
{
struct device *dev = &pdev->dev;
struct ena_admin_host_info *host_info;
+ ssize_t ret;
int rc;
/* Allocate only the host info */
@@ -2715,11 +2722,19 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pd
host_info->bdf = pci_dev_id(pdev);
host_info->os_type = ENA_ADMIN_OS_LINUX;
host_info->kernel_ver = LINUX_VERSION_CODE;
- strscpy(host_info->kernel_ver_str, utsname()->version,
- sizeof(host_info->kernel_ver_str) - 1);
+ ret = strscpy(host_info->kernel_ver_str, utsname()->version,
+ sizeof(host_info->kernel_ver_str));
+ if (ret < 0)
+ dev_dbg(dev,
+ "kernel version string will be truncated, status = %zd\n", ret);
+
host_info->os_dist = 0;
- strscpy(host_info->os_dist_str, utsname()->release,
- sizeof(host_info->os_dist_str));
+ ret = strscpy(host_info->os_dist_str, utsname()->release,
+ sizeof(host_info->os_dist_str));
+ if (ret < 0)
+ dev_dbg(dev,
+ "OS distribution string will be truncated, status = %zd\n", ret);
+
host_info->driver_version =
(DRV_MODULE_GEN_MAJOR) |
(DRV_MODULE_GEN_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
@@ -2732,7 +2747,8 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pd
ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK |
ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_MASK |
ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK |
- ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_MASK;
+ ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_MASK |
+ ENA_ADMIN_HOST_INFO_PHC_MASK;
rc = ena_com_set_host_attributes(ena_dev);
if (rc) {
@@ -2787,19 +2803,6 @@ err:
ena_com_delete_debug_area(adapter->ena_dev);
}
-int ena_update_hw_stats(struct ena_adapter *adapter)
-{
- int rc;
-
- rc = ena_com_get_eni_stats(adapter->ena_dev, &adapter->eni_stats);
- if (rc) {
- netdev_err(adapter->netdev, "Failed to get ENI stats\n");
- return rc;
- }
-
- return 0;
-}
-
static void ena_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
@@ -3137,6 +3140,8 @@ static int ena_device_init(struct ena_adapter *adapter, struct pci_dev *pdev,
goto err_mmio_read_less;
}
+ ena_devlink_params_get(adapter->devlink);
+
/* ENA admin level init */
rc = ena_com_admin_init(ena_dev, &aenq_handlers);
if (rc) {
@@ -3190,6 +3195,10 @@ static int ena_device_init(struct ena_adapter *adapter, struct pci_dev *pdev,
if (unlikely(rc))
goto err_admin_init;
+ rc = ena_phc_init(adapter);
+ if (unlikely(rc && (rc != -EOPNOTSUPP)))
+ netdev_err(netdev, "Failed initializing PHC, error: %d\n", rc);
+
return 0;
err_admin_init:
@@ -3235,18 +3244,19 @@ err_disable_msix:
return rc;
}
-static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
+int ena_destroy_device(struct ena_adapter *adapter, bool graceful)
{
struct net_device *netdev = adapter->netdev;
struct ena_com_dev *ena_dev = adapter->ena_dev;
bool dev_up;
+ int rc = 0;
if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
- return;
+ return 0;
netif_carrier_off(netdev);
- del_timer_sync(&adapter->timer_service);
+ timer_delete_sync(&adapter->timer_service);
dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
adapter->dev_up_before_reset = dev_up;
@@ -3260,7 +3270,7 @@ static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
* and device is up, ena_down() already reset the device.
*/
if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))
- ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
+ rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
ena_free_mgmnt_irq(adapter);
@@ -3272,6 +3282,8 @@ static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
ena_com_admin_destroy(ena_dev);
+ ena_phc_destroy(adapter);
+
ena_com_mmio_reg_read_request_destroy(ena_dev);
/* return reset reason to default value */
@@ -3279,9 +3291,11 @@ static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
+
+ return rc;
}
-static int ena_restore_device(struct ena_adapter *adapter)
+int ena_restore_device(struct ena_adapter *adapter)
{
struct ena_com_dev_get_features_ctx get_feat_ctx;
struct ena_com_dev *ena_dev = adapter->ena_dev;
@@ -3343,6 +3357,7 @@ err_device_destroy:
ena_com_wait_for_abort_completion(ena_dev);
ena_com_admin_destroy(ena_dev);
ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
+ ena_phc_destroy(adapter);
ena_com_mmio_reg_read_request_destroy(ena_dev);
err:
clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
@@ -3355,14 +3370,17 @@ err:
static void ena_fw_reset_device(struct work_struct *work)
{
+ int rc = 0;
+
struct ena_adapter *adapter =
container_of(work, struct ena_adapter, reset_task);
rtnl_lock();
if (likely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
- ena_destroy_device(adapter, false);
- ena_restore_device(adapter);
+ rc |= ena_destroy_device(adapter, false);
+ rc |= ena_restore_device(adapter);
+ adapter->dev_stats.reset_fail += !!rc;
dev_err(&adapter->pdev->dev, "Device reset completed successfully\n");
}
@@ -3663,7 +3681,8 @@ static void ena_update_host_info(struct ena_admin_host_info *host_info,
static void ena_timer_service(struct timer_list *t)
{
- struct ena_adapter *adapter = from_timer(adapter, t, timer_service);
+ struct ena_adapter *adapter = timer_container_of(adapter, t,
+ timer_service);
u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr;
struct ena_admin_host_info *host_info =
adapter->ena_dev->host_attr.host_info;
@@ -3862,6 +3881,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct ena_adapter *adapter;
struct net_device *netdev;
static int adapters_found;
+ struct devlink *devlink;
u32 max_num_io_queues;
bool wd_state;
int bars, rc;
@@ -3927,10 +3947,30 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, adapter);
+ rc = ena_phc_alloc(adapter);
+ if (rc) {
+ netdev_err(netdev, "ena_phc_alloc failed\n");
+ goto err_netdev_destroy;
+ }
+
+ rc = ena_com_allocate_customer_metrics_buffer(ena_dev);
+ if (rc) {
+ netdev_err(netdev, "ena_com_allocate_customer_metrics_buffer failed\n");
+ goto err_free_phc;
+ }
+
rc = ena_map_llq_mem_bar(pdev, ena_dev, bars);
if (rc) {
dev_err(&pdev->dev, "ENA LLQ bar mapping failed\n");
- goto err_netdev_destroy;
+ goto err_metrics_destroy;
+ }
+
+ /* Need to do this before ena_device_init */
+ devlink = ena_devlink_alloc(adapter);
+ if (!devlink) {
+ netdev_err(netdev, "ena_devlink_alloc failed\n");
+ rc = -ENOMEM;
+ goto err_metrics_destroy;
}
rc = ena_device_init(adapter, pdev, &get_feat_ctx, &wd_state);
@@ -3938,7 +3978,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(&pdev->dev, "ENA device init failed\n");
if (rc == -ETIME)
rc = -EPROBE_DEFER;
- goto err_netdev_destroy;
+ goto ena_devlink_destroy;
}
/* Initial TX and RX interrupt delay. Assumes 1 usec granularity.
@@ -4022,6 +4062,8 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_rss;
}
+ ena_debugfs_init(netdev);
+
INIT_WORK(&adapter->reset_task, ena_fw_reset_device);
adapter->last_keep_alive_jiffies = jiffies;
@@ -4043,6 +4085,12 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapters_found++;
+ /* From this point, the devlink device is visible to users.
+ * Perform the registration last to ensure that all the resources
+ * are available and that the netdevice is registered.
+ */
+ ena_devlink_register(devlink, &pdev->dev);
+
return 0;
err_rss:
@@ -4055,10 +4103,16 @@ err_free_msix:
ena_free_mgmnt_irq(adapter);
ena_disable_msix(adapter);
err_worker_destroy:
- del_timer(&adapter->timer_service);
+ timer_delete(&adapter->timer_service);
err_device_destroy:
ena_com_delete_host_info(ena_dev);
ena_com_admin_destroy(ena_dev);
+ena_devlink_destroy:
+ ena_devlink_free(devlink);
+err_metrics_destroy:
+ ena_com_delete_customer_metrics_buffer(ena_dev);
+err_free_phc:
+ ena_phc_free(adapter);
err_netdev_destroy:
free_netdev(netdev);
err_free_region:
@@ -4089,23 +4143,23 @@ static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
ena_dev = adapter->ena_dev;
netdev = adapter->netdev;
-#ifdef CONFIG_RFS_ACCEL
- if ((adapter->msix_vecs >= 1) && (netdev->rx_cpu_rmap)) {
- free_irq_cpu_rmap(netdev->rx_cpu_rmap);
- netdev->rx_cpu_rmap = NULL;
- }
+ ena_debugfs_terminate(netdev);
-#endif /* CONFIG_RFS_ACCEL */
/* Make sure timer and reset routine won't be called after
* freeing device resources.
*/
- del_timer_sync(&adapter->timer_service);
+ timer_delete_sync(&adapter->timer_service);
cancel_work_sync(&adapter->reset_task);
rtnl_lock(); /* lock released inside the below if-else block */
adapter->reset_reason = ENA_REGS_RESET_SHUTDOWN;
ena_destroy_device(adapter, true);
+ ena_phc_free(adapter);
+
+ ena_devlink_unregister(adapter->devlink);
+ ena_devlink_free(adapter->devlink);
+
if (shutdown) {
netif_device_detach(netdev);
dev_close(netdev);
@@ -4122,6 +4176,8 @@ static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
ena_com_delete_host_info(ena_dev);
+ ena_com_delete_customer_metrics_buffer(ena_dev);
+
ena_release_bars(ena_dev, pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 6d2cc20210cc..006f9a3acea6 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -16,6 +16,7 @@
#include <linux/skbuff.h>
#include <net/xdp.h>
#include <uapi/linux/bpf.h>
+#include <net/devlink.h>
#include "ena_com.h"
#include "ena_eth_com.h"
@@ -110,6 +111,8 @@
#define ENA_MMIO_DISABLE_REG_READ BIT(0)
+struct ena_phc_info;
+
struct ena_irq {
irq_handler_t handler;
void *data;
@@ -290,6 +293,7 @@ struct ena_stats_dev {
u64 admin_q_pause;
u64 rx_drops;
u64 tx_drops;
+ u64 reset_fail;
};
enum ena_flags_t {
@@ -347,6 +351,8 @@ struct ena_adapter {
char name[ENA_NAME_MAX_LEN];
+ struct ena_phc_info *phc_info;
+
unsigned long flags;
/* TX */
struct ena_ring tx_ring[ENA_MAX_NUM_IO_QUEUES]
@@ -372,6 +378,7 @@ struct ena_adapter {
struct u64_stats_sync syncp;
struct ena_stats_dev dev_stats;
struct ena_admin_eni_stats eni_stats;
+ struct ena_admin_ena_srd_info ena_srd_info;
/* last queue index that was checked for uncompleted tx packets */
u32 last_monitored_tx_qid;
@@ -381,6 +388,13 @@ struct ena_adapter {
struct bpf_prog *xdp_bpf_prog;
u32 xdp_first_ring;
u32 xdp_num_queues;
+
+ struct devlink *devlink;
+ struct devlink_port devlink_port;
+#ifdef CONFIG_DEBUG_FS
+
+ struct dentry *debugfs_base;
+#endif /* CONFIG_DEBUG_FS */
};
void ena_set_ethtool_ops(struct net_device *netdev);
@@ -389,7 +403,6 @@ void ena_dump_stats_to_dmesg(struct ena_adapter *adapter);
void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf);
-int ena_update_hw_stats(struct ena_adapter *adapter);
int ena_update_queue_params(struct ena_adapter *adapter,
u32 new_tx_size,
@@ -411,6 +424,8 @@ static inline void ena_reset_device(struct ena_adapter *adapter,
set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
}
+int ena_destroy_device(struct ena_adapter *adapter, bool graceful);
+int ena_restore_device(struct ena_adapter *adapter);
int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
struct ena_tx_buffer *tx_info, bool is_xdp);
diff --git a/drivers/net/ethernet/amazon/ena/ena_phc.c b/drivers/net/ethernet/amazon/ena/ena_phc.c
new file mode 100644
index 000000000000..7867e893fd15
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_phc.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright 2015-2022 Amazon.com, Inc. or its affiliates. All rights reserved.
+ */
+
+#include <linux/pci.h>
+#include "ena_netdev.h"
+#include "ena_phc.h"
+#include "ena_devlink.h"
+
+static int ena_phc_adjtime(struct ptp_clock_info *clock_info, s64 delta)
+{
+ return -EOPNOTSUPP;
+}
+
+static int ena_phc_adjfine(struct ptp_clock_info *clock_info, long scaled_ppm)
+{
+ return -EOPNOTSUPP;
+}
+
+static int ena_phc_feature_enable(struct ptp_clock_info *clock_info,
+ struct ptp_clock_request *rq,
+ int on)
+{
+ return -EOPNOTSUPP;
+}
+
+static int ena_phc_gettimex64(struct ptp_clock_info *clock_info,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct ena_phc_info *phc_info =
+ container_of(clock_info, struct ena_phc_info, clock_info);
+ unsigned long flags;
+ u64 timestamp_nsec;
+ int rc;
+
+ spin_lock_irqsave(&phc_info->lock, flags);
+
+ ptp_read_system_prets(sts);
+
+ rc = ena_com_phc_get_timestamp(phc_info->adapter->ena_dev,
+ &timestamp_nsec);
+
+ ptp_read_system_postts(sts);
+
+ spin_unlock_irqrestore(&phc_info->lock, flags);
+
+ *ts = ns_to_timespec64(timestamp_nsec);
+
+ return rc;
+}
+
+static int ena_phc_settime64(struct ptp_clock_info *clock_info,
+ const struct timespec64 *ts)
+{
+ return -EOPNOTSUPP;
+}
+
+static struct ptp_clock_info ena_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 0,
+ .pps = 0,
+ .adjtime = ena_phc_adjtime,
+ .adjfine = ena_phc_adjfine,
+ .gettimex64 = ena_phc_gettimex64,
+ .settime64 = ena_phc_settime64,
+ .enable = ena_phc_feature_enable,
+};
+
+/* Enable/Disable PHC by the kernel; takes effect on the next init flow */
+void ena_phc_enable(struct ena_adapter *adapter, bool enable)
+{
+ struct ena_phc_info *phc_info = adapter->phc_info;
+
+ if (!phc_info) {
+ netdev_err(adapter->netdev, "phc_info is not allocated\n");
+ return;
+ }
+
+ phc_info->enabled = enable;
+}
+
+/* Check if PHC is enabled by the kernel */
+bool ena_phc_is_enabled(struct ena_adapter *adapter)
+{
+ struct ena_phc_info *phc_info = adapter->phc_info;
+
+ return (phc_info && phc_info->enabled);
+}
+
+/* PHC is activated if ptp clock is registered in the kernel */
+bool ena_phc_is_active(struct ena_adapter *adapter)
+{
+ struct ena_phc_info *phc_info = adapter->phc_info;
+
+ return (phc_info && phc_info->clock);
+}
+
+static int ena_phc_register(struct ena_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct ptp_clock_info *clock_info;
+ struct ena_phc_info *phc_info;
+ int rc = 0;
+
+ phc_info = adapter->phc_info;
+ clock_info = &phc_info->clock_info;
+
+ /* PHC may already be registered in case of a reset */
+ if (ena_phc_is_active(adapter))
+ return 0;
+
+ phc_info->adapter = adapter;
+
+ spin_lock_init(&phc_info->lock);
+
+ /* Fill the ptp_clock_info struct and register PTP clock */
+ *clock_info = ena_ptp_clock_info;
+ snprintf(clock_info->name,
+ sizeof(clock_info->name),
+ "ena-ptp-%02x",
+ PCI_SLOT(pdev->devfn));
+
+ phc_info->clock = ptp_clock_register(clock_info, &pdev->dev);
+ if (IS_ERR(phc_info->clock)) {
+ rc = PTR_ERR(phc_info->clock);
+ netdev_err(adapter->netdev, "Failed registering ptp clock, error: %d\n",
+ rc);
+ phc_info->clock = NULL;
+ }
+
+ return rc;
+}
+
+static void ena_phc_unregister(struct ena_adapter *adapter)
+{
+ struct ena_phc_info *phc_info = adapter->phc_info;
+
+ /* During reset flow, PHC must stay registered
+ * to keep the kernel's PHC index
+ */
+ if (ena_phc_is_active(adapter) &&
+ !test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) {
+ ptp_clock_unregister(phc_info->clock);
+ phc_info->clock = NULL;
+ }
+}
+
+int ena_phc_alloc(struct ena_adapter *adapter)
+{
+ /* Allocate driver specific PHC info */
+ adapter->phc_info = vzalloc(sizeof(*adapter->phc_info));
+ if (unlikely(!adapter->phc_info)) {
+ netdev_err(adapter->netdev, "Failed to alloc phc_info\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void ena_phc_free(struct ena_adapter *adapter)
+{
+ if (adapter->phc_info) {
+ vfree(adapter->phc_info);
+ adapter->phc_info = NULL;
+ }
+}
+
+int ena_phc_init(struct ena_adapter *adapter)
+{
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
+ struct net_device *netdev = adapter->netdev;
+ int rc = -EOPNOTSUPP;
+
+ /* Validate PHC feature is supported in the device */
+ if (!ena_com_phc_supported(ena_dev)) {
+ netdev_dbg(netdev, "PHC feature is not supported by the device\n");
+ goto err_ena_com_phc_init;
+ }
+
+ /* Validate PHC feature is enabled by the kernel */
+ if (!ena_phc_is_enabled(adapter)) {
+ netdev_dbg(netdev, "PHC feature is not enabled by the kernel\n");
+ goto err_ena_com_phc_init;
+ }
+
+ /* Initialize device specific PHC info */
+ rc = ena_com_phc_init(ena_dev);
+ if (unlikely(rc)) {
+ netdev_err(netdev, "Failed to init phc, error: %d\n", rc);
+ goto err_ena_com_phc_init;
+ }
+
+ /* Configure PHC feature in driver and device */
+ rc = ena_com_phc_config(ena_dev);
+ if (unlikely(rc)) {
+ netdev_err(netdev, "Failed to config phc, error: %d\n", rc);
+ goto err_ena_com_phc_config;
+ }
+
+ /* Register to PTP class driver */
+ rc = ena_phc_register(adapter);
+ if (unlikely(rc)) {
+ netdev_err(netdev, "Failed to register phc, error: %d\n", rc);
+ goto err_ena_com_phc_config;
+ }
+
+ return 0;
+
+err_ena_com_phc_config:
+ ena_com_phc_destroy(ena_dev);
+err_ena_com_phc_init:
+ ena_phc_enable(adapter, false);
+ ena_devlink_disable_phc_param(adapter->devlink);
+ return rc;
+}
+
+void ena_phc_destroy(struct ena_adapter *adapter)
+{
+ ena_phc_unregister(adapter);
+ ena_com_phc_destroy(adapter->ena_dev);
+}
+
+int ena_phc_get_index(struct ena_adapter *adapter)
+{
+ if (ena_phc_is_active(adapter))
+ return ptp_clock_index(adapter->phc_info->clock);
+
+ return -1;
+}
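ena_phc_get_index() returning -1 matches the conventional "no PHC" value reported to user space. A minimal sketch of how a driver could surface the index through ethtool's .get_ts_info callback; the callback name is hypothetical and the classic struct ethtool_ts_info layout is assumed (newer kernels pass struct kernel_ethtool_ts_info instead):

static int ena_get_ts_info_sketch(struct net_device *netdev,
				  struct ethtool_ts_info *info)
{
	struct ena_adapter *adapter = netdev_priv(netdev);

	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE;

	/* -1 means "no PHC registered"; ethtool reports it as "none" */
	info->phc_index = ena_phc_get_index(adapter);

	return 0;
}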
diff --git a/drivers/net/ethernet/amazon/ena/ena_phc.h b/drivers/net/ethernet/amazon/ena/ena_phc.h
new file mode 100644
index 000000000000..7364fe714e44
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_phc.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright 2015-2022 Amazon.com, Inc. or its affiliates. All rights reserved.
+ */
+
+#ifndef ENA_PHC_H
+#define ENA_PHC_H
+
+#include <linux/ptp_clock_kernel.h>
+
+struct ena_phc_info {
+ /* PTP hardware capabilities */
+ struct ptp_clock_info clock_info;
+
+ /* Registered PTP clock device */
+ struct ptp_clock *clock;
+
+ /* Adapter specific private data structure */
+ struct ena_adapter *adapter;
+
+ /* PHC lock */
+ spinlock_t lock;
+
+ /* Enabled by kernel */
+ bool enabled;
+};
+
+void ena_phc_enable(struct ena_adapter *adapter, bool enable);
+bool ena_phc_is_enabled(struct ena_adapter *adapter);
+bool ena_phc_is_active(struct ena_adapter *adapter);
+int ena_phc_get_index(struct ena_adapter *adapter);
+int ena_phc_init(struct ena_adapter *adapter);
+void ena_phc_destroy(struct ena_adapter *adapter);
+int ena_phc_alloc(struct ena_adapter *adapter);
+void ena_phc_free(struct ena_adapter *adapter);
+
+#endif /* ENA_PHC_H */
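The header above implies a fixed pairing over the adapter lifetime: ena_phc_alloc()/ena_phc_free() bracket ena_phc_init()/ena_phc_destroy(). A sketch of that ordering with placeholder probe/remove helpers (error handling reduced; the real driver may well tolerate ena_phc_init() returning -EOPNOTSUPP on devices without PHC support):

static int ena_probe_phc_sketch(struct ena_adapter *adapter)
{
	int rc;

	rc = ena_phc_alloc(adapter);	/* vzalloc of struct ena_phc_info */
	if (rc)
		return rc;

	rc = ena_phc_init(adapter);	/* ena_com setup + ptp_clock_register() */
	if (rc)
		ena_phc_free(adapter);

	return rc;
}

static void ena_remove_phc_sketch(struct ena_adapter *adapter)
{
	ena_phc_destroy(adapter);	/* ptp_clock_unregister() + ena_com teardown */
	ena_phc_free(adapter);
}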
diff --git a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
index 2c3d6a77ea79..51068dc1cc2a 100644
--- a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
@@ -22,6 +22,7 @@ enum ena_regs_reset_reason_types {
ENA_REGS_RESET_GENERIC = 13,
ENA_REGS_RESET_MISS_INTERRUPT = 14,
ENA_REGS_RESET_SUSPECTED_POLL_STARVATION = 15,
+ ENA_REGS_RESET_RX_DESCRIPTOR_MALFORMED = 16,
};
/* ena_registers offsets */
@@ -52,6 +53,11 @@ enum ena_regs_reset_reason_types {
#define ENA_REGS_MMIO_RESP_HI_OFF 0x64
#define ENA_REGS_RSS_IND_ENTRY_UPDATE_OFF 0x68
+/* phc_registers offsets */
+
+/* PHC registers start at offset 0x100 */
+#define ENA_REGS_PHC_DB_OFF 0x100
+
/* version register */
#define ENA_REGS_VERSION_MINOR_VERSION_MASK 0xff
#define ENA_REGS_VERSION_MAJOR_VERSION_SHIFT 8
@@ -128,4 +134,7 @@ enum ena_regs_reset_reason_types {
#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_SHIFT 16
#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_MASK 0xffff0000
+/* phc_db_req_id register */
+#define ENA_REGS_PHC_DB_REQ_ID_MASK 0xffff
+
#endif /* _ENA_REGS_H_ */
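The two new definitions describe the PHC doorbell: a 16-bit request id written to the register block at offset 0x100. The snippet below is purely illustrative of how they combine; the actual doorbell write lives in the ena_com layer, which is not part of this hunk, and reg_base/req_id are placeholders:

static inline void ena_phc_ring_doorbell_sketch(u8 __iomem *reg_base, u16 req_id)
{
	u32 db_val = req_id & ENA_REGS_PHC_DB_REQ_ID_MASK;

	writel(db_val, reg_base + ENA_REGS_PHC_DB_OFF);
}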
diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c
index ef512cf89abf..27792a52b6cf 100644
--- a/drivers/net/ethernet/amd/7990.c
+++ b/drivers/net/ethernet/amd/7990.c
@@ -667,4 +667,5 @@ void lance_poll(struct net_device *dev)
EXPORT_SYMBOL_GPL(lance_poll);
#endif
+MODULE_DESCRIPTION("LANCE Ethernet IC generic routines");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index f8cc8925161c..d54dca3074eb 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -56,7 +56,7 @@ config LANCE
config PCNET32
tristate "AMD PCnet32 PCI support"
- depends on PCI
+ depends on PCI && HAS_IOPORT
select CRC32
select MII
help
@@ -122,7 +122,7 @@ config MVME147_NET
config PCMCIA_NMCLAN
tristate "New Media PCMCIA support"
- depends on PCMCIA
+ depends on PCMCIA && HAS_IOPORT
help
Say Y here if you intend to attach a New Media Ethernet or LiveWire
PCMCIA (PC-card) Ethernet card to your computer.
@@ -165,6 +165,7 @@ config AMD_XGBE
select CRC32
select PHYLIB
select AMD_XGBE_HAVE_ECC if X86
+ select NET_SELFTESTS
help
This driver supports the AMD 10GbE Ethernet device found on an
AMD SoC.
diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c
index 68983b717145..ce9445425045 100644
--- a/drivers/net/ethernet/amd/a2065.c
+++ b/drivers/net/ethernet/amd/a2065.c
@@ -486,7 +486,7 @@ static int lance_close(struct net_device *dev)
volatile struct lance_regs *ll = lp->ll;
netif_stop_queue(dev);
- del_timer_sync(&lp->multicast_timer);
+ timer_delete_sync(&lp->multicast_timer);
/* Stop the card */
ll->rap = LE_CSR0;
@@ -636,7 +636,7 @@ static void lance_set_multicast(struct net_device *dev)
static void lance_set_multicast_retry(struct timer_list *t)
{
- struct lance_private *lp = from_timer(lp, t, multicast_timer);
+ struct lance_private *lp = timer_container_of(lp, t, multicast_timer);
lance_set_multicast(lp->dev);
}
@@ -781,4 +781,5 @@ static void __exit a2065_cleanup_module(void)
module_init(a2065_init_module);
module_exit(a2065_cleanup_module);
+MODULE_DESCRIPTION("Commodore A2065 Ethernet driver");
MODULE_LICENSE("GPL");
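The timer changes in this and the following AMD drivers are part of the tree-wide rename: del_timer_sync() becomes timer_delete_sync() and from_timer() becomes timer_container_of(), with identical semantics. A stand-alone sketch of the pattern (struct and field names are illustrative):

#include <linux/timer.h>

struct demo_priv {
	struct timer_list multicast_timer;	/* embedded timer */
};

static void demo_timer_fn(struct timer_list *t)
{
	/* Recover the containing structure, exactly as from_timer() did */
	struct demo_priv *lp = timer_container_of(lp, t, multicast_timer);

	/* ... use lp ... */
}

static void demo_stop(struct demo_priv *lp)
{
	/* Same behaviour as the old del_timer_sync() */
	timer_delete_sync(&lp->multicast_timer);
}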
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index ea6cfc2095e1..76e8c13d5985 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -1173,7 +1173,7 @@ static int amd8111e_close(struct net_device *dev)
/* Delete ipg timer */
if (lp->options & OPTION_DYN_IPG_ENABLE)
- del_timer_sync(&lp->ipg_data.ipg_timer);
+ timer_delete_sync(&lp->ipg_data.ipg_timer);
spin_unlock_irq(&lp->lock);
free_irq(dev->irq, dev);
@@ -1520,9 +1520,9 @@ static int amd8111e_change_mtu(struct net_device *dev, int new_mtu)
if (!netif_running(dev)) {
/* new_mtu will be used
- * when device starts netxt time
+ * when device starts next time
*/
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
@@ -1531,7 +1531,7 @@ static int amd8111e_change_mtu(struct net_device *dev, int new_mtu)
/* stop the chip */
writel(RUN, lp->mmio + CMD0);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
err = amd8111e_restart(dev);
spin_unlock_irq(&lp->lock);
@@ -1598,7 +1598,7 @@ static int __maybe_unused amd8111e_suspend(struct device *dev_d)
/* stop chip */
spin_lock_irq(&lp->lock);
if (lp->options & OPTION_DYN_IPG_ENABLE)
- del_timer_sync(&lp->ipg_data.ipg_timer);
+ timer_delete_sync(&lp->ipg_data.ipg_timer);
amd8111e_stop_chip(lp);
spin_unlock_irq(&lp->lock);
@@ -1641,7 +1641,8 @@ static int __maybe_unused amd8111e_resume(struct device *dev_d)
static void amd8111e_config_ipg(struct timer_list *t)
{
- struct amd8111e_priv *lp = from_timer(lp, t, ipg_data.ipg_timer);
+ struct amd8111e_priv *lp = timer_container_of(lp, t,
+ ipg_data.ipg_timer);
struct ipg_info *ipg_data = &lp->ipg_data;
void __iomem *mmio = lp->mmio;
unsigned int prev_col_cnt = ipg_data->col_cnt;
@@ -1796,7 +1797,6 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
lp = netdev_priv(dev);
lp->pci_dev = pdev;
lp->amd8111e_net_dev = dev;
- lp->pm_cap = pdev->pm_cap;
spin_lock_init(&lp->lock);
diff --git a/drivers/net/ethernet/amd/amd8111e.h b/drivers/net/ethernet/amd/amd8111e.h
index 9d570adb295b..e4ee4c28800c 100644
--- a/drivers/net/ethernet/amd/amd8111e.h
+++ b/drivers/net/ethernet/amd/amd8111e.h
@@ -550,7 +550,6 @@ typedef enum {
/* Driver definitions */
-#define PCI_VENDOR_ID_AMD 0x1022
#define PCI_DEVICE_ID_AMD8111E_7462 0x7462
#define MAX_UNITS 8 /* Maximum number of devices possible */
@@ -764,7 +763,6 @@ struct amd8111e_priv{
u32 ext_phy_id;
struct amd8111e_link_config link_config;
- int pm_cap;
struct net_device *next;
int mii;
diff --git a/drivers/net/ethernet/amd/ariadne.c b/drivers/net/ethernet/amd/ariadne.c
index 38153e633231..fa201da567ed 100644
--- a/drivers/net/ethernet/amd/ariadne.c
+++ b/drivers/net/ethernet/amd/ariadne.c
@@ -790,4 +790,5 @@ static void __exit ariadne_cleanup_module(void)
module_init(ariadne_init_module);
module_exit(ariadne_cleanup_module);
+MODULE_DESCRIPTION("Ariadne Ethernet Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index 751454d305c6..8c8cc7d0f42d 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -79,6 +79,7 @@ static int lance_debug = 1;
#endif
module_param(lance_debug, int, 0);
MODULE_PARM_DESC(lance_debug, "atarilance debug level (0-3)");
+MODULE_DESCRIPTION("Atari LANCE Ethernet driver");
MODULE_LICENSE("GPL");
/* Print debug messages on probing? */
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 85c978149bf6..9d35ac348ebe 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -571,7 +571,7 @@ static struct db_dest *au1000_GetFreeDB(struct au1000_private *aup)
return pDB;
}
-void au1000_ReleaseDB(struct au1000_private *aup, struct db_dest *pDB)
+static void au1000_ReleaseDB(struct au1000_private *aup, struct db_dest *pDB)
{
struct db_dest *pDBfree = aup->pDBfree;
if (pDBfree)
@@ -1363,7 +1363,7 @@ static void au1000_remove(struct platform_device *pdev)
static struct platform_driver au1000_eth_driver = {
.probe = au1000_probe,
- .remove_new = au1000_remove,
+ .remove = au1000_remove,
.driver = {
.name = "au1000-eth",
},
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index ec8df05e7bf6..8d05a0c5f2d5 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -842,7 +842,7 @@ static int lance_close(struct net_device *dev)
volatile struct lance_regs *ll = lp->ll;
netif_stop_queue(dev);
- del_timer_sync(&lp->multicast_timer);
+ timer_delete_sync(&lp->multicast_timer);
/* Stop the card */
writereg(&ll->rap, LE_CSR0);
@@ -1004,7 +1004,7 @@ static void lance_set_multicast(struct net_device *dev)
static void lance_set_multicast_retry(struct timer_list *t)
{
- struct lance_private *lp = from_timer(lp, t, multicast_timer);
+ struct lance_private *lp = timer_container_of(lp, t, multicast_timer);
struct net_device *dev = lp->dev;
lance_set_multicast(dev);
diff --git a/drivers/net/ethernet/amd/hplance.c b/drivers/net/ethernet/amd/hplance.c
index 055fda11c572..df42294530cb 100644
--- a/drivers/net/ethernet/amd/hplance.c
+++ b/drivers/net/ethernet/amd/hplance.c
@@ -234,4 +234,5 @@ static void __exit hplance_cleanup_module(void)
module_init(hplance_init_module);
module_exit(hplance_cleanup_module);
+MODULE_DESCRIPTION("HP300 on-board LANCE Ethernet driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
index 6cf38180cc01..b1e6620ad41d 100644
--- a/drivers/net/ethernet/amd/lance.c
+++ b/drivers/net/ethernet/amd/lance.c
@@ -385,6 +385,7 @@ static void __exit lance_cleanup_module(void)
}
module_exit(lance_cleanup_module);
#endif /* MODULE */
+MODULE_DESCRIPTION("AMD LANCE/PCnet Ethernet driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/amd/mvme147.c b/drivers/net/ethernet/amd/mvme147.c
index 410c7b67eba4..f19b04b92fa9 100644
--- a/drivers/net/ethernet/amd/mvme147.c
+++ b/drivers/net/ethernet/amd/mvme147.c
@@ -105,10 +105,6 @@ static struct net_device * __init mvme147lance_probe(void)
macaddr[3] = address&0xff;
eth_hw_addr_set(dev, macaddr);
- printk("%s: MVME147 at 0x%08lx, irq %d, Hardware Address %pM\n",
- dev->name, dev->base_addr, MVME147_LANCE_IRQ,
- dev->dev_addr);
-
lp = netdev_priv(dev);
lp->ram = __get_dma_pages(GFP_ATOMIC, 3); /* 32K */
if (!lp->ram) {
@@ -138,6 +134,9 @@ static struct net_device * __init mvme147lance_probe(void)
return ERR_PTR(err);
}
+ netdev_info(dev, "MVME147 at 0x%08lx, irq %d, Hardware Address %pM\n",
+ dev->base_addr, MVME147_LANCE_IRQ, dev->dev_addr);
+
return dev;
}
@@ -178,6 +177,7 @@ static int m147lance_close(struct net_device *dev)
return 0;
}
+MODULE_DESCRIPTION("MVME147 LANCE Ethernet driver");
MODULE_LICENSE("GPL");
static struct net_device *dev_mvme147_lance;
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index 0dd391c84c13..37054a670407 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -760,7 +760,7 @@ static int mace_config(struct net_device *dev, struct ifmap *map)
{
if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
if (map->port <= 2) {
- dev->if_port = map->port;
+ WRITE_ONCE(dev->if_port, map->port);
netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
} else
return -EINVAL;
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 72db9f9e7bee..9eaefa0f5e80 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -462,7 +462,7 @@ static void pcnet32_netif_start(struct net_device *dev)
val = lp->a->read_csr(ioaddr, CSR3);
val &= 0x00ff;
lp->a->write_csr(ioaddr, CSR3, val);
- napi_enable(&lp->napi);
+ napi_enable_locked(&lp->napi);
}
/*
@@ -889,6 +889,7 @@ static int pcnet32_set_ringparam(struct net_device *dev,
if (netif_running(dev))
pcnet32_netif_stop(dev);
+ netdev_lock(dev);
spin_lock_irqsave(&lp->lock, flags);
lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
@@ -920,6 +921,7 @@ static int pcnet32_set_ringparam(struct net_device *dev,
}
spin_unlock_irqrestore(&lp->lock, flags);
+ netdev_unlock(dev);
netif_info(lp, drv, dev, "Ring Param Settings: RX: %d, TX: %d\n",
lp->rx_ring_size, lp->tx_ring_size);
@@ -985,6 +987,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
if (netif_running(dev))
pcnet32_netif_stop(dev);
+ netdev_lock(dev);
spin_lock_irqsave(&lp->lock, flags);
lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
@@ -1122,6 +1125,7 @@ clean_up:
lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
}
spin_unlock_irqrestore(&lp->lock, flags);
+ netdev_unlock(dev);
return rc;
} /* end pcnet32_loopback_test */
@@ -2101,6 +2105,7 @@ static int pcnet32_open(struct net_device *dev)
return -EAGAIN;
}
+ netdev_lock(dev);
spin_lock_irqsave(&lp->lock, flags);
/* Check for a valid station address */
if (!is_valid_ether_addr(dev->dev_addr)) {
@@ -2266,7 +2271,7 @@ static int pcnet32_open(struct net_device *dev)
goto err_free_ring;
}
- napi_enable(&lp->napi);
+ napi_enable_locked(&lp->napi);
/* Re-initialize the PCNET32, and start it when done. */
lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
@@ -2300,6 +2305,7 @@ static int pcnet32_open(struct net_device *dev)
lp->a->read_csr(ioaddr, CSR0));
spin_unlock_irqrestore(&lp->lock, flags);
+ netdev_unlock(dev);
return 0; /* Always succeed */
@@ -2315,6 +2321,7 @@ err_free_ring:
err_free_irq:
spin_unlock_irqrestore(&lp->lock, flags);
+ netdev_unlock(dev);
free_irq(dev->irq, dev);
return rc;
}
@@ -2623,7 +2630,7 @@ static int pcnet32_close(struct net_device *dev)
struct pcnet32_private *lp = netdev_priv(dev);
unsigned long flags;
- del_timer_sync(&lp->watchdog_timer);
+ timer_delete_sync(&lp->watchdog_timer);
netif_stop_queue(dev);
napi_disable(&lp->napi);
@@ -2898,7 +2905,7 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
static void pcnet32_watchdog(struct timer_list *t)
{
- struct pcnet32_private *lp = from_timer(lp, t, watchdog_timer);
+ struct pcnet32_private *lp = timer_container_of(lp, t, watchdog_timer);
struct net_device *dev = lp->dev;
unsigned long flags;
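The pcnet32 hunks above switch napi_enable() to napi_enable_locked(), which requires the netdev instance lock to be held, hence the added netdev_lock()/netdev_unlock() pairs around the driver's existing spinlock. A condensed sketch of the resulting lock ordering (function name is illustrative):

static void demo_enable_napi_under_locks(struct net_device *dev,
					 struct napi_struct *napi,
					 spinlock_t *hw_lock)
{
	unsigned long flags;

	netdev_lock(dev);			/* instance lock, taken first */
	spin_lock_irqsave(hw_lock, flags);	/* driver's own hardware lock */

	napi_enable_locked(napi);		/* caller already holds netdev lock */

	spin_unlock_irqrestore(hw_lock, flags);
	netdev_unlock(dev);
}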
diff --git a/drivers/net/ethernet/amd/pds_core/adminq.c b/drivers/net/ethernet/amd/pds_core/adminq.c
index c83a0a80d533..097bb092bdb8 100644
--- a/drivers/net/ethernet/amd/pds_core/adminq.c
+++ b/drivers/net/ethernet/amd/pds_core/adminq.c
@@ -5,11 +5,6 @@
#include "core.h"
-struct pdsc_wait_context {
- struct pdsc_qcq *qcq;
- struct completion wait_completion;
-};
-
static int pdsc_process_notifyq(struct pdsc_qcq *qcq)
{
union pds_core_notifyq_comp *comp;
@@ -109,10 +104,10 @@ void pdsc_process_adminq(struct pdsc_qcq *qcq)
q_info = &q->info[q->tail_idx];
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
- /* Copy out the completion data */
- memcpy(q_info->dest, comp, sizeof(*comp));
-
- complete_all(&q_info->wc->wait_completion);
+ if (!completion_done(&q_info->completion)) {
+ memcpy(q_info->dest, comp, sizeof(*comp));
+ complete(&q_info->completion);
+ }
if (cq->tail_idx == cq->num_descs - 1)
cq->done_color = !cq->done_color;
@@ -162,8 +157,7 @@ irqreturn_t pdsc_adminq_isr(int irq, void *data)
static int __pdsc_adminq_post(struct pdsc *pdsc,
struct pdsc_qcq *qcq,
union pds_core_adminq_cmd *cmd,
- union pds_core_adminq_comp *comp,
- struct pdsc_wait_context *wc)
+ union pds_core_adminq_comp *comp)
{
struct pdsc_queue *q = &qcq->q;
struct pdsc_q_info *q_info;
@@ -205,9 +199,9 @@ static int __pdsc_adminq_post(struct pdsc *pdsc,
/* Post the request */
index = q->head_idx;
q_info = &q->info[index];
- q_info->wc = wc;
q_info->dest = comp;
memcpy(q_info->desc, cmd, sizeof(*cmd));
+ reinit_completion(&q_info->completion);
dev_dbg(pdsc->dev, "head_idx %d tail_idx %d\n",
q->head_idx, q->tail_idx);
@@ -231,16 +225,13 @@ int pdsc_adminq_post(struct pdsc *pdsc,
union pds_core_adminq_comp *comp,
bool fast_poll)
{
- struct pdsc_wait_context wc = {
- .wait_completion =
- COMPLETION_INITIALIZER_ONSTACK(wc.wait_completion),
- };
- unsigned long poll_interval = 1;
+ unsigned long poll_interval = 200;
unsigned long poll_jiffies;
unsigned long time_limit;
unsigned long time_start;
unsigned long time_done;
unsigned long remaining;
+ struct completion *wc;
int err = 0;
int index;
@@ -250,20 +241,19 @@ int pdsc_adminq_post(struct pdsc *pdsc,
return -ENXIO;
}
- wc.qcq = &pdsc->adminqcq;
- index = __pdsc_adminq_post(pdsc, &pdsc->adminqcq, cmd, comp, &wc);
+ index = __pdsc_adminq_post(pdsc, &pdsc->adminqcq, cmd, comp);
if (index < 0) {
err = index;
goto err_out;
}
+ wc = &pdsc->adminqcq.q.info[index].completion;
time_start = jiffies;
time_limit = time_start + HZ * pdsc->devcmd_timeout;
do {
/* Timeslice the actual wait to catch IO errors etc early */
- poll_jiffies = msecs_to_jiffies(poll_interval);
- remaining = wait_for_completion_timeout(&wc.wait_completion,
- poll_jiffies);
+ poll_jiffies = usecs_to_jiffies(poll_interval);
+ remaining = wait_for_completion_timeout(wc, poll_jiffies);
if (remaining)
break;
@@ -292,9 +282,11 @@ int pdsc_adminq_post(struct pdsc *pdsc,
dev_dbg(pdsc->dev, "%s: elapsed %d msecs\n",
__func__, jiffies_to_msecs(time_done - time_start));
- /* Check the results */
- if (time_after_eq(time_done, time_limit))
+ /* Check the results and clear an un-completed timeout */
+ if (time_after_eq(time_done, time_limit) && !completion_done(wc)) {
err = -ETIMEDOUT;
+ complete(wc);
+ }
dev_dbg(pdsc->dev, "read admin queue completion idx %d:\n", index);
dynamic_hex_dump("comp ", DUMP_PREFIX_OFFSET, 16, 1,
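The adminq rework above drops the on-stack pdsc_wait_context in favour of one struct completion embedded in each pdsc_q_info slot. Reduced fragments of the handshake, keyed to the functions they come from and using only the completion API already visible in the hunks:

/* pdsc_q_map(): arm one completion per descriptor slot */
init_completion(&cur->completion);

/* __pdsc_adminq_post(): re-arm the slot before ringing the doorbell */
reinit_completion(&q_info->completion);

/* pdsc_adminq_post(): time-sliced wait so IO errors are caught early */
remaining = wait_for_completion_timeout(&q_info->completion,
					usecs_to_jiffies(200));

/* pdsc_process_adminq(): deliver the completion data at most once */
if (!completion_done(&q_info->completion)) {
	memcpy(q_info->dest, comp, sizeof(*comp));
	complete(&q_info->completion);
}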
diff --git a/drivers/net/ethernet/amd/pds_core/auxbus.c b/drivers/net/ethernet/amd/pds_core/auxbus.c
index 2babea110991..92f359f2b449 100644
--- a/drivers/net/ethernet/amd/pds_core/auxbus.c
+++ b/drivers/net/ethernet/amd/pds_core/auxbus.c
@@ -107,9 +107,6 @@ int pds_client_adminq_cmd(struct pds_auxiliary_dev *padev,
dev_dbg(pf->dev, "%s: %s opcode %d\n",
__func__, dev_name(&padev->aux_dev.dev), req->opcode);
- if (pf->state)
- return -ENXIO;
-
/* Wrap the client's request */
cmd.client_request.opcode = PDS_AQ_CMD_CLIENT_CMD;
cmd.client_request.client_id = cpu_to_le16(padev->client_id);
@@ -175,34 +172,31 @@ static struct pds_auxiliary_dev *pdsc_auxbus_dev_register(struct pdsc *cf,
return padev;
}
-int pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf)
+void pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf,
+ struct pds_auxiliary_dev **pd_ptr)
{
struct pds_auxiliary_dev *padev;
- int err = 0;
- if (!cf)
- return -ENODEV;
+ if (!*pd_ptr)
+ return;
mutex_lock(&pf->config_lock);
- padev = pf->vfs[cf->vf_id].padev;
- if (padev) {
- pds_client_unregister(pf, padev->client_id);
- auxiliary_device_delete(&padev->aux_dev);
- auxiliary_device_uninit(&padev->aux_dev);
- padev->client_id = 0;
- }
- pf->vfs[cf->vf_id].padev = NULL;
+ padev = *pd_ptr;
+ pds_client_unregister(pf, padev->client_id);
+ auxiliary_device_delete(&padev->aux_dev);
+ auxiliary_device_uninit(&padev->aux_dev);
+ *pd_ptr = NULL;
mutex_unlock(&pf->config_lock);
- return err;
}
-int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf)
+int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf,
+ enum pds_core_vif_types vt,
+ struct pds_auxiliary_dev **pd_ptr)
{
struct pds_auxiliary_dev *padev;
char devname[PDS_DEVNAME_LEN];
- enum pds_core_vif_types vt;
unsigned long mask;
u16 vt_support;
int client_id;
@@ -211,6 +205,9 @@ int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf)
if (!cf)
return -ENODEV;
+ if (vt >= PDS_DEV_TYPE_MAX)
+ return -EINVAL;
+
mutex_lock(&pf->config_lock);
mask = BIT_ULL(PDSC_S_FW_DEAD) |
@@ -222,17 +219,10 @@ int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf)
goto out_unlock;
}
- /* We only support vDPA so far, so it is the only one to
- * be verified that it is available in the Core device and
- * enabled in the devlink param. In the future this might
- * become a loop for several VIF types.
- */
-
/* Verify that the type is supported and enabled. It is not
- * an error if there is no auxbus device support for this
- * VF, it just means something else needs to happen with it.
+ * an error if the firmware doesn't support the feature; the
+ * driver just won't set up an auxiliary_device for it.
*/
- vt = PDS_DEV_TYPE_VDPA;
vt_support = !!le16_to_cpu(pf->dev_ident.vif_types[vt]);
if (!(vt_support &&
pf->viftype_status[vt].supported &&
@@ -258,7 +248,7 @@ int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf)
err = PTR_ERR(padev);
goto out_unlock;
}
- pf->vfs[cf->vf_id].padev = padev;
+ *pd_ptr = padev;
out_unlock:
mutex_unlock(&pf->config_lock);
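With the VIF type and the destination pointer now passed in explicitly, callers decide what to create and where it is stored. A sketch matching the call sites added later in this patch (pdsc_init_pf(), pdsc_init_vf() and the reset paths in main.c):

/* PF-level fwctl auxiliary device */
err = pdsc_auxbus_dev_add(pdsc, pdsc, PDS_DEV_TYPE_FWCTL, &pdsc->padev);

/* per-VF vDPA auxiliary device */
err = pdsc_auxbus_dev_add(vf, pf, PDS_DEV_TYPE_VDPA,
			  &pf->vfs[vf->vf_id].padev);

/* teardown is symmetric; the helper also NULLs the stored pointer */
pdsc_auxbus_dev_del(pdsc, pdsc, &pdsc->padev);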
diff --git a/drivers/net/ethernet/amd/pds_core/core.c b/drivers/net/ethernet/amd/pds_core/core.c
index 536635e57727..076dfe2910c7 100644
--- a/drivers/net/ethernet/amd/pds_core/core.c
+++ b/drivers/net/ethernet/amd/pds_core/core.c
@@ -167,8 +167,10 @@ static void pdsc_q_map(struct pdsc_queue *q, void *base, dma_addr_t base_pa)
q->base = base;
q->base_pa = base_pa;
- for (i = 0, cur = q->info; i < q->num_descs; i++, cur++)
+ for (i = 0, cur = q->info; i < q->num_descs; i++, cur++) {
cur->desc = base + (i * q->desc_size);
+ init_completion(&cur->completion);
+ }
}
static void pdsc_cq_map(struct pdsc_cq *cq, void *base, dma_addr_t base_pa)
@@ -325,10 +327,7 @@ static int pdsc_core_init(struct pdsc *pdsc)
size_t sz;
int err;
- /* Scale the descriptor ring length based on number of CPUs and VFs */
- numdescs = max_t(int, PDSC_ADMINQ_MIN_LENGTH, num_online_cpus());
- numdescs += 2 * pci_sriov_get_totalvfs(pdsc->pdev);
- numdescs = roundup_pow_of_two(numdescs);
+ numdescs = PDSC_ADMINQ_MAX_LENGTH;
err = pdsc_qcq_alloc(pdsc, PDS_CORE_QTYPE_ADMINQ, 0, "adminq",
PDS_CORE_QCQ_F_CORE | PDS_CORE_QCQ_F_INTR,
numdescs,
@@ -402,6 +401,10 @@ err_out_uninit:
}
static struct pdsc_viftype pdsc_viftype_defaults[] = {
+ [PDS_DEV_TYPE_FWCTL] = { .name = PDS_DEV_TYPE_FWCTL_STR,
+ .enabled = true,
+ .vif_id = PDS_DEV_TYPE_FWCTL,
+ .dl_id = -1 },
[PDS_DEV_TYPE_VDPA] = { .name = PDS_DEV_TYPE_VDPA_STR,
.vif_id = PDS_DEV_TYPE_VDPA,
.dl_id = DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET },
@@ -412,7 +415,8 @@ static int pdsc_viftypes_init(struct pdsc *pdsc)
{
enum pds_core_vif_types vt;
- pdsc->viftype_status = kzalloc(sizeof(pdsc_viftype_defaults),
+ pdsc->viftype_status = kcalloc(ARRAY_SIZE(pdsc_viftype_defaults),
+ sizeof(*pdsc->viftype_status),
GFP_KERNEL);
if (!pdsc->viftype_status)
return -ENOMEM;
@@ -428,6 +432,7 @@ static int pdsc_viftypes_init(struct pdsc *pdsc)
/* See what the Core device has for support */
vt_support = !!le16_to_cpu(pdsc->dev_ident.vif_types[vt]);
+
dev_dbg(pdsc->dev, "VIF %s is %ssupported\n",
pdsc->viftype_status[vt].name,
vt_support ? "" : "not ");
diff --git a/drivers/net/ethernet/amd/pds_core/core.h b/drivers/net/ethernet/amd/pds_core/core.h
index a3e17a0c187a..4a6b35c84dab 100644
--- a/drivers/net/ethernet/amd/pds_core/core.h
+++ b/drivers/net/ethernet/amd/pds_core/core.h
@@ -16,7 +16,7 @@
#define PDSC_WATCHDOG_SECS 5
#define PDSC_QUEUE_NAME_MAX_SZ 16
-#define PDSC_ADMINQ_MIN_LENGTH 16 /* must be a power of two */
+#define PDSC_ADMINQ_MAX_LENGTH 16 /* must be a power of two */
#define PDSC_NOTIFYQ_LENGTH 64 /* must be a power of two */
#define PDSC_TEARDOWN_RECOVERY false
#define PDSC_TEARDOWN_REMOVING true
@@ -96,7 +96,7 @@ struct pdsc_q_info {
unsigned int bytes;
unsigned int nbufs;
struct pdsc_buf_info bufs[PDS_CORE_MAX_FRAGS];
- struct pdsc_wait_context *wc;
+ struct completion completion;
void *dest;
};
@@ -156,6 +156,7 @@ struct pdsc {
struct dentry *dentry;
struct device *dev;
struct pdsc_dev_bar bars[PDS_CORE_BARS_MAX];
+ struct pds_auxiliary_dev *padev;
struct pdsc_vf *vfs;
int num_vfs;
int vf_id;
@@ -254,9 +255,11 @@ int pdsc_dl_flash_update(struct devlink *dl,
struct devlink_flash_update_params *params,
struct netlink_ext_ack *extack);
int pdsc_dl_enable_get(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx);
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack);
int pdsc_dl_enable_set(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx);
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack);
int pdsc_dl_enable_validate(struct devlink *dl, u32 id,
union devlink_param_value val,
struct netlink_ext_ack *extack);
@@ -302,8 +305,11 @@ void pdsc_health_thread(struct work_struct *work);
int pdsc_register_notify(struct notifier_block *nb);
void pdsc_unregister_notify(struct notifier_block *nb);
void pdsc_notify(unsigned long event, void *data);
-int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf);
-int pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf);
+int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf,
+ enum pds_core_vif_types vt,
+ struct pds_auxiliary_dev **pd_ptr);
+void pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf,
+ struct pds_auxiliary_dev **pd_ptr);
void pdsc_process_adminq(struct pdsc_qcq *qcq);
void pdsc_work_thread(struct work_struct *work);
diff --git a/drivers/net/ethernet/amd/pds_core/debugfs.c b/drivers/net/ethernet/amd/pds_core/debugfs.c
index 6bdd02b7aa6d..04c5e3abd8d7 100644
--- a/drivers/net/ethernet/amd/pds_core/debugfs.c
+++ b/drivers/net/ethernet/amd/pds_core/debugfs.c
@@ -112,7 +112,7 @@ void pdsc_debugfs_add_qcq(struct pdsc *pdsc, struct pdsc_qcq *qcq)
struct pdsc_cq *cq = &qcq->cq;
qcq_dentry = debugfs_create_dir(q->name, pdsc->dentry);
- if (IS_ERR_OR_NULL(qcq_dentry))
+ if (IS_ERR(qcq_dentry))
return;
qcq->dentry = qcq_dentry;
@@ -123,7 +123,7 @@ void pdsc_debugfs_add_qcq(struct pdsc *pdsc, struct pdsc_qcq *qcq)
debugfs_create_x32("accum_work", 0400, qcq_dentry, &qcq->accum_work);
q_dentry = debugfs_create_dir("q", qcq->dentry);
- if (IS_ERR_OR_NULL(q_dentry))
+ if (IS_ERR(q_dentry))
return;
debugfs_create_u32("index", 0400, q_dentry, &q->index);
@@ -135,7 +135,7 @@ void pdsc_debugfs_add_qcq(struct pdsc *pdsc, struct pdsc_qcq *qcq)
debugfs_create_u16("head", 0400, q_dentry, &q->head_idx);
cq_dentry = debugfs_create_dir("cq", qcq->dentry);
- if (IS_ERR_OR_NULL(cq_dentry))
+ if (IS_ERR(cq_dentry))
return;
debugfs_create_x64("base_pa", 0400, cq_dentry, &cq->base_pa);
@@ -148,14 +148,15 @@ void pdsc_debugfs_add_qcq(struct pdsc *pdsc, struct pdsc_qcq *qcq)
struct pdsc_intr_info *intr = &pdsc->intr_info[qcq->intx];
intr_dentry = debugfs_create_dir("intr", qcq->dentry);
- if (IS_ERR_OR_NULL(intr_dentry))
+ if (IS_ERR(intr_dentry))
return;
debugfs_create_u32("index", 0400, intr_dentry, &intr->index);
debugfs_create_u32("vector", 0400, intr_dentry, &intr->vector);
- intr_ctrl_regset = kzalloc(sizeof(*intr_ctrl_regset),
- GFP_KERNEL);
+ intr_ctrl_regset = devm_kzalloc(pdsc->dev,
+ sizeof(*intr_ctrl_regset),
+ GFP_KERNEL);
if (!intr_ctrl_regset)
return;
intr_ctrl_regset->regs = intr_ctrl_regs;
diff --git a/drivers/net/ethernet/amd/pds_core/devlink.c b/drivers/net/ethernet/amd/pds_core/devlink.c
index 54864f27c87a..b576be626a29 100644
--- a/drivers/net/ethernet/amd/pds_core/devlink.c
+++ b/drivers/net/ethernet/amd/pds_core/devlink.c
@@ -22,7 +22,8 @@ pdsc_viftype *pdsc_dl_find_viftype_by_id(struct pdsc *pdsc,
}
int pdsc_dl_enable_get(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct pdsc *pdsc = devlink_priv(dl);
struct pdsc_viftype *vt_entry;
@@ -37,7 +38,8 @@ int pdsc_dl_enable_get(struct devlink *dl, u32 id,
}
int pdsc_dl_enable_set(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct pdsc *pdsc = devlink_priv(dl);
struct pdsc_viftype *vt_entry;
@@ -55,8 +57,11 @@ int pdsc_dl_enable_set(struct devlink *dl, u32 id,
for (vf_id = 0; vf_id < pdsc->num_vfs; vf_id++) {
struct pdsc *vf = pdsc->vfs[vf_id].vf;
- err = ctx->val.vbool ? pdsc_auxbus_dev_add(vf, pdsc) :
- pdsc_auxbus_dev_del(vf, pdsc);
+ if (ctx->val.vbool)
+ err = pdsc_auxbus_dev_add(vf, pdsc, vt_entry->vif_id,
+ &pdsc->vfs[vf_id].padev);
+ else
+ pdsc_auxbus_dev_del(vf, pdsc, &pdsc->vfs[vf_id].padev);
}
return err;
@@ -101,7 +106,7 @@ int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
.fw_control.opcode = PDS_CORE_CMD_FW_CONTROL,
.fw_control.oper = PDS_CORE_FW_GET_LIST,
};
- struct pds_core_fw_list_info fw_list;
+ struct pds_core_fw_list_info fw_list = {};
struct pdsc *pdsc = devlink_priv(dl);
union pds_core_dev_comp comp;
char buf[32];
@@ -114,10 +119,8 @@ int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
if (!err)
memcpy_fromio(&fw_list, pdsc->cmd_regs->data, sizeof(fw_list));
mutex_unlock(&pdsc->devcmd_lock);
- if (err && err != -EIO)
- return err;
- listlen = fw_list.num_fw_slots;
+ listlen = min(fw_list.num_fw_slots, ARRAY_SIZE(fw_list.fw_names));
for (i = 0; i < listlen; i++) {
if (i < ARRAY_SIZE(fw_slotnames))
strscpy(buf, fw_slotnames[i], sizeof(buf));
diff --git a/drivers/net/ethernet/amd/pds_core/main.c b/drivers/net/ethernet/amd/pds_core/main.c
index 660268ff9562..c7a2eff57632 100644
--- a/drivers/net/ethernet/amd/pds_core/main.c
+++ b/drivers/net/ethernet/amd/pds_core/main.c
@@ -23,7 +23,7 @@ MODULE_DEVICE_TABLE(pci, pdsc_id_table);
static void pdsc_wdtimer_cb(struct timer_list *t)
{
- struct pdsc *pdsc = from_timer(pdsc, t, wdtimer);
+ struct pdsc *pdsc = timer_container_of(pdsc, t, wdtimer);
dev_dbg(pdsc->dev, "%s: jiffies %ld\n", __func__, jiffies);
mod_timer(&pdsc->wdtimer,
@@ -190,7 +190,8 @@ static int pdsc_init_vf(struct pdsc *vf)
devl_unlock(dl);
pf->vfs[vf->vf_id].vf = vf;
- err = pdsc_auxbus_dev_add(vf, pf);
+ err = pdsc_auxbus_dev_add(vf, pf, PDS_DEV_TYPE_VDPA,
+ &pf->vfs[vf->vf_id].padev);
if (err) {
devl_lock(dl);
devl_unregister(dl);
@@ -264,6 +265,10 @@ static int pdsc_init_pf(struct pdsc *pdsc)
mutex_unlock(&pdsc->config_lock);
+ err = pdsc_auxbus_dev_add(pdsc, pdsc, PDS_DEV_TYPE_FWCTL, &pdsc->padev);
+ if (err)
+ goto err_out_stop;
+
dl = priv_to_devlink(pdsc);
devl_lock(dl);
err = devl_params_register(dl, pdsc_dl_params,
@@ -272,10 +277,10 @@ static int pdsc_init_pf(struct pdsc *pdsc)
devl_unlock(dl);
dev_warn(pdsc->dev, "Failed to register devlink params: %pe\n",
ERR_PTR(err));
- goto err_out_stop;
+ goto err_out_del_dev;
}
- hr = devl_health_reporter_create(dl, &pdsc_fw_reporter_ops, 0, pdsc);
+ hr = devl_health_reporter_create(dl, &pdsc_fw_reporter_ops, pdsc);
if (IS_ERR(hr)) {
devl_unlock(dl);
dev_warn(pdsc->dev, "Failed to create fw reporter: %pe\n", hr);
@@ -295,6 +300,8 @@ static int pdsc_init_pf(struct pdsc *pdsc)
err_out_unreg_params:
devlink_params_unregister(dl, pdsc_dl_params,
ARRAY_SIZE(pdsc_dl_params));
+err_out_del_dev:
+ pdsc_auxbus_dev_del(pdsc, pdsc, &pdsc->padev);
err_out_stop:
pdsc_stop(pdsc);
err_out_teardown:
@@ -417,7 +424,7 @@ static void pdsc_remove(struct pci_dev *pdev)
pf = pdsc_get_pf_struct(pdsc->pdev);
if (!IS_ERR(pf)) {
- pdsc_auxbus_dev_del(pdsc, pf);
+ pdsc_auxbus_dev_del(pdsc, pf, &pf->vfs[pdsc->vf_id].padev);
pf->vfs[pdsc->vf_id].vf = NULL;
}
} else {
@@ -426,6 +433,7 @@ static void pdsc_remove(struct pci_dev *pdev)
* shut themselves down.
*/
pdsc_sriov_configure(pdev, 0);
+ pdsc_auxbus_dev_del(pdsc, pdsc, &pdsc->padev);
timer_shutdown_sync(&pdsc->wdtimer);
if (pdsc->wq)
@@ -482,7 +490,10 @@ static void pdsc_reset_prepare(struct pci_dev *pdev)
pf = pdsc_get_pf_struct(pdsc->pdev);
if (!IS_ERR(pf))
- pdsc_auxbus_dev_del(pdsc, pf);
+ pdsc_auxbus_dev_del(pdsc, pf,
+ &pf->vfs[pdsc->vf_id].padev);
+ } else {
+ pdsc_auxbus_dev_del(pdsc, pdsc, &pdsc->padev);
}
pdsc_unmap_bars(pdsc);
@@ -527,7 +538,11 @@ static void pdsc_reset_done(struct pci_dev *pdev)
pf = pdsc_get_pf_struct(pdsc->pdev);
if (!IS_ERR(pf))
- pdsc_auxbus_dev_add(pdsc, pf);
+ pdsc_auxbus_dev_add(pdsc, pf, PDS_DEV_TYPE_VDPA,
+ &pf->vfs[pdsc->vf_id].padev);
+ } else {
+ pdsc_auxbus_dev_add(pdsc, pdsc, PDS_DEV_TYPE_FWCTL,
+ &pdsc->padev);
}
}
diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c
index 246f34c43765..c60df4a21158 100644
--- a/drivers/net/ethernet/amd/sun3lance.c
+++ b/drivers/net/ethernet/amd/sun3lance.c
@@ -74,6 +74,7 @@ static int lance_debug = 1;
#endif
module_param(lance_debug, int, 0);
MODULE_PARM_DESC(lance_debug, "SUN3 Lance debug level (0-3)");
+MODULE_DESCRIPTION("Sun3/Sun3x on-board LANCE Ethernet driver");
MODULE_LICENSE("GPL");
#define DPRINTK(n,a) \
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index c78706d21a6a..0b273327f5a6 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -963,7 +963,7 @@ static int lance_close(struct net_device *dev)
struct lance_private *lp = netdev_priv(dev);
netif_stop_queue(dev);
- del_timer_sync(&lp->multicast_timer);
+ timer_delete_sync(&lp->multicast_timer);
STOP_LANCE(lp);
@@ -1246,7 +1246,7 @@ static void lance_set_multicast(struct net_device *dev)
static void lance_set_multicast_retry(struct timer_list *t)
{
- struct lance_private *lp = from_timer(lp, t, multicast_timer);
+ struct lance_private *lp = timer_container_of(lp, t, multicast_timer);
struct net_device *dev = lp->dev;
lance_set_multicast(dev);
@@ -1514,7 +1514,7 @@ static struct platform_driver sunlance_sbus_driver = {
.of_match_table = sunlance_sbus_match,
},
.probe = sunlance_sbus_probe,
- .remove_new = sunlance_sbus_remove,
+ .remove = sunlance_sbus_remove,
};
module_platform_driver(sunlance_sbus_driver);
diff --git a/drivers/net/ethernet/amd/xgbe/Makefile b/drivers/net/ethernet/amd/xgbe/Makefile
index 620785ffbd51..5992f7fd4d9b 100644
--- a/drivers/net/ethernet/amd/xgbe/Makefile
+++ b/drivers/net/ethernet/amd/xgbe/Makefile
@@ -3,9 +3,9 @@ obj-$(CONFIG_AMD_XGBE) += amd-xgbe.o
amd-xgbe-objs := xgbe-main.o xgbe-drv.o xgbe-dev.o \
xgbe-desc.o xgbe-ethtool.o xgbe-mdio.o \
- xgbe-ptp.o \
+ xgbe-hwtstamp.o xgbe-ptp.o xgbe-pps.o \
xgbe-i2c.o xgbe-phy-v1.o xgbe-phy-v2.o \
- xgbe-platform.o
+ xgbe-platform.o xgbe-selftest.o
amd-xgbe-$(CONFIG_PCI) += xgbe-pci.o
amd-xgbe-$(CONFIG_AMD_XGBE_DCB) += xgbe-dcb.o
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index 3b70f6737633..62b01de93db4 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#ifndef __XGBE_COMMON_H__
@@ -332,7 +223,15 @@
#define MAC_TSSR 0x0d20
#define MAC_TXSNR 0x0d30
#define MAC_TXSSR 0x0d34
-
+#define MAC_TICNR 0x0d58
+#define MAC_TICSNR 0x0d5C
+#define MAC_TECNR 0x0d60
+#define MAC_TECSNR 0x0d64
+#define MAC_PPSCR 0x0d70
+#define MAC_PPS0_TTSR 0x0d80
+#define MAC_PPS0_TTNSR 0x0d84
+#define MAC_PPS0_INTERVAL 0x0d88
+#define MAC_PPS0_WIDTH 0x0d8C
#define MAC_QTFCR_INC 4
#define MAC_MACA_INC 4
#define MAC_HTR_INC 4
@@ -340,6 +239,18 @@
#define MAC_RQC2_INC 4
#define MAC_RQC2_Q_PER_REG 4
+/* PPS helpers */
+#define PPSEN0 BIT(4)
+#define MAC_PPSx_TTSR(x) ((MAC_PPS0_TTSR) + ((x) * 0x10))
+#define MAC_PPSx_TTNSR(x) ((MAC_PPS0_TTNSR) + ((x) * 0x10))
+#define MAC_PPSx_INTERVAL(x) ((MAC_PPS0_INTERVAL) + ((x) * 0x10))
+#define MAC_PPSx_WIDTH(x) ((MAC_PPS0_WIDTH) + ((x) * 0x10))
+#define PPS_MAXIDX(x) ((((x) + 1) * 8) - 1)
+#define PPS_MINIDX(x) ((x) * 8)
+#define XGBE_PPSCMD_STOP 0x5
+#define XGBE_PPSCMD_START 0x2
+#define XGBE_PPSTARGET_PULSE 0x2
+
/* MAC register entry bit positions and sizes */
#define MAC_HWF0R_ADDMACADRSEL_INDEX 18
#define MAC_HWF0R_ADDMACADRSEL_WIDTH 5
@@ -473,6 +384,10 @@
#define MAC_RCR_CST_WIDTH 1
#define MAC_RCR_DCRCC_INDEX 3
#define MAC_RCR_DCRCC_WIDTH 1
+#define MAC_RCR_GPSLCE_INDEX 6
+#define MAC_RCR_GPSLCE_WIDTH 1
+#define MAC_RCR_WD_INDEX 7
+#define MAC_RCR_WD_WIDTH 1
#define MAC_RCR_HDSMS_INDEX 12
#define MAC_RCR_HDSMS_WIDTH 3
#define MAC_RCR_IPC_INDEX 9
@@ -483,6 +398,8 @@
#define MAC_RCR_LM_WIDTH 1
#define MAC_RCR_RE_INDEX 0
#define MAC_RCR_RE_WIDTH 1
+#define MAC_RCR_GPSL_INDEX 16
+#define MAC_RCR_GPSL_WIDTH 14
#define MAC_RFCR_PFCE_INDEX 8
#define MAC_RFCR_PFCE_WIDTH 1
#define MAC_RFCR_RFE_INDEX 0
@@ -521,6 +438,8 @@
#define MAC_TCR_VNE_WIDTH 1
#define MAC_TCR_VNM_INDEX 25
#define MAC_TCR_VNM_WIDTH 1
+#define MAC_TCR_JD_INDEX 16
+#define MAC_TCR_JD_WIDTH 1
#define MAC_TIR_TNID_INDEX 0
#define MAC_TIR_TNID_WIDTH 16
#define MAC_TSCR_AV8021ASMEN_INDEX 28
@@ -529,6 +448,8 @@
#define MAC_TSCR_SNAPTYPSEL_WIDTH 2
#define MAC_TSCR_TSADDREG_INDEX 5
#define MAC_TSCR_TSADDREG_WIDTH 1
+#define MAC_TSCR_TSUPDT_INDEX 3
+#define MAC_TSCR_TSUPDT_WIDTH 1
#define MAC_TSCR_TSCFUPDT_INDEX 1
#define MAC_TSCR_TSCFUPDT_WIDTH 1
#define MAC_TSCR_TSCTRLSSR_INDEX 9
@@ -557,6 +478,10 @@
#define MAC_TSSR_TXTSC_WIDTH 1
#define MAC_TXSNR_TXTSSTSMIS_INDEX 31
#define MAC_TXSNR_TXTSSTSMIS_WIDTH 1
+#define MAC_TICSNR_TSICSNS_INDEX 8
+#define MAC_TICSNR_TSICSNS_WIDTH 8
+#define MAC_TECSNR_TSECSNS_INDEX 8
+#define MAC_TECSNR_TSECSNS_WIDTH 8
#define MAC_VLANHTR_VLHT_INDEX 0
#define MAC_VLANHTR_VLHT_WIDTH 16
#define MAC_VLANIR_VLTI_INDEX 20
@@ -587,8 +512,10 @@
#define MAC_VR_SNPSVER_WIDTH 8
#define MAC_VR_USERVER_INDEX 16
#define MAC_VR_USERVER_WIDTH 8
+#define MAC_PPSx_TTNSR_TRGTBUSY0_INDEX 31
+#define MAC_PPSx_TTNSR_TRGTBUSY0_WIDTH 1
-/* MMC register offsets */
+ /* MMC register offsets */
#define MMC_CR 0x0800
#define MMC_RISR 0x0804
#define MMC_TISR 0x0808
@@ -900,6 +827,11 @@
#define PCS_V2_RV_WINDOW_SELECT 0x1064
#define PCS_V2_YC_WINDOW_DEF 0x18060
#define PCS_V2_YC_WINDOW_SELECT 0x18064
+#define PCS_V3_RN_WINDOW_DEF 0xf8078
+#define PCS_V3_RN_WINDOW_SELECT 0xf807c
+
+#define PCS_RN_SMN_BASE_ADDR 0x11e00000
+#define PCS_RN_PORT_ADDR_SIZE 0x100000
/* PCS register entry bit positions and sizes */
#define PCS_V2_WINDOW_DEF_OFFSET_INDEX 6
@@ -1373,6 +1305,8 @@
#define MDIO_VEND2_CTRL1_SS13 BIT(13)
#endif
+#define XGBE_VEND2_MAC_AUTO_SW BIT(9)
+
/* MDIO mask values */
#define XGBE_AN_CL73_INT_CMPLT BIT(0)
#define XGBE_AN_CL73_INC_LINK BIT(1)
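The new PPS_MINIDX()/PPS_MAXIDX() helpers carve MAC_PPSCR into one 8-bit control field per PPS output, and XGBE_PPSCMD_START/STOP are the commands written into that field. A hypothetical helper showing how the pieces could fit together; the real programming sequence (including the MAC_PPSx_* target, interval and width registers) lives in the new xgbe-pps.c, which is not shown in this excerpt, and the PPSEN0 placement is assumed:

static void xgbe_pps_start_sketch(struct xgbe_prv_data *pdata, unsigned int idx)
{
	u32 ppscr = XGMAC_IOREAD(pdata, MAC_PPSCR);

	/* clear this output's 8-bit command/control field */
	ppscr &= ~GENMASK(PPS_MAXIDX(idx), PPS_MINIDX(idx));

	/* issue the start command for output 'idx' */
	ppscr |= XGBE_PPSCMD_START << PPS_MINIDX(idx);

	/* assumed: PPSEN0 selects the flexible, command-driven PPS mode */
	ppscr |= PPSEN0;

	XGMAC_IOWRITE(pdata, MAC_PPSCR, ppscr);
}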
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
index c68ace804e37..1474df5544fa 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/netdevice.h>
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
index b0a6c96b6ef4..d9157c4acde9 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/debugfs.h>
@@ -505,21 +396,6 @@ void xgbe_debugfs_exit(struct xgbe_prv_data *pdata)
void xgbe_debugfs_rename(struct xgbe_prv_data *pdata)
{
- char *buf;
-
- if (!pdata->xgbe_debugfs)
- return;
-
- buf = kasprintf(GFP_KERNEL, "amd-xgbe-%s", pdata->netdev->name);
- if (!buf)
- return;
-
- if (!strcmp(pdata->xgbe_debugfs->d_name.name, buf))
- goto out;
-
- debugfs_rename(pdata->xgbe_debugfs->d_parent, pdata->xgbe_debugfs,
- pdata->xgbe_debugfs->d_parent, buf);
-
-out:
- kfree(buf);
+ debugfs_change_name(pdata->xgbe_debugfs,
+ "amd-xgbe-%s", pdata->netdev->name);
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index 230726d7b74f..7c8a19988a52 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include "xgbe.h"
@@ -373,8 +264,13 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
}
/* Set up the header page info */
- xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
- XGBE_SKB_ALLOC_SIZE);
+ if (pdata->netdev->features & NETIF_F_RXCSUM) {
+ xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
+ XGBE_SKB_ALLOC_SIZE);
+ } else {
+ xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
+ pdata->rx_buf_size);
+ }
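The header-buffer sizing now depends on NETIF_F_RXCSUM presumably because, per the xgbe_init() and xgbe_set_features() hunks further down, split-header mode is only enabled while RXCSUM is on; with split header off, the first ("header") buffer must be able to hold a whole frame, so it is sized to rx_buf_size instead of XGBE_SKB_ALLOC_SIZE.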
/* Set up the buffer page info */
xgbe_set_buffer_data(&rdata->rx.buf, &ring->rx_buf_pa,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index f393228d41c7..b646ae575e6a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/phy.h>
@@ -120,9 +11,11 @@
#include <linux/bitrev.h>
#include <linux/crc32.h>
#include <linux/crc32poly.h>
+#include <linux/pci.h>
#include "xgbe.h"
#include "xgbe-common.h"
+#include "xgbe-smn.h"
static inline unsigned int xgbe_get_max_frame(struct xgbe_prv_data *pdata)
{
@@ -318,6 +211,20 @@ static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata)
}
XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE);
+ pdata->sph = true;
+}
+
+static void xgbe_disable_sph_mode(struct xgbe_prv_data *pdata)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, SPH, 0);
+ }
+ pdata->sph = false;
}
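xgbe_disable_sph_mode() is the counterpart of xgbe_config_sph_mode() above: it clears the per-channel SPH bit and records the state in pdata->sph. Both are exported through hw_if->enable_sph and hw_if->disable_sph in the function-pointer hunk below, so split-header handling can follow the NETIF_F_RXCSUM toggle in xgbe_set_features().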
static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type,
@@ -1150,18 +1057,19 @@ static int xgbe_set_gpio(struct xgbe_prv_data *pdata, unsigned int gpio)
return 0;
}
-static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
- int mmd_reg)
+static unsigned int xgbe_get_mmd_address(struct xgbe_prv_data *pdata,
+ int mmd_reg)
{
- unsigned long flags;
- unsigned int mmd_address, index, offset;
- int mmd_data;
-
- if (mmd_reg & XGBE_ADDR_C45)
- mmd_address = mmd_reg & ~XGBE_ADDR_C45;
- else
- mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+ return (mmd_reg & XGBE_ADDR_C45) ?
+ mmd_reg & ~XGBE_ADDR_C45 :
+ (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+}
+
+static void xgbe_get_pcs_index_and_offset(struct xgbe_prv_data *pdata,
+ unsigned int mmd_address,
+ unsigned int *index,
+ unsigned int *offset)
+{
/* The PCS registers are accessed using mmio. The underlying
* management interface uses indirect addressing to access the MMD
* register sets. This requires accessing of the PCS register in two
@@ -1172,8 +1080,98 @@ static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
* offset 1 bit and reading 16 bits of data.
*/
mmd_address <<= 1;
- index = mmd_address & ~pdata->xpcs_window_mask;
- offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
+ *index = mmd_address & ~pdata->xpcs_window_mask;
+ *offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
+}
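A worked example of the split above (all values illustrative, not taken from the patch): for a non-C45 access with mdio_mmd = 1 and register 0x0007, mmd_address is 0x10007 and becomes 0x2000e after the left shift. With a hypothetical xpcs_window_mask of 0x3ff and xpcs_window of 0x1000, index = 0x2000e & ~0x3ff = 0x20000 (written to the window-select register) and offset = 0x1000 + (0x2000e & 0x3ff) = 0x100e, from which 16 bits are then read or written.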
+
+static int xgbe_read_mmd_regs_v3(struct xgbe_prv_data *pdata, int prtad,
+ int mmd_reg)
+{
+ unsigned int mmd_address, index, offset;
+ u32 smn_address;
+ int mmd_data;
+ int ret;
+
+ mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);
+
+ xgbe_get_pcs_index_and_offset(pdata, mmd_address, &index, &offset);
+
+ smn_address = pdata->smn_base + pdata->xpcs_window_sel_reg;
+ ret = amd_smn_write(0, smn_address, index);
+ if (ret)
+ return ret;
+
+ ret = amd_smn_read(0, pdata->smn_base + offset, &mmd_data);
+ if (ret)
+ return ret;
+
+ mmd_data = (offset % 4) ? FIELD_GET(XGBE_GEN_HI_MASK, mmd_data) :
+ FIELD_GET(XGBE_GEN_LO_MASK, mmd_data);
+
+ return mmd_data;
+}
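The SMN read returns a 32-bit word holding two adjacent 16-bit PCS registers, so an offset that is not 4-byte aligned selects the upper half. A minimal sketch of that selection, assuming XGBE_GEN_HI_MASK and XGBE_GEN_LO_MASK are GENMASK(31, 16) and GENMASK(15, 0) respectively (their definitions are not part of this hunk):

#include <linux/bitfield.h>

/* Illustrative only: mirrors the hi/lo selection in xgbe_read_mmd_regs_v3() */
static u16 pick_mmd_half(u32 word, unsigned int offset)
{
	/* A 16-bit register at an odd offset lives in the upper half */
	return (offset % 4) ? FIELD_GET(GENMASK(31, 16), word)
			    : FIELD_GET(GENMASK(15, 0), word);
}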
+
+static void xgbe_write_mmd_regs_v3(struct xgbe_prv_data *pdata, int prtad,
+ int mmd_reg, int mmd_data)
+{
+ unsigned int pci_mmd_data, hi_mask, lo_mask;
+ unsigned int mmd_address, index, offset;
+ struct pci_dev *dev;
+ u32 smn_address;
+ int ret;
+
+ dev = pdata->pcidev;
+ mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);
+
+ xgbe_get_pcs_index_and_offset(pdata, mmd_address, &index, &offset);
+
+ smn_address = pdata->smn_base + pdata->xpcs_window_sel_reg;
+ ret = amd_smn_write(0, smn_address, index);
+ if (ret) {
+		pci_err(dev, "Failed to write window index 0x%x\n", index);
+ return;
+ }
+
+ ret = amd_smn_read(0, pdata->smn_base + offset, &pci_mmd_data);
+ if (ret) {
+ pci_err(dev, "Failed to read data\n");
+ return;
+ }
+
+ if (offset % 4) {
+ hi_mask = FIELD_PREP(XGBE_GEN_HI_MASK, mmd_data);
+ lo_mask = FIELD_GET(XGBE_GEN_LO_MASK, pci_mmd_data);
+ } else {
+ hi_mask = FIELD_PREP(XGBE_GEN_HI_MASK,
+ FIELD_GET(XGBE_GEN_HI_MASK, pci_mmd_data));
+ lo_mask = FIELD_GET(XGBE_GEN_LO_MASK, mmd_data);
+ }
+
+ pci_mmd_data = hi_mask | lo_mask;
+
+ ret = amd_smn_write(0, smn_address, index);
+ if (ret) {
+		pci_err(dev, "Failed to write window index 0x%x\n", index);
+ return;
+ }
+
+ ret = amd_smn_write(0, (pdata->smn_base + offset), pci_mmd_data);
+ if (ret) {
+ pci_err(dev, "Failed to write data 0x%x\n", pci_mmd_data);
+ return;
+ }
+}
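The write path is the read-modify-write counterpart: it selects the window, reads the current 32-bit word over SMN, merges the new 16-bit value into the half indicated by offset % 4 while preserving the other half, then re-selects the window and writes the merged word back.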
+
+static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
+ int mmd_reg)
+{
+ unsigned int mmd_address, index, offset;
+ unsigned long flags;
+ int mmd_data;
+
+ mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);
+
+ xgbe_get_pcs_index_and_offset(pdata, mmd_address, &index, &offset);
spin_lock_irqsave(&pdata->xpcs_lock, flags);
XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
@@ -1189,23 +1187,9 @@ static void xgbe_write_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
unsigned long flags;
unsigned int mmd_address, index, offset;
- if (mmd_reg & XGBE_ADDR_C45)
- mmd_address = mmd_reg & ~XGBE_ADDR_C45;
- else
- mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+ mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);
- /* The PCS registers are accessed using mmio. The underlying
- * management interface uses indirect addressing to access the MMD
- * register sets. This requires accessing of the PCS register in two
- * phases, an address phase and a data phase.
- *
- * The mmio interface is based on 16-bit offsets and values. All
- * register offsets must therefore be adjusted by left shifting the
- * offset 1 bit and writing 16 bits of data.
- */
- mmd_address <<= 1;
- index = mmd_address & ~pdata->xpcs_window_mask;
- offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
+ xgbe_get_pcs_index_and_offset(pdata, mmd_address, &index, &offset);
spin_lock_irqsave(&pdata->xpcs_lock, flags);
XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
@@ -1220,10 +1204,7 @@ static int xgbe_read_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad,
unsigned int mmd_address;
int mmd_data;
- if (mmd_reg & XGBE_ADDR_C45)
- mmd_address = mmd_reg & ~XGBE_ADDR_C45;
- else
- mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+ mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);
/* The PCS registers are accessed using mmio. The underlying APB3
* management interface uses indirect addressing to access the MMD
@@ -1248,10 +1229,7 @@ static void xgbe_write_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad,
unsigned int mmd_address;
unsigned long flags;
- if (mmd_reg & XGBE_ADDR_C45)
- mmd_address = mmd_reg & ~XGBE_ADDR_C45;
- else
- mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+ mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);
/* The PCS registers are accessed using mmio. The underlying APB3
* management interface uses indirect addressing to access the MMD
@@ -1278,6 +1256,9 @@ static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
case XGBE_XPCS_ACCESS_V2:
default:
return xgbe_read_mmd_regs_v2(pdata, prtad, mmd_reg);
+
+ case XGBE_XPCS_ACCESS_V3:
+ return xgbe_read_mmd_regs_v3(pdata, prtad, mmd_reg);
}
}
@@ -1288,6 +1269,9 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
case XGBE_XPCS_ACCESS_V1:
return xgbe_write_mmd_regs_v1(pdata, prtad, mmd_reg, mmd_data);
+ case XGBE_XPCS_ACCESS_V3:
+ return xgbe_write_mmd_regs_v3(pdata, prtad, mmd_reg, mmd_data);
+
case XGBE_XPCS_ACCESS_V2:
default:
return xgbe_write_mmd_regs_v2(pdata, prtad, mmd_reg, mmd_data);
@@ -1576,125 +1560,6 @@ static void xgbe_rx_desc_init(struct xgbe_channel *channel)
DBGPR("<--rx_desc_init\n");
}
-static void xgbe_update_tstamp_addend(struct xgbe_prv_data *pdata,
- unsigned int addend)
-{
- unsigned int count = 10000;
-
- /* Set the addend register value and tell the device */
- XGMAC_IOWRITE(pdata, MAC_TSAR, addend);
- XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 1);
-
- /* Wait for addend update to complete */
- while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG))
- udelay(5);
-
- if (!count)
- netdev_err(pdata->netdev,
- "timed out updating timestamp addend register\n");
-}
-
-static void xgbe_set_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
- unsigned int nsec)
-{
- unsigned int count = 10000;
-
- /* Set the time values and tell the device */
- XGMAC_IOWRITE(pdata, MAC_STSUR, sec);
- XGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
- XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSINIT, 1);
-
- /* Wait for time update to complete */
- while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT))
- udelay(5);
-
- if (!count)
- netdev_err(pdata->netdev, "timed out initializing timestamp\n");
-}
-
-static u64 xgbe_get_tstamp_time(struct xgbe_prv_data *pdata)
-{
- u64 nsec;
-
- nsec = XGMAC_IOREAD(pdata, MAC_STSR);
- nsec *= NSEC_PER_SEC;
- nsec += XGMAC_IOREAD(pdata, MAC_STNR);
-
- return nsec;
-}
-
-static u64 xgbe_get_tx_tstamp(struct xgbe_prv_data *pdata)
-{
- unsigned int tx_snr, tx_ssr;
- u64 nsec;
-
- if (pdata->vdata->tx_tstamp_workaround) {
- tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
- tx_ssr = XGMAC_IOREAD(pdata, MAC_TXSSR);
- } else {
- tx_ssr = XGMAC_IOREAD(pdata, MAC_TXSSR);
- tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
- }
-
- if (XGMAC_GET_BITS(tx_snr, MAC_TXSNR, TXTSSTSMIS))
- return 0;
-
- nsec = tx_ssr;
- nsec *= NSEC_PER_SEC;
- nsec += tx_snr;
-
- return nsec;
-}
-
-static void xgbe_get_rx_tstamp(struct xgbe_packet_data *packet,
- struct xgbe_ring_desc *rdesc)
-{
- u64 nsec;
-
- if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSA) &&
- !XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSD)) {
- nsec = le32_to_cpu(rdesc->desc1);
- nsec <<= 32;
- nsec |= le32_to_cpu(rdesc->desc0);
- if (nsec != 0xffffffffffffffffULL) {
- packet->rx_tstamp = nsec;
- XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
- RX_TSTAMP, 1);
- }
- }
-}
-
-static int xgbe_config_tstamp(struct xgbe_prv_data *pdata,
- unsigned int mac_tscr)
-{
- /* Set one nano-second accuracy */
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCTRLSSR, 1);
-
- /* Set fine timestamp update */
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 1);
-
- /* Overwrite earlier timestamps */
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TXTSSTSM, 1);
-
- XGMAC_IOWRITE(pdata, MAC_TSCR, mac_tscr);
-
- /* Exit if timestamping is not enabled */
- if (!XGMAC_GET_BITS(mac_tscr, MAC_TSCR, TSENA))
- return 0;
-
- /* Initialize time registers */
- XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC, XGBE_TSTAMP_SSINC);
- XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC, XGBE_TSTAMP_SNSINC);
- xgbe_update_tstamp_addend(pdata, pdata->tstamp_addend);
- xgbe_set_tstamp_time(pdata, 0, 0);
-
- /* Initialize the timecounter */
- timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc,
- ktime_to_ns(ktime_get_real()));
-
- return 0;
-}
-
static void xgbe_tx_start_xmit(struct xgbe_channel *channel,
struct xgbe_ring *ring)
{
@@ -2868,9 +2733,19 @@ static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata)
{
unsigned int val;
- val = (pdata->netdev->mtu > XGMAC_STD_PACKET_MTU) ? 1 : 0;
-
- XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
+ if (pdata->netdev->mtu > XGMAC_JUMBO_PACKET_MTU) {
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, GPSL,
+ XGMAC_GIANT_PACKET_MTU);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, WD, 1);
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, JD, 1);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, GPSLCE, 1);
+ } else {
+ val = pdata->netdev->mtu > XGMAC_STD_PACKET_MTU ? 1 : 0;
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, GPSLCE, 0);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, WD, 0);
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, JD, 0);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
+ }
}
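With this change, an MTU above XGMAC_JUMBO_PACKET_MTU switches the MAC into a giant-packet mode: the giant packet size limit (GPSL) is programmed, GPSLCE is enabled, and the WD and JD bits are set, which in the DWC XGMAC disable the receive-watchdog and transmit-jabber length checks so oversized frames are not truncated (an inference from the register names, not spelled out in the patch). At or below the jumbo threshold the previous behaviour is kept: JE is set only when the MTU exceeds the standard packet size.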
static void xgbe_config_mac_speed(struct xgbe_prv_data *pdata)
@@ -3545,8 +3420,12 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
xgbe_config_tx_coalesce(pdata);
xgbe_config_rx_buffer_size(pdata);
xgbe_config_tso_mode(pdata);
- xgbe_config_sph_mode(pdata);
- xgbe_config_rss(pdata);
+
+ if (pdata->netdev->features & NETIF_F_RXCSUM) {
+ xgbe_config_sph_mode(pdata);
+ xgbe_config_rss(pdata);
+ }
+
desc_if->wrapper_tx_desc_init(pdata);
desc_if->wrapper_rx_desc_init(pdata);
xgbe_enable_dma_interrupts(pdata);
@@ -3675,13 +3554,6 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
hw_if->rx_mmc_int = xgbe_rx_mmc_int;
hw_if->read_mmc_stats = xgbe_read_mmc_stats;
- /* For PTP config */
- hw_if->config_tstamp = xgbe_config_tstamp;
- hw_if->update_tstamp_addend = xgbe_update_tstamp_addend;
- hw_if->set_tstamp_time = xgbe_set_tstamp_time;
- hw_if->get_tstamp_time = xgbe_get_tstamp_time;
- hw_if->get_tx_tstamp = xgbe_get_tx_tstamp;
-
/* For Data Center Bridging config */
hw_if->config_tc = xgbe_config_tc;
hw_if->config_dcb_tc = xgbe_config_dcb_tc;
@@ -3702,5 +3574,26 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
hw_if->disable_vxlan = xgbe_disable_vxlan;
hw_if->set_vxlan_id = xgbe_set_vxlan_id;
+	/* For Split Header */
+ hw_if->enable_sph = xgbe_config_sph_mode;
+ hw_if->disable_sph = xgbe_disable_sph_mode;
+
DBGPR("<--xgbe_init_function_ptrs\n");
}
+
+int xgbe_enable_mac_loopback(struct xgbe_prv_data *pdata)
+{
+ /* Enable MAC loopback mode */
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, LM, 1);
+
+ /* Wait for loopback to stabilize */
+ usleep_range(10, 15);
+
+ return 0;
+}
+
+void xgbe_disable_mac_loopback(struct xgbe_prv_data *pdata)
+{
+ /* Disable MAC loopback mode */
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, LM, 0);
+}
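A hypothetical caller of the new loopback helpers, e.g. from a self-test path; names and flow here are illustrative and not part of this patch:

static int xgbe_run_loopback_test(struct xgbe_prv_data *pdata)
{
	int ret;

	/* Put the MAC into internal loopback */
	ret = xgbe_enable_mac_loopback(pdata);
	if (ret)
		return ret;

	/* ...transmit a known frame and verify it comes back... */

	/* Restore normal operation */
	xgbe_disable_mac_loopback(pdata);

	return 0;
}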
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 6b73648b3779..3ddd896d6987 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/module.h>
@@ -403,9 +294,9 @@ static bool xgbe_ecc_ded(struct xgbe_prv_data *pdata, unsigned long *period,
return false;
}
-static void xgbe_ecc_isr_task(struct tasklet_struct *t)
+static void xgbe_ecc_isr_bh_work(struct work_struct *work)
{
- struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_ecc);
+ struct xgbe_prv_data *pdata = from_work(pdata, work, ecc_bh_work);
unsigned int ecc_isr;
bool stop = false;
@@ -465,21 +356,22 @@ static irqreturn_t xgbe_ecc_isr(int irq, void *data)
{
struct xgbe_prv_data *pdata = data;
- if (pdata->isr_as_tasklet)
- tasklet_schedule(&pdata->tasklet_ecc);
+ if (pdata->isr_as_bh_work)
+ queue_work(system_bh_wq, &pdata->ecc_bh_work);
else
- xgbe_ecc_isr_task(&pdata->tasklet_ecc);
+ xgbe_ecc_isr_bh_work(&pdata->ecc_bh_work);
return IRQ_HANDLED;
}
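The tasklet-to-BH-workqueue conversion follows one pattern throughout this patch; a minimal self-contained sketch with illustrative names (not the driver's):

#include <linux/interrupt.h>
#include <linux/printk.h>
#include <linux/workqueue.h>

struct foo_priv {
	struct work_struct irq_bh_work;
	bool isr_as_bh_work;
};

static void foo_irq_bh_work(struct work_struct *work)
{
	struct foo_priv *priv = from_work(priv, work, irq_bh_work);

	/* Bottom-half processing that used to live in the tasklet */
	pr_debug("bh work for %p\n", priv);
}

static irqreturn_t foo_isr(int irq, void *data)
{
	struct foo_priv *priv = data;

	if (priv->isr_as_bh_work)
		queue_work(system_bh_wq, &priv->irq_bh_work);
	else
		foo_irq_bh_work(&priv->irq_bh_work);

	return IRQ_HANDLED;
}

INIT_WORK() replaces tasklet_setup() at request time and cancel_work_sync() replaces tasklet_kill() on teardown, as the xgbe_request_irqs() and xgbe_free_irqs() hunks below show.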
-static void xgbe_isr_task(struct tasklet_struct *t)
+static void xgbe_isr_bh_work(struct work_struct *work)
{
- struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_dev);
+ struct xgbe_prv_data *pdata = from_work(pdata, work, dev_bh_work);
+ unsigned int mac_isr, mac_tssr, mac_mdioisr;
struct xgbe_hw_if *hw_if = &pdata->hw_if;
- struct xgbe_channel *channel;
+ bool per_ch_irq, ti, ri, rbu, fbe;
unsigned int dma_isr, dma_ch_isr;
- unsigned int mac_isr, mac_tssr, mac_mdioisr;
+ struct xgbe_channel *channel;
unsigned int i;
/* The DMA interrupt status register also reports MAC and MTL
@@ -493,43 +385,73 @@ static void xgbe_isr_task(struct tasklet_struct *t)
netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr);
for (i = 0; i < pdata->channel_count; i++) {
+ bool schedule_napi = false;
+ struct napi_struct *napi;
+
if (!(dma_isr & (1 << i)))
continue;
channel = pdata->channel[i];
dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
+
+ /* Precompute flags once */
+ ti = !!XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI);
+ ri = !!XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI);
+ rbu = !!XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RBU);
+ fbe = !!XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE);
+
netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n",
i, dma_ch_isr);
- /* The TI or RI interrupt bits may still be set even if using
- * per channel DMA interrupts. Check to be sure those are not
- * enabled before using the private data napi structure.
+ per_ch_irq = pdata->per_channel_irq;
+
+ /*
+ * Decide which NAPI to use and whether to schedule:
+ * - When not using per-channel IRQs: schedule on global NAPI
+ * if TI or RI are set.
+ * - RBU should also trigger NAPI (either per-channel or global)
+ * to allow refill.
*/
- if (!pdata->per_channel_irq &&
- (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
- XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI))) {
- if (napi_schedule_prep(&pdata->napi)) {
- /* Disable Tx and Rx interrupts */
- xgbe_disable_rx_tx_ints(pdata);
+ if (!per_ch_irq && (ti || ri))
+ schedule_napi = true;
+
+ if (rbu) {
+ schedule_napi = true;
+ pdata->ext_stats.rx_buffer_unavailable++;
+ }
- /* Turn on polling */
- __napi_schedule(&pdata->napi);
+ napi = per_ch_irq ? &channel->napi : &pdata->napi;
+
+ if (schedule_napi && napi_schedule_prep(napi)) {
+ /* Disable interrupts appropriately before polling */
+ if (per_ch_irq) {
+ if (pdata->channel_irq_mode)
+ xgbe_disable_rx_tx_int(pdata, channel);
+ else
+ disable_irq_nosync(channel->dma_irq);
+ } else {
+ xgbe_disable_rx_tx_ints(pdata);
}
+
+ /* Turn on polling */
+ __napi_schedule(napi);
} else {
- /* Don't clear Rx/Tx status if doing per channel DMA
- * interrupts, these will be cleared by the ISR for
- * per channel DMA interrupts.
+			/*
+			 * Don't clear Rx/Tx status if doing per-channel DMA
+			 * interrupts; those bits are serviced and cleared by
+			 * the per-channel ISR/NAPI. Otherwise, when NAPI is
+			 * not being scheduled here, zero TI/RI in the local
+			 * copy so the write-back below does not clear them
+			 * in hardware.
+			 */
XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, TI, 0);
XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, RI, 0);
}
- if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RBU))
- pdata->ext_stats.rx_buffer_unavailable++;
-
/* Restart the device on a Fatal Bus Error */
- if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
+ if (fbe)
schedule_work(&pdata->restart_work);
/* Clear interrupt signals */
@@ -557,7 +479,7 @@ static void xgbe_isr_task(struct tasklet_struct *t)
if (XGMAC_GET_BITS(mac_tssr, MAC_TSSR, TXTSC)) {
/* Read Tx Timestamp to clear interrupt */
pdata->tx_tstamp =
- hw_if->get_tx_tstamp(pdata);
+ xgbe_get_tx_tstamp(pdata);
queue_work(pdata->dev_workqueue,
&pdata->tx_tstamp_work);
}
@@ -582,7 +504,7 @@ isr_done:
/* If there is not a separate ECC irq, handle it here */
if (pdata->vdata->ecc_support && (pdata->dev_irq == pdata->ecc_irq))
- xgbe_ecc_isr_task(&pdata->tasklet_ecc);
+ xgbe_ecc_isr_bh_work(&pdata->ecc_bh_work);
/* If there is not a separate I2C irq, handle it here */
if (pdata->vdata->i2c_support && (pdata->dev_irq == pdata->i2c_irq))
@@ -604,10 +526,10 @@ static irqreturn_t xgbe_isr(int irq, void *data)
{
struct xgbe_prv_data *pdata = data;
- if (pdata->isr_as_tasklet)
- tasklet_schedule(&pdata->tasklet_dev);
+ if (pdata->isr_as_bh_work)
+ queue_work(system_bh_wq, &pdata->dev_bh_work);
else
- xgbe_isr_task(&pdata->tasklet_dev);
+ xgbe_isr_bh_work(&pdata->dev_bh_work);
return IRQ_HANDLED;
}
@@ -643,7 +565,8 @@ static irqreturn_t xgbe_dma_isr(int irq, void *data)
static void xgbe_tx_timer(struct timer_list *t)
{
- struct xgbe_channel *channel = from_timer(channel, t, tx_timer);
+ struct xgbe_channel *channel = timer_container_of(channel, t,
+ tx_timer);
struct xgbe_prv_data *pdata = channel->pdata;
struct napi_struct *napi;
@@ -681,7 +604,8 @@ static void xgbe_service(struct work_struct *work)
static void xgbe_service_timer(struct timer_list *t)
{
- struct xgbe_prv_data *pdata = from_timer(pdata, t, service_timer);
+ struct xgbe_prv_data *pdata = timer_container_of(pdata, t,
+ service_timer);
struct xgbe_channel *channel;
unsigned int i;
@@ -728,7 +652,7 @@ static void xgbe_stop_timers(struct xgbe_prv_data *pdata)
struct xgbe_channel *channel;
unsigned int i;
- del_timer_sync(&pdata->service_timer);
+ timer_delete_sync(&pdata->service_timer);
for (i = 0; i < pdata->channel_count; i++) {
channel = pdata->channel[i];
@@ -736,7 +660,7 @@ static void xgbe_stop_timers(struct xgbe_prv_data *pdata)
break;
/* Deactivate the Tx timer */
- del_timer_sync(&channel->tx_timer);
+ timer_delete_sync(&channel->tx_timer);
channel->tx_timer_active = 0;
}
}
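The from_timer() to timer_container_of() and del_timer_sync() to timer_delete_sync() changes are mechanical renames to the current timer API; arguments and semantics (including waiting for a running callback to finish in the sync delete) are unchanged.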
@@ -798,6 +722,21 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
hw_feat->pps_out_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM);
+ /* Sanity check and warn if hardware reports more than supported */
+ if (hw_feat->pps_out_num > XGBE_MAX_PPS_OUT) {
+ dev_warn(pdata->dev,
+ "Hardware reports %u PPS outputs, limiting to %u\n",
+ hw_feat->pps_out_num, XGBE_MAX_PPS_OUT);
+ hw_feat->pps_out_num = XGBE_MAX_PPS_OUT;
+ }
+
+ if (hw_feat->aux_snap_num > XGBE_MAX_AUX_SNAP) {
+ dev_warn(pdata->dev,
+ "Hardware reports %u aux snapshot inputs, limiting to %u\n",
+ hw_feat->aux_snap_num, XGBE_MAX_AUX_SNAP);
+ hw_feat->aux_snap_num = XGBE_MAX_AUX_SNAP;
+ }
+
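The clamping to XGBE_MAX_PPS_OUT and XGBE_MAX_AUX_SNAP presumably keeps later per-output and per-snapshot bookkeeping within driver-sized arrays; the macros and the arrays they size are introduced elsewhere in this series and are not visible in this hunk.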
/* Translate the Hash Table size into actual number */
switch (hw_feat->hash_table_size) {
case 0:
@@ -1007,8 +946,8 @@ static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
unsigned int i;
int ret;
- tasklet_setup(&pdata->tasklet_dev, xgbe_isr_task);
- tasklet_setup(&pdata->tasklet_ecc, xgbe_ecc_isr_task);
+ INIT_WORK(&pdata->dev_bh_work, xgbe_isr_bh_work);
+ INIT_WORK(&pdata->ecc_bh_work, xgbe_ecc_isr_bh_work);
ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
netdev_name(netdev), pdata);
@@ -1078,8 +1017,8 @@ static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
- tasklet_kill(&pdata->tasklet_dev);
- tasklet_kill(&pdata->tasklet_ecc);
+ cancel_work_sync(&pdata->dev_bh_work);
+ cancel_work_sync(&pdata->ecc_bh_work);
if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq))
devm_free_irq(pdata->dev, pdata->ecc_irq, pdata);
@@ -1172,7 +1111,6 @@ static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
static int xgbe_phy_reset(struct xgbe_prv_data *pdata)
{
- pdata->phy_link = -1;
pdata->phy_speed = SPEED_UNKNOWN;
return pdata->phy_if.phy_reset(pdata);
@@ -1352,6 +1290,11 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
udp_tunnel_nic_reset_ntf(netdev);
+ /* Reset the phy settings */
+ ret = xgbe_phy_reset(pdata);
+ if (ret)
+ goto err_txrx;
+
netif_tx_start_all_queues(netdev);
xgbe_start_timers(pdata);
@@ -1361,6 +1304,10 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
return 0;
+err_txrx:
+ hw_if->disable_rx(pdata);
+ hw_if->disable_tx(pdata);
+
err_irqs:
xgbe_free_irqs(pdata);
@@ -1478,199 +1425,6 @@ static void xgbe_restart(struct work_struct *work)
rtnl_unlock();
}
-static void xgbe_tx_tstamp(struct work_struct *work)
-{
- struct xgbe_prv_data *pdata = container_of(work,
- struct xgbe_prv_data,
- tx_tstamp_work);
- struct skb_shared_hwtstamps hwtstamps;
- u64 nsec;
- unsigned long flags;
-
- spin_lock_irqsave(&pdata->tstamp_lock, flags);
- if (!pdata->tx_tstamp_skb)
- goto unlock;
-
- if (pdata->tx_tstamp) {
- nsec = timecounter_cyc2time(&pdata->tstamp_tc,
- pdata->tx_tstamp);
-
- memset(&hwtstamps, 0, sizeof(hwtstamps));
- hwtstamps.hwtstamp = ns_to_ktime(nsec);
- skb_tstamp_tx(pdata->tx_tstamp_skb, &hwtstamps);
- }
-
- dev_kfree_skb_any(pdata->tx_tstamp_skb);
-
- pdata->tx_tstamp_skb = NULL;
-
-unlock:
- spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
-}
-
-static int xgbe_get_hwtstamp_settings(struct xgbe_prv_data *pdata,
- struct ifreq *ifreq)
-{
- if (copy_to_user(ifreq->ifr_data, &pdata->tstamp_config,
- sizeof(pdata->tstamp_config)))
- return -EFAULT;
-
- return 0;
-}
-
-static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
- struct ifreq *ifreq)
-{
- struct hwtstamp_config config;
- unsigned int mac_tscr;
-
- if (copy_from_user(&config, ifreq->ifr_data, sizeof(config)))
- return -EFAULT;
-
- mac_tscr = 0;
-
- switch (config.tx_type) {
- case HWTSTAMP_TX_OFF:
- break;
-
- case HWTSTAMP_TX_ON:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- default:
- return -ERANGE;
- }
-
- switch (config.rx_filter) {
- case HWTSTAMP_FILTER_NONE:
- break;
-
- case HWTSTAMP_FILTER_NTP_ALL:
- case HWTSTAMP_FILTER_ALL:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* PTP v2, UDP, any kind of event packet */
- case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- fallthrough; /* to PTP v1, UDP, any kind of event packet */
- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* PTP v2, UDP, Sync packet */
- case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- fallthrough; /* to PTP v1, UDP, Sync packet */
- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* PTP v2, UDP, Delay_req packet */
- case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- fallthrough; /* to PTP v1, UDP, Delay_req packet */
- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* 802.AS1, Ethernet, any kind of event packet */
- case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* 802.AS1, Ethernet, Sync packet */
- case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* 802.AS1, Ethernet, Delay_req packet */
- case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* PTP v2/802.AS1, any layer, any kind of event packet */
- case HWTSTAMP_FILTER_PTP_V2_EVENT:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* PTP v2/802.AS1, any layer, Sync packet */
- case HWTSTAMP_FILTER_PTP_V2_SYNC:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* PTP v2/802.AS1, any layer, Delay_req packet */
- case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- default:
- return -ERANGE;
- }
-
- pdata->hw_if.config_tstamp(pdata, mac_tscr);
-
- memcpy(&pdata->tstamp_config, &config, sizeof(config));
-
- return 0;
-}
-
-static void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
- struct sk_buff *skb,
- struct xgbe_packet_data *packet)
-{
- unsigned long flags;
-
- if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) {
- spin_lock_irqsave(&pdata->tstamp_lock, flags);
- if (pdata->tx_tstamp_skb) {
- /* Another timestamp in progress, ignore this one */
- XGMAC_SET_BITS(packet->attributes,
- TX_PACKET_ATTRIBUTES, PTP, 0);
- } else {
- pdata->tx_tstamp_skb = skb_get(skb);
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- }
- spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
- }
-
- skb_tx_timestamp(skb);
-}
-
static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
{
if (skb_vlan_tag_present(skb))
@@ -1860,11 +1614,6 @@ static int xgbe_open(struct net_device *netdev)
goto err_dev_wq;
}
- /* Reset the phy settings */
- ret = xgbe_phy_reset(pdata);
- if (ret)
- goto err_an_wq;
-
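Together with the xgbe_start() hunk above, this moves the PHY reset from open time into the start path, so it also runs on every restart; a failure now unwinds through the new err_txrx label (disabling Rx and Tx, then freeing IRQs) instead of the old err_an_wq path.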
/* Enable the clocks */
ret = clk_prepare_enable(pdata->sysclk);
if (ret) {
@@ -1883,6 +1632,9 @@ static int xgbe_open(struct net_device *netdev)
INIT_WORK(&pdata->stopdev_work, xgbe_stopdev);
INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
+ /* Initialize PTP timestamping and clock. */
+ xgbe_init_ptp(pdata);
+
ret = xgbe_alloc_memory(pdata);
if (ret)
goto err_ptpclk;
@@ -2037,27 +1789,6 @@ static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
return 0;
}
-static int xgbe_ioctl(struct net_device *netdev, struct ifreq *ifreq, int cmd)
-{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
- int ret;
-
- switch (cmd) {
- case SIOCGHWTSTAMP:
- ret = xgbe_get_hwtstamp_settings(pdata, ifreq);
- break;
-
- case SIOCSHWTSTAMP:
- ret = xgbe_set_hwtstamp_settings(pdata, ifreq);
- break;
-
- default:
- ret = -EOPNOTSUPP;
- }
-
- return ret;
-}
-
static int xgbe_change_mtu(struct net_device *netdev, int mtu)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
@@ -2070,7 +1801,7 @@ static int xgbe_change_mtu(struct net_device *netdev, int mtu)
return ret;
pdata->rx_buf_size = ret;
- netdev->mtu = mtu;
+ WRITE_ONCE(netdev->mtu, mtu);
xgbe_restart_dev(pdata);
@@ -2257,10 +1988,17 @@ static int xgbe_set_features(struct net_device *netdev,
if (ret)
return ret;
- if ((features & NETIF_F_RXCSUM) && !rxcsum)
+ if ((features & NETIF_F_RXCSUM) && !rxcsum) {
+ hw_if->enable_sph(pdata);
+ hw_if->enable_vxlan(pdata);
hw_if->enable_rx_csum(pdata);
- else if (!(features & NETIF_F_RXCSUM) && rxcsum)
+ schedule_work(&pdata->restart_work);
+ } else if (!(features & NETIF_F_RXCSUM) && rxcsum) {
+ hw_if->disable_sph(pdata);
+ hw_if->disable_vxlan(pdata);
hw_if->disable_rx_csum(pdata);
+ schedule_work(&pdata->restart_work);
+ }
if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
hw_if->enable_rx_vlan_stripping(pdata);
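With this change, toggling receive checksum offload also toggles split-header and VXLAN offload and schedules a full device restart; for example, "ethtool -K <iface> rx off" now takes the disable path above rather than only clearing the RX checksum enable bit.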
@@ -2296,7 +2034,6 @@ static const struct net_device_ops xgbe_netdev_ops = {
.ndo_set_rx_mode = xgbe_set_rx_mode,
.ndo_set_mac_address = xgbe_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_eth_ioctl = xgbe_ioctl,
.ndo_change_mtu = xgbe_change_mtu,
.ndo_tx_timeout = xgbe_tx_timeout,
.ndo_get_stats64 = xgbe_get_stats64,
@@ -2309,6 +2046,8 @@ static const struct net_device_ops xgbe_netdev_ops = {
.ndo_fix_features = xgbe_fix_features,
.ndo_set_features = xgbe_set_features,
.ndo_features_check = xgbe_features_check,
+ .ndo_hwtstamp_get = xgbe_get_hwtstamp_settings,
+ .ndo_hwtstamp_set = xgbe_set_hwtstamp_settings,
};
const struct net_device_ops *xgbe_get_netdev_ops(void)
@@ -2646,12 +2385,8 @@ skip_data:
if (XGMAC_GET_BITS(packet->attributes,
RX_PACKET_ATTRIBUTES, RX_TSTAMP)) {
- u64 nsec;
-
- nsec = timecounter_cyc2time(&pdata->tstamp_tc,
- packet->rx_tstamp);
hwtstamps = skb_hwtstamps(skb);
- hwtstamps->hwtstamp = ns_to_ktime(nsec);
+ hwtstamps->hwtstamp = ns_to_ktime(packet->rx_tstamp);
}
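packet->rx_tstamp is now consumed as nanoseconds directly: the timecounter_cyc2time() conversion is gone from the hot path, which suggests the value is already converted where it is captured, presumably in the PTP code reworked elsewhere in this series.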
if (XGMAC_GET_BITS(packet->attributes,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index 58e7e88aae5b..0d19b09497a0 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/spinlock.h>
@@ -194,24 +85,23 @@ static void xgbe_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
int i;
switch (stringset) {
+ case ETH_SS_TEST:
+ xgbe_selftest_get_strings(pdata, data);
+ break;
case ETH_SS_STATS:
- for (i = 0; i < XGBE_STATS_COUNT; i++) {
- memcpy(data, xgbe_gstring_stats[i].stat_string,
- ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < XGBE_STATS_COUNT; i++)
+ ethtool_puts(&data, xgbe_gstring_stats[i].stat_string);
+
for (i = 0; i < pdata->tx_ring_count; i++) {
- sprintf(data, "txq_%u_packets", i);
- data += ETH_GSTRING_LEN;
- sprintf(data, "txq_%u_bytes", i);
- data += ETH_GSTRING_LEN;
+ ethtool_sprintf(&data, "txq_%u_packets", i);
+ ethtool_sprintf(&data, "txq_%u_bytes", i);
}
+
for (i = 0; i < pdata->rx_ring_count; i++) {
- sprintf(data, "rxq_%u_packets", i);
- data += ETH_GSTRING_LEN;
- sprintf(data, "rxq_%u_bytes", i);
- data += ETH_GSTRING_LEN;
+ ethtool_sprintf(&data, "rxq_%u_packets", i);
+ ethtool_sprintf(&data, "rxq_%u_bytes", i);
}
+
break;
}
}
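The conversion above replaces the open-coded memcpy()/sprintf() calls and manual ETH_GSTRING_LEN pointer bumps with ethtool_puts()/ethtool_sprintf(), which copy each name into the string table and advance the cursor themselves. A minimal sketch of the pattern, with xgbe_demo_strings and the stat layout chosen purely for illustration:

#include <linux/ethtool.h>

/* Hypothetical helper: emit one fixed stat name plus a per-queue name pair. */
static void xgbe_demo_strings(u8 *data, unsigned int nr_queues)
{
	unsigned int i;

	ethtool_puts(&data, "rx_crc_errors");	/* copies and advances data */

	for (i = 0; i < nr_queues; i++) {
		ethtool_sprintf(&data, "txq_%u_packets", i);
		ethtool_sprintf(&data, "txq_%u_bytes", i);
	}
}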
@@ -244,6 +134,9 @@ static int xgbe_get_sset_count(struct net_device *netdev, int stringset)
int ret;
switch (stringset) {
+ case ETH_SS_TEST:
+ ret = xgbe_selftest_get_count(pdata);
+ break;
case ETH_SS_STATS:
ret = XGBE_STATS_COUNT +
(pdata->tx_ring_count * 2) +
@@ -442,6 +335,7 @@ static int xgbe_get_coalesce(struct net_device *netdev,
ec->rx_coalesce_usecs = pdata->rx_usecs;
ec->rx_max_coalesced_frames = pdata->rx_frames;
+ ec->tx_coalesce_usecs = pdata->tx_usecs;
ec->tx_max_coalesced_frames = pdata->tx_frames;
return 0;
@@ -455,7 +349,8 @@ static int xgbe_set_coalesce(struct net_device *netdev,
struct xgbe_prv_data *pdata = netdev_priv(netdev);
struct xgbe_hw_if *hw_if = &pdata->hw_if;
unsigned int rx_frames, rx_riwt, rx_usecs;
- unsigned int tx_frames;
+ unsigned int tx_frames, tx_usecs;
+ unsigned int jiffy_us = jiffies_to_usecs(1);
rx_riwt = hw_if->usec_to_riwt(pdata, ec->rx_coalesce_usecs);
rx_usecs = ec->rx_coalesce_usecs;
@@ -477,20 +372,42 @@ static int xgbe_set_coalesce(struct net_device *netdev,
return -EINVAL;
}
+ tx_usecs = ec->tx_coalesce_usecs;
tx_frames = ec->tx_max_coalesced_frames;
/* Check the bounds of values for Tx */
+ if (!tx_usecs) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "tx-usecs must not be 0");
+ return -EINVAL;
+ }
+ if (tx_usecs > XGMAC_MAX_COAL_TX_TICK) {
+ NL_SET_ERR_MSG_FMT_MOD(extack, "tx-usecs is limited to %d usec",
+ XGMAC_MAX_COAL_TX_TICK);
+ return -EINVAL;
+ }
if (tx_frames > pdata->tx_desc_count) {
netdev_err(netdev, "tx-frames is limited to %d frames\n",
pdata->tx_desc_count);
return -EINVAL;
}
+ /* Round tx-usecs to nearest multiple of jiffy granularity */
+ if (tx_usecs % jiffy_us) {
+ tx_usecs = rounddown(tx_usecs, jiffy_us);
+ if (!tx_usecs)
+ tx_usecs = jiffy_us;
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "tx-usecs rounded to %u usec due to jiffy granularity (%u usec)",
+ tx_usecs, jiffy_us);
+ }
+
pdata->rx_riwt = rx_riwt;
pdata->rx_usecs = rx_usecs;
pdata->rx_frames = rx_frames;
hw_if->config_rx_coalesce(pdata);
+ pdata->tx_usecs = tx_usecs;
pdata->tx_frames = tx_frames;
hw_if->config_tx_coalesce(pdata);
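Worked example for the rounding above, assuming HZ=250 so jiffies_to_usecs(1) is 4000: a requested tx-usecs of 9000 is rounded down to 8000, while a request of 1000 would round down to 0 and is therefore clamped up to one jiffy (4000 usec). A self-contained sketch of the same logic (demo_round_tx_usecs is a hypothetical name):

#include <linux/jiffies.h>
#include <linux/math.h>

static unsigned int demo_round_tx_usecs(unsigned int tx_usecs)
{
	unsigned int jiffy_us = jiffies_to_usecs(1);	/* 4000 usec when HZ=250 */

	if (tx_usecs % jiffy_us) {
		tx_usecs = rounddown(tx_usecs, jiffy_us);
		if (!tx_usecs)
			tx_usecs = jiffy_us;	/* never round down to zero */
	}

	return tx_usecs;
}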
@@ -553,7 +470,7 @@ static int xgbe_set_rxfh(struct net_device *netdev,
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
struct xgbe_hw_if *hw_if = &pdata->hw_if;
- unsigned int ret;
+ int ret;
if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
rxfh->hfunc != ETH_RSS_HASH_TOP) {
@@ -577,21 +494,17 @@ static int xgbe_set_rxfh(struct net_device *netdev,
}
static int xgbe_get_ts_info(struct net_device *netdev,
- struct ethtool_ts_info *ts_info)
+ struct kernel_ethtool_ts_info *ts_info)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
if (pdata->ptp_clock)
ts_info->phc_index = ptp_clock_index(pdata->ptp_clock);
- else
- ts_info->phc_index = -1;
ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
@@ -826,7 +739,7 @@ out:
}
static const struct ethtool_ops xgbe_ethtool_ops = {
- .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
.get_drvinfo = xgbe_get_drvinfo,
.get_msglevel = xgbe_get_msglevel,
@@ -853,6 +766,7 @@ static const struct ethtool_ops xgbe_ethtool_ops = {
.set_ringparam = xgbe_set_ringparam,
.get_channels = xgbe_get_channels,
.set_channels = xgbe_set_channels,
+ .self_test = xgbe_selftest_run,
};
const struct ethtool_ops *xgbe_get_ethtool_ops(void)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-hwtstamp.c b/drivers/net/ethernet/amd/xgbe/xgbe-hwtstamp.c
new file mode 100644
index 000000000000..0127988e10be
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-hwtstamp.c
@@ -0,0 +1,399 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
+/*
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
+ *
+ * Author: Raju Rangoju <Raju.Rangoju@amd.com>
+ */
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+void xgbe_update_tstamp_time(struct xgbe_prv_data *pdata,
+ unsigned int sec, unsigned int nsec)
+{
+ int count;
+
+ /* Set the time values and tell the device */
+ XGMAC_IOWRITE(pdata, MAC_STSUR, sec);
+ XGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
+
+ /* issue command to update the system time value */
+ XGMAC_IOWRITE(pdata, MAC_TSCR,
+ XGMAC_IOREAD(pdata, MAC_TSCR) |
+ (1 << MAC_TSCR_TSUPDT_INDEX));
+
+ /* Wait for the time adjust/update to complete */
+ count = 10000;
+ while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSUPDT))
+ udelay(5);
+
+	if (!count)
+ netdev_err(pdata->netdev,
+ "timed out updating system timestamp\n");
+}
+
+void xgbe_update_tstamp_addend(struct xgbe_prv_data *pdata,
+ unsigned int addend)
+{
+ unsigned int count = 10000;
+
+ /* Set the addend register value and tell the device */
+ XGMAC_IOWRITE(pdata, MAC_TSAR, addend);
+ XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 1);
+
+ /* Wait for addend update to complete */
+ while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG))
+ udelay(5);
+
+ if (!count)
+ netdev_err(pdata->netdev,
+ "timed out updating timestamp addend register\n");
+}
+
+void xgbe_set_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
+ unsigned int nsec)
+{
+ unsigned int count = 10000;
+
+ /* Set the time values and tell the device */
+ XGMAC_IOWRITE(pdata, MAC_STSUR, sec);
+ XGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
+ XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSINIT, 1);
+
+ /* Wait for time update to complete */
+ while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT))
+ udelay(5);
+
+ if (!count)
+ netdev_err(pdata->netdev, "timed out initializing timestamp\n");
+}
+
+u64 xgbe_get_tstamp_time(struct xgbe_prv_data *pdata)
+{
+ u64 nsec;
+
+ nsec = XGMAC_IOREAD(pdata, MAC_STSR);
+ nsec *= NSEC_PER_SEC;
+ nsec += XGMAC_IOREAD(pdata, MAC_STNR);
+
+ return nsec;
+}
+
+u64 xgbe_get_tx_tstamp(struct xgbe_prv_data *pdata)
+{
+ unsigned int tx_snr, tx_ssr;
+ u64 nsec;
+
+ if (pdata->vdata->tx_tstamp_workaround) {
+ tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
+ tx_ssr = XGMAC_IOREAD(pdata, MAC_TXSSR);
+ } else {
+ tx_ssr = XGMAC_IOREAD(pdata, MAC_TXSSR);
+ tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
+ }
+
+ if (XGMAC_GET_BITS(tx_snr, MAC_TXSNR, TXTSSTSMIS))
+ return 0;
+
+ nsec = tx_ssr;
+ nsec *= NSEC_PER_SEC;
+ nsec += tx_snr;
+
+ return nsec;
+}
+
+void xgbe_get_rx_tstamp(struct xgbe_packet_data *packet,
+ struct xgbe_ring_desc *rdesc)
+{
+ u64 nsec;
+
+ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSA) &&
+ !XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSD)) {
+ nsec = le32_to_cpu(rdesc->desc1);
+ nsec *= NSEC_PER_SEC;
+ nsec += le32_to_cpu(rdesc->desc0);
+ if (nsec != 0xffffffffffffffffULL) {
+ packet->rx_tstamp = nsec;
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ RX_TSTAMP, 1);
+ }
+ }
+}
+
+void xgbe_config_tstamp(struct xgbe_prv_data *pdata, unsigned int mac_tscr)
+{
+ unsigned int value = 0;
+
+ value = XGMAC_IOREAD(pdata, MAC_TSCR);
+ value |= mac_tscr;
+ XGMAC_IOWRITE(pdata, MAC_TSCR, value);
+}
+
+void xgbe_tx_tstamp(struct work_struct *work)
+{
+ struct xgbe_prv_data *pdata = container_of(work,
+ struct xgbe_prv_data,
+ tx_tstamp_work);
+ struct skb_shared_hwtstamps hwtstamps;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdata->tstamp_lock, flags);
+ if (!pdata->tx_tstamp_skb)
+ goto unlock;
+
+ if (pdata->tx_tstamp) {
+ memset(&hwtstamps, 0, sizeof(hwtstamps));
+ hwtstamps.hwtstamp = ns_to_ktime(pdata->tx_tstamp);
+ skb_tstamp_tx(pdata->tx_tstamp_skb, &hwtstamps);
+ }
+
+ dev_kfree_skb_any(pdata->tx_tstamp_skb);
+
+ pdata->tx_tstamp_skb = NULL;
+
+unlock:
+ spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+}
+
+int xgbe_get_hwtstamp_settings(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ *config = pdata->tstamp_config;
+
+ return 0;
+}
+
+int xgbe_set_hwtstamp_settings(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ unsigned int mac_tscr = 0;
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ break;
+
+ case HWTSTAMP_TX_ON:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ default:
+ return -ERANGE;
+ }
+
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+
+ case HWTSTAMP_FILTER_NTP_ALL:
+ case HWTSTAMP_FILTER_ALL:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* PTP v2, UDP, any kind of event packet */
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ fallthrough; /* to PTP v1, UDP, any kind of event packet */
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+ /* PTP v2, UDP, Sync packet */
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ fallthrough; /* to PTP v1, UDP, Sync packet */
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* PTP v2, UDP, Delay_req packet */
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ fallthrough; /* to PTP v1, UDP, Delay_req packet */
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+	/* 802.1AS, Ethernet, any kind of event packet */
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+	/* 802.1AS, Ethernet, Sync packet */
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+	/* 802.1AS, Ethernet, Delay_req packet */
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* PTP v2/802.AS1, any layer, any kind of event packet */
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* PTP v2/802.AS1, any layer, Sync packet */
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* PTP v2/802.AS1, any layer, Delay_req packet */
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ default:
+ return -ERANGE;
+ }
+
+ xgbe_config_tstamp(pdata, mac_tscr);
+
+ pdata->tstamp_config = *config;
+
+ return 0;
+}
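For reference, the configuration handled above is what userspace reaches through the standard SIOCSHWTSTAMP ioctl, which the core routes to ndo_hwtstamp_set. A hedged userspace sketch (demo_enable_hwtstamp is a hypothetical name; error handling omitted):

#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>

static int demo_enable_hwtstamp(int sock, const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	return ioctl(sock, SIOCSHWTSTAMP, &ifr);	/* sock: any AF_INET datagram socket */
}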
+
+void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
+ struct sk_buff *skb,
+ struct xgbe_packet_data *packet)
+{
+ unsigned long flags;
+
+ if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) {
+ spin_lock_irqsave(&pdata->tstamp_lock, flags);
+ if (pdata->tx_tstamp_skb) {
+ /* Another timestamp in progress, ignore this one */
+ XGMAC_SET_BITS(packet->attributes,
+ TX_PACKET_ATTRIBUTES, PTP, 0);
+ } else {
+ pdata->tx_tstamp_skb = skb_get(skb);
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ }
+ spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+ }
+
+ skb_tx_timestamp(skb);
+}
+
+int xgbe_init_ptp(struct xgbe_prv_data *pdata)
+{
+ unsigned int mac_tscr = 0;
+ struct timespec64 now;
+ u64 dividend;
+
+	/* Register settings depend on the link speed. */
+ switch (pdata->phy.speed) {
+ case SPEED_1000:
+ XGMAC_IOWRITE(pdata, MAC_TICNR, MAC_TICNR_1G_INITVAL);
+ XGMAC_IOWRITE(pdata, MAC_TECNR, MAC_TECNR_1G_INITVAL);
+ break;
+ case SPEED_2500:
+ case SPEED_10000:
+ XGMAC_IOWRITE_BITS(pdata, MAC_TICSNR, TSICSNS,
+ MAC_TICSNR_10G_INITVAL);
+ XGMAC_IOWRITE(pdata, MAC_TECNR, MAC_TECNR_10G_INITVAL);
+ XGMAC_IOWRITE_BITS(pdata, MAC_TECSNR, TSECSNS,
+ MAC_TECSNR_10G_INITVAL);
+ break;
+ case SPEED_UNKNOWN:
+ default:
+ break;
+ }
+
+ /* Enable IEEE1588 PTP clock. */
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+
+ /* Overwrite earlier timestamps */
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TXTSSTSM, 1);
+
+ /* Set one nano-second accuracy */
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCTRLSSR, 1);
+
+ /* Set fine timestamp update */
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 1);
+
+ xgbe_config_tstamp(pdata, mac_tscr);
+
+ /* Exit if timestamping is not enabled */
+ if (!XGMAC_GET_BITS(mac_tscr, MAC_TSCR, TSENA))
+ return -EOPNOTSUPP;
+
+ if (pdata->vdata->tstamp_ptp_clock_freq) {
+ /* Initialize time registers based on
+ * 125MHz PTP Clock Frequency
+ */
+ XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC,
+ XGBE_V2_TSTAMP_SSINC);
+ XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC,
+ XGBE_V2_TSTAMP_SNSINC);
+ } else {
+ /* Initialize time registers based on
+ * 50MHz PTP Clock Frequency
+ */
+ XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC, XGBE_TSTAMP_SSINC);
+ XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC, XGBE_TSTAMP_SNSINC);
+ }
+
+ /* Calculate the addend:
+ * addend = 2^32 / (PTP ref clock / (PTP clock based on SSINC))
+ * = (2^32 * (PTP clock based on SSINC)) / PTP ref clock
+ */
+ if (pdata->vdata->tstamp_ptp_clock_freq)
+ dividend = XGBE_V2_PTP_ACT_CLK_FREQ;
+ else
+ dividend = XGBE_PTP_ACT_CLK_FREQ;
+
+ dividend = (u64)(dividend << 32);
+ pdata->tstamp_addend = div_u64(dividend, pdata->ptpclk_rate);
+
+ xgbe_update_tstamp_addend(pdata, pdata->tstamp_addend);
+
+ dma_wmb();
+ /* initialize system time */
+ ktime_get_real_ts64(&now);
+
+ /* lower 32 bits of tv_sec are safe until y2106 */
+ xgbe_set_tstamp_time(pdata, (u32)now.tv_sec, now.tv_nsec);
+
+ return 0;
+}
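The addend computed at the end of xgbe_init_ptp() above is a 32.32 fixed-point ratio between the SSINC-based nominal clock and the actual PTP reference clock. A worked example with assumed figures only (a 50 MHz nominal clock against a 125 MHz reference; the real values come from SSINC and ptpclk_rate):

#include <linux/math64.h>

/* Illustrative only: 2^32 * 50 MHz / 125 MHz = 0x66666666, i.e. a 0.4 ratio. */
static u32 demo_tstamp_addend(void)
{
	u64 dividend = 50000000ULL << 32;

	return (u32)div_u64(dividend, 125000000);
}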
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
index a9ccc4258ee5..65eb7b577b65 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/module.h>
@@ -179,7 +70,7 @@ static int xgbe_i2c_set_enable(struct xgbe_prv_data *pdata, bool enable)
static int xgbe_i2c_disable(struct xgbe_prv_data *pdata)
{
- unsigned int ret;
+ int ret;
ret = xgbe_i2c_set_enable(pdata, false);
if (ret) {
@@ -274,9 +165,9 @@ static void xgbe_i2c_clear_isr_interrupts(struct xgbe_prv_data *pdata,
XI2C_IOREAD(pdata, IC_CLR_STOP_DET);
}
-static void xgbe_i2c_isr_task(struct tasklet_struct *t)
+static void xgbe_i2c_isr_bh_work(struct work_struct *work)
{
- struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_i2c);
+ struct xgbe_prv_data *pdata = from_work(pdata, work, i2c_bh_work);
struct xgbe_i2c_op_state *state = &pdata->i2c.op_state;
unsigned int isr;
@@ -321,10 +212,10 @@ static irqreturn_t xgbe_i2c_isr(int irq, void *data)
{
struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
- if (pdata->isr_as_tasklet)
- tasklet_schedule(&pdata->tasklet_i2c);
+ if (pdata->isr_as_bh_work)
+ queue_work(system_bh_wq, &pdata->i2c_bh_work);
else
- xgbe_i2c_isr_task(&pdata->tasklet_i2c);
+ xgbe_i2c_isr_bh_work(&pdata->i2c_bh_work);
return IRQ_HANDLED;
}
@@ -369,7 +260,7 @@ static void xgbe_i2c_set_target(struct xgbe_prv_data *pdata, unsigned int addr)
static irqreturn_t xgbe_i2c_combined_isr(struct xgbe_prv_data *pdata)
{
- xgbe_i2c_isr_task(&pdata->tasklet_i2c);
+ xgbe_i2c_isr_bh_work(&pdata->i2c_bh_work);
return IRQ_HANDLED;
}
@@ -449,7 +340,7 @@ static void xgbe_i2c_stop(struct xgbe_prv_data *pdata)
if (pdata->dev_irq != pdata->i2c_irq) {
devm_free_irq(pdata->dev, pdata->i2c_irq, pdata);
- tasklet_kill(&pdata->tasklet_i2c);
+ cancel_work_sync(&pdata->i2c_bh_work);
}
}
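The I2C bottom half (and, further below, the AN one) moves from a tasklet to a work item queued on the system BH workqueue. The pattern reduced to its essentials, with struct demo_priv and the demo_* functions as hypothetical stand-ins:

#include <linux/workqueue.h>

struct demo_priv {
	struct work_struct bh_work;
};

static void demo_isr_bh_work(struct work_struct *work)
{
	struct demo_priv *priv = from_work(priv, work, bh_work);

	/* bottom-half processing runs here in BH context */
	(void)priv;
}

static void demo_setup(struct demo_priv *priv)
{
	INIT_WORK(&priv->bh_work, demo_isr_bh_work);	/* replaces tasklet_setup() */
}

static void demo_hardirq(struct demo_priv *priv)
{
	queue_work(system_bh_wq, &priv->bh_work);	/* replaces tasklet_schedule() */
}

static void demo_teardown(struct demo_priv *priv)
{
	cancel_work_sync(&priv->bh_work);		/* replaces tasklet_kill() */
}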
@@ -464,7 +355,7 @@ static int xgbe_i2c_start(struct xgbe_prv_data *pdata)
/* If we have a separate I2C irq, enable it */
if (pdata->dev_irq != pdata->i2c_irq) {
- tasklet_setup(&pdata->tasklet_i2c, xgbe_i2c_isr_task);
+ INIT_WORK(&pdata->i2c_bh_work, xgbe_i2c_isr_bh_work);
ret = devm_request_irq(pdata->dev, pdata->i2c_irq,
xgbe_i2c_isr, 0, pdata->i2c_name,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index 0e8698928e4d..d1f0419edb23 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/module.h>
@@ -384,7 +275,7 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata)
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->min_mtu = 0;
- netdev->max_mtu = XGMAC_JUMBO_PACKET_MTU;
+ netdev->max_mtu = XGMAC_GIANT_PACKET_MTU - XGBE_ETH_FRAME_HDR;
/* Use default watchdog timeout */
netdev->watchdog_timeo = 0;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 4a2dc705b528..7675bb98f029 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/interrupt.h>
@@ -375,6 +266,10 @@ static void xgbe_an37_set(struct xgbe_prv_data *pdata, bool enable,
reg |= MDIO_VEND2_CTRL1_AN_RESTART;
XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_CTRL1, reg);
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_PCS_DIG_CTRL);
+ reg |= XGBE_VEND2_MAC_AUTO_SW;
+ XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_PCS_DIG_CTRL, reg);
}
static void xgbe_an37_restart(struct xgbe_prv_data *pdata)
@@ -703,9 +598,9 @@ static void xgbe_an73_isr(struct xgbe_prv_data *pdata)
}
}
-static void xgbe_an_isr_task(struct tasklet_struct *t)
+static void xgbe_an_isr_bh_work(struct work_struct *work)
{
- struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_an);
+ struct xgbe_prv_data *pdata = from_work(pdata, work, an_bh_work);
netif_dbg(pdata, intr, pdata->netdev, "AN interrupt received\n");
@@ -727,17 +622,17 @@ static irqreturn_t xgbe_an_isr(int irq, void *data)
{
struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
- if (pdata->isr_as_tasklet)
- tasklet_schedule(&pdata->tasklet_an);
+ if (pdata->isr_as_bh_work)
+ queue_work(system_bh_wq, &pdata->an_bh_work);
else
- xgbe_an_isr_task(&pdata->tasklet_an);
+ xgbe_an_isr_bh_work(&pdata->an_bh_work);
return IRQ_HANDLED;
}
static irqreturn_t xgbe_an_combined_isr(struct xgbe_prv_data *pdata)
{
- xgbe_an_isr_task(&pdata->tasklet_an);
+ xgbe_an_isr_bh_work(&pdata->an_bh_work);
return IRQ_HANDLED;
}
@@ -1003,6 +898,11 @@ static void xgbe_an37_init(struct xgbe_prv_data *pdata)
netif_dbg(pdata, link, pdata->netdev, "CL37 AN (%s) initialized\n",
(pdata->an_mode == XGBE_AN_MODE_CL37) ? "BaseX" : "SGMII");
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1);
+ reg &= ~MDIO_AN_CTRL1_ENABLE;
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_CTRL1, reg);
+
}
static void xgbe_an73_init(struct xgbe_prv_data *pdata)
@@ -1404,6 +1304,10 @@ static void xgbe_phy_status(struct xgbe_prv_data *pdata)
pdata->phy.link = pdata->phy_if.phy_impl.link_status(pdata,
&an_restart);
+ /* bail out if the link status register read fails */
+ if (pdata->phy.link < 0)
+ return;
+
if (an_restart) {
xgbe_phy_config_aneg(pdata);
goto adjust_link;
@@ -1454,7 +1358,7 @@ static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
if (pdata->dev_irq != pdata->an_irq) {
devm_free_irq(pdata->dev, pdata->an_irq, pdata);
- tasklet_kill(&pdata->tasklet_an);
+ cancel_work_sync(&pdata->an_bh_work);
}
pdata->phy_if.phy_impl.stop(pdata);
@@ -1477,7 +1381,7 @@ static int xgbe_phy_start(struct xgbe_prv_data *pdata)
/* If we have a separate AN irq, enable it */
if (pdata->dev_irq != pdata->an_irq) {
- tasklet_setup(&pdata->tasklet_an, xgbe_an_isr_task);
+ INIT_WORK(&pdata->an_bh_work, xgbe_an_isr_bh_work);
ret = devm_request_irq(pdata->dev, pdata->an_irq,
xgbe_an_isr, 0, pdata->an_name,
@@ -1651,6 +1555,7 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
pdata->phy.duplex = DUPLEX_FULL;
}
+ pdata->phy_link = 0;
pdata->phy.link = 0;
pdata->phy.pause_autoneg = pdata->pause_autoneg;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
index f409d7bd1f1e..e3e1dca9856a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
@@ -1,123 +1,15 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/module.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/log2.h>
+#include "xgbe-smn.h"
#include "xgbe.h"
#include "xgbe-common.h"
@@ -139,7 +31,7 @@ static int xgbe_config_multi_msi(struct xgbe_prv_data *pdata)
return ret;
}
- pdata->isr_as_tasklet = 1;
+ pdata->isr_as_bh_work = 1;
pdata->irq_count = ret;
pdata->dev_irq = pci_irq_vector(pdata->pcidev, 0);
@@ -170,13 +62,13 @@ static int xgbe_config_irqs(struct xgbe_prv_data *pdata)
goto out;
ret = pci_alloc_irq_vectors(pdata->pcidev, 1, 1,
- PCI_IRQ_LEGACY | PCI_IRQ_MSI);
+ PCI_IRQ_INTX | PCI_IRQ_MSI);
if (ret < 0) {
dev_info(pdata->dev, "single IRQ enablement failed\n");
return ret;
}
- pdata->isr_as_tasklet = pdata->pcidev->msi_enabled ? 1 : 0;
+ pdata->isr_as_bh_work = pdata->pcidev->msi_enabled ? 1 : 0;
pdata->irq_count = 1;
pdata->channel_irq_count = 1;
@@ -207,14 +99,14 @@ out:
static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
- struct xgbe_prv_data *pdata;
- struct device *dev = &pdev->dev;
void __iomem * const *iomap_table;
- struct pci_dev *rdev;
+ unsigned int port_addr_size, reg;
+ struct device *dev = &pdev->dev;
+ struct xgbe_prv_data *pdata;
unsigned int ma_lo, ma_hi;
- unsigned int reg;
- int bar_mask;
- int ret;
+ struct pci_dev *rdev;
+ int bar_mask, ret;
+ u32 address;
pdata = xgbe_alloc_pdata(dev);
if (IS_ERR(pdata)) {
@@ -274,20 +166,31 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Set the PCS indirect addressing definition registers */
rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
- if (rdev &&
- (rdev->vendor == PCI_VENDOR_ID_AMD) && (rdev->device == 0x15d0)) {
- pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
- pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
- } else if (rdev && (rdev->vendor == PCI_VENDOR_ID_AMD) &&
- (rdev->device == 0x14b5)) {
- pdata->xpcs_window_def_reg = PCS_V2_YC_WINDOW_DEF;
- pdata->xpcs_window_sel_reg = PCS_V2_YC_WINDOW_SELECT;
-
- /* Yellow Carp devices do not need cdr workaround */
- pdata->vdata->an_cdr_workaround = 0;
-
- /* Yellow Carp devices do not need rrc */
- pdata->vdata->enable_rrc = 0;
+ if (rdev && rdev->vendor == PCI_VENDOR_ID_AMD) {
+ switch (rdev->device) {
+ case XGBE_RV_PCI_DEVICE_ID:
+ pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
+ pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
+ break;
+ case XGBE_YC_PCI_DEVICE_ID:
+ pdata->xpcs_window_def_reg = PCS_V2_YC_WINDOW_DEF;
+ pdata->xpcs_window_sel_reg = PCS_V2_YC_WINDOW_SELECT;
+
+ /* Yellow Carp devices do not need cdr workaround */
+ pdata->vdata->an_cdr_workaround = 0;
+
+ /* Yellow Carp devices do not need rrc */
+ pdata->vdata->enable_rrc = 0;
+ break;
+ case XGBE_RN_PCI_DEVICE_ID:
+ pdata->xpcs_window_def_reg = PCS_V3_RN_WINDOW_DEF;
+ pdata->xpcs_window_sel_reg = PCS_V3_RN_WINDOW_SELECT;
+ break;
+ default:
+ pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
+ pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
+ break;
+ }
} else {
pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
@@ -295,7 +198,22 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_dev_put(rdev);
/* Configure the PCS indirect addressing support */
- reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
+ if (pdata->vdata->xpcs_access == XGBE_XPCS_ACCESS_V3) {
+ reg = XP_IOREAD(pdata, XP_PROP_0);
+ port_addr_size = PCS_RN_PORT_ADDR_SIZE *
+ XP_GET_BITS(reg, XP_PROP_0, PORT_ID);
+ pdata->smn_base = PCS_RN_SMN_BASE_ADDR + port_addr_size;
+
+ address = pdata->smn_base + (pdata->xpcs_window_def_reg);
+ ret = amd_smn_read(0, address, &reg);
+ if (ret) {
+ pci_err(pdata->pcidev, "Failed to read data\n");
+ goto err_pci_enable;
+ }
+ } else {
+ reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
+ }
+
pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
pdata->xpcs_window <<= 6;
pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
@@ -473,6 +391,22 @@ static int __maybe_unused xgbe_pci_resume(struct device *dev)
return ret;
}
+static struct xgbe_version_data xgbe_v3 = {
+ .init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v2,
+ .xpcs_access = XGBE_XPCS_ACCESS_V3,
+ .mmc_64bit = 1,
+ .tx_max_fifo_size = 65536,
+ .rx_max_fifo_size = 65536,
+ .tx_tstamp_workaround = 1,
+ .ecc_support = 1,
+ .i2c_support = 1,
+ .irq_reissue_support = 1,
+ .tx_desc_prefetch = 5,
+ .rx_desc_prefetch = 5,
+ .an_cdr_workaround = 0,
+ .enable_rrc = 0,
+};
+
static struct xgbe_version_data xgbe_v2a = {
.init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v2,
.xpcs_access = XGBE_XPCS_ACCESS_V2,
@@ -480,6 +414,7 @@ static struct xgbe_version_data xgbe_v2a = {
.tx_max_fifo_size = 229376,
.rx_max_fifo_size = 229376,
.tx_tstamp_workaround = 1,
+ .tstamp_ptp_clock_freq = 1,
.ecc_support = 1,
.i2c_support = 1,
.irq_reissue_support = 1,
@@ -496,6 +431,7 @@ static struct xgbe_version_data xgbe_v2b = {
.tx_max_fifo_size = 65536,
.rx_max_fifo_size = 65536,
.tx_tstamp_workaround = 1,
+ .tstamp_ptp_clock_freq = 1,
.ecc_support = 1,
.i2c_support = 1,
.irq_reissue_support = 1,
@@ -510,6 +446,8 @@ static const struct pci_device_id xgbe_pci_table[] = {
.driver_data = (kernel_ulong_t)&xgbe_v2a },
{ PCI_VDEVICE(AMD, 0x1459),
.driver_data = (kernel_ulong_t)&xgbe_v2b },
+ { PCI_VDEVICE(AMD, 0x1641),
+ .driver_data = (kernel_ulong_t)&xgbe_v3 },
/* Last entry must be zero */
{ 0, }
};
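
Note: the V3 (Renoir, device ID 0x1630) path set up above no longer reads the PCS window definition through memory-mapped XPCS I/O; it derives a per-port SMN base from the port ID in XP_PROP_0 and fetches the register with amd_smn_read(). A minimal standalone sketch of that lookup follows; the numeric constants and the smn_read() stub are illustrative placeholders, not the driver's real PCS_RN_* values.

#include <stdint.h>
#include <stdio.h>

/* Placeholder values for illustration only; the real constants
 * (PCS_RN_SMN_BASE_ADDR, PCS_RN_PORT_ADDR_SIZE, the window-definition
 * offset) are defined in the driver headers.
 */
#define RN_SMN_BASE_ADDR	0x01000000U
#define RN_PORT_ADDR_SIZE	0x1000U
#define RN_WINDOW_DEF_OFF	0x0U

/* Stand-in for amd_smn_read(): same shape, fake data. */
static int smn_read(uint16_t node, uint32_t address, uint32_t *value)
{
	(void)node;
	(void)address;
	*value = 0x12345678U;	/* pretend window definition register */
	return 0;
}

int main(void)
{
	uint32_t port_id = 1;	/* in the driver this comes from XP_PROP_0 */
	uint32_t smn_base, reg;

	smn_base = RN_SMN_BASE_ADDR + RN_PORT_ADDR_SIZE * port_id;
	if (smn_read(0, smn_base + RN_WINDOW_DEF_OFF, &reg))
		return 1;

	printf("PCS window definition (via SMN): 0x%08x\n", reg);
	return 0;
}
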
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c
index d16eae415f72..2e6b8ffe785c 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/module.h>
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index 6a716337f48b..a68757e8fd22 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/module.h>
@@ -777,7 +668,7 @@ static int xgbe_phy_mii_read_c45(struct mii_bus *mii, int addr, int devad,
else if (phy_data->conn_type & XGBE_CONN_TYPE_MDIO)
ret = xgbe_phy_mdio_mii_read_c45(pdata, addr, devad, reg);
else
- ret = -ENOTSUPP;
+ ret = -EOPNOTSUPP;
xgbe_phy_put_comm_ownership(pdata);
@@ -923,7 +814,6 @@ static void xgbe_phy_free_phy_device(struct xgbe_prv_data *pdata)
static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
struct xgbe_phy_data *phy_data = pdata->phy_data;
unsigned int phy_id = phy_data->phydev->phy_id;
@@ -945,14 +835,7 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
phy_write(phy_data->phydev, 0x04, 0x0d01);
phy_write(phy_data->phydev, 0x00, 0x9140);
- linkmode_set_bit_array(phy_10_100_features_array,
- ARRAY_SIZE(phy_10_100_features_array),
- supported);
- linkmode_set_bit_array(phy_gbit_features_array,
- ARRAY_SIZE(phy_gbit_features_array),
- supported);
-
- linkmode_copy(phy_data->phydev->supported, supported);
+ linkmode_copy(phy_data->phydev->supported, PHY_GBIT_FEATURES);
phy_support_asym_pause(phy_data->phydev);
@@ -964,7 +847,6 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
struct xgbe_phy_data *phy_data = pdata->phy_data;
struct xgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom;
unsigned int phy_id = phy_data->phydev->phy_id;
@@ -1028,13 +910,7 @@ static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata)
reg = phy_read(phy_data->phydev, 0x00);
phy_write(phy_data->phydev, 0x00, reg & ~0x00800);
- linkmode_set_bit_array(phy_10_100_features_array,
- ARRAY_SIZE(phy_10_100_features_array),
- supported);
- linkmode_set_bit_array(phy_gbit_features_array,
- ARRAY_SIZE(phy_gbit_features_array),
- supported);
- linkmode_copy(phy_data->phydev->supported, supported);
+ linkmode_copy(phy_data->phydev->supported, PHY_GBIT_FEATURES);
phy_support_asym_pause(phy_data->phydev);
netif_dbg(pdata, drv, pdata->netdev,
@@ -1113,6 +989,7 @@ static int xgbe_phy_find_phy_device(struct xgbe_prv_data *pdata)
return ret;
}
phy_data->phydev = phydev;
+ phy_data->phydev->mac_managed_pm = true;
xgbe_phy_external_phy_quirks(pdata);
@@ -2870,8 +2747,7 @@ static bool xgbe_phy_valid_speed(struct xgbe_prv_data *pdata, int speed)
static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
- unsigned int reg;
- int ret;
+ int reg, ret;
*an_restart = 0;
@@ -2905,11 +2781,20 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
return 0;
}
- /* Link status is latched low, so read once to clear
- * and then read again to get current state
- */
- reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+ if (reg < 0)
+ return reg;
+
+ /* Link status is latched low so that momentary link drops
+	 * can be detected. If the link was already down, read again
+ * to get the latest state.
+ */
+
+ if (!pdata->phy.link && !(reg & MDIO_STAT1_LSTATUS)) {
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+ if (reg < 0)
+ return reg;
+ }
if (pdata->en_rx_adap) {
/* if the link is available and adaptation is done,
@@ -2928,9 +2813,7 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
xgbe_phy_set_mode(pdata, phy_data->cur_mode);
}
- /* check again for the link and adaptation status */
- reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
- if ((reg & MDIO_STAT1_LSTATUS) && pdata->rx_adapt_done)
+ if (pdata->rx_adapt_done)
return 1;
} else if (reg & MDIO_STAT1_LSTATUS)
return 1;
@@ -3020,7 +2903,7 @@ static void xgbe_phy_sfp_setup(struct xgbe_prv_data *pdata)
static int xgbe_phy_int_mdio_reset(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
- unsigned int ret;
+ int ret;
ret = pdata->hw_if.set_gpio(pdata, phy_data->mdio_reset_gpio);
if (ret)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
index 9131020d06af..47d53e59ccf6 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/module.h>
@@ -538,7 +429,6 @@ static const struct xgbe_version_data xgbe_v1 = {
.tx_tstamp_workaround = 1,
};
-#ifdef CONFIG_ACPI
static const struct acpi_device_id xgbe_acpi_match[] = {
{ .id = "AMDI8001",
.driver_data = (kernel_ulong_t)&xgbe_v1 },
@@ -546,9 +436,7 @@ static const struct acpi_device_id xgbe_acpi_match[] = {
};
MODULE_DEVICE_TABLE(acpi, xgbe_acpi_match);
-#endif
-#ifdef CONFIG_OF
static const struct of_device_id xgbe_of_match[] = {
{ .compatible = "amd,xgbe-seattle-v1a",
.data = &xgbe_v1 },
@@ -556,7 +444,6 @@ static const struct of_device_id xgbe_of_match[] = {
};
MODULE_DEVICE_TABLE(of, xgbe_of_match);
-#endif
static SIMPLE_DEV_PM_OPS(xgbe_platform_pm_ops,
xgbe_platform_suspend, xgbe_platform_resume);
@@ -564,16 +451,12 @@ static SIMPLE_DEV_PM_OPS(xgbe_platform_pm_ops,
static struct platform_driver xgbe_driver = {
.driver = {
.name = XGBE_DRV_NAME,
-#ifdef CONFIG_ACPI
.acpi_match_table = xgbe_acpi_match,
-#endif
-#ifdef CONFIG_OF
.of_match_table = xgbe_of_match,
-#endif
.pm = &xgbe_platform_pm_ops,
},
.probe = xgbe_platform_probe,
- .remove_new = xgbe_platform_remove,
+ .remove = xgbe_platform_remove,
};
int xgbe_platform_init(void)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pps.c b/drivers/net/ethernet/amd/xgbe/xgbe-pps.c
new file mode 100644
index 000000000000..6d03ae7ab36f
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pps.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
+/*
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
+ *
+ * Author: Raju Rangoju <Raju.Rangoju@amd.com>
+ */
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+static u32 get_pps_mask(unsigned int x)
+{
+ return GENMASK(PPS_MAXIDX(x), PPS_MINIDX(x));
+}
+
+static u32 get_pps_cmd(unsigned int x, u32 val)
+{
+ return (val & GENMASK(3, 0)) << PPS_MINIDX(x);
+}
+
+static u32 get_target_mode_sel(unsigned int x, u32 val)
+{
+ return (val & GENMASK(1, 0)) << (PPS_MAXIDX(x) - 2);
+}
+
+int xgbe_pps_config(struct xgbe_prv_data *pdata,
+ struct xgbe_pps_config *cfg, int index, bool on)
+{
+ unsigned int ppscr = 0;
+ unsigned int tnsec;
+ u64 period;
+
+ /* Check if target time register is busy */
+ tnsec = XGMAC_IOREAD(pdata, MAC_PPSx_TTNSR(index));
+ if (XGMAC_GET_BITS(tnsec, MAC_PPSx_TTNSR, TRGTBUSY0))
+ return -EBUSY;
+
+ ppscr = XGMAC_IOREAD(pdata, MAC_PPSCR);
+ ppscr &= ~get_pps_mask(index);
+
+ if (!on) {
+ /* Disable PPS output */
+ ppscr |= get_pps_cmd(index, XGBE_PPSCMD_STOP);
+ ppscr |= PPSEN0;
+ XGMAC_IOWRITE(pdata, MAC_PPSCR, ppscr);
+
+ return 0;
+ }
+
+ /* Configure start time */
+ XGMAC_IOWRITE(pdata, MAC_PPSx_TTSR(index), cfg->start.tv_sec);
+ XGMAC_IOWRITE(pdata, MAC_PPSx_TTNSR(index), cfg->start.tv_nsec);
+
+ period = cfg->period.tv_sec * NSEC_PER_SEC + cfg->period.tv_nsec;
+ period = div_u64(period, XGBE_V2_TSTAMP_SSINC);
+
+ if (period < 4)
+ return -EINVAL;
+
+ /* Configure interval and pulse width (50% duty cycle) */
+ XGMAC_IOWRITE(pdata, MAC_PPSx_INTERVAL(index), period - 1);
+ XGMAC_IOWRITE(pdata, MAC_PPSx_WIDTH(index), (period >> 1) - 1);
+
+ /* Enable PPS with pulse train mode */
+ ppscr |= get_pps_cmd(index, XGBE_PPSCMD_START);
+ ppscr |= get_target_mode_sel(index, XGBE_PPSTARGET_PULSE);
+ ppscr |= PPSEN0;
+
+ XGMAC_IOWRITE(pdata, MAC_PPSCR, ppscr);
+
+ return 0;
+}
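
For reference, the period math in xgbe_pps_config() above converts the requested period into timestamp ticks and programs a 50% duty cycle. A minimal standalone sketch, assuming the V2 sub-second increment of 10 ns (XGBE_V2_TSTAMP_SSINC = 0xA from xgbe.h), shows that a 1 Hz request yields an interval of 99999999 ticks and a width of 49999999 ticks:

#include <stdint.h>
#include <stdio.h>

#define V2_TSTAMP_SSINC	10ULL	/* ns per timestamp tick (0xA) */

int main(void)
{
	uint64_t period_ns = 1000000000ULL;	/* 1 Hz PPS request */
	uint64_t ticks = period_ns / V2_TSTAMP_SSINC;

	if (ticks < 4)
		return 1;	/* the driver rejects periods below 4 ticks */

	/* Values written to MAC_PPSx_INTERVAL / MAC_PPSx_WIDTH (50% duty) */
	printf("interval = %llu\n", (unsigned long long)(ticks - 1));
	printf("width    = %llu\n", (unsigned long long)((ticks >> 1) - 1));
	return 0;
}
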
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
index 7051bd7cf6dc..0e0b8ec3b504 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/clk.h>
@@ -122,18 +13,6 @@
#include "xgbe.h"
#include "xgbe-common.h"
-static u64 xgbe_cc_read(const struct cyclecounter *cc)
-{
- struct xgbe_prv_data *pdata = container_of(cc,
- struct xgbe_prv_data,
- tstamp_cc);
- u64 nsec;
-
- nsec = pdata->hw_if.get_tstamp_time(pdata);
-
- return nsec;
-}
-
static int xgbe_adjfine(struct ptp_clock_info *info, long scaled_ppm)
{
struct xgbe_prv_data *pdata = container_of(info,
@@ -146,7 +25,7 @@ static int xgbe_adjfine(struct ptp_clock_info *info, long scaled_ppm)
spin_lock_irqsave(&pdata->tstamp_lock, flags);
- pdata->hw_if.update_tstamp_addend(pdata, addend);
+ xgbe_update_tstamp_addend(pdata, addend);
spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
@@ -158,16 +37,39 @@ static int xgbe_adjtime(struct ptp_clock_info *info, s64 delta)
struct xgbe_prv_data *pdata = container_of(info,
struct xgbe_prv_data,
ptp_clock_info);
+ unsigned int neg_adjust = 0;
+ unsigned int sec, nsec;
+	u32 quotient, remainder;
unsigned long flags;
+ if (delta < 0) {
+ neg_adjust = 1;
+ delta = -delta;
+ }
+
+	quotient = div_u64_rem(delta, 1000000000ULL, &remainder);
+	sec = quotient;
+	nsec = remainder;
+
+	/* Negative adjustment for the HW timer register. */
+ if (neg_adjust) {
+ sec = -sec;
+ if (XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSCTRLSSR))
+ nsec = (1000000000UL - nsec);
+ else
+ nsec = (0x80000000UL - nsec);
+ }
+ nsec = (neg_adjust << 31) | nsec;
+
spin_lock_irqsave(&pdata->tstamp_lock, flags);
- timecounter_adjtime(&pdata->tstamp_tc, delta);
+ xgbe_update_tstamp_time(pdata, sec, nsec);
spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
return 0;
}
-static int xgbe_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
+static int xgbe_gettimex(struct ptp_clock_info *info, struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
{
struct xgbe_prv_data *pdata = container_of(info,
struct xgbe_prv_data,
@@ -176,9 +78,9 @@ static int xgbe_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
u64 nsec;
spin_lock_irqsave(&pdata->tstamp_lock, flags);
-
- nsec = timecounter_read(&pdata->tstamp_tc);
-
+ ptp_read_system_prets(sts);
+ nsec = xgbe_get_tstamp_time(pdata);
+ ptp_read_system_postts(sts);
spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
*ts = ns_to_timespec64(nsec);
@@ -193,14 +95,9 @@ static int xgbe_settime(struct ptp_clock_info *info,
struct xgbe_prv_data,
ptp_clock_info);
unsigned long flags;
- u64 nsec;
-
- nsec = timespec64_to_ns(ts);
spin_lock_irqsave(&pdata->tstamp_lock, flags);
-
- timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc, nsec);
-
+ xgbe_set_tstamp_time(pdata, ts->tv_sec, ts->tv_nsec);
spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
return 0;
@@ -209,15 +106,35 @@ static int xgbe_settime(struct ptp_clock_info *info,
static int xgbe_enable(struct ptp_clock_info *info,
struct ptp_clock_request *request, int on)
{
- return -EOPNOTSUPP;
+ struct xgbe_prv_data *pdata = container_of(info, struct xgbe_prv_data,
+ ptp_clock_info);
+ struct xgbe_pps_config *pps_cfg;
+ unsigned long flags;
+ int ret;
+
+ dev_dbg(pdata->dev, "rq->type %d on %d\n", request->type, on);
+
+ if (request->type != PTP_CLK_REQ_PEROUT)
+ return -EOPNOTSUPP;
+
+ pps_cfg = &pdata->pps[request->perout.index];
+
+ pps_cfg->start.tv_sec = request->perout.start.sec;
+ pps_cfg->start.tv_nsec = request->perout.start.nsec;
+ pps_cfg->period.tv_sec = request->perout.period.sec;
+ pps_cfg->period.tv_nsec = request->perout.period.nsec;
+
+ spin_lock_irqsave(&pdata->tstamp_lock, flags);
+ ret = xgbe_pps_config(pdata, pps_cfg, request->perout.index, on);
+ spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+
+ return ret;
}
void xgbe_ptp_register(struct xgbe_prv_data *pdata)
{
struct ptp_clock_info *info = &pdata->ptp_clock_info;
struct ptp_clock *clock;
- struct cyclecounter *cc = &pdata->tstamp_cc;
- u64 dividend;
snprintf(info->name, sizeof(info->name), "%s",
netdev_name(pdata->netdev));
@@ -225,8 +142,10 @@ void xgbe_ptp_register(struct xgbe_prv_data *pdata)
info->max_adj = pdata->ptpclk_rate;
info->adjfine = xgbe_adjfine;
info->adjtime = xgbe_adjtime;
- info->gettime64 = xgbe_gettime;
+ info->gettimex64 = xgbe_gettimex;
info->settime64 = xgbe_settime;
+ info->n_per_out = pdata->hw_feat.pps_out_num;
+ info->n_ext_ts = pdata->hw_feat.aux_snap_num;
info->enable = xgbe_enable;
clock = ptp_clock_register(info, pdata->dev);
@@ -237,23 +156,6 @@ void xgbe_ptp_register(struct xgbe_prv_data *pdata)
pdata->ptp_clock = clock;
- /* Calculate the addend:
- * addend = 2^32 / (PTP ref clock / 50Mhz)
- * = (2^32 * 50Mhz) / PTP ref clock
- */
- dividend = 50000000;
- dividend <<= 32;
- pdata->tstamp_addend = div_u64(dividend, pdata->ptpclk_rate);
-
- /* Setup the timecounter */
- cc->read = xgbe_cc_read;
- cc->mask = CLOCKSOURCE_MASK(64);
- cc->mult = 1;
- cc->shift = 0;
-
- timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc,
- ktime_to_ns(ktime_get_real()));
-
/* Disable all timestamping to start */
XGMAC_IOWRITE(pdata, MAC_TSCR, 0);
pdata->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
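
For context, the new xgbe_adjtime() above no longer goes through the timecounter; it splits the signed delta into seconds and nanoseconds and, for a negative adjustment, encodes the subtraction for the hardware update registers (two's-complement seconds, wrapped nanoseconds, bit 31 set). Below is a minimal standalone sketch of that split/encode step, assuming digital rollover (TSCTRLSSR set, so nanoseconds wrap at 10^9); the names are illustrative, not driver symbols.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t delta = -1500000000LL;	/* adjust the clock back by 1.5 s */
	uint32_t neg_adjust = 0, sec, nsec;

	if (delta < 0) {
		neg_adjust = 1;
		delta = -delta;
	}

	sec = (uint32_t)(delta / 1000000000ULL);
	nsec = (uint32_t)(delta % 1000000000ULL);

	if (neg_adjust) {
		sec = -sec;			/* two's-complement seconds */
		nsec = 1000000000UL - nsec;	/* digital rollover case */
	}
	nsec = (neg_adjust << 31) | nsec;	/* bit 31 flags subtraction */

	printf("sec=0x%08x nsec=0x%08x\n", sec, nsec);
	return 0;
}
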
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-selftest.c b/drivers/net/ethernet/amd/xgbe/xgbe-selftest.c
new file mode 100644
index 000000000000..55e5e467facd
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-selftest.c
@@ -0,0 +1,346 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
+/*
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
+ *
+ * Author: Raju Rangoju <Raju.Rangoju@amd.com>
+ */
+#include <linux/crc32.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <net/tcp.h>
+#include <net/udp.h>
+#include <net/checksum.h>
+#include <net/selftests.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+#define XGBE_LOOPBACK_NONE 0
+#define XGBE_LOOPBACK_MAC 1
+#define XGBE_LOOPBACK_PHY 2
+
+struct xgbe_test {
+ char name[ETH_GSTRING_LEN];
+ int lb;
+ int (*fn)(struct xgbe_prv_data *pdata);
+};
+
+static u8 xgbe_test_id;
+
+static int xgbe_test_loopback_validate(struct sk_buff *skb,
+ struct net_device *ndev,
+ struct packet_type *pt,
+ struct net_device *orig_ndev)
+{
+ struct net_test_priv *tdata = pt->af_packet_priv;
+ const unsigned char *dst = tdata->packet->dst;
+ const unsigned char *src = tdata->packet->src;
+ struct netsfhdr *hdr;
+ struct ethhdr *eh;
+ struct tcphdr *th;
+ struct udphdr *uh;
+ struct iphdr *ih;
+ int eat;
+
+ skb = skb_unshare(skb, GFP_ATOMIC);
+ if (!skb)
+ goto out;
+
+ eat = (skb->tail + skb->data_len) - skb->end;
+ if (eat > 0 && skb_shared(skb)) {
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb)
+ goto out;
+ }
+
+ if (skb_linearize(skb))
+ goto out;
+
+ if (skb_headlen(skb) < (NET_TEST_PKT_SIZE - ETH_HLEN))
+ goto out;
+
+ eh = (struct ethhdr *)skb_mac_header(skb);
+ if (dst) {
+ if (!ether_addr_equal_unaligned(eh->h_dest, dst))
+ goto out;
+ }
+ if (src) {
+ if (!ether_addr_equal_unaligned(eh->h_source, src))
+ goto out;
+ }
+
+ ih = ip_hdr(skb);
+
+ if (tdata->packet->tcp) {
+ if (ih->protocol != IPPROTO_TCP)
+ goto out;
+
+ th = (struct tcphdr *)((u8 *)ih + 4 * ih->ihl);
+ if (th->dest != htons(tdata->packet->dport))
+ goto out;
+
+ hdr = (struct netsfhdr *)((u8 *)th + sizeof(*th));
+ } else {
+ if (ih->protocol != IPPROTO_UDP)
+ goto out;
+
+ uh = (struct udphdr *)((u8 *)ih + 4 * ih->ihl);
+ if (uh->dest != htons(tdata->packet->dport))
+ goto out;
+
+ hdr = (struct netsfhdr *)((u8 *)uh + sizeof(*uh));
+ }
+
+ if (hdr->magic != cpu_to_be64(NET_TEST_PKT_MAGIC))
+ goto out;
+ if (tdata->packet->id != hdr->id)
+ goto out;
+
+ tdata->ok = true;
+ complete(&tdata->comp);
+out:
+ kfree_skb(skb);
+ return 0;
+}
+
+static int __xgbe_test_loopback(struct xgbe_prv_data *pdata,
+ struct net_packet_attrs *attr)
+{
+ struct net_test_priv *tdata;
+ struct sk_buff *skb = NULL;
+ int ret = 0;
+
+ tdata = kzalloc(sizeof(*tdata), GFP_KERNEL);
+ if (!tdata)
+ return -ENOMEM;
+
+ tdata->ok = false;
+ init_completion(&tdata->comp);
+
+ tdata->pt.type = htons(ETH_P_IP);
+ tdata->pt.func = xgbe_test_loopback_validate;
+ tdata->pt.dev = pdata->netdev;
+ tdata->pt.af_packet_priv = tdata;
+ tdata->packet = attr;
+
+ dev_add_pack(&tdata->pt);
+
+ skb = net_test_get_skb(pdata->netdev, xgbe_test_id, attr);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ xgbe_test_id++;
+ ret = dev_direct_xmit(skb, attr->queue_mapping);
+ if (ret)
+ goto cleanup;
+
+ if (!attr->timeout)
+ attr->timeout = NET_LB_TIMEOUT;
+
+ wait_for_completion_timeout(&tdata->comp, attr->timeout);
+ ret = tdata->ok ? 0 : -ETIMEDOUT;
+
+ if (ret)
+		netdev_err(pdata->netdev, "Response timed out: ret %d\n", ret);
+cleanup:
+ dev_remove_pack(&tdata->pt);
+ kfree(tdata);
+ return ret;
+}
+
+static int xgbe_test_mac_loopback(struct xgbe_prv_data *pdata)
+{
+ struct net_packet_attrs attr = {};
+
+ attr.dst = pdata->netdev->dev_addr;
+ return __xgbe_test_loopback(pdata, &attr);
+}
+
+static int xgbe_test_phy_loopback(struct xgbe_prv_data *pdata)
+{
+ struct net_packet_attrs attr = {};
+ int ret;
+
+ if (!pdata->netdev->phydev) {
+ netdev_err(pdata->netdev, "phydev not found: cannot start PHY loopback test\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = phy_loopback(pdata->netdev->phydev, true, 0);
+ if (ret)
+ return ret;
+
+ attr.dst = pdata->netdev->dev_addr;
+ ret = __xgbe_test_loopback(pdata, &attr);
+
+ phy_loopback(pdata->netdev->phydev, false, 0);
+ return ret;
+}
+
+static int xgbe_test_sph(struct xgbe_prv_data *pdata)
+{
+ struct net_packet_attrs attr = {};
+ unsigned long cnt_end, cnt_start;
+ int ret;
+
+ cnt_start = pdata->ext_stats.rx_split_header_packets;
+
+ if (!pdata->sph) {
+ netdev_err(pdata->netdev, "Split Header not enabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* UDP test */
+ attr.dst = pdata->netdev->dev_addr;
+ attr.tcp = false;
+
+ ret = __xgbe_test_loopback(pdata, &attr);
+ if (ret)
+ return ret;
+
+ cnt_end = pdata->ext_stats.rx_split_header_packets;
+ if (cnt_end <= cnt_start)
+ return -EINVAL;
+
+ /* TCP test */
+ cnt_start = cnt_end;
+
+ attr.dst = pdata->netdev->dev_addr;
+ attr.tcp = true;
+
+ ret = __xgbe_test_loopback(pdata, &attr);
+ if (ret)
+ return ret;
+
+ cnt_end = pdata->ext_stats.rx_split_header_packets;
+ if (cnt_end <= cnt_start)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int xgbe_test_jumbo(struct xgbe_prv_data *pdata)
+{
+ struct net_packet_attrs attr = {};
+ int size = pdata->rx_buf_size;
+
+ attr.dst = pdata->netdev->dev_addr;
+ attr.max_size = size - ETH_FCS_LEN;
+
+ return __xgbe_test_loopback(pdata, &attr);
+}
+
+static const struct xgbe_test xgbe_selftests[] = {
+ {
+ .name = "MAC Loopback ",
+ .lb = XGBE_LOOPBACK_MAC,
+ .fn = xgbe_test_mac_loopback,
+ }, {
+ .name = "PHY Loopback ",
+ .lb = XGBE_LOOPBACK_NONE,
+ .fn = xgbe_test_phy_loopback,
+ }, {
+ .name = "Split Header ",
+ .lb = XGBE_LOOPBACK_PHY,
+ .fn = xgbe_test_sph,
+ }, {
+ .name = "Jumbo Frame ",
+ .lb = XGBE_LOOPBACK_PHY,
+ .fn = xgbe_test_jumbo,
+ },
+};
+
+void xgbe_selftest_run(struct net_device *dev,
+ struct ethtool_test *etest, u64 *buf)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(dev);
+ int count = xgbe_selftest_get_count(pdata);
+ int i, ret;
+
+ memset(buf, 0, sizeof(*buf) * count);
+ xgbe_test_id = 0;
+
+ if (etest->flags != ETH_TEST_FL_OFFLINE) {
+ netdev_err(pdata->netdev, "Only offline tests are supported\n");
+ etest->flags |= ETH_TEST_FL_FAILED;
+ return;
+ } else if (!netif_carrier_ok(dev)) {
+ netdev_err(pdata->netdev,
+ "Invalid link, cannot execute tests\n");
+ etest->flags |= ETH_TEST_FL_FAILED;
+ return;
+ }
+
+ /* Wait for queues drain */
+ msleep(200);
+
+ for (i = 0; i < count; i++) {
+ ret = 0;
+
+ switch (xgbe_selftests[i].lb) {
+ case XGBE_LOOPBACK_PHY:
+ ret = -EOPNOTSUPP;
+ if (dev->phydev)
+ ret = phy_loopback(dev->phydev, true, 0);
+ if (!ret)
+ break;
+ fallthrough;
+ case XGBE_LOOPBACK_MAC:
+ ret = xgbe_enable_mac_loopback(pdata);
+ break;
+ case XGBE_LOOPBACK_NONE:
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ /*
+ * First tests will always be MAC / PHY loopback.
+		 * If any of them is not supported, we abort early.
+ */
+ if (ret) {
+ netdev_err(pdata->netdev, "Loopback not supported\n");
+ etest->flags |= ETH_TEST_FL_FAILED;
+ break;
+ }
+
+ ret = xgbe_selftests[i].fn(pdata);
+ if (ret && (ret != -EOPNOTSUPP))
+ etest->flags |= ETH_TEST_FL_FAILED;
+ buf[i] = ret;
+
+ switch (xgbe_selftests[i].lb) {
+ case XGBE_LOOPBACK_PHY:
+ ret = -EOPNOTSUPP;
+ if (dev->phydev)
+ ret = phy_loopback(dev->phydev, false, 0);
+ if (!ret)
+ break;
+ fallthrough;
+ case XGBE_LOOPBACK_MAC:
+ xgbe_disable_mac_loopback(pdata);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+void xgbe_selftest_get_strings(struct xgbe_prv_data *pdata, u8 *data)
+{
+ u8 *p = data;
+ int i;
+
+ for (i = 0; i < xgbe_selftest_get_count(pdata); i++)
+ ethtool_puts(&p, xgbe_selftests[i].name);
+}
+
+int xgbe_selftest_get_count(struct xgbe_prv_data *pdata)
+{
+ return ARRAY_SIZE(xgbe_selftests);
+}
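
The selftests above are exposed through the standard ethtool self-test interface and insist on an offline run (ETH_TEST_FL_OFFLINE) with the link up. Once the driver's ethtool_ops wire in these entry points, they should be reachable with something like the following, where the interface name is whatever the xgbe netdev is called on the system:

	ethtool -t <interface> offline
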
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-smn.h b/drivers/net/ethernet/amd/xgbe/xgbe-smn.h
new file mode 100644
index 000000000000..c6ae127ced03
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-smn.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
+ *
+ * Author: Raju Rangoju <Raju.Rangoju@amd.com>
+ */
+
+#ifndef __SMN_H__
+#define __SMN_H__
+
+#ifdef CONFIG_AMD_NB
+
+#include <asm/amd/nb.h>
+
+#else
+
+static inline int amd_smn_write(u16 node, u32 address, u32 value)
+{
+ return -ENODEV;
+}
+
+static inline int amd_smn_read(u16 node, u32 address, u32 *value)
+{
+ return -ENODEV;
+}
+
+#endif
+#endif
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index f01a1e566da6..03ef0f548483 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#ifndef __XGBE_H__
@@ -189,11 +80,13 @@
#define XGBE_IRQ_MODE_EDGE 0
#define XGBE_IRQ_MODE_LEVEL 1
+#define XGBE_ETH_FRAME_HDR (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)
#define XGMAC_MIN_PACKET 60
#define XGMAC_STD_PACKET_MTU 1500
#define XGMAC_MAX_STD_PACKET 1518
#define XGMAC_JUMBO_PACKET_MTU 9000
#define XGMAC_MAX_JUMBO_PACKET 9018
+#define XGMAC_GIANT_PACKET_MTU 16368
#define XGMAC_ETH_PREAMBLE (12 + 8) /* Inter-frame gap + preamble */
#define XGMAC_PFC_DATA_LEN 46
@@ -226,6 +119,14 @@
#define XGBE_MSI_BASE_COUNT 4
#define XGBE_MSI_MIN_COUNT (XGBE_MSI_BASE_COUNT + 1)
+/* Initial PTP register values based on Link Speed. */
+#define MAC_TICNR_1G_INITVAL 0x10
+#define MAC_TECNR_1G_INITVAL 0x28
+
+#define MAC_TICSNR_10G_INITVAL 0x33
+#define MAC_TECNR_10G_INITVAL 0x14
+#define MAC_TECSNR_10G_INITVAL 0xCC
+
/* PCI clock frequencies */
#define XGBE_V2_DMA_CLOCK_FREQ 500000000 /* 500 MHz */
#define XGBE_V2_PTP_CLOCK_FREQ 125000000 /* 125 MHz */
@@ -235,6 +136,15 @@
*/
#define XGBE_TSTAMP_SSINC 20
#define XGBE_TSTAMP_SNSINC 0
+#define XGBE_PTP_ACT_CLK_FREQ 500000000
+
+#define XGBE_V2_TSTAMP_SSINC 0xA
+#define XGBE_V2_TSTAMP_SNSINC 0
+#define XGBE_V2_PTP_ACT_CLK_FREQ 1000000000
+
+/* Define maximum supported values */
+#define XGBE_MAX_PPS_OUT 4
+#define XGBE_MAX_AUX_SNAP 4
/* Driver PMT macros */
#define XGMAC_DRIVER_CONTEXT 1
@@ -262,6 +172,7 @@
/* Default coalescing parameters */
#define XGMAC_INIT_DMA_TX_USECS 1000
#define XGMAC_INIT_DMA_TX_FRAMES 25
+#define XGMAC_MAX_COAL_TX_TICK 100000
#define XGMAC_MAX_DMA_RIWT 0xff
#define XGMAC_INIT_DMA_RX_USECS 30
@@ -292,12 +203,12 @@
#define XGBE_LINK_TIMEOUT 5
#define XGBE_KR_TRAINING_WAIT_ITER 50
-#define XGBE_SGMII_AN_LINK_STATUS BIT(1)
+#define XGBE_SGMII_AN_LINK_DUPLEX BIT(1)
#define XGBE_SGMII_AN_LINK_SPEED (BIT(2) | BIT(3))
#define XGBE_SGMII_AN_LINK_SPEED_10 0x00
#define XGBE_SGMII_AN_LINK_SPEED_100 0x04
#define XGBE_SGMII_AN_LINK_SPEED_1000 0x08
-#define XGBE_SGMII_AN_LINK_DUPLEX BIT(4)
+#define XGBE_SGMII_AN_LINK_STATUS BIT(4)
/* ECC correctable error notification window (seconds) */
#define XGBE_ECC_LIMIT 60
@@ -347,6 +258,15 @@
(_src)->link_modes._sname, \
__ETHTOOL_LINK_MODE_MASK_NBITS)
+/* XGBE PCI device id */
+#define XGBE_RV_PCI_DEVICE_ID 0x15d0
+#define XGBE_YC_PCI_DEVICE_ID 0x14b5
+#define XGBE_RN_PCI_DEVICE_ID 0x1630
+
+ /* Generic low and high masks */
+#define XGBE_GEN_HI_MASK GENMASK(31, 16)
+#define XGBE_GEN_LO_MASK GENMASK(15, 0)
+
struct xgbe_prv_data;
struct xgbe_packet_data {
@@ -565,6 +485,7 @@ enum xgbe_speed {
enum xgbe_xpcs_access {
XGBE_XPCS_ACCESS_V1 = 0,
XGBE_XPCS_ACCESS_V2,
+ XGBE_XPCS_ACCESS_V3,
};
enum xgbe_an_mode {
@@ -756,6 +677,11 @@ struct xgbe_ext_stats {
u64 rx_vxlan_csum_errors;
};
+struct xgbe_pps_config {
+ struct timespec64 start;
+ struct timespec64 period;
+};
+
struct xgbe_hw_if {
int (*tx_complete)(struct xgbe_ring_desc *);
@@ -838,14 +764,6 @@ struct xgbe_hw_if {
void (*tx_mmc_int)(struct xgbe_prv_data *);
void (*read_mmc_stats)(struct xgbe_prv_data *);
- /* For Timestamp config */
- int (*config_tstamp)(struct xgbe_prv_data *, unsigned int);
- void (*update_tstamp_addend)(struct xgbe_prv_data *, unsigned int);
- void (*set_tstamp_time)(struct xgbe_prv_data *, unsigned int sec,
- unsigned int nsec);
- u64 (*get_tstamp_time)(struct xgbe_prv_data *);
- u64 (*get_tx_tstamp)(struct xgbe_prv_data *);
-
/* For Data Center Bridging config */
void (*config_tc)(struct xgbe_prv_data *);
void (*config_dcb_tc)(struct xgbe_prv_data *);
@@ -865,6 +783,10 @@ struct xgbe_hw_if {
void (*enable_vxlan)(struct xgbe_prv_data *);
void (*disable_vxlan)(struct xgbe_prv_data *);
void (*set_vxlan_id)(struct xgbe_prv_data *);
+
+ /* For Split Header */
+ void (*enable_sph)(struct xgbe_prv_data *pdata);
+ void (*disable_sph)(struct xgbe_prv_data *pdata);
};
/* This structure represents implementation specific routines for an
@@ -1039,6 +961,7 @@ struct xgbe_version_data {
unsigned int tx_max_fifo_size;
unsigned int rx_max_fifo_size;
unsigned int tx_tstamp_workaround;
+ unsigned int tstamp_ptp_clock_freq;
unsigned int ecc_support;
unsigned int i2c_support;
unsigned int irq_reissue_support;
@@ -1056,6 +979,7 @@ struct xgbe_prv_data {
struct device *dev;
struct platform_device *phy_platdev;
struct device *phy_dev;
+ unsigned int smn_base;
/* Version related data */
struct xgbe_version_data *vdata;
@@ -1222,14 +1146,15 @@ struct xgbe_prv_data {
spinlock_t tstamp_lock;
struct ptp_clock_info ptp_clock_info;
struct ptp_clock *ptp_clock;
- struct hwtstamp_config tstamp_config;
- struct cyclecounter tstamp_cc;
- struct timecounter tstamp_tc;
+ struct kernel_hwtstamp_config tstamp_config;
unsigned int tstamp_addend;
struct work_struct tx_tstamp_work;
struct sk_buff *tx_tstamp_skb;
u64 tx_tstamp;
+ /* Pulse Per Second output */
+ struct xgbe_pps_config pps[XGBE_MAX_PPS_OUT];
+
/* DCB support */
struct ieee_ets *ets;
struct ieee_pfc *pfc;
@@ -1298,11 +1223,11 @@ struct xgbe_prv_data {
unsigned int lpm_ctrl; /* CTRL1 for resume */
- unsigned int isr_as_tasklet;
- struct tasklet_struct tasklet_dev;
- struct tasklet_struct tasklet_ecc;
- struct tasklet_struct tasklet_i2c;
- struct tasklet_struct tasklet_an;
+ unsigned int isr_as_bh_work;
+ struct work_struct dev_bh_work;
+ struct work_struct ecc_bh_work;
+ struct work_struct i2c_bh_work;
+ struct work_struct an_bh_work;
struct dentry *xgbe_debugfs;
@@ -1321,6 +1246,7 @@ struct xgbe_prv_data {
int rx_adapt_retries;
bool rx_adapt_done;
bool mode_set;
+ bool sph;
};
/* Function prototypes*/
@@ -1369,6 +1295,44 @@ void xgbe_init_tx_coalesce(struct xgbe_prv_data *);
void xgbe_restart_dev(struct xgbe_prv_data *pdata);
void xgbe_full_restart_dev(struct xgbe_prv_data *pdata);
+/* For Timestamp config */
+void xgbe_config_tstamp(struct xgbe_prv_data *pdata, unsigned int mac_tscr);
+u64 xgbe_get_tstamp_time(struct xgbe_prv_data *pdata);
+u64 xgbe_get_tx_tstamp(struct xgbe_prv_data *pdata);
+void xgbe_get_rx_tstamp(struct xgbe_packet_data *packet,
+ struct xgbe_ring_desc *rdesc);
+void xgbe_update_tstamp_addend(struct xgbe_prv_data *pdata,
+ unsigned int addend);
+void xgbe_set_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
+ unsigned int nsec);
+void xgbe_tx_tstamp(struct work_struct *work);
+int xgbe_get_hwtstamp_settings(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config);
+int xgbe_set_hwtstamp_settings(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
+void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
+ struct sk_buff *skb,
+ struct xgbe_packet_data *packet);
+int xgbe_init_ptp(struct xgbe_prv_data *pdata);
+void xgbe_update_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
+ unsigned int nsec);
+
+int xgbe_pps_config(struct xgbe_prv_data *pdata, struct xgbe_pps_config *cfg,
+ int index, bool on);
+
+/* Selftest functions */
+void xgbe_selftest_run(struct net_device *dev,
+ struct ethtool_test *etest, u64 *buf);
+void xgbe_selftest_get_strings(struct xgbe_prv_data *pdata, u8 *data);
+int xgbe_selftest_get_count(struct xgbe_prv_data *pdata);
+
+/* Loopback control */
+int xgbe_enable_mac_loopback(struct xgbe_prv_data *pdata);
+void xgbe_disable_mac_loopback(struct xgbe_prv_data *pdata);
+
#ifdef CONFIG_DEBUG_FS
void xgbe_debugfs_init(struct xgbe_prv_data *);
void xgbe_debugfs_exit(struct xgbe_prv_data *);
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c
index 9e90c2381491..d7ca847d44c7 100644
--- a/drivers/net/ethernet/apm/xgene-v2/main.c
+++ b/drivers/net/ethernet/apm/xgene-v2/main.c
@@ -9,8 +9,6 @@
#include "main.h"
-static const struct acpi_device_id xge_acpi_match[];
-
static int xge_get_resources(struct xge_pdata *pdata)
{
struct platform_device *pdev;
@@ -731,10 +729,10 @@ MODULE_DEVICE_TABLE(acpi, xge_acpi_match);
static struct platform_driver xge_driver = {
.driver = {
.name = "xgene-enet-v2",
- .acpi_match_table = ACPI_PTR(xge_acpi_match),
+ .acpi_match_table = xge_acpi_match,
},
.probe = xge_probe,
- .remove_new = xge_remove,
+ .remove = xge_remove,
.shutdown = xge_shutdown,
};
module_platform_driver(xge_driver);
diff --git a/drivers/net/ethernet/apm/xgene-v2/mdio.c b/drivers/net/ethernet/apm/xgene-v2/mdio.c
index eba06831aec2..6a17045a5f62 100644
--- a/drivers/net/ethernet/apm/xgene-v2/mdio.c
+++ b/drivers/net/ethernet/apm/xgene-v2/mdio.c
@@ -97,7 +97,6 @@ void xge_mdio_remove(struct net_device *ndev)
int xge_mdio_config(struct net_device *ndev)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct xge_pdata *pdata = netdev_priv(ndev);
struct device *dev = &pdata->pdev->dev;
struct mii_bus *mdio_bus;
@@ -137,17 +136,12 @@ int xge_mdio_config(struct net_device *ndev)
goto err;
}
- linkmode_set_bit_array(phy_10_100_features_array,
- ARRAY_SIZE(phy_10_100_features_array),
- mask);
- linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, mask);
- linkmode_set_bit(ETHTOOL_LINK_MODE_AUI_BIT, mask);
- linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
- linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mask);
- linkmode_set_bit(ETHTOOL_LINK_MODE_BNC_BIT, mask);
-
- linkmode_andnot(phydev->supported, phydev->supported, mask);
- linkmode_copy(phydev->advertising, phydev->supported);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+
pdata->phy_speed = SPEED_UNKNOWN;
return 0;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index e641dbbea1e2..b854b6b42d77 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -421,18 +421,12 @@ static void xgene_enet_configure_clock(struct xgene_enet_pdata *pdata)
if (dev->of_node) {
struct clk *parent = clk_get_parent(pdata->clk);
+ long rate = rgmii_clock(pdata->phy_speed);
- switch (pdata->phy_speed) {
- case SPEED_10:
- clk_set_rate(parent, 2500000);
- break;
- case SPEED_100:
- clk_set_rate(parent, 25000000);
- break;
- default:
- clk_set_rate(parent, 125000000);
- break;
- }
+ if (rate < 0)
+ rate = 125000000;
+
+ clk_set_rate(parent, rate);
}
#ifdef CONFIG_ACPI
else {
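The hunk above replaces the open-coded speed-to-rate switch with the generic rgmii_clock() helper from <linux/phy.h>. A hypothetical standalone sketch of the mapping that helper is expected to provide, mirroring the switch removed above (names and the userspace errno include are for illustration only):

#include <errno.h>

static long rgmii_clock_sketch(int speed_mbps)
{
	switch (speed_mbps) {
	case 10:
		return 2500000;		/* SPEED_10   -> 2.5 MHz */
	case 100:
		return 25000000;	/* SPEED_100  -> 25 MHz  */
	case 1000:
		return 125000000;	/* SPEED_1000 -> 125 MHz */
	default:
		return -EINVAL;		/* xgene falls back to 125 MHz on error */
	}
}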
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 44900026d11b..3b2951030a38 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -1530,7 +1530,7 @@ static int xgene_change_mtu(struct net_device *ndev, int new_mtu)
frame_size = (new_mtu > ETH_DATA_LEN) ? (new_mtu + 18) : 0x600;
xgene_enet_close(ndev);
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
pdata->mac_ops->set_framesize(pdata, frame_size);
xgene_enet_open(ndev);
@@ -2159,7 +2159,7 @@ static struct platform_driver xgene_enet_driver = {
.acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
},
.probe = xgene_enet_probe,
- .remove_new = xgene_enet_remove,
+ .remove = xgene_enet_remove,
.shutdown = xgene_enet_shutdown,
};
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
index 86607b79c09f..cc3b1631c905 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
@@ -6,8 +6,14 @@
* Keyur Chudgar <kchudgar@apm.com>
*/
-#include <linux/of_gpio.h>
-#include <linux/gpio.h>
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/io.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_xgmac.h"
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index 292b1f9cd9e7..b3bf8d6f88e8 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -20,7 +20,6 @@
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/crc32.h>
-#include <linux/crc32poly.h>
#include <linux/bitrev.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
@@ -461,7 +460,7 @@ static int bmac_suspend(struct macio_dev *mdev, pm_message_t state)
/* prolly should wait for dma to finish & turn off the chip */
spin_lock_irqsave(&bp->lock, flags);
if (bp->timeout_active) {
- del_timer(&bp->tx_timeout);
+ timer_delete(&bp->tx_timeout);
bp->timeout_active = 0;
}
disable_irq(dev->irq);
@@ -546,7 +545,7 @@ static inline void bmac_set_timeout(struct net_device *dev)
spin_lock_irqsave(&bp->lock, flags);
if (bp->timeout_active)
- del_timer(&bp->tx_timeout);
+ timer_delete(&bp->tx_timeout);
bp->tx_timeout.expires = jiffies + TX_TIMEOUT;
add_timer(&bp->tx_timeout);
bp->timeout_active = 1;
@@ -755,7 +754,7 @@ static irqreturn_t bmac_txdma_intr(int irq, void *dev_id)
XXDEBUG(("bmac_txdma_intr\n"));
}
- /* del_timer(&bp->tx_timeout); */
+ /* timer_delete(&bp->tx_timeout); */
/* bp->timeout_active = 0; */
while (1) {
@@ -796,59 +795,6 @@ static irqreturn_t bmac_txdma_intr(int irq, void *dev_id)
}
#ifndef SUNHME_MULTICAST
-/* Real fast bit-reversal algorithm, 6-bit values */
-static int reverse6[64] = {
- 0x0,0x20,0x10,0x30,0x8,0x28,0x18,0x38,
- 0x4,0x24,0x14,0x34,0xc,0x2c,0x1c,0x3c,
- 0x2,0x22,0x12,0x32,0xa,0x2a,0x1a,0x3a,
- 0x6,0x26,0x16,0x36,0xe,0x2e,0x1e,0x3e,
- 0x1,0x21,0x11,0x31,0x9,0x29,0x19,0x39,
- 0x5,0x25,0x15,0x35,0xd,0x2d,0x1d,0x3d,
- 0x3,0x23,0x13,0x33,0xb,0x2b,0x1b,0x3b,
- 0x7,0x27,0x17,0x37,0xf,0x2f,0x1f,0x3f
-};
-
-static unsigned int
-crc416(unsigned int curval, unsigned short nxtval)
-{
- unsigned int counter, cur = curval, next = nxtval;
- int high_crc_set, low_data_set;
-
- /* Swap bytes */
- next = ((next & 0x00FF) << 8) | (next >> 8);
-
- /* Compute bit-by-bit */
- for (counter = 0; counter < 16; ++counter) {
- /* is high CRC bit set? */
- if ((cur & 0x80000000) == 0) high_crc_set = 0;
- else high_crc_set = 1;
-
- cur = cur << 1;
-
- if ((next & 0x0001) == 0) low_data_set = 0;
- else low_data_set = 1;
-
- next = next >> 1;
-
- /* do the XOR */
- if (high_crc_set ^ low_data_set) cur = cur ^ CRC32_POLY_BE;
- }
- return cur;
-}
-
-static unsigned int
-bmac_crc(unsigned short *address)
-{
- unsigned int newcrc;
-
- XXDEBUG(("bmac_crc: addr=%#04x, %#04x, %#04x\n", *address, address[1], address[2]));
- newcrc = crc416(0xffffffff, *address); /* address bits 47 - 32 */
- newcrc = crc416(newcrc, address[1]); /* address bits 31 - 16 */
- newcrc = crc416(newcrc, address[2]); /* address bits 15 - 0 */
-
- return(newcrc);
-}
-
/*
* Add requested mcast addr to BMac's hash table filter.
*
@@ -861,8 +807,7 @@ bmac_addhash(struct bmac_data *bp, unsigned char *addr)
unsigned short mask;
if (!(*addr)) return;
- crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
- crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */
+ crc = crc32(~0, addr, ETH_ALEN) >> 26;
if (bp->hash_use_count[crc]++) return; /* This bit is already set */
mask = crc % 16;
mask = (unsigned char)1 << mask;
@@ -876,8 +821,7 @@ bmac_removehash(struct bmac_data *bp, unsigned char *addr)
unsigned char mask;
/* Now, delete the address from the filter copy, as indicated */
- crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
- crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */
+ crc = crc32(~0, addr, ETH_ALEN) >> 26;
if (bp->hash_use_count[crc] == 0) return; /* That bit wasn't in use! */
if (--bp->hash_use_count[crc]) return; /* That bit is still in use */
mask = crc % 16;
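The two hunks above drop the driver's private crc416()/reverse6[] pair in favour of the kernel's crc32(). A hypothetical standalone sketch of what the new one-liner computes (kernel crc32() is the reflected CRC-32 without final inversion, so its top six bits give the 6-bit multicast hash bucket); the bitwise helper below stands in for the kernel routine:

#include <stdint.h>
#include <stddef.h>

/* Bitwise reflected CRC-32, equivalent to the kernel's crc32(seed, p, len). */
static uint32_t crc32_le_sketch(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}
	return crc;
}

/* Hash bucket used by bmac_addhash()/bmac_removehash() after the patch. */
static unsigned int bmac_hash_bucket(const uint8_t addr[6])
{
	return crc32_le_sketch(~0U, addr, 6) >> 26;	/* top 6 bits */
}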
@@ -1317,7 +1261,7 @@ static int bmac_probe(struct macio_dev *mdev, const struct of_device_id *match)
timer_setup(&bp->tx_timeout, bmac_tx_timeout, 0);
- ret = request_irq(dev->irq, bmac_misc_intr, 0, "BMAC-misc", dev);
+ ret = request_irq(dev->irq, bmac_misc_intr, IRQF_NO_AUTOEN, "BMAC-misc", dev);
if (ret) {
printk(KERN_ERR "BMAC: can't get irq %d\n", dev->irq);
goto err_out_iounmap_rx;
@@ -1336,7 +1280,6 @@ static int bmac_probe(struct macio_dev *mdev, const struct of_device_id *match)
/* Mask chip interrupts and disable chip, will be
* re-enabled on open()
*/
- disable_irq(dev->irq);
pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
if (register_netdev(dev) != 0) {
@@ -1467,7 +1410,7 @@ bmac_output(struct sk_buff *skb, struct net_device *dev)
static void bmac_tx_timeout(struct timer_list *t)
{
- struct bmac_data *bp = from_timer(bp, t, tx_timeout);
+ struct bmac_data *bp = timer_container_of(bp, t, tx_timeout);
struct net_device *dev = macio_get_drvdata(bp->mdev);
volatile struct dbdma_regs __iomem *td = bp->tx_dma;
volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c
index e6350971c707..af26905e44e3 100644
--- a/drivers/net/ethernet/apple/mace.c
+++ b/drivers/net/ethernet/apple/mace.c
@@ -523,7 +523,7 @@ static inline void mace_set_timeout(struct net_device *dev)
struct mace_data *mp = netdev_priv(dev);
if (mp->timeout_active)
- del_timer(&mp->tx_timeout);
+ timer_delete(&mp->tx_timeout);
mp->tx_timeout.expires = jiffies + TX_TIMEOUT;
add_timer(&mp->tx_timeout);
mp->timeout_active = 1;
@@ -676,7 +676,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
i = mp->tx_empty;
while (in_8(&mb->pr) & XMTSV) {
- del_timer(&mp->tx_timeout);
+ timer_delete(&mp->tx_timeout);
mp->timeout_active = 0;
/*
* Clear any interrupt indication associated with this status
@@ -805,7 +805,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
static void mace_tx_timeout(struct timer_list *t)
{
- struct mace_data *mp = from_timer(mp, t, tx_timeout);
+ struct mace_data *mp = timer_container_of(mp, t, tx_timeout);
struct net_device *dev = macio_get_drvdata(mp->mdev);
volatile struct mace __iomem *mb = mp->mace;
volatile struct dbdma_regs __iomem *td = mp->tx_dma;
diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c
index 766ab78256fe..8989506e6248 100644
--- a/drivers/net/ethernet/apple/macmace.c
+++ b/drivers/net/ethernet/apple/macmace.c
@@ -759,7 +759,7 @@ static void mac_mace_device_remove(struct platform_device *pdev)
static struct platform_driver mac_mace_driver = {
.probe = mace_probe,
- .remove_new = mac_mace_device_remove,
+ .remove = mac_mace_device_remove,
.driver = {
.name = mac_mace_string,
},
diff --git a/drivers/net/ethernet/aquantia/atlantic/Makefile b/drivers/net/ethernet/aquantia/atlantic/Makefile
index 8ebcc68e807f..f6a96931c89a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/Makefile
+++ b/drivers/net/ethernet/aquantia/atlantic/Makefile
@@ -8,7 +8,7 @@
obj-$(CONFIG_AQTION) += atlantic.o
-ccflags-y += -I$(srctree)/$(src)
+ccflags-y += -I$(src)
atlantic-objs := aq_main.o \
aq_nic.o \
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index 7e9c74b141ef..fc2b325f34e7 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -17,7 +17,7 @@
#define AQ_CFG_IS_POLLING_DEF 0U
-#define AQ_CFG_FORCE_LEGACY_INT 0U
+#define AQ_CFG_FORCE_INTX 0U
#define AQ_CFG_INTERRUPT_MODERATION_OFF 0
#define AQ_CFG_INTERRUPT_MODERATION_ON 1
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c b/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c
index 414b2e448d59..787ea91802e7 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c
@@ -113,19 +113,9 @@ static const struct hwmon_ops aq_hwmon_ops = {
.read_string = aq_hwmon_read_string,
};
-static u32 aq_hwmon_temp_config[] = {
- HWMON_T_INPUT | HWMON_T_LABEL,
- HWMON_T_INPUT | HWMON_T_LABEL,
- 0,
-};
-
-static const struct hwmon_channel_info aq_hwmon_temp = {
- .type = hwmon_temp,
- .config = aq_hwmon_temp_config,
-};
-
static const struct hwmon_channel_info * const aq_hwmon_info[] = {
- &aq_hwmon_temp,
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL),
NULL,
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index a2606ee3b0a5..6fef47ba0a59 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -15,6 +15,7 @@
#include "aq_macsec.h"
#include "aq_main.h"
+#include <linux/ethtool.h>
#include <linux/linkmode.h>
#include <linux/ptp_clock_kernel.h>
@@ -266,7 +267,7 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
const int rx_stat_cnt = ARRAY_SIZE(aq_ethtool_queue_rx_stat_names);
const int tx_stat_cnt = ARRAY_SIZE(aq_ethtool_queue_tx_stat_names);
char tc_string[8];
- int tc;
+ unsigned int tc;
memset(tc_string, 0, sizeof(tc_string));
memcpy(p, aq_ethtool_stat_names,
@@ -275,22 +276,20 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
for (tc = 0; tc < cfg->tcs; tc++) {
if (cfg->is_qos)
- snprintf(tc_string, 8, "TC%d ", tc);
+ snprintf(tc_string, 8, "TC%u ", tc);
for (i = 0; i < cfg->vecs; i++) {
for (si = 0; si < rx_stat_cnt; si++) {
- snprintf(p, ETH_GSTRING_LEN,
+ ethtool_sprintf(&p,
aq_ethtool_queue_rx_stat_names[si],
tc_string,
AQ_NIC_CFG_TCVEC2RING(cfg, tc, i));
- p += ETH_GSTRING_LEN;
}
for (si = 0; si < tx_stat_cnt; si++) {
- snprintf(p, ETH_GSTRING_LEN,
+ ethtool_sprintf(&p,
aq_ethtool_queue_tx_stat_names[si],
tc_string,
AQ_NIC_CFG_TCVEC2RING(cfg, tc, i));
- p += ETH_GSTRING_LEN;
}
}
}
@@ -305,20 +304,18 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
for (i = 0; i < max(rx_ring_cnt, tx_ring_cnt); i++) {
for (si = 0; si < rx_stat_cnt; si++) {
- snprintf(p, ETH_GSTRING_LEN,
+ ethtool_sprintf(&p,
aq_ethtool_queue_rx_stat_names[si],
tc_string,
i ? PTP_HWST_RING_IDX : ptp_ring_idx);
- p += ETH_GSTRING_LEN;
}
if (i >= tx_ring_cnt)
continue;
for (si = 0; si < tx_stat_cnt; si++) {
- snprintf(p, ETH_GSTRING_LEN,
+ ethtool_sprintf(&p,
aq_ethtool_queue_tx_stat_names[si],
tc_string,
i ? PTP_HWST_RING_IDX : ptp_ring_idx);
- p += ETH_GSTRING_LEN;
}
}
}
@@ -338,9 +335,8 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
for (si = 0;
si < ARRAY_SIZE(aq_macsec_txsc_stat_names);
si++) {
- snprintf(p, ETH_GSTRING_LEN,
+ ethtool_sprintf(&p,
aq_macsec_txsc_stat_names[si], i);
- p += ETH_GSTRING_LEN;
}
aq_txsc = &nic->macsec_cfg->aq_txsc[i];
for (sa = 0; sa < MACSEC_NUM_AN; sa++) {
@@ -349,10 +345,9 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
for (si = 0;
si < ARRAY_SIZE(aq_macsec_txsa_stat_names);
si++) {
- snprintf(p, ETH_GSTRING_LEN,
+ ethtool_sprintf(&p,
aq_macsec_txsa_stat_names[si],
i, sa);
- p += ETH_GSTRING_LEN;
}
}
}
@@ -369,10 +364,9 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
for (si = 0;
si < ARRAY_SIZE(aq_macsec_rxsa_stat_names);
si++) {
- snprintf(p, ETH_GSTRING_LEN,
+ ethtool_sprintf(&p,
aq_macsec_rxsa_stat_names[si],
i, sa);
- p += ETH_GSTRING_LEN;
}
}
}
@@ -652,7 +646,7 @@ static int aq_ethtool_set_wol(struct net_device *ndev,
}
static int aq_ethtool_get_ts_info(struct net_device *ndev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
@@ -984,6 +978,76 @@ static int aq_ethtool_set_phy_tunable(struct net_device *ndev,
return err;
}
+static int aq_ethtool_get_module_info(struct net_device *ndev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ u8 compliance_val, dom_type;
+ int err;
+
+ /* Module EEPROM is only supported for controllers with external PHY */
+ if (aq_nic->aq_nic_cfg.aq_hw_caps->media_type != AQ_HW_MEDIA_TYPE_FIBRE ||
+ !aq_nic->aq_hw_ops->hw_read_module_eeprom)
+ return -EOPNOTSUPP;
+
+ err = aq_nic->aq_hw_ops->hw_read_module_eeprom(aq_nic->aq_hw,
+ SFF_8472_ID_ADDR, SFF_8472_COMP_ADDR, 1, &compliance_val);
+ if (err)
+ return err;
+
+ err = aq_nic->aq_hw_ops->hw_read_module_eeprom(aq_nic->aq_hw,
+ SFF_8472_ID_ADDR, SFF_8472_DOM_TYPE_ADDR, 1, &dom_type);
+ if (err)
+ return err;
+
+ if (dom_type & SFF_8472_ADDRESS_CHANGE_REQ_MASK || compliance_val == 0x00) {
+ modinfo->type = ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ }
+ return 0;
+}
+
+static int aq_ethtool_get_module_eeprom(struct net_device *ndev,
+ struct ethtool_eeprom *ee, unsigned char *data)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ unsigned int first, last, len;
+ int err;
+
+ if (!aq_nic->aq_hw_ops->hw_read_module_eeprom)
+ return -EOPNOTSUPP;
+
+ first = ee->offset;
+ last = ee->offset + ee->len;
+
+ if (first < ETH_MODULE_SFF_8079_LEN) {
+ len = min(last, ETH_MODULE_SFF_8079_LEN);
+ len -= first;
+
+ err = aq_nic->aq_hw_ops->hw_read_module_eeprom(aq_nic->aq_hw,
+ SFF_8472_ID_ADDR, first, len, data);
+ if (err)
+ return err;
+
+ first += len;
+ data += len;
+ }
+ if (first < ETH_MODULE_SFF_8472_LEN && last > ETH_MODULE_SFF_8079_LEN) {
+ len = min(last, ETH_MODULE_SFF_8472_LEN);
+ len -= first;
+ first -= ETH_MODULE_SFF_8079_LEN;
+
+ err = aq_nic->aq_hw_ops->hw_read_module_eeprom(aq_nic->aq_hw,
+ SFF_8472_DIAGNOSTICS_ADDR, first, len, data);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
const struct ethtool_ops aq_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
@@ -1021,4 +1085,6 @@ const struct ethtool_ops aq_ethtool_ops = {
.get_ts_info = aq_ethtool_get_ts_info,
.get_phy_tunable = aq_ethtool_get_phy_tunable,
.set_phy_tunable = aq_ethtool_set_phy_tunable,
+ .get_module_info = aq_ethtool_get_module_info,
+ .get_module_eeprom = aq_ethtool_get_module_eeprom,
};
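The new get_module_info/get_module_eeprom callbacks above serve ethtool's module-EEPROM dump by splitting a request across the two SFF-8472 pages: bytes 0-255 are read from I2C address 0x50 and anything beyond comes from the diagnostics page at 0x51, re-based to offset 0. A hypothetical standalone sketch of that split arithmetic (struct and function names are illustrative, not part of the driver):

#include <stddef.h>

#define PAGE_A_LEN 256U		/* ETH_MODULE_SFF_8079_LEN */
#define PAGE_B_LEN 512U		/* ETH_MODULE_SFF_8472_LEN */

struct eeprom_chunk {
	unsigned int dev_addr;	/* 0x50 or 0x51 */
	unsigned int offset;	/* offset within that page */
	unsigned int len;
};

/* Split an (offset, len) request into at most two per-page reads. */
static int split_request(unsigned int offset, unsigned int len,
			 struct eeprom_chunk out[2])
{
	unsigned int last = offset + len;
	int n = 0;

	if (offset < PAGE_A_LEN) {
		out[n].dev_addr = 0x50;
		out[n].offset = offset;
		out[n].len = (last < PAGE_A_LEN ? last : PAGE_A_LEN) - offset;
		offset += out[n].len;
		n++;
	}
	if (offset < PAGE_B_LEN && last > PAGE_A_LEN) {
		out[n].dev_addr = 0x51;
		out[n].offset = offset - PAGE_A_LEN;
		out[n].len = (last < PAGE_B_LEN ? last : PAGE_B_LEN) - offset;
		n++;
	}
	return n;
}

For example, offset 250 with length 20 becomes a 6-byte read from 0x50 at offset 250 followed by a 14-byte read from 0x51 at offset 0; ethtool -m <iface> exercises this path.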
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h
index 6d5be5ebeb13..f26fe1a75539 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h
@@ -14,4 +14,12 @@
extern const struct ethtool_ops aq_ethtool_ops;
#define AQ_PRIV_FLAGS_MASK (AQ_HW_LOOPBACK_MASK)
+#define SFF_8472_ID_ADDR 0x50
+#define SFF_8472_DIAGNOSTICS_ADDR 0x51
+
+#define SFF_8472_COMP_ADDR 0x5e
+#define SFF_8472_DOM_TYPE_ADDR 0x5c
+
+#define SFF_8472_ADDRESS_CHANGE_REQ_MASK 0x4
+
#endif /* AQ_ETHTOOL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index dbd284660135..4e66fd9b2ab1 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -104,7 +104,7 @@ struct aq_stats_s {
};
#define AQ_HW_IRQ_INVALID 0U
-#define AQ_HW_IRQ_LEGACY 1U
+#define AQ_HW_IRQ_INTX 1U
#define AQ_HW_IRQ_MSI 2U
#define AQ_HW_IRQ_MSIX 3U
@@ -113,6 +113,8 @@ struct aq_stats_s {
#define AQ_HW_POWER_STATE_D0 0U
#define AQ_HW_POWER_STATE_D3 3U
+#define AQ_FW_WAKE_ON_LINK_RTPM BIT(10)
+
#define AQ_HW_FLAG_STARTED 0x00000004U
#define AQ_HW_FLAG_STOPPING 0x00000008U
#define AQ_HW_FLAG_RESETTING 0x00000010U
@@ -340,6 +342,9 @@ struct aq_hw_ops {
int (*hw_set_loopback)(struct aq_hw_s *self, u32 mode, bool enable);
int (*hw_get_mac_temp)(struct aq_hw_s *self, u32 *temp);
+
+ int (*hw_read_module_eeprom)(struct aq_hw_s *self, u8 dev_addr,
+ u8 reg_start_addr, int len, u8 *data);
};
struct aq_fw_ops {
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
index 1921741f7311..18b08277d2e1 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
@@ -15,6 +15,7 @@
#include "aq_hw.h"
#include "aq_nic.h"
+#include "hw_atl/hw_atl_llh.h"
void aq_hw_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk,
u32 shift, u32 val)
@@ -81,6 +82,27 @@ void aq_hw_write_reg64(struct aq_hw_s *hw, u32 reg, u64 value)
lo_hi_writeq(value, hw->mmio + reg);
}
+int aq_hw_invalidate_descriptor_cache(struct aq_hw_s *hw)
+{
+ int err;
+ u32 val;
+
+ /* Invalidate Descriptor Cache to prevent writing to the cached
+ * descriptors and to the data pointer of those descriptors
+ */
+ hw_atl_rdm_rx_dma_desc_cache_init_tgl(hw);
+
+ err = aq_hw_err_from_flags(hw);
+ if (err)
+ goto err_exit;
+
+ readx_poll_timeout_atomic(hw_atl_rdm_rx_dma_desc_cache_init_done_get,
+ hw, val, val == 1, 1000U, 10000U);
+
+err_exit:
+ return err;
+}
+
int aq_hw_err_from_flags(struct aq_hw_s *hw)
{
int err = 0;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h
index ffa6e4067c21..d89c63d88e4a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h
@@ -35,6 +35,7 @@ u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg);
void aq_hw_write_reg(struct aq_hw_s *hw, u32 reg, u32 value);
u64 aq_hw_read_reg64(struct aq_hw_s *hw, u32 reg);
void aq_hw_write_reg64(struct aq_hw_s *hw, u32 reg, u64 value);
+int aq_hw_invalidate_descriptor_cache(struct aq_hw_s *hw);
int aq_hw_err_from_flags(struct aq_hw_s *hw);
int aq_hw_num_tcs(struct aq_hw_s *hw);
int aq_hw_q_per_tc(struct aq_hw_s *hw);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index 0b2a52199914..4ef4fe64b8ac 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -123,7 +123,6 @@ static netdev_tx_t aq_ndev_start_xmit(struct sk_buff *skb, struct net_device *nd
}
#endif
- skb_tx_timestamp(skb);
return aq_nic_xmit(aq_nic, skb);
}
@@ -146,7 +145,7 @@ static int aq_ndev_change_mtu(struct net_device *ndev, int new_mtu)
if (err < 0)
goto err_exit;
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
err_exit:
return err;
@@ -259,10 +258,15 @@ static void aq_ndev_set_multicast_settings(struct net_device *ndev)
(void)aq_nic_set_multicast_list(aq_nic, ndev);
}
-#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
-static int aq_ndev_config_hwtstamp(struct aq_nic_s *aq_nic,
- struct hwtstamp_config *config)
+static int aq_ndev_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
+ struct aq_nic_s *aq_nic = netdev_priv(netdev);
+
+ if (!IS_REACHABLE(CONFIG_PTP_1588_CLOCK) || !aq_nic->aq_ptp)
+ return -EOPNOTSUPP;
+
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
case HWTSTAMP_TX_ON:
@@ -291,59 +295,17 @@ static int aq_ndev_config_hwtstamp(struct aq_nic_s *aq_nic,
return aq_ptp_hwtstamp_config_set(aq_nic->aq_ptp, config);
}
-#endif
-
-static int aq_ndev_hwtstamp_set(struct aq_nic_s *aq_nic, struct ifreq *ifr)
-{
- struct hwtstamp_config config;
-#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
- int ret_val;
-#endif
-
- if (!aq_nic->aq_ptp)
- return -EOPNOTSUPP;
-
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
- ret_val = aq_ndev_config_hwtstamp(aq_nic, &config);
- if (ret_val)
- return ret_val;
-#endif
-
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
-}
-#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
-static int aq_ndev_hwtstamp_get(struct aq_nic_s *aq_nic, struct ifreq *ifr)
+static int aq_ndev_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
{
- struct hwtstamp_config config;
+ struct aq_nic_s *aq_nic = netdev_priv(netdev);
if (!aq_nic->aq_ptp)
return -EOPNOTSUPP;
- aq_ptp_hwtstamp_config_get(aq_nic->aq_ptp, &config);
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
-}
-#endif
-
-static int aq_ndev_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
- struct aq_nic_s *aq_nic = netdev_priv(netdev);
-
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return aq_ndev_hwtstamp_set(aq_nic, ifr);
-
-#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
- case SIOCGHWTSTAMP:
- return aq_ndev_hwtstamp_get(aq_nic, ifr);
-#endif
- }
-
- return -EOPNOTSUPP;
+ aq_ptp_hwtstamp_config_get(aq_nic->aq_ptp, config);
+ return 0;
}
static int aq_ndo_vlan_rx_add_vid(struct net_device *ndev, __be16 proto,
@@ -501,12 +463,13 @@ static const struct net_device_ops aq_ndev_ops = {
.ndo_set_mac_address = aq_ndev_set_mac_address,
.ndo_set_features = aq_ndev_set_features,
.ndo_fix_features = aq_ndev_fix_features,
- .ndo_eth_ioctl = aq_ndev_ioctl,
.ndo_vlan_rx_add_vid = aq_ndo_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = aq_ndo_vlan_rx_kill_vid,
.ndo_setup_tc = aq_ndo_setup_tc,
.ndo_bpf = aq_xdp,
.ndo_xdp_xmit = aq_xdp_xmit,
+ .ndo_hwtstamp_get = aq_ndev_hwtstamp_get,
+ .ndo_hwtstamp_set = aq_ndev_hwtstamp_set,
};
static int __init aq_ndev_init_module(void)
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index d6d6d5d37ff3..b24eaa5283fa 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -127,7 +127,7 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
cfg->irq_type = aq_pci_func_get_irq_type(self);
- if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) ||
+ if ((cfg->irq_type == AQ_HW_IRQ_INTX) ||
(cfg->aq_hw_caps->vecs == 1U) ||
(cfg->vecs == 1U)) {
cfg->is_rss = 0U;
@@ -254,7 +254,7 @@ static void aq_nic_service_task(struct work_struct *work)
static void aq_nic_service_timer_cb(struct timer_list *t)
{
- struct aq_nic_s *self = from_timer(self, t, service_timer);
+ struct aq_nic_s *self = timer_container_of(self, t, service_timer);
mod_timer(&self->service_timer,
jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);
@@ -264,7 +264,7 @@ static void aq_nic_service_timer_cb(struct timer_list *t)
static void aq_nic_polling_timer_cb(struct timer_list *t)
{
- struct aq_nic_s *self = from_timer(self, t, polling_timer);
+ struct aq_nic_s *self = timer_container_of(self, t, polling_timer);
unsigned int i = 0U;
for (i = 0U; self->aq_vecs > i; ++i)
@@ -898,6 +898,8 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
frags = aq_nic_map_skb(self, skb, ring);
+ skb_tx_timestamp(skb);
+
if (likely(frags)) {
err = self->aq_hw_ops->hw_ring_tx_xmit(self->aq_hw,
ring, frags);
@@ -1389,13 +1391,13 @@ int aq_nic_stop(struct aq_nic_s *self)
netif_tx_disable(self->ndev);
netif_carrier_off(self->ndev);
- del_timer_sync(&self->service_timer);
+ timer_delete_sync(&self->service_timer);
cancel_work_sync(&self->service_task);
self->aq_hw_ops->hw_irq_disable(self->aq_hw, AQ_CFG_IRQ_MASK);
if (self->aq_nic_cfg.is_polling)
- del_timer_sync(&self->polling_timer);
+ timer_delete_sync(&self->polling_timer);
else
aq_pci_func_free_irqs(self);
@@ -1441,7 +1443,9 @@ void aq_nic_deinit(struct aq_nic_s *self, bool link_down)
aq_ptp_ring_free(self);
aq_ptp_free(self);
- if (likely(self->aq_fw_ops->deinit) && link_down) {
+ /* May be invoked during hot unplug. */
+ if (pci_device_is_present(self->pdev) &&
+ likely(self->aq_fw_ops->deinit) && link_down) {
mutex_lock(&self->fwreq_mutex);
self->aq_fw_ops->deinit(self->aq_hw);
mutex_unlock(&self->fwreq_mutex);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index baa5f8cc31f2..ed5231dece3f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -162,8 +162,8 @@ int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i,
self->msix_entry_mask |= (1 << i);
if (pdev->msix_enabled && affinity_mask)
- irq_set_affinity_hint(pci_irq_vector(pdev, i),
- affinity_mask);
+ irq_update_affinity_hint(pci_irq_vector(pdev, i),
+ affinity_mask);
}
return err;
@@ -187,7 +187,7 @@ void aq_pci_func_free_irqs(struct aq_nic_s *self)
continue;
if (pdev->msix_enabled)
- irq_set_affinity_hint(pci_irq_vector(pdev, i), NULL);
+ irq_update_affinity_hint(pci_irq_vector(pdev, i), NULL);
free_irq(pci_irq_vector(pdev, i), irq_data);
self->msix_entry_mask &= ~(1U << i);
}
@@ -200,7 +200,7 @@ unsigned int aq_pci_func_get_irq_type(struct aq_nic_s *self)
if (self->pdev->msi_enabled)
return AQ_HW_IRQ_MSI;
- return AQ_HW_IRQ_LEGACY;
+ return AQ_HW_IRQ_INTX;
}
static void aq_pci_free_irq_vectors(struct aq_nic_s *self)
@@ -298,11 +298,8 @@ static int aq_pci_probe(struct pci_dev *pdev,
numvecs += AQ_HW_SERVICE_IRQS;
/*enable interrupts */
-#if !AQ_CFG_FORCE_LEGACY_INT
- err = pci_alloc_irq_vectors(self->pdev, 1, numvecs,
- PCI_IRQ_MSIX | PCI_IRQ_MSI |
- PCI_IRQ_LEGACY);
-
+#if !AQ_CFG_FORCE_INTX
+ err = pci_alloc_irq_vectors(self->pdev, 1, numvecs, PCI_IRQ_ALL_TYPES);
if (err < 0)
goto err_hwinit;
numvecs = err;
@@ -466,7 +463,7 @@ static const struct dev_pm_ops aq_pm_ops = {
};
#endif
-static struct pci_driver aq_pci_ops = {
+static struct pci_driver aq_pci_driver = {
.name = AQ_CFG_DRV_NAME,
.id_table = aq_pci_tbl,
.probe = aq_pci_probe,
@@ -479,11 +476,11 @@ static struct pci_driver aq_pci_ops = {
int aq_pci_func_register_driver(void)
{
- return pci_register_driver(&aq_pci_ops);
+ return pci_register_driver(&aq_pci_driver);
}
void aq_pci_func_unregister_driver(void)
{
- pci_unregister_driver(&aq_pci_ops);
+ pci_unregister_driver(&aq_pci_driver);
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
index 5acb3e16b567..0fa0f891c0e0 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
@@ -51,7 +51,7 @@ struct ptp_tx_timeout {
struct aq_ptp_s {
struct aq_nic_s *aq_nic;
- struct hwtstamp_config hwtstamp_config;
+ struct kernel_hwtstamp_config hwtstamp_config;
spinlock_t ptp_lock;
spinlock_t ptp_ring_lock;
struct ptp_clock *ptp_clock;
@@ -567,7 +567,7 @@ static void aq_ptp_rx_hwtstamp(struct aq_ptp_s *aq_ptp, struct skb_shared_hwtsta
}
void aq_ptp_hwtstamp_config_get(struct aq_ptp_s *aq_ptp,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
*config = aq_ptp->hwtstamp_config;
}
@@ -588,7 +588,7 @@ static void aq_ptp_prepare_filters(struct aq_ptp_s *aq_ptp)
}
int aq_ptp_hwtstamp_config_set(struct aq_ptp_s *aq_ptp,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
const struct aq_hw_ops *hw_ops;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h
index 210b723f2207..5e643ec7cc06 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h
@@ -60,9 +60,9 @@ void aq_ptp_tx_hwtstamp(struct aq_nic_s *aq_nic, u64 timestamp);
/* Must check PTP availability before calling */
void aq_ptp_hwtstamp_config_get(struct aq_ptp_s *aq_ptp,
- struct hwtstamp_config *config);
+ struct kernel_hwtstamp_config *config);
int aq_ptp_hwtstamp_config_set(struct aq_ptp_s *aq_ptp,
- struct hwtstamp_config *config);
+ struct kernel_hwtstamp_config *config);
/* Return whether the ring belongs to PTP or not */
bool aq_ptp_ring(struct aq_nic_s *aq_nic, struct aq_ring_s *ring);
@@ -130,9 +130,9 @@ static inline int aq_ptp_xmit(struct aq_nic_s *aq_nic, struct sk_buff *skb)
static inline void aq_ptp_tx_hwtstamp(struct aq_nic_s *aq_nic, u64 timestamp) {}
static inline void aq_ptp_hwtstamp_config_get(struct aq_ptp_s *aq_ptp,
- struct hwtstamp_config *config) {}
+ struct kernel_hwtstamp_config *config) {}
static inline int aq_ptp_hwtstamp_config_set(struct aq_ptp_s *aq_ptp,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
return 0;
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index f7433abd6591..d23d23bed39f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -547,6 +547,11 @@ static int __aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi,
if (!buff->is_eop) {
unsigned int frag_cnt = 0U;
+
+ /* There will be an extra fragment */
+ if (buff->len > AQ_CFG_RX_HDR_SIZE)
+ frag_cnt++;
+
buff_ = buff;
do {
bool is_rsc_completed = true;
@@ -557,7 +562,7 @@ static int __aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi,
}
frag_cnt++;
- next_ = buff_->next,
+ next_ = buff_->next;
buff_ = &self->buff_ring[next_];
is_rsc_completed =
aq_ring_dx_in_range(self->sw_head,
@@ -583,7 +588,7 @@ static int __aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi,
err = -EIO;
goto err_exit;
}
- next_ = buff_->next,
+ next_ = buff_->next;
buff_ = &self->buff_ring[next_];
buff_->is_cleaned = true;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index 9dfd68f0fda9..8de2cdd09213 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -352,7 +352,7 @@ static int hw_atl_a0_hw_init(struct aq_hw_s *self, const u8 *mac_addr)
{
static u32 aq_hw_atl_igcr_table_[4][2] = {
[AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U },
- [AQ_HW_IRQ_LEGACY] = { 0x20000080U, 0x20000080U },
+ [AQ_HW_IRQ_INTX] = { 0x20000080U, 0x20000080U },
[AQ_HW_IRQ_MSI] = { 0x20000021U, 0x20000025U },
[AQ_HW_IRQ_MSIX] = { 0x20000022U, 0x20000026U },
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 54e70f07b573..c7895bfb2ecf 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -562,7 +562,7 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self, const u8 *mac_addr)
{
static u32 aq_hw_atl_igcr_table_[4][2] = {
[AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U },
- [AQ_HW_IRQ_LEGACY] = { 0x20000080U, 0x20000080U },
+ [AQ_HW_IRQ_INTX] = { 0x20000080U, 0x20000080U },
[AQ_HW_IRQ_MSI] = { 0x20000021U, 0x20000025U },
[AQ_HW_IRQ_MSIX] = { 0x20000022U, 0x20000026U },
};
@@ -1198,26 +1198,9 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
static int hw_atl_b0_hw_stop(struct aq_hw_s *self)
{
- int err;
- u32 val;
-
hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK);
- /* Invalidate Descriptor Cache to prevent writing to the cached
- * descriptors and to the data pointer of those descriptors
- */
- hw_atl_rdm_rx_dma_desc_cache_init_tgl(self);
-
- err = aq_hw_err_from_flags(self);
-
- if (err)
- goto err_exit;
-
- readx_poll_timeout_atomic(hw_atl_rdm_rx_dma_desc_cache_init_done_get,
- self, val, val == 1, 1000U, 10000U);
-
-err_exit:
- return err;
+ return aq_hw_invalidate_descriptor_cache(self);
}
int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self, struct aq_ring_s *ring)
@@ -1654,6 +1637,137 @@ static int hw_atl_b0_get_mac_temp(struct aq_hw_s *self, u32 *temp)
return 0;
}
+#define START_TRANSMIT 0x5001
+#define START_READ_TRANSMIT 0x5101
+#define STOP_TRANSMIT 0x3001
+#define REPEAT_TRANSMIT 0x1001
+#define REPEAT_NACK_TRANSMIT 0x1011
+
+static int hw_atl_b0_smb0_wait_result(struct aq_hw_s *self, bool expect_ack)
+{
+ int err;
+ u32 val;
+
+ err = readx_poll_timeout(hw_atl_smb0_byte_transfer_complete_get,
+ self, val, val == 1, 100U, 10000U);
+ if (err)
+ return err;
+ if (hw_atl_smb0_receive_acknowledged_get(self) != expect_ack)
+ return -EIO;
+ return 0;
+}
+
+/* Starts an I2C/SMBUS write to a given address. addr is in 7-bit format,
+ * the read/write bit is not part of it.
+ */
+static int hw_atl_b0_smb0_start_write(struct aq_hw_s *self, u32 addr)
+{
+ hw_atl_smb0_tx_data_set(self, (addr << 1) | 0);
+ hw_atl_smb0_provisioning2_set(self, START_TRANSMIT);
+ return hw_atl_b0_smb0_wait_result(self, 0);
+}
+
+/* Writes a single byte as part of an ongoing write started by start_write. */
+static int hw_atl_b0_smb0_write_byte(struct aq_hw_s *self, u32 data)
+{
+ hw_atl_smb0_tx_data_set(self, data);
+ hw_atl_smb0_provisioning2_set(self, REPEAT_TRANSMIT);
+ return hw_atl_b0_smb0_wait_result(self, 0);
+}
+
+/* Starts an I2C/SMBUS read to a given address. addr is in 7-bit format,
+ * the read/write bit is not part of it.
+ */
+static int hw_atl_b0_smb0_start_read(struct aq_hw_s *self, u32 addr)
+{
+ int err;
+
+ hw_atl_smb0_tx_data_set(self, (addr << 1) | 1);
+ hw_atl_smb0_provisioning2_set(self, START_READ_TRANSMIT);
+ err = hw_atl_b0_smb0_wait_result(self, 0);
+ if (err)
+ return err;
+ if (hw_atl_smb0_repeated_start_detect_get(self) == 0)
+ return -EIO;
+ return 0;
+}
+
+/* Reads a single byte as part of an ongoing read started by start_read. */
+static int hw_atl_b0_smb0_read_byte(struct aq_hw_s *self)
+{
+ int err;
+
+ hw_atl_smb0_provisioning2_set(self, REPEAT_TRANSMIT);
+ err = hw_atl_b0_smb0_wait_result(self, 0);
+ if (err)
+ return err;
+ return hw_atl_smb0_rx_data_get(self);
+}
+
+/* Reads the last byte of an ongoing read. */
+static int hw_atl_b0_smb0_read_byte_nack(struct aq_hw_s *self)
+{
+ int err;
+
+ hw_atl_smb0_provisioning2_set(self, REPEAT_NACK_TRANSMIT);
+ err = hw_atl_b0_smb0_wait_result(self, 1);
+ if (err)
+ return err;
+ return hw_atl_smb0_rx_data_get(self);
+}
+
+/* Sends a stop condition and ends a transfer. */
+static void hw_atl_b0_smb0_stop(struct aq_hw_s *self)
+{
+ hw_atl_smb0_provisioning2_set(self, STOP_TRANSMIT);
+}
+
+static int hw_atl_b0_read_module_eeprom(struct aq_hw_s *self, u8 dev_addr,
+ u8 reg_start_addr, int len, u8 *data)
+{
+ int i, b;
+ int err;
+ u32 val;
+
+ /* Wait for SMBUS0 to be idle */
+ err = readx_poll_timeout(hw_atl_smb0_bus_busy_get, self,
+ val, val == 0, 100U, 10000U);
+ if (err)
+ return err;
+
+ err = hw_atl_b0_smb0_start_write(self, dev_addr);
+ if (err)
+ goto out;
+
+ err = hw_atl_b0_smb0_write_byte(self, reg_start_addr);
+ if (err)
+ goto out;
+
+ err = hw_atl_b0_smb0_start_read(self, dev_addr);
+ if (err)
+ goto out;
+
+ for (i = 0; i < len - 1; i++) {
+ b = hw_atl_b0_smb0_read_byte(self);
+ if (b < 0) {
+ err = b;
+ goto out;
+ }
+ data[i] = (u8)b;
+ }
+
+ b = hw_atl_b0_smb0_read_byte_nack(self);
+ if (b < 0) {
+ err = b;
+ goto out;
+ }
+ data[i] = (u8)b;
+
+out:
+ hw_atl_b0_smb0_stop(self);
+ return err;
+}
+
const struct aq_hw_ops hw_atl_ops_b0 = {
.hw_soft_reset = hw_atl_utils_soft_reset,
.hw_prepare = hw_atl_utils_initfw,
@@ -1712,4 +1826,5 @@ const struct aq_hw_ops hw_atl_ops_b0 = {
.hw_set_fc = hw_atl_b0_set_fc,
.hw_get_mac_temp = hw_atl_b0_get_mac_temp,
+ .hw_read_module_eeprom = hw_atl_b0_read_module_eeprom,
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
index 7b67bdd8a258..d07af1271d59 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
@@ -57,6 +57,49 @@ u32 hw_atl_ts_data_get(struct aq_hw_s *aq_hw)
HW_ATL_TS_DATA_OUT_SHIFT);
}
+u32 hw_atl_smb0_bus_busy_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_SMB0_BUS_BUSY_ADR,
+ HW_ATL_SMB0_BUS_BUSY_MSK,
+ HW_ATL_SMB0_BUS_BUSY_SHIFT);
+}
+
+u32 hw_atl_smb0_byte_transfer_complete_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_SMB0_BYTE_TRANSFER_COMPLETE_ADR,
+ HW_ATL_SMB0_BYTE_TRANSFER_COMPLETE_MSK,
+ HW_ATL_SMB0_BYTE_TRANSFER_COMPLETE_SHIFT);
+}
+
+u32 hw_atl_smb0_receive_acknowledged_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_SMB0_RX_ACKNOWLEDGED_ADR,
+ HW_ATL_SMB0_RX_ACKNOWLEDGED_MSK,
+ HW_ATL_SMB0_RX_ACKNOWLEDGED_SHIFT);
+}
+
+u32 hw_atl_smb0_repeated_start_detect_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_SMB0_REPEATED_START_DETECT_ADR,
+ HW_ATL_SMB0_REPEATED_START_DETECT_MSK,
+ HW_ATL_SMB0_REPEATED_START_DETECT_SHIFT);
+}
+
+u32 hw_atl_smb0_rx_data_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL_SMB0_RECEIVED_DATA_ADR);
+}
+
+void hw_atl_smb0_tx_data_set(struct aq_hw_s *aq_hw, u32 data)
+{
+ return aq_hw_write_reg(aq_hw, HW_ATL_SMB0_TRANSMITTED_DATA_ADR, data);
+}
+
+void hw_atl_smb0_provisioning2_set(struct aq_hw_s *aq_hw, u32 data)
+{
+ return aq_hw_write_reg(aq_hw, HW_ATL_SMB0_PROVISIONING2_ADR, data);
+}
+
/* global */
void hw_atl_reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem,
u32 semaphore)
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
index 58f5ee0a6214..5fd506acacb5 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
@@ -34,6 +34,27 @@ u32 hw_atl_ts_ready_latch_high_get(struct aq_hw_s *aq_hw);
/* get temperature sense data */
u32 hw_atl_ts_data_get(struct aq_hw_s *aq_hw);
+/* SMBUS0 bus busy */
+u32 hw_atl_smb0_bus_busy_get(struct aq_hw_s *aq_hw);
+
+/* SMBUS0 byte transfer complete */
+u32 hw_atl_smb0_byte_transfer_complete_get(struct aq_hw_s *aq_hw);
+
+/* SMBUS0 receive acknowledged */
+u32 hw_atl_smb0_receive_acknowledged_get(struct aq_hw_s *aq_hw);
+
+/* SMBUS0 set transmitted data (only leftmost byte of data valid) */
+void hw_atl_smb0_tx_data_set(struct aq_hw_s *aq_hw, u32 data);
+
+/* SMBUS0 provisioning2 command register */
+void hw_atl_smb0_provisioning2_set(struct aq_hw_s *aq_hw, u32 data);
+
+/* SMBUS0 repeated start detect */
+u32 hw_atl_smb0_repeated_start_detect_get(struct aq_hw_s *aq_hw);
+
+/* SMBUS0 received data register */
+u32 hw_atl_smb0_rx_data_get(struct aq_hw_s *aq_hw);
+
/* global */
/* set global microprocessor semaphore */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
index 4a6467031b9e..fce30d90b6cb 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
@@ -42,6 +42,38 @@
#define HW_ATL_TS_DATA_OUT_SHIFT 0
#define HW_ATL_TS_DATA_OUT_WIDTH 12
+/* SMBUS0 Received Data register */
+#define HW_ATL_SMB0_RECEIVED_DATA_ADR 0x00000748
+/* SMBUS0 Transmitted Data register */
+#define HW_ATL_SMB0_TRANSMITTED_DATA_ADR 0x00000608
+
+/* SMBUS0 Global Provisioning 2 register */
+#define HW_ATL_SMB0_PROVISIONING2_ADR 0x00000604
+
+/* SMBUS0 Bus Busy Bitfield Definitions */
+#define HW_ATL_SMB0_BUS_BUSY_ADR 0x00000744
+#define HW_ATL_SMB0_BUS_BUSY_MSK 0x00000080
+#define HW_ATL_SMB0_BUS_BUSY_SHIFT 7
+#define HW_ATL_SMB0_BUS_BUSY_WIDTH 1
+
+/* SMBUS0 Byte Transfer Complete Bitfield Definitions */
+#define HW_ATL_SMB0_BYTE_TRANSFER_COMPLETE_ADR 0x00000744
+#define HW_ATL_SMB0_BYTE_TRANSFER_COMPLETE_MSK 0x00000002
+#define HW_ATL_SMB0_BYTE_TRANSFER_COMPLETE_SHIFT 1
+#define HW_ATL_SMB0_BYTE_TRANSFER_COMPLETE_WIDTH 1
+
+/* SMBUS0 Receive Acknowledge Bitfield Definitions */
+#define HW_ATL_SMB0_RX_ACKNOWLEDGED_ADR 0x00000744
+#define HW_ATL_SMB0_RX_ACKNOWLEDGED_MSK 0x00000100
+#define HW_ATL_SMB0_RX_ACKNOWLEDGED_SHIFT 8
+#define HW_ATL_SMB0_RX_ACKNOWLEDGED_WIDTH 1
+
+/* SMBUS0 Repeated Start Detect Bitfield Definitions */
+#define HW_ATL_SMB0_REPEATED_START_DETECT_ADR 0x00000744
+#define HW_ATL_SMB0_REPEATED_START_DETECT_MSK 0x00000004
+#define HW_ATL_SMB0_REPEATED_START_DETECT_SHIFT 2
+#define HW_ATL_SMB0_REPEATED_START_DETECT_WIDTH 1
+
/* global microprocessor semaphore definitions
* base address: 0x000003a0
* parameter: semaphore {s} | stride size 0x4 | range [0, 15]
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index f5901f8e3907..f6b990b7f5b4 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -226,7 +226,6 @@ struct __packed offload_info {
struct offload_port_info ports;
struct offload_ka_info kas;
struct offload_rr_info rrs;
- u8 buf[];
};
struct __packed hw_atl_utils_fw_rpc {
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
index 220400a633f5..0ce9caae8799 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
@@ -534,7 +534,7 @@ static int hw_atl2_hw_init(struct aq_hw_s *self, const u8 *mac_addr)
{
static u32 aq_hw_atl2_igcr_table_[4][2] = {
[AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U },
- [AQ_HW_IRQ_LEGACY] = { 0x20000080U, 0x20000080U },
+ [AQ_HW_IRQ_INTX] = { 0x20000080U, 0x20000080U },
[AQ_HW_IRQ_MSI] = { 0x20000021U, 0x20000025U },
[AQ_HW_IRQ_MSIX] = { 0x20000022U, 0x20000026U },
};
@@ -759,7 +759,7 @@ static int hw_atl2_hw_stop(struct aq_hw_s *self)
{
hw_atl_b0_hw_irq_disable(self, HW_ATL2_INT_MASK);
- return 0;
+ return aq_hw_invalidate_descriptor_cache(self);
}
static struct aq_stats_s *hw_atl2_utils_get_hw_stats(struct aq_hw_s *self)
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
index 52e2070a4a2f..7370e3f76b62 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
@@ -462,6 +462,44 @@ static int aq_a2_fw_get_mac_temp(struct aq_hw_s *self, int *temp)
return aq_a2_fw_get_phy_temp(self, temp);
}
+static int aq_a2_fw_set_wol_params(struct aq_hw_s *self, const u8 *mac, u32 wol)
+{
+ struct mac_address_aligned_s mac_address;
+ struct link_control_s link_control;
+ struct wake_on_lan_s wake_on_lan;
+
+ memcpy(mac_address.aligned.mac_address, mac, ETH_ALEN);
+ hw_atl2_shared_buffer_write(self, mac_address, mac_address);
+
+ memset(&wake_on_lan, 0, sizeof(wake_on_lan));
+
+ if (wol & WAKE_MAGIC)
+ wake_on_lan.wake_on_magic_packet = 1U;
+
+ if (wol & (WAKE_PHY | AQ_FW_WAKE_ON_LINK_RTPM))
+ wake_on_lan.wake_on_link_up = 1U;
+
+ hw_atl2_shared_buffer_write(self, sleep_proxy, wake_on_lan);
+
+ hw_atl2_shared_buffer_get(self, link_control, link_control);
+ link_control.mode = AQ_HOST_MODE_SLEEP_PROXY;
+ hw_atl2_shared_buffer_write(self, link_control, link_control);
+
+ return hw_atl2_shared_buffer_finish_ack(self);
+}
+
+static int aq_a2_fw_set_power(struct aq_hw_s *self, unsigned int power_state,
+ const u8 *mac)
+{
+ u32 wol = self->aq_nic_cfg->wol;
+ int err = 0;
+
+ if (wol)
+ err = aq_a2_fw_set_wol_params(self, mac, wol);
+
+ return err;
+}
+
static int aq_a2_fw_set_eee_rate(struct aq_hw_s *self, u32 speed)
{
struct link_options_s link_options;
@@ -605,6 +643,7 @@ const struct aq_fw_ops aq_a2_fw_ops = {
.set_state = aq_a2_fw_set_state,
.update_link_status = aq_a2_fw_update_link_status,
.update_stats = aq_a2_fw_update_stats,
+ .set_power = aq_a2_fw_set_power,
.get_mac_temp = aq_a2_fw_get_mac_temp,
.get_phy_temp = aq_a2_fw_get_phy_temp,
.set_eee_rate = aq_a2_fw_set_eee_rate,
diff --git a/drivers/net/ethernet/arc/Kconfig b/drivers/net/ethernet/arc/Kconfig
index 0a67612af228..0d400a7d8d91 100644
--- a/drivers/net/ethernet/arc/Kconfig
+++ b/drivers/net/ethernet/arc/Kconfig
@@ -23,16 +23,6 @@ config ARC_EMAC_CORE
select PHYLIB
select CRC32
-config ARC_EMAC
- tristate "ARC EMAC support"
- select ARC_EMAC_CORE
- depends on OF_IRQ
- depends on ARC || COMPILE_TEST
- help
- On some legacy ARC (Synopsys) FPGA boards such as ARCAngel4/ML50x
- non-standard on-chip ethernet device ARC EMAC 10/100 is used.
- Say Y here if you have such a board. If unsure, say N.
-
config EMAC_ROCKCHIP
tristate "Rockchip EMAC support"
select ARC_EMAC_CORE
diff --git a/drivers/net/ethernet/arc/Makefile b/drivers/net/ethernet/arc/Makefile
index d63ada577c8e..23586eefec44 100644
--- a/drivers/net/ethernet/arc/Makefile
+++ b/drivers/net/ethernet/arc/Makefile
@@ -5,5 +5,4 @@
arc_emac-objs := emac_main.o emac_mdio.o
obj-$(CONFIG_ARC_EMAC_CORE) += arc_emac.o
-obj-$(CONFIG_ARC_EMAC) += emac_arc.o
obj-$(CONFIG_EMAC_ROCKCHIP) += emac_rockchip.o
diff --git a/drivers/net/ethernet/arc/emac_arc.c b/drivers/net/ethernet/arc/emac_arc.c
deleted file mode 100644
index a3afddb23ee8..000000000000
--- a/drivers/net/ethernet/arc/emac_arc.c
+++ /dev/null
@@ -1,88 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/**
- * DOC: emac_arc.c - ARC EMAC specific glue layer
- *
- * Copyright (C) 2014 Romain Perier
- *
- * Romain Perier <romain.perier@gmail.com>
- */
-
-#include <linux/etherdevice.h>
-#include <linux/module.h>
-#include <linux/of_net.h>
-#include <linux/platform_device.h>
-
-#include "emac.h"
-
-#define DRV_NAME "emac_arc"
-
-static int emac_arc_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct arc_emac_priv *priv;
- phy_interface_t interface;
- struct net_device *ndev;
- int err;
-
- if (!dev->of_node)
- return -ENODEV;
-
- ndev = alloc_etherdev(sizeof(struct arc_emac_priv));
- if (!ndev)
- return -ENOMEM;
- platform_set_drvdata(pdev, ndev);
- SET_NETDEV_DEV(ndev, dev);
-
- priv = netdev_priv(ndev);
- priv->drv_name = DRV_NAME;
-
- err = of_get_phy_mode(dev->of_node, &interface);
- if (err) {
- if (err == -ENODEV)
- interface = PHY_INTERFACE_MODE_MII;
- else
- goto out_netdev;
- }
-
- priv->clk = devm_clk_get(dev, "hclk");
- if (IS_ERR(priv->clk)) {
- dev_err(dev, "failed to retrieve host clock from device tree\n");
- err = -EINVAL;
- goto out_netdev;
- }
-
- err = arc_emac_probe(ndev, interface);
-out_netdev:
- if (err)
- free_netdev(ndev);
- return err;
-}
-
-static void emac_arc_remove(struct platform_device *pdev)
-{
- struct net_device *ndev = platform_get_drvdata(pdev);
-
- arc_emac_remove(ndev);
- free_netdev(ndev);
-}
-
-static const struct of_device_id emac_arc_dt_ids[] = {
- { .compatible = "snps,arc-emac" },
- { /* Sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, emac_arc_dt_ids);
-
-static struct platform_driver emac_arc_driver = {
- .probe = emac_arc_probe,
- .remove_new = emac_arc_remove,
- .driver = {
- .name = DRV_NAME,
- .of_match_table = emac_arc_dt_ids,
- },
-};
-
-module_platform_driver(emac_arc_driver);
-
-MODULE_AUTHOR("Romain Perier <romain.perier@gmail.com>");
-MODULE_DESCRIPTION("ARC EMAC platform driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index 31ee477dd131..8283aeee35fb 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -111,6 +111,7 @@ static void arc_emac_tx_clean(struct net_device *ndev)
{
struct arc_emac_priv *priv = netdev_priv(ndev);
struct net_device_stats *stats = &ndev->stats;
+ struct device *dev = ndev->dev.parent;
unsigned int i;
for (i = 0; i < TX_BD_NUM; i++) {
@@ -140,7 +141,7 @@ static void arc_emac_tx_clean(struct net_device *ndev)
stats->tx_bytes += skb->len;
}
- dma_unmap_single(&ndev->dev, dma_unmap_addr(tx_buff, addr),
+ dma_unmap_single(dev, dma_unmap_addr(tx_buff, addr),
dma_unmap_len(tx_buff, len), DMA_TO_DEVICE);
/* return the sk_buff to system */
@@ -174,6 +175,7 @@ static void arc_emac_tx_clean(struct net_device *ndev)
static int arc_emac_rx(struct net_device *ndev, int budget)
{
struct arc_emac_priv *priv = netdev_priv(ndev);
+ struct device *dev = ndev->dev.parent;
unsigned int work_done;
for (work_done = 0; work_done < budget; work_done++) {
@@ -223,9 +225,9 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
continue;
}
- addr = dma_map_single(&ndev->dev, (void *)skb->data,
+ addr = dma_map_single(dev, (void *)skb->data,
EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
- if (dma_mapping_error(&ndev->dev, addr)) {
+ if (dma_mapping_error(dev, addr)) {
if (net_ratelimit())
netdev_err(ndev, "cannot map dma buffer\n");
dev_kfree_skb(skb);
@@ -237,7 +239,7 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
}
/* unmap previously mapped skb */
- dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
+ dma_unmap_single(dev, dma_unmap_addr(rx_buff, addr),
dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);
pktlen = info & LEN_MASK;
@@ -423,6 +425,7 @@ static int arc_emac_open(struct net_device *ndev)
{
struct arc_emac_priv *priv = netdev_priv(ndev);
struct phy_device *phy_dev = ndev->phydev;
+ struct device *dev = ndev->dev.parent;
int i;
phy_dev->autoneg = AUTONEG_ENABLE;
@@ -445,9 +448,9 @@ static int arc_emac_open(struct net_device *ndev)
if (unlikely(!rx_buff->skb))
return -ENOMEM;
- addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data,
+ addr = dma_map_single(dev, (void *)rx_buff->skb->data,
EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
- if (dma_mapping_error(&ndev->dev, addr)) {
+ if (dma_mapping_error(dev, addr)) {
netdev_err(ndev, "cannot dma map\n");
dev_kfree_skb(rx_buff->skb);
return -ENOMEM;
@@ -548,6 +551,7 @@ static void arc_emac_set_rx_mode(struct net_device *ndev)
static void arc_free_tx_queue(struct net_device *ndev)
{
struct arc_emac_priv *priv = netdev_priv(ndev);
+ struct device *dev = ndev->dev.parent;
unsigned int i;
for (i = 0; i < TX_BD_NUM; i++) {
@@ -555,7 +559,7 @@ static void arc_free_tx_queue(struct net_device *ndev)
struct buffer_state *tx_buff = &priv->tx_buff[i];
if (tx_buff->skb) {
- dma_unmap_single(&ndev->dev,
+ dma_unmap_single(dev,
dma_unmap_addr(tx_buff, addr),
dma_unmap_len(tx_buff, len),
DMA_TO_DEVICE);
@@ -579,6 +583,7 @@ static void arc_free_tx_queue(struct net_device *ndev)
static void arc_free_rx_queue(struct net_device *ndev)
{
struct arc_emac_priv *priv = netdev_priv(ndev);
+ struct device *dev = ndev->dev.parent;
unsigned int i;
for (i = 0; i < RX_BD_NUM; i++) {
@@ -586,7 +591,7 @@ static void arc_free_rx_queue(struct net_device *ndev)
struct buffer_state *rx_buff = &priv->rx_buff[i];
if (rx_buff->skb) {
- dma_unmap_single(&ndev->dev,
+ dma_unmap_single(dev,
dma_unmap_addr(rx_buff, addr),
dma_unmap_len(rx_buff, len),
DMA_FROM_DEVICE);
@@ -679,6 +684,7 @@ static netdev_tx_t arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
unsigned int len, *txbd_curr = &priv->txbd_curr;
struct net_device_stats *stats = &ndev->stats;
__le32 *info = &priv->txbd[*txbd_curr].info;
+ struct device *dev = ndev->dev.parent;
dma_addr_t addr;
if (skb_padto(skb, ETH_ZLEN))
@@ -692,10 +698,9 @@ static netdev_tx_t arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
return NETDEV_TX_BUSY;
}
- addr = dma_map_single(&ndev->dev, (void *)skb->data, len,
- DMA_TO_DEVICE);
+ addr = dma_map_single(dev, (void *)skb->data, len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(&ndev->dev, addr))) {
+ if (unlikely(dma_mapping_error(dev, addr))) {
stats->tx_dropped++;
stats->tx_errors++;
dev_kfree_skb_any(skb);
diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c
index 87f40c2ba904..078b1a72c161 100644
--- a/drivers/net/ethernet/arc/emac_mdio.c
+++ b/drivers/net/ethernet/arc/emac_mdio.c
@@ -133,6 +133,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
struct arc_emac_mdio_bus_data *data = &priv->bus_data;
struct device_node *np = priv->dev->of_node;
const char *name = "Synopsys MII Bus";
+ struct device_node *mdio_node;
struct mii_bus *bus;
int error;
@@ -164,7 +165,13 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
snprintf(bus->id, MII_BUS_ID_SIZE, "%s", bus->name);
- error = of_mdiobus_register(bus, priv->dev->of_node);
+ /* Backwards compatibility for EMAC nodes without MDIO subnode. */
+ mdio_node = of_get_child_by_name(np, "mdio");
+ if (!mdio_node)
+ mdio_node = of_node_get(np);
+
+ error = of_mdiobus_register(bus, mdio_node);
+ of_node_put(mdio_node);
if (error) {
mdiobus_free(bus);
return dev_err_probe(priv->dev, error,
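The emac_mdio.c hunk above adds a fallback for EMAC device-tree nodes that predate a dedicated "mdio" subnode. A minimal sketch of the same pattern, with an illustrative function name that is not part of this driver:

#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>

/* Register @bus against an optional "mdio" child node, falling back to the
 * device node itself so older device trees keep working. */
static int example_register_mii_bus(struct device_node *np, struct mii_bus *bus)
{
	struct device_node *mdio_node;
	int err;

	mdio_node = of_get_child_by_name(np, "mdio");
	if (!mdio_node)
		mdio_node = of_node_get(np);	/* take a ref so the put below is balanced */

	err = of_mdiobus_register(bus, mdio_node);
	of_node_put(mdio_node);			/* drops the ref from either path */
	return err;
}

of_get_child_by_name() returns the child with its refcount raised, so the fallback takes an explicit reference on the parent node; a single of_node_put() then covers both paths.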
diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c
index 493d6356c8ca..780e70ea1c22 100644
--- a/drivers/net/ethernet/arc/emac_rockchip.c
+++ b/drivers/net/ethernet/arc/emac_rockchip.c
@@ -264,7 +264,7 @@ static void emac_rockchip_remove(struct platform_device *pdev)
static struct platform_driver emac_rockchip_driver = {
.probe = emac_rockchip_probe,
- .remove_new = emac_rockchip_remove,
+ .remove = emac_rockchip_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = emac_rockchip_dt_ids,
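The .remove_new -> .remove rename above (the ag71xx and bcmasp hunks later in this patch get the same treatment) reflects the platform core's switch to a void-returning remove callback. A minimal sketch with placeholder names:

#include <linux/module.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	return 0;
}

static void example_remove(struct platform_device *pdev)
{
	/* tear-down only; remove can no longer return an error */
}

static struct platform_driver example_driver = {
	.probe	= example_probe,
	.remove	= example_remove,	/* formerly .remove_new during the transition */
	.driver	= {
		.name = "example",
	},
};
module_platform_driver(example_driver);

MODULE_LICENSE("GPL");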
diff --git a/drivers/net/ethernet/atheros/Kconfig b/drivers/net/ethernet/atheros/Kconfig
index 482c58c4c584..bec5cdf8d1da 100644
--- a/drivers/net/ethernet/atheros/Kconfig
+++ b/drivers/net/ethernet/atheros/Kconfig
@@ -6,7 +6,7 @@
config NET_VENDOR_ATHEROS
bool "Atheros devices"
default y
- depends on (PCI || ATH79)
+ depends on PCI || ATH79 || COMPILE_TEST
help
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -19,7 +19,7 @@ if NET_VENDOR_ATHEROS
config AG71XX
tristate "Atheros AR7XXX/AR9XXX built-in ethernet mac support"
- depends on ATH79
+ depends on ATH79 || COMPILE_TEST
select PHYLINK
imply NET_SELFTESTS
help
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index 0f2f400b5bc4..cbc730c7cff2 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -149,11 +149,11 @@
#define FIFO_CFG4_MC BIT(8) /* Multicast Packet */
#define FIFO_CFG4_BC BIT(9) /* Broadcast Packet */
#define FIFO_CFG4_DR BIT(10) /* Dribble */
-#define FIFO_CFG4_LE BIT(11) /* Long Event */
-#define FIFO_CFG4_CF BIT(12) /* Control Frame */
-#define FIFO_CFG4_PF BIT(13) /* Pause Frame */
-#define FIFO_CFG4_UO BIT(14) /* Unsupported Opcode */
-#define FIFO_CFG4_VT BIT(15) /* VLAN tag detected */
+#define FIFO_CFG4_CF BIT(11) /* Control Frame */
+#define FIFO_CFG4_PF BIT(12) /* Pause Frame */
+#define FIFO_CFG4_UO BIT(13) /* Unsupported Opcode */
+#define FIFO_CFG4_VT BIT(14) /* VLAN tag detected */
+#define FIFO_CFG4_LE BIT(15) /* Long Event */
#define FIFO_CFG4_FT BIT(16) /* Frame Truncated */
#define FIFO_CFG4_UC BIT(17) /* Unicast Packet */
#define FIFO_CFG4_INIT (FIFO_CFG4_DE | FIFO_CFG4_DV | FIFO_CFG4_FC | \
@@ -168,28 +168,28 @@
#define FIFO_CFG5_DV BIT(1) /* RX_DV Event */
#define FIFO_CFG5_FC BIT(2) /* False Carrier */
#define FIFO_CFG5_CE BIT(3) /* Code Error */
-#define FIFO_CFG5_LM BIT(4) /* Length Mismatch */
-#define FIFO_CFG5_LO BIT(5) /* Length Out of Range */
-#define FIFO_CFG5_OK BIT(6) /* Packet is OK */
-#define FIFO_CFG5_MC BIT(7) /* Multicast Packet */
-#define FIFO_CFG5_BC BIT(8) /* Broadcast Packet */
-#define FIFO_CFG5_DR BIT(9) /* Dribble */
-#define FIFO_CFG5_CF BIT(10) /* Control Frame */
-#define FIFO_CFG5_PF BIT(11) /* Pause Frame */
-#define FIFO_CFG5_UO BIT(12) /* Unsupported Opcode */
-#define FIFO_CFG5_VT BIT(13) /* VLAN tag detected */
-#define FIFO_CFG5_LE BIT(14) /* Long Event */
-#define FIFO_CFG5_FT BIT(15) /* Frame Truncated */
-#define FIFO_CFG5_16 BIT(16) /* unknown */
-#define FIFO_CFG5_17 BIT(17) /* unknown */
+#define FIFO_CFG5_CR BIT(4) /* CRC error */
+#define FIFO_CFG5_LM BIT(5) /* Length Mismatch */
+#define FIFO_CFG5_LO BIT(6) /* Length Out of Range */
+#define FIFO_CFG5_OK BIT(7) /* Packet is OK */
+#define FIFO_CFG5_MC BIT(8) /* Multicast Packet */
+#define FIFO_CFG5_BC BIT(9) /* Broadcast Packet */
+#define FIFO_CFG5_DR BIT(10) /* Dribble */
+#define FIFO_CFG5_CF BIT(11) /* Control Frame */
+#define FIFO_CFG5_PF BIT(12) /* Pause Frame */
+#define FIFO_CFG5_UO BIT(13) /* Unsupported Opcode */
+#define FIFO_CFG5_VT BIT(14) /* VLAN tag detected */
+#define FIFO_CFG5_LE BIT(15) /* Long Event */
+#define FIFO_CFG5_FT BIT(16) /* Frame Truncated */
+#define FIFO_CFG5_UC BIT(17) /* Unicast Packet */
#define FIFO_CFG5_SF BIT(18) /* Short Frame */
#define FIFO_CFG5_BM BIT(19) /* Byte Mode */
#define FIFO_CFG5_INIT (FIFO_CFG5_DE | FIFO_CFG5_DV | FIFO_CFG5_FC | \
- FIFO_CFG5_CE | FIFO_CFG5_LO | FIFO_CFG5_OK | \
- FIFO_CFG5_MC | FIFO_CFG5_BC | FIFO_CFG5_DR | \
- FIFO_CFG5_CF | FIFO_CFG5_PF | FIFO_CFG5_VT | \
- FIFO_CFG5_LE | FIFO_CFG5_FT | FIFO_CFG5_16 | \
- FIFO_CFG5_17 | FIFO_CFG5_SF)
+ FIFO_CFG5_CE | FIFO_CFG5_LM | FIFO_CFG5_LO | \
+ FIFO_CFG5_OK | FIFO_CFG5_MC | FIFO_CFG5_BC | \
+ FIFO_CFG5_DR | FIFO_CFG5_CF | FIFO_CFG5_UO | \
+ FIFO_CFG5_VT | FIFO_CFG5_LE | FIFO_CFG5_FT | \
+ FIFO_CFG5_UC | FIFO_CFG5_SF)
#define AG71XX_REG_TX_CTRL 0x0180
#define TX_CTRL_TXE BIT(0) /* Tx Enable */
@@ -379,10 +379,7 @@ struct ag71xx {
u32 fifodata[3];
int mac_idx;
- struct reset_control *mdio_reset;
- struct mii_bus *mii_bus;
struct clk *clk_mdio;
- struct clk *clk_eth;
};
static int ag71xx_desc_empty(struct ag71xx_desc *desc)
@@ -447,6 +444,13 @@ static void ag71xx_int_disable(struct ag71xx *ag, u32 ints)
ag71xx_cb(ag, AG71XX_REG_INT_ENABLE, ints);
}
+static int ag71xx_do_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
+{
+ struct ag71xx *ag = netdev_priv(ndev);
+
+ return phylink_mii_ioctl(ag->phylink, ifr, cmd);
+}
+
static void ag71xx_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *info)
{
@@ -504,8 +508,7 @@ static void ag71xx_ethtool_get_strings(struct net_device *netdev, u32 sset,
switch (sset) {
case ETH_SS_STATS:
for (i = 0; i < ARRAY_SIZE(ag71xx_statistics); i++)
- memcpy(data + i * ETH_GSTRING_LEN,
- ag71xx_statistics[i].name, ETH_GSTRING_LEN);
+ ethtool_puts(&data, ag71xx_statistics[i].name);
break;
case ETH_SS_TEST:
net_selftest_get_strings(data);
@@ -685,36 +688,27 @@ static int ag71xx_mdio_probe(struct ag71xx *ag)
{
struct device *dev = &ag->pdev->dev;
struct net_device *ndev = ag->ndev;
+ struct reset_control *mdio_reset;
static struct mii_bus *mii_bus;
struct device_node *np, *mnp;
int err;
np = dev->of_node;
- ag->mii_bus = NULL;
- ag->clk_mdio = devm_clk_get(dev, "mdio");
+ ag->clk_mdio = devm_clk_get_enabled(dev, "mdio");
if (IS_ERR(ag->clk_mdio)) {
netif_err(ag, probe, ndev, "Failed to get mdio clk.\n");
return PTR_ERR(ag->clk_mdio);
}
- err = clk_prepare_enable(ag->clk_mdio);
- if (err) {
- netif_err(ag, probe, ndev, "Failed to enable mdio clk.\n");
- return err;
- }
-
mii_bus = devm_mdiobus_alloc(dev);
- if (!mii_bus) {
- err = -ENOMEM;
- goto mdio_err_put_clk;
- }
+ if (!mii_bus)
+ return -ENOMEM;
- ag->mdio_reset = of_reset_control_get_exclusive(np, "mdio");
- if (IS_ERR(ag->mdio_reset)) {
+ mdio_reset = devm_reset_control_get_exclusive(dev, "mdio");
+ if (IS_ERR(mdio_reset)) {
netif_err(ag, probe, ndev, "Failed to get reset mdio.\n");
- err = PTR_ERR(ag->mdio_reset);
- goto mdio_err_put_clk;
+ return PTR_ERR(mdio_reset);
}
mii_bus->name = "ag71xx_mdio";
@@ -725,33 +719,18 @@ static int ag71xx_mdio_probe(struct ag71xx *ag)
mii_bus->parent = dev;
snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s.%d", np->name, ag->mac_idx);
- if (!IS_ERR(ag->mdio_reset)) {
- reset_control_assert(ag->mdio_reset);
- msleep(100);
- reset_control_deassert(ag->mdio_reset);
- msleep(200);
- }
+ reset_control_assert(mdio_reset);
+ msleep(100);
+ reset_control_deassert(mdio_reset);
+ msleep(200);
mnp = of_get_child_by_name(np, "mdio");
- err = of_mdiobus_register(mii_bus, mnp);
+ err = devm_of_mdiobus_register(dev, mii_bus, mnp);
of_node_put(mnp);
if (err)
- goto mdio_err_put_clk;
-
- ag->mii_bus = mii_bus;
+ return err;
return 0;
-
-mdio_err_put_clk:
- clk_disable_unprepare(ag->clk_mdio);
- return err;
-}
-
-static void ag71xx_mdio_remove(struct ag71xx *ag)
-{
- if (ag->mii_bus)
- mdiobus_unregister(ag->mii_bus);
- clk_disable_unprepare(ag->clk_mdio);
}
static void ag71xx_hw_stop(struct ag71xx *ag)
@@ -1234,6 +1213,11 @@ static bool ag71xx_fill_rx_buf(struct ag71xx *ag, struct ag71xx_buf *buf,
buf->rx.rx_buf = data;
buf->rx.dma_addr = dma_map_single(&ag->pdev->dev, data, ag->rx_buf_size,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(&ag->pdev->dev, buf->rx.dma_addr)) {
+ skb_free_frag(data);
+ buf->rx.rx_buf = NULL;
+ return false;
+ }
desc->data = (u32)buf->rx.dma_addr + offset;
return true;
}
@@ -1412,7 +1396,7 @@ static void ag71xx_hw_disable(struct ag71xx *ag)
ag71xx_dma_reset(ag);
napi_disable(&ag->napi);
- del_timer_sync(&ag->oom_timer);
+ timer_delete_sync(&ag->oom_timer);
ag71xx_rings_cleanup(ag);
}
@@ -1532,6 +1516,10 @@ static netdev_tx_t ag71xx_hard_start_xmit(struct sk_buff *skb,
dma_addr = dma_map_single(&ag->pdev->dev, skb->data, skb->len,
DMA_TO_DEVICE);
+ if (dma_mapping_error(&ag->pdev->dev, dma_addr)) {
+ netif_dbg(ag, tx_err, ndev, "DMA mapping error\n");
+ goto err_drop;
+ }
i = ring->curr & ring_mask;
desc = ag71xx_ring_desc(ring, i);
@@ -1584,7 +1572,7 @@ err_drop:
static void ag71xx_oom_timer_handler(struct timer_list *t)
{
- struct ag71xx *ag = from_timer(ag, t, oom_timer);
+ struct ag71xx *ag = timer_container_of(ag, t, oom_timer);
napi_schedule(&ag->napi);
}
@@ -1619,8 +1607,8 @@ static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
int ring_mask, ring_size, done = 0;
unsigned int pktlen_mask, offset;
struct ag71xx_ring *ring;
- struct list_head rx_list;
struct sk_buff *skb;
+ LIST_HEAD(rx_list);
ring = &ag->rx_ring;
pktlen_mask = ag->dcfg->desc_pktlen_mask;
@@ -1631,13 +1619,10 @@ static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
netif_dbg(ag, rx_status, ndev, "rx packets, limit=%d, curr=%u, dirty=%u\n",
limit, ring->curr, ring->dirty);
- INIT_LIST_HEAD(&rx_list);
-
while (done < limit) {
unsigned int i = ring->curr & ring_mask;
struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
int pktlen;
- int err = 0;
if (ag71xx_desc_empty(desc))
break;
@@ -1660,6 +1645,7 @@ static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
skb = napi_build_skb(ring->buf[i].rx.rx_buf, ag71xx_buffer_size(ag));
if (!skb) {
+ ndev->stats.rx_errors++;
skb_free_frag(ring->buf[i].rx.rx_buf);
goto next;
}
@@ -1667,14 +1653,10 @@ static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
skb_reserve(skb, offset);
skb_put(skb, pktlen);
- if (err) {
- ndev->stats.rx_dropped++;
- kfree_skb(skb);
- } else {
- skb->dev = ndev;
- skb->ip_summed = CHECKSUM_NONE;
- list_add_tail(&skb->list, &rx_list);
- }
+ skb->dev = ndev;
+ skb->ip_summed = CHECKSUM_NONE;
+ skb->protocol = eth_type_trans(skb, ndev);
+ list_add_tail(&skb->list, &rx_list);
next:
ring->buf[i].rx.rx_buf = NULL;
@@ -1685,8 +1667,6 @@ next:
ag71xx_ring_rx_refill(ag);
- list_for_each_entry(skb, &rx_list, list)
- skb->protocol = eth_type_trans(skb, ndev);
netif_receive_skb_list(&rx_list);
netif_dbg(ag, rx_status, ndev, "rx finish, curr=%u, dirty=%u, done=%d\n",
@@ -1788,7 +1768,7 @@ static int ag71xx_change_mtu(struct net_device *ndev, int new_mtu)
{
struct ag71xx *ag = netdev_priv(ndev);
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
ag71xx_max_frame_len(ndev->mtu));
@@ -1799,7 +1779,7 @@ static const struct net_device_ops ag71xx_netdev_ops = {
.ndo_open = ag71xx_open,
.ndo_stop = ag71xx_stop,
.ndo_start_xmit = ag71xx_hard_start_xmit,
- .ndo_eth_ioctl = phy_do_ioctl,
+ .ndo_eth_ioctl = ag71xx_do_ioctl,
.ndo_tx_timeout = ag71xx_tx_timeout,
.ndo_change_mtu = ag71xx_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
@@ -1816,6 +1796,7 @@ static int ag71xx_probe(struct platform_device *pdev)
const struct ag71xx_dcfg *dcfg;
struct net_device *ndev;
struct resource *res;
+ struct clk *clk_eth;
int tx_size, err, i;
struct ag71xx *ag;
@@ -1846,11 +1827,10 @@ static int ag71xx_probe(struct platform_device *pdev)
return -EINVAL;
}
- ag->clk_eth = devm_clk_get(&pdev->dev, "eth");
- if (IS_ERR(ag->clk_eth)) {
- netif_err(ag, probe, ndev, "Failed to get eth clk.\n");
- return PTR_ERR(ag->clk_eth);
- }
+ clk_eth = devm_clk_get_enabled(&pdev->dev, "eth");
+ if (IS_ERR(clk_eth))
+ return dev_err_probe(&pdev->dev, PTR_ERR(clk_eth),
+ "Failed to get eth clk.");
SET_NETDEV_DEV(ndev, &pdev->dev);
@@ -1861,14 +1841,19 @@ static int ag71xx_probe(struct platform_device *pdev)
memcpy(ag->fifodata, dcfg->fifodata, sizeof(ag->fifodata));
ag->mac_reset = devm_reset_control_get(&pdev->dev, "mac");
- if (IS_ERR(ag->mac_reset)) {
- netif_err(ag, probe, ndev, "missing mac reset\n");
- return PTR_ERR(ag->mac_reset);
- }
+ if (IS_ERR(ag->mac_reset))
+ return dev_err_probe(&pdev->dev, PTR_ERR(ag->mac_reset),
+ "missing mac reset");
- ag->mac_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (!ag->mac_base)
- return -ENOMEM;
+ ag->mac_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ag->mac_base))
+ return PTR_ERR(ag->mac_base);
+
+ /* ensure that HW is in manual polling mode before interrupts are
+ * activated. Otherwise ag71xx_interrupt might call napi_schedule
+ * before it is initialized by netif_napi_add.
+ */
+ ag71xx_int_disable(ag, AG71XX_INT_POLL);
ndev->irq = platform_get_irq(pdev, 0);
err = devm_request_irq(&pdev->dev, ndev->irq, ag71xx_interrupt,
@@ -1912,6 +1897,8 @@ static int ag71xx_probe(struct platform_device *pdev)
ag->stop_desc->next = (u32)ag->stop_desc_dma;
err = of_get_ethdev_address(np, ndev);
+ if (err == -EPROBE_DEFER)
+ return err;
if (err) {
netif_err(ag, probe, ndev, "invalid MAC address, using random address\n");
eth_hw_addr_random(ndev);
@@ -1926,33 +1913,23 @@ static int ag71xx_probe(struct platform_device *pdev)
netif_napi_add_weight(ndev, &ag->napi, ag71xx_poll,
AG71XX_NAPI_WEIGHT);
- err = clk_prepare_enable(ag->clk_eth);
- if (err) {
- netif_err(ag, probe, ndev, "Failed to enable eth clk.\n");
- return err;
- }
-
ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, 0);
ag71xx_hw_init(ag);
err = ag71xx_mdio_probe(ag);
if (err)
- goto err_put_clk;
-
- platform_set_drvdata(pdev, ndev);
+ return err;
err = ag71xx_phylink_setup(ag);
- if (err) {
- netif_err(ag, probe, ndev, "failed to setup phylink (%d)\n", err);
- goto err_mdio_remove;
- }
+ if (err)
+ return dev_err_probe(&pdev->dev, err,
+ "failed to setup phylink");
- err = register_netdev(ndev);
+ err = devm_register_netdev(&pdev->dev, ndev);
if (err) {
netif_err(ag, probe, ndev, "unable to register net device\n");
- platform_set_drvdata(pdev, NULL);
- goto err_mdio_remove;
+ return err;
}
netif_info(ag, probe, ndev, "Atheros AG71xx at 0x%08lx, irq %d, mode:%s\n",
@@ -1960,27 +1937,6 @@ static int ag71xx_probe(struct platform_device *pdev)
phy_modes(ag->phy_if_mode));
return 0;
-
-err_mdio_remove:
- ag71xx_mdio_remove(ag);
-err_put_clk:
- clk_disable_unprepare(ag->clk_eth);
- return err;
-}
-
-static void ag71xx_remove(struct platform_device *pdev)
-{
- struct net_device *ndev = platform_get_drvdata(pdev);
- struct ag71xx *ag;
-
- if (!ndev)
- return;
-
- ag = netdev_priv(ndev);
- unregister_netdev(ndev);
- ag71xx_mdio_remove(ag);
- clk_disable_unprepare(ag->clk_eth);
- platform_set_drvdata(pdev, NULL);
}
static const u32 ar71xx_fifo_ar7100[] = {
@@ -2064,10 +2020,10 @@ static const struct of_device_id ag71xx_match[] = {
{ .compatible = "qca,qca9560-eth", .data = &ag71xx_dcfg_qca9550 },
{}
};
+MODULE_DEVICE_TABLE(of, ag71xx_match);
static struct platform_driver ag71xx_driver = {
.probe = ag71xx_probe,
- .remove_new = ag71xx_remove,
.driver = {
.name = "ag71xx",
.of_match_table = ag71xx_match,
@@ -2075,4 +2031,5 @@ static struct platform_driver ag71xx_driver = {
};
module_platform_driver(ag71xx_driver);
+MODULE_DESCRIPTION("Atheros AR71xx built-in ethernet mac driver");
MODULE_LICENSE("GPL v2");
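The ag71xx rework above leans on device-managed helpers — devm_clk_get_enabled(), devm_of_mdiobus_register(), devm_register_netdev() — which is what lets the explicit error unwinding and the ag71xx_remove()/ag71xx_mdio_remove() pair disappear. A sketch of that probe style; the extra parameters stand in for driver state and are purely illustrative:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/netdevice.h>
#include <linux/of_mdio.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev, struct net_device *ndev,
			 struct mii_bus *bus, struct device_node *mdio_np)
{
	struct clk *clk;
	int err;

	/* clock is acquired, prepared and enabled in one call, and disabled
	 * again automatically when the device is unbound */
	clk = devm_clk_get_enabled(&pdev->dev, "eth");
	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "Failed to get eth clk.");

	err = devm_of_mdiobus_register(&pdev->dev, bus, mdio_np);
	if (err)
		return err;

	/* the net_device is unregistered automatically as well */
	return devm_register_netdev(&pdev->dev, ndev);
}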
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 49bb9a8f00e6..ad6d6abd885f 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -901,7 +901,7 @@ static int alx_init_intr(struct alx_priv *alx)
int ret;
ret = pci_alloc_irq_vectors(alx->hw.pdev, 1, 1,
- PCI_IRQ_MSI | PCI_IRQ_LEGACY);
+ PCI_IRQ_MSI | PCI_IRQ_INTX);
if (ret < 0)
return ret;
@@ -1176,7 +1176,7 @@ static int alx_change_mtu(struct net_device *netdev, int mtu)
struct alx_priv *alx = netdev_priv(netdev);
int max_frame = ALX_MAX_FRAME_LEN(mtu);
- netdev->mtu = mtu;
+ WRITE_ONCE(netdev->mtu, mtu);
alx->hw.mtu = mtu;
alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE);
netdev_update_features(netdev);
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 46cdc32b4e31..7efa3fc257b3 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -231,8 +231,8 @@ static u32 atl1c_wait_until_idle(struct atl1c_hw *hw, u32 modu_ctrl)
*/
static void atl1c_phy_config(struct timer_list *t)
{
- struct atl1c_adapter *adapter = from_timer(adapter, t,
- phy_config_timer);
+ struct atl1c_adapter *adapter = timer_container_of(adapter, t,
+ phy_config_timer);
struct atl1c_hw *hw = &adapter->hw;
unsigned long flags;
@@ -357,7 +357,7 @@ static void atl1c_common_task(struct work_struct *work)
static void atl1c_del_timer(struct atl1c_adapter *adapter)
{
- del_timer_sync(&adapter->phy_config_timer);
+ timer_delete_sync(&adapter->phy_config_timer);
}
@@ -561,7 +561,7 @@ static int atl1c_change_mtu(struct net_device *netdev, int new_mtu)
if (netif_running(netdev)) {
while (test_and_set_bit(__AT_RESETTING, &adapter->flags))
msleep(1);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
adapter->hw.max_frame_size = new_mtu;
atl1c_set_rxbufsize(adapter, netdev);
atl1c_down(adapter);
@@ -2688,7 +2688,7 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->mii.mdio_write = atl1c_mdio_write;
adapter->mii.phy_id_mask = 0x1f;
adapter->mii.reg_num_mask = MDIO_CTRL_REG_MASK;
- dev_set_threaded(netdev, true);
+ netif_threaded_enable(netdev);
for (i = 0; i < adapter->rx_queue_count; ++i)
netif_napi_add(netdev, &adapter->rrd_ring[i].napi,
atl1c_clean_rx);
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 5f2a6fcba967..40290028580b 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -115,8 +115,8 @@ static inline void atl1e_irq_reset(struct atl1e_adapter *adapter)
*/
static void atl1e_phy_config(struct timer_list *t)
{
- struct atl1e_adapter *adapter = from_timer(adapter, t,
- phy_config_timer);
+ struct atl1e_adapter *adapter = timer_container_of(adapter, t,
+ phy_config_timer);
struct atl1e_hw *hw = &adapter->hw;
unsigned long flags;
@@ -232,7 +232,7 @@ static void atl1e_link_chg_event(struct atl1e_adapter *adapter)
static void atl1e_del_timer(struct atl1e_adapter *adapter)
{
- del_timer_sync(&adapter->phy_config_timer);
+ timer_delete_sync(&adapter->phy_config_timer);
}
static void atl1e_cancel_work(struct atl1e_adapter *adapter)
@@ -428,7 +428,7 @@ static int atl1e_change_mtu(struct net_device *netdev, int new_mtu)
if (netif_running(netdev)) {
while (test_and_set_bit(__AT_RESETTING, &adapter->flags))
msleep(1);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
adapter->hw.max_frame_size = new_mtu;
adapter->hw.rx_jumbo_th = (max_frame + 7) >> 3;
atl1e_down(adapter);
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index a9014d7932db..98a4d089270e 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -1861,14 +1861,21 @@ static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter)
break;
}
- buffer_info->alloced = 1;
- buffer_info->skb = skb;
- buffer_info->length = (u16) adapter->rx_buffer_len;
page = virt_to_page(skb->data);
offset = offset_in_page(skb->data);
buffer_info->dma = dma_map_page(&pdev->dev, page, offset,
adapter->rx_buffer_len,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
+ kfree_skb(skb);
+ adapter->soft_stats.rx_dropped++;
+ break;
+ }
+
+ buffer_info->alloced = 1;
+ buffer_info->skb = skb;
+ buffer_info->length = (u16)adapter->rx_buffer_len;
+
rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
rfd_desc->buf_len = cpu_to_le16(adapter->rx_buffer_len);
rfd_desc->coalese = 0;
@@ -2183,8 +2190,8 @@ static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb,
return 0;
}
-static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
- struct tx_packet_desc *ptpd)
+static bool atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
+ struct tx_packet_desc *ptpd)
{
struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
struct atl1_buffer *buffer_info;
@@ -2194,6 +2201,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
unsigned int nr_frags;
unsigned int f;
int retval;
+ u16 first_mapped;
u16 next_to_use;
u16 data_len;
u8 hdr_len;
@@ -2201,6 +2209,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
buf_len -= skb->data_len;
nr_frags = skb_shinfo(skb)->nr_frags;
next_to_use = atomic_read(&tpd_ring->next_to_use);
+ first_mapped = next_to_use;
buffer_info = &tpd_ring->buffer_info[next_to_use];
BUG_ON(buffer_info->skb);
/* put skb in last TPD */
@@ -2216,6 +2225,8 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
buffer_info->dma = dma_map_page(&adapter->pdev->dev, page,
offset, hdr_len,
DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma))
+ goto dma_err;
if (++next_to_use == tpd_ring->count)
next_to_use = 0;
@@ -2242,6 +2253,9 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
page, offset,
buffer_info->length,
DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev,
+ buffer_info->dma))
+ goto dma_err;
if (++next_to_use == tpd_ring->count)
next_to_use = 0;
}
@@ -2254,6 +2268,8 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
buffer_info->dma = dma_map_page(&adapter->pdev->dev, page,
offset, buf_len,
DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma))
+ goto dma_err;
if (++next_to_use == tpd_ring->count)
next_to_use = 0;
}
@@ -2277,6 +2293,9 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
buffer_info->dma = skb_frag_dma_map(&adapter->pdev->dev,
frag, i * ATL1_MAX_TX_BUF_LEN,
buffer_info->length, DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev,
+ buffer_info->dma))
+ goto dma_err;
if (++next_to_use == tpd_ring->count)
next_to_use = 0;
@@ -2285,6 +2304,22 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
/* last tpd's buffer-info */
buffer_info->skb = skb;
+
+ return true;
+
+ dma_err:
+ while (first_mapped != next_to_use) {
+ buffer_info = &tpd_ring->buffer_info[first_mapped];
+ dma_unmap_page(&adapter->pdev->dev,
+ buffer_info->dma,
+ buffer_info->length,
+ DMA_TO_DEVICE);
+ buffer_info->dma = 0;
+
+ if (++first_mapped == tpd_ring->count)
+ first_mapped = 0;
+ }
+ return false;
}
static void atl1_tx_queue(struct atl1_adapter *adapter, u16 count,
@@ -2355,10 +2390,8 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
len = skb_headlen(skb);
- if (unlikely(skb->len <= 0)) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
+ if (unlikely(skb->len <= 0))
+ goto drop_packet;
nr_frags = skb_shinfo(skb)->nr_frags;
for (f = 0; f < nr_frags; f++) {
@@ -2371,10 +2404,9 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
if (mss) {
if (skb->protocol == htons(ETH_P_IP)) {
proto_hdr_len = skb_tcp_all_headers(skb);
- if (unlikely(proto_hdr_len > len)) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
+ if (unlikely(proto_hdr_len > len))
+ goto drop_packet;
+
/* need additional TPD ? */
if (proto_hdr_len != len)
count += (len - proto_hdr_len +
@@ -2406,23 +2438,26 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
}
tso = atl1_tso(adapter, skb, ptpd);
- if (tso < 0) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
+ if (tso < 0)
+ goto drop_packet;
if (!tso) {
ret_val = atl1_tx_csum(adapter, skb, ptpd);
- if (ret_val < 0) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
+ if (ret_val < 0)
+ goto drop_packet;
}
- atl1_tx_map(adapter, skb, ptpd);
+ if (!atl1_tx_map(adapter, skb, ptpd))
+ goto drop_packet;
+
atl1_tx_queue(adapter, count, ptpd);
atl1_update_mailbox(adapter);
return NETDEV_TX_OK;
+
+drop_packet:
+ adapter->soft_stats.tx_errors++;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
}
static int atl1_rings_clean(struct napi_struct *napi, int budget)
@@ -2556,8 +2591,8 @@ static irqreturn_t atl1_intr(int irq, void *data)
*/
static void atl1_phy_config(struct timer_list *t)
{
- struct atl1_adapter *adapter = from_timer(adapter, t,
- phy_config_timer);
+ struct atl1_adapter *adapter = timer_container_of(adapter, t,
+ phy_config_timer);
struct atl1_hw *hw = &adapter->hw;
unsigned long flags;
@@ -2641,7 +2676,7 @@ static void atl1_down(struct atl1_adapter *adapter)
napi_disable(&adapter->napi);
netif_stop_queue(netdev);
- del_timer_sync(&adapter->phy_config_timer);
+ timer_delete_sync(&adapter->phy_config_timer);
adapter->phy_timer_pending = false;
atlx_irq_disable(adapter);
@@ -2687,7 +2722,7 @@ static int atl1_change_mtu(struct net_device *netdev, int new_mtu)
adapter->rx_buffer_len = (max_frame + 7) & ~7;
adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8;
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (netif_running(netdev)) {
atl1_down(adapter);
atl1_up(adapter);
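Both the ag71xx hunks and the atl1 hunks above add dma_mapping_error() checks after dma_map_single()/dma_map_page(); atl1_tx_map() additionally unwinds the descriptors it already mapped. The core shape of the check, sketched with illustrative names:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

/* Map an skb for transmit and fail cleanly if the DMA layer rejects the
 * mapping; the caller drops the skb and bumps its error counter. */
static int example_map_tx_skb(struct device *dev, struct sk_buff *skb,
			      dma_addr_t *addr)
{
	*addr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *addr))
		return -ENOMEM;
	return 0;
}

The address returned on failure is platform-specific, so every mapping has to be validated with dma_mapping_error() before it is written into a hardware descriptor.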
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index bcfc9488125b..280e2f5f4aa5 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -752,8 +752,8 @@ static void atl2_down(struct atl2_adapter *adapter)
atl2_irq_disable(adapter);
- del_timer_sync(&adapter->watchdog_timer);
- del_timer_sync(&adapter->phy_config_timer);
+ timer_delete_sync(&adapter->watchdog_timer);
+ timer_delete_sync(&adapter->phy_config_timer);
clear_bit(0, &adapter->cfg_phy);
netif_carrier_off(netdev);
@@ -905,7 +905,7 @@ static int atl2_change_mtu(struct net_device *netdev, int new_mtu)
struct atl2_hw *hw = &adapter->hw;
/* set MTU */
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
hw->max_frame_size = new_mtu;
ATL2_WRITE_REG(hw, REG_MTU, new_mtu + ETH_HLEN +
VLAN_HLEN + ETH_FCS_LEN);
@@ -1010,7 +1010,8 @@ static void atl2_tx_timeout(struct net_device *netdev, unsigned int txqueue)
*/
static void atl2_watchdog(struct timer_list *t)
{
- struct atl2_adapter *adapter = from_timer(adapter, t, watchdog_timer);
+ struct atl2_adapter *adapter = timer_container_of(adapter, t,
+ watchdog_timer);
if (!test_bit(__ATL2_DOWN, &adapter->flags)) {
u32 drop_rxd, drop_rxs;
@@ -1035,8 +1036,8 @@ static void atl2_watchdog(struct timer_list *t)
*/
static void atl2_phy_config(struct timer_list *t)
{
- struct atl2_adapter *adapter = from_timer(adapter, t,
- phy_config_timer);
+ struct atl2_adapter *adapter = timer_container_of(adapter, t,
+ phy_config_timer);
struct atl2_hw *hw = &adapter->hw;
unsigned long flags;
@@ -1468,8 +1469,8 @@ static void atl2_remove(struct pci_dev *pdev)
* explicitly disable watchdog tasks from being rescheduled */
set_bit(__ATL2_DOWN, &adapter->flags);
- del_timer_sync(&adapter->watchdog_timer);
- del_timer_sync(&adapter->phy_config_timer);
+ timer_delete_sync(&adapter->watchdog_timer);
+ timer_delete_sync(&adapter->phy_config_timer);
cancel_work_sync(&adapter->reset_task);
cancel_work_sync(&adapter->link_chg_task);
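The atl1c/atl1e/atl1/atl2 hunks above (and the ag71xx one earlier) apply the same two mechanical timer-API conversions: from_timer() becomes timer_container_of() and del_timer_sync() becomes timer_delete_sync(). The resulting shape, with illustrative struct and field names:

#include <linux/timer.h>

struct example_adapter {
	struct timer_list phy_config_timer;
};

static void example_phy_config(struct timer_list *t)
{
	/* recover the containing structure from the timer pointer */
	struct example_adapter *adapter = timer_container_of(adapter, t,
							      phy_config_timer);

	/* ... reprogram the PHY via 'adapter' ... */
}

static void example_down(struct example_adapter *adapter)
{
	/* waits for a running callback to finish before returning */
	timer_delete_sync(&adapter->phy_config_timer);
}

Both helpers behave exactly like the names they replace; the rename only makes the container_of-style lookup and the delete semantics explicit.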
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 75ca3ddda1f5..666522d64775 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -25,6 +25,7 @@ config B44
select SSB
select MII
select PHYLIB
+ select FIXED_PHY if BCM47XX
help
If you have a network (Ethernet) controller of this type, say Y
or M here.
@@ -72,7 +73,6 @@ config BCMGENET
tristate "Broadcom GENET internal MAC support"
depends on HAS_IOMEM
depends on PTP_1588_CLOCK_OPTIONAL || !ARCH_BCM2835
- select MII
select PHYLIB
select FIXED_PHY
select BCM7XXX_PHY
@@ -97,7 +97,6 @@ config BNX2
config CNIC
tristate "QLogic CNIC support"
depends on PCI && (IPV6 || IPV6=n)
- depends on MMU
select BNX2
select UIO
help
@@ -124,6 +123,7 @@ config TIGON3
tristate "Broadcom Tigon3 support"
depends on PCI
depends on PTP_1588_CLOCK_OPTIONAL
+ select CRC32
select PHYLIB
help
This driver supports Broadcom Tigon3 based gigabit Ethernet cards.
@@ -144,7 +144,7 @@ config BNX2X
depends on PTP_1588_CLOCK_OPTIONAL
select FW_LOADER
select ZLIB_INFLATE
- select LIBCRC32C
+ select CRC32
select MDIO
help
This driver supports Broadcom NetXtremeII 10 gigabit Ethernet cards.
@@ -195,7 +195,6 @@ config SYSTEMPORT
tristate "Broadcom SYSTEMPORT internal MAC support"
depends on HAS_IOMEM
depends on NET_DSA || !NET_DSA
- select MII
select PHYLIB
select FIXED_PHY
select DIMLIB
@@ -209,7 +208,7 @@ config BNXT
depends on PCI
depends on PTP_1588_CLOCK_OPTIONAL
select FW_LOADER
- select LIBCRC32C
+ select CRC32
select NET_DEVLINK
select PAGE_POOL
select DIMLIB
@@ -255,12 +254,21 @@ config BNXT_HWMON
Say Y if you want to expose the thermal sensor data on NetXtreme-C/E
devices, via the hwmon sysfs interface.
+config BNGE
+ tristate "Broadcom Ethernet device support"
+ depends on PCI
+ select NET_DEVLINK
+ select PAGE_POOL
+ help
+ This driver supports Broadcom 50/100/200/400/800 gigabit Ethernet cards.
+ The module will be called bng_en. To compile this driver as a module,
+ choose M here.
+
config BCMASP
tristate "Broadcom ASP 2.0 Ethernet support"
depends on ARCH_BRCMSTB || COMPILE_TEST
default ARCH_BRCMSTB
depends on OF
- select MII
select PHYLIB
select MDIO_BCM_UNIMAC
help
diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile
index bac5cb6ad0cd..10cc1c92ecfc 100644
--- a/drivers/net/ethernet/broadcom/Makefile
+++ b/drivers/net/ethernet/broadcom/Makefile
@@ -18,3 +18,4 @@ obj-$(CONFIG_BGMAC_PLATFORM) += bgmac-platform.o
obj-$(CONFIG_SYSTEMPORT) += bcmsysport.o
obj-$(CONFIG_BNXT) += bnxt/
obj-$(CONFIG_BCMASP) += asp2/
+obj-$(CONFIG_BNGE) += bnge/
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.c b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
index a806dadc4196..fd35f4b4dc50 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
@@ -141,7 +141,7 @@ void bcmasp_flush_rx_port(struct bcmasp_intf *intf)
return;
}
- rx_ctrl_core_wl(priv, mask, priv->hw_info->rx_ctrl_flush);
+ rx_ctrl_core_wl(priv, mask, ASP_RX_CTRL_FLUSH);
}
static void bcmasp_netfilt_hw_en_wake(struct bcmasp_priv *priv,
@@ -518,7 +518,7 @@ void bcmasp_netfilt_suspend(struct bcmasp_intf *intf)
int ret, i;
/* Write all filters to HW */
- for (i = 0; i < NUM_NET_FILTERS; i++) {
+ for (i = 0; i < priv->num_net_filters; i++) {
/* If the filter does not match the port, skip programming. */
if (!priv->net_filters[i].claimed ||
priv->net_filters[i].port != intf->port)
@@ -551,7 +551,7 @@ int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
struct bcmasp_priv *priv = intf->parent;
int j = 0, i;
- for (i = 0; i < NUM_NET_FILTERS; i++) {
+ for (i = 0; i < priv->num_net_filters; i++) {
if (!priv->net_filters[i].claimed ||
priv->net_filters[i].port != intf->port)
continue;
@@ -577,7 +577,7 @@ int bcmasp_netfilt_get_active(struct bcmasp_intf *intf)
struct bcmasp_priv *priv = intf->parent;
int cnt = 0, i;
- for (i = 0; i < NUM_NET_FILTERS; i++) {
+ for (i = 0; i < priv->num_net_filters; i++) {
if (!priv->net_filters[i].claimed ||
priv->net_filters[i].port != intf->port)
continue;
@@ -602,7 +602,7 @@ bool bcmasp_netfilt_check_dup(struct bcmasp_intf *intf,
size_t fs_size = 0;
int i;
- for (i = 0; i < NUM_NET_FILTERS; i++) {
+ for (i = 0; i < priv->num_net_filters; i++) {
if (!priv->net_filters[i].claimed ||
priv->net_filters[i].port != intf->port)
continue;
@@ -670,7 +670,7 @@ struct bcmasp_net_filter *bcmasp_netfilt_get_init(struct bcmasp_intf *intf,
int i, open_index = -1;
/* Check whether we exceed the filter table capacity */
- if (loc != RX_CLS_LOC_ANY && loc >= NUM_NET_FILTERS)
+ if (loc != RX_CLS_LOC_ANY && loc >= priv->num_net_filters)
return ERR_PTR(-EINVAL);
/* If the filter location is busy (already claimed) and we are initializing
@@ -686,7 +686,7 @@ struct bcmasp_net_filter *bcmasp_netfilt_get_init(struct bcmasp_intf *intf,
/* Initialize the loop index based on the desired location or from 0 */
i = loc == RX_CLS_LOC_ANY ? 0 : loc;
- for ( ; i < NUM_NET_FILTERS; i++) {
+ for ( ; i < priv->num_net_filters; i++) {
/* Found matching network filter */
if (!init &&
priv->net_filters[i].claimed &&
@@ -779,7 +779,7 @@ static void bcmasp_en_mda_filter(struct bcmasp_intf *intf, bool en,
priv->mda_filters[i].en = en;
priv->mda_filters[i].port = intf->port;
- rx_filter_core_wl(priv, ((intf->channel + 8) |
+ rx_filter_core_wl(priv, ((intf->channel + priv->tx_chan_offset) |
(en << ASP_RX_FILTER_MDA_CFG_EN_SHIFT) |
ASP_RX_FILTER_MDA_CFG_UMC_SEL(intf->port)),
ASP_RX_FILTER_MDA_CFG(i));
@@ -865,7 +865,7 @@ void bcmasp_disable_all_filters(struct bcmasp_intf *intf)
res_count = bcmasp_total_res_mda_cnt(intf->parent);
/* Disable all filters held by this port */
- for (i = res_count; i < NUM_MDA_FILTERS; i++) {
+ for (i = res_count; i < priv->num_mda_filters; i++) {
if (priv->mda_filters[i].en &&
priv->mda_filters[i].port == intf->port)
bcmasp_en_mda_filter(intf, 0, i);
@@ -909,7 +909,7 @@ int bcmasp_set_en_mda_filter(struct bcmasp_intf *intf, unsigned char *addr,
res_count = bcmasp_total_res_mda_cnt(intf->parent);
- for (i = res_count; i < NUM_MDA_FILTERS; i++) {
+ for (i = res_count; i < priv->num_mda_filters; i++) {
/* If filter not enabled or belongs to another port skip */
if (!priv->mda_filters[i].en ||
priv->mda_filters[i].port != intf->port)
@@ -924,7 +924,7 @@ int bcmasp_set_en_mda_filter(struct bcmasp_intf *intf, unsigned char *addr,
}
/* Create new filter if possible */
- for (i = res_count; i < NUM_MDA_FILTERS; i++) {
+ for (i = res_count; i < priv->num_mda_filters; i++) {
if (priv->mda_filters[i].en)
continue;
@@ -944,12 +944,12 @@ static void bcmasp_core_init_filters(struct bcmasp_priv *priv)
/* Disable all filters and reset software view since the HW
* can lose context while in deep sleep suspend states
*/
- for (i = 0; i < NUM_MDA_FILTERS; i++) {
+ for (i = 0; i < priv->num_mda_filters; i++) {
rx_filter_core_wl(priv, 0x0, ASP_RX_FILTER_MDA_CFG(i));
priv->mda_filters[i].en = 0;
}
- for (i = 0; i < NUM_NET_FILTERS; i++)
+ for (i = 0; i < priv->num_net_filters; i++)
rx_filter_core_wl(priv, 0x0, ASP_RX_FILTER_NET_CFG(i));
/* Top level filter enable bit should be enabled at all times, set
@@ -966,18 +966,8 @@ static void bcmasp_core_init_filters(struct bcmasp_priv *priv)
/* ASP core initialization */
static void bcmasp_core_init(struct bcmasp_priv *priv)
{
- tx_analytics_core_wl(priv, 0x0, ASP_TX_ANALYTICS_CTRL);
- rx_analytics_core_wl(priv, 0x4, ASP_RX_ANALYTICS_CTRL);
-
- rx_edpkt_core_wl(priv, (ASP_EDPKT_HDR_SZ_128 << ASP_EDPKT_HDR_SZ_SHIFT),
- ASP_EDPKT_HDR_CFG);
- rx_edpkt_core_wl(priv,
- (ASP_EDPKT_ENDI_BT_SWP_WD << ASP_EDPKT_ENDI_DESC_SHIFT),
- ASP_EDPKT_ENDI);
-
rx_edpkt_core_wl(priv, 0x1b, ASP_EDPKT_BURST_BUF_PSCAL_TOUT);
rx_edpkt_core_wl(priv, 0x3e8, ASP_EDPKT_BURST_BUF_WRITE_TOUT);
- rx_edpkt_core_wl(priv, 0x3e8, ASP_EDPKT_BURST_BUF_READ_TOUT);
rx_edpkt_core_wl(priv, ASP_EDPKT_ENABLE_EN, ASP_EDPKT_ENABLE);
@@ -1020,6 +1010,18 @@ static void bcmasp_core_clock_select_one(struct bcmasp_priv *priv, bool slow)
ctrl_core_wl(priv, reg, ASP_CTRL_CORE_CLOCK_SELECT);
}
+static void bcmasp_core_clock_select_one_ctrl2(struct bcmasp_priv *priv, bool slow)
+{
+ u32 reg;
+
+ reg = ctrl2_core_rl(priv, ASP_CTRL2_CORE_CLOCK_SELECT);
+ if (slow)
+ reg &= ~ASP_CTRL2_CORE_CLOCK_SELECT_MAIN;
+ else
+ reg |= ASP_CTRL2_CORE_CLOCK_SELECT_MAIN;
+ ctrl2_core_wl(priv, reg, ASP_CTRL2_CORE_CLOCK_SELECT);
+}
+
static void bcmasp_core_clock_set_ll(struct bcmasp_priv *priv, u32 clr, u32 set)
{
u32 reg;
@@ -1108,7 +1110,7 @@ static int bcmasp_get_and_request_irq(struct bcmasp_priv *priv, int i)
return irq;
}
-static void bcmasp_init_wol_shared(struct bcmasp_priv *priv)
+static void bcmasp_init_wol(struct bcmasp_priv *priv)
{
struct platform_device *pdev = priv->pdev;
struct device *dev = &pdev->dev;
@@ -1125,7 +1127,7 @@ static void bcmasp_init_wol_shared(struct bcmasp_priv *priv)
device_set_wakeup_capable(&pdev->dev, 1);
}
-static void bcmasp_enable_wol_shared(struct bcmasp_intf *intf, bool en)
+void bcmasp_enable_wol(struct bcmasp_intf *intf, bool en)
{
struct bcmasp_priv *priv = intf->parent;
struct device *dev = &priv->pdev->dev;
@@ -1154,54 +1156,12 @@ static void bcmasp_enable_wol_shared(struct bcmasp_intf *intf, bool en)
}
}
-static void bcmasp_wol_irq_destroy_shared(struct bcmasp_priv *priv)
+static void bcmasp_wol_irq_destroy(struct bcmasp_priv *priv)
{
if (priv->wol_irq > 0)
free_irq(priv->wol_irq, priv);
}
-static void bcmasp_init_wol_per_intf(struct bcmasp_priv *priv)
-{
- struct platform_device *pdev = priv->pdev;
- struct device *dev = &pdev->dev;
- struct bcmasp_intf *intf;
- int irq;
-
- list_for_each_entry(intf, &priv->intfs, list) {
- irq = bcmasp_get_and_request_irq(priv, intf->port + 1);
- if (irq < 0) {
- dev_warn(dev, "Failed to init WoL irq(port %d): %d\n",
- intf->port, irq);
- continue;
- }
-
- intf->wol_irq = irq;
- intf->wol_irq_enabled = false;
- device_set_wakeup_capable(&pdev->dev, 1);
- }
-}
-
-static void bcmasp_enable_wol_per_intf(struct bcmasp_intf *intf, bool en)
-{
- struct device *dev = &intf->parent->pdev->dev;
-
- if (en ^ intf->wol_irq_enabled)
- irq_set_irq_wake(intf->wol_irq, en);
-
- intf->wol_irq_enabled = en;
- device_set_wakeup_enable(dev, en);
-}
-
-static void bcmasp_wol_irq_destroy_per_intf(struct bcmasp_priv *priv)
-{
- struct bcmasp_intf *intf;
-
- list_for_each_entry(intf, &priv->intfs, list) {
- if (intf->wol_irq > 0)
- free_irq(intf->wol_irq, priv);
- }
-}
-
static void bcmasp_eee_fixup(struct bcmasp_intf *intf, bool en)
{
u32 reg, phy_lpi_overwrite;
@@ -1220,70 +1180,53 @@ static void bcmasp_eee_fixup(struct bcmasp_intf *intf, bool en)
usleep_range(50, 100);
}
-static struct bcmasp_hw_info v20_hw_info = {
- .rx_ctrl_flush = ASP_RX_CTRL_FLUSH,
- .umac2fb = UMAC2FB_OFFSET,
- .rx_ctrl_fb_out_frame_count = ASP_RX_CTRL_FB_OUT_FRAME_COUNT,
- .rx_ctrl_fb_filt_out_frame_count = ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT,
- .rx_ctrl_fb_rx_fifo_depth = ASP_RX_CTRL_FB_RX_FIFO_DEPTH,
-};
-
-static const struct bcmasp_plat_data v20_plat_data = {
- .init_wol = bcmasp_init_wol_per_intf,
- .enable_wol = bcmasp_enable_wol_per_intf,
- .destroy_wol = bcmasp_wol_irq_destroy_per_intf,
- .core_clock_select = bcmasp_core_clock_select_one,
- .hw_info = &v20_hw_info,
-};
-
-static struct bcmasp_hw_info v21_hw_info = {
- .rx_ctrl_flush = ASP_RX_CTRL_FLUSH_2_1,
- .umac2fb = UMAC2FB_OFFSET_2_1,
- .rx_ctrl_fb_out_frame_count = ASP_RX_CTRL_FB_OUT_FRAME_COUNT_2_1,
- .rx_ctrl_fb_filt_out_frame_count =
- ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT_2_1,
- .rx_ctrl_fb_rx_fifo_depth = ASP_RX_CTRL_FB_RX_FIFO_DEPTH_2_1,
-};
-
static const struct bcmasp_plat_data v21_plat_data = {
- .init_wol = bcmasp_init_wol_shared,
- .enable_wol = bcmasp_enable_wol_shared,
- .destroy_wol = bcmasp_wol_irq_destroy_shared,
.core_clock_select = bcmasp_core_clock_select_one,
- .hw_info = &v21_hw_info,
+ .num_mda_filters = 32,
+ .num_net_filters = 32,
+ .tx_chan_offset = 8,
+ .rx_ctrl_offset = 0x0,
};
static const struct bcmasp_plat_data v22_plat_data = {
- .init_wol = bcmasp_init_wol_shared,
- .enable_wol = bcmasp_enable_wol_shared,
- .destroy_wol = bcmasp_wol_irq_destroy_shared,
.core_clock_select = bcmasp_core_clock_select_many,
- .hw_info = &v21_hw_info,
.eee_fixup = bcmasp_eee_fixup,
+ .num_mda_filters = 32,
+ .num_net_filters = 32,
+ .tx_chan_offset = 8,
+ .rx_ctrl_offset = 0x0,
+};
+
+static const struct bcmasp_plat_data v30_plat_data = {
+ .core_clock_select = bcmasp_core_clock_select_one_ctrl2,
+ .num_mda_filters = 20,
+ .num_net_filters = 16,
+ .tx_chan_offset = 0,
+ .rx_ctrl_offset = 0x10000,
};
static void bcmasp_set_pdata(struct bcmasp_priv *priv, const struct bcmasp_plat_data *pdata)
{
- priv->init_wol = pdata->init_wol;
- priv->enable_wol = pdata->enable_wol;
- priv->destroy_wol = pdata->destroy_wol;
priv->core_clock_select = pdata->core_clock_select;
priv->eee_fixup = pdata->eee_fixup;
- priv->hw_info = pdata->hw_info;
+ priv->num_mda_filters = pdata->num_mda_filters;
+ priv->num_net_filters = pdata->num_net_filters;
+ priv->tx_chan_offset = pdata->tx_chan_offset;
+ priv->rx_ctrl_offset = pdata->rx_ctrl_offset;
}
static const struct of_device_id bcmasp_of_match[] = {
- { .compatible = "brcm,asp-v2.0", .data = &v20_plat_data },
{ .compatible = "brcm,asp-v2.1", .data = &v21_plat_data },
{ .compatible = "brcm,asp-v2.2", .data = &v22_plat_data },
+ { .compatible = "brcm,asp-v3.0", .data = &v30_plat_data },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, bcmasp_of_match);
static const struct of_device_id bcmasp_mdio_of_match[] = {
- { .compatible = "brcm,asp-v2.2-mdio", },
{ .compatible = "brcm,asp-v2.1-mdio", },
- { .compatible = "brcm,asp-v2.0-mdio", },
+ { .compatible = "brcm,asp-v2.2-mdio", },
+ { .compatible = "brcm,asp-v3.0-mdio", },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, bcmasp_mdio_of_match);
@@ -1300,9 +1243,9 @@ static void bcmasp_remove_intfs(struct bcmasp_priv *priv)
static int bcmasp_probe(struct platform_device *pdev)
{
- struct device_node *ports_node, *intf_node;
const struct bcmasp_plat_data *pdata;
struct device *dev = &pdev->dev;
+ struct device_node *ports_node;
struct bcmasp_priv *priv;
struct bcmasp_intf *intf;
int ret = 0, count = 0;
@@ -1365,6 +1308,17 @@ static int bcmasp_probe(struct platform_device *pdev)
* how many interfaces come up.
*/
bcmasp_core_init(priv);
+
+ priv->mda_filters = devm_kcalloc(dev, priv->num_mda_filters,
+ sizeof(*priv->mda_filters), GFP_KERNEL);
+ if (!priv->mda_filters)
+ return -ENOMEM;
+
+ priv->net_filters = devm_kcalloc(dev, priv->num_net_filters,
+ sizeof(*priv->net_filters), GFP_KERNEL);
+ if (!priv->net_filters)
+ return -ENOMEM;
+
bcmasp_core_init_filters(priv);
ports_node = of_find_node_by_name(dev->of_node, "ethernet-ports");
@@ -1374,12 +1328,12 @@ static int bcmasp_probe(struct platform_device *pdev)
}
i = 0;
- for_each_available_child_of_node(ports_node, intf_node) {
+ for_each_available_child_of_node_scoped(ports_node, intf_node) {
intf = bcmasp_interface_create(priv, intf_node, i);
if (!intf) {
dev_err(dev, "Cannot create eth interface %d\n", i);
bcmasp_remove_intfs(priv);
- of_node_put(intf_node);
+ ret = -ENOMEM;
goto of_put_exit;
}
list_add_tail(&intf->list, &priv->intfs);
@@ -1387,7 +1341,7 @@ static int bcmasp_probe(struct platform_device *pdev)
}
/* Check and enable WoL */
- priv->init_wol(priv);
+ bcmasp_init_wol(priv);
/* Drop the clock reference count now and let ndo_open()/ndo_close()
* manage it for us from now on.
@@ -1404,7 +1358,7 @@ static int bcmasp_probe(struct platform_device *pdev)
if (ret) {
netdev_err(intf->ndev,
"failed to register net_device: %d\n", ret);
- priv->destroy_wol(priv);
+ bcmasp_wol_irq_destroy(priv);
bcmasp_remove_intfs(priv);
goto of_put_exit;
}
@@ -1425,7 +1379,7 @@ static void bcmasp_remove(struct platform_device *pdev)
if (!priv)
return;
- priv->destroy_wol(priv);
+ bcmasp_wol_irq_destroy(priv);
bcmasp_remove_intfs(priv);
}
@@ -1500,7 +1454,7 @@ static SIMPLE_DEV_PM_OPS(bcmasp_pm_ops,
static struct platform_driver bcmasp_driver = {
.probe = bcmasp_probe,
- .remove_new = bcmasp_remove,
+ .remove = bcmasp_remove,
.shutdown = bcmasp_shutdown,
.driver = {
.name = "brcm,asp-v2",
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.h b/drivers/net/ethernet/broadcom/asp2/bcmasp.h
index f93cb3da44b0..74adfdb50e11 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.h
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.h
@@ -53,22 +53,15 @@
#define ASP_RX_CTRL_FB_0_FRAME_COUNT 0x14
#define ASP_RX_CTRL_FB_1_FRAME_COUNT 0x18
#define ASP_RX_CTRL_FB_8_FRAME_COUNT 0x1c
-/* asp2.1 diverges offsets here */
-/* ASP2.0 */
-#define ASP_RX_CTRL_FB_OUT_FRAME_COUNT 0x20
-#define ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT 0x24
-#define ASP_RX_CTRL_FLUSH 0x28
-#define ASP_CTRL_UMAC0_FLUSH_MASK (BIT(0) | BIT(12))
-#define ASP_CTRL_UMAC1_FLUSH_MASK (BIT(1) | BIT(13))
-#define ASP_CTRL_SPB_FLUSH_MASK (BIT(8) | BIT(20))
-#define ASP_RX_CTRL_FB_RX_FIFO_DEPTH 0x30
-/* ASP2.1 */
-#define ASP_RX_CTRL_FB_9_FRAME_COUNT_2_1 0x20
-#define ASP_RX_CTRL_FB_10_FRAME_COUNT_2_1 0x24
-#define ASP_RX_CTRL_FB_OUT_FRAME_COUNT_2_1 0x28
-#define ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT_2_1 0x2c
-#define ASP_RX_CTRL_FLUSH_2_1 0x30
-#define ASP_RX_CTRL_FB_RX_FIFO_DEPTH_2_1 0x38
+#define ASP_RX_CTRL_FB_9_FRAME_COUNT 0x20
+#define ASP_RX_CTRL_FB_10_FRAME_COUNT 0x24
+#define ASP_RX_CTRL_FB_OUT_FRAME_COUNT 0x28
+#define ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT 0x2c
+#define ASP_RX_CTRL_FLUSH 0x30
+#define ASP_CTRL_UMAC0_FLUSH_MASK (BIT(0) | BIT(12))
+#define ASP_CTRL_UMAC1_FLUSH_MASK (BIT(1) | BIT(13))
+#define ASP_CTRL_SPB_FLUSH_MASK (BIT(8) | BIT(20))
+#define ASP_RX_CTRL_FB_RX_FIFO_DEPTH 0x38
#define ASP_RX_FILTER_OFFSET 0x80000
#define ASP_RX_FILTER_BLK_CTRL 0x0
@@ -345,11 +338,6 @@ struct bcmasp_intf {
u32 wolopts;
u8 sopass[SOPASS_MAX];
- /* Used if per intf wol irq */
- int wol_irq;
- unsigned int wol_irq_enabled:1;
-
- struct ethtool_keee eee;
};
#define NUM_NET_FILTERS 32
@@ -372,21 +360,13 @@ struct bcmasp_mda_filter {
u8 mask[ETH_ALEN];
};
-struct bcmasp_hw_info {
- u32 rx_ctrl_flush;
- u32 umac2fb;
- u32 rx_ctrl_fb_out_frame_count;
- u32 rx_ctrl_fb_filt_out_frame_count;
- u32 rx_ctrl_fb_rx_fifo_depth;
-};
-
struct bcmasp_plat_data {
- void (*init_wol)(struct bcmasp_priv *priv);
- void (*enable_wol)(struct bcmasp_intf *intf, bool en);
- void (*destroy_wol)(struct bcmasp_priv *priv);
void (*core_clock_select)(struct bcmasp_priv *priv, bool slow);
void (*eee_fixup)(struct bcmasp_intf *priv, bool en);
- struct bcmasp_hw_info *hw_info;
+ unsigned int num_mda_filters;
+ unsigned int num_net_filters;
+ unsigned int tx_chan_offset;
+ unsigned int rx_ctrl_offset;
};
struct bcmasp_priv {
@@ -401,18 +381,18 @@ struct bcmasp_priv {
int wol_irq;
unsigned long wol_irq_enabled_mask;
- void (*init_wol)(struct bcmasp_priv *priv);
- void (*enable_wol)(struct bcmasp_intf *intf, bool en);
- void (*destroy_wol)(struct bcmasp_priv *priv);
void (*core_clock_select)(struct bcmasp_priv *priv, bool slow);
void (*eee_fixup)(struct bcmasp_intf *intf, bool en);
+ unsigned int num_mda_filters;
+ unsigned int num_net_filters;
+ unsigned int tx_chan_offset;
+ unsigned int rx_ctrl_offset;
void __iomem *base;
- struct bcmasp_hw_info *hw_info;
struct list_head intfs;
- struct bcmasp_mda_filter mda_filters[NUM_MDA_FILTERS];
+ struct bcmasp_mda_filter *mda_filters;
/* MAC destination address filters lock */
spinlock_t mda_lock;
@@ -420,7 +400,7 @@ struct bcmasp_priv {
/* Protects accesses to ASP_CTRL_CLOCK_CTRL */
spinlock_t clk_lock;
- struct bcmasp_net_filter net_filters[NUM_NET_FILTERS];
+ struct bcmasp_net_filter *net_filters;
/* Network filter lock */
struct mutex net_lock;
@@ -510,8 +490,8 @@ BCMASP_FP_IO_MACRO_Q(rx_edpkt_cfg);
#define PKT_OFFLOAD_EPKT_IP(x) ((x) << 21)
#define PKT_OFFLOAD_EPKT_TP(x) ((x) << 19)
#define PKT_OFFLOAD_EPKT_LEN(x) ((x) << 16)
-#define PKT_OFFLOAD_EPKT_CSUM_L3 BIT(15)
-#define PKT_OFFLOAD_EPKT_CSUM_L2 BIT(14)
+#define PKT_OFFLOAD_EPKT_CSUM_L4 BIT(15)
+#define PKT_OFFLOAD_EPKT_CSUM_L3 BIT(14)
#define PKT_OFFLOAD_EPKT_ID(x) ((x) << 12)
#define PKT_OFFLOAD_EPKT_SEQ(x) ((x) << 10)
#define PKT_OFFLOAD_EPKT_TS(x) ((x) << 8)
@@ -543,12 +523,27 @@ BCMASP_CORE_IO_MACRO(intr2, ASP_INTR2_OFFSET);
BCMASP_CORE_IO_MACRO(wakeup_intr2, ASP_WAKEUP_INTR2_OFFSET);
BCMASP_CORE_IO_MACRO(tx_analytics, ASP_TX_ANALYTICS_OFFSET);
BCMASP_CORE_IO_MACRO(rx_analytics, ASP_RX_ANALYTICS_OFFSET);
-BCMASP_CORE_IO_MACRO(rx_ctrl, ASP_RX_CTRL_OFFSET);
BCMASP_CORE_IO_MACRO(rx_filter, ASP_RX_FILTER_OFFSET);
BCMASP_CORE_IO_MACRO(rx_edpkt, ASP_EDPKT_OFFSET);
BCMASP_CORE_IO_MACRO(ctrl, ASP_CTRL_OFFSET);
BCMASP_CORE_IO_MACRO(ctrl2, ASP_CTRL2_OFFSET);
+#define BCMASP_CORE_IO_MACRO_OFFSET(name, offset) \
+static inline u32 name##_core_rl(struct bcmasp_priv *priv, \
+ u32 off) \
+{ \
+ u32 reg = readl_relaxed(priv->base + priv->name##_offset + \
+ (offset) + off); \
+ return reg; \
+} \
+static inline void name##_core_wl(struct bcmasp_priv *priv, \
+ u32 val, u32 off) \
+{ \
+ writel_relaxed(val, priv->base + priv->name##_offset + \
+ (offset) + off); \
+}
+BCMASP_CORE_IO_MACRO_OFFSET(rx_ctrl, ASP_RX_CTRL_OFFSET);
+
struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv,
struct device_node *ndev_dn, int i);
@@ -601,5 +596,5 @@ int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
void bcmasp_netfilt_suspend(struct bcmasp_intf *intf);
-void bcmasp_eee_enable_set(struct bcmasp_intf *intf, bool enable);
+void bcmasp_enable_wol(struct bcmasp_intf *intf, bool en);
#endif
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
index 484fc2b5626f..dd80ccfca19d 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) "bcmasp_ethtool: " fmt
-#include <asm-generic/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
@@ -10,7 +10,6 @@
#include "bcmasp_intf_defs.h"
enum bcmasp_stat_type {
- BCMASP_STAT_RX_EDPKT,
BCMASP_STAT_RX_CTRL,
BCMASP_STAT_RX_CTRL_PER_INTF,
BCMASP_STAT_SOFT,
@@ -33,8 +32,6 @@ struct bcmasp_stats {
.reg_offset = offset, \
}
-#define STAT_BCMASP_RX_EDPKT(str, offset) \
- STAT_BCMASP_OFFSET(str, BCMASP_STAT_RX_EDPKT, offset)
#define STAT_BCMASP_RX_CTRL(str, offset) \
STAT_BCMASP_OFFSET(str, BCMASP_STAT_RX_CTRL, offset)
#define STAT_BCMASP_RX_CTRL_PER_INTF(str, offset) \
@@ -42,11 +39,6 @@ struct bcmasp_stats {
/* Must match the order of struct bcmasp_mib_counters */
static const struct bcmasp_stats bcmasp_gstrings_stats[] = {
- /* EDPKT counters */
- STAT_BCMASP_RX_EDPKT("RX Time Stamp", ASP_EDPKT_RX_TS_COUNTER),
- STAT_BCMASP_RX_EDPKT("RX PKT Count", ASP_EDPKT_RX_PKT_CNT),
- STAT_BCMASP_RX_EDPKT("RX PKT Buffered", ASP_EDPKT_HDR_EXTR_CNT),
- STAT_BCMASP_RX_EDPKT("RX PKT Pushed to DRAM", ASP_EDPKT_HDR_OUT_CNT),
/* ASP RX control */
STAT_BCMASP_RX_CTRL_PER_INTF("Frames From Unimac",
ASP_RX_CTRL_UMAC_0_FRAME_COUNT),
@@ -71,23 +63,6 @@ static const struct bcmasp_stats bcmasp_gstrings_stats[] = {
#define BCMASP_STATS_LEN ARRAY_SIZE(bcmasp_gstrings_stats)
-static u16 bcmasp_stat_fixup_offset(struct bcmasp_intf *intf,
- const struct bcmasp_stats *s)
-{
- struct bcmasp_priv *priv = intf->parent;
-
- if (!strcmp("Frames Out(Buffer)", s->stat_string))
- return priv->hw_info->rx_ctrl_fb_out_frame_count;
-
- if (!strcmp("Frames Out(Filters)", s->stat_string))
- return priv->hw_info->rx_ctrl_fb_filt_out_frame_count;
-
- if (!strcmp("RX Buffer FIFO Depth", s->stat_string))
- return priv->hw_info->rx_ctrl_fb_rx_fifo_depth;
-
- return s->reg_offset;
-}
-
static int bcmasp_get_sset_count(struct net_device *dev, int string_set)
{
switch (string_set) {
@@ -101,14 +76,14 @@ static int bcmasp_get_sset_count(struct net_device *dev, int string_set)
static void bcmasp_get_strings(struct net_device *dev, u32 stringset,
u8 *data)
{
+ const char *str;
unsigned int i;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < BCMASP_STATS_LEN; i++) {
- memcpy(data + i * ETH_GSTRING_LEN,
- bcmasp_gstrings_stats[i].stat_string,
- ETH_GSTRING_LEN);
+ str = bcmasp_gstrings_stats[i].stat_string;
+ ethtool_puts(&data, str);
}
break;
default:
@@ -126,13 +101,10 @@ static void bcmasp_update_mib_counters(struct bcmasp_intf *intf)
char *p;
s = &bcmasp_gstrings_stats[i];
- offset = bcmasp_stat_fixup_offset(intf, s);
+ offset = s->reg_offset;
switch (s->type) {
case BCMASP_STAT_SOFT:
continue;
- case BCMASP_STAT_RX_EDPKT:
- val = rx_edpkt_core_rl(intf->parent, offset);
- break;
case BCMASP_STAT_RX_CTRL:
val = rx_ctrl_core_rl(intf->parent, offset);
break;
@@ -191,11 +163,30 @@ static void bcmasp_set_msglevel(struct net_device *dev, u32 level)
static void bcmasp_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct bcmasp_intf *intf = netdev_priv(dev);
+ struct bcmasp_priv *priv = intf->parent;
+ struct device *kdev = &priv->pdev->dev;
+ u32 phy_wolopts = 0;
+
+ if (dev->phydev) {
+ phy_ethtool_get_wol(dev->phydev, wol);
+ phy_wolopts = wol->wolopts;
+ }
+
+ /* MAC is not wake-up capable, return what the PHY does */
+ if (!device_can_wakeup(kdev))
+ return;
+
+ /* Overlay MAC capabilities with that of the PHY queried before */
+ wol->supported |= BCMASP_SUPPORTED_WAKE;
+ wol->wolopts |= intf->wolopts;
+
+ /* Return the PHY configured magic password */
+ if (phy_wolopts & WAKE_MAGICSECURE)
+ return;
- wol->supported = BCMASP_SUPPORTED_WAKE;
- wol->wolopts = intf->wolopts;
memset(wol->sopass, 0, sizeof(wol->sopass));
+ /* Otherwise the MAC one */
if (wol->wolopts & WAKE_MAGICSECURE)
memcpy(wol->sopass, intf->sopass, sizeof(intf->sopass));
}
@@ -205,17 +196,28 @@ static int bcmasp_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
struct bcmasp_intf *intf = netdev_priv(dev);
struct bcmasp_priv *priv = intf->parent;
struct device *kdev = &priv->pdev->dev;
+ int ret = 0;
+
+ /* Try Wake-on-LAN from the PHY first */
+ if (dev->phydev) {
+ ret = phy_ethtool_set_wol(dev->phydev, wol);
+ if (ret != -EOPNOTSUPP && wol->wolopts)
+ return ret;
+ }
if (!device_can_wakeup(kdev))
return -EOPNOTSUPP;
+ if (wol->wolopts & ~BCMASP_SUPPORTED_WAKE)
+ return -EINVAL;
+
/* Interface Specific */
intf->wolopts = wol->wolopts;
if (intf->wolopts & WAKE_MAGICSECURE)
memcpy(intf->sopass, wol->sopass, sizeof(wol->sopass));
mutex_lock(&priv->wol_lock);
- priv->enable_wol(intf, !!intf->wolopts);
+ bcmasp_enable_wol(intf, !!intf->wolopts);
mutex_unlock(&priv->wol_lock);
return 0;
@@ -289,7 +291,7 @@ static int bcmasp_flow_get(struct bcmasp_intf *intf, struct ethtool_rxnfc *cmd)
memcpy(&cmd->fs, &nfilter->fs, sizeof(nfilter->fs));
- cmd->data = NUM_NET_FILTERS;
+ cmd->data = intf->parent->num_net_filters;
return 0;
}
@@ -336,7 +338,7 @@ static int bcmasp_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
break;
case ETHTOOL_GRXCLSRLALL:
err = bcmasp_netfilt_get_all_active(intf, rule_locs, &cmd->rule_cnt);
- cmd->data = NUM_NET_FILTERS;
+ cmd->data = intf->parent->num_net_filters;
break;
default:
err = -EOPNOTSUPP;
@@ -348,58 +350,19 @@ static int bcmasp_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
return err;
}
-void bcmasp_eee_enable_set(struct bcmasp_intf *intf, bool enable)
-{
- u32 reg;
-
- reg = umac_rl(intf, UMC_EEE_CTRL);
- if (enable)
- reg |= EEE_EN;
- else
- reg &= ~EEE_EN;
- umac_wl(intf, reg, UMC_EEE_CTRL);
-
- intf->eee.eee_enabled = enable;
-}
-
static int bcmasp_get_eee(struct net_device *dev, struct ethtool_keee *e)
{
- struct bcmasp_intf *intf = netdev_priv(dev);
- struct ethtool_keee *p = &intf->eee;
-
if (!dev->phydev)
return -ENODEV;
- e->tx_lpi_enabled = p->tx_lpi_enabled;
- e->tx_lpi_timer = umac_rl(intf, UMC_EEE_LPI_TIMER);
-
return phy_ethtool_get_eee(dev->phydev, e);
}
static int bcmasp_set_eee(struct net_device *dev, struct ethtool_keee *e)
{
- struct bcmasp_intf *intf = netdev_priv(dev);
- struct ethtool_keee *p = &intf->eee;
- int ret;
-
if (!dev->phydev)
return -ENODEV;
- if (!p->eee_enabled) {
- bcmasp_eee_enable_set(intf, false);
- } else {
- ret = phy_init_eee(dev->phydev, 0);
- if (ret) {
- netif_err(intf, hw, dev,
- "EEE initialization failed: %d\n", ret);
- return ret;
- }
-
- umac_wl(intf, e->tx_lpi_timer, UMC_EEE_LPI_TIMER);
- intf->eee.tx_lpi_enabled = e->tx_lpi_enabled;
- bcmasp_eee_enable_set(intf, true);
- }
-
return phy_ethtool_set_eee(dev->phydev, e);
}
@@ -496,4 +459,6 @@ const struct ethtool_ops bcmasp_ethtool_ops = {
.get_strings = bcmasp_get_strings,
.get_ethtool_stats = bcmasp_get_ethtool_stats,
.get_sset_count = bcmasp_get_sset_count,
+ .get_ts_info = ethtool_op_get_ts_info,
+ .nway_reset = phy_ethtool_nway_reset,
};
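
Note: the string-table hunks above replace manual memcpy() copies at fixed ETH_GSTRING_LEN strides with ethtool_puts()/ethtool_sprintf(), which copy one string and advance the output cursor themselves. A minimal sketch of the pattern, using hypothetical stat names rather than this driver's tables:

#include <linux/kernel.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>

/* Hypothetical stat names, for illustration only. */
static const char * const demo_stats[] = { "rx_packets", "tx_packets" };

static void demo_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	unsigned int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(demo_stats); i++)
		ethtool_puts(&data, demo_stats[i]); /* advances data by ETH_GSTRING_LEN */

	/* Formatted variant, e.g. for per-queue counters. */
	ethtool_sprintf(&data, "txq%d_packets", 0);
}
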
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
index 72ea97c5d5d4..b9973956c480 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
@@ -180,14 +180,14 @@ static struct sk_buff *bcmasp_csum_offload(struct net_device *dev,
case htons(ETH_P_IP):
header |= PKT_OFFLOAD_HDR_SIZE_2((ip_hdrlen(skb) >> 8) & 0xf);
header2 |= PKT_OFFLOAD_HDR2_SIZE_2(ip_hdrlen(skb) & 0xff);
- epkt |= PKT_OFFLOAD_EPKT_IP(0) | PKT_OFFLOAD_EPKT_CSUM_L2;
+ epkt |= PKT_OFFLOAD_EPKT_IP(0);
ip_proto = ip_hdr(skb)->protocol;
header_cnt += 2;
break;
case htons(ETH_P_IPV6):
header |= PKT_OFFLOAD_HDR_SIZE_2((IP6_HLEN >> 8) & 0xf);
header2 |= PKT_OFFLOAD_HDR2_SIZE_2(IP6_HLEN & 0xff);
- epkt |= PKT_OFFLOAD_EPKT_IP(1) | PKT_OFFLOAD_EPKT_CSUM_L2;
+ epkt |= PKT_OFFLOAD_EPKT_IP(1);
ip_proto = ipv6_hdr(skb)->nexthdr;
header_cnt += 2;
break;
@@ -198,12 +198,12 @@ static struct sk_buff *bcmasp_csum_offload(struct net_device *dev,
switch (ip_proto) {
case IPPROTO_TCP:
header2 |= PKT_OFFLOAD_HDR2_SIZE_3(tcp_hdrlen(skb));
- epkt |= PKT_OFFLOAD_EPKT_TP(0) | PKT_OFFLOAD_EPKT_CSUM_L3;
+ epkt |= PKT_OFFLOAD_EPKT_TP(0) | PKT_OFFLOAD_EPKT_CSUM_L4;
header_cnt++;
break;
case IPPROTO_UDP:
header2 |= PKT_OFFLOAD_HDR2_SIZE_3(UDP_HLEN);
- epkt |= PKT_OFFLOAD_EPKT_TP(1) | PKT_OFFLOAD_EPKT_CSUM_L3;
+ epkt |= PKT_OFFLOAD_EPKT_TP(1) | PKT_OFFLOAD_EPKT_CSUM_L4;
header_cnt++;
break;
default:
@@ -322,6 +322,7 @@ static netdev_tx_t bcmasp_xmit(struct sk_buff *skb, struct net_device *dev)
}
/* Rewind so we do not have a hole */
spb_index = intf->tx_spb_index;
+ dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -364,6 +365,9 @@ static netdev_tx_t bcmasp_xmit(struct sk_buff *skb, struct net_device *dev)
intf->tx_spb_index = spb_index;
intf->tx_spb_dma_valid = valid;
+
+ skb_tx_timestamp(skb);
+
bcmasp_intf_tx_write(intf, intf->tx_spb_dma_valid);
if (tx_spb_ring_full(intf, MAX_SKB_FRAGS + 1))
@@ -436,10 +440,8 @@ static void umac_init(struct bcmasp_intf *intf)
umac_wl(intf, 0x800, UMC_RX_MAX_PKT_SZ);
}
-static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
+static int bcmasp_tx_reclaim(struct bcmasp_intf *intf)
{
- struct bcmasp_intf *intf =
- container_of(napi, struct bcmasp_intf, tx_napi);
struct bcmasp_intf_stats64 *stats = &intf->stats64;
struct device *kdev = &intf->parent->pdev->dev;
unsigned long read, released = 0;
@@ -482,10 +484,16 @@ static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
DESC_RING_COUNT);
}
- /* Ensure all descriptors have been written to DRAM for the hardware
- * to see updated contents.
- */
- wmb();
+ return released;
+}
+
+static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
+{
+ struct bcmasp_intf *intf =
+ container_of(napi, struct bcmasp_intf, tx_napi);
+ int released = 0;
+
+ released = bcmasp_tx_reclaim(intf);
napi_complete(&intf->tx_napi);
@@ -597,10 +605,8 @@ next:
bcmasp_intf_rx_desc_write(intf, intf->rx_edpkt_dma_read);
- if (processed < budget) {
- napi_complete_done(&intf->rx_napi, processed);
+ if (processed < budget && napi_complete_done(&intf->rx_napi, processed))
bcmasp_enable_rx_irq(intf, 1);
- }
return processed;
}
@@ -611,7 +617,6 @@ static void bcmasp_adj_link(struct net_device *dev)
struct phy_device *phydev = dev->phydev;
u32 cmd_bits = 0, reg;
int changed = 0;
- bool active;
if (intf->old_link != phydev->link) {
changed = 1;
@@ -669,8 +674,13 @@ static void bcmasp_adj_link(struct net_device *dev)
}
umac_wl(intf, reg, UMC_CMD);
- active = phy_init_eee(phydev, 0) >= 0;
- bcmasp_eee_enable_set(intf, active);
+ umac_wl(intf, phydev->eee_cfg.tx_lpi_timer, UMC_EEE_LPI_TIMER);
+ reg = umac_rl(intf, UMC_EEE_CTRL);
+ if (phydev->enable_tx_lpi)
+ reg |= EEE_EN;
+ else
+ reg &= ~EEE_EN;
+ umac_wl(intf, reg, UMC_EEE_CTRL);
}
reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
@@ -797,6 +807,7 @@ static void bcmasp_init_tx(struct bcmasp_intf *intf)
intf->tx_spb_dma_read = intf->tx_spb_dma_addr;
intf->tx_spb_index = 0;
intf->tx_spb_clean_index = 0;
+ memset(intf->tx_cbs, 0, sizeof(struct bcmasp_tx_cb) * DESC_RING_COUNT);
/* Make sure channels are disabled */
tx_spb_ctrl_wl(intf, 0x0, TX_SPB_CTRL_ENABLE);
@@ -805,9 +816,10 @@ static void bcmasp_init_tx(struct bcmasp_intf *intf)
/* Tx SPB */
tx_spb_ctrl_wl(intf, ((intf->channel + 8) << TX_SPB_CTRL_XF_BID_SHIFT),
TX_SPB_CTRL_XF_CTRL2);
- tx_pause_ctrl_wl(intf, (1 << (intf->channel + 8)), TX_PAUSE_MAP_VECTOR);
+
+ if (intf->parent->tx_chan_offset)
+ tx_pause_ctrl_wl(intf, (1 << (intf->channel + 8)), TX_PAUSE_MAP_VECTOR);
tx_spb_top_wl(intf, 0x1e, TX_SPB_TOP_BLKOUT);
- tx_spb_top_wl(intf, 0x0, TX_SPB_TOP_SPRE_BW_CTRL);
tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_READ);
tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_BASE);
@@ -885,6 +897,8 @@ static void bcmasp_netif_deinit(struct net_device *dev)
} while (timeout-- > 0);
tx_spb_dma_wl(intf, 0x0, TX_SPB_DMA_FIFO_CTRL);
+ bcmasp_tx_reclaim(intf);
+
umac_enable_set(intf, UMC_CMD_TX_EN, 0);
phy_stop(dev->phydev);
@@ -1044,6 +1058,9 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
/* Indicate that the MAC is responsible for PHY PM */
phydev->mac_managed_pm = true;
+
+ /* Set phylib's copy of the LPI timer */
+ phydev->eee_cfg.tx_lpi_timer = umac_rl(intf, UMC_EEE_LPI_TIMER);
}
umac_reset(intf);
@@ -1167,7 +1184,7 @@ static void bcmasp_map_res(struct bcmasp_priv *priv, struct bcmasp_intf *intf)
{
/* Per port */
intf->res.umac = priv->base + UMC_OFFSET(intf);
- intf->res.umac2fb = priv->base + (priv->hw_info->umac2fb +
+ intf->res.umac2fb = priv->base + (UMAC2FB_OFFSET + priv->rx_ctrl_offset +
(intf->port * 0x4));
intf->res.rgmii = priv->base + RGMII_OFFSET(intf);
@@ -1182,7 +1199,6 @@ static void bcmasp_map_res(struct bcmasp_priv *priv, struct bcmasp_intf *intf)
intf->rx_edpkt_cfg = priv->base + RX_EDPKT_CFG_OFFSET(intf);
}
-#define MAX_IRQ_STR_LEN 64
struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv,
struct device_node *ndev_dn, int i)
{
@@ -1266,6 +1282,8 @@ struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv,
ndev->hw_features |= ndev->features;
ndev->needed_headroom += sizeof(struct bcmasp_pkt_offload);
+ netdev_sw_irq_coalesce_default_on(ndev);
+
return intf;
err_free_netdev:
@@ -1320,7 +1338,8 @@ static void bcmasp_suspend_to_wol(struct bcmasp_intf *intf)
ASP_WAKEUP_INTR2_MASK_CLEAR);
}
- if (intf->eee.eee_enabled && intf->parent->eee_fixup)
+ if (ndev->phydev && ndev->phydev->eee_cfg.eee_enabled &&
+ intf->parent->eee_fixup)
intf->parent->eee_fixup(intf, true);
netif_dbg(intf, wol, ndev, "entered WOL mode\n");
@@ -1362,7 +1381,8 @@ static void bcmasp_resume_from_wol(struct bcmasp_intf *intf)
{
u32 reg;
- if (intf->eee.eee_enabled && intf->parent->eee_fixup)
+ if (intf->ndev->phydev && intf->ndev->phydev->eee_cfg.eee_enabled &&
+ intf->parent->eee_fixup)
intf->parent->eee_fixup(intf, false);
reg = umac_rl(intf, UMC_MPD_CTRL);
@@ -1393,9 +1413,6 @@ int bcmasp_interface_resume(struct bcmasp_intf *intf)
bcmasp_resume_from_wol(intf);
- if (intf->eee.eee_enabled)
- bcmasp_eee_enable_set(intf, true);
-
netif_device_attach(dev);
return 0;
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf_defs.h b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf_defs.h
index ad742612895f..af7418348e81 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf_defs.h
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf_defs.h
@@ -118,8 +118,7 @@
#define UMC_PSW_MS 0x624
#define UMC_PSW_LS 0x628
-#define UMAC2FB_OFFSET_2_1 0x9f044
-#define UMAC2FB_OFFSET 0x9f03c
+#define UMAC2FB_OFFSET 0x9f044
#define UMAC2FB_CFG 0x0
#define UMAC2FB_CFG_OPUT_EN BIT(0)
#define UMAC2FB_CFG_VLAN_EN BIT(1)
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 3e4fb3c3e834..888f28f11406 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -31,6 +31,7 @@
#include <linux/ssb/ssb.h>
#include <linux/slab.h>
#include <linux/phy.h>
+#include <linux/phy_fixed.h>
#include <linux/uaccess.h>
#include <asm/io.h>
@@ -575,7 +576,7 @@ static void b44_check_phy(struct b44 *bp)
static void b44_timer(struct timer_list *t)
{
- struct b44 *bp = from_timer(bp, t, timer);
+ struct b44 *bp = timer_container_of(bp, t, timer);
spin_lock_irq(&bp->lock);
@@ -1042,13 +1043,13 @@ static int b44_change_mtu(struct net_device *dev, int new_mtu)
/* We'll just catch it later when the
* device is up'd.
*/
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
spin_lock_irq(&bp->lock);
b44_halt(bp);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
b44_init_rings(bp);
b44_init_hw(bp, B44_FULL_RESET);
spin_unlock_irq(&bp->lock);
@@ -1628,7 +1629,7 @@ static int b44_close(struct net_device *dev)
napi_disable(&bp->napi);
- del_timer_sync(&bp->timer);
+ timer_delete_sync(&bp->timer);
spin_lock_irq(&bp->lock);
@@ -2009,12 +2010,14 @@ static int b44_set_pauseparam(struct net_device *dev,
bp->flags |= B44_FLAG_TX_PAUSE;
else
bp->flags &= ~B44_FLAG_TX_PAUSE;
- if (bp->flags & B44_FLAG_PAUSE_AUTO) {
- b44_halt(bp);
- b44_init_rings(bp);
- b44_init_hw(bp, B44_FULL_RESET);
- } else {
- __b44_set_flow_ctrl(bp, bp->flags);
+ if (netif_running(dev)) {
+ if (bp->flags & B44_FLAG_PAUSE_AUTO) {
+ b44_halt(bp);
+ b44_init_rings(bp);
+ b44_init_hw(bp, B44_FULL_RESET);
+ } else {
+ __b44_set_flow_ctrl(bp, bp->flags);
+ }
}
spin_unlock_irq(&bp->lock);
@@ -2231,7 +2234,6 @@ static int b44_register_phy_one(struct b44 *bp)
struct mii_bus *mii_bus;
struct ssb_device *sdev = bp->sdev;
struct phy_device *phydev;
- char bus_id[MII_BUS_ID_SIZE + 3];
struct ssb_sprom *sprom = &sdev->bus->sprom;
int err;
@@ -2258,27 +2260,26 @@ static int b44_register_phy_one(struct b44 *bp)
goto err_out_mdiobus;
}
- if (!mdiobus_is_registered_device(bp->mii_bus, bp->phy_addr) &&
- (sprom->boardflags_lo & (B44_BOARDFLAG_ROBO | B44_BOARDFLAG_ADM))) {
-
+ phydev = mdiobus_get_phy(bp->mii_bus, bp->phy_addr);
+ if (!phydev &&
+ sprom->boardflags_lo & (B44_BOARDFLAG_ROBO | B44_BOARDFLAG_ADM)) {
dev_info(sdev->dev,
"could not find PHY at %i, use fixed one\n",
bp->phy_addr);
- bp->phy_addr = 0;
- snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, "fixed-0",
- bp->phy_addr);
- } else {
- snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, mii_bus->id,
- bp->phy_addr);
+ phydev = fixed_phy_register_100fd();
+ if (!IS_ERR(phydev))
+ bp->phy_addr = phydev->mdio.addr;
}
- phydev = phy_connect(bp->dev, bus_id, &b44_adjust_link,
- PHY_INTERFACE_MODE_MII);
- if (IS_ERR(phydev)) {
+ if (IS_ERR_OR_NULL(phydev))
+ err = -ENODEV;
+ else
+ err = phy_connect_direct(bp->dev, phydev, &b44_adjust_link,
+ PHY_INTERFACE_MODE_MII);
+ if (err) {
dev_err(sdev->dev, "could not attach PHY at %i\n",
bp->phy_addr);
- err = PTR_ERR(phydev);
goto err_out_mdiobus_unregister;
}
@@ -2291,7 +2292,6 @@ static int b44_register_phy_one(struct b44 *bp)
linkmode_copy(phydev->advertising, phydev->supported);
bp->old_link = 0;
- bp->phy_addr = phydev->mdio.addr;
phy_attached_info(phydev);
@@ -2309,10 +2309,15 @@ err_out:
static void b44_unregister_phy_one(struct b44 *bp)
{
- struct net_device *dev = bp->dev;
struct mii_bus *mii_bus = bp->mii_bus;
+ struct net_device *dev = bp->dev;
+ struct phy_device *phydev;
+
+ phydev = dev->phydev;
- phy_disconnect(dev->phydev);
+ phy_disconnect(phydev);
+ if (phy_is_pseudo_fixed_link(phydev))
+ fixed_phy_unregister(phydev);
mdiobus_unregister(mii_bus);
mdiobus_free(mii_bus);
}
@@ -2471,7 +2476,7 @@ static int b44_suspend(struct ssb_device *sdev, pm_message_t state)
if (!netif_running(dev))
return 0;
- del_timer_sync(&bp->timer);
+ timer_delete_sync(&bp->timer);
spin_lock_irq(&bp->lock);
@@ -2568,7 +2573,7 @@ static int __init b44_init(void)
unsigned int dma_desc_align_size = dma_get_cache_alignment();
int err;
- /* Setup paramaters for syncing RX/TX DMA descriptors */
+ /* Setup parameters for syncing RX/TX DMA descriptors */
dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
err = b44_pci_init();
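
Note: the timer hunks in b44.c (and in bcm63xx_enet.c below) track two kernel-wide API renames with unchanged semantics: from_timer() is now timer_container_of() and del_timer_sync() is now timer_delete_sync(). A minimal sketch, assuming a hypothetical driver-private structure:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct demo_priv {
	struct timer_list poll_timer;
	int ticks;
};

static void demo_timer_fn(struct timer_list *t)
{
	/* Recover the containing structure from the timer_list pointer. */
	struct demo_priv *priv = timer_container_of(priv, t, poll_timer);

	priv->ticks++;
	mod_timer(&priv->poll_timer, jiffies + HZ);
}

static void demo_start(struct demo_priv *priv)
{
	timer_setup(&priv->poll_timer, demo_timer_fn, 0);
	mod_timer(&priv->poll_timer, jiffies + HZ);
}

static void demo_stop(struct demo_priv *priv)
{
	/* Waits for a running callback to finish before returning. */
	timer_delete_sync(&priv->poll_timer);
}
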
diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c
index 72df1bb10172..203e8d0dd04b 100644
--- a/drivers/net/ethernet/broadcom/bcm4908_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c
@@ -789,7 +789,7 @@ static struct platform_driver bcm4908_enet_driver = {
.of_match_table = bcm4908_enet_of_match,
},
.probe = bcm4908_enet_probe,
- .remove_new = bcm4908_enet_remove,
+ .remove = bcm4908_enet_remove,
};
module_platform_driver(bcm4908_enet_driver);
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 3196c4dea076..92204fea1f08 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -286,7 +286,7 @@ static int bcm_enet_refill_rx(struct net_device *dev, bool napi_mode)
*/
static void bcm_enet_refill_rx_timer(struct timer_list *t)
{
- struct bcm_enet_priv *priv = from_timer(priv, t, rx_timeout);
+ struct bcm_enet_priv *priv = timer_container_of(priv, t, rx_timeout);
struct net_device *dev = priv->net_dev;
spin_lock(&priv->rx_lock);
@@ -1195,7 +1195,7 @@ static int bcm_enet_stop(struct net_device *dev)
napi_disable(&priv->napi);
if (priv->has_phy)
phy_stop(dev->phydev);
- del_timer_sync(&priv->rx_timeout);
+ timer_delete_sync(&priv->rx_timeout);
/* mask all interrupts */
enet_writel(priv, 0, ENET_IRMASK_REG);
@@ -1339,14 +1339,14 @@ static int bcm_enet_get_sset_count(struct net_device *netdev,
static void bcm_enet_get_strings(struct net_device *netdev,
u32 stringset, u8 *data)
{
+ const char *str;
int i;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
- memcpy(data + i * ETH_GSTRING_LEN,
- bcm_enet_gstrings_stats[i].stat_string,
- ETH_GSTRING_LEN);
+ str = bcm_enet_gstrings_stats[i].stat_string;
+ ethtool_puts(&data, str);
}
break;
}
@@ -1652,7 +1652,7 @@ static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu)
priv->rx_frag_size = SKB_DATA_ALIGN(priv->rx_buf_offset + priv->rx_buf_size) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
@@ -1936,7 +1936,7 @@ static void bcm_enet_remove(struct platform_device *pdev)
static struct platform_driver bcm63xx_enet_driver = {
.probe = bcm_enet_probe,
- .remove_new = bcm_enet_remove,
+ .remove = bcm_enet_remove,
.driver = {
.name = "bcm63xx_enet",
},
@@ -2001,7 +2001,7 @@ static inline int bcm_enet_port_is_rgmii(int portid)
*/
static void swphy_poll_timer(struct timer_list *t)
{
- struct bcm_enet_priv *priv = from_timer(priv, t, swphy_poll);
+ struct bcm_enet_priv *priv = timer_container_of(priv, t, swphy_poll);
unsigned int i;
for (i = 0; i < priv->num_ports; i++) {
@@ -2346,10 +2346,10 @@ static int bcm_enetsw_stop(struct net_device *dev)
priv = netdev_priv(dev);
kdev = &priv->pdev->dev;
- del_timer_sync(&priv->swphy_poll);
+ timer_delete_sync(&priv->swphy_poll);
netif_stop_queue(dev);
napi_disable(&priv->napi);
- del_timer_sync(&priv->rx_timeout);
+ timer_delete_sync(&priv->rx_timeout);
/* mask all interrupts */
enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
@@ -2503,14 +2503,14 @@ static const struct bcm_enet_stats bcm_enetsw_gstrings_stats[] = {
static void bcm_enetsw_get_strings(struct net_device *netdev,
u32 stringset, u8 *data)
{
+ const char *str;
int i;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
- memcpy(data + i * ETH_GSTRING_LEN,
- bcm_enetsw_gstrings_stats[i].stat_string,
- ETH_GSTRING_LEN);
+ str = bcm_enetsw_gstrings_stats[i].stat_string;
+ ethtool_puts(&data, str);
}
break;
}
@@ -2755,7 +2755,7 @@ static void bcm_enetsw_remove(struct platform_device *pdev)
static struct platform_driver bcm63xx_enetsw_driver = {
.probe = bcm_enetsw_probe,
- .remove_new = bcm_enetsw_remove,
+ .remove = bcm_enetsw_remove,
.driver = {
.name = "bcm63xx_enetsw",
},
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index c9faa8540859..bc4e1f3b3752 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -27,30 +27,6 @@
#include "bcmsysport.h"
-/* I/O accessors register helpers */
-#define BCM_SYSPORT_IO_MACRO(name, offset) \
-static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off) \
-{ \
- u32 reg = readl_relaxed(priv->base + offset + off); \
- return reg; \
-} \
-static inline void name##_writel(struct bcm_sysport_priv *priv, \
- u32 val, u32 off) \
-{ \
- writel_relaxed(val, priv->base + offset + off); \
-} \
-
-BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
-BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
-BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
-BCM_SYSPORT_IO_MACRO(gib, SYS_PORT_GIB_OFFSET);
-BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
-BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
-BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
-BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
-BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
-BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);
-
/* On SYSTEMPORT Lite, any register after RDMA_STATUS has the exact
* same layout, except it has been moved by 4 bytes up, *sigh*
*/
@@ -370,32 +346,22 @@ static void bcm_sysport_get_strings(struct net_device *dev,
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
const struct bcm_sysport_stats *s;
- char buf[128];
- int i, j;
+ int i;
switch (stringset) {
case ETH_SS_STATS:
- for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
+ for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
s = &bcm_sysport_gstrings_stats[i];
if (priv->is_lite &&
!bcm_sysport_lite_stat_valid(s->type))
continue;
- memcpy(data + j * ETH_GSTRING_LEN, s->stat_string,
- ETH_GSTRING_LEN);
- j++;
+ ethtool_puts(&data, s->stat_string);
}
for (i = 0; i < dev->num_tx_queues; i++) {
- snprintf(buf, sizeof(buf), "txq%d_packets", i);
- memcpy(data + j * ETH_GSTRING_LEN, buf,
- ETH_GSTRING_LEN);
- j++;
-
- snprintf(buf, sizeof(buf), "txq%d_bytes", i);
- memcpy(data + j * ETH_GSTRING_LEN, buf,
- ETH_GSTRING_LEN);
- j++;
+ ethtool_sprintf(&data, "txq%d_packets", i);
+ ethtool_sprintf(&data, "txq%d_bytes", i);
}
break;
default:
@@ -1053,7 +1019,7 @@ static int bcm_sysport_poll(struct napi_struct *napi, int budget)
if (priv->dim.use_dim) {
dim_update_sample(priv->dim.event_ctr, priv->dim.packets,
priv->dim.bytes, &dim_sample);
- net_dim(&priv->dim.dim, dim_sample);
+ net_dim(&priv->dim.dim, &dim_sample);
}
return work_done;
@@ -1359,6 +1325,7 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
skb->data, skb_len);
ret = NETDEV_TX_OK;
+ dev_kfree_skb_any(skb);
goto out;
}
@@ -1966,7 +1933,11 @@ static int bcm_sysport_open(struct net_device *dev)
unsigned int i;
int ret;
- clk_prepare_enable(priv->clk);
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ netdev_err(dev, "could not enable priv clock\n");
+ return ret;
+ }
/* Reset UniMAC */
umac_reset(priv);
@@ -2624,7 +2595,11 @@ static int bcm_sysport_probe(struct platform_device *pdev)
goto err_deregister_notifier;
}
- clk_prepare_enable(priv->clk);
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "could not enable priv clock\n");
+ goto err_deregister_netdev;
+ }
priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
dev_info(&pdev->dev,
@@ -2638,6 +2613,8 @@ static int bcm_sysport_probe(struct platform_device *pdev)
return 0;
+err_deregister_netdev:
+ unregister_netdev(dev);
err_deregister_notifier:
unregister_netdevice_notifier(&priv->netdev_notifier);
err_deregister_fixed_link:
@@ -2807,7 +2784,12 @@ static int __maybe_unused bcm_sysport_resume(struct device *d)
if (!netif_running(dev))
return 0;
- clk_prepare_enable(priv->clk);
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ netdev_err(dev, "could not enable priv clock\n");
+ return ret;
+ }
+
if (priv->wolopts)
clk_disable_unprepare(priv->wol_clk);
@@ -2899,7 +2881,7 @@ static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops,
static struct platform_driver bcm_sysport_driver = {
.probe = bcm_sysport_probe,
- .remove_new = bcm_sysport_remove,
+ .remove = bcm_sysport_remove,
.driver = {
.name = "brcm-systemport",
.of_match_table = bcm_sysport_of_match,
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index 335cf6631db5..a34296f989f1 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -773,4 +773,27 @@ struct bcm_sysport_priv {
struct bcm_sysport_tx_ring *ring_map[DSA_MAX_PORTS * 8];
};
+
+/* I/O accessors register helpers */
+#define BCM_SYSPORT_IO_MACRO(name, offset) \
+static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off) \
+{ \
+ u32 reg = readl_relaxed(priv->base + (offset) + off); \
+ return reg; \
+} \
+static inline void name##_writel(struct bcm_sysport_priv *priv, \
+ u32 val, u32 off) \
+{ \
+ writel_relaxed(val, priv->base + (offset) + off); \
+} \
+
+BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
+BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
+BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
+BCM_SYSPORT_IO_MACRO(gib, SYS_PORT_GIB_OFFSET);
+BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
+BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
+BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
+BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);
+
#endif /* __BCM_SYSPORT_H */
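
Note: each BCM_SYSPORT_IO_MACRO() invocation moved into the header above simply stamps out a pair of relaxed MMIO helpers for one register block. Roughly, the umac case expands to the following (sketch of the expansion, not new code):

static inline u32 umac_readl(struct bcm_sysport_priv *priv, u32 off)
{
	return readl_relaxed(priv->base + SYS_PORT_UMAC_OFFSET + off);
}

static inline void umac_writel(struct bcm_sysport_priv *priv, u32 val, u32 off)
{
	writel_relaxed(val, priv->base + SYS_PORT_UMAC_OFFSET + off);
}
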
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index 77425c7a32db..4e266ce41180 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -171,6 +171,7 @@ static int platform_phy_connect(struct bgmac *bgmac)
static int bgmac_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
+ struct device_node *phy_node;
struct bgmac *bgmac;
struct resource *regs;
int ret;
@@ -236,7 +237,9 @@ static int bgmac_probe(struct platform_device *pdev)
bgmac->cco_ctl_maskset = platform_bgmac_cco_ctl_maskset;
bgmac->get_bus_clock = platform_bgmac_get_bus_clock;
bgmac->cmn_maskset32 = platform_bgmac_cmn_maskset32;
- if (of_parse_phandle(np, "phy-handle", 0)) {
+ phy_node = of_parse_phandle(np, "phy-handle", 0);
+ if (phy_node) {
+ of_node_put(phy_node);
bgmac->phy_connect = platform_phy_connect;
} else {
bgmac->phy_connect = bgmac_phy_connect_direct;
@@ -294,7 +297,7 @@ static struct platform_driver bgmac_enet_driver = {
.pm = BGMAC_PM_OPS
},
.probe = bgmac_probe,
- .remove_new = bgmac_remove,
+ .remove = bgmac_remove,
};
module_platform_driver(bgmac_enet_driver);
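
Note: the phy-handle change above matters for reference counting: of_parse_phandle() returns the target node with its refcount raised, so a presence-only check must drop that reference with of_node_put() rather than leak it as the old bare call did. A small sketch of the pattern, assuming only the standard OF API:

#include <linux/of.h>

static bool demo_has_phy_handle(struct device_node *np)
{
	struct device_node *phy_node;

	/* Takes a reference on the pointed-to node when it exists. */
	phy_node = of_parse_phandle(np, "phy-handle", 0);
	if (!phy_node)
		return false;

	/* Only presence is needed here, so release the reference at once. */
	of_node_put(phy_node);
	return true;
}
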
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 6ffdc4229407..3e9c57196a39 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1367,8 +1367,7 @@ static void bgmac_get_strings(struct net_device *dev, u32 stringset,
return;
for (i = 0; i < BGMAC_STATS_LEN; i++)
- strscpy(data + i * ETH_GSTRING_LEN,
- bgmac_get_strings_stats[i].name, ETH_GSTRING_LEN);
+ ethtool_puts(&data, bgmac_get_strings_stats[i].name);
}
static void bgmac_get_ethtool_stats(struct net_device *dev,
@@ -1447,7 +1446,7 @@ int bgmac_phy_connect_direct(struct bgmac *bgmac)
struct phy_device *phy_dev;
int err;
- phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
+ phy_dev = fixed_phy_register(&fphy_status, NULL);
if (IS_ERR(phy_dev)) {
dev_err(bgmac->dev, "Failed to register fixed PHY device\n");
return PTR_ERR(phy_dev);
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index d73ef262991d..6fee9a41839c 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -328,8 +328,7 @@
#define BGMAC_RX_FRAME_OFFSET 30 /* There are 2 unused bytes between header and real data */
#define BGMAC_RX_BUF_OFFSET (NET_SKB_PAD + NET_IP_ALIGN - \
BGMAC_RX_FRAME_OFFSET)
-/* Jumbo frame size with FCS */
-#define BGMAC_RX_MAX_FRAME_SIZE 9724
+#define BGMAC_RX_MAX_FRAME_SIZE 1536
#define BGMAC_RX_BUF_SIZE (BGMAC_RX_FRAME_OFFSET + BGMAC_RX_MAX_FRAME_SIZE)
#define BGMAC_RX_ALLOC_SIZE (SKB_DATA_ALIGN(BGMAC_RX_BUF_SIZE + BGMAC_RX_BUF_OFFSET) + \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
diff --git a/drivers/net/ethernet/broadcom/bnge/Makefile b/drivers/net/ethernet/broadcom/bnge/Makefile
new file mode 100644
index 000000000000..ea6596854e5c
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_BNGE) += bng_en.o
+
+bng_en-y := bnge_core.o \
+ bnge_devlink.o \
+ bnge_hwrm.o \
+ bnge_hwrm_lib.o \
+ bnge_rmem.o \
+ bnge_resc.o \
+ bnge_netdev.o \
+ bnge_ethtool.o \
+ bnge_auxr.o
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge.h b/drivers/net/ethernet/broadcom/bnge/bnge.h
new file mode 100644
index 000000000000..411744894349
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge.h
@@ -0,0 +1,255 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_H_
+#define _BNGE_H_
+
+#define DRV_NAME "bng_en"
+#define DRV_SUMMARY "Broadcom 800G Ethernet Linux Driver"
+
+#include <linux/etherdevice.h>
+#include <linux/bnxt/hsi.h>
+#include "bnge_rmem.h"
+#include "bnge_resc.h"
+#include "bnge_auxr.h"
+
+#define DRV_VER_MAJ 1
+#define DRV_VER_MIN 15
+#define DRV_VER_UPD 1
+
+extern char bnge_driver_name[];
+
+enum board_idx {
+ BCM57708,
+};
+
+struct bnge_auxr_priv {
+ struct auxiliary_device aux_dev;
+ struct bnge_auxr_dev *auxr_dev;
+ int id;
+};
+
+struct bnge_pf_info {
+ u16 fw_fid;
+ u16 port_id;
+ u8 mac_addr[ETH_ALEN];
+};
+
+#define INVALID_HW_RING_ID ((u16)-1)
+
+enum {
+ BNGE_FW_CAP_SHORT_CMD = BIT_ULL(0),
+ BNGE_FW_CAP_LLDP_AGENT = BIT_ULL(1),
+ BNGE_FW_CAP_DCBX_AGENT = BIT_ULL(2),
+ BNGE_FW_CAP_IF_CHANGE = BIT_ULL(3),
+ BNGE_FW_CAP_KONG_MB_CHNL = BIT_ULL(4),
+ BNGE_FW_CAP_ERROR_RECOVERY = BIT_ULL(5),
+ BNGE_FW_CAP_PKG_VER = BIT_ULL(6),
+ BNGE_FW_CAP_CFA_ADV_FLOW = BIT_ULL(7),
+ BNGE_FW_CAP_CFA_RFS_RING_TBL_IDX_V2 = BIT_ULL(8),
+ BNGE_FW_CAP_PCIE_STATS_SUPPORTED = BIT_ULL(9),
+ BNGE_FW_CAP_EXT_STATS_SUPPORTED = BIT_ULL(10),
+ BNGE_FW_CAP_ERR_RECOVER_RELOAD = BIT_ULL(11),
+ BNGE_FW_CAP_HOT_RESET = BIT_ULL(12),
+ BNGE_FW_CAP_RX_ALL_PKT_TS = BIT_ULL(13),
+ BNGE_FW_CAP_VLAN_RX_STRIP = BIT_ULL(14),
+ BNGE_FW_CAP_VLAN_TX_INSERT = BIT_ULL(15),
+ BNGE_FW_CAP_EXT_HW_STATS_SUPPORTED = BIT_ULL(16),
+ BNGE_FW_CAP_LIVEPATCH = BIT_ULL(17),
+ BNGE_FW_CAP_HOT_RESET_IF = BIT_ULL(18),
+ BNGE_FW_CAP_RING_MONITOR = BIT_ULL(19),
+ BNGE_FW_CAP_DBG_QCAPS = BIT_ULL(20),
+ BNGE_FW_CAP_THRESHOLD_TEMP_SUPPORTED = BIT_ULL(21),
+ BNGE_FW_CAP_DFLT_VLAN_TPID_PCP = BIT_ULL(22),
+ BNGE_FW_CAP_VNIC_TUNNEL_TPA = BIT_ULL(23),
+ BNGE_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO = BIT_ULL(24),
+ BNGE_FW_CAP_CFA_RFS_RING_TBL_IDX_V3 = BIT_ULL(25),
+ BNGE_FW_CAP_VNIC_RE_FLUSH = BIT_ULL(26),
+};
+
+enum {
+ BNGE_EN_ROCE_V1 = BIT_ULL(0),
+ BNGE_EN_ROCE_V2 = BIT_ULL(1),
+ BNGE_EN_STRIP_VLAN = BIT_ULL(2),
+ BNGE_EN_SHARED_CHNL = BIT_ULL(3),
+ BNGE_EN_UDP_GSO_SUPP = BIT_ULL(4),
+};
+
+#define BNGE_EN_ROCE (BNGE_EN_ROCE_V1 | BNGE_EN_ROCE_V2)
+
+enum {
+ BNGE_RSS_CAP_RSS_HASH_TYPE_DELTA = BIT(0),
+ BNGE_RSS_CAP_UDP_RSS_CAP = BIT(1),
+ BNGE_RSS_CAP_NEW_RSS_CAP = BIT(2),
+ BNGE_RSS_CAP_RSS_TCAM = BIT(3),
+ BNGE_RSS_CAP_AH_V4_RSS_CAP = BIT(4),
+ BNGE_RSS_CAP_AH_V6_RSS_CAP = BIT(5),
+ BNGE_RSS_CAP_ESP_V4_RSS_CAP = BIT(6),
+ BNGE_RSS_CAP_ESP_V6_RSS_CAP = BIT(7),
+};
+
+#define BNGE_MAX_QUEUE 8
+struct bnge_queue_info {
+ u8 queue_id;
+ u8 queue_profile;
+};
+
+struct bnge_dev {
+ struct device *dev;
+ struct pci_dev *pdev;
+ struct net_device *netdev;
+ u64 dsn;
+#define BNGE_VPD_FLD_LEN 32
+ char board_partno[BNGE_VPD_FLD_LEN];
+ char board_serialno[BNGE_VPD_FLD_LEN];
+
+ void __iomem *bar0;
+ void __iomem *bar1;
+
+ u16 chip_num;
+ u8 chip_rev;
+
+#if BITS_PER_LONG == 32
+ /* ensure atomic 64-bit doorbell writes on 32-bit systems. */
+ spinlock_t db_lock;
+#endif
+ int db_offset; /* db_offset within db_size */
+ int db_size;
+
+ /* HWRM members */
+ u16 hwrm_cmd_seq;
+ u16 hwrm_cmd_kong_seq;
+ struct dma_pool *hwrm_dma_pool;
+ struct hlist_head hwrm_pending_list;
+ u16 hwrm_max_req_len;
+ u16 hwrm_max_ext_req_len;
+ unsigned int hwrm_cmd_timeout;
+ unsigned int hwrm_cmd_max_timeout;
+ struct mutex hwrm_cmd_lock; /* serialize hwrm messages */
+
+ struct hwrm_ver_get_output ver_resp;
+#define FW_VER_STR_LEN 32
+ char fw_ver_str[FW_VER_STR_LEN];
+ char hwrm_ver_supp[FW_VER_STR_LEN];
+ char nvm_cfg_ver[FW_VER_STR_LEN];
+ u64 fw_ver_code;
+#define BNGE_FW_VER_CODE(maj, min, bld, rsv) \
+ ((u64)(maj) << 48 | (u64)(min) << 32 | (u64)(bld) << 16 | (rsv))
+
+ struct bnge_pf_info pf;
+
+ unsigned long state;
+#define BNGE_STATE_DRV_REGISTERED 0
+#define BNGE_STATE_OPEN 1
+
+ u64 fw_cap;
+
+ /* Backing stores */
+ struct bnge_ctx_mem_info *ctx;
+
+ u64 flags;
+
+ struct bnge_hw_resc hw_resc;
+
+ u16 tso_max_segs;
+
+ int max_fltr;
+#define BNGE_L2_FLTR_MAX_FLTR 1024
+
+ u32 *rss_indir_tbl;
+#define BNGE_RSS_TABLE_ENTRIES 64
+#define BNGE_RSS_TABLE_SIZE (BNGE_RSS_TABLE_ENTRIES * 4)
+#define BNGE_RSS_TABLE_MAX_TBL 8
+#define BNGE_MAX_RSS_TABLE_SIZE \
+ (BNGE_RSS_TABLE_SIZE * BNGE_RSS_TABLE_MAX_TBL)
+#define BNGE_MAX_RSS_TABLE_ENTRIES \
+ (BNGE_RSS_TABLE_ENTRIES * BNGE_RSS_TABLE_MAX_TBL)
+ u16 rss_indir_tbl_entries;
+
+ u32 rss_cap;
+ u32 rss_hash_cfg;
+
+ u16 rx_nr_rings;
+ u16 tx_nr_rings;
+ u16 tx_nr_rings_per_tc;
+ /* Number of NQs */
+ u16 nq_nr_rings;
+
+ /* Aux device resources */
+ u16 aux_num_msix;
+ u16 aux_num_stat_ctxs;
+
+ u16 max_mtu;
+#define BNGE_MAX_MTU 9500
+
+ u16 hw_ring_stats_size;
+#define BNGE_NUM_RX_RING_STATS 8
+#define BNGE_NUM_TX_RING_STATS 8
+#define BNGE_NUM_TPA_RING_STATS 6
+#define BNGE_RING_STATS_SIZE \
+ ((BNGE_NUM_RX_RING_STATS + BNGE_NUM_TX_RING_STATS + \
+ BNGE_NUM_TPA_RING_STATS) * 8)
+
+ u16 max_tpa_v2;
+#define BNGE_SUPPORTS_TPA(bd) ((bd)->max_tpa_v2)
+
+ u8 num_tc;
+ u8 max_tc;
+ u8 max_lltc; /* lossless TCs */
+ struct bnge_queue_info q_info[BNGE_MAX_QUEUE];
+ u8 tc_to_qidx[BNGE_MAX_QUEUE];
+ u8 q_ids[BNGE_MAX_QUEUE];
+ u8 max_q;
+ u8 port_count;
+
+ struct bnge_irq *irq_tbl;
+ u16 irqs_acquired;
+
+ struct bnge_auxr_priv *aux_priv;
+ struct bnge_auxr_dev *auxr_dev;
+};
+
+static inline bool bnge_is_roce_en(struct bnge_dev *bd)
+{
+ return bd->flags & BNGE_EN_ROCE;
+}
+
+static inline bool bnge_is_agg_reqd(struct bnge_dev *bd)
+{
+ if (bd->netdev) {
+ struct bnge_net *bn = netdev_priv(bd->netdev);
+
+ if (bn->priv_flags & BNGE_NET_EN_TPA ||
+ bn->priv_flags & BNGE_NET_EN_JUMBO)
+ return true;
+ else
+ return false;
+ }
+
+ return true;
+}
+
+static inline void bnge_writeq(struct bnge_dev *bd, u64 val,
+ void __iomem *addr)
+{
+#if BITS_PER_LONG == 32
+ spin_lock(&bd->db_lock);
+ lo_hi_writeq(val, addr);
+ spin_unlock(&bd->db_lock);
+#else
+ writeq(val, addr);
+#endif
+}
+
+/* For TX and RX ring doorbells */
+static inline void bnge_db_write(struct bnge_dev *bd, struct bnge_db_info *db,
+ u32 idx)
+{
+ bnge_writeq(bd, db->db_key64 | DB_RING_IDX(db, idx),
+ db->doorbell);
+}
+
+bool bnge_aux_registered(struct bnge_dev *bd);
+u16 bnge_aux_get_msix(struct bnge_dev *bd);
+
+#endif /* _BNGE_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_auxr.c b/drivers/net/ethernet/broadcom/bnge/bnge_auxr.c
new file mode 100644
index 000000000000..d64592b64e17
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_auxr.c
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2025 Broadcom.
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/bitops.h>
+#include <linux/irq.h>
+#include <asm/byteorder.h>
+#include <linux/bitmap.h>
+#include <linux/auxiliary_bus.h>
+#include <linux/bnxt/hsi.h>
+
+#include "bnge.h"
+#include "bnge_hwrm.h"
+#include "bnge_auxr.h"
+
+static DEFINE_IDA(bnge_aux_dev_ids);
+
+static void bnge_fill_msix_vecs(struct bnge_dev *bd,
+ struct bnge_msix_info *info)
+{
+ struct bnge_auxr_dev *auxr_dev = bd->auxr_dev;
+ int num_msix, i;
+
+ if (!auxr_dev->auxr_info->msix_requested) {
+ dev_warn(bd->dev, "Requested MSI-X vectors not allocated\n");
+ return;
+ }
+ num_msix = auxr_dev->auxr_info->msix_requested;
+ for (i = 0; i < num_msix; i++) {
+ info[i].vector = bd->irq_tbl[i].vector;
+ info[i].db_offset = bd->db_offset;
+ info[i].ring_idx = i;
+ }
+}
+
+int bnge_register_dev(struct bnge_auxr_dev *auxr_dev,
+ void *handle)
+{
+ struct bnge_dev *bd = pci_get_drvdata(auxr_dev->pdev);
+ struct bnge_auxr_info *auxr_info;
+ int rc = 0;
+
+ netdev_lock(bd->netdev);
+ mutex_lock(&auxr_dev->auxr_dev_lock);
+ if (!bd->irq_tbl) {
+ rc = -ENODEV;
+ goto exit;
+ }
+
+ if (!bnge_aux_has_enough_resources(bd)) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+
+ auxr_info = auxr_dev->auxr_info;
+ auxr_info->handle = handle;
+
+ auxr_info->msix_requested = bd->aux_num_msix;
+
+ bnge_fill_msix_vecs(bd, bd->auxr_dev->msix_info);
+ auxr_dev->flags |= BNGE_ARDEV_MSIX_ALLOC;
+
+exit:
+ mutex_unlock(&auxr_dev->auxr_dev_lock);
+ netdev_unlock(bd->netdev);
+ return rc;
+}
+EXPORT_SYMBOL(bnge_register_dev);
+
+void bnge_unregister_dev(struct bnge_auxr_dev *auxr_dev)
+{
+ struct bnge_dev *bd = pci_get_drvdata(auxr_dev->pdev);
+ struct bnge_auxr_info *auxr_info;
+
+ auxr_info = auxr_dev->auxr_info;
+ netdev_lock(bd->netdev);
+ mutex_lock(&auxr_dev->auxr_dev_lock);
+ if (auxr_info->msix_requested)
+ auxr_dev->flags &= ~BNGE_ARDEV_MSIX_ALLOC;
+ auxr_info->msix_requested = 0;
+
+ mutex_unlock(&auxr_dev->auxr_dev_lock);
+ netdev_unlock(bd->netdev);
+}
+EXPORT_SYMBOL(bnge_unregister_dev);
+
+int bnge_send_msg(struct bnge_auxr_dev *auxr_dev, struct bnge_fw_msg *fw_msg)
+{
+ struct bnge_dev *bd = pci_get_drvdata(auxr_dev->pdev);
+ struct output *resp;
+ struct input *req;
+ u32 resp_len;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, 0 /* don't care */);
+ if (rc)
+ return rc;
+
+ rc = bnge_hwrm_req_replace(bd, req, fw_msg->msg, fw_msg->msg_len);
+ if (rc)
+ goto drop_req;
+
+ bnge_hwrm_req_timeout(bd, req, fw_msg->timeout);
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ resp_len = le16_to_cpu(resp->resp_len);
+ if (resp_len) {
+ if (fw_msg->resp_max_len < resp_len)
+ resp_len = fw_msg->resp_max_len;
+
+ memcpy(fw_msg->resp, resp, resp_len);
+ }
+drop_req:
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+EXPORT_SYMBOL(bnge_send_msg);
+
+void bnge_rdma_aux_device_uninit(struct bnge_dev *bd)
+{
+ struct bnge_auxr_priv *aux_priv;
+ struct auxiliary_device *adev;
+
+ /* Skip if no auxiliary device init was done. */
+ if (!bd->aux_priv)
+ return;
+
+ aux_priv = bd->aux_priv;
+ adev = &aux_priv->aux_dev;
+ auxiliary_device_uninit(adev);
+}
+
+static void bnge_aux_dev_release(struct device *dev)
+{
+ struct bnge_auxr_priv *aux_priv =
+ container_of(dev, struct bnge_auxr_priv, aux_dev.dev);
+ struct bnge_dev *bd = pci_get_drvdata(aux_priv->auxr_dev->pdev);
+
+ ida_free(&bnge_aux_dev_ids, aux_priv->id);
+ kfree(aux_priv->auxr_dev->auxr_info);
+ bd->auxr_dev = NULL;
+ kfree(aux_priv->auxr_dev);
+ kfree(aux_priv);
+ bd->aux_priv = NULL;
+}
+
+void bnge_rdma_aux_device_del(struct bnge_dev *bd)
+{
+ if (!bd->auxr_dev)
+ return;
+
+ auxiliary_device_delete(&bd->aux_priv->aux_dev);
+}
+
+static void bnge_set_auxr_dev_info(struct bnge_auxr_dev *auxr_dev,
+ struct bnge_dev *bd)
+{
+ auxr_dev->pdev = bd->pdev;
+ auxr_dev->l2_db_size = bd->db_size;
+ auxr_dev->l2_db_size_nc = bd->db_size;
+ auxr_dev->l2_db_offset = bd->db_offset;
+ mutex_init(&auxr_dev->auxr_dev_lock);
+
+ if (bd->flags & BNGE_EN_ROCE_V1)
+ auxr_dev->flags |= BNGE_ARDEV_ROCEV1_SUPP;
+ if (bd->flags & BNGE_EN_ROCE_V2)
+ auxr_dev->flags |= BNGE_ARDEV_ROCEV2_SUPP;
+
+ auxr_dev->chip_num = bd->chip_num;
+ auxr_dev->hw_ring_stats_size = bd->hw_ring_stats_size;
+ auxr_dev->pf_port_id = bd->pf.port_id;
+ auxr_dev->en_state = bd->state;
+ auxr_dev->bar0 = bd->bar0;
+}
+
+void bnge_rdma_aux_device_add(struct bnge_dev *bd)
+{
+ struct auxiliary_device *aux_dev;
+ int rc;
+
+ if (!bd->auxr_dev)
+ return;
+
+ aux_dev = &bd->aux_priv->aux_dev;
+ rc = auxiliary_device_add(aux_dev);
+ if (rc) {
+ dev_warn(bd->dev, "Failed to add auxiliary device for ROCE\n");
+ auxiliary_device_uninit(aux_dev);
+ bd->flags &= ~BNGE_EN_ROCE;
+ }
+
+ bd->auxr_dev->net = bd->netdev;
+}
+
+void bnge_rdma_aux_device_init(struct bnge_dev *bd)
+{
+ struct auxiliary_device *aux_dev;
+ struct bnge_auxr_info *auxr_info;
+ struct bnge_auxr_priv *aux_priv;
+ struct bnge_auxr_dev *auxr_dev;
+ int rc;
+
+ if (!bnge_is_roce_en(bd))
+ return;
+
+ aux_priv = kzalloc(sizeof(*aux_priv), GFP_KERNEL);
+ if (!aux_priv)
+ goto exit;
+
+ aux_priv->id = ida_alloc(&bnge_aux_dev_ids, GFP_KERNEL);
+ if (aux_priv->id < 0) {
+ dev_warn(bd->dev, "ida alloc failed for aux device\n");
+ kfree(aux_priv);
+ goto exit;
+ }
+
+ aux_dev = &aux_priv->aux_dev;
+ aux_dev->id = aux_priv->id;
+ aux_dev->name = "rdma";
+ aux_dev->dev.parent = &bd->pdev->dev;
+ aux_dev->dev.release = bnge_aux_dev_release;
+
+ rc = auxiliary_device_init(aux_dev);
+ if (rc) {
+ ida_free(&bnge_aux_dev_ids, aux_priv->id);
+ kfree(aux_priv);
+ goto exit;
+ }
+ bd->aux_priv = aux_priv;
+
+ auxr_dev = kzalloc(sizeof(*auxr_dev), GFP_KERNEL);
+ if (!auxr_dev)
+ goto aux_dev_uninit;
+
+ aux_priv->auxr_dev = auxr_dev;
+
+ auxr_info = kzalloc(sizeof(*auxr_info), GFP_KERNEL);
+ if (!auxr_info)
+ goto aux_dev_uninit;
+
+ auxr_dev->auxr_info = auxr_info;
+ bd->auxr_dev = auxr_dev;
+ bnge_set_auxr_dev_info(auxr_dev, bd);
+
+ return;
+
+aux_dev_uninit:
+ auxiliary_device_uninit(aux_dev);
+exit:
+ bd->flags &= ~BNGE_EN_ROCE;
+}
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_auxr.h b/drivers/net/ethernet/broadcom/bnge/bnge_auxr.h
new file mode 100644
index 000000000000..6c5c15ef2b0a
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_auxr.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_AUXR_H_
+#define _BNGE_AUXR_H_
+
+#include <linux/auxiliary_bus.h>
+
+#define BNGE_MIN_ROCE_CP_RINGS 2
+#define BNGE_MIN_ROCE_STAT_CTXS 1
+
+#define BNGE_MAX_ROCE_MSIX 64
+
+struct hwrm_async_event_cmpl;
+struct bnge;
+
+struct bnge_msix_info {
+ u32 vector;
+ u32 ring_idx;
+ u32 db_offset;
+};
+
+struct bnge_fw_msg {
+ void *msg;
+ int msg_len;
+ void *resp;
+ int resp_max_len;
+ int timeout;
+};
+
+struct bnge_auxr_info {
+ void *handle;
+ u16 msix_requested;
+};
+
+enum {
+ BNGE_ARDEV_ROCEV1_SUPP = BIT(0),
+ BNGE_ARDEV_ROCEV2_SUPP = BIT(1),
+ BNGE_ARDEV_MSIX_ALLOC = BIT(2),
+};
+
+#define BNGE_ARDEV_ROCE_SUPP (BNGE_ARDEV_ROCEV1_SUPP | \
+ BNGE_ARDEV_ROCEV2_SUPP)
+
+struct bnge_auxr_dev {
+ struct net_device *net;
+ struct pci_dev *pdev;
+ void __iomem *bar0;
+
+ struct bnge_msix_info msix_info[BNGE_MAX_ROCE_MSIX];
+
+ u32 flags;
+
+ struct bnge_auxr_info *auxr_info;
+
+ /* Doorbell BAR size in bytes mapped by L2 driver. */
+ int l2_db_size;
+ /* Doorbell BAR size in bytes mapped as non-cacheable. */
+ int l2_db_size_nc;
+ /* Doorbell offset in bytes within l2_db_size_nc. */
+ int l2_db_offset;
+
+ u16 chip_num;
+ u16 hw_ring_stats_size;
+ u16 pf_port_id;
+ unsigned long en_state;
+
+ u16 auxr_num_msix_vec;
+ u16 auxr_num_ctxs;
+
+ /* serialize auxr operations */
+ struct mutex auxr_dev_lock;
+};
+
+void bnge_rdma_aux_device_uninit(struct bnge_dev *bdev);
+void bnge_rdma_aux_device_del(struct bnge_dev *bdev);
+void bnge_rdma_aux_device_add(struct bnge_dev *bdev);
+void bnge_rdma_aux_device_init(struct bnge_dev *bdev);
+int bnge_register_dev(struct bnge_auxr_dev *adev,
+ void *handle);
+void bnge_unregister_dev(struct bnge_auxr_dev *adev);
+int bnge_send_msg(struct bnge_auxr_dev *adev, struct bnge_fw_msg *fw_msg);
+
+#endif /* _BNGE_AUXR_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_core.c b/drivers/net/ethernet/broadcom/bnge/bnge_core.c
new file mode 100644
index 000000000000..c94e132bebc8
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_core.c
@@ -0,0 +1,420 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2025 Broadcom.
+
+#include <linux/init.h>
+#include <linux/crash_dump.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "bnge.h"
+#include "bnge_devlink.h"
+#include "bnge_hwrm.h"
+#include "bnge_hwrm_lib.h"
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(DRV_SUMMARY);
+
+char bnge_driver_name[] = DRV_NAME;
+
+static const struct {
+ char *name;
+} board_info[] = {
+ [BCM57708] = { "Broadcom BCM57708 50Gb/100Gb/200Gb/400Gb/800Gb Ethernet" },
+};
+
+static const struct pci_device_id bnge_pci_tbl[] = {
+ { PCI_VDEVICE(BROADCOM, 0x1780), .driver_data = BCM57708 },
+ /* Required last entry */
+ {0, }
+};
+MODULE_DEVICE_TABLE(pci, bnge_pci_tbl);
+
+static void bnge_print_device_info(struct pci_dev *pdev, enum board_idx idx)
+{
+ struct device *dev = &pdev->dev;
+
+ dev_info(dev, "%s found at mem %lx\n", board_info[idx].name,
+ (long)pci_resource_start(pdev, 0));
+
+ pcie_print_link_status(pdev);
+}
+
+bool bnge_aux_registered(struct bnge_dev *bd)
+{
+ struct bnge_auxr_dev *ba_dev = bd->auxr_dev;
+
+ if (ba_dev && ba_dev->auxr_info->msix_requested)
+ return true;
+
+ return false;
+}
+
+static void bnge_nvm_cfg_ver_get(struct bnge_dev *bd)
+{
+ struct hwrm_nvm_get_dev_info_output nvm_info;
+
+ if (!bnge_hwrm_nvm_dev_info(bd, &nvm_info))
+ snprintf(bd->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
+ nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
+ nvm_info.nvm_cfg_ver_upd);
+}
+
+static int bnge_func_qcaps(struct bnge_dev *bd)
+{
+ int rc;
+
+ rc = bnge_hwrm_func_qcaps(bd);
+ if (rc)
+ return rc;
+
+ rc = bnge_hwrm_queue_qportcfg(bd);
+ if (rc) {
+ dev_err(bd->dev, "query qportcfg failure rc: %d\n", rc);
+ return rc;
+ }
+
+ rc = bnge_hwrm_func_resc_qcaps(bd);
+ if (rc) {
+ dev_err(bd->dev, "query resc caps failure rc: %d\n", rc);
+ return rc;
+ }
+
+ rc = bnge_hwrm_func_qcfg(bd);
+ if (rc) {
+ dev_err(bd->dev, "query config failure rc: %d\n", rc);
+ return rc;
+ }
+
+ rc = bnge_hwrm_vnic_qcaps(bd);
+ if (rc) {
+ dev_err(bd->dev, "vnic caps failure rc: %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static void bnge_fw_unregister_dev(struct bnge_dev *bd)
+{
+ /* ctx mem free after unrgtr only */
+ bnge_hwrm_func_drv_unrgtr(bd);
+ bnge_free_ctx_mem(bd);
+}
+
+static void bnge_set_dflt_rss_hash_type(struct bnge_dev *bd)
+{
+ bd->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
+}
+
+static int bnge_fw_register_dev(struct bnge_dev *bd)
+{
+ int rc;
+
+ bd->fw_cap = 0;
+ rc = bnge_hwrm_ver_get(bd);
+ if (rc) {
+ dev_err(bd->dev, "Get Version command failed rc: %d\n", rc);
+ return rc;
+ }
+
+ bnge_nvm_cfg_ver_get(bd);
+
+ rc = bnge_hwrm_func_reset(bd);
+ if (rc) {
+ dev_err(bd->dev, "Failed to reset function rc: %d\n", rc);
+ return rc;
+ }
+
+ bnge_hwrm_fw_set_time(bd);
+
+ rc = bnge_hwrm_func_drv_rgtr(bd);
+ if (rc) {
+ dev_err(bd->dev, "Failed to rgtr with firmware rc: %d\n", rc);
+ return rc;
+ }
+
+ rc = bnge_alloc_ctx_mem(bd);
+ if (rc) {
+ dev_err(bd->dev, "Failed to allocate ctx mem rc: %d\n", rc);
+ goto err_func_unrgtr;
+ }
+
+ /* Get the resources and configuration from firmware */
+ rc = bnge_func_qcaps(bd);
+ if (rc) {
+ dev_err(bd->dev, "Failed initial configuration rc: %d\n", rc);
+ rc = -ENODEV;
+ goto err_func_unrgtr;
+ }
+
+ bnge_set_dflt_rss_hash_type(bd);
+
+ return 0;
+
+err_func_unrgtr:
+ bnge_fw_unregister_dev(bd);
+ return rc;
+}
+
+static void bnge_pci_disable(struct pci_dev *pdev)
+{
+ pci_release_regions(pdev);
+ if (pci_is_enabled(pdev))
+ pci_disable_device(pdev);
+}
+
+static int bnge_pci_enable(struct pci_dev *pdev)
+{
+ int rc;
+
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
+ return rc;
+ }
+
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ dev_err(&pdev->dev,
+ "Cannot find PCI device base address, aborting\n");
+ rc = -ENODEV;
+ goto err_pci_disable;
+ }
+
+ rc = pci_request_regions(pdev, bnge_driver_name);
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
+ goto err_pci_disable;
+ }
+
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+
+ pci_set_master(pdev);
+
+ return 0;
+
+err_pci_disable:
+ pci_disable_device(pdev);
+ return rc;
+}
+
+static void bnge_unmap_bars(struct pci_dev *pdev)
+{
+ struct bnge_dev *bd = pci_get_drvdata(pdev);
+
+ if (bd->bar1) {
+ pci_iounmap(pdev, bd->bar1);
+ bd->bar1 = NULL;
+ }
+
+ if (bd->bar0) {
+ pci_iounmap(pdev, bd->bar0);
+ bd->bar0 = NULL;
+ }
+}
+
+static void bnge_set_max_func_irqs(struct bnge_dev *bd,
+ unsigned int max_irqs)
+{
+ bd->hw_resc.max_irqs = max_irqs;
+}
+
+static int bnge_get_max_irq(struct pci_dev *pdev)
+{
+ u16 ctrl;
+
+ pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
+ return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
+}
+
+static int bnge_map_db_bar(struct bnge_dev *bd)
+{
+ if (!bd->db_size)
+ return -ENODEV;
+
+ bd->bar1 = pci_iomap(bd->pdev, 2, bd->db_size);
+ if (!bd->bar1)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int bnge_probe_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ unsigned int max_irqs;
+ struct bnge_dev *bd;
+ int rc;
+
+ if (pci_is_bridge(pdev))
+ return -ENODEV;
+
+ if (!pdev->msix_cap) {
+ dev_err(&pdev->dev, "MSIX capability missing, aborting\n");
+ return -ENODEV;
+ }
+
+ if (is_kdump_kernel()) {
+ pci_clear_master(pdev);
+ pcie_flr(pdev);
+ }
+
+ rc = bnge_pci_enable(pdev);
+ if (rc)
+ return rc;
+
+ bnge_print_device_info(pdev, ent->driver_data);
+
+ bd = bnge_devlink_alloc(pdev);
+ if (!bd) {
+ dev_err(&pdev->dev, "Devlink allocation failed\n");
+ rc = -ENOMEM;
+ goto err_pci_disable;
+ }
+
+ bd->bar0 = pci_ioremap_bar(pdev, 0);
+ if (!bd->bar0) {
+ dev_err(&pdev->dev, "Failed mapping BAR-0, aborting\n");
+ rc = -ENOMEM;
+ goto err_devl_free;
+ }
+
+ rc = bnge_init_hwrm_resources(bd);
+ if (rc)
+ goto err_bar_unmap;
+
+ rc = bnge_fw_register_dev(bd);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to register with firmware rc = %d\n", rc);
+ goto err_hwrm_cleanup;
+ }
+
+ bnge_devlink_register(bd);
+
+ max_irqs = bnge_get_max_irq(pdev);
+ bnge_set_max_func_irqs(bd, max_irqs);
+
+ bnge_aux_init_dflt_config(bd);
+
+ rc = bnge_net_init_dflt_config(bd);
+ if (rc) {
+ dev_err(&pdev->dev, "Error setting up default cfg to netdev rc = %d\n",
+ rc);
+ goto err_fw_reg;
+ }
+
+ rc = bnge_map_db_bar(bd);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed mapping doorbell BAR rc = %d, aborting\n",
+ rc);
+ goto err_config_uninit;
+ }
+
+#if BITS_PER_LONG == 32
+ spin_lock_init(&bd->db_lock);
+#endif
+
+ bnge_rdma_aux_device_init(bd);
+
+ rc = bnge_alloc_irqs(bd);
+ if (rc) {
+ dev_err(&pdev->dev, "Error IRQ allocation rc = %d\n", rc);
+ goto err_uninit_auxr;
+ }
+
+ rc = bnge_netdev_alloc(bd, max_irqs);
+ if (rc)
+ goto err_free_irq;
+
+ bnge_rdma_aux_device_add(bd);
+
+ pci_save_state(pdev);
+
+ return 0;
+
+err_free_irq:
+ bnge_free_irqs(bd);
+
+err_uninit_auxr:
+ bnge_rdma_aux_device_uninit(bd);
+
+err_config_uninit:
+ bnge_net_uninit_dflt_config(bd);
+
+err_fw_reg:
+ bnge_devlink_unregister(bd);
+ bnge_fw_unregister_dev(bd);
+
+err_hwrm_cleanup:
+ bnge_cleanup_hwrm_resources(bd);
+
+err_bar_unmap:
+ bnge_unmap_bars(pdev);
+
+err_devl_free:
+ bnge_devlink_free(bd);
+
+err_pci_disable:
+ bnge_pci_disable(pdev);
+ return rc;
+}
+
+static void bnge_remove_one(struct pci_dev *pdev)
+{
+ struct bnge_dev *bd = pci_get_drvdata(pdev);
+
+ bnge_rdma_aux_device_del(bd);
+
+ bnge_netdev_free(bd);
+
+ bnge_free_irqs(bd);
+
+ bnge_rdma_aux_device_uninit(bd);
+
+ bnge_net_uninit_dflt_config(bd);
+
+ bnge_devlink_unregister(bd);
+
+ bnge_fw_unregister_dev(bd);
+
+ bnge_cleanup_hwrm_resources(bd);
+
+ bnge_unmap_bars(pdev);
+
+ bnge_devlink_free(bd);
+
+ bnge_pci_disable(pdev);
+}
+
+static void bnge_shutdown(struct pci_dev *pdev)
+{
+ pci_disable_device(pdev);
+
+ if (system_state == SYSTEM_POWER_OFF) {
+ pci_wake_from_d3(pdev, 0);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+}
+
+static struct pci_driver bnge_driver = {
+ .name = bnge_driver_name,
+ .id_table = bnge_pci_tbl,
+ .probe = bnge_probe_one,
+ .remove = bnge_remove_one,
+ .shutdown = bnge_shutdown,
+};
+
+static int __init bnge_init_module(void)
+{
+ return pci_register_driver(&bnge_driver);
+}
+module_init(bnge_init_module);
+
+static void __exit bnge_exit_module(void)
+{
+ pci_unregister_driver(&bnge_driver);
+}
+module_exit(bnge_exit_module);
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_db.h b/drivers/net/ethernet/broadcom/bnge/bnge_db.h
new file mode 100644
index 000000000000..950ed582f1d8
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_db.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_DB_H_
+#define _BNGE_DB_H_
+
+/* 64-bit doorbell */
+#define DBR_EPOCH_SFT 24
+#define DBR_TOGGLE_SFT 25
+#define DBR_XID_SFT 32
+#define DBR_PATH_L2 (0x1ULL << 56)
+#define DBR_VALID (0x1ULL << 58)
+#define DBR_TYPE_SQ (0x0ULL << 60)
+#define DBR_TYPE_SRQ (0x2ULL << 60)
+#define DBR_TYPE_CQ (0x4ULL << 60)
+#define DBR_TYPE_CQ_ARMALL (0x6ULL << 60)
+#define DBR_TYPE_NQ (0xaULL << 60)
+#define DBR_TYPE_NQ_ARM (0xbULL << 60)
+#define DBR_TYPE_NQ_MASK (0xeULL << 60)
+
+struct bnge_db_info {
+ void __iomem *doorbell;
+ u64 db_key64;
+ u32 db_ring_mask;
+ u32 db_epoch_mask;
+ u8 db_epoch_shift;
+};
+
+#define DB_EPOCH(db, idx) (((idx) & (db)->db_epoch_mask) << \
+ ((db)->db_epoch_shift))
+#define DB_RING_IDX(db, idx) (((idx) & (db)->db_ring_mask) | \
+ DB_EPOCH(db, idx))
+
+#endif /* _BNGE_DB_H_ */
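
Note: to see how DB_EPOCH()/DB_RING_IDX() compose the low bits of a doorbell, the sketch below walks a hypothetical 1024-entry ring whose epoch bit is bit 10 of the raw producer index; the mask and shift values are illustrative and not taken from the driver:

#include <linux/types.h>

/* Hypothetical ring geometry, for illustration only. */
#define DEMO_RING_MASK		0x3ffU	/* index wraps every 1024 entries */
#define DEMO_EPOCH_MASK		0x400U	/* bit 10 of the raw index */
#define DEMO_EPOCH_SHIFT	14	/* moves bit 10 up to DBR_EPOCH_SFT (24) */

static inline u64 demo_db_ring_idx(u32 idx)
{
	u64 epoch = (u64)(idx & DEMO_EPOCH_MASK) << DEMO_EPOCH_SHIFT;

	/* idx = 1025 yields (1 << 24) | 1: epoch bit set, ring offset 1. */
	return epoch | (idx & DEMO_RING_MASK);
}

bnge_db_write() in bnge.h then ORs this value into db_key64 (type, path, valid and xid bits) before issuing the 64-bit doorbell write.
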
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_devlink.c b/drivers/net/ethernet/broadcom/bnge/bnge_devlink.c
new file mode 100644
index 000000000000..a987afebd64d
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_devlink.c
@@ -0,0 +1,306 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2025 Broadcom.
+
+#include <linux/unaligned.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <net/devlink.h>
+
+#include "bnge.h"
+#include "bnge_devlink.h"
+#include "bnge_hwrm_lib.h"
+
+static int bnge_dl_info_put(struct bnge_dev *bd, struct devlink_info_req *req,
+ enum bnge_dl_version_type type, const char *key,
+ char *buf)
+{
+ if (!strlen(buf))
+ return 0;
+
+ if (!strcmp(key, DEVLINK_INFO_VERSION_GENERIC_FW_NCSI) ||
+ !strcmp(key, DEVLINK_INFO_VERSION_GENERIC_FW_ROCE))
+ return 0;
+
+ switch (type) {
+ case BNGE_VERSION_FIXED:
+ return devlink_info_version_fixed_put(req, key, buf);
+ case BNGE_VERSION_RUNNING:
+ return devlink_info_version_running_put(req, key, buf);
+ case BNGE_VERSION_STORED:
+ return devlink_info_version_stored_put(req, key, buf);
+ }
+
+ return 0;
+}
+
+static void bnge_vpd_read_info(struct bnge_dev *bd)
+{
+ struct pci_dev *pdev = bd->pdev;
+ unsigned int vpd_size, kw_len;
+ int pos, size;
+ u8 *vpd_data;
+
+ vpd_data = pci_vpd_alloc(pdev, &vpd_size);
+ if (IS_ERR(vpd_data)) {
+ pci_warn(pdev, "Unable to read VPD\n");
+ return;
+ }
+
+ pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+ PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
+ if (pos < 0)
+ goto read_sn;
+
+ size = min_t(int, kw_len, BNGE_VPD_FLD_LEN - 1);
+ memcpy(bd->board_partno, &vpd_data[pos], size);
+
+read_sn:
+ pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+ PCI_VPD_RO_KEYWORD_SERIALNO,
+ &kw_len);
+ if (pos < 0)
+ goto exit;
+
+ size = min_t(int, kw_len, BNGE_VPD_FLD_LEN - 1);
+ memcpy(bd->board_serialno, &vpd_data[pos], size);
+
+exit:
+ kfree(vpd_data);
+}
+
+#define HWRM_FW_VER_STR_LEN 16
+
+static int bnge_devlink_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct hwrm_nvm_get_dev_info_output nvm_dev_info;
+ struct bnge_dev *bd = devlink_priv(devlink);
+ struct hwrm_ver_get_output *ver_resp;
+ char mgmt_ver[FW_VER_STR_LEN];
+ char roce_ver[FW_VER_STR_LEN];
+ char ncsi_ver[FW_VER_STR_LEN];
+ char buf[32];
+
+ int rc;
+
+ if (bd->dsn) {
+ char buf[32];
+ u8 dsn[8];
+ int rc;
+
+ put_unaligned_le64(bd->dsn, dsn);
+ sprintf(buf, "%02X-%02X-%02X-%02X-%02X-%02X-%02X-%02X",
+ dsn[7], dsn[6], dsn[5], dsn[4],
+ dsn[3], dsn[2], dsn[1], dsn[0]);
+ rc = devlink_info_serial_number_put(req, buf);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to set dsn");
+ return rc;
+ }
+ }
+
+ if (strlen(bd->board_serialno)) {
+ rc = devlink_info_board_serial_number_put(req,
+ bd->board_serialno);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set board serial number");
+ return rc;
+ }
+ }
+
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_FIXED,
+ DEVLINK_INFO_VERSION_GENERIC_BOARD_ID,
+ bd->board_partno);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to set board part number");
+ return rc;
+ }
+
+ /* More information from HWRM ver get command */
+ sprintf(buf, "%X", bd->chip_num);
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_FIXED,
+ DEVLINK_INFO_VERSION_GENERIC_ASIC_ID, buf);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to set asic id");
+ return rc;
+ }
+
+ ver_resp = &bd->ver_resp;
+ sprintf(buf, "%c%d", 'A' + ver_resp->chip_rev, ver_resp->chip_metal);
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_FIXED,
+ DEVLINK_INFO_VERSION_GENERIC_ASIC_REV, buf);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to set asic info");
+ return rc;
+ }
+
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_RUNNING,
+ DEVLINK_INFO_VERSION_GENERIC_FW_PSID,
+ bd->nvm_cfg_ver);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to set firmware version");
+ return rc;
+ }
+
+ buf[0] = 0;
+ strncat(buf, ver_resp->active_pkg_name, HWRM_FW_VER_STR_LEN);
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_RUNNING,
+ DEVLINK_INFO_VERSION_GENERIC_FW, buf);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set firmware generic version");
+ return rc;
+ }
+
+ if (ver_resp->flags & VER_GET_RESP_FLAGS_EXT_VER_AVAIL) {
+ snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ ver_resp->hwrm_fw_major, ver_resp->hwrm_fw_minor,
+ ver_resp->hwrm_fw_build, ver_resp->hwrm_fw_patch);
+
+ snprintf(ncsi_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ ver_resp->mgmt_fw_major, ver_resp->mgmt_fw_minor,
+ ver_resp->mgmt_fw_build, ver_resp->mgmt_fw_patch);
+
+ snprintf(roce_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ ver_resp->roce_fw_major, ver_resp->roce_fw_minor,
+ ver_resp->roce_fw_build, ver_resp->roce_fw_patch);
+ } else {
+ snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ ver_resp->hwrm_fw_maj_8b, ver_resp->hwrm_fw_min_8b,
+ ver_resp->hwrm_fw_bld_8b, ver_resp->hwrm_fw_rsvd_8b);
+
+ snprintf(ncsi_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ ver_resp->mgmt_fw_maj_8b, ver_resp->mgmt_fw_min_8b,
+ ver_resp->mgmt_fw_bld_8b, ver_resp->mgmt_fw_rsvd_8b);
+
+ snprintf(roce_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ ver_resp->roce_fw_maj_8b, ver_resp->roce_fw_min_8b,
+ ver_resp->roce_fw_bld_8b, ver_resp->roce_fw_rsvd_8b);
+ }
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_RUNNING,
+ DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, mgmt_ver);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set firmware mgmt version");
+ return rc;
+ }
+
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_RUNNING,
+ DEVLINK_INFO_VERSION_GENERIC_FW_MGMT_API,
+ bd->hwrm_ver_supp);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set firmware mgmt api version");
+ return rc;
+ }
+
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_RUNNING,
+ DEVLINK_INFO_VERSION_GENERIC_FW_NCSI, ncsi_ver);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set ncsi firmware version");
+ return rc;
+ }
+
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_RUNNING,
+ DEVLINK_INFO_VERSION_GENERIC_FW_ROCE, roce_ver);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to set roce firmware version");
+ return rc;
+ }
+
+ rc = bnge_hwrm_nvm_dev_info(bd, &nvm_dev_info);
+ if (!(nvm_dev_info.flags & NVM_GET_DEV_INFO_RESP_FLAGS_FW_VER_VALID))
+ return 0;
+
+ buf[0] = 0;
+ strncat(buf, nvm_dev_info.pkg_name, HWRM_FW_VER_STR_LEN);
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_STORED,
+ DEVLINK_INFO_VERSION_GENERIC_FW, buf);
+ if (rc) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "Failed to set stored firmware package version");
+ return rc;
+ }
+
+ snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ nvm_dev_info.hwrm_fw_major, nvm_dev_info.hwrm_fw_minor,
+ nvm_dev_info.hwrm_fw_build, nvm_dev_info.hwrm_fw_patch);
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_STORED,
+ DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, mgmt_ver);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set stored firmware version");
+ return rc;
+ }
+
+ snprintf(ncsi_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ nvm_dev_info.mgmt_fw_major, nvm_dev_info.mgmt_fw_minor,
+ nvm_dev_info.mgmt_fw_build, nvm_dev_info.mgmt_fw_patch);
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_STORED,
+ DEVLINK_INFO_VERSION_GENERIC_FW_NCSI, ncsi_ver);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set stored ncsi firmware version");
+ return rc;
+ }
+
+ snprintf(roce_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ nvm_dev_info.roce_fw_major, nvm_dev_info.roce_fw_minor,
+ nvm_dev_info.roce_fw_build, nvm_dev_info.roce_fw_patch);
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_STORED,
+ DEVLINK_INFO_VERSION_GENERIC_FW_ROCE, roce_ver);
+ if (rc)
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set stored roce firmware version");
+
+ return rc;
+}
+
+static const struct devlink_ops bnge_devlink_ops = {
+ .info_get = bnge_devlink_info_get,
+};
+
+void bnge_devlink_free(struct bnge_dev *bd)
+{
+ struct devlink *devlink = priv_to_devlink(bd);
+
+ devlink_free(devlink);
+}
+
+struct bnge_dev *bnge_devlink_alloc(struct pci_dev *pdev)
+{
+ struct devlink *devlink;
+ struct bnge_dev *bd;
+
+ devlink = devlink_alloc(&bnge_devlink_ops, sizeof(*bd), &pdev->dev);
+ if (!devlink)
+ return NULL;
+
+ bd = devlink_priv(devlink);
+ pci_set_drvdata(pdev, bd);
+ bd->dev = &pdev->dev;
+ bd->pdev = pdev;
+
+ bd->dsn = pci_get_dsn(pdev);
+ if (!bd->dsn)
+ pci_warn(pdev, "Failed to get DSN\n");
+
+ bnge_vpd_read_info(bd);
+
+ return bd;
+}
+
+void bnge_devlink_register(struct bnge_dev *bd)
+{
+	struct devlink *devlink = priv_to_devlink(bd);
+
+	devlink_register(devlink);
+}
+
+void bnge_devlink_unregister(struct bnge_dev *bd)
+{
+	struct devlink *devlink = priv_to_devlink(bd);
+
+	devlink_unregister(devlink);
+}
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_devlink.h b/drivers/net/ethernet/broadcom/bnge/bnge_devlink.h
new file mode 100644
index 000000000000..c6575255e650
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_devlink.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_DEVLINK_H_
+#define _BNGE_DEVLINK_H_
+
+enum bnge_dl_version_type {
+ BNGE_VERSION_FIXED,
+ BNGE_VERSION_RUNNING,
+ BNGE_VERSION_STORED,
+};
+
+void bnge_devlink_free(struct bnge_dev *bd);
+struct bnge_dev *bnge_devlink_alloc(struct pci_dev *pdev);
+void bnge_devlink_register(struct bnge_dev *bd);
+void bnge_devlink_unregister(struct bnge_dev *bd);
+
+#endif /* _BNGE_DEVLINK_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_ethtool.c b/drivers/net/ethernet/broadcom/bnge/bnge_ethtool.c
new file mode 100644
index 000000000000..569371c1b4f2
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_ethtool.c
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2025 Broadcom.
+
+#include <linux/unaligned.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <net/devlink.h>
+#include <linux/ethtool.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool_netlink.h>
+
+#include "bnge.h"
+#include "bnge_ethtool.h"
+
+static void bnge_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct bnge_net *bn = netdev_priv(dev);
+ struct bnge_dev *bd = bn->bd;
+
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->fw_version, bd->fw_ver_str, sizeof(info->fw_version));
+ strscpy(info->bus_info, pci_name(bd->pdev), sizeof(info->bus_info));
+}
+
+static const struct ethtool_ops bnge_ethtool_ops = {
+ .get_drvinfo = bnge_get_drvinfo,
+};
+
+void bnge_set_ethtool_ops(struct net_device *dev)
+{
+ dev->ethtool_ops = &bnge_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_ethtool.h b/drivers/net/ethernet/broadcom/bnge/bnge_ethtool.h
new file mode 100644
index 000000000000..21e96a0976d5
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_ethtool.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_ETHTOOL_H_
+#define _BNGE_ETHTOOL_H_
+
+void bnge_set_ethtool_ops(struct net_device *dev);
+
+#endif /* _BNGE_ETHTOOL_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_hwrm.c b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm.c
new file mode 100644
index 000000000000..c3087e5cd875
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm.c
@@ -0,0 +1,548 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2025 Broadcom.
+
+#include <asm/byteorder.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+
+#include "bnge.h"
+#include "bnge_hwrm.h"
+
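+/*
+ * The sentinel binds a request context to its request type so stale or
+ * corrupted request pointers can be detected before the context is trusted.
+ */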
+static u64 bnge_cal_sentinel(struct bnge_hwrm_ctx *ctx, u16 req_type)
+{
+ return (((uintptr_t)ctx) + req_type) ^ BNGE_HWRM_SENTINEL;
+}
+
+int bnge_hwrm_req_create(struct bnge_dev *bd, void **req, u16 req_type,
+ u32 req_len)
+{
+ struct bnge_hwrm_ctx *ctx;
+ dma_addr_t dma_handle;
+ u8 *req_addr;
+
+ if (req_len > BNGE_HWRM_CTX_OFFSET)
+ return -E2BIG;
+
+ req_addr = dma_pool_alloc(bd->hwrm_dma_pool, GFP_KERNEL | __GFP_ZERO,
+ &dma_handle);
+ if (!req_addr)
+ return -ENOMEM;
+
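+	/*
+	 * Single DMA buffer layout: request at offset 0, context metadata at
+	 * BNGE_HWRM_CTX_OFFSET, response at BNGE_HWRM_RESP_OFFSET.
+	 */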
+ ctx = (struct bnge_hwrm_ctx *)(req_addr + BNGE_HWRM_CTX_OFFSET);
+ /* safety first, sentinel used to check for invalid requests */
+ ctx->sentinel = bnge_cal_sentinel(ctx, req_type);
+ ctx->req_len = req_len;
+ ctx->req = (struct input *)req_addr;
+ ctx->resp = (struct output *)(req_addr + BNGE_HWRM_RESP_OFFSET);
+ ctx->dma_handle = dma_handle;
+ ctx->flags = 0; /* __GFP_ZERO, but be explicit regarding ownership */
+ ctx->timeout = bd->hwrm_cmd_timeout ?: BNGE_DFLT_HWRM_CMD_TIMEOUT;
+ ctx->allocated = BNGE_HWRM_DMA_SIZE - BNGE_HWRM_CTX_OFFSET;
+ ctx->gfp = GFP_KERNEL;
+ ctx->slice_addr = NULL;
+
+ /* initialize common request fields */
+ ctx->req->req_type = cpu_to_le16(req_type);
+ ctx->req->resp_addr = cpu_to_le64(dma_handle + BNGE_HWRM_RESP_OFFSET);
+ ctx->req->cmpl_ring = cpu_to_le16(BNGE_HWRM_NO_CMPL_RING);
+ ctx->req->target_id = cpu_to_le16(BNGE_HWRM_TARGET);
+ *req = ctx->req;
+
+ return 0;
+}
+
+static struct bnge_hwrm_ctx *__hwrm_ctx_get(struct bnge_dev *bd, u8 *req_addr)
+{
+ void *ctx_addr = req_addr + BNGE_HWRM_CTX_OFFSET;
+ struct input *req = (struct input *)req_addr;
+ struct bnge_hwrm_ctx *ctx = ctx_addr;
+ u64 sentinel;
+
+ if (!req) {
+ dev_err(bd->dev, "null HWRM request");
+ dump_stack();
+ return NULL;
+ }
+
+ /* HWRM API has no type safety, verify sentinel to validate address */
+ sentinel = bnge_cal_sentinel(ctx, le16_to_cpu(req->req_type));
+ if (ctx->sentinel != sentinel) {
+ dev_err(bd->dev, "HWRM sentinel mismatch, req_type = %u\n",
+ (u32)le16_to_cpu(req->req_type));
+ dump_stack();
+ return NULL;
+ }
+
+ return ctx;
+}
+
+void bnge_hwrm_req_timeout(struct bnge_dev *bd,
+ void *req, unsigned int timeout)
+{
+ struct bnge_hwrm_ctx *ctx = __hwrm_ctx_get(bd, req);
+
+ if (ctx)
+ ctx->timeout = timeout;
+}
+
+void bnge_hwrm_req_alloc_flags(struct bnge_dev *bd, void *req, gfp_t gfp)
+{
+ struct bnge_hwrm_ctx *ctx = __hwrm_ctx_get(bd, req);
+
+ if (ctx)
+ ctx->gfp = gfp;
+}
+
+int bnge_hwrm_req_replace(struct bnge_dev *bd, void *req, void *new_req,
+ u32 len)
+{
+ struct bnge_hwrm_ctx *ctx = __hwrm_ctx_get(bd, req);
+ struct input *internal_req = req;
+ u16 req_type;
+
+ if (!ctx)
+ return -EINVAL;
+
+ if (len > BNGE_HWRM_CTX_OFFSET)
+ return -E2BIG;
+
+ /* free any existing slices */
+ ctx->allocated = BNGE_HWRM_DMA_SIZE - BNGE_HWRM_CTX_OFFSET;
+ if (ctx->slice_addr) {
+ dma_free_coherent(bd->dev, ctx->slice_size,
+ ctx->slice_addr, ctx->slice_handle);
+ ctx->slice_addr = NULL;
+ }
+ ctx->gfp = GFP_KERNEL;
+
+ if ((bd->fw_cap & BNGE_FW_CAP_SHORT_CMD) || len > BNGE_HWRM_MAX_REQ_LEN) {
+ memcpy(internal_req, new_req, len);
+ } else {
+ internal_req->req_type = ((struct input *)new_req)->req_type;
+ ctx->req = new_req;
+ }
+
+ ctx->req_len = len;
+ ctx->req->resp_addr = cpu_to_le64(ctx->dma_handle +
+ BNGE_HWRM_RESP_OFFSET);
+
+ /* update sentinel for potentially new request type */
+ req_type = le16_to_cpu(internal_req->req_type);
+ ctx->sentinel = bnge_cal_sentinel(ctx, req_type);
+
+ return 0;
+}
+
+void bnge_hwrm_req_flags(struct bnge_dev *bd, void *req,
+ enum bnge_hwrm_ctx_flags flags)
+{
+ struct bnge_hwrm_ctx *ctx = __hwrm_ctx_get(bd, req);
+
+ if (ctx)
+ ctx->flags |= (flags & BNGE_HWRM_API_FLAGS);
+}
+
+void *bnge_hwrm_req_hold(struct bnge_dev *bd, void *req)
+{
+ struct bnge_hwrm_ctx *ctx = __hwrm_ctx_get(bd, req);
+ struct input *input = (struct input *)req;
+
+ if (!ctx)
+ return NULL;
+
+ if (ctx->flags & BNGE_HWRM_INTERNAL_CTX_OWNED) {
+ dev_err(bd->dev, "HWRM context already owned, req_type = %u\n",
+ (u32)le16_to_cpu(input->req_type));
+ dump_stack();
+ return NULL;
+ }
+
+ ctx->flags |= BNGE_HWRM_INTERNAL_CTX_OWNED;
+ return ((u8 *)req) + BNGE_HWRM_RESP_OFFSET;
+}
+
+static void __hwrm_ctx_invalidate(struct bnge_dev *bd,
+ struct bnge_hwrm_ctx *ctx)
+{
+ void *addr = ((u8 *)ctx) - BNGE_HWRM_CTX_OFFSET;
+ dma_addr_t dma_handle = ctx->dma_handle; /* save before invalidate */
+
+ /* unmap any auxiliary DMA slice */
+ if (ctx->slice_addr)
+ dma_free_coherent(bd->dev, ctx->slice_size,
+ ctx->slice_addr, ctx->slice_handle);
+
+ /* invalidate, ensure ownership, sentinel and dma_handle are cleared */
+ memset(ctx, 0, sizeof(struct bnge_hwrm_ctx));
+
+ /* return the buffer to the DMA pool */
+ if (dma_handle)
+ dma_pool_free(bd->hwrm_dma_pool, addr, dma_handle);
+}
+
+void bnge_hwrm_req_drop(struct bnge_dev *bd, void *req)
+{
+ struct bnge_hwrm_ctx *ctx = __hwrm_ctx_get(bd, req);
+
+ if (ctx)
+ __hwrm_ctx_invalidate(bd, ctx);
+}
+
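+/* Map firmware HWRM error codes onto the closest matching errno values */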
+static int bnge_map_hwrm_error(u32 hwrm_err)
+{
+ switch (hwrm_err) {
+ case HWRM_ERR_CODE_SUCCESS:
+ return 0;
+ case HWRM_ERR_CODE_RESOURCE_LOCKED:
+ return -EROFS;
+ case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
+ return -EACCES;
+ case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
+ return -ENOSPC;
+ case HWRM_ERR_CODE_INVALID_PARAMS:
+ case HWRM_ERR_CODE_INVALID_FLAGS:
+ case HWRM_ERR_CODE_INVALID_ENABLES:
+ case HWRM_ERR_CODE_UNSUPPORTED_TLV:
+ case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR:
+ return -EINVAL;
+ case HWRM_ERR_CODE_NO_BUFFER:
+ return -ENOMEM;
+ case HWRM_ERR_CODE_HOT_RESET_PROGRESS:
+ case HWRM_ERR_CODE_BUSY:
+ return -EAGAIN;
+ case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
+ return -EOPNOTSUPP;
+ case HWRM_ERR_CODE_PF_UNAVAILABLE:
+ return -ENODEV;
+ default:
+ return -EIO;
+ }
+}
+
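+/*
+ * Wait tokens serialize HWRM commands: hwrm_cmd_lock is taken here and only
+ * released when the token is destroyed.  CHIMP tokens are also added to the
+ * pending list so a completion can be matched to its request by sequence id.
+ */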
+static struct bnge_hwrm_wait_token *
+bnge_hwrm_create_token(struct bnge_dev *bd, enum bnge_hwrm_chnl dst)
+{
+ struct bnge_hwrm_wait_token *token;
+
+ token = kzalloc(sizeof(*token), GFP_KERNEL);
+ if (!token)
+ return NULL;
+
+ mutex_lock(&bd->hwrm_cmd_lock);
+
+ token->dst = dst;
+ token->state = BNGE_HWRM_PENDING;
+ if (dst == BNGE_HWRM_CHNL_CHIMP) {
+ token->seq_id = bd->hwrm_cmd_seq++;
+ hlist_add_head_rcu(&token->node, &bd->hwrm_pending_list);
+ } else {
+ token->seq_id = bd->hwrm_cmd_kong_seq++;
+ }
+
+ return token;
+}
+
+static void
+bnge_hwrm_destroy_token(struct bnge_dev *bd, struct bnge_hwrm_wait_token *token)
+{
+ if (token->dst == BNGE_HWRM_CHNL_CHIMP) {
+ hlist_del_rcu(&token->node);
+ kfree_rcu(token, rcu);
+ } else {
+ kfree(token);
+ }
+ mutex_unlock(&bd->hwrm_cmd_lock);
+}
+
+static void bnge_hwrm_req_dbg(struct bnge_dev *bd, struct input *req)
+{
+ u32 ring = le16_to_cpu(req->cmpl_ring);
+ u32 type = le16_to_cpu(req->req_type);
+ u32 tgt = le16_to_cpu(req->target_id);
+ u32 seq = le16_to_cpu(req->seq_id);
+ char opt[32] = "\n";
+
+ if (unlikely(ring != (u16)BNGE_HWRM_NO_CMPL_RING))
+ snprintf(opt, 16, " ring %d\n", ring);
+
+ if (unlikely(tgt != BNGE_HWRM_TARGET))
+ snprintf(opt + strlen(opt) - 1, 16, " tgt 0x%x\n", tgt);
+
+ dev_dbg(bd->dev, "sent hwrm req_type 0x%x seq id 0x%x%s",
+ type, seq, opt);
+}
+
+#define bnge_hwrm_err(bd, ctx, fmt, ...) \
+ do { \
+ if ((ctx)->flags & BNGE_HWRM_CTX_SILENT) \
+ dev_dbg((bd)->dev, fmt, __VA_ARGS__); \
+ else \
+ dev_err((bd)->dev, fmt, __VA_ARGS__); \
+ } while (0)
+
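+/*
+ * Copy the request into the communication window in BAR0, ring the doorbell,
+ * then wait for the response: either a completion event (when a completion
+ * ring is assigned) or the response length and valid byte written by the
+ * firmware into the response DMA buffer.
+ */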
+static int __hwrm_send_ctx(struct bnge_dev *bd, struct bnge_hwrm_ctx *ctx)
+{
+ u32 doorbell_offset = BNGE_GRCPF_REG_CHIMP_COMM_TRIGGER;
+ enum bnge_hwrm_chnl dst = BNGE_HWRM_CHNL_CHIMP;
+ u32 bar_offset = BNGE_GRCPF_REG_CHIMP_COMM;
+ struct bnge_hwrm_wait_token *token = NULL;
+ u16 max_req_len = BNGE_HWRM_MAX_REQ_LEN;
+ unsigned int i, timeout, tmo_count;
+ u32 *data = (u32 *)ctx->req;
+ u32 msg_len = ctx->req_len;
+ int rc = -EBUSY;
+ u32 req_type;
+ u16 len = 0;
+ u8 *valid;
+
+ if (ctx->flags & BNGE_HWRM_INTERNAL_RESP_DIRTY)
+ memset(ctx->resp, 0, PAGE_SIZE);
+
+ req_type = le16_to_cpu(ctx->req->req_type);
+
+ if (msg_len > BNGE_HWRM_MAX_REQ_LEN &&
+ msg_len > bd->hwrm_max_ext_req_len) {
+ dev_warn(bd->dev, "oversized hwrm request, req_type 0x%x",
+ req_type);
+ rc = -E2BIG;
+ goto exit;
+ }
+
+ token = bnge_hwrm_create_token(bd, dst);
+ if (!token) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+ ctx->req->seq_id = cpu_to_le16(token->seq_id);
+
+ /* Ensure any associated DMA buffers are written before doorbell */
+ wmb();
+
+ /* Write request msg to hwrm channel */
+ __iowrite32_copy(bd->bar0 + bar_offset, data, msg_len / 4);
+
+ for (i = msg_len; i < max_req_len; i += 4)
+ writel(0, bd->bar0 + bar_offset + i);
+
+ /* Ring channel doorbell */
+ writel(1, bd->bar0 + doorbell_offset);
+
+ bnge_hwrm_req_dbg(bd, ctx->req);
+
+ /* Limit timeout to an upper limit */
+ timeout = min(ctx->timeout,
+ bd->hwrm_cmd_max_timeout ?: BNGE_HWRM_CMD_MAX_TIMEOUT);
+ /* convert timeout to usec */
+ timeout *= 1000;
+
+ i = 0;
+ /* Short timeout for the first few iterations:
+ * number of loops = number of loops for short timeout +
+ * number of loops for standard timeout.
+ */
+ tmo_count = BNGE_HWRM_SHORT_TIMEOUT_COUNTER;
+ timeout = timeout - BNGE_HWRM_SHORT_MIN_TIMEOUT *
+ BNGE_HWRM_SHORT_TIMEOUT_COUNTER;
+ tmo_count += DIV_ROUND_UP(timeout, BNGE_HWRM_MIN_TIMEOUT);
+
+ if (le16_to_cpu(ctx->req->cmpl_ring) != INVALID_HW_RING_ID) {
+ /* Wait until hwrm response cmpl interrupt is processed */
+ while (READ_ONCE(token->state) < BNGE_HWRM_COMPLETE &&
+ i++ < tmo_count) {
+ /* on first few passes, just barely sleep */
+ if (i < BNGE_HWRM_SHORT_TIMEOUT_COUNTER) {
+ usleep_range(BNGE_HWRM_SHORT_MIN_TIMEOUT,
+ BNGE_HWRM_SHORT_MAX_TIMEOUT);
+ } else {
+ usleep_range(BNGE_HWRM_MIN_TIMEOUT,
+ BNGE_HWRM_MAX_TIMEOUT);
+ }
+ }
+
+ if (READ_ONCE(token->state) != BNGE_HWRM_COMPLETE) {
+ bnge_hwrm_err(bd, ctx, "No hwrm cmpl received: 0x%x\n",
+ req_type);
+ goto exit;
+ }
+ len = le16_to_cpu(READ_ONCE(ctx->resp->resp_len));
+ valid = ((u8 *)ctx->resp) + len - 1;
+ } else {
+ __le16 seen_out_of_seq = ctx->req->seq_id; /* will never see */
+ int j;
+
+ /* Check if response len is updated */
+ for (i = 0; i < tmo_count; i++) {
+ if (token &&
+ READ_ONCE(token->state) == BNGE_HWRM_DEFERRED) {
+ bnge_hwrm_destroy_token(bd, token);
+ token = NULL;
+ }
+
+ len = le16_to_cpu(READ_ONCE(ctx->resp->resp_len));
+ if (len) {
+ __le16 resp_seq = READ_ONCE(ctx->resp->seq_id);
+
+ if (resp_seq == ctx->req->seq_id)
+ break;
+ if (resp_seq != seen_out_of_seq) {
+ dev_warn(bd->dev, "Discarding out of seq response: 0x%x for msg {0x%x 0x%x}\n",
+ le16_to_cpu(resp_seq), req_type, le16_to_cpu(ctx->req->seq_id));
+ seen_out_of_seq = resp_seq;
+ }
+ }
+
+ /* on first few passes, just barely sleep */
+ if (i < BNGE_HWRM_SHORT_TIMEOUT_COUNTER) {
+ usleep_range(BNGE_HWRM_SHORT_MIN_TIMEOUT,
+ BNGE_HWRM_SHORT_MAX_TIMEOUT);
+ } else {
+ usleep_range(BNGE_HWRM_MIN_TIMEOUT,
+ BNGE_HWRM_MAX_TIMEOUT);
+ }
+ }
+
+ if (i >= tmo_count) {
+ bnge_hwrm_err(bd, ctx,
+ "Error (timeout: %u) msg {0x%x 0x%x} len:%d\n",
+ bnge_hwrm_timeout(i), req_type,
+ le16_to_cpu(ctx->req->seq_id), len);
+ goto exit;
+ }
+
+ /* Last byte of resp contains valid bit */
+ valid = ((u8 *)ctx->resp) + len - 1;
+ for (j = 0; j < BNGE_HWRM_FIN_WAIT_USEC; ) {
+ /* make sure we read from updated DMA memory */
+ dma_rmb();
+ if (*valid)
+ break;
+ if (j < 10) {
+ udelay(1);
+ j++;
+ } else {
+ usleep_range(20, 30);
+ j += 20;
+ }
+ }
+
+ if (j >= BNGE_HWRM_FIN_WAIT_USEC) {
+ bnge_hwrm_err(bd, ctx, "Error (timeout: %u) msg {0x%x 0x%x} len:%d v:%d\n",
+ bnge_hwrm_timeout(i) + j, req_type,
+ le16_to_cpu(ctx->req->seq_id), len, *valid);
+ goto exit;
+ }
+ }
+
+ /* Zero valid bit for compatibility. Valid bit in an older spec
+ * may become a new field in a newer spec. We must make sure that
+ * a new field not implemented by old spec will read zero.
+ */
+ *valid = 0;
+ rc = le16_to_cpu(ctx->resp->error_code);
+ if (rc == HWRM_ERR_CODE_BUSY && !(ctx->flags & BNGE_HWRM_CTX_SILENT))
+ dev_warn(bd->dev, "FW returned busy, hwrm req_type 0x%x\n",
+ req_type);
+ else if (rc && rc != HWRM_ERR_CODE_PF_UNAVAILABLE)
+ bnge_hwrm_err(bd, ctx, "hwrm req_type 0x%x seq id 0x%x error %d\n",
+ req_type, le16_to_cpu(ctx->req->seq_id), rc);
+ rc = bnge_map_hwrm_error(rc);
+
+exit:
+ if (token)
+ bnge_hwrm_destroy_token(bd, token);
+ if (ctx->flags & BNGE_HWRM_INTERNAL_CTX_OWNED)
+ ctx->flags |= BNGE_HWRM_INTERNAL_RESP_DIRTY;
+ else
+ __hwrm_ctx_invalidate(bd, ctx);
+ return rc;
+}
+
+int bnge_hwrm_req_send(struct bnge_dev *bd, void *req)
+{
+ struct bnge_hwrm_ctx *ctx = __hwrm_ctx_get(bd, req);
+
+ if (!ctx)
+ return -EINVAL;
+
+ return __hwrm_send_ctx(bd, ctx);
+}
+
+int bnge_hwrm_req_send_silent(struct bnge_dev *bd, void *req)
+{
+ bnge_hwrm_req_flags(bd, req, BNGE_HWRM_CTX_SILENT);
+ return bnge_hwrm_req_send(bd, req);
+}
+
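+/*
+ * Carve a DMA slice for the request out of the spare space in the request
+ * buffer when it fits; otherwise fall back to a separate coherent allocation.
+ * Only one slice may be attached to a request at a time.
+ */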
+void *
+bnge_hwrm_req_dma_slice(struct bnge_dev *bd, void *req, u32 size,
+ dma_addr_t *dma_handle)
+{
+ struct bnge_hwrm_ctx *ctx = __hwrm_ctx_get(bd, req);
+ u8 *end = ((u8 *)req) + BNGE_HWRM_DMA_SIZE;
+ struct input *input = req;
+ u8 *addr, *req_addr = req;
+ u32 max_offset, offset;
+
+ if (!ctx)
+ return NULL;
+
+ max_offset = BNGE_HWRM_DMA_SIZE - ctx->allocated;
+ offset = max_offset - size;
+ offset = ALIGN_DOWN(offset, BNGE_HWRM_DMA_ALIGN);
+ addr = req_addr + offset;
+
+ if (addr < req_addr + max_offset && req_addr + ctx->req_len <= addr) {
+ ctx->allocated = end - addr;
+ *dma_handle = ctx->dma_handle + offset;
+ return addr;
+ }
+
+ if (ctx->slice_addr) {
+ dev_err(bd->dev, "HWRM refusing to reallocate DMA slice, req_type = %u\n",
+ (u32)le16_to_cpu(input->req_type));
+ dump_stack();
+ return NULL;
+ }
+
+ addr = dma_alloc_coherent(bd->dev, size, dma_handle, ctx->gfp);
+ if (!addr)
+ return NULL;
+
+ ctx->slice_addr = addr;
+ ctx->slice_size = size;
+ ctx->slice_handle = *dma_handle;
+
+ return addr;
+}
+
+void bnge_cleanup_hwrm_resources(struct bnge_dev *bd)
+{
+ struct bnge_hwrm_wait_token *token;
+
+ dma_pool_destroy(bd->hwrm_dma_pool);
+ bd->hwrm_dma_pool = NULL;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(token, &bd->hwrm_pending_list, node)
+ WRITE_ONCE(token->state, BNGE_HWRM_CANCELLED);
+ rcu_read_unlock();
+}
+
+int bnge_init_hwrm_resources(struct bnge_dev *bd)
+{
+ bd->hwrm_dma_pool = dma_pool_create("bnge_hwrm", bd->dev,
+ BNGE_HWRM_DMA_SIZE,
+ BNGE_HWRM_DMA_ALIGN, 0);
+ if (!bd->hwrm_dma_pool)
+ return -ENOMEM;
+
+ INIT_HLIST_HEAD(&bd->hwrm_pending_list);
+ mutex_init(&bd->hwrm_cmd_lock);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_hwrm.h b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm.h
new file mode 100644
index 000000000000..6df629761d95
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_HWRM_H_
+#define _BNGE_HWRM_H_
+
+#include <linux/bnxt/hsi.h>
+
+enum bnge_hwrm_ctx_flags {
+ BNGE_HWRM_INTERNAL_CTX_OWNED = BIT(0),
+ BNGE_HWRM_INTERNAL_RESP_DIRTY = BIT(1),
+ BNGE_HWRM_CTX_SILENT = BIT(2),
+ BNGE_HWRM_FULL_WAIT = BIT(3),
+};
+
+#define BNGE_HWRM_API_FLAGS (BNGE_HWRM_CTX_SILENT | BNGE_HWRM_FULL_WAIT)
+
+struct bnge_hwrm_ctx {
+ u64 sentinel;
+ dma_addr_t dma_handle;
+ struct output *resp;
+ struct input *req;
+ dma_addr_t slice_handle;
+ void *slice_addr;
+ u32 slice_size;
+ u32 req_len;
+ enum bnge_hwrm_ctx_flags flags;
+ unsigned int timeout;
+ u32 allocated;
+ gfp_t gfp;
+};
+
+enum bnge_hwrm_wait_state {
+ BNGE_HWRM_PENDING,
+ BNGE_HWRM_DEFERRED,
+ BNGE_HWRM_COMPLETE,
+ BNGE_HWRM_CANCELLED,
+};
+
+enum bnge_hwrm_chnl { BNGE_HWRM_CHNL_CHIMP, BNGE_HWRM_CHNL_KONG };
+
+struct bnge_hwrm_wait_token {
+ struct rcu_head rcu;
+ struct hlist_node node;
+ enum bnge_hwrm_wait_state state;
+ enum bnge_hwrm_chnl dst;
+ u16 seq_id;
+};
+
+#define BNGE_DFLT_HWRM_CMD_TIMEOUT 500
+
+#define BNGE_GRCPF_REG_CHIMP_COMM 0x0
+#define BNGE_GRCPF_REG_CHIMP_COMM_TRIGGER 0x100
+
+#define BNGE_HWRM_MAX_REQ_LEN (bd->hwrm_max_req_len)
+#define BNGE_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input)
+#define BNGE_HWRM_CMD_MAX_TIMEOUT 40000U
+#define BNGE_SHORT_HWRM_CMD_TIMEOUT 20
+#define BNGE_HWRM_CMD_TIMEOUT (bd->hwrm_cmd_timeout)
+#define BNGE_HWRM_RESET_TIMEOUT ((BNGE_HWRM_CMD_TIMEOUT) * 4)
+#define BNGE_HWRM_TARGET 0xffff
+#define BNGE_HWRM_NO_CMPL_RING -1
+#define BNGE_HWRM_REQ_MAX_SIZE 128
+#define BNGE_HWRM_DMA_SIZE (2 * PAGE_SIZE) /* space for req+resp */
+#define BNGE_HWRM_RESP_RESERVED PAGE_SIZE
+#define BNGE_HWRM_RESP_OFFSET (BNGE_HWRM_DMA_SIZE - \
+ BNGE_HWRM_RESP_RESERVED)
+#define BNGE_HWRM_CTX_OFFSET (BNGE_HWRM_RESP_OFFSET - \
+ sizeof(struct bnge_hwrm_ctx))
+#define BNGE_HWRM_DMA_ALIGN 16
+#define BNGE_HWRM_SENTINEL 0xb6e1f68a12e9a7eb /* arbitrary value */
+#define BNGE_HWRM_SHORT_MIN_TIMEOUT 3
+#define BNGE_HWRM_SHORT_MAX_TIMEOUT 10
+#define BNGE_HWRM_SHORT_TIMEOUT_COUNTER 5
+
+#define BNGE_HWRM_MIN_TIMEOUT 25
+#define BNGE_HWRM_MAX_TIMEOUT 40
+
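+/* Total time slept (in usec) after @n polling iterations of __hwrm_send_ctx() */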
+static inline unsigned int bnge_hwrm_timeout(unsigned int n)
+{
+ return n <= BNGE_HWRM_SHORT_TIMEOUT_COUNTER ?
+ n * BNGE_HWRM_SHORT_MIN_TIMEOUT :
+ BNGE_HWRM_SHORT_TIMEOUT_COUNTER *
+ BNGE_HWRM_SHORT_MIN_TIMEOUT +
+ (n - BNGE_HWRM_SHORT_TIMEOUT_COUNTER) *
+ BNGE_HWRM_MIN_TIMEOUT;
+}
+
+#define BNGE_HWRM_FIN_WAIT_USEC 50000
+
+void bnge_cleanup_hwrm_resources(struct bnge_dev *bd);
+int bnge_init_hwrm_resources(struct bnge_dev *bd);
+
+int bnge_hwrm_req_create(struct bnge_dev *bd, void **req, u16 req_type,
+ u32 req_len);
+#define bnge_hwrm_req_init(bd, req, req_type) \
+ bnge_hwrm_req_create((bd), (void **)&(req), (req_type), \
+ sizeof(*(req)))
+void *bnge_hwrm_req_hold(struct bnge_dev *bd, void *req);
+void bnge_hwrm_req_drop(struct bnge_dev *bd, void *req);
+void bnge_hwrm_req_flags(struct bnge_dev *bd, void *req,
+ enum bnge_hwrm_ctx_flags flags);
+void bnge_hwrm_req_timeout(struct bnge_dev *bd, void *req,
+ unsigned int timeout);
+int bnge_hwrm_req_send(struct bnge_dev *bd, void *req);
+int bnge_hwrm_req_send_silent(struct bnge_dev *bd, void *req);
+void bnge_hwrm_req_alloc_flags(struct bnge_dev *bd, void *req, gfp_t flags);
+void *bnge_hwrm_req_dma_slice(struct bnge_dev *bd, void *req, u32 size,
+ dma_addr_t *dma);
+int bnge_hwrm_req_replace(struct bnge_dev *bd, void *req, void *new_req,
+ u32 len);
+#endif /* _BNGE_HWRM_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c
new file mode 100644
index 000000000000..198f49b40dbf
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c
@@ -0,0 +1,1185 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2025 Broadcom.
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/pci.h>
+#include <linux/bnxt/hsi.h>
+#include <linux/if_vlan.h>
+#include <net/netdev_queues.h>
+
+#include "bnge.h"
+#include "bnge_hwrm.h"
+#include "bnge_hwrm_lib.h"
+#include "bnge_rmem.h"
+#include "bnge_resc.h"
+
+int bnge_hwrm_ver_get(struct bnge_dev *bd)
+{
+ u32 dev_caps_cfg, hwrm_ver, hwrm_spec_code;
+ u16 fw_maj, fw_min, fw_bld, fw_rsv;
+ struct hwrm_ver_get_output *resp;
+ struct hwrm_ver_get_input *req;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_VER_GET);
+ if (rc)
+ return rc;
+
+ bnge_hwrm_req_flags(bd, req, BNGE_HWRM_FULL_WAIT);
+ bd->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
+ req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
+ req->hwrm_intf_min = HWRM_VERSION_MINOR;
+ req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
+
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (rc)
+ goto hwrm_ver_get_exit;
+
+ memcpy(&bd->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
+
+ hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
+ resp->hwrm_intf_min_8b << 8 |
+ resp->hwrm_intf_upd_8b;
+ hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
+ HWRM_VERSION_UPDATE;
+
+ if (hwrm_spec_code > hwrm_ver)
+ snprintf(bd->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
+ HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
+ HWRM_VERSION_UPDATE);
+ else
+ snprintf(bd->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
+ resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
+ resp->hwrm_intf_upd_8b);
+
+ fw_maj = le16_to_cpu(resp->hwrm_fw_major);
+ fw_min = le16_to_cpu(resp->hwrm_fw_minor);
+ fw_bld = le16_to_cpu(resp->hwrm_fw_build);
+ fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
+
+ bd->fw_ver_code = BNGE_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
+ snprintf(bd->fw_ver_str, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ fw_maj, fw_min, fw_bld, fw_rsv);
+
+ if (strlen(resp->active_pkg_name)) {
+ int fw_ver_len = strlen(bd->fw_ver_str);
+
+ snprintf(bd->fw_ver_str + fw_ver_len,
+ FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
+ resp->active_pkg_name);
+ bd->fw_cap |= BNGE_FW_CAP_PKG_VER;
+ }
+
+ bd->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
+ if (!bd->hwrm_cmd_timeout)
+ bd->hwrm_cmd_timeout = BNGE_DFLT_HWRM_CMD_TIMEOUT;
+ bd->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000;
+ if (!bd->hwrm_cmd_max_timeout)
+ bd->hwrm_cmd_max_timeout = BNGE_HWRM_CMD_MAX_TIMEOUT;
+ else if (bd->hwrm_cmd_max_timeout > BNGE_HWRM_CMD_MAX_TIMEOUT)
+ dev_warn(bd->dev, "Default HWRM commands max timeout increased to %d seconds\n",
+ bd->hwrm_cmd_max_timeout / 1000);
+
+ bd->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
+ bd->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
+
+ if (bd->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
+ bd->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
+
+ bd->chip_num = le16_to_cpu(resp->chip_num);
+ bd->chip_rev = resp->chip_rev;
+
+ dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
+ if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
+ (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
+ bd->fw_cap |= BNGE_FW_CAP_SHORT_CMD;
+
+ if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
+ bd->fw_cap |= BNGE_FW_CAP_KONG_MB_CHNL;
+
+ if (dev_caps_cfg &
+ VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
+ bd->fw_cap |= BNGE_FW_CAP_CFA_ADV_FLOW;
+
+hwrm_ver_get_exit:
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+int
+bnge_hwrm_nvm_dev_info(struct bnge_dev *bd,
+ struct hwrm_nvm_get_dev_info_output *nvm_info)
+{
+ struct hwrm_nvm_get_dev_info_output *resp;
+ struct hwrm_nvm_get_dev_info_input *req;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_NVM_GET_DEV_INFO);
+ if (rc)
+ return rc;
+
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (!rc)
+ memcpy(nvm_info, resp, sizeof(*resp));
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+int bnge_hwrm_func_reset(struct bnge_dev *bd)
+{
+ struct hwrm_func_reset_input *req;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_RESET);
+ if (rc)
+ return rc;
+
+ req->enables = 0;
+ bnge_hwrm_req_timeout(bd, req, BNGE_HWRM_RESET_TIMEOUT);
+ return bnge_hwrm_req_send(bd, req);
+}
+
+int bnge_hwrm_fw_set_time(struct bnge_dev *bd)
+{
+ struct hwrm_fw_set_time_input *req;
+ struct tm tm;
+ int rc;
+
+ time64_to_tm(ktime_get_real_seconds(), 0, &tm);
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FW_SET_TIME);
+ if (rc)
+ return rc;
+
+ req->year = cpu_to_le16(1900 + tm.tm_year);
+ req->month = 1 + tm.tm_mon;
+ req->day = tm.tm_mday;
+ req->hour = tm.tm_hour;
+ req->minute = tm.tm_min;
+ req->second = tm.tm_sec;
+ return bnge_hwrm_req_send(bd, req);
+}
+
+int bnge_hwrm_func_drv_rgtr(struct bnge_dev *bd)
+{
+ struct hwrm_func_drv_rgtr_output *resp;
+ struct hwrm_func_drv_rgtr_input *req;
+ u32 flags;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_DRV_RGTR);
+ if (rc)
+ return rc;
+
+ req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
+ FUNC_DRV_RGTR_REQ_ENABLES_VER |
+ FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
+
+ req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
+ flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
+
+ req->flags = cpu_to_le32(flags);
+ req->ver_maj_8b = DRV_VER_MAJ;
+ req->ver_min_8b = DRV_VER_MIN;
+ req->ver_upd_8b = DRV_VER_UPD;
+ req->ver_maj = cpu_to_le16(DRV_VER_MAJ);
+ req->ver_min = cpu_to_le16(DRV_VER_MIN);
+ req->ver_upd = cpu_to_le16(DRV_VER_UPD);
+
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (!rc) {
+ set_bit(BNGE_STATE_DRV_REGISTERED, &bd->state);
+ if (resp->flags &
+ cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
+ bd->fw_cap |= BNGE_FW_CAP_IF_CHANGE;
+ }
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+int bnge_hwrm_func_drv_unrgtr(struct bnge_dev *bd)
+{
+ struct hwrm_func_drv_unrgtr_input *req;
+ int rc;
+
+ if (!test_and_clear_bit(BNGE_STATE_DRV_REGISTERED, &bd->state))
+ return 0;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_DRV_UNRGTR);
+ if (rc)
+ return rc;
+ return bnge_hwrm_req_send(bd, req);
+}
+
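+/*
+ * Firmware may supply an init value and offset for each backing store type;
+ * when no init mask is given the memory is simply left zero-initialized.
+ */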
+static void bnge_init_ctx_initializer(struct bnge_ctx_mem_type *ctxm,
+ u8 init_val, u8 init_offset,
+ bool init_mask_set)
+{
+ ctxm->init_value = init_val;
+ ctxm->init_offset = BNGE_CTX_INIT_INVALID_OFFSET;
+ if (init_mask_set)
+ ctxm->init_offset = init_offset * 4;
+ else
+ ctxm->init_value = 0;
+}
+
+static int bnge_alloc_all_ctx_pg_info(struct bnge_dev *bd, int ctx_max)
+{
+ struct bnge_ctx_mem_info *ctx = bd->ctx;
+ u16 type;
+
+ for (type = 0; type < ctx_max; type++) {
+ struct bnge_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
+ int n = 1;
+
+ if (!ctxm->max_entries)
+ continue;
+
+ if (ctxm->instance_bmap)
+ n = hweight32(ctxm->instance_bmap);
+ ctxm->pg_info = kcalloc(n, sizeof(*ctxm->pg_info), GFP_KERNEL);
+ if (!ctxm->pg_info)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+#define BNGE_CTX_INIT_VALID(flags) \
+ (!!((flags) & \
+ FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT))
+
+int bnge_hwrm_func_backing_store_qcaps(struct bnge_dev *bd)
+{
+ struct hwrm_func_backing_store_qcaps_v2_output *resp;
+ struct hwrm_func_backing_store_qcaps_v2_input *req;
+ struct bnge_ctx_mem_info *ctx;
+ u16 type;
+ int rc;
+
+ if (bd->ctx)
+ return 0;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_BACKING_STORE_QCAPS_V2);
+ if (rc)
+ return rc;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ bd->ctx = ctx;
+
+ resp = bnge_hwrm_req_hold(bd, req);
+
+ for (type = 0; type < BNGE_CTX_V2_MAX; ) {
+ struct bnge_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
+ u8 init_val, init_off, i;
+ __le32 *p;
+ u32 flags;
+
+ req->type = cpu_to_le16(type);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (rc)
+ goto ctx_done;
+ flags = le32_to_cpu(resp->flags);
+ type = le16_to_cpu(resp->next_valid_type);
+ if (!(flags &
+ FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID))
+ continue;
+
+ ctxm->type = le16_to_cpu(resp->type);
+ ctxm->entry_size = le16_to_cpu(resp->entry_size);
+ ctxm->flags = flags;
+ ctxm->instance_bmap = le32_to_cpu(resp->instance_bit_map);
+ ctxm->entry_multiple = resp->entry_multiple;
+ ctxm->max_entries = le32_to_cpu(resp->max_num_entries);
+ ctxm->min_entries = le32_to_cpu(resp->min_num_entries);
+ init_val = resp->ctx_init_value;
+ init_off = resp->ctx_init_offset;
+ bnge_init_ctx_initializer(ctxm, init_val, init_off,
+ BNGE_CTX_INIT_VALID(flags));
+ ctxm->split_entry_cnt = min_t(u8, resp->subtype_valid_cnt,
+ BNGE_MAX_SPLIT_ENTRY);
+ for (i = 0, p = &resp->split_entry_0; i < ctxm->split_entry_cnt;
+ i++, p++)
+ ctxm->split[i] = le32_to_cpu(*p);
+ }
+ rc = bnge_alloc_all_ctx_pg_info(bd, BNGE_CTX_V2_MAX);
+
+ctx_done:
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
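+/* Encode the page table depth (PBL level) and directory address for firmware */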
+static void bnge_hwrm_set_pg_attr(struct bnge_ring_mem_info *rmem, u8 *pg_attr,
+ __le64 *pg_dir)
+{
+ if (!rmem->nr_pages)
+ return;
+
+ BNGE_SET_CTX_PAGE_ATTR(*pg_attr);
+ if (rmem->depth >= 1) {
+ if (rmem->depth == 2)
+ *pg_attr |= 2;
+ else
+ *pg_attr |= 1;
+ *pg_dir = cpu_to_le64(rmem->dma_pg_tbl);
+ } else {
+ *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
+ }
+}
+
+int bnge_hwrm_func_backing_store(struct bnge_dev *bd,
+ struct bnge_ctx_mem_type *ctxm,
+ bool last)
+{
+ struct hwrm_func_backing_store_cfg_v2_input *req;
+ u32 instance_bmap = ctxm->instance_bmap;
+ int i, j, rc = 0, n = 1;
+ __le32 *p;
+
+ if (!(ctxm->flags & BNGE_CTX_MEM_TYPE_VALID) || !ctxm->pg_info)
+ return 0;
+
+ if (instance_bmap)
+ n = hweight32(ctxm->instance_bmap);
+ else
+ instance_bmap = 1;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_BACKING_STORE_CFG_V2);
+ if (rc)
+ return rc;
+ bnge_hwrm_req_hold(bd, req);
+ req->type = cpu_to_le16(ctxm->type);
+ req->entry_size = cpu_to_le16(ctxm->entry_size);
+ req->subtype_valid_cnt = ctxm->split_entry_cnt;
+ for (i = 0, p = &req->split_entry_0; i < ctxm->split_entry_cnt; i++)
+ p[i] = cpu_to_le32(ctxm->split[i]);
+ for (i = 0, j = 0; j < n && !rc; i++) {
+ struct bnge_ctx_pg_info *ctx_pg;
+
+ if (!(instance_bmap & (1 << i)))
+ continue;
+ req->instance = cpu_to_le16(i);
+ ctx_pg = &ctxm->pg_info[j++];
+ if (!ctx_pg->entries)
+ continue;
+ req->num_entries = cpu_to_le32(ctx_pg->entries);
+ bnge_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+ &req->page_size_pbl_level,
+ &req->page_dir);
+ if (last && j == n)
+ req->flags =
+ cpu_to_le32(BNGE_BS_CFG_ALL_DONE);
+ rc = bnge_hwrm_req_send(bd, req);
+ }
+ bnge_hwrm_req_drop(bd, req);
+
+ return rc;
+}
+
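+/* Query the rings currently reserved by firmware and reconcile the RX/TX
+ * counts against the available completion rings.
+ */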
+static int bnge_hwrm_get_rings(struct bnge_dev *bd)
+{
+ struct bnge_hw_resc *hw_resc = &bd->hw_resc;
+ struct hwrm_func_qcfg_output *resp;
+ struct hwrm_func_qcfg_input *req;
+ u16 cp, stats;
+ u16 rx, tx;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_QCFG);
+ if (rc)
+ return rc;
+
+ req->fid = cpu_to_le16(0xffff);
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (rc) {
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+ }
+
+ hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
+ hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
+ hw_resc->resv_hw_ring_grps =
+ le32_to_cpu(resp->alloc_hw_ring_grps);
+ hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
+ hw_resc->resv_rsscos_ctxs = le16_to_cpu(resp->alloc_rsscos_ctx);
+ cp = le16_to_cpu(resp->alloc_cmpl_rings);
+ stats = le16_to_cpu(resp->alloc_stat_ctx);
+ hw_resc->resv_irqs = cp;
+ rx = hw_resc->resv_rx_rings;
+ tx = hw_resc->resv_tx_rings;
+ if (bnge_is_agg_reqd(bd))
+ rx >>= 1;
+ if (cp < (rx + tx)) {
+ rc = bnge_fix_rings_count(&rx, &tx, cp, false);
+ if (rc)
+ goto get_rings_exit;
+ if (bnge_is_agg_reqd(bd))
+ rx <<= 1;
+ hw_resc->resv_rx_rings = rx;
+ hw_resc->resv_tx_rings = tx;
+ }
+ hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
+ hw_resc->resv_hw_ring_grps = rx;
+ hw_resc->resv_cp_rings = cp;
+ hw_resc->resv_stat_ctxs = stats;
+
+get_rings_exit:
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+static struct hwrm_func_cfg_input *
+__bnge_hwrm_reserve_pf_rings(struct bnge_dev *bd, struct bnge_hw_rings *hwr)
+{
+ struct hwrm_func_cfg_input *req;
+ u32 enables = 0;
+
+	if (bnge_hwrm_req_init(bd, req, HWRM_FUNC_CFG))
+ return NULL;
+
+ req->fid = cpu_to_le16(0xffff);
+ enables |= hwr->tx ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
+ req->num_tx_rings = cpu_to_le16(hwr->tx);
+
+ enables |= hwr->rx ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
+ enables |= hwr->stat ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ enables |= hwr->nq ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
+ enables |= hwr->cmpl ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
+ enables |= hwr->vnic ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
+ enables |= hwr->rss_ctx ? FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
+
+ req->num_rx_rings = cpu_to_le16(hwr->rx);
+ req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
+ req->num_cmpl_rings = cpu_to_le16(hwr->cmpl);
+ req->num_msix = cpu_to_le16(hwr->nq);
+ req->num_stat_ctxs = cpu_to_le16(hwr->stat);
+ req->num_vnics = cpu_to_le16(hwr->vnic);
+ req->enables = cpu_to_le32(enables);
+
+ return req;
+}
+
+static int
+bnge_hwrm_reserve_pf_rings(struct bnge_dev *bd, struct bnge_hw_rings *hwr)
+{
+ struct hwrm_func_cfg_input *req;
+ int rc;
+
+ req = __bnge_hwrm_reserve_pf_rings(bd, hwr);
+ if (!req)
+ return -ENOMEM;
+
+ if (!req->enables) {
+ bnge_hwrm_req_drop(bd, req);
+ return 0;
+ }
+
+ rc = bnge_hwrm_req_send(bd, req);
+ if (rc)
+ return rc;
+
+ return bnge_hwrm_get_rings(bd);
+}
+
+int bnge_hwrm_reserve_rings(struct bnge_dev *bd, struct bnge_hw_rings *hwr)
+{
+ return bnge_hwrm_reserve_pf_rings(bd, hwr);
+}
+
+int bnge_hwrm_func_qcfg(struct bnge_dev *bd)
+{
+ struct hwrm_func_qcfg_output *resp;
+ struct hwrm_func_qcfg_input *req;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_QCFG);
+ if (rc)
+ return rc;
+
+ req->fid = cpu_to_le16(0xffff);
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (rc)
+ goto func_qcfg_exit;
+
+ bd->max_mtu = le16_to_cpu(resp->max_mtu_configured);
+ if (!bd->max_mtu)
+ bd->max_mtu = BNGE_MAX_MTU;
+
+ if (bd->db_size)
+ goto func_qcfg_exit;
+
+ bd->db_offset = le16_to_cpu(resp->legacy_l2_db_size_kb) * 1024;
+ bd->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
+ 1024);
+ if (!bd->db_size || bd->db_size > pci_resource_len(bd->pdev, 2) ||
+ bd->db_size <= bd->db_offset)
+ bd->db_size = pci_resource_len(bd->pdev, 2);
+
+func_qcfg_exit:
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+int bnge_hwrm_func_resc_qcaps(struct bnge_dev *bd)
+{
+ struct hwrm_func_resource_qcaps_output *resp;
+ struct bnge_hw_resc *hw_resc = &bd->hw_resc;
+ struct hwrm_func_resource_qcaps_input *req;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_RESOURCE_QCAPS);
+ if (rc)
+ return rc;
+
+ req->fid = cpu_to_le16(0xffff);
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send_silent(bd, req);
+ if (rc)
+ goto hwrm_func_resc_qcaps_exit;
+
+ hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
+ hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
+ hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
+ hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
+ hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
+ hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
+ hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
+ hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
+ hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
+ hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
+ hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
+ hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
+ hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
+ hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
+ hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
+ hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
+ hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+
+ hw_resc->max_nqs = le16_to_cpu(resp->max_msix);
+ hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
+
+hwrm_func_resc_qcaps_exit:
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+int bnge_hwrm_func_qcaps(struct bnge_dev *bd)
+{
+ struct hwrm_func_qcaps_output *resp;
+ struct hwrm_func_qcaps_input *req;
+ struct bnge_pf_info *pf = &bd->pf;
+ u32 flags;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_QCAPS);
+ if (rc)
+ return rc;
+
+ req->fid = cpu_to_le16(0xffff);
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (rc)
+ goto hwrm_func_qcaps_exit;
+
+ flags = le32_to_cpu(resp->flags);
+ if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
+ bd->flags |= BNGE_EN_ROCE_V1;
+ if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
+ bd->flags |= BNGE_EN_ROCE_V2;
+
+ pf->fw_fid = le16_to_cpu(resp->fid);
+ pf->port_id = le16_to_cpu(resp->port_id);
+ memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
+
+ bd->tso_max_segs = le16_to_cpu(resp->max_tso_segs);
+
+hwrm_func_qcaps_exit:
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+int bnge_hwrm_vnic_qcaps(struct bnge_dev *bd)
+{
+ struct hwrm_vnic_qcaps_output *resp;
+ struct hwrm_vnic_qcaps_input *req;
+ int rc;
+
+ bd->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
+ bd->rss_cap &= ~BNGE_RSS_CAP_NEW_RSS_CAP;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_QCAPS);
+ if (rc)
+ return rc;
+
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (!rc) {
+ u32 flags = le32_to_cpu(resp->flags);
+
+ if (flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP)
+ bd->fw_cap |= BNGE_FW_CAP_VLAN_RX_STRIP;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP)
+ bd->rss_cap |= BNGE_RSS_CAP_RSS_HASH_TYPE_DELTA;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED)
+ bd->rss_cap |= BNGE_RSS_CAP_RSS_TCAM;
+ bd->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
+ if (bd->max_tpa_v2)
+ bd->hw_ring_stats_size = BNGE_RING_STATS_SIZE;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP)
+ bd->fw_cap |= BNGE_FW_CAP_VNIC_TUNNEL_TPA;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP)
+ bd->rss_cap |= BNGE_RSS_CAP_AH_V4_RSS_CAP;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP)
+ bd->rss_cap |= BNGE_RSS_CAP_AH_V6_RSS_CAP;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP)
+ bd->rss_cap |= BNGE_RSS_CAP_ESP_V4_RSS_CAP;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP)
+ bd->rss_cap |= BNGE_RSS_CAP_ESP_V6_RSS_CAP;
+ }
+ bnge_hwrm_req_drop(bd, req);
+
+ return rc;
+}
+
+#define BNGE_CNPQ(q_profile) \
+ ((q_profile) == \
+ QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP)
+
+int bnge_hwrm_queue_qportcfg(struct bnge_dev *bd)
+{
+ struct hwrm_queue_qportcfg_output *resp;
+ struct hwrm_queue_qportcfg_input *req;
+ u8 i, j, *qptr;
+ bool no_rdma;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_QUEUE_QPORTCFG);
+ if (rc)
+ return rc;
+
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (rc)
+ goto qportcfg_exit;
+
+ if (!resp->max_configurable_queues) {
+ rc = -EINVAL;
+ goto qportcfg_exit;
+ }
+ bd->max_tc = resp->max_configurable_queues;
+ bd->max_lltc = resp->max_configurable_lossless_queues;
+ if (bd->max_tc > BNGE_MAX_QUEUE)
+ bd->max_tc = BNGE_MAX_QUEUE;
+
+ no_rdma = !bnge_is_roce_en(bd);
+ qptr = &resp->queue_id0;
+ for (i = 0, j = 0; i < bd->max_tc; i++) {
+ bd->q_info[j].queue_id = *qptr;
+ bd->q_ids[i] = *qptr++;
+ bd->q_info[j].queue_profile = *qptr++;
+ bd->tc_to_qidx[j] = j;
+ if (!BNGE_CNPQ(bd->q_info[j].queue_profile) || no_rdma)
+ j++;
+ }
+ bd->max_q = bd->max_tc;
+ bd->max_tc = max_t(u8, j, 1);
+
+ if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
+ bd->max_tc = 1;
+
+ if (bd->max_lltc > bd->max_tc)
+ bd->max_lltc = bd->max_tc;
+
+qportcfg_exit:
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
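+/*
+ * Configure placement modes for the VNIC: the jumbo placement threshold is
+ * always set, plus header-data split thresholds when aggregation rings are
+ * in use.
+ */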
+int bnge_hwrm_vnic_set_hds(struct bnge_net *bn, struct bnge_vnic_info *vnic)
+{
+ u16 hds_thresh = (u16)bn->netdev->cfg_pending->hds_thresh;
+ struct hwrm_vnic_plcmodes_cfg_input *req;
+ struct bnge_dev *bd = bn->bd;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_PLCMODES_CFG);
+ if (rc)
+ return rc;
+
+ req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
+ req->enables = cpu_to_le32(BNGE_PLC_EN_JUMBO_THRES_VALID);
+ req->jumbo_thresh = cpu_to_le16(bn->rx_buf_use_size);
+
+ if (bnge_is_agg_reqd(bd)) {
+ req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
+ VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
+ req->enables |=
+ cpu_to_le32(BNGE_PLC_EN_HDS_THRES_VALID);
+ req->hds_threshold = cpu_to_le16(hds_thresh);
+ }
+ req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
+ return bnge_hwrm_req_send(bd, req);
+}
+
+int bnge_hwrm_vnic_ctx_alloc(struct bnge_dev *bd,
+ struct bnge_vnic_info *vnic, u16 ctx_idx)
+{
+ struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp;
+ struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
+ if (rc)
+ return rc;
+
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (!rc)
+ vnic->fw_rss_cos_lb_ctx[ctx_idx] =
+ le16_to_cpu(resp->rss_cos_lb_ctx_id);
+ bnge_hwrm_req_drop(bd, req);
+
+ return rc;
+}
+
+static void
+__bnge_hwrm_vnic_set_rss(struct bnge_net *bn,
+ struct hwrm_vnic_rss_cfg_input *req,
+ struct bnge_vnic_info *vnic)
+{
+ struct bnge_dev *bd = bn->bd;
+
+ bnge_fill_hw_rss_tbl(bn, vnic);
+ req->flags |= VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT;
+
+ req->hash_type = cpu_to_le32(bd->rss_hash_cfg);
+ req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
+ req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
+ req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
+}
+
+int bnge_hwrm_vnic_set_rss(struct bnge_net *bn,
+ struct bnge_vnic_info *vnic, bool set_rss)
+{
+ struct hwrm_vnic_rss_cfg_input *req;
+ struct bnge_dev *bd = bn->bd;
+ dma_addr_t ring_tbl_map;
+ u32 i, nr_ctxs;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_RSS_CFG);
+ if (rc)
+ return rc;
+
+ req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+ if (!set_rss)
+ return bnge_hwrm_req_send(bd, req);
+
+ __bnge_hwrm_vnic_set_rss(bn, req, vnic);
+ ring_tbl_map = vnic->rss_table_dma_addr;
+ nr_ctxs = bnge_cal_nr_rss_ctxs(bd->rx_nr_rings);
+
+ bnge_hwrm_req_hold(bd, req);
+ for (i = 0; i < nr_ctxs; ring_tbl_map += BNGE_RSS_TABLE_SIZE, i++) {
+ req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
+ req->ring_table_pair_index = i;
+ req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (rc)
+ goto exit;
+ }
+
+exit:
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+int bnge_hwrm_vnic_cfg(struct bnge_net *bn, struct bnge_vnic_info *vnic)
+{
+ struct bnge_rx_ring_info *rxr = &bn->rx_ring[0];
+ struct hwrm_vnic_cfg_input *req;
+ struct bnge_dev *bd = bn->bd;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_CFG);
+ if (rc)
+ return rc;
+
+ req->default_rx_ring_id =
+ cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
+ req->default_cmpl_ring_id =
+ cpu_to_le16(bnge_cp_ring_for_rx(rxr));
+ req->enables =
+ cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
+ VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
+ vnic->mru = bd->netdev->mtu + ETH_HLEN + VLAN_HLEN;
+ req->mru = cpu_to_le16(vnic->mru);
+
+ req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+
+ if (bd->flags & BNGE_EN_STRIP_VLAN)
+ req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
+ if (vnic->vnic_id == BNGE_VNIC_DEFAULT && bnge_aux_registered(bd))
+ req->flags |= cpu_to_le32(BNGE_VNIC_CFG_ROCE_DUAL_MODE);
+
+ return bnge_hwrm_req_send(bd, req);
+}
+
+void bnge_hwrm_update_rss_hash_cfg(struct bnge_net *bn)
+{
+ struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
+ struct hwrm_vnic_rss_qcfg_output *resp;
+ struct hwrm_vnic_rss_qcfg_input *req;
+ struct bnge_dev *bd = bn->bd;
+
+ if (bnge_hwrm_req_init(bd, req, HWRM_VNIC_RSS_QCFG))
+ return;
+
+ req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+ /* all contexts configured to same hash_type, zero always exists */
+ req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
+ resp = bnge_hwrm_req_hold(bd, req);
+ if (!bnge_hwrm_req_send(bd, req))
+ bd->rss_hash_cfg =
+ le32_to_cpu(resp->hash_type) ?: bd->rss_hash_cfg;
+ bnge_hwrm_req_drop(bd, req);
+}
+
+int bnge_hwrm_l2_filter_free(struct bnge_dev *bd, struct bnge_l2_filter *fltr)
+{
+ struct hwrm_cfa_l2_filter_free_input *req;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_CFA_L2_FILTER_FREE);
+ if (rc)
+ return rc;
+
+ req->l2_filter_id = fltr->base.filter_id;
+ return bnge_hwrm_req_send(bd, req);
+}
+
+int bnge_hwrm_l2_filter_alloc(struct bnge_dev *bd, struct bnge_l2_filter *fltr)
+{
+ struct hwrm_cfa_l2_filter_alloc_output *resp;
+ struct hwrm_cfa_l2_filter_alloc_input *req;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_CFA_L2_FILTER_ALLOC);
+ if (rc)
+ return rc;
+
+ req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
+
+ req->flags |= cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
+ req->dst_id = cpu_to_le16(fltr->base.fw_vnic_id);
+ req->enables =
+ cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
+ CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
+ CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
+ ether_addr_copy(req->l2_addr, fltr->l2_key.dst_mac_addr);
+ eth_broadcast_addr(req->l2_addr_mask);
+
+ if (fltr->l2_key.vlan) {
+ req->enables |=
+ cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN |
+ CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK |
+ CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS);
+ req->num_vlans = 1;
+ req->l2_ivlan = cpu_to_le16(fltr->l2_key.vlan);
+ req->l2_ivlan_mask = cpu_to_le16(0xfff);
+ }
+
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (!rc)
+ fltr->base.filter_id = resp->l2_filter_id;
+
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+int bnge_hwrm_cfa_l2_set_rx_mask(struct bnge_dev *bd,
+ struct bnge_vnic_info *vnic)
+{
+ struct hwrm_cfa_l2_set_rx_mask_input *req;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_CFA_L2_SET_RX_MASK);
+ if (rc)
+ return rc;
+
+ req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
+ if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) {
+ req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
+ req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
+ }
+ req->mask = cpu_to_le32(vnic->rx_mask);
+ return bnge_hwrm_req_send_silent(bd, req);
+}
+
+int bnge_hwrm_vnic_alloc(struct bnge_dev *bd, struct bnge_vnic_info *vnic,
+ unsigned int nr_rings)
+{
+ struct hwrm_vnic_alloc_output *resp;
+ struct hwrm_vnic_alloc_input *req;
+ unsigned int i;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_ALLOC);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < BNGE_MAX_CTX_PER_VNIC; i++)
+ vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
+ if (vnic->vnic_id == BNGE_VNIC_DEFAULT)
+ req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
+
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (!rc)
+ vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+void bnge_hwrm_vnic_free_one(struct bnge_dev *bd, struct bnge_vnic_info *vnic)
+{
+ if (vnic->fw_vnic_id != INVALID_HW_RING_ID) {
+ struct hwrm_vnic_free_input *req;
+
+ if (bnge_hwrm_req_init(bd, req, HWRM_VNIC_FREE))
+ return;
+
+ req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
+
+ bnge_hwrm_req_send(bd, req);
+ vnic->fw_vnic_id = INVALID_HW_RING_ID;
+ }
+}
+
+void bnge_hwrm_vnic_ctx_free_one(struct bnge_dev *bd,
+ struct bnge_vnic_info *vnic, u16 ctx_idx)
+{
+ struct hwrm_vnic_rss_cos_lb_ctx_free_input *req;
+
+ if (bnge_hwrm_req_init(bd, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE))
+ return;
+
+ req->rss_cos_lb_ctx_id =
+ cpu_to_le16(vnic->fw_rss_cos_lb_ctx[ctx_idx]);
+
+ bnge_hwrm_req_send(bd, req);
+ vnic->fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
+}
+
+void bnge_hwrm_stat_ctx_free(struct bnge_net *bn)
+{
+ struct hwrm_stat_ctx_free_input *req;
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ if (bnge_hwrm_req_init(bd, req, HWRM_STAT_CTX_FREE))
+ return;
+
+ bnge_hwrm_req_hold(bd, req);
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
+
+ if (nqr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
+ req->stat_ctx_id = cpu_to_le32(nqr->hw_stats_ctx_id);
+ bnge_hwrm_req_send(bd, req);
+
+ nqr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
+ }
+ }
+ bnge_hwrm_req_drop(bd, req);
+}
+
+int bnge_hwrm_stat_ctx_alloc(struct bnge_net *bn)
+{
+ struct hwrm_stat_ctx_alloc_output *resp;
+ struct hwrm_stat_ctx_alloc_input *req;
+ struct bnge_dev *bd = bn->bd;
+ int rc, i;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_STAT_CTX_ALLOC);
+ if (rc)
+ return rc;
+
+ req->stats_dma_length = cpu_to_le16(bd->hw_ring_stats_size);
+ req->update_period_ms = cpu_to_le32(bn->stats_coal_ticks / 1000);
+
+ resp = bnge_hwrm_req_hold(bd, req);
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
+
+ req->stats_dma_addr = cpu_to_le64(nqr->stats.hw_stats_map);
+
+ rc = bnge_hwrm_req_send(bd, req);
+ if (rc)
+ break;
+
+ nqr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
+ bn->grp_info[i].fw_stats_ctx = nqr->hw_stats_ctx_id;
+ }
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+int hwrm_ring_free_send_msg(struct bnge_net *bn,
+ struct bnge_ring_struct *ring,
+ u32 ring_type, int cmpl_ring_id)
+{
+ struct hwrm_ring_free_input *req;
+ struct bnge_dev *bd = bn->bd;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_RING_FREE);
+ if (rc)
+ goto exit;
+
+ req->cmpl_ring = cpu_to_le16(cmpl_ring_id);
+ req->ring_type = ring_type;
+ req->ring_id = cpu_to_le16(ring->fw_ring_id);
+
+ bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ bnge_hwrm_req_drop(bd, req);
+exit:
+ if (rc) {
+ netdev_err(bd->netdev, "hwrm_ring_free type %d failed. rc:%d\n", ring_type, rc);
+ return -EIO;
+ }
+ return 0;
+}
+
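+/*
+ * Allocate a hardware ring of the given type and associate it with the
+ * appropriate completion ring, NQ and stats context; the firmware ring id is
+ * stored in @ring on success.
+ */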
+int hwrm_ring_alloc_send_msg(struct bnge_net *bn,
+ struct bnge_ring_struct *ring,
+ u32 ring_type, u32 map_index)
+{
+ struct bnge_ring_mem_info *rmem = &ring->ring_mem;
+ struct bnge_ring_grp_info *grp_info;
+ struct hwrm_ring_alloc_output *resp;
+ struct hwrm_ring_alloc_input *req;
+ struct bnge_dev *bd = bn->bd;
+ u16 ring_id, flags = 0;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_RING_ALLOC);
+ if (rc)
+ goto exit;
+
+ req->enables = 0;
+ if (rmem->nr_pages > 1) {
+ req->page_tbl_addr = cpu_to_le64(rmem->dma_pg_tbl);
+ /* Page size is in log2 units */
+ req->page_size = BNGE_PAGE_SHIFT;
+ req->page_tbl_depth = 1;
+ } else {
+ req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
+ }
+ req->fbo = 0;
+ /* Association of ring index with doorbell index and MSIX number */
+ req->logical_id = cpu_to_le16(map_index);
+
+ switch (ring_type) {
+ case HWRM_RING_ALLOC_TX: {
+ struct bnge_tx_ring_info *txr;
+
+ txr = container_of(ring, struct bnge_tx_ring_info,
+ tx_ring_struct);
+ req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
+ /* Association of transmit ring with completion ring */
+ grp_info = &bn->grp_info[ring->grp_idx];
+ req->cmpl_ring_id = cpu_to_le16(bnge_cp_ring_for_tx(txr));
+ req->length = cpu_to_le32(bn->tx_ring_mask + 1);
+ req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
+ req->queue_id = cpu_to_le16(ring->queue_id);
+ req->flags = cpu_to_le16(flags);
+ break;
+ }
+ case HWRM_RING_ALLOC_RX:
+ req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
+ req->length = cpu_to_le32(bn->rx_ring_mask + 1);
+
+ /* Association of rx ring with stats context */
+ grp_info = &bn->grp_info[ring->grp_idx];
+ req->rx_buf_size = cpu_to_le16(bn->rx_buf_use_size);
+ req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
+ req->enables |=
+ cpu_to_le32(RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
+ if (NET_IP_ALIGN == 2)
+ flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
+ req->flags = cpu_to_le16(flags);
+ break;
+ case HWRM_RING_ALLOC_AGG:
+ req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
+ /* Association of agg ring with rx ring */
+ grp_info = &bn->grp_info[ring->grp_idx];
+ req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
+ req->rx_buf_size = cpu_to_le16(BNGE_RX_PAGE_SIZE);
+ req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
+ req->enables |=
+ cpu_to_le32(RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
+ RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
+ req->length = cpu_to_le32(bn->rx_agg_ring_mask + 1);
+ break;
+ case HWRM_RING_ALLOC_CMPL:
+ req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
+ req->length = cpu_to_le32(bn->cp_ring_mask + 1);
+ /* Association of cp ring with nq */
+ grp_info = &bn->grp_info[map_index];
+ req->nq_ring_id = cpu_to_le16(grp_info->nq_fw_ring_id);
+ req->cq_handle = cpu_to_le64(ring->handle);
+ req->enables |=
+ cpu_to_le32(RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
+ break;
+ case HWRM_RING_ALLOC_NQ:
+ req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
+ req->length = cpu_to_le32(bn->cp_ring_mask + 1);
+ req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
+ break;
+ default:
+ netdev_err(bn->netdev, "hwrm alloc invalid ring type %d\n", ring_type);
+ return -EINVAL;
+ }
+
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ ring_id = le16_to_cpu(resp->ring_id);
+ bnge_hwrm_req_drop(bd, req);
+
+exit:
+ if (rc) {
+ netdev_err(bd->netdev, "hwrm_ring_alloc type %d failed. rc:%d\n", ring_type, rc);
+ return -EIO;
+ }
+ ring->fw_ring_id = ring_id;
+ return rc;
+}
+
+int bnge_hwrm_set_async_event_cr(struct bnge_dev *bd, int idx)
+{
+ struct hwrm_func_cfg_input *req;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_CFG);
+ if (rc)
+ return rc;
+
+ req->fid = cpu_to_le16(0xffff);
+ req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
+ req->async_event_cr = cpu_to_le16(idx);
+ return bnge_hwrm_req_send(bd, req);
+}
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.h b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.h
new file mode 100644
index 000000000000..042f28e84a05
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_HWRM_LIB_H_
+#define _BNGE_HWRM_LIB_H_
+
+#define BNGE_PLC_EN_JUMBO_THRES_VALID \
+ VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID
+#define BNGE_PLC_EN_HDS_THRES_VALID \
+ VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID
+#define BNGE_VNIC_CFG_ROCE_DUAL_MODE \
+ VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE
+
+int bnge_hwrm_ver_get(struct bnge_dev *bd);
+int bnge_hwrm_func_reset(struct bnge_dev *bd);
+int bnge_hwrm_fw_set_time(struct bnge_dev *bd);
+int bnge_hwrm_func_drv_rgtr(struct bnge_dev *bd);
+int bnge_hwrm_func_drv_unrgtr(struct bnge_dev *bd);
+int bnge_hwrm_vnic_qcaps(struct bnge_dev *bd);
+int bnge_hwrm_nvm_dev_info(struct bnge_dev *bd,
+ struct hwrm_nvm_get_dev_info_output *nvm_dev_info);
+int bnge_hwrm_func_backing_store(struct bnge_dev *bd,
+ struct bnge_ctx_mem_type *ctxm,
+ bool last);
+int bnge_hwrm_func_backing_store_qcaps(struct bnge_dev *bd);
+int bnge_hwrm_reserve_rings(struct bnge_dev *bd,
+ struct bnge_hw_rings *hwr);
+int bnge_hwrm_func_qcaps(struct bnge_dev *bd);
+int bnge_hwrm_func_qcfg(struct bnge_dev *bd);
+int bnge_hwrm_func_resc_qcaps(struct bnge_dev *bd);
+int bnge_hwrm_queue_qportcfg(struct bnge_dev *bd);
+
+int bnge_hwrm_vnic_set_hds(struct bnge_net *bn, struct bnge_vnic_info *vnic);
+int bnge_hwrm_vnic_ctx_alloc(struct bnge_dev *bd,
+ struct bnge_vnic_info *vnic, u16 ctx_idx);
+int bnge_hwrm_vnic_set_rss(struct bnge_net *bn,
+ struct bnge_vnic_info *vnic, bool set_rss);
+int bnge_hwrm_vnic_cfg(struct bnge_net *bn, struct bnge_vnic_info *vnic);
+void bnge_hwrm_update_rss_hash_cfg(struct bnge_net *bn);
+int bnge_hwrm_vnic_alloc(struct bnge_dev *bd, struct bnge_vnic_info *vnic,
+ unsigned int nr_rings);
+void bnge_hwrm_vnic_free_one(struct bnge_dev *bd, struct bnge_vnic_info *vnic);
+void bnge_hwrm_vnic_ctx_free_one(struct bnge_dev *bd,
+ struct bnge_vnic_info *vnic, u16 ctx_idx);
+int bnge_hwrm_l2_filter_free(struct bnge_dev *bd, struct bnge_l2_filter *fltr);
+int bnge_hwrm_l2_filter_alloc(struct bnge_dev *bd, struct bnge_l2_filter *fltr);
+int bnge_hwrm_cfa_l2_set_rx_mask(struct bnge_dev *bd,
+ struct bnge_vnic_info *vnic);
+void bnge_hwrm_stat_ctx_free(struct bnge_net *bn);
+int bnge_hwrm_stat_ctx_alloc(struct bnge_net *bn);
+int hwrm_ring_free_send_msg(struct bnge_net *bn, struct bnge_ring_struct *ring,
+ u32 ring_type, int cmpl_ring_id);
+int hwrm_ring_alloc_send_msg(struct bnge_net *bn,
+ struct bnge_ring_struct *ring,
+ u32 ring_type, u32 map_index);
+int bnge_hwrm_set_async_event_cr(struct bnge_dev *bd, int idx);
+#endif /* _BNGE_HWRM_LIB_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
new file mode 100644
index 000000000000..832eeb960bd2
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
@@ -0,0 +1,2485 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2025 Broadcom.
+
+#include <asm/byteorder.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if.h>
+#include <net/ip.h>
+#include <linux/skbuff.h>
+#include <net/page_pool/helpers.h>
+
+#include "bnge.h"
+#include "bnge_hwrm_lib.h"
+#include "bnge_ethtool.h"
+#include "bnge_rmem.h"
+
+#define BNGE_RING_TO_TC_OFF(bd, tx) \
+ ((tx) % (bd)->tx_nr_rings_per_tc)
+
+#define BNGE_RING_TO_TC(bd, tx) \
+ ((tx) / (bd)->tx_nr_rings_per_tc)
+
+#define BNGE_TC_TO_RING_BASE(bd, tc) \
+ ((tc) * (bd)->tx_nr_rings_per_tc)
+
+static void bnge_free_stats_mem(struct bnge_net *bn,
+ struct bnge_stats_mem *stats)
+{
+ struct bnge_dev *bd = bn->bd;
+
+ if (stats->hw_stats) {
+ dma_free_coherent(bd->dev, stats->len, stats->hw_stats,
+ stats->hw_stats_map);
+ stats->hw_stats = NULL;
+ }
+}
+
+static int bnge_alloc_stats_mem(struct bnge_net *bn,
+ struct bnge_stats_mem *stats)
+{
+ struct bnge_dev *bd = bn->bd;
+
+ stats->hw_stats = dma_alloc_coherent(bd->dev, stats->len,
+ &stats->hw_stats_map, GFP_KERNEL);
+ if (!stats->hw_stats)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void bnge_free_ring_stats(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ if (!bn->bnapi)
+ return;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
+
+ bnge_free_stats_mem(bn, &nqr->stats);
+ }
+}
+
+static int bnge_alloc_ring_stats(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ u32 size, i;
+ int rc;
+
+ size = bd->hw_ring_stats_size;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
+
+ nqr->stats.len = size;
+ rc = bnge_alloc_stats_mem(bn, &nqr->stats);
+ if (rc)
+ goto err_free_ring_stats;
+
+ nqr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
+ }
+ return 0;
+
+err_free_ring_stats:
+ bnge_free_ring_stats(bn);
+ return rc;
+}
+
+static void bnge_free_nq_desc_arr(struct bnge_nq_ring_info *nqr)
+{
+ struct bnge_ring_struct *ring = &nqr->ring_struct;
+
+ kfree(nqr->desc_ring);
+ nqr->desc_ring = NULL;
+ ring->ring_mem.pg_arr = NULL;
+ kfree(nqr->desc_mapping);
+ nqr->desc_mapping = NULL;
+ ring->ring_mem.dma_arr = NULL;
+}
+
+static void bnge_free_cp_desc_arr(struct bnge_cp_ring_info *cpr)
+{
+ struct bnge_ring_struct *ring = &cpr->ring_struct;
+
+ kfree(cpr->desc_ring);
+ cpr->desc_ring = NULL;
+ ring->ring_mem.pg_arr = NULL;
+ kfree(cpr->desc_mapping);
+ cpr->desc_mapping = NULL;
+ ring->ring_mem.dma_arr = NULL;
+}
+
+static int bnge_alloc_nq_desc_arr(struct bnge_nq_ring_info *nqr, int n)
+{
+ nqr->desc_ring = kcalloc(n, sizeof(*nqr->desc_ring), GFP_KERNEL);
+ if (!nqr->desc_ring)
+ return -ENOMEM;
+
+ nqr->desc_mapping = kcalloc(n, sizeof(*nqr->desc_mapping), GFP_KERNEL);
+ if (!nqr->desc_mapping)
+ goto err_free_desc_ring;
+ return 0;
+
+err_free_desc_ring:
+ kfree(nqr->desc_ring);
+ nqr->desc_ring = NULL;
+ return -ENOMEM;
+}
+
+static int bnge_alloc_cp_desc_arr(struct bnge_cp_ring_info *cpr, int n)
+{
+ cpr->desc_ring = kcalloc(n, sizeof(*cpr->desc_ring), GFP_KERNEL);
+ if (!cpr->desc_ring)
+ return -ENOMEM;
+
+ cpr->desc_mapping = kcalloc(n, sizeof(*cpr->desc_mapping), GFP_KERNEL);
+ if (!cpr->desc_mapping)
+ goto err_free_desc_ring;
+ return 0;
+
+err_free_desc_ring:
+ kfree(cpr->desc_ring);
+ cpr->desc_ring = NULL;
+ return -ENOMEM;
+}
+
+static void bnge_free_nq_arrays(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+
+ bnge_free_nq_desc_arr(&bnapi->nq_ring);
+ }
+}
+
+static int bnge_alloc_nq_arrays(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i, rc;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+
+ rc = bnge_alloc_nq_desc_arr(&bnapi->nq_ring, bn->cp_nr_pages);
+ if (rc)
+ goto err_free_nq_arrays;
+ }
+ return 0;
+
+err_free_nq_arrays:
+ bnge_free_nq_arrays(bn);
+ return rc;
+}
+
+static void bnge_free_nq_tree(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_nq_ring_info *nqr;
+ struct bnge_ring_struct *ring;
+ int j;
+
+ nqr = &bnapi->nq_ring;
+ ring = &nqr->ring_struct;
+
+ bnge_free_ring(bd, &ring->ring_mem);
+
+ if (!nqr->cp_ring_arr)
+ continue;
+
+ for (j = 0; j < nqr->cp_ring_count; j++) {
+ struct bnge_cp_ring_info *cpr = &nqr->cp_ring_arr[j];
+
+ ring = &cpr->ring_struct;
+ bnge_free_ring(bd, &ring->ring_mem);
+ bnge_free_cp_desc_arr(cpr);
+ }
+ kfree(nqr->cp_ring_arr);
+ nqr->cp_ring_arr = NULL;
+ nqr->cp_ring_count = 0;
+ }
+}
+
+static int alloc_one_cp_ring(struct bnge_net *bn,
+ struct bnge_cp_ring_info *cpr)
+{
+ struct bnge_ring_mem_info *rmem;
+ struct bnge_ring_struct *ring;
+ struct bnge_dev *bd = bn->bd;
+ int rc;
+
+ rc = bnge_alloc_cp_desc_arr(cpr, bn->cp_nr_pages);
+ if (rc)
+ return -ENOMEM;
+ ring = &cpr->ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bn->cp_nr_pages;
+ rmem->page_size = HW_CMPD_RING_SIZE;
+ rmem->pg_arr = (void **)cpr->desc_ring;
+ rmem->dma_arr = cpr->desc_mapping;
+ rmem->flags = BNGE_RMEM_RING_PTE_FLAG;
+ rc = bnge_alloc_ring(bd, rmem);
+ if (rc)
+ goto err_free_cp_desc_arr;
+ return rc;
+
+err_free_cp_desc_arr:
+ bnge_free_cp_desc_arr(cpr);
+ return rc;
+}
+
+static int bnge_alloc_nq_tree(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i, j, ulp_msix, rc;
+ int tcs = 1;
+
+ ulp_msix = bnge_aux_get_msix(bd);
+ for (i = 0, j = 0; i < bd->nq_nr_rings; i++) {
+ bool sh = !!(bd->flags & BNGE_EN_SHARED_CHNL);
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_nq_ring_info *nqr;
+ struct bnge_cp_ring_info *cpr;
+ struct bnge_ring_struct *ring;
+ int cp_count = 0, k;
+ int rx = 0, tx = 0;
+
+ nqr = &bnapi->nq_ring;
+ nqr->bnapi = bnapi;
+ ring = &nqr->ring_struct;
+
+ rc = bnge_alloc_ring(bd, &ring->ring_mem);
+ if (rc)
+ goto err_free_nq_tree;
+
+ ring->map_idx = ulp_msix + i;
+
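+		/* One cmpl ring for RX on this NQ, plus one per TC for TX */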
+ if (i < bd->rx_nr_rings) {
+ cp_count++;
+ rx = 1;
+ }
+
+ if ((sh && i < bd->tx_nr_rings) ||
+ (!sh && i >= bd->rx_nr_rings)) {
+ cp_count += tcs;
+ tx = 1;
+ }
+
+ nqr->cp_ring_arr = kcalloc(cp_count, sizeof(*cpr),
+ GFP_KERNEL);
+ if (!nqr->cp_ring_arr) {
+ rc = -ENOMEM;
+ goto err_free_nq_tree;
+ }
+
+ nqr->cp_ring_count = cp_count;
+
+ for (k = 0; k < cp_count; k++) {
+ cpr = &nqr->cp_ring_arr[k];
+ rc = alloc_one_cp_ring(bn, cpr);
+ if (rc)
+ goto err_free_nq_tree;
+
+ cpr->bnapi = bnapi;
+ cpr->cp_idx = k;
+ if (!k && rx) {
+ bn->rx_ring[i].rx_cpr = cpr;
+ cpr->cp_ring_type = BNGE_NQ_HDL_TYPE_RX;
+ } else {
+ int n, tc = k - rx;
+
+ n = BNGE_TC_TO_RING_BASE(bd, tc) + j;
+ bn->tx_ring[n].tx_cpr = cpr;
+ cpr->cp_ring_type = BNGE_NQ_HDL_TYPE_TX;
+ }
+ }
+ if (tx)
+ j++;
+ }
+ return 0;
+
+err_free_nq_tree:
+ bnge_free_nq_tree(bn);
+ return rc;
+}
+
+static bool bnge_separate_head_pool(struct bnge_rx_ring_info *rxr)
+{
+ return rxr->need_head_pool || PAGE_SIZE > BNGE_RX_PAGE_SIZE;
+}
+
+static void bnge_free_one_rx_ring_bufs(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr)
+{
+ int i, max_idx;
+
+ if (!rxr->rx_buf_ring)
+ return;
+
+ max_idx = bn->rx_nr_pages * RX_DESC_CNT;
+
+ for (i = 0; i < max_idx; i++) {
+ struct bnge_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
+ void *data = rx_buf->data;
+
+ if (!data)
+ continue;
+
+ rx_buf->data = NULL;
+ page_pool_free_va(rxr->head_pool, data, true);
+ }
+}
+
+static void bnge_free_one_agg_ring_bufs(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr)
+{
+ int i, max_idx;
+
+ if (!rxr->rx_agg_buf_ring)
+ return;
+
+ max_idx = bn->rx_agg_nr_pages * RX_DESC_CNT;
+
+ for (i = 0; i < max_idx; i++) {
+ struct bnge_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_buf_ring[i];
+ netmem_ref netmem = rx_agg_buf->netmem;
+
+ if (!netmem)
+ continue;
+
+ rx_agg_buf->netmem = 0;
+ __clear_bit(i, rxr->rx_agg_bmap);
+
+ page_pool_recycle_direct_netmem(rxr->page_pool, netmem);
+ }
+}
+
+static void bnge_free_one_rx_ring_pair_bufs(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr)
+{
+ bnge_free_one_rx_ring_bufs(bn, rxr);
+ bnge_free_one_agg_ring_bufs(bn, rxr);
+}
+
+static void bnge_free_rx_ring_pair_bufs(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ if (!bn->rx_ring)
+ return;
+
+ for (i = 0; i < bd->rx_nr_rings; i++)
+ bnge_free_one_rx_ring_pair_bufs(bn, &bn->rx_ring[i]);
+}
+
+static void bnge_free_all_rings_bufs(struct bnge_net *bn)
+{
+ bnge_free_rx_ring_pair_bufs(bn);
+}
+
+static void bnge_free_rx_rings(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ for (i = 0; i < bd->rx_nr_rings; i++) {
+ struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
+ struct bnge_ring_struct *ring;
+
+ page_pool_destroy(rxr->page_pool);
+ page_pool_destroy(rxr->head_pool);
+ rxr->page_pool = rxr->head_pool = NULL;
+
+ kfree(rxr->rx_agg_bmap);
+ rxr->rx_agg_bmap = NULL;
+
+ ring = &rxr->rx_ring_struct;
+ bnge_free_ring(bd, &ring->ring_mem);
+
+ ring = &rxr->rx_agg_ring_struct;
+ bnge_free_ring(bd, &ring->ring_mem);
+ }
+}
+
+static int bnge_alloc_rx_page_pool(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr,
+ int numa_node)
+{
+ const unsigned int agg_size_fac = PAGE_SIZE / BNGE_RX_PAGE_SIZE;
+ const unsigned int rx_size_fac = PAGE_SIZE / SZ_4K;
+ struct page_pool_params pp = { 0 };
+ struct bnge_dev *bd = bn->bd;
+ struct page_pool *pool;
+
+ pp.pool_size = bn->rx_agg_ring_size / agg_size_fac;
+ pp.nid = numa_node;
+ pp.netdev = bn->netdev;
+ pp.dev = bd->dev;
+ pp.dma_dir = bn->rx_dir;
+ pp.max_len = PAGE_SIZE;
+ pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV |
+ PP_FLAG_ALLOW_UNREADABLE_NETMEM;
+ pp.queue_idx = rxr->bnapi->index;
+
+ pool = page_pool_create(&pp);
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
+ rxr->page_pool = pool;
+
+ rxr->need_head_pool = page_pool_is_unreadable(pool);
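+	/* Headers need host-readable memory; use a separate pool when the
+	 * data pool is unreadable or its pages exceed BNGE_RX_PAGE_SIZE.
+	 */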
+ if (bnge_separate_head_pool(rxr)) {
+ pp.pool_size = min(bn->rx_ring_size / rx_size_fac, 1024);
+ pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pool = page_pool_create(&pp);
+ if (IS_ERR(pool))
+ goto err_destroy_pp;
+ } else {
+ page_pool_get(pool);
+ }
+ rxr->head_pool = pool;
+ return 0;
+
+err_destroy_pp:
+ page_pool_destroy(rxr->page_pool);
+ rxr->page_pool = NULL;
+ return PTR_ERR(pool);
+}
+
+static void bnge_enable_rx_page_pool(struct bnge_rx_ring_info *rxr)
+{
+ page_pool_enable_direct_recycling(rxr->head_pool, &rxr->bnapi->napi);
+ page_pool_enable_direct_recycling(rxr->page_pool, &rxr->bnapi->napi);
+}
+
+static int bnge_alloc_rx_agg_bmap(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr)
+{
+ u16 mem_size;
+
+ rxr->rx_agg_bmap_size = bn->rx_agg_ring_mask + 1;
+ mem_size = rxr->rx_agg_bmap_size / 8;
+ rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
+ if (!rxr->rx_agg_bmap)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int bnge_alloc_rx_rings(struct bnge_net *bn)
+{
+ int i, rc = 0, agg_rings = 0, cpu;
+ struct bnge_dev *bd = bn->bd;
+
+ if (bnge_is_agg_reqd(bd))
+ agg_rings = 1;
+
+ for (i = 0; i < bd->rx_nr_rings; i++) {
+ struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
+ struct bnge_ring_struct *ring;
+ int cpu_node;
+
+ ring = &rxr->rx_ring_struct;
+
+ cpu = cpumask_local_spread(i, dev_to_node(bd->dev));
+ cpu_node = cpu_to_node(cpu);
+ netdev_dbg(bn->netdev, "Allocating page pool for rx_ring[%d] on numa_node: %d\n",
+ i, cpu_node);
+ rc = bnge_alloc_rx_page_pool(bn, rxr, cpu_node);
+ if (rc)
+ goto err_free_rx_rings;
+ bnge_enable_rx_page_pool(rxr);
+
+ rc = bnge_alloc_ring(bd, &ring->ring_mem);
+ if (rc)
+ goto err_free_rx_rings;
+
+ ring->grp_idx = i;
+ if (agg_rings) {
+ ring = &rxr->rx_agg_ring_struct;
+ rc = bnge_alloc_ring(bd, &ring->ring_mem);
+ if (rc)
+ goto err_free_rx_rings;
+
+ ring->grp_idx = i;
+ rc = bnge_alloc_rx_agg_bmap(bn, rxr);
+ if (rc)
+ goto err_free_rx_rings;
+ }
+ }
+ return rc;
+
+err_free_rx_rings:
+ bnge_free_rx_rings(bn);
+ return rc;
+}
+
+static void bnge_free_tx_rings(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ for (i = 0; i < bd->tx_nr_rings; i++) {
+ struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
+ struct bnge_ring_struct *ring;
+
+ ring = &txr->tx_ring_struct;
+
+ bnge_free_ring(bd, &ring->ring_mem);
+ }
+}
+
+static int bnge_alloc_tx_rings(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i, j, rc;
+
+ for (i = 0, j = 0; i < bd->tx_nr_rings; i++) {
+ struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
+ struct bnge_ring_struct *ring;
+ u8 qidx;
+
+ ring = &txr->tx_ring_struct;
+
+ rc = bnge_alloc_ring(bd, &ring->ring_mem);
+ if (rc)
+ goto err_free_tx_rings;
+
+ ring->grp_idx = txr->bnapi->index;
+ qidx = bd->tc_to_qidx[j];
+ ring->queue_id = bd->q_info[qidx].queue_id;
+ if (BNGE_RING_TO_TC_OFF(bd, i) == (bd->tx_nr_rings_per_tc - 1))
+ j++;
+ }
+ return 0;
+
+err_free_tx_rings:
+ bnge_free_tx_rings(bn);
+ return rc;
+}
+
+static void bnge_free_vnic_attributes(struct bnge_net *bn)
+{
+ struct pci_dev *pdev = bn->bd->pdev;
+ struct bnge_vnic_info *vnic;
+ int i;
+
+ if (!bn->vnic_info)
+ return;
+
+ for (i = 0; i < bn->nr_vnics; i++) {
+ vnic = &bn->vnic_info[i];
+
+ kfree(vnic->uc_list);
+ vnic->uc_list = NULL;
+
+ if (vnic->mc_list) {
+ dma_free_coherent(&pdev->dev, vnic->mc_list_size,
+ vnic->mc_list, vnic->mc_list_mapping);
+ vnic->mc_list = NULL;
+ }
+
+ if (vnic->rss_table) {
+ dma_free_coherent(&pdev->dev, vnic->rss_table_size,
+ vnic->rss_table,
+ vnic->rss_table_dma_addr);
+ vnic->rss_table = NULL;
+ }
+
+ vnic->rss_hash_key = NULL;
+ vnic->flags = 0;
+ }
+}
+
+static int bnge_alloc_vnic_attributes(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ struct bnge_vnic_info *vnic;
+ int i, size;
+
+ for (i = 0; i < bn->nr_vnics; i++) {
+ vnic = &bn->vnic_info[i];
+
+ if (vnic->flags & BNGE_VNIC_UCAST_FLAG) {
+ int mem_size = (BNGE_MAX_UC_ADDRS - 1) * ETH_ALEN;
+
+ vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
+ if (!vnic->uc_list)
+ goto err_free_vnic_attributes;
+ }
+
+ if (vnic->flags & BNGE_VNIC_MCAST_FLAG) {
+ vnic->mc_list_size = BNGE_MAX_MC_ADDRS * ETH_ALEN;
+ vnic->mc_list =
+ dma_alloc_coherent(bd->dev,
+ vnic->mc_list_size,
+ &vnic->mc_list_mapping,
+ GFP_KERNEL);
+ if (!vnic->mc_list)
+ goto err_free_vnic_attributes;
+ }
+
+ /* Allocate rss table and hash key */
+ size = L1_CACHE_ALIGN(BNGE_MAX_RSS_TABLE_SIZE);
+
+ vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
+ vnic->rss_table = dma_alloc_coherent(bd->dev,
+ vnic->rss_table_size,
+ &vnic->rss_table_dma_addr,
+ GFP_KERNEL);
+ if (!vnic->rss_table)
+ goto err_free_vnic_attributes;
+
+ vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
+ vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
+ }
+ return 0;
+
+err_free_vnic_attributes:
+ bnge_free_vnic_attributes(bn);
+ return -ENOMEM;
+}
+
+static int bnge_alloc_vnics(struct bnge_net *bn)
+{
+ int num_vnics;
+
+ /* Allocate only 1 VNIC for now
+ * Additional VNICs will be added based on RFS/NTUPLE in future patches
+ */
+ num_vnics = 1;
+
+ bn->vnic_info = kcalloc(num_vnics, sizeof(struct bnge_vnic_info),
+ GFP_KERNEL);
+ if (!bn->vnic_info)
+ return -ENOMEM;
+
+ bn->nr_vnics = num_vnics;
+
+ return 0;
+}
+
+static void bnge_free_vnics(struct bnge_net *bn)
+{
+ kfree(bn->vnic_info);
+ bn->vnic_info = NULL;
+ bn->nr_vnics = 0;
+}
+
+static void bnge_free_ring_grps(struct bnge_net *bn)
+{
+ kfree(bn->grp_info);
+ bn->grp_info = NULL;
+}
+
+static int bnge_init_ring_grps(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ bn->grp_info = kcalloc(bd->nq_nr_rings,
+ sizeof(struct bnge_ring_grp_info),
+ GFP_KERNEL);
+ if (!bn->grp_info)
+ return -ENOMEM;
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ bn->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
+ bn->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
+ bn->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
+ bn->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
+ bn->grp_info[i].nq_fw_ring_id = INVALID_HW_RING_ID;
+ }
+
+ return 0;
+}
+
+static void bnge_free_core(struct bnge_net *bn)
+{
+ bnge_free_vnic_attributes(bn);
+ bnge_free_tx_rings(bn);
+ bnge_free_rx_rings(bn);
+ bnge_free_nq_tree(bn);
+ bnge_free_nq_arrays(bn);
+ bnge_free_ring_stats(bn);
+ bnge_free_ring_grps(bn);
+ bnge_free_vnics(bn);
+ kfree(bn->tx_ring_map);
+ bn->tx_ring_map = NULL;
+ kfree(bn->tx_ring);
+ bn->tx_ring = NULL;
+ kfree(bn->rx_ring);
+ bn->rx_ring = NULL;
+ kfree(bn->bnapi);
+ bn->bnapi = NULL;
+}
+
+static int bnge_alloc_core(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i, j, size, arr_size;
+ int rc = -ENOMEM;
+ void *bnapi;
+
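+	/* One allocation holds the bnge_napi pointer array and the structs */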
+ arr_size = L1_CACHE_ALIGN(sizeof(struct bnge_napi *) *
+ bd->nq_nr_rings);
+ size = L1_CACHE_ALIGN(sizeof(struct bnge_napi));
+ bnapi = kzalloc(arr_size + size * bd->nq_nr_rings, GFP_KERNEL);
+ if (!bnapi)
+ return rc;
+
+ bn->bnapi = bnapi;
+ bnapi += arr_size;
+ for (i = 0; i < bd->nq_nr_rings; i++, bnapi += size) {
+ struct bnge_nq_ring_info *nqr;
+
+ bn->bnapi[i] = bnapi;
+ bn->bnapi[i]->index = i;
+ bn->bnapi[i]->bn = bn;
+ nqr = &bn->bnapi[i]->nq_ring;
+ nqr->ring_struct.ring_mem.flags = BNGE_RMEM_RING_PTE_FLAG;
+ }
+
+ bn->rx_ring = kcalloc(bd->rx_nr_rings,
+ sizeof(struct bnge_rx_ring_info),
+ GFP_KERNEL);
+ if (!bn->rx_ring)
+ goto err_free_core;
+
+ for (i = 0; i < bd->rx_nr_rings; i++) {
+ struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
+
+ rxr->rx_ring_struct.ring_mem.flags =
+ BNGE_RMEM_RING_PTE_FLAG;
+ rxr->rx_agg_ring_struct.ring_mem.flags =
+ BNGE_RMEM_RING_PTE_FLAG;
+ rxr->bnapi = bn->bnapi[i];
+ bn->bnapi[i]->rx_ring = &bn->rx_ring[i];
+ }
+
+ bn->tx_ring = kcalloc(bd->tx_nr_rings,
+ sizeof(struct bnge_tx_ring_info),
+ GFP_KERNEL);
+ if (!bn->tx_ring)
+ goto err_free_core;
+
+ bn->tx_ring_map = kcalloc(bd->tx_nr_rings, sizeof(u16),
+ GFP_KERNEL);
+ if (!bn->tx_ring_map)
+ goto err_free_core;
+
+ if (bd->flags & BNGE_EN_SHARED_CHNL)
+ j = 0;
+ else
+ j = bd->rx_nr_rings;
+
+ for (i = 0; i < bd->tx_nr_rings; i++) {
+ struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
+ struct bnge_napi *bnapi2;
+ int k;
+
+ txr->tx_ring_struct.ring_mem.flags = BNGE_RMEM_RING_PTE_FLAG;
+ bn->tx_ring_map[i] = i;
+ k = j + BNGE_RING_TO_TC_OFF(bd, i);
+
+ bnapi2 = bn->bnapi[k];
+ txr->txq_index = i;
+ txr->tx_napi_idx =
+ BNGE_RING_TO_TC(bd, txr->txq_index);
+ bnapi2->tx_ring[txr->tx_napi_idx] = txr;
+ txr->bnapi = bnapi2;
+ }
+
+ rc = bnge_alloc_ring_stats(bn);
+ if (rc)
+ goto err_free_core;
+
+ rc = bnge_alloc_vnics(bn);
+ if (rc)
+ goto err_free_core;
+
+ rc = bnge_alloc_nq_arrays(bn);
+ if (rc)
+ goto err_free_core;
+
+ bnge_init_ring_struct(bn);
+
+ rc = bnge_alloc_rx_rings(bn);
+ if (rc)
+ goto err_free_core;
+
+ rc = bnge_alloc_tx_rings(bn);
+ if (rc)
+ goto err_free_core;
+
+ rc = bnge_alloc_nq_tree(bn);
+ if (rc)
+ goto err_free_core;
+
+ bn->vnic_info[BNGE_VNIC_DEFAULT].flags |= BNGE_VNIC_RSS_FLAG |
+ BNGE_VNIC_MCAST_FLAG |
+ BNGE_VNIC_UCAST_FLAG;
+ rc = bnge_alloc_vnic_attributes(bn);
+ if (rc)
+ goto err_free_core;
+ return 0;
+
+err_free_core:
+ bnge_free_core(bn);
+ return rc;
+}
+
+u16 bnge_cp_ring_for_rx(struct bnge_rx_ring_info *rxr)
+{
+ return rxr->rx_cpr->ring_struct.fw_ring_id;
+}
+
+u16 bnge_cp_ring_for_tx(struct bnge_tx_ring_info *txr)
+{
+ return txr->tx_cpr->ring_struct.fw_ring_id;
+}
+
+static void bnge_db_nq(struct bnge_net *bn, struct bnge_db_info *db, u32 idx)
+{
+ bnge_writeq(bn->bd, db->db_key64 | DBR_TYPE_NQ_MASK |
+ DB_RING_IDX(db, idx), db->doorbell);
+}
+
+static void bnge_db_cq(struct bnge_net *bn, struct bnge_db_info *db, u32 idx)
+{
+ bnge_writeq(bn->bd, db->db_key64 | DBR_TYPE_CQ_ARMALL |
+ DB_RING_IDX(db, idx), db->doorbell);
+}
+
+static int bnge_cp_num_to_irq_num(struct bnge_net *bn, int n)
+{
+ struct bnge_napi *bnapi = bn->bnapi[n];
+ struct bnge_nq_ring_info *nqr;
+
+ nqr = &bnapi->nq_ring;
+
+ return nqr->ring_struct.map_idx;
+}
+
+static irqreturn_t bnge_msix(int irq, void *dev_instance)
+{
+ /* NAPI scheduling to be added in a future patch */
+ return IRQ_HANDLED;
+}
+
+static void bnge_init_nq_tree(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i, j;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_nq_ring_info *nqr = &bn->bnapi[i]->nq_ring;
+ struct bnge_ring_struct *ring = &nqr->ring_struct;
+
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ for (j = 0; j < nqr->cp_ring_count; j++) {
+ struct bnge_cp_ring_info *cpr = &nqr->cp_ring_arr[j];
+
+ ring = &cpr->ring_struct;
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ }
+ }
+}
+
+static netmem_ref __bnge_alloc_rx_netmem(struct bnge_net *bn,
+ dma_addr_t *mapping,
+ struct bnge_rx_ring_info *rxr,
+ unsigned int *offset,
+ gfp_t gfp)
+{
+ netmem_ref netmem;
+
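+	/* On large-page systems, hand out BNGE_RX_PAGE_SIZE fragments of a page */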
+ if (PAGE_SIZE > BNGE_RX_PAGE_SIZE) {
+ netmem = page_pool_alloc_frag_netmem(rxr->page_pool, offset,
+ BNGE_RX_PAGE_SIZE, gfp);
+ } else {
+ netmem = page_pool_alloc_netmems(rxr->page_pool, gfp);
+ *offset = 0;
+ }
+ if (!netmem)
+ return 0;
+
+ *mapping = page_pool_get_dma_addr_netmem(netmem) + *offset;
+ return netmem;
+}
+
+static u8 *__bnge_alloc_rx_frag(struct bnge_net *bn, dma_addr_t *mapping,
+ struct bnge_rx_ring_info *rxr,
+ gfp_t gfp)
+{
+ unsigned int offset;
+ struct page *page;
+
+ page = page_pool_alloc_frag(rxr->head_pool, &offset,
+ bn->rx_buf_size, gfp);
+ if (!page)
+ return NULL;
+
+ *mapping = page_pool_get_dma_addr(page) + bn->rx_dma_offset + offset;
+ return page_address(page) + offset;
+}
+
+static int bnge_alloc_rx_data(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr,
+ u16 prod, gfp_t gfp)
+{
+ struct bnge_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[RING_RX(bn, prod)];
+ struct rx_bd *rxbd;
+ dma_addr_t mapping;
+ u8 *data;
+
+ rxbd = &rxr->rx_desc_ring[RX_RING(bn, prod)][RX_IDX(prod)];
+ data = __bnge_alloc_rx_frag(bn, &mapping, rxr, gfp);
+ if (!data)
+ return -ENOMEM;
+
+ rx_buf->data = data;
+ rx_buf->data_ptr = data + bn->rx_offset;
+ rx_buf->mapping = mapping;
+
+ rxbd->rx_bd_haddr = cpu_to_le64(mapping);
+
+ return 0;
+}
+
+static int bnge_alloc_one_rx_ring_bufs(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr,
+ int ring_nr)
+{
+ u32 prod = rxr->rx_prod;
+ int i, rc = 0;
+
+ for (i = 0; i < bn->rx_ring_size; i++) {
+ rc = bnge_alloc_rx_data(bn, rxr, prod, GFP_KERNEL);
+ if (rc)
+ break;
+ prod = NEXT_RX(prod);
+ }
+
+ /* Abort if not a single buffer can be allocated */
+ if (rc && !i) {
+ netdev_err(bn->netdev,
+ "RX ring %d: allocated %d/%d buffers, abort\n",
+ ring_nr, i, bn->rx_ring_size);
+ return rc;
+ }
+
+ rxr->rx_prod = prod;
+
+ if (i < bn->rx_ring_size)
+ netdev_warn(bn->netdev,
+ "RX ring %d: allocated %d/%d buffers, continuing\n",
+ ring_nr, i, bn->rx_ring_size);
+ return 0;
+}
+
+static u16 bnge_find_next_agg_idx(struct bnge_rx_ring_info *rxr, u16 idx)
+{
+ u16 next, max = rxr->rx_agg_bmap_size;
+
+ next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
+ if (next >= max)
+ next = find_first_zero_bit(rxr->rx_agg_bmap, max);
+ return next;
+}
+
+static int bnge_alloc_rx_netmem(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr,
+ u16 prod, gfp_t gfp)
+{
+ struct bnge_sw_rx_agg_bd *rx_agg_buf;
+ u16 sw_prod = rxr->rx_sw_agg_prod;
+ unsigned int offset = 0;
+ struct rx_bd *rxbd;
+ dma_addr_t mapping;
+ netmem_ref netmem;
+
+ rxbd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bn, prod)][RX_IDX(prod)];
+ netmem = __bnge_alloc_rx_netmem(bn, &mapping, rxr, &offset, gfp);
+ if (!netmem)
+ return -ENOMEM;
+
+ if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
+ sw_prod = bnge_find_next_agg_idx(rxr, sw_prod);
+
+ __set_bit(sw_prod, rxr->rx_agg_bmap);
+ rx_agg_buf = &rxr->rx_agg_buf_ring[sw_prod];
+ rxr->rx_sw_agg_prod = RING_RX_AGG(bn, NEXT_RX_AGG(sw_prod));
+
+ rx_agg_buf->netmem = netmem;
+ rx_agg_buf->offset = offset;
+ rx_agg_buf->mapping = mapping;
+ rxbd->rx_bd_haddr = cpu_to_le64(mapping);
+ rxbd->rx_bd_opaque = sw_prod;
+ return 0;
+}
+
+static int bnge_alloc_one_agg_ring_bufs(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr,
+ int ring_nr)
+{
+ u32 prod = rxr->rx_agg_prod;
+ int i, rc = 0;
+
+ for (i = 0; i < bn->rx_agg_ring_size; i++) {
+ rc = bnge_alloc_rx_netmem(bn, rxr, prod, GFP_KERNEL);
+ if (rc)
+ break;
+ prod = NEXT_RX_AGG(prod);
+ }
+
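+	/* Fewer than MAX_SKB_FRAGS agg buffers cannot back a full packet */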
+ if (rc && i < MAX_SKB_FRAGS) {
+ netdev_err(bn->netdev,
+ "Agg ring %d: allocated %d/%d buffers (min %d), abort\n",
+ ring_nr, i, bn->rx_agg_ring_size, MAX_SKB_FRAGS);
+ goto err_free_one_agg_ring_bufs;
+ }
+
+ rxr->rx_agg_prod = prod;
+
+ if (i < bn->rx_agg_ring_size)
+ netdev_warn(bn->netdev,
+ "Agg ring %d: allocated %d/%d buffers, continuing\n",
+ ring_nr, i, bn->rx_agg_ring_size);
+ return 0;
+
+err_free_one_agg_ring_bufs:
+ bnge_free_one_agg_ring_bufs(bn, rxr);
+ return -ENOMEM;
+}
+
+static int bnge_alloc_one_rx_ring_pair_bufs(struct bnge_net *bn, int ring_nr)
+{
+ struct bnge_rx_ring_info *rxr = &bn->rx_ring[ring_nr];
+ int rc;
+
+ rc = bnge_alloc_one_rx_ring_bufs(bn, rxr, ring_nr);
+ if (rc)
+ return rc;
+
+ if (bnge_is_agg_reqd(bn->bd)) {
+ rc = bnge_alloc_one_agg_ring_bufs(bn, rxr, ring_nr);
+ if (rc)
+ goto err_free_one_rx_ring_bufs;
+ }
+ return 0;
+
+err_free_one_rx_ring_bufs:
+ bnge_free_one_rx_ring_bufs(bn, rxr);
+ return rc;
+}
+
+static void bnge_init_rxbd_pages(struct bnge_ring_struct *ring, u32 type)
+{
+ struct rx_bd **rx_desc_ring;
+ u32 prod;
+ int i;
+
+ rx_desc_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
+ for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
+ struct rx_bd *rxbd = rx_desc_ring[i];
+ int j;
+
+ for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
+ rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
+ rxbd->rx_bd_opaque = prod;
+ }
+ }
+}
+
+static void bnge_init_one_rx_ring_rxbd(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr)
+{
+ struct bnge_ring_struct *ring;
+ u32 type;
+
+ type = (bn->rx_buf_use_size << RX_BD_LEN_SHIFT) |
+ RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
+
+ if (NET_IP_ALIGN == 2)
+ type |= RX_BD_FLAGS_SOP;
+
+ ring = &rxr->rx_ring_struct;
+ bnge_init_rxbd_pages(ring, type);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+}
+
+static void bnge_init_one_agg_ring_rxbd(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr)
+{
+ struct bnge_ring_struct *ring;
+ u32 type;
+
+ ring = &rxr->rx_agg_ring_struct;
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ if (bnge_is_agg_reqd(bn->bd)) {
+ type = ((u32)BNGE_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
+ RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
+
+ bnge_init_rxbd_pages(ring, type);
+ }
+}
+
+static void bnge_init_one_rx_ring_pair(struct bnge_net *bn, int ring_nr)
+{
+ struct bnge_rx_ring_info *rxr;
+
+ rxr = &bn->rx_ring[ring_nr];
+ bnge_init_one_rx_ring_rxbd(bn, rxr);
+
+ netif_queue_set_napi(bn->netdev, ring_nr, NETDEV_QUEUE_TYPE_RX,
+ &rxr->bnapi->napi);
+
+ bnge_init_one_agg_ring_rxbd(bn, rxr);
+}
+
+static int bnge_alloc_rx_ring_pair_bufs(struct bnge_net *bn)
+{
+ int i, rc;
+
+ for (i = 0; i < bn->bd->rx_nr_rings; i++) {
+ rc = bnge_alloc_one_rx_ring_pair_bufs(bn, i);
+ if (rc)
+ goto err_free_rx_ring_pair_bufs;
+ }
+ return 0;
+
+err_free_rx_ring_pair_bufs:
+ bnge_free_rx_ring_pair_bufs(bn);
+ return rc;
+}
+
+static void bnge_init_rx_rings(struct bnge_net *bn)
+{
+ int i;
+
+#define BNGE_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
+#define BNGE_RX_DMA_OFFSET NET_SKB_PAD
+ bn->rx_offset = BNGE_RX_OFFSET;
+ bn->rx_dma_offset = BNGE_RX_DMA_OFFSET;
+
+ for (i = 0; i < bn->bd->rx_nr_rings; i++)
+ bnge_init_one_rx_ring_pair(bn, i);
+}
+
+static void bnge_init_tx_rings(struct bnge_net *bn)
+{
+ int i;
+
+ bn->tx_wake_thresh = max(bn->tx_ring_size / 2, BNGE_MIN_TX_DESC_CNT);
+
+ for (i = 0; i < bn->bd->tx_nr_rings; i++) {
+ struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
+ struct bnge_ring_struct *ring = &txr->tx_ring_struct;
+
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+
+ netif_queue_set_napi(bn->netdev, i, NETDEV_QUEUE_TYPE_TX,
+ &txr->bnapi->napi);
+ }
+}
+
+static void bnge_init_vnics(struct bnge_net *bn)
+{
+ struct bnge_vnic_info *vnic0 = &bn->vnic_info[BNGE_VNIC_DEFAULT];
+ int i;
+
+ for (i = 0; i < bn->nr_vnics; i++) {
+ struct bnge_vnic_info *vnic = &bn->vnic_info[i];
+ int j;
+
+ vnic->fw_vnic_id = INVALID_HW_RING_ID;
+ vnic->vnic_id = i;
+ for (j = 0; j < BNGE_MAX_CTX_PER_VNIC; j++)
+ vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
+
+ if (bn->vnic_info[i].rss_hash_key) {
+ if (i == BNGE_VNIC_DEFAULT) {
+ u8 *key = (void *)vnic->rss_hash_key;
+ int k;
+
+ if (!bn->rss_hash_key_valid &&
+ !bn->rss_hash_key_updated) {
+ get_random_bytes(bn->rss_hash_key,
+ HW_HASH_KEY_SIZE);
+ bn->rss_hash_key_updated = true;
+ }
+
+ memcpy(vnic->rss_hash_key, bn->rss_hash_key,
+ HW_HASH_KEY_SIZE);
+
+ if (!bn->rss_hash_key_updated)
+ continue;
+
+ bn->rss_hash_key_updated = false;
+ bn->rss_hash_key_valid = true;
+
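+				/* Cache the first 8 key bytes as the Toeplitz hash prefix */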
+ bn->toeplitz_prefix = 0;
+ for (k = 0; k < 8; k++) {
+ bn->toeplitz_prefix <<= 8;
+ bn->toeplitz_prefix |= key[k];
+ }
+ } else {
+ memcpy(vnic->rss_hash_key, vnic0->rss_hash_key,
+ HW_HASH_KEY_SIZE);
+ }
+ }
+ }
+}
+
+static void bnge_set_db_mask(struct bnge_net *bn, struct bnge_db_info *db,
+ u32 ring_type)
+{
+ switch (ring_type) {
+ case HWRM_RING_ALLOC_TX:
+ db->db_ring_mask = bn->tx_ring_mask;
+ break;
+ case HWRM_RING_ALLOC_RX:
+ db->db_ring_mask = bn->rx_ring_mask;
+ break;
+ case HWRM_RING_ALLOC_AGG:
+ db->db_ring_mask = bn->rx_agg_ring_mask;
+ break;
+ case HWRM_RING_ALLOC_CMPL:
+ case HWRM_RING_ALLOC_NQ:
+ db->db_ring_mask = bn->cp_ring_mask;
+ break;
+ }
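+	/* The epoch bit sits just above the ring index in doorbell writes */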
+ db->db_epoch_mask = db->db_ring_mask + 1;
+ db->db_epoch_shift = DBR_EPOCH_SFT - ilog2(db->db_epoch_mask);
+}
+
+static void bnge_set_db(struct bnge_net *bn, struct bnge_db_info *db,
+ u32 ring_type, u32 map_idx, u32 xid)
+{
+ struct bnge_dev *bd = bn->bd;
+
+ switch (ring_type) {
+ case HWRM_RING_ALLOC_TX:
+ db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
+ break;
+ case HWRM_RING_ALLOC_RX:
+ case HWRM_RING_ALLOC_AGG:
+ db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
+ break;
+ case HWRM_RING_ALLOC_CMPL:
+ db->db_key64 = DBR_PATH_L2;
+ break;
+ case HWRM_RING_ALLOC_NQ:
+ db->db_key64 = DBR_PATH_L2;
+ break;
+ }
+ db->db_key64 |= ((u64)xid << DBR_XID_SFT) | DBR_VALID;
+
+ db->doorbell = bd->bar1 + bd->db_offset;
+ bnge_set_db_mask(bn, db, ring_type);
+}
+
+static int bnge_hwrm_cp_ring_alloc(struct bnge_net *bn,
+ struct bnge_cp_ring_info *cpr)
+{
+ const u32 type = HWRM_RING_ALLOC_CMPL;
+ struct bnge_napi *bnapi = cpr->bnapi;
+ struct bnge_ring_struct *ring;
+ u32 map_idx = bnapi->index;
+ int rc;
+
+ ring = &cpr->ring_struct;
+ ring->handle = BNGE_SET_NQ_HDL(cpr);
+ rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx);
+ if (rc)
+ return rc;
+
+ bnge_set_db(bn, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
+ bnge_db_cq(bn, &cpr->cp_db, cpr->cp_raw_cons);
+
+ return 0;
+}
+
+static int bnge_hwrm_tx_ring_alloc(struct bnge_net *bn,
+ struct bnge_tx_ring_info *txr, u32 tx_idx)
+{
+ struct bnge_ring_struct *ring = &txr->tx_ring_struct;
+ const u32 type = HWRM_RING_ALLOC_TX;
+ int rc;
+
+ rc = hwrm_ring_alloc_send_msg(bn, ring, type, tx_idx);
+ if (rc)
+ return rc;
+
+ bnge_set_db(bn, &txr->tx_db, type, tx_idx, ring->fw_ring_id);
+
+ return 0;
+}
+
+static int bnge_hwrm_rx_agg_ring_alloc(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr)
+{
+ struct bnge_ring_struct *ring = &rxr->rx_agg_ring_struct;
+ u32 type = HWRM_RING_ALLOC_AGG;
+ struct bnge_dev *bd = bn->bd;
+ u32 grp_idx = ring->grp_idx;
+ u32 map_idx;
+ int rc;
+
+ map_idx = grp_idx + bd->rx_nr_rings;
+ rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx);
+ if (rc)
+ return rc;
+
+ bnge_set_db(bn, &rxr->rx_agg_db, type, map_idx,
+ ring->fw_ring_id);
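+	/* The RX doorbell was deferred until the agg ring exists; ring both now */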
+ bnge_db_write(bn->bd, &rxr->rx_agg_db, rxr->rx_agg_prod);
+ bnge_db_write(bn->bd, &rxr->rx_db, rxr->rx_prod);
+ bn->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
+
+ return 0;
+}
+
+static int bnge_hwrm_rx_ring_alloc(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr)
+{
+ struct bnge_ring_struct *ring = &rxr->rx_ring_struct;
+ struct bnge_napi *bnapi = rxr->bnapi;
+ u32 type = HWRM_RING_ALLOC_RX;
+ u32 map_idx = bnapi->index;
+ int rc;
+
+ rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx);
+ if (rc)
+ return rc;
+
+ bnge_set_db(bn, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
+ bn->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
+
+ return 0;
+}
+
+static int bnge_hwrm_ring_alloc(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ bool agg_rings;
+ int i, rc = 0;
+
+ agg_rings = !!(bnge_is_agg_reqd(bd));
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
+ struct bnge_ring_struct *ring = &nqr->ring_struct;
+ u32 type = HWRM_RING_ALLOC_NQ;
+ u32 map_idx = ring->map_idx;
+ unsigned int vector;
+
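+		/* Quiesce the vector while the NQ is created and its doorbell armed */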
+ vector = bd->irq_tbl[map_idx].vector;
+ disable_irq_nosync(vector);
+ rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx);
+ if (rc) {
+ enable_irq(vector);
+ goto err_out;
+ }
+ bnge_set_db(bn, &nqr->nq_db, type, map_idx, ring->fw_ring_id);
+ bnge_db_nq(bn, &nqr->nq_db, nqr->nq_raw_cons);
+ enable_irq(vector);
+ bn->grp_info[i].nq_fw_ring_id = ring->fw_ring_id;
+
+ if (!i) {
+ rc = bnge_hwrm_set_async_event_cr(bd, ring->fw_ring_id);
+ if (rc)
+ netdev_warn(bn->netdev, "Failed to set async event completion ring.\n");
+ }
+ }
+
+ for (i = 0; i < bd->tx_nr_rings; i++) {
+ struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
+
+ rc = bnge_hwrm_cp_ring_alloc(bn, txr->tx_cpr);
+ if (rc)
+ goto err_out;
+ rc = bnge_hwrm_tx_ring_alloc(bn, txr, i);
+ if (rc)
+ goto err_out;
+ }
+
+ for (i = 0; i < bd->rx_nr_rings; i++) {
+ struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
+ struct bnge_cp_ring_info *cpr;
+ struct bnge_ring_struct *ring;
+ struct bnge_napi *bnapi;
+ u32 map_idx, type;
+
+ rc = bnge_hwrm_rx_ring_alloc(bn, rxr);
+ if (rc)
+ goto err_out;
+ /* If we have agg rings, post agg buffers first. */
+ if (!agg_rings)
+ bnge_db_write(bn->bd, &rxr->rx_db, rxr->rx_prod);
+
+ cpr = rxr->rx_cpr;
+ bnapi = rxr->bnapi;
+ type = HWRM_RING_ALLOC_CMPL;
+ map_idx = bnapi->index;
+
+ ring = &cpr->ring_struct;
+ ring->handle = BNGE_SET_NQ_HDL(cpr);
+ rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx);
+ if (rc)
+ goto err_out;
+ bnge_set_db(bn, &cpr->cp_db, type, map_idx,
+ ring->fw_ring_id);
+ bnge_db_cq(bn, &cpr->cp_db, cpr->cp_raw_cons);
+ }
+
+ if (agg_rings) {
+ for (i = 0; i < bd->rx_nr_rings; i++) {
+ rc = bnge_hwrm_rx_agg_ring_alloc(bn, &bn->rx_ring[i]);
+ if (rc)
+ goto err_out;
+ }
+ }
+err_out:
+ return rc;
+}
+
+void bnge_fill_hw_rss_tbl(struct bnge_net *bn, struct bnge_vnic_info *vnic)
+{
+ __le16 *ring_tbl = vnic->rss_table;
+ struct bnge_rx_ring_info *rxr;
+ struct bnge_dev *bd = bn->bd;
+ u16 tbl_size, i;
+
+ tbl_size = bnge_get_rxfh_indir_size(bd);
+
+ for (i = 0; i < tbl_size; i++) {
+ u16 ring_id, j;
+
+ j = bd->rss_indir_tbl[i];
+ rxr = &bn->rx_ring[j];
+
+ ring_id = rxr->rx_ring_struct.fw_ring_id;
+ *ring_tbl++ = cpu_to_le16(ring_id);
+ ring_id = bnge_cp_ring_for_rx(rxr);
+ *ring_tbl++ = cpu_to_le16(ring_id);
+ }
+}
+
+static int bnge_hwrm_vnic_rss_cfg(struct bnge_net *bn,
+ struct bnge_vnic_info *vnic)
+{
+ int rc;
+
+ rc = bnge_hwrm_vnic_set_rss(bn, vnic, true);
+ if (rc) {
+ netdev_err(bn->netdev, "hwrm vnic %d set rss failure rc: %d\n",
+ vnic->vnic_id, rc);
+ return rc;
+ }
+ rc = bnge_hwrm_vnic_cfg(bn, vnic);
+ if (rc)
+ netdev_err(bn->netdev, "hwrm vnic %d cfg failure rc: %d\n",
+ vnic->vnic_id, rc);
+ return rc;
+}
+
+static int bnge_setup_vnic(struct bnge_net *bn, struct bnge_vnic_info *vnic)
+{
+ struct bnge_dev *bd = bn->bd;
+ int rc, i, nr_ctxs;
+
+ nr_ctxs = bnge_cal_nr_rss_ctxs(bd->rx_nr_rings);
+ for (i = 0; i < nr_ctxs; i++) {
+ rc = bnge_hwrm_vnic_ctx_alloc(bd, vnic, i);
+ if (rc) {
+ netdev_err(bn->netdev, "hwrm vnic %d ctx %d alloc failure rc: %d\n",
+ vnic->vnic_id, i, rc);
+ return -ENOMEM;
+ }
+ bn->rsscos_nr_ctxs++;
+ }
+
+ rc = bnge_hwrm_vnic_rss_cfg(bn, vnic);
+ if (rc)
+ return rc;
+
+ if (bnge_is_agg_reqd(bd)) {
+ rc = bnge_hwrm_vnic_set_hds(bn, vnic);
+ if (rc)
+ netdev_err(bn->netdev, "hwrm vnic %d set hds failure rc: %d\n",
+ vnic->vnic_id, rc);
+ }
+ return rc;
+}
+
+static void bnge_del_l2_filter(struct bnge_net *bn, struct bnge_l2_filter *fltr)
+{
+ if (!refcount_dec_and_test(&fltr->refcnt))
+ return;
+ hlist_del_rcu(&fltr->base.hash);
+ kfree_rcu(fltr, base.rcu);
+}
+
+static void bnge_init_l2_filter(struct bnge_net *bn,
+ struct bnge_l2_filter *fltr,
+ struct bnge_l2_key *key, u32 idx)
+{
+ struct hlist_head *head;
+
+ ether_addr_copy(fltr->l2_key.dst_mac_addr, key->dst_mac_addr);
+ fltr->l2_key.vlan = key->vlan;
+ fltr->base.type = BNGE_FLTR_TYPE_L2;
+
+ head = &bn->l2_fltr_hash_tbl[idx];
+ hlist_add_head_rcu(&fltr->base.hash, head);
+ refcount_set(&fltr->refcnt, 1);
+}
+
+static struct bnge_l2_filter *__bnge_lookup_l2_filter(struct bnge_net *bn,
+ struct bnge_l2_key *key,
+ u32 idx)
+{
+ struct bnge_l2_filter *fltr;
+ struct hlist_head *head;
+
+ head = &bn->l2_fltr_hash_tbl[idx];
+ hlist_for_each_entry_rcu(fltr, head, base.hash) {
+ struct bnge_l2_key *l2_key = &fltr->l2_key;
+
+ if (ether_addr_equal(l2_key->dst_mac_addr, key->dst_mac_addr) &&
+ l2_key->vlan == key->vlan)
+ return fltr;
+ }
+ return NULL;
+}
+
+static struct bnge_l2_filter *bnge_lookup_l2_filter(struct bnge_net *bn,
+ struct bnge_l2_key *key,
+ u32 idx)
+{
+ struct bnge_l2_filter *fltr;
+
+ rcu_read_lock();
+ fltr = __bnge_lookup_l2_filter(bn, key, idx);
+ if (fltr)
+ refcount_inc(&fltr->refcnt);
+ rcu_read_unlock();
+ return fltr;
+}
+
+static struct bnge_l2_filter *bnge_alloc_l2_filter(struct bnge_net *bn,
+ struct bnge_l2_key *key,
+ gfp_t gfp)
+{
+ struct bnge_l2_filter *fltr;
+ u32 idx;
+
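+	/* Hash the MAC/VLAN key and reuse a matching filter if one exists */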
+ idx = jhash2(&key->filter_key, BNGE_L2_KEY_SIZE, bn->hash_seed) &
+ BNGE_L2_FLTR_HASH_MASK;
+ fltr = bnge_lookup_l2_filter(bn, key, idx);
+ if (fltr)
+ return fltr;
+
+ fltr = kzalloc(sizeof(*fltr), gfp);
+ if (!fltr)
+ return ERR_PTR(-ENOMEM);
+
+ bnge_init_l2_filter(bn, fltr, key, idx);
+ return fltr;
+}
+
+static int bnge_hwrm_set_vnic_filter(struct bnge_net *bn, u16 vnic_id, u16 idx,
+ const u8 *mac_addr)
+{
+ struct bnge_l2_filter *fltr;
+ struct bnge_l2_key key;
+ int rc;
+
+ ether_addr_copy(key.dst_mac_addr, mac_addr);
+ key.vlan = 0;
+ fltr = bnge_alloc_l2_filter(bn, &key, GFP_KERNEL);
+ if (IS_ERR(fltr))
+ return PTR_ERR(fltr);
+
+ fltr->base.fw_vnic_id = bn->vnic_info[vnic_id].fw_vnic_id;
+ rc = bnge_hwrm_l2_filter_alloc(bn->bd, fltr);
+ if (rc)
+ goto err_del_l2_filter;
+ bn->vnic_info[vnic_id].l2_filters[idx] = fltr;
+ return rc;
+
+err_del_l2_filter:
+ bnge_del_l2_filter(bn, fltr);
+ return rc;
+}
+
+static bool bnge_mc_list_updated(struct bnge_net *bn, u32 *rx_mask)
+{
+ struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
+ struct net_device *dev = bn->netdev;
+ struct netdev_hw_addr *ha;
+ int mc_count = 0, off = 0;
+ bool update = false;
+ u8 *haddr;
+
+ netdev_for_each_mc_addr(ha, dev) {
+ if (mc_count >= BNGE_MAX_MC_ADDRS) {
+ *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
+ vnic->mc_list_count = 0;
+ return false;
+ }
+ haddr = ha->addr;
+ if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
+ memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
+ update = true;
+ }
+ off += ETH_ALEN;
+ mc_count++;
+ }
+ if (mc_count)
+ *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
+
+ if (mc_count != vnic->mc_list_count) {
+ vnic->mc_list_count = mc_count;
+ update = true;
+ }
+ return update;
+}
+
+static bool bnge_uc_list_updated(struct bnge_net *bn)
+{
+ struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
+ struct net_device *dev = bn->netdev;
+ struct netdev_hw_addr *ha;
+ int off = 0;
+
+ if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
+ return true;
+
+ netdev_for_each_uc_addr(ha, dev) {
+ if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
+ return true;
+
+ off += ETH_ALEN;
+ }
+ return false;
+}
+
+static bool bnge_promisc_ok(struct bnge_net *bn)
+{
+ return true;
+}
+
+static int bnge_cfg_def_vnic(struct bnge_net *bn)
+{
+ struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
+ struct net_device *dev = bn->netdev;
+ struct bnge_dev *bd = bn->bd;
+ struct netdev_hw_addr *ha;
+ int i, off = 0, rc;
+ bool uc_update;
+
+ netif_addr_lock_bh(dev);
+ uc_update = bnge_uc_list_updated(bn);
+ netif_addr_unlock_bh(dev);
+
+ if (!uc_update)
+ goto skip_uc;
+
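+	/* Filter 0 holds the device MAC; rebuild only the extra UC filters */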
+ for (i = 1; i < vnic->uc_filter_count; i++) {
+ struct bnge_l2_filter *fltr = vnic->l2_filters[i];
+
+ bnge_hwrm_l2_filter_free(bd, fltr);
+ bnge_del_l2_filter(bn, fltr);
+ }
+
+ vnic->uc_filter_count = 1;
+
+ netif_addr_lock_bh(dev);
+ if (netdev_uc_count(dev) > (BNGE_MAX_UC_ADDRS - 1)) {
+ vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
+ } else {
+ netdev_for_each_uc_addr(ha, dev) {
+ memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
+ off += ETH_ALEN;
+ vnic->uc_filter_count++;
+ }
+ }
+ netif_addr_unlock_bh(dev);
+
+ for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
+ rc = bnge_hwrm_set_vnic_filter(bn, 0, i, vnic->uc_list + off);
+ if (rc) {
+ netdev_err(dev, "HWRM vnic filter failure rc: %d\n", rc);
+ vnic->uc_filter_count = i;
+ return rc;
+ }
+ }
+
+skip_uc:
+ if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
+ !bnge_promisc_ok(bn))
+ vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
+ rc = bnge_hwrm_cfa_l2_set_rx_mask(bd, vnic);
+ if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
+ netdev_info(dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
+ rc);
+ vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
+ vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
+ vnic->mc_list_count = 0;
+ rc = bnge_hwrm_cfa_l2_set_rx_mask(bd, vnic);
+ }
+ if (rc)
+ netdev_err(dev, "HWRM cfa l2 rx mask failure rc: %d\n",
+ rc);
+
+ return rc;
+}
+
+static void bnge_hwrm_vnic_free(struct bnge_net *bn)
+{
+ int i;
+
+ for (i = 0; i < bn->nr_vnics; i++)
+ bnge_hwrm_vnic_free_one(bn->bd, &bn->vnic_info[i]);
+}
+
+static void bnge_hwrm_vnic_ctx_free(struct bnge_net *bn)
+{
+ int i, j;
+
+ for (i = 0; i < bn->nr_vnics; i++) {
+ struct bnge_vnic_info *vnic = &bn->vnic_info[i];
+
+ for (j = 0; j < BNGE_MAX_CTX_PER_VNIC; j++) {
+ if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
+ bnge_hwrm_vnic_ctx_free_one(bn->bd, vnic, j);
+ }
+ }
+ bn->rsscos_nr_ctxs = 0;
+}
+
+static void bnge_hwrm_clear_vnic_filter(struct bnge_net *bn)
+{
+ struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
+ int i;
+
+ for (i = 0; i < vnic->uc_filter_count; i++) {
+ struct bnge_l2_filter *fltr = vnic->l2_filters[i];
+
+ bnge_hwrm_l2_filter_free(bn->bd, fltr);
+ bnge_del_l2_filter(bn, fltr);
+ }
+
+ vnic->uc_filter_count = 0;
+}
+
+static void bnge_clear_vnic(struct bnge_net *bn)
+{
+ bnge_hwrm_clear_vnic_filter(bn);
+ bnge_hwrm_vnic_free(bn);
+ bnge_hwrm_vnic_ctx_free(bn);
+}
+
+static void bnge_hwrm_rx_ring_free(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr,
+ bool close_path)
+{
+ struct bnge_ring_struct *ring = &rxr->rx_ring_struct;
+ u32 grp_idx = rxr->bnapi->index;
+ u32 cmpl_ring_id;
+
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
+ return;
+
+ cmpl_ring_id = bnge_cp_ring_for_rx(rxr);
+ hwrm_ring_free_send_msg(bn, ring,
+ RING_FREE_REQ_RING_TYPE_RX,
+ close_path ? cmpl_ring_id :
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ bn->grp_info[grp_idx].rx_fw_ring_id = INVALID_HW_RING_ID;
+}
+
+static void bnge_hwrm_rx_agg_ring_free(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr,
+ bool close_path)
+{
+ struct bnge_ring_struct *ring = &rxr->rx_agg_ring_struct;
+ u32 grp_idx = rxr->bnapi->index;
+ u32 cmpl_ring_id;
+
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
+ return;
+
+ cmpl_ring_id = bnge_cp_ring_for_rx(rxr);
+ hwrm_ring_free_send_msg(bn, ring, RING_FREE_REQ_RING_TYPE_RX_AGG,
+ close_path ? cmpl_ring_id :
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ bn->grp_info[grp_idx].agg_fw_ring_id = INVALID_HW_RING_ID;
+}
+
+static void bnge_hwrm_tx_ring_free(struct bnge_net *bn,
+ struct bnge_tx_ring_info *txr,
+ bool close_path)
+{
+ struct bnge_ring_struct *ring = &txr->tx_ring_struct;
+ u32 cmpl_ring_id;
+
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
+ return;
+
+ cmpl_ring_id = close_path ? bnge_cp_ring_for_tx(txr) :
+ INVALID_HW_RING_ID;
+ hwrm_ring_free_send_msg(bn, ring, RING_FREE_REQ_RING_TYPE_TX,
+ cmpl_ring_id);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+}
+
+static void bnge_hwrm_cp_ring_free(struct bnge_net *bn,
+ struct bnge_cp_ring_info *cpr)
+{
+ struct bnge_ring_struct *ring;
+
+ ring = &cpr->ring_struct;
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
+ return;
+
+ hwrm_ring_free_send_msg(bn, ring, RING_FREE_REQ_RING_TYPE_L2_CMPL,
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+}
+
+static void bnge_hwrm_ring_free(struct bnge_net *bn, bool close_path)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ if (!bn->bnapi)
+ return;
+
+ for (i = 0; i < bd->tx_nr_rings; i++)
+ bnge_hwrm_tx_ring_free(bn, &bn->tx_ring[i], close_path);
+
+ for (i = 0; i < bd->rx_nr_rings; i++) {
+ bnge_hwrm_rx_ring_free(bn, &bn->rx_ring[i], close_path);
+ bnge_hwrm_rx_agg_ring_free(bn, &bn->rx_ring[i], close_path);
+ }
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_nq_ring_info *nqr;
+ struct bnge_ring_struct *ring;
+ int j;
+
+ nqr = &bnapi->nq_ring;
+ for (j = 0; j < nqr->cp_ring_count && nqr->cp_ring_arr; j++)
+ bnge_hwrm_cp_ring_free(bn, &nqr->cp_ring_arr[j]);
+
+ ring = &nqr->ring_struct;
+ if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ hwrm_ring_free_send_msg(bn, ring,
+ RING_FREE_REQ_RING_TYPE_NQ,
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ bn->grp_info[i].nq_fw_ring_id = INVALID_HW_RING_ID;
+ }
+ }
+}
+
+static void bnge_setup_msix(struct bnge_net *bn)
+{
+ struct net_device *dev = bn->netdev;
+ struct bnge_dev *bd = bn->bd;
+ int len, i;
+
+ len = sizeof(bd->irq_tbl[0].name);
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ int map_idx = bnge_cp_num_to_irq_num(bn, i);
+ char *attr;
+
+ if (bd->flags & BNGE_EN_SHARED_CHNL)
+ attr = "TxRx";
+ else if (i < bd->rx_nr_rings)
+ attr = "rx";
+ else
+ attr = "tx";
+
+ snprintf(bd->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
+ attr, i);
+ bd->irq_tbl[map_idx].handler = bnge_msix;
+ }
+}
+
+static int bnge_setup_interrupts(struct bnge_net *bn)
+{
+ struct net_device *dev = bn->netdev;
+ struct bnge_dev *bd = bn->bd;
+
+ bnge_setup_msix(bn);
+
+ return netif_set_real_num_queues(dev, bd->tx_nr_rings, bd->rx_nr_rings);
+}
+
+static void bnge_hwrm_resource_free(struct bnge_net *bn, bool close_path)
+{
+ bnge_clear_vnic(bn);
+ bnge_hwrm_ring_free(bn, close_path);
+ bnge_hwrm_stat_ctx_free(bn);
+}
+
+static void bnge_free_irq(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ struct bnge_irq *irq;
+ int i;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ int map_idx = bnge_cp_num_to_irq_num(bn, i);
+
+ irq = &bd->irq_tbl[map_idx];
+ if (irq->requested) {
+ if (irq->have_cpumask) {
+ irq_set_affinity_hint(irq->vector, NULL);
+ free_cpumask_var(irq->cpu_mask);
+ irq->have_cpumask = 0;
+ }
+ free_irq(irq->vector, bn->bnapi[i]);
+ }
+
+ irq->requested = 0;
+ }
+}
+
+static int bnge_request_irq(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i, rc;
+
+ rc = bnge_setup_interrupts(bn);
+ if (rc) {
+ netdev_err(bn->netdev, "bnge_setup_interrupts err: %d\n", rc);
+ return rc;
+ }
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ int map_idx = bnge_cp_num_to_irq_num(bn, i);
+ struct bnge_irq *irq = &bd->irq_tbl[map_idx];
+
+ rc = request_irq(irq->vector, irq->handler, 0, irq->name,
+ bn->bnapi[i]);
+ if (rc)
+ goto err_free_irq;
+
+ netif_napi_set_irq_locked(&bn->bnapi[i]->napi, irq->vector);
+ irq->requested = 1;
+
+ if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
+ int numa_node = dev_to_node(&bd->pdev->dev);
+
+ irq->have_cpumask = 1;
+ cpumask_set_cpu(cpumask_local_spread(i, numa_node),
+ irq->cpu_mask);
+ rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
+ if (rc) {
+ netdev_warn(bn->netdev,
+ "Set affinity failed, IRQ = %d\n",
+ irq->vector);
+ goto err_free_irq;
+ }
+ }
+ }
+ return 0;
+
+err_free_irq:
+ bnge_free_irq(bn);
+ return rc;
+}
+
+static int bnge_init_chip(struct bnge_net *bn)
+{
+ struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
+ struct bnge_dev *bd = bn->bd;
+ int rc;
+
+#define BNGE_DEF_STATS_COAL_TICKS 1000000
+ bn->stats_coal_ticks = BNGE_DEF_STATS_COAL_TICKS;
+
+ rc = bnge_hwrm_stat_ctx_alloc(bn);
+ if (rc) {
+ netdev_err(bn->netdev, "hwrm stat ctx alloc failure rc: %d\n", rc);
+ goto err_out;
+ }
+
+ rc = bnge_hwrm_ring_alloc(bn);
+ if (rc) {
+ netdev_err(bn->netdev, "hwrm ring alloc failure rc: %d\n", rc);
+ goto err_out;
+ }
+
+ rc = bnge_hwrm_vnic_alloc(bd, vnic, bd->rx_nr_rings);
+ if (rc) {
+ netdev_err(bn->netdev, "hwrm vnic alloc failure rc: %d\n", rc);
+ goto err_out;
+ }
+
+ rc = bnge_setup_vnic(bn, vnic);
+ if (rc)
+ goto err_out;
+
+ if (bd->rss_cap & BNGE_RSS_CAP_RSS_HASH_TYPE_DELTA)
+ bnge_hwrm_update_rss_hash_cfg(bn);
+
+ /* Filter for default vnic 0 */
+ rc = bnge_hwrm_set_vnic_filter(bn, 0, 0, bn->netdev->dev_addr);
+ if (rc) {
+ netdev_err(bn->netdev, "HWRM vnic filter failure rc: %d\n", rc);
+ goto err_out;
+ }
+ vnic->uc_filter_count = 1;
+
+ vnic->rx_mask = 0;
+
+ if (bn->netdev->flags & IFF_BROADCAST)
+ vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
+
+ if (bn->netdev->flags & IFF_PROMISC)
+ vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
+
+ if (bn->netdev->flags & IFF_ALLMULTI) {
+ vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
+ vnic->mc_list_count = 0;
+ } else if (bn->netdev->flags & IFF_MULTICAST) {
+ u32 mask = 0;
+
+ bnge_mc_list_updated(bn, &mask);
+ vnic->rx_mask |= mask;
+ }
+
+ rc = bnge_cfg_def_vnic(bn);
+ if (rc)
+ goto err_out;
+ return 0;
+
+err_out:
+ bnge_hwrm_resource_free(bn, 0);
+ return rc;
+}
+
+static int bnge_napi_poll(struct napi_struct *napi, int budget)
+{
+ int work_done = 0;
+
+ /* defer NAPI implementation to next patch series */
+ napi_complete_done(napi, work_done);
+
+ return work_done;
+}
+
+static void bnge_init_napi(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ struct bnge_napi *bnapi;
+ int i;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ bnapi = bn->bnapi[i];
+ netif_napi_add_config_locked(bn->netdev, &bnapi->napi,
+ bnge_napi_poll, bnapi->index);
+ }
+}
+
+static void bnge_del_napi(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ for (i = 0; i < bd->rx_nr_rings; i++)
+ netif_queue_set_napi(bn->netdev, i, NETDEV_QUEUE_TYPE_RX, NULL);
+ for (i = 0; i < bd->tx_nr_rings; i++)
+ netif_queue_set_napi(bn->netdev, i, NETDEV_QUEUE_TYPE_TX, NULL);
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+
+ __netif_napi_del_locked(&bnapi->napi);
+ }
+
+ /* Wait for RCU grace period after removing NAPI instances */
+ synchronize_net();
+}
+
+static int bnge_init_nic(struct bnge_net *bn)
+{
+ int rc;
+
+ bnge_init_nq_tree(bn);
+
+ bnge_init_rx_rings(bn);
+ rc = bnge_alloc_rx_ring_pair_bufs(bn);
+ if (rc)
+ return rc;
+
+ bnge_init_tx_rings(bn);
+
+ rc = bnge_init_ring_grps(bn);
+ if (rc)
+ goto err_free_rx_ring_pair_bufs;
+
+ bnge_init_vnics(bn);
+
+ rc = bnge_init_chip(bn);
+ if (rc)
+ goto err_free_ring_grps;
+ return rc;
+
+err_free_ring_grps:
+ bnge_free_ring_grps(bn);
+ return rc;
+
+err_free_rx_ring_pair_bufs:
+ bnge_free_rx_ring_pair_bufs(bn);
+ return rc;
+}
+
+static int bnge_open_core(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int rc;
+
+ netif_carrier_off(bn->netdev);
+
+ rc = bnge_reserve_rings(bd);
+ if (rc) {
+ netdev_err(bn->netdev, "bnge_reserve_rings err: %d\n", rc);
+ return rc;
+ }
+
+ rc = bnge_alloc_core(bn);
+ if (rc) {
+ netdev_err(bn->netdev, "bnge_alloc_core err: %d\n", rc);
+ return rc;
+ }
+
+ bnge_init_napi(bn);
+ rc = bnge_request_irq(bn);
+ if (rc) {
+ netdev_err(bn->netdev, "bnge_request_irq err: %d\n", rc);
+ goto err_del_napi;
+ }
+
+ rc = bnge_init_nic(bn);
+ if (rc) {
+ netdev_err(bn->netdev, "bnge_init_nic err: %d\n", rc);
+ goto err_free_irq;
+ }
+ set_bit(BNGE_STATE_OPEN, &bd->state);
+ return 0;
+
+err_free_irq:
+ bnge_free_irq(bn);
+err_del_napi:
+ bnge_del_napi(bn);
+ bnge_free_core(bn);
+ return rc;
+}
+
+static netdev_tx_t bnge_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ dev_kfree_skb_any(skb);
+
+ return NETDEV_TX_OK;
+}
+
+static int bnge_open(struct net_device *dev)
+{
+ struct bnge_net *bn = netdev_priv(dev);
+ int rc;
+
+ rc = bnge_open_core(bn);
+ if (rc)
+ netdev_err(dev, "bnge_open_core err: %d\n", rc);
+
+ return rc;
+}
+
+static int bnge_shutdown_nic(struct bnge_net *bn)
+{
+ /* TODO: close_path = 0 until we make NAPI functional */
+ bnge_hwrm_resource_free(bn, 0);
+ return 0;
+}
+
+static void bnge_close_core(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+
+ clear_bit(BNGE_STATE_OPEN, &bd->state);
+ bnge_shutdown_nic(bn);
+ bnge_free_all_rings_bufs(bn);
+ bnge_free_irq(bn);
+ bnge_del_napi(bn);
+
+ bnge_free_core(bn);
+}
+
+static int bnge_close(struct net_device *dev)
+{
+ struct bnge_net *bn = netdev_priv(dev);
+
+ bnge_close_core(bn);
+
+ return 0;
+}
+
+static const struct net_device_ops bnge_netdev_ops = {
+ .ndo_open = bnge_open,
+ .ndo_stop = bnge_close,
+ .ndo_start_xmit = bnge_start_xmit,
+};
+
+static void bnge_init_mac_addr(struct bnge_dev *bd)
+{
+ eth_hw_addr_set(bd->netdev, bd->pf.mac_addr);
+}
+
+static void bnge_set_tpa_flags(struct bnge_dev *bd)
+{
+ struct bnge_net *bn = netdev_priv(bd->netdev);
+
+ bn->priv_flags &= ~BNGE_NET_EN_TPA;
+
+ if (bd->netdev->features & NETIF_F_LRO)
+ bn->priv_flags |= BNGE_NET_EN_LRO;
+ else if (bd->netdev->features & NETIF_F_GRO_HW)
+ bn->priv_flags |= BNGE_NET_EN_GRO;
+}
+
+static void bnge_init_l2_fltr_tbl(struct bnge_net *bn)
+{
+ int i;
+
+ for (i = 0; i < BNGE_L2_FLTR_HASH_SIZE; i++)
+ INIT_HLIST_HEAD(&bn->l2_fltr_hash_tbl[i]);
+ get_random_bytes(&bn->hash_seed, sizeof(bn->hash_seed));
+}
+
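+/* Derive RX/TX/aggregation/completion ring geometry (buffer sizes, page
+ * counts and index masks) from the configured ring sizes and MTU.
+ */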
+void bnge_set_ring_params(struct bnge_dev *bd)
+{
+ struct bnge_net *bn = netdev_priv(bd->netdev);
+ u32 ring_size, rx_size, rx_space, max_rx_cmpl;
+ u32 agg_factor = 0, agg_ring_size = 0;
+
+ /* 8 for CRC and VLAN */
+ rx_size = SKB_DATA_ALIGN(bn->netdev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
+
+ rx_space = rx_size + ALIGN(NET_SKB_PAD, 8) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ bn->rx_copy_thresh = BNGE_RX_COPY_THRESH;
+ ring_size = bn->rx_ring_size;
+ bn->rx_agg_ring_size = 0;
+ bn->rx_agg_nr_pages = 0;
+
+ if (bn->priv_flags & BNGE_NET_EN_TPA)
+ agg_factor = min_t(u32, 4, 65536 / BNGE_RX_PAGE_SIZE);
+
+ bn->priv_flags &= ~BNGE_NET_EN_JUMBO;
+ if (rx_space > PAGE_SIZE) {
+ u32 jumbo_factor;
+
+ bn->priv_flags |= BNGE_NET_EN_JUMBO;
+ jumbo_factor = PAGE_ALIGN(bn->netdev->mtu - 40) >> PAGE_SHIFT;
+ if (jumbo_factor > agg_factor)
+ agg_factor = jumbo_factor;
+ }
+ if (agg_factor) {
+ if (ring_size > BNGE_MAX_RX_DESC_CNT_JUM_ENA) {
+ ring_size = BNGE_MAX_RX_DESC_CNT_JUM_ENA;
+ netdev_warn(bn->netdev, "RX ring size reduced from %d to %d due to jumbo ring\n",
+ bn->rx_ring_size, ring_size);
+ bn->rx_ring_size = ring_size;
+ }
+ agg_ring_size = ring_size * agg_factor;
+
+ bn->rx_agg_nr_pages = bnge_adjust_pow_two(agg_ring_size,
+ RX_DESC_CNT);
+ if (bn->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
+ u32 tmp = agg_ring_size;
+
+ bn->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
+ agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
+ netdev_warn(bn->netdev, "RX agg ring size %d reduced to %d.\n",
+ tmp, agg_ring_size);
+ }
+ bn->rx_agg_ring_size = agg_ring_size;
+ bn->rx_agg_ring_mask = (bn->rx_agg_nr_pages * RX_DESC_CNT) - 1;
+
+ rx_size = SKB_DATA_ALIGN(BNGE_RX_COPY_THRESH + NET_IP_ALIGN);
+ rx_space = rx_size + NET_SKB_PAD +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ }
+
+ bn->rx_buf_use_size = rx_size;
+ bn->rx_buf_size = rx_space;
+
+ bn->rx_nr_pages = bnge_adjust_pow_two(ring_size, RX_DESC_CNT);
+ bn->rx_ring_mask = (bn->rx_nr_pages * RX_DESC_CNT) - 1;
+
+ ring_size = bn->tx_ring_size;
+ bn->tx_nr_pages = bnge_adjust_pow_two(ring_size, TX_DESC_CNT);
+ bn->tx_ring_mask = (bn->tx_nr_pages * TX_DESC_CNT) - 1;
+
+ max_rx_cmpl = bn->rx_ring_size;
+
+ if (bn->priv_flags & BNGE_NET_EN_TPA)
+ max_rx_cmpl += bd->max_tpa_v2;
+ ring_size = max_rx_cmpl * 2 + agg_ring_size + bn->tx_ring_size;
+ bn->cp_ring_size = ring_size;
+
+ bn->cp_nr_pages = bnge_adjust_pow_two(ring_size, CP_DESC_CNT);
+ if (bn->cp_nr_pages > MAX_CP_PAGES) {
+ bn->cp_nr_pages = MAX_CP_PAGES;
+ bn->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
+ netdev_warn(bn->netdev, "completion ring size %d reduced to %d.\n",
+ ring_size, bn->cp_ring_size);
+ }
+ bn->cp_bit = bn->cp_nr_pages * CP_DESC_CNT;
+ bn->cp_ring_mask = bn->cp_bit - 1;
+}
+
+int bnge_netdev_alloc(struct bnge_dev *bd, int max_irqs)
+{
+ struct net_device *netdev;
+ struct bnge_net *bn;
+ int rc;
+
+ netdev = alloc_etherdev_mqs(sizeof(*bn), max_irqs * BNGE_MAX_QUEUE,
+ max_irqs);
+ if (!netdev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(netdev, bd->dev);
+ bd->netdev = netdev;
+
+ netdev->netdev_ops = &bnge_netdev_ops;
+
+ bnge_set_ethtool_ops(netdev);
+
+ bn = netdev_priv(netdev);
+ bn->netdev = netdev;
+ bn->bd = bd;
+
+ netdev->min_mtu = ETH_ZLEN;
+ netdev->max_mtu = bd->max_mtu;
+
+ netdev->hw_features = NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_SG |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_GRE |
+ NETIF_F_GSO_IPXIP4 |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_GRE_CSUM |
+ NETIF_F_GSO_PARTIAL |
+ NETIF_F_RXHASH |
+ NETIF_F_RXCSUM |
+ NETIF_F_GRO;
+
+ if (bd->flags & BNGE_EN_UDP_GSO_SUPP)
+ netdev->hw_features |= NETIF_F_GSO_UDP_L4;
+
+ if (BNGE_SUPPORTS_TPA(bd))
+ netdev->hw_features |= NETIF_F_LRO;
+
+ netdev->hw_enc_features = NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_SG |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_GRE |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_GRE_CSUM |
+ NETIF_F_GSO_IPXIP4 |
+ NETIF_F_GSO_PARTIAL;
+
+ if (bd->flags & BNGE_EN_UDP_GSO_SUPP)
+ netdev->hw_enc_features |= NETIF_F_GSO_UDP_L4;
+
+ netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_GRE_CSUM;
+
+ netdev->vlan_features = netdev->hw_features | NETIF_F_HIGHDMA;
+ if (bd->fw_cap & BNGE_FW_CAP_VLAN_RX_STRIP)
+ netdev->hw_features |= BNGE_HW_FEATURE_VLAN_ALL_RX;
+ if (bd->fw_cap & BNGE_FW_CAP_VLAN_TX_INSERT)
+ netdev->hw_features |= BNGE_HW_FEATURE_VLAN_ALL_TX;
+
+ if (BNGE_SUPPORTS_TPA(bd))
+ netdev->hw_features |= NETIF_F_GRO_HW;
+
+ netdev->features |= netdev->hw_features | NETIF_F_HIGHDMA;
+
+ if (netdev->features & NETIF_F_GRO_HW)
+ netdev->features &= ~NETIF_F_LRO;
+
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ netif_set_tso_max_size(netdev, GSO_MAX_SIZE);
+ if (bd->tso_max_segs)
+ netif_set_tso_max_segs(netdev, bd->tso_max_segs);
+
+ bn->rx_ring_size = BNGE_DEFAULT_RX_RING_SIZE;
+ bn->tx_ring_size = BNGE_DEFAULT_TX_RING_SIZE;
+ bn->rx_dir = DMA_FROM_DEVICE;
+
+ bnge_set_tpa_flags(bd);
+ bnge_set_ring_params(bd);
+
+ bnge_init_l2_fltr_tbl(bn);
+ bnge_init_mac_addr(bd);
+
+ netdev->request_ops_lock = true;
+ rc = register_netdev(netdev);
+ if (rc) {
+ dev_err(bd->dev, "Register netdev failed rc: %d\n", rc);
+ goto err_netdev;
+ }
+
+ return 0;
+
+err_netdev:
+ free_netdev(netdev);
+ return rc;
+}
+
+void bnge_netdev_free(struct bnge_dev *bd)
+{
+ struct net_device *netdev = bd->netdev;
+
+ unregister_netdev(netdev);
+ free_netdev(netdev);
+ bd->netdev = NULL;
+}
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h
new file mode 100644
index 000000000000..fb3b961536ba
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h
@@ -0,0 +1,454 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_NETDEV_H_
+#define _BNGE_NETDEV_H_
+
+#include <linux/bnxt/hsi.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/refcount.h>
+#include "bnge_db.h"
+
+struct tx_bd {
+ __le32 tx_bd_len_flags_type;
+ #define TX_BD_TYPE (0x3f << 0)
+ #define TX_BD_TYPE_SHORT_TX_BD (0x00 << 0)
+ #define TX_BD_TYPE_LONG_TX_BD (0x10 << 0)
+ #define TX_BD_FLAGS_PACKET_END (1 << 6)
+ #define TX_BD_FLAGS_NO_CMPL (1 << 7)
+ #define TX_BD_FLAGS_BD_CNT (0x1f << 8)
+ #define TX_BD_FLAGS_BD_CNT_SHIFT 8
+ #define TX_BD_FLAGS_LHINT (3 << 13)
+ #define TX_BD_FLAGS_LHINT_SHIFT 13
+ #define TX_BD_FLAGS_LHINT_512_AND_SMALLER (0 << 13)
+ #define TX_BD_FLAGS_LHINT_512_TO_1023 (1 << 13)
+ #define TX_BD_FLAGS_LHINT_1024_TO_2047 (2 << 13)
+ #define TX_BD_FLAGS_LHINT_2048_AND_LARGER (3 << 13)
+ #define TX_BD_FLAGS_COAL_NOW (1 << 15)
+ #define TX_BD_LEN (0xffff << 16)
+ #define TX_BD_LEN_SHIFT 16
+ u32 tx_bd_opaque;
+ __le64 tx_bd_haddr;
+} __packed;
+
+struct rx_bd {
+ __le32 rx_bd_len_flags_type;
+ #define RX_BD_TYPE (0x3f << 0)
+ #define RX_BD_TYPE_RX_PACKET_BD 0x4
+ #define RX_BD_TYPE_RX_BUFFER_BD 0x5
+ #define RX_BD_TYPE_RX_AGG_BD 0x6
+ #define RX_BD_TYPE_16B_BD_SIZE (0 << 4)
+ #define RX_BD_TYPE_32B_BD_SIZE (1 << 4)
+ #define RX_BD_TYPE_48B_BD_SIZE (2 << 4)
+ #define RX_BD_TYPE_64B_BD_SIZE (3 << 4)
+ #define RX_BD_FLAGS_SOP (1 << 6)
+ #define RX_BD_FLAGS_EOP (1 << 7)
+ #define RX_BD_FLAGS_BUFFERS (3 << 8)
+ #define RX_BD_FLAGS_1_BUFFER_PACKET (0 << 8)
+ #define RX_BD_FLAGS_2_BUFFER_PACKET (1 << 8)
+ #define RX_BD_FLAGS_3_BUFFER_PACKET (2 << 8)
+ #define RX_BD_FLAGS_4_BUFFER_PACKET (3 << 8)
+ #define RX_BD_LEN (0xffff << 16)
+ #define RX_BD_LEN_SHIFT 16
+ u32 rx_bd_opaque;
+ __le64 rx_bd_haddr;
+};
+
+struct tx_cmp {
+ __le32 tx_cmp_flags_type;
+ #define CMP_TYPE (0x3f << 0)
+ #define CMP_TYPE_TX_L2_CMP 0
+ #define CMP_TYPE_TX_L2_COAL_CMP 2
+ #define CMP_TYPE_TX_L2_PKT_TS_CMP 4
+ #define CMP_TYPE_RX_L2_CMP 17
+ #define CMP_TYPE_RX_AGG_CMP 18
+ #define CMP_TYPE_RX_L2_TPA_START_CMP 19
+ #define CMP_TYPE_RX_L2_TPA_END_CMP 21
+ #define CMP_TYPE_RX_TPA_AGG_CMP 22
+ #define CMP_TYPE_RX_L2_V3_CMP 23
+ #define CMP_TYPE_RX_L2_TPA_START_V3_CMP 25
+ #define CMP_TYPE_STATUS_CMP 32
+ #define CMP_TYPE_REMOTE_DRIVER_REQ 34
+ #define CMP_TYPE_REMOTE_DRIVER_RESP 36
+ #define CMP_TYPE_ERROR_STATUS 48
+ #define CMPL_BASE_TYPE_STAT_EJECT 0x1aUL
+ #define CMPL_BASE_TYPE_HWRM_DONE 0x20UL
+ #define CMPL_BASE_TYPE_HWRM_FWD_REQ 0x22UL
+ #define CMPL_BASE_TYPE_HWRM_FWD_RESP 0x24UL
+ #define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define TX_CMP_FLAGS_ERROR (1 << 6)
+ #define TX_CMP_FLAGS_PUSH (1 << 7)
+ u32 tx_cmp_opaque;
+ __le32 tx_cmp_errors_v;
+ #define TX_CMP_V (1 << 0)
+ #define TX_CMP_ERRORS_BUFFER_ERROR (7 << 1)
+ #define TX_CMP_ERRORS_BUFFER_ERROR_NO_ERROR 0
+ #define TX_CMP_ERRORS_BUFFER_ERROR_BAD_FORMAT 2
+ #define TX_CMP_ERRORS_BUFFER_ERROR_INVALID_STAG 4
+ #define TX_CMP_ERRORS_BUFFER_ERROR_STAG_BOUNDS 5
+ #define TX_CMP_ERRORS_ZERO_LENGTH_PKT (1 << 4)
+ #define TX_CMP_ERRORS_EXCESSIVE_BD_LEN (1 << 5)
+ #define TX_CMP_ERRORS_DMA_ERROR (1 << 6)
+ #define TX_CMP_ERRORS_HINT_TOO_SHORT (1 << 7)
+ __le32 sq_cons_idx;
+ #define TX_CMP_SQ_CONS_IDX_MASK 0x00ffffff
+};
+
+struct bnge_sw_tx_bd {
+ struct sk_buff *skb;
+ DEFINE_DMA_UNMAP_ADDR(mapping);
+ DEFINE_DMA_UNMAP_LEN(len);
+ struct page *page;
+ u8 is_ts_pkt;
+ u8 is_push;
+ u8 action;
+ unsigned short nr_frags;
+ union {
+ u16 rx_prod;
+ u16 txts_prod;
+ };
+};
+
+struct bnge_sw_rx_bd {
+ void *data;
+ u8 *data_ptr;
+ dma_addr_t mapping;
+};
+
+struct bnge_sw_rx_agg_bd {
+ netmem_ref netmem;
+ unsigned int offset;
+ dma_addr_t mapping;
+};
+
+#define HWRM_RING_ALLOC_TX 0x1
+#define HWRM_RING_ALLOC_RX 0x2
+#define HWRM_RING_ALLOC_AGG 0x4
+#define HWRM_RING_ALLOC_CMPL 0x8
+#define HWRM_RING_ALLOC_NQ 0x10
+
+struct bnge_ring_grp_info {
+ u16 fw_stats_ctx;
+ u16 fw_grp_id;
+ u16 rx_fw_ring_id;
+ u16 agg_fw_ring_id;
+ u16 nq_fw_ring_id;
+};
+
+#define BNGE_RX_COPY_THRESH 256
+
+#define BNGE_HW_FEATURE_VLAN_ALL_RX \
+ (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)
+#define BNGE_HW_FEATURE_VLAN_ALL_TX \
+ (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX)
+
+enum {
+ BNGE_NET_EN_GRO = BIT(0),
+ BNGE_NET_EN_LRO = BIT(1),
+ BNGE_NET_EN_JUMBO = BIT(2),
+};
+
+#define BNGE_NET_EN_TPA (BNGE_NET_EN_GRO | BNGE_NET_EN_LRO)
+
+/* Minimum TX BDs for a TX packet with MAX_SKB_FRAGS + 1 buffers. We need
+ * one extra BD because the first TX BD is always a long BD.
+ */
+#define BNGE_MIN_TX_DESC_CNT (MAX_SKB_FRAGS + 2)
+
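+/* Ring index helpers: each BD/completion descriptor is 16 bytes, so
+ * shifting a masked ring index right by (BNGE_PAGE_SHIFT - 4) yields the
+ * descriptor page, while the *_IDX() macros yield the offset within that
+ * page.
+ */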
+#define RX_RING(bn, x) (((x) & (bn)->rx_ring_mask) >> (BNGE_PAGE_SHIFT - 4))
+#define RX_AGG_RING(bn, x) (((x) & (bn)->rx_agg_ring_mask) >> \
+ (BNGE_PAGE_SHIFT - 4))
+#define RX_IDX(x) ((x) & (RX_DESC_CNT - 1))
+
+#define TX_RING(bn, x) (((x) & (bn)->tx_ring_mask) >> (BNGE_PAGE_SHIFT - 4))
+#define TX_IDX(x) ((x) & (TX_DESC_CNT - 1))
+
+#define CP_RING(x) (((x) & ~(CP_DESC_CNT - 1)) >> (BNGE_PAGE_SHIFT - 4))
+#define CP_IDX(x) ((x) & (CP_DESC_CNT - 1))
+
+#define RING_RX(bn, idx) ((idx) & (bn)->rx_ring_mask)
+#define NEXT_RX(idx) ((idx) + 1)
+
+#define RING_RX_AGG(bn, idx) ((idx) & (bn)->rx_agg_ring_mask)
+#define NEXT_RX_AGG(idx) ((idx) + 1)
+
+#define BNGE_NQ_HDL_TYPE_SHIFT 24
+#define BNGE_NQ_HDL_TYPE_RX 0x00
+#define BNGE_NQ_HDL_TYPE_TX 0x01
+
+struct bnge_net {
+ struct bnge_dev *bd;
+ struct net_device *netdev;
+
+ u32 priv_flags;
+
+ u32 rx_ring_size;
+ u32 rx_buf_size;
+ u32 rx_buf_use_size; /* usable size */
+ u32 rx_agg_ring_size;
+ u32 rx_copy_thresh;
+ u32 rx_ring_mask;
+ u32 rx_agg_ring_mask;
+ u16 rx_nr_pages;
+ u16 rx_agg_nr_pages;
+
+ u32 tx_ring_size;
+ u32 tx_ring_mask;
+ u16 tx_nr_pages;
+
+ /* NQs and Completion rings */
+ u32 cp_ring_size;
+ u32 cp_ring_mask;
+ u32 cp_bit;
+ u16 cp_nr_pages;
+
+#define BNGE_L2_FLTR_HASH_SIZE 32
+#define BNGE_L2_FLTR_HASH_MASK (BNGE_L2_FLTR_HASH_SIZE - 1)
+ struct hlist_head l2_fltr_hash_tbl[BNGE_L2_FLTR_HASH_SIZE];
+ u32 hash_seed;
+ u64 toeplitz_prefix;
+
+ struct bnge_napi **bnapi;
+
+ struct bnge_rx_ring_info *rx_ring;
+ struct bnge_tx_ring_info *tx_ring;
+
+ u16 *tx_ring_map;
+ enum dma_data_direction rx_dir;
+
+ /* grp_info indexed by napi/nq index */
+ struct bnge_ring_grp_info *grp_info;
+ struct bnge_vnic_info *vnic_info;
+ int nr_vnics;
+ int total_irqs;
+
+ u32 tx_wake_thresh;
+ u16 rx_offset;
+ u16 rx_dma_offset;
+
+ u8 rss_hash_key[HW_HASH_KEY_SIZE];
+ u8 rss_hash_key_valid:1;
+ u8 rss_hash_key_updated:1;
+ int rsscos_nr_ctxs;
+ u32 stats_coal_ticks;
+};
+
+#define BNGE_DEFAULT_RX_RING_SIZE 511
+#define BNGE_DEFAULT_TX_RING_SIZE 511
+
+int bnge_netdev_alloc(struct bnge_dev *bd, int max_irqs);
+void bnge_netdev_free(struct bnge_dev *bd);
+void bnge_set_ring_params(struct bnge_dev *bd);
+
+#if (BNGE_PAGE_SHIFT == 16)
+#define MAX_RX_PAGES_AGG_ENA 1
+#define MAX_RX_PAGES 4
+#define MAX_RX_AGG_PAGES 4
+#define MAX_TX_PAGES 1
+#define MAX_CP_PAGES 16
+#else
+#define MAX_RX_PAGES_AGG_ENA 8
+#define MAX_RX_PAGES 32
+#define MAX_RX_AGG_PAGES 32
+#define MAX_TX_PAGES 8
+#define MAX_CP_PAGES 128
+#endif
+
+#define BNGE_RX_PAGE_SIZE (1 << BNGE_RX_PAGE_SHIFT)
+
+#define RX_DESC_CNT (BNGE_PAGE_SIZE / sizeof(struct rx_bd))
+#define TX_DESC_CNT (BNGE_PAGE_SIZE / sizeof(struct tx_bd))
+#define CP_DESC_CNT (BNGE_PAGE_SIZE / sizeof(struct tx_cmp))
+#define SW_RXBD_RING_SIZE (sizeof(struct bnge_sw_rx_bd) * RX_DESC_CNT)
+#define HW_RXBD_RING_SIZE (sizeof(struct rx_bd) * RX_DESC_CNT)
+#define SW_RXBD_AGG_RING_SIZE (sizeof(struct bnge_sw_rx_agg_bd) * RX_DESC_CNT)
+#define SW_TXBD_RING_SIZE (sizeof(struct bnge_sw_tx_bd) * TX_DESC_CNT)
+#define HW_TXBD_RING_SIZE (sizeof(struct tx_bd) * TX_DESC_CNT)
+#define HW_CMPD_RING_SIZE (sizeof(struct tx_cmp) * CP_DESC_CNT)
+#define BNGE_MAX_RX_DESC_CNT (RX_DESC_CNT * MAX_RX_PAGES - 1)
+#define BNGE_MAX_RX_DESC_CNT_JUM_ENA (RX_DESC_CNT * MAX_RX_PAGES_AGG_ENA - 1)
+#define BNGE_MAX_RX_JUM_DESC_CNT (RX_DESC_CNT * MAX_RX_AGG_PAGES - 1)
+#define BNGE_MAX_TX_DESC_CNT (TX_DESC_CNT * MAX_TX_PAGES - 1)
+
+#define BNGE_MAX_TXR_PER_NAPI 8
+
+#define bnge_for_each_napi_tx(iter, bnapi, txr) \
+ for (iter = 0, txr = (bnapi)->tx_ring[0]; txr; \
+ txr = (iter < BNGE_MAX_TXR_PER_NAPI - 1) ? \
+ (bnapi)->tx_ring[++iter] : NULL)
+
+#define BNGE_SET_NQ_HDL(cpr) \
+ (((cpr)->cp_ring_type << BNGE_NQ_HDL_TYPE_SHIFT) | (cpr)->cp_idx)
+
+struct bnge_stats_mem {
+ u64 *sw_stats;
+ u64 *hw_masks;
+ void *hw_stats;
+ dma_addr_t hw_stats_map;
+ int len;
+};
+
+struct bnge_cp_ring_info {
+ struct bnge_napi *bnapi;
+ dma_addr_t *desc_mapping;
+ struct tx_cmp **desc_ring;
+ struct bnge_ring_struct ring_struct;
+ u8 cp_ring_type;
+ u8 cp_idx;
+ u32 cp_raw_cons;
+ struct bnge_db_info cp_db;
+};
+
+struct bnge_nq_ring_info {
+ struct bnge_napi *bnapi;
+ dma_addr_t *desc_mapping;
+ struct nqe_cn **desc_ring;
+ struct bnge_ring_struct ring_struct;
+ u32 nq_raw_cons;
+ struct bnge_db_info nq_db;
+
+ struct bnge_stats_mem stats;
+ u32 hw_stats_ctx_id;
+
+ int cp_ring_count;
+ struct bnge_cp_ring_info *cp_ring_arr;
+};
+
+struct bnge_rx_ring_info {
+ struct bnge_napi *bnapi;
+ struct bnge_cp_ring_info *rx_cpr;
+ u16 rx_prod;
+ u16 rx_agg_prod;
+ u16 rx_sw_agg_prod;
+ u16 rx_next_cons;
+ struct bnge_db_info rx_db;
+ struct bnge_db_info rx_agg_db;
+
+ struct rx_bd *rx_desc_ring[MAX_RX_PAGES];
+ struct bnge_sw_rx_bd *rx_buf_ring;
+
+ struct rx_bd *rx_agg_desc_ring[MAX_RX_AGG_PAGES];
+ struct bnge_sw_rx_agg_bd *rx_agg_buf_ring;
+
+ unsigned long *rx_agg_bmap;
+ u16 rx_agg_bmap_size;
+
+ dma_addr_t rx_desc_mapping[MAX_RX_PAGES];
+ dma_addr_t rx_agg_desc_mapping[MAX_RX_AGG_PAGES];
+
+ struct bnge_ring_struct rx_ring_struct;
+ struct bnge_ring_struct rx_agg_ring_struct;
+ struct page_pool *page_pool;
+ struct page_pool *head_pool;
+ bool need_head_pool;
+};
+
+struct bnge_tx_ring_info {
+ struct bnge_napi *bnapi;
+ struct bnge_cp_ring_info *tx_cpr;
+ u16 tx_prod;
+ u16 tx_cons;
+ u16 tx_hw_cons;
+ u16 txq_index;
+ u8 tx_napi_idx;
+ u8 kick_pending;
+ struct bnge_db_info tx_db;
+
+ struct tx_bd *tx_desc_ring[MAX_TX_PAGES];
+ struct bnge_sw_tx_bd *tx_buf_ring;
+
+ dma_addr_t tx_desc_mapping[MAX_TX_PAGES];
+
+ u32 dev_state;
+#define BNGE_DEV_STATE_CLOSING 0x1
+
+ struct bnge_ring_struct tx_ring_struct;
+};
+
+struct bnge_napi {
+ struct napi_struct napi;
+ struct bnge_net *bn;
+ int index;
+
+ struct bnge_nq_ring_info nq_ring;
+ struct bnge_rx_ring_info *rx_ring;
+ struct bnge_tx_ring_info *tx_ring[BNGE_MAX_TXR_PER_NAPI];
+};
+
+#define INVALID_STATS_CTX_ID -1
+#define BNGE_VNIC_DEFAULT 0
+#define BNGE_MAX_UC_ADDRS 4
+
+struct bnge_vnic_info {
+ u16 fw_vnic_id;
+#define BNGE_MAX_CTX_PER_VNIC 8
+ u16 fw_rss_cos_lb_ctx[BNGE_MAX_CTX_PER_VNIC];
+ u16 mru;
+ /* index 0 always dev_addr */
+ struct bnge_l2_filter *l2_filters[BNGE_MAX_UC_ADDRS];
+ u16 uc_filter_count;
+ u8 *uc_list;
+ dma_addr_t rss_table_dma_addr;
+ __le16 *rss_table;
+ dma_addr_t rss_hash_key_dma_addr;
+ u64 *rss_hash_key;
+ int rss_table_size;
+#define BNGE_RSS_TABLE_ENTRIES 64
+#define BNGE_RSS_TABLE_SIZE (BNGE_RSS_TABLE_ENTRIES * 4)
+#define BNGE_RSS_TABLE_MAX_TBL 8
+#define BNGE_MAX_RSS_TABLE_SIZE \
+ (BNGE_RSS_TABLE_SIZE * BNGE_RSS_TABLE_MAX_TBL)
+ u32 rx_mask;
+
+ u8 *mc_list;
+ int mc_list_size;
+ int mc_list_count;
+ dma_addr_t mc_list_mapping;
+#define BNGE_MAX_MC_ADDRS 16
+
+ u32 flags;
+#define BNGE_VNIC_RSS_FLAG 1
+#define BNGE_VNIC_MCAST_FLAG 4
+#define BNGE_VNIC_UCAST_FLAG 8
+ u32 vnic_id;
+};
+
+struct bnge_filter_base {
+ struct hlist_node hash;
+ struct list_head list;
+ __le64 filter_id;
+ u8 type;
+#define BNGE_FLTR_TYPE_L2 2
+ u8 flags;
+ u16 rxq;
+ u16 fw_vnic_id;
+ u16 vf_idx;
+ unsigned long state;
+#define BNGE_FLTR_VALID 0
+#define BNGE_FLTR_FW_DELETED 2
+
+ struct rcu_head rcu;
+};
+
+struct bnge_l2_key {
+ union {
+ struct {
+ u8 dst_mac_addr[ETH_ALEN];
+ u16 vlan;
+ };
+ u32 filter_key;
+ };
+};
+
+#define BNGE_L2_KEY_SIZE (sizeof(struct bnge_l2_key) / 4)
+struct bnge_l2_filter {
+ /* base filter must be the first member */
+ struct bnge_filter_base base;
+ struct bnge_l2_key l2_key;
+ refcount_t refcnt;
+};
+
+u16 bnge_cp_ring_for_rx(struct bnge_rx_ring_info *rxr);
+u16 bnge_cp_ring_for_tx(struct bnge_tx_ring_info *txr);
+void bnge_fill_hw_rss_tbl(struct bnge_net *bn, struct bnge_vnic_info *vnic);
+#endif /* _BNGE_NETDEV_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_resc.c b/drivers/net/ethernet/broadcom/bnge/bnge_resc.c
new file mode 100644
index 000000000000..943df5f60f01
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_resc.c
@@ -0,0 +1,617 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2025 Broadcom.
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+
+#include "bnge.h"
+#include "bnge_hwrm.h"
+#include "bnge_hwrm_lib.h"
+#include "bnge_resc.h"
+
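+/* Number of completion rings needed for @tx TX rings, with TX rings
+ * divided evenly across traffic classes (at least one TC).
+ */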
+static u16 bnge_num_tx_to_cp(struct bnge_dev *bd, u16 tx)
+{
+ u16 tcs = bd->num_tc;
+
+ if (!tcs)
+ tcs = 1;
+
+ return tx / tcs;
+}
+
+static u16 bnge_get_max_func_irqs(struct bnge_dev *bd)
+{
+ struct bnge_hw_resc *hw_resc = &bd->hw_resc;
+
+ return min_t(u16, hw_resc->max_irqs, hw_resc->max_nqs);
+}
+
+static unsigned int bnge_get_max_func_stat_ctxs(struct bnge_dev *bd)
+{
+ return bd->hw_resc.max_stat_ctxs;
+}
+
+bool bnge_aux_has_enough_resources(struct bnge_dev *bd)
+{
+ unsigned int max_stat_ctxs;
+
+ max_stat_ctxs = bnge_get_max_func_stat_ctxs(bd);
+ if (max_stat_ctxs <= BNGE_MIN_ROCE_STAT_CTXS ||
+ bd->nq_nr_rings == max_stat_ctxs)
+ return false;
+
+ return true;
+}
+
+static unsigned int bnge_get_max_func_cp_rings(struct bnge_dev *bd)
+{
+ return bd->hw_resc.max_cp_rings;
+}
+
+static int bnge_aux_get_dflt_msix(struct bnge_dev *bd)
+{
+ int roce_msix = BNGE_MAX_ROCE_MSIX;
+
+ return min_t(int, roce_msix, num_online_cpus() + 1);
+}
+
+u16 bnge_aux_get_msix(struct bnge_dev *bd)
+{
+ if (bnge_is_roce_en(bd))
+ return bd->aux_num_msix;
+
+ return 0;
+}
+
+static void bnge_aux_set_msix_num(struct bnge_dev *bd, u16 num)
+{
+ if (bnge_is_roce_en(bd))
+ bd->aux_num_msix = num;
+}
+
+static u16 bnge_aux_get_stat_ctxs(struct bnge_dev *bd)
+{
+ if (bnge_is_roce_en(bd))
+ return bd->aux_num_stat_ctxs;
+
+ return 0;
+}
+
+static void bnge_aux_set_stat_ctxs(struct bnge_dev *bd, u16 num_aux_ctx)
+{
+ if (bnge_is_roce_en(bd))
+ bd->aux_num_stat_ctxs = num_aux_ctx;
+}
+
+static u16 bnge_func_stat_ctxs_demand(struct bnge_dev *bd)
+{
+ return bd->nq_nr_rings + bnge_aux_get_stat_ctxs(bd);
+}
+
+static int bnge_get_dflt_aux_stat_ctxs(struct bnge_dev *bd)
+{
+ int stat_ctx = 0;
+
+ if (bnge_is_roce_en(bd)) {
+ stat_ctx = BNGE_MIN_ROCE_STAT_CTXS;
+
+ if (!bd->pf.port_id && bd->port_count > 1)
+ stat_ctx++;
+ }
+
+ return stat_ctx;
+}
+
+static u16 bnge_nqs_demand(struct bnge_dev *bd)
+{
+ return bd->nq_nr_rings + bnge_aux_get_msix(bd);
+}
+
+static u16 bnge_cprs_demand(struct bnge_dev *bd)
+{
+ return bd->tx_nr_rings + bd->rx_nr_rings;
+}
+
+static u16 bnge_get_avail_msix(struct bnge_dev *bd, int num)
+{
+ u16 max_irq = bnge_get_max_func_irqs(bd);
+ u16 total_demand = bd->nq_nr_rings + num;
+
+ if (max_irq < total_demand) {
+ num = max_irq - bd->nq_nr_rings;
+ if (num <= 0)
+ return 0;
+ }
+
+ return num;
+}
+
+static u16 bnge_num_cp_to_tx(struct bnge_dev *bd, u16 tx_chunks)
+{
+ return tx_chunks * bd->num_tc;
+}
+
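+/* Clamp RX/TX ring counts so they fit within @max: shared channels are
+ * simply capped at @max, otherwise the larger of the two counts is
+ * trimmed one ring at a time until rx + tx <= max.
+ */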
+int bnge_fix_rings_count(u16 *rx, u16 *tx, u16 max, bool shared)
+{
+ u16 _rx = *rx, _tx = *tx;
+
+ if (shared) {
+ *rx = min_t(u16, _rx, max);
+ *tx = min_t(u16, _tx, max);
+ } else {
+ if (max < 2)
+ return -ENOMEM;
+ while (_rx + _tx > max) {
+ if (_rx > _tx && _rx > 1)
+ _rx--;
+ else if (_tx > 1)
+ _tx--;
+ }
+ *rx = _rx;
+ *tx = _tx;
+ }
+
+ return 0;
+}
+
+static int bnge_adjust_rings(struct bnge_dev *bd, u16 *rx,
+ u16 *tx, u16 max_nq, bool sh)
+{
+ u16 tx_chunks = bnge_num_tx_to_cp(bd, *tx);
+
+ if (tx_chunks != *tx) {
+		u16 tx_saved = tx_chunks;
+		int rc;
+
+ rc = bnge_fix_rings_count(rx, &tx_chunks, max_nq, sh);
+ if (rc)
+ return rc;
+ if (tx_chunks != tx_saved)
+ *tx = bnge_num_cp_to_tx(bd, tx_chunks);
+ return 0;
+ }
+
+ return bnge_fix_rings_count(rx, tx, max_nq, sh);
+}
+
+int bnge_cal_nr_rss_ctxs(u16 rx_rings)
+{
+ if (!rx_rings)
+ return 0;
+
+ return bnge_adjust_pow_two(rx_rings - 1,
+ BNGE_RSS_TABLE_ENTRIES);
+}
+
+static u16 bnge_rss_ctxs_in_use(struct bnge_dev *bd,
+ struct bnge_hw_rings *hwr)
+{
+ return bnge_cal_nr_rss_ctxs(hwr->grp);
+}
+
+static u16 bnge_get_total_vnics(struct bnge_dev *bd, u16 rx_rings)
+{
+ return 1;
+}
+
+u32 bnge_get_rxfh_indir_size(struct bnge_dev *bd)
+{
+ return bnge_cal_nr_rss_ctxs(bd->rx_nr_rings) *
+ BNGE_RSS_TABLE_ENTRIES;
+}
+
+static void bnge_set_dflt_rss_indir_tbl(struct bnge_dev *bd)
+{
+ u16 max_entries, pad;
+ u32 *rss_indir_tbl;
+ int i;
+
+ max_entries = bnge_get_rxfh_indir_size(bd);
+ rss_indir_tbl = &bd->rss_indir_tbl[0];
+
+ for (i = 0; i < max_entries; i++)
+ rss_indir_tbl[i] = ethtool_rxfh_indir_default(i,
+ bd->rx_nr_rings);
+
+ pad = bd->rss_indir_tbl_entries - max_entries;
+ if (pad)
+ memset(&rss_indir_tbl[i], 0, pad * sizeof(*rss_indir_tbl));
+}
+
+static void bnge_copy_reserved_rings(struct bnge_dev *bd,
+ struct bnge_hw_rings *hwr)
+{
+ struct bnge_hw_resc *hw_resc = &bd->hw_resc;
+
+ hwr->tx = hw_resc->resv_tx_rings;
+ hwr->rx = hw_resc->resv_rx_rings;
+ hwr->nq = hw_resc->resv_irqs;
+ hwr->cmpl = hw_resc->resv_cp_rings;
+ hwr->grp = hw_resc->resv_hw_ring_grps;
+ hwr->vnic = hw_resc->resv_vnics;
+ hwr->stat = hw_resc->resv_stat_ctxs;
+ hwr->rss_ctx = hw_resc->resv_rsscos_ctxs;
+}
+
+static bool bnge_rings_ok(struct bnge_hw_rings *hwr)
+{
+ return hwr->tx && hwr->rx && hwr->nq && hwr->grp && hwr->vnic &&
+ hwr->stat && hwr->cmpl;
+}
+
+static bool bnge_need_reserve_rings(struct bnge_dev *bd)
+{
+ struct bnge_hw_resc *hw_resc = &bd->hw_resc;
+ u16 cprs = bnge_cprs_demand(bd);
+ u16 rx = bd->rx_nr_rings, stat;
+ u16 nqs = bnge_nqs_demand(bd);
+ u16 vnic;
+
+ if (hw_resc->resv_tx_rings != bd->tx_nr_rings)
+ return true;
+
+ vnic = bnge_get_total_vnics(bd, rx);
+
+ if (bnge_is_agg_reqd(bd))
+ rx <<= 1;
+ stat = bnge_func_stat_ctxs_demand(bd);
+ if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cprs ||
+ hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat)
+ return true;
+ if (hw_resc->resv_irqs != nqs)
+ return true;
+
+ return false;
+}
+
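+/* Reserve rings, NQs, stat contexts and VNICs with the firmware and
+ * adjust the driver's ring counts to what was actually granted.
+ */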
+int bnge_reserve_rings(struct bnge_dev *bd)
+{
+ u16 aux_dflt_msix = bnge_aux_get_dflt_msix(bd);
+ struct bnge_hw_rings hwr = {0};
+ u16 rx_rings, old_rx_rings;
+ u16 nq = bd->nq_nr_rings;
+ u16 aux_msix = 0;
+ bool sh = false;
+ u16 tx_cp;
+ int rc;
+
+ if (!bnge_need_reserve_rings(bd))
+ return 0;
+
+ if (!bnge_aux_registered(bd)) {
+ aux_msix = bnge_get_avail_msix(bd, aux_dflt_msix);
+ if (!aux_msix)
+ bnge_aux_set_stat_ctxs(bd, 0);
+
+ if (aux_msix > aux_dflt_msix)
+ aux_msix = aux_dflt_msix;
+ hwr.nq = nq + aux_msix;
+ } else {
+ hwr.nq = bnge_nqs_demand(bd);
+ }
+
+ hwr.tx = bd->tx_nr_rings;
+ hwr.rx = bd->rx_nr_rings;
+ if (bd->flags & BNGE_EN_SHARED_CHNL)
+ sh = true;
+ hwr.cmpl = hwr.rx + hwr.tx;
+
+ hwr.vnic = bnge_get_total_vnics(bd, hwr.rx);
+
+ if (bnge_is_agg_reqd(bd))
+ hwr.rx <<= 1;
+ hwr.grp = bd->rx_nr_rings;
+ hwr.rss_ctx = bnge_rss_ctxs_in_use(bd, &hwr);
+ hwr.stat = bnge_func_stat_ctxs_demand(bd);
+ old_rx_rings = bd->hw_resc.resv_rx_rings;
+
+ rc = bnge_hwrm_reserve_rings(bd, &hwr);
+ if (rc)
+ return rc;
+
+ bnge_copy_reserved_rings(bd, &hwr);
+
+ rx_rings = hwr.rx;
+ if (bnge_is_agg_reqd(bd)) {
+ if (hwr.rx >= 2)
+ rx_rings = hwr.rx >> 1;
+ else
+ return -ENOMEM;
+ }
+
+ rx_rings = min_t(u16, rx_rings, hwr.grp);
+ hwr.nq = min_t(u16, hwr.nq, bd->nq_nr_rings);
+ if (hwr.stat > bnge_aux_get_stat_ctxs(bd))
+ hwr.stat -= bnge_aux_get_stat_ctxs(bd);
+ hwr.nq = min_t(u16, hwr.nq, hwr.stat);
+
+ /* Adjust the rings */
+ rc = bnge_adjust_rings(bd, &rx_rings, &hwr.tx, hwr.nq, sh);
+ if (bnge_is_agg_reqd(bd))
+ hwr.rx = rx_rings << 1;
+ tx_cp = hwr.tx;
+ hwr.nq = sh ? max_t(u16, tx_cp, rx_rings) : tx_cp + rx_rings;
+ bd->tx_nr_rings = hwr.tx;
+
+ if (rx_rings != bd->rx_nr_rings)
+		dev_warn(bd->dev, "RX rings resv reduced to %d from %d requested\n",
+ rx_rings, bd->rx_nr_rings);
+
+ bd->rx_nr_rings = rx_rings;
+ bd->nq_nr_rings = hwr.nq;
+
+ if (!bnge_rings_ok(&hwr))
+ return -ENOMEM;
+
+ if (old_rx_rings != bd->hw_resc.resv_rx_rings)
+ bnge_set_dflt_rss_indir_tbl(bd);
+
+ if (!bnge_aux_registered(bd)) {
+ u16 resv_msix, resv_ctx, aux_ctxs;
+ struct bnge_hw_resc *hw_resc;
+
+ hw_resc = &bd->hw_resc;
+ resv_msix = hw_resc->resv_irqs - bd->nq_nr_rings;
+ aux_msix = min_t(u16, resv_msix, aux_msix);
+ bnge_aux_set_msix_num(bd, aux_msix);
+ resv_ctx = hw_resc->resv_stat_ctxs - bd->nq_nr_rings;
+ aux_ctxs = min(resv_ctx, bnge_aux_get_stat_ctxs(bd));
+ bnge_aux_set_stat_ctxs(bd, aux_ctxs);
+ }
+
+ return rc;
+}
+
+int bnge_alloc_irqs(struct bnge_dev *bd)
+{
+ u16 aux_msix, tx_cp, num_entries;
+ int i, irqs_demand, rc;
+ u16 max, min = 1;
+
+ irqs_demand = bnge_nqs_demand(bd);
+ max = bnge_get_max_func_irqs(bd);
+ if (irqs_demand > max)
+ irqs_demand = max;
+
+ if (!(bd->flags & BNGE_EN_SHARED_CHNL))
+ min = 2;
+
+ irqs_demand = pci_alloc_irq_vectors(bd->pdev, min, irqs_demand,
+ PCI_IRQ_MSIX);
+ aux_msix = bnge_aux_get_msix(bd);
+ if (irqs_demand < 0 || irqs_demand < aux_msix) {
+ rc = -ENODEV;
+ goto err_free_irqs;
+ }
+
+ num_entries = irqs_demand;
+ if (pci_msix_can_alloc_dyn(bd->pdev))
+ num_entries = max;
+ bd->irq_tbl = kcalloc(num_entries, sizeof(*bd->irq_tbl), GFP_KERNEL);
+ if (!bd->irq_tbl) {
+ rc = -ENOMEM;
+ goto err_free_irqs;
+ }
+
+ for (i = 0; i < irqs_demand; i++)
+ bd->irq_tbl[i].vector = pci_irq_vector(bd->pdev, i);
+
+ bd->irqs_acquired = irqs_demand;
+	/* Reduce rings based upon the number of vectors allocated.
+	 * We don't need to consider NQs as they have been calculated
+	 * and must be more than irqs_demand.
+ */
+ rc = bnge_adjust_rings(bd, &bd->rx_nr_rings,
+ &bd->tx_nr_rings,
+ irqs_demand - aux_msix, min == 1);
+ if (rc)
+ goto err_free_irqs;
+
+ tx_cp = bnge_num_tx_to_cp(bd, bd->tx_nr_rings);
+ bd->nq_nr_rings = (min == 1) ?
+ max_t(u16, tx_cp, bd->rx_nr_rings) :
+ tx_cp + bd->rx_nr_rings;
+
+ /* Readjust tx_nr_rings_per_tc */
+ if (!bd->num_tc)
+ bd->tx_nr_rings_per_tc = bd->tx_nr_rings;
+
+ return 0;
+
+err_free_irqs:
+ dev_err(bd->dev, "Failed to allocate IRQs err = %d\n", rc);
+ bnge_free_irqs(bd);
+ return rc;
+}
+
+void bnge_free_irqs(struct bnge_dev *bd)
+{
+ pci_free_irq_vectors(bd->pdev);
+ kfree(bd->irq_tbl);
+ bd->irq_tbl = NULL;
+}
+
+static void _bnge_get_max_rings(struct bnge_dev *bd, u16 *max_rx,
+ u16 *max_tx, u16 *max_nq)
+{
+ struct bnge_hw_resc *hw_resc = &bd->hw_resc;
+ u16 max_ring_grps = 0, max_cp;
+ int rc;
+
+ *max_tx = hw_resc->max_tx_rings;
+ *max_rx = hw_resc->max_rx_rings;
+ *max_nq = min_t(int, bnge_get_max_func_irqs(bd),
+ hw_resc->max_stat_ctxs);
+ max_ring_grps = hw_resc->max_hw_ring_grps;
+ if (bnge_is_agg_reqd(bd))
+ *max_rx >>= 1;
+
+ max_cp = bnge_get_max_func_cp_rings(bd);
+
+ /* Fix RX and TX rings according to number of CPs available */
+ rc = bnge_fix_rings_count(max_rx, max_tx, max_cp, false);
+ if (rc) {
+ *max_rx = 0;
+ *max_tx = 0;
+ }
+
+ *max_rx = min_t(int, *max_rx, max_ring_grps);
+}
+
+static int bnge_get_max_rings(struct bnge_dev *bd, u16 *max_rx,
+ u16 *max_tx, bool shared)
+{
+ u16 rx, tx, nq;
+
+ _bnge_get_max_rings(bd, &rx, &tx, &nq);
+ *max_rx = rx;
+ *max_tx = tx;
+ if (!rx || !tx || !nq)
+ return -ENOMEM;
+
+ return bnge_fix_rings_count(max_rx, max_tx, nq, shared);
+}
+
+static int bnge_get_dflt_rings(struct bnge_dev *bd, u16 *max_rx, u16 *max_tx,
+ bool shared)
+{
+ int rc;
+
+ rc = bnge_get_max_rings(bd, max_rx, max_tx, shared);
+ if (rc) {
+ dev_info(bd->dev, "Not enough rings available\n");
+ return rc;
+ }
+
+ if (bnge_is_roce_en(bd)) {
+ int max_cp, max_stat, max_irq;
+
+ /* Reserve minimum resources for RoCE */
+ max_cp = bnge_get_max_func_cp_rings(bd);
+ max_stat = bnge_get_max_func_stat_ctxs(bd);
+ max_irq = bnge_get_max_func_irqs(bd);
+ if (max_cp <= BNGE_MIN_ROCE_CP_RINGS ||
+ max_irq <= BNGE_MIN_ROCE_CP_RINGS ||
+ max_stat <= BNGE_MIN_ROCE_STAT_CTXS)
+ return 0;
+
+ max_cp -= BNGE_MIN_ROCE_CP_RINGS;
+ max_irq -= BNGE_MIN_ROCE_CP_RINGS;
+ max_stat -= BNGE_MIN_ROCE_STAT_CTXS;
+ max_cp = min_t(u16, max_cp, max_irq);
+ max_cp = min_t(u16, max_cp, max_stat);
+ rc = bnge_adjust_rings(bd, max_rx, max_tx, max_cp, shared);
+ if (rc)
+ rc = 0;
+ }
+
+ return rc;
+}
+
+/* In the initial default shared ring setting, each shared ring must have
+ * an RX/TX ring pair.
+ */
+static void bnge_trim_dflt_sh_rings(struct bnge_dev *bd)
+{
+ bd->nq_nr_rings = min_t(u16, bd->tx_nr_rings_per_tc, bd->rx_nr_rings);
+ bd->rx_nr_rings = bd->nq_nr_rings;
+ bd->tx_nr_rings_per_tc = bd->nq_nr_rings;
+ bd->tx_nr_rings = bd->tx_nr_rings_per_tc;
+}
+
+static int bnge_net_init_dflt_rings(struct bnge_dev *bd, bool sh)
+{
+ u16 dflt_rings, max_rx_rings, max_tx_rings;
+ int rc;
+
+ if (sh)
+ bd->flags |= BNGE_EN_SHARED_CHNL;
+
+ dflt_rings = netif_get_num_default_rss_queues();
+
+ rc = bnge_get_dflt_rings(bd, &max_rx_rings, &max_tx_rings, sh);
+ if (rc)
+ return rc;
+ bd->rx_nr_rings = min_t(u16, dflt_rings, max_rx_rings);
+ bd->tx_nr_rings_per_tc = min_t(u16, dflt_rings, max_tx_rings);
+ if (sh)
+ bnge_trim_dflt_sh_rings(bd);
+ else
+ bd->nq_nr_rings = bd->tx_nr_rings_per_tc + bd->rx_nr_rings;
+ bd->tx_nr_rings = bd->tx_nr_rings_per_tc;
+
+ rc = bnge_reserve_rings(bd);
+ if (rc && rc != -ENODEV)
+ dev_warn(bd->dev, "Unable to reserve tx rings\n");
+ bd->tx_nr_rings_per_tc = bd->tx_nr_rings;
+ if (sh)
+ bnge_trim_dflt_sh_rings(bd);
+
+	/* Rings may have been reduced, reserve them again */
+ if (bnge_need_reserve_rings(bd)) {
+ rc = bnge_reserve_rings(bd);
+ if (rc && rc != -ENODEV)
+ dev_warn(bd->dev, "Fewer rings reservation failed\n");
+ bd->tx_nr_rings_per_tc = bd->tx_nr_rings;
+ }
+ if (rc) {
+ bd->tx_nr_rings = 0;
+ bd->rx_nr_rings = 0;
+ }
+
+ return rc;
+}
+
+static int bnge_alloc_rss_indir_tbl(struct bnge_dev *bd)
+{
+ u16 entries;
+
+ entries = BNGE_MAX_RSS_TABLE_ENTRIES;
+
+ bd->rss_indir_tbl_entries = entries;
+ bd->rss_indir_tbl =
+ kmalloc_array(entries, sizeof(*bd->rss_indir_tbl), GFP_KERNEL);
+ if (!bd->rss_indir_tbl)
+ return -ENOMEM;
+
+ return 0;
+}
+
+int bnge_net_init_dflt_config(struct bnge_dev *bd)
+{
+ struct bnge_hw_resc *hw_resc;
+ int rc;
+
+ rc = bnge_alloc_rss_indir_tbl(bd);
+ if (rc)
+ return rc;
+
+ rc = bnge_net_init_dflt_rings(bd, true);
+ if (rc)
+ goto err_free_tbl;
+
+ hw_resc = &bd->hw_resc;
+ bd->max_fltr = hw_resc->max_rx_em_flows + hw_resc->max_rx_wm_flows +
+ BNGE_L2_FLTR_MAX_FLTR;
+
+ return 0;
+
+err_free_tbl:
+ kfree(bd->rss_indir_tbl);
+ bd->rss_indir_tbl = NULL;
+ return rc;
+}
+
+void bnge_net_uninit_dflt_config(struct bnge_dev *bd)
+{
+ kfree(bd->rss_indir_tbl);
+ bd->rss_indir_tbl = NULL;
+}
+
+void bnge_aux_init_dflt_config(struct bnge_dev *bd)
+{
+ bd->aux_num_msix = bnge_aux_get_dflt_msix(bd);
+ bd->aux_num_stat_ctxs = bnge_get_dflt_aux_stat_ctxs(bd);
+}
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_resc.h b/drivers/net/ethernet/broadcom/bnge/bnge_resc.h
new file mode 100644
index 000000000000..b62a634669f6
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_resc.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_RESC_H_
+#define _BNGE_RESC_H_
+
+#include "bnge_netdev.h"
+#include "bnge_rmem.h"
+
+struct bnge_hw_resc {
+ u16 min_rsscos_ctxs;
+ u16 max_rsscos_ctxs;
+ u16 resv_rsscos_ctxs;
+ u16 min_cp_rings;
+ u16 max_cp_rings;
+ u16 resv_cp_rings;
+ u16 min_tx_rings;
+ u16 max_tx_rings;
+ u16 resv_tx_rings;
+ u16 max_tx_sch_inputs;
+ u16 min_rx_rings;
+ u16 max_rx_rings;
+ u16 resv_rx_rings;
+ u16 min_hw_ring_grps;
+ u16 max_hw_ring_grps;
+ u16 resv_hw_ring_grps;
+ u16 min_l2_ctxs;
+ u16 max_l2_ctxs;
+ u16 min_vnics;
+ u16 max_vnics;
+ u16 resv_vnics;
+ u16 min_stat_ctxs;
+ u16 max_stat_ctxs;
+ u16 resv_stat_ctxs;
+ u16 max_nqs;
+ u16 max_irqs;
+ u16 resv_irqs;
+ u32 max_encap_records;
+ u32 max_decap_records;
+ u32 max_tx_em_flows;
+ u32 max_tx_wm_flows;
+ u32 max_rx_em_flows;
+ u32 max_rx_wm_flows;
+};
+
+struct bnge_hw_rings {
+ u16 tx;
+ u16 rx;
+ u16 grp;
+ u16 nq;
+ u16 cmpl;
+ u16 stat;
+ u16 vnic;
+ u16 rss_ctx;
+};
+
+/* "TXRX", 2 hypens, plus maximum integer */
+#define BNGE_IRQ_NAME_EXTRA 17
+struct bnge_irq {
+ irq_handler_t handler;
+ unsigned int vector;
+ u8 requested:1;
+ u8 have_cpumask:1;
+ char name[IFNAMSIZ + BNGE_IRQ_NAME_EXTRA];
+ cpumask_var_t cpu_mask;
+};
+
+int bnge_reserve_rings(struct bnge_dev *bd);
+int bnge_fix_rings_count(u16 *rx, u16 *tx, u16 max, bool shared);
+int bnge_alloc_irqs(struct bnge_dev *bd);
+void bnge_free_irqs(struct bnge_dev *bd);
+int bnge_net_init_dflt_config(struct bnge_dev *bd);
+void bnge_net_uninit_dflt_config(struct bnge_dev *bd);
+void bnge_aux_init_dflt_config(struct bnge_dev *bd);
+u32 bnge_get_rxfh_indir_size(struct bnge_dev *bd);
+int bnge_cal_nr_rss_ctxs(u16 rx_rings);
+bool bnge_aux_has_enough_resources(struct bnge_dev *bd);
+
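+/* Convert a total entry count into a power-of-two number of blocks of
+ * @ent_per_blk entries: zero or one full block is bumped to one or two
+ * blocks respectively, larger counts are rounded up to the next power of
+ * two (e.g. 511 entries at 256 per block -> 2 blocks).
+ */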
+static inline u32
+bnge_adjust_pow_two(u32 total_ent, u16 ent_per_blk)
+{
+ u32 blks = total_ent / ent_per_blk;
+
+ if (blks == 0 || blks == 1)
+ return ++blks;
+
+ if (!is_power_of_2(blks))
+ blks = roundup_pow_of_two(blks);
+
+ return blks;
+}
+
+#define BNGE_MAX_ROCE_MSIX 64
+#define BNGE_MIN_ROCE_CP_RINGS 2
+#define BNGE_MIN_ROCE_STAT_CTXS 1
+
+#endif /* _BNGE_RESC_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_rmem.c b/drivers/net/ethernet/broadcom/bnge/bnge_rmem.c
new file mode 100644
index 000000000000..79f5ce2e5d08
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_rmem.c
@@ -0,0 +1,499 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2025 Broadcom.
+
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/crash_dump.h>
+#include <linux/bnxt/hsi.h>
+
+#include "bnge.h"
+#include "bnge_hwrm_lib.h"
+#include "bnge_rmem.h"
+
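+/* Pre-fill freshly allocated context memory: either memset the whole
+ * buffer with the init value, or write the init value at the configured
+ * offset of every entry when a valid init offset is provided.
+ */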
+static void bnge_init_ctx_mem(struct bnge_ctx_mem_type *ctxm,
+ void *p, int len)
+{
+ u8 init_val = ctxm->init_value;
+ u16 offset = ctxm->init_offset;
+ u8 *p2 = p;
+ int i;
+
+ if (!init_val)
+ return;
+ if (offset == BNGE_CTX_INIT_INVALID_OFFSET) {
+ memset(p, init_val, len);
+ return;
+ }
+ for (i = 0; i < len; i += ctxm->entry_size)
+ *(p2 + i + offset) = init_val;
+}
+
+void bnge_free_ring(struct bnge_dev *bd, struct bnge_ring_mem_info *rmem)
+{
+ struct pci_dev *pdev = bd->pdev;
+ int i;
+
+ if (!rmem->pg_arr)
+ goto skip_pages;
+
+ for (i = 0; i < rmem->nr_pages; i++) {
+ if (!rmem->pg_arr[i])
+ continue;
+
+ dma_free_coherent(&pdev->dev, rmem->page_size,
+ rmem->pg_arr[i], rmem->dma_arr[i]);
+
+ rmem->pg_arr[i] = NULL;
+ }
+skip_pages:
+ if (rmem->pg_tbl) {
+ size_t pg_tbl_size = rmem->nr_pages * 8;
+
+ if (rmem->flags & BNGE_RMEM_USE_FULL_PAGE_FLAG)
+ pg_tbl_size = rmem->page_size;
+ dma_free_coherent(&pdev->dev, pg_tbl_size,
+ rmem->pg_tbl, rmem->dma_pg_tbl);
+ rmem->pg_tbl = NULL;
+ }
+ if (rmem->vmem_size && *rmem->vmem) {
+ vfree(*rmem->vmem);
+ *rmem->vmem = NULL;
+ }
+}
+
+int bnge_alloc_ring(struct bnge_dev *bd, struct bnge_ring_mem_info *rmem)
+{
+ struct pci_dev *pdev = bd->pdev;
+ u64 valid_bit = 0;
+ int i;
+
+ if (rmem->flags & (BNGE_RMEM_VALID_PTE_FLAG | BNGE_RMEM_RING_PTE_FLAG))
+ valid_bit = PTU_PTE_VALID;
+
+ if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
+ size_t pg_tbl_size = rmem->nr_pages * 8;
+
+ if (rmem->flags & BNGE_RMEM_USE_FULL_PAGE_FLAG)
+ pg_tbl_size = rmem->page_size;
+ rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
+ &rmem->dma_pg_tbl,
+ GFP_KERNEL);
+ if (!rmem->pg_tbl)
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < rmem->nr_pages; i++) {
+ u64 extra_bits = valid_bit;
+
+ rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
+ rmem->page_size,
+ &rmem->dma_arr[i],
+ GFP_KERNEL);
+ if (!rmem->pg_arr[i])
+ goto err_free_ring;
+
+ if (rmem->ctx_mem)
+ bnge_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i],
+ rmem->page_size);
+
+ if (rmem->nr_pages > 1 || rmem->depth > 0) {
+ if (i == rmem->nr_pages - 2 &&
+ (rmem->flags & BNGE_RMEM_RING_PTE_FLAG))
+ extra_bits |= PTU_PTE_NEXT_TO_LAST;
+ else if (i == rmem->nr_pages - 1 &&
+ (rmem->flags & BNGE_RMEM_RING_PTE_FLAG))
+ extra_bits |= PTU_PTE_LAST;
+ rmem->pg_tbl[i] =
+ cpu_to_le64(rmem->dma_arr[i] | extra_bits);
+ }
+ }
+
+ if (rmem->vmem_size) {
+ *rmem->vmem = vzalloc(rmem->vmem_size);
+ if (!(*rmem->vmem))
+ goto err_free_ring;
+ }
+ return 0;
+
+err_free_ring:
+ bnge_free_ring(bd, rmem);
+ return -ENOMEM;
+}
+
+static int bnge_alloc_ctx_one_lvl(struct bnge_dev *bd,
+ struct bnge_ctx_pg_info *ctx_pg)
+{
+ struct bnge_ring_mem_info *rmem = &ctx_pg->ring_mem;
+
+ rmem->page_size = BNGE_PAGE_SIZE;
+ rmem->pg_arr = ctx_pg->ctx_pg_arr;
+ rmem->dma_arr = ctx_pg->ctx_dma_arr;
+ rmem->flags = BNGE_RMEM_VALID_PTE_FLAG;
+ if (rmem->depth >= 1)
+ rmem->flags |= BNGE_RMEM_USE_FULL_PAGE_FLAG;
+ return bnge_alloc_ring(bd, rmem);
+}
+
+static int bnge_alloc_ctx_pg_tbls(struct bnge_dev *bd,
+ struct bnge_ctx_pg_info *ctx_pg, u32 mem_size,
+ u8 depth, struct bnge_ctx_mem_type *ctxm)
+{
+ struct bnge_ring_mem_info *rmem = &ctx_pg->ring_mem;
+ int rc;
+
+ if (!mem_size)
+ return -EINVAL;
+
+ ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNGE_PAGE_SIZE);
+ if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
+ ctx_pg->nr_pages = 0;
+ return -EINVAL;
+ }
+ if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
+ int nr_tbls, i;
+
+ rmem->depth = 2;
+ ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
+ GFP_KERNEL);
+ if (!ctx_pg->ctx_pg_tbl)
+ return -ENOMEM;
+ nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
+ rmem->nr_pages = nr_tbls;
+ rc = bnge_alloc_ctx_one_lvl(bd, ctx_pg);
+ if (rc)
+ return rc;
+ for (i = 0; i < nr_tbls; i++) {
+ struct bnge_ctx_pg_info *pg_tbl;
+
+ pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
+ if (!pg_tbl)
+ return -ENOMEM;
+ ctx_pg->ctx_pg_tbl[i] = pg_tbl;
+ rmem = &pg_tbl->ring_mem;
+ rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
+ rmem->dma_pg_tbl = ctx_pg->ctx_dma_arr[i];
+ rmem->depth = 1;
+ rmem->nr_pages = MAX_CTX_PAGES;
+ rmem->ctx_mem = ctxm;
+ if (i == (nr_tbls - 1)) {
+ int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
+
+ if (rem)
+ rmem->nr_pages = rem;
+ }
+ rc = bnge_alloc_ctx_one_lvl(bd, pg_tbl);
+ if (rc)
+ break;
+ }
+ } else {
+ rmem->nr_pages = DIV_ROUND_UP(mem_size, BNGE_PAGE_SIZE);
+ if (rmem->nr_pages > 1 || depth)
+ rmem->depth = 1;
+ rmem->ctx_mem = ctxm;
+ rc = bnge_alloc_ctx_one_lvl(bd, ctx_pg);
+ }
+
+ return rc;
+}
+
+static void bnge_free_ctx_pg_tbls(struct bnge_dev *bd,
+ struct bnge_ctx_pg_info *ctx_pg)
+{
+ struct bnge_ring_mem_info *rmem = &ctx_pg->ring_mem;
+
+ if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
+ ctx_pg->ctx_pg_tbl) {
+ int i, nr_tbls = rmem->nr_pages;
+
+ for (i = 0; i < nr_tbls; i++) {
+ struct bnge_ctx_pg_info *pg_tbl;
+ struct bnge_ring_mem_info *rmem2;
+
+ pg_tbl = ctx_pg->ctx_pg_tbl[i];
+ if (!pg_tbl)
+ continue;
+ rmem2 = &pg_tbl->ring_mem;
+ bnge_free_ring(bd, rmem2);
+ ctx_pg->ctx_pg_arr[i] = NULL;
+ kfree(pg_tbl);
+ ctx_pg->ctx_pg_tbl[i] = NULL;
+ }
+ kfree(ctx_pg->ctx_pg_tbl);
+ ctx_pg->ctx_pg_tbl = NULL;
+ }
+ bnge_free_ring(bd, rmem);
+ ctx_pg->nr_pages = 0;
+}
+
+static int bnge_setup_ctxm_pg_tbls(struct bnge_dev *bd,
+ struct bnge_ctx_mem_type *ctxm, u32 entries,
+ u8 pg_lvl)
+{
+ struct bnge_ctx_pg_info *ctx_pg = ctxm->pg_info;
+ int i, rc = 0, n = 1;
+ u32 mem_size;
+
+ if (!ctxm->entry_size || !ctx_pg)
+ return -EINVAL;
+ if (ctxm->instance_bmap)
+ n = hweight32(ctxm->instance_bmap);
+ if (ctxm->entry_multiple)
+ entries = roundup(entries, ctxm->entry_multiple);
+ entries = clamp_t(u32, entries, ctxm->min_entries, ctxm->max_entries);
+ mem_size = entries * ctxm->entry_size;
+ for (i = 0; i < n && !rc; i++) {
+ ctx_pg[i].entries = entries;
+ rc = bnge_alloc_ctx_pg_tbls(bd, &ctx_pg[i], mem_size, pg_lvl,
+ ctxm->init_value ? ctxm : NULL);
+ }
+
+ return rc;
+}
+
+static int bnge_backing_store_cfg(struct bnge_dev *bd, u32 ena)
+{
+ struct bnge_ctx_mem_info *ctx = bd->ctx;
+ struct bnge_ctx_mem_type *ctxm;
+ u16 last_type;
+ int rc = 0;
+ u16 type;
+
+ if (!ena)
+ return 0;
+ else if (ena & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM)
+ last_type = BNGE_CTX_MAX - 1;
+ else
+ last_type = BNGE_CTX_L2_MAX - 1;
+ ctx->ctx_arr[last_type].last = 1;
+
+ for (type = 0 ; type < BNGE_CTX_V2_MAX; type++) {
+ ctxm = &ctx->ctx_arr[type];
+
+ rc = bnge_hwrm_func_backing_store(bd, ctxm, ctxm->last);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+void bnge_free_ctx_mem(struct bnge_dev *bd)
+{
+ struct bnge_ctx_mem_info *ctx = bd->ctx;
+ u16 type;
+
+ if (!ctx)
+ return;
+
+ for (type = 0; type < BNGE_CTX_V2_MAX; type++) {
+ struct bnge_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
+ struct bnge_ctx_pg_info *ctx_pg = ctxm->pg_info;
+ int i, n = 1;
+
+ if (!ctx_pg)
+ continue;
+ if (ctxm->instance_bmap)
+ n = hweight32(ctxm->instance_bmap);
+ for (i = 0; i < n; i++)
+ bnge_free_ctx_pg_tbls(bd, &ctx_pg[i]);
+
+ kfree(ctx_pg);
+ ctxm->pg_info = NULL;
+ }
+
+ ctx->flags &= ~BNGE_CTX_FLAG_INITED;
+ kfree(ctx);
+ bd->ctx = NULL;
+}
+
+#define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \
+ (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \
+ FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \
+ FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \
+ FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \
+ FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
+
+int bnge_alloc_ctx_mem(struct bnge_dev *bd)
+{
+ struct bnge_ctx_mem_type *ctxm;
+ struct bnge_ctx_mem_info *ctx;
+ u32 l2_qps, qp1_qps, max_qps;
+ u32 ena, entries_sp, entries;
+ u32 srqs, max_srqs, min;
+ u32 num_mr, num_ah;
+ u32 extra_srqs = 0;
+ u32 extra_qps = 0;
+ u32 fast_qpmd_qps;
+ u8 pg_lvl = 1;
+ int i, rc;
+
+ rc = bnge_hwrm_func_backing_store_qcaps(bd);
+ if (rc) {
+ dev_err(bd->dev, "Failed querying ctx mem caps, rc: %d\n", rc);
+ return rc;
+ }
+
+ ctx = bd->ctx;
+ if (!ctx || (ctx->flags & BNGE_CTX_FLAG_INITED))
+ return 0;
+
+ ctxm = &ctx->ctx_arr[BNGE_CTX_QP];
+ l2_qps = ctxm->qp_l2_entries;
+ qp1_qps = ctxm->qp_qp1_entries;
+ fast_qpmd_qps = ctxm->qp_fast_qpmd_entries;
+ max_qps = ctxm->max_entries;
+ ctxm = &ctx->ctx_arr[BNGE_CTX_SRQ];
+ srqs = ctxm->srq_l2_entries;
+ max_srqs = ctxm->max_entries;
+ ena = 0;
+ if (bnge_is_roce_en(bd) && !is_kdump_kernel()) {
+ pg_lvl = 2;
+ extra_qps = min_t(u32, 65536, max_qps - l2_qps - qp1_qps);
+ /* allocate extra qps if fast qp destroy feature enabled */
+ extra_qps += fast_qpmd_qps;
+ extra_srqs = min_t(u32, 8192, max_srqs - srqs);
+ if (fast_qpmd_qps)
+ ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD;
+ }
+
+ ctxm = &ctx->ctx_arr[BNGE_CTX_QP];
+ rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, l2_qps + qp1_qps + extra_qps,
+ pg_lvl);
+ if (rc)
+ return rc;
+
+ ctxm = &ctx->ctx_arr[BNGE_CTX_SRQ];
+ rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, srqs + extra_srqs, pg_lvl);
+ if (rc)
+ return rc;
+
+ ctxm = &ctx->ctx_arr[BNGE_CTX_CQ];
+ rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, ctxm->cq_l2_entries +
+ extra_qps * 2, pg_lvl);
+ if (rc)
+ return rc;
+
+ ctxm = &ctx->ctx_arr[BNGE_CTX_VNIC];
+ rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, ctxm->max_entries, 1);
+ if (rc)
+ return rc;
+
+ ctxm = &ctx->ctx_arr[BNGE_CTX_STAT];
+ rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, ctxm->max_entries, 1);
+ if (rc)
+ return rc;
+
+ if (!bnge_is_roce_en(bd))
+ goto skip_rdma;
+
+ ctxm = &ctx->ctx_arr[BNGE_CTX_MRAV];
+ /* 128K extra is needed to accommodate static AH context
+ * allocation by f/w.
+ */
+ num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
+ num_ah = min_t(u32, num_mr, 1024 * 128);
+ ctxm->split_entry_cnt = BNGE_CTX_MRAV_AV_SPLIT_ENTRY + 1;
+ if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah)
+ ctxm->mrav_av_entries = num_ah;
+
+ rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, num_mr + num_ah, 2);
+ if (rc)
+ return rc;
+ ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
+
+ ctxm = &ctx->ctx_arr[BNGE_CTX_TIM];
+ rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, l2_qps + qp1_qps + extra_qps, 1);
+ if (rc)
+ return rc;
+ ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
+
+skip_rdma:
+ ctxm = &ctx->ctx_arr[BNGE_CTX_STQM];
+ min = ctxm->min_entries;
+ entries_sp = ctx->ctx_arr[BNGE_CTX_VNIC].vnic_entries + l2_qps +
+ 2 * (extra_qps + qp1_qps) + min;
+ rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, entries_sp, 2);
+ if (rc)
+ return rc;
+
+ ctxm = &ctx->ctx_arr[BNGE_CTX_FTQM];
+ entries = l2_qps + 2 * (extra_qps + qp1_qps);
+ rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, entries, 2);
+ if (rc)
+ return rc;
+ for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
+ ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
+ ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
+
+ rc = bnge_backing_store_cfg(bd, ena);
+ if (rc) {
+ dev_err(bd->dev, "Failed configuring ctx mem, rc: %d\n", rc);
+ return rc;
+ }
+ ctx->flags |= BNGE_CTX_FLAG_INITED;
+
+ return 0;
+}
+
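+/* Wire up each ring's ring_mem descriptor to its page arrays, DMA
+ * address arrays and software buffer rings ahead of ring memory
+ * allocation.
+ */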
+void bnge_init_ring_struct(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i, j;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_ring_mem_info *rmem;
+ struct bnge_nq_ring_info *nqr;
+ struct bnge_rx_ring_info *rxr;
+ struct bnge_tx_ring_info *txr;
+ struct bnge_ring_struct *ring;
+
+ nqr = &bnapi->nq_ring;
+ ring = &nqr->ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bn->cp_nr_pages;
+ rmem->page_size = HW_CMPD_RING_SIZE;
+ rmem->pg_arr = (void **)nqr->desc_ring;
+ rmem->dma_arr = nqr->desc_mapping;
+ rmem->vmem_size = 0;
+
+ rxr = bnapi->rx_ring;
+ if (!rxr)
+ goto skip_rx;
+
+ ring = &rxr->rx_ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bn->rx_nr_pages;
+ rmem->page_size = HW_RXBD_RING_SIZE;
+ rmem->pg_arr = (void **)rxr->rx_desc_ring;
+ rmem->dma_arr = rxr->rx_desc_mapping;
+ rmem->vmem_size = SW_RXBD_RING_SIZE * bn->rx_nr_pages;
+ rmem->vmem = (void **)&rxr->rx_buf_ring;
+
+ ring = &rxr->rx_agg_ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bn->rx_agg_nr_pages;
+ rmem->page_size = HW_RXBD_RING_SIZE;
+ rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
+ rmem->dma_arr = rxr->rx_agg_desc_mapping;
+ rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bn->rx_agg_nr_pages;
+ rmem->vmem = (void **)&rxr->rx_agg_buf_ring;
+
+skip_rx:
+ bnge_for_each_napi_tx(j, bnapi, txr) {
+ ring = &txr->tx_ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bn->tx_nr_pages;
+ rmem->page_size = HW_TXBD_RING_SIZE;
+ rmem->pg_arr = (void **)txr->tx_desc_ring;
+ rmem->dma_arr = txr->tx_desc_mapping;
+ rmem->vmem_size = SW_TXBD_RING_SIZE * bn->tx_nr_pages;
+ rmem->vmem = (void **)&txr->tx_buf_ring;
+ }
+ }
+}
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_rmem.h b/drivers/net/ethernet/broadcom/bnge/bnge_rmem.h
new file mode 100644
index 000000000000..341c7f81ed09
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_rmem.h
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_RMEM_H_
+#define _BNGE_RMEM_H_
+
+struct bnge_ctx_mem_type;
+struct bnge_dev;
+struct bnge_net;
+
+#define PTU_PTE_VALID 0x1UL
+#define PTU_PTE_LAST 0x2UL
+#define PTU_PTE_NEXT_TO_LAST 0x4UL
+
+struct bnge_ring_mem_info {
+ /* Number of pages to next level */
+ int nr_pages;
+ int page_size;
+ u16 flags;
+#define BNGE_RMEM_VALID_PTE_FLAG 1
+#define BNGE_RMEM_RING_PTE_FLAG 2
+#define BNGE_RMEM_USE_FULL_PAGE_FLAG 4
+
+ u16 depth;
+
+ void **pg_arr;
+ dma_addr_t *dma_arr;
+
+ __le64 *pg_tbl;
+ dma_addr_t dma_pg_tbl;
+
+ int vmem_size;
+ void **vmem;
+
+ struct bnge_ctx_mem_type *ctx_mem;
+};
+
+/* The hardware supports certain page sizes.
+ * Use the supported page sizes to allocate the rings.
+ */
+#if (PAGE_SHIFT < 12)
+#define BNGE_PAGE_SHIFT 12
+#elif (PAGE_SHIFT <= 13)
+#define BNGE_PAGE_SHIFT PAGE_SHIFT
+#elif (PAGE_SHIFT < 16)
+#define BNGE_PAGE_SHIFT 13
+#else
+#define BNGE_PAGE_SHIFT 16
+#endif
+#define BNGE_PAGE_SIZE (1 << BNGE_PAGE_SHIFT)
+/* The RXBD length is 16-bit so we can only support page sizes < 64K */
+#if (PAGE_SHIFT > 15)
+#define BNGE_RX_PAGE_SHIFT 15
+#else
+#define BNGE_RX_PAGE_SHIFT PAGE_SHIFT
+#endif
+#define MAX_CTX_PAGES (BNGE_PAGE_SIZE / 8)
+#define MAX_CTX_TOTAL_PAGES (MAX_CTX_PAGES * MAX_CTX_PAGES)
+
+struct bnge_ctx_pg_info {
+ u32 entries;
+ u32 nr_pages;
+ void *ctx_pg_arr[MAX_CTX_PAGES];
+ dma_addr_t ctx_dma_arr[MAX_CTX_PAGES];
+ struct bnge_ring_mem_info ring_mem;
+ struct bnge_ctx_pg_info **ctx_pg_tbl;
+};
+
+#define BNGE_MAX_TQM_SP_RINGS 1
+#define BNGE_MAX_TQM_FP_RINGS 8
+#define BNGE_MAX_TQM_RINGS \
+ (BNGE_MAX_TQM_SP_RINGS + BNGE_MAX_TQM_FP_RINGS)
+#define BNGE_BACKING_STORE_CFG_LEGACY_LEN 256
+#define BNGE_SET_CTX_PAGE_ATTR(attr) \
+do { \
+ if (BNGE_PAGE_SIZE == 0x2000) \
+ attr = FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8K; \
+ else if (BNGE_PAGE_SIZE == 0x10000) \
+ attr = FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_64K; \
+ else \
+ attr = FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_4K; \
+} while (0)
+
+#define BNGE_CTX_MRAV_AV_SPLIT_ENTRY 0
+
+#define BNGE_CTX_QP \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP
+#define BNGE_CTX_SRQ \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ
+#define BNGE_CTX_CQ \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ
+#define BNGE_CTX_VNIC \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC
+#define BNGE_CTX_STAT \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT
+#define BNGE_CTX_STQM \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING
+#define BNGE_CTX_FTQM \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING
+#define BNGE_CTX_MRAV \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV
+#define BNGE_CTX_TIM \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM
+#define BNGE_CTX_TCK \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TX_CK
+#define BNGE_CTX_RCK \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RX_CK
+#define BNGE_CTX_MTQM \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING
+#define BNGE_CTX_SQDBS \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW
+#define BNGE_CTX_RQDBS \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW
+#define BNGE_CTX_SRQDBS \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW
+#define BNGE_CTX_CQDBS \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW
+#define BNGE_CTX_SRT_TRACE \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT_TRACE
+#define BNGE_CTX_SRT2_TRACE \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT2_TRACE
+#define BNGE_CTX_CRT_TRACE \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT_TRACE
+#define BNGE_CTX_CRT2_TRACE \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT2_TRACE
+#define BNGE_CTX_RIGP0_TRACE \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP0_TRACE
+#define BNGE_CTX_L2_HWRM_TRACE \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_L2_HWRM_TRACE
+#define BNGE_CTX_ROCE_HWRM_TRACE \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ROCE_HWRM_TRACE
+
+#define BNGE_CTX_MAX (BNGE_CTX_TIM + 1)
+#define BNGE_CTX_L2_MAX (BNGE_CTX_FTQM + 1)
+#define BNGE_CTX_INV ((u16)-1)
+
+#define BNGE_CTX_V2_MAX \
+ (FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ROCE_HWRM_TRACE + 1)
+
+#define BNGE_BS_CFG_ALL_DONE \
+ FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE
+
+struct bnge_ctx_mem_type {
+ u16 type;
+ u16 entry_size;
+ u32 flags;
+#define BNGE_CTX_MEM_TYPE_VALID \
+ FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID
+ u32 instance_bmap;
+ u8 init_value;
+ u8 entry_multiple;
+ u16 init_offset;
+#define BNGE_CTX_INIT_INVALID_OFFSET 0xffff
+ u32 max_entries;
+ u32 min_entries;
+ u8 last:1;
+ u8 split_entry_cnt;
+#define BNGE_MAX_SPLIT_ENTRY 4
+ union {
+ struct {
+ u32 qp_l2_entries;
+ u32 qp_qp1_entries;
+ u32 qp_fast_qpmd_entries;
+ };
+ u32 srq_l2_entries;
+ u32 cq_l2_entries;
+ u32 vnic_entries;
+ struct {
+ u32 mrav_av_entries;
+ u32 mrav_num_entries_units;
+ };
+ u32 split[BNGE_MAX_SPLIT_ENTRY];
+ };
+ struct bnge_ctx_pg_info *pg_info;
+};
+
+struct bnge_ctx_mem_info {
+ u8 tqm_fp_rings_count;
+ u32 flags;
+#define BNGE_CTX_FLAG_INITED 0x01
+ struct bnge_ctx_mem_type ctx_arr[BNGE_CTX_V2_MAX];
+};
+
+struct bnge_ring_struct {
+ struct bnge_ring_mem_info ring_mem;
+
+ u16 fw_ring_id;
+ union {
+ u16 grp_idx;
+ u16 map_idx; /* Used by NQs */
+ };
+ u32 handle;
+ u8 queue_id;
+};
+
+int bnge_alloc_ring(struct bnge_dev *bd, struct bnge_ring_mem_info *rmem);
+void bnge_free_ring(struct bnge_dev *bd, struct bnge_ring_mem_info *rmem);
+int bnge_alloc_ctx_mem(struct bnge_dev *bd);
+void bnge_free_ctx_mem(struct bnge_dev *bd);
+void bnge_init_ring_struct(struct bnge_net *bn);
+
+#endif /* _BNGE_RMEM_H_ */
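
The two structures above drive backing-store sizing: each struct bnge_ctx_mem_type describes one firmware context type (entry size, entry limits, optional split entries), and struct bnge_ctx_mem_info carries the per-type array indexed by the BNGE_CTX_* constants. A minimal sketch of how a caller might total the memory such a table asks for; bnge_ctx_total_bytes() is an illustrative name, not a symbol from this patch, and it assumes the header above is included.

/* Hypothetical helper: sum max_entries * entry_size over all valid types. */
static u64 bnge_ctx_total_bytes(struct bnge_ctx_mem_info *ctx)
{
	u64 total = 0;
	u16 type;

	for (type = 0; type < BNGE_CTX_V2_MAX; type++) {
		struct bnge_ctx_mem_type *ctxm = &ctx->ctx_arr[type];

		if (!(ctxm->flags & BNGE_CTX_MEM_TYPE_VALID))
			continue;
		total += (u64)ctxm->max_entries * ctxm->entry_size;
	}
	return total;
}
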
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index b65b8592ad75..805daae9dd36 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -6163,7 +6163,7 @@ bnx2_5708_serdes_timer(struct bnx2 *bp)
static void
bnx2_timer(struct timer_list *t)
{
- struct bnx2 *bp = from_timer(bp, t, timer);
+ struct bnx2 *bp = timer_container_of(bp, t, timer);
if (!netif_running(bp->dev))
return;
@@ -6400,7 +6400,7 @@ bnx2_open(struct net_device *dev)
rc = bnx2_request_irq(bp);
if (rc) {
- del_timer_sync(&bp->timer);
+ timer_delete_sync(&bp->timer);
goto open_err;
}
bnx2_enable_int(bp);
@@ -6444,7 +6444,6 @@ bnx2_reset_task(struct work_struct *work)
if (!(pcicmd & PCI_COMMAND_MEMORY)) {
/* in case PCI block has reset */
pci_restore_state(bp->pdev);
- pci_save_state(bp->pdev);
}
rc = bnx2_init_nic(bp, 1);
if (rc) {
@@ -6752,7 +6751,7 @@ bnx2_close(struct net_device *dev)
bnx2_disable_int_sync(bp);
bnx2_napi_disable(bp);
netif_tx_disable(dev);
- del_timer_sync(&bp->timer);
+ timer_delete_sync(&bp->timer);
bnx2_shutdown_chip(bp);
bnx2_free_irq(bp);
bnx2_free_skbs(bp);
@@ -7912,7 +7911,7 @@ bnx2_change_mtu(struct net_device *dev, int new_mtu)
{
struct bnx2 *bp = netdev_priv(dev);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
false);
}
@@ -8602,7 +8601,7 @@ bnx2_remove_one(struct pci_dev *pdev)
unregister_netdev(dev);
- del_timer_sync(&bp->timer);
+ timer_delete_sync(&bp->timer);
cancel_work_sync(&bp->reset_task);
pci_iounmap(bp->pdev, bp->regview);
@@ -8629,7 +8628,7 @@ bnx2_suspend(struct device *device)
cancel_work_sync(&bp->reset_task);
bnx2_netif_stop(bp, true);
netif_device_detach(dev);
- del_timer_sync(&bp->timer);
+ timer_delete_sync(&bp->timer);
bnx2_shutdown_chip(bp);
__bnx2_free_irq(bp);
bnx2_free_skbs(bp);
@@ -8687,7 +8686,7 @@ static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
if (netif_running(dev)) {
bnx2_netif_stop(bp, true);
- del_timer_sync(&bp->timer);
+ timer_delete_sync(&bp->timer);
bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
}
@@ -8718,7 +8717,6 @@ static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
} else {
pci_set_master(pdev);
pci_restore_state(pdev);
- pci_save_state(pdev);
if (netif_running(dev))
err = bnx2_init_nic(bp, 1);
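
The bnx2.c hunks above are mechanical API migrations rather than behavioural changes: from_timer() becomes timer_container_of(), del_timer_sync() becomes timer_delete_sync(), the MTU store gains WRITE_ONCE() for lockless readers, and the redundant pci_save_state() after pci_restore_state() is dropped. A minimal sketch of the new timer pattern, assuming a made-up struct my_dev:

#include <linux/timer.h>

struct my_dev {
	struct timer_list timer;
};

static void my_dev_timer(struct timer_list *t)
{
	/* recover the containing structure from the timer_list pointer */
	struct my_dev *md = timer_container_of(md, t, timer);

	/* ... periodic work ... */
	mod_timer(&md->timer, jiffies + HZ);
}

static void my_dev_stop(struct my_dev *md)
{
	/* waits for a concurrently running callback to finish */
	timer_delete_sync(&md->timer);
}
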
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index e2a4e1088b7f..9580ab83d387 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1262,7 +1262,7 @@ enum {
struct bnx2x_fw_stats_req {
struct stats_query_header hdr;
- struct stats_query_entry query[FP_SB_MAX_E1x+
+ struct stats_query_entry query[FP_SB_MAX_E2 +
BNX2X_FIRST_QUEUE_QUERY_IDX];
};
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index c9b6acd8c892..e59530357e2c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3059,7 +3059,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
bp->rx_mode = BNX2X_RX_MODE_NONE;
- del_timer_sync(&bp->timer);
+ timer_delete_sync(&bp->timer);
if (IS_PF(bp) && !BP_NOMCP(bp)) {
/* Set ALWAYS_ALIVE bit in shmem */
@@ -4902,7 +4902,7 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
* because the actual alloc size is
* only updated as part of load
*/
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
if (!bnx2x_mtu_allows_gro(new_mtu))
dev->features &= ~NETIF_F_GRO_HW;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 17ae6df90723..9af81630c8a4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -344,7 +344,7 @@ static void bnx2x_dcbx_get_pfc_feature(struct bnx2x *bp,
}
}
-/* maps unmapped priorities to to the same COS as L2 */
+/* maps unmapped priorities to the same COS as L2 */
static void bnx2x_dcbx_map_nw(struct bnx2x *bp)
{
int i;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 58956ed8f531..3d853eeb976f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -39,34 +39,34 @@ static const struct {
int size;
char string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[] = {
-/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%s]: rx_bytes" },
+/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
- 8, "[%s]: rx_ucast_packets" },
+ 8, "[%d]: rx_ucast_packets" },
{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
- 8, "[%s]: rx_mcast_packets" },
+ 8, "[%d]: rx_mcast_packets" },
{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
- 8, "[%s]: rx_bcast_packets" },
- { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%s]: rx_discards" },
+ 8, "[%d]: rx_bcast_packets" },
+ { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
{ Q_STATS_OFFSET32(rx_err_discard_pkt),
- 4, "[%s]: rx_phy_ip_err_discards"},
+ 4, "[%d]: rx_phy_ip_err_discards"},
{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
- 4, "[%s]: rx_skb_alloc_discard" },
- { Q_STATS_OFFSET32(hw_csum_err), 4, "[%s]: rx_csum_offload_errors" },
- { Q_STATS_OFFSET32(driver_xoff), 4, "[%s]: tx_exhaustion_events" },
- { Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%s]: tx_bytes" },
+ 4, "[%d]: rx_skb_alloc_discard" },
+ { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
+ { Q_STATS_OFFSET32(driver_xoff), 4, "[%d]: tx_exhaustion_events" },
+ { Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
/* 10 */{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
- 8, "[%s]: tx_ucast_packets" },
+ 8, "[%d]: tx_ucast_packets" },
{ Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
- 8, "[%s]: tx_mcast_packets" },
+ 8, "[%d]: tx_mcast_packets" },
{ Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
- 8, "[%s]: tx_bcast_packets" },
+ 8, "[%d]: tx_bcast_packets" },
{ Q_STATS_OFFSET32(total_tpa_aggregations_hi),
- 8, "[%s]: tpa_aggregations" },
+ 8, "[%d]: tpa_aggregations" },
{ Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
- 8, "[%s]: tpa_aggregated_frames"},
- { Q_STATS_OFFSET32(total_tpa_bytes_hi), 8, "[%s]: tpa_bytes"},
+ 8, "[%d]: tpa_aggregated_frames"},
+ { Q_STATS_OFFSET32(total_tpa_bytes_hi), 8, "[%d]: tpa_bytes"},
{ Q_STATS_OFFSET32(driver_filtered_tx_pkt),
- 4, "[%s]: driver_filtered_tx_pkt" }
+ 4, "[%d]: driver_filtered_tx_pkt" }
};
#define BNX2X_NUM_Q_STATS ARRAY_SIZE(bnx2x_q_stats_arr)
@@ -1243,9 +1243,9 @@ static int bnx2x_get_eeprom_len(struct net_device *dev)
* pf B succeeds in taking the same lock since they are from the same port.
* pf A takes the per pf misc lock. Performs eeprom access.
* pf A finishes. Unlocks the per pf misc lock.
- * Pf B takes the lock and proceeds to perform it's own access.
+ * Pf B takes the lock and proceeds to perform its own access.
* pf A unlocks the per port lock, while pf B is still working (!).
- * mcp takes the per port lock and corrupts pf B's access (and/or has it's own
+ * mcp takes the per port lock and corrupts pf B's access (and/or has its own
* access corrupted by pf B)
*/
static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
@@ -3184,49 +3184,43 @@ static u32 bnx2x_get_private_flags(struct net_device *dev)
static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
struct bnx2x *bp = netdev_priv(dev);
- int i, j, k, start;
- char queue_name[MAX_QUEUE_NAME_LEN+1];
+ const char *str;
+ int i, j, start;
switch (stringset) {
case ETH_SS_STATS:
- k = 0;
if (is_multi(bp)) {
for_each_eth_queue(bp, i) {
- memset(queue_name, 0, sizeof(queue_name));
- snprintf(queue_name, sizeof(queue_name),
- "%d", i);
- for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
- snprintf(buf + (k + j)*ETH_GSTRING_LEN,
- ETH_GSTRING_LEN,
- bnx2x_q_stats_arr[j].string,
- queue_name);
- k += BNX2X_NUM_Q_STATS;
+ for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
+ str = bnx2x_q_stats_arr[j].string;
+ ethtool_sprintf(&buf, str, i);
+ }
}
}
- for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
+ for (i = 0; i < BNX2X_NUM_STATS; i++) {
if (HIDE_PORT_STAT(bp) && IS_PORT_STAT(i))
continue;
- strcpy(buf + (k + j)*ETH_GSTRING_LEN,
- bnx2x_stats_arr[i].string);
- j++;
+ ethtool_puts(&buf, bnx2x_stats_arr[i].string);
}
break;
case ETH_SS_TEST:
+ if (IS_VF(bp))
+ break;
/* First 4 tests cannot be done in MF mode */
if (!IS_MF(bp))
start = 0;
else
start = 4;
- memcpy(buf, bnx2x_tests_str_arr + start,
- ETH_GSTRING_LEN * BNX2X_NUM_TESTS(bp));
+ for (i = start; i < BNX2X_NUM_TESTS_SF; i++)
+ ethtool_puts(&buf, bnx2x_tests_str_arr[i]);
break;
case ETH_SS_PRIV_FLAGS:
- memcpy(buf, bnx2x_private_arr,
- ETH_GSTRING_LEN * BNX2X_PRI_FLAG_LEN);
+ for (i = 0; i < BNX2X_PRI_FLAG_LEN; i++)
+ ethtool_puts(&buf, bnx2x_private_arr[i]);
break;
}
}
@@ -3324,8 +3318,11 @@ static int bnx2x_set_phys_id(struct net_device *dev,
return 0;
}
-static int bnx2x_get_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
+static int bnx2x_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *info)
{
+ struct bnx2x *bp = netdev_priv(dev);
+
switch (info->flow_type) {
case TCP_V4_FLOW:
case TCP_V6_FLOW:
@@ -3358,29 +3355,22 @@ static int bnx2x_get_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
return 0;
}
-static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
- u32 *rules __always_unused)
+static u32 bnx2x_get_rx_ring_count(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
- switch (info->cmd) {
- case ETHTOOL_GRXRINGS:
- info->data = BNX2X_NUM_ETH_QUEUES(bp);
- return 0;
- case ETHTOOL_GRXFH:
- return bnx2x_get_rss_flags(bp, info);
- default:
- DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
- return -EOPNOTSUPP;
- }
+ return BNX2X_NUM_ETH_QUEUES(bp);
}
-static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
+static int bnx2x_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *info,
+ struct netlink_ext_ack *extack)
{
+ struct bnx2x *bp = netdev_priv(dev);
int udp_rss_requested;
DP(BNX2X_MSG_ETHTOOL,
- "Set rss flags command parameters: flow type = %d, data = %llu\n",
+ "Set rss flags command parameters: flow type = %d, data = %u\n",
info->flow_type, info->data);
switch (info->flow_type) {
@@ -3466,19 +3456,6 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
}
}
-static int bnx2x_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
-{
- struct bnx2x *bp = netdev_priv(dev);
-
- switch (info->cmd) {
- case ETHTOOL_SRXFH:
- return bnx2x_set_rss_flags(bp, info);
- default:
- DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
- return -EOPNOTSUPP;
- }
-}
-
static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev)
{
return T_ETH_INDIRECTION_TABLE_SIZE;
@@ -3634,22 +3611,18 @@ static int bnx2x_set_channels(struct net_device *dev,
}
static int bnx2x_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct bnx2x *bp = netdev_priv(dev);
if (bp->flags & PTP_SUPPORTED) {
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
if (bp->ptp_clock)
info->phc_index = ptp_clock_index(bp->ptp_clock);
- else
- info->phc_index = -1;
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
@@ -3693,11 +3666,12 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
.get_strings = bnx2x_get_strings,
.set_phys_id = bnx2x_set_phys_id,
.get_ethtool_stats = bnx2x_get_ethtool_stats,
- .get_rxnfc = bnx2x_get_rxnfc,
- .set_rxnfc = bnx2x_set_rxnfc,
+ .get_rx_ring_count = bnx2x_get_rx_ring_count,
.get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
.get_rxfh = bnx2x_get_rxfh,
.set_rxfh = bnx2x_set_rxfh,
+ .get_rxfh_fields = bnx2x_get_rxfh_fields,
+ .set_rxfh_fields = bnx2x_set_rxfh_fields,
.get_channels = bnx2x_get_channels,
.set_channels = bnx2x_set_channels,
.get_module_info = bnx2x_get_module_info,
@@ -3720,11 +3694,12 @@ static const struct ethtool_ops bnx2x_vf_ethtool_ops = {
.get_sset_count = bnx2x_get_sset_count,
.get_strings = bnx2x_get_strings,
.get_ethtool_stats = bnx2x_get_ethtool_stats,
- .get_rxnfc = bnx2x_get_rxnfc,
- .set_rxnfc = bnx2x_set_rxnfc,
+ .get_rx_ring_count = bnx2x_get_rx_ring_count,
.get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
.get_rxfh = bnx2x_get_rxfh,
.set_rxfh = bnx2x_set_rxfh,
+ .get_rxfh_fields = bnx2x_get_rxfh_fields,
+ .set_rxfh_fields = bnx2x_set_rxfh_fields,
.get_channels = bnx2x_get_channels,
.set_channels = bnx2x_set_channels,
.get_link_ksettings = bnx2x_get_vf_link_ksettings,
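
The ethtool conversion above replaces hand-rolled snprintf()/memcpy() into the strings buffer with ethtool_sprintf()/ethtool_puts(), and splits the old get_rxnfc/set_rxnfc multiplexers into the dedicated get_rx_ring_count and get_rxfh_fields/set_rxfh_fields callbacks. A minimal sketch of the string helpers (the stat names and queue count are illustrative); each call fills one ETH_GSTRING_LEN slot and advances the buffer, so no manual index bookkeeping is needed:

#include <linux/ethtool.h>
#include <linux/kernel.h>

static const char example_stats[][ETH_GSTRING_LEN] = {
	"rx_bytes", "tx_bytes",
};

static void example_get_strings(struct net_device *dev, u32 sset, u8 *buf)
{
	int i, q;

	if (sset != ETH_SS_STATS)
		return;
	for (q = 0; q < 4; q++)			/* per-queue stats */
		for (i = 0; i < ARRAY_SIZE(example_stats); i++)
			ethtool_sprintf(&buf, "[%d]: %s", q, example_stats[i]);
	ethtool_puts(&buf, "link_flaps");	/* one global stat */
}
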
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index a84d015da5df..9221942290a8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -332,7 +332,7 @@
#define TOE_STATE (TOE_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
#define RDMA_STATE (RDMA_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
-/* microcode fixed page page size 4K (chains and ring segments) */
+/* microcode fixed page size 4K (chains and ring segments) */
#define MC_PAGE_SIZE 4096
/* Number of indices per slow-path SB */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 678829646cec..6a1cc2032bf3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -308,8 +308,11 @@ static int bnx2x_set_storm_rx_mode(struct bnx2x *bp);
/****************************************************************************
* General service functions
****************************************************************************/
-
-static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr);
+static int bnx2x_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
+static int bnx2x_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *config);
static void __storm_memset_dma_mapping(struct bnx2x *bp,
u32 addr, dma_addr_t mapping)
@@ -1768,7 +1771,7 @@ static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
* @bp: driver handle
*
* Returns the recovery leader resource id according to the engine this function
- * belongs to. Currently only only 2 engines is supported.
+ * belongs to. Currently only 2 engines is supported.
*/
static int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
{
@@ -5783,7 +5786,7 @@ void bnx2x_drv_pulse(struct bnx2x *bp)
static void bnx2x_timer(struct timer_list *t)
{
- struct bnx2x *bp = from_timer(bp, t, timer);
+ struct bnx2x *bp = timer_container_of(bp, t, timer);
if (!netif_running(bp->dev))
return;
@@ -10219,8 +10222,7 @@ static int bnx2x_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
static const struct udp_tunnel_nic_info bnx2x_udp_tunnels = {
.sync_table = bnx2x_udp_tunnel_sync,
- .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
- UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+ .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
.tables = {
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
@@ -12814,14 +12816,9 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (!netif_running(dev))
return -EAGAIN;
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return bnx2x_hwtstamp_ioctl(bp, ifr);
- default:
- DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
- mdio->phy_id, mdio->reg_num, mdio->val_in);
- return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
- }
+ DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
+ mdio->phy_id, mdio->reg_num, mdio->val_in);
+ return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
}
static int bnx2x_validate_addr(struct net_device *dev)
@@ -13037,6 +13034,8 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_get_phys_port_id = bnx2x_get_phys_port_id,
.ndo_set_vf_link_state = bnx2x_set_vf_link_state,
.ndo_features_check = bnx2x_features_check,
+ .ndo_hwtstamp_get = bnx2x_hwtstamp_get,
+ .ndo_hwtstamp_set = bnx2x_hwtstamp_set,
};
static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
@@ -14140,7 +14139,7 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
bnx2x_tx_disable(bp);
netdev_reset_tc(bp->dev);
- del_timer_sync(&bp->timer);
+ timer_delete_sync(&bp->timer);
cancel_delayed_work_sync(&bp->sp_task);
cancel_delayed_work_sync(&bp->period_task);
@@ -14217,7 +14216,6 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
pci_set_master(pdev);
pci_restore_state(pdev);
- pci_save_state(pdev);
if (netif_running(dev))
bnx2x_set_power_state(bp, PCI_D0);
@@ -15176,7 +15174,7 @@ void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb)
}
/* Read the PHC */
-static u64 bnx2x_cyclecounter_read(const struct cyclecounter *cc)
+static u64 bnx2x_cyclecounter_read(struct cyclecounter *cc)
{
struct bnx2x *bp = container_of(cc, struct bnx2x, cyclecounter);
int port = BP_PORT(bp);
@@ -15351,31 +15349,57 @@ int bnx2x_configure_ptp_filters(struct bnx2x *bp)
return 0;
}
-static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr)
+static int bnx2x_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config config;
+ struct bnx2x *bp = netdev_priv(dev);
int rc;
- DP(BNX2X_MSG_PTP, "HWTSTAMP IOCTL called\n");
+ DP(BNX2X_MSG_PTP, "HWTSTAMP SET called\n");
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
+ if (!netif_running(dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Device is down");
+ return -EAGAIN;
+ }
DP(BNX2X_MSG_PTP, "Requested tx_type: %d, requested rx_filters = %d\n",
- config.tx_type, config.rx_filter);
+ config->tx_type, config->rx_filter);
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_ON:
+ case HWTSTAMP_TX_OFF:
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "One-step timestamping is not supported");
+ return -ERANGE;
+ }
bp->hwtstamp_ioctl_called = true;
- bp->tx_type = config.tx_type;
- bp->rx_filter = config.rx_filter;
+ bp->tx_type = config->tx_type;
+ bp->rx_filter = config->rx_filter;
rc = bnx2x_configure_ptp_filters(bp);
- if (rc)
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "HW configuration failure");
return rc;
+ }
+
+ config->rx_filter = bp->rx_filter;
+
+ return 0;
+}
+
+static int bnx2x_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct bnx2x *bp = netdev_priv(dev);
- config.rx_filter = bp->rx_filter;
+ config->rx_filter = bp->rx_filter;
+ config->tx_type = bp->tx_type;
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
/* Configures HW for PTP */
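
The bnx2x_main.c hunks above move hardware timestamping from the legacy SIOCSHWTSTAMP ioctl to the ndo_hwtstamp_get/ndo_hwtstamp_set callbacks, which receive a kernel-space struct kernel_hwtstamp_config plus an extack, so the copy_from_user()/copy_to_user() boilerplate disappears and errors can carry a message. A minimal sketch of the pattern, with an illustrative private structure (not the bnx2x one):

#include <linux/net_tstamp.h>
#include <linux/netdevice.h>

struct example_priv {
	int tx_type;
	int rx_filter;
};

static int example_hwtstamp_set(struct net_device *dev,
				struct kernel_hwtstamp_config *cfg,
				struct netlink_ext_ack *extack)
{
	struct example_priv *priv = netdev_priv(dev);

	if (cfg->tx_type != HWTSTAMP_TX_OFF && cfg->tx_type != HWTSTAMP_TX_ON) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported tx_type");
		return -ERANGE;
	}
	priv->tx_type = cfg->tx_type;
	priv->rx_filter = cfg->rx_filter;	/* apply to HW here */
	return 0;
}

static int example_hwtstamp_get(struct net_device *dev,
				struct kernel_hwtstamp_config *cfg)
{
	struct example_priv *priv = netdev_priv(dev);

	cfg->tx_type = priv->tx_type;
	cfg->rx_filter = priv->rx_filter;
	return 0;
}
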
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 4e9215bce4ad..a018f251d198 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -868,6 +868,8 @@
#define DORQ_REG_VF_TYPE_VALUE_0 0x170258
#define DORQ_REG_VF_USAGE_CT_LIMIT 0x170340
+extern const u32 dmae_reg_go_c[];
+
/* [RW 4] Initial activity counter value on the load request; when the
shortcut is done. */
#define DORQ_REG_SHRT_ACT_CNT 0x170070
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 8e04552d2216..02c8213915a5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -2593,7 +2593,7 @@ void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
/********************* Multicast verbs: SET, CLEAR ****************************/
static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
{
- return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
+ return (crc32c(0, mac, ETH_ALEN) >> 24) & 0xff;
}
struct bnx2x_mcast_mac_elem {
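
The bnx2x_sp.c hunk above only renames the CRC helper: crc32c_le() has been consolidated into crc32c(), which computes the same little-endian CRC-32C. Illustrative usage (the header location has moved across kernel versions, so the include is an assumption):

#include <linux/crc32.h>
#include <linux/etherdevice.h>

static u8 example_mcast_bin(const u8 *mac)
{
	/* top byte of the CRC selects one of 256 approximate-match bins */
	return (crc32c(0, mac, ETH_ALEN) >> 24) & 0xff;
}
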
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index bacc8552bce1..00ca861c80dd 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -379,7 +379,7 @@ struct bnx2x_vlan_mac_obj {
/**
* Delete all configured elements having the given
* vlan_mac_flags specification. Assumes no pending for
- * execution commands. Will schedule all all currently
+ * execution commands. Will schedule all currently
* configured MACs/VLANs/VLAN-MACs matching the vlan_mac_flags
* specification for deletion and will use the given
* ramrod_flags for the last DEL operation.
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 77d4cb4ad782..12198fc3ab22 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -2652,10 +2652,10 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
/* vlan */
if (bulletin->valid_bitmap & (1 << VLAN_VALID))
/* vlan configured by ndo so its in bulletin board */
- memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN);
+ ivi->vlan = bulletin->vlan;
else
/* function has not been loaded yet. Show vlans as 0s */
- memset(&ivi->vlan, 0, VLAN_HLEN);
+ ivi->vlan = 0;
mutex_unlock(&bp->vfdb->bulletin_mutex);
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 2bb133ae61c3..ba6729f2f9c0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -23,8 +23,6 @@
#include "bnx2x_cmn.h"
#include "bnx2x_sriov.h"
-extern const u32 dmae_reg_go_c[];
-
/* Statistics */
/*
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 57e61f963167..d17d0ea89c36 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -54,9 +54,12 @@
#include <net/pkt_cls.h>
#include <net/page_pool/helpers.h>
#include <linux/align.h>
+#include <net/netdev_lock.h>
#include <net/netdev_queues.h>
+#include <net/netdev_rx_queue.h>
+#include <linux/pci-tph.h>
+#include <linux/bnxt/hsi.h>
-#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
@@ -69,18 +72,19 @@
#include "bnxt_tc.h"
#include "bnxt_devlink.h"
#include "bnxt_debugfs.h"
+#include "bnxt_coredump.h"
#include "bnxt_hwmon.h"
#define BNXT_TX_TIMEOUT (5 * HZ)
#define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW | \
NETIF_MSG_TX_ERR)
+MODULE_IMPORT_NS("NETDEV_INTERNAL");
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
+MODULE_DESCRIPTION("Broadcom NetXtreme network driver");
#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
-#define BNXT_RX_COPY_THRESH 256
#define BNXT_TX_PUSH_THRESH 164
@@ -137,6 +141,8 @@ static const struct {
[NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
[NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
[NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
+ [NETXTREME_E_P7_VF] = { "Broadcom BCM5760X Virtual Function" },
+ [NETXTREME_E_P7_VF_HV] = { "Broadcom BCM5760X Virtual Function for Hyper-V" },
};
static const struct pci_device_id bnxt_pci_tbl[] = {
@@ -211,6 +217,8 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
{ PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
{ PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
+ { PCI_VDEVICE(BROADCOM, 0x1819), .driver_data = NETXTREME_E_P7_VF },
+ { PCI_VDEVICE(BROADCOM, 0x181b), .driver_data = NETXTREME_E_P7_VF_HV },
{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
{ 0 }
@@ -242,6 +250,23 @@ static const u16 bnxt_async_events_arr[] = {
ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE,
+ ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER,
+};
+
+const u16 bnxt_bstore_to_trace[] = {
+ [BNXT_CTX_SRT] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT_TRACE,
+ [BNXT_CTX_SRT2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT2_TRACE,
+ [BNXT_CTX_CRT] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT_TRACE,
+ [BNXT_CTX_CRT2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT2_TRACE,
+ [BNXT_CTX_RIGP0] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP0_TRACE,
+ [BNXT_CTX_L2HWRM] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_L2_HWRM_TRACE,
+ [BNXT_CTX_REHWRM] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ROCE_HWRM_TRACE,
+ [BNXT_CTX_CA0] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA0_TRACE,
+ [BNXT_CTX_CA1] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA1_TRACE,
+ [BNXT_CTX_CA2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA2_TRACE,
+ [BNXT_CTX_RIGP1] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP1_TRACE,
+ [BNXT_CTX_KONG] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_AFM_KONG_HWRM_TRACE,
+ [BNXT_CTX_QPC] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ERR_QPC_TRACE,
};
static struct workqueue_struct *bnxt_pf_wq;
@@ -294,15 +319,12 @@ static bool bnxt_vf_pciid(enum board_idx idx)
return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
- idx == NETXTREME_E_P5_VF_HV);
+ idx == NETXTREME_E_P5_VF_HV || idx == NETXTREME_E_P7_VF ||
+ idx == NETXTREME_E_P7_VF_HV);
}
#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
-#define DB_CP_IRQ_DIS_FLAGS (DB_KEY_CP | DB_IRQ_DIS)
-
-#define BNXT_CP_DB_IRQ_DIS(db) \
- writel(DB_CP_IRQ_DIS_FLAGS, db)
#define BNXT_DB_CQ(db, idx) \
writel(DB_CP_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)
@@ -454,11 +476,13 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
dma_addr_t mapping;
unsigned int length, pad = 0;
u32 len, free_size, vlan_tag_flags, cfa_action, flags;
- u16 prod, last_frag;
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
struct pci_dev *pdev = bp->pdev;
+ u16 prod, last_frag, txts_prod;
struct bnxt_tx_ring_info *txr;
struct bnxt_sw_tx_bd *tx_buf;
__le32 lflags = 0;
+ skb_frag_t *frag;
i = skb_get_queue_mapping(skb);
if (unlikely(i >= bp->tx_nr_rings)) {
@@ -471,6 +495,17 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
txr = &bp->tx_ring[bp->tx_ring_map[i]];
prod = txr->tx_prod;
+#if (MAX_SKB_FRAGS > TX_MAX_FRAGS)
+ if (skb_shinfo(skb)->nr_frags > TX_MAX_FRAGS) {
+ netdev_warn_once(dev, "SKB has too many (%d) fragments, max supported is %d. SKB will be linearized.\n",
+ skb_shinfo(skb)->nr_frags, TX_MAX_FRAGS);
+ if (skb_linearize(skb)) {
+ dev_kfree_skb_any(skb);
+ dev_core_stats_tx_dropped_inc(dev);
+ return NETDEV_TX_OK;
+ }
+ }
+#endif
free_size = bnxt_tx_avail(bp, txr);
if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
/* We must have raced with NAPI cleanup */
@@ -507,28 +542,34 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
}
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
- struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && ptp &&
+ ptp->tx_tstamp_en) {
+ if (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) {
+ lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
+ tx_buf->is_ts_pkt = 1;
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ } else if (!skb_is_gso(skb)) {
+ u16 seq_id, hdr_off;
- if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) &&
- atomic_dec_if_positive(&ptp->tx_avail) >= 0) {
- if (!bnxt_ptp_parse(skb, &ptp->tx_seqid,
- &ptp->tx_hdr_off)) {
+ if (!bnxt_ptp_parse(skb, &seq_id, &hdr_off) &&
+ !bnxt_ptp_get_txts_prod(ptp, &txts_prod)) {
if (vlan_tag_flags)
- ptp->tx_hdr_off += VLAN_HLEN;
+ hdr_off += VLAN_HLEN;
lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
+ tx_buf->is_ts_pkt = 1;
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- } else {
- atomic_inc(&bp->ptp_cfg->tx_avail);
+
+ ptp->txts_req[txts_prod].tx_seqid = seq_id;
+ ptp->txts_req[txts_prod].tx_hdr_off = hdr_off;
+ tx_buf->txts_prod = txts_prod;
}
}
}
-
if (unlikely(skb->no_fcs))
lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
- !lflags) {
+ skb_frags_readable(skb) && !lflags) {
struct tx_push_buffer *tx_push_buf = txr->tx_push;
struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
@@ -544,7 +585,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
TX_BD_FLAGS_LHINT_512_AND_SMALLER |
TX_BD_FLAGS_COAL_NOW |
TX_BD_FLAGS_PACKET_END |
- (2 << TX_BD_FLAGS_BD_CNT_SHIFT));
+ TX_BD_CNT(2));
if (skb->ip_summed == CHECKSUM_PARTIAL)
tx_push1->tx_bd_hsize_lflags =
@@ -563,9 +604,9 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb_copy_from_linear_data(skb, pdata, len);
pdata += len;
for (j = 0; j < last_frag; j++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
void *fptr;
+ frag = &skb_shinfo(skb)->frags[j];
fptr = skb_frag_address_safe(frag);
if (!fptr)
goto normal_tx;
@@ -619,7 +660,7 @@ normal_tx:
dma_unmap_addr_set(tx_buf, mapping, mapping);
flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
- ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
+ TX_BD_CNT(last_frag + 2);
txbd->tx_bd_haddr = cpu_to_le64(mapping);
txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2 + last_frag);
@@ -673,8 +714,7 @@ normal_tx:
cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
txbd0 = txbd;
for (i = 0; i < last_frag; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-
+ frag = &skb_shinfo(skb)->frags[i];
prod = NEXT_TX(prod);
txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
@@ -686,7 +726,8 @@ normal_tx:
goto tx_dma_error;
tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
- dma_unmap_addr_set(tx_buf, mapping, mapping);
+ netmem_dma_unmap_addr_set(skb_frag_netmem(frag), tx_buf,
+ mapping, mapping);
txbd->tx_bd_haddr = cpu_to_le64(mapping);
@@ -730,9 +771,6 @@ tx_done:
return NETDEV_TX_OK;
tx_dma_error:
- if (BNXT_TX_PTP_IS_SET(lflags))
- atomic_inc(&bp->ptp_cfg->tx_avail);
-
last_frag = i;
/* start back at beginning and unmap skb */
@@ -746,22 +784,32 @@ tx_dma_error:
for (i = 0; i < last_frag; i++) {
prod = NEXT_TX(prod);
tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
- dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
- skb_frag_size(&skb_shinfo(skb)->frags[i]),
- DMA_TO_DEVICE);
+ frag = &skb_shinfo(skb)->frags[i];
+ netmem_dma_unmap_page_attrs(&pdev->dev,
+ dma_unmap_addr(tx_buf, mapping),
+ skb_frag_size(frag),
+ DMA_TO_DEVICE, 0);
}
tx_free:
dev_kfree_skb_any(skb);
tx_kick_pending:
+ if (BNXT_TX_PTP_IS_SET(lflags)) {
+ txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].is_ts_pkt = 0;
+ atomic64_inc(&bp->ptp_cfg->stats.ts_err);
+ if (!(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
+ /* set SKB to err so PTP worker will clean up */
+ ptp->txts_req[txts_prod].tx_skb = ERR_PTR(-EIO);
+ }
if (txr->kick_pending)
bnxt_txr_db_kick(bp, txr, txr->tx_prod);
- txr->tx_buf_ring[txr->tx_prod].skb = NULL;
+ txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].skb = NULL;
dev_core_stats_tx_dropped_inc(dev);
return NETDEV_TX_OK;
}
-static void __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
+/* Returns true if some remaining TX packets not processed. */
+static bool __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
int budget)
{
struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
@@ -769,25 +817,35 @@ static void __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
u16 hw_cons = txr->tx_hw_cons;
unsigned int tx_bytes = 0;
u16 cons = txr->tx_cons;
+ skb_frag_t *frag;
int tx_pkts = 0;
+ bool rc = false;
while (RING_TX(bp, cons) != hw_cons) {
struct bnxt_sw_tx_bd *tx_buf;
struct sk_buff *skb;
+ bool is_ts_pkt;
int j, last;
tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
- cons = NEXT_TX(cons);
skb = tx_buf->skb;
- tx_buf->skb = NULL;
if (unlikely(!skb)) {
bnxt_sched_reset_txr(bp, txr, cons);
- return;
+ return rc;
}
+ is_ts_pkt = tx_buf->is_ts_pkt;
+ if (is_ts_pkt && (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP)) {
+ rc = true;
+ break;
+ }
+
+ cons = NEXT_TX(cons);
tx_pkts++;
tx_bytes += skb->len;
+ tx_buf->skb = NULL;
+ tx_buf->is_ts_pkt = 0;
if (tx_buf->is_push) {
tx_buf->is_push = 0;
@@ -799,28 +857,27 @@ static void __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
last = tx_buf->nr_frags;
for (j = 0; j < last; j++) {
+ frag = &skb_shinfo(skb)->frags[j];
cons = NEXT_TX(cons);
tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
- dma_unmap_page(
- &pdev->dev,
- dma_unmap_addr(tx_buf, mapping),
- skb_frag_size(&skb_shinfo(skb)->frags[j]),
- DMA_TO_DEVICE);
+ netmem_dma_unmap_page_attrs(&pdev->dev,
+ dma_unmap_addr(tx_buf,
+ mapping),
+ skb_frag_size(frag),
+ DMA_TO_DEVICE, 0);
}
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
+ if (unlikely(is_ts_pkt)) {
if (BNXT_CHIP_P5(bp)) {
/* PTP worker takes ownership of the skb */
- if (!bnxt_get_tx_ts_p5(bp, skb))
- skb = NULL;
- else
- atomic_inc(&bp->ptp_cfg->tx_avail);
+ bnxt_get_tx_ts_p5(bp, skb, tx_buf->txts_prod);
+ skb = NULL;
}
}
next_tx_int:
cons = NEXT_TX(cons);
- dev_consume_skb_any(skb);
+ napi_consume_skb(skb, budget);
}
WRITE_ONCE(txr->tx_cons, cons);
@@ -828,18 +885,27 @@ next_tx_int:
__netif_txq_completed_wake(txq, tx_pkts, tx_bytes,
bnxt_tx_avail(bp, txr), bp->tx_wake_thresh,
READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING);
+
+ return rc;
}
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
{
struct bnxt_tx_ring_info *txr;
+ bool more = false;
int i;
bnxt_for_each_napi_tx(i, bnapi, txr) {
if (txr->tx_hw_cons != RING_TX(bp, txr->tx_cons))
- __bnxt_tx_int(bp, txr, budget);
+ more |= __bnxt_tx_int(bp, txr, budget);
}
- bnapi->events &= ~BNXT_TX_CMP_EVENT;
+ if (!more)
+ bnapi->events &= ~BNXT_TX_CMP_EVENT;
+}
+
+static bool bnxt_separate_head_pool(struct bnxt_rx_ring_info *rxr)
+{
+ return rxr->need_head_pool || PAGE_SIZE > BNXT_RX_PAGE_SIZE;
}
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
@@ -863,28 +929,40 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
return page;
}
+static netmem_ref __bnxt_alloc_rx_netmem(struct bnxt *bp, dma_addr_t *mapping,
+ struct bnxt_rx_ring_info *rxr,
+ unsigned int *offset,
+ gfp_t gfp)
+{
+ netmem_ref netmem;
+
+ if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
+ netmem = page_pool_alloc_frag_netmem(rxr->page_pool, offset, BNXT_RX_PAGE_SIZE, gfp);
+ } else {
+ netmem = page_pool_alloc_netmems(rxr->page_pool, gfp);
+ *offset = 0;
+ }
+ if (!netmem)
+ return 0;
+
+ *mapping = page_pool_get_dma_addr_netmem(netmem) + *offset;
+ return netmem;
+}
+
static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping,
+ struct bnxt_rx_ring_info *rxr,
gfp_t gfp)
{
- u8 *data;
- struct pci_dev *pdev = bp->pdev;
+ unsigned int offset;
+ struct page *page;
- if (gfp == GFP_ATOMIC)
- data = napi_alloc_frag(bp->rx_buf_size);
- else
- data = netdev_alloc_frag(bp->rx_buf_size);
- if (!data)
+ page = page_pool_alloc_frag(rxr->head_pool, &offset,
+ bp->rx_buf_size, gfp);
+ if (!page)
return NULL;
- *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
- bp->rx_buf_use_size, bp->rx_dir,
- DMA_ATTR_WEAK_ORDERING);
-
- if (dma_mapping_error(&pdev->dev, *mapping)) {
- skb_free_frag(data);
- data = NULL;
- }
- return data;
+ *mapping = page_pool_get_dma_addr(page) + bp->rx_dma_offset + offset;
+ return page_address(page) + offset;
}
int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
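
The hunk above replaces napi_alloc_frag()/netdev_alloc_frag() plus an explicit dma_map_single_attrs() with fragments carved out of a DMA-mapped page_pool, so the matching unmap on the free path also goes away (buffers are simply returned to the pool). A minimal sketch of the allocation side, assuming a pool created with PP_FLAG_DMA_MAP; names other than the page_pool helpers are illustrative:

#include <linux/mm.h>
#include <net/page_pool/helpers.h>

static void *example_alloc_frag(struct page_pool *pool, unsigned int buf_size,
				dma_addr_t *mapping, gfp_t gfp)
{
	unsigned int offset;
	struct page *page;

	/* the pool hands back a fragment of a page it has already DMA-mapped */
	page = page_pool_alloc_frag(pool, &offset, buf_size, gfp);
	if (!page)
		return NULL;

	*mapping = page_pool_get_dma_addr(page) + offset;
	return page_address(page) + offset;
}
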
@@ -906,7 +984,7 @@ int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
rx_buf->data = page;
rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset;
} else {
- u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, gfp);
+ u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, rxr, gfp);
if (!data)
return -ENOMEM;
@@ -951,21 +1029,19 @@ static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
return next;
}
-static inline int bnxt_alloc_rx_page(struct bnxt *bp,
- struct bnxt_rx_ring_info *rxr,
- u16 prod, gfp_t gfp)
+static int bnxt_alloc_rx_netmem(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+ u16 prod, gfp_t gfp)
{
struct rx_bd *rxbd =
&rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
struct bnxt_sw_rx_agg_bd *rx_agg_buf;
- struct page *page;
- dma_addr_t mapping;
u16 sw_prod = rxr->rx_sw_agg_prod;
unsigned int offset = 0;
+ dma_addr_t mapping;
+ netmem_ref netmem;
- page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
-
- if (!page)
+ netmem = __bnxt_alloc_rx_netmem(bp, &mapping, rxr, &offset, gfp);
+ if (!netmem)
return -ENOMEM;
if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
@@ -975,7 +1051,7 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
rxr->rx_sw_agg_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));
- rx_agg_buf->page = page;
+ rx_agg_buf->netmem = netmem;
rx_agg_buf->offset = offset;
rx_agg_buf->mapping = mapping;
rxbd->rx_bd_haddr = cpu_to_le64(mapping);
@@ -1019,11 +1095,11 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
p5_tpa = true;
for (i = 0; i < agg_bufs; i++) {
- u16 cons;
- struct rx_agg_cmp *agg;
struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
+ struct rx_agg_cmp *agg;
struct rx_bd *prod_bd;
- struct page *page;
+ netmem_ref netmem;
+ u16 cons;
if (p5_tpa)
agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
@@ -1040,11 +1116,11 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
cons_rx_buf = &rxr->rx_agg_ring[cons];
/* It is possible for sw_prod to be equal to cons, so
- * set cons_rx_buf->page to NULL first.
+ * set cons_rx_buf->netmem to 0 first.
*/
- page = cons_rx_buf->page;
- cons_rx_buf->page = NULL;
- prod_rx_buf->page = page;
+ netmem = cons_rx_buf->netmem;
+ cons_rx_buf->netmem = 0;
+ prod_rx_buf->netmem = netmem;
prod_rx_buf->offset = cons_rx_buf->offset;
prod_rx_buf->mapping = cons_rx_buf->mapping;
@@ -1157,41 +1233,48 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
}
skb = napi_build_skb(data, bp->rx_buf_size);
- dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
- bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
+ dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
+ bp->rx_dir);
if (!skb) {
- skb_free_frag(data);
+ page_pool_free_va(rxr->head_pool, data, true);
return NULL;
}
+ skb_mark_for_recycle(skb);
skb_reserve(skb, bp->rx_offset);
skb_put(skb, offset_and_len & 0xffff);
return skb;
}
-static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
- struct bnxt_cp_ring_info *cpr,
- struct skb_shared_info *shinfo,
- u16 idx, u32 agg_bufs, bool tpa,
- struct xdp_buff *xdp)
+static u32 __bnxt_rx_agg_netmems(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr,
+ u16 idx, u32 agg_bufs, bool tpa,
+ struct sk_buff *skb,
+ struct xdp_buff *xdp)
{
struct bnxt_napi *bnapi = cpr->bnapi;
- struct pci_dev *pdev = bp->pdev;
- struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
- u16 prod = rxr->rx_agg_prod;
+ struct skb_shared_info *shinfo;
+ struct bnxt_rx_ring_info *rxr;
u32 i, total_frag_len = 0;
bool p5_tpa = false;
+ u16 prod;
+
+ rxr = bnapi->rx_ring;
+ prod = rxr->rx_agg_prod;
if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
p5_tpa = true;
+ if (skb)
+ shinfo = skb_shinfo(skb);
+ else
+ shinfo = xdp_get_shared_info_from_buff(xdp);
+
for (i = 0; i < agg_bufs; i++) {
- skb_frag_t *frag = &shinfo->frags[i];
- u16 cons, frag_len;
- struct rx_agg_cmp *agg;
struct bnxt_sw_rx_agg_bd *cons_rx_buf;
- struct page *page;
- dma_addr_t mapping;
+ struct rx_agg_cmp *agg;
+ u16 cons, frag_len;
+ netmem_ref netmem;
if (p5_tpa)
agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
@@ -1202,27 +1285,41 @@ static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
cons_rx_buf = &rxr->rx_agg_ring[cons];
- skb_frag_fill_page_desc(frag, cons_rx_buf->page,
- cons_rx_buf->offset, frag_len);
- shinfo->nr_frags = i + 1;
+ if (skb) {
+ skb_add_rx_frag_netmem(skb, i, cons_rx_buf->netmem,
+ cons_rx_buf->offset,
+ frag_len, BNXT_RX_PAGE_SIZE);
+ } else {
+ skb_frag_t *frag = &shinfo->frags[i];
+
+ skb_frag_fill_netmem_desc(frag, cons_rx_buf->netmem,
+ cons_rx_buf->offset,
+ frag_len);
+ shinfo->nr_frags = i + 1;
+ }
__clear_bit(cons, rxr->rx_agg_bmap);
- /* It is possible for bnxt_alloc_rx_page() to allocate
+ /* It is possible for bnxt_alloc_rx_netmem() to allocate
* a sw_prod index that equals the cons index, so we
* need to clear the cons entry now.
*/
- mapping = cons_rx_buf->mapping;
- page = cons_rx_buf->page;
- cons_rx_buf->page = NULL;
+ netmem = cons_rx_buf->netmem;
+ cons_rx_buf->netmem = 0;
- if (xdp && page_is_pfmemalloc(page))
+ if (xdp && netmem_is_pfmemalloc(netmem))
xdp_buff_set_frag_pfmemalloc(xdp);
- if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
+ if (bnxt_alloc_rx_netmem(bp, rxr, prod, GFP_ATOMIC) != 0) {
+ if (skb) {
+ skb->len -= frag_len;
+ skb->data_len -= frag_len;
+ skb->truesize -= BNXT_RX_PAGE_SIZE;
+ }
+
--shinfo->nr_frags;
- cons_rx_buf->page = page;
+ cons_rx_buf->netmem = netmem;
- /* Update prod since possibly some pages have been
+ /* Update prod since possibly some netmems have been
* allocated already.
*/
rxr->rx_agg_prod = prod;
@@ -1230,8 +1327,8 @@ static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
return 0;
}
- dma_sync_single_for_cpu(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
- bp->rx_dir);
+ page_pool_dma_sync_netmem_for_cpu(rxr->page_pool, netmem, 0,
+ BNXT_RX_PAGE_SIZE);
total_frag_len += frag_len;
prod = NEXT_RX_AGG(prod);
@@ -1240,32 +1337,28 @@ static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
return total_frag_len;
}
-static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp,
- struct bnxt_cp_ring_info *cpr,
- struct sk_buff *skb, u16 idx,
- u32 agg_bufs, bool tpa)
+static struct sk_buff *bnxt_rx_agg_netmems_skb(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr,
+ struct sk_buff *skb, u16 idx,
+ u32 agg_bufs, bool tpa)
{
- struct skb_shared_info *shinfo = skb_shinfo(skb);
u32 total_frag_len = 0;
- total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx,
- agg_bufs, tpa, NULL);
+ total_frag_len = __bnxt_rx_agg_netmems(bp, cpr, idx, agg_bufs, tpa,
+ skb, NULL);
if (!total_frag_len) {
skb_mark_for_recycle(skb);
dev_kfree_skb(skb);
return NULL;
}
- skb->data_len += total_frag_len;
- skb->len += total_frag_len;
- skb->truesize += BNXT_RX_PAGE_SIZE * agg_bufs;
return skb;
}
-static u32 bnxt_rx_agg_pages_xdp(struct bnxt *bp,
- struct bnxt_cp_ring_info *cpr,
- struct xdp_buff *xdp, u16 idx,
- u32 agg_bufs, bool tpa)
+static u32 bnxt_rx_agg_netmems_xdp(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr,
+ struct xdp_buff *xdp, u16 idx,
+ u32 agg_bufs, bool tpa)
{
struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp);
u32 total_frag_len = 0;
@@ -1273,8 +1366,8 @@ static u32 bnxt_rx_agg_pages_xdp(struct bnxt *bp,
if (!xdp_buff_has_frags(xdp))
shinfo->nr_frags = 0;
- total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo,
- idx, agg_bufs, tpa, xdp);
+ total_frag_len = __bnxt_rx_agg_netmems(bp, cpr, idx, agg_bufs, tpa,
+ NULL, xdp);
if (total_frag_len) {
xdp_buff_set_frags_flag(xdp);
shinfo->nr_frags = agg_bufs;
@@ -1296,9 +1389,9 @@ static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
return RX_AGG_CMP_VALID(agg, *raw_cons);
}
-static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
- unsigned int len,
- dma_addr_t mapping)
+static struct sk_buff *bnxt_copy_data(struct bnxt_napi *bnapi, u8 *data,
+ unsigned int len,
+ dma_addr_t mapping)
{
struct bnxt *bp = bnapi->bp;
struct pci_dev *pdev = bp->pdev;
@@ -1308,16 +1401,49 @@ static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
if (!skb)
return NULL;
- dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
+ dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copybreak,
bp->rx_dir);
memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
len + NET_IP_ALIGN);
- dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
+ dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copybreak,
bp->rx_dir);
skb_put(skb, len);
+
+ return skb;
+}
+
+static struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
+ unsigned int len,
+ dma_addr_t mapping)
+{
+ return bnxt_copy_data(bnapi, data, len, mapping);
+}
+
+static struct sk_buff *bnxt_copy_xdp(struct bnxt_napi *bnapi,
+ struct xdp_buff *xdp,
+ unsigned int len,
+ dma_addr_t mapping)
+{
+ unsigned int metasize = 0;
+ u8 *data = xdp->data;
+ struct sk_buff *skb;
+
+ len = xdp->data_end - xdp->data_meta;
+ metasize = xdp->data - xdp->data_meta;
+ data = xdp->data_meta;
+
+ skb = bnxt_copy_data(bnapi, data, len, mapping);
+ if (!skb)
+ return skb;
+
+ if (metasize) {
+ skb_metadata_set(skb, metasize);
+ __skb_pull(skb, metasize);
+ }
+
return skb;
}
@@ -1463,7 +1589,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
if (TPA_START_IS_IPV6(tpa_start1))
tpa_info->gso_type = SKB_GSO_TCPV6;
/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
- else if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP &&
+ else if (!BNXT_CHIP_P4_PLUS(bp) &&
TPA_START_HASH_TYPE(tpa_start) == 3)
tpa_info->gso_type = SKB_GSO_TCPV6;
tpa_info->rss_hash =
@@ -1700,7 +1826,7 @@ static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
{
struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
- /* if vf-rep dev is NULL, the must belongs to the PF */
+ /* if vf-rep dev is NULL, it must belong to the PF */
return dev ? dev : bp->dev;
}
@@ -1774,21 +1900,22 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
return NULL;
}
- if (len <= bp->rx_copy_thresh) {
+ if (len <= bp->rx_copybreak) {
skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
if (!skb) {
bnxt_abort_tpa(cpr, idx, agg_bufs);
- cpr->sw_stats.rx.rx_oom_discards += 1;
+ cpr->sw_stats->rx.rx_oom_discards += 1;
return NULL;
}
} else {
u8 *new_data;
dma_addr_t new_mapping;
- new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC);
+ new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, rxr,
+ GFP_ATOMIC);
if (!new_data) {
bnxt_abort_tpa(cpr, idx, agg_bufs);
- cpr->sw_stats.rx.rx_oom_discards += 1;
+ cpr->sw_stats->rx.rx_oom_discards += 1;
return NULL;
}
@@ -1797,25 +1924,26 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
tpa_info->mapping = new_mapping;
skb = napi_build_skb(data, bp->rx_buf_size);
- dma_unmap_single_attrs(&bp->pdev->dev, mapping,
- bp->rx_buf_use_size, bp->rx_dir,
- DMA_ATTR_WEAK_ORDERING);
+ dma_sync_single_for_cpu(&bp->pdev->dev, mapping,
+ bp->rx_buf_use_size, bp->rx_dir);
if (!skb) {
- skb_free_frag(data);
+ page_pool_free_va(rxr->head_pool, data, true);
bnxt_abort_tpa(cpr, idx, agg_bufs);
- cpr->sw_stats.rx.rx_oom_discards += 1;
+ cpr->sw_stats->rx.rx_oom_discards += 1;
return NULL;
}
+ skb_mark_for_recycle(skb);
skb_reserve(skb, bp->rx_offset);
skb_put(skb, len);
}
if (agg_bufs) {
- skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true);
+ skb = bnxt_rx_agg_netmems_skb(bp, cpr, skb, idx, agg_bufs,
+ true);
if (!skb) {
/* Page reuse already handled by bnxt_rx_pages(). */
- cpr->sw_stats.rx.rx_oom_discards += 1;
+ cpr->sw_stats->rx.rx_oom_discards += 1;
return NULL;
}
}
@@ -1932,6 +2060,7 @@ static struct sk_buff *bnxt_rx_vlan(struct sk_buff *skb, u8 cmp_type,
}
return skb;
vlan_err:
+ skb_mark_for_recycle(skb);
dev_kfree_skb(skb);
return NULL;
}
@@ -1970,6 +2099,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
struct rx_cmp_ext *rxcmp1;
u32 tmp_raw_cons = *raw_cons;
u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
+ struct skb_shared_info *sinfo;
struct bnxt_sw_rx_bd *rx_buf;
unsigned int len;
u8 *data_ptr, agg_bufs, cmp_type;
@@ -2073,7 +2203,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
rc = -EIO;
if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
- bnapi->cp_ring.sw_stats.rx.rx_buf_errors++;
+ bnapi->cp_ring.sw_stats->rx.rx_buf_errors++;
if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
!(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
netdev_warn_once(bp->dev, "RX buffer error %x\n",
@@ -2091,27 +2221,35 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
if (bnxt_xdp_attached(bp, rxr)) {
bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &xdp);
if (agg_bufs) {
- u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp,
- cp_cons, agg_bufs,
- false);
- if (!frag_len) {
- cpr->sw_stats.rx.rx_oom_discards += 1;
- rc = -ENOMEM;
- goto next_rx;
- }
+ u32 frag_len = bnxt_rx_agg_netmems_xdp(bp, cpr, &xdp,
+ cp_cons,
+ agg_bufs,
+ false);
+ if (!frag_len)
+ goto oom_next_rx;
+
}
xdp_active = true;
}
if (xdp_active) {
- if (bnxt_rx_xdp(bp, rxr, cons, xdp, data, &data_ptr, &len, event)) {
+ if (bnxt_rx_xdp(bp, rxr, cons, &xdp, data, &data_ptr, &len, event)) {
rc = 1;
goto next_rx;
}
+ if (xdp_buff_has_frags(&xdp)) {
+ sinfo = xdp_get_shared_info_from_buff(&xdp);
+ agg_bufs = sinfo->nr_frags;
+ } else {
+ agg_bufs = 0;
+ }
}
- if (len <= bp->rx_copy_thresh) {
- skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
+ if (len <= bp->rx_copybreak) {
+ if (!xdp_active)
+ skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
+ else
+ skb = bnxt_copy_xdp(bnapi, &xdp, len, dma_addr);
bnxt_reuse_rx_data(rxr, cons, data);
if (!skb) {
if (agg_bufs) {
@@ -2121,9 +2259,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
else
bnxt_xdp_buff_frags_free(rxr, &xdp);
}
- cpr->sw_stats.rx.rx_oom_discards += 1;
- rc = -ENOMEM;
- goto next_rx;
+ goto oom_next_rx;
}
} else {
u32 payload;
@@ -2134,29 +2270,23 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
payload = 0;
skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
payload | len);
- if (!skb) {
- cpr->sw_stats.rx.rx_oom_discards += 1;
- rc = -ENOMEM;
- goto next_rx;
- }
+ if (!skb)
+ goto oom_next_rx;
}
if (agg_bufs) {
if (!xdp_active) {
- skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false);
- if (!skb) {
- cpr->sw_stats.rx.rx_oom_discards += 1;
- rc = -ENOMEM;
- goto next_rx;
- }
+ skb = bnxt_rx_agg_netmems_skb(bp, cpr, skb, cp_cons,
+ agg_bufs, false);
+ if (!skb)
+ goto oom_next_rx;
} else {
- skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1);
+ skb = bnxt_xdp_build_skb(bp, skb, agg_bufs,
+ rxr->page_pool, &xdp);
if (!skb) {
/* we should be able to free the old skb here */
bnxt_xdp_buff_frags_free(rxr, &xdp);
- cpr->sw_stats.rx.rx_oom_discards += 1;
- rc = -ENOMEM;
- goto next_rx;
+ goto oom_next_rx;
}
}
}
@@ -2167,15 +2297,13 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
type = bnxt_rss_ext_op(bp, rxcmp);
} else {
- u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
+ u32 itypes = RX_CMP_ITYPES(rxcmp);
- /* RSS profiles 1 and 3 with extract code 0 for inner
- * 4-tuple
- */
- if (hash_type != 1 && hash_type != 3)
- type = PKT_HASH_TYPE_L3;
- else
+ if (itypes == RX_CMP_FLAGS_ITYPE_TCP ||
+ itypes == RX_CMP_FLAGS_ITYPE_UDP)
type = PKT_HASH_TYPE_L4;
+ else
+ type = PKT_HASH_TYPE_L3;
}
skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
}
@@ -2199,7 +2327,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
} else {
if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
if (dev->features & NETIF_F_RXCSUM)
- bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++;
+ bnapi->cp_ring.sw_stats->rx.rx_l4_csum_errors++;
}
}
@@ -2210,9 +2338,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
- spin_lock_bh(&ptp->ptp_lock);
- ns = timecounter_cyc2time(&ptp->tc, ts);
- spin_unlock_bh(&ptp->ptp_lock);
+ ns = bnxt_timecounter_cyc2time(ptp, ts);
memset(skb_hwtstamps(skb), 0,
sizeof(*skb_hwtstamps(skb)));
skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
@@ -2234,6 +2360,11 @@ next_rx_no_prod_no_len:
*raw_cons = tmp_raw_cons;
return rc;
+
+oom_next_rx:
+ cpr->sw_stats->rx.rx_oom_discards += 1;
+ rc = -ENOMEM;
+ goto next_rx;
}
/* In netpoll mode, if we are using a combined completion ring, we need to
@@ -2280,7 +2411,7 @@ static int bnxt_force_rx_discard(struct bnxt *bp,
}
rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
if (rc && rc != -EBUSY)
- cpr->sw_stats.rx.rx_netpoll_discards += 1;
+ cpr->sw_stats->rx.rx_netpoll_discards += 1;
return rc;
}
@@ -2414,6 +2545,59 @@ static bool bnxt_auto_speed_updated(struct bnxt_link_info *link_info)
return false;
}
+bool bnxt_bs_trace_avail(struct bnxt *bp, u16 type)
+{
+ u32 flags = bp->ctx->ctx_arr[type].flags;
+
+ return (flags & BNXT_CTX_MEM_TYPE_VALID) &&
+ ((flags & BNXT_CTX_MEM_FW_TRACE) ||
+ (flags & BNXT_CTX_MEM_FW_BIN_TRACE));
+}
+
+static void bnxt_bs_trace_init(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm)
+{
+ u32 mem_size, pages, rem_bytes, magic_byte_offset;
+ u16 trace_type = bnxt_bstore_to_trace[ctxm->type];
+ struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
+ struct bnxt_ring_mem_info *rmem, *rmem_pg_tbl;
+ struct bnxt_bs_trace_info *bs_trace;
+ int last_pg;
+
+ if (ctxm->instance_bmap && ctxm->instance_bmap > 1)
+ return;
+
+ mem_size = ctxm->max_entries * ctxm->entry_size;
+ rem_bytes = mem_size % BNXT_PAGE_SIZE;
+ pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
+
+ last_pg = (pages - 1) & (MAX_CTX_PAGES - 1);
+ magic_byte_offset = (rem_bytes ? rem_bytes : BNXT_PAGE_SIZE) - 1;
+
+ rmem = &ctx_pg[0].ring_mem;
+ bs_trace = &bp->bs_trace[trace_type];
+ bs_trace->ctx_type = ctxm->type;
+ bs_trace->trace_type = trace_type;
+ if (pages > MAX_CTX_PAGES) {
+ int last_pg_dir = rmem->nr_pages - 1;
+
+ rmem_pg_tbl = &ctx_pg[0].ctx_pg_tbl[last_pg_dir]->ring_mem;
+ bs_trace->magic_byte = rmem_pg_tbl->pg_arr[last_pg];
+ } else {
+ bs_trace->magic_byte = rmem->pg_arr[last_pg];
+ }
+ bs_trace->magic_byte += magic_byte_offset;
+ *bs_trace->magic_byte = BNXT_TRACE_BUF_MAGIC_BYTE;
+}
+
+#define BNXT_EVENT_BUF_PRODUCER_TYPE(data1) \
+ (((data1) & ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_MASK) >>\
+ ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SFT)
+
+#define BNXT_EVENT_BUF_PRODUCER_OFFSET(data2) \
+ (((data2) & \
+ ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_MASK) >>\
+ ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_SFT)
+
#define BNXT_EVENT_THERMAL_CURRENT_TEMP(data2) \
((data2) & \
ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK)
@@ -2489,6 +2673,9 @@ static bool bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
}
return false;
}
+ case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED:
+ netdev_warn(bp->dev, "Speed change not supported with dual rate transceivers on this board\n");
+ break;
default:
netdev_err(bp->dev, "FW reported unknown error type %u\n",
err_type);
@@ -2704,17 +2891,18 @@ static int bnxt_async_event_process(struct bnxt *bp,
case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE:
if (BNXT_PTP_USE_RTC(bp)) {
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ unsigned long flags;
u64 ns;
if (!ptp)
goto async_event_process_exit;
- spin_lock_bh(&ptp->ptp_lock);
bnxt_ptp_update_current_time(bp);
ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) <<
BNXT_PHC_BITS) | ptp->current_time);
+ write_seqlock_irqsave(&ptp->ptp_lock, flags);
bnxt_ptp_rtc_timecounter_init(ptp, ns);
- spin_unlock_bh(&ptp->ptp_lock);
+ write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
}
break;
}
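
The hunk above converts ptp_lock from a BH-disabling spinlock to a seqlock: the rare writer (PHC/RTC updates) takes write_seqlock_irqsave(), while timestamp readers spin in a retry loop without ever blocking the writer. A minimal sketch of the reader side, with an illustrative structure rather than the bnxt one:

#include <linux/seqlock.h>

struct example_ptp {
	seqlock_t lock;		/* initialised with seqlock_init() */
	u64 current_time;
};

static u64 example_read_time(struct example_ptp *ptp)
{
	unsigned int seq;
	u64 ns;

	do {
		seq = read_seqbegin(&ptp->lock);
		ns = ptp->current_time;
	} while (read_seqretry(&ptp->lock, seq));

	return ns;
}
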
@@ -2726,11 +2914,19 @@ static int bnxt_async_event_process(struct bnxt *bp,
hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED);
goto async_event_process_exit;
}
+ case ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER: {
+ u16 type = (u16)BNXT_EVENT_BUF_PRODUCER_TYPE(data1);
+ u32 offset = BNXT_EVENT_BUF_PRODUCER_OFFSET(data2);
+
+ bnxt_bs_trace_check_wrap(&bp->bs_trace[type], offset);
+ goto async_event_process_exit;
+ }
default:
goto async_event_process_exit;
}
__bnxt_queue_sp_work(bp);
async_event_process_exit:
+ bnxt_ulp_async_events(bp, cmpl);
return 0;
}
@@ -2773,6 +2969,13 @@ static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
return 0;
}
+static bool bnxt_vnic_is_active(struct bnxt *bp)
+{
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+
+ return vnic->fw_vnic_id != INVALID_HW_RING_ID && vnic->mru > 0;
+}
+
static irqreturn_t bnxt_msix(int irq, void *dev_instance)
{
struct bnxt_napi *bnapi = dev_instance;
@@ -2797,39 +3000,12 @@ static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
return TX_CMP_VALID(txcmp, raw_cons);
}
-static irqreturn_t bnxt_inta(int irq, void *dev_instance)
-{
- struct bnxt_napi *bnapi = dev_instance;
- struct bnxt *bp = bnapi->bp;
- struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
- u32 cons = RING_CMP(cpr->cp_raw_cons);
- u32 int_status;
-
- prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
-
- if (!bnxt_has_work(bp, cpr)) {
- int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
- /* return if erroneous interrupt */
- if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
- return IRQ_NONE;
- }
-
- /* disable ring IRQ */
- BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
-
- /* Return here if interrupt is shared and is disabled. */
- if (unlikely(atomic_read(&bp->intr_sem) != 0))
- return IRQ_HANDLED;
-
- napi_schedule(&bnapi->napi);
- return IRQ_HANDLED;
-}
-
static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
int budget)
{
struct bnxt_napi *bnapi = cpr->bnapi;
u32 raw_cons = cpr->cp_raw_cons;
+ bool flush_xdp = false;
u32 cons;
int rx_pkts = 0;
u8 event = 0;
@@ -2874,6 +3050,8 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
cpr->has_more_work = 1;
break;
}
+ } else if (cmp_type == CMP_TYPE_TX_L2_PKT_TS_CMP) {
+ bnxt_tx_ts_cmp(bp, bnapi, (struct tx_ts_cmp *)txcmp);
} else if (cmp_type >= CMP_TYPE_RX_L2_CMP &&
cmp_type <= CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
if (likely(budget))
@@ -2881,6 +3059,8 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
else
rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
&event);
+ if (event & BNXT_REDIRECT_EVENT)
+ flush_xdp = true;
if (likely(rc >= 0))
rx_pkts += rc;
/* Increment rx_pkts when rc is -ENOMEM to count towards
@@ -2905,8 +3085,10 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
}
}
- if (event & BNXT_REDIRECT_EVENT)
+ if (flush_xdp) {
xdp_do_flush();
+ event &= ~BNXT_REDIRECT_EVENT;
+ }
if (event & BNXT_TX_EVENT) {
struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0];
@@ -2916,6 +3098,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
wmb();
bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
+ event &= ~BNXT_TX_EVENT;
}
cpr->cp_raw_cons = raw_cons;
@@ -2933,13 +3116,14 @@ static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi,
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
+ bnapi->events &= ~BNXT_RX_EVENT;
}
if (bnapi->events & BNXT_AGG_EVENT) {
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
+ bnapi->events &= ~BNXT_AGG_EVENT;
}
- bnapi->events &= BNXT_TX_CMP_EVENT;
}
static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
@@ -3062,14 +3246,14 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
break;
}
}
- if (bp->flags & BNXT_FLAG_DIM) {
+ if ((bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) {
struct dim_sample dim_sample = {};
dim_update_sample(cpr->event_ctr,
cpr->rx_packets,
cpr->rx_bytes,
&dim_sample);
- net_dim(&cpr->dim, dim_sample);
+ net_dim(&cpr->dim, &dim_sample);
}
return work_done;
}
@@ -3193,156 +3377,180 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
poll_done:
cpr_rx = &cpr->cp_ring_arr[0];
if (cpr_rx->cp_ring_type == BNXT_NQ_HDL_TYPE_RX &&
- (bp->flags & BNXT_FLAG_DIM)) {
+ (bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) {
struct dim_sample dim_sample = {};
dim_update_sample(cpr->event_ctr,
cpr_rx->rx_packets,
cpr_rx->rx_bytes,
&dim_sample);
- net_dim(&cpr->dim, dim_sample);
+ net_dim(&cpr->dim, &dim_sample);
}
return work_done;
}
-static void bnxt_free_tx_skbs(struct bnxt *bp)
+static void bnxt_free_one_tx_ring_skbs(struct bnxt *bp,
+ struct bnxt_tx_ring_info *txr, int idx)
{
int i, max_idx;
struct pci_dev *pdev = bp->pdev;
- if (!bp->tx_ring)
- return;
-
max_idx = bp->tx_nr_pages * TX_DESC_CNT;
- for (i = 0; i < bp->tx_nr_rings; i++) {
- struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
- int j;
- if (!txr->tx_buf_ring)
+ for (i = 0; i < max_idx;) {
+ struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[i];
+ struct sk_buff *skb;
+ int j, last;
+
+ if (idx < bp->tx_nr_rings_xdp &&
+ tx_buf->action == XDP_REDIRECT) {
+ dma_unmap_single(&pdev->dev,
+ dma_unmap_addr(tx_buf, mapping),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
+ xdp_return_frame(tx_buf->xdpf);
+ tx_buf->action = 0;
+ tx_buf->xdpf = NULL;
+ i++;
continue;
+ }
- for (j = 0; j < max_idx;) {
- struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
- struct sk_buff *skb;
- int k, last;
-
- if (i < bp->tx_nr_rings_xdp &&
- tx_buf->action == XDP_REDIRECT) {
- dma_unmap_single(&pdev->dev,
- dma_unmap_addr(tx_buf, mapping),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
- xdp_return_frame(tx_buf->xdpf);
- tx_buf->action = 0;
- tx_buf->xdpf = NULL;
- j++;
- continue;
- }
+ skb = tx_buf->skb;
+ if (!skb) {
+ i++;
+ continue;
+ }
- skb = tx_buf->skb;
- if (!skb) {
- j++;
- continue;
- }
+ tx_buf->skb = NULL;
- tx_buf->skb = NULL;
+ if (tx_buf->is_push) {
+ dev_kfree_skb(skb);
+ i += 2;
+ continue;
+ }
- if (tx_buf->is_push) {
- dev_kfree_skb(skb);
- j += 2;
- continue;
- }
+ dma_unmap_single(&pdev->dev,
+ dma_unmap_addr(tx_buf, mapping),
+ skb_headlen(skb),
+ DMA_TO_DEVICE);
- dma_unmap_single(&pdev->dev,
- dma_unmap_addr(tx_buf, mapping),
- skb_headlen(skb),
- DMA_TO_DEVICE);
+ last = tx_buf->nr_frags;
+ i += 2;
+ for (j = 0; j < last; j++, i++) {
+ int ring_idx = i & bp->tx_ring_mask;
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
- last = tx_buf->nr_frags;
- j += 2;
- for (k = 0; k < last; k++, j++) {
- int ring_idx = j & bp->tx_ring_mask;
- skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
-
- tx_buf = &txr->tx_buf_ring[ring_idx];
- dma_unmap_page(
- &pdev->dev,
- dma_unmap_addr(tx_buf, mapping),
- skb_frag_size(frag), DMA_TO_DEVICE);
- }
- dev_kfree_skb(skb);
+ tx_buf = &txr->tx_buf_ring[ring_idx];
+ netmem_dma_unmap_page_attrs(&pdev->dev,
+ dma_unmap_addr(tx_buf,
+ mapping),
+ skb_frag_size(frag),
+ DMA_TO_DEVICE, 0);
}
- netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
+ dev_kfree_skb(skb);
}
+ netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, idx));
}
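
The fragment loop in the refactored helper above advances a flat index and maps it back into the ring with tx_ring_mask, which only works because the ring size is a power of two. A tiny standalone illustration of that mask-based wrap:

    #include <stdio.h>

    /* Power-of-two ring indexing: (i & mask) wraps without a modulo. */
    int main(void)
    {
        unsigned int ring_size = 256;        /* must be a power of two */
        unsigned int mask = ring_size - 1;
        unsigned int i;

        for (i = 250; i < 260; i++)
            printf("flat %u -> slot %u\n", i, i & mask);
        return 0;
    }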
-static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
+static void bnxt_free_tx_skbs(struct bnxt *bp)
{
- struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
- struct pci_dev *pdev = bp->pdev;
- struct bnxt_tpa_idx_map *map;
- int i, max_idx, max_agg_idx;
+ int i;
- max_idx = bp->rx_nr_pages * RX_DESC_CNT;
- max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
- if (!rxr->rx_tpa)
- goto skip_rx_tpa_free;
+ if (!bp->tx_ring)
+ return;
- for (i = 0; i < bp->max_tpa; i++) {
- struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
- u8 *data = tpa_info->data;
+ for (i = 0; i < bp->tx_nr_rings; i++) {
+ struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
- if (!data)
+ if (!txr->tx_buf_ring)
continue;
- dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
- bp->rx_buf_use_size, bp->rx_dir,
- DMA_ATTR_WEAK_ORDERING);
+ bnxt_free_one_tx_ring_skbs(bp, txr, i);
+ }
- tpa_info->data = NULL;
+ if (bp->ptp_cfg && !(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
+ bnxt_ptp_free_txts_skbs(bp->ptp_cfg);
+}
- skb_free_frag(data);
- }
+static void bnxt_free_one_rx_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
+{
+ int i, max_idx;
-skip_rx_tpa_free:
- if (!rxr->rx_buf_ring)
- goto skip_rx_buf_free;
+ max_idx = bp->rx_nr_pages * RX_DESC_CNT;
for (i = 0; i < max_idx; i++) {
struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
- dma_addr_t mapping = rx_buf->mapping;
void *data = rx_buf->data;
if (!data)
continue;
rx_buf->data = NULL;
- if (BNXT_RX_PAGE_MODE(bp)) {
+ if (BNXT_RX_PAGE_MODE(bp))
page_pool_recycle_direct(rxr->page_pool, data);
- } else {
- dma_unmap_single_attrs(&pdev->dev, mapping,
- bp->rx_buf_use_size, bp->rx_dir,
- DMA_ATTR_WEAK_ORDERING);
- skb_free_frag(data);
- }
+ else
+ page_pool_free_va(rxr->head_pool, data, true);
}
+}
-skip_rx_buf_free:
- if (!rxr->rx_agg_ring)
- goto skip_rx_agg_free;
+static void bnxt_free_one_rx_agg_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
+{
+ int i, max_idx;
+
+ max_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
- for (i = 0; i < max_agg_idx; i++) {
+ for (i = 0; i < max_idx; i++) {
struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
- struct page *page = rx_agg_buf->page;
+ netmem_ref netmem = rx_agg_buf->netmem;
- if (!page)
+ if (!netmem)
continue;
- rx_agg_buf->page = NULL;
+ rx_agg_buf->netmem = 0;
__clear_bit(i, rxr->rx_agg_bmap);
- page_pool_recycle_direct(rxr->page_pool, page);
+ page_pool_recycle_direct_netmem(rxr->page_pool, netmem);
+ }
+}
+
+static void bnxt_free_one_tpa_info_data(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ int i;
+
+ for (i = 0; i < bp->max_tpa; i++) {
+ struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
+ u8 *data = tpa_info->data;
+
+ if (!data)
+ continue;
+
+ tpa_info->data = NULL;
+ page_pool_free_va(rxr->head_pool, data, false);
}
+}
+
+static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ struct bnxt_tpa_idx_map *map;
+
+ if (!rxr->rx_tpa)
+ goto skip_rx_tpa_free;
+
+ bnxt_free_one_tpa_info_data(bp, rxr);
+
+skip_rx_tpa_free:
+ if (!rxr->rx_buf_ring)
+ goto skip_rx_buf_free;
+
+ bnxt_free_one_rx_ring(bp, rxr);
+
+skip_rx_buf_free:
+ if (!rxr->rx_agg_ring)
+ goto skip_rx_agg_free;
+
+ bnxt_free_one_rx_agg_ring(bp, rxr);
skip_rx_agg_free:
map = rxr->rx_tpa_idx_map;
@@ -3358,7 +3566,7 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
return;
for (i = 0; i < bp->rx_nr_rings; i++)
- bnxt_free_one_rx_ring_skbs(bp, i);
+ bnxt_free_one_rx_ring_skbs(bp, &bp->rx_ring[i]);
}
static void bnxt_free_skbs(struct bnxt *bp)
@@ -3384,6 +3592,35 @@ static void bnxt_init_ctx_mem(struct bnxt_ctx_mem_type *ctxm, void *p, int len)
*(p2 + i + offset) = init_val;
}
+static size_t __bnxt_copy_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem,
+ void *buf, size_t offset, size_t head,
+ size_t tail)
+{
+ int i, head_page, start_idx, source_offset;
+ size_t len, rem_len, total_len, max_bytes;
+
+ head_page = head / rmem->page_size;
+ source_offset = head % rmem->page_size;
+ total_len = (tail - head) & MAX_CTX_BYTES_MASK;
+ if (!total_len)
+ total_len = MAX_CTX_BYTES;
+ start_idx = head_page % MAX_CTX_PAGES;
+ max_bytes = (rmem->nr_pages - start_idx) * rmem->page_size -
+ source_offset;
+ total_len = min(total_len, max_bytes);
+ rem_len = total_len;
+
+ for (i = start_idx; rem_len; i++, source_offset = 0) {
+ len = min((size_t)(rmem->page_size - source_offset), rem_len);
+ if (buf)
+ memcpy(buf + offset, rmem->pg_arr[i] + source_offset,
+ len);
+ offset += len;
+ rem_len -= len;
+ }
+ return total_len;
+}
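
__bnxt_copy_ring() above walks the paged backing store page by page, honouring the source offset only on the first page and resetting it to zero afterwards. A simplified userspace sketch of the same walk (fixed page size, no MAX_CTX_BYTES wrap handling, illustrative names):

    #include <string.h>
    #include <stddef.h>

    #define DEMO_PAGE_SZ 4096UL

    /* Copy total_len bytes out of an array of fixed-size pages,
     * starting at absolute byte position head. Illustration only.
     */
    static size_t demo_copy_ring(void **pages, size_t nr_pages,
                                 void *buf, size_t head, size_t total_len)
    {
        size_t i = head / DEMO_PAGE_SZ;
        size_t src_off = head % DEMO_PAGE_SZ;
        size_t copied = 0;

        for (; copied < total_len && i < nr_pages; i++, src_off = 0) {
            size_t len = DEMO_PAGE_SZ - src_off;

            if (len > total_len - copied)
                len = total_len - copied;
            memcpy((char *)buf + copied,
                   (char *)pages[i] + src_off, len);
            copied += len;
        }
        return copied;
    }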
+
static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
{
struct pci_dev *pdev = bp->pdev;
@@ -3470,29 +3707,64 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
return 0;
}
+static void bnxt_free_one_tpa_info(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ int i;
+
+ kfree(rxr->rx_tpa_idx_map);
+ rxr->rx_tpa_idx_map = NULL;
+ if (rxr->rx_tpa) {
+ for (i = 0; i < bp->max_tpa; i++) {
+ kfree(rxr->rx_tpa[i].agg_arr);
+ rxr->rx_tpa[i].agg_arr = NULL;
+ }
+ }
+ kfree(rxr->rx_tpa);
+ rxr->rx_tpa = NULL;
+}
+
static void bnxt_free_tpa_info(struct bnxt *bp)
{
- int i, j;
+ int i;
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
- kfree(rxr->rx_tpa_idx_map);
- rxr->rx_tpa_idx_map = NULL;
- if (rxr->rx_tpa) {
- for (j = 0; j < bp->max_tpa; j++) {
- kfree(rxr->rx_tpa[j].agg_arr);
- rxr->rx_tpa[j].agg_arr = NULL;
- }
- }
- kfree(rxr->rx_tpa);
- rxr->rx_tpa = NULL;
+ bnxt_free_one_tpa_info(bp, rxr);
+ }
+}
+
+static int bnxt_alloc_one_tpa_info(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ struct rx_agg_cmp *agg;
+ int i;
+
+ rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
+ GFP_KERNEL);
+ if (!rxr->rx_tpa)
+ return -ENOMEM;
+
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
+ return 0;
+ for (i = 0; i < bp->max_tpa; i++) {
+ agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
+ if (!agg)
+ return -ENOMEM;
+ rxr->rx_tpa[i].agg_arr = agg;
}
+ rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
+ GFP_KERNEL);
+ if (!rxr->rx_tpa_idx_map)
+ return -ENOMEM;
+
+ return 0;
}
static int bnxt_alloc_tpa_info(struct bnxt *bp)
{
- int i, j;
+ int i, rc;
bp->max_tpa = MAX_TPA;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
@@ -3503,25 +3775,10 @@ static int bnxt_alloc_tpa_info(struct bnxt *bp)
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
- struct rx_agg_cmp *agg;
- rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
- GFP_KERNEL);
- if (!rxr->rx_tpa)
- return -ENOMEM;
-
- if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
- continue;
- for (j = 0; j < bp->max_tpa; j++) {
- agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
- if (!agg)
- return -ENOMEM;
- rxr->rx_tpa[j].agg_arr = agg;
- }
- rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
- GFP_KERNEL);
- if (!rxr->rx_tpa_idx_map)
- return -ENOMEM;
+ rc = bnxt_alloc_one_tpa_info(bp, rxr);
+ if (rc)
+ return rc;
}
return 0;
}
@@ -3545,7 +3802,8 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
xdp_rxq_info_unreg(&rxr->xdp_rxq);
page_pool_destroy(rxr->page_pool);
- rxr->page_pool = NULL;
+ page_pool_destroy(rxr->head_pool);
+ rxr->page_pool = rxr->head_pool = NULL;
kfree(rxr->rx_agg_bmap);
rxr->rx_agg_bmap = NULL;
@@ -3559,34 +3817,74 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
}
static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
- struct bnxt_rx_ring_info *rxr)
+ struct bnxt_rx_ring_info *rxr,
+ int numa_node)
{
+ const unsigned int agg_size_fac = PAGE_SIZE / BNXT_RX_PAGE_SIZE;
+ const unsigned int rx_size_fac = PAGE_SIZE / SZ_4K;
struct page_pool_params pp = { 0 };
+ struct page_pool *pool;
- pp.pool_size = bp->rx_agg_ring_size;
+ pp.pool_size = bp->rx_agg_ring_size / agg_size_fac;
if (BNXT_RX_PAGE_MODE(bp))
- pp.pool_size += bp->rx_ring_size;
- pp.nid = dev_to_node(&bp->pdev->dev);
- pp.napi = &rxr->bnapi->napi;
+ pp.pool_size += bp->rx_ring_size / rx_size_fac;
+ pp.nid = numa_node;
pp.netdev = bp->dev;
pp.dev = &bp->pdev->dev;
pp.dma_dir = bp->rx_dir;
pp.max_len = PAGE_SIZE;
- pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV |
+ PP_FLAG_ALLOW_UNREADABLE_NETMEM;
+ pp.queue_idx = rxr->bnapi->index;
+
+ pool = page_pool_create(&pp);
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
+ rxr->page_pool = pool;
+
+ rxr->need_head_pool = page_pool_is_unreadable(pool);
+ if (bnxt_separate_head_pool(rxr)) {
+ pp.pool_size = min(bp->rx_ring_size / rx_size_fac, 1024);
+ pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pool = page_pool_create(&pp);
+ if (IS_ERR(pool))
+ goto err_destroy_pp;
+ } else {
+ page_pool_get(pool);
+ }
+ rxr->head_pool = pool;
- rxr->page_pool = page_pool_create(&pp);
- if (IS_ERR(rxr->page_pool)) {
- int err = PTR_ERR(rxr->page_pool);
+ return 0;
+
+err_destroy_pp:
+ page_pool_destroy(rxr->page_pool);
+ rxr->page_pool = NULL;
+ return PTR_ERR(pool);
+}
+
+static void bnxt_enable_rx_page_pool(struct bnxt_rx_ring_info *rxr)
+{
+ page_pool_enable_direct_recycling(rxr->head_pool, &rxr->bnapi->napi);
+ page_pool_enable_direct_recycling(rxr->page_pool, &rxr->bnapi->napi);
+}
+
+static int bnxt_alloc_rx_agg_bmap(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
+{
+ u16 mem_size;
+
+ rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
+ mem_size = rxr->rx_agg_bmap_size / 8;
+ rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
+ if (!rxr->rx_agg_bmap)
+ return -ENOMEM;
- rxr->page_pool = NULL;
- return err;
- }
return 0;
}
static int bnxt_alloc_rx_rings(struct bnxt *bp)
{
- int i, rc = 0, agg_rings = 0;
+ int numa_node = dev_to_node(&bp->pdev->dev);
+ int i, rc = 0, agg_rings = 0, cpu;
if (!bp->rx_ring)
return -ENOMEM;
@@ -3597,12 +3895,18 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
struct bnxt_ring_struct *ring;
+ int cpu_node;
ring = &rxr->rx_ring_struct;
- rc = bnxt_alloc_rx_page_pool(bp, rxr);
+ cpu = cpumask_local_spread(i, numa_node);
+ cpu_node = cpu_to_node(cpu);
+ netdev_dbg(bp->dev, "Allocating page pool for rx_ring[%d] on numa_node: %d\n",
+ i, cpu_node);
+ rc = bnxt_alloc_rx_page_pool(bp, rxr, cpu_node);
if (rc)
return rc;
+ bnxt_enable_rx_page_pool(rxr);
rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
if (rc < 0)
@@ -3622,19 +3926,15 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)
ring->grp_idx = i;
if (agg_rings) {
- u16 mem_size;
-
ring = &rxr->rx_agg_ring_struct;
rc = bnxt_alloc_ring(bp, &ring->ring_mem);
if (rc)
return rc;
ring->grp_idx = i;
- rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
- mem_size = rxr->rx_agg_bmap_size / 8;
- rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
- if (!rxr->rx_agg_bmap)
- return -ENOMEM;
+ rc = bnxt_alloc_rx_agg_bmap(bp, rxr);
+ if (rc)
+ return rc;
}
}
if (bp->flags & BNXT_FLAG_TPA)
@@ -3859,13 +4159,12 @@ static int bnxt_alloc_cp_sub_ring(struct bnxt *bp,
static int bnxt_alloc_cp_rings(struct bnxt *bp)
{
bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
- int i, j, rc, ulp_base_vec, ulp_msix;
+ int i, j, rc, ulp_msix;
int tcs = bp->num_tc;
if (!tcs)
tcs = 1;
ulp_msix = bnxt_get_ulp_msix_num(bp);
- ulp_base_vec = bnxt_get_ulp_msix_base(bp);
for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr, *cpr2;
@@ -3884,10 +4183,7 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp)
if (rc)
return rc;
- if (ulp_msix && i >= ulp_base_vec)
- ring->map_idx = i + ulp_msix;
- else
- ring->map_idx = i;
+ ring->map_idx = ulp_msix + i;
if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
continue;
@@ -3917,6 +4213,7 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp)
if (rc)
return rc;
cpr2->bnapi = bnapi;
+ cpr2->sw_stats = cpr->sw_stats;
cpr2->cp_idx = k;
if (!k && rx) {
bp->rx_ring[i].rx_cpr = cpr2;
@@ -3935,6 +4232,65 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp)
return 0;
}
+static void bnxt_init_rx_ring_struct(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ struct bnxt_ring_mem_info *rmem;
+ struct bnxt_ring_struct *ring;
+
+ ring = &rxr->rx_ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bp->rx_nr_pages;
+ rmem->page_size = HW_RXBD_RING_SIZE;
+ rmem->pg_arr = (void **)rxr->rx_desc_ring;
+ rmem->dma_arr = rxr->rx_desc_mapping;
+ rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
+ rmem->vmem = (void **)&rxr->rx_buf_ring;
+
+ ring = &rxr->rx_agg_ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bp->rx_agg_nr_pages;
+ rmem->page_size = HW_RXBD_RING_SIZE;
+ rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
+ rmem->dma_arr = rxr->rx_agg_desc_mapping;
+ rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
+ rmem->vmem = (void **)&rxr->rx_agg_ring;
+}
+
+static void bnxt_reset_rx_ring_struct(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ struct bnxt_ring_mem_info *rmem;
+ struct bnxt_ring_struct *ring;
+ int i;
+
+ rxr->page_pool->p.napi = NULL;
+ rxr->page_pool = NULL;
+ rxr->head_pool->p.napi = NULL;
+ rxr->head_pool = NULL;
+ memset(&rxr->xdp_rxq, 0, sizeof(struct xdp_rxq_info));
+
+ ring = &rxr->rx_ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->pg_tbl = NULL;
+ rmem->pg_tbl_map = 0;
+ for (i = 0; i < rmem->nr_pages; i++) {
+ rmem->pg_arr[i] = NULL;
+ rmem->dma_arr[i] = 0;
+ }
+ *rmem->vmem = NULL;
+
+ ring = &rxr->rx_agg_ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->pg_tbl = NULL;
+ rmem->pg_tbl_map = 0;
+ for (i = 0; i < rmem->nr_pages; i++) {
+ rmem->pg_arr[i] = NULL;
+ rmem->dma_arr[i] = 0;
+ }
+ *rmem->vmem = NULL;
+}
+
static void bnxt_init_ring_struct(struct bnxt *bp)
{
int i, j;
@@ -4017,58 +4373,88 @@ static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
}
}
-static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
+static void bnxt_alloc_one_rx_ring_skb(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr,
+ int ring_nr)
{
- struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
- struct net_device *dev = bp->dev;
u32 prod;
int i;
prod = rxr->rx_prod;
for (i = 0; i < bp->rx_ring_size; i++) {
if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
- netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
+ netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
ring_nr, i, bp->rx_ring_size);
break;
}
prod = NEXT_RX(prod);
}
rxr->rx_prod = prod;
+}
- if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
- return 0;
+static void bnxt_alloc_one_rx_ring_netmem(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr,
+ int ring_nr)
+{
+ u32 prod;
+ int i;
prod = rxr->rx_agg_prod;
for (i = 0; i < bp->rx_agg_ring_size; i++) {
- if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
- netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
- ring_nr, i, bp->rx_ring_size);
+ if (bnxt_alloc_rx_netmem(bp, rxr, prod, GFP_KERNEL)) {
+ netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d pages only\n",
+ ring_nr, i, bp->rx_agg_ring_size);
break;
}
prod = NEXT_RX_AGG(prod);
}
rxr->rx_agg_prod = prod;
+}
- if (rxr->rx_tpa) {
- dma_addr_t mapping;
- u8 *data;
+static int bnxt_alloc_one_tpa_info_data(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ dma_addr_t mapping;
+ u8 *data;
+ int i;
- for (i = 0; i < bp->max_tpa; i++) {
- data = __bnxt_alloc_rx_frag(bp, &mapping, GFP_KERNEL);
- if (!data)
- return -ENOMEM;
+ for (i = 0; i < bp->max_tpa; i++) {
+ data = __bnxt_alloc_rx_frag(bp, &mapping, rxr,
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
- rxr->rx_tpa[i].data = data;
- rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
- rxr->rx_tpa[i].mapping = mapping;
- }
+ rxr->rx_tpa[i].data = data;
+ rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
+ rxr->rx_tpa[i].mapping = mapping;
}
+
return 0;
}
-static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
+static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
+{
+ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
+ int rc;
+
+ bnxt_alloc_one_rx_ring_skb(bp, rxr, ring_nr);
+
+ if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
+ return 0;
+
+ bnxt_alloc_one_rx_ring_netmem(bp, rxr, ring_nr);
+
+ if (rxr->rx_tpa) {
+ rc = bnxt_alloc_one_tpa_info_data(bp, rxr);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
+static void bnxt_init_one_rx_ring_rxbd(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
{
- struct bnxt_rx_ring_info *rxr;
struct bnxt_ring_struct *ring;
u32 type;
@@ -4078,28 +4464,50 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
if (NET_IP_ALIGN == 2)
type |= RX_BD_FLAGS_SOP;
- rxr = &bp->rx_ring[ring_nr];
ring = &rxr->rx_ring_struct;
bnxt_init_rxbd_pages(ring, type);
-
- netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX,
- &rxr->bnapi->napi);
-
- if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
- bpf_prog_add(bp->xdp_prog, 1);
- rxr->xdp_prog = bp->xdp_prog;
- }
ring->fw_ring_id = INVALID_HW_RING_ID;
+}
+
+static void bnxt_init_one_rx_agg_ring_rxbd(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ struct bnxt_ring_struct *ring;
+ u32 type;
ring = &rxr->rx_agg_ring_struct;
ring->fw_ring_id = INVALID_HW_RING_ID;
-
if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
- RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
+ RX_BD_TYPE_RX_AGG_BD;
+
+ /* On P7, setting EOP will cause the chip to disable
+ * Relaxed Ordering (RO) for TPA data. Disable EOP for
+ * potentially higher performance with RO.
+ */
+ if (BNXT_CHIP_P5_AND_MINUS(bp) || !(bp->flags & BNXT_FLAG_TPA))
+ type |= RX_BD_FLAGS_AGG_EOP;
bnxt_init_rxbd_pages(ring, type);
}
+}
+
+static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
+{
+ struct bnxt_rx_ring_info *rxr;
+
+ rxr = &bp->rx_ring[ring_nr];
+ bnxt_init_one_rx_ring_rxbd(bp, rxr);
+
+ netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX,
+ &rxr->bnapi->napi);
+
+ if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
+ bpf_prog_add(bp->xdp_prog, 1);
+ rxr->xdp_prog = bp->xdp_prog;
+ }
+
+ bnxt_init_one_rx_agg_ring_rxbd(bp, rxr);
return bnxt_alloc_one_rx_ring(bp, ring_nr);
}
@@ -4241,6 +4649,7 @@ static void bnxt_init_vnics(struct bnxt *bp)
int j;
vnic->fw_vnic_id = INVALID_HW_RING_ID;
+ vnic->vnic_id = i;
for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
@@ -4308,6 +4717,17 @@ void bnxt_set_tpa_flags(struct bnxt *bp)
bp->flags |= BNXT_FLAG_GRO;
}
+static void bnxt_init_ring_params(struct bnxt *bp)
+{
+ unsigned int rx_size;
+
+ bp->rx_copybreak = BNXT_DEFAULT_RX_COPYBREAK;
+ /* Try to fit 4 chunks into a 4k page */
+ rx_size = SZ_1K -
+ NET_SKB_PAD - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ bp->dev->cfg->hds_thresh = max(BNXT_DEFAULT_RX_COPYBREAK, rx_size);
+}
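
The default HDS threshold above is sized so that four header buffers, each carrying NET_SKB_PAD headroom plus the aligned skb_shared_info, fit in one 4K page. A back-of-envelope check with assumed x86-64 values (NET_SKB_PAD of 64 and an aligned skb_shared_info of roughly 320 bytes; the real values depend on the build):

    #include <stdio.h>

    int main(void)
    {
        unsigned int chunk  = 1024;  /* SZ_1K: one quarter of a 4K page   */
        unsigned int pad    = 64;    /* assumed NET_SKB_PAD               */
        unsigned int shinfo = 320;   /* assumed aligned skb_shared_info   */
        unsigned int rx_size = chunk - pad - shinfo;

        /* The driver then takes max(BNXT_DEFAULT_RX_COPYBREAK, rx_size). */
        printf("payload per chunk ~%u bytes, 4 chunks per 4K page\n", rx_size);
        return 0;
    }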
+
/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
* be set on entry.
*/
@@ -4322,12 +4742,11 @@ void bnxt_set_ring_params(struct bnxt *bp)
rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
ring_size = bp->rx_ring_size;
bp->rx_agg_ring_size = 0;
bp->rx_agg_nr_pages = 0;
- if (bp->flags & BNXT_FLAG_TPA)
+ if (bp->flags & BNXT_FLAG_TPA || bp->flags & BNXT_FLAG_HDS)
agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
bp->flags &= ~BNXT_FLAG_JUMBO;
@@ -4367,7 +4786,10 @@ void bnxt_set_ring_params(struct bnxt *bp)
ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) -
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
} else {
- rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
+ rx_size = max3(BNXT_DEFAULT_RX_COPYBREAK,
+ bp->rx_copybreak,
+ bp->dev->cfg_pending->hds_thresh);
+ rx_size = SKB_DATA_ALIGN(rx_size + NET_IP_ALIGN);
rx_space = rx_size + NET_SKB_PAD +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}
@@ -4408,12 +4830,12 @@ void bnxt_set_ring_params(struct bnxt *bp)
/* Changing allocation mode of RX rings.
* TODO: Update when extending xdp_rxq_info to support allocation modes.
*/
-int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
+static void __bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
{
struct net_device *dev = bp->dev;
if (page_mode) {
- bp->flags &= ~BNXT_FLAG_AGG_RINGS;
+ bp->flags &= ~(BNXT_FLAG_AGG_RINGS | BNXT_FLAG_NO_AGG_RINGS);
bp->flags |= BNXT_FLAG_RX_PAGE_MODE;
if (bp->xdp_prog->aux->xdp_has_frags)
@@ -4429,15 +4851,30 @@ int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
bp->rx_skb_func = bnxt_rx_page_skb;
}
bp->rx_dir = DMA_BIDIRECTIONAL;
- /* Disable LRO or GRO_HW */
- netdev_update_features(dev);
} else {
dev->max_mtu = bp->max_mtu;
bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
bp->rx_dir = DMA_FROM_DEVICE;
bp->rx_skb_func = bnxt_rx_skb;
}
- return 0;
+}
+
+void bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
+{
+ __bnxt_set_rx_skb_mode(bp, page_mode);
+
+ if (!page_mode) {
+ int rx, tx;
+
+ bnxt_get_max_rings(bp, &rx, &tx, true);
+ if (rx > 1) {
+ bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
+ bp->dev->hw_features |= NETIF_F_LRO;
+ }
+ }
+
+ /* Update LRO and GRO_HW availability */
+ netdev_update_features(bp->dev);
}
static void bnxt_free_vnic_attributes(struct bnxt *bp)
@@ -4757,6 +5194,9 @@ static void bnxt_free_ring_stats(struct bnxt *bp)
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
bnxt_free_stats_mem(bp, &cpr->stats);
+
+ kfree(cpr->sw_stats);
+ cpr->sw_stats = NULL;
}
}
@@ -4771,6 +5211,10 @@ static int bnxt_alloc_stats(struct bnxt *bp)
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ cpr->sw_stats = kzalloc(sizeof(*cpr->sw_stats), GFP_KERNEL);
+ if (!cpr->sw_stats)
+ return -ENOMEM;
+
cpr->stats.len = size;
rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
if (rc)
@@ -4875,7 +5319,7 @@ void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
list_del_init(&fltr->list);
}
-void bnxt_clear_usr_fltrs(struct bnxt *bp, bool all)
+static void bnxt_clear_usr_fltrs(struct bnxt *bp, bool all)
{
struct bnxt_filter_base *usr_fltr, *tmp;
@@ -4901,8 +5345,10 @@ static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all)
{
int i;
- /* Under rtnl_lock and all our NAPIs have been disabled. It's
- * safe to delete the hash table.
+ netdev_assert_locked_or_invisible(bp->dev);
+
+ /* Under netdev instance lock and all our NAPIs have been disabled.
+ * It's safe to delete the hash table.
*/
for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
struct hlist_head *head;
@@ -5230,6 +5676,8 @@ int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
+ if (bp->fw_cap & BNXT_FW_CAP_NPAR_1_2)
+ flags |= FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT;
req->flags = cpu_to_le32(flags);
req->ver_maj_8b = DRV_VER_MAJ;
req->ver_min_8b = DRV_VER_MIN;
@@ -5247,6 +5695,10 @@ int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
u16 cmd = bnxt_vf_req_snif[i];
unsigned int bit, idx;
+ if ((bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN) &&
+ cmd == HWRM_PORT_PHY_QCFG)
+ continue;
+
idx = cmd / 32;
bit = cmd % 32;
data[idx] |= 1 << bit;
@@ -5788,8 +6240,25 @@ void bnxt_fill_ipv6_mask(__be32 mask[4])
static void
bnxt_cfg_rfs_ring_tbl_idx(struct bnxt *bp,
struct hwrm_cfa_ntuple_filter_alloc_input *req,
- u16 rxq)
+ struct bnxt_ntuple_filter *fltr)
{
+ u16 rxq = fltr->base.rxq;
+
+ if (fltr->base.flags & BNXT_ACT_RSS_CTX) {
+ struct ethtool_rxfh_context *ctx;
+ struct bnxt_rss_ctx *rss_ctx;
+ struct bnxt_vnic_info *vnic;
+
+ ctx = xa_load(&bp->dev->ethtool->rss_ctx,
+ fltr->base.fw_vnic_id);
+ if (ctx) {
+ rss_ctx = ethtool_rxfh_context_priv(ctx);
+ vnic = &rss_ctx->vnic;
+
+ req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
+ }
+ return;
+ }
if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
struct bnxt_vnic_info *vnic;
u32 enables;
@@ -5830,7 +6299,7 @@ int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
req->flags =
cpu_to_le32(CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP);
} else if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
- bnxt_cfg_rfs_ring_tbl_idx(bp, req, fltr->base.rxq);
+ bnxt_cfg_rfs_ring_tbl_idx(bp, req, fltr);
} else {
vnic = &bp->vnic_info[fltr->base.rxq + 1];
req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
@@ -5938,9 +6407,9 @@ static void bnxt_hwrm_vnic_update_tunl_tpa(struct bnxt *bp,
req->tnl_tpa_en_bitmap = cpu_to_le32(tunl_tpa_bmap);
}
-static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
+int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ u32 tpa_flags)
{
- struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
struct hwrm_vnic_tpa_cfg_input *req;
int rc;
@@ -6035,16 +6504,19 @@ static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
entries = HW_HASH_INDEX_SIZE;
bp->rss_indir_tbl_entries = entries;
- bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl),
- GFP_KERNEL);
+ bp->rss_indir_tbl =
+ kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), GFP_KERNEL);
if (!bp->rss_indir_tbl)
return -ENOMEM;
+
return 0;
}
-static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
+void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp,
+ struct ethtool_rxfh_context *rss_ctx)
{
u16 max_rings, max_entries, pad, i;
+ u32 *rss_indir_tbl;
if (!bp->rx_nr_rings)
return;
@@ -6055,18 +6527,22 @@ static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
max_rings = bp->rx_nr_rings;
max_entries = bnxt_get_rxfh_indir_size(bp->dev);
+ if (rss_ctx)
+ rss_indir_tbl = ethtool_rxfh_context_indir(rss_ctx);
+ else
+ rss_indir_tbl = &bp->rss_indir_tbl[0];
for (i = 0; i < max_entries; i++)
- bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
+ rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
pad = bp->rss_indir_tbl_entries - max_entries;
if (pad)
- memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
+ memset(&rss_indir_tbl[i], 0, pad * sizeof(*rss_indir_tbl));
}
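
The default map built above spreads indirection-table entries round-robin across the active RX rings (ethtool_rxfh_indir_default(i, n) reduces to i % n) and zeroes any padding entries beyond max_entries. A quick standalone illustration of the resulting layout:

    #include <stdio.h>

    /* Mimics ethtool_rxfh_indir_default(): entry i maps to ring i % n. */
    int main(void)
    {
        unsigned int max_entries = 16, rings = 5, i;

        for (i = 0; i < max_entries; i++)
            printf("entry %2u -> ring %u\n", i, i % rings);
        return 0;
    }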
static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
{
- u16 i, tbl_size, max_ring = 0;
+ u32 i, tbl_size, max_ring = 0;
if (!bp->rss_indir_tbl)
return 0;
@@ -6117,6 +6593,8 @@ static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG)
j = ethtool_rxfh_indir_default(i, bp->rx_nr_rings);
+ else if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG)
+ j = ethtool_rxfh_context_indir(vnic->rss_ctx)[i];
else
j = bp->rss_indir_tbl[i];
rxr = &bp->rx_ring[j];
@@ -6154,9 +6632,9 @@ __bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req,
req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
}
-static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
+static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ bool set_rss)
{
- struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
struct hwrm_vnic_rss_cfg_input *req;
int rc;
@@ -6174,9 +6652,9 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
return hwrm_req_send(bp, req);
}
-static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
+static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic, bool set_rss)
{
- struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
struct hwrm_vnic_rss_cfg_input *req;
dma_addr_t ring_tbl_map;
u32 i, nr_ctxs;
@@ -6229,9 +6707,9 @@ static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp)
hwrm_req_drop(bp, req);
}
-static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
+static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
- struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+ u16 hds_thresh = (u16)bp->dev->cfg_pending->hds_thresh;
struct hwrm_vnic_plcmodes_cfg_input *req;
int rc;
@@ -6241,22 +6719,21 @@ static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID);
+ req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size);
- if (BNXT_RX_PAGE_MODE(bp)) {
- req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size);
- } else {
+ if (!BNXT_RX_PAGE_MODE(bp) && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
req->enables |=
cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
- req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
- req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
+ req->hds_threshold = cpu_to_le16(hds_thresh);
}
req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
return hwrm_req_send(bp, req);
}
-static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
+static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic,
u16 ctx_idx)
{
struct hwrm_vnic_rss_cos_lb_ctx_free_input *req;
@@ -6265,10 +6742,10 @@ static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
return;
req->rss_cos_lb_ctx_id =
- cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
+ cpu_to_le16(vnic->fw_rss_cos_lb_ctx[ctx_idx]);
hwrm_req_send(bp, req);
- bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
+ vnic->fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
}
static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
@@ -6280,13 +6757,14 @@ static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
- bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
+ bnxt_hwrm_vnic_ctx_free_one(bp, vnic, j);
}
}
bp->rsscos_nr_ctxs = 0;
}
-static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
+static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic, u16 ctx_idx)
{
struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp;
struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req;
@@ -6299,7 +6777,7 @@ static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req);
if (!rc)
- bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
+ vnic->fw_rss_cos_lb_ctx[ctx_idx] =
le16_to_cpu(resp->rss_cos_lb_ctx_id);
hwrm_req_drop(bp, req);
@@ -6313,10 +6791,9 @@ static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
}
-int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
+int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT];
- struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
struct hwrm_vnic_cfg_input *req;
unsigned int ring = 0, grp_idx;
u16 def_vlan = 0;
@@ -6364,15 +6841,16 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
if (vnic->flags & BNXT_VNIC_RSS_FLAG)
ring = 0;
else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
- ring = vnic_id - 1;
- else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
+ ring = vnic->vnic_id - 1;
+ else if ((vnic->vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
ring = bp->rx_nr_rings - 1;
grp_idx = bp->rx_ring[ring].bnapi->index;
req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
req->lb_rule = cpu_to_le16(0xffff);
vnic_mru:
- req->mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN);
+ vnic->mru = bp->dev->mtu + VLAN_ETH_HLEN;
+ req->mru = cpu_to_le16(vnic->mru);
req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
#ifdef CONFIG_BNXT_SRIOV
@@ -6381,25 +6859,25 @@ vnic_mru:
#endif
if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
- if (!vnic_id && bnxt_ulp_registered(bp->edev))
+ if (vnic->vnic_id == BNXT_VNIC_DEFAULT && bnxt_ulp_registered(bp->edev))
req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
return hwrm_req_send(bp, req);
}
-static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
+static void bnxt_hwrm_vnic_free_one(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic)
{
- if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
+ if (vnic->fw_vnic_id != INVALID_HW_RING_ID) {
struct hwrm_vnic_free_input *req;
if (hwrm_req_init(bp, req, HWRM_VNIC_FREE))
return;
- req->vnic_id =
- cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
+ req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
hwrm_req_send(bp, req);
- bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
+ vnic->fw_vnic_id = INVALID_HW_RING_ID;
}
}
@@ -6408,15 +6886,14 @@ static void bnxt_hwrm_vnic_free(struct bnxt *bp)
u16 i;
for (i = 0; i < bp->nr_vnics; i++)
- bnxt_hwrm_vnic_free_one(bp, i);
+ bnxt_hwrm_vnic_free_one(bp, &bp->vnic_info[i]);
}
-static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
- unsigned int start_rx_ring_idx,
- unsigned int nr_rings)
+int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ unsigned int start_rx_ring_idx,
+ unsigned int nr_rings)
{
unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
- struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
struct hwrm_vnic_alloc_output *resp;
struct hwrm_vnic_alloc_input *req;
int rc;
@@ -6442,7 +6919,7 @@ static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
vnic_no_ring_grps:
for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
- if (vnic_id == BNXT_VNIC_DEFAULT)
+ if (vnic->vnic_id == BNXT_VNIC_DEFAULT)
req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
resp = hwrm_req_hold(bp, req);
@@ -6509,6 +6986,10 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
bp->rss_cap |= BNXT_RSS_CAP_ESP_V4_RSS_CAP;
if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP)
bp->rss_cap |= BNXT_RSS_CAP_ESP_V6_RSS_CAP;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPV6_FLOW_LABEL_CAP)
+ bp->rss_cap |= BNXT_RSS_CAP_IPV6_FLOW_LABEL_RSS_CAP;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RE_FLUSH_CAP)
+ bp->fw_cap |= BNXT_FW_CAP_VNIC_RE_FLUSH;
}
hwrm_req_drop(bp, req);
return rc;
@@ -6573,6 +7054,30 @@ static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
hwrm_req_drop(bp, req);
}
+static void bnxt_set_rx_ring_params_p5(struct bnxt *bp, u32 ring_type,
+ struct hwrm_ring_alloc_input *req,
+ struct bnxt_ring_struct *ring)
+{
+ struct bnxt_ring_grp_info *grp_info = &bp->grp_info[ring->grp_idx];
+ u32 enables = RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID |
+ RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID;
+
+ if (ring_type == HWRM_RING_ALLOC_AGG) {
+ req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
+ req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
+ req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
+ enables |= RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID;
+ } else {
+ req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
+ if (NET_IP_ALIGN == 2)
+ req->flags =
+ cpu_to_le16(RING_ALLOC_REQ_FLAGS_RX_SOP_PAD);
+ }
+ req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
+ req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
+ req->enables |= cpu_to_le32(enables);
+}
+
static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
struct bnxt_ring_struct *ring,
u32 ring_type, u32 map_index)
@@ -6604,6 +7109,7 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
switch (ring_type) {
case HWRM_RING_ALLOC_TX: {
struct bnxt_tx_ring_info *txr;
+ u16 flags = 0;
txr = container_of(ring, struct bnxt_tx_ring_info,
tx_ring_struct);
@@ -6617,40 +7123,19 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
if (bp->flags & BNXT_FLAG_TX_COAL_CMPL)
req->cmpl_coal_cnt =
RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64;
+ if ((bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) && bp->ptp_cfg)
+ flags |= RING_ALLOC_REQ_FLAGS_TX_PKT_TS_CMPL_ENABLE;
+ req->flags = cpu_to_le16(flags);
break;
}
case HWRM_RING_ALLOC_RX:
- req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
- req->length = cpu_to_le32(bp->rx_ring_mask + 1);
- if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
- u16 flags = 0;
-
- /* Association of rx ring with stats context */
- grp_info = &bp->grp_info[ring->grp_idx];
- req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
- req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
- req->enables |= cpu_to_le32(
- RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
- if (NET_IP_ALIGN == 2)
- flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
- req->flags = cpu_to_le16(flags);
- }
- break;
case HWRM_RING_ALLOC_AGG:
- if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
- req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
- /* Association of agg ring with rx ring */
- grp_info = &bp->grp_info[ring->grp_idx];
- req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
- req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
- req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
- req->enables |= cpu_to_le32(
- RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
- RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
- } else {
- req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
- }
- req->length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
+ req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
+ req->length = (ring_type == HWRM_RING_ALLOC_RX) ?
+ cpu_to_le32(bp->rx_ring_mask + 1) :
+ cpu_to_le32(bp->rx_agg_ring_mask + 1);
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ bnxt_set_rx_ring_params_p5(bp, ring_type, req, ring);
break;
case HWRM_RING_ALLOC_CMPL:
req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
@@ -6662,20 +7147,19 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
req->cq_handle = cpu_to_le64(ring->handle);
req->enables |= cpu_to_le32(
RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
- } else if (bp->flags & BNXT_FLAG_USING_MSIX) {
+ } else {
req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
}
break;
case HWRM_RING_ALLOC_NQ:
req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
req->length = cpu_to_le32(bp->cp_ring_mask + 1);
- if (bp->flags & BNXT_FLAG_USING_MSIX)
- req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
+ req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
break;
default:
netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
ring_type);
- return -1;
+ return -EINVAL;
}
resp = hwrm_req_hold(bp, req);
@@ -6790,6 +7274,81 @@ static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
bnxt_set_db_mask(bp, db, ring_type);
}
+static int bnxt_hwrm_rx_ring_alloc(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
+ struct bnxt_napi *bnapi = rxr->bnapi;
+ u32 type = HWRM_RING_ALLOC_RX;
+ u32 map_idx = bnapi->index;
+ int rc;
+
+ rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+ if (rc)
+ return rc;
+
+ bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
+ bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
+
+ return 0;
+}
+
+static int bnxt_hwrm_rx_agg_ring_alloc(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
+ u32 type = HWRM_RING_ALLOC_AGG;
+ u32 grp_idx = ring->grp_idx;
+ u32 map_idx;
+ int rc;
+
+ map_idx = grp_idx + bp->rx_nr_rings;
+ rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+ if (rc)
+ return rc;
+
+ bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
+ ring->fw_ring_id);
+ bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
+ bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
+ bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
+
+ return 0;
+}
+
+static int bnxt_hwrm_cp_ring_alloc_p5(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr)
+{
+ const u32 type = HWRM_RING_ALLOC_CMPL;
+ struct bnxt_napi *bnapi = cpr->bnapi;
+ struct bnxt_ring_struct *ring;
+ u32 map_idx = bnapi->index;
+ int rc;
+
+ ring = &cpr->cp_ring_struct;
+ ring->handle = BNXT_SET_NQ_HDL(cpr);
+ rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+ if (rc)
+ return rc;
+ bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
+ bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
+ return 0;
+}
+
+static int bnxt_hwrm_tx_ring_alloc(struct bnxt *bp,
+ struct bnxt_tx_ring_info *txr, u32 tx_idx)
+{
+ struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
+ const u32 type = HWRM_RING_ALLOC_TX;
+ int rc;
+
+ rc = hwrm_ring_alloc_send_msg(bp, ring, type, tx_idx);
+ if (rc)
+ return rc;
+ bnxt_set_db(bp, &txr->tx_db, type, tx_idx, ring->fw_ring_id);
+ return 0;
+}
+
static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
{
bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
@@ -6826,89 +7385,66 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
}
}
- type = HWRM_RING_ALLOC_TX;
for (i = 0; i < bp->tx_nr_rings; i++) {
struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
- struct bnxt_ring_struct *ring;
- u32 map_idx;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
- struct bnxt_cp_ring_info *cpr2 = txr->tx_cpr;
- struct bnxt_napi *bnapi = txr->bnapi;
- u32 type2 = HWRM_RING_ALLOC_CMPL;
-
- ring = &cpr2->cp_ring_struct;
- ring->handle = BNXT_SET_NQ_HDL(cpr2);
- map_idx = bnapi->index;
- rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
+ rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr);
if (rc)
goto err_out;
- bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
- ring->fw_ring_id);
- bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
}
- ring = &txr->tx_ring_struct;
- map_idx = i;
- rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+ rc = bnxt_hwrm_tx_ring_alloc(bp, txr, i);
if (rc)
goto err_out;
- bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
}
- type = HWRM_RING_ALLOC_RX;
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
- struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
- struct bnxt_napi *bnapi = rxr->bnapi;
- u32 map_idx = bnapi->index;
- rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+ rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
if (rc)
goto err_out;
- bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
/* If we have agg rings, post agg buffers first. */
if (!agg_rings)
bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
- bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
- struct bnxt_cp_ring_info *cpr2 = rxr->rx_cpr;
- u32 type2 = HWRM_RING_ALLOC_CMPL;
-
- ring = &cpr2->cp_ring_struct;
- ring->handle = BNXT_SET_NQ_HDL(cpr2);
- rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
+ rc = bnxt_hwrm_cp_ring_alloc_p5(bp, rxr->rx_cpr);
if (rc)
goto err_out;
- bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
- ring->fw_ring_id);
- bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
}
}
if (agg_rings) {
- type = HWRM_RING_ALLOC_AGG;
for (i = 0; i < bp->rx_nr_rings; i++) {
- struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
- struct bnxt_ring_struct *ring =
- &rxr->rx_agg_ring_struct;
- u32 grp_idx = ring->grp_idx;
- u32 map_idx = grp_idx + bp->rx_nr_rings;
-
- rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+ rc = bnxt_hwrm_rx_agg_ring_alloc(bp, &bp->rx_ring[i]);
if (rc)
goto err_out;
-
- bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
- ring->fw_ring_id);
- bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
- bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
- bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
}
}
err_out:
return rc;
}
+static void bnxt_cancel_dim(struct bnxt *bp)
+{
+ int i;
+
+ /* DIM work is initialized in bnxt_enable_napi(). Proceed only
+ * if NAPI is enabled.
+ */
+ if (!bp->bnapi || test_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
+ return;
+
+ /* Make sure NAPI sees that the VNIC is disabled */
+ synchronize_net();
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
+ struct bnxt_napi *bnapi = rxr->bnapi;
+
+ cancel_work_sync(&bnapi->cp_ring.dim.work);
+ }
+}
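
bnxt_cancel_dim() above relies on a specific ordering: the VNIC is disabled first, synchronize_net() then guarantees every in-flight NAPI poll has observed that, and only afterwards is each ring's DIM work cancelled so a poll cannot requeue it. A generic sketch of the same quiesce-then-cancel pattern, using a demo structure rather than the driver's rings:

    #include <linux/workqueue.h>
    #include <linux/netdevice.h>

    /* Illustrative only: stop producers, wait for them to notice,
     * then cancel deferred work they might still have queued.
     */
    struct demo_ring {
        bool active;
        struct work_struct dim_work;
    };

    static void demo_quiesce(struct demo_ring *rings, int n)
    {
        int i;

        for (i = 0; i < n; i++)
            WRITE_ONCE(rings[i].active, false);

        /* Wait for all in-flight softirq/NAPI users to finish. */
        synchronize_net();

        for (i = 0; i < n; i++)
            cancel_work_sync(&rings[i].dim_work);
    }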
+
static int hwrm_ring_free_send_msg(struct bnxt *bp,
struct bnxt_ring_struct *ring,
u32 ring_type, int cmpl_ring_id)
@@ -6942,66 +7478,109 @@ exit:
return 0;
}
-static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
+static void bnxt_hwrm_tx_ring_free(struct bnxt *bp,
+ struct bnxt_tx_ring_info *txr,
+ bool close_path)
{
- u32 type;
- int i;
+ struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
+ u32 cmpl_ring_id;
- if (!bp->bnapi)
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
return;
- for (i = 0; i < bp->tx_nr_rings; i++) {
- struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
- struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
-
- if (ring->fw_ring_id != INVALID_HW_RING_ID) {
- u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
+ cmpl_ring_id = close_path ? bnxt_cp_ring_for_tx(bp, txr) :
+ INVALID_HW_RING_ID;
+ hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_TX,
+ cmpl_ring_id);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+}
- hwrm_ring_free_send_msg(bp, ring,
- RING_FREE_REQ_RING_TYPE_TX,
- close_path ? cmpl_ring_id :
- INVALID_HW_RING_ID);
- ring->fw_ring_id = INVALID_HW_RING_ID;
- }
- }
+static void bnxt_hwrm_rx_ring_free(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr,
+ bool close_path)
+{
+ struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
+ u32 grp_idx = rxr->bnapi->index;
+ u32 cmpl_ring_id;
- for (i = 0; i < bp->rx_nr_rings; i++) {
- struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
- struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
- u32 grp_idx = rxr->bnapi->index;
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
+ return;
- if (ring->fw_ring_id != INVALID_HW_RING_ID) {
- u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
+ cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
+ hwrm_ring_free_send_msg(bp, ring,
+ RING_FREE_REQ_RING_TYPE_RX,
+ close_path ? cmpl_ring_id :
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ bp->grp_info[grp_idx].rx_fw_ring_id = INVALID_HW_RING_ID;
+}
- hwrm_ring_free_send_msg(bp, ring,
- RING_FREE_REQ_RING_TYPE_RX,
- close_path ? cmpl_ring_id :
- INVALID_HW_RING_ID);
- ring->fw_ring_id = INVALID_HW_RING_ID;
- bp->grp_info[grp_idx].rx_fw_ring_id =
- INVALID_HW_RING_ID;
- }
- }
+static void bnxt_hwrm_rx_agg_ring_free(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr,
+ bool close_path)
+{
+ struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
+ u32 grp_idx = rxr->bnapi->index;
+ u32 type, cmpl_ring_id;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
type = RING_FREE_REQ_RING_TYPE_RX_AGG;
else
type = RING_FREE_REQ_RING_TYPE_RX;
- for (i = 0; i < bp->rx_nr_rings; i++) {
- struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
- struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
- u32 grp_idx = rxr->bnapi->index;
- if (ring->fw_ring_id != INVALID_HW_RING_ID) {
- u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
+ return;
- hwrm_ring_free_send_msg(bp, ring, type,
- close_path ? cmpl_ring_id :
- INVALID_HW_RING_ID);
- ring->fw_ring_id = INVALID_HW_RING_ID;
- bp->grp_info[grp_idx].agg_fw_ring_id =
- INVALID_HW_RING_ID;
- }
+ cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
+ hwrm_ring_free_send_msg(bp, ring, type,
+ close_path ? cmpl_ring_id :
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ bp->grp_info[grp_idx].agg_fw_ring_id = INVALID_HW_RING_ID;
+}
+
+static void bnxt_hwrm_cp_ring_free(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr)
+{
+ struct bnxt_ring_struct *ring;
+
+ ring = &cpr->cp_ring_struct;
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
+ return;
+
+ hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_L2_CMPL,
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+}
+
+static void bnxt_clear_one_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
+{
+ struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+ int i, size = ring->ring_mem.page_size;
+
+ cpr->cp_raw_cons = 0;
+ cpr->toggle = 0;
+
+ for (i = 0; i < bp->cp_nr_pages; i++)
+ if (cpr->cp_desc_ring[i])
+ memset(cpr->cp_desc_ring[i], 0, size);
+}
+
+static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
+{
+ u32 type;
+ int i;
+
+ if (!bp->bnapi)
+ return;
+
+ for (i = 0; i < bp->tx_nr_rings; i++)
+ bnxt_hwrm_tx_ring_free(bp, &bp->tx_ring[i], close_path);
+
+ bnxt_cancel_dim(bp);
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ bnxt_hwrm_rx_ring_free(bp, &bp->rx_ring[i], close_path);
+ bnxt_hwrm_rx_agg_ring_free(bp, &bp->rx_ring[i], close_path);
}
/* The completion rings are about to be freed. After that the
@@ -7020,17 +7599,9 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
struct bnxt_ring_struct *ring;
int j;
- for (j = 0; j < cpr->cp_ring_count && cpr->cp_ring_arr; j++) {
- struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
+ for (j = 0; j < cpr->cp_ring_count && cpr->cp_ring_arr; j++)
+ bnxt_hwrm_cp_ring_free(bp, &cpr->cp_ring_arr[j]);
- ring = &cpr2->cp_ring_struct;
- if (ring->fw_ring_id == INVALID_HW_RING_ID)
- continue;
- hwrm_ring_free_send_msg(bp, ring,
- RING_FREE_REQ_RING_TYPE_L2_CMPL,
- INVALID_HW_RING_ID);
- ring->fw_ring_id = INVALID_HW_RING_ID;
- }
ring = &cpr->cp_ring_struct;
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
hwrm_ring_free_send_msg(bp, ring, type,
@@ -7274,17 +7845,7 @@ static int bnxt_hwrm_reserve_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
int bnxt_nq_rings_in_use(struct bnxt *bp)
{
- int cp = bp->cp_nr_rings;
- int ulp_msix, ulp_base;
-
- ulp_msix = bnxt_get_ulp_msix_num(bp);
- if (ulp_msix) {
- ulp_base = bnxt_get_ulp_msix_base(bp);
- cp += ulp_msix;
- if ((ulp_base + ulp_msix) > cp)
- cp = ulp_base + ulp_msix;
- }
- return cp;
+ return bp->cp_nr_rings + bnxt_get_ulp_msix_num(bp);
}
static int bnxt_cp_rings_in_use(struct bnxt *bp)
@@ -7300,16 +7861,7 @@ static int bnxt_cp_rings_in_use(struct bnxt *bp)
static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
{
- int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
- int cp = bp->cp_nr_rings;
-
- if (!ulp_stat)
- return cp;
-
- if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
- return bnxt_get_ulp_msix_base(bp) + ulp_stat;
-
- return cp + ulp_stat;
+ return bp->cp_nr_rings + bnxt_get_ulp_stat_ctxs(bp);
}
static int bnxt_get_total_rss_ctxs(struct bnxt *bp, struct bnxt_hw_rings *hwr)
@@ -7341,7 +7893,7 @@ static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
hw_resc->resv_rx_rings = bp->rx_nr_rings;
if (!netif_is_rxfh_configured(bp->dev))
- bnxt_set_dflt_rss_indir_tbl(bp);
+ bnxt_set_dflt_rss_indir_tbl(bp, NULL);
}
}
@@ -7349,7 +7901,7 @@ static int bnxt_get_total_vnics(struct bnxt *bp, int rx_rings)
{
if (bp->flags & BNXT_FLAG_RFS) {
if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
- return 2;
+ return 2 + bp->num_rss_ctx;
if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
return rx_rings + 1;
}
@@ -7364,19 +7916,20 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
int rx = bp->rx_nr_rings, stat;
int vnic, grp = rx;
- if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
- bp->hwrm_spec_code >= 0x10601)
- return true;
-
/* Old firmware does not need RX ring reservations but we still
* need to setup a default RSS map when needed. With new firmware
* we go through RX ring reservations first and then set up the
* RSS map for the successfully reserved RX rings when needed.
*/
- if (!BNXT_NEW_RM(bp)) {
+ if (!BNXT_NEW_RM(bp))
bnxt_check_rss_tbl_no_rmgr(bp);
+
+ if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
+ bp->hwrm_spec_code >= 0x10601)
+ return true;
+
+ if (!BNXT_NEW_RM(bp))
return false;
- }
vnic = bnxt_get_total_vnics(bp, rx);
@@ -7417,17 +7970,32 @@ static bool bnxt_rings_ok(struct bnxt *bp, struct bnxt_hw_rings *hwr)
hwr->stat && (hwr->cp_p5 || !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS));
}
+static int bnxt_get_avail_msix(struct bnxt *bp, int num);
+
static int __bnxt_reserve_rings(struct bnxt *bp)
{
struct bnxt_hw_rings hwr = {0};
- int rx_rings, rc;
+ int rx_rings, old_rx_rings, rc;
+ int cp = bp->cp_nr_rings;
+ int ulp_msix = 0;
bool sh = false;
int tx_cp;
if (!bnxt_need_reserve_rings(bp))
return 0;
- hwr.cp = bnxt_nq_rings_in_use(bp);
+ if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) {
+ ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want);
+ if (!ulp_msix)
+ bnxt_set_ulp_stat_ctxs(bp, 0);
+
+ if (ulp_msix > bp->ulp_num_msix_want)
+ ulp_msix = bp->ulp_num_msix_want;
+ hwr.cp = cp + ulp_msix;
+ } else {
+ hwr.cp = bnxt_nq_rings_in_use(bp);
+ }
+
hwr.tx = bp->tx_nr_rings;
hwr.rx = bp->rx_nr_rings;
if (bp->flags & BNXT_FLAG_SHARED_RINGS)
@@ -7442,6 +8010,7 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
hwr.grp = bp->rx_nr_rings;
hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
hwr.stat = bnxt_get_func_stat_ctxs(bp);
+ old_rx_rings = bp->hw_resc.resv_rx_rings;
rc = bnxt_hwrm_reserve_rings(bp, &hwr);
if (rc)
@@ -7466,7 +8035,8 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
}
rx_rings = min_t(int, rx_rings, hwr.grp);
hwr.cp = min_t(int, hwr.cp, bp->cp_nr_rings);
- if (hwr.stat > bnxt_get_ulp_stat_ctxs(bp))
+ if (bnxt_ulp_registered(bp->edev) &&
+ hwr.stat > bnxt_get_ulp_stat_ctxs(bp))
hwr.stat -= bnxt_get_ulp_stat_ctxs(bp);
hwr.cp = min_t(int, hwr.cp, hwr.stat);
rc = bnxt_trim_rings(bp, &rx_rings, &hwr.tx, hwr.cp, sh);
@@ -7474,6 +8044,11 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
hwr.rx = rx_rings << 1;
tx_cp = bnxt_num_tx_to_cp(bp, hwr.tx);
hwr.cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings;
+ if (hwr.tx != bp->tx_nr_rings) {
+ netdev_warn(bp->dev,
+ "Able to reserve only %d out of %d requested TX rings\n",
+ hwr.tx, bp->tx_nr_rings);
+ }
bp->tx_nr_rings = hwr.tx;
/* If we cannot reserve all the RX rings, reset the RSS map only
@@ -7496,8 +8071,22 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
if (!bnxt_rings_ok(bp, &hwr))
return -ENOMEM;
- if (!netif_is_rxfh_configured(bp->dev))
- bnxt_set_dflt_rss_indir_tbl(bp);
+ if (old_rx_rings != bp->hw_resc.resv_rx_rings &&
+ !netif_is_rxfh_configured(bp->dev))
+ bnxt_set_dflt_rss_indir_tbl(bp, NULL);
+
+ if (!bnxt_ulp_registered(bp->edev) && BNXT_NEW_RM(bp)) {
+ int resv_msix, resv_ctx, ulp_ctxs;
+ struct bnxt_hw_resc *hw_resc;
+
+ hw_resc = &bp->hw_resc;
+ resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings;
+ ulp_msix = min_t(int, resv_msix, ulp_msix);
+ bnxt_set_ulp_msix_num(bp, ulp_msix);
+ resv_ctx = hw_resc->resv_stat_ctxs - bp->cp_nr_rings;
+ ulp_ctxs = min(resv_ctx, bnxt_get_ulp_stat_ctxs(bp));
+ bnxt_set_ulp_stat_ctxs(bp, ulp_ctxs);
+ }
return rc;
}
@@ -7900,16 +8489,20 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
if (rc)
goto func_qcfg_exit;
+ flags = le16_to_cpu(resp->flags);
#ifdef CONFIG_BNXT_SRIOV
if (BNXT_VF(bp)) {
struct bnxt_vf_info *vf = &bp->vf;
vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
+ if (flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF)
+ vf->flags |= BNXT_VF_TRUST;
+ else
+ vf->flags &= ~BNXT_VF_TRUST;
} else {
bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
}
#endif
- flags = le16_to_cpu(resp->flags);
if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
@@ -7922,8 +8515,17 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
+ if (flags & FUNC_QCFG_RESP_FLAGS_ENABLE_RDMA_SRIOV)
+ bp->fw_cap |= BNXT_FW_CAP_ENABLE_RDMA_SRIOV;
+ if (resp->roce_bidi_opt_mode &
+ FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_DEDICATED)
+ bp->cos0_cos1_shared = 1;
+ else
+ bp->cos0_cos1_shared = 0;
+
switch (resp->port_partition_type) {
case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
+ case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_2:
case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
bp->port_partition_type = resp->port_partition_type;
@@ -7983,7 +8585,7 @@ static int bnxt_alloc_all_ctx_pg_info(struct bnxt *bp, int ctx_max)
struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
int n = 1;
- if (!ctxm->max_entries)
+ if (!ctxm->max_entries || ctxm->pg_info)
continue;
if (ctxm->instance_bmap)
@@ -7995,6 +8597,9 @@ static int bnxt_alloc_all_ctx_pg_info(struct bnxt *bp, int ctx_max)
return 0;
}
+static void bnxt_free_one_ctx_mem(struct bnxt *bp,
+ struct bnxt_ctx_mem_type *ctxm, bool force);
+
#define BNXT_CTX_INIT_VALID(flags) \
(!!((flags) & \
FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT))
@@ -8003,7 +8608,7 @@ static int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp)
{
struct hwrm_func_backing_store_qcaps_v2_output *resp;
struct hwrm_func_backing_store_qcaps_v2_input *req;
- struct bnxt_ctx_mem_info *ctx;
+ struct bnxt_ctx_mem_info *ctx = bp->ctx;
u16 type;
int rc;
@@ -8011,16 +8616,20 @@ static int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp)
if (rc)
return rc;
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
- bp->ctx = ctx;
+ if (!ctx) {
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ bp->ctx = ctx;
+ }
resp = hwrm_req_hold(bp, req);
for (type = 0; type < BNXT_CTX_V2_MAX; ) {
struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
u8 init_val, init_off, i;
+ u32 max_entries;
+ u16 entry_size;
__le32 *p;
u32 flags;
@@ -8030,15 +8639,26 @@ static int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp)
goto ctx_done;
flags = le32_to_cpu(resp->flags);
type = le16_to_cpu(resp->next_valid_type);
- if (!(flags & FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID))
+ if (!(flags & BNXT_CTX_MEM_TYPE_VALID)) {
+ bnxt_free_one_ctx_mem(bp, ctxm, true);
continue;
-
+ }
+ entry_size = le16_to_cpu(resp->entry_size);
+ max_entries = le32_to_cpu(resp->max_num_entries);
+ if (ctxm->mem_valid) {
+ if (!(flags & BNXT_CTX_MEM_PERSIST) ||
+ ctxm->entry_size != entry_size ||
+ ctxm->max_entries != max_entries)
+ bnxt_free_one_ctx_mem(bp, ctxm, true);
+ else
+ continue;
+ }
ctxm->type = le16_to_cpu(resp->type);
- ctxm->entry_size = le16_to_cpu(resp->entry_size);
+ ctxm->entry_size = entry_size;
ctxm->flags = flags;
ctxm->instance_bmap = le32_to_cpu(resp->instance_bit_map);
ctxm->entry_multiple = resp->entry_multiple;
- ctxm->max_entries = le32_to_cpu(resp->max_num_entries);
+ ctxm->max_entries = max_entries;
ctxm->min_entries = le32_to_cpu(resp->min_num_entries);
init_val = resp->ctx_init_value;
init_off = resp->ctx_init_offset;
@@ -8063,7 +8683,8 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
struct hwrm_func_backing_store_qcaps_input *req;
int rc;
- if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
+ if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) ||
+ (bp->ctx && bp->ctx->flags & BNXT_CTX_FLAG_INITED))
return 0;
if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
@@ -8404,6 +9025,36 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
return rc;
}
+static size_t bnxt_copy_ctx_pg_tbls(struct bnxt *bp,
+ struct bnxt_ctx_pg_info *ctx_pg,
+ void *buf, size_t offset, size_t head,
+ size_t tail)
+{
+ struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
+ size_t nr_pages = ctx_pg->nr_pages;
+ int page_size = rmem->page_size;
+ size_t len = 0, total_len = 0;
+ u16 depth = rmem->depth;
+
+ tail %= nr_pages * page_size;
+ do {
+ if (depth > 1) {
+ int i = head / (page_size * MAX_CTX_PAGES);
+ struct bnxt_ctx_pg_info *pg_tbl;
+
+ pg_tbl = ctx_pg->ctx_pg_tbl[i];
+ rmem = &pg_tbl->ring_mem;
+ }
+ len = __bnxt_copy_ring(bp, rmem, buf, offset, head, tail);
+ head += len;
+ offset += len;
+ total_len += len;
+ if (head >= nr_pages * page_size)
+ head = 0;
+ } while (head != tail);
+ return total_len;
+}
+
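The paged copy above walks context memory as a circular region between a head and a tail offset, wrapping at the end of the area and stopping when head catches back up to tail. A minimal user-space sketch of that head/tail arithmetic, with all names hypothetical and a flat buffer plus memcpy standing in for the driver's page tables and __bnxt_copy_ring:

#include <stdio.h>
#include <string.h>

/* Copy the bytes between head and tail out of a circular buffer of
 * total_size bytes, wrapping at the end, and return the length copied.
 * head == tail (after the modulo) means "copy the whole buffer".
 */
static size_t copy_circular(const char *ring, size_t total_size,
			    size_t head, size_t tail, char *out)
{
	size_t copied = 0;

	tail %= total_size;		/* tail may be one full lap ahead */
	do {
		size_t len = (head < tail ? tail : total_size) - head;

		memcpy(out + copied, ring + head, len);
		copied += len;
		head += len;
		if (head >= total_size)
			head = 0;	/* wrap to the start of the ring */
	} while (head != tail);
	return copied;
}

int main(void)
{
	char ring[8] = "ABCDEFGH";
	char out[8] = { 0 };
	size_t n = copy_circular(ring, sizeof(ring), 6, 3, out);

	printf("%zu bytes: %.*s\n", n, (int)n, out);	/* 5 bytes: GHABC */
	return 0;
}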
static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
struct bnxt_ctx_pg_info *ctx_pg)
{
@@ -8454,6 +9105,8 @@ static int bnxt_setup_ctxm_pg_tbls(struct bnxt *bp,
rc = bnxt_alloc_ctx_pg_tbls(bp, &ctx_pg[i], mem_size, pg_lvl,
ctxm->init_value ? ctxm : NULL);
}
+ if (!rc)
+ ctxm->mem_valid = 1;
return rc;
}
@@ -8480,6 +9133,16 @@ static int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt *bp,
hwrm_req_hold(bp, req);
req->type = cpu_to_le16(ctxm->type);
req->entry_size = cpu_to_le16(ctxm->entry_size);
+ if ((ctxm->flags & BNXT_CTX_MEM_PERSIST) &&
+ bnxt_bs_trace_avail(bp, ctxm->type)) {
+ struct bnxt_bs_trace_info *bs_trace;
+ u32 enables;
+
+ enables = FUNC_BACKING_STORE_CFG_V2_REQ_ENABLES_NEXT_BS_OFFSET;
+ req->enables = cpu_to_le32(enables);
+ bs_trace = &bp->bs_trace[bnxt_bstore_to_trace[ctxm->type]];
+ req->next_bs_offset = cpu_to_le32(bs_trace->last_offset);
+ }
req->subtype_valid_cnt = ctxm->split_entry_cnt;
for (i = 0, p = &req->split_entry_0; i < ctxm->split_entry_cnt; i++)
p[i] = cpu_to_le32(ctxm->split[i]);
@@ -8505,25 +9168,47 @@ static int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt *bp,
return rc;
}
-static int bnxt_backing_store_cfg_v2(struct bnxt *bp, u32 ena)
+static int bnxt_backing_store_cfg_v2(struct bnxt *bp)
{
struct bnxt_ctx_mem_info *ctx = bp->ctx;
struct bnxt_ctx_mem_type *ctxm;
- u16 last_type;
+ u16 last_type = BNXT_CTX_INV;
int rc = 0;
u16 type;
- if (!ena)
- return 0;
- else if (ena & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM)
- last_type = BNXT_CTX_MAX - 1;
- else
- last_type = BNXT_CTX_L2_MAX - 1;
+ for (type = BNXT_CTX_SRT; type <= BNXT_CTX_QPC; type++) {
+ ctxm = &ctx->ctx_arr[type];
+ if (!bnxt_bs_trace_avail(bp, type))
+ continue;
+ if (!ctxm->mem_valid) {
+ rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm,
+ ctxm->max_entries, 1);
+ if (rc) {
+ netdev_warn(bp->dev, "Unable to setup ctx page for type:0x%x.\n",
+ type);
+ continue;
+ }
+ bnxt_bs_trace_init(bp, ctxm);
+ }
+ last_type = type;
+ }
+
+ if (last_type == BNXT_CTX_INV) {
+ for (type = 0; type < BNXT_CTX_MAX; type++) {
+ ctxm = &ctx->ctx_arr[type];
+ if (ctxm->mem_valid)
+ last_type = type;
+ }
+ if (last_type == BNXT_CTX_INV)
+ return 0;
+ }
ctx->ctx_arr[last_type].last = 1;
for (type = 0 ; type < BNXT_CTX_V2_MAX; type++) {
ctxm = &ctx->ctx_arr[type];
+ if (!ctxm->mem_valid)
+ continue;
rc = bnxt_hwrm_func_backing_store_cfg_v2(bp, ctxm, ctxm->last);
if (rc)
return rc;
@@ -8531,21 +9216,63 @@ static int bnxt_backing_store_cfg_v2(struct bnxt *bp, u32 ena)
return 0;
}
-void bnxt_free_ctx_mem(struct bnxt *bp)
+/**
+ * __bnxt_copy_ctx_mem - copy host context memory
+ * @bp: The driver context
+ * @ctxm: The pointer to the context memory type
+ * @buf: The destination buffer or NULL to just obtain the length
+ * @offset: The buffer offset to copy the data to
+ * @head: The head offset of context memory to copy from
+ * @tail: The tail offset (last byte + 1) of context memory to end the copy
+ *
+ * This function is called for debugging purposes to dump the host context
+ * used by the chip.
+ *
+ * Return: Length of memory copied
+ */
+static size_t __bnxt_copy_ctx_mem(struct bnxt *bp,
+ struct bnxt_ctx_mem_type *ctxm, void *buf,
+ size_t offset, size_t head, size_t tail)
{
- struct bnxt_ctx_mem_info *ctx = bp->ctx;
- u16 type;
+ struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
+ size_t len = 0, total_len = 0;
+ int i, n = 1;
- if (!ctx)
- return;
+ if (!ctx_pg)
+ return 0;
- for (type = 0; type < BNXT_CTX_V2_MAX; type++) {
- struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
- struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
- int i, n = 1;
+ if (ctxm->instance_bmap)
+ n = hweight32(ctxm->instance_bmap);
+ for (i = 0; i < n; i++) {
+ len = bnxt_copy_ctx_pg_tbls(bp, &ctx_pg[i], buf, offset, head,
+ tail);
+ offset += len;
+ total_len += len;
+ }
+ return total_len;
+}
- if (!ctx_pg)
- continue;
+size_t bnxt_copy_ctx_mem(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm,
+ void *buf, size_t offset)
+{
+ size_t tail = ctxm->max_entries * ctxm->entry_size;
+
+ return __bnxt_copy_ctx_mem(bp, ctxm, buf, offset, 0, tail);
+}
+
+static void bnxt_free_one_ctx_mem(struct bnxt *bp,
+ struct bnxt_ctx_mem_type *ctxm, bool force)
+{
+ struct bnxt_ctx_pg_info *ctx_pg;
+ int i, n = 1;
+
+ ctxm->last = 0;
+
+ if (ctxm->mem_valid && !force && (ctxm->flags & BNXT_CTX_MEM_PERSIST))
+ return;
+
+ ctx_pg = ctxm->pg_info;
+ if (ctx_pg) {
if (ctxm->instance_bmap)
n = hweight32(ctxm->instance_bmap);
for (i = 0; i < n; i++)
@@ -8553,11 +9280,27 @@ void bnxt_free_ctx_mem(struct bnxt *bp)
kfree(ctx_pg);
ctxm->pg_info = NULL;
+ ctxm->mem_valid = 0;
}
+ memset(ctxm, 0, sizeof(*ctxm));
+}
+
+void bnxt_free_ctx_mem(struct bnxt *bp, bool force)
+{
+ struct bnxt_ctx_mem_info *ctx = bp->ctx;
+ u16 type;
+
+ if (!ctx)
+ return;
+
+ for (type = 0; type < BNXT_CTX_V2_MAX; type++)
+ bnxt_free_one_ctx_mem(bp, &ctx->ctx_arr[type], force);
ctx->flags &= ~BNXT_CTX_FLAG_INITED;
- kfree(ctx);
- bp->ctx = NULL;
+ if (force) {
+ kfree(ctx);
+ bp->ctx = NULL;
+ }
}
static int bnxt_alloc_ctx_mem(struct bnxt *bp)
@@ -8584,6 +9327,10 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
return 0;
+ ena = 0;
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
+ goto skip_legacy;
+
ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
l2_qps = ctxm->qp_l2_entries;
qp1_qps = ctxm->qp_qp1_entries;
@@ -8592,13 +9339,20 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
srqs = ctxm->srq_l2_entries;
max_srqs = ctxm->max_entries;
- ena = 0;
if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
pg_lvl = 2;
- extra_qps = min_t(u32, 65536, max_qps - l2_qps - qp1_qps);
- /* allocate extra qps if fw supports RoCE fast qp destroy feature */
- extra_qps += fast_qpmd_qps;
- extra_srqs = min_t(u32, 8192, max_srqs - srqs);
+ if (BNXT_SW_RES_LMT(bp)) {
+ extra_qps = max_qps - l2_qps - qp1_qps;
+ extra_srqs = max_srqs - srqs;
+ } else {
+ extra_qps = min_t(u32, 65536,
+ max_qps - l2_qps - qp1_qps);
+ /* allocate extra qps if fw supports RoCE fast qp
+ * destroy feature
+ */
+ extra_qps += fast_qpmd_qps;
+ extra_srqs = min_t(u32, 8192, max_srqs - srqs);
+ }
if (fast_qpmd_qps)
ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD;
}
@@ -8634,14 +9388,20 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
goto skip_rdma;
ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
- /* 128K extra is needed to accommodate static AH context
- * allocation by f/w.
- */
- num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
- num_ah = min_t(u32, num_mr, 1024 * 128);
- ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1;
- if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah)
- ctxm->mrav_av_entries = num_ah;
+ if (BNXT_SW_RES_LMT(bp) &&
+ ctxm->split_entry_cnt == BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1) {
+ num_ah = ctxm->mrav_av_entries;
+ num_mr = ctxm->max_entries - num_ah;
+ } else {
+ /* 128K extra is needed to accommodate static AH context
+ * allocation by f/w.
+ */
+ num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
+ num_ah = min_t(u32, num_mr, 1024 * 128);
+ ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1;
+ if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah)
+ ctxm->mrav_av_entries = num_ah;
+ }
rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, num_mr + num_ah, 2);
if (rc)
@@ -8672,8 +9432,9 @@ skip_rdma:
ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
+skip_legacy:
if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
- rc = bnxt_backing_store_cfg_v2(bp, ena);
+ rc = bnxt_backing_store_cfg_v2(bp);
else
rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
if (rc) {
@@ -8685,6 +9446,80 @@ skip_rdma:
return 0;
}
+static int bnxt_hwrm_crash_dump_mem_cfg(struct bnxt *bp)
+{
+ struct hwrm_dbg_crashdump_medium_cfg_input *req;
+ u16 page_attr;
+ int rc;
+
+ if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR))
+ return 0;
+
+ rc = hwrm_req_init(bp, req, HWRM_DBG_CRASHDUMP_MEDIUM_CFG);
+ if (rc)
+ return rc;
+
+ if (BNXT_PAGE_SIZE == 0x2000)
+ page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8K;
+ else if (BNXT_PAGE_SIZE == 0x10000)
+ page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_64K;
+ else
+ page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_4K;
+ req->pg_size_lvl = cpu_to_le16(page_attr |
+ bp->fw_crash_mem->ring_mem.depth);
+ req->pbl = cpu_to_le64(bp->fw_crash_mem->ring_mem.pg_tbl_map);
+ req->size = cpu_to_le32(bp->fw_crash_len);
+ req->output_dest_flags = cpu_to_le16(BNXT_DBG_CR_DUMP_MDM_CFG_DDR);
+ return hwrm_req_send(bp, req);
+}
+
+static void bnxt_free_crash_dump_mem(struct bnxt *bp)
+{
+ if (bp->fw_crash_mem) {
+ bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem);
+ kfree(bp->fw_crash_mem);
+ bp->fw_crash_mem = NULL;
+ }
+}
+
+static int bnxt_alloc_crash_dump_mem(struct bnxt *bp)
+{
+ u32 mem_size = 0;
+ int rc;
+
+ if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR))
+ return 0;
+
+ rc = bnxt_hwrm_get_dump_len(bp, BNXT_DUMP_CRASH, &mem_size);
+ if (rc)
+ return rc;
+
+ mem_size = round_up(mem_size, 4);
+
+ /* keep and use the existing pages */
+ if (bp->fw_crash_mem &&
+ mem_size <= bp->fw_crash_mem->nr_pages * BNXT_PAGE_SIZE)
+ goto alloc_done;
+
+ if (bp->fw_crash_mem)
+ bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem);
+ else
+ bp->fw_crash_mem = kzalloc(sizeof(*bp->fw_crash_mem),
+ GFP_KERNEL);
+ if (!bp->fw_crash_mem)
+ return -ENOMEM;
+
+ rc = bnxt_alloc_ctx_pg_tbls(bp, bp->fw_crash_mem, mem_size, 1, NULL);
+ if (rc) {
+ bnxt_free_crash_dump_mem(bp);
+ return rc;
+ }
+
+alloc_done:
+ bp->fw_crash_len = mem_size;
+ return 0;
+}
+
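bnxt_alloc_crash_dump_mem rounds the firmware-reported length up and keeps the existing page allocation whenever it is already large enough, only reallocating when the requested dump grows. A small stand-alone sketch of that reuse-or-grow pattern, with hypothetical names and plain calloc in place of the DMA page tables:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SZ 4096u

struct dump_buf {
	void *data;
	size_t pages;	/* capacity in whole pages */
	size_t len;	/* currently requested length */
};

/* Round the request up, keep the existing pages when they already fit,
 * otherwise replace them with a larger zeroed allocation.
 */
static int dump_buf_reserve(struct dump_buf *buf, size_t want)
{
	size_t need_pages;

	want = (want + 3) & ~(size_t)3;		/* round up to 4 bytes */
	need_pages = (want + PAGE_SZ - 1) / PAGE_SZ;

	if (buf->data && need_pages <= buf->pages)
		goto done;			/* reuse existing pages */

	free(buf->data);
	buf->data = calloc(need_pages, PAGE_SZ);
	if (!buf->data) {
		buf->pages = 0;
		return -1;
	}
	buf->pages = need_pages;
done:
	buf->len = want;
	return 0;
}

int main(void)
{
	struct dump_buf buf = { 0 };

	dump_buf_reserve(&buf, 5000);	/* allocates 2 pages */
	dump_buf_reserve(&buf, 3000);	/* reuses them */
	printf("pages=%zu len=%zu\n", buf.pages, buf.len);	/* pages=2 len=3000 */
	free(buf.data);
	return 0;
}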
int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
{
struct hwrm_func_resource_qcaps_output *resp;
@@ -8748,11 +9583,10 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
struct hwrm_port_mac_ptp_qcfg_output *resp;
struct hwrm_port_mac_ptp_qcfg_input *req;
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
- bool phc_cfg;
u8 flags;
int rc;
- if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5(bp)) {
+ if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5_PLUS(bp)) {
rc = -ENODEV;
goto no_ptp;
}
@@ -8768,7 +9602,8 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
goto exit;
flags = resp->flags;
- if (!(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
+ if (BNXT_CHIP_P5_AND_MINUS(bp) &&
+ !(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
rc = -ENODEV;
goto exit;
}
@@ -8781,18 +9616,22 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
ptp->bp = bp;
bp->ptp_cfg = ptp;
}
- if (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK) {
+
+ if (flags &
+ (PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK |
+ PORT_MAC_PTP_QCFG_RESP_FLAGS_64B_PHC_TIME)) {
ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
- } else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+ } else if (BNXT_CHIP_P5(bp)) {
ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
} else {
rc = -ENODEV;
goto exit;
}
- phc_cfg = (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0;
- rc = bnxt_ptp_init(bp, phc_cfg);
+ ptp->rtc_configured =
+ (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0;
+ rc = bnxt_ptp_init(bp);
if (rc)
netdev_warn(bp->dev, "PTP initialization failed.\n");
exit:
@@ -8809,10 +9648,10 @@ no_ptp:
static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
+ u32 flags, flags_ext, flags_ext2, flags_ext3;
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
struct hwrm_func_qcaps_output *resp;
struct hwrm_func_qcaps_input *req;
- struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
- u32 flags, flags_ext, flags_ext2;
int rc;
rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
@@ -8830,6 +9669,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->flags |= BNXT_FLAG_ROCEV1_CAP;
if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
bp->flags |= BNXT_FLAG_ROCEV2_CAP;
+ if (flags & FUNC_QCAPS_RESP_FLAGS_LINK_ADMIN_STATUS_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_LINK_ADMIN;
if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
@@ -8856,6 +9697,10 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
+ if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_NPAR_1_2_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_NPAR_1_2;
+ if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED))
+ bp->fw_cap |= BNXT_FW_CAP_DFLT_VLAN_TPID_PCP;
if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_BACKING_STORE_V2;
if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP)
@@ -8866,6 +9711,20 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS;
if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED)
bp->flags |= BNXT_FLAG_UDP_GSO_CAP;
+ if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_TX_TS_CMP;
+ if (flags_ext2 &
+ FUNC_QCAPS_RESP_FLAGS_EXT2_SW_MAX_RESOURCE_LIMITS_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS;
+ if (BNXT_PF(bp) &&
+ (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_ROCE_VF_RESOURCE_MGMT_SUPPORTED))
+ bp->fw_cap |= BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED;
+
+ flags_ext3 = le32_to_cpu(resp->flags_ext3);
+ if (flags_ext3 & FUNC_QCAPS_RESP_FLAGS_EXT3_ROCE_VF_DYN_ALLOC_SUPPORT)
+ bp->fw_cap |= BNXT_FW_CAP_ROCE_VF_DYN_ALLOC_SUPPORT;
+ if (flags_ext3 & FUNC_QCAPS_RESP_FLAGS_EXT3_MIRROR_ON_ROCE_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_MIRROR_ON_ROCE;
bp->tx_push_thresh = 0;
if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
@@ -8916,6 +9775,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
#endif
}
+ bp->tso_max_segs = le16_to_cpu(resp->max_tso_segs);
hwrm_func_qcaps_exit:
hwrm_req_drop(bp, req);
@@ -9089,7 +9949,7 @@ static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
BNXT_FW_HEALTH_WIN_BASE +
BNXT_GRC_REG_CHIP_NUM);
}
- if (!BNXT_CHIP_P5(bp))
+ if (!BNXT_CHIP_P5_PLUS(bp))
return;
status_loc = BNXT_GRC_REG_STATUS_P5 |
@@ -9321,7 +10181,7 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
struct hwrm_ver_get_input *req;
u16 fw_maj, fw_min, fw_bld, fw_rsv;
u32 dev_caps_cfg, hwrm_ver;
- int rc, len;
+ int rc, len, max_tmo_secs;
rc = hwrm_req_init(bp, req, HWRM_VER_GET);
if (rc)
@@ -9394,9 +10254,14 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000;
if (!bp->hwrm_cmd_max_timeout)
bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT;
- else if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT)
- netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog\n",
- bp->hwrm_cmd_max_timeout / 1000);
+ max_tmo_secs = bp->hwrm_cmd_max_timeout / 1000;
+#ifdef CONFIG_DETECT_HUNG_TASK
+ if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT ||
+ max_tmo_secs > CONFIG_DEFAULT_HUNG_TASK_TIMEOUT) {
+ netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog (kernel default %ds)\n",
+ max_tmo_secs, CONFIG_DEFAULT_HUNG_TASK_TIMEOUT);
+ }
+#endif
if (resp->hwrm_intf_maj_8b >= 1) {
bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
@@ -9676,7 +10541,7 @@ static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
else if (BNXT_NO_FW_ACCESS(bp))
return 0;
for (i = 0; i < bp->nr_vnics; i++) {
- rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
+ rc = bnxt_hwrm_vnic_set_tpa(bp, &bp->vnic_info[i], tpa_flags);
if (rc) {
netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
i, rc);
@@ -9691,7 +10556,7 @@ static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
int i;
for (i = 0; i < bp->nr_vnics; i++)
- bnxt_hwrm_vnic_set_rss(bp, i, false);
+ bnxt_hwrm_vnic_set_rss(bp, &bp->vnic_info[i], false);
}
static void bnxt_clear_vnic(struct bnxt *bp)
@@ -9769,28 +10634,27 @@ static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
return hwrm_req_send(bp, req);
}
-static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
+static int __bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
- struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
int rc;
if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
goto skip_rss_ctx;
/* allocate context for vnic */
- rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
+ rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
- vnic_id, rc);
+ vnic->vnic_id, rc);
goto vnic_setup_err;
}
bp->rsscos_nr_ctxs++;
if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
- rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
+ rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 1);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
- vnic_id, rc);
+ vnic->vnic_id, rc);
goto vnic_setup_err;
}
bp->rsscos_nr_ctxs++;
@@ -9798,26 +10662,26 @@ static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
skip_rss_ctx:
/* configure default vnic, ring grp */
- rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
+ rc = bnxt_hwrm_vnic_cfg(bp, vnic);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
- vnic_id, rc);
+ vnic->vnic_id, rc);
goto vnic_setup_err;
}
/* Enable RSS hashing on vnic */
- rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
+ rc = bnxt_hwrm_vnic_set_rss(bp, vnic, true);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
- vnic_id, rc);
+ vnic->vnic_id, rc);
goto vnic_setup_err;
}
if (bp->flags & BNXT_FLAG_AGG_RINGS) {
- rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
+ rc = bnxt_hwrm_vnic_set_hds(bp, vnic);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
- vnic_id, rc);
+ vnic->vnic_id, rc);
}
}
@@ -9825,16 +10689,53 @@ vnic_setup_err:
return rc;
}
-static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
+int bnxt_hwrm_vnic_update(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ u8 valid)
+{
+ struct hwrm_vnic_update_input *req;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_VNIC_UPDATE);
+ if (rc)
+ return rc;
+
+ req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
+
+ if (valid & VNIC_UPDATE_REQ_ENABLES_MRU_VALID)
+ req->mru = cpu_to_le16(vnic->mru);
+
+ req->enables = cpu_to_le32(valid);
+
+ return hwrm_req_send(bp, req);
+}
+
+int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ int rc;
+
+ rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
+ vnic->vnic_id, rc);
+ return rc;
+ }
+ rc = bnxt_hwrm_vnic_cfg(bp, vnic);
+ if (rc)
+ netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
+ vnic->vnic_id, rc);
+ return rc;
+}
+
+int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
int rc, i, nr_ctxs;
nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
for (i = 0; i < nr_ctxs; i++) {
- rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
+ rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, i);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
- vnic_id, i, rc);
+ vnic->vnic_id, i, rc);
break;
}
bp->rsscos_nr_ctxs++;
@@ -9842,63 +10743,57 @@ static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
if (i < nr_ctxs)
return -ENOMEM;
- rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
- if (rc) {
- netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
- vnic_id, rc);
- return rc;
- }
- rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
- if (rc) {
- netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
- vnic_id, rc);
+ rc = bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic);
+ if (rc)
return rc;
- }
+
if (bp->flags & BNXT_FLAG_AGG_RINGS) {
- rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
+ rc = bnxt_hwrm_vnic_set_hds(bp, vnic);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
- vnic_id, rc);
+ vnic->vnic_id, rc);
}
}
return rc;
}
-static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
+static int bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
- return __bnxt_setup_vnic_p5(bp, vnic_id);
+ return __bnxt_setup_vnic_p5(bp, vnic);
else
- return __bnxt_setup_vnic(bp, vnic_id);
+ return __bnxt_setup_vnic(bp, vnic);
}
-static int bnxt_alloc_and_setup_vnic(struct bnxt *bp, u16 vnic_id,
+static int bnxt_alloc_and_setup_vnic(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic,
u16 start_rx_ring_idx, int rx_rings)
{
int rc;
- rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, start_rx_ring_idx, rx_rings);
+ rc = bnxt_hwrm_vnic_alloc(bp, vnic, start_rx_ring_idx, rx_rings);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
- vnic_id, rc);
+ vnic->vnic_id, rc);
return rc;
}
- return bnxt_setup_vnic(bp, vnic_id);
+ return bnxt_setup_vnic(bp, vnic);
}
static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
{
+ struct bnxt_vnic_info *vnic;
int i, rc = 0;
- if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
- return bnxt_alloc_and_setup_vnic(bp, BNXT_VNIC_NTUPLE, 0,
- bp->rx_nr_rings);
+ if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
+ vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE];
+ return bnxt_alloc_and_setup_vnic(bp, vnic, 0, bp->rx_nr_rings);
+ }
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
return 0;
for (i = 0; i < bp->rx_nr_rings; i++) {
- struct bnxt_vnic_info *vnic;
u16 vnic_id = i + 1;
u16 ring_id = i;
@@ -9909,12 +10804,148 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
vnic->flags |= BNXT_VNIC_RFS_FLAG;
if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
- if (bnxt_alloc_and_setup_vnic(bp, vnic_id, ring_id, 1))
+ if (bnxt_alloc_and_setup_vnic(bp, &bp->vnic_info[vnic_id], ring_id, 1))
break;
}
return rc;
}
+void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
+ bool all)
+{
+ struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
+ struct bnxt_filter_base *usr_fltr, *tmp;
+ struct bnxt_ntuple_filter *ntp_fltr;
+ int i;
+
+ if (netif_running(bp->dev)) {
+ bnxt_hwrm_vnic_free_one(bp, &rss_ctx->vnic);
+ for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) {
+ if (vnic->fw_rss_cos_lb_ctx[i] != INVALID_HW_RING_ID)
+ bnxt_hwrm_vnic_ctx_free_one(bp, vnic, i);
+ }
+ }
+ if (!all)
+ return;
+
+ list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) {
+ if ((usr_fltr->flags & BNXT_ACT_RSS_CTX) &&
+ usr_fltr->fw_vnic_id == rss_ctx->index) {
+ ntp_fltr = container_of(usr_fltr,
+ struct bnxt_ntuple_filter,
+ base);
+ bnxt_hwrm_cfa_ntuple_filter_free(bp, ntp_fltr);
+ bnxt_del_ntp_filter(bp, ntp_fltr);
+ bnxt_del_one_usr_fltr(bp, usr_fltr);
+ }
+ }
+
+ if (vnic->rss_table)
+ dma_free_coherent(&bp->pdev->dev, vnic->rss_table_size,
+ vnic->rss_table,
+ vnic->rss_table_dma_addr);
+ bp->num_rss_ctx--;
+}
+
+static bool bnxt_vnic_has_rx_ring(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ int rxr_id)
+{
+ u16 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
+ int i, vnic_rx;
+
+ /* Ntuple VNIC always has all the rx rings. Any change of ring id
+ * must be updated because a future filter may use it.
+ */
+ if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG)
+ return true;
+
+ for (i = 0; i < tbl_size; i++) {
+ if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG)
+ vnic_rx = ethtool_rxfh_context_indir(vnic->rss_ctx)[i];
+ else
+ vnic_rx = bp->rss_indir_tbl[i];
+
+ if (rxr_id == vnic_rx)
+ return true;
+ }
+
+ return false;
+}
+
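bnxt_vnic_has_rx_ring reduces to a linear scan of the VNIC's RSS indirection table for the ring id (ntuple VNICs are treated as always matching). A trivial stand-alone version of that lookup, with a plain array standing in for the indirection table and hypothetical names throughout:

#include <stdbool.h>
#include <stdio.h>

/* Return true if ring_id appears in any slot of the indirection table. */
static bool indir_tbl_has_ring(const unsigned short *tbl, int tbl_size,
			       int ring_id)
{
	int i;

	for (i = 0; i < tbl_size; i++)
		if (tbl[i] == ring_id)
			return true;
	return false;
}

int main(void)
{
	unsigned short tbl[] = { 0, 1, 2, 3, 0, 1, 2, 3 };

	printf("%d %d\n", indir_tbl_has_ring(tbl, 8, 2),
	       indir_tbl_has_ring(tbl, 8, 7));	/* prints: 1 0 */
	return 0;
}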
+static int bnxt_set_vnic_mru_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ u16 mru, int rxr_id)
+{
+ int rc;
+
+ if (!bnxt_vnic_has_rx_ring(bp, vnic, rxr_id))
+ return 0;
+
+ if (mru) {
+ rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
+ vnic->vnic_id, rc);
+ return rc;
+ }
+ }
+ vnic->mru = mru;
+ bnxt_hwrm_vnic_update(bp, vnic,
+ VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
+
+ return 0;
+}
+
+static int bnxt_set_rss_ctx_vnic_mru(struct bnxt *bp, u16 mru, int rxr_id)
+{
+ struct ethtool_rxfh_context *ctx;
+ unsigned long context;
+ int rc;
+
+ xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
+ struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
+ struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
+
+ rc = bnxt_set_vnic_mru_p5(bp, vnic, mru, rxr_id);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp)
+{
+ bool set_tpa = !!(bp->flags & BNXT_FLAG_TPA);
+ struct ethtool_rxfh_context *ctx;
+ unsigned long context;
+
+ xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
+ struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
+ struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
+
+ if (bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings) ||
+ bnxt_hwrm_vnic_set_tpa(bp, vnic, set_tpa) ||
+ __bnxt_setup_vnic_p5(bp, vnic)) {
+ netdev_err(bp->dev, "Failed to restore RSS ctx %d\n",
+ rss_ctx->index);
+ bnxt_del_one_rss_ctx(bp, rss_ctx, true);
+ ethtool_rxfh_context_lost(bp->dev, rss_ctx->index);
+ }
+ }
+}
+
+static void bnxt_clear_rss_ctxs(struct bnxt *bp)
+{
+ struct ethtool_rxfh_context *ctx;
+ unsigned long context;
+
+ xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
+ struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
+
+ bnxt_del_one_rss_ctx(bp, rss_ctx, false);
+ }
+}
+
/* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
static bool bnxt_promisc_ok(struct bnxt *bp)
{
@@ -9927,16 +10958,17 @@ static bool bnxt_promisc_ok(struct bnxt *bp)
static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
{
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[1];
unsigned int rc = 0;
- rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
+ rc = bnxt_hwrm_vnic_alloc(bp, vnic, bp->rx_nr_rings - 1, 1);
if (rc) {
netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
rc);
return rc;
}
- rc = bnxt_hwrm_vnic_cfg(bp, 1);
+ rc = bnxt_hwrm_vnic_cfg(bp, vnic);
if (rc) {
netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
rc);
@@ -9979,7 +11011,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
rx_nr_rings--;
/* default vnic 0 */
- rc = bnxt_hwrm_vnic_alloc(bp, BNXT_VNIC_DEFAULT, 0, rx_nr_rings);
+ rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, rx_nr_rings);
if (rc) {
netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
goto err_out;
@@ -9988,7 +11020,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
if (BNXT_VF(bp))
bnxt_hwrm_func_qcfg(bp);
- rc = bnxt_setup_vnic(bp, BNXT_VNIC_DEFAULT);
+ rc = bnxt_setup_vnic(bp, vnic);
if (rc)
goto err_out;
if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
@@ -10207,22 +11239,32 @@ static void bnxt_setup_msix(struct bnxt *bp)
}
}
-static void bnxt_setup_inta(struct bnxt *bp)
+static int bnxt_init_int_mode(struct bnxt *bp);
+
+static int bnxt_change_msix(struct bnxt *bp, int total)
{
- const int len = sizeof(bp->irq_tbl[0].name);
+ struct msi_map map;
+ int i;
- if (bp->num_tc) {
- netdev_reset_tc(bp->dev);
- bp->num_tc = 0;
+ /* add MSIX to the end if needed */
+ for (i = bp->total_irqs; i < total; i++) {
+ map = pci_msix_alloc_irq_at(bp->pdev, i, NULL);
+ if (map.index < 0)
+ return bp->total_irqs;
+ bp->irq_tbl[i].vector = map.virq;
+ bp->total_irqs++;
}
- snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
- 0);
- bp->irq_tbl[0].handler = bnxt_inta;
+ /* trim MSIX from the end if needed */
+ for (i = bp->total_irqs; i > total; i--) {
+ map.index = i - 1;
+ map.virq = bp->irq_tbl[i - 1].vector;
+ pci_msix_free_irq(bp->pdev, map);
+ bp->total_irqs--;
+ }
+ return bp->total_irqs;
}
-static int bnxt_init_int_mode(struct bnxt *bp);
-
static int bnxt_setup_int_mode(struct bnxt *bp)
{
int rc;
@@ -10233,10 +11275,7 @@ static int bnxt_setup_int_mode(struct bnxt *bp)
return rc ?: -ENODEV;
}
- if (bp->flags & BNXT_FLAG_USING_MSIX)
- bnxt_setup_msix(bp);
- else
- bnxt_setup_inta(bp);
+ bnxt_setup_msix(bp);
rc = bnxt_set_real_num_queues(bp);
return rc;
@@ -10303,19 +11342,10 @@ unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
}
-int bnxt_get_avail_msix(struct bnxt *bp, int num)
+static int bnxt_get_avail_msix(struct bnxt *bp, int num)
{
- int max_cp = bnxt_get_max_func_cp_rings(bp);
int max_irq = bnxt_get_max_func_irqs(bp);
int total_req = bp->cp_nr_rings + num;
- int max_idx, avail_msix;
-
- max_idx = bp->total_irqs;
- if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
- max_idx = min_t(int, bp->total_irqs, max_cp);
- avail_msix = max_idx - bp->cp_nr_rings;
- if (!BNXT_NEW_RM(bp) || avail_msix >= num)
- return avail_msix;
if (max_irq < total_req) {
num = max_irq - bp->cp_nr_rings;
@@ -10333,10 +11363,9 @@ static int bnxt_get_num_msix(struct bnxt *bp)
return bnxt_nq_rings_in_use(bp);
}
-static int bnxt_init_msix(struct bnxt *bp)
+static int bnxt_init_int_mode(struct bnxt *bp)
{
- int i, total_vecs, max, rc = 0, min = 1, ulp_msix, tx_cp;
- struct msix_entry *msix_ent;
+ int i, total_vecs, max, rc = 0, min = 1, ulp_msix, tx_cp, tbl_size;
total_vecs = bnxt_get_num_msix(bp);
max = bnxt_get_max_func_irqs(bp);
@@ -10346,29 +11375,24 @@ static int bnxt_init_msix(struct bnxt *bp)
if (!total_vecs)
return 0;
- msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
- if (!msix_ent)
- return -ENOMEM;
-
- for (i = 0; i < total_vecs; i++) {
- msix_ent[i].entry = i;
- msix_ent[i].vector = 0;
- }
-
if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
min = 2;
- total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
+ total_vecs = pci_alloc_irq_vectors(bp->pdev, min, total_vecs,
+ PCI_IRQ_MSIX);
ulp_msix = bnxt_get_ulp_msix_num(bp);
if (total_vecs < 0 || total_vecs < ulp_msix) {
rc = -ENODEV;
goto msix_setup_exit;
}
- bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
+ tbl_size = total_vecs;
+ if (pci_msix_can_alloc_dyn(bp->pdev))
+ tbl_size = max;
+ bp->irq_tbl = kcalloc(tbl_size, sizeof(*bp->irq_tbl), GFP_KERNEL);
if (bp->irq_tbl) {
for (i = 0; i < total_vecs; i++)
- bp->irq_tbl[i].vector = msix_ent[i].vector;
+ bp->irq_tbl[i].vector = pci_irq_vector(bp->pdev, i);
bp->total_irqs = total_vecs;
/* Trim rings based upon num of vectors allocated */
@@ -10386,78 +11410,61 @@ static int bnxt_init_msix(struct bnxt *bp)
rc = -ENOMEM;
goto msix_setup_exit;
}
- bp->flags |= BNXT_FLAG_USING_MSIX;
- kfree(msix_ent);
return 0;
msix_setup_exit:
- netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
+ netdev_err(bp->dev, "bnxt_init_int_mode err: %x\n", rc);
kfree(bp->irq_tbl);
bp->irq_tbl = NULL;
- pci_disable_msix(bp->pdev);
- kfree(msix_ent);
- return rc;
-}
-
-static int bnxt_init_inta(struct bnxt *bp)
-{
- bp->irq_tbl = kzalloc(sizeof(struct bnxt_irq), GFP_KERNEL);
- if (!bp->irq_tbl)
- return -ENOMEM;
-
- bp->total_irqs = 1;
- bp->rx_nr_rings = 1;
- bp->tx_nr_rings = 1;
- bp->cp_nr_rings = 1;
- bp->flags |= BNXT_FLAG_SHARED_RINGS;
- bp->irq_tbl[0].vector = bp->pdev->irq;
- return 0;
-}
-
-static int bnxt_init_int_mode(struct bnxt *bp)
-{
- int rc = -ENODEV;
-
- if (bp->flags & BNXT_FLAG_MSIX_CAP)
- rc = bnxt_init_msix(bp);
-
- if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
- /* fallback to INTA */
- rc = bnxt_init_inta(bp);
- }
+ pci_free_irq_vectors(bp->pdev);
return rc;
}
static void bnxt_clear_int_mode(struct bnxt *bp)
{
- if (bp->flags & BNXT_FLAG_USING_MSIX)
- pci_disable_msix(bp->pdev);
+ pci_free_irq_vectors(bp->pdev);
kfree(bp->irq_tbl);
bp->irq_tbl = NULL;
- bp->flags &= ~BNXT_FLAG_USING_MSIX;
}
int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
{
bool irq_cleared = false;
+ bool irq_change = false;
int tcs = bp->num_tc;
+ int irqs_required;
int rc;
if (!bnxt_need_reserve_rings(bp))
return 0;
- if (irq_re_init && BNXT_NEW_RM(bp) &&
- bnxt_get_num_msix(bp) != bp->total_irqs) {
- bnxt_ulp_irq_stop(bp);
- bnxt_clear_int_mode(bp);
- irq_cleared = true;
+ if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) {
+ int ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want);
+
+ if (ulp_msix > bp->ulp_num_msix_want)
+ ulp_msix = bp->ulp_num_msix_want;
+ irqs_required = ulp_msix + bp->cp_nr_rings;
+ } else {
+ irqs_required = bnxt_get_num_msix(bp);
+ }
+
+ if (irq_re_init && BNXT_NEW_RM(bp) && irqs_required != bp->total_irqs) {
+ irq_change = true;
+ if (!pci_msix_can_alloc_dyn(bp->pdev)) {
+ bnxt_ulp_irq_stop(bp);
+ bnxt_clear_int_mode(bp);
+ irq_cleared = true;
+ }
}
rc = __bnxt_reserve_rings(bp);
if (irq_cleared) {
if (!rc)
rc = bnxt_init_int_mode(bp);
bnxt_ulp_irq_restart(bp, rc);
+ } else if (irq_change && !rc) {
+ if (bnxt_change_msix(bp, irqs_required) != irqs_required)
+ rc = -ENOSPC;
}
if (rc) {
netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
@@ -10477,6 +11484,155 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
return 0;
}
+static void bnxt_tx_queue_stop(struct bnxt *bp, int idx)
+{
+ struct bnxt_tx_ring_info *txr;
+ struct netdev_queue *txq;
+ struct bnxt_napi *bnapi;
+ int i;
+
+ bnapi = bp->bnapi[idx];
+ bnxt_for_each_napi_tx(i, bnapi, txr) {
+ WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
+ synchronize_net();
+
+ if (!(bnapi->flags & BNXT_NAPI_FLAG_XDP)) {
+ txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
+ if (txq) {
+ __netif_tx_lock_bh(txq);
+ netif_tx_stop_queue(txq);
+ __netif_tx_unlock_bh(txq);
+ }
+ }
+
+ if (!bp->tph_mode)
+ continue;
+
+ bnxt_hwrm_tx_ring_free(bp, txr, true);
+ bnxt_hwrm_cp_ring_free(bp, txr->tx_cpr);
+ bnxt_free_one_tx_ring_skbs(bp, txr, txr->txq_index);
+ bnxt_clear_one_cp_ring(bp, txr->tx_cpr);
+ }
+}
+
+static int bnxt_tx_queue_start(struct bnxt *bp, int idx)
+{
+ struct bnxt_tx_ring_info *txr;
+ struct netdev_queue *txq;
+ struct bnxt_napi *bnapi;
+ int rc, i;
+
+ bnapi = bp->bnapi[idx];
+ /* All rings have been reserved and previously allocated.
+ * Reallocating with the same parameters should never fail.
+ */
+ bnxt_for_each_napi_tx(i, bnapi, txr) {
+ if (!bp->tph_mode)
+ goto start_tx;
+
+ rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr);
+ if (rc)
+ return rc;
+
+ rc = bnxt_hwrm_tx_ring_alloc(bp, txr, false);
+ if (rc)
+ return rc;
+
+ txr->tx_prod = 0;
+ txr->tx_cons = 0;
+ txr->tx_hw_cons = 0;
+start_tx:
+ WRITE_ONCE(txr->dev_state, 0);
+ synchronize_net();
+
+ if (bnapi->flags & BNXT_NAPI_FLAG_XDP)
+ continue;
+
+ txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
+ if (txq)
+ netif_tx_start_queue(txq);
+ }
+
+ return 0;
+}
+
+static void bnxt_irq_affinity_notify(struct irq_affinity_notify *notify,
+ const cpumask_t *mask)
+{
+ struct bnxt_irq *irq;
+ u16 tag;
+ int err;
+
+ irq = container_of(notify, struct bnxt_irq, affinity_notify);
+
+ if (!irq->bp->tph_mode)
+ return;
+
+ cpumask_copy(irq->cpu_mask, mask);
+
+ if (irq->ring_nr >= irq->bp->rx_nr_rings)
+ return;
+
+ if (pcie_tph_get_cpu_st(irq->bp->pdev, TPH_MEM_TYPE_VM,
+ cpumask_first(irq->cpu_mask), &tag))
+ return;
+
+ if (pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, tag))
+ return;
+
+ netdev_lock(irq->bp->dev);
+ if (netif_running(irq->bp->dev)) {
+ err = netdev_rx_queue_restart(irq->bp->dev, irq->ring_nr);
+ if (err)
+ netdev_err(irq->bp->dev,
+ "RX queue restart failed: err=%d\n", err);
+ }
+ netdev_unlock(irq->bp->dev);
+}
+
+static void bnxt_irq_affinity_release(struct kref *ref)
+{
+ struct irq_affinity_notify *notify =
+ container_of(ref, struct irq_affinity_notify, kref);
+ struct bnxt_irq *irq;
+
+ irq = container_of(notify, struct bnxt_irq, affinity_notify);
+
+ if (!irq->bp->tph_mode)
+ return;
+
+ if (pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, 0)) {
+ netdev_err(irq->bp->dev,
+ "Setting ST=0 for MSIX entry %d failed\n",
+ irq->msix_nr);
+ return;
+ }
+}
+
+static void bnxt_release_irq_notifier(struct bnxt_irq *irq)
+{
+ irq_set_affinity_notifier(irq->vector, NULL);
+}
+
+static void bnxt_register_irq_notifier(struct bnxt *bp, struct bnxt_irq *irq)
+{
+ struct irq_affinity_notify *notify;
+
+ irq->bp = bp;
+
+ /* Nothing to do if TPH is not enabled */
+ if (!bp->tph_mode)
+ return;
+
+ /* Register IRQ affinity notifier */
+ notify = &irq->affinity_notify;
+ notify->irq = irq->vector;
+ notify->notify = bnxt_irq_affinity_notify;
+ notify->release = bnxt_irq_affinity_release;
+
+ irq_set_affinity_notifier(irq->vector, notify);
+}
+
static void bnxt_free_irq(struct bnxt *bp)
{
struct bnxt_irq *irq;
@@ -10495,24 +11651,29 @@ static void bnxt_free_irq(struct bnxt *bp)
irq = &bp->irq_tbl[map_idx];
if (irq->requested) {
if (irq->have_cpumask) {
- irq_set_affinity_hint(irq->vector, NULL);
+ irq_update_affinity_hint(irq->vector, NULL);
free_cpumask_var(irq->cpu_mask);
irq->have_cpumask = 0;
}
+
+ bnxt_release_irq_notifier(irq);
+
free_irq(irq->vector, bp->bnapi[i]);
}
irq->requested = 0;
}
+
+ /* Disable TPH support */
+ pcie_disable_tph(bp->pdev);
+ bp->tph_mode = 0;
}
static int bnxt_request_irq(struct bnxt *bp)
{
+ struct cpu_rmap *rmap = NULL;
int i, j, rc = 0;
unsigned long flags = 0;
-#ifdef CONFIG_RFS_ACCEL
- struct cpu_rmap *rmap;
-#endif
rc = bnxt_setup_int_mode(bp);
if (rc) {
@@ -10523,43 +11684,59 @@ static int bnxt_request_irq(struct bnxt *bp)
#ifdef CONFIG_RFS_ACCEL
rmap = bp->dev->rx_cpu_rmap;
#endif
- if (!(bp->flags & BNXT_FLAG_USING_MSIX))
- flags = IRQF_SHARED;
+
+ /* Enable TPH support as part of IRQ request */
+ rc = pcie_enable_tph(bp->pdev, PCI_TPH_ST_IV_MODE);
+ if (!rc)
+ bp->tph_mode = PCI_TPH_ST_IV_MODE;
for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
int map_idx = bnxt_cp_num_to_irq_num(bp, i);
struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
-#ifdef CONFIG_RFS_ACCEL
- if (rmap && bp->bnapi[i]->rx_ring) {
+ if (IS_ENABLED(CONFIG_RFS_ACCEL) &&
+ rmap && bp->bnapi[i]->rx_ring) {
rc = irq_cpu_rmap_add(rmap, irq->vector);
if (rc)
netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
j);
j++;
}
-#endif
+
rc = request_irq(irq->vector, irq->handler, flags, irq->name,
bp->bnapi[i]);
if (rc)
break;
- netif_napi_set_irq(&bp->bnapi[i]->napi, irq->vector);
+ netif_napi_set_irq_locked(&bp->bnapi[i]->napi, irq->vector);
irq->requested = 1;
if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
int numa_node = dev_to_node(&bp->pdev->dev);
+ u16 tag;
irq->have_cpumask = 1;
+ irq->msix_nr = map_idx;
+ irq->ring_nr = i;
cpumask_set_cpu(cpumask_local_spread(i, numa_node),
irq->cpu_mask);
- rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
+ rc = irq_update_affinity_hint(irq->vector, irq->cpu_mask);
if (rc) {
netdev_warn(bp->dev,
- "Set affinity failed, IRQ = %d\n",
+ "Update affinity hint failed, IRQ = %d\n",
irq->vector);
break;
}
+
+ bnxt_register_irq_notifier(bp, irq);
+
+ /* Init ST table entry */
+ if (pcie_tph_get_cpu_st(irq->bp->pdev, TPH_MEM_TYPE_VM,
+ cpumask_first(irq->cpu_mask),
+ &tag))
+ continue;
+
+ pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, tag);
}
}
return rc;
@@ -10580,9 +11757,9 @@ static void bnxt_del_napi(struct bnxt *bp)
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
- __netif_napi_del(&bnapi->napi);
+ __netif_napi_del_locked(&bnapi->napi);
}
- /* We called __netif_napi_del(), we need
+ /* We called __netif_napi_del_locked(), we need
* to respect an RCU grace period before freeing napi structures.
*/
synchronize_net();
@@ -10590,29 +11767,26 @@ static void bnxt_del_napi(struct bnxt *bp)
static void bnxt_init_napi(struct bnxt *bp)
{
- int i;
+ int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
unsigned int cp_nr_rings = bp->cp_nr_rings;
struct bnxt_napi *bnapi;
+ int i;
- if (bp->flags & BNXT_FLAG_USING_MSIX) {
- int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ poll_fn = bnxt_poll_p5;
+ else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
+ cp_nr_rings--;
- if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
- poll_fn = bnxt_poll_p5;
- else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
- cp_nr_rings--;
- for (i = 0; i < cp_nr_rings; i++) {
- bnapi = bp->bnapi[i];
- netif_napi_add(bp->dev, &bnapi->napi, poll_fn);
- }
- if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
- bnapi = bp->bnapi[cp_nr_rings];
- netif_napi_add(bp->dev, &bnapi->napi,
- bnxt_poll_nitroa0);
- }
- } else {
- bnapi = bp->bnapi[0];
- netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll);
+ set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
+
+ for (i = 0; i < cp_nr_rings; i++) {
+ bnapi = bp->bnapi[i];
+ netif_napi_add_config_locked(bp->dev, &bnapi->napi, poll_fn,
+ bnapi->index);
+ }
+ if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
+ bnapi = bp->bnapi[cp_nr_rings];
+ netif_napi_add_locked(bp->dev, &bnapi->napi, bnxt_poll_nitroa0);
}
}
@@ -10630,12 +11804,10 @@ static void bnxt_disable_napi(struct bnxt *bp)
cpr = &bnapi->cp_ring;
if (bnapi->tx_fault)
- cpr->sw_stats.tx.tx_resets++;
+ cpr->sw_stats->tx.tx_resets++;
if (bnapi->in_reset)
- cpr->sw_stats.rx.rx_resets++;
- napi_disable(&bnapi->napi);
- if (bnapi->rx_ring)
- cancel_work_sync(&cpr->dim.work);
+ cpr->sw_stats->rx.rx_resets++;
+ napi_disable_locked(&bnapi->napi);
}
}
@@ -10657,7 +11829,7 @@ static void bnxt_enable_napi(struct bnxt *bp)
INIT_WORK(&cpr->dim.work, bnxt_dim_work);
cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}
- napi_enable(&bnapi->napi);
+ napi_enable_locked(&bnapi->napi);
}
}
@@ -10855,6 +12027,26 @@ hwrm_phy_qcaps_exit:
return rc;
}
+static void bnxt_hwrm_mac_qcaps(struct bnxt *bp)
+{
+ struct hwrm_port_mac_qcaps_output *resp;
+ struct hwrm_port_mac_qcaps_input *req;
+ int rc;
+
+ if (bp->hwrm_spec_code < 0x10a03)
+ return;
+
+ rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_QCAPS);
+ if (rc)
+ return;
+
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send_silent(bp, req);
+ if (!rc)
+ bp->mac_flags = resp->flags;
+ hwrm_req_drop(bp, req);
+}
+
static bool bnxt_support_dropped(u16 advertising, u16 supported)
{
u16 diff = advertising ^ supported;
@@ -11265,7 +12457,7 @@ static int bnxt_try_recover_fw(struct bnxt *bp)
return -ENODEV;
}
-static void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset)
+void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset)
{
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
@@ -11306,11 +12498,15 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
{
struct hwrm_func_drv_if_change_output *resp;
struct hwrm_func_drv_if_change_input *req;
- bool fw_reset = !bp->irq_tbl;
bool resc_reinit = false;
+ bool caps_change = false;
int rc, retry = 0;
+ bool fw_reset;
u32 flags = 0;
+ fw_reset = (bp->fw_reset_state == BNXT_FW_RESET_STATE_ABORT);
+ bp->fw_reset_state = 0;
+
if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
return 0;
@@ -11363,12 +12559,15 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
return -ENODEV;
}
- if (resc_reinit || fw_reset) {
- if (fw_reset) {
+ if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_CAPS_CHANGE)
+ caps_change = true;
+
+ if (resc_reinit || fw_reset || caps_change) {
+ if (fw_reset || caps_change) {
set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
- bnxt_ulp_stop(bp);
- bnxt_free_ctx_mem(bp);
+ bnxt_ulp_irq_stop(bp);
+ bnxt_free_ctx_mem(bp, false);
bnxt_dcb_free(bp);
rc = bnxt_fw_init_one(bp);
if (rc) {
@@ -11376,13 +12575,8 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
return rc;
}
+ /* IRQ will be initialized later in bnxt_request_irq()*/
bnxt_clear_int_mode(bp);
- rc = bnxt_init_int_mode(bp);
- if (rc) {
- clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
- netdev_err(bp->dev, "init int mode failed\n");
- return rc;
- }
}
rc = bnxt_cancel_reservations(bp, fw_reset);
}
@@ -11600,20 +12794,6 @@ static int bnxt_update_phy_setting(struct bnxt *bp)
return rc;
}
-/* Common routine to pre-map certain register block to different GRC window.
- * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
- * in PF and 3 windows in VF that can be customized to map in different
- * register blocks.
- */
-static void bnxt_preset_reg_win(struct bnxt *bp)
-{
- if (BNXT_PF(bp)) {
- /* CAG registers map to GRC window #4 */
- writel(BNXT_CAG_REG_BASE,
- bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
- }
-}
-
static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
static int bnxt_reinit_after_abort(struct bnxt *bp)
@@ -11674,11 +12854,61 @@ static void bnxt_cfg_usr_fltrs(struct bnxt *bp)
bnxt_cfg_one_usr_fltr(bp, usr_fltr);
}
+static int bnxt_set_xps_mapping(struct bnxt *bp)
+{
+ int numa_node = dev_to_node(&bp->pdev->dev);
+ unsigned int q_idx, map_idx, cpu, i;
+ const struct cpumask *cpu_mask_ptr;
+ int nr_cpus = num_online_cpus();
+ cpumask_t *q_map;
+ int rc = 0;
+
+ q_map = kcalloc(bp->tx_nr_rings_per_tc, sizeof(*q_map), GFP_KERNEL);
+ if (!q_map)
+ return -ENOMEM;
+
+ /* Create CPU mask for all TX queues across MQPRIO traffic classes.
+ * Each TC has the same number of TX queues. The nth TX queue for each
+ * TC will have the same CPU mask.
+ */
+ for (i = 0; i < nr_cpus; i++) {
+ map_idx = i % bp->tx_nr_rings_per_tc;
+ cpu = cpumask_local_spread(i, numa_node);
+ cpu_mask_ptr = get_cpu_mask(cpu);
+ cpumask_or(&q_map[map_idx], &q_map[map_idx], cpu_mask_ptr);
+ }
+
+ /* Register CPU mask for each TX queue except the ones marked for XDP */
+ for (q_idx = 0; q_idx < bp->dev->real_num_tx_queues; q_idx++) {
+ map_idx = q_idx % bp->tx_nr_rings_per_tc;
+ rc = netif_set_xps_queue(bp->dev, &q_map[map_idx], q_idx);
+ if (rc) {
+ netdev_warn(bp->dev, "Error setting XPS for q:%d\n",
+ q_idx);
+ break;
+ }
+ }
+
+ kfree(q_map);
+
+ return rc;
+}
+
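bnxt_set_xps_mapping spreads CPU i onto per-TC queue slot i % tx_nr_rings_per_tc, so the nth TX queue of every traffic class ends up with the same CPU set. A stand-alone sketch of that round-robin grouping, using plain bitmasks in place of cpumask_t; the CPU and queue counts here are made up:

#include <stdio.h>

#define NR_CPUS    8
#define NR_QUEUES  3	/* TX queues per traffic class */

int main(void)
{
	unsigned long q_mask[NR_QUEUES] = { 0 };
	int cpu, q;

	/* Spread CPUs round-robin over the per-TC queue slots. */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		q_mask[cpu % NR_QUEUES] |= 1UL << cpu;

	for (q = 0; q < NR_QUEUES; q++)
		printf("queue %d -> cpu mask 0x%lx\n", q, q_mask[q]);
	return 0;
}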
+static int bnxt_tx_nr_rings(struct bnxt *bp)
+{
+ return bp->num_tc ? bp->tx_nr_rings_per_tc * bp->num_tc :
+ bp->tx_nr_rings_per_tc;
+}
+
+static int bnxt_tx_nr_rings_per_tc(struct bnxt *bp)
+{
+ return bp->num_tc ? bp->tx_nr_rings / bp->num_tc : bp->tx_nr_rings;
+}
+
static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
int rc = 0;
- bnxt_preset_reg_win(bp);
netif_carrier_off(bp->dev);
if (irq_re_init) {
/* Reserve rings now if none were reserved at driver probe. */
@@ -11691,13 +12921,14 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
rc = bnxt_reserve_rings(bp, irq_re_init);
if (rc)
return rc;
- if ((bp->flags & BNXT_FLAG_RFS) &&
- !(bp->flags & BNXT_FLAG_USING_MSIX)) {
- /* disable RFS if falling back to INTA */
- bp->dev->hw_features &= ~NETIF_F_NTUPLE;
- bp->flags &= ~BNXT_FLAG_RFS;
- }
+ /* Make adjustments if reserved TX rings are less than requested */
+ bp->tx_nr_rings -= bp->tx_nr_rings_xdp;
+ bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
+ if (bp->tx_nr_rings_xdp) {
+ bp->tx_nr_rings_xdp = bp->tx_nr_rings_per_tc;
+ bp->tx_nr_rings += bp->tx_nr_rings_xdp;
+ }
rc = bnxt_alloc_mem(bp, irq_re_init);
if (rc) {
netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
@@ -11736,8 +12967,12 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
}
}
- if (irq_re_init)
+ if (irq_re_init) {
udp_tunnel_nic_reset_ntf(bp->dev);
+ rc = bnxt_set_xps_mapping(bp);
+ if (rc)
+ netdev_warn(bp->dev, "failed to set xps mapping\n");
+ }
if (bp->tx_nr_rings_xdp < num_possible_cpus()) {
if (!static_key_enabled(&bnxt_xdp_locking_key))
@@ -11758,10 +12993,10 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
/* VF-reps may need to be re-opened after the PF is re-opened */
if (BNXT_PF(bp))
bnxt_vf_reps_open(bp);
- if (bp->ptp_cfg)
- atomic_set(&bp->ptp_cfg->tx_avail, BNXT_MAX_TX_TS);
bnxt_ptp_init_rtc(bp, true);
bnxt_ptp_cfg_tstamp_filters(bp);
+ if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
+ bnxt_hwrm_realloc_rss_ctx_vnic(bp);
bnxt_cfg_usr_fltrs(bp);
return 0;
@@ -11775,7 +13010,6 @@ open_err_free_mem:
return rc;
}
-/* rtnl_lock held */
int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
int rc = 0;
@@ -11786,14 +13020,14 @@ int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
if (rc) {
netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
- dev_close(bp->dev);
+ netif_close(bp->dev);
}
return rc;
}
-/* rtnl_lock held, open the NIC half way by allocating all resources, but
- * NAPI, IRQ, and TX are not enabled. This is mainly used for offline
- * self tests.
+/* netdev instance lock held, open the NIC half way by allocating all
+ * resources, but NAPI, IRQ, and TX are not enabled. This is mainly used
+ * for offline self tests.
*/
int bnxt_half_open_nic(struct bnxt *bp)
{
@@ -11824,12 +13058,12 @@ int bnxt_half_open_nic(struct bnxt *bp)
half_open_err:
bnxt_free_skbs(bp);
bnxt_free_mem(bp, true);
- dev_close(bp->dev);
+ netif_close(bp->dev);
return rc;
}
-/* rtnl_lock held, this call can only be made after a previous successful
- * call to bnxt_half_open_nic().
+/* netdev instance lock held, this call can only be made after a previous
+ * successful call to bnxt_half_open_nic().
*/
void bnxt_half_close_nic(struct bnxt *bp)
{
@@ -11876,10 +13110,9 @@ static int bnxt_open(struct net_device *dev)
bnxt_hwrm_if_change(bp, false);
} else {
if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
- if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
- bnxt_ulp_start(bp, 0);
- bnxt_reenable_sriov(bp);
- }
+ if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
+ bnxt_queue_sp_work(bp,
+ BNXT_RESTART_ULP_SP_EVENT);
}
}
@@ -11910,6 +13143,8 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
while (bnxt_drv_busy(bp))
msleep(20);
+ if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
+ bnxt_clear_rss_ctxs(bp);
/* Flush rings and disable interrupts */
bnxt_shutdown_nic(bp, irq_re_init);
@@ -11917,7 +13152,7 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
bnxt_debug_dev_exit(bp);
bnxt_disable_napi(bp);
- del_timer_sync(&bp->timer);
+ timer_delete_sync(&bp->timer);
bnxt_free_skbs(bp);
/* Save ring stats before shutdown */
@@ -11937,10 +13172,11 @@ void bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
/* If we get here, it means firmware reset is in progress
* while we are trying to close. We can safely proceed with
- * the close because we are holding rtnl_lock(). Some firmware
- * messages may fail as we proceed to close. We set the
- * ABORT_ERR flag here so that the FW reset thread will later
- * abort when it gets the rtnl_lock() and sees the flag.
+ * the close because we are holding netdev instance lock.
+ * Some firmware messages may fail as we proceed to close.
+ * We set the ABORT_ERR flag here so that the FW reset thread
+ * will later abort when it gets the netdev instance lock
+ * and sees the flag.
*/
netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
@@ -12031,7 +13267,7 @@ static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
return hwrm_req_send(bp, req);
}
-/* rtnl_lock held */
+/* netdev instance lock held */
static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct mii_ioctl_data *mdio = if_mii(ifr);
@@ -12062,12 +13298,6 @@ static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
mdio->val_in);
- case SIOCSHWTSTAMP:
- return bnxt_hwtstamp_set(dev, ifr);
-
- case SIOCGHWTSTAMP:
- return bnxt_hwtstamp_get(dev, ifr);
-
default:
/* do nothing */
break;
@@ -12109,8 +13339,8 @@ static void bnxt_get_ring_stats(struct bnxt *bp,
stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
stats->rx_dropped +=
- cpr->sw_stats.rx.rx_netpoll_discards +
- cpr->sw_stats.rx.rx_oom_discards;
+ cpr->sw_stats->rx.rx_netpoll_discards +
+ cpr->sw_stats->rx.rx_oom_discards;
}
}
@@ -12177,7 +13407,7 @@ static void bnxt_get_one_ring_err_stats(struct bnxt *bp,
struct bnxt_total_ring_err_stats *stats,
struct bnxt_cp_ring_info *cpr)
{
- struct bnxt_sw_stats *sw_stats = &cpr->sw_stats;
+ struct bnxt_sw_stats *sw_stats = cpr->sw_stats;
u64 *hw_stats = cpr->stats.sw_stats;
stats->rx_total_l4_csum_errors += sw_stats->rx.rx_l4_csum_errors;
@@ -12407,33 +13637,26 @@ static bool bnxt_rfs_supported(struct bnxt *bp)
}
/* If runtime conditions support RFS */
-static bool bnxt_rfs_capable(struct bnxt *bp)
+bool bnxt_rfs_capable(struct bnxt *bp, bool new_rss_ctx)
{
struct bnxt_hw_rings hwr = {0};
int max_vnics, max_rss_ctxs;
- hwr.rss_ctx = 1;
- if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
- /* 2 VNICS: default + Ntuple */
- hwr.vnic = 2;
- hwr.rss_ctx = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) *
- hwr.vnic;
- goto check_reserve_vnic;
- }
- if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
+ !BNXT_SUPPORTS_NTUPLE_VNIC(bp))
return bnxt_rfs_supported(bp);
- if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
+
+ if (!bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
return false;
- hwr.vnic = 1 + bp->rx_nr_rings;
-check_reserve_vnic:
+ hwr.grp = bp->rx_nr_rings;
+ hwr.vnic = bnxt_get_total_vnics(bp, bp->rx_nr_rings);
+ if (new_rss_ctx)
+ hwr.vnic++;
+ hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
max_vnics = bnxt_get_max_func_vnics(bp);
max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
- if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
- !(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP))
- hwr.rss_ctx = hwr.vnic;
-
if (hwr.vnic > max_vnics || hwr.rss_ctx > max_rss_ctxs) {
if (bp->rx_nr_rings > 1)
netdev_warn(bp->dev,
@@ -12445,7 +13668,11 @@ check_reserve_vnic:
if (!BNXT_NEW_RM(bp))
return true;
- if (hwr.vnic == bp->hw_resc.resv_vnics &&
+ /* Do not reduce VNIC and RSS ctx reservations. There is a FW
+ * issue that will mess up the default VNIC if we reduce the
+ * reservations.
+ */
+ if (hwr.vnic <= bp->hw_resc.resv_vnics &&
hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
return true;
@@ -12467,7 +13694,7 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
struct bnxt *bp = netdev_priv(dev);
netdev_features_t vlan_features;
- if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
+ if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp, false))
features &= ~NETIF_F_NTUPLE;
if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog)
@@ -12479,7 +13706,7 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
if (features & NETIF_F_GRO_HW)
features &= ~NETIF_F_LRO;
- /* Both CTAG and STAG VLAN accelaration on the RX side have to be
+ /* Both CTAG and STAG VLAN acceleration on the RX side have to be
* turned on or off together.
*/
vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
@@ -12811,11 +14038,19 @@ static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
{
- struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
- int i = bnapi->index;
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring, *cpr2;
+ int i = bnapi->index, j;
netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
+ for (j = 0; j < cpr->cp_ring_count; j++) {
+ cpr2 = &cpr->cp_ring_arr[j];
+ if (!cpr2->bnapi)
+ continue;
+ netdev_info(bnapi->bp->dev, "[%d.%d]: cp{fw_ring: %d raw_cons: %x}\n",
+ i, j, cpr2->cp_ring_struct.fw_ring_id,
+ cpr2->cp_raw_cons);
+ }
}
static void bnxt_dbg_dump_states(struct bnxt *bp)
@@ -12859,17 +14094,8 @@ static void bnxt_reset_task(struct bnxt *bp, bool silent)
if (!silent)
bnxt_dbg_dump_states(bp);
if (netif_running(bp->dev)) {
- int rc;
-
- if (silent) {
- bnxt_close_nic(bp, false, false);
- bnxt_open_nic(bp, false, false);
- } else {
- bnxt_ulp_stop(bp);
- bnxt_close_nic(bp, true, false);
- rc = bnxt_open_nic(bp, true, false);
- bnxt_ulp_start(bp, rc);
- }
+ bnxt_close_nic(bp, !silent, false);
+ bnxt_open_nic(bp, !silent, false);
}
}
@@ -12920,7 +14146,7 @@ fw_reset:
static void bnxt_timer(struct timer_list *t)
{
- struct bnxt *bp = from_timer(bp, t, timer);
+ struct bnxt *bp = timer_container_of(bp, t, timer);
struct net_device *dev = bp->dev;
if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
@@ -12962,30 +14188,31 @@ bnxt_restart_timer:
mod_timer(&bp->timer, jiffies + bp->current_interval);
}
-static void bnxt_rtnl_lock_sp(struct bnxt *bp)
+static void bnxt_lock_sp(struct bnxt *bp)
{
/* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
* set. If the device is being closed, bnxt_close() may be holding
- * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
- * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
+ * netdev instance lock and waiting for BNXT_STATE_IN_SP_TASK to clear.
+ * So we must clear BNXT_STATE_IN_SP_TASK before holding netdev
+ * instance lock.
*/
clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
- rtnl_lock();
+ netdev_lock(bp->dev);
}
-static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
+static void bnxt_unlock_sp(struct bnxt *bp)
{
set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
- rtnl_unlock();
+ netdev_unlock(bp->dev);
}
/* Only called from bnxt_sp_task() */
static void bnxt_reset(struct bnxt *bp, bool silent)
{
- bnxt_rtnl_lock_sp(bp);
+ bnxt_lock_sp(bp);
if (test_bit(BNXT_STATE_OPEN, &bp->state))
bnxt_reset_task(bp, silent);
- bnxt_rtnl_unlock_sp(bp);
+ bnxt_unlock_sp(bp);
}
/* Only called from bnxt_sp_task() */
@@ -12993,9 +14220,9 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
{
int i;
- bnxt_rtnl_lock_sp(bp);
+ bnxt_lock_sp(bp);
if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
- bnxt_rtnl_unlock_sp(bp);
+ bnxt_unlock_sp(bp);
return;
}
/* Disable and flush TPA before resetting the RX ring */
@@ -13019,7 +14246,7 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
bnxt_reset_task(bp, true);
break;
}
- bnxt_free_one_rx_ring_skbs(bp, i);
+ bnxt_free_one_rx_ring_skbs(bp, rxr);
rxr->rx_prod = 0;
rxr->rx_agg_prod = 0;
rxr->rx_sw_agg_prod = 0;
@@ -13027,19 +14254,28 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
rxr->bnapi->in_reset = false;
bnxt_alloc_one_rx_ring(bp, i);
cpr = &rxr->bnapi->cp_ring;
- cpr->sw_stats.rx.rx_resets++;
+ cpr->sw_stats->rx.rx_resets++;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
}
if (bp->flags & BNXT_FLAG_TPA)
bnxt_set_tpa(bp, true);
- bnxt_rtnl_unlock_sp(bp);
+ bnxt_unlock_sp(bp);
+}
+
+static void bnxt_fw_fatal_close(struct bnxt *bp)
+{
+ bnxt_tx_disable(bp);
+ bnxt_disable_napi(bp);
+ bnxt_disable_int_sync(bp);
+ bnxt_free_irq(bp);
+ bnxt_clear_int_mode(bp);
+ pci_disable_device(bp->pdev);
}
static void bnxt_fw_reset_close(struct bnxt *bp)
{
- bnxt_ulp_stop(bp);
/* When firmware is in fatal state, quiesce device and disable
* bus master to prevent any potential bad DMAs before freeing
* kernel memory.
@@ -13050,12 +14286,7 @@ static void bnxt_fw_reset_close(struct bnxt *bp)
pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
if (val == 0xffff)
bp->fw_reset_min_dsecs = 0;
- bnxt_tx_disable(bp);
- bnxt_disable_napi(bp);
- bnxt_disable_int_sync(bp);
- bnxt_free_irq(bp);
- bnxt_clear_int_mode(bp);
- pci_disable_device(bp->pdev);
+ bnxt_fw_fatal_close(bp);
}
__bnxt_close_nic(bp, true, false);
bnxt_vf_reps_free(bp);
@@ -13063,7 +14294,7 @@ static void bnxt_fw_reset_close(struct bnxt *bp)
bnxt_hwrm_func_drv_unrgtr(bp);
if (pci_is_enabled(bp->pdev))
pci_disable_device(bp->pdev);
- bnxt_free_ctx_mem(bp);
+ bnxt_free_ctx_mem(bp, false);
}
static bool is_bnxt_fw_ok(struct bnxt *bp)
@@ -13086,7 +14317,7 @@ static bool is_bnxt_fw_ok(struct bnxt *bp)
return false;
}
-/* rtnl_lock is acquired before calling this function */
+/* netdev instance lock is acquired before calling this function */
static void bnxt_force_fw_reset(struct bnxt *bp)
{
struct bnxt_fw_health *fw_health = bp->fw_health;
@@ -13097,10 +14328,13 @@ static void bnxt_force_fw_reset(struct bnxt *bp)
test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
return;
+ /* we have to serialize with bnxt_refclk_read() */
if (ptp) {
- spin_lock_bh(&ptp->ptp_lock);
+ unsigned long flags;
+
+ write_seqlock_irqsave(&ptp->ptp_lock, flags);
set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
- spin_unlock_bh(&ptp->ptp_lock);
+ write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
} else {
set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
}
@@ -13125,9 +14359,10 @@ void bnxt_fw_exception(struct bnxt *bp)
{
netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
- bnxt_rtnl_lock_sp(bp);
+ bnxt_ulp_stop(bp);
+ bnxt_lock_sp(bp);
bnxt_force_fw_reset(bp);
- bnxt_rtnl_unlock_sp(bp);
+ bnxt_unlock_sp(bp);
}
/* Returns the number of registered VFs, or 1 if VF configuration is pending, or
@@ -13156,16 +14391,20 @@ static int bnxt_get_registered_vfs(struct bnxt *bp)
void bnxt_fw_reset(struct bnxt *bp)
{
- bnxt_rtnl_lock_sp(bp);
+ bnxt_ulp_stop(bp);
+ bnxt_lock_sp(bp);
if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
int n = 0, tmo;
+ /* we have to serialize with bnxt_refclk_read() */
if (ptp) {
- spin_lock_bh(&ptp->ptp_lock);
+ unsigned long flags;
+
+ write_seqlock_irqsave(&ptp->ptp_lock, flags);
set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
- spin_unlock_bh(&ptp->ptp_lock);
+ write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
} else {
set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
}
@@ -13176,7 +14415,7 @@ void bnxt_fw_reset(struct bnxt *bp)
netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
n);
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
- dev_close(bp->dev);
+ netif_close(bp->dev);
goto fw_reset_exit;
} else if (n > 0) {
u16 vf_tmo_dsecs = n * 10;
@@ -13199,7 +14438,7 @@ void bnxt_fw_reset(struct bnxt *bp)
bnxt_queue_fw_reset_work(bp, tmo);
}
fw_reset_exit:
- bnxt_rtnl_unlock_sp(bp);
+ bnxt_unlock_sp(bp);
}
static void bnxt_chk_missed_irq(struct bnxt *bp)
@@ -13234,7 +14473,7 @@ static void bnxt_chk_missed_irq(struct bnxt *bp)
bnxt_dbg_hwrm_ring_info_get(bp,
DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
fw_ring_id, &val[0], &val[1]);
- cpr->sw_stats.cmn.missed_irqs++;
+ cpr->sw_stats->cmn.missed_irqs++;
}
}
}
@@ -13280,6 +14519,12 @@ static void bnxt_fw_echo_reply(struct bnxt *bp)
hwrm_req_send(bp, req);
}
+static void bnxt_ulp_restart(struct bnxt *bp)
+{
+ bnxt_ulp_stop(bp);
+ bnxt_ulp_start(bp, 0);
+}
+
static void bnxt_sp_task(struct work_struct *work)
{
struct bnxt *bp = container_of(work, struct bnxt, sp_task);
@@ -13291,6 +14536,11 @@ static void bnxt_sp_task(struct work_struct *work)
return;
}
+ if (test_and_clear_bit(BNXT_RESTART_ULP_SP_EVENT, &bp->sp_event)) {
+ bnxt_ulp_restart(bp);
+ bnxt_reenable_sriov(bp);
+ }
+
if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
bnxt_cfg_rx_mode(bp);
@@ -13387,13 +14637,14 @@ static void bnxt_sp_task(struct work_struct *work)
static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
int *max_cp);
-/* Under rtnl_lock */
+/* Under netdev instance lock */
int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
int tx_xdp)
{
int max_rx, max_tx, max_cp, tx_sets = 1, tx_cp;
struct bnxt_hw_rings hwr = {0};
int rx_rings = rx;
+ int rc;
if (tcs)
tx_sets = tcs;
@@ -13419,14 +14670,30 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
return -ENOMEM;
hwr.stat = hwr.cp;
if (BNXT_NEW_RM(bp)) {
- hwr.cp += bnxt_get_ulp_msix_num(bp);
- hwr.stat += bnxt_get_ulp_stat_ctxs(bp);
+ hwr.cp += bnxt_get_ulp_msix_num_in_use(bp);
+ hwr.stat += bnxt_get_ulp_stat_ctxs_in_use(bp);
hwr.grp = rx;
hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
}
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
hwr.cp_p5 = hwr.tx + rx;
- return bnxt_hwrm_check_rings(bp, &hwr);
+ rc = bnxt_hwrm_check_rings(bp, &hwr);
+ if (!rc && pci_msix_can_alloc_dyn(bp->pdev)) {
+ if (!bnxt_ulp_registered(bp->edev)) {
+ hwr.cp += bnxt_get_ulp_msix_num(bp);
+ hwr.cp = min_t(int, hwr.cp, bnxt_get_max_func_irqs(bp));
+ }
+ if (hwr.cp > bp->total_irqs) {
+ int total_msix = bnxt_change_msix(bp, hwr.cp);
+
+ if (total_msix < hwr.cp) {
+ netdev_warn(bp->dev, "Unable to allocate %d MSIX vectors, maximum available %d\n",
+ hwr.cp, total_msix);
+ rc = -ENOSPC;
+ }
+ }
+ }
+ return rc;
}
static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
@@ -13503,6 +14770,23 @@ static bool bnxt_fw_pre_resv_vnics(struct bnxt *bp)
return false;
}
+static void bnxt_hwrm_pfcwd_qcaps(struct bnxt *bp)
+{
+ struct hwrm_queue_pfcwd_timeout_qcaps_output *resp;
+ struct hwrm_queue_pfcwd_timeout_qcaps_input *req;
+ int rc;
+
+ bp->max_pfcwd_tmo_ms = 0;
+ rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCWD_TIMEOUT_QCAPS);
+ if (rc)
+ return;
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send_silent(bp, req);
+ if (!rc)
+ bp->max_pfcwd_tmo_ms = le16_to_cpu(resp->max_pfcwd_timeout);
+ hwrm_req_drop(bp, req);
+}
+
static int bnxt_fw_init_one_p1(struct bnxt *bp)
{
int rc;
@@ -13564,9 +14848,23 @@ static int bnxt_fw_init_one_p2(struct bnxt *bp)
if (rc)
return -ENODEV;
+ rc = bnxt_alloc_crash_dump_mem(bp);
+ if (rc)
+ netdev_warn(bp->dev, "crash dump mem alloc failure rc: %d\n",
+ rc);
+ if (!rc) {
+ rc = bnxt_hwrm_crash_dump_mem_cfg(bp);
+ if (rc) {
+ bnxt_free_crash_dump_mem(bp);
+ netdev_warn(bp->dev,
+ "hwrm crash dump mem failure rc: %d\n", rc);
+ }
+ }
+
if (bnxt_fw_pre_resv_vnics(bp))
bp->fw_cap |= BNXT_FW_CAP_PRE_RESV_VNICS;
+ bnxt_hwrm_pfcwd_qcaps(bp);
bnxt_hwrm_func_qcfg(bp);
bnxt_hwrm_vnic_qcaps(bp);
bnxt_hwrm_port_led_qcaps(bp);
@@ -13603,7 +14901,7 @@ static void bnxt_set_dflt_rfs(struct bnxt *bp)
bp->flags &= ~BNXT_FLAG_RFS;
if (bnxt_rfs_supported(bp)) {
dev->hw_features |= NETIF_F_NTUPLE;
- if (bnxt_rfs_capable(bp)) {
+ if (bnxt_rfs_capable(bp, false)) {
bp->flags |= BNXT_FLAG_RFS;
dev->features |= NETIF_F_NTUPLE;
}
@@ -13747,12 +15045,10 @@ static bool bnxt_fw_reset_timeout(struct bnxt *bp)
static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
{
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
- if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) {
- bnxt_ulp_start(bp, rc);
+ if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
bnxt_dl_health_fw_status_update(bp, false);
- }
- bp->fw_reset_state = 0;
- dev_close(bp->dev);
+ bp->fw_reset_state = BNXT_FW_RESET_STATE_ABORT;
+ netif_close(bp->dev);
}
static void bnxt_fw_reset_task(struct work_struct *work)
@@ -13781,17 +15077,17 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bp->fw_reset_state = 0;
netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
n);
- return;
+ goto ulp_start;
}
bnxt_queue_fw_reset_work(bp, HZ / 10);
return;
}
bp->fw_reset_timestamp = jiffies;
- rtnl_lock();
+ netdev_lock(bp->dev);
if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
bnxt_fw_reset_abort(bp, rc);
- rtnl_unlock();
- return;
+ netdev_unlock(bp->dev);
+ goto ulp_start;
}
bnxt_fw_reset_close(bp);
if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
@@ -13801,7 +15097,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
tmo = bp->fw_reset_min_dsecs * HZ / 10;
}
- rtnl_unlock();
+ netdev_unlock(bp->dev);
bnxt_queue_fw_reset_work(bp, tmo);
return;
}
@@ -13875,7 +15171,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
fallthrough;
case BNXT_FW_RESET_STATE_OPENING:
- while (!rtnl_trylock()) {
+ while (!netdev_trylock(bp->dev)) {
bnxt_queue_fw_reset_work(bp, HZ / 10);
return;
}
@@ -13883,8 +15179,8 @@ static void bnxt_fw_reset_task(struct work_struct *work)
if (rc) {
netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
bnxt_fw_reset_abort(bp, rc);
- rtnl_unlock();
- return;
+ netdev_unlock(bp->dev);
+ goto ulp_start;
}
if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
@@ -13896,17 +15192,19 @@ static void bnxt_fw_reset_task(struct work_struct *work)
/* Make sure fw_reset_state is 0 before clearing the flag */
smp_mb__before_atomic();
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
- bnxt_ulp_start(bp, 0);
- bnxt_reenable_sriov(bp);
- bnxt_vf_reps_alloc(bp);
- bnxt_vf_reps_open(bp);
bnxt_ptp_reapply_pps(bp);
clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) {
bnxt_dl_health_fw_recovery_done(bp);
bnxt_dl_health_fw_status_update(bp, true);
}
- rtnl_unlock();
+ netdev_unlock(bp->dev);
+ bnxt_ulp_start(bp, 0);
+ bnxt_reenable_sriov(bp);
+ netdev_lock(bp->dev);
+ bnxt_vf_reps_alloc(bp);
+ bnxt_vf_reps_open(bp);
+ netdev_unlock(bp->dev);
break;
}
return;
@@ -13919,9 +15217,11 @@ fw_reset_abort_status:
netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
}
fw_reset_abort:
- rtnl_lock();
+ netdev_lock(bp->dev);
bnxt_fw_reset_abort(bp, rc);
- rtnl_unlock();
+ netdev_unlock(bp->dev);
+ulp_start:
+ bnxt_ulp_start(bp, rc);
}
static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
@@ -14011,13 +15311,14 @@ init_err:
return rc;
}
-/* rtnl_lock held */
static int bnxt_change_mac_addr(struct net_device *dev, void *p)
{
struct sockaddr *addr = p;
struct bnxt *bp = netdev_priv(dev);
int rc = 0;
+ netdev_assert_locked(dev);
+
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
@@ -14038,15 +15339,24 @@ static int bnxt_change_mac_addr(struct net_device *dev, void *p)
return rc;
}
-/* rtnl_lock held */
static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
{
struct bnxt *bp = netdev_priv(dev);
+ netdev_assert_locked(dev);
+
if (netif_running(dev))
bnxt_close_nic(bp, true, false);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
+
+ /* MTU change may change the AGG ring settings if an XDP multi-buffer
+ * program is attached. We need to set the AGG rings settings and
+ * rx_skb_func accordingly.
+ */
+ if (READ_ONCE(bp->xdp_prog))
+ bnxt_set_rx_skb_mode(bp, true);
+
bnxt_set_ring_params(bp);
if (netif_running(dev))
@@ -14414,8 +15724,7 @@ static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int ta
static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
.set_port = bnxt_udp_tunnel_set_port,
.unset_port = bnxt_udp_tunnel_unset_port,
- .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
- UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+ .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
.tables = {
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
@@ -14423,8 +15732,7 @@ static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
}, bnxt_udp_tunnels_p7 = {
.set_port = bnxt_udp_tunnel_set_port,
.unset_port = bnxt_udp_tunnel_unset_port,
- .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
- UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+ .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
.tables = {
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
@@ -14456,12 +15764,9 @@ static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
if (!br_spec)
return -EINVAL;
- nla_for_each_nested(attr, br_spec, rem) {
+ nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
u16 mode;
- if (nla_type(attr) != IFLA_BRIDGE_MODE)
- continue;
-
mode = nla_get_u16(attr);
if (mode == bp->br_mode)
break;
@@ -14523,6 +15828,8 @@ static const struct net_device_ops bnxt_netdev_ops = {
.ndo_xdp_xmit = bnxt_xdp_xmit,
.ndo_bridge_getlink = bnxt_bridge_getlink,
.ndo_bridge_setlink = bnxt_bridge_setlink,
+ .ndo_hwtstamp_get = bnxt_hwtstamp_get,
+ .ndo_hwtstamp_set = bnxt_hwtstamp_set,
};
static void bnxt_get_queue_stats_rx(struct net_device *dev, int i,
@@ -14532,6 +15839,9 @@ static void bnxt_get_queue_stats_rx(struct net_device *dev, int i,
struct bnxt_cp_ring_info *cpr;
u64 *sw;
+ if (!bp->bnapi)
+ return;
+
cpr = &bp->bnapi[i]->cp_ring;
sw = cpr->stats.sw_stats;
@@ -14545,7 +15855,7 @@ static void bnxt_get_queue_stats_rx(struct net_device *dev, int i,
stats->bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
stats->bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
- stats->alloc_fail = cpr->sw_stats.rx.rx_oom_discards;
+ stats->alloc_fail = cpr->sw_stats->rx.rx_oom_discards;
}
static void bnxt_get_queue_stats_tx(struct net_device *dev, int i,
@@ -14555,6 +15865,9 @@ static void bnxt_get_queue_stats_tx(struct net_device *dev, int i,
struct bnxt_napi *bnapi;
u64 *sw;
+ if (!bp->tx_ring)
+ return;
+
bnapi = bp->tx_ring[bp->tx_ring_map[i]].bnapi;
sw = bnapi->cp_ring.stats.sw_stats;
@@ -14589,20 +15902,325 @@ static const struct netdev_stat_ops bnxt_stat_ops = {
.get_base_stats = bnxt_get_base_stats,
};
+static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
+{
+ struct bnxt_rx_ring_info *rxr, *clone;
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_ring_struct *ring;
+ int rc;
+
+ if (!bp->rx_ring)
+ return -ENETDOWN;
+
+ rxr = &bp->rx_ring[idx];
+ clone = qmem;
+ memcpy(clone, rxr, sizeof(*rxr));
+ bnxt_init_rx_ring_struct(bp, clone);
+ bnxt_reset_rx_ring_struct(bp, clone);
+
+ clone->rx_prod = 0;
+ clone->rx_agg_prod = 0;
+ clone->rx_sw_agg_prod = 0;
+ clone->rx_next_cons = 0;
+ clone->need_head_pool = false;
+
+ rc = bnxt_alloc_rx_page_pool(bp, clone, rxr->page_pool->p.nid);
+ if (rc)
+ return rc;
+
+ rc = xdp_rxq_info_reg(&clone->xdp_rxq, bp->dev, idx, 0);
+ if (rc < 0)
+ goto err_page_pool_destroy;
+
+ rc = xdp_rxq_info_reg_mem_model(&clone->xdp_rxq,
+ MEM_TYPE_PAGE_POOL,
+ clone->page_pool);
+ if (rc)
+ goto err_rxq_info_unreg;
+
+ ring = &clone->rx_ring_struct;
+ rc = bnxt_alloc_ring(bp, &ring->ring_mem);
+ if (rc)
+ goto err_free_rx_ring;
+
+ if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+ ring = &clone->rx_agg_ring_struct;
+ rc = bnxt_alloc_ring(bp, &ring->ring_mem);
+ if (rc)
+ goto err_free_rx_agg_ring;
+
+ rc = bnxt_alloc_rx_agg_bmap(bp, clone);
+ if (rc)
+ goto err_free_rx_agg_ring;
+ }
+
+ if (bp->flags & BNXT_FLAG_TPA) {
+ rc = bnxt_alloc_one_tpa_info(bp, clone);
+ if (rc)
+ goto err_free_tpa_info;
+ }
+
+ bnxt_init_one_rx_ring_rxbd(bp, clone);
+ bnxt_init_one_rx_agg_ring_rxbd(bp, clone);
+
+ bnxt_alloc_one_rx_ring_skb(bp, clone, idx);
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ bnxt_alloc_one_rx_ring_netmem(bp, clone, idx);
+ if (bp->flags & BNXT_FLAG_TPA)
+ bnxt_alloc_one_tpa_info_data(bp, clone);
+
+ return 0;
+
+err_free_tpa_info:
+ bnxt_free_one_tpa_info(bp, clone);
+err_free_rx_agg_ring:
+ bnxt_free_ring(bp, &clone->rx_agg_ring_struct.ring_mem);
+err_free_rx_ring:
+ bnxt_free_ring(bp, &clone->rx_ring_struct.ring_mem);
+err_rxq_info_unreg:
+ xdp_rxq_info_unreg(&clone->xdp_rxq);
+err_page_pool_destroy:
+ page_pool_destroy(clone->page_pool);
+ page_pool_destroy(clone->head_pool);
+ clone->page_pool = NULL;
+ clone->head_pool = NULL;
+ return rc;
+}
+
+static void bnxt_queue_mem_free(struct net_device *dev, void *qmem)
+{
+ struct bnxt_rx_ring_info *rxr = qmem;
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_ring_struct *ring;
+
+ bnxt_free_one_rx_ring_skbs(bp, rxr);
+ bnxt_free_one_tpa_info(bp, rxr);
+
+ xdp_rxq_info_unreg(&rxr->xdp_rxq);
+
+ page_pool_destroy(rxr->page_pool);
+ page_pool_destroy(rxr->head_pool);
+ rxr->page_pool = NULL;
+ rxr->head_pool = NULL;
+
+ ring = &rxr->rx_ring_struct;
+ bnxt_free_ring(bp, &ring->ring_mem);
+
+ ring = &rxr->rx_agg_ring_struct;
+ bnxt_free_ring(bp, &ring->ring_mem);
+
+ kfree(rxr->rx_agg_bmap);
+ rxr->rx_agg_bmap = NULL;
+}
+
+static void bnxt_copy_rx_ring(struct bnxt *bp,
+ struct bnxt_rx_ring_info *dst,
+ struct bnxt_rx_ring_info *src)
+{
+ struct bnxt_ring_mem_info *dst_rmem, *src_rmem;
+ struct bnxt_ring_struct *dst_ring, *src_ring;
+ int i;
+
+ dst_ring = &dst->rx_ring_struct;
+ dst_rmem = &dst_ring->ring_mem;
+ src_ring = &src->rx_ring_struct;
+ src_rmem = &src_ring->ring_mem;
+
+ WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages);
+ WARN_ON(dst_rmem->page_size != src_rmem->page_size);
+ WARN_ON(dst_rmem->flags != src_rmem->flags);
+ WARN_ON(dst_rmem->depth != src_rmem->depth);
+ WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size);
+ WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem);
+
+ dst_rmem->pg_tbl = src_rmem->pg_tbl;
+ dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map;
+ *dst_rmem->vmem = *src_rmem->vmem;
+ for (i = 0; i < dst_rmem->nr_pages; i++) {
+ dst_rmem->pg_arr[i] = src_rmem->pg_arr[i];
+ dst_rmem->dma_arr[i] = src_rmem->dma_arr[i];
+ }
+
+ if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
+ return;
+
+ dst_ring = &dst->rx_agg_ring_struct;
+ dst_rmem = &dst_ring->ring_mem;
+ src_ring = &src->rx_agg_ring_struct;
+ src_rmem = &src_ring->ring_mem;
+
+ WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages);
+ WARN_ON(dst_rmem->page_size != src_rmem->page_size);
+ WARN_ON(dst_rmem->flags != src_rmem->flags);
+ WARN_ON(dst_rmem->depth != src_rmem->depth);
+ WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size);
+ WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem);
+ WARN_ON(dst->rx_agg_bmap_size != src->rx_agg_bmap_size);
+
+ dst_rmem->pg_tbl = src_rmem->pg_tbl;
+ dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map;
+ *dst_rmem->vmem = *src_rmem->vmem;
+ for (i = 0; i < dst_rmem->nr_pages; i++) {
+ dst_rmem->pg_arr[i] = src_rmem->pg_arr[i];
+ dst_rmem->dma_arr[i] = src_rmem->dma_arr[i];
+ }
+
+ dst->rx_agg_bmap = src->rx_agg_bmap;
+}
+
+static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_rx_ring_info *rxr, *clone;
+ struct bnxt_cp_ring_info *cpr;
+ struct bnxt_vnic_info *vnic;
+ struct bnxt_napi *bnapi;
+ int i, rc;
+ u16 mru;
+
+ rxr = &bp->rx_ring[idx];
+ clone = qmem;
+
+ rxr->rx_prod = clone->rx_prod;
+ rxr->rx_agg_prod = clone->rx_agg_prod;
+ rxr->rx_sw_agg_prod = clone->rx_sw_agg_prod;
+ rxr->rx_next_cons = clone->rx_next_cons;
+ rxr->rx_tpa = clone->rx_tpa;
+ rxr->rx_tpa_idx_map = clone->rx_tpa_idx_map;
+ rxr->page_pool = clone->page_pool;
+ rxr->head_pool = clone->head_pool;
+ rxr->xdp_rxq = clone->xdp_rxq;
+ rxr->need_head_pool = clone->need_head_pool;
+
+ bnxt_copy_rx_ring(bp, rxr, clone);
+
+ bnapi = rxr->bnapi;
+ cpr = &bnapi->cp_ring;
+
+ /* All rings have been reserved and previously allocated.
+ * Reallocating with the same parameters should never fail.
+ */
+ rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
+ if (rc)
+ goto err_reset;
+
+ if (bp->tph_mode) {
+ rc = bnxt_hwrm_cp_ring_alloc_p5(bp, rxr->rx_cpr);
+ if (rc)
+ goto err_reset;
+ }
+
+ rc = bnxt_hwrm_rx_agg_ring_alloc(bp, rxr);
+ if (rc)
+ goto err_reset;
+
+ bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
+
+ if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
+ rc = bnxt_tx_queue_start(bp, idx);
+ if (rc)
+ goto err_reset;
+ }
+
+ bnxt_enable_rx_page_pool(rxr);
+ napi_enable_locked(&bnapi->napi);
+ bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
+
+ mru = bp->dev->mtu + VLAN_ETH_HLEN;
+ for (i = 0; i < bp->nr_vnics; i++) {
+ vnic = &bp->vnic_info[i];
+
+ rc = bnxt_set_vnic_mru_p5(bp, vnic, mru, idx);
+ if (rc)
+ return rc;
+ }
+ return bnxt_set_rss_ctx_vnic_mru(bp, mru, idx);
+
+err_reset:
+ netdev_err(bp->dev, "Unexpected HWRM error during queue start rc: %d\n",
+ rc);
+ napi_enable_locked(&bnapi->napi);
+ bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
+ bnxt_reset_task(bp, true);
+ return rc;
+}
+
+static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_rx_ring_info *rxr;
+ struct bnxt_cp_ring_info *cpr;
+ struct bnxt_vnic_info *vnic;
+ struct bnxt_napi *bnapi;
+ int i;
+
+ for (i = 0; i < bp->nr_vnics; i++) {
+ vnic = &bp->vnic_info[i];
+
+ bnxt_set_vnic_mru_p5(bp, vnic, 0, idx);
+ }
+ bnxt_set_rss_ctx_vnic_mru(bp, 0, idx);
+ /* Make sure NAPI sees that the VNIC is disabled */
+ synchronize_net();
+ rxr = &bp->rx_ring[idx];
+ bnapi = rxr->bnapi;
+ cpr = &bnapi->cp_ring;
+ cancel_work_sync(&cpr->dim.work);
+ bnxt_hwrm_rx_ring_free(bp, rxr, false);
+ bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
+ page_pool_disable_direct_recycling(rxr->page_pool);
+ if (bnxt_separate_head_pool(rxr))
+ page_pool_disable_direct_recycling(rxr->head_pool);
+
+ if (bp->flags & BNXT_FLAG_SHARED_RINGS)
+ bnxt_tx_queue_stop(bp, idx);
+
+ /* Disable NAPI now after freeing the rings because HWRM_RING_FREE
+ * completion is handled in NAPI to guarantee no more DMA on that ring
+ * after seeing the completion.
+ */
+ napi_disable_locked(&bnapi->napi);
+
+ if (bp->tph_mode) {
+ bnxt_hwrm_cp_ring_free(bp, rxr->rx_cpr);
+ bnxt_clear_one_cp_ring(bp, rxr->rx_cpr);
+ }
+ bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
+
+ memcpy(qmem, rxr, sizeof(*rxr));
+ bnxt_init_rx_ring_struct(bp, qmem);
+
+ return 0;
+}
+
+static const struct netdev_queue_mgmt_ops bnxt_queue_mgmt_ops = {
+ .ndo_queue_mem_size = sizeof(struct bnxt_rx_ring_info),
+ .ndo_queue_mem_alloc = bnxt_queue_mem_alloc,
+ .ndo_queue_mem_free = bnxt_queue_mem_free,
+ .ndo_queue_start = bnxt_queue_start,
+ .ndo_queue_stop = bnxt_queue_stop,
+};
+
static void bnxt_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct bnxt *bp = netdev_priv(dev);
if (BNXT_PF(bp))
- bnxt_sriov_disable(bp);
+ __bnxt_sriov_disable(bp);
- bnxt_rdma_aux_device_uninit(bp);
+ bnxt_rdma_aux_device_del(bp);
- bnxt_ptp_clear(bp);
unregister_netdev(dev);
+ bnxt_ptp_clear(bp);
+
+ bnxt_rdma_aux_device_uninit(bp);
+
bnxt_free_l2_filters(bp, true);
bnxt_free_ntp_fltrs(bp, true);
+ WARN_ON(bp->num_rss_ctx);
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
/* Flush any pending tasks */
cancel_work_sync(&bp->sp_task);
@@ -14624,7 +16242,8 @@ static void bnxt_remove_one(struct pci_dev *pdev)
kfree(bp->fw_health);
bp->fw_health = NULL;
bnxt_cleanup_pci(bp);
- bnxt_free_ctx_mem(bp);
+ bnxt_free_ctx_mem(bp, true);
+ bnxt_free_crash_dump_mem(bp);
kfree(bp->rss_indir_tbl);
bp->rss_indir_tbl = NULL;
bnxt_free_port_stats(bp);
@@ -14647,6 +16266,10 @@ static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
bp->dev->priv_flags |= IFF_SUPP_NOFCS;
else
bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
+
+ bp->mac_flags = 0;
+ bnxt_hwrm_mac_qcaps(bp);
+
if (!fw_dflt)
return 0;
@@ -14691,8 +16314,9 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
*max_rx = hw_resc->max_rx_rings;
*max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
- bnxt_get_ulp_msix_num(bp),
- hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
+ bnxt_get_ulp_msix_num_in_use(bp),
+ hw_resc->max_stat_ctxs -
+ bnxt_get_ulp_stat_ctxs_in_use(bp));
if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
*max_cp = min_t(int, *max_cp, max_irq);
max_ring_grps = hw_resc->max_hw_ring_grps;
@@ -14782,12 +16406,13 @@ static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
bp->rx_nr_rings = bp->cp_nr_rings;
bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
- bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
+ bp->tx_nr_rings = bnxt_tx_nr_rings(bp);
}
static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
{
int dflt_rings, max_rx_rings, max_tx_rings, rc;
+ int avail_msix;
if (!bnxt_can_reserve_rings(bp))
return 0;
@@ -14813,12 +16438,20 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
bnxt_trim_dflt_sh_rings(bp);
else
bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
- bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
+ bp->tx_nr_rings = bnxt_tx_nr_rings(bp);
+
+ avail_msix = bnxt_get_max_func_irqs(bp) - bp->cp_nr_rings;
+ if (avail_msix >= BNXT_MIN_ROCE_CP_RINGS) {
+ int ulp_num_msix = min(avail_msix, bp->ulp_num_msix_want);
+
+ bnxt_set_ulp_msix_num(bp, ulp_num_msix);
+ bnxt_set_dflt_ulp_stat_ctxs(bp);
+ }
rc = __bnxt_reserve_rings(bp);
if (rc && rc != -ENODEV)
netdev_warn(bp->dev, "Unable to reserve tx rings\n");
- bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
if (sh)
bnxt_trim_dflt_sh_rings(bp);
@@ -14827,7 +16460,7 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
rc = __bnxt_reserve_rings(bp);
if (rc && rc != -ENODEV)
netdev_warn(bp->dev, "2nd rings reservation failed.\n");
- bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
}
if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
bp->rx_nr_rings++;
@@ -14861,7 +16494,7 @@ static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
if (rc)
goto init_dflt_ring_err;
- bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
bnxt_set_dflt_rfs(bp);
@@ -14874,7 +16507,7 @@ int bnxt_restore_pf_fw_resources(struct bnxt *bp)
{
int rc;
- ASSERT_RTNL();
+ netdev_ops_assert_locked(bp->dev);
bnxt_hwrm_func_qcaps(bp);
if (netif_running(bp->dev))
@@ -14887,7 +16520,7 @@ int bnxt_restore_pf_fw_resources(struct bnxt *bp)
if (netif_running(bp->dev)) {
if (rc)
- dev_close(bp->dev);
+ netif_close(bp->dev);
else
rc = bnxt_open_nic(bp, true, false);
}
@@ -15002,6 +16635,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (pci_is_bridge(pdev))
return -ENODEV;
+ if (!pdev->msix_cap) {
+ dev_err(&pdev->dev, "MSIX capability not found, aborting\n");
+ return -ENODEV;
+ }
+
/* Clear any pending DMA transactions from crash kernel
* while loading driver in capture kernel.
*/
@@ -15028,9 +16666,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (BNXT_PF(bp))
SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port);
- if (pdev->msix_cap)
- bp->flags |= BNXT_FLAG_MSIX_CAP;
-
rc = bnxt_init_board(pdev, dev);
if (rc < 0)
goto init_err_free;
@@ -15117,6 +16752,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->priv_flags |= IFF_UNICAST_FLT;
netif_set_tso_max_size(dev, GSO_MAX_SIZE);
+ if (bp->tso_max_segs)
+ netif_set_tso_max_segs(dev, bp->tso_max_segs);
dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
NETDEV_XDP_ACT_RX_SG;
@@ -15161,9 +16798,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (bp->max_fltr < BNXT_MAX_FLTR)
bp->max_fltr = BNXT_MAX_FLTR;
bnxt_init_l2_fltr_tbl(bp);
- bnxt_set_rx_skb_mode(bp, false);
+ __bnxt_set_rx_skb_mode(bp, false);
bnxt_set_tpa_flags(bp);
+ bnxt_init_ring_params(bp);
bnxt_set_ring_params(bp);
+ bnxt_rdma_aux_device_init(bp);
rc = bnxt_set_dflt_rings(bp, true);
if (rc) {
if (BNXT_VF(bp) && rc == -ENODEV) {
@@ -15214,13 +16853,20 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_LIST_HEAD(&bp->usr_fltr_list);
+ if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
+ bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX;
+ if (BNXT_SUPPORTS_QUEUE_API(bp))
+ dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops;
+ dev->request_ops_lock = true;
+ dev->netmem_tx = true;
+
rc = register_netdev(dev);
if (rc)
goto init_err_cleanup;
bnxt_dl_fw_reporters_create(bp);
- bnxt_rdma_aux_device_init(bp);
+ bnxt_rdma_aux_device_add(bp);
bnxt_print_device_info(bp);
@@ -15228,6 +16874,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
init_err_cleanup:
+ bnxt_rdma_aux_device_uninit(bp);
bnxt_dl_unregister(bp);
init_err_dl:
bnxt_shutdown_tc(bp);
@@ -15244,7 +16891,8 @@ init_err_pci_clean:
kfree(bp->fw_health);
bp->fw_health = NULL;
bnxt_cleanup_pci(bp);
- bnxt_free_ctx_mem(bp);
+ bnxt_free_ctx_mem(bp, true);
+ bnxt_free_crash_dump_mem(bp);
kfree(bp->rss_indir_tbl);
bp->rss_indir_tbl = NULL;
@@ -15262,13 +16910,19 @@ static void bnxt_shutdown(struct pci_dev *pdev)
return;
rtnl_lock();
+ netdev_lock(dev);
bp = netdev_priv(dev);
if (!bp)
goto shutdown_exit;
if (netif_running(dev))
- dev_close(dev);
+ netif_close(dev);
+ if (bnxt_hwrm_func_drv_unrgtr(bp)) {
+ pcie_flr(pdev);
+ goto shutdown_exit;
+ }
+ bnxt_ptp_clear(bp);
bnxt_clear_int_mode(bp);
pci_disable_device(pdev);
@@ -15278,6 +16932,7 @@ static void bnxt_shutdown(struct pci_dev *pdev)
}
shutdown_exit:
+ netdev_unlock(dev);
rtnl_unlock();
}
@@ -15288,16 +16943,18 @@ static int bnxt_suspend(struct device *device)
struct bnxt *bp = netdev_priv(dev);
int rc = 0;
- rtnl_lock();
bnxt_ulp_stop(bp);
+
+ netdev_lock(dev);
if (netif_running(dev)) {
netif_device_detach(dev);
rc = bnxt_close(dev);
}
bnxt_hwrm_func_drv_unrgtr(bp);
+ bnxt_ptp_clear(bp);
pci_disable_device(bp->pdev);
- bnxt_free_ctx_mem(bp);
- rtnl_unlock();
+ bnxt_free_ctx_mem(bp, false);
+ netdev_unlock(dev);
return rc;
}
@@ -15307,7 +16964,7 @@ static int bnxt_resume(struct device *device)
struct bnxt *bp = netdev_priv(dev);
int rc = 0;
- rtnl_lock();
+ netdev_lock(dev);
rc = pci_enable_device(bp->pdev);
if (rc) {
netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
@@ -15335,7 +16992,13 @@ static int bnxt_resume(struct device *device)
rc = -ENODEV;
goto resume_exit;
}
+ if (bp->fw_crash_mem)
+ bnxt_hwrm_crash_dump_mem_cfg(bp);
+ if (bnxt_ptp_init(bp)) {
+ kfree(bp->ptp_cfg);
+ bp->ptp_cfg = NULL;
+ }
bnxt_get_wol_settings(bp);
if (netif_running(dev)) {
rc = bnxt_open(dev);
@@ -15344,10 +17007,10 @@ static int bnxt_resume(struct device *device)
}
resume_exit:
+ netdev_unlock(bp->dev);
bnxt_ulp_start(bp, rc);
if (!rc)
bnxt_reenable_sriov(bp);
- rtnl_unlock();
return rc;
}
@@ -15373,31 +17036,43 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct bnxt *bp = netdev_priv(netdev);
+ bool abort = false;
netdev_info(netdev, "PCI I/O error detected\n");
- rtnl_lock();
+ bnxt_ulp_stop(bp);
+
+ netdev_lock(netdev);
netif_device_detach(netdev);
- bnxt_ulp_stop(bp);
+ if (test_and_set_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
+ netdev_err(bp->dev, "Firmware reset already in progress\n");
+ abort = true;
+ }
- if (state == pci_channel_io_perm_failure) {
- rtnl_unlock();
+ if (abort || state == pci_channel_io_perm_failure) {
+ netdev_unlock(netdev);
return PCI_ERS_RESULT_DISCONNECT;
}
- if (state == pci_channel_io_frozen)
+ /* Link is not reliable anymore if state is pci_channel_io_frozen
+ * so we disable bus master to prevent any potential bad DMAs before
+ * freeing kernel memory.
+ */
+ if (state == pci_channel_io_frozen) {
set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
+ bnxt_fw_fatal_close(bp);
+ }
if (netif_running(netdev))
- bnxt_close(netdev);
+ __bnxt_close_nic(bp, true, true);
if (pci_is_enabled(pdev))
pci_disable_device(pdev);
- bnxt_free_ctx_mem(bp);
- rtnl_unlock();
+ bnxt_free_ctx_mem(bp, false);
+ netdev_unlock(netdev);
- /* Request a slot slot reset. */
+ /* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
}
@@ -15406,7 +17081,7 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
* @pdev: Pointer to PCI device
*
* Restart the card from scratch, as if from a cold-boot.
- * At this point, the card has exprienced a hard reset,
+ * At this point, the card has experienced a hard reset,
* followed by fixups by BIOS, and has its config space
* set up identically to what it was at cold boot.
*/
@@ -15421,7 +17096,11 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
netdev_info(bp->dev, "PCI Slot Reset\n");
- rtnl_lock();
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
+ test_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state))
+ msleep(900);
+
+ netdev_lock(netdev);
if (pci_enable_device(pdev)) {
dev_err(&pdev->dev,
@@ -15430,7 +17109,7 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
pci_set_master(pdev);
/* Upon fatal error, our device internal logic that latches to
* BAR value is getting reset and will restore only upon
- * rewritting the BARs.
+ * rewriting the BARs.
*
* As pci_restore_state() does not re-write the BARs if the
* value is same as saved value earlier, driver needs to
@@ -15467,15 +17146,15 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
if (!err)
result = PCI_ERS_RESULT_RECOVERED;
+ /* IRQ will be initialized later in bnxt_io_resume */
bnxt_ulp_irq_stop(bp);
bnxt_clear_int_mode(bp);
- err = bnxt_init_int_mode(bp);
- bnxt_ulp_irq_restart(bp, err);
}
reset_exit:
+ clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
bnxt_clear_reservations(bp, true);
- rtnl_unlock();
+ netdev_unlock(netdev);
return result;
}
@@ -15494,19 +17173,26 @@ static void bnxt_io_resume(struct pci_dev *pdev)
int err;
netdev_info(bp->dev, "PCI Slot Resume\n");
- rtnl_lock();
+ netdev_lock(netdev);
err = bnxt_hwrm_func_qcaps(bp);
- if (!err && netif_running(netdev))
- err = bnxt_open(netdev);
-
- bnxt_ulp_start(bp, err);
if (!err) {
- bnxt_reenable_sriov(bp);
- netif_device_attach(netdev);
+ if (netif_running(netdev)) {
+ err = bnxt_open(netdev);
+ } else {
+ err = bnxt_reserve_rings(bp, true);
+ if (!err)
+ err = bnxt_init_int_mode(bp);
+ }
}
- rtnl_unlock();
+ if (!err)
+ netif_device_attach(netdev);
+
+ netdev_unlock(netdev);
+ bnxt_ulp_start(bp, err);
+ if (!err)
+ bnxt_reenable_sriov(bp);
}
static const struct pci_error_handlers bnxt_err_handler = {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index dd849e715c9b..f5f07a7e6b29 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -34,6 +34,9 @@
#include <linux/firmware/broadcom/tee_bnxt_fw.h>
#endif
+#define BNXT_DEFAULT_RX_COPYBREAK 256
+#define BNXT_MAX_RX_COPYBREAK 1024
+
extern struct list_head bnxt_block_cb_list;
struct page_pool;
@@ -79,6 +82,12 @@ struct tx_bd {
#define TX_OPAQUE_PROD(bp, opq) ((TX_OPAQUE_IDX(opq) + TX_OPAQUE_BDS(opq)) &\
(bp)->tx_ring_mask)
+#define TX_BD_CNT(n) (((n) << TX_BD_FLAGS_BD_CNT_SHIFT) & TX_BD_FLAGS_BD_CNT)
+
+#define TX_MAX_BD_CNT 32
+
+#define TX_MAX_FRAGS (TX_MAX_BD_CNT - 2)
+
struct tx_bd_ext {
__le32 tx_bd_hsize_lflags;
#define TX_BD_FLAGS_TCP_UDP_CHKSUM (1 << 0)
@@ -122,6 +131,7 @@ struct rx_bd {
#define RX_BD_TYPE_48B_BD_SIZE (2 << 4)
#define RX_BD_TYPE_64B_BD_SIZE (3 << 4)
#define RX_BD_FLAGS_SOP (1 << 6)
+ #define RX_BD_FLAGS_AGG_EOP (1 << 6)
#define RX_BD_FLAGS_EOP (1 << 7)
#define RX_BD_FLAGS_BUFFERS (3 << 8)
#define RX_BD_FLAGS_1_BUFFER_PACKET (0 << 8)
@@ -181,6 +191,32 @@ struct tx_cmp {
#define TX_CMP_SQ_CONS_IDX(txcmp) \
(le32_to_cpu((txcmp)->sq_cons_idx) & TX_CMP_SQ_CONS_IDX_MASK)
+struct tx_ts_cmp {
+ __le32 tx_ts_cmp_flags_type;
+ #define TX_TS_CMP_FLAGS_ERROR (1 << 6)
+ #define TX_TS_CMP_FLAGS_TS_TYPE (1 << 7)
+ #define TX_TS_CMP_FLAGS_TS_TYPE_PM (0 << 7)
+ #define TX_TS_CMP_FLAGS_TS_TYPE_PA (1 << 7)
+ #define TX_TS_CMP_FLAGS_TS_FALLBACK (1 << 8)
+ #define TX_TS_CMP_TS_SUB_NS (0xf << 12)
+ #define TX_TS_CMP_TS_NS_MID (0xffff << 16)
+ #define TX_TS_CMP_TS_NS_MID_SFT 16
+ u32 tx_ts_cmp_opaque;
+ __le32 tx_ts_cmp_errors_v;
+ #define TX_TS_CMP_V (1 << 0)
+ #define TX_TS_CMP_TS_INVALID_ERR (1 << 10)
+ __le32 tx_ts_cmp_ts_ns_lo;
+};
+
+#define BNXT_GET_TX_TS_48B_NS(tscmp) \
+ (le32_to_cpu((tscmp)->tx_ts_cmp_ts_ns_lo) | \
+ ((u64)(le32_to_cpu((tscmp)->tx_ts_cmp_flags_type) & \
+ TX_TS_CMP_TS_NS_MID) << TX_TS_CMP_TS_NS_MID_SFT))
+
+#define BNXT_TX_TS_ERR(tscmp) \
+ (((tscmp)->tx_ts_cmp_flags_type & cpu_to_le32(TX_TS_CMP_FLAGS_ERROR)) &&\
+ ((tscmp)->tx_ts_cmp_errors_v & cpu_to_le32(TX_TS_CMP_TS_INVALID_ERR)))
+
struct rx_cmp {
__le32 rx_cmp_len_flags_type;
#define RX_CMP_CMP_TYPE (0x3f << 0)
@@ -241,6 +277,9 @@ struct rx_cmp {
(((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_RSS_HASH_TYPE) >>\
RX_CMP_RSS_HASH_TYPE_SHIFT) & RSS_PROFILE_ID_MASK)
+#define RX_CMP_ITYPES(rxcmp) \
+ (le32_to_cpu((rxcmp)->rx_cmp_len_flags_type) & RX_CMP_FLAGS_ITYPES_MASK)
+
#define RX_CMP_V3_HASH_TYPE_LEGACY(rxcmp) \
((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_V3_RSS_EXT_OP_LEGACY) >>\
RX_CMP_V3_RSS_EXT_OP_LEGACY_SHIFT)
@@ -352,7 +391,7 @@ struct rx_agg_cmp {
u32 rx_agg_cmp_opaque;
__le32 rx_agg_cmp_v;
#define RX_AGG_CMP_V (1 << 0)
- #define RX_AGG_CMP_AGG_ID (0xffff << 16)
+ #define RX_AGG_CMP_AGG_ID (0x0fff << 16)
#define RX_AGG_CMP_AGG_ID_SHIFT 16
__le32 rx_agg_cmp_unused;
};
@@ -390,7 +429,7 @@ struct rx_tpa_start_cmp {
#define RX_TPA_START_CMP_V3_RSS_HASH_TYPE_SHIFT 7
#define RX_TPA_START_CMP_AGG_ID (0x7f << 25)
#define RX_TPA_START_CMP_AGG_ID_SHIFT 25
- #define RX_TPA_START_CMP_AGG_ID_P5 (0xffff << 16)
+ #define RX_TPA_START_CMP_AGG_ID_P5 (0x0fff << 16)
#define RX_TPA_START_CMP_AGG_ID_SHIFT_P5 16
#define RX_TPA_START_CMP_METADATA1 (0xf << 28)
#define RX_TPA_START_CMP_METADATA1_SHIFT 28
@@ -514,7 +553,7 @@ struct rx_tpa_end_cmp {
#define RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT 16
#define RX_TPA_END_CMP_AGG_ID (0x7f << 25)
#define RX_TPA_END_CMP_AGG_ID_SHIFT 25
- #define RX_TPA_END_CMP_AGG_ID_P5 (0xffff << 16)
+ #define RX_TPA_END_CMP_AGG_ID_P5 (0x0fff << 16)
#define RX_TPA_END_CMP_AGG_ID_SHIFT_P5 16
__le32 rx_tpa_end_cmp_tsdelta;
@@ -848,11 +887,14 @@ struct bnxt_sw_tx_bd {
DEFINE_DMA_UNMAP_ADDR(mapping);
DEFINE_DMA_UNMAP_LEN(len);
struct page *page;
- u8 is_gso;
+ u8 is_ts_pkt;
u8 is_push;
u8 action;
unsigned short nr_frags;
- u16 rx_prod;
+ union {
+ u16 rx_prod;
+ u16 txts_prod;
+ };
};
struct bnxt_sw_rx_bd {
@@ -862,7 +904,7 @@ struct bnxt_sw_rx_bd {
};
struct bnxt_sw_rx_agg_bd {
- struct page *page;
+ netmem_ref netmem;
unsigned int offset;
dma_addr_t mapping;
};
@@ -1065,6 +1107,7 @@ struct bnxt_rx_ring_info {
unsigned long *rx_agg_bmap;
u16 rx_agg_bmap_size;
+ bool need_head_pool;
dma_addr_t rx_desc_mapping[MAX_RX_PAGES];
dma_addr_t rx_agg_desc_mapping[MAX_RX_AGG_PAGES];
@@ -1076,6 +1119,7 @@ struct bnxt_rx_ring_info {
struct bnxt_ring_struct rx_agg_ring_struct;
struct xdp_rxq_info xdp_rxq;
struct page_pool *page_pool;
+ struct page_pool *head_pool;
};
struct bnxt_rx_sw_stats {
@@ -1152,7 +1196,7 @@ struct bnxt_cp_ring_info {
struct bnxt_stats_mem stats;
u32 hw_stats_ctx_id;
- struct bnxt_sw_stats sw_stats;
+ struct bnxt_sw_stats *sw_stats;
struct bnxt_ring_struct cp_ring_struct;
@@ -1188,13 +1232,21 @@ struct bnxt_napi {
bool in_reset;
};
+/* "TxRx", 2 hypens, plus maximum integer */
+#define BNXT_IRQ_NAME_EXTRA 17
+
struct bnxt_irq {
irq_handler_t handler;
unsigned int vector;
u8 requested:1;
u8 have_cpumask:1;
- char name[IFNAMSIZ + 2];
+ char name[IFNAMSIZ + BNXT_IRQ_NAME_EXTRA];
cpumask_var_t cpu_mask;
+
+ struct bnxt *bp;
+ int msix_nr;
+ int ring_nr;
+ struct irq_affinity_notify affinity_notify;
};
#define HWRM_RING_ALLOC_TX 0x1
@@ -1221,6 +1273,7 @@ struct bnxt_vnic_info {
#define BNXT_MAX_CTX_PER_VNIC 8
u16 fw_rss_cos_lb_ctx[BNXT_MAX_CTX_PER_VNIC];
u16 fw_l2_ctx_id;
+ u16 mru;
#define BNXT_MAX_UC_ADDRS 4
struct bnxt_l2_filter *l2_filters[BNXT_MAX_UC_ADDRS];
/* index 0 always dev_addr */
@@ -1256,8 +1309,19 @@ struct bnxt_vnic_info {
#define BNXT_VNIC_UCAST_FLAG 8
#define BNXT_VNIC_RFS_NEW_RSS_FLAG 0x10
#define BNXT_VNIC_NTUPLE_FLAG 0x20
+#define BNXT_VNIC_RSSCTX_FLAG 0x40
+ struct ethtool_rxfh_context *rss_ctx;
+ u32 vnic_id;
};
+struct bnxt_rss_ctx {
+ struct bnxt_vnic_info vnic;
+ u8 index;
+};
+
+#define BNXT_MAX_ETH_RSS_CTX 32
+#define BNXT_VNIC_ID_INVALID 0xffffffff
+
struct bnxt_hw_rings {
int tx;
int rx;
@@ -1315,7 +1379,6 @@ struct bnxt_vf_info {
u16 vlan;
u16 func_qcfg_flags;
u32 flags;
-#define BNXT_VF_QOS 0x1
#define BNXT_VF_SPOOFCHK 0x2
#define BNXT_VF_LINK_FORCED 0x4
#define BNXT_VF_LINK_UP 0x8
@@ -1360,6 +1423,7 @@ struct bnxt_filter_base {
#define BNXT_ACT_RING_DST 2
#define BNXT_ACT_FUNC_DST 4
#define BNXT_ACT_NO_AGING 8
+#define BNXT_ACT_RSS_CTX 0x10
u16 sw_id;
u16 rxq;
u16 fw_vnic_id;
@@ -1419,6 +1483,57 @@ struct bnxt_l2_filter {
atomic_t refcnt;
};
+/* Compat version of hwrm_port_phy_qcfg_output capped at 96 bytes. The
+ * first 95 bytes are identical to hwrm_port_phy_qcfg_output in bnxt_hsi.h.
+ * The last valid byte in the compat version is different.
+ */
+struct hwrm_port_phy_qcfg_output_compat {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 link;
+ u8 active_fec_signal_mode;
+ __le16 link_speed;
+ u8 duplex_cfg;
+ u8 pause;
+ __le16 support_speeds;
+ __le16 force_link_speed;
+ u8 auto_mode;
+ u8 auto_pause;
+ __le16 auto_link_speed;
+ __le16 auto_link_speed_mask;
+ u8 wirespeed;
+ u8 lpbk;
+ u8 force_pause;
+ u8 module_status;
+ __le32 preemphasis;
+ u8 phy_maj;
+ u8 phy_min;
+ u8 phy_bld;
+ u8 phy_type;
+ u8 media_type;
+ u8 xcvr_pkg_type;
+ u8 eee_config_phy_addr;
+ u8 parallel_detect;
+ __le16 link_partner_adv_speeds;
+ u8 link_partner_adv_auto_mode;
+ u8 link_partner_adv_pause;
+ __le16 adv_eee_link_speed_mask;
+ __le16 link_partner_adv_eee_link_speed_mask;
+ __le32 xcvr_identifier_type_tx_lpi_timer;
+ __le16 fec_cfg;
+ u8 duplex_state;
+ u8 option_flags;
+ char phy_vendor_name[16];
+ char phy_vendor_partnumber[16];
+ __le16 support_pam4_speeds;
+ __le16 force_pam4_link_speed;
+ __le16 auto_pam4_link_speed_mask;
+ u8 link_partner_pam4_adv_speeds;
+ u8 valid;
+};
+
struct bnxt_link_info {
u8 phy_type;
u8 media_type;
@@ -1663,8 +1778,6 @@ struct bnxt_test_info {
#define BNXT_GRCPF_REG_CHIMP_COMM 0x0
#define BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER 0x100
#define BNXT_GRCPF_REG_WINDOW_BASE_OUT 0x400
-#define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014
-#define BNXT_CAG_REG_BASE 0x300000
#define BNXT_GRC_REG_STATUS_P5 0x520
@@ -1755,6 +1868,8 @@ struct bnxt_vf_rep {
#define MAX_CTX_PAGES (BNXT_PAGE_SIZE / 8)
#define MAX_CTX_TOTAL_PAGES (MAX_CTX_PAGES * MAX_CTX_PAGES)
+#define MAX_CTX_BYTES ((size_t)MAX_CTX_TOTAL_PAGES * BNXT_PAGE_SIZE)
+#define MAX_CTX_BYTES_MASK (MAX_CTX_BYTES - 1)
struct bnxt_ctx_pg_info {
u32 entries;
@@ -1787,6 +1902,13 @@ struct bnxt_ctx_mem_type {
u16 entry_size;
u32 flags;
#define BNXT_CTX_MEM_TYPE_VALID FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID
+#define BNXT_CTX_MEM_FW_TRACE \
+ FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_FW_DBG_TRACE
+#define BNXT_CTX_MEM_FW_BIN_TRACE \
+ FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_FW_BIN_DBG_TRACE
+#define BNXT_CTX_MEM_PERSIST \
+ FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_NEXT_BS_OFFSET
+
u32 instance_bmap;
u8 init_value;
u8 entry_multiple;
@@ -1795,6 +1917,7 @@ struct bnxt_ctx_mem_type {
u32 max_entries;
u32 min_entries;
u8 last:1;
+ u8 mem_valid:1;
u8 split_entry_cnt;
#define BNXT_MAX_SPLIT_ENTRY 4
union {
@@ -1826,21 +1949,32 @@ struct bnxt_ctx_mem_type {
#define BNXT_CTX_FTQM FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING
#define BNXT_CTX_MRAV FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV
#define BNXT_CTX_TIM FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM
-#define BNXT_CTX_TKC FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TKC
-#define BNXT_CTX_RKC FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RKC
+#define BNXT_CTX_TCK FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TX_CK
+#define BNXT_CTX_RCK FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RX_CK
#define BNXT_CTX_MTQM FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING
#define BNXT_CTX_SQDBS FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW
#define BNXT_CTX_RQDBS FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW
#define BNXT_CTX_SRQDBS FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW
#define BNXT_CTX_CQDBS FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW
-#define BNXT_CTX_QTKC FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QUIC_TKC
-#define BNXT_CTX_QRKC FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QUIC_RKC
#define BNXT_CTX_TBLSC FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TBL_SCOPE
#define BNXT_CTX_XPAR FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_XID_PARTITION
+#define BNXT_CTX_SRT FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT_TRACE
+#define BNXT_CTX_SRT2 FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT2_TRACE
+#define BNXT_CTX_CRT FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT_TRACE
+#define BNXT_CTX_CRT2 FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT2_TRACE
+#define BNXT_CTX_RIGP0 FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP0_TRACE
+#define BNXT_CTX_L2HWRM FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_L2_HWRM_TRACE
+#define BNXT_CTX_REHWRM FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ROCE_HWRM_TRACE
+#define BNXT_CTX_CA0 FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA0_TRACE
+#define BNXT_CTX_CA1 FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA1_TRACE
+#define BNXT_CTX_CA2 FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA2_TRACE
+#define BNXT_CTX_RIGP1 FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP1_TRACE
+#define BNXT_CTX_KONG FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_AFM_KONG_HWRM_TRACE
+#define BNXT_CTX_QPC FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ERR_QPC_TRACE
#define BNXT_CTX_MAX (BNXT_CTX_TIM + 1)
#define BNXT_CTX_L2_MAX (BNXT_CTX_FTQM + 1)
-#define BNXT_CTX_V2_MAX (BNXT_CTX_XPAR + 1)
+#define BNXT_CTX_V2_MAX (BNXT_CTX_QPC + 1)
#define BNXT_CTX_INV ((u16)-1)
struct bnxt_ctx_mem_info {
@@ -1998,8 +2132,30 @@ enum board_idx {
NETXTREME_E_VF_HV,
NETXTREME_E_P5_VF,
NETXTREME_E_P5_VF_HV,
+ NETXTREME_E_P7_VF,
+ NETXTREME_E_P7_VF_HV,
};
+#define BNXT_TRACE_BUF_MAGIC_BYTE ((u8)0xbc)
+#define BNXT_TRACE_MAX 11
+
+struct bnxt_bs_trace_info {
+ u8 *magic_byte;
+ u32 last_offset;
+ u8 wrapped:1;
+ u16 ctx_type;
+ u16 trace_type;
+};
+
+static inline void bnxt_bs_trace_check_wrap(struct bnxt_bs_trace_info *bs_trace,
+ u32 offset)
+{
+ if (!bs_trace->wrapped && bs_trace->magic_byte &&
+ *bs_trace->magic_byte != BNXT_TRACE_BUF_MAGIC_BYTE)
+ bs_trace->wrapped = 1;
+ bs_trace->last_offset = offset;
+}
+
struct bnxt {
void __iomem *bar0;
void __iomem *bar1;
@@ -2104,10 +2260,6 @@ struct bnxt {
#define BNXT_FLAG_TPA (BNXT_FLAG_LRO | BNXT_FLAG_GRO)
#define BNXT_FLAG_JUMBO 0x10
#define BNXT_FLAG_STRIP_VLAN 0x20
- #define BNXT_FLAG_AGG_RINGS (BNXT_FLAG_JUMBO | BNXT_FLAG_GRO | \
- BNXT_FLAG_LRO)
- #define BNXT_FLAG_USING_MSIX 0x40
- #define BNXT_FLAG_MSIX_CAP 0x80
#define BNXT_FLAG_RFS 0x100
#define BNXT_FLAG_SHARED_RINGS 0x200
#define BNXT_FLAG_PORT_STATS 0x400
@@ -2128,6 +2280,9 @@ struct bnxt {
#define BNXT_FLAG_ROCE_MIRROR_CAP 0x4000000
#define BNXT_FLAG_TX_COAL_CMPL 0x8000000
#define BNXT_FLAG_PORT_STATS_EXT 0x10000000
+ #define BNXT_FLAG_HDS 0x20000000
+ #define BNXT_FLAG_AGG_RINGS (BNXT_FLAG_JUMBO | BNXT_FLAG_GRO | \
+ BNXT_FLAG_LRO | BNXT_FLAG_HDS)
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
BNXT_FLAG_RFS | \
@@ -2135,6 +2290,11 @@ struct bnxt {
#define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF))
#define BNXT_VF(bp) ((bp)->flags & BNXT_FLAG_VF)
+#ifdef CONFIG_BNXT_SRIOV
+#define BNXT_VF_IS_TRUSTED(bp) ((bp)->vf.flags & BNXT_VF_TRUST)
+#else
+#define BNXT_VF_IS_TRUSTED(bp) 0
+#endif
#define BNXT_NPAR(bp) ((bp)->port_partition_type)
#define BNXT_MH(bp) ((bp)->flags & BNXT_FLAG_MULTI_HOST)
#define BNXT_SINGLE_PF(bp) (BNXT_PF(bp) && !BNXT_NPAR(bp) && !BNXT_MH(bp))
@@ -2170,9 +2330,17 @@ struct bnxt {
(BNXT_CHIP_NUM_58700((bp)->chip_num) && \
!BNXT_CHIP_TYPE_NITRO_A0(bp)))
+/* Chip class phase 3.x */
+#define BNXT_CHIP_P3(bp) \
+ (BNXT_CHIP_NUM_57X0X((bp)->chip_num) || \
+ BNXT_CHIP_TYPE_NITRO_A0(bp))
+
#define BNXT_CHIP_P4_PLUS(bp) \
(BNXT_CHIP_P4(bp) || BNXT_CHIP_P5_PLUS(bp))
+#define BNXT_CHIP_P5_AND_MINUS(bp) \
+ (BNXT_CHIP_P3(bp) || BNXT_CHIP_P4(bp) || BNXT_CHIP_P5(bp))
+
struct bnxt_aux_priv *aux_priv;
struct bnxt_en_dev *edev;
@@ -2199,7 +2367,7 @@ struct bnxt {
enum dma_data_direction rx_dir;
u32 rx_ring_size;
u32 rx_agg_ring_size;
- u32 rx_copy_thresh;
+ u32 rx_copybreak;
u32 rx_ring_mask;
u32 rx_agg_ring_mask;
int rx_nr_pages;
@@ -2227,8 +2395,9 @@ struct bnxt {
/* grp_info indexed by completion ring index */
struct bnxt_ring_grp_info *grp_info;
struct bnxt_vnic_info *vnic_info;
+ u32 num_rss_ctx;
int nr_vnics;
- u16 *rss_indir_tbl;
+ u32 *rss_indir_tbl;
u16 rss_indir_tbl_entries;
u32 rss_hash_cfg;
u32 rss_hash_delta;
@@ -2241,20 +2410,28 @@ struct bnxt {
#define BNXT_RSS_CAP_AH_V6_RSS_CAP BIT(5)
#define BNXT_RSS_CAP_ESP_V4_RSS_CAP BIT(6)
#define BNXT_RSS_CAP_ESP_V6_RSS_CAP BIT(7)
+#define BNXT_RSS_CAP_MULTI_RSS_CTX BIT(8)
+#define BNXT_RSS_CAP_IPV6_FLOW_LABEL_RSS_CAP BIT(9)
u8 rss_hash_key[HW_HASH_KEY_SIZE];
u8 rss_hash_key_valid:1;
u8 rss_hash_key_updated:1;
u16 max_mtu;
+ u16 tso_max_segs;
u8 max_tc;
u8 max_lltc; /* lossless TCs */
struct bnxt_queue_info q_info[BNXT_MAX_QUEUE];
u8 tc_to_qidx[BNXT_MAX_QUEUE];
u8 q_ids[BNXT_MAX_QUEUE];
u8 max_q;
+ u8 cos0_cos1_shared;
u8 num_tc;
+ u16 max_pfcwd_tmo_ms;
+
+ u8 tph_mode;
+
unsigned int current_interval;
#define BNXT_TIMER_INTERVAL HZ
@@ -2284,6 +2461,7 @@ struct bnxt {
struct bnxt_irq *irq_tbl;
int total_irqs;
+ int ulp_num_msix_want;
u8 mac_addr[ETH_ALEN];
#ifdef CONFIG_BNXT_DCB
@@ -2302,7 +2480,11 @@ struct bnxt {
#define BNXT_FW_CAP_DCBX_AGENT BIT_ULL(2)
#define BNXT_FW_CAP_NEW_RM BIT_ULL(3)
#define BNXT_FW_CAP_IF_CHANGE BIT_ULL(4)
+ #define BNXT_FW_CAP_ENABLE_RDMA_SRIOV BIT_ULL(5)
+ #define BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED BIT_ULL(6)
#define BNXT_FW_CAP_KONG_MB_CHNL BIT_ULL(7)
+ #define BNXT_FW_CAP_ROCE_VF_DYN_ALLOC_SUPPORT BIT_ULL(8)
+ #define BNXT_FW_CAP_LINK_ADMIN BIT_ULL(9)
#define BNXT_FW_CAP_OVS_64BIT_HANDLE BIT_ULL(10)
#define BNXT_FW_CAP_TRUSTED_VF BIT_ULL(11)
#define BNXT_FW_CAP_ERROR_RECOVERY BIT_ULL(13)
@@ -2311,6 +2493,7 @@ struct bnxt {
#define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2 BIT_ULL(16)
#define BNXT_FW_CAP_PCIE_STATS_SUPPORTED BIT_ULL(17)
#define BNXT_FW_CAP_EXT_STATS_SUPPORTED BIT_ULL(18)
+ #define BNXT_FW_CAP_TX_TS_CMP BIT_ULL(19)
#define BNXT_FW_CAP_ERR_RECOVER_RELOAD BIT_ULL(20)
#define BNXT_FW_CAP_HOT_RESET BIT_ULL(21)
#define BNXT_FW_CAP_PTP_RTC BIT_ULL(22)
@@ -2331,6 +2514,10 @@ struct bnxt {
#define BNXT_FW_CAP_VNIC_TUNNEL_TPA BIT_ULL(37)
#define BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO BIT_ULL(38)
#define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3 BIT_ULL(39)
+ #define BNXT_FW_CAP_VNIC_RE_FLUSH BIT_ULL(40)
+ #define BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS BIT_ULL(41)
+ #define BNXT_FW_CAP_NPAR_1_2 BIT_ULL(42)
+ #define BNXT_FW_CAP_MIRROR_ON_ROCE BIT_ULL(43)
u32 fw_dbg_cap;
@@ -2340,6 +2527,23 @@ struct bnxt {
#define BNXT_SUPPORTS_NTUPLE_VNIC(bp) \
(BNXT_PF(bp) && ((bp)->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3))
+#define BNXT_SUPPORTS_MULTI_RSS_CTX(bp) \
+ (BNXT_PF(bp) && BNXT_SUPPORTS_NTUPLE_VNIC(bp) && \
+ ((bp)->rss_cap & BNXT_RSS_CAP_MULTI_RSS_CTX))
+#define BNXT_ROCE_VF_DYN_ALLOC_CAP(bp) \
+ ((bp)->fw_cap & BNXT_FW_CAP_ROCE_VF_DYN_ALLOC_SUPPORT)
+#define BNXT_SUPPORTS_QUEUE_API(bp) \
+ (BNXT_PF(bp) && BNXT_SUPPORTS_NTUPLE_VNIC(bp) && \
+ ((bp)->fw_cap & BNXT_FW_CAP_VNIC_RE_FLUSH))
+#define BNXT_RDMA_SRIOV_EN(bp) \
+ ((bp)->fw_cap & BNXT_FW_CAP_ENABLE_RDMA_SRIOV)
+#define BNXT_ROCE_VF_RESC_CAP(bp) \
+ ((bp)->fw_cap & BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED)
+#define BNXT_SW_RES_LMT(bp) \
+ ((bp)->fw_cap & BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS)
+#define BNXT_MIRROR_ON_ROCE_CAP(bp) \
+ ((bp)->fw_cap & BNXT_FW_CAP_MIRROR_ON_ROCE)
+
u32 hwrm_spec_code;
u16 hwrm_cmd_seq;
u16 hwrm_cmd_kong_seq;
@@ -2353,6 +2557,7 @@ struct bnxt {
u16 fw_rx_stats_ext_size;
u16 fw_tx_stats_ext_size;
u16 hw_ring_stats_size;
+ u16 pcie_stat_len;
u8 pri2cos_idx[8];
u8 pri2cos_valid;
@@ -2416,6 +2621,7 @@ struct bnxt {
#define BNXT_LINK_CFG_CHANGE_SP_EVENT 21
#define BNXT_THERMAL_THRESHOLD_SP_EVENT 22
#define BNXT_FW_ECHO_REQUEST_SP_EVENT 23
+#define BNXT_RESTART_ULP_SP_EVENT 24
struct delayed_work fw_reset_task;
int fw_reset_state;
@@ -2425,6 +2631,7 @@ struct bnxt {
#define BNXT_FW_RESET_STATE_POLL_FW 4
#define BNXT_FW_RESET_STATE_OPENING 5
#define BNXT_FW_RESET_STATE_POLL_FW_DOWN 6
+#define BNXT_FW_RESET_STATE_ABORT 7
u16 fw_reset_min_dsecs;
#define BNXT_DFLT_FW_RST_MIN_DSECS 20
@@ -2497,6 +2704,11 @@ struct bnxt {
#define BNXT_PHY_FL_BANK_SEL (PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED << 8)
#define BNXT_PHY_FL_SPEEDS2 (PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED << 8)
+ /* copied from flags in hwrm_port_mac_qcaps_output */
+ u8 mac_flags;
+#define BNXT_MAC_FL_NO_MAC_LPBK \
+ PORT_MAC_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED
+
u8 num_tests;
struct bnxt_test_info *test_info;
@@ -2508,6 +2720,8 @@ struct bnxt {
u16 dump_flag;
#define BNXT_DUMP_LIVE 0
#define BNXT_DUMP_CRASH 1
+#define BNXT_DUMP_DRIVER 2
+#define BNXT_DUMP_LIVE_WITH_CTX_L1_CACHE 3
struct bpf_prog *xdp_prog;
@@ -2533,6 +2747,10 @@ struct bnxt {
#endif
u32 thermal_threshold_type;
enum board_idx board_idx;
+
+ struct bnxt_ctx_pg_info *fw_crash_mem;
+ u32 fw_crash_len;
+ struct bnxt_bs_trace_info bs_trace[BNXT_TRACE_MAX];
};
#define BNXT_NUM_RX_RING_STATS 8
@@ -2594,6 +2812,8 @@ struct bnxt {
#define SFF_MODULE_ID_QSFP28 0x11
#define BNXT_MAX_PHY_I2C_RESP_SIZE 64
+#define BNXT_HDS_THRESHOLD_MAX 1023
+
static inline u32 bnxt_tx_avail(struct bnxt *bp,
const struct bnxt_tx_ring_info *txr)
{
@@ -2668,18 +2888,19 @@ static inline bool bnxt_sriov_cfg(struct bnxt *bp)
#endif
}
+extern const u16 bnxt_bstore_to_trace[];
extern const u16 bnxt_lhint_arr[];
int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
u16 prod, gfp_t gfp);
void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data);
u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx);
+bool bnxt_bs_trace_avail(struct bnxt *bp, u16 type);
void bnxt_set_tpa_flags(struct bnxt *bp);
void bnxt_set_ring_params(struct bnxt *);
-int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
+void bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr);
void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr);
-void bnxt_clear_usr_fltrs(struct bnxt *bp, bool all);
int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap,
int bmap_size, bool async_only);
int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp);
@@ -2693,19 +2914,27 @@ int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
struct bnxt_ntuple_filter *fltr);
int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
struct bnxt_ntuple_filter *fltr);
+int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ u32 tpa_flags);
void bnxt_fill_ipv6_mask(__be32 mask[4]);
+void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp,
+ struct ethtool_rxfh_context *rss_ctx);
int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings);
-int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id);
+int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ unsigned int start_rx_ring_idx,
+ unsigned int nr_rings);
int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings);
int bnxt_nq_rings_in_use(struct bnxt *bp);
int bnxt_hwrm_set_coal(struct bnxt *);
-void bnxt_free_ctx_mem(struct bnxt *bp);
+size_t bnxt_copy_ctx_mem(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm,
+ void *buf, size_t offset);
+void bnxt_free_ctx_mem(struct bnxt *bp, bool force);
int bnxt_num_tx_to_cp(struct bnxt *bp, int tx);
unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp);
unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp);
unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp);
unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp);
-int bnxt_get_avail_msix(struct bnxt *bp, int num);
int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init);
void bnxt_tx_disable(struct bnxt *bp);
void bnxt_tx_enable(struct bnxt *bp);
@@ -2715,12 +2944,19 @@ void bnxt_report_link(struct bnxt *bp);
int bnxt_update_link(struct bnxt *bp, bool chng_link_state);
int bnxt_hwrm_set_pause(struct bnxt *);
int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool);
+void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset);
int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset);
int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp);
int bnxt_hwrm_free_wol_fltr(struct bnxt *bp);
int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all);
int bnxt_hwrm_func_qcaps(struct bnxt *bp);
int bnxt_hwrm_fw_set_time(struct bnxt *);
+int bnxt_hwrm_vnic_update(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ u8 valid);
+int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
+ bool all);
int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_half_open_nic(struct bnxt *bp);
void bnxt_half_close_nic(struct bnxt *bp);
@@ -2728,6 +2964,7 @@ void bnxt_reenable_sriov(struct bnxt *bp);
void bnxt_close_nic(struct bnxt *, bool, bool);
void bnxt_get_ring_err_stats(struct bnxt *bp,
struct bnxt_total_ring_err_stats *stats);
+bool bnxt_rfs_capable(struct bnxt *bp, bool new_rss_ctx);
int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
u32 *reg_buf);
void bnxt_fw_exception(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
index c06789882036..ccb8b509662d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
@@ -10,11 +10,57 @@
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
-#include "bnxt_hsi.h"
+#include <linux/bnxt/hsi.h>
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_coredump.h"
+static const u16 bnxt_bstore_to_seg_id[] = {
+ [BNXT_CTX_QP] = BNXT_CTX_MEM_SEG_QP,
+ [BNXT_CTX_SRQ] = BNXT_CTX_MEM_SEG_SRQ,
+ [BNXT_CTX_CQ] = BNXT_CTX_MEM_SEG_CQ,
+ [BNXT_CTX_VNIC] = BNXT_CTX_MEM_SEG_VNIC,
+ [BNXT_CTX_STAT] = BNXT_CTX_MEM_SEG_STAT,
+ [BNXT_CTX_STQM] = BNXT_CTX_MEM_SEG_STQM,
+ [BNXT_CTX_FTQM] = BNXT_CTX_MEM_SEG_FTQM,
+ [BNXT_CTX_MRAV] = BNXT_CTX_MEM_SEG_MRAV,
+ [BNXT_CTX_TIM] = BNXT_CTX_MEM_SEG_TIM,
+ [BNXT_CTX_SRT] = BNXT_CTX_MEM_SEG_SRT,
+ [BNXT_CTX_SRT2] = BNXT_CTX_MEM_SEG_SRT2,
+ [BNXT_CTX_CRT] = BNXT_CTX_MEM_SEG_CRT,
+ [BNXT_CTX_CRT2] = BNXT_CTX_MEM_SEG_CRT2,
+ [BNXT_CTX_RIGP0] = BNXT_CTX_MEM_SEG_RIGP0,
+ [BNXT_CTX_L2HWRM] = BNXT_CTX_MEM_SEG_L2HWRM,
+ [BNXT_CTX_REHWRM] = BNXT_CTX_MEM_SEG_REHWRM,
+ [BNXT_CTX_CA0] = BNXT_CTX_MEM_SEG_CA0,
+ [BNXT_CTX_CA1] = BNXT_CTX_MEM_SEG_CA1,
+ [BNXT_CTX_CA2] = BNXT_CTX_MEM_SEG_CA2,
+ [BNXT_CTX_RIGP1] = BNXT_CTX_MEM_SEG_RIGP1,
+ [BNXT_CTX_KONG] = BNXT_CTX_MEM_SEG_KONG,
+ [BNXT_CTX_QPC] = BNXT_CTX_MEM_SEG_QPC,
+};
+
+static int bnxt_dbg_hwrm_log_buffer_flush(struct bnxt *bp, u16 type, u32 flags,
+ u32 *offset)
+{
+ struct hwrm_dbg_log_buffer_flush_output *resp;
+ struct hwrm_dbg_log_buffer_flush_input *req;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_DBG_LOG_BUFFER_FLUSH);
+ if (rc)
+ return rc;
+
+ req->flags = cpu_to_le32(flags);
+ req->type = cpu_to_le16(type);
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
+ if (!rc)
+ *offset = le32_to_cpu(resp->current_buffer_offset);
+ hwrm_req_drop(bp, req);
+ return rc;
+}
+
static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg,
struct bnxt_hwrm_dbg_dma_info *info)
{
@@ -66,20 +112,30 @@ static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg,
}
}
+ if (cmn_req->req_type ==
+ cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE))
+ info->dest_buf_size += len;
+
if (info->dest_buf) {
if ((info->seg_start + off + len) <=
BNXT_COREDUMP_BUF_LEN(info->buf_len)) {
- memcpy(info->dest_buf + off, dma_buf, len);
+ u16 copylen = min_t(u16, len,
+ info->dest_buf_size - off);
+
+ memcpy(info->dest_buf + off, dma_buf, copylen);
+ if (copylen < len)
+ break;
} else {
rc = -ENOBUFS;
+ if (cmn_req->req_type ==
+ cpu_to_le16(HWRM_DBG_COREDUMP_LIST)) {
+ kfree(info->dest_buf);
+ info->dest_buf = NULL;
+ }
break;
}
}
- if (cmn_req->req_type ==
- cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE))
- info->dest_buf_size += len;
-
if (!(cmn_resp->flags & HWRM_DBG_CMN_FLAGS_MORE))
break;
@@ -115,8 +171,8 @@ static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp,
return rc;
}
-static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id,
- u16 segment_id)
+static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 dump_type,
+ u16 component_id, u16 segment_id)
{
struct hwrm_dbg_coredump_initiate_input *req;
int rc;
@@ -128,6 +184,8 @@ static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id,
hwrm_req_timeout(bp, req, bp->hwrm_cmd_max_timeout);
req->component_id = cpu_to_le16(component_id);
req->segment_id = cpu_to_le16(segment_id);
+ if (dump_type == BNXT_DUMP_LIVE_WITH_CTX_L1_CACHE)
+ req->seg_flags = DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_COLLECT_CTX_L1_CACHE;
return hwrm_req_send(bp, req);
}
@@ -165,11 +223,12 @@ static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id,
return rc;
}
-static void
+void
bnxt_fill_coredump_seg_hdr(struct bnxt *bp,
struct bnxt_coredump_segment_hdr *seg_hdr,
struct coredump_segment_record *seg_rec, u32 seg_len,
- int status, u32 duration, u32 instance)
+ int status, u32 duration, u32 instance, u32 comp_id,
+ u32 seg_id)
{
memset(seg_hdr, 0, sizeof(*seg_hdr));
memcpy(seg_hdr->signature, "sEgM", 4);
@@ -180,11 +239,8 @@ bnxt_fill_coredump_seg_hdr(struct bnxt *bp,
seg_hdr->high_version = seg_rec->version_hi;
seg_hdr->flags = cpu_to_le32(seg_rec->compress_flags);
} else {
- /* For hwrm_ver_get response Component id = 2
- * and Segment id = 0
- */
- seg_hdr->component_id = cpu_to_le32(2);
- seg_hdr->segment_id = 0;
+ seg_hdr->component_id = cpu_to_le32(comp_id);
+ seg_hdr->segment_id = cpu_to_le32(seg_id);
}
seg_hdr->function_id = cpu_to_le16(bp->pdev->devfn);
seg_hdr->length = cpu_to_le32(seg_len);
@@ -269,7 +325,83 @@ bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record,
record->ioctl_high_version = 0;
}
-static int __bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
+static void bnxt_fill_drv_seg_record(struct bnxt *bp,
+ struct bnxt_driver_segment_record *record,
+ struct bnxt_ctx_mem_type *ctxm, u16 type)
+{
+ struct bnxt_bs_trace_info *bs_trace = &bp->bs_trace[type];
+ u32 offset = 0;
+ int rc = 0;
+
+ record->max_entries = cpu_to_le32(ctxm->max_entries);
+ record->entry_size = cpu_to_le32(ctxm->entry_size);
+
+ rc = bnxt_dbg_hwrm_log_buffer_flush(bp, type, 0, &offset);
+ if (rc)
+ return;
+
+ bnxt_bs_trace_check_wrap(bs_trace, offset);
+ record->offset = cpu_to_le32(bs_trace->last_offset);
+ record->wrapped = bs_trace->wrapped;
+}
+
+static u32 bnxt_get_ctx_coredump(struct bnxt *bp, void *buf, u32 offset,
+ u32 *segs)
+{
+ struct bnxt_driver_segment_record record = {};
+ struct bnxt_coredump_segment_hdr seg_hdr;
+ struct bnxt_ctx_mem_info *ctx = bp->ctx;
+ u32 comp_id = BNXT_DRV_COMP_ID;
+ void *data = NULL;
+ size_t len = 0;
+ u16 type;
+
+ *segs = 0;
+ if (!ctx)
+ return 0;
+
+ if (buf)
+ buf += offset;
+ for (type = 0; type < BNXT_CTX_V2_MAX; type++) {
+ struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
+ bool trace = bnxt_bs_trace_avail(bp, type);
+ u32 seg_id = bnxt_bstore_to_seg_id[type];
+ size_t seg_len, extra_hlen = 0;
+
+ if (!ctxm->mem_valid || !seg_id)
+ continue;
+
+ if (trace) {
+ extra_hlen = BNXT_SEG_RCD_LEN;
+ if (buf) {
+ u16 trace_type = bnxt_bstore_to_trace[type];
+
+ bnxt_fill_drv_seg_record(bp, &record, ctxm,
+ trace_type);
+ }
+ }
+
+ if (buf)
+ data = buf + BNXT_SEG_HDR_LEN + extra_hlen;
+
+ seg_len = bnxt_copy_ctx_mem(bp, ctxm, data, 0) + extra_hlen;
+ if (buf) {
+ bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, seg_len,
+ 0, 0, 0, comp_id, seg_id);
+ memcpy(buf, &seg_hdr, BNXT_SEG_HDR_LEN);
+ buf += BNXT_SEG_HDR_LEN;
+ if (trace)
+ memcpy(buf, &record, BNXT_SEG_RCD_LEN);
+ buf += seg_len;
+ }
+ len += BNXT_SEG_HDR_LEN + seg_len;
+ *segs += 1;
+ }
+ return len;
+}
+
+static int __bnxt_get_coredump(struct bnxt *bp, u16 dump_type, void *buf,
+ u32 *dump_len)
{
u32 ver_get_resp_len = sizeof(struct hwrm_ver_get_output);
u32 offset = 0, seg_hdr_len, seg_record_len, buf_len = 0;
@@ -287,17 +419,31 @@ static int __bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
start_utc = sys_tz.tz_minuteswest * 60;
seg_hdr_len = sizeof(seg_hdr);
- /* First segment should be hwrm_ver_get response */
+ /* First segment should be hwrm_ver_get response.
+ * For hwrm_ver_get response Component id = 2 and Segment id = 0.
+ */
*dump_len = seg_hdr_len + ver_get_resp_len;
if (buf) {
bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, ver_get_resp_len,
- 0, 0, 0);
+ 0, 0, 0, BNXT_VER_GET_COMP_ID, 0);
memcpy(buf + offset, &seg_hdr, seg_hdr_len);
offset += seg_hdr_len;
memcpy(buf + offset, &bp->ver_resp, ver_get_resp_len);
offset += ver_get_resp_len;
}
+ if (dump_type == BNXT_DUMP_DRIVER) {
+ u32 drv_len, segs = 0;
+
+ drv_len = bnxt_get_ctx_coredump(bp, buf, offset, &segs);
+ *dump_len += drv_len;
+ offset += drv_len;
+ if (buf)
+ coredump.total_segs += segs;
+ goto err;
+ }
+
+ seg_record_len = sizeof(*seg_record);
rc = bnxt_hwrm_dbg_coredump_list(bp, &coredump);
if (rc) {
netdev_err(bp->dev, "Failed to get coredump segment list\n");
@@ -323,7 +469,8 @@ static int __bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
start = jiffies;
- rc = bnxt_hwrm_dbg_coredump_initiate(bp, comp_id, seg_id);
+ rc = bnxt_hwrm_dbg_coredump_initiate(bp, dump_type, comp_id,
+ seg_id);
if (rc) {
netdev_err(bp->dev,
"Failed to initiate coredump for seg = %d\n",
@@ -346,7 +493,7 @@ next_seg:
end = jiffies;
duration = jiffies_to_msecs(end - start);
bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, seg_record, seg_len,
- rc, duration, 0);
+ rc, duration, 0, 0, 0);
if (buf) {
/* Write segment header into the buffer */
@@ -366,26 +513,94 @@ err:
start_utc, coredump.total_segs + 1,
rc);
kfree(coredump.data);
- *dump_len += sizeof(struct bnxt_coredump_record);
- if (rc == -ENOBUFS)
+ if (!rc) {
+ *dump_len += sizeof(struct bnxt_coredump_record);
+ /* The actual coredump length can be smaller than the FW
+ * reported length earlier. Use the ethtool provided length.
+ */
+ if (buf_len)
+ *dump_len = buf_len;
+ } else if (rc == -ENOBUFS) {
netdev_err(bp->dev, "Firmware returned large coredump buffer\n");
+ }
return rc;
}
+static u32 bnxt_copy_crash_data(struct bnxt_ring_mem_info *rmem, void *buf,
+ u32 dump_len)
+{
+ u32 data_copied = 0;
+ u32 data_len;
+ int i;
+
+ for (i = 0; i < rmem->nr_pages; i++) {
+ data_len = rmem->page_size;
+ if (data_copied + data_len > dump_len)
+ data_len = dump_len - data_copied;
+ memcpy(buf + data_copied, rmem->pg_arr[i], data_len);
+ data_copied += data_len;
+ if (data_copied >= dump_len)
+ break;
+ }
+ return data_copied;
+}
+
+static int bnxt_copy_crash_dump(struct bnxt *bp, void *buf, u32 dump_len)
+{
+ struct bnxt_ring_mem_info *rmem;
+ u32 offset = 0;
+
+ if (!bp->fw_crash_mem)
+ return -ENOENT;
+
+ rmem = &bp->fw_crash_mem->ring_mem;
+
+ if (rmem->depth > 1) {
+ int i;
+
+ for (i = 0; i < rmem->nr_pages; i++) {
+ struct bnxt_ctx_pg_info *pg_tbl;
+
+ pg_tbl = bp->fw_crash_mem->ctx_pg_tbl[i];
+ offset += bnxt_copy_crash_data(&pg_tbl->ring_mem,
+ buf + offset,
+ dump_len - offset);
+ if (offset >= dump_len)
+ break;
+ }
+ } else {
+ bnxt_copy_crash_data(rmem, buf, dump_len);
+ }
+
+ return 0;
+}
+
+static bool bnxt_crash_dump_avail(struct bnxt *bp)
+{
+ u32 sig = 0;
+
+	/* First 4 bytes (signature) of crash dump is always non-zero */
+ bnxt_copy_crash_dump(bp, &sig, sizeof(sig));
+ return !!sig;
+}
+
int bnxt_get_coredump(struct bnxt *bp, u16 dump_type, void *buf, u32 *dump_len)
{
if (dump_type == BNXT_DUMP_CRASH) {
+ if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR)
+ return bnxt_copy_crash_dump(bp, buf, *dump_len);
#ifdef CONFIG_TEE_BNXT_FW
- return tee_bnxt_copy_coredump(buf, 0, *dump_len);
-#else
- return -EOPNOTSUPP;
+ else if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR)
+ return tee_bnxt_copy_coredump(buf, 0, *dump_len);
#endif
+ else
+ return -EOPNOTSUPP;
} else {
- return __bnxt_get_coredump(bp, buf, dump_len);
+ return __bnxt_get_coredump(bp, dump_type, buf, dump_len);
}
}
-static int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len)
+int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len)
{
struct hwrm_dbg_qcfg_output *resp;
struct hwrm_dbg_qcfg_input *req;
@@ -395,7 +610,8 @@ static int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len)
return -EOPNOTSUPP;
if (dump_type == BNXT_DUMP_CRASH &&
- !(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR))
+ !(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR ||
+ (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR)))
return -EOPNOTSUPP;
rc = hwrm_req_init(bp, req, HWRM_DBG_QCFG);
@@ -403,8 +619,12 @@ static int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len)
return rc;
req->fid = cpu_to_le16(0xffff);
- if (dump_type == BNXT_DUMP_CRASH)
- req->flags = cpu_to_le16(DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR);
+ if (dump_type == BNXT_DUMP_CRASH) {
+ if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR)
+ req->flags = cpu_to_le16(BNXT_DBG_FL_CR_DUMP_SIZE_SOC);
+ else
+ req->flags = cpu_to_le16(BNXT_DBG_FL_CR_DUMP_SIZE_HOST);
+ }
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req);
@@ -412,7 +632,10 @@ static int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len)
goto get_dump_len_exit;
if (dump_type == BNXT_DUMP_CRASH) {
- *dump_len = le32_to_cpu(resp->crashdump_size);
+ if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR)
+ *dump_len = BNXT_CRASH_DUMP_LEN;
+ else
+ *dump_len = le32_to_cpu(resp->crashdump_size);
} else {
/* Driver adds coredump header and "HWRM_VER_GET response"
* segment additionally to coredump.
@@ -434,11 +657,21 @@ u32 bnxt_get_coredump_length(struct bnxt *bp, u16 dump_type)
{
u32 len = 0;
- if (bnxt_hwrm_get_dump_len(bp, dump_type, &len)) {
- if (dump_type == BNXT_DUMP_CRASH)
- len = BNXT_CRASH_DUMP_LEN;
- else
- __bnxt_get_coredump(bp, NULL, &len);
+ if (dump_type == BNXT_DUMP_CRASH &&
+ bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR &&
+ bp->fw_crash_mem) {
+ if (!bnxt_crash_dump_avail(bp))
+ return 0;
+
+ return bp->fw_crash_len;
}
+
+ if (dump_type != BNXT_DUMP_DRIVER) {
+ if (!bnxt_hwrm_get_dump_len(bp, dump_type, &len))
+ return len;
+ }
+ if (dump_type != BNXT_DUMP_CRASH)
+ __bnxt_get_coredump(bp, dump_type, NULL, &len);
+
return len;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h
index b1a1b2fffb19..c087df88154a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h
@@ -68,11 +68,51 @@ struct bnxt_coredump_record {
__le16 rsvd3[313];
};
+struct bnxt_driver_segment_record {
+ __le32 max_entries;
+ __le32 entry_size;
+ __le32 offset;
+ __u8 wrapped:1;
+ __u8 unused[3];
+};
+
+#define BNXT_VER_GET_COMP_ID 2
+#define BNXT_DRV_COMP_ID 0xd
+
+#define BNXT_CTX_MEM_SEG_ID_START 0x200
+
+#define BNXT_CTX_MEM_SEG_QP (BNXT_CTX_MEM_SEG_ID_START + BNXT_CTX_QP)
+#define BNXT_CTX_MEM_SEG_SRQ (BNXT_CTX_MEM_SEG_ID_START + BNXT_CTX_SRQ)
+#define BNXT_CTX_MEM_SEG_CQ (BNXT_CTX_MEM_SEG_ID_START + BNXT_CTX_CQ)
+#define BNXT_CTX_MEM_SEG_VNIC (BNXT_CTX_MEM_SEG_ID_START + BNXT_CTX_VNIC)
+#define BNXT_CTX_MEM_SEG_STAT (BNXT_CTX_MEM_SEG_ID_START + BNXT_CTX_STAT)
+#define BNXT_CTX_MEM_SEG_STQM (BNXT_CTX_MEM_SEG_ID_START + BNXT_CTX_STQM)
+#define BNXT_CTX_MEM_SEG_FTQM (BNXT_CTX_MEM_SEG_ID_START + BNXT_CTX_FTQM)
+#define BNXT_CTX_MEM_SEG_MRAV (BNXT_CTX_MEM_SEG_ID_START + BNXT_CTX_MRAV)
+#define BNXT_CTX_MEM_SEG_TIM (BNXT_CTX_MEM_SEG_ID_START + BNXT_CTX_TIM)
+
+#define BNXT_CTX_MEM_SEG_SRT 0x1
+#define BNXT_CTX_MEM_SEG_SRT2 0x2
+#define BNXT_CTX_MEM_SEG_CRT 0x3
+#define BNXT_CTX_MEM_SEG_CRT2 0x4
+#define BNXT_CTX_MEM_SEG_RIGP0 0x5
+#define BNXT_CTX_MEM_SEG_L2HWRM 0x6
+#define BNXT_CTX_MEM_SEG_REHWRM 0x7
+#define BNXT_CTX_MEM_SEG_CA0 0x8
+#define BNXT_CTX_MEM_SEG_CA1 0x9
+#define BNXT_CTX_MEM_SEG_CA2 0xa
+#define BNXT_CTX_MEM_SEG_RIGP1 0xb
+#define BNXT_CTX_MEM_SEG_QPC 0xc
+#define BNXT_CTX_MEM_SEG_KONG 0xd
+
#define BNXT_CRASH_DUMP_LEN (8 << 20)
#define COREDUMP_LIST_BUF_LEN 2048
#define COREDUMP_RETRIEVE_BUF_LEN 4096
+#define BNXT_SEG_HDR_LEN sizeof(struct bnxt_coredump_segment_hdr)
+#define BNXT_SEG_RCD_LEN sizeof(struct bnxt_driver_segment_record)
+
struct bnxt_coredump {
void *data;
int data_size;
@@ -111,7 +151,20 @@ struct hwrm_dbg_cmn_output {
#define HWRM_DBG_CMN_FLAGS_MORE 1
};
+#define BNXT_DBG_FL_CR_DUMP_SIZE_SOC \
+ DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR
+#define BNXT_DBG_FL_CR_DUMP_SIZE_HOST \
+ DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_HOST_DDR
+#define BNXT_DBG_CR_DUMP_MDM_CFG_DDR \
+ DBG_CRASHDUMP_MEDIUM_CFG_REQ_TYPE_DDR
+
+void bnxt_fill_coredump_seg_hdr(struct bnxt *bp,
+ struct bnxt_coredump_segment_hdr *seg_hdr,
+ struct coredump_segment_record *seg_rec,
+ u32 seg_len, int status, u32 duration,
+ u32 instance, u32 comp_id, u32 seg_id);
int bnxt_get_coredump(struct bnxt *bp, u16 dump_type, void *buf, u32 *dump_len);
+int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len);
u32 bnxt_get_coredump_length(struct bnxt *bp, u16 dump_type);
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index 0dbb880a7aa0..a00b67334f9b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -16,7 +16,7 @@
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <rdma/ib_verbs.h>
-#include "bnxt_hsi.h"
+#include <linux/bnxt/hsi.h>
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_dcb.h"
@@ -487,7 +487,9 @@ static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc)
if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]) && i > bp->max_tc)
return -EINVAL;
+ }
+ for (i = 0; i < max_tc; i++) {
switch (ets->tc_tsa[i]) {
case IEEE_8021QAZ_TSA_STRICT:
break;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c
index 156c2404854f..3324afbb3bec 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c
@@ -10,7 +10,7 @@
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include "bnxt_hsi.h"
+#include <linux/bnxt/hsi.h>
#include <linux/dim.h>
#include "bnxt.h"
#include "bnxt_debugfs.h"
@@ -64,9 +64,9 @@ static const struct file_operations debugfs_dim_fops = {
static void debugfs_dim_ring_init(struct dim *dim, int ring_idx,
struct dentry *dd)
{
- static char qname[16];
+ static char qname[12];
- snprintf(qname, 10, "%d", ring_idx);
+ snprintf(qname, sizeof(qname), "%d", ring_idx);
debugfs_create_file(qname, 0600, dd, dim, &debugfs_dim_fops);
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.h
index d0bb4887acd0..a0a8d687dd99 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.h
@@ -7,7 +7,7 @@
* the Free Software Foundation.
*/
-#include "bnxt_hsi.h"
+#include <linux/bnxt/hsi.h>
#include "bnxt.h"
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index ae4529c043f0..15de802bbac4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -11,7 +11,8 @@
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <net/devlink.h>
-#include "bnxt_hsi.h"
+#include <net/netdev_lock.h>
+#include <linux/bnxt/hsi.h>
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_vfr.h"
@@ -39,12 +40,6 @@ bnxt_dl_flash_update(struct devlink *dl,
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
int rc;
- if (!BNXT_PF(bp)) {
- NL_SET_ERR_MSG_MOD(extack,
- "flash update not supported from a VF");
- return -EPERM;
- }
-
devlink_flash_update_status_notify(dl, "Preparing to flash", NULL, 0, 0);
rc = bnxt_flash_package_from_fw_obj(bp->dev, params->fw, 0, extack);
if (!rc)
@@ -219,7 +214,7 @@ __bnxt_dl_reporter_create(struct bnxt *bp,
{
struct devlink_health_reporter *reporter;
- reporter = devlink_health_reporter_create(bp->dl, ops, 0, bp);
+ reporter = devlink_health_reporter_create(bp->dl, ops, bp);
if (IS_ERR(reporter)) {
netdev_warn(bp->dev, "Failed to create %s health reporter, rc = %ld\n",
ops->name, PTR_ERR(reporter));
@@ -437,18 +432,23 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
switch (action) {
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: {
+ bnxt_ulp_stop(bp);
rtnl_lock();
+ netdev_lock(bp->dev);
if (bnxt_sriov_cfg(bp)) {
NL_SET_ERR_MSG_MOD(extack,
"reload is unsupported while VFs are allocated or being configured");
+ netdev_unlock(bp->dev);
rtnl_unlock();
+ bnxt_ulp_start(bp, 0);
return -EOPNOTSUPP;
}
if (bp->dev->reg_state == NETREG_UNREGISTERED) {
+ netdev_unlock(bp->dev);
rtnl_unlock();
+ bnxt_ulp_start(bp, 0);
return -ENODEV;
}
- bnxt_ulp_stop(bp);
if (netif_running(bp->dev))
bnxt_close_nic(bp, true, true);
bnxt_vf_reps_free(bp);
@@ -456,12 +456,13 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
if (rc) {
NL_SET_ERR_MSG_MOD(extack, "Failed to deregister");
if (netif_running(bp->dev))
- dev_close(bp->dev);
+ netif_close(bp->dev);
+ netdev_unlock(bp->dev);
rtnl_unlock();
break;
}
- bnxt_cancel_reservations(bp, false);
- bnxt_free_ctx_mem(bp);
+ bnxt_clear_reservations(bp, false);
+ bnxt_free_ctx_mem(bp, false);
break;
}
case DEVLINK_RELOAD_ACTION_FW_ACTIVATE: {
@@ -477,7 +478,9 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
return -EPERM;
}
rtnl_lock();
+ netdev_lock(bp->dev);
if (bp->dev->reg_state == NETREG_UNREGISTERED) {
+ netdev_unlock(bp->dev);
rtnl_unlock();
return -ENODEV;
}
@@ -491,6 +494,7 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
if (rc) {
NL_SET_ERR_MSG_MOD(extack, "Failed to activate firmware");
clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
+ netdev_unlock(bp->dev);
rtnl_unlock();
}
break;
@@ -509,6 +513,8 @@ static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action acti
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
int rc = 0;
+ netdev_assert_locked(bp->dev);
+
*actions_performed = 0;
switch (action) {
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: {
@@ -516,7 +522,6 @@ static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action acti
bnxt_vf_reps_alloc(bp);
if (netif_running(bp->dev))
rc = bnxt_open_nic(bp, true, true);
- bnxt_ulp_start(bp, rc);
if (!rc) {
bnxt_reenable_sriov(bp);
bnxt_ptp_reapply_pps(bp);
@@ -534,6 +539,7 @@ static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action acti
if (!netif_running(bp->dev))
NL_SET_ERR_MSG_MOD(extack,
"Device is closed, not waiting for reset notice that will never come");
+ netdev_unlock(bp->dev);
rtnl_unlock();
while (test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state)) {
if (time_after(jiffies, timeout)) {
@@ -549,6 +555,7 @@ static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action acti
msleep(50);
}
rtnl_lock();
+ netdev_lock(bp->dev);
if (!rc)
*actions_performed |= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
@@ -567,9 +574,12 @@ static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action acti
}
*actions_performed |= BIT(action);
} else if (netif_running(bp->dev)) {
- dev_close(bp->dev);
+ netif_close(bp->dev);
}
+ netdev_unlock(bp->dev);
rtnl_unlock();
+ if (action == DEVLINK_RELOAD_ACTION_DRIVER_REINIT)
+ bnxt_ulp_start(bp, rc);
return rc;
}
@@ -663,6 +673,8 @@ static const struct bnxt_dl_nvm_param nvm_params[] = {
NVM_OFF_MSIX_VEC_PER_PF_MAX, BNXT_NVM_SHARED_CFG, 10, 4},
{DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
NVM_OFF_MSIX_VEC_PER_PF_MIN, BNXT_NVM_SHARED_CFG, 7, 4},
+ {DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE, NVM_OFF_SUPPORT_RDMA,
+ BNXT_NVM_FUNC_CFG, 1, 1},
{BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK, NVM_OFF_DIS_GRE_VER_CHECK,
BNXT_NVM_SHARED_CFG, 1, 1},
};
@@ -1007,37 +1019,19 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
}
-static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
- union devlink_param_value *val)
+static int __bnxt_hwrm_nvm_req(struct bnxt *bp,
+ const struct bnxt_dl_nvm_param *nvm, void *msg,
+ union devlink_param_value *val)
{
struct hwrm_nvm_get_variable_input *req = msg;
- struct bnxt_dl_nvm_param nvm_param;
struct hwrm_err_output *resp;
union bnxt_nvm_data *data;
dma_addr_t data_dma_addr;
- int idx = 0, rc, i;
-
- /* Get/Set NVM CFG parameter is supported only on PFs */
- if (BNXT_VF(bp)) {
- hwrm_req_drop(bp, req);
- return -EPERM;
- }
+ int idx = 0, rc;
- for (i = 0; i < ARRAY_SIZE(nvm_params); i++) {
- if (nvm_params[i].id == param_id) {
- nvm_param = nvm_params[i];
- break;
- }
- }
-
- if (i == ARRAY_SIZE(nvm_params)) {
- hwrm_req_drop(bp, req);
- return -EOPNOTSUPP;
- }
-
- if (nvm_param.dir_type == BNXT_NVM_PORT_CFG)
+ if (nvm->dir_type == BNXT_NVM_PORT_CFG)
idx = bp->pf.port_id;
- else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG)
+ else if (nvm->dir_type == BNXT_NVM_FUNC_CFG)
idx = bp->pf.fw_fid - BNXT_FIRST_PF_FID;
data = hwrm_req_dma_slice(bp, req, sizeof(*data), &data_dma_addr);
@@ -1048,23 +1042,23 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
}
req->dest_data_addr = cpu_to_le64(data_dma_addr);
- req->data_len = cpu_to_le16(nvm_param.nvm_num_bits);
- req->option_num = cpu_to_le16(nvm_param.offset);
+ req->data_len = cpu_to_le16(nvm->nvm_num_bits);
+ req->option_num = cpu_to_le16(nvm->offset);
req->index_0 = cpu_to_le16(idx);
if (idx)
req->dimensions = cpu_to_le16(1);
resp = hwrm_req_hold(bp, req);
if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE)) {
- bnxt_copy_to_nvm_data(data, val, nvm_param.nvm_num_bits,
- nvm_param.dl_num_bytes);
+ bnxt_copy_to_nvm_data(data, val, nvm->nvm_num_bits,
+ nvm->dl_num_bytes);
rc = hwrm_req_send(bp, msg);
} else {
rc = hwrm_req_send_silent(bp, msg);
if (!rc) {
bnxt_copy_from_nvm_data(val, data,
- nvm_param.nvm_num_bits,
- nvm_param.dl_num_bytes);
+ nvm->nvm_num_bits,
+ nvm->dl_num_bytes);
} else {
if (resp->cmd_err ==
NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST)
@@ -1077,8 +1071,23 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
return rc;
}
+static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
+ union devlink_param_value *val)
+{
+ const struct bnxt_dl_nvm_param *nvm_param;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(nvm_params); i++) {
+ nvm_param = &nvm_params[i];
+ if (nvm_param->id == param_id)
+ return __bnxt_hwrm_nvm_req(bp, nvm_param, msg, val);
+ }
+ return -EOPNOTSUPP;
+}
+
static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
struct hwrm_nvm_get_variable_input *req;
@@ -1096,7 +1105,8 @@ static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id,
}
static int bnxt_dl_nvm_param_set(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
struct hwrm_nvm_set_variable_input *req;
@@ -1112,6 +1122,32 @@ static int bnxt_dl_nvm_param_set(struct devlink *dl, u32 id,
return bnxt_hwrm_nvm_req(bp, id, req, &ctx->val);
}
+static int bnxt_dl_roce_validate(struct devlink *dl, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ const struct bnxt_dl_nvm_param nvm_roce_cap = {0, NVM_OFF_RDMA_CAPABLE,
+ BNXT_NVM_SHARED_CFG, 1, 1};
+ struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+ struct hwrm_nvm_get_variable_input *req;
+ union devlink_param_value roce_cap;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_NVM_GET_VARIABLE);
+ if (rc)
+ return rc;
+
+ if (__bnxt_hwrm_nvm_req(bp, &nvm_roce_cap, req, &roce_cap)) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to verify if device is RDMA Capable");
+ return -EINVAL;
+ }
+ if (!roce_cap.vbool) {
+ NL_SET_ERR_MSG_MOD(extack, "Device does not support RDMA");
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int bnxt_dl_msix_validate(struct devlink *dl, u32 id,
union devlink_param_value val,
struct netlink_ext_ack *extack)
@@ -1133,7 +1169,8 @@ static int bnxt_dl_msix_validate(struct devlink *dl, u32 id,
}
static int bnxt_remote_dev_reset_get(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
@@ -1145,7 +1182,8 @@ static int bnxt_remote_dev_reset_get(struct devlink *dl, u32 id,
}
static int bnxt_remote_dev_reset_set(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
int rc;
@@ -1175,6 +1213,10 @@ static const struct devlink_param bnxt_dl_params[] = {
BIT(DEVLINK_PARAM_CMODE_PERMANENT),
bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
bnxt_dl_msix_validate),
+ DEVLINK_PARAM_GENERIC(ENABLE_ROCE,
+ BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+ bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
+ bnxt_dl_roce_validate),
DEVLINK_PARAM_DRIVER(BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK,
"gre_ver_check", DEVLINK_PARAM_TYPE_BOOL,
BIT(DEVLINK_PARAM_CMODE_PERMANENT),
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
index b8105065367b..7f45dcd7b287 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
@@ -41,8 +41,10 @@ static inline void bnxt_dl_set_remote_reset(struct devlink *dl, bool value)
#define NVM_OFF_MSIX_VEC_PER_PF_MAX 108
#define NVM_OFF_MSIX_VEC_PER_PF_MIN 114
#define NVM_OFF_IGNORE_ARI 164
+#define NVM_OFF_RDMA_CAPABLE 161
#define NVM_OFF_DIS_GRE_VER_CHECK 171
#define NVM_OFF_ENABLE_SRIOV 401
+#define NVM_OFF_SUPPORT_RDMA 506
#define NVM_OFF_NVM_CFG_VER 602
#define BNXT_NVM_CFG_VER_BITS 8
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c
index 6f6576dc417a..53a3bcb0efe0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c
@@ -8,7 +8,7 @@
*/
#include <linux/dim.h>
-#include "bnxt_hsi.h"
+#include <linux/bnxt/hsi.h>
#include "bnxt.h"
void bnxt_dim_work(struct work_struct *work)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 1d240a27455a..068e191ede19 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -24,8 +24,9 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/net_tstamp.h>
#include <linux/timecounter.h>
+#include <net/netdev_queues.h>
#include <net/netlink.h>
-#include "bnxt_hsi.h"
+#include <linux/bnxt/hsi.h>
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
@@ -631,13 +632,13 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
buf[j] = sw_stats[k];
skip_tpa_ring_stats:
- sw = (u64 *)&cpr->sw_stats.rx;
+ sw = (u64 *)&cpr->sw_stats->rx;
if (is_rx_ring(bp, i)) {
for (k = 0; k < NUM_RING_RX_SW_STATS; j++, k++)
buf[j] = sw[k];
}
- sw = (u64 *)&cpr->sw_stats.cmn;
+ sw = (u64 *)&cpr->sw_stats->cmn;
for (k = 0; k < NUM_RING_CMN_SW_STATS; j++, k++)
buf[j] = sw[k];
}
@@ -687,16 +688,22 @@ skip_ring_stats:
buf[j] = *(rx_port_stats_ext + n);
}
for (i = 0; i < 8; i++, j++) {
- long n = bnxt_tx_bytes_pri_arr[i].base_off +
- bp->pri2cos_idx[i];
+ u8 cos_idx = bp->pri2cos_idx[i];
+ long n;
+ n = bnxt_tx_bytes_pri_arr[i].base_off + cos_idx;
buf[j] = *(tx_port_stats_ext + n);
+ if (bp->cos0_cos1_shared && !cos_idx)
+ buf[j] += *(tx_port_stats_ext + n + 1);
}
for (i = 0; i < 8; i++, j++) {
- long n = bnxt_tx_pkts_pri_arr[i].base_off +
- bp->pri2cos_idx[i];
+ u8 cos_idx = bp->pri2cos_idx[i];
+ long n;
+ n = bnxt_tx_pkts_pri_arr[i].base_off + cos_idx;
buf[j] = *(tx_port_stats_ext + n);
+ if (bp->cos0_cos1_shared && !cos_idx)
+ buf[j] += *(tx_port_stats_ext + n + 1);
}
}
}
@@ -705,112 +712,105 @@ skip_ring_stats:
static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
struct bnxt *bp = netdev_priv(dev);
- static const char * const *str;
u32 i, j, num_str;
+ const char *str;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < bp->cp_nr_rings; i++) {
- if (is_rx_ring(bp, i)) {
- num_str = NUM_RING_RX_HW_STATS;
- for (j = 0; j < num_str; j++) {
- sprintf(buf, "[%d]: %s", i,
- bnxt_ring_rx_stats_str[j]);
- buf += ETH_GSTRING_LEN;
+ if (is_rx_ring(bp, i))
+ for (j = 0; j < NUM_RING_RX_HW_STATS; j++) {
+ str = bnxt_ring_rx_stats_str[j];
+ ethtool_sprintf(&buf, "[%d]: %s", i,
+ str);
}
- }
- if (is_tx_ring(bp, i)) {
- num_str = NUM_RING_TX_HW_STATS;
- for (j = 0; j < num_str; j++) {
- sprintf(buf, "[%d]: %s", i,
- bnxt_ring_tx_stats_str[j]);
- buf += ETH_GSTRING_LEN;
+ if (is_tx_ring(bp, i))
+ for (j = 0; j < NUM_RING_TX_HW_STATS; j++) {
+ str = bnxt_ring_tx_stats_str[j];
+ ethtool_sprintf(&buf, "[%d]: %s", i,
+ str);
}
- }
num_str = bnxt_get_num_tpa_ring_stats(bp);
if (!num_str || !is_rx_ring(bp, i))
goto skip_tpa_stats;
if (bp->max_tpa_v2)
- str = bnxt_ring_tpa2_stats_str;
+ for (j = 0; j < num_str; j++) {
+ str = bnxt_ring_tpa2_stats_str[j];
+ ethtool_sprintf(&buf, "[%d]: %s", i,
+ str);
+ }
else
- str = bnxt_ring_tpa_stats_str;
-
- for (j = 0; j < num_str; j++) {
- sprintf(buf, "[%d]: %s", i, str[j]);
- buf += ETH_GSTRING_LEN;
- }
-skip_tpa_stats:
- if (is_rx_ring(bp, i)) {
- num_str = NUM_RING_RX_SW_STATS;
for (j = 0; j < num_str; j++) {
- sprintf(buf, "[%d]: %s", i,
- bnxt_rx_sw_stats_str[j]);
- buf += ETH_GSTRING_LEN;
+ str = bnxt_ring_tpa_stats_str[j];
+ ethtool_sprintf(&buf, "[%d]: %s", i,
+ str);
}
+skip_tpa_stats:
+ if (is_rx_ring(bp, i))
+ for (j = 0; j < NUM_RING_RX_SW_STATS; j++) {
+ str = bnxt_rx_sw_stats_str[j];
+ ethtool_sprintf(&buf, "[%d]: %s", i,
+ str);
+ }
+ for (j = 0; j < NUM_RING_CMN_SW_STATS; j++) {
+ str = bnxt_cmn_sw_stats_str[j];
+ ethtool_sprintf(&buf, "[%d]: %s", i, str);
}
- num_str = NUM_RING_CMN_SW_STATS;
- for (j = 0; j < num_str; j++) {
- sprintf(buf, "[%d]: %s", i,
- bnxt_cmn_sw_stats_str[j]);
- buf += ETH_GSTRING_LEN;
- }
- }
- for (i = 0; i < BNXT_NUM_RING_ERR_STATS; i++) {
- strscpy(buf, bnxt_ring_err_stats_arr[i], ETH_GSTRING_LEN);
- buf += ETH_GSTRING_LEN;
}
+ for (i = 0; i < BNXT_NUM_RING_ERR_STATS; i++)
+ ethtool_puts(&buf, bnxt_ring_err_stats_arr[i]);
- if (bp->flags & BNXT_FLAG_PORT_STATS) {
+ if (bp->flags & BNXT_FLAG_PORT_STATS)
for (i = 0; i < BNXT_NUM_PORT_STATS; i++) {
- strcpy(buf, bnxt_port_stats_arr[i].string);
- buf += ETH_GSTRING_LEN;
+ str = bnxt_port_stats_arr[i].string;
+ ethtool_puts(&buf, str);
}
- }
+
if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
u32 len;
len = min_t(u32, bp->fw_rx_stats_ext_size,
ARRAY_SIZE(bnxt_port_stats_ext_arr));
for (i = 0; i < len; i++) {
- strcpy(buf, bnxt_port_stats_ext_arr[i].string);
- buf += ETH_GSTRING_LEN;
+ str = bnxt_port_stats_ext_arr[i].string;
+ ethtool_puts(&buf, str);
}
+
len = min_t(u32, bp->fw_tx_stats_ext_size,
ARRAY_SIZE(bnxt_tx_port_stats_ext_arr));
for (i = 0; i < len; i++) {
- strcpy(buf,
- bnxt_tx_port_stats_ext_arr[i].string);
- buf += ETH_GSTRING_LEN;
+ str = bnxt_tx_port_stats_ext_arr[i].string;
+ ethtool_puts(&buf, str);
}
+
if (bp->pri2cos_valid) {
for (i = 0; i < 8; i++) {
- strcpy(buf,
- bnxt_rx_bytes_pri_arr[i].string);
- buf += ETH_GSTRING_LEN;
+ str = bnxt_rx_bytes_pri_arr[i].string;
+ ethtool_puts(&buf, str);
}
+
for (i = 0; i < 8; i++) {
- strcpy(buf,
- bnxt_rx_pkts_pri_arr[i].string);
- buf += ETH_GSTRING_LEN;
+ str = bnxt_rx_pkts_pri_arr[i].string;
+ ethtool_puts(&buf, str);
}
+
for (i = 0; i < 8; i++) {
- strcpy(buf,
- bnxt_tx_bytes_pri_arr[i].string);
- buf += ETH_GSTRING_LEN;
+ str = bnxt_tx_bytes_pri_arr[i].string;
+ ethtool_puts(&buf, str);
}
+
for (i = 0; i < 8; i++) {
- strcpy(buf,
- bnxt_tx_pkts_pri_arr[i].string);
- buf += ETH_GSTRING_LEN;
+ str = bnxt_tx_pkts_pri_arr[i].string;
+ ethtool_puts(&buf, str);
}
}
}
break;
case ETH_SS_TEST:
if (bp->num_tests)
- memcpy(buf, bp->test_info->string,
- bp->num_tests * ETH_GSTRING_LEN);
+ for (i = 0; i < bp->num_tests; i++)
+ ethtool_puts(&buf, bp->test_info->string[i]);
break;
default:
netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n",
@@ -840,6 +840,8 @@ static void bnxt_get_ringparam(struct net_device *dev,
ering->rx_pending = bp->rx_ring_size;
ering->rx_jumbo_pending = bp->rx_agg_ring_size;
ering->tx_pending = bp->tx_ring_size;
+
+ kernel_ering->hds_thresh_max = BNXT_HDS_THRESHOLD_MAX;
}
static int bnxt_set_ringparam(struct net_device *dev,
@@ -847,16 +849,35 @@ static int bnxt_set_ringparam(struct net_device *dev,
struct kernel_ethtool_ringparam *kernel_ering,
struct netlink_ext_ack *extack)
{
+ u8 tcp_data_split = kernel_ering->tcp_data_split;
struct bnxt *bp = netdev_priv(dev);
+ u8 hds_config_mod;
if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
(ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
(ering->tx_pending < BNXT_MIN_TX_DESC_CNT))
return -EINVAL;
+ hds_config_mod = tcp_data_split != dev->cfg->hds_config;
+ if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_DISABLED && hds_config_mod)
+ return -EINVAL;
+
+ if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED &&
+ hds_config_mod && BNXT_RX_PAGE_MODE(bp)) {
+ NL_SET_ERR_MSG_MOD(extack, "tcp-data-split is disallowed when XDP is attached");
+ return -EINVAL;
+ }
+
if (netif_running(dev))
bnxt_close_nic(bp, false, false);
+ if (hds_config_mod) {
+ if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED)
+ bp->flags |= BNXT_FLAG_HDS;
+ else if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN)
+ bp->flags &= ~BNXT_FLAG_HDS;
+ }
+
bp->rx_ring_size = ering->rx_pending;
bp->tx_ring_size = ering->tx_pending;
bnxt_set_ring_params(bp);
@@ -955,11 +976,6 @@ static int bnxt_set_channels(struct net_device *dev,
}
tx_xdp = req_rx_rings;
}
- rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp);
- if (rc) {
- netdev_warn(dev, "Unable to allocate the requested rings\n");
- return rc;
- }
if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) !=
bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) &&
@@ -968,7 +984,12 @@ static int bnxt_set_channels(struct net_device *dev,
return -EINVAL;
}
- bnxt_clear_usr_fltrs(bp, true);
+ rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp);
+ if (rc) {
+ netdev_warn(dev, "Unable to allocate the requested rings\n");
+ return rc;
+ }
+
if (netif_running(dev)) {
if (BNXT_PF(bp)) {
/* TODO CHIMP_FW: Send message to all VF's
@@ -1131,14 +1152,15 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
fkeys = &fltr->fkeys;
fmasks = &fltr->fmasks;
if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
- if (fkeys->basic.ip_proto == IPPROTO_ICMP ||
- fkeys->basic.ip_proto == IPPROTO_RAW) {
+ if (fkeys->basic.ip_proto == BNXT_IP_PROTO_WILDCARD) {
fs->flow_type = IP_USER_FLOW;
fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
- if (fkeys->basic.ip_proto == IPPROTO_ICMP)
- fs->h_u.usr_ip4_spec.proto = IPPROTO_ICMP;
- else
- fs->h_u.usr_ip4_spec.proto = IPPROTO_RAW;
+ fs->h_u.usr_ip4_spec.proto = BNXT_IP_PROTO_WILDCARD;
+ fs->m_u.usr_ip4_spec.proto = 0;
+ } else if (fkeys->basic.ip_proto == IPPROTO_ICMP) {
+ fs->flow_type = IP_USER_FLOW;
+ fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+ fs->h_u.usr_ip4_spec.proto = IPPROTO_ICMP;
fs->m_u.usr_ip4_spec.proto = BNXT_IP_PROTO_FULL_MASK;
} else if (fkeys->basic.ip_proto == IPPROTO_TCP) {
fs->flow_type = TCP_V4_FLOW;
@@ -1160,13 +1182,13 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
fs->m_u.tcp_ip4_spec.pdst = fmasks->ports.dst;
}
} else {
- if (fkeys->basic.ip_proto == IPPROTO_ICMPV6 ||
- fkeys->basic.ip_proto == IPPROTO_RAW) {
+ if (fkeys->basic.ip_proto == BNXT_IP_PROTO_WILDCARD) {
fs->flow_type = IPV6_USER_FLOW;
- if (fkeys->basic.ip_proto == IPPROTO_ICMPV6)
- fs->h_u.usr_ip6_spec.l4_proto = IPPROTO_ICMPV6;
- else
- fs->h_u.usr_ip6_spec.l4_proto = IPPROTO_RAW;
+ fs->h_u.usr_ip6_spec.l4_proto = BNXT_IP_PROTO_WILDCARD;
+ fs->m_u.usr_ip6_spec.l4_proto = 0;
+ } else if (fkeys->basic.ip_proto == IPPROTO_ICMPV6) {
+ fs->flow_type = IPV6_USER_FLOW;
+ fs->h_u.usr_ip6_spec.l4_proto = IPPROTO_ICMPV6;
fs->m_u.usr_ip6_spec.l4_proto = BNXT_IP_PROTO_FULL_MASK;
} else if (fkeys->basic.ip_proto == IPPROTO_TCP) {
fs->flow_type = TCP_V6_FLOW;
@@ -1193,10 +1215,14 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
}
}
- if (fltr->base.flags & BNXT_ACT_DROP)
+ if (fltr->base.flags & BNXT_ACT_DROP) {
fs->ring_cookie = RX_CLS_FLOW_DISC;
- else
+ } else if (fltr->base.flags & BNXT_ACT_RSS_CTX) {
+ fs->flow_type |= FLOW_RSS;
+ cmd->rss_context = fltr->base.fw_vnic_id;
+ } else {
fs->ring_cookie = fltr->base.rxq;
+ }
rc = 0;
fltr_err:
@@ -1205,6 +1231,35 @@ fltr_err:
return rc;
}
+static struct bnxt_rss_ctx *bnxt_get_rss_ctx_from_index(struct bnxt *bp,
+ u32 index)
+{
+ struct ethtool_rxfh_context *ctx;
+
+ ctx = xa_load(&bp->dev->ethtool->rss_ctx, index);
+ if (!ctx)
+ return NULL;
+ return ethtool_rxfh_context_priv(ctx);
+}
+
+static int bnxt_alloc_vnic_rss_table(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic)
+{
+ int size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
+
+ vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
+ vnic->rss_table = dma_alloc_coherent(&bp->pdev->dev,
+ vnic->rss_table_size,
+ &vnic->rss_table_dma_addr,
+ GFP_KERNEL);
+ if (!vnic->rss_table)
+ return -ENOMEM;
+
+ vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
+ vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
+ return 0;
+}
+
static int bnxt_add_l2_cls_rule(struct bnxt *bp,
struct ethtool_rx_flow_spec *fs)
{
@@ -1260,10 +1315,12 @@ static int bnxt_add_l2_cls_rule(struct bnxt *bp,
static bool bnxt_verify_ntuple_ip4_flow(struct ethtool_usrip4_spec *ip_spec,
struct ethtool_usrip4_spec *ip_mask)
{
+ u8 mproto = ip_mask->proto;
+ u8 sproto = ip_spec->proto;
+
if (ip_mask->l4_4_bytes || ip_mask->tos ||
ip_spec->ip_ver != ETH_RX_NFC_IP4 ||
- ip_mask->proto != BNXT_IP_PROTO_FULL_MASK ||
- (ip_spec->proto != IPPROTO_RAW && ip_spec->proto != IPPROTO_ICMP))
+ (mproto && (mproto != BNXT_IP_PROTO_FULL_MASK || sproto != IPPROTO_ICMP)))
return false;
return true;
}
@@ -1271,31 +1328,34 @@ static bool bnxt_verify_ntuple_ip4_flow(struct ethtool_usrip4_spec *ip_spec,
static bool bnxt_verify_ntuple_ip6_flow(struct ethtool_usrip6_spec *ip_spec,
struct ethtool_usrip6_spec *ip_mask)
{
+ u8 mproto = ip_mask->l4_proto;
+ u8 sproto = ip_spec->l4_proto;
+
if (ip_mask->l4_4_bytes || ip_mask->tclass ||
- ip_mask->l4_proto != BNXT_IP_PROTO_FULL_MASK ||
- (ip_spec->l4_proto != IPPROTO_RAW &&
- ip_spec->l4_proto != IPPROTO_ICMPV6))
+ (mproto && (mproto != BNXT_IP_PROTO_FULL_MASK || sproto != IPPROTO_ICMPV6)))
return false;
return true;
}
static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
- struct ethtool_rx_flow_spec *fs)
+ struct ethtool_rxnfc *cmd)
{
- u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
- u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
+ struct ethtool_rx_flow_spec *fs = &cmd->fs;
struct bnxt_ntuple_filter *new_fltr, *fltr;
+ u32 flow_type = fs->flow_type & 0xff;
struct bnxt_l2_filter *l2_fltr;
struct bnxt_flow_masks *fmasks;
- u32 flow_type = fs->flow_type;
struct flow_keys *fkeys;
- u32 idx;
+ u32 idx, ring;
int rc;
+ u8 vf;
if (!bp->vnic_info)
return -EAGAIN;
- if ((flow_type & (FLOW_MAC_EXT | FLOW_EXT)) || vf)
+ vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
+ ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
+ if ((fs->flow_type & (FLOW_MAC_EXT | FLOW_EXT)) || vf)
return -EOPNOTSUPP;
if (flow_type == IP_USER_FLOW) {
@@ -1326,7 +1386,8 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
struct ethtool_usrip4_spec *ip_spec = &fs->h_u.usr_ip4_spec;
struct ethtool_usrip4_spec *ip_mask = &fs->m_u.usr_ip4_spec;
- fkeys->basic.ip_proto = ip_spec->proto;
+ fkeys->basic.ip_proto = ip_mask->proto ? ip_spec->proto
+ : BNXT_IP_PROTO_WILDCARD;
fkeys->basic.n_proto = htons(ETH_P_IP);
fkeys->addrs.v4addrs.src = ip_spec->ip4src;
fmasks->addrs.v4addrs.src = ip_mask->ip4src;
@@ -1357,7 +1418,8 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
struct ethtool_usrip6_spec *ip_spec = &fs->h_u.usr_ip6_spec;
struct ethtool_usrip6_spec *ip_mask = &fs->m_u.usr_ip6_spec;
- fkeys->basic.ip_proto = ip_spec->l4_proto;
+ fkeys->basic.ip_proto = ip_mask->l4_proto ? ip_spec->l4_proto
+ : BNXT_IP_PROTO_WILDCARD;
fkeys->basic.n_proto = htons(ETH_P_IPV6);
fkeys->addrs.v6addrs.src = *(struct in6_addr *)&ip_spec->ip6src;
fmasks->addrs.v6addrs.src = *(struct in6_addr *)&ip_mask->ip6src;
@@ -1403,6 +1465,19 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
rcu_read_unlock();
new_fltr->base.flags = BNXT_ACT_NO_AGING;
+ if (fs->flow_type & FLOW_RSS) {
+ struct bnxt_rss_ctx *rss_ctx;
+
+ new_fltr->base.fw_vnic_id = 0;
+ new_fltr->base.flags |= BNXT_ACT_RSS_CTX;
+ rss_ctx = bnxt_get_rss_ctx_from_index(bp, cmd->rss_context);
+ if (rss_ctx) {
+ new_fltr->base.fw_vnic_id = rss_ctx->index;
+ } else {
+ rc = -EINVAL;
+ goto ntuple_err;
+ }
+ }
if (fs->ring_cookie == RX_CLS_FLOW_DISC)
new_fltr->base.flags |= BNXT_ACT_DROP;
else
@@ -1444,12 +1519,12 @@ static int bnxt_srxclsrlins(struct bnxt *bp, struct ethtool_rxnfc *cmd)
flow_type == IPV6_USER_FLOW) &&
!(bp->fw_cap & BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO))
return -EOPNOTSUPP;
- if (flow_type & (FLOW_MAC_EXT | FLOW_RSS))
+ if (flow_type & FLOW_MAC_EXT)
return -EINVAL;
flow_type &= ~FLOW_EXT;
if (fs->ring_cookie == RX_CLS_FLOW_DISC && flow_type != ETHER_FLOW)
- return bnxt_add_ntuple_cls_rule(bp, fs);
+ return bnxt_add_ntuple_cls_rule(bp, cmd);
ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
@@ -1463,7 +1538,7 @@ static int bnxt_srxclsrlins(struct bnxt *bp, struct ethtool_rxnfc *cmd)
if (flow_type == ETHER_FLOW)
rc = bnxt_add_l2_cls_rule(bp, fs);
else
- rc = bnxt_add_ntuple_cls_rule(bp, fs);
+ rc = bnxt_add_ntuple_cls_rule(bp, cmd);
return rc;
}
@@ -1515,11 +1590,16 @@ static u64 get_ethtool_ipv6_rss(struct bnxt *bp)
{
if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
return RXH_IP_SRC | RXH_IP_DST;
+ if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL)
+ return RXH_IP_SRC | RXH_IP_DST | RXH_IP6_FL;
return 0;
}
-static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
+static int bnxt_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct bnxt *bp = netdev_priv(dev);
+
cmd->data = 0;
switch (cmd->flow_type) {
case TCP_V4_FLOW:
@@ -1578,20 +1658,30 @@ static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
#define RXH_4TUPLE (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
#define RXH_2TUPLE (RXH_IP_SRC | RXH_IP_DST)
-static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
+static int bnxt_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
{
- u32 rss_hash_cfg = bp->rss_hash_cfg;
+ struct bnxt *bp = netdev_priv(dev);
int tuple, rc = 0;
+ u32 rss_hash_cfg;
+
+ rss_hash_cfg = bp->rss_hash_cfg;
if (cmd->data == RXH_4TUPLE)
tuple = 4;
- else if (cmd->data == RXH_2TUPLE)
+ else if (cmd->data == RXH_2TUPLE ||
+ cmd->data == (RXH_2TUPLE | RXH_IP6_FL))
tuple = 2;
else if (!cmd->data)
tuple = 0;
else
return -EINVAL;
+ if (cmd->data & RXH_IP6_FL &&
+ !(bp->rss_cap & BNXT_RSS_CAP_IPV6_FLOW_LABEL_RSS_CAP))
+ return -EINVAL;
+
if (cmd->flow_type == TCP_V4_FLOW) {
rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
if (tuple == 4)
@@ -1655,10 +1745,15 @@ static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
case AH_V6_FLOW:
case ESP_V6_FLOW:
case IPV6_FLOW:
- if (tuple == 2)
+ rss_hash_cfg &= ~(VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL);
+ if (!tuple)
+ break;
+ if (cmd->data & RXH_IP6_FL)
+ rss_hash_cfg |=
+ VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL;
+ else if (tuple == 2)
rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
- else if (!tuple)
- rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
break;
}
@@ -1675,6 +1770,13 @@ static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
return rc;
}
+static u32 bnxt_get_rx_ring_count(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ return bp->rx_nr_rings;
+}
+
static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
@@ -1682,10 +1784,6 @@ static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
int rc = 0;
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = bp->rx_nr_rings;
- break;
-
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = bp->ntp_fltr_count;
cmd->data = bp->max_fltr | RX_CLS_LOC_SPECIAL;
@@ -1699,10 +1797,6 @@ static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
rc = bnxt_grxclsrule(bp, cmd);
break;
- case ETHTOOL_GRXFH:
- rc = bnxt_grxfh(bp, cmd);
- break;
-
default:
rc = -EOPNOTSUPP;
break;
@@ -1717,10 +1811,6 @@ static int bnxt_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
int rc;
switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- rc = bnxt_srxfh(bp, cmd);
- break;
-
case ETHTOOL_SRXCLSRLINS:
rc = bnxt_srxclsrlins(bp, cmd);
break;
@@ -1754,7 +1844,9 @@ static u32 bnxt_get_rxfh_key_size(struct net_device *dev)
static int bnxt_get_rxfh(struct net_device *dev,
struct ethtool_rxfh_param *rxfh)
{
+ struct bnxt_rss_ctx *rss_ctx = NULL;
struct bnxt *bp = netdev_priv(dev);
+ u32 *indir_tbl = bp->rss_indir_tbl;
struct bnxt_vnic_info *vnic;
u32 i, tbl_size;
@@ -1764,10 +1856,21 @@ static int bnxt_get_rxfh(struct net_device *dev,
return 0;
vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
- if (rxfh->indir && bp->rss_indir_tbl) {
+ if (rxfh->rss_context) {
+ struct ethtool_rxfh_context *ctx;
+
+ ctx = xa_load(&bp->dev->ethtool->rss_ctx, rxfh->rss_context);
+ if (!ctx)
+ return -EINVAL;
+ indir_tbl = ethtool_rxfh_context_indir(ctx);
+ rss_ctx = ethtool_rxfh_context_priv(ctx);
+ vnic = &rss_ctx->vnic;
+ }
+
+ if (rxfh->indir && indir_tbl) {
tbl_size = bnxt_get_rxfh_indir_size(dev);
for (i = 0; i < tbl_size; i++)
- rxfh->indir[i] = bp->rss_indir_tbl[i];
+ rxfh->indir[i] = indir_tbl[i];
}
if (rxfh->key && vnic->rss_hash_key)
@@ -1776,6 +1879,159 @@ static int bnxt_get_rxfh(struct net_device *dev,
return 0;
}
+static void bnxt_modify_rss(struct bnxt *bp, struct ethtool_rxfh_context *ctx,
+ struct bnxt_rss_ctx *rss_ctx,
+ const struct ethtool_rxfh_param *rxfh)
+{
+ if (rxfh->key) {
+ if (rss_ctx) {
+ memcpy(rss_ctx->vnic.rss_hash_key, rxfh->key,
+ HW_HASH_KEY_SIZE);
+ } else {
+ memcpy(bp->rss_hash_key, rxfh->key, HW_HASH_KEY_SIZE);
+ bp->rss_hash_key_updated = true;
+ }
+ }
+ if (rxfh->indir) {
+ u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
+ u32 *indir_tbl = bp->rss_indir_tbl;
+
+ if (rss_ctx)
+ indir_tbl = ethtool_rxfh_context_indir(ctx);
+ for (i = 0; i < tbl_size; i++)
+ indir_tbl[i] = rxfh->indir[i];
+ pad = bp->rss_indir_tbl_entries - tbl_size;
+ if (pad)
+ memset(&indir_tbl[i], 0, pad * sizeof(*indir_tbl));
+ }
+}
+
+static int bnxt_rxfh_context_check(struct bnxt *bp,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP) {
+ NL_SET_ERR_MSG_MOD(extack, "RSS hash function not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (!BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) {
+ NL_SET_ERR_MSG_MOD(extack, "RSS contexts not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (!netif_running(bp->dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to set RSS contexts when interface is down");
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static int bnxt_create_rxfh_context(struct net_device *dev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_rss_ctx *rss_ctx;
+ struct bnxt_vnic_info *vnic;
+ int rc;
+
+ rc = bnxt_rxfh_context_check(bp, rxfh, extack);
+ if (rc)
+ return rc;
+
+ if (bp->num_rss_ctx >= BNXT_MAX_ETH_RSS_CTX) {
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Out of RSS contexts, maximum %u",
+ BNXT_MAX_ETH_RSS_CTX);
+ return -EINVAL;
+ }
+
+ if (!bnxt_rfs_capable(bp, true)) {
+ NL_SET_ERR_MSG_MOD(extack, "Out hardware resources");
+ return -ENOMEM;
+ }
+
+ rss_ctx = ethtool_rxfh_context_priv(ctx);
+
+ bp->num_rss_ctx++;
+
+ vnic = &rss_ctx->vnic;
+ vnic->rss_ctx = ctx;
+ vnic->flags |= BNXT_VNIC_RSSCTX_FLAG;
+ vnic->vnic_id = BNXT_VNIC_ID_INVALID;
+ rc = bnxt_alloc_vnic_rss_table(bp, vnic);
+ if (rc)
+ goto out;
+
+ /* Populate defaults in the context */
+ bnxt_set_dflt_rss_indir_tbl(bp, ctx);
+ ctx->hfunc = ETH_RSS_HASH_TOP;
+ memcpy(vnic->rss_hash_key, bp->rss_hash_key, HW_HASH_KEY_SIZE);
+ memcpy(ethtool_rxfh_context_key(ctx),
+ bp->rss_hash_key, HW_HASH_KEY_SIZE);
+
+ rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to allocate VNIC");
+ goto out;
+ }
+
+ rc = bnxt_hwrm_vnic_set_tpa(bp, vnic, bp->flags & BNXT_FLAG_TPA);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to setup TPA");
+ goto out;
+ }
+ bnxt_modify_rss(bp, ctx, rss_ctx, rxfh);
+
+ rc = __bnxt_setup_vnic_p5(bp, vnic);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to setup TPA");
+ goto out;
+ }
+
+ rss_ctx->index = rxfh->rss_context;
+ return 0;
+out:
+ bnxt_del_one_rss_ctx(bp, rss_ctx, true);
+ return rc;
+}
+
+static int bnxt_modify_rxfh_context(struct net_device *dev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_rss_ctx *rss_ctx;
+ int rc;
+
+ rc = bnxt_rxfh_context_check(bp, rxfh, extack);
+ if (rc)
+ return rc;
+
+ rss_ctx = ethtool_rxfh_context_priv(ctx);
+
+ bnxt_modify_rss(bp, ctx, rss_ctx, rxfh);
+
+ return bnxt_hwrm_vnic_rss_cfg_p5(bp, &rss_ctx->vnic);
+}
+
+static int bnxt_remove_rxfh_context(struct net_device *dev,
+ struct ethtool_rxfh_context *ctx,
+ u32 rss_context,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_rss_ctx *rss_ctx;
+
+ rss_ctx = ethtool_rxfh_context_priv(ctx);
+
+ bnxt_del_one_rss_ctx(bp, rss_ctx, true);
+ return 0;
+}
+
static int bnxt_set_rxfh(struct net_device *dev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
@@ -1786,21 +2042,8 @@ static int bnxt_set_rxfh(struct net_device *dev,
if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (rxfh->key) {
- memcpy(bp->rss_hash_key, rxfh->key, HW_HASH_KEY_SIZE);
- bp->rss_hash_key_updated = true;
- }
+ bnxt_modify_rss(bp, NULL, NULL, rxfh);
- if (rxfh->indir) {
- u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(dev);
-
- for (i = 0; i < tbl_size; i++)
- bp->rss_indir_tbl[i] = rxfh->indir[i];
- pad = bp->rss_indir_tbl_entries - tbl_size;
- if (pad)
- memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
- }
- bnxt_clear_usr_fltrs(bp, false);
if (netif_running(bp->dev)) {
bnxt_close_nic(bp, false, false);
rc = bnxt_open_nic(bp, false, false);
@@ -1827,30 +2070,56 @@ static void bnxt_get_drvinfo(struct net_device *dev,
static int bnxt_get_regs_len(struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
- int reg_len;
if (!BNXT_PF(bp))
return -EOPNOTSUPP;
- reg_len = BNXT_PXP_REG_LEN;
+ return BNXT_PXP_REG_LEN + bp->pcie_stat_len;
+}
+
+static void *
+__bnxt_hwrm_pcie_qstats(struct bnxt *bp, struct hwrm_pcie_qstats_input *req)
+{
+ struct pcie_ctx_hw_stats_v2 *hw_pcie_stats;
+ dma_addr_t hw_pcie_stats_addr;
+ int rc;
+
+ hw_pcie_stats = hwrm_req_dma_slice(bp, req, sizeof(*hw_pcie_stats),
+ &hw_pcie_stats_addr);
+ if (!hw_pcie_stats)
+ return NULL;
- if (bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)
- reg_len += sizeof(struct pcie_ctx_hw_stats);
+ req->pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats));
+ req->pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr);
+ rc = hwrm_req_send(bp, req);
- return reg_len;
+ return rc ? NULL : hw_pcie_stats;
}
+#define BNXT_PCIE_32B_ENTRY(start, end) \
+ { offsetof(struct pcie_ctx_hw_stats_v2, start),\
+ offsetof(struct pcie_ctx_hw_stats_v2, end) }
+
+static const struct {
+ u16 start;
+ u16 end;
+} bnxt_pcie_32b_entries[] = {
+ BNXT_PCIE_32B_ENTRY(pcie_ltssm_histogram[0], pcie_ltssm_histogram[3]),
+ BNXT_PCIE_32B_ENTRY(pcie_tl_credit_nph_histogram[0], unused_1),
+ BNXT_PCIE_32B_ENTRY(pcie_rd_latency_histogram[0], unused_2),
+};
+
static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
void *_p)
{
- struct pcie_ctx_hw_stats *hw_pcie_stats;
+ struct hwrm_pcie_qstats_output *resp;
struct hwrm_pcie_qstats_input *req;
struct bnxt *bp = netdev_priv(dev);
- dma_addr_t hw_pcie_stats_addr;
- int rc;
+ u8 *src;
regs->version = 0;
- bnxt_dbg_hwrm_rd_reg(bp, 0, BNXT_PXP_REG_LEN / 4, _p);
+ if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_REG_ACCESS_RESTRICTED))
+ bnxt_dbg_hwrm_rd_reg(bp, 0, BNXT_PXP_REG_LEN / 4, _p);
if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
return;
@@ -1858,25 +2127,37 @@ static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
if (hwrm_req_init(bp, req, HWRM_PCIE_QSTATS))
return;
- hw_pcie_stats = hwrm_req_dma_slice(bp, req, sizeof(*hw_pcie_stats),
- &hw_pcie_stats_addr);
- if (!hw_pcie_stats) {
- hwrm_req_drop(bp, req);
- return;
- }
-
- regs->version = 1;
- hwrm_req_hold(bp, req); /* hold on to slice */
- req->pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats));
- req->pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr);
- rc = hwrm_req_send(bp, req);
- if (!rc) {
- __le64 *src = (__le64 *)hw_pcie_stats;
- u64 *dst = (u64 *)(_p + BNXT_PXP_REG_LEN);
- int i;
-
- for (i = 0; i < sizeof(*hw_pcie_stats) / sizeof(__le64); i++)
- dst[i] = le64_to_cpu(src[i]);
+ resp = hwrm_req_hold(bp, req);
+ src = __bnxt_hwrm_pcie_qstats(bp, req);
+ if (src) {
+ u8 *dst = (u8 *)(_p + BNXT_PXP_REG_LEN);
+ int i, j, len;
+
+ len = min(bp->pcie_stat_len, le16_to_cpu(resp->pcie_stat_size));
+ if (len <= sizeof(struct pcie_ctx_hw_stats))
+ regs->version = 1;
+ else if (len < sizeof(struct pcie_ctx_hw_stats_v2))
+ regs->version = 2;
+ else
+ regs->version = 3;
+
+ for (i = 0, j = 0; i < len; ) {
+ if (i >= bnxt_pcie_32b_entries[j].start &&
+ i <= bnxt_pcie_32b_entries[j].end) {
+ u32 *dst32 = (u32 *)(dst + i);
+
+ *dst32 = le32_to_cpu(*(__le32 *)(src + i));
+ i += 4;
+ if (i > bnxt_pcie_32b_entries[j].end &&
+ j < ARRAY_SIZE(bnxt_pcie_32b_entries) - 1)
+ j++;
+ } else {
+ u64 *dst64 = (u64 *)(dst + i);
+
+ *dst64 = le64_to_cpu(*(__le64 *)(src + i));
+ i += 8;
+ }
+ }
}
hwrm_req_drop(bp, req);
}
@@ -2641,19 +2922,24 @@ static int bnxt_get_link_ksettings(struct net_device *dev,
}
base->port = PORT_NONE;
- if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
+ if (media == BNXT_MEDIA_TP) {
base->port = PORT_TP;
linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT,
lk_ksettings->link_modes.supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT,
lk_ksettings->link_modes.advertising);
+ } else if (media == BNXT_MEDIA_KR) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT,
+ lk_ksettings->link_modes.supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT,
+ lk_ksettings->link_modes.advertising);
} else {
linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
lk_ksettings->link_modes.supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
lk_ksettings->link_modes.advertising);
- if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC)
+ if (media == BNXT_MEDIA_CR)
base->port = PORT_DA;
else
base->port = PORT_FIBRE;
@@ -2931,7 +3217,8 @@ static int bnxt_get_fecparam(struct net_device *dev,
}
static void bnxt_get_fec_stats(struct net_device *dev,
- struct ethtool_fec_stats *fec_stats)
+ struct ethtool_fec_stats *fec_stats,
+ struct ethtool_fec_hist *hist)
{
struct bnxt *bp = netdev_priv(dev);
u64 *rx;
@@ -3956,12 +4243,12 @@ err:
static void bnxt_get_pkgver(struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
- char buf[FW_VER_STR_LEN];
+ char buf[FW_VER_STR_LEN - 5];
int len;
if (!bnxt_get_pkginfo(dev, buf, sizeof(buf))) {
len = strlen(bp->fw_ver_str);
- snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1,
+ snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len,
"/pkg %s", buf);
}
}
@@ -4122,6 +4409,88 @@ static int bnxt_get_eee(struct net_device *dev, struct ethtool_keee *edata)
return 0;
}
+static int bnxt_hwrm_pfcwd_qcfg(struct bnxt *bp, u16 *val)
+{
+ struct hwrm_queue_pfcwd_timeout_qcfg_output *resp;
+ struct hwrm_queue_pfcwd_timeout_qcfg_input *req;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCWD_TIMEOUT_QCFG);
+ if (rc)
+ return rc;
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
+ if (!rc)
+ *val = le16_to_cpu(resp->pfcwd_timeout_value);
+ hwrm_req_drop(bp, req);
+ return rc;
+}
+
+static int bnxt_hwrm_pfcwd_cfg(struct bnxt *bp, u16 val)
+{
+ struct hwrm_queue_pfcwd_timeout_cfg_input *req;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCWD_TIMEOUT_CFG);
+ if (rc)
+ return rc;
+ req->pfcwd_timeout_value = cpu_to_le16(val);
+ rc = hwrm_req_send(bp, req);
+ return rc;
+}
+
+static int bnxt_set_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna,
+ const void *data)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ u32 rx_copybreak, val;
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ rx_copybreak = *(u32 *)data;
+ if (rx_copybreak > BNXT_MAX_RX_COPYBREAK)
+ return -ERANGE;
+ if (rx_copybreak != bp->rx_copybreak) {
+ if (netif_running(dev))
+ return -EBUSY;
+ bp->rx_copybreak = rx_copybreak;
+ }
+ return 0;
+ case ETHTOOL_PFC_PREVENTION_TOUT:
+ if (BNXT_VF(bp) || !bp->max_pfcwd_tmo_ms)
+ return -EOPNOTSUPP;
+
+ val = *(u16 *)data;
+ if (val > bp->max_pfcwd_tmo_ms &&
+ val != PFC_STORM_PREVENTION_AUTO)
+ return -EINVAL;
+ return bnxt_hwrm_pfcwd_cfg(bp, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int bnxt_get_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna, void *data)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ *(u32 *)data = bp->rx_copybreak;
+ break;
+ case ETHTOOL_PFC_PREVENTION_TOUT:
+ if (!bp->max_pfcwd_tmo_ms)
+ return -EOPNOTSUPP;
+ return bnxt_hwrm_pfcwd_qcfg(bp, data);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr,
u16 page_number, u8 bank,
u16 start_addr, u16 data_length,
@@ -4170,6 +4539,9 @@ static int bnxt_get_module_info(struct net_device *dev,
struct bnxt *bp = netdev_priv(dev);
int rc;
+ if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
+ return -EPERM;
+
/* No point in going further if phy status indicates
* module is not inserted or if it is powered down or
* if it is of type 10GBase-T
@@ -4221,6 +4593,9 @@ static int bnxt_get_module_eeprom(struct net_device *dev,
u16 start = eeprom->offset, length = eeprom->len;
int rc = 0;
+ if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
+ return -EPERM;
+
memset(data, 0, eeprom->len);
/* Read A0 portion of the EEPROM */
@@ -4251,6 +4626,11 @@ static int bnxt_get_module_status(struct bnxt *bp, struct netlink_ext_ack *extac
PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG)
return 0;
+ if (bp->link_info.phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET ||
	    bp->link_info.phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE) {
+ NL_SET_ERR_MSG_MOD(extack, "Operation not supported as PHY type is Base-T");
+ return -EOPNOTSUPP;
+ }
switch (bp->link_info.module_status) {
case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
NL_SET_ERR_MSG_MOD(extack, "Transceiver module is powering down");
@@ -4268,13 +4648,19 @@ static int bnxt_get_module_status(struct bnxt *bp, struct netlink_ext_ack *extac
return -EINVAL;
}
-static int bnxt_get_module_eeprom_by_page(struct net_device *dev,
- const struct ethtool_module_eeprom *page_data,
- struct netlink_ext_ack *extack)
+static int
+bnxt_mod_eeprom_by_page_precheck(struct bnxt *bp,
+ const struct ethtool_module_eeprom *page_data,
+ struct netlink_ext_ack *extack)
{
- struct bnxt *bp = netdev_priv(dev);
int rc;
+ if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Module read/write not permitted on untrusted VF");
+ return -EPERM;
+ }
+
rc = bnxt_get_module_status(bp, extack);
if (rc)
return rc;
@@ -4288,6 +4674,19 @@ static int bnxt_get_module_eeprom_by_page(struct net_device *dev,
NL_SET_ERR_MSG_MOD(extack, "Firmware not capable for bank selection");
return -EINVAL;
}
+ return 0;
+}
+
+static int bnxt_get_module_eeprom_by_page(struct net_device *dev,
+ const struct ethtool_module_eeprom *page_data,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc;
+
+ rc = bnxt_mod_eeprom_by_page_precheck(bp, page_data, extack);
+ if (rc)
+ return rc;
rc = bnxt_read_sfp_module_eeprom_info(bp, page_data->i2c_address << 1,
page_data->page, page_data->bank,
@@ -4301,6 +4700,62 @@ static int bnxt_get_module_eeprom_by_page(struct net_device *dev,
return page_data->length;
}
+static int bnxt_write_sfp_module_eeprom_info(struct bnxt *bp,
+ const struct ethtool_module_eeprom *page)
+{
+ struct hwrm_port_phy_i2c_write_input *req;
+ int bytes_written = 0;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_I2C_WRITE);
+ if (rc)
+ return rc;
+
+ hwrm_req_hold(bp, req);
+ req->i2c_slave_addr = page->i2c_address << 1;
+ req->page_number = cpu_to_le16(page->page);
+ req->bank_number = page->bank;
+ req->port_id = cpu_to_le16(bp->pf.port_id);
+ req->enables = cpu_to_le32(PORT_PHY_I2C_WRITE_REQ_ENABLES_PAGE_OFFSET |
+ PORT_PHY_I2C_WRITE_REQ_ENABLES_BANK_NUMBER);
+
+ while (bytes_written < page->length) {
+ u16 xfer_size;
+
+ xfer_size = min_t(u16, page->length - bytes_written,
+ BNXT_MAX_PHY_I2C_RESP_SIZE);
+ req->page_offset = cpu_to_le16(page->offset + bytes_written);
+ req->data_length = xfer_size;
+ memcpy(req->data, page->data + bytes_written, xfer_size);
+ rc = hwrm_req_send(bp, req);
+ if (rc)
+ break;
+ bytes_written += xfer_size;
+ }
+
+ hwrm_req_drop(bp, req);
+ return rc;
+}
+
+static int bnxt_set_module_eeprom_by_page(struct net_device *dev,
+ const struct ethtool_module_eeprom *page_data,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc;
+
+ rc = bnxt_mod_eeprom_by_page_precheck(bp, page_data, extack);
+ if (rc)
+ return rc;
+
+ rc = bnxt_write_sfp_module_eeprom_info(bp, page_data);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Module`s eeprom write failed");
+ return rc;
+ }
+ return page_data->length;
+}
+
static int bnxt_nway_reset(struct net_device *dev)
{
int rc = 0;
@@ -4572,7 +5027,8 @@ static int bnxt_run_loopback(struct bnxt *bp)
cpr = &rxr->bnapi->cp_ring;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
cpr = rxr->rx_cpr;
- pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh);
+ pkt_size = min(bp->dev->mtu + ETH_HLEN, max(BNXT_DEFAULT_RX_COPYBREAK,
+ bp->rx_copybreak));
skb = netdev_alloc_skb(bp->dev, pkt_size);
if (!skb)
return -ENOMEM;
@@ -4641,7 +5097,15 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
if (!bp->num_tests || !BNXT_PF(bp))
return;
+
memset(buf, 0, sizeof(u64) * bp->num_tests);
+ if (etest->flags & ETH_TEST_FL_OFFLINE &&
+ bnxt_ulp_registered(bp->edev)) {
+ etest->flags |= ETH_TEST_FL_FAILED;
+ netdev_warn(dev, "Offline tests cannot be run with RoCE driver loaded\n");
+ return;
+ }
+
if (!netif_running(dev)) {
etest->flags |= ETH_TEST_FL_FAILED;
return;
@@ -4671,45 +5135,51 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
if (!offline) {
bnxt_run_fw_tests(bp, test_mask, &test_results);
} else {
- bnxt_ulp_stop(bp);
bnxt_close_nic(bp, true, false);
bnxt_run_fw_tests(bp, test_mask, &test_results);
- buf[BNXT_MACLPBK_TEST_IDX] = 1;
- bnxt_hwrm_mac_loopback(bp, true);
- msleep(250);
rc = bnxt_half_open_nic(bp);
if (rc) {
- bnxt_hwrm_mac_loopback(bp, false);
etest->flags |= ETH_TEST_FL_FAILED;
- bnxt_ulp_start(bp, rc);
return;
}
+ buf[BNXT_MACLPBK_TEST_IDX] = 1;
+ if (bp->mac_flags & BNXT_MAC_FL_NO_MAC_LPBK)
+ goto skip_mac_loopback;
+
+ bnxt_hwrm_mac_loopback(bp, true);
+ msleep(250);
if (bnxt_run_loopback(bp))
etest->flags |= ETH_TEST_FL_FAILED;
else
buf[BNXT_MACLPBK_TEST_IDX] = 0;
bnxt_hwrm_mac_loopback(bp, false);
+skip_mac_loopback:
+ buf[BNXT_PHYLPBK_TEST_IDX] = 1;
+ if (bp->phy_flags & BNXT_PHY_FL_NO_PHY_LPBK)
+ goto skip_phy_loopback;
+
bnxt_hwrm_phy_loopback(bp, true, false);
msleep(1000);
- if (bnxt_run_loopback(bp)) {
- buf[BNXT_PHYLPBK_TEST_IDX] = 1;
+ if (bnxt_run_loopback(bp))
etest->flags |= ETH_TEST_FL_FAILED;
- }
+ else
+ buf[BNXT_PHYLPBK_TEST_IDX] = 0;
+skip_phy_loopback:
+ buf[BNXT_EXTLPBK_TEST_IDX] = 1;
if (do_ext_lpbk) {
etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
bnxt_hwrm_phy_loopback(bp, true, true);
msleep(1000);
- if (bnxt_run_loopback(bp)) {
- buf[BNXT_EXTLPBK_TEST_IDX] = 1;
+ if (bnxt_run_loopback(bp))
etest->flags |= ETH_TEST_FL_FAILED;
- }
+ else
+ buf[BNXT_EXTLPBK_TEST_IDX] = 0;
}
bnxt_hwrm_phy_loopback(bp, false, false);
bnxt_half_close_nic(bp);
rc = bnxt_open_nic(bp, true, true);
- bnxt_ulp_start(bp, rc);
}
if (rc || bnxt_test_irq(bp)) {
buf[BNXT_IRQ_TEST_IDX] = 1;
@@ -4783,14 +5253,22 @@ static int bnxt_set_dump(struct net_device *dev, struct ethtool_dump *dump)
{
struct bnxt *bp = netdev_priv(dev);
- if (dump->flag > BNXT_DUMP_CRASH) {
- netdev_info(dev, "Supports only Live(0) and Crash(1) dumps.\n");
+ if (dump->flag > BNXT_DUMP_LIVE_WITH_CTX_L1_CACHE) {
+ netdev_info(dev,
+ "Supports only Live(0), Crash(1), Driver(2), Live with cached context(3) dumps.\n");
return -EINVAL;
}
- if (!IS_ENABLED(CONFIG_TEE_BNXT_FW) && dump->flag == BNXT_DUMP_CRASH) {
- netdev_info(dev, "Cannot collect crash dump as TEE_BNXT_FW config option is not enabled.\n");
- return -EOPNOTSUPP;
+ if (dump->flag == BNXT_DUMP_CRASH) {
+ if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR &&
+ (!IS_ENABLED(CONFIG_TEE_BNXT_FW))) {
+ netdev_info(dev,
+ "Cannot collect crash dump as TEE_BNXT_FW config option is not enabled.\n");
+ return -EOPNOTSUPP;
+ } else if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR)) {
+ netdev_info(dev, "Crash dump collection from host memory is not supported on this interface.\n");
+ return -EOPNOTSUPP;
+ }
}
bp->dump_flag = dump->flag;
@@ -4829,17 +5307,14 @@ static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump,
}
static int bnxt_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct bnxt *bp = netdev_priv(dev);
struct bnxt_ptp_cfg *ptp;
ptp = bp->ptp_cfg;
- info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
- info->phc_index = -1;
if (!ptp)
return 0;
@@ -4860,6 +5335,26 @@ static int bnxt_get_ts_info(struct net_device *dev,
return 0;
}
+static void bnxt_hwrm_pcie_qstats(struct bnxt *bp)
+{
+ struct hwrm_pcie_qstats_output *resp;
+ struct hwrm_pcie_qstats_input *req;
+
+ bp->pcie_stat_len = 0;
+ if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
+ return;
+
+ if (hwrm_req_init(bp, req, HWRM_PCIE_QSTATS))
+ return;
+
+ resp = hwrm_req_hold(bp, req);
+ if (__bnxt_hwrm_pcie_qstats(bp, req))
+ bp->pcie_stat_len = min_t(u16,
+ le16_to_cpu(resp->pcie_stat_size),
+ sizeof(struct pcie_ctx_hw_stats_v2));
+ hwrm_req_drop(bp, req);
+}
+
void bnxt_ethtool_init(struct bnxt *bp)
{
struct hwrm_selftest_qlist_output *resp;
@@ -4868,6 +5363,7 @@ void bnxt_ethtool_init(struct bnxt *bp)
struct net_device *dev = bp->dev;
int i, rc;
+ bnxt_hwrm_pcie_qstats(bp);
if (!(bp->fw_cap & BNXT_FW_CAP_PKG_VER))
bnxt_get_pkgver(dev);
@@ -5049,6 +5545,19 @@ static void bnxt_get_rmon_stats(struct net_device *dev,
*ranges = bnxt_rmon_ranges;
}
+static void bnxt_get_ptp_stats(struct net_device *dev,
+ struct ethtool_ts_stats *ts_stats)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+ if (ptp) {
+ ts_stats->pkts = ptp->stats.ts_pkts;
+ ts_stats->lost = ptp->stats.ts_lost;
+ ts_stats->err = atomic64_read(&ptp->stats.ts_err);
+ }
+}
+
static void bnxt_get_link_ext_stats(struct net_device *dev,
struct ethtool_link_ext_stats *stats)
{
@@ -5071,6 +5580,10 @@ void bnxt_ethtool_free(struct bnxt *bp)
const struct ethtool_ops bnxt_ethtool_ops = {
.cap_link_lanes_supported = 1,
+ .rxfh_per_ctx_key = 1,
+ .rxfh_max_num_contexts = BNXT_MAX_ETH_RSS_CTX + 1,
+ .rxfh_indir_space = BNXT_MAX_RSS_TABLE_ENTRIES_P5,
+ .rxfh_priv_size = sizeof(struct bnxt_rss_ctx),
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USECS_IRQ |
@@ -5078,6 +5591,8 @@ const struct ethtool_ops bnxt_ethtool_ops = {
ETHTOOL_COALESCE_STATS_BLOCK_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
ETHTOOL_COALESCE_USE_CQE,
+ .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT |
+ ETHTOOL_RING_USE_HDS_THRS,
.get_link_ksettings = bnxt_get_link_ksettings,
.set_link_ksettings = bnxt_set_link_ksettings,
.get_fec_stats = bnxt_get_fec_stats,
@@ -5104,10 +5619,16 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.set_channels = bnxt_set_channels,
.get_rxnfc = bnxt_get_rxnfc,
.set_rxnfc = bnxt_set_rxnfc,
+ .get_rx_ring_count = bnxt_get_rx_ring_count,
.get_rxfh_indir_size = bnxt_get_rxfh_indir_size,
.get_rxfh_key_size = bnxt_get_rxfh_key_size,
.get_rxfh = bnxt_get_rxfh,
.set_rxfh = bnxt_set_rxfh,
+ .get_rxfh_fields = bnxt_get_rxfh_fields,
+ .set_rxfh_fields = bnxt_set_rxfh_fields,
+ .create_rxfh_context = bnxt_create_rxfh_context,
+ .modify_rxfh_context = bnxt_modify_rxfh_context,
+ .remove_rxfh_context = bnxt_remove_rxfh_context,
.flash_device = bnxt_flash_device,
.get_eeprom_len = bnxt_get_eeprom_len,
.get_eeprom = bnxt_get_eeprom,
@@ -5116,9 +5637,12 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.get_link_ext_stats = bnxt_get_link_ext_stats,
.get_eee = bnxt_get_eee,
.set_eee = bnxt_set_eee,
+ .get_tunable = bnxt_get_tunable,
+ .set_tunable = bnxt_set_tunable,
.get_module_info = bnxt_get_module_info,
.get_module_eeprom = bnxt_get_module_eeprom,
.get_module_eeprom_by_page = bnxt_get_module_eeprom_by_page,
+ .set_module_eeprom_by_page = bnxt_set_module_eeprom_by_page,
.nway_reset = bnxt_nway_reset,
.set_phys_id = bnxt_set_phys_id,
.self_test = bnxt_self_test,
@@ -5131,4 +5655,5 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.get_eth_mac_stats = bnxt_get_eth_mac_stats,
.get_eth_ctrl_stats = bnxt_get_eth_ctrl_stats,
.get_rmon_stats = bnxt_get_rmon_stats,
+ .get_ts_stats = bnxt_get_ptp_stats,
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index e2ee030237d4..33b86ede1ce5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -44,6 +44,7 @@ struct bnxt_led_cfg {
#define BNXT_PXP_REG_LEN 0x3110
#define BNXT_IP_PROTO_FULL_MASK 0xFF
+#define BNXT_IP_PROTO_WILDCARD 0x0
extern const struct ethtool_ops bnxt_ethtool_ops;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
deleted file mode 100644
index e957abd704db..000000000000
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ /dev/null
@@ -1,10476 +0,0 @@
-/* Broadcom NetXtreme-C/E network driver.
- *
- * Copyright (c) 2014-2016 Broadcom Corporation
- * Copyright (c) 2014-2018 Broadcom Limited
- * Copyright (c) 2018-2023 Broadcom Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation.
- *
- * DO NOT MODIFY!!! This file is automatically generated.
- */
-
-#ifndef _BNXT_HSI_H_
-#define _BNXT_HSI_H_
-
-/* hwrm_cmd_hdr (size:128b/16B) */
-struct hwrm_cmd_hdr {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
-};
-
-/* hwrm_resp_hdr (size:64b/8B) */
-struct hwrm_resp_hdr {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
-};
-
-#define CMD_DISCR_TLV_ENCAP 0x8000UL
-#define CMD_DISCR_LAST CMD_DISCR_TLV_ENCAP
-
-
-#define TLV_TYPE_HWRM_REQUEST 0x1UL
-#define TLV_TYPE_HWRM_RESPONSE 0x2UL
-#define TLV_TYPE_ROCE_SP_COMMAND 0x3UL
-#define TLV_TYPE_QUERY_ROCE_CC_GEN1 0x4UL
-#define TLV_TYPE_MODIFY_ROCE_CC_GEN1 0x5UL
-#define TLV_TYPE_QUERY_ROCE_CC_GEN2 0x6UL
-#define TLV_TYPE_MODIFY_ROCE_CC_GEN2 0x7UL
-#define TLV_TYPE_ENGINE_CKV_ALIAS_ECC_PUBLIC_KEY 0x8001UL
-#define TLV_TYPE_ENGINE_CKV_IV 0x8003UL
-#define TLV_TYPE_ENGINE_CKV_AUTH_TAG 0x8004UL
-#define TLV_TYPE_ENGINE_CKV_CIPHERTEXT 0x8005UL
-#define TLV_TYPE_ENGINE_CKV_HOST_ALGORITHMS 0x8006UL
-#define TLV_TYPE_ENGINE_CKV_HOST_ECC_PUBLIC_KEY 0x8007UL
-#define TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE 0x8008UL
-#define TLV_TYPE_ENGINE_CKV_FW_ECC_PUBLIC_KEY 0x8009UL
-#define TLV_TYPE_ENGINE_CKV_FW_ALGORITHMS 0x800aUL
-#define TLV_TYPE_LAST TLV_TYPE_ENGINE_CKV_FW_ALGORITHMS
-
-
-/* tlv (size:64b/8B) */
-struct tlv {
- __le16 cmd_discr;
- u8 reserved_8b;
- u8 flags;
- #define TLV_FLAGS_MORE 0x1UL
- #define TLV_FLAGS_MORE_LAST 0x0UL
- #define TLV_FLAGS_MORE_NOT_LAST 0x1UL
- #define TLV_FLAGS_REQUIRED 0x2UL
- #define TLV_FLAGS_REQUIRED_NO (0x0UL << 1)
- #define TLV_FLAGS_REQUIRED_YES (0x1UL << 1)
- #define TLV_FLAGS_REQUIRED_LAST TLV_FLAGS_REQUIRED_YES
- __le16 tlv_type;
- __le16 length;
-};
-
-/* input (size:128b/16B) */
-struct input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
-};
-
-/* output (size:64b/8B) */
-struct output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
-};
-
-/* hwrm_short_input (size:128b/16B) */
-struct hwrm_short_input {
- __le16 req_type;
- __le16 signature;
- #define SHORT_REQ_SIGNATURE_SHORT_CMD 0x4321UL
- #define SHORT_REQ_SIGNATURE_LAST SHORT_REQ_SIGNATURE_SHORT_CMD
- __le16 target_id;
- #define SHORT_REQ_TARGET_ID_DEFAULT 0x0UL
- #define SHORT_REQ_TARGET_ID_TOOLS 0xfffdUL
- #define SHORT_REQ_TARGET_ID_LAST SHORT_REQ_TARGET_ID_TOOLS
- __le16 size;
- __le64 req_addr;
-};
-
-/* cmd_nums (size:64b/8B) */
-struct cmd_nums {
- __le16 req_type;
- #define HWRM_VER_GET 0x0UL
- #define HWRM_FUNC_ECHO_RESPONSE 0xbUL
- #define HWRM_ERROR_RECOVERY_QCFG 0xcUL
- #define HWRM_FUNC_DRV_IF_CHANGE 0xdUL
- #define HWRM_FUNC_BUF_UNRGTR 0xeUL
- #define HWRM_FUNC_VF_CFG 0xfUL
- #define HWRM_RESERVED1 0x10UL
- #define HWRM_FUNC_RESET 0x11UL
- #define HWRM_FUNC_GETFID 0x12UL
- #define HWRM_FUNC_VF_ALLOC 0x13UL
- #define HWRM_FUNC_VF_FREE 0x14UL
- #define HWRM_FUNC_QCAPS 0x15UL
- #define HWRM_FUNC_QCFG 0x16UL
- #define HWRM_FUNC_CFG 0x17UL
- #define HWRM_FUNC_QSTATS 0x18UL
- #define HWRM_FUNC_CLR_STATS 0x19UL
- #define HWRM_FUNC_DRV_UNRGTR 0x1aUL
- #define HWRM_FUNC_VF_RESC_FREE 0x1bUL
- #define HWRM_FUNC_VF_VNIC_IDS_QUERY 0x1cUL
- #define HWRM_FUNC_DRV_RGTR 0x1dUL
- #define HWRM_FUNC_DRV_QVER 0x1eUL
- #define HWRM_FUNC_BUF_RGTR 0x1fUL
- #define HWRM_PORT_PHY_CFG 0x20UL
- #define HWRM_PORT_MAC_CFG 0x21UL
- #define HWRM_PORT_TS_QUERY 0x22UL
- #define HWRM_PORT_QSTATS 0x23UL
- #define HWRM_PORT_LPBK_QSTATS 0x24UL
- #define HWRM_PORT_CLR_STATS 0x25UL
- #define HWRM_PORT_LPBK_CLR_STATS 0x26UL
- #define HWRM_PORT_PHY_QCFG 0x27UL
- #define HWRM_PORT_MAC_QCFG 0x28UL
- #define HWRM_PORT_MAC_PTP_QCFG 0x29UL
- #define HWRM_PORT_PHY_QCAPS 0x2aUL
- #define HWRM_PORT_PHY_I2C_WRITE 0x2bUL
- #define HWRM_PORT_PHY_I2C_READ 0x2cUL
- #define HWRM_PORT_LED_CFG 0x2dUL
- #define HWRM_PORT_LED_QCFG 0x2eUL
- #define HWRM_PORT_LED_QCAPS 0x2fUL
- #define HWRM_QUEUE_QPORTCFG 0x30UL
- #define HWRM_QUEUE_QCFG 0x31UL
- #define HWRM_QUEUE_CFG 0x32UL
- #define HWRM_FUNC_VLAN_CFG 0x33UL
- #define HWRM_FUNC_VLAN_QCFG 0x34UL
- #define HWRM_QUEUE_PFCENABLE_QCFG 0x35UL
- #define HWRM_QUEUE_PFCENABLE_CFG 0x36UL
- #define HWRM_QUEUE_PRI2COS_QCFG 0x37UL
- #define HWRM_QUEUE_PRI2COS_CFG 0x38UL
- #define HWRM_QUEUE_COS2BW_QCFG 0x39UL
- #define HWRM_QUEUE_COS2BW_CFG 0x3aUL
- #define HWRM_QUEUE_DSCP_QCAPS 0x3bUL
- #define HWRM_QUEUE_DSCP2PRI_QCFG 0x3cUL
- #define HWRM_QUEUE_DSCP2PRI_CFG 0x3dUL
- #define HWRM_VNIC_ALLOC 0x40UL
- #define HWRM_VNIC_FREE 0x41UL
- #define HWRM_VNIC_CFG 0x42UL
- #define HWRM_VNIC_QCFG 0x43UL
- #define HWRM_VNIC_TPA_CFG 0x44UL
- #define HWRM_VNIC_TPA_QCFG 0x45UL
- #define HWRM_VNIC_RSS_CFG 0x46UL
- #define HWRM_VNIC_RSS_QCFG 0x47UL
- #define HWRM_VNIC_PLCMODES_CFG 0x48UL
- #define HWRM_VNIC_PLCMODES_QCFG 0x49UL
- #define HWRM_VNIC_QCAPS 0x4aUL
- #define HWRM_VNIC_UPDATE 0x4bUL
- #define HWRM_RING_ALLOC 0x50UL
- #define HWRM_RING_FREE 0x51UL
- #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS 0x52UL
- #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS 0x53UL
- #define HWRM_RING_AGGINT_QCAPS 0x54UL
- #define HWRM_RING_SCHQ_ALLOC 0x55UL
- #define HWRM_RING_SCHQ_CFG 0x56UL
- #define HWRM_RING_SCHQ_FREE 0x57UL
- #define HWRM_RING_RESET 0x5eUL
- #define HWRM_RING_GRP_ALLOC 0x60UL
- #define HWRM_RING_GRP_FREE 0x61UL
- #define HWRM_RING_CFG 0x62UL
- #define HWRM_RING_QCFG 0x63UL
- #define HWRM_RESERVED5 0x64UL
- #define HWRM_RESERVED6 0x65UL
- #define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC 0x70UL
- #define HWRM_VNIC_RSS_COS_LB_CTX_FREE 0x71UL
- #define HWRM_QUEUE_MPLS_QCAPS 0x80UL
- #define HWRM_QUEUE_MPLSTC2PRI_QCFG 0x81UL
- #define HWRM_QUEUE_MPLSTC2PRI_CFG 0x82UL
- #define HWRM_QUEUE_VLANPRI_QCAPS 0x83UL
- #define HWRM_QUEUE_VLANPRI2PRI_QCFG 0x84UL
- #define HWRM_QUEUE_VLANPRI2PRI_CFG 0x85UL
- #define HWRM_QUEUE_GLOBAL_CFG 0x86UL
- #define HWRM_QUEUE_GLOBAL_QCFG 0x87UL
- #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG 0x88UL
- #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG 0x89UL
- #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG 0x8aUL
- #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG 0x8bUL
- #define HWRM_QUEUE_QCAPS 0x8cUL
- #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_QCFG 0x8dUL
- #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_CFG 0x8eUL
- #define HWRM_QUEUE_ADPTV_QOS_TX_TUNING_QCFG 0x8fUL
- #define HWRM_CFA_L2_FILTER_ALLOC 0x90UL
- #define HWRM_CFA_L2_FILTER_FREE 0x91UL
- #define HWRM_CFA_L2_FILTER_CFG 0x92UL
- #define HWRM_CFA_L2_SET_RX_MASK 0x93UL
- #define HWRM_CFA_VLAN_ANTISPOOF_CFG 0x94UL
- #define HWRM_CFA_TUNNEL_FILTER_ALLOC 0x95UL
- #define HWRM_CFA_TUNNEL_FILTER_FREE 0x96UL
- #define HWRM_CFA_ENCAP_RECORD_ALLOC 0x97UL
- #define HWRM_CFA_ENCAP_RECORD_FREE 0x98UL
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC 0x99UL
- #define HWRM_CFA_NTUPLE_FILTER_FREE 0x9aUL
- #define HWRM_CFA_NTUPLE_FILTER_CFG 0x9bUL
- #define HWRM_CFA_EM_FLOW_ALLOC 0x9cUL
- #define HWRM_CFA_EM_FLOW_FREE 0x9dUL
- #define HWRM_CFA_EM_FLOW_CFG 0x9eUL
- #define HWRM_TUNNEL_DST_PORT_QUERY 0xa0UL
- #define HWRM_TUNNEL_DST_PORT_ALLOC 0xa1UL
- #define HWRM_TUNNEL_DST_PORT_FREE 0xa2UL
- #define HWRM_QUEUE_ADPTV_QOS_TX_TUNING_CFG 0xa3UL
- #define HWRM_STAT_CTX_ENG_QUERY 0xafUL
- #define HWRM_STAT_CTX_ALLOC 0xb0UL
- #define HWRM_STAT_CTX_FREE 0xb1UL
- #define HWRM_STAT_CTX_QUERY 0xb2UL
- #define HWRM_STAT_CTX_CLR_STATS 0xb3UL
- #define HWRM_PORT_QSTATS_EXT 0xb4UL
- #define HWRM_PORT_PHY_MDIO_WRITE 0xb5UL
- #define HWRM_PORT_PHY_MDIO_READ 0xb6UL
- #define HWRM_PORT_PHY_MDIO_BUS_ACQUIRE 0xb7UL
- #define HWRM_PORT_PHY_MDIO_BUS_RELEASE 0xb8UL
- #define HWRM_PORT_QSTATS_EXT_PFC_WD 0xb9UL
- #define HWRM_RESERVED7 0xbaUL
- #define HWRM_PORT_TX_FIR_CFG 0xbbUL
- #define HWRM_PORT_TX_FIR_QCFG 0xbcUL
- #define HWRM_PORT_ECN_QSTATS 0xbdUL
- #define HWRM_FW_LIVEPATCH_QUERY 0xbeUL
- #define HWRM_FW_LIVEPATCH 0xbfUL
- #define HWRM_FW_RESET 0xc0UL
- #define HWRM_FW_QSTATUS 0xc1UL
- #define HWRM_FW_HEALTH_CHECK 0xc2UL
- #define HWRM_FW_SYNC 0xc3UL
- #define HWRM_FW_STATE_QCAPS 0xc4UL
- #define HWRM_FW_STATE_QUIESCE 0xc5UL
- #define HWRM_FW_STATE_BACKUP 0xc6UL
- #define HWRM_FW_STATE_RESTORE 0xc7UL
- #define HWRM_FW_SET_TIME 0xc8UL
- #define HWRM_FW_GET_TIME 0xc9UL
- #define HWRM_FW_SET_STRUCTURED_DATA 0xcaUL
- #define HWRM_FW_GET_STRUCTURED_DATA 0xcbUL
- #define HWRM_FW_IPC_MAILBOX 0xccUL
- #define HWRM_FW_ECN_CFG 0xcdUL
- #define HWRM_FW_ECN_QCFG 0xceUL
- #define HWRM_FW_SECURE_CFG 0xcfUL
- #define HWRM_EXEC_FWD_RESP 0xd0UL
- #define HWRM_REJECT_FWD_RESP 0xd1UL
- #define HWRM_FWD_RESP 0xd2UL
- #define HWRM_FWD_ASYNC_EVENT_CMPL 0xd3UL
- #define HWRM_OEM_CMD 0xd4UL
- #define HWRM_PORT_PRBS_TEST 0xd5UL
- #define HWRM_PORT_SFP_SIDEBAND_CFG 0xd6UL
- #define HWRM_PORT_SFP_SIDEBAND_QCFG 0xd7UL
- #define HWRM_FW_STATE_UNQUIESCE 0xd8UL
- #define HWRM_PORT_DSC_DUMP 0xd9UL
- #define HWRM_PORT_EP_TX_QCFG 0xdaUL
- #define HWRM_PORT_EP_TX_CFG 0xdbUL
- #define HWRM_PORT_CFG 0xdcUL
- #define HWRM_PORT_QCFG 0xddUL
- #define HWRM_PORT_MAC_QCAPS 0xdfUL
- #define HWRM_TEMP_MONITOR_QUERY 0xe0UL
- #define HWRM_REG_POWER_QUERY 0xe1UL
- #define HWRM_CORE_FREQUENCY_QUERY 0xe2UL
- #define HWRM_REG_POWER_HISTOGRAM 0xe3UL
- #define HWRM_WOL_FILTER_ALLOC 0xf0UL
- #define HWRM_WOL_FILTER_FREE 0xf1UL
- #define HWRM_WOL_FILTER_QCFG 0xf2UL
- #define HWRM_WOL_REASON_QCFG 0xf3UL
- #define HWRM_CFA_METER_QCAPS 0xf4UL
- #define HWRM_CFA_METER_PROFILE_ALLOC 0xf5UL
- #define HWRM_CFA_METER_PROFILE_FREE 0xf6UL
- #define HWRM_CFA_METER_PROFILE_CFG 0xf7UL
- #define HWRM_CFA_METER_INSTANCE_ALLOC 0xf8UL
- #define HWRM_CFA_METER_INSTANCE_FREE 0xf9UL
- #define HWRM_CFA_METER_INSTANCE_CFG 0xfaUL
- #define HWRM_CFA_VFR_ALLOC 0xfdUL
- #define HWRM_CFA_VFR_FREE 0xfeUL
- #define HWRM_CFA_VF_PAIR_ALLOC 0x100UL
- #define HWRM_CFA_VF_PAIR_FREE 0x101UL
- #define HWRM_CFA_VF_PAIR_INFO 0x102UL
- #define HWRM_CFA_FLOW_ALLOC 0x103UL
- #define HWRM_CFA_FLOW_FREE 0x104UL
- #define HWRM_CFA_FLOW_FLUSH 0x105UL
- #define HWRM_CFA_FLOW_STATS 0x106UL
- #define HWRM_CFA_FLOW_INFO 0x107UL
- #define HWRM_CFA_DECAP_FILTER_ALLOC 0x108UL
- #define HWRM_CFA_DECAP_FILTER_FREE 0x109UL
- #define HWRM_CFA_VLAN_ANTISPOOF_QCFG 0x10aUL
- #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC 0x10bUL
- #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE 0x10cUL
- #define HWRM_CFA_PAIR_ALLOC 0x10dUL
- #define HWRM_CFA_PAIR_FREE 0x10eUL
- #define HWRM_CFA_PAIR_INFO 0x10fUL
- #define HWRM_FW_IPC_MSG 0x110UL
- #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO 0x111UL
- #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE 0x112UL
- #define HWRM_CFA_FLOW_AGING_TIMER_RESET 0x113UL
- #define HWRM_CFA_FLOW_AGING_CFG 0x114UL
- #define HWRM_CFA_FLOW_AGING_QCFG 0x115UL
- #define HWRM_CFA_FLOW_AGING_QCAPS 0x116UL
- #define HWRM_CFA_CTX_MEM_RGTR 0x117UL
- #define HWRM_CFA_CTX_MEM_UNRGTR 0x118UL
- #define HWRM_CFA_CTX_MEM_QCTX 0x119UL
- #define HWRM_CFA_CTX_MEM_QCAPS 0x11aUL
- #define HWRM_CFA_COUNTER_QCAPS 0x11bUL
- #define HWRM_CFA_COUNTER_CFG 0x11cUL
- #define HWRM_CFA_COUNTER_QCFG 0x11dUL
- #define HWRM_CFA_COUNTER_QSTATS 0x11eUL
- #define HWRM_CFA_TCP_FLAG_PROCESS_QCFG 0x11fUL
- #define HWRM_CFA_EEM_QCAPS 0x120UL
- #define HWRM_CFA_EEM_CFG 0x121UL
- #define HWRM_CFA_EEM_QCFG 0x122UL
- #define HWRM_CFA_EEM_OP 0x123UL
- #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS 0x124UL
- #define HWRM_CFA_TFLIB 0x125UL
- #define HWRM_CFA_LAG_GROUP_MEMBER_RGTR 0x126UL
- #define HWRM_CFA_LAG_GROUP_MEMBER_UNRGTR 0x127UL
- #define HWRM_CFA_TLS_FILTER_ALLOC 0x128UL
- #define HWRM_CFA_TLS_FILTER_FREE 0x129UL
- #define HWRM_CFA_RELEASE_AFM_FUNC 0x12aUL
- #define HWRM_ENGINE_CKV_STATUS 0x12eUL
- #define HWRM_ENGINE_CKV_CKEK_ADD 0x12fUL
- #define HWRM_ENGINE_CKV_CKEK_DELETE 0x130UL
- #define HWRM_ENGINE_CKV_KEY_ADD 0x131UL
- #define HWRM_ENGINE_CKV_KEY_DELETE 0x132UL
- #define HWRM_ENGINE_CKV_FLUSH 0x133UL
- #define HWRM_ENGINE_CKV_RNG_GET 0x134UL
- #define HWRM_ENGINE_CKV_KEY_GEN 0x135UL
- #define HWRM_ENGINE_CKV_KEY_LABEL_CFG 0x136UL
- #define HWRM_ENGINE_CKV_KEY_LABEL_QCFG 0x137UL
- #define HWRM_ENGINE_QG_CONFIG_QUERY 0x13cUL
- #define HWRM_ENGINE_QG_QUERY 0x13dUL
- #define HWRM_ENGINE_QG_METER_PROFILE_CONFIG_QUERY 0x13eUL
- #define HWRM_ENGINE_QG_METER_PROFILE_QUERY 0x13fUL
- #define HWRM_ENGINE_QG_METER_PROFILE_ALLOC 0x140UL
- #define HWRM_ENGINE_QG_METER_PROFILE_FREE 0x141UL
- #define HWRM_ENGINE_QG_METER_QUERY 0x142UL
- #define HWRM_ENGINE_QG_METER_BIND 0x143UL
- #define HWRM_ENGINE_QG_METER_UNBIND 0x144UL
- #define HWRM_ENGINE_QG_FUNC_BIND 0x145UL
- #define HWRM_ENGINE_SG_CONFIG_QUERY 0x146UL
- #define HWRM_ENGINE_SG_QUERY 0x147UL
- #define HWRM_ENGINE_SG_METER_QUERY 0x148UL
- #define HWRM_ENGINE_SG_METER_CONFIG 0x149UL
- #define HWRM_ENGINE_SG_QG_BIND 0x14aUL
- #define HWRM_ENGINE_QG_SG_UNBIND 0x14bUL
- #define HWRM_ENGINE_CONFIG_QUERY 0x154UL
- #define HWRM_ENGINE_STATS_CONFIG 0x155UL
- #define HWRM_ENGINE_STATS_CLEAR 0x156UL
- #define HWRM_ENGINE_STATS_QUERY 0x157UL
- #define HWRM_ENGINE_STATS_QUERY_CONTINUOUS_ERROR 0x158UL
- #define HWRM_ENGINE_RQ_ALLOC 0x15eUL
- #define HWRM_ENGINE_RQ_FREE 0x15fUL
- #define HWRM_ENGINE_CQ_ALLOC 0x160UL
- #define HWRM_ENGINE_CQ_FREE 0x161UL
- #define HWRM_ENGINE_NQ_ALLOC 0x162UL
- #define HWRM_ENGINE_NQ_FREE 0x163UL
- #define HWRM_ENGINE_ON_DIE_RQE_CREDITS 0x164UL
- #define HWRM_ENGINE_FUNC_QCFG 0x165UL
- #define HWRM_FUNC_RESOURCE_QCAPS 0x190UL
- #define HWRM_FUNC_VF_RESOURCE_CFG 0x191UL
- #define HWRM_FUNC_BACKING_STORE_QCAPS 0x192UL
- #define HWRM_FUNC_BACKING_STORE_CFG 0x193UL
- #define HWRM_FUNC_BACKING_STORE_QCFG 0x194UL
- #define HWRM_FUNC_VF_BW_CFG 0x195UL
- #define HWRM_FUNC_VF_BW_QCFG 0x196UL
- #define HWRM_FUNC_HOST_PF_IDS_QUERY 0x197UL
- #define HWRM_FUNC_QSTATS_EXT 0x198UL
- #define HWRM_STAT_EXT_CTX_QUERY 0x199UL
- #define HWRM_FUNC_SPD_CFG 0x19aUL
- #define HWRM_FUNC_SPD_QCFG 0x19bUL
- #define HWRM_FUNC_PTP_PIN_QCFG 0x19cUL
- #define HWRM_FUNC_PTP_PIN_CFG 0x19dUL
- #define HWRM_FUNC_PTP_CFG 0x19eUL
- #define HWRM_FUNC_PTP_TS_QUERY 0x19fUL
- #define HWRM_FUNC_PTP_EXT_CFG 0x1a0UL
- #define HWRM_FUNC_PTP_EXT_QCFG 0x1a1UL
- #define HWRM_FUNC_KEY_CTX_ALLOC 0x1a2UL
- #define HWRM_FUNC_BACKING_STORE_CFG_V2 0x1a3UL
- #define HWRM_FUNC_BACKING_STORE_QCFG_V2 0x1a4UL
- #define HWRM_FUNC_DBR_PACING_CFG 0x1a5UL
- #define HWRM_FUNC_DBR_PACING_QCFG 0x1a6UL
- #define HWRM_FUNC_DBR_PACING_BROADCAST_EVENT 0x1a7UL
- #define HWRM_FUNC_BACKING_STORE_QCAPS_V2 0x1a8UL
- #define HWRM_FUNC_DBR_PACING_NQLIST_QUERY 0x1a9UL
- #define HWRM_FUNC_DBR_RECOVERY_COMPLETED 0x1aaUL
- #define HWRM_FUNC_SYNCE_CFG 0x1abUL
- #define HWRM_FUNC_SYNCE_QCFG 0x1acUL
- #define HWRM_FUNC_KEY_CTX_FREE 0x1adUL
- #define HWRM_FUNC_LAG_MODE_CFG 0x1aeUL
- #define HWRM_FUNC_LAG_MODE_QCFG 0x1afUL
- #define HWRM_FUNC_LAG_CREATE 0x1b0UL
- #define HWRM_FUNC_LAG_UPDATE 0x1b1UL
- #define HWRM_FUNC_LAG_FREE 0x1b2UL
- #define HWRM_FUNC_LAG_QCFG 0x1b3UL
- #define HWRM_SELFTEST_QLIST 0x200UL
- #define HWRM_SELFTEST_EXEC 0x201UL
- #define HWRM_SELFTEST_IRQ 0x202UL
- #define HWRM_SELFTEST_RETRIEVE_SERDES_DATA 0x203UL
- #define HWRM_PCIE_QSTATS 0x204UL
- #define HWRM_MFG_FRU_WRITE_CONTROL 0x205UL
- #define HWRM_MFG_TIMERS_QUERY 0x206UL
- #define HWRM_MFG_OTP_CFG 0x207UL
- #define HWRM_MFG_OTP_QCFG 0x208UL
- #define HWRM_MFG_HDMA_TEST 0x209UL
- #define HWRM_MFG_FRU_EEPROM_WRITE 0x20aUL
- #define HWRM_MFG_FRU_EEPROM_READ 0x20bUL
- #define HWRM_MFG_SOC_IMAGE 0x20cUL
- #define HWRM_MFG_SOC_QSTATUS 0x20dUL
- #define HWRM_MFG_PARAM_CRITICAL_DATA_FINALIZE 0x20eUL
- #define HWRM_MFG_PARAM_CRITICAL_DATA_READ 0x20fUL
- #define HWRM_MFG_PARAM_CRITICAL_DATA_HEALTH 0x210UL
- #define HWRM_MFG_PRVSN_EXPORT_CSR 0x211UL
- #define HWRM_MFG_PRVSN_IMPORT_CERT 0x212UL
- #define HWRM_MFG_PRVSN_GET_STATE 0x213UL
- #define HWRM_MFG_GET_NVM_MEASUREMENT 0x214UL
- #define HWRM_MFG_PSOC_QSTATUS 0x215UL
- #define HWRM_MFG_SELFTEST_QLIST 0x216UL
- #define HWRM_MFG_SELFTEST_EXEC 0x217UL
- #define HWRM_STAT_GENERIC_QSTATS 0x218UL
- #define HWRM_MFG_PRVSN_EXPORT_CERT 0x219UL
- #define HWRM_STAT_DB_ERROR_QSTATS 0x21aUL
- #define HWRM_UDCC_QCAPS 0x258UL
- #define HWRM_UDCC_CFG 0x259UL
- #define HWRM_UDCC_QCFG 0x25aUL
- #define HWRM_UDCC_SESSION_CFG 0x25bUL
- #define HWRM_UDCC_SESSION_QCFG 0x25cUL
- #define HWRM_UDCC_SESSION_QUERY 0x25dUL
- #define HWRM_UDCC_COMP_CFG 0x25eUL
- #define HWRM_UDCC_COMP_QCFG 0x25fUL
- #define HWRM_UDCC_COMP_QUERY 0x260UL
- #define HWRM_TF 0x2bcUL
- #define HWRM_TF_VERSION_GET 0x2bdUL
- #define HWRM_TF_SESSION_OPEN 0x2c6UL
- #define HWRM_TF_SESSION_REGISTER 0x2c8UL
- #define HWRM_TF_SESSION_UNREGISTER 0x2c9UL
- #define HWRM_TF_SESSION_CLOSE 0x2caUL
- #define HWRM_TF_SESSION_QCFG 0x2cbUL
- #define HWRM_TF_SESSION_RESC_QCAPS 0x2ccUL
- #define HWRM_TF_SESSION_RESC_ALLOC 0x2cdUL
- #define HWRM_TF_SESSION_RESC_FREE 0x2ceUL
- #define HWRM_TF_SESSION_RESC_FLUSH 0x2cfUL
- #define HWRM_TF_SESSION_RESC_INFO 0x2d0UL
- #define HWRM_TF_SESSION_HOTUP_STATE_SET 0x2d1UL
- #define HWRM_TF_SESSION_HOTUP_STATE_GET 0x2d2UL
- #define HWRM_TF_TBL_TYPE_GET 0x2daUL
- #define HWRM_TF_TBL_TYPE_SET 0x2dbUL
- #define HWRM_TF_TBL_TYPE_BULK_GET 0x2dcUL
- #define HWRM_TF_EM_INSERT 0x2eaUL
- #define HWRM_TF_EM_DELETE 0x2ebUL
- #define HWRM_TF_EM_HASH_INSERT 0x2ecUL
- #define HWRM_TF_EM_MOVE 0x2edUL
- #define HWRM_TF_TCAM_SET 0x2f8UL
- #define HWRM_TF_TCAM_GET 0x2f9UL
- #define HWRM_TF_TCAM_MOVE 0x2faUL
- #define HWRM_TF_TCAM_FREE 0x2fbUL
- #define HWRM_TF_GLOBAL_CFG_SET 0x2fcUL
- #define HWRM_TF_GLOBAL_CFG_GET 0x2fdUL
- #define HWRM_TF_IF_TBL_SET 0x2feUL
- #define HWRM_TF_IF_TBL_GET 0x2ffUL
- #define HWRM_TFC_TBL_SCOPE_QCAPS 0x380UL
- #define HWRM_TFC_TBL_SCOPE_ID_ALLOC 0x381UL
- #define HWRM_TFC_TBL_SCOPE_CONFIG 0x382UL
- #define HWRM_TFC_TBL_SCOPE_DECONFIG 0x383UL
- #define HWRM_TFC_TBL_SCOPE_FID_ADD 0x384UL
- #define HWRM_TFC_TBL_SCOPE_FID_REM 0x385UL
- #define HWRM_TFC_TBL_SCOPE_POOL_ALLOC 0x386UL
- #define HWRM_TFC_TBL_SCOPE_POOL_FREE 0x387UL
- #define HWRM_TFC_SESSION_ID_ALLOC 0x388UL
- #define HWRM_TFC_SESSION_FID_ADD 0x389UL
- #define HWRM_TFC_SESSION_FID_REM 0x38aUL
- #define HWRM_TFC_IDENT_ALLOC 0x38bUL
- #define HWRM_TFC_IDENT_FREE 0x38cUL
- #define HWRM_TFC_IDX_TBL_ALLOC 0x38dUL
- #define HWRM_TFC_IDX_TBL_ALLOC_SET 0x38eUL
- #define HWRM_TFC_IDX_TBL_SET 0x38fUL
- #define HWRM_TFC_IDX_TBL_GET 0x390UL
- #define HWRM_TFC_IDX_TBL_FREE 0x391UL
- #define HWRM_TFC_GLOBAL_ID_ALLOC 0x392UL
- #define HWRM_TFC_TCAM_SET 0x393UL
- #define HWRM_TFC_TCAM_GET 0x394UL
- #define HWRM_TFC_TCAM_ALLOC 0x395UL
- #define HWRM_TFC_TCAM_ALLOC_SET 0x396UL
- #define HWRM_TFC_TCAM_FREE 0x397UL
- #define HWRM_TFC_IF_TBL_SET 0x398UL
- #define HWRM_TFC_IF_TBL_GET 0x399UL
- #define HWRM_TFC_TBL_SCOPE_CONFIG_GET 0x39aUL
- #define HWRM_SV 0x400UL
- #define HWRM_DBG_READ_DIRECT 0xff10UL
- #define HWRM_DBG_READ_INDIRECT 0xff11UL
- #define HWRM_DBG_WRITE_DIRECT 0xff12UL
- #define HWRM_DBG_WRITE_INDIRECT 0xff13UL
- #define HWRM_DBG_DUMP 0xff14UL
- #define HWRM_DBG_ERASE_NVM 0xff15UL
- #define HWRM_DBG_CFG 0xff16UL
- #define HWRM_DBG_COREDUMP_LIST 0xff17UL
- #define HWRM_DBG_COREDUMP_INITIATE 0xff18UL
- #define HWRM_DBG_COREDUMP_RETRIEVE 0xff19UL
- #define HWRM_DBG_FW_CLI 0xff1aUL
- #define HWRM_DBG_I2C_CMD 0xff1bUL
- #define HWRM_DBG_RING_INFO_GET 0xff1cUL
- #define HWRM_DBG_CRASHDUMP_HEADER 0xff1dUL
- #define HWRM_DBG_CRASHDUMP_ERASE 0xff1eUL
- #define HWRM_DBG_DRV_TRACE 0xff1fUL
- #define HWRM_DBG_QCAPS 0xff20UL
- #define HWRM_DBG_QCFG 0xff21UL
- #define HWRM_DBG_CRASHDUMP_MEDIUM_CFG 0xff22UL
- #define HWRM_DBG_USEQ_ALLOC 0xff23UL
- #define HWRM_DBG_USEQ_FREE 0xff24UL
- #define HWRM_DBG_USEQ_FLUSH 0xff25UL
- #define HWRM_DBG_USEQ_QCAPS 0xff26UL
- #define HWRM_DBG_USEQ_CW_CFG 0xff27UL
- #define HWRM_DBG_USEQ_SCHED_CFG 0xff28UL
- #define HWRM_DBG_USEQ_RUN 0xff29UL
- #define HWRM_DBG_USEQ_DELIVERY_REQ 0xff2aUL
- #define HWRM_DBG_USEQ_RESP_HDR 0xff2bUL
- #define HWRM_NVM_GET_VPD_FIELD_INFO 0xffeaUL
- #define HWRM_NVM_SET_VPD_FIELD_INFO 0xffebUL
- #define HWRM_NVM_DEFRAG 0xffecUL
- #define HWRM_NVM_REQ_ARBITRATION 0xffedUL
- #define HWRM_NVM_FACTORY_DEFAULTS 0xffeeUL
- #define HWRM_NVM_VALIDATE_OPTION 0xffefUL
- #define HWRM_NVM_FLUSH 0xfff0UL
- #define HWRM_NVM_GET_VARIABLE 0xfff1UL
- #define HWRM_NVM_SET_VARIABLE 0xfff2UL
- #define HWRM_NVM_INSTALL_UPDATE 0xfff3UL
- #define HWRM_NVM_MODIFY 0xfff4UL
- #define HWRM_NVM_VERIFY_UPDATE 0xfff5UL
- #define HWRM_NVM_GET_DEV_INFO 0xfff6UL
- #define HWRM_NVM_ERASE_DIR_ENTRY 0xfff7UL
- #define HWRM_NVM_MOD_DIR_ENTRY 0xfff8UL
- #define HWRM_NVM_FIND_DIR_ENTRY 0xfff9UL
- #define HWRM_NVM_GET_DIR_ENTRIES 0xfffaUL
- #define HWRM_NVM_GET_DIR_INFO 0xfffbUL
- #define HWRM_NVM_RAW_DUMP 0xfffcUL
- #define HWRM_NVM_READ 0xfffdUL
- #define HWRM_NVM_WRITE 0xfffeUL
- #define HWRM_NVM_RAW_WRITE_BLK 0xffffUL
- #define HWRM_LAST HWRM_NVM_RAW_WRITE_BLK
- __le16 unused_0[3];
-};
-
-/* ret_codes (size:64b/8B) */
-struct ret_codes {
- __le16 error_code;
- #define HWRM_ERR_CODE_SUCCESS 0x0UL
- #define HWRM_ERR_CODE_FAIL 0x1UL
- #define HWRM_ERR_CODE_INVALID_PARAMS 0x2UL
- #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED 0x3UL
- #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR 0x4UL
- #define HWRM_ERR_CODE_INVALID_FLAGS 0x5UL
- #define HWRM_ERR_CODE_INVALID_ENABLES 0x6UL
- #define HWRM_ERR_CODE_UNSUPPORTED_TLV 0x7UL
- #define HWRM_ERR_CODE_NO_BUFFER 0x8UL
- #define HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR 0x9UL
- #define HWRM_ERR_CODE_HOT_RESET_PROGRESS 0xaUL
- #define HWRM_ERR_CODE_HOT_RESET_FAIL 0xbUL
- #define HWRM_ERR_CODE_NO_FLOW_COUNTER_DURING_ALLOC 0xcUL
- #define HWRM_ERR_CODE_KEY_HASH_COLLISION 0xdUL
- #define HWRM_ERR_CODE_KEY_ALREADY_EXISTS 0xeUL
- #define HWRM_ERR_CODE_HWRM_ERROR 0xfUL
- #define HWRM_ERR_CODE_BUSY 0x10UL
- #define HWRM_ERR_CODE_RESOURCE_LOCKED 0x11UL
- #define HWRM_ERR_CODE_PF_UNAVAILABLE 0x12UL
- #define HWRM_ERR_CODE_ENTITY_NOT_PRESENT 0x13UL
- #define HWRM_ERR_CODE_TLV_ENCAPSULATED_RESPONSE 0x8000UL
- #define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL
- #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL
- #define HWRM_ERR_CODE_LAST HWRM_ERR_CODE_CMD_NOT_SUPPORTED
- __le16 unused_0[3];
-};
-
-/* hwrm_err_output (size:128b/16B) */
-struct hwrm_err_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 opaque_0;
- __le16 opaque_1;
- u8 cmd_err;
- u8 valid;
-};
-#define HWRM_NA_SIGNATURE ((__le32)(-1))
-#define HWRM_MAX_REQ_LEN 128
-#define HWRM_MAX_RESP_LEN 704
-#define HW_HASH_INDEX_SIZE 0x80
-#define HW_HASH_KEY_SIZE 40
-#define HWRM_RESP_VALID_KEY 1
-#define HWRM_TARGET_ID_BONO 0xFFF8
-#define HWRM_TARGET_ID_KONG 0xFFF9
-#define HWRM_TARGET_ID_APE 0xFFFA
-#define HWRM_TARGET_ID_TOOLS 0xFFFD
-#define HWRM_VERSION_MAJOR 1
-#define HWRM_VERSION_MINOR 10
-#define HWRM_VERSION_UPDATE 3
-#define HWRM_VERSION_RSVD 15
-#define HWRM_VERSION_STR "1.10.3.15"
-
-/* hwrm_ver_get_input (size:192b/24B) */
-struct hwrm_ver_get_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 hwrm_intf_maj;
- u8 hwrm_intf_min;
- u8 hwrm_intf_upd;
- u8 unused_0[5];
-};
-
-/* hwrm_ver_get_output (size:1408b/176B) */
-struct hwrm_ver_get_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 hwrm_intf_maj_8b;
- u8 hwrm_intf_min_8b;
- u8 hwrm_intf_upd_8b;
- u8 hwrm_intf_rsvd_8b;
- u8 hwrm_fw_maj_8b;
- u8 hwrm_fw_min_8b;
- u8 hwrm_fw_bld_8b;
- u8 hwrm_fw_rsvd_8b;
- u8 mgmt_fw_maj_8b;
- u8 mgmt_fw_min_8b;
- u8 mgmt_fw_bld_8b;
- u8 mgmt_fw_rsvd_8b;
- u8 netctrl_fw_maj_8b;
- u8 netctrl_fw_min_8b;
- u8 netctrl_fw_bld_8b;
- u8 netctrl_fw_rsvd_8b;
- __le32 dev_caps_cfg;
- #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED 0x1UL
- #define VER_GET_RESP_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED 0x2UL
- #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED 0x4UL
- #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED 0x8UL
- #define VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED 0x10UL
- #define VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED 0x20UL
- #define VER_GET_RESP_DEV_CAPS_CFG_L2_FILTER_TYPES_ROCE_OR_L2_SUPPORTED 0x40UL
- #define VER_GET_RESP_DEV_CAPS_CFG_VIRTIO_VSWITCH_OFFLOAD_SUPPORTED 0x80UL
- #define VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED 0x100UL
- #define VER_GET_RESP_DEV_CAPS_CFG_FLOW_AGING_SUPPORTED 0x200UL
- #define VER_GET_RESP_DEV_CAPS_CFG_ADV_FLOW_COUNTERS_SUPPORTED 0x400UL
- #define VER_GET_RESP_DEV_CAPS_CFG_CFA_EEM_SUPPORTED 0x800UL
- #define VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED 0x1000UL
- #define VER_GET_RESP_DEV_CAPS_CFG_CFA_TFLIB_SUPPORTED 0x2000UL
- #define VER_GET_RESP_DEV_CAPS_CFG_CFA_TRUFLOW_SUPPORTED 0x4000UL
- #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_BOOT_CAPABLE 0x8000UL
- u8 roce_fw_maj_8b;
- u8 roce_fw_min_8b;
- u8 roce_fw_bld_8b;
- u8 roce_fw_rsvd_8b;
- char hwrm_fw_name[16];
- char mgmt_fw_name[16];
- char netctrl_fw_name[16];
- char active_pkg_name[16];
- char roce_fw_name[16];
- __le16 chip_num;
- u8 chip_rev;
- u8 chip_metal;
- u8 chip_bond_id;
- u8 chip_platform_type;
- #define VER_GET_RESP_CHIP_PLATFORM_TYPE_ASIC 0x0UL
- #define VER_GET_RESP_CHIP_PLATFORM_TYPE_FPGA 0x1UL
- #define VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM 0x2UL
- #define VER_GET_RESP_CHIP_PLATFORM_TYPE_LAST VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM
- __le16 max_req_win_len;
- __le16 max_resp_len;
- __le16 def_req_timeout;
- u8 flags;
- #define VER_GET_RESP_FLAGS_DEV_NOT_RDY 0x1UL
- #define VER_GET_RESP_FLAGS_EXT_VER_AVAIL 0x2UL
- #define VER_GET_RESP_FLAGS_DEV_NOT_RDY_BACKING_STORE 0x4UL
- u8 unused_0[2];
- u8 always_1;
- __le16 hwrm_intf_major;
- __le16 hwrm_intf_minor;
- __le16 hwrm_intf_build;
- __le16 hwrm_intf_patch;
- __le16 hwrm_fw_major;
- __le16 hwrm_fw_minor;
- __le16 hwrm_fw_build;
- __le16 hwrm_fw_patch;
- __le16 mgmt_fw_major;
- __le16 mgmt_fw_minor;
- __le16 mgmt_fw_build;
- __le16 mgmt_fw_patch;
- __le16 netctrl_fw_major;
- __le16 netctrl_fw_minor;
- __le16 netctrl_fw_build;
- __le16 netctrl_fw_patch;
- __le16 roce_fw_major;
- __le16 roce_fw_minor;
- __le16 roce_fw_build;
- __le16 roce_fw_patch;
- __le16 max_ext_req_len;
- __le16 max_req_timeout;
- u8 unused_1[3];
- u8 valid;
-};
-
-/* eject_cmpl (size:128b/16B) */
-struct eject_cmpl {
- __le16 type;
- #define EJECT_CMPL_TYPE_MASK 0x3fUL
- #define EJECT_CMPL_TYPE_SFT 0
- #define EJECT_CMPL_TYPE_STAT_EJECT 0x1aUL
- #define EJECT_CMPL_TYPE_LAST EJECT_CMPL_TYPE_STAT_EJECT
- #define EJECT_CMPL_FLAGS_MASK 0xffc0UL
- #define EJECT_CMPL_FLAGS_SFT 6
- #define EJECT_CMPL_FLAGS_ERROR 0x40UL
- __le16 len;
- __le32 opaque;
- __le16 v;
- #define EJECT_CMPL_V 0x1UL
- #define EJECT_CMPL_ERRORS_MASK 0xfffeUL
- #define EJECT_CMPL_ERRORS_SFT 1
- #define EJECT_CMPL_ERRORS_BUFFER_ERROR_MASK 0xeUL
- #define EJECT_CMPL_ERRORS_BUFFER_ERROR_SFT 1
- #define EJECT_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0UL << 1)
- #define EJECT_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT (0x1UL << 1)
- #define EJECT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3UL << 1)
- #define EJECT_CMPL_ERRORS_BUFFER_ERROR_FLUSH (0x5UL << 1)
- #define EJECT_CMPL_ERRORS_BUFFER_ERROR_LAST EJECT_CMPL_ERRORS_BUFFER_ERROR_FLUSH
- __le16 reserved16;
- __le32 unused_2;
-};
-
-/* hwrm_cmpl (size:128b/16B) */
-struct hwrm_cmpl {
- __le16 type;
- #define CMPL_TYPE_MASK 0x3fUL
- #define CMPL_TYPE_SFT 0
- #define CMPL_TYPE_HWRM_DONE 0x20UL
- #define CMPL_TYPE_LAST CMPL_TYPE_HWRM_DONE
- __le16 sequence_id;
- __le32 unused_1;
- __le32 v;
- #define CMPL_V 0x1UL
- __le32 unused_3;
-};
-
-/* hwrm_fwd_req_cmpl (size:128b/16B) */
-struct hwrm_fwd_req_cmpl {
- __le16 req_len_type;
- #define FWD_REQ_CMPL_TYPE_MASK 0x3fUL
- #define FWD_REQ_CMPL_TYPE_SFT 0
- #define FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ 0x22UL
- #define FWD_REQ_CMPL_TYPE_LAST FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ
- #define FWD_REQ_CMPL_REQ_LEN_MASK 0xffc0UL
- #define FWD_REQ_CMPL_REQ_LEN_SFT 6
- __le16 source_id;
- __le32 unused0;
- __le32 req_buf_addr_v[2];
- #define FWD_REQ_CMPL_V 0x1UL
- #define FWD_REQ_CMPL_REQ_BUF_ADDR_MASK 0xfffffffeUL
- #define FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1
-};
-
-/* hwrm_fwd_resp_cmpl (size:128b/16B) */
-struct hwrm_fwd_resp_cmpl {
- __le16 type;
- #define FWD_RESP_CMPL_TYPE_MASK 0x3fUL
- #define FWD_RESP_CMPL_TYPE_SFT 0
- #define FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP 0x24UL
- #define FWD_RESP_CMPL_TYPE_LAST FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP
- __le16 source_id;
- __le16 resp_len;
- __le16 unused_1;
- __le32 resp_buf_addr_v[2];
- #define FWD_RESP_CMPL_V 0x1UL
- #define FWD_RESP_CMPL_RESP_BUF_ADDR_MASK 0xfffffffeUL
- #define FWD_RESP_CMPL_RESP_BUF_ADDR_SFT 1
-};
-
-/* hwrm_async_event_cmpl (size:128b/16B) */
-struct hwrm_async_event_cmpl {
- __le16 type;
- #define ASYNC_EVENT_CMPL_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_TYPE_LAST ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE 0x1UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE 0x2UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY 0x8UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY 0x9UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG 0xaUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD 0x20UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD 0x21UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR 0x30UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE 0x35UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_HW_FLOW_AGED 0x36UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION 0x37UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_REQ 0x38UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_DONE 0x39UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_TCP_FLAG_ACTION_CHANGE 0x3aUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_FLOW_ACTIVE 0x3bUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CFG_CHANGE 0x3cUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_DEFAULT_VNIC_CHANGE 0x3dUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_LINK_STATUS_CHANGE 0x3eUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_QUIESCE_DONE 0x3fUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE 0x40UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PFC_WATCHDOG_CFG_CHANGE 0x41UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST 0x42UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE 0x43UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP 0x44UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT 0x45UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_THRESHOLD 0x46UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_RSS_CHANGE 0x47UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_NQ_UPDATE 0x48UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_HW_DOORBELL_RECOVERY_READ_ERROR 0x49UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_CTX_ERROR 0x4aUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_UDCC_SESSION_CHANGE 0x4bUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x4cUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_V 0x1UL
- #define ASYNC_EVENT_CMPL_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
-};
-
-/* hwrm_async_event_cmpl_link_status_change (size:128b/16B) */
-struct hwrm_async_event_cmpl_link_status_change {
- __le16 type;
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V 0x1UL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE 0x1UL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_DOWN 0x0UL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP 0x1UL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_MASK 0xeUL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT 1
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0UL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT 4
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_MASK 0xff00000UL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_SFT 20
-};
-
-/* hwrm_async_event_cmpl_port_conn_not_allowed (size:128b/16B) */
-struct hwrm_async_event_cmpl_port_conn_not_allowed {
- __le16 type;
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V 0x1UL
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK 0xff0000UL
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT 16
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_NONE (0x0UL << 16)
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_DISABLETX (0x1UL << 16)
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_WARNINGMSG (0x2UL << 16)
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN (0x3UL << 16)
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN
-};
-
-/* hwrm_async_event_cmpl_link_speed_cfg_change (size:128b/16B) */
-struct hwrm_async_event_cmpl_link_speed_cfg_change {
- __le16 type;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V 0x1UL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_SUPPORTED_LINK_SPEEDS_CHANGE 0x10000UL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL
-};
-
-/* hwrm_async_event_cmpl_reset_notify (size:128b/16B) */
-struct hwrm_async_event_cmpl_reset_notify {
- __le16 type;
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_RESET_NOTIFY 0x8UL
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_RESET_NOTIFY
- __le32 event_data2;
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA2_FW_STATUS_CODE_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA2_FW_STATUS_CODE_SFT 0
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_V 0x1UL
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_SFT 0
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_STOP_TX_QUEUE 0x1UL
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_IFDOWN 0x2UL
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_IFDOWN
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK 0xff00UL
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_SFT 8
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MANAGEMENT_RESET_REQUEST (0x1UL << 8)
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL (0x2UL << 8)
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_NON_FATAL (0x3UL << 8)
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FAST_RESET (0x4UL << 8)
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_ACTIVATION (0x5UL << 8)
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_ACTIVATION
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_MASK 0xffff0000UL
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_SFT 16
-};
-
-/* hwrm_async_event_cmpl_error_recovery (size:128b/16B) */
-struct hwrm_async_event_cmpl_error_recovery {
- __le16 type;
- #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_ID_ERROR_RECOVERY 0x9UL
- #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_ID_ERROR_RECOVERY
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_V 0x1UL
- #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASTER_FUNC 0x1UL
- #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_RECOVERY_ENABLED 0x2UL
-};
-
-/* hwrm_async_event_cmpl_ring_monitor_msg (size:128b/16B) */
-struct hwrm_async_event_cmpl_ring_monitor_msg {
- __le16 type;
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_LAST ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_ID_RING_MONITOR_MSG 0xaUL
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_ID_LAST ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_ID_RING_MONITOR_MSG
- __le32 event_data2;
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_TX 0x0UL
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX 0x1UL
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_CMPL 0x2UL
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_LAST ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_CMPL
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_V 0x1UL
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
-};
-
-/* hwrm_async_event_cmpl_vf_cfg_change (size:128b/16B) */
-struct hwrm_async_event_cmpl_vf_cfg_change {
- __le16 type;
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE 0x33UL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE
- __le32 event_data2;
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA2_VF_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA2_VF_ID_SFT 0
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V 0x1UL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE 0x1UL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE 0x2UL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE 0x4UL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE 0x8UL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TRUSTED_VF_CFG_CHANGE 0x10UL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TF_OWNERSHIP_RELEASE 0x20UL
-};
-
-/* hwrm_async_event_cmpl_default_vnic_change (size:128b/16B) */
-struct hwrm_async_event_cmpl_default_vnic_change {
- __le16 type;
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_HWRM_ASYNC_EVENT
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_UNUSED1_MASK 0xffc0UL
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_UNUSED1_SFT 6
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_ALLOC_FREE_NOTIFICATION 0x35UL
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_ALLOC_FREE_NOTIFICATION
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_V 0x1UL
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_MASK 0x3UL
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_SFT 0
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_ALLOC 0x1UL
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_FREE 0x2UL
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_LAST ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_FREE
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_PF_ID_MASK 0x3fcUL
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_PF_ID_SFT 2
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_MASK 0x3fffc00UL
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_SFT 10
-};
-
-/* hwrm_async_event_cmpl_hw_flow_aged (size:128b/16B) */
-struct hwrm_async_event_cmpl_hw_flow_aged {
- __le16 type;
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_HW_FLOW_AGED 0x36UL
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_HW_FLOW_AGED
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_V 0x1UL
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_ID_MASK 0x7fffffffUL
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_ID_SFT 0
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION 0x80000000UL
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_RX (0x0UL << 31)
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_TX (0x1UL << 31)
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_TX
-};
-
-/* hwrm_async_event_cmpl_eem_cache_flush_req (size:128b/16B) */
-struct hwrm_async_event_cmpl_eem_cache_flush_req {
- __le16 type;
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_EEM_CACHE_FLUSH_REQ 0x38UL
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_EEM_CACHE_FLUSH_REQ
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_V 0x1UL
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
-};
-
-/* hwrm_async_event_cmpl_eem_cache_flush_done (size:128b/16B) */
-struct hwrm_async_event_cmpl_eem_cache_flush_done {
- __le16 type;
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_EEM_CACHE_FLUSH_DONE 0x39UL
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_EEM_CACHE_FLUSH_DONE
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_V 0x1UL
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_DATA1_FID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_DATA1_FID_SFT 0
-};
-
-/* hwrm_async_event_cmpl_deferred_response (size:128b/16B) */
-struct hwrm_async_event_cmpl_deferred_response {
- __le16 type;
- #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_LAST ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_ID_DEFERRED_RESPONSE 0x40UL
- #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_ID_LAST ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_ID_DEFERRED_RESPONSE
- __le32 event_data2;
- #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_DATA2_SEQ_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_DATA2_SEQ_ID_SFT 0
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_V 0x1UL
- #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
-};
-
-/* hwrm_async_event_cmpl_echo_request (size:128b/16B) */
-struct hwrm_async_event_cmpl_echo_request {
- __le16 type;
- #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_LAST ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_ECHO_REQUEST 0x42UL
- #define ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_LAST ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_ECHO_REQUEST
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_ECHO_REQUEST_V 0x1UL
- #define ASYNC_EVENT_CMPL_ECHO_REQUEST_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_ECHO_REQUEST_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
-};
-
-/* hwrm_async_event_cmpl_phc_update (size:128b/16B) */
-struct hwrm_async_event_cmpl_phc_update {
- __le16 type;
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_LAST ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_ID_PHC_UPDATE 0x43UL
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_ID_LAST ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_ID_PHC_UPDATE
- __le32 event_data2;
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_MASTER_FID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_MASTER_FID_SFT 0
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_SEC_FID_MASK 0xffff0000UL
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_SEC_FID_SFT 16
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_V 0x1UL
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK 0xfUL
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT 0
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_MASTER 0x1UL
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_SECONDARY 0x2UL
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_FAILOVER 0x3UL
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE 0x4UL
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_LAST ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK 0xffff0UL
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT 4
-};
-
-/* hwrm_async_event_cmpl_pps_timestamp (size:128b/16B) */
-struct hwrm_async_event_cmpl_pps_timestamp {
- __le16 type;
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_LAST ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_ID_PPS_TIMESTAMP 0x44UL
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_ID_LAST ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_ID_PPS_TIMESTAMP
- __le32 event_data2;
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE 0x1UL
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_INTERNAL 0x0UL
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_EXTERNAL 0x1UL
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_LAST ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_EXTERNAL
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PIN_NUMBER_MASK 0xeUL
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PIN_NUMBER_SFT 1
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_MASK 0xffff0UL
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_SFT 4
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_V 0x1UL
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA1_PPS_TIMESTAMP_LOWER_MASK 0xffffffffUL
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA1_PPS_TIMESTAMP_LOWER_SFT 0
-};
-
-/* hwrm_async_event_cmpl_error_report (size:128b/16B) */
-struct hwrm_async_event_cmpl_error_report {
- __le16 type;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_ID_ERROR_REPORT 0x45UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_ID_ERROR_REPORT
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_V 0x1UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_DATA1_ERROR_TYPE_SFT 0
-};
-
-/* hwrm_async_event_cmpl_hwrm_error (size:128b/16B) */
-struct hwrm_async_event_cmpl_hwrm_error {
- __le16 type;
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR 0xffUL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR
- __le32 event_data2;
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT 0
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING 0x0UL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL 0x1UL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL 0x2UL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_V 0x1UL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL
-};
-
-/* hwrm_async_event_cmpl_error_report_base (size:128b/16B) */
-struct hwrm_async_event_cmpl_error_report_base {
- __le16 type;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_ID_ERROR_REPORT 0x45UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_ID_ERROR_REPORT
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_V 0x1UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_RESERVED 0x0UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM 0x1UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM 0x3UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD 0x5UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD
-};
-
-/* hwrm_async_event_cmpl_error_report_pause_storm (size:128b/16B) */
-struct hwrm_async_event_cmpl_error_report_pause_storm {
- __le16 type;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_ID_ERROR_REPORT 0x45UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_ID_ERROR_REPORT
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_V 0x1UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM 0x1UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM
-};
-
-/* hwrm_async_event_cmpl_error_report_invalid_signal (size:128b/16B) */
-struct hwrm_async_event_cmpl_error_report_invalid_signal {
- __le16 type;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_ID_ERROR_REPORT 0x45UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_ID_ERROR_REPORT
- __le32 event_data2;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_SFT 0
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_V 0x1UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL
-};
-
-/* hwrm_async_event_cmpl_error_report_nvm (size:128b/16B) */
-struct hwrm_async_event_cmpl_error_report_nvm {
- __le16 type;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_ID_ERROR_REPORT 0x45UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_ID_ERROR_REPORT
- __le32 event_data2;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA2_ERR_ADDR_MASK 0xffffffffUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA2_ERR_ADDR_SFT 0
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_V 0x1UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_NVM_ERROR 0x3UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_NVM_ERROR
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_MASK 0xff00UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_SFT 8
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_WRITE (0x1UL << 8)
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_ERASE (0x2UL << 8)
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_ERASE
-};
-
-/* hwrm_async_event_cmpl_error_report_doorbell_drop_threshold (size:128b/16B) */
-struct hwrm_async_event_cmpl_error_report_doorbell_drop_threshold {
- __le16 type;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_ID_ERROR_REPORT 0x45UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_ID_ERROR_REPORT
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_V 0x1UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_EPOCH_MASK 0xffffff00UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_EPOCH_SFT 8
-};
-
-/* hwrm_async_event_cmpl_error_report_thermal (size:128b/16B) */
-struct hwrm_async_event_cmpl_error_report_thermal {
- __le16 type;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_ID_ERROR_REPORT 0x45UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_ID_ERROR_REPORT
- __le32 event_data2;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK 0xff00UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT 8
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_V 0x1UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_THERMAL_EVENT 0x5UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_THERMAL_EVENT
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK 0x700UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SFT 8
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN (0x0UL << 8)
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL (0x1UL << 8)
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL (0x2UL << 8)
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN (0x3UL << 8)
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR 0x800UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_DECREASING (0x0UL << 11)
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING (0x1UL << 11)
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING
-};
-
-/* hwrm_func_reset_input (size:192b/24B) */
-struct hwrm_func_reset_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_RESET_REQ_ENABLES_VF_ID_VALID 0x1UL
- __le16 vf_id;
- u8 func_reset_level;
- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETALL 0x0UL
- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME 0x1UL
- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETCHILDREN 0x2UL
- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF 0x3UL
- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_LAST FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF
- u8 unused_0;
-};
-
-/* hwrm_func_reset_output (size:128b/16B) */
-struct hwrm_func_reset_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_getfid_input (size:192b/24B) */
-struct hwrm_func_getfid_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_GETFID_REQ_ENABLES_PCI_ID 0x1UL
- __le16 pci_id;
- u8 unused_0[2];
-};
-
-/* hwrm_func_getfid_output (size:128b/16B) */
-struct hwrm_func_getfid_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 fid;
- u8 unused_0[5];
- u8 valid;
-};
-
-/* hwrm_func_vf_alloc_input (size:192b/24B) */
-struct hwrm_func_vf_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_VF_ALLOC_REQ_ENABLES_FIRST_VF_ID 0x1UL
- __le16 first_vf_id;
- __le16 num_vfs;
-};
-
-/* hwrm_func_vf_alloc_output (size:128b/16B) */
-struct hwrm_func_vf_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 first_vf_id;
- u8 unused_0[5];
- u8 valid;
-};
-
-/* hwrm_func_vf_free_input (size:192b/24B) */
-struct hwrm_func_vf_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_VF_FREE_REQ_ENABLES_FIRST_VF_ID 0x1UL
- __le16 first_vf_id;
- __le16 num_vfs;
-};
-
-/* hwrm_func_vf_free_output (size:128b/16B) */
-struct hwrm_func_vf_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_vf_cfg_input (size:576b/72B) */
-struct hwrm_func_vf_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_VF_CFG_REQ_ENABLES_MTU 0x1UL
- #define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN 0x2UL
- #define FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4UL
- #define FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x8UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x10UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x20UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS 0x40UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS 0x80UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS 0x100UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS 0x200UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x400UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x800UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_KTLS_TX_KEY_CTXS 0x1000UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_KTLS_RX_KEY_CTXS 0x2000UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_QUIC_TX_KEY_CTXS 0x4000UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_QUIC_RX_KEY_CTXS 0x8000UL
- __le16 mtu;
- __le16 guest_vlan;
- __le16 async_event_cr;
- u8 dflt_mac_addr[6];
- __le32 flags;
- #define FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST 0x1UL
- #define FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST 0x2UL
- #define FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST 0x4UL
- #define FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST 0x8UL
- #define FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST 0x10UL
- #define FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST 0x20UL
- #define FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x40UL
- #define FUNC_VF_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x80UL
- #define FUNC_VF_CFG_REQ_FLAGS_PPP_PUSH_MODE_ENABLE 0x100UL
- #define FUNC_VF_CFG_REQ_FLAGS_PPP_PUSH_MODE_DISABLE 0x200UL
- __le16 num_rsscos_ctxs;
- __le16 num_cmpl_rings;
- __le16 num_tx_rings;
- __le16 num_rx_rings;
- __le16 num_l2_ctxs;
- __le16 num_vnics;
- __le16 num_stat_ctxs;
- __le16 num_hw_ring_grps;
- __le32 num_ktls_tx_key_ctxs;
- __le32 num_ktls_rx_key_ctxs;
- __le16 num_msix;
- u8 unused[2];
- __le32 num_quic_tx_key_ctxs;
- __le32 num_quic_rx_key_ctxs;
-};
-
-/* hwrm_func_vf_cfg_output (size:128b/16B) */
-struct hwrm_func_vf_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_qcaps_input (size:192b/24B) */
-struct hwrm_func_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- u8 unused_0[6];
-};
-
-/* hwrm_func_qcaps_output (size:1088b/136B) */
-struct hwrm_func_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 fid;
- __le16 port_id;
- __le32 flags;
- #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL
- #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL
- #define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED 0x4UL
- #define FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED 0x8UL
- #define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED 0x10UL
- #define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED 0x20UL
- #define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED 0x40UL
- #define FUNC_QCAPS_RESP_FLAGS_TX_RING_RL_SUPPORTED 0x80UL
- #define FUNC_QCAPS_RESP_FLAGS_TX_BW_CFG_SUPPORTED 0x100UL
- #define FUNC_QCAPS_RESP_FLAGS_VF_TX_RING_RL_SUPPORTED 0x200UL
- #define FUNC_QCAPS_RESP_FLAGS_VF_BW_CFG_SUPPORTED 0x400UL
- #define FUNC_QCAPS_RESP_FLAGS_STD_TX_RING_MODE_SUPPORTED 0x800UL
- #define FUNC_QCAPS_RESP_FLAGS_GENEVE_TUN_FLAGS_SUPPORTED 0x1000UL
- #define FUNC_QCAPS_RESP_FLAGS_NVGRE_TUN_FLAGS_SUPPORTED 0x2000UL
- #define FUNC_QCAPS_RESP_FLAGS_GRE_TUN_FLAGS_SUPPORTED 0x4000UL
- #define FUNC_QCAPS_RESP_FLAGS_MPLS_TUN_FLAGS_SUPPORTED 0x8000UL
- #define FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED 0x10000UL
- #define FUNC_QCAPS_RESP_FLAGS_ADOPTED_PF_SUPPORTED 0x20000UL
- #define FUNC_QCAPS_RESP_FLAGS_ADMIN_PF_SUPPORTED 0x40000UL
- #define FUNC_QCAPS_RESP_FLAGS_LINK_ADMIN_STATUS_SUPPORTED 0x80000UL
- #define FUNC_QCAPS_RESP_FLAGS_WCB_PUSH_MODE 0x100000UL
- #define FUNC_QCAPS_RESP_FLAGS_DYNAMIC_TX_RING_ALLOC 0x200000UL
- #define FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE 0x400000UL
- #define FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE 0x800000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED 0x1000000UL
- #define FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD 0x2000000UL
- #define FUNC_QCAPS_RESP_FLAGS_NOTIFY_VF_DEF_VNIC_CHNG_SUPPORTED 0x4000000UL
- #define FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED 0x8000000UL
- #define FUNC_QCAPS_RESP_FLAGS_COREDUMP_CMD_SUPPORTED 0x10000000UL
- #define FUNC_QCAPS_RESP_FLAGS_CRASHDUMP_CMD_SUPPORTED 0x20000000UL
- #define FUNC_QCAPS_RESP_FLAGS_PFC_WD_STATS_SUPPORTED 0x40000000UL
- #define FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED 0x80000000UL
- u8 mac_address[6];
- __le16 max_rsscos_ctx;
- __le16 max_cmpl_rings;
- __le16 max_tx_rings;
- __le16 max_rx_rings;
- __le16 max_l2_ctxs;
- __le16 max_vnics;
- __le16 first_vf_id;
- __le16 max_vfs;
- __le16 max_stat_ctx;
- __le32 max_encap_records;
- __le32 max_decap_records;
- __le32 max_tx_em_flows;
- __le32 max_tx_wm_flows;
- __le32 max_rx_em_flows;
- __le32 max_rx_wm_flows;
- __le32 max_mcast_filters;
- __le32 max_flow_id;
- __le32 max_hw_ring_grps;
- __le16 max_sp_tx_rings;
- __le16 max_msix_vfs;
- __le32 flags_ext;
- #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_MARK_SUPPORTED 0x1UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_STATS_SUPPORTED 0x2UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED 0x4UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT 0x8UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_PROXY_MODE_SUPPORT 0x10UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_PROXY_SRC_INTF_OVERRIDE_SUPPORT 0x20UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_SCHQ_SUPPORTED 0x40UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_PPP_PUSH_MODE_SUPPORTED 0x80UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_EVB_MODE_CFG_NOT_SUPPORTED 0x100UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_SOC_SPD_SUPPORTED 0x200UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED 0x400UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_FAST_RESET_CAPABLE 0x800UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_METADATA_CFG_CAPABLE 0x1000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_NVM_OPTION_ACTION_SUPPORTED 0x2000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_BD_METADATA_SUPPORTED 0x4000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_ECHO_REQUEST_SUPPORTED 0x8000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_NPAR_1_2_SUPPORTED 0x10000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PTM_SUPPORTED 0x20000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED 0x40000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_VF_CFG_ASYNC_FOR_PF_SUPPORTED 0x80000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_PARTITION_BW_SUPPORTED 0x100000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED 0x200000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_KTLS_SUPPORTED 0x400000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_EP_RATE_CONTROL 0x800000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_MIN_BW_SUPPORTED 0x1000000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP 0x2000000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED 0x4000000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_REQUIRED 0x8000000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED 0x10000000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_DBR_PACING_SUPPORTED 0x20000000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_HW_DBR_DROP_RECOV_SUPPORTED 0x40000000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_DISABLE_CQ_OVERFLOW_DETECTION_SUPPORTED 0x80000000UL
- u8 max_schqs;
- u8 mpc_chnls_cap;
- #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TCE 0x1UL
- #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_RCE 0x2UL
- #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TE_CFA 0x4UL
- #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_RE_CFA 0x8UL
- #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_PRIMATE 0x10UL
- __le16 max_key_ctxs_alloc;
- __le32 flags_ext2;
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED 0x1UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_QUIC_SUPPORTED 0x2UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_KDNET_SUPPORTED 0x4UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_EXT_SUPPORTED 0x8UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_SW_DBR_DROP_RECOVERY_SUPPORTED 0x10UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_GENERIC_STATS_SUPPORTED 0x20UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED 0x40UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_SYNCE_SUPPORTED 0x80UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_V0_SUPPORTED 0x100UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED 0x200UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_HW_LAG_SUPPORTED 0x400UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_ON_CHIP_CTX_SUPPORTED 0x800UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_STEERING_TAG_SUPPORTED 0x1000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_ENHANCED_VF_SCALE_SUPPORTED 0x2000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_KEY_XID_PARTITION_SUPPORTED 0x4000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_CONCURRENT_KTLS_QUIC_SUPPORTED 0x8000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_SCHQ_CROSS_TC_CAP_SUPPORTED 0x10000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_SCHQ_PER_TC_CAP_SUPPORTED 0x20000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_SCHQ_PER_TC_RESERVATION_SUPPORTED 0x40000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_DB_ERROR_STATS_SUPPORTED 0x80000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_ROCE_VF_RESOURCE_MGMT_SUPPORTED 0x100000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_UDCC_SUPPORTED 0x200000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_TIMED_TX_SO_TXTIME_SUPPORTED 0x400000UL
- __le16 tunnel_disable_flag;
- #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_VXLAN 0x1UL
- #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NGE 0x2UL
- #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NVGRE 0x4UL
- #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_L2GRE 0x8UL
- #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_GRE 0x10UL
- #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_IPINIP 0x20UL
- #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_MPLS 0x40UL
- #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_PPPOE 0x80UL
- __le16 xid_partition_cap;
- #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_KTLS_TKC 0x1UL
- #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_KTLS_RKC 0x2UL
- #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_QUIC_TKC 0x4UL
- #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_QUIC_RKC 0x8UL
- u8 device_serial_number[8];
- __le16 ctxs_per_partition;
- u8 unused_2[2];
- __le32 roce_vf_max_av;
- __le32 roce_vf_max_cq;
- __le32 roce_vf_max_mrw;
- __le32 roce_vf_max_qp;
- __le32 roce_vf_max_srq;
- __le32 roce_vf_max_gid;
- u8 unused_3[3];
- u8 valid;
-};
-
-/* hwrm_func_qcfg_input (size:192b/24B) */
-struct hwrm_func_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- u8 unused_0[6];
-};
-
-/* hwrm_func_qcfg_output (size:1280b/160B) */
-struct hwrm_func_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 fid;
- __le16 port_id;
- __le16 vlan;
- __le16 flags;
- #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_MAGICPKT_ENABLED 0x1UL
- #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_BMP_ENABLED 0x2UL
- #define FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED 0x4UL
- #define FUNC_QCFG_RESP_FLAGS_STD_TX_RING_MODE_ENABLED 0x8UL
- #define FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED 0x10UL
- #define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL
- #define FUNC_QCFG_RESP_FLAGS_TRUSTED_VF 0x40UL
- #define FUNC_QCFG_RESP_FLAGS_SECURE_MODE_ENABLED 0x80UL
- #define FUNC_QCFG_RESP_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x100UL
- #define FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED 0x200UL
- #define FUNC_QCFG_RESP_FLAGS_PPP_PUSH_MODE_ENABLED 0x400UL
- #define FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED 0x800UL
- #define FUNC_QCFG_RESP_FLAGS_FAST_RESET_ALLOWED 0x1000UL
- #define FUNC_QCFG_RESP_FLAGS_MULTI_ROOT 0x2000UL
- #define FUNC_QCFG_RESP_FLAGS_ENABLE_RDMA_SRIOV 0x4000UL
- u8 mac_address[6];
- __le16 pci_id;
- __le16 alloc_rsscos_ctx;
- __le16 alloc_cmpl_rings;
- __le16 alloc_tx_rings;
- __le16 alloc_rx_rings;
- __le16 alloc_l2_ctx;
- __le16 alloc_vnics;
- __le16 admin_mtu;
- __le16 mru;
- __le16 stat_ctx_id;
- u8 port_partition_type;
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_SPF 0x0UL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_MPFS 0x1UL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0 0x2UL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 0x3UL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 0x4UL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_2 0x5UL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN 0xffUL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_LAST FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN
- u8 port_pf_cnt;
- #define FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL 0x0UL
- #define FUNC_QCFG_RESP_PORT_PF_CNT_LAST FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL
- __le16 dflt_vnic_id;
- __le16 max_mtu_configured;
- __le32 min_bw;
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT 0
- #define FUNC_QCFG_RESP_MIN_BW_SCALE 0x10000000UL
- #define FUNC_QCFG_RESP_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define FUNC_QCFG_RESP_MIN_BW_SCALE_LAST FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 max_bw;
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_SFT 0
- #define FUNC_QCFG_RESP_MAX_BW_SCALE 0x10000000UL
- #define FUNC_QCFG_RESP_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define FUNC_QCFG_RESP_MAX_BW_SCALE_LAST FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 evb_mode;
- #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB 0x0UL
- #define FUNC_QCFG_RESP_EVB_MODE_VEB 0x1UL
- #define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL
- #define FUNC_QCFG_RESP_EVB_MODE_LAST FUNC_QCFG_RESP_EVB_MODE_VEPA
- u8 options;
- #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_MASK 0x3UL
- #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SFT 0
- #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL
- #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL
- #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_LAST FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128
- #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_MASK 0xcUL
- #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_SFT 2
- #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN (0x0UL << 2)
- #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_FORCED_UP (0x1UL << 2)
- #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_AUTO (0x2UL << 2)
- #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_LAST FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_AUTO
- #define FUNC_QCFG_RESP_OPTIONS_RSVD_MASK 0xf0UL
- #define FUNC_QCFG_RESP_OPTIONS_RSVD_SFT 4
- __le16 alloc_vfs;
- __le32 alloc_mcast_filters;
- __le32 alloc_hw_ring_grps;
- __le16 alloc_sp_tx_rings;
- __le16 alloc_stat_ctx;
- __le16 alloc_msix;
- __le16 registered_vfs;
- __le16 l2_doorbell_bar_size_kb;
- u8 active_endpoints;
- u8 always_1;
- __le32 reset_addr_poll;
- __le16 legacy_l2_db_size_kb;
- __le16 svif_info;
- #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_MASK 0x7fffUL
- #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_SFT 0
- #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_VALID 0x8000UL
- u8 mpc_chnls;
- #define FUNC_QCFG_RESP_MPC_CHNLS_TCE_ENABLED 0x1UL
- #define FUNC_QCFG_RESP_MPC_CHNLS_RCE_ENABLED 0x2UL
- #define FUNC_QCFG_RESP_MPC_CHNLS_TE_CFA_ENABLED 0x4UL
- #define FUNC_QCFG_RESP_MPC_CHNLS_RE_CFA_ENABLED 0x8UL
- #define FUNC_QCFG_RESP_MPC_CHNLS_PRIMATE_ENABLED 0x10UL
- u8 db_page_size;
- #define FUNC_QCFG_RESP_DB_PAGE_SIZE_4KB 0x0UL
- #define FUNC_QCFG_RESP_DB_PAGE_SIZE_8KB 0x1UL
- #define FUNC_QCFG_RESP_DB_PAGE_SIZE_16KB 0x2UL
- #define FUNC_QCFG_RESP_DB_PAGE_SIZE_32KB 0x3UL
- #define FUNC_QCFG_RESP_DB_PAGE_SIZE_64KB 0x4UL
- #define FUNC_QCFG_RESP_DB_PAGE_SIZE_128KB 0x5UL
- #define FUNC_QCFG_RESP_DB_PAGE_SIZE_256KB 0x6UL
- #define FUNC_QCFG_RESP_DB_PAGE_SIZE_512KB 0x7UL
- #define FUNC_QCFG_RESP_DB_PAGE_SIZE_1MB 0x8UL
- #define FUNC_QCFG_RESP_DB_PAGE_SIZE_2MB 0x9UL
- #define FUNC_QCFG_RESP_DB_PAGE_SIZE_4MB 0xaUL
- #define FUNC_QCFG_RESP_DB_PAGE_SIZE_LAST FUNC_QCFG_RESP_DB_PAGE_SIZE_4MB
- u8 unused_2[2];
- __le32 partition_min_bw;
- #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_SFT 0
- #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE 0x10000000UL
- #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_LAST FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_BYTES
- #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100
- __le32 partition_max_bw;
- #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_SFT 0
- #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE 0x10000000UL
- #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_LAST FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_BYTES
- #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100
- __le16 host_mtu;
- u8 unused_3[2];
- u8 unused_4[2];
- u8 port_kdnet_mode;
- #define FUNC_QCFG_RESP_PORT_KDNET_MODE_DISABLED 0x0UL
- #define FUNC_QCFG_RESP_PORT_KDNET_MODE_ENABLED 0x1UL
- #define FUNC_QCFG_RESP_PORT_KDNET_MODE_LAST FUNC_QCFG_RESP_PORT_KDNET_MODE_ENABLED
- u8 kdnet_pcie_function;
- __le16 port_kdnet_fid;
- u8 unused_5[2];
- __le32 num_ktls_tx_key_ctxs;
- __le32 num_ktls_rx_key_ctxs;
- u8 lag_id;
- u8 parif;
- u8 fw_lag_id;
- u8 unused_6;
- __le32 num_quic_tx_key_ctxs;
- __le32 num_quic_rx_key_ctxs;
- __le32 roce_max_av_per_vf;
- __le32 roce_max_cq_per_vf;
- __le32 roce_max_mrw_per_vf;
- __le32 roce_max_qp_per_vf;
- __le32 roce_max_srq_per_vf;
- __le32 roce_max_gid_per_vf;
- __le16 xid_partition_cfg;
- u8 unused_7;
- u8 valid;
-};
-
-/* hwrm_func_cfg_input (size:1280b/160B) */
-struct hwrm_func_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- __le16 num_msix;
- __le32 flags;
- #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE 0x1UL
- #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE 0x2UL
- #define FUNC_CFG_REQ_FLAGS_RSVD_MASK 0x1fcUL
- #define FUNC_CFG_REQ_FLAGS_RSVD_SFT 2
- #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_ENABLE 0x200UL
- #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_DISABLE 0x400UL
- #define FUNC_CFG_REQ_FLAGS_VIRT_MAC_PERSIST 0x800UL
- #define FUNC_CFG_REQ_FLAGS_NO_AUTOCLEAR_STATISTIC 0x1000UL
- #define FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST 0x2000UL
- #define FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST 0x4000UL
- #define FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST 0x8000UL
- #define FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST 0x10000UL
- #define FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST 0x20000UL
- #define FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST 0x40000UL
- #define FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x80000UL
- #define FUNC_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x100000UL
- #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE 0x200000UL
- #define FUNC_CFG_REQ_FLAGS_DYNAMIC_TX_RING_ALLOC 0x400000UL
- #define FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST 0x800000UL
- #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE 0x1000000UL
- #define FUNC_CFG_REQ_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x2000000UL
- #define FUNC_CFG_REQ_FLAGS_HOT_RESET_IF_EN_DIS 0x4000000UL
- #define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_ENABLE 0x8000000UL
- #define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_DISABLE 0x10000000UL
- #define FUNC_CFG_REQ_FLAGS_BD_METADATA_ENABLE 0x20000000UL
- #define FUNC_CFG_REQ_FLAGS_BD_METADATA_DISABLE 0x40000000UL
- __le32 enables;
- #define FUNC_CFG_REQ_ENABLES_ADMIN_MTU 0x1UL
- #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
- #define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x4UL
- #define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x8UL
- #define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS 0x10UL
- #define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS 0x20UL
- #define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS 0x40UL
- #define FUNC_CFG_REQ_ENABLES_NUM_VNICS 0x80UL
- #define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x100UL
- #define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x200UL
- #define FUNC_CFG_REQ_ENABLES_DFLT_VLAN 0x400UL
- #define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR 0x800UL
- #define FUNC_CFG_REQ_ENABLES_MIN_BW 0x1000UL
- #define FUNC_CFG_REQ_ENABLES_MAX_BW 0x2000UL
- #define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4000UL
- #define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE 0x8000UL
- #define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS 0x10000UL
- #define FUNC_CFG_REQ_ENABLES_EVB_MODE 0x20000UL
- #define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL
- #define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL
- #define FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE 0x100000UL
- #define FUNC_CFG_REQ_ENABLES_NUM_MSIX 0x200000UL
- #define FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE 0x400000UL
- #define FUNC_CFG_REQ_ENABLES_HOT_RESET_IF_SUPPORT 0x800000UL
- #define FUNC_CFG_REQ_ENABLES_SCHQ_ID 0x1000000UL
- #define FUNC_CFG_REQ_ENABLES_MPC_CHNLS 0x2000000UL
- #define FUNC_CFG_REQ_ENABLES_PARTITION_MIN_BW 0x4000000UL
- #define FUNC_CFG_REQ_ENABLES_PARTITION_MAX_BW 0x8000000UL
- #define FUNC_CFG_REQ_ENABLES_TPID 0x10000000UL
- #define FUNC_CFG_REQ_ENABLES_HOST_MTU 0x20000000UL
- #define FUNC_CFG_REQ_ENABLES_KTLS_TX_KEY_CTXS 0x40000000UL
- #define FUNC_CFG_REQ_ENABLES_KTLS_RX_KEY_CTXS 0x80000000UL
- __le16 admin_mtu;
- __le16 mru;
- __le16 num_rsscos_ctxs;
- __le16 num_cmpl_rings;
- __le16 num_tx_rings;
- __le16 num_rx_rings;
- __le16 num_l2_ctxs;
- __le16 num_vnics;
- __le16 num_stat_ctxs;
- __le16 num_hw_ring_grps;
- u8 dflt_mac_addr[6];
- __le16 dflt_vlan;
- __be32 dflt_ip_addr[4];
- __le32 min_bw;
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_SFT 0
- #define FUNC_CFG_REQ_MIN_BW_SCALE 0x10000000UL
- #define FUNC_CFG_REQ_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define FUNC_CFG_REQ_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define FUNC_CFG_REQ_MIN_BW_SCALE_LAST FUNC_CFG_REQ_MIN_BW_SCALE_BYTES
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 max_bw;
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_SFT 0
- #define FUNC_CFG_REQ_MAX_BW_SCALE 0x10000000UL
- #define FUNC_CFG_REQ_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define FUNC_CFG_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define FUNC_CFG_REQ_MAX_BW_SCALE_LAST FUNC_CFG_REQ_MAX_BW_SCALE_BYTES
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
- __le16 async_event_cr;
- u8 vlan_antispoof_mode;
- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_NOCHECK 0x0UL
- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN 0x1UL
- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE 0x2UL
- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN 0x3UL
- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_LAST FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN
- u8 allowed_vlan_pris;
- u8 evb_mode;
- #define FUNC_CFG_REQ_EVB_MODE_NO_EVB 0x0UL
- #define FUNC_CFG_REQ_EVB_MODE_VEB 0x1UL
- #define FUNC_CFG_REQ_EVB_MODE_VEPA 0x2UL
- #define FUNC_CFG_REQ_EVB_MODE_LAST FUNC_CFG_REQ_EVB_MODE_VEPA
- u8 options;
- #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_MASK 0x3UL
- #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SFT 0
- #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL
- #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL
- #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_LAST FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128
- #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_MASK 0xcUL
- #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_SFT 2
- #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN (0x0UL << 2)
- #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_UP (0x1UL << 2)
- #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO (0x2UL << 2)
- #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_LAST FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO
- #define FUNC_CFG_REQ_OPTIONS_RSVD_MASK 0xf0UL
- #define FUNC_CFG_REQ_OPTIONS_RSVD_SFT 4
- __le16 num_mcast_filters;
- __le16 schq_id;
- __le16 mpc_chnls;
- #define FUNC_CFG_REQ_MPC_CHNLS_TCE_ENABLE 0x1UL
- #define FUNC_CFG_REQ_MPC_CHNLS_TCE_DISABLE 0x2UL
- #define FUNC_CFG_REQ_MPC_CHNLS_RCE_ENABLE 0x4UL
- #define FUNC_CFG_REQ_MPC_CHNLS_RCE_DISABLE 0x8UL
- #define FUNC_CFG_REQ_MPC_CHNLS_TE_CFA_ENABLE 0x10UL
- #define FUNC_CFG_REQ_MPC_CHNLS_TE_CFA_DISABLE 0x20UL
- #define FUNC_CFG_REQ_MPC_CHNLS_RE_CFA_ENABLE 0x40UL
- #define FUNC_CFG_REQ_MPC_CHNLS_RE_CFA_DISABLE 0x80UL
- #define FUNC_CFG_REQ_MPC_CHNLS_PRIMATE_ENABLE 0x100UL
- #define FUNC_CFG_REQ_MPC_CHNLS_PRIMATE_DISABLE 0x200UL
- __le32 partition_min_bw;
- #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_SFT 0
- #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE 0x10000000UL
- #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_LAST FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_BYTES
- #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100
- __le32 partition_max_bw;
- #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_SFT 0
- #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE 0x10000000UL
- #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_LAST FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_BYTES
- #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100
- __be16 tpid;
- __le16 host_mtu;
- __le32 flags2;
- #define FUNC_CFG_REQ_FLAGS2_KTLS_KEY_CTX_ASSETS_TEST 0x1UL
- #define FUNC_CFG_REQ_FLAGS2_QUIC_KEY_CTX_ASSETS_TEST 0x2UL
- __le32 enables2;
- #define FUNC_CFG_REQ_ENABLES2_KDNET 0x1UL
- #define FUNC_CFG_REQ_ENABLES2_DB_PAGE_SIZE 0x2UL
- #define FUNC_CFG_REQ_ENABLES2_QUIC_TX_KEY_CTXS 0x4UL
- #define FUNC_CFG_REQ_ENABLES2_QUIC_RX_KEY_CTXS 0x8UL
- #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_AV_PER_VF 0x10UL
- #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_CQ_PER_VF 0x20UL
- #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_MRW_PER_VF 0x40UL
- #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_QP_PER_VF 0x80UL
- #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_SRQ_PER_VF 0x100UL
- #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_GID_PER_VF 0x200UL
- #define FUNC_CFG_REQ_ENABLES2_XID_PARTITION_CFG 0x400UL
- u8 port_kdnet_mode;
- #define FUNC_CFG_REQ_PORT_KDNET_MODE_DISABLED 0x0UL
- #define FUNC_CFG_REQ_PORT_KDNET_MODE_ENABLED 0x1UL
- #define FUNC_CFG_REQ_PORT_KDNET_MODE_LAST FUNC_CFG_REQ_PORT_KDNET_MODE_ENABLED
- u8 db_page_size;
- #define FUNC_CFG_REQ_DB_PAGE_SIZE_4KB 0x0UL
- #define FUNC_CFG_REQ_DB_PAGE_SIZE_8KB 0x1UL
- #define FUNC_CFG_REQ_DB_PAGE_SIZE_16KB 0x2UL
- #define FUNC_CFG_REQ_DB_PAGE_SIZE_32KB 0x3UL
- #define FUNC_CFG_REQ_DB_PAGE_SIZE_64KB 0x4UL
- #define FUNC_CFG_REQ_DB_PAGE_SIZE_128KB 0x5UL
- #define FUNC_CFG_REQ_DB_PAGE_SIZE_256KB 0x6UL
- #define FUNC_CFG_REQ_DB_PAGE_SIZE_512KB 0x7UL
- #define FUNC_CFG_REQ_DB_PAGE_SIZE_1MB 0x8UL
- #define FUNC_CFG_REQ_DB_PAGE_SIZE_2MB 0x9UL
- #define FUNC_CFG_REQ_DB_PAGE_SIZE_4MB 0xaUL
- #define FUNC_CFG_REQ_DB_PAGE_SIZE_LAST FUNC_CFG_REQ_DB_PAGE_SIZE_4MB
- u8 unused_1[2];
- __le32 num_ktls_tx_key_ctxs;
- __le32 num_ktls_rx_key_ctxs;
- __le32 num_quic_tx_key_ctxs;
- __le32 num_quic_rx_key_ctxs;
- __le32 roce_max_av_per_vf;
- __le32 roce_max_cq_per_vf;
- __le32 roce_max_mrw_per_vf;
- __le32 roce_max_qp_per_vf;
- __le32 roce_max_srq_per_vf;
- __le32 roce_max_gid_per_vf;
- __le16 xid_partition_cfg;
- #define FUNC_CFG_REQ_XID_PARTITION_CFG_KTLS_TKC 0x1UL
- #define FUNC_CFG_REQ_XID_PARTITION_CFG_KTLS_RKC 0x2UL
- #define FUNC_CFG_REQ_XID_PARTITION_CFG_QUIC_TKC 0x4UL
- #define FUNC_CFG_REQ_XID_PARTITION_CFG_QUIC_RKC 0x8UL
- __le16 unused_2;
-};
-
-/* hwrm_func_cfg_output (size:128b/16B) */
-struct hwrm_func_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_cfg_cmd_err (size:64b/8B) */
-struct hwrm_func_cfg_cmd_err {
- u8 code;
- #define FUNC_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define FUNC_CFG_CMD_ERR_CODE_PARTITION_MIN_BW_RANGE 0x1UL
- #define FUNC_CFG_CMD_ERR_CODE_PARTITION_MIN_MORE_THAN_MAX 0x2UL
- #define FUNC_CFG_CMD_ERR_CODE_PARTITION_MIN_BW_UNSUPPORTED 0x3UL
- #define FUNC_CFG_CMD_ERR_CODE_PARTITION_BW_PERCENT 0x4UL
- #define FUNC_CFG_CMD_ERR_CODE_LAST FUNC_CFG_CMD_ERR_CODE_PARTITION_BW_PERCENT
- u8 unused_0[7];
-};
-
-/* hwrm_func_qstats_input (size:192b/24B) */
-struct hwrm_func_qstats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- u8 flags;
- #define FUNC_QSTATS_REQ_FLAGS_ROCE_ONLY 0x1UL
- #define FUNC_QSTATS_REQ_FLAGS_COUNTER_MASK 0x2UL
- #define FUNC_QSTATS_REQ_FLAGS_L2_ONLY 0x4UL
- u8 unused_0[5];
-};
-
-/* hwrm_func_qstats_output (size:1408b/176B) */
-struct hwrm_func_qstats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 tx_ucast_pkts;
- __le64 tx_mcast_pkts;
- __le64 tx_bcast_pkts;
- __le64 tx_discard_pkts;
- __le64 tx_drop_pkts;
- __le64 tx_ucast_bytes;
- __le64 tx_mcast_bytes;
- __le64 tx_bcast_bytes;
- __le64 rx_ucast_pkts;
- __le64 rx_mcast_pkts;
- __le64 rx_bcast_pkts;
- __le64 rx_discard_pkts;
- __le64 rx_drop_pkts;
- __le64 rx_ucast_bytes;
- __le64 rx_mcast_bytes;
- __le64 rx_bcast_bytes;
- __le64 rx_agg_pkts;
- __le64 rx_agg_bytes;
- __le64 rx_agg_events;
- __le64 rx_agg_aborts;
- u8 clear_seq;
- u8 unused_0[6];
- u8 valid;
-};
-
-/* hwrm_func_qstats_ext_input (size:256b/32B) */
-struct hwrm_func_qstats_ext_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- u8 flags;
- #define FUNC_QSTATS_EXT_REQ_FLAGS_ROCE_ONLY 0x1UL
- #define FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x2UL
- u8 unused_0[1];
- __le32 enables;
- #define FUNC_QSTATS_EXT_REQ_ENABLES_SCHQ_ID 0x1UL
- __le16 schq_id;
- __le16 traffic_class;
- u8 unused_1[4];
-};
-
-/* hwrm_func_qstats_ext_output (size:1536b/192B) */
-struct hwrm_func_qstats_ext_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 rx_ucast_pkts;
- __le64 rx_mcast_pkts;
- __le64 rx_bcast_pkts;
- __le64 rx_discard_pkts;
- __le64 rx_error_pkts;
- __le64 rx_ucast_bytes;
- __le64 rx_mcast_bytes;
- __le64 rx_bcast_bytes;
- __le64 tx_ucast_pkts;
- __le64 tx_mcast_pkts;
- __le64 tx_bcast_pkts;
- __le64 tx_error_pkts;
- __le64 tx_discard_pkts;
- __le64 tx_ucast_bytes;
- __le64 tx_mcast_bytes;
- __le64 tx_bcast_bytes;
- __le64 rx_tpa_eligible_pkt;
- __le64 rx_tpa_eligible_bytes;
- __le64 rx_tpa_pkt;
- __le64 rx_tpa_bytes;
- __le64 rx_tpa_errors;
- __le64 rx_tpa_events;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_clr_stats_input (size:192b/24B) */
-struct hwrm_func_clr_stats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- u8 unused_0[6];
-};
-
-/* hwrm_func_clr_stats_output (size:128b/16B) */
-struct hwrm_func_clr_stats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_vf_resc_free_input (size:192b/24B) */
-struct hwrm_func_vf_resc_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 vf_id;
- u8 unused_0[6];
-};
-
-/* hwrm_func_vf_resc_free_output (size:128b/16B) */
-struct hwrm_func_vf_resc_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_drv_rgtr_input (size:896b/112B) */
-struct hwrm_func_drv_rgtr_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE 0x1UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE 0x4UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE 0x8UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT 0x10UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT 0x20UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT 0x40UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_FAST_RESET_SUPPORT 0x80UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_RSS_STRICT_HASH_TYPE_SUPPORT 0x100UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT 0x200UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_ASYM_QUEUE_CFG_SUPPORT 0x400UL
- __le32 enables;
- #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
- #define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
- #define FUNC_DRV_RGTR_REQ_ENABLES_TIMESTAMP 0x4UL
- #define FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD 0x8UL
- #define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD 0x10UL
- __le16 os_type;
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_UNKNOWN 0x0UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER 0x1UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_MSDOS 0xeUL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_WINDOWS 0x12UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_SOLARIS 0x1dUL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX 0x24UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_FREEBSD 0x2aUL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_ESXI 0x68UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN864 0x73UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN2012R2 0x74UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI 0x8000UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_LAST FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI
- u8 ver_maj_8b;
- u8 ver_min_8b;
- u8 ver_upd_8b;
- u8 unused_0[3];
- __le32 timestamp;
- u8 unused_1[4];
- __le32 vf_req_fwd[8];
- __le32 async_event_fwd[8];
- __le16 ver_maj;
- __le16 ver_min;
- __le16 ver_upd;
- __le16 ver_patch;
-};
-
-/* hwrm_func_drv_rgtr_output (size:128b/16B) */
-struct hwrm_func_drv_rgtr_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 flags;
- #define FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED 0x1UL
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_func_drv_unrgtr_input (size:192b/24B) */
-struct hwrm_func_drv_unrgtr_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define FUNC_DRV_UNRGTR_REQ_FLAGS_PREPARE_FOR_SHUTDOWN 0x1UL
- u8 unused_0[4];
-};
-
-/* hwrm_func_drv_unrgtr_output (size:128b/16B) */
-struct hwrm_func_drv_unrgtr_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_buf_rgtr_input (size:1024b/128B) */
-struct hwrm_func_buf_rgtr_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_BUF_RGTR_REQ_ENABLES_VF_ID 0x1UL
- #define FUNC_BUF_RGTR_REQ_ENABLES_ERR_BUF_ADDR 0x2UL
- __le16 vf_id;
- __le16 req_buf_num_pages;
- __le16 req_buf_page_size;
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_16B 0x4UL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K 0xcUL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K 0xdUL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K 0x10UL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M 0x15UL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M 0x16UL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G 0x1eUL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_LAST FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G
- __le16 req_buf_len;
- __le16 resp_buf_len;
- u8 unused_0[2];
- __le64 req_buf_page_addr0;
- __le64 req_buf_page_addr1;
- __le64 req_buf_page_addr2;
- __le64 req_buf_page_addr3;
- __le64 req_buf_page_addr4;
- __le64 req_buf_page_addr5;
- __le64 req_buf_page_addr6;
- __le64 req_buf_page_addr7;
- __le64 req_buf_page_addr8;
- __le64 req_buf_page_addr9;
- __le64 error_buf_addr;
- __le64 resp_buf_addr;
-};
-
-/* hwrm_func_buf_rgtr_output (size:128b/16B) */
-struct hwrm_func_buf_rgtr_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_drv_qver_input (size:192b/24B) */
-struct hwrm_func_drv_qver_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 reserved;
- __le16 fid;
- u8 driver_type;
- #define FUNC_DRV_QVER_REQ_DRIVER_TYPE_L2 0x0UL
- #define FUNC_DRV_QVER_REQ_DRIVER_TYPE_ROCE 0x1UL
- #define FUNC_DRV_QVER_REQ_DRIVER_TYPE_LAST FUNC_DRV_QVER_REQ_DRIVER_TYPE_ROCE
- u8 unused_0;
-};
-
-/* hwrm_func_drv_qver_output (size:256b/32B) */
-struct hwrm_func_drv_qver_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 os_type;
- #define FUNC_DRV_QVER_RESP_OS_TYPE_UNKNOWN 0x0UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_OTHER 0x1UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_MSDOS 0xeUL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_WINDOWS 0x12UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_SOLARIS 0x1dUL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_LINUX 0x24UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_FREEBSD 0x2aUL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_ESXI 0x68UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN864 0x73UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN2012R2 0x74UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_UEFI 0x8000UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_LAST FUNC_DRV_QVER_RESP_OS_TYPE_UEFI
- u8 ver_maj_8b;
- u8 ver_min_8b;
- u8 ver_upd_8b;
- u8 unused_0[3];
- __le16 ver_maj;
- __le16 ver_min;
- __le16 ver_upd;
- __le16 ver_patch;
- u8 unused_1[7];
- u8 valid;
-};
-
-/* hwrm_func_resource_qcaps_input (size:192b/24B) */
-struct hwrm_func_resource_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- u8 unused_0[6];
-};
-
-/* hwrm_func_resource_qcaps_output (size:704b/88B) */
-struct hwrm_func_resource_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 max_vfs;
- __le16 max_msix;
- __le16 vf_reservation_strategy;
- #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MAXIMAL 0x0UL
- #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL 0x1UL
- #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL_STATIC 0x2UL
- #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_LAST FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL_STATIC
- __le16 min_rsscos_ctx;
- __le16 max_rsscos_ctx;
- __le16 min_cmpl_rings;
- __le16 max_cmpl_rings;
- __le16 min_tx_rings;
- __le16 max_tx_rings;
- __le16 min_rx_rings;
- __le16 max_rx_rings;
- __le16 min_l2_ctxs;
- __le16 max_l2_ctxs;
- __le16 min_vnics;
- __le16 max_vnics;
- __le16 min_stat_ctx;
- __le16 max_stat_ctx;
- __le16 min_hw_ring_grps;
- __le16 max_hw_ring_grps;
- __le16 max_tx_scheduler_inputs;
- __le16 flags;
- #define FUNC_RESOURCE_QCAPS_RESP_FLAGS_MIN_GUARANTEED 0x1UL
- __le16 min_msix;
- __le32 min_ktls_tx_key_ctxs;
- __le32 max_ktls_tx_key_ctxs;
- __le32 min_ktls_rx_key_ctxs;
- __le32 max_ktls_rx_key_ctxs;
- __le32 min_quic_tx_key_ctxs;
- __le32 max_quic_tx_key_ctxs;
- __le32 min_quic_rx_key_ctxs;
- __le32 max_quic_rx_key_ctxs;
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_func_vf_resource_cfg_input (size:704b/88B) */
-struct hwrm_func_vf_resource_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 vf_id;
- __le16 max_msix;
- __le16 min_rsscos_ctx;
- __le16 max_rsscos_ctx;
- __le16 min_cmpl_rings;
- __le16 max_cmpl_rings;
- __le16 min_tx_rings;
- __le16 max_tx_rings;
- __le16 min_rx_rings;
- __le16 max_rx_rings;
- __le16 min_l2_ctxs;
- __le16 max_l2_ctxs;
- __le16 min_vnics;
- __le16 max_vnics;
- __le16 min_stat_ctx;
- __le16 max_stat_ctx;
- __le16 min_hw_ring_grps;
- __le16 max_hw_ring_grps;
- __le16 flags;
- #define FUNC_VF_RESOURCE_CFG_REQ_FLAGS_MIN_GUARANTEED 0x1UL
- __le16 min_msix;
- __le32 min_ktls_tx_key_ctxs;
- __le32 max_ktls_tx_key_ctxs;
- __le32 min_ktls_rx_key_ctxs;
- __le32 max_ktls_rx_key_ctxs;
- __le32 min_quic_tx_key_ctxs;
- __le32 max_quic_tx_key_ctxs;
- __le32 min_quic_rx_key_ctxs;
- __le32 max_quic_rx_key_ctxs;
-};
-
-/* hwrm_func_vf_resource_cfg_output (size:384b/48B) */
-struct hwrm_func_vf_resource_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 reserved_rsscos_ctx;
- __le16 reserved_cmpl_rings;
- __le16 reserved_tx_rings;
- __le16 reserved_rx_rings;
- __le16 reserved_l2_ctxs;
- __le16 reserved_vnics;
- __le16 reserved_stat_ctx;
- __le16 reserved_hw_ring_grps;
- __le32 reserved_ktls_tx_key_ctxs;
- __le32 reserved_ktls_rx_key_ctxs;
- __le32 reserved_quic_tx_key_ctxs;
- __le32 reserved_quic_rx_key_ctxs;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_backing_store_qcaps_input (size:128b/16B) */
-struct hwrm_func_backing_store_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
-};
-
-/* hwrm_func_backing_store_qcaps_output (size:832b/104B) */
-struct hwrm_func_backing_store_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 qp_max_entries;
- __le16 qp_min_qp1_entries;
- __le16 qp_max_l2_entries;
- __le16 qp_entry_size;
- __le16 srq_max_l2_entries;
- __le32 srq_max_entries;
- __le16 srq_entry_size;
- __le16 cq_max_l2_entries;
- __le32 cq_max_entries;
- __le16 cq_entry_size;
- __le16 vnic_max_vnic_entries;
- __le16 vnic_max_ring_table_entries;
- __le16 vnic_entry_size;
- __le32 stat_max_entries;
- __le16 stat_entry_size;
- __le16 tqm_entry_size;
- __le32 tqm_min_entries_per_ring;
- __le32 tqm_max_entries_per_ring;
- __le32 mrav_max_entries;
- __le16 mrav_entry_size;
- __le16 tim_entry_size;
- __le32 tim_max_entries;
- __le16 mrav_num_entries_units;
- u8 tqm_entries_multiple;
- u8 ctx_kind_initializer;
- __le16 ctx_init_mask;
- #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_QP 0x1UL
- #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_SRQ 0x2UL
- #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_CQ 0x4UL
- #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_VNIC 0x8UL
- #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_STAT 0x10UL
- #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_MRAV 0x20UL
- #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_TKC 0x40UL
- #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_RKC 0x80UL
- u8 qp_init_offset;
- u8 srq_init_offset;
- u8 cq_init_offset;
- u8 vnic_init_offset;
- u8 tqm_fp_rings_count;
- u8 stat_init_offset;
- u8 mrav_init_offset;
- u8 tqm_fp_rings_count_ext;
- u8 tkc_init_offset;
- u8 rkc_init_offset;
- __le16 tkc_entry_size;
- __le16 rkc_entry_size;
- __le32 tkc_max_entries;
- __le32 rkc_max_entries;
- __le16 fast_qpmd_qp_num_entries;
- u8 rsvd1[5];
- u8 valid;
-};
-
-/* tqm_fp_ring_cfg (size:128b/16B) */
-struct tqm_fp_ring_cfg {
- u8 tqm_ring_pg_size_tqm_ring_lvl;
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_MASK 0xfUL
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_SFT 0
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_0 0x0UL
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_1 0x1UL
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_2 0x2UL
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LAST TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_2
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_MASK 0xf0UL
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_SFT 4
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_LAST TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_1G
- u8 unused[3];
- __le32 tqm_ring_num_entries;
- __le64 tqm_ring_page_dir;
-};
-
-/* hwrm_func_backing_store_cfg_input (size:2688b/336B) */
-struct hwrm_func_backing_store_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define FUNC_BACKING_STORE_CFG_REQ_FLAGS_PREBOOT_MODE 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT 0x2UL
- __le32 enables;
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ 0x4UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC 0x8UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT 0x10UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP 0x20UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING0 0x40UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING1 0x80UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING2 0x100UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING3 0x200UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING4 0x400UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING5 0x800UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING6 0x1000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING7 0x2000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV 0x4000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM 0x8000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING8 0x10000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING9 0x20000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING10 0x40000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TKC 0x80000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_RKC 0x100000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD 0x200000UL
- u8 qpc_pg_size_qpc_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_1G
- u8 srq_pg_size_srq_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_1G
- u8 cq_pg_size_cq_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_1G
- u8 vnic_pg_size_vnic_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_1G
- u8 stat_pg_size_stat_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_1G
- u8 tqm_sp_pg_size_tqm_sp_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_1G
- u8 tqm_ring0_pg_size_tqm_ring0_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_1G
- u8 tqm_ring1_pg_size_tqm_ring1_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_1G
- u8 tqm_ring2_pg_size_tqm_ring2_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_1G
- u8 tqm_ring3_pg_size_tqm_ring3_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_1G
- u8 tqm_ring4_pg_size_tqm_ring4_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_1G
- u8 tqm_ring5_pg_size_tqm_ring5_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_1G
- u8 tqm_ring6_pg_size_tqm_ring6_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_1G
- u8 tqm_ring7_pg_size_tqm_ring7_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_1G
- u8 mrav_pg_size_mrav_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_1G
- u8 tim_pg_size_tim_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_1G
- __le64 qpc_page_dir;
- __le64 srq_page_dir;
- __le64 cq_page_dir;
- __le64 vnic_page_dir;
- __le64 stat_page_dir;
- __le64 tqm_sp_page_dir;
- __le64 tqm_ring0_page_dir;
- __le64 tqm_ring1_page_dir;
- __le64 tqm_ring2_page_dir;
- __le64 tqm_ring3_page_dir;
- __le64 tqm_ring4_page_dir;
- __le64 tqm_ring5_page_dir;
- __le64 tqm_ring6_page_dir;
- __le64 tqm_ring7_page_dir;
- __le64 mrav_page_dir;
- __le64 tim_page_dir;
- __le32 qp_num_entries;
- __le32 srq_num_entries;
- __le32 cq_num_entries;
- __le32 stat_num_entries;
- __le32 tqm_sp_num_entries;
- __le32 tqm_ring0_num_entries;
- __le32 tqm_ring1_num_entries;
- __le32 tqm_ring2_num_entries;
- __le32 tqm_ring3_num_entries;
- __le32 tqm_ring4_num_entries;
- __le32 tqm_ring5_num_entries;
- __le32 tqm_ring6_num_entries;
- __le32 tqm_ring7_num_entries;
- __le32 mrav_num_entries;
- __le32 tim_num_entries;
- __le16 qp_num_qp1_entries;
- __le16 qp_num_l2_entries;
- __le16 qp_entry_size;
- __le16 srq_num_l2_entries;
- __le16 srq_entry_size;
- __le16 cq_num_l2_entries;
- __le16 cq_entry_size;
- __le16 vnic_num_vnic_entries;
- __le16 vnic_num_ring_table_entries;
- __le16 vnic_entry_size;
- __le16 stat_entry_size;
- __le16 tqm_entry_size;
- __le16 mrav_entry_size;
- __le16 tim_entry_size;
- u8 tqm_ring8_pg_size_tqm_ring_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_1G
- u8 ring8_unused[3];
- __le32 tqm_ring8_num_entries;
- __le64 tqm_ring8_page_dir;
- u8 tqm_ring9_pg_size_tqm_ring_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_1G
- u8 ring9_unused[3];
- __le32 tqm_ring9_num_entries;
- __le64 tqm_ring9_page_dir;
- u8 tqm_ring10_pg_size_tqm_ring_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_1G
- u8 ring10_unused[3];
- __le32 tqm_ring10_num_entries;
- __le64 tqm_ring10_page_dir;
- __le32 tkc_num_entries;
- __le32 rkc_num_entries;
- __le64 tkc_page_dir;
- __le64 rkc_page_dir;
- __le16 tkc_entry_size;
- __le16 rkc_entry_size;
- u8 tkc_pg_size_tkc_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_1G
- u8 rkc_pg_size_rkc_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_1G
- __le16 qp_num_fast_qpmd_entries;
-};
-
-/* hwrm_func_backing_store_cfg_output (size:128b/16B) */
-struct hwrm_func_backing_store_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_error_recovery_qcfg_input (size:192b/24B) */
-struct hwrm_error_recovery_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 unused_0[8];
-};
-
-/* hwrm_error_recovery_qcfg_output (size:1664b/208B) */
-struct hwrm_error_recovery_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 flags;
- #define ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST 0x1UL
- #define ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU 0x2UL
- __le32 driver_polling_freq;
- __le32 master_func_wait_period;
- __le32 normal_func_wait_period;
- __le32 master_func_wait_period_after_reset;
- __le32 max_bailout_time_after_reset;
- __le32 fw_health_status_reg;
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_MASK 0x3UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_SFT 0
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_PCIE_CFG 0x0UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_GRC 0x1UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_BAR0 0x2UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_BAR1 0x3UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_BAR1
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_MASK 0xfffffffcUL
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SFT 2
- __le32 fw_heartbeat_reg;
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_MASK 0x3UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_SFT 0
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_PCIE_CFG 0x0UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_GRC 0x1UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_BAR0 0x2UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_BAR1 0x3UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_BAR1
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_MASK 0xfffffffcUL
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SFT 2
- __le32 fw_reset_cnt_reg;
- #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_MASK 0x3UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_SFT 0
- #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_PCIE_CFG 0x0UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_GRC 0x1UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_BAR0 0x2UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_BAR1 0x3UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_BAR1
- #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_MASK 0xfffffffcUL
- #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SFT 2
- __le32 reset_inprogress_reg;
- #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_MASK 0x3UL
- #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_SFT 0
- #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_PCIE_CFG 0x0UL
- #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_GRC 0x1UL
- #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_BAR0 0x2UL
- #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_BAR1 0x3UL
- #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_BAR1
- #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_MASK 0xfffffffcUL
- #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SFT 2
- __le32 reset_inprogress_reg_mask;
- u8 unused_0[3];
- u8 reg_array_cnt;
- __le32 reset_reg[16];
- #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_MASK 0x3UL
- #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_SFT 0
- #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_PCIE_CFG 0x0UL
- #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_GRC 0x1UL
- #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_BAR0 0x2UL
- #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_BAR1 0x3UL
- #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_BAR1
- #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_MASK 0xfffffffcUL
- #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SFT 2
- __le32 reset_reg_val[16];
- u8 delay_after_reset[16];
- __le32 err_recovery_cnt_reg;
- #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_MASK 0x3UL
- #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_SFT 0
- #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_PCIE_CFG 0x0UL
- #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_GRC 0x1UL
- #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR0 0x2UL
- #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR1 0x3UL
- #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR1
- #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_MASK 0xfffffffcUL
- #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SFT 2
- u8 unused_1[3];
- u8 valid;
-};
-
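/*
 * Editorial note (not part of the original diff): each *_REG field above
 * appears to pack an address-space selector into the low two bits and a
 * 4-byte-aligned register offset into the remaining bits, as the
 * ADDR_SPACE_MASK / ADDR_MASK / ADDR_SFT defines suggest.  A minimal
 * host-order sketch of decoding one such word; le32_to_cpu() handling and
 * the actual register access are driver-specific and omitted.
 */
static inline void decode_recovery_reg(u32 reg, u32 *space, u32 *offset)
{
	/* low 2 bits select PCIE_CFG / GRC / BAR0 / BAR1 */
	*space = reg & ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_MASK;
	/* remaining bits: 4-byte-aligned register offset within that space */
	*offset = reg & ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_MASK;
}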
-/* hwrm_func_echo_response_input (size:192b/24B) */
-struct hwrm_func_echo_response_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 event_data1;
- __le32 event_data2;
-};
-
-/* hwrm_func_echo_response_output (size:128b/16B) */
-struct hwrm_func_echo_response_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_ptp_pin_qcfg_input (size:192b/24B) */
-struct hwrm_func_ptp_pin_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 unused_0[8];
-};
-
-/* hwrm_func_ptp_pin_qcfg_output (size:128b/16B) */
-struct hwrm_func_ptp_pin_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 num_pins;
- u8 state;
- #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN0_ENABLED 0x1UL
- #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN1_ENABLED 0x2UL
- #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN2_ENABLED 0x4UL
- #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN3_ENABLED 0x8UL
- u8 pin0_usage;
- #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_NONE 0x0UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_PPS_IN 0x1UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_PPS_OUT 0x2UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_SYNC_IN 0x3UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_SYNC_OUT 0x4UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_SYNC_OUT
- u8 pin1_usage;
- #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_NONE 0x0UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_PPS_IN 0x1UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_PPS_OUT 0x2UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_SYNC_IN 0x3UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_SYNC_OUT 0x4UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_SYNC_OUT
- u8 pin2_usage;
- #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_NONE 0x0UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_PPS_IN 0x1UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_PPS_OUT 0x2UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNC_IN 0x3UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNC_OUT 0x4UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT
- u8 pin3_usage;
- #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_NONE 0x0UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_PPS_IN 0x1UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_PPS_OUT 0x2UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNC_IN 0x3UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNC_OUT 0x4UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT
- u8 unused_0;
- u8 valid;
-};
-
-/* hwrm_func_ptp_pin_cfg_input (size:256b/32B) */
-struct hwrm_func_ptp_pin_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_STATE 0x1UL
- #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_USAGE 0x2UL
- #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN1_STATE 0x4UL
- #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN1_USAGE 0x8UL
- #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN2_STATE 0x10UL
- #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN2_USAGE 0x20UL
- #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN3_STATE 0x40UL
- #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN3_USAGE 0x80UL
- u8 pin0_state;
- #define FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_DISABLED 0x0UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_ENABLED 0x1UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_ENABLED
- u8 pin0_usage;
- #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_NONE 0x0UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_PPS_IN 0x1UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_PPS_OUT 0x2UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_SYNC_IN 0x3UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_SYNC_OUT 0x4UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_SYNC_OUT
- u8 pin1_state;
- #define FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_DISABLED 0x0UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_ENABLED 0x1UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_ENABLED
- u8 pin1_usage;
- #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_NONE 0x0UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_PPS_IN 0x1UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_PPS_OUT 0x2UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_SYNC_IN 0x3UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_SYNC_OUT 0x4UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_SYNC_OUT
- u8 pin2_state;
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_DISABLED 0x0UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_ENABLED 0x1UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_ENABLED
- u8 pin2_usage;
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_NONE 0x0UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_PPS_IN 0x1UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_PPS_OUT 0x2UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNC_IN 0x3UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNC_OUT 0x4UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT
- u8 pin3_state;
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_DISABLED 0x0UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_ENABLED 0x1UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_ENABLED
- u8 pin3_usage;
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_NONE 0x0UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_PPS_IN 0x1UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_PPS_OUT 0x2UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNC_IN 0x3UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNC_OUT 0x4UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT
- u8 unused_0[4];
-};
-
-/* hwrm_func_ptp_pin_cfg_output (size:128b/16B) */
-struct hwrm_func_ptp_pin_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
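/*
 * Editorial note (not part of the original diff): a minimal sketch of how a
 * caller might fill hwrm_func_ptp_pin_cfg_input to enable TSIO pin 0 as a
 * PPS output, using only the fields and defines shown above.  How the
 * request is allocated, queued and sent to firmware is driver-specific and
 * intentionally left out; the helper name is illustrative only.
 */
static void ptp_pin0_pps_out(struct hwrm_func_ptp_pin_cfg_input *req)
{
	/* tell firmware which request fields are meaningful */
	req->enables = cpu_to_le32(FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_STATE |
				   FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_USAGE);
	req->pin0_state = FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_ENABLED;
	req->pin0_usage = FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_PPS_OUT;
}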
-/* hwrm_func_ptp_cfg_input (size:384b/48B) */
-struct hwrm_func_ptp_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 enables;
- #define FUNC_PTP_CFG_REQ_ENABLES_PTP_PPS_EVENT 0x1UL
- #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_DLL_SOURCE 0x2UL
- #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_DLL_PHASE 0x4UL
- #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PERIOD 0x8UL
- #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_UP 0x10UL
- #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PHASE 0x20UL
- #define FUNC_PTP_CFG_REQ_ENABLES_PTP_SET_TIME 0x40UL
- u8 ptp_pps_event;
- #define FUNC_PTP_CFG_REQ_PTP_PPS_EVENT_INTERNAL 0x1UL
- #define FUNC_PTP_CFG_REQ_PTP_PPS_EVENT_EXTERNAL 0x2UL
- u8 ptp_freq_adj_dll_source;
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_NONE 0x0UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_0 0x1UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_1 0x2UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_2 0x3UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_3 0x4UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_0 0x5UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_1 0x6UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_2 0x7UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_3 0x8UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_INVALID 0xffUL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_LAST FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_INVALID
- u8 ptp_freq_adj_dll_phase;
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_NONE 0x0UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_4K 0x1UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_8K 0x2UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_10M 0x3UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_25M 0x4UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_LAST FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_25M
- u8 unused_0[3];
- __le32 ptp_freq_adj_ext_period;
- __le32 ptp_freq_adj_ext_up;
- __le32 ptp_freq_adj_ext_phase_lower;
- __le32 ptp_freq_adj_ext_phase_upper;
- __le64 ptp_set_time;
-};
-
-/* hwrm_func_ptp_cfg_output (size:128b/16B) */
-struct hwrm_func_ptp_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_ptp_ts_query_input (size:192b/24B) */
-struct hwrm_func_ptp_ts_query_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define FUNC_PTP_TS_QUERY_REQ_FLAGS_PPS_TIME 0x1UL
- #define FUNC_PTP_TS_QUERY_REQ_FLAGS_PTM_TIME 0x2UL
- u8 unused_0[4];
-};
-
-/* hwrm_func_ptp_ts_query_output (size:320b/40B) */
-struct hwrm_func_ptp_ts_query_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 pps_event_ts;
- __le64 ptm_local_ts;
- __le64 ptm_system_ts;
- __le32 ptm_link_delay;
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_func_ptp_ext_cfg_input (size:256b/32B) */
-struct hwrm_func_ptp_ext_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 enables;
- #define FUNC_PTP_EXT_CFG_REQ_ENABLES_PHC_MASTER_FID 0x1UL
- #define FUNC_PTP_EXT_CFG_REQ_ENABLES_PHC_SEC_FID 0x2UL
- #define FUNC_PTP_EXT_CFG_REQ_ENABLES_PHC_SEC_MODE 0x4UL
- #define FUNC_PTP_EXT_CFG_REQ_ENABLES_FAILOVER_TIMER 0x8UL
- __le16 phc_master_fid;
- __le16 phc_sec_fid;
- u8 phc_sec_mode;
- #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_SWITCH 0x0UL
- #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_ALL 0x1UL
- #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_PF_ONLY 0x2UL
- #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_LAST FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_PF_ONLY
- u8 unused_0;
- __le32 failover_timer;
- u8 unused_1[4];
-};
-
-/* hwrm_func_ptp_ext_cfg_output (size:128b/16B) */
-struct hwrm_func_ptp_ext_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_ptp_ext_qcfg_input (size:192b/24B) */
-struct hwrm_func_ptp_ext_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 unused_0[8];
-};
-
-/* hwrm_func_ptp_ext_qcfg_output (size:256b/32B) */
-struct hwrm_func_ptp_ext_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 phc_master_fid;
- __le16 phc_sec_fid;
- __le16 phc_active_fid0;
- __le16 phc_active_fid1;
- __le32 last_failover_event;
- __le16 from_fid;
- __le16 to_fid;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_backing_store_cfg_v2_input (size:448b/56B) */
-struct hwrm_func_backing_store_cfg_v2_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 type;
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TKC 0x13UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RKC 0x14UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QUIC_TKC 0x1aUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QUIC_RKC 0x1bUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_XID_PARTITION 0x1dUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID 0xffffUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID
- __le16 instance;
- __le32 flags;
- #define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_PREBOOT_MODE 0x1UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE 0x2UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_EXTEND 0x4UL
- __le64 page_dir;
- __le32 num_entries;
- __le16 entry_size;
- u8 page_size_pbl_level;
- #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_SFT 0
- #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LAST FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_2
- #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_1G
- u8 subtype_valid_cnt;
- __le32 split_entry_0;
- __le32 split_entry_1;
- __le32 split_entry_2;
- __le32 split_entry_3;
-};
-
-/* hwrm_func_backing_store_cfg_v2_output (size:128b/16B) */
-struct hwrm_func_backing_store_cfg_v2_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 rsvd0[7];
- u8 valid;
-};
-
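/*
 * Editorial note (not part of the original diff): page_size_pbl_level packs
 * two sub-fields into one byte - the PBL (page table) depth in bits 0-3 and
 * the backing-store page size in bits 4-7 - as the *_PBL_LEVEL_* and
 * *_PAGE_SIZE_* defines above indicate.  A minimal sketch, assuming a
 * one-level PBL of 64K pages:
 */
static inline u8 bs_cfg_v2_pg_attr(void)
{
	/* PAGE_SIZE_* values are pre-shifted by 4, so a plain OR suffices */
	return FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_1 |
	       FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_64K;
}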
-/* hwrm_func_backing_store_qcfg_v2_input (size:192b/24B) */
-struct hwrm_func_backing_store_qcfg_v2_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 type;
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TKC 0x13UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RKC 0x14UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QUIC_TKC 0x1aUL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QUIC_RKC 0x1bUL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_XID_PARTITION_TABLE 0x1dUL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID 0xffffUL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID
- __le16 instance;
- u8 rsvd[4];
-};
-
-/* hwrm_func_backing_store_qcfg_v2_output (size:448b/56B) */
-struct hwrm_func_backing_store_qcfg_v2_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 type;
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TKC 0x13UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RKC 0x14UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QUIC_TKC 0x1aUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QUIC_RKC 0x1bUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_XID_PARTITION 0x1dUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID 0xffffUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID
- __le16 instance;
- __le32 flags;
- __le64 page_dir;
- __le32 num_entries;
- u8 page_size_pbl_level;
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_MASK 0xfUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_SFT 0
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_2
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_SFT 4
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_1G
- u8 subtype_valid_cnt;
- u8 rsvd[2];
- __le32 split_entry_0;
- __le32 split_entry_1;
- __le32 split_entry_2;
- __le32 split_entry_3;
- u8 rsvd2[7];
- u8 valid;
-};
-
-/* qpc_split_entries (size:128b/16B) */
-struct qpc_split_entries {
- __le32 qp_num_l2_entries;
- __le32 qp_num_qp1_entries;
- __le32 qp_num_fast_qpmd_entries;
- __le32 rsvd;
-};
-
-/* srq_split_entries (size:128b/16B) */
-struct srq_split_entries {
- __le32 srq_num_l2_entries;
- __le32 rsvd;
- __le32 rsvd2[2];
-};
-
-/* cq_split_entries (size:128b/16B) */
-struct cq_split_entries {
- __le32 cq_num_l2_entries;
- __le32 rsvd;
- __le32 rsvd2[2];
-};
-
-/* vnic_split_entries (size:128b/16B) */
-struct vnic_split_entries {
- __le32 vnic_num_vnic_entries;
- __le32 rsvd;
- __le32 rsvd2[2];
-};
-
-/* mrav_split_entries (size:128b/16B) */
-struct mrav_split_entries {
- __le32 mrav_num_av_entries;
- __le32 rsvd;
- __le32 rsvd2[2];
-};
-
-/* ts_split_entries (size:128b/16B) */
-struct ts_split_entries {
- __le32 region_num_entries;
- u8 tsid;
- u8 lkup_static_bkt_cnt_exp[2];
- u8 rsvd;
- __le32 rsvd2[2];
-};
-
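/*
 * Editorial note (not part of the original diff): the split_entry_0..3 words
 * in the backing-store v2 messages appear to be interpreted per context type
 * via the *_split_entries structures above (e.g. for the QP type they line
 * up with qpc_split_entries).  A hedged sketch of reading them that way; the
 * helper name is illustrative, and no endian conversion is needed since both
 * sides are little-endian 32-bit words.
 */
static void read_qp_splits(const struct hwrm_func_backing_store_qcfg_v2_output *resp,
			   struct qpc_split_entries *qpc)
{
	qpc->qp_num_l2_entries = resp->split_entry_0;
	qpc->qp_num_qp1_entries = resp->split_entry_1;
	qpc->qp_num_fast_qpmd_entries = resp->split_entry_2;
	qpc->rsvd = resp->split_entry_3;
}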
-/* hwrm_func_backing_store_qcaps_v2_input (size:192b/24B) */
-struct hwrm_func_backing_store_qcaps_v2_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 type;
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_KTLS_TKC 0x13UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_KTLS_RKC 0x14UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QUIC_TKC 0x1aUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QUIC_RKC 0x1bUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_XID_PARTITION 0x1dUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID 0xffffUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID
- u8 rsvd[6];
-};
-
-/* hwrm_func_backing_store_qcaps_v2_output (size:448b/56B) */
-struct hwrm_func_backing_store_qcaps_v2_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 type;
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_KTLS_TKC 0x13UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_KTLS_RKC 0x14UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SQ_DB_SHADOW 0x16UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RQ_DB_SHADOW 0x17UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ_DB_SHADOW 0x18UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ_DB_SHADOW 0x19UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QUIC_TKC 0x1aUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QUIC_RKC 0x1bUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_XID_PARTITION 0x1dUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID 0xffffUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID
- __le16 entry_size;
- __le32 flags;
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT 0x1UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID 0x2UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_DRIVER_MANAGED_MEMORY 0x4UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ROCE_QP_PSEUDO_STATIC_ALLOC 0x8UL
- __le32 instance_bit_map;
- u8 ctx_init_value;
- u8 ctx_init_offset;
- u8 entry_multiple;
- u8 rsvd;
- __le32 max_num_entries;
- __le32 min_num_entries;
- __le16 next_valid_type;
- u8 subtype_valid_cnt;
- u8 exact_cnt_bit_map;
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_0_EXACT 0x1UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_1_EXACT 0x2UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_2_EXACT 0x4UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_3_EXACT 0x8UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_UNUSED_MASK 0xf0UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_UNUSED_SFT 4
- __le32 split_entry_0;
- __le32 split_entry_1;
- __le32 split_entry_2;
- __le32 split_entry_3;
- u8 rsvd3[3];
- u8 valid;
-};
-
-/* hwrm_func_dbr_pacing_qcfg_input (size:128b/16B) */
-struct hwrm_func_dbr_pacing_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
-};
-
-/* hwrm_func_dbr_pacing_qcfg_output (size:512b/64B) */
-struct hwrm_func_dbr_pacing_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 flags;
- #define FUNC_DBR_PACING_QCFG_RESP_FLAGS_DBR_NQ_EVENT_ENABLED 0x1UL
- u8 unused_0[7];
- __le32 dbr_stat_db_fifo_reg;
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_MASK 0x3UL
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_SFT 0
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_PCIE_CFG 0x0UL
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_GRC 0x1UL
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR0 0x2UL
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR1 0x3UL
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_LAST FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR1
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_MASK 0xfffffffcUL
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SFT 2
- __le32 dbr_stat_db_fifo_reg_watermark_mask;
- u8 dbr_stat_db_fifo_reg_watermark_shift;
- u8 unused_1[3];
- __le32 dbr_stat_db_fifo_reg_fifo_room_mask;
- u8 dbr_stat_db_fifo_reg_fifo_room_shift;
- u8 unused_2[3];
- __le32 dbr_throttling_aeq_arm_reg;
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_MASK 0x3UL
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_SFT 0
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_PCIE_CFG 0x0UL
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_GRC 0x1UL
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR0 0x2UL
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR1 0x3UL
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_LAST FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR1
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_MASK 0xfffffffcUL
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SFT 2
- u8 dbr_throttling_aeq_arm_reg_val;
- u8 unused_3[3];
- __le32 dbr_stat_db_max_fifo_depth;
- __le32 primary_nq_id;
- __le32 pacing_threshold;
- u8 unused_4[7];
- u8 valid;
-};
-
-/* hwrm_func_drv_if_change_input (size:192b/24B) */
-struct hwrm_func_drv_if_change_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP 0x1UL
- __le32 unused;
-};
-
-/* hwrm_func_drv_if_change_output (size:128b/16B) */
-struct hwrm_func_drv_if_change_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 flags;
- #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE 0x1UL
- #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE 0x2UL
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_port_phy_cfg_input (size:512b/64B) */
-struct hwrm_port_phy_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL
- #define PORT_PHY_CFG_REQ_FLAGS_DEPRECATED 0x2UL
- #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL
- #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE 0x100UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE 0x200UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE 0x400UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN 0x4000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_ENABLE 0x8000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_DISABLE 0x10000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_IEEE_ENABLE 0x20000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_IEEE_DISABLE 0x40000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_1XN_ENABLE 0x80000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_1XN_DISABLE 0x100000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_IEEE_ENABLE 0x200000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_IEEE_DISABLE 0x400000UL
- __le32 enables;
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE 0x4UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED 0x8UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK 0x10UL
- #define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED 0x20UL
- #define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL
- #define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL
- #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL
- #define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK 0x200UL
- #define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL
- #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED 0x800UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK 0x1000UL
- #define PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2 0x2000UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEEDS2_MASK 0x4000UL
- __le16 port_id;
- __le16 force_link_speed;
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB 0x1UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB 0xaUL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2GB 0x14UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB 0x19UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB 0x64UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB 0xc8UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB 0xfaUL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB 0x190UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB 0xffffUL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB
- u8 auto_mode;
- #define PORT_PHY_CFG_REQ_AUTO_MODE_NONE 0x0UL
- #define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS 0x1UL
- #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED 0x2UL
- #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW 0x3UL
- #define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK 0x4UL
- #define PORT_PHY_CFG_REQ_AUTO_MODE_LAST PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK
- u8 auto_duplex;
- #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF 0x0UL
- #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL 0x1UL
- #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH 0x2UL
- #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_LAST PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH
- u8 auto_pause;
- #define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX 0x1UL
- #define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX 0x2UL
- #define PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
- u8 mgmt_flag;
- #define PORT_PHY_CFG_REQ_MGMT_FLAG_LINK_RELEASE 0x1UL
- #define PORT_PHY_CFG_REQ_MGMT_FLAG_MGMT_VALID 0x80UL
- __le16 auto_link_speed;
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB 0x1UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB 0xaUL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2GB 0x14UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB 0x19UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB 0x64UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB 0xc8UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB 0xfaUL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB 0x190UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB 0xffffUL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_LAST PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB
- __le16 auto_link_speed_mask;
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB 0x2UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GB 0x8UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2GB 0x10UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10GB 0x40UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_20GB 0x80UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_25GB 0x100UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_40GB 0x200UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_50GB 0x400UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100GB 0x800UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
- u8 wirespeed;
- #define PORT_PHY_CFG_REQ_WIRESPEED_OFF 0x0UL
- #define PORT_PHY_CFG_REQ_WIRESPEED_ON 0x1UL
- #define PORT_PHY_CFG_REQ_WIRESPEED_LAST PORT_PHY_CFG_REQ_WIRESPEED_ON
- u8 lpbk;
- #define PORT_PHY_CFG_REQ_LPBK_NONE 0x0UL
- #define PORT_PHY_CFG_REQ_LPBK_LOCAL 0x1UL
- #define PORT_PHY_CFG_REQ_LPBK_REMOTE 0x2UL
- #define PORT_PHY_CFG_REQ_LPBK_EXTERNAL 0x3UL
- #define PORT_PHY_CFG_REQ_LPBK_LAST PORT_PHY_CFG_REQ_LPBK_EXTERNAL
- u8 force_pause;
- #define PORT_PHY_CFG_REQ_FORCE_PAUSE_TX 0x1UL
- #define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX 0x2UL
- u8 unused_1;
- __le32 preemphasis;
- __le16 eee_link_speed_mask;
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_100MB 0x2UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_1GB 0x8UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_10GB 0x40UL
- __le16 force_pam4_link_speed;
- #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB 0x7d0UL
- #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB
- __le32 tx_lpi_timer;
- #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_MASK 0xffffffUL
- #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_SFT 0
- __le16 auto_link_pam4_speed_mask;
- #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_50G 0x1UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_100G 0x2UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_200G 0x4UL
- __le16 force_link_speeds2;
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_1GB 0xaUL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_10GB 0x64UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_25GB 0xfaUL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_40GB 0x190UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_50GB 0x1f4UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB 0x3e8UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_50GB_PAM4_56 0x1f5UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_56 0x3e9UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_56 0x7d1UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_56 0xfa1UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_112 0x3eaUL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_112 0x7d2UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_112 0xfa2UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_112
- __le16 auto_link_speeds2_mask;
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_1GB 0x1UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_10GB 0x2UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_25GB 0x4UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_40GB 0x8UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_50GB 0x10UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB 0x20UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_50GB_PAM4_56 0x40UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB_PAM4_56 0x80UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_200GB_PAM4_56 0x100UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_400GB_PAM4_56 0x200UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB_PAM4_112 0x400UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_200GB_PAM4_112 0x800UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_400GB_PAM4_112 0x1000UL
- u8 unused_2[6];
-};
-
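/*
 * Editorial note (not part of the original diff): a hedged sketch of
 * requesting a fixed 25G link with hwrm_port_phy_cfg_input, based only on
 * the field and flag names above.  Exact flag usage can vary with firmware
 * revision; PHY reset, autoneg restart and the request/response plumbing are
 * intentionally left out, and the helper name is illustrative.
 */
static void force_25g(struct hwrm_port_phy_cfg_input *req, u16 port_id)
{
	req->port_id = cpu_to_le16(port_id);
	/* FORCE selects the forced-speed path instead of autonegotiation */
	req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
	req->force_link_speed =
		cpu_to_le16(PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB);
}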
-/* hwrm_port_phy_cfg_output (size:128b/16B) */
-struct hwrm_port_phy_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_port_phy_cfg_cmd_err (size:64b/8B) */
-struct hwrm_port_phy_cfg_cmd_err {
- u8 code;
- #define PORT_PHY_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define PORT_PHY_CFG_CMD_ERR_CODE_ILLEGAL_SPEED 0x1UL
- #define PORT_PHY_CFG_CMD_ERR_CODE_RETRY 0x2UL
- #define PORT_PHY_CFG_CMD_ERR_CODE_LAST PORT_PHY_CFG_CMD_ERR_CODE_RETRY
- u8 unused_0[7];
-};
-
-/* hwrm_port_phy_qcfg_input (size:192b/24B) */
-struct hwrm_port_phy_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 unused_0[6];
-};
-
-/* hwrm_port_phy_qcfg_output (size:832b/104B) */
-struct hwrm_port_phy_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 link;
- #define PORT_PHY_QCFG_RESP_LINK_NO_LINK 0x0UL
- #define PORT_PHY_QCFG_RESP_LINK_SIGNAL 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_LINK 0x2UL
- #define PORT_PHY_QCFG_RESP_LINK_LAST PORT_PHY_QCFG_RESP_LINK_LINK
- u8 active_fec_signal_mode;
- #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK 0xfUL
- #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_SFT 0
- #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ 0x0UL
- #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4 0x1UL
- #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112 0x2UL
- #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_LAST PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112
- #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK 0xf0UL
- #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_SFT 4
- #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE (0x0UL << 4)
- #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE (0x1UL << 4)
- #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE (0x2UL << 4)
- #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE (0x3UL << 4)
- #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE (0x4UL << 4)
- #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE (0x5UL << 4)
- #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE (0x6UL << 4)
- #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_LAST PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE
- __le16 link_speed;
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB 0xaUL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_2GB 0x14UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB 0x19UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_10GB 0x64UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_20GB 0xc8UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB 0xfaUL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB 0x190UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_200GB 0x7d0UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_400GB 0xfa0UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_LINK_SPEED_10MB
- u8 duplex_cfg;
- #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_HALF 0x0UL
- #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL 0x1UL
- #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_LAST PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL
- u8 pause;
- #define PORT_PHY_QCFG_RESP_PAUSE_TX 0x1UL
- #define PORT_PHY_QCFG_RESP_PAUSE_RX 0x2UL
- __le16 support_speeds;
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MBHD 0x1UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB 0x2UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GBHD 0x4UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB 0x8UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB 0x10UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB 0x20UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB 0x40UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB 0x80UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB 0x100UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB 0x200UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB 0x400UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB 0x800UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD 0x1000UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB 0x2000UL
- __le16 force_link_speed;
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB 0x1UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB 0xaUL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2GB 0x14UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2_5GB 0x19UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10GB 0x64UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_20GB 0xc8UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB 0xfaUL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB 0x190UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB 0xffffUL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB
- u8 auto_mode;
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE 0x0UL
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS 0x1UL
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED 0x2UL
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW 0x3UL
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK 0x4UL
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_LAST PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK
- u8 auto_pause;
- #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX 0x1UL
- #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX 0x2UL
- #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
- __le16 auto_link_speed;
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB 0x1UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB 0xaUL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2GB 0x14UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2_5GB 0x19UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10GB 0x64UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_20GB 0xc8UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB 0xfaUL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB 0x190UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB 0xffffUL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB
- __le16 auto_link_speed_mask;
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB 0x2UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GB 0x8UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2GB 0x10UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10GB 0x40UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_20GB 0x80UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_25GB 0x100UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_40GB 0x200UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_50GB 0x400UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100GB 0x800UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
- u8 wirespeed;
- #define PORT_PHY_QCFG_RESP_WIRESPEED_OFF 0x0UL
- #define PORT_PHY_QCFG_RESP_WIRESPEED_ON 0x1UL
- #define PORT_PHY_QCFG_RESP_WIRESPEED_LAST PORT_PHY_QCFG_RESP_WIRESPEED_ON
- u8 lpbk;
- #define PORT_PHY_QCFG_RESP_LPBK_NONE 0x0UL
- #define PORT_PHY_QCFG_RESP_LPBK_LOCAL 0x1UL
- #define PORT_PHY_QCFG_RESP_LPBK_REMOTE 0x2UL
- #define PORT_PHY_QCFG_RESP_LPBK_EXTERNAL 0x3UL
- #define PORT_PHY_QCFG_RESP_LPBK_LAST PORT_PHY_QCFG_RESP_LPBK_EXTERNAL
- u8 force_pause;
- #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX 0x1UL
- #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX 0x2UL
- u8 module_status;
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NONE 0x0UL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX 0x1UL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG 0x2UL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN 0x3UL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED 0x4UL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_CURRENTFAULT 0x5UL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE 0xffUL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_LAST PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE
- __le32 preemphasis;
- u8 phy_maj;
- u8 phy_min;
- u8 phy_bld;
- u8 phy_type;
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_UNKNOWN 0x0UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR 0x1UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4 0x2UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR 0x3UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR 0x4UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2 0x5UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX 0x6UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR 0x7UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET 0x8UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE 0x9UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY 0xaUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L 0xbUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S 0xcUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N 0xdUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR 0xeUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4 0xfUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4 0x10UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4 0x11UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4 0x12UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10 0x13UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4 0x14UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4 0x15UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4 0x16UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4 0x17UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE 0x18UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET 0x19UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX 0x1aUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX 0x1bUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR4 0x1cUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR4 0x1dUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR4 0x1eUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4 0x1fUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASECR 0x20UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASESR 0x21UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASELR 0x22UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASEER 0x23UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR2 0x24UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR2 0x25UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR2 0x26UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER2 0x27UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR 0x28UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR 0x29UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR 0x2aUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER 0x2bUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR2 0x2cUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR2 0x2dUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR2 0x2eUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER2 0x2fUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR8 0x30UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR8 0x31UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR8 0x32UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER8 0x33UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR4 0x34UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR4 0x35UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR4 0x36UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4 0x37UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4
- u8 media_type;
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC 0x2UL
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE 0x3UL
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_LAST PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE
- u8 xcvr_pkg_type;
- #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL 0x1UL
- #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL 0x2UL
- #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL
- u8 eee_config_phy_addr;
- #define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK 0x1fUL
- #define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT 0
- #define PORT_PHY_QCFG_RESP_EEE_CONFIG_MASK 0xe0UL
- #define PORT_PHY_QCFG_RESP_EEE_CONFIG_SFT 5
- #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED 0x20UL
- #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE 0x40UL
- #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI 0x80UL
- u8 parallel_detect;
- #define PORT_PHY_QCFG_RESP_PARALLEL_DETECT 0x1UL
- __le16 link_partner_adv_speeds;
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MBHD 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MB 0x2UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GBHD 0x4UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GB 0x8UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2GB 0x10UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2_5GB 0x20UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10GB 0x40UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_20GB 0x80UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_25GB 0x100UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_40GB 0x200UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_50GB 0x400UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100GB 0x800UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MBHD 0x1000UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MB 0x2000UL
- u8 link_partner_adv_auto_mode;
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE 0x0UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED 0x2UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW 0x3UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK 0x4UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_LAST PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK
- u8 link_partner_adv_pause;
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX 0x2UL
- __le16 adv_eee_link_speed_mask;
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL
- __le16 link_partner_adv_eee_link_speed_mask;
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL
- __le32 xcvr_identifier_type_tx_lpi_timer;
- #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK 0xffffffUL
- #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_SFT 0
- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_MASK 0xff000000UL
- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFT 24
- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_UNKNOWN (0x0UL << 24)
- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFP (0x3UL << 24)
- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP (0xcUL << 24)
- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPPLUS (0xdUL << 24)
- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 (0x11UL << 24)
- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28
- __le16 fec_cfg;
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED 0x4UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_SUPPORTED 0x80UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_ENABLED 0x100UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_IEEE_SUPPORTED 0x200UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_IEEE_ENABLED 0x400UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_1XN_SUPPORTED 0x800UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_1XN_ENABLED 0x1000UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_IEEE_SUPPORTED 0x2000UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_IEEE_ENABLED 0x4000UL
- u8 duplex_state;
- #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF 0x0UL
- #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL 0x1UL
- #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_LAST PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL
- u8 option_flags;
- #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_MEDIA_AUTO_DETECT 0x1UL
- #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN 0x2UL
- #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_SPEEDS2_SUPPORTED 0x4UL
- char phy_vendor_name[16];
- char phy_vendor_partnumber[16];
- __le16 support_pam4_speeds;
- #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_50G 0x1UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_100G 0x2UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_200G 0x4UL
- __le16 force_pam4_link_speed;
- #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_200GB 0x7d0UL
- #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_200GB
- __le16 auto_pam4_link_speed_mask;
- #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_50G 0x1UL
- #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_100G 0x2UL
- #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_200G 0x4UL
- u8 link_partner_pam4_adv_speeds;
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_50GB 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_100GB 0x2UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_200GB 0x4UL
- u8 link_down_reason;
- #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_RF 0x1UL
- __le16 support_speeds2;
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_1GB 0x1UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_10GB 0x2UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_25GB 0x4UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_40GB 0x8UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_50GB 0x10UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB 0x20UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_50GB_PAM4_56 0x40UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB_PAM4_56 0x80UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_200GB_PAM4_56 0x100UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_400GB_PAM4_56 0x200UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB_PAM4_112 0x400UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_200GB_PAM4_112 0x800UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_400GB_PAM4_112 0x1000UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_800GB_PAM4_112 0x2000UL
- __le16 force_link_speeds2;
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_1GB 0xaUL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_10GB 0x64UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_25GB 0xfaUL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_40GB 0x190UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_50GB 0x1f4UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_100GB 0x3e8UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_50GB_PAM4_56 0x1f5UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_100GB_PAM4_56 0x3e9UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_200GB_PAM4_56 0x7d1UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_400GB_PAM4_56 0xfa1UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_100GB_PAM4_112 0x3eaUL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_200GB_PAM4_112 0x7d2UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_400GB_PAM4_112 0xfa2UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_800GB_PAM4_112 0x1f42UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_800GB_PAM4_112
- __le16 auto_link_speeds2;
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_1GB 0x1UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_10GB 0x2UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_25GB 0x4UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_40GB 0x8UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_50GB 0x10UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_100GB 0x20UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_50GB_PAM4_56 0x40UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_100GB_PAM4_56 0x80UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_200GB_PAM4_56 0x100UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_400GB_PAM4_56 0x200UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_100GB_PAM4_112 0x400UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_200GB_PAM4_112 0x800UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_400GB_PAM4_112 0x1000UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_800GB_PAM4_112 0x2000UL
- u8 active_lanes;
- u8 valid;
-};
-
-/* hwrm_port_mac_cfg_input (size:448b/56B) */
-struct hwrm_port_mac_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define PORT_MAC_CFG_REQ_FLAGS_MATCH_LINK 0x1UL
- #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_ENABLE 0x2UL
- #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE 0x4UL
- #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE 0x8UL
- #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE 0x10UL
- #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE 0x20UL
- #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x40UL
- #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE 0x80UL
- #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_ENABLE 0x100UL
- #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_DISABLE 0x200UL
- #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_DISABLE 0x400UL
- #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_DISABLE 0x800UL
- #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_DISABLE 0x1000UL
- #define PORT_MAC_CFG_REQ_FLAGS_PTP_ONE_STEP_TX_TS 0x2000UL
- #define PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE 0x4000UL
- #define PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE 0x8000UL
- __le32 enables;
- #define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL
- #define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL
- #define PORT_MAC_CFG_REQ_ENABLES_VLAN_PRI2COS_MAP_PRI 0x4UL
- #define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI 0x10UL
- #define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI 0x20UL
- #define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE 0x40UL
- #define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL
- #define PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG 0x100UL
- #define PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB 0x200UL
- #define PORT_MAC_CFG_REQ_ENABLES_PTP_ADJ_PHASE 0x400UL
- #define PORT_MAC_CFG_REQ_ENABLES_PTP_LOAD_CONTROL 0x800UL
- __le16 port_id;
- u8 ipg;
- u8 lpbk;
- #define PORT_MAC_CFG_REQ_LPBK_NONE 0x0UL
- #define PORT_MAC_CFG_REQ_LPBK_LOCAL 0x1UL
- #define PORT_MAC_CFG_REQ_LPBK_REMOTE 0x2UL
- #define PORT_MAC_CFG_REQ_LPBK_LAST PORT_MAC_CFG_REQ_LPBK_REMOTE
- u8 vlan_pri2cos_map_pri;
- u8 reserved1;
- u8 tunnel_pri2cos_map_pri;
- u8 dscp2pri_map_pri;
- __le16 rx_ts_capture_ptp_msg_type;
- __le16 tx_ts_capture_ptp_msg_type;
- u8 cos_field_cfg;
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_RSVD1 0x1UL
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_MASK 0x6UL
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_SFT 1
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST (0x0UL << 1)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER (0x1UL << 1)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST (0x2UL << 1)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 1)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK 0x18UL
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT 3
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST (0x0UL << 3)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER (0x1UL << 3)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST (0x2UL << 3)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 3)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_MASK 0xe0UL
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT 5
- u8 unused_0[3];
- __le32 ptp_freq_adj_ppb;
- u8 unused_1[3];
- u8 ptp_load_control;
- #define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_NONE 0x0UL
- #define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_IMMEDIATE 0x1UL
- #define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_PPS_EVENT 0x2UL
- #define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_LAST PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_PPS_EVENT
- __le64 ptp_adj_phase;
-};
-
-/* hwrm_port_mac_cfg_output (size:128b/16B) */
-struct hwrm_port_mac_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 mru;
- __le16 mtu;
- u8 ipg;
- u8 lpbk;
- #define PORT_MAC_CFG_RESP_LPBK_NONE 0x0UL
- #define PORT_MAC_CFG_RESP_LPBK_LOCAL 0x1UL
- #define PORT_MAC_CFG_RESP_LPBK_REMOTE 0x2UL
- #define PORT_MAC_CFG_RESP_LPBK_LAST PORT_MAC_CFG_RESP_LPBK_REMOTE
- u8 unused_0;
- u8 valid;
-};
-
-/* hwrm_port_mac_ptp_qcfg_input (size:192b/24B) */
-struct hwrm_port_mac_ptp_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 unused_0[6];
-};
-
-/* hwrm_port_mac_ptp_qcfg_output (size:704b/88B) */
-struct hwrm_port_mac_ptp_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 flags;
- #define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL
- #define PORT_MAC_PTP_QCFG_RESP_FLAGS_ONE_STEP_TX_TS 0x4UL
- #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x8UL
- #define PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK 0x10UL
- #define PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED 0x20UL
- #define PORT_MAC_PTP_QCFG_RESP_FLAGS_64B_PHC_TIME 0x40UL
- u8 unused_0[3];
- __le32 rx_ts_reg_off_lower;
- __le32 rx_ts_reg_off_upper;
- __le32 rx_ts_reg_off_seq_id;
- __le32 rx_ts_reg_off_src_id_0;
- __le32 rx_ts_reg_off_src_id_1;
- __le32 rx_ts_reg_off_src_id_2;
- __le32 rx_ts_reg_off_domain_id;
- __le32 rx_ts_reg_off_fifo;
- __le32 rx_ts_reg_off_fifo_adv;
- __le32 rx_ts_reg_off_granularity;
- __le32 tx_ts_reg_off_lower;
- __le32 tx_ts_reg_off_upper;
- __le32 tx_ts_reg_off_seq_id;
- __le32 tx_ts_reg_off_fifo;
- __le32 tx_ts_reg_off_granularity;
- __le32 ts_ref_clock_reg_lower;
- __le32 ts_ref_clock_reg_upper;
- u8 unused_1[7];
- u8 valid;
-};
-
-/* tx_port_stats (size:3264b/408B) */
-struct tx_port_stats {
- __le64 tx_64b_frames;
- __le64 tx_65b_127b_frames;
- __le64 tx_128b_255b_frames;
- __le64 tx_256b_511b_frames;
- __le64 tx_512b_1023b_frames;
- __le64 tx_1024b_1518b_frames;
- __le64 tx_good_vlan_frames;
- __le64 tx_1519b_2047b_frames;
- __le64 tx_2048b_4095b_frames;
- __le64 tx_4096b_9216b_frames;
- __le64 tx_9217b_16383b_frames;
- __le64 tx_good_frames;
- __le64 tx_total_frames;
- __le64 tx_ucast_frames;
- __le64 tx_mcast_frames;
- __le64 tx_bcast_frames;
- __le64 tx_pause_frames;
- __le64 tx_pfc_frames;
- __le64 tx_jabber_frames;
- __le64 tx_fcs_err_frames;
- __le64 tx_control_frames;
- __le64 tx_oversz_frames;
- __le64 tx_single_dfrl_frames;
- __le64 tx_multi_dfrl_frames;
- __le64 tx_single_coll_frames;
- __le64 tx_multi_coll_frames;
- __le64 tx_late_coll_frames;
- __le64 tx_excessive_coll_frames;
- __le64 tx_frag_frames;
- __le64 tx_err;
- __le64 tx_tagged_frames;
- __le64 tx_dbl_tagged_frames;
- __le64 tx_runt_frames;
- __le64 tx_fifo_underruns;
- __le64 tx_pfc_ena_frames_pri0;
- __le64 tx_pfc_ena_frames_pri1;
- __le64 tx_pfc_ena_frames_pri2;
- __le64 tx_pfc_ena_frames_pri3;
- __le64 tx_pfc_ena_frames_pri4;
- __le64 tx_pfc_ena_frames_pri5;
- __le64 tx_pfc_ena_frames_pri6;
- __le64 tx_pfc_ena_frames_pri7;
- __le64 tx_eee_lpi_events;
- __le64 tx_eee_lpi_duration;
- __le64 tx_llfc_logical_msgs;
- __le64 tx_hcfc_msgs;
- __le64 tx_total_collisions;
- __le64 tx_bytes;
- __le64 tx_xthol_frames;
- __le64 tx_stat_discard;
- __le64 tx_stat_error;
-};
-
-/* rx_port_stats (size:4224b/528B) */
-struct rx_port_stats {
- __le64 rx_64b_frames;
- __le64 rx_65b_127b_frames;
- __le64 rx_128b_255b_frames;
- __le64 rx_256b_511b_frames;
- __le64 rx_512b_1023b_frames;
- __le64 rx_1024b_1518b_frames;
- __le64 rx_good_vlan_frames;
- __le64 rx_1519b_2047b_frames;
- __le64 rx_2048b_4095b_frames;
- __le64 rx_4096b_9216b_frames;
- __le64 rx_9217b_16383b_frames;
- __le64 rx_total_frames;
- __le64 rx_ucast_frames;
- __le64 rx_mcast_frames;
- __le64 rx_bcast_frames;
- __le64 rx_fcs_err_frames;
- __le64 rx_ctrl_frames;
- __le64 rx_pause_frames;
- __le64 rx_pfc_frames;
- __le64 rx_unsupported_opcode_frames;
- __le64 rx_unsupported_da_pausepfc_frames;
- __le64 rx_wrong_sa_frames;
- __le64 rx_align_err_frames;
- __le64 rx_oor_len_frames;
- __le64 rx_code_err_frames;
- __le64 rx_false_carrier_frames;
- __le64 rx_ovrsz_frames;
- __le64 rx_jbr_frames;
- __le64 rx_mtu_err_frames;
- __le64 rx_match_crc_frames;
- __le64 rx_promiscuous_frames;
- __le64 rx_tagged_frames;
- __le64 rx_double_tagged_frames;
- __le64 rx_trunc_frames;
- __le64 rx_good_frames;
- __le64 rx_pfc_xon2xoff_frames_pri0;
- __le64 rx_pfc_xon2xoff_frames_pri1;
- __le64 rx_pfc_xon2xoff_frames_pri2;
- __le64 rx_pfc_xon2xoff_frames_pri3;
- __le64 rx_pfc_xon2xoff_frames_pri4;
- __le64 rx_pfc_xon2xoff_frames_pri5;
- __le64 rx_pfc_xon2xoff_frames_pri6;
- __le64 rx_pfc_xon2xoff_frames_pri7;
- __le64 rx_pfc_ena_frames_pri0;
- __le64 rx_pfc_ena_frames_pri1;
- __le64 rx_pfc_ena_frames_pri2;
- __le64 rx_pfc_ena_frames_pri3;
- __le64 rx_pfc_ena_frames_pri4;
- __le64 rx_pfc_ena_frames_pri5;
- __le64 rx_pfc_ena_frames_pri6;
- __le64 rx_pfc_ena_frames_pri7;
- __le64 rx_sch_crc_err_frames;
- __le64 rx_undrsz_frames;
- __le64 rx_frag_frames;
- __le64 rx_eee_lpi_events;
- __le64 rx_eee_lpi_duration;
- __le64 rx_llfc_physical_msgs;
- __le64 rx_llfc_logical_msgs;
- __le64 rx_llfc_msgs_with_crc_err;
- __le64 rx_hcfc_msgs;
- __le64 rx_hcfc_msgs_with_crc_err;
- __le64 rx_bytes;
- __le64 rx_runt_bytes;
- __le64 rx_runt_frames;
- __le64 rx_stat_discard;
- __le64 rx_stat_err;
-};
-
-/* hwrm_port_qstats_input (size:320b/40B) */
-struct hwrm_port_qstats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 flags;
- #define PORT_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
- u8 unused_0[5];
- __le64 tx_stat_host_addr;
- __le64 rx_stat_host_addr;
-};
-
-/* hwrm_port_qstats_output (size:128b/16B) */
-struct hwrm_port_qstats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 tx_stat_size;
- __le16 rx_stat_size;
- u8 unused_0[3];
- u8 valid;
-};
-
-/* tx_port_stats_ext (size:2048b/256B) */
-struct tx_port_stats_ext {
- __le64 tx_bytes_cos0;
- __le64 tx_bytes_cos1;
- __le64 tx_bytes_cos2;
- __le64 tx_bytes_cos3;
- __le64 tx_bytes_cos4;
- __le64 tx_bytes_cos5;
- __le64 tx_bytes_cos6;
- __le64 tx_bytes_cos7;
- __le64 tx_packets_cos0;
- __le64 tx_packets_cos1;
- __le64 tx_packets_cos2;
- __le64 tx_packets_cos3;
- __le64 tx_packets_cos4;
- __le64 tx_packets_cos5;
- __le64 tx_packets_cos6;
- __le64 tx_packets_cos7;
- __le64 pfc_pri0_tx_duration_us;
- __le64 pfc_pri0_tx_transitions;
- __le64 pfc_pri1_tx_duration_us;
- __le64 pfc_pri1_tx_transitions;
- __le64 pfc_pri2_tx_duration_us;
- __le64 pfc_pri2_tx_transitions;
- __le64 pfc_pri3_tx_duration_us;
- __le64 pfc_pri3_tx_transitions;
- __le64 pfc_pri4_tx_duration_us;
- __le64 pfc_pri4_tx_transitions;
- __le64 pfc_pri5_tx_duration_us;
- __le64 pfc_pri5_tx_transitions;
- __le64 pfc_pri6_tx_duration_us;
- __le64 pfc_pri6_tx_transitions;
- __le64 pfc_pri7_tx_duration_us;
- __le64 pfc_pri7_tx_transitions;
-};
-
-/* rx_port_stats_ext (size:3904b/488B) */
-struct rx_port_stats_ext {
- __le64 link_down_events;
- __le64 continuous_pause_events;
- __le64 resume_pause_events;
- __le64 continuous_roce_pause_events;
- __le64 resume_roce_pause_events;
- __le64 rx_bytes_cos0;
- __le64 rx_bytes_cos1;
- __le64 rx_bytes_cos2;
- __le64 rx_bytes_cos3;
- __le64 rx_bytes_cos4;
- __le64 rx_bytes_cos5;
- __le64 rx_bytes_cos6;
- __le64 rx_bytes_cos7;
- __le64 rx_packets_cos0;
- __le64 rx_packets_cos1;
- __le64 rx_packets_cos2;
- __le64 rx_packets_cos3;
- __le64 rx_packets_cos4;
- __le64 rx_packets_cos5;
- __le64 rx_packets_cos6;
- __le64 rx_packets_cos7;
- __le64 pfc_pri0_rx_duration_us;
- __le64 pfc_pri0_rx_transitions;
- __le64 pfc_pri1_rx_duration_us;
- __le64 pfc_pri1_rx_transitions;
- __le64 pfc_pri2_rx_duration_us;
- __le64 pfc_pri2_rx_transitions;
- __le64 pfc_pri3_rx_duration_us;
- __le64 pfc_pri3_rx_transitions;
- __le64 pfc_pri4_rx_duration_us;
- __le64 pfc_pri4_rx_transitions;
- __le64 pfc_pri5_rx_duration_us;
- __le64 pfc_pri5_rx_transitions;
- __le64 pfc_pri6_rx_duration_us;
- __le64 pfc_pri6_rx_transitions;
- __le64 pfc_pri7_rx_duration_us;
- __le64 pfc_pri7_rx_transitions;
- __le64 rx_bits;
- __le64 rx_buffer_passed_threshold;
- __le64 rx_pcs_symbol_err;
- __le64 rx_corrected_bits;
- __le64 rx_discard_bytes_cos0;
- __le64 rx_discard_bytes_cos1;
- __le64 rx_discard_bytes_cos2;
- __le64 rx_discard_bytes_cos3;
- __le64 rx_discard_bytes_cos4;
- __le64 rx_discard_bytes_cos5;
- __le64 rx_discard_bytes_cos6;
- __le64 rx_discard_bytes_cos7;
- __le64 rx_discard_packets_cos0;
- __le64 rx_discard_packets_cos1;
- __le64 rx_discard_packets_cos2;
- __le64 rx_discard_packets_cos3;
- __le64 rx_discard_packets_cos4;
- __le64 rx_discard_packets_cos5;
- __le64 rx_discard_packets_cos6;
- __le64 rx_discard_packets_cos7;
- __le64 rx_fec_corrected_blocks;
- __le64 rx_fec_uncorrectable_blocks;
- __le64 rx_filter_miss;
- __le64 rx_fec_symbol_err;
-};
-
-/* hwrm_port_qstats_ext_input (size:320b/40B) */
-struct hwrm_port_qstats_ext_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- __le16 tx_stat_size;
- __le16 rx_stat_size;
- u8 flags;
- #define PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x1UL
- u8 unused_0;
- __le64 tx_stat_host_addr;
- __le64 rx_stat_host_addr;
-};
-
-/* hwrm_port_qstats_ext_output (size:128b/16B) */
-struct hwrm_port_qstats_ext_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 tx_stat_size;
- __le16 rx_stat_size;
- __le16 total_active_cos_queues;
- u8 flags;
- #define PORT_QSTATS_EXT_RESP_FLAGS_CLEAR_ROCE_COUNTERS_SUPPORTED 0x1UL
- u8 valid;
-};
-
-/* hwrm_port_lpbk_qstats_input (size:128b/16B) */
-struct hwrm_port_lpbk_qstats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
-};
-
-/* hwrm_port_lpbk_qstats_output (size:768b/96B) */
-struct hwrm_port_lpbk_qstats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 lpbk_ucast_frames;
- __le64 lpbk_mcast_frames;
- __le64 lpbk_bcast_frames;
- __le64 lpbk_ucast_bytes;
- __le64 lpbk_mcast_bytes;
- __le64 lpbk_bcast_bytes;
- __le64 tx_stat_discard;
- __le64 tx_stat_error;
- __le64 rx_stat_discard;
- __le64 rx_stat_error;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_port_ecn_qstats_input (size:256b/32B) */
-struct hwrm_port_ecn_qstats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- __le16 ecn_stat_buf_size;
- u8 flags;
- #define PORT_ECN_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
- u8 unused_0[3];
- __le64 ecn_stat_host_addr;
-};
-
-/* hwrm_port_ecn_qstats_output (size:128b/16B) */
-struct hwrm_port_ecn_qstats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 ecn_stat_buf_size;
- u8 mark_en;
- u8 unused_0[4];
- u8 valid;
-};
-
-/* port_stats_ecn (size:512b/64B) */
-struct port_stats_ecn {
- __le64 mark_cnt_cos0;
- __le64 mark_cnt_cos1;
- __le64 mark_cnt_cos2;
- __le64 mark_cnt_cos3;
- __le64 mark_cnt_cos4;
- __le64 mark_cnt_cos5;
- __le64 mark_cnt_cos6;
- __le64 mark_cnt_cos7;
-};
-
-/* hwrm_port_clr_stats_input (size:192b/24B) */
-struct hwrm_port_clr_stats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 flags;
- #define PORT_CLR_STATS_REQ_FLAGS_ROCE_COUNTERS 0x1UL
- u8 unused_0[5];
-};
-
-/* hwrm_port_clr_stats_output (size:128b/16B) */
-struct hwrm_port_clr_stats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_port_lpbk_clr_stats_input (size:128b/16B) */
-struct hwrm_port_lpbk_clr_stats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
-};
-
-/* hwrm_port_lpbk_clr_stats_output (size:128b/16B) */
-struct hwrm_port_lpbk_clr_stats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_port_ts_query_input (size:320b/40B) */
-struct hwrm_port_ts_query_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define PORT_TS_QUERY_REQ_FLAGS_PATH 0x1UL
- #define PORT_TS_QUERY_REQ_FLAGS_PATH_TX 0x0UL
- #define PORT_TS_QUERY_REQ_FLAGS_PATH_RX 0x1UL
- #define PORT_TS_QUERY_REQ_FLAGS_PATH_LAST PORT_TS_QUERY_REQ_FLAGS_PATH_RX
- #define PORT_TS_QUERY_REQ_FLAGS_CURRENT_TIME 0x2UL
- __le16 port_id;
- u8 unused_0[2];
- __le16 enables;
- #define PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT 0x1UL
- #define PORT_TS_QUERY_REQ_ENABLES_PTP_SEQ_ID 0x2UL
- #define PORT_TS_QUERY_REQ_ENABLES_PTP_HDR_OFFSET 0x4UL
- __le16 ts_req_timeout;
- __le32 ptp_seq_id;
- __le16 ptp_hdr_offset;
- u8 unused_1[6];
-};
-
-/* hwrm_port_ts_query_output (size:192b/24B) */
-struct hwrm_port_ts_query_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 ptp_msg_ts;
- __le16 ptp_msg_seqid;
- u8 unused_0[5];
- u8 valid;
-};
-
-/* hwrm_port_phy_qcaps_input (size:192b/24B) */
-struct hwrm_port_phy_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 unused_0[6];
-};
-
-/* hwrm_port_phy_qcaps_output (size:320b/40B) */
-struct hwrm_port_phy_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 flags;
- #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED 0x2UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED 0x4UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED 0x8UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET 0x10UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED 0x20UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_FW_MANAGED_LINK_DOWN 0x40UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_NO_FCS 0x80UL
- u8 port_cnt;
- #define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL
- #define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL
- #define PORT_PHY_QCAPS_RESP_PORT_CNT_2 0x2UL
- #define PORT_PHY_QCAPS_RESP_PORT_CNT_3 0x3UL
- #define PORT_PHY_QCAPS_RESP_PORT_CNT_4 0x4UL
- #define PORT_PHY_QCAPS_RESP_PORT_CNT_12 0xcUL
- #define PORT_PHY_QCAPS_RESP_PORT_CNT_LAST PORT_PHY_QCAPS_RESP_PORT_CNT_12
- __le16 supported_speeds_force_mode;
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD 0x1UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MB 0x2UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GBHD 0x4UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GB 0x8UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2GB 0x10UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2_5GB 0x20UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10GB 0x40UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_20GB 0x80UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_25GB 0x100UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_40GB 0x200UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_50GB 0x400UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100GB 0x800UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD 0x1000UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MB 0x2000UL
- __le16 supported_speeds_auto_mode;
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD 0x1UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MB 0x2UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GBHD 0x4UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GB 0x8UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2GB 0x10UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2_5GB 0x20UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10GB 0x40UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_20GB 0x80UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_25GB 0x100UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_40GB 0x200UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_50GB 0x400UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100GB 0x800UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD 0x1000UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MB 0x2000UL
- __le16 supported_speeds_eee_mode;
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 0x1UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_100MB 0x2UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD2 0x4UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_1GB 0x8UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD3 0x10UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD4 0x20UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_10GB 0x40UL
- __le32 tx_lpi_timer_low;
- #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK 0xffffffUL
- #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_SFT 0
- #define PORT_PHY_QCAPS_RESP_RSVD2_MASK 0xff000000UL
- #define PORT_PHY_QCAPS_RESP_RSVD2_SFT 24
- __le32 valid_tx_lpi_timer_high;
- #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK 0xffffffUL
- #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_SFT 0
- #define PORT_PHY_QCAPS_RESP_RSVD_MASK 0xff000000UL
- #define PORT_PHY_QCAPS_RESP_RSVD_SFT 24
- __le16 supported_pam4_speeds_auto_mode;
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_50G 0x1UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_100G 0x2UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_200G 0x4UL
- __le16 supported_pam4_speeds_force_mode;
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_50G 0x1UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_100G 0x2UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_200G 0x4UL
- __le16 flags2;
- #define PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED 0x1UL
- #define PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED 0x2UL
- #define PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED 0x4UL
- #define PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED 0x8UL
- u8 internal_port_cnt;
- u8 unused_0;
- __le16 supported_speeds2_force_mode;
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_1GB 0x1UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_10GB 0x2UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_25GB 0x4UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_40GB 0x8UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_50GB 0x10UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_100GB 0x20UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_50GB_PAM4_56 0x40UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_100GB_PAM4_56 0x80UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_200GB_PAM4_56 0x100UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_400GB_PAM4_56 0x200UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_100GB_PAM4_112 0x400UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_200GB_PAM4_112 0x800UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_400GB_PAM4_112 0x1000UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_800GB_PAM4_112 0x2000UL
- __le16 supported_speeds2_auto_mode;
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_1GB 0x1UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_10GB 0x2UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_25GB 0x4UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_40GB 0x8UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_50GB 0x10UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_100GB 0x20UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_50GB_PAM4_56 0x40UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_100GB_PAM4_56 0x80UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_200GB_PAM4_56 0x100UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_400GB_PAM4_56 0x200UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_100GB_PAM4_112 0x400UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_200GB_PAM4_112 0x800UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_400GB_PAM4_112 0x1000UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_800GB_PAM4_112 0x2000UL
- u8 unused_1[3];
- u8 valid;
-};
-
-/* hwrm_port_phy_i2c_read_input (size:320b/40B) */
-struct hwrm_port_phy_i2c_read_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- __le32 enables;
- #define PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET 0x1UL
- #define PORT_PHY_I2C_READ_REQ_ENABLES_BANK_NUMBER 0x2UL
- __le16 port_id;
- u8 i2c_slave_addr;
- u8 bank_number;
- __le16 page_number;
- __le16 page_offset;
- u8 data_length;
- u8 unused_1[7];
-};
-
-/* hwrm_port_phy_i2c_read_output (size:640b/80B) */
-struct hwrm_port_phy_i2c_read_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 data[16];
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_port_phy_mdio_write_input (size:320b/40B) */
-struct hwrm_port_phy_mdio_write_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 unused_0[2];
- __le16 port_id;
- u8 phy_addr;
- u8 dev_addr;
- __le16 reg_addr;
- __le16 reg_data;
- u8 cl45_mdio;
- u8 unused_1[7];
-};
-
-/* hwrm_port_phy_mdio_write_output (size:128b/16B) */
-struct hwrm_port_phy_mdio_write_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_port_phy_mdio_read_input (size:256b/32B) */
-struct hwrm_port_phy_mdio_read_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 unused_0[2];
- __le16 port_id;
- u8 phy_addr;
- u8 dev_addr;
- __le16 reg_addr;
- u8 cl45_mdio;
- u8 unused_1;
-};
-
-/* hwrm_port_phy_mdio_read_output (size:128b/16B) */
-struct hwrm_port_phy_mdio_read_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 reg_data;
- u8 unused_0[5];
- u8 valid;
-};
-
-/* hwrm_port_led_cfg_input (size:512b/64B) */
-struct hwrm_port_led_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define PORT_LED_CFG_REQ_ENABLES_LED0_ID 0x1UL
- #define PORT_LED_CFG_REQ_ENABLES_LED0_STATE 0x2UL
- #define PORT_LED_CFG_REQ_ENABLES_LED0_COLOR 0x4UL
- #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON 0x8UL
- #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF 0x10UL
- #define PORT_LED_CFG_REQ_ENABLES_LED0_GROUP_ID 0x20UL
- #define PORT_LED_CFG_REQ_ENABLES_LED1_ID 0x40UL
- #define PORT_LED_CFG_REQ_ENABLES_LED1_STATE 0x80UL
- #define PORT_LED_CFG_REQ_ENABLES_LED1_COLOR 0x100UL
- #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_ON 0x200UL
- #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_OFF 0x400UL
- #define PORT_LED_CFG_REQ_ENABLES_LED1_GROUP_ID 0x800UL
- #define PORT_LED_CFG_REQ_ENABLES_LED2_ID 0x1000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED2_STATE 0x2000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED2_COLOR 0x4000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_ON 0x8000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_OFF 0x10000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED2_GROUP_ID 0x20000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED3_ID 0x40000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED3_STATE 0x80000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED3_COLOR 0x100000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_ON 0x200000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_OFF 0x400000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED3_GROUP_ID 0x800000UL
- __le16 port_id;
- u8 num_leds;
- u8 rsvd;
- u8 led0_id;
- u8 led0_state;
- #define PORT_LED_CFG_REQ_LED0_STATE_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED0_STATE_OFF 0x1UL
- #define PORT_LED_CFG_REQ_LED0_STATE_ON 0x2UL
- #define PORT_LED_CFG_REQ_LED0_STATE_BLINK 0x3UL
- #define PORT_LED_CFG_REQ_LED0_STATE_BLINKALT 0x4UL
- #define PORT_LED_CFG_REQ_LED0_STATE_LAST PORT_LED_CFG_REQ_LED0_STATE_BLINKALT
- u8 led0_color;
- #define PORT_LED_CFG_REQ_LED0_COLOR_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED0_COLOR_AMBER 0x1UL
- #define PORT_LED_CFG_REQ_LED0_COLOR_GREEN 0x2UL
- #define PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER 0x3UL
- #define PORT_LED_CFG_REQ_LED0_COLOR_LAST PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER
- u8 unused_0;
- __le16 led0_blink_on;
- __le16 led0_blink_off;
- u8 led0_group_id;
- u8 rsvd0;
- u8 led1_id;
- u8 led1_state;
- #define PORT_LED_CFG_REQ_LED1_STATE_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED1_STATE_OFF 0x1UL
- #define PORT_LED_CFG_REQ_LED1_STATE_ON 0x2UL
- #define PORT_LED_CFG_REQ_LED1_STATE_BLINK 0x3UL
- #define PORT_LED_CFG_REQ_LED1_STATE_BLINKALT 0x4UL
- #define PORT_LED_CFG_REQ_LED1_STATE_LAST PORT_LED_CFG_REQ_LED1_STATE_BLINKALT
- u8 led1_color;
- #define PORT_LED_CFG_REQ_LED1_COLOR_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED1_COLOR_AMBER 0x1UL
- #define PORT_LED_CFG_REQ_LED1_COLOR_GREEN 0x2UL
- #define PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER 0x3UL
- #define PORT_LED_CFG_REQ_LED1_COLOR_LAST PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER
- u8 unused_1;
- __le16 led1_blink_on;
- __le16 led1_blink_off;
- u8 led1_group_id;
- u8 rsvd1;
- u8 led2_id;
- u8 led2_state;
- #define PORT_LED_CFG_REQ_LED2_STATE_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED2_STATE_OFF 0x1UL
- #define PORT_LED_CFG_REQ_LED2_STATE_ON 0x2UL
- #define PORT_LED_CFG_REQ_LED2_STATE_BLINK 0x3UL
- #define PORT_LED_CFG_REQ_LED2_STATE_BLINKALT 0x4UL
- #define PORT_LED_CFG_REQ_LED2_STATE_LAST PORT_LED_CFG_REQ_LED2_STATE_BLINKALT
- u8 led2_color;
- #define PORT_LED_CFG_REQ_LED2_COLOR_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED2_COLOR_AMBER 0x1UL
- #define PORT_LED_CFG_REQ_LED2_COLOR_GREEN 0x2UL
- #define PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER 0x3UL
- #define PORT_LED_CFG_REQ_LED2_COLOR_LAST PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER
- u8 unused_2;
- __le16 led2_blink_on;
- __le16 led2_blink_off;
- u8 led2_group_id;
- u8 rsvd2;
- u8 led3_id;
- u8 led3_state;
- #define PORT_LED_CFG_REQ_LED3_STATE_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED3_STATE_OFF 0x1UL
- #define PORT_LED_CFG_REQ_LED3_STATE_ON 0x2UL
- #define PORT_LED_CFG_REQ_LED3_STATE_BLINK 0x3UL
- #define PORT_LED_CFG_REQ_LED3_STATE_BLINKALT 0x4UL
- #define PORT_LED_CFG_REQ_LED3_STATE_LAST PORT_LED_CFG_REQ_LED3_STATE_BLINKALT
- u8 led3_color;
- #define PORT_LED_CFG_REQ_LED3_COLOR_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED3_COLOR_AMBER 0x1UL
- #define PORT_LED_CFG_REQ_LED3_COLOR_GREEN 0x2UL
- #define PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER 0x3UL
- #define PORT_LED_CFG_REQ_LED3_COLOR_LAST PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER
- u8 unused_3;
- __le16 led3_blink_on;
- __le16 led3_blink_off;
- u8 led3_group_id;
- u8 rsvd3;
-};
-
-/* hwrm_port_led_cfg_output (size:128b/16B) */
-struct hwrm_port_led_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_port_led_qcfg_input (size:192b/24B) */
-struct hwrm_port_led_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 unused_0[6];
-};
-
-/* hwrm_port_led_qcfg_output (size:448b/56B) */
-struct hwrm_port_led_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 num_leds;
- u8 led0_id;
- u8 led0_type;
- #define PORT_LED_QCFG_RESP_LED0_TYPE_SPEED 0x0UL
- #define PORT_LED_QCFG_RESP_LED0_TYPE_ACTIVITY 0x1UL
- #define PORT_LED_QCFG_RESP_LED0_TYPE_INVALID 0xffUL
- #define PORT_LED_QCFG_RESP_LED0_TYPE_LAST PORT_LED_QCFG_RESP_LED0_TYPE_INVALID
- u8 led0_state;
- #define PORT_LED_QCFG_RESP_LED0_STATE_DEFAULT 0x0UL
- #define PORT_LED_QCFG_RESP_LED0_STATE_OFF 0x1UL
- #define PORT_LED_QCFG_RESP_LED0_STATE_ON 0x2UL
- #define PORT_LED_QCFG_RESP_LED0_STATE_BLINK 0x3UL
- #define PORT_LED_QCFG_RESP_LED0_STATE_BLINKALT 0x4UL
- #define PORT_LED_QCFG_RESP_LED0_STATE_LAST PORT_LED_QCFG_RESP_LED0_STATE_BLINKALT
- u8 led0_color;
- #define PORT_LED_QCFG_RESP_LED0_COLOR_DEFAULT 0x0UL
- #define PORT_LED_QCFG_RESP_LED0_COLOR_AMBER 0x1UL
- #define PORT_LED_QCFG_RESP_LED0_COLOR_GREEN 0x2UL
- #define PORT_LED_QCFG_RESP_LED0_COLOR_GREENAMBER 0x3UL
- #define PORT_LED_QCFG_RESP_LED0_COLOR_LAST PORT_LED_QCFG_RESP_LED0_COLOR_GREENAMBER
- u8 unused_0;
- __le16 led0_blink_on;
- __le16 led0_blink_off;
- u8 led0_group_id;
- u8 led1_id;
- u8 led1_type;
- #define PORT_LED_QCFG_RESP_LED1_TYPE_SPEED 0x0UL
- #define PORT_LED_QCFG_RESP_LED1_TYPE_ACTIVITY 0x1UL
- #define PORT_LED_QCFG_RESP_LED1_TYPE_INVALID 0xffUL
- #define PORT_LED_QCFG_RESP_LED1_TYPE_LAST PORT_LED_QCFG_RESP_LED1_TYPE_INVALID
- u8 led1_state;
- #define PORT_LED_QCFG_RESP_LED1_STATE_DEFAULT 0x0UL
- #define PORT_LED_QCFG_RESP_LED1_STATE_OFF 0x1UL
- #define PORT_LED_QCFG_RESP_LED1_STATE_ON 0x2UL
- #define PORT_LED_QCFG_RESP_LED1_STATE_BLINK 0x3UL
- #define PORT_LED_QCFG_RESP_LED1_STATE_BLINKALT 0x4UL
- #define PORT_LED_QCFG_RESP_LED1_STATE_LAST PORT_LED_QCFG_RESP_LED1_STATE_BLINKALT
- u8 led1_color;
- #define PORT_LED_QCFG_RESP_LED1_COLOR_DEFAULT 0x0UL
- #define PORT_LED_QCFG_RESP_LED1_COLOR_AMBER 0x1UL
- #define PORT_LED_QCFG_RESP_LED1_COLOR_GREEN 0x2UL
- #define PORT_LED_QCFG_RESP_LED1_COLOR_GREENAMBER 0x3UL
- #define PORT_LED_QCFG_RESP_LED1_COLOR_LAST PORT_LED_QCFG_RESP_LED1_COLOR_GREENAMBER
- u8 unused_1;
- __le16 led1_blink_on;
- __le16 led1_blink_off;
- u8 led1_group_id;
- u8 led2_id;
- u8 led2_type;
- #define PORT_LED_QCFG_RESP_LED2_TYPE_SPEED 0x0UL
- #define PORT_LED_QCFG_RESP_LED2_TYPE_ACTIVITY 0x1UL
- #define PORT_LED_QCFG_RESP_LED2_TYPE_INVALID 0xffUL
- #define PORT_LED_QCFG_RESP_LED2_TYPE_LAST PORT_LED_QCFG_RESP_LED2_TYPE_INVALID
- u8 led2_state;
- #define PORT_LED_QCFG_RESP_LED2_STATE_DEFAULT 0x0UL
- #define PORT_LED_QCFG_RESP_LED2_STATE_OFF 0x1UL
- #define PORT_LED_QCFG_RESP_LED2_STATE_ON 0x2UL
- #define PORT_LED_QCFG_RESP_LED2_STATE_BLINK 0x3UL
- #define PORT_LED_QCFG_RESP_LED2_STATE_BLINKALT 0x4UL
- #define PORT_LED_QCFG_RESP_LED2_STATE_LAST PORT_LED_QCFG_RESP_LED2_STATE_BLINKALT
- u8 led2_color;
- #define PORT_LED_QCFG_RESP_LED2_COLOR_DEFAULT 0x0UL
- #define PORT_LED_QCFG_RESP_LED2_COLOR_AMBER 0x1UL
- #define PORT_LED_QCFG_RESP_LED2_COLOR_GREEN 0x2UL
- #define PORT_LED_QCFG_RESP_LED2_COLOR_GREENAMBER 0x3UL
- #define PORT_LED_QCFG_RESP_LED2_COLOR_LAST PORT_LED_QCFG_RESP_LED2_COLOR_GREENAMBER
- u8 unused_2;
- __le16 led2_blink_on;
- __le16 led2_blink_off;
- u8 led2_group_id;
- u8 led3_id;
- u8 led3_type;
- #define PORT_LED_QCFG_RESP_LED3_TYPE_SPEED 0x0UL
- #define PORT_LED_QCFG_RESP_LED3_TYPE_ACTIVITY 0x1UL
- #define PORT_LED_QCFG_RESP_LED3_TYPE_INVALID 0xffUL
- #define PORT_LED_QCFG_RESP_LED3_TYPE_LAST PORT_LED_QCFG_RESP_LED3_TYPE_INVALID
- u8 led3_state;
- #define PORT_LED_QCFG_RESP_LED3_STATE_DEFAULT 0x0UL
- #define PORT_LED_QCFG_RESP_LED3_STATE_OFF 0x1UL
- #define PORT_LED_QCFG_RESP_LED3_STATE_ON 0x2UL
- #define PORT_LED_QCFG_RESP_LED3_STATE_BLINK 0x3UL
- #define PORT_LED_QCFG_RESP_LED3_STATE_BLINKALT 0x4UL
- #define PORT_LED_QCFG_RESP_LED3_STATE_LAST PORT_LED_QCFG_RESP_LED3_STATE_BLINKALT
- u8 led3_color;
- #define PORT_LED_QCFG_RESP_LED3_COLOR_DEFAULT 0x0UL
- #define PORT_LED_QCFG_RESP_LED3_COLOR_AMBER 0x1UL
- #define PORT_LED_QCFG_RESP_LED3_COLOR_GREEN 0x2UL
- #define PORT_LED_QCFG_RESP_LED3_COLOR_GREENAMBER 0x3UL
- #define PORT_LED_QCFG_RESP_LED3_COLOR_LAST PORT_LED_QCFG_RESP_LED3_COLOR_GREENAMBER
- u8 unused_3;
- __le16 led3_blink_on;
- __le16 led3_blink_off;
- u8 led3_group_id;
- u8 unused_4[6];
- u8 valid;
-};
-
-/* hwrm_port_led_qcaps_input (size:192b/24B) */
-struct hwrm_port_led_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 unused_0[6];
-};
-
-/* hwrm_port_led_qcaps_output (size:384b/48B) */
-struct hwrm_port_led_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 num_leds;
- u8 unused[3];
- u8 led0_id;
- u8 led0_type;
- #define PORT_LED_QCAPS_RESP_LED0_TYPE_SPEED 0x0UL
- #define PORT_LED_QCAPS_RESP_LED0_TYPE_ACTIVITY 0x1UL
- #define PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID 0xffUL
- #define PORT_LED_QCAPS_RESP_LED0_TYPE_LAST PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID
- u8 led0_group_id;
- u8 unused_0;
- __le16 led0_state_caps;
- #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ENABLED 0x1UL
- #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_OFF_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ON_SUPPORTED 0x4UL
- #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_SUPPORTED 0x8UL
- #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
- __le16 led0_color_caps;
- #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_RSVD 0x1UL
- #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
- u8 led1_id;
- u8 led1_type;
- #define PORT_LED_QCAPS_RESP_LED1_TYPE_SPEED 0x0UL
- #define PORT_LED_QCAPS_RESP_LED1_TYPE_ACTIVITY 0x1UL
- #define PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID 0xffUL
- #define PORT_LED_QCAPS_RESP_LED1_TYPE_LAST PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID
- u8 led1_group_id;
- u8 unused_1;
- __le16 led1_state_caps;
- #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ENABLED 0x1UL
- #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_OFF_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ON_SUPPORTED 0x4UL
- #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_SUPPORTED 0x8UL
- #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
- __le16 led1_color_caps;
- #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_RSVD 0x1UL
- #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
- u8 led2_id;
- u8 led2_type;
- #define PORT_LED_QCAPS_RESP_LED2_TYPE_SPEED 0x0UL
- #define PORT_LED_QCAPS_RESP_LED2_TYPE_ACTIVITY 0x1UL
- #define PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID 0xffUL
- #define PORT_LED_QCAPS_RESP_LED2_TYPE_LAST PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID
- u8 led2_group_id;
- u8 unused_2;
- __le16 led2_state_caps;
- #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ENABLED 0x1UL
- #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_OFF_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ON_SUPPORTED 0x4UL
- #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_SUPPORTED 0x8UL
- #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
- __le16 led2_color_caps;
- #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_RSVD 0x1UL
- #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
- u8 led3_id;
- u8 led3_type;
- #define PORT_LED_QCAPS_RESP_LED3_TYPE_SPEED 0x0UL
- #define PORT_LED_QCAPS_RESP_LED3_TYPE_ACTIVITY 0x1UL
- #define PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID 0xffUL
- #define PORT_LED_QCAPS_RESP_LED3_TYPE_LAST PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID
- u8 led3_group_id;
- u8 unused_3;
- __le16 led3_state_caps;
- #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ENABLED 0x1UL
- #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_OFF_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ON_SUPPORTED 0x4UL
- #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_SUPPORTED 0x8UL
- #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
- __le16 led3_color_caps;
- #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_RSVD 0x1UL
- #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
- u8 unused_4[3];
- u8 valid;
-};
-
-/* hwrm_port_mac_qcaps_input (size:192b/24B) */
-struct hwrm_port_mac_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 unused_0[6];
-};
-
-/* hwrm_port_mac_qcaps_output (size:128b/16B) */
-struct hwrm_port_mac_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 flags;
- #define PORT_MAC_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED 0x1UL
- #define PORT_MAC_QCAPS_RESP_FLAGS_REMOTE_LPBK_SUPPORTED 0x2UL
- u8 unused_0[6];
- u8 valid;
-};
-
-/* hwrm_queue_qportcfg_input (size:192b/24B) */
-struct hwrm_queue_qportcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define QUEUE_QPORTCFG_REQ_FLAGS_PATH 0x1UL
- #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX 0x0UL
- #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX 0x1UL
- #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_LAST QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX
- __le16 port_id;
- u8 drv_qmap_cap;
- #define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_DISABLED 0x0UL
- #define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_ENABLED 0x1UL
- #define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_LAST QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_ENABLED
- u8 unused_0;
-};
-
-/* hwrm_queue_qportcfg_output (size:1344b/168B) */
-struct hwrm_queue_qportcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 max_configurable_queues;
- u8 max_configurable_lossless_queues;
- u8 queue_cfg_allowed;
- u8 queue_cfg_info;
- #define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_USE_PROFILE_TYPE 0x2UL
- u8 queue_pfcenable_cfg_allowed;
- u8 queue_pri2cos_cfg_allowed;
- u8 queue_cos2bw_cfg_allowed;
- u8 queue_id0;
- u8 queue_id0_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY 0x0UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN 0xffUL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN
- u8 queue_id1;
- u8 queue_id1_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY 0x0UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN 0xffUL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN
- u8 queue_id2;
- u8 queue_id2_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY 0x0UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN 0xffUL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN
- u8 queue_id3;
- u8 queue_id3_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY 0x0UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN 0xffUL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN
- u8 queue_id4;
- u8 queue_id4_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY 0x0UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN 0xffUL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN
- u8 queue_id5;
- u8 queue_id5_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY 0x0UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN 0xffUL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN
- u8 queue_id6;
- u8 queue_id6_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY 0x0UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN 0xffUL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN
- u8 queue_id7;
- u8 queue_id7_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY 0x0UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 0xffUL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN
- u8 queue_id0_service_profile_type;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_TYPE_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_TYPE_NIC 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_TYPE_CNP 0x4UL
- char qid0_name[16];
- char qid1_name[16];
- char qid2_name[16];
- char qid3_name[16];
- char qid4_name[16];
- char qid5_name[16];
- char qid6_name[16];
- char qid7_name[16];
- u8 queue_id1_service_profile_type;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_NIC 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_CNP 0x4UL
- u8 queue_id2_service_profile_type;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_TYPE_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_TYPE_NIC 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_TYPE_CNP 0x4UL
- u8 queue_id3_service_profile_type;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_TYPE_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_TYPE_NIC 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_TYPE_CNP 0x4UL
- u8 queue_id4_service_profile_type;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_TYPE_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_TYPE_NIC 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_TYPE_CNP 0x4UL
- u8 queue_id5_service_profile_type;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_TYPE_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_TYPE_NIC 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_TYPE_CNP 0x4UL
- u8 queue_id6_service_profile_type;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_TYPE_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_TYPE_NIC 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_TYPE_CNP 0x4UL
- u8 queue_id7_service_profile_type;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_TYPE_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_TYPE_NIC 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_TYPE_CNP 0x4UL
- u8 valid;
-};
-
-/* hwrm_queue_qcfg_input (size:192b/24B) */
-struct hwrm_queue_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define QUEUE_QCFG_REQ_FLAGS_PATH 0x1UL
- #define QUEUE_QCFG_REQ_FLAGS_PATH_TX 0x0UL
- #define QUEUE_QCFG_REQ_FLAGS_PATH_RX 0x1UL
- #define QUEUE_QCFG_REQ_FLAGS_PATH_LAST QUEUE_QCFG_REQ_FLAGS_PATH_RX
- __le32 queue_id;
-};
-
-/* hwrm_queue_qcfg_output (size:128b/16B) */
-struct hwrm_queue_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 queue_len;
- u8 service_profile;
- #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LOSSY 0x0UL
- #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QCFG_RESP_SERVICE_PROFILE_UNKNOWN 0xffUL
- #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LAST QUEUE_QCFG_RESP_SERVICE_PROFILE_UNKNOWN
- u8 queue_cfg_info;
- #define QUEUE_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
- u8 unused_0;
- u8 valid;
-};
-
-/* hwrm_queue_cfg_input (size:320b/40B) */
-struct hwrm_queue_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define QUEUE_CFG_REQ_FLAGS_PATH_MASK 0x3UL
- #define QUEUE_CFG_REQ_FLAGS_PATH_SFT 0
- #define QUEUE_CFG_REQ_FLAGS_PATH_TX 0x0UL
- #define QUEUE_CFG_REQ_FLAGS_PATH_RX 0x1UL
- #define QUEUE_CFG_REQ_FLAGS_PATH_BIDIR 0x2UL
- #define QUEUE_CFG_REQ_FLAGS_PATH_LAST QUEUE_CFG_REQ_FLAGS_PATH_BIDIR
- __le32 enables;
- #define QUEUE_CFG_REQ_ENABLES_DFLT_LEN 0x1UL
- #define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE 0x2UL
- __le32 queue_id;
- __le32 dflt_len;
- u8 service_profile;
- #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY 0x0UL
- #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN 0xffUL
- #define QUEUE_CFG_REQ_SERVICE_PROFILE_LAST QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN
- u8 unused_0[7];
-};
-
-/* hwrm_queue_cfg_output (size:128b/16B) */
-struct hwrm_queue_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_queue_pfcenable_qcfg_input (size:192b/24B) */
-struct hwrm_queue_pfcenable_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 unused_0[6];
-};
-
-/* hwrm_queue_pfcenable_qcfg_output (size:128b/16B) */
-struct hwrm_queue_pfcenable_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 flags;
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_ENABLED 0x1UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_ENABLED 0x2UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_ENABLED 0x4UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_ENABLED 0x8UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_ENABLED 0x10UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_ENABLED 0x20UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_ENABLED 0x40UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_ENABLED 0x80UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_WATCHDOG_ENABLED 0x100UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_WATCHDOG_ENABLED 0x200UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_WATCHDOG_ENABLED 0x400UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_WATCHDOG_ENABLED 0x800UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_WATCHDOG_ENABLED 0x1000UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_WATCHDOG_ENABLED 0x2000UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_WATCHDOG_ENABLED 0x4000UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_WATCHDOG_ENABLED 0x8000UL
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_queue_pfcenable_cfg_input (size:192b/24B) */
-struct hwrm_queue_pfcenable_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_ENABLED 0x1UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_ENABLED 0x2UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_ENABLED 0x4UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_ENABLED 0x8UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_ENABLED 0x10UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_ENABLED 0x20UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_ENABLED 0x40UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_ENABLED 0x80UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_WATCHDOG_ENABLED 0x100UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_WATCHDOG_ENABLED 0x200UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_WATCHDOG_ENABLED 0x400UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_WATCHDOG_ENABLED 0x800UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_WATCHDOG_ENABLED 0x1000UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_WATCHDOG_ENABLED 0x2000UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_WATCHDOG_ENABLED 0x4000UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_WATCHDOG_ENABLED 0x8000UL
- __le16 port_id;
- u8 unused_0[2];
-};
-
-/* hwrm_queue_pfcenable_cfg_output (size:128b/16B) */
-struct hwrm_queue_pfcenable_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_queue_pri2cos_qcfg_input (size:192b/24B) */
-struct hwrm_queue_pri2cos_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH 0x1UL
- #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_TX 0x0UL
- #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX 0x1UL
- #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX
- #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN 0x2UL
- u8 port_id;
- u8 unused_0[3];
-};
-
-/* hwrm_queue_pri2cos_qcfg_output (size:192b/24B) */
-struct hwrm_queue_pri2cos_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 pri0_cos_queue_id;
- u8 pri1_cos_queue_id;
- u8 pri2_cos_queue_id;
- u8 pri3_cos_queue_id;
- u8 pri4_cos_queue_id;
- u8 pri5_cos_queue_id;
- u8 pri6_cos_queue_id;
- u8 pri7_cos_queue_id;
- u8 queue_cfg_info;
- #define QUEUE_PRI2COS_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
- u8 unused_0[6];
- u8 valid;
-};
-
-/* hwrm_queue_pri2cos_cfg_input (size:320b/40B) */
-struct hwrm_queue_pri2cos_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_MASK 0x3UL
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_SFT 0
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX 0x0UL
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX 0x1UL
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR 0x2UL
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN 0x4UL
- __le32 enables;
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID 0x1UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI1_COS_QUEUE_ID 0x2UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI2_COS_QUEUE_ID 0x4UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI3_COS_QUEUE_ID 0x8UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI4_COS_QUEUE_ID 0x10UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI5_COS_QUEUE_ID 0x20UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI6_COS_QUEUE_ID 0x40UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI7_COS_QUEUE_ID 0x80UL
- u8 port_id;
- u8 pri0_cos_queue_id;
- u8 pri1_cos_queue_id;
- u8 pri2_cos_queue_id;
- u8 pri3_cos_queue_id;
- u8 pri4_cos_queue_id;
- u8 pri5_cos_queue_id;
- u8 pri6_cos_queue_id;
- u8 pri7_cos_queue_id;
- u8 unused_0[7];
-};
-
-/* hwrm_queue_pri2cos_cfg_output (size:128b/16B) */
-struct hwrm_queue_pri2cos_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_queue_cos2bw_qcfg_input (size:192b/24B) */
-struct hwrm_queue_cos2bw_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 unused_0[6];
-};
-
-/* hwrm_queue_cos2bw_qcfg_output (size:896b/112B) */
-struct hwrm_queue_cos2bw_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 queue_id0;
- u8 unused_0;
- __le16 unused_1;
- __le32 queue_id0_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id0_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id0_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id0_pri_lvl;
- u8 queue_id0_bw_weight;
- struct {
- u8 queue_id;
- __le32 queue_id_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id_pri_lvl;
- u8 queue_id_bw_weight;
- } __packed cfg[7];
- u8 unused_2[4];
- u8 valid;
-};
-
-/* hwrm_queue_cos2bw_cfg_input (size:1024b/128B) */
-struct hwrm_queue_cos2bw_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- __le32 enables;
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID 0x1UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID1_VALID 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID2_VALID 0x4UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID3_VALID 0x8UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID4_VALID 0x10UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID5_VALID 0x20UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID6_VALID 0x40UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID7_VALID 0x80UL
- __le16 port_id;
- u8 queue_id0;
- u8 unused_0;
- __le32 queue_id0_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id0_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id0_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id0_pri_lvl;
- u8 queue_id0_bw_weight;
- struct {
- u8 queue_id;
- __le32 queue_id_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id_pri_lvl;
- u8 queue_id_bw_weight;
- } __packed cfg[7];
- u8 unused_1[5];
-};
-
-/* hwrm_queue_cos2bw_cfg_output (size:128b/16B) */
-struct hwrm_queue_cos2bw_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_queue_dscp_qcaps_input (size:192b/24B) */
-struct hwrm_queue_dscp_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 port_id;
- u8 unused_0[7];
-};
-
-/* hwrm_queue_dscp_qcaps_output (size:128b/16B) */
-struct hwrm_queue_dscp_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 num_dscp_bits;
- u8 unused_0;
- __le16 max_entries;
- u8 unused_1[3];
- u8 valid;
-};
-
-/* hwrm_queue_dscp2pri_qcfg_input (size:256b/32B) */
-struct hwrm_queue_dscp2pri_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 dest_data_addr;
- u8 port_id;
- u8 unused_0;
- __le16 dest_data_buffer_size;
- u8 unused_1[4];
-};
-
-/* hwrm_queue_dscp2pri_qcfg_output (size:128b/16B) */
-struct hwrm_queue_dscp2pri_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 entry_cnt;
- u8 default_pri;
- u8 unused_0[4];
- u8 valid;
-};
-
-/* hwrm_queue_dscp2pri_cfg_input (size:320b/40B) */
-struct hwrm_queue_dscp2pri_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 src_data_addr;
- __le32 flags;
- #define QUEUE_DSCP2PRI_CFG_REQ_FLAGS_USE_HW_DEFAULT_PRI 0x1UL
- __le32 enables;
- #define QUEUE_DSCP2PRI_CFG_REQ_ENABLES_DEFAULT_PRI 0x1UL
- u8 port_id;
- u8 default_pri;
- __le16 entry_cnt;
- u8 unused_0[4];
-};
-
-/* hwrm_queue_dscp2pri_cfg_output (size:128b/16B) */
-struct hwrm_queue_dscp2pri_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_vnic_alloc_input (size:192b/24B) */
-struct hwrm_vnic_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define VNIC_ALLOC_REQ_FLAGS_DEFAULT 0x1UL
- #define VNIC_ALLOC_REQ_FLAGS_VIRTIO_NET_FID_VALID 0x2UL
- __le16 virtio_net_fid;
- u8 unused_0[2];
-};
-
-/* hwrm_vnic_alloc_output (size:128b/16B) */
-struct hwrm_vnic_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 vnic_id;
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_vnic_free_input (size:192b/24B) */
-struct hwrm_vnic_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 vnic_id;
- u8 unused_0[4];
-};
-
-/* hwrm_vnic_free_output (size:128b/16B) */
-struct hwrm_vnic_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_vnic_cfg_input (size:384b/48B) */
-struct hwrm_vnic_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define VNIC_CFG_REQ_FLAGS_DEFAULT 0x1UL
- #define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE 0x2UL
- #define VNIC_CFG_REQ_FLAGS_BD_STALL_MODE 0x4UL
- #define VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE 0x8UL
- #define VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE 0x10UL
- #define VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE 0x20UL
- #define VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE 0x40UL
- #define VNIC_CFG_REQ_FLAGS_PORTCOS_MAPPING_MODE 0x80UL
- __le32 enables;
- #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL
- #define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL
- #define VNIC_CFG_REQ_ENABLES_COS_RULE 0x4UL
- #define VNIC_CFG_REQ_ENABLES_LB_RULE 0x8UL
- #define VNIC_CFG_REQ_ENABLES_MRU 0x10UL
- #define VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID 0x20UL
- #define VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID 0x40UL
- #define VNIC_CFG_REQ_ENABLES_QUEUE_ID 0x80UL
- #define VNIC_CFG_REQ_ENABLES_RX_CSUM_V2_MODE 0x100UL
- #define VNIC_CFG_REQ_ENABLES_L2_CQE_MODE 0x200UL
- __le16 vnic_id;
- __le16 dflt_ring_grp;
- __le16 rss_rule;
- __le16 cos_rule;
- __le16 lb_rule;
- __le16 mru;
- __le16 default_rx_ring_id;
- __le16 default_cmpl_ring_id;
- __le16 queue_id;
- u8 rx_csum_v2_mode;
- #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_DEFAULT 0x0UL
- #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_ALL_OK 0x1UL
- #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_MAX 0x2UL
- #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_LAST VNIC_CFG_REQ_RX_CSUM_V2_MODE_MAX
- u8 l2_cqe_mode;
- #define VNIC_CFG_REQ_L2_CQE_MODE_DEFAULT 0x0UL
- #define VNIC_CFG_REQ_L2_CQE_MODE_COMPRESSED 0x1UL
- #define VNIC_CFG_REQ_L2_CQE_MODE_MIXED 0x2UL
- #define VNIC_CFG_REQ_L2_CQE_MODE_LAST VNIC_CFG_REQ_L2_CQE_MODE_MIXED
- u8 unused0[4];
-};
-
-/* hwrm_vnic_cfg_output (size:128b/16B) */
-struct hwrm_vnic_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_vnic_qcaps_input (size:192b/24B) */
-struct hwrm_vnic_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- u8 unused_0[4];
-};
-
-/* hwrm_vnic_qcaps_output (size:192b/24B) */
-struct hwrm_vnic_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 mru;
- u8 unused_0[2];
- __le32 flags;
- #define VNIC_QCAPS_RESP_FLAGS_UNUSED 0x1UL
- #define VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP 0x2UL
- #define VNIC_QCAPS_RESP_FLAGS_BD_STALL_CAP 0x4UL
- #define VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP 0x8UL
- #define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP 0x10UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL
- #define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP 0x40UL
- #define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_CAP 0x80UL
- #define VNIC_QCAPS_RESP_FLAGS_COS_ASSIGNMENT_CAP 0x100UL
- #define VNIC_QCAPS_RESP_FLAGS_RX_CMPL_V2_CAP 0x200UL
- #define VNIC_QCAPS_RESP_FLAGS_VNIC_STATE_CAP 0x400UL
- #define VNIC_QCAPS_RESP_FLAGS_VIRTIO_NET_VNIC_ALLOC_CAP 0x800UL
- #define VNIC_QCAPS_RESP_FLAGS_METADATA_FORMAT_CAP 0x1000UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_STRICT_HASH_TYPE_CAP 0x2000UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP 0x4000UL
- #define VNIC_QCAPS_RESP_FLAGS_RING_SELECT_MODE_TOEPLITZ_CAP 0x8000UL
- #define VNIC_QCAPS_RESP_FLAGS_RING_SELECT_MODE_XOR_CAP 0x10000UL
- #define VNIC_QCAPS_RESP_FLAGS_RING_SELECT_MODE_TOEPLITZ_CHKSM_CAP 0x20000UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_IPV6_FLOW_LABEL_CAP 0x40000UL
- #define VNIC_QCAPS_RESP_FLAGS_RX_CMPL_V3_CAP 0x80000UL
- #define VNIC_QCAPS_RESP_FLAGS_L2_CQE_MODE_CAP 0x100000UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP 0x200000UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP 0x400000UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP 0x800000UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP 0x1000000UL
- #define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_TRUSTED_VF_CAP 0x2000000UL
- #define VNIC_QCAPS_RESP_FLAGS_PORTCOS_MAPPING_MODE 0x4000000UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED 0x8000000UL
- #define VNIC_QCAPS_RESP_FLAGS_VNIC_RSS_HASH_MODE_CAP 0x10000000UL
- #define VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP 0x20000000UL
- __le16 max_aggs_supported;
- u8 unused_1[5];
- u8 valid;
-};
-
-/* hwrm_vnic_tpa_cfg_input (size:384b/48B) */
-struct hwrm_vnic_tpa_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define VNIC_TPA_CFG_REQ_FLAGS_TPA 0x1UL
- #define VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA 0x2UL
- #define VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE 0x4UL
- #define VNIC_TPA_CFG_REQ_FLAGS_GRO 0x8UL
- #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN 0x10UL
- #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL
- #define VNIC_TPA_CFG_REQ_FLAGS_GRO_IPID_CHECK 0x40UL
- #define VNIC_TPA_CFG_REQ_FLAGS_GRO_TTL_CHECK 0x80UL
- #define VNIC_TPA_CFG_REQ_FLAGS_AGG_PACK_AS_GRO 0x100UL
- __le32 enables;
- #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS 0x1UL
- #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS 0x2UL
- #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_TIMER 0x4UL
- #define VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN 0x8UL
- #define VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN 0x10UL
- __le16 vnic_id;
- __le16 max_agg_segs;
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1 0x0UL
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_2 0x1UL
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_4 0x2UL
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8 0x3UL
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX 0x1fUL
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_LAST VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX
- __le16 max_aggs;
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_1 0x0UL
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_2 0x1UL
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_4 0x2UL
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_8 0x3UL
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_16 0x4UL
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_MAX 0x7UL
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_LAST VNIC_TPA_CFG_REQ_MAX_AGGS_MAX
- u8 unused_0[2];
- __le32 max_agg_timer;
- __le32 min_agg_len;
- __le32 tnl_tpa_en_bitmap;
- #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN 0x1UL
- #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE 0x2UL
- #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_NVGRE 0x4UL
- #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE 0x8UL
- #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV4 0x10UL
- #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV6 0x20UL
- #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_GPE 0x40UL
- #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_CUST1 0x80UL
- #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE_CUST1 0x100UL
- #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR1 0x200UL
- #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR2 0x400UL
- #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR3 0x800UL
- #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR4 0x1000UL
- #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR5 0x2000UL
- #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR6 0x4000UL
- #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR7 0x8000UL
- #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR8 0x10000UL
- u8 unused_1[4];
-};
-
-/* hwrm_vnic_tpa_cfg_output (size:128b/16B) */
-struct hwrm_vnic_tpa_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_vnic_tpa_qcfg_input (size:192b/24B) */
-struct hwrm_vnic_tpa_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 vnic_id;
- u8 unused_0[6];
-};
-
-/* hwrm_vnic_tpa_qcfg_output (size:256b/32B) */
-struct hwrm_vnic_tpa_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 flags;
- #define VNIC_TPA_QCFG_RESP_FLAGS_TPA 0x1UL
- #define VNIC_TPA_QCFG_RESP_FLAGS_ENCAP_TPA 0x2UL
- #define VNIC_TPA_QCFG_RESP_FLAGS_RSC_WND_UPDATE 0x4UL
- #define VNIC_TPA_QCFG_RESP_FLAGS_GRO 0x8UL
- #define VNIC_TPA_QCFG_RESP_FLAGS_AGG_WITH_ECN 0x10UL
- #define VNIC_TPA_QCFG_RESP_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL
- #define VNIC_TPA_QCFG_RESP_FLAGS_GRO_IPID_CHECK 0x40UL
- #define VNIC_TPA_QCFG_RESP_FLAGS_GRO_TTL_CHECK 0x80UL
- __le16 max_agg_segs;
- #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_1 0x0UL
- #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_2 0x1UL
- #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_4 0x2UL
- #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_8 0x3UL
- #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_MAX 0x1fUL
- #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_LAST VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_MAX
- __le16 max_aggs;
- #define VNIC_TPA_QCFG_RESP_MAX_AGGS_1 0x0UL
- #define VNIC_TPA_QCFG_RESP_MAX_AGGS_2 0x1UL
- #define VNIC_TPA_QCFG_RESP_MAX_AGGS_4 0x2UL
- #define VNIC_TPA_QCFG_RESP_MAX_AGGS_8 0x3UL
- #define VNIC_TPA_QCFG_RESP_MAX_AGGS_16 0x4UL
- #define VNIC_TPA_QCFG_RESP_MAX_AGGS_MAX 0x7UL
- #define VNIC_TPA_QCFG_RESP_MAX_AGGS_LAST VNIC_TPA_QCFG_RESP_MAX_AGGS_MAX
- __le32 max_agg_timer;
- __le32 min_agg_len;
- __le32 tnl_tpa_en_bitmap;
- #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_VXLAN 0x1UL
- #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_GENEVE 0x2UL
- #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_NVGRE 0x4UL
- #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_GRE 0x8UL
- #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_IPV4 0x10UL
- #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_IPV6 0x20UL
- #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_VXLAN_GPE 0x40UL
- #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_VXLAN_CUST1 0x80UL
- #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_GRE_CUST1 0x100UL
- #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR1 0x200UL
- #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR2 0x400UL
- #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR3 0x800UL
- #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR4 0x1000UL
- #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR5 0x2000UL
- #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR6 0x4000UL
- #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR7 0x8000UL
- #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR8 0x10000UL
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_vnic_rss_cfg_input (size:384b/48B) */
-struct hwrm_vnic_rss_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 hash_type;
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 0x1UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 0x2UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 0x4UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 0x8UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 0x10UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6 0x20UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL 0x40UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 0x80UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4 0x100UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 0x200UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6 0x400UL
- __le16 vnic_id;
- u8 ring_table_pair_index;
- u8 hash_mode_flags;
- #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT 0x1UL
- #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_INNERMOST_4 0x2UL
- #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_INNERMOST_2 0x4UL
- #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_OUTERMOST_4 0x8UL
- #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_OUTERMOST_2 0x10UL
- __le64 ring_grp_tbl_addr;
- __le64 hash_key_tbl_addr;
- __le16 rss_ctx_idx;
- u8 flags;
- #define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE 0x1UL
- #define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE 0x2UL
- #define VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT 0x4UL
- u8 ring_select_mode;
- #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ 0x0UL
- #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_XOR 0x1UL
- #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ_CHECKSUM 0x2UL
- #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_LAST VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ_CHECKSUM
- u8 unused_1[4];
-};
-
-/* hwrm_vnic_rss_cfg_output (size:128b/16B) */
-struct hwrm_vnic_rss_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_vnic_rss_cfg_cmd_err (size:64b/8B) */
-struct hwrm_vnic_rss_cfg_cmd_err {
- u8 code;
- #define VNIC_RSS_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define VNIC_RSS_CFG_CMD_ERR_CODE_INTERFACE_NOT_READY 0x1UL
- #define VNIC_RSS_CFG_CMD_ERR_CODE_LAST VNIC_RSS_CFG_CMD_ERR_CODE_INTERFACE_NOT_READY
- u8 unused_0[7];
-};
-
-/* hwrm_vnic_rss_qcfg_input (size:192b/24B) */
-struct hwrm_vnic_rss_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 rss_ctx_idx;
- __le16 vnic_id;
- u8 unused_0[4];
-};
-
-/* hwrm_vnic_rss_qcfg_output (size:512b/64B) */
-struct hwrm_vnic_rss_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 hash_type;
- #define VNIC_RSS_QCFG_RESP_HASH_TYPE_IPV4 0x1UL
- #define VNIC_RSS_QCFG_RESP_HASH_TYPE_TCP_IPV4 0x2UL
- #define VNIC_RSS_QCFG_RESP_HASH_TYPE_UDP_IPV4 0x4UL
- #define VNIC_RSS_QCFG_RESP_HASH_TYPE_IPV6 0x8UL
- #define VNIC_RSS_QCFG_RESP_HASH_TYPE_TCP_IPV6 0x10UL
- #define VNIC_RSS_QCFG_RESP_HASH_TYPE_UDP_IPV6 0x20UL
- #define VNIC_RSS_QCFG_RESP_HASH_TYPE_IPV6_FLOW_LABEL 0x40UL
- #define VNIC_RSS_QCFG_RESP_HASH_TYPE_AH_SPI_IPV4 0x80UL
- #define VNIC_RSS_QCFG_RESP_HASH_TYPE_ESP_SPI_IPV4 0x100UL
- #define VNIC_RSS_QCFG_RESP_HASH_TYPE_AH_SPI_IPV6 0x200UL
- #define VNIC_RSS_QCFG_RESP_HASH_TYPE_ESP_SPI_IPV6 0x400UL
- u8 unused_0[4];
- __le32 hash_key[10];
- u8 hash_mode_flags;
- #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_DEFAULT 0x1UL
- #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_INNERMOST_4 0x2UL
- #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_INNERMOST_2 0x4UL
- #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_OUTERMOST_4 0x8UL
- #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_OUTERMOST_2 0x10UL
- u8 ring_select_mode;
- #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_TOEPLITZ 0x0UL
- #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_XOR 0x1UL
- #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_TOEPLITZ_CHECKSUM 0x2UL
- #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_LAST VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_TOEPLITZ_CHECKSUM
- u8 unused_1[5];
- u8 valid;
-};
-
-/* hwrm_vnic_plcmodes_cfg_input (size:320b/40B) */
-struct hwrm_vnic_plcmodes_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define VNIC_PLCMODES_CFG_REQ_FLAGS_REGULAR_PLACEMENT 0x1UL
- #define VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT 0x2UL
- #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 0x4UL
- #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6 0x8UL
- #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_FCOE 0x10UL
- #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_ROCE 0x20UL
- #define VNIC_PLCMODES_CFG_REQ_FLAGS_VIRTIO_PLACEMENT 0x40UL
- __le32 enables;
- #define VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID 0x1UL
- #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_OFFSET_VALID 0x2UL
- #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID 0x4UL
- #define VNIC_PLCMODES_CFG_REQ_ENABLES_MAX_BDS_VALID 0x8UL
- __le32 vnic_id;
- __le16 jumbo_thresh;
- __le16 hds_offset;
- __le16 hds_threshold;
- __le16 max_bds;
- u8 unused_0[4];
-};
-
-/* hwrm_vnic_plcmodes_cfg_output (size:128b/16B) */
-struct hwrm_vnic_plcmodes_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_vnic_rss_cos_lb_ctx_alloc_input (size:128b/16B) */
-struct hwrm_vnic_rss_cos_lb_ctx_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
-};
-
-/* hwrm_vnic_rss_cos_lb_ctx_alloc_output (size:128b/16B) */
-struct hwrm_vnic_rss_cos_lb_ctx_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 rss_cos_lb_ctx_id;
- u8 unused_0[5];
- u8 valid;
-};
-
-/* hwrm_vnic_rss_cos_lb_ctx_free_input (size:192b/24B) */
-struct hwrm_vnic_rss_cos_lb_ctx_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 rss_cos_lb_ctx_id;
- u8 unused_0[6];
-};
-
-/* hwrm_vnic_rss_cos_lb_ctx_free_output (size:128b/16B) */
-struct hwrm_vnic_rss_cos_lb_ctx_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_ring_alloc_input (size:704b/88B) */
-struct hwrm_ring_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL
- #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL
- #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL
- #define RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID 0x40UL
- #define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID 0x80UL
- #define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID 0x100UL
- #define RING_ALLOC_REQ_ENABLES_SCHQ_ID 0x200UL
- #define RING_ALLOC_REQ_ENABLES_MPC_CHNLS_TYPE 0x400UL
- #define RING_ALLOC_REQ_ENABLES_STEERING_TAG_VALID 0x800UL
- u8 ring_type;
- #define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL
- #define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL
- #define RING_ALLOC_REQ_RING_TYPE_RX 0x2UL
- #define RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL 0x3UL
- #define RING_ALLOC_REQ_RING_TYPE_RX_AGG 0x4UL
- #define RING_ALLOC_REQ_RING_TYPE_NQ 0x5UL
- #define RING_ALLOC_REQ_RING_TYPE_LAST RING_ALLOC_REQ_RING_TYPE_NQ
- u8 cmpl_coal_cnt;
- #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_OFF 0x0UL
- #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_4 0x1UL
- #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_8 0x2UL
- #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_12 0x3UL
- #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_16 0x4UL
- #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_24 0x5UL
- #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_32 0x6UL
- #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_48 0x7UL
- #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64 0x8UL
- #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_96 0x9UL
- #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_128 0xaUL
- #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_192 0xbUL
- #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_256 0xcUL
- #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_320 0xdUL
- #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_384 0xeUL
- #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_MAX 0xfUL
- #define RING_ALLOC_REQ_CMPL_COAL_CNT_LAST RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_MAX
- __le16 flags;
- #define RING_ALLOC_REQ_FLAGS_RX_SOP_PAD 0x1UL
- #define RING_ALLOC_REQ_FLAGS_DISABLE_CQ_OVERFLOW_DETECTION 0x2UL
- #define RING_ALLOC_REQ_FLAGS_NQ_DBR_PACING 0x4UL
- #define RING_ALLOC_REQ_FLAGS_TX_PKT_TS_CMPL_ENABLE 0x8UL
- __le64 page_tbl_addr;
- __le32 fbo;
- u8 page_size;
- u8 page_tbl_depth;
- __le16 schq_id;
- __le32 length;
- __le16 logical_id;
- __le16 cmpl_ring_id;
- __le16 queue_id;
- __le16 rx_buf_size;
- __le16 rx_ring_id;
- __le16 nq_ring_id;
- __le16 ring_arb_cfg;
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_MASK 0xfUL
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SFT 0
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SP 0x1UL
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ 0x2UL
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_LAST RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ
- #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_MASK 0xf0UL
- #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_SFT 4
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_MASK 0xff00UL
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8
- __le16 steering_tag;
- __le32 reserved3;
- __le32 stat_ctx_id;
- __le32 reserved4;
- __le32 max_bw;
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_SFT 0
- #define RING_ALLOC_REQ_MAX_BW_SCALE 0x10000000UL
- #define RING_ALLOC_REQ_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define RING_ALLOC_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define RING_ALLOC_REQ_MAX_BW_SCALE_LAST RING_ALLOC_REQ_MAX_BW_SCALE_BYTES
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_LAST RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 int_mode;
- #define RING_ALLOC_REQ_INT_MODE_LEGACY 0x0UL
- #define RING_ALLOC_REQ_INT_MODE_RSVD 0x1UL
- #define RING_ALLOC_REQ_INT_MODE_MSIX 0x2UL
- #define RING_ALLOC_REQ_INT_MODE_POLL 0x3UL
- #define RING_ALLOC_REQ_INT_MODE_LAST RING_ALLOC_REQ_INT_MODE_POLL
- u8 mpc_chnls_type;
- #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_TCE 0x0UL
- #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_RCE 0x1UL
- #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_TE_CFA 0x2UL
- #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_RE_CFA 0x3UL
- #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_PRIMATE 0x4UL
- #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_LAST RING_ALLOC_REQ_MPC_CHNLS_TYPE_PRIMATE
- u8 unused_4[2];
- __le64 cq_handle;
-};
-
-/* hwrm_ring_alloc_output (size:128b/16B) */
-struct hwrm_ring_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 ring_id;
- __le16 logical_ring_id;
- u8 push_buffer_index;
- #define RING_ALLOC_RESP_PUSH_BUFFER_INDEX_PING_BUFFER 0x0UL
- #define RING_ALLOC_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER 0x1UL
- #define RING_ALLOC_RESP_PUSH_BUFFER_INDEX_LAST RING_ALLOC_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER
- u8 unused_0[2];
- u8 valid;
-};
-
-/* hwrm_ring_free_input (size:256b/32B) */
-struct hwrm_ring_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 ring_type;
- #define RING_FREE_REQ_RING_TYPE_L2_CMPL 0x0UL
- #define RING_FREE_REQ_RING_TYPE_TX 0x1UL
- #define RING_FREE_REQ_RING_TYPE_RX 0x2UL
- #define RING_FREE_REQ_RING_TYPE_ROCE_CMPL 0x3UL
- #define RING_FREE_REQ_RING_TYPE_RX_AGG 0x4UL
- #define RING_FREE_REQ_RING_TYPE_NQ 0x5UL
- #define RING_FREE_REQ_RING_TYPE_LAST RING_FREE_REQ_RING_TYPE_NQ
- u8 flags;
- #define RING_FREE_REQ_FLAGS_VIRTIO_RING_VALID 0x1UL
- #define RING_FREE_REQ_FLAGS_LAST RING_FREE_REQ_FLAGS_VIRTIO_RING_VALID
- __le16 ring_id;
- __le32 prod_idx;
- __le32 opaque;
- __le32 unused_1;
-};
-
-/* hwrm_ring_free_output (size:128b/16B) */
-struct hwrm_ring_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_ring_reset_input (size:192b/24B) */
-struct hwrm_ring_reset_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 ring_type;
- #define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL
- #define RING_RESET_REQ_RING_TYPE_TX 0x1UL
- #define RING_RESET_REQ_RING_TYPE_RX 0x2UL
- #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL
- #define RING_RESET_REQ_RING_TYPE_RX_RING_GRP 0x6UL
- #define RING_RESET_REQ_RING_TYPE_LAST RING_RESET_REQ_RING_TYPE_RX_RING_GRP
- u8 unused_0;
- __le16 ring_id;
- u8 unused_1[4];
-};
-
-/* hwrm_ring_reset_output (size:128b/16B) */
-struct hwrm_ring_reset_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 push_buffer_index;
- #define RING_RESET_RESP_PUSH_BUFFER_INDEX_PING_BUFFER 0x0UL
- #define RING_RESET_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER 0x1UL
- #define RING_RESET_RESP_PUSH_BUFFER_INDEX_LAST RING_RESET_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER
- u8 unused_0[3];
- u8 consumer_idx[3];
- u8 valid;
-};
-
-/* hwrm_ring_aggint_qcaps_input (size:128b/16B) */
-struct hwrm_ring_aggint_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
-};
-
-/* hwrm_ring_aggint_qcaps_output (size:384b/48B) */
-struct hwrm_ring_aggint_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 cmpl_params;
- #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN 0x1UL
- #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MAX 0x2UL
- #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET 0x4UL
- #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE 0x8UL
- #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR 0x10UL
- #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT 0x20UL
- #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR 0x40UL
- #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR_DURING_INT 0x80UL
- #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_AGGR_INT 0x100UL
- __le32 nq_params;
- #define RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN 0x1UL
- __le16 num_cmpl_dma_aggr_min;
- __le16 num_cmpl_dma_aggr_max;
- __le16 num_cmpl_dma_aggr_during_int_min;
- __le16 num_cmpl_dma_aggr_during_int_max;
- __le16 cmpl_aggr_dma_tmr_min;
- __le16 cmpl_aggr_dma_tmr_max;
- __le16 cmpl_aggr_dma_tmr_during_int_min;
- __le16 cmpl_aggr_dma_tmr_during_int_max;
- __le16 int_lat_tmr_min_min;
- __le16 int_lat_tmr_min_max;
- __le16 int_lat_tmr_max_min;
- __le16 int_lat_tmr_max_max;
- __le16 num_cmpl_aggr_int_min;
- __le16 num_cmpl_aggr_int_max;
- __le16 timer_units;
- u8 unused_0[1];
- u8 valid;
-};
-
-/* hwrm_ring_cmpl_ring_qaggint_params_input (size:192b/24B) */
-struct hwrm_ring_cmpl_ring_qaggint_params_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 ring_id;
- __le16 flags;
- #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_UNUSED_0_MASK 0x3UL
- #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_UNUSED_0_SFT 0
- #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_IS_NQ 0x4UL
- u8 unused_0[4];
-};
-
-/* hwrm_ring_cmpl_ring_qaggint_params_output (size:256b/32B) */
-struct hwrm_ring_cmpl_ring_qaggint_params_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 flags;
- #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_TIMER_RESET 0x1UL
- #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_RING_IDLE 0x2UL
- __le16 num_cmpl_dma_aggr;
- __le16 num_cmpl_dma_aggr_during_int;
- __le16 cmpl_aggr_dma_tmr;
- __le16 cmpl_aggr_dma_tmr_during_int;
- __le16 int_lat_tmr_min;
- __le16 int_lat_tmr_max;
- __le16 num_cmpl_aggr_int;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_ring_cmpl_ring_cfg_aggint_params_input (size:320b/40B) */
-struct hwrm_ring_cmpl_ring_cfg_aggint_params_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 ring_id;
- __le16 flags;
- #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET 0x1UL
- #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE 0x2UL
- #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ 0x4UL
- __le16 num_cmpl_dma_aggr;
- __le16 num_cmpl_dma_aggr_during_int;
- __le16 cmpl_aggr_dma_tmr;
- __le16 cmpl_aggr_dma_tmr_during_int;
- __le16 int_lat_tmr_min;
- __le16 int_lat_tmr_max;
- __le16 num_cmpl_aggr_int;
- __le16 enables;
- #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR 0x1UL
- #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR_DURING_INT 0x2UL
- #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_CMPL_AGGR_DMA_TMR 0x4UL
- #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MIN 0x8UL
- #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MAX 0x10UL
- #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_AGGR_INT 0x20UL
- u8 unused_0[4];
-};
-
-/* hwrm_ring_cmpl_ring_cfg_aggint_params_output (size:128b/16B) */
-struct hwrm_ring_cmpl_ring_cfg_aggint_params_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_ring_grp_alloc_input (size:192b/24B) */
-struct hwrm_ring_grp_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 cr;
- __le16 rr;
- __le16 ar;
- __le16 sc;
-};
-
-/* hwrm_ring_grp_alloc_output (size:128b/16B) */
-struct hwrm_ring_grp_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 ring_group_id;
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_ring_grp_free_input (size:192b/24B) */
-struct hwrm_ring_grp_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 ring_group_id;
- u8 unused_0[4];
-};
-
-/* hwrm_ring_grp_free_output (size:128b/16B) */
-struct hwrm_ring_grp_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-#define DEFAULT_FLOW_ID 0xFFFFFFFFUL
-#define ROCEV1_FLOW_ID 0xFFFFFFFEUL
-#define ROCEV2_FLOW_ID 0xFFFFFFFDUL
-#define ROCEV2_CNP_FLOW_ID 0xFFFFFFFCUL
-
-/* hwrm_cfa_l2_filter_alloc_input (size:768b/96B) */
-struct hwrm_cfa_l2_filter_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX 0x0UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX 0x1UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x2UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP 0x4UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST 0x8UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_MASK 0x30UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_SFT 4
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 4)
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 4)
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 4)
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_ROCE
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_XDP_DISABLE 0x40UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_SOURCE_VALID 0x80UL
- __le32 enables;
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x1UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK 0x2UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN 0x4UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN_MASK 0x8UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x10UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK 0x20UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR 0x40UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR_MASK 0x80UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN 0x100UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN_MASK 0x200UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN 0x400UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN_MASK 0x800UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_TYPE 0x1000UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_ID 0x2000UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4000UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS 0x20000UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_NUM_VLANS 0x40000UL
- u8 l2_addr[6];
- u8 num_vlans;
- u8 t_num_vlans;
- u8 l2_addr_mask[6];
- __le16 l2_ovlan;
- __le16 l2_ovlan_mask;
- __le16 l2_ivlan;
- __le16 l2_ivlan_mask;
- u8 unused_1[2];
- u8 t_l2_addr[6];
- u8 unused_2[2];
- u8 t_l2_addr_mask[6];
- __le16 t_l2_ovlan;
- __le16 t_l2_ovlan_mask;
- __le16 t_l2_ivlan;
- __le16 t_l2_ivlan_mask;
- u8 src_type;
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT 0x0UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_PF 0x1UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VF 0x2UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VNIC 0x3UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_KONG 0x4UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_APE 0x5UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_BONO 0x6UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG 0x7UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG
- u8 unused_3;
- __le32 src_id;
- u8 tunnel_type;
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
- u8 unused_4;
- __le16 dst_id;
- __le16 mirror_vnic_id;
- u8 pri_hint;
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_ABOVE_FILTER 0x1UL
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_BELOW_FILTER 0x2UL
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MAX 0x3UL
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN 0x4UL
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_LAST CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN
- u8 unused_5;
- __le32 unused_6;
- __le64 l2_filter_id_hint;
-};
-
-/* hwrm_cfa_l2_filter_alloc_output (size:192b/24B) */
-struct hwrm_cfa_l2_filter_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 l2_filter_id;
- __le32 flow_id;
- #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
- #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
- #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
- #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
- #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
- #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT
- #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
- #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
- #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
- #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_cfa_l2_filter_free_input (size:192b/24B) */
-struct hwrm_cfa_l2_filter_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 l2_filter_id;
-};
-
-/* hwrm_cfa_l2_filter_free_output (size:128b/16B) */
-struct hwrm_cfa_l2_filter_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_cfa_l2_filter_cfg_input (size:320b/40B) */
-struct hwrm_cfa_l2_filter_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX 0x0UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 0x1UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_MASK 0xcUL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_SFT 2
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 2)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 2)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 2)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE
- __le32 enables;
- #define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID 0x1UL
- #define CFA_L2_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
- __le64 l2_filter_id;
- __le32 dst_id;
- __le32 new_mirror_vnic_id;
-};
-
-/* hwrm_cfa_l2_filter_cfg_output (size:128b/16B) */
-struct hwrm_cfa_l2_filter_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_cfa_l2_set_rx_mask_input (size:448b/56B) */
-struct hwrm_cfa_l2_set_rx_mask_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 vnic_id;
- __le32 mask;
- #define CFA_L2_SET_RX_MASK_REQ_MASK_MCAST 0x2UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST 0x4UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST 0x8UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS 0x10UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_OUTERMOST 0x20UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_VLANONLY 0x40UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_VLAN_NONVLAN 0x80UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_ANYVLAN_NONVLAN 0x100UL
- __le64 mc_tbl_addr;
- __le32 num_mc_entries;
- u8 unused_0[4];
- __le64 vlan_tag_tbl_addr;
- __le32 num_vlan_tags;
- u8 unused_1[4];
-};
-
-/* hwrm_cfa_l2_set_rx_mask_output (size:128b/16B) */
-struct hwrm_cfa_l2_set_rx_mask_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_cfa_l2_set_rx_mask_cmd_err (size:64b/8B) */
-struct hwrm_cfa_l2_set_rx_mask_cmd_err {
- u8 code;
- #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR 0x1UL
- #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_LAST CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR
- u8 unused_0[7];
-};
-
-/* hwrm_cfa_tunnel_filter_alloc_input (size:704b/88B) */
-struct hwrm_cfa_tunnel_filter_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
- __le32 enables;
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x2UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x4UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR 0x8UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR_TYPE 0x10UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR_TYPE 0x20UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR 0x40UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x80UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_VNI 0x100UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID 0x200UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x400UL
- __le64 l2_filter_id;
- u8 l2_addr[6];
- __le16 l2_ivlan;
- __le32 l3_addr[4];
- __le32 t_l3_addr[4];
- u8 l3_addr_type;
- u8 t_l3_addr_type;
- u8 tunnel_type;
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
- u8 tunnel_flags;
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_OAM_CHECKSUM_EXPLHDR 0x1UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_CRITICAL_OPT_S1 0x2UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_EXTHDR_SEQNUM_S0 0x4UL
- __le32 vni;
- __le32 dst_vnic_id;
- __le32 mirror_vnic_id;
-};
-
-/* hwrm_cfa_tunnel_filter_alloc_output (size:192b/24B) */
-struct hwrm_cfa_tunnel_filter_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 tunnel_filter_id;
- __le32 flow_id;
- #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
- #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
- #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
- #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
- #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
- #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT
- #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
- #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
- #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
- #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_cfa_tunnel_filter_free_input (size:192b/24B) */
-struct hwrm_cfa_tunnel_filter_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 tunnel_filter_id;
-};
-
-/* hwrm_cfa_tunnel_filter_free_output (size:128b/16B) */
-struct hwrm_cfa_tunnel_filter_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_vxlan_ipv4_hdr (size:128b/16B) */
-struct hwrm_vxlan_ipv4_hdr {
- u8 ver_hlen;
- #define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_MASK 0xfUL
- #define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT 0
- #define VXLAN_IPV4_HDR_VER_HLEN_VERSION_MASK 0xf0UL
- #define VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT 4
- u8 tos;
- __be16 ip_id;
- __be16 flags_frag_offset;
- u8 ttl;
- u8 protocol;
- __be32 src_ip_addr;
- __be32 dest_ip_addr;
-};
-
-/* hwrm_vxlan_ipv6_hdr (size:320b/40B) */
-struct hwrm_vxlan_ipv6_hdr {
- __be32 ver_tc_flow_label;
- #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_SFT 0x1cUL
- #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_MASK 0xf0000000UL
- #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_SFT 0x14UL
- #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_MASK 0xff00000UL
- #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_SFT 0x0UL
- #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK 0xfffffUL
- #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_LAST VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK
- __be16 payload_len;
- u8 next_hdr;
- u8 ttl;
- __be32 src_ip_addr[4];
- __be32 dest_ip_addr[4];
-};
-
-/* hwrm_cfa_encap_data_vxlan (size:640b/80B) */
-struct hwrm_cfa_encap_data_vxlan {
- u8 src_mac_addr[6];
- __le16 unused_0;
- u8 dst_mac_addr[6];
- u8 num_vlan_tags;
- u8 unused_1;
- __be16 ovlan_tpid;
- __be16 ovlan_tci;
- __be16 ivlan_tpid;
- __be16 ivlan_tci;
- __le32 l3[10];
- #define CFA_ENCAP_DATA_VXLAN_L3_VER_MASK 0xfUL
- #define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV4 0x4UL
- #define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6 0x6UL
- #define CFA_ENCAP_DATA_VXLAN_L3_LAST CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6
- __be16 src_port;
- __be16 dst_port;
- __be32 vni;
- u8 hdr_rsvd0[3];
- u8 hdr_rsvd1;
- u8 hdr_flags;
- u8 unused[3];
-};
-
-/* hwrm_cfa_encap_record_alloc_input (size:832b/104B) */
-struct hwrm_cfa_encap_record_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_EXTERNAL 0x2UL
- u8 encap_type;
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN 0x1UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE 0x2UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE 0x3UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP 0x4UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE 0x5UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS 0x6UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN 0x7UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE 0x8UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_V4 0x9UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE_V1 0xaUL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2_ETYPE 0xbUL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_GPE_V6 0xcUL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_GPE 0x10UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_LAST CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_GPE
- u8 unused_0[3];
- __le32 encap_data[20];
-};
-
-/* hwrm_cfa_encap_record_alloc_output (size:128b/16B) */
-struct hwrm_cfa_encap_record_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 encap_record_id;
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_cfa_encap_record_free_input (size:192b/24B) */
-struct hwrm_cfa_encap_record_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 encap_record_id;
- u8 unused_0[4];
-};
-
-/* hwrm_cfa_encap_record_free_output (size:128b/16B) */
-struct hwrm_cfa_encap_record_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_cfa_ntuple_filter_alloc_input (size:1024b/128B) */
-struct hwrm_cfa_ntuple_filter_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP 0x2UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_METER 0x4UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_FID 0x8UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_ARP_REPLY 0x10UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX 0x20UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_NO_L2_CONTEXT 0x40UL
- __le32 enables;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x2UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x8UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x10UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x20UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK 0x40UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x80UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK 0x100UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x200UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x400UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK 0x800UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x1000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK 0x2000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_PRI_HINT 0x4000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_NTUPLE_FILTER_ID 0x8000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x10000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x20000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x40000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX 0x80000UL
- __le64 l2_filter_id;
- u8 src_macaddr[6];
- __be16 ethertype;
- u8 ip_addr_type;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6
- u8 ip_protocol;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_ICMP 0x1UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_ICMPV6 0x3aUL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_RSVD 0xffUL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_RSVD
- __le16 dst_id;
- __le16 rfs_ring_tbl_idx;
- u8 tunnel_type;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
- u8 pri_hint;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_ABOVE 0x1UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_BELOW 0x2UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_HIGHEST 0x3UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST 0x4UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST
- __be32 src_ipaddr[4];
- __be32 src_ipaddr_mask[4];
- __be32 dst_ipaddr[4];
- __be32 dst_ipaddr_mask[4];
- __be16 src_port;
- __be16 src_port_mask;
- __be16 dst_port;
- __be16 dst_port_mask;
- __le64 ntuple_filter_id_hint;
-};
-
-/* hwrm_cfa_ntuple_filter_alloc_output (size:192b/24B) */
-struct hwrm_cfa_ntuple_filter_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 ntuple_filter_id;
- __le32 flow_id;
- #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
- #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
- #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
- #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
- #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
- #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT
- #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
- #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
- #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
- #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_cfa_ntuple_filter_alloc_cmd_err (size:64b/8B) */
-struct hwrm_cfa_ntuple_filter_alloc_cmd_err {
- u8 code;
- #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR 0x1UL
- #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_LAST CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR
- u8 unused_0[7];
-};
-
-/* hwrm_cfa_ntuple_filter_free_input (size:192b/24B) */
-struct hwrm_cfa_ntuple_filter_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 ntuple_filter_id;
-};
-
-/* hwrm_cfa_ntuple_filter_free_output (size:128b/16B) */
-struct hwrm_cfa_ntuple_filter_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_cfa_ntuple_filter_cfg_input (size:384b/48B) */
-struct hwrm_cfa_ntuple_filter_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_ID 0x1UL
- #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
- #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_METER_INSTANCE_ID 0x4UL
- __le32 flags;
- #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_FID 0x1UL
- #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_RFS_RING_IDX 0x2UL
- #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_NO_L2_CONTEXT 0x4UL
- __le64 ntuple_filter_id;
- __le32 new_dst_id;
- __le32 new_mirror_vnic_id;
- __le16 new_meter_instance_id;
- #define CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID 0xffffUL
- #define CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_LAST CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID
- u8 unused_1[6];
-};
-
-/* hwrm_cfa_ntuple_filter_cfg_output (size:128b/16B) */
-struct hwrm_cfa_ntuple_filter_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_cfa_decap_filter_alloc_input (size:832b/104B) */
-struct hwrm_cfa_decap_filter_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL 0x1UL
- __le32 enables;
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x1UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID 0x2UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x4UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x8UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_OVLAN_VID 0x10UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IVLAN_VID 0x20UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_OVLAN_VID 0x40UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID 0x80UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x100UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x200UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x400UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x800UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x1000UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x2000UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x4000UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL
- __be32 tunnel_id;
- u8 tunnel_type;
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
- u8 unused_0;
- __le16 unused_1;
- u8 src_macaddr[6];
- u8 unused_2[2];
- u8 dst_macaddr[6];
- __be16 ovlan_vid;
- __be16 ivlan_vid;
- __be16 t_ovlan_vid;
- __be16 t_ivlan_vid;
- __be16 ethertype;
- u8 ip_addr_type;
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6
- u8 ip_protocol;
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_LAST CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP
- __le16 unused_3;
- __le32 unused_4;
- __be32 src_ipaddr[4];
- __be32 dst_ipaddr[4];
- __be16 src_port;
- __be16 dst_port;
- __le16 dst_id;
- __le16 l2_ctxt_ref_id;
-};
-
-/* hwrm_cfa_decap_filter_alloc_output (size:128b/16B) */
-struct hwrm_cfa_decap_filter_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 decap_filter_id;
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_cfa_decap_filter_free_input (size:192b/24B) */
-struct hwrm_cfa_decap_filter_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 decap_filter_id;
- u8 unused_0[4];
-};
-
-/* hwrm_cfa_decap_filter_free_output (size:128b/16B) */
-struct hwrm_cfa_decap_filter_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_cfa_flow_alloc_input (size:1024b/128B) */
-struct hwrm_cfa_flow_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 flags;
- #define CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL 0x1UL
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_MASK 0x6UL
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_SFT 1
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_NONE (0x0UL << 1)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE (0x1UL << 1)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO (0x2UL << 1)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_LAST CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_MASK 0x38UL
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_SFT 3
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2 (0x0UL << 3)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 (0x1UL << 3)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6 (0x2UL << 3)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_LAST CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6
- #define CFA_FLOW_ALLOC_REQ_FLAGS_PATH_TX 0x40UL
- #define CFA_FLOW_ALLOC_REQ_FLAGS_PATH_RX 0x80UL
- #define CFA_FLOW_ALLOC_REQ_FLAGS_MATCH_VXLAN_IP_VNI 0x100UL
- #define CFA_FLOW_ALLOC_REQ_FLAGS_VHOST_ID_USE_VLAN 0x200UL
- __le16 src_fid;
- __le32 tunnel_handle;
- __le16 action_flags;
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD 0x1UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_RECYCLE 0x2UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP 0x4UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_METER 0x8UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL 0x10UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC 0x20UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST 0x40UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS 0x80UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE 0x100UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TTL_DECREMENT 0x200UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL_IP 0x400UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FLOW_AGING_ENABLED 0x800UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_PRI_HINT 0x1000UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NO_FLOW_COUNTER_ALLOC 0x2000UL
- __le16 dst_fid;
- __be16 l2_rewrite_vlan_tpid;
- __be16 l2_rewrite_vlan_tci;
- __le16 act_meter_id;
- __le16 ref_flow_handle;
- __be16 ethertype;
- __be16 outer_vlan_tci;
- __be16 dmac[3];
- __be16 inner_vlan_tci;
- __be16 smac[3];
- u8 ip_dst_mask_len;
- u8 ip_src_mask_len;
- __be32 ip_dst[4];
- __be32 ip_src[4];
- __be16 l4_src_port;
- __be16 l4_src_port_mask;
- __be16 l4_dst_port;
- __be16 l4_dst_port_mask;
- __be32 nat_ip_address[4];
- __be16 l2_rewrite_dmac[3];
- __be16 nat_port;
- __be16 l2_rewrite_smac[3];
- u8 ip_proto;
- u8 tunnel_type;
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
-};
-
-/* hwrm_cfa_flow_alloc_output (size:256b/32B) */
-struct hwrm_cfa_flow_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 flow_handle;
- u8 unused_0[2];
- __le32 flow_id;
- #define CFA_FLOW_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
- #define CFA_FLOW_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
- #define CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
- #define CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
- #define CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
- #define CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE_EXT
- #define CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
- #define CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
- #define CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
- #define CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR_TX
- __le64 ext_flow_handle;
- __le32 flow_counter_id;
- u8 unused_1[3];
- u8 valid;
-};
-
-/* hwrm_cfa_flow_alloc_cmd_err (size:64b/8B) */
-struct hwrm_cfa_flow_alloc_cmd_err {
- u8 code;
- #define CFA_FLOW_ALLOC_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define CFA_FLOW_ALLOC_CMD_ERR_CODE_L2_CONTEXT_TCAM 0x1UL
- #define CFA_FLOW_ALLOC_CMD_ERR_CODE_ACTION_RECORD 0x2UL
- #define CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_COUNTER 0x3UL
- #define CFA_FLOW_ALLOC_CMD_ERR_CODE_WILD_CARD_TCAM 0x4UL
- #define CFA_FLOW_ALLOC_CMD_ERR_CODE_HASH_COLLISION 0x5UL
- #define CFA_FLOW_ALLOC_CMD_ERR_CODE_KEY_EXISTS 0x6UL
- #define CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_CTXT_DB 0x7UL
- #define CFA_FLOW_ALLOC_CMD_ERR_CODE_LAST CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_CTXT_DB
- u8 unused_0[7];
-};
-
-/* hwrm_cfa_flow_free_input (size:256b/32B) */
-struct hwrm_cfa_flow_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 flow_handle;
- __le16 unused_0;
- __le32 flow_counter_id;
- __le64 ext_flow_handle;
-};
-
-/* hwrm_cfa_flow_free_output (size:256b/32B) */
-struct hwrm_cfa_flow_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 packet;
- __le64 byte;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_cfa_flow_info_input (size:256b/32B) */
-struct hwrm_cfa_flow_info_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 flow_handle;
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK 0xfffUL
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_CNP_CNT 0x1000UL
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV1_CNT 0x2000UL
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_NIC_TX 0x3000UL
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT 0x4000UL
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX 0x8000UL
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_CNP_CNT_RX 0x9000UL
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV1_CNT_RX 0xa000UL
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_NIC_RX 0xb000UL
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT_RX 0xc000UL
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_LAST CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT_RX
- u8 unused_0[6];
- __le64 ext_flow_handle;
-};
-
-/* hwrm_cfa_flow_info_output (size:5632b/704B) */
-struct hwrm_cfa_flow_info_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 flags;
- #define CFA_FLOW_INFO_RESP_FLAGS_PATH_TX 0x1UL
- #define CFA_FLOW_INFO_RESP_FLAGS_PATH_RX 0x2UL
- u8 profile;
- __le16 src_fid;
- __le16 dst_fid;
- __le16 l2_ctxt_id;
- __le64 em_info;
- __le64 tcam_info;
- __le64 vfp_tcam_info;
- __le16 ar_id;
- __le16 flow_handle;
- __le32 tunnel_handle;
- __le16 flow_timer;
- u8 unused_0[6];
- __le32 flow_key_data[130];
- __le32 flow_action_info[30];
- u8 unused_1[7];
- u8 valid;
-};
-
-/* hwrm_cfa_flow_stats_input (size:640b/80B) */
-struct hwrm_cfa_flow_stats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 num_flows;
- __le16 flow_handle_0;
- __le16 flow_handle_1;
- __le16 flow_handle_2;
- __le16 flow_handle_3;
- __le16 flow_handle_4;
- __le16 flow_handle_5;
- __le16 flow_handle_6;
- __le16 flow_handle_7;
- __le16 flow_handle_8;
- __le16 flow_handle_9;
- u8 unused_0[2];
- __le32 flow_id_0;
- __le32 flow_id_1;
- __le32 flow_id_2;
- __le32 flow_id_3;
- __le32 flow_id_4;
- __le32 flow_id_5;
- __le32 flow_id_6;
- __le32 flow_id_7;
- __le32 flow_id_8;
- __le32 flow_id_9;
-};
-
-/* hwrm_cfa_flow_stats_output (size:1408b/176B) */
-struct hwrm_cfa_flow_stats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 packet_0;
- __le64 packet_1;
- __le64 packet_2;
- __le64 packet_3;
- __le64 packet_4;
- __le64 packet_5;
- __le64 packet_6;
- __le64 packet_7;
- __le64 packet_8;
- __le64 packet_9;
- __le64 byte_0;
- __le64 byte_1;
- __le64 byte_2;
- __le64 byte_3;
- __le64 byte_4;
- __le64 byte_5;
- __le64 byte_6;
- __le64 byte_7;
- __le64 byte_8;
- __le64 byte_9;
- __le16 flow_hits;
- u8 unused_0[5];
- u8 valid;
-};
-
-/* hwrm_cfa_vfr_alloc_input (size:448b/56B) */
-struct hwrm_cfa_vfr_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 vf_id;
- __le16 reserved;
- u8 unused_0[4];
- char vfr_name[32];
-};
-
-/* hwrm_cfa_vfr_alloc_output (size:128b/16B) */
-struct hwrm_cfa_vfr_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 rx_cfa_code;
- __le16 tx_cfa_action;
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_cfa_vfr_free_input (size:448b/56B) */
-struct hwrm_cfa_vfr_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- char vfr_name[32];
- __le16 vf_id;
- __le16 reserved;
- u8 unused_0[4];
-};
-
-/* hwrm_cfa_vfr_free_output (size:128b/16B) */
-struct hwrm_cfa_vfr_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_cfa_eem_qcaps_input (size:192b/24B) */
-struct hwrm_cfa_eem_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_EEM_QCAPS_REQ_FLAGS_PATH_TX 0x1UL
- #define CFA_EEM_QCAPS_REQ_FLAGS_PATH_RX 0x2UL
- #define CFA_EEM_QCAPS_REQ_FLAGS_PREFERRED_OFFLOAD 0x4UL
- __le32 unused_0;
-};
-
-/* hwrm_cfa_eem_qcaps_output (size:320b/40B) */
-struct hwrm_cfa_eem_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 flags;
- #define CFA_EEM_QCAPS_RESP_FLAGS_PATH_TX 0x1UL
- #define CFA_EEM_QCAPS_RESP_FLAGS_PATH_RX 0x2UL
- #define CFA_EEM_QCAPS_RESP_FLAGS_CENTRALIZED_MEMORY_MODEL_SUPPORTED 0x4UL
- #define CFA_EEM_QCAPS_RESP_FLAGS_DETACHED_CENTRALIZED_MEMORY_MODEL_SUPPORTED 0x8UL
- __le32 unused_0;
- __le32 supported;
- #define CFA_EEM_QCAPS_RESP_SUPPORTED_KEY0_TABLE 0x1UL
- #define CFA_EEM_QCAPS_RESP_SUPPORTED_KEY1_TABLE 0x2UL
- #define CFA_EEM_QCAPS_RESP_SUPPORTED_EXTERNAL_RECORD_TABLE 0x4UL
- #define CFA_EEM_QCAPS_RESP_SUPPORTED_EXTERNAL_FLOW_COUNTERS_TABLE 0x8UL
- #define CFA_EEM_QCAPS_RESP_SUPPORTED_FID_TABLE 0x10UL
- __le32 max_entries_supported;
- __le16 key_entry_size;
- __le16 record_entry_size;
- __le16 efc_entry_size;
- __le16 fid_entry_size;
- u8 unused_1[7];
- u8 valid;
-};
-
-/* hwrm_cfa_eem_cfg_input (size:384b/48B) */
-struct hwrm_cfa_eem_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_EEM_CFG_REQ_FLAGS_PATH_TX 0x1UL
- #define CFA_EEM_CFG_REQ_FLAGS_PATH_RX 0x2UL
- #define CFA_EEM_CFG_REQ_FLAGS_PREFERRED_OFFLOAD 0x4UL
- #define CFA_EEM_CFG_REQ_FLAGS_SECONDARY_PF 0x8UL
- __le16 group_id;
- __le16 unused_0;
- __le32 num_entries;
- __le32 unused_1;
- __le16 key0_ctx_id;
- __le16 key1_ctx_id;
- __le16 record_ctx_id;
- __le16 efc_ctx_id;
- __le16 fid_ctx_id;
- __le16 unused_2;
- __le32 unused_3;
-};
-
-/* hwrm_cfa_eem_cfg_output (size:128b/16B) */
-struct hwrm_cfa_eem_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_cfa_eem_qcfg_input (size:192b/24B) */
-struct hwrm_cfa_eem_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_EEM_QCFG_REQ_FLAGS_PATH_TX 0x1UL
- #define CFA_EEM_QCFG_REQ_FLAGS_PATH_RX 0x2UL
- __le32 unused_0;
-};
-
-/* hwrm_cfa_eem_qcfg_output (size:256b/32B) */
-struct hwrm_cfa_eem_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 flags;
- #define CFA_EEM_QCFG_RESP_FLAGS_PATH_TX 0x1UL
- #define CFA_EEM_QCFG_RESP_FLAGS_PATH_RX 0x2UL
- #define CFA_EEM_QCFG_RESP_FLAGS_PREFERRED_OFFLOAD 0x4UL
- __le32 num_entries;
- __le16 key0_ctx_id;
- __le16 key1_ctx_id;
- __le16 record_ctx_id;
- __le16 efc_ctx_id;
- __le16 fid_ctx_id;
- u8 unused_2[5];
- u8 valid;
-};
-
-/* hwrm_cfa_eem_op_input (size:192b/24B) */
-struct hwrm_cfa_eem_op_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_EEM_OP_REQ_FLAGS_PATH_TX 0x1UL
- #define CFA_EEM_OP_REQ_FLAGS_PATH_RX 0x2UL
- __le16 unused_0;
- __le16 op;
- #define CFA_EEM_OP_REQ_OP_RESERVED 0x0UL
- #define CFA_EEM_OP_REQ_OP_EEM_DISABLE 0x1UL
- #define CFA_EEM_OP_REQ_OP_EEM_ENABLE 0x2UL
- #define CFA_EEM_OP_REQ_OP_EEM_CLEANUP 0x3UL
- #define CFA_EEM_OP_REQ_OP_LAST CFA_EEM_OP_REQ_OP_EEM_CLEANUP
-};
-
-/* hwrm_cfa_eem_op_output (size:128b/16B) */
-struct hwrm_cfa_eem_op_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_cfa_adv_flow_mgnt_qcaps_input (size:256b/32B) */
-struct hwrm_cfa_adv_flow_mgnt_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 unused_0[4];
-};
-
-/* hwrm_cfa_adv_flow_mgnt_qcaps_output (size:128b/16B) */
-struct hwrm_cfa_adv_flow_mgnt_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 flags;
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_16BIT_SUPPORTED 0x1UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_64BIT_SUPPORTED 0x2UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_BATCH_DELETE_SUPPORTED 0x4UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_RESET_ALL_SUPPORTED 0x8UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_DEST_FUNC_SUPPORTED 0x10UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TX_EEM_FLOW_SUPPORTED 0x20UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RX_EEM_FLOW_SUPPORTED 0x40UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_COUNTER_ALLOC_SUPPORTED 0x80UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_SUPPORTED 0x100UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_UNTAGGED_VLAN_SUPPORTED 0x200UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_XDP_SUPPORTED 0x400UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_HEADER_SOURCE_FIELDS_SUPPORTED 0x800UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ARP_SUPPORTED 0x1000UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED 0x2000UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ETHERTYPE_IP_SUPPORTED 0x4000UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TRUFLOW_CAPABLE 0x8000UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_FILTER_TRAFFIC_TYPE_L2_ROCE_SUPPORTED 0x10000UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_LAG_SUPPORTED 0x20000UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_NO_L2CTX_SUPPORTED 0x40000UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NIC_FLOW_STATS_SUPPORTED 0x80000UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_EXT_IP_PROTO_SUPPORTED 0x100000UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V3_SUPPORTED 0x200000UL
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_tunnel_dst_port_query_input (size:192b/24B) */
-struct hwrm_tunnel_dst_port_query_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 tunnel_type;
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ECPRI 0xeUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_SRV6 0xfUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GRE 0x11UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GRE
- u8 tunnel_next_proto;
- u8 unused_0[6];
-};
-
-/* hwrm_tunnel_dst_port_query_output (size:128b/16B) */
-struct hwrm_tunnel_dst_port_query_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 tunnel_dst_port_id;
- __be16 tunnel_dst_port_val;
- u8 upar_in_use;
- #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR0 0x1UL
- #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR1 0x2UL
- #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR2 0x4UL
- #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR3 0x8UL
- #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR4 0x10UL
- #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR5 0x20UL
- #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR6 0x40UL
- #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR7 0x80UL
- u8 status;
- #define TUNNEL_DST_PORT_QUERY_RESP_STATUS_CHIP_LEVEL 0x1UL
- #define TUNNEL_DST_PORT_QUERY_RESP_STATUS_FUNC_LEVEL 0x2UL
- u8 unused_0;
- u8 valid;
-};
-
-/* hwrm_tunnel_dst_port_alloc_input (size:192b/24B) */
-struct hwrm_tunnel_dst_port_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 tunnel_type;
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ECPRI 0xeUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_SRV6 0xfUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GRE 0x11UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GRE
- u8 tunnel_next_proto;
- __be16 tunnel_dst_port_val;
- u8 unused_0[4];
-};
-
-/* hwrm_tunnel_dst_port_alloc_output (size:128b/16B) */
-struct hwrm_tunnel_dst_port_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 tunnel_dst_port_id;
- u8 error_info;
- #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_SUCCESS 0x0UL
- #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_ALLOCATED 0x1UL
- #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_NO_RESOURCE 0x2UL
- #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_ENABLED 0x3UL
- #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_LAST TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_ENABLED
- u8 upar_in_use;
- #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR0 0x1UL
- #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR1 0x2UL
- #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR2 0x4UL
- #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR3 0x8UL
- #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR4 0x10UL
- #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR5 0x20UL
- #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR6 0x40UL
- #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR7 0x80UL
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_tunnel_dst_port_free_input (size:192b/24B) */
-struct hwrm_tunnel_dst_port_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 tunnel_type;
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ECPRI 0xeUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_SRV6 0xfUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GRE 0x11UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GRE
- u8 tunnel_next_proto;
- __le16 tunnel_dst_port_id;
- u8 unused_0[4];
-};
-
-/* hwrm_tunnel_dst_port_free_output (size:128b/16B) */
-struct hwrm_tunnel_dst_port_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 error_info;
- #define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_SUCCESS 0x0UL
- #define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_ERR_NOT_OWNER 0x1UL
- #define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_ERR_NOT_ALLOCATED 0x2UL
- #define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_LAST TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_ERR_NOT_ALLOCATED
- u8 unused_1[6];
- u8 valid;
-};
-
-/* ctx_hw_stats (size:1280b/160B) */
-struct ctx_hw_stats {
- __le64 rx_ucast_pkts;
- __le64 rx_mcast_pkts;
- __le64 rx_bcast_pkts;
- __le64 rx_discard_pkts;
- __le64 rx_error_pkts;
- __le64 rx_ucast_bytes;
- __le64 rx_mcast_bytes;
- __le64 rx_bcast_bytes;
- __le64 tx_ucast_pkts;
- __le64 tx_mcast_pkts;
- __le64 tx_bcast_pkts;
- __le64 tx_error_pkts;
- __le64 tx_discard_pkts;
- __le64 tx_ucast_bytes;
- __le64 tx_mcast_bytes;
- __le64 tx_bcast_bytes;
- __le64 tpa_pkts;
- __le64 tpa_bytes;
- __le64 tpa_events;
- __le64 tpa_aborts;
-};
-
-/* ctx_hw_stats_ext (size:1408b/176B) */
-struct ctx_hw_stats_ext {
- __le64 rx_ucast_pkts;
- __le64 rx_mcast_pkts;
- __le64 rx_bcast_pkts;
- __le64 rx_discard_pkts;
- __le64 rx_error_pkts;
- __le64 rx_ucast_bytes;
- __le64 rx_mcast_bytes;
- __le64 rx_bcast_bytes;
- __le64 tx_ucast_pkts;
- __le64 tx_mcast_pkts;
- __le64 tx_bcast_pkts;
- __le64 tx_error_pkts;
- __le64 tx_discard_pkts;
- __le64 tx_ucast_bytes;
- __le64 tx_mcast_bytes;
- __le64 tx_bcast_bytes;
- __le64 rx_tpa_eligible_pkt;
- __le64 rx_tpa_eligible_bytes;
- __le64 rx_tpa_pkt;
- __le64 rx_tpa_bytes;
- __le64 rx_tpa_errors;
- __le64 rx_tpa_events;
-};
-
-/* hwrm_stat_ctx_alloc_input (size:320b/40B) */
-struct hwrm_stat_ctx_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 stats_dma_addr;
- __le32 update_period_ms;
- u8 stat_ctx_flags;
- #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE 0x1UL
- u8 unused_0;
- __le16 stats_dma_length;
- __le16 flags;
- #define STAT_CTX_ALLOC_REQ_FLAGS_STEERING_TAG_VALID 0x1UL
- __le16 steering_tag;
- __le32 unused_1;
-};
-
-/* hwrm_stat_ctx_alloc_output (size:128b/16B) */
-struct hwrm_stat_ctx_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 stat_ctx_id;
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_stat_ctx_free_input (size:192b/24B) */
-struct hwrm_stat_ctx_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 stat_ctx_id;
- u8 unused_0[4];
-};
-
-/* hwrm_stat_ctx_free_output (size:128b/16B) */
-struct hwrm_stat_ctx_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 stat_ctx_id;
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_stat_ctx_query_input (size:192b/24B) */
-struct hwrm_stat_ctx_query_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 stat_ctx_id;
- u8 flags;
- #define STAT_CTX_QUERY_REQ_FLAGS_COUNTER_MASK 0x1UL
- u8 unused_0[3];
-};
-
-/* hwrm_stat_ctx_query_output (size:1408b/176B) */
-struct hwrm_stat_ctx_query_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 tx_ucast_pkts;
- __le64 tx_mcast_pkts;
- __le64 tx_bcast_pkts;
- __le64 tx_discard_pkts;
- __le64 tx_error_pkts;
- __le64 tx_ucast_bytes;
- __le64 tx_mcast_bytes;
- __le64 tx_bcast_bytes;
- __le64 rx_ucast_pkts;
- __le64 rx_mcast_pkts;
- __le64 rx_bcast_pkts;
- __le64 rx_discard_pkts;
- __le64 rx_error_pkts;
- __le64 rx_ucast_bytes;
- __le64 rx_mcast_bytes;
- __le64 rx_bcast_bytes;
- __le64 rx_agg_pkts;
- __le64 rx_agg_bytes;
- __le64 rx_agg_events;
- __le64 rx_agg_aborts;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_stat_ext_ctx_query_input (size:192b/24B) */
-struct hwrm_stat_ext_ctx_query_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 stat_ctx_id;
- u8 flags;
- #define STAT_EXT_CTX_QUERY_REQ_FLAGS_COUNTER_MASK 0x1UL
- u8 unused_0[3];
-};
-
-/* hwrm_stat_ext_ctx_query_output (size:1536b/192B) */
-struct hwrm_stat_ext_ctx_query_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 rx_ucast_pkts;
- __le64 rx_mcast_pkts;
- __le64 rx_bcast_pkts;
- __le64 rx_discard_pkts;
- __le64 rx_error_pkts;
- __le64 rx_ucast_bytes;
- __le64 rx_mcast_bytes;
- __le64 rx_bcast_bytes;
- __le64 tx_ucast_pkts;
- __le64 tx_mcast_pkts;
- __le64 tx_bcast_pkts;
- __le64 tx_error_pkts;
- __le64 tx_discard_pkts;
- __le64 tx_ucast_bytes;
- __le64 tx_mcast_bytes;
- __le64 tx_bcast_bytes;
- __le64 rx_tpa_eligible_pkt;
- __le64 rx_tpa_eligible_bytes;
- __le64 rx_tpa_pkt;
- __le64 rx_tpa_bytes;
- __le64 rx_tpa_errors;
- __le64 rx_tpa_events;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_stat_ctx_clr_stats_input (size:192b/24B) */
-struct hwrm_stat_ctx_clr_stats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 stat_ctx_id;
- u8 unused_0[4];
-};
-
-/* hwrm_stat_ctx_clr_stats_output (size:128b/16B) */
-struct hwrm_stat_ctx_clr_stats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_pcie_qstats_input (size:256b/32B) */
-struct hwrm_pcie_qstats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 pcie_stat_size;
- u8 unused_0[6];
- __le64 pcie_stat_host_addr;
-};
-
-/* hwrm_pcie_qstats_output (size:128b/16B) */
-struct hwrm_pcie_qstats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 pcie_stat_size;
- u8 unused_0[5];
- u8 valid;
-};
-
-/* pcie_ctx_hw_stats (size:768b/96B) */
-struct pcie_ctx_hw_stats {
- __le64 pcie_pl_signal_integrity;
- __le64 pcie_dl_signal_integrity;
- __le64 pcie_tl_signal_integrity;
- __le64 pcie_link_integrity;
- __le64 pcie_tx_traffic_rate;
- __le64 pcie_rx_traffic_rate;
- __le64 pcie_tx_dllp_statistics;
- __le64 pcie_rx_dllp_statistics;
- __le64 pcie_equalization_time;
- __le32 pcie_ltssm_histogram[4];
- __le64 pcie_recovery_histogram;
-};
-
-/* hwrm_stat_generic_qstats_input (size:256b/32B) */
-struct hwrm_stat_generic_qstats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 generic_stat_size;
- u8 flags;
- #define STAT_GENERIC_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
- u8 unused_0[5];
- __le64 generic_stat_host_addr;
-};
-
-/* hwrm_stat_generic_qstats_output (size:128b/16B) */
-struct hwrm_stat_generic_qstats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 generic_stat_size;
- u8 unused_0[5];
- u8 valid;
-};
-
-/* generic_sw_hw_stats (size:1408b/176B) */
-struct generic_sw_hw_stats {
- __le64 pcie_statistics_tx_tlp;
- __le64 pcie_statistics_rx_tlp;
- __le64 pcie_credit_fc_hdr_posted;
- __le64 pcie_credit_fc_hdr_nonposted;
- __le64 pcie_credit_fc_hdr_cmpl;
- __le64 pcie_credit_fc_data_posted;
- __le64 pcie_credit_fc_data_nonposted;
- __le64 pcie_credit_fc_data_cmpl;
- __le64 pcie_credit_fc_tgt_nonposted;
- __le64 pcie_credit_fc_tgt_data_posted;
- __le64 pcie_credit_fc_tgt_hdr_posted;
- __le64 pcie_credit_fc_cmpl_hdr_posted;
- __le64 pcie_credit_fc_cmpl_data_posted;
- __le64 pcie_cmpl_longest;
- __le64 pcie_cmpl_shortest;
- __le64 cache_miss_count_cfcq;
- __le64 cache_miss_count_cfcs;
- __le64 cache_miss_count_cfcc;
- __le64 cache_miss_count_cfcm;
- __le64 hw_db_recov_dbs_dropped;
- __le64 hw_db_recov_drops_serviced;
- __le64 hw_db_recov_dbs_recovered;
-};
-
-/* hwrm_fw_reset_input (size:192b/24B) */
-struct hwrm_fw_reset_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 embedded_proc_type;
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST_RESOURCE_REINIT 0x7UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_IMPACTLESS_ACTIVATION 0x8UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_LAST FW_RESET_REQ_EMBEDDED_PROC_TYPE_IMPACTLESS_ACTIVATION
- u8 selfrst_status;
- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL
- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL
- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTIMMEDIATE 0x3UL
- #define FW_RESET_REQ_SELFRST_STATUS_LAST FW_RESET_REQ_SELFRST_STATUS_SELFRSTIMMEDIATE
- u8 host_idx;
- u8 flags;
- #define FW_RESET_REQ_FLAGS_RESET_GRACEFUL 0x1UL
- #define FW_RESET_REQ_FLAGS_FW_ACTIVATION 0x2UL
- u8 unused_0[4];
-};
-
-/* hwrm_fw_reset_output (size:128b/16B) */
-struct hwrm_fw_reset_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 selfrst_status;
- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTIMMEDIATE 0x3UL
- #define FW_RESET_RESP_SELFRST_STATUS_LAST FW_RESET_RESP_SELFRST_STATUS_SELFRSTIMMEDIATE
- u8 unused_0[6];
- u8 valid;
-};
-
-/* hwrm_fw_qstatus_input (size:192b/24B) */
-struct hwrm_fw_qstatus_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 embedded_proc_type;
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_LAST FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP
- u8 unused_0[7];
-};
-
-/* hwrm_fw_qstatus_output (size:128b/16B) */
-struct hwrm_fw_qstatus_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 selfrst_status;
- #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
- #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
- #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
- #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPOWER 0x3UL
- #define FW_QSTATUS_RESP_SELFRST_STATUS_LAST FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPOWER
- u8 nvm_option_action_status;
- #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_NONE 0x0UL
- #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_HOTRESET 0x1UL
- #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_WARMBOOT 0x2UL
- #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_COLDBOOT 0x3UL
- #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_LAST FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_COLDBOOT
- u8 unused_0[5];
- u8 valid;
-};
-
-/* hwrm_fw_set_time_input (size:256b/32B) */
-struct hwrm_fw_set_time_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 year;
- #define FW_SET_TIME_REQ_YEAR_UNKNOWN 0x0UL
- #define FW_SET_TIME_REQ_YEAR_LAST FW_SET_TIME_REQ_YEAR_UNKNOWN
- u8 month;
- u8 day;
- u8 hour;
- u8 minute;
- u8 second;
- u8 unused_0;
- __le16 millisecond;
- __le16 zone;
- #define FW_SET_TIME_REQ_ZONE_UTC 0
- #define FW_SET_TIME_REQ_ZONE_UNKNOWN 65535
- #define FW_SET_TIME_REQ_ZONE_LAST FW_SET_TIME_REQ_ZONE_UNKNOWN
- u8 unused_1[4];
-};
-
-/* hwrm_fw_set_time_output (size:128b/16B) */
-struct hwrm_fw_set_time_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_struct_hdr (size:128b/16B) */
-struct hwrm_struct_hdr {
- __le16 struct_id;
- #define STRUCT_HDR_STRUCT_ID_LLDP_CFG 0x41bUL
- #define STRUCT_HDR_STRUCT_ID_DCBX_ETS 0x41dUL
- #define STRUCT_HDR_STRUCT_ID_DCBX_PFC 0x41fUL
- #define STRUCT_HDR_STRUCT_ID_DCBX_APP 0x421UL
- #define STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE 0x422UL
- #define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC 0x424UL
- #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL
- #define STRUCT_HDR_STRUCT_ID_POWER_BKUP 0x427UL
- #define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL
- #define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL
- #define STRUCT_HDR_STRUCT_ID_RSS_V2 0x64UL
- #define STRUCT_HDR_STRUCT_ID_MSIX_PER_VF 0xc8UL
- #define STRUCT_HDR_STRUCT_ID_LAST STRUCT_HDR_STRUCT_ID_MSIX_PER_VF
- __le16 len;
- u8 version;
- u8 count;
- __le16 subtype;
- __le16 next_offset;
- #define STRUCT_HDR_NEXT_OFFSET_LAST 0x0UL
- u8 unused_0[6];
-};
-
-/* hwrm_struct_data_dcbx_app (size:64b/8B) */
-struct hwrm_struct_data_dcbx_app {
- __be16 protocol_id;
- u8 protocol_selector;
- #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_ETHER_TYPE 0x1UL
- #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_PORT 0x2UL
- #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_UDP_PORT 0x3UL
- #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT 0x4UL
- #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_LAST STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT
- u8 priority;
- u8 valid;
- u8 unused_0[3];
-};
-
-/* hwrm_fw_set_structured_data_input (size:256b/32B) */
-struct hwrm_fw_set_structured_data_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 src_data_addr;
- __le16 data_len;
- u8 hdr_cnt;
- u8 unused_0[5];
-};
-
-/* hwrm_fw_set_structured_data_output (size:128b/16B) */
-struct hwrm_fw_set_structured_data_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_fw_set_structured_data_cmd_err (size:64b/8B) */
-struct hwrm_fw_set_structured_data_cmd_err {
- u8 code;
- #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_HDR_CNT 0x1UL
- #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_FMT 0x2UL
- #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL
- #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_LAST FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID
- u8 unused_0[7];
-};
-
-/* hwrm_fw_get_structured_data_input (size:256b/32B) */
-struct hwrm_fw_get_structured_data_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 dest_data_addr;
- __le16 data_len;
- __le16 structure_id;
- __le16 subtype;
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_UNUSED 0x0UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_ALL 0xffffUL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_ADMIN 0x100UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_PEER 0x101UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_OPERATIONAL 0x102UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_ADMIN 0x200UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_PEER 0x201UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_OPERATIONAL 0x202UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL 0x300UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_LAST FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL
- u8 count;
- u8 unused_0;
-};
-
-/* hwrm_fw_get_structured_data_output (size:128b/16B) */
-struct hwrm_fw_get_structured_data_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 hdr_cnt;
- u8 unused_0[6];
- u8 valid;
-};
-
-/* hwrm_fw_get_structured_data_cmd_err (size:64b/8B) */
-struct hwrm_fw_get_structured_data_cmd_err {
- u8 code;
- #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL
- #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_LAST FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID
- u8 unused_0[7];
-};
-
-/* hwrm_fw_livepatch_query_input (size:192b/24B) */
-struct hwrm_fw_livepatch_query_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 fw_target;
- #define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_COMMON_FW 0x1UL
- #define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_SECURE_FW 0x2UL
- #define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_LAST FW_LIVEPATCH_QUERY_REQ_FW_TARGET_SECURE_FW
- u8 unused_0[7];
-};
-
-/* hwrm_fw_livepatch_query_output (size:640b/80B) */
-struct hwrm_fw_livepatch_query_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- char install_ver[32];
- char active_ver[32];
- __le16 status_flags;
- #define FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL 0x1UL
- #define FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE 0x2UL
- u8 unused_0[5];
- u8 valid;
-};
-
-/* hwrm_fw_livepatch_input (size:256b/32B) */
-struct hwrm_fw_livepatch_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 opcode;
- #define FW_LIVEPATCH_REQ_OPCODE_ACTIVATE 0x1UL
- #define FW_LIVEPATCH_REQ_OPCODE_DEACTIVATE 0x2UL
- #define FW_LIVEPATCH_REQ_OPCODE_LAST FW_LIVEPATCH_REQ_OPCODE_DEACTIVATE
- u8 fw_target;
- #define FW_LIVEPATCH_REQ_FW_TARGET_COMMON_FW 0x1UL
- #define FW_LIVEPATCH_REQ_FW_TARGET_SECURE_FW 0x2UL
- #define FW_LIVEPATCH_REQ_FW_TARGET_LAST FW_LIVEPATCH_REQ_FW_TARGET_SECURE_FW
- u8 loadtype;
- #define FW_LIVEPATCH_REQ_LOADTYPE_NVM_INSTALL 0x1UL
- #define FW_LIVEPATCH_REQ_LOADTYPE_MEMORY_DIRECT 0x2UL
- #define FW_LIVEPATCH_REQ_LOADTYPE_LAST FW_LIVEPATCH_REQ_LOADTYPE_MEMORY_DIRECT
- u8 flags;
- __le32 patch_len;
- __le64 host_addr;
-};
-
-/* hwrm_fw_livepatch_output (size:128b/16B) */
-struct hwrm_fw_livepatch_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_fw_livepatch_cmd_err (size:64b/8B) */
-struct hwrm_fw_livepatch_cmd_err {
- u8 code;
- #define FW_LIVEPATCH_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_OPCODE 0x1UL
- #define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_TARGET 0x2UL
- #define FW_LIVEPATCH_CMD_ERR_CODE_NOT_SUPPORTED 0x3UL
- #define FW_LIVEPATCH_CMD_ERR_CODE_NOT_INSTALLED 0x4UL
- #define FW_LIVEPATCH_CMD_ERR_CODE_NOT_PATCHED 0x5UL
- #define FW_LIVEPATCH_CMD_ERR_CODE_AUTH_FAIL 0x6UL
- #define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_HEADER 0x7UL
- #define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_SIZE 0x8UL
- #define FW_LIVEPATCH_CMD_ERR_CODE_ALREADY_PATCHED 0x9UL
- #define FW_LIVEPATCH_CMD_ERR_CODE_LAST FW_LIVEPATCH_CMD_ERR_CODE_ALREADY_PATCHED
- u8 unused_0[7];
-};
-
-/* hwrm_exec_fwd_resp_input (size:1024b/128B) */
-struct hwrm_exec_fwd_resp_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 encap_request[26];
- __le16 encap_resp_target_id;
- u8 unused_0[6];
-};
-
-/* hwrm_exec_fwd_resp_output (size:128b/16B) */
-struct hwrm_exec_fwd_resp_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_reject_fwd_resp_input (size:1024b/128B) */
-struct hwrm_reject_fwd_resp_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 encap_request[26];
- __le16 encap_resp_target_id;
- u8 unused_0[6];
-};
-
-/* hwrm_reject_fwd_resp_output (size:128b/16B) */
-struct hwrm_reject_fwd_resp_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_fwd_resp_input (size:1024b/128B) */
-struct hwrm_fwd_resp_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 encap_resp_target_id;
- __le16 encap_resp_cmpl_ring;
- __le16 encap_resp_len;
- u8 unused_0;
- u8 unused_1;
- __le64 encap_resp_addr;
- __le32 encap_resp[24];
-};
-
-/* hwrm_fwd_resp_output (size:128b/16B) */
-struct hwrm_fwd_resp_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_fwd_async_event_cmpl_input (size:320b/40B) */
-struct hwrm_fwd_async_event_cmpl_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 encap_async_event_target_id;
- u8 unused_0[6];
- __le32 encap_async_event_cmpl[4];
-};
-
-/* hwrm_fwd_async_event_cmpl_output (size:128b/16B) */
-struct hwrm_fwd_async_event_cmpl_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_temp_monitor_query_input (size:128b/16B) */
-struct hwrm_temp_monitor_query_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
-};
-
-/* hwrm_temp_monitor_query_output (size:192b/24B) */
-struct hwrm_temp_monitor_query_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 temp;
- u8 phy_temp;
- u8 om_temp;
- u8 flags;
- #define TEMP_MONITOR_QUERY_RESP_FLAGS_TEMP_NOT_AVAILABLE 0x1UL
- #define TEMP_MONITOR_QUERY_RESP_FLAGS_PHY_TEMP_NOT_AVAILABLE 0x2UL
- #define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_NOT_PRESENT 0x4UL
- #define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_TEMP_NOT_AVAILABLE 0x8UL
- #define TEMP_MONITOR_QUERY_RESP_FLAGS_EXT_TEMP_FIELDS_AVAILABLE 0x10UL
- #define TEMP_MONITOR_QUERY_RESP_FLAGS_THRESHOLD_VALUES_AVAILABLE 0x20UL
- u8 temp2;
- u8 phy_temp2;
- u8 om_temp2;
- u8 warn_threshold;
- u8 critical_threshold;
- u8 fatal_threshold;
- u8 shutdown_threshold;
- u8 unused_0[4];
- u8 valid;
-};
-
-/* hwrm_wol_filter_alloc_input (size:512b/64B) */
-struct hwrm_wol_filter_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- __le32 enables;
- #define WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS 0x1UL
- #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_OFFSET 0x2UL
- #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_SIZE 0x4UL
- #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_ADDR 0x8UL
- #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_ADDR 0x10UL
- #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_SIZE 0x20UL
- __le16 port_id;
- u8 wol_type;
- #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT 0x0UL
- #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_BMP 0x1UL
- #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID 0xffUL
- #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_LAST WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID
- u8 unused_0[5];
- u8 mac_address[6];
- __le16 pattern_offset;
- __le16 pattern_buf_size;
- __le16 pattern_mask_size;
- u8 unused_1[4];
- __le64 pattern_buf_addr;
- __le64 pattern_mask_addr;
-};
-
-/* hwrm_wol_filter_alloc_output (size:128b/16B) */
-struct hwrm_wol_filter_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 wol_filter_id;
- u8 unused_0[6];
- u8 valid;
-};
-
-/* hwrm_wol_filter_free_input (size:256b/32B) */
-struct hwrm_wol_filter_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define WOL_FILTER_FREE_REQ_FLAGS_FREE_ALL_WOL_FILTERS 0x1UL
- __le32 enables;
- #define WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID 0x1UL
- __le16 port_id;
- u8 wol_filter_id;
- u8 unused_0[5];
-};
-
-/* hwrm_wol_filter_free_output (size:128b/16B) */
-struct hwrm_wol_filter_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_wol_filter_qcfg_input (size:448b/56B) */
-struct hwrm_wol_filter_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- __le16 handle;
- u8 unused_0[4];
- __le64 pattern_buf_addr;
- __le16 pattern_buf_size;
- u8 unused_1[6];
- __le64 pattern_mask_addr;
- __le16 pattern_mask_size;
- u8 unused_2[6];
-};
-
-/* hwrm_wol_filter_qcfg_output (size:256b/32B) */
-struct hwrm_wol_filter_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 next_handle;
- u8 wol_filter_id;
- u8 wol_type;
- #define WOL_FILTER_QCFG_RESP_WOL_TYPE_MAGICPKT 0x0UL
- #define WOL_FILTER_QCFG_RESP_WOL_TYPE_BMP 0x1UL
- #define WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID 0xffUL
- #define WOL_FILTER_QCFG_RESP_WOL_TYPE_LAST WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID
- __le32 unused_0;
- u8 mac_address[6];
- __le16 pattern_offset;
- __le16 pattern_size;
- __le16 pattern_mask_size;
- u8 unused_1[3];
- u8 valid;
-};
-
-/* hwrm_wol_reason_qcfg_input (size:320b/40B) */
-struct hwrm_wol_reason_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 unused_0[6];
- __le64 wol_pkt_buf_addr;
- __le16 wol_pkt_buf_size;
- u8 unused_1[6];
-};
-
-/* hwrm_wol_reason_qcfg_output (size:128b/16B) */
-struct hwrm_wol_reason_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 wol_filter_id;
- u8 wol_reason;
- #define WOL_REASON_QCFG_RESP_WOL_REASON_MAGICPKT 0x0UL
- #define WOL_REASON_QCFG_RESP_WOL_REASON_BMP 0x1UL
- #define WOL_REASON_QCFG_RESP_WOL_REASON_INVALID 0xffUL
- #define WOL_REASON_QCFG_RESP_WOL_REASON_LAST WOL_REASON_QCFG_RESP_WOL_REASON_INVALID
- u8 wol_pkt_len;
- u8 unused_0[4];
- u8 valid;
-};
-
-/* hwrm_dbg_read_direct_input (size:256b/32B) */
-struct hwrm_dbg_read_direct_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_dest_addr;
- __le32 read_addr;
- __le32 read_len32;
-};
-
-/* hwrm_dbg_read_direct_output (size:128b/16B) */
-struct hwrm_dbg_read_direct_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 crc32;
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_dbg_qcaps_input (size:192b/24B) */
-struct hwrm_dbg_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- u8 unused_0[6];
-};
-
-/* hwrm_dbg_qcaps_output (size:192b/24B) */
-struct hwrm_dbg_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 fid;
- u8 unused_0[2];
- __le32 coredump_component_disable_caps;
- #define DBG_QCAPS_RESP_COREDUMP_COMPONENT_DISABLE_CAPS_NVRAM 0x1UL
- __le32 flags;
- #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_NVM 0x1UL
- #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR 0x2UL
- #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR 0x4UL
- #define DBG_QCAPS_RESP_FLAGS_USEQ 0x8UL
- u8 unused_1[3];
- u8 valid;
-};
-
-/* hwrm_dbg_qcfg_input (size:192b/24B) */
-struct hwrm_dbg_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- __le16 flags;
- #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_MASK 0x3UL
- #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_SFT 0
- #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_NVM 0x0UL
- #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_HOST_DDR 0x1UL
- #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR 0x2UL
- #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_LAST DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR
- __le32 coredump_component_disable_flags;
- #define DBG_QCFG_REQ_COREDUMP_COMPONENT_DISABLE_FLAGS_NVRAM 0x1UL
-};
-
-/* hwrm_dbg_qcfg_output (size:256b/32B) */
-struct hwrm_dbg_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 fid;
- u8 unused_0[2];
- __le32 coredump_size;
- __le32 flags;
- #define DBG_QCFG_RESP_FLAGS_UART_LOG 0x1UL
- #define DBG_QCFG_RESP_FLAGS_UART_LOG_SECONDARY 0x2UL
- #define DBG_QCFG_RESP_FLAGS_FW_TRACE 0x4UL
- #define DBG_QCFG_RESP_FLAGS_FW_TRACE_SECONDARY 0x8UL
- #define DBG_QCFG_RESP_FLAGS_DEBUG_NOTIFY 0x10UL
- #define DBG_QCFG_RESP_FLAGS_JTAG_DEBUG 0x20UL
- __le16 async_cmpl_ring;
- u8 unused_2[2];
- __le32 crashdump_size;
- u8 unused_3[3];
- u8 valid;
-};
-
-/* hwrm_dbg_crashdump_medium_cfg_input (size:320b/40B) */
-struct hwrm_dbg_crashdump_medium_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 output_dest_flags;
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_TYPE_DDR 0x1UL
- __le16 pg_size_lvl;
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_MASK 0x3UL
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_SFT 0
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_0 0x0UL
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_1 0x1UL
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_2 0x2UL
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LAST DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_2
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_MASK 0x1cUL
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_SFT 2
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_4K (0x0UL << 2)
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8K (0x1UL << 2)
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_64K (0x2UL << 2)
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_2M (0x3UL << 2)
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8M (0x4UL << 2)
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_1G (0x5UL << 2)
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_LAST DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_1G
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_UNUSED11_MASK 0xffe0UL
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_UNUSED11_SFT 5
- __le32 size;
- __le32 coredump_component_disable_flags;
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_NVRAM 0x1UL
- __le32 unused_0;
- __le64 pbl;
-};
-
-/* hwrm_dbg_crashdump_medium_cfg_output (size:128b/16B) */
-struct hwrm_dbg_crashdump_medium_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_1[7];
- u8 valid;
-};
-
-/* coredump_segment_record (size:128b/16B) */
-struct coredump_segment_record {
- __le16 component_id;
- __le16 segment_id;
- __le16 max_instances;
- u8 version_hi;
- u8 version_low;
- u8 seg_flags;
- u8 compress_flags;
- #define SFLAG_COMPRESSED_ZLIB 0x1UL
- u8 unused_0[2];
- __le32 segment_len;
-};
-
-/* hwrm_dbg_coredump_list_input (size:256b/32B) */
-struct hwrm_dbg_coredump_list_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_dest_addr;
- __le32 host_buf_len;
- __le16 seq_no;
- u8 flags;
- #define DBG_COREDUMP_LIST_REQ_FLAGS_CRASHDUMP 0x1UL
- u8 unused_0[1];
-};
-
-/* hwrm_dbg_coredump_list_output (size:128b/16B) */
-struct hwrm_dbg_coredump_list_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 flags;
- #define DBG_COREDUMP_LIST_RESP_FLAGS_MORE 0x1UL
- u8 unused_0;
- __le16 total_segments;
- __le16 data_len;
- u8 unused_1;
- u8 valid;
-};
-
-/* hwrm_dbg_coredump_initiate_input (size:256b/32B) */
-struct hwrm_dbg_coredump_initiate_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 component_id;
- __le16 segment_id;
- __le16 instance;
- __le16 unused_0;
- u8 seg_flags;
- u8 unused_1[7];
-};
-
-/* hwrm_dbg_coredump_initiate_output (size:128b/16B) */
-struct hwrm_dbg_coredump_initiate_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* coredump_data_hdr (size:128b/16B) */
-struct coredump_data_hdr {
- __le32 address;
- __le32 flags_length;
- #define COREDUMP_DATA_HDR_FLAGS_LENGTH_ACTUAL_LEN_MASK 0xffffffUL
- #define COREDUMP_DATA_HDR_FLAGS_LENGTH_ACTUAL_LEN_SFT 0
- #define COREDUMP_DATA_HDR_FLAGS_LENGTH_INDIRECT_ACCESS 0x1000000UL
- __le32 instance;
- __le32 next_offset;
-};
-
-/* hwrm_dbg_coredump_retrieve_input (size:448b/56B) */
-struct hwrm_dbg_coredump_retrieve_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_dest_addr;
- __le32 host_buf_len;
- __le32 unused_0;
- __le16 component_id;
- __le16 segment_id;
- __le16 instance;
- __le16 unused_1;
- u8 seg_flags;
- u8 unused_2;
- __le16 unused_3;
- __le32 unused_4;
- __le32 seq_no;
- __le32 unused_5;
-};
-
-/* hwrm_dbg_coredump_retrieve_output (size:128b/16B) */
-struct hwrm_dbg_coredump_retrieve_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 flags;
- #define DBG_COREDUMP_RETRIEVE_RESP_FLAGS_MORE 0x1UL
- u8 unused_0;
- __le16 data_len;
- u8 unused_1[3];
- u8 valid;
-};
-
-/* hwrm_dbg_ring_info_get_input (size:192b/24B) */
-struct hwrm_dbg_ring_info_get_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 ring_type;
- #define DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL 0x0UL
- #define DBG_RING_INFO_GET_REQ_RING_TYPE_TX 0x1UL
- #define DBG_RING_INFO_GET_REQ_RING_TYPE_RX 0x2UL
- #define DBG_RING_INFO_GET_REQ_RING_TYPE_NQ 0x3UL
- #define DBG_RING_INFO_GET_REQ_RING_TYPE_LAST DBG_RING_INFO_GET_REQ_RING_TYPE_NQ
- u8 unused_0[3];
- __le32 fw_ring_id;
-};
-
-/* hwrm_dbg_ring_info_get_output (size:192b/24B) */
-struct hwrm_dbg_ring_info_get_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 producer_index;
- __le32 consumer_index;
- __le32 cag_vector_ctrl;
- __le16 st_tag;
- u8 unused_0;
- u8 valid;
-};
-
-/* hwrm_nvm_read_input (size:320b/40B) */
-struct hwrm_nvm_read_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_dest_addr;
- __le16 dir_idx;
- u8 unused_0[2];
- __le32 offset;
- __le32 len;
- u8 unused_1[4];
-};
-
-/* hwrm_nvm_read_output (size:128b/16B) */
-struct hwrm_nvm_read_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_nvm_get_dir_entries_input (size:192b/24B) */
-struct hwrm_nvm_get_dir_entries_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_dest_addr;
-};
-
-/* hwrm_nvm_get_dir_entries_output (size:128b/16B) */
-struct hwrm_nvm_get_dir_entries_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_nvm_get_dir_info_input (size:128b/16B) */
-struct hwrm_nvm_get_dir_info_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
-};
-
-/* hwrm_nvm_get_dir_info_output (size:192b/24B) */
-struct hwrm_nvm_get_dir_info_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 entries;
- __le32 entry_length;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_nvm_write_input (size:448b/56B) */
-struct hwrm_nvm_write_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_src_addr;
- __le16 dir_type;
- __le16 dir_ordinal;
- __le16 dir_ext;
- __le16 dir_attr;
- __le32 dir_data_length;
- __le16 option;
- __le16 flags;
- #define NVM_WRITE_REQ_FLAGS_KEEP_ORIG_ACTIVE_IMG 0x1UL
- #define NVM_WRITE_REQ_FLAGS_BATCH_MODE 0x2UL
- #define NVM_WRITE_REQ_FLAGS_BATCH_LAST 0x4UL
- __le32 dir_item_length;
- __le32 offset;
- __le32 len;
- __le32 unused_0;
-};
-
-/* hwrm_nvm_write_output (size:128b/16B) */
-struct hwrm_nvm_write_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 dir_item_length;
- __le16 dir_idx;
- u8 unused_0;
- u8 valid;
-};
-
-/* hwrm_nvm_write_cmd_err (size:64b/8B) */
-struct hwrm_nvm_write_cmd_err {
- u8 code;
- #define NVM_WRITE_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define NVM_WRITE_CMD_ERR_CODE_FRAG_ERR 0x1UL
- #define NVM_WRITE_CMD_ERR_CODE_NO_SPACE 0x2UL
- #define NVM_WRITE_CMD_ERR_CODE_LAST NVM_WRITE_CMD_ERR_CODE_NO_SPACE
- u8 unused_0[7];
-};
-
-/* hwrm_nvm_modify_input (size:320b/40B) */
-struct hwrm_nvm_modify_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_src_addr;
- __le16 dir_idx;
- __le16 flags;
- #define NVM_MODIFY_REQ_FLAGS_BATCH_MODE 0x1UL
- #define NVM_MODIFY_REQ_FLAGS_BATCH_LAST 0x2UL
- __le32 offset;
- __le32 len;
- u8 unused_1[4];
-};
-
-/* hwrm_nvm_modify_output (size:128b/16B) */
-struct hwrm_nvm_modify_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_nvm_find_dir_entry_input (size:256b/32B) */
-struct hwrm_nvm_find_dir_entry_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define NVM_FIND_DIR_ENTRY_REQ_ENABLES_DIR_IDX_VALID 0x1UL
- __le16 dir_idx;
- __le16 dir_type;
- __le16 dir_ordinal;
- __le16 dir_ext;
- u8 opt_ordinal;
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_MASK 0x3UL
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_SFT 0
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ 0x0UL
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GE 0x1UL
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT 0x2UL
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_LAST NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT
- u8 unused_0[3];
-};
-
-/* hwrm_nvm_find_dir_entry_output (size:256b/32B) */
-struct hwrm_nvm_find_dir_entry_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 dir_item_length;
- __le32 dir_data_length;
- __le32 fw_ver;
- __le16 dir_ordinal;
- __le16 dir_idx;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_nvm_erase_dir_entry_input (size:192b/24B) */
-struct hwrm_nvm_erase_dir_entry_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 dir_idx;
- u8 unused_0[6];
-};
-
-/* hwrm_nvm_erase_dir_entry_output (size:128b/16B) */
-struct hwrm_nvm_erase_dir_entry_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_nvm_get_dev_info_input (size:128b/16B) */
-struct hwrm_nvm_get_dev_info_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
-};
-
-/* hwrm_nvm_get_dev_info_output (size:704b/88B) */
-struct hwrm_nvm_get_dev_info_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 manufacturer_id;
- __le16 device_id;
- __le32 sector_size;
- __le32 nvram_size;
- __le32 reserved_size;
- __le32 available_size;
- u8 nvm_cfg_ver_maj;
- u8 nvm_cfg_ver_min;
- u8 nvm_cfg_ver_upd;
- u8 flags;
- #define NVM_GET_DEV_INFO_RESP_FLAGS_FW_VER_VALID 0x1UL
- char pkg_name[16];
- __le16 hwrm_fw_major;
- __le16 hwrm_fw_minor;
- __le16 hwrm_fw_build;
- __le16 hwrm_fw_patch;
- __le16 mgmt_fw_major;
- __le16 mgmt_fw_minor;
- __le16 mgmt_fw_build;
- __le16 mgmt_fw_patch;
- __le16 roce_fw_major;
- __le16 roce_fw_minor;
- __le16 roce_fw_build;
- __le16 roce_fw_patch;
- __le16 netctrl_fw_major;
- __le16 netctrl_fw_minor;
- __le16 netctrl_fw_build;
- __le16 netctrl_fw_patch;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_nvm_mod_dir_entry_input (size:256b/32B) */
-struct hwrm_nvm_mod_dir_entry_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define NVM_MOD_DIR_ENTRY_REQ_ENABLES_CHECKSUM 0x1UL
- __le16 dir_idx;
- __le16 dir_ordinal;
- __le16 dir_ext;
- __le16 dir_attr;
- __le32 checksum;
-};
-
-/* hwrm_nvm_mod_dir_entry_output (size:128b/16B) */
-struct hwrm_nvm_mod_dir_entry_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_nvm_verify_update_input (size:192b/24B) */
-struct hwrm_nvm_verify_update_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 dir_type;
- __le16 dir_ordinal;
- __le16 dir_ext;
- u8 unused_0[2];
-};
-
-/* hwrm_nvm_verify_update_output (size:128b/16B) */
-struct hwrm_nvm_verify_update_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_nvm_install_update_input (size:192b/24B) */
-struct hwrm_nvm_install_update_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 install_type;
- #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_NORMAL 0x0UL
- #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL 0xffffffffUL
- #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_LAST NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL
- __le16 flags;
- #define NVM_INSTALL_UPDATE_REQ_FLAGS_ERASE_UNUSED_SPACE 0x1UL
- #define NVM_INSTALL_UPDATE_REQ_FLAGS_REMOVE_UNUSED_PKG 0x2UL
- #define NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG 0x4UL
- #define NVM_INSTALL_UPDATE_REQ_FLAGS_VERIFY_ONLY 0x8UL
- u8 unused_0[2];
-};
-
-/* hwrm_nvm_install_update_output (size:192b/24B) */
-struct hwrm_nvm_install_update_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 installed_items;
- u8 result;
- #define NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS 0x0UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_FAILURE 0xffUL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_MALLOC_FAILURE 0xfdUL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_INDEX_PARAMETER 0xfbUL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TYPE_PARAMETER 0xf3UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PREREQUISITE 0xf2UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_FILE_HEADER 0xecUL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_SIGNATURE 0xebUL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_STREAM 0xeaUL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_LENGTH 0xe9UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_MANIFEST 0xe8UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TRAILER 0xe7UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_CHECKSUM 0xe6UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_ITEM_CHECKSUM 0xe5UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DATA_LENGTH 0xe4UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DIRECTIVE 0xe1UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_CHIP_REV 0xceUL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_DEVICE_ID 0xcdUL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_VENDOR 0xccUL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_ID 0xcbUL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_PLATFORM 0xc5UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_DUPLICATE_ITEM 0xc4UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_ZERO_LENGTH_ITEM 0xc3UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_CHECKSUM_ERROR 0xb9UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_DATA_ERROR 0xb8UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_AUTHENTICATION_ERROR 0xb7UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_NOT_FOUND 0xb0UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED 0xa7UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_LAST NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED
- u8 problem_item;
- #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_NONE 0x0UL
- #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE 0xffUL
- #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_LAST NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE
- u8 reset_required;
- #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_NONE 0x0UL
- #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_PCI 0x1UL
- #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER 0x2UL
- #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_LAST NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER
- u8 unused_0[4];
- u8 valid;
-};
-
-/* hwrm_nvm_install_update_cmd_err (size:64b/8B) */
-struct hwrm_nvm_install_update_cmd_err {
- u8 code;
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR 0x1UL
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE 0x2UL
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK 0x3UL
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_VOLTREG_SUPPORT 0x4UL
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_LAST NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_VOLTREG_SUPPORT
- u8 unused_0[7];
-};
-
-/* hwrm_nvm_get_variable_input (size:320b/40B) */
-struct hwrm_nvm_get_variable_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 dest_data_addr;
- __le16 data_len;
- __le16 option_num;
- #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL
- #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL
- #define NVM_GET_VARIABLE_REQ_OPTION_NUM_LAST NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF
- __le16 dimensions;
- __le16 index_0;
- __le16 index_1;
- __le16 index_2;
- __le16 index_3;
- u8 flags;
- #define NVM_GET_VARIABLE_REQ_FLAGS_FACTORY_DFLT 0x1UL
- u8 unused_0;
-};
-
-/* hwrm_nvm_get_variable_output (size:128b/16B) */
-struct hwrm_nvm_get_variable_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 data_len;
- __le16 option_num;
- #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_0 0x0UL
- #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF 0xffffUL
- #define NVM_GET_VARIABLE_RESP_OPTION_NUM_LAST NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_nvm_get_variable_cmd_err (size:64b/8B) */
-struct hwrm_nvm_get_variable_cmd_err {
- u8 code;
- #define NVM_GET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL
- #define NVM_GET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL
- #define NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT 0x3UL
- #define NVM_GET_VARIABLE_CMD_ERR_CODE_LAST NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT
- u8 unused_0[7];
-};
-
-/* hwrm_nvm_set_variable_input (size:320b/40B) */
-struct hwrm_nvm_set_variable_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 src_data_addr;
- __le16 data_len;
- __le16 option_num;
- #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL
- #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL
- #define NVM_SET_VARIABLE_REQ_OPTION_NUM_LAST NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF
- __le16 dimensions;
- __le16 index_0;
- __le16 index_1;
- __le16 index_2;
- __le16 index_3;
- u8 flags;
- #define NVM_SET_VARIABLE_REQ_FLAGS_FORCE_FLUSH 0x1UL
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_MASK 0xeUL
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_SFT 1
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_NONE (0x0UL << 1)
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1 (0x1UL << 1)
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_AES256 (0x2UL << 1)
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH (0x3UL << 1)
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH
- #define NVM_SET_VARIABLE_REQ_FLAGS_FLAGS_UNUSED_0_MASK 0x70UL
- #define NVM_SET_VARIABLE_REQ_FLAGS_FLAGS_UNUSED_0_SFT 4
- #define NVM_SET_VARIABLE_REQ_FLAGS_FACTORY_DEFAULT 0x80UL
- u8 unused_0;
-};
-
-/* hwrm_nvm_set_variable_output (size:128b/16B) */
-struct hwrm_nvm_set_variable_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_nvm_set_variable_cmd_err (size:64b/8B) */
-struct hwrm_nvm_set_variable_cmd_err {
- u8 code;
- #define NVM_SET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define NVM_SET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL
- #define NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL
- #define NVM_SET_VARIABLE_CMD_ERR_CODE_LAST NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR
- u8 unused_0[7];
-};
-
-/* hwrm_selftest_qlist_input (size:128b/16B) */
-struct hwrm_selftest_qlist_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
-};
-
-/* hwrm_selftest_qlist_output (size:2240b/280B) */
-struct hwrm_selftest_qlist_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 num_tests;
- u8 available_tests;
- #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_NVM_TEST 0x1UL
- #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_LINK_TEST 0x2UL
- #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_REGISTER_TEST 0x4UL
- #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_MEMORY_TEST 0x8UL
- #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_PCIE_SERDES_TEST 0x10UL
- #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_ETHERNET_SERDES_TEST 0x20UL
- u8 offline_tests;
- #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_NVM_TEST 0x1UL
- #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_LINK_TEST 0x2UL
- #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_REGISTER_TEST 0x4UL
- #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_MEMORY_TEST 0x8UL
- #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_PCIE_SERDES_TEST 0x10UL
- #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_ETHERNET_SERDES_TEST 0x20UL
- u8 unused_0;
- __le16 test_timeout;
- u8 unused_1[2];
- char test_name[8][32];
- u8 eyescope_target_BER_support;
- #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E8_SUPPORTED 0x0UL
- #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E9_SUPPORTED 0x1UL
- #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E10_SUPPORTED 0x2UL
- #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E11_SUPPORTED 0x3UL
- #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E12_SUPPORTED 0x4UL
- #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_LAST SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E12_SUPPORTED
- u8 unused_2[6];
- u8 valid;
-};
-
-/* hwrm_selftest_exec_input (size:192b/24B) */
-struct hwrm_selftest_exec_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 flags;
- #define SELFTEST_EXEC_REQ_FLAGS_NVM_TEST 0x1UL
- #define SELFTEST_EXEC_REQ_FLAGS_LINK_TEST 0x2UL
- #define SELFTEST_EXEC_REQ_FLAGS_REGISTER_TEST 0x4UL
- #define SELFTEST_EXEC_REQ_FLAGS_MEMORY_TEST 0x8UL
- #define SELFTEST_EXEC_REQ_FLAGS_PCIE_SERDES_TEST 0x10UL
- #define SELFTEST_EXEC_REQ_FLAGS_ETHERNET_SERDES_TEST 0x20UL
- u8 unused_0[7];
-};
-
-/* hwrm_selftest_exec_output (size:128b/16B) */
-struct hwrm_selftest_exec_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 requested_tests;
- #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_NVM_TEST 0x1UL
- #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_LINK_TEST 0x2UL
- #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_REGISTER_TEST 0x4UL
- #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_MEMORY_TEST 0x8UL
- #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_PCIE_SERDES_TEST 0x10UL
- #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_ETHERNET_SERDES_TEST 0x20UL
- u8 test_success;
- #define SELFTEST_EXEC_RESP_TEST_SUCCESS_NVM_TEST 0x1UL
- #define SELFTEST_EXEC_RESP_TEST_SUCCESS_LINK_TEST 0x2UL
- #define SELFTEST_EXEC_RESP_TEST_SUCCESS_REGISTER_TEST 0x4UL
- #define SELFTEST_EXEC_RESP_TEST_SUCCESS_MEMORY_TEST 0x8UL
- #define SELFTEST_EXEC_RESP_TEST_SUCCESS_PCIE_SERDES_TEST 0x10UL
- #define SELFTEST_EXEC_RESP_TEST_SUCCESS_ETHERNET_SERDES_TEST 0x20UL
- u8 unused_0[5];
- u8 valid;
-};
-
-/* hwrm_selftest_irq_input (size:128b/16B) */
-struct hwrm_selftest_irq_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
-};
-
-/* hwrm_selftest_irq_output (size:128b/16B) */
-struct hwrm_selftest_irq_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* dbc_dbc (size:64b/8B) */
-struct dbc_dbc {
- u32 index;
- #define DBC_DBC_INDEX_MASK 0xffffffUL
- #define DBC_DBC_INDEX_SFT 0
- #define DBC_DBC_EPOCH 0x1000000UL
- #define DBC_DBC_TOGGLE_MASK 0x6000000UL
- #define DBC_DBC_TOGGLE_SFT 25
- u32 type_path_xid;
- #define DBC_DBC_XID_MASK 0xfffffUL
- #define DBC_DBC_XID_SFT 0
- #define DBC_DBC_PATH_MASK 0x3000000UL
- #define DBC_DBC_PATH_SFT 24
- #define DBC_DBC_PATH_ROCE (0x0UL << 24)
- #define DBC_DBC_PATH_L2 (0x1UL << 24)
- #define DBC_DBC_PATH_ENGINE (0x2UL << 24)
- #define DBC_DBC_PATH_LAST DBC_DBC_PATH_ENGINE
- #define DBC_DBC_VALID 0x4000000UL
- #define DBC_DBC_DEBUG_TRACE 0x8000000UL
- #define DBC_DBC_TYPE_MASK 0xf0000000UL
- #define DBC_DBC_TYPE_SFT 28
- #define DBC_DBC_TYPE_SQ (0x0UL << 28)
- #define DBC_DBC_TYPE_RQ (0x1UL << 28)
- #define DBC_DBC_TYPE_SRQ (0x2UL << 28)
- #define DBC_DBC_TYPE_SRQ_ARM (0x3UL << 28)
- #define DBC_DBC_TYPE_CQ (0x4UL << 28)
- #define DBC_DBC_TYPE_CQ_ARMSE (0x5UL << 28)
- #define DBC_DBC_TYPE_CQ_ARMALL (0x6UL << 28)
- #define DBC_DBC_TYPE_CQ_ARMENA (0x7UL << 28)
- #define DBC_DBC_TYPE_SRQ_ARMENA (0x8UL << 28)
- #define DBC_DBC_TYPE_CQ_CUTOFF_ACK (0x9UL << 28)
- #define DBC_DBC_TYPE_NQ (0xaUL << 28)
- #define DBC_DBC_TYPE_NQ_ARM (0xbUL << 28)
- #define DBC_DBC_TYPE_NQ_MASK (0xeUL << 28)
- #define DBC_DBC_TYPE_NULL (0xfUL << 28)
- #define DBC_DBC_TYPE_LAST DBC_DBC_TYPE_NULL
-};
-
-/* db_push_start (size:64b/8B) */
-struct db_push_start {
- u64 db;
- #define DB_PUSH_START_DB_INDEX_MASK 0xffffffUL
- #define DB_PUSH_START_DB_INDEX_SFT 0
- #define DB_PUSH_START_DB_PI_LO_MASK 0xff000000UL
- #define DB_PUSH_START_DB_PI_LO_SFT 24
- #define DB_PUSH_START_DB_XID_MASK 0xfffff00000000ULL
- #define DB_PUSH_START_DB_XID_SFT 32
- #define DB_PUSH_START_DB_PI_HI_MASK 0xf0000000000000ULL
- #define DB_PUSH_START_DB_PI_HI_SFT 52
- #define DB_PUSH_START_DB_TYPE_MASK 0xf000000000000000ULL
- #define DB_PUSH_START_DB_TYPE_SFT 60
- #define DB_PUSH_START_DB_TYPE_PUSH_START (0xcULL << 60)
- #define DB_PUSH_START_DB_TYPE_PUSH_END (0xdULL << 60)
- #define DB_PUSH_START_DB_TYPE_LAST DB_PUSH_START_DB_TYPE_PUSH_END
-};
-
-/* db_push_end (size:64b/8B) */
-struct db_push_end {
- u64 db;
- #define DB_PUSH_END_DB_INDEX_MASK 0xffffffUL
- #define DB_PUSH_END_DB_INDEX_SFT 0
- #define DB_PUSH_END_DB_PI_LO_MASK 0xff000000UL
- #define DB_PUSH_END_DB_PI_LO_SFT 24
- #define DB_PUSH_END_DB_XID_MASK 0xfffff00000000ULL
- #define DB_PUSH_END_DB_XID_SFT 32
- #define DB_PUSH_END_DB_PI_HI_MASK 0xf0000000000000ULL
- #define DB_PUSH_END_DB_PI_HI_SFT 52
- #define DB_PUSH_END_DB_PATH_MASK 0x300000000000000ULL
- #define DB_PUSH_END_DB_PATH_SFT 56
- #define DB_PUSH_END_DB_PATH_ROCE (0x0ULL << 56)
- #define DB_PUSH_END_DB_PATH_L2 (0x1ULL << 56)
- #define DB_PUSH_END_DB_PATH_ENGINE (0x2ULL << 56)
- #define DB_PUSH_END_DB_PATH_LAST DB_PUSH_END_DB_PATH_ENGINE
- #define DB_PUSH_END_DB_DEBUG_TRACE 0x800000000000000ULL
- #define DB_PUSH_END_DB_TYPE_MASK 0xf000000000000000ULL
- #define DB_PUSH_END_DB_TYPE_SFT 60
- #define DB_PUSH_END_DB_TYPE_PUSH_START (0xcULL << 60)
- #define DB_PUSH_END_DB_TYPE_PUSH_END (0xdULL << 60)
- #define DB_PUSH_END_DB_TYPE_LAST DB_PUSH_END_DB_TYPE_PUSH_END
-};
-
-/* db_push_info (size:64b/8B) */
-struct db_push_info {
- u32 push_size_push_index;
- #define DB_PUSH_INFO_PUSH_INDEX_MASK 0xffffffUL
- #define DB_PUSH_INFO_PUSH_INDEX_SFT 0
- #define DB_PUSH_INFO_PUSH_SIZE_MASK 0x1f000000UL
- #define DB_PUSH_INFO_PUSH_SIZE_SFT 24
- u32 reserved32;
-};
-
-/* fw_status_reg (size:32b/4B) */
-struct fw_status_reg {
- u32 fw_status;
- #define FW_STATUS_REG_CODE_MASK 0xffffUL
- #define FW_STATUS_REG_CODE_SFT 0
- #define FW_STATUS_REG_CODE_READY 0x8000UL
- #define FW_STATUS_REG_CODE_LAST FW_STATUS_REG_CODE_READY
- #define FW_STATUS_REG_IMAGE_DEGRADED 0x10000UL
- #define FW_STATUS_REG_RECOVERABLE 0x20000UL
- #define FW_STATUS_REG_CRASHDUMP_ONGOING 0x40000UL
- #define FW_STATUS_REG_CRASHDUMP_COMPLETE 0x80000UL
- #define FW_STATUS_REG_SHUTDOWN 0x100000UL
- #define FW_STATUS_REG_CRASHED_NO_MASTER 0x200000UL
- #define FW_STATUS_REG_RECOVERING 0x400000UL
- #define FW_STATUS_REG_MANU_DEBUG_STATUS 0x800000UL
-};
-
-/* hcomm_status (size:64b/8B) */
-struct hcomm_status {
- u32 sig_ver;
- #define HCOMM_STATUS_VER_MASK 0xffUL
- #define HCOMM_STATUS_VER_SFT 0
- #define HCOMM_STATUS_VER_LATEST 0x1UL
- #define HCOMM_STATUS_VER_LAST HCOMM_STATUS_VER_LATEST
- #define HCOMM_STATUS_SIGNATURE_MASK 0xffffff00UL
- #define HCOMM_STATUS_SIGNATURE_SFT 8
- #define HCOMM_STATUS_SIGNATURE_VAL (0x484353UL << 8)
- #define HCOMM_STATUS_SIGNATURE_LAST HCOMM_STATUS_SIGNATURE_VAL
- u32 fw_status_loc;
- #define HCOMM_STATUS_TRUE_ADDR_SPACE_MASK 0x3UL
- #define HCOMM_STATUS_TRUE_ADDR_SPACE_SFT 0
- #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_PCIE_CFG 0x0UL
- #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_GRC 0x1UL
- #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_BAR0 0x2UL
- #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_BAR1 0x3UL
- #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_LAST HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_BAR1
- #define HCOMM_STATUS_TRUE_OFFSET_MASK 0xfffffffcUL
- #define HCOMM_STATUS_TRUE_OFFSET_SFT 2
-};
-#define HCOMM_STATUS_STRUCT_LOC 0x31001F0UL
-
-#endif /* _BNXT_HSI_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.c
index 669d24ba0e87..de3427c6c6aa 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.c
@@ -12,8 +12,8 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/pci.h>
+#include <linux/bnxt/hsi.h>
-#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_hwmon.h"
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
index 1df3d56cc4b5..5ce190f50120 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
@@ -20,8 +20,8 @@
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
+#include <linux/bnxt/hsi.h>
-#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
@@ -680,7 +680,7 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
req_type);
else if (rc && rc != HWRM_ERR_CODE_PF_UNAVAILABLE)
hwrm_err(bp, ctx, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
- req_type, token->seq_id, rc);
+ req_type, le16_to_cpu(ctx->req->seq_id), rc);
rc = __hwrm_to_stderr(rc);
exit:
if (token)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
index 15ca51b5d204..791b3a0cdb83 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
@@ -10,7 +10,7 @@
#ifndef BNXT_HWRM_H
#define BNXT_HWRM_H
-#include "bnxt_hsi.h"
+#include <linux/bnxt/hsi.h>
enum bnxt_hwrm_ctx_flags {
/* Update the HWRM_API_FLAGS right below for any new non-internal bit added here */
@@ -58,7 +58,7 @@ void hwrm_update_token(struct bnxt *bp, u16 seq, enum bnxt_hwrm_wait_state s);
#define BNXT_HWRM_MAX_REQ_LEN (bp->hwrm_max_req_len)
#define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input)
-#define HWRM_CMD_MAX_TIMEOUT 40000U
+#define HWRM_CMD_MAX_TIMEOUT 60000U
#define SHORT_HWRM_CMD_TIMEOUT 20
#define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout)
#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index cc07660330f5..a8a74f07bb54 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -15,7 +15,7 @@
#include <linux/timekeeping.h>
#include <linux/ptp_classify.h>
#include <linux/clocksource.h>
-#include "bnxt_hsi.h"
+#include <linux/bnxt/hsi.h>
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ptp.h"
@@ -62,19 +62,20 @@ static int bnxt_ptp_settime(struct ptp_clock_info *ptp_info,
struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
ptp_info);
u64 ns = timespec64_to_ns(ts);
+ unsigned long flags;
if (BNXT_PTP_USE_RTC(ptp->bp))
return bnxt_ptp_cfg_settime(ptp->bp, ns);
- spin_lock_bh(&ptp->ptp_lock);
+ write_seqlock_irqsave(&ptp->ptp_lock, flags);
timecounter_init(&ptp->tc, &ptp->cc, ns);
- spin_unlock_bh(&ptp->ptp_lock);
+ write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
return 0;
}
/* Caller holds ptp_lock */
-static int bnxt_refclk_read(struct bnxt *bp, struct ptp_system_timestamp *sts,
- u64 *ns)
+static int __bnxt_refclk_read(struct bnxt *bp, struct ptp_system_timestamp *sts,
+ u64 *ns)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
u32 high_before, high_now, low;
@@ -97,19 +98,54 @@ static int bnxt_refclk_read(struct bnxt *bp, struct ptp_system_timestamp *sts,
return 0;
}
+static int bnxt_refclk_read(struct bnxt *bp, struct ptp_system_timestamp *sts,
+ u64 *ns)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ unsigned long flags;
+ int rc;
+
+ /* We have to serialize reg access and FW reset */
+ read_seqlock_excl_irqsave(&ptp->ptp_lock, flags);
+ rc = __bnxt_refclk_read(bp, sts, ns);
+ read_sequnlock_excl_irqrestore(&ptp->ptp_lock, flags);
+ return rc;
+}
+
+static int bnxt_refclk_read_low(struct bnxt *bp, struct ptp_system_timestamp *sts,
+ u32 *low)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ unsigned long flags;
+
+ /* We have to serialize reg access and FW reset */
+ read_seqlock_excl_irqsave(&ptp->ptp_lock, flags);
+
+ if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
+ read_sequnlock_excl_irqrestore(&ptp->ptp_lock, flags);
+ return -EIO;
+ }
+
+ ptp_read_system_prets(sts);
+ *low = readl(bp->bar0 + ptp->refclk_mapped_regs[0]);
+ ptp_read_system_postts(sts);
+
+ read_sequnlock_excl_irqrestore(&ptp->ptp_lock, flags);
+ return 0;
+}
+
static void bnxt_ptp_get_current_time(struct bnxt *bp)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
if (!ptp)
return;
- spin_lock_bh(&ptp->ptp_lock);
- WRITE_ONCE(ptp->old_time, ptp->current_time);
+ WRITE_ONCE(ptp->old_time, ptp->current_time >> BNXT_HI_TIMER_SHIFT);
bnxt_refclk_read(bp, NULL, &ptp->current_time);
- spin_unlock_bh(&ptp->ptp_lock);
}
-static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts)
+static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts,
+ u32 txts_tmo, int slot)
{
struct hwrm_port_ts_query_output *resp;
struct hwrm_port_ts_query_input *req;
@@ -122,10 +158,16 @@ static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts)
req->flags = cpu_to_le32(flags);
if ((flags & PORT_TS_QUERY_REQ_FLAGS_PATH) ==
PORT_TS_QUERY_REQ_FLAGS_PATH_TX) {
+ struct bnxt_ptp_tx_req *txts_req = &bp->ptp_cfg->txts_req[slot];
+ u32 tmo_us = txts_tmo * 1000;
+
req->enables = cpu_to_le16(BNXT_PTP_QTS_TX_ENABLES);
- req->ptp_seq_id = cpu_to_le32(bp->ptp_cfg->tx_seqid);
- req->ptp_hdr_offset = cpu_to_le16(bp->ptp_cfg->tx_hdr_off);
- req->ts_req_timeout = cpu_to_le16(BNXT_PTP_QTS_TIMEOUT);
+ req->ptp_seq_id = cpu_to_le32(txts_req->tx_seqid);
+ req->ptp_hdr_offset = cpu_to_le16(txts_req->tx_hdr_off);
+ if (!tmo_us)
+ tmo_us = BNXT_PTP_QTS_TIMEOUT;
+ tmo_us = min(tmo_us, BNXT_PTP_QTS_MAX_TMO_US);
+ req->ts_req_timeout = cpu_to_le16(tmo_us);
}
resp = hwrm_req_hold(bp, req);
@@ -143,28 +185,26 @@ static int bnxt_ptp_gettimex(struct ptp_clock_info *ptp_info,
struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
ptp_info);
u64 ns, cycles;
+ u32 low;
int rc;
- spin_lock_bh(&ptp->ptp_lock);
- rc = bnxt_refclk_read(ptp->bp, sts, &cycles);
- if (rc) {
- spin_unlock_bh(&ptp->ptp_lock);
+ rc = bnxt_refclk_read_low(ptp->bp, sts, &low);
+ if (rc)
return rc;
- }
- ns = timecounter_cyc2time(&ptp->tc, cycles);
- spin_unlock_bh(&ptp->ptp_lock);
+
+ cycles = bnxt_extend_cycles_32b_to_48b(ptp, low);
+ ns = bnxt_timecounter_cyc2time(ptp, cycles);
*ts = ns_to_timespec64(ns);
return 0;
}
-/* Caller holds ptp_lock */
void bnxt_ptp_update_current_time(struct bnxt *bp)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
bnxt_refclk_read(ptp->bp, NULL, &ptp->current_time);
- WRITE_ONCE(ptp->old_time, ptp->current_time);
+ WRITE_ONCE(ptp->old_time, ptp->current_time >> BNXT_HI_TIMER_SHIFT);
}
static int bnxt_ptp_adjphc(struct bnxt_ptp_cfg *ptp, s64 delta)
@@ -183,9 +223,7 @@ static int bnxt_ptp_adjphc(struct bnxt_ptp_cfg *ptp, s64 delta)
if (rc) {
netdev_err(ptp->bp->dev, "ptp adjphc failed. rc = %x\n", rc);
} else {
- spin_lock_bh(&ptp->ptp_lock);
bnxt_ptp_update_current_time(ptp->bp);
- spin_unlock_bh(&ptp->ptp_lock);
}
return rc;
@@ -195,13 +233,14 @@ static int bnxt_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
{
struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
ptp_info);
+ unsigned long flags;
if (BNXT_PTP_USE_RTC(ptp->bp))
return bnxt_ptp_adjphc(ptp, delta);
- spin_lock_bh(&ptp->ptp_lock);
+ write_seqlock_irqsave(&ptp->ptp_lock, flags);
timecounter_adjtime(&ptp->tc, delta);
- spin_unlock_bh(&ptp->ptp_lock);
+ write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
return 0;
}
@@ -229,14 +268,15 @@ static int bnxt_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
ptp_info);
struct bnxt *bp = ptp->bp;
+ unsigned long flags;
if (!BNXT_MH(bp))
return bnxt_ptp_adjfine_rtc(bp, scaled_ppm);
- spin_lock_bh(&ptp->ptp_lock);
+ write_seqlock_irqsave(&ptp->ptp_lock, flags);
timecounter_read(&ptp->tc);
ptp->cc.mult = adjust_by_scaled_ppm(ptp->cmult, scaled_ppm);
- spin_unlock_bh(&ptp->ptp_lock);
+ write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
return 0;
}
@@ -247,9 +287,7 @@ void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2)
u64 ns, pps_ts;
pps_ts = EVENT_PPS_TS(data2, data1);
- spin_lock_bh(&ptp->ptp_lock);
- ns = timecounter_cyc2time(&ptp->tc, pps_ts);
- spin_unlock_bh(&ptp->ptp_lock);
+ ns = bnxt_timecounter_cyc2time(ptp, pps_ts);
switch (EVENT_DATA2_PPS_EVENT_TYPE(data2)) {
case ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_INTERNAL:
@@ -388,14 +426,11 @@ static int bnxt_get_target_cycles(struct bnxt_ptp_cfg *ptp, u64 target_ns,
u64 nsec_now, nsec_delta;
int rc;
- spin_lock_bh(&ptp->ptp_lock);
rc = bnxt_refclk_read(ptp->bp, NULL, &cycles_now);
- if (rc) {
- spin_unlock_bh(&ptp->ptp_lock);
+ if (rc)
return rc;
- }
- nsec_now = timecounter_cyc2time(&ptp->tc, cycles_now);
- spin_unlock_bh(&ptp->ptp_lock);
+
+ nsec_now = bnxt_timecounter_cyc2time(ptp, cycles_now);
nsec_delta = target_ns - nsec_now;
*cycles_delta = div64_u64(nsec_delta << ptp->cc.shift, ptp->cc.mult);
@@ -525,10 +560,11 @@ static int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
return bnxt_ptp_cfg_tstamp_filters(bp);
}
-int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+int bnxt_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *stmpconf,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = netdev_priv(dev);
- struct hwtstamp_config stmpconf;
struct bnxt_ptp_cfg *ptp;
u16 old_rxctl;
int old_rx_filter, rc;
@@ -538,17 +574,14 @@ int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
if (!ptp)
return -EOPNOTSUPP;
- if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
- return -EFAULT;
-
- if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
- stmpconf.tx_type != HWTSTAMP_TX_OFF)
+ if (stmpconf->tx_type != HWTSTAMP_TX_ON &&
+ stmpconf->tx_type != HWTSTAMP_TX_OFF)
return -ERANGE;
old_rx_filter = ptp->rx_filter;
old_rxctl = ptp->rxctl;
old_tx_tstamp_en = ptp->tx_tstamp_en;
- switch (stmpconf.rx_filter) {
+ switch (stmpconf->rx_filter) {
case HWTSTAMP_FILTER_NONE:
ptp->rxctl = 0;
ptp->rx_filter = HWTSTAMP_FILTER_NONE;
@@ -581,7 +614,7 @@ int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
return -ERANGE;
}
- if (stmpconf.tx_type == HWTSTAMP_TX_ON)
+ if (stmpconf->tx_type == HWTSTAMP_TX_ON)
ptp->tx_tstamp_en = 1;
else
ptp->tx_tstamp_en = 0;
@@ -590,9 +623,8 @@ int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
if (rc)
goto ts_set_err;
- stmpconf.rx_filter = ptp->rx_filter;
- return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
- -EFAULT : 0;
+ stmpconf->rx_filter = ptp->rx_filter;
+ return 0;
ts_set_err:
ptp->rx_filter = old_rx_filter;
@@ -601,22 +633,22 @@ ts_set_err:
return rc;
}
-int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+int bnxt_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *stmpconf)
{
struct bnxt *bp = netdev_priv(dev);
- struct hwtstamp_config stmpconf;
struct bnxt_ptp_cfg *ptp;
ptp = bp->ptp_cfg;
if (!ptp)
return -EOPNOTSUPP;
- stmpconf.flags = 0;
- stmpconf.tx_type = ptp->tx_tstamp_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ stmpconf->flags = 0;
+ stmpconf->tx_type = ptp->tx_tstamp_en ? HWTSTAMP_TX_ON
+ : HWTSTAMP_TX_OFF;
- stmpconf.rx_filter = ptp->rx_filter;
- return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
- -EFAULT : 0;
+ stmpconf->rx_filter = ptp->rx_filter;
+ return 0;
}
static int bnxt_map_regs(struct bnxt *bp, u32 *reg_arr, int count, int reg_win)
@@ -650,6 +682,14 @@ static int bnxt_map_ptp_regs(struct bnxt *bp)
(ptp->refclk_regs[i] & BNXT_GRC_OFFSET_MASK);
return 0;
}
+ if (bp->flags & BNXT_FLAG_CHIP_P7) {
+ for (i = 0; i < 2; i++) {
+ if (reg_arr[i] & BNXT_GRC_BASE_MASK)
+ return -EINVAL;
+ ptp->refclk_mapped_regs[i] = reg_arr[i];
+ }
+ return 0;
+ }
return -ENODEV;
}
@@ -659,38 +699,51 @@ static void bnxt_unmap_ptp_regs(struct bnxt *bp)
(BNXT_PTP_GRC_WIN - 1) * 4);
}
-static u64 bnxt_cc_read(const struct cyclecounter *cc)
+static u64 bnxt_cc_read(struct cyclecounter *cc)
{
struct bnxt_ptp_cfg *ptp = container_of(cc, struct bnxt_ptp_cfg, cc);
u64 ns = 0;
- bnxt_refclk_read(ptp->bp, NULL, &ns);
+ __bnxt_refclk_read(ptp->bp, NULL, &ns);
return ns;
}
-static void bnxt_stamp_tx_skb(struct bnxt *bp, struct sk_buff *skb)
+static int bnxt_stamp_tx_skb(struct bnxt *bp, int slot)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
struct skb_shared_hwtstamps timestamp;
+ struct bnxt_ptp_tx_req *txts_req;
+ unsigned long now = jiffies;
u64 ts = 0, ns = 0;
+ u32 tmo = 0;
int rc;
- rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_PATH_TX, &ts);
+ txts_req = &ptp->txts_req[slot];
+ /* make sure bnxt_get_tx_ts_p5() has updated abs_txts_tmo */
+ smp_rmb();
+ if (!time_after_eq(now, txts_req->abs_txts_tmo))
+ tmo = jiffies_to_msecs(txts_req->abs_txts_tmo - now);
+ rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_PATH_TX, &ts,
+ tmo, slot);
if (!rc) {
memset(&timestamp, 0, sizeof(timestamp));
- spin_lock_bh(&ptp->ptp_lock);
- ns = timecounter_cyc2time(&ptp->tc, ts);
- spin_unlock_bh(&ptp->ptp_lock);
+ ns = bnxt_timecounter_cyc2time(ptp, ts);
timestamp.hwtstamp = ns_to_ktime(ns);
- skb_tstamp_tx(ptp->tx_skb, &timestamp);
+ skb_tstamp_tx(txts_req->tx_skb, &timestamp);
+ ptp->stats.ts_pkts++;
} else {
+ if (!time_after_eq(jiffies, txts_req->abs_txts_tmo))
+ return -EAGAIN;
+
+ ptp->stats.ts_lost++;
netdev_warn_once(bp->dev,
"TS query for TX timer failed rc = %x\n", rc);
}
- dev_kfree_skb_any(ptp->tx_skb);
- ptp->tx_skb = NULL;
- atomic_inc(&ptp->tx_avail);
+ dev_kfree_skb_any(txts_req->tx_skb);
+ txts_req->tx_skb = NULL;
+
+ return 0;
}
static long bnxt_ptp_ts_aux_work(struct ptp_clock_info *ptp_info)
@@ -699,53 +752,136 @@ static long bnxt_ptp_ts_aux_work(struct ptp_clock_info *ptp_info)
ptp_info);
unsigned long now = jiffies;
struct bnxt *bp = ptp->bp;
+ u16 cons = ptp->txts_cons;
+ unsigned long flags;
+ u32 num_requests;
+ int rc = 0;
+
+ num_requests = BNXT_MAX_TX_TS - READ_ONCE(ptp->tx_avail);
+ while (num_requests--) {
+ if (IS_ERR(ptp->txts_req[cons].tx_skb))
+ goto next_slot;
+ if (!ptp->txts_req[cons].tx_skb)
+ break;
+ rc = bnxt_stamp_tx_skb(bp, cons);
+ if (rc == -EAGAIN)
+ break;
+next_slot:
+ BNXT_PTP_INC_TX_AVAIL(ptp);
+ cons = NEXT_TXTS(cons);
+ }
+ ptp->txts_cons = cons;
- if (ptp->tx_skb)
- bnxt_stamp_tx_skb(bp, ptp->tx_skb);
-
- if (!time_after_eq(now, ptp->next_period))
+ if (!time_after_eq(now, ptp->next_period)) {
+ if (rc == -EAGAIN)
+ return 0;
return ptp->next_period - now;
+ }
bnxt_ptp_get_current_time(bp);
ptp->next_period = now + HZ;
if (time_after_eq(now, ptp->next_overflow_check)) {
- spin_lock_bh(&ptp->ptp_lock);
+ write_seqlock_irqsave(&ptp->ptp_lock, flags);
timecounter_read(&ptp->tc);
- spin_unlock_bh(&ptp->ptp_lock);
+ write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
ptp->next_overflow_check = now + BNXT_PHC_OVERFLOW_PERIOD;
}
+ if (rc == -EAGAIN)
+ return 0;
return HZ;
}
-int bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb)
+void bnxt_ptp_free_txts_skbs(struct bnxt_ptp_cfg *ptp)
{
- struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ struct bnxt_ptp_tx_req *txts_req;
+ u16 cons = ptp->txts_cons;
+
+ /* make sure ptp aux worker finished with
+ * possible BNXT_STATE_OPEN set
+ */
+ ptp_cancel_worker_sync(ptp->ptp_clock);
+
+ ptp->tx_avail = BNXT_MAX_TX_TS;
+ while (cons != ptp->txts_prod) {
+ txts_req = &ptp->txts_req[cons];
+ if (!IS_ERR_OR_NULL(txts_req->tx_skb))
+ dev_kfree_skb_any(txts_req->tx_skb);
+ cons = NEXT_TXTS(cons);
+ }
+ ptp->txts_cons = cons;
+ ptp_schedule_worker(ptp->ptp_clock, 0);
+}
- if (ptp->tx_skb) {
- netdev_err(bp->dev, "deferring skb:one SKB is still outstanding\n");
- return -EBUSY;
+int bnxt_ptp_get_txts_prod(struct bnxt_ptp_cfg *ptp, u16 *prod)
+{
+ spin_lock_bh(&ptp->ptp_tx_lock);
+ if (ptp->tx_avail) {
+ *prod = ptp->txts_prod;
+ ptp->txts_prod = NEXT_TXTS(*prod);
+ ptp->tx_avail--;
+ spin_unlock_bh(&ptp->ptp_tx_lock);
+ return 0;
}
- ptp->tx_skb = skb;
+ spin_unlock_bh(&ptp->ptp_tx_lock);
+ atomic64_inc(&ptp->stats.ts_err);
+ return -ENOSPC;
+}
+
+void bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb, u16 prod)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ struct bnxt_ptp_tx_req *txts_req;
+
+ txts_req = &ptp->txts_req[prod];
+ txts_req->abs_txts_tmo = jiffies + msecs_to_jiffies(ptp->txts_tmo);
+ /* make sure abs_txts_tmo is written first */
+ smp_wmb();
+ txts_req->tx_skb = skb;
ptp_schedule_worker(ptp->ptp_clock, 0);
- return 0;
}
int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
- u64 time;
if (!ptp)
return -ENODEV;
- BNXT_READ_TIME64(ptp, time, ptp->old_time);
- *ts = (time & BNXT_HI_TIMER_MASK) | pkt_ts;
- if (pkt_ts < (time & BNXT_LO_TIMER_MASK))
- *ts += BNXT_LO_TIMER_MASK + 1;
+ *ts = bnxt_extend_cycles_32b_to_48b(ptp, pkt_ts);
return 0;
}
+void bnxt_tx_ts_cmp(struct bnxt *bp, struct bnxt_napi *bnapi,
+ struct tx_ts_cmp *tscmp)
+{
+ struct skb_shared_hwtstamps timestamp = {};
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ u32 opaque = tscmp->tx_ts_cmp_opaque;
+ struct bnxt_tx_ring_info *txr;
+ struct bnxt_sw_tx_bd *tx_buf;
+ u64 ts, ns;
+ u16 cons;
+
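+ /* TX timestamp delivered via a completion record: convert the 48-bit
+ * hardware timestamp to ns and report it on the originating skb,
+ * unless the completion carries an error.
+ */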
+ txr = bnapi->tx_ring[TX_OPAQUE_RING(opaque)];
+ ts = BNXT_GET_TX_TS_48B_NS(tscmp);
+ cons = TX_OPAQUE_IDX(opaque);
+ tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
+ if (tx_buf->is_ts_pkt) {
+ if (BNXT_TX_TS_ERR(tscmp)) {
+ netdev_err(bp->dev,
+ "timestamp completion error 0x%x 0x%x\n",
+ le32_to_cpu(tscmp->tx_ts_cmp_flags_type),
+ le32_to_cpu(tscmp->tx_ts_cmp_errors_v));
+ } else {
+ ns = bnxt_timecounter_cyc2time(ptp, ts);
+ timestamp.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(tx_buf->skb, &timestamp);
+ }
+ tx_buf->is_ts_pkt = 0;
+ }
+}
+
static const struct ptp_clock_info bnxt_ptp_caps = {
.owner = THIS_MODULE,
.name = "bnxt clock",
@@ -816,7 +952,6 @@ static int bnxt_ptp_pps_init(struct bnxt *bp)
snprintf(ptp_info->pin_config[i].name,
sizeof(ptp_info->pin_config[i].name), "bnxt_pps%d", i);
ptp_info->pin_config[i].index = i;
- ptp_info->pin_config[i].chan = i;
if (*pin_usg == BNXT_PPS_PIN_PPS_IN)
ptp_info->pin_config[i].func = PTP_PF_EXTTS;
else if (*pin_usg == BNXT_PPS_PIN_PPS_OUT)
@@ -833,6 +968,8 @@ static int bnxt_ptp_pps_init(struct bnxt *bp)
ptp_info->n_per_out = 1;
ptp_info->pps = 1;
ptp_info->verify = bnxt_ptp_verify;
+ ptp_info->supported_extts_flags = PTP_RISING_EDGE | PTP_STRICT_FLAGS;
+ ptp_info->supported_perout_flags = PTP_PEROUT_DUTY_CYCLE;
return 0;
}
@@ -847,6 +984,7 @@ static bool bnxt_pps_config_ok(struct bnxt *bp)
static void bnxt_ptp_timecounter_init(struct bnxt *bp, bool init_tc)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ unsigned long flags;
if (!ptp->ptp_clock) {
memset(&ptp->cc, 0, sizeof(ptp->cc));
@@ -863,8 +1001,11 @@ static void bnxt_ptp_timecounter_init(struct bnxt *bp, bool init_tc)
}
ptp->next_overflow_check = jiffies + BNXT_PHC_OVERFLOW_PERIOD;
}
- if (init_tc)
+ if (init_tc) {
+ write_seqlock_irqsave(&ptp->ptp_lock, flags);
timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real()));
+ write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
+ }
}
/* Caller holds ptp_lock */
@@ -878,6 +1019,7 @@ void bnxt_ptp_rtc_timecounter_init(struct bnxt_ptp_cfg *ptp, u64 ns)
int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg)
{
struct timespec64 tsp;
+ unsigned long flags;
u64 ns;
int rc;
@@ -891,13 +1033,14 @@ int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg)
if (rc)
return rc;
} else {
- rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_CURRENT_TIME, &ns);
+ rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_CURRENT_TIME,
+ &ns, 0, 0);
if (rc)
return rc;
}
- spin_lock_bh(&bp->ptp_cfg->ptp_lock);
+ write_seqlock_irqsave(&bp->ptp_cfg->ptp_lock, flags);
bnxt_ptp_rtc_timecounter_init(bp->ptp_cfg, ns);
- spin_unlock_bh(&bp->ptp_cfg->ptp_lock);
+ write_sequnlock_irqrestore(&bp->ptp_cfg->ptp_lock, flags);
return 0;
}
@@ -909,12 +1052,12 @@ static void bnxt_ptp_free(struct bnxt *bp)
if (ptp->ptp_clock) {
ptp_clock_unregister(ptp->ptp_clock);
ptp->ptp_clock = NULL;
- kfree(ptp->ptp_info.pin_config);
- ptp->ptp_info.pin_config = NULL;
}
+ kfree(ptp->ptp_info.pin_config);
+ ptp->ptp_info.pin_config = NULL;
}
-int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg)
+int bnxt_ptp_init(struct bnxt *bp)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
int rc;
@@ -931,12 +1074,13 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg)
bnxt_ptp_free(bp);
- atomic_set(&ptp->tx_avail, BNXT_MAX_TX_TS);
- spin_lock_init(&ptp->ptp_lock);
+ WRITE_ONCE(ptp->tx_avail, BNXT_MAX_TX_TS);
+ seqlock_init(&ptp->ptp_lock);
+ spin_lock_init(&ptp->ptp_tx_lock);
if (BNXT_PTP_USE_RTC(bp)) {
bnxt_ptp_timecounter_init(bp, false);
- rc = bnxt_ptp_init_rtc(bp, phc_cfg);
+ rc = bnxt_ptp_init_rtc(bp, ptp->rtc_configured);
if (rc)
goto out;
} else {
@@ -958,13 +1102,17 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg)
rc = err;
goto out;
}
- if (BNXT_CHIP_P5(bp)) {
- spin_lock_bh(&ptp->ptp_lock);
+
+ ptp->stats.ts_pkts = 0;
+ ptp->stats.ts_lost = 0;
+ atomic64_set(&ptp->stats.ts_err, 0);
+
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
bnxt_refclk_read(bp, NULL, &ptp->current_time);
- WRITE_ONCE(ptp->old_time, ptp->current_time);
- spin_unlock_bh(&ptp->ptp_lock);
+ WRITE_ONCE(ptp->old_time, ptp->current_time >> BNXT_HI_TIMER_SHIFT);
ptp_schedule_worker(ptp->ptp_clock, 0);
}
+ ptp->txts_tmo = BNXT_PTP_DFLT_TX_TMO;
return 0;
out:
@@ -987,9 +1135,5 @@ void bnxt_ptp_clear(struct bnxt *bp)
kfree(ptp->ptp_info.pin_config);
ptp->ptp_info.pin_config = NULL;
- if (ptp->tx_skb) {
- dev_kfree_skb_any(ptp->tx_skb);
- ptp->tx_skb = NULL;
- }
bnxt_unmap_ptp_regs(bp);
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
index fce8dc39a7d0..8cc2fae47644 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
@@ -21,8 +21,11 @@
#define BNXT_DEVCLK_FREQ 1000000
#define BNXT_LO_TIMER_MASK 0x0000ffffffffUL
#define BNXT_HI_TIMER_MASK 0xffff00000000UL
+#define BNXT_HI_TIMER_SHIFT 24
+#define BNXT_PTP_DFLT_TX_TMO 1000 /* ms */
#define BNXT_PTP_QTS_TIMEOUT 1000
+#define BNXT_PTP_QTS_MAX_TMO_US 65535U
#define BNXT_PTP_QTS_TX_ENABLES (PORT_TS_QUERY_REQ_ENABLES_PTP_SEQ_ID | \
PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT | \
PORT_TS_QUERY_REQ_ENABLES_PTP_HDR_OFFSET)
@@ -77,6 +80,22 @@ struct bnxt_pps {
struct pps_pin pins[BNXT_MAX_TSIO_PINS];
};
+struct bnxt_ptp_stats {
+ u64 ts_pkts;
+ u64 ts_lost;
+ atomic64_t ts_err;
+};
+
+#define BNXT_MAX_TX_TS 4
+#define NEXT_TXTS(idx) (((idx) + 1) & (BNXT_MAX_TX_TS - 1))
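+/* NEXT_TXTS() relies on BNXT_MAX_TX_TS being a power of two */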
+
+struct bnxt_ptp_tx_req {
+ struct sk_buff *tx_skb;
+ u16 tx_seqid;
+ u16 tx_hdr_off;
+ unsigned long abs_txts_tmo;
+};
+
struct bnxt_ptp_cfg {
struct ptp_clock_info ptp_info;
struct ptp_clock *ptp_clock;
@@ -84,21 +103,22 @@ struct bnxt_ptp_cfg {
struct timecounter tc;
struct bnxt_pps pps_info;
/* serialize timecounter access */
- spinlock_t ptp_lock;
- struct sk_buff *tx_skb;
+ seqlock_t ptp_lock;
+ /* serialize ts tx request queuing */
+ spinlock_t ptp_tx_lock;
u64 current_time;
- u64 old_time;
unsigned long next_period;
unsigned long next_overflow_check;
u32 cmult;
+ /* cache of upper 24 bits of the cyclecounter. 8 bits are used to check for roll-over */
+ u32 old_time;
/* a 23b shift cyclecounter will overflow in ~36 mins. Check overflow every 18 mins. */
#define BNXT_PHC_OVERFLOW_PERIOD (18 * 60 * HZ)
- u16 tx_seqid;
- u16 tx_hdr_off;
+ struct bnxt_ptp_tx_req txts_req[BNXT_MAX_TX_TS];
+
struct bnxt *bp;
- atomic_t tx_avail;
-#define BNXT_MAX_TX_TS 1
+ u32 tx_avail;
u16 rxctl;
#define BNXT_PTP_MSG_SYNC (1 << 0)
#define BNXT_PTP_MSG_DELAY_REQ (1 << 1)
@@ -115,36 +135,67 @@ struct bnxt_ptp_cfg {
BNXT_PTP_MSG_PDELAY_REQ | \
BNXT_PTP_MSG_PDELAY_RESP)
u8 tx_tstamp_en:1;
+ u8 rtc_configured:1;
int rx_filter;
u32 tstamp_filters;
u32 refclk_regs[2];
u32 refclk_mapped_regs[2];
+ u32 txts_tmo;
+ u16 txts_prod;
+ u16 txts_cons;
+
+ struct bnxt_ptp_stats stats;
};
-#if BITS_PER_LONG == 32
-#define BNXT_READ_TIME64(ptp, dst, src) \
+#define BNXT_PTP_INC_TX_AVAIL(ptp) \
do { \
- spin_lock_bh(&(ptp)->ptp_lock); \
- (dst) = (src); \
- spin_unlock_bh(&(ptp)->ptp_lock); \
+ spin_lock_bh(&(ptp)->ptp_tx_lock); \
+ (ptp)->tx_avail++; \
+ spin_unlock_bh(&(ptp)->ptp_tx_lock); \
} while (0)
-#else
-#define BNXT_READ_TIME64(ptp, dst, src) \
- ((dst) = READ_ONCE(src))
-#endif
int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id, u16 *hdr_off);
void bnxt_ptp_update_current_time(struct bnxt *bp);
void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2);
int bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp);
void bnxt_ptp_reapply_pps(struct bnxt *bp);
-int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr);
-int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr);
-int bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb);
+int bnxt_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *stmpconf,
+ struct netlink_ext_ack *extack);
+int bnxt_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *stmpconf);
+void bnxt_ptp_free_txts_skbs(struct bnxt_ptp_cfg *ptp);
+int bnxt_ptp_get_txts_prod(struct bnxt_ptp_cfg *ptp, u16 *prod);
+void bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb, u16 prod);
int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts);
+void bnxt_tx_ts_cmp(struct bnxt *bp, struct bnxt_napi *bnapi,
+ struct tx_ts_cmp *tscmp);
void bnxt_ptp_rtc_timecounter_init(struct bnxt_ptp_cfg *ptp, u64 ns);
int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg);
-int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg);
+int bnxt_ptp_init(struct bnxt *bp);
void bnxt_ptp_clear(struct bnxt *bp);
+static inline u64 bnxt_timecounter_cyc2time(struct bnxt_ptp_cfg *ptp, u64 ts)
+{
+ unsigned int seq;
+ u64 ns;
+
+ do {
+ seq = read_seqbegin(&ptp->ptp_lock);
+ ns = timecounter_cyc2time(&ptp->tc, ts);
+ } while (read_seqretry(&ptp->ptp_lock, seq));
+
+ return ns;
+}
+
+static inline u64 bnxt_extend_cycles_32b_to_48b(struct bnxt_ptp_cfg *ptp, u32 ts)
+{
+ u64 time, cycles;
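+ /* The packet timestamp carries only the low 32 bits of the 48-bit
+ * counter. Splice it with the cached upper bits; if it is behind the
+ * cached overlap bits, the low 32 bits have wrapped since old_time
+ * was cached, so carry into the upper half.
+ */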
+
+ time = (u64)READ_ONCE(ptp->old_time) << BNXT_HI_TIMER_SHIFT;
+ cycles = (time & BNXT_HI_TIMER_MASK) | ts;
+ if (ts < (time & BNXT_LO_TIMER_MASK))
+ cycles += BNXT_LO_TIMER_MASK + 1;
+ return cycles;
+}
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 175192ebaa77..be7deb9cc410 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -15,7 +15,8 @@
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
-#include "bnxt_hsi.h"
+#include <net/dcbnl.h>
+#include <linux/bnxt/hsi.h>
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
@@ -196,11 +197,8 @@ int bnxt_get_vf_config(struct net_device *dev, int vf_id,
memcpy(&ivi->mac, vf->vf_mac_addr, ETH_ALEN);
ivi->max_tx_rate = vf->max_tx_rate;
ivi->min_tx_rate = vf->min_tx_rate;
- ivi->vlan = vf->vlan;
- if (vf->flags & BNXT_VF_QOS)
- ivi->qos = vf->vlan >> VLAN_PRIO_SHIFT;
- else
- ivi->qos = 0;
+ ivi->vlan = vf->vlan & VLAN_VID_MASK;
+ ivi->qos = vf->vlan >> VLAN_PRIO_SHIFT;
ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK);
ivi->trusted = bnxt_is_trusted_vf(bp, vf);
if (!(vf->flags & BNXT_VF_LINK_FORCED))
@@ -256,21 +254,21 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
if (bp->hwrm_spec_code < 0x10201)
return -ENOTSUPP;
- if (vlan_proto != htons(ETH_P_8021Q))
+ if (vlan_proto != htons(ETH_P_8021Q) &&
+ (vlan_proto != htons(ETH_P_8021AD) ||
+ !(bp->fw_cap & BNXT_FW_CAP_DFLT_VLAN_TPID_PCP)))
return -EPROTONOSUPPORT;
rc = bnxt_vf_ndo_prep(bp, vf_id);
if (rc)
return rc;
- /* TODO: needed to implement proper handling of user priority,
- * currently fail the command if there is valid priority
- */
- if (vlan_id > 4095 || qos)
+ if (vlan_id >= VLAN_N_VID || qos >= IEEE_8021Q_MAX_PRIORITIES ||
+ (!vlan_id && qos))
return -EINVAL;
vf = &bp->pf.vf[vf_id];
- vlan_tag = vlan_id;
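+ /* encode the user priority in the PCP bits of the default VLAN tag */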
+ vlan_tag = vlan_id | (u16)qos << VLAN_PRIO_SHIFT;
if (vlan_tag == vf->vlan)
return 0;
@@ -279,6 +277,10 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
req->fid = cpu_to_le16(vf->fw_fid);
req->dflt_vlan = cpu_to_le16(vlan_tag);
req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
+ if (bp->fw_cap & BNXT_FW_CAP_DFLT_VLAN_TPID_PCP) {
+ req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_TPID);
+ req->tpid = vlan_proto;
+ }
rc = hwrm_req_send(bp, req);
if (!rc)
vf->vlan = vlan_tag;
@@ -330,6 +332,38 @@ int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
return rc;
}
+static int bnxt_set_vf_link_admin_state(struct bnxt *bp, int vf_id)
+{
+ struct hwrm_func_cfg_input *req;
+ struct bnxt_vf_info *vf;
+ int rc;
+
+ if (!(bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN))
+ return 0;
+
+ vf = &bp->pf.vf[vf_id];
+
+ rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
+ if (rc)
+ return rc;
+
+ req->fid = cpu_to_le16(vf->fw_fid);
+ switch (vf->flags & (BNXT_VF_LINK_FORCED | BNXT_VF_LINK_UP)) {
+ case BNXT_VF_LINK_FORCED:
+ req->options =
+ FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN;
+ break;
+ case (BNXT_VF_LINK_FORCED | BNXT_VF_LINK_UP):
+ req->options = FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_UP;
+ break;
+ default:
+ req->options = FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO;
+ break;
+ }
+ req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE);
+ return hwrm_req_send(bp, req);
+}
+
int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
{
struct bnxt *bp = netdev_priv(dev);
@@ -355,10 +389,11 @@ int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
break;
default:
netdev_err(bp->dev, "Invalid link option\n");
- rc = -EINVAL;
- break;
+ return -EINVAL;
}
- if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED))
+ if (bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN)
+ rc = bnxt_set_vf_link_admin_state(bp, vf_id);
+ else if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED))
rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf,
ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
return rc;
@@ -518,6 +553,63 @@ static int __bnxt_set_vf_params(struct bnxt *bp, int vf_id)
return hwrm_req_send(bp, req);
}
+static void bnxt_hwrm_roce_sriov_cfg(struct bnxt *bp, int num_vfs)
+{
+ struct hwrm_func_qcaps_output *resp;
+ struct hwrm_func_cfg_input *cfg_req;
+ struct hwrm_func_qcaps_input *req;
+ int rc;
+
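+ /* Query the RoCE resources available for VFs and divide them evenly
+ * among the VFs via HWRM_FUNC_CFG.
+ */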
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
+ if (rc)
+ return;
+
+ req->fid = cpu_to_le16(0xffff);
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
+ if (rc)
+ goto err;
+
+ rc = hwrm_req_init(bp, cfg_req, HWRM_FUNC_CFG);
+ if (rc)
+ goto err;
+
+ /* In case of VF Dynamic resource allocation, driver will provision
+ * maximum resources to all the VFs. FW will dynamically allocate
+ * resources to VFs on the fly, so always divide the resources by 1.
+ */
+ if (BNXT_ROCE_VF_DYN_ALLOC_CAP(bp))
+ num_vfs = 1;
+
+ cfg_req->fid = cpu_to_le16(0xffff);
+ cfg_req->enables2 =
+ cpu_to_le32(FUNC_CFG_REQ_ENABLES2_ROCE_MAX_AV_PER_VF |
+ FUNC_CFG_REQ_ENABLES2_ROCE_MAX_CQ_PER_VF |
+ FUNC_CFG_REQ_ENABLES2_ROCE_MAX_MRW_PER_VF |
+ FUNC_CFG_REQ_ENABLES2_ROCE_MAX_QP_PER_VF |
+ FUNC_CFG_REQ_ENABLES2_ROCE_MAX_SRQ_PER_VF |
+ FUNC_CFG_REQ_ENABLES2_ROCE_MAX_GID_PER_VF);
+ cfg_req->roce_max_av_per_vf =
+ cpu_to_le32(le32_to_cpu(resp->roce_vf_max_av) / num_vfs);
+ cfg_req->roce_max_cq_per_vf =
+ cpu_to_le32(le32_to_cpu(resp->roce_vf_max_cq) / num_vfs);
+ cfg_req->roce_max_mrw_per_vf =
+ cpu_to_le32(le32_to_cpu(resp->roce_vf_max_mrw) / num_vfs);
+ cfg_req->roce_max_qp_per_vf =
+ cpu_to_le32(le32_to_cpu(resp->roce_vf_max_qp) / num_vfs);
+ cfg_req->roce_max_srq_per_vf =
+ cpu_to_le32(le32_to_cpu(resp->roce_vf_max_srq) / num_vfs);
+ cfg_req->roce_max_gid_per_vf =
+ cpu_to_le32(le32_to_cpu(resp->roce_vf_max_gid) / num_vfs);
+
+ rc = hwrm_req_send(bp, cfg_req);
+
+err:
+ hwrm_req_drop(bp, req);
+ if (rc)
+ netdev_err(bp->dev, "RoCE sriov configuration failed\n");
+}
+
/* Only called by PF to reserve resources for VFs, returns actual number of
* VFs configured, or < 0 on error.
*/
@@ -607,15 +699,21 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
hwrm_req_hold(bp, req);
for (i = 0; i < num_vfs; i++) {
+ struct bnxt_vf_info *vf = &pf->vf[i];
+
+ vf->fw_fid = pf->first_vf_id + i;
+ rc = bnxt_set_vf_link_admin_state(bp, i);
+ if (rc)
+ break;
+
if (reset)
__bnxt_set_vf_params(bp, i);
- req->vf_id = cpu_to_le16(pf->first_vf_id + i);
+ req->vf_id = cpu_to_le16(vf->fw_fid);
rc = hwrm_req_send(bp, req);
if (rc)
break;
pf->active_vfs = i + 1;
- pf->vf[i].fw_fid = pf->first_vf_id + i;
}
if (pf->active_vfs) {
@@ -682,7 +780,13 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
FUNC_CFG_REQ_ENABLES_NUM_VNICS |
FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);
- mtu = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
+ if (bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN) {
+ req->options = FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO;
+ req->enables |=
+ cpu_to_le32(FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE);
+ }
+
+ mtu = bp->dev->mtu + VLAN_ETH_HLEN;
req->mru = cpu_to_le16(mtu);
req->admin_mtu = cpu_to_le16(mtu);
@@ -757,6 +861,9 @@ int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
*num_vfs = rc;
}
+ if (BNXT_RDMA_SRIOV_EN(bp) && BNXT_ROCE_VF_RESC_CAP(bp))
+ bnxt_hwrm_roce_sriov_cfg(bp, *num_vfs);
+
return 0;
}
@@ -768,7 +875,7 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
int tx_ok = 0, rx_ok = 0, rss_ok = 0;
int avail_cp, avail_stat;
- /* Check if we can enable requested num of vf's. At a mininum
+ /* Check if we can enable requested num of vf's. At a minimum
* we require 1 RX 1 TX rings for each VF. In this minimum conf
* features like TPA will not be available.
*/
@@ -864,7 +971,7 @@ err_out1:
return rc;
}
-void bnxt_sriov_disable(struct bnxt *bp)
+void __bnxt_sriov_disable(struct bnxt *bp)
{
u16 num_vfs = pci_num_vf(bp->pdev);
@@ -888,10 +995,20 @@ void bnxt_sriov_disable(struct bnxt *bp)
devl_unlock(bp->dl);
bnxt_free_vf_resources(bp);
+}
+
+static void bnxt_sriov_disable(struct bnxt *bp)
+{
+ if (!pci_num_vf(bp->pdev))
+ return;
+
+ __bnxt_sriov_disable(bp);
/* Reclaim all resources for the PF. */
rtnl_lock();
+ netdev_lock(bp->dev);
bnxt_restore_pf_fw_resources(bp);
+ netdev_unlock(bp->dev);
rtnl_unlock();
}
@@ -900,23 +1017,22 @@ int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
struct net_device *dev = pci_get_drvdata(pdev);
struct bnxt *bp = netdev_priv(dev);
- if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
- netdev_warn(dev, "Not allow SRIOV if the irq mode is not MSIX\n");
- return 0;
- }
-
rtnl_lock();
+ netdev_lock(dev);
if (!netif_running(dev)) {
netdev_warn(dev, "Reject SRIOV config request since if is down!\n");
+ netdev_unlock(dev);
rtnl_unlock();
return 0;
}
if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
netdev_warn(dev, "Reject SRIOV config request when FW reset is in progress\n");
+ netdev_unlock(dev);
rtnl_unlock();
return 0;
}
bp->sriov_cfg = true;
+ netdev_unlock(dev);
rtnl_unlock();
if (pci_vfs_assigned(bp->pdev)) {
@@ -950,8 +1066,11 @@ static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
struct hwrm_fwd_resp_input *req;
int rc;
- if (BNXT_FWD_RESP_SIZE_ERR(msg_size))
+ if (BNXT_FWD_RESP_SIZE_ERR(msg_size)) {
+ netdev_warn_once(bp->dev, "HWRM fwd response too big (%d bytes)\n",
+ msg_size);
return -EINVAL;
+ }
rc = hwrm_req_init(bp, req, HWRM_FWD_RESP);
if (!rc) {
@@ -1066,7 +1185,7 @@ static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
/* There are two cases:
* 1.If firmware spec < 0x10202,VF MAC address is not forwarded
* to the PF and so it doesn't have to match
- * 2.Allow VF to modify it's own MAC when PF has not assigned a
+ * 2.Allow VF to modify its own MAC when PF has not assigned a
* valid MAC address and firmware spec >= 0x10202
*/
mac_ok = true;
@@ -1085,7 +1204,7 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
rc = bnxt_hwrm_exec_fwd_resp(
bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
} else {
- struct hwrm_port_phy_qcfg_output phy_qcfg_resp = {0};
+ struct hwrm_port_phy_qcfg_output_compat phy_qcfg_resp = {};
struct hwrm_port_phy_qcfg_input *phy_qcfg_req;
phy_qcfg_req =
@@ -1096,6 +1215,11 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
mutex_unlock(&bp->link_lock);
phy_qcfg_resp.resp_len = cpu_to_le16(sizeof(phy_qcfg_resp));
phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;
+ /* New SPEEDS2 fields are beyond the legacy structure, so
+ * clear the SPEEDS2_SUPPORTED flag.
+ */
+ phy_qcfg_resp.option_flags &=
+ ~PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED;
phy_qcfg_resp.valid = 1;
if (vf->flags & BNXT_VF_LINK_UP) {
@@ -1257,7 +1381,7 @@ int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
return 0;
}
-void bnxt_sriov_disable(struct bnxt *bp)
+void __bnxt_sriov_disable(struct bnxt *bp)
{
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
index 9a4bacba477b..e4979d729312 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
@@ -38,7 +38,7 @@ bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf);
int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trust);
int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs);
int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset);
-void bnxt_sriov_disable(struct bnxt *);
+void __bnxt_sriov_disable(struct bnxt *bp);
void bnxt_hwrm_exec_fwd_req(struct bnxt *);
void bnxt_update_vf_mac(struct bnxt *);
int bnxt_approve_mac(struct bnxt *, const u8 *, bool);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 273c9ba48f09..2d66bf59cd64 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -19,8 +19,8 @@
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/vxlan.h>
+#include <linux/bnxt/hsi.h>
-#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_sriov.h"
@@ -244,7 +244,7 @@ bnxt_tc_parse_pedit(struct bnxt *bp, struct bnxt_tc_actions *actions,
offset < offset_of_ip6_daddr + 16) {
actions->nat.src_xlate = false;
idx = (offset - offset_of_ip6_daddr) / 4;
- actions->nat.l3.ipv6.saddr.s6_addr32[idx] = htonl(val);
+ actions->nat.l3.ipv6.daddr.s6_addr32[idx] = htonl(val);
} else {
netdev_err(bp->dev,
"%s: IPv6_hdr: Invalid pedit field\n",
@@ -370,6 +370,7 @@ static int bnxt_tc_parse_flow(struct bnxt *bp,
struct bnxt_tc_flow *flow)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(tc_flow_cmd);
+ struct netlink_ext_ack *extack = tc_flow_cmd->common.extack;
struct flow_dissector *dissector = rule->match.dissector;
/* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */
@@ -380,6 +381,9 @@ static int bnxt_tc_parse_flow(struct bnxt *bp,
return -EOPNOTSUPP;
}
+ if (flow_rule_match_has_control_flags(rule, extack))
+ return -EOPNOTSUPP;
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic match;
@@ -1312,7 +1316,7 @@ static int bnxt_tc_get_decap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
/* Check if there's another flow using the same tunnel decap.
* If not, add this tunnel to the table and resolve the other
- * tunnel header fileds. Ignore src_port in the tunnel_key,
+ * tunnel header fields. Ignore src_port in the tunnel_key,
* since it is not required for decap filters.
*/
decap_key->tp_src = 0;
@@ -1406,7 +1410,7 @@ static int bnxt_tc_get_encap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
/* Check if there's another flow using the same tunnel encap.
* If not, add this tunnel to the table and resolve the other
- * tunnel header fileds
+ * tunnel header fields
*/
encap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->encap_table,
&tc_info->encap_ht_params,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 195c02dc0683..927971c362f1 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -20,8 +20,9 @@
#include <asm/byteorder.h>
#include <linux/bitmap.h>
#include <linux/auxiliary_bus.h>
+#include <net/netdev_lock.h>
+#include <linux/bnxt/hsi.h>
-#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
@@ -31,21 +32,80 @@ static DEFINE_IDA(bnxt_aux_dev_ids);
static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent)
{
struct bnxt_en_dev *edev = bp->edev;
- int num_msix, idx, i;
+ int num_msix, i;
if (!edev->ulp_tbl->msix_requested) {
netdev_warn(bp->dev, "Requested MSI-X vectors insufficient\n");
return;
}
num_msix = edev->ulp_tbl->msix_requested;
- idx = edev->ulp_tbl->msix_base;
for (i = 0; i < num_msix; i++) {
- ent[i].vector = bp->irq_tbl[idx + i].vector;
- ent[i].ring_idx = idx + i;
+ ent[i].vector = bp->irq_tbl[i].vector;
+ ent[i].ring_idx = i;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
ent[i].db_offset = bp->db_offset;
else
- ent[i].db_offset = (idx + i) * 0x80;
+ ent[i].db_offset = i * 0x80;
+ }
+}
+
+int bnxt_get_ulp_msix_num(struct bnxt *bp)
+{
+ if (bp->edev)
+ return bp->edev->ulp_num_msix_vec;
+ return 0;
+}
+
+void bnxt_set_ulp_msix_num(struct bnxt *bp, int num)
+{
+ if (bp->edev)
+ bp->edev->ulp_num_msix_vec = num;
+}
+
+int bnxt_get_ulp_msix_num_in_use(struct bnxt *bp)
+{
+ if (bnxt_ulp_registered(bp->edev))
+ return bp->edev->ulp_num_msix_vec;
+ return 0;
+}
+
+int bnxt_get_ulp_stat_ctxs(struct bnxt *bp)
+{
+ if (bp->edev)
+ return bp->edev->ulp_num_ctxs;
+ return 0;
+}
+
+void bnxt_set_ulp_stat_ctxs(struct bnxt *bp, int num_ulp_ctx)
+{
+ if (bp->edev)
+ bp->edev->ulp_num_ctxs = num_ulp_ctx;
+}
+
+int bnxt_get_ulp_stat_ctxs_in_use(struct bnxt *bp)
+{
+ if (bnxt_ulp_registered(bp->edev))
+ return bp->edev->ulp_num_ctxs;
+ return 0;
+}
+
+void bnxt_set_dflt_ulp_stat_ctxs(struct bnxt *bp)
+{
+ if (bp->edev) {
+ bp->edev->ulp_num_ctxs = BNXT_MIN_ROCE_STAT_CTXS;
+ /* Reserve one additional stat_ctx for PF0 (except
+ * on 1-port NICs) as it also creates one stat_ctx
+ * for PF1 in case of RoCE bonding.
+ */
+ if (BNXT_PF(bp) && !bp->pf.port_id &&
+ bp->port_count > 1)
+ bp->edev->ulp_num_ctxs++;
+
+ /* Reserve one additional stat_ctx when the device is capable
+ * of supporting port mirroring on RDMA device.
+ */
+ if (BNXT_MIRROR_ON_ROCE_CAP(bp))
+ bp->edev->ulp_num_ctxs++;
}
}
@@ -57,25 +117,35 @@ int bnxt_register_dev(struct bnxt_en_dev *edev,
struct bnxt *bp = netdev_priv(dev);
unsigned int max_stat_ctxs;
struct bnxt_ulp *ulp;
+ int rc = 0;
+ netdev_lock(dev);
+ mutex_lock(&edev->en_dev_lock);
+ if (!bp->irq_tbl) {
+ rc = -ENODEV;
+ goto exit;
+ }
max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
if (max_stat_ctxs <= BNXT_MIN_ROCE_STAT_CTXS ||
- bp->cp_nr_rings == max_stat_ctxs)
- return -ENOMEM;
+ bp->cp_nr_rings == max_stat_ctxs) {
+ rc = -ENOMEM;
+ goto exit;
+ }
ulp = edev->ulp_tbl;
- if (!ulp)
- return -ENOMEM;
-
ulp->handle = handle;
rcu_assign_pointer(ulp->ulp_ops, ulp_ops);
if (test_bit(BNXT_STATE_OPEN, &bp->state))
- bnxt_hwrm_vnic_cfg(bp, 0);
+ bnxt_hwrm_vnic_cfg(bp, &bp->vnic_info[BNXT_VNIC_DEFAULT]);
+
+ edev->ulp_tbl->msix_requested = bnxt_get_ulp_msix_num(bp);
bnxt_fill_msix_vecs(bp, bp->edev->msix_entries);
- edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
- return 0;
+exit:
+ mutex_unlock(&edev->en_dev_lock);
+ netdev_unlock(dev);
+ return rc;
}
EXPORT_SYMBOL(bnxt_register_dev);
@@ -84,11 +154,11 @@ void bnxt_unregister_dev(struct bnxt_en_dev *edev)
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
struct bnxt_ulp *ulp;
- int i = 0;
ulp = edev->ulp_tbl;
- if (ulp->msix_requested)
- edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
+ netdev_lock(dev);
+ mutex_lock(&edev->en_dev_lock);
+ edev->ulp_tbl->msix_requested = 0;
if (ulp->max_async_event_id)
bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, true);
@@ -97,44 +167,25 @@ void bnxt_unregister_dev(struct bnxt_en_dev *edev)
synchronize_rcu();
ulp->max_async_event_id = 0;
ulp->async_events_bmap = NULL;
- while (atomic_read(&ulp->ref_count) != 0 && i < 10) {
- msleep(100);
- i++;
- }
+ mutex_unlock(&edev->en_dev_lock);
+ netdev_unlock(dev);
return;
}
EXPORT_SYMBOL(bnxt_unregister_dev);
-int bnxt_get_ulp_msix_num(struct bnxt *bp)
-{
- u32 roce_msix = BNXT_VF(bp) ?
- BNXT_MAX_VF_ROCE_MSIX : BNXT_MAX_ROCE_MSIX;
-
- return ((bp->flags & BNXT_FLAG_ROCE_CAP) ?
- min_t(u32, roce_msix, num_online_cpus()) : 0);
-}
-
-int bnxt_get_ulp_msix_base(struct bnxt *bp)
-{
- if (bnxt_ulp_registered(bp->edev)) {
- struct bnxt_en_dev *edev = bp->edev;
-
- if (edev->ulp_tbl->msix_requested)
- return edev->ulp_tbl->msix_base;
- }
- return 0;
-}
-
-int bnxt_get_ulp_stat_ctxs(struct bnxt *bp)
+static int bnxt_set_dflt_ulp_msix(struct bnxt *bp)
{
- if (bnxt_ulp_registered(bp->edev)) {
- struct bnxt_en_dev *edev = bp->edev;
+ int roce_msix = BNXT_MAX_ROCE_MSIX;
- if (edev->ulp_tbl->msix_requested)
- return BNXT_MIN_ROCE_STAT_CTXS;
- }
+ if (BNXT_VF(bp))
+ roce_msix = BNXT_MAX_ROCE_MSIX_VF;
+ else if (bp->port_partition_type)
+ roce_msix = BNXT_MAX_ROCE_MSIX_NPAR_PF;
- return 0;
+ /* NQ MSIX vectors should match the number of CPUs plus 1 more for
+ * the CREQ MSIX, up to the default.
+ */
+ return min_t(int, roce_msix, num_online_cpus() + 1);
}
int bnxt_send_msg(struct bnxt_en_dev *edev,
@@ -156,7 +207,7 @@ int bnxt_send_msg(struct bnxt_en_dev *edev,
rc = hwrm_req_replace(bp, req, fw_msg->msg, fw_msg->msg_len);
if (rc)
- return rc;
+ goto drop_req;
hwrm_req_timeout(bp, req, fw_msg->timeout);
resp = hwrm_req_hold(bp, req);
@@ -168,6 +219,7 @@ int bnxt_send_msg(struct bnxt_en_dev *edev,
memcpy(fw_msg->resp, resp, resp_len);
}
+drop_req:
hwrm_req_drop(bp, req);
return rc;
}
@@ -181,13 +233,18 @@ void bnxt_ulp_stop(struct bnxt *bp)
if (!edev)
return;
+ mutex_lock(&edev->en_dev_lock);
+ if (!bnxt_ulp_registered(edev) ||
+ (edev->flags & BNXT_EN_FLAG_ULP_STOPPED))
+ goto ulp_stop_exit;
+
edev->flags |= BNXT_EN_FLAG_ULP_STOPPED;
if (aux_priv) {
struct auxiliary_device *adev;
adev = &aux_priv->aux_dev;
if (adev->dev.driver) {
- struct auxiliary_driver *adrv;
+ const struct auxiliary_driver *adrv;
pm_message_t pm = {};
adrv = to_auxiliary_drv(adev->dev.driver);
@@ -195,6 +252,8 @@ void bnxt_ulp_stop(struct bnxt *bp)
adrv->suspend(adev, pm);
}
}
+ulp_stop_exit:
+ mutex_unlock(&edev->en_dev_lock);
}
void bnxt_ulp_start(struct bnxt *bp, int err)
@@ -202,13 +261,13 @@ void bnxt_ulp_start(struct bnxt *bp, int err)
struct bnxt_aux_priv *aux_priv = bp->aux_priv;
struct bnxt_en_dev *edev = bp->edev;
- if (!edev)
+ if (!edev || err)
return;
- edev->flags &= ~BNXT_EN_FLAG_ULP_STOPPED;
-
- if (err)
- return;
+ mutex_lock(&edev->en_dev_lock);
+ if (!bnxt_ulp_registered(edev) ||
+ !(edev->flags & BNXT_EN_FLAG_ULP_STOPPED))
+ goto ulp_start_exit;
if (edev->ulp_tbl->msix_requested)
bnxt_fill_msix_vecs(bp, edev->msix_entries);
@@ -218,22 +277,25 @@ void bnxt_ulp_start(struct bnxt *bp, int err)
adev = &aux_priv->aux_dev;
if (adev->dev.driver) {
- struct auxiliary_driver *adrv;
+ const struct auxiliary_driver *adrv;
adrv = to_auxiliary_drv(adev->dev.driver);
edev->en_state = bp->state;
adrv->resume(adev);
}
}
-
+ulp_start_exit:
+ edev->flags &= ~BNXT_EN_FLAG_ULP_STOPPED;
+ mutex_unlock(&edev->en_dev_lock);
}
void bnxt_ulp_irq_stop(struct bnxt *bp)
{
struct bnxt_en_dev *edev = bp->edev;
struct bnxt_ulp_ops *ops;
+ bool reset = false;
- if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
+ if (!edev)
return;
if (bnxt_ulp_registered(bp->edev)) {
@@ -242,10 +304,12 @@ void bnxt_ulp_irq_stop(struct bnxt *bp)
if (!ulp->msix_requested)
return;
- ops = rtnl_dereference(ulp->ulp_ops);
+ ops = netdev_lock_dereference(ulp->ulp_ops, bp->dev);
if (!ops || !ops->ulp_irq_stop)
return;
- ops->ulp_irq_stop(ulp->handle);
+ if (test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
+ reset = true;
+ ops->ulp_irq_stop(ulp->handle, reset);
}
}
@@ -254,7 +318,7 @@ void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
struct bnxt_en_dev *edev = bp->edev;
struct bnxt_ulp_ops *ops;
- if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
+ if (!edev)
return;
if (bnxt_ulp_registered(bp->edev)) {
@@ -264,7 +328,7 @@ void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
if (!ulp->msix_requested)
return;
- ops = rtnl_dereference(ulp->ulp_ops);
+ ops = netdev_lock_dereference(ulp->ulp_ops, bp->dev);
if (!ops || !ops->ulp_irq_restart)
return;
@@ -280,9 +344,36 @@ void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
}
}
-int bnxt_register_async_events(struct bnxt_en_dev *edev,
- unsigned long *events_bmap,
- u16 max_id)
+void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl)
+{
+ u16 event_id = le16_to_cpu(cmpl->event_id);
+ struct bnxt_en_dev *edev = bp->edev;
+ struct bnxt_ulp_ops *ops;
+ struct bnxt_ulp *ulp;
+
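+ /* Forward the firmware async event to the ULP notifier; ulp_ops is
+ * dereferenced under RCU and the notifier must not sleep.
+ */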
+ if (!bnxt_ulp_registered(edev))
+ return;
+ ulp = edev->ulp_tbl;
+
+ rcu_read_lock();
+
+ ops = rcu_dereference(ulp->ulp_ops);
+ if (!ops || !ops->ulp_async_notifier)
+ goto exit_unlock_rcu;
+ if (!ulp->async_events_bmap || event_id > ulp->max_async_event_id)
+ goto exit_unlock_rcu;
+
+ /* Read max_async_event_id first before testing the bitmap. */
+ smp_rmb();
+
+ if (test_bit(event_id, ulp->async_events_bmap))
+ ops->ulp_async_notifier(ulp->handle, cmpl);
+exit_unlock_rcu:
+ rcu_read_unlock();
+}
+
+void bnxt_register_async_events(struct bnxt_en_dev *edev,
+ unsigned long *events_bmap, u16 max_id)
{
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
@@ -294,7 +385,6 @@ int bnxt_register_async_events(struct bnxt_en_dev *edev,
smp_wmb();
ulp->max_async_event_id = max_id;
bnxt_hwrm_func_drv_rgtr(bp, events_bmap, max_id + 1, true);
- return 0;
}
EXPORT_SYMBOL(bnxt_register_async_events);
@@ -309,7 +399,6 @@ void bnxt_rdma_aux_device_uninit(struct bnxt *bp)
aux_priv = bp->aux_priv;
adev = &aux_priv->aux_dev;
- auxiliary_device_delete(adev);
auxiliary_device_uninit(adev);
}
@@ -327,6 +416,14 @@ static void bnxt_aux_dev_release(struct device *dev)
bp->aux_priv = NULL;
}
+void bnxt_rdma_aux_device_del(struct bnxt *bp)
+{
+ if (!bp->edev)
+ return;
+
+ auxiliary_device_delete(&bp->aux_priv->aux_dev);
+}
+
static void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt *bp)
{
edev->net = bp->dev;
@@ -334,6 +431,7 @@ static void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt *bp)
edev->l2_db_size = bp->db_size;
edev->l2_db_size_nc = bp->db_size;
edev->l2_db_offset = bp->db_offset;
+ mutex_init(&edev->en_dev_lock);
if (bp->flags & BNXT_FLAG_ROCEV1_CAP)
edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP;
@@ -341,13 +439,33 @@ static void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt *bp)
edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP;
if (bp->flags & BNXT_FLAG_VF)
edev->flags |= BNXT_EN_FLAG_VF;
+ if (BNXT_ROCE_VF_RESC_CAP(bp))
+ edev->flags |= BNXT_EN_FLAG_ROCE_VF_RES_MGMT;
+ if (BNXT_SW_RES_LMT(bp))
+ edev->flags |= BNXT_EN_FLAG_SW_RES_LMT;
edev->chip_num = bp->chip_num;
edev->hw_ring_stats_size = bp->hw_ring_stats_size;
edev->pf_port_id = bp->pf.port_id;
edev->en_state = bp->state;
edev->bar0 = bp->bar0;
- edev->ulp_tbl->msix_requested = bnxt_get_ulp_msix_num(bp);
+}
+
+void bnxt_rdma_aux_device_add(struct bnxt *bp)
+{
+ struct auxiliary_device *aux_dev;
+ int rc;
+
+ if (!bp->edev)
+ return;
+
+ aux_dev = &bp->aux_priv->aux_dev;
+ rc = auxiliary_device_add(aux_dev);
+ if (rc) {
+ netdev_warn(bp->dev, "Failed to add auxiliary device for ROCE\n");
+ auxiliary_device_uninit(aux_dev);
+ bp->flags &= ~BNXT_FLAG_ROCE_CAP;
+ }
}
void bnxt_rdma_aux_device_init(struct bnxt *bp)
@@ -404,13 +522,7 @@ void bnxt_rdma_aux_device_init(struct bnxt *bp)
edev->ulp_tbl = ulp;
bp->edev = edev;
bnxt_set_edev_info(edev, bp);
-
- rc = auxiliary_device_add(aux_dev);
- if (rc) {
- netdev_warn(bp->dev,
- "Failed to add auxiliary device for ROCE\n");
- goto aux_dev_uninit;
- }
+ bp->ulp_num_msix_want = bnxt_set_dflt_ulp_msix(bp);
return;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index b9e73de14b57..3c5b8a53f715 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -10,13 +10,12 @@
#ifndef BNXT_ULP_H
#define BNXT_ULP_H
-#define BNXT_ROCE_ULP 0
-#define BNXT_MAX_ULP 1
-
#define BNXT_MIN_ROCE_CP_RINGS 2
#define BNXT_MIN_ROCE_STAT_CTXS 1
-#define BNXT_MAX_ROCE_MSIX 9
-#define BNXT_MAX_VF_ROCE_MSIX 2
+
+#define BNXT_MAX_ROCE_MSIX_VF 2
+#define BNXT_MAX_ROCE_MSIX_NPAR_PF 5
+#define BNXT_MAX_ROCE_MSIX 64
struct hwrm_async_event_cmpl;
struct bnxt;
@@ -28,7 +27,9 @@ struct bnxt_msix_entry {
};
struct bnxt_ulp_ops {
- void (*ulp_irq_stop)(void *);
+ /* async_notifier() cannot sleep (in BH context) */
+ void (*ulp_async_notifier)(void *, struct hwrm_async_event_cmpl *);
+ void (*ulp_irq_stop)(void *, bool);
void (*ulp_irq_restart)(void *, struct bnxt_msix_entry *);
};
@@ -46,8 +47,6 @@ struct bnxt_ulp {
unsigned long *async_events_bmap;
u16 max_async_event_id;
u16 msix_requested;
- u16 msix_base;
- atomic_t ref_count;
};
struct bnxt_en_dev {
@@ -59,10 +58,12 @@ struct bnxt_en_dev {
#define BNXT_EN_FLAG_ROCEV2_CAP 0x2
#define BNXT_EN_FLAG_ROCE_CAP (BNXT_EN_FLAG_ROCEV1_CAP | \
BNXT_EN_FLAG_ROCEV2_CAP)
- #define BNXT_EN_FLAG_MSIX_REQUESTED 0x4
#define BNXT_EN_FLAG_ULP_STOPPED 0x8
#define BNXT_EN_FLAG_VF 0x10
#define BNXT_EN_VF(edev) ((edev)->flags & BNXT_EN_FLAG_VF)
+ #define BNXT_EN_FLAG_ROCE_VF_RES_MGMT 0x20
+ #define BNXT_EN_FLAG_SW_RES_LMT 0x40
+#define BNXT_EN_SW_RES_LMT(edev) ((edev)->flags & BNXT_EN_FLAG_SW_RES_LMT)
struct bnxt_ulp *ulp_tbl;
int l2_db_size; /* Doorbell BAR size in
@@ -86,18 +87,28 @@ struct bnxt_en_dev {
* updated in resume.
*/
void __iomem *bar0;
+
+ u16 ulp_num_msix_vec;
+ u16 ulp_num_ctxs;
+
+ /* serialize ulp operations */
+ struct mutex en_dev_lock;
};
static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev)
{
- if (edev && edev->ulp_tbl)
+ if (edev && rcu_access_pointer(edev->ulp_tbl->ulp_ops))
return true;
return false;
}
int bnxt_get_ulp_msix_num(struct bnxt *bp);
-int bnxt_get_ulp_msix_base(struct bnxt *bp);
+int bnxt_get_ulp_msix_num_in_use(struct bnxt *bp);
+void bnxt_set_ulp_msix_num(struct bnxt *bp, int num);
int bnxt_get_ulp_stat_ctxs(struct bnxt *bp);
+void bnxt_set_ulp_stat_ctxs(struct bnxt *bp, int num_ctxs);
+int bnxt_get_ulp_stat_ctxs_in_use(struct bnxt *bp);
+void bnxt_set_dflt_ulp_stat_ctxs(struct bnxt *bp);
void bnxt_ulp_stop(struct bnxt *bp);
void bnxt_ulp_start(struct bnxt *bp, int err);
void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs);
@@ -105,11 +116,13 @@ void bnxt_ulp_irq_stop(struct bnxt *bp);
void bnxt_ulp_irq_restart(struct bnxt *bp, int err);
void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl);
void bnxt_rdma_aux_device_uninit(struct bnxt *bp);
+void bnxt_rdma_aux_device_del(struct bnxt *bp);
+void bnxt_rdma_aux_device_add(struct bnxt *bp);
void bnxt_rdma_aux_device_init(struct bnxt *bp);
int bnxt_register_dev(struct bnxt_en_dev *edev, struct bnxt_ulp_ops *ulp_ops,
void *handle);
void bnxt_unregister_dev(struct bnxt_en_dev *edev);
int bnxt_send_msg(struct bnxt_en_dev *edev, struct bnxt_fw_msg *fw_msg);
-int bnxt_register_async_events(struct bnxt_en_dev *edev,
- unsigned long *events_bmap, u16 max_id);
+void bnxt_register_async_events(struct bnxt_en_dev *edev,
+ unsigned long *events_bmap, u16 max_id);
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index 1467b94a6427..bd116fd578d8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -12,8 +12,8 @@
#include <linux/rtnetlink.h>
#include <linux/jhash.h>
#include <net/pkt_cls.h>
+#include <linux/bnxt/hsi.h>
-#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_vfr.h"
@@ -257,8 +257,7 @@ bool bnxt_dev_is_vf_rep(struct net_device *dev)
/* Called when the parent PF interface is closed:
* As the mode transition from SWITCHDEV to LEGACY
- * happens under the rtnl_lock() this routine is safe
- * under the rtnl_lock()
+ * happens under the netdev instance lock, this routine is safe
*/
void bnxt_vf_reps_close(struct bnxt *bp)
{
@@ -278,8 +277,7 @@ void bnxt_vf_reps_close(struct bnxt *bp)
/* Called when the parent PF interface is opened (re-opened):
* As the mode transition from SWITCHDEV to LEGACY
- * happen under the rtnl_lock() this routine is safe
- * under the rtnl_lock()
+ * happens under the netdev instance lock, this routine is safe
*/
void bnxt_vf_reps_open(struct bnxt *bp)
{
@@ -348,7 +346,7 @@ void bnxt_vf_reps_destroy(struct bnxt *bp)
/* Ensure that parent PF's and VF-reps' RX/TX has been quiesced
* before proceeding with VF-rep cleanup.
*/
- rtnl_lock();
+ netdev_lock(bp->dev);
if (netif_running(bp->dev)) {
bnxt_close_nic(bp, false, false);
closed = true;
@@ -365,10 +363,10 @@ void bnxt_vf_reps_destroy(struct bnxt *bp)
bnxt_open_nic(bp, false, false);
bp->eswitch_mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
}
- rtnl_unlock();
+ netdev_unlock(bp->dev);
- /* Need to call vf_reps_destroy() outside of rntl_lock
- * as unregister_netdev takes rtnl_lock
+ /* Need to call vf_reps_destroy() outside of netdev instance lock
+ * as unregister_netdev takes it
*/
__bnxt_vf_reps_destroy(bp);
}
@@ -376,7 +374,7 @@ void bnxt_vf_reps_destroy(struct bnxt *bp)
/* Free the VF-Reps in firmware, during firmware hot-reset processing.
* Note that the VF-Rep netdevs are still active (not unregistered) during
* this process. As the mode transition from SWITCHDEV to LEGACY happens
- * under the rtnl_lock() this routine is safe under the rtnl_lock().
+ * under the netdev instance lock, this routine is safe.
*/
void bnxt_vf_reps_free(struct bnxt *bp)
{
@@ -413,7 +411,7 @@ static int bnxt_alloc_vf_rep(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
/* Allocate the VF-Reps in firmware, during firmware hot-reset processing.
* Note that the VF-Rep netdevs are still active (not unregistered) during
* this process. As the mode transition from SWITCHDEV to LEGACY happens
- * under the rtnl_lock() this routine is safe under the rtnl_lock().
+ * under the netdev instance lock, this routine is safe.
*/
int bnxt_vf_reps_alloc(struct bnxt *bp)
{
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 4079538bc310..3e77a96e5a3e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -15,8 +15,9 @@
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/filter.h>
+#include <net/netdev_lock.h>
#include <net/page_pool/helpers.h>
-#include "bnxt_hsi.h"
+#include <linux/bnxt/hsi.h>
#include "bnxt.h"
#include "bnxt_xdp.h"
@@ -48,8 +49,7 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
tx_buf->page = virt_to_head_page(xdp->data);
txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
- flags = (len << TX_BD_LEN_SHIFT) |
- ((num_frags + 1) << TX_BD_FLAGS_BD_CNT_SHIFT) |
+ flags = (len << TX_BD_LEN_SHIFT) | TX_BD_CNT(num_frags + 1) |
bnxt_lhint_arr[len >> 9];
txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 1 + num_frags);
@@ -115,7 +115,7 @@ static void __bnxt_xmit_xdp_redirect(struct bnxt *bp,
tx_buf->action = XDP_REDIRECT;
tx_buf->xdpf = xdpf;
dma_unmap_addr_set(tx_buf, mapping, mapping);
- dma_unmap_len_set(tx_buf, len, 0);
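+ /* record the mapped length so the completion path can unmap it */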
+ dma_unmap_len_set(tx_buf, len, len);
}
void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
@@ -197,7 +197,7 @@ void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
dma_sync_single_for_cpu(&pdev->dev, mapping + offset, len, bp->rx_dir);
xdp_init_buff(xdp, buflen, &rxr->xdp_rxq);
- xdp_prepare_buff(xdp, data_ptr - offset, offset, len, false);
+ xdp_prepare_buff(xdp, data_ptr - offset, offset, len, true);
}
void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
@@ -222,7 +222,7 @@ void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
* false - packet should be passed to the stack.
*/
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
- struct xdp_buff xdp, struct page *page, u8 **data_ptr,
+ struct xdp_buff *xdp, struct page *page, u8 **data_ptr,
unsigned int *len, u8 *event)
{
struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
@@ -244,9 +244,9 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
txr = rxr->bnapi->tx_ring[0];
/* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
- orig_data = xdp.data;
+ orig_data = xdp->data;
- act = bpf_prog_run_xdp(xdp_prog, &xdp);
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
tx_avail = bnxt_tx_avail(bp, txr);
/* If the tx ring is not full, we must not update the rx producer yet
@@ -255,10 +255,10 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
if (tx_avail != bp->tx_ring_size)
*event &= ~BNXT_RX_EVENT;
- *len = xdp.data_end - xdp.data;
- if (orig_data != xdp.data) {
- offset = xdp.data - xdp.data_hard_start;
- *data_ptr = xdp.data_hard_start + offset;
+ *len = xdp->data_end - xdp->data;
+ if (orig_data != xdp->data) {
+ offset = xdp->data - xdp->data_hard_start;
+ *data_ptr = xdp->data_hard_start + offset;
}
switch (act) {
@@ -270,8 +270,8 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
mapping = rx_buf->mapping - bp->rx_dma_offset;
*event &= BNXT_TX_CMP_EVENT;
- if (unlikely(xdp_buff_has_frags(&xdp))) {
- struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(&xdp);
+ if (unlikely(xdp_buff_has_frags(xdp))) {
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
tx_needed += sinfo->nr_frags;
*event = BNXT_AGG_EVENT;
@@ -279,7 +279,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
if (tx_avail < tx_needed) {
trace_xdp_exception(bp->dev, xdp_prog, act);
- bnxt_xdp_buff_frags_free(rxr, &xdp);
+ bnxt_xdp_buff_frags_free(rxr, xdp);
bnxt_reuse_rx_data(rxr, cons, page);
return true;
}
@@ -289,7 +289,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
*event |= BNXT_TX_EVENT;
__bnxt_xmit_xdp(bp, txr, mapping + offset, *len,
- NEXT_RX(rxr->rx_prod), &xdp);
+ NEXT_RX(rxr->rx_prod), xdp);
bnxt_reuse_rx_data(rxr, cons, page);
return true;
case XDP_REDIRECT:
@@ -297,21 +297,16 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
* redirect is coming from a frame received by the
* bnxt_en driver.
*/
- rx_buf = &rxr->rx_buf_ring[cons];
- mapping = rx_buf->mapping - bp->rx_dma_offset;
- dma_unmap_page_attrs(&pdev->dev, mapping,
- BNXT_RX_PAGE_SIZE, bp->rx_dir,
- DMA_ATTR_WEAK_ORDERING);
/* if we are unable to allocate a new buffer, abort and reuse */
if (bnxt_alloc_rx_data(bp, rxr, rxr->rx_prod, GFP_ATOMIC)) {
trace_xdp_exception(bp->dev, xdp_prog, act);
- bnxt_xdp_buff_frags_free(rxr, &xdp);
+ bnxt_xdp_buff_frags_free(rxr, xdp);
bnxt_reuse_rx_data(rxr, cons, page);
return true;
}
- if (xdp_do_redirect(bp->dev, &xdp, xdp_prog)) {
+ if (xdp_do_redirect(bp->dev, xdp, xdp_prog)) {
trace_xdp_exception(bp->dev, xdp_prog, act);
page_pool_recycle_direct(rxr->page_pool, page);
return true;
@@ -326,7 +321,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
trace_xdp_exception(bp->dev, xdp_prog, act);
fallthrough;
case XDP_DROP:
- bnxt_xdp_buff_frags_free(rxr, &xdp);
+ bnxt_xdp_buff_frags_free(rxr, xdp);
bnxt_reuse_rx_data(rxr, cons, page);
break;
}
@@ -387,19 +382,24 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
return nxmit;
}
-/* Under rtnl_lock */
static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
{
struct net_device *dev = bp->dev;
int tx_xdp = 0, tx_cp, rc, tc;
struct bpf_prog *old;
+ netdev_assert_locked(dev);
+
if (prog && !prog->aux->xdp_has_frags &&
bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
netdev_warn(dev, "MTU %d larger than %d without XDP frag support.\n",
bp->dev->mtu, BNXT_MAX_PAGE_MODE_MTU);
return -EOPNOTSUPP;
}
+ if (prog && bp->flags & BNXT_FLAG_HDS) {
+ netdev_warn(dev, "XDP is disallowed when HDS is enabled.\n");
+ return -EOPNOTSUPP;
+ }
if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) {
netdev_warn(dev, "ethtool rx/tx channels must be combined to support XDP.\n");
return -EOPNOTSUPP;
@@ -425,17 +425,10 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
if (prog) {
bnxt_set_rx_skb_mode(bp, true);
- xdp_features_set_redirect_target(dev, true);
+ xdp_features_set_redirect_target_locked(dev, true);
} else {
- int rx, tx;
-
- xdp_features_clear_redirect_target(dev);
+ xdp_features_clear_redirect_target_locked(dev);
bnxt_set_rx_skb_mode(bp, false);
- bnxt_get_max_rings(bp, &rx, &tx, true);
- if (rx > 1) {
- bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
- bp->dev->hw_features |= NETIF_F_LRO;
- }
}
bp->tx_nr_rings_xdp = tx_xdp;
bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc + tx_xdp;
@@ -468,23 +461,15 @@ int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)
struct sk_buff *
bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags,
- struct page_pool *pool, struct xdp_buff *xdp,
- struct rx_cmp_ext *rxcmp1)
+ struct page_pool *pool, struct xdp_buff *xdp)
{
struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
if (!skb)
return NULL;
- skb_checksum_none_assert(skb);
- if (RX_CMP_L4_CS_OK(rxcmp1)) {
- if (bp->dev->features & NETIF_F_RXCSUM) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb->csum_level = RX_CMP_ENCAP(rxcmp1);
- }
- }
- xdp_update_skb_shared_info(skb, num_frags,
- sinfo->xdp_frags_size,
- BNXT_RX_PAGE_SIZE * sinfo->nr_frags,
- xdp_buff_is_frag_pfmemalloc(xdp));
+
+ xdp_update_skb_frags_info(skb, num_frags, sinfo->xdp_frags_size,
+ BNXT_RX_PAGE_SIZE * num_frags,
+ xdp_buff_get_skb_flags(xdp));
return skb;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
index 5e412c5655ba..220285e190fc 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
@@ -18,7 +18,7 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
struct xdp_buff *xdp);
void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget);
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
- struct xdp_buff xdp, struct page *page, u8 **data_ptr,
+ struct xdp_buff *xdp, struct page *page, u8 **data_ptr,
unsigned int *len, u8 *event);
int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp);
int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
@@ -33,6 +33,5 @@ void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
struct xdp_buff *xdp);
struct sk_buff *bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb,
u8 num_frags, struct page_pool *pool,
- struct xdp_buff *xdp,
- struct rx_cmp_ext *rxcmp1);
+ struct xdp_buff *xdp);
#endif
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 3d63177e7e52..6e97a5a7daaf 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -31,6 +31,7 @@
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#include <linux/random.h>
+#include <linux/workqueue.h>
#if IS_ENABLED(CONFIG_VLAN_8021Q)
#define BCM_VLAN 1
#endif
@@ -3015,9 +3016,9 @@ static int cnic_service_bnx2(void *data, void *status_blk)
return cnic_service_bnx2_queues(dev);
}
-static void cnic_service_bnx2_msix(struct tasklet_struct *t)
+static void cnic_service_bnx2_msix(struct work_struct *work)
{
- struct cnic_local *cp = from_tasklet(cp, t, cnic_irq_task);
+ struct cnic_local *cp = from_work(cp, work, cnic_irq_bh_work);
struct cnic_dev *dev = cp->dev;
cp->last_status_idx = cnic_service_bnx2_queues(dev);
@@ -3036,7 +3037,7 @@ static void cnic_doirq(struct cnic_dev *dev)
prefetch(cp->status_blk.gen);
prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
- tasklet_schedule(&cp->cnic_irq_task);
+ queue_work(system_bh_wq, &cp->cnic_irq_bh_work);
}
}
@@ -3140,9 +3141,9 @@ static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
return last_status;
}
-static void cnic_service_bnx2x_bh(struct tasklet_struct *t)
+static void cnic_service_bnx2x_bh_work(struct work_struct *work)
{
- struct cnic_local *cp = from_tasklet(cp, t, cnic_irq_task);
+ struct cnic_local *cp = from_work(cp, work, cnic_irq_bh_work);
struct cnic_dev *dev = cp->dev;
struct bnx2x *bp = netdev_priv(dev->netdev);
u32 status_idx, new_status_idx;
@@ -3682,7 +3683,8 @@ static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
#if defined(CONFIG_INET)
struct rtable *rt;
- rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0);
+ rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0,
+ RT_SCOPE_UNIVERSE);
if (!IS_ERR(rt)) {
*dst = &rt->dst;
return 0;
@@ -4228,8 +4230,7 @@ static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
cnic_bnx2x_delete_wait(dev, 0);
- cancel_delayed_work(&cp->delete_task);
- flush_workqueue(cnic_wq);
+ cancel_delayed_work_sync(&cp->delete_task);
if (atomic_read(&cp->iscsi_conn) != 0)
netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
@@ -4427,7 +4428,7 @@ static void cnic_free_irq(struct cnic_dev *dev)
if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
cp->disable_int_sync(dev);
- tasklet_kill(&cp->cnic_irq_task);
+ cancel_work_sync(&cp->cnic_irq_bh_work);
free_irq(ethdev->irq_arr[0].vector, dev);
}
}
@@ -4440,7 +4441,7 @@ static int cnic_request_irq(struct cnic_dev *dev)
err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev);
if (err)
- tasklet_disable(&cp->cnic_irq_task);
+ disable_work_sync(&cp->cnic_irq_bh_work);
return err;
}
@@ -4463,7 +4464,7 @@ static int cnic_init_bnx2_irq(struct cnic_dev *dev)
CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
cp->last_status_idx = cp->status_blk.bnx2->status_idx;
- tasklet_setup(&cp->cnic_irq_task, cnic_service_bnx2_msix);
+ INIT_WORK(&cp->cnic_irq_bh_work, cnic_service_bnx2_msix);
err = cnic_request_irq(dev);
if (err)
return err;
@@ -4872,7 +4873,7 @@ static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
struct cnic_eth_dev *ethdev = cp->ethdev;
int err = 0;
- tasklet_setup(&cp->cnic_irq_task, cnic_service_bnx2x_bh);
+ INIT_WORK(&cp->cnic_irq_bh_work, cnic_service_bnx2x_bh_work);
if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
err = cnic_request_irq(dev);
diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h
index fedc84ada937..1a314a75d2d2 100644
--- a/drivers/net/ethernet/broadcom/cnic.h
+++ b/drivers/net/ethernet/broadcom/cnic.h
@@ -268,7 +268,7 @@ struct cnic_local {
u32 bnx2x_igu_sb_id;
u32 int_num;
u32 last_status_idx;
- struct tasklet_struct cnic_irq_task;
+ struct work_struct cnic_irq_bh_work;
struct kcqe *completed_kcq[MAX_COMPLETED_KCQE];
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index b1f84b37032a..05512aa10c20 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2,7 +2,7 @@
/*
* Broadcom GENET (Gigabit Ethernet) controller driver
*
- * Copyright (c) 2014-2020 Broadcom
+ * Copyright (c) 2014-2025 Broadcom
*/
#define pr_fmt(fmt) "bcmgenet: " fmt
@@ -35,21 +35,18 @@
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/phy.h>
-#include <linux/platform_data/bcmgenet.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "bcmgenet.h"
-/* Maximum number of hardware queues, downsized if needed */
-#define GENET_MAX_MQ_CNT 4
-
/* Default highest priority queue for multi queue support */
-#define GENET_Q0_PRIORITY 0
+#define GENET_Q1_PRIORITY 0
+#define GENET_Q0_PRIORITY 1
-#define GENET_Q16_RX_BD_CNT \
+#define GENET_Q0_RX_BD_CNT \
(TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q)
-#define GENET_Q16_TX_BD_CNT \
+#define GENET_Q0_TX_BD_CNT \
(TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q)
#define RX_BUF_LENGTH 2048
@@ -104,7 +101,7 @@ static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
* the platform is explicitly configured for 64-bits/LPAE.
*/
#ifdef CONFIG_PHYS_ADDR_T_64BIT
- if (priv->hw_params->flags & GENET_HAS_40BITS)
+ if (bcmgenet_has_40bits(priv))
bcmgenet_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
#endif
}
@@ -446,33 +443,48 @@ static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index)
u32 offset;
u32 reg;
- offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
- reg = bcmgenet_hfb_reg_readl(priv, offset);
- reg |= (1 << (f_index % 32));
- bcmgenet_hfb_reg_writel(priv, reg, offset);
- reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
- reg |= RBUF_HFB_EN;
- bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
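+ /* GENET v1/v2 keep the per-filter enable bits in HFB_CTRL itself,
+ * while v3+ has dedicated HFB_FLT_ENABLE registers.
+ */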
+ if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) {
+ reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+ reg |= (1 << ((f_index % 32) + RBUF_HFB_FILTER_EN_SHIFT)) |
+ RBUF_HFB_EN;
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+ } else {
+ offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
+ reg = bcmgenet_hfb_reg_readl(priv, offset);
+ reg |= (1 << (f_index % 32));
+ bcmgenet_hfb_reg_writel(priv, reg, offset);
+ reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+ reg |= RBUF_HFB_EN;
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+ }
}
static void bcmgenet_hfb_disable_filter(struct bcmgenet_priv *priv, u32 f_index)
{
u32 offset, reg, reg1;
- offset = HFB_FLT_ENABLE_V3PLUS;
- reg = bcmgenet_hfb_reg_readl(priv, offset);
- reg1 = bcmgenet_hfb_reg_readl(priv, offset + sizeof(u32));
- if (f_index < 32) {
- reg1 &= ~(1 << (f_index % 32));
- bcmgenet_hfb_reg_writel(priv, reg1, offset + sizeof(u32));
- } else {
- reg &= ~(1 << (f_index % 32));
- bcmgenet_hfb_reg_writel(priv, reg, offset);
- }
- if (!reg && !reg1) {
+ if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) {
reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
- reg &= ~RBUF_HFB_EN;
+ reg &= ~(1 << ((f_index % 32) + RBUF_HFB_FILTER_EN_SHIFT));
+ if (!(reg & RBUF_HFB_FILTER_EN_MASK))
+ reg &= ~RBUF_HFB_EN;
bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+ } else {
+ offset = HFB_FLT_ENABLE_V3PLUS;
+ reg = bcmgenet_hfb_reg_readl(priv, offset);
+ reg1 = bcmgenet_hfb_reg_readl(priv, offset + sizeof(u32));
+ if (f_index < 32) {
+ reg1 &= ~(1 << (f_index % 32));
+ bcmgenet_hfb_reg_writel(priv, reg1, offset + sizeof(u32));
+ } else {
+ reg &= ~(1 << (f_index % 32));
+ bcmgenet_hfb_reg_writel(priv, reg, offset);
+ }
+ if (!reg && !reg1) {
+ reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+ reg &= ~RBUF_HFB_EN;
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+ }
}
}
@@ -482,6 +494,9 @@ static void bcmgenet_hfb_set_filter_rx_queue_mapping(struct bcmgenet_priv *priv,
u32 offset;
u32 reg;
+ if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
+ return;
+
offset = f_index / 8;
reg = bcmgenet_rdma_readl(priv, DMA_INDEX2RING_0 + offset);
reg &= ~(0xF << (4 * (f_index % 8)));
@@ -495,9 +510,13 @@ static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv,
u32 offset;
u32 reg;
- offset = HFB_FLT_LEN_V3PLUS +
- ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4) *
- sizeof(u32);
+ if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
+ offset = HFB_FLT_LEN_V2;
+ else
+ offset = HFB_FLT_LEN_V3PLUS;
+
+ offset += sizeof(u32) *
+ ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4);
reg = bcmgenet_hfb_reg_readl(priv, offset);
reg &= ~(0xFF << (8 * (f_index % 4)));
reg |= ((f_length & 0xFF) << (8 * (f_index % 4)));
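The rewritten offset/shift math above packs one 8-bit filter length per byte lane, four filters per 32-bit register, with registers indexed from the last filter downwards. A standalone check of that arithmetic (the filter count matches the V3+ hw_params further down in this diff; the base offset here is purely illustrative):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        const unsigned int hfb_filter_cnt = 48;  /* GENET v3+ value from hw_params */
        const unsigned int base = 0x0;           /* stands in for HFB_FLT_LEN_V3PLUS */

        for (unsigned int f = 0; f < hfb_filter_cnt; f++) {
                unsigned int offset = base +
                        sizeof(uint32_t) * ((hfb_filter_cnt - 1 - f) / 4);
                unsigned int shift = 8 * (f % 4);

                printf("filter %2u: length lives at reg 0x%02x, bits %u..%u\n",
                       f, offset, shift, shift + 7);
        }
        return 0;
}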
@@ -579,13 +598,13 @@ static void bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
struct bcmgenet_rxnfc_rule *rule)
{
struct ethtool_rx_flow_spec *fs = &rule->fs;
- u32 offset = 0, f_length = 0, f;
+ u32 offset = 0, f_length = 0, f, q;
u8 val_8, mask_8;
__be16 val_16;
u16 mask_16;
size_t size;
- f = fs->location;
+ f = fs->location + 1;
if (fs->flow_type & FLOW_MAC_EXT) {
bcmgenet_hfb_insert_data(priv, f, 0,
&fs->h_ext.h_dest, &fs->m_ext.h_dest,
@@ -667,19 +686,16 @@ static void bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
}
bcmgenet_hfb_set_filter_length(priv, f, 2 * f_length);
- if (!fs->ring_cookie || fs->ring_cookie == RX_CLS_FLOW_WAKE) {
- /* Ring 0 flows can be handled by the default Descriptor Ring
- * We'll map them to ring 0, but don't enable the filter
- */
- bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, 0);
- rule->state = BCMGENET_RXNFC_STATE_DISABLED;
- } else {
+ if (fs->ring_cookie == RX_CLS_FLOW_WAKE)
+ q = 0;
+ else if (fs->ring_cookie == RX_CLS_FLOW_DISC)
+ q = priv->hw_params->rx_queues + 1;
+ else
/* Other Rx rings are direct mapped here */
- bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f,
- fs->ring_cookie);
- bcmgenet_hfb_enable_filter(priv, f);
- rule->state = BCMGENET_RXNFC_STATE_ENABLED;
- }
+ q = fs->ring_cookie;
+ bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, q);
+ bcmgenet_hfb_enable_filter(priv, f);
+ rule->state = BCMGENET_RXNFC_STATE_ENABLED;
}
/* bcmgenet_hfb_clear
@@ -690,6 +706,7 @@ static void bcmgenet_hfb_clear_filter(struct bcmgenet_priv *priv, u32 f_index)
{
u32 base, i;
+ bcmgenet_hfb_set_filter_length(priv, f_index, 0);
base = f_index * priv->hw_params->hfb_filter_size;
for (i = 0; i < priv->hw_params->hfb_filter_size; i++)
bcmgenet_hfb_writel(priv, 0x0, (base + i) * sizeof(u32));
@@ -699,22 +716,23 @@ static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)
{
u32 i;
- if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
- return;
-
- bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL);
- bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS);
- bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4);
-
- for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++)
- bcmgenet_rdma_writel(priv, 0x0, i);
+ bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL);
- for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++)
- bcmgenet_hfb_reg_writel(priv, 0x0,
- HFB_FLT_LEN_V3PLUS + i * sizeof(u32));
+ if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv)) {
+ bcmgenet_hfb_reg_writel(priv, 0,
+ HFB_FLT_ENABLE_V3PLUS);
+ bcmgenet_hfb_reg_writel(priv, 0,
+ HFB_FLT_ENABLE_V3PLUS + 4);
+ for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++)
+ bcmgenet_rdma_writel(priv, 0, i);
+ }
for (i = 0; i < priv->hw_params->hfb_filter_cnt; i++)
bcmgenet_hfb_clear_filter(priv, i);
+
+ /* Enable filter 0 to send default flow to ring 0 */
+ bcmgenet_hfb_set_filter_length(priv, 0, 4);
+ bcmgenet_hfb_enable_filter(priv, 0);
}
static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
@@ -722,9 +740,6 @@ static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
int i;
INIT_LIST_HEAD(&priv->rxnfc_list);
- if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
- return;
-
for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
INIT_LIST_HEAD(&priv->rxnfc_rules[i].list);
priv->rxnfc_rules[i].state = BCMGENET_RXNFC_STATE_UNUSED;
@@ -819,20 +834,16 @@ static int bcmgenet_get_coalesce(struct net_device *dev,
unsigned int i;
ec->tx_max_coalesced_frames =
- bcmgenet_tdma_ring_readl(priv, DESC_INDEX,
- DMA_MBUF_DONE_THRESH);
+ bcmgenet_tdma_ring_readl(priv, 0, DMA_MBUF_DONE_THRESH);
ec->rx_max_coalesced_frames =
- bcmgenet_rdma_ring_readl(priv, DESC_INDEX,
- DMA_MBUF_DONE_THRESH);
+ bcmgenet_rdma_ring_readl(priv, 0, DMA_MBUF_DONE_THRESH);
ec->rx_coalesce_usecs =
- bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT) * 8192 / 1000;
+ bcmgenet_rdma_readl(priv, DMA_RING0_TIMEOUT) * 8192 / 1000;
- for (i = 0; i < priv->hw_params->rx_queues; i++) {
+ for (i = 0; i <= priv->hw_params->rx_queues; i++) {
ring = &priv->rx_rings[i];
ec->use_adaptive_rx_coalesce |= ring->dim.use_dim;
}
- ring = &priv->rx_rings[DESC_INDEX];
- ec->use_adaptive_rx_coalesce |= ring->dim.use_dim;
return 0;
}
@@ -902,17 +913,13 @@ static int bcmgenet_set_coalesce(struct net_device *dev,
/* Program all TX queues with the same values, as there is no
* ethtool knob to do coalescing on a per-queue basis
*/
- for (i = 0; i < priv->hw_params->tx_queues; i++)
+ for (i = 0; i <= priv->hw_params->tx_queues; i++)
bcmgenet_tdma_ring_writel(priv, i,
ec->tx_max_coalesced_frames,
DMA_MBUF_DONE_THRESH);
- bcmgenet_tdma_ring_writel(priv, DESC_INDEX,
- ec->tx_max_coalesced_frames,
- DMA_MBUF_DONE_THRESH);
- for (i = 0; i < priv->hw_params->rx_queues; i++)
+ for (i = 0; i <= priv->hw_params->rx_queues; i++)
bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[i], ec);
- bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[DESC_INDEX], ec);
return 0;
}
@@ -961,12 +968,13 @@ static int bcmgenet_set_pauseparam(struct net_device *dev,
/* standard ethtool support functions. */
enum bcmgenet_stat_type {
- BCMGENET_STAT_NETDEV = -1,
+ BCMGENET_STAT_RTNL = -1,
BCMGENET_STAT_MIB_RX,
BCMGENET_STAT_MIB_TX,
BCMGENET_STAT_RUNT,
BCMGENET_STAT_MISC,
BCMGENET_STAT_SOFT,
+ BCMGENET_STAT_SOFT64,
};
struct bcmgenet_stats {
@@ -976,13 +984,15 @@ struct bcmgenet_stats {
enum bcmgenet_stat_type type;
/* reg offset from UMAC base for misc counters */
u16 reg_offset;
+ /* sync for u64 stats counters */
+ int syncp_offset;
};
-#define STAT_NETDEV(m) { \
+#define STAT_RTNL(m) { \
.stat_string = __stringify(m), \
- .stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
- .stat_offset = offsetof(struct net_device_stats, m), \
- .type = BCMGENET_STAT_NETDEV, \
+ .stat_sizeof = sizeof(((struct rtnl_link_stats64 *)0)->m), \
+ .stat_offset = offsetof(struct rtnl_link_stats64, m), \
+ .type = BCMGENET_STAT_RTNL, \
}
#define STAT_GENET_MIB(str, m, _type) { \
@@ -992,6 +1002,14 @@ struct bcmgenet_stats {
.type = _type, \
}
+#define STAT_GENET_SOFT_MIB64(str, s, m) { \
+ .stat_string = str, \
+ .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->s.m), \
+ .stat_offset = offsetof(struct bcmgenet_priv, s.m), \
+ .type = BCMGENET_STAT_SOFT64, \
+ .syncp_offset = offsetof(struct bcmgenet_priv, s.syncp), \
+}
+
#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
@@ -1006,18 +1024,38 @@ struct bcmgenet_stats {
}
#define STAT_GENET_Q(num) \
- STAT_GENET_SOFT_MIB("txq" __stringify(num) "_packets", \
- tx_rings[num].packets), \
- STAT_GENET_SOFT_MIB("txq" __stringify(num) "_bytes", \
- tx_rings[num].bytes), \
- STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_bytes", \
- rx_rings[num].bytes), \
- STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_packets", \
- rx_rings[num].packets), \
- STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_errors", \
- rx_rings[num].errors), \
- STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_dropped", \
- rx_rings[num].dropped)
+ STAT_GENET_SOFT_MIB64("txq" __stringify(num) "_packets", \
+ tx_rings[num].stats64, packets), \
+ STAT_GENET_SOFT_MIB64("txq" __stringify(num) "_bytes", \
+ tx_rings[num].stats64, bytes), \
+ STAT_GENET_SOFT_MIB64("txq" __stringify(num) "_errors", \
+ tx_rings[num].stats64, errors), \
+ STAT_GENET_SOFT_MIB64("txq" __stringify(num) "_dropped", \
+ tx_rings[num].stats64, dropped), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_bytes", \
+ rx_rings[num].stats64, bytes), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_packets", \
+ rx_rings[num].stats64, packets), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_errors", \
+ rx_rings[num].stats64, errors), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_dropped", \
+ rx_rings[num].stats64, dropped), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_multicast", \
+ rx_rings[num].stats64, multicast), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_missed", \
+ rx_rings[num].stats64, missed), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_length_errors", \
+ rx_rings[num].stats64, length_errors), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_over_errors", \
+ rx_rings[num].stats64, over_errors), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_crc_errors", \
+ rx_rings[num].stats64, crc_errors), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_frame_errors", \
+ rx_rings[num].stats64, frame_errors), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_fragmented_errors", \
+ rx_rings[num].stats64, fragmented_errors), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_broadcast", \
+ rx_rings[num].stats64, broadcast)
/* There is a 0xC gap between the end of RX and beginning of TX stats and then
* between the end of TX stats and the beginning of the RX RUNT
@@ -1029,15 +1067,20 @@ struct bcmgenet_stats {
*/
static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
/* general stats */
- STAT_NETDEV(rx_packets),
- STAT_NETDEV(tx_packets),
- STAT_NETDEV(rx_bytes),
- STAT_NETDEV(tx_bytes),
- STAT_NETDEV(rx_errors),
- STAT_NETDEV(tx_errors),
- STAT_NETDEV(rx_dropped),
- STAT_NETDEV(tx_dropped),
- STAT_NETDEV(multicast),
+ STAT_RTNL(rx_packets),
+ STAT_RTNL(tx_packets),
+ STAT_RTNL(rx_bytes),
+ STAT_RTNL(tx_bytes),
+ STAT_RTNL(rx_errors),
+ STAT_RTNL(tx_errors),
+ STAT_RTNL(rx_dropped),
+ STAT_RTNL(tx_dropped),
+ STAT_RTNL(multicast),
+ STAT_RTNL(rx_missed_errors),
+ STAT_RTNL(rx_length_errors),
+ STAT_RTNL(rx_over_errors),
+ STAT_RTNL(rx_crc_errors),
+ STAT_RTNL(rx_frame_errors),
/* UniMAC RSV counters */
STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
@@ -1120,11 +1163,25 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
STAT_GENET_Q(1),
STAT_GENET_Q(2),
STAT_GENET_Q(3),
- STAT_GENET_Q(16),
+ STAT_GENET_Q(4),
};
#define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats)
+#define BCMGENET_STATS64_ADD(stats, m, v) \
+ do { \
+ u64_stats_update_begin(&stats->syncp); \
+ u64_stats_add(&stats->m, v); \
+ u64_stats_update_end(&stats->syncp); \
+ } while (0)
+
+#define BCMGENET_STATS64_INC(stats, m) \
+ do { \
+ u64_stats_update_begin(&stats->syncp); \
+ u64_stats_inc(&stats->m); \
+ u64_stats_update_end(&stats->syncp); \
+ } while (0)
+
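The BCMGENET_STATS64_ADD()/_INC() helpers and the per-ring stats64 structs lean on the generic u64_stats_sync API, which keeps 64-bit counters cheap on 64-bit machines and seqcount-protected on 32-bit ones. A minimal sketch of both sides of that API on a hypothetical per-ring struct (not the driver's layout); u64_stats_init(&s->syncp) is assumed to have run at ring setup:

#include <linux/u64_stats_sync.h>

struct foo_rx_stats {
        struct u64_stats_sync syncp;
        u64_stats_t packets;
        u64_stats_t bytes;
};

/* Writer side: called from the single NAPI context that owns this ring. */
static void foo_rx_account(struct foo_rx_stats *s, unsigned int len)
{
        u64_stats_update_begin(&s->syncp);
        u64_stats_inc(&s->packets);
        u64_stats_add(&s->bytes, len);
        u64_stats_update_end(&s->syncp);
}

/* Reader side: retries if a 32-bit writer was caught mid-update. */
static void foo_rx_read(struct foo_rx_stats *s, u64 *packets, u64 *bytes)
{
        unsigned int start;

        do {
                start = u64_stats_fetch_begin(&s->syncp);
                *packets = u64_stats_read(&s->packets);
                *bytes = u64_stats_read(&s->bytes);
        } while (u64_stats_fetch_retry(&s->syncp, start));
}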
static void bcmgenet_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
@@ -1144,14 +1201,14 @@ static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
u8 *data)
{
+ const char *str;
int i;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < BCMGENET_STATS_LEN; i++) {
- memcpy(data + i * ETH_GSTRING_LEN,
- bcmgenet_gstrings_stats[i].stat_string,
- ETH_GSTRING_LEN);
+ str = bcmgenet_gstrings_stats[i].stat_string;
+ ethtool_puts(&data, str);
}
break;
}
@@ -1208,8 +1265,9 @@ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
s = &bcmgenet_gstrings_stats[i];
switch (s->type) {
- case BCMGENET_STAT_NETDEV:
+ case BCMGENET_STAT_RTNL:
case BCMGENET_STAT_SOFT:
+ case BCMGENET_STAT_SOFT64:
continue;
case BCMGENET_STAT_RUNT:
offset += BCMGENET_STAT_OFFSET;
@@ -1247,28 +1305,40 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev,
u64 *data)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct rtnl_link_stats64 stats64;
+ struct u64_stats_sync *syncp;
+ unsigned int start;
int i;
if (netif_running(dev))
bcmgenet_update_mib_counters(priv);
- dev->netdev_ops->ndo_get_stats(dev);
+ dev_get_stats(dev, &stats64);
for (i = 0; i < BCMGENET_STATS_LEN; i++) {
const struct bcmgenet_stats *s;
char *p;
s = &bcmgenet_gstrings_stats[i];
- if (s->type == BCMGENET_STAT_NETDEV)
- p = (char *)&dev->stats;
- else
- p = (char *)priv;
- p += s->stat_offset;
- if (sizeof(unsigned long) != sizeof(u32) &&
- s->stat_sizeof == sizeof(unsigned long))
- data[i] = *(unsigned long *)p;
- else
- data[i] = *(u32 *)p;
+ p = (char *)priv;
+
+ if (s->type == BCMGENET_STAT_SOFT64) {
+ syncp = (struct u64_stats_sync *)(p + s->syncp_offset);
+ do {
+ start = u64_stats_fetch_begin(syncp);
+ data[i] = u64_stats_read((u64_stats_t *)(p + s->stat_offset));
+ } while (u64_stats_fetch_retry(syncp, start));
+ } else {
+ if (s->type == BCMGENET_STAT_RTNL)
+ p = (char *)&stats64;
+
+ p += s->stat_offset;
+ if (sizeof(unsigned long) != sizeof(u32) &&
+ s->stat_sizeof == sizeof(unsigned long))
+ data[i] = *(unsigned long *)p;
+ else
+ data[i] = *(u32 *)p;
+ }
}
}
@@ -1438,7 +1508,8 @@ static int bcmgenet_insert_flow(struct net_device *dev,
}
if (cmd->fs.ring_cookie > priv->hw_params->rx_queues &&
- cmd->fs.ring_cookie != RX_CLS_FLOW_WAKE) {
+ cmd->fs.ring_cookie != RX_CLS_FLOW_WAKE &&
+ cmd->fs.ring_cookie != RX_CLS_FLOW_DISC) {
netdev_err(dev, "rxnfc: Unsupported action (%llu)\n",
cmd->fs.ring_cookie);
return -EINVAL;
@@ -1472,10 +1543,10 @@ static int bcmgenet_insert_flow(struct net_device *dev,
loc_rule = &priv->rxnfc_rules[cmd->fs.location];
}
if (loc_rule->state == BCMGENET_RXNFC_STATE_ENABLED)
- bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
+ bcmgenet_hfb_disable_filter(priv, cmd->fs.location + 1);
if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED) {
list_del(&loc_rule->list);
- bcmgenet_hfb_clear_filter(priv, cmd->fs.location);
+ bcmgenet_hfb_clear_filter(priv, cmd->fs.location + 1);
}
loc_rule->state = BCMGENET_RXNFC_STATE_UNUSED;
memcpy(&loc_rule->fs, &cmd->fs,
@@ -1505,10 +1576,10 @@ static int bcmgenet_delete_flow(struct net_device *dev,
}
if (rule->state == BCMGENET_RXNFC_STATE_ENABLED)
- bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
+ bcmgenet_hfb_disable_filter(priv, cmd->fs.location + 1);
if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) {
list_del(&rule->list);
- bcmgenet_hfb_clear_filter(priv, cmd->fs.location);
+ bcmgenet_hfb_clear_filter(priv, cmd->fs.location + 1);
}
rule->state = BCMGENET_RXNFC_STATE_UNUSED;
memset(&rule->fs, 0, sizeof(struct ethtool_rx_flow_spec));
@@ -1569,6 +1640,13 @@ static int bcmgenet_get_num_flows(struct bcmgenet_priv *priv)
return res;
}
+static u32 bcmgenet_get_rx_ring_count(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ return priv->hw_params->rx_queues ?: 1;
+}
+
static int bcmgenet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
@@ -1578,9 +1656,6 @@ static int bcmgenet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
int i = 0;
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = priv->hw_params->rx_queues ?: 1;
- break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = bcmgenet_get_num_flows(priv);
cmd->data = MAX_NUM_OF_FS_RULES | RX_CLS_LOC_SPECIAL;
@@ -1629,6 +1704,7 @@ static const struct ethtool_ops bcmgenet_ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info,
.get_rxnfc = bcmgenet_get_rxnfc,
.set_rxnfc = bcmgenet_set_rxnfc,
+ .get_rx_ring_count = bcmgenet_get_rx_ring_count,
.get_pauseparam = bcmgenet_get_pauseparam,
.set_pauseparam = bcmgenet_set_pauseparam,
};
@@ -1651,9 +1727,9 @@ static int bcmgenet_power_down(struct bcmgenet_priv *priv,
case GENET_POWER_PASSIVE:
/* Power down LED */
- if (priv->hw_params->flags & GENET_HAS_EXT) {
+ if (bcmgenet_has_ext(priv)) {
reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
- if (GENET_IS_V5(priv) && !priv->ephy_16nm)
+ if (GENET_IS_V5(priv) && !bcmgenet_has_ephy_16nm(priv))
reg |= EXT_PWR_DOWN_PHY_EN |
EXT_PWR_DOWN_PHY_RD |
EXT_PWR_DOWN_PHY_SD |
@@ -1676,13 +1752,14 @@ static int bcmgenet_power_down(struct bcmgenet_priv *priv,
return ret;
}
-static void bcmgenet_power_up(struct bcmgenet_priv *priv,
- enum bcmgenet_power_mode mode)
+static int bcmgenet_power_up(struct bcmgenet_priv *priv,
+ enum bcmgenet_power_mode mode)
{
+ int ret = 0;
u32 reg;
- if (!(priv->hw_params->flags & GENET_HAS_EXT))
- return;
+ if (!bcmgenet_has_ext(priv))
+ return ret;
reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
@@ -1690,7 +1767,7 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
case GENET_POWER_PASSIVE:
reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS |
EXT_ENERGY_DET_MASK);
- if (GENET_IS_V5(priv) && !priv->ephy_16nm) {
+ if (GENET_IS_V5(priv) && !bcmgenet_has_ephy_16nm(priv)) {
reg &= ~(EXT_PWR_DOWN_PHY_EN |
EXT_PWR_DOWN_PHY_RD |
EXT_PWR_DOWN_PHY_SD |
@@ -1718,11 +1795,13 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
}
break;
case GENET_POWER_WOL_MAGIC:
- bcmgenet_wol_power_up_cfg(priv, mode);
- return;
+ ret = bcmgenet_wol_power_up_cfg(priv, mode);
+ break;
default:
break;
}
+
+ return ret;
}
static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
@@ -1759,18 +1838,6 @@ static struct enet_cb *bcmgenet_put_txcb(struct bcmgenet_priv *priv,
return tx_cb_ptr;
}
-static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring)
-{
- bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
- INTRL2_CPU_MASK_SET);
-}
-
-static inline void bcmgenet_rx_ring16_int_enable(struct bcmgenet_rx_ring *ring)
-{
- bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
- INTRL2_CPU_MASK_CLEAR);
-}
-
static inline void bcmgenet_rx_ring_int_disable(struct bcmgenet_rx_ring *ring)
{
bcmgenet_intrl2_1_writel(ring->priv,
@@ -1785,18 +1852,6 @@ static inline void bcmgenet_rx_ring_int_enable(struct bcmgenet_rx_ring *ring)
INTRL2_CPU_MASK_CLEAR);
}
-static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_tx_ring *ring)
-{
- bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
- INTRL2_CPU_MASK_SET);
-}
-
-static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_tx_ring *ring)
-{
- bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
- INTRL2_CPU_MASK_CLEAR);
-}
-
static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_tx_ring *ring)
{
bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
@@ -1868,6 +1923,7 @@ static struct sk_buff *bcmgenet_free_rx_cb(struct device *dev,
static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
struct bcmgenet_tx_ring *ring)
{
+ struct bcmgenet_tx_stats64 *stats = &ring->stats64;
struct bcmgenet_priv *priv = netdev_priv(dev);
unsigned int txbds_processed = 0;
unsigned int bytes_compl = 0;
@@ -1877,12 +1933,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
struct sk_buff *skb;
/* Clear status before servicing to reduce spurious interrupts */
- if (ring->index == DESC_INDEX)
- bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_TXDMA_DONE,
- INTRL2_CPU_CLEAR);
- else
- bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
- INTRL2_CPU_CLEAR);
+ bcmgenet_intrl2_1_writel(priv, (1 << ring->index), INTRL2_CPU_CLEAR);
/* Compute how many buffers are transmitted since last xmit call */
c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX)
@@ -1913,22 +1964,51 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
ring->free_bds += txbds_processed;
ring->c_index = c_index;
- ring->packets += pkts_compl;
- ring->bytes += bytes_compl;
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_add(&stats->packets, pkts_compl);
+ u64_stats_add(&stats->bytes, bytes_compl);
+ u64_stats_update_end(&stats->syncp);
- netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue),
+ netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->index),
pkts_compl, bytes_compl);
return txbds_processed;
}
static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
- struct bcmgenet_tx_ring *ring)
+ struct bcmgenet_tx_ring *ring,
+ bool all)
{
- unsigned int released;
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct device *kdev = &priv->pdev->dev;
+ unsigned int released, drop, wr_ptr;
+ struct enet_cb *cb_ptr;
+ struct sk_buff *skb;
spin_lock_bh(&ring->lock);
released = __bcmgenet_tx_reclaim(dev, ring);
+ if (all) {
+ skb = NULL;
+ drop = (ring->prod_index - ring->c_index) & DMA_C_INDEX_MASK;
+ released += drop;
+ ring->prod_index = ring->c_index & DMA_C_INDEX_MASK;
+ while (drop--) {
+ cb_ptr = bcmgenet_put_txcb(priv, ring);
+ skb = cb_ptr->skb;
+ bcmgenet_free_tx_cb(kdev, cb_ptr);
+ if (skb && cb_ptr == GENET_CB(skb)->first_cb) {
+ dev_consume_skb_any(skb);
+ skb = NULL;
+ }
+ }
+ if (skb)
+ dev_consume_skb_any(skb);
+ bcmgenet_tdma_ring_writel(priv, ring->index,
+ ring->prod_index, TDMA_PROD_INDEX);
+ wr_ptr = ring->write_ptr * WORDS_PER_BD(priv);
+ bcmgenet_tdma_ring_writel(priv, ring->index, wr_ptr,
+ TDMA_WRITE_PTR);
+ }
spin_unlock_bh(&ring->lock);
return released;
@@ -1944,14 +2024,14 @@ static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
spin_lock(&ring->lock);
work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring);
if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
- txq = netdev_get_tx_queue(ring->priv->dev, ring->queue);
+ txq = netdev_get_tx_queue(ring->priv->dev, ring->index);
netif_tx_wake_queue(txq);
}
spin_unlock(&ring->lock);
if (work_done == 0) {
napi_complete(napi);
- ring->int_enable(ring);
+ bcmgenet_tx_ring_int_enable(ring);
return 0;
}
@@ -1962,22 +2042,21 @@ static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
static void bcmgenet_tx_reclaim_all(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- int i;
-
- if (netif_is_multiqueue(dev)) {
- for (i = 0; i < priv->hw_params->tx_queues; i++)
- bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]);
- }
+ int i = 0;
- bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
+ do {
+ bcmgenet_tx_reclaim(dev, &priv->tx_rings[i++], true);
+ } while (i <= priv->hw_params->tx_queues && netif_is_multiqueue(dev));
}
/* Reallocate the SKB to put enough headroom in front of it and insert
* the transmit checksum offsets in the descriptors
*/
static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ struct bcmgenet_tx_ring *ring)
{
+ struct bcmgenet_tx_stats64 *stats = &ring->stats64;
struct bcmgenet_priv *priv = netdev_priv(dev);
struct status_64 *status = NULL;
struct sk_buff *new_skb;
@@ -1994,7 +2073,7 @@ static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev,
if (!new_skb) {
dev_kfree_skb_any(skb);
priv->mib.tx_realloc_tsb_failed++;
- dev->stats.tx_dropped++;
+ BCMGENET_STATS64_INC(stats, dropped);
return NULL;
}
dev_consume_skb_any(skb);
@@ -2057,19 +2136,14 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
index = skb_get_queue_mapping(skb);
/* Mapping strategy:
- * queue_mapping = 0, unclassified, packet xmited through ring16
- * queue_mapping = 1, goes to ring 0. (highest priority queue
- * queue_mapping = 2, goes to ring 1.
- * queue_mapping = 3, goes to ring 2.
- * queue_mapping = 4, goes to ring 3.
+ * queue_mapping = 0, unclassified, packet xmited through ring 0
+ * queue_mapping = 1, goes to ring 1. (highest priority queue)
+ * queue_mapping = 2, goes to ring 2.
+ * queue_mapping = 3, goes to ring 3.
+ * queue_mapping = 4, goes to ring 4.
*/
- if (index == 0)
- index = DESC_INDEX;
- else
- index -= 1;
-
ring = &priv->tx_rings[index];
- txq = netdev_get_tx_queue(dev, ring->queue);
+ txq = netdev_get_tx_queue(dev, index);
nr_frags = skb_shinfo(skb)->nr_frags;
@@ -2087,7 +2161,7 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
GENET_CB(skb)->bytes_sent = skb->len;
/* add the Transmit Status Block */
- skb = bcmgenet_add_tsb(dev, skb);
+ skb = bcmgenet_add_tsb(dev, skb, ring);
if (!skb) {
ret = NETDEV_TX_OK;
goto out;
@@ -2229,6 +2303,7 @@ static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
unsigned int budget)
{
+ struct bcmgenet_rx_stats64 *stats = &ring->stats64;
struct bcmgenet_priv *priv = ring->priv;
struct net_device *dev = priv->dev;
struct enet_cb *cb;
@@ -2242,15 +2317,8 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
unsigned int discards;
/* Clear status before servicing to reduce spurious interrupts */
- if (ring->index == DESC_INDEX) {
- bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_DONE,
- INTRL2_CPU_CLEAR);
- } else {
- mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index);
- bcmgenet_intrl2_1_writel(priv,
- mask,
- INTRL2_CPU_CLEAR);
- }
+ mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index);
+ bcmgenet_intrl2_1_writel(priv, mask, INTRL2_CPU_CLEAR);
p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX);
@@ -2258,7 +2326,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
DMA_P_INDEX_DISCARD_CNT_MASK;
if (discards > ring->old_discards) {
discards = discards - ring->old_discards;
- ring->errors += discards;
+ BCMGENET_STATS64_ADD(stats, missed, discards);
ring->old_discards += discards;
/* Clear HW register when we reach 75% of maximum 0xFFFF */
@@ -2284,7 +2352,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
skb = bcmgenet_rx_refill(priv, cb);
if (unlikely(!skb)) {
- ring->dropped++;
+ BCMGENET_STATS64_INC(stats, dropped);
goto next;
}
@@ -2311,8 +2379,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
if (unlikely(len > RX_BUF_LENGTH)) {
netif_err(priv, rx_status, dev, "oversized packet\n");
- dev->stats.rx_length_errors++;
- dev->stats.rx_errors++;
+ BCMGENET_STATS64_INC(stats, length_errors);
dev_kfree_skb_any(skb);
goto next;
}
@@ -2320,7 +2387,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
netif_err(priv, rx_status, dev,
"dropping fragmented packet!\n");
- ring->errors++;
+ BCMGENET_STATS64_INC(stats, fragmented_errors);
dev_kfree_skb_any(skb);
goto next;
}
@@ -2333,15 +2400,22 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
DMA_RX_RXER))) {
netif_err(priv, rx_status, dev, "dma_flag=0x%x\n",
(unsigned int)dma_flag);
+ u64_stats_update_begin(&stats->syncp);
if (dma_flag & DMA_RX_CRC_ERROR)
- dev->stats.rx_crc_errors++;
+ u64_stats_inc(&stats->crc_errors);
if (dma_flag & DMA_RX_OV)
- dev->stats.rx_over_errors++;
+ u64_stats_inc(&stats->over_errors);
if (dma_flag & DMA_RX_NO)
- dev->stats.rx_frame_errors++;
+ u64_stats_inc(&stats->frame_errors);
if (dma_flag & DMA_RX_LG)
- dev->stats.rx_length_errors++;
- dev->stats.rx_errors++;
+ u64_stats_inc(&stats->length_errors);
+ if ((dma_flag & (DMA_RX_CRC_ERROR |
+ DMA_RX_OV |
+ DMA_RX_NO |
+ DMA_RX_LG |
+ DMA_RX_RXER)) == DMA_RX_RXER)
+ u64_stats_inc(&stats->errors);
+ u64_stats_update_end(&stats->syncp);
dev_kfree_skb_any(skb);
goto next;
} /* error packet */
@@ -2361,10 +2435,15 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
/*Finish setting up the received SKB and send it to the kernel*/
skb->protocol = eth_type_trans(skb, priv->dev);
- ring->packets++;
- ring->bytes += len;
+
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->packets);
+ u64_stats_add(&stats->bytes, len);
if (dma_flag & DMA_RX_MULT)
- dev->stats.multicast++;
+ u64_stats_inc(&stats->multicast);
+ else if (dma_flag & DMA_RX_BRDCAST)
+ u64_stats_inc(&stats->broadcast);
+ u64_stats_update_end(&stats->syncp);
/* Notify kernel */
napi_gro_receive(&ring->napi, skb);
@@ -2397,15 +2476,13 @@ static int bcmgenet_rx_poll(struct napi_struct *napi, int budget)
work_done = bcmgenet_desc_rx(ring, budget);
- if (work_done < budget) {
- napi_complete_done(napi, work_done);
- ring->int_enable(ring);
- }
+ if (work_done < budget && napi_complete_done(napi, work_done))
+ bcmgenet_rx_ring_int_enable(ring);
if (ring->dim.use_dim) {
dim_update_sample(ring->dim.event_ctr, ring->dim.packets,
ring->dim.bytes, &dim_sample);
- net_dim(&ring->dim.dim, dim_sample);
+ net_dim(&ring->dim.dim, &dim_sample);
}
return work_done;
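The rx poll hunk above adopts the standard NAPI idiom of re-arming the ring interrupt only when napi_complete_done() reports that polling really finished. A minimal sketch of that idiom with placeholder ring helpers (foo_rx_process() and foo_rx_ring_int_enable() stand in for the driver's descriptor processing and INTRL2 writes):

#include <linux/netdevice.h>

struct foo_rx_ring {
        struct napi_struct napi;
        /* ... descriptor ring state ... */
};

static int foo_rx_process(struct foo_rx_ring *ring, int budget);  /* placeholder */
static void foo_rx_ring_int_enable(struct foo_rx_ring *ring);     /* placeholder */

static int foo_rx_poll(struct napi_struct *napi, int budget)
{
        struct foo_rx_ring *ring = container_of(napi, struct foo_rx_ring, napi);
        int work_done = foo_rx_process(ring, budget);

        /* Re-enable the ring interrupt only if NAPI really completed; if the
         * poll was rescheduled concurrently, napi_complete_done() returns
         * false and the interrupt stays masked until the next poll round.
         */
        if (work_done < budget && napi_complete_done(napi, work_done))
                foo_rx_ring_int_enable(ring);

        return work_done;
}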
@@ -2467,14 +2544,18 @@ static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable)
{
u32 reg;
+ spin_lock_bh(&priv->reg_lock);
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
- if (reg & CMD_SW_RESET)
+ if (reg & CMD_SW_RESET) {
+ spin_unlock_bh(&priv->reg_lock);
return;
+ }
if (enable)
reg |= mask;
else
reg &= ~mask;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock_bh(&priv->reg_lock);
/* UniMAC stops on a packet boundary, wait for a full-size packet
* to be processed
@@ -2490,8 +2571,10 @@ static void reset_umac(struct bcmgenet_priv *priv)
udelay(10);
/* issue soft reset and disable MAC while updating its registers */
+ spin_lock_bh(&priv->reg_lock);
bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
udelay(2);
+ spin_unlock_bh(&priv->reg_lock);
}
static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
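The umac_enable_set() and reset_umac() hunks serialize read-modify-write access to UMAC_CMD behind the new reg_lock. The sketch below shows the bare pattern with placeholder accessors (foo_readl()/foo_writel() and the register offset are illustrative; spin_lock_init(&p->reg_lock) is assumed to happen at probe time):

#include <linux/spinlock.h>
#include <linux/types.h>

struct foo_priv {
        spinlock_t reg_lock;    /* guards read-modify-write of the CMD register */
};

static u32 foo_readl(struct foo_priv *p, u32 off);             /* placeholder */
static void foo_writel(struct foo_priv *p, u32 val, u32 off);  /* placeholder */

#define FOO_CMD 0x008   /* illustrative register offset */

static void foo_cmd_update(struct foo_priv *p, u32 mask, bool enable)
{
        u32 reg;

        /* The _bh variant mirrors the hunks above, where the same register is
         * also rewritten from contexts that run with bottom halves disabled.
         */
        spin_lock_bh(&p->reg_lock);
        reg = foo_readl(p, FOO_CMD);
        if (enable)
                reg |= mask;
        else
                reg &= ~mask;
        foo_writel(p, reg, FOO_CMD);
        spin_unlock_bh(&p->reg_lock);
}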
@@ -2517,7 +2600,7 @@ static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
} else if (priv->ext_phy) {
int0_enable |= UMAC_IRQ_LINK_EVENT;
} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
- if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
+ if (bcmgenet_has_moca_link_det(priv))
int0_enable |= UMAC_IRQ_LINK_EVENT;
}
bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
@@ -2582,8 +2665,8 @@ static void init_umac(struct bcmgenet_priv *priv)
}
/* Enable MDIO interrupts on GENET v3+ */
- if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
- int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
+ if (bcmgenet_has_mdio_intr(priv))
+ int0_enable |= UMAC_IRQ_MDIO_EVENT;
bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
@@ -2633,15 +2716,6 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
spin_lock_init(&ring->lock);
ring->priv = priv;
ring->index = index;
- if (index == DESC_INDEX) {
- ring->queue = 0;
- ring->int_enable = bcmgenet_tx_ring16_int_enable;
- ring->int_disable = bcmgenet_tx_ring16_int_disable;
- } else {
- ring->queue = index + 1;
- ring->int_enable = bcmgenet_tx_ring_int_enable;
- ring->int_disable = bcmgenet_tx_ring_int_disable;
- }
ring->cbs = priv->tx_cbs + start_ptr;
ring->size = size;
ring->clean_ptr = start_ptr;
@@ -2652,8 +2726,8 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
ring->end_ptr = end_ptr - 1;
ring->prod_index = 0;
- /* Set flow period for ring != 16 */
- if (index != DESC_INDEX)
+ /* Set flow period for ring != 0 */
+ if (index)
flow_period_val = ENET_MAX_MTU_SIZE << 16;
bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX);
@@ -2691,13 +2765,6 @@ static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
ring->priv = priv;
ring->index = index;
- if (index == DESC_INDEX) {
- ring->int_enable = bcmgenet_rx_ring16_int_enable;
- ring->int_disable = bcmgenet_rx_ring16_int_disable;
- } else {
- ring->int_enable = bcmgenet_rx_ring_int_enable;
- ring->int_disable = bcmgenet_rx_ring_int_disable;
- }
ring->cbs = priv->rx_cbs + start_ptr;
ring->size = size;
ring->c_index = 0;
@@ -2743,15 +2810,11 @@ static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv)
unsigned int i;
struct bcmgenet_tx_ring *ring;
- for (i = 0; i < priv->hw_params->tx_queues; ++i) {
+ for (i = 0; i <= priv->hw_params->tx_queues; ++i) {
ring = &priv->tx_rings[i];
napi_enable(&ring->napi);
- ring->int_enable(ring);
+ bcmgenet_tx_ring_int_enable(ring);
}
-
- ring = &priv->tx_rings[DESC_INDEX];
- napi_enable(&ring->napi);
- ring->int_enable(ring);
}
static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv)
@@ -2759,13 +2822,10 @@ static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv)
unsigned int i;
struct bcmgenet_tx_ring *ring;
- for (i = 0; i < priv->hw_params->tx_queues; ++i) {
+ for (i = 0; i <= priv->hw_params->tx_queues; ++i) {
ring = &priv->tx_rings[i];
napi_disable(&ring->napi);
}
-
- ring = &priv->tx_rings[DESC_INDEX];
- napi_disable(&ring->napi);
}
static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv)
@@ -2773,82 +2833,104 @@ static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv)
unsigned int i;
struct bcmgenet_tx_ring *ring;
- for (i = 0; i < priv->hw_params->tx_queues; ++i) {
+ for (i = 0; i <= priv->hw_params->tx_queues; ++i) {
ring = &priv->tx_rings[i];
netif_napi_del(&ring->napi);
}
+}
+
+static int bcmgenet_tdma_disable(struct bcmgenet_priv *priv)
+{
+ int timeout = 0;
+ u32 reg, mask;
- ring = &priv->tx_rings[DESC_INDEX];
- netif_napi_del(&ring->napi);
+ reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+ mask = (1 << (priv->hw_params->tx_queues + 1)) - 1;
+ mask = (mask << DMA_RING_BUF_EN_SHIFT) | DMA_EN;
+ reg &= ~mask;
+ bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+
+ /* Check DMA status register to confirm DMA is disabled */
+ while (timeout++ < DMA_TIMEOUT_VAL) {
+ reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
+ if ((reg & mask) == mask)
+ return 0;
+
+ udelay(1);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int bcmgenet_rdma_disable(struct bcmgenet_priv *priv)
+{
+ int timeout = 0;
+ u32 reg, mask;
+
+ reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
+ mask = (1 << (priv->hw_params->rx_queues + 1)) - 1;
+ mask = (mask << DMA_RING_BUF_EN_SHIFT) | DMA_EN;
+ reg &= ~mask;
+ bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
+
+ /* Check DMA status register to confirm DMA is disabled */
+ while (timeout++ < DMA_TIMEOUT_VAL) {
+ reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
+ if ((reg & mask) == mask)
+ return 0;
+
+ udelay(1);
+ }
+
+ return -ETIMEDOUT;
}
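Both disable helpers above build their mask the same way: one bit per ring for rings 0..N (hence the +1 on the queue count), shifted into the ring-buffer-enable field, plus DMA_EN. A standalone check of that arithmetic (the shift value here is illustrative; the real DMA_RING_BUF_EN_SHIFT comes from bcmgenet.h):

#include <stdio.h>
#include <stdint.h>

#define DMA_EN                  (1u << 0)
#define DMA_RING_BUF_EN_SHIFT   1       /* illustrative; see bcmgenet.h for the real value */

int main(void)
{
        unsigned int tx_queues = 4;     /* priority rings 1..4, plus default ring 0 */
        uint32_t mask;

        mask = (1u << (tx_queues + 1)) - 1;             /* rings 0..tx_queues */
        mask = (mask << DMA_RING_BUF_EN_SHIFT) | DMA_EN;

        printf("ring+enable mask: 0x%08x\n", mask);     /* 0x0000003f with these values */
        return 0;
}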
/* Initialize Tx queues
*
- * Queues 0-3 are priority-based, each one has 32 descriptors,
- * with queue 0 being the highest priority queue.
+ * Queues 1-4 are priority-based, each one has 32 descriptors,
+ * with queue 1 being the highest priority queue.
*
- * Queue 16 is the default Tx queue with
- * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors.
+ * Queue 0 is the default Tx queue with
+ * GENET_Q0_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors.
*
* The transmit control block pool is then partitioned as follows:
- * - Tx queue 0 uses tx_cbs[0..31]
- * - Tx queue 1 uses tx_cbs[32..63]
- * - Tx queue 2 uses tx_cbs[64..95]
- * - Tx queue 3 uses tx_cbs[96..127]
- * - Tx queue 16 uses tx_cbs[128..255]
+ * - Tx queue 0 uses tx_cbs[0..127]
+ * - Tx queue 1 uses tx_cbs[128..159]
+ * - Tx queue 2 uses tx_cbs[160..191]
+ * - Tx queue 3 uses tx_cbs[192..223]
+ * - Tx queue 4 uses tx_cbs[224..255]
*/
static void bcmgenet_init_tx_queues(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- u32 i, dma_enable;
- u32 dma_ctrl, ring_cfg;
- u32 dma_priority[3] = {0, 0, 0};
-
- dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL);
- dma_enable = dma_ctrl & DMA_EN;
- dma_ctrl &= ~DMA_EN;
- bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
-
- dma_ctrl = 0;
- ring_cfg = 0;
+ unsigned int start = 0, end = GENET_Q0_TX_BD_CNT;
+ u32 i, ring_mask, dma_priority[3] = {0, 0, 0};
/* Enable strict priority arbiter mode */
bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL);
/* Initialize Tx priority queues */
- for (i = 0; i < priv->hw_params->tx_queues; i++) {
- bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q,
- i * priv->hw_params->tx_bds_per_q,
- (i + 1) * priv->hw_params->tx_bds_per_q);
- ring_cfg |= (1 << i);
- dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
+ for (i = 0; i <= priv->hw_params->tx_queues; i++) {
+ bcmgenet_init_tx_ring(priv, i, end - start, start, end);
+ start = end;
+ end += priv->hw_params->tx_bds_per_q;
dma_priority[DMA_PRIO_REG_INDEX(i)] |=
- ((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i));
+ (i ? GENET_Q1_PRIORITY : GENET_Q0_PRIORITY)
+ << DMA_PRIO_REG_SHIFT(i);
}
- /* Initialize Tx default queue 16 */
- bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT,
- priv->hw_params->tx_queues *
- priv->hw_params->tx_bds_per_q,
- TOTAL_DESC);
- ring_cfg |= (1 << DESC_INDEX);
- dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
- dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |=
- ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) <<
- DMA_PRIO_REG_SHIFT(DESC_INDEX));
-
/* Set Tx queue priorities */
bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0);
bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1);
bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2);
- /* Enable Tx queues */
- bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG);
+ /* Configure Tx queues as descriptor rings */
+ ring_mask = (1 << (priv->hw_params->tx_queues + 1)) - 1;
+ bcmgenet_tdma_writel(priv, ring_mask, DMA_RING_CFG);
- /* Enable Tx DMA */
- if (dma_enable)
- dma_ctrl |= DMA_EN;
- bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
+ /* Enable Tx rings */
+ ring_mask <<= DMA_RING_BUF_EN_SHIFT;
+ bcmgenet_tdma_writel(priv, ring_mask, DMA_CTRL);
}
static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv)
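The rewritten init loop walks rings 0..tx_queues with a running [start, end) window: ring 0 first gets the large default block, then each priority ring gets tx_bds_per_q descriptors. A standalone check using the numbers from the comment above (256 descriptors total, four priority queues of 32 each):

#include <stdio.h>

int main(void)
{
        const unsigned int total_desc = 256;
        const unsigned int tx_queues = 4;       /* priority rings 1..4 */
        const unsigned int tx_bds_per_q = 32;
        unsigned int q0_cnt = total_desc - tx_queues * tx_bds_per_q;    /* 128 */
        unsigned int start = 0, end = q0_cnt;

        for (unsigned int i = 0; i <= tx_queues; i++) {
                printf("Tx queue %u uses tx_cbs[%u..%u] (%u descriptors)\n",
                       i, start, end - 1, end - start);
                start = end;
                end += tx_bds_per_q;
        }
        return 0;
}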
@@ -2856,15 +2938,11 @@ static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv)
unsigned int i;
struct bcmgenet_rx_ring *ring;
- for (i = 0; i < priv->hw_params->rx_queues; ++i) {
+ for (i = 0; i <= priv->hw_params->rx_queues; ++i) {
ring = &priv->rx_rings[i];
napi_enable(&ring->napi);
- ring->int_enable(ring);
+ bcmgenet_rx_ring_int_enable(ring);
}
-
- ring = &priv->rx_rings[DESC_INDEX];
- napi_enable(&ring->napi);
- ring->int_enable(ring);
}
static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
@@ -2872,15 +2950,11 @@ static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
unsigned int i;
struct bcmgenet_rx_ring *ring;
- for (i = 0; i < priv->hw_params->rx_queues; ++i) {
+ for (i = 0; i <= priv->hw_params->rx_queues; ++i) {
ring = &priv->rx_rings[i];
napi_disable(&ring->napi);
cancel_work_sync(&ring->dim.dim.work);
}
-
- ring = &priv->rx_rings[DESC_INDEX];
- napi_disable(&ring->napi);
- cancel_work_sync(&ring->dim.dim.work);
}
static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv)
@@ -2888,13 +2962,10 @@ static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv)
unsigned int i;
struct bcmgenet_rx_ring *ring;
- for (i = 0; i < priv->hw_params->rx_queues; ++i) {
+ for (i = 0; i <= priv->hw_params->rx_queues; ++i) {
ring = &priv->rx_rings[i];
netif_napi_del(&ring->napi);
}
-
- ring = &priv->rx_rings[DESC_INDEX];
- netif_napi_del(&ring->napi);
}
/* Initialize Rx queues
@@ -2902,57 +2973,32 @@ static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv)
* Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be
* used to direct traffic to these queues.
*
- * Queue 16 is the default Rx queue with GENET_Q16_RX_BD_CNT descriptors.
+ * Queue 0 is also the default Rx queue with GENET_Q0_RX_BD_CNT descriptors.
*/
static int bcmgenet_init_rx_queues(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- u32 i;
- u32 dma_enable;
- u32 dma_ctrl;
- u32 ring_cfg;
+ unsigned int start = 0, end = GENET_Q0_RX_BD_CNT;
+ u32 i, ring_mask;
int ret;
- dma_ctrl = bcmgenet_rdma_readl(priv, DMA_CTRL);
- dma_enable = dma_ctrl & DMA_EN;
- dma_ctrl &= ~DMA_EN;
- bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);
-
- dma_ctrl = 0;
- ring_cfg = 0;
-
/* Initialize Rx priority queues */
- for (i = 0; i < priv->hw_params->rx_queues; i++) {
- ret = bcmgenet_init_rx_ring(priv, i,
- priv->hw_params->rx_bds_per_q,
- i * priv->hw_params->rx_bds_per_q,
- (i + 1) *
- priv->hw_params->rx_bds_per_q);
+ for (i = 0; i <= priv->hw_params->rx_queues; i++) {
+ ret = bcmgenet_init_rx_ring(priv, i, end - start, start, end);
if (ret)
return ret;
- ring_cfg |= (1 << i);
- dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
+ start = end;
+ end += priv->hw_params->rx_bds_per_q;
}
- /* Initialize Rx default queue 16 */
- ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, GENET_Q16_RX_BD_CNT,
- priv->hw_params->rx_queues *
- priv->hw_params->rx_bds_per_q,
- TOTAL_DESC);
- if (ret)
- return ret;
-
- ring_cfg |= (1 << DESC_INDEX);
- dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
-
- /* Enable rings */
- bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG);
+ /* Configure Rx queues as descriptor rings */
+ ring_mask = (1 << (priv->hw_params->rx_queues + 1)) - 1;
+ bcmgenet_rdma_writel(priv, ring_mask, DMA_RING_CFG);
- /* Configure ring as descriptor ring and re-enable DMA if enabled */
- if (dma_enable)
- dma_ctrl |= DMA_EN;
- bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);
+ /* Enable Rx rings */
+ ring_mask <<= DMA_RING_BUF_EN_SHIFT;
+ bcmgenet_rdma_writel(priv, ring_mask, DMA_CTRL);
return 0;
}
@@ -2960,26 +3006,9 @@ static int bcmgenet_init_rx_queues(struct net_device *dev)
static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
{
int ret = 0;
- int timeout = 0;
- u32 reg;
- u32 dma_ctrl;
- int i;
/* Disable TDMA to stop add more frames in TX DMA */
- reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
- reg &= ~DMA_EN;
- bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
-
- /* Check TDMA status register to confirm TDMA is disabled */
- while (timeout++ < DMA_TIMEOUT_VAL) {
- reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
- if (reg & DMA_DISABLED)
- break;
-
- udelay(1);
- }
-
- if (timeout == DMA_TIMEOUT_VAL) {
+ if (-ETIMEDOUT == bcmgenet_tdma_disable(priv)) {
netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
ret = -ETIMEDOUT;
}
@@ -2988,39 +3017,11 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
usleep_range(10000, 20000);
/* Disable RDMA */
- reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
- reg &= ~DMA_EN;
- bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
-
- timeout = 0;
- /* Check RDMA status register to confirm RDMA is disabled */
- while (timeout++ < DMA_TIMEOUT_VAL) {
- reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
- if (reg & DMA_DISABLED)
- break;
-
- udelay(1);
- }
-
- if (timeout == DMA_TIMEOUT_VAL) {
+ if (-ETIMEDOUT == bcmgenet_rdma_disable(priv)) {
netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
ret = -ETIMEDOUT;
}
- dma_ctrl = 0;
- for (i = 0; i < priv->hw_params->rx_queues; i++)
- dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
- reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
- reg &= ~dma_ctrl;
- bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
-
- dma_ctrl = 0;
- for (i = 0; i < priv->hw_params->tx_queues; i++)
- dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
- reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
- reg &= ~dma_ctrl;
- bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
-
return ret;
}
@@ -3032,32 +3033,53 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
bcmgenet_fini_rx_napi(priv);
bcmgenet_fini_tx_napi(priv);
- for (i = 0; i < priv->num_tx_bds; i++)
- dev_kfree_skb(bcmgenet_free_tx_cb(&priv->pdev->dev,
- priv->tx_cbs + i));
-
- for (i = 0; i < priv->hw_params->tx_queues; i++) {
- txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[i].queue);
+ for (i = 0; i <= priv->hw_params->tx_queues; i++) {
+ txq = netdev_get_tx_queue(priv->dev, i);
netdev_tx_reset_queue(txq);
}
- txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[DESC_INDEX].queue);
- netdev_tx_reset_queue(txq);
-
bcmgenet_free_rx_buffers(priv);
kfree(priv->rx_cbs);
kfree(priv->tx_cbs);
}
/* init_edma: Initialize DMA control register */
-static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
+static int bcmgenet_init_dma(struct bcmgenet_priv *priv, bool flush_rx)
{
- int ret;
- unsigned int i;
struct enet_cb *cb;
+ unsigned int i;
+ int ret;
+ u32 reg;
netif_dbg(priv, hw, priv->dev, "%s\n", __func__);
+ /* Disable TX DMA */
+ ret = bcmgenet_tdma_disable(priv);
+ if (ret) {
+ netdev_err(priv->dev, "failed to halt Tx DMA\n");
+ return ret;
+ }
+
+ /* Disable RX DMA */
+ ret = bcmgenet_rdma_disable(priv);
+ if (ret) {
+ netdev_err(priv->dev, "failed to halt Rx DMA\n");
+ return ret;
+ }
+
+ /* Flush TX queues */
+ bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
+ udelay(10);
+ bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);
+
+ if (flush_rx) {
+ reg = bcmgenet_rbuf_ctrl_get(priv);
+ bcmgenet_rbuf_ctrl_set(priv, reg | BIT(0));
+ udelay(10);
+ bcmgenet_rbuf_ctrl_set(priv, reg);
+ udelay(10);
+ }
+
/* Initialize common Rx ring structures */
priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
priv->num_rx_bds = TOTAL_DESC;
@@ -3107,6 +3129,15 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
/* Initialize Tx queues */
bcmgenet_init_tx_queues(priv->dev);
+ /* Enable RX/TX DMA */
+ reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
+ reg |= DMA_EN;
+ bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
+
+ reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+ reg |= DMA_EN;
+ bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+
return 0;
}
@@ -3136,7 +3167,7 @@ static void bcmgenet_irq_task(struct work_struct *work)
}
-/* bcmgenet_isr1: handle Rx and Tx priority queues */
+/* bcmgenet_isr1: handle Rx and Tx queues */
static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
{
struct bcmgenet_priv *priv = dev_id;
@@ -3155,7 +3186,7 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
"%s: IRQ=0x%x\n", __func__, status);
/* Check Rx priority queue interrupts */
- for (index = 0; index < priv->hw_params->rx_queues; index++) {
+ for (index = 0; index <= priv->hw_params->rx_queues; index++) {
if (!(status & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
continue;
@@ -3163,20 +3194,20 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
rx_ring->dim.event_ctr++;
if (likely(napi_schedule_prep(&rx_ring->napi))) {
- rx_ring->int_disable(rx_ring);
+ bcmgenet_rx_ring_int_disable(rx_ring);
__napi_schedule_irqoff(&rx_ring->napi);
}
}
/* Check Tx priority queue interrupts */
- for (index = 0; index < priv->hw_params->tx_queues; index++) {
+ for (index = 0; index <= priv->hw_params->tx_queues; index++) {
if (!(status & BIT(index)))
continue;
tx_ring = &priv->tx_rings[index];
if (likely(napi_schedule_prep(&tx_ring->napi))) {
- tx_ring->int_disable(tx_ring);
+ bcmgenet_tx_ring_int_disable(tx_ring);
__napi_schedule_irqoff(&tx_ring->napi);
}
}
@@ -3184,12 +3215,10 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
return IRQ_HANDLED;
}
-/* bcmgenet_isr0: handle Rx and Tx default queues + other stuff */
+/* bcmgenet_isr0: handle other stuff */
static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
{
struct bcmgenet_priv *priv = dev_id;
- struct bcmgenet_rx_ring *rx_ring;
- struct bcmgenet_tx_ring *tx_ring;
unsigned int status;
unsigned long flags;
@@ -3203,29 +3232,8 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
netif_dbg(priv, intr, priv->dev,
"IRQ=0x%x\n", status);
- if (status & UMAC_IRQ_RXDMA_DONE) {
- rx_ring = &priv->rx_rings[DESC_INDEX];
- rx_ring->dim.event_ctr++;
-
- if (likely(napi_schedule_prep(&rx_ring->napi))) {
- rx_ring->int_disable(rx_ring);
- __napi_schedule_irqoff(&rx_ring->napi);
- }
- }
-
- if (status & UMAC_IRQ_TXDMA_DONE) {
- tx_ring = &priv->tx_rings[DESC_INDEX];
-
- if (likely(napi_schedule_prep(&tx_ring->napi))) {
- tx_ring->int_disable(tx_ring);
- __napi_schedule_irqoff(&tx_ring->napi);
- }
- }
-
- if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
- status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
+ if (bcmgenet_has_mdio_intr(priv) && status & UMAC_IRQ_MDIO_EVENT)
wake_up(&priv->wq);
- }
/* all other interested interrupts handled in bottom half */
status &= (UMAC_IRQ_LINK_EVENT | UMAC_IRQ_PHY_DET_R);
@@ -3279,62 +3287,14 @@ static void bcmgenet_get_hw_addr(struct bcmgenet_priv *priv,
put_unaligned_be16(addr_tmp, &addr[4]);
}
-/* Returns a reusable dma control register value */
-static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv, bool flush_rx)
-{
- unsigned int i;
- u32 reg;
- u32 dma_ctrl;
-
- /* disable DMA */
- dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
- for (i = 0; i < priv->hw_params->tx_queues; i++)
- dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
- reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
- reg &= ~dma_ctrl;
- bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
-
- dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
- for (i = 0; i < priv->hw_params->rx_queues; i++)
- dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
- reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
- reg &= ~dma_ctrl;
- bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
-
- bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
- udelay(10);
- bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);
-
- if (flush_rx) {
- reg = bcmgenet_rbuf_ctrl_get(priv);
- bcmgenet_rbuf_ctrl_set(priv, reg | BIT(0));
- udelay(10);
- bcmgenet_rbuf_ctrl_set(priv, reg);
- udelay(10);
- }
-
- return dma_ctrl;
-}
-
-static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
-{
- u32 reg;
-
- reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
- reg |= dma_ctrl;
- bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
-
- reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
- reg |= dma_ctrl;
- bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
-}
-
static void bcmgenet_netif_start(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
/* Start the network engine */
+ netif_addr_lock_bh(dev);
bcmgenet_set_rx_mode(dev);
+ netif_addr_unlock_bh(dev);
bcmgenet_enable_rx_napi(priv);
umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);
@@ -3350,7 +3310,6 @@ static void bcmgenet_netif_start(struct net_device *dev)
static int bcmgenet_open(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- unsigned long dma_ctrl;
int ret;
netif_dbg(priv, ifup, dev, "bcmgenet_open\n");
@@ -3376,22 +3335,16 @@ static int bcmgenet_open(struct net_device *dev)
bcmgenet_set_hw_addr(priv, dev->dev_addr);
- /* Disable RX/TX DMA and flush TX and RX queues */
- dma_ctrl = bcmgenet_dma_disable(priv, true);
+ /* HFB init */
+ bcmgenet_hfb_init(priv);
/* Reinitialize TDMA and RDMA and SW housekeeping */
- ret = bcmgenet_init_dma(priv);
+ ret = bcmgenet_init_dma(priv, true);
if (ret) {
netdev_err(dev, "failed to initialize DMA\n");
goto err_clk_disable;
}
- /* Always enable ring 16 - descriptor ring */
- bcmgenet_enable_dma(priv, dma_ctrl);
-
- /* HFB init */
- bcmgenet_hfb_init(priv);
-
ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
dev->name, priv);
if (ret < 0) {
@@ -3438,19 +3391,21 @@ static void bcmgenet_netif_stop(struct net_device *dev, bool stop_phy)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- bcmgenet_disable_tx_napi(priv);
netif_tx_disable(dev);
/* Disable MAC receive */
+ bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL);
umac_enable_set(priv, CMD_RX_EN, false);
+ if (stop_phy)
+ phy_stop(dev->phydev);
+
bcmgenet_dma_teardown(priv);
/* Disable MAC transmit. TX DMA disabled must be done before this */
umac_enable_set(priv, CMD_TX_EN, false);
- if (stop_phy)
- phy_stop(dev->phydev);
+ bcmgenet_disable_tx_napi(priv);
bcmgenet_disable_rx_napi(priv);
bcmgenet_intr_disable(priv);
@@ -3498,16 +3453,11 @@ static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
if (!netif_msg_tx_err(priv))
return;
- txq = netdev_get_tx_queue(priv->dev, ring->queue);
+ txq = netdev_get_tx_queue(priv->dev, ring->index);
spin_lock(&ring->lock);
- if (ring->index == DESC_INDEX) {
- intsts = ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
- intmsk = UMAC_IRQ_TXDMA_DONE | UMAC_IRQ_TXDMA_MBDONE;
- } else {
- intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
- intmsk = 1 << ring->index;
- }
+ intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
+ intmsk = 1 << ring->index;
c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX);
txq_stopped = netif_tx_queue_stopped(txq);
@@ -3521,7 +3471,7 @@ static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
"(sw)c_index: %d (hw)c_index: %d\n"
"(sw)clean_p: %d (sw)write_p: %d\n"
"(sw)cb_ptr: %d (sw)end_ptr: %d\n",
- ring->index, ring->queue,
+ ring->index, ring->index,
txq_stopped ? "stopped" : "active",
intsts & intmsk ? "enabled" : "disabled",
free_bds, ring->size,
@@ -3534,30 +3484,25 @@ static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
static void bcmgenet_timeout(struct net_device *dev, unsigned int txqueue)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- u32 int0_enable = 0;
u32 int1_enable = 0;
unsigned int q;
netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");
- for (q = 0; q < priv->hw_params->tx_queues; q++)
+ for (q = 0; q <= priv->hw_params->tx_queues; q++)
bcmgenet_dump_tx_queue(&priv->tx_rings[q]);
- bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]);
bcmgenet_tx_reclaim_all(dev);
- for (q = 0; q < priv->hw_params->tx_queues; q++)
+ for (q = 0; q <= priv->hw_params->tx_queues; q++)
int1_enable |= (1 << q);
- int0_enable = UMAC_IRQ_TXDMA_DONE;
-
/* Re-enable TX interrupts if disabled */
- bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
netif_trans_update(dev);
- dev->stats.tx_errors++;
+ BCMGENET_STATS64_INC((&priv->tx_rings[txqueue].stats64), errors);
netif_tx_wake_all_queues(dev);
}
@@ -3595,16 +3540,19 @@ static void bcmgenet_set_rx_mode(struct net_device *dev)
* 3. The number of filters needed exceeds the number filters
* supported by the hardware.
*/
+ spin_lock(&priv->reg_lock);
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) ||
(nfilter > MAX_MDF_FILTER)) {
reg |= CMD_PROMISC;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock(&priv->reg_lock);
bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
return;
} else {
reg &= ~CMD_PROMISC;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock(&priv->reg_lock);
}
/* update MDF filter */
@@ -3643,47 +3591,72 @@ static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
return 0;
}
-static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev)
+static void bcmgenet_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- unsigned long tx_bytes = 0, tx_packets = 0;
- unsigned long rx_bytes = 0, rx_packets = 0;
- unsigned long rx_errors = 0, rx_dropped = 0;
- struct bcmgenet_tx_ring *tx_ring;
- struct bcmgenet_rx_ring *rx_ring;
+ struct bcmgenet_tx_stats64 *tx_stats;
+ struct bcmgenet_rx_stats64 *rx_stats;
+ u64 rx_length_errors, rx_over_errors;
+ u64 rx_missed, rx_fragmented_errors;
+ u64 rx_crc_errors, rx_frame_errors;
+ u64 tx_errors, tx_dropped;
+ u64 rx_errors, rx_dropped;
+ u64 tx_bytes, tx_packets;
+ u64 rx_bytes, rx_packets;
+ unsigned int start;
unsigned int q;
-
- for (q = 0; q < priv->hw_params->tx_queues; q++) {
- tx_ring = &priv->tx_rings[q];
- tx_bytes += tx_ring->bytes;
- tx_packets += tx_ring->packets;
+ u64 multicast;
+
+ for (q = 0; q <= priv->hw_params->tx_queues; q++) {
+ tx_stats = &priv->tx_rings[q].stats64;
+ do {
+ start = u64_stats_fetch_begin(&tx_stats->syncp);
+ tx_bytes = u64_stats_read(&tx_stats->bytes);
+ tx_packets = u64_stats_read(&tx_stats->packets);
+ tx_errors = u64_stats_read(&tx_stats->errors);
+ tx_dropped = u64_stats_read(&tx_stats->dropped);
+ } while (u64_stats_fetch_retry(&tx_stats->syncp, start));
+
+ stats->tx_bytes += tx_bytes;
+ stats->tx_packets += tx_packets;
+ stats->tx_errors += tx_errors;
+ stats->tx_dropped += tx_dropped;
}
- tx_ring = &priv->tx_rings[DESC_INDEX];
- tx_bytes += tx_ring->bytes;
- tx_packets += tx_ring->packets;
- for (q = 0; q < priv->hw_params->rx_queues; q++) {
- rx_ring = &priv->rx_rings[q];
-
- rx_bytes += rx_ring->bytes;
- rx_packets += rx_ring->packets;
- rx_errors += rx_ring->errors;
- rx_dropped += rx_ring->dropped;
+ for (q = 0; q <= priv->hw_params->rx_queues; q++) {
+ rx_stats = &priv->rx_rings[q].stats64;
+ do {
+ start = u64_stats_fetch_begin(&rx_stats->syncp);
+ rx_bytes = u64_stats_read(&rx_stats->bytes);
+ rx_packets = u64_stats_read(&rx_stats->packets);
+ rx_errors = u64_stats_read(&rx_stats->errors);
+ rx_dropped = u64_stats_read(&rx_stats->dropped);
+ rx_missed = u64_stats_read(&rx_stats->missed);
+ rx_length_errors = u64_stats_read(&rx_stats->length_errors);
+ rx_over_errors = u64_stats_read(&rx_stats->over_errors);
+ rx_crc_errors = u64_stats_read(&rx_stats->crc_errors);
+ rx_frame_errors = u64_stats_read(&rx_stats->frame_errors);
+ rx_fragmented_errors = u64_stats_read(&rx_stats->fragmented_errors);
+ multicast = u64_stats_read(&rx_stats->multicast);
+ } while (u64_stats_fetch_retry(&rx_stats->syncp, start));
+
+ rx_errors += rx_length_errors;
+ rx_errors += rx_crc_errors;
+ rx_errors += rx_frame_errors;
+ rx_errors += rx_fragmented_errors;
+
+ stats->rx_bytes += rx_bytes;
+ stats->rx_packets += rx_packets;
+ stats->rx_errors += rx_errors;
+ stats->rx_dropped += rx_dropped;
+ stats->rx_missed_errors += rx_missed;
+ stats->rx_length_errors += rx_length_errors;
+ stats->rx_over_errors += rx_over_errors;
+ stats->rx_crc_errors += rx_crc_errors;
+ stats->rx_frame_errors += rx_frame_errors;
+ stats->multicast += multicast;
}
- rx_ring = &priv->rx_rings[DESC_INDEX];
- rx_bytes += rx_ring->bytes;
- rx_packets += rx_ring->packets;
- rx_errors += rx_ring->errors;
- rx_dropped += rx_ring->dropped;
-
- dev->stats.tx_bytes = tx_bytes;
- dev->stats.tx_packets = tx_packets;
- dev->stats.rx_bytes = rx_bytes;
- dev->stats.rx_packets = rx_packets;
- dev->stats.rx_errors = rx_errors;
- dev->stats.rx_missed_errors = rx_errors;
- dev->stats.rx_dropped = rx_dropped;
- return &dev->stats;
}
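
bcmgenet_get_stats64() is the lockless reader side of the u64_stats_sync protocol; the fetch/retry loop guarantees a consistent snapshot on 32-bit machines where the sequence counter is real. The matching writer side in the datapath is not part of this hunk; a sketch, assuming a tx ring and skb in scope, would be:

        u64_stats_update_begin(&ring->stats64.syncp);
        u64_stats_add(&ring->stats64.bytes, skb->len);
        u64_stats_inc(&ring->stats64.packets);
        u64_stats_update_end(&ring->stats64.syncp);
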
static int bcmgenet_change_carrier(struct net_device *dev, bool new_carrier)
@@ -3711,132 +3684,113 @@ static const struct net_device_ops bcmgenet_netdev_ops = {
.ndo_set_mac_address = bcmgenet_set_mac_addr,
.ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_set_features = bcmgenet_set_features,
- .ndo_get_stats = bcmgenet_get_stats,
+ .ndo_get_stats64 = bcmgenet_get_stats64,
.ndo_change_carrier = bcmgenet_change_carrier,
};
-/* Array of GENET hardware parameters/characteristics */
-static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
- [GENET_V1] = {
- .tx_queues = 0,
- .tx_bds_per_q = 0,
- .rx_queues = 0,
- .rx_bds_per_q = 0,
- .bp_in_en_shift = 16,
- .bp_in_mask = 0xffff,
- .hfb_filter_cnt = 16,
- .qtag_mask = 0x1F,
- .hfb_offset = 0x1000,
- .rdma_offset = 0x2000,
- .tdma_offset = 0x3000,
- .words_per_bd = 2,
- },
- [GENET_V2] = {
- .tx_queues = 4,
- .tx_bds_per_q = 32,
- .rx_queues = 0,
- .rx_bds_per_q = 0,
- .bp_in_en_shift = 16,
- .bp_in_mask = 0xffff,
- .hfb_filter_cnt = 16,
- .qtag_mask = 0x1F,
- .tbuf_offset = 0x0600,
- .hfb_offset = 0x1000,
- .hfb_reg_offset = 0x2000,
- .rdma_offset = 0x3000,
- .tdma_offset = 0x4000,
- .words_per_bd = 2,
- .flags = GENET_HAS_EXT,
- },
- [GENET_V3] = {
- .tx_queues = 4,
- .tx_bds_per_q = 32,
- .rx_queues = 0,
- .rx_bds_per_q = 0,
- .bp_in_en_shift = 17,
- .bp_in_mask = 0x1ffff,
- .hfb_filter_cnt = 48,
- .hfb_filter_size = 128,
- .qtag_mask = 0x3F,
- .tbuf_offset = 0x0600,
- .hfb_offset = 0x8000,
- .hfb_reg_offset = 0xfc00,
- .rdma_offset = 0x10000,
- .tdma_offset = 0x11000,
- .words_per_bd = 2,
- .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR |
- GENET_HAS_MOCA_LINK_DET,
- },
- [GENET_V4] = {
- .tx_queues = 4,
- .tx_bds_per_q = 32,
- .rx_queues = 0,
- .rx_bds_per_q = 0,
- .bp_in_en_shift = 17,
- .bp_in_mask = 0x1ffff,
- .hfb_filter_cnt = 48,
- .hfb_filter_size = 128,
- .qtag_mask = 0x3F,
- .tbuf_offset = 0x0600,
- .hfb_offset = 0x8000,
- .hfb_reg_offset = 0xfc00,
- .rdma_offset = 0x2000,
- .tdma_offset = 0x4000,
- .words_per_bd = 3,
- .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
- GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
- },
- [GENET_V5] = {
- .tx_queues = 4,
- .tx_bds_per_q = 32,
- .rx_queues = 0,
- .rx_bds_per_q = 0,
- .bp_in_en_shift = 17,
- .bp_in_mask = 0x1ffff,
- .hfb_filter_cnt = 48,
- .hfb_filter_size = 128,
- .qtag_mask = 0x3F,
- .tbuf_offset = 0x0600,
- .hfb_offset = 0x8000,
- .hfb_reg_offset = 0xfc00,
- .rdma_offset = 0x2000,
- .tdma_offset = 0x4000,
- .words_per_bd = 3,
- .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
- GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
- },
+/* GENET hardware parameters/characteristics */
+static const struct bcmgenet_hw_params bcmgenet_hw_params_v1 = {
+ .tx_queues = 0,
+ .tx_bds_per_q = 0,
+ .rx_queues = 0,
+ .rx_bds_per_q = 0,
+ .bp_in_en_shift = 16,
+ .bp_in_mask = 0xffff,
+ .hfb_filter_cnt = 16,
+ .hfb_filter_size = 64,
+ .qtag_mask = 0x1F,
+ .hfb_offset = 0x1000,
+ .hfb_reg_offset = GENET_RBUF_OFF + RBUF_HFB_CTRL_V1,
+ .rdma_offset = 0x2000,
+ .tdma_offset = 0x3000,
+ .words_per_bd = 2,
+};
+
+static const struct bcmgenet_hw_params bcmgenet_hw_params_v2 = {
+ .tx_queues = 4,
+ .tx_bds_per_q = 32,
+ .rx_queues = 0,
+ .rx_bds_per_q = 0,
+ .bp_in_en_shift = 16,
+ .bp_in_mask = 0xffff,
+ .hfb_filter_cnt = 16,
+ .hfb_filter_size = 64,
+ .qtag_mask = 0x1F,
+ .tbuf_offset = 0x0600,
+ .hfb_offset = 0x1000,
+ .hfb_reg_offset = 0x2000,
+ .rdma_offset = 0x3000,
+ .tdma_offset = 0x4000,
+ .words_per_bd = 2,
+};
+
+static const struct bcmgenet_hw_params bcmgenet_hw_params_v3 = {
+ .tx_queues = 4,
+ .tx_bds_per_q = 32,
+ .rx_queues = 0,
+ .rx_bds_per_q = 0,
+ .bp_in_en_shift = 17,
+ .bp_in_mask = 0x1ffff,
+ .hfb_filter_cnt = 48,
+ .hfb_filter_size = 128,
+ .qtag_mask = 0x3F,
+ .tbuf_offset = 0x0600,
+ .hfb_offset = 0x8000,
+ .hfb_reg_offset = 0xfc00,
+ .rdma_offset = 0x10000,
+ .tdma_offset = 0x11000,
+ .words_per_bd = 2,
+};
+
+static const struct bcmgenet_hw_params bcmgenet_hw_params_v4 = {
+ .tx_queues = 4,
+ .tx_bds_per_q = 32,
+ .rx_queues = 0,
+ .rx_bds_per_q = 0,
+ .bp_in_en_shift = 17,
+ .bp_in_mask = 0x1ffff,
+ .hfb_filter_cnt = 48,
+ .hfb_filter_size = 128,
+ .qtag_mask = 0x3F,
+ .tbuf_offset = 0x0600,
+ .hfb_offset = 0x8000,
+ .hfb_reg_offset = 0xfc00,
+ .rdma_offset = 0x2000,
+ .tdma_offset = 0x4000,
+ .words_per_bd = 3,
};
/* Infer hardware parameters from the detected GENET version */
static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
{
- struct bcmgenet_hw_params *params;
+ const struct bcmgenet_hw_params *params;
u32 reg;
u8 major;
u16 gphy_rev;
- if (GENET_IS_V5(priv) || GENET_IS_V4(priv)) {
- bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
- genet_dma_ring_regs = genet_dma_ring_regs_v4;
- } else if (GENET_IS_V3(priv)) {
+ /* default to latest values */
+ params = &bcmgenet_hw_params_v4;
+ bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
+ genet_dma_ring_regs = genet_dma_ring_regs_v4;
+ if (GENET_IS_V3(priv)) {
+ params = &bcmgenet_hw_params_v3;
bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
genet_dma_ring_regs = genet_dma_ring_regs_v123;
} else if (GENET_IS_V2(priv)) {
+ params = &bcmgenet_hw_params_v2;
bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
genet_dma_ring_regs = genet_dma_ring_regs_v123;
} else if (GENET_IS_V1(priv)) {
+ params = &bcmgenet_hw_params_v1;
bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
genet_dma_ring_regs = genet_dma_ring_regs_v123;
}
-
- /* enum genet_version starts at 1 */
- priv->hw_params = &bcmgenet_hw_params[priv->version];
- params = priv->hw_params;
+ priv->hw_params = params;
/* Read GENET HW version */
reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
major = (reg >> 24 & 0x0f);
- if (major == 6)
+ if (major == 6 || major == 7)
major = 5;
else if (major == 5)
major = 4;
@@ -3887,7 +3841,7 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
}
#ifdef CONFIG_PHYS_ADDR_T_64BIT
- if (!(params->flags & GENET_HAS_40BITS))
+ if (!bcmgenet_has_40bits(priv))
pr_warn("GENET does not support 40-bits PA\n");
#endif
@@ -3912,7 +3866,7 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
struct bcmgenet_plat_data {
enum bcmgenet_version version;
u32 dma_max_burst_length;
- bool ephy_16nm;
+ u32 flags;
};
static const struct bcmgenet_plat_data v1_plat_data = {
@@ -3923,32 +3877,43 @@ static const struct bcmgenet_plat_data v1_plat_data = {
static const struct bcmgenet_plat_data v2_plat_data = {
.version = GENET_V2,
.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
+ .flags = GENET_HAS_EXT,
};
static const struct bcmgenet_plat_data v3_plat_data = {
.version = GENET_V3,
.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
+ .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR |
+ GENET_HAS_MOCA_LINK_DET,
};
static const struct bcmgenet_plat_data v4_plat_data = {
.version = GENET_V4,
.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
+ .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
+ GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
};
static const struct bcmgenet_plat_data v5_plat_data = {
.version = GENET_V5,
.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
+ .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
+ GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
};
static const struct bcmgenet_plat_data bcm2711_plat_data = {
.version = GENET_V5,
.dma_max_burst_length = 0x08,
+ .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
+ GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
};
static const struct bcmgenet_plat_data bcm7712_plat_data = {
.version = GENET_V5,
.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
- .ephy_16nm = true,
+ .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
+ GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET |
+ GENET_HAS_EPHY_16NM,
};
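
With ephy_16nm folded into a generic flags word, per-SoC capabilities now live entirely in the platform data. A hypothetical new entry (name and feature mix invented purely for illustration) would be declared the same way:

static const struct bcmgenet_plat_data bcm_example_plat_data = {
        .version = GENET_V5,
        .dma_max_burst_length = DMA_MAX_BURST_LENGTH,
        .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
                 GENET_HAS_MDIO_INTR | GENET_HAS_EPHY_16NM,
};
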
static const struct of_device_id bcmgenet_match[] = {
@@ -3965,7 +3930,6 @@ MODULE_DEVICE_TABLE(of, bcmgenet_match);
static int bcmgenet_probe(struct platform_device *pdev)
{
- struct bcmgenet_platform_data *pd = pdev->dev.platform_data;
const struct bcmgenet_plat_data *pdata;
struct bcmgenet_priv *priv;
struct net_device *dev;
@@ -4003,6 +3967,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
goto err;
}
+ spin_lock_init(&priv->reg_lock);
spin_lock_init(&priv->lock);
/* Set default pause parameters */
@@ -4024,6 +3989,8 @@ static int bcmgenet_probe(struct platform_device *pdev)
dev->hw_features |= dev->features;
dev->vlan_features |= dev->features;
+ netdev_sw_irq_coalesce_default_on(dev);
+
/* Request the WOL interrupt and advertise suspend if available */
priv->wol_irq_disabled = true;
if (priv->wol_irq > 0) {
@@ -4045,10 +4012,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
if (pdata) {
priv->version = pdata->version;
priv->dma_max_burst_length = pdata->dma_max_burst_length;
- priv->ephy_16nm = pdata->ephy_16nm;
- } else {
- priv->version = pd->genet_version;
- priv->dma_max_burst_length = DMA_MAX_BURST_LENGTH;
+ priv->flags = pdata->flags;
}
priv->clk = devm_clk_get_optional(&priv->pdev->dev, "enet");
@@ -4065,7 +4029,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
bcmgenet_set_hw_params(priv);
err = -EIO;
- if (priv->hw_params->flags & GENET_HAS_40BITS)
+ if (bcmgenet_has_40bits(priv))
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
if (err)
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
@@ -4098,16 +4062,13 @@ static int bcmgenet_probe(struct platform_device *pdev)
if (device_get_phy_mode(&pdev->dev) == PHY_INTERFACE_MODE_INTERNAL)
bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
- if (pd && !IS_ERR_OR_NULL(pd->mac_address))
- eth_hw_addr_set(dev, pd->mac_address);
- else
- if (device_get_ethdev_address(&pdev->dev, dev))
- if (has_acpi_companion(&pdev->dev)) {
- u8 addr[ETH_ALEN];
+ if (device_get_ethdev_address(&pdev->dev, dev))
+ if (has_acpi_companion(&pdev->dev)) {
+ u8 addr[ETH_ALEN];
- bcmgenet_get_hw_addr(priv, addr);
- eth_hw_addr_set(dev, addr);
- }
+ bcmgenet_get_hw_addr(priv, addr);
+ eth_hw_addr_set(dev, addr);
+ }
if (!is_valid_ether_addr(dev->dev_addr)) {
dev_warn(&pdev->dev, "using random Ethernet MAC\n");
@@ -4120,16 +4081,19 @@ static int bcmgenet_probe(struct platform_device *pdev)
if (err)
goto err_clk_disable;
- /* setup number of real queues + 1 (GENET_V1 has 0 hardware queues
- * just the ring 16 descriptor based TX
- */
+ /* setup number of real queues + 1 */
netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);
/* Set default coalescing parameters */
- for (i = 0; i < priv->hw_params->rx_queues; i++)
+ for (i = 0; i <= priv->hw_params->rx_queues; i++)
priv->rx_rings[i].rx_max_coalesced_frames = 1;
- priv->rx_rings[DESC_INDEX].rx_max_coalesced_frames = 1;
+
+ /* Initialize u64 stats seq counter for 32bit machines */
+ for (i = 0; i <= priv->hw_params->rx_queues; i++)
+ u64_stats_init(&priv->rx_rings[i].stats64.syncp);
+ for (i = 0; i <= priv->hw_params->tx_queues; i++)
+ u64_stats_init(&priv->tx_rings[i].stats64.syncp);
/* libphy will determine the link state */
netif_carrier_off(dev);
@@ -4193,9 +4157,22 @@ static int bcmgenet_resume_noirq(struct device *d)
reg = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT);
if (reg & UMAC_IRQ_WAKE_EVENT)
pm_wakeup_event(&priv->pdev->dev, 0);
+
+ /* From WOL-enabled suspend, switch to regular clock */
+ if (!bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC))
+ return 0;
+
+ /* Failed so fall through to reset MAC */
}
- bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_WAKE_EVENT, INTRL2_CPU_CLEAR);
+ /* If this is an internal GPHY, power it back on now, before UniMAC is
+ * brought out of reset as absolutely no UniMAC activity is allowed
+ */
+ if (priv->internal_phy)
+ bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
+
+ /* take MAC out of reset */
+ bcmgenet_umac_reset(priv);
return 0;
}
@@ -4205,23 +4182,46 @@ static int bcmgenet_resume(struct device *d)
struct net_device *dev = dev_get_drvdata(d);
struct bcmgenet_priv *priv = netdev_priv(dev);
struct bcmgenet_rxnfc_rule *rule;
- unsigned long dma_ctrl;
int ret;
+ u32 reg;
if (!netif_running(dev))
return 0;
- /* From WOL-enabled suspend, switch to regular clock */
- if (device_may_wakeup(d) && priv->wolopts)
- bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
-
- /* If this is an internal GPHY, power it back on now, before UniMAC is
- * brought out of reset as absolutely no UniMAC activity is allowed
- */
- if (priv->internal_phy)
- bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
-
- bcmgenet_umac_reset(priv);
+ if (device_may_wakeup(d) && priv->wolopts) {
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ if (reg & CMD_RX_EN) {
+ /* Successfully exited WoL, just resume data flows */
+ list_for_each_entry(rule, &priv->rxnfc_list, list)
+ if (rule->state == BCMGENET_RXNFC_STATE_ENABLED)
+ bcmgenet_hfb_enable_filter(priv,
+ rule->fs.location + 1);
+ bcmgenet_hfb_enable_filter(priv, 0);
+ bcmgenet_set_rx_mode(dev);
+ bcmgenet_enable_rx_napi(priv);
+
+ /* Reinitialize Tx flows */
+ bcmgenet_tdma_disable(priv);
+ bcmgenet_init_tx_queues(priv->dev);
+ reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+ reg |= DMA_EN;
+ bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+ bcmgenet_enable_tx_napi(priv);
+
+ bcmgenet_link_intr_enable(priv);
+ phy_start_machine(dev->phydev);
+
+ netif_device_attach(dev);
+ enable_irq(priv->irq1);
+ return 0;
+ }
+ /* MAC was reset so complete bcmgenet_netif_stop() */
+ umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, false);
+ bcmgenet_rdma_disable(priv);
+ bcmgenet_intr_disable(priv);
+ bcmgenet_fini_dma(priv);
+ enable_irq(priv->irq1);
+ }
init_umac(priv);
@@ -4242,19 +4242,13 @@ static int bcmgenet_resume(struct device *d)
if (rule->state != BCMGENET_RXNFC_STATE_UNUSED)
bcmgenet_hfb_create_rxnfc_filter(priv, rule);
- /* Disable RX/TX DMA and flush TX queues */
- dma_ctrl = bcmgenet_dma_disable(priv, false);
-
/* Reinitialize TDMA and RDMA and SW housekeeping */
- ret = bcmgenet_init_dma(priv);
+ ret = bcmgenet_init_dma(priv, false);
if (ret) {
netdev_err(dev, "failed to initialize DMA\n");
goto out_clk_disable;
}
- /* Always enable ring 16 - descriptor ring */
- bcmgenet_enable_dma(priv, dma_ctrl);
-
if (!device_may_wakeup(d))
phy_resume(dev->phydev);
@@ -4275,19 +4269,52 @@ static int bcmgenet_suspend(struct device *d)
{
struct net_device *dev = dev_get_drvdata(d);
struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct bcmgenet_rxnfc_rule *rule;
+ u32 reg, hfb_enable = 0;
if (!netif_running(dev))
return 0;
netif_device_detach(dev);
- bcmgenet_netif_stop(dev, true);
+ if (device_may_wakeup(d) && priv->wolopts) {
+ netif_tx_disable(dev);
+
+ /* Suspend non-wake Rx data flows */
+ if (priv->wolopts & WAKE_FILTER)
+ list_for_each_entry(rule, &priv->rxnfc_list, list)
+ if (rule->fs.ring_cookie == RX_CLS_FLOW_WAKE &&
+ rule->state == BCMGENET_RXNFC_STATE_ENABLED)
+ hfb_enable |= 1 << rule->fs.location;
+ reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+ if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) {
+ reg &= ~RBUF_HFB_FILTER_EN_MASK;
+ reg |= hfb_enable << (RBUF_HFB_FILTER_EN_SHIFT + 1);
+ } else {
+ bcmgenet_hfb_reg_writel(priv, hfb_enable << 1,
+ HFB_FLT_ENABLE_V3PLUS + 4);
+ }
+ if (!hfb_enable)
+ reg &= ~RBUF_HFB_EN;
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
- if (!device_may_wakeup(d))
- phy_suspend(dev->phydev);
+ /* Clear any old filter matches so only new matches wake */
+ bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
+ bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
- /* Disable filtering */
- bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL);
+ if (-ETIMEDOUT == bcmgenet_tdma_disable(priv))
+ netdev_warn(priv->dev,
+ "Timed out while disabling TX DMA\n");
+
+ bcmgenet_disable_tx_napi(priv);
+ bcmgenet_disable_rx_napi(priv);
+ disable_irq(priv->irq1);
+ bcmgenet_tx_reclaim_all(dev);
+ bcmgenet_fini_tx_napi(priv);
+ } else {
+ /* Teardown the interface */
+ bcmgenet_netif_stop(dev, true);
+ }
return 0;
}
@@ -4338,7 +4365,7 @@ MODULE_DEVICE_TABLE(acpi, genet_acpi_match);
static struct platform_driver bcmgenet_driver = {
.probe = bcmgenet_probe,
- .remove_new = bcmgenet_remove,
+ .remove = bcmgenet_remove,
.shutdown = bcmgenet_shutdown,
.driver = {
.name = "bcmgenet",
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 7523b60b3c1c..5ec3979779ec 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2014-2020 Broadcom
+ * Copyright (c) 2014-2025 Broadcom
*/
#ifndef __BCMGENET_H__
@@ -18,6 +18,9 @@
#include "../unimac.h"
+/* Maximum number of hardware queues, downsized if needed */
+#define GENET_MAX_MQ_CNT 4
+
/* total number of Buffer Descriptors, same for Rx/Tx */
#define TOTAL_DESC 256
@@ -152,6 +155,30 @@ struct bcmgenet_mib_counters {
u32 tx_realloc_tsb_failed;
};
+struct bcmgenet_tx_stats64 {
+ struct u64_stats_sync syncp;
+ u64_stats_t packets;
+ u64_stats_t bytes;
+ u64_stats_t errors;
+ u64_stats_t dropped;
+};
+
+struct bcmgenet_rx_stats64 {
+ struct u64_stats_sync syncp;
+ u64_stats_t bytes;
+ u64_stats_t packets;
+ u64_stats_t errors;
+ u64_stats_t dropped;
+ u64_stats_t multicast;
+ u64_stats_t broadcast;
+ u64_stats_t missed;
+ u64_stats_t length_errors;
+ u64_stats_t over_errors;
+ u64_stats_t crc_errors;
+ u64_stats_t frame_errors;
+ u64_stats_t fragmented_errors;
+};
+
#define UMAC_MIB_START 0x400
#define UMAC_MDIO_CMD 0x614
@@ -271,6 +298,8 @@ struct bcmgenet_mib_counters {
/* Only valid for GENETv3+ */
#define UMAC_IRQ_MDIO_DONE (1 << 23)
#define UMAC_IRQ_MDIO_ERROR (1 << 24)
+#define UMAC_IRQ_MDIO_EVENT (UMAC_IRQ_MDIO_DONE | \
+ UMAC_IRQ_MDIO_ERROR)
/* INTRL2 instance 1 definitions */
#define UMAC_IRQ1_TX_INTR_MASK 0xFFFF
@@ -476,6 +505,7 @@ enum bcmgenet_version {
#define GENET_HAS_EXT (1 << 1)
#define GENET_HAS_MDIO_INTR (1 << 2)
#define GENET_HAS_MOCA_LINK_DET (1 << 3)
+#define GENET_HAS_EPHY_16NM (1 << 4)
/* BCMGENET hardware parameters, keep this structure nicely aligned
* since it is going to be used in hot paths
@@ -496,7 +526,6 @@ struct bcmgenet_hw_params {
u32 rdma_offset;
u32 tdma_offset;
u32 words_per_bd;
- u32 flags;
};
struct bcmgenet_skb_cb {
@@ -510,10 +539,8 @@ struct bcmgenet_skb_cb {
struct bcmgenet_tx_ring {
spinlock_t lock; /* ring lock */
struct napi_struct napi; /* NAPI per tx queue */
- unsigned long packets;
- unsigned long bytes;
+ struct bcmgenet_tx_stats64 stats64;
unsigned int index; /* ring index */
- unsigned int queue; /* queue index */
struct enet_cb *cbs; /* tx ring buffer control block*/
unsigned int size; /* size of each tx ring */
unsigned int clean_ptr; /* Tx ring clean pointer */
@@ -523,8 +550,6 @@ struct bcmgenet_tx_ring {
unsigned int prod_index; /* Tx ring producer index SW copy */
unsigned int cb_ptr; /* Tx ring initial CB ptr */
unsigned int end_ptr; /* Tx ring end CB ptr */
- void (*int_enable)(struct bcmgenet_tx_ring *);
- void (*int_disable)(struct bcmgenet_tx_ring *);
struct bcmgenet_priv *priv;
};
@@ -538,10 +563,7 @@ struct bcmgenet_net_dim {
struct bcmgenet_rx_ring {
struct napi_struct napi; /* Rx NAPI struct */
- unsigned long bytes;
- unsigned long packets;
- unsigned long errors;
- unsigned long dropped;
+ struct bcmgenet_rx_stats64 stats64;
unsigned int index; /* Rx ring index */
struct enet_cb *cbs; /* Rx ring buffer control block */
unsigned int size; /* Rx ring size */
@@ -553,8 +575,6 @@ struct bcmgenet_rx_ring {
struct bcmgenet_net_dim dim;
u32 rx_max_coalesced_frames;
u32 rx_coalesce_usecs;
- void (*int_enable)(struct bcmgenet_rx_ring *);
- void (*int_disable)(struct bcmgenet_rx_ring *);
struct bcmgenet_priv *priv;
};
@@ -573,6 +593,8 @@ struct bcmgenet_rxnfc_rule {
/* device context */
struct bcmgenet_priv {
void __iomem *base;
+ /* reg_lock: lock to serialize access to shared registers */
+ spinlock_t reg_lock;
enum bcmgenet_version version;
struct net_device *dev;
@@ -581,7 +603,7 @@ struct bcmgenet_priv {
struct enet_cb *tx_cbs;
unsigned int num_tx_bds;
- struct bcmgenet_tx_ring tx_rings[DESC_INDEX + 1];
+ struct bcmgenet_tx_ring tx_rings[GENET_MAX_MQ_CNT + 1];
/* receive variables */
void __iomem *rx_bds;
@@ -591,10 +613,11 @@ struct bcmgenet_priv {
struct bcmgenet_rxnfc_rule rxnfc_rules[MAX_NUM_OF_FS_RULES];
struct list_head rxnfc_list;
- struct bcmgenet_rx_ring rx_rings[DESC_INDEX + 1];
+ struct bcmgenet_rx_ring rx_rings[GENET_MAX_MQ_CNT + 1];
/* other misc variables */
- struct bcmgenet_hw_params *hw_params;
+ const struct bcmgenet_hw_params *hw_params;
+ u32 flags;
unsigned autoneg_pause:1;
unsigned tx_pause:1;
unsigned rx_pause:1;
@@ -613,7 +636,6 @@ struct bcmgenet_priv {
phy_interface_t phy_interface;
int phy_addr;
int ext_phy;
- bool ephy_16nm;
/* Interrupt variables */
struct work_struct bcmgenet_irq_work;
@@ -641,13 +663,37 @@ struct bcmgenet_priv {
struct clk *clk_wol;
u32 wolopts;
u8 sopass[SOPASS_MAX];
- bool wol_active;
struct bcmgenet_mib_counters mib;
struct ethtool_keee eee;
};
+static inline bool bcmgenet_has_40bits(struct bcmgenet_priv *priv)
+{
+ return !!(priv->flags & GENET_HAS_40BITS);
+}
+
+static inline bool bcmgenet_has_ext(struct bcmgenet_priv *priv)
+{
+ return !!(priv->flags & GENET_HAS_EXT);
+}
+
+static inline bool bcmgenet_has_mdio_intr(struct bcmgenet_priv *priv)
+{
+ return !!(priv->flags & GENET_HAS_MDIO_INTR);
+}
+
+static inline bool bcmgenet_has_moca_link_det(struct bcmgenet_priv *priv)
+{
+ return !!(priv->flags & GENET_HAS_MOCA_LINK_DET);
+}
+
+static inline bool bcmgenet_has_ephy_16nm(struct bcmgenet_priv *priv)
+{
+ return !!(priv->flags & GENET_HAS_EPHY_16NM);
+}
+
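
Call sites then test capabilities through these helpers rather than reaching into hw_params->flags; an illustrative use, mirroring the conversions elsewhere in this diff:

        if (bcmgenet_has_mdio_intr(priv))
                bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_MDIO_EVENT,
                                         INTRL2_CPU_MASK_CLEAR);
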
#define GENET_IO_MACRO(name, offset) \
static inline u32 bcmgenet_##name##_readl(struct bcmgenet_priv *priv, \
u32 off) \
@@ -700,8 +746,8 @@ void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol);
int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol);
int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
enum bcmgenet_power_mode mode);
-void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
- enum bcmgenet_power_mode mode);
+int bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
+ enum bcmgenet_power_mode mode);
void bcmgenet_eee_enable_set(struct net_device *dev, bool enable,
bool tx_lpi_enabled);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index 7a41cad5788f..8fb551288298 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -2,7 +2,7 @@
/*
* Broadcom GENET (Gigabit Ethernet) Wake-on-LAN support
*
- * Copyright (c) 2014-2020 Broadcom
+ * Copyright (c) 2014-2025 Broadcom
*/
#define pr_fmt(fmt) "bcmgenet_wol: " fmt
@@ -41,23 +41,27 @@ void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct device *kdev = &priv->pdev->dev;
+ u32 phy_wolopts = 0;
if (dev->phydev) {
phy_ethtool_get_wol(dev->phydev, wol);
- if (wol->supported)
- return;
+ phy_wolopts = wol->wolopts;
}
- if (!device_can_wakeup(kdev)) {
- wol->supported = 0;
- wol->wolopts = 0;
+ /* MAC is not wake-up capable, return what the PHY does */
+ if (!device_can_wakeup(kdev))
return;
- }
- wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
- wol->wolopts = priv->wolopts;
- memset(wol->sopass, 0, sizeof(wol->sopass));
+ /* Overlay MAC capabilities with those of the PHY queried before */
+ wol->supported |= WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
+ wol->wolopts |= priv->wolopts;
+
+ /* Return the PHY configured magic password */
+ if (phy_wolopts & WAKE_MAGICSECURE)
+ return;
+ /* Otherwise the MAC one */
+ memset(wol->sopass, 0, sizeof(wol->sopass));
if (wol->wolopts & WAKE_MAGICSECURE)
memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass));
}
@@ -74,7 +78,7 @@ int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
/* Try Wake-on-LAN from the PHY first */
if (dev->phydev) {
ret = phy_ethtool_set_wol(dev->phydev, wol);
- if (ret != -EOPNOTSUPP)
+ if (ret != -EOPNOTSUPP && wol->wolopts)
return ret;
}
@@ -141,8 +145,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
enum bcmgenet_power_mode mode)
{
struct net_device *dev = priv->dev;
- struct bcmgenet_rxnfc_rule *rule;
- u32 reg, hfb_ctrl_reg, hfb_enable = 0;
+ u32 reg, hfb_ctrl_reg;
int retries = 0;
if (mode != GENET_POWER_WOL_MAGIC) {
@@ -150,16 +153,6 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
return -EINVAL;
}
- /* Can't suspend with WoL if MAC is still in reset */
- reg = bcmgenet_umac_readl(priv, UMAC_CMD);
- if (reg & CMD_SW_RESET)
- reg &= ~CMD_SW_RESET;
-
- /* disable RX */
- reg &= ~CMD_RX_EN;
- bcmgenet_umac_writel(priv, reg, UMAC_CMD);
- mdelay(10);
-
if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) {
reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
reg |= MPD_EN;
@@ -171,13 +164,8 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
}
hfb_ctrl_reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
- if (priv->wolopts & WAKE_FILTER) {
- list_for_each_entry(rule, &priv->rxnfc_list, list)
- if (rule->fs.ring_cookie == RX_CLS_FLOW_WAKE)
- hfb_enable |= (1 << rule->fs.location);
- reg = (hfb_ctrl_reg & ~RBUF_HFB_EN) | RBUF_ACPI_EN;
- bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
- }
+ reg = hfb_ctrl_reg | RBUF_ACPI_EN;
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
/* Do not leave UniMAC in MPD mode only */
retries = bcmgenet_poll_wol_status(priv);
@@ -192,27 +180,30 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
netif_dbg(priv, wol, dev, "MPD WOL-ready status set after %d msec\n",
retries);
- clk_prepare_enable(priv->clk_wol);
- priv->wol_active = 1;
+ /* Disable phy status updates while suspending */
+ mutex_lock(&dev->phydev->lock);
+ dev->phydev->state = PHY_READY;
+ mutex_unlock(&dev->phydev->lock);
- if (hfb_enable) {
- bcmgenet_hfb_reg_writel(priv, hfb_enable,
- HFB_FLT_ENABLE_V3PLUS + 4);
- hfb_ctrl_reg = RBUF_HFB_EN | RBUF_ACPI_EN;
- bcmgenet_hfb_reg_writel(priv, hfb_ctrl_reg, HFB_CTRL);
- }
+ clk_prepare_enable(priv->clk_wol);
/* Enable CRC forward */
+ spin_lock_bh(&priv->reg_lock);
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
priv->crc_fwd_en = 1;
reg |= CMD_CRC_FWD;
+ /* Can't suspend with WoL if MAC is still in reset */
+ if (reg & CMD_SW_RESET)
+ reg &= ~CMD_SW_RESET;
+
/* Receiver must be enabled for WOL MP detection */
reg |= CMD_RX_EN;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock_bh(&priv->reg_lock);
reg = UMAC_IRQ_MPD_R;
- if (hfb_enable)
+ if (hfb_ctrl_reg & RBUF_HFB_EN)
reg |= UMAC_IRQ_HFB_SM | UMAC_IRQ_HFB_MM;
bcmgenet_intrl2_0_writel(priv, reg, INTRL2_CPU_MASK_CLEAR);
@@ -220,43 +211,57 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
return 0;
}
-void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
- enum bcmgenet_power_mode mode)
+int bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
+ enum bcmgenet_power_mode mode)
{
+ struct net_device *dev = priv->dev;
u32 reg;
if (mode != GENET_POWER_WOL_MAGIC) {
netif_err(priv, wol, priv->dev, "invalid mode: %d\n", mode);
- return;
+ return -EINVAL;
}
- if (!priv->wol_active)
- return; /* failed to suspend so skip the rest */
-
- priv->wol_active = 0;
clk_disable_unprepare(priv->clk_wol);
priv->crc_fwd_en = 0;
+ bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_WAKE_EVENT,
+ INTRL2_CPU_MASK_SET);
+ if (bcmgenet_has_mdio_intr(priv))
+ bcmgenet_intrl2_0_writel(priv,
+ UMAC_IRQ_MDIO_EVENT,
+ INTRL2_CPU_MASK_CLEAR);
+
/* Disable Magic Packet Detection */
if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) {
reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
if (!(reg & MPD_EN))
- return; /* already reset so skip the rest */
+ return -EPERM; /* already reset so skip the rest */
reg &= ~(MPD_EN | MPD_PW_EN);
bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
}
- /* Disable WAKE_FILTER Detection */
- if (priv->wolopts & WAKE_FILTER) {
- reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
- if (!(reg & RBUF_ACPI_EN))
- return; /* already reset so skip the rest */
- reg &= ~(RBUF_HFB_EN | RBUF_ACPI_EN);
- bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
- }
+ /* Disable ACPI mode */
+ reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+ if (!(reg & RBUF_ACPI_EN))
+ return -EPERM; /* already reset so skip the rest */
+ reg &= ~RBUF_ACPI_EN;
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
/* Disable CRC Forward */
+ spin_lock_bh(&priv->reg_lock);
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
reg &= ~CMD_CRC_FWD;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock_bh(&priv->reg_lock);
+
+ /* Resume link status tracking */
+ mutex_lock(&dev->phydev->lock);
+ if (dev->phydev->link)
+ dev->phydev->state = PHY_RUNNING;
+ else
+ dev->phydev->state = PHY_NOLINK;
+ mutex_unlock(&dev->phydev->lock);
+
+ return 0;
}
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 9ada89355747..38f854b94a79 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -2,7 +2,7 @@
/*
* Broadcom GENET MDIO routines
*
- * Copyright (c) 2014-2017 Broadcom
+ * Copyright (c) 2014-2025 Broadcom
*/
#include <linux/acpi.h>
@@ -20,7 +20,6 @@
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
-#include <linux/platform_data/bcmgenet.h>
#include <linux/platform_data/mdio-bcm-unimac.h>
#include "bcmgenet.h"
@@ -76,6 +75,7 @@ static void bcmgenet_mac_config(struct net_device *dev)
reg |= RGMII_LINK;
bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+ spin_lock_bh(&priv->reg_lock);
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
CMD_HD_EN |
@@ -88,6 +88,7 @@ static void bcmgenet_mac_config(struct net_device *dev)
reg |= CMD_TX_EN | CMD_RX_EN;
}
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock_bh(&priv->reg_lock);
active = phy_init_eee(phydev, 0) >= 0;
bcmgenet_eee_enable_set(dev,
@@ -152,7 +153,7 @@ void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
u32 reg = 0;
/* EXT_GPHY_CTRL is only valid for GENETv4 and onward */
- if (GENET_IS_V4(priv) || priv->ephy_16nm) {
+ if (GENET_IS_V4(priv) || bcmgenet_has_ephy_16nm(priv)) {
reg = bcmgenet_ext_readl(priv, EXT_GPHY_CTRL);
if (enable) {
reg &= ~EXT_CK25_DIS;
@@ -167,10 +168,15 @@ void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
reg &= ~EXT_GPHY_RESET;
} else {
+ reg |= EXT_GPHY_RESET;
+ bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
+ mdelay(1);
+
reg |= EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN |
- EXT_GPHY_RESET | EXT_CFG_IDDQ_GLOBAL_PWR;
+ EXT_CFG_IDDQ_GLOBAL_PWR;
bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
mdelay(1);
+
reg |= EXT_CK25_DIS;
}
bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
@@ -182,7 +188,7 @@ void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
{
- if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
+ if (bcmgenet_has_moca_link_det(priv))
fixed_phy_set_link_update(priv->dev->phydev,
bcmgenet_fixed_phy_link_update);
}
@@ -275,6 +281,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
* block for the interface to work, unconditionally clear the
* Out-of-band disable since we do not need it.
*/
+ mutex_lock(&phydev->lock);
reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
reg &= ~OOB_DISABLE;
if (priv->ext_phy) {
@@ -286,6 +293,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
reg |= RGMII_MODE_EN;
}
bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+ mutex_unlock(&phydev->lock);
if (init)
dev_info(kdev, "configuring instance for %s\n", phy_name);
@@ -427,23 +435,6 @@ static struct device_node *bcmgenet_mii_of_find_mdio(struct bcmgenet_priv *priv)
return priv->mdio_dn;
}
-static void bcmgenet_mii_pdata_init(struct bcmgenet_priv *priv,
- struct unimac_mdio_pdata *ppd)
-{
- struct device *kdev = &priv->pdev->dev;
- struct bcmgenet_platform_data *pd = kdev->platform_data;
-
- if (pd->phy_interface != PHY_INTERFACE_MODE_MOCA && pd->mdio_enabled) {
- /*
- * Internal or external PHY with MDIO access
- */
- if (pd->phy_address >= 0 && pd->phy_address < PHY_MAX_ADDR)
- ppd->phy_mask = 1 << pd->phy_address;
- else
- ppd->phy_mask = 0;
- }
-}
-
static int bcmgenet_mii_wait(void *wait_func_data)
{
struct bcmgenet_priv *priv = wait_func_data;
@@ -458,7 +449,6 @@ static int bcmgenet_mii_wait(void *wait_func_data)
static int bcmgenet_mii_register(struct bcmgenet_priv *priv)
{
struct platform_device *pdev = priv->pdev;
- struct bcmgenet_platform_data *pdata = pdev->dev.platform_data;
struct device_node *dn = pdev->dev.of_node;
struct unimac_mdio_pdata ppd;
struct platform_device *ppdev;
@@ -502,8 +492,6 @@ static int bcmgenet_mii_register(struct bcmgenet_priv *priv)
ppdev->dev.parent = &pdev->dev;
if (dn)
ppdev->dev.of_node = bcmgenet_mii_of_find_mdio(priv);
- else if (pdata)
- bcmgenet_mii_pdata_init(priv, &ppd);
else
ppd.phy_mask = ~0;
@@ -585,58 +573,6 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
return 0;
}
-static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv)
-{
- struct device *kdev = &priv->pdev->dev;
- struct bcmgenet_platform_data *pd = kdev->platform_data;
- char phy_name[MII_BUS_ID_SIZE + 3];
- char mdio_bus_id[MII_BUS_ID_SIZE];
- struct phy_device *phydev;
-
- snprintf(mdio_bus_id, MII_BUS_ID_SIZE, "%s-%d",
- UNIMAC_MDIO_DRV_NAME, priv->pdev->id);
-
- if (pd->phy_interface != PHY_INTERFACE_MODE_MOCA && pd->mdio_enabled) {
- snprintf(phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT,
- mdio_bus_id, pd->phy_address);
-
- /*
- * Internal or external PHY with MDIO access
- */
- phydev = phy_attach(priv->dev, phy_name, pd->phy_interface);
- if (IS_ERR(phydev)) {
- dev_err(kdev, "failed to register PHY device\n");
- return PTR_ERR(phydev);
- }
- } else {
- /*
- * MoCA port or no MDIO access.
- * Use fixed PHY to represent the link layer.
- */
- struct fixed_phy_status fphy_status = {
- .link = 1,
- .speed = pd->phy_speed,
- .duplex = pd->phy_duplex,
- .pause = 0,
- .asym_pause = 0,
- };
-
- phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
- if (IS_ERR(phydev)) {
- dev_err(kdev, "failed to register fixed PHY device\n");
- return PTR_ERR(phydev);
- }
-
- /* Make sure we initialize MoCA PHYs with a link down */
- phydev->link = 0;
-
- }
-
- priv->phy_interface = pd->phy_interface;
-
- return 0;
-}
-
static int bcmgenet_mii_bus_init(struct bcmgenet_priv *priv)
{
struct device *kdev = &priv->pdev->dev;
@@ -647,7 +583,7 @@ static int bcmgenet_mii_bus_init(struct bcmgenet_priv *priv)
else if (has_acpi_companion(kdev))
return bcmgenet_phy_interface_init(priv);
else
- return bcmgenet_mii_pd_init(priv);
+ return -EINVAL;
}
int bcmgenet_mii_init(struct net_device *dev)
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index fcf8485f3446..30865fe03eeb 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -2608,7 +2608,7 @@ static void sbmac_remove(struct platform_device *pldev)
static struct platform_driver sbmac_driver = {
.probe = sbmac_probe,
- .remove_new = sbmac_remove,
+ .remove = sbmac_remove,
.driver = {
.name = sbmac_string,
},
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 62ff4381ac83..75f66587983d 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -54,7 +54,8 @@
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
-#include <linux/crc32poly.h>
+#include <linux/crc32.h>
+#include <linux/dmi.h>
#include <net/checksum.h>
#include <net/gso.h>
@@ -3737,7 +3738,7 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
}
do {
- u32 *fw_data = (u32 *)(fw_hdr + 1);
+ __be32 *fw_data = (__be32 *)(fw_hdr + 1);
for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
write_op(tp, cpu_scratch_base +
(be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
@@ -4019,7 +4020,7 @@ static int tg3_power_up(struct tg3 *tp)
static int tg3_setup_phy(struct tg3 *, bool);
-static int tg3_power_down_prepare(struct tg3 *tp)
+static void tg3_power_down_prepare(struct tg3 *tp)
{
u32 misc_host_ctrl;
bool device_should_wake, do_low_power;
@@ -4263,7 +4264,7 @@ static int tg3_power_down_prepare(struct tg3 *tp)
tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
- return 0;
+ return;
}
static void tg3_power_down(struct tg3 *tp)
@@ -5802,7 +5803,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
u32 current_speed = SPEED_UNKNOWN;
u8 current_duplex = DUPLEX_UNKNOWN;
bool current_link_up = false;
- u32 local_adv, remote_adv, sgsr;
+ u32 local_adv = 0, remote_adv = 0, sgsr;
if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
tg3_asic_rev(tp) == ASIC_REV_5720) &&
@@ -5943,9 +5944,6 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
else
current_duplex = DUPLEX_HALF;
- local_adv = 0;
- remote_adv = 0;
-
if (bmcr & BMCR_ANENABLE) {
u32 common;
@@ -6141,13 +6139,11 @@ static void tg3_refclk_write(struct tg3 *tp, u64 newval)
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
static inline void tg3_full_unlock(struct tg3 *tp);
-static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
+static int tg3_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info)
{
struct tg3 *tp = netdev_priv(dev);
- info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
if (tg3_flag(tp, PTP_CAPABLE)) {
info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
@@ -6157,8 +6153,6 @@ static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
if (tp->ptp_clock)
info->phc_index = ptp_clock_index(tp->ptp_clock);
- else
- info->phc_index = -1;
info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
@@ -6689,7 +6683,7 @@ static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
* We only need to fill in the address because the other members
* of the RX descriptor are invariant, see tg3_init_rings.
*
- * Note the purposeful assymetry of cpu vs. chip accesses. For
+ * Note the purposeful asymmetry of cpu vs. chip accesses. For
* posting buffers we only dirty the first cache line of the RX
* descriptor (containing the address). Whereas for the RX status
* buffers the cpu only reads the last cacheline of the RX descriptor
@@ -7399,27 +7393,61 @@ tx_recovery:
static void tg3_napi_disable(struct tg3 *tp)
{
+ int txq_idx = tp->txq_cnt - 1;
+ int rxq_idx = tp->rxq_cnt - 1;
+ struct tg3_napi *tnapi;
int i;
- for (i = tp->irq_cnt - 1; i >= 0; i--)
- napi_disable(&tp->napi[i].napi);
+ for (i = tp->irq_cnt - 1; i >= 0; i--) {
+ tnapi = &tp->napi[i];
+ if (tnapi->tx_buffers) {
+ netif_queue_set_napi(tp->dev, txq_idx,
+ NETDEV_QUEUE_TYPE_TX, NULL);
+ txq_idx--;
+ }
+ if (tnapi->rx_rcb) {
+ netif_queue_set_napi(tp->dev, rxq_idx,
+ NETDEV_QUEUE_TYPE_RX, NULL);
+ rxq_idx--;
+ }
+ napi_disable(&tnapi->napi);
+ }
}
static void tg3_napi_enable(struct tg3 *tp)
{
+ int txq_idx = 0, rxq_idx = 0;
+ struct tg3_napi *tnapi;
int i;
- for (i = 0; i < tp->irq_cnt; i++)
- napi_enable(&tp->napi[i].napi);
+ for (i = 0; i < tp->irq_cnt; i++) {
+ tnapi = &tp->napi[i];
+ napi_enable_locked(&tnapi->napi);
+ if (tnapi->tx_buffers) {
+ netif_queue_set_napi(tp->dev, txq_idx,
+ NETDEV_QUEUE_TYPE_TX,
+ &tnapi->napi);
+ txq_idx++;
+ }
+ if (tnapi->rx_rcb) {
+ netif_queue_set_napi(tp->dev, rxq_idx,
+ NETDEV_QUEUE_TYPE_RX,
+ &tnapi->napi);
+ rxq_idx++;
+ }
+ }
}
static void tg3_napi_init(struct tg3 *tp)
{
int i;
- netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll);
- for (i = 1; i < tp->irq_cnt; i++)
- netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix);
+ for (i = 0; i < tp->irq_cnt; i++) {
+ netif_napi_add_locked(tp->dev, &tp->napi[i].napi,
+ i ? tg3_poll_msix : tg3_poll);
+ netif_napi_set_irq_locked(&tp->napi[i].napi,
+ tp->napi[i].irq_vec);
+ }
}
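
The *_locked NAPI helpers and netif_queue_set_napi() expect the netdev instance lock to be held; in this series the callers take it explicitly, as tg3_start() does further down. A condensed sketch of that calling convention, assuming the usual dev/tp locals:

        netdev_lock(dev);       /* instance lock required by *_locked helpers */
        tg3_napi_init(tp);
        tg3_napi_enable(tp);
        netdev_unlock(dev);
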
static void tg3_napi_fini(struct tg3 *tp)
@@ -9778,26 +9806,7 @@ static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
static inline u32 calc_crc(unsigned char *buf, int len)
{
- u32 reg;
- u32 tmp;
- int j, k;
-
- reg = 0xffffffff;
-
- for (j = 0; j < len; j++) {
- reg ^= buf[j];
-
- for (k = 0; k < 8; k++) {
- tmp = reg & 0x01;
-
- reg >>= 1;
-
- if (tmp)
- reg ^= CRC32_POLY_LE;
- }
- }
-
- return ~reg;
+ return ~crc32(~0, buf, len);
}
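
Both the removed bit-by-bit loop and the crc32() call compute the standard reflected CRC-32 (polynomial 0xEDB88320, initial value ~0, final inversion), so behaviour is unchanged. A quick sanity check against the well-known test vector, sketched outside the driver:

        /* CRC-32 of "123456789" is 0xCBF43926 */
        WARN_ON(calc_crc((unsigned char *)"123456789", 9) != 0xCBF43926);
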
static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
@@ -10133,7 +10142,7 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
/* Pseudo-header checksum is done by hardware logic and not
- * the offload processers, so make the chip do the pseudo-
+ * the offload processors, so make the chip do the pseudo-
* header checksums on receive. For transmit it is more
* convenient to do the pseudo-header checksum in software
* as Linux does that on transmit for us in all cases.
@@ -11050,7 +11059,7 @@ static void tg3_chk_missed_msi(struct tg3 *tp)
static void tg3_timer(struct timer_list *t)
{
- struct tg3 *tp = from_timer(tp, t, timer);
+ struct tg3 *tp = timer_container_of(tp, t, timer);
spin_lock(&tp->lock);
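
timer_container_of() is the current name for what from_timer() used to do: recover the enclosing structure from the struct timer_list pointer the timer core passes in. Conceptually (sketch with the same effect, given the timer_list *t argument):

        struct tg3 *tp = container_of(t, struct tg3, timer);
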
@@ -11221,7 +11230,7 @@ static void tg3_timer_start(struct tg3 *tp)
static void tg3_timer_stop(struct tg3 *tp)
{
- del_timer_sync(&tp->timer);
+ timer_delete_sync(&tp->timer);
}
/* Restart hardware after configuration changes, self-test, etc.
@@ -11230,6 +11239,8 @@ static void tg3_timer_stop(struct tg3 *tp)
static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
__releases(tp->lock)
__acquires(tp->lock)
+ __releases(tp->dev->lock)
+ __acquires(tp->dev->lock)
{
int err;
@@ -11242,7 +11253,9 @@ static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
tg3_timer_stop(tp);
tp->irq_sync = 0;
tg3_napi_enable(tp);
+ netdev_unlock(tp->dev);
dev_close(tp->dev);
+ netdev_lock(tp->dev);
tg3_full_lock(tp, 0);
}
return err;
@@ -11270,6 +11283,7 @@ static void tg3_reset_task(struct work_struct *work)
tg3_netif_stop(tp);
+ netdev_lock(tp->dev);
tg3_full_lock(tp, 1);
if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
@@ -11289,12 +11303,14 @@ static void tg3_reset_task(struct work_struct *work)
* call cancel_work_sync() and wait forever.
*/
tg3_flag_clear(tp, RESET_TASK_PENDING);
+ netdev_unlock(tp->dev);
dev_close(tp->dev);
goto out;
}
tg3_netif_start(tp);
tg3_full_unlock(tp);
+ netdev_unlock(tp->dev);
tg3_phy_start(tp);
tg3_flag_clear(tp, RESET_TASK_PENDING);
out:
@@ -11313,18 +11329,17 @@ static int tg3_request_irq(struct tg3 *tp, int irq_num)
else {
name = &tnapi->irq_lbl[0];
if (tnapi->tx_buffers && tnapi->rx_rcb)
- snprintf(name, IFNAMSIZ,
+ snprintf(name, sizeof(tnapi->irq_lbl),
"%s-txrx-%d", tp->dev->name, irq_num);
else if (tnapi->tx_buffers)
- snprintf(name, IFNAMSIZ,
+ snprintf(name, sizeof(tnapi->irq_lbl),
"%s-tx-%d", tp->dev->name, irq_num);
else if (tnapi->rx_rcb)
- snprintf(name, IFNAMSIZ,
+ snprintf(name, sizeof(tnapi->irq_lbl),
"%s-rx-%d", tp->dev->name, irq_num);
else
- snprintf(name, IFNAMSIZ,
+ snprintf(name, sizeof(tnapi->irq_lbl),
"%s-%d", tp->dev->name, irq_num);
- name[IFNAMSIZ-1] = 0;
}
if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
@@ -11655,9 +11670,11 @@ static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
if (err)
goto out_ints_fini;
+ netdev_lock(dev);
tg3_napi_init(tp);
tg3_napi_enable(tp);
+ netdev_unlock(dev);
for (i = 0; i < tp->irq_cnt; i++) {
err = tg3_request_irq(tp, i);
@@ -12541,6 +12558,7 @@ static int tg3_set_ringparam(struct net_device *dev,
irq_sync = 1;
}
+ netdev_lock(dev);
tg3_full_lock(tp, irq_sync);
tp->rx_pending = ering->rx_pending;
@@ -12569,6 +12587,7 @@ static int tg3_set_ringparam(struct net_device *dev,
}
tg3_full_unlock(tp);
+ netdev_unlock(dev);
if (irq_sync && !err)
tg3_phy_start(tp);
@@ -12650,6 +12669,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
irq_sync = 1;
}
+ netdev_lock(dev);
tg3_full_lock(tp, irq_sync);
if (epause->autoneg)
@@ -12679,6 +12699,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
}
tg3_full_unlock(tp);
+ netdev_unlock(dev);
}
tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
@@ -12698,29 +12719,17 @@ static int tg3_get_sset_count(struct net_device *dev, int sset)
}
}
-static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
- u32 *rules __always_unused)
+static u32 tg3_get_rx_ring_count(struct net_device *dev)
{
struct tg3 *tp = netdev_priv(dev);
if (!tg3_flag(tp, SUPPORT_MSIX))
- return -EOPNOTSUPP;
-
- switch (info->cmd) {
- case ETHTOOL_GRXRINGS:
- if (netif_running(tp->dev))
- info->data = tp->rxq_cnt;
- else {
- info->data = num_online_cpus();
- if (info->data > TG3_RSS_MAX_NUM_QS)
- info->data = TG3_RSS_MAX_NUM_QS;
- }
+ return 1;
- return 0;
+ if (netif_running(tp->dev))
+ return tp->rxq_cnt;
- default:
- return -EOPNOTSUPP;
- }
+ return min_t(u32, netif_get_num_default_rss_queues(), tp->rxq_max);
}
static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
@@ -13097,12 +13106,16 @@ static int tg3_test_nvram(struct tg3 *tp)
/* Bootstrap checksum at offset 0x10 */
csum = calc_crc((unsigned char *) buf, 0x10);
- if (csum != le32_to_cpu(buf[0x10/4]))
+
+ /* The type of buf is __be32 *, but this value is __le32 */
+ if (csum != le32_to_cpu((__force __le32)buf[0x10 / 4]))
goto out;
/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
- csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
- if (csum != le32_to_cpu(buf[0xfc/4]))
+ csum = calc_crc((unsigned char *)&buf[0x74 / 4], 0x88);
+
+ /* The type of buf is __be32 *, but this value is __le32 */
+ if (csum != le32_to_cpu((__force __le32)buf[0xfc / 4]))
goto out;
kfree(buf);
@@ -13879,6 +13892,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
data[TG3_INTERRUPT_TEST] = 1;
}
+ netdev_lock(dev);
tg3_full_lock(tp, 0);
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
@@ -13890,6 +13904,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
}
tg3_full_unlock(tp);
+ netdev_unlock(dev);
if (irq_sync && !err2)
tg3_phy_start(tp);
@@ -13899,22 +13914,20 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
}
-static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+static int tg3_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *stmpconf,
+ struct netlink_ext_ack *extack)
{
struct tg3 *tp = netdev_priv(dev);
- struct hwtstamp_config stmpconf;
if (!tg3_flag(tp, PTP_CAPABLE))
return -EOPNOTSUPP;
- if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
- return -EFAULT;
-
- if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
- stmpconf.tx_type != HWTSTAMP_TX_OFF)
+ if (stmpconf->tx_type != HWTSTAMP_TX_ON &&
+ stmpconf->tx_type != HWTSTAMP_TX_OFF)
return -ERANGE;
- switch (stmpconf.rx_filter) {
+ switch (stmpconf->rx_filter) {
case HWTSTAMP_FILTER_NONE:
tp->rxptpctl = 0;
break;
@@ -13974,74 +13987,72 @@ static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
tw32(TG3_RX_PTP_CTL,
tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
- if (stmpconf.tx_type == HWTSTAMP_TX_ON)
+ if (stmpconf->tx_type == HWTSTAMP_TX_ON)
tg3_flag_set(tp, TX_TSTAMP_EN);
else
tg3_flag_clear(tp, TX_TSTAMP_EN);
- return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
- -EFAULT : 0;
+ return 0;
}
-static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+static int tg3_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *stmpconf)
{
struct tg3 *tp = netdev_priv(dev);
- struct hwtstamp_config stmpconf;
if (!tg3_flag(tp, PTP_CAPABLE))
return -EOPNOTSUPP;
- stmpconf.flags = 0;
- stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
- HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
+ stmpconf->flags = 0;
+ stmpconf->tx_type = tg3_flag(tp, TX_TSTAMP_EN) ?
+ HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
switch (tp->rxptpctl) {
case 0:
- stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_NONE;
break;
case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
break;
case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
break;
case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
break;
case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
break;
case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
break;
case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
break;
case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
break;
case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
break;
case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
break;
case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
break;
case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
break;
case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
break;
default:
WARN_ON_ONCE(1);
return -ERANGE;
}
- return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
- -EFAULT : 0;
+ return 0;
}
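
Because hardware timestamping configuration now arrives through .ndo_hwtstamp_get/.ndo_hwtstamp_set, the core performs the copy_from_user()/copy_to_user() of struct hwtstamp_config, and the SIOC[GS]HWTSTAMP cases can be dropped from tg3_ioctl() below. The extack argument also allows descriptive errors; for example (illustrative only, the driver itself just returns -EOPNOTSUPP):

        if (!tg3_flag(tp, PTP_CAPABLE)) {
                NL_SET_ERR_MSG_MOD(extack, "hardware timestamping not supported");
                return -EOPNOTSUPP;
        }
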
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -14096,12 +14107,6 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return err;
- case SIOCSHWTSTAMP:
- return tg3_hwtstamp_set(dev, ifr);
-
- case SIOCGHWTSTAMP:
- return tg3_hwtstamp_get(dev, ifr);
-
default:
/* do nothing */
break;
@@ -14251,7 +14256,7 @@ static const struct ethtool_ops tg3_ethtool_ops = {
.get_coalesce = tg3_get_coalesce,
.set_coalesce = tg3_set_coalesce,
.get_sset_count = tg3_get_sset_count,
- .get_rxnfc = tg3_get_rxnfc,
+ .get_rx_ring_count = tg3_get_rx_ring_count,
.get_rxfh_indir_size = tg3_get_rxfh_indir_size,
.get_rxfh = tg3_get_rxfh,
.set_rxfh = tg3_set_rxfh,
@@ -14295,7 +14300,7 @@ static void tg3_set_rx_mode(struct net_device *dev)
static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
int new_mtu)
{
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
if (new_mtu > ETH_DATA_LEN) {
if (tg3_flag(tp, 5780_CLASS)) {
@@ -14333,6 +14338,7 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
tg3_set_mtu(dev, tp, new_mtu);
+ netdev_lock(dev);
tg3_full_lock(tp, 1);
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
@@ -14352,6 +14358,7 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
tg3_netif_start(tp);
tg3_full_unlock(tp);
+ netdev_unlock(dev);
if (!err)
tg3_phy_start(tp);
@@ -14375,6 +14382,8 @@ static const struct net_device_ops tg3_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = tg3_poll_controller,
#endif
+ .ndo_hwtstamp_get = tg3_hwtstamp_get,
+ .ndo_hwtstamp_set = tg3_hwtstamp_set,
};
static void tg3_get_eeprom_size(struct tg3 *tp)
@@ -16578,7 +16587,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
tg3_flag_set(tp, PCIX_TARGET_HWBUG);
- /* The chip can have it's power management PCI config
+ /* The chip can have its power management PCI config
* space registers clobbered due to this bug.
* So explicitly force the chip into D0 here.
*/
@@ -17069,12 +17078,14 @@ static int tg3_get_device_address(struct tg3 *tp, u8 *addr)
addr_ok = is_valid_ether_addr(addr);
}
if (!addr_ok) {
+ __be32 be_hi, be_lo;
+
/* Next, try NVRAM. */
if (!tg3_flag(tp, NO_NVRAM) &&
- !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
- !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
- memcpy(&addr[0], ((char *)&hi) + 2, 2);
- memcpy(&addr[2], (char *)&lo, sizeof(lo));
+ !tg3_nvram_read_be32(tp, mac_offset + 0, &be_hi) &&
+ !tg3_nvram_read_be32(tp, mac_offset + 4, &be_lo)) {
+ memcpy(&addr[0], ((char *)&be_hi) + 2, 2);
+ memcpy(&addr[2], (char *)&be_lo, sizeof(be_lo));
}
/* Finally just fetch it out of the MAC control regs. */
else {
@@ -17805,6 +17816,9 @@ static int tg3_init_one(struct pci_dev *pdev,
} else
persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
+ if (tg3_asic_rev(tp) == ASIC_REV_57766)
+ persist_dma_mask = DMA_BIT_MASK(31);
+
/* Configure DMA attributes. */
if (dma_mask > DMA_BIT_MASK(32)) {
err = dma_set_mask(&pdev->dev, dma_mask);
@@ -18084,7 +18098,6 @@ static int tg3_suspend(struct device *device)
{
struct net_device *dev = dev_get_drvdata(device);
struct tg3 *tp = netdev_priv(dev);
- int err = 0;
rtnl_lock();
@@ -18108,32 +18121,11 @@ static int tg3_suspend(struct device *device)
tg3_flag_clear(tp, INIT_COMPLETE);
tg3_full_unlock(tp);
- err = tg3_power_down_prepare(tp);
- if (err) {
- int err2;
-
- tg3_full_lock(tp, 0);
-
- tg3_flag_set(tp, INIT_COMPLETE);
- err2 = tg3_restart_hw(tp, true);
- if (err2)
- goto out;
-
- tg3_timer_start(tp);
-
- netif_device_attach(dev);
- tg3_netif_start(tp);
-
-out:
- tg3_full_unlock(tp);
-
- if (!err2)
- tg3_phy_start(tp);
- }
+ tg3_power_down_prepare(tp);
unlock:
rtnl_unlock();
- return err;
+ return 0;
}
static int tg3_resume(struct device *device)
@@ -18149,6 +18141,7 @@ static int tg3_resume(struct device *device)
netif_device_attach(dev);
+ netdev_lock(dev);
tg3_full_lock(tp, 0);
tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
@@ -18165,6 +18158,7 @@ static int tg3_resume(struct device *device)
out:
tg3_full_unlock(tp);
+ netdev_unlock(dev);
if (!err)
tg3_phy_start(tp);
@@ -18177,6 +18171,50 @@ unlock:
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
+/* Systems where ACPI _PTS (Prepare To Sleep) S5 will result in a fatal
+ * PCIe AER event on the tg3 device if the tg3 device is not, or cannot
+ * be, powered down.
+ */
+static const struct dmi_system_id tg3_restart_aer_quirk_table[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R440"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R540"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R640"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R650"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R740"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R750"),
+ },
+ },
+ {}
+};
+
static void tg3_shutdown(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
@@ -18193,6 +18231,19 @@ static void tg3_shutdown(struct pci_dev *pdev)
if (system_state == SYSTEM_POWER_OFF)
tg3_power_down(tp);
+ else if (system_state == SYSTEM_RESTART &&
+ dmi_first_match(tg3_restart_aer_quirk_table) &&
+ pdev->current_state != PCI_D3cold &&
+ pdev->current_state != PCI_UNKNOWN) {
+ /* Disable PCIe AER on the tg3 to avoid a fatal
+ * error during this system restart.
+ */
+ pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL,
+ PCI_EXP_DEVCTL_CERE |
+ PCI_EXP_DEVCTL_NFERE |
+ PCI_EXP_DEVCTL_FERE |
+ PCI_EXP_DEVCTL_URRE);
+ }
rtnl_unlock();
@@ -18245,7 +18296,9 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
done:
if (state == pci_channel_io_perm_failure) {
if (netdev) {
+ netdev_lock(netdev);
tg3_napi_enable(tp);
+ netdev_unlock(netdev);
dev_close(netdev);
}
err = PCI_ERS_RESULT_DISCONNECT;
@@ -18263,7 +18316,7 @@ done:
* @pdev: Pointer to PCI device
*
* Restart the card from scratch, as if from a cold-boot.
- * At this point, the card has exprienced a hard reset,
+ * At this point, the card has experienced a hard reset,
* followed by fixups by BIOS, and has its config space
* set up identically to what it was at cold boot.
*/
@@ -18284,7 +18337,6 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
pci_set_master(pdev);
pci_restore_state(pdev);
- pci_save_state(pdev);
if (!netdev || !netif_running(netdev)) {
rc = PCI_ERS_RESULT_RECOVERED;
@@ -18299,7 +18351,9 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
done:
if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
+ netdev_lock(netdev);
tg3_napi_enable(tp);
+ netdev_unlock(netdev);
dev_close(netdev);
}
rtnl_unlock();
@@ -18325,12 +18379,14 @@ static void tg3_io_resume(struct pci_dev *pdev)
if (!netdev || !netif_running(netdev))
goto done;
+ netdev_lock(netdev);
tg3_full_lock(tp, 0);
tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
tg3_flag_set(tp, INIT_COMPLETE);
err = tg3_restart_hw(tp, true);
if (err) {
tg3_full_unlock(tp);
+ netdev_unlock(netdev);
netdev_err(netdev, "Cannot restart hardware after reset.\n");
goto done;
}
@@ -18342,6 +18398,7 @@ static void tg3_io_resume(struct pci_dev *pdev)
tg3_netif_start(tp);
tg3_full_unlock(tp);
+ netdev_unlock(netdev);
tg3_phy_start(tp);
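
The tg3 hunks above consistently take the netdev instance lock around the driver's existing tg3_full_lock()/tg3_full_unlock() sections in the resume and error-recovery paths. A minimal sketch of that lock ordering, assuming the netdev_lock()/netdev_unlock() helpers from <linux/netdevice.h> (illustrative only, not part of this patch):

	netdev_lock(dev);		/* netdev instance lock first */
	tg3_full_lock(tp, 0);		/* then the driver-internal lock */

	/* ... reprogram the hardware ... */

	tg3_full_unlock(tp);		/* release in reverse order */
	netdev_unlock(dev);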
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index cf1b2b123c7e..a9e7f88fa26d 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -2390,7 +2390,7 @@
#define TG3_CL45_D7_EEERES_STAT_LP_1000T 0x0004
-/* Fast Ethernet Tranceiver definitions */
+/* Fast Ethernet Transceiver definitions */
#define MII_TG3_FET_PTEST 0x17
#define MII_TG3_FET_PTEST_TRIM_SEL 0x0010
#define MII_TG3_FET_PTEST_TRIM_2 0x0002
@@ -3033,7 +3033,7 @@ struct tg3_napi {
dma_addr_t rx_rcb_mapping;
dma_addr_t tx_desc_mapping;
- char irq_lbl[IFNAMSIZ];
+ char irq_lbl[IFNAMSIZ + 6 + 10]; /* name + "-txrx-" + %d */
unsigned int irq_vec;
};
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 9c80ab07a735..92c7639d1fc7 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -314,13 +314,13 @@ bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
{
switch (event) {
case IOC_E_FWRSP_GETATTR:
- del_timer(&ioc->ioc_timer);
+ timer_delete(&ioc->ioc_timer);
bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
break;
case IOC_E_PFFAILED:
case IOC_E_HWERROR:
- del_timer(&ioc->ioc_timer);
+ timer_delete(&ioc->ioc_timer);
fallthrough;
case IOC_E_TIMEOUT:
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
@@ -330,7 +330,7 @@ bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
break;
case IOC_E_DISABLE:
- del_timer(&ioc->ioc_timer);
+ timer_delete(&ioc->ioc_timer);
bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
break;
@@ -659,13 +659,13 @@ bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
break;
case IOCPF_E_DISABLE:
- del_timer(&ioc->iocpf_timer);
+ timer_delete(&ioc->iocpf_timer);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
bfa_ioc_pf_disabled(ioc);
break;
case IOCPF_E_STOP:
- del_timer(&ioc->iocpf_timer);
+ timer_delete(&ioc->iocpf_timer);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
break;
@@ -741,7 +741,7 @@ bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event)
break;
case IOCPF_E_DISABLE:
- del_timer(&ioc->iocpf_timer);
+ timer_delete(&ioc->iocpf_timer);
bfa_ioc_sync_leave(ioc);
bfa_nw_ioc_hw_sem_release(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
@@ -774,13 +774,13 @@ bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
switch (event) {
case IOCPF_E_FWRSP_ENABLE:
- del_timer(&ioc->iocpf_timer);
+ timer_delete(&ioc->iocpf_timer);
bfa_nw_ioc_hw_sem_release(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
break;
case IOCPF_E_INITFAIL:
- del_timer(&ioc->iocpf_timer);
+ timer_delete(&ioc->iocpf_timer);
fallthrough;
case IOCPF_E_TIMEOUT:
@@ -791,7 +791,7 @@ bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
break;
case IOCPF_E_DISABLE:
- del_timer(&ioc->iocpf_timer);
+ timer_delete(&ioc->iocpf_timer);
bfa_nw_ioc_hw_sem_release(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
break;
@@ -844,12 +844,12 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
switch (event) {
case IOCPF_E_FWRSP_DISABLE:
- del_timer(&ioc->iocpf_timer);
+ timer_delete(&ioc->iocpf_timer);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
break;
case IOCPF_E_FAIL:
- del_timer(&ioc->iocpf_timer);
+ timer_delete(&ioc->iocpf_timer);
fallthrough;
case IOCPF_E_TIMEOUT:
@@ -1210,7 +1210,7 @@ bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc)
static void
bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc)
{
- del_timer(&ioc->sem_timer);
+ timer_delete(&ioc->sem_timer);
}
/* Initialize LPU local memory (aka secondary memory / SRAM) */
@@ -1982,7 +1982,7 @@ bfa_ioc_hb_monitor(struct bfa_ioc *ioc)
static void
bfa_ioc_hb_stop(struct bfa_ioc *ioc)
{
- del_timer(&ioc->hb_timer);
+ timer_delete(&ioc->hb_timer);
}
/* Initiate a full firmware download. */
diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h
index a5ebd7110e07..986f43d27711 100644
--- a/drivers/net/ethernet/brocade/bna/bna_types.h
+++ b/drivers/net/ethernet/brocade/bna/bna_types.h
@@ -416,7 +416,7 @@ struct bna_ib {
/* Tx object */
/* Tx datapath control structure */
-#define BNA_Q_NAME_SIZE 16
+#define BNA_Q_NAME_SIZE (IFNAMSIZ + 6)
struct bna_tcb {
/* Fast path */
void **sw_qpt;
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index c32174484a96..9bed33295839 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -19,6 +19,7 @@
#include <linux/ip.h>
#include <linux/prefetch.h>
#include <linux/module.h>
+#include <net/gro.h>
#include "bnad.h"
#include "bna.h"
@@ -1534,8 +1535,9 @@ bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
for (i = 0; i < num_txqs; i++) {
vector_num = tx_info->tcb[i]->intr_vector;
- sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
- tx_id + tx_info->tcb[i]->id);
+ snprintf(tx_info->tcb[i]->name, BNA_Q_NAME_SIZE, "%s TXQ %d",
+ bnad->netdev->name,
+ tx_id + tx_info->tcb[i]->id);
err = request_irq(bnad->msix_table[vector_num].vector,
(irq_handler_t)bnad_msix_tx, 0,
tx_info->tcb[i]->name,
@@ -1585,9 +1587,9 @@ bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
for (i = 0; i < num_rxps; i++) {
vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
- sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
- bnad->netdev->name,
- rx_id + rx_info->rx_ctrl[i].ccb->id);
+ snprintf(rx_info->rx_ctrl[i].ccb->name, BNA_Q_NAME_SIZE,
+ "%s CQ %d", bnad->netdev->name,
+ rx_id + rx_info->rx_ctrl[i].ccb->id);
err = request_irq(bnad->msix_table[vector_num].vector,
(irq_handler_t)bnad_msix_rx, 0,
rx_info->rx_ctrl[i].ccb->name,
@@ -1686,7 +1688,8 @@ err_return:
static void
bnad_ioc_timeout(struct timer_list *t)
{
- struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.ioc_timer);
+ struct bnad *bnad = timer_container_of(bnad, t,
+ bna.ioceth.ioc.ioc_timer);
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
@@ -1697,7 +1700,8 @@ bnad_ioc_timeout(struct timer_list *t)
static void
bnad_ioc_hb_check(struct timer_list *t)
{
- struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.hb_timer);
+ struct bnad *bnad = timer_container_of(bnad, t,
+ bna.ioceth.ioc.hb_timer);
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
@@ -1708,7 +1712,8 @@ bnad_ioc_hb_check(struct timer_list *t)
static void
bnad_iocpf_timeout(struct timer_list *t)
{
- struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.iocpf_timer);
+ struct bnad *bnad = timer_container_of(bnad, t,
+ bna.ioceth.ioc.iocpf_timer);
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
@@ -1719,7 +1724,8 @@ bnad_iocpf_timeout(struct timer_list *t)
static void
bnad_iocpf_sem_timeout(struct timer_list *t)
{
- struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.sem_timer);
+ struct bnad *bnad = timer_container_of(bnad, t,
+ bna.ioceth.ioc.sem_timer);
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
@@ -1733,7 +1739,7 @@ bnad_iocpf_sem_timeout(struct timer_list *t)
* Time CPU m CPU n
* 0 1 = test_bit
* 1 clear_bit
- * 2 del_timer_sync
+ * 2 timer_delete_sync
* 3 mod_timer
*/
@@ -1741,7 +1747,7 @@ bnad_iocpf_sem_timeout(struct timer_list *t)
static void
bnad_dim_timeout(struct timer_list *t)
{
- struct bnad *bnad = from_timer(bnad, t, dim_timer);
+ struct bnad *bnad = timer_container_of(bnad, t, dim_timer);
struct bnad_rx_info *rx_info;
struct bnad_rx_ctrl *rx_ctrl;
int i, j;
@@ -1774,7 +1780,7 @@ bnad_dim_timeout(struct timer_list *t)
static void
bnad_stats_timeout(struct timer_list *t)
{
- struct bnad *bnad = from_timer(bnad, t, stats_timer);
+ struct bnad *bnad = timer_container_of(bnad, t, stats_timer);
unsigned long flags;
if (!netif_running(bnad->netdev) ||
@@ -1835,7 +1841,7 @@ bnad_stats_timer_stop(struct bnad *bnad)
to_del = 1;
spin_unlock_irqrestore(&bnad->bna_lock, flags);
if (to_del)
- del_timer_sync(&bnad->stats_timer);
+ timer_delete_sync(&bnad->stats_timer);
}
/* Utilities */
@@ -2158,7 +2164,7 @@ bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
}
spin_unlock_irqrestore(&bnad->bna_lock, flags);
if (to_del)
- del_timer_sync(&bnad->dim_timer);
+ timer_delete_sync(&bnad->dim_timer);
}
init_completion(&bnad->bnad_completions.rx_comp);
@@ -3276,7 +3282,7 @@ bnad_change_mtu(struct net_device *netdev, int new_mtu)
mutex_lock(&bnad->conf_mutex);
mtu = netdev->mtu;
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
frame = BNAD_FRAME_SIZE(mtu);
new_frame = BNAD_FRAME_SIZE(new_mtu);
@@ -3724,9 +3730,9 @@ probe_uninit:
bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
disable_ioceth:
bnad_ioceth_disable(bnad);
- del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
- del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
- del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
+ timer_delete_sync(&bnad->bna.ioceth.ioc.ioc_timer);
+ timer_delete_sync(&bnad->bna.ioceth.ioc.sem_timer);
+ timer_delete_sync(&bnad->bna.ioceth.ioc.hb_timer);
spin_lock_irqsave(&bnad->bna_lock, flags);
bna_uninit(bna);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
@@ -3767,9 +3773,9 @@ bnad_pci_remove(struct pci_dev *pdev)
mutex_lock(&bnad->conf_mutex);
bnad_ioceth_disable(bnad);
- del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
- del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
- del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
+ timer_delete_sync(&bnad->bna.ioceth.ioc.ioc_timer);
+ timer_delete_sync(&bnad->bna.ioceth.ioc.sem_timer);
+ timer_delete_sync(&bnad->bna.ioceth.ioc.hb_timer);
spin_lock_irqsave(&bnad->bna_lock, flags);
bna_uninit(bna);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
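
The bfa_ioc.c and bnad.c hunks above are mostly a mechanical conversion to the renamed timer API: del_timer() becomes timer_delete(), del_timer_sync() becomes timer_delete_sync(), and from_timer() becomes timer_container_of(). A hedged before/after sketch of a timer callback, not taken verbatim from this driver:

	static void example_stats_timeout(struct timer_list *t)
	{
		/* was: struct bnad *bnad = from_timer(bnad, t, stats_timer); */
		struct bnad *bnad = timer_container_of(bnad, t, stats_timer);

		/* ... handle the timeout under bnad->bna_lock ... */
	}

	/* teardown, was del_timer_sync(): */
	timer_delete_sync(&bnad->stats_timer);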
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index 10b1e534030e..4396997c59d0 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -351,7 +351,6 @@ struct bnad {
/* debugfs specific data */
char *regdata;
u32 reglen;
- struct dentry *bnad_dentry_files[5];
struct dentry *port_debugfs_root;
};
diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
index 7246e13dd559..8f0972e6737c 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
@@ -312,7 +312,7 @@ bnad_debugfs_write_regrd(struct file *file, const char __user *buf,
void *kern_buf;
/* Copy the user space buf */
- kern_buf = memdup_user(buf, nbytes);
+ kern_buf = memdup_user_nul(buf, nbytes);
if (IS_ERR(kern_buf))
return PTR_ERR(kern_buf);
@@ -372,7 +372,7 @@ bnad_debugfs_write_regwr(struct file *file, const char __user *buf,
void *kern_buf;
/* Copy the user space buf */
- kern_buf = memdup_user(buf, nbytes);
+ kern_buf = memdup_user_nul(buf, nbytes);
if (IS_ERR(kern_buf))
return PTR_ERR(kern_buf);
@@ -500,11 +500,6 @@ bnad_debugfs_init(struct bnad *bnad)
if (!bna_debugfs_root) {
bna_debugfs_root = debugfs_create_dir("bna", NULL);
atomic_set(&bna_debugfs_port_count, 0);
- if (!bna_debugfs_root) {
- netdev_warn(bnad->netdev,
- "debugfs root dir creation failed\n");
- return;
- }
}
/* Setup the pci_dev debugfs directory for the port */
@@ -517,18 +512,11 @@ bnad_debugfs_init(struct bnad *bnad)
for (i = 0; i < ARRAY_SIZE(bnad_debugfs_files); i++) {
file = &bnad_debugfs_files[i];
- bnad->bnad_dentry_files[i] =
- debugfs_create_file(file->name,
- file->mode,
- bnad->port_debugfs_root,
- bnad,
- file->fops);
- if (!bnad->bnad_dentry_files[i]) {
- netdev_warn(bnad->netdev,
- "create %s entry failed\n",
- file->name);
- return;
- }
+ debugfs_create_file(file->name,
+ file->mode,
+ bnad->port_debugfs_root,
+ bnad,
+ file->fops);
}
}
}
@@ -537,15 +525,6 @@ bnad_debugfs_init(struct bnad *bnad)
void
bnad_debugfs_uninit(struct bnad *bnad)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(bnad_debugfs_files); i++) {
- if (bnad->bnad_dentry_files[i]) {
- debugfs_remove(bnad->bnad_dentry_files[i]);
- bnad->bnad_dentry_files[i] = NULL;
- }
- }
-
/* Remove the pci_dev debugfs directory for the port */
if (bnad->port_debugfs_root) {
debugfs_remove(bnad->port_debugfs_root);
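
The bnad_debugfs.c write handlers above switch from memdup_user() to memdup_user_nul(), which appends a NUL terminator to the copied user buffer so it can safely be parsed as a C string. A small illustrative sketch (error paths trimmed, format string assumed for illustration):

	kern_buf = memdup_user_nul(buf, nbytes);	/* kern_buf[nbytes] == '\0' */
	if (IS_ERR(kern_buf))
		return PTR_ERR(kern_buf);

	/* safe: sscanf() never runs past the terminator */
	rc = sscanf(kern_buf, "%x:%x", &addr, &len);
	kfree(kern_buf);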
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index d1ad6c9f8140..216e25f26dbb 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -373,7 +373,7 @@ static int bnad_set_coalesce(struct net_device *netdev,
}
spin_unlock_irqrestore(&bnad->bna_lock, flags);
if (to_del)
- del_timer_sync(&bnad->dim_timer);
+ timer_delete_sync(&bnad->dim_timer);
spin_lock_irqsave(&bnad->bna_lock, flags);
bnad_rx_coalescing_timeo_set(bnad);
}
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index aa5700ac9c00..87414a2ddf6e 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -13,10 +13,7 @@
#include <linux/net_tstamp.h>
#include <linux/interrupt.h>
#include <linux/phy/phy.h>
-
-#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) || defined(CONFIG_MACB_USE_HWSTAMP)
-#define MACB_EXT_DESC
-#endif
+#include <linux/workqueue.h>
#define MACB_GREGS_NBR 16
#define MACB_GREGS_VERSION 2
@@ -183,6 +180,13 @@
#define GEM_DCFG8 0x029C /* Design Config 8 */
#define GEM_DCFG10 0x02A4 /* Design Config 10 */
#define GEM_DCFG12 0x02AC /* Design Config 12 */
+#define GEM_ENST_START_TIME_Q0 0x0800 /* ENST Q0 start time */
+#define GEM_ENST_START_TIME_Q1 0x0804 /* ENST Q1 start time */
+#define GEM_ENST_ON_TIME_Q0 0x0820 /* ENST Q0 on time */
+#define GEM_ENST_ON_TIME_Q1 0x0824 /* ENST Q1 on time */
+#define GEM_ENST_OFF_TIME_Q0 0x0840 /* ENST Q0 off time */
+#define GEM_ENST_OFF_TIME_Q1 0x0844 /* ENST Q1 off time */
+#define GEM_ENST_CONTROL 0x0880 /* ENST control register */
#define GEM_USX_CONTROL 0x0A80 /* High speed PCS control register */
#define GEM_USX_STATUS 0x0A88 /* High speed PCS status register */
@@ -212,14 +216,19 @@
#define GEM_ISR(hw_q) (0x0400 + ((hw_q) << 2))
#define GEM_TBQP(hw_q) (0x0440 + ((hw_q) << 2))
-#define GEM_TBQPH(hw_q) (0x04C8)
#define GEM_RBQP(hw_q) (0x0480 + ((hw_q) << 2))
#define GEM_RBQS(hw_q) (0x04A0 + ((hw_q) << 2))
-#define GEM_RBQPH(hw_q) (0x04D4)
#define GEM_IER(hw_q) (0x0600 + ((hw_q) << 2))
#define GEM_IDR(hw_q) (0x0620 + ((hw_q) << 2))
#define GEM_IMR(hw_q) (0x0640 + ((hw_q) << 2))
+#define GEM_ENST_START_TIME(hw_q) (0x0800 + ((hw_q) << 2))
+#define GEM_ENST_ON_TIME(hw_q) (0x0820 + ((hw_q) << 2))
+#define GEM_ENST_OFF_TIME(hw_q) (0x0840 + ((hw_q) << 2))
+
+/* Bitfields in ENST_CONTROL */
+#define GEM_ENST_DISABLE_QUEUE_OFFSET 16
+
/* Bitfields in NCR */
#define MACB_LB_OFFSET 0 /* reserved */
#define MACB_LB_SIZE 1
@@ -528,6 +537,8 @@
/* Bitfields in DCFG6. */
#define GEM_PBUF_LSO_OFFSET 27
#define GEM_PBUF_LSO_SIZE 1
+#define GEM_PBUF_RSC_OFFSET 26
+#define GEM_PBUF_RSC_SIZE 1
#define GEM_PBUF_CUTTHRU_OFFSET 25
#define GEM_PBUF_CUTTHRU_SIZE 1
#define GEM_DAW64_OFFSET 23
@@ -553,6 +564,23 @@
#define GEM_HIGH_SPEED_OFFSET 26
#define GEM_HIGH_SPEED_SIZE 1
+/* Bitfields in ENST_START_TIME_Qx. */
+#define GEM_START_TIME_SEC_OFFSET 30
+#define GEM_START_TIME_SEC_SIZE 2
+#define GEM_START_TIME_NSEC_OFFSET 0
+#define GEM_START_TIME_NSEC_SIZE 30
+
+/* Bitfields in ENST_ON_TIME_Qx. */
+#define GEM_ON_TIME_OFFSET 0
+#define GEM_ON_TIME_SIZE 17
+
+/* Bitfields in ENST_OFF_TIME_Qx. */
+#define GEM_OFF_TIME_OFFSET 0
+#define GEM_OFF_TIME_SIZE 17
+
+/* Granularity of the hardware ENST timing registers */
+#define ENST_TIME_GRANULARITY_NS 8
+
/* Bitfields in USX_CONTROL. */
#define GEM_USX_CTRL_SPEED_OFFSET 14
#define GEM_USX_CTRL_SPEED_SIZE 3
@@ -645,6 +673,10 @@
#define GEM_T2OFST_OFFSET 0 /* offset value */
#define GEM_T2OFST_SIZE 7
+/* Bitfields in queue pointer registers */
+#define MACB_QUEUE_DISABLE_OFFSET 0 /* disable queue */
+#define MACB_QUEUE_DISABLE_SIZE 1
+
/* Offset for screener type 2 compare values (T2CMPOFST).
* Note the offset is applied after the specified point,
* e.g. GEM_T2COMPOFST_ETYPE denotes the EtherType field, so an offset
@@ -722,25 +754,31 @@
#define MACB_MAN_C45_CODE 2
/* Capability mask bits */
-#define MACB_CAPS_ISR_CLEAR_ON_WRITE 0x00000001
-#define MACB_CAPS_USRIO_HAS_CLKEN 0x00000002
-#define MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII 0x00000004
-#define MACB_CAPS_NO_GIGABIT_HALF 0x00000008
-#define MACB_CAPS_USRIO_DISABLED 0x00000010
-#define MACB_CAPS_JUMBO 0x00000020
-#define MACB_CAPS_GEM_HAS_PTP 0x00000040
-#define MACB_CAPS_BD_RD_PREFETCH 0x00000080
-#define MACB_CAPS_NEEDS_RSTONUBR 0x00000100
-#define MACB_CAPS_MIIONRGMII 0x00000200
-#define MACB_CAPS_NEED_TSUCLK 0x00000400
-#define MACB_CAPS_PCS 0x01000000
-#define MACB_CAPS_HIGH_SPEED 0x02000000
-#define MACB_CAPS_CLK_HW_CHG 0x04000000
-#define MACB_CAPS_MACB_IS_EMAC 0x08000000
-#define MACB_CAPS_FIFO_MODE 0x10000000
-#define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000
-#define MACB_CAPS_SG_DISABLED 0x40000000
-#define MACB_CAPS_MACB_IS_GEM 0x80000000
+#define MACB_CAPS_ISR_CLEAR_ON_WRITE BIT(0)
+#define MACB_CAPS_USRIO_HAS_CLKEN BIT(1)
+#define MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII BIT(2)
+#define MACB_CAPS_NO_GIGABIT_HALF BIT(3)
+#define MACB_CAPS_USRIO_DISABLED BIT(4)
+#define MACB_CAPS_JUMBO BIT(5)
+#define MACB_CAPS_GEM_HAS_PTP BIT(6)
+#define MACB_CAPS_BD_RD_PREFETCH BIT(7)
+#define MACB_CAPS_NEEDS_RSTONUBR BIT(8)
+#define MACB_CAPS_MIIONRGMII BIT(9)
+#define MACB_CAPS_NEED_TSUCLK BIT(10)
+#define MACB_CAPS_QUEUE_DISABLE BIT(11)
+#define MACB_CAPS_QBV BIT(12)
+#define MACB_CAPS_PCS BIT(13)
+#define MACB_CAPS_HIGH_SPEED BIT(14)
+#define MACB_CAPS_CLK_HW_CHG BIT(15)
+#define MACB_CAPS_MACB_IS_EMAC BIT(16)
+#define MACB_CAPS_FIFO_MODE BIT(17)
+#define MACB_CAPS_GIGABIT_MODE_AVAILABLE BIT(18)
+#define MACB_CAPS_SG_DISABLED BIT(19)
+#define MACB_CAPS_MACB_IS_GEM BIT(20)
+#define MACB_CAPS_DMA_64B BIT(21)
+#define MACB_CAPS_DMA_PTP BIT(22)
+#define MACB_CAPS_RSC BIT(23)
+#define MACB_CAPS_NO_LSO BIT(24)
/* LSO settings */
#define MACB_LSO_UFO_ENABLE 0x01
@@ -817,12 +855,6 @@ struct macb_dma_desc {
u32 ctrl;
};
-#ifdef MACB_EXT_DESC
-#define HW_DMA_CAP_32B 0
-#define HW_DMA_CAP_64B (1 << 0)
-#define HW_DMA_CAP_PTP (1 << 1)
-#define HW_DMA_CAP_64B_PTP (HW_DMA_CAP_64B | HW_DMA_CAP_PTP)
-
struct macb_dma_desc_64 {
u32 addrh;
u32 resvd;
@@ -832,7 +864,6 @@ struct macb_dma_desc_ptp {
u32 ts_1;
u32 ts_2;
};
-#endif
/* DMA descriptor bitfields */
#define MACB_RX_USED_OFFSET 0
@@ -945,75 +976,73 @@ struct macb_tx_skb {
* device stats by a periodic timer.
*/
struct macb_stats {
- u32 rx_pause_frames;
- u32 tx_ok;
- u32 tx_single_cols;
- u32 tx_multiple_cols;
- u32 rx_ok;
- u32 rx_fcs_errors;
- u32 rx_align_errors;
- u32 tx_deferred;
- u32 tx_late_cols;
- u32 tx_excessive_cols;
- u32 tx_underruns;
- u32 tx_carrier_errors;
- u32 rx_resource_errors;
- u32 rx_overruns;
- u32 rx_symbol_errors;
- u32 rx_oversize_pkts;
- u32 rx_jabbers;
- u32 rx_undersize_pkts;
- u32 sqe_test_errors;
- u32 rx_length_mismatch;
- u32 tx_pause_frames;
+ u64 rx_pause_frames;
+ u64 tx_ok;
+ u64 tx_single_cols;
+ u64 tx_multiple_cols;
+ u64 rx_ok;
+ u64 rx_fcs_errors;
+ u64 rx_align_errors;
+ u64 tx_deferred;
+ u64 tx_late_cols;
+ u64 tx_excessive_cols;
+ u64 tx_underruns;
+ u64 tx_carrier_errors;
+ u64 rx_resource_errors;
+ u64 rx_overruns;
+ u64 rx_symbol_errors;
+ u64 rx_oversize_pkts;
+ u64 rx_jabbers;
+ u64 rx_undersize_pkts;
+ u64 sqe_test_errors;
+ u64 rx_length_mismatch;
+ u64 tx_pause_frames;
};
struct gem_stats {
- u32 tx_octets_31_0;
- u32 tx_octets_47_32;
- u32 tx_frames;
- u32 tx_broadcast_frames;
- u32 tx_multicast_frames;
- u32 tx_pause_frames;
- u32 tx_64_byte_frames;
- u32 tx_65_127_byte_frames;
- u32 tx_128_255_byte_frames;
- u32 tx_256_511_byte_frames;
- u32 tx_512_1023_byte_frames;
- u32 tx_1024_1518_byte_frames;
- u32 tx_greater_than_1518_byte_frames;
- u32 tx_underrun;
- u32 tx_single_collision_frames;
- u32 tx_multiple_collision_frames;
- u32 tx_excessive_collisions;
- u32 tx_late_collisions;
- u32 tx_deferred_frames;
- u32 tx_carrier_sense_errors;
- u32 rx_octets_31_0;
- u32 rx_octets_47_32;
- u32 rx_frames;
- u32 rx_broadcast_frames;
- u32 rx_multicast_frames;
- u32 rx_pause_frames;
- u32 rx_64_byte_frames;
- u32 rx_65_127_byte_frames;
- u32 rx_128_255_byte_frames;
- u32 rx_256_511_byte_frames;
- u32 rx_512_1023_byte_frames;
- u32 rx_1024_1518_byte_frames;
- u32 rx_greater_than_1518_byte_frames;
- u32 rx_undersized_frames;
- u32 rx_oversize_frames;
- u32 rx_jabbers;
- u32 rx_frame_check_sequence_errors;
- u32 rx_length_field_frame_errors;
- u32 rx_symbol_errors;
- u32 rx_alignment_errors;
- u32 rx_resource_errors;
- u32 rx_overruns;
- u32 rx_ip_header_checksum_errors;
- u32 rx_tcp_checksum_errors;
- u32 rx_udp_checksum_errors;
+ u64 tx_octets;
+ u64 tx_frames;
+ u64 tx_broadcast_frames;
+ u64 tx_multicast_frames;
+ u64 tx_pause_frames;
+ u64 tx_64_byte_frames;
+ u64 tx_65_127_byte_frames;
+ u64 tx_128_255_byte_frames;
+ u64 tx_256_511_byte_frames;
+ u64 tx_512_1023_byte_frames;
+ u64 tx_1024_1518_byte_frames;
+ u64 tx_greater_than_1518_byte_frames;
+ u64 tx_underrun;
+ u64 tx_single_collision_frames;
+ u64 tx_multiple_collision_frames;
+ u64 tx_excessive_collisions;
+ u64 tx_late_collisions;
+ u64 tx_deferred_frames;
+ u64 tx_carrier_sense_errors;
+ u64 rx_octets;
+ u64 rx_frames;
+ u64 rx_broadcast_frames;
+ u64 rx_multicast_frames;
+ u64 rx_pause_frames;
+ u64 rx_64_byte_frames;
+ u64 rx_65_127_byte_frames;
+ u64 rx_128_255_byte_frames;
+ u64 rx_256_511_byte_frames;
+ u64 rx_512_1023_byte_frames;
+ u64 rx_1024_1518_byte_frames;
+ u64 rx_greater_than_1518_byte_frames;
+ u64 rx_undersized_frames;
+ u64 rx_oversize_frames;
+ u64 rx_jabbers;
+ u64 rx_frame_check_sequence_errors;
+ u64 rx_length_field_frame_errors;
+ u64 rx_symbol_errors;
+ u64 rx_alignment_errors;
+ u64 rx_resource_errors;
+ u64 rx_overruns;
+ u64 rx_ip_header_checksum_errors;
+ u64 rx_tcp_checksum_errors;
+ u64 rx_udp_checksum_errors;
};
/* Describes the name and offset of an individual statistic register, as
@@ -1021,7 +1050,7 @@ struct gem_stats {
* this register should contribute to.
*/
struct gem_statistic {
- char stat_string[ETH_GSTRING_LEN];
+ char stat_string[ETH_GSTRING_LEN] __nonstring;
int offset;
u32 stat_bits;
};
@@ -1163,7 +1192,7 @@ struct macb_ptp_info {
s32 (*get_ptp_max_adj)(void);
unsigned int (*get_tsu_rate)(struct macb *bp);
int (*get_ts_info)(struct net_device *dev,
- struct ethtool_ts_info *info);
+ struct kernel_ethtool_ts_info *info);
int (*get_hwtst)(struct net_device *netdev,
struct kernel_hwtstamp_config *tstamp_config);
int (*set_hwtst)(struct net_device *netdev,
@@ -1210,10 +1239,13 @@ struct macb_queue {
unsigned int IDR;
unsigned int IMR;
unsigned int TBQP;
- unsigned int TBQPH;
unsigned int RBQS;
unsigned int RBQP;
- unsigned int RBQPH;
+
+ /* ENST register offsets for this queue */
+ unsigned int ENST_START_TIME;
+ unsigned int ENST_ON_TIME;
+ unsigned int ENST_OFF_TIME;
/* Lock to protect tx_head and tx_tail */
spinlock_t tx_ptr_lock;
@@ -1254,13 +1286,14 @@ struct macb {
u32 (*macb_reg_readl)(struct macb *bp, int offset);
void (*macb_reg_writel)(struct macb *bp, int offset, u32 value);
+ struct macb_dma_desc *rx_ring_tieoff;
+ dma_addr_t rx_ring_tieoff_dma;
size_t rx_buffer_size;
unsigned int rx_ring_size;
unsigned int tx_ring_size;
unsigned int num_queues;
- unsigned int queue_mask;
struct macb_queue queues[MACB_MAX_QUEUES];
spinlock_t lock;
@@ -1271,6 +1304,8 @@ struct macb {
struct clk *rx_clk;
struct clk *tsu_clk;
struct net_device *dev;
+ /* Protects hw_stats and ethtool_stats */
+ spinlock_t stats_lock;
union {
struct macb_stats macb;
struct gem_stats gem;
@@ -1299,17 +1334,15 @@ struct macb {
unsigned int jumbo_max_len;
u32 wol;
+ u32 wolopts;
/* holds value of rx watermark value for pbuf_rxcutthru register */
u32 rx_watermark;
struct macb_ptp_info *ptp_info; /* macb-ptp interface */
- struct phy *sgmii_phy; /* for ZynqMP SGMII mode */
+ struct phy *phy;
-#ifdef MACB_EXT_DESC
- uint8_t hw_dma_cap;
-#endif
spinlock_t tsu_clk_lock; /* gem tsu clock locking */
unsigned int tsu_rate;
struct ptp_clock *ptp_clock;
@@ -1322,7 +1355,7 @@ struct macb {
spinlock_t rx_fs_lock;
unsigned int max_tuples;
- struct tasklet_struct hresp_err_tasklet;
+ struct work_struct hresp_err_bh_work;
int rx_bd_rd_prefetch;
int tx_bd_rd_prefetch;
@@ -1388,6 +1421,31 @@ static inline bool gem_has_ptp(struct macb *bp)
return IS_ENABLED(CONFIG_MACB_USE_HWSTAMP) && (bp->caps & MACB_CAPS_GEM_HAS_PTP);
}
+/* ENST Helper functions */
+static inline u64 enst_ns_to_hw_units(size_t ns, u32 speed_mbps)
+{
+ return DIV_ROUND_UP((ns) * (speed_mbps),
+ (ENST_TIME_GRANULARITY_NS * 1000));
+}
+
+static inline u64 enst_max_hw_interval(u32 speed_mbps)
+{
+ return DIV_ROUND_UP(GENMASK(GEM_ON_TIME_SIZE - 1, 0) *
+ ENST_TIME_GRANULARITY_NS * 1000, (speed_mbps));
+}
+
+static inline bool macb_dma64(struct macb *bp)
+{
+ return IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
+ bp->caps & MACB_CAPS_DMA_64B;
+}
+
+static inline bool macb_dma_ptp(struct macb *bp)
+{
+ return IS_ENABLED(CONFIG_MACB_USE_HWSTAMP) &&
+ bp->caps & MACB_CAPS_DMA_PTP;
+}
+
/**
* struct macb_platform_data - platform data for MACB Ethernet used for PCI registration
* @pclk: platform clock
@@ -1398,4 +1456,21 @@ struct macb_platform_data {
struct clk *hclk;
};
+/**
+ * struct macb_queue_enst_config - Configuration for Enhanced Scheduled Traffic
+ * @start_time_mask: Bitmask representing the start time for the queue
+ * @on_time_bytes: "on" time nsec expressed in bytes
+ * @off_time_bytes: "off" time nsec expressed in bytes
+ * @queue_id: Identifier for the queue
+ *
+ * This structure holds the configuration parameters for an ENST queue,
+ * used to control time-based transmission scheduling in the MACB driver.
+ */
+struct macb_queue_enst_config {
+ u32 start_time_mask;
+ u32 on_time_bytes;
+ u32 off_time_bytes;
+ u8 queue_id;
+};
+
#endif /* _MACB_H */
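
As a worked example of the ENST helpers above (illustrative numbers only, assuming SPEED_1000 == 1000 from <linux/ethtool.h>): with ENST_TIME_GRANULARITY_NS = 8, an "on" interval of 10000 ns at 1000 Mbit/s converts to DIV_ROUND_UP(10000 * 1000, 8 * 1000) = 1250, i.e. the interval expressed in byte-times at line rate, which matches the "expressed in bytes" wording in struct macb_queue_enst_config:

	/* 10000 ns of on-time at 1 Gbit/s -> 1250 byte-times */
	u64 on = enst_ns_to_hw_units(10000, SPEED_1000);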
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 898debfd4db3..e461f5072884 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -6,38 +6,37 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/clk.h>
+#include <linux/circ_buf.h>
#include <linux/clk-provider.h>
+#include <linux/clk.h>
#include <linux/crc32.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/circ_buf.h>
-#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/inetdevice.h>
#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/gpio.h>
-#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/ip.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/dma-mapping.h>
-#include <linux/platform_device.h>
-#include <linux/phylink.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
-#include <linux/ip.h>
-#include <linux/udp.h>
-#include <linux/tcp.h>
-#include <linux/iopoll.h>
#include <linux/phy/phy.h>
+#include <linux/phylink.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/ptp_classify.h>
#include <linux/reset.h>
-#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/slab.h>
+#include <linux/tcp.h>
+#include <linux/types.h>
+#include <linux/udp.h>
+#include <net/pkt_sched.h>
#include "macb.h"
/* This structure is only used for MACB on SiFive FU540 devices */
@@ -53,14 +52,10 @@ struct sifive_fu540_macb_mgmt {
#define DEFAULT_RX_RING_SIZE 512 /* must be power of 2 */
#define MIN_RX_RING_SIZE 64
#define MAX_RX_RING_SIZE 8192
-#define RX_RING_BYTES(bp) (macb_dma_desc_get_size(bp) \
- * (bp)->rx_ring_size)
#define DEFAULT_TX_RING_SIZE 512 /* must be power of 2 */
#define MIN_TX_RING_SIZE 64
#define MAX_TX_RING_SIZE 4096
-#define TX_RING_BYTES(bp) (macb_dma_desc_get_size(bp) \
- * (bp)->tx_ring_size)
/* level of occupied TX descriptors under which we wake up TX process */
#define MACB_TX_WAKEUP_THRESH(bp) (3 * (bp)->tx_ring_size / 4)
@@ -84,8 +79,7 @@ struct sifive_fu540_macb_mgmt {
#define GEM_MTU_MIN_SIZE ETH_MIN_MTU
#define MACB_NETIF_LSO NETIF_F_TSO
-#define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0)
-#define MACB_WOL_ENABLED (0x1 << 1)
+#define MACB_WOL_ENABLED BIT(0)
#define HS_SPEED_10000M 4
#define MACB_SERDES_RATE_10G 1
@@ -127,56 +121,26 @@ struct sifive_fu540_macb_mgmt {
*/
static unsigned int macb_dma_desc_get_size(struct macb *bp)
{
-#ifdef MACB_EXT_DESC
- unsigned int desc_size;
+ unsigned int desc_size = sizeof(struct macb_dma_desc);
+
+ if (macb_dma64(bp))
+ desc_size += sizeof(struct macb_dma_desc_64);
+ if (macb_dma_ptp(bp))
+ desc_size += sizeof(struct macb_dma_desc_ptp);
- switch (bp->hw_dma_cap) {
- case HW_DMA_CAP_64B:
- desc_size = sizeof(struct macb_dma_desc)
- + sizeof(struct macb_dma_desc_64);
- break;
- case HW_DMA_CAP_PTP:
- desc_size = sizeof(struct macb_dma_desc)
- + sizeof(struct macb_dma_desc_ptp);
- break;
- case HW_DMA_CAP_64B_PTP:
- desc_size = sizeof(struct macb_dma_desc)
- + sizeof(struct macb_dma_desc_64)
- + sizeof(struct macb_dma_desc_ptp);
- break;
- default:
- desc_size = sizeof(struct macb_dma_desc);
- }
return desc_size;
-#endif
- return sizeof(struct macb_dma_desc);
}
static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int desc_idx)
{
-#ifdef MACB_EXT_DESC
- switch (bp->hw_dma_cap) {
- case HW_DMA_CAP_64B:
- case HW_DMA_CAP_PTP:
- desc_idx <<= 1;
- break;
- case HW_DMA_CAP_64B_PTP:
- desc_idx *= 3;
- break;
- default:
- break;
- }
-#endif
- return desc_idx;
+ return desc_idx * (1 + macb_dma64(bp) + macb_dma_ptp(bp));
}
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
{
return (struct macb_dma_desc_64 *)((void *)desc
+ sizeof(struct macb_dma_desc));
}
-#endif
/* Ring buffer accessors */
static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
@@ -281,9 +245,9 @@ static void macb_set_hwaddr(struct macb *bp)
u32 bottom;
u16 top;
- bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
+ bottom = get_unaligned_le32(bp->dev->dev_addr);
macb_or_gem_writel(bp, SA1B, bottom);
- top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
+ top = get_unaligned_le16(bp->dev->dev_addr + 4);
macb_or_gem_writel(bp, SA1T, top);
if (gem_has_ptp(bp)) {
@@ -363,7 +327,6 @@ static int macb_mdio_read_c22(struct mii_bus *bus, int mii_id, int regnum)
status = MACB_BFEXT(DATA, macb_readl(bp, MAN));
mdio_read_exit:
- pm_runtime_mark_last_busy(&bp->pdev->dev);
pm_runtime_put_autosuspend(&bp->pdev->dev);
mdio_pm_exit:
return status;
@@ -409,7 +372,6 @@ static int macb_mdio_read_c45(struct mii_bus *bus, int mii_id, int devad,
status = MACB_BFEXT(DATA, macb_readl(bp, MAN));
mdio_read_exit:
- pm_runtime_mark_last_busy(&bp->pdev->dev);
pm_runtime_put_autosuspend(&bp->pdev->dev);
mdio_pm_exit:
return status;
@@ -441,7 +403,6 @@ static int macb_mdio_write_c22(struct mii_bus *bus, int mii_id, int regnum,
goto mdio_write_exit;
mdio_write_exit:
- pm_runtime_mark_last_busy(&bp->pdev->dev);
pm_runtime_put_autosuspend(&bp->pdev->dev);
mdio_pm_exit:
return status;
@@ -487,7 +448,6 @@ static int macb_mdio_write_c45(struct mii_bus *bus, int mii_id,
goto mdio_write_exit;
mdio_write_exit:
- pm_runtime_mark_last_busy(&bp->pdev->dev);
pm_runtime_put_autosuspend(&bp->pdev->dev);
mdio_pm_exit:
return status;
@@ -498,19 +458,17 @@ static void macb_init_buffers(struct macb *bp)
struct macb_queue *queue;
unsigned int q;
+ /* Single register for all queues' high 32 bits. */
+ if (macb_dma64(bp)) {
+ macb_writel(bp, RBQPH,
+ upper_32_bits(bp->queues[0].rx_ring_dma));
+ macb_writel(bp, TBQPH,
+ upper_32_bits(bp->queues[0].tx_ring_dma));
+ }
+
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
- queue_writel(queue, RBQPH,
- upper_32_bits(queue->rx_ring_dma));
-#endif
queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
- queue_writel(queue, TBQPH,
- upper_32_bits(queue->tx_ring_dma));
-#endif
}
}
@@ -530,19 +488,9 @@ static void macb_set_tx_clk(struct macb *bp, int speed)
if (bp->phy_interface == PHY_INTERFACE_MODE_MII)
return;
- switch (speed) {
- case SPEED_10:
- rate = 2500000;
- break;
- case SPEED_100:
- rate = 25000000;
- break;
- case SPEED_1000:
- rate = 125000000;
- break;
- default:
+ rate = rgmii_clock(speed);
+ if (rate < 0)
return;
- }
rate_rounded = clk_round_rate(bp->tx_clk, rate);
if (rate_rounded < 0)
@@ -578,6 +526,7 @@ static void macb_usx_pcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode,
}
static void macb_usx_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct macb *bp = container_of(pcs, struct macb, phylink_usx_pcs);
@@ -608,7 +557,7 @@ static int macb_usx_pcs_config(struct phylink_pcs *pcs,
return 0;
}
-static void macb_pcs_get_state(struct phylink_pcs *pcs,
+static void macb_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state)
{
state->link = 0;
@@ -862,9 +811,7 @@ static int macb_mii_probe(struct net_device *dev)
struct macb *bp = netdev_priv(dev);
bp->phylink_sgmii_pcs.ops = &macb_phylink_pcs_ops;
- bp->phylink_sgmii_pcs.neg_mode = true;
bp->phylink_usx_pcs.ops = &macb_phylink_usx_pcs_ops;
- bp->phylink_usx_pcs.neg_mode = true;
bp->phylink_config.dev = &dev->dev;
bp->phylink_config.type = PHYLINK_NETDEV;
@@ -915,23 +862,15 @@ static int macb_mii_probe(struct net_device *dev)
return 0;
}
-static int macb_mdiobus_register(struct macb *bp)
+static int macb_mdiobus_register(struct macb *bp, struct device_node *mdio_np)
{
struct device_node *child, *np = bp->pdev->dev.of_node;
/* If we have a child named mdio, probe it instead of looking for PHYs
* directly under the MAC node
*/
- child = of_get_child_by_name(np, "mdio");
- if (child) {
- int ret = of_mdiobus_register(bp->mii_bus, child);
-
- of_node_put(child);
- return ret;
- }
-
- if (of_phy_is_fixed_link(np))
- return mdiobus_register(bp->mii_bus);
+ if (mdio_np)
+ return of_mdiobus_register(bp->mii_bus, mdio_np);
/* Only create the PHY from the device tree if at least one PHY is
* described. Otherwise scan the entire MDIO bus. We do this to support
@@ -953,8 +892,17 @@ static int macb_mdiobus_register(struct macb *bp)
static int macb_mii_init(struct macb *bp)
{
+ struct device_node *mdio_np, *np = bp->pdev->dev.of_node;
int err = -ENXIO;
+ /* With fixed-link, we don't need to register the MDIO bus,
+ * except if we have a child named "mdio" in the device tree.
+ * In that case, some devices may be attached to the MACB's MDIO bus.
+ */
+ mdio_np = of_get_child_by_name(np, "mdio");
+ if (!mdio_np && of_phy_is_fixed_link(np))
+ return macb_mii_probe(bp->dev);
+
/* Enable management port */
macb_writel(bp, NCR, MACB_BIT(MPE));
@@ -976,7 +924,7 @@ static int macb_mii_init(struct macb *bp)
dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
- err = macb_mdiobus_register(bp);
+ err = macb_mdiobus_register(bp, mdio_np);
if (err)
goto err_out_free_mdiobus;
@@ -991,13 +939,15 @@ err_out_unregister_bus:
err_out_free_mdiobus:
mdiobus_free(bp->mii_bus);
err_out:
+ of_node_put(mdio_np);
+
return err;
}
static void macb_update_stats(struct macb *bp)
{
- u32 *p = &bp->hw_stats.macb.rx_pause_frames;
- u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
+ u64 *p = &bp->hw_stats.macb.rx_pause_frames;
+ u64 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
int offset = MACB_PFR;
WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
@@ -1008,22 +958,15 @@ static void macb_update_stats(struct macb *bp)
static int macb_halt_tx(struct macb *bp)
{
- unsigned long halt_time, timeout;
- u32 status;
+ u32 status;
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));
- timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
- do {
- halt_time = jiffies;
- status = macb_readl(bp, TSR);
- if (!(status & MACB_BIT(TGO)))
- return 0;
-
- udelay(250);
- } while (time_before(halt_time, timeout));
-
- return -ETIMEDOUT;
+ /* Poll TSR until TGO is cleared or timeout. */
+ return read_poll_timeout_atomic(macb_readl, status,
+ !(status & MACB_BIT(TGO)),
+ 250, MACB_HALT_TIMEOUT, false,
+ bp, TSR);
}
static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb, int budget)
@@ -1046,10 +989,9 @@ static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb, int budge
static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr)
{
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- struct macb_dma_desc_64 *desc_64;
+ if (macb_dma64(bp)) {
+ struct macb_dma_desc_64 *desc_64;
- if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
desc_64 = macb_64b_desc(bp, desc);
desc_64->addrh = upper_32_bits(addr);
/* The low bits of RX address contain the RX_USED bit, clearing
@@ -1058,26 +1000,23 @@ static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_
*/
dma_wmb();
}
-#endif
+
desc->addr = lower_32_bits(addr);
}
static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
{
dma_addr_t addr = 0;
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- struct macb_dma_desc_64 *desc_64;
- if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
+ if (macb_dma64(bp)) {
+ struct macb_dma_desc_64 *desc_64;
+
desc_64 = macb_64b_desc(bp, desc);
addr = ((u64)(desc_64->addrh) << 32);
}
-#endif
addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
-#ifdef CONFIG_MACB_USE_HWSTAMP
- if (bp->hw_dma_cap & HW_DMA_CAP_PTP)
+ if (macb_dma_ptp(bp))
addr &= ~GEM_BIT(DMA_RXVALID);
-#endif
return addr;
}
@@ -1087,15 +1026,18 @@ static void macb_tx_error_task(struct work_struct *work)
tx_error_task);
bool halt_timeout = false;
struct macb *bp = queue->bp;
+ u32 queue_index;
+ u32 packets = 0;
+ u32 bytes = 0;
struct macb_tx_skb *tx_skb;
struct macb_dma_desc *desc;
struct sk_buff *skb;
unsigned int tail;
unsigned long flags;
+ queue_index = queue - bp->queues;
netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
- (unsigned int)(queue - bp->queues),
- queue->tx_tail, queue->tx_head);
+ queue_index, queue->tx_tail, queue->tx_head);
/* Prevent the queue NAPI TX poll from running, as it calls
* macb_tx_complete(), which in turn may call netif_wake_subqueue().
@@ -1148,8 +1090,10 @@ static void macb_tx_error_task(struct work_struct *work)
skb->data);
bp->dev->stats.tx_packets++;
queue->stats.tx_packets++;
+ packets++;
bp->dev->stats.tx_bytes += skb->len;
queue->stats.tx_bytes += skb->len;
+ bytes += skb->len;
}
} else {
/* "Buffers exhausted mid-frame" errors may only happen
@@ -1166,6 +1110,9 @@ static void macb_tx_error_task(struct work_struct *work)
macb_tx_unmap(bp, tx_skb, 0);
}
+ netdev_tx_completed_queue(netdev_get_tx_queue(bp->dev, queue_index),
+ packets, bytes);
+
/* Set end of TX queue */
desc = macb_tx_desc(queue, 0);
macb_set_addr(bp, desc, 0);
@@ -1176,10 +1123,6 @@ static void macb_tx_error_task(struct work_struct *work)
/* Reinitialize the TX desc queue */
queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
- queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
-#endif
/* Make TX ring reflect state of hardware */
queue->tx_head = 0;
queue->tx_tail = 0;
@@ -1233,11 +1176,13 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
{
struct macb *bp = queue->bp;
u16 queue_index = queue - bp->queues;
+ unsigned long flags;
unsigned int tail;
unsigned int head;
int packets = 0;
+ u32 bytes = 0;
- spin_lock(&queue->tx_ptr_lock);
+ spin_lock_irqsave(&queue->tx_ptr_lock, flags);
head = queue->tx_head;
for (tail = queue->tx_tail; tail != head && packets < budget; tail++) {
struct macb_tx_skb *tx_skb;
@@ -1277,6 +1222,7 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
bp->dev->stats.tx_bytes += skb->len;
queue->stats.tx_bytes += skb->len;
packets++;
+ bytes += skb->len;
}
/* Now we can safely release resources */
@@ -1291,12 +1237,15 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
}
}
+ netdev_tx_completed_queue(netdev_get_tx_queue(bp->dev, queue_index),
+ packets, bytes);
+
queue->tx_tail = tail;
if (__netif_subqueue_stopped(bp->dev, queue_index) &&
CIRC_CNT(queue->tx_head, queue->tx_tail,
bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp))
netif_wake_subqueue(bp->dev, queue_index);
- spin_unlock(&queue->tx_ptr_lock);
+ spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
return packets;
}
@@ -1347,8 +1296,19 @@ static void gem_rx_refill(struct macb_queue *queue)
dma_wmb();
macb_set_addr(bp, desc, paddr);
- /* properly align Ethernet header */
- skb_reserve(skb, NET_IP_ALIGN);
+ /* Properly align Ethernet header.
+ *
+ * Hardware can add dummy bytes if asked using the RBOF
+ * field inside the NCFGR register. That feature isn't
+ * available if hardware is RSC capable.
+ *
+ * We cannot fall back to doing the 2-byte shift before
+ * DMA mapping because the address field does not allow
+ * setting the low 2/3 bits.
+ * It is 3 bits if HW_DMA_CAP_PTP, else 2 bits.
+ */
+ if (!(bp->caps & MACB_CAPS_RSC))
+ skb_reserve(skb, NET_IP_ALIGN);
} else {
desc->ctrl = 0;
dma_wmb();
@@ -1712,8 +1672,9 @@ static void macb_tx_restart(struct macb_queue *queue)
{
struct macb *bp = queue->bp;
unsigned int head_idx, tbqp;
+ unsigned long flags;
- spin_lock(&queue->tx_ptr_lock);
+ spin_lock_irqsave(&queue->tx_ptr_lock, flags);
if (queue->tx_head == queue->tx_tail)
goto out_tx_ptr_unlock;
@@ -1725,19 +1686,20 @@ static void macb_tx_restart(struct macb_queue *queue)
if (tbqp == head_idx)
goto out_tx_ptr_unlock;
- spin_lock_irq(&bp->lock);
+ spin_lock(&bp->lock);
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
- spin_unlock_irq(&bp->lock);
+ spin_unlock(&bp->lock);
out_tx_ptr_unlock:
- spin_unlock(&queue->tx_ptr_lock);
+ spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
}
static bool macb_tx_complete_pending(struct macb_queue *queue)
{
bool retval = false;
+ unsigned long flags;
- spin_lock(&queue->tx_ptr_lock);
+ spin_lock_irqsave(&queue->tx_ptr_lock, flags);
if (queue->tx_head != queue->tx_tail) {
/* Make hw descriptor updates visible to CPU */
rmb();
@@ -1745,7 +1707,7 @@ static bool macb_tx_complete_pending(struct macb_queue *queue)
if (macb_tx_desc(queue, queue->tx_tail)->ctrl & MACB_BIT(TX_USED))
retval = true;
}
- spin_unlock(&queue->tx_ptr_lock);
+ spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
return retval;
}
@@ -1792,9 +1754,9 @@ static int macb_tx_poll(struct napi_struct *napi, int budget)
return work_done;
}
-static void macb_hresp_error_task(struct tasklet_struct *t)
+static void macb_hresp_error_task(struct work_struct *work)
{
- struct macb *bp = from_tasklet(bp, t, hresp_err_tasklet);
+ struct macb *bp = from_work(bp, work, hresp_err_bh_work);
struct net_device *dev = bp->dev;
struct macb_queue *queue;
unsigned int q;
@@ -1984,17 +1946,19 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
if (status & MACB_BIT(ISR_ROVR)) {
/* We missed at least one packet */
+ spin_lock(&bp->stats_lock);
if (macb_is_gem(bp))
bp->hw_stats.gem.rx_overruns++;
else
bp->hw_stats.macb.rx_overruns++;
+ spin_unlock(&bp->stats_lock);
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
}
if (status & MACB_BIT(HRESP)) {
- tasklet_schedule(&bp->hresp_err_tasklet);
+ queue_work(system_bh_wq, &bp->hresp_err_bh_work);
netdev_err(dev, "DMA bus error: HRESP not OK\n");
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
@@ -2031,14 +1995,14 @@ static unsigned int macb_tx_map(struct macb *bp,
struct sk_buff *skb,
unsigned int hdrlen)
{
- dma_addr_t mapping;
- unsigned int len, entry, i, tx_head = queue->tx_head;
- struct macb_tx_skb *tx_skb = NULL;
- struct macb_dma_desc *desc;
- unsigned int offset, size, count = 0;
unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
- unsigned int eof = 1, mss_mfs = 0;
+ unsigned int len, i, tx_head = queue->tx_head;
u32 ctrl, lso_ctrl = 0, seq_ctrl = 0;
+ unsigned int eof = 1, mss_mfs = 0;
+ struct macb_tx_skb *tx_skb = NULL;
+ struct macb_dma_desc *desc;
+ unsigned int offset, size;
+ dma_addr_t mapping;
/* LSO */
if (skb_shinfo(skb)->gso_size != 0) {
@@ -2058,8 +2022,7 @@ static unsigned int macb_tx_map(struct macb *bp,
offset = 0;
while (len) {
- entry = macb_tx_ring_wrap(bp, tx_head);
- tx_skb = &queue->tx_skb[entry];
+ tx_skb = macb_tx_skb(queue, tx_head);
mapping = dma_map_single(&bp->pdev->dev,
skb->data + offset,
@@ -2075,10 +2038,9 @@ static unsigned int macb_tx_map(struct macb *bp,
len -= size;
offset += size;
- count++;
tx_head++;
- size = min(len, bp->max_tx_length);
+ size = umin(len, bp->max_tx_length);
}
/* Then, map paged data from fragments */
@@ -2088,9 +2050,8 @@ static unsigned int macb_tx_map(struct macb *bp,
len = skb_frag_size(frag);
offset = 0;
while (len) {
- size = min(len, bp->max_tx_length);
- entry = macb_tx_ring_wrap(bp, tx_head);
- tx_skb = &queue->tx_skb[entry];
+ size = umin(len, bp->max_tx_length);
+ tx_skb = macb_tx_skb(queue, tx_head);
mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
offset, size, DMA_TO_DEVICE);
@@ -2105,7 +2066,6 @@ static unsigned int macb_tx_map(struct macb *bp,
len -= size;
offset += size;
- count++;
tx_head++;
}
}
@@ -2127,9 +2087,8 @@ static unsigned int macb_tx_map(struct macb *bp,
* to set the end of TX queue
*/
i = tx_head;
- entry = macb_tx_ring_wrap(bp, i);
ctrl = MACB_BIT(TX_USED);
- desc = macb_tx_desc(queue, entry);
+ desc = macb_tx_desc(queue, i);
desc->ctrl = ctrl;
if (lso_ctrl) {
@@ -2149,16 +2108,15 @@ static unsigned int macb_tx_map(struct macb *bp,
do {
i--;
- entry = macb_tx_ring_wrap(bp, i);
- tx_skb = &queue->tx_skb[entry];
- desc = macb_tx_desc(queue, entry);
+ tx_skb = macb_tx_skb(queue, i);
+ desc = macb_tx_desc(queue, i);
ctrl = (u32)tx_skb->size;
if (eof) {
ctrl |= MACB_BIT(TX_LAST);
eof = 0;
}
- if (unlikely(entry == (bp->tx_ring_size - 1)))
+ if (unlikely(macb_tx_ring_wrap(bp, i) == bp->tx_ring_size - 1))
ctrl |= MACB_BIT(TX_WRAP);
/* First descriptor is header descriptor */
@@ -2186,7 +2144,7 @@ static unsigned int macb_tx_map(struct macb *bp,
queue->tx_head = tx_head;
- return count;
+ return 0;
dma_error:
netdev_err(bp->dev, "TX DMA map failed\n");
@@ -2197,7 +2155,7 @@ dma_error:
macb_tx_unmap(bp, tx_skb, 0);
}
- return 0;
+ return -ENOMEM;
}
static netdev_features_t macb_features_check(struct sk_buff *skb,
@@ -2311,6 +2269,7 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct macb_queue *queue = &bp->queues[queue_index];
unsigned int desc_cnt, nr_frags, frag_size, f;
unsigned int hdrlen;
+ unsigned long flags;
bool is_lso;
netdev_tx_t ret = NETDEV_TX_OK;
@@ -2324,11 +2283,9 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
return ret;
}
-#ifdef CONFIG_MACB_USE_HWSTAMP
- if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
- (bp->hw_dma_cap & HW_DMA_CAP_PTP))
+ if (macb_dma_ptp(bp) &&
+ (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
-#endif
is_lso = (skb_shinfo(skb)->gso_size != 0);
@@ -2345,7 +2302,7 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
} else
- hdrlen = min(skb_headlen(skb), bp->max_tx_length);
+ hdrlen = umin(skb_headlen(skb), bp->max_tx_length);
#if defined(DEBUG) && defined(VERBOSE_DEBUG)
netdev_vdbg(bp->dev,
@@ -2371,7 +2328,7 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length);
}
- spin_lock_bh(&queue->tx_ptr_lock);
+ spin_lock_irqsave(&queue->tx_ptr_lock, flags);
/* This is a hard error, log it. */
if (CIRC_SPACE(queue->tx_head, queue->tx_tail,
@@ -2384,7 +2341,7 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
/* Map socket buffer for DMA transfer */
- if (!macb_tx_map(bp, queue, skb, hdrlen)) {
+ if (macb_tx_map(bp, queue, skb, hdrlen)) {
dev_kfree_skb_any(skb);
goto unlock;
}
@@ -2392,16 +2349,18 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Make newly initialized descriptor visible to hardware */
wmb();
skb_tx_timestamp(skb);
+ netdev_tx_sent_queue(netdev_get_tx_queue(bp->dev, queue_index),
+ skb->len);
- spin_lock_irq(&bp->lock);
+ spin_lock(&bp->lock);
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
- spin_unlock_irq(&bp->lock);
+ spin_unlock(&bp->lock);
if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
netif_stop_subqueue(dev, queue_index);
unlock:
- spin_unlock_bh(&queue->tx_ptr_lock);
+ spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
return ret;
}
@@ -2471,29 +2430,42 @@ static void macb_free_rx_buffers(struct macb *bp)
}
}
+static unsigned int macb_tx_ring_size_per_queue(struct macb *bp)
+{
+ return macb_dma_desc_get_size(bp) * bp->tx_ring_size + bp->tx_bd_rd_prefetch;
+}
+
+static unsigned int macb_rx_ring_size_per_queue(struct macb *bp)
+{
+ return macb_dma_desc_get_size(bp) * bp->rx_ring_size + bp->rx_bd_rd_prefetch;
+}
+
static void macb_free_consistent(struct macb *bp)
{
+ struct device *dev = &bp->pdev->dev;
struct macb_queue *queue;
unsigned int q;
- int size;
+ size_t size;
+
+ if (bp->rx_ring_tieoff) {
+ dma_free_coherent(dev, macb_dma_desc_get_size(bp),
+ bp->rx_ring_tieoff, bp->rx_ring_tieoff_dma);
+ bp->rx_ring_tieoff = NULL;
+ }
bp->macbgem_ops.mog_free_rx_buffers(bp);
+ size = bp->num_queues * macb_tx_ring_size_per_queue(bp);
+ dma_free_coherent(dev, size, bp->queues[0].tx_ring, bp->queues[0].tx_ring_dma);
+
+ size = bp->num_queues * macb_rx_ring_size_per_queue(bp);
+ dma_free_coherent(dev, size, bp->queues[0].rx_ring, bp->queues[0].rx_ring_dma);
+
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
kfree(queue->tx_skb);
queue->tx_skb = NULL;
- if (queue->tx_ring) {
- size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
- dma_free_coherent(&bp->pdev->dev, size,
- queue->tx_ring, queue->tx_ring_dma);
- queue->tx_ring = NULL;
- }
- if (queue->rx_ring) {
- size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
- dma_free_coherent(&bp->pdev->dev, size,
- queue->rx_ring, queue->rx_ring_dma);
- queue->rx_ring = NULL;
- }
+ queue->tx_ring = NULL;
+ queue->rx_ring = NULL;
}
}
@@ -2535,39 +2507,59 @@ static int macb_alloc_rx_buffers(struct macb *bp)
static int macb_alloc_consistent(struct macb *bp)
{
+ struct device *dev = &bp->pdev->dev;
+ dma_addr_t tx_dma, rx_dma;
struct macb_queue *queue;
unsigned int q;
- int size;
+ void *tx, *rx;
+ size_t size;
+
+ /*
+ * The upper 32 bits of the Tx/Rx DMA descriptors for each queue must match!
+ * We cannot enforce this guarantee; the best we can do is a single
+ * allocation and hope it lands in alloc_pages(), which guarantees
+ * natural alignment of physical addresses.
+ */
+
+ size = bp->num_queues * macb_tx_ring_size_per_queue(bp);
+ tx = dma_alloc_coherent(dev, size, &tx_dma, GFP_KERNEL);
+ if (!tx || upper_32_bits(tx_dma) != upper_32_bits(tx_dma + size - 1))
+ goto out_err;
+ netdev_dbg(bp->dev, "Allocated %zu bytes for %u TX rings at %08lx (mapped %p)\n",
+ size, bp->num_queues, (unsigned long)tx_dma, tx);
+
+ size = bp->num_queues * macb_rx_ring_size_per_queue(bp);
+ rx = dma_alloc_coherent(dev, size, &rx_dma, GFP_KERNEL);
+ if (!rx || upper_32_bits(rx_dma) != upper_32_bits(rx_dma + size - 1))
+ goto out_err;
+ netdev_dbg(bp->dev, "Allocated %zu bytes for %u RX rings at %08lx (mapped %p)\n",
+ size, bp->num_queues, (unsigned long)rx_dma, rx);
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
- size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
- queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
- &queue->tx_ring_dma,
- GFP_KERNEL);
- if (!queue->tx_ring)
- goto out_err;
- netdev_dbg(bp->dev,
- "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
- q, size, (unsigned long)queue->tx_ring_dma,
- queue->tx_ring);
+ queue->tx_ring = tx + macb_tx_ring_size_per_queue(bp) * q;
+ queue->tx_ring_dma = tx_dma + macb_tx_ring_size_per_queue(bp) * q;
+
+ queue->rx_ring = rx + macb_rx_ring_size_per_queue(bp) * q;
+ queue->rx_ring_dma = rx_dma + macb_rx_ring_size_per_queue(bp) * q;
size = bp->tx_ring_size * sizeof(struct macb_tx_skb);
queue->tx_skb = kmalloc(size, GFP_KERNEL);
if (!queue->tx_skb)
goto out_err;
-
- size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
- queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
- &queue->rx_ring_dma, GFP_KERNEL);
- if (!queue->rx_ring)
- goto out_err;
- netdev_dbg(bp->dev,
- "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
- size, (unsigned long)queue->rx_ring_dma, queue->rx_ring);
}
if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
goto out_err;
+ /* Allocate the tie-off descriptor required for PM cases */
+ if (!(bp->caps & MACB_CAPS_QUEUE_DISABLE)) {
+ bp->rx_ring_tieoff = dma_alloc_coherent(&bp->pdev->dev,
+ macb_dma_desc_get_size(bp),
+ &bp->rx_ring_tieoff_dma,
+ GFP_KERNEL);
+ if (!bp->rx_ring_tieoff)
+ goto out_err;
+ }
+
return 0;
out_err:
@@ -2575,6 +2567,19 @@ out_err:
return -ENOMEM;
}
+static void macb_init_tieoff(struct macb *bp)
+{
+ struct macb_dma_desc *desc = bp->rx_ring_tieoff;
+
+ if (bp->caps & MACB_CAPS_QUEUE_DISABLE)
+ return;
+ /* Set up a wrapping descriptor with no free slots
+ * (WRAP and USED) to tie off/disable unused RX queues.
+ */
+ macb_set_addr(bp, desc, MACB_BIT(RX_WRAP) | MACB_BIT(RX_USED));
+ desc->ctrl = 0;
+}
+
static void gem_init_rings(struct macb *bp)
{
struct macb_queue *queue;
@@ -2598,6 +2603,7 @@ static void gem_init_rings(struct macb *bp)
gem_rx_refill(queue);
}
+ macb_init_tieoff(bp);
}
static void macb_init_rings(struct macb *bp)
@@ -2615,6 +2621,8 @@ static void macb_init_rings(struct macb *bp)
bp->queues[0].tx_head = 0;
bp->queues[0].tx_tail = 0;
desc->ctrl |= MACB_BIT(TX_WRAP);
+
+ macb_init_tieoff(bp);
}
static void macb_reset_hw(struct macb *bp)
@@ -2754,14 +2762,10 @@ static void macb_configure_dma(struct macb *bp)
dmacfg &= ~GEM_BIT(TXCOEN);
dmacfg &= ~GEM_BIT(ADDR64);
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
+ if (macb_dma64(bp))
dmacfg |= GEM_BIT(ADDR64);
-#endif
-#ifdef CONFIG_MACB_USE_HWSTAMP
- if (bp->hw_dma_cap & HW_DMA_CAP_PTP)
+ if (macb_dma_ptp(bp))
dmacfg |= GEM_BIT(RXEXT) | GEM_BIT(TXEXT);
-#endif
netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
dmacfg);
gem_writel(bp, DMACFG, dmacfg);
@@ -2776,7 +2780,11 @@ static void macb_init_hw(struct macb *bp)
macb_set_hwaddr(bp);
config = macb_mdc_clk_div(bp);
- config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */
+ /* Align Ethernet data.
+ * If RSC capable, this offset is ignored by the HW.
+ */
+ if (!(bp->caps & MACB_CAPS_RSC))
+ config |= MACB_BF(RBOF, NET_IP_ALIGN);
config |= MACB_BIT(DRFCS); /* Discard Rx FCS */
if (bp->caps & MACB_CAPS_JUMBO)
config |= MACB_BIT(JFRAME); /* Enable jumbo frames */
@@ -2953,7 +2961,11 @@ static int macb_open(struct net_device *dev)
macb_init_hw(bp);
- err = phy_power_on(bp->sgmii_phy);
+ err = phy_set_mode_ext(bp->phy, PHY_MODE_ETHERNET, bp->phy_interface);
+ if (err)
+ goto reset_hw;
+
+ err = phy_power_on(bp->phy);
if (err)
goto reset_hw;
@@ -2969,7 +2981,7 @@ static int macb_open(struct net_device *dev)
return 0;
phy_off:
- phy_power_off(bp->sgmii_phy);
+ phy_power_off(bp->phy);
reset_hw:
macb_reset_hw(bp);
@@ -2995,12 +3007,13 @@ static int macb_close(struct net_device *dev)
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
napi_disable(&queue->napi_rx);
napi_disable(&queue->napi_tx);
+ netdev_tx_reset_queue(netdev_get_tx_queue(dev, q));
}
phylink_stop(bp->phylink);
phylink_disconnect_phy(bp->phylink);
- phy_power_off(bp->sgmii_phy);
+ phy_power_off(bp->phy);
spin_lock_irqsave(&bp->lock, flags);
macb_reset_hw(bp);
@@ -3022,7 +3035,7 @@ static int macb_change_mtu(struct net_device *dev, int new_mtu)
if (netif_running(dev))
return -EBUSY;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
@@ -3045,7 +3058,7 @@ static void gem_update_stats(struct macb *bp)
unsigned int i, q, idx;
unsigned long *stat;
- u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
+ u64 *p = &bp->hw_stats.gem.tx_octets;
for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
u32 offset = gem_statistics[i].offset;
@@ -3058,7 +3071,7 @@ static void gem_update_stats(struct macb *bp)
/* Add GEM_OCTTXH, GEM_OCTRXH */
val = bp->macb_reg_readl(bp, offset + 4);
bp->ethtool_stats[i] += ((u64)val) << 32;
- *(++p) += val;
+ *p += ((u64)val) << 32;
}
}
@@ -3068,15 +3081,13 @@ static void gem_update_stats(struct macb *bp)
bp->ethtool_stats[idx++] = *stat;
}
-static struct net_device_stats *gem_get_stats(struct macb *bp)
+static void gem_get_stats(struct macb *bp, struct rtnl_link_stats64 *nstat)
{
struct gem_stats *hwstat = &bp->hw_stats.gem;
- struct net_device_stats *nstat = &bp->dev->stats;
-
- if (!netif_running(bp->dev))
- return nstat;
- gem_update_stats(bp);
+ spin_lock_irq(&bp->stats_lock);
+ if (netif_running(bp->dev))
+ gem_update_stats(bp);
nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
hwstat->rx_alignment_errors +
@@ -3105,19 +3116,19 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
nstat->tx_fifo_errors = hwstat->tx_underrun;
-
- return nstat;
+ spin_unlock_irq(&bp->stats_lock);
}
static void gem_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
- struct macb *bp;
+ struct macb *bp = netdev_priv(dev);
- bp = netdev_priv(dev);
+ spin_lock_irq(&bp->stats_lock);
gem_update_stats(bp);
memcpy(data, &bp->ethtool_stats, sizeof(u64)
* (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES));
+ spin_unlock_irq(&bp->stats_lock);
}
static int gem_get_sset_count(struct net_device *dev, int sset)
@@ -3157,16 +3168,20 @@ static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
}
}
-static struct net_device_stats *macb_get_stats(struct net_device *dev)
+static void macb_get_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *nstat)
{
struct macb *bp = netdev_priv(dev);
- struct net_device_stats *nstat = &bp->dev->stats;
struct macb_stats *hwstat = &bp->hw_stats.macb;
- if (macb_is_gem(bp))
- return gem_get_stats(bp);
+ netdev_stats_to_stats64(nstat, &bp->dev->stats);
+ if (macb_is_gem(bp)) {
+ gem_get_stats(bp, nstat);
+ return;
+ }
/* read stats from hardware */
+ spin_lock_irq(&bp->stats_lock);
macb_update_stats(bp);
/* Convert HW stats into netdevice stats */
@@ -3200,8 +3215,171 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev)
nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
nstat->tx_fifo_errors = hwstat->tx_underruns;
/* Don't know about heartbeat or window errors... */
+ spin_unlock_irq(&bp->stats_lock);
+}
+
+static void macb_get_pause_stats(struct net_device *dev,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct macb_stats *hwstat = &bp->hw_stats.macb;
+
+ spin_lock_irq(&bp->stats_lock);
+ macb_update_stats(bp);
+ pause_stats->tx_pause_frames = hwstat->tx_pause_frames;
+ pause_stats->rx_pause_frames = hwstat->rx_pause_frames;
+ spin_unlock_irq(&bp->stats_lock);
+}
+
+static void gem_get_pause_stats(struct net_device *dev,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct gem_stats *hwstat = &bp->hw_stats.gem;
+
+ spin_lock_irq(&bp->stats_lock);
+ gem_update_stats(bp);
+ pause_stats->tx_pause_frames = hwstat->tx_pause_frames;
+ pause_stats->rx_pause_frames = hwstat->rx_pause_frames;
+ spin_unlock_irq(&bp->stats_lock);
+}
+
+static void macb_get_eth_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct macb_stats *hwstat = &bp->hw_stats.macb;
+
+ spin_lock_irq(&bp->stats_lock);
+ macb_update_stats(bp);
+ mac_stats->FramesTransmittedOK = hwstat->tx_ok;
+ mac_stats->SingleCollisionFrames = hwstat->tx_single_cols;
+ mac_stats->MultipleCollisionFrames = hwstat->tx_multiple_cols;
+ mac_stats->FramesReceivedOK = hwstat->rx_ok;
+ mac_stats->FrameCheckSequenceErrors = hwstat->rx_fcs_errors;
+ mac_stats->AlignmentErrors = hwstat->rx_align_errors;
+ mac_stats->FramesWithDeferredXmissions = hwstat->tx_deferred;
+ mac_stats->LateCollisions = hwstat->tx_late_cols;
+ mac_stats->FramesAbortedDueToXSColls = hwstat->tx_excessive_cols;
+ mac_stats->FramesLostDueToIntMACXmitError = hwstat->tx_underruns;
+ mac_stats->CarrierSenseErrors = hwstat->tx_carrier_errors;
+ mac_stats->FramesLostDueToIntMACRcvError = hwstat->rx_overruns;
+ mac_stats->InRangeLengthErrors = hwstat->rx_length_mismatch;
+ mac_stats->FrameTooLongErrors = hwstat->rx_oversize_pkts;
+ spin_unlock_irq(&bp->stats_lock);
+}
+
+static void gem_get_eth_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct gem_stats *hwstat = &bp->hw_stats.gem;
+
+ spin_lock_irq(&bp->stats_lock);
+ gem_update_stats(bp);
+ mac_stats->FramesTransmittedOK = hwstat->tx_frames;
+ mac_stats->SingleCollisionFrames = hwstat->tx_single_collision_frames;
+ mac_stats->MultipleCollisionFrames =
+ hwstat->tx_multiple_collision_frames;
+ mac_stats->FramesReceivedOK = hwstat->rx_frames;
+ mac_stats->FrameCheckSequenceErrors =
+ hwstat->rx_frame_check_sequence_errors;
+ mac_stats->AlignmentErrors = hwstat->rx_alignment_errors;
+ mac_stats->OctetsTransmittedOK = hwstat->tx_octets;
+ mac_stats->FramesWithDeferredXmissions = hwstat->tx_deferred_frames;
+ mac_stats->LateCollisions = hwstat->tx_late_collisions;
+ mac_stats->FramesAbortedDueToXSColls = hwstat->tx_excessive_collisions;
+ mac_stats->FramesLostDueToIntMACXmitError = hwstat->tx_underrun;
+ mac_stats->CarrierSenseErrors = hwstat->tx_carrier_sense_errors;
+ mac_stats->OctetsReceivedOK = hwstat->rx_octets;
+ mac_stats->MulticastFramesXmittedOK = hwstat->tx_multicast_frames;
+ mac_stats->BroadcastFramesXmittedOK = hwstat->tx_broadcast_frames;
+ mac_stats->MulticastFramesReceivedOK = hwstat->rx_multicast_frames;
+ mac_stats->BroadcastFramesReceivedOK = hwstat->rx_broadcast_frames;
+ mac_stats->InRangeLengthErrors = hwstat->rx_length_field_frame_errors;
+ mac_stats->FrameTooLongErrors = hwstat->rx_oversize_frames;
+ spin_unlock_irq(&bp->stats_lock);
+}
+
+/* TODO: Report SQE test errors when added to phy_stats */
+static void macb_get_eth_phy_stats(struct net_device *dev,
+ struct ethtool_eth_phy_stats *phy_stats)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct macb_stats *hwstat = &bp->hw_stats.macb;
- return nstat;
+ spin_lock_irq(&bp->stats_lock);
+ macb_update_stats(bp);
+ phy_stats->SymbolErrorDuringCarrier = hwstat->rx_symbol_errors;
+ spin_unlock_irq(&bp->stats_lock);
+}
+
+static void gem_get_eth_phy_stats(struct net_device *dev,
+ struct ethtool_eth_phy_stats *phy_stats)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct gem_stats *hwstat = &bp->hw_stats.gem;
+
+ spin_lock_irq(&bp->stats_lock);
+ gem_update_stats(bp);
+ phy_stats->SymbolErrorDuringCarrier = hwstat->rx_symbol_errors;
+ spin_unlock_irq(&bp->stats_lock);
+}
+
+static void macb_get_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct macb_stats *hwstat = &bp->hw_stats.macb;
+
+ spin_lock_irq(&bp->stats_lock);
+ macb_update_stats(bp);
+ rmon_stats->undersize_pkts = hwstat->rx_undersize_pkts;
+ rmon_stats->oversize_pkts = hwstat->rx_oversize_pkts;
+ rmon_stats->jabbers = hwstat->rx_jabbers;
+ spin_unlock_irq(&bp->stats_lock);
+}
+
+static const struct ethtool_rmon_hist_range gem_rmon_ranges[] = {
+ { 64, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, 16384 },
+ { },
+};
+
+static void gem_get_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct gem_stats *hwstat = &bp->hw_stats.gem;
+
+ spin_lock_irq(&bp->stats_lock);
+ gem_update_stats(bp);
+ rmon_stats->undersize_pkts = hwstat->rx_undersized_frames;
+ rmon_stats->oversize_pkts = hwstat->rx_oversize_frames;
+ rmon_stats->jabbers = hwstat->rx_jabbers;
+ rmon_stats->hist[0] = hwstat->rx_64_byte_frames;
+ rmon_stats->hist[1] = hwstat->rx_65_127_byte_frames;
+ rmon_stats->hist[2] = hwstat->rx_128_255_byte_frames;
+ rmon_stats->hist[3] = hwstat->rx_256_511_byte_frames;
+ rmon_stats->hist[4] = hwstat->rx_512_1023_byte_frames;
+ rmon_stats->hist[5] = hwstat->rx_1024_1518_byte_frames;
+ rmon_stats->hist[6] = hwstat->rx_greater_than_1518_byte_frames;
+ rmon_stats->hist_tx[0] = hwstat->tx_64_byte_frames;
+ rmon_stats->hist_tx[1] = hwstat->tx_65_127_byte_frames;
+ rmon_stats->hist_tx[2] = hwstat->tx_128_255_byte_frames;
+ rmon_stats->hist_tx[3] = hwstat->tx_256_511_byte_frames;
+ rmon_stats->hist_tx[4] = hwstat->tx_512_1023_byte_frames;
+ rmon_stats->hist_tx[5] = hwstat->tx_1024_1518_byte_frames;
+ rmon_stats->hist_tx[6] = hwstat->tx_greater_than_1518_byte_frames;
+ spin_unlock_irq(&bp->stats_lock);
+ *ranges = gem_rmon_ranges;
}
static int macb_get_regs_len(struct net_device *netdev)
@@ -3246,13 +3424,11 @@ static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct macb *bp = netdev_priv(netdev);
- if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
- phylink_ethtool_get_wol(bp->phylink, wol);
- wol->supported |= WAKE_MAGIC;
+ phylink_ethtool_get_wol(bp->phylink, wol);
+ wol->supported |= (WAKE_MAGIC | WAKE_ARP);
- if (bp->wol & MACB_WOL_ENABLED)
- wol->wolopts |= WAKE_MAGIC;
- }
+ /* Add macb wolopts to phy wolopts */
+ wol->wolopts |= bp->wolopts;
}
static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
@@ -3262,22 +3438,15 @@ static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
/* Pass the order to phylink layer */
ret = phylink_ethtool_set_wol(bp->phylink, wol);
- /* Don't manage WoL on MAC if handled by the PHY
- * or if there's a failure in talking to the PHY
- */
- if (!ret || ret != -EOPNOTSUPP)
+ /* Don't manage WoL on the MAC if the PHY's set_wol() fails */
+ if (ret && ret != -EOPNOTSUPP)
return ret;
- if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
- (wol->wolopts & ~WAKE_MAGIC))
- return -EOPNOTSUPP;
-
- if (wol->wolopts & WAKE_MAGIC)
- bp->wol |= MACB_WOL_ENABLED;
- else
- bp->wol &= ~MACB_WOL_ENABLED;
+ bp->wolopts = (wol->wolopts & WAKE_MAGIC) ? WAKE_MAGIC : 0;
+ bp->wolopts |= (wol->wolopts & WAKE_ARP) ? WAKE_ARP : 0;
+ bp->wol = (wol->wolopts) ? MACB_WOL_ENABLED : 0;
- device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED);
+ device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
return 0;
}
@@ -3376,19 +3545,17 @@ static s32 gem_get_ptp_max_adj(void)
}
static int gem_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct macb *bp = netdev_priv(dev);
- if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) {
+ if (!macb_dma_ptp(bp)) {
ethtool_op_get_ts_info(dev, info);
return 0;
}
info->so_timestamping =
SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
@@ -3400,7 +3567,8 @@ static int gem_get_ts_info(struct net_device *dev,
(1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_ALL);
- info->phc_index = bp->ptp_clock ? ptp_clock_index(bp->ptp_clock) : -1;
+ if (bp->ptp_clock)
+ info->phc_index = ptp_clock_index(bp->ptp_clock);
return 0;
}
@@ -3417,7 +3585,7 @@ static struct macb_ptp_info gem_ptp_info = {
#endif
static int macb_get_ts_info(struct net_device *netdev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct macb *bp = netdev_priv(netdev);
@@ -3740,6 +3908,10 @@ static const struct ethtool_ops macb_ethtool_ops = {
.get_regs = macb_get_regs,
.get_link = ethtool_op_get_link,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_pause_stats = macb_get_pause_stats,
+ .get_eth_mac_stats = macb_get_eth_mac_stats,
+ .get_eth_phy_stats = macb_get_eth_phy_stats,
+ .get_rmon_stats = macb_get_rmon_stats,
.get_wol = macb_get_wol,
.set_wol = macb_set_wol,
.get_link_ksettings = macb_get_link_ksettings,
@@ -3758,6 +3930,10 @@ static const struct ethtool_ops gem_ethtool_ops = {
.get_ethtool_stats = gem_get_ethtool_stats,
.get_strings = gem_get_ethtool_strings,
.get_sset_count = gem_get_sset_count,
+ .get_pause_stats = gem_get_pause_stats,
+ .get_eth_mac_stats = gem_get_eth_mac_stats,
+ .get_eth_phy_stats = gem_get_eth_phy_stats,
+ .get_rmon_stats = gem_get_rmon_stats,
.get_link_ksettings = macb_get_link_ksettings,
.set_link_ksettings = macb_set_link_ksettings,
.get_ringparam = macb_get_ringparam,
@@ -3889,12 +4065,234 @@ static void macb_restore_features(struct macb *bp)
macb_set_rxflow_feature(bp, features);
}
+static int macb_taprio_setup_replace(struct net_device *ndev,
+ struct tc_taprio_qopt_offload *conf)
+{
+ u64 total_on_time = 0, start_time_sec = 0, start_time = conf->base_time;
+ u32 configured_queues = 0, speed = 0, start_time_nsec;
+ struct macb_queue_enst_config *enst_queue;
+ struct tc_taprio_sched_entry *entry;
+ struct macb *bp = netdev_priv(ndev);
+ struct ethtool_link_ksettings kset;
+ struct macb_queue *queue;
+ u32 queue_mask;
+ u8 queue_id;
+ size_t i;
+ int err;
+
+ if (conf->num_entries > bp->num_queues) {
+ netdev_err(ndev, "Too many TAPRIO entries: %zu > %d queues\n",
+ conf->num_entries, bp->num_queues);
+ return -EINVAL;
+ }
+
+ if (conf->base_time < 0) {
+ netdev_err(ndev, "Invalid base_time: must be 0 or positive, got %lld\n",
+ conf->base_time);
+ return -ERANGE;
+ }
+
+ /* Get the current link speed */
+ err = phylink_ethtool_ksettings_get(bp->phylink, &kset);
+ if (unlikely(err)) {
+ netdev_err(ndev, "Failed to get link settings: %d\n", err);
+ return err;
+ }
+
+ speed = kset.base.speed;
+ if (unlikely(speed <= 0)) {
+ netdev_err(ndev, "Invalid speed: %d\n", speed);
+ return -EINVAL;
+ }
+
+ enst_queue = kcalloc(conf->num_entries, sizeof(*enst_queue), GFP_KERNEL);
+ if (unlikely(!enst_queue))
+ return -ENOMEM;
+
+ /* Pre-validate all entries before making any hardware changes */
+ for (i = 0; i < conf->num_entries; i++) {
+ entry = &conf->entries[i];
+
+ if (entry->command != TC_TAPRIO_CMD_SET_GATES) {
+ netdev_err(ndev, "Entry %zu: unsupported command %d\n",
+ i, entry->command);
+ err = -EOPNOTSUPP;
+ goto cleanup;
+ }
+
+ /* Validate gate_mask: must be nonzero, single queue, and within range */
+ if (!is_power_of_2(entry->gate_mask)) {
+ netdev_err(ndev, "Entry %zu: gate_mask 0x%x is not a power of 2 (only one queue per entry allowed)\n",
+ i, entry->gate_mask);
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ /* gate_mask must not select queues outside the valid queues */
+ queue_id = order_base_2(entry->gate_mask);
+ if (queue_id >= bp->num_queues) {
+ netdev_err(ndev, "Entry %zu: gate_mask 0x%x exceeds queue range (max_queues=%d)\n",
+ i, entry->gate_mask, bp->num_queues);
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ /* Check for start time limits */
+ start_time_sec = start_time;
+ start_time_nsec = do_div(start_time_sec, NSEC_PER_SEC);
+ if (start_time_sec > GENMASK(GEM_START_TIME_SEC_SIZE - 1, 0)) {
+ netdev_err(ndev, "Entry %zu: Start time %llu s exceeds hardware limit\n",
+ i, start_time_sec);
+ err = -ERANGE;
+ goto cleanup;
+ }
+
+ /* Check for on time limit */
+ if (entry->interval > enst_max_hw_interval(speed)) {
+ netdev_err(ndev, "Entry %zu: interval %u ns exceeds hardware limit %llu ns\n",
+ i, entry->interval, enst_max_hw_interval(speed));
+ err = -ERANGE;
+ goto cleanup;
+ }
+
+ /* Check for off time limit */
+ if ((conf->cycle_time - entry->interval) > enst_max_hw_interval(speed)) {
+ netdev_err(ndev, "Entry %zu: off_time %llu ns exceeds hardware limit %llu ns\n",
+ i, conf->cycle_time - entry->interval,
+ enst_max_hw_interval(speed));
+ err = -ERANGE;
+ goto cleanup;
+ }
+
+ enst_queue[i].queue_id = queue_id;
+ enst_queue[i].start_time_mask =
+ (start_time_sec << GEM_START_TIME_SEC_OFFSET) |
+ start_time_nsec;
+ enst_queue[i].on_time_bytes =
+ enst_ns_to_hw_units(entry->interval, speed);
+ enst_queue[i].off_time_bytes =
+ enst_ns_to_hw_units(conf->cycle_time - entry->interval, speed);
+
+ configured_queues |= entry->gate_mask;
+ total_on_time += entry->interval;
+ start_time += entry->interval;
+ }
+
+ /* Check total interval doesn't exceed cycle time */
+ if (total_on_time > conf->cycle_time) {
+ netdev_err(ndev, "Total ON %llu ns exceeds cycle time %llu ns\n",
+ total_on_time, conf->cycle_time);
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ netdev_dbg(ndev, "TAPRIO setup: %zu entries, base_time=%lld ns, cycle_time=%llu ns\n",
+ conf->num_entries, conf->base_time, conf->cycle_time);
+
+ /* All validations passed - proceed with hardware configuration */
+ scoped_guard(spinlock_irqsave, &bp->lock) {
+ /* Disable ENST queues if running before configuring */
+ queue_mask = BIT_U32(bp->num_queues) - 1;
+ gem_writel(bp, ENST_CONTROL,
+ queue_mask << GEM_ENST_DISABLE_QUEUE_OFFSET);
+
+ for (i = 0; i < conf->num_entries; i++) {
+ queue = &bp->queues[enst_queue[i].queue_id];
+ /* Configure queue timing registers */
+ queue_writel(queue, ENST_START_TIME,
+ enst_queue[i].start_time_mask);
+ queue_writel(queue, ENST_ON_TIME,
+ enst_queue[i].on_time_bytes);
+ queue_writel(queue, ENST_OFF_TIME,
+ enst_queue[i].off_time_bytes);
+ }
+
+ /* Enable ENST for all configured queues in one write */
+ gem_writel(bp, ENST_CONTROL, configured_queues);
+ }
+
+ netdev_info(ndev, "TAPRIO configuration completed successfully: %zu entries, %d queues configured\n",
+ conf->num_entries, hweight32(configured_queues));
+
+cleanup:
+ kfree(enst_queue);
+ return err;
+}
+
+static void macb_taprio_destroy(struct net_device *ndev)
+{
+ struct macb *bp = netdev_priv(ndev);
+ struct macb_queue *queue;
+ u32 queue_mask;
+ unsigned int q;
+
+ netdev_reset_tc(ndev);
+ queue_mask = BIT_U32(bp->num_queues) - 1;
+
+ scoped_guard(spinlock_irqsave, &bp->lock) {
+ /* Single disable command for all queues */
+ gem_writel(bp, ENST_CONTROL,
+ queue_mask << GEM_ENST_DISABLE_QUEUE_OFFSET);
+
+ /* Clear all queue ENST registers in batch */
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ queue_writel(queue, ENST_START_TIME, 0);
+ queue_writel(queue, ENST_ON_TIME, 0);
+ queue_writel(queue, ENST_OFF_TIME, 0);
+ }
+ }
+ netdev_info(ndev, "TAPRIO destroy: All gates disabled\n");
+}
+
+static int macb_setup_taprio(struct net_device *ndev,
+ struct tc_taprio_qopt_offload *taprio)
+{
+ struct macb *bp = netdev_priv(ndev);
+ int err = 0;
+
+ if (unlikely(!(ndev->hw_features & NETIF_F_HW_TC)))
+ return -EOPNOTSUPP;
+
+ /* Check if the device is in runtime suspend */
+ if (unlikely(pm_runtime_suspended(&bp->pdev->dev))) {
+ netdev_err(ndev, "Device is in runtime suspend\n");
+ return -EOPNOTSUPP;
+ }
+
+ switch (taprio->cmd) {
+ case TAPRIO_CMD_REPLACE:
+ err = macb_taprio_setup_replace(ndev, taprio);
+ break;
+ case TAPRIO_CMD_DESTROY:
+ macb_taprio_destroy(ndev);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+static int macb_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ if (!dev || !type_data)
+ return -EINVAL;
+
+ switch (type) {
+ case TC_SETUP_QDISC_TAPRIO:
+ return macb_setup_taprio(dev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static const struct net_device_ops macb_netdev_ops = {
.ndo_open = macb_open,
.ndo_stop = macb_close,
.ndo_start_xmit = macb_start_xmit,
.ndo_set_rx_mode = macb_set_rx_mode,
- .ndo_get_stats = macb_get_stats,
+ .ndo_get_stats64 = macb_get_stats,
.ndo_eth_ioctl = macb_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = macb_change_mtu,
@@ -3906,6 +4304,7 @@ static const struct net_device_ops macb_netdev_ops = {
.ndo_features_check = macb_features_check,
.ndo_hwtstamp_set = macb_hwtstamp_set,
.ndo_hwtstamp_get = macb_hwtstamp_get,
+ .ndo_setup_tc = macb_setup_tc,
};
/* Configure peripheral capabilities according to device tree
@@ -3914,8 +4313,12 @@ static const struct net_device_ops macb_netdev_ops = {
static void macb_configure_caps(struct macb *bp,
const struct macb_config *dt_conf)
{
+ struct device_node *np = bp->pdev->dev.of_node;
+ bool refclk_ext;
u32 dcfg;
+ refclk_ext = of_property_read_bool(np, "cdns,refclk-ext");
+
if (dt_conf)
bp->caps = dt_conf->caps;
@@ -3933,42 +4336,46 @@ static void macb_configure_caps(struct macb *bp,
dcfg = gem_readl(bp, DCFG2);
if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0)
bp->caps |= MACB_CAPS_FIFO_MODE;
+ if (GEM_BFEXT(PBUF_RSC, gem_readl(bp, DCFG6)))
+ bp->caps |= MACB_CAPS_RSC;
if (gem_has_ptp(bp)) {
if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5)))
dev_err(&bp->pdev->dev,
"GEM doesn't support hardware ptp.\n");
else {
#ifdef CONFIG_MACB_USE_HWSTAMP
- bp->hw_dma_cap |= HW_DMA_CAP_PTP;
+ bp->caps |= MACB_CAPS_DMA_PTP;
bp->ptp_info = &gem_ptp_info;
#endif
}
}
}
+ if (refclk_ext)
+ bp->caps |= MACB_CAPS_USRIO_HAS_CLKEN;
+
dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
}
-static void macb_probe_queues(void __iomem *mem,
- bool native_io,
- unsigned int *queue_mask,
- unsigned int *num_queues)
+static int macb_probe_queues(struct device *dev, void __iomem *mem, bool native_io)
{
- *queue_mask = 0x1;
- *num_queues = 1;
+ /* BIT(0) is never set but queue 0 always exists. */
+ unsigned int queue_mask = 0x1;
- /* is it macb or gem ?
- *
- * We need to read directly from the hardware here because
- * we are early in the probe process and don't have the
- * MACB_CAPS_MACB_IS_GEM flag positioned
- */
- if (!hw_is_gem(mem, native_io))
- return;
+ /* Use hw_is_gem() as MACB_CAPS_MACB_IS_GEM is not yet positioned. */
+ if (hw_is_gem(mem, native_io)) {
+ if (native_io)
+ queue_mask |= __raw_readl(mem + GEM_DCFG6) & 0xFF;
+ else
+ queue_mask |= readl_relaxed(mem + GEM_DCFG6) & 0xFF;
- /* bit 0 is never set but queue 0 always exists */
- *queue_mask |= readl_relaxed(mem + GEM_DCFG6) & 0xff;
- *num_queues = hweight32(*queue_mask);
+ if (fls(queue_mask) != ffz(queue_mask)) {
+ dev_err(dev, "queue mask %#x has a hole\n", queue_mask);
+ return -EINVAL;
+ }
+ }
+
+ return hweight32(queue_mask);
}
static void macb_clks_disable(struct clk *pclk, struct clk *hclk, struct clk *tx_clk,
@@ -4086,10 +4493,7 @@ static int macb_init(struct platform_device *pdev)
* register mapping but we don't want to test the queue index then
* compute the corresponding register offset at run time.
*/
- for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) {
- if (!(bp->queue_mask & (1 << hw_q)))
- continue;
-
+ for (hw_q = 0, q = 0; hw_q < bp->num_queues; ++hw_q) {
queue = &bp->queues[q];
queue->bp = bp;
spin_lock_init(&queue->tx_ptr_lock);
@@ -4103,12 +4507,6 @@ static int macb_init(struct platform_device *pdev)
queue->TBQP = GEM_TBQP(hw_q - 1);
queue->RBQP = GEM_RBQP(hw_q - 1);
queue->RBQS = GEM_RBQS(hw_q - 1);
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
- queue->TBQPH = GEM_TBQPH(hw_q - 1);
- queue->RBQPH = GEM_RBQPH(hw_q - 1);
- }
-#endif
} else {
/* queue0 uses legacy registers */
queue->ISR = MACB_ISR;
@@ -4117,14 +4515,12 @@ static int macb_init(struct platform_device *pdev)
queue->IMR = MACB_IMR;
queue->TBQP = MACB_TBQP;
queue->RBQP = MACB_RBQP;
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
- queue->TBQPH = MACB_TBQPH;
- queue->RBQPH = MACB_RBQPH;
- }
-#endif
}
+ queue->ENST_START_TIME = GEM_ENST_START_TIME(hw_q);
+ queue->ENST_ON_TIME = GEM_ENST_ON_TIME(hw_q);
+ queue->ENST_OFF_TIME = GEM_ENST_OFF_TIME(hw_q);
+
/* get irq: here we use the linux queue index, not the hardware
* queue index. the queue irq definitions in the device tree
* must remove the optional gaps that could exist in the
@@ -4161,13 +4557,18 @@ static int macb_init(struct platform_device *pdev)
dev->ethtool_ops = &macb_ethtool_ops;
}
+ netdev_sw_irq_coalesce_default_on(dev);
+
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
/* Set features */
dev->hw_features = NETIF_F_SG;
- /* Check LSO capability */
- if (GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6)))
+ /* Check LSO capability; runtime detection can be overridden by a cap
+ * flag if the hardware is known to be buggy
+ */
+ if (!(bp->caps & MACB_CAPS_NO_LSO) &&
+ GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6)))
dev->hw_features |= MACB_NETIF_LSO;
/* Checksum offload is only available on gem with packet buffer */
@@ -4175,6 +4576,10 @@ static int macb_init(struct platform_device *pdev)
dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
if (bp->caps & MACB_CAPS_SG_DISABLED)
dev->hw_features &= ~NETIF_F_SG;
+ /* Enable HW_TC if hardware supports QBV */
+ if (bp->caps & MACB_CAPS_QBV)
+ dev->hw_features |= NETIF_F_HW_TC;
+
dev->features = dev->hw_features;
/* Check RX Flow Filters support.
@@ -4182,8 +4587,8 @@ static int macb_init(struct platform_device *pdev)
* each 4-tuple define requires 1 T2 screener reg + 3 compare regs
*/
reg = gem_readl(bp, DCFG8);
- bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3),
- GEM_BFEXT(T2SCR, reg));
+ bp->max_tuples = umin((GEM_BFEXT(SCR2CMP, reg) / 3),
+ GEM_BFEXT(T2SCR, reg));
INIT_LIST_HEAD(&bp->rx_fs_list.list);
if (bp->max_tuples > 0) {
/* also needs one ethtype match to check IPv4 */
@@ -4553,7 +4958,7 @@ static const struct net_device_ops at91ether_netdev_ops = {
.ndo_open = at91ether_open,
.ndo_stop = at91ether_close,
.ndo_start_xmit = at91ether_start_xmit,
- .ndo_get_stats = macb_get_stats,
+ .ndo_get_stats64 = macb_get_stats,
.ndo_set_rx_mode = macb_set_rx_mode,
.ndo_set_mac_address = eth_mac_addr,
.ndo_eth_ioctl = macb_ioctl,
@@ -4618,36 +5023,45 @@ static unsigned long fu540_macb_tx_recalc_rate(struct clk_hw *hw,
return mgmt->rate;
}
-static long fu540_macb_tx_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
-{
- if (WARN_ON(rate < 2500000))
- return 2500000;
- else if (rate == 2500000)
- return 2500000;
- else if (WARN_ON(rate < 13750000))
- return 2500000;
- else if (WARN_ON(rate < 25000000))
- return 25000000;
- else if (rate == 25000000)
- return 25000000;
- else if (WARN_ON(rate < 75000000))
- return 25000000;
- else if (WARN_ON(rate < 125000000))
- return 125000000;
- else if (rate == 125000000)
- return 125000000;
-
- WARN_ON(rate > 125000000);
+static int fu540_macb_tx_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ if (WARN_ON(req->rate < 2500000))
+ req->rate = 2500000;
+ else if (req->rate == 2500000)
+ req->rate = 2500000;
+ else if (WARN_ON(req->rate < 13750000))
+ req->rate = 2500000;
+ else if (WARN_ON(req->rate < 25000000))
+ req->rate = 25000000;
+ else if (req->rate == 25000000)
+ req->rate = 25000000;
+ else if (WARN_ON(req->rate < 75000000))
+ req->rate = 25000000;
+ else if (WARN_ON(req->rate < 125000000))
+ req->rate = 125000000;
+ else if (req->rate == 125000000)
+ req->rate = 125000000;
+ else if (WARN_ON(req->rate > 125000000))
+ req->rate = 125000000;
+ else
+ req->rate = 125000000;
- return 125000000;
+ return 0;
}
static int fu540_macb_tx_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
- rate = fu540_macb_tx_round_rate(hw, rate, &parent_rate);
- if (rate != 125000000)
+ struct clk_rate_request req;
+ int ret;
+
+ clk_hw_init_rate_request(hw, &req, rate);
+ ret = fu540_macb_tx_determine_rate(hw, &req);
+ if (ret != 0)
+ return ret;
+
+ if (req.rate != 125000000)
iowrite32(1, mgmt->reg);
else
iowrite32(0, mgmt->reg);
@@ -4658,7 +5072,7 @@ static int fu540_macb_tx_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops fu540_c000_ops = {
.recalc_rate = fu540_macb_tx_recalc_rate,
- .round_rate = fu540_macb_tx_round_rate,
+ .determine_rate = fu540_macb_tx_determine_rate,
.set_rate = fu540_macb_tx_set_rate,
};
@@ -4727,13 +5141,13 @@ static int init_reset_optional(struct platform_device *pdev)
if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) {
/* Ensure PHY device used in SGMII mode is ready */
- bp->sgmii_phy = devm_phy_optional_get(&pdev->dev, NULL);
+ bp->phy = devm_phy_optional_get(&pdev->dev, NULL);
- if (IS_ERR(bp->sgmii_phy))
- return dev_err_probe(&pdev->dev, PTR_ERR(bp->sgmii_phy),
+ if (IS_ERR(bp->phy))
+ return dev_err_probe(&pdev->dev, PTR_ERR(bp->phy),
"failed to get SGMII PHY\n");
- ret = phy_init(bp->sgmii_phy);
+ ret = phy_init(bp->phy);
if (ret)
return dev_err_probe(&pdev->dev, ret,
"failed to init SGMII PHY\n");
@@ -4762,7 +5176,7 @@ static int init_reset_optional(struct platform_device *pdev)
/* Fully reset controller at hardware level if mapped in device tree */
ret = device_reset_optional(&pdev->dev);
if (ret) {
- phy_exit(bp->sgmii_phy);
+ phy_exit(bp->phy);
return dev_err_probe(&pdev->dev, ret, "failed to reset controller");
}
@@ -4770,8 +5184,30 @@ static int init_reset_optional(struct platform_device *pdev)
err_out_phy_exit:
if (ret)
- phy_exit(bp->sgmii_phy);
+ phy_exit(bp->phy);
+
+ return ret;
+}
+
+static int eyeq5_init(struct platform_device *pdev)
+{
+ struct net_device *netdev = platform_get_drvdata(pdev);
+ struct macb *bp = netdev_priv(netdev);
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ bp->phy = devm_phy_get(dev, NULL);
+ if (IS_ERR(bp->phy))
+ return dev_err_probe(dev, PTR_ERR(bp->phy),
+ "failed to get PHY\n");
+
+ ret = phy_init(bp->phy);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to init PHY\n");
+ ret = macb_init(pdev);
+ if (ret)
+ phy_exit(bp->phy);
return ret;
}
@@ -4817,10 +5253,11 @@ static const struct macb_config pc302gem_config = {
};
static const struct macb_config sama5d2_config = {
- .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
+ .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
+ .jumbo_max_len = 10240,
.usrio = &macb_default_usrio,
};
@@ -4898,6 +5335,7 @@ static const struct macb_config mpfs_config = {
static const struct macb_config sama7g5_gem_config = {
.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_CLK_HW_CHG |
+ MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII |
MACB_CAPS_MIIONRGMII | MACB_CAPS_GEM_HAS_PTP,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
@@ -4917,7 +5355,9 @@ static const struct macb_config sama7g5_emac_config = {
static const struct macb_config versal_config = {
.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO |
- MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH | MACB_CAPS_NEED_TSUCLK,
+ MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH |
+ MACB_CAPS_NEED_TSUCLK | MACB_CAPS_QUEUE_DISABLE |
+ MACB_CAPS_QBV,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = init_reset_optional,
@@ -4925,6 +5365,28 @@ static const struct macb_config versal_config = {
.usrio = &macb_default_usrio,
};
+static const struct macb_config eyeq5_config = {
+ .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO |
+ MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_QUEUE_DISABLE |
+ MACB_CAPS_NO_LSO,
+ .dma_burst_length = 16,
+ .clk_init = macb_clk_init,
+ .init = eyeq5_init,
+ .jumbo_max_len = 10240,
+ .usrio = &macb_default_usrio,
+};
+
+static const struct macb_config raspberrypi_rp1_config = {
+ .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_CLK_HW_CHG |
+ MACB_CAPS_JUMBO |
+ MACB_CAPS_GEM_HAS_PTP,
+ .dma_burst_length = 16,
+ .clk_init = macb_clk_init,
+ .init = macb_init,
+ .usrio = &macb_default_usrio,
+ .jumbo_max_len = 10240,
+};
+
static const struct of_device_id macb_dt_ids[] = {
{ .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
{ .compatible = "cdns,macb" },
@@ -4945,6 +5407,8 @@ static const struct of_device_id macb_dt_ids[] = {
{ .compatible = "microchip,mpfs-macb", .data = &mpfs_config },
{ .compatible = "microchip,sama7g5-gem", .data = &sama7g5_gem_config },
{ .compatible = "microchip,sama7g5-emac", .data = &sama7g5_emac_config },
+ { .compatible = "mobileye,eyeq5-gem", .data = &eyeq5_config },
+ { .compatible = "raspberrypi,rp1-gem", .data = &raspberrypi_rp1_config },
{ .compatible = "xlnx,zynqmp-gem", .data = &zynqmp_config},
{ .compatible = "xlnx,zynq-gem", .data = &zynq_config },
{ .compatible = "xlnx,versal-gem", .data = &versal_config},
@@ -4967,21 +5431,17 @@ static const struct macb_config default_gem_config = {
static int macb_probe(struct platform_device *pdev)
{
const struct macb_config *macb_config = &default_gem_config;
- int (*clk_init)(struct platform_device *, struct clk **,
- struct clk **, struct clk **, struct clk **,
- struct clk **) = macb_config->clk_init;
- int (*init)(struct platform_device *) = macb_config->init;
struct device_node *np = pdev->dev.of_node;
struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
struct clk *tsu_clk = NULL;
- unsigned int queue_mask, num_queues;
- bool native_io;
phy_interface_t interface;
struct net_device *dev;
struct resource *regs;
u32 wtrmrk_rst_val;
void __iomem *mem;
struct macb *bp;
+ int num_queues;
+ bool native_io;
int err, val;
mem = devm_platform_get_and_ioremap_resource(pdev, 0, &regs);
@@ -4992,14 +5452,11 @@ static int macb_probe(struct platform_device *pdev)
const struct of_device_id *match;
match = of_match_node(macb_dt_ids, np);
- if (match && match->data) {
+ if (match && match->data)
macb_config = match->data;
- clk_init = macb_config->clk_init;
- init = macb_config->init;
- }
}
- err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk, &tsu_clk);
+ err = macb_config->clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk, &tsu_clk);
if (err)
return err;
@@ -5010,7 +5467,12 @@ static int macb_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
native_io = hw_is_native_io(mem);
- macb_probe_queues(mem, native_io, &queue_mask, &num_queues);
+ num_queues = macb_probe_queues(&pdev->dev, mem, native_io);
+ if (num_queues < 0) {
+ err = num_queues;
+ goto err_disable_clocks;
+ }
+
dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
if (!dev) {
err = -ENOMEM;
@@ -5034,16 +5496,13 @@ static int macb_probe(struct platform_device *pdev)
bp->macb_reg_writel = hw_writel;
}
bp->num_queues = num_queues;
- bp->queue_mask = queue_mask;
- if (macb_config)
- bp->dma_burst_length = macb_config->dma_burst_length;
+ bp->dma_burst_length = macb_config->dma_burst_length;
bp->pclk = pclk;
bp->hclk = hclk;
bp->tx_clk = tx_clk;
bp->rx_clk = rx_clk;
bp->tsu_clk = tsu_clk;
- if (macb_config)
- bp->jumbo_max_len = macb_config->jumbo_max_len;
+ bp->jumbo_max_len = macb_config->jumbo_max_len;
if (!hw_is_gem(bp->regs, bp->native_io))
bp->max_tx_length = MACB_MAX_TX_LEN;
@@ -5053,9 +5512,7 @@ static int macb_probe(struct platform_device *pdev)
bp->max_tx_length = GEM_MAX_TX_LEN;
bp->wol = 0;
- if (of_property_read_bool(np, "magic-packet"))
- bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
- device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
+ device_set_wakeup_capable(&pdev->dev, 1);
bp->usrio = macb_config->usrio;
@@ -5079,14 +5536,19 @@ static int macb_probe(struct platform_device *pdev)
}
}
spin_lock_init(&bp->lock);
+ spin_lock_init(&bp->stats_lock);
/* setup capabilities */
macb_configure_caps(bp, macb_config);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
- dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
- bp->hw_dma_cap |= HW_DMA_CAP_64B;
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
+ if (err) {
+ dev_err(&pdev->dev, "failed to set DMA mask\n");
+ goto err_out_free_netdev;
+ }
+ bp->caps |= MACB_CAPS_DMA_64B;
}
#endif
platform_set_drvdata(pdev, dev);
@@ -5097,12 +5559,12 @@ static int macb_probe(struct platform_device *pdev)
goto err_out_free_netdev;
}
- /* MTU range: 68 - 1500 or 10240 */
+ /* MTU range: 68 - 1518 or 10240 */
dev->min_mtu = GEM_MTU_MIN_SIZE;
if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
dev->max_mtu = bp->jumbo_max_len - ETH_HLEN - ETH_FCS_LEN;
else
- dev->max_mtu = ETH_DATA_LEN;
+ dev->max_mtu = 1536 - ETH_HLEN - ETH_FCS_LEN;
if (bp->caps & MACB_CAPS_BD_RD_PREFETCH) {
val = GEM_BFEXT(RXBD_RDBUFF, gem_readl(bp, DCFG10));
@@ -5134,7 +5596,7 @@ static int macb_probe(struct platform_device *pdev)
bp->phy_interface = interface;
/* IP specific init */
- err = init(pdev);
+ err = macb_config->init(pdev);
if (err)
goto err_out_free_netdev;
@@ -5150,13 +5612,12 @@ static int macb_probe(struct platform_device *pdev)
goto err_out_unregister_mdio;
}
- tasklet_setup(&bp->hresp_err_tasklet, macb_hresp_error_task);
+ INIT_WORK(&bp->hresp_err_bh_work, macb_hresp_error_task);
netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
dev->base_addr, dev->irq, dev->dev_addr);
- pm_runtime_mark_last_busy(&bp->pdev->dev);
pm_runtime_put_autosuspend(&bp->pdev->dev);
return 0;
@@ -5166,7 +5627,7 @@ err_out_unregister_mdio:
mdiobus_free(bp->mii_bus);
err_out_phy_exit:
- phy_exit(bp->sgmii_phy);
+ phy_exit(bp->phy);
err_out_free_netdev:
free_netdev(dev);
@@ -5189,19 +5650,16 @@ static void macb_remove(struct platform_device *pdev)
if (dev) {
bp = netdev_priv(dev);
- phy_exit(bp->sgmii_phy);
+ unregister_netdev(dev);
+ phy_exit(bp->phy);
mdiobus_unregister(bp->mii_bus);
mdiobus_free(bp->mii_bus);
- unregister_netdev(dev);
- tasklet_kill(&bp->hresp_err_tasklet);
+ device_set_wakeup_enable(&bp->pdev->dev, 0);
+ cancel_work_sync(&bp->hresp_err_bh_work);
pm_runtime_disable(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
- if (!pm_runtime_suspended(&pdev->dev)) {
- macb_clks_disable(bp->pclk, bp->hclk, bp->tx_clk,
- bp->rx_clk, bp->tsu_clk);
- pm_runtime_set_suspended(&pdev->dev);
- }
+ pm_runtime_set_suspended(&pdev->dev);
phylink_destroy(bp->phylink);
free_netdev(dev);
}
@@ -5211,30 +5669,70 @@ static int __maybe_unused macb_suspend(struct device *dev)
{
struct net_device *netdev = dev_get_drvdata(dev);
struct macb *bp = netdev_priv(netdev);
+ struct in_ifaddr *ifa = NULL;
struct macb_queue *queue;
+ struct in_device *idev;
unsigned long flags;
unsigned int q;
int err;
+ u32 tmp;
if (!device_may_wakeup(&bp->dev->dev))
- phy_exit(bp->sgmii_phy);
+ phy_exit(bp->phy);
if (!netif_running(netdev))
return 0;
if (bp->wol & MACB_WOL_ENABLED) {
+ /* Check for IP address in WOL ARP mode */
+ idev = __in_dev_get_rcu(bp->dev);
+ if (idev)
+ ifa = rcu_dereference(idev->ifa_list);
+ if ((bp->wolopts & WAKE_ARP) && !ifa) {
+ netdev_err(netdev, "IP address not assigned as required by WoL walk ARP\n");
+ return -EOPNOTSUPP;
+ }
spin_lock_irqsave(&bp->lock, flags);
- /* Flush all status bits */
- macb_writel(bp, TSR, -1);
- macb_writel(bp, RSR, -1);
+
+ /* Disable the Tx and Rx engines before disabling the queues;
+ * this is mandatory as per the IP spec sheet.
+ */
+ tmp = macb_readl(bp, NCR);
+ macb_writel(bp, NCR, tmp & ~(MACB_BIT(TE) | MACB_BIT(RE)));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ if (!(bp->caps & MACB_CAPS_QUEUE_DISABLE))
+ macb_writel(bp, RBQPH,
+ upper_32_bits(bp->rx_ring_tieoff_dma));
+#endif
for (q = 0, queue = bp->queues; q < bp->num_queues;
++q, ++queue) {
+ /* Disable RX queues */
+ if (bp->caps & MACB_CAPS_QUEUE_DISABLE) {
+ queue_writel(queue, RBQP, MACB_BIT(QUEUE_DISABLE));
+ } else {
+ /* Tie off RX queues */
+ queue_writel(queue, RBQP,
+ lower_32_bits(bp->rx_ring_tieoff_dma));
+ }
/* Disable all interrupts */
queue_writel(queue, IDR, -1);
queue_readl(queue, ISR);
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
queue_writel(queue, ISR, -1);
}
+ /* Enable Receive engine */
+ macb_writel(bp, NCR, tmp | MACB_BIT(RE));
+ /* Flush all status bits */
+ macb_writel(bp, TSR, -1);
+ macb_writel(bp, RSR, -1);
+
+ tmp = (bp->wolopts & WAKE_MAGIC) ? MACB_BIT(MAG) : 0;
+ if (bp->wolopts & WAKE_ARP) {
+ tmp |= MACB_BIT(ARP);
+ /* write IP address into register */
+ tmp |= MACB_BFEXT(IP, be32_to_cpu(ifa->ifa_local));
+ }
+
/* Change interrupt handler and
* Enable WoL IRQ on queue 0
*/
@@ -5250,7 +5748,7 @@ static int __maybe_unused macb_suspend(struct device *dev)
return err;
}
queue_writel(bp->queues, IER, GEM_BIT(WOL));
- gem_writel(bp, WOL, MACB_BIT(MAG));
+ gem_writel(bp, WOL, tmp);
} else {
err = devm_request_irq(dev, bp->queues[0].irq, macb_wol_interrupt,
IRQF_SHARED, netdev->name, bp->queues);
@@ -5262,7 +5760,7 @@ static int __maybe_unused macb_suspend(struct device *dev)
return err;
}
queue_writel(bp->queues, IER, MACB_BIT(WOL));
- macb_writel(bp, WOL, MACB_BIT(MAG));
+ macb_writel(bp, WOL, tmp);
}
spin_unlock_irqrestore(&bp->lock, flags);
@@ -5309,7 +5807,7 @@ static int __maybe_unused macb_resume(struct device *dev)
int err;
if (!device_may_wakeup(&bp->dev->dev))
- phy_init(bp->sgmii_phy);
+ phy_init(bp->phy);
if (!netif_running(netdev))
return 0;
@@ -5413,6 +5911,20 @@ static int __maybe_unused macb_runtime_resume(struct device *dev)
return 0;
}
+static void macb_shutdown(struct platform_device *pdev)
+{
+ struct net_device *netdev = platform_get_drvdata(pdev);
+
+ rtnl_lock();
+
+ if (netif_running(netdev))
+ dev_close(netdev);
+
+ netif_device_detach(netdev);
+
+ rtnl_unlock();
+}
+
static const struct dev_pm_ops macb_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(macb_suspend, macb_resume)
SET_RUNTIME_PM_OPS(macb_runtime_suspend, macb_runtime_resume, NULL)
@@ -5420,12 +5932,13 @@ static const struct dev_pm_ops macb_pm_ops = {
static struct platform_driver macb_driver = {
.probe = macb_probe,
- .remove_new = macb_remove,
+ .remove = macb_remove,
.driver = {
.name = "macb",
.of_match_table = of_match_ptr(macb_dt_ids),
.pm = &macb_pm_ops,
},
+ .shutdown = macb_shutdown,
};
module_platform_driver(macb_driver);
diff --git a/drivers/net/ethernet/cadence/macb_pci.c b/drivers/net/ethernet/cadence/macb_pci.c
index f66d22de5168..fc4f5aee6ab3 100644
--- a/drivers/net/ethernet/cadence/macb_pci.c
+++ b/drivers/net/ethernet/cadence/macb_pci.c
@@ -19,8 +19,7 @@
#define PCI_DRIVER_NAME "macb_pci"
#define PLAT_DRIVER_NAME "macb"
-#define CDNS_VENDOR_ID 0x17cd
-#define CDNS_DEVICE_ID 0xe007
+#define PCI_DEVICE_ID_CDNS_MACB 0xe007
#define GEM_PCLK_RATE 50000000
#define GEM_HCLK_RATE 50000000
@@ -117,7 +116,7 @@ static void macb_remove(struct pci_dev *pdev)
}
static const struct pci_device_id dev_id_table[] = {
- { PCI_DEVICE(CDNS_VENDOR_ID, CDNS_DEVICE_ID), },
+ { PCI_VDEVICE(CDNS, PCI_DEVICE_ID_CDNS_MACB) },
{ 0, }
};
diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c
index a63bf29c4fa8..c9e77819196e 100644
--- a/drivers/net/ethernet/cadence/macb_ptp.c
+++ b/drivers/net/ethernet/cadence/macb_ptp.c
@@ -28,14 +28,16 @@
static struct macb_dma_desc_ptp *macb_ptp_desc(struct macb *bp,
struct macb_dma_desc *desc)
{
- if (bp->hw_dma_cap == HW_DMA_CAP_PTP)
- return (struct macb_dma_desc_ptp *)
- ((u8 *)desc + sizeof(struct macb_dma_desc));
- if (bp->hw_dma_cap == HW_DMA_CAP_64B_PTP)
+ if (!macb_dma_ptp(bp))
+ return NULL;
+
+ if (macb_dma64(bp))
return (struct macb_dma_desc_ptp *)
((u8 *)desc + sizeof(struct macb_dma_desc)
+ sizeof(struct macb_dma_desc_64));
- return NULL;
+ else
+ return (struct macb_dma_desc_ptp *)
+ ((u8 *)desc + sizeof(struct macb_dma_desc));
}
static int gem_tsu_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts,
@@ -380,7 +382,7 @@ int gem_get_hwtst(struct net_device *dev,
struct macb *bp = netdev_priv(dev);
*tstamp_config = bp->tstamp_config;
- if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0)
+ if (!macb_dma_ptp(bp))
return -EOPNOTSUPP;
return 0;
@@ -407,7 +409,7 @@ int gem_set_hwtst(struct net_device *dev,
struct macb *bp = netdev_priv(dev);
u32 regval;
- if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0)
+ if (!macb_dma_ptp(bp))
return -EOPNOTSUPP;
switch (tstamp_config->tx_type) {
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 5e97f1e4e38e..331ac6a3dc38 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -1358,7 +1358,7 @@ static int xgmac_change_mtu(struct net_device *dev, int new_mtu)
/* Bring interface down, change mtu and bring interface back up */
xgmac_stop(dev);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return xgmac_open(dev);
}
@@ -1919,7 +1919,7 @@ static struct platform_driver xgmac_driver = {
.pm = &xgmac_pm_ops,
},
.probe = xgmac_probe,
- .remove_new = xgmac_remove,
+ .remove = xgmac_remove,
};
module_platform_driver(xgmac_driver);
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index ca742cc146d7..7dae5aad3689 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -70,8 +70,8 @@ config LIQUIDIO
depends on 64BIT && PCI
depends on PCI
depends on PTP_1588_CLOCK_OPTIONAL
+ select CRC32
select FW_LOADER
- select LIBCRC32C
select LIQUIDIO_CORE
select NET_DEVLINK
help
diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.c b/drivers/net/ethernet/cavium/common/cavium_ptp.c
index 9fd717b9cf69..61e261657073 100644
--- a/drivers/net/ethernet/cavium/common/cavium_ptp.c
+++ b/drivers/net/ethernet/cavium/common/cavium_ptp.c
@@ -209,7 +209,7 @@ static int cavium_ptp_enable(struct ptp_clock_info *ptp_info,
return -EOPNOTSUPP;
}
-static u64 cavium_ptp_cc_read(const struct cyclecounter *cc)
+static u64 cavium_ptp_cc_read(struct cyclecounter *cc)
{
struct cavium_ptp *clock =
container_of(cc, struct cavium_ptp, cycle_counter);
@@ -239,12 +239,11 @@ static int cavium_ptp_probe(struct pci_dev *pdev,
if (err)
goto error_free;
- err = pcim_iomap_regions(pdev, 1 << PCI_PTP_BAR_NO, pci_name(pdev));
+ clock->reg_base = pcim_iomap_region(pdev, PCI_PTP_BAR_NO, pci_name(pdev));
+ err = PTR_ERR_OR_ZERO(clock->reg_base);
if (err)
goto error_free;
- clock->reg_base = pcim_iomap_table(pdev)[PCI_PTP_BAR_NO];
-
spin_lock_init(&clock->spin_lock);
cc = &clock->cycle_counter;
@@ -292,7 +291,7 @@ error_stop:
clock_cfg = readq(clock->reg_base + PTP_CLOCK_CFG);
clock_cfg &= ~PTP_CLOCK_CFG_PTP_EN;
writeq(clock_cfg, clock->reg_base + PTP_CLOCK_CFG);
- pcim_iounmap_regions(pdev, 1 << PCI_PTP_BAR_NO);
+ pcim_iounmap_region(pdev, PCI_PTP_BAR_NO);
error_free:
devm_kfree(dev, clock);
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
index b3c81a2e9d46..75f22f74774c 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
@@ -36,175 +36,6 @@
*/
#define CN23XX_INPUT_JABBER 64600
-void cn23xx_dump_pf_initialized_regs(struct octeon_device *oct)
-{
- int i = 0;
- u32 regval = 0;
- struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
-
- /*In cn23xx_soft_reset*/
- dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%llx\n",
- "CN23XX_WIN_WR_MASK_REG", CVM_CAST64(CN23XX_WIN_WR_MASK_REG),
- CVM_CAST64(octeon_read_csr64(oct, CN23XX_WIN_WR_MASK_REG)));
- dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
- "CN23XX_SLI_SCRATCH1", CVM_CAST64(CN23XX_SLI_SCRATCH1),
- CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1)));
- dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
- "CN23XX_RST_SOFT_RST", CN23XX_RST_SOFT_RST,
- lio_pci_readq(oct, CN23XX_RST_SOFT_RST));
-
- /*In cn23xx_set_dpi_regs*/
- dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
- "CN23XX_DPI_DMA_CONTROL", CN23XX_DPI_DMA_CONTROL,
- lio_pci_readq(oct, CN23XX_DPI_DMA_CONTROL));
-
- for (i = 0; i < 6; i++) {
- dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
- "CN23XX_DPI_DMA_ENG_ENB", i,
- CN23XX_DPI_DMA_ENG_ENB(i),
- lio_pci_readq(oct, CN23XX_DPI_DMA_ENG_ENB(i)));
- dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
- "CN23XX_DPI_DMA_ENG_BUF", i,
- CN23XX_DPI_DMA_ENG_BUF(i),
- lio_pci_readq(oct, CN23XX_DPI_DMA_ENG_BUF(i)));
- }
-
- dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n", "CN23XX_DPI_CTL",
- CN23XX_DPI_CTL, lio_pci_readq(oct, CN23XX_DPI_CTL));
-
- /*In cn23xx_setup_pcie_mps and cn23xx_setup_pcie_mrrs */
- pci_read_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, &regval);
- dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
- "CN23XX_CONFIG_PCIE_DEVCTL",
- CVM_CAST64(CN23XX_CONFIG_PCIE_DEVCTL), CVM_CAST64(regval));
-
- dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
- "CN23XX_DPI_SLI_PRTX_CFG", oct->pcie_port,
- CN23XX_DPI_SLI_PRTX_CFG(oct->pcie_port),
- lio_pci_readq(oct, CN23XX_DPI_SLI_PRTX_CFG(oct->pcie_port)));
-
- /*In cn23xx_specific_regs_setup */
- dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
- "CN23XX_SLI_S2M_PORTX_CTL", oct->pcie_port,
- CVM_CAST64(CN23XX_SLI_S2M_PORTX_CTL(oct->pcie_port)),
- CVM_CAST64(octeon_read_csr64(
- oct, CN23XX_SLI_S2M_PORTX_CTL(oct->pcie_port))));
-
- dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
- "CN23XX_SLI_RING_RST", CVM_CAST64(CN23XX_SLI_PKT_IOQ_RING_RST),
- (u64)octeon_read_csr64(oct, CN23XX_SLI_PKT_IOQ_RING_RST));
-
- /*In cn23xx_setup_global_mac_regs*/
- for (i = 0; i < CN23XX_MAX_MACS; i++) {
- dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
- "CN23XX_SLI_PKT_MAC_RINFO64", i,
- CVM_CAST64(CN23XX_SLI_PKT_MAC_RINFO64(i, oct->pf_num)),
- CVM_CAST64(octeon_read_csr64
- (oct, CN23XX_SLI_PKT_MAC_RINFO64
- (i, oct->pf_num))));
- }
-
- /*In cn23xx_setup_global_input_regs*/
- for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
- dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
- "CN23XX_SLI_IQ_PKT_CONTROL64", i,
- CVM_CAST64(CN23XX_SLI_IQ_PKT_CONTROL64(i)),
- CVM_CAST64(octeon_read_csr64
- (oct, CN23XX_SLI_IQ_PKT_CONTROL64(i))));
- }
-
- /*In cn23xx_setup_global_output_regs*/
- dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
- "CN23XX_SLI_OQ_WMARK", CVM_CAST64(CN23XX_SLI_OQ_WMARK),
- CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_OQ_WMARK)));
-
- for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
- dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
- "CN23XX_SLI_OQ_PKT_CONTROL", i,
- CVM_CAST64(CN23XX_SLI_OQ_PKT_CONTROL(i)),
- CVM_CAST64(octeon_read_csr(
- oct, CN23XX_SLI_OQ_PKT_CONTROL(i))));
- dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
- "CN23XX_SLI_OQ_PKT_INT_LEVELS", i,
- CVM_CAST64(CN23XX_SLI_OQ_PKT_INT_LEVELS(i)),
- CVM_CAST64(octeon_read_csr64(
- oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(i))));
- }
-
- /*In cn23xx_enable_interrupt and cn23xx_disable_interrupt*/
- dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
- "cn23xx->intr_enb_reg64",
- CVM_CAST64((long)(cn23xx->intr_enb_reg64)),
- CVM_CAST64(readq(cn23xx->intr_enb_reg64)));
-
- dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
- "cn23xx->intr_sum_reg64",
- CVM_CAST64((long)(cn23xx->intr_sum_reg64)),
- CVM_CAST64(readq(cn23xx->intr_sum_reg64)));
-
- /*In cn23xx_setup_iq_regs*/
- for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
- dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
- "CN23XX_SLI_IQ_BASE_ADDR64", i,
- CVM_CAST64(CN23XX_SLI_IQ_BASE_ADDR64(i)),
- CVM_CAST64(octeon_read_csr64(
- oct, CN23XX_SLI_IQ_BASE_ADDR64(i))));
- dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
- "CN23XX_SLI_IQ_SIZE", i,
- CVM_CAST64(CN23XX_SLI_IQ_SIZE(i)),
- CVM_CAST64(octeon_read_csr
- (oct, CN23XX_SLI_IQ_SIZE(i))));
- dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
- "CN23XX_SLI_IQ_DOORBELL", i,
- CVM_CAST64(CN23XX_SLI_IQ_DOORBELL(i)),
- CVM_CAST64(octeon_read_csr64(
- oct, CN23XX_SLI_IQ_DOORBELL(i))));
- dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
- "CN23XX_SLI_IQ_INSTR_COUNT64", i,
- CVM_CAST64(CN23XX_SLI_IQ_INSTR_COUNT64(i)),
- CVM_CAST64(octeon_read_csr64(
- oct, CN23XX_SLI_IQ_INSTR_COUNT64(i))));
- }
-
- /*In cn23xx_setup_oq_regs*/
- for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
- dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
- "CN23XX_SLI_OQ_BASE_ADDR64", i,
- CVM_CAST64(CN23XX_SLI_OQ_BASE_ADDR64(i)),
- CVM_CAST64(octeon_read_csr64(
- oct, CN23XX_SLI_OQ_BASE_ADDR64(i))));
- dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
- "CN23XX_SLI_OQ_SIZE", i,
- CVM_CAST64(CN23XX_SLI_OQ_SIZE(i)),
- CVM_CAST64(octeon_read_csr
- (oct, CN23XX_SLI_OQ_SIZE(i))));
- dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
- "CN23XX_SLI_OQ_BUFF_INFO_SIZE", i,
- CVM_CAST64(CN23XX_SLI_OQ_BUFF_INFO_SIZE(i)),
- CVM_CAST64(octeon_read_csr(
- oct, CN23XX_SLI_OQ_BUFF_INFO_SIZE(i))));
- dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
- "CN23XX_SLI_OQ_PKTS_SENT", i,
- CVM_CAST64(CN23XX_SLI_OQ_PKTS_SENT(i)),
- CVM_CAST64(octeon_read_csr64(
- oct, CN23XX_SLI_OQ_PKTS_SENT(i))));
- dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
- "CN23XX_SLI_OQ_PKTS_CREDIT", i,
- CVM_CAST64(CN23XX_SLI_OQ_PKTS_CREDIT(i)),
- CVM_CAST64(octeon_read_csr64(
- oct, CN23XX_SLI_OQ_PKTS_CREDIT(i))));
- }
-
- dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
- "CN23XX_SLI_PKT_TIME_INT",
- CVM_CAST64(CN23XX_SLI_PKT_TIME_INT),
- CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_PKT_TIME_INT)));
- dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
- "CN23XX_SLI_PKT_CNT_INT",
- CVM_CAST64(CN23XX_SLI_PKT_CNT_INT),
- CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_PKT_CNT_INT)));
-}
-
static int cn23xx_pf_soft_reset(struct octeon_device *oct)
{
octeon_write_csr64(oct, CN23XX_WIN_WR_MASK_REG, 0xFF);
@@ -218,7 +49,7 @@ static int cn23xx_pf_soft_reset(struct octeon_device *oct)
lio_pci_readq(oct, CN23XX_RST_SOFT_RST);
lio_pci_writeq(oct, 1, CN23XX_RST_SOFT_RST);
- /* Wait for 100ms as Octeon resets. */
+ /* Wait for 100ms as Octeon resets */
mdelay(100);
if (octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1)) {
@@ -230,7 +61,7 @@ static int cn23xx_pf_soft_reset(struct octeon_device *oct)
dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: Reset completed\n",
oct->octeon_id);
- /* restore the reset value*/
+ /* Restore the reset value */
octeon_write_csr64(oct, CN23XX_WIN_WR_MASK_REG, 0xFF);
return 0;
@@ -290,7 +121,7 @@ u32 cn23xx_pf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us)
oqticks_per_us /= 1024;
/* time_intr is in microseconds. The next 2 steps gives the oq ticks
- * corressponding to time_intr.
+ * corresponding to time_intr.
*/
oqticks_per_us *= time_intr_in_us;
oqticks_per_us /= 1000;
@@ -305,11 +136,11 @@ static void cn23xx_setup_global_mac_regs(struct octeon_device *oct)
u64 reg_val;
u64 temp;
- /* programming SRN and TRS for each MAC(0..3) */
+ /* Programming SRN and TRS for each MAC(0..3) */
dev_dbg(&oct->pci_dev->dev, "%s:Using pcie port %d\n",
__func__, mac_no);
- /* By default, mapping all 64 IOQs to a single MACs */
+ /* By default, map all 64 IOQs to a single MAC */
reg_val =
octeon_read_csr64(oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num));
@@ -333,7 +164,7 @@ static void cn23xx_setup_global_mac_regs(struct octeon_device *oct)
temp = oct->sriov_info.max_vfs & 0xff;
reg_val |= (temp << CN23XX_PKT_MAC_CTL_RINFO_NVFS_BIT_POS);
- /* write these settings to MAC register */
+ /* Write these settings to MAC register */
octeon_write_csr64(oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num),
reg_val);
@@ -352,10 +183,10 @@ static int cn23xx_reset_io_queues(struct octeon_device *oct)
srn = oct->sriov_info.pf_srn;
ern = srn + oct->sriov_info.num_pf_rings;
- /*As per HRM reg description, s/w cant write 0 to ENB. */
- /*to make the queue off, need to set the RST bit. */
+ /* As per HRM reg description, s/w can't write 0 to ENB. */
+ /* We need to set the RST bit to turn the queue off. */
- /* Reset the Enable bit for all the 64 IQs. */
+ /* Reset the enable bit for all the 64 IQs. */
for (q_no = srn; q_no < ern; q_no++) {
/* set RST bit to 1. This bit applies to both IQ and OQ */
d64 = octeon_read_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
@@ -363,7 +194,7 @@ static int cn23xx_reset_io_queues(struct octeon_device *oct)
octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), d64);
}
- /*wait until the RST bit is clear or the RST and quite bits are set*/
+ /* Wait until the RST bit is clear or the RST and quiet bits are set */
for (q_no = srn; q_no < ern; q_no++) {
u64 reg_val = octeon_read_csr64(oct,
CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
@@ -414,15 +245,15 @@ static int cn23xx_pf_setup_global_input_regs(struct octeon_device *oct)
if (cn23xx_reset_io_queues(oct))
return -1;
- /** Set the MAC_NUM and PVF_NUM in IQ_PKT_CONTROL reg
- * for all queues.Only PF can set these bits.
+ /* Set the MAC_NUM and PVF_NUM in IQ_PKT_CONTROL reg
+ * for all queues. Only PF can set these bits.
* bits 29:30 indicate the MAC num.
* bits 32:47 indicate the PVF num.
*/
for (q_no = 0; q_no < ern; q_no++) {
reg_val = (u64)oct->pcie_port << CN23XX_PKT_INPUT_CTL_MAC_NUM_POS;
- /* for VF assigned queues. */
+ /* For VF assigned queues. */
if (q_no < oct->sriov_info.pf_srn) {
vf_num = q_no / oct->sriov_info.rings_per_vf;
vf_num += 1; /* VF1, VF2,........ */
@@ -437,7 +268,7 @@ static int cn23xx_pf_setup_global_input_regs(struct octeon_device *oct)
reg_val);
}
- /* Select ES, RO, NS, RDSIZE,DPTR Fomat#0 for
+ /* Select ES, RO, NS, RDSIZE,DPTR Format#0 for
* pf queues
*/
for (q_no = srn; q_no < ern; q_no++) {
@@ -458,7 +289,7 @@ static int cn23xx_pf_setup_global_input_regs(struct octeon_device *oct)
octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
reg_val);
- /* Set WMARK level for triggering PI_INT */
+ /* Set WMARK level to trigger PI_INT */
/* intr_threshold = CN23XX_DEF_IQ_INTR_THRESHOLD & */
intr_threshold = CFG_GET_IQ_INTR_PKT(cn23xx->conf) &
CN23XX_PKT_IN_DONE_WMARK_MASK;
@@ -523,7 +354,7 @@ static void cn23xx_pf_setup_global_output_regs(struct octeon_device *oct)
/* set the ES bit */
reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES);
- /* write all the selected settings */
+ /* Write all the selected settings */
octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no), reg_val);
/* Enabling these interrupt in oct->fn_list.enable_interrupt()
@@ -542,7 +373,7 @@ static void cn23xx_pf_setup_global_output_regs(struct octeon_device *oct)
/** Setting the water mark level for pko back pressure **/
writeq(0x40, (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OQ_WMARK);
- /** Disabling setting OQs in reset when ring has no dorebells
+ /* Disabling setting OQs in reset when ring has no doorbells
* enabling this will cause of head of line blocking
*/
/* Do it only for pass1.1. and pass1.2 */
@@ -552,7 +383,7 @@ static void cn23xx_pf_setup_global_output_regs(struct octeon_device *oct)
CN23XX_SLI_GBL_CONTROL) | 0x2,
(u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_GBL_CONTROL);
- /** Enable channel-level backpressure */
+ /* Enable channel-level backpressure */
if (oct->pf_num)
writeq(0xffffffffffffffffULL,
(u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OUT_BP_EN2_W1S);
@@ -565,7 +396,7 @@ static int cn23xx_setup_pf_device_regs(struct octeon_device *oct)
{
cn23xx_enable_error_reporting(oct);
- /* program the MAC(0..3)_RINFO before setting up input/output regs */
+ /* Program the MAC(0..3)_RINFO before setting up input/output regs */
cn23xx_setup_global_mac_regs(oct);
if (cn23xx_pf_setup_global_input_regs(oct))
@@ -579,7 +410,7 @@ static int cn23xx_setup_pf_device_regs(struct octeon_device *oct)
octeon_write_csr64(oct, CN23XX_SLI_WINDOW_CTL,
CN23XX_SLI_WINDOW_CTL_DEFAULT);
- /* set SLI_PKT_IN_JABBER to handle large VXLAN packets */
+ /* Set SLI_PKT_IN_JABBER to handle large VXLAN packets */
octeon_write_csr64(oct, CN23XX_SLI_PKT_IN_JABBER, CN23XX_INPUT_JABBER);
return 0;
}
@@ -743,7 +574,7 @@ static int cn23xx_setup_pf_mbox(struct octeon_device *oct)
mbox->mbox_read_reg = (u8 *)oct->mmio[0].hw_addr +
CN23XX_SLI_PKT_PF_VF_MBOX_SIG(q_no, 1);
- /*Mail Box Thread creation*/
+ /* Mail Box Thread creation */
INIT_DELAYED_WORK(&mbox->mbox_poll_wk.work,
cn23xx_pf_mbox_thread);
mbox->mbox_poll_wk.ctxptr = (void *)mbox;
@@ -795,7 +626,7 @@ static int cn23xx_enable_io_queues(struct octeon_device *oct)
ern = srn + oct->num_iqs;
for (q_no = srn; q_no < ern; q_no++) {
- /* set the corresponding IQ IS_64B bit */
+ /* Set the corresponding IQ IS_64B bit */
if (oct->io_qmask.iq64B & BIT_ULL(q_no - srn)) {
reg_val = octeon_read_csr64(
oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
@@ -804,7 +635,7 @@ static int cn23xx_enable_io_queues(struct octeon_device *oct)
oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), reg_val);
}
- /* set the corresponding IQ ENB bit */
+ /* Set the corresponding IQ ENB bit */
if (oct->io_qmask.iq & BIT_ULL(q_no - srn)) {
/* IOQs are in reset by default in PEM2 mode,
* clearing reset bit
@@ -850,7 +681,7 @@ static int cn23xx_enable_io_queues(struct octeon_device *oct)
}
for (q_no = srn; q_no < ern; q_no++) {
u32 reg_val;
- /* set the corresponding OQ ENB bit */
+ /* Set the corresponding OQ ENB bit */
if (oct->io_qmask.oq & BIT_ULL(q_no - srn)) {
reg_val = octeon_read_csr(
oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no));
@@ -876,7 +707,7 @@ static void cn23xx_disable_io_queues(struct octeon_device *oct)
for (q_no = srn; q_no < ern; q_no++) {
loop = HZ;
- /* start the Reset for a particular ring */
+ /* Start the Reset for a particular ring */
WRITE_ONCE(d64, octeon_read_csr64(
oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)));
WRITE_ONCE(d64, READ_ONCE(d64) &
@@ -909,7 +740,7 @@ static void cn23xx_disable_io_queues(struct octeon_device *oct)
loop = HZ;
/* Wait until hardware indicates that the particular IQ
- * is out of reset.It given that SLI_PKT_RING_RST is
+ * is out of reset. Note that SLI_PKT_RING_RST is
* common for both IQs and OQs
*/
WRITE_ONCE(d64, octeon_read_csr64(
@@ -929,7 +760,7 @@ static void cn23xx_disable_io_queues(struct octeon_device *oct)
schedule_timeout_uninterruptible(1);
}
- /* clear the SLI_PKT(0..63)_CNTS[CNT] reg value */
+ /* Clear the SLI_PKT(0..63)_CNTS[CNT] reg value */
WRITE_ONCE(d32, octeon_read_csr(
oct, CN23XX_SLI_OQ_PKTS_SENT(q_no)));
octeon_write_csr(oct, CN23XX_SLI_OQ_PKTS_SENT(q_no),
@@ -962,7 +793,7 @@ static u64 cn23xx_pf_msix_interrupt_handler(void *dev)
if (!pkts_sent || (pkts_sent == 0xFFFFFFFFFFFFFFFFULL))
return ret;
- /* Write count reg in sli_pkt_cnts to clear these int.*/
+ /* Write count reg in sli_pkt_cnts to clear these int. */
if ((pkts_sent & CN23XX_INTR_PO_INT) ||
(pkts_sent & CN23XX_INTR_PI_INT)) {
if (pkts_sent & CN23XX_INTR_PO_INT)
@@ -1077,7 +908,7 @@ static u32 cn23xx_bar1_idx_read(struct octeon_device *oct, u32 idx)
oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
}
-/* always call with lock held */
+/* Always call with lock held */
static u32 cn23xx_update_read_index(struct octeon_instr_queue *iq)
{
u32 new_idx;
@@ -1088,7 +919,7 @@ static u32 cn23xx_update_read_index(struct octeon_instr_queue *iq)
iq->pkt_in_done = pkt_in_done;
/* Modulo of the new index with the IQ size will give us
- * the new index. The iq->reset_instr_cnt is always zero for
+ * the new index. The iq->reset_instr_cnt is always zero for
* cn23xx, so no extra adjustments are needed.
*/
new_idx = (iq->octeon_read_index +
@@ -1103,8 +934,8 @@ static void cn23xx_enable_pf_interrupt(struct octeon_device *oct, u8 intr_flag)
struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
u64 intr_val = 0;
- /* Divide the single write to multiple writes based on the flag. */
- /* Enable Interrupt */
+ /* Divide the single write to multiple writes based on the flag. */
+ /* Enable Interrupts */
if (intr_flag == OCTEON_ALL_INTR) {
writeq(cn23xx->intr_mask64, cn23xx->intr_enb_reg64);
} else if (intr_flag & OCTEON_OUTPUT_INTR) {
@@ -1159,7 +990,7 @@ static int cn23xx_get_pf_num(struct octeon_device *oct)
ret = 0;
- /** Read Function Dependency Link reg to get the function number */
+ /* Read Function Dependency Link reg to get the function number */
if (pci_read_config_dword(oct->pci_dev, CN23XX_PCIE_SRIOV_FDL,
&fdl_bit) == 0) {
oct->pf_num = ((fdl_bit >> CN23XX_PCIE_SRIOV_FDL_BIT_POS) &
@@ -1172,13 +1003,13 @@ static int cn23xx_get_pf_num(struct octeon_device *oct)
* In this case, read the PF number from the
* SLI_PKT0_INPUT_CONTROL reg (written by f/w)
*/
- pkt0_in_ctl = octeon_read_csr64(oct,
- CN23XX_SLI_IQ_PKT_CONTROL64(0));
+ pkt0_in_ctl =
+ octeon_read_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(0));
pfnum = (pkt0_in_ctl >> CN23XX_PKT_INPUT_CTL_PF_NUM_POS) &
CN23XX_PKT_INPUT_CTL_PF_NUM_MASK;
mac = (octeon_read_csr(oct, CN23XX_SLI_MAC_NUMBER)) & 0xff;
- /* validate PF num by reading RINFO; f/w writes RINFO.trs == 1*/
+ /* Validate PF num by reading RINFO; f/w writes RINFO.trs == 1 */
d64 = octeon_read_csr64(oct,
CN23XX_SLI_PKT_MAC_RINFO64(mac, pfnum));
trs = (int)(d64 >> CN23XX_PKT_MAC_CTL_RINFO_TRS_BIT_POS) & 0xff;
@@ -1377,53 +1208,14 @@ int setup_cn23xx_octeon_pf_device(struct octeon_device *oct)
}
EXPORT_SYMBOL_GPL(setup_cn23xx_octeon_pf_device);
-int validate_cn23xx_pf_config_info(struct octeon_device *oct,
- struct octeon_config *conf23xx)
-{
- if (CFG_GET_IQ_MAX_Q(conf23xx) > CN23XX_MAX_INPUT_QUEUES) {
- dev_err(&oct->pci_dev->dev, "%s: Num IQ (%d) exceeds Max (%d)\n",
- __func__, CFG_GET_IQ_MAX_Q(conf23xx),
- CN23XX_MAX_INPUT_QUEUES);
- return 1;
- }
-
- if (CFG_GET_OQ_MAX_Q(conf23xx) > CN23XX_MAX_OUTPUT_QUEUES) {
- dev_err(&oct->pci_dev->dev, "%s: Num OQ (%d) exceeds Max (%d)\n",
- __func__, CFG_GET_OQ_MAX_Q(conf23xx),
- CN23XX_MAX_OUTPUT_QUEUES);
- return 1;
- }
-
- if (CFG_GET_IQ_INSTR_TYPE(conf23xx) != OCTEON_32BYTE_INSTR &&
- CFG_GET_IQ_INSTR_TYPE(conf23xx) != OCTEON_64BYTE_INSTR) {
- dev_err(&oct->pci_dev->dev, "%s: Invalid instr type for IQ\n",
- __func__);
- return 1;
- }
-
- if (!CFG_GET_OQ_REFILL_THRESHOLD(conf23xx)) {
- dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n",
- __func__);
- return 1;
- }
-
- if (!(CFG_GET_OQ_INTR_TIME(conf23xx))) {
- dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n",
- __func__);
- return 1;
- }
-
- return 0;
-}
-
int cn23xx_fw_loaded(struct octeon_device *oct)
{
u64 val;
/* If there's more than one active PF on this NIC, then that
- * implies that the NIC firmware is loaded and running. This check
+ * implies that the NIC firmware is loaded and running. This check
* prevents a rare false negative that might occur if we only relied
- * on checking the SCR2_BIT_FW_LOADED flag. The false negative would
+ * on checking the SCR2_BIT_FW_LOADED flag. The false negative would
* happen if the PF driver sees SCR2_BIT_FW_LOADED as cleared even
* though the firmware was already loaded but still booting and has yet
* to set SCR2_BIT_FW_LOADED.
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
index e6f31d0d5c0b..bbe9f3133b07 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
@@ -54,13 +54,8 @@ struct oct_vf_stats {
int setup_cn23xx_octeon_pf_device(struct octeon_device *oct);
-int validate_cn23xx_pf_config_info(struct octeon_device *oct,
- struct octeon_config *conf23xx);
-
u32 cn23xx_pf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us);
-void cn23xx_dump_pf_initialized_regs(struct octeon_device *oct);
-
int cn23xx_sriov_config(struct octeon_device *oct);
int cn23xx_fw_loaded(struct octeon_device *oct);
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h
index 2d06097d3f61..40f529d0bc4c 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h
@@ -43,6 +43,4 @@ int cn23xx_octeon_pfvf_handshake(struct octeon_device *oct);
int cn23xx_setup_octeon_vf_device(struct octeon_device *oct);
u32 cn23xx_vf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us);
-
-void cn23xx_dump_vf_initialized_regs(struct octeon_device *oct);
#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
index 8ed57134ee0c..129c8b84f549 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
@@ -86,7 +86,6 @@ u32
lio_cn6xxx_update_read_index(struct octeon_instr_queue *iq);
void lio_cn6xxx_enable_interrupt(struct octeon_device *oct, u8 unused);
void lio_cn6xxx_disable_interrupt(struct octeon_device *oct, u8 unused);
-void cn6xxx_get_pcie_qlmport(struct octeon_device *oct);
void lio_cn6xxx_setup_reg_address(struct octeon_device *oct, void *chip,
struct octeon_reg_list *reg_list);
u32 lio_cn6xxx_coprocessor_clock(struct octeon_device *oct);
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index f38d31bfab1b..215dac201b4a 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -472,7 +472,7 @@ int setup_rx_oom_poll_fn(struct net_device *netdev)
q_no = lio->linfo.rxpciq[q].s.q_no;
wq = &lio->rxq_status_wq[q_no];
wq->wq = alloc_workqueue("rxq-oom-status",
- WQ_MEM_RECLAIM, 0);
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!wq->wq) {
dev_err(&oct->pci_dev->dev, "unable to create cavium rxq oom status wq\n");
return -ENOMEM;
@@ -1262,7 +1262,7 @@ int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
return -EINVAL;
}
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
lio->mtu = new_mtu;
WRITE_ONCE(sc->caller_is_done, true);
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
index d3e07b6ed5e1..c849e2c871a9 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -2496,37 +2496,31 @@ ret_intrmod:
return ret;
}
+#ifdef PTP_HARDWARE_TIMESTAMPING
static int lio_get_ts_info(struct net_device *netdev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct lio *lio = GET_LIO(netdev);
info->so_timestamping =
-#ifdef PTP_HARDWARE_TIMESTAMPING
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE |
- SOF_TIMESTAMPING_TX_SOFTWARE |
-#endif
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
+ SOF_TIMESTAMPING_TX_SOFTWARE;
if (lio->ptp_clock)
info->phc_index = ptp_clock_index(lio->ptp_clock);
- else
- info->phc_index = -1;
-#ifdef PTP_HARDWARE_TIMESTAMPING
info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
-#endif
return 0;
}
+#endif
/* Return register dump len. */
static int lio_get_regs_len(struct net_device *dev)
@@ -3146,7 +3140,9 @@ static const struct ethtool_ops lio_ethtool_ops = {
.set_coalesce = lio_set_intr_coalesce,
.get_priv_flags = lio_get_priv_flags,
.set_priv_flags = lio_set_priv_flags,
+#ifdef PTP_HARDWARE_TIMESTAMPING
.get_ts_info = lio_get_ts_info,
+#endif
};
static const struct ethtool_ops lio_vf_ethtool_ops = {
@@ -3169,7 +3165,9 @@ static const struct ethtool_ops lio_vf_ethtool_ops = {
.set_coalesce = lio_set_intr_coalesce,
.get_priv_flags = lio_get_priv_flags,
.set_priv_flags = lio_set_priv_flags,
+#ifdef PTP_HARDWARE_TIMESTAMPING
.get_ts_info = lio_get_ts_info,
+#endif
};
void liquidio_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 34f02a8ec2ca..0732440eeacd 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -92,12 +92,6 @@ static int octeon_console_debug_enabled(u32 console)
/* time to wait for possible in-flight requests in milliseconds */
#define WAIT_INFLIGHT_REQUEST msecs_to_jiffies(1000)
-struct oct_link_status_resp {
- u64 rh;
- struct oct_link_info link_info;
- u64 status;
-};
-
struct oct_timestamp_resp {
u64 rh;
u64 timestamp;
@@ -532,7 +526,8 @@ static inline int setup_link_status_change_wq(struct net_device *netdev)
struct octeon_device *oct = lio->oct_dev;
lio->link_status_wq.wq = alloc_workqueue("link-status",
- WQ_MEM_RECLAIM, 0);
+ WQ_MEM_RECLAIM | WQ_PERCPU,
+ 0);
if (!lio->link_status_wq.wq) {
dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
return -1;
@@ -665,7 +660,8 @@ static inline int setup_sync_octeon_time_wq(struct net_device *netdev)
struct octeon_device *oct = lio->oct_dev;
lio->sync_octeon_time_wq.wq =
- alloc_workqueue("update-octeon-time", WQ_MEM_RECLAIM, 0);
+ alloc_workqueue("update-octeon-time",
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!lio->sync_octeon_time_wq.wq) {
dev_err(&oct->pci_dev->dev, "Unable to create wq to update octeon time\n");
return -1;
@@ -1740,7 +1736,7 @@ static inline int setup_tx_poll_fn(struct net_device *netdev)
struct octeon_device *oct = lio->oct_dev;
lio->txq_status_wq.wq = alloc_workqueue("txq-status",
- WQ_MEM_RECLAIM, 0);
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!lio->txq_status_wq.wq) {
dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
return -1;
@@ -2111,20 +2107,16 @@ liquidio_get_stats64(struct net_device *netdev,
lstats->tx_fifo_errors;
}
-/**
- * hwtstamp_ioctl - Handler for SIOCSHWTSTAMP ioctl
- * @netdev: network device
- * @ifr: interface request
- */
-static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
+static int liquidio_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *conf,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config conf;
struct lio *lio = GET_LIO(netdev);
- if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
- return -EFAULT;
+ if (!lio->oct_dev->ptp_enable)
+ return -EOPNOTSUPP;
- switch (conf.tx_type) {
+ switch (conf->tx_type) {
case HWTSTAMP_TX_ON:
case HWTSTAMP_TX_OFF:
break;
@@ -2132,7 +2124,7 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
return -ERANGE;
}
- switch (conf.rx_filter) {
+ switch (conf->rx_filter) {
case HWTSTAMP_FILTER_NONE:
break;
case HWTSTAMP_FILTER_ALL:
@@ -2150,39 +2142,32 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_NTP_ALL:
- conf.rx_filter = HWTSTAMP_FILTER_ALL;
+ conf->rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
return -ERANGE;
}
- if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
+ if (conf->rx_filter == HWTSTAMP_FILTER_ALL)
ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
else
ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
- return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
+ return 0;
}
-/**
- * liquidio_ioctl - ioctl handler
- * @netdev: network device
- * @ifr: interface request
- * @cmd: command
- */
-static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+static int liquidio_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *conf)
{
struct lio *lio = GET_LIO(netdev);
- switch (cmd) {
- case SIOCSHWTSTAMP:
- if (lio->oct_dev->ptp_enable)
- return hwtstamp_ioctl(netdev, ifr);
- fallthrough;
- default:
- return -EOPNOTSUPP;
- }
+ /* TX timestamping is technically always on */
+ conf->tx_type = HWTSTAMP_TX_ON;
+ conf->rx_filter = ifstate_check(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED) ?
+ HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
+
+ return 0;
}
/**
@@ -3231,7 +3216,6 @@ static const struct net_device_ops lionetdevops = {
.ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid,
.ndo_change_mtu = liquidio_change_mtu,
- .ndo_eth_ioctl = liquidio_ioctl,
.ndo_fix_features = liquidio_fix_features,
.ndo_set_features = liquidio_set_features,
.ndo_set_vf_mac = liquidio_set_vf_mac,
@@ -3242,6 +3226,8 @@ static const struct net_device_ops lionetdevops = {
.ndo_set_vf_link_state = liquidio_set_vf_link_state,
.ndo_get_vf_stats = liquidio_get_vf_stats,
.ndo_get_port_parent_id = liquidio_get_port_parent_id,
+ .ndo_hwtstamp_get = liquidio_hwtstamp_get,
+ .ndo_hwtstamp_set = liquidio_hwtstamp_set,
};
/**
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index 62c2eadc33e3..e02942dbbcce 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -304,7 +304,8 @@ static int setup_link_status_change_wq(struct net_device *netdev)
struct octeon_device *oct = lio->oct_dev;
lio->link_status_wq.wq = alloc_workqueue("link-status",
- WQ_MEM_RECLAIM, 0);
+ WQ_MEM_RECLAIM | WQ_PERCPU,
+ 0);
if (!lio->link_status_wq.wq) {
dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
return -1;
@@ -1235,20 +1236,13 @@ liquidio_get_stats64(struct net_device *netdev,
lstats->tx_carrier_errors;
}
-/**
- * hwtstamp_ioctl - Handler for SIOCSHWTSTAMP ioctl
- * @netdev: network device
- * @ifr: interface request
- */
-static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
+static int liquidio_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *conf,
+ struct netlink_ext_ack *extack)
{
struct lio *lio = GET_LIO(netdev);
- struct hwtstamp_config conf;
-
- if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
- return -EFAULT;
- switch (conf.tx_type) {
+ switch (conf->tx_type) {
case HWTSTAMP_TX_ON:
case HWTSTAMP_TX_OFF:
break;
@@ -1256,7 +1250,7 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
return -ERANGE;
}
- switch (conf.rx_filter) {
+ switch (conf->rx_filter) {
case HWTSTAMP_FILTER_NONE:
break;
case HWTSTAMP_FILTER_ALL:
@@ -1274,35 +1268,31 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_NTP_ALL:
- conf.rx_filter = HWTSTAMP_FILTER_ALL;
+ conf->rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
return -ERANGE;
}
- if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
+ if (conf->rx_filter == HWTSTAMP_FILTER_ALL)
ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
else
ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
- return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
+ return 0;
}
-/**
- * liquidio_ioctl - ioctl handler
- * @netdev: network device
- * @ifr: interface request
- * @cmd: command
- */
-static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+static int liquidio_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *conf)
{
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return hwtstamp_ioctl(netdev, ifr);
- default:
- return -EOPNOTSUPP;
- }
+ struct lio *lio = GET_LIO(netdev);
+
+ /* TX timestamping is technically always on */
+ conf->tx_type = HWTSTAMP_TX_ON;
+ conf->rx_filter = ifstate_check(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED) ?
+ HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
+ return 0;
}
static void handle_timestamp(struct octeon_device *oct, u32 status, void *buf)
@@ -1880,9 +1870,10 @@ static const struct net_device_ops lionetdevops = {
.ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid,
.ndo_change_mtu = liquidio_change_mtu,
- .ndo_eth_ioctl = liquidio_ioctl,
.ndo_fix_features = liquidio_fix_features,
.ndo_set_features = liquidio_set_features,
+ .ndo_hwtstamp_get = liquidio_hwtstamp_get,
+ .ndo_hwtstamp_set = liquidio_hwtstamp_set,
};
static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
index aa6c0dfb6f1c..989b4ddae342 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
@@ -218,7 +218,7 @@ lio_vf_rep_change_mtu(struct net_device *ndev, int new_mtu)
return -EIO;
}
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
return 0;
}
@@ -272,13 +272,12 @@ lio_vf_rep_copy_packet(struct octeon_device *oct,
pg_info->page_offset;
memcpy(skb->data, va, MIN_SKB_SIZE);
skb_put(skb, MIN_SKB_SIZE);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ pg_info->page,
+ pg_info->page_offset + MIN_SKB_SIZE,
+ len - MIN_SKB_SIZE,
+ LIO_RXBUFFER_SZ);
}
-
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- pg_info->page,
- pg_info->page_offset + MIN_SKB_SIZE,
- len - MIN_SKB_SIZE,
- LIO_RXBUFFER_SZ);
} else {
struct octeon_skb_page_info *pg_info =
((struct octeon_skb_page_info *)(skb->cb));
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index 6b6cb73482d7..1753bb87dfbd 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -1433,22 +1433,6 @@ int octeon_wait_for_ddr_init(struct octeon_device *oct, u32 *timeout)
}
EXPORT_SYMBOL_GPL(octeon_wait_for_ddr_init);
-/* Get the octeon id assigned to the octeon device passed as argument.
- * This function is exported to other modules.
- * @param dev - octeon device pointer passed as a void *.
- * @return octeon device id
- */
-int lio_get_device_id(void *dev)
-{
- struct octeon_device *octeon_dev = (struct octeon_device *)dev;
- u32 i;
-
- for (i = 0; i < MAX_OCTEON_DEVICES; i++)
- if (octeon_device[i] == octeon_dev)
- return octeon_dev->octeon_id;
- return -1;
-}
-
void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq)
{
u64 instr_cnt;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
index fb380b4f3e02..19344b21f8fb 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
@@ -705,13 +705,6 @@ octeon_get_dispatch(struct octeon_device *octeon_dev, u16 opcode,
*/
struct octeon_device *lio_get_device(u32 octeon_id);
-/** Get the octeon id assigned to the octeon device passed as argument.
- * This function is exported to other modules.
- * @param dev - octeon device pointer passed as a void *.
- * @return octeon device id
- */
-int lio_get_device_id(void *dev);
-
/** Read windowed register.
* @param oct - pointer to the Octeon device.
* @param addr - Address of the register to read.
@@ -804,13 +797,6 @@ int octeon_init_consoles(struct octeon_device *oct);
int octeon_add_console(struct octeon_device *oct, u32 console_num,
char *dbg_enb);
-/** write or read from a console */
-int octeon_console_write(struct octeon_device *oct, u32 console_num,
- char *buffer, u32 write_request_size, u32 flags);
-int octeon_console_write_avail(struct octeon_device *oct, u32 console_num);
-
-int octeon_console_read_avail(struct octeon_device *oct, u32 console_num);
-
/** Removes all attached consoles. */
void octeon_remove_consoles(struct octeon_device *oct);
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
index 0d6ee30affb9..eef12fdd246d 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
@@ -30,11 +30,6 @@
#include "cn23xx_pf_device.h"
#include "cn23xx_vf_device.h"
-struct niclist {
- struct list_head list;
- void *ptr;
-};
-
struct __dispatch {
struct list_head list;
struct octeon_recv_info *rinfo;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
index c9b19e624dce..232ae72c0e37 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
@@ -395,8 +395,6 @@ int octeon_register_dispatch_fn(struct octeon_device *oct,
void *octeon_get_dispatch_arg(struct octeon_device *oct,
u16 opcode, u16 subcode);
-void octeon_droq_print_stats(void);
-
u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq);
int octeon_create_droq(struct octeon_device *oct, u32 q_no,
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
index bebf3bd349c6..a04f36a0e1a0 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
@@ -378,9 +378,6 @@ int octeon_send_command(struct octeon_device *oct, u32 iq_no,
u32 force_db, void *cmd, void *buf,
u32 datasize, u32 reqtype);
-void octeon_dump_soft_command(struct octeon_device *oct,
- struct octeon_soft_command *sc);
-
void octeon_prepare_soft_command(struct octeon_device *oct,
struct octeon_soft_command *sc,
u8 opcode, u8 subcode,
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_main.h b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
index 5b4cb725f60f..953edf0c7096 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_main.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
@@ -157,7 +157,7 @@ err_release_region:
response of the request.
* 0: the request will wait until its response gets back
* from the firmware within LIO_SC_MAX_TMO_MS milli sec.
- * It the response does not return within
+ * If the response does not return within
* LIO_SC_MAX_TMO_MS milli sec, lio_process_ordered_list()
* will move the request to zombie response list.
*
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
index 87dd6f89ce51..c139fc423764 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
@@ -268,7 +268,7 @@ octeon_alloc_soft_command_resp(struct octeon_device *oct,
* @param oct - octeon device pointer
* @param ndata - control structure with queueing, and buffer information
*
- * @returns IQ_FAILED if it failed to add to the input queue. IQ_STOP if it the
+ * @returns IQ_FAILED if it failed to add to the input queue. IQ_STOP if the
* queue should be stopped, and IQ_SEND_OK if it sent okay.
*/
int octnet_send_nic_data_pkt(struct octeon_device *oct,
@@ -278,7 +278,7 @@ int octnet_send_nic_data_pkt(struct octeon_device *oct,
/** Send a NIC control packet to the device
* @param oct - octeon device pointer
* @param nctrl - control structure with command, timout, and callback info
- * @returns IQ_FAILED if it failed to add to the input queue. IQ_STOP if it the
+ * @returns IQ_FAILED if it failed to add to the input queue. IQ_STOP if the
* queue should be stopped, and IQ_SEND_OK if it sent okay.
*/
int
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index de8a6ce86ad7..d7cfb20eea00 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -126,13 +126,13 @@ int octeon_init_instr_queue(struct octeon_device *oct,
oct->io_qmask.iq |= BIT_ULL(iq_no);
/* Set the 32B/64B mode for each input queue */
- oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
+ oct->io_qmask.iq64B |= ((u64)(conf->instr_type == 64) << iq_no);
iq->iqcmd_64B = (conf->instr_type == 64);
oct->fn_list.setup_iq_regs(oct, iq_no);
oct->check_db_wq[iq_no].wq = alloc_workqueue("check_iq_db",
- WQ_MEM_RECLAIM,
+ WQ_MEM_RECLAIM | WQ_PERCPU,
0);
if (!oct->check_db_wq[iq_no].wq) {
vfree(iq->request_list);
diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.c b/drivers/net/ethernet/cavium/liquidio/response_manager.c
index 861050966e18..de1a8335b545 100644
--- a/drivers/net/ethernet/cavium/liquidio/response_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/response_manager.c
@@ -39,7 +39,8 @@ int octeon_setup_response_list(struct octeon_device *oct)
}
spin_lock_init(&oct->cmd_resp_wqlock);
- oct->dma_comp_wq.wq = alloc_workqueue("dma-comp", WQ_MEM_RECLAIM, 0);
+ oct->dma_comp_wq.wq = alloc_workqueue("dma-comp",
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!oct->dma_comp_wq.wq) {
dev_err(&oct->pci_dev->dev, "failed to create wq thread\n");
return -ENOMEM;
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 007d4b06819e..c190fc6538d4 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -649,7 +649,7 @@ static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
struct octeon_mgmt *p = netdev_priv(netdev);
int max_packet = new_mtu + ETH_HLEN + ETH_FCS_LEN;
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
/* HW lifts the limit if the frame is VLAN tagged
* (+4 bytes per each tag, up to two tags)
@@ -690,19 +690,16 @@ static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
return IRQ_HANDLED;
}
-static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev,
- struct ifreq *rq, int cmd)
+static int octeon_mgmt_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct octeon_mgmt *p = netdev_priv(netdev);
- struct hwtstamp_config config;
- union cvmx_mio_ptp_clock_cfg ptp;
union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
+ union cvmx_mio_ptp_clock_cfg ptp;
bool have_hw_timestamps = false;
- if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
- return -EFAULT;
-
- /* Check the status of hardware for tiemstamps */
+ /* Check the status of hardware for timestamps */
if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
/* Get the current state of the PTP clock */
ptp.u64 = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_CFG);
@@ -733,10 +730,12 @@ static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev,
have_hw_timestamps = true;
}
- if (!have_hw_timestamps)
+ if (!have_hw_timestamps) {
+ NL_SET_ERR_MSG_MOD(extack, "HW doesn't support timestamping");
return -EINVAL;
+ }
- switch (config.tx_type) {
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
case HWTSTAMP_TX_ON:
break;
@@ -744,7 +743,7 @@ static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev,
return -ERANGE;
}
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
p->has_rx_tstamp = false;
rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
@@ -766,33 +765,34 @@ static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev,
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_NTP_ALL:
- p->has_rx_tstamp = have_hw_timestamps;
- config.rx_filter = HWTSTAMP_FILTER_ALL;
- if (p->has_rx_tstamp) {
- rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
- rxx_frm_ctl.s.ptp_mode = 1;
- cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
- }
+ p->has_rx_tstamp = true;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+ rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
+ rxx_frm_ctl.s.ptp_mode = 1;
+ cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
break;
default:
return -ERANGE;
}
- if (copy_to_user(rq->ifr_data, &config, sizeof(config)))
- return -EFAULT;
-
return 0;
}
-static int octeon_mgmt_ioctl(struct net_device *netdev,
- struct ifreq *rq, int cmd)
+static int octeon_mgmt_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
{
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return octeon_mgmt_ioctl_hwtstamp(netdev, rq, cmd);
- default:
- return phy_do_ioctl(netdev, rq, cmd);
- }
+ struct octeon_mgmt *p = netdev_priv(netdev);
+
+ /* Check the status of hardware for timestamps */
+ if (!OCTEON_IS_MODEL(OCTEON_CN6XXX))
+ return -EINVAL;
+
+ config->tx_type = HWTSTAMP_TX_ON;
+ config->rx_filter = p->has_rx_tstamp ?
+ HWTSTAMP_FILTER_ALL :
+ HWTSTAMP_FILTER_NONE;
+
+ return 0;
}
static void octeon_mgmt_disable_link(struct octeon_mgmt *p)
@@ -1370,11 +1370,13 @@ static const struct net_device_ops octeon_mgmt_ops = {
.ndo_start_xmit = octeon_mgmt_xmit,
.ndo_set_rx_mode = octeon_mgmt_set_rx_filtering,
.ndo_set_mac_address = octeon_mgmt_set_mac_address,
- .ndo_eth_ioctl = octeon_mgmt_ioctl,
+ .ndo_eth_ioctl = phy_do_ioctl,
.ndo_change_mtu = octeon_mgmt_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = octeon_mgmt_poll_controller,
#endif
+ .ndo_hwtstamp_get = octeon_mgmt_hwtstamp_get,
+ .ndo_hwtstamp_set = octeon_mgmt_hwtstamp_set,
};
static int octeon_mgmt_probe(struct platform_device *pdev)
@@ -1545,7 +1547,7 @@ static struct platform_driver octeon_mgmt_driver = {
.of_match_table = octeon_mgmt_match,
},
.probe = octeon_mgmt_probe,
- .remove_new = octeon_mgmt_remove,
+ .remove = octeon_mgmt_remove,
};
module_platform_driver(octeon_mgmt_driver);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index 34125b8cd935..413028bdcacb 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -516,8 +516,8 @@ static int nicvf_set_ringparam(struct net_device *netdev,
return 0;
}
-static int nicvf_get_rss_hash_opts(struct nicvf *nic,
- struct ethtool_rxnfc *info)
+static int nicvf_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *info)
{
info->data = 0;
@@ -541,36 +541,29 @@ static int nicvf_get_rss_hash_opts(struct nicvf *nic,
return 0;
}
-static int nicvf_get_rxnfc(struct net_device *dev,
- struct ethtool_rxnfc *info, u32 *rules)
+static u32 nicvf_get_rx_ring_count(struct net_device *dev)
{
struct nicvf *nic = netdev_priv(dev);
- int ret = -EOPNOTSUPP;
- switch (info->cmd) {
- case ETHTOOL_GRXRINGS:
- info->data = nic->rx_queues;
- ret = 0;
- break;
- case ETHTOOL_GRXFH:
- return nicvf_get_rss_hash_opts(nic, info);
- default:
- break;
- }
- return ret;
+ return nic->rx_queues;
}
-static int nicvf_set_rss_hash_opts(struct nicvf *nic,
- struct ethtool_rxnfc *info)
+static int nicvf_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *info,
+ struct netlink_ext_ack *extack)
{
- struct nicvf_rss_info *rss = &nic->rss_info;
- u64 rss_cfg = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);
+ struct nicvf *nic = netdev_priv(dev);
+ struct nicvf_rss_info *rss;
+ u64 rss_cfg;
+
+ rss = &nic->rss_info;
+ rss_cfg = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);
if (!rss->enable)
netdev_err(nic->netdev,
"RSS is disabled, hash cannot be set\n");
- netdev_info(nic->netdev, "Set RSS flow type = %d, data = %lld\n",
+ netdev_info(nic->netdev, "Set RSS flow type = %d, data = %u\n",
info->flow_type, info->data);
if (!(info->data & RXH_IP_SRC) || !(info->data & RXH_IP_DST))
@@ -628,19 +621,6 @@ static int nicvf_set_rss_hash_opts(struct nicvf *nic,
return 0;
}
-static int nicvf_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
-{
- struct nicvf *nic = netdev_priv(dev);
-
- switch (info->cmd) {
- case ETHTOOL_SRXFH:
- return nicvf_set_rss_hash_opts(nic, info);
- default:
- break;
- }
- return -EOPNOTSUPP;
-}
-
static u32 nicvf_get_rxfh_key_size(struct net_device *netdev)
{
return RSS_HASH_KEY_SIZE * sizeof(u64);
@@ -836,7 +816,7 @@ static int nicvf_set_pauseparam(struct net_device *dev,
}
static int nicvf_get_ts_info(struct net_device *netdev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct nicvf *nic = netdev_priv(netdev);
@@ -844,8 +824,6 @@ static int nicvf_get_ts_info(struct net_device *netdev,
return ethtool_op_get_ts_info(netdev, info);
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
@@ -873,12 +851,13 @@ static const struct ethtool_ops nicvf_ethtool_ops = {
.get_coalesce = nicvf_get_coalesce,
.get_ringparam = nicvf_get_ringparam,
.set_ringparam = nicvf_set_ringparam,
- .get_rxnfc = nicvf_get_rxnfc,
- .set_rxnfc = nicvf_set_rxnfc,
+ .get_rx_ring_count = nicvf_get_rx_ring_count,
.get_rxfh_key_size = nicvf_get_rxfh_key_size,
.get_rxfh_indir_size = nicvf_get_rxfh_indir_size,
.get_rxfh = nicvf_get_rxfh,
.set_rxfh = nicvf_set_rxfh,
+ .get_rxfh_fields = nicvf_get_rxfh_fields,
+ .set_rxfh_fields = nicvf_set_rxfh_fields,
.get_channels = nicvf_get_channels,
.set_channels = nicvf_set_channels,
.get_pauseparam = nicvf_get_pauseparam,
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index eff350e0bc2a..0b6e30a8feb0 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -1578,7 +1578,6 @@ napi_del:
static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
{
struct nicvf *nic = netdev_priv(netdev);
- int orig_mtu = netdev->mtu;
/* For now just support only the usual MTU sized frames,
* plus some headroom for VLAN, QinQ.
@@ -1589,15 +1588,10 @@ static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
return -EINVAL;
}
- netdev->mtu = new_mtu;
-
- if (!netif_running(netdev))
- return 0;
-
- if (nicvf_update_hw_max_frs(nic, new_mtu)) {
- netdev->mtu = orig_mtu;
+ if (netif_running(netdev) && nicvf_update_hw_max_frs(nic, new_mtu))
return -EINVAL;
- }
+
+ WRITE_ONCE(netdev->mtu, new_mtu);
return 0;
}
@@ -1905,18 +1899,18 @@ static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
}
}
-static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
+static int nicvf_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config config;
struct nicvf *nic = netdev_priv(netdev);
- if (!nic->ptp_clock)
+ if (!nic->ptp_clock) {
+ NL_SET_ERR_MSG_MOD(extack, "HW timestamping is not supported");
return -ENODEV;
+ }
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- switch (config.tx_type) {
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
case HWTSTAMP_TX_ON:
break;
@@ -1924,7 +1918,7 @@ static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
return -ERANGE;
}
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
nic->hw_rx_tstamp = false;
break;
@@ -1943,7 +1937,7 @@ static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
nic->hw_rx_tstamp = true;
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
return -ERANGE;
@@ -1952,20 +1946,24 @@ static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
if (netif_running(netdev))
nicvf_config_hw_rx_tstamp(nic, nic->hw_rx_tstamp);
- if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
- return -EFAULT;
-
return 0;
}
-static int nicvf_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
+static int nicvf_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
{
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return nicvf_config_hwtstamp(netdev, req);
- default:
- return -EOPNOTSUPP;
- }
+ struct nicvf *nic = netdev_priv(netdev);
+
+ if (!nic->ptp_clock)
+ return -ENODEV;
+
+ /* TX timestamping is technically always on */
+ config->tx_type = HWTSTAMP_TX_ON;
+ config->rx_filter = nic->hw_rx_tstamp ?
+ HWTSTAMP_FILTER_ALL :
+ HWTSTAMP_FILTER_NONE;
+
+ return 0;
}
static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs,
@@ -2087,8 +2085,9 @@ static const struct net_device_ops nicvf_netdev_ops = {
.ndo_fix_features = nicvf_fix_features,
.ndo_set_features = nicvf_set_features,
.ndo_bpf = nicvf_xdp,
- .ndo_eth_ioctl = nicvf_ioctl,
.ndo_set_rx_mode = nicvf_set_rx_mode,
+ .ndo_hwtstamp_get = nicvf_hwtstamp_get,
+ .ndo_hwtstamp_set = nicvf_hwtstamp_set,
};
static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 06397cc8bb36..5211759bfe47 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -1389,11 +1389,9 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
}
- /* Check if timestamp is requested */
- if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
- skb_tx_timestamp(skb);
+ /* Check if hw timestamp is requested */
+ if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
return;
- }
/* Tx timestamping not supported along with TSO, so ignore request */
if (skb_shinfo(skb)->gso_size)
@@ -1472,6 +1470,8 @@ static inline void nicvf_sq_doorbell(struct nicvf *nic, struct sk_buff *skb,
netdev_tx_sent_queue(txq, skb->len);
+ skb_tx_timestamp(skb);
+
/* make sure all memory stores are done before ringing doorbell */
smp_wmb();
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index 8453defc296c..b7531041c56d 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -359,8 +359,6 @@ int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx);
/* Register access APIs */
void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val);
u64 nicvf_reg_read(struct nicvf *nic, u64 offset);
-void nicvf_qset_reg_write(struct nicvf *nic, u64 offset, u64 val);
-u64 nicvf_qset_reg_read(struct nicvf *nic, u64 offset);
void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
u64 qidx, u64 val);
u64 nicvf_queue_reg_read(struct nicvf *nic,
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index a317feb8decb..9efb60842ad1 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -54,7 +54,7 @@ struct lmac {
bool link_up;
int lmacid; /* ID within BGX */
int lmacid_bd; /* ID on board */
- struct net_device netdev;
+ struct net_device *netdev;
struct phy_device *phydev;
unsigned int last_duplex;
unsigned int last_link;
@@ -590,10 +590,12 @@ static void bgx_sgmii_change_link_state(struct lmac *lmac)
static void bgx_lmac_handler(struct net_device *netdev)
{
- struct lmac *lmac = container_of(netdev, struct lmac, netdev);
struct phy_device *phydev;
+ struct lmac *lmac, **priv;
int link_changed = 0;
+ priv = netdev_priv(netdev);
+ lmac = *priv;
phydev = lmac->phydev;
if (!phydev->link && lmac->last_link)
@@ -1116,7 +1118,7 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
}
lmac->phydev->dev_flags = 0;
- if (phy_connect_direct(&lmac->netdev, lmac->phydev,
+ if (phy_connect_direct(lmac->netdev, lmac->phydev,
bgx_lmac_handler,
phy_interface_mode(lmac->lmac_type)))
return -ENODEV;
@@ -1414,7 +1416,7 @@ static acpi_status bgx_acpi_register_phy(acpi_handle handle,
acpi_get_mac_address(dev, adev, bgx->lmac[bgx->acpi_lmac_idx].mac);
- SET_NETDEV_DEV(&bgx->lmac[bgx->acpi_lmac_idx].netdev, dev);
+ SET_NETDEV_DEV(bgx->lmac[bgx->acpi_lmac_idx].netdev, dev);
bgx->lmac[bgx->acpi_lmac_idx].lmacid = bgx->acpi_lmac_idx;
bgx->acpi_lmac_idx++; /* move to next LMAC */
@@ -1427,9 +1429,9 @@ static acpi_status bgx_acpi_match_id(acpi_handle handle, u32 lvl,
{
struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
struct bgx *bgx = context;
- char bgx_sel[5];
+ char bgx_sel[7];
- snprintf(bgx_sel, 5, "BGX%d", bgx->bgx_id);
+ snprintf(bgx_sel, sizeof(bgx_sel), "BGX%d", bgx->bgx_id);
if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &string))) {
pr_warn("Invalid link device\n");
return AE_OK;
@@ -1483,7 +1485,7 @@ static int bgx_init_of_phy(struct bgx *bgx)
of_get_mac_address(node, bgx->lmac[lmac].mac);
- SET_NETDEV_DEV(&bgx->lmac[lmac].netdev, &bgx->pdev->dev);
+ SET_NETDEV_DEV(bgx->lmac[lmac].netdev, &bgx->pdev->dev);
bgx->lmac[lmac].lmacid = lmac;
phy_np = of_parse_phandle(node, "phy-handle", 0);
@@ -1491,13 +1493,17 @@ static int bgx_init_of_phy(struct bgx *bgx)
* this cortina phy, for which there is no driver
* support, ignore it.
*/
- if (phy_np &&
- !of_device_is_compatible(phy_np, "cortina,cs4223-slice")) {
- /* Wait until the phy drivers are available */
- pd = of_phy_find_device(phy_np);
- if (!pd)
- goto defer;
- bgx->lmac[lmac].phydev = pd;
+ if (phy_np) {
+ if (!of_device_is_compatible(phy_np, "cortina,cs4223-slice")) {
+ /* Wait until the phy drivers are available */
+ pd = of_phy_find_device(phy_np);
+ if (!pd) {
+ of_node_put(phy_np);
+ goto defer;
+ }
+ bgx->lmac[lmac].phydev = pd;
+ }
+ of_node_put(phy_np);
}
lmac++;
@@ -1513,11 +1519,11 @@ defer:
* for phy devices we may have already found.
*/
while (lmac) {
+ lmac--;
if (bgx->lmac[lmac].phydev) {
put_device(&bgx->lmac[lmac].phydev->mdio.dev);
bgx->lmac[lmac].phydev = NULL;
}
- lmac--;
}
of_node_put(node);
return -EPROBE_DEFER;
@@ -1603,10 +1609,10 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return dev_err_probe(dev, err, "Failed to enable PCI device\n");
}
- err = pci_request_regions(pdev, DRV_NAME);
+ err = pcim_request_all_regions(pdev, DRV_NAME);
if (err) {
dev_err(dev, "PCI request regions failed 0x%x\n", err);
- goto err_disable_device;
+ goto err_zero_drv_data;
}
/* MAP configuration registers */
@@ -1614,7 +1620,7 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!bgx->reg_base) {
dev_err(dev, "BGX: Cannot map CSR memory space, aborting\n");
err = -ENOMEM;
- goto err_release_regions;
+ goto err_zero_drv_data;
}
set_max_bgx_per_node(pdev);
@@ -1644,6 +1650,23 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
bgx_get_qlm_mode(bgx);
+ for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
+ struct lmac *lmacp, **priv;
+
+ lmacp = &bgx->lmac[lmac];
+ lmacp->netdev = alloc_netdev_dummy(sizeof(struct lmac *));
+
+ if (!lmacp->netdev) {
+ for (int i = 0; i < lmac; i++)
+ free_netdev(bgx->lmac[i].netdev);
+ err = -ENOMEM;
+ goto err_enable;
+ }
+
+ priv = netdev_priv(lmacp->netdev);
+ *priv = lmacp;
+ }
+
err = bgx_init_phy(bgx);
if (err)
goto err_enable;
@@ -1669,10 +1692,7 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err_enable:
bgx_vnic[bgx->bgx_id] = NULL;
pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx);
-err_release_regions:
- pci_release_regions(pdev);
-err_disable_device:
- pci_disable_device(pdev);
+err_zero_drv_data:
pci_set_drvdata(pdev, NULL);
return err;
}
@@ -1683,14 +1703,14 @@ static void bgx_remove(struct pci_dev *pdev)
u8 lmac;
/* Disable all LMACs */
- for (lmac = 0; lmac < bgx->lmac_count; lmac++)
+ for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
bgx_lmac_disable(bgx, lmac);
+ free_netdev(bgx->lmac[lmac].netdev);
+ }
pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx);
bgx_vnic[bgx->bgx_id] = NULL;
- pci_release_regions(pdev);
- pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index cdea49392185..84f16ababaee 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -219,9 +219,7 @@
void bgx_set_dmac_cam_filter(int node, int bgx_idx, int lmacid, u64 mac, u8 vf);
void bgx_reset_xcast_mode(int node, int bgx_idx, int lmacid, u8 vf);
void bgx_set_xcast_mode(int node, int bgx_idx, int lmacid, u8 mode);
-void octeon_mdiobus_force_mod_depencency(void);
void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable);
-void bgx_add_dmac_addr(u64 dmac, int node, int bgx_idx, int lmac);
unsigned bgx_get_map(int node);
int bgx_get_lmac_count(int node, int bgx);
const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid);
diff --git a/drivers/net/ethernet/chelsio/cxgb/common.h b/drivers/net/ethernet/chelsio/cxgb/common.h
index e56eff701395..304bb282ab03 100644
--- a/drivers/net/ethernet/chelsio/cxgb/common.h
+++ b/drivers/net/ethernet/chelsio/cxgb/common.h
@@ -329,8 +329,6 @@ irqreturn_t t1_slow_intr_handler(adapter_t *adapter);
int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
const struct board_info *t1_get_board_info(unsigned int board_id);
-const struct board_info *t1_get_board_info_from_ids(unsigned int devid,
- unsigned short ssid);
int t1_seeprom_read(adapter_t *adapter, u32 addr, __le32 *data);
int t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
struct adapter_params *p);
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index d2286adf09fe..4a0e2d2eb60a 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -351,7 +351,7 @@ static void set_msglevel(struct net_device *dev, u32 val)
adapter->msg_enable = val;
}
-static const char stats_strings[][ETH_GSTRING_LEN] = {
+static const char stats_strings[][ETH_GSTRING_LEN] __nonstring_array = {
"TxOctetsOK",
"TxOctetsBad",
"TxUnicastFramesOK",
@@ -844,7 +844,7 @@ static int t1_change_mtu(struct net_device *dev, int new_mtu)
return -EOPNOTSUPP;
if ((ret = mac->ops->set_mtu(mac, new_mtu)))
return ret;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
@@ -1034,7 +1034,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
NETIF_F_RXCSUM;
netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM |
- NETIF_F_RXCSUM | NETIF_F_LLTX | NETIF_F_HIGHDMA;
+ NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
+ netdev->lltx = true;
if (vlan_tso_capable(adapter)) {
netdev->features |=
diff --git a/drivers/net/ethernet/chelsio/cxgb/pm3393.c b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
index cbfa03d5663a..f3ada6e7cdc5 100644
--- a/drivers/net/ethernet/chelsio/cxgb/pm3393.c
+++ b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
@@ -141,7 +141,7 @@ static int pm3393_interrupt_enable(struct cmac *cmac)
pmwrite(cmac, SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE,
0 /*SUNI1x10GEXP_BITMSK_TOP_INTE */ );
- /* TERMINATOR - PL_INTERUPTS_EXT */
+ /* TERMINATOR - PL_INTERRUPTS_EXT */
pl_intr = readl(cmac->adapter->regs + A_PL_ENABLE);
pl_intr |= F_PL_INTR_EXT;
writel(pl_intr, cmac->adapter->regs + A_PL_ENABLE);
@@ -179,7 +179,7 @@ static int pm3393_interrupt_disable(struct cmac *cmac)
elmer &= ~ELMER0_GP_BIT1;
t1_tpi_write(cmac->adapter, A_ELMER0_INT_ENABLE, elmer);
- /* TERMINATOR - PL_INTERUPTS_EXT */
+ /* TERMINATOR - PL_INTERRUPTS_EXT */
/* DO NOT DISABLE TERMINATOR's EXTERNAL INTERRUPTS. ANOTHER CHIP
* COULD WANT THEM ENABLED. We disable PM3393 at the ELMER level.
*/
@@ -222,7 +222,7 @@ static int pm3393_interrupt_clear(struct cmac *cmac)
elmer |= ELMER0_GP_BIT1;
t1_tpi_write(cmac->adapter, A_ELMER0_INT_CAUSE, elmer);
- /* TERMINATOR - PL_INTERUPTS_EXT
+ /* TERMINATOR - PL_INTERRUPTS_EXT
*/
pl_intr = readl(cmac->adapter->regs + A_PL_CAUSE);
pl_intr |= F_PL_INTR_EXT;
@@ -756,7 +756,7 @@ static int pm3393_mac_reset(adapter_t * adapter)
/* ??? If this fails, might be able to software reset the XAUI part
* and try to recover... thus saving us from doing another HW reset */
- /* Has the XAUI MABC PLL circuitry stablized? */
+ /* Has the XAUI MABC PLL circuitry stabilized? */
is_xaui_mabc_pll_locked =
(val & SUNI1x10GEXP_BITMSK_TOP_SXRA_EXPIRED);
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index 861edff5ed89..5f354cf62cdd 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -1922,7 +1922,7 @@ send:
static void sge_tx_reclaim_cb(struct timer_list *t)
{
int i;
- struct sge *sge = from_timer(sge, t, tx_reclaim_timer);
+ struct sge *sge = timer_container_of(sge, t, tx_reclaim_timer);
for (i = 0; i < SGE_CMDQ_N; ++i) {
struct cmdQ *q = &sge->cmdQ[i];
@@ -1984,9 +1984,9 @@ void t1_sge_stop(struct sge *sge)
readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
if (is_T2(sge->adapter))
- del_timer_sync(&sge->espibug_timer);
+ timer_delete_sync(&sge->espibug_timer);
- del_timer_sync(&sge->tx_reclaim_timer);
+ timer_delete_sync(&sge->tx_reclaim_timer);
if (sge->tx_sched)
tx_sched_stop(sge);
@@ -2017,7 +2017,7 @@ void t1_sge_start(struct sge *sge)
*/
static void espibug_workaround_t204(struct timer_list *t)
{
- struct sge *sge = from_timer(sge, t, espibug_timer);
+ struct sge *sge = timer_container_of(sge, t, espibug_timer);
struct adapter *adapter = sge->adapter;
unsigned int nports = adapter->params.nports;
u32 seop[MAX_NPORTS];
@@ -2060,7 +2060,7 @@ static void espibug_workaround_t204(struct timer_list *t)
static void espibug_workaround(struct timer_list *t)
{
- struct sge *sge = from_timer(sge, t, espibug_timer);
+ struct sge *sge = timer_container_of(sge, t, espibug_timer);
struct adapter *adapter = sge->adapter;
if (netif_running(adapter->port[0].dev)) {
diff --git a/drivers/net/ethernet/chelsio/cxgb/tp.h b/drivers/net/ethernet/chelsio/cxgb/tp.h
index ba15675d56df..64f93dcc676b 100644
--- a/drivers/net/ethernet/chelsio/cxgb/tp.h
+++ b/drivers/net/ethernet/chelsio/cxgb/tp.h
@@ -65,9 +65,7 @@ void t1_tp_intr_enable(struct petp *tp);
void t1_tp_intr_clear(struct petp *tp);
int t1_tp_intr_handler(struct petp *tp);
-void t1_tp_get_mib_statistics(adapter_t *adap, struct tp_mib_statistics *tps);
void t1_tp_set_tcp_checksum_offload(struct petp *tp, int enable);
void t1_tp_set_ip_checksum_offload(struct petp *tp, int enable);
-int t1_tp_set_coalescing_size(struct petp *tp, unsigned int size);
int t1_tp_reset(struct petp *tp, struct tp_params *p, unsigned int tp_clk);
#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h
index f04e81f33795..a08fc762a438 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h
@@ -106,6 +106,4 @@ static inline struct t3c_tid_entry *lookup_atid(const struct tid_info *t,
return &e->t3c_tid;
}
-int attach_t3cdev(struct t3cdev *dev);
-void detach_t3cdev(struct t3cdev *dev);
#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 2236f1d35f2b..3b1321c8ed14 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -2559,7 +2559,7 @@ static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
return ret;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
init_port_mtus(adapter);
if (adapter->params.rev == 0 && offload_running(adapter))
t3_load_mtus(adapter, adapter->params.mtus,
@@ -2933,7 +2933,6 @@ static int t3_reenable_adapter(struct adapter *adapter)
}
pci_set_master(adapter->pdev);
pci_restore_state(adapter->pdev);
- pci_save_state(adapter->pdev);
/* Free sge resources */
t3_free_sge_resources(adapter);
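
The cxgb_change_mtu() hunk above switches the plain store to WRITE_ONCE() because dev->mtu may be read locklessly from the data path, where readers are expected to use READ_ONCE(). A minimal sketch of the convention, with a hypothetical foo_change_mtu():

	static int foo_change_mtu(struct net_device *dev, int new_mtu)
	{
		int ret = 0;	/* program the new MTU into hardware here */

		if (!ret)
			/* paired with READ_ONCE(dev->mtu) on the fast path */
			WRITE_ONCE(dev->mtu, new_mtu);
		return ret;
	}
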
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index 89256b866840..5a9f6925e1fa 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -515,23 +515,6 @@ void *cxgb3_free_atid(struct t3cdev *tdev, int atid)
EXPORT_SYMBOL(cxgb3_free_atid);
-/*
- * Free a server TID and return it to the free pool.
- */
-void cxgb3_free_stid(struct t3cdev *tdev, int stid)
-{
- struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
- union listen_entry *p = stid2entry(t, stid);
-
- spin_lock_bh(&t->stid_lock);
- p->next = t->sfree;
- t->sfree = p;
- t->stids_in_use--;
- spin_unlock_bh(&t->stid_lock);
-}
-
-EXPORT_SYMBOL(cxgb3_free_stid);
-
void cxgb3_insert_tid(struct t3cdev *tdev, struct cxgb3_client *client,
void *ctx, unsigned int tid)
{
@@ -671,28 +654,6 @@ int cxgb3_alloc_atid(struct t3cdev *tdev, struct cxgb3_client *client,
EXPORT_SYMBOL(cxgb3_alloc_atid);
-int cxgb3_alloc_stid(struct t3cdev *tdev, struct cxgb3_client *client,
- void *ctx)
-{
- int stid = -1;
- struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
-
- spin_lock_bh(&t->stid_lock);
- if (t->sfree) {
- union listen_entry *p = t->sfree;
-
- stid = (p - t->stid_tab) + t->stid_base;
- t->sfree = p->next;
- p->t3c_tid.ctx = ctx;
- p->t3c_tid.client = client;
- t->stids_in_use++;
- }
- spin_unlock_bh(&t->stid_lock);
- return stid;
-}
-
-EXPORT_SYMBOL(cxgb3_alloc_stid);
-
/* Get the t3cdev associated with a net_device */
struct t3cdev *dev2t3cdev(struct net_device *dev)
{
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.h b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.h
index 929c298115ca..7419824f9926 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.h
@@ -95,10 +95,7 @@ struct cxgb3_client {
*/
int cxgb3_alloc_atid(struct t3cdev *dev, struct cxgb3_client *client,
void *ctx);
-int cxgb3_alloc_stid(struct t3cdev *dev, struct cxgb3_client *client,
- void *ctx);
void *cxgb3_free_atid(struct t3cdev *dev, int atid);
-void cxgb3_free_stid(struct t3cdev *dev, int stid);
void cxgb3_insert_tid(struct t3cdev *dev, struct cxgb3_client *client,
void *ctx, unsigned int tid);
void cxgb3_queue_tid_release(struct t3cdev *dev, unsigned int tid);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
index 9749d1239f58..5d5f3380ecca 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
@@ -176,43 +176,6 @@ again:
EXPORT_SYMBOL(t3_l2t_send_slow);
-void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e)
-{
-again:
- switch (e->state) {
- case L2T_STATE_STALE: /* entry is stale, kick off revalidation */
- neigh_event_send(e->neigh, NULL);
- spin_lock_bh(&e->lock);
- if (e->state == L2T_STATE_STALE) {
- e->state = L2T_STATE_VALID;
- }
- spin_unlock_bh(&e->lock);
- return;
- case L2T_STATE_VALID: /* fast-path, send the packet on */
- return;
- case L2T_STATE_RESOLVING:
- spin_lock_bh(&e->lock);
- if (e->state != L2T_STATE_RESOLVING) {
- /* ARP already completed */
- spin_unlock_bh(&e->lock);
- goto again;
- }
- spin_unlock_bh(&e->lock);
-
- /*
- * Only the first packet added to the arpq should kick off
- * resolution. However, because the alloc_skb below can fail,
- * we allow each packet added to the arpq to retry resolution
- * as a way of recovering from transient memory exhaustion.
- * A better way would be to use a work request to retry L2T
- * entries when there's no memory.
- */
- neigh_event_send(e->neigh, NULL);
- }
-}
-
-EXPORT_SYMBOL(t3_l2t_send_event);
-
/*
* Allocate a free L2T entry. Must be called with l2t_data.lock held.
*/
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
index 646ca0bc25bd..33558f177497 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
@@ -113,7 +113,6 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
struct net_device *dev, const void *daddr);
int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
struct l2t_entry *e);
-void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e);
struct l2t_data *t3_init_l2t(unsigned int l2t_capacity);
int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index 6268f96cb4aa..b59735d0e065 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -2906,7 +2906,7 @@ void t3_sge_err_intr_handler(struct adapter *adapter)
*/
static void sge_timer_tx(struct timer_list *t)
{
- struct sge_qset *qs = from_timer(qs, t, tx_reclaim_timer);
+ struct sge_qset *qs = timer_container_of(qs, t, tx_reclaim_timer);
struct port_info *pi = netdev_priv(qs->netdev);
struct adapter *adap = pi->adapter;
unsigned int tbd[SGE_TXQ_PER_SET] = {0, 0};
@@ -2947,7 +2947,7 @@ static void sge_timer_tx(struct timer_list *t)
static void sge_timer_rx(struct timer_list *t)
{
spinlock_t *lock;
- struct sge_qset *qs = from_timer(qs, t, rx_reclaim_timer);
+ struct sge_qset *qs = timer_container_of(qs, t, rx_reclaim_timer);
struct port_info *pi = netdev_priv(qs->netdev);
struct adapter *adap = pi->adapter;
u32 status;
@@ -3223,9 +3223,9 @@ void t3_stop_sge_timers(struct adapter *adap)
struct sge_qset *q = &adap->sge.qs[i];
if (q->tx_reclaim_timer.function)
- del_timer_sync(&q->tx_reclaim_timer);
+ timer_delete_sync(&q->tx_reclaim_timer);
if (q->rx_reclaim_timer.function)
- del_timer_sync(&q->rx_reclaim_timer);
+ timer_delete_sync(&q->rx_reclaim_timer);
}
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
index 163efab27e9b..5060d3998889 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
@@ -120,7 +120,7 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
write_unlock_bh(&ctbl->lock);
dev_err(adap->pdev_dev,
"CLIP FW cmd failed with error %d, "
- "Connections using %pI6c wont be "
+ "Connections using %pI6c won't be "
"offloaded",
ret, ce->addr6.sin6_addr.s6_addr);
return ret;
@@ -133,7 +133,7 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
} else {
write_unlock_bh(&ctbl->lock);
dev_info(adap->pdev_dev, "CLIP table overflow, "
- "Connections using %pI6c wont be offloaded",
+ "Connections using %pI6c won't be offloaded",
(void *)lip);
return -ENOMEM;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index fca9533bc011..f20f4bc58492 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -674,7 +674,7 @@ struct port_info {
struct cxgb_fcoe fcoe;
#endif /* CONFIG_CHELSIO_T4_FCOE */
bool rxtstamp; /* Enable TS */
- struct hwtstamp_config tstamp_config;
+ struct kernel_hwtstamp_config tstamp_config;
bool ptp_enable;
struct sched_table *sched_tbl;
u32 eth_flags;
@@ -1211,9 +1211,6 @@ struct adapter {
struct timer_list flower_stats_timer;
struct work_struct flower_stats_work;
- /* Ethtool Dump */
- struct ethtool_dump eth_dump;
-
/* HMA */
struct hma_data hma;
@@ -1233,6 +1230,10 @@ struct adapter {
/* Ethtool n-tuple */
struct cxgb4_ethtool_filter *ethtool_filters;
+
+ /* Ethtool Dump */
+ /* Must be last - ends in a flex-array member. */
+ struct ethtool_dump eth_dump;
};
/* Support for "sched-class" command to allow a TX Scheduling Class to be
@@ -1315,7 +1316,7 @@ struct ch_sched_flowc {
* (value, mask) tuples. The associated ingress packet field matches the
* tuple when ((field & mask) == value). (Thus a wildcard "don't care" field
* rule can be constructed by specifying a tuple of (0, 0).) A filter rule
- * matches an ingress packet when all of the individual individual field
+ * matches an ingress packet when all of the individual field
* matching rules are true.
*
* Partial field masks are always valid, however, while it may be easy to
@@ -1608,7 +1609,6 @@ void t4_os_portmod_changed(struct adapter *adap, int port_id);
void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);
void t4_free_sge_resources(struct adapter *adap);
-void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q);
irq_handler_t t4_intr_handler(struct adapter *adap);
netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev);
int cxgb4_selftest_lb_pkt(struct net_device *netdev);
@@ -1958,11 +1958,6 @@ void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf);
void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate);
void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid);
-void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
- const u8 *addr);
-int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
- u64 mask0, u64 mask1, unsigned int crc, bool enable);
-
int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
enum dev_master master, enum dev_state *state);
int t4_fw_bye(struct adapter *adap, unsigned int mbox);
@@ -2082,7 +2077,7 @@ void t4_idma_monitor(struct adapter *adapter,
struct sge_idma_monitor_state *idma,
int hz, int ticks);
int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
- unsigned int naddr, u8 *addr);
+ u8 start, unsigned int naddr, u8 *addr);
void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
u32 start_index, bool sleep_ok);
void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
@@ -2146,28 +2141,6 @@ int cxgb4_free_mac_filt(struct adapter *adap, unsigned int viid,
unsigned int naddr, const u8 **addr, bool sleep_ok);
int cxgb4_init_mps_ref_entries(struct adapter *adap);
void cxgb4_free_mps_ref_entries(struct adapter *adap);
-int cxgb4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid,
- const u8 *addr, const u8 *mask,
- unsigned int vni, unsigned int vni_mask,
- u8 dip_hit, u8 lookup_type, bool sleep_ok);
-int cxgb4_free_encap_mac_filt(struct adapter *adap, unsigned int viid,
- int idx, bool sleep_ok);
-int cxgb4_free_raw_mac_filt(struct adapter *adap,
- unsigned int viid,
- const u8 *addr,
- const u8 *mask,
- unsigned int idx,
- u8 lookup_type,
- u8 port_id,
- bool sleep_ok);
-int cxgb4_alloc_raw_mac_filt(struct adapter *adap,
- unsigned int viid,
- const u8 *addr,
- const u8 *mask,
- unsigned int idx,
- u8 lookup_type,
- u8 port_id,
- bool sleep_ok);
int cxgb4_update_mac_filt(struct port_info *pi, unsigned int viid,
int *tcam_idx, const u8 *addr,
bool persistent, u8 *smt_idx);
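
One hunk above moves eth_dump to the end of struct adapter because struct ethtool_dump ends in a flexible-array member, and such a struct can only sit at the tail of a containing struct. A minimal illustration of the rule, with hypothetical names:

	struct tail_blob {
		u32 len;
		u8 data[];		/* flexible array member: must be the last field */
	};

	struct container {
		int other_state;
		struct tail_blob blob;	/* legal only as the final member; any field
					 * placed after it would overlap the storage
					 * addressed through data[] */
	};
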
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
index 80c6627fe981..c80a93347a8c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
@@ -122,7 +122,6 @@ void cxgb4_dcb_version_init(struct net_device *);
void cxgb4_dcb_reset(struct net_device *dev);
void cxgb4_dcb_state_fsm(struct net_device *, enum cxgb4_dcb_state_input);
void cxgb4_dcb_handle_fw_update(struct adapter *, const struct fw_port_cmd *);
-void cxgb4_dcb_set_caps(struct adapter *, const struct fw_port_cmd *);
extern const struct dcbnl_rtnl_ops cxgb4_dcb_ops;
static inline __u8 bitswap_1(unsigned char val)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 47eecde36285..23326235d4ab 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -1550,18 +1550,15 @@ out_free_fw:
return ret;
}
-static int get_ts_info(struct net_device *dev, struct ethtool_ts_info *ts_info)
+static int get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *ts_info)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
-
- ts_info->so_timestamping |= SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_TX_HARDWARE |
- SOF_TIMESTAMPING_RAW_HARDWARE;
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) |
(1 << HWTSTAMP_TX_ON);
@@ -1575,8 +1572,6 @@ static int get_ts_info(struct net_device *dev, struct ethtool_ts_info *ts_info)
if (adapter->ptp_clock)
ts_info->phc_index = ptp_clock_index(adapter->ptp_clock);
- else
- ts_info->phc_index = -1;
return 0;
}
@@ -1735,6 +1730,60 @@ static int cxgb4_ntuple_get_filter(struct net_device *dev,
return 0;
}
+static int cxgb4_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *info)
+{
+ const struct port_info *pi = netdev_priv(dev);
+ unsigned int v = pi->rss_mode;
+
+ info->data = 0;
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case UDP_V4_FLOW:
+ if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) &&
+ (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case IPV4_FLOW:
+ if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case TCP_V6_FLOW:
+ if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case UDP_V6_FLOW:
+ if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) &&
+ (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case IPV6_FLOW:
+ if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ }
+ return 0;
+}
+
static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
u32 *rules)
{
@@ -1744,56 +1793,6 @@ static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
int ret = 0;
switch (info->cmd) {
- case ETHTOOL_GRXFH: {
- unsigned int v = pi->rss_mode;
-
- info->data = 0;
- switch (info->flow_type) {
- case TCP_V4_FLOW:
- if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST |
- RXH_L4_B_0_1 | RXH_L4_B_2_3;
- else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST;
- break;
- case UDP_V4_FLOW:
- if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) &&
- (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
- info->data = RXH_IP_SRC | RXH_IP_DST |
- RXH_L4_B_0_1 | RXH_L4_B_2_3;
- else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST;
- break;
- case SCTP_V4_FLOW:
- case AH_ESP_V4_FLOW:
- case IPV4_FLOW:
- if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST;
- break;
- case TCP_V6_FLOW:
- if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST |
- RXH_L4_B_0_1 | RXH_L4_B_2_3;
- else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST;
- break;
- case UDP_V6_FLOW:
- if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) &&
- (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
- info->data = RXH_IP_SRC | RXH_IP_DST |
- RXH_L4_B_0_1 | RXH_L4_B_2_3;
- else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST;
- break;
- case SCTP_V6_FLOW:
- case AH_ESP_V6_FLOW:
- case IPV6_FLOW:
- if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST;
- break;
- }
- return 0;
- }
case ETHTOOL_GRXRINGS:
info->data = pi->nqsets;
return 0;
@@ -2204,6 +2203,7 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
.get_rxfh_indir_size = get_rss_table_size,
.get_rxfh = get_rss_table,
.set_rxfh = set_rss_table,
+ .get_rxfh_fields = cxgb4_get_rxfh_fields,
.self_test = cxgb4_self_test,
.flash_device = set_flash,
.get_ts_info = get_ts_info,
@@ -2275,6 +2275,7 @@ int cxgb4_init_ethtool_filters(struct adapter *adap)
eth_filter->port[i].bmap = bitmap_zalloc(nentries, GFP_KERNEL);
if (!eth_filter->port[i].bmap) {
ret = -ENOMEM;
+ kvfree(eth_filter->port[i].loc_array);
goto free_eth_finfo;
}
}
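
The ethtool hunks above move the ETHTOOL_GRXFH handling out of get_rxnfc() into the dedicated .get_rxfh_fields callback; the decision logic is unchanged, only the entry point differs. A minimal sketch of the new op, where foo_get_rxfh_fields() and the flow-type decisions are illustrative and only the op signature mirrors the one used above:

	static int foo_get_rxfh_fields(struct net_device *dev,
				       struct ethtool_rxfh_fields *info)
	{
		info->data = 0;

		switch (info->flow_type) {
		case TCP_V4_FLOW:
			/* report 4-tuple hashing for TCP/IPv4 */
			info->data = RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			break;
		case UDP_V4_FLOW:
			/* report 2-tuple hashing for UDP/IPv4 */
			info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		}
		return 0;
	}

	static const struct ethtool_ops foo_ethtool_ops = {
		.get_rxfh_fields = foo_get_rxfh_fields,
		/* ... other ops ... */
	};
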
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c
index 33b2c0c45509..f6f745f5c022 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c
@@ -81,8 +81,7 @@ int cxgb_fcoe_enable(struct net_device *netdev)
netdev->features |= NETIF_F_FCOE_CRC;
netdev->vlan_features |= NETIF_F_FCOE_CRC;
- netdev->features |= NETIF_F_FCOE_MTU;
- netdev->vlan_features |= NETIF_F_FCOE_MTU;
+ netdev->fcoe_mtu = true;
netdev_features_change(netdev);
@@ -112,8 +111,7 @@ int cxgb_fcoe_disable(struct net_device *netdev)
netdev->features &= ~NETIF_F_FCOE_CRC;
netdev->vlan_features &= ~NETIF_F_FCOE_CRC;
- netdev->features &= ~NETIF_F_FCOE_MTU;
- netdev->vlan_features &= ~NETIF_F_FCOE_MTU;
+ netdev->fcoe_mtu = false;
netdev_features_change(netdev);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index 786ceae34488..dd9e68465e69 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -1244,7 +1244,8 @@ static u64 hash_filter_ntuple(struct ch_filter_specification *fs,
* in the Compressed Filter Tuple.
*/
if (tp->vlan_shift >= 0 && fs->mask.ivlan)
- ntuple |= (FT_VLAN_VLD_F | fs->val.ivlan) << tp->vlan_shift;
+ ntuple |= (u64)(FT_VLAN_VLD_F |
+ fs->val.ivlan) << tp->vlan_shift;
if (tp->port_shift >= 0 && fs->mask.iport)
ntuple |= (u64)fs->val.iport << tp->port_shift;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 2eb33a727bba..043733c5c812 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -1799,7 +1799,10 @@ void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
struct adapter *adap = container_of(t, struct adapter, tids);
struct sk_buff *skb;
- WARN_ON(tid_out_of_range(&adap->tids, tid));
+ if (tid_out_of_range(&adap->tids, tid)) {
+ dev_err(adap->pdev_dev, "tid %d out of range\n", tid);
+ return;
+ }
if (t->tid_tab[tid - adap->tids.tid_base]) {
t->tid_tab[tid - adap->tids.tid_base] = NULL;
@@ -2188,18 +2191,6 @@ void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
}
EXPORT_SYMBOL(cxgb4_get_tcp_stats);
-void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
- const unsigned int *pgsz_order)
-{
- struct adapter *adap = netdev2adap(dev);
-
- t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask);
- t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) |
- HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) |
- HPZ3_V(pgsz_order[3]));
-}
-EXPORT_SYMBOL(cxgb4_iscsi_init);
-
int cxgb4_flush_eq_cache(struct net_device *dev)
{
struct adapter *adap = netdev2adap(dev);
@@ -3051,12 +3042,87 @@ static void cxgb_get_stats(struct net_device *dev,
ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
}
+static int cxgb_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct port_info *pi = netdev_priv(dev);
+
+ *config = pi->tstamp_config;
+ return 0;
+}
+
+static int cxgb_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+
+ if (is_t4(adapter->params.chip)) {
+ /* For T4 Adapters */
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ pi->rxtstamp = false;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ pi->rxtstamp = true;
+ break;
+ default:
+ return -ERANGE;
+ }
+ pi->tstamp_config = *config;
+ return 0;
+ }
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ pi->rxtstamp = false;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ cxgb4_ptprx_timestamping(pi, pi->port_id, PTP_TS_L4);
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ cxgb4_ptprx_timestamping(pi, pi->port_id, PTP_TS_L2_L4);
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ pi->rxtstamp = true;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (config->tx_type == HWTSTAMP_TX_OFF &&
+ config->rx_filter == HWTSTAMP_FILTER_NONE) {
+ if (cxgb4_ptp_txtype(adapter, pi->port_id) >= 0)
+ pi->ptp_enable = false;
+ }
+
+ if (config->rx_filter != HWTSTAMP_FILTER_NONE) {
+ if (cxgb4_ptp_redirect_rx_packet(adapter, pi) >= 0)
+ pi->ptp_enable = true;
+ }
+ pi->tstamp_config = *config;
+ return 0;
+}
+
static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
unsigned int mbox;
int ret = 0, prtad, devad;
struct port_info *pi = netdev_priv(dev);
- struct adapter *adapter = pi->adapter;
struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
switch (cmd) {
@@ -3085,81 +3151,6 @@ static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
data->reg_num, data->val_in);
break;
- case SIOCGHWTSTAMP:
- return copy_to_user(req->ifr_data, &pi->tstamp_config,
- sizeof(pi->tstamp_config)) ?
- -EFAULT : 0;
- case SIOCSHWTSTAMP:
- if (copy_from_user(&pi->tstamp_config, req->ifr_data,
- sizeof(pi->tstamp_config)))
- return -EFAULT;
-
- if (!is_t4(adapter->params.chip)) {
- switch (pi->tstamp_config.tx_type) {
- case HWTSTAMP_TX_OFF:
- case HWTSTAMP_TX_ON:
- break;
- default:
- return -ERANGE;
- }
-
- switch (pi->tstamp_config.rx_filter) {
- case HWTSTAMP_FILTER_NONE:
- pi->rxtstamp = false;
- break;
- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- cxgb4_ptprx_timestamping(pi, pi->port_id,
- PTP_TS_L4);
- break;
- case HWTSTAMP_FILTER_PTP_V2_EVENT:
- cxgb4_ptprx_timestamping(pi, pi->port_id,
- PTP_TS_L2_L4);
- break;
- case HWTSTAMP_FILTER_ALL:
- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- pi->rxtstamp = true;
- break;
- default:
- pi->tstamp_config.rx_filter =
- HWTSTAMP_FILTER_NONE;
- return -ERANGE;
- }
-
- if ((pi->tstamp_config.tx_type == HWTSTAMP_TX_OFF) &&
- (pi->tstamp_config.rx_filter ==
- HWTSTAMP_FILTER_NONE)) {
- if (cxgb4_ptp_txtype(adapter, pi->port_id) >= 0)
- pi->ptp_enable = false;
- }
-
- if (pi->tstamp_config.rx_filter !=
- HWTSTAMP_FILTER_NONE) {
- if (cxgb4_ptp_redirect_rx_packet(adapter,
- pi) >= 0)
- pi->ptp_enable = true;
- }
- } else {
- /* For T4 Adapters */
- switch (pi->tstamp_config.rx_filter) {
- case HWTSTAMP_FILTER_NONE:
- pi->rxtstamp = false;
- break;
- case HWTSTAMP_FILTER_ALL:
- pi->rxtstamp = true;
- break;
- default:
- pi->tstamp_config.rx_filter =
- HWTSTAMP_FILTER_NONE;
- return -ERANGE;
- }
- }
- return copy_to_user(req->ifr_data, &pi->tstamp_config,
- sizeof(pi->tstamp_config)) ?
- -EFAULT : 0;
default:
return -EOPNOTSUPP;
}
@@ -3180,7 +3171,7 @@ static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
ret = t4_set_rxmode(pi->adapter, pi->adapter->mbox, pi->viid,
pi->viid_mirror, new_mtu, -1, -1, -1, -1, true);
if (!ret)
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return ret;
}
@@ -3246,7 +3237,7 @@ static int cxgb4_mgmt_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
dev_info(pi->adapter->pdev_dev,
"Setting MAC %pM on VF %d\n", mac, vf);
- ret = t4_set_vf_mac_acl(adap, vf + 1, 1, mac);
+ ret = t4_set_vf_mac_acl(adap, vf + 1, pi->lport, 1, mac);
if (!ret)
ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, mac);
return ret;
@@ -3306,7 +3297,7 @@ static int cxgb4_mgmt_set_vf_rate(struct net_device *dev, int vf,
}
if (max_tx_rate == 0) {
- /* unbind VF to to any Traffic Class */
+ /* unbind VF to any Traffic Class */
fw_pfvf =
(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH));
@@ -3494,7 +3485,7 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
struct adapter *adap = pi->adapter;
struct ch_sched_queue qe = { 0 };
struct ch_sched_params p = { 0 };
- struct sched_class *e;
+ struct ch_sched_class *e;
u32 req_rate;
int err = 0;
@@ -3884,6 +3875,8 @@ static const struct net_device_ops cxgb4_netdev_ops = {
.ndo_setup_tc = cxgb_setup_tc,
.ndo_features_check = cxgb_features_check,
.ndo_fix_features = cxgb_fix_features,
+ .ndo_hwtstamp_get = cxgb_hwtstamp_get,
+ .ndo_hwtstamp_set = cxgb_hwtstamp_set,
};
#ifdef CONFIG_PCI_IOV
@@ -4825,7 +4818,7 @@ static int adap_init0(struct adapter *adap, int vpd_skip)
goto bye;
}
- /* Get FW from from /lib/firmware/ */
+ /* Get FW from /lib/firmware/ */
ret = request_firmware(&fw, fw_info->fw_mod_name,
adap->pdev_dev);
if (ret < 0) {
@@ -5465,7 +5458,6 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
if (!adap) {
pci_restore_state(pdev);
- pci_save_state(pdev);
return PCI_ERS_RESULT_RECOVERED;
}
@@ -5480,7 +5472,6 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
pci_set_master(pdev);
pci_restore_state(pdev);
- pci_save_state(pdev);
if (t4_wait_dev_ready(adap->regs) < 0)
return PCI_ERS_RESULT_DISCONNECT;
@@ -6489,10 +6480,11 @@ static const struct tlsdev_ops cxgb4_ktls_ops = {
#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
-static int cxgb4_xfrm_add_state(struct xfrm_state *x,
+static int cxgb4_xfrm_add_state(struct net_device *dev,
+ struct xfrm_state *x,
struct netlink_ext_ack *extack)
{
- struct adapter *adap = netdev2adap(x->xso.dev);
+ struct adapter *adap = netdev2adap(dev);
int ret;
if (!mutex_trylock(&uld_mutex)) {
@@ -6503,7 +6495,8 @@ static int cxgb4_xfrm_add_state(struct xfrm_state *x,
if (ret)
goto out_unlock;
- ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_add(x, extack);
+ ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_add(dev, x,
+ extack);
out_unlock:
mutex_unlock(&uld_mutex);
@@ -6511,9 +6504,9 @@ out_unlock:
return ret;
}
-static void cxgb4_xfrm_del_state(struct xfrm_state *x)
+static void cxgb4_xfrm_del_state(struct net_device *dev, struct xfrm_state *x)
{
- struct adapter *adap = netdev2adap(x->xso.dev);
+ struct adapter *adap = netdev2adap(dev);
if (!mutex_trylock(&uld_mutex)) {
dev_dbg(adap->pdev_dev,
@@ -6523,15 +6516,15 @@ static void cxgb4_xfrm_del_state(struct xfrm_state *x)
if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
goto out_unlock;
- adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_delete(x);
+ adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_delete(dev, x);
out_unlock:
mutex_unlock(&uld_mutex);
}
-static void cxgb4_xfrm_free_state(struct xfrm_state *x)
+static void cxgb4_xfrm_free_state(struct net_device *dev, struct xfrm_state *x)
{
- struct adapter *adap = netdev2adap(x->xso.dev);
+ struct adapter *adap = netdev2adap(dev);
if (!mutex_trylock(&uld_mutex)) {
dev_dbg(adap->pdev_dev,
@@ -6541,36 +6534,19 @@ static void cxgb4_xfrm_free_state(struct xfrm_state *x)
if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
goto out_unlock;
- adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_free(x);
-
-out_unlock:
- mutex_unlock(&uld_mutex);
-}
-
-static bool cxgb4_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
-{
- struct adapter *adap = netdev2adap(x->xso.dev);
- bool ret = false;
-
- if (!mutex_trylock(&uld_mutex)) {
- dev_dbg(adap->pdev_dev,
- "crypto uld critical resource is under use\n");
- return ret;
- }
- if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
- goto out_unlock;
-
- ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_offload_ok(skb, x);
+ adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_free(dev, x);
out_unlock:
mutex_unlock(&uld_mutex);
- return ret;
}
static void cxgb4_advance_esn_state(struct xfrm_state *x)
{
struct adapter *adap = netdev2adap(x->xso.dev);
+ if (x->xso.dir != XFRM_DEV_OFFLOAD_IN)
+ return;
+
if (!mutex_trylock(&uld_mutex)) {
dev_dbg(adap->pdev_dev,
"crypto uld critical resource is under use\n");
@@ -6589,7 +6565,6 @@ static const struct xfrmdev_ops cxgb4_xfrmdev_ops = {
.xdo_dev_state_add = cxgb4_xfrm_add_state,
.xdo_dev_state_delete = cxgb4_xfrm_del_state,
.xdo_dev_state_free = cxgb4_xfrm_free_state,
- .xdo_dev_offload_ok = cxgb4_ipsec_offload_ok,
.xdo_dev_state_advance_esn = cxgb4_advance_esn_state,
};
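
The cxgb4_main.c changes above replace the SIOCGHWTSTAMP/SIOCSHWTSTAMP ioctl plumbing with the dedicated ndo_hwtstamp_get/ndo_hwtstamp_set callbacks, which receive a kernel_hwtstamp_config directly and leave the user copies to the core. A minimal sketch of the pair, where foo_priv and the stored config field are hypothetical:

	struct foo_priv {
		struct kernel_hwtstamp_config tstamp_config;
	};

	static int foo_hwtstamp_get(struct net_device *dev,
				    struct kernel_hwtstamp_config *config)
	{
		struct foo_priv *priv = netdev_priv(dev);

		*config = priv->tstamp_config;
		return 0;
	}

	static int foo_hwtstamp_set(struct net_device *dev,
				    struct kernel_hwtstamp_config *config,
				    struct netlink_ext_ack *extack)
	{
		struct foo_priv *priv = netdev_priv(dev);

		switch (config->rx_filter) {
		case HWTSTAMP_FILTER_NONE:
		case HWTSTAMP_FILTER_ALL:
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "unsupported rx filter");
			return -ERANGE;
		}

		/* program the hardware here, then remember the accepted config */
		priv->tstamp_config = *config;
		return 0;
	}

	static const struct net_device_ops foo_netdev_ops = {
		.ndo_hwtstamp_get = foo_hwtstamp_get,
		.ndo_hwtstamp_set = foo_hwtstamp_set,
	};
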
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c
index a020e8490681..60f4d5b5eb3a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c
@@ -28,28 +28,6 @@ static int cxgb4_mps_ref_dec_by_mac(struct adapter *adap,
return ret;
}
-static int cxgb4_mps_ref_dec(struct adapter *adap, u16 idx)
-{
- struct mps_entries_ref *mps_entry, *tmp;
- int ret = -EINVAL;
-
- spin_lock(&adap->mps_ref_lock);
- list_for_each_entry_safe(mps_entry, tmp, &adap->mps_ref, list) {
- if (mps_entry->idx == idx) {
- if (!refcount_dec_and_test(&mps_entry->refcnt)) {
- spin_unlock(&adap->mps_ref_lock);
- return -EBUSY;
- }
- list_del(&mps_entry->list);
- kfree(mps_entry);
- ret = 0;
- break;
- }
- }
- spin_unlock(&adap->mps_ref_lock);
- return ret;
-}
-
static int cxgb4_mps_ref_inc(struct adapter *adap, const u8 *mac_addr,
u16 idx, const u8 *mask)
{
@@ -141,82 +119,6 @@ int cxgb4_update_mac_filt(struct port_info *pi, unsigned int viid,
return ret;
}
-int cxgb4_free_raw_mac_filt(struct adapter *adap,
- unsigned int viid,
- const u8 *addr,
- const u8 *mask,
- unsigned int idx,
- u8 lookup_type,
- u8 port_id,
- bool sleep_ok)
-{
- int ret = 0;
-
- if (!cxgb4_mps_ref_dec(adap, idx))
- ret = t4_free_raw_mac_filt(adap, viid, addr,
- mask, idx, lookup_type,
- port_id, sleep_ok);
-
- return ret;
-}
-
-int cxgb4_alloc_raw_mac_filt(struct adapter *adap,
- unsigned int viid,
- const u8 *addr,
- const u8 *mask,
- unsigned int idx,
- u8 lookup_type,
- u8 port_id,
- bool sleep_ok)
-{
- int ret;
-
- ret = t4_alloc_raw_mac_filt(adap, viid, addr,
- mask, idx, lookup_type,
- port_id, sleep_ok);
- if (ret < 0)
- return ret;
-
- if (cxgb4_mps_ref_inc(adap, addr, ret, mask)) {
- ret = -ENOMEM;
- t4_free_raw_mac_filt(adap, viid, addr,
- mask, idx, lookup_type,
- port_id, sleep_ok);
- }
-
- return ret;
-}
-
-int cxgb4_free_encap_mac_filt(struct adapter *adap, unsigned int viid,
- int idx, bool sleep_ok)
-{
- int ret = 0;
-
- if (!cxgb4_mps_ref_dec(adap, idx))
- ret = t4_free_encap_mac_filt(adap, viid, idx, sleep_ok);
-
- return ret;
-}
-
-int cxgb4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid,
- const u8 *addr, const u8 *mask,
- unsigned int vni, unsigned int vni_mask,
- u8 dip_hit, u8 lookup_type, bool sleep_ok)
-{
- int ret;
-
- ret = t4_alloc_encap_mac_filt(adap, viid, addr, mask, vni, vni_mask,
- dip_hit, lookup_type, sleep_ok);
- if (ret < 0)
- return ret;
-
- if (cxgb4_mps_ref_inc(adap, addr, ret, mask)) {
- ret = -ENOMEM;
- t4_free_encap_mac_filt(adap, viid, ret, sleep_ok);
- }
- return ret;
-}
-
int cxgb4_init_mps_ref_entries(struct adapter *adap)
{
spin_lock_init(&adap->mps_ref_lock);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index 72ac4a34424b..e2b5554531b5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -161,20 +161,9 @@ static struct ch_tc_flower_entry *ch_flower_lookup(struct adapter *adap,
static void cxgb4_process_flow_match(struct net_device *dev,
struct flow_rule *rule,
+ u16 addr_type,
struct ch_filter_specification *fs)
{
- u16 addr_type = 0;
-
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
- struct flow_match_control match;
-
- flow_rule_match_control(rule, &match);
- addr_type = match.key->addr_type;
- } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
- addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
- } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
- addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
- }
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic match;
@@ -305,7 +294,7 @@ static void cxgb4_process_flow_match(struct net_device *dev,
fs->mask.iport = ~0;
}
-static int cxgb4_validate_flow_match(struct net_device *dev,
+static int cxgb4_validate_flow_match(struct netlink_ext_ack *extack,
struct flow_rule *rule)
{
struct flow_dissector *dissector = rule->match.dissector;
@@ -321,8 +310,9 @@ static int cxgb4_validate_flow_match(struct net_device *dev,
BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) |
BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
BIT_ULL(FLOW_DISSECTOR_KEY_IP))) {
- netdev_warn(dev, "Unsupported key used: 0x%llx\n",
- dissector->used_keys);
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Unsupported key used: 0x%llx",
+ dissector->used_keys);
return -EOPNOTSUPP;
}
@@ -339,13 +329,15 @@ static int cxgb4_validate_flow_match(struct net_device *dev,
struct flow_match_ip match;
if (eth_ip_type != ETH_P_IP && eth_ip_type != ETH_P_IPV6) {
- netdev_err(dev, "IP Key supported only with IPv4/v6");
+ NL_SET_ERR_MSG_MOD(extack,
+ "IP Key supported only with IPv4/v6");
return -EINVAL;
}
flow_rule_match_ip(rule, &match);
if (match.mask->ttl) {
- netdev_warn(dev, "ttl match unsupported for offload");
+ NL_SET_ERR_MSG_MOD(extack,
+ "ttl match unsupported for offload");
return -EOPNOTSUPP;
}
}
@@ -576,7 +568,7 @@ static bool valid_l4_mask(u32 mask)
return hi && lo ? false : true;
}
-static bool valid_pedit_action(struct net_device *dev,
+static bool valid_pedit_action(struct netlink_ext_ack *extack,
const struct flow_action_entry *act,
u8 *natmode_flags)
{
@@ -595,8 +587,7 @@ static bool valid_pedit_action(struct net_device *dev,
case PEDIT_ETH_SMAC_47_16:
break;
default:
- netdev_err(dev, "%s: Unsupported pedit field\n",
- __func__);
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported pedit field");
return false;
}
break;
@@ -609,8 +600,7 @@ static bool valid_pedit_action(struct net_device *dev,
*natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
break;
default:
- netdev_err(dev, "%s: Unsupported pedit field\n",
- __func__);
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported pedit field");
return false;
}
break;
@@ -629,8 +619,7 @@ static bool valid_pedit_action(struct net_device *dev,
*natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
break;
default:
- netdev_err(dev, "%s: Unsupported pedit field\n",
- __func__);
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported pedit field");
return false;
}
break;
@@ -638,8 +627,8 @@ static bool valid_pedit_action(struct net_device *dev,
switch (offset) {
case PEDIT_TCP_SPORT_DPORT:
if (!valid_l4_mask(~mask)) {
- netdev_err(dev, "%s: Unsupported mask for TCP L4 ports\n",
- __func__);
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported mask for TCP L4 ports");
return false;
}
if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
@@ -648,8 +637,7 @@ static bool valid_pedit_action(struct net_device *dev,
*natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
break;
default:
- netdev_err(dev, "%s: Unsupported pedit field\n",
- __func__);
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported pedit field");
return false;
}
break;
@@ -657,8 +645,8 @@ static bool valid_pedit_action(struct net_device *dev,
switch (offset) {
case PEDIT_UDP_SPORT_DPORT:
if (!valid_l4_mask(~mask)) {
- netdev_err(dev, "%s: Unsupported mask for UDP L4 ports\n",
- __func__);
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported mask for UDP L4 ports");
return false;
}
if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
@@ -667,13 +655,12 @@ static bool valid_pedit_action(struct net_device *dev,
*natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
break;
default:
- netdev_err(dev, "%s: Unsupported pedit field\n",
- __func__);
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported pedit field");
return false;
}
break;
default:
- netdev_err(dev, "%s: Unsupported pedit type\n", __func__);
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported pedit type");
return false;
}
return true;
@@ -727,8 +714,7 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
* the provided output port is not valid
*/
if (!found) {
- netdev_err(dev, "%s: Out port invalid\n",
- __func__);
+ NL_SET_ERR_MSG_MOD(extack, "Out port invalid");
return -EINVAL;
}
act_redir = true;
@@ -745,21 +731,21 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
case FLOW_ACTION_VLAN_PUSH:
case FLOW_ACTION_VLAN_MANGLE:
if (proto != ETH_P_8021Q) {
- netdev_err(dev, "%s: Unsupported vlan proto\n",
- __func__);
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported vlan proto");
return -EOPNOTSUPP;
}
break;
default:
- netdev_err(dev, "%s: Unsupported vlan action\n",
- __func__);
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported vlan action");
return -EOPNOTSUPP;
}
act_vlan = true;
}
break;
case FLOW_ACTION_MANGLE: {
- bool pedit_valid = valid_pedit_action(dev, act,
+ bool pedit_valid = valid_pedit_action(extack, act,
&natmode_flags);
if (!pedit_valid)
@@ -771,14 +757,14 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
/* Do nothing. cxgb4_set_filter will validate */
break;
default:
- netdev_err(dev, "%s: Unsupported action\n", __func__);
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
return -EOPNOTSUPP;
}
}
if ((act_pedit || act_vlan) && !act_redir) {
- netdev_err(dev, "%s: pedit/vlan rewrite invalid without egress redirect\n",
- __func__);
+ NL_SET_ERR_MSG_MOD(extack,
+ "pedit/vlan rewrite invalid without egress redirect");
return -EINVAL;
}
@@ -858,16 +844,38 @@ int cxgb4_flow_rule_replace(struct net_device *dev, struct flow_rule *rule,
{
struct adapter *adap = netdev2adap(dev);
struct filter_ctx ctx;
+ u16 addr_type = 0;
u8 inet_family;
int fidx, ret;
if (cxgb4_validate_flow_actions(dev, &rule->action, extack, 0))
return -EOPNOTSUPP;
- if (cxgb4_validate_flow_match(dev, rule))
+ if (cxgb4_validate_flow_match(extack, rule))
return -EOPNOTSUPP;
- cxgb4_process_flow_match(dev, rule, fs);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match;
+
+ flow_rule_match_control(rule, &match);
+ addr_type = match.key->addr_type;
+
+ if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
+ fs->val.frag = match.key->flags & FLOW_DIS_IS_FRAGMENT;
+ fs->mask.frag = true;
+ }
+
+ if (!flow_rule_is_supp_control_flags(FLOW_DIS_IS_FRAGMENT,
+ match.mask->flags, extack))
+ return -EOPNOTSUPP;
+
+ } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+ addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+ } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+ addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+ }
+
+ cxgb4_process_flow_match(dev, rule, addr_type, fs);
cxgb4_process_flow_actions(dev, &rule->action, fs);
fs->hash = is_filter_exact_match(adap, fs);
@@ -901,8 +909,7 @@ int cxgb4_flow_rule_replace(struct net_device *dev, struct flow_rule *rule,
init_completion(&ctx.completion);
ret = __cxgb4_set_filter(dev, fidx, fs, &ctx);
if (ret) {
- netdev_err(dev, "%s: filter creation err %d\n",
- __func__, ret);
+ NL_SET_ERR_MSG_FMT_MOD(extack, "filter creation err %d", ret);
return ret;
}
@@ -1052,7 +1059,7 @@ static void ch_flower_stats_handler(struct work_struct *work)
static void ch_flower_stats_cb(struct timer_list *t)
{
- struct adapter *adap = from_timer(adap, t, flower_stats_timer);
+ struct adapter *adap = timer_container_of(adap, t, flower_stats_timer);
schedule_work(&adap->flower_stats_work);
}
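
The tc_flower hunks above convert validation failures from netdev_err()/netdev_warn() to extack messages (NL_SET_ERR_MSG_MOD / NL_SET_ERR_MSG_FMT_MOD), so the reason reaches the userspace tc command instead of only the kernel log. A minimal sketch of the reporting pattern, with a hypothetical foo_validate():

	static int foo_validate(struct flow_rule *rule,
				struct netlink_ext_ack *extack)
	{
		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
			/* the string travels back to userspace in the netlink ACK */
			NL_SET_ERR_MSG_MOD(extack, "MPLS matching is not supported");
			return -EOPNOTSUPP;
		}
		return 0;
	}
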
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
index 1672d3afe5be..f8dcf0b4abcd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
@@ -56,7 +56,7 @@ static int cxgb4_matchall_egress_validate(struct net_device *dev,
struct port_info *pi = netdev2pinfo(dev);
struct flow_action_entry *entry;
struct ch_sched_queue qe;
- struct sched_class *e;
+ struct ch_sched_class *e;
u64 max_link_rate;
u32 i, speed;
int ret;
@@ -180,7 +180,7 @@ static int cxgb4_matchall_alloc_tc(struct net_device *dev,
struct port_info *pi = netdev2pinfo(dev);
struct adapter *adap = netdev2adap(dev);
struct flow_action_entry *entry;
- struct sched_class *e;
+ struct ch_sched_class *e;
int ret;
u32 i;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
index 338b04f339b3..a2dcd2e24263 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
@@ -330,7 +330,7 @@ static int cxgb4_mqprio_alloc_tc(struct net_device *dev,
struct cxgb4_tc_port_mqprio *tc_port_mqprio;
struct port_info *pi = netdev2pinfo(dev);
struct adapter *adap = netdev2adap(dev);
- struct sched_class *e;
+ struct ch_sched_class *e;
int ret;
u8 i;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
index a5d2f84dcdd5..8524246fd67e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
@@ -186,7 +186,7 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);
/* Ensure that uhtid is either root u32 (i.e. 0x800)
- * or a a valid linked bucket.
+ * or a valid linked bucket.
*/
if (uhtid != 0x800 && uhtid >= t->size)
return -EINVAL;
@@ -422,7 +422,7 @@ int cxgb4_delete_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
uhtid = TC_U32_USERHTID(cls->knode.handle);
/* Ensure that uhtid is either root u32 (i.e. 0x800)
- * or a a valid linked bucket.
+ * or a valid linked bucket.
*/
if (uhtid != 0x800 && uhtid >= t->size)
return -EINVAL;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h
index 9050568a034c..64663112cad8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h
@@ -242,7 +242,7 @@ struct cxgb4_next_header {
* field's value to jump to next header such as IHL field
* in IPv4 header.
*/
- struct tc_u32_sel sel;
+ struct tc_u32_sel_hdr sel;
struct tc_u32_key key;
/* location of jump to make */
const struct cxgb4_match_field *jump;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c
index b08356060fb4..7bab8da8f6e6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c
@@ -29,7 +29,7 @@ static int cxgb4_thermal_get_temp(struct thermal_zone_device *tzdev,
return 0;
}
-static struct thermal_zone_device_ops cxgb4_thermal_ops = {
+static const struct thermal_zone_device_ops cxgb4_thermal_ops = {
.get_temp = cxgb4_thermal_get_temp,
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 17faac715882..5c13bcb4550d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -406,7 +406,7 @@ free_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info)
for (i = 0; i < nq; i++) {
struct sge_uld_txq *txq = &txq_info->uldtxq[i];
- if (txq && txq->q.desc) {
+ if (txq->q.desc) {
tasklet_kill(&txq->qresume_tsk);
t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
txq->q.cntxt_id);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index a9599ba26975..d7713038386c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -508,7 +508,6 @@ unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo);
unsigned int cxgb4_port_chan(const struct net_device *dev);
unsigned int cxgb4_port_e2cchan(const struct net_device *dev);
unsigned int cxgb4_port_viid(const struct net_device *dev);
-unsigned int cxgb4_tp_smt_idx(enum chip_type chip, unsigned int viid);
unsigned int cxgb4_port_idx(const struct net_device *dev);
unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
unsigned int *idx);
@@ -519,8 +518,6 @@ unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
unsigned int *mtu_idxp);
void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
struct tp_tcp_stats *v6);
-void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
- const unsigned int *pgsz_order);
struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
unsigned int skb_len, unsigned int pull_len);
int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx, u16 size);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 1e5f5b1a22a6..c02b4e9c06b2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -608,25 +608,6 @@ struct l2t_entry *t4_l2t_alloc_switching(struct adapter *adap, u16 vlan,
return e;
}
-/**
- * cxgb4_l2t_alloc_switching - Allocates an L2T entry for switch filters
- * @dev: net_device pointer
- * @vlan: VLAN Id
- * @port: Associated port
- * @dmac: Destination MAC address to add to L2T
- * Returns pointer to the allocated l2t entry
- *
- * Allocates an L2T entry for use by switching rule of a filter
- */
-struct l2t_entry *cxgb4_l2t_alloc_switching(struct net_device *dev, u16 vlan,
- u8 port, u8 *dmac)
-{
- struct adapter *adap = netdev2adap(dev);
-
- return t4_l2t_alloc_switching(adap, vlan, port, dmac);
-}
-EXPORT_SYMBOL(cxgb4_l2t_alloc_switching);
-
struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end)
{
unsigned int l2t_size;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.h b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
index 340fecb28a13..8aad7e9dee6d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
@@ -115,8 +115,6 @@ struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
unsigned int priority);
u64 cxgb4_select_ntuple(struct net_device *dev,
const struct l2t_entry *l2t);
-struct l2t_entry *cxgb4_l2t_alloc_switching(struct net_device *dev, u16 vlan,
- u8 port, u8 *dmac);
void t4_l2t_update(struct adapter *adap, struct neighbour *neigh);
struct l2t_entry *t4_l2t_alloc_switching(struct adapter *adap, u16 vlan,
u8 port, u8 *dmac);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c
index a1b14468d1ff..38a30aeee122 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c
@@ -44,7 +44,7 @@ static int t4_sched_class_fw_cmd(struct port_info *pi,
{
struct adapter *adap = pi->adapter;
struct sched_table *s = pi->sched_tbl;
- struct sched_class *e;
+ struct ch_sched_class *e;
int err = 0;
e = &s->tab[p->u.params.class];
@@ -122,7 +122,7 @@ static void *t4_sched_entry_lookup(struct port_info *pi,
const u32 val)
{
struct sched_table *s = pi->sched_tbl;
- struct sched_class *e, *end;
+ struct ch_sched_class *e, *end;
void *found = NULL;
/* Look for an entry with matching @val */
@@ -166,8 +166,8 @@ static void *t4_sched_entry_lookup(struct port_info *pi,
return found;
}
-struct sched_class *cxgb4_sched_queue_lookup(struct net_device *dev,
- struct ch_sched_queue *p)
+struct ch_sched_class *cxgb4_sched_queue_lookup(struct net_device *dev,
+ struct ch_sched_queue *p)
{
struct port_info *pi = netdev2pinfo(dev);
struct sched_queue_entry *qe = NULL;
@@ -187,7 +187,7 @@ static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
struct sched_queue_entry *qe = NULL;
struct adapter *adap = pi->adapter;
struct sge_eth_txq *txq;
- struct sched_class *e;
+ struct ch_sched_class *e;
int err = 0;
if (p->queue < 0 || p->queue >= pi->nqsets)
@@ -218,7 +218,7 @@ static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
struct sched_queue_entry *qe = NULL;
struct adapter *adap = pi->adapter;
struct sge_eth_txq *txq;
- struct sched_class *e;
+ struct ch_sched_class *e;
unsigned int qid;
int err = 0;
@@ -260,7 +260,7 @@ static int t4_sched_flowc_unbind(struct port_info *pi, struct ch_sched_flowc *p)
{
struct sched_flowc_entry *fe = NULL;
struct adapter *adap = pi->adapter;
- struct sched_class *e;
+ struct ch_sched_class *e;
int err = 0;
if (p->tid < 0 || p->tid >= adap->tids.neotids)
@@ -288,7 +288,7 @@ static int t4_sched_flowc_bind(struct port_info *pi, struct ch_sched_flowc *p)
struct sched_table *s = pi->sched_tbl;
struct sched_flowc_entry *fe = NULL;
struct adapter *adap = pi->adapter;
- struct sched_class *e;
+ struct ch_sched_class *e;
int err = 0;
if (p->tid < 0 || p->tid >= adap->tids.neotids)
@@ -322,7 +322,7 @@ out_err:
}
static void t4_sched_class_unbind_all(struct port_info *pi,
- struct sched_class *e,
+ struct ch_sched_class *e,
enum sched_bind_type type)
{
if (!e)
@@ -476,12 +476,12 @@ int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
}
/* If @p is NULL, fetch any available unused class */
-static struct sched_class *t4_sched_class_lookup(struct port_info *pi,
- const struct ch_sched_params *p)
+static struct ch_sched_class *t4_sched_class_lookup(struct port_info *pi,
+ const struct ch_sched_params *p)
{
struct sched_table *s = pi->sched_tbl;
- struct sched_class *found = NULL;
- struct sched_class *e, *end;
+ struct ch_sched_class *found = NULL;
+ struct ch_sched_class *e, *end;
if (!p) {
/* Get any available unused class */
@@ -522,10 +522,10 @@ static struct sched_class *t4_sched_class_lookup(struct port_info *pi,
return found;
}
-static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
- struct ch_sched_params *p)
+static struct ch_sched_class *t4_sched_class_alloc(struct port_info *pi,
+ struct ch_sched_params *p)
{
- struct sched_class *e = NULL;
+ struct ch_sched_class *e = NULL;
u8 class_id;
int err;
@@ -579,8 +579,8 @@ static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
* scheduling class with matching @p is found, then the matching class is
* returned.
*/
-struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
- struct ch_sched_params *p)
+struct ch_sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
+ struct ch_sched_params *p)
{
struct port_info *pi = netdev2pinfo(dev);
u8 class_id;
@@ -607,7 +607,7 @@ void cxgb4_sched_class_free(struct net_device *dev, u8 classid)
struct port_info *pi = netdev2pinfo(dev);
struct sched_table *s = pi->sched_tbl;
struct ch_sched_params p;
- struct sched_class *e;
+ struct ch_sched_class *e;
u32 speed;
int ret;
@@ -640,7 +640,7 @@ void cxgb4_sched_class_free(struct net_device *dev, u8 classid)
}
}
-static void t4_sched_class_free(struct net_device *dev, struct sched_class *e)
+static void t4_sched_class_free(struct net_device *dev, struct ch_sched_class *e)
{
struct port_info *pi = netdev2pinfo(dev);
@@ -660,7 +660,7 @@ struct sched_table *t4_init_sched(unsigned int sched_size)
s->sched_size = sched_size;
for (i = 0; i < s->sched_size; i++) {
- memset(&s->tab[i], 0, sizeof(struct sched_class));
+ memset(&s->tab[i], 0, sizeof(struct ch_sched_class));
s->tab[i].idx = i;
s->tab[i].state = SCHED_STATE_UNUSED;
INIT_LIST_HEAD(&s->tab[i].entry_list);
@@ -682,7 +682,7 @@ void t4_cleanup_sched(struct adapter *adap)
continue;
for (i = 0; i < s->sched_size; i++) {
- struct sched_class *e;
+ struct ch_sched_class *e;
e = &s->tab[i];
if (e->state == SCHED_STATE_ACTIVE)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.h b/drivers/net/ethernet/chelsio/cxgb4/sched.h
index 6b3c778815f0..4d3b5a757536 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.h
@@ -71,7 +71,7 @@ struct sched_flowc_entry {
struct ch_sched_flowc param;
};
-struct sched_class {
+struct ch_sched_class {
u8 state;
u8 idx;
struct ch_sched_params info;
@@ -82,7 +82,7 @@ struct sched_class {
struct sched_table { /* per port scheduling table */
u8 sched_size;
- struct sched_class tab[] __counted_by(sched_size);
+ struct ch_sched_class tab[] __counted_by(sched_size);
};
static inline bool can_sched(struct net_device *dev)
@@ -103,15 +103,15 @@ static inline bool valid_class_id(struct net_device *dev, u8 class_id)
return true;
}
-struct sched_class *cxgb4_sched_queue_lookup(struct net_device *dev,
- struct ch_sched_queue *p);
+struct ch_sched_class *cxgb4_sched_queue_lookup(struct net_device *dev,
+ struct ch_sched_queue *p);
int cxgb4_sched_class_bind(struct net_device *dev, void *arg,
enum sched_bind_type type);
int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
enum sched_bind_type type);
-struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
- struct ch_sched_params *p);
+struct ch_sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
+ struct ch_sched_params *p);
void cxgb4_sched_class_free(struct net_device *dev, u8 classid);
struct sched_table *t4_init_sched(unsigned int size);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 49d5808b7d11..9fccb8ea9bcd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -163,7 +163,7 @@ static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
* for DMA, but this is of course never sent to the hardware and is only used
* to prevent double unmappings. All of the above requires that the Free List
* Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
- * 32-byte or or a power of 2 greater in alignment. Since the SGE's minimal
+ * 32-byte or a power of 2 greater in alignment. Since the SGE's minimal
* Free List Buffer alignment is 32 bytes, this works out for us ...
*/
enum {
@@ -1533,7 +1533,6 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
} else {
q = &adap->sge.ethtxq[qidx + pi->first_qset];
}
- skb_tx_timestamp(skb);
reclaim_completed_tx(adap, &q->q, -1, true);
cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
@@ -1706,6 +1705,8 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
cpl->len = htons(skb->len);
cpl->ctrl1 = cpu_to_be64(cntrl);
+ skb_tx_timestamp(skb);
+
if (immediate) {
cxgb4_inline_tx_skb(skb, &q->q, sgl);
dev_consume_skb_any(skb);
@@ -2268,7 +2269,6 @@ static int ethofld_hard_xmit(struct net_device *dev,
d = &eosw_txq->desc[eosw_txq->last_pidx];
skb = d->skb;
- skb_tx_timestamp(skb);
wr = (struct fw_eth_tx_eo_wr *)&eohw_txq->q.desc[eohw_txq->q.pidx];
if (unlikely(eosw_txq->state != CXGB4_EO_STATE_ACTIVE &&
@@ -2373,6 +2373,7 @@ write_wr_headers:
eohw_txq->vlan_ins++;
txq_advance(&eohw_txq->q, ndesc);
+ skb_tx_timestamp(skb);
cxgb4_ring_tx_db(adap, &eohw_txq->q, ndesc);
eosw_txq_advance_index(&eosw_txq->last_pidx, 1, eosw_txq->ndesc);
@@ -2670,12 +2671,12 @@ int cxgb4_selftest_lb_pkt(struct net_device *netdev)
lb->loopback = 1;
q = &adap->sge.ethtxq[pi->first_qset];
- __netif_tx_lock(q->txq, smp_processor_id());
+ __netif_tx_lock_bh(q->txq);
reclaim_completed_tx(adap, &q->q, -1, true);
credits = txq_avail(&q->q) - ndesc;
if (unlikely(credits < 0)) {
- __netif_tx_unlock(q->txq);
+ __netif_tx_unlock_bh(q->txq);
return -ENOMEM;
}
@@ -2710,7 +2711,7 @@ int cxgb4_selftest_lb_pkt(struct net_device *netdev)
init_completion(&lb->completion);
txq_advance(&q->q, ndesc);
cxgb4_ring_tx_db(adap, &q->q, ndesc);
- __netif_tx_unlock(q->txq);
+ __netif_tx_unlock_bh(q->txq);
/* wait for the pkt to return */
ret = wait_for_completion_timeout(&lb->completion, 10 * HZ);
@@ -4233,7 +4234,7 @@ static void sge_rx_timer_cb(struct timer_list *t)
{
unsigned long m;
unsigned int i;
- struct adapter *adap = from_timer(adap, t, sge.rx_timer);
+ struct adapter *adap = timer_container_of(adap, t, sge.rx_timer);
struct sge *s = &adap->sge;
for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
@@ -4268,7 +4269,7 @@ done:
static void sge_tx_timer_cb(struct timer_list *t)
{
- struct adapter *adap = from_timer(adap, t, sge.tx_timer);
+ struct adapter *adap = timer_container_of(adap, t, sge.tx_timer);
struct sge *s = &adap->sge;
unsigned long m, period;
unsigned int i, budget;
@@ -4874,22 +4875,6 @@ void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
}
}
-/**
- * t4_free_ofld_rxqs - free a block of consecutive Rx queues
- * @adap: the adapter
- * @n: number of queues
- * @q: pointer to first queue
- *
- * Release the resources of a consecutive block of offload Rx queues.
- */
-void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
-{
- for ( ; n; n--, q++)
- if (q->rspq.desc)
- free_rspq_fl(adap, &q->rspq,
- q->fl.size ? &q->fl : NULL);
-}
-
void t4_sge_free_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq)
{
if (txq->q.desc) {
@@ -5012,9 +4997,9 @@ void t4_sge_stop(struct adapter *adap)
struct sge *s = &adap->sge;
if (s->rx_timer.function)
- del_timer_sync(&s->rx_timer);
+ timer_delete_sync(&s->rx_timer);
if (s->tx_timer.function)
- del_timer_sync(&s->tx_timer);
+ timer_delete_sync(&s->tx_timer);
if (is_offload(adap)) {
struct sge_uld_txq_info *txq_info;
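Aside (not part of the patch): the timer hunks above follow the tree-wide rename of from_timer()/del_timer_sync() to timer_container_of()/timer_delete_sync(); the semantics are unchanged. A minimal sketch with a hypothetical driver structure:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_adapter {
	struct timer_list rx_timer;
	unsigned long ticks;
};

static void my_rx_timer_cb(struct timer_list *t)
{
	/* timer_container_of() resolves the enclosing structure from the
	 * timer_list pointer, exactly like from_timer() used to. */
	struct my_adapter *adap = timer_container_of(adap, t, rx_timer);

	adap->ticks++;
	mod_timer(&adap->rx_timer, jiffies + HZ);
}

static void my_stop(struct my_adapter *adap)
{
	/* timer_delete_sync() is the current name for del_timer_sync(). */
	timer_delete_sync(&adap->rx_timer);
}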
diff --git a/drivers/net/ethernet/chelsio/cxgb4/srq.c b/drivers/net/ethernet/chelsio/cxgb4/srq.c
index 9a54302bb046..a77d6ac1ee8c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/srq.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/srq.c
@@ -51,64 +51,6 @@ struct srq_data *t4_init_srq(int srq_size)
return s;
}
-/* cxgb4_get_srq_entry: read the SRQ table entry
- * @dev: Pointer to the net_device
- * @idx: Index to the srq
- * @entryp: pointer to the srq entry
- *
- * Sends CPL_SRQ_TABLE_REQ message for the given index.
- * Contents will be returned in CPL_SRQ_TABLE_RPL message.
- *
- * Returns zero if the read is successful, else a error
- * number will be returned. Caller should not use the srq
- * entry if the return value is non-zero.
- *
- *
- */
-int cxgb4_get_srq_entry(struct net_device *dev,
- int srq_idx, struct srq_entry *entryp)
-{
- struct cpl_srq_table_req *req;
- struct adapter *adap;
- struct sk_buff *skb;
- struct srq_data *s;
- int rc = -ENODEV;
-
- adap = netdev2adap(dev);
- s = adap->srq;
-
- if (!(adap->flags & CXGB4_FULL_INIT_DONE) || !s)
- goto out;
-
- skb = alloc_skb(sizeof(*req), GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
- req = (struct cpl_srq_table_req *)
- __skb_put_zero(skb, sizeof(*req));
- INIT_TP_WR(req, 0);
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SRQ_TABLE_REQ,
- TID_TID_V(srq_idx) |
- TID_QID_V(adap->sge.fw_evtq.abs_id)));
- req->idx = srq_idx;
-
- mutex_lock(&s->lock);
-
- s->entryp = entryp;
- t4_mgmt_tx(adap, skb);
-
- rc = wait_for_completion_timeout(&s->comp, SRQ_WAIT_TO);
- if (rc)
- rc = 0;
- else /* !rc means we timed out */
- rc = -ETIMEDOUT;
-
- WARN_ON_ONCE(entryp->idx != srq_idx);
- mutex_unlock(&s->lock);
-out:
- return rc;
-}
-EXPORT_SYMBOL(cxgb4_get_srq_entry);
-
void do_srq_table_rpl(struct adapter *adap,
const struct cpl_srq_table_rpl *rpl)
{
diff --git a/drivers/net/ethernet/chelsio/cxgb4/srq.h b/drivers/net/ethernet/chelsio/cxgb4/srq.h
index ec85cf93865a..d9f04bd5ffa3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/srq.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/srq.h
@@ -58,8 +58,6 @@ struct srq_data {
};
struct srq_data *t4_init_srq(int srq_size);
-int cxgb4_get_srq_entry(struct net_device *dev,
- int srq_idx, struct srq_entry *entryp);
void do_srq_table_rpl(struct adapter *adap,
const struct cpl_srq_table_rpl *rpl);
#endif /* __CXGB4_SRQ_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 76de55306c4d..171750fad44f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -9348,7 +9348,7 @@ int t4_init_devlog_params(struct adapter *adap)
return 0;
}
- /* Otherwise, ask the firmware for it's Device Log Parameters.
+ /* Otherwise, ask the firmware for its Device Log Parameters.
*/
memset(&devlog_cmd, 0, sizeof(devlog_cmd));
devlog_cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_DEVLOG_CMD) |
@@ -10215,11 +10215,12 @@ out:
* t4_set_vf_mac_acl - Set MAC address for the specified VF
* @adapter: The adapter
* @vf: one of the VFs instantiated by the specified PF
+ * @start: The start port id associated with specified VF
* @naddr: the number of MAC addresses
* @addr: the MAC address(es) to be set to the specified VF
*/
int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
- unsigned int naddr, u8 *addr)
+ u8 start, unsigned int naddr, u8 *addr)
{
struct fw_acl_mac_cmd cmd;
@@ -10234,7 +10235,7 @@ int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd));
cmd.nmac = naddr;
- switch (adapter->pf) {
+ switch (start) {
case 3:
memcpy(cmd.macaddr3, addr, sizeof(cmd.macaddr3));
break;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 9ba0864592e8..2fbe0f059a0b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -1169,7 +1169,7 @@ static int cxgb4vf_change_mtu(struct net_device *dev, int new_mtu)
ret = t4vf_set_rxmode(pi->adapter, pi->viid, new_mtu,
-1, -1, -1, -1, true);
if (!ret)
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return ret;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 5b1d746e6563..31fab2415743 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -2062,7 +2062,7 @@ irq_handler_t t4vf_intr_handler(struct adapter *adapter)
*/
static void sge_rx_timer_cb(struct timer_list *t)
{
- struct adapter *adapter = from_timer(adapter, t, sge.rx_timer);
+ struct adapter *adapter = timer_container_of(adapter, t, sge.rx_timer);
struct sge *s = &adapter->sge;
unsigned int i;
@@ -2121,7 +2121,7 @@ static void sge_rx_timer_cb(struct timer_list *t)
*/
static void sge_tx_timer_cb(struct timer_list *t)
{
- struct adapter *adapter = from_timer(adapter, t, sge.tx_timer);
+ struct adapter *adapter = timer_container_of(adapter, t, sge.tx_timer);
struct sge *s = &adapter->sge;
unsigned int i, budget;
@@ -2191,7 +2191,7 @@ static void __iomem *bar2_address(struct adapter *adapter,
/**
* t4vf_sge_alloc_rxq - allocate an SGE RX Queue
* @adapter: the adapter
- * @rspq: pointer to to the new rxq's Response Queue to be filled in
+ * @rspq: pointer to the new rxq's Response Queue to be filled in
* @iqasynch: if 0, a normal rspq; if 1, an asynchronous event queue
* @dev: the network device associated with the new rspq
* @intr_dest: MSI-X vector index (overriden in MSI mode)
@@ -2609,9 +2609,9 @@ void t4vf_sge_stop(struct adapter *adapter)
struct sge *s = &adapter->sge;
if (s->rx_timer.function)
- del_timer_sync(&s->rx_timer);
+ timer_delete_sync(&s->rx_timer);
if (s->tx_timer.function)
- del_timer_sync(&s->tx_timer);
+ timer_delete_sync(&s->tx_timer);
}
/**
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 1c52592d3b65..56fcc531af2e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -706,7 +706,7 @@ int t4vf_fl_pkt_align(struct adapter *adapter)
* separately. The actual Ingress Packet Data alignment boundary
* within Packed Buffer Mode is the maximum of these two
* specifications. (Note that it makes no real practical sense to
- * have the Pading Boudary be larger than the Packing Boundary but you
+ * have the Padding Boundary be larger than the Packing Boundary but you
* could set the chip up that way and, in fact, legacy T4 code would
* end doing this because it would initialize the Padding Boundary and
* leave the Packing Boundary initialized to 0 (16 bytes).)
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
index c7338ac6a5bb..49b57bb5fac1 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
@@ -71,21 +71,22 @@
static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);
-static bool ch_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
static int ch_ipsec_uld_state_change(void *handle, enum cxgb4_state new_state);
static int ch_ipsec_xmit(struct sk_buff *skb, struct net_device *dev);
static void *ch_ipsec_uld_add(const struct cxgb4_lld_info *infop);
static void ch_ipsec_advance_esn_state(struct xfrm_state *x);
-static void ch_ipsec_xfrm_free_state(struct xfrm_state *x);
-static void ch_ipsec_xfrm_del_state(struct xfrm_state *x);
-static int ch_ipsec_xfrm_add_state(struct xfrm_state *x,
+static void ch_ipsec_xfrm_free_state(struct net_device *dev,
+ struct xfrm_state *x);
+static void ch_ipsec_xfrm_del_state(struct net_device *dev,
+ struct xfrm_state *x);
+static int ch_ipsec_xfrm_add_state(struct net_device *dev,
+ struct xfrm_state *x,
struct netlink_ext_ack *extack);
static const struct xfrmdev_ops ch_ipsec_xfrmdev_ops = {
.xdo_dev_state_add = ch_ipsec_xfrm_add_state,
.xdo_dev_state_delete = ch_ipsec_xfrm_del_state,
.xdo_dev_state_free = ch_ipsec_xfrm_free_state,
- .xdo_dev_offload_ok = ch_ipsec_offload_ok,
.xdo_dev_state_advance_esn = ch_ipsec_advance_esn_state,
};
@@ -225,7 +226,8 @@ out:
* returns 0 on success, negative error if failed to send message to FPGA
* positive error if FPGA returned a bad response
*/
-static int ch_ipsec_xfrm_add_state(struct xfrm_state *x,
+static int ch_ipsec_xfrm_add_state(struct net_device *dev,
+ struct xfrm_state *x,
struct netlink_ext_ack *extack)
{
struct ipsec_sa_entry *sa_entry;
@@ -288,9 +290,15 @@ static int ch_ipsec_xfrm_add_state(struct xfrm_state *x,
return -EINVAL;
}
+ if (unlikely(!try_module_get(THIS_MODULE))) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to acquire module reference");
+ return -ENODEV;
+ }
+
sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
if (!sa_entry) {
res = -ENOMEM;
+ module_put(THIS_MODULE);
goto out;
}
@@ -299,19 +307,20 @@ static int ch_ipsec_xfrm_add_state(struct xfrm_state *x,
sa_entry->esn = 1;
ch_ipsec_setkey(x, sa_entry);
x->xso.offload_handle = (unsigned long)sa_entry;
- try_module_get(THIS_MODULE);
out:
return res;
}
-static void ch_ipsec_xfrm_del_state(struct xfrm_state *x)
+static void ch_ipsec_xfrm_del_state(struct net_device *dev,
+ struct xfrm_state *x)
{
/* do nothing */
if (!x->xso.offload_handle)
return;
}
-static void ch_ipsec_xfrm_free_state(struct xfrm_state *x)
+static void ch_ipsec_xfrm_free_state(struct net_device *dev,
+ struct xfrm_state *x)
{
struct ipsec_sa_entry *sa_entry;
@@ -323,20 +332,6 @@ static void ch_ipsec_xfrm_free_state(struct xfrm_state *x)
module_put(THIS_MODULE);
}
-static bool ch_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
-{
- if (x->props.family == AF_INET) {
- /* Offload with IP options is not supported yet */
- if (ip_hdr(skb)->ihl > 5)
- return false;
- } else {
- /* Offload with IPv6 extension headers is not support yet */
- if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
- return false;
- }
- return true;
-}
-
static void ch_ipsec_advance_esn_state(struct xfrm_state *x)
{
/* do nothing */
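Aside (not part of the patch): the add_state hunk above moves the module reference ahead of the allocation so the error path can release it; previously try_module_get() ran only after success and its own failure was ignored. The shape of that pattern, with hypothetical names:

#include <linux/module.h>
#include <linux/slab.h>

struct sa_entry {
	u32 key;
};

static int example_add_state(struct sa_entry **out)
{
	struct sa_entry *sa;

	if (unlikely(!try_module_get(THIS_MODULE)))	/* pin the module first */
		return -ENODEV;

	sa = kzalloc(sizeof(*sa), GFP_KERNEL);
	if (!sa) {
		module_put(THIS_MODULE);		/* undo on failure */
		return -ENOMEM;
	}

	*out = sa;
	return 0;
}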
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
index 6482728794dd..4e2096e49684 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
@@ -10,6 +10,7 @@
#include <net/ipv6.h>
#include <linux/netdevice.h>
#include <crypto/aes.h>
+#include <linux/skbuff_ref.h>
#include "chcr_ktls.h"
static LIST_HEAD(uld_ctx_list);
@@ -1639,6 +1640,7 @@ static int chcr_ktls_tunnel_pkt(struct chcr_ktls_info *tx_info,
cxgb4_write_sgl(skb, &q->q, pos, end, 0, sgl_sdesc->addr);
sgl_sdesc->skb = skb;
chcr_txq_advance(&q->q, ndesc);
+ skb_tx_timestamp(skb);
cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
return 0;
}
@@ -1902,7 +1904,6 @@ static int chcr_ktls_sw_fallback(struct sk_buff *skb,
th = tcp_hdr(nskb);
skb_offset = skb_tcp_all_headers(nskb);
data_len = nskb->len - skb_offset;
- skb_tx_timestamp(nskb);
if (chcr_ktls_tunnel_pkt(tx_info, nskb, q))
goto out;
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
index 7ff82b6778ba..21e0dfeff158 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
@@ -573,7 +573,6 @@ int send_tx_flowc_wr(struct sock *sk, int compl,
u32 snd_nxt, u32 rcv_nxt);
void chtls_tcp_push(struct sock *sk, int flags);
int chtls_push_frames(struct chtls_sock *csk, int comp);
-int chtls_set_tcb_tflag(struct sock *sk, unsigned int bit_pos, int val);
void chtls_set_tcb_field_rpl_skb(struct sock *sk, u16 word,
u64 mask, u64 val, u8 cookie,
int through_l2t);
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
index 6f6525983130..ee0154337a9c 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
@@ -171,7 +171,7 @@ static void chtls_purge_receive_queue(struct sock *sk)
struct sk_buff *skb;
while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
- skb_dst_set(skb, (void *)NULL);
+ skb_dstref_steal(skb);
kfree_skb(skb);
}
}
@@ -194,7 +194,7 @@ static void chtls_purge_recv_queue(struct sock *sk)
struct sk_buff *skb;
while ((skb = __skb_dequeue(&tlsk->sk_recv_queue)) != NULL) {
- skb_dst_set(skb, NULL);
+ skb_dstref_steal(skb);
kfree_skb(skb);
}
}
@@ -505,7 +505,7 @@ static void reset_listen_child(struct sock *child)
chtls_send_reset(child, CPL_ABORT_SEND_RST, skb);
sock_orphan(child);
- INC_ORPHAN_COUNT(child);
+ tcp_orphan_count_inc();
if (child->sk_state == TCP_CLOSE)
inet_csk_destroy_sock(child);
}
@@ -870,7 +870,7 @@ static void do_abort_syn_rcv(struct sock *child, struct sock *parent)
* created only after 3 way handshake is done.
*/
sock_orphan(child);
- INC_ORPHAN_COUNT(child);
+ tcp_orphan_count_inc();
chtls_release_resources(child);
chtls_conn_done(child);
} else {
@@ -951,6 +951,7 @@ static unsigned int chtls_select_mss(const struct chtls_sock *csk,
struct tcp_sock *tp;
unsigned int mss;
struct sock *sk;
+ u16 user_mss;
mss = ntohs(req->tcpopt.mss);
sk = csk->sk;
@@ -969,8 +970,9 @@ static unsigned int chtls_select_mss(const struct chtls_sock *csk,
tcpoptsz += round_up(TCPOLEN_TIMESTAMP, 4);
tp->advmss = dst_metric_advmss(dst);
- if (USER_MSS(tp) && tp->advmss > USER_MSS(tp))
- tp->advmss = USER_MSS(tp);
+ user_mss = USER_MSS(tp);
+ if (user_mss && tp->advmss > user_mss)
+ tp->advmss = user_mss;
if (tp->advmss > pmtu - iphdrsz)
tp->advmss = pmtu - iphdrsz;
if (mss && tp->advmss > mss)
@@ -1197,12 +1199,12 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
struct ipv6_pinfo *newnp = inet6_sk(newsk);
struct ipv6_pinfo *np = inet6_sk(lsk);
- inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
+ newinet->pinet6 = &newtcp6sk->inet6;
+ newinet->ipv6_fl_list = NULL;
memcpy(newnp, np, sizeof(struct ipv6_pinfo));
newsk->sk_v6_daddr = treq->ir_v6_rmt_addr;
newsk->sk_v6_rcv_saddr = treq->ir_v6_loc_addr;
inet6_sk(newsk)->saddr = treq->ir_v6_loc_addr;
- newnp->ipv6_fl_list = NULL;
newnp->pktoptions = NULL;
newsk->sk_bound_dev_if = treq->ir_iif;
newinet->inet_opt = NULL;
@@ -1734,7 +1736,7 @@ static int chtls_rx_data(struct chtls_dev *cdev, struct sk_buff *skb)
pr_err("can't find conn. for hwtid %u.\n", hwtid);
return -EINVAL;
}
- skb_dst_set(skb, NULL);
+ skb_dstref_steal(skb);
process_cpl_msg(chtls_recv_data, sk, skb);
return 0;
}
@@ -1786,7 +1788,7 @@ static int chtls_rx_pdu(struct chtls_dev *cdev, struct sk_buff *skb)
pr_err("can't find conn. for hwtid %u.\n", hwtid);
return -EINVAL;
}
- skb_dst_set(skb, NULL);
+ skb_dstref_steal(skb);
process_cpl_msg(chtls_recv_pdu, sk, skb);
return 0;
}
@@ -1855,7 +1857,7 @@ static int chtls_rx_cmp(struct chtls_dev *cdev, struct sk_buff *skb)
pr_err("can't find conn. for hwtid %u.\n", hwtid);
return -EINVAL;
}
- skb_dst_set(skb, NULL);
+ skb_dstref_steal(skb);
process_cpl_msg(chtls_rx_hdr, sk, skb);
return 0;
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h
index f61ca657601c..29ceff5a5fcb 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h
@@ -90,12 +90,11 @@ struct deferred_skb_cb {
#define SND_WSCALE(tp) ((tp)->rx_opt.snd_wscale)
#define RCV_WSCALE(tp) ((tp)->rx_opt.rcv_wscale)
-#define USER_MSS(tp) ((tp)->rx_opt.user_mss)
+#define USER_MSS(tp) (READ_ONCE((tp)->rx_opt.user_mss))
#define TS_RECENT_STAMP(tp) ((tp)->rx_opt.ts_recent_stamp)
#define WSCALE_OK(tp) ((tp)->rx_opt.wscale_ok)
#define TSTAMP_OK(tp) ((tp)->rx_opt.tstamp_ok)
#define SACK_OK(tp) ((tp)->rx_opt.sack_ok)
-#define INC_ORPHAN_COUNT(sk) this_cpu_inc(*(sk)->sk_prot->orphan_count)
/* TLS SKB */
#define skb_ulp_tls_inline(skb) (ULP_SKB_CB(skb)->ulp.tls.ofld)
@@ -171,14 +170,14 @@ static inline void chtls_set_req_addr(struct request_sock *oreq,
static inline void chtls_free_skb(struct sock *sk, struct sk_buff *skb)
{
- skb_dst_set(skb, NULL);
+ skb_dstref_steal(skb);
__skb_unlink(skb, &sk->sk_receive_queue);
__kfree_skb(skb);
}
static inline void chtls_kfree_skb(struct sock *sk, struct sk_buff *skb)
{
- skb_dst_set(skb, NULL);
+ skb_dstref_steal(skb);
__skb_unlink(skb, &sk->sk_receive_queue);
kfree_skb(skb);
}
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c
index 1e67140b0f80..fab6df21f01c 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c
@@ -106,15 +106,6 @@ void chtls_set_tcb_field_rpl_skb(struct sock *sk, u16 word,
send_or_defer(sk, tcp_sk(sk), skb, through_l2t);
}
-/*
- * Set one of the t_flags bits in the TCB.
- */
-int chtls_set_tcb_tflag(struct sock *sk, unsigned int bit_pos, int val)
-{
- return chtls_set_tcb_field(sk, 1, 1ULL << bit_pos,
- (u64)val << bit_pos);
-}
-
static int chtls_set_tcb_keyid(struct sock *sk, int keyid)
{
return chtls_set_tcb_field(sk, 31, 0xFFFFFFFFULL, keyid);
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
index d567e42e1760..ee19933e2cca 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
@@ -159,19 +159,13 @@ static u8 tcp_state_to_flowc_state(u8 state)
int send_tx_flowc_wr(struct sock *sk, int compl,
u32 snd_nxt, u32 rcv_nxt)
{
- struct flowc_packed {
- struct fw_flowc_wr fc;
- struct fw_flowc_mnemval mnemval[FW_FLOWC_MNEM_MAX];
- } __packed sflowc;
+ DEFINE_RAW_FLEX(struct fw_flowc_wr, flowc, mnemval, FW_FLOWC_MNEM_MAX);
int nparams, paramidx, flowclen16, flowclen;
- struct fw_flowc_wr *flowc;
struct chtls_sock *csk;
struct tcp_sock *tp;
csk = rcu_dereference_sk_user_data(sk);
tp = tcp_sk(sk);
- memset(&sflowc, 0, sizeof(sflowc));
- flowc = &sflowc.fc;
#define FLOWC_PARAM(__m, __v) \
do { \
@@ -1096,8 +1090,7 @@ new_buf:
copy = size;
if (msg->msg_flags & MSG_SPLICE_PAGES) {
- err = skb_splice_from_iter(skb, &msg->msg_iter, copy,
- sk->sk_allocation);
+ err = skb_splice_from_iter(skb, &msg->msg_iter, copy);
if (err < 0) {
if (err == -EMSGSIZE)
goto new_buf;
@@ -1435,7 +1428,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
continue;
found_ok_skb:
if (!skb->len) {
- skb_dst_set(skb, NULL);
+ skb_dstref_steal(skb);
__skb_unlink(skb, &sk->sk_receive_queue);
kfree_skb(skb);
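Aside (not part of the patch): send_tx_flowc_wr() above swaps a hand-rolled on-stack wrapper struct for DEFINE_RAW_FLEX() from <linux/overflow.h>, which declares a zero-initialised stack buffer sized for a fixed number of trailing flexible-array elements and exposes a typed pointer to it. A minimal sketch with hypothetical names:

#include <linux/overflow.h>
#include <linux/types.h>

struct msg_hdr {
	u16 op;
	u16 nvals;
	u32 vals[];	/* flexible array member */
};

static void fill_msg(void)
{
	/* "msg" points at a zeroed on-stack buffer sized for the header
	 * plus 9 trailing vals[] entries (the count must be constant). */
	DEFINE_RAW_FLEX(struct msg_hdr, msg, vals, 9);
	int i;

	msg->op = 1;
	msg->nvals = 9;
	for (i = 0; i < 9; i++)
		msg->vals[i] = i;
}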
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c
index 455a54708be4..daa1ebaef511 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c
@@ -342,12 +342,13 @@ static struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
{
struct sk_buff *skb;
- /* Allocate space for cpl_pass_accpet_req which will be synthesized by
- * driver. Once driver synthesizes cpl_pass_accpet_req the skb will go
+ /* Allocate space for cpl_pass_accept_req which will be synthesized by
+ * driver. Once driver synthesizes cpl_pass_accept_req the skb will go
* through the regular cpl_pass_accept_req processing in TOM.
*/
- skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req)
- - pktshift, GFP_ATOMIC);
+ skb = alloc_skb(size_add(gl->tot_len,
+ sizeof(struct cpl_pass_accept_req)) -
+ pktshift, GFP_ATOMIC);
if (unlikely(!skb))
return NULL;
__skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req)
diff --git a/drivers/net/ethernet/chelsio/libcxgb/Makefile b/drivers/net/ethernet/chelsio/libcxgb/Makefile
index aa79264e72ba..fbedc31674b3 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/Makefile
+++ b/drivers/net/ethernet/chelsio/libcxgb/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(srctree)/$(src)/../cxgb4
+ccflags-y := -I $(src)/../cxgb4
obj-$(CONFIG_CHELSIO_LIB) += libcxgb.o
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
index 854d87e1125c..2e3973a32d9d 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
@@ -342,10 +342,10 @@ int cxgbi_ppm_release(struct cxgbi_ppm *ppm)
}
EXPORT_SYMBOL(cxgbi_ppm_release);
-static struct cxgbi_ppm_pool *ppm_alloc_cpu_pool(unsigned int *total,
- unsigned int *pcpu_ppmax)
+static struct cxgbi_ppm_pool __percpu *
+ppm_alloc_cpu_pool(unsigned int *total, unsigned int *pcpu_ppmax)
{
- struct cxgbi_ppm_pool *pools;
+ struct cxgbi_ppm_pool __percpu *pools;
unsigned int ppmax = (*total) / num_possible_cpus();
unsigned int max = (PCPU_MIN_UNIT_SIZE - sizeof(*pools)) << 3;
unsigned int bmap;
@@ -392,7 +392,7 @@ int cxgbi_ppm_init(void **ppm_pp, struct net_device *ndev,
unsigned int iscsi_edram_size)
{
struct cxgbi_ppm *ppm = (struct cxgbi_ppm *)(*ppm_pp);
- struct cxgbi_ppm_pool *pool = NULL;
+ struct cxgbi_ppm_pool __percpu *pool = NULL;
unsigned int pool_index_max = 0;
unsigned int ppmax_pool = 0;
unsigned int ppod_bmap_size;
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index 0a21a10a791c..fa5857923db4 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -1903,7 +1903,7 @@ static struct platform_driver cs89x0_driver = {
.name = DRV_NAME,
.of_match_table = of_match_ptr(cs89x0_match),
},
- .remove_new = cs89x0_platform_remove,
+ .remove = cs89x0_platform_remove,
};
module_platform_driver_probe(cs89x0_driver, cs89x0_platform_probe);
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index 1f495cfd7959..a4972457edd9 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -16,13 +16,12 @@
#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
-#include <linux/platform_data/eth-ep93xx.h>
-
#define DRV_MODULE_NAME "ep93xx-eth"
#define RX_QUEUE_ENTRIES 64
@@ -738,25 +737,6 @@ static const struct net_device_ops ep93xx_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
};
-static struct net_device *ep93xx_dev_alloc(struct ep93xx_eth_data *data)
-{
- struct net_device *dev;
-
- dev = alloc_etherdev(sizeof(struct ep93xx_priv));
- if (dev == NULL)
- return NULL;
-
- eth_hw_addr_set(dev, data->dev_addr);
-
- dev->ethtool_ops = &ep93xx_ethtool_ops;
- dev->netdev_ops = &ep93xx_netdev_ops;
-
- dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
-
- return dev;
-}
-
-
static void ep93xx_eth_remove(struct platform_device *pdev)
{
struct net_device *dev;
@@ -786,27 +766,49 @@ static void ep93xx_eth_remove(struct platform_device *pdev)
static int ep93xx_eth_probe(struct platform_device *pdev)
{
- struct ep93xx_eth_data *data;
struct net_device *dev;
struct ep93xx_priv *ep;
struct resource *mem;
+ void __iomem *base_addr;
+ struct device_node *np;
+ u8 addr[ETH_ALEN];
+ u32 phy_id;
int irq;
int err;
if (pdev == NULL)
return -ENODEV;
- data = dev_get_platdata(&pdev->dev);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
if (!mem || irq < 0)
return -ENXIO;
- dev = ep93xx_dev_alloc(data);
+ base_addr = ioremap(mem->start, resource_size(mem));
+ if (!base_addr)
+ return dev_err_probe(&pdev->dev, -EIO, "Failed to ioremap ethernet registers\n");
+
+ np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
+ if (!np)
+ return dev_err_probe(&pdev->dev, -ENODEV, "Please provide \"phy-handle\"\n");
+
+ err = of_property_read_u32(np, "reg", &phy_id);
+ of_node_put(np);
+ if (err)
+ return dev_err_probe(&pdev->dev, -ENOENT, "Failed to locate \"phy_id\"\n");
+
+ dev = alloc_etherdev(sizeof(struct ep93xx_priv));
if (dev == NULL) {
err = -ENOMEM;
goto err_out;
}
+
+ memcpy_fromio(addr, base_addr + 0x50, ETH_ALEN);
+ eth_hw_addr_set(dev, addr);
+ dev->ethtool_ops = &ep93xx_ethtool_ops;
+ dev->netdev_ops = &ep93xx_netdev_ops;
+ dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
+
ep = netdev_priv(dev);
ep->dev = dev;
SET_NETDEV_DEV(dev, &pdev->dev);
@@ -822,15 +824,10 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
goto err_out;
}
- ep->base_addr = ioremap(mem->start, resource_size(mem));
- if (ep->base_addr == NULL) {
- dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n");
- err = -EIO;
- goto err_out;
- }
+ ep->base_addr = base_addr;
ep->irq = irq;
- ep->mii.phy_id = data->phy_id;
+ ep->mii.phy_id = phy_id;
ep->mii.phy_id_mask = 0x1f;
ep->mii.reg_num_mask = 0x1f;
ep->mii.dev = dev;
@@ -857,12 +854,18 @@ err_out:
return err;
}
+static const struct of_device_id ep93xx_eth_of_ids[] = {
+ { .compatible = "cirrus,ep9301-eth" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ep93xx_eth_of_ids);
static struct platform_driver ep93xx_eth_driver = {
.probe = ep93xx_eth_probe,
- .remove_new = ep93xx_eth_remove,
+ .remove = ep93xx_eth_remove,
.driver = {
.name = "ep93xx-eth",
+ .of_match_table = ep93xx_eth_of_ids,
},
};
diff --git a/drivers/net/ethernet/cirrus/mac89x0.c b/drivers/net/ethernet/cirrus/mac89x0.c
index 887876f35f10..6723df9b65d9 100644
--- a/drivers/net/ethernet/cirrus/mac89x0.c
+++ b/drivers/net/ethernet/cirrus/mac89x0.c
@@ -554,6 +554,7 @@ static int set_mac_address(struct net_device *dev, void *addr)
return 0;
}
+MODULE_DESCRIPTION("Macintosh CS89x0-based Ethernet driver");
MODULE_LICENSE("GPL");
static void mac89x0_device_remove(struct platform_device *pdev)
@@ -567,7 +568,7 @@ static void mac89x0_device_remove(struct platform_device *pdev)
static struct platform_driver mac89x0_platform_driver = {
.probe = mac89x0_device_probe,
- .remove_new = mac89x0_device_remove,
+ .remove = mac89x0_device_remove,
.driver = {
.name = "mac89x0",
},
diff --git a/drivers/net/ethernet/cisco/enic/Kconfig b/drivers/net/ethernet/cisco/enic/Kconfig
index ad80c0fa96a6..96709875fe4f 100644
--- a/drivers/net/ethernet/cisco/enic/Kconfig
+++ b/drivers/net/ethernet/cisco/enic/Kconfig
@@ -6,5 +6,6 @@
config ENIC
tristate "Cisco VIC Ethernet NIC Support"
depends on PCI
+ select PAGE_POOL
help
This enables the support for the Cisco VIC Ethernet card.
diff --git a/drivers/net/ethernet/cisco/enic/Makefile b/drivers/net/ethernet/cisco/enic/Makefile
index c3b6febfdbe4..a96b8332e6e2 100644
--- a/drivers/net/ethernet/cisco/enic/Makefile
+++ b/drivers/net/ethernet/cisco/enic/Makefile
@@ -3,5 +3,5 @@ obj-$(CONFIG_ENIC) := enic.o
enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \
enic_res.o enic_dev.o enic_pp.o vnic_dev.o vnic_rq.o vnic_vic.o \
- enic_ethtool.o enic_api.o enic_clsf.o
+ enic_ethtool.o enic_api.o enic_clsf.o enic_rq.o enic_wq.o
diff --git a/drivers/net/ethernet/cisco/enic/cq_desc.h b/drivers/net/ethernet/cisco/enic/cq_desc.h
index 462c5435a206..bfb3f14e89f5 100644
--- a/drivers/net/ethernet/cisco/enic/cq_desc.h
+++ b/drivers/net/ethernet/cisco/enic/cq_desc.h
@@ -40,28 +40,7 @@ struct cq_desc {
#define CQ_DESC_COMP_NDX_BITS 12
#define CQ_DESC_COMP_NDX_MASK ((1 << CQ_DESC_COMP_NDX_BITS) - 1)
-static inline void cq_desc_dec(const struct cq_desc *desc_arg,
- u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
-{
- const struct cq_desc *desc = desc_arg;
- const u8 type_color = desc->type_color;
-
- *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
-
- /*
- * Make sure color bit is read from desc *before* other fields
- * are read from desc. Hardware guarantees color bit is last
- * bit (byte) written. Adding the rmb() prevents the compiler
- * and/or CPU from reordering the reads which would potentially
- * result in reading stale values.
- */
-
- rmb();
-
- *type = type_color & CQ_DESC_TYPE_MASK;
- *q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK;
- *completed_index = le16_to_cpu(desc->completed_index) &
- CQ_DESC_COMP_NDX_MASK;
-}
+#define CQ_DESC_32_FI_MASK (BIT(0) | BIT(1))
+#define CQ_DESC_64_FI_MASK (BIT(0) | BIT(1))
#endif /* _CQ_DESC_H_ */
diff --git a/drivers/net/ethernet/cisco/enic/cq_enet_desc.h b/drivers/net/ethernet/cisco/enic/cq_enet_desc.h
index d25426470a29..50787cff29db 100644
--- a/drivers/net/ethernet/cisco/enic/cq_enet_desc.h
+++ b/drivers/net/ethernet/cisco/enic/cq_enet_desc.h
@@ -17,12 +17,22 @@ struct cq_enet_wq_desc {
u8 type_color;
};
-static inline void cq_enet_wq_desc_dec(struct cq_enet_wq_desc *desc,
- u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
-{
- cq_desc_dec((struct cq_desc *)desc, type,
- color, q_number, completed_index);
-}
+/*
+ * Defines and Capabilities for CMD_CQ_ENTRY_SIZE_SET
+ */
+#define VNIC_RQ_ALL (~0ULL)
+
+#define VNIC_RQ_CQ_ENTRY_SIZE_16 0
+#define VNIC_RQ_CQ_ENTRY_SIZE_32 1
+#define VNIC_RQ_CQ_ENTRY_SIZE_64 2
+
+#define VNIC_RQ_CQ_ENTRY_SIZE_16_CAPABLE BIT(VNIC_RQ_CQ_ENTRY_SIZE_16)
+#define VNIC_RQ_CQ_ENTRY_SIZE_32_CAPABLE BIT(VNIC_RQ_CQ_ENTRY_SIZE_32)
+#define VNIC_RQ_CQ_ENTRY_SIZE_64_CAPABLE BIT(VNIC_RQ_CQ_ENTRY_SIZE_64)
+
+#define VNIC_RQ_CQ_ENTRY_SIZE_ALL_BIT (VNIC_RQ_CQ_ENTRY_SIZE_16_CAPABLE | \
+ VNIC_RQ_CQ_ENTRY_SIZE_32_CAPABLE | \
+ VNIC_RQ_CQ_ENTRY_SIZE_64_CAPABLE)
/* Completion queue descriptor: Ethernet receive queue, 16B */
struct cq_enet_rq_desc {
@@ -36,6 +46,45 @@ struct cq_enet_rq_desc {
u8 type_color;
};
+/* Completion queue descriptor: Ethernet receive queue, 32B */
+struct cq_enet_rq_desc_32 {
+ __le16 completed_index_flags;
+ __le16 q_number_rss_type_flags;
+ __le32 rss_hash;
+ __le16 bytes_written_flags;
+ __le16 vlan;
+ __le16 checksum_fcoe;
+ u8 flags;
+ u8 fetch_index_flags;
+ __le32 time_stamp;
+ __le16 time_stamp2;
+ __le16 pie_info;
+ __le32 pie_info2;
+ __le16 pie_info3;
+ u8 pie_info4;
+ u8 type_color;
+};
+
+/* Completion queue descriptor: Ethernet receive queue, 64B */
+struct cq_enet_rq_desc_64 {
+ __le16 completed_index_flags;
+ __le16 q_number_rss_type_flags;
+ __le32 rss_hash;
+ __le16 bytes_written_flags;
+ __le16 vlan;
+ __le16 checksum_fcoe;
+ u8 flags;
+ u8 fetch_index_flags;
+ __le32 time_stamp;
+ __le16 time_stamp2;
+ __le16 pie_info;
+ __le32 pie_info2;
+ __le16 pie_info3;
+ u8 pie_info4;
+ u8 reserved[32];
+ u8 type_color;
+};
+
#define CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT (0x1 << 12)
#define CQ_ENET_RQ_DESC_FLAGS_FCOE (0x1 << 13)
#define CQ_ENET_RQ_DESC_FLAGS_EOP (0x1 << 14)
@@ -88,85 +137,4 @@ struct cq_enet_rq_desc {
#define CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT (0x1 << 6)
#define CQ_ENET_RQ_DESC_FLAGS_FCS_OK (0x1 << 7)
-static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc,
- u8 *type, u8 *color, u16 *q_number, u16 *completed_index,
- u8 *ingress_port, u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type,
- u8 *csum_not_calc, u32 *rss_hash, u16 *bytes_written, u8 *packet_error,
- u8 *vlan_stripped, u16 *vlan_tci, u16 *checksum, u8 *fcoe_sof,
- u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error, u8 *fcoe_eof,
- u8 *tcp_udp_csum_ok, u8 *udp, u8 *tcp, u8 *ipv4_csum_ok,
- u8 *ipv6, u8 *ipv4, u8 *ipv4_fragment, u8 *fcs_ok)
-{
- u16 completed_index_flags;
- u16 q_number_rss_type_flags;
- u16 bytes_written_flags;
-
- cq_desc_dec((struct cq_desc *)desc, type,
- color, q_number, completed_index);
-
- completed_index_flags = le16_to_cpu(desc->completed_index_flags);
- q_number_rss_type_flags =
- le16_to_cpu(desc->q_number_rss_type_flags);
- bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);
-
- *ingress_port = (completed_index_flags &
- CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0;
- *fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ?
- 1 : 0;
- *eop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_EOP) ?
- 1 : 0;
- *sop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_SOP) ?
- 1 : 0;
-
- *rss_type = (u8)((q_number_rss_type_flags >> CQ_DESC_Q_NUM_BITS) &
- CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
- *csum_not_calc = (q_number_rss_type_flags &
- CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ? 1 : 0;
-
- *rss_hash = le32_to_cpu(desc->rss_hash);
-
- *bytes_written = bytes_written_flags &
- CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
- *packet_error = (bytes_written_flags &
- CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ? 1 : 0;
- *vlan_stripped = (bytes_written_flags &
- CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0;
-
- /*
- * Tag Control Information(16) = user_priority(3) + cfi(1) + vlan(12)
- */
- *vlan_tci = le16_to_cpu(desc->vlan);
-
- if (*fcoe) {
- *fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) &
- CQ_ENET_RQ_DESC_FCOE_SOF_MASK);
- *fcoe_fc_crc_ok = (desc->flags &
- CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0;
- *fcoe_enc_error = (desc->flags &
- CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0;
- *fcoe_eof = (u8)((le16_to_cpu(desc->checksum_fcoe) >>
- CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) &
- CQ_ENET_RQ_DESC_FCOE_EOF_MASK);
- *checksum = 0;
- } else {
- *fcoe_sof = 0;
- *fcoe_fc_crc_ok = 0;
- *fcoe_enc_error = 0;
- *fcoe_eof = 0;
- *checksum = le16_to_cpu(desc->checksum_fcoe);
- }
-
- *tcp_udp_csum_ok =
- (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0;
- *udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0;
- *tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0;
- *ipv4_csum_ok =
- (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0;
- *ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0;
- *ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0;
- *ipv4_fragment =
- (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0;
- *fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 1 : 0;
-}
-
#endif /* _CQ_ENET_DESC_H_ */
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 300ad05ee05b..301b3f3114af 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -17,21 +17,28 @@
#include "vnic_nic.h"
#include "vnic_rss.h"
#include <linux/irq.h>
+#include <net/page_pool/helpers.h>
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
#define ENIC_BARS_MAX 6
-#define ENIC_WQ_MAX 8
-#define ENIC_RQ_MAX 8
-#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX)
-#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2)
+#define ENIC_WQ_MAX 256
+#define ENIC_RQ_MAX 256
+#define ENIC_RQ_MIN_DEFAULT 8
#define ENIC_WQ_NAPI_BUDGET 256
#define ENIC_AIC_LARGE_PKT_DIFF 3
+enum ext_cq {
+ ENIC_RQ_CQ_ENTRY_SIZE_16,
+ ENIC_RQ_CQ_ENTRY_SIZE_32,
+ ENIC_RQ_CQ_ENTRY_SIZE_64,
+ ENIC_RQ_CQ_ENTRY_SIZE_MAX,
+};
+
struct enic_msix_entry {
int requested;
char devname[IFNAMSIZ + 8];
@@ -77,6 +84,10 @@ struct enic_rx_coal {
#define ENIC_SET_INSTANCE (1 << 3)
#define ENIC_SET_HOST (1 << 4)
+#define MAX_TSO BIT(16)
+#define WQ_ENET_MAX_DESC_LEN BIT(WQ_ENET_LEN_BITS)
+#define ENIC_DESC_MAX_SPLITS (MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)
+
struct enic_port_profile {
u32 set;
u8 request;
@@ -128,6 +139,53 @@ struct vxlan_offload {
u8 flags;
};
+struct enic_wq_stats {
+ u64 packets; /* pkts queued for Tx */
+ u64 stopped; /* Tx ring almost full, queue stopped */
+ u64 wake; /* Tx ring no longer full, queue woken up*/
+ u64 tso; /* non-encap tso pkt */
+ u64 encap_tso; /* encap tso pkt */
+ u64 encap_csum; /* encap HW csum */
+ u64 csum_partial; /* skb->ip_summed = CHECKSUM_PARTIAL */
+ u64 csum_none; /* HW csum not required */
+ u64 bytes; /* bytes queued for Tx */
+ u64 add_vlan; /* HW adds vlan tag */
+ u64 cq_work; /* Tx completions processed */
+ u64 cq_bytes; /* Tx bytes processed */
+ u64 null_pkt; /* skb length <= 0 */
+ u64 skb_linear_fail; /* linearize failures */
+ u64 desc_full_awake; /* TX ring full while queue awake */
+};
+
+struct enic_rq_stats {
+ u64 packets; /* pkts received */
+ u64 bytes; /* bytes received */
+ u64 l4_rss_hash; /* hashed on l4 */
+ u64 l3_rss_hash; /* hashed on l3 */
+ u64 csum_unnecessary; /* HW verified csum */
+ u64 csum_unnecessary_encap; /* HW verified csum on encap packet */
+ u64 vlan_stripped; /* HW stripped vlan */
+ u64 napi_complete; /* napi complete intr reenabled */
+ u64 napi_repoll; /* napi poll again */
+ u64 bad_fcs; /* bad pkts */
+ u64 pkt_truncated; /* truncated pkts */
+ u64 no_skb; /* out of skbs */
+ u64 desc_skip; /* Rx pkt went into later buffer */
+ u64 pp_alloc_fail; /* page pool alloc failure */
+};
+
+struct enic_wq {
+ spinlock_t lock; /* spinlock for wq */
+ struct vnic_wq vwq;
+ struct enic_wq_stats stats;
+} ____cacheline_aligned;
+
+struct enic_rq {
+ struct vnic_rq vrq;
+ struct enic_rq_stats stats;
+ struct page_pool *pool;
+} ____cacheline_aligned;
+
/* Per-instance private data structure */
struct enic {
struct net_device *netdev;
@@ -139,8 +197,8 @@ struct enic {
struct work_struct reset;
struct work_struct tx_hang_reset;
struct work_struct change_mtu_work;
- struct msix_entry msix_entry[ENIC_INTR_MAX];
- struct enic_msix_entry msix[ENIC_INTR_MAX];
+ struct msix_entry *msix_entry;
+ struct enic_msix_entry *msix;
u32 msg_enable;
spinlock_t devcmd_lock;
u8 mac_addr[ETH_ALEN];
@@ -159,33 +217,30 @@ struct enic {
bool enic_api_busy;
struct enic_port_profile *pp;
- /* work queue cache line section */
- ____cacheline_aligned struct vnic_wq wq[ENIC_WQ_MAX];
- spinlock_t wq_lock[ENIC_WQ_MAX];
+ struct enic_wq *wq;
+ unsigned int wq_avail;
unsigned int wq_count;
u16 loop_enable;
u16 loop_tag;
- /* receive queue cache line section */
- ____cacheline_aligned struct vnic_rq rq[ENIC_RQ_MAX];
+ struct enic_rq *rq;
+ unsigned int rq_avail;
unsigned int rq_count;
struct vxlan_offload vxlan;
- u64 rq_truncated_pkts;
- u64 rq_bad_fcs;
- struct napi_struct napi[ENIC_RQ_MAX + ENIC_WQ_MAX];
+ struct napi_struct *napi;
- /* interrupt resource cache line section */
- ____cacheline_aligned struct vnic_intr intr[ENIC_INTR_MAX];
+ struct vnic_intr *intr;
+ unsigned int intr_avail;
unsigned int intr_count;
u32 __iomem *legacy_pba; /* memory-mapped */
- /* completion queue cache line section */
- ____cacheline_aligned struct vnic_cq cq[ENIC_CQ_MAX];
+ struct vnic_cq *cq;
+ unsigned int cq_avail;
unsigned int cq_count;
struct enic_rfs_flw_tbl rfs_h;
- u32 rx_copybreak;
u8 rss_key[ENIC_RSS_LEN];
struct vnic_gen_stats gen_stats;
+ enum ext_cq ext_cq;
};
static inline struct net_device *vnic_get_netdev(struct vnic_dev *vdev)
@@ -238,18 +293,28 @@ static inline unsigned int enic_msix_wq_intr(struct enic *enic,
return enic->cq[enic_cq_wq(enic, wq)].interrupt_offset;
}
-static inline unsigned int enic_msix_err_intr(struct enic *enic)
-{
- return enic->rq_count + enic->wq_count;
-}
+/* MSIX interrupts are organized as the error interrupt, then the notify
+ * interrupt followed by all the I/O interrupts. The error interrupt needs
+ * to fit in 7 bits due to hardware constraints
+ */
+#define ENIC_MSIX_RESERVED_INTR 2
+#define ENIC_MSIX_ERR_INTR 0
+#define ENIC_MSIX_NOTIFY_INTR 1
+#define ENIC_MSIX_IO_INTR_BASE ENIC_MSIX_RESERVED_INTR
+#define ENIC_MSIX_MIN_INTR (ENIC_MSIX_RESERVED_INTR + 2)
#define ENIC_LEGACY_IO_INTR 0
#define ENIC_LEGACY_ERR_INTR 1
#define ENIC_LEGACY_NOTIFY_INTR 2
+static inline unsigned int enic_msix_err_intr(struct enic *enic)
+{
+ return ENIC_MSIX_ERR_INTR;
+}
+
static inline unsigned int enic_msix_notify_intr(struct enic *enic)
{
- return enic->rq_count + enic->wq_count + 1;
+ return ENIC_MSIX_NOTIFY_INTR;
}
static inline bool enic_is_err_intr(struct enic *enic, int intr)
@@ -297,5 +362,6 @@ int enic_is_valid_vf(struct enic *enic, int vf);
int enic_is_dynamic(struct enic *enic);
void enic_set_ethtool_ops(struct net_device *netdev);
int __enic_set_rsskey(struct enic *enic);
+void enic_ext_cq(struct enic *enic);
#endif /* _ENIC_H_ */
diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.c b/drivers/net/ethernet/cisco/enic/enic_clsf.c
index 9900993b6aea..837f954873ee 100644
--- a/drivers/net/ethernet/cisco/enic/enic_clsf.c
+++ b/drivers/net/ethernet/cisco/enic/enic_clsf.c
@@ -125,7 +125,7 @@ struct enic_rfs_fltr_node *htbl_fltr_search(struct enic *enic, u16 fltr_id)
#ifdef CONFIG_RFS_ACCEL
void enic_flow_may_expire(struct timer_list *t)
{
- struct enic *enic = from_timer(enic, t, rfs_h.rfs_may_expire);
+ struct enic *enic = timer_container_of(enic, t, rfs_h.rfs_may_expire);
bool res;
int j;
diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.h b/drivers/net/ethernet/cisco/enic/enic_clsf.h
index 8c4ce50da6e1..5f5284102fb0 100644
--- a/drivers/net/ethernet/cisco/enic/enic_clsf.h
+++ b/drivers/net/ethernet/cisco/enic/enic_clsf.h
@@ -26,7 +26,7 @@ static inline void enic_rfs_timer_start(struct enic *enic)
static inline void enic_rfs_timer_stop(struct enic *enic)
{
- del_timer_sync(&enic->rfs_h.rfs_may_expire);
+ timer_delete_sync(&enic->rfs_h.rfs_may_expire);
}
#else
static inline void enic_rfs_timer_start(struct enic *enic) {}
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index 241906697019..a50f5dad34d5 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -32,6 +32,41 @@ struct enic_stat {
.index = offsetof(struct vnic_gen_stats, stat) / sizeof(u64)\
}
+#define ENIC_PER_RQ_STAT(stat) { \
+ .name = "rq[%d]_"#stat, \
+ .index = offsetof(struct enic_rq_stats, stat) / sizeof(u64) \
+}
+
+#define ENIC_PER_WQ_STAT(stat) { \
+ .name = "wq[%d]_"#stat, \
+ .index = offsetof(struct enic_wq_stats, stat) / sizeof(u64) \
+}
+
+static const struct enic_stat enic_per_rq_stats[] = {
+ ENIC_PER_RQ_STAT(l4_rss_hash),
+ ENIC_PER_RQ_STAT(l3_rss_hash),
+ ENIC_PER_RQ_STAT(csum_unnecessary_encap),
+ ENIC_PER_RQ_STAT(vlan_stripped),
+ ENIC_PER_RQ_STAT(napi_complete),
+ ENIC_PER_RQ_STAT(napi_repoll),
+ ENIC_PER_RQ_STAT(no_skb),
+ ENIC_PER_RQ_STAT(desc_skip),
+};
+
+#define NUM_ENIC_PER_RQ_STATS ARRAY_SIZE(enic_per_rq_stats)
+
+static const struct enic_stat enic_per_wq_stats[] = {
+ ENIC_PER_WQ_STAT(encap_tso),
+ ENIC_PER_WQ_STAT(encap_csum),
+ ENIC_PER_WQ_STAT(add_vlan),
+ ENIC_PER_WQ_STAT(cq_work),
+ ENIC_PER_WQ_STAT(cq_bytes),
+ ENIC_PER_WQ_STAT(null_pkt),
+ ENIC_PER_WQ_STAT(skb_linear_fail),
+ ENIC_PER_WQ_STAT(desc_full_awake),
+};
+
+#define NUM_ENIC_PER_WQ_STATS ARRAY_SIZE(enic_per_wq_stats)
static const struct enic_stat enic_tx_stats[] = {
ENIC_TX_STAT(tx_frames_ok),
ENIC_TX_STAT(tx_unicast_frames_ok),
@@ -46,6 +81,8 @@ static const struct enic_stat enic_tx_stats[] = {
ENIC_TX_STAT(tx_tso),
};
+#define NUM_ENIC_TX_STATS ARRAY_SIZE(enic_tx_stats)
+
static const struct enic_stat enic_rx_stats[] = {
ENIC_RX_STAT(rx_frames_ok),
ENIC_RX_STAT(rx_frames_total),
@@ -70,13 +107,13 @@ static const struct enic_stat enic_rx_stats[] = {
ENIC_RX_STAT(rx_frames_to_max),
};
+#define NUM_ENIC_RX_STATS ARRAY_SIZE(enic_rx_stats)
+
static const struct enic_stat enic_gen_stats[] = {
ENIC_GEN_STAT(dma_map_error),
};
-static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
-static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
-static const unsigned int enic_n_gen_stats = ARRAY_SIZE(enic_gen_stats);
+#define NUM_ENIC_GEN_STATS ARRAY_SIZE(enic_gen_stats)
static void enic_intr_coal_set_rx(struct enic *enic, u32 timer)
{
@@ -141,22 +178,38 @@ static void enic_get_drvinfo(struct net_device *netdev,
static void enic_get_strings(struct net_device *netdev, u32 stringset,
u8 *data)
{
+ struct enic *enic = netdev_priv(netdev);
unsigned int i;
+ unsigned int j;
switch (stringset) {
case ETH_SS_STATS:
- for (i = 0; i < enic_n_tx_stats; i++) {
+ for (i = 0; i < NUM_ENIC_TX_STATS; i++) {
memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
data += ETH_GSTRING_LEN;
}
- for (i = 0; i < enic_n_rx_stats; i++) {
+ for (i = 0; i < NUM_ENIC_RX_STATS; i++) {
memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
data += ETH_GSTRING_LEN;
}
- for (i = 0; i < enic_n_gen_stats; i++) {
+ for (i = 0; i < NUM_ENIC_GEN_STATS; i++) {
memcpy(data, enic_gen_stats[i].name, ETH_GSTRING_LEN);
data += ETH_GSTRING_LEN;
}
+ for (i = 0; i < enic->rq_count; i++) {
+ for (j = 0; j < NUM_ENIC_PER_RQ_STATS; j++) {
+ snprintf(data, ETH_GSTRING_LEN,
+ enic_per_rq_stats[j].name, i);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+ for (i = 0; i < enic->wq_count; i++) {
+ for (j = 0; j < NUM_ENIC_PER_WQ_STATS; j++) {
+ snprintf(data, ETH_GSTRING_LEN,
+ enic_per_wq_stats[j].name, i);
+ data += ETH_GSTRING_LEN;
+ }
+ }
break;
}
}
@@ -169,9 +222,9 @@ static void enic_get_ringparam(struct net_device *netdev,
struct enic *enic = netdev_priv(netdev);
struct vnic_enet_config *c = &enic->config;
- ring->rx_max_pending = ENIC_MAX_RQ_DESCS;
+ ring->rx_max_pending = c->max_rq_ring;
ring->rx_pending = c->rq_desc_count;
- ring->tx_max_pending = ENIC_MAX_WQ_DESCS;
+ ring->tx_max_pending = c->max_wq_ring;
ring->tx_pending = c->wq_desc_count;
}
@@ -199,18 +252,18 @@ static int enic_set_ringparam(struct net_device *netdev,
}
rx_pending = c->rq_desc_count;
tx_pending = c->wq_desc_count;
- if (ring->rx_pending > ENIC_MAX_RQ_DESCS ||
+ if (ring->rx_pending > c->max_rq_ring ||
ring->rx_pending < ENIC_MIN_RQ_DESCS) {
netdev_info(netdev, "rx pending (%u) not in range [%u,%u]",
ring->rx_pending, ENIC_MIN_RQ_DESCS,
- ENIC_MAX_RQ_DESCS);
+ c->max_rq_ring);
return -EINVAL;
}
- if (ring->tx_pending > ENIC_MAX_WQ_DESCS ||
+ if (ring->tx_pending > c->max_wq_ring ||
ring->tx_pending < ENIC_MIN_WQ_DESCS) {
netdev_info(netdev, "tx pending (%u) not in range [%u,%u]",
ring->tx_pending, ENIC_MIN_WQ_DESCS,
- ENIC_MAX_WQ_DESCS);
+ c->max_wq_ring);
return -EINVAL;
}
if (running)
@@ -242,9 +295,19 @@ err_out:
static int enic_get_sset_count(struct net_device *netdev, int sset)
{
+ struct enic *enic = netdev_priv(netdev);
+ unsigned int n_per_rq_stats;
+ unsigned int n_per_wq_stats;
+ unsigned int n_stats;
+
switch (sset) {
case ETH_SS_STATS:
- return enic_n_tx_stats + enic_n_rx_stats + enic_n_gen_stats;
+ n_per_rq_stats = NUM_ENIC_PER_RQ_STATS * enic->rq_count;
+ n_per_wq_stats = NUM_ENIC_PER_WQ_STATS * enic->wq_count;
+ n_stats = NUM_ENIC_TX_STATS + NUM_ENIC_RX_STATS +
+ NUM_ENIC_GEN_STATS +
+ n_per_rq_stats + n_per_wq_stats;
+ return n_stats;
default:
return -EOPNOTSUPP;
}
@@ -256,6 +319,7 @@ static void enic_get_ethtool_stats(struct net_device *netdev,
struct enic *enic = netdev_priv(netdev);
struct vnic_stats *vstats;
unsigned int i;
+ unsigned int j;
int err;
err = enic_dev_stats_dump(enic, &vstats);
@@ -266,12 +330,30 @@ static void enic_get_ethtool_stats(struct net_device *netdev,
if (err == -ENOMEM)
return;
- for (i = 0; i < enic_n_tx_stats; i++)
+ for (i = 0; i < NUM_ENIC_TX_STATS; i++)
*(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index];
- for (i = 0; i < enic_n_rx_stats; i++)
+ for (i = 0; i < NUM_ENIC_RX_STATS; i++)
*(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].index];
- for (i = 0; i < enic_n_gen_stats; i++)
+ for (i = 0; i < NUM_ENIC_GEN_STATS; i++)
*(data++) = ((u64 *)&enic->gen_stats)[enic_gen_stats[i].index];
+ for (i = 0; i < enic->rq_count; i++) {
+ struct enic_rq_stats *rqstats = &enic->rq[i].stats;
+ int index;
+
+ for (j = 0; j < NUM_ENIC_PER_RQ_STATS; j++) {
+ index = enic_per_rq_stats[j].index;
+ *(data++) = ((u64 *)rqstats)[index];
+ }
+ }
+ for (i = 0; i < enic->wq_count; i++) {
+ struct enic_wq_stats *wqstats = &enic->wq[i].stats;
+ int index;
+
+ for (j = 0; j < NUM_ENIC_PER_WQ_STATS; j++) {
+ index = enic_per_wq_stats[j].index;
+ *(data++) = ((u64 *)wqstats)[index];
+ }
+ }
}
static u32 enic_get_msglevel(struct net_device *netdev)
@@ -446,8 +528,10 @@ static int enic_grxclsrule(struct enic *enic, struct ethtool_rxnfc *cmd)
return 0;
}
-static int enic_get_rx_flow_hash(struct enic *enic, struct ethtool_rxnfc *cmd)
+static int enic_get_rx_flow_hash(struct net_device *dev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct enic *enic = netdev_priv(dev);
u8 rss_hash_type = 0;
cmd->data = 0;
@@ -515,9 +599,6 @@ static int enic_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
ret = enic_grxclsrule(enic, cmd);
spin_unlock_bh(&enic->rfs_h.lock);
break;
- case ETHTOOL_GRXFH:
- ret = enic_get_rx_flow_hash(enic, cmd);
- break;
default:
ret = -EOPNOTSUPP;
break;
@@ -526,43 +607,6 @@ static int enic_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
return ret;
}
-static int enic_get_tunable(struct net_device *dev,
- const struct ethtool_tunable *tuna, void *data)
-{
- struct enic *enic = netdev_priv(dev);
- int ret = 0;
-
- switch (tuna->id) {
- case ETHTOOL_RX_COPYBREAK:
- *(u32 *)data = enic->rx_copybreak;
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-static int enic_set_tunable(struct net_device *dev,
- const struct ethtool_tunable *tuna,
- const void *data)
-{
- struct enic *enic = netdev_priv(dev);
- int ret = 0;
-
- switch (tuna->id) {
- case ETHTOOL_RX_COPYBREAK:
- enic->rx_copybreak = *(u32 *)data;
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
static u32 enic_get_rxfh_key_size(struct net_device *netdev)
{
return ENIC_RSS_LEN;
@@ -599,15 +643,35 @@ static int enic_set_rxfh(struct net_device *netdev,
}
static int enic_get_ts_info(struct net_device *netdev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
- info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
return 0;
}
+static void enic_get_channels(struct net_device *netdev,
+ struct ethtool_channels *channels)
+{
+ struct enic *enic = netdev_priv(netdev);
+
+ switch (vnic_dev_get_intr_mode(enic->vdev)) {
+ case VNIC_DEV_INTR_MODE_MSIX:
+ channels->max_rx = min(enic->rq_avail, ENIC_RQ_MAX);
+ channels->max_tx = min(enic->wq_avail, ENIC_WQ_MAX);
+ channels->rx_count = enic->rq_count;
+ channels->tx_count = enic->wq_count;
+ break;
+ case VNIC_DEV_INTR_MODE_MSI:
+ case VNIC_DEV_INTR_MODE_INTX:
+ channels->max_combined = 1;
+ channels->combined_count = 1;
+ break;
+ default:
+ break;
+ }
+}
+
static const struct ethtool_ops enic_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
@@ -625,13 +689,13 @@ static const struct ethtool_ops enic_ethtool_ops = {
.get_coalesce = enic_get_coalesce,
.set_coalesce = enic_set_coalesce,
.get_rxnfc = enic_get_rxnfc,
- .get_tunable = enic_get_tunable,
- .set_tunable = enic_set_tunable,
.get_rxfh_key_size = enic_get_rxfh_key_size,
.get_rxfh = enic_get_rxfh,
.set_rxfh = enic_set_rxfh,
+ .get_rxfh_fields = enic_get_rx_flow_hash,
.get_link_ksettings = enic_get_ksettings,
.get_ts_info = enic_get_ts_info,
+ .get_channels = enic_get_channels,
};
void enic_set_ethtool_ops(struct net_device *netdev)
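Aside (not part of the patch): the ethtool changes above generate per-queue stat names from printf-style templates and offsetof()-based index tables. A minimal sketch of that pattern, with hypothetical names:

#include <linux/kernel.h>
#include <linux/ethtool.h>
#include <linux/stddef.h>

struct q_stats {
	u64 packets;
	u64 bytes;
};

struct q_stat_desc {
	char name[ETH_GSTRING_LEN];
	unsigned int index;	/* u64 slot within struct q_stats */
};

#define Q_STAT(stat) {						\
	.name = "q[%d]_" #stat,					\
	.index = offsetof(struct q_stats, stat) / sizeof(u64),	\
}

static const struct q_stat_desc q_stat_descs[] = {
	Q_STAT(packets),
	Q_STAT(bytes),
};

static void q_get_strings(u8 *data, unsigned int nr_queues)
{
	unsigned int i, j;

	for (i = 0; i < nr_queues; i++)
		for (j = 0; j < ARRAY_SIZE(q_stat_descs); j++) {
			/* expand "q[%d]_packets" -> "q[0]_packets", ... */
			snprintf(data, ETH_GSTRING_LEN, q_stat_descs[j].name, i);
			data += ETH_GSTRING_LEN;
		}
}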
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index d266a87297a5..6bc8dfdb3d4b 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -46,6 +46,7 @@
#include <linux/crash_dump.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>
+#include <net/netdev_queues.h>
#include "cq_enet_desc.h"
#include "vnic_dev.h"
@@ -57,18 +58,15 @@
#include "enic_dev.h"
#include "enic_pp.h"
#include "enic_clsf.h"
+#include "enic_rq.h"
+#include "enic_wq.h"
#define ENIC_NOTIFY_TIMER_PERIOD (2 * HZ)
-#define WQ_ENET_MAX_DESC_LEN (1 << WQ_ENET_LEN_BITS)
-#define MAX_TSO (1 << 16)
-#define ENIC_DESC_MAX_SPLITS (MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)
#define PCI_DEVICE_ID_CISCO_VIC_ENET 0x0043 /* ethernet vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN 0x0044 /* enet dynamic vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_VF 0x0071 /* enet SRIOV VF */
-#define RX_COPYBREAK_DEFAULT 256
-
/* Supported devices */
static const struct pci_device_id enic_id_table[] = {
{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
@@ -108,7 +106,7 @@ static struct enic_intr_mod_table mod_table[ENIC_MAX_COALESCE_TIMERS + 1] = {
static struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = {
{0, 0}, /* 0 - 4 Gbps */
{0, 3}, /* 4 - 10 Gbps */
- {3, 6}, /* 10 - 40 Gbps */
+ {3, 6}, /* 10+ Gbps */
};
static void enic_init_affinity_hint(struct enic *enic)
@@ -321,48 +319,6 @@ int enic_is_valid_vf(struct enic *enic, int vf)
#endif
}
-static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
-{
- struct enic *enic = vnic_dev_priv(wq->vdev);
-
- if (buf->sop)
- dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
- DMA_TO_DEVICE);
- else
- dma_unmap_page(&enic->pdev->dev, buf->dma_addr, buf->len,
- DMA_TO_DEVICE);
-
- if (buf->os_buf)
- dev_kfree_skb_any(buf->os_buf);
-}
-
-static void enic_wq_free_buf(struct vnic_wq *wq,
- struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
-{
- enic_free_wq_buf(wq, buf);
-}
-
-static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
- u8 type, u16 q_number, u16 completed_index, void *opaque)
-{
- struct enic *enic = vnic_dev_priv(vdev);
-
- spin_lock(&enic->wq_lock[q_number]);
-
- vnic_wq_service(&enic->wq[q_number], cq_desc,
- completed_index, enic_wq_free_buf,
- opaque);
-
- if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) &&
- vnic_wq_desc_avail(&enic->wq[q_number]) >=
- (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
- netif_wake_subqueue(enic->netdev, q_number);
-
- spin_unlock(&enic->wq_lock[q_number]);
-
- return 0;
-}
-
static bool enic_log_q_error(struct enic *enic)
{
unsigned int i;
@@ -370,7 +326,7 @@ static bool enic_log_q_error(struct enic *enic)
bool err = false;
for (i = 0; i < enic->wq_count; i++) {
- error_status = vnic_wq_error_status(&enic->wq[i]);
+ error_status = vnic_wq_error_status(&enic->wq[i].vwq);
err |= error_status;
if (error_status)
netdev_err(enic->netdev, "WQ[%d] error_status %d\n",
@@ -378,7 +334,7 @@ static bool enic_log_q_error(struct enic *enic)
}
for (i = 0; i < enic->rq_count; i++) {
- error_status = vnic_rq_error_status(&enic->rq[i]);
+ error_status = vnic_rq_error_status(&enic->rq[i].vrq);
err |= error_status;
if (error_status)
netdev_err(enic->netdev, "RQ[%d] error_status %d\n",
@@ -421,6 +377,36 @@ static void enic_mtu_check(struct enic *enic)
}
}
+static void enic_set_rx_coal_setting(struct enic *enic)
+{
+ unsigned int speed;
+ int index = -1;
+ struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
+
+ /* 1. Read the link speed from fw
+ * 2. Pick the default range for the speed
+ * 3. Update it in enic->rx_coalesce_setting
+ */
+ speed = vnic_dev_port_speed(enic->vdev);
+ if (speed > ENIC_LINK_SPEED_10G)
+ index = ENIC_LINK_40G_INDEX;
+ else if (speed > ENIC_LINK_SPEED_4G)
+ index = ENIC_LINK_10G_INDEX;
+ else
+ index = ENIC_LINK_4G_INDEX;
+
+ rx_coal->small_pkt_range_start = mod_range[index].small_pkt_range_start;
+ rx_coal->large_pkt_range_start = mod_range[index].large_pkt_range_start;
+ rx_coal->range_end = ENIC_RX_COALESCE_RANGE_END;
+
+ /* Start with the value provided by UCSM */
+ for (index = 0; index < enic->rq_count; index++)
+ enic->cq[index].cur_rx_coal_timeval =
+ enic->config.intr_timer_usec;
+
+ rx_coal->use_adaptive_rx_coalesce = 1;
+}
+
static void enic_link_check(struct enic *enic)
{
int link_status = vnic_dev_link_status(enic->vdev);
@@ -429,6 +415,7 @@ static void enic_link_check(struct enic *enic)
if (link_status && !carrier_ok) {
netdev_info(enic->netdev, "Link UP\n");
netif_carrier_on(enic->netdev);
+ enic_set_rx_coal_setting(enic);
} else if (!link_status && carrier_ok) {
netdev_info(enic->netdev, "Link DOWN\n");
netif_carrier_off(enic->netdev);
@@ -590,6 +577,11 @@ static int enic_queue_wq_skb_vlan(struct enic *enic, struct vnic_wq *wq,
if (!eop)
err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
+ /* The enic_queue_wq_desc() above does not do HW checksum */
+ enic->wq[wq->index].stats.csum_none++;
+ enic->wq[wq->index].stats.packets++;
+ enic->wq[wq->index].stats.bytes += skb->len;
+
return err;
}
@@ -622,6 +614,10 @@ static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq,
if (!eop)
err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
+ enic->wq[wq->index].stats.csum_partial++;
+ enic->wq[wq->index].stats.packets++;
+ enic->wq[wq->index].stats.bytes += skb->len;
+
return err;
}
@@ -676,15 +672,18 @@ static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
unsigned int offset = 0;
unsigned int hdr_len;
dma_addr_t dma_addr;
+ unsigned int pkts;
unsigned int len;
skb_frag_t *frag;
if (skb->encapsulation) {
hdr_len = skb_inner_tcp_all_headers(skb);
enic_preload_tcp_csum_encap(skb);
+ enic->wq[wq->index].stats.encap_tso++;
} else {
hdr_len = skb_tcp_all_headers(skb);
enic_preload_tcp_csum(skb);
+ enic->wq[wq->index].stats.tso++;
}
/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
@@ -705,7 +704,7 @@ static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
}
if (eop)
- return 0;
+ goto tso_out_stats;
/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
* for additional data fragments
@@ -732,6 +731,15 @@ static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
}
}
+tso_out_stats:
+ /* calculate how many packets tso sent */
+ len = skb->len - hdr_len;
+ pkts = len / mss;
+ if ((len % mss) > 0)
+ pkts++;
+ enic->wq[wq->index].stats.packets += pkts;
+ enic->wq[wq->index].stats.bytes += (len + (pkts * hdr_len));
+
return 0;
}
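
The tso_out_stats accounting above is a ceiling division of the TCP payload by the MSS, with one header re-added per resulting segment. A minimal user-space sketch with made-up sizes:

#include <stdio.h>

/* Mirrors the accounting at tso_out_stats above: "packets" is the number
 * of segments the NIC will emit, "bytes" re-adds one header per segment. */
static void tso_accounting(unsigned int skb_len, unsigned int hdr_len,
			   unsigned int mss, unsigned long long *packets,
			   unsigned long long *bytes)
{
	unsigned int len = skb_len - hdr_len;	/* payload only */
	unsigned int pkts = len / mss;

	if (len % mss)				/* i.e. DIV_ROUND_UP(len, mss) */
		pkts++;

	*packets += pkts;
	*bytes += len + (unsigned long long)pkts * hdr_len;
}

int main(void)
{
	unsigned long long packets = 0, bytes = 0;

	/* e.g. a 64 KiB GSO skb, 54-byte headers, MSS 1448 */
	tso_accounting(65536, 54, 1448, &packets, &bytes);
	printf("segments=%llu wire_bytes=%llu\n", packets, bytes);
	/* prints: segments=46 wire_bytes=67966 */
	return 0;
}
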
@@ -764,6 +772,10 @@ static inline int enic_queue_wq_skb_encap(struct enic *enic, struct vnic_wq *wq,
if (!eop)
err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
+ enic->wq[wq->index].stats.encap_csum++;
+ enic->wq[wq->index].stats.packets++;
+ enic->wq[wq->index].stats.bytes += skb->len;
+
return err;
}
@@ -780,6 +792,7 @@ static inline int enic_queue_wq_skb(struct enic *enic,
/* VLAN tag from trunking driver */
vlan_tag_insert = 1;
vlan_tag = skb_vlan_tag_get(skb);
+ enic->wq[wq->index].stats.add_vlan++;
} else if (enic->loop_enable) {
vlan_tag = enic->loop_tag;
loopback = 1;
@@ -792,7 +805,7 @@ static inline int enic_queue_wq_skb(struct enic *enic,
else if (skb->encapsulation)
err = enic_queue_wq_skb_encap(enic, wq, skb, vlan_tag_insert,
vlan_tag, loopback);
- else if (skb->ip_summed == CHECKSUM_PARTIAL)
+ else if (skb->ip_summed == CHECKSUM_PARTIAL)
err = enic_queue_wq_skb_csum_l4(enic, wq, skb, vlan_tag_insert,
vlan_tag, loopback);
else
@@ -825,13 +838,15 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
unsigned int txq_map;
struct netdev_queue *txq;
+ txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
+ wq = &enic->wq[txq_map].vwq;
+
if (skb->len <= 0) {
dev_kfree_skb_any(skb);
+ enic->wq[wq->index].stats.null_pkt++;
return NETDEV_TX_OK;
}
- txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
- wq = &enic->wq[txq_map];
txq = netdev_get_tx_queue(netdev, txq_map);
/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
@@ -843,31 +858,35 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
skb_linearize(skb)) {
dev_kfree_skb_any(skb);
+ enic->wq[wq->index].stats.skb_linear_fail++;
return NETDEV_TX_OK;
}
- spin_lock(&enic->wq_lock[txq_map]);
+ spin_lock(&enic->wq[txq_map].lock);
if (vnic_wq_desc_avail(wq) <
skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
netif_tx_stop_queue(txq);
/* This is a hard error, log it */
netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
- spin_unlock(&enic->wq_lock[txq_map]);
+ spin_unlock(&enic->wq[txq_map].lock);
+ enic->wq[wq->index].stats.desc_full_awake++;
return NETDEV_TX_BUSY;
}
if (enic_queue_wq_skb(enic, wq, skb))
goto error;
- if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
+ if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS) {
netif_tx_stop_queue(txq);
+ enic->wq[wq->index].stats.stopped++;
+ }
skb_tx_timestamp(skb);
if (!netdev_xmit_more() || netif_xmit_stopped(txq))
vnic_wq_doorbell(wq);
error:
- spin_unlock(&enic->wq_lock[txq_map]);
+ spin_unlock(&enic->wq[txq_map].lock);
return NETDEV_TX_OK;
}
@@ -878,7 +897,10 @@ static void enic_get_stats(struct net_device *netdev,
{
struct enic *enic = netdev_priv(netdev);
struct vnic_stats *stats;
+ u64 pkt_truncated = 0;
+ u64 bad_fcs = 0;
int err;
+ int i;
err = enic_dev_stats_dump(enic, &stats);
/* return only when dma_alloc_coherent fails in vnic_dev_stats_dump
@@ -897,8 +919,17 @@ static void enic_get_stats(struct net_device *netdev,
net_stats->rx_bytes = stats->rx.rx_bytes_ok;
net_stats->rx_errors = stats->rx.rx_errors;
net_stats->multicast = stats->rx.rx_multicast_frames_ok;
- net_stats->rx_over_errors = enic->rq_truncated_pkts;
- net_stats->rx_crc_errors = enic->rq_bad_fcs;
+
+ for (i = 0; i < enic->rq_count; i++) {
+ struct enic_rq_stats *rqs = &enic->rq[i].stats;
+
+ if (!enic->rq[i].vrq.ctrl)
+ break;
+ pkt_truncated += rqs->pkt_truncated;
+ bad_fcs += rqs->bad_fcs;
+ }
+ net_stats->rx_over_errors = pkt_truncated;
+ net_stats->rx_crc_errors = bad_fcs;
net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop;
}
@@ -1117,18 +1148,30 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
pp->request = nla_get_u8(port[IFLA_PORT_REQUEST]);
if (port[IFLA_PORT_PROFILE]) {
+ if (nla_len(port[IFLA_PORT_PROFILE]) != PORT_PROFILE_MAX) {
+ memcpy(pp, &prev_pp, sizeof(*pp));
+ return -EINVAL;
+ }
pp->set |= ENIC_SET_NAME;
memcpy(pp->name, nla_data(port[IFLA_PORT_PROFILE]),
PORT_PROFILE_MAX);
}
if (port[IFLA_PORT_INSTANCE_UUID]) {
+ if (nla_len(port[IFLA_PORT_INSTANCE_UUID]) != PORT_UUID_MAX) {
+ memcpy(pp, &prev_pp, sizeof(*pp));
+ return -EINVAL;
+ }
pp->set |= ENIC_SET_INSTANCE;
memcpy(pp->instance_uuid,
nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
}
if (port[IFLA_PORT_HOST_UUID]) {
+ if (nla_len(port[IFLA_PORT_HOST_UUID]) != PORT_UUID_MAX) {
+ memcpy(pp, &prev_pp, sizeof(*pp));
+ return -EINVAL;
+ }
pp->set |= ENIC_SET_HOST;
memcpy(pp->host_uuid,
nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
@@ -1219,230 +1262,6 @@ nla_put_failure:
return -EMSGSIZE;
}
-static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
-{
- struct enic *enic = vnic_dev_priv(rq->vdev);
-
- if (!buf->os_buf)
- return;
-
- dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
- DMA_FROM_DEVICE);
- dev_kfree_skb_any(buf->os_buf);
- buf->os_buf = NULL;
-}
-
-static int enic_rq_alloc_buf(struct vnic_rq *rq)
-{
- struct enic *enic = vnic_dev_priv(rq->vdev);
- struct net_device *netdev = enic->netdev;
- struct sk_buff *skb;
- unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
- unsigned int os_buf_index = 0;
- dma_addr_t dma_addr;
- struct vnic_rq_buf *buf = rq->to_use;
-
- if (buf->os_buf) {
- enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr,
- buf->len);
-
- return 0;
- }
- skb = netdev_alloc_skb_ip_align(netdev, len);
- if (!skb)
- return -ENOMEM;
-
- dma_addr = dma_map_single(&enic->pdev->dev, skb->data, len,
- DMA_FROM_DEVICE);
- if (unlikely(enic_dma_map_check(enic, dma_addr))) {
- dev_kfree_skb(skb);
- return -ENOMEM;
- }
-
- enic_queue_rq_desc(rq, skb, os_buf_index,
- dma_addr, len);
-
- return 0;
-}
-
-static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size,
- u32 pkt_len)
-{
- if (ENIC_LARGE_PKT_THRESHOLD <= pkt_len)
- pkt_size->large_pkt_bytes_cnt += pkt_len;
- else
- pkt_size->small_pkt_bytes_cnt += pkt_len;
-}
-
-static bool enic_rxcopybreak(struct net_device *netdev, struct sk_buff **skb,
- struct vnic_rq_buf *buf, u16 len)
-{
- struct enic *enic = netdev_priv(netdev);
- struct sk_buff *new_skb;
-
- if (len > enic->rx_copybreak)
- return false;
- new_skb = netdev_alloc_skb_ip_align(netdev, len);
- if (!new_skb)
- return false;
- dma_sync_single_for_cpu(&enic->pdev->dev, buf->dma_addr, len,
- DMA_FROM_DEVICE);
- memcpy(new_skb->data, (*skb)->data, len);
- *skb = new_skb;
-
- return true;
-}
-
-static void enic_rq_indicate_buf(struct vnic_rq *rq,
- struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
- int skipped, void *opaque)
-{
- struct enic *enic = vnic_dev_priv(rq->vdev);
- struct net_device *netdev = enic->netdev;
- struct sk_buff *skb;
- struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
-
- u8 type, color, eop, sop, ingress_port, vlan_stripped;
- u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
- u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
- u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
- u8 packet_error;
- u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
- u32 rss_hash;
- bool outer_csum_ok = true, encap = false;
-
- if (skipped)
- return;
-
- skb = buf->os_buf;
-
- cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
- &type, &color, &q_number, &completed_index,
- &ingress_port, &fcoe, &eop, &sop, &rss_type,
- &csum_not_calc, &rss_hash, &bytes_written,
- &packet_error, &vlan_stripped, &vlan_tci, &checksum,
- &fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
- &fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
- &ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
- &fcs_ok);
-
- if (packet_error) {
-
- if (!fcs_ok) {
- if (bytes_written > 0)
- enic->rq_bad_fcs++;
- else if (bytes_written == 0)
- enic->rq_truncated_pkts++;
- }
-
- dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
- DMA_FROM_DEVICE);
- dev_kfree_skb_any(skb);
- buf->os_buf = NULL;
-
- return;
- }
-
- if (eop && bytes_written > 0) {
-
- /* Good receive
- */
-
- if (!enic_rxcopybreak(netdev, &skb, buf, bytes_written)) {
- buf->os_buf = NULL;
- dma_unmap_single(&enic->pdev->dev, buf->dma_addr,
- buf->len, DMA_FROM_DEVICE);
- }
- prefetch(skb->data - NET_IP_ALIGN);
-
- skb_put(skb, bytes_written);
- skb->protocol = eth_type_trans(skb, netdev);
- skb_record_rx_queue(skb, q_number);
- if ((netdev->features & NETIF_F_RXHASH) && rss_hash &&
- (type == 3)) {
- switch (rss_type) {
- case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4:
- case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6:
- case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX:
- skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L4);
- break;
- case CQ_ENET_RQ_DESC_RSS_TYPE_IPv4:
- case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6:
- case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX:
- skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L3);
- break;
- }
- }
- if (enic->vxlan.vxlan_udp_port_number) {
- switch (enic->vxlan.patch_level) {
- case 0:
- if (fcoe) {
- encap = true;
- outer_csum_ok = fcoe_fc_crc_ok;
- }
- break;
- case 2:
- if ((type == 7) &&
- (rss_hash & BIT(0))) {
- encap = true;
- outer_csum_ok = (rss_hash & BIT(1)) &&
- (rss_hash & BIT(2));
- }
- break;
- }
- }
-
- /* Hardware does not provide whole packet checksum. It only
- * provides pseudo checksum. Since hw validates the packet
- * checksum but not provide us the checksum value. use
- * CHECSUM_UNNECESSARY.
- *
- * In case of encap pkt tcp_udp_csum_ok/tcp_udp_csum_ok is
- * inner csum_ok. outer_csum_ok is set by hw when outer udp
- * csum is correct or is zero.
- */
- if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc &&
- tcp_udp_csum_ok && outer_csum_ok &&
- (ipv4_csum_ok || ipv6)) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb->csum_level = encap;
- }
-
- if (vlan_stripped)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
-
- skb_mark_napi_id(skb, &enic->napi[rq->index]);
- if (!(netdev->features & NETIF_F_GRO))
- netif_receive_skb(skb);
- else
- napi_gro_receive(&enic->napi[q_number], skb);
- if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
- enic_intr_update_pkt_size(&cq->pkt_size_counter,
- bytes_written);
- } else {
-
- /* Buffer overflow
- */
-
- dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
- DMA_FROM_DEVICE);
- dev_kfree_skb_any(skb);
- buf->os_buf = NULL;
- }
-}
-
-static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
- u8 type, u16 q_number, u16 completed_index, void *opaque)
-{
- struct enic *enic = vnic_dev_priv(vdev);
-
- vnic_rq_service(&enic->rq[q_number], cq_desc,
- completed_index, VNIC_RQ_RETURN_DESC,
- enic_rq_indicate_buf, opaque);
-
- return 0;
-}
-
static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq)
{
unsigned int intr = enic_msix_rq_intr(enic, rq->index);
@@ -1513,12 +1332,10 @@ static int enic_poll(struct napi_struct *napi, int budget)
unsigned int work_done, rq_work_done = 0, wq_work_done;
int err;
- wq_work_done = vnic_cq_service(&enic->cq[cq_wq], wq_work_to_do,
- enic_wq_service, NULL);
+ wq_work_done = enic_wq_cq_service(enic, cq_wq, wq_work_to_do);
if (budget > 0)
- rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
- rq_work_to_do, enic_rq_service, NULL);
+ rq_work_done = enic_rq_cq_service(enic, cq_rq, rq_work_to_do);
/* Accumulate intr event credits for this polling
* cycle. An intr event is the completion of a
@@ -1533,7 +1350,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
0 /* don't unmask intr */,
0 /* don't reset intr timer */);
- err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
+ err = vnic_rq_fill(&enic->rq[0].vrq, enic_rq_alloc_buf);
/* Buffer allocation failed. Stay in polling
* mode so we can try to fill the ring again.
@@ -1545,7 +1362,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
/* Call the function which refreshes the intr coalescing timer
* value based on the traffic.
*/
- enic_calc_int_moderation(enic, &enic->rq[0]);
+ enic_calc_int_moderation(enic, &enic->rq[0].vrq);
if ((rq_work_done < budget) && napi_complete_done(napi, rq_work_done)) {
@@ -1554,8 +1371,11 @@ static int enic_poll(struct napi_struct *napi, int budget)
*/
if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
- enic_set_int_moderation(enic, &enic->rq[0]);
+ enic_set_int_moderation(enic, &enic->rq[0].vrq);
vnic_intr_unmask(&enic->intr[intr]);
+ enic->rq[0].stats.napi_complete++;
+ } else {
+ enic->rq[0].stats.napi_repoll++;
}
return rq_work_done;
@@ -1604,7 +1424,7 @@ static int enic_poll_msix_wq(struct napi_struct *napi, int budget)
struct net_device *netdev = napi->dev;
struct enic *enic = netdev_priv(netdev);
unsigned int wq_index = (napi - &enic->napi[0]) - enic->rq_count;
- struct vnic_wq *wq = &enic->wq[wq_index];
+ struct vnic_wq *wq = &enic->wq[wq_index].vwq;
unsigned int cq;
unsigned int intr;
unsigned int wq_work_to_do = ENIC_WQ_NAPI_BUDGET;
@@ -1614,8 +1434,8 @@ static int enic_poll_msix_wq(struct napi_struct *napi, int budget)
wq_irq = wq->index;
cq = enic_cq_wq(enic, wq_irq);
intr = enic_msix_wq_intr(enic, wq_irq);
- wq_work_done = vnic_cq_service(&enic->cq[cq], wq_work_to_do,
- enic_wq_service, NULL);
+
+ wq_work_done = enic_wq_cq_service(enic, cq, wq_work_to_do);
vnic_intr_return_credits(&enic->intr[intr], wq_work_done,
0 /* don't unmask intr */,
@@ -1644,8 +1464,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
*/
if (budget > 0)
- work_done = vnic_cq_service(&enic->cq[cq],
- work_to_do, enic_rq_service, NULL);
+ work_done = enic_rq_cq_service(enic, cq, work_to_do);
/* Return intr event credits for this polling
* cycle. An intr event is the completion of a
@@ -1658,7 +1477,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
0 /* don't unmask intr */,
0 /* don't reset intr timer */);
- err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);
+ err = vnic_rq_fill(&enic->rq[rq].vrq, enic_rq_alloc_buf);
/* Buffer allocation failed. Stay in polling mode
* so we can try to fill the ring again.
@@ -1670,7 +1489,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
/* Call the function which refreshes the intr coalescing timer
* value based on the traffic.
*/
- enic_calc_int_moderation(enic, &enic->rq[rq]);
+ enic_calc_int_moderation(enic, &enic->rq[rq].vrq);
if ((work_done < budget) && napi_complete_done(napi, work_done)) {
@@ -1679,8 +1498,11 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
*/
if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
- enic_set_int_moderation(enic, &enic->rq[rq]);
+ enic_set_int_moderation(enic, &enic->rq[rq].vrq);
vnic_intr_unmask(&enic->intr[intr]);
+ enic->rq[rq].stats.napi_complete++;
+ } else {
+ enic->rq[rq].stats.napi_repoll++;
}
return work_done;
@@ -1688,7 +1510,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
static void enic_notify_timer(struct timer_list *t)
{
- struct enic *enic = from_timer(enic, t, notify_timer);
+ struct enic *enic = timer_container_of(enic, t, notify_timer);
enic_notify_check(enic);
@@ -1710,7 +1532,7 @@ static void enic_free_intr(struct enic *enic)
free_irq(enic->pdev->irq, enic);
break;
case VNIC_DEV_INTR_MODE_MSIX:
- for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
+ for (i = 0; i < enic->intr_count; i++)
if (enic->msix[i].requested)
free_irq(enic->msix_entry[i].vector,
enic->msix[i].devid);
@@ -1777,7 +1599,7 @@ static int enic_request_intr(struct enic *enic)
enic->msix[intr].isr = enic_isr_msix_notify;
enic->msix[intr].devid = enic;
- for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
+ for (i = 0; i < enic->intr_count; i++)
enic->msix[i].requested = 0;
for (i = 0; i < enic->intr_count; i++) {
@@ -1819,36 +1641,6 @@ static void enic_synchronize_irqs(struct enic *enic)
}
}
-static void enic_set_rx_coal_setting(struct enic *enic)
-{
- unsigned int speed;
- int index = -1;
- struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
-
- /* 1. Read the link speed from fw
- * 2. Pick the default range for the speed
- * 3. Update it in enic->rx_coalesce_setting
- */
- speed = vnic_dev_port_speed(enic->vdev);
- if (ENIC_LINK_SPEED_10G < speed)
- index = ENIC_LINK_40G_INDEX;
- else if (ENIC_LINK_SPEED_4G < speed)
- index = ENIC_LINK_10G_INDEX;
- else
- index = ENIC_LINK_4G_INDEX;
-
- rx_coal->small_pkt_range_start = mod_range[index].small_pkt_range_start;
- rx_coal->large_pkt_range_start = mod_range[index].large_pkt_range_start;
- rx_coal->range_end = ENIC_RX_COALESCE_RANGE_END;
-
- /* Start with the value provided by UCSM */
- for (index = 0; index < enic->rq_count; index++)
- enic->cq[index].cur_rx_coal_timeval =
- enic->config.intr_timer_usec;
-
- rx_coal->use_adaptive_rx_coalesce = 1;
-}
-
static int enic_dev_notify_set(struct enic *enic)
{
int err;
@@ -1889,6 +1681,17 @@ static int enic_open(struct net_device *netdev)
struct enic *enic = netdev_priv(netdev);
unsigned int i;
int err, ret;
+ unsigned int max_pkt_len = netdev->mtu + VLAN_ETH_HLEN;
+ struct page_pool_params pp_params = {
+ .order = get_order(max_pkt_len),
+ .pool_size = enic->config.rq_desc_count,
+ .nid = dev_to_node(&enic->pdev->dev),
+ .dev = &enic->pdev->dev,
+ .dma_dir = DMA_FROM_DEVICE,
+ .max_len = (max_pkt_len > PAGE_SIZE) ? max_pkt_len : PAGE_SIZE,
+ .netdev = netdev,
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ };
err = enic_request_intr(enic);
if (err) {
@@ -1906,11 +1709,21 @@ static int enic_open(struct net_device *netdev)
}
for (i = 0; i < enic->rq_count; i++) {
+ /* create a page pool for each RQ */
+ pp_params.napi = &enic->napi[i];
+ pp_params.queue_idx = i;
+ enic->rq[i].pool = page_pool_create(&pp_params);
+ if (IS_ERR(enic->rq[i].pool)) {
+ err = PTR_ERR(enic->rq[i].pool);
+ enic->rq[i].pool = NULL;
+ goto err_out_free_rq;
+ }
+
/* enable rq before updating rq desc */
- vnic_rq_enable(&enic->rq[i]);
- vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
+ vnic_rq_enable(&enic->rq[i].vrq);
+ vnic_rq_fill(&enic->rq[i].vrq, enic_rq_alloc_buf);
/* Need at least one buffer on ring to get going */
- if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
+ if (vnic_rq_desc_used(&enic->rq[i].vrq) == 0) {
netdev_err(netdev, "Unable to alloc receive buffers\n");
err = -ENOMEM;
goto err_out_free_rq;
@@ -1918,7 +1731,7 @@ static int enic_open(struct net_device *netdev)
}
for (i = 0; i < enic->wq_count; i++)
- vnic_wq_enable(&enic->wq[i]);
+ vnic_wq_enable(&enic->wq[i].vwq);
if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
enic_dev_add_station_addr(enic);
@@ -1945,9 +1758,12 @@ static int enic_open(struct net_device *netdev)
err_out_free_rq:
for (i = 0; i < enic->rq_count; i++) {
- ret = vnic_rq_disable(&enic->rq[i]);
- if (!ret)
- vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
+ ret = vnic_rq_disable(&enic->rq[i].vrq);
+ if (!ret) {
+ vnic_rq_clean(&enic->rq[i].vrq, enic_free_rq_buf);
+ page_pool_destroy(enic->rq[i].pool);
+ enic->rq[i].pool = NULL;
+ }
}
enic_dev_notify_unset(enic);
err_out_free_intr:
@@ -1971,7 +1787,7 @@ static int enic_stop(struct net_device *netdev)
enic_synchronize_irqs(enic);
- del_timer_sync(&enic->notify_timer);
+ timer_delete_sync(&enic->notify_timer);
enic_rfs_flw_tbl_free(enic);
enic_dev_disable(enic);
@@ -1989,12 +1805,12 @@ static int enic_stop(struct net_device *netdev)
enic_dev_del_station_addr(enic);
for (i = 0; i < enic->wq_count; i++) {
- err = vnic_wq_disable(&enic->wq[i]);
+ err = vnic_wq_disable(&enic->wq[i].vwq);
if (err)
return err;
}
for (i = 0; i < enic->rq_count; i++) {
- err = vnic_rq_disable(&enic->rq[i]);
+ err = vnic_rq_disable(&enic->rq[i].vrq);
if (err)
return err;
}
@@ -2004,9 +1820,12 @@ static int enic_stop(struct net_device *netdev)
enic_free_intr(enic);
for (i = 0; i < enic->wq_count; i++)
- vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
- for (i = 0; i < enic->rq_count; i++)
- vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
+ vnic_wq_clean(&enic->wq[i].vwq, enic_free_wq_buf);
+ for (i = 0; i < enic->rq_count; i++) {
+ vnic_rq_clean(&enic->rq[i].vrq, enic_free_rq_buf);
+ page_pool_destroy(enic->rq[i].pool);
+ enic->rq[i].pool = NULL;
+ }
for (i = 0; i < enic->cq_count; i++)
vnic_cq_clean(&enic->cq[i]);
for (i = 0; i < enic->intr_count; i++)
@@ -2027,7 +1846,7 @@ static int _enic_change_mtu(struct net_device *netdev, int new_mtu)
return err;
}
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (running) {
err = enic_open(netdev);
@@ -2045,10 +1864,10 @@ static int enic_change_mtu(struct net_device *netdev, int new_mtu)
if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
return -EOPNOTSUPP;
- if (netdev->mtu > enic->port_mtu)
+ if (new_mtu > enic->port_mtu)
netdev_warn(netdev,
"interface MTU (%d) set higher than port MTU (%d)\n",
- netdev->mtu, enic->port_mtu);
+ new_mtu, enic->port_mtu);
return _enic_change_mtu(netdev, new_mtu);
}
@@ -2322,6 +2141,7 @@ static void enic_reset(struct work_struct *work)
enic_init_vnic_resources(enic);
enic_set_rss_nic_cfg(enic);
enic_dev_set_ig_vlan_rewrite_mode(enic);
+ enic_ext_cq(enic);
enic_open(enic->netdev);
/* Allow infiniband to fiddle with the device again */
@@ -2348,6 +2168,7 @@ static void enic_tx_hang_reset(struct work_struct *work)
enic_init_vnic_resources(enic);
enic_set_rss_nic_cfg(enic);
enic_dev_set_ig_vlan_rewrite_mode(enic);
+ enic_ext_cq(enic);
enic_open(enic->netdev);
/* Allow infiniband to fiddle with the device again */
@@ -2360,112 +2181,56 @@ static void enic_tx_hang_reset(struct work_struct *work)
static int enic_set_intr_mode(struct enic *enic)
{
- unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX);
- unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX);
unsigned int i;
+ int num_intr;
/* Set interrupt mode (INTx, MSI, MSI-X) depending
* on system capabilities.
*
* Try MSI-X first
- *
- * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs
- * (the second to last INTR is used for WQ/RQ errors)
- * (the last INTR is used for notifications)
- */
-
- BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2);
- for (i = 0; i < n + m + 2; i++)
- enic->msix_entry[i].entry = i;
-
- /* Use multiple RQs if RSS is enabled
*/
- if (ENIC_SETTING(enic, RSS) &&
- enic->config.intr_mode < 1 &&
- enic->rq_count >= n &&
- enic->wq_count >= m &&
- enic->cq_count >= n + m &&
- enic->intr_count >= n + m + 2) {
-
- if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
- n + m + 2, n + m + 2) > 0) {
-
- enic->rq_count = n;
- enic->wq_count = m;
- enic->cq_count = n + m;
- enic->intr_count = n + m + 2;
-
- vnic_dev_set_intr_mode(enic->vdev,
- VNIC_DEV_INTR_MODE_MSIX);
-
- return 0;
- }
- }
-
if (enic->config.intr_mode < 1 &&
- enic->rq_count >= 1 &&
- enic->wq_count >= m &&
- enic->cq_count >= 1 + m &&
- enic->intr_count >= 1 + m + 2) {
- if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
- 1 + m + 2, 1 + m + 2) > 0) {
-
- enic->rq_count = 1;
- enic->wq_count = m;
- enic->cq_count = 1 + m;
- enic->intr_count = 1 + m + 2;
-
+ enic->intr_avail >= ENIC_MSIX_MIN_INTR) {
+ for (i = 0; i < enic->intr_avail; i++)
+ enic->msix_entry[i].entry = i;
+
+ num_intr = pci_enable_msix_range(enic->pdev, enic->msix_entry,
+ ENIC_MSIX_MIN_INTR,
+ enic->intr_avail);
+ if (num_intr > 0) {
vnic_dev_set_intr_mode(enic->vdev,
- VNIC_DEV_INTR_MODE_MSIX);
-
+ VNIC_DEV_INTR_MODE_MSIX);
+ enic->intr_avail = num_intr;
return 0;
}
}
/* Next try MSI
*
- * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR
+ * We need 1 INTR
*/
if (enic->config.intr_mode < 2 &&
- enic->rq_count >= 1 &&
- enic->wq_count >= 1 &&
- enic->cq_count >= 2 &&
- enic->intr_count >= 1 &&
+ enic->intr_avail >= 1 &&
!pci_enable_msi(enic->pdev)) {
-
- enic->rq_count = 1;
- enic->wq_count = 1;
- enic->cq_count = 2;
- enic->intr_count = 1;
-
+ enic->intr_avail = 1;
vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI);
-
return 0;
}
/* Next try INTx
*
- * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs
+ * We need 3 INTRs
* (the first INTR is used for WQ/RQ)
* (the second INTR is used for WQ/RQ errors)
* (the last INTR is used for notifications)
*/
if (enic->config.intr_mode < 3 &&
- enic->rq_count >= 1 &&
- enic->wq_count >= 1 &&
- enic->cq_count >= 2 &&
- enic->intr_count >= 3) {
-
- enic->rq_count = 1;
- enic->wq_count = 1;
- enic->cq_count = 2;
- enic->intr_count = 3;
-
+ enic->intr_avail >= 3) {
+ enic->intr_avail = 3;
vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX);
-
return 0;
}
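
The reworked selection keeps the usual MSI-X -> MSI -> INTx fallback, but gates each step only on enic->intr_avail rather than per-queue counts (queues are sized later by enic_adjust_resources()). A rough stand-alone model of the decision order; the ENIC_MSIX_MIN_INTR value used below is an assumption, and the real MSI-X branch additionally depends on pci_enable_msix_range() succeeding:

#include <stdio.h>

enum intr_mode { INTR_MSIX, INTR_MSI, INTR_INTX, INTR_NONE };

static enum intr_mode pick_intr_mode(int cfg_intr_mode, int intr_avail)
{
	const int enic_msix_min_intr = 2;	/* assumed value */

	if (cfg_intr_mode < 1 && intr_avail >= enic_msix_min_intr)
		return INTR_MSIX;	/* also needs pci_enable_msix_range() to succeed */
	if (cfg_intr_mode < 2 && intr_avail >= 1)
		return INTR_MSI;
	if (cfg_intr_mode < 3 && intr_avail >= 3)
		return INTR_INTX;	/* I/O, error and notify interrupts */
	return INTR_NONE;
}

int main(void)
{
	printf("%d %d\n", pick_intr_mode(0, 16), pick_intr_mode(2, 4));
	/* prints: 0 2  (MSI-X for the first case, INTx for the second) */
	return 0;
}
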
@@ -2490,6 +2255,127 @@ static void enic_clear_intr_mode(struct enic *enic)
vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
}
+static int enic_adjust_resources(struct enic *enic)
+{
+ unsigned int max_queues;
+ unsigned int rq_default;
+ unsigned int rq_avail;
+ unsigned int wq_avail;
+
+ if (enic->rq_avail < 1 || enic->wq_avail < 1 || enic->cq_avail < 2) {
+ dev_err(enic_get_dev(enic),
+ "Not enough resources available rq: %d wq: %d cq: %d\n",
+ enic->rq_avail, enic->wq_avail,
+ enic->cq_avail);
+ return -ENOSPC;
+ }
+
+ if (is_kdump_kernel()) {
+ dev_info(enic_get_dev(enic), "Running from within kdump kernel. Using minimal resources\n");
+ enic->rq_avail = 1;
+ enic->wq_avail = 1;
+ enic->config.rq_desc_count = ENIC_MIN_RQ_DESCS;
+ enic->config.wq_desc_count = ENIC_MIN_WQ_DESCS;
+ enic->config.mtu = min_t(u16, 1500, enic->config.mtu);
+ }
+
+ /* if RSS isn't set, then we can only use one RQ */
+ if (!ENIC_SETTING(enic, RSS))
+ enic->rq_avail = 1;
+
+ switch (vnic_dev_get_intr_mode(enic->vdev)) {
+ case VNIC_DEV_INTR_MODE_INTX:
+ case VNIC_DEV_INTR_MODE_MSI:
+ enic->rq_count = 1;
+ enic->wq_count = 1;
+ enic->cq_count = 2;
+ enic->intr_count = enic->intr_avail;
+ break;
+ case VNIC_DEV_INTR_MODE_MSIX:
+ /* Adjust the number of wqs/rqs/cqs/interrupts that will be
+ * used based on which resource is the most constrained
+ */
+ wq_avail = min(enic->wq_avail, ENIC_WQ_MAX);
+ rq_default = max(netif_get_num_default_rss_queues(),
+ ENIC_RQ_MIN_DEFAULT);
+ rq_avail = min3(enic->rq_avail, ENIC_RQ_MAX, rq_default);
+ max_queues = min(enic->cq_avail,
+ enic->intr_avail - ENIC_MSIX_RESERVED_INTR);
+ if (wq_avail + rq_avail <= max_queues) {
+ enic->rq_count = rq_avail;
+ enic->wq_count = wq_avail;
+ } else {
+ /* recalculate wq/rq count */
+ if (rq_avail < wq_avail) {
+ enic->rq_count = min(rq_avail, max_queues / 2);
+ enic->wq_count = max_queues - enic->rq_count;
+ } else {
+ enic->wq_count = min(wq_avail, max_queues / 2);
+ enic->rq_count = max_queues - enic->wq_count;
+ }
+ }
+ enic->cq_count = enic->rq_count + enic->wq_count;
+ enic->intr_count = enic->cq_count + ENIC_MSIX_RESERVED_INTR;
+
+ break;
+ default:
+ dev_err(enic_get_dev(enic), "Unknown interrupt mode\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void enic_get_queue_stats_rx(struct net_device *dev, int idx,
+ struct netdev_queue_stats_rx *rxs)
+{
+ struct enic *enic = netdev_priv(dev);
+ struct enic_rq_stats *rqstats = &enic->rq[idx].stats;
+
+ rxs->bytes = rqstats->bytes;
+ rxs->packets = rqstats->packets;
+ rxs->hw_drops = rqstats->bad_fcs + rqstats->pkt_truncated;
+ rxs->hw_drop_overruns = rqstats->pkt_truncated;
+ rxs->csum_unnecessary = rqstats->csum_unnecessary +
+ rqstats->csum_unnecessary_encap;
+ rxs->alloc_fail = rqstats->pp_alloc_fail;
+}
+
+static void enic_get_queue_stats_tx(struct net_device *dev, int idx,
+ struct netdev_queue_stats_tx *txs)
+{
+ struct enic *enic = netdev_priv(dev);
+ struct enic_wq_stats *wqstats = &enic->wq[idx].stats;
+
+ txs->bytes = wqstats->bytes;
+ txs->packets = wqstats->packets;
+ txs->csum_none = wqstats->csum_none;
+ txs->needs_csum = wqstats->csum_partial + wqstats->encap_csum +
+ wqstats->tso;
+ txs->hw_gso_packets = wqstats->tso;
+ txs->stop = wqstats->stopped;
+ txs->wake = wqstats->wake;
+}
+
+static void enic_get_base_stats(struct net_device *dev,
+ struct netdev_queue_stats_rx *rxs,
+ struct netdev_queue_stats_tx *txs)
+{
+ rxs->bytes = 0;
+ rxs->packets = 0;
+ rxs->hw_drops = 0;
+ rxs->hw_drop_overruns = 0;
+ rxs->csum_unnecessary = 0;
+ rxs->alloc_fail = 0;
+ txs->bytes = 0;
+ txs->packets = 0;
+ txs->csum_none = 0;
+ txs->needs_csum = 0;
+ txs->hw_gso_packets = 0;
+ txs->stop = 0;
+ txs->wake = 0;
+}
+
static const struct net_device_ops enic_netdev_dynamic_ops = {
.ndo_open = enic_open,
.ndo_stop = enic_stop,
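
The MSI-X branch of enic_adjust_resources() above divides the usable interrupts between RX and TX queues. A stand-alone sketch of that split; it omits the netif_get_num_default_rss_queues()/ENIC_RQ_MAX clamps, and the reserved-interrupt count of 2 is an assumption:

#include <stdio.h>

#define MIN(a, b)	((a) < (b) ? (a) : (b))

/* Queues are capped by available CQs and by the interrupts left after the
 * reserved ones; when the sum does not fit, the scarcer side gets at most
 * half and the other side takes the remainder. */
static void split_queues(int rq_avail, int wq_avail, int cq_avail,
			 int intr_avail, int *rq_count, int *wq_count)
{
	const int reserved_intr = 2;	/* assumed ENIC_MSIX_RESERVED_INTR */
	int max_queues = MIN(cq_avail, intr_avail - reserved_intr);

	if (wq_avail + rq_avail <= max_queues) {
		*rq_count = rq_avail;
		*wq_count = wq_avail;
	} else if (rq_avail < wq_avail) {
		*rq_count = MIN(rq_avail, max_queues / 2);
		*wq_count = max_queues - *rq_count;
	} else {
		*wq_count = MIN(wq_avail, max_queues / 2);
		*rq_count = max_queues - *wq_count;
	}
}

int main(void)
{
	int rq, wq;

	/* e.g. 8 RQs and 8 WQs available, but only 12 usable interrupts */
	split_queues(8, 8, 16, 14, &rq, &wq);
	printf("rq=%d wq=%d\n", rq, wq);	/* prints: rq=6 wq=6 */
	return 0;
}
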
@@ -2538,6 +2424,77 @@ static const struct net_device_ops enic_netdev_ops = {
.ndo_features_check = enic_features_check,
};
+static const struct netdev_stat_ops enic_netdev_stat_ops = {
+ .get_queue_stats_rx = enic_get_queue_stats_rx,
+ .get_queue_stats_tx = enic_get_queue_stats_tx,
+ .get_base_stats = enic_get_base_stats,
+};
+
+static void enic_free_enic_resources(struct enic *enic)
+{
+ kfree(enic->wq);
+ enic->wq = NULL;
+
+ kfree(enic->rq);
+ enic->rq = NULL;
+
+ kfree(enic->cq);
+ enic->cq = NULL;
+
+ kfree(enic->napi);
+ enic->napi = NULL;
+
+ kfree(enic->msix_entry);
+ enic->msix_entry = NULL;
+
+ kfree(enic->msix);
+ enic->msix = NULL;
+
+ kfree(enic->intr);
+ enic->intr = NULL;
+}
+
+static int enic_alloc_enic_resources(struct enic *enic)
+{
+ enic->wq = kcalloc(enic->wq_avail, sizeof(struct enic_wq), GFP_KERNEL);
+ if (!enic->wq)
+ goto free_queues;
+
+ enic->rq = kcalloc(enic->rq_avail, sizeof(struct enic_rq), GFP_KERNEL);
+ if (!enic->rq)
+ goto free_queues;
+
+ enic->cq = kcalloc(enic->cq_avail, sizeof(struct vnic_cq), GFP_KERNEL);
+ if (!enic->cq)
+ goto free_queues;
+
+ enic->napi = kcalloc(enic->wq_avail + enic->rq_avail,
+ sizeof(struct napi_struct), GFP_KERNEL);
+ if (!enic->napi)
+ goto free_queues;
+
+ enic->msix_entry = kcalloc(enic->intr_avail, sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!enic->msix_entry)
+ goto free_queues;
+
+ enic->msix = kcalloc(enic->intr_avail, sizeof(struct enic_msix_entry),
+ GFP_KERNEL);
+ if (!enic->msix)
+ goto free_queues;
+
+ enic->intr = kcalloc(enic->intr_avail, sizeof(struct vnic_intr),
+ GFP_KERNEL);
+ if (!enic->intr)
+ goto free_queues;
+
+ return 0;
+
+free_queues:
+ enic_free_enic_resources(enic);
+ return -ENOMEM;
+}
+
static void enic_dev_deinit(struct enic *enic)
{
unsigned int i;
@@ -2555,18 +2512,7 @@ static void enic_dev_deinit(struct enic *enic)
enic_free_vnic_resources(enic);
enic_clear_intr_mode(enic);
enic_free_affinity_hint(enic);
-}
-
-static void enic_kdump_kernel_config(struct enic *enic)
-{
- if (is_kdump_kernel()) {
- dev_info(enic_get_dev(enic), "Running from within kdump kernel. Using minimal resources\n");
- enic->rq_count = 1;
- enic->wq_count = 1;
- enic->config.rq_desc_count = ENIC_MIN_RQ_DESCS;
- enic->config.wq_desc_count = ENIC_MIN_WQ_DESCS;
- enic->config.mtu = min_t(u16, 1500, enic->config.mtu);
- }
+ enic_free_enic_resources(enic);
}
static int enic_dev_init(struct enic *enic)
@@ -2598,19 +2544,28 @@ static int enic_dev_init(struct enic *enic)
enic_get_res_counts(enic);
- /* modify resource count if we are in kdump_kernel
- */
- enic_kdump_kernel_config(enic);
+ enic_ext_cq(enic);
- /* Set interrupt mode based on resource counts and system
- * capabilities
- */
+ err = enic_alloc_enic_resources(enic);
+ if (err) {
+ dev_err(dev, "Failed to allocate enic resources\n");
+ return err;
+ }
+
+ /* Set interrupt mode based on system capabilities */
err = enic_set_intr_mode(enic);
if (err) {
dev_err(dev, "Failed to set intr mode based on resource "
"counts and system capabilities, aborting\n");
- return err;
+ goto err_out_free_vnic_resources;
+ }
+
+ /* Adjust resource counts based on most constrained resources */
+ err = enic_adjust_resources(enic);
+ if (err) {
+ dev_err(dev, "Failed to adjust resources\n");
+ goto err_out_free_vnic_resources;
}
/* Allocate and configure vNIC resources
@@ -2652,6 +2607,7 @@ err_out_free_vnic_resources:
enic_free_affinity_hint(enic);
enic_clear_intr_mode(enic);
enic_free_vnic_resources(enic);
+ enic_free_enic_resources(enic);
return err;
}
@@ -2851,13 +2807,12 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
timer_setup(&enic->notify_timer, enic_notify_timer, 0);
enic_rfs_flw_tbl_init(enic);
- enic_set_rx_coal_setting(enic);
INIT_WORK(&enic->reset, enic_reset);
INIT_WORK(&enic->tx_hang_reset, enic_tx_hang_reset);
INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work);
for (i = 0; i < enic->wq_count; i++)
- spin_lock_init(&enic->wq_lock[i]);
+ spin_lock_init(&enic->wq[i].lock);
/* Register net device
*/
@@ -2880,6 +2835,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->netdev_ops = &enic_netdev_dynamic_ops;
else
netdev->netdev_ops = &enic_netdev_ops;
+ netdev->stat_ops = &enic_netdev_stat_ops;
netdev->watchdog_timeo = 2 * HZ;
enic_set_ethtool_ops(netdev);
@@ -2966,7 +2922,6 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(dev, "Cannot register net device, aborting\n");
goto err_out_dev_deinit;
}
- enic->rx_copybreak = RX_COPYBREAK_DEFAULT;
return 0;
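
enic_open() above sizes one page_pool per RQ from the MTU: order = get_order(mtu + VLAN_ETH_HLEN) and max_len is at least one page. A quick user-space check of those parameters, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT	12			/* assumed 4 KiB pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* User-space equivalent of the kernel's get_order(): smallest order such
 * that (PAGE_SIZE << order) >= size. */
static unsigned int get_order_sketch(unsigned long size)
{
	unsigned int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long mtu[] = { 1500, 9000 };

	for (int i = 0; i < 2; i++) {
		unsigned long max_pkt_len = mtu[i] + 18;	/* VLAN_ETH_HLEN */
		unsigned long max_len = max_pkt_len > PAGE_SIZE ?
					max_pkt_len : PAGE_SIZE;

		printf("mtu %lu -> order %u, pp max_len %lu\n", mtu[i],
		       get_order_sketch(max_pkt_len), max_len);
	}
	/* prints: mtu 1500 -> order 0, pp max_len 4096
	 *         mtu 9000 -> order 2, pp max_len 9018 */
	return 0;
}
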
diff --git a/drivers/net/ethernet/cisco/enic/enic_res.c b/drivers/net/ethernet/cisco/enic/enic_res.c
index 1c48aebdbab0..bbd3143ed73e 100644
--- a/drivers/net/ethernet/cisco/enic/enic_res.c
+++ b/drivers/net/ethernet/cisco/enic/enic_res.c
@@ -59,31 +59,38 @@ int enic_get_vnic_config(struct enic *enic)
GET_CONFIG(intr_timer_usec);
GET_CONFIG(loop_tag);
GET_CONFIG(num_arfs);
+ GET_CONFIG(max_rq_ring);
+ GET_CONFIG(max_wq_ring);
+ GET_CONFIG(max_cq_ring);
+
+ if (!c->max_wq_ring)
+ c->max_wq_ring = ENIC_MAX_WQ_DESCS_DEFAULT;
+ if (!c->max_rq_ring)
+ c->max_rq_ring = ENIC_MAX_RQ_DESCS_DEFAULT;
+ if (!c->max_cq_ring)
+ c->max_cq_ring = ENIC_MAX_CQ_DESCS_DEFAULT;
c->wq_desc_count =
- min_t(u32, ENIC_MAX_WQ_DESCS,
- max_t(u32, ENIC_MIN_WQ_DESCS,
- c->wq_desc_count));
+ min_t(u32, c->max_wq_ring,
+ max_t(u32, ENIC_MIN_WQ_DESCS, c->wq_desc_count));
c->wq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */
c->rq_desc_count =
- min_t(u32, ENIC_MAX_RQ_DESCS,
- max_t(u32, ENIC_MIN_RQ_DESCS,
- c->rq_desc_count));
+ min_t(u32, c->max_rq_ring,
+ max_t(u32, ENIC_MIN_RQ_DESCS, c->rq_desc_count));
c->rq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */
if (c->mtu == 0)
c->mtu = 1500;
- c->mtu = min_t(u16, ENIC_MAX_MTU,
- max_t(u16, ENIC_MIN_MTU,
- c->mtu));
+ c->mtu = min_t(u16, ENIC_MAX_MTU, max_t(u16, ENIC_MIN_MTU, c->mtu));
c->intr_timer_usec = min_t(u32, c->intr_timer_usec,
vnic_dev_get_intr_coal_timer_max(enic->vdev));
dev_info(enic_get_dev(enic),
- "vNIC MAC addr %pM wq/rq %d/%d mtu %d\n",
- enic->mac_addr, c->wq_desc_count, c->rq_desc_count, c->mtu);
+ "vNIC MAC addr %pM wq/rq %d/%d max wq/rq/cq %d/%d/%d mtu %d\n",
+ enic->mac_addr, c->wq_desc_count, c->rq_desc_count,
+ c->max_wq_ring, c->max_rq_ring, c->max_cq_ring, c->mtu);
dev_info(enic_get_dev(enic), "vNIC csum tx/rx %s/%s "
"tso/lro %s/%s rss %s intr mode %s type %s timer %d usec "
@@ -176,9 +183,9 @@ void enic_free_vnic_resources(struct enic *enic)
unsigned int i;
for (i = 0; i < enic->wq_count; i++)
- vnic_wq_free(&enic->wq[i]);
+ vnic_wq_free(&enic->wq[i].vwq);
for (i = 0; i < enic->rq_count; i++)
- vnic_rq_free(&enic->rq[i]);
+ vnic_rq_free(&enic->rq[i].vrq);
for (i = 0; i < enic->cq_count; i++)
vnic_cq_free(&enic->cq[i]);
for (i = 0; i < enic->intr_count; i++)
@@ -187,16 +194,21 @@ void enic_free_vnic_resources(struct enic *enic)
void enic_get_res_counts(struct enic *enic)
{
- enic->wq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ);
- enic->rq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_RQ);
- enic->cq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_CQ);
- enic->intr_count = vnic_dev_get_res_count(enic->vdev,
- RES_TYPE_INTR_CTRL);
+ enic->wq_avail = vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ);
+ enic->rq_avail = vnic_dev_get_res_count(enic->vdev, RES_TYPE_RQ);
+ enic->cq_avail = vnic_dev_get_res_count(enic->vdev, RES_TYPE_CQ);
+ enic->intr_avail = vnic_dev_get_res_count(enic->vdev,
+ RES_TYPE_INTR_CTRL);
+
+ enic->wq_count = enic->wq_avail;
+ enic->rq_count = enic->rq_avail;
+ enic->cq_count = enic->cq_avail;
+ enic->intr_count = enic->intr_avail;
dev_info(enic_get_dev(enic),
"vNIC resources avail: wq %d rq %d cq %d intr %d\n",
- enic->wq_count, enic->rq_count,
- enic->cq_count, enic->intr_count);
+ enic->wq_avail, enic->rq_avail,
+ enic->cq_avail, enic->intr_avail);
}
void enic_init_vnic_resources(struct enic *enic)
@@ -221,9 +233,12 @@ void enic_init_vnic_resources(struct enic *enic)
switch (intr_mode) {
case VNIC_DEV_INTR_MODE_INTX:
+ error_interrupt_enable = 1;
+ error_interrupt_offset = ENIC_LEGACY_ERR_INTR;
+ break;
case VNIC_DEV_INTR_MODE_MSIX:
error_interrupt_enable = 1;
- error_interrupt_offset = enic->intr_count - 2;
+ error_interrupt_offset = enic_msix_err_intr(enic);
break;
default:
error_interrupt_enable = 0;
@@ -233,7 +248,7 @@ void enic_init_vnic_resources(struct enic *enic)
for (i = 0; i < enic->rq_count; i++) {
cq_index = i;
- vnic_rq_init(&enic->rq[i],
+ vnic_rq_init(&enic->rq[i].vrq,
cq_index,
error_interrupt_enable,
error_interrupt_offset);
@@ -241,7 +256,7 @@ void enic_init_vnic_resources(struct enic *enic)
for (i = 0; i < enic->wq_count; i++) {
cq_index = enic->rq_count + i;
- vnic_wq_init(&enic->wq[i],
+ vnic_wq_init(&enic->wq[i].vwq,
cq_index,
error_interrupt_enable,
error_interrupt_offset);
@@ -249,15 +264,15 @@ void enic_init_vnic_resources(struct enic *enic)
/* Init CQ resources
*
- * CQ[0 - n+m-1] point to INTR[0] for INTx, MSI
- * CQ[0 - n+m-1] point to INTR[0 - n+m-1] for MSI-X
+ * All CQs point to INTR[0] for INTx, MSI
+ * CQ[i] point to INTR[ENIC_MSIX_IO_INTR_BASE + i] for MSI-X
*/
for (i = 0; i < enic->cq_count; i++) {
switch (intr_mode) {
case VNIC_DEV_INTR_MODE_MSIX:
- interrupt_offset = i;
+ interrupt_offset = ENIC_MSIX_IO_INTR_BASE + i;
break;
default:
interrupt_offset = 0;
@@ -304,6 +319,7 @@ void enic_init_vnic_resources(struct enic *enic)
int enic_alloc_vnic_resources(struct enic *enic)
{
enum vnic_dev_intr_mode intr_mode;
+ int rq_cq_desc_size;
unsigned int i;
int err;
@@ -318,11 +334,29 @@ int enic_alloc_vnic_resources(struct enic *enic)
intr_mode == VNIC_DEV_INTR_MODE_MSIX ? "MSI-X" :
"unknown");
+ switch (enic->ext_cq) {
+ case ENIC_RQ_CQ_ENTRY_SIZE_16:
+ rq_cq_desc_size = 16;
+ break;
+ case ENIC_RQ_CQ_ENTRY_SIZE_32:
+ rq_cq_desc_size = 32;
+ break;
+ case ENIC_RQ_CQ_ENTRY_SIZE_64:
+ rq_cq_desc_size = 64;
+ break;
+ default:
+ dev_err(enic_get_dev(enic),
+ "Unable to determine rq cq desc size: %d",
+ enic->ext_cq);
+ err = -ENODEV;
+ goto err_out;
+ }
+
/* Allocate queue resources
*/
for (i = 0; i < enic->wq_count; i++) {
- err = vnic_wq_alloc(enic->vdev, &enic->wq[i], i,
+ err = vnic_wq_alloc(enic->vdev, &enic->wq[i].vwq, i,
enic->config.wq_desc_count,
sizeof(struct wq_enet_desc));
if (err)
@@ -330,7 +364,7 @@ int enic_alloc_vnic_resources(struct enic *enic)
}
for (i = 0; i < enic->rq_count; i++) {
- err = vnic_rq_alloc(enic->vdev, &enic->rq[i], i,
+ err = vnic_rq_alloc(enic->vdev, &enic->rq[i].vrq, i,
enic->config.rq_desc_count,
sizeof(struct rq_enet_desc));
if (err)
@@ -340,8 +374,8 @@ int enic_alloc_vnic_resources(struct enic *enic)
for (i = 0; i < enic->cq_count; i++) {
if (i < enic->rq_count)
err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i,
- enic->config.rq_desc_count,
- sizeof(struct cq_enet_rq_desc));
+ enic->config.rq_desc_count,
+ rq_cq_desc_size);
else
err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i,
enic->config.wq_desc_count,
@@ -372,6 +406,39 @@ int enic_alloc_vnic_resources(struct enic *enic)
err_out_cleanup:
enic_free_vnic_resources(enic);
-
+err_out:
return err;
}
+
+/*
+ * CMD_CQ_ENTRY_SIZE_SET can fail on older hw generations that don't support
+ * that command
+ */
+void enic_ext_cq(struct enic *enic)
+{
+ u64 a0 = CMD_CQ_ENTRY_SIZE_SET, a1 = 0;
+ int wait = 1000;
+ int ret;
+
+ spin_lock_bh(&enic->devcmd_lock);
+ ret = vnic_dev_cmd(enic->vdev, CMD_CAPABILITY, &a0, &a1, wait);
+ if (ret || a0) {
+ dev_info(&enic->pdev->dev,
+ "CMD_CQ_ENTRY_SIZE_SET not supported.");
+ enic->ext_cq = ENIC_RQ_CQ_ENTRY_SIZE_16;
+ goto out;
+ }
+ a1 &= VNIC_RQ_CQ_ENTRY_SIZE_ALL_BIT;
+ enic->ext_cq = fls(a1) - 1;
+ a0 = VNIC_RQ_ALL;
+ a1 = enic->ext_cq;
+ ret = vnic_dev_cmd(enic->vdev, CMD_CQ_ENTRY_SIZE_SET, &a0, &a1, wait);
+ if (ret) {
+ dev_info(&enic->pdev->dev, "CMD_CQ_ENTRY_SIZE_SET failed.");
+ enic->ext_cq = ENIC_RQ_CQ_ENTRY_SIZE_16;
+ }
+out:
+ spin_unlock_bh(&enic->devcmd_lock);
+ dev_info(&enic->pdev->dev, "CQ entry size set to %d bytes",
+ 16 << enic->ext_cq);
+}
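
enic_ext_cq() above picks the largest RQ completion-entry size the firmware advertises: a1 is masked to the supported-size bits, fls() selects the highest one, and 16 << ext_cq gives the byte size. A small stand-alone walk-through; the bit meanings (16/32/64 bytes for bits 0/1/2) follow the ENIC_RQ_CQ_ENTRY_SIZE_* values used in enic_alloc_vnic_resources(), and the capability masks below are made up:

#include <stdio.h>

/* User-space fls(): 1-based position of the most significant set bit,
 * 0 when no bit is set - same convention as the kernel helper. */
static int fls_sketch(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	/* a1 after masking with VNIC_RQ_CQ_ENTRY_SIZE_ALL_BIT:
	 * bit0 = 16 B, bit1 = 32 B, bit2 = 64 B (illustrative values) */
	unsigned int masks[] = { 0x1, 0x3, 0x7 };

	for (int i = 0; i < 3; i++) {
		int ext_cq = fls_sketch(masks[i]) - 1;

		printf("cap mask 0x%x -> ext_cq %d -> %d-byte RQ CQ entries\n",
		       masks[i], ext_cq, 16 << ext_cq);
	}
	/* prints 16, 32 and 64 bytes respectively */
	return 0;
}
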
diff --git a/drivers/net/ethernet/cisco/enic/enic_res.h b/drivers/net/ethernet/cisco/enic/enic_res.h
index b8ee42d297aa..02dca1ae4a22 100644
--- a/drivers/net/ethernet/cisco/enic/enic_res.h
+++ b/drivers/net/ethernet/cisco/enic/enic_res.h
@@ -12,10 +12,13 @@
#include "vnic_wq.h"
#include "vnic_rq.h"
-#define ENIC_MIN_WQ_DESCS 64
-#define ENIC_MAX_WQ_DESCS 4096
-#define ENIC_MIN_RQ_DESCS 64
-#define ENIC_MAX_RQ_DESCS 4096
+#define ENIC_MIN_WQ_DESCS 64
+#define ENIC_MAX_WQ_DESCS_DEFAULT 4096
+#define ENIC_MAX_WQ_DESCS 16384
+#define ENIC_MIN_RQ_DESCS 64
+#define ENIC_MAX_RQ_DESCS 16384
+#define ENIC_MAX_RQ_DESCS_DEFAULT 4096
+#define ENIC_MAX_CQ_DESCS_DEFAULT (64 * 1024)
#define ENIC_MIN_MTU ETH_MIN_MTU
#define ENIC_MAX_MTU 9000
diff --git a/drivers/net/ethernet/cisco/enic/enic_rq.c b/drivers/net/ethernet/cisco/enic/enic_rq.c
new file mode 100644
index 000000000000..ccbf5c9a21d0
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_rq.c
@@ -0,0 +1,436 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2024 Cisco Systems, Inc. All rights reserved.
+
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <net/busy_poll.h>
+#include "enic.h"
+#include "enic_res.h"
+#include "enic_rq.h"
+#include "vnic_rq.h"
+#include "cq_enet_desc.h"
+
+#define ENIC_LARGE_PKT_THRESHOLD 1000
+
+static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size,
+ u32 pkt_len)
+{
+ if (pkt_len > ENIC_LARGE_PKT_THRESHOLD)
+ pkt_size->large_pkt_bytes_cnt += pkt_len;
+ else
+ pkt_size->small_pkt_bytes_cnt += pkt_len;
+}
+
+static void enic_rq_cq_desc_dec(void *cq_desc, u8 cq_desc_size, u8 *type,
+ u8 *color, u16 *q_number, u16 *completed_index)
+{
+ /* type_color is the last field for all cq structs */
+ u8 type_color;
+
+ switch (cq_desc_size) {
+ case VNIC_RQ_CQ_ENTRY_SIZE_16: {
+ struct cq_enet_rq_desc *desc =
+ (struct cq_enet_rq_desc *)cq_desc;
+ type_color = desc->type_color;
+
+ /* Make sure color bit is read from desc *before* other fields
+ * are read from desc. Hardware guarantees color bit is last
+ * bit (byte) written. Adding the rmb() prevents the compiler
+ * and/or CPU from reordering the reads which would potentially
+ * result in reading stale values.
+ */
+ rmb();
+
+ *q_number = le16_to_cpu(desc->q_number_rss_type_flags) &
+ CQ_DESC_Q_NUM_MASK;
+ *completed_index = le16_to_cpu(desc->completed_index_flags) &
+ CQ_DESC_COMP_NDX_MASK;
+ break;
+ }
+ case VNIC_RQ_CQ_ENTRY_SIZE_32: {
+ struct cq_enet_rq_desc_32 *desc =
+ (struct cq_enet_rq_desc_32 *)cq_desc;
+ type_color = desc->type_color;
+
+ /* Make sure color bit is read from desc *before* other fields
+ * are read from desc. Hardware guarantees color bit is last
+ * bit (byte) written. Adding the rmb() prevents the compiler
+ * and/or CPU from reordering the reads which would potentially
+ * result in reading stale values.
+ */
+ rmb();
+
+ *q_number = le16_to_cpu(desc->q_number_rss_type_flags) &
+ CQ_DESC_Q_NUM_MASK;
+ *completed_index = le16_to_cpu(desc->completed_index_flags) &
+ CQ_DESC_COMP_NDX_MASK;
+ *completed_index |= (desc->fetch_index_flags & CQ_DESC_32_FI_MASK) <<
+ CQ_DESC_COMP_NDX_BITS;
+ break;
+ }
+ case VNIC_RQ_CQ_ENTRY_SIZE_64: {
+ struct cq_enet_rq_desc_64 *desc =
+ (struct cq_enet_rq_desc_64 *)cq_desc;
+ type_color = desc->type_color;
+
+ /* Make sure color bit is read from desc *before* other fields
+ * are read from desc. Hardware guarantees color bit is last
+ * bit (byte) written. Adding the rmb() prevents the compiler
+ * and/or CPU from reordering the reads which would potentially
+ * result in reading stale values.
+ */
+ rmb();
+
+ *q_number = le16_to_cpu(desc->q_number_rss_type_flags) &
+ CQ_DESC_Q_NUM_MASK;
+ *completed_index = le16_to_cpu(desc->completed_index_flags) &
+ CQ_DESC_COMP_NDX_MASK;
+ *completed_index |= (desc->fetch_index_flags & CQ_DESC_64_FI_MASK) <<
+ CQ_DESC_COMP_NDX_BITS;
+ break;
+ }
+ }
+
+ *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
+ *type = type_color & CQ_DESC_TYPE_MASK;
+}
+
+static void enic_rq_set_skb_flags(struct vnic_rq *vrq, u8 type, u32 rss_hash,
+ u8 rss_type, u8 fcoe, u8 fcoe_fc_crc_ok,
+ u8 vlan_stripped, u8 csum_not_calc,
+ u8 tcp_udp_csum_ok, u8 ipv6, u8 ipv4_csum_ok,
+ u16 vlan_tci, struct sk_buff *skb)
+{
+ struct enic *enic = vnic_dev_priv(vrq->vdev);
+ struct net_device *netdev = enic->netdev;
+ struct enic_rq_stats *rqstats = &enic->rq[vrq->index].stats;
+ bool outer_csum_ok = true, encap = false;
+
+ if ((netdev->features & NETIF_F_RXHASH) && rss_hash && type == 3) {
+ switch (rss_type) {
+ case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4:
+ case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6:
+ case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX:
+ skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L4);
+ rqstats->l4_rss_hash++;
+ break;
+ case CQ_ENET_RQ_DESC_RSS_TYPE_IPv4:
+ case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6:
+ case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX:
+ skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L3);
+ rqstats->l3_rss_hash++;
+ break;
+ }
+ }
+ if (enic->vxlan.vxlan_udp_port_number) {
+ switch (enic->vxlan.patch_level) {
+ case 0:
+ if (fcoe) {
+ encap = true;
+ outer_csum_ok = fcoe_fc_crc_ok;
+ }
+ break;
+ case 2:
+ if (type == 7 && (rss_hash & BIT(0))) {
+ encap = true;
+ outer_csum_ok = (rss_hash & BIT(1)) &&
+ (rss_hash & BIT(2));
+ }
+ break;
+ }
+ }
+
+ /* Hardware does not provide whole packet checksum. It only
+	 * provides a pseudo checksum. Since hw validates the packet
+	 * checksum but does not provide the checksum value, use
+	 * CHECKSUM_UNNECESSARY.
+ *
+ * In case of encap pkt tcp_udp_csum_ok/tcp_udp_csum_ok is
+ * inner csum_ok. outer_csum_ok is set by hw when outer udp
+ * csum is correct or is zero.
+ */
+ if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc &&
+ tcp_udp_csum_ok && outer_csum_ok && (ipv4_csum_ok || ipv6)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->csum_level = encap;
+ if (encap)
+ rqstats->csum_unnecessary_encap++;
+ else
+ rqstats->csum_unnecessary++;
+ }
+
+ if (vlan_stripped) {
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
+ rqstats->vlan_stripped++;
+ }
+}
+
+/*
+ * cq_enet_rq_desc accesses section uses only the 1st 15 bytes of the cq which
+ * is identical for all type (16,32 and 64 byte) of cqs.
+ */
+static void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc, u8 *ingress_port,
+ u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type,
+ u8 *csum_not_calc, u32 *rss_hash,
+ u16 *bytes_written, u8 *packet_error,
+ u8 *vlan_stripped, u16 *vlan_tci,
+ u16 *checksum, u8 *fcoe_sof,
+ u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error,
+ u8 *fcoe_eof, u8 *tcp_udp_csum_ok, u8 *udp,
+ u8 *tcp, u8 *ipv4_csum_ok, u8 *ipv6, u8 *ipv4,
+ u8 *ipv4_fragment, u8 *fcs_ok)
+{
+ u16 completed_index_flags;
+ u16 q_number_rss_type_flags;
+ u16 bytes_written_flags;
+
+ completed_index_flags = le16_to_cpu(desc->completed_index_flags);
+ q_number_rss_type_flags =
+ le16_to_cpu(desc->q_number_rss_type_flags);
+ bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);
+
+ *ingress_port = (completed_index_flags &
+ CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0;
+ *fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ?
+ 1 : 0;
+ *eop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_EOP) ?
+ 1 : 0;
+ *sop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_SOP) ?
+ 1 : 0;
+
+ *rss_type = (u8)((q_number_rss_type_flags >> CQ_DESC_Q_NUM_BITS) &
+ CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
+ *csum_not_calc = (q_number_rss_type_flags &
+ CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ? 1 : 0;
+
+ *rss_hash = le32_to_cpu(desc->rss_hash);
+
+ *bytes_written = bytes_written_flags &
+ CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
+ *packet_error = (bytes_written_flags &
+ CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ? 1 : 0;
+ *vlan_stripped = (bytes_written_flags &
+ CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0;
+
+ /*
+ * Tag Control Information(16) = user_priority(3) + cfi(1) + vlan(12)
+ */
+ *vlan_tci = le16_to_cpu(desc->vlan);
+
+ if (*fcoe) {
+ *fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) &
+ CQ_ENET_RQ_DESC_FCOE_SOF_MASK);
+ *fcoe_fc_crc_ok = (desc->flags &
+ CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0;
+ *fcoe_enc_error = (desc->flags &
+ CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0;
+ *fcoe_eof = (u8)((le16_to_cpu(desc->checksum_fcoe) >>
+ CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) &
+ CQ_ENET_RQ_DESC_FCOE_EOF_MASK);
+ *checksum = 0;
+ } else {
+ *fcoe_sof = 0;
+ *fcoe_fc_crc_ok = 0;
+ *fcoe_enc_error = 0;
+ *fcoe_eof = 0;
+ *checksum = le16_to_cpu(desc->checksum_fcoe);
+ }
+
+ *tcp_udp_csum_ok =
+ (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0;
+ *udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0;
+ *tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0;
+ *ipv4_csum_ok =
+ (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0;
+ *ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0;
+ *ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0;
+ *ipv4_fragment =
+ (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0;
+ *fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 1 : 0;
+}
+
+static bool enic_rq_pkt_error(struct vnic_rq *vrq, u8 packet_error, u8 fcs_ok,
+ u16 bytes_written)
+{
+ struct enic *enic = vnic_dev_priv(vrq->vdev);
+ struct enic_rq_stats *rqstats = &enic->rq[vrq->index].stats;
+
+ if (packet_error) {
+ if (!fcs_ok) {
+ if (bytes_written > 0)
+ rqstats->bad_fcs++;
+ else if (bytes_written == 0)
+ rqstats->pkt_truncated++;
+ }
+ return true;
+ }
+ return false;
+}
+
+int enic_rq_alloc_buf(struct vnic_rq *rq)
+{
+ struct enic *enic = vnic_dev_priv(rq->vdev);
+ struct net_device *netdev = enic->netdev;
+ struct enic_rq *erq = &enic->rq[rq->index];
+ struct enic_rq_stats *rqstats = &erq->stats;
+ unsigned int offset = 0;
+ unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
+ unsigned int os_buf_index = 0;
+ dma_addr_t dma_addr;
+ struct vnic_rq_buf *buf = rq->to_use;
+ struct page *page;
+ unsigned int truesize = len;
+
+ if (buf->os_buf) {
+ enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr,
+ buf->len);
+
+ return 0;
+ }
+
+ page = page_pool_dev_alloc(erq->pool, &offset, &truesize);
+ if (unlikely(!page)) {
+ rqstats->pp_alloc_fail++;
+ return -ENOMEM;
+ }
+ buf->offset = offset;
+ buf->truesize = truesize;
+ dma_addr = page_pool_get_dma_addr(page) + offset;
+ enic_queue_rq_desc(rq, (void *)page, os_buf_index, dma_addr, len);
+
+ return 0;
+}
+
+void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
+{
+ struct enic *enic = vnic_dev_priv(rq->vdev);
+ struct enic_rq *erq = &enic->rq[rq->index];
+
+ if (!buf->os_buf)
+ return;
+
+ page_pool_put_full_page(erq->pool, (struct page *)buf->os_buf, true);
+ buf->os_buf = NULL;
+}
+
+static void enic_rq_indicate_buf(struct enic *enic, struct vnic_rq *rq,
+ struct vnic_rq_buf *buf, void *cq_desc,
+ u8 type, u16 q_number, u16 completed_index)
+{
+ struct sk_buff *skb;
+ struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
+ struct enic_rq_stats *rqstats = &enic->rq[rq->index].stats;
+ struct napi_struct *napi;
+
+ u8 eop, sop, ingress_port, vlan_stripped;
+ u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
+ u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
+ u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
+ u8 packet_error;
+ u16 bytes_written, vlan_tci, checksum;
+ u32 rss_hash;
+
+ rqstats->packets++;
+
+ cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc, &ingress_port,
+ &fcoe, &eop, &sop, &rss_type, &csum_not_calc,
+ &rss_hash, &bytes_written, &packet_error,
+ &vlan_stripped, &vlan_tci, &checksum, &fcoe_sof,
+ &fcoe_fc_crc_ok, &fcoe_enc_error, &fcoe_eof,
+ &tcp_udp_csum_ok, &udp, &tcp, &ipv4_csum_ok, &ipv6,
+ &ipv4, &ipv4_fragment, &fcs_ok);
+
+ if (enic_rq_pkt_error(rq, packet_error, fcs_ok, bytes_written))
+ return;
+
+ if (eop && bytes_written > 0) {
+ /* Good receive
+ */
+ rqstats->bytes += bytes_written;
+ napi = &enic->napi[rq->index];
+ skb = napi_get_frags(napi);
+ if (unlikely(!skb)) {
+ net_warn_ratelimited("%s: skb alloc error rq[%d], desc[%d]\n",
+ enic->netdev->name, rq->index,
+ completed_index);
+ rqstats->no_skb++;
+ return;
+ }
+
+ prefetch(skb->data - NET_IP_ALIGN);
+
+ dma_sync_single_for_cpu(&enic->pdev->dev, buf->dma_addr,
+ bytes_written, DMA_FROM_DEVICE);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ (struct page *)buf->os_buf, buf->offset,
+ bytes_written, buf->truesize);
+ skb_record_rx_queue(skb, q_number);
+ enic_rq_set_skb_flags(rq, type, rss_hash, rss_type, fcoe,
+ fcoe_fc_crc_ok, vlan_stripped,
+ csum_not_calc, tcp_udp_csum_ok, ipv6,
+ ipv4_csum_ok, vlan_tci, skb);
+ skb_mark_for_recycle(skb);
+ napi_gro_frags(napi);
+ if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
+ enic_intr_update_pkt_size(&cq->pkt_size_counter,
+ bytes_written);
+ buf->os_buf = NULL;
+ buf->dma_addr = 0;
+ buf = buf->next;
+ } else {
+ /* Buffer overflow
+ */
+ rqstats->pkt_truncated++;
+ }
+}
+
+static void enic_rq_service(struct enic *enic, void *cq_desc, u8 type,
+ u16 q_number, u16 completed_index)
+{
+ struct enic_rq_stats *rqstats = &enic->rq[q_number].stats;
+ struct vnic_rq *vrq = &enic->rq[q_number].vrq;
+ struct vnic_rq_buf *vrq_buf = vrq->to_clean;
+ int skipped;
+
+ while (1) {
+ skipped = (vrq_buf->index != completed_index);
+ if (!skipped)
+ enic_rq_indicate_buf(enic, vrq, vrq_buf, cq_desc, type,
+ q_number, completed_index);
+ else
+ rqstats->desc_skip++;
+
+ vrq->ring.desc_avail++;
+ vrq->to_clean = vrq_buf->next;
+ vrq_buf = vrq_buf->next;
+ if (!skipped)
+ break;
+ }
+}
+
+unsigned int enic_rq_cq_service(struct enic *enic, unsigned int cq_index,
+ unsigned int work_to_do)
+{
+ struct vnic_cq *cq = &enic->cq[cq_index];
+ void *cq_desc = vnic_cq_to_clean(cq);
+ u16 q_number, completed_index;
+ unsigned int work_done = 0;
+ u8 type, color;
+
+ enic_rq_cq_desc_dec(cq_desc, enic->ext_cq, &type, &color, &q_number,
+ &completed_index);
+
+ while (color != cq->last_color) {
+ enic_rq_service(enic, cq_desc, type, q_number, completed_index);
+ vnic_cq_inc_to_clean(cq);
+
+ if (++work_done >= work_to_do)
+ break;
+
+ cq_desc = vnic_cq_to_clean(cq);
+ enic_rq_cq_desc_dec(cq_desc, enic->ext_cq, &type, &color,
+ &q_number, &completed_index);
+ }
+
+ return work_done;
+}
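
The new receive path above no longer allocates an skb per buffer; it attaches the page-pool page as a frag to the per-NAPI skb and lets GRO coalesce. A condensed sketch of that shape, using only calls already present in this file (error and statistics handling elided):

static void enic_rx_frag_sketch(struct napi_struct *napi, struct page *page,
				unsigned int offset, unsigned int len,
				unsigned int truesize, u16 rxq)
{
	struct sk_buff *skb = napi_get_frags(napi);	/* per-NAPI frag skb */

	if (!skb)
		return;			/* counted as rqstats->no_skb above */

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, len,
			truesize);
	skb_record_rx_queue(skb, rxq);
	skb_mark_for_recycle(skb);	/* page returns to its page_pool */
	napi_gro_frags(napi);		/* hand the frag list to GRO */
}
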
diff --git a/drivers/net/ethernet/cisco/enic/enic_rq.h b/drivers/net/ethernet/cisco/enic/enic_rq.h
new file mode 100644
index 000000000000..98476a7297af
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_rq.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright 2024 Cisco Systems, Inc. All rights reserved.
+ */
+
+unsigned int enic_rq_cq_service(struct enic *enic, unsigned int cq_index,
+ unsigned int work_to_do);
+int enic_rq_alloc_buf(struct vnic_rq *rq);
+void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf);
diff --git a/drivers/net/ethernet/cisco/enic/enic_wq.c b/drivers/net/ethernet/cisco/enic/enic_wq.c
new file mode 100644
index 000000000000..07936f8b4231
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_wq.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2025 Cisco Systems, Inc. All rights reserved.
+
+#include <net/netdev_queues.h>
+#include "enic_res.h"
+#include "enic.h"
+#include "enic_wq.h"
+
+#define ENET_CQ_DESC_COMP_NDX_BITS 14
+#define ENET_CQ_DESC_COMP_NDX_MASK GENMASK(ENET_CQ_DESC_COMP_NDX_BITS - 1, 0)
+
+static void enic_wq_cq_desc_dec(const struct cq_desc *desc_arg, bool ext_wq,
+ u8 *type, u8 *color, u16 *q_number,
+ u16 *completed_index)
+{
+ const struct cq_desc *desc = desc_arg;
+ const u8 type_color = desc->type_color;
+
+ *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
+
+ /*
+ * Make sure color bit is read from desc *before* other fields
+ * are read from desc. Hardware guarantees color bit is last
+ * bit (byte) written. Adding the rmb() prevents the compiler
+ * and/or CPU from reordering the reads which would potentially
+ * result in reading stale values.
+ */
+ rmb();
+
+ *type = type_color & CQ_DESC_TYPE_MASK;
+ *q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK;
+
+ if (ext_wq)
+ *completed_index = le16_to_cpu(desc->completed_index) &
+ ENET_CQ_DESC_COMP_NDX_MASK;
+ else
+ *completed_index = le16_to_cpu(desc->completed_index) &
+ CQ_DESC_COMP_NDX_MASK;
+}
+
+void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
+{
+ struct enic *enic = vnic_dev_priv(wq->vdev);
+
+ if (buf->sop)
+ dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_page(&enic->pdev->dev, buf->dma_addr, buf->len,
+ DMA_TO_DEVICE);
+
+ if (buf->os_buf)
+ dev_kfree_skb_any(buf->os_buf);
+}
+
+static void enic_wq_free_buf(struct vnic_wq *wq, struct cq_desc *cq_desc,
+ struct vnic_wq_buf *buf, void *opaque)
+{
+ struct enic *enic = vnic_dev_priv(wq->vdev);
+
+ enic->wq[wq->index].stats.cq_work++;
+ enic->wq[wq->index].stats.cq_bytes += buf->len;
+ enic_free_wq_buf(wq, buf);
+}
+
+static void enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
+ u8 type, u16 q_number, u16 completed_index)
+{
+ struct enic *enic = vnic_dev_priv(vdev);
+
+ spin_lock(&enic->wq[q_number].lock);
+
+ vnic_wq_service(&enic->wq[q_number].vwq, cq_desc,
+ completed_index, enic_wq_free_buf, NULL);
+
+ if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number))
+ && vnic_wq_desc_avail(&enic->wq[q_number].vwq) >=
+ (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)) {
+ netif_wake_subqueue(enic->netdev, q_number);
+ enic->wq[q_number].stats.wake++;
+ }
+
+ spin_unlock(&enic->wq[q_number].lock);
+}
+
+unsigned int enic_wq_cq_service(struct enic *enic, unsigned int cq_index,
+ unsigned int work_to_do)
+{
+ struct vnic_cq *cq = &enic->cq[cq_index];
+ u16 q_number, completed_index;
+ unsigned int work_done = 0;
+ struct cq_desc *cq_desc;
+ u8 type, color;
+ bool ext_wq;
+
+ ext_wq = cq->ring.size > ENIC_MAX_WQ_DESCS_DEFAULT;
+
+ cq_desc = (struct cq_desc *)vnic_cq_to_clean(cq);
+ enic_wq_cq_desc_dec(cq_desc, ext_wq, &type, &color,
+ &q_number, &completed_index);
+
+ while (color != cq->last_color) {
+ enic_wq_service(cq->vdev, cq_desc, type, q_number,
+ completed_index);
+
+ vnic_cq_inc_to_clean(cq);
+
+ if (++work_done >= work_to_do)
+ break;
+
+ cq_desc = (struct cq_desc *)vnic_cq_to_clean(cq);
+ enic_wq_cq_desc_dec(cq_desc, ext_wq, &type, &color,
+ &q_number, &completed_index);
+ }
+
+ return work_done;
+}
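
Both CQ decoders in this series depend on the same ownership handshake: the adapter writes the colour bit last, so the driver must check it (and fence) before trusting the rest of the descriptor. A compressed sketch of that contract, using only symbols that already exist in cq_desc.h; the helper name is hypothetical:

/*
 * Sketch of the color-bit ownership check used by the enic_*_cq_desc_dec()
 * helpers above.  Not driver code; example_cq_desc_ready() is illustrative.
 */
static bool example_cq_desc_ready(const struct cq_desc *desc,
				  const struct vnic_cq *cq,
				  u16 *q_number, u16 *completed_index)
{
	u8 color = (desc->type_color >> CQ_DESC_COLOR_SHIFT) &
		   CQ_DESC_COLOR_MASK;

	if (color == cq->last_color)
		return false;	/* entry still owned by the adapter */

	rmb();			/* read color before the payload fields */

	*q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK;
	*completed_index = le16_to_cpu(desc->completed_index) &
			   CQ_DESC_COMP_NDX_MASK;
	return true;
}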
diff --git a/drivers/net/ethernet/cisco/enic/enic_wq.h b/drivers/net/ethernet/cisco/enic/enic_wq.h
new file mode 100644
index 000000000000..12acb3f2fbc9
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_wq.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright 2025 Cisco Systems, Inc. All rights reserved.
+ */
+
+void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf);
+unsigned int enic_wq_cq_service(struct enic *enic, unsigned int cq_index,
+ unsigned int work_to_do);
diff --git a/drivers/net/ethernet/cisco/enic/vnic_cq.h b/drivers/net/ethernet/cisco/enic/vnic_cq.h
index eed5bf59e5d2..0e37f5d5e527 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_cq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_cq.h
@@ -56,45 +56,18 @@ struct vnic_cq {
ktime_t prev_ts;
};
-static inline unsigned int vnic_cq_service(struct vnic_cq *cq,
- unsigned int work_to_do,
- int (*q_service)(struct vnic_dev *vdev, struct cq_desc *cq_desc,
- u8 type, u16 q_number, u16 completed_index, void *opaque),
- void *opaque)
+static inline void *vnic_cq_to_clean(struct vnic_cq *cq)
{
- struct cq_desc *cq_desc;
- unsigned int work_done = 0;
- u16 q_number, completed_index;
- u8 type, color;
-
- cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
- cq->ring.desc_size * cq->to_clean);
- cq_desc_dec(cq_desc, &type, &color,
- &q_number, &completed_index);
-
- while (color != cq->last_color) {
-
- if ((*q_service)(cq->vdev, cq_desc, type,
- q_number, completed_index, opaque))
- break;
-
- cq->to_clean++;
- if (cq->to_clean == cq->ring.desc_count) {
- cq->to_clean = 0;
- cq->last_color = cq->last_color ? 0 : 1;
- }
-
- cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
- cq->ring.desc_size * cq->to_clean);
- cq_desc_dec(cq_desc, &type, &color,
- &q_number, &completed_index);
+ return ((u8 *)cq->ring.descs + cq->ring.desc_size * cq->to_clean);
+}
- work_done++;
- if (work_done >= work_to_do)
- break;
+static inline void vnic_cq_inc_to_clean(struct vnic_cq *cq)
+{
+ cq->to_clean++;
+ if (cq->to_clean == cq->ring.desc_count) {
+ cq->to_clean = 0;
+ cq->last_color = cq->last_color ? 0 : 1;
}
-
- return work_done;
}
void vnic_cq_free(struct vnic_cq *cq);
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
index 12a83fa1302d..9f6089e81608 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -146,23 +146,19 @@ EXPORT_SYMBOL(vnic_dev_get_res);
static unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
unsigned int desc_count, unsigned int desc_size)
{
- /* The base address of the desc rings must be 512 byte aligned.
- * Descriptor count is aligned to groups of 32 descriptors. A
- * count of 0 means the maximum 4096 descriptors. Descriptor
- * size is aligned to 16 bytes.
- */
-
- unsigned int count_align = 32;
- unsigned int desc_align = 16;
- ring->base_align = 512;
+ /* Descriptor ring base address alignment in bytes */
+ ring->base_align = VNIC_DESC_BASE_ALIGN;
+ /* A count of 0 means the maximum descriptors */
if (desc_count == 0)
- desc_count = 4096;
+ desc_count = VNIC_DESC_MAX_COUNT;
- ring->desc_count = ALIGN(desc_count, count_align);
+ /* Descriptor count aligned in groups of VNIC_DESC_COUNT_ALIGN descriptors */
+ ring->desc_count = ALIGN(desc_count, VNIC_DESC_COUNT_ALIGN);
- ring->desc_size = ALIGN(desc_size, desc_align);
+ /* Descriptor size alignment in bytes */
+ ring->desc_size = ALIGN(desc_size, VNIC_DESC_SIZE_ALIGN);
ring->size = ring->desc_count * ring->desc_size;
ring->size_unaligned = ring->size + ring->base_align;
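
With the magic numbers replaced by named constants, the sizing rules read directly off the code. A worked example (values chosen only for illustration):

/*
 * Example only: a request for 100 descriptors of 13 bytes each.
 *
 *   desc_count     = ALIGN(100, VNIC_DESC_COUNT_ALIGN) = 128
 *   desc_size      = ALIGN(13,  VNIC_DESC_SIZE_ALIGN)  = 16
 *   size           = 128 * 16                          = 2048 bytes
 *   size_unaligned = 2048 + VNIC_DESC_BASE_ALIGN       = 2560 bytes
 *
 * A desc_count of 0 is bumped to VNIC_DESC_MAX_COUNT (4096) first.
 */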
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.h b/drivers/net/ethernet/cisco/enic/vnic_dev.h
index 6273794b923b..7fdd8c661c99 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.h
@@ -31,6 +31,11 @@ static inline void writeq(u64 val, void __iomem *reg)
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define VNIC_DESC_SIZE_ALIGN 16
+#define VNIC_DESC_COUNT_ALIGN 32
+#define VNIC_DESC_BASE_ALIGN 512
+#define VNIC_DESC_MAX_COUNT 4096
+
enum vnic_dev_intr_mode {
VNIC_DEV_INTR_MODE_UNKNOWN,
VNIC_DEV_INTR_MODE_INTX,
diff --git a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
index db56d778877a..605ef17f967e 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
@@ -436,6 +436,25 @@ enum vnic_devcmd_cmd {
* in: (u16) a2 = unsigned short int port information
*/
CMD_OVERLAY_OFFLOAD_CFG = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 73),
+
+ /*
+ * Set extended CQ field in MREGS of RQ (or all RQs)
+ * for given vNIC
+ * in: (u64) a0 = RQ selection (VNIC_RQ_ALL for all RQs)
+ * (u32) a1 = CQ entry size
+ * VNIC_RQ_CQ_ENTRY_SIZE_16 --> 16 bytes
+ * VNIC_RQ_CQ_ENTRY_SIZE_32 --> 32 bytes
+ * VNIC_RQ_CQ_ENTRY_SIZE_64 --> 64 bytes
+ *
+ * Capability query:
+ * out: (u32) a0 = errno, 0:valid cmd
+ * (u32) a1 = value consisting of supported entries
+ * bit 0: 16 bytes
+ * bit 1: 32 bytes
+ * bit 2: 64 bytes
+ */
+ CMD_CQ_ENTRY_SIZE_SET = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 90),
+
};
/* CMD_ENABLE2 flags */
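
Issuing the new command follows the usual devcmd pattern. The sketch below assumes the driver's vnic_dev_cmd() helper and the VNIC_RQ_ALL / VNIC_RQ_CQ_ENTRY_SIZE_64 values named in the comment above; those symbols are defined elsewhere in the series, not in this hunk, so treat the exact names as assumptions:

/*
 * Hedged sketch: request 64-byte CQ entries for every RQ of this vNIC.
 * vnic_dev_cmd() and the VNIC_RQ_* constants are assumed from the rest
 * of the driver and are not introduced by this hunk.
 */
static int example_set_cq_entry_size(struct vnic_dev *vdev)
{
	u64 a0 = VNIC_RQ_ALL;			/* apply to all RQs */
	u64 a1 = VNIC_RQ_CQ_ENTRY_SIZE_64;	/* 64-byte CQ entries */
	int wait = 1000;			/* devcmd completion wait */

	return vnic_dev_cmd(vdev, CMD_CQ_ENTRY_SIZE_SET, &a0, &a1, wait);
}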
diff --git a/drivers/net/ethernet/cisco/enic/vnic_enet.h b/drivers/net/ethernet/cisco/enic/vnic_enet.h
index 5acc236069de..9e8e86262a3f 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_enet.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_enet.h
@@ -21,6 +21,11 @@ struct vnic_enet_config {
u16 loop_tag;
u16 vf_rq_count;
u16 num_arfs;
+ u8 reserved[66];
+ u32 max_rq_ring; // MAX RQ ring size
+ u32 max_wq_ring; // MAX WQ ring size
+ u32 max_cq_ring; // MAX CQ ring size
+ u32 rdma_rsvd_lkey; // Reserved (privileged) LKey
};
#define VENETF_TSO 0x1 /* TSO enabled */
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.h b/drivers/net/ethernet/cisco/enic/vnic_rq.h
index 0bc595abc03b..a1cdd729caec 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.h
@@ -50,7 +50,7 @@ struct vnic_rq_ctrl {
(VNIC_RQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_rq_buf))
#define VNIC_RQ_BUF_BLKS_NEEDED(entries) \
DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES(entries))
-#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096)
+#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(16384)
struct vnic_rq_buf {
struct vnic_rq_buf *next;
@@ -61,6 +61,8 @@ struct vnic_rq_buf {
unsigned int index;
void *desc;
uint64_t wr_id;
+ unsigned int offset;
+ unsigned int truesize;
};
enum enic_poll_state {
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.h b/drivers/net/ethernet/cisco/enic/vnic_wq.h
index 75c526911074..3bb4758100ba 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.h
@@ -62,7 +62,7 @@ struct vnic_wq_buf {
(VNIC_WQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_wq_buf))
#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \
DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES(entries))
-#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096)
+#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(16384)
struct vnic_wq {
unsigned int index;
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 705c3eb19cd3..6a2004bbe87f 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -40,6 +40,7 @@
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <net/gro.h>
#include "gemini.h"
@@ -79,7 +80,8 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
#define GMAC0_IRQ4_8 (GMAC0_MIB_INT_BIT | GMAC0_RX_OVERRUN_INT_BIT)
#define GMAC_OFFLOAD_FEATURES (NETIF_F_SG | NETIF_F_IP_CSUM | \
- NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM)
+ NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | \
+ NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)
/**
* struct gmac_queue_page - page buffer per-page info
@@ -287,13 +289,13 @@ static void gmac_set_flow_control(struct net_device *netdev, bool tx, bool rx)
spin_unlock_irqrestore(&port->config_lock, flags);
}
-static void gmac_speed_set(struct net_device *netdev)
+static void gmac_adjust_link(struct net_device *netdev)
{
struct gemini_ethernet_port *port = netdev_priv(netdev);
struct phy_device *phydev = netdev->phydev;
union gmac_status status, old_status;
- int pause_tx = 0;
- int pause_rx = 0;
+ bool pause_tx = false;
+ bool pause_rx = false;
status.bits32 = readl(port->gmac_base + GMAC_STATUS);
old_status.bits32 = status.bits32;
@@ -328,14 +330,9 @@ static void gmac_speed_set(struct net_device *netdev)
}
if (phydev->duplex == DUPLEX_FULL) {
- u16 lcladv = phy_read(phydev, MII_ADVERTISE);
- u16 rmtadv = phy_read(phydev, MII_LPA);
- u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
-
- if (cap & FLOW_CTRL_RX)
- pause_rx = 1;
- if (cap & FLOW_CTRL_TX)
- pause_tx = 1;
+ phy_get_pause(phydev, &pause_tx, &pause_rx);
+ netdev_dbg(netdev, "set negotiated pause params pause TX = %s, pause RX = %s\n",
+ pause_tx ? "ON" : "OFF", pause_rx ? "ON" : "OFF");
}
gmac_set_flow_control(netdev, pause_tx, pause_rx);
@@ -366,7 +363,7 @@ static int gmac_setup_phy(struct net_device *netdev)
phy = of_phy_get_and_connect(netdev,
dev->of_node,
- gmac_speed_set);
+ gmac_adjust_link);
if (!phy)
return -ENODEV;
netdev->phydev = phy;
@@ -1107,10 +1104,13 @@ static void gmac_tx_irq_enable(struct net_device *netdev,
{
struct gemini_ethernet_port *port = netdev_priv(netdev);
struct gemini_ethernet *geth = port->geth;
+ unsigned long flags;
u32 val, mask;
netdev_dbg(netdev, "%s device %d\n", __func__, netdev->dev_id);
+ spin_lock_irqsave(&geth->irq_lock, flags);
+
mask = GMAC0_IRQ0_TXQ0_INTS << (6 * netdev->dev_id + txq);
if (en)
@@ -1119,6 +1119,8 @@ static void gmac_tx_irq_enable(struct net_device *netdev,
val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
val = en ? val | mask : val & ~mask;
writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
+
+ spin_unlock_irqrestore(&geth->irq_lock, flags);
}
static void gmac_tx_irq(struct net_device *netdev, unsigned int txq_num)
@@ -1142,15 +1144,53 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
struct gmac_txdesc *txd;
skb_frag_t *skb_frag;
dma_addr_t mapping;
+ bool tcp = false;
void *buffer;
+ u16 mss;
int ret;
- /* TODO: implement proper TSO using MTU in word3 */
word1 = skb->len;
word3 = SOF_BIT;
- if (skb->len >= ETH_FRAME_LEN) {
- /* Hardware offloaded checksumming isn't working on frames
+ /* Determine if we are doing TCP */
+ if (skb->protocol == htons(ETH_P_IP))
+ tcp = (ip_hdr(skb)->protocol == IPPROTO_TCP);
+ else
+ /* IPv6 */
+ tcp = (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP);
+
+ mss = skb_shinfo(skb)->gso_size;
+ if (mss) {
+ /* This means we are dealing with TCP and skb->len is the
+ * sum total of all the segments. The TSO will deal with
+ * chopping this up for us.
+ */
+ /* The accelerator needs the full frame size here */
+ mss += skb_tcp_all_headers(skb);
+ netdev_dbg(netdev, "segment offloading mss = %04x len=%04x\n",
+ mss, skb->len);
+ word1 |= TSS_MTU_ENABLE_BIT;
+ word3 |= mss;
+ } else if (tcp) {
+ /* Even if we are not using TSO, use the hardware offloader
+ * for transferring the TCP frame: this hardware has partial
+ * TCP awareness (called TOE - TCP Offload Engine) and will
+ * according to the datasheet put packets belonging to the
+ * same TCP connection in the same queue for the TOE/TSO
+ * engine to process. The engine will deal with chopping
+ * up frames that exceed ETH_DATA_LEN which the
+ * checksumming engine cannot handle (see below) into
+ * manageable chunks. It flawlessly deals with quite big
+ * frames and frames containing custom DSA EtherTypes.
+ */
+ mss = netdev->mtu + skb_tcp_all_headers(skb);
+ mss = min(mss, skb->len);
+ netdev_dbg(netdev, "TOE/TSO len %04x mtu %04x mss %04x\n",
+ skb->len, netdev->mtu, mss);
+ word1 |= TSS_MTU_ENABLE_BIT;
+ word3 |= mss;
+ } else if (skb->len >= ETH_FRAME_LEN) {
+ /* Hardware offloaded checksumming isn't working on non-TCP frames
* bigger than 1514 bytes. A hypothesis about this is that the
* checksum buffer is only 1518 bytes, so when the frames get
* bigger they get truncated, or the last few bytes get
@@ -1164,22 +1204,19 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
return ret;
}
word1 |= TSS_BYPASS_BIT;
- } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
- int tcp = 0;
+ }
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
/* We do not switch off the checksumming on non TCP/UDP
* frames: as is shown from tests, the checksumming engine
* is smart enough to see that a frame is not actually TCP
* or UDP and then just pass it through without any changes
* to the frame.
*/
- if (skb->protocol == htons(ETH_P_IP)) {
+ if (skb->protocol == htons(ETH_P_IP))
word1 |= TSS_IP_CHKSUM_BIT;
- tcp = ip_hdr(skb)->protocol == IPPROTO_TCP;
- } else { /* IPv6 */
+ else
word1 |= TSS_IPV6_ENABLE_BIT;
- tcp = ipv6_hdr(skb)->nexthdr == IPPROTO_TCP;
- }
word1 |= tcp ? TSS_TCP_CHKSUM_BIT : TSS_UDP_CHKSUM_BIT;
}
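
A quick numeric check of the two MSS paths above (figures illustrative only):

/*
 * TSO path:  gso_size = 1448, skb_tcp_all_headers() = 66
 *            word3 MTU field = 1448 + 66 = 1514 (full frame per segment)
 *
 * TOE path:  netdev->mtu = 1500, headers = 66, skb->len = 400
 *            mss = min(1500 + 66, 400) = 400 (clamped to the frame length)
 *
 * Both paths set TSS_MTU_ENABLE_BIT in word1 so the engine honours word3.
 */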
@@ -1415,15 +1452,19 @@ static unsigned int gmac_rx(struct net_device *netdev, unsigned int budget)
union gmac_rxdesc_3 word3;
struct page *page = NULL;
unsigned int page_offs;
+ unsigned long flags;
unsigned short r, w;
union dma_rwptr rw;
dma_addr_t mapping;
int frag_nr = 0;
+ spin_lock_irqsave(&geth->irq_lock, flags);
rw.bits32 = readl(ptr_reg);
/* Reset interrupt as all packages until here are taken into account */
writel(DEFAULT_Q0_INT_BIT << netdev->dev_id,
geth->base + GLOBAL_INTERRUPT_STATUS_1_REG);
+ spin_unlock_irqrestore(&geth->irq_lock, flags);
+
r = rw.bits.rptr;
w = rw.bits.wptr;
@@ -1726,10 +1767,9 @@ static irqreturn_t gmac_irq(int irq, void *data)
gmac_update_hw_stats(netdev);
if (val & (GMAC0_RX_OVERRUN_INT_BIT << (netdev->dev_id * 8))) {
+ spin_lock(&geth->irq_lock);
writel(GMAC0_RXDERR_INT_BIT << (netdev->dev_id * 8),
geth->base + GLOBAL_INTERRUPT_STATUS_4_REG);
-
- spin_lock(&geth->irq_lock);
u64_stats_update_begin(&port->ir_stats_syncp);
++port->stats.rx_fifo_errors;
u64_stats_update_end(&port->ir_stats_syncp);
@@ -1815,9 +1855,8 @@ static int gmac_open(struct net_device *netdev)
gmac_enable_tx_rx(netdev);
netif_tx_start_all_queues(netdev);
- hrtimer_init(&port->rx_coalesce_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- port->rx_coalesce_timer.function = &gmac_coalesce_delay_expired;
+ hrtimer_setup(&port->rx_coalesce_timer, &gmac_coalesce_delay_expired, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
netdev_dbg(netdev, "opened\n");
@@ -1978,7 +2017,7 @@ static int gmac_change_mtu(struct net_device *netdev, int new_mtu)
gmac_disable_tx_rx(netdev);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
gmac_update_config0_reg(netdev, max_len << CONFIG0_MAXLEN_SHIFT,
CONFIG0_MAXLEN_MASK);
@@ -2108,6 +2147,19 @@ static void gmac_get_pauseparam(struct net_device *netdev,
pparam->autoneg = true;
}
+static int gmac_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pparam)
+{
+ struct phy_device *phydev = netdev->phydev;
+
+ if (!pparam->autoneg)
+ return -EOPNOTSUPP;
+
+ phy_set_asym_pause(phydev, pparam->rx_pause, pparam->tx_pause);
+
+ return 0;
+}
+
static void gmac_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *rp,
struct kernel_ethtool_ringparam *kernel_rp,
@@ -2228,6 +2280,7 @@ static const struct ethtool_ops gmac_351x_ethtool_ops = {
.set_link_ksettings = gmac_set_ksettings,
.nway_reset = gmac_nway_reset,
.get_pauseparam = gmac_get_pauseparam,
+ .set_pauseparam = gmac_set_pauseparam,
.get_ringparam = gmac_get_ringparam,
.set_ringparam = gmac_set_ringparam,
.get_coalesce = gmac_get_coalesce,
@@ -2541,7 +2594,7 @@ static struct platform_driver gemini_ethernet_port_driver = {
.of_match_table = gemini_ethernet_port_of_match,
},
.probe = gemini_ethernet_port_probe,
- .remove_new = gemini_ethernet_port_remove,
+ .remove = gemini_ethernet_port_remove,
};
static int gemini_ethernet_probe(struct platform_device *pdev)
@@ -2605,7 +2658,7 @@ static struct platform_driver gemini_ethernet_driver = {
.of_match_table = gemini_ethernet_of_match,
},
.probe = gemini_ethernet_probe,
- .remove_new = gemini_ethernet_remove,
+ .remove = gemini_ethernet_remove,
};
static int __init gemini_ethernet_module_init(void)
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 150cc94ae9f8..b87eaf0c250c 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1777,10 +1777,11 @@ static void dm9000_drv_remove(struct platform_device *pdev)
unregister_netdev(ndev);
dm9000_release_board(pdev, dm);
- free_netdev(ndev); /* free device structure */
if (dm->power_supply)
regulator_disable(dm->power_supply);
+ free_netdev(ndev); /* free device structure */
+
dev_dbg(&pdev->dev, "released and freed device\n");
}
@@ -1799,7 +1800,7 @@ static struct platform_driver dm9000_driver = {
.of_match_table = of_match_ptr(dm9000_of_matches),
},
.probe = dm9000_probe,
- .remove_new = dm9000_drv_remove,
+ .remove = dm9000_drv_remove,
};
module_platform_driver(dm9000_driver);
diff --git a/drivers/net/ethernet/davicom/dm9051.c b/drivers/net/ethernet/davicom/dm9051.c
index bcfe52c11804..59ea48d4c9de 100644
--- a/drivers/net/ethernet/davicom/dm9051.c
+++ b/drivers/net/ethernet/davicom/dm9051.c
@@ -1235,6 +1235,7 @@ static const struct of_device_id dm9051_match_table[] = {
{ .compatible = "davicom,dm9051" },
{}
};
+MODULE_DEVICE_TABLE(of, dm9051_match_table);
static const struct spi_device_id dm9051_id_table[] = {
{ "dm9051", 0 },
diff --git a/drivers/net/ethernet/dec/tulip/21142.c b/drivers/net/ethernet/dec/tulip/21142.c
index 369858272650..76767dec216d 100644
--- a/drivers/net/ethernet/dec/tulip/21142.c
+++ b/drivers/net/ethernet/dec/tulip/21142.c
@@ -216,7 +216,7 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
(csr12 & 2) == 2) ||
(tp->nway && (csr5 & (TPLnkFail)))) {
/* Link blew? Maybe restart NWay. */
- del_timer_sync(&tp->timer);
+ timer_delete_sync(&tp->timer);
t21142_start_nway(dev);
tp->timer.expires = RUN_AT(3*HZ);
add_timer(&tp->timer);
@@ -226,7 +226,7 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
medianame[dev->if_port],
(csr12 & 2) ? "failed" : "good");
if ((csr12 & 2) && ! tp->medialock) {
- del_timer_sync(&tp->timer);
+ timer_delete_sync(&tp->timer);
t21142_start_nway(dev);
tp->timer.expires = RUN_AT(3*HZ);
add_timer(&tp->timer);
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index cd3dc4b89518..f9504f340c4a 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -49,7 +49,7 @@
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/uaccess.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("Intel/Digital 21040/1 series PCI Ethernet driver");
@@ -963,7 +963,7 @@ static void de_next_media (struct de_private *de, const u32 *media,
static void de21040_media_timer (struct timer_list *t)
{
- struct de_private *de = from_timer(de, t, media_timer);
+ struct de_private *de = timer_container_of(de, t, media_timer);
struct net_device *dev = de->dev;
u32 status = dr32(SIAStatus);
unsigned int carrier;
@@ -1044,7 +1044,7 @@ static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media)
static void de21041_media_timer (struct timer_list *t)
{
- struct de_private *de = from_timer(de, t, media_timer);
+ struct de_private *de = timer_container_of(de, t, media_timer);
struct net_device *dev = de->dev;
u32 status = dr32(SIAStatus);
unsigned int carrier;
@@ -1428,7 +1428,7 @@ static int de_close (struct net_device *dev)
netif_dbg(de, ifdown, dev, "disabling interface\n");
- del_timer_sync(&de->media_timer);
+ timer_delete_sync(&de->media_timer);
spin_lock_irqsave(&de->lock, flags);
de_stop_hw(de);
@@ -1452,7 +1452,7 @@ static void de_tx_timeout (struct net_device *dev, unsigned int txqueue)
dr32(MacStatus), dr32(MacMode), dr32(SIAStatus),
de->rx_tail, de->tx_head, de->tx_tail);
- del_timer_sync(&de->media_timer);
+ timer_delete_sync(&de->media_timer);
disable_irq(irq);
spin_lock_irq(&de->lock);
@@ -2126,7 +2126,7 @@ static int __maybe_unused de_suspend(struct device *dev_d)
if (netif_running (dev)) {
const int irq = pdev->irq;
- del_timer_sync(&de->media_timer);
+ timer_delete_sync(&de->media_timer);
disable_irq(irq);
spin_lock_irq(&de->lock);
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index 3188ba7b450f..2d3bd343b6e6 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -745,7 +745,7 @@ static int dmfe_stop(struct net_device *dev)
netif_stop_queue(dev);
/* deleted timer */
- del_timer_sync(&db->timer);
+ timer_delete_sync(&db->timer);
/* Reset & stop DM910X board */
dw32(DCR0, DM910X_RESET);
@@ -1115,7 +1115,7 @@ static const struct ethtool_ops netdev_ethtool_ops = {
static void dmfe_timer(struct timer_list *t)
{
- struct dmfe_board_info *db = from_timer(db, t, timer);
+ struct dmfe_board_info *db = timer_container_of(db, t, timer);
struct net_device *dev = pci_get_drvdata(db->pdev);
void __iomem *ioaddr = db->ioaddr;
u32 tmp_cr8;
diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
index d5657ff15e3c..71ff9e6db209 100644
--- a/drivers/net/ethernet/dec/tulip/eeprom.c
+++ b/drivers/net/ethernet/dec/tulip/eeprom.c
@@ -13,7 +13,7 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include "tulip.h"
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
diff --git a/drivers/net/ethernet/dec/tulip/interrupt.c b/drivers/net/ethernet/dec/tulip/interrupt.c
index 54560f9a1651..0a12cb9b3ba7 100644
--- a/drivers/net/ethernet/dec/tulip/interrupt.c
+++ b/drivers/net/ethernet/dec/tulip/interrupt.c
@@ -104,7 +104,7 @@ int tulip_refill_rx(struct net_device *dev)
void oom_timer(struct timer_list *t)
{
- struct tulip_private *tp = from_timer(tp, t, oom_timer);
+ struct tulip_private *tp = timer_container_of(tp, t, oom_timer);
napi_schedule(&tp->napi);
}
@@ -699,8 +699,8 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
tulip_start_rxtx(tp);
}
/*
- * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
- * call is ever done under the spinlock
+ * NB: t21142_lnk_change() does a timer_delete_sync(), so be careful
+ * if this call is ever done under the spinlock
*/
if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
if (tp->link_change)
diff --git a/drivers/net/ethernet/dec/tulip/pnic.c b/drivers/net/ethernet/dec/tulip/pnic.c
index 653bde48ef44..1de5ed967070 100644
--- a/drivers/net/ethernet/dec/tulip/pnic.c
+++ b/drivers/net/ethernet/dec/tulip/pnic.c
@@ -86,7 +86,7 @@ void pnic_lnk_change(struct net_device *dev, int csr5)
void pnic_timer(struct timer_list *t)
{
- struct tulip_private *tp = from_timer(tp, t, timer);
+ struct tulip_private *tp = timer_container_of(tp, t, timer);
struct net_device *dev = tp->dev;
void __iomem *ioaddr = tp->base_addr;
int next_tick = 60*HZ;
diff --git a/drivers/net/ethernet/dec/tulip/pnic2.c b/drivers/net/ethernet/dec/tulip/pnic2.c
index 72a09156b48b..39c410bf224e 100644
--- a/drivers/net/ethernet/dec/tulip/pnic2.c
+++ b/drivers/net/ethernet/dec/tulip/pnic2.c
@@ -78,7 +78,7 @@
void pnic2_timer(struct timer_list *t)
{
- struct tulip_private *tp = from_timer(tp, t, timer);
+ struct tulip_private *tp = timer_container_of(tp, t, timer);
struct net_device *dev = tp->dev;
void __iomem *ioaddr = tp->base_addr;
int next_tick = 60*HZ;
@@ -323,7 +323,7 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
if (tulip_debug > 2)
netdev_dbg(dev, "Ugh! Link blew?\n");
- del_timer_sync(&tp->timer);
+ timer_delete_sync(&tp->timer);
pnic2_start_nway(dev);
tp->timer.expires = RUN_AT(3*HZ);
add_timer(&tp->timer);
@@ -348,7 +348,7 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
/* if failed then try doing an nway to get in sync */
if ((csr12 & 2) && ! tp->medialock) {
- del_timer_sync(&tp->timer);
+ timer_delete_sync(&tp->timer);
pnic2_start_nway(dev);
tp->timer.expires = RUN_AT(3*HZ);
add_timer(&tp->timer);
@@ -372,7 +372,7 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
/* if failed, try doing an nway to get in sync */
if ((csr12 & 4) && ! tp->medialock) {
- del_timer_sync(&tp->timer);
+ timer_delete_sync(&tp->timer);
pnic2_start_nway(dev);
tp->timer.expires = RUN_AT(3*HZ);
add_timer(&tp->timer);
diff --git a/drivers/net/ethernet/dec/tulip/timer.c b/drivers/net/ethernet/dec/tulip/timer.c
index 642e9dfc5451..ca0c509b601c 100644
--- a/drivers/net/ethernet/dec/tulip/timer.c
+++ b/drivers/net/ethernet/dec/tulip/timer.c
@@ -139,7 +139,7 @@ void tulip_media_task(struct work_struct *work)
void mxic_timer(struct timer_list *t)
{
- struct tulip_private *tp = from_timer(tp, t, timer);
+ struct tulip_private *tp = timer_container_of(tp, t, timer);
struct net_device *dev = tp->dev;
void __iomem *ioaddr = tp->base_addr;
int next_tick = 60*HZ;
@@ -156,7 +156,7 @@ void mxic_timer(struct timer_list *t)
void comet_timer(struct timer_list *t)
{
- struct tulip_private *tp = from_timer(tp, t, timer);
+ struct tulip_private *tp = timer_container_of(tp, t, timer);
struct net_device *dev = tp->dev;
int next_tick = 2*HZ;
diff --git a/drivers/net/ethernet/dec/tulip/tulip.h b/drivers/net/ethernet/dec/tulip/tulip.h
index bd786dfbc066..5e010e1fa6f7 100644
--- a/drivers/net/ethernet/dec/tulip/tulip.h
+++ b/drivers/net/ethernet/dec/tulip/tulip.h
@@ -23,7 +23,7 @@
#include <linux/pci.h>
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index ecfad43df45a..b608585f1954 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -23,7 +23,7 @@
#include <linux/delay.h>
#include <linux/mii.h>
#include <linux/crc32.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/uaccess.h>
#ifdef CONFIG_SPARC
@@ -114,7 +114,7 @@ int tulip_debug = 1;
static void tulip_timer(struct timer_list *t)
{
- struct tulip_private *tp = from_timer(tp, t, timer);
+ struct tulip_private *tp = timer_container_of(tp, t, timer);
struct net_device *dev = tp->dev;
if (netif_running(dev))
@@ -747,9 +747,9 @@ static void tulip_down (struct net_device *dev)
napi_disable(&tp->napi);
#endif
- del_timer_sync (&tp->timer);
+ timer_delete_sync(&tp->timer);
#ifdef CONFIG_TULIP_NAPI
- del_timer_sync (&tp->oom_timer);
+ timer_delete_sync(&tp->oom_timer);
#endif
spin_lock_irqsave (&tp->lock, flags);
@@ -1177,7 +1177,6 @@ static void set_rx_mode(struct net_device *dev)
iowrite32(csr6, ioaddr + CSR6);
}
-#ifdef CONFIG_TULIP_MWI
static void tulip_mwi_config(struct pci_dev *pdev, struct net_device *dev)
{
struct tulip_private *tp = netdev_priv(dev);
@@ -1251,7 +1250,6 @@ out:
netdev_dbg(dev, "MWI config cacheline=%d, csr0=%08x\n",
cache, csr0);
}
-#endif
/*
* Chips that have the MRM/reserved bit quirk and the burst quirk. That
@@ -1413,7 +1411,7 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* grab all resources from both PIO and MMIO regions, as we
* don't want anyone else messing around with our hardware */
- if (pci_request_regions(pdev, DRV_NAME))
+ if (pcim_request_all_regions(pdev, DRV_NAME))
return -ENODEV;
ioaddr = pcim_iomap(pdev, TULIP_BAR, tulip_tbl[chip_idx].io_size);
@@ -1463,10 +1461,9 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task);
-#ifdef CONFIG_TULIP_MWI
- if (!force_csr0 && (tp->flags & HAS_PCI_MWI))
+ if (IS_ENABLED(CONFIG_TULIP_MWI) && !force_csr0 &&
+ (tp->flags & HAS_PCI_MWI))
tulip_mwi_config (pdev, dev);
-#endif
/* Stop the chip's Tx and Rx processes. */
tulip_stop_rxtx(tp);
@@ -1553,7 +1550,7 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
(PCI_SLOT(pdev->devfn) == 12))) {
/* Cobalt MAC address in first EEPROM locations. */
sa_offset = 0;
- /* Ensure our media table fixup get's applied */
+ /* Ensure our media table fixup gets applied */
memcpy(ee_data + 16, ee_data, 8);
}
#endif
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index ff080ab0f116..6e4d8d31aba9 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -656,7 +656,7 @@ static int uli526x_stop(struct net_device *dev)
netif_stop_queue(dev);
/* deleted timer */
- del_timer_sync(&db->timer);
+ timer_delete_sync(&db->timer);
/* Reset & stop ULI526X board */
uw32(DCR0, ULI526X_RESET);
@@ -1014,7 +1014,7 @@ static const struct ethtool_ops netdev_ethtool_ops = {
static void uli526x_timer(struct timer_list *t)
{
- struct uli526x_board_info *db = from_timer(db, t, timer);
+ struct uli526x_board_info *db = timer_container_of(db, t, timer);
struct net_device *dev = pci_get_drvdata(db->pdev);
struct uli_phy_ops *phy = &db->phy;
void __iomem *ioaddr = db->ioaddr;
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 37fba39c0056..a24a25a5f73d 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -375,7 +375,7 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENOMEM;
SET_NETDEV_DEV(dev, &pdev->dev);
- if (pci_request_regions(pdev, DRV_NAME))
+ if (pcim_request_all_regions(pdev, DRV_NAME))
goto err_out_netdev;
ioaddr = pci_iomap(pdev, TULIP_BAR, netdev_res_size);
@@ -763,7 +763,7 @@ static inline void update_csr6(struct net_device *dev, int new)
static void netdev_timer(struct timer_list *t)
{
- struct netdev_private *np = from_timer(np, t, timer);
+ struct netdev_private *np = timer_container_of(np, t, timer);
struct net_device *dev = pci_get_drvdata(np->pci_dev);
void __iomem *ioaddr = np->base_addr;
@@ -1509,7 +1509,7 @@ static int netdev_close(struct net_device *dev)
}
#endif /* __i386__ debugging only */
- del_timer_sync(&np->timer);
+ timer_delete_sync(&np->timer);
free_rxtx_rings(np);
free_ringdesc(np);
@@ -1560,7 +1560,7 @@ static int __maybe_unused w840_suspend(struct device *dev_d)
rtnl_lock();
if (netif_running (dev)) {
- del_timer_sync(&np->timer);
+ timer_delete_sync(&np->timer);
spin_lock_irq(&np->lock);
netif_device_detach(dev);
diff --git a/drivers/net/ethernet/dec/tulip/xircom_cb.c b/drivers/net/ethernet/dec/tulip/xircom_cb.c
index 8759f9f76b62..e5d2ede13845 100644
--- a/drivers/net/ethernet/dec/tulip/xircom_cb.c
+++ b/drivers/net/ethernet/dec/tulip/xircom_cb.c
@@ -143,7 +143,7 @@ static const struct pci_device_id xircom_pci_table[] = {
};
MODULE_DEVICE_TABLE(pci, xircom_pci_table);
-static struct pci_driver xircom_ops = {
+static struct pci_driver xircom_driver = {
.name = "xircom_cb",
.id_table = xircom_pci_table,
.probe = xircom_probe,
@@ -1169,4 +1169,4 @@ investigate_write_descriptor(struct net_device *dev,
}
}
-module_pci_driver(xircom_ops);
+module_pci_driver(xircom_driver);
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 7bfeae04b52b..846d58c769ea 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -41,7 +41,7 @@ module_param(tx_flow, int, 0);
module_param(rx_flow, int, 0);
module_param(copy_thresh, int, 0);
module_param(rx_coalesce, int, 0); /* Rx frame count each interrupt */
-module_param(rx_timeout, int, 0); /* Rx DMA wait time in 64ns increments */
+module_param(rx_timeout, int, 0); /* Rx DMA wait time in 640ns increments */
module_param(tx_coalesce, int, 0); /* HW xmit count each TxDMAComplete */
@@ -99,6 +99,13 @@ static const struct net_device_ops netdev_ops = {
.ndo_tx_timeout = rio_tx_timeout,
};
+static bool is_support_rmon_mmio(struct pci_dev *pdev)
+{
+ return pdev->vendor == PCI_VENDOR_ID_DLINK &&
+ pdev->device == 0x4000 &&
+ pdev->revision == 0x0c;
+}
+
static int
rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
{
@@ -131,21 +138,27 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
np = netdev_priv(dev);
+ if (is_support_rmon_mmio(pdev))
+ np->rmon_enable = true;
+
/* IO registers range. */
ioaddr = pci_iomap(pdev, 0, 0);
if (!ioaddr)
goto err_out_dev;
np->eeprom_addr = ioaddr;
-#ifdef MEM_MAPPING
- /* MM registers range. */
- ioaddr = pci_iomap(pdev, 1, 0);
- if (!ioaddr)
- goto err_out_iounmap;
-#endif
+ if (np->rmon_enable) {
+ /* MM registers range. */
+ ioaddr = pci_iomap(pdev, 1, 0);
+ if (!ioaddr)
+ goto err_out_iounmap;
+ }
+
np->ioaddr = ioaddr;
np->chip_id = chip_idx;
np->pdev = pdev;
+
+ spin_lock_init(&np->stats_lock);
spin_lock_init (&np->tx_lock);
spin_lock_init (&np->rx_lock);
@@ -249,7 +262,7 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
np->link_status = 0;
/* Set media and reset PHY */
if (np->phy_media) {
- /* default Auto-Negotiation for fiber deivices */
+ /* default Auto-Negotiation for fiber devices */
if (np->an_enable == 2) {
np->an_enable = 1;
}
@@ -287,9 +300,8 @@ err_out_unmap_tx:
dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
np->tx_ring_dma);
err_out_iounmap:
-#ifdef MEM_MAPPING
- pci_iounmap(pdev, np->ioaddr);
-#endif
+ if (np->rmon_enable)
+ pci_iounmap(pdev, np->ioaddr);
pci_iounmap(pdev, np->eeprom_addr);
err_out_dev:
free_netdev (dev);
@@ -352,7 +364,7 @@ parse_eeprom (struct net_device *dev)
eth_hw_addr_set(dev, psrom->mac_addr);
if (np->chip_id == CHIP_IP1000A) {
- np->led_mode = psrom->led_mode;
+ np->led_mode = le16_to_cpu(psrom->led_mode);
return 0;
}
@@ -496,25 +508,34 @@ static int alloc_list(struct net_device *dev)
for (i = 0; i < RX_RING_SIZE; i++) {
/* Allocated fixed size of skbuff */
struct sk_buff *skb;
+ dma_addr_t addr;
skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
np->rx_skbuff[i] = skb;
- if (!skb) {
- free_list(dev);
- return -ENOMEM;
- }
+ if (!skb)
+ goto err_free_list;
+
+ addr = dma_map_single(&np->pdev->dev, skb->data,
+ np->rx_buf_sz, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&np->pdev->dev, addr))
+ goto err_kfree_skb;
np->rx_ring[i].next_desc = cpu_to_le64(np->rx_ring_dma +
((i + 1) % RX_RING_SIZE) *
sizeof(struct netdev_desc));
/* Rubicon now supports 40 bits of addressing space. */
- np->rx_ring[i].fraginfo =
- cpu_to_le64(dma_map_single(&np->pdev->dev, skb->data,
- np->rx_buf_sz, DMA_FROM_DEVICE));
+ np->rx_ring[i].fraginfo = cpu_to_le64(addr);
np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48);
}
return 0;
+
+err_kfree_skb:
+ dev_kfree_skb(np->rx_skbuff[i]);
+ np->rx_skbuff[i] = NULL;
+err_free_list:
+ free_list(dev);
+ return -ENOMEM;
}
static void rio_hw_init(struct net_device *dev)
@@ -576,7 +597,8 @@ static void rio_hw_init(struct net_device *dev)
dw8(TxDMAPollPeriod, 0xff);
dw8(RxDMABurstThresh, 0x30);
dw8(RxDMAUrgentThresh, 0x30);
- dw32(RmonStatMask, 0x0007ffff);
+ if (!np->rmon_enable)
+ dw32(RmonStatMask, 0x0007ffff);
/* clear statistics */
clear_stats (dev);
@@ -648,7 +670,7 @@ static int rio_open(struct net_device *dev)
static void
rio_timer (struct timer_list *t)
{
- struct netdev_private *np = from_timer(np, t, timer);
+ struct netdev_private *np = timer_container_of(np, t, timer);
struct net_device *dev = pci_get_drvdata(np->pdev);
unsigned int entry;
int next_tick = 1*HZ;
@@ -711,7 +733,7 @@ start_xmit (struct sk_buff *skb, struct net_device *dev)
u64 tfc_vlan_tag = 0;
if (np->link_status == 0) { /* Link Down */
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
entry = np->cur_tx % TX_RING_SIZE;
@@ -865,8 +887,7 @@ tx_error (struct net_device *dev, int tx_status)
frame_id = (tx_status & 0xffff0000);
printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n",
dev->name, tx_status, frame_id);
- dev->stats.tx_errors++;
- /* Ttransmit Underrun */
+ /* Transmit Underrun */
if (tx_status & 0x10) {
dev->stats.tx_fifo_errors++;
dw16(TxStartThresh, dr16(TxStartThresh) + 0x10);
@@ -902,9 +923,15 @@ tx_error (struct net_device *dev, int tx_status)
rio_set_led_mode(dev);
/* Let TxStartThresh stay default value */
}
+
+ spin_lock(&np->stats_lock);
/* Maximum Collisions */
if (tx_status & 0x08)
dev->stats.collisions++;
+
+ dev->stats.tx_errors++;
+ spin_unlock(&np->stats_lock);
+
/* Restart the Tx */
dw32(MACCtrl, dr16(MACCtrl) | TxEnable);
}
@@ -946,15 +973,18 @@ receive_packet (struct net_device *dev)
} else {
struct sk_buff *skb;
+ skb = NULL;
/* Small skbuffs for short packets */
- if (pkt_len > copy_thresh) {
+ if (pkt_len <= copy_thresh)
+ skb = netdev_alloc_skb_ip_align(dev, pkt_len);
+ if (!skb) {
dma_unmap_single(&np->pdev->dev,
desc_to_dma(desc),
np->rx_buf_sz,
DMA_FROM_DEVICE);
skb_put (skb = np->rx_skbuff[entry], pkt_len);
np->rx_skbuff[entry] = NULL;
- } else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) {
+ } else {
dma_sync_single_for_cpu(&np->pdev->dev,
desc_to_dma(desc),
np->rx_buf_sz,
@@ -1053,7 +1083,7 @@ rio_error (struct net_device *dev, int int_status)
get_stats (dev);
}
- /* PCI Error, a catastronphic error related to the bus interface
+ /* PCI Error, a catastrophic error related to the bus interface
occurs, set GlobalReset and HostReset to reset. */
if (int_status & HostError) {
printk (KERN_ERR "%s: HostError! IntStatus %4.4x.\n",
@@ -1069,11 +1099,10 @@ get_stats (struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->ioaddr;
-#ifdef MEM_MAPPING
- int i;
-#endif
unsigned int stat_reg;
+ unsigned long flags;
+ spin_lock_irqsave(&np->stats_lock, flags);
/* All statistics registers need to be acknowledged,
else statistic overflow could cause problems */
@@ -1082,7 +1111,7 @@ get_stats (struct net_device *dev)
dev->stats.rx_bytes += dr32(OctetRcvOk);
dev->stats.tx_bytes += dr32(OctetXmtOk);
- dev->stats.multicast = dr32(McstFramesRcvdOk);
+ dev->stats.multicast += dr32(McstFramesRcvdOk);
dev->stats.collisions += dr32(SingleColFrames)
+ dr32(MultiColFrames);
@@ -1114,15 +1143,18 @@ get_stats (struct net_device *dev)
dr16(MacControlFramesXmtd);
dr16(FramesWEXDeferal);
-#ifdef MEM_MAPPING
- for (i = 0x100; i <= 0x150; i += 4)
- dr32(i);
-#endif
+ if (np->rmon_enable)
+ for (int i = 0x100; i <= 0x150; i += 4)
+ dr32(i);
+
dr16(TxJumboFrames);
dr16(RxJumboFrames);
dr16(TCPCheckSumErrors);
dr16(UDPCheckSumErrors);
dr16(IPCheckSumErrors);
+
+ spin_unlock_irqrestore(&np->stats_lock, flags);
+
return &dev->stats;
}
@@ -1131,9 +1163,6 @@ clear_stats (struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->ioaddr;
-#ifdef MEM_MAPPING
- int i;
-#endif
/* All statistics registers need to be acknowledged,
else statistic overflow could cause problems */
@@ -1169,10 +1198,9 @@ clear_stats (struct net_device *dev)
dr16(BcstFramesXmtdOk);
dr16(MacControlFramesXmtd);
dr16(FramesWEXDeferal);
-#ifdef MEM_MAPPING
- for (i = 0x100; i <= 0x150; i += 4)
- dr32(i);
-#endif
+ if (np->rmon_enable)
+ for (int i = 0x100; i <= 0x150; i += 4)
+ dr32(i);
dr16(TxJumboFrames);
dr16(RxJumboFrames);
dr16(TCPCheckSumErrors);
@@ -1778,7 +1806,7 @@ rio_close (struct net_device *dev)
rio_hw_stop(dev);
free_irq(pdev->irq, dev);
- del_timer_sync (&np->timer);
+ timer_delete_sync(&np->timer);
free_list(dev);
@@ -1798,9 +1826,8 @@ rio_remove1 (struct pci_dev *pdev)
np->rx_ring_dma);
dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
np->tx_ring_dma);
-#ifdef MEM_MAPPING
- pci_iounmap(pdev, np->ioaddr);
-#endif
+ if (np->rmon_enable)
+ pci_iounmap(pdev, np->ioaddr);
pci_iounmap(pdev, np->eeprom_addr);
free_netdev (dev);
pci_release_regions (pdev);
@@ -1818,7 +1845,7 @@ static int rio_suspend(struct device *device)
return 0;
netif_device_detach(dev);
- del_timer_sync(&np->timer);
+ timer_delete_sync(&np->timer);
rio_hw_stop(dev);
return 0;
@@ -1842,7 +1869,7 @@ static int rio_resume(struct device *device)
return 0;
}
-static SIMPLE_DEV_PM_OPS(rio_pm_ops, rio_suspend, rio_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(rio_pm_ops, rio_suspend, rio_resume);
#define RIO_PM_OPS (&rio_pm_ops)
#else
diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h
index 195dc6cfd895..9ebf7a6db93e 100644
--- a/drivers/net/ethernet/dlink/dl2k.h
+++ b/drivers/net/ethernet/dlink/dl2k.h
@@ -270,7 +270,7 @@ enum _pcs_reg {
PCS_ESR = 15,
};
-/* IEEE Extened Status Register */
+/* IEEE Extended Status Register */
enum _mii_esr {
MII_ESR_1000BX_FD = 0x8000,
MII_ESR_1000BX_HD = 0x4000,
@@ -329,18 +329,18 @@ enum _pcs_anlpar {
};
typedef struct t_SROM {
- u16 config_param; /* 0x00 */
- u16 asic_ctrl; /* 0x02 */
- u16 sub_vendor_id; /* 0x04 */
- u16 sub_system_id; /* 0x06 */
- u16 pci_base_1; /* 0x08 (IP1000A only) */
- u16 pci_base_2; /* 0x0a (IP1000A only) */
- u16 led_mode; /* 0x0c (IP1000A only) */
- u16 reserved1[9]; /* 0x0e-0x1f */
+ __le16 config_param; /* 0x00 */
+ __le16 asic_ctrl; /* 0x02 */
+ __le16 sub_vendor_id; /* 0x04 */
+ __le16 sub_system_id; /* 0x06 */
+ __le16 pci_base_1; /* 0x08 (IP1000A only) */
+ __le16 pci_base_2; /* 0x0a (IP1000A only) */
+ __le16 led_mode; /* 0x0c (IP1000A only) */
+ __le16 reserved1[9]; /* 0x0e-0x1f */
u8 mac_addr[6]; /* 0x20-0x25 */
u8 reserved2[10]; /* 0x26-0x2f */
u8 sib[204]; /* 0x30-0xfb */
- u32 crc; /* 0xfc-0xff */
+ __le32 crc; /* 0xfc-0xff */
} SROM_t, *PSROM_t;
/* Ioctl custom data */
@@ -372,6 +372,8 @@ struct netdev_private {
struct pci_dev *pdev;
void __iomem *ioaddr;
void __iomem *eeprom_addr;
+ /* Serializes concurrent updates to dev->stats. */
+ spinlock_t stats_lock;
spinlock_t tx_lock;
spinlock_t rx_lock;
unsigned int rx_buf_sz; /* Based on MTU+slack. */
@@ -401,6 +403,8 @@ struct netdev_private {
u16 negotiate; /* Negotiated media */
int phy_addr; /* PHY addresses. */
u16 led_mode; /* LED mode read from EEPROM (IP1000A only) */
+
+ bool rmon_enable;
};
/* The station address location in the EEPROM. */
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index aaf0eda96292..277c50ef773f 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -708,7 +708,7 @@ static int change_mtu(struct net_device *dev, int new_mtu)
{
if (netif_running(dev))
return -EBUSY;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
@@ -942,7 +942,7 @@ static void check_duplex(struct net_device *dev)
static void netdev_timer(struct timer_list *t)
{
- struct netdev_private *np = from_timer(np, t, timer);
+ struct netdev_private *np = timer_container_of(np, t, timer);
struct net_device *dev = np->mii_if.dev;
void __iomem *ioaddr = np->base;
int next_tick = 10*HZ;
@@ -1033,21 +1033,22 @@ static void init_ring(struct net_device *dev)
/* Fill in the Rx buffers. Handle allocation failure gracefully. */
for (i = 0; i < RX_RING_SIZE; i++) {
+ dma_addr_t addr;
+
struct sk_buff *skb =
netdev_alloc_skb(dev, np->rx_buf_sz + 2);
np->rx_skbuff[i] = skb;
if (skb == NULL)
break;
skb_reserve(skb, 2); /* 16 byte align the IP header. */
- np->rx_ring[i].frag.addr = cpu_to_le32(
- dma_map_single(&np->pci_dev->dev, skb->data,
- np->rx_buf_sz, DMA_FROM_DEVICE));
- if (dma_mapping_error(&np->pci_dev->dev,
- np->rx_ring[i].frag.addr)) {
+ addr = dma_map_single(&np->pci_dev->dev, skb->data,
+ np->rx_buf_sz, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&np->pci_dev->dev, addr)) {
dev_kfree_skb(skb);
np->rx_skbuff[i] = NULL;
break;
}
+ np->rx_ring[i].frag.addr = cpu_to_le32(addr);
np->rx_ring[i].frag.length = cpu_to_le32(np->rx_buf_sz | LastFrag);
}
np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
@@ -1088,6 +1089,7 @@ start_tx (struct sk_buff *skb, struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
struct netdev_desc *txdesc;
+ dma_addr_t addr;
unsigned entry;
/* Calculate the next Tx descriptor entry. */
@@ -1095,13 +1097,14 @@ start_tx (struct sk_buff *skb, struct net_device *dev)
np->tx_skbuff[entry] = skb;
txdesc = &np->tx_ring[entry];
+ addr = dma_map_single(&np->pci_dev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&np->pci_dev->dev, addr))
+ goto drop_frame;
+
txdesc->next_desc = 0;
txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
- txdesc->frag.addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
- skb->data, skb->len, DMA_TO_DEVICE));
- if (dma_mapping_error(&np->pci_dev->dev,
- txdesc->frag.addr))
- goto drop_frame;
+ txdesc->frag.addr = cpu_to_le32(addr);
txdesc->frag.length = cpu_to_le32 (skb->len | LastFrag);
/* Increment cur_tx before tasklet_schedule() */
@@ -1419,6 +1422,8 @@ static void refill_rx (struct net_device *dev)
for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
struct sk_buff *skb;
+ dma_addr_t addr;
+
entry = np->dirty_rx % RX_RING_SIZE;
if (np->rx_skbuff[entry] == NULL) {
skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2);
@@ -1426,15 +1431,15 @@ static void refill_rx (struct net_device *dev)
if (skb == NULL)
break; /* Better luck next round. */
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
- np->rx_ring[entry].frag.addr = cpu_to_le32(
- dma_map_single(&np->pci_dev->dev, skb->data,
- np->rx_buf_sz, DMA_FROM_DEVICE));
- if (dma_mapping_error(&np->pci_dev->dev,
- np->rx_ring[entry].frag.addr)) {
+ addr = dma_map_single(&np->pci_dev->dev, skb->data,
+ np->rx_buf_sz, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&np->pci_dev->dev, addr)) {
dev_kfree_skb_irq(skb);
np->rx_skbuff[entry] = NULL;
break;
}
+
+ np->rx_ring[entry].frag.addr = cpu_to_le32(addr);
}
/* Perhaps we need not reset this field. */
np->rx_ring[entry].frag.length =
@@ -1882,7 +1887,7 @@ static int netdev_close(struct net_device *dev)
free_irq(np->pci_dev->irq, dev);
- del_timer_sync(&np->timer);
+ timer_delete_sync(&np->timer);
/* Free all the skbuffs in the Rx queue. */
for (i = 0; i < RX_RING_SIZE; i++) {
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index 2a18df3605f1..0de3cd660ec8 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -863,7 +863,7 @@ static void dnet_remove(struct platform_device *pdev)
static struct platform_driver dnet_driver = {
.probe = dnet_probe,
- .remove_new = dnet_remove,
+ .remove = dnet_remove,
.driver = {
.name = "dnet",
},
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
index 44af1d13d931..67275aa4f65b 100644
--- a/drivers/net/ethernet/ec_bhf.c
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -416,8 +416,7 @@ static int ec_bhf_open(struct net_device *net_dev)
netif_start_queue(net_dev);
- hrtimer_init(&priv->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- priv->hrtimer.function = ec_bhf_timer_fun;
+ hrtimer_setup(&priv->hrtimer, ec_bhf_timer_fun, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hrtimer_start(&priv->hrtimer, polling_frequency, HRTIMER_MODE_REL);
return 0;
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 61fe9625bed1..270ff9aab335 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -562,7 +562,7 @@ struct be_adapter {
struct be_dma_mem mbox_mem_alloced;
struct be_mcc_obj mcc_obj;
- struct mutex mcc_lock; /* For serializing mcc cmds to BE card */
+ spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */
spinlock_t mcc_cq_lock;
u16 cfg_num_rx_irqs; /* configured via set-channels */
@@ -966,9 +966,7 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
void be_link_status_update(struct be_adapter *adapter, u8 link_status);
void be_parse_stats(struct be_adapter *adapter);
int be_load_fw(struct be_adapter *adapter, u8 *func);
-bool be_is_wol_supported(struct be_adapter *adapter);
bool be_pause_supported(struct be_adapter *adapter);
-u32 be_get_fw_log_level(struct be_adapter *adapter);
int be_update_queues(struct be_adapter *adapter);
int be_poll(struct napi_struct *napi, int budget);
void be_eqd_update(struct be_adapter *adapter, bool force_update);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 61adcebeef01..bb5d2fa15736 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -575,7 +575,7 @@ int be_process_mcc(struct be_adapter *adapter)
/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
-#define mcc_timeout 12000 /* 12s timeout */
+#define mcc_timeout 120000 /* 12s timeout */
int i, status = 0;
struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
@@ -589,7 +589,7 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
if (atomic_read(&mcc_obj->q.used) == 0)
break;
- usleep_range(500, 1000);
+ udelay(100);
}
if (i == mcc_timeout) {
dev_err(&adapter->pdev->dev, "FW not responding\n");
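
The overall wait budget is unchanged by the busy-wait conversion; checking the arithmetic from the two hunks above:

/*
 * Old: 12000 iterations  x usleep_range(500, 1000)  ~ 6-12 s
 * New: 120000 iterations x udelay(100)              = 12 s
 *
 * udelay() is used here presumably because mcc_lock is now a BH-disabling
 * spinlock (see the be.h hunk), so the polling path can no longer sleep.
 */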
@@ -866,7 +866,7 @@ static bool use_mcc(struct be_adapter *adapter)
static int be_cmd_lock(struct be_adapter *adapter)
{
if (use_mcc(adapter)) {
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
return 0;
} else {
return mutex_lock_interruptible(&adapter->mbox_lock);
@@ -877,7 +877,7 @@ static int be_cmd_lock(struct be_adapter *adapter)
static void be_cmd_unlock(struct be_adapter *adapter)
{
if (use_mcc(adapter))
- return mutex_unlock(&adapter->mcc_lock);
+ return spin_unlock_bh(&adapter->mcc_lock);
else
return mutex_unlock(&adapter->mbox_lock);
}
@@ -1047,7 +1047,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
struct be_cmd_req_mac_query *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1076,7 +1076,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1088,7 +1088,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, const u8 *mac_addr,
struct be_cmd_req_pmac_add *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1113,7 +1113,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, const u8 *mac_addr,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
if (base_status(status) == MCC_STATUS_UNAUTHORIZED_REQUEST)
status = -EPERM;
@@ -1131,7 +1131,7 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
if (pmac_id == -1)
return 0;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1151,7 +1151,7 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1414,7 +1414,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
struct be_dma_mem *q_mem = &rxq->dma_mem;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1444,7 +1444,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1508,7 +1508,7 @@ int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
struct be_cmd_req_q_destroy *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1525,7 +1525,7 @@ int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
q->created = false;
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1593,7 +1593,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
struct be_cmd_req_hdr *hdr;
int status = 0;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1609,7 +1609,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
/* version 1 of the cmd is not supported only by BE2 */
if (BE2_chip(adapter))
hdr->version = 0;
- if (BE3_chip(adapter) || lancer_chip(adapter))
+ else if (BE3_chip(adapter) || lancer_chip(adapter))
hdr->version = 1;
else
hdr->version = 2;
@@ -1621,7 +1621,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
adapter->stats_cmd_sent = true;
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1637,7 +1637,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
CMD_SUBSYSTEM_ETH))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1660,7 +1660,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
adapter->stats_cmd_sent = true;
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1697,7 +1697,7 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
struct be_cmd_req_link_status *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
if (link_status)
*link_status = LINK_DOWN;
@@ -1736,7 +1736,7 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1747,7 +1747,7 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter)
struct be_cmd_req_get_cntl_addnl_attribs *req;
int status = 0;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1762,7 +1762,7 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter)
status = be_mcc_notify(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1811,7 +1811,7 @@ int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf)
if (!get_fat_cmd.va)
return -ENOMEM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
while (total_size) {
buf_size = min(total_size, (u32)60 * 1024);
@@ -1849,9 +1849,9 @@ int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf)
log_offset += buf_size;
}
err:
+ spin_unlock_bh(&adapter->mcc_lock);
dma_free_coherent(&adapter->pdev->dev, get_fat_cmd.size,
get_fat_cmd.va, get_fat_cmd.dma);
- mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -1862,7 +1862,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter)
struct be_cmd_req_get_fw_version *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1885,7 +1885,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter)
sizeof(adapter->fw_on_flash));
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1899,7 +1899,7 @@ static int __be_cmd_modify_eqd(struct be_adapter *adapter,
struct be_cmd_req_modify_eq_delay *req;
int status = 0, i;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1922,7 +1922,7 @@ static int __be_cmd_modify_eqd(struct be_adapter *adapter,
status = be_mcc_notify(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1949,7 +1949,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
struct be_cmd_req_vlan_config *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1971,7 +1971,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1982,7 +1982,7 @@ static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
struct be_cmd_req_rx_filter *req = mem->va;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2015,7 +2015,7 @@ static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2046,7 +2046,7 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
CMD_SUBSYSTEM_COMMON))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2066,7 +2066,7 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
if (base_status(status) == MCC_STATUS_FEATURE_NOT_SUPPORTED)
return -EOPNOTSUPP;
@@ -2085,7 +2085,7 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
CMD_SUBSYSTEM_COMMON))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2108,7 +2108,7 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2189,7 +2189,7 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
return 0;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2214,7 +2214,7 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2226,7 +2226,7 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
struct be_cmd_req_enable_disable_beacon *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2247,7 +2247,7 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2258,7 +2258,7 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
struct be_cmd_req_get_beacon_state *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2282,7 +2282,7 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2306,7 +2306,7 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
return -ENOMEM;
}
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2328,7 +2328,7 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
memcpy(data, resp->page_data + off, len);
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
return status;
}
@@ -2345,7 +2345,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter,
void *ctxt = NULL;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
adapter->flash_status = 0;
wrb = wrb_from_mccq(adapter);
@@ -2387,7 +2387,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter,
if (status)
goto err_unlock;
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
msecs_to_jiffies(60000)))
@@ -2406,7 +2406,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter,
return status;
err_unlock:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2460,7 +2460,7 @@ static int lancer_cmd_delete_object(struct be_adapter *adapter,
struct be_mcc_wrb *wrb;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2478,7 +2478,7 @@ static int lancer_cmd_delete_object(struct be_adapter *adapter,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2491,7 +2491,7 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
struct lancer_cmd_resp_read_object *resp;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2525,7 +2525,7 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
}
err_unlock:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2537,7 +2537,7 @@ static int be_cmd_write_flashrom(struct be_adapter *adapter,
struct be_cmd_write_flashrom *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
adapter->flash_status = 0;
wrb = wrb_from_mccq(adapter);
@@ -2562,7 +2562,7 @@ static int be_cmd_write_flashrom(struct be_adapter *adapter,
if (status)
goto err_unlock;
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
msecs_to_jiffies(40000)))
@@ -2573,7 +2573,7 @@ static int be_cmd_write_flashrom(struct be_adapter *adapter,
return status;
err_unlock:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2584,7 +2584,7 @@ static int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
struct be_mcc_wrb *wrb;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2611,11 +2611,15 @@ static int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
memcpy(flashed_crc, req->crc, 4);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
-static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
+/*
+ * Since the cookie is text, add a parsing-skipped space to keep it from
+ * ever being matched on storage holding this source file.
+ */
+static const char flash_cookie[32] __nonstring = "*** SE FLAS" "H DIRECTORY *** ";
static bool phy_flashing_required(struct be_adapter *adapter)
{
@@ -3217,7 +3221,7 @@ int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
struct be_cmd_req_acpi_wol_magic_config *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3234,7 +3238,7 @@ int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3249,7 +3253,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
CMD_SUBSYSTEM_LOWLEVEL))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3272,7 +3276,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
if (status)
goto err_unlock;
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
msecs_to_jiffies(SET_LB_MODE_TIMEOUT)))
@@ -3281,7 +3285,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
return status;
err_unlock:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3298,7 +3302,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
CMD_SUBSYSTEM_LOWLEVEL))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3324,7 +3328,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
if (status)
goto err;
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
wait_for_completion(&adapter->et_cmd_compl);
resp = embedded_payload(wrb);
@@ -3332,7 +3336,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
return status;
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3348,7 +3352,7 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
CMD_SUBSYSTEM_LOWLEVEL))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3382,7 +3386,7 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3393,7 +3397,7 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
struct be_cmd_req_seeprom_read *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3409,7 +3413,7 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3424,7 +3428,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
CMD_SUBSYSTEM_COMMON))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3469,7 +3473,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
}
dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3479,7 +3483,7 @@ static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
struct be_cmd_req_set_qos *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3499,7 +3503,7 @@ static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3611,7 +3615,7 @@ int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
struct be_cmd_req_get_fn_privileges *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3643,7 +3647,7 @@ int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3655,7 +3659,7 @@ int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
struct be_cmd_req_set_fn_privileges *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3675,7 +3679,7 @@ int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3707,7 +3711,7 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
return -ENOMEM;
}
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3771,7 +3775,7 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
}
out:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
dma_free_coherent(&adapter->pdev->dev, get_mac_list_cmd.size,
get_mac_list_cmd.va, get_mac_list_cmd.dma);
return status;
@@ -3831,7 +3835,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
if (!cmd.va)
return -ENOMEM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3852,8 +3856,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
status = be_mcc_notify_wait(adapter);
err:
+ spin_unlock_bh(&adapter->mcc_lock);
dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
- mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -3889,7 +3893,7 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
CMD_SUBSYSTEM_COMMON))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3930,7 +3934,7 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3944,7 +3948,7 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
int status;
u16 vid;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3991,7 +3995,7 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -4190,7 +4194,7 @@ int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
struct be_cmd_req_set_ext_fat_caps *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -4206,7 +4210,7 @@ int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -4684,7 +4688,7 @@ int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
if (iface == 0xFFFFFFFF)
return -1;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -4701,7 +4705,7 @@ int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -4735,7 +4739,7 @@ int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
struct be_cmd_resp_get_iface_list *resp;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -4756,7 +4760,7 @@ int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -4850,7 +4854,7 @@ int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
if (BEx_chip(adapter))
return 0;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -4868,7 +4872,7 @@ int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
req->enable = 1;
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -4941,7 +4945,7 @@ __be_cmd_set_logical_link_config(struct be_adapter *adapter,
u32 link_config = 0;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -4969,7 +4973,7 @@ __be_cmd_set_logical_link_config(struct be_adapter *adapter,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -5000,8 +5004,7 @@ int be_cmd_set_features(struct be_adapter *adapter)
struct be_mcc_wrb *wrb;
int status;
- if (mutex_lock_interruptible(&adapter->mcc_lock))
- return -1;
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -5039,7 +5042,7 @@ err:
dev_info(&adapter->pdev->dev,
"Adapter does not support HW error recovery\n");
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -5053,7 +5056,7 @@ int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
struct be_cmd_resp_hdr *resp;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -5076,7 +5079,7 @@ int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
EXPORT_SYMBOL(be_roce_mcc_cmd);
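The be_cmds.c hunks above replace every mutex_lock()/mutex_unlock() of adapter->mcc_lock with spin_lock_bh()/spin_unlock_bh(), and the be_main.c hunk further down switches the initializer from mutex_init() to spin_lock_init(). In be_cmd_get_fat_dump() and be_cmd_set_mac_list() the dma_free_coherent() calls are also moved outside the critical section, presumably so the coherent-DMA free never runs inside the new non-sleeping region. A minimal sketch of the resulting pattern follows; demo_adapter and the demo_* functions are illustrative names, not part of the driver.

#include <linux/spinlock.h>

struct demo_adapter {
	spinlock_t cmd_lock;		/* was: struct mutex cmd_lock */
};

static void demo_adapter_init(struct demo_adapter *ad)
{
	spin_lock_init(&ad->cmd_lock);	/* was: mutex_init() */
}

static int demo_issue_cmd(struct demo_adapter *ad)
{
	int status = 0;

	/* Bottom halves are disabled while the lock is held, so nothing
	 * in the critical section may sleep (no mutex_lock_interruptible(),
	 * no blocking completion waits).
	 */
	spin_lock_bh(&ad->cmd_lock);
	/* ... build the command WRB and post it to the queue ... */
	spin_unlock_bh(&ad->cmd_lock);

	return status;
}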
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index e2085c68c0ee..5e2d3ddb5d43 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1415,7 +1415,7 @@ struct flash_section_entry {
} __packed;
struct flash_section_info {
- u8 cookie[32];
+ u8 cookie[32] __nonstring;
struct flash_section_hdr fsec_hdr;
struct flash_section_entry fsec_entry[32];
} __packed;
@@ -2381,7 +2381,6 @@ struct be_cmd_req_manage_iface_filters {
} __packed;
u16 be_POST_stage_get(struct be_adapter *adapter);
-int be_pci_fnum_get(struct be_adapter *adapter);
int be_fw_wait_ready(struct be_adapter *adapter);
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
bool permanent, u32 if_handle, u32 pmac_id);
@@ -2406,7 +2405,6 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q);
int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
u8 *link_status, u32 dom);
-int be_cmd_reset(struct be_adapter *adapter);
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd);
int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
struct be_dma_mem *nonemb_cmd);
@@ -2488,7 +2486,6 @@ int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask);
int lancer_initiate_dump(struct be_adapter *adapter);
int lancer_delete_dump(struct be_adapter *adapter);
bool dump_present(struct be_adapter *adapter);
-int lancer_test_and_set_rdy_state(struct be_adapter *adapter);
int be_cmd_query_port_name(struct be_adapter *adapter);
int be_cmd_get_func_config(struct be_adapter *adapter,
struct be_resources *res);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index f001a649f58f..f9216326bdfe 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -1073,10 +1073,19 @@ static void be_set_msg_level(struct net_device *netdev, u32 level)
adapter->msg_enable = level;
}
-static u64 be_get_rss_hash_opts(struct be_adapter *adapter, u64 flow_type)
+static int be_get_rxfh_fields(struct net_device *netdev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ u64 flow_type = cmd->flow_type;
u64 data = 0;
+ if (!be_multi_rxq(adapter)) {
+ dev_info(&adapter->pdev->dev,
+ "ethtool::get_rxfh: RX flow hashing is disabled\n");
+ return -EINVAL;
+ }
+
switch (flow_type) {
case TCP_V4_FLOW:
if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV4)
@@ -1104,7 +1113,8 @@ static u64 be_get_rss_hash_opts(struct be_adapter *adapter, u64 flow_type)
break;
}
- return data;
+ cmd->data = data;
+ return 0;
}
static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
@@ -1119,9 +1129,6 @@ static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
}
switch (cmd->cmd) {
- case ETHTOOL_GRXFH:
- cmd->data = be_get_rss_hash_opts(adapter, cmd->flow_type);
- break;
case ETHTOOL_GRXRINGS:
cmd->data = adapter->num_rx_qs;
break;
@@ -1132,11 +1139,19 @@ static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
return 0;
}
-static int be_set_rss_hash_opts(struct be_adapter *adapter,
- struct ethtool_rxnfc *cmd)
+static int be_set_rxfh_fields(struct net_device *netdev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
{
- int status;
+ struct be_adapter *adapter = netdev_priv(netdev);
u32 rss_flags = adapter->rss_info.rss_flags;
+ int status;
+
+ if (!be_multi_rxq(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "ethtool::set_rxfh: RX flow hashing is disabled\n");
+ return -EINVAL;
+ }
if (cmd->data != L3_RSS_FLAGS &&
cmd->data != (L3_RSS_FLAGS | L4_RSS_FLAGS))
@@ -1195,28 +1210,6 @@ static int be_set_rss_hash_opts(struct be_adapter *adapter,
return be_cmd_status(status);
}
-static int be_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
-{
- struct be_adapter *adapter = netdev_priv(netdev);
- int status = 0;
-
- if (!be_multi_rxq(adapter)) {
- dev_err(&adapter->pdev->dev,
- "ethtool::set_rxnfc: RX flow hashing is disabled\n");
- return -EINVAL;
- }
-
- switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- status = be_set_rss_hash_opts(adapter, cmd);
- break;
- default:
- return -EINVAL;
- }
-
- return status;
-}
-
static void be_get_channels(struct net_device *netdev,
struct ethtool_channels *ch)
{
@@ -1449,7 +1442,8 @@ const struct ethtool_ops be_ethtool_ops = {
.flash_device = be_do_flash,
.self_test = be_self_test,
.get_rxnfc = be_get_rxnfc,
- .set_rxnfc = be_set_rxnfc,
+ .get_rxfh_fields = be_get_rxfh_fields,
+ .set_rxfh_fields = be_set_rxfh_fields,
.get_rxfh_indir_size = be_get_rxfh_indir_size,
.get_rxfh_key_size = be_get_rxfh_key_size,
.get_rxfh = be_get_rxfh,
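The be_ethtool.c hunks above move the ETHTOOL_GRXFH/ETHTOOL_SRXFH handling out of .get_rxnfc/.set_rxnfc and into the dedicated .get_rxfh_fields/.set_rxfh_fields callbacks, which are handed a struct ethtool_rxfh_fields (flow_type plus the RXH_* data mask) directly. A bare-bones sketch of the new callback shapes follows; demo_get_fields, demo_set_fields and demo_ethtool_ops are placeholder names.

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static int demo_get_fields(struct net_device *dev,
			   struct ethtool_rxfh_fields *fields)
{
	/* Report which header fields feed the RSS hash for this flow type */
	switch (fields->flow_type) {
	case TCP_V4_FLOW:
		fields->data = RXH_IP_SRC | RXH_IP_DST |
			       RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	default:
		fields->data = 0;
		break;
	}
	return 0;
}

static int demo_set_fields(struct net_device *dev,
			   const struct ethtool_rxfh_fields *fields,
			   struct netlink_ext_ack *extack)
{
	/* Validate fields->data and program the hardware hash config here */
	return 0;
}

static const struct ethtool_ops demo_ethtool_ops = {
	.get_rxfh_fields = demo_get_fields,
	.set_rxfh_fields = demo_set_fields,
};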
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index ad862ed7888a..5bb31c8fab39 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1296,7 +1296,8 @@ static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
(adapter->bmc_filt_mask & BMC_FILT_MULTICAST)
static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
- struct sk_buff **skb)
+ struct sk_buff **skb,
+ struct be_wrb_params *wrb_params)
{
struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
bool os2bmc = false;
@@ -1360,7 +1361,7 @@ done:
* to BMC, asic expects the vlan to be inline in the packet.
*/
if (os2bmc)
- *skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);
+ *skb = be_insert_vlan_in_pkt(adapter, *skb, wrb_params);
return os2bmc;
}
@@ -1381,19 +1382,17 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
be_get_wrb_params_from_skb(adapter, skb, &wrb_params);
wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
- if (unlikely(!wrb_cnt)) {
- dev_kfree_skb_any(skb);
- goto drop;
- }
+ if (unlikely(!wrb_cnt))
+ goto drop_skb;
/* if os2bmc is enabled and if the pkt is destined to bmc,
* enqueue the pkt a 2nd time with mgmt bit set.
*/
- if (be_send_pkt_to_bmc(adapter, &skb)) {
+ if (be_send_pkt_to_bmc(adapter, &skb, &wrb_params)) {
BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
if (unlikely(!wrb_cnt))
- goto drop;
+ goto drop_skb;
else
skb_get(skb);
}
@@ -1407,6 +1406,8 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
be_xmit_flush(adapter, txo);
return NETDEV_TX_OK;
+drop_skb:
+ dev_kfree_skb_any(skb);
drop:
tx_stats(txo)->tx_drv_drops++;
/* Flush the already enqueued tx requests */
@@ -1465,10 +1466,10 @@ static void be_tx_timeout(struct net_device *netdev, unsigned int txqueue)
ntohs(tcphdr->source));
dev_info(dev, "TCP dest port %d\n",
ntohs(tcphdr->dest));
- dev_info(dev, "TCP sequence num %d\n",
- ntohs(tcphdr->seq));
- dev_info(dev, "TCP ack_seq %d\n",
- ntohs(tcphdr->ack_seq));
+ dev_info(dev, "TCP sequence num %u\n",
+ ntohl(tcphdr->seq));
+ dev_info(dev, "TCP ack_seq %u\n",
+ ntohl(tcphdr->ack_seq));
} else if (ip_hdr(skb)->protocol ==
IPPROTO_UDP) {
udphdr = udp_hdr(skb);
@@ -4031,8 +4032,7 @@ static int be_vxlan_unset_port(struct net_device *netdev, unsigned int table,
static const struct udp_tunnel_nic_info be_udp_tunnels = {
.set_port = be_vxlan_set_port,
.unset_port = be_vxlan_unset_port,
- .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
- UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+ .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
.tables = {
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
},
@@ -4982,10 +4982,7 @@ static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
if (!br_spec)
return -EINVAL;
- nla_for_each_nested(attr, br_spec, rem) {
- if (nla_type(attr) != IFLA_BRIDGE_MODE)
- continue;
-
+ nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
mode = nla_get_u16(attr);
if (BE3_chip(adapter) && mode == BRIDGE_MODE_VEPA)
return -EOPNOTSUPP;
@@ -5670,8 +5667,8 @@ static int be_drv_init(struct be_adapter *adapter)
}
mutex_init(&adapter->mbox_lock);
- mutex_init(&adapter->mcc_lock);
mutex_init(&adapter->rx_filter_lock);
+ spin_lock_init(&adapter->mcc_lock);
spin_lock_init(&adapter->mcc_cq_lock);
init_completion(&adapter->et_cmd_compl);
diff --git a/drivers/net/ethernet/engleder/tsnep.h b/drivers/net/ethernet/engleder/tsnep.h
index f188fba021a6..03e19aea9ea4 100644
--- a/drivers/net/ethernet/engleder/tsnep.h
+++ b/drivers/net/ethernet/engleder/tsnep.h
@@ -176,7 +176,7 @@ struct tsnep_adapter {
struct tsnep_gcl gcl[2];
int next_gcl;
- struct hwtstamp_config hwtstamp_config;
+ struct kernel_hwtstamp_config hwtstamp_config;
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_clock_info;
/* ptp clock lock */
@@ -203,7 +203,11 @@ extern const struct ethtool_ops tsnep_ethtool_ops;
int tsnep_ptp_init(struct tsnep_adapter *adapter);
void tsnep_ptp_cleanup(struct tsnep_adapter *adapter);
-int tsnep_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
+int tsnep_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config);
+int tsnep_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
int tsnep_tc_init(struct tsnep_adapter *adapter);
void tsnep_tc_cleanup(struct tsnep_adapter *adapter);
diff --git a/drivers/net/ethernet/engleder/tsnep_ethtool.c b/drivers/net/ethernet/engleder/tsnep_ethtool.c
index 65ec1abc9442..228a638eae16 100644
--- a/drivers/net/ethernet/engleder/tsnep_ethtool.c
+++ b/drivers/net/ethernet/engleder/tsnep_ethtool.c
@@ -305,21 +305,17 @@ static void tsnep_ethtool_get_channels(struct net_device *netdev,
}
static int tsnep_ethtool_get_ts_info(struct net_device *netdev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct tsnep_adapter *adapter = netdev_priv(netdev);
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
if (adapter->ptp_clock)
info->phc_index = ptp_clock_index(adapter->ptp_clock);
- else
- info->phc_index = -1;
info->tx_types = BIT(HWTSTAMP_TX_OFF) |
BIT(HWTSTAMP_TX_ON);
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index 4b15af6b7122..b118407c30e8 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -67,6 +67,8 @@
#define TSNEP_TX_TYPE_XDP_NDO_MAP_PAGE (TSNEP_TX_TYPE_XDP_NDO | TSNEP_TX_TYPE_MAP_PAGE)
#define TSNEP_TX_TYPE_XDP (TSNEP_TX_TYPE_XDP_TX | TSNEP_TX_TYPE_XDP_NDO)
#define TSNEP_TX_TYPE_XSK BIT(12)
+#define TSNEP_TX_TYPE_TSTAMP BIT(13)
+#define TSNEP_TX_TYPE_SKB_TSTAMP (TSNEP_TX_TYPE_SKB | TSNEP_TX_TYPE_TSTAMP)
#define TSNEP_XDP_TX BIT(0)
#define TSNEP_XDP_REDIRECT BIT(1)
@@ -221,20 +223,19 @@ static void tsnep_phy_link_status_change(struct net_device *netdev)
static int tsnep_phy_loopback(struct tsnep_adapter *adapter, bool enable)
{
- int retval;
-
- retval = phy_loopback(adapter->phydev, enable);
+ int speed;
- /* PHY link state change is not signaled if loopback is enabled, it
- * would delay a working loopback anyway, let's ensure that loopback
- * is working immediately by setting link mode directly
- */
- if (!retval && enable) {
- netif_carrier_on(adapter->netdev);
- tsnep_set_link_mode(adapter);
+ if (enable) {
+ if (adapter->phydev->autoneg == AUTONEG_DISABLE &&
+ adapter->phydev->speed == SPEED_100)
+ speed = SPEED_100;
+ else
+ speed = SPEED_1000;
+ } else {
+ speed = 0;
}
- return retval;
+ return phy_loopback(adapter->phydev, enable, speed);
}
static int tsnep_phy_open(struct tsnep_adapter *adapter)
@@ -387,8 +388,7 @@ static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length,
if (entry->skb) {
entry->properties = length & TSNEP_DESC_LENGTH_MASK;
entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
- if ((entry->type & TSNEP_TX_TYPE_SKB) &&
- (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS))
+ if ((entry->type & TSNEP_TX_TYPE_SKB_TSTAMP) == TSNEP_TX_TYPE_SKB_TSTAMP)
entry->properties |= TSNEP_DESC_EXTENDED_WRITEBACK_FLAG;
/* toggle user flag to prevent false acknowledge
@@ -480,7 +480,8 @@ static int tsnep_tx_map_frag(skb_frag_t *frag, struct tsnep_tx_entry *entry,
return mapped;
}
-static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
+static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count,
+ bool do_tstamp)
{
struct device *dmadev = tx->adapter->dmadev;
struct tsnep_tx_entry *entry;
@@ -506,6 +507,9 @@ static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
entry->type = TSNEP_TX_TYPE_SKB_INLINE;
mapped = 0;
}
+
+ if (do_tstamp)
+ entry->type |= TSNEP_TX_TYPE_TSTAMP;
} else {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
@@ -559,11 +563,12 @@ static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count)
static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
struct tsnep_tx *tx)
{
- int count = 1;
struct tsnep_tx_entry *entry;
+ bool do_tstamp = false;
+ int count = 1;
int length;
- int i;
int retval;
+ int i;
if (skb_shinfo(skb)->nr_frags > 0)
count += skb_shinfo(skb)->nr_frags;
@@ -580,7 +585,13 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
entry = &tx->entry[tx->write];
entry->skb = skb;
- retval = tsnep_tx_map(skb, tx, count);
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ tx->adapter->hwtstamp_config.tx_type == HWTSTAMP_TX_ON) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ do_tstamp = true;
+ }
+
+ retval = tsnep_tx_map(skb, tx, count, do_tstamp);
if (retval < 0) {
tsnep_tx_unmap(tx, tx->write, count);
dev_kfree_skb_any(entry->skb);
@@ -592,9 +603,6 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
}
length = retval;
- if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
-
for (i = 0; i < count; i++)
tsnep_tx_activate(tx, (tx->write + i) & TSNEP_RING_MASK, length,
i == count - 1);
@@ -845,15 +853,14 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
length = tsnep_tx_unmap(tx, tx->read, count);
- if ((entry->type & TSNEP_TX_TYPE_SKB) &&
- (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) &&
+ if (((entry->type & TSNEP_TX_TYPE_SKB_TSTAMP) == TSNEP_TX_TYPE_SKB_TSTAMP) &&
(__le32_to_cpu(entry->desc_wb->properties) &
TSNEP_DESC_EXTENDED_WRITEBACK_FLAG)) {
struct skb_shared_hwtstamps hwtstamps;
u64 timestamp;
- if (skb_shinfo(entry->skb)->tx_flags &
- SKBTX_HW_TSTAMP_USE_CYCLES)
+ if (entry->skb->sk &&
+ READ_ONCE(entry->skb->sk->sk_tsflags) & SOF_TIMESTAMPING_BIND_PHC)
timestamp =
__le64_to_cpu(entry->desc_wb->counter);
else
@@ -1587,7 +1594,7 @@ static int tsnep_rx_poll_zc(struct tsnep_rx *rx, struct napi_struct *napi,
length = __le32_to_cpu(entry->desc_wb->properties) &
TSNEP_DESC_LENGTH_MASK;
xsk_buff_set_size(entry->xdp, length - ETH_FCS_LEN);
- xsk_buff_dma_sync_for_cpu(entry->xdp, rx->xsk_pool);
+ xsk_buff_dma_sync_for_cpu(entry->xdp);
/* RX metadata with timestamps is in front of actual data,
* subtract metadata size to get length of actual data and
@@ -1966,23 +1973,41 @@ failed:
static void tsnep_queue_enable(struct tsnep_queue *queue)
{
+ struct tsnep_adapter *adapter = queue->adapter;
+
+ netif_napi_set_irq(&queue->napi, queue->irq);
napi_enable(&queue->napi);
- tsnep_enable_irq(queue->adapter, queue->irq_mask);
+ tsnep_enable_irq(adapter, queue->irq_mask);
- if (queue->tx)
+ if (queue->tx) {
+ netif_queue_set_napi(adapter->netdev, queue->tx->queue_index,
+ NETDEV_QUEUE_TYPE_TX, &queue->napi);
tsnep_tx_enable(queue->tx);
+ }
- if (queue->rx)
+ if (queue->rx) {
+ netif_queue_set_napi(adapter->netdev, queue->rx->queue_index,
+ NETDEV_QUEUE_TYPE_RX, &queue->napi);
tsnep_rx_enable(queue->rx);
+ }
}
static void tsnep_queue_disable(struct tsnep_queue *queue)
{
- if (queue->tx)
+ struct tsnep_adapter *adapter = queue->adapter;
+
+ if (queue->rx)
+ netif_queue_set_napi(adapter->netdev, queue->rx->queue_index,
+ NETDEV_QUEUE_TYPE_RX, NULL);
+
+ if (queue->tx) {
tsnep_tx_disable(queue->tx, &queue->napi);
+ netif_queue_set_napi(adapter->netdev, queue->tx->queue_index,
+ NETDEV_QUEUE_TYPE_TX, NULL);
+ }
napi_disable(&queue->napi);
- tsnep_disable_irq(queue->adapter, queue->irq_mask);
+ tsnep_disable_irq(adapter, queue->irq_mask);
/* disable RX after NAPI polling has been disabled, because RX can be
* enabled during NAPI polling
@@ -2143,16 +2168,6 @@ static netdev_tx_t tsnep_netdev_xmit_frame(struct sk_buff *skb,
return tsnep_xmit_frame_ring(skb, &adapter->tx[queue_mapping]);
}
-static int tsnep_netdev_ioctl(struct net_device *netdev, struct ifreq *ifr,
- int cmd)
-{
- if (!netif_running(netdev))
- return -EINVAL;
- if (cmd == SIOCSHWTSTAMP || cmd == SIOCGHWTSTAMP)
- return tsnep_ptp_ioctl(netdev, ifr, cmd);
- return phy_mii_ioctl(netdev->phydev, ifr, cmd);
-}
-
static void tsnep_netdev_set_multicast(struct net_device *netdev)
{
struct tsnep_adapter *adapter = netdev_priv(netdev);
@@ -2359,7 +2374,7 @@ static const struct net_device_ops tsnep_netdev_ops = {
.ndo_open = tsnep_netdev_open,
.ndo_stop = tsnep_netdev_close,
.ndo_start_xmit = tsnep_netdev_xmit_frame,
- .ndo_eth_ioctl = tsnep_netdev_ioctl,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_set_rx_mode = tsnep_netdev_set_multicast,
.ndo_get_stats64 = tsnep_netdev_get_stats64,
.ndo_set_mac_address = tsnep_netdev_set_mac_address,
@@ -2369,6 +2384,8 @@ static const struct net_device_ops tsnep_netdev_ops = {
.ndo_bpf = tsnep_netdev_bpf,
.ndo_xdp_xmit = tsnep_netdev_xdp_xmit,
.ndo_xsk_wakeup = tsnep_netdev_xsk_wakeup,
+ .ndo_hwtstamp_get = tsnep_ptp_hwtstamp_get,
+ .ndo_hwtstamp_set = tsnep_ptp_hwtstamp_set,
};
static int tsnep_mac_init(struct tsnep_adapter *adapter)
@@ -2689,7 +2706,7 @@ static struct platform_driver tsnep_driver = {
.of_match_table = tsnep_of_match,
},
.probe = tsnep_probe,
- .remove_new = tsnep_remove,
+ .remove = tsnep_remove,
};
module_platform_driver(tsnep_driver);
diff --git a/drivers/net/ethernet/engleder/tsnep_ptp.c b/drivers/net/ethernet/engleder/tsnep_ptp.c
index 54fbf0126815..ae1308eb813d 100644
--- a/drivers/net/ethernet/engleder/tsnep_ptp.c
+++ b/drivers/net/ethernet/engleder/tsnep_ptp.c
@@ -19,57 +19,53 @@ void tsnep_get_system_time(struct tsnep_adapter *adapter, u64 *time)
*time = (((u64)high) << 32) | ((u64)low);
}
-int tsnep_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+int tsnep_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
{
struct tsnep_adapter *adapter = netdev_priv(netdev);
- struct hwtstamp_config config;
-
- if (!ifr)
- return -EINVAL;
-
- if (cmd == SIOCSHWTSTAMP) {
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- switch (config.tx_type) {
- case HWTSTAMP_TX_OFF:
- case HWTSTAMP_TX_ON:
- break;
- default:
- return -ERANGE;
- }
-
- switch (config.rx_filter) {
- case HWTSTAMP_FILTER_NONE:
- break;
- case HWTSTAMP_FILTER_ALL:
- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- case HWTSTAMP_FILTER_NTP_ALL:
- config.rx_filter = HWTSTAMP_FILTER_ALL;
- break;
- default:
- return -ERANGE;
- }
-
- memcpy(&adapter->hwtstamp_config, &config,
- sizeof(adapter->hwtstamp_config));
+
+ *config = adapter->hwtstamp_config;
+ return 0;
+}
+
+int tsnep_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
}
- if (copy_to_user(ifr->ifr_data, &adapter->hwtstamp_config,
- sizeof(adapter->hwtstamp_config)))
- return -EFAULT;
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+ adapter->hwtstamp_config = *config;
return 0;
}
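The tsnep hunks drop the SIOCSHWTSTAMP/SIOCGHWTSTAMP ioctl plumbing and wire the driver to the .ndo_hwtstamp_get/.ndo_hwtstamp_set net_device ops instead; the core now handles the copy to and from user space and passes the driver a struct kernel_hwtstamp_config plus an extack. A skeletal sketch of the two callbacks follows; demo_priv and the demo_* names are placeholders, and storing the accepted config in the private struct mirrors what tsnep does.

#include <linux/net_tstamp.h>
#include <linux/netdevice.h>

struct demo_priv {
	struct kernel_hwtstamp_config hwtstamp_config;
};

static int demo_hwtstamp_get(struct net_device *dev,
			     struct kernel_hwtstamp_config *config)
{
	struct demo_priv *priv = netdev_priv(dev);

	*config = priv->hwtstamp_config;
	return 0;
}

static int demo_hwtstamp_set(struct net_device *dev,
			     struct kernel_hwtstamp_config *config,
			     struct netlink_ext_ack *extack)
{
	struct demo_priv *priv = netdev_priv(dev);

	if (config->tx_type != HWTSTAMP_TX_OFF &&
	    config->tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	/* ... validate config->rx_filter and program the hardware ... */

	priv->hwtstamp_config = *config;
	return 0;
}

static const struct net_device_ops demo_netdev_ops = {
	.ndo_hwtstamp_get = demo_hwtstamp_get,
	.ndo_hwtstamp_set = demo_hwtstamp_set,
};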
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index ad41c9019018..0c418557264c 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -1296,7 +1296,7 @@ MODULE_DEVICE_TABLE(of, ethoc_match);
static struct platform_driver ethoc_driver = {
.probe = ethoc_probe,
- .remove_new = ethoc_remove,
+ .remove = ethoc_remove,
.suspend = ethoc_suspend,
.resume = ethoc_resume,
.driver = {
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index 9ebe751c1df0..5cb478e98697 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -651,7 +651,7 @@ MODULE_DEVICE_TABLE(of, nps_enet_dt_ids);
static struct platform_driver nps_enet_driver = {
.probe = nps_enet_probe,
- .remove_new = nps_enet_remove,
+ .remove = nps_enet_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = nps_enet_dt_ids,
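Several hunks in this series (tsnep above, ethoc and nps_enet here, and the Faraday drivers below) rename .remove_new back to .remove in struct platform_driver; the callback keeps the void return type that .remove_new carried, so only the field name changes. A minimal sketch with illustrative names (demo_probe/demo_remove):

#include <linux/platform_device.h>
#include <linux/module.h>

static int demo_probe(struct platform_device *pdev)
{
	return 0;
}

static void demo_remove(struct platform_device *pdev)
{
	/* release resources; no error code can be returned from remove */
}

static struct platform_driver demo_driver = {
	.probe	= demo_probe,
	.remove	= demo_remove,	/* was: .remove_new = demo_remove */
	.driver	= {
		.name = "demo",
	},
};
module_platform_driver(demo_driver);

MODULE_LICENSE("GPL");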
diff --git a/drivers/net/ethernet/faraday/Kconfig b/drivers/net/ethernet/faraday/Kconfig
index c699bd6bcbb9..474073c7f94d 100644
--- a/drivers/net/ethernet/faraday/Kconfig
+++ b/drivers/net/ethernet/faraday/Kconfig
@@ -31,6 +31,7 @@ config FTGMAC100
depends on ARM || COMPILE_TEST
depends on !64BIT || BROKEN
select PHYLIB
+ select FIXED_PHY
select MDIO_ASPEED if MACH_ASPEED_G6
select CRC32
help
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index fddfd1dd5070..a863f7841210 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -9,6 +9,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
+#include <linux/reset.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
@@ -24,6 +25,7 @@
#include <linux/crc32.h>
#include <linux/if_vlan.h>
#include <linux/of_net.h>
+#include <linux/phy_fixed.h>
#include <net/ip.h>
#include <net/ncsi.h>
@@ -50,6 +52,15 @@
#define FTGMAC_100MHZ 100000000
#define FTGMAC_25MHZ 25000000
+/* For NC-SI to register a fixed-link phy device */
+static struct fixed_phy_status ncsi_phy_status = {
+ .link = 1,
+ .speed = SPEED_100,
+ .duplex = DUPLEX_FULL,
+ .pause = 0,
+ .asym_pause = 0
+};
+
struct ftgmac100 {
/* Registers */
struct resource *res;
@@ -91,6 +102,8 @@ struct ftgmac100 {
/* AST2500/AST2600 RMII ref clock gate */
struct clk *rclk;
+ /* Aspeed reset control */
+ struct reset_control *rst;
/* Link management */
int cur_speed;
@@ -138,6 +151,23 @@ static int ftgmac100_reset_and_config_mac(struct ftgmac100 *priv)
{
u32 maccr = 0;
+ /* Aspeed RMII needs SCU reset to clear status */
+ if (priv->is_aspeed && priv->netdev->phydev->interface == PHY_INTERFACE_MODE_RMII) {
+ int err;
+
+ err = reset_control_assert(priv->rst);
+ if (err) {
+ dev_err(priv->dev, "Failed to reset mac (%d)\n", err);
+ return err;
+ }
+ usleep_range(10000, 20000);
+ err = reset_control_deassert(priv->rst);
+ if (err) {
+ dev_err(priv->dev, "Failed to deassert mac reset (%d)\n", err);
+ return err;
+ }
+ }
+
switch (priv->cur_speed) {
case SPEED_10:
case 0: /* no link */
@@ -572,7 +602,7 @@ static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed)
(*processed)++;
return true;
- drop:
+drop:
/* Clean rxdes0 (which resets own bit) */
rxdes->rxdes0 = cpu_to_le32(status & priv->rxdes0_edorr_mask);
priv->rx_pointer = ftgmac100_next_rx_pointer(priv, pointer);
@@ -656,6 +686,11 @@ static bool ftgmac100_tx_complete_packet(struct ftgmac100 *priv)
ftgmac100_free_tx_packet(priv, pointer, skb, txdes, ctl_stat);
txdes->txdes0 = cpu_to_le32(ctl_stat & priv->txdes0_edotr_mask);
+ /* Ensure the descriptor config is visible before setting the tx
+ * pointer.
+ */
+ smp_wmb();
+
priv->tx_clean_pointer = ftgmac100_next_tx_pointer(priv, pointer);
return true;
@@ -809,6 +844,11 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
dma_wmb();
first->txdes0 = cpu_to_le32(f_ctl_stat);
+ /* Ensure the descriptor config is visible before setting the tx
+ * pointer.
+ */
+ smp_wmb();
+
/* Update next TX pointer */
priv->tx_pointer = pointer;
@@ -829,7 +869,7 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
- dma_err:
+dma_err:
if (net_ratelimit())
netdev_err(netdev, "map tx fragment failed\n");
@@ -851,7 +891,7 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
* last fragment, so we know ftgmac100_free_tx_packet()
* hasn't freed the skb yet.
*/
- drop:
+drop:
/* Drop the packet */
dev_kfree_skb_any(skb);
netdev->stats.tx_dropped++;
@@ -1344,7 +1384,7 @@ static void ftgmac100_reset(struct ftgmac100 *priv)
ftgmac100_init_all(priv, true);
netdev_dbg(netdev, "Reset done !\n");
- bail:
+bail:
if (priv->mii_bus)
mutex_unlock(&priv->mii_bus->mdio_lock);
if (netdev->phydev)
@@ -1408,7 +1448,7 @@ static void ftgmac100_adjust_link(struct net_device *netdev)
/* Disable all interrupts */
iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
- /* Release phy lock to allow ftgmac100_reset to aquire it, keeping lock
+ /* Release phy lock to allow ftgmac100_reset to acquire it, keeping lock
* order consistent to prevent dead lock.
*/
if (netdev->phydev)
@@ -1531,7 +1571,8 @@ static int ftgmac100_open(struct net_device *netdev)
if (netdev->phydev) {
/* If we have a PHY, start polling */
phy_start(netdev->phydev);
- } else if (priv->use_ncsi) {
+ }
+ if (priv->use_ncsi) {
/* If using NC-SI, set our carrier on and start the stack */
netif_carrier_on(netdev);
@@ -1543,15 +1584,16 @@ static int ftgmac100_open(struct net_device *netdev)
return 0;
- err_ncsi:
+err_ncsi:
+ phy_stop(netdev->phydev);
napi_disable(&priv->napi);
netif_stop_queue(netdev);
- err_alloc:
+err_alloc:
ftgmac100_free_buffers(priv);
free_irq(netdev->irq, netdev);
- err_irq:
+err_irq:
netif_napi_del(&priv->napi);
- err_hw:
+err_hw:
iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
ftgmac100_free_rings(priv);
return err;
@@ -1577,7 +1619,7 @@ static int ftgmac100_stop(struct net_device *netdev)
netif_napi_del(&priv->napi);
if (netdev->phydev)
phy_stop(netdev->phydev);
- else if (priv->use_ncsi)
+ if (priv->use_ncsi)
ncsi_stop_dev(priv->ndev);
ftgmac100_stop_hw(priv);
@@ -1708,13 +1750,17 @@ err_register_mdiobus:
static void ftgmac100_phy_disconnect(struct net_device *netdev)
{
struct ftgmac100 *priv = netdev_priv(netdev);
+ struct phy_device *phydev = netdev->phydev;
- if (!netdev->phydev)
+ if (!phydev)
return;
- phy_disconnect(netdev->phydev);
+ phy_disconnect(phydev);
if (of_phy_is_fixed_link(priv->dev->of_node))
of_phy_deregister_fixed_link(priv->dev->of_node);
+
+ if (priv->use_ncsi)
+ fixed_phy_unregister(phydev);
}
static void ftgmac100_destroy_mdio(struct net_device *netdev)
@@ -1792,6 +1838,7 @@ static int ftgmac100_probe(struct platform_device *pdev)
struct resource *res;
int irq;
struct net_device *netdev;
+ struct phy_device *phydev;
struct ftgmac100 *priv;
struct device_node *np;
int err = 0;
@@ -1879,35 +1926,30 @@ static int ftgmac100_probe(struct platform_device *pdev)
err = -EINVAL;
goto err_phy_connect;
}
- } else if (np && of_phy_is_fixed_link(np)) {
- struct phy_device *phy;
- err = of_phy_register_fixed_link(np);
- if (err) {
- dev_err(&pdev->dev, "Failed to register fixed PHY\n");
+ phydev = fixed_phy_register(&ncsi_phy_status, np);
+ if (IS_ERR(phydev)) {
+ dev_err(&pdev->dev, "failed to register fixed PHY device\n");
+ err = PTR_ERR(phydev);
goto err_phy_connect;
}
-
- phy = of_phy_get_and_connect(priv->netdev, np,
- &ftgmac100_adjust_link);
- if (!phy) {
- dev_err(&pdev->dev, "Failed to connect to fixed PHY\n");
- of_phy_deregister_fixed_link(np);
- err = -EINVAL;
+ err = phy_connect_direct(netdev, phydev, ftgmac100_adjust_link,
+ PHY_INTERFACE_MODE_RMII);
+ if (err) {
+ dev_err(&pdev->dev, "Connecting PHY failed\n");
goto err_phy_connect;
}
-
- /* Display what we found */
- phy_attached_info(phy);
- } else if (np && of_get_property(np, "phy-handle", NULL)) {
+ } else if (np && (of_phy_is_fixed_link(np) ||
+ of_get_property(np, "phy-handle", NULL))) {
struct phy_device *phy;
/* Support "mdio"/"phy" child nodes for ast2400/2500 with
* an embedded MDIO controller. Automatically scan the DTS for
* available PHYs and register them.
*/
- if (of_device_is_compatible(np, "aspeed,ast2400-mac") ||
- of_device_is_compatible(np, "aspeed,ast2500-mac")) {
+ if (of_get_property(np, "phy-handle", NULL) &&
+ (of_device_is_compatible(np, "aspeed,ast2400-mac") ||
+ of_device_is_compatible(np, "aspeed,ast2500-mac"))) {
err = ftgmac100_setup_mdio(netdev);
if (err)
goto err_setup_mdio;
@@ -1947,6 +1989,12 @@ static int ftgmac100_probe(struct platform_device *pdev)
}
+ priv->rst = devm_reset_control_get_optional_exclusive(priv->dev, NULL);
+ if (IS_ERR(priv->rst)) {
+ err = PTR_ERR(priv->rst);
+ goto err_phy_connect;
+ }
+
if (priv->is_aspeed) {
err = ftgmac100_setup_clk(priv);
if (err)
@@ -2050,7 +2098,7 @@ MODULE_DEVICE_TABLE(of, ftgmac100_of_match);
static struct platform_driver ftgmac100_driver = {
.probe = ftgmac100_probe,
- .remove_new = ftgmac100_remove,
+ .remove = ftgmac100_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = ftgmac100_of_match,
diff --git a/drivers/net/ethernet/faraday/ftgmac100.h b/drivers/net/ethernet/faraday/ftgmac100.h
index 63b3e02fab16..4968f6f0bdbc 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.h
+++ b/drivers/net/ethernet/faraday/ftgmac100.h
@@ -84,7 +84,7 @@
FTGMAC100_INT_RPKT_BUF)
/* All the interrupts we care about */
-#define FTGMAC100_INT_ALL (FTGMAC100_INT_RPKT_BUF | \
+#define FTGMAC100_INT_ALL (FTGMAC100_INT_RXTX | \
FTGMAC100_INT_BAD)
/*
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 003bc9a45c65..5803a382f0ba 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -1092,7 +1092,7 @@ static int ftmac100_change_mtu(struct net_device *netdev, int mtu)
}
iowrite32(maccr, priv->base + FTMAC100_OFFSET_MACCR);
- netdev->mtu = mtu;
+ WRITE_ONCE(netdev->mtu, mtu);
return 0;
}
@@ -1243,7 +1243,7 @@ static const struct of_device_id ftmac100_of_ids[] = {
static struct platform_driver ftmac100_driver = {
.probe = ftmac100_probe,
- .remove_new = ftmac100_remove,
+ .remove = ftmac100_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = ftmac100_of_ids
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index ed18450fd2cc..3c9961806f75 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -196,7 +196,7 @@ enum intr_status_bits {
ERI = 0x00000080, /* receive early int */
CNTOVF = 0x00000040, /* counter overflow */
RBU = 0x00000020, /* receive buffer unavailable */
- TBU = 0x00000010, /* transmit buffer unavilable */
+ TBU = 0x00000010, /* transmit buffer unavailable */
TI = 0x00000008, /* transmit interrupt */
RI = 0x00000004, /* receive interrupt */
RxErr = 0x00000002, /* receive error */
@@ -215,7 +215,7 @@ enum rx_mode_bits {
CR_W_RXMODEMASK = 0x000000e0,
CR_W_PROM = 0x00000080, /* promiscuous mode */
CR_W_AB = 0x00000040, /* accept broadcast */
- CR_W_AM = 0x00000020, /* accept mutlicast */
+ CR_W_AM = 0x00000020, /* accept multicast */
CR_W_ARP = 0x00000008, /* receive runt pkt */
CR_W_ALP = 0x00000004, /* receive long pkt */
CR_W_SEP = 0x00000002, /* receive error pkt */
@@ -1074,7 +1074,7 @@ static void allocate_rx_buffers(struct net_device *dev)
static void netdev_timer(struct timer_list *t)
{
- struct netdev_private *np = from_timer(np, t, timer);
+ struct netdev_private *np = timer_container_of(np, t, timer);
struct net_device *dev = np->mii.dev;
void __iomem *ioaddr = np->mem;
int old_crvalue = np->crvalue;
@@ -1163,7 +1163,7 @@ static void enable_rxtx(struct net_device *dev)
static void reset_timer(struct timer_list *t)
{
- struct netdev_private *np = from_timer(np, t, reset_timer);
+ struct netdev_private *np = timer_container_of(np, t, reset_timer);
struct net_device *dev = np->mii.dev;
unsigned long flags;
@@ -1900,8 +1900,8 @@ static int netdev_close(struct net_device *dev)
/* Stop the chip's Tx and Rx processes. */
stop_nic_rxtx(ioaddr, 0);
- del_timer_sync(&np->timer);
- del_timer_sync(&np->reset_timer);
+ timer_delete_sync(&np->timer);
+ timer_delete_sync(&np->reset_timer);
free_irq(np->pci_dev->irq, dev);
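The fealnx hunks above replace from_timer() with timer_container_of() and del_timer_sync() with timer_delete_sync(); both are renames of the same helpers (a container_of() lookup from the timer_list pointer, and a synchronous timer deletion), so the call sites keep their arguments. A tiny sketch with made-up names (demo_priv, demo_watchdog):

#include <linux/timer.h>

struct demo_priv {
	struct timer_list watchdog;
};

static void demo_watchdog(struct timer_list *t)
{
	/* was: from_timer(priv, t, watchdog) */
	struct demo_priv *priv = timer_container_of(priv, t, watchdog);

	/* ... handle the timeout, possibly re-arm with mod_timer() ... */
	(void)priv;
}

static void demo_stop(struct demo_priv *priv)
{
	/* was: del_timer_sync(&priv->watchdog) */
	timer_delete_sync(&priv->watchdog);
}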
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index 75401d2a5fb4..e2a591cf9601 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -28,6 +28,7 @@ config FEC
depends on PTP_1588_CLOCK_OPTIONAL
select CRC32
select PHYLIB
+ select FIXED_PHY if M5272
select PAGE_POOL
imply PAGE_POOL_STATS
imply NET_SELFTESTS
@@ -71,7 +72,6 @@ config FSL_XGMAC_MDIO
tristate "Freescale XGMAC MDIO"
select PHYLIB
depends on OF
- select MDIO_DEVRES
select OF_MDIO
help
This driver supports the MDIO bus on the Fman 10G Ethernet MACs, and
@@ -81,8 +81,7 @@ config UCC_GETH
tristate "Freescale QE Gigabit Ethernet"
depends on QUICC_ENGINE && PPC32
select FSL_PQ_MDIO
- select PHYLIB
- select FIXED_PHY
+ select PHYLINK
help
This driver supports the Gigabit Ethernet mode of the QUICC Engine,
which is available on some Freescale SOCs.
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index dcbc598b11c6..3edc8d142dd5 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -28,7 +28,6 @@
#include <linux/percpu.h>
#include <linux/dma-mapping.h>
#include <linux/sort.h>
-#include <linux/phy_fixed.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <soc/fsl/bman.h>
@@ -229,7 +228,7 @@ static int dpaa_netdev_init(struct net_device *net_dev,
net_dev->max_mtu = dpaa_get_max_mtu();
net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_LLTX | NETIF_F_RXHASH);
+ NETIF_F_RXHASH);
net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA;
/* The kernels enables GSO automatically, if we declare NETIF_F_SG.
@@ -239,6 +238,7 @@ static int dpaa_netdev_init(struct net_device *net_dev,
net_dev->features |= NETIF_F_RXCSUM;
net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ net_dev->lltx = true;
/* we do not want shared skbs on TX */
net_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
@@ -371,6 +371,7 @@ static int dpaa_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
void *type_data)
{
struct dpaa_priv *priv = netdev_priv(net_dev);
+ int num_txqs_per_tc = dpaa_num_txqs_per_tc();
struct tc_mqprio_qopt *mqprio = type_data;
u8 num_tc;
int i;
@@ -398,12 +399,12 @@ static int dpaa_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
netdev_set_num_tc(net_dev, num_tc);
for (i = 0; i < num_tc; i++)
- netdev_set_tc_queue(net_dev, i, DPAA_TC_TXQ_NUM,
- i * DPAA_TC_TXQ_NUM);
+ netdev_set_tc_queue(net_dev, i, num_txqs_per_tc,
+ i * num_txqs_per_tc);
out:
priv->num_tc = num_tc ? : 1;
- netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);
+ netif_set_real_num_tx_queues(net_dev, priv->num_tc * num_txqs_per_tc);
return 0;
}
@@ -461,6 +462,22 @@ static int dpaa_set_mac_address(struct net_device *net_dev, void *addr)
return 0;
}
+static int dpaa_addr_sync(struct net_device *net_dev, const u8 *addr)
+{
+ const struct dpaa_priv *priv = netdev_priv(net_dev);
+
+ return priv->mac_dev->add_hash_mac_addr(priv->mac_dev->fman_mac,
+ (enet_addr_t *)addr);
+}
+
+static int dpaa_addr_unsync(struct net_device *net_dev, const u8 *addr)
+{
+ const struct dpaa_priv *priv = netdev_priv(net_dev);
+
+ return priv->mac_dev->remove_hash_mac_addr(priv->mac_dev->fman_mac,
+ (enet_addr_t *)addr);
+}
+
static void dpaa_set_rx_mode(struct net_device *net_dev)
{
const struct dpaa_priv *priv;
@@ -488,9 +505,9 @@ static void dpaa_set_rx_mode(struct net_device *net_dev)
err);
}
- err = priv->mac_dev->set_multi(net_dev, priv->mac_dev);
+ err = __dev_mc_sync(net_dev, dpaa_addr_sync, dpaa_addr_unsync);
if (err < 0)
- netif_err(priv, drv, net_dev, "mac_dev->set_multi() = %d\n",
+ netif_err(priv, drv, net_dev, "dpaa_addr_sync() = %d\n",
err);
}
@@ -649,7 +666,7 @@ static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx)
fq->wq = 6;
break;
case FQ_TYPE_TX:
- switch (idx / DPAA_TC_TXQ_NUM) {
+ switch (idx / dpaa_num_txqs_per_tc()) {
case 0:
/* Low priority (best effort) */
fq->wq = 6;
@@ -667,8 +684,8 @@ static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx)
fq->wq = 0;
break;
default:
- WARN(1, "Too many TX FQs: more than %d!\n",
- DPAA_ETH_TXQ_NUM);
+ WARN(1, "Too many TX FQs: more than %zu!\n",
+ dpaa_max_num_txqs());
}
break;
default:
@@ -740,7 +757,8 @@ static int dpaa_alloc_all_fqs(struct device *dev, struct list_head *list,
port_fqs->rx_pcdq = &dpaa_fq[0];
- if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX_CONF_MQ))
+ if (!dpaa_fq_alloc(dev, 0, dpaa_max_num_txqs(), list,
+ FQ_TYPE_TX_CONF_MQ))
goto fq_alloc_failed;
dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_ERROR);
@@ -755,7 +773,7 @@ static int dpaa_alloc_all_fqs(struct device *dev, struct list_head *list,
port_fqs->tx_defq = &dpaa_fq[0];
- if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX))
+ if (!dpaa_fq_alloc(dev, 0, dpaa_max_num_txqs(), list, FQ_TYPE_TX))
goto fq_alloc_failed;
return 0;
@@ -931,14 +949,18 @@ static inline void dpaa_setup_egress(const struct dpaa_priv *priv,
}
}
-static void dpaa_fq_setup(struct dpaa_priv *priv,
- const struct dpaa_fq_cbs *fq_cbs,
- struct fman_port *tx_port)
+static int dpaa_fq_setup(struct dpaa_priv *priv,
+ const struct dpaa_fq_cbs *fq_cbs,
+ struct fman_port *tx_port)
{
int egress_cnt = 0, conf_cnt = 0, num_portals = 0, portal_cnt = 0, cpu;
const cpumask_t *affine_cpus = qman_affine_cpus();
- u16 channels[NR_CPUS];
struct dpaa_fq *fq;
+ u16 *channels;
+
+ channels = kcalloc(num_possible_cpus(), sizeof(u16), GFP_KERNEL);
+ if (!channels)
+ return -ENOMEM;
for_each_cpu_and(cpu, affine_cpus, cpu_online_mask)
channels[num_portals++] = qman_affine_channel(cpu);
@@ -965,11 +987,7 @@ static void dpaa_fq_setup(struct dpaa_priv *priv,
case FQ_TYPE_TX:
dpaa_setup_egress(priv, fq, tx_port,
&fq_cbs->egress_ern);
- /* If we have more Tx queues than the number of cores,
- * just ignore the extra ones.
- */
- if (egress_cnt < DPAA_ETH_TXQ_NUM)
- priv->egress_fqs[egress_cnt++] = &fq->fq_base;
+ priv->egress_fqs[egress_cnt++] = &fq->fq_base;
break;
case FQ_TYPE_TX_CONF_MQ:
priv->conf_fqs[conf_cnt++] = &fq->fq_base;
@@ -987,16 +1005,9 @@ static void dpaa_fq_setup(struct dpaa_priv *priv,
}
}
- /* Make sure all CPUs receive a corresponding Tx queue. */
- while (egress_cnt < DPAA_ETH_TXQ_NUM) {
- list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
- if (fq->fq_type != FQ_TYPE_TX)
- continue;
- priv->egress_fqs[egress_cnt++] = &fq->fq_base;
- if (egress_cnt == DPAA_ETH_TXQ_NUM)
- break;
- }
- }
+ kfree(channels);
+
+ return 0;
}
static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv,
@@ -1004,7 +1015,7 @@ static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv,
{
int i;
- for (i = 0; i < DPAA_ETH_TXQ_NUM; i++)
+ for (i = 0; i < dpaa_max_num_txqs(); i++)
if (priv->egress_fqs[i] == tx_fq)
return i;
@@ -1808,7 +1819,6 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
struct page *page, *head_page;
struct dpaa_bp *dpaa_bp;
void *vaddr, *sg_vaddr;
- int frag_off, frag_len;
struct sk_buff *skb;
dma_addr_t sg_addr;
int page_offset;
@@ -1851,6 +1861,11 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
* on Tx, if extra headers are added.
*/
WARN_ON(fd_off != priv->rx_headroom);
+ /* The offset to data start within the buffer holding
+ * the SGT should always be equal to the offset to data
+ * start within the first buffer holding the frame.
+ */
+ WARN_ON_ONCE(fd_off != qm_sg_entry_get_off(&sgt[i]));
skb_reserve(skb, fd_off);
skb_put(skb, qm_sg_entry_get_len(&sgt[i]));
} else {
@@ -1864,21 +1879,23 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
page = virt_to_page(sg_vaddr);
head_page = virt_to_head_page(sg_vaddr);
- /* Compute offset in (possibly tail) page */
+ /* Compute offset of sg_vaddr in (possibly tail) page */
page_offset = ((unsigned long)sg_vaddr &
(PAGE_SIZE - 1)) +
(page_address(page) - page_address(head_page));
- /* page_offset only refers to the beginning of sgt[i];
- * but the buffer itself may have an internal offset.
+
+ /* Non-initial SGT entries should not have a buffer
+ * offset.
*/
- frag_off = qm_sg_entry_get_off(&sgt[i]) + page_offset;
- frag_len = qm_sg_entry_get_len(&sgt[i]);
+ WARN_ON_ONCE(qm_sg_entry_get_off(&sgt[i]));
+
/* skb_add_rx_frag() does no checking on the page; if
* we pass it a tail page, we'll end up with
- * bad page accounting and eventually with segafults.
+ * bad page accounting and eventually with segfaults.
*/
- skb_add_rx_frag(skb, i - 1, head_page, frag_off,
- frag_len, dpaa_bp->size);
+ skb_add_rx_frag(skb, i - 1, head_page, page_offset,
+ qm_sg_entry_get_len(&sgt[i]),
+ dpaa_bp->size);
}
/* Update the pool count for the current {cpu x bpool} */
@@ -2263,7 +2280,7 @@ static int dpaa_a050385_wa_xdpf(struct dpaa_priv *priv,
new_xdpf->len = xdpf->len;
new_xdpf->headroom = priv->tx_headroom;
new_xdpf->frame_sz = DPAA_BP_RAW_SIZE;
- new_xdpf->mem.type = MEM_TYPE_PAGE_ORDER0;
+ new_xdpf->mem_type = MEM_TYPE_PAGE_ORDER0;
/* Release the initial buffer */
xdp_return_frame_rx_napi(xdpf);
@@ -2277,12 +2294,12 @@ static netdev_tx_t
dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
{
const int queue_mapping = skb_get_queue_mapping(skb);
- bool nonlinear = skb_is_nonlinear(skb);
struct rtnl_link_stats64 *percpu_stats;
struct dpaa_percpu_priv *percpu_priv;
struct netdev_queue *txq;
struct dpaa_priv *priv;
struct qm_fd fd;
+ bool nonlinear;
int offset = 0;
int err = 0;
@@ -2292,6 +2309,13 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
qm_fd_clear_fd(&fd);
+ /* Packet data is always read as 32-bit words, so zero out any part of
+ * the skb which might be sent if we have to pad the packet
+ */
+ if (__skb_put_padto(skb, ETH_ZLEN, false))
+ goto enomem;
+
+ nonlinear = skb_is_nonlinear(skb);
if (!nonlinear) {
/* We're going to store the skb backpointer at the beginning
* of the data buffer, so we need a privately owned skb
@@ -2747,7 +2771,7 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
if (net_dev->features & NETIF_F_RXHASH && priv->keygen_in_use &&
!fman_port_get_hash_result_offset(priv->mac_dev->port[RX],
&hash_offset)) {
- hash = be32_to_cpu(*(u32 *)(vaddr + hash_offset));
+ hash = be32_to_cpu(*(__be32 *)(vaddr + hash_offset));
hash_valid = true;
}
@@ -2995,7 +3019,7 @@ static int dpaa_change_mtu(struct net_device *net_dev, int new_mtu)
if (priv->xdp_prog && !xdp_validate_mtu(priv, new_mtu))
return -EINVAL;
- net_dev->mtu = new_mtu;
+ WRITE_ONCE(net_dev->mtu, new_mtu);
return 0;
}
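
[Editor's note] The MTU store becomes WRITE_ONCE() because other paths may read dev->mtu without holding RTNL; pairing the write with READ_ONCE() on the reader side keeps the access tear-free and documents the lockless intent. A minimal illustration (foo_frame_fits() is hypothetical):

	static bool foo_frame_fits(const struct net_device *dev,
				   const struct sk_buff *skb)
	{
		/* pairs with WRITE_ONCE(dev->mtu, new_mtu) in ndo_change_mtu() */
		return skb->len <= READ_ONCE(dev->mtu) + ETH_HLEN;
	}
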
@@ -3064,15 +3088,25 @@ static int dpaa_xdp_xmit(struct net_device *net_dev, int n,
return nxmit;
}
-static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+static int dpaa_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *config)
{
struct dpaa_priv *priv = netdev_priv(dev);
- struct hwtstamp_config config;
- if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
- return -EFAULT;
+ config->tx_type = priv->tx_tstamp ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ config->rx_filter = priv->rx_tstamp ? HWTSTAMP_FILTER_ALL :
+ HWTSTAMP_FILTER_NONE;
+
+ return 0;
+}
- switch (config.tx_type) {
+static int dpaa_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct dpaa_priv *priv = netdev_priv(dev);
+
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
/* Couldn't disable rx/tx timestamping separately.
* Do nothing here.
@@ -3087,7 +3121,7 @@ static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return -ERANGE;
}
- if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
+ if (config->rx_filter == HWTSTAMP_FILTER_NONE) {
/* Couldn't disable rx/tx timestamping separately.
* Do nothing here.
*/
@@ -3096,28 +3130,17 @@ static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true);
priv->rx_tstamp = true;
/* TS is set for all frame types, not only those requested */
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
}
- return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
static int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd)
{
- int ret = -EINVAL;
struct dpaa_priv *priv = netdev_priv(net_dev);
- if (cmd == SIOCGMIIREG) {
- if (net_dev->phydev)
- return phylink_mii_ioctl(priv->mac_dev->phylink, rq,
- cmd);
- }
-
- if (cmd == SIOCSHWTSTAMP)
- return dpaa_ts_ioctl(net_dev, rq, cmd);
-
- return ret;
+ return phylink_mii_ioctl(priv->mac_dev->phylink, rq, cmd);
}
static const struct net_device_ops dpaa_ops = {
@@ -3126,7 +3149,6 @@ static const struct net_device_ops dpaa_ops = {
.ndo_stop = dpaa_eth_stop,
.ndo_tx_timeout = dpaa_tx_timeout,
.ndo_get_stats64 = dpaa_get_stats64,
- .ndo_change_carrier = fixed_phy_change_carrier,
.ndo_set_mac_address = dpaa_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = dpaa_set_rx_mode,
@@ -3135,6 +3157,8 @@ static const struct net_device_ops dpaa_ops = {
.ndo_change_mtu = dpaa_change_mtu,
.ndo_bpf = dpaa_xdp,
.ndo_xdp_xmit = dpaa_xdp_xmit,
+ .ndo_hwtstamp_get = dpaa_hwtstamp_get,
+ .ndo_hwtstamp_set = dpaa_hwtstamp_set,
};
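
[Editor's note] With ndo_hwtstamp_get()/ndo_hwtstamp_set() wired up, the core dispatches the SIOCGHWTSTAMP/SIOCSHWTSTAMP ioctls itself and handles the user copies, handing the driver a ready-made struct kernel_hwtstamp_config; that is why dpaa_ioctl() above shrinks to the MII path. A minimal sketch of the general shape of such handlers, with foo_priv and its tx_tstamp/rx_tstamp fields as illustrative assumptions:

	static int foo_hwtstamp_get(struct net_device *dev,
				    struct kernel_hwtstamp_config *cfg)
	{
		struct foo_priv *priv = netdev_priv(dev);	/* hypothetical priv */

		cfg->tx_type = priv->tx_tstamp ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
		cfg->rx_filter = priv->rx_tstamp ? HWTSTAMP_FILTER_ALL :
						   HWTSTAMP_FILTER_NONE;
		return 0;
	}

	static int foo_hwtstamp_set(struct net_device *dev,
				    struct kernel_hwtstamp_config *cfg,
				    struct netlink_ext_ack *extack)
	{
		struct foo_priv *priv = netdev_priv(dev);

		if (cfg->tx_type != HWTSTAMP_TX_OFF && cfg->tx_type != HWTSTAMP_TX_ON)
			return -ERANGE;

		priv->tx_tstamp = cfg->tx_type == HWTSTAMP_TX_ON;
		priv->rx_tstamp = cfg->rx_filter != HWTSTAMP_FILTER_NONE;
		if (priv->rx_tstamp)
			cfg->rx_filter = HWTSTAMP_FILTER_ALL;	/* report what is really used */
		return 0;
	}
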
static int dpaa_napi_add(struct net_device *net_dev)
@@ -3161,8 +3185,9 @@ static void dpaa_napi_del(struct net_device *net_dev)
for_each_possible_cpu(cpu) {
percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
- netif_napi_del(&percpu_priv->np.napi);
+ __netif_napi_del(&percpu_priv->np.napi);
}
+ synchronize_net();
}
static inline void dpaa_bp_free_pf(const struct dpaa_bp *bp,
@@ -3324,7 +3349,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
/* Allocate this early, so we can store relevant information in
* the private area
*/
- net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TXQ_NUM);
+ net_dev = alloc_etherdev_mq(sizeof(*priv), dpaa_max_num_txqs());
if (!net_dev) {
dev_err(dev, "alloc_etherdev_mq() failed\n");
return -ENOMEM;
@@ -3339,6 +3364,22 @@ static int dpaa_eth_probe(struct platform_device *pdev)
priv->msg_enable = netif_msg_init(debug, DPAA_MSG_DEFAULT);
+ priv->egress_fqs = devm_kcalloc(dev, dpaa_max_num_txqs(),
+ sizeof(*priv->egress_fqs),
+ GFP_KERNEL);
+ if (!priv->egress_fqs) {
+ err = -ENOMEM;
+ goto free_netdev;
+ }
+
+ priv->conf_fqs = devm_kcalloc(dev, dpaa_max_num_txqs(),
+ sizeof(*priv->conf_fqs),
+ GFP_KERNEL);
+ if (!priv->conf_fqs) {
+ err = -ENOMEM;
+ goto free_netdev;
+ }
+
mac_dev = dpaa_mac_dev_get(pdev);
if (IS_ERR(mac_dev)) {
netdev_err(net_dev, "dpaa_mac_dev_get() failed\n");
@@ -3416,7 +3457,9 @@ static int dpaa_eth_probe(struct platform_device *pdev)
*/
dpaa_eth_add_channel(priv->channel, &pdev->dev);
- dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);
+ err = dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);
+ if (err)
+ goto free_dpaa_bps;
/* Create a congestion group for this netdev, with
* dynamically-allocated CGR ID.
@@ -3462,7 +3505,8 @@ static int dpaa_eth_probe(struct platform_device *pdev)
}
priv->num_tc = 1;
- netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);
+ netif_set_real_num_tx_queues(net_dev,
+ priv->num_tc * dpaa_num_txqs_per_tc());
/* Initialize NAPI */
err = dpaa_napi_add(net_dev);
@@ -3548,7 +3592,7 @@ static struct platform_driver dpaa_driver = {
},
.id_table = dpaa_devtype,
.probe = dpaa_eth_probe,
- .remove_new = dpaa_remove
+ .remove = dpaa_remove
};
static int __init dpaa_load(void)
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
index ac3c8ed57bbe..7ed659eb08de 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
@@ -18,10 +18,6 @@
/* Number of prioritised traffic classes */
#define DPAA_TC_NUM 4
-/* Number of Tx queues per traffic class */
-#define DPAA_TC_TXQ_NUM NR_CPUS
-/* Total number of Tx queues */
-#define DPAA_ETH_TXQ_NUM (DPAA_TC_NUM * DPAA_TC_TXQ_NUM)
/* More detailed FQ types - used for fine-grained WQ assignments */
enum dpaa_fq_type {
@@ -142,8 +138,8 @@ struct dpaa_priv {
struct mac_device *mac_dev;
struct device *rx_dma_dev;
struct device *tx_dma_dev;
- struct qman_fq *egress_fqs[DPAA_ETH_TXQ_NUM];
- struct qman_fq *conf_fqs[DPAA_ETH_TXQ_NUM];
+ struct qman_fq **egress_fqs;
+ struct qman_fq **conf_fqs;
u16 channel;
struct list_head dpaa_fq_list;
@@ -185,4 +181,16 @@ extern const struct ethtool_ops dpaa_ethtool_ops;
/* from dpaa_eth_sysfs.c */
void dpaa_eth_sysfs_remove(struct device *dev);
void dpaa_eth_sysfs_init(struct device *dev);
+
+static inline size_t dpaa_num_txqs_per_tc(void)
+{
+ return num_possible_cpus();
+}
+
+/* Total number of Tx queues */
+static inline size_t dpaa_max_num_txqs(void)
+{
+ return DPAA_TC_NUM * dpaa_num_txqs_per_tc();
+}
+
#endif /* __DPAA_H */
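
[Editor's note] Replacing the NR_CPUS-based DPAA_TC_TXQ_NUM/DPAA_ETH_TXQ_NUM constants with these inline helpers sizes everything from num_possible_cpus() on the running system rather than the kernel's configured maximum, at the cost of allocating the per-device FQ arrays at probe time (as the devm_kcalloc() calls in dpaa_eth_probe() now do). The allocation pattern, sketched with illustrative names:

	/* size per-TXQ arrays from the runtime CPU count instead of NR_CPUS */
	size_t n = DPAA_TC_NUM * num_possible_cpus();	/* == dpaa_max_num_txqs() */
	struct qman_fq **fqs;

	fqs = devm_kcalloc(dev, n, sizeof(*fqs), GFP_KERNEL);
	if (!fqs)
		return -ENOMEM;
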
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
index 4fee74c024bd..aad470e9caea 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
@@ -35,7 +35,6 @@ static ssize_t dpaa_eth_show_fqids(struct device *dev,
u32 last_fqid = 0;
ssize_t bytes = 0;
char *str;
- int i = 0;
list_for_each_entry_safe(fq, tmp, &priv->dpaa_fq_list, list) {
switch (fq->fq_type) {
@@ -85,7 +84,6 @@ static ssize_t dpaa_eth_show_fqids(struct device *dev,
prev = fq;
prevstr = str;
- i++;
}
if (prev) {
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h
index 889f89df9930..9e1d44ae92cc 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h
@@ -56,8 +56,8 @@ DECLARE_EVENT_CLASS(dpaa_eth_fd,
__entry->fd_format = qm_fd_get_format(fd);
__entry->fd_offset = qm_fd_get_offset(fd);
__entry->fd_length = qm_fd_get_length(fd);
- __entry->fd_status = fd->status;
- __assign_str(name, netdev->name);
+ __entry->fd_status = __be32_to_cpu(fd->status);
+ __assign_str(name);
),
/* This is what gets printed when the trace event is triggered */
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index 5bd0b36d1feb..ed3fa80af8c3 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -243,42 +243,28 @@ static void dpaa_get_ethtool_stats(struct net_device *net_dev,
static void dpaa_get_strings(struct net_device *net_dev, u32 stringset,
u8 *data)
{
- unsigned int i, j, num_cpus, size;
- char string_cpu[ETH_GSTRING_LEN];
- u8 *strings;
+ unsigned int i, j, num_cpus;
- memset(string_cpu, 0, sizeof(string_cpu));
- strings = data;
- num_cpus = num_online_cpus();
- size = DPAA_STATS_GLOBAL_LEN * ETH_GSTRING_LEN;
+ num_cpus = num_online_cpus();
for (i = 0; i < DPAA_STATS_PERCPU_LEN; i++) {
- for (j = 0; j < num_cpus; j++) {
- snprintf(string_cpu, ETH_GSTRING_LEN, "%s [CPU %d]",
- dpaa_stats_percpu[i], j);
- memcpy(strings, string_cpu, ETH_GSTRING_LEN);
- strings += ETH_GSTRING_LEN;
- }
- snprintf(string_cpu, ETH_GSTRING_LEN, "%s [TOTAL]",
- dpaa_stats_percpu[i]);
- memcpy(strings, string_cpu, ETH_GSTRING_LEN);
- strings += ETH_GSTRING_LEN;
- }
- for (j = 0; j < num_cpus; j++) {
- snprintf(string_cpu, ETH_GSTRING_LEN,
- "bpool [CPU %d]", j);
- memcpy(strings, string_cpu, ETH_GSTRING_LEN);
- strings += ETH_GSTRING_LEN;
+ for (j = 0; j < num_cpus; j++)
+ ethtool_sprintf(&data, "%s [CPU %d]",
+ dpaa_stats_percpu[i], j);
+
+ ethtool_sprintf(&data, "%s [TOTAL]", dpaa_stats_percpu[i]);
}
- snprintf(string_cpu, ETH_GSTRING_LEN, "bpool [TOTAL]");
- memcpy(strings, string_cpu, ETH_GSTRING_LEN);
- strings += ETH_GSTRING_LEN;
+ for (i = 0; i < num_cpus; i++)
+ ethtool_sprintf(&data, "bpool [CPU %d]", i);
- memcpy(strings, dpaa_stats_global, size);
+ ethtool_puts(&data, "bpool [TOTAL]");
+
+ for (i = 0; i < DPAA_STATS_GLOBAL_LEN; i++)
+ ethtool_puts(&data, dpaa_stats_global[i]);
}
-static int dpaa_get_hash_opts(struct net_device *dev,
- struct ethtool_rxnfc *cmd)
+static int dpaa_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *cmd)
{
struct dpaa_priv *priv = netdev_priv(dev);
@@ -313,22 +299,6 @@ static int dpaa_get_hash_opts(struct net_device *dev,
return 0;
}
-static int dpaa_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
- u32 *unused)
-{
- int ret = -EOPNOTSUPP;
-
- switch (cmd->cmd) {
- case ETHTOOL_GRXFH:
- ret = dpaa_get_hash_opts(dev, cmd);
- break;
- default:
- break;
- }
-
- return ret;
-}
-
static void dpaa_set_hash(struct net_device *net_dev, bool enable)
{
struct mac_device *mac_dev;
@@ -343,8 +313,9 @@ static void dpaa_set_hash(struct net_device *net_dev, bool enable)
priv->keygen_in_use = enable;
}
-static int dpaa_set_hash_opts(struct net_device *dev,
- struct ethtool_rxnfc *nfc)
+static int dpaa_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
{
int ret = -EINVAL;
@@ -378,23 +349,8 @@ static int dpaa_set_hash_opts(struct net_device *dev,
return ret;
}
-static int dpaa_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
-{
- int ret = -EOPNOTSUPP;
-
- switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- ret = dpaa_set_hash_opts(dev, cmd);
- break;
- default:
- break;
- }
-
- return ret;
-}
-
static int dpaa_get_ts_info(struct net_device *net_dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct device *dev = net_dev->dev.parent;
struct device_node *mac_node = dev->of_node;
@@ -415,8 +371,10 @@ static int dpaa_get_ts_info(struct net_device *net_dev,
of_node_put(ptp_node);
}
- if (ptp_dev)
+ if (ptp_dev) {
ptp = platform_get_drvdata(ptp_dev);
+ put_device(&ptp_dev->dev);
+ }
if (ptp)
info->phc_index = ptp->phc_index;
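
[Editor's note] of_find_device_by_node() returns the platform device with an extra reference held, so the added put_device() releases it as soon as the PHC data has been read; without it the PTP device's refcount would leak on every get_ts_info() call. The lookup/release pairing, sketched (np is an assumed struct device_node pointer):

	struct platform_device *ptp_pdev;
	void *drvdata = NULL;

	ptp_pdev = of_find_device_by_node(np);		/* takes a reference on success */
	if (ptp_pdev) {
		drvdata = platform_get_drvdata(ptp_pdev);	/* copy out what is needed */
		put_device(&ptp_pdev->dev);			/* then drop the reference */
	}
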
@@ -457,12 +415,16 @@ static int dpaa_set_coalesce(struct net_device *dev,
struct netlink_ext_ack *extack)
{
const cpumask_t *cpus = qman_affine_cpus();
- bool needs_revert[NR_CPUS] = {false};
struct qman_portal *portal;
u32 period, prev_period;
u8 thresh, prev_thresh;
+ bool *needs_revert;
int cpu, res;
+ needs_revert = kcalloc(num_possible_cpus(), sizeof(bool), GFP_KERNEL);
+ if (!needs_revert)
+ return -ENOMEM;
+
period = c->rx_coalesce_usecs;
thresh = c->rx_max_coalesced_frames;
@@ -485,6 +447,8 @@ static int dpaa_set_coalesce(struct net_device *dev,
needs_revert[cpu] = true;
}
+ kfree(needs_revert);
+
return 0;
revert_values:
@@ -498,9 +462,52 @@ revert_values:
qman_dqrr_set_ithresh(portal, prev_thresh);
}
+ kfree(needs_revert);
+
return res;
}
+static void dpaa_get_pause_stats(struct net_device *net_dev,
+ struct ethtool_pause_stats *s)
+{
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct mac_device *mac_dev = priv->mac_dev;
+
+ if (mac_dev->get_pause_stats)
+ mac_dev->get_pause_stats(mac_dev->fman_mac, s);
+}
+
+static void dpaa_get_rmon_stats(struct net_device *net_dev,
+ struct ethtool_rmon_stats *s,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct mac_device *mac_dev = priv->mac_dev;
+
+ if (mac_dev->get_rmon_stats)
+ mac_dev->get_rmon_stats(mac_dev->fman_mac, s, ranges);
+}
+
+static void dpaa_get_eth_ctrl_stats(struct net_device *net_dev,
+ struct ethtool_eth_ctrl_stats *s)
+{
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct mac_device *mac_dev = priv->mac_dev;
+
+ if (mac_dev->get_eth_ctrl_stats)
+ mac_dev->get_eth_ctrl_stats(mac_dev->fman_mac, s);
+}
+
+static void dpaa_get_eth_mac_stats(struct net_device *net_dev,
+ struct ethtool_eth_mac_stats *s)
+{
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct mac_device *mac_dev = priv->mac_dev;
+
+ if (mac_dev->get_eth_mac_stats)
+ mac_dev->get_eth_mac_stats(mac_dev->fman_mac, s);
+}
+
const struct ethtool_ops dpaa_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
ETHTOOL_COALESCE_RX_MAX_FRAMES,
@@ -516,9 +523,13 @@ const struct ethtool_ops dpaa_ethtool_ops = {
.get_strings = dpaa_get_strings,
.get_link_ksettings = dpaa_get_link_ksettings,
.set_link_ksettings = dpaa_set_link_ksettings,
- .get_rxnfc = dpaa_get_rxnfc,
- .set_rxnfc = dpaa_set_rxnfc,
+ .get_rxfh_fields = dpaa_get_rxfh_fields,
+ .set_rxfh_fields = dpaa_set_rxfh_fields,
.get_ts_info = dpaa_get_ts_info,
.get_coalesce = dpaa_get_coalesce,
.set_coalesce = dpaa_set_coalesce,
+ .get_pause_stats = dpaa_get_pause_stats,
+ .get_rmon_stats = dpaa_get_rmon_stats,
+ .get_eth_ctrl_stats = dpaa_get_eth_ctrl_stats,
+ .get_eth_mac_stats = dpaa_get_eth_mac_stats,
};
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h
index 9b43fadb9b11..956767e0869c 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h
@@ -48,7 +48,7 @@ DECLARE_EVENT_CLASS(dpaa2_eth_fd,
__entry->fd_addr = dpaa2_fd_get_addr(fd);
__entry->fd_len = dpaa2_fd_get_len(fd);
__entry->fd_offset = dpaa2_fd_get_offset(fd);
- __assign_str(name, netdev->name);
+ __assign_str(name);
),
/* This is what gets printed when the trace event is
@@ -144,7 +144,7 @@ DECLARE_EVENT_CLASS(dpaa2_eth_buf,
__entry->dma_addr = dma_addr;
__entry->map_size = map_size;
__entry->bpid = bpid;
- __assign_str(name, netdev->name);
+ __assign_str(name);
),
/* This is what gets printed when the trace event is
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 888509cf1f21..18d86badd6ea 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -1077,8 +1077,7 @@ static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
dma_addr_t addr;
buffer_start = skb->data - dpaa2_eth_needed_headroom(skb);
- aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
- DPAA2_ETH_TX_BUF_ALIGN);
+ aligned_start = PTR_ALIGN(buffer_start, DPAA2_ETH_TX_BUF_ALIGN);
if (aligned_start >= skb->head)
buffer_start = aligned_start;
else
@@ -2585,40 +2584,52 @@ static int dpaa2_eth_set_features(struct net_device *net_dev,
return 0;
}
-static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+static int dpaa2_eth_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct dpaa2_eth_priv *priv = netdev_priv(dev);
- struct hwtstamp_config config;
if (!dpaa2_ptp)
return -EINVAL;
- if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
- return -EFAULT;
-
- switch (config.tx_type) {
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
case HWTSTAMP_TX_ON:
case HWTSTAMP_TX_ONESTEP_SYNC:
- priv->tx_tstamp_type = config.tx_type;
+ priv->tx_tstamp_type = config->tx_type;
break;
default:
return -ERANGE;
}
- if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
+ if (config->rx_filter == HWTSTAMP_FILTER_NONE) {
priv->rx_tstamp = false;
} else {
priv->rx_tstamp = true;
/* TS is set for all frame types, not only those requested */
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
}
if (priv->tx_tstamp_type == HWTSTAMP_TX_ONESTEP_SYNC)
dpaa2_ptp_onestep_reg_update_method(priv);
- return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
+}
+
+static int dpaa2_eth_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
+
+ if (!dpaa2_ptp)
+ return -EINVAL;
+
+ config->tx_type = priv->tx_tstamp_type;
+ config->rx_filter = priv->rx_tstamp ? HWTSTAMP_FILTER_ALL :
+ HWTSTAMP_FILTER_NONE;
+
+ return 0;
}
static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
@@ -2626,9 +2637,6 @@ static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
struct dpaa2_eth_priv *priv = netdev_priv(dev);
int err;
- if (cmd == SIOCSHWTSTAMP)
- return dpaa2_eth_ts_ioctl(dev, rq, cmd);
-
mutex_lock(&priv->mac_lock);
if (dpaa2_eth_is_type_phy(priv)) {
@@ -2698,7 +2706,7 @@ static int dpaa2_eth_change_mtu(struct net_device *dev, int new_mtu)
return err;
out:
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
@@ -2896,11 +2904,14 @@ static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
static int update_xps(struct dpaa2_eth_priv *priv)
{
struct net_device *net_dev = priv->net_dev;
- struct cpumask xps_mask;
- struct dpaa2_eth_fq *fq;
int i, num_queues, netdev_queues;
+ struct dpaa2_eth_fq *fq;
+ cpumask_var_t xps_mask;
int err = 0;
+ if (!alloc_cpumask_var(&xps_mask, GFP_KERNEL))
+ return -ENOMEM;
+
num_queues = dpaa2_eth_queue_count(priv);
netdev_queues = (net_dev->num_tc ? : 1) * num_queues;
@@ -2910,16 +2921,17 @@ static int update_xps(struct dpaa2_eth_priv *priv)
for (i = 0; i < netdev_queues; i++) {
fq = &priv->fq[i % num_queues];
- cpumask_clear(&xps_mask);
- cpumask_set_cpu(fq->target_cpu, &xps_mask);
+ cpumask_clear(xps_mask);
+ cpumask_set_cpu(fq->target_cpu, xps_mask);
- err = netif_set_xps_queue(net_dev, &xps_mask, i);
+ err = netif_set_xps_queue(net_dev, xps_mask, i);
if (err) {
netdev_warn_once(net_dev, "Error setting XPS queue\n");
break;
}
}
+ free_cpumask_var(xps_mask);
return err;
}
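
[Editor's note] A struct cpumask on the stack grows with CONFIG_NR_CPUS (8192 CPUs already means a 1 KiB object), which is why update_xps() switches to cpumask_var_t: stack-backed on small configurations, heap-allocated when CONFIG_CPUMASK_OFFSTACK is set. The usual pattern:

	cpumask_var_t mask;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_clear(mask);
	cpumask_set_cpu(cpu, mask);
	/* ... use the mask ... */
	free_cpumask_var(mask);
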
@@ -3030,7 +3042,9 @@ static const struct net_device_ops dpaa2_eth_ops = {
.ndo_xsk_wakeup = dpaa2_xsk_wakeup,
.ndo_setup_tc = dpaa2_eth_setup_tc,
.ndo_vlan_rx_add_vid = dpaa2_eth_rx_add_vid,
- .ndo_vlan_rx_kill_vid = dpaa2_eth_rx_kill_vid
+ .ndo_vlan_rx_kill_vid = dpaa2_eth_rx_kill_vid,
+ .ndo_hwtstamp_get = dpaa2_eth_hwtstamp_get,
+ .ndo_hwtstamp_set = dpaa2_eth_hwtstamp_set,
};
static void dpaa2_eth_cdan_cb(struct dpaa2_io_notification_ctx *ctx)
@@ -3924,6 +3938,7 @@ static int dpaa2_eth_setup_rx_flow(struct dpaa2_eth_priv *priv,
MEM_TYPE_PAGE_ORDER0, NULL);
if (err) {
dev_err(dev, "xdp_rxq_info_reg_mem_model failed\n");
+ xdp_rxq_info_unreg(&fq->channel->xdp_rxq);
return err;
}
@@ -4417,17 +4432,25 @@ static int dpaa2_eth_bind_dpni(struct dpaa2_eth_priv *priv)
return -EINVAL;
}
if (err)
- return err;
+ goto out;
}
err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token,
DPNI_QUEUE_TX, &priv->tx_qdid);
if (err) {
dev_err(dev, "dpni_get_qdid() failed\n");
- return err;
+ goto out;
}
return 0;
+
+out:
+ while (i--) {
+ if (priv->fq[i].type == DPAA2_RX_FQ &&
+ xdp_rxq_info_is_reg(&priv->fq[i].channel->xdp_rxq))
+ xdp_rxq_info_unreg(&priv->fq[i].channel->xdp_rxq);
+ }
+ return err;
}
/* Allocate rings for storing incoming frame descriptors */
@@ -4590,12 +4613,13 @@ static int dpaa2_eth_netdev_init(struct net_device *net_dev)
net_dev->priv_flags |= supported;
net_dev->priv_flags &= ~not_supported;
+ net_dev->lltx = true;
/* Features */
net_dev->features = NETIF_F_RXCSUM |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_SG | NETIF_F_HIGHDMA |
- NETIF_F_LLTX | NETIF_F_HW_TC | NETIF_F_TSO;
+ NETIF_F_HW_TC | NETIF_F_TSO;
net_dev->gso_max_segs = DPAA2_ETH_ENQUEUE_MAX_FDS;
net_dev->hw_features = net_dev->features;
net_dev->xdp_features = NETDEV_XDP_ACT_BASIC |
@@ -4641,12 +4665,19 @@ static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
return PTR_ERR(dpmac_dev);
}
- if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
+ if (IS_ERR(dpmac_dev))
return 0;
+ if (dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type) {
+ err = 0;
+ goto out_put_device;
+ }
+
mac = kzalloc(sizeof(struct dpaa2_mac), GFP_KERNEL);
- if (!mac)
- return -ENOMEM;
+ if (!mac) {
+ err = -ENOMEM;
+ goto out_put_device;
+ }
mac->mc_dev = dpmac_dev;
mac->mc_io = priv->mc_io;
@@ -4680,6 +4711,8 @@ err_close_mac:
dpaa2_mac_close(mac);
err_free_mac:
kfree(mac);
+out_put_device:
+ put_device(&dpmac_dev->dev);
return err;
}
@@ -4809,6 +4842,17 @@ static void dpaa2_eth_del_ch_napi(struct dpaa2_eth_priv *priv)
}
}
+static void dpaa2_eth_free_rx_xdp_rxq(struct dpaa2_eth_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_fqs; i++) {
+ if (priv->fq[i].type == DPAA2_RX_FQ &&
+ xdp_rxq_info_is_reg(&priv->fq[i].channel->xdp_rxq))
+ xdp_rxq_info_unreg(&priv->fq[i].channel->xdp_rxq);
+ }
+}
+
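
[Editor's note] Every successful xdp_rxq_info_reg() now has a matching xdp_rxq_info_unreg() on both the probe error path and remove, with xdp_rxq_info_is_reg() guarding against unregistering a queue that never completed registration. The registration/teardown balance, sketched (rxq, ndev, queue_index and napi_id are assumed locals):

	err = xdp_rxq_info_reg(&rxq, ndev, queue_index, napi_id);
	if (err)
		return err;

	err = xdp_rxq_info_reg_mem_model(&rxq, MEM_TYPE_PAGE_ORDER0, NULL);
	if (err) {
		xdp_rxq_info_unreg(&rxq);	/* undo the successful registration */
		return err;
	}

	/* ... later, on error unwinding or teardown ... */
	if (xdp_rxq_info_is_reg(&rxq))
		xdp_rxq_info_unreg(&rxq);
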
static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
{
struct device *dev;
@@ -4839,7 +4883,7 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
priv->tx_tstamp_type = HWTSTAMP_TX_OFF;
priv->rx_tstamp = false;
- priv->dpaa2_ptp_wq = alloc_workqueue("dpaa2_ptp_wq", 0, 0);
+ priv->dpaa2_ptp_wq = alloc_workqueue("dpaa2_ptp_wq", WQ_PERCPU, 0);
if (!priv->dpaa2_ptp_wq) {
err = -ENOMEM;
goto err_wq_alloc;
@@ -5012,6 +5056,7 @@ err_alloc_percpu_extras:
free_percpu(priv->percpu_stats);
err_alloc_percpu_stats:
dpaa2_eth_del_ch_napi(priv);
+ dpaa2_eth_free_rx_xdp_rxq(priv);
err_bind:
dpaa2_eth_free_dpbps(priv);
err_dpbp_setup:
@@ -5064,6 +5109,7 @@ static void dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
free_percpu(priv->percpu_extras);
dpaa2_eth_del_ch_napi(priv);
+ dpaa2_eth_free_rx_xdp_rxq(priv);
dpaa2_eth_free_dpbps(priv);
dpaa2_eth_free_dpio(priv);
dpaa2_eth_free_dpni(priv);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index e80e9388c71f..baab4f1c908d 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -217,20 +217,15 @@ static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset,
u8 *data)
{
- u8 *p = data;
int i;
switch (stringset) {
case ETH_SS_STATS:
- for (i = 0; i < DPAA2_ETH_NUM_STATS; i++) {
- strscpy(p, dpaa2_ethtool_stats[i], ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < DPAA2_ETH_NUM_EXTRA_STATS; i++) {
- strscpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
- dpaa2_mac_get_strings(p);
+ for (i = 0; i < DPAA2_ETH_NUM_STATS; i++)
+ ethtool_puts(&data, dpaa2_ethtool_stats[i]);
+ for (i = 0; i < DPAA2_ETH_NUM_EXTRA_STATS; i++)
+ ethtool_puts(&data, dpaa2_ethtool_extras[i]);
+ dpaa2_mac_get_strings(&data);
break;
}
}
@@ -716,6 +711,13 @@ static int dpaa2_eth_update_cls_rule(struct net_device *net_dev,
return 0;
}
+static u32 dpaa2_eth_get_rx_ring_count(struct net_device *net_dev)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+ return dpaa2_eth_queue_count(priv);
+}
+
static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
{
@@ -724,16 +726,6 @@ static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
int i, j = 0;
switch (rxnfc->cmd) {
- case ETHTOOL_GRXFH:
- /* we purposely ignore cmd->flow_type for now, because the
- * classifier only supports a single set of fields for all
- * protocols
- */
- rxnfc->data = priv->rx_hash_fields;
- break;
- case ETHTOOL_GRXRINGS:
- rxnfc->data = dpaa2_eth_queue_count(priv);
- break;
case ETHTOOL_GRXCLSRLCNT:
rxnfc->rule_cnt = 0;
rxnfc->rule_cnt = dpaa2_eth_num_cls_rules(priv);
@@ -772,11 +764,6 @@ static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
int err = 0;
switch (rxnfc->cmd) {
- case ETHTOOL_SRXFH:
- if ((rxnfc->data & DPAA2_RXH_SUPPORTED) != rxnfc->data)
- return -EOPNOTSUPP;
- err = dpaa2_eth_set_hash(net_dev, rxnfc->data);
- break;
case ETHTOOL_SRXCLSRLINS:
err = dpaa2_eth_update_cls_rule(net_dev, &rxnfc->fs, rxnfc->fs.location);
break;
@@ -790,11 +777,33 @@ static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
return err;
}
+static int dpaa2_eth_get_rxfh_fields(struct net_device *net_dev,
+ struct ethtool_rxfh_fields *rxnfc)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+ /* we purposely ignore cmd->flow_type for now, because the
+ * classifier only supports a single set of fields for all
+ * protocols
+ */
+ rxnfc->data = priv->rx_hash_fields;
+ return 0;
+}
+
+static int dpaa2_eth_set_rxfh_fields(struct net_device *net_dev,
+ const struct ethtool_rxfh_fields *rxnfc,
+ struct netlink_ext_ack *extack)
+{
+ if ((rxnfc->data & DPAA2_RXH_SUPPORTED) != rxnfc->data)
+ return -EOPNOTSUPP;
+ return dpaa2_eth_set_hash(net_dev, rxnfc->data);
+}
+
int dpaa2_phc_index = -1;
EXPORT_SYMBOL(dpaa2_phc_index);
static int dpaa2_eth_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
if (!dpaa2_ptp)
return ethtool_op_get_ts_info(dev, info);
@@ -944,6 +953,9 @@ const struct ethtool_ops dpaa2_ethtool_ops = {
.get_strings = dpaa2_eth_get_strings,
.get_rxnfc = dpaa2_eth_get_rxnfc,
.set_rxnfc = dpaa2_eth_set_rxnfc,
+ .get_rx_ring_count = dpaa2_eth_get_rx_ring_count,
+ .get_rxfh_fields = dpaa2_eth_get_rxfh_fields,
+ .set_rxfh_fields = dpaa2_eth_set_rxfh_fields,
.get_ts_info = dpaa2_eth_get_ts_info,
.get_tunable = dpaa2_eth_get_tunable,
.set_tunable = dpaa2_eth_set_tunable,
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
index a69bb22c37ea..422ce13a7c94 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
@@ -558,15 +558,12 @@ int dpaa2_mac_get_sset_count(void)
return DPAA2_MAC_NUM_STATS;
}
-void dpaa2_mac_get_strings(u8 *data)
+void dpaa2_mac_get_strings(u8 **data)
{
- u8 *p = data;
int i;
- for (i = 0; i < DPAA2_MAC_NUM_STATS; i++) {
- strscpy(p, dpaa2_mac_ethtool_stats[i], ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < DPAA2_MAC_NUM_STATS; i++)
+ ethtool_puts(data, dpaa2_mac_ethtool_stats[i]);
}
void dpaa2_mac_get_ethtool_stats(struct dpaa2_mac *mac, u64 *data)
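
[Editor's note] ethtool_puts() and ethtool_sprintf() each emit one ETH_GSTRING_LEN-sized entry and advance the caller's buffer pointer, which is why dpaa2_mac_get_strings() can now take a u8 ** and keep appending to the same buffer its callers are filling. The pattern, sketched with illustrative FOO_* names:

	static void foo_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
	{
		int i;

		if (stringset != ETH_SS_STATS)
			return;

		for (i = 0; i < FOO_NUM_STATS; i++)
			ethtool_puts(&data, foo_stat_names[i]);	/* fixed strings */
		ethtool_sprintf(&data, "queue %d bytes", 0);	/* formatted entry */
	}
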
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
index c1ec9efd413a..53f8d106d11e 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
@@ -49,7 +49,7 @@ void dpaa2_mac_disconnect(struct dpaa2_mac *mac);
int dpaa2_mac_get_sset_count(void);
-void dpaa2_mac_get_strings(u8 *data);
+void dpaa2_mac_get_strings(u8 **data);
void dpaa2_mac_get_ethtool_stats(struct dpaa2_mac *mac, u64 *data);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c
index 6bc1988be311..a888f6e6e9b0 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c
@@ -170,17 +170,16 @@ dpaa2_switch_ethtool_get_sset_count(struct net_device *netdev, int sset)
static void dpaa2_switch_ethtool_get_strings(struct net_device *netdev,
u32 stringset, u8 *data)
{
- u8 *p = data;
+ const char *str;
int i;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < DPAA2_SWITCH_NUM_COUNTERS; i++) {
- memcpy(p, dpaa2_switch_ethtool_counters[i].name,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
+ str = dpaa2_switch_ethtool_counters[i].name;
+ ethtool_puts(&data, str);
}
- dpaa2_mac_get_strings(p);
+ dpaa2_mac_get_strings(&data);
break;
}
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
index b6a534a3e0b1..701a87370737 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
@@ -33,6 +33,9 @@ static int dpaa2_switch_flower_parse_key(struct flow_cls_offload *cls,
acl_h = &acl_key->match;
acl_m = &acl_key->mask;
+ if (flow_rule_match_has_control_flags(rule, extack))
+ return -EOPNOTSUPP;
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic match;
@@ -548,6 +551,9 @@ static int dpaa2_switch_flower_parse_mirror_key(struct flow_cls_offload *cls,
return -EOPNOTSUPP;
}
+ if (flow_rule_match_has_control_flags(rule, extack))
+ return -EOPNOTSUPP;
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
struct flow_match_vlan match;
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
index f3543a2df68d..b1e1ad9e4b48 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
@@ -590,7 +590,7 @@ static int dpaa2_switch_port_change_mtu(struct net_device *netdev, int mtu)
return err;
}
- netdev->mtu = mtu;
+ WRITE_ONCE(netdev->mtu, mtu);
return 0;
}
@@ -780,13 +780,14 @@ struct ethsw_dump_ctx {
static int dpaa2_switch_fdb_dump_nl(struct fdb_dump_entry *entry,
struct ethsw_dump_ctx *dump)
{
+ struct ndo_fdb_dump_context *ctx = (void *)dump->cb->ctx;
int is_dynamic = entry->type & DPSW_FDB_ENTRY_DINAMIC;
u32 portid = NETLINK_CB(dump->cb->skb).portid;
u32 seq = dump->cb->nlh->nlmsg_seq;
struct nlmsghdr *nlh;
struct ndmsg *ndm;
- if (dump->idx < dump->cb->args[2])
+ if (dump->idx < ctx->fdb_idx)
goto skip;
nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
@@ -1447,12 +1448,19 @@ static int dpaa2_switch_port_connect_mac(struct ethsw_port_priv *port_priv)
if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER)
return PTR_ERR(dpmac_dev);
- if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
+ if (IS_ERR(dpmac_dev))
return 0;
+ if (dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type) {
+ err = 0;
+ goto out_put_device;
+ }
+
mac = kzalloc(sizeof(*mac), GFP_KERNEL);
- if (!mac)
- return -ENOMEM;
+ if (!mac) {
+ err = -ENOMEM;
+ goto out_put_device;
+ }
mac->mc_dev = dpmac_dev;
mac->mc_io = port_priv->ethsw_data->mc_io;
@@ -1482,6 +1490,8 @@ err_close_mac:
dpaa2_mac_close(mac);
err_free_mac:
kfree(mac);
+out_put_device:
+ put_device(&dpmac_dev->dev);
return err;
}
@@ -2638,13 +2648,14 @@ static int dpaa2_switch_refill_bp(struct ethsw_core *ethsw)
static int dpaa2_switch_seed_bp(struct ethsw_core *ethsw)
{
- int *count, i;
+ int *count, ret, i;
for (i = 0; i < DPAA2_ETHSW_NUM_BUFS; i += BUFS_PER_CMD) {
+ ret = dpaa2_switch_add_bufs(ethsw, ethsw->bpid);
count = &ethsw->buf_count;
- *count += dpaa2_switch_add_bufs(ethsw, ethsw->bpid);
+ *count += ret;
- if (unlikely(*count < BUFS_PER_CMD))
+ if (unlikely(ret < BUFS_PER_CMD))
return -ENOMEM;
}
@@ -2725,7 +2736,7 @@ static int dpaa2_switch_setup_dpbp(struct ethsw_core *ethsw)
dev_err(dev, "dpsw_ctrl_if_set_pools() failed\n");
goto err_get_attr;
}
- ethsw->bpid = dpbp_attrs.id;
+ ethsw->bpid = dpbp_attrs.bpid;
return 0;
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
index 051748b997f3..4b0ae7d9af92 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
@@ -55,7 +55,7 @@ static u32 dpaa2_xsk_run_xdp(struct dpaa2_eth_priv *priv,
xdp_set_data_meta_invalid(xdp_buff);
xdp_buff->rxq = &ch->xdp_rxq;
- xsk_buff_dma_sync_for_cpu(xdp_buff, ch->xsk_pool);
+ xsk_buff_dma_sync_for_cpu(xdp_buff);
xdp_act = bpf_prog_run_xdp(xdp_prog, xdp_buff);
/* xdp.data pointer may have changed */
@@ -448,7 +448,5 @@ bool dpaa2_xsk_tx(struct dpaa2_eth_priv *priv,
percpu_stats->tx_errors++;
}
- xsk_tx_release(ch->xsk_pool);
-
return total_enqueued == budget;
}
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
index 4d75e6807e92..117038104b69 100644
--- a/drivers/net/ethernet/freescale/enetc/Kconfig
+++ b/drivers/net/ethernet/freescale/enetc/Kconfig
@@ -1,19 +1,39 @@
# SPDX-License-Identifier: GPL-2.0
config FSL_ENETC_CORE
tristate
+ select NXP_NETC_LIB if NXP_NTMP
help
This module supports common functionality between the PF and VF
drivers for the NXP ENETC controller.
If compiled as module (M), the module name is fsl-enetc-core.
+config NXP_ENETC_PF_COMMON
+ tristate
+ help
+ This module supports common functionality between drivers of
+ different versions of NXP ENETC PF controllers.
+
+ If compiled as module (M), the module name is nxp-enetc-pf-common.
+
+config NXP_NETC_LIB
+ tristate
+ help
+	  This module provides functionality shared by ENETC and the NETC
+	  Switch, such as NETC Table Management Protocol (NTMP) 2.0 and the
+	  common tc flower and debugfs interfaces.
+
+config NXP_NTMP
+ bool
+
config FSL_ENETC
tristate "ENETC PF driver"
+ depends on PTP_1588_CLOCK_OPTIONAL
depends on PCI_MSI
- select MDIO_DEVRES
select FSL_ENETC_CORE
select FSL_ENETC_IERB
select FSL_ENETC_MDIO
+ select NXP_ENETC_PF_COMMON
select PHYLINK
select PCS_LYNX
select DIMLIB
@@ -24,8 +44,27 @@ config FSL_ENETC
If compiled as module (M), the module name is fsl-enetc.
+config NXP_ENETC4
+ tristate "ENETC4 PF driver"
+ depends on PTP_1588_CLOCK_OPTIONAL
+ depends on PCI_MSI
+ select FSL_ENETC_CORE
+ select FSL_ENETC_MDIO
+ select NXP_ENETC_PF_COMMON
+ select NXP_NTMP
+ select PHYLINK
+ select DIMLIB
+ help
+	  This driver supports NXP ENETC devices with major revision 4. ENETC
+	  provides the NIC functionality in NETC; it supports
+	  virtualization/isolation based on PCIe Single Root IO Virtualization
+	  (SR-IOV) and a full range of TSN standards and NIC offload capabilities.
+
+ If compiled as module (M), the module name is nxp-enetc4.
+
config FSL_ENETC_VF
tristate "ENETC VF driver"
+ depends on PTP_1588_CLOCK_OPTIONAL
depends on PCI_MSI
select FSL_ENETC_CORE
select FSL_ENETC_MDIO
@@ -47,7 +86,7 @@ config FSL_ENETC_IERB
config FSL_ENETC_MDIO
tristate "ENETC MDIO driver"
- depends on PCI && MDIO_DEVRES && MDIO_BUS
+ depends on PCI && PHYLIB
help
This driver supports NXP ENETC Central MDIO controller as a PCIe
physical function (PF) device.
@@ -75,3 +114,17 @@ config FSL_ENETC_QOS
enable/disable from user space via Qos commands(tc). In the kernel
side, it can be loaded by Qos driver. Currently, it is only support
taprio(802.1Qbv) and Credit Based Shaper(802.1Qbu).
+
+config NXP_NETC_BLK_CTRL
+ tristate "NETC blocks control driver"
+ help
+ This driver configures Integrated Endpoint Register Block (IERB) and
+ Privileged Register Block (PRB) of NETC. For i.MX platforms, it also
+ includes the configuration of NETCMIX block.
+ The IERB contains registers that are used for pre-boot initialization,
+ debug, and non-customer configuration. The PRB controls global reset
+ and global error handling for NETC. The NETCMIX block is mainly used
+ to set MII protocol and PCS protocol of the links, it also contains
+ settings for some other functions.
+
+ If compiled as module (M), the module name is nxp-netc-blk-ctrl.
diff --git a/drivers/net/ethernet/freescale/enetc/Makefile b/drivers/net/ethernet/freescale/enetc/Makefile
index b13cbbabb2ea..f1c5ad45fd76 100644
--- a/drivers/net/ethernet/freescale/enetc/Makefile
+++ b/drivers/net/ethernet/freescale/enetc/Makefile
@@ -3,11 +3,21 @@
obj-$(CONFIG_FSL_ENETC_CORE) += fsl-enetc-core.o
fsl-enetc-core-y := enetc.o enetc_cbdr.o enetc_ethtool.o
+obj-$(CONFIG_NXP_ENETC_PF_COMMON) += nxp-enetc-pf-common.o
+nxp-enetc-pf-common-y := enetc_pf_common.o
+
+obj-$(CONFIG_NXP_NETC_LIB) += nxp-netc-lib.o
+nxp-netc-lib-y := ntmp.o
+
obj-$(CONFIG_FSL_ENETC) += fsl-enetc.o
fsl-enetc-y := enetc_pf.o
fsl-enetc-$(CONFIG_PCI_IOV) += enetc_msg.o
fsl-enetc-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o
+obj-$(CONFIG_NXP_ENETC4) += nxp-enetc4.o
+nxp-enetc4-y := enetc4_pf.o
+nxp-enetc4-$(CONFIG_DEBUG_FS) += enetc4_debugfs.o
+
obj-$(CONFIG_FSL_ENETC_VF) += fsl-enetc-vf.o
fsl-enetc-vf-y := enetc_vf.o
@@ -19,3 +29,6 @@ fsl-enetc-mdio-y := enetc_pci_mdio.o enetc_mdio.o
obj-$(CONFIG_FSL_ENETC_PTP_CLOCK) += fsl-enetc-ptp.o
fsl-enetc-ptp-y := enetc_ptp.o
+
+obj-$(CONFIG_NXP_NETC_BLK_CTRL) += nxp-netc-blk-ctrl.o
+nxp-netc-blk-ctrl-y := netc_blk_ctrl.o
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 9f07f4947b63..d5e5800b84ef 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -3,6 +3,7 @@
#include "enetc.h"
#include <linux/bpf_trace.h>
+#include <linux/clk.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/vmalloc.h>
@@ -13,25 +14,73 @@
u32 enetc_port_mac_rd(struct enetc_si *si, u32 reg)
{
+ /* ENETC with pseudo MAC does not have Ethernet MAC
+ * port registers.
+ */
+ if (enetc_is_pseudo_mac(si))
+ return 0;
+
return enetc_port_rd(&si->hw, reg);
}
EXPORT_SYMBOL_GPL(enetc_port_mac_rd);
void enetc_port_mac_wr(struct enetc_si *si, u32 reg, u32 val)
{
+ if (enetc_is_pseudo_mac(si))
+ return;
+
enetc_port_wr(&si->hw, reg, val);
if (si->hw_features & ENETC_SI_F_QBU)
- enetc_port_wr(&si->hw, reg + ENETC_PMAC_OFFSET, val);
+ enetc_port_wr(&si->hw, reg + si->drvdata->pmac_offset, val);
}
EXPORT_SYMBOL_GPL(enetc_port_mac_wr);
static void enetc_change_preemptible_tcs(struct enetc_ndev_priv *priv,
u8 preemptible_tcs)
{
+ if (!(priv->si->hw_features & ENETC_SI_F_QBU))
+ return;
+
priv->preemptible_tcs = preemptible_tcs;
enetc_mm_commit_preemptible_tcs(priv);
}
+static int enetc_mac_addr_hash_idx(const u8 *addr)
+{
+ u64 fold = __swab64(ether_addr_to_u64(addr)) >> 16;
+ u64 mask = 0;
+ int res = 0;
+ int i;
+
+ for (i = 0; i < 8; i++)
+ mask |= BIT_ULL(i * 6);
+
+ for (i = 0; i < 6; i++)
+ res |= (hweight64(fold & (mask << i)) & 0x1) << i;
+
+ return res;
+}
+
+void enetc_add_mac_addr_ht_filter(struct enetc_mac_filter *filter,
+ const unsigned char *addr)
+{
+ int idx = enetc_mac_addr_hash_idx(addr);
+
+ /* add hash table entry */
+ __set_bit(idx, filter->mac_hash_table);
+ filter->mac_addr_cnt++;
+}
+EXPORT_SYMBOL_GPL(enetc_add_mac_addr_ht_filter);
+
+void enetc_reset_mac_addr_filter(struct enetc_mac_filter *filter)
+{
+ filter->mac_addr_cnt = 0;
+
+ bitmap_zero(filter->mac_hash_table,
+ ENETC_MADDR_HASH_TBL_SZ);
+}
+EXPORT_SYMBOL_GPL(enetc_reset_mac_addr_filter);
+
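
[Editor's note] enetc_mac_addr_hash_idx() byte-reverses the 48-bit MAC address and XOR-folds it into a 6-bit hash-table index: output bit i is the parity of bits i, i+6, ..., i+42 of the reversed address, so every address lands in one of 64 buckets of the hash filter. A small userspace re-implementation of the same arithmetic (not the driver code, just a way to check which bucket a given address selects):

	#include <stdint.h>
	#include <stdio.h>

	static int mac_hash_idx(const uint8_t *addr)
	{
		uint64_t v = 0, fold, mask = 0;
		int res = 0, i;

		for (i = 0; i < 6; i++)			/* addr[0] ends up most significant */
			v = (v << 8) | addr[i];
		fold = __builtin_bswap64(v) >> 16;	/* byte-reversed 48-bit address */

		for (i = 0; i < 8; i++)
			mask |= 1ULL << (i * 6);	/* one bit from each 6-bit group */
		for (i = 0; i < 6; i++)
			res |= (__builtin_popcountll(fold & (mask << i)) & 1) << i;

		return res;				/* bucket 0..63 */
	}

	int main(void)
	{
		const uint8_t mac[6] = { 0x00, 0x04, 0x9f, 0x01, 0x02, 0x03 };

		printf("hash bucket: %d\n", mac_hash_idx(mac));
		return 0;
	}
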
static int enetc_num_stack_tx_queues(struct enetc_ndev_priv *priv)
{
int num_tx_rings = priv->num_tx_rings;
@@ -142,22 +191,181 @@ static int enetc_ptp_parse(struct sk_buff *skb, u8 *udp,
return 0;
}
+static bool enetc_tx_csum_offload_check(struct sk_buff *skb)
+{
+ switch (skb->csum_offset) {
+ case offsetof(struct tcphdr, check):
+ case offsetof(struct udphdr, check):
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool enetc_skb_is_ipv6(struct sk_buff *skb)
+{
+ return vlan_get_protocol(skb) == htons(ETH_P_IPV6);
+}
+
+static bool enetc_skb_is_tcp(struct sk_buff *skb)
+{
+ return skb->csum_offset == offsetof(struct tcphdr, check);
+}
+
+/**
+ * enetc_unwind_tx_frame() - Unwind the DMA mappings of a multi-buffer Tx frame
+ * @tx_ring: Pointer to the Tx ring on which the buffer descriptors are located
+ * @count: Number of Tx buffer descriptors which need to be unmapped
+ * @i: Index of the last successfully mapped Tx buffer descriptor
+ */
+static void enetc_unwind_tx_frame(struct enetc_bdr *tx_ring, int count, int i)
+{
+ while (count--) {
+ struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i];
+
+ enetc_free_tx_frame(tx_ring, tx_swbd);
+ if (i == 0)
+ i = tx_ring->bd_count;
+ i--;
+ }
+}
+
+static void enetc_set_one_step_ts(struct enetc_si *si, bool udp, int offset)
+{
+ u32 val = ENETC_PM0_SINGLE_STEP_EN;
+
+ val |= ENETC_SET_SINGLE_STEP_OFFSET(offset);
+ if (udp)
+ val |= ENETC_PM0_SINGLE_STEP_CH;
+
+ /* The "Correction" field of a packet is updated based on the
+ * current time and the timestamp provided
+ */
+ enetc_port_mac_wr(si, ENETC_PM0_SINGLE_STEP, val);
+}
+
+static void enetc4_set_one_step_ts(struct enetc_si *si, bool udp, int offset)
+{
+ u32 val = PM_SINGLE_STEP_EN;
+
+ val |= PM_SINGLE_STEP_OFFSET_SET(offset);
+ if (udp)
+ val |= PM_SINGLE_STEP_CH;
+
+ enetc_port_mac_wr(si, ENETC4_PM_SINGLE_STEP(0), val);
+}
+
+static u32 enetc_update_ptp_sync_msg(struct enetc_ndev_priv *priv,
+ struct sk_buff *skb, bool csum_offload)
+{
+ struct enetc_skb_cb *enetc_cb = ENETC_SKB_CB(skb);
+ u16 tstamp_off = enetc_cb->origin_tstamp_off;
+ u16 corr_off = enetc_cb->correction_off;
+ struct enetc_si *si = priv->si;
+ struct enetc_hw *hw = &si->hw;
+ __be32 new_sec_l, new_nsec;
+ __be16 new_sec_h;
+ u32 lo, hi, nsec;
+ u8 *data;
+ u64 sec;
+
+ lo = enetc_rd_hot(hw, ENETC_SICTR0);
+ hi = enetc_rd_hot(hw, ENETC_SICTR1);
+ sec = (u64)hi << 32 | lo;
+ nsec = do_div(sec, 1000000000);
+
+	/* Update originTimestamp field of Sync packet
+	 * - 48 bits seconds field
+	 * - 32 bits nanoseconds field
+	 *
+	 * In addition, if csum_offload is false, the UDP checksum needs
+	 * to be updated by software after updating the originTimestamp field,
+	 * otherwise the hardware will calculate the wrong checksum when
+	 * updating the correction field and write it into the packet.
+	 */
+
+ data = skb_mac_header(skb);
+ new_sec_h = htons((sec >> 32) & 0xffff);
+ new_sec_l = htonl(sec & 0xffffffff);
+ new_nsec = htonl(nsec);
+ if (enetc_cb->udp && !csum_offload) {
+ struct udphdr *uh = udp_hdr(skb);
+ __be32 old_sec_l, old_nsec;
+ __be16 old_sec_h;
+
+ old_sec_h = *(__be16 *)(data + tstamp_off);
+ inet_proto_csum_replace2(&uh->check, skb, old_sec_h,
+ new_sec_h, false);
+
+ old_sec_l = *(__be32 *)(data + tstamp_off + 2);
+ inet_proto_csum_replace4(&uh->check, skb, old_sec_l,
+ new_sec_l, false);
+
+ old_nsec = *(__be32 *)(data + tstamp_off + 6);
+ inet_proto_csum_replace4(&uh->check, skb, old_nsec,
+ new_nsec, false);
+ }
+
+ *(__be16 *)(data + tstamp_off) = new_sec_h;
+ *(__be32 *)(data + tstamp_off + 2) = new_sec_l;
+ *(__be32 *)(data + tstamp_off + 6) = new_nsec;
+
+ /* Configure single-step register */
+ if (is_enetc_rev1(si))
+ enetc_set_one_step_ts(si, enetc_cb->udp, corr_off);
+ else
+ enetc4_set_one_step_ts(si, enetc_cb->udp, corr_off);
+
+ return lo & ENETC_TXBD_TSTAMP;
+}
+
static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
{
bool do_vlan, do_onestep_tstamp = false, do_twostep_tstamp = false;
struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev);
- struct enetc_hw *hw = &priv->si->hw;
+ struct enetc_skb_cb *enetc_cb = ENETC_SKB_CB(skb);
struct enetc_tx_swbd *tx_swbd;
int len = skb_headlen(skb);
union enetc_tx_bd temp_bd;
- u8 msgtype, twostep, udp;
+ bool csum_offload = false;
union enetc_tx_bd *txbd;
- u16 offset1, offset2;
int i, count = 0;
skb_frag_t *frag;
unsigned int f;
dma_addr_t dma;
u8 flags = 0;
+ u32 tstamp;
+
+ enetc_clear_tx_bd(&temp_bd);
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		/* Cannot support TSD and checksum offload at the same time */
+ if (priv->active_offloads & ENETC_F_TXCSUM &&
+ enetc_tx_csum_offload_check(skb) && !tx_ring->tsd_enable) {
+ temp_bd.l3_aux0 = FIELD_PREP(ENETC_TX_BD_L3_START,
+ skb_network_offset(skb));
+ temp_bd.l3_aux1 = FIELD_PREP(ENETC_TX_BD_L3_HDR_LEN,
+ skb_network_header_len(skb) / 4);
+ temp_bd.l3_aux1 |= FIELD_PREP(ENETC_TX_BD_L3T,
+ enetc_skb_is_ipv6(skb));
+ if (enetc_skb_is_tcp(skb))
+ temp_bd.l4_aux = FIELD_PREP(ENETC_TX_BD_L4T,
+ ENETC_TXBD_L4T_TCP);
+ else
+ temp_bd.l4_aux = FIELD_PREP(ENETC_TX_BD_L4T,
+ ENETC_TXBD_L4T_UDP);
+ flags |= ENETC_TXBD_FLAGS_CSUM_LSO | ENETC_TXBD_FLAGS_L4CS;
+ csum_offload = true;
+ } else if (skb_checksum_help(skb)) {
+ return 0;
+ }
+ }
+
+ if (enetc_cb->flag & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
+ do_onestep_tstamp = true;
+ tstamp = enetc_update_ptp_sync_msg(priv, skb, csum_offload);
+ } else if (enetc_cb->flag & ENETC_F_TX_TSTAMP) {
+ do_twostep_tstamp = true;
+ }
i = tx_ring->next_to_use;
txbd = ENETC_TXBD(*tx_ring, i);
@@ -169,7 +377,6 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
temp_bd.addr = cpu_to_le64(dma);
temp_bd.buf_len = cpu_to_le16(len);
- temp_bd.lstatus = 0;
tx_swbd = &tx_ring->tx_swbd[i];
tx_swbd->dma = dma;
@@ -179,17 +386,6 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
count++;
do_vlan = skb_vlan_tag_present(skb);
- if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
- if (enetc_ptp_parse(skb, &udp, &msgtype, &twostep, &offset1,
- &offset2) ||
- msgtype != PTP_MSGTYPE_SYNC || twostep)
- WARN_ONCE(1, "Bad packet for one-step timestamping\n");
- else
- do_onestep_tstamp = true;
- } else if (skb->cb[0] & ENETC_F_TX_TSTAMP) {
- do_twostep_tstamp = true;
- }
-
tx_swbd->do_twostep_tstamp = do_twostep_tstamp;
tx_swbd->qbv_en = !!(priv->active_offloads & ENETC_F_QBV);
tx_swbd->check_wb = tx_swbd->do_twostep_tstamp || tx_swbd->qbv_en;
@@ -232,38 +428,9 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
}
if (do_onestep_tstamp) {
- u32 lo, hi, val;
- u64 sec, nsec;
- u8 *data;
-
- lo = enetc_rd_hot(hw, ENETC_SICTR0);
- hi = enetc_rd_hot(hw, ENETC_SICTR1);
- sec = (u64)hi << 32 | lo;
- nsec = do_div(sec, 1000000000);
-
/* Configure extension BD */
- temp_bd.ext.tstamp = cpu_to_le32(lo & 0x3fffffff);
+ temp_bd.ext.tstamp = cpu_to_le32(tstamp);
e_flags |= ENETC_TXBD_E_FLAGS_ONE_STEP_PTP;
-
- /* Update originTimestamp field of Sync packet
- * - 48 bits seconds field
- * - 32 bits nanseconds field
- */
- data = skb_mac_header(skb);
- *(__be16 *)(data + offset2) =
- htons((sec >> 32) & 0xffff);
- *(__be32 *)(data + offset2 + 2) =
- htonl(sec & 0xffffffff);
- *(__be32 *)(data + offset2 + 6) = htonl(nsec);
-
- /* Configure single-step register */
- val = ENETC_PM0_SINGLE_STEP_EN;
- val |= ENETC_SET_SINGLE_STEP_OFFSET(offset1);
- if (udp)
- val |= ENETC_PM0_SINGLE_STEP_CH;
-
- enetc_port_mac_wr(priv->si, ENETC_PM0_SINGLE_STEP,
- val);
} else if (do_twostep_tstamp) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
e_flags |= ENETC_TXBD_E_FLAGS_TWO_STEP_PTP;
@@ -325,25 +492,20 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
dma_err:
dev_err(tx_ring->dev, "DMA map error");
- do {
- tx_swbd = &tx_ring->tx_swbd[i];
- enetc_free_tx_frame(tx_ring, tx_swbd);
- if (i == 0)
- i = tx_ring->bd_count;
- i--;
- } while (count--);
+ enetc_unwind_tx_frame(tx_ring, count, i);
return 0;
}
-static void enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
- struct enetc_tx_swbd *tx_swbd,
- union enetc_tx_bd *txbd, int *i, int hdr_len,
- int data_len)
+static int enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
+ struct enetc_tx_swbd *tx_swbd,
+ union enetc_tx_bd *txbd, int *i, int hdr_len,
+ int data_len)
{
union enetc_tx_bd txbd_tmp;
u8 flags = 0, e_flags = 0;
dma_addr_t addr;
+ int count = 1;
enetc_clear_tx_bd(&txbd_tmp);
addr = tx_ring->tso_headers_dma + *i * TSO_HEADER_SIZE;
@@ -386,7 +548,10 @@ static void enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
/* Write the BD */
txbd_tmp.ext.e_flags = e_flags;
*txbd = txbd_tmp;
+ count++;
}
+
+ return count;
}
static int enetc_map_tx_tso_data(struct enetc_bdr *tx_ring, struct sk_buff *skb,
@@ -485,8 +650,233 @@ static void enetc_tso_complete_csum(struct enetc_bdr *tx_ring, struct tso_t *tso
}
}
+static int enetc_lso_count_descs(const struct sk_buff *skb)
+{
+	/* 4 BDs: 1 BD for the LSO header + 1 BD for the extended BD + 1 BD
+	 * for the linear area data, not including the LSO header, namely
+	 * skb_headlen(skb) - lso_hdr_len (it may be 0, but that's
+	 * okay, we only need to consider the worst case), and 1 BD
+	 * for the gap.
+ */
+ return skb_shinfo(skb)->nr_frags + 4;
+}
+
+static int enetc_lso_get_hdr_len(const struct sk_buff *skb)
+{
+ int hdr_len, tlen;
+
+ tlen = skb_is_gso_tcp(skb) ? tcp_hdrlen(skb) : sizeof(struct udphdr);
+ hdr_len = skb_transport_offset(skb) + tlen;
+
+ return hdr_len;
+}
+
+static void enetc_lso_start(struct sk_buff *skb, struct enetc_lso_t *lso)
+{
+ lso->lso_seg_size = skb_shinfo(skb)->gso_size;
+ lso->ipv6 = enetc_skb_is_ipv6(skb);
+ lso->tcp = skb_is_gso_tcp(skb);
+ lso->l3_hdr_len = skb_network_header_len(skb);
+ lso->l3_start = skb_network_offset(skb);
+ lso->hdr_len = enetc_lso_get_hdr_len(skb);
+ lso->total_len = skb->len - lso->hdr_len;
+}
+
+static void enetc_lso_map_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
+ int *i, struct enetc_lso_t *lso)
+{
+ union enetc_tx_bd txbd_tmp, *txbd;
+ struct enetc_tx_swbd *tx_swbd;
+ u16 frm_len, frm_len_ext;
+ u8 flags, e_flags = 0;
+ dma_addr_t addr;
+ char *hdr;
+
+ /* Get the first BD of the LSO BDs chain */
+ txbd = ENETC_TXBD(*tx_ring, *i);
+ tx_swbd = &tx_ring->tx_swbd[*i];
+ prefetchw(txbd);
+
+ /* Prepare LSO header: MAC + IP + TCP/UDP */
+ hdr = tx_ring->tso_headers + *i * TSO_HEADER_SIZE;
+ memcpy(hdr, skb->data, lso->hdr_len);
+ addr = tx_ring->tso_headers_dma + *i * TSO_HEADER_SIZE;
+
+ /* {frm_len_ext, frm_len} indicates the total length of
+ * large transmit data unit. frm_len contains the 16 least
+ * significant bits and frm_len_ext contains the 4 most
+ * significant bits.
+ */
+ frm_len = lso->total_len & 0xffff;
+ frm_len_ext = (lso->total_len >> 16) & 0xf;
+
+ /* Set the flags of the first BD */
+ flags = ENETC_TXBD_FLAGS_EX | ENETC_TXBD_FLAGS_CSUM_LSO |
+ ENETC_TXBD_FLAGS_LSO | ENETC_TXBD_FLAGS_L4CS;
+
+ enetc_clear_tx_bd(&txbd_tmp);
+ txbd_tmp.addr = cpu_to_le64(addr);
+ txbd_tmp.hdr_len = cpu_to_le16(lso->hdr_len);
+
+ /* first BD needs frm_len and offload flags set */
+ txbd_tmp.frm_len = cpu_to_le16(frm_len);
+ txbd_tmp.flags = flags;
+
+ txbd_tmp.l3_aux0 = FIELD_PREP(ENETC_TX_BD_L3_START, lso->l3_start);
+ /* l3_hdr_size in 32-bits (4 bytes) */
+ txbd_tmp.l3_aux1 = FIELD_PREP(ENETC_TX_BD_L3_HDR_LEN,
+ lso->l3_hdr_len / 4);
+ if (lso->ipv6)
+ txbd_tmp.l3_aux1 |= ENETC_TX_BD_L3T;
+ else
+ txbd_tmp.l3_aux0 |= ENETC_TX_BD_IPCS;
+
+ txbd_tmp.l4_aux = FIELD_PREP(ENETC_TX_BD_L4T, lso->tcp ?
+ ENETC_TXBD_L4T_TCP : ENETC_TXBD_L4T_UDP);
+
+ /* For the LSO header we do not set the dma address since
+ * we do not want it unmapped when we do cleanup. We still
+ * set len so that we count the bytes sent.
+ */
+ tx_swbd->len = lso->hdr_len;
+ tx_swbd->do_twostep_tstamp = false;
+ tx_swbd->check_wb = false;
+
+ /* Actually write the header in the BD */
+ *txbd = txbd_tmp;
+
+ /* Get the next BD, and the next BD is extended BD */
+ enetc_bdr_idx_inc(tx_ring, i);
+ txbd = ENETC_TXBD(*tx_ring, *i);
+ tx_swbd = &tx_ring->tx_swbd[*i];
+ prefetchw(txbd);
+
+ enetc_clear_tx_bd(&txbd_tmp);
+ if (skb_vlan_tag_present(skb)) {
+ /* Setup the VLAN fields */
+ txbd_tmp.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb));
+ txbd_tmp.ext.tpid = ENETC_TPID_8021Q;
+ e_flags = ENETC_TXBD_E_FLAGS_VLAN_INS;
+ }
+
+ /* Write the BD */
+ txbd_tmp.ext.e_flags = e_flags;
+ txbd_tmp.ext.lso_sg_size = cpu_to_le16(lso->lso_seg_size);
+ txbd_tmp.ext.frm_len_ext = cpu_to_le16(frm_len_ext);
+ *txbd = txbd_tmp;
+}
+
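
[Editor's note] The comment in enetc_lso_map_hdr() above describes the 20-bit total-length encoding: frm_len carries the 16 least significant bits and frm_len_ext the 4 most significant ones. A quick worked example of the split, assuming an illustrative 74565-byte (0x12345) payload:

	u32 total_len   = 0x12345;			/* illustrative LSO payload length */
	u16 frm_len     = total_len & 0xffff;		/* 0x2345: 16 LSBs */
	u16 frm_len_ext = (total_len >> 16) & 0xf;	/* 0x1: 4 MSBs */
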
+static int enetc_lso_map_data(struct enetc_bdr *tx_ring, struct sk_buff *skb,
+ int *i, struct enetc_lso_t *lso, int *count)
+{
+ union enetc_tx_bd txbd_tmp, *txbd = NULL;
+ struct enetc_tx_swbd *tx_swbd;
+ skb_frag_t *frag;
+ dma_addr_t dma;
+ u8 flags = 0;
+ int len, f;
+
+ len = skb_headlen(skb) - lso->hdr_len;
+ if (len > 0) {
+ dma = dma_map_single(tx_ring->dev, skb->data + lso->hdr_len,
+ len, DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_ring->dev, dma))
+ return -ENOMEM;
+
+ enetc_bdr_idx_inc(tx_ring, i);
+ txbd = ENETC_TXBD(*tx_ring, *i);
+ tx_swbd = &tx_ring->tx_swbd[*i];
+ prefetchw(txbd);
+ *count += 1;
+
+ enetc_clear_tx_bd(&txbd_tmp);
+ txbd_tmp.addr = cpu_to_le64(dma);
+ txbd_tmp.buf_len = cpu_to_le16(len);
+
+ tx_swbd->dma = dma;
+ tx_swbd->len = len;
+ tx_swbd->is_dma_page = 0;
+ tx_swbd->dir = DMA_TO_DEVICE;
+ }
+
+ frag = &skb_shinfo(skb)->frags[0];
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++, frag++) {
+ if (txbd)
+ *txbd = txbd_tmp;
+
+ len = skb_frag_size(frag);
+ dma = skb_frag_dma_map(tx_ring->dev, frag);
+ if (dma_mapping_error(tx_ring->dev, dma))
+ return -ENOMEM;
+
+ /* Get the next BD */
+ enetc_bdr_idx_inc(tx_ring, i);
+ txbd = ENETC_TXBD(*tx_ring, *i);
+ tx_swbd = &tx_ring->tx_swbd[*i];
+ prefetchw(txbd);
+ *count += 1;
+
+ enetc_clear_tx_bd(&txbd_tmp);
+ txbd_tmp.addr = cpu_to_le64(dma);
+ txbd_tmp.buf_len = cpu_to_le16(len);
+
+ tx_swbd->dma = dma;
+ tx_swbd->len = len;
+ tx_swbd->is_dma_page = 1;
+ tx_swbd->dir = DMA_TO_DEVICE;
+ }
+
+ /* Last BD needs 'F' bit set */
+ flags |= ENETC_TXBD_FLAGS_F;
+ txbd_tmp.flags = flags;
+ *txbd = txbd_tmp;
+
+ tx_swbd->is_eof = 1;
+ tx_swbd->skb = skb;
+
+ return 0;
+}
+
+static int enetc_lso_hw_offload(struct enetc_bdr *tx_ring, struct sk_buff *skb)
+{
+ struct enetc_tx_swbd *tx_swbd;
+ struct enetc_lso_t lso = {0};
+ int err, i, count = 0;
+
+ /* Initialize the LSO handler */
+ enetc_lso_start(skb, &lso);
+ i = tx_ring->next_to_use;
+
+ enetc_lso_map_hdr(tx_ring, skb, &i, &lso);
+ /* The first BD plus one extension BD */
+ count += 2;
+
+ err = enetc_lso_map_data(tx_ring, skb, &i, &lso, &count);
+ if (err)
+ goto dma_err;
+
+ /* Go to the next BD */
+ enetc_bdr_idx_inc(tx_ring, &i);
+ tx_ring->next_to_use = i;
+ enetc_update_tx_ring_tail(tx_ring);
+
+ return count;
+
+dma_err:
+ do {
+ tx_swbd = &tx_ring->tx_swbd[i];
+ enetc_free_tx_frame(tx_ring, tx_swbd);
+ if (i == 0)
+ i = tx_ring->bd_count;
+ i--;
+ } while (--count);
+
+ return 0;
+}
+
static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
{
+ struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev);
int hdr_len, total_len, data_len;
struct enetc_tx_swbd *tx_swbd;
union enetc_tx_bd *txbd;
@@ -518,9 +908,9 @@ static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb
/* compute the csum over the L4 header */
csum = enetc_tso_hdr_csum(&tso, skb, hdr, hdr_len, &pos);
- enetc_map_tx_tso_hdr(tx_ring, skb, tx_swbd, txbd, &i, hdr_len, data_len);
+ count += enetc_map_tx_tso_hdr(tx_ring, skb, tx_swbd, txbd,
+ &i, hdr_len, data_len);
bd_data_num = 0;
- count++;
while (data_len > 0) {
int size;
@@ -544,15 +934,20 @@ static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb
err = enetc_map_tx_tso_data(tx_ring, skb, tx_swbd, txbd,
tso.data, size,
size == data_len);
- if (err)
+ if (err) {
+ if (i == 0)
+ i = tx_ring->bd_count;
+ i--;
+
goto err_map_data;
+ }
data_len -= size;
count++;
bd_data_num++;
tso_build_data(skb, &tso, size);
- if (unlikely(bd_data_num >= ENETC_MAX_SKB_FRAGS && data_len))
+ if (unlikely(bd_data_num >= priv->max_frags && data_len))
goto err_chained_bd;
}
@@ -574,13 +969,7 @@ err_map_data:
dev_err(tx_ring->dev, "DMA map error");
err_chained_bd:
- do {
- tx_swbd = &tx_ring->tx_swbd[i];
- enetc_free_tx_frame(tx_ring, tx_swbd);
- if (i == 0)
- i = tx_ring->bd_count;
- i--;
- } while (count--);
+ enetc_unwind_tx_frame(tx_ring, count, i);
return 0;
}
@@ -588,12 +977,13 @@ err_chained_bd:
static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
+ struct enetc_skb_cb *enetc_cb = ENETC_SKB_CB(skb);
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_bdr *tx_ring;
- int count, err;
+ int count;
/* Queue one-step Sync packet if already locked */
- if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
+ if (enetc_cb->flag & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
if (test_and_set_bit_lock(ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS,
&priv->flags)) {
skb_queue_tail(&priv->tx_skbs, skb);
@@ -604,16 +994,28 @@ static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
tx_ring = priv->tx_ring[skb->queue_mapping];
if (skb_is_gso(skb)) {
- if (enetc_bd_unused(tx_ring) < tso_count_descs(skb)) {
- netif_stop_subqueue(ndev, tx_ring->index);
- return NETDEV_TX_BUSY;
- }
+ /* LSO data unit lengths of up to 256KB are supported */
+ if (priv->active_offloads & ENETC_F_LSO &&
+ (skb->len - enetc_lso_get_hdr_len(skb)) <=
+ ENETC_LSO_MAX_DATA_LEN) {
+ if (enetc_bd_unused(tx_ring) < enetc_lso_count_descs(skb)) {
+ netif_stop_subqueue(ndev, tx_ring->index);
+ return NETDEV_TX_BUSY;
+ }
- enetc_lock_mdio();
- count = enetc_map_tx_tso_buffs(tx_ring, skb);
- enetc_unlock_mdio();
+ count = enetc_lso_hw_offload(tx_ring, skb);
+ } else {
+ if (enetc_bd_unused(tx_ring) < tso_count_descs(skb)) {
+ netif_stop_subqueue(ndev, tx_ring->index);
+ return NETDEV_TX_BUSY;
+ }
+
+ enetc_lock_mdio();
+ count = enetc_map_tx_tso_buffs(tx_ring, skb);
+ enetc_unlock_mdio();
+ }
} else {
- if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS))
+ if (unlikely(skb_shinfo(skb)->nr_frags > priv->max_frags))
if (unlikely(skb_linearize(skb)))
goto drop_packet_err;
@@ -623,11 +1025,6 @@ static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- err = skb_checksum_help(skb);
- if (err)
- goto drop_packet_err;
- }
enetc_lock_mdio();
count = enetc_map_tx_buffs(tx_ring, skb);
enetc_unlock_mdio();
@@ -636,7 +1033,7 @@ static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
if (unlikely(!count))
goto drop_packet_err;
- if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED)
+ if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED(priv->max_frags))
netif_stop_subqueue(ndev, tx_ring->index);
return NETDEV_TX_OK;
@@ -648,24 +1045,29 @@ drop_packet_err:
netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
{
+ struct enetc_skb_cb *enetc_cb = ENETC_SKB_CB(skb);
struct enetc_ndev_priv *priv = netdev_priv(ndev);
u8 udp, msgtype, twostep;
u16 offset1, offset2;
- /* Mark tx timestamp type on skb->cb[0] if requires */
+ /* Mark tx timestamp type on enetc_cb->flag if required */
if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
- (priv->active_offloads & ENETC_F_TX_TSTAMP_MASK)) {
- skb->cb[0] = priv->active_offloads & ENETC_F_TX_TSTAMP_MASK;
- } else {
- skb->cb[0] = 0;
- }
+ (priv->active_offloads & ENETC_F_TX_TSTAMP_MASK))
+ enetc_cb->flag = priv->active_offloads & ENETC_F_TX_TSTAMP_MASK;
+ else
+ enetc_cb->flag = 0;
/* Fall back to two-step timestamp if not one-step Sync packet */
- if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
+ if (enetc_cb->flag & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
if (enetc_ptp_parse(skb, &udp, &msgtype, &twostep,
&offset1, &offset2) ||
- msgtype != PTP_MSGTYPE_SYNC || twostep != 0)
- skb->cb[0] = ENETC_F_TX_TSTAMP;
+ msgtype != PTP_MSGTYPE_SYNC || twostep != 0) {
+ enetc_cb->flag = ENETC_F_TX_TSTAMP;
+ } else {
+ enetc_cb->udp = !!udp;
+ enetc_cb->correction_off = offset1;
+ enetc_cb->origin_tstamp_off = offset2;
+ }
}
return enetc_start_xmit(skb, ndev);
@@ -700,8 +1102,9 @@ static void enetc_rx_dim_work(struct work_struct *w)
net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
struct enetc_int_vector *v =
container_of(dim, struct enetc_int_vector, rx_dim);
+ struct enetc_ndev_priv *priv = netdev_priv(v->rx_ring.ndev);
- v->rx_ictt = enetc_usecs_to_cycles(moder.usec);
+ v->rx_ictt = enetc_usecs_to_cycles(moder.usec, priv->sysclk_freq);
dim->state = DIM_START_MEASURE;
}
@@ -718,7 +1121,7 @@ static void enetc_rx_net_dim(struct enetc_int_vector *v)
v->rx_ring.stats.packets,
v->rx_ring.stats.bytes,
&dim_sample);
- net_dim(&v->rx_dim, dim_sample);
+ net_dim(&v->rx_dim, &dim_sample);
}
static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci)
@@ -856,7 +1259,9 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
if (xdp_frame) {
xdp_return_frame(xdp_frame);
} else if (skb) {
- if (unlikely(skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP)) {
+ struct enetc_skb_cb *enetc_cb = ENETC_SKB_CB(skb);
+
+ if (unlikely(enetc_cb->flag & ENETC_F_TX_ONESTEP_SYNC_TSTAMP)) {
/* Start work to release lock for next one-step
* timestamping packet. And send one skb in
* tx_skbs queue if has.
@@ -902,7 +1307,9 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) &&
__netif_subqueue_stopped(ndev, tx_ring->index) &&
- (enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) {
+ !test_bit(ENETC_TX_DOWN, &priv->flags) &&
+ (enetc_bd_unused(tx_ring) >=
+ ENETC_TXBDS_MAX_NEEDED(priv->max_frags)))) {
netif_wake_subqueue(ndev, tx_ring->index);
}
@@ -977,7 +1384,6 @@ static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
return j;
}
-#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
static void enetc_get_rx_tstamp(struct net_device *ndev,
union enetc_rx_bd *rxbd,
struct sk_buff *skb)
@@ -1001,7 +1407,6 @@ static void enetc_get_rx_tstamp(struct net_device *ndev,
shhwtstamps->hwtstamp = ns_to_ktime(tstamp);
}
}
-#endif
static void enetc_get_offloads(struct enetc_bdr *rx_ring,
union enetc_rx_bd *rxbd, struct sk_buff *skb)
@@ -1017,6 +1422,7 @@ static void enetc_get_offloads(struct enetc_bdr *rx_ring,
}
if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_VLAN) {
+ struct enetc_hw *hw = &priv->si->hw;
__be16 tpid = 0;
switch (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TPID) {
@@ -1027,24 +1433,19 @@ static void enetc_get_offloads(struct enetc_bdr *rx_ring,
tpid = htons(ETH_P_8021AD);
break;
case 2:
- tpid = htons(enetc_port_rd(&priv->si->hw,
- ENETC_PCVLANR1));
+ tpid = htons(enetc_rd_hot(hw, ENETC_SICVLANR1) &
+ SICVLANR_ETYPE);
break;
case 3:
- tpid = htons(enetc_port_rd(&priv->si->hw,
- ENETC_PCVLANR2));
- break;
- default:
- break;
+ tpid = htons(enetc_rd_hot(hw, ENETC_SICVLANR2) &
+ SICVLANR_ETYPE);
}
__vlan_hwaccel_put_tag(skb, tpid, le16_to_cpu(rxbd->r.vlan_opt));
}
-#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
if (priv->active_offloads & ENETC_F_RX_TSTAMP)
enetc_get_rx_tstamp(rx_ring->ndev, rxbd, skb);
-#endif
}
/* This gets called during the non-XDP NAPI poll cycle as well as on XDP_PASS,
@@ -1203,6 +1604,8 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
/* next descriptor to process */
i = rx_ring->next_to_clean;
+ enetc_lock_mdio();
+
while (likely(rx_frm_cnt < work_limit)) {
union enetc_rx_bd *rxbd;
struct sk_buff *skb;
@@ -1238,7 +1641,9 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
rx_byte_cnt += skb->len + ETH_HLEN;
rx_frm_cnt++;
+ enetc_unlock_mdio();
napi_gro_receive(napi, skb);
+ enetc_lock_mdio();
}
rx_ring->next_to_clean = i;
@@ -1246,6 +1651,8 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
rx_ring->stats.packets += rx_frm_cnt;
rx_ring->stats.bytes += rx_byte_cnt;
+ enetc_unlock_mdio();
+
return rx_frm_cnt;
}
@@ -1380,6 +1787,9 @@ int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
int xdp_tx_bd_cnt, i, k;
int xdp_tx_frm_cnt = 0;
+ if (unlikely(test_bit(ENETC_TX_DOWN, &priv->flags)))
+ return -ENETDOWN;
+
enetc_lock_mdio();
tx_ring = priv->xdp_tx_ring[smp_processor_id()];
@@ -1524,7 +1934,16 @@ static void enetc_xdp_drop(struct enetc_bdr *rx_ring, int rx_ring_first,
&rx_ring->rx_swbd[rx_ring_first]);
enetc_bdr_idx_inc(rx_ring, &rx_ring_first);
}
- rx_ring->stats.xdp_drops++;
+}
+
+static void enetc_bulk_flip_buff(struct enetc_bdr *rx_ring, int rx_ring_first,
+ int rx_ring_last)
+{
+ while (rx_ring_first != rx_ring_last) {
+ enetc_flip_rx_buff(rx_ring,
+ &rx_ring->rx_swbd[rx_ring_first]);
+ enetc_bdr_idx_inc(rx_ring, &rx_ring_first);
+ }
}
static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
@@ -1543,13 +1962,14 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
/* next descriptor to process */
i = rx_ring->next_to_clean;
+ enetc_lock_mdio();
+
while (likely(rx_frm_cnt < work_limit)) {
union enetc_rx_bd *rxbd, *orig_rxbd;
- int orig_i, orig_cleaned_cnt;
struct xdp_buff xdp_buff;
struct sk_buff *skb;
+ int orig_i, err;
u32 bd_status;
- int err;
rxbd = enetc_rxbd(rx_ring, i);
bd_status = le32_to_cpu(rxbd->r.lstatus);
@@ -1564,7 +1984,6 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
break;
orig_rxbd = rxbd;
- orig_cleaned_cnt = cleaned_cnt;
orig_i = i;
enetc_build_xdp_buff(rx_ring, bd_status, &rxbd, &i,
@@ -1589,22 +2008,37 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
fallthrough;
case XDP_DROP:
enetc_xdp_drop(rx_ring, orig_i, i);
+ rx_ring->stats.xdp_drops++;
break;
case XDP_PASS:
- rxbd = orig_rxbd;
- cleaned_cnt = orig_cleaned_cnt;
- i = orig_i;
-
- skb = enetc_build_skb(rx_ring, bd_status, &rxbd,
- &i, &cleaned_cnt,
- ENETC_RXB_DMA_SIZE_XDP);
- if (unlikely(!skb))
+ skb = xdp_build_skb_from_buff(&xdp_buff);
+ /* Probably under memory pressure, stop NAPI */
+ if (unlikely(!skb)) {
+ enetc_xdp_drop(rx_ring, orig_i, i);
+ rx_ring->stats.xdp_drops++;
goto out;
+ }
+
+ enetc_get_offloads(rx_ring, orig_rxbd, skb);
+
+ /* These buffers are about to be owned by the stack.
+ * Update our buffer cache (the rx_swbd array elements)
+ * with their other page halves.
+ */
+ enetc_bulk_flip_buff(rx_ring, orig_i, i);
+ enetc_unlock_mdio();
napi_gro_receive(napi, skb);
+ enetc_lock_mdio();
break;
case XDP_TX:
tx_ring = priv->xdp_tx_ring[rx_ring->index];
+ if (unlikely(test_bit(ENETC_TX_DOWN, &priv->flags))) {
+ enetc_xdp_drop(rx_ring, orig_i, i);
+ tx_ring->stats.xdp_tx_drops++;
+ break;
+ }
+
xdp_tx_bd_cnt = enetc_rx_swbd_to_xdp_tx_swbd(xdp_tx_arr,
rx_ring,
orig_i, i);
@@ -1613,7 +2047,7 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
enetc_xdp_drop(rx_ring, orig_i, i);
tx_ring->stats.xdp_tx_drops++;
} else {
- tx_ring->stats.xdp_tx += xdp_tx_bd_cnt;
+ tx_ring->stats.xdp_tx++;
rx_ring->xdp.xdp_tx_in_flight += xdp_tx_bd_cnt;
xdp_tx_frm_cnt++;
/* The XDP_TX enqueue was successful, so we
@@ -1630,16 +2064,14 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
}
break;
case XDP_REDIRECT:
+ enetc_unlock_mdio();
err = xdp_do_redirect(rx_ring->ndev, &xdp_buff, prog);
+ enetc_lock_mdio();
if (unlikely(err)) {
enetc_xdp_drop(rx_ring, orig_i, i);
rx_ring->stats.xdp_redirect_failures++;
} else {
- while (orig_i != i) {
- enetc_flip_rx_buff(rx_ring,
- &rx_ring->rx_swbd[orig_i]);
- enetc_bdr_idx_inc(rx_ring, &orig_i);
- }
+ enetc_bulk_flip_buff(rx_ring, orig_i, i);
xdp_redirect_frm_cnt++;
rx_ring->stats.xdp_redirect++;
}
@@ -1654,8 +2086,11 @@ out:
rx_ring->stats.packets += rx_frm_cnt;
rx_ring->stats.bytes += rx_byte_cnt;
- if (xdp_redirect_frm_cnt)
+ if (xdp_redirect_frm_cnt) {
+ enetc_unlock_mdio();
xdp_do_flush();
+ enetc_lock_mdio();
+ }
if (xdp_tx_frm_cnt)
enetc_update_tx_ring_tail(tx_ring);
@@ -1664,6 +2099,8 @@ out:
enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring) -
rx_ring->xdp.xdp_tx_in_flight);
+ enetc_unlock_mdio();
+
return rx_frm_cnt;
}
@@ -1682,6 +2119,7 @@ static int enetc_poll(struct napi_struct *napi, int budget)
for (i = 0; i < v->count_tx_rings; i++)
if (!enetc_clean_tx_ring(&v->tx_ring[i], budget))
complete = false;
+ enetc_unlock_mdio();
prog = rx_ring->xdp.prog;
if (prog)
@@ -1693,10 +2131,8 @@ static int enetc_poll(struct napi_struct *napi, int budget)
if (work_done)
v->rx_napi_work = true;
- if (!complete) {
- enetc_unlock_mdio();
+ if (!complete)
return budget;
- }
napi_complete_done(napi, work_done);
@@ -1705,6 +2141,7 @@ static int enetc_poll(struct napi_struct *napi, int budget)
v->rx_napi_work = false;
+ enetc_lock_mdio();
/* enable interrupts */
enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE);
@@ -1729,9 +2166,15 @@ void enetc_get_si_caps(struct enetc_si *si)
si->num_rx_rings = (val >> 16) & 0xff;
si->num_tx_rings = val & 0xff;
- val = enetc_rd(hw, ENETC_SIRFSCAPR);
- si->num_fs_entries = ENETC_SIRFSCAPR_GET_NUM_RFS(val);
- si->num_fs_entries = min(si->num_fs_entries, ENETC_MAX_RFS_SIZE);
+ val = enetc_rd(hw, ENETC_SIPCAPR0);
+ if (val & ENETC_SIPCAPR0_RFS) {
+ val = enetc_rd(hw, ENETC_SIRFSCAPR);
+ si->num_fs_entries = ENETC_SIRFSCAPR_GET_NUM_RFS(val);
+ si->num_fs_entries = min(si->num_fs_entries, ENETC_MAX_RFS_SIZE);
+ } else {
+ /* This ENETC instance does not support RFS */
+ si->num_fs_entries = 0;
+ }
si->num_rss = 0;
val = enetc_rd(hw, ENETC_SIPCAPR0);
@@ -1742,14 +2185,8 @@ void enetc_get_si_caps(struct enetc_si *si)
si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(rss);
}
- if (val & ENETC_SIPCAPR0_QBV)
- si->hw_features |= ENETC_SI_F_QBV;
-
- if (val & ENETC_SIPCAPR0_QBU)
- si->hw_features |= ENETC_SI_F_QBU;
-
- if (val & ENETC_SIPCAPR0_PSFP)
- si->hw_features |= ENETC_SI_F_PSFP;
+ if (val & ENETC_SIPCAPR0_LSO)
+ si->hw_features |= ENETC_SI_F_LSO;
}
EXPORT_SYMBOL_GPL(enetc_get_si_caps);
@@ -2039,13 +2476,35 @@ static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups)
for (i = 0; i < si->num_rss; i++)
rss_table[i] = i % num_groups;
- enetc_set_rss_table(si, rss_table, si->num_rss);
+ si->ops->set_rss_table(si, rss_table, si->num_rss);
kfree(rss_table);
return 0;
}
+static void enetc_set_lso_flags_mask(struct enetc_hw *hw)
+{
+ enetc_wr(hw, ENETC4_SILSOSFMR0,
+ SILSOSFMR0_VAL_SET(ENETC4_TCP_NL_SEG_FLAGS_DMASK,
+ ENETC4_TCP_NL_SEG_FLAGS_DMASK));
+ enetc_wr(hw, ENETC4_SILSOSFMR1, 0);
+}
+
+static void enetc_set_rss(struct net_device *ndev, int en)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ u32 reg;
+
+ enetc_wr(hw, ENETC_SIRBGCR, priv->num_rx_rings);
+
+ reg = enetc_rd(hw, ENETC_SIMR);
+ reg &= ~ENETC_SIMR_RSSE;
+ reg |= (en) ? ENETC_SIMR_RSSE : 0;
+ enetc_wr(hw, ENETC_SIMR, reg);
+}
+
int enetc_configure_si(struct enetc_ndev_priv *priv)
{
struct enetc_si *si = priv->si;
@@ -2059,10 +2518,16 @@ int enetc_configure_si(struct enetc_ndev_priv *priv)
/* enable SI */
enetc_wr(hw, ENETC_SIMR, ENETC_SIMR_EN);
+ if (si->hw_features & ENETC_SI_F_LSO)
+ enetc_set_lso_flags_mask(hw);
+
if (si->num_rss) {
err = enetc_setup_default_rss_table(si, priv->num_rx_rings);
if (err)
return err;
+
+ if (priv->ndev->features & NETIF_F_RXHASH)
+ enetc_set_rss(priv->ndev, true);
}
return 0;
@@ -2083,9 +2548,9 @@ void enetc_init_si_rings_params(struct enetc_ndev_priv *priv)
*/
priv->num_rx_rings = min_t(int, cpus, si->num_rx_rings);
priv->num_tx_rings = si->num_tx_rings;
- priv->bdr_int_num = cpus;
+ priv->bdr_int_num = priv->num_rx_rings;
priv->ic_mode = ENETC_IC_RX_ADAPTIVE | ENETC_IC_TX_MANUAL;
- priv->tx_ictt = ENETC_TXIC_TIMETHR;
+ priv->tx_ictt = enetc_usecs_to_cycles(600, priv->sysclk_freq);
}
EXPORT_SYMBOL_GPL(enetc_init_si_rings_params);
@@ -2226,18 +2691,24 @@ static void enetc_enable_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
}
-static void enetc_enable_bdrs(struct enetc_ndev_priv *priv)
+static void enetc_enable_rx_bdrs(struct enetc_ndev_priv *priv)
{
struct enetc_hw *hw = &priv->si->hw;
int i;
- for (i = 0; i < priv->num_tx_rings; i++)
- enetc_enable_txbdr(hw, priv->tx_ring[i]);
-
for (i = 0; i < priv->num_rx_rings; i++)
enetc_enable_rxbdr(hw, priv->rx_ring[i]);
}
+static void enetc_enable_tx_bdrs(struct enetc_ndev_priv *priv)
+{
+ struct enetc_hw *hw = &priv->si->hw;
+ int i;
+
+ for (i = 0; i < priv->num_tx_rings; i++)
+ enetc_enable_txbdr(hw, priv->tx_ring[i]);
+}
+
static void enetc_disable_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
{
int idx = rx_ring->index;
@@ -2254,18 +2725,24 @@ static void enetc_disable_txbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
enetc_txbdr_wr(hw, idx, ENETC_TBMR, 0);
}
-static void enetc_disable_bdrs(struct enetc_ndev_priv *priv)
+static void enetc_disable_rx_bdrs(struct enetc_ndev_priv *priv)
{
struct enetc_hw *hw = &priv->si->hw;
int i;
- for (i = 0; i < priv->num_tx_rings; i++)
- enetc_disable_txbdr(hw, priv->tx_ring[i]);
-
for (i = 0; i < priv->num_rx_rings; i++)
enetc_disable_rxbdr(hw, priv->rx_ring[i]);
}
+static void enetc_disable_tx_bdrs(struct enetc_ndev_priv *priv)
+{
+ struct enetc_hw *hw = &priv->si->hw;
+ int i;
+
+ for (i = 0; i < priv->num_tx_rings; i++)
+ enetc_disable_txbdr(hw, priv->tx_ring[i]);
+}
+
static void enetc_wait_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
{
int delay = 8, timeout = 100;
@@ -2305,12 +2782,11 @@ static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
snprintf(v->name, sizeof(v->name), "%s-rxtx%d",
priv->ndev->name, i);
- err = request_irq(irq, enetc_msix, 0, v->name, v);
+ err = request_irq(irq, enetc_msix, IRQF_NO_AUTOEN, v->name, v);
if (err) {
dev_err(priv->dev, "request_irq() failed!\n");
goto irq_err;
}
- disable_irq(irq);
v->tbier_base = hw->reg + ENETC_BDR(TX, 0, ENETC_TBIER);
v->rbier = hw->reg + ENETC_BDR(RX, i, ENETC_RBIER);
@@ -2464,9 +2940,13 @@ void enetc_start(struct net_device *ndev)
enable_irq(irq);
}
- enetc_enable_bdrs(priv);
+ enetc_enable_tx_bdrs(priv);
+
+ enetc_enable_rx_bdrs(priv);
netif_tx_start_all_queues(ndev);
+
+ clear_bit(ENETC_TX_DOWN, &priv->flags);
}
EXPORT_SYMBOL_GPL(enetc_start);
@@ -2479,10 +2959,14 @@ int enetc_open(struct net_device *ndev)
extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP);
- err = enetc_setup_irqs(priv);
+ err = clk_prepare_enable(priv->ref_clk);
if (err)
return err;
+ err = enetc_setup_irqs(priv);
+ if (err)
+ goto err_setup_irqs;
+
err = enetc_phylink_connect(ndev);
if (err)
goto err_phy_connect;
@@ -2514,6 +2998,8 @@ err_alloc_tx:
phylink_disconnect_phy(priv->phylink);
err_phy_connect:
enetc_free_irqs(priv);
+err_setup_irqs:
+ clk_disable_unprepare(priv->ref_clk);
return err;
}
@@ -2524,9 +3010,15 @@ void enetc_stop(struct net_device *ndev)
struct enetc_ndev_priv *priv = netdev_priv(ndev);
int i;
+ set_bit(ENETC_TX_DOWN, &priv->flags);
+
netif_tx_stop_all_queues(ndev);
- enetc_disable_bdrs(priv);
+ enetc_disable_rx_bdrs(priv);
+
+ enetc_wait_bdrs(priv);
+
+ enetc_disable_tx_bdrs(priv);
for (i = 0; i < priv->bdr_int_num; i++) {
int irq = pci_irq_vector(priv->si->pdev,
@@ -2537,8 +3029,6 @@ void enetc_stop(struct net_device *ndev)
napi_disable(&priv->int_vector[i]->napi);
}
- enetc_wait_bdrs(priv);
-
enetc_clear_interrupts(priv);
}
EXPORT_SYMBOL_GPL(enetc_stop);
@@ -2563,6 +3053,7 @@ int enetc_close(struct net_device *ndev)
enetc_assign_tx_resources(priv, NULL);
enetc_free_irqs(priv);
+ clk_disable_unprepare(priv->ref_clk);
return 0;
}
@@ -2769,7 +3260,7 @@ static int enetc_setup_xdp_prog(struct net_device *ndev, struct bpf_prog *prog,
if (priv->min_num_stack_tx_queues + num_xdp_tx_queues >
priv->num_tx_rings) {
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Reserving %d XDP TXQs does not leave a minimum of %d for stack (total %d)",
+ "Reserving %d XDP TXQs leaves under %d for stack (total %d)",
num_xdp_tx_queues,
priv->min_num_stack_tx_queues,
priv->num_tx_rings);
@@ -2829,22 +3320,6 @@ struct net_device_stats *enetc_get_stats(struct net_device *ndev)
}
EXPORT_SYMBOL_GPL(enetc_get_stats);
-static int enetc_set_rss(struct net_device *ndev, int en)
-{
- struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct enetc_hw *hw = &priv->si->hw;
- u32 reg;
-
- enetc_wr(hw, ENETC_SIRBGCR, priv->num_rx_rings);
-
- reg = enetc_rd(hw, ENETC_SIMR);
- reg &= ~ENETC_SIMR_RSSE;
- reg |= (en) ? ENETC_SIMR_RSSE : 0;
- enetc_wr(hw, ENETC_SIMR, reg);
-
- return 0;
-}
-
static void enetc_enable_rxvlan(struct net_device *ndev, bool en)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
@@ -2882,17 +3357,17 @@ void enetc_set_features(struct net_device *ndev, netdev_features_t features)
}
EXPORT_SYMBOL_GPL(enetc_set_features);
-#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
-static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
+int enetc_hwtstamp_set(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
int err, new_offloads = priv->active_offloads;
- struct hwtstamp_config config;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
+ if (!enetc_ptp_clock_is_enabled(priv->si))
+ return -EOPNOTSUPP;
- switch (config.tx_type) {
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
new_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
break;
@@ -2901,6 +3376,10 @@ static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
new_offloads |= ENETC_F_TX_TSTAMP;
break;
case HWTSTAMP_TX_ONESTEP_SYNC:
+ if (!enetc_si_is_pf(priv->si) ||
+ enetc_is_pseudo_mac(priv->si))
+ return -EOPNOTSUPP;
+
new_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
new_offloads |= ENETC_F_TX_ONESTEP_SYNC_TSTAMP;
break;
@@ -2908,13 +3387,13 @@ static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
return -ERANGE;
}
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
new_offloads &= ~ENETC_F_RX_TSTAMP;
break;
default:
new_offloads |= ENETC_F_RX_TSTAMP;
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
}
if ((new_offloads ^ priv->active_offloads) & ENETC_F_RX_TSTAMP) {
@@ -2927,41 +3406,35 @@ static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
priv->active_offloads = new_offloads;
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
+EXPORT_SYMBOL_GPL(enetc_hwtstamp_set);
-static int enetc_hwtstamp_get(struct net_device *ndev, struct ifreq *ifr)
+int enetc_hwtstamp_get(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct hwtstamp_config config;
- config.flags = 0;
+ if (!enetc_ptp_clock_is_enabled(priv->si))
+ return -EOPNOTSUPP;
if (priv->active_offloads & ENETC_F_TX_ONESTEP_SYNC_TSTAMP)
- config.tx_type = HWTSTAMP_TX_ONESTEP_SYNC;
+ config->tx_type = HWTSTAMP_TX_ONESTEP_SYNC;
else if (priv->active_offloads & ENETC_F_TX_TSTAMP)
- config.tx_type = HWTSTAMP_TX_ON;
+ config->tx_type = HWTSTAMP_TX_ON;
else
- config.tx_type = HWTSTAMP_TX_OFF;
+ config->tx_type = HWTSTAMP_TX_OFF;
- config.rx_filter = (priv->active_offloads & ENETC_F_RX_TSTAMP) ?
- HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
+ config->rx_filter = (priv->active_offloads & ENETC_F_RX_TSTAMP) ?
+ HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
-#endif
+EXPORT_SYMBOL_GPL(enetc_hwtstamp_get);
int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
-#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
- if (cmd == SIOCSHWTSTAMP)
- return enetc_hwtstamp_set(ndev, rq);
- if (cmd == SIOCGHWTSTAMP)
- return enetc_hwtstamp_get(ndev, rq);
-#endif
if (!priv->phylink)
return -EOPNOTSUPP;
@@ -2970,13 +3443,100 @@ int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
}
EXPORT_SYMBOL_GPL(enetc_ioctl);
+static int enetc_int_vector_init(struct enetc_ndev_priv *priv, int i,
+ int v_tx_rings)
+{
+ struct enetc_int_vector *v;
+ struct enetc_bdr *bdr;
+ int j, err;
+
+ v = kzalloc(struct_size(v, tx_ring, v_tx_rings), GFP_KERNEL);
+ if (!v)
+ return -ENOMEM;
+
+ priv->int_vector[i] = v;
+ bdr = &v->rx_ring;
+ bdr->index = i;
+ bdr->ndev = priv->ndev;
+ bdr->dev = priv->dev;
+ bdr->bd_count = priv->rx_bd_count;
+ bdr->buffer_offset = ENETC_RXB_PAD;
+ priv->rx_ring[i] = bdr;
+
+ err = __xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0,
+ ENETC_RXB_DMA_SIZE_XDP);
+ if (err)
+ goto free_vector;
+
+ err = xdp_rxq_info_reg_mem_model(&bdr->xdp.rxq, MEM_TYPE_PAGE_SHARED,
+ NULL);
+ if (err) {
+ xdp_rxq_info_unreg(&bdr->xdp.rxq);
+ goto free_vector;
+ }
+
+ /* init defaults for adaptive IC */
+ if (priv->ic_mode & ENETC_IC_RX_ADAPTIVE) {
+ v->rx_ictt = 0x1;
+ v->rx_dim_en = true;
+ }
+
+ INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work);
+ netif_napi_add(priv->ndev, &v->napi, enetc_poll);
+ v->count_tx_rings = v_tx_rings;
+
+ for (j = 0; j < v_tx_rings; j++) {
+ int idx;
+
+ /* default tx ring mapping policy */
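+ /* e.g. (illustrative) with bdr_int_num = 4, vector i = 1 is
+ * mapped to TX rings 1, 5, 9, ...
+ */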
+ idx = priv->bdr_int_num * j + i;
+ __set_bit(idx, &v->tx_rings_map);
+ bdr = &v->tx_ring[j];
+ bdr->index = idx;
+ bdr->ndev = priv->ndev;
+ bdr->dev = priv->dev;
+ bdr->bd_count = priv->tx_bd_count;
+ priv->tx_ring[idx] = bdr;
+ }
+
+ return 0;
+
+free_vector:
+ priv->rx_ring[i] = NULL;
+ priv->int_vector[i] = NULL;
+ kfree(v);
+
+ return err;
+}
+
+static void enetc_int_vector_destroy(struct enetc_ndev_priv *priv, int i)
+{
+ struct enetc_int_vector *v = priv->int_vector[i];
+ struct enetc_bdr *rx_ring = &v->rx_ring;
+ int j, tx_ring_index;
+
+ xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq);
+ xdp_rxq_info_unreg(&rx_ring->xdp.rxq);
+ netif_napi_del(&v->napi);
+ cancel_work_sync(&v->rx_dim.work);
+
+ for (j = 0; j < v->count_tx_rings; j++) {
+ tx_ring_index = priv->bdr_int_num * j + i;
+ priv->tx_ring[tx_ring_index] = NULL;
+ }
+
+ priv->rx_ring[i] = NULL;
+ priv->int_vector[i] = NULL;
+ kfree(v);
+}
+
int enetc_alloc_msix(struct enetc_ndev_priv *priv)
{
struct pci_dev *pdev = priv->si->pdev;
+ int v_tx_rings, v_remainder;
int num_stack_tx_queues;
int first_xdp_tx_ring;
int i, n, err, nvec;
- int v_tx_rings;
nvec = ENETC_BDR_INT_BASE_IDX + priv->bdr_int_num;
/* allocate MSIX for both messaging and Rx/Tx interrupts */
@@ -2990,64 +3550,17 @@ int enetc_alloc_msix(struct enetc_ndev_priv *priv)
/* # of tx rings per int vector */
v_tx_rings = priv->num_tx_rings / priv->bdr_int_num;
+ v_remainder = priv->num_tx_rings % priv->bdr_int_num;
for (i = 0; i < priv->bdr_int_num; i++) {
- struct enetc_int_vector *v;
- struct enetc_bdr *bdr;
- int j;
-
- v = kzalloc(struct_size(v, tx_ring, v_tx_rings), GFP_KERNEL);
- if (!v) {
- err = -ENOMEM;
- goto fail;
- }
-
- priv->int_vector[i] = v;
-
- bdr = &v->rx_ring;
- bdr->index = i;
- bdr->ndev = priv->ndev;
- bdr->dev = priv->dev;
- bdr->bd_count = priv->rx_bd_count;
- bdr->buffer_offset = ENETC_RXB_PAD;
- priv->rx_ring[i] = bdr;
-
- err = xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0);
- if (err) {
- kfree(v);
- goto fail;
- }
+ /* Distribute the remaining TX rings to the first v_remainder
+ * interrupt vectors
+ */
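+ /* e.g. (illustrative) 8 TX rings over 3 vectors gives 3, 3
+ * and 2 rings on vectors 0, 1 and 2 respectively.
+ */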
+ int num_tx_rings = i < v_remainder ? v_tx_rings + 1 : v_tx_rings;
- err = xdp_rxq_info_reg_mem_model(&bdr->xdp.rxq,
- MEM_TYPE_PAGE_SHARED, NULL);
- if (err) {
- xdp_rxq_info_unreg(&bdr->xdp.rxq);
- kfree(v);
+ err = enetc_int_vector_init(priv, i, num_tx_rings);
+ if (err)
goto fail;
- }
-
- /* init defaults for adaptive IC */
- if (priv->ic_mode & ENETC_IC_RX_ADAPTIVE) {
- v->rx_ictt = 0x1;
- v->rx_dim_en = true;
- }
- INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work);
- netif_napi_add(priv->ndev, &v->napi, enetc_poll);
- v->count_tx_rings = v_tx_rings;
-
- for (j = 0; j < v_tx_rings; j++) {
- int idx;
-
- /* default tx ring mapping policy */
- idx = priv->bdr_int_num * j + i;
- __set_bit(idx, &v->tx_rings_map);
- bdr = &v->tx_ring[j];
- bdr->index = idx;
- bdr->ndev = priv->ndev;
- bdr->dev = priv->dev;
- bdr->bd_count = priv->tx_bd_count;
- priv->tx_ring[idx] = bdr;
- }
}
num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
@@ -3067,16 +3580,8 @@ int enetc_alloc_msix(struct enetc_ndev_priv *priv)
return 0;
fail:
- while (i--) {
- struct enetc_int_vector *v = priv->int_vector[i];
- struct enetc_bdr *rx_ring = &v->rx_ring;
-
- xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq);
- xdp_rxq_info_unreg(&rx_ring->xdp.rxq);
- netif_napi_del(&v->napi);
- cancel_work_sync(&v->rx_dim.work);
- kfree(v);
- }
+ while (i--)
+ enetc_int_vector_destroy(priv, i);
pci_free_irq_vectors(pdev);
@@ -3088,26 +3593,8 @@ void enetc_free_msix(struct enetc_ndev_priv *priv)
{
int i;
- for (i = 0; i < priv->bdr_int_num; i++) {
- struct enetc_int_vector *v = priv->int_vector[i];
- struct enetc_bdr *rx_ring = &v->rx_ring;
-
- xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq);
- xdp_rxq_info_unreg(&rx_ring->xdp.rxq);
- netif_napi_del(&v->napi);
- cancel_work_sync(&v->rx_dim.work);
- }
-
- for (i = 0; i < priv->num_rx_rings; i++)
- priv->rx_ring[i] = NULL;
-
- for (i = 0; i < priv->num_tx_rings; i++)
- priv->tx_ring[i] = NULL;
-
- for (i = 0; i < priv->bdr_int_num; i++) {
- kfree(priv->int_vector[i]);
- priv->int_vector[i] = NULL;
- }
+ for (i = 0; i < priv->bdr_int_num; i++)
+ enetc_int_vector_destroy(priv, i);
/* disable all MSIX for this device */
pci_free_irq_vectors(priv->si->pdev);
@@ -3216,5 +3703,75 @@ void enetc_pci_remove(struct pci_dev *pdev)
}
EXPORT_SYMBOL_GPL(enetc_pci_remove);
+static const struct enetc_drvdata enetc_pf_data = {
+ .sysclk_freq = ENETC_CLK_400M,
+ .pmac_offset = ENETC_PMAC_OFFSET,
+ .max_frags = ENETC_MAX_SKB_FRAGS,
+ .eth_ops = &enetc_pf_ethtool_ops,
+};
+
+static const struct enetc_drvdata enetc4_pf_data = {
+ .sysclk_freq = ENETC_CLK_333M,
+ .tx_csum = true,
+ .max_frags = ENETC4_MAX_SKB_FRAGS,
+ .pmac_offset = ENETC4_PMAC_OFFSET,
+ .eth_ops = &enetc4_pf_ethtool_ops,
+};
+
+static const struct enetc_drvdata enetc4_ppm_data = {
+ .sysclk_freq = ENETC_CLK_333M,
+ .tx_csum = true,
+ .max_frags = ENETC4_MAX_SKB_FRAGS,
+ .eth_ops = &enetc4_ppm_ethtool_ops,
+};
+
+static const struct enetc_drvdata enetc_vf_data = {
+ .sysclk_freq = ENETC_CLK_400M,
+ .max_frags = ENETC_MAX_SKB_FRAGS,
+ .eth_ops = &enetc_vf_ethtool_ops,
+};
+
+static const struct enetc_platform_info enetc_info[] = {
+ { .revision = ENETC_REV_1_0,
+ .dev_id = ENETC_DEV_ID_PF,
+ .data = &enetc_pf_data,
+ },
+ { .revision = ENETC_REV_4_1,
+ .dev_id = NXP_ENETC_PF_DEV_ID,
+ .data = &enetc4_pf_data,
+ },
+ { .revision = ENETC_REV_1_0,
+ .dev_id = ENETC_DEV_ID_VF,
+ .data = &enetc_vf_data,
+ },
+ { .revision = ENETC_REV_4_3,
+ .dev_id = NXP_ENETC_PPM_DEV_ID,
+ .data = &enetc4_ppm_data,
+ },
+ { .revision = ENETC_REV_4_3,
+ .dev_id = NXP_ENETC_PF_DEV_ID,
+ .data = &enetc4_pf_data,
+ },
+};
+
+int enetc_get_driver_data(struct enetc_si *si)
+{
+ u16 dev_id = si->pdev->device;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(enetc_info); i++) {
+ if (si->revision == enetc_info[i].revision &&
+ dev_id == enetc_info[i].dev_id) {
+ si->drvdata = enetc_info[i].data;
+
+ return 0;
+ }
+ }
+
+ return -ERANGE;
+}
+EXPORT_SYMBOL_GPL(enetc_get_driver_data);
+
MODULE_DESCRIPTION("NXP ENETC Ethernet driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index a9c2ff22431c..dce27bd67a7d 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -8,12 +8,14 @@
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
+#include <linux/fsl/ntmp.h>
#include <linux/if_vlan.h>
#include <linux/phylink.h>
#include <linux/dim.h>
#include <net/xdp.h>
#include "enetc_hw.h"
+#include "enetc4_hw.h"
#define ENETC_MAC_MAXFRM_SIZE 9600
#define ENETC_MAX_MTU (ENETC_MAC_MAXFRM_SIZE - \
@@ -21,6 +23,18 @@
#define ENETC_CBD_DATA_MEM_ALIGN 64
+#define ENETC_MADDR_HASH_TBL_SZ 64
+
+enum enetc_mac_addr_type {UC, MC, MADDR_TYPE};
+
+struct enetc_mac_filter {
+ union {
+ char mac_addr[ETH_ALEN];
+ DECLARE_BITMAP(mac_hash_table, ENETC_MADDR_HASH_TBL_SZ);
+ };
+ int mac_addr_cnt;
+};
+
struct enetc_tx_swbd {
union {
struct sk_buff *skb;
@@ -40,8 +54,29 @@ struct enetc_tx_swbd {
u8 qbv_en:1;
};
+struct enetc_skb_cb {
+ u8 flag;
+ bool udp;
+ u16 correction_off;
+ u16 origin_tstamp_off;
+};
+
+#define ENETC_SKB_CB(skb) ((struct enetc_skb_cb *)((skb)->cb))
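+/* The control block overlay above must fit within skb->cb (48 bytes).
+ * A compile-time guard along these lines could make that explicit
+ * (sketch only):
+ *
+ *	static_assert(sizeof(struct enetc_skb_cb) <=
+ *		      sizeof_field(struct sk_buff, cb));
+ */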
+
+struct enetc_lso_t {
+ bool ipv6;
+ bool tcp;
+ u8 l3_hdr_len;
+ u8 hdr_len; /* LSO header length */
+ u8 l3_start;
+ u16 lso_seg_size;
+ int total_len; /* total data length, not including the LSO header */
+};
+
+#define ENETC_LSO_MAX_DATA_LEN SZ_256K
+
#define ENETC_RX_MAXFRM_SIZE ENETC_MAC_MAXFRM_SIZE
-#define ENETC_RXB_TRUESIZE 2048 /* PAGE_SIZE >> 1 */
+#define ENETC_RXB_TRUESIZE (PAGE_SIZE >> 1)
#define ENETC_RXB_PAD NET_SKB_PAD /* add extra space if needed */
#define ENETC_RXB_DMA_SIZE \
(SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - ENETC_RXB_PAD)
@@ -58,22 +93,29 @@ struct enetc_rx_swbd {
/* ENETC overhead: optional extension BD + 1 BD gap */
#define ENETC_TXBDS_NEEDED(val) ((val) + 2)
-/* max # of chained Tx BDs is 15, including head and extension BD */
+/* For LS1028A, max # of chained Tx BDs is 15, including head and
+ * extension BD.
+ */
#define ENETC_MAX_SKB_FRAGS 13
-#define ENETC_TXBDS_MAX_NEEDED ENETC_TXBDS_NEEDED(ENETC_MAX_SKB_FRAGS + 1)
+/* For ENETC v4 and later versions, the max # of chained Tx BDs is 63,
+ * including the head and extension BD. Since MAX_SKB_FRAGS only ranges
+ * from 17 to 45, ENETC4_MAX_SKB_FRAGS is simply set to MAX_SKB_FRAGS.
+ */
+#define ENETC4_MAX_SKB_FRAGS MAX_SKB_FRAGS
+#define ENETC_TXBDS_MAX_NEEDED(x) ENETC_TXBDS_NEEDED((x) + 1)
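+/* e.g. on LS1028A (illustrative), ENETC_TXBDS_MAX_NEEDED(ENETC_MAX_SKB_FRAGS)
+ * = ENETC_TXBDS_NEEDED(13 + 1) = 16 BDs for a fully fragmented frame.
+ */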
struct enetc_ring_stats {
- unsigned int packets;
- unsigned int bytes;
- unsigned int rx_alloc_errs;
- unsigned int xdp_drops;
- unsigned int xdp_tx;
- unsigned int xdp_tx_drops;
- unsigned int xdp_redirect;
- unsigned int xdp_redirect_failures;
- unsigned int recycles;
- unsigned int recycle_failures;
- unsigned int win_drop;
+ unsigned long packets;
+ unsigned long bytes;
+ unsigned long rx_alloc_errs;
+ unsigned long xdp_drops;
+ unsigned long xdp_tx;
+ unsigned long xdp_tx_drops;
+ unsigned long xdp_redirect;
+ unsigned long xdp_redirect_failures;
+ unsigned long recycles;
+ unsigned long recycle_failures;
+ unsigned long win_drop;
};
struct enetc_xdp_data {
@@ -184,10 +226,9 @@ static inline union enetc_rx_bd *enetc_rxbd(struct enetc_bdr *rx_ring, int i)
{
int hw_idx = i;
-#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
if (rx_ring->ext_en)
hw_idx = 2 * i;
-#endif
+
return &(((union enetc_rx_bd *)rx_ring->bd_base)[hw_idx]);
}
@@ -199,10 +240,8 @@ static inline void enetc_rxbd_next(struct enetc_bdr *rx_ring,
new_rxbd++;
-#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
if (rx_ring->ext_en)
new_rxbd++;
-#endif
if (unlikely(++new_index == rx_ring->bd_count)) {
new_rxbd = rx_ring->bd_base;
@@ -233,6 +272,35 @@ enum enetc_errata {
#define ENETC_SI_F_PSFP BIT(0)
#define ENETC_SI_F_QBV BIT(1)
#define ENETC_SI_F_QBU BIT(2)
+#define ENETC_SI_F_LSO BIT(3)
+#define ENETC_SI_F_PPM BIT(4) /* pseudo MAC */
+
+struct enetc_drvdata {
+ u32 pmac_offset; /* Only valid for a PSI that supports 802.1Qbu */
+ u8 tx_csum:1;
+ u8 max_frags;
+ u64 sysclk_freq;
+ const struct ethtool_ops *eth_ops;
+};
+
+struct enetc_platform_info {
+ u16 revision;
+ u16 dev_id;
+ const struct enetc_drvdata *data;
+};
+
+struct enetc_si;
+
+/*
+ * This structure defines some common hooks for the ENETC PSI and VSI.
+ * In addition, since the VSI uses struct enetc_si as its only private
+ * driver data, the structure also defines some VSI-specific hooks,
+ * which follow the vf_*() naming format.
+ */
+struct enetc_si_ops {
+ int (*get_rss_table)(struct enetc_si *si, u32 *table, int count);
+ int (*set_rss_table)(struct enetc_si *si, const u32 *table, int count);
+};
/* PCI IEP device data */
struct enetc_si {
@@ -242,18 +310,33 @@ struct enetc_si {
struct net_device *ndev; /* back ref. */
- struct enetc_cbdr cbd_ring;
+ union {
+ struct enetc_cbdr cbd_ring; /* Only ENETC 1.0 */
+ struct ntmp_user ntmp_user; /* ENETC 4.1 and later */
+ };
int num_rx_rings; /* how many rings are available in the SI */
int num_tx_rings;
int num_fs_entries;
int num_rss; /* number of RSS buckets */
unsigned short pad;
+ u16 revision;
int hw_features;
+ const struct enetc_drvdata *drvdata;
+ const struct enetc_si_ops *ops;
+
+ struct workqueue_struct *workqueue;
+ struct work_struct rx_mode_task;
+ struct dentry *debugfs_root;
};
#define ENETC_SI_ALIGN 32
+static inline bool is_enetc_rev1(struct enetc_si *si)
+{
+ return si->pdev->revision == ENETC_REV1;
+}
+
static inline void *enetc_si_priv(const struct enetc_si *si)
{
return (char *)si + ALIGN(sizeof(struct enetc_si), ENETC_SI_ALIGN);
@@ -280,6 +363,11 @@ static inline int enetc_pf_to_port(struct pci_dev *pf_pdev)
}
}
+static inline bool enetc_is_pseudo_mac(struct enetc_si *si)
+{
+ return si->hw_features & ENETC_SI_F_PPM;
+}
+
#define ENETC_MAX_NUM_TXQS 8
#define ENETC_INT_NAME_MAX (IFNAMSIZ + 8)
@@ -305,7 +393,7 @@ struct enetc_cls_rule {
int used;
};
-#define ENETC_MAX_BDR_INT 2 /* fixed to max # of available cpus */
+#define ENETC_MAX_BDR_INT 6 /* fixed to max # of available cpus */
struct psfp_cap {
u32 max_streamid;
u32 max_psfp_filter;
@@ -324,10 +412,13 @@ enum enetc_active_offloads {
ENETC_F_QBV = BIT(9),
ENETC_F_QCI = BIT(10),
ENETC_F_QBU = BIT(11),
+ ENETC_F_TXCSUM = BIT(12),
+ ENETC_F_LSO = BIT(13),
};
enum enetc_flags_bit {
ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS = 0,
+ ENETC_TX_DOWN,
};
/* interrupt coalescing modes */
@@ -343,7 +434,6 @@ enum enetc_ic_mode {
#define ENETC_RXIC_PKTTHR min_t(u32, 256, ENETC_RX_RING_DEFAULT_SIZE / 2)
#define ENETC_TXIC_PKTTHR min_t(u32, 128, ENETC_TX_RING_DEFAULT_SIZE / 2)
-#define ENETC_TXIC_TIMETHR enetc_usecs_to_cycles(600)
struct enetc_ndev_priv {
struct net_device *ndev;
@@ -358,6 +448,7 @@ struct enetc_ndev_priv {
u16 msg_enable;
u8 preemptible_tcs;
+ u8 max_frags; /* The maximum number of BDs for fragments */
enum enetc_active_offloads active_offloads;
@@ -391,6 +482,9 @@ struct enetc_ndev_priv {
* and link state updates
*/
struct mutex mm_lock;
+
+ struct clk *ref_clk; /* RGMII/RMII reference clock */
+ u64 sysclk_freq; /* NETC system clock frequency */
};
/* Messaging */
@@ -405,9 +499,6 @@ struct enetc_msg_cmd_set_primary_mac {
#define ENETC_CBDR_TIMEOUT 1000 /* usecs */
-/* PTP driver exports */
-extern int enetc_phc_index;
-
/* SI common */
u32 enetc_port_mac_rd(struct enetc_si *si, u32 reg);
void enetc_port_mac_wr(struct enetc_si *si, u32 reg, u32 val);
@@ -420,6 +511,10 @@ void enetc_init_si_rings_params(struct enetc_ndev_priv *priv);
int enetc_alloc_si_resources(struct enetc_ndev_priv *priv);
void enetc_free_si_resources(struct enetc_ndev_priv *priv);
int enetc_configure_si(struct enetc_ndev_priv *priv);
+int enetc_get_driver_data(struct enetc_si *si);
+void enetc_add_mac_addr_ht_filter(struct enetc_mac_filter *filter,
+ const unsigned char *addr);
+void enetc_reset_mac_addr_filter(struct enetc_mac_filter *filter);
int enetc_open(struct net_device *ndev);
int enetc_close(struct net_device *ndev);
@@ -435,7 +530,18 @@ int enetc_setup_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
struct xdp_frame **frames, u32 flags);
+int enetc_hwtstamp_get(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config);
+int enetc_hwtstamp_set(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
+
/* ethtool */
+extern const struct ethtool_ops enetc_pf_ethtool_ops;
+extern const struct ethtool_ops enetc4_pf_ethtool_ops;
+extern const struct ethtool_ops enetc_vf_ethtool_ops;
+extern const struct ethtool_ops enetc4_ppm_ethtool_ops;
+
void enetc_set_ethtool_ops(struct net_device *ndev);
void enetc_mm_link_state_update(struct enetc_ndev_priv *priv, bool link);
void enetc_mm_commit_preemptible_tcs(struct enetc_ndev_priv *priv);
@@ -444,15 +550,19 @@ void enetc_mm_commit_preemptible_tcs(struct enetc_ndev_priv *priv);
int enetc_setup_cbdr(struct device *dev, struct enetc_hw *hw, int bd_count,
struct enetc_cbdr *cbdr);
void enetc_teardown_cbdr(struct enetc_cbdr *cbdr);
+int enetc4_setup_cbdr(struct enetc_si *si);
+void enetc4_teardown_cbdr(struct enetc_si *si);
int enetc_set_mac_flt_entry(struct enetc_si *si, int index,
char *mac_addr, int si_map);
int enetc_clear_mac_flt_entry(struct enetc_si *si, int index);
int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
int index);
-void enetc_set_rss_key(struct enetc_hw *hw, const u8 *bytes);
+void enetc_set_rss_key(struct enetc_si *si, const u8 *bytes);
int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count);
int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count);
int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd);
+int enetc4_get_rss_table(struct enetc_si *si, u32 *table, int count);
+int enetc4_set_rss_table(struct enetc_si *si, const u32 *table, int count);
static inline void *enetc_cbd_alloc_data_mem(struct enetc_si *si,
struct enetc_cbd *cbd,
@@ -493,6 +603,14 @@ static inline void enetc_cbd_free_data_mem(struct enetc_si *si, int size,
void enetc_reset_ptcmsdur(struct enetc_hw *hw);
void enetc_set_ptcmsdur(struct enetc_hw *hw, u32 *queue_max_sdu);
+static inline bool enetc_ptp_clock_is_enabled(struct enetc_si *si)
+{
+ if (is_enetc_rev1(si))
+ return IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK);
+
+ return IS_ENABLED(CONFIG_PTP_NETC_V4_TIMER);
+}
+
#ifdef CONFIG_FSL_ENETC_QOS
int enetc_qos_query_caps(struct net_device *ndev, void *type_data);
int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_debugfs.c b/drivers/net/ethernet/freescale/enetc/enetc4_debugfs.c
new file mode 100644
index 000000000000..1b1591dce73d
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc4_debugfs.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright 2025 NXP */
+
+#include <linux/device.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/string_choices.h>
+
+#include "enetc_pf.h"
+#include "enetc4_debugfs.h"
+
+static void enetc_show_si_mac_hash_filter(struct seq_file *s, int i)
+{
+ struct enetc_si *si = s->private;
+ struct enetc_hw *hw = &si->hw;
+ u32 hash_h, hash_l;
+
+ hash_l = enetc_port_rd(hw, ENETC4_PSIUMHFR0(i));
+ hash_h = enetc_port_rd(hw, ENETC4_PSIUMHFR1(i));
+ seq_printf(s, "SI %d unicast MAC hash filter: 0x%08x%08x\n",
+ i, hash_h, hash_l);
+
+ hash_l = enetc_port_rd(hw, ENETC4_PSIMMHFR0(i));
+ hash_h = enetc_port_rd(hw, ENETC4_PSIMMHFR1(i));
+ seq_printf(s, "SI %d multicast MAC hash filter: 0x%08x%08x\n",
+ i, hash_h, hash_l);
+}
+
+static int enetc_mac_filter_show(struct seq_file *s, void *data)
+{
+ struct enetc_si *si = s->private;
+ struct enetc_hw *hw = &si->hw;
+ struct maft_entry_data maft;
+ struct enetc_pf *pf;
+ int i, err, num_si;
+ u32 val;
+
+ pf = enetc_si_priv(si);
+ num_si = pf->caps.num_vsi + 1;
+
+ val = enetc_port_rd(hw, ENETC4_PSIPMMR);
+ for (i = 0; i < num_si; i++) {
+ seq_printf(s, "SI %d Unicast Promiscuous mode: %s\n", i,
+ str_enabled_disabled(PSIPMMR_SI_MAC_UP(i) & val));
+ seq_printf(s, "SI %d Multicast Promiscuous mode: %s\n", i,
+ str_enabled_disabled(PSIPMMR_SI_MAC_MP(i) & val));
+ }
+
+ /* MAC hash filter table */
+ for (i = 0; i < num_si; i++)
+ enetc_show_si_mac_hash_filter(s, i);
+
+ if (!pf->num_mfe)
+ return 0;
+
+ /* MAC address filter table */
+ seq_puts(s, "MAC address filter table\n");
+ for (i = 0; i < pf->num_mfe; i++) {
+ memset(&maft, 0, sizeof(maft));
+ err = ntmp_maft_query_entry(&si->ntmp_user, i, &maft);
+ if (err)
+ return err;
+
+ seq_printf(s, "Entry %d, MAC: %pM, SI bitmap: 0x%04x\n", i,
+ maft.keye.mac_addr, le16_to_cpu(maft.cfge.si_bitmap));
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(enetc_mac_filter);
+
+void enetc_create_debugfs(struct enetc_si *si)
+{
+ struct net_device *ndev = si->ndev;
+ struct dentry *root;
+
+ root = debugfs_create_dir(netdev_name(ndev), NULL);
+ if (IS_ERR(root))
+ return;
+
+ si->debugfs_root = root;
+
+ debugfs_create_file("mac_filter", 0444, root, si, &enetc_mac_filter_fops);
+}
+
+void enetc_remove_debugfs(struct enetc_si *si)
+{
+ debugfs_remove(si->debugfs_root);
+ si->debugfs_root = NULL;
+}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_debugfs.h b/drivers/net/ethernet/freescale/enetc/enetc4_debugfs.h
new file mode 100644
index 000000000000..96caca35f79d
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc4_debugfs.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2025 NXP */
+
+#ifndef __ENETC4_DEBUGFS_H
+#define __ENETC4_DEBUGFS_H
+
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+void enetc_create_debugfs(struct enetc_si *si);
+void enetc_remove_debugfs(struct enetc_si *si);
+#else
+static inline void enetc_create_debugfs(struct enetc_si *si)
+{
+}
+
+static inline void enetc_remove_debugfs(struct enetc_si *si)
+{
+}
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_hw.h b/drivers/net/ethernet/freescale/enetc/enetc4_hw.h
new file mode 100644
index 000000000000..3ed0f7a02767
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc4_hw.h
@@ -0,0 +1,232 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * This header file defines the register offsets and bit fields of the
+ * ENETC4 PF and VFs. Registers that are identical to those of ENETC
+ * version 1.0 are defined in enetc_hw.h.
+ *
+ * Copyright 2024 NXP
+ */
+#ifndef __ENETC4_HW_H_
+#define __ENETC4_HW_H_
+
+#define NXP_ENETC_VENDOR_ID 0x1131
+#define NXP_ENETC_PF_DEV_ID 0xe101
+#define NXP_ENETC_PPM_DEV_ID 0xe110
+
+/**********************Station interface registers************************/
+/* Station interface LSO segmentation flag mask register 0/1 */
+#define ENETC4_SILSOSFMR0 0x1300
+#define SILSOSFMR0_TCP_MID_SEG GENMASK(27, 16)
+#define SILSOSFMR0_TCP_1ST_SEG GENMASK(11, 0)
+#define SILSOSFMR0_VAL_SET(first, mid) (FIELD_PREP(SILSOSFMR0_TCP_MID_SEG, mid) | \
+ FIELD_PREP(SILSOSFMR0_TCP_1ST_SEG, first))
+
+#define ENETC4_SILSOSFMR1 0x1304
+#define SILSOSFMR1_TCP_LAST_SEG GENMASK(11, 0)
+#define ENETC4_TCP_FLAGS_FIN BIT(0)
+#define ENETC4_TCP_FLAGS_SYN BIT(1)
+#define ENETC4_TCP_FLAGS_RST BIT(2)
+#define ENETC4_TCP_FLAGS_PSH BIT(3)
+#define ENETC4_TCP_FLAGS_ACK BIT(4)
+#define ENETC4_TCP_FLAGS_URG BIT(5)
+#define ENETC4_TCP_FLAGS_ECE BIT(6)
+#define ENETC4_TCP_FLAGS_CWR BIT(7)
+#define ENETC4_TCP_FLAGS_NS BIT(8)
+/* According to tso_build_hdr(), clear all special flags for all but the last segment. */
+#define ENETC4_TCP_NL_SEG_FLAGS_DMASK (ENETC4_TCP_FLAGS_FIN | \
+ ENETC4_TCP_FLAGS_RST | ENETC4_TCP_FLAGS_PSH)
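+/* With the flag bits above, ENETC4_TCP_NL_SEG_FLAGS_DMASK evaluates to
+ * BIT(0) | BIT(2) | BIT(3) = 0x00d.
+ */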
+
+/***************************ENETC port registers**************************/
+#define ENETC4_ECAPR0 0x0
+#define ECAPR0_RFS BIT(2)
+#define ECAPR0_TSD BIT(5)
+#define ECAPR0_RSS BIT(8)
+#define ECAPR0_RSC BIT(9)
+#define ECAPR0_LSO BIT(10)
+#define ECAPR0_WO BIT(13)
+
+#define ENETC4_ECAPR1 0x4
+#define ECAPR1_NUM_TCS GENMASK(6, 4)
+#define ECAPR1_NUM_MCH GENMASK(9, 8)
+#define ECAPR1_NUM_UCH GENMASK(11, 10)
+#define ECAPR1_NUM_MSIX GENMASK(22, 12)
+#define ECAPR1_NUM_VSI GENMASK(27, 24)
+#define ECAPR1_NUM_IPV BIT(31)
+
+#define ENETC4_ECAPR2 0x8
+#define ECAPR2_NUM_TX_BDR GENMASK(9, 0)
+#define ECAPR2_NUM_RX_BDR GENMASK(25, 16)
+
+#define ENETC4_PMR 0x10
+#define PMR_SI_EN(a) BIT((16 + (a)))
+
+/* Port Pause ON/OFF threshold register */
+#define ENETC4_PPAUONTR 0x108
+#define ENETC4_PPAUOFFTR 0x10c
+
+/* Port Station interface promiscuous MAC mode register */
+#define ENETC4_PSIPMMR 0x200
+#define PSIPMMR_SI_MAC_UP(a) BIT(a) /* a = SI index */
+#define PSIPMMR_SI_MAC_MP(a) BIT((a) + 16)
+
+/* Port Station interface promiscuous VLAN mode register */
+#define ENETC4_PSIPVMR 0x204
+
+/* Port RSS key register n. n = 0,1,2,...,9 */
+#define ENETC4_PRSSKR(n) ((n) * 0x4 + 0x250)
+
+/* Port station interface MAC address filtering capability register */
+#define ENETC4_PSIMAFCAPR 0x280
+#define PSIMAFCAPR_NUM_MAC_AFTE GENMASK(11, 0)
+
+/* Port station interface VLAN filtering capability register */
+#define ENETC4_PSIVLANFCAPR 0x2c0
+#define PSIVLANFCAPR_NUM_VLAN_FTE GENMASK(11, 0)
+
+/* Port station interface VLAN filtering mode register */
+#define ENETC4_PSIVLANFMR 0x2c4
+#define PSIVLANFMR_VS BIT(0)
+
+/* Port Station interface a primary MAC address registers */
+#define ENETC4_PSIPMAR0(a) ((a) * 0x80 + 0x2000)
+#define ENETC4_PSIPMAR1(a) ((a) * 0x80 + 0x2004)
+
+/* Port station interface a configuration register 0/2 */
+#define ENETC4_PSICFGR0(a) ((a) * 0x80 + 0x2010)
+#define PSICFGR0_VASE BIT(13)
+#define PSICFGR0_ASE BIT(15)
+#define PSICFGR0_ANTI_SPOOFING (PSICFGR0_VASE | PSICFGR0_ASE)
+
+#define ENETC4_PSICFGR2(a) ((a) * 0x80 + 0x2018)
+#define PSICFGR2_NUM_MSIX GENMASK(5, 0)
+
+/* Port station interface a unicast MAC hash filter register 0/1 */
+#define ENETC4_PSIUMHFR0(a) ((a) * 0x80 + 0x2050)
+#define ENETC4_PSIUMHFR1(a) ((a) * 0x80 + 0x2054)
+
+/* Port station interface a multicast MAC hash filter register 0/1 */
+#define ENETC4_PSIMMHFR0(a) ((a) * 0x80 + 0x2058)
+#define ENETC4_PSIMMHFR1(a) ((a) * 0x80 + 0x205c)
+
+/* Port station interface a VLAN hash filter register 0/1 */
+#define ENETC4_PSIVHFR0(a) ((a) * 0x80 + 0x2060)
+#define ENETC4_PSIVHFR1(a) ((a) * 0x80 + 0x2064)
+
+#define ENETC4_PMCAPR 0x4004
+#define PMCAPR_HD BIT(8)
+#define PMCAPR_FP GENMASK(10, 9)
+
+/* Port capability register */
+#define ENETC4_PCAPR 0x4000
+#define PCAPR_LINK_TYPE BIT(4)
+
+/* Port configuration register */
+#define ENETC4_PCR 0x4010
+#define PCR_HDR_FMT BIT(0)
+#define PCR_L2DOSE BIT(4)
+#define PCR_TIMER_CS BIT(8)
+#define PCR_PSPEED GENMASK(29, 16)
+#define PCR_PSPEED_VAL(speed) (((speed) / 10 - 1) << 16)
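+/* e.g. PCR_PSPEED_VAL(1000) = (1000 / 10 - 1) << 16 = 99 << 16, i.e.
+ * 0x63 in the PCR_PSPEED field (illustrative).
+ */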
+
+/* Port MAC address register 0/1 */
+#define ENETC4_PMAR0 0x4020
+#define ENETC4_PMAR1 0x4024
+
+/* Port operational register */
+#define ENETC4_POR 0x4100
+
+/* Port traffic class a transmit maximum SDU register */
+#define ENETC4_PTCTMSDUR(a) ((a) * 0x20 + 0x4208)
+#define PTCTMSDUR_MAXSDU GENMASK(15, 0)
+#define PTCTMSDUR_SDU_TYPE GENMASK(17, 16)
+#define SDU_TYPE_PPDU 0
+#define SDU_TYPE_MPDU 1
+#define SDU_TYPE_MSDU 2
+
+#define ENETC4_PMAC_OFFSET 0x400
+#define ENETC4_PM_CMD_CFG(mac) (0x5008 + (mac) * 0x400)
+#define PM_CMD_CFG_TX_EN BIT(0)
+#define PM_CMD_CFG_RX_EN BIT(1)
+#define PM_CMD_CFG_PAUSE_FWD BIT(7)
+#define PM_CMD_CFG_PAUSE_IGN BIT(8)
+#define PM_CMD_CFG_TX_ADDR_INS BIT(9)
+#define PM_CMD_CFG_LOOP_EN BIT(10)
+#define PM_CMD_CFG_LPBK_MODE GENMASK(12, 11)
+#define LPBCK_MODE_EXT_TX_CLK 0
+#define LPBCK_MODE_MAC_LEVEL 1
+#define LPBCK_MODE_INT_TX_CLK 2
+#define PM_CMD_CFG_CNT_FRM_EN BIT(13)
+#define PM_CMD_CFG_TXP BIT(15)
+#define PM_CMD_CFG_SEND_IDLE BIT(16)
+#define PM_CMD_CFG_HD_FCEN BIT(18)
+#define PM_CMD_CFG_SFD BIT(21)
+#define PM_CMD_CFG_TX_FLUSH BIT(22)
+#define PM_CMD_CFG_TX_LOWP_EN BIT(23)
+#define PM_CMD_CFG_RX_LOWP_EMPTY BIT(24)
+#define PM_CMD_CFG_SWR BIT(26)
+#define PM_CMD_CFG_TS_MODE BIT(30)
+#define PM_CMD_CFG_MG BIT(31)
+
+/* Port MAC 0/1 Maximum Frame Length Register */
+#define ENETC4_PM_MAXFRM(mac) (0x5014 + (mac) * 0x400)
+
+/* Port internal MDIO base address, used to access the PCS */
+#define ENETC4_PM_IMDIO_BASE 0x5030
+
+/* Port MAC 0/1 Pause Quanta Register */
+#define ENETC4_PM_PAUSE_QUANTA(mac) (0x5054 + (mac) * 0x400)
+
+/* Port MAC 0/1 Pause Quanta Threshold Register */
+#define ENETC4_PM_PAUSE_THRESH(mac) (0x5064 + (mac) * 0x400)
+
+#define ENETC4_PM_SINGLE_STEP(mac) (0x50c0 + (mac) * 0x400)
+#define PM_SINGLE_STEP_CH BIT(6)
+#define PM_SINGLE_STEP_OFFSET GENMASK(15, 7)
+#define PM_SINGLE_STEP_OFFSET_SET(o) FIELD_PREP(PM_SINGLE_STEP_OFFSET, o)
+#define PM_SINGLE_STEP_EN BIT(31)
+
+/* Port MAC 0 Interface Mode Control Register */
+#define ENETC4_PM_IF_MODE(mac) (0x5300 + (mac) * 0x400)
+#define PM_IF_MODE_IFMODE GENMASK(2, 0)
+#define IFMODE_XGMII 0
+#define IFMODE_RMII 3
+#define IFMODE_RGMII 4
+#define IFMODE_SGMII 5
+#define PM_IF_MODE_REVMII BIT(3)
+#define PM_IF_MODE_M10 BIT(4)
+#define PM_IF_MODE_HD BIT(6)
+#define PM_IF_MODE_SSP GENMASK(14, 13)
+#define SSP_100M 0
+#define SSP_10M 1
+#define SSP_1G 2
+#define PM_IF_MODE_ENA BIT(15)
+
+/* Port external MDIO base address, used to access the off-chip PHY */
+#define ENETC4_EMDIO_BASE 0x5c00
+
+/**********************ENETC Pseudo MAC port registers************************/
+/* Port pseudo MAC receive octets counter (64-bit) */
+#define ENETC4_PPMROCR 0x5080
+
+/* Port pseudo MAC receive unicast frame counter register (64-bit) */
+#define ENETC4_PPMRUFCR 0x5088
+
+/* Port pseudo MAC receive multicast frame counter register (64-bit) */
+#define ENETC4_PPMRMFCR 0x5090
+
+/* Port pseudo MAC receive broadcast frame counter register (64-bit) */
+#define ENETC4_PPMRBFCR 0x5098
+
+/* Port pseudo MAC transmit octets counter (64-bit) */
+#define ENETC4_PPMTOCR 0x50c0
+
+/* Port pseudo MAC transmit unicast frame counter register (64-bit) */
+#define ENETC4_PPMTUFCR 0x50c8
+
+/* Port pseudo MAC transmit multicast frame counter register (64-bit) */
+#define ENETC4_PPMTMFCR 0x50d0
+
+/* Port pseudo MAC transmit broadcast frame counter register (64-bit) */
+#define ENETC4_PPMTBFCR 0x50d8
+
+#endif
diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_pf.c b/drivers/net/ethernet/freescale/enetc/enetc4_pf.c
new file mode 100644
index 000000000000..498346dd996a
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc4_pf.c
@@ -0,0 +1,1102 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2024 NXP */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include <linux/unaligned.h>
+
+#include "enetc_pf_common.h"
+#include "enetc4_debugfs.h"
+
+#define ENETC_SI_MAX_RING_NUM 8
+
+#define ENETC_MAC_FILTER_TYPE_UC BIT(0)
+#define ENETC_MAC_FILTER_TYPE_MC BIT(1)
+#define ENETC_MAC_FILTER_TYPE_ALL (ENETC_MAC_FILTER_TYPE_UC | \
+ ENETC_MAC_FILTER_TYPE_MC)
+
+struct enetc_mac_addr {
+ u8 addr[ETH_ALEN];
+};
+
+static void enetc4_get_port_caps(struct enetc_pf *pf)
+{
+ struct enetc_hw *hw = &pf->si->hw;
+ u32 val;
+
+ val = enetc_port_rd(hw, ENETC4_ECAPR1);
+ pf->caps.num_vsi = (val & ECAPR1_NUM_VSI) >> 24;
+ pf->caps.num_msix = ((val & ECAPR1_NUM_MSIX) >> 12) + 1;
+
+ val = enetc_port_rd(hw, ENETC4_ECAPR2);
+ pf->caps.num_rx_bdr = (val & ECAPR2_NUM_RX_BDR) >> 16;
+ pf->caps.num_tx_bdr = val & ECAPR2_NUM_TX_BDR;
+
+ val = enetc_port_rd(hw, ENETC4_PMCAPR);
+ pf->caps.half_duplex = (val & PMCAPR_HD) ? 1 : 0;
+
+ val = enetc_port_rd(hw, ENETC4_PSIMAFCAPR);
+ pf->caps.mac_filter_num = val & PSIMAFCAPR_NUM_MAC_AFTE;
+}
+
+static void enetc4_get_psi_hw_features(struct enetc_si *si)
+{
+ struct enetc_hw *hw = &si->hw;
+ u32 val;
+
+ val = enetc_port_rd(hw, ENETC4_PCAPR);
+ if (val & PCAPR_LINK_TYPE)
+ si->hw_features |= ENETC_SI_F_PPM;
+}
+
+static void enetc4_pf_set_si_primary_mac(struct enetc_hw *hw, int si,
+ const u8 *addr)
+{
+ u16 lower = get_unaligned_le16(addr + 4);
+ u32 upper = get_unaligned_le32(addr);
+
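+	/* The first four bytes of the MAC address go into the MAR0 register
+	 * and the last two into MAR1. SI 0 (the PF itself) uses the port-level
+	 * PMAR registers, the other SIs use their per-SI PSIPMAR registers.
+	 */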
+ if (si != 0) {
+ __raw_writel(upper, hw->port + ENETC4_PSIPMAR0(si));
+ __raw_writew(lower, hw->port + ENETC4_PSIPMAR1(si));
+ } else {
+ __raw_writel(upper, hw->port + ENETC4_PMAR0);
+ __raw_writew(lower, hw->port + ENETC4_PMAR1);
+ }
+}
+
+static void enetc4_pf_get_si_primary_mac(struct enetc_hw *hw, int si,
+ u8 *addr)
+{
+ u32 upper;
+ u16 lower;
+
+ upper = __raw_readl(hw->port + ENETC4_PSIPMAR0(si));
+ lower = __raw_readw(hw->port + ENETC4_PSIPMAR1(si));
+
+ put_unaligned_le32(upper, addr);
+ put_unaligned_le16(lower, addr + 4);
+}
+
+static void enetc4_pf_set_si_mac_promisc(struct enetc_hw *hw, int si,
+ bool uc_promisc, bool mc_promisc)
+{
+ u32 val = enetc_port_rd(hw, ENETC4_PSIPMMR);
+
+ if (uc_promisc)
+ val |= PSIPMMR_SI_MAC_UP(si);
+ else
+ val &= ~PSIPMMR_SI_MAC_UP(si);
+
+ if (mc_promisc)
+ val |= PSIPMMR_SI_MAC_MP(si);
+ else
+ val &= ~PSIPMMR_SI_MAC_MP(si);
+
+ enetc_port_wr(hw, ENETC4_PSIPMMR, val);
+}
+
+static void enetc4_pf_set_si_uc_hash_filter(struct enetc_hw *hw, int si,
+ u64 hash)
+{
+ enetc_port_wr(hw, ENETC4_PSIUMHFR0(si), lower_32_bits(hash));
+ enetc_port_wr(hw, ENETC4_PSIUMHFR1(si), upper_32_bits(hash));
+}
+
+static void enetc4_pf_set_si_mc_hash_filter(struct enetc_hw *hw, int si,
+ u64 hash)
+{
+ enetc_port_wr(hw, ENETC4_PSIMMHFR0(si), lower_32_bits(hash));
+ enetc_port_wr(hw, ENETC4_PSIMMHFR1(si), upper_32_bits(hash));
+}
+
+static void enetc4_pf_set_loopback(struct net_device *ndev, bool en)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_si *si = priv->si;
+ u32 val;
+
+ val = enetc_port_mac_rd(si, ENETC4_PM_CMD_CFG(0));
+ val = u32_replace_bits(val, en ? 1 : 0, PM_CMD_CFG_LOOP_EN);
+ /* Default to select MAC level loopback mode if loopback is enabled. */
+ val = u32_replace_bits(val, en ? LPBCK_MODE_MAC_LEVEL : 0,
+ PM_CMD_CFG_LPBK_MODE);
+
+ enetc_port_mac_wr(si, ENETC4_PM_CMD_CFG(0), val);
+}
+
+static void enetc4_pf_clear_maft_entries(struct enetc_pf *pf)
+{
+ int i;
+
+ for (i = 0; i < pf->num_mfe; i++)
+ ntmp_maft_delete_entry(&pf->si->ntmp_user, i);
+
+ pf->num_mfe = 0;
+}
+
+static int enetc4_pf_add_maft_entries(struct enetc_pf *pf,
+ struct enetc_mac_addr *mac,
+ int mac_cnt)
+{
+ struct maft_entry_data maft = {};
+ u16 si_bit = BIT(0);
+ int i, err;
+
+ maft.cfge.si_bitmap = cpu_to_le16(si_bit);
+ for (i = 0; i < mac_cnt; i++) {
+ ether_addr_copy(maft.keye.mac_addr, mac[i].addr);
+ err = ntmp_maft_add_entry(&pf->si->ntmp_user, i, &maft);
+ if (unlikely(err)) {
+ pf->num_mfe = i;
+ goto clear_maft_entries;
+ }
+ }
+
+ pf->num_mfe = mac_cnt;
+
+ return 0;
+
+clear_maft_entries:
+ enetc4_pf_clear_maft_entries(pf);
+
+ return err;
+}
+
+static int enetc4_pf_set_uc_exact_filter(struct enetc_pf *pf)
+{
+ int max_num_mfe = pf->caps.mac_filter_num;
+ struct enetc_mac_filter mac_filter = {};
+ struct net_device *ndev = pf->si->ndev;
+ struct enetc_hw *hw = &pf->si->hw;
+ struct enetc_mac_addr *mac_tbl;
+ struct netdev_hw_addr *ha;
+ int i = 0, err;
+ int mac_cnt;
+
+ netif_addr_lock_bh(ndev);
+
+ mac_cnt = netdev_uc_count(ndev);
+ if (!mac_cnt) {
+ netif_addr_unlock_bh(ndev);
+ /* clear both MAC hash and exact filters */
+ enetc4_pf_set_si_uc_hash_filter(hw, 0, 0);
+ enetc4_pf_clear_maft_entries(pf);
+
+ return 0;
+ }
+
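+	/* Too many addresses for the exact-match table: return -ENOSPC so
+	 * the caller falls back to unicast hash filtering.
+	 */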
+ if (mac_cnt > max_num_mfe) {
+ err = -ENOSPC;
+ goto unlock_netif_addr;
+ }
+
+ mac_tbl = kcalloc(mac_cnt, sizeof(*mac_tbl), GFP_ATOMIC);
+ if (!mac_tbl) {
+ err = -ENOMEM;
+ goto unlock_netif_addr;
+ }
+
+ netdev_for_each_uc_addr(ha, ndev) {
+ enetc_add_mac_addr_ht_filter(&mac_filter, ha->addr);
+ ether_addr_copy(mac_tbl[i++].addr, ha->addr);
+ }
+
+ netif_addr_unlock_bh(ndev);
+
+	/* Install a temporary unicast hash filter to avoid Rx loss while
+	 * the MAC address filter table is being rewritten.
+	 */
+ enetc4_pf_set_si_uc_hash_filter(hw, 0, *mac_filter.mac_hash_table);
+ enetc4_pf_clear_maft_entries(pf);
+
+ if (!enetc4_pf_add_maft_entries(pf, mac_tbl, i))
+ enetc4_pf_set_si_uc_hash_filter(hw, 0, 0);
+
+ kfree(mac_tbl);
+
+ return 0;
+
+unlock_netif_addr:
+ netif_addr_unlock_bh(ndev);
+
+ return err;
+}
+
+static void enetc4_pf_set_mac_hash_filter(struct enetc_pf *pf, int type)
+{
+ struct net_device *ndev = pf->si->ndev;
+ struct enetc_mac_filter *mac_filter;
+ struct enetc_hw *hw = &pf->si->hw;
+ struct netdev_hw_addr *ha;
+
+ netif_addr_lock_bh(ndev);
+ if (type & ENETC_MAC_FILTER_TYPE_UC) {
+ mac_filter = &pf->mac_filter[UC];
+ enetc_reset_mac_addr_filter(mac_filter);
+ netdev_for_each_uc_addr(ha, ndev)
+ enetc_add_mac_addr_ht_filter(mac_filter, ha->addr);
+
+ enetc4_pf_set_si_uc_hash_filter(hw, 0,
+ *mac_filter->mac_hash_table);
+ }
+
+ if (type & ENETC_MAC_FILTER_TYPE_MC) {
+ mac_filter = &pf->mac_filter[MC];
+ enetc_reset_mac_addr_filter(mac_filter);
+ netdev_for_each_mc_addr(ha, ndev)
+ enetc_add_mac_addr_ht_filter(mac_filter, ha->addr);
+
+ enetc4_pf_set_si_mc_hash_filter(hw, 0,
+ *mac_filter->mac_hash_table);
+ }
+ netif_addr_unlock_bh(ndev);
+}
+
+static void enetc4_pf_set_mac_filter(struct enetc_pf *pf, int type)
+{
+	/* The MAC address filter table (MAFT) currently has only 4 entries,
+	 * and the default network configuration already installs several
+	 * multicast addresses, so the MAFT is reserved for unicast filtering.
+	 * If the number of unicast addresses exceeds the table capacity, the
+	 * driver falls back to the MAC hash filter.
+	 */
+ if (type & ENETC_MAC_FILTER_TYPE_UC && enetc4_pf_set_uc_exact_filter(pf)) {
+ /* Fall back to the MAC hash filter */
+ enetc4_pf_set_mac_hash_filter(pf, ENETC_MAC_FILTER_TYPE_UC);
+ /* Clear the old MAC exact filter */
+ enetc4_pf_clear_maft_entries(pf);
+ }
+
+ if (type & ENETC_MAC_FILTER_TYPE_MC)
+ enetc4_pf_set_mac_hash_filter(pf, ENETC_MAC_FILTER_TYPE_MC);
+}
+
+static const struct enetc_pf_ops enetc4_pf_ops = {
+ .set_si_primary_mac = enetc4_pf_set_si_primary_mac,
+ .get_si_primary_mac = enetc4_pf_get_si_primary_mac,
+};
+
+static int enetc4_pf_struct_init(struct enetc_si *si)
+{
+ struct enetc_pf *pf = enetc_si_priv(si);
+
+ pf->si = si;
+ pf->total_vfs = pci_sriov_get_totalvfs(si->pdev);
+ pf->ops = &enetc4_pf_ops;
+
+ enetc4_get_port_caps(pf);
+ enetc4_get_psi_hw_features(si);
+
+ return 0;
+}
+
+static u32 enetc4_psicfgr0_val_construct(bool is_vf, u32 num_tx_bdr, u32 num_rx_bdr)
+{
+ u32 val;
+
+ val = ENETC_PSICFGR0_SET_TXBDR(num_tx_bdr);
+ val |= ENETC_PSICFGR0_SET_RXBDR(num_rx_bdr);
+ val |= ENETC_PSICFGR0_SIVC(ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S);
+
+ if (is_vf)
+ val |= ENETC_PSICFGR0_VTE | ENETC_PSICFGR0_SIVIE;
+
+ return val;
+}
+
+static void enetc4_default_rings_allocation(struct enetc_pf *pf)
+{
+ struct enetc_hw *hw = &pf->si->hw;
+ u32 num_rx_bdr, num_tx_bdr, val;
+ u32 vf_tx_bdr, vf_rx_bdr;
+ int i, rx_rem, tx_rem;
+
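+	/* Give the PF (SI 0) up to ENETC_SI_MAX_RING_NUM rings, but never
+	 * take away the rings needed to leave at least one per VSI.
+	 */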
+ if (pf->caps.num_rx_bdr < ENETC_SI_MAX_RING_NUM + pf->caps.num_vsi)
+ num_rx_bdr = pf->caps.num_rx_bdr - pf->caps.num_vsi;
+ else
+ num_rx_bdr = ENETC_SI_MAX_RING_NUM;
+
+ if (pf->caps.num_tx_bdr < ENETC_SI_MAX_RING_NUM + pf->caps.num_vsi)
+ num_tx_bdr = pf->caps.num_tx_bdr - pf->caps.num_vsi;
+ else
+ num_tx_bdr = ENETC_SI_MAX_RING_NUM;
+
+ val = enetc4_psicfgr0_val_construct(false, num_tx_bdr, num_rx_bdr);
+ enetc_port_wr(hw, ENETC4_PSICFGR0(0), val);
+
+ num_rx_bdr = pf->caps.num_rx_bdr - num_rx_bdr;
+ rx_rem = num_rx_bdr % pf->caps.num_vsi;
+ num_rx_bdr = num_rx_bdr / pf->caps.num_vsi;
+
+ num_tx_bdr = pf->caps.num_tx_bdr - num_tx_bdr;
+ tx_rem = num_tx_bdr % pf->caps.num_vsi;
+ num_tx_bdr = num_tx_bdr / pf->caps.num_vsi;
+
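+	/* Spread the remaining rings evenly across the VSIs; any remainder
+	 * goes to the lowest-numbered VSIs, one extra ring each.
+	 */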
+ for (i = 0; i < pf->caps.num_vsi; i++) {
+ vf_tx_bdr = (i < tx_rem) ? num_tx_bdr + 1 : num_tx_bdr;
+ vf_rx_bdr = (i < rx_rem) ? num_rx_bdr + 1 : num_rx_bdr;
+ val = enetc4_psicfgr0_val_construct(true, vf_tx_bdr, vf_rx_bdr);
+ enetc_port_wr(hw, ENETC4_PSICFGR0(i + 1), val);
+ }
+}
+
+static void enetc4_allocate_si_rings(struct enetc_pf *pf)
+{
+ enetc4_default_rings_allocation(pf);
+}
+
+static void enetc4_pf_set_si_vlan_promisc(struct enetc_hw *hw, int si, bool en)
+{
+ u32 val = enetc_port_rd(hw, ENETC4_PSIPVMR);
+
+ if (en)
+ val |= BIT(si);
+ else
+ val &= ~BIT(si);
+
+ enetc_port_wr(hw, ENETC4_PSIPVMR, val);
+}
+
+static void enetc4_set_default_si_vlan_promisc(struct enetc_pf *pf)
+{
+ struct enetc_hw *hw = &pf->si->hw;
+ int num_si = pf->caps.num_vsi + 1;
+ int i;
+
+ /* enforce VLAN promiscuous mode for all SIs */
+ for (i = 0; i < num_si; i++)
+ enetc4_pf_set_si_vlan_promisc(hw, i, true);
+}
+
+/* Assign the number of MSI-X vectors to each SI. */
+static void enetc4_set_si_msix_num(struct enetc_pf *pf)
+{
+ struct enetc_hw *hw = &pf->si->hw;
+ int i, num_msix, total_si;
+ u32 val;
+
+ total_si = pf->caps.num_vsi + 1;
+
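+	/* The PF (SI 0) takes an even share of the MSI-X vectors plus the
+	 * remainder; each PSICFGR2 field holds the vector count minus one,
+	 * mirroring the N-1 encoding read back from ECAPR1.
+	 */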
+ num_msix = pf->caps.num_msix / total_si +
+ pf->caps.num_msix % total_si - 1;
+ val = num_msix & PSICFGR2_NUM_MSIX;
+ enetc_port_wr(hw, ENETC4_PSICFGR2(0), val);
+
+ num_msix = pf->caps.num_msix / total_si - 1;
+ val = num_msix & PSICFGR2_NUM_MSIX;
+ for (i = 0; i < pf->caps.num_vsi; i++)
+ enetc_port_wr(hw, ENETC4_PSICFGR2(i + 1), val);
+}
+
+static void enetc4_enable_all_si(struct enetc_pf *pf)
+{
+ struct enetc_hw *hw = &pf->si->hw;
+ int num_si = pf->caps.num_vsi + 1;
+ u32 si_bitmap = 0;
+ int i;
+
+ /* Master enable for all SIs */
+ for (i = 0; i < num_si; i++)
+ si_bitmap |= PMR_SI_EN(i);
+
+ enetc_port_wr(hw, ENETC4_PMR, si_bitmap);
+}
+
+static void enetc4_configure_port_si(struct enetc_pf *pf)
+{
+ struct enetc_hw *hw = &pf->si->hw;
+
+ enetc4_allocate_si_rings(pf);
+
+ /* Outer VLAN tag will be used for VLAN filtering */
+ enetc_port_wr(hw, ENETC4_PSIVLANFMR, PSIVLANFMR_VS);
+
+ enetc4_set_default_si_vlan_promisc(pf);
+
+ /* Disable SI MAC multicast & unicast promiscuous */
+ enetc_port_wr(hw, ENETC4_PSIPMMR, 0);
+
+ enetc4_set_si_msix_num(pf);
+
+ enetc4_enable_all_si(pf);
+}
+
+static void enetc4_pf_reset_tc_msdu(struct enetc_hw *hw)
+{
+ u32 val = ENETC_MAC_MAXFRM_SIZE;
+ int tc;
+
+ val = u32_replace_bits(val, SDU_TYPE_MPDU, PTCTMSDUR_SDU_TYPE);
+
+ for (tc = 0; tc < ENETC_NUM_TC; tc++)
+ enetc_port_wr(hw, ENETC4_PTCTMSDUR(tc), val);
+}
+
+static void enetc4_set_trx_frame_size(struct enetc_pf *pf)
+{
+ struct enetc_si *si = pf->si;
+
+ enetc_port_mac_wr(si, ENETC4_PM_MAXFRM(0),
+ ENETC_SET_MAXFRM(ENETC_MAC_MAXFRM_SIZE));
+
+ enetc4_pf_reset_tc_msdu(&si->hw);
+}
+
+static void enetc4_enable_trx(struct enetc_pf *pf)
+{
+ struct enetc_hw *hw = &pf->si->hw;
+
+ /* Enable port transmit/receive */
+ enetc_port_wr(hw, ENETC4_POR, 0);
+}
+
+static void enetc4_configure_port(struct enetc_pf *pf)
+{
+ enetc4_configure_port_si(pf);
+ enetc4_set_trx_frame_size(pf);
+ enetc_set_default_rss_key(pf);
+ enetc4_enable_trx(pf);
+}
+
+static int enetc4_init_ntmp_user(struct enetc_si *si)
+{
+ struct ntmp_user *user = &si->ntmp_user;
+
+ /* For ENETC 4.1, all table versions are 0 */
+ memset(&user->tbl, 0, sizeof(user->tbl));
+
+ return enetc4_setup_cbdr(si);
+}
+
+static void enetc4_free_ntmp_user(struct enetc_si *si)
+{
+ enetc4_teardown_cbdr(si);
+}
+
+static int enetc4_pf_init(struct enetc_pf *pf)
+{
+ struct device *dev = &pf->si->pdev->dev;
+ int err;
+
+	/* Initialize the MAC addresses for the PF and VFs */
+ err = enetc_setup_mac_addresses(dev->of_node, pf);
+ if (err) {
+ dev_err(dev, "Failed to set MAC addresses\n");
+ return err;
+ }
+
+ err = enetc4_init_ntmp_user(pf->si);
+ if (err) {
+ dev_err(dev, "Failed to init CBDR\n");
+ return err;
+ }
+
+ enetc4_configure_port(pf);
+
+ return 0;
+}
+
+static void enetc4_pf_free(struct enetc_pf *pf)
+{
+ enetc4_free_ntmp_user(pf->si);
+}
+
+static void enetc4_psi_do_set_rx_mode(struct work_struct *work)
+{
+ struct enetc_si *si = container_of(work, struct enetc_si, rx_mode_task);
+ struct enetc_pf *pf = enetc_si_priv(si);
+ struct net_device *ndev = si->ndev;
+ struct enetc_hw *hw = &si->hw;
+ bool uc_promisc = false;
+ bool mc_promisc = false;
+ int type = 0;
+
+ rtnl_lock();
+
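+	/* IFF_PROMISC disables all MAC filtering, IFF_ALLMULTI accepts all
+	 * multicast but keeps unicast filtering; otherwise both unicast and
+	 * multicast filters are rebuilt below.
+	 */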
+ if (ndev->flags & IFF_PROMISC) {
+ uc_promisc = true;
+ mc_promisc = true;
+ } else if (ndev->flags & IFF_ALLMULTI) {
+ mc_promisc = true;
+ type = ENETC_MAC_FILTER_TYPE_UC;
+ } else {
+ type = ENETC_MAC_FILTER_TYPE_ALL;
+ }
+
+ enetc4_pf_set_si_mac_promisc(hw, 0, uc_promisc, mc_promisc);
+
+ if (uc_promisc) {
+ enetc4_pf_set_si_uc_hash_filter(hw, 0, 0);
+ enetc4_pf_clear_maft_entries(pf);
+ }
+
+ if (mc_promisc)
+ enetc4_pf_set_si_mc_hash_filter(hw, 0, 0);
+
+ /* Set new MAC filter */
+ enetc4_pf_set_mac_filter(pf, type);
+
+ rtnl_unlock();
+}
+
+static void enetc4_pf_set_rx_mode(struct net_device *ndev)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_si *si = priv->si;
+
+ queue_work(si->workqueue, &si->rx_mode_task);
+}
+
+static int enetc4_pf_set_features(struct net_device *ndev,
+ netdev_features_t features)
+{
+ netdev_features_t changed = ndev->features ^ features;
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+
+ if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ bool promisc_en = !(features & NETIF_F_HW_VLAN_CTAG_FILTER);
+
+ enetc4_pf_set_si_vlan_promisc(hw, 0, promisc_en);
+ }
+
+ if (changed & NETIF_F_LOOPBACK)
+ enetc4_pf_set_loopback(ndev, !!(features & NETIF_F_LOOPBACK));
+
+ enetc_set_features(ndev, features);
+
+ return 0;
+}
+
+static const struct net_device_ops enetc4_ndev_ops = {
+ .ndo_open = enetc_open,
+ .ndo_stop = enetc_close,
+ .ndo_start_xmit = enetc_xmit,
+ .ndo_get_stats = enetc_get_stats,
+ .ndo_set_mac_address = enetc_pf_set_mac_addr,
+ .ndo_set_rx_mode = enetc4_pf_set_rx_mode,
+ .ndo_set_features = enetc4_pf_set_features,
+ .ndo_vlan_rx_add_vid = enetc_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = enetc_vlan_rx_del_vid,
+ .ndo_eth_ioctl = enetc_ioctl,
+ .ndo_hwtstamp_get = enetc_hwtstamp_get,
+ .ndo_hwtstamp_set = enetc_hwtstamp_set,
+};
+
+static struct phylink_pcs *
+enetc4_pl_mac_select_pcs(struct phylink_config *config, phy_interface_t iface)
+{
+ struct enetc_pf *pf = phylink_to_enetc_pf(config);
+
+ return pf->pcs;
+}
+
+static void enetc4_mac_config(struct enetc_pf *pf, unsigned int mode,
+ phy_interface_t phy_mode)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(pf->si->ndev);
+ struct enetc_si *si = pf->si;
+ u32 val;
+
+ if (enetc_is_pseudo_mac(si))
+ return;
+
+ val = enetc_port_mac_rd(si, ENETC4_PM_IF_MODE(0));
+ val &= ~(PM_IF_MODE_IFMODE | PM_IF_MODE_ENA);
+
+ switch (phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ val |= IFMODE_RGMII;
+		/* Enable auto-negotiation for the MAC if its RGMII
+		 * interface supports in-band status.
+		 */
+ if (phylink_autoneg_inband(mode))
+ val |= PM_IF_MODE_ENA;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ val |= IFMODE_RMII;
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ val |= IFMODE_SGMII;
+ break;
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_XGMII:
+ case PHY_INTERFACE_MODE_USXGMII:
+ val |= IFMODE_XGMII;
+ break;
+ default:
+		dev_err(priv->dev,
+			"Unsupported PHY mode: %d\n", phy_mode);
+ return;
+ }
+
+ enetc_port_mac_wr(si, ENETC4_PM_IF_MODE(0), val);
+}
+
+static void enetc4_pl_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct enetc_pf *pf = phylink_to_enetc_pf(config);
+
+ enetc4_mac_config(pf, mode, state->interface);
+}
+
+static void enetc4_set_port_speed(struct enetc_ndev_priv *priv, int speed)
+{
+ u32 old_speed = priv->speed;
+ u32 val;
+
+ if (speed == old_speed)
+ return;
+
+ val = enetc_port_rd(&priv->si->hw, ENETC4_PCR);
+ val &= ~PCR_PSPEED;
+
+ switch (speed) {
+ case SPEED_100:
+ case SPEED_1000:
+ case SPEED_2500:
+ case SPEED_10000:
+ val |= (PCR_PSPEED & PCR_PSPEED_VAL(speed));
+ break;
+ case SPEED_10:
+ default:
+ val |= (PCR_PSPEED & PCR_PSPEED_VAL(SPEED_10));
+ }
+
+ priv->speed = speed;
+ enetc_port_wr(&priv->si->hw, ENETC4_PCR, val);
+}
+
+static void enetc4_set_rgmii_mac(struct enetc_pf *pf, int speed, int duplex)
+{
+ struct enetc_si *si = pf->si;
+ u32 old_val, val;
+
+ old_val = enetc_port_mac_rd(si, ENETC4_PM_IF_MODE(0));
+ val = old_val & ~(PM_IF_MODE_ENA | PM_IF_MODE_M10 | PM_IF_MODE_REVMII);
+
+ switch (speed) {
+ case SPEED_1000:
+ val = u32_replace_bits(val, SSP_1G, PM_IF_MODE_SSP);
+ break;
+ case SPEED_100:
+ val = u32_replace_bits(val, SSP_100M, PM_IF_MODE_SSP);
+ break;
+ case SPEED_10:
+ val = u32_replace_bits(val, SSP_10M, PM_IF_MODE_SSP);
+ }
+
+ val = u32_replace_bits(val, duplex == DUPLEX_FULL ? 0 : 1,
+ PM_IF_MODE_HD);
+
+ if (val == old_val)
+ return;
+
+ enetc_port_mac_wr(si, ENETC4_PM_IF_MODE(0), val);
+}
+
+static void enetc4_set_rmii_mac(struct enetc_pf *pf, int speed, int duplex)
+{
+ struct enetc_si *si = pf->si;
+ u32 old_val, val;
+
+ old_val = enetc_port_mac_rd(si, ENETC4_PM_IF_MODE(0));
+ val = old_val & ~(PM_IF_MODE_ENA | PM_IF_MODE_SSP);
+
+ switch (speed) {
+ case SPEED_100:
+ val &= ~PM_IF_MODE_M10;
+ break;
+ case SPEED_10:
+ val |= PM_IF_MODE_M10;
+ }
+
+ val = u32_replace_bits(val, duplex == DUPLEX_FULL ? 0 : 1,
+ PM_IF_MODE_HD);
+
+ if (val == old_val)
+ return;
+
+ enetc_port_mac_wr(si, ENETC4_PM_IF_MODE(0), val);
+}
+
+static void enetc4_set_hd_flow_control(struct enetc_pf *pf, bool enable)
+{
+ struct enetc_si *si = pf->si;
+ u32 old_val, val;
+
+ if (!pf->caps.half_duplex)
+ return;
+
+ old_val = enetc_port_mac_rd(si, ENETC4_PM_CMD_CFG(0));
+ val = u32_replace_bits(old_val, enable ? 1 : 0, PM_CMD_CFG_HD_FCEN);
+ if (val == old_val)
+ return;
+
+ enetc_port_mac_wr(si, ENETC4_PM_CMD_CFG(0), val);
+}
+
+static void enetc4_set_rx_pause(struct enetc_pf *pf, bool rx_pause)
+{
+ struct enetc_si *si = pf->si;
+ u32 old_val, val;
+
+ old_val = enetc_port_mac_rd(si, ENETC4_PM_CMD_CFG(0));
+ val = u32_replace_bits(old_val, rx_pause ? 0 : 1, PM_CMD_CFG_PAUSE_IGN);
+ if (val == old_val)
+ return;
+
+ enetc_port_mac_wr(si, ENETC4_PM_CMD_CFG(0), val);
+}
+
+static void enetc4_set_tx_pause(struct enetc_pf *pf, int num_rxbdr, bool tx_pause)
+{
+ u32 pause_off_thresh = 0, pause_on_thresh = 0;
+ u32 init_quanta = 0, refresh_quanta = 0;
+ struct enetc_hw *hw = &pf->si->hw;
+ u32 rbmr, old_rbmr;
+ int i;
+
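+	/* Toggle congestion management on every RX BDR to match the Tx
+	 * flow control setting.
+	 */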
+ for (i = 0; i < num_rxbdr; i++) {
+ old_rbmr = enetc_rxbdr_rd(hw, i, ENETC_RBMR);
+ rbmr = u32_replace_bits(old_rbmr, tx_pause ? 1 : 0, ENETC_RBMR_CM);
+ if (rbmr == old_rbmr)
+ continue;
+
+ enetc_rxbdr_wr(hw, i, ENETC_RBMR, rbmr);
+ }
+
+ if (tx_pause) {
+ /* When the port first enters congestion, send a PAUSE request
+ * with the maximum number of quanta. When the port exits
+ * congestion, it will automatically send a PAUSE frame with
+ * zero quanta.
+ */
+ init_quanta = 0xffff;
+
+ /* Also, set up the refresh timer to send follow-up PAUSE
+ * frames at half the quanta value, in case the congestion
+ * condition persists.
+ */
+ refresh_quanta = 0xffff / 2;
+
+ /* Start emitting PAUSE frames when 3 large frames (or more
+ * smaller frames) have accumulated in the FIFO waiting to be
+ * DMAed to the RX ring.
+ */
+ pause_on_thresh = 3 * ENETC_MAC_MAXFRM_SIZE;
+ pause_off_thresh = 1 * ENETC_MAC_MAXFRM_SIZE;
+ }
+
+ enetc_port_mac_wr(pf->si, ENETC4_PM_PAUSE_QUANTA(0), init_quanta);
+ enetc_port_mac_wr(pf->si, ENETC4_PM_PAUSE_THRESH(0), refresh_quanta);
+ enetc_port_wr(hw, ENETC4_PPAUONTR, pause_on_thresh);
+ enetc_port_wr(hw, ENETC4_PPAUOFFTR, pause_off_thresh);
+}
+
+static void enetc4_enable_mac(struct enetc_pf *pf, bool en)
+{
+ struct enetc_si *si = pf->si;
+ u32 val;
+
+ val = enetc_port_mac_rd(si, ENETC4_PM_CMD_CFG(0));
+ val &= ~(PM_CMD_CFG_TX_EN | PM_CMD_CFG_RX_EN);
+ val |= en ? (PM_CMD_CFG_TX_EN | PM_CMD_CFG_RX_EN) : 0;
+
+ enetc_port_mac_wr(si, ENETC4_PM_CMD_CFG(0), val);
+}
+
+static void enetc4_pl_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy, unsigned int mode,
+ phy_interface_t interface, int speed,
+ int duplex, bool tx_pause, bool rx_pause)
+{
+ struct enetc_pf *pf = phylink_to_enetc_pf(config);
+ struct enetc_si *si = pf->si;
+ struct enetc_ndev_priv *priv;
+ bool hd_fc = false;
+
+ priv = netdev_priv(si->ndev);
+ enetc4_set_port_speed(priv, speed);
+
+ if (!phylink_autoneg_inband(mode) &&
+ phy_interface_mode_is_rgmii(interface))
+ enetc4_set_rgmii_mac(pf, speed, duplex);
+
+ if (interface == PHY_INTERFACE_MODE_RMII)
+ enetc4_set_rmii_mac(pf, speed, duplex);
+
+ if (duplex == DUPLEX_FULL) {
+ /* When preemption is enabled, generation of PAUSE frames
+ * must be disabled, as stated in the IEEE 802.3 standard.
+ */
+ if (priv->active_offloads & ENETC_F_QBU)
+ tx_pause = false;
+ } else { /* DUPLEX_HALF */
+ if (tx_pause || rx_pause)
+ hd_fc = true;
+
+ /* As per 802.3 annex 31B, PAUSE frames are only supported
+ * when the link is configured for full duplex operation.
+ */
+ tx_pause = false;
+ rx_pause = false;
+ }
+
+ enetc4_set_hd_flow_control(pf, hd_fc);
+ enetc4_set_tx_pause(pf, priv->num_rx_rings, tx_pause);
+ enetc4_set_rx_pause(pf, rx_pause);
+ enetc4_enable_mac(pf, true);
+}
+
+static void enetc4_pl_mac_link_down(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct enetc_pf *pf = phylink_to_enetc_pf(config);
+
+ enetc4_enable_mac(pf, false);
+}
+
+static const struct phylink_mac_ops enetc_pl_mac_ops = {
+ .mac_select_pcs = enetc4_pl_mac_select_pcs,
+ .mac_config = enetc4_pl_mac_config,
+ .mac_link_up = enetc4_pl_mac_link_up,
+ .mac_link_down = enetc4_pl_mac_link_down,
+};
+
+static void enetc4_pci_remove(void *data)
+{
+ struct pci_dev *pdev = data;
+
+ enetc_pci_remove(pdev);
+}
+
+static int enetc4_link_init(struct enetc_ndev_priv *priv,
+ struct device_node *node)
+{
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+ struct device *dev = priv->dev;
+ int err;
+
+ err = of_get_phy_mode(node, &pf->if_mode);
+ if (err) {
+ dev_err(dev, "Failed to get PHY mode\n");
+ return err;
+ }
+
+ err = enetc_mdiobus_create(pf, node);
+ if (err) {
+ dev_err(dev, "Failed to create MDIO bus\n");
+ return err;
+ }
+
+ err = enetc_phylink_create(priv, node, &enetc_pl_mac_ops);
+ if (err) {
+ dev_err(dev, "Failed to create phylink\n");
+ goto err_phylink_create;
+ }
+
+ return 0;
+
+err_phylink_create:
+ enetc_mdiobus_destroy(pf);
+
+ return err;
+}
+
+static void enetc4_link_deinit(struct enetc_ndev_priv *priv)
+{
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+
+ enetc_phylink_destroy(priv);
+ enetc_mdiobus_destroy(pf);
+}
+
+static int enetc4_psi_wq_task_init(struct enetc_si *si)
+{
+ char wq_name[24];
+
+ INIT_WORK(&si->rx_mode_task, enetc4_psi_do_set_rx_mode);
+ snprintf(wq_name, sizeof(wq_name), "enetc-%s", pci_name(si->pdev));
+ si->workqueue = create_singlethread_workqueue(wq_name);
+ if (!si->workqueue)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int enetc4_pf_netdev_create(struct enetc_si *si)
+{
+ struct device *dev = &si->pdev->dev;
+ struct enetc_ndev_priv *priv;
+ struct net_device *ndev;
+ int err;
+
+ ndev = alloc_etherdev_mqs(sizeof(struct enetc_ndev_priv),
+ si->num_tx_rings, si->num_rx_rings);
+ if (!ndev)
+ return -ENOMEM;
+
+ priv = netdev_priv(ndev);
+ priv->ref_clk = devm_clk_get_optional(dev, "ref");
+ if (IS_ERR(priv->ref_clk)) {
+		dev_err(dev, "Failed to get reference clock\n");
+ err = PTR_ERR(priv->ref_clk);
+ goto err_clk_get;
+ }
+
+ enetc_pf_netdev_setup(si, ndev, &enetc4_ndev_ops);
+
+ enetc_init_si_rings_params(priv);
+
+ err = enetc_configure_si(priv);
+ if (err) {
+ dev_err(dev, "Failed to configure SI\n");
+ goto err_config_si;
+ }
+
+ err = enetc_alloc_msix(priv);
+ if (err) {
+ dev_err(dev, "Failed to alloc MSI-X\n");
+ goto err_alloc_msix;
+ }
+
+ err = enetc4_link_init(priv, dev->of_node);
+ if (err)
+ goto err_link_init;
+
+ err = enetc4_psi_wq_task_init(si);
+ if (err) {
+ dev_err(dev, "Failed to init workqueue\n");
+ goto err_wq_init;
+ }
+
+ err = register_netdev(ndev);
+ if (err) {
+ dev_err(dev, "Failed to register netdev\n");
+ goto err_reg_netdev;
+ }
+
+ return 0;
+
+err_reg_netdev:
+ destroy_workqueue(si->workqueue);
+err_wq_init:
+ enetc4_link_deinit(priv);
+err_link_init:
+ enetc_free_msix(priv);
+err_alloc_msix:
+err_config_si:
+err_clk_get:
+ free_netdev(ndev);
+
+ return err;
+}
+
+static void enetc4_pf_netdev_destroy(struct enetc_si *si)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(si->ndev);
+ struct net_device *ndev = si->ndev;
+
+ unregister_netdev(ndev);
+ cancel_work(&si->rx_mode_task);
+ destroy_workqueue(si->workqueue);
+ enetc4_link_deinit(priv);
+ enetc_free_msix(priv);
+ free_netdev(ndev);
+}
+
+static const struct enetc_si_ops enetc4_psi_ops = {
+ .get_rss_table = enetc4_get_rss_table,
+ .set_rss_table = enetc4_set_rss_table,
+};
+
+static int enetc4_pf_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct enetc_si *si;
+ struct enetc_pf *pf;
+ int err;
+
+ err = enetc_pci_probe(pdev, KBUILD_MODNAME, sizeof(*pf));
+ if (err)
+ return dev_err_probe(dev, err, "PCIe probing failed\n");
+
+ err = devm_add_action_or_reset(dev, enetc4_pci_remove, pdev);
+ if (err)
+ return err;
+
+ /* si is the private data. */
+ si = pci_get_drvdata(pdev);
+ if (!si->hw.port || !si->hw.global)
+ return dev_err_probe(dev, -ENODEV,
+ "Couldn't map PF only space\n");
+
+ si->revision = enetc_get_ip_revision(&si->hw);
+ si->ops = &enetc4_psi_ops;
+ err = enetc_get_driver_data(si);
+ if (err)
+ return dev_err_probe(dev, err,
+ "Could not get PF driver data\n");
+
+ err = enetc4_pf_struct_init(si);
+ if (err)
+ return err;
+
+ pf = enetc_si_priv(si);
+ err = enetc4_pf_init(pf);
+ if (err)
+ return err;
+
+ enetc_get_si_caps(si);
+
+ err = enetc4_pf_netdev_create(si);
+ if (err)
+ goto err_netdev_create;
+
+ enetc_create_debugfs(si);
+
+ return 0;
+
+err_netdev_create:
+ enetc4_pf_free(pf);
+
+ return err;
+}
+
+static void enetc4_pf_remove(struct pci_dev *pdev)
+{
+ struct enetc_si *si = pci_get_drvdata(pdev);
+ struct enetc_pf *pf = enetc_si_priv(si);
+
+ enetc_remove_debugfs(si);
+ enetc4_pf_netdev_destroy(si);
+ enetc4_pf_free(pf);
+}
+
+static const struct pci_device_id enetc4_pf_id_table[] = {
+ { PCI_DEVICE(NXP_ENETC_VENDOR_ID, NXP_ENETC_PF_DEV_ID) },
+ { PCI_DEVICE(NXP_ENETC_VENDOR_ID, NXP_ENETC_PPM_DEV_ID) },
+ { 0, } /* End of table. */
+};
+MODULE_DEVICE_TABLE(pci, enetc4_pf_id_table);
+
+static struct pci_driver enetc4_pf_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = enetc4_pf_id_table,
+ .probe = enetc4_pf_probe,
+ .remove = enetc4_pf_remove,
+};
+module_pci_driver(enetc4_pf_driver);
+
+MODULE_DESCRIPTION("ENETC4 PF Driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
index 20bfdf7fb4b4..3d5f31879d5c 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
@@ -60,6 +60,44 @@ void enetc_teardown_cbdr(struct enetc_cbdr *cbdr)
}
EXPORT_SYMBOL_GPL(enetc_teardown_cbdr);
+int enetc4_setup_cbdr(struct enetc_si *si)
+{
+ struct ntmp_user *user = &si->ntmp_user;
+ struct device *dev = &si->pdev->dev;
+ struct enetc_hw *hw = &si->hw;
+ struct netc_cbdr_regs regs;
+
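+	/* ENETC 4.x drives table management (NTMP) through a single control
+	 * BD ring mapped over the SI CBDR registers.
+	 */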
+ user->cbdr_num = 1;
+ user->dev = dev;
+ user->ring = devm_kcalloc(dev, user->cbdr_num,
+ sizeof(struct netc_cbdr), GFP_KERNEL);
+ if (!user->ring)
+	/* Default to MAC-level loopback mode when loopback is enabled. */
+
+ /* set CBDR cache attributes */
+ enetc_wr(hw, ENETC_SICAR2,
+ ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
+
+ regs.pir = hw->reg + ENETC_SICBDRPIR;
+ regs.cir = hw->reg + ENETC_SICBDRCIR;
+ regs.mr = hw->reg + ENETC_SICBDRMR;
+ regs.bar0 = hw->reg + ENETC_SICBDRBAR0;
+ regs.bar1 = hw->reg + ENETC_SICBDRBAR1;
+ regs.lenr = hw->reg + ENETC_SICBDRLENR;
+
+ return ntmp_init_cbdr(user->ring, dev, &regs);
+}
+EXPORT_SYMBOL_GPL(enetc4_setup_cbdr);
+
+void enetc4_teardown_cbdr(struct enetc_si *si)
+{
+ struct ntmp_user *user = &si->ntmp_user;
+
+ ntmp_free_cbdr(user->ring);
+ user->dev = NULL;
+}
+EXPORT_SYMBOL_GPL(enetc4_teardown_cbdr);
+
static void enetc_clean_cbdr(struct enetc_cbdr *ring)
{
struct enetc_cbd *dest_cbd;
@@ -256,3 +294,15 @@ int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count)
return enetc_cmd_rss_table(si, (u32 *)table, count, false);
}
EXPORT_SYMBOL_GPL(enetc_set_rss_table);
+
+int enetc4_get_rss_table(struct enetc_si *si, u32 *table, int count)
+{
+ return ntmp_rsst_query_entry(&si->ntmp_user, table, count);
+}
+EXPORT_SYMBOL_GPL(enetc4_get_rss_table);
+
+int enetc4_set_rss_table(struct enetc_si *si, const u32 *table, int count)
+{
+ return ntmp_rsst_update_entry(&si->ntmp_user, table, count);
+}
+EXPORT_SYMBOL_GPL(enetc4_set_rss_table);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index f7753ea5b57e..fed89d4f1e1d 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -4,6 +4,9 @@
#include <linux/ethtool_netlink.h>
#include <linux/net_tstamp.h>
#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/ptp_clock_kernel.h>
+
#include "enetc.h"
static const u32 enetc_si_regs[] = {
@@ -141,8 +144,8 @@ static const struct {
static const struct {
int reg;
- char name[ETH_GSTRING_LEN];
-} enetc_port_counters[] = {
+ char name[ETH_GSTRING_LEN] __nonstring;
+} enetc_pm_counters[] = {
{ ENETC_PM_REOCT(0), "MAC rx ethernet octets" },
{ ENETC_PM_RALN(0), "MAC rx alignment errors" },
{ ENETC_PM_RXPF(0), "MAC rx valid pause frames" },
@@ -194,6 +197,12 @@ static const struct {
{ ENETC_PM_TSCOL(0), "MAC tx single collisions" },
{ ENETC_PM_TLCOL(0), "MAC tx late collisions" },
{ ENETC_PM_TECOL(0), "MAC tx excessive collisions" },
+};
+
+static const struct {
+ int reg;
+ char name[ETH_GSTRING_LEN] __nonstring;
+} enetc_port_counters[] = {
{ ENETC_UFDMF, "SI MAC nomatch u-cast discards" },
{ ENETC_MFDMF, "SI MAC nomatch m-cast discards" },
{ ENETC_PBFDSIR, "SI MAC nomatch b-cast discards" },
@@ -240,6 +249,7 @@ static int enetc_get_sset_count(struct net_device *ndev, int sset)
return len;
len += ARRAY_SIZE(enetc_port_counters);
+ len += ARRAY_SIZE(enetc_pm_counters);
return len;
}
@@ -247,38 +257,28 @@ static int enetc_get_sset_count(struct net_device *ndev, int sset)
static void enetc_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- u8 *p = data;
int i, j;
switch (stringset) {
case ETH_SS_STATS:
- for (i = 0; i < ARRAY_SIZE(enetc_si_counters); i++) {
- strscpy(p, enetc_si_counters[i].name, ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < priv->num_tx_rings; i++) {
- for (j = 0; j < ARRAY_SIZE(tx_ring_stats); j++) {
- snprintf(p, ETH_GSTRING_LEN, tx_ring_stats[j],
- i);
- p += ETH_GSTRING_LEN;
- }
- }
- for (i = 0; i < priv->num_rx_rings; i++) {
- for (j = 0; j < ARRAY_SIZE(rx_ring_stats); j++) {
- snprintf(p, ETH_GSTRING_LEN, rx_ring_stats[j],
- i);
- p += ETH_GSTRING_LEN;
- }
- }
+ for (i = 0; i < ARRAY_SIZE(enetc_si_counters); i++)
+ ethtool_puts(&data, enetc_si_counters[i].name);
+ for (i = 0; i < priv->num_tx_rings; i++)
+ for (j = 0; j < ARRAY_SIZE(tx_ring_stats); j++)
+ ethtool_sprintf(&data, tx_ring_stats[j], i);
+ for (i = 0; i < priv->num_rx_rings; i++)
+ for (j = 0; j < ARRAY_SIZE(rx_ring_stats); j++)
+ ethtool_sprintf(&data, rx_ring_stats[j], i);
if (!enetc_si_is_pf(priv->si))
break;
- for (i = 0; i < ARRAY_SIZE(enetc_port_counters); i++) {
- strscpy(p, enetc_port_counters[i].name,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < ARRAY_SIZE(enetc_port_counters); i++)
+ ethtool_cpy(&data, enetc_port_counters[i].name);
+
+ for (i = 0; i < ARRAY_SIZE(enetc_pm_counters); i++)
+ ethtool_cpy(&data, enetc_pm_counters[i].name);
+
break;
}
}
@@ -315,13 +315,16 @@ static void enetc_get_ethtool_stats(struct net_device *ndev,
for (i = 0; i < ARRAY_SIZE(enetc_port_counters); i++)
data[o++] = enetc_port_rd(hw, enetc_port_counters[i].reg);
+
+ for (i = 0; i < ARRAY_SIZE(enetc_pm_counters); i++)
+ data[o++] = enetc_port_rd64(hw, enetc_pm_counters[i].reg);
}
static void enetc_pause_stats(struct enetc_hw *hw, int mac,
struct ethtool_pause_stats *pause_stats)
{
- pause_stats->tx_pause_frames = enetc_port_rd(hw, ENETC_PM_TXPF(mac));
- pause_stats->rx_pause_frames = enetc_port_rd(hw, ENETC_PM_RXPF(mac));
+ pause_stats->tx_pause_frames = enetc_port_rd64(hw, ENETC_PM_TXPF(mac));
+ pause_stats->rx_pause_frames = enetc_port_rd64(hw, ENETC_PM_RXPF(mac));
}
static void enetc_get_pause_stats(struct net_device *ndev,
@@ -348,31 +351,31 @@ static void enetc_get_pause_stats(struct net_device *ndev,
static void enetc_mac_stats(struct enetc_hw *hw, int mac,
struct ethtool_eth_mac_stats *s)
{
- s->FramesTransmittedOK = enetc_port_rd(hw, ENETC_PM_TFRM(mac));
- s->SingleCollisionFrames = enetc_port_rd(hw, ENETC_PM_TSCOL(mac));
- s->MultipleCollisionFrames = enetc_port_rd(hw, ENETC_PM_TMCOL(mac));
- s->FramesReceivedOK = enetc_port_rd(hw, ENETC_PM_RFRM(mac));
- s->FrameCheckSequenceErrors = enetc_port_rd(hw, ENETC_PM_RFCS(mac));
- s->AlignmentErrors = enetc_port_rd(hw, ENETC_PM_RALN(mac));
- s->OctetsTransmittedOK = enetc_port_rd(hw, ENETC_PM_TEOCT(mac));
- s->FramesWithDeferredXmissions = enetc_port_rd(hw, ENETC_PM_TDFR(mac));
- s->LateCollisions = enetc_port_rd(hw, ENETC_PM_TLCOL(mac));
- s->FramesAbortedDueToXSColls = enetc_port_rd(hw, ENETC_PM_TECOL(mac));
- s->FramesLostDueToIntMACXmitError = enetc_port_rd(hw, ENETC_PM_TERR(mac));
- s->CarrierSenseErrors = enetc_port_rd(hw, ENETC_PM_TCRSE(mac));
- s->OctetsReceivedOK = enetc_port_rd(hw, ENETC_PM_REOCT(mac));
- s->FramesLostDueToIntMACRcvError = enetc_port_rd(hw, ENETC_PM_RDRNTP(mac));
- s->MulticastFramesXmittedOK = enetc_port_rd(hw, ENETC_PM_TMCA(mac));
- s->BroadcastFramesXmittedOK = enetc_port_rd(hw, ENETC_PM_TBCA(mac));
- s->MulticastFramesReceivedOK = enetc_port_rd(hw, ENETC_PM_RMCA(mac));
- s->BroadcastFramesReceivedOK = enetc_port_rd(hw, ENETC_PM_RBCA(mac));
+ s->FramesTransmittedOK = enetc_port_rd64(hw, ENETC_PM_TFRM(mac));
+ s->SingleCollisionFrames = enetc_port_rd64(hw, ENETC_PM_TSCOL(mac));
+ s->MultipleCollisionFrames = enetc_port_rd64(hw, ENETC_PM_TMCOL(mac));
+ s->FramesReceivedOK = enetc_port_rd64(hw, ENETC_PM_RFRM(mac));
+ s->FrameCheckSequenceErrors = enetc_port_rd64(hw, ENETC_PM_RFCS(mac));
+ s->AlignmentErrors = enetc_port_rd64(hw, ENETC_PM_RALN(mac));
+ s->OctetsTransmittedOK = enetc_port_rd64(hw, ENETC_PM_TEOCT(mac));
+ s->FramesWithDeferredXmissions = enetc_port_rd64(hw, ENETC_PM_TDFR(mac));
+ s->LateCollisions = enetc_port_rd64(hw, ENETC_PM_TLCOL(mac));
+ s->FramesAbortedDueToXSColls = enetc_port_rd64(hw, ENETC_PM_TECOL(mac));
+ s->FramesLostDueToIntMACXmitError = enetc_port_rd64(hw, ENETC_PM_TERR(mac));
+ s->CarrierSenseErrors = enetc_port_rd64(hw, ENETC_PM_TCRSE(mac));
+ s->OctetsReceivedOK = enetc_port_rd64(hw, ENETC_PM_REOCT(mac));
+ s->FramesLostDueToIntMACRcvError = enetc_port_rd64(hw, ENETC_PM_RDRNTP(mac));
+ s->MulticastFramesXmittedOK = enetc_port_rd64(hw, ENETC_PM_TMCA(mac));
+ s->BroadcastFramesXmittedOK = enetc_port_rd64(hw, ENETC_PM_TBCA(mac));
+ s->MulticastFramesReceivedOK = enetc_port_rd64(hw, ENETC_PM_RMCA(mac));
+ s->BroadcastFramesReceivedOK = enetc_port_rd64(hw, ENETC_PM_RBCA(mac));
}
static void enetc_ctrl_stats(struct enetc_hw *hw, int mac,
struct ethtool_eth_ctrl_stats *s)
{
- s->MACControlFramesTransmitted = enetc_port_rd(hw, ENETC_PM_TCNP(mac));
- s->MACControlFramesReceived = enetc_port_rd(hw, ENETC_PM_RCNP(mac));
+ s->MACControlFramesTransmitted = enetc_port_rd64(hw, ENETC_PM_TCNP(mac));
+ s->MACControlFramesReceived = enetc_port_rd64(hw, ENETC_PM_RCNP(mac));
}
static const struct ethtool_rmon_hist_range enetc_rmon_ranges[] = {
@@ -389,26 +392,26 @@ static const struct ethtool_rmon_hist_range enetc_rmon_ranges[] = {
static void enetc_rmon_stats(struct enetc_hw *hw, int mac,
struct ethtool_rmon_stats *s)
{
- s->undersize_pkts = enetc_port_rd(hw, ENETC_PM_RUND(mac));
- s->oversize_pkts = enetc_port_rd(hw, ENETC_PM_ROVR(mac));
- s->fragments = enetc_port_rd(hw, ENETC_PM_RFRG(mac));
- s->jabbers = enetc_port_rd(hw, ENETC_PM_RJBR(mac));
-
- s->hist[0] = enetc_port_rd(hw, ENETC_PM_R64(mac));
- s->hist[1] = enetc_port_rd(hw, ENETC_PM_R127(mac));
- s->hist[2] = enetc_port_rd(hw, ENETC_PM_R255(mac));
- s->hist[3] = enetc_port_rd(hw, ENETC_PM_R511(mac));
- s->hist[4] = enetc_port_rd(hw, ENETC_PM_R1023(mac));
- s->hist[5] = enetc_port_rd(hw, ENETC_PM_R1522(mac));
- s->hist[6] = enetc_port_rd(hw, ENETC_PM_R1523X(mac));
-
- s->hist_tx[0] = enetc_port_rd(hw, ENETC_PM_T64(mac));
- s->hist_tx[1] = enetc_port_rd(hw, ENETC_PM_T127(mac));
- s->hist_tx[2] = enetc_port_rd(hw, ENETC_PM_T255(mac));
- s->hist_tx[3] = enetc_port_rd(hw, ENETC_PM_T511(mac));
- s->hist_tx[4] = enetc_port_rd(hw, ENETC_PM_T1023(mac));
- s->hist_tx[5] = enetc_port_rd(hw, ENETC_PM_T1522(mac));
- s->hist_tx[6] = enetc_port_rd(hw, ENETC_PM_T1523X(mac));
+ s->undersize_pkts = enetc_port_rd64(hw, ENETC_PM_RUND(mac));
+ s->oversize_pkts = enetc_port_rd64(hw, ENETC_PM_ROVR(mac));
+ s->fragments = enetc_port_rd64(hw, ENETC_PM_RFRG(mac));
+ s->jabbers = enetc_port_rd64(hw, ENETC_PM_RJBR(mac));
+
+ s->hist[0] = enetc_port_rd64(hw, ENETC_PM_R64(mac));
+ s->hist[1] = enetc_port_rd64(hw, ENETC_PM_R127(mac));
+ s->hist[2] = enetc_port_rd64(hw, ENETC_PM_R255(mac));
+ s->hist[3] = enetc_port_rd64(hw, ENETC_PM_R511(mac));
+ s->hist[4] = enetc_port_rd64(hw, ENETC_PM_R1023(mac));
+ s->hist[5] = enetc_port_rd64(hw, ENETC_PM_R1522(mac));
+ s->hist[6] = enetc_port_rd64(hw, ENETC_PM_R1523X(mac));
+
+ s->hist_tx[0] = enetc_port_rd64(hw, ENETC_PM_T64(mac));
+ s->hist_tx[1] = enetc_port_rd64(hw, ENETC_PM_T127(mac));
+ s->hist_tx[2] = enetc_port_rd64(hw, ENETC_PM_T255(mac));
+ s->hist_tx[3] = enetc_port_rd64(hw, ENETC_PM_T511(mac));
+ s->hist_tx[4] = enetc_port_rd64(hw, ENETC_PM_T1023(mac));
+ s->hist_tx[5] = enetc_port_rd64(hw, ENETC_PM_T1522(mac));
+ s->hist_tx[6] = enetc_port_rd64(hw, ENETC_PM_T1523X(mac));
}
static void enetc_get_eth_mac_stats(struct net_device *ndev,
@@ -432,6 +435,48 @@ static void enetc_get_eth_mac_stats(struct net_device *ndev,
}
}
+static void enetc_ppm_mac_stats(struct enetc_si *si,
+ struct ethtool_eth_mac_stats *s)
+{
+ struct enetc_hw *hw = &si->hw;
+ u64 rufcr, rmfcr, rbfcr;
+ u64 tufcr, tmfcr, tbfcr;
+
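+	/* The pseudo MAC only exposes per-class (UC/MC/BC) frame counters, so
+	 * FramesTransmittedOK and FramesReceivedOK are derived as their sums.
+	 */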
+ rufcr = enetc_port_rd64(hw, ENETC4_PPMRUFCR);
+ rmfcr = enetc_port_rd64(hw, ENETC4_PPMRMFCR);
+ rbfcr = enetc_port_rd64(hw, ENETC4_PPMRBFCR);
+
+ tufcr = enetc_port_rd64(hw, ENETC4_PPMTUFCR);
+ tmfcr = enetc_port_rd64(hw, ENETC4_PPMTMFCR);
+ tbfcr = enetc_port_rd64(hw, ENETC4_PPMTBFCR);
+
+ s->FramesTransmittedOK = tufcr + tmfcr + tbfcr;
+ s->FramesReceivedOK = rufcr + rmfcr + rbfcr;
+ s->OctetsTransmittedOK = enetc_port_rd64(hw, ENETC4_PPMTOCR);
+ s->OctetsReceivedOK = enetc_port_rd64(hw, ENETC4_PPMROCR);
+ s->MulticastFramesXmittedOK = tmfcr;
+ s->BroadcastFramesXmittedOK = tbfcr;
+ s->MulticastFramesReceivedOK = rmfcr;
+ s->BroadcastFramesReceivedOK = rbfcr;
+}
+
+static void enetc_ppm_get_eth_mac_stats(struct net_device *ndev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+
+ switch (mac_stats->src) {
+ case ETHTOOL_MAC_STATS_SRC_EMAC:
+ enetc_ppm_mac_stats(priv->si, mac_stats);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_PMAC:
+ break;
+ case ETHTOOL_MAC_STATS_SRC_AGGREGATE:
+ ethtool_aggregate_mac_stats(ndev, mac_stats);
+ break;
+ }
+}
+
static void enetc_get_eth_ctrl_stats(struct net_device *ndev,
struct ethtool_eth_ctrl_stats *ctrl_stats)
{
@@ -480,7 +525,8 @@ static void enetc_get_rmon_stats(struct net_device *ndev,
#define ENETC_RSSHASH_L3 (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO | RXH_IP_SRC | \
RXH_IP_DST)
#define ENETC_RSSHASH_L4 (ENETC_RSSHASH_L3 | RXH_L4_B_0_1 | RXH_L4_B_2_3)
-static int enetc_get_rsshash(struct ethtool_rxnfc *rxnfc)
+static int enetc_get_rxfh_fields(struct net_device *netdev,
+ struct ethtool_rxfh_fields *rxnfc)
{
static const u32 rsshash[] = {
[TCP_V4_FLOW] = ENETC_RSSHASH_L4,
@@ -587,6 +633,13 @@ done:
return enetc_set_fs_entry(si, &rfse, fs->location);
}
+static u32 enetc_get_rx_ring_count(struct net_device *ndev)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+
+ return priv->num_rx_rings;
+}
+
static int enetc_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc,
u32 *rule_locs)
{
@@ -594,12 +647,6 @@ static int enetc_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc,
int i, j;
switch (rxnfc->cmd) {
- case ETHTOOL_GRXRINGS:
- rxnfc->data = priv->num_rx_rings;
- break;
- case ETHTOOL_GRXFH:
- /* get RSS hash config */
- return enetc_get_rsshash(rxnfc);
case ETHTOOL_GRXCLSRLCNT:
/* total number of entries */
rxnfc->data = priv->si->num_fs_entries;
@@ -690,36 +737,53 @@ static u32 enetc_get_rxfh_indir_size(struct net_device *ndev)
return priv->si->num_rss;
}
+static int enetc_get_rss_key_base(struct enetc_si *si)
+{
+ if (is_enetc_rev1(si))
+ return ENETC_PRSSK(0);
+
+ return ENETC4_PRSSKR(0);
+}
+
+static void enetc_get_rss_key(struct enetc_si *si, const u8 *key)
+{
+ int base = enetc_get_rss_key_base(si);
+ struct enetc_hw *hw = &si->hw;
+ int i;
+
+ for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++)
+ ((u32 *)key)[i] = enetc_port_rd(hw, base + i * 4);
+}
+
static int enetc_get_rxfh(struct net_device *ndev,
struct ethtool_rxfh_param *rxfh)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct enetc_hw *hw = &priv->si->hw;
- int err = 0, i;
+ struct enetc_si *si = priv->si;
+ int err = 0;
/* return hash function */
rxfh->hfunc = ETH_RSS_HASH_TOP;
/* return hash key */
- if (rxfh->key && hw->port)
- for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++)
- ((u32 *)rxfh->key)[i] = enetc_port_rd(hw,
- ENETC_PRSSK(i));
+ if (rxfh->key && enetc_si_is_pf(si))
+ enetc_get_rss_key(si, rxfh->key);
/* return RSS table */
if (rxfh->indir)
- err = enetc_get_rss_table(priv->si, rxfh->indir,
- priv->si->num_rss);
+ err = si->ops->get_rss_table(si, rxfh->indir, si->num_rss);
return err;
}
-void enetc_set_rss_key(struct enetc_hw *hw, const u8 *bytes)
+void enetc_set_rss_key(struct enetc_si *si, const u8 *bytes)
{
+ int base = enetc_get_rss_key_base(si);
+ struct enetc_hw *hw = &si->hw;
int i;
for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++)
- enetc_port_wr(hw, ENETC_PRSSK(i), ((u32 *)bytes)[i]);
+ enetc_port_wr(hw, base + i * 4, ((u32 *)bytes)[i]);
}
EXPORT_SYMBOL_GPL(enetc_set_rss_key);
@@ -728,17 +792,16 @@ static int enetc_set_rxfh(struct net_device *ndev,
struct netlink_ext_ack *extack)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct enetc_hw *hw = &priv->si->hw;
+ struct enetc_si *si = priv->si;
int err = 0;
/* set hash key, if PF */
- if (rxfh->key && hw->port)
- enetc_set_rss_key(hw, rxfh->key);
+ if (rxfh->key && enetc_si_is_pf(si))
+ enetc_set_rss_key(si, rxfh->key);
/* set RSS table */
if (rxfh->indir)
- err = enetc_set_rss_table(priv->si, rxfh->indir,
- priv->si->num_rss);
+ err = si->ops->set_rss_table(si, rxfh->indir, si->num_rss);
return err;
}
@@ -775,9 +838,10 @@ static int enetc_get_coalesce(struct net_device *ndev,
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_int_vector *v = priv->int_vector[0];
+ u64 clk_freq = priv->sysclk_freq;
- ic->tx_coalesce_usecs = enetc_cycles_to_usecs(priv->tx_ictt);
- ic->rx_coalesce_usecs = enetc_cycles_to_usecs(v->rx_ictt);
+ ic->tx_coalesce_usecs = enetc_cycles_to_usecs(priv->tx_ictt, clk_freq);
+ ic->rx_coalesce_usecs = enetc_cycles_to_usecs(v->rx_ictt, clk_freq);
ic->tx_max_coalesced_frames = ENETC_TXIC_PKTTHR;
ic->rx_max_coalesced_frames = ENETC_RXIC_PKTTHR;
@@ -793,12 +857,13 @@ static int enetc_set_coalesce(struct net_device *ndev,
struct netlink_ext_ack *extack)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ u64 clk_freq = priv->sysclk_freq;
u32 rx_ictt, tx_ictt;
int i, ic_mode;
bool changed;
- tx_ictt = enetc_usecs_to_cycles(ic->tx_coalesce_usecs);
- rx_ictt = enetc_usecs_to_cycles(ic->rx_coalesce_usecs);
+ tx_ictt = enetc_usecs_to_cycles(ic->tx_coalesce_usecs, clk_freq);
+ rx_ictt = enetc_usecs_to_cycles(ic->rx_coalesce_usecs, clk_freq);
if (ic->rx_max_coalesced_frames != ENETC_RXIC_PKTTHR)
return -EOPNOTSUPP;
@@ -840,37 +905,97 @@ static int enetc_set_coalesce(struct net_device *ndev,
return 0;
}
-static int enetc_get_ts_info(struct net_device *ndev,
- struct ethtool_ts_info *info)
+static int enetc_get_phc_index_by_pdev(struct enetc_si *si)
{
- int *phc_idx;
-
- phc_idx = symbol_get(enetc_phc_index);
- if (phc_idx) {
- info->phc_index = *phc_idx;
- symbol_put(enetc_phc_index);
- } else {
- info->phc_index = -1;
+ struct pci_bus *bus = si->pdev->bus;
+ struct pci_dev *timer_pdev;
+ unsigned int devfn;
+ int phc_index;
+
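+	/* The PTP timer is a separate PCI function on the same bus; its
+	 * device/function number depends on the ENETC IP revision.
+	 */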
+ switch (si->revision) {
+ case ENETC_REV_1_0:
+ devfn = PCI_DEVFN(0, 4);
+ break;
+ case ENETC_REV_4_1:
+ devfn = PCI_DEVFN(24, 0);
+ break;
+ case ENETC_REV_4_3:
+ devfn = PCI_DEVFN(0, 1);
+ break;
+ default:
+ return -1;
}
-#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
+ timer_pdev = pci_get_domain_bus_and_slot(pci_domain_nr(bus),
+ bus->number, devfn);
+ if (!timer_pdev)
+ return -1;
+
+ phc_index = ptp_clock_index_by_dev(&timer_pdev->dev);
+ pci_dev_put(timer_pdev);
+
+ return phc_index;
+}
+
+static int enetc_get_phc_index(struct enetc_si *si)
+{
+ struct device_node *np = si->pdev->dev.of_node;
+ struct device_node *timer_np;
+ int phc_index;
+
+ if (!np)
+ return enetc_get_phc_index_by_pdev(si);
+
+ timer_np = of_parse_phandle(np, "ptp-timer", 0);
+ if (!timer_np)
+ return enetc_get_phc_index_by_pdev(si);
+
+ phc_index = ptp_clock_index_by_of_node(timer_np);
+ of_node_put(timer_np);
+
+ return phc_index;
+}
+
+static void enetc_get_ts_generic_info(struct net_device *ndev,
+ struct kernel_ethtool_ts_info *info)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+
info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE |
- SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
+ SOF_TIMESTAMPING_TX_SOFTWARE;
info->tx_types = (1 << HWTSTAMP_TX_OFF) |
- (1 << HWTSTAMP_TX_ON) |
- (1 << HWTSTAMP_TX_ONESTEP_SYNC);
+ (1 << HWTSTAMP_TX_ON);
+
+ if (enetc_si_is_pf(priv->si))
+ info->tx_types |= (1 << HWTSTAMP_TX_ONESTEP_SYNC);
+
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_ALL);
-#else
- info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
-#endif
+}
+
+static int enetc_get_ts_info(struct net_device *ndev,
+ struct kernel_ethtool_ts_info *info)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_si *si = priv->si;
+
+ if (!enetc_ptp_clock_is_enabled(si))
+ goto timestamp_tx_sw;
+
+ info->phc_index = enetc_get_phc_index(si);
+ if (info->phc_index < 0)
+ goto timestamp_tx_sw;
+
+ enetc_get_ts_generic_info(ndev, info);
+
+ return 0;
+
+timestamp_tx_sw:
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
+
return 0;
}
@@ -1180,7 +1305,7 @@ void enetc_mm_link_state_update(struct enetc_ndev_priv *priv, bool link)
}
EXPORT_SYMBOL_GPL(enetc_mm_link_state_update);
-static const struct ethtool_ops enetc_pf_ethtool_ops = {
+const struct ethtool_ops enetc_pf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
@@ -1193,12 +1318,14 @@ static const struct ethtool_ops enetc_pf_ethtool_ops = {
.get_rmon_stats = enetc_get_rmon_stats,
.get_eth_ctrl_stats = enetc_get_eth_ctrl_stats,
.get_eth_mac_stats = enetc_get_eth_mac_stats,
+ .get_rx_ring_count = enetc_get_rx_ring_count,
.get_rxnfc = enetc_get_rxnfc,
.set_rxnfc = enetc_set_rxnfc,
.get_rxfh_key_size = enetc_get_rxfh_key_size,
.get_rxfh_indir_size = enetc_get_rxfh_indir_size,
.get_rxfh = enetc_get_rxfh,
.set_rxfh = enetc_set_rxfh,
+ .get_rxfh_fields = enetc_get_rxfh_fields,
.get_ringparam = enetc_get_ringparam,
.get_coalesce = enetc_get_coalesce,
.set_coalesce = enetc_set_coalesce,
@@ -1215,7 +1342,26 @@ static const struct ethtool_ops enetc_pf_ethtool_ops = {
.get_mm_stats = enetc_get_mm_stats,
};
-static const struct ethtool_ops enetc_vf_ethtool_ops = {
+const struct ethtool_ops enetc4_ppm_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
+ .get_eth_mac_stats = enetc_ppm_get_eth_mac_stats,
+ .get_rx_ring_count = enetc_get_rx_ring_count,
+ .get_rxfh_key_size = enetc_get_rxfh_key_size,
+ .get_rxfh_indir_size = enetc_get_rxfh_indir_size,
+ .get_rxfh = enetc_get_rxfh,
+ .set_rxfh = enetc_set_rxfh,
+ .get_rxfh_fields = enetc_get_rxfh_fields,
+ .get_ringparam = enetc_get_ringparam,
+ .get_coalesce = enetc_get_coalesce,
+ .set_coalesce = enetc_set_coalesce,
+ .get_link_ksettings = enetc_get_link_ksettings,
+ .set_link_ksettings = enetc_set_link_ksettings,
+ .get_link = ethtool_op_get_link,
+};
+
+const struct ethtool_ops enetc_vf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
@@ -1224,15 +1370,40 @@ static const struct ethtool_ops enetc_vf_ethtool_ops = {
.get_sset_count = enetc_get_sset_count,
.get_strings = enetc_get_strings,
.get_ethtool_stats = enetc_get_ethtool_stats,
+ .get_rx_ring_count = enetc_get_rx_ring_count,
.get_rxnfc = enetc_get_rxnfc,
.set_rxnfc = enetc_set_rxnfc,
.get_rxfh_indir_size = enetc_get_rxfh_indir_size,
.get_rxfh = enetc_get_rxfh,
.set_rxfh = enetc_set_rxfh,
+ .get_rxfh_fields = enetc_get_rxfh_fields,
+ .get_ringparam = enetc_get_ringparam,
+ .get_coalesce = enetc_get_coalesce,
+ .set_coalesce = enetc_set_coalesce,
+ .get_link = ethtool_op_get_link,
+ .get_ts_info = enetc_get_ts_info,
+};
+
+const struct ethtool_ops enetc4_pf_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
.get_ringparam = enetc_get_ringparam,
.get_coalesce = enetc_get_coalesce,
.set_coalesce = enetc_set_coalesce,
+ .get_link_ksettings = enetc_get_link_ksettings,
+ .set_link_ksettings = enetc_set_link_ksettings,
.get_link = ethtool_op_get_link,
+ .get_wol = enetc_get_wol,
+ .set_wol = enetc_set_wol,
+ .get_pauseparam = enetc_get_pauseparam,
+ .set_pauseparam = enetc_set_pauseparam,
+ .get_rx_ring_count = enetc_get_rx_ring_count,
+ .get_rxfh_key_size = enetc_get_rxfh_key_size,
+ .get_rxfh_indir_size = enetc_get_rxfh_indir_size,
+ .get_rxfh = enetc_get_rxfh,
+ .set_rxfh = enetc_set_rxfh,
+ .get_rxfh_fields = enetc_get_rxfh_fields,
.get_ts_info = enetc_get_ts_info,
};
@@ -1240,9 +1411,6 @@ void enetc_set_ethtool_ops(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- if (enetc_si_is_pf(priv->si))
- ndev->ethtool_ops = &enetc_pf_ethtool_ops;
- else
- ndev->ethtool_ops = &enetc_vf_ethtool_ops;
+ ndev->ethtool_ops = priv->si->drvdata->eth_ops;
}
EXPORT_SYMBOL_GPL(enetc_set_ethtool_ops);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index 1619943fb263..7b882b8921fe 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -6,6 +6,8 @@
#define ENETC_MM_VERIFY_SLEEP_US USEC_PER_MSEC
#define ENETC_MM_VERIFY_RETRIES 3
+#define ENETC_NUM_TC 8
+
/* ENETC device IDs */
#define ENETC_DEV_ID_PF 0xe100
#define ENETC_DEV_ID_VF 0xef00
@@ -21,10 +23,9 @@
#define ENETC_SICTR0 0x18
#define ENETC_SICTR1 0x1c
#define ENETC_SIPCAPR0 0x20
-#define ENETC_SIPCAPR0_PSFP BIT(9)
#define ENETC_SIPCAPR0_RSS BIT(8)
-#define ENETC_SIPCAPR0_QBV BIT(4)
-#define ENETC_SIPCAPR0_QBU BIT(3)
+#define ENETC_SIPCAPR0_RFS BIT(2)
+#define ENETC_SIPCAPR0_LSO BIT(1)
#define ENETC_SIPCAPR1 0x24
#define ENETC_SITGTGR 0x30
#define ENETC_SIRBGCR 0x38
@@ -42,6 +43,9 @@
#define ENETC_SIPMAR0 0x80
#define ENETC_SIPMAR1 0x84
+#define ENETC_SICVLANR1 0x90
+#define ENETC_SICVLANR2 0x94
+#define SICVLANR_ETYPE GENMASK(15, 0)
/* VF-PF Message passing */
#define ENETC_DEFAULT_MSG_SIZE 1024 /* and max size */
@@ -191,6 +195,9 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_PCAPR0 0x0900
#define ENETC_PCAPR0_RXBDR(val) ((val) >> 24)
#define ENETC_PCAPR0_TXBDR(val) (((val) >> 16) & 0xff)
+#define ENETC_PCAPR0_PSFP BIT(9)
+#define ENETC_PCAPR0_QBV BIT(4)
+#define ENETC_PCAPR0_QBU BIT(3)
#define ENETC_PCAPR1 0x0904
#define ENETC_PSICFGR0(n) (0x0940 + (n) * 0xc) /* n = SI index */
#define ENETC_PSICFGR0_SET_TXBDR(val) ((val) & 0xff)
@@ -368,6 +375,11 @@ enum enetc_bdr_type {TX, RX};
/** Global regs, offset: 2_0000h */
#define ENETC_GLOBAL_BASE 0x20000
#define ENETC_G_EIPBRR0 0x0bf8
+#define EIPBRR0_REVISION GENMASK(15, 0)
+#define ENETC_REV_1_0 0x0100
+#define ENETC_REV_4_1 0x0401
+#define ENETC_REV_4_3 0x0403
+
#define ENETC_G_EIPBRR1 0x0bfc
#define ENETC_G_EPFBLPR(n) (0xd00 + 4 * (n))
#define ENETC_G_EPFBLPR1_XGMII 0x80000000
@@ -396,18 +408,22 @@ struct enetc_hw {
*/
extern rwlock_t enetc_mdio_lock;
+DECLARE_STATIC_KEY_FALSE(enetc_has_err050089);
+
/* use this locking primitive only on the fast datapath to
* group together multiple non-MDIO register accesses to
* minimize the overhead of the lock
*/
static inline void enetc_lock_mdio(void)
{
- read_lock(&enetc_mdio_lock);
+ if (static_branch_unlikely(&enetc_has_err050089))
+ read_lock(&enetc_mdio_lock);
}
static inline void enetc_unlock_mdio(void)
{
- read_unlock(&enetc_mdio_lock);
+ if (static_branch_unlikely(&enetc_has_err050089))
+ read_unlock(&enetc_mdio_lock);
}
/* use these accessors only on the fast datapath under
@@ -416,14 +432,16 @@ static inline void enetc_unlock_mdio(void)
*/
static inline u32 enetc_rd_reg_hot(void __iomem *reg)
{
- lockdep_assert_held(&enetc_mdio_lock);
+ if (static_branch_unlikely(&enetc_has_err050089))
+ lockdep_assert_held(&enetc_mdio_lock);
return ioread32(reg);
}
static inline void enetc_wr_reg_hot(void __iomem *reg, u32 val)
{
- lockdep_assert_held(&enetc_mdio_lock);
+ if (static_branch_unlikely(&enetc_has_err050089))
+ lockdep_assert_held(&enetc_mdio_lock);
iowrite32(val, reg);
}
@@ -452,9 +470,13 @@ static inline u32 _enetc_rd_mdio_reg_wa(void __iomem *reg)
unsigned long flags;
u32 val;
- write_lock_irqsave(&enetc_mdio_lock, flags);
- val = ioread32(reg);
- write_unlock_irqrestore(&enetc_mdio_lock, flags);
+ if (static_branch_unlikely(&enetc_has_err050089)) {
+ write_lock_irqsave(&enetc_mdio_lock, flags);
+ val = ioread32(reg);
+ write_unlock_irqrestore(&enetc_mdio_lock, flags);
+ } else {
+ val = ioread32(reg);
+ }
return val;
}
@@ -463,9 +485,13 @@ static inline void _enetc_wr_mdio_reg_wa(void __iomem *reg, u32 val)
{
unsigned long flags;
- write_lock_irqsave(&enetc_mdio_lock, flags);
- iowrite32(val, reg);
- write_unlock_irqrestore(&enetc_mdio_lock, flags);
+ if (static_branch_unlikely(&enetc_has_err050089)) {
+ write_lock_irqsave(&enetc_mdio_lock, flags);
+ iowrite32(val, reg);
+ write_unlock_irqrestore(&enetc_mdio_lock, flags);
+ } else {
+ iowrite32(val, reg);
+ }
}
#ifdef ioread64
@@ -485,7 +511,7 @@ static inline u64 _enetc_rd_reg64(void __iomem *reg)
tmp = ioread32(reg + 4);
} while (high != tmp);
- return le64_to_cpu((__le64)high << 32 | low);
+ return (u64)high << 32 | low;
}
#endif
@@ -511,6 +537,7 @@ static inline u64 _enetc_rd_reg64_wa(void __iomem *reg)
/* port register accessors - PF only */
#define enetc_port_rd(hw, off) enetc_rd_reg((hw)->port + (off))
#define enetc_port_wr(hw, off, val) enetc_wr_reg((hw)->port + (off), val)
+#define enetc_port_rd64(hw, off) _enetc_rd_reg64_wa((hw)->port + (off))
#define enetc_port_rd_mdio(hw, off) _enetc_rd_mdio_reg_wa((hw)->port + (off))
#define enetc_port_wr_mdio(hw, off, val) _enetc_wr_mdio_reg_wa(\
(hw)->port + (off), val)
@@ -533,11 +560,23 @@ static inline u64 _enetc_rd_reg64_wa(void __iomem *reg)
union enetc_tx_bd {
struct {
__le64 addr;
- __le16 buf_len;
+ union {
+ __le16 buf_len;
+ __le16 hdr_len; /* For LSO, ENETC 4.1 and later */
+ };
__le16 frm_len;
union {
struct {
- u8 reserved[3];
+ u8 l3_aux0;
+#define ENETC_TX_BD_L3_START GENMASK(6, 0)
+#define ENETC_TX_BD_IPCS BIT(7)
+ u8 l3_aux1;
+#define ENETC_TX_BD_L3_HDR_LEN GENMASK(6, 0)
+#define ENETC_TX_BD_L3T BIT(7)
+ u8 l4_aux;
+#define ENETC_TX_BD_L4T GENMASK(7, 5)
+#define ENETC_TXBD_L4T_UDP 1
+#define ENETC_TXBD_L4T_TCP 2
u8 flags;
}; /* default layout */
__le32 txstart;
@@ -548,23 +587,27 @@ union enetc_tx_bd {
__le32 tstamp;
__le16 tpid;
__le16 vid;
- u8 reserved[6];
+ __le16 lso_sg_size; /* For ENETC 4.1 and later */
+ __le16 frm_len_ext; /* For ENETC 4.1 and later */
+ u8 reserved[2];
u8 e_flags;
u8 flags;
} ext; /* Tx BD extension */
struct {
__le32 tstamp;
- u8 reserved[10];
+ u8 reserved[8];
+ __le16 lso_err_count; /* For ENETC 4.1 and later */
u8 status;
u8 flags;
} wb; /* writeback descriptor */
};
enum enetc_txbd_flags {
- ENETC_TXBD_FLAGS_RES0 = BIT(0), /* reserved */
+ ENETC_TXBD_FLAGS_L4CS = BIT(0), /* For ENETC 4.1 and later */
ENETC_TXBD_FLAGS_TSE = BIT(1),
+ ENETC_TXBD_FLAGS_LSO = BIT(1), /* For ENETC 4.1 and later */
ENETC_TXBD_FLAGS_W = BIT(2),
- ENETC_TXBD_FLAGS_RES3 = BIT(3), /* reserved */
+ ENETC_TXBD_FLAGS_CSUM_LSO = BIT(3), /* For ENETC 4.1 and later */
ENETC_TXBD_FLAGS_TXSTART = BIT(4),
ENETC_TXBD_FLAGS_EX = BIT(6),
ENETC_TXBD_FLAGS_F = BIT(7)
@@ -572,6 +615,7 @@ enum enetc_txbd_flags {
#define ENETC_TXBD_STATS_WIN BIT(7)
#define ENETC_TXBD_TXSTART_MASK GENMASK(24, 0)
#define ENETC_TXBD_FLAGS_OFFSET 24
+#define ENETC_TXBD_TSTAMP GENMASK(29, 0)
static inline __le32 enetc_txbd_set_tx_start(u64 tx_start, u8 flags)
{
@@ -633,6 +677,8 @@ union enetc_rx_bd {
#define ENETC_CBD_FLAGS_SF BIT(7) /* short format */
#define ENETC_CBD_STATUS_MASK 0xf
+#define ENETC_TPID_8021Q 0
+
struct enetc_cmd_rfse {
u8 smac_h[6];
u8 smac_m[6];
@@ -957,15 +1003,17 @@ struct enetc_cbd {
u8 status_flags;
};
-#define ENETC_CLK 400000000ULL
-static inline u32 enetc_cycles_to_usecs(u32 cycles)
+#define ENETC_CLK_400M 400000000ULL
+#define ENETC_CLK_333M 333000000ULL
+
+static inline u32 enetc_cycles_to_usecs(u32 cycles, u64 clk_freq)
{
- return (u32)div_u64(cycles * 1000000ULL, ENETC_CLK);
+ return (u32)div_u64(cycles * 1000000ULL, clk_freq);
}
-static inline u32 enetc_usecs_to_cycles(u32 usecs)
+static inline u32 enetc_usecs_to_cycles(u32 usecs, u64 clk_freq)
{
- return (u32)div_u64(usecs * ENETC_CLK, 1000000ULL);
+ return (u32)div_u64(usecs * clk_freq, 1000000ULL);
}
/* Port traffic class frame preemption register */
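
With the clock frequency now passed in, the same coalescing value maps to different cycle counts per SoC. A hedged usage sketch, where the helper name and the 50 us setting are made up for illustration and only the two constants defined above are assumed:

static u32 example_itr_to_cycles(bool clk_is_400mhz)
{
	/* 50 us is 20000 cycles at 400 MHz and 16650 cycles at 333 MHz */
	return enetc_usecs_to_cycles(50, clk_is_400mhz ? ENETC_CLK_400M
						       : ENETC_CLK_333M);
}
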
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
index a1b595bd7993..e108cac8288d 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
@@ -4,11 +4,35 @@
#include <linux/of_mdio.h>
#include "enetc_pf.h"
+#define NETC_EMDIO_VEN_ID 0x1131
+#define NETC_EMDIO_DEV_ID 0xee00
#define ENETC_MDIO_DEV_ID 0xee01
#define ENETC_MDIO_DEV_NAME "FSL PCIe IE Central MDIO"
#define ENETC_MDIO_BUS_NAME ENETC_MDIO_DEV_NAME " Bus"
#define ENETC_MDIO_DRV_NAME ENETC_MDIO_DEV_NAME " driver"
+DEFINE_STATIC_KEY_FALSE(enetc_has_err050089);
+EXPORT_SYMBOL_GPL(enetc_has_err050089);
+
+static void enetc_emdio_enable_err050089(struct pci_dev *pdev)
+{
+ if (pdev->vendor == PCI_VENDOR_ID_FREESCALE &&
+ pdev->device == ENETC_MDIO_DEV_ID) {
+ static_branch_inc(&enetc_has_err050089);
+ dev_info(&pdev->dev, "Enabled ERR050089 workaround\n");
+ }
+}
+
+static void enetc_emdio_disable_err050089(struct pci_dev *pdev)
+{
+ if (pdev->vendor == PCI_VENDOR_ID_FREESCALE &&
+ pdev->device == ENETC_MDIO_DEV_ID) {
+ static_branch_dec(&enetc_has_err050089);
+ if (!static_key_enabled(&enetc_has_err050089.key))
+ dev_info(&pdev->dev, "Disabled ERR050089 workaround\n");
+ }
+}
+
static int enetc_pci_mdio_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -62,6 +86,8 @@ static int enetc_pci_mdio_probe(struct pci_dev *pdev,
goto err_pci_mem_reg;
}
+ enetc_emdio_enable_err050089(pdev);
+
err = of_mdiobus_register(bus, dev->of_node);
if (err)
goto err_mdiobus_reg;
@@ -71,6 +97,7 @@ static int enetc_pci_mdio_probe(struct pci_dev *pdev,
return 0;
err_mdiobus_reg:
+ enetc_emdio_disable_err050089(pdev);
pci_release_region(pdev, 0);
err_pci_mem_reg:
pci_disable_device(pdev);
@@ -88,6 +115,9 @@ static void enetc_pci_mdio_remove(struct pci_dev *pdev)
struct enetc_mdio_priv *mdio_priv;
mdiobus_unregister(bus);
+
+ enetc_emdio_disable_err050089(pdev);
+
mdio_priv = bus->priv;
iounmap(mdio_priv->hw->port);
pci_release_region(pdev, 0);
@@ -96,6 +126,7 @@ static void enetc_pci_mdio_remove(struct pci_dev *pdev)
static const struct pci_device_id enetc_pci_mdio_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_MDIO_DEV_ID) },
+ { PCI_DEVICE(NETC_EMDIO_VEN_ID, NETC_EMDIO_DEV_ID) },
{ 0, } /* End of table. */
};
MODULE_DEVICE_TABLE(pci, enetc_pci_mdio_id_table);
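
enetc_has_err050089 is reference counted with static_branch_inc()/static_branch_dec() rather than simply toggled, so the workaround in the register accessors stays active as long as at least one affected MDIO function is bound. A minimal, self-contained sketch of that pattern, with illustrative names that are not part of this patch:

#include <linux/jump_label.h>
#include <linux/printk.h>

DEFINE_STATIC_KEY_FALSE(example_errata_key);

static void example_probe_one(void)
{
	static_branch_inc(&example_errata_key);		/* one reference per device */
}

static void example_remove_one(void)
{
	static_branch_dec(&example_errata_key);
	if (!static_key_enabled(&example_errata_key.key))
		pr_info("workaround fully disabled\n");	/* last reference gone */
}

static void example_fast_path(void)
{
	if (static_branch_unlikely(&example_errata_key))
		pr_debug("errata handling active\n");
}
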
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index 11b14555802c..de0fb272c847 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -1,16 +1,14 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2017-2019 NXP */
-#include <asm/unaligned.h>
-#include <linux/mdio.h>
+#include <linux/unaligned.h>
#include <linux/module.h>
-#include <linux/fsl/enetc_mdio.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
-#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/pcs-lynx.h>
#include "enetc_ierb.h"
-#include "enetc_pf.h"
+#include "enetc_pf_common.h"
#define ENETC_DRV_NAME_STR "ENETC PF driver"
@@ -33,18 +31,15 @@ static void enetc_pf_set_primary_mac_addr(struct enetc_hw *hw, int si,
__raw_writew(lower, hw->port + ENETC_PSIPMAR1(si));
}
-static int enetc_pf_set_mac_addr(struct net_device *ndev, void *addr)
+static struct phylink_pcs *enetc_pf_create_pcs(struct enetc_pf *pf,
+ struct mii_bus *bus)
{
- struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct sockaddr *saddr = addr;
-
- if (!is_valid_ether_addr(saddr->sa_data))
- return -EADDRNOTAVAIL;
-
- eth_hw_addr_set(ndev, saddr->sa_data);
- enetc_pf_set_primary_mac_addr(&priv->si->hw, 0, saddr->sa_data);
+ return lynx_pcs_create_mdiodev(bus, 0);
+}
- return 0;
+static void enetc_pf_destroy_pcs(struct phylink_pcs *pcs)
+{
+ lynx_pcs_destroy(pcs);
}
static void enetc_set_vlan_promisc(struct enetc_hw *hw, char si_map)
@@ -77,30 +72,6 @@ static void enetc_set_isol_vlan(struct enetc_hw *hw, int si, u16 vlan, u8 qos)
enetc_port_wr(hw, ENETC_PSIVLANR(si), val);
}
-static int enetc_mac_addr_hash_idx(const u8 *addr)
-{
- u64 fold = __swab64(ether_addr_to_u64(addr)) >> 16;
- u64 mask = 0;
- int res = 0;
- int i;
-
- for (i = 0; i < 8; i++)
- mask |= BIT_ULL(i * 6);
-
- for (i = 0; i < 6; i++)
- res |= (hweight64(fold & (mask << i)) & 0x1) << i;
-
- return res;
-}
-
-static void enetc_reset_mac_addr_filter(struct enetc_mac_filter *filter)
-{
- filter->mac_addr_cnt = 0;
-
- bitmap_zero(filter->mac_hash_table,
- ENETC_MADDR_HASH_TBL_SZ);
-}
-
static void enetc_add_mac_addr_em_filter(struct enetc_mac_filter *filter,
const unsigned char *addr)
{
@@ -109,16 +80,6 @@ static void enetc_add_mac_addr_em_filter(struct enetc_mac_filter *filter,
filter->mac_addr_cnt++;
}
-static void enetc_add_mac_addr_ht_filter(struct enetc_mac_filter *filter,
- const unsigned char *addr)
-{
- int idx = enetc_mac_addr_hash_idx(addr);
-
- /* add hash table entry */
- __set_bit(idx, filter->mac_hash_table);
- filter->mac_addr_cnt++;
-}
-
static void enetc_clear_mac_ht_flt(struct enetc_si *si, int si_idx, int type)
{
bool err = si->errata & ENETC_ERR_UCMCSWP;
@@ -255,67 +216,6 @@ static void enetc_pf_set_rx_mode(struct net_device *ndev)
enetc_port_wr(hw, ENETC_PSIPMR, psipmr);
}
-static void enetc_set_vlan_ht_filter(struct enetc_hw *hw, int si_idx,
- unsigned long hash)
-{
- enetc_port_wr(hw, ENETC_PSIVHFR0(si_idx), lower_32_bits(hash));
- enetc_port_wr(hw, ENETC_PSIVHFR1(si_idx), upper_32_bits(hash));
-}
-
-static int enetc_vid_hash_idx(unsigned int vid)
-{
- int res = 0;
- int i;
-
- for (i = 0; i < 6; i++)
- res |= (hweight8(vid & (BIT(i) | BIT(i + 6))) & 0x1) << i;
-
- return res;
-}
-
-static void enetc_sync_vlan_ht_filter(struct enetc_pf *pf, bool rehash)
-{
- int i;
-
- if (rehash) {
- bitmap_zero(pf->vlan_ht_filter, ENETC_VLAN_HT_SIZE);
-
- for_each_set_bit(i, pf->active_vlans, VLAN_N_VID) {
- int hidx = enetc_vid_hash_idx(i);
-
- __set_bit(hidx, pf->vlan_ht_filter);
- }
- }
-
- enetc_set_vlan_ht_filter(&pf->si->hw, 0, *pf->vlan_ht_filter);
-}
-
-static int enetc_vlan_rx_add_vid(struct net_device *ndev, __be16 prot, u16 vid)
-{
- struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct enetc_pf *pf = enetc_si_priv(priv->si);
- int idx;
-
- __set_bit(vid, pf->active_vlans);
-
- idx = enetc_vid_hash_idx(vid);
- if (!__test_and_set_bit(idx, pf->vlan_ht_filter))
- enetc_sync_vlan_ht_filter(pf, false);
-
- return 0;
-}
-
-static int enetc_vlan_rx_del_vid(struct net_device *ndev, __be16 prot, u16 vid)
-{
- struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct enetc_pf *pf = enetc_si_priv(priv->si);
-
- __clear_bit(vid, pf->active_vlans);
- enetc_sync_vlan_ht_filter(pf, true);
-
- return 0;
-}
-
static void enetc_set_loopback(struct net_device *ndev, bool en)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
@@ -393,56 +293,6 @@ static int enetc_pf_set_vf_spoofchk(struct net_device *ndev, int vf, bool en)
return 0;
}
-static int enetc_setup_mac_address(struct device_node *np, struct enetc_pf *pf,
- int si)
-{
- struct device *dev = &pf->si->pdev->dev;
- struct enetc_hw *hw = &pf->si->hw;
- u8 mac_addr[ETH_ALEN] = { 0 };
- int err;
-
- /* (1) try to get the MAC address from the device tree */
- if (np) {
- err = of_get_mac_address(np, mac_addr);
- if (err == -EPROBE_DEFER)
- return err;
- }
-
- /* (2) bootloader supplied MAC address */
- if (is_zero_ether_addr(mac_addr))
- enetc_pf_get_primary_mac_addr(hw, si, mac_addr);
-
- /* (3) choose a random one */
- if (is_zero_ether_addr(mac_addr)) {
- eth_random_addr(mac_addr);
- dev_info(dev, "no MAC address specified for SI%d, using %pM\n",
- si, mac_addr);
- }
-
- enetc_pf_set_primary_mac_addr(hw, si, mac_addr);
-
- return 0;
-}
-
-static int enetc_setup_mac_addresses(struct device_node *np,
- struct enetc_pf *pf)
-{
- int err, i;
-
- /* The PF might take its MAC from the device tree */
- err = enetc_setup_mac_address(np, pf, 0);
- if (err)
- return err;
-
- for (i = 0; i < pf->total_vfs; i++) {
- err = enetc_setup_mac_address(NULL, pf, i + 1);
- if (err)
- return err;
- }
-
- return 0;
-}
-
static void enetc_port_assign_rfs_entries(struct enetc_si *si)
{
struct enetc_pf *pf = enetc_si_priv(si);
@@ -464,6 +314,23 @@ static void enetc_port_assign_rfs_entries(struct enetc_si *si)
enetc_port_wr(hw, ENETC_PRFSMR, ENETC_PRFSMR_RFSE);
}
+static void enetc_port_get_caps(struct enetc_si *si)
+{
+ struct enetc_hw *hw = &si->hw;
+ u32 val;
+
+ val = enetc_port_rd(hw, ENETC_PCAPR0);
+
+ if (val & ENETC_PCAPR0_QBV)
+ si->hw_features |= ENETC_SI_F_QBV;
+
+ if (val & ENETC_PCAPR0_QBU)
+ si->hw_features |= ENETC_SI_F_QBU;
+
+ if (val & ENETC_PCAPR0_PSFP)
+ si->hw_features |= ENETC_SI_F_PSFP;
+}
+
static void enetc_port_si_configure(struct enetc_si *si)
{
struct enetc_pf *pf = enetc_si_priv(si);
@@ -471,6 +338,8 @@ static void enetc_port_si_configure(struct enetc_si *si)
int num_rings, i;
u32 val;
+ enetc_port_get_caps(si);
+
val = enetc_port_rd(hw, ENETC_PCAPR0);
num_rings = min(ENETC_PCAPR0_RXBDR(val), ENETC_PCAPR0_TXBDR(val));
@@ -585,7 +454,6 @@ static void enetc_mac_enable(struct enetc_si *si, bool en)
static void enetc_configure_port(struct enetc_pf *pf)
{
- u8 hash_key[ENETC_RSSHASH_KEY_SIZE];
struct enetc_hw *hw = &pf->si->hw;
enetc_configure_port_mac(pf->si);
@@ -593,8 +461,7 @@ static void enetc_configure_port(struct enetc_pf *pf)
enetc_port_si_configure(pf->si);
/* set up hash key */
- get_random_bytes(hash_key, ENETC_RSSHASH_KEY_SIZE);
- enetc_set_rss_key(hw, hash_key);
+ enetc_set_default_rss_key(pf);
/* split up RFS entries */
enetc_port_assign_rfs_entries(pf->si);
@@ -665,19 +532,11 @@ static int enetc_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (!num_vfs) {
enetc_msg_psi_free(pf);
- kfree(pf->vf_state);
pf->num_vfs = 0;
pci_disable_sriov(pdev);
} else {
pf->num_vfs = num_vfs;
- pf->vf_state = kcalloc(num_vfs, sizeof(struct enetc_vf_state),
- GFP_KERNEL);
- if (!pf->vf_state) {
- pf->num_vfs = 0;
- return -ENOMEM;
- }
-
err = enetc_msg_psi_init(pf);
if (err) {
dev_err(&pdev->dev, "enetc_msg_psi_init (%d)\n", err);
@@ -696,7 +555,6 @@ static int enetc_sriov_configure(struct pci_dev *pdev, int num_vfs)
err_en_sriov:
enetc_msg_psi_free(pf);
err_msg_psi:
- kfree(pf->vf_state);
pf->num_vfs = 0;
return err;
@@ -773,189 +631,10 @@ static const struct net_device_ops enetc_ndev_ops = {
.ndo_setup_tc = enetc_pf_setup_tc,
.ndo_bpf = enetc_setup_bpf,
.ndo_xdp_xmit = enetc_xdp_xmit,
+ .ndo_hwtstamp_get = enetc_hwtstamp_get,
+ .ndo_hwtstamp_set = enetc_hwtstamp_set,
};
-static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
- const struct net_device_ops *ndev_ops)
-{
- struct enetc_ndev_priv *priv = netdev_priv(ndev);
-
- SET_NETDEV_DEV(ndev, &si->pdev->dev);
- priv->ndev = ndev;
- priv->si = si;
- priv->dev = &si->pdev->dev;
- si->ndev = ndev;
-
- priv->msg_enable = (NETIF_MSG_WOL << 1) - 1;
- ndev->netdev_ops = ndev_ops;
- enetc_set_ethtool_ops(ndev);
- ndev->watchdog_timeo = 5 * HZ;
- ndev->max_mtu = ENETC_MAX_MTU;
-
- ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
- NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_LOOPBACK |
- NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
- ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_RXCSUM |
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
- ndev->vlan_features = NETIF_F_SG | NETIF_F_HW_CSUM |
- NETIF_F_TSO | NETIF_F_TSO6;
-
- if (si->num_rss)
- ndev->hw_features |= NETIF_F_RXHASH;
-
- ndev->priv_flags |= IFF_UNICAST_FLT;
- ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
- NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_RX_SG |
- NETDEV_XDP_ACT_NDO_XMIT_SG;
-
- if (si->hw_features & ENETC_SI_F_PSFP && !enetc_psfp_enable(priv)) {
- priv->active_offloads |= ENETC_F_QCI;
- ndev->features |= NETIF_F_HW_TC;
- ndev->hw_features |= NETIF_F_HW_TC;
- }
-
- /* pick up primary MAC address from SI */
- enetc_load_primary_mac_addr(&si->hw, ndev);
-}
-
-static int enetc_mdio_probe(struct enetc_pf *pf, struct device_node *np)
-{
- struct device *dev = &pf->si->pdev->dev;
- struct enetc_mdio_priv *mdio_priv;
- struct mii_bus *bus;
- int err;
-
- bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv));
- if (!bus)
- return -ENOMEM;
-
- bus->name = "Freescale ENETC MDIO Bus";
- bus->read = enetc_mdio_read_c22;
- bus->write = enetc_mdio_write_c22;
- bus->read_c45 = enetc_mdio_read_c45;
- bus->write_c45 = enetc_mdio_write_c45;
- bus->parent = dev;
- mdio_priv = bus->priv;
- mdio_priv->hw = &pf->si->hw;
- mdio_priv->mdio_base = ENETC_EMDIO_BASE;
- snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
-
- err = of_mdiobus_register(bus, np);
- if (err)
- return dev_err_probe(dev, err, "cannot register MDIO bus\n");
-
- pf->mdio = bus;
-
- return 0;
-}
-
-static void enetc_mdio_remove(struct enetc_pf *pf)
-{
- if (pf->mdio)
- mdiobus_unregister(pf->mdio);
-}
-
-static int enetc_imdio_create(struct enetc_pf *pf)
-{
- struct device *dev = &pf->si->pdev->dev;
- struct enetc_mdio_priv *mdio_priv;
- struct phylink_pcs *phylink_pcs;
- struct mii_bus *bus;
- int err;
-
- bus = mdiobus_alloc_size(sizeof(*mdio_priv));
- if (!bus)
- return -ENOMEM;
-
- bus->name = "Freescale ENETC internal MDIO Bus";
- bus->read = enetc_mdio_read_c22;
- bus->write = enetc_mdio_write_c22;
- bus->read_c45 = enetc_mdio_read_c45;
- bus->write_c45 = enetc_mdio_write_c45;
- bus->parent = dev;
- bus->phy_mask = ~0;
- mdio_priv = bus->priv;
- mdio_priv->hw = &pf->si->hw;
- mdio_priv->mdio_base = ENETC_PM_IMDIO_BASE;
- snprintf(bus->id, MII_BUS_ID_SIZE, "%s-imdio", dev_name(dev));
-
- err = mdiobus_register(bus);
- if (err) {
- dev_err(dev, "cannot register internal MDIO bus (%d)\n", err);
- goto free_mdio_bus;
- }
-
- phylink_pcs = lynx_pcs_create_mdiodev(bus, 0);
- if (IS_ERR(phylink_pcs)) {
- err = PTR_ERR(phylink_pcs);
- dev_err(dev, "cannot create lynx pcs (%d)\n", err);
- goto unregister_mdiobus;
- }
-
- pf->imdio = bus;
- pf->pcs = phylink_pcs;
-
- return 0;
-
-unregister_mdiobus:
- mdiobus_unregister(bus);
-free_mdio_bus:
- mdiobus_free(bus);
- return err;
-}
-
-static void enetc_imdio_remove(struct enetc_pf *pf)
-{
- if (pf->pcs)
- lynx_pcs_destroy(pf->pcs);
- if (pf->imdio) {
- mdiobus_unregister(pf->imdio);
- mdiobus_free(pf->imdio);
- }
-}
-
-static bool enetc_port_has_pcs(struct enetc_pf *pf)
-{
- return (pf->if_mode == PHY_INTERFACE_MODE_SGMII ||
- pf->if_mode == PHY_INTERFACE_MODE_1000BASEX ||
- pf->if_mode == PHY_INTERFACE_MODE_2500BASEX ||
- pf->if_mode == PHY_INTERFACE_MODE_USXGMII);
-}
-
-static int enetc_mdiobus_create(struct enetc_pf *pf, struct device_node *node)
-{
- struct device_node *mdio_np;
- int err;
-
- mdio_np = of_get_child_by_name(node, "mdio");
- if (mdio_np) {
- err = enetc_mdio_probe(pf, mdio_np);
-
- of_node_put(mdio_np);
- if (err)
- return err;
- }
-
- if (enetc_port_has_pcs(pf)) {
- err = enetc_imdio_create(pf);
- if (err) {
- enetc_mdio_remove(pf);
- return err;
- }
- }
-
- return 0;
-}
-
-static void enetc_mdiobus_destroy(struct enetc_pf *pf)
-{
- enetc_mdio_remove(pf);
- enetc_imdio_remove(pf);
-}
-
static struct phylink_pcs *
enetc_pl_mac_select_pcs(struct phylink_config *config, phy_interface_t iface)
{
@@ -1101,47 +780,6 @@ static const struct phylink_mac_ops enetc_mac_phylink_ops = {
.mac_link_down = enetc_pl_mac_link_down,
};
-static int enetc_phylink_create(struct enetc_ndev_priv *priv,
- struct device_node *node)
-{
- struct enetc_pf *pf = enetc_si_priv(priv->si);
- struct phylink *phylink;
- int err;
-
- pf->phylink_config.dev = &priv->ndev->dev;
- pf->phylink_config.type = PHYLINK_NETDEV;
- pf->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
- MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
-
- __set_bit(PHY_INTERFACE_MODE_INTERNAL,
- pf->phylink_config.supported_interfaces);
- __set_bit(PHY_INTERFACE_MODE_SGMII,
- pf->phylink_config.supported_interfaces);
- __set_bit(PHY_INTERFACE_MODE_1000BASEX,
- pf->phylink_config.supported_interfaces);
- __set_bit(PHY_INTERFACE_MODE_2500BASEX,
- pf->phylink_config.supported_interfaces);
- __set_bit(PHY_INTERFACE_MODE_USXGMII,
- pf->phylink_config.supported_interfaces);
- phy_interface_set_rgmii(pf->phylink_config.supported_interfaces);
-
- phylink = phylink_create(&pf->phylink_config, of_fwnode_handle(node),
- pf->if_mode, &enetc_mac_phylink_ops);
- if (IS_ERR(phylink)) {
- err = PTR_ERR(phylink);
- return err;
- }
-
- priv->phylink = phylink;
-
- return 0;
-}
-
-static void enetc_phylink_destroy(struct enetc_ndev_priv *priv)
-{
- phylink_destroy(priv->phylink);
-}
-
/* Initialize the entire shared memory for the flow steering entries
* of this port (PF + VFs)
*/
@@ -1191,21 +829,36 @@ static int enetc_pf_register_with_ierb(struct pci_dev *pdev)
{
struct platform_device *ierb_pdev;
struct device_node *ierb_node;
+ int ret;
ierb_node = of_find_compatible_node(NULL, NULL,
"fsl,ls1028a-enetc-ierb");
- if (!ierb_node || !of_device_is_available(ierb_node))
+ if (!ierb_node)
return -ENODEV;
+ if (!of_device_is_available(ierb_node)) {
+ of_node_put(ierb_node);
+ return -ENODEV;
+ }
+
ierb_pdev = of_find_device_by_node(ierb_node);
of_node_put(ierb_node);
if (!ierb_pdev)
return -EPROBE_DEFER;
- return enetc_ierb_register_pf(ierb_pdev, pdev);
+ ret = enetc_ierb_register_pf(ierb_pdev, pdev);
+
+ put_device(&ierb_pdev->dev);
+
+ return ret;
}
+static const struct enetc_si_ops enetc_psi_ops = {
+ .get_rss_table = enetc_get_rss_table,
+ .set_rss_table = enetc_set_rss_table,
+};
+
static struct enetc_si *enetc_psi_create(struct pci_dev *pdev)
{
struct enetc_si *si;
@@ -1224,6 +877,14 @@ static struct enetc_si *enetc_psi_create(struct pci_dev *pdev)
goto out_pci_remove;
}
+ si->revision = enetc_get_ip_revision(&si->hw);
+ si->ops = &enetc_psi_ops;
+ err = enetc_get_driver_data(si);
+ if (err) {
+ dev_err(&pdev->dev, "Could not get PF driver data\n");
+ goto out_pci_remove;
+ }
+
err = enetc_setup_cbdr(&pdev->dev, &si->hw, ENETC_CBDR_DEFAULT_SIZE,
&si->cbd_ring);
if (err)
@@ -1259,6 +920,14 @@ static void enetc_psi_destroy(struct pci_dev *pdev)
enetc_pci_remove(pdev);
}
+static const struct enetc_pf_ops enetc_pf_ops = {
+ .set_si_primary_mac = enetc_pf_set_primary_mac_addr,
+ .get_si_primary_mac = enetc_pf_get_primary_mac_addr,
+ .create_pcs = enetc_pf_create_pcs,
+ .destroy_pcs = enetc_pf_destroy_pcs,
+ .enable_psfp = enetc_psfp_enable,
+};
+
static int enetc_pf_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -1285,7 +954,15 @@ static int enetc_pf_probe(struct pci_dev *pdev,
pf = enetc_si_priv(si);
pf->si = si;
+ pf->ops = &enetc_pf_ops;
+
pf->total_vfs = pci_sriov_get_totalvfs(pdev);
+ if (pf->total_vfs) {
+ pf->vf_state = kcalloc(pf->total_vfs, sizeof(struct enetc_vf_state),
+ GFP_KERNEL);
+ if (!pf->vf_state)
+ goto err_alloc_vf_state;
+ }
err = enetc_setup_mac_addresses(node, pf);
if (err)
@@ -1338,7 +1015,7 @@ static int enetc_pf_probe(struct pci_dev *pdev,
if (err)
goto err_mdiobus_create;
- err = enetc_phylink_create(priv, node);
+ err = enetc_phylink_create(priv, node, &enetc_mac_phylink_ops);
if (err)
goto err_phylink_create;
@@ -1363,6 +1040,8 @@ err_alloc_si_res:
free_netdev(ndev);
err_alloc_netdev:
err_setup_mac_addresses:
+ kfree(pf->vf_state);
+err_alloc_vf_state:
enetc_psi_destroy(pdev);
err_psi_create:
return err;
@@ -1389,6 +1068,7 @@ static void enetc_pf_remove(struct pci_dev *pdev)
enetc_free_si_resources(priv);
free_netdev(si->ndev);
+ kfree(pf->vf_state);
enetc_psi_destroy(pdev);
}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.h b/drivers/net/ethernet/freescale/enetc/enetc_pf.h
index c26bd66e4597..ae407e9e9ee7 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.h
@@ -5,19 +5,8 @@
#include <linux/phylink.h>
#define ENETC_PF_NUM_RINGS 8
-
-enum enetc_mac_addr_type {UC, MC, MADDR_TYPE};
#define ENETC_MAX_NUM_MAC_FLT ((ENETC_MAX_NUM_VFS + 1) * MADDR_TYPE)
-#define ENETC_MADDR_HASH_TBL_SZ 64
-struct enetc_mac_filter {
- union {
- char mac_addr[ETH_ALEN];
- DECLARE_BITMAP(mac_hash_table, ENETC_MADDR_HASH_TBL_SZ);
- };
- int mac_addr_cnt;
-};
-
#define ENETC_VLAN_HT_SIZE 64
enum enetc_vf_flags {
@@ -28,6 +17,25 @@ struct enetc_vf_state {
enum enetc_vf_flags flags;
};
+struct enetc_port_caps {
+ u32 half_duplex:1;
+ int num_vsi;
+ int num_msix;
+ int num_rx_bdr;
+ int num_tx_bdr;
+ int mac_filter_num;
+};
+
+struct enetc_pf;
+
+struct enetc_pf_ops {
+ void (*set_si_primary_mac)(struct enetc_hw *hw, int si, const u8 *addr);
+ void (*get_si_primary_mac)(struct enetc_hw *hw, int si, u8 *addr);
+ struct phylink_pcs *(*create_pcs)(struct enetc_pf *pf, struct mii_bus *bus);
+ void (*destroy_pcs)(struct phylink_pcs *pcs);
+ int (*enable_psfp)(struct enetc_ndev_priv *priv);
+};
+
struct enetc_pf {
struct enetc_si *si;
int num_vfs; /* number of active VFs, after sriov_init */
@@ -50,6 +58,11 @@ struct enetc_pf {
phy_interface_t if_mode;
struct phylink_config phylink_config;
+
+ struct enetc_port_caps caps;
+ const struct enetc_pf_ops *ops;
+
+ int num_mfe; /* number of mac address filter table entries */
};
#define phylink_to_enetc_pf(config) \
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c b/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c
new file mode 100644
index 000000000000..76263b8566bb
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c
@@ -0,0 +1,439 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2024 NXP */
+
+#include <linux/fsl/enetc_mdio.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+
+#include "enetc_pf_common.h"
+
+static void enetc_set_si_hw_addr(struct enetc_pf *pf, int si,
+ const u8 *mac_addr)
+{
+ struct enetc_hw *hw = &pf->si->hw;
+
+ pf->ops->set_si_primary_mac(hw, si, mac_addr);
+}
+
+static void enetc_get_si_hw_addr(struct enetc_pf *pf, int si, u8 *mac_addr)
+{
+ struct enetc_hw *hw = &pf->si->hw;
+
+ pf->ops->get_si_primary_mac(hw, si, mac_addr);
+}
+
+int enetc_pf_set_mac_addr(struct net_device *ndev, void *addr)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+ struct sockaddr *saddr = addr;
+
+ if (!is_valid_ether_addr(saddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ eth_hw_addr_set(ndev, saddr->sa_data);
+ enetc_set_si_hw_addr(pf, 0, saddr->sa_data);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(enetc_pf_set_mac_addr);
+
+static int enetc_setup_mac_address(struct device_node *np, struct enetc_pf *pf,
+ int si)
+{
+ struct device *dev = &pf->si->pdev->dev;
+ u8 mac_addr[ETH_ALEN] = { 0 };
+ int err;
+
+ /* (1) try to get the MAC address from the device tree */
+ if (np) {
+ err = of_get_mac_address(np, mac_addr);
+ if (err == -EPROBE_DEFER)
+ return err;
+ }
+
+ /* (2) bootloader supplied MAC address */
+ if (is_zero_ether_addr(mac_addr))
+ enetc_get_si_hw_addr(pf, si, mac_addr);
+
+ /* (3) choose a random one */
+ if (is_zero_ether_addr(mac_addr)) {
+ eth_random_addr(mac_addr);
+ dev_info(dev, "no MAC address specified for SI%d, using %pM\n",
+ si, mac_addr);
+ }
+
+ enetc_set_si_hw_addr(pf, si, mac_addr);
+
+ return 0;
+}
+
+int enetc_setup_mac_addresses(struct device_node *np, struct enetc_pf *pf)
+{
+ int err, i;
+
+ /* The PF might take its MAC from the device tree */
+ err = enetc_setup_mac_address(np, pf, 0);
+ if (err)
+ return err;
+
+ for (i = 0; i < pf->total_vfs; i++) {
+ err = enetc_setup_mac_address(NULL, pf, i + 1);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(enetc_setup_mac_addresses);
+
+void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
+ const struct net_device_ops *ndev_ops)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_pf *pf = enetc_si_priv(si);
+
+ SET_NETDEV_DEV(ndev, &si->pdev->dev);
+ priv->ndev = ndev;
+ priv->si = si;
+ priv->dev = &si->pdev->dev;
+ si->ndev = ndev;
+
+ priv->msg_enable = (NETIF_MSG_WOL << 1) - 1;
+ priv->sysclk_freq = si->drvdata->sysclk_freq;
+ priv->max_frags = si->drvdata->max_frags;
+ ndev->netdev_ops = ndev_ops;
+ enetc_set_ethtool_ops(ndev);
+ ndev->watchdog_timeo = 5 * HZ;
+ ndev->max_mtu = ENETC_MAX_MTU;
+
+ ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_L4;
+ ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_RXCSUM |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_L4;
+ ndev->vlan_features = NETIF_F_SG | NETIF_F_HW_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO6;
+
+ ndev->priv_flags |= IFF_UNICAST_FLT;
+
+ if (si->drvdata->tx_csum)
+ priv->active_offloads |= ENETC_F_TXCSUM;
+
+ if (si->hw_features & ENETC_SI_F_LSO)
+ priv->active_offloads |= ENETC_F_LSO;
+
+ if (si->num_rss) {
+ ndev->hw_features |= NETIF_F_RXHASH;
+ ndev->features |= NETIF_F_RXHASH;
+ }
+
+ if (!enetc_is_pseudo_mac(si))
+ ndev->hw_features |= NETIF_F_LOOPBACK;
+
+ /* TODO: currently, the i.MX95 ENETC driver does not support advanced features */
+ if (!is_enetc_rev1(si))
+ goto end;
+
+ ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_RX_SG |
+ NETDEV_XDP_ACT_NDO_XMIT_SG;
+
+ if (si->hw_features & ENETC_SI_F_PSFP && pf->ops->enable_psfp &&
+ !pf->ops->enable_psfp(priv)) {
+ priv->active_offloads |= ENETC_F_QCI;
+ ndev->features |= NETIF_F_HW_TC;
+ ndev->hw_features |= NETIF_F_HW_TC;
+ }
+
+end:
+ /* pick up primary MAC address from SI */
+ enetc_load_primary_mac_addr(&si->hw, ndev);
+}
+EXPORT_SYMBOL_GPL(enetc_pf_netdev_setup);
+
+static int enetc_mdio_probe(struct enetc_pf *pf, struct device_node *np)
+{
+ struct device *dev = &pf->si->pdev->dev;
+ struct enetc_mdio_priv *mdio_priv;
+ struct mii_bus *bus;
+ int err;
+
+ bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv));
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "Freescale ENETC MDIO Bus";
+ bus->read = enetc_mdio_read_c22;
+ bus->write = enetc_mdio_write_c22;
+ bus->read_c45 = enetc_mdio_read_c45;
+ bus->write_c45 = enetc_mdio_write_c45;
+ bus->parent = dev;
+ mdio_priv = bus->priv;
+ mdio_priv->hw = &pf->si->hw;
+
+ if (is_enetc_rev1(pf->si))
+ mdio_priv->mdio_base = ENETC_EMDIO_BASE;
+ else
+ mdio_priv->mdio_base = ENETC4_EMDIO_BASE;
+
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
+
+ err = of_mdiobus_register(bus, np);
+ if (err)
+ return dev_err_probe(dev, err, "cannot register MDIO bus\n");
+
+ pf->mdio = bus;
+
+ return 0;
+}
+
+static void enetc_mdio_remove(struct enetc_pf *pf)
+{
+ if (pf->mdio)
+ mdiobus_unregister(pf->mdio);
+}
+
+static int enetc_imdio_create(struct enetc_pf *pf)
+{
+ struct device *dev = &pf->si->pdev->dev;
+ struct enetc_mdio_priv *mdio_priv;
+ struct phylink_pcs *phylink_pcs;
+ struct mii_bus *bus;
+ int err;
+
+ if (!pf->ops->create_pcs) {
+ dev_err(dev, "Creating PCS is not supported\n");
+
+ return -EOPNOTSUPP;
+ }
+
+ bus = mdiobus_alloc_size(sizeof(*mdio_priv));
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "Freescale ENETC internal MDIO Bus";
+ bus->read = enetc_mdio_read_c22;
+ bus->write = enetc_mdio_write_c22;
+ bus->read_c45 = enetc_mdio_read_c45;
+ bus->write_c45 = enetc_mdio_write_c45;
+ bus->parent = dev;
+ bus->phy_mask = ~0;
+ mdio_priv = bus->priv;
+ mdio_priv->hw = &pf->si->hw;
+
+ if (is_enetc_rev1(pf->si))
+ mdio_priv->mdio_base = ENETC_PM_IMDIO_BASE;
+ else
+ mdio_priv->mdio_base = ENETC4_PM_IMDIO_BASE;
+
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-imdio", dev_name(dev));
+
+ err = mdiobus_register(bus);
+ if (err) {
+ dev_err(dev, "cannot register internal MDIO bus (%d)\n", err);
+ goto free_mdio_bus;
+ }
+
+ phylink_pcs = pf->ops->create_pcs(pf, bus);
+ if (IS_ERR(phylink_pcs)) {
+ err = PTR_ERR(phylink_pcs);
+ dev_err(dev, "cannot create lynx pcs (%d)\n", err);
+ goto unregister_mdiobus;
+ }
+
+ pf->imdio = bus;
+ pf->pcs = phylink_pcs;
+
+ return 0;
+
+unregister_mdiobus:
+ mdiobus_unregister(bus);
+free_mdio_bus:
+ mdiobus_free(bus);
+ return err;
+}
+
+static void enetc_imdio_remove(struct enetc_pf *pf)
+{
+ if (pf->pcs && pf->ops->destroy_pcs)
+ pf->ops->destroy_pcs(pf->pcs);
+
+ if (pf->imdio) {
+ mdiobus_unregister(pf->imdio);
+ mdiobus_free(pf->imdio);
+ }
+}
+
+static bool enetc_port_has_pcs(struct enetc_pf *pf)
+{
+ return (pf->if_mode == PHY_INTERFACE_MODE_SGMII ||
+ pf->if_mode == PHY_INTERFACE_MODE_1000BASEX ||
+ pf->if_mode == PHY_INTERFACE_MODE_2500BASEX ||
+ pf->if_mode == PHY_INTERFACE_MODE_USXGMII);
+}
+
+int enetc_mdiobus_create(struct enetc_pf *pf, struct device_node *node)
+{
+ struct device_node *mdio_np;
+ int err;
+
+ mdio_np = of_get_child_by_name(node, "mdio");
+ if (mdio_np) {
+ err = enetc_mdio_probe(pf, mdio_np);
+
+ of_node_put(mdio_np);
+ if (err)
+ return err;
+ }
+
+ if (enetc_port_has_pcs(pf)) {
+ err = enetc_imdio_create(pf);
+ if (err) {
+ enetc_mdio_remove(pf);
+ return err;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(enetc_mdiobus_create);
+
+void enetc_mdiobus_destroy(struct enetc_pf *pf)
+{
+ enetc_mdio_remove(pf);
+ enetc_imdio_remove(pf);
+}
+EXPORT_SYMBOL_GPL(enetc_mdiobus_destroy);
+
+int enetc_phylink_create(struct enetc_ndev_priv *priv, struct device_node *node,
+ const struct phylink_mac_ops *ops)
+{
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+ struct phylink *phylink;
+ int err;
+
+ pf->phylink_config.dev = &priv->ndev->dev;
+ pf->phylink_config.type = PHYLINK_NETDEV;
+ pf->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
+
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ pf->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ pf->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ pf->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ pf->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_USXGMII,
+ pf->phylink_config.supported_interfaces);
+ phy_interface_set_rgmii(pf->phylink_config.supported_interfaces);
+
+ phylink = phylink_create(&pf->phylink_config, of_fwnode_handle(node),
+ pf->if_mode, ops);
+ if (IS_ERR(phylink)) {
+ err = PTR_ERR(phylink);
+ return err;
+ }
+
+ priv->phylink = phylink;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(enetc_phylink_create);
+
+void enetc_phylink_destroy(struct enetc_ndev_priv *priv)
+{
+ phylink_destroy(priv->phylink);
+}
+EXPORT_SYMBOL_GPL(enetc_phylink_destroy);
+
+void enetc_set_default_rss_key(struct enetc_pf *pf)
+{
+ u8 hash_key[ENETC_RSSHASH_KEY_SIZE] = {0};
+
+ /* set up hash key */
+ get_random_bytes(hash_key, ENETC_RSSHASH_KEY_SIZE);
+ enetc_set_rss_key(pf->si, hash_key);
+}
+EXPORT_SYMBOL_GPL(enetc_set_default_rss_key);
+
+static int enetc_vid_hash_idx(unsigned int vid)
+{
+ int res = 0;
+ int i;
+
+ for (i = 0; i < 6; i++)
+ res |= (hweight8(vid & (BIT(i) | BIT(i + 6))) & 0x1) << i;
+
+ return res;
+}
+
+static void enetc_refresh_vlan_ht_filter(struct enetc_pf *pf)
+{
+ int i;
+
+ bitmap_zero(pf->vlan_ht_filter, ENETC_VLAN_HT_SIZE);
+ for_each_set_bit(i, pf->active_vlans, VLAN_N_VID) {
+ int hidx = enetc_vid_hash_idx(i);
+
+ __set_bit(hidx, pf->vlan_ht_filter);
+ }
+}
+
+static void enetc_set_si_vlan_ht_filter(struct enetc_si *si,
+ int si_id, u64 hash)
+{
+ struct enetc_hw *hw = &si->hw;
+ int high_reg_off, low_reg_off;
+
+ if (is_enetc_rev1(si)) {
+ low_reg_off = ENETC_PSIVHFR0(si_id);
+ high_reg_off = ENETC_PSIVHFR1(si_id);
+ } else {
+ low_reg_off = ENETC4_PSIVHFR0(si_id);
+ high_reg_off = ENETC4_PSIVHFR1(si_id);
+ }
+
+ enetc_port_wr(hw, low_reg_off, lower_32_bits(hash));
+ enetc_port_wr(hw, high_reg_off, upper_32_bits(hash));
+}
+
+int enetc_vlan_rx_add_vid(struct net_device *ndev, __be16 prot, u16 vid)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+ int idx;
+
+ __set_bit(vid, pf->active_vlans);
+
+ idx = enetc_vid_hash_idx(vid);
+ if (!__test_and_set_bit(idx, pf->vlan_ht_filter))
+ enetc_set_si_vlan_ht_filter(pf->si, 0, *pf->vlan_ht_filter);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(enetc_vlan_rx_add_vid);
+
+int enetc_vlan_rx_del_vid(struct net_device *ndev, __be16 prot, u16 vid)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+
+ if (__test_and_clear_bit(vid, pf->active_vlans)) {
+ enetc_refresh_vlan_ht_filter(pf);
+ enetc_set_si_vlan_ht_filter(pf->si, 0, *pf->vlan_ht_filter);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(enetc_vlan_rx_del_vid);
+
+MODULE_DESCRIPTION("NXP ENETC PF common functionality driver");
+MODULE_LICENSE("Dual BSD/GPL");
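
As a hand-worked example of the 64-entry VLAN hash above: VID 100 has bits 2, 5 and 6 set, so pair (0, 6) contributes result bit 0, pair (2, 8) contributes bit 2 and pair (5, 11) contributes bit 5, giving hash index 37. enetc_vlan_rx_add_vid() only rewrites the PSIVHFR registers when such an index is newly set, while enetc_vlan_rx_del_vid() rebuilds the whole bitmap from pf->active_vlans before writing it back.
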
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf_common.h b/drivers/net/ethernet/freescale/enetc/enetc_pf_common.h
new file mode 100644
index 000000000000..96d4840a3107
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf_common.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2024 NXP */
+
+#include "enetc_pf.h"
+
+int enetc_pf_set_mac_addr(struct net_device *ndev, void *addr);
+int enetc_setup_mac_addresses(struct device_node *np, struct enetc_pf *pf);
+void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
+ const struct net_device_ops *ndev_ops);
+int enetc_mdiobus_create(struct enetc_pf *pf, struct device_node *node);
+void enetc_mdiobus_destroy(struct enetc_pf *pf);
+int enetc_phylink_create(struct enetc_ndev_priv *priv, struct device_node *node,
+ const struct phylink_mac_ops *ops);
+void enetc_phylink_destroy(struct enetc_ndev_priv *priv);
+void enetc_set_default_rss_key(struct enetc_pf *pf);
+int enetc_vlan_rx_add_vid(struct net_device *ndev, __be16 prot, u16 vid);
+int enetc_vlan_rx_del_vid(struct net_device *ndev, __be16 prot, u16 vid);
+
+static inline u16 enetc_get_ip_revision(struct enetc_hw *hw)
+{
+ return enetc_global_rd(hw, ENETC_G_EIPBRR0) & EIPBRR0_REVISION;
+}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
index 5243fc031058..b8413d3b4f16 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
@@ -7,9 +7,6 @@
#include "enetc.h"
-int enetc_phc_index = -1;
-EXPORT_SYMBOL_GPL(enetc_phc_index);
-
static struct ptp_clock_info enetc_ptp_caps = {
.owner = THIS_MODULE,
.name = "ENETC PTP clock",
@@ -92,7 +89,6 @@ static int enetc_ptp_probe(struct pci_dev *pdev,
if (err)
goto err_no_clock;
- enetc_phc_index = ptp_qoriq->phc_index;
pci_set_drvdata(pdev, ptp_qoriq);
return 0;
@@ -118,7 +114,6 @@ static void enetc_ptp_remove(struct pci_dev *pdev)
{
struct ptp_qoriq *ptp_qoriq = pci_get_drvdata(pdev);
- enetc_phc_index = -1;
ptp_qoriq_free(ptp_qoriq);
pci_free_irq_vectors(pdev);
kfree(ptp_qoriq);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index b65da49dd926..ccf86651455c 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -336,7 +336,7 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
*
* (enetClockFrequency / portTransmitRate) * 100
*/
- hi_credit_reg = (u32)div_u64((ENETC_CLK * 100ULL) * hi_credit_bit,
+ hi_credit_reg = (u32)div_u64((priv->sysclk_freq * 100ULL) * hi_credit_bit,
port_transmit_rate * 1000000ULL);
enetc_port_wr(hw, ENETC_PTCCBSR1(tc), hi_credit_reg);
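
The credit-based shaper computation now scales with the per-SoC clock. As a rough worked example (the traffic figure is hypothetical, and port_transmit_rate is taken to be in Mbps given the * 1000000ULL factor): with hi_credit_bit = 12000 bits on a 1 Gbps port, a 333 MHz sysclk_freq gives hi_credit_reg = 333000000 * 100 * 12000 / (1000 * 1000000) = 399600, where the old fixed 400 MHz clock would have produced 480000.
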
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
index dfcaac302e24..6c4b374bcb0e 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
@@ -78,11 +78,18 @@ static int enetc_vf_set_mac_addr(struct net_device *ndev, void *addr)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct sockaddr *saddr = addr;
+ int err;
if (!is_valid_ether_addr(saddr->sa_data))
return -EADDRNOTAVAIL;
- return enetc_msg_vsi_set_primary_mac_addr(priv, saddr);
+ err = enetc_msg_vsi_set_primary_mac_addr(priv, saddr);
+ if (err)
+ return err;
+
+ eth_hw_addr_set(ndev, saddr->sa_data);
+
+ return 0;
}
static int enetc_vf_set_features(struct net_device *ndev,
@@ -114,6 +121,8 @@ static const struct net_device_ops enetc_ndev_ops = {
.ndo_set_features = enetc_vf_set_features,
.ndo_eth_ioctl = enetc_ioctl,
.ndo_setup_tc = enetc_vf_setup_tc,
+ .ndo_hwtstamp_get = enetc_hwtstamp_get,
+ .ndo_hwtstamp_set = enetc_hwtstamp_set,
};
static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
@@ -128,6 +137,8 @@ static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
si->ndev = ndev;
priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
+ priv->sysclk_freq = si->drvdata->sysclk_freq;
+ priv->max_frags = si->drvdata->max_frags;
ndev->netdev_ops = ndev_ops;
enetc_set_ethtool_ops(ndev);
ndev->watchdog_timeo = 5 * HZ;
@@ -136,21 +147,30 @@ static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
+ NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_L4;
ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
+ NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_L4;
ndev->vlan_features = NETIF_F_SG | NETIF_F_HW_CSUM |
NETIF_F_TSO | NETIF_F_TSO6;
- if (si->num_rss)
+ if (si->num_rss) {
ndev->hw_features |= NETIF_F_RXHASH;
+ ndev->features |= NETIF_F_RXHASH;
+ }
/* pick up primary MAC address from SI */
enetc_load_primary_mac_addr(&si->hw, ndev);
}
+static const struct enetc_si_ops enetc_vsi_ops = {
+ .get_rss_table = enetc_get_rss_table,
+ .set_rss_table = enetc_set_rss_table,
+};
+
static int enetc_vf_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -164,6 +184,14 @@ static int enetc_vf_probe(struct pci_dev *pdev,
return dev_err_probe(&pdev->dev, err, "PCI probing failed\n");
si = pci_get_drvdata(pdev);
+ si->revision = ENETC_REV_1_0;
+ si->ops = &enetc_vsi_ops;
+ err = enetc_get_driver_data(si);
+ if (err) {
+ dev_err_probe(&pdev->dev, err,
+ "Could not get VF driver data\n");
+ goto err_alloc_netdev;
+ }
enetc_get_si_caps(si);
diff --git a/drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c b/drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c
new file mode 100644
index 000000000000..443983fdecd9
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c
@@ -0,0 +1,845 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * NXP NETC Blocks Control Driver
+ *
+ * Copyright 2024 NXP
+ *
+ * This driver is used for pre-initialization of NETC, such as PCS and MII
+ * protocols, LDID, warm reset, etc. Therefore, all NETC device drivers can
+ * only be probed after the netc-blk-ctrl driver has completed initialization.
+ * In addition, when the system enters suspend mode, IERB, PRB, and NETCMIX
+ * will be powered off, except for WOL. Therefore, when the system resumes,
+ * these blocks need to be reinitialized.
+ */
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/fsl/netc_global.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+
+/* NETCMIX registers */
+#define IMX95_CFG_LINK_IO_VAR 0x0
+#define IO_VAR_16FF_16G_SERDES 0x1
+#define IO_VAR(port, var) (((var) & 0xf) << ((port) << 2))
+
+#define IMX95_CFG_LINK_MII_PROT 0x4
+#define CFG_LINK_MII_PORT_0 GENMASK(3, 0)
+#define CFG_LINK_MII_PORT_1 GENMASK(7, 4)
+#define MII_PROT_MII 0x0
+#define MII_PROT_RMII 0x1
+#define MII_PROT_RGMII 0x2
+#define MII_PROT_SERIAL 0x3
+#define MII_PROT(port, prot) (((prot) & 0xf) << ((port) << 2))
+
+#define IMX95_CFG_LINK_PCS_PROT(a) (0x8 + (a) * 4)
+#define PCS_PROT_1G_SGMII BIT(0)
+#define PCS_PROT_2500M_SGMII BIT(1)
+#define PCS_PROT_XFI BIT(3)
+#define PCS_PROT_SFI BIT(4)
+#define PCS_PROT_10G_SXGMII BIT(6)
+
+#define IMX94_EXT_PIN_CONTROL 0x10
+#define MAC2_MAC3_SEL BIT(1)
+
+#define IMX94_NETC_LINK_CFG(a) (0x4c + (a) * 4)
+#define NETC_LINK_CFG_MII_PROT GENMASK(3, 0)
+#define NETC_LINK_CFG_IO_VAR GENMASK(19, 16)
+
+/* NETC privileged register block register */
+#define PRB_NETCRR 0x100
+#define NETCRR_SR BIT(0)
+#define NETCRR_LOCK BIT(1)
+
+#define PRB_NETCSR 0x104
+#define NETCSR_ERROR BIT(0)
+#define NETCSR_STATE BIT(1)
+
+/* NETC integrated endpoint register block register */
+#define IERB_EMDIOFAUXR 0x344
+#define IERB_T0FAUXR 0x444
+#define IERB_ETBCR(a) (0x300c + 0x100 * (a))
+#define IERB_LBCR(a) (0x1010 + 0x40 * (a))
+#define LBCR_MDIO_PHYAD_PRTAD(addr) (((addr) & 0x1f) << 8)
+
+#define IERB_EFAUXR(a) (0x3044 + 0x100 * (a))
+#define IERB_VFAUXR(a) (0x4004 + 0x40 * (a))
+#define FAUXR_LDID GENMASK(3, 0)
+
+/* Platform information */
+#define IMX95_ENETC0_BUS_DEVFN 0x0
+#define IMX95_ENETC1_BUS_DEVFN 0x40
+#define IMX95_ENETC2_BUS_DEVFN 0x80
+
+#define IMX94_ENETC0_BUS_DEVFN 0x100
+#define IMX94_ENETC1_BUS_DEVFN 0x140
+#define IMX94_ENETC2_BUS_DEVFN 0x180
+#define IMX94_TIMER0_BUS_DEVFN 0x1
+#define IMX94_TIMER1_BUS_DEVFN 0x101
+#define IMX94_TIMER2_BUS_DEVFN 0x181
+#define IMX94_ENETC0_LINK 3
+#define IMX94_ENETC1_LINK 4
+#define IMX94_ENETC2_LINK 5
+
+#define NETC_ENETC_ID(a) (a)
+#define NETC_TIMER_ID(a) (a)
+
+/* Flags for different platforms */
+#define NETC_HAS_NETCMIX BIT(0)
+
+struct netc_devinfo {
+ u32 flags;
+ int (*netcmix_init)(struct platform_device *pdev);
+ int (*ierb_init)(struct platform_device *pdev);
+};
+
+struct netc_blk_ctrl {
+ void __iomem *prb;
+ void __iomem *ierb;
+ void __iomem *netcmix;
+
+ const struct netc_devinfo *devinfo;
+ struct platform_device *pdev;
+ struct dentry *debugfs_root;
+};
+
+static void netc_reg_write(void __iomem *base, u32 offset, u32 val)
+{
+ netc_write(base + offset, val);
+}
+
+static u32 netc_reg_read(void __iomem *base, u32 offset)
+{
+ return netc_read(base + offset);
+}
+
+static int netc_of_pci_get_bus_devfn(struct device_node *np)
+{
+ u32 reg[5];
+ int error;
+
+ error = of_property_read_u32_array(np, "reg", reg, ARRAY_SIZE(reg));
+ if (error)
+ return error;
+
+ return (reg[0] >> 8) & 0xffff;
+}
+
+static int netc_get_link_mii_protocol(phy_interface_t interface)
+{
+ switch (interface) {
+ case PHY_INTERFACE_MODE_MII:
+ return MII_PROT_MII;
+ case PHY_INTERFACE_MODE_RMII:
+ return MII_PROT_RMII;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ return MII_PROT_RGMII;
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_XGMII:
+ case PHY_INTERFACE_MODE_USXGMII:
+ return MII_PROT_SERIAL;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int imx95_netcmix_init(struct platform_device *pdev)
+{
+ struct netc_blk_ctrl *priv = platform_get_drvdata(pdev);
+ struct device_node *np = pdev->dev.of_node;
+ phy_interface_t interface;
+ int bus_devfn, mii_proto;
+ u32 val;
+ int err;
+
+ /* Default setting of MII protocol */
+ val = MII_PROT(0, MII_PROT_RGMII) | MII_PROT(1, MII_PROT_RGMII) |
+ MII_PROT(2, MII_PROT_SERIAL);
+
+ /* Update the link MII protocol through parsing phy-mode */
+ for_each_available_child_of_node_scoped(np, child) {
+ for_each_available_child_of_node_scoped(child, gchild) {
+ if (!of_device_is_compatible(gchild, "pci1131,e101"))
+ continue;
+
+ bus_devfn = netc_of_pci_get_bus_devfn(gchild);
+ if (bus_devfn < 0)
+ return -EINVAL;
+
+ if (bus_devfn == IMX95_ENETC2_BUS_DEVFN)
+ continue;
+
+ err = of_get_phy_mode(gchild, &interface);
+ if (err)
+ continue;
+
+ mii_proto = netc_get_link_mii_protocol(interface);
+ if (mii_proto < 0)
+ return -EINVAL;
+
+ switch (bus_devfn) {
+ case IMX95_ENETC0_BUS_DEVFN:
+ val = u32_replace_bits(val, mii_proto,
+ CFG_LINK_MII_PORT_0);
+ break;
+ case IMX95_ENETC1_BUS_DEVFN:
+ val = u32_replace_bits(val, mii_proto,
+ CFG_LINK_MII_PORT_1);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ }
+
+ /* Configure Link I/O variant */
+ netc_reg_write(priv->netcmix, IMX95_CFG_LINK_IO_VAR,
+ IO_VAR(2, IO_VAR_16FF_16G_SERDES));
+ /* Configure Link 2 PCS protocol */
+ netc_reg_write(priv->netcmix, IMX95_CFG_LINK_PCS_PROT(2),
+ PCS_PROT_10G_SXGMII);
+ netc_reg_write(priv->netcmix, IMX95_CFG_LINK_MII_PROT, val);
+
+ return 0;
+}
+
+static int imx94_enetc_get_link_id(struct device_node *np)
+{
+ int bus_devfn = netc_of_pci_get_bus_devfn(np);
+
+ /* Parse ENETC link number */
+ switch (bus_devfn) {
+ case IMX94_ENETC0_BUS_DEVFN:
+ return IMX94_ENETC0_LINK;
+ case IMX94_ENETC1_BUS_DEVFN:
+ return IMX94_ENETC1_LINK;
+ case IMX94_ENETC2_BUS_DEVFN:
+ return IMX94_ENETC2_LINK;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int imx94_link_config(struct netc_blk_ctrl *priv,
+ struct device_node *np, int link_id)
+{
+ phy_interface_t interface;
+ int mii_proto;
+ u32 val;
+
+ /* The node may be disabled and may not have a 'phy-mode'
+ * or 'phy-connection-type' property.
+ */
+ if (of_get_phy_mode(np, &interface))
+ return 0;
+
+ mii_proto = netc_get_link_mii_protocol(interface);
+ if (mii_proto < 0)
+ return mii_proto;
+
+ val = mii_proto & NETC_LINK_CFG_MII_PROT;
+ if (val == MII_PROT_SERIAL)
+ val = u32_replace_bits(val, IO_VAR_16FF_16G_SERDES,
+ NETC_LINK_CFG_IO_VAR);
+
+ netc_reg_write(priv->netcmix, IMX94_NETC_LINK_CFG(link_id), val);
+
+ return 0;
+}
+
+static int imx94_enetc_link_config(struct netc_blk_ctrl *priv,
+ struct device_node *np)
+{
+ int link_id = imx94_enetc_get_link_id(np);
+
+ if (link_id < 0)
+ return link_id;
+
+ return imx94_link_config(priv, np, link_id);
+}
+
+static int imx94_netcmix_init(struct platform_device *pdev)
+{
+ struct netc_blk_ctrl *priv = platform_get_drvdata(pdev);
+ struct device_node *np = pdev->dev.of_node;
+ u32 val;
+ int err;
+
+ for_each_child_of_node_scoped(np, child) {
+ for_each_child_of_node_scoped(child, gchild) {
+ if (!of_device_is_compatible(gchild, "pci1131,e101"))
+ continue;
+
+ err = imx94_enetc_link_config(priv, gchild);
+ if (err)
+ return err;
+ }
+ }
+
+ /* ENETC 0 and switch port 2 share the same parallel interface.
+ * Currently, the switch is not supported, so this interface is
+ * used by ENETC 0 by default.
+ */
+ val = netc_reg_read(priv->netcmix, IMX94_EXT_PIN_CONTROL);
+ val |= MAC2_MAC3_SEL;
+ netc_reg_write(priv->netcmix, IMX94_EXT_PIN_CONTROL, val);
+
+ return 0;
+}
+
+static bool netc_ierb_is_locked(struct netc_blk_ctrl *priv)
+{
+ return !!(netc_reg_read(priv->prb, PRB_NETCRR) & NETCRR_LOCK);
+}
+
+static int netc_lock_ierb(struct netc_blk_ctrl *priv)
+{
+ u32 val;
+
+ netc_reg_write(priv->prb, PRB_NETCRR, NETCRR_LOCK);
+
+ return read_poll_timeout(netc_reg_read, val, !(val & NETCSR_STATE),
+ 100, 2000, false, priv->prb, PRB_NETCSR);
+}
+
+static int netc_unlock_ierb_with_warm_reset(struct netc_blk_ctrl *priv)
+{
+ u32 val;
+
+ netc_reg_write(priv->prb, PRB_NETCRR, 0);
+
+ return read_poll_timeout(netc_reg_read, val, !(val & NETCRR_LOCK),
+ 1000, 100000, true, priv->prb, PRB_NETCRR);
+}
+
+static int netc_get_phy_addr(struct device_node *np)
+{
+ struct device_node *mdio_node, *phy_node;
+ u32 addr = 0;
+ int err = 0;
+
+ mdio_node = of_get_child_by_name(np, "mdio");
+ if (!mdio_node)
+ return 0;
+
+ phy_node = of_get_next_child(mdio_node, NULL);
+ if (!phy_node)
+ goto of_put_mdio_node;
+
+ err = of_property_read_u32(phy_node, "reg", &addr);
+ if (err)
+ goto of_put_phy_node;
+
+ if (addr >= PHY_MAX_ADDR)
+ err = -EINVAL;
+
+of_put_phy_node:
+ of_node_put(phy_node);
+
+of_put_mdio_node:
+ of_node_put(mdio_node);
+
+ return err ? err : addr;
+}
+
+static int netc_parse_emdio_phy_mask(struct device_node *np, u32 *phy_mask)
+{
+ u32 mask = 0;
+
+ for_each_child_of_node_scoped(np, child) {
+ u32 addr;
+ int err;
+
+ err = of_property_read_u32(child, "reg", &addr);
+ if (err)
+ return err;
+
+ if (addr >= PHY_MAX_ADDR)
+ return -EINVAL;
+
+ mask |= BIT(addr);
+ }
+
+ *phy_mask = mask;
+
+ return 0;
+}
+
+static int netc_get_emdio_phy_mask(struct device_node *np, u32 *phy_mask)
+{
+ for_each_child_of_node_scoped(np, child) {
+ for_each_child_of_node_scoped(child, gchild) {
+ if (!of_device_is_compatible(gchild, "pci1131,ee00"))
+ continue;
+
+ return netc_parse_emdio_phy_mask(gchild, phy_mask);
+ }
+ }
+
+ return 0;
+}
+
+static int imx95_enetc_mdio_phyaddr_config(struct platform_device *pdev)
+{
+ struct netc_blk_ctrl *priv = platform_get_drvdata(pdev);
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ int bus_devfn, addr, err;
+ u32 phy_mask = 0;
+
+ err = netc_get_emdio_phy_mask(np, &phy_mask);
+ if (err) {
+ dev_err(dev, "Failed to get PHY address mask\n");
+ return err;
+ }
+
+ /* Update the port EMDIO PHY address through parsing phy properties.
+ * This is needed when using the port EMDIO, but it is harmless when
+ * using the central EMDIO, so apply it in all cases.
+ */
+ for_each_child_of_node_scoped(np, child) {
+ for_each_child_of_node_scoped(child, gchild) {
+ if (!of_device_is_compatible(gchild, "pci1131,e101"))
+ continue;
+
+ bus_devfn = netc_of_pci_get_bus_devfn(gchild);
+ if (bus_devfn < 0) {
+ dev_err(dev, "Failed to get BDF number\n");
+ return bus_devfn;
+ }
+
+ addr = netc_get_phy_addr(gchild);
+ if (addr < 0) {
+ dev_err(dev, "Failed to get PHY address\n");
+ return addr;
+ }
+
+ if (phy_mask & BIT(addr)) {
+ dev_err(dev,
+ "Find same PHY address in EMDIO and ENETC node\n");
+ return -EINVAL;
+ }
+
+ /* The default value of LaBCR[MDIO_PHYAD_PRTAD] is
+ * 0, so no need to set the register.
+ */
+ if (!addr)
+ continue;
+
+ switch (bus_devfn) {
+ case IMX95_ENETC0_BUS_DEVFN:
+ netc_reg_write(priv->ierb, IERB_LBCR(0),
+ LBCR_MDIO_PHYAD_PRTAD(addr));
+ break;
+ case IMX95_ENETC1_BUS_DEVFN:
+ netc_reg_write(priv->ierb, IERB_LBCR(1),
+ LBCR_MDIO_PHYAD_PRTAD(addr));
+ break;
+ case IMX95_ENETC2_BUS_DEVFN:
+ netc_reg_write(priv->ierb, IERB_LBCR(2),
+ LBCR_MDIO_PHYAD_PRTAD(addr));
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int imx95_ierb_init(struct platform_device *pdev)
+{
+ struct netc_blk_ctrl *priv = platform_get_drvdata(pdev);
+
+ /* EMDIO: No MSI-X interrupt */
+ netc_reg_write(priv->ierb, IERB_EMDIOFAUXR, 0);
+ /* ENETC0 PF */
+ netc_reg_write(priv->ierb, IERB_EFAUXR(0), 0);
+ /* ENETC0 VF0 */
+ netc_reg_write(priv->ierb, IERB_VFAUXR(0), 1);
+ /* ENETC0 VF1 */
+ netc_reg_write(priv->ierb, IERB_VFAUXR(1), 2);
+ /* ENETC1 PF */
+ netc_reg_write(priv->ierb, IERB_EFAUXR(1), 3);
+ /* ENETC1 VF0 */
+ netc_reg_write(priv->ierb, IERB_VFAUXR(2), 5);
+ /* ENETC1 VF1 */
+ netc_reg_write(priv->ierb, IERB_VFAUXR(3), 6);
+ /* ENETC2 PF */
+ netc_reg_write(priv->ierb, IERB_EFAUXR(2), 4);
+ /* ENETC2 VF0 */
+ netc_reg_write(priv->ierb, IERB_VFAUXR(4), 5);
+ /* ENETC2 VF1 */
+ netc_reg_write(priv->ierb, IERB_VFAUXR(5), 6);
+ /* NETC TIMER */
+ netc_reg_write(priv->ierb, IERB_T0FAUXR, 7);
+
+ return imx95_enetc_mdio_phyaddr_config(pdev);
+}
+
+static int imx94_get_enetc_id(struct device_node *np)
+{
+ int bus_devfn = netc_of_pci_get_bus_devfn(np);
+
+ /* Parse ENETC ID */
+ switch (bus_devfn) {
+ case IMX94_ENETC0_BUS_DEVFN:
+ return NETC_ENETC_ID(0);
+ case IMX94_ENETC1_BUS_DEVFN:
+ return NETC_ENETC_ID(1);
+ case IMX94_ENETC2_BUS_DEVFN:
+ return NETC_ENETC_ID(2);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int imx94_get_timer_id(struct device_node *np)
+{
+ int bus_devfn = netc_of_pci_get_bus_devfn(np);
+
+ /* Parse the NETC PTP timer ID: timer0 is on bus 0,
+ * while timer1 and timer2 are on bus 1.
+ */
+ switch (bus_devfn) {
+ case IMX94_TIMER0_BUS_DEVFN:
+ return NETC_TIMER_ID(0);
+ case IMX94_TIMER1_BUS_DEVFN:
+ return NETC_TIMER_ID(1);
+ case IMX94_TIMER2_BUS_DEVFN:
+ return NETC_TIMER_ID(2);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int imx94_enetc_update_tid(struct netc_blk_ctrl *priv,
+ struct device_node *np)
+{
+ struct device *dev = &priv->pdev->dev;
+ struct device_node *timer_np;
+ int eid, tid;
+
+ eid = imx94_get_enetc_id(np);
+ if (eid < 0) {
+ dev_err(dev, "Failed to get ENETC ID\n");
+ return eid;
+ }
+
+ timer_np = of_parse_phandle(np, "ptp-timer", 0);
+ if (!timer_np) {
+ /* If 'ptp-timer' is not present, timer1, which sits on the same
+ * PCIe bus as the standalone ENETCs, is their default timer.
+ */
+ tid = NETC_TIMER_ID(1);
+ goto end;
+ }
+
+ tid = imx94_get_timer_id(timer_np);
+ of_node_put(timer_np);
+ if (tid < 0) {
+ dev_err(dev, "Failed to get NETC Timer ID\n");
+ return tid;
+ }
+
+end:
+ netc_reg_write(priv->ierb, IERB_ETBCR(eid), tid);
+
+ return 0;
+}
+
+static int imx94_enetc_mdio_phyaddr_config(struct netc_blk_ctrl *priv,
+ struct device_node *np,
+ u32 phy_mask)
+{
+ struct device *dev = &priv->pdev->dev;
+ int bus_devfn, addr;
+
+ bus_devfn = netc_of_pci_get_bus_devfn(np);
+ if (bus_devfn < 0) {
+ dev_err(dev, "Failed to get BDF number\n");
+ return bus_devfn;
+ }
+
+ addr = netc_get_phy_addr(np);
+ if (addr <= 0) {
+ dev_err(dev, "Failed to get PHY address\n");
+ return addr;
+ }
+
+ if (phy_mask & BIT(addr)) {
+ dev_err(dev,
+ "Find same PHY address in EMDIO and ENETC node\n");
+ return -EINVAL;
+ }
+
+ switch (bus_devfn) {
+ case IMX94_ENETC0_BUS_DEVFN:
+ netc_reg_write(priv->ierb, IERB_LBCR(IMX94_ENETC0_LINK),
+ LBCR_MDIO_PHYAD_PRTAD(addr));
+ break;
+ case IMX94_ENETC1_BUS_DEVFN:
+ netc_reg_write(priv->ierb, IERB_LBCR(IMX94_ENETC1_LINK),
+ LBCR_MDIO_PHYAD_PRTAD(addr));
+ break;
+ case IMX94_ENETC2_BUS_DEVFN:
+ netc_reg_write(priv->ierb, IERB_LBCR(IMX94_ENETC2_LINK),
+ LBCR_MDIO_PHYAD_PRTAD(addr));
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int imx94_ierb_init(struct platform_device *pdev)
+{
+ struct netc_blk_ctrl *priv = platform_get_drvdata(pdev);
+ struct device_node *np = pdev->dev.of_node;
+ u32 phy_mask = 0;
+ int err;
+
+ err = netc_get_emdio_phy_mask(np, &phy_mask);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to get PHY address mask\n");
+ return err;
+ }
+
+ for_each_child_of_node_scoped(np, child) {
+ for_each_child_of_node_scoped(child, gchild) {
+ if (!of_device_is_compatible(gchild, "pci1131,e101"))
+ continue;
+
+ err = imx94_enetc_update_tid(priv, gchild);
+ if (err)
+ return err;
+
+ err = imx94_enetc_mdio_phyaddr_config(priv, gchild,
+ phy_mask);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int netc_ierb_init(struct platform_device *pdev)
+{
+ struct netc_blk_ctrl *priv = platform_get_drvdata(pdev);
+ const struct netc_devinfo *devinfo = priv->devinfo;
+ int err;
+
+ if (netc_ierb_is_locked(priv)) {
+ err = netc_unlock_ierb_with_warm_reset(priv);
+ if (err) {
+ dev_err(&pdev->dev, "Unlock IERB failed.\n");
+ return err;
+ }
+ }
+
+ if (devinfo->ierb_init) {
+ err = devinfo->ierb_init(pdev);
+ if (err)
+ return err;
+ }
+
+ err = netc_lock_ierb(priv);
+ if (err) {
+ dev_err(&pdev->dev, "Lock IERB failed.\n");
+ return err;
+ }
+
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+static int netc_prb_show(struct seq_file *s, void *data)
+{
+ struct netc_blk_ctrl *priv = s->private;
+ u32 val;
+
+ val = netc_reg_read(priv->prb, PRB_NETCRR);
+ seq_printf(s, "[PRB NETCRR] Lock:%d SR:%d\n",
+ (val & NETCRR_LOCK) ? 1 : 0,
+ (val & NETCRR_SR) ? 1 : 0);
+
+ val = netc_reg_read(priv->prb, PRB_NETCSR);
+ seq_printf(s, "[PRB NETCSR] State:%d Error:%d\n",
+ (val & NETCSR_STATE) ? 1 : 0,
+ (val & NETCSR_ERROR) ? 1 : 0);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(netc_prb);
+
+static void netc_blk_ctrl_create_debugfs(struct netc_blk_ctrl *priv)
+{
+ struct dentry *root;
+
+ root = debugfs_create_dir("netc_blk_ctrl", NULL);
+ if (IS_ERR(root))
+ return;
+
+ priv->debugfs_root = root;
+
+ debugfs_create_file("prb", 0444, root, priv, &netc_prb_fops);
+}
+
+static void netc_blk_ctrl_remove_debugfs(struct netc_blk_ctrl *priv)
+{
+ debugfs_remove_recursive(priv->debugfs_root);
+ priv->debugfs_root = NULL;
+}
+
+#else
+
+static void netc_blk_ctrl_create_debugfs(struct netc_blk_ctrl *priv)
+{
+}
+
+static void netc_blk_ctrl_remove_debugfs(struct netc_blk_ctrl *priv)
+{
+}
+#endif
+
+static int netc_prb_check_error(struct netc_blk_ctrl *priv)
+{
+ if (netc_reg_read(priv->prb, PRB_NETCSR) & NETCSR_ERROR)
+ return -1;
+
+ return 0;
+}
+
+static const struct netc_devinfo imx95_devinfo = {
+ .flags = NETC_HAS_NETCMIX,
+ .netcmix_init = imx95_netcmix_init,
+ .ierb_init = imx95_ierb_init,
+};
+
+static const struct netc_devinfo imx94_devinfo = {
+ .flags = NETC_HAS_NETCMIX,
+ .netcmix_init = imx94_netcmix_init,
+ .ierb_init = imx94_ierb_init,
+};
+
+static const struct of_device_id netc_blk_ctrl_match[] = {
+ { .compatible = "nxp,imx95-netc-blk-ctrl", .data = &imx95_devinfo },
+ { .compatible = "nxp,imx94-netc-blk-ctrl", .data = &imx94_devinfo },
+ {},
+};
+MODULE_DEVICE_TABLE(of, netc_blk_ctrl_match);
+
+static int netc_blk_ctrl_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ const struct netc_devinfo *devinfo;
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *id;
+ struct netc_blk_ctrl *priv;
+ struct clk *ipg_clk;
+ void __iomem *regs;
+ int err;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->pdev = pdev;
+ ipg_clk = devm_clk_get_optional_enabled(dev, "ipg");
+ if (IS_ERR(ipg_clk))
+ return dev_err_probe(dev, PTR_ERR(ipg_clk),
+ "Set ipg clock failed\n");
+
+ id = of_match_device(netc_blk_ctrl_match, dev);
+ if (!id)
+ return dev_err_probe(dev, -EINVAL, "Cannot match device\n");
+
+ devinfo = (struct netc_devinfo *)id->data;
+ if (!devinfo)
+ return dev_err_probe(dev, -EINVAL, "No device information\n");
+
+ priv->devinfo = devinfo;
+ regs = devm_platform_ioremap_resource_byname(pdev, "ierb");
+ if (IS_ERR(regs))
+ return dev_err_probe(dev, PTR_ERR(regs),
+ "Missing IERB resource\n");
+
+ priv->ierb = regs;
+ regs = devm_platform_ioremap_resource_byname(pdev, "prb");
+ if (IS_ERR(regs))
+ return dev_err_probe(dev, PTR_ERR(regs),
+ "Missing PRB resource\n");
+
+ priv->prb = regs;
+ if (devinfo->flags & NETC_HAS_NETCMIX) {
+ regs = devm_platform_ioremap_resource_byname(pdev, "netcmix");
+ if (IS_ERR(regs))
+ return dev_err_probe(dev, PTR_ERR(regs),
+ "Missing NETCMIX resource\n");
+ priv->netcmix = regs;
+ }
+
+ platform_set_drvdata(pdev, priv);
+ if (devinfo->netcmix_init) {
+ err = devinfo->netcmix_init(pdev);
+ if (err)
+ return dev_err_probe(dev, err,
+ "Initializing NETCMIX failed\n");
+ }
+
+ err = netc_ierb_init(pdev);
+ if (err)
+ return dev_err_probe(dev, err, "Initializing IERB failed\n");
+
+ if (netc_prb_check_error(priv) < 0)
+ dev_warn(dev, "The current IERB configuration is invalid\n");
+
+ netc_blk_ctrl_create_debugfs(priv);
+
+ err = of_platform_populate(node, NULL, NULL, dev);
+ if (err) {
+ netc_blk_ctrl_remove_debugfs(priv);
+ return dev_err_probe(dev, err, "of_platform_populate failed\n");
+ }
+
+ return 0;
+}
+
+static void netc_blk_ctrl_remove(struct platform_device *pdev)
+{
+ struct netc_blk_ctrl *priv = platform_get_drvdata(pdev);
+
+ of_platform_depopulate(&pdev->dev);
+ netc_blk_ctrl_remove_debugfs(priv);
+}
+
+static struct platform_driver netc_blk_ctrl_driver = {
+ .driver = {
+ .name = "nxp-netc-blk-ctrl",
+ .of_match_table = netc_blk_ctrl_match,
+ },
+ .probe = netc_blk_ctrl_probe,
+ .remove = netc_blk_ctrl_remove,
+};
+
+module_platform_driver(netc_blk_ctrl_driver);
+
+MODULE_DESCRIPTION("NXP NETC Blocks Control Driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/freescale/enetc/ntmp.c b/drivers/net/ethernet/freescale/enetc/ntmp.c
new file mode 100644
index 000000000000..0c1d343253bf
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/ntmp.c
@@ -0,0 +1,457 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * NETC NTMP (NETC Table Management Protocol) 2.0 Library
+ * Copyright 2025 NXP
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/fsl/netc_global.h>
+#include <linux/iopoll.h>
+
+#include "ntmp_private.h"
+
+#define NETC_CBDR_TIMEOUT 1000 /* us */
+#define NETC_CBDR_DELAY_US 10
+#define NETC_CBDR_MR_EN BIT(31)
+
+#define NTMP_BASE_ADDR_ALIGN 128
+#define NTMP_DATA_ADDR_ALIGN 32
+
+/* Define NTMP Table ID */
+#define NTMP_MAFT_ID 1
+#define NTMP_RSST_ID 3
+
+/* Generic Update Actions for most tables */
+#define NTMP_GEN_UA_CFGEU BIT(0)
+#define NTMP_GEN_UA_STSEU BIT(1)
+
+#define NTMP_ENTRY_ID_SIZE 4
+#define RSST_ENTRY_NUM 64
+#define RSST_STSE_DATA_SIZE(n) ((n) * 8)
+#define RSST_CFGE_DATA_SIZE(n) (n)
+
+int ntmp_init_cbdr(struct netc_cbdr *cbdr, struct device *dev,
+ const struct netc_cbdr_regs *regs)
+{
+ int cbd_num = NETC_CBDR_BD_NUM;
+ size_t size;
+
+ size = cbd_num * sizeof(union netc_cbd) + NTMP_BASE_ADDR_ALIGN;
+ cbdr->addr_base = dma_alloc_coherent(dev, size, &cbdr->dma_base,
+ GFP_KERNEL);
+ if (!cbdr->addr_base)
+ return -ENOMEM;
+
+ cbdr->dma_size = size;
+ cbdr->bd_num = cbd_num;
+ cbdr->regs = *regs;
+ cbdr->dev = dev;
+
+ /* The base address of the Control BD Ring must be 128 bytes aligned */
+ cbdr->dma_base_align = ALIGN(cbdr->dma_base, NTMP_BASE_ADDR_ALIGN);
+ cbdr->addr_base_align = PTR_ALIGN(cbdr->addr_base,
+ NTMP_BASE_ADDR_ALIGN);
+
+ spin_lock_init(&cbdr->ring_lock);
+
+ cbdr->next_to_use = netc_read(cbdr->regs.pir);
+ cbdr->next_to_clean = netc_read(cbdr->regs.cir);
+
+ /* Step 1: Configure the base address of the Control BD Ring */
+ netc_write(cbdr->regs.bar0, lower_32_bits(cbdr->dma_base_align));
+ netc_write(cbdr->regs.bar1, upper_32_bits(cbdr->dma_base_align));
+
+ /* Step 2: Configure the number of BDs of the Control BD Ring */
+ netc_write(cbdr->regs.lenr, cbdr->bd_num);
+
+ /* Step 3: Enable the Control BD Ring */
+ netc_write(cbdr->regs.mr, NETC_CBDR_MR_EN);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ntmp_init_cbdr);
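
Editor's note: the init path above over-allocates the ring by NTMP_BASE_ADDR_ALIGN bytes and then rounds both the DMA and the CPU address up, so a 128-byte-aligned ring always fits inside the allocation. A minimal standalone sketch of that round-up trick, with a hypothetical address (illustration only, not part of the patch):

#include <stdint.h>
#include <stdio.h>

#define BASE_ADDR_ALIGN 128	/* must be a power of two */

/* Round x up to the next multiple of align, as the kernel's ALIGN() does. */
static uintptr_t align_up(uintptr_t x, uintptr_t align)
{
	return (x + align - 1) & ~(align - 1);
}

int main(void)
{
	uintptr_t dma_base = 0x80001030;	/* hypothetical DMA address */
	uintptr_t dma_base_align = align_up(dma_base, BASE_ADDR_ALIGN);

	/* Prints 0x80001080; the adjustment is at most 127 bytes, which is
	 * exactly the slack added to the allocation size.
	 */
	printf("0x%lx\n", (unsigned long)dma_base_align);
	return 0;
}
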
+
+void ntmp_free_cbdr(struct netc_cbdr *cbdr)
+{
+ /* Disable the Control BD Ring */
+ netc_write(cbdr->regs.mr, 0);
+ dma_free_coherent(cbdr->dev, cbdr->dma_size, cbdr->addr_base,
+ cbdr->dma_base);
+ memset(cbdr, 0, sizeof(*cbdr));
+}
+EXPORT_SYMBOL_GPL(ntmp_free_cbdr);
+
+static int ntmp_get_free_cbd_num(struct netc_cbdr *cbdr)
+{
+ return (cbdr->next_to_clean - cbdr->next_to_use - 1 +
+ cbdr->bd_num) % cbdr->bd_num;
+}
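
Editor's note: ntmp_get_free_cbd_num() treats the BD ring as circular and keeps one descriptor unused so that equal producer and consumer indices can only mean "empty". A self-contained sketch of the same arithmetic with hypothetical indices (illustration only, not part of the patch):

#include <stdio.h>

/* Circular-ring free count: one descriptor is always kept unused. */
static int free_slots(int next_to_clean, int next_to_use, int bd_num)
{
	return (next_to_clean - next_to_use - 1 + bd_num) % bd_num;
}

int main(void)
{
	/* 256-entry ring, consumer at 4, producer at 10: 6 in flight,
	 * 256 - 6 - 1 = 249 free.
	 */
	printf("%d\n", free_slots(4, 10, 256));
	return 0;
}
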
+
+static union netc_cbd *ntmp_get_cbd(struct netc_cbdr *cbdr, int index)
+{
+ return &((union netc_cbd *)(cbdr->addr_base_align))[index];
+}
+
+static void ntmp_clean_cbdr(struct netc_cbdr *cbdr)
+{
+ union netc_cbd *cbd;
+ int i;
+
+ i = cbdr->next_to_clean;
+ while (netc_read(cbdr->regs.cir) != i) {
+ cbd = ntmp_get_cbd(cbdr, i);
+ memset(cbd, 0, sizeof(*cbd));
+ i = (i + 1) % cbdr->bd_num;
+ }
+
+ cbdr->next_to_clean = i;
+}
+
+static int netc_xmit_ntmp_cmd(struct ntmp_user *user, union netc_cbd *cbd)
+{
+ union netc_cbd *cur_cbd;
+ struct netc_cbdr *cbdr;
+ int i, err;
+ u16 status;
+ u32 val;
+
+ /* Currently only i.MX95 ENETC is supported, and it only has one
+ * command BD ring
+ */
+ cbdr = &user->ring[0];
+
+ spin_lock_bh(&cbdr->ring_lock);
+
+ if (unlikely(!ntmp_get_free_cbd_num(cbdr)))
+ ntmp_clean_cbdr(cbdr);
+
+ i = cbdr->next_to_use;
+ cur_cbd = ntmp_get_cbd(cbdr, i);
+ *cur_cbd = *cbd;
+ dma_wmb();
+
+ /* Update producer index of both software and hardware */
+ i = (i + 1) % cbdr->bd_num;
+ cbdr->next_to_use = i;
+ netc_write(cbdr->regs.pir, i);
+
+ err = read_poll_timeout_atomic(netc_read, val, val == i,
+ NETC_CBDR_DELAY_US, NETC_CBDR_TIMEOUT,
+ true, cbdr->regs.cir);
+ if (unlikely(err))
+ goto cbdr_unlock;
+
+ dma_rmb();
+ /* Get the writeback command BD, because the caller may need
+ * to check some other fields of the response header.
+ */
+ *cbd = *cur_cbd;
+
+ /* Check the writeback error status */
+ status = le16_to_cpu(cbd->resp_hdr.error_rr) & NTMP_RESP_ERROR;
+ if (unlikely(status)) {
+ err = -EIO;
+ dev_err(user->dev, "Command BD error: 0x%04x\n", status);
+ }
+
+ ntmp_clean_cbdr(cbdr);
+ dma_wmb();
+
+cbdr_unlock:
+ spin_unlock_bh(&cbdr->ring_lock);
+
+ return err;
+}
+
+static int ntmp_alloc_data_mem(struct ntmp_dma_buf *data, void **buf_align)
+{
+ void *buf;
+
+ buf = dma_alloc_coherent(data->dev, data->size + NTMP_DATA_ADDR_ALIGN,
+ &data->dma, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ data->buf = buf;
+ *buf_align = PTR_ALIGN(buf, NTMP_DATA_ADDR_ALIGN);
+
+ return 0;
+}
+
+static void ntmp_free_data_mem(struct ntmp_dma_buf *data)
+{
+ dma_free_coherent(data->dev, data->size + NTMP_DATA_ADDR_ALIGN,
+ data->buf, data->dma);
+}
+
+static void ntmp_fill_request_hdr(union netc_cbd *cbd, dma_addr_t dma,
+ int len, int table_id, int cmd,
+ int access_method)
+{
+ dma_addr_t dma_align;
+
+ memset(cbd, 0, sizeof(*cbd));
+ dma_align = ALIGN(dma, NTMP_DATA_ADDR_ALIGN);
+ cbd->req_hdr.addr = cpu_to_le64(dma_align);
+ cbd->req_hdr.len = cpu_to_le32(len);
+ cbd->req_hdr.cmd = cmd;
+ cbd->req_hdr.access_method = FIELD_PREP(NTMP_ACCESS_METHOD,
+ access_method);
+ cbd->req_hdr.table_id = table_id;
+ cbd->req_hdr.ver_cci_rr = FIELD_PREP(NTMP_HDR_VERSION,
+ NTMP_HDR_VER2);
+	/* For NTMP version 2.0 or later */
+ cbd->req_hdr.npf = cpu_to_le32(NTMP_NPF);
+}
+
+static void ntmp_fill_crd(struct ntmp_cmn_req_data *crd, u8 tblv,
+ u8 qa, u16 ua)
+{
+ crd->update_act = cpu_to_le16(ua);
+ crd->tblv_qact = NTMP_TBLV_QACT(tblv, qa);
+}
+
+static void ntmp_fill_crd_eid(struct ntmp_req_by_eid *rbe, u8 tblv,
+ u8 qa, u16 ua, u32 entry_id)
+{
+ ntmp_fill_crd(&rbe->crd, tblv, qa, ua);
+ rbe->entry_id = cpu_to_le32(entry_id);
+}
+
+static const char *ntmp_table_name(int tbl_id)
+{
+ switch (tbl_id) {
+ case NTMP_MAFT_ID:
+ return "MAC Address Filter Table";
+ case NTMP_RSST_ID:
+ return "RSS Table";
+ default:
+ return "Unknown Table";
+	}
+}
+
+static int ntmp_delete_entry_by_id(struct ntmp_user *user, int tbl_id,
+ u8 tbl_ver, u32 entry_id, u32 req_len,
+ u32 resp_len)
+{
+ struct ntmp_dma_buf data = {
+ .dev = user->dev,
+ .size = max(req_len, resp_len),
+ };
+ struct ntmp_req_by_eid *req;
+ union netc_cbd cbd;
+ int err;
+
+ err = ntmp_alloc_data_mem(&data, (void **)&req);
+ if (err)
+ return err;
+
+ ntmp_fill_crd_eid(req, tbl_ver, 0, 0, entry_id);
+ ntmp_fill_request_hdr(&cbd, data.dma, NTMP_LEN(req_len, resp_len),
+ tbl_id, NTMP_CMD_DELETE, NTMP_AM_ENTRY_ID);
+
+ err = netc_xmit_ntmp_cmd(user, &cbd);
+ if (err)
+ dev_err(user->dev,
+ "Failed to delete entry 0x%x of %s, err: %pe",
+ entry_id, ntmp_table_name(tbl_id), ERR_PTR(err));
+
+ ntmp_free_data_mem(&data);
+
+ return err;
+}
+
+static int ntmp_query_entry_by_id(struct ntmp_user *user, int tbl_id,
+ u32 len, struct ntmp_req_by_eid *req,
+ dma_addr_t dma, bool compare_eid)
+{
+ struct ntmp_cmn_resp_query *resp;
+ int cmd = NTMP_CMD_QUERY;
+ union netc_cbd cbd;
+ u32 entry_id;
+ int err;
+
+ entry_id = le32_to_cpu(req->entry_id);
+ if (le16_to_cpu(req->crd.update_act))
+ cmd = NTMP_CMD_QU;
+
+ /* Request header */
+ ntmp_fill_request_hdr(&cbd, dma, len, tbl_id, cmd, NTMP_AM_ENTRY_ID);
+ err = netc_xmit_ntmp_cmd(user, &cbd);
+ if (err) {
+ dev_err(user->dev,
+ "Failed to query entry 0x%x of %s, err: %pe\n",
+ entry_id, ntmp_table_name(tbl_id), ERR_PTR(err));
+ return err;
+ }
+
+ /* For a few tables, the first field of their response data is not
+ * entry_id, so directly return success.
+ */
+ if (!compare_eid)
+ return 0;
+
+ resp = (struct ntmp_cmn_resp_query *)req;
+ if (unlikely(le32_to_cpu(resp->entry_id) != entry_id)) {
+ dev_err(user->dev,
+ "%s: query EID 0x%x doesn't match response EID 0x%x\n",
+ ntmp_table_name(tbl_id), entry_id, le32_to_cpu(resp->entry_id));
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int ntmp_maft_add_entry(struct ntmp_user *user, u32 entry_id,
+ struct maft_entry_data *maft)
+{
+ struct ntmp_dma_buf data = {
+ .dev = user->dev,
+ .size = sizeof(struct maft_req_add),
+ };
+ struct maft_req_add *req;
+ union netc_cbd cbd;
+ int err;
+
+ err = ntmp_alloc_data_mem(&data, (void **)&req);
+ if (err)
+ return err;
+
+ /* Set mac address filter table request data buffer */
+ ntmp_fill_crd_eid(&req->rbe, user->tbl.maft_ver, 0, 0, entry_id);
+ req->keye = maft->keye;
+ req->cfge = maft->cfge;
+
+ ntmp_fill_request_hdr(&cbd, data.dma, NTMP_LEN(data.size, 0),
+ NTMP_MAFT_ID, NTMP_CMD_ADD, NTMP_AM_ENTRY_ID);
+ err = netc_xmit_ntmp_cmd(user, &cbd);
+ if (err)
+ dev_err(user->dev, "Failed to add MAFT entry 0x%x, err: %pe\n",
+ entry_id, ERR_PTR(err));
+
+ ntmp_free_data_mem(&data);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ntmp_maft_add_entry);
+
+int ntmp_maft_query_entry(struct ntmp_user *user, u32 entry_id,
+ struct maft_entry_data *maft)
+{
+ struct ntmp_dma_buf data = {
+ .dev = user->dev,
+ .size = sizeof(struct maft_resp_query),
+ };
+ struct maft_resp_query *resp;
+ struct ntmp_req_by_eid *req;
+ int err;
+
+ err = ntmp_alloc_data_mem(&data, (void **)&req);
+ if (err)
+ return err;
+
+ ntmp_fill_crd_eid(req, user->tbl.maft_ver, 0, 0, entry_id);
+ err = ntmp_query_entry_by_id(user, NTMP_MAFT_ID,
+ NTMP_LEN(sizeof(*req), data.size),
+ req, data.dma, true);
+ if (err)
+ goto end;
+
+ resp = (struct maft_resp_query *)req;
+ maft->keye = resp->keye;
+ maft->cfge = resp->cfge;
+
+end:
+ ntmp_free_data_mem(&data);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ntmp_maft_query_entry);
+
+int ntmp_maft_delete_entry(struct ntmp_user *user, u32 entry_id)
+{
+ return ntmp_delete_entry_by_id(user, NTMP_MAFT_ID, user->tbl.maft_ver,
+ entry_id, NTMP_EID_REQ_LEN, 0);
+}
+EXPORT_SYMBOL_GPL(ntmp_maft_delete_entry);
+
+int ntmp_rsst_update_entry(struct ntmp_user *user, const u32 *table,
+ int count)
+{
+ struct ntmp_dma_buf data = {.dev = user->dev};
+ struct rsst_req_update *req;
+ union netc_cbd cbd;
+ int err, i;
+
+ if (count != RSST_ENTRY_NUM)
+ /* HW only takes in a full 64 entry table */
+ return -EINVAL;
+
+ data.size = struct_size(req, groups, count);
+ err = ntmp_alloc_data_mem(&data, (void **)&req);
+ if (err)
+ return err;
+
+ /* Set the request data buffer */
+ ntmp_fill_crd_eid(&req->rbe, user->tbl.rsst_ver, 0,
+ NTMP_GEN_UA_CFGEU | NTMP_GEN_UA_STSEU, 0);
+ for (i = 0; i < count; i++)
+ req->groups[i] = (u8)(table[i]);
+
+ ntmp_fill_request_hdr(&cbd, data.dma, NTMP_LEN(data.size, 0),
+ NTMP_RSST_ID, NTMP_CMD_UPDATE, NTMP_AM_ENTRY_ID);
+
+ err = netc_xmit_ntmp_cmd(user, &cbd);
+ if (err)
+ dev_err(user->dev, "Failed to update RSST entry, err: %pe\n",
+ ERR_PTR(err));
+
+ ntmp_free_data_mem(&data);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ntmp_rsst_update_entry);
+
+int ntmp_rsst_query_entry(struct ntmp_user *user, u32 *table, int count)
+{
+ struct ntmp_dma_buf data = {.dev = user->dev};
+ struct ntmp_req_by_eid *req;
+ union netc_cbd cbd;
+ int err, i;
+ u8 *group;
+
+ if (count != RSST_ENTRY_NUM)
+ /* HW only takes in a full 64 entry table */
+ return -EINVAL;
+
+ data.size = NTMP_ENTRY_ID_SIZE + RSST_STSE_DATA_SIZE(count) +
+ RSST_CFGE_DATA_SIZE(count);
+ err = ntmp_alloc_data_mem(&data, (void **)&req);
+ if (err)
+ return err;
+
+ /* Set the request data buffer */
+ ntmp_fill_crd_eid(req, user->tbl.rsst_ver, 0, 0, 0);
+ ntmp_fill_request_hdr(&cbd, data.dma, NTMP_LEN(sizeof(*req), data.size),
+ NTMP_RSST_ID, NTMP_CMD_QUERY, NTMP_AM_ENTRY_ID);
+ err = netc_xmit_ntmp_cmd(user, &cbd);
+ if (err) {
+ dev_err(user->dev, "Failed to query RSST entry, err: %pe\n",
+ ERR_PTR(err));
+ goto end;
+ }
+
+ group = (u8 *)req;
+ group += NTMP_ENTRY_ID_SIZE + RSST_STSE_DATA_SIZE(count);
+ for (i = 0; i < count; i++)
+ table[i] = group[i];
+
+end:
+ ntmp_free_data_mem(&data);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ntmp_rsst_query_entry);
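
Editor's note: the RSST query response parsed above is laid out as a 4-byte entry ID, then 8 statistics (STSE) bytes per entry, then 1 configuration (CFGE) byte per entry, which is why the group pointer is advanced by NTMP_ENTRY_ID_SIZE + RSST_STSE_DATA_SIZE(count). A small sketch of that offset arithmetic for the fixed 64-entry table (hypothetical, outside the patch):

#include <stdio.h>

#define ENTRY_ID_SIZE		4
#define STSE_DATA_SIZE(n)	((n) * 8)	/* 8 statistics bytes/entry */
#define CFGE_DATA_SIZE(n)	(n)		/* 1 group byte/entry */

int main(void)
{
	int count = 64;
	int group_offset = ENTRY_ID_SIZE + STSE_DATA_SIZE(count);
	int resp_size = group_offset + CFGE_DATA_SIZE(count);

	/* Prints "516 580": group bytes start at offset 516 of a
	 * 580-byte response buffer.
	 */
	printf("%d %d\n", group_offset, resp_size);
	return 0;
}
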
+
+MODULE_DESCRIPTION("NXP NETC Library");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/freescale/enetc/ntmp_private.h b/drivers/net/ethernet/freescale/enetc/ntmp_private.h
new file mode 100644
index 000000000000..34394e40fddd
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/ntmp_private.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * NTMP table request and response data buffer formats
+ * Copyright 2025 NXP
+ */
+
+#ifndef __NTMP_PRIVATE_H
+#define __NTMP_PRIVATE_H
+
+#include <linux/bitfield.h>
+#include <linux/fsl/ntmp.h>
+
+#define NTMP_EID_REQ_LEN 8
+#define NETC_CBDR_BD_NUM 256
+
+union netc_cbd {
+ struct {
+ __le64 addr;
+ __le32 len;
+#define NTMP_RESP_LEN GENMASK(19, 0)
+#define NTMP_REQ_LEN GENMASK(31, 20)
+#define NTMP_LEN(req, resp) (FIELD_PREP(NTMP_REQ_LEN, (req)) | \
+ ((resp) & NTMP_RESP_LEN))
+ u8 cmd;
+#define NTMP_CMD_DELETE BIT(0)
+#define NTMP_CMD_UPDATE BIT(1)
+#define NTMP_CMD_QUERY BIT(2)
+#define NTMP_CMD_ADD BIT(3)
+#define NTMP_CMD_QU (NTMP_CMD_QUERY | NTMP_CMD_UPDATE)
+ u8 access_method;
+#define NTMP_ACCESS_METHOD GENMASK(7, 4)
+#define NTMP_AM_ENTRY_ID 0
+#define NTMP_AM_EXACT_KEY 1
+#define NTMP_AM_SEARCH 2
+#define NTMP_AM_TERNARY_KEY 3
+ u8 table_id;
+ u8 ver_cci_rr;
+#define NTMP_HDR_VERSION GENMASK(5, 0)
+#define NTMP_HDR_VER2 2
+#define NTMP_CCI BIT(6)
+#define NTMP_RR BIT(7)
+ __le32 resv[3];
+ __le32 npf;
+#define NTMP_NPF BIT(15)
+ } req_hdr; /* NTMP Request Message Header Format */
+
+ struct {
+ __le32 resv0[3];
+ __le16 num_matched;
+ __le16 error_rr;
+#define NTMP_RESP_ERROR GENMASK(11, 0)
+#define NTMP_RESP_RR BIT(15)
+ __le32 resv1[4];
+ } resp_hdr; /* NTMP Response Message Header Format */
+};
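
Editor's note: the len field of the request header packs the request length into bits 31:20 and the response length into bits 19:0, which is what the NTMP_LEN() macro above builds with FIELD_PREP. An equivalent userspace sketch using plain shifts, with a hypothetical length (illustration only, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Request length in bits 31:20 (12 bits), response length in bits 19:0. */
static uint32_t ntmp_len(uint32_t req, uint32_t resp)
{
	return ((req & 0xfff) << 20) | (resp & 0xfffff);
}

int main(void)
{
	/* An 8-byte request with no response payload encodes as 0x00800000. */
	printf("0x%08x\n", ntmp_len(8, 0));
	return 0;
}
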
+
+struct ntmp_dma_buf {
+ struct device *dev;
+ size_t size;
+ void *buf;
+ dma_addr_t dma;
+};
+
+struct ntmp_cmn_req_data {
+ __le16 update_act;
+ u8 dbg_opt;
+ u8 tblv_qact;
+#define NTMP_QUERY_ACT GENMASK(3, 0)
+#define NTMP_TBL_VER GENMASK(7, 4)
+#define NTMP_TBLV_QACT(v, a) (FIELD_PREP(NTMP_TBL_VER, (v)) | \
+ ((a) & NTMP_QUERY_ACT))
+};
+
+struct ntmp_cmn_resp_query {
+ __le32 entry_id;
+};
+
+/* Generic structure for request data by entry ID */
+struct ntmp_req_by_eid {
+ struct ntmp_cmn_req_data crd;
+ __le32 entry_id;
+};
+
+/* MAC Address Filter Table Request Data Buffer Format of Add action */
+struct maft_req_add {
+ struct ntmp_req_by_eid rbe;
+ struct maft_keye_data keye;
+ struct maft_cfge_data cfge;
+};
+
+/* MAC Address Filter Table Response Data Buffer Format of Query action */
+struct maft_resp_query {
+ __le32 entry_id;
+ struct maft_keye_data keye;
+ struct maft_cfge_data cfge;
+};
+
+/* RSS Table Request Data Buffer Format of Update action */
+struct rsst_req_update {
+ struct ntmp_req_by_eid rbe;
+ u8 groups[];
+};
+
+#endif
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index a19cb2a786fd..fd9a93d02f8e 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -14,19 +14,17 @@
#define FEC_H
/****************************************************************************/
+#include <dt-bindings/firmware/imx/rsrc.h>
+#include <linux/bpf.h>
#include <linux/clocksource.h>
+#include <linux/firmware/imx/sci.h>
#include <linux/net_tstamp.h>
#include <linux/pm_qos.h>
-#include <linux/bpf.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
-#include <dt-bindings/firmware/imx/rsrc.h>
-#include <linux/firmware/imx/sci.h>
#include <net/xdp.h>
-#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
- defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
- defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
+#if !defined(CONFIG_M5272) || defined(CONFIG_COMPILE_TEST)
/*
* Just figures, Motorola would have to change the offsets for
* registers in the same peripheral device on different models
@@ -115,7 +113,7 @@
#define IEEE_T_MCOL 0x254 /* Frames tx'd with multiple collision */
#define IEEE_T_DEF 0x258 /* Frames tx'd after deferral delay */
#define IEEE_T_LCOL 0x25c /* Frames tx'd with late collision */
-#define IEEE_T_EXCOL 0x260 /* Frames tx'd with excesv collisions */
+#define IEEE_T_EXCOL 0x260 /* Frames tx'd with excessive collisions */
#define IEEE_T_MACERR 0x264 /* Frames tx'd with TX FIFO underrun */
#define IEEE_T_CSERR 0x268 /* Frames tx'd with carrier sense err */
#define IEEE_T_SQE 0x26c /* Frames tx'd with SQE err */
@@ -242,23 +240,6 @@ struct bufdesc_ex {
__fec16 res0[4];
};
-/*
- * The following definitions courtesy of commproc.h, which where
- * Copyright (c) 1997 Dan Malek (dmalek@jlc.net).
- */
-#define BD_SC_EMPTY ((ushort)0x8000) /* Receive is empty */
-#define BD_SC_READY ((ushort)0x8000) /* Transmit is ready */
-#define BD_SC_WRAP ((ushort)0x2000) /* Last buffer descriptor */
-#define BD_SC_INTRPT ((ushort)0x1000) /* Interrupt on change */
-#define BD_SC_CM ((ushort)0x0200) /* Continuous mode */
-#define BD_SC_ID ((ushort)0x0100) /* Rec'd too many idles */
-#define BD_SC_P ((ushort)0x0100) /* xmt preamble */
-#define BD_SC_BR ((ushort)0x0020) /* Break received */
-#define BD_SC_FR ((ushort)0x0010) /* Framing error */
-#define BD_SC_PR ((ushort)0x0008) /* Parity error */
-#define BD_SC_OV ((ushort)0x0002) /* Overrun */
-#define BD_SC_CD ((ushort)0x0001) /* ?? */
-
/* Buffer descriptor control/status used by Ethernet receive.
*/
#define BD_ENET_RX_EMPTY ((ushort)0x8000)
@@ -342,16 +323,17 @@ struct bufdesc_ex {
#define FEC_TX_BD_FTYPE(X) (((X) & 0xf) << 20)
/* The number of Tx and Rx buffers. These are allocated from the page
- * pool. The code may assume these are power of two, so it it best
+ * pool. The code may assume these are power of two, so it is best
* to keep them that size.
* We don't need to allocate pages for the transmitter. We just use
* the skbuffer directly.
*/
+#define FEC_DRV_RESERVE_SPACE (XDP_PACKET_HEADROOM + \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define FEC_ENET_XDP_HEADROOM (XDP_PACKET_HEADROOM)
#define FEC_ENET_RX_PAGES 256
-#define FEC_ENET_RX_FRSIZE (PAGE_SIZE - FEC_ENET_XDP_HEADROOM \
- - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define FEC_ENET_RX_FRSIZE (PAGE_SIZE - FEC_DRV_RESERVE_SPACE)
#define FEC_ENET_RX_FRPPG (PAGE_SIZE / FEC_ENET_RX_FRSIZE)
#define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
#define FEC_ENET_TX_FRSIZE 2048
@@ -460,7 +442,7 @@ struct bufdesc_ex {
#define FEC_QUIRK_SINGLE_MDIO (1 << 11)
/* Controller supports RACC register */
#define FEC_QUIRK_HAS_RACC (1 << 12)
-/* Controller supports interrupt coalesc */
+/* Controller supports interrupt coalesce */
#define FEC_QUIRK_HAS_COALESCE (1 << 13)
/* Interrupt doesn't wake CPU from deep idle */
#define FEC_QUIRK_ERR006687 (1 << 14)
@@ -495,7 +477,7 @@ struct bufdesc_ex {
*/
#define FEC_QUIRK_HAS_EEE (1 << 20)
-/* i.MX8QM ENET IP version add new feture to generate delayed TXC/RXC
+/* The i.MX8QM ENET IP version adds a new feature to generate delayed TXC/RXC
* as an alternative option to make sure it works well with various PHYs.
* For the implementation of delayed clock, ENET takes synchronized 250MHz
* clocks to generate 2ns delay.
@@ -513,6 +495,9 @@ struct bufdesc_ex {
*/
#define FEC_QUIRK_HAS_MDIO_C45 BIT(24)
+/* Jumbo Frame support */
+#define FEC_QUIRK_JUMBO_FRAME BIT(25)
+
struct bufdesc_prop {
int qid;
/* Address of Rx and Tx buffers */
@@ -526,12 +511,6 @@ struct bufdesc_prop {
unsigned char dsize_log2;
};
-struct fec_enet_priv_txrx_info {
- int offset;
- struct page *page;
- struct sk_buff *skb;
-};
-
enum {
RX_XDP_REDIRECT = 0,
RX_XDP_PASS,
@@ -571,7 +550,7 @@ struct fec_enet_priv_tx_q {
struct fec_enet_priv_rx_q {
struct bufdesc_prop bd;
- struct fec_enet_priv_txrx_info rx_skb_info[RX_RING_SIZE];
+ struct page *rx_buf[RX_RING_SIZE];
/* page_pool */
struct page_pool *page_pool;
@@ -614,12 +593,14 @@ struct fec_enet_private {
unsigned int num_tx_queues;
unsigned int num_rx_queues;
- /* The saved address of a sent-in-place packet/buffer, for skfree(). */
struct fec_enet_priv_tx_q *tx_queue[FEC_ENET_MAX_TX_QS];
struct fec_enet_priv_rx_q *rx_queue[FEC_ENET_MAX_RX_QS];
unsigned int total_tx_ring_size;
unsigned int total_rx_ring_size;
+ unsigned int max_buf_size;
+ unsigned int pagepool_order;
+ unsigned int rx_frame_size;
struct platform_device *pdev;
@@ -662,7 +643,6 @@ struct fec_enet_private {
struct pm_qos_request pm_qos_req;
unsigned int tx_align;
- unsigned int rx_align;
/* hw interrupt coalesce */
unsigned int rx_pkts_itr;
@@ -671,8 +651,6 @@ struct fec_enet_private {
unsigned int tx_time_itr;
unsigned int itr_clk_rate;
- /* tx lpi eee mode */
- struct ethtool_keee eee;
unsigned int clk_ref_rate;
/* ptp clock period in ns*/
@@ -683,6 +661,7 @@ struct fec_enet_private {
unsigned int reload_period;
int pps_enable;
unsigned int next_counter;
+ bool perout_enable;
struct hrtimer perout_timer;
u64 perout_stime;
@@ -691,10 +670,19 @@ struct fec_enet_private {
/* XDP BPF Program */
struct bpf_prog *xdp_prog;
+ struct {
+ int pps_enable;
+ u64 ns_sys, ns_phc;
+ u32 at_corr;
+ u8 at_inc_corr;
+ } ptp_saved_state;
+
u64 ethtool_stats[];
};
void fec_ptp_init(struct platform_device *pdev, int irq_idx);
+void fec_ptp_restore_state(struct fec_enet_private *fep);
+void fec_ptp_save_state(struct fec_enet_private *fep);
void fec_ptp_stop(struct platform_device *pdev);
void fec_ptp_start_cyclecounter(struct net_device *ndev);
int fec_ptp_set(struct net_device *ndev, struct kernel_hwtstamp_config *config,
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 8bd213da8fb6..c685a5c0cc51 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -22,56 +22,56 @@
* Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
*/
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/pm_runtime.h>
-#include <linux/ptrace.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
+#include <linux/bitops.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
+#include <linux/cacheflush.h>
+#include <linux/clk.h>
+#include <linux/crc32.h>
#include <linux/delay.h>
-#include <linux/netdevice.h>
+#include <linux/errno.h>
#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <net/ip.h>
-#include <net/page_pool/helpers.h>
-#include <net/selftests.h>
-#include <net/tso.h>
-#include <linux/tcp.h>
-#include <linux/udp.h>
+#include <linux/fec.h>
+#include <linux/filter.h>
+#include <linux/gpio/consumer.h>
#include <linux/icmp.h>
-#include <linux/spinlock.h>
-#include <linux/workqueue.h>
-#include <linux/bitops.h>
+#include <linux/if_vlan.h>
+#include <linux/in.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/ip.h>
#include <linux/irq.h>
-#include <linux/clk.h>
-#include <linux/crc32.h>
-#include <linux/platform_device.h>
-#include <linux/property.h>
+#include <linux/kernel.h>
#include <linux/mdio.h>
-#include <linux/phy.h>
-#include <linux/fec.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
-#include <linux/regulator/consumer.h>
-#include <linux/if_vlan.h>
+#include <linux/phy.h>
#include <linux/pinctrl/consumer.h>
-#include <linux/gpio/consumer.h>
+#include <linux/phy_fixed.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/prefetch.h>
-#include <linux/mfd/syscon.h>
+#include <linux/property.h>
+#include <linux/ptrace.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/workqueue.h>
+#include <net/ip.h>
+#include <net/page_pool/helpers.h>
+#include <net/selftests.h>
+#include <net/tso.h>
#include <soc/imx/cpuidle.h>
-#include <linux/filter.h>
-#include <linux/bpf.h>
-#include <linux/bpf_trace.h>
-
-#include <asm/cacheflush.h>
#include "fec.h"
@@ -131,7 +131,7 @@ static const struct fec_devinfo fec_mvf600_info = {
FEC_QUIRK_HAS_MDIO_C45,
};
-static const struct fec_devinfo fec_imx6x_info = {
+static const struct fec_devinfo fec_imx6sx_info = {
.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
@@ -168,7 +168,8 @@ static const struct fec_devinfo fec_imx8qm_info = {
FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
- FEC_QUIRK_DELAYED_CLKS_SUPPORT | FEC_QUIRK_HAS_MDIO_C45,
+ FEC_QUIRK_DELAYED_CLKS_SUPPORT | FEC_QUIRK_HAS_MDIO_C45 |
+ FEC_QUIRK_JUMBO_FRAME,
};
static const struct fec_devinfo fec_s32v234_info = {
@@ -196,7 +197,7 @@ static const struct of_device_id fec_dt_ids[] = {
{ .compatible = "fsl,imx28-fec", .data = &fec_imx28_info, },
{ .compatible = "fsl,imx6q-fec", .data = &fec_imx6q_info, },
{ .compatible = "fsl,mvf600-fec", .data = &fec_mvf600_info, },
- { .compatible = "fsl,imx6sx-fec", .data = &fec_imx6x_info, },
+ { .compatible = "fsl,imx6sx-fec", .data = &fec_imx6sx_info, },
{ .compatible = "fsl,imx6ul-fec", .data = &fec_imx6ul_info, },
{ .compatible = "fsl,imx8mq-fec", .data = &fec_imx8mq_info, },
{ .compatible = "fsl,imx8qm-fec", .data = &fec_imx8qm_info, },
@@ -234,6 +235,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
* 2048 byte skbufs are allocated. However, alignment requirements
* varies between FEC variants. Worst case is 64, so round down by 64.
*/
+#define MAX_JUMBO_BUF_SIZE (round_down(16384 - FEC_DRV_RESERVE_SPACE - 64, 64))
#define PKT_MAXBUF_SIZE (round_down(2048 - 64, 64))
#define PKT_MINBUF_SIZE 64
@@ -251,12 +253,10 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
* size bits. Other FEC hardware does not, so we need to take that into
* account when setting it.
*/
-#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
- defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
- defined(CONFIG_ARM64)
-#define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16)
+#ifndef CONFIG_M5272
+#define OPT_ARCH_HAS_MAX_FL 1
#else
-#define OPT_FRAME_SIZE 0
+#define OPT_ARCH_HAS_MAX_FL 0
#endif
/* FEC MII MMFR bits definition */
@@ -276,16 +276,19 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#define FEC_ECR_MAGICEN BIT(2)
#define FEC_ECR_SLEEP BIT(3)
#define FEC_ECR_EN1588 BIT(4)
+#define FEC_ECR_SPEED BIT(5)
#define FEC_ECR_BYTESWP BIT(8)
/* FEC RCR bits definition */
#define FEC_RCR_LOOP BIT(0)
-#define FEC_RCR_HALFDPX BIT(1)
+#define FEC_RCR_DRT BIT(1)
#define FEC_RCR_MII BIT(2)
#define FEC_RCR_PROMISC BIT(3)
#define FEC_RCR_BC_REJ BIT(4)
#define FEC_RCR_FLOWCTL BIT(5)
+#define FEC_RCR_RGMII BIT(6)
#define FEC_RCR_RMII BIT(8)
#define FEC_RCR_10BASET BIT(9)
+#define FEC_RCR_NLC BIT(30)
/* TX WMARK bits */
#define FEC_TXWMRK_STRFWD BIT(8)
@@ -468,14 +471,14 @@ fec_enet_create_page_pool(struct fec_enet_private *fep,
{
struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
struct page_pool_params pp_params = {
- .order = 0,
+ .order = fep->pagepool_order,
.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
.pool_size = size,
.nid = dev_to_node(&fep->pdev->dev),
.dev = &fep->pdev->dev,
.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
.offset = FEC_ENET_XDP_HEADROOM,
- .max_len = FEC_ENET_RX_FRSIZE,
+ .max_len = fep->rx_frame_size,
};
int err;
@@ -714,7 +717,12 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
txq->bd.cur = bdp;
/* Trigger transmission start */
- writel(0, txq->bd.reg_desc_active);
+ if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
+ !readl(txq->bd.reg_desc_active) ||
+ !readl(txq->bd.reg_desc_active) ||
+ !readl(txq->bd.reg_desc_active) ||
+ !readl(txq->bd.reg_desc_active))
+ writel(0, txq->bd.reg_desc_active);
return 0;
}
@@ -840,6 +848,8 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
struct fec_enet_private *fep = netdev_priv(ndev);
int hdr_len, total_len, data_left;
struct bufdesc *bdp = txq->bd.cur;
+ struct bufdesc *tmp_bdp;
+ struct bufdesc_ex *ebdp;
struct tso_t tso;
unsigned int index = 0;
int ret;
@@ -913,7 +923,34 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
return 0;
err_release:
- /* TODO: Release all used data descriptors for TSO */
+ /* Release all used data descriptors for TSO */
+ tmp_bdp = txq->bd.cur;
+
+ while (tmp_bdp != bdp) {
+ /* Unmap data buffers */
+ if (tmp_bdp->cbd_bufaddr &&
+ !IS_TSO_HEADER(txq, fec32_to_cpu(tmp_bdp->cbd_bufaddr)))
+ dma_unmap_single(&fep->pdev->dev,
+ fec32_to_cpu(tmp_bdp->cbd_bufaddr),
+ fec16_to_cpu(tmp_bdp->cbd_datlen),
+ DMA_TO_DEVICE);
+
+ /* Clear standard buffer descriptor fields */
+ tmp_bdp->cbd_sc = 0;
+ tmp_bdp->cbd_datlen = 0;
+ tmp_bdp->cbd_bufaddr = 0;
+
+ /* Handle extended descriptor if enabled */
+ if (fep->bufdesc_ex) {
+ ebdp = (struct bufdesc_ex *)tmp_bdp;
+ ebdp->cbd_esc = 0;
+ }
+
+ tmp_bdp = fec_enet_get_nextdesc(tmp_bdp, &txq->bd);
+ }
+
+ dev_kfree_skb_any(skb);
+
return ret;
}
@@ -973,7 +1010,7 @@ static void fec_enet_bd_init(struct net_device *dev)
/* Set the last buffer to wrap */
bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
- bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
+ bdp->cbd_sc |= cpu_to_fec16(BD_ENET_RX_WRAP);
rxq->bd.cur = rxq->bd.base;
}
@@ -1009,7 +1046,9 @@ static void fec_enet_bd_init(struct net_device *dev)
struct page *page = txq->tx_buf[i].buf_p;
if (page)
- page_pool_put_page(page->pp, page, 0, false);
+ page_pool_put_page(pp_page_to_nmdesc(page)->pp,
+ page, 0,
+ false);
}
txq->tx_buf[i].buf_p = NULL;
@@ -1021,7 +1060,7 @@ static void fec_enet_bd_init(struct net_device *dev)
/* Set the last buffer to wrap */
bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
- bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
+ bdp->cbd_sc |= cpu_to_fec16(BD_ENET_TX_WRAP);
txq->dirty_tx = bdp;
}
}
@@ -1045,7 +1084,7 @@ static void fec_enet_enable_ring(struct net_device *ndev)
for (i = 0; i < fep->num_rx_queues; i++) {
rxq = fep->rx_queue[i];
writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i));
- writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));
+ writel(fep->max_buf_size, fep->hwp + FEC_R_BUFF_SIZE(i));
/* enable DMA1/2 */
if (i)
@@ -1064,6 +1103,40 @@ static void fec_enet_enable_ring(struct net_device *ndev)
}
}
+/* Whack a reset. We should wait for this.
+ * For the i.MX6SX SoC, ENET uses the AXI bus, so we disable the MAC
+ * instead of resetting the MAC itself.
+ */
+static void fec_ctrl_reset(struct fec_enet_private *fep, bool allow_wol)
+{
+ u32 val;
+
+ if (!allow_wol || !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
+ if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES ||
+ ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) {
+ writel(0, fep->hwp + FEC_ECNTRL);
+ } else {
+ writel(FEC_ECR_RESET, fep->hwp + FEC_ECNTRL);
+ udelay(10);
+ }
+ } else {
+ val = readl(fep->hwp + FEC_ECNTRL);
+ val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
+ writel(val, fep->hwp + FEC_ECNTRL);
+ }
+}
+
+static void fec_set_hw_mac_addr(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
+ (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
+ fep->hwp + FEC_ADDR_LOW);
+ writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
+ fep->hwp + FEC_ADDR_HIGH);
+}
+
/*
* This function is called to start or restart the FEC during a link
* change, transmit timeout, or to reconfigure the FEC. The network
@@ -1073,31 +1146,22 @@ static void
fec_restart(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- u32 temp_mac[2];
- u32 rcntl = OPT_FRAME_SIZE | 0x04;
u32 ecntl = FEC_ECR_ETHEREN;
+ u32 rcntl = FEC_RCR_MII;
- /* Whack a reset. We should wait for this.
- * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
- * instead of reset MAC itself.
- */
- if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES ||
- ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) {
- writel(0, fep->hwp + FEC_ECNTRL);
- } else {
- writel(1, fep->hwp + FEC_ECNTRL);
- udelay(10);
- }
+ if (OPT_ARCH_HAS_MAX_FL)
+ rcntl |= (fep->netdev->mtu + ETH_HLEN + ETH_FCS_LEN) << 16;
+
+ if (fep->bufdesc_ex)
+ fec_ptp_save_state(fep);
+
+ fec_ctrl_reset(fep, false);
/*
* enet-mac reset will reset mac address registers too,
* so need to reconfigure it.
*/
- memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
- writel((__force u32)cpu_to_be32(temp_mac[0]),
- fep->hwp + FEC_ADDR_LOW);
- writel((__force u32)cpu_to_be32(temp_mac[1]),
- fep->hwp + FEC_ADDR_HIGH);
+ fec_set_hw_mac_addr(ndev);
/* Clear any outstanding interrupt, except MDIO. */
writel((0xffffffff & ~FEC_ENET_MII), fep->hwp + FEC_IEVENT);
@@ -1112,7 +1176,7 @@ fec_restart(struct net_device *ndev)
writel(0x04, fep->hwp + FEC_X_CNTRL);
} else {
/* No Rcv on Xmit */
- rcntl |= 0x02;
+ rcntl |= FEC_RCR_DRT;
writel(0x0, fep->hwp + FEC_X_CNTRL);
}
@@ -1131,7 +1195,7 @@ fec_restart(struct net_device *ndev)
else
val &= ~FEC_RACC_OPTIONS;
writel(val, fep->hwp + FEC_RACC);
- writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
+ writel(min(fep->rx_frame_size, fep->max_buf_size), fep->hwp + FEC_FTRL);
}
#endif
@@ -1141,14 +1205,11 @@ fec_restart(struct net_device *ndev)
*/
if (fep->quirks & FEC_QUIRK_ENET_MAC) {
/* Enable flow control and length check */
- rcntl |= 0x40000000 | 0x00000020;
+ rcntl |= FEC_RCR_NLC | FEC_RCR_FLOWCTL;
/* RGMII, RMII or MII */
- if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII ||
- fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
- fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
- fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
- rcntl |= (1 << 6);
+ if (phy_interface_mode_is_rgmii(fep->phy_interface))
+ rcntl |= FEC_RCR_RGMII;
else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
rcntl |= FEC_RCR_RMII;
else
@@ -1157,7 +1218,7 @@ fec_restart(struct net_device *ndev)
/* 1G, 100M or 10M */
if (ndev->phydev) {
if (ndev->phydev->speed == SPEED_1000)
- ecntl |= (1 << 5);
+ ecntl |= FEC_ECR_SPEED;
else if (ndev->phydev->speed == SPEED_100)
rcntl &= ~FEC_RCR_10BASET;
else
@@ -1221,8 +1282,18 @@ fec_restart(struct net_device *ndev)
if (fep->quirks & FEC_QUIRK_ENET_MAC) {
/* enable ENET endian swap */
ecntl |= FEC_ECR_BYTESWP;
- /* enable ENET store and forward mode */
- writel(FEC_TXWMRK_STRFWD, fep->hwp + FEC_X_WMRK);
+
+ /* When Jumbo Frame is enabled, the FIFO may not be large enough
+ * to hold an entire frame. In such cases, if the MTU exceeds
+ * (PKT_MAXBUF_SIZE - ETH_HLEN - ETH_FCS_LEN), configure the interface
+ * to operate in cut-through mode, triggered by the FIFO threshold.
+ * Otherwise, enable the ENET store-and-forward mode.
+ */
+ if ((fep->quirks & FEC_QUIRK_JUMBO_FRAME) &&
+ (ndev->mtu > (PKT_MAXBUF_SIZE - ETH_HLEN - ETH_FCS_LEN)))
+ writel(0xF, fep->hwp + FEC_X_WMRK);
+ else
+ writel(FEC_TXWMRK_STRFWD, fep->hwp + FEC_X_WMRK);
}
if (fep->bufdesc_ex)
@@ -1244,8 +1315,10 @@ fec_restart(struct net_device *ndev)
writel(ecntl, fep->hwp + FEC_ECNTRL);
fec_enet_active_rxring(ndev);
- if (fep->bufdesc_ex)
+ if (fep->bufdesc_ex) {
fec_ptp_start_cyclecounter(ndev);
+ fec_ptp_restore_state(fep);
+ }
/* Enable interrupts we wish to service */
if (fep->link)
@@ -1336,22 +1409,10 @@ fec_stop(struct net_device *ndev)
netdev_err(ndev, "Graceful transmit stop did not complete!\n");
}
- /* Whack a reset. We should wait for this.
- * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
- * instead of reset MAC itself.
- */
- if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
- if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
- writel(0, fep->hwp + FEC_ECNTRL);
- } else {
- writel(FEC_ECR_RESET, fep->hwp + FEC_ECNTRL);
- udelay(10);
- }
- } else {
- val = readl(fep->hwp + FEC_ECNTRL);
- val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
- writel(val, fep->hwp + FEC_ECNTRL);
- }
+ if (fep->bufdesc_ex)
+ fec_ptp_save_state(fep);
+
+ fec_ctrl_reset(fep, true);
writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
@@ -1361,6 +1422,15 @@ fec_stop(struct net_device *ndev)
writel(FEC_ECR_ETHEREN, fep->hwp + FEC_ECNTRL);
writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
}
+
+ if (fep->bufdesc_ex) {
+ val = readl(fep->hwp + FEC_ECNTRL);
+ val |= FEC_ECR_EN1588;
+ writel(val, fep->hwp + FEC_ECNTRL);
+
+ fec_ptp_start_cyclecounter(ndev);
+ fec_ptp_restore_state(fep);
+ }
}
static void
@@ -1532,7 +1602,8 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
xdp_return_frame_rx_napi(xdpf);
} else { /* recycle pages of XDP_TX frames */
/* The dma_sync_size = 0 as XDP_TX has already synced DMA for_device */
- page_pool_put_page(page->pp, page, 0, true);
+ page_pool_put_page(pp_page_to_nmdesc(page)->pp, page,
+ 0, true);
}
txq->tx_buf[index].buf_p = NULL;
@@ -1574,19 +1645,21 @@ static void fec_enet_tx(struct net_device *ndev, int budget)
fec_enet_tx_queue(ndev, i, budget);
}
-static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
+static int fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
struct bufdesc *bdp, int index)
{
struct page *new_page;
dma_addr_t phys_addr;
new_page = page_pool_dev_alloc_pages(rxq->page_pool);
- WARN_ON(!new_page);
- rxq->rx_skb_info[index].page = new_page;
+ if (unlikely(!new_page))
+ return -ENOMEM;
- rxq->rx_skb_info[index].offset = FEC_ENET_XDP_HEADROOM;
+ rxq->rx_buf[index] = new_page;
phys_addr = page_pool_get_dma_addr(new_page) + FEC_ENET_XDP_HEADROOM;
bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
+
+ return 0;
}
static u32
@@ -1654,13 +1727,29 @@ xdp_err:
return ret;
}
+static void fec_enet_rx_vlan(const struct net_device *ndev, struct sk_buff *skb)
+{
+ if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+ const struct vlan_ethhdr *vlan_header = skb_vlan_eth_hdr(skb);
+ const u16 vlan_tag = ntohs(vlan_header->h_vlan_TCI);
+
+ /* Push and remove the vlan tag */
+
+ memmove(skb->data + VLAN_HLEN, skb->data, ETH_ALEN * 2);
+ skb_pull(skb, VLAN_HLEN);
+ __vlan_hwaccel_put_tag(skb,
+ htons(ETH_P_8021Q),
+ vlan_tag);
+ }
+}
+
/* During a receive, the bd_rx.cur points to the current incoming buffer.
* When we update through the ring, if the next incoming buffer has
* not been given to the system, we just set the empty indicator,
* effectively tossing the packet.
*/
static int
-fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
+fec_enet_rx_queue(struct net_device *ndev, u16 queue_id, int budget)
{
struct fec_enet_private *fep = netdev_priv(ndev);
struct fec_enet_priv_rx_q *rxq;
@@ -1668,11 +1757,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
unsigned short status;
struct sk_buff *skb;
ushort pkt_len;
- __u8 *data;
int pkt_received = 0;
struct bufdesc_ex *ebdp = NULL;
- bool vlan_packet_rcvd = false;
- u16 vlan_tag;
int index = 0;
bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;
struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
@@ -1681,9 +1767,9 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
int cpu = smp_processor_id();
struct xdp_buff xdp;
struct page *page;
+ __fec32 cbd_bufaddr;
u32 sub_len = 4;
-#if !defined(CONFIG_M5272)
/*If it has the FEC_QUIRK_HAS_RACC quirk property, the bit of
* FEC_RACC_SHIFT16 is set by default in the probe function.
*/
@@ -1691,7 +1777,6 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
data_start += 2;
sub_len += 2;
}
-#endif
#if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA)
/*
@@ -1706,7 +1791,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
* These get messed up if we get called due to a busy condition.
*/
bdp = rxq->bd.cur;
- xdp_init_buff(&xdp, PAGE_SIZE, &rxq->xdp_rxq);
+ xdp_init_buff(&xdp, PAGE_SIZE << fep->pagepool_order, &rxq->xdp_rxq);
while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
@@ -1746,15 +1831,22 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
ndev->stats.rx_packets++;
pkt_len = fec16_to_cpu(bdp->cbd_datlen);
ndev->stats.rx_bytes += pkt_len;
+ if (fep->quirks & FEC_QUIRK_HAS_RACC)
+ ndev->stats.rx_bytes -= 2;
index = fec_enet_get_bd_index(bdp, &rxq->bd);
- page = rxq->rx_skb_info[index].page;
+ page = rxq->rx_buf[index];
+ cbd_bufaddr = bdp->cbd_bufaddr;
+ if (fec_enet_update_cbd(rxq, bdp, index)) {
+ ndev->stats.rx_dropped++;
+ goto rx_processing_done;
+ }
+
dma_sync_single_for_cpu(&fep->pdev->dev,
- fec32_to_cpu(bdp->cbd_bufaddr),
+ fec32_to_cpu(cbd_bufaddr),
pkt_len,
DMA_FROM_DEVICE);
prefetch(page_address(page));
- fec_enet_update_cbd(rxq, bdp, index);
if (xdp_prog) {
xdp_buff_clear_frags_flag(&xdp);
@@ -1771,7 +1863,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
* include that when passing upstream as it messes up
* bridging applications.
*/
- skb = build_skb(page_address(page), PAGE_SIZE);
+ skb = build_skb(page_address(page),
+ PAGE_SIZE << fep->pagepool_order);
if (unlikely(!skb)) {
page_pool_recycle_direct(rxq->page_pool, page);
ndev->stats.rx_dropped++;
@@ -1785,10 +1878,11 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
skb_mark_for_recycle(skb);
if (unlikely(need_swap)) {
+ u8 *data;
+
data = page_address(page) + FEC_ENET_XDP_HEADROOM;
swap_buffer(data, pkt_len);
}
- data = skb->data;
/* Extract the enhanced buffer descriptor */
ebdp = NULL;
@@ -1796,20 +1890,9 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
ebdp = (struct bufdesc_ex *)bdp;
/* If this is a VLAN packet remove the VLAN Tag */
- vlan_packet_rcvd = false;
- if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
- fep->bufdesc_ex &&
- (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) {
- /* Push and remove the vlan tag */
- struct vlan_hdr *vlan_header =
- (struct vlan_hdr *) (data + ETH_HLEN);
- vlan_tag = ntohs(vlan_header->h_vlan_TCI);
-
- vlan_packet_rcvd = true;
-
- memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2);
- skb_pull(skb, VLAN_HLEN);
- }
+ if (fep->bufdesc_ex &&
+ (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN)))
+ fec_enet_rx_vlan(ndev, skb);
skb->protocol = eth_type_trans(skb, ndev);
@@ -1828,12 +1911,6 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
}
}
- /* Handle received VLAN packets */
- if (vlan_packet_rcvd)
- __vlan_hwaccel_put_tag(skb,
- htons(ETH_P_8021Q),
- vlan_tag);
-
skb_record_rx_queue(skb, queue_id);
napi_gro_receive(&fep->napi, skb);
@@ -1881,7 +1958,7 @@ static int fec_enet_rx(struct net_device *ndev, int budget)
/* Make sure that AVB queues are processed first. */
for (i = fep->num_rx_queues - 1; i >= 0; i--)
- done += fec_enet_rx_queue(ndev, budget - done, i);
+ done += fec_enet_rx_queue(ndev, i, budget - done);
return done;
}
@@ -2028,14 +2105,14 @@ static int fec_enet_us_to_tx_cycle(struct net_device *ndev, int us)
return us * (fep->clk_ref_rate / 1000) / 1000;
}
-static int fec_enet_eee_mode_set(struct net_device *ndev, bool enable)
+static int fec_enet_eee_mode_set(struct net_device *ndev, u32 lpi_timer,
+ bool enable)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct ethtool_keee *p = &fep->eee;
unsigned int sleep_cycle, wake_cycle;
if (enable) {
- sleep_cycle = fec_enet_us_to_tx_cycle(ndev, p->tx_lpi_timer);
+ sleep_cycle = fec_enet_us_to_tx_cycle(ndev, lpi_timer);
wake_cycle = sleep_cycle;
} else {
sleep_cycle = 0;
@@ -2088,7 +2165,9 @@ static void fec_enet_adjust_link(struct net_device *ndev)
napi_enable(&fep->napi);
}
if (fep->quirks & FEC_QUIRK_HAS_EEE)
- fec_enet_eee_mode_set(ndev, phy_dev->enable_tx_lpi);
+ fec_enet_eee_mode_set(ndev,
+ phy_dev->eee_cfg.tx_lpi_timer,
+ phy_dev->enable_tx_lpi);
} else {
if (fep->link) {
netif_stop_queue(ndev);
@@ -2150,7 +2229,6 @@ static int fec_enet_mdio_read_c22(struct mii_bus *bus, int mii_id, int regnum)
ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
out:
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
@@ -2199,7 +2277,6 @@ static int fec_enet_mdio_read_c45(struct mii_bus *bus, int mii_id,
ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
out:
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
@@ -2231,7 +2308,6 @@ static int fec_enet_mdio_write_c22(struct mii_bus *bus, int mii_id, int regnum,
if (ret)
netdev_err(fep->netdev, "MDIO write timeout\n");
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
@@ -2275,7 +2351,6 @@ static int fec_enet_mdio_write_c45(struct mii_bus *bus, int mii_id,
netdev_err(fep->netdev, "MDIO write timeout\n");
out:
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
@@ -2298,7 +2373,8 @@ static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev)
*/
phy_dev = of_phy_find_device(fep->phy_node);
phy_reset_after_clk_enable(phy_dev);
- put_device(&phy_dev->mdio.dev);
+ if (phy_dev)
+ put_device(&phy_dev->mdio.dev);
}
}
@@ -2394,11 +2470,8 @@ static int fec_enet_parse_rgmii_delay(struct fec_enet_private *fep,
static int fec_enet_mii_probe(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct phy_device *phy_dev = NULL;
- char mdio_bus_id[MII_BUS_ID_SIZE];
- char phy_name[MII_BUS_ID_SIZE + 3];
- int phy_id;
- int dev_id = fep->dev_id;
+ struct phy_device *phy_dev;
+ int ret;
if (fep->phy_node) {
phy_dev = of_phy_connect(ndev, fep->phy_node,
@@ -2410,30 +2483,28 @@ static int fec_enet_mii_probe(struct net_device *ndev)
}
} else {
/* check for attached phy */
- for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
- if (!mdiobus_is_registered_device(fep->mii_bus, phy_id))
- continue;
- if (dev_id--)
- continue;
- strscpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
- break;
- }
+ phy_dev = phy_find_first(fep->mii_bus);
+ if (fep->dev_id && phy_dev)
+ phy_dev = phy_find_next(fep->mii_bus, phy_dev);
- if (phy_id >= PHY_MAX_ADDR) {
+ if (!phy_dev) {
netdev_info(ndev, "no PHY, assuming direct connection to switch\n");
- strscpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
- phy_id = 0;
+ phy_dev = fixed_phy_register_100fd();
+ if (IS_ERR(phy_dev)) {
+ netdev_err(ndev, "could not register fixed PHY\n");
+ return PTR_ERR(phy_dev);
+ }
}
- snprintf(phy_name, sizeof(phy_name),
- PHY_ID_FMT, mdio_bus_id, phy_id);
- phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link,
- fep->phy_interface);
- }
+ ret = phy_connect_direct(ndev, phy_dev, &fec_enet_adjust_link,
+ fep->phy_interface);
+ if (ret) {
+ if (phy_is_pseudo_fixed_link(phy_dev))
+ fixed_phy_unregister(phy_dev);
+ netdev_err(ndev, "could not attach to PHY\n");
+ return ret;
+ }
- if (IS_ERR(phy_dev)) {
- netdev_err(ndev, "could not attach to PHY\n");
- return PTR_ERR(phy_dev);
}
/* mask with MAC supported features */
@@ -2441,9 +2512,7 @@ static int fec_enet_mii_probe(struct net_device *ndev)
phy_set_max_speed(phy_dev, 1000);
phy_remove_link_mode(phy_dev,
ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
-#if !defined(CONFIG_M5272)
phy_support_sym_pause(phy_dev);
-#endif
}
else
phy_set_max_speed(phy_dev, 100);
@@ -2470,7 +2539,6 @@ static int fec_enet_mii_init(struct platform_device *pdev)
int err = -ENXIO;
u32 mii_speed, holdtime;
u32 bus_freq;
- int addr;
/*
* The i.MX28 dual fec interfaces are not equal.
@@ -2585,11 +2653,8 @@ static int fec_enet_mii_init(struct platform_device *pdev)
of_node_put(node);
/* find all the PHY devices on the bus and set mac_managed_pm to true */
- for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
- phydev = mdiobus_get_phy(fep->mii_bus, addr);
- if (phydev)
- phydev->mac_managed_pm = true;
- }
+ mdiobus_for_each_phy(fep->mii_bus, phydev)
+ phydev->mac_managed_pm = true;
mii_cnt++;
@@ -2638,9 +2703,7 @@ static int fec_enet_get_regs_len(struct net_device *ndev)
}
/* List of registers that can be safety be read to dump them with ethtool */
-#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
- defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
- defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
+#if !defined(CONFIG_M5272) || defined(CONFIG_COMPILE_TEST)
static __u32 fec_enet_register_version = 2;
static u32 fec_enet_register_offset[] = {
FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
@@ -2714,30 +2777,22 @@ static u32 fec_enet_register_offset[] = {
static void fec_enet_get_regs(struct net_device *ndev,
struct ethtool_regs *regs, void *regbuf)
{
+ u32 reg_cnt = ARRAY_SIZE(fec_enet_register_offset);
struct fec_enet_private *fep = netdev_priv(ndev);
u32 __iomem *theregs = (u32 __iomem *)fep->hwp;
+ u32 *reg_list = fec_enet_register_offset;
struct device *dev = &fep->pdev->dev;
u32 *buf = (u32 *)regbuf;
u32 i, off;
int ret;
-#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
- defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
- defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
- u32 *reg_list;
- u32 reg_cnt;
-
- if (!of_machine_is_compatible("fsl,imx6ul")) {
- reg_list = fec_enet_register_offset;
- reg_cnt = ARRAY_SIZE(fec_enet_register_offset);
- } else {
+
+#if !defined(CONFIG_M5272) || defined(CONFIG_COMPILE_TEST)
+ if (of_machine_is_compatible("fsl,imx6ul")) {
reg_list = fec_enet_register_offset_6ul;
reg_cnt = ARRAY_SIZE(fec_enet_register_offset_6ul);
}
-#else
- /* coldfire */
- static u32 *reg_list = fec_enet_register_offset;
- static const u32 reg_cnt = ARRAY_SIZE(fec_enet_register_offset);
#endif
+
ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return;
@@ -2757,27 +2812,22 @@ static void fec_enet_get_regs(struct net_device *ndev,
buf[off] = readl(&theregs[off]);
}
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
}
static int fec_enet_get_ts_info(struct net_device *ndev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct fec_enet_private *fep = netdev_priv(ndev);
if (fep->bufdesc_ex) {
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
if (fep->ptp_clock)
info->phc_index = ptp_clock_index(fep->ptp_clock);
- else
- info->phc_index = -1;
info->tx_types = (1 << HWTSTAMP_TX_OFF) |
(1 << HWTSTAMP_TX_ON);
@@ -3068,27 +3118,25 @@ static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us)
static void fec_enet_itr_coal_set(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- int rx_itr, tx_itr;
+ u32 rx_itr = 0, tx_itr = 0;
+ int rx_ictt, tx_ictt;
- /* Must be greater than zero to avoid unpredictable behavior */
- if (!fep->rx_time_itr || !fep->rx_pkts_itr ||
- !fep->tx_time_itr || !fep->tx_pkts_itr)
- return;
-
- /* Select enet system clock as Interrupt Coalescing
- * timer Clock Source
- */
- rx_itr = FEC_ITR_CLK_SEL;
- tx_itr = FEC_ITR_CLK_SEL;
+ rx_ictt = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr);
+ tx_ictt = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr);
- /* set ICFT and ICTT */
- rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr);
- rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr));
- tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr);
- tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr));
+ if (rx_ictt > 0 && fep->rx_pkts_itr > 1) {
+ /* Enable with enet system clock as Interrupt Coalescing timer Clock Source */
+ rx_itr = FEC_ITR_EN | FEC_ITR_CLK_SEL;
+ rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr);
+ rx_itr |= FEC_ITR_ICTT(rx_ictt);
+ }
- rx_itr |= FEC_ITR_EN;
- tx_itr |= FEC_ITR_EN;
+ if (tx_ictt > 0 && fep->tx_pkts_itr > 1) {
+ /* Enable with enet system clock as Interrupt Coalescing timer Clock Source */
+ tx_itr = FEC_ITR_EN | FEC_ITR_CLK_SEL;
+ tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr);
+ tx_itr |= FEC_ITR_ICTT(tx_ictt);
+ }
writel(tx_itr, fep->hwp + FEC_TXIC0);
writel(rx_itr, fep->hwp + FEC_RXIC0);
@@ -3168,7 +3216,6 @@ static int
fec_enet_get_eee(struct net_device *ndev, struct ethtool_keee *edata)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct ethtool_keee *p = &fep->eee;
if (!(fep->quirks & FEC_QUIRK_HAS_EEE))
return -EOPNOTSUPP;
@@ -3176,8 +3223,6 @@ fec_enet_get_eee(struct net_device *ndev, struct ethtool_keee *edata)
if (!netif_running(ndev))
return -ENETDOWN;
- edata->tx_lpi_timer = p->tx_lpi_timer;
-
return phy_ethtool_get_eee(ndev->phydev, edata);
}
@@ -3185,7 +3230,6 @@ static int
fec_enet_set_eee(struct net_device *ndev, struct ethtool_keee *edata)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct ethtool_keee *p = &fep->eee;
if (!(fep->quirks & FEC_QUIRK_HAS_EEE))
return -EOPNOTSUPP;
@@ -3193,8 +3237,6 @@ fec_enet_set_eee(struct net_device *ndev, struct ethtool_keee *edata)
if (!netif_running(ndev))
return -ENETDOWN;
- p->tx_lpi_timer = edata->tx_lpi_timer;
-
return phy_ethtool_set_eee(ndev->phydev, edata);
}
@@ -3269,7 +3311,8 @@ static void fec_enet_free_buffers(struct net_device *ndev)
for (q = 0; q < fep->num_rx_queues; q++) {
rxq = fep->rx_queue[q];
for (i = 0; i < rxq->bd.ring_size; i++)
- page_pool_put_full_page(rxq->page_pool, rxq->rx_skb_info[i].page, false);
+ page_pool_put_full_page(rxq->page_pool, rxq->rx_buf[i],
+ false);
for (i = 0; i < XDP_STATS_TOTAL; i++)
rxq->stats[i] = 0;
@@ -3298,7 +3341,8 @@ static void fec_enet_free_buffers(struct net_device *ndev)
} else {
struct page *page = txq->tx_buf[i].buf_p;
- page_pool_put_page(page->pp, page, 0, false);
+ page_pool_put_page(pp_page_to_nmdesc(page)->pp,
+ page, 0, false);
}
txq->tx_buf[i].buf_p = NULL;
@@ -3394,6 +3438,19 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
return err;
}
+	/* Depending on the platform, the RX buffer must be 64-byte,
+	 * 16-byte or 4-byte aligned. Since the page pool was introduced
+	 * into the driver, the RX buffer address is always the page
+	 * address plus FEC_ENET_XDP_HEADROOM, which is 256 bytes, so it
+	 * satisfies all platforms. To keep future changes to
+	 * FEC_ENET_XDP_HEADROOM from silently breaking this hardware
+	 * limitation, the BUILD_BUG_ON() below verifies that it still
+	 * provides the required alignment.
+	 */
+ BUILD_BUG_ON(FEC_ENET_XDP_HEADROOM & 0x3f);
+
for (i = 0; i < rxq->bd.ring_size; i++) {
page = page_pool_dev_alloc_pages(rxq->page_pool);
if (!page)
@@ -3402,8 +3459,7 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
phys_addr = page_pool_get_dma_addr(page) + FEC_ENET_XDP_HEADROOM;
bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
- rxq->rx_skb_info[i].page = page;
- rxq->rx_skb_info[i].offset = FEC_ENET_XDP_HEADROOM;
+ rxq->rx_buf[i] = page;
bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
if (fep->bufdesc_ex) {
@@ -3416,7 +3472,7 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
/* Set the last buffer to wrap. */
bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
- bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
+ bdp->cbd_sc |= cpu_to_fec16(BD_ENET_RX_WRAP);
return 0;
err_alloc:
@@ -3452,7 +3508,7 @@ fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
/* Set the last buffer to wrap. */
bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
- bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
+ bdp->cbd_sc |= cpu_to_fec16(BD_ENET_TX_WRAP);
return 0;
@@ -3545,7 +3601,6 @@ err_enet_mii_probe:
err_enet_alloc:
fec_enet_clk_enable(ndev, false);
clk_enable:
- pm_runtime_mark_last_busy(&fep->pdev->dev);
pm_runtime_put_autosuspend(&fep->pdev->dev);
pinctrl_pm_select_sleep_state(&fep->pdev->dev);
return ret;
@@ -3555,8 +3610,9 @@ static int
fec_enet_close(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
+ struct phy_device *phy_dev = ndev->phydev;
- phy_stop(ndev->phydev);
+ phy_stop(phy_dev);
if (netif_device_present(ndev)) {
napi_disable(&fep->napi);
@@ -3564,7 +3620,10 @@ fec_enet_close(struct net_device *ndev)
fec_stop(ndev);
}
- phy_disconnect(ndev->phydev);
+ phy_disconnect(phy_dev);
+
+ if (!fep->phy_node && phy_is_pseudo_fixed_link(phy_dev))
+ fixed_phy_unregister(phy_dev);
if (fep->quirks & FEC_QUIRK_ERR006687)
imx6q_cpuidle_fec_irqs_unused();
@@ -3576,7 +3635,6 @@ fec_enet_close(struct net_device *ndev)
cpu_latency_qos_remove_request(&fep->pm_qos_req);
pinctrl_pm_select_sleep_state(&fep->pdev->dev);
- pm_runtime_mark_last_busy(&fep->pdev->dev);
pm_runtime_put_autosuspend(&fep->pdev->dev);
fec_enet_free_buffers(ndev);
@@ -3649,7 +3707,6 @@ static void set_multicast_list(struct net_device *ndev)
static int
fec_set_mac_address(struct net_device *ndev, void *p)
{
- struct fec_enet_private *fep = netdev_priv(ndev);
struct sockaddr *addr = p;
if (addr) {
@@ -3666,36 +3723,10 @@ fec_set_mac_address(struct net_device *ndev, void *p)
if (!netif_running(ndev))
return 0;
- writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
- (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
- fep->hwp + FEC_ADDR_LOW);
- writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
- fep->hwp + FEC_ADDR_HIGH);
- return 0;
-}
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/**
- * fec_poll_controller - FEC Poll controller function
- * @dev: The FEC network adapter
- *
- * Polled functionality used by netconsole and others in non interrupt mode
- *
- */
-static void fec_poll_controller(struct net_device *dev)
-{
- int i;
- struct fec_enet_private *fep = netdev_priv(dev);
+ fec_set_hw_mac_addr(ndev);
- for (i = 0; i < FEC_IRQ_NUM; i++) {
- if (fep->irq[i] > 0) {
- disable_irq(fep->irq[i]);
- fec_enet_interrupt(fep->irq[i], dev);
- enable_irq(fep->irq[i]);
- }
- }
+ return 0;
}
-#endif
static inline void fec_enet_set_netdev_features(struct net_device *netdev,
netdev_features_t features)
@@ -3993,6 +4024,23 @@ static int fec_hwtstamp_set(struct net_device *ndev,
return fec_ptp_set(ndev, config, extack);
}
+static int fec_change_mtu(struct net_device *ndev, int new_mtu)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int order;
+
+ if (netif_running(ndev))
+ return -EBUSY;
+
+ order = get_order(new_mtu + ETH_HLEN + ETH_FCS_LEN
+ + FEC_DRV_RESERVE_SPACE);
+ fep->rx_frame_size = (PAGE_SIZE << order) - FEC_DRV_RESERVE_SPACE;
+ fep->pagepool_order = order;
+ WRITE_ONCE(ndev->mtu, new_mtu);
+
+ return 0;
+}
+
static const struct net_device_ops fec_netdev_ops = {
.ndo_open = fec_enet_open,
.ndo_stop = fec_enet_close,
@@ -4002,10 +4050,8 @@ static const struct net_device_ops fec_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = fec_timeout,
.ndo_set_mac_address = fec_set_mac_address,
+ .ndo_change_mtu = fec_change_mtu,
.ndo_eth_ioctl = phy_do_ioctl_running,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = fec_poll_controller,
-#endif
.ndo_set_features = fec_set_features,
.ndo_bpf = fec_enet_bpf,
.ndo_xdp_xmit = fec_enet_xdp_xmit,
@@ -4039,10 +4085,8 @@ static int fec_enet_init(struct net_device *ndev)
WARN_ON(dsize != (1 << dsize_log2));
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
- fep->rx_align = 0xf;
fep->tx_align = 0xf;
#else
- fep->rx_align = 0x3;
fep->tx_align = 0x3;
#endif
fep->rx_pkts_itr = FEC_ITR_ICFT_DEFAULT;
@@ -4131,10 +4175,8 @@ static int fec_enet_init(struct net_device *ndev)
fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
}
- if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
+ if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES)
fep->tx_align = 0;
- fep->rx_align = 0x3f;
- }
ndev->hw_features = ndev->features;
@@ -4156,6 +4198,14 @@ free_queue_mem:
return ret;
}
+static void fec_enet_deinit(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ netif_napi_del(&fep->napi);
+ fec_enet_free_queue(ndev);
+}
+
#ifdef CONFIG_OF
static int fec_reset_phy(struct platform_device *pdev)
{
@@ -4344,11 +4394,9 @@ fec_probe(struct platform_device *pdev)
fep->num_rx_queues = num_rx_qs;
fep->num_tx_queues = num_tx_qs;
-#if !defined(CONFIG_M5272)
/* default enable pause frame auto negotiation */
if (fep->quirks & FEC_QUIRK_HAS_GBIT)
fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
-#endif
/* Select default pin state */
pinctrl_pm_select_default_state(&pdev->dev);
@@ -4527,7 +4575,15 @@ fec_probe(struct platform_device *pdev)
fec_enet_clk_enable(ndev, false);
pinctrl_pm_select_sleep_state(&pdev->dev);
- ndev->max_mtu = PKT_MAXBUF_SIZE - ETH_HLEN - ETH_FCS_LEN;
+ fep->pagepool_order = 0;
+ fep->rx_frame_size = FEC_ENET_RX_FRSIZE;
+
+ if (fep->quirks & FEC_QUIRK_JUMBO_FRAME)
+ fep->max_buf_size = MAX_JUMBO_BUF_SIZE;
+ else
+ fep->max_buf_size = PKT_MAXBUF_SIZE;
+
+ ndev->max_mtu = fep->max_buf_size - ETH_HLEN - ETH_FCS_LEN;
ret = register_netdev(ndev);
if (ret)
@@ -4541,7 +4597,6 @@ fec_probe(struct platform_device *pdev)
INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);
- pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
return 0;
@@ -4550,6 +4605,7 @@ failed_register:
fec_enet_mii_remove(fep);
failed_mii_init:
failed_irq:
+ fec_enet_deinit(ndev);
failed_init:
fec_ptp_stop(pdev);
failed_reset:
@@ -4613,10 +4669,11 @@ fec_drv_remove(struct platform_device *pdev)
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ fec_enet_deinit(ndev);
free_netdev(ndev);
}
-static int __maybe_unused fec_suspend(struct device *dev)
+static int fec_suspend(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct fec_enet_private *fep = netdev_priv(ndev);
@@ -4669,7 +4726,7 @@ static int __maybe_unused fec_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused fec_resume(struct device *dev)
+static int fec_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct fec_enet_private *fep = netdev_priv(ndev);
@@ -4724,7 +4781,7 @@ failed_clk:
return ret;
}
-static int __maybe_unused fec_runtime_suspend(struct device *dev)
+static int fec_runtime_suspend(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct fec_enet_private *fep = netdev_priv(ndev);
@@ -4735,7 +4792,7 @@ static int __maybe_unused fec_runtime_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused fec_runtime_resume(struct device *dev)
+static int fec_runtime_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct fec_enet_private *fep = netdev_priv(ndev);
@@ -4756,20 +4813,20 @@ failed_clk_ipg:
}
static const struct dev_pm_ops fec_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
- SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
+ RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
};
static struct platform_driver fec_driver = {
.driver = {
.name = DRIVER_NAME,
- .pm = &fec_pm_ops,
+ .pm = pm_ptr(&fec_pm_ops),
.of_match_table = fec_dt_ids,
.suppress_bind_attrs = true,
},
.id_table = fec_devtype,
.probe = fec_probe,
- .remove_new = fec_drv_remove,
+ .remove = fec_drv_remove,
};
module_platform_driver(fec_driver);
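The new fec_change_mtu() callback above derives the page-pool page order from the requested MTU. The following user-space sketch only mirrors that arithmetic so the rounding is easy to see; PAGE_SIZE and the RESERVE stand-in for FEC_DRV_RESERVE_SPACE are assumed values, not taken from this patch.

/* Illustrative sketch only: mirrors the order/rx_frame_size arithmetic in
 * fec_change_mtu(). PAGE_SIZE and RESERVE below are assumptions.
 */
#include <stdio.h>

#define PAGE_SIZE   4096	/* assumption: 4 KiB pages */
#define ETH_HLEN    14
#define ETH_FCS_LEN 4
#define RESERVE     512		/* stand-in for FEC_DRV_RESERVE_SPACE */

/* Smallest order such that (PAGE_SIZE << order) holds 'bytes', like get_order(). */
static int pick_order(unsigned int bytes)
{
	int order = 0;

	while ((unsigned int)(PAGE_SIZE << order) < bytes)
		order++;
	return order;
}

int main(void)
{
	unsigned int mtu[] = { 1500, 4000, 9000 };

	for (unsigned int i = 0; i < sizeof(mtu) / sizeof(mtu[0]); i++) {
		unsigned int need = mtu[i] + ETH_HLEN + ETH_FCS_LEN + RESERVE;
		int order = pick_order(need);

		printf("mtu %u -> order %d, rx_frame_size %lu\n", mtu[i], order,
		       (unsigned long)(PAGE_SIZE << order) - RESERVE);
	}
	return 0;
}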
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index ebae71ec26c6..3fc29afc9854 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -619,7 +619,7 @@ static void mpc52xx_fec_hw_init(struct net_device *dev)
out_be32(&fec->rfifo_alarm, 0x0000030c);
out_be32(&fec->tfifo_alarm, 0x00000100);
- /* begin transmittion when 256 bytes are in FIFO (or EOF or FIFO full) */
+ /* begin transmission when 256 bytes are in FIFO (or EOF or FIFO full) */
out_be32(&fec->x_wmrk, FEC_FIFO_WMRK_256B);
/* enable crc generation */
@@ -1040,7 +1040,7 @@ static struct platform_driver mpc52xx_fec_driver = {
.of_match_table = mpc52xx_fec_match,
},
.probe = mpc52xx_fec_probe,
- .remove_new = mpc52xx_fec_remove,
+ .remove = mpc52xx_fec_remove,
#ifdef CONFIG_PM
.suspend = mpc52xx_fec_of_suspend,
.resume = mpc52xx_fec_of_resume,
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
index 39689826cc8f..3d073f0fae63 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
@@ -94,7 +94,7 @@ static int mpc52xx_fec_mdio_probe(struct platform_device *of)
goto out_free;
}
- snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start);
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%pa", &res.start);
bus->priv = priv;
bus->parent = dev;
@@ -144,7 +144,7 @@ struct platform_driver mpc52xx_fec_mdio_driver = {
.of_match_table = mpc52xx_fec_mdio_match,
},
.probe = mpc52xx_fec_mdio_probe,
- .remove_new = mpc52xx_fec_mdio_remove,
+ .remove = mpc52xx_fec_mdio_remove,
};
/* let fec driver call it, since this has to be registered before it */
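The bus->id change above switches from "%x" with res.start to "%pa" with &res.start because resource_size_t can be 64 bits wide on some configurations. As a hedged reminder of that convention only (the helper name below is hypothetical), "%pa" formats a phys_addr_t/resource_size_t and expects a pointer to it.

/* Illustrative sketch only (kernel context assumed); foo_show_regs is hypothetical. */
static void foo_show_regs(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	resource_size_t size;

	if (!res)
		return;

	size = resource_size(res);
	/* %pa takes a pointer to phys_addr_t/resource_size_t, never the value. */
	dev_info(&pdev->dev, "registers at %pa, %pa bytes\n", &res->start, &size);
}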
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 181d9bfbee22..4b7bad9a485d 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -7,31 +7,30 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/ptrace.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/pci.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/netdevice.h>
+#include <linux/errno.h>
#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/spinlock.h>
-#include <linux/workqueue.h>
-#include <linux/bitops.h>
+#include <linux/fec.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/ioport.h>
#include <linux/irq.h>
-#include <linux/clk.h>
-#include <linux/platform_device.h>
-#include <linux/phy.h>
-#include <linux/fec.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/of_net.h>
+#include <linux/pci.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/ptrace.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
#include "fec.h"
@@ -84,18 +83,41 @@
#define FEC_CC_MULT (1 << 31)
#define FEC_COUNTER_PERIOD (1 << 31)
#define PPS_OUPUT_RELOAD_PERIOD NSEC_PER_SEC
-#define FEC_CHANNLE_0 0
-#define DEFAULT_PPS_CHANNEL FEC_CHANNLE_0
+#define DEFAULT_PPS_CHANNEL 0
#define FEC_PTP_MAX_NSEC_PERIOD 4000000000ULL
#define FEC_PTP_MAX_NSEC_COUNTER 0x80000000ULL
/**
+ * fec_ptp_read - read raw cycle counter (to be used by time counter)
+ * @cc: the cyclecounter structure
+ *
+ * this function reads the cyclecounter registers and is called by the
+ * cyclecounter structure used to construct a ns counter from the
+ * arbitrary fixed point registers
+ */
+static u64 fec_ptp_read(struct cyclecounter *cc)
+{
+ struct fec_enet_private *fep =
+ container_of(cc, struct fec_enet_private, cc);
+ u32 tempval;
+
+ tempval = readl(fep->hwp + FEC_ATIME_CTRL);
+ tempval |= FEC_T_CTRL_CAPTURE;
+ writel(tempval, fep->hwp + FEC_ATIME_CTRL);
+
+ if (fep->quirks & FEC_QUIRK_BUG_CAPTURE)
+ udelay(1);
+
+ return readl(fep->hwp + FEC_ATIME);
+}
+
+/**
* fec_ptp_enable_pps
* @fep: the fec_enet_private structure handle
* @enable: enable the channel pps output
*
- * This function enble the PPS ouput on the timer channel.
+ * This function enables the PPS output on the timer channel.
*/
static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
{
@@ -104,13 +126,18 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
struct timespec64 ts;
u64 ns;
- if (fep->pps_enable == enable)
- return 0;
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
- fep->pps_channel = DEFAULT_PPS_CHANNEL;
- fep->reload_period = PPS_OUPUT_RELOAD_PERIOD;
+ if (fep->perout_enable) {
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+ dev_err(&fep->pdev->dev, "PEROUT is running");
+ return -EBUSY;
+ }
- spin_lock_irqsave(&fep->tmreg_lock, flags);
+ if (fep->pps_enable == enable) {
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+ return 0;
+ }
if (enable) {
/* clear capture or output compare interrupt status if have.
@@ -137,7 +164,7 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
* NSEC_PER_SEC - ts.tv_nsec. Add the remaining nanoseconds
* to current timer would be next second.
*/
- tempval = fep->cc.read(&fep->cc);
+ tempval = fec_ptp_read(&fep->cc);
/* Convert the ptp local counter to 1588 timestamp */
ns = timecounter_cyc2time(&fep->tc, tempval);
ts = ns_to_timespec64(ns);
@@ -151,7 +178,7 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
* very close to the second point, which means NSEC_PER_SEC
* - ts.tv_nsec is close to be zero(For example 20ns); Since the timer
* is still running when we calculate the first compare event, it is
- * possible that the remaining nanoseonds run out before the compare
+ * possible that the remaining nanoseconds run out before the compare
* counter is calculated and written into TCCR register. To avoid
* this possibility, we will set the compare event to be the next
* of next second. The current setting is 31-bit timer and wrap
@@ -212,13 +239,7 @@ static int fec_ptp_pps_perout(struct fec_enet_private *fep)
timecounter_read(&fep->tc);
/* Get the current ptp hardware time counter */
- temp_val = readl(fep->hwp + FEC_ATIME_CTRL);
- temp_val |= FEC_T_CTRL_CAPTURE;
- writel(temp_val, fep->hwp + FEC_ATIME_CTRL);
- if (fep->quirks & FEC_QUIRK_BUG_CAPTURE)
- udelay(1);
-
- ptp_hc = readl(fep->hwp + FEC_ATIME);
+ ptp_hc = fec_ptp_read(&fep->cc);
/* Convert the ptp local counter to 1588 timestamp */
curr_time = timecounter_cyc2time(&fep->tc, ptp_hc);
@@ -228,6 +249,7 @@ static int fec_ptp_pps_perout(struct fec_enet_private *fep)
* the FEC_TCCR register in time and missed the start time.
*/
if (fep->perout_stime < curr_time + 100 * NSEC_PER_MSEC) {
+ fep->perout_enable = false;
dev_err(&fep->pdev->dev, "Current time is too close to the start time!\n");
spin_unlock_irqrestore(&fep->tmreg_lock, flags);
return -1;
@@ -273,30 +295,6 @@ static enum hrtimer_restart fec_ptp_pps_perout_handler(struct hrtimer *timer)
}
/**
- * fec_ptp_read - read raw cycle counter (to be used by time counter)
- * @cc: the cyclecounter structure
- *
- * this function reads the cyclecounter registers and is called by the
- * cyclecounter structure used to construct a ns counter from the
- * arbitrary fixed point registers
- */
-static u64 fec_ptp_read(const struct cyclecounter *cc)
-{
- struct fec_enet_private *fep =
- container_of(cc, struct fec_enet_private, cc);
- u32 tempval;
-
- tempval = readl(fep->hwp + FEC_ATIME_CTRL);
- tempval |= FEC_T_CTRL_CAPTURE;
- writel(tempval, fep->hwp + FEC_ATIME_CTRL);
-
- if (fep->quirks & FEC_QUIRK_BUG_CAPTURE)
- udelay(1);
-
- return readl(fep->hwp + FEC_ATIME);
-}
-
-/**
* fec_ptp_start_cyclecounter - create the cycle counter from hw
* @ndev: network device
*
@@ -506,7 +504,10 @@ static int fec_ptp_pps_disable(struct fec_enet_private *fep, uint channel)
{
unsigned long flags;
+ hrtimer_cancel(&fep->perout_timer);
+
spin_lock_irqsave(&fep->tmreg_lock, flags);
+ fep->perout_enable = false;
writel(0, fep->hwp + FEC_TCSR(channel));
spin_unlock_irqrestore(&fep->tmreg_lock, flags);
@@ -532,18 +533,21 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp,
int ret = 0;
if (rq->type == PTP_CLK_REQ_PPS) {
+ fep->reload_period = PPS_OUPUT_RELOAD_PERIOD;
+
ret = fec_ptp_enable_pps(fep, on);
return ret;
} else if (rq->type == PTP_CLK_REQ_PEROUT) {
+ u32 reload_period;
+
/* Reject requests with unsupported flags */
if (rq->perout.flags)
return -EOPNOTSUPP;
- if (rq->perout.index != DEFAULT_PPS_CHANNEL)
+ if (rq->perout.index != fep->pps_channel)
return -EOPNOTSUPP;
- fep->pps_channel = DEFAULT_PPS_CHANNEL;
period.tv_sec = rq->perout.period.sec;
period.tv_nsec = rq->perout.period.nsec;
period_ns = timespec64_to_ns(&period);
@@ -556,12 +560,14 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp,
return -EOPNOTSUPP;
}
- fep->reload_period = div_u64(period_ns, 2);
- if (on && fep->reload_period) {
+ reload_period = div_u64(period_ns, 2);
+ if (on && reload_period) {
+ u64 perout_stime;
+
/* Convert 1588 timestamp to ns*/
start_time.tv_sec = rq->perout.start.sec;
start_time.tv_nsec = rq->perout.start.nsec;
- fep->perout_stime = timespec64_to_ns(&start_time);
+ perout_stime = timespec64_to_ns(&start_time);
mutex_lock(&fep->ptp_clk_mutex);
if (!fep->ptp_clk_on) {
@@ -570,18 +576,41 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp,
return -EOPNOTSUPP;
}
spin_lock_irqsave(&fep->tmreg_lock, flags);
+
+ if (fep->pps_enable) {
+ dev_err(&fep->pdev->dev, "PPS is running");
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ if (fep->perout_enable) {
+ dev_err(&fep->pdev->dev,
+ "PEROUT has been enabled\n");
+ ret = -EBUSY;
+ goto unlock;
+ }
+
/* Read current timestamp */
curr_time = timecounter_read(&fep->tc);
- spin_unlock_irqrestore(&fep->tmreg_lock, flags);
- mutex_unlock(&fep->ptp_clk_mutex);
+ if (perout_stime <= curr_time) {
+ dev_err(&fep->pdev->dev,
+ "Start time must be greater than current time\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
/* Calculate time difference */
- delta = fep->perout_stime - curr_time;
+ delta = perout_stime - curr_time;
+ fep->reload_period = reload_period;
+ fep->perout_stime = perout_stime;
+ fep->perout_enable = true;
- if (fep->perout_stime <= curr_time) {
- dev_err(&fep->pdev->dev, "Start time must larger than current time!\n");
- return -EINVAL;
- }
+unlock:
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+ mutex_unlock(&fep->ptp_clk_mutex);
+
+ if (ret)
+ return ret;
/* Because the timer counter of FEC only has 31-bits, correspondingly,
* the time comparison register FEC_TCCR also only low 31 bits can be
@@ -689,8 +718,11 @@ static irqreturn_t fec_pps_interrupt(int irq, void *dev_id)
fep->next_counter = (fep->next_counter + fep->reload_period) &
fep->cc.mask;
- event.type = PTP_CLOCK_PPS;
- ptp_clock_event(fep->ptp_clock, &event);
+ if (fep->pps_enable) {
+ event.type = PTP_CLOCK_PPS;
+ ptp_clock_event(fep->ptp_clock, &event);
+ }
+
return IRQ_HANDLED;
}
@@ -711,12 +743,16 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(ndev);
+ struct device_node *np = fep->pdev->dev.of_node;
int irq;
int ret;
fep->ptp_caps.owner = THIS_MODULE;
strscpy(fep->ptp_caps.name, "fec ptp", sizeof(fep->ptp_caps.name));
+ fep->pps_channel = DEFAULT_PPS_CHANNEL;
+ of_property_read_u32(np, "fsl,pps-channel", &fep->pps_channel);
+
fep->ptp_caps.max_adj = 250000000;
fep->ptp_caps.n_alarm = 0;
fep->ptp_caps.n_ext_ts = 0;
@@ -742,8 +778,8 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx)
INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep);
- hrtimer_init(&fep->perout_timer, CLOCK_REALTIME, HRTIMER_MODE_REL);
- fep->perout_timer.function = fec_ptp_pps_perout_handler;
+ hrtimer_setup(&fep->perout_timer, fec_ptp_pps_perout_handler, CLOCK_REALTIME,
+ HRTIMER_MODE_REL);
irq = platform_get_irq_byname_optional(pdev, "pps");
if (irq < 0)
@@ -768,11 +804,64 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx)
schedule_delayed_work(&fep->time_keep, HZ);
}
+void fec_ptp_save_state(struct fec_enet_private *fep)
+{
+ unsigned long flags;
+ u32 atime_inc_corr;
+
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
+
+ fep->ptp_saved_state.pps_enable = fep->pps_enable;
+
+ fep->ptp_saved_state.ns_phc = timecounter_read(&fep->tc);
+ fep->ptp_saved_state.ns_sys = ktime_get_ns();
+
+ fep->ptp_saved_state.at_corr = readl(fep->hwp + FEC_ATIME_CORR);
+ atime_inc_corr = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_CORR_MASK;
+ fep->ptp_saved_state.at_inc_corr = (u8)(atime_inc_corr >> FEC_T_INC_CORR_OFFSET);
+
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+}
+
+/* Restore PTP functionality after a reset */
+void fec_ptp_restore_state(struct fec_enet_private *fep)
+{
+ u32 atime_inc = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_MASK;
+ unsigned long flags;
+ u32 counter;
+ u64 ns;
+
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
+
+ /* Reset turned it off, so adjust our status flag */
+ fep->pps_enable = 0;
+
+ writel(fep->ptp_saved_state.at_corr, fep->hwp + FEC_ATIME_CORR);
+ atime_inc |= ((u32)fep->ptp_saved_state.at_inc_corr) << FEC_T_INC_CORR_OFFSET;
+ writel(atime_inc, fep->hwp + FEC_ATIME_INC);
+
+ ns = ktime_get_ns() - fep->ptp_saved_state.ns_sys + fep->ptp_saved_state.ns_phc;
+ counter = ns & fep->cc.mask;
+ writel(counter, fep->hwp + FEC_ATIME);
+ timecounter_init(&fep->tc, &fep->cc, ns);
+
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+
+ /* Restart PPS if needed */
+ if (fep->ptp_saved_state.pps_enable) {
+ /* Re-enable PPS */
+ fec_ptp_enable_pps(fep, 1);
+ }
+}
+
void fec_ptp_stop(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(ndev);
+ if (fep->pps_enable)
+ fec_ptp_enable_pps(fep, 0);
+
cancel_delayed_work_sync(&fep->time_keep);
hrtimer_cancel(&fep->perout_timer);
if (fep->ptp_clock)
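fec_ptp_save_state() and fec_ptp_restore_state() above carry the PHC time across a MAC reset by pairing a saved PHC reading with a saved system-clock reading, then replaying the elapsed system time on top of the saved PHC value. A minimal user-space sketch of just that arithmetic (register and timecounter handling are out of scope; the PHC value below is made up):

/* Illustrative sketch only: the "saved PHC + elapsed system time" arithmetic
 * used by fec_ptp_save_state()/fec_ptp_restore_state(), redone in user space.
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t mono_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

int main(void)
{
	/* "save": snapshot the PHC (made-up value here) and the system clock */
	uint64_t saved_phc_ns = 1700000000ULL * 1000000000ULL;
	uint64_t saved_sys_ns = mono_ns();

	/* ... the MAC is reset here, which clears the hardware counter ... */

	/* "restore": new PHC time = saved PHC + time that elapsed meanwhile */
	uint64_t restored_phc_ns = mono_ns() - saved_sys_ns + saved_phc_ns;

	printf("restored PHC is %llu ns ahead of the saved value\n",
	       (unsigned long long)(restored_phc_ns - saved_phc_ns));
	return 0;
}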
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index d96028f01770..11887458f050 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -24,7 +24,6 @@
/* General defines */
#define FMAN_LIODN_TBL 64 /* size of LIODN table */
-#define MAX_NUM_OF_MACS 10
#define FM_NUM_OF_FMAN_CTRL_EVENT_REGS 4
#define BASE_RX_PORTID 0x08
#define BASE_TX_PORTID 0x28
@@ -2691,13 +2690,12 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
{
struct fman *fman;
struct device_node *fm_node, *muram_node;
+ void __iomem *base_addr;
struct resource *res;
u32 val, range[2];
int err, irq;
struct clk *clk;
u32 clk_rate;
- phys_addr_t phys_base_addr;
- resource_size_t mem_size;
fman = kzalloc(sizeof(*fman), GFP_KERNEL);
if (!fman)
@@ -2725,18 +2723,6 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
goto fman_node_put;
fman->dts_params.err_irq = err;
- /* Get the FM address */
- res = platform_get_resource(of_dev, IORESOURCE_MEM, 0);
- if (!res) {
- err = -EINVAL;
- dev_err(&of_dev->dev, "%s: Can't get FMan memory resource\n",
- __func__);
- goto fman_node_put;
- }
-
- phys_base_addr = res->start;
- mem_size = resource_size(res);
-
clk = of_clk_get(fm_node, 0);
if (IS_ERR(clk)) {
err = PTR_ERR(clk);
@@ -2804,24 +2790,16 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
}
}
- fman->dts_params.res =
- devm_request_mem_region(&of_dev->dev, phys_base_addr,
- mem_size, "fman");
- if (!fman->dts_params.res) {
- err = -EBUSY;
- dev_err(&of_dev->dev, "%s: request_mem_region() failed\n",
- __func__);
- goto fman_free;
- }
-
- fman->dts_params.base_addr =
- devm_ioremap(&of_dev->dev, phys_base_addr, mem_size);
- if (!fman->dts_params.base_addr) {
- err = -ENOMEM;
+ base_addr = devm_platform_get_and_ioremap_resource(of_dev, 0, &res);
+ if (IS_ERR(base_addr)) {
+ err = PTR_ERR(base_addr);
dev_err(&of_dev->dev, "%s: devm_ioremap() failed\n", __func__);
goto fman_free;
}
+ fman->dts_params.base_addr = base_addr;
+ fman->dts_params.res = res;
+
fman->dev = &of_dev->dev;
err = of_platform_populate(fm_node, NULL, NULL, &of_dev->dev);
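The read_dts_node() change above folds the open-coded platform_get_resource() + devm_request_mem_region() + devm_ioremap() sequence into a single devm_platform_get_and_ioremap_resource() call. The resulting probe-time pattern, shown as a hedged sketch with an invented driver name rather than code from this patch:

/* Illustrative sketch only (kernel context assumed); foo_probe is hypothetical. */
static int foo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	/* One call: look up resource 0, request the region, ioremap it, all devm-managed. */
	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	dev_dbg(&pdev->dev, "mapped registers at %pa\n", &res->start);
	return 0;
}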
diff --git a/drivers/net/ethernet/freescale/fman/fman.h b/drivers/net/ethernet/freescale/fman/fman.h
index 2ea575a46675..74eb62eba0d7 100644
--- a/drivers/net/ethernet/freescale/fman/fman.h
+++ b/drivers/net/ethernet/freescale/fman/fman.h
@@ -74,6 +74,9 @@
#define BM_MAX_NUM_OF_POOLS 64 /* Buffers pools */
#define FMAN_PORT_MAX_EXT_POOLS_NUM 8 /* External BM pools per Rx port */
+/* General defines */
+#define MAX_NUM_OF_MACS 10
+
struct fman; /* FMan data */
/* Enum for defining port types */
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
index 3088da7adf0f..51402dff72c5 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
@@ -755,12 +755,12 @@ static struct fman_mac *pcs_to_dtsec(struct phylink_pcs *pcs)
return container_of(pcs, struct fman_mac, pcs);
}
-static void dtsec_pcs_get_state(struct phylink_pcs *pcs,
+static void dtsec_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state)
{
struct fman_mac *dtsec = pcs_to_dtsec(pcs);
- phylink_mii_c22_pcs_get_state(dtsec->tbidev, state);
+ phylink_mii_c22_pcs_get_state(dtsec->tbidev, neg_mode, state);
}
static int dtsec_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
@@ -1415,7 +1415,6 @@ int dtsec_initialization(struct mac_device *mac_dev,
mac_dev->set_exception = dtsec_set_exception;
mac_dev->set_allmulti = dtsec_set_allmulti;
mac_dev->set_tstamp = dtsec_set_tstamp;
- mac_dev->set_multi = fman_set_multi;
mac_dev->enable = dtsec_enable;
mac_dev->disable = dtsec_disable;
@@ -1447,7 +1446,6 @@ int dtsec_initialization(struct mac_device *mac_dev,
goto _return_fm_mac_free;
}
dtsec->pcs.ops = &dtsec_pcs_ops;
- dtsec->pcs.neg_mode = true;
dtsec->pcs.poll = true;
supported = mac_dev->phylink_config.supported_interfaces;
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index 758535adc9ff..c84f0336c94c 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -267,7 +267,6 @@ struct memac_cfg {
bool reset_on_init;
bool pause_ignore;
bool promiscuous_mode_enable;
- struct fixed_phy_status *fixed_link;
u16 max_frame_length;
u16 pause_quanta;
u32 tx_ipg_length;
@@ -650,6 +649,7 @@ static u32 memac_if_mode(phy_interface_t interface)
return IF_MODE_GMII | IF_MODE_RGMII;
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
case PHY_INTERFACE_MODE_QSGMII:
return IF_MODE_GMII;
case PHY_INTERFACE_MODE_10GBASER:
@@ -668,6 +668,7 @@ static struct phylink_pcs *memac_select_pcs(struct phylink_config *config,
switch (iface) {
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
return memac->sgmii_pcs;
case PHY_INTERFACE_MODE_QSGMII:
return memac->qsgmii_pcs;
@@ -686,6 +687,7 @@ static int memac_prepare(struct phylink_config *config, unsigned int mode,
switch (iface) {
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
case PHY_INTERFACE_MODE_QSGMII:
case PHY_INTERFACE_MODE_10GBASER:
return phy_set_mode_ext(memac->serdes, PHY_MODE_ETHERNET,
@@ -898,6 +900,89 @@ static int memac_set_exception(struct fman_mac *memac,
return 0;
}
+static u64 memac_read64(void __iomem *reg)
+{
+ u32 low, high, tmp;
+
+ do {
+ high = ioread32be(reg + 4);
+ low = ioread32be(reg);
+ tmp = ioread32be(reg + 4);
+ } while (high != tmp);
+
+ return ((u64)high << 32) | low;
+}
+
+static void memac_get_pause_stats(struct fman_mac *memac,
+ struct ethtool_pause_stats *s)
+{
+ s->tx_pause_frames = memac_read64(&memac->regs->txpf_l);
+ s->rx_pause_frames = memac_read64(&memac->regs->rxpf_l);
+}
+
+static const struct ethtool_rmon_hist_range memac_rmon_ranges[] = {
+ { 64, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, 9600 },
+ {},
+};
+
+static void memac_get_rmon_stats(struct fman_mac *memac,
+ struct ethtool_rmon_stats *s,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ s->undersize_pkts = memac_read64(&memac->regs->rund_l);
+ s->oversize_pkts = memac_read64(&memac->regs->rovr_l);
+ s->fragments = memac_read64(&memac->regs->rfrg_l);
+ s->jabbers = memac_read64(&memac->regs->rjbr_l);
+
+ s->hist[0] = memac_read64(&memac->regs->r64_l);
+ s->hist[1] = memac_read64(&memac->regs->r127_l);
+ s->hist[2] = memac_read64(&memac->regs->r255_l);
+ s->hist[3] = memac_read64(&memac->regs->r511_l);
+ s->hist[4] = memac_read64(&memac->regs->r1023_l);
+ s->hist[5] = memac_read64(&memac->regs->r1518_l);
+ s->hist[6] = memac_read64(&memac->regs->r1519x_l);
+
+ s->hist_tx[0] = memac_read64(&memac->regs->t64_l);
+ s->hist_tx[1] = memac_read64(&memac->regs->t127_l);
+ s->hist_tx[2] = memac_read64(&memac->regs->t255_l);
+ s->hist_tx[3] = memac_read64(&memac->regs->t511_l);
+ s->hist_tx[4] = memac_read64(&memac->regs->t1023_l);
+ s->hist_tx[5] = memac_read64(&memac->regs->t1518_l);
+ s->hist_tx[6] = memac_read64(&memac->regs->t1519x_l);
+
+ *ranges = memac_rmon_ranges;
+}
+
+static void memac_get_eth_ctrl_stats(struct fman_mac *memac,
+ struct ethtool_eth_ctrl_stats *s)
+{
+ s->MACControlFramesTransmitted = memac_read64(&memac->regs->tcnp_l);
+ s->MACControlFramesReceived = memac_read64(&memac->regs->rcnp_l);
+}
+
+static void memac_get_eth_mac_stats(struct fman_mac *memac,
+ struct ethtool_eth_mac_stats *s)
+{
+ s->FramesTransmittedOK = memac_read64(&memac->regs->tfrm_l);
+ s->FramesReceivedOK = memac_read64(&memac->regs->rfrm_l);
+ s->FrameCheckSequenceErrors = memac_read64(&memac->regs->rfcs_l);
+ s->AlignmentErrors = memac_read64(&memac->regs->raln_l);
+ s->OctetsTransmittedOK = memac_read64(&memac->regs->teoct_l);
+ s->FramesLostDueToIntMACXmitError = memac_read64(&memac->regs->terr_l);
+ s->OctetsReceivedOK = memac_read64(&memac->regs->reoct_l);
+ s->FramesLostDueToIntMACRcvError = memac_read64(&memac->regs->rdrntp_l);
+ s->MulticastFramesXmittedOK = memac_read64(&memac->regs->tmca_l);
+ s->BroadcastFramesXmittedOK = memac_read64(&memac->regs->tbca_l);
+ s->MulticastFramesReceivedOK = memac_read64(&memac->regs->rmca_l);
+ s->BroadcastFramesReceivedOK = memac_read64(&memac->regs->rbca_l);
+}
+
static int memac_init(struct fman_mac *memac)
{
struct memac_cfg *memac_drv_param;
@@ -1067,7 +1152,6 @@ int memac_initialization(struct mac_device *mac_dev,
struct fman_mac_params *params)
{
int err;
- struct device_node *fixed;
struct phylink_pcs *pcs;
struct fman_mac *memac;
unsigned long capabilities;
@@ -1089,9 +1173,12 @@ int memac_initialization(struct mac_device *mac_dev,
mac_dev->set_exception = memac_set_exception;
mac_dev->set_allmulti = memac_set_allmulti;
mac_dev->set_tstamp = memac_set_tstamp;
- mac_dev->set_multi = fman_set_multi;
mac_dev->enable = memac_enable;
mac_dev->disable = memac_disable;
+ mac_dev->get_pause_stats = memac_get_pause_stats;
+ mac_dev->get_rmon_stats = memac_get_rmon_stats;
+ mac_dev->get_eth_ctrl_stats = memac_get_eth_ctrl_stats;
+ mac_dev->get_eth_mac_stats = memac_get_eth_mac_stats;
mac_dev->fman_mac = memac_config(mac_dev, params);
if (!mac_dev->fman_mac)
@@ -1223,18 +1310,16 @@ int memac_initialization(struct mac_device *mac_dev,
memac->rgmii_no_half_duplex = true;
/* Most boards should use MLO_AN_INBAND, but existing boards don't have
- * a managed property. Default to MLO_AN_INBAND if nothing else is
- * specified. We need to be careful and not enable this if we have a
- * fixed link or if we are using MII or RGMII, since those
- * configurations modes don't use in-band autonegotiation.
+ * a managed property. Default to MLO_AN_INBAND rather than MLO_AN_PHY.
+ * Phylink will allow this to be overriden by a fixed link. We need to
+ * be careful and not enable this if we are using MII or RGMII, since
+ * those configurations modes don't use in-band autonegotiation.
*/
- fixed = of_get_child_by_name(mac_node, "fixed-link");
- if (!fixed && !of_property_read_bool(mac_node, "fixed-link") &&
- !of_property_read_bool(mac_node, "managed") &&
+ if (!of_property_present(mac_node, "managed") &&
+ mac_dev->phy_if != PHY_INTERFACE_MODE_2500BASEX &&
mac_dev->phy_if != PHY_INTERFACE_MODE_MII &&
!phy_interface_mode_is_rgmii(mac_dev->phy_if))
- mac_dev->phylink_config.ovr_an_inband = true;
- of_node_put(fixed);
+ mac_dev->phylink_config.default_an_inband = true;
err = memac_init(mac_dev->fman_mac);
if (err < 0)
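memac_read64() above reads a 64-bit statistics counter that the hardware exposes as two 32-bit registers; re-reading the high word until it is stable guards against the low word wrapping between the two reads. A user-space sketch of the same retry idea, with plain variables standing in for the MMIO register pair:

/* Illustrative sketch only: the high/low/high retry read used by memac_read64(). */
#include <stdint.h>
#include <stdio.h>

static volatile uint32_t reg_lo, reg_hi;	/* fake split 64-bit counter */

static uint64_t read_split64(void)
{
	uint32_t high, low, again;

	do {
		high = reg_hi;
		low = reg_lo;
		again = reg_hi;	/* if this changed, 'low' may belong to the old epoch */
	} while (high != again);

	return ((uint64_t)high << 32) | low;
}

int main(void)
{
	reg_hi = 1;
	reg_lo = 0x00000010;
	printf("counter = %llu\n", (unsigned long long)read_split64());
	return 0;
}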
diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.c b/drivers/net/ethernet/freescale/fman/fman_muram.c
index f557d68e5b76..1ed245a2ee01 100644
--- a/drivers/net/ethernet/freescale/fman/fman_muram.c
+++ b/drivers/net/ethernet/freescale/fman/fman_muram.c
@@ -12,7 +12,6 @@
struct muram_info {
struct gen_pool *pool;
void __iomem *vbase;
- size_t size;
phys_addr_t pbase;
};
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
index 406e75e9e5ea..e977389f7088 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.c
+++ b/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -987,7 +987,7 @@ static int init_low_level_driver(struct fman_port *port)
return -ENODEV;
}
- /* The code bellow is a trick so the FM will not release the buffer
+ /* The code below is a trick so the FM will not release the buffer
* to BM nor will try to enqueue the frame to QM
*/
if (port->port_type == FMAN_PORT_TYPE_TX) {
@@ -1748,7 +1748,7 @@ static int fman_port_probe(struct platform_device *of_dev)
struct resource res;
struct resource *dev_res;
u32 val;
- int err = 0, lenp;
+ int err = 0;
enum fman_port_type port_type;
u16 port_speed;
u8 port_id;
@@ -1795,7 +1795,7 @@ static int fman_port_probe(struct platform_device *of_dev)
if (of_device_is_compatible(port_node, "fsl,fman-v3-port-tx")) {
port_type = FMAN_PORT_TYPE_TX;
port_speed = 1000;
- if (of_find_property(port_node, "fsl,fman-10g-port", &lenp))
+ if (of_property_read_bool(port_node, "fsl,fman-10g-port"))
port_speed = 10000;
} else if (of_device_is_compatible(port_node, "fsl,fman-v2-port-tx")) {
@@ -1808,7 +1808,7 @@ static int fman_port_probe(struct platform_device *of_dev)
} else if (of_device_is_compatible(port_node, "fsl,fman-v3-port-rx")) {
port_type = FMAN_PORT_TYPE_RX;
port_speed = 1000;
- if (of_find_property(port_node, "fsl,fman-10g-port", &lenp))
+ if (of_property_read_bool(port_node, "fsl,fman-10g-port"))
port_speed = 10000;
} else if (of_device_is_compatible(port_node, "fsl,fman-v2-port-rx")) {
diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c
index c2261d26db5b..fecfca6eba03 100644
--- a/drivers/net/ethernet/freescale/fman/fman_tgec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c
@@ -771,7 +771,6 @@ int tgec_initialization(struct mac_device *mac_dev,
mac_dev->set_exception = tgec_set_exception;
mac_dev->set_allmulti = tgec_set_allmulti;
mac_dev->set_tstamp = tgec_set_tstamp;
- mac_dev->set_multi = fman_set_multi;
mac_dev->enable = tgec_enable;
mac_dev->disable = tgec_disable;
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 9767586b4eb3..f27ff625fe29 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -14,8 +14,6 @@
#include <linux/device.h>
#include <linux/phy.h>
#include <linux/netdevice.h>
-#include <linux/phy_fixed.h>
-#include <linux/phylink.h>
#include <linux/etherdevice.h>
#include <linux/libfdt_env.h>
#include <linux/platform_device.h>
@@ -32,8 +30,6 @@ MODULE_DESCRIPTION("FSL FMan MAC API based driver");
struct mac_priv_s {
u8 cell_index;
struct fman *fman;
- /* List of multicast addresses */
- struct list_head mc_addr_list;
struct platform_device *eth_dev;
u16 speed;
};
@@ -57,44 +53,6 @@ static void mac_exception(struct mac_device *mac_dev,
__func__, ex);
}
-int fman_set_multi(struct net_device *net_dev, struct mac_device *mac_dev)
-{
- struct mac_priv_s *priv;
- struct mac_address *old_addr, *tmp;
- struct netdev_hw_addr *ha;
- int err;
- enet_addr_t *addr;
-
- priv = mac_dev->priv;
-
- /* Clear previous address list */
- list_for_each_entry_safe(old_addr, tmp, &priv->mc_addr_list, list) {
- addr = (enet_addr_t *)old_addr->addr;
- err = mac_dev->remove_hash_mac_addr(mac_dev->fman_mac, addr);
- if (err < 0)
- return err;
-
- list_del(&old_addr->list);
- kfree(old_addr);
- }
-
- /* Add all the addresses from the new list */
- netdev_for_each_mc_addr(ha, net_dev) {
- addr = (enet_addr_t *)ha->addr;
- err = mac_dev->add_hash_mac_addr(mac_dev->fman_mac, addr);
- if (err < 0)
- return err;
-
- tmp = kmalloc(sizeof(*tmp), GFP_ATOMIC);
- if (!tmp)
- return -ENOMEM;
-
- ether_addr_copy(tmp->addr, ha->addr);
- list_add(&tmp->list, &priv->mc_addr_list);
- }
- return 0;
-}
-
static DEFINE_MUTEX(eth_lock);
static struct platform_device *dpaa_eth_add_device(int fman_id,
@@ -181,8 +139,6 @@ static int mac_probe(struct platform_device *_of_dev)
mac_dev->priv = priv;
mac_dev->dev = dev;
- INIT_LIST_HEAD(&priv->mc_addr_list);
-
/* Get the FM node */
dev_node = of_get_parent(mac_node);
if (!dev_node) {
@@ -197,55 +153,72 @@ static int mac_probe(struct platform_device *_of_dev)
err = -EINVAL;
goto _return_of_node_put;
}
+ mac_dev->fman_dev = &of_dev->dev;
/* Get the FMan cell-index */
err = of_property_read_u32(dev_node, "cell-index", &val);
if (err) {
dev_err(dev, "failed to read cell-index for %pOF\n", dev_node);
err = -EINVAL;
- goto _return_of_node_put;
+ goto _return_dev_put;
}
/* cell-index 0 => FMan id 1 */
fman_id = (u8)(val + 1);
- priv->fman = fman_bind(&of_dev->dev);
+ priv->fman = fman_bind(mac_dev->fman_dev);
if (!priv->fman) {
dev_err(dev, "fman_bind(%pOF) failed\n", dev_node);
err = -ENODEV;
- goto _return_of_node_put;
+ goto _return_dev_put;
}
+ /* Two references have been taken in of_find_device_by_node()
+ * and fman_bind(). Release one of them here. The second one
+ * will be released in mac_remove().
+ */
+ put_device(mac_dev->fman_dev);
of_node_put(dev_node);
+ dev_node = NULL;
/* Get the address of the memory mapped registers */
mac_dev->res = platform_get_mem_or_io(_of_dev, 0);
if (!mac_dev->res) {
dev_err(dev, "could not get registers\n");
- return -EINVAL;
+ err = -EINVAL;
+ goto _return_dev_put;
}
err = devm_request_resource(dev, fman_get_mem_region(priv->fman),
mac_dev->res);
if (err) {
dev_err_probe(dev, err, "could not request resource\n");
- return err;
+ goto _return_dev_put;
}
mac_dev->vaddr = devm_ioremap(dev, mac_dev->res->start,
resource_size(mac_dev->res));
if (!mac_dev->vaddr) {
dev_err(dev, "devm_ioremap() failed\n");
- return -EIO;
+ err = -EIO;
+ goto _return_dev_put;
}
- if (!of_device_is_available(mac_node))
- return -ENODEV;
+ if (!of_device_is_available(mac_node)) {
+ err = -ENODEV;
+ goto _return_dev_put;
+ }
/* Get the cell-index */
err = of_property_read_u32(mac_node, "cell-index", &val);
if (err) {
dev_err(dev, "failed to read cell-index for %pOF\n", mac_node);
- return -EINVAL;
+ err = -EINVAL;
+ goto _return_dev_put;
+ }
+ if (val >= MAX_NUM_OF_MACS) {
+ dev_err(dev, "cell-index value is too big for %pOF\n", mac_node);
+ err = -EINVAL;
+ goto _return_dev_put;
}
priv->cell_index = (u8)val;
@@ -259,22 +232,26 @@ static int mac_probe(struct platform_device *_of_dev)
if (unlikely(nph < 0)) {
dev_err(dev, "of_count_phandle_with_args(%pOF, fsl,fman-ports) failed\n",
mac_node);
- return nph;
+ err = nph;
+ goto _return_dev_put;
}
if (nph != ARRAY_SIZE(mac_dev->port)) {
dev_err(dev, "Not supported number of fman-ports handles of mac node %pOF from device tree\n",
mac_node);
- return -EINVAL;
+ err = -EINVAL;
+ goto _return_dev_put;
}
- for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
+ /* PORT_NUM determines the size of the port array */
+ for (i = 0; i < PORT_NUM; i++) {
/* Find the port node */
dev_node = of_parse_phandle(mac_node, "fsl,fman-ports", i);
if (!dev_node) {
dev_err(dev, "of_parse_phandle(%pOF, fsl,fman-ports) failed\n",
mac_node);
- return -EINVAL;
+ err = -EINVAL;
+ goto _return_dev_arr_put;
}
of_dev = of_find_device_by_node(dev_node);
@@ -282,17 +259,24 @@ static int mac_probe(struct platform_device *_of_dev)
dev_err(dev, "of_find_device_by_node(%pOF) failed\n",
dev_node);
err = -EINVAL;
- goto _return_of_node_put;
+ goto _return_dev_arr_put;
}
+ mac_dev->fman_port_devs[i] = &of_dev->dev;
- mac_dev->port[i] = fman_port_bind(&of_dev->dev);
+ mac_dev->port[i] = fman_port_bind(mac_dev->fman_port_devs[i]);
if (!mac_dev->port[i]) {
dev_err(dev, "dev_get_drvdata(%pOF) failed\n",
dev_node);
err = -EINVAL;
- goto _return_of_node_put;
+ goto _return_dev_arr_put;
}
+ /* Two references have been taken in of_find_device_by_node()
+ * and fman_port_bind(). Release one of them here. The second
+ * one will be released in mac_remove().
+ */
+ put_device(mac_dev->fman_port_devs[i]);
of_node_put(dev_node);
+ dev_node = NULL;
}
/* Get the PHY connection type */
@@ -312,7 +296,7 @@ static int mac_probe(struct platform_device *_of_dev)
err = init(mac_dev, mac_node, &params);
if (err < 0)
- return err;
+ goto _return_dev_arr_put;
if (!is_zero_ether_addr(mac_dev->addr))
dev_info(dev, "FMan MAC address: %pM\n", mac_dev->addr);
@@ -327,6 +311,12 @@ static int mac_probe(struct platform_device *_of_dev)
return err;
+_return_dev_arr_put:
+ /* mac_dev is kzalloc'ed */
+ for (i = 0; i < PORT_NUM; i++)
+ put_device(mac_dev->fman_port_devs[i]);
+_return_dev_put:
+ put_device(mac_dev->fman_dev);
_return_of_node_put:
of_node_put(dev_node);
return err;
@@ -335,6 +325,11 @@ _return_of_node_put:
static void mac_remove(struct platform_device *pdev)
{
struct mac_device *mac_dev = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < PORT_NUM; i++)
+ put_device(mac_dev->fman_port_devs[i]);
+ put_device(mac_dev->fman_dev);
platform_device_unregister(mac_dev->priv->eth_dev);
}
@@ -345,7 +340,7 @@ static struct platform_driver mac_driver = {
.of_match_table = mac_match,
},
.probe = mac_probe,
- .remove_new = mac_remove,
+ .remove = mac_remove,
};
builtin_platform_driver(mac_driver);
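mac_probe()/mac_remove() above now keep exactly one long-lived reference per bound FMan device and drop it in mac_remove(); the extra reference taken during the lookup is released right away. A hedged sketch of that get/put pairing in isolation (the "foo" helper names are hypothetical):

/* Illustrative sketch only (kernel context assumed): every successful
 * of_find_device_by_node() pins the device and needs a matching put_device().
 */
static struct device *foo_lookup(struct device_node *np)
{
	struct platform_device *of_dev;

	of_dev = of_find_device_by_node(np);	/* takes a reference on the device */
	if (!of_dev)
		return NULL;

	return &of_dev->dev;			/* caller now owns that reference */
}

static void foo_remove_path(struct device *dev)
{
	put_device(dev);			/* balance the lookup in the remove path */
}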
diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h
index fe747915cc73..63c2c5b4f99e 100644
--- a/drivers/net/ethernet/freescale/fman/mac.h
+++ b/drivers/net/ethernet/freescale/fman/mac.h
@@ -16,15 +16,21 @@
#include "fman.h"
#include "fman_mac.h"
+struct ethtool_eth_ctrl_stats;
+struct ethtool_eth_mac_stats;
+struct ethtool_pause_stats;
+struct ethtool_rmon_stats;
+struct ethtool_rmon_hist_range;
struct fman_mac;
struct mac_priv_s;
+#define PORT_NUM 2
struct mac_device {
void __iomem *vaddr;
struct device *dev;
struct resource *res;
u8 addr[ETH_ALEN];
- struct fman_port *port[2];
+ struct fman_port *port[PORT_NUM];
struct phylink *phylink;
struct phylink_config phylink_config;
phy_interface_t phy_if;
@@ -39,19 +45,29 @@ struct mac_device {
int (*change_addr)(struct fman_mac *mac_dev, const enet_addr_t *enet_addr);
int (*set_allmulti)(struct fman_mac *mac_dev, bool enable);
int (*set_tstamp)(struct fman_mac *mac_dev, bool enable);
- int (*set_multi)(struct net_device *net_dev,
- struct mac_device *mac_dev);
int (*set_exception)(struct fman_mac *mac_dev,
enum fman_mac_exceptions exception, bool enable);
int (*add_hash_mac_addr)(struct fman_mac *mac_dev,
enet_addr_t *eth_addr);
int (*remove_hash_mac_addr)(struct fman_mac *mac_dev,
enet_addr_t *eth_addr);
+ void (*get_pause_stats)(struct fman_mac *memac,
+ struct ethtool_pause_stats *s);
+ void (*get_rmon_stats)(struct fman_mac *memac,
+ struct ethtool_rmon_stats *s,
+ const struct ethtool_rmon_hist_range **ranges);
+ void (*get_eth_ctrl_stats)(struct fman_mac *memac,
+ struct ethtool_eth_ctrl_stats *s);
+ void (*get_eth_mac_stats)(struct fman_mac *memac,
+ struct ethtool_eth_mac_stats *s);
void (*update_speed)(struct mac_device *mac_dev, int speed);
struct fman_mac *fman_mac;
struct mac_priv_s *priv;
+
+ struct device *fman_dev;
+ struct device *fman_port_devs[PORT_NUM];
};
static inline struct mac_device
diff --git a/drivers/net/ethernet/freescale/fs_enet/Kconfig b/drivers/net/ethernet/freescale/fs_enet/Kconfig
index 7f20840fde07..57013bf14d7c 100644
--- a/drivers/net/ethernet/freescale/fs_enet/Kconfig
+++ b/drivers/net/ethernet/freescale/fs_enet/Kconfig
@@ -3,7 +3,7 @@ config FS_ENET
tristate "Freescale Ethernet Driver"
depends on NET_VENDOR_FREESCALE && (CPM1 || CPM2 || PPC_MPC512x)
select MII
- select PHYLIB
+ select PHYLINK
config FS_ENET_MPC5121_FEC
def_bool y if (FS_ENET && PPC_MPC512x)
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index cf392faa6105..f563692a4a00 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
*
@@ -9,10 +10,6 @@
*
* Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com>
* and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
*/
#include <linux/module.h>
@@ -29,17 +26,18 @@
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
-#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
+#include <linux/phylink.h>
#include <linux/property.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/pgtable.h>
+#include <linux/rtnetlink.h>
#include <linux/vmalloc.h>
#include <asm/irq.h>
@@ -72,6 +70,13 @@ static void fs_set_multicast_list(struct net_device *dev)
(*fep->ops->set_multicast_list)(dev);
}
+static int fs_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ return phylink_mii_ioctl(fep->phylink, ifr, cmd);
+}
+
static void skb_align(struct sk_buff *skb, int align)
{
int off = ((unsigned long)skb->data) & (align - 1);
@@ -84,15 +89,14 @@ static void skb_align(struct sk_buff *skb, int align)
static int fs_enet_napi(struct napi_struct *napi, int budget)
{
struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, napi);
- struct net_device *dev = fep->ndev;
const struct fs_platform_info *fpi = fep->fpi;
- cbd_t __iomem *bdp;
+ struct net_device *dev = fep->ndev;
+ int curidx, dirtyidx, received = 0;
+ int do_wake = 0, do_restart = 0;
+ int tx_left = TX_RING_SIZE;
struct sk_buff *skb, *skbn;
- int received = 0;
+ cbd_t __iomem *bdp;
u16 pkt_len, sc;
- int curidx;
- int dirtyidx, do_wake, do_restart;
- int tx_left = TX_RING_SIZE;
spin_lock(&fep->tx_lock);
bdp = fep->dirty_tx;
@@ -100,7 +104,6 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
/* clear status bits for napi*/
(*fep->ops->napi_clear_event)(dev);
- do_wake = do_restart = 0;
while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0 && tx_left) {
dirtyidx = bdp - fep->tx_bd_base;
@@ -109,12 +112,9 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
skb = fep->tx_skbuff[dirtyidx];
- /*
- * Check for errors.
- */
+ /* Check for errors. */
if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {
-
if (sc & BD_ENET_TX_HB) /* No heartbeat */
dev->stats.tx_heartbeat_errors++;
if (sc & BD_ENET_TX_LC) /* Late collision */
@@ -130,16 +130,16 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
dev->stats.tx_errors++;
do_restart = 1;
}
- } else
+ } else {
dev->stats.tx_packets++;
+ }
if (sc & BD_ENET_TX_READY) {
dev_warn(fep->dev,
"HEY! Enet xmit interrupt and TX_READY.\n");
}
- /*
- * Deferred means some collisions occurred during transmit,
+ /* Deferred means some collisions occurred during transmit,
* but we eventually sent the packet OK.
*/
if (sc & BD_ENET_TX_DEF)
@@ -153,25 +153,20 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
CBDR_DATLEN(bdp), DMA_TO_DEVICE);
- /*
- * Free the sk buffer associated with this last transmit.
- */
+ /* Free the sk buffer associated with this last transmit. */
if (skb) {
dev_kfree_skb(skb);
fep->tx_skbuff[dirtyidx] = NULL;
}
- /*
- * Update pointer to next buffer descriptor to be transmitted.
+ /* Update pointer to next buffer descriptor to be transmitted.
*/
if ((sc & BD_ENET_TX_WRAP) == 0)
bdp++;
else
bdp = fep->tx_bd_base;
- /*
- * Since we have freed up a buffer, the ring is no longer
- * full.
+ /* Since we have freed up a buffer, the ring is no longer full.
*/
if (++fep->tx_free == MAX_SKB_FRAGS)
do_wake = 1;
@@ -188,8 +183,7 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
if (do_wake)
netif_wake_queue(dev);
- /*
- * First, grab all of the stats for the incoming packet.
+ /* First, grab all of the stats for the incoming packet.
* These get messed up if we get called due to a busy condition.
*/
bdp = fep->cur_rx;
@@ -198,16 +192,13 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
received < budget) {
curidx = bdp - fep->rx_bd_base;
- /*
- * Since we have allocated space to hold a complete frame,
+ /* Since we have allocated space to hold a complete frame,
* the last indicator should be set.
*/
if ((sc & BD_ENET_RX_LAST) == 0)
dev_warn(fep->dev, "rcv is not +last\n");
- /*
- * Check for errors.
- */
+ /* Check for errors. */
if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
dev->stats.rx_errors++;
@@ -228,9 +219,7 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
} else {
skb = fep->rx_skbuff[curidx];
- /*
- * Process the incoming frame.
- */
+ /* Process the incoming frame */
dev->stats.rx_packets++;
pkt_len = CBDR_DATLEN(bdp) - 4; /* remove CRC */
dev->stats.rx_bytes += pkt_len + 4;
@@ -238,15 +227,15 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
if (pkt_len <= fpi->rx_copybreak) {
/* +2 to make IP header L1 cache aligned */
skbn = netdev_alloc_skb(dev, pkt_len + 2);
- if (skbn != NULL) {
+ if (skbn) {
skb_reserve(skbn, 2); /* align IP header */
- skb_copy_from_linear_data(skb,
- skbn->data, pkt_len);
+ skb_copy_from_linear_data(skb, skbn->data,
+ pkt_len);
swap(skb, skbn);
dma_sync_single_for_cpu(fep->dev,
- CBDR_BUFADDR(bdp),
- L1_CACHE_ALIGN(pkt_len),
- DMA_FROM_DEVICE);
+ CBDR_BUFADDR(bdp),
+ L1_CACHE_ALIGN(pkt_len),
+ DMA_FROM_DEVICE);
}
} else {
skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
@@ -256,20 +245,18 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
skb_align(skbn, ENET_RX_ALIGN);
- dma_unmap_single(fep->dev,
- CBDR_BUFADDR(bdp),
- L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
- DMA_FROM_DEVICE);
+ dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
+ L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+ DMA_FROM_DEVICE);
- dma = dma_map_single(fep->dev,
- skbn->data,
- L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
- DMA_FROM_DEVICE);
+ dma = dma_map_single(fep->dev, skbn->data,
+ L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+ DMA_FROM_DEVICE);
CBDW_BUFADDR(bdp, dma);
}
}
- if (skbn != NULL) {
+ if (skbn) {
skb_put(skb, pkt_len); /* Make room */
skb->protocol = eth_type_trans(skb, dev);
received++;
@@ -284,9 +271,7 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
CBDW_DATLEN(bdp, 0);
CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);
- /*
- * Update BD pointer to next entry.
- */
+ /* Update BD pointer to next entry */
if ((sc & BD_ENET_RX_WRAP) == 0)
bdp++;
else
@@ -308,19 +293,16 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
return budget;
}
-/*
- * The interrupt handler.
+/* The interrupt handler.
* This is called from the MPC core interrupt.
*/
static irqreturn_t
fs_enet_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
+ u32 int_events, int_clr_events;
struct fs_enet_private *fep;
- u32 int_events;
- u32 int_clr_events;
- int nr, napi_ok;
- int handled;
+ int nr, napi_ok, handled;
fep = netdev_priv(dev);
@@ -342,12 +324,12 @@ fs_enet_interrupt(int irq, void *dev_id)
(*fep->ops->napi_disable)(dev);
(*fep->ops->clear_int_events)(dev, fep->ev_napi);
- /* NOTE: it is possible for FCCs in NAPI mode */
- /* to submit a spurious interrupt while in poll */
+ /* NOTE: it is possible for FCCs in NAPI mode
+ * to submit a spurious interrupt while in poll
+ */
if (napi_ok)
__napi_schedule(&fep->napi);
}
-
}
handled = nr > 0;
@@ -357,45 +339,40 @@ fs_enet_interrupt(int irq, void *dev_id)
void fs_init_bds(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
- cbd_t __iomem *bdp;
struct sk_buff *skb;
+ cbd_t __iomem *bdp;
int i;
fs_cleanup_bds(dev);
- fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
+ fep->dirty_tx = fep->tx_bd_base;
+ fep->cur_tx = fep->tx_bd_base;
fep->tx_free = fep->tx_ring;
fep->cur_rx = fep->rx_bd_base;
- /*
- * Initialize the receive buffer descriptors.
- */
+ /* Initialize the receive buffer descriptors */
for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
skb = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
- if (skb == NULL)
+ if (!skb)
break;
skb_align(skb, ENET_RX_ALIGN);
fep->rx_skbuff[i] = skb;
- CBDW_BUFADDR(bdp,
- dma_map_single(fep->dev, skb->data,
- L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
- DMA_FROM_DEVICE));
+ CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skb->data,
+ L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+ DMA_FROM_DEVICE));
CBDW_DATLEN(bdp, 0); /* zero */
CBDW_SC(bdp, BD_ENET_RX_EMPTY |
((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP));
}
- /*
- * if we failed, fillup remainder
- */
+
+	/* if we failed, fill up the remainder */
for (; i < fep->rx_ring; i++, bdp++) {
fep->rx_skbuff[i] = NULL;
CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP);
}
- /*
- * ...and the same for transmit.
- */
+ /* ...and the same for transmit. */
for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
fep->tx_skbuff[i] = NULL;
CBDW_BUFADDR(bdp, 0);
@@ -411,32 +388,30 @@ void fs_cleanup_bds(struct net_device *dev)
cbd_t __iomem *bdp;
int i;
- /*
- * Reset SKB transmit buffers.
- */
+ /* Reset SKB transmit buffers. */
for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
- if ((skb = fep->tx_skbuff[i]) == NULL)
+ skb = fep->tx_skbuff[i];
+ if (!skb)
continue;
/* unmap */
dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
- skb->len, DMA_TO_DEVICE);
+ skb->len, DMA_TO_DEVICE);
fep->tx_skbuff[i] = NULL;
dev_kfree_skb(skb);
}
- /*
- * Reset SKB receive buffers
- */
+ /* Reset SKB receive buffers */
for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
- if ((skb = fep->rx_skbuff[i]) == NULL)
+ skb = fep->rx_skbuff[i];
+ if (!skb)
continue;
/* unmap */
dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
- L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
- DMA_FROM_DEVICE);
+ L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+ DMA_FROM_DEVICE);
fep->rx_skbuff[i] = NULL;
@@ -444,12 +419,8 @@ void fs_cleanup_bds(struct net_device *dev)
}
}
-/**********************************************************************************/
-
#ifdef CONFIG_FS_ENET_MPC5121_FEC
-/*
- * MPC5121 FEC requeries 4-byte alignment for TX data buffer!
- */
+/* MPC5121 FEC requires 4-byte alignment for TX data buffer! */
static struct sk_buff *tx_skb_align_workaround(struct net_device *dev,
struct sk_buff *skb)
{
@@ -481,15 +452,12 @@ static netdev_tx_t
fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
+ int curidx, nr_frags, len;
cbd_t __iomem *bdp;
- int curidx;
- u16 sc;
- int nr_frags;
skb_frag_t *frag;
- int len;
+ u16 sc;
#ifdef CONFIG_FS_ENET_MPC5121_FEC
- int is_aligned = 1;
- int i;
+ int i, is_aligned = 1;
if (!IS_ALIGNED((unsigned long)skb->data, 4)) {
is_aligned = 0;
@@ -507,8 +475,7 @@ fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (!is_aligned) {
skb = tx_skb_align_workaround(dev, skb);
if (!skb) {
- /*
- * We have lost packet due to memory allocation error
+ /* We have lost packet due to memory allocation error
* in tx_skb_align_workaround(). Hopefully original
* skb is still valid, so try transmit it later.
*/
@@ -519,9 +486,7 @@ fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_lock(&fep->tx_lock);
- /*
- * Fill in a Tx ring entry
- */
+ /* Fill in a Tx ring entry */
bdp = fep->cur_tx;
nr_frags = skb_shinfo(skb)->nr_frags;
@@ -529,8 +494,7 @@ fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
spin_unlock(&fep->tx_lock);
- /*
- * Ooops. All transmit buffers are full. Bail out.
+ /* Ooops. All transmit buffers are full. Bail out.
* This should not happen, since the tx queue should be stopped.
*/
dev_warn(fep->dev, "tx queue full!.\n");
@@ -543,12 +507,12 @@ fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev->stats.tx_bytes += len;
if (nr_frags)
len -= skb->data_len;
+
fep->tx_free -= nr_frags + 1;
- /*
- * Push the data cache so the CPM does not get stale memory data.
+ /* Push the data cache so the CPM does not get stale memory data.
*/
CBDW_BUFADDR(bdp, dma_map_single(fep->dev,
- skb->data, len, DMA_TO_DEVICE));
+ skb->data, len, DMA_TO_DEVICE));
CBDW_DATLEN(bdp, len);
fep->mapped_as_page[curidx] = 0;
@@ -585,9 +549,11 @@ fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* note that while FEC does not have this bit
* it marks it as available for software use
- * yay for hw reuse :) */
+ * yay for hw reuse :)
+ */
if (skb->len <= 60)
sc |= BD_ENET_TX_PAD;
+
CBDC_SC(bdp, BD_ENET_TX_STATS);
CBDS_SC(bdp, sc);
@@ -599,6 +565,7 @@ fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
bdp++;
else
bdp = fep->tx_bd_base;
+
fep->cur_tx = bdp;
if (fep->tx_free < MAX_SKB_FRAGS)
@@ -623,15 +590,21 @@ static void fs_timeout_work(struct work_struct *work)
dev->stats.tx_errors++;
- spin_lock_irqsave(&fep->lock, flags);
+ /* In the event a timeout was detected, but the netdev is brought down
+ * shortly after, it no longer makes sense to try to recover from the
+ * timeout. netif_running() will return false when called from the
+ * .ndo_close() callback. Calling the following recovery code while
+ * called from .ndo_close() could deadlock on rtnl.
+ */
+ if (!netif_running(dev))
+ return;
- if (dev->flags & IFF_UP) {
- phy_stop(dev->phydev);
- (*fep->ops->stop)(dev);
- (*fep->ops->restart)(dev);
- }
+ rtnl_lock();
+ phylink_stop(fep->phylink);
+ phylink_start(fep->phylink);
+ rtnl_unlock();
- phy_start(dev->phydev);
+ spin_lock_irqsave(&fep->lock, flags);
wake = fep->tx_free >= MAX_SKB_FRAGS &&
!(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
spin_unlock_irqrestore(&fep->lock, flags);
@@ -647,82 +620,37 @@ static void fs_timeout(struct net_device *dev, unsigned int txqueue)
schedule_work(&fep->timeout_work);
}
-/*-----------------------------------------------------------------------------
- * generic link-change handler - should be sufficient for most cases
- *-----------------------------------------------------------------------------*/
-static void generic_adjust_link(struct net_device *dev)
+static void fs_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
{
- struct fs_enet_private *fep = netdev_priv(dev);
- struct phy_device *phydev = dev->phydev;
- int new_state = 0;
-
- if (phydev->link) {
- /* adjust to duplex mode */
- if (phydev->duplex != fep->oldduplex) {
- new_state = 1;
- fep->oldduplex = phydev->duplex;
- }
-
- if (phydev->speed != fep->oldspeed) {
- new_state = 1;
- fep->oldspeed = phydev->speed;
- }
-
- if (!fep->oldlink) {
- new_state = 1;
- fep->oldlink = 1;
- }
-
- if (new_state)
- fep->ops->restart(dev);
- } else if (fep->oldlink) {
- new_state = 1;
- fep->oldlink = 0;
- fep->oldspeed = 0;
- fep->oldduplex = -1;
- }
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct fs_enet_private *fep = netdev_priv(ndev);
+ unsigned long flags;
- if (new_state && netif_msg_link(fep))
- phy_print_status(phydev);
+ spin_lock_irqsave(&fep->lock, flags);
+ fep->ops->restart(ndev, interface, speed, duplex);
+ spin_unlock_irqrestore(&fep->lock, flags);
}
-
-static void fs_adjust_link(struct net_device *dev)
+static void fs_mac_link_down(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
{
- struct fs_enet_private *fep = netdev_priv(dev);
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct fs_enet_private *fep = netdev_priv(ndev);
unsigned long flags;
spin_lock_irqsave(&fep->lock, flags);
-
- if(fep->ops->adjust_link)
- fep->ops->adjust_link(dev);
- else
- generic_adjust_link(dev);
-
+ fep->ops->stop(ndev);
spin_unlock_irqrestore(&fep->lock, flags);
}
-static int fs_init_phy(struct net_device *dev)
+static void fs_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
{
- struct fs_enet_private *fep = netdev_priv(dev);
- struct phy_device *phydev;
- phy_interface_t iface;
-
- fep->oldlink = 0;
- fep->oldspeed = 0;
- fep->oldduplex = -1;
-
- iface = fep->fpi->use_rmii ?
- PHY_INTERFACE_MODE_RMII : PHY_INTERFACE_MODE_MII;
-
- phydev = of_phy_connect(dev, fep->fpi->phy_node, &fs_adjust_link, 0,
- iface);
- if (!phydev) {
- dev_err(&dev->dev, "Could not attach to PHY\n");
- return -ENODEV;
- }
-
- return 0;
+ /* Nothing to do */
}
static int fs_enet_open(struct net_device *dev)
@@ -731,8 +659,9 @@ static int fs_enet_open(struct net_device *dev)
int r;
int err;
- /* to initialize the fep->cur_rx,... */
- /* not doing this, will cause a crash in fs_enet_napi */
+ /* to initialize the fep->cur_rx,...
+	 * not doing this will cause a crash in fs_enet_napi
+	 */
fs_init_bds(fep->ndev);
napi_enable(&fep->napi);
@@ -746,13 +675,13 @@ static int fs_enet_open(struct net_device *dev)
return -EINVAL;
}
- err = fs_init_phy(dev);
+ err = phylink_of_phy_connect(fep->phylink, fep->dev->of_node, 0);
if (err) {
free_irq(fep->interrupt, dev);
napi_disable(&fep->napi);
return err;
}
- phy_start(dev->phydev);
+ phylink_start(fep->phylink);
netif_start_queue(dev);
@@ -765,28 +694,25 @@ static int fs_enet_close(struct net_device *dev)
unsigned long flags;
netif_stop_queue(dev);
- netif_carrier_off(dev);
napi_disable(&fep->napi);
- cancel_work_sync(&fep->timeout_work);
- phy_stop(dev->phydev);
+ cancel_work(&fep->timeout_work);
+ phylink_stop(fep->phylink);
spin_lock_irqsave(&fep->lock, flags);
spin_lock(&fep->tx_lock);
(*fep->ops->stop)(dev);
spin_unlock(&fep->tx_lock);
spin_unlock_irqrestore(&fep->lock, flags);
+ phylink_disconnect_phy(fep->phylink);
/* release any irqs */
- phy_disconnect(dev->phydev);
free_irq(fep->interrupt, dev);
return 0;
}
-/*************************************************************************/
-
static void fs_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
+ struct ethtool_drvinfo *info)
{
strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
}
@@ -799,7 +725,7 @@ static int fs_get_regs_len(struct net_device *dev)
}
static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs,
- void *p)
+ void *p)
{
struct fs_enet_private *fep = netdev_priv(dev);
unsigned long flags;
@@ -818,12 +744,14 @@ static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs,
static u32 fs_get_msglevel(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
+
return fep->msg_enable;
}
static void fs_set_msglevel(struct net_device *dev, u32 value)
{
struct fs_enet_private *fep = netdev_priv(dev);
+
fep->msg_enable = value;
}
@@ -865,6 +793,22 @@ static int fs_set_tunable(struct net_device *dev,
return ret;
}
+static int fs_ethtool_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ return phylink_ethtool_ksettings_set(fep->phylink, cmd);
+}
+
+static int fs_ethtool_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ return phylink_ethtool_ksettings_get(fep->phylink, cmd);
+}
+
static const struct ethtool_ops fs_ethtool_ops = {
.get_drvinfo = fs_get_drvinfo,
.get_regs_len = fs_get_regs_len,
@@ -874,14 +818,12 @@ static const struct ethtool_ops fs_ethtool_ops = {
.set_msglevel = fs_set_msglevel,
.get_regs = fs_get_regs,
.get_ts_info = ethtool_op_get_ts_info,
- .get_link_ksettings = phy_ethtool_get_link_ksettings,
- .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_link_ksettings = fs_ethtool_get_link_ksettings,
+ .set_link_ksettings = fs_ethtool_set_link_ksettings,
.get_tunable = fs_get_tunable,
.set_tunable = fs_set_tunable,
};
-/**************************************************************************************/
-
#ifdef CONFIG_FS_ENET_HAS_FEC
#define IS_FEC(ops) ((ops) == &fs_fec_ops)
#else
@@ -894,7 +836,7 @@ static const struct net_device_ops fs_enet_netdev_ops = {
.ndo_start_xmit = fs_enet_start_xmit,
.ndo_tx_timeout = fs_timeout,
.ndo_set_rx_mode = fs_set_multicast_list,
- .ndo_eth_ioctl = phy_do_ioctl_running,
+ .ndo_eth_ioctl = fs_eth_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -902,17 +844,23 @@ static const struct net_device_ops fs_enet_netdev_ops = {
#endif
};
+static const struct phylink_mac_ops fs_enet_phylink_mac_ops = {
+ .mac_config = fs_mac_config,
+ .mac_link_down = fs_mac_link_down,
+ .mac_link_up = fs_mac_link_up,
+};
+
static int fs_enet_probe(struct platform_device *ofdev)
{
+ int privsize, len, ret = -ENODEV;
+ struct fs_platform_info *fpi;
+ struct fs_enet_private *fep;
+ phy_interface_t phy_mode;
const struct fs_ops *ops;
struct net_device *ndev;
- struct fs_enet_private *fep;
- struct fs_platform_info *fpi;
+ struct phylink *phylink;
const u32 *data;
struct clk *clk;
- int err;
- const char *phy_connection_type;
- int privsize, len, ret = -ENODEV;
ops = device_get_match_data(&ofdev->dev);
if (!ops)
@@ -930,51 +878,36 @@ static int fs_enet_probe(struct platform_device *ofdev)
fpi->cp_command = *data;
}
+ ret = of_get_phy_mode(ofdev->dev.of_node, &phy_mode);
+ if (ret) {
+ /* For compatibility, if the mode isn't specified in DT,
+ * assume MII
+ */
+ phy_mode = PHY_INTERFACE_MODE_MII;
+ }
+
fpi->rx_ring = RX_RING_SIZE;
fpi->tx_ring = TX_RING_SIZE;
fpi->rx_copybreak = 240;
fpi->napi_weight = 17;
- fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
- if (!fpi->phy_node && of_phy_is_fixed_link(ofdev->dev.of_node)) {
- err = of_phy_register_fixed_link(ofdev->dev.of_node);
- if (err)
- goto out_free_fpi;
-
- /* In the case of a fixed PHY, the DT node associated
- * to the PHY is the Ethernet MAC DT node.
- */
- fpi->phy_node = of_node_get(ofdev->dev.of_node);
- }
-
- if (of_device_is_compatible(ofdev->dev.of_node, "fsl,mpc5125-fec")) {
- phy_connection_type = of_get_property(ofdev->dev.of_node,
- "phy-connection-type", NULL);
- if (phy_connection_type && !strcmp("rmii", phy_connection_type))
- fpi->use_rmii = 1;
- }
/* make clock lookup non-fatal (the driver is shared among platforms),
* but require enable to succeed when a clock was specified/found,
* keep a reference to the clock upon successful acquisition
*/
- clk = devm_clk_get(&ofdev->dev, "per");
- if (!IS_ERR(clk)) {
- ret = clk_prepare_enable(clk);
- if (ret)
- goto out_deregister_fixed_link;
-
- fpi->clk_per = clk;
- }
+ clk = devm_clk_get_optional_enabled(&ofdev->dev, "per");
+ if (IS_ERR(clk))
+ goto out_free_fpi;
privsize = sizeof(*fep) +
- sizeof(struct sk_buff **) *
+ sizeof(struct sk_buff **) *
(fpi->rx_ring + fpi->tx_ring) +
sizeof(char) * fpi->tx_ring;
ndev = alloc_etherdev(privsize);
if (!ndev) {
ret = -ENOMEM;
- goto out_put;
+ goto out_free_fpi;
}
SET_NETDEV_DEV(ndev, &ofdev->dev);
@@ -986,9 +919,29 @@ static int fs_enet_probe(struct platform_device *ofdev)
fep->fpi = fpi;
fep->ops = ops;
+ fep->phylink_config.dev = &ndev->dev;
+ fep->phylink_config.type = PHYLINK_NETDEV;
+ fep->phylink_config.mac_capabilities = MAC_10 | MAC_100;
+
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ fep->phylink_config.supported_interfaces);
+
+ if (of_device_is_compatible(ofdev->dev.of_node, "fsl,mpc5125-fec"))
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ fep->phylink_config.supported_interfaces);
+
+ phylink = phylink_create(&fep->phylink_config, dev_fwnode(fep->dev),
+ phy_mode, &fs_enet_phylink_mac_ops);
+ if (IS_ERR(phylink)) {
+ ret = PTR_ERR(phylink);
+ goto out_free_dev;
+ }
+
+ fep->phylink = phylink;
+
ret = fep->ops->setup_data(ndev);
if (ret)
- goto out_free_dev;
+ goto out_phylink;
fep->rx_skbuff = (struct sk_buff **)&fep[1];
fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring;
@@ -1018,8 +971,6 @@ static int fs_enet_probe(struct platform_device *ofdev)
ndev->ethtool_ops = &fs_ethtool_ops;
- netif_carrier_off(ndev);
-
ndev->features |= NETIF_F_SG;
ret = register_netdev(ndev);
@@ -1034,14 +985,10 @@ out_free_bd:
fep->ops->free_bd(ndev);
out_cleanup_data:
fep->ops->cleanup_data(ndev);
+out_phylink:
+ phylink_destroy(fep->phylink);
out_free_dev:
free_netdev(ndev);
-out_put:
- clk_disable_unprepare(fpi->clk_per);
-out_deregister_fixed_link:
- of_node_put(fpi->phy_node);
- if (of_phy_is_fixed_link(ofdev->dev.of_node))
- of_phy_deregister_fixed_link(ofdev->dev.of_node);
out_free_fpi:
kfree(fpi);
return ret;
@@ -1057,10 +1004,7 @@ static void fs_enet_remove(struct platform_device *ofdev)
fep->ops->free_bd(ndev);
fep->ops->cleanup_data(ndev);
dev_set_drvdata(fep->dev, NULL);
- of_node_put(fep->fpi->phy_node);
- clk_disable_unprepare(fep->fpi->clk_per);
- if (of_phy_is_fixed_link(ofdev->dev.of_node))
- of_phy_deregister_fixed_link(ofdev->dev.of_node);
+ phylink_destroy(fep->phylink);
free_netdev(ndev);
}
@@ -1108,15 +1052,15 @@ static struct platform_driver fs_enet_driver = {
.of_match_table = fs_enet_match,
},
.probe = fs_enet_probe,
- .remove_new = fs_enet_remove,
+ .remove = fs_enet_remove,
};
#ifdef CONFIG_NET_POLL_CONTROLLER
static void fs_enet_netpoll(struct net_device *dev)
{
- disable_irq(dev->irq);
- fs_enet_interrupt(dev->irq, dev);
- enable_irq(dev->irq);
+ disable_irq(dev->irq);
+ fs_enet_interrupt(dev->irq, dev);
+ enable_irq(dev->irq);
}
#endif
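
As context for the conversion above, here is a minimal free-standing sketch of the phylink wiring a MAC driver needs. The foo_* names are hypothetical and not part of this patch; the callback signatures match what fs_enet now implements.

	#include <linux/phy.h>
	#include <linux/phylink.h>

	/* Hypothetical driver private data, for illustration only */
	struct foo_priv {
		struct phylink *phylink;
		struct phylink_config phylink_config;
	};

	static void foo_mac_config(struct phylink_config *config, unsigned int mode,
				   const struct phylink_link_state *state)
	{
		/* program interface-mode dependent registers here */
	}

	static void foo_mac_link_up(struct phylink_config *config,
				    struct phy_device *phy, unsigned int mode,
				    phy_interface_t interface, int speed, int duplex,
				    bool tx_pause, bool rx_pause)
	{
		/* apply the resolved speed/duplex/pause and (re)start the MAC */
	}

	static void foo_mac_link_down(struct phylink_config *config,
				      unsigned int mode, phy_interface_t interface)
	{
		/* stop the MAC */
	}

	static const struct phylink_mac_ops foo_phylink_mac_ops = {
		.mac_config	= foo_mac_config,
		.mac_link_up	= foo_mac_link_up,
		.mac_link_down	= foo_mac_link_down,
	};

Probe fills phylink_config (supported_interfaces, mac_capabilities) and calls phylink_create(); .ndo_open uses phylink_of_phy_connect() plus phylink_start(); .ndo_stop uses phylink_stop() plus phylink_disconnect_phy(); remove calls phylink_destroy(). That is exactly the sequence the fs_enet changes above follow.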
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
index 21c07ac05225..36e4fcc29e36 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
@@ -3,11 +3,11 @@
#define FS_ENET_H
#include <linux/clk.h>
-#include <linux/mii.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/phy.h>
+#include <linux/phylink.h>
#include <linux/dma-mapping.h>
#ifdef CONFIG_CPM1
@@ -77,8 +77,8 @@ struct fs_ops {
void (*free_bd)(struct net_device *dev);
void (*cleanup_data)(struct net_device *dev);
void (*set_multicast_list)(struct net_device *dev);
- void (*adjust_link)(struct net_device *dev);
- void (*restart)(struct net_device *dev);
+ void (*restart)(struct net_device *dev, phy_interface_t interface,
+ int speed, int duplex);
void (*stop)(struct net_device *dev);
void (*napi_clear_event)(struct net_device *dev);
void (*napi_enable)(struct net_device *dev);
@@ -93,14 +93,6 @@ struct fs_ops {
void (*tx_restart)(struct net_device *dev);
};
-struct phy_info {
- unsigned int id;
- const char *name;
- void (*startup) (struct net_device * dev);
- void (*shutdown) (struct net_device * dev);
- void (*ack_int) (struct net_device * dev);
-};
-
/* The FEC stores dest/src/type, data, and checksum for receive packets.
*/
#define MAX_MTU 1508 /* Allow fullsized pppoe packets over VLAN */
@@ -122,15 +114,9 @@ struct fs_platform_info {
u32 dpram_offset;
- struct device_node *phy_node;
-
int rx_ring, tx_ring; /* number of buffers on rx */
int rx_copybreak; /* limit we copy small frames */
int napi_weight; /* NAPI weight */
-
- int use_rmii; /* use RMII mode */
-
- struct clk *clk_per; /* 'per' clock for register access */
};
struct fs_enet_private {
@@ -154,14 +140,11 @@ struct fs_enet_private {
cbd_t __iomem *cur_rx;
cbd_t __iomem *cur_tx;
int tx_free;
- const struct phy_info *phy;
u32 msg_enable;
- struct mii_if_info mii_if;
- unsigned int last_mii_status;
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
int interrupt;
- int oldduplex, oldspeed, oldlink; /* current settings */
-
/* event masks */
u32 ev_napi; /* mask of NAPI events */
u32 ev; /* event mask */
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
index e2ffac9eb2ad..be63293511d9 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* FCC driver for Motorola MPC82xx (PQ2).
*
@@ -6,10 +7,6 @@
*
* 2005 (c) MontaVista Software, Inc.
* Vitaly Bordug <vbordug@ru.mvista.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
*/
#include <linux/module.h>
@@ -25,7 +22,6 @@
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
-#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
@@ -239,7 +235,8 @@ static void set_multicast_list(struct net_device *dev)
set_promiscuous_mode(dev);
}
-static void restart(struct net_device *dev)
+static void restart(struct net_device *dev, phy_interface_t interface,
+ int speed, int duplex)
{
struct fs_enet_private *fep = netdev_priv(dev);
const struct fs_platform_info *fpi = fep->fpi;
@@ -363,8 +360,8 @@ static void restart(struct net_device *dev)
fs_init_bds(dev);
/* adjust to speed (for RMII mode) */
- if (fpi->use_rmii) {
- if (dev->phydev->speed == 100)
+ if (interface == PHY_INTERFACE_MODE_RMII) {
+ if (speed == SPEED_100)
C8(fcccp, fcc_gfemr, 0x20);
else
S8(fcccp, fcc_gfemr, 0x20);
@@ -386,11 +383,11 @@ static void restart(struct net_device *dev)
W32(fccp, fcc_fpsmr, FCC_PSMR_ENCRC);
- if (fpi->use_rmii)
+ if (interface == PHY_INTERFACE_MODE_RMII)
S32(fccp, fcc_fpsmr, FCC_PSMR_RMII);
/* adjust to duplex mode */
- if (dev->phydev->duplex)
+ if (duplex == DUPLEX_FULL)
S32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
else
C32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index cdc89d83cf07..f2ecd20027cf 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Freescale Ethernet controllers
*
@@ -6,10 +7,6 @@
*
* 2005 (c) MontaVista Software, Inc.
* Vitaly Bordug <vbordug@ru.mvista.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
*/
#include <linux/module.h>
@@ -26,7 +23,6 @@
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
-#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
@@ -224,7 +220,8 @@ static void set_multicast_list(struct net_device *dev)
set_promiscuous_mode(dev);
}
-static void restart(struct net_device *dev)
+static void restart(struct net_device *dev, phy_interface_t interface,
+ int speed, int duplex)
{
struct fs_enet_private *fep = netdev_priv(dev);
struct fec __iomem *fecp = fep->fec.fecp;
@@ -306,13 +303,13 @@ static void restart(struct net_device *dev)
* Only set MII/RMII mode - do not touch maximum frame length
* configured before.
*/
- FS(fecp, r_cntrl, fpi->use_rmii ?
- FEC_RCNTRL_RMII_MODE : FEC_RCNTRL_MII_MODE);
+ FS(fecp, r_cntrl, interface == PHY_INTERFACE_MODE_RMII ?
+ FEC_RCNTRL_RMII_MODE : FEC_RCNTRL_MII_MODE);
#endif
/*
* adjust to duplex mode
*/
- if (dev->phydev->duplex) {
+ if (duplex == DUPLEX_FULL) {
FC(fecp, r_cntrl, FEC_RCNTRL_DRT);
FS(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD enable */
} else {
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
index a64cb6270515..6c97191649de 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Ethernet on Serial Communications Controller (SCC) driver for Motorola MPC8xx and MPC82xx.
*
@@ -6,10 +7,6 @@
*
* 2005 (c) MontaVista Software, Inc.
* Vitaly Bordug <vbordug@ru.mvista.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
*/
#include <linux/module.h>
@@ -25,7 +22,6 @@
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
-#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
@@ -131,15 +127,14 @@ static int setup_data(struct net_device *dev)
static int allocate_bd(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
- const struct fs_platform_info *fpi = fep->fpi;
+ struct fs_platform_info *fpi = fep->fpi;
- fep->ring_mem_addr = cpm_muram_alloc((fpi->tx_ring + fpi->rx_ring) *
- sizeof(cbd_t), 8);
- if (IS_ERR_VALUE(fep->ring_mem_addr))
+ fpi->dpram_offset = cpm_muram_alloc((fpi->tx_ring + fpi->rx_ring) *
+ sizeof(cbd_t), 8);
+ if (IS_ERR_VALUE(fpi->dpram_offset))
return -ENOMEM;
- fep->ring_base = (void __iomem __force*)
- cpm_muram_addr(fep->ring_mem_addr);
+ fep->ring_base = cpm_muram_addr(fpi->dpram_offset);
return 0;
}
@@ -147,9 +142,10 @@ static int allocate_bd(struct net_device *dev)
static void free_bd(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
+ const struct fs_platform_info *fpi = fep->fpi;
if (fep->ring_base)
- cpm_muram_free(fep->ring_mem_addr);
+ cpm_muram_free(fpi->dpram_offset);
}
static void cleanup_data(struct net_device *dev)
@@ -230,7 +226,8 @@ static void set_multicast_list(struct net_device *dev)
* change. This only happens when switching between half and full
* duplex.
*/
-static void restart(struct net_device *dev)
+static void restart(struct net_device *dev, phy_interface_t interface,
+ int speed, int duplex)
{
struct fs_enet_private *fep = netdev_priv(dev);
scc_t __iomem *sccp = fep->scc.sccp;
@@ -247,9 +244,9 @@ static void restart(struct net_device *dev)
__fs_out8((u8 __iomem *)ep + i, 0);
/* point to bds */
- W16(ep, sen_genscc.scc_rbase, fep->ring_mem_addr);
+ W16(ep, sen_genscc.scc_rbase, fpi->dpram_offset);
W16(ep, sen_genscc.scc_tbase,
- fep->ring_mem_addr + sizeof(cbd_t) * fpi->rx_ring);
+ fpi->dpram_offset + sizeof(cbd_t) * fpi->rx_ring);
/* Initialize function code registers for big-endian.
*/
@@ -341,7 +338,7 @@ static void restart(struct net_device *dev)
W16(sccp, scc_psmr, SCC_PSMR_ENCRC | SCC_PSMR_NIB22);
/* Set full duplex mode if needed */
- if (dev->phydev->duplex)
+ if (duplex == DUPLEX_FULL)
S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE);
/* Restore multicast and promiscuous settings */
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
index f965a2329055..66038e2a4ae3 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
*
@@ -6,10 +7,6 @@
*
* 2005 (c) MontaVista Software, Inc.
* Vitaly Bordug <vbordug@ru.mvista.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
*/
#include <linux/module.h>
@@ -126,7 +123,7 @@ static int fs_mii_bitbang_init(struct mii_bus *bus, struct device_node *np)
* we get is an int, and the odds of multiple bitbang mdio buses
* is low enough that it's not worth going too crazy.
*/
- snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start);
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%pa", &res.start);
data = of_get_property(np, "fsl,mdio-pin", &len);
if (!data || len != 4)
@@ -217,7 +214,7 @@ static struct platform_driver fs_enet_bb_mdio_driver = {
.of_match_table = fs_enet_mdio_bb_match,
},
.probe = fs_enet_mdio_probe,
- .remove_new = fs_enet_mdio_remove,
+ .remove = fs_enet_mdio_remove,
};
module_platform_driver(fs_enet_bb_mdio_driver);
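
The bus-id change above ("%x" to "%pa") matters because resource_size_t can be wider than unsigned int. As a hedged reminder of the format specifier (foo_* is hypothetical), %pa prints a phys_addr_t/resource_size_t passed by reference, so it is correct regardless of the type's width:

	#include <linux/ioport.h>
	#include <linux/printk.h>

	/* Sketch only: %pa takes a *pointer* to the resource start */
	static void foo_print_mmio_base(struct resource *res)
	{
		char id[32];

		snprintf(id, sizeof(id), "%pa", &res->start);
		pr_info("MMIO base: %s\n", id);
	}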
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
index 7bb69727952a..dec31b638941 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
*
@@ -6,10 +7,6 @@
*
* 2005 (c) MontaVista Software, Inc.
* Vitaly Bordug <vbordug@ru.mvista.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
*/
#include <linux/module.h>
@@ -215,7 +212,7 @@ static struct platform_driver fs_enet_fec_mdio_driver = {
.of_match_table = fs_enet_mdio_fec_match,
},
.probe = fs_enet_mdio_probe,
- .remove_new = fs_enet_mdio_remove,
+ .remove = fs_enet_mdio_remove,
};
module_platform_driver(fs_enet_fec_mdio_driver);
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 026f7270a54d..de88776dd2a2 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -479,10 +479,12 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
"missing 'reg' property in node %pOF\n",
tbi);
err = -EBUSY;
+ of_node_put(tbi);
goto error;
}
set_tbipa(*prop, pdev,
data->get_tbipa, priv->map, &res);
+ of_node_put(tbi);
}
}
@@ -491,8 +493,8 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
err = of_mdiobus_register(new_bus, np);
if (err) {
- dev_err(&pdev->dev, "cannot register %s as MDIO bus\n",
- new_bus->name);
+ dev_err_probe(&pdev->dev, err, "cannot register %s as MDIO bus\n",
+ new_bus->name);
goto error;
}
@@ -526,7 +528,7 @@ static struct platform_driver fsl_pq_mdio_driver = {
.of_match_table = fsl_pq_mdio_match,
},
.probe = fsl_pq_mdio_probe,
- .remove_new = fsl_pq_mdio_remove,
+ .remove = fsl_pq_mdio_remove,
};
module_platform_driver(fsl_pq_mdio_driver);
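
The of_node_put(tbi) calls added above plug a device-node reference leak: of_parse_phandle() and the child-node iterators return a node with an elevated refcount that must be dropped on every exit path. A small sketch of the pattern, with a hypothetical foo_ helper and property name not taken from this driver:

	#include <linux/of.h>

	/* Illustrative only: release the node reference on both error and success */
	static int foo_read_tbi_reg(struct device_node *np, u32 *addr)
	{
		struct device_node *tbi;
		int err;

		tbi = of_parse_phandle(np, "tbi-handle", 0);
		if (!tbi)
			return -ENODEV;

		err = of_property_read_u32(tbi, "reg", addr);
		of_node_put(tbi);	/* drop the reference in every path */

		return err;
	}

The dev_err_probe() change in the same file only threads the error code into the log message and keeps -EPROBE_DEFER quiet; it does not change the error handling itself.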
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index a811238c018d..7c0f049f0938 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -97,6 +97,7 @@
#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_net.h>
+#include <linux/property.h>
#include "gianfar.h"
@@ -571,18 +572,6 @@ static int gfar_parse_group(struct device_node *np,
return 0;
}
-static int gfar_of_group_count(struct device_node *np)
-{
- struct device_node *child;
- int num = 0;
-
- for_each_available_child_of_node(np, child)
- if (of_node_name_eq(child, "queue-group"))
- num++;
-
- return num;
-}
-
/* Reads the controller's registers to determine what interface
* connects it to the PHY.
*/
@@ -654,8 +643,10 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
num_rx_qs = 1;
} else { /* MQ_MG_MODE */
/* get the actual number of supported groups */
- unsigned int num_grps = gfar_of_group_count(np);
+ unsigned int num_grps;
+ num_grps = device_get_named_child_node_count(&ofdev->dev,
+ "queue-group");
if (num_grps == 0 || num_grps > MAXGROUPS) {
dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
num_grps);
@@ -754,6 +745,8 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
err = of_get_ethdev_address(np, dev);
+ if (err == -EPROBE_DEFER)
+ goto err_grp_init;
if (err) {
eth_hw_addr_random(dev);
dev_info(&ofdev->dev, "Using random MAC address: %pM\n", dev->dev_addr);
@@ -1645,20 +1638,11 @@ static void gfar_configure_serdes(struct net_device *dev)
*/
static int init_phy(struct net_device *dev)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct gfar_private *priv = netdev_priv(dev);
phy_interface_t interface = priv->interface;
struct phy_device *phydev;
struct ethtool_keee edata;
- linkmode_set_bit_array(phy_10_100_features_array,
- ARRAY_SIZE(phy_10_100_features_array),
- mask);
- linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
- linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
- if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask);
-
priv->oldlink = 0;
priv->oldspeed = 0;
priv->oldduplex = -1;
@@ -1673,9 +1657,8 @@ static int init_phy(struct net_device *dev)
if (interface == PHY_INTERFACE_MODE_SGMII)
gfar_configure_serdes(dev);
- /* Remove any features not supported by the controller */
- linkmode_and(phydev->supported, phydev->supported, mask);
- linkmode_copy(phydev->advertising, phydev->supported);
+ if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT))
+ phy_set_max_speed(phydev, SPEED_100);
/* Add support for flow control */
phy_support_asym_pause(phydev);
@@ -2026,7 +2009,7 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
if (dev->flags & IFF_UP)
stop_gfar(dev);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
if (dev->flags & IFF_UP)
startup_gfar(dev);
@@ -2069,15 +2052,13 @@ static void gfar_timeout(struct net_device *dev, unsigned int txqueue)
schedule_work(&priv->reset_task);
}
-static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
+static int gfar_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config config;
struct gfar_private *priv = netdev_priv(netdev);
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- switch (config.tx_type) {
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
priv->hwts_tx_en = 0;
break;
@@ -2090,7 +2071,7 @@ static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
return -ERANGE;
}
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
if (priv->hwts_rx_en) {
priv->hwts_rx_en = 0;
@@ -2104,44 +2085,23 @@ static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
priv->hwts_rx_en = 1;
reset_gfar(netdev);
}
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
break;
}
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
-static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
+static int gfar_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
{
- struct hwtstamp_config config;
struct gfar_private *priv = netdev_priv(netdev);
- config.flags = 0;
- config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
- config.rx_filter = (priv->hwts_rx_en ?
- HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
-
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
-}
+ config->tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ config->rx_filter = priv->hwts_rx_en ? HWTSTAMP_FILTER_ALL :
+ HWTSTAMP_FILTER_NONE;
-static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- struct phy_device *phydev = dev->phydev;
-
- if (!netif_running(dev))
- return -EINVAL;
-
- if (cmd == SIOCSHWTSTAMP)
- return gfar_hwtstamp_set(dev, rq);
- if (cmd == SIOCGHWTSTAMP)
- return gfar_hwtstamp_get(dev, rq);
-
- if (!phydev)
- return -ENODEV;
-
- return phy_mii_ioctl(phydev, rq, cmd);
+ return 0;
}
/* Interrupt Handler for Transmit complete */
@@ -2205,8 +2165,9 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
if (unlikely(do_tstamp)) {
struct skb_shared_hwtstamps shhwtstamps;
- u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) &
- ~0x7UL);
+ __be64 *ns;
+
+ ns = (__be64 *)(((uintptr_t)skb->data + 0x10) & ~0x7UL);
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
shhwtstamps.hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
@@ -2469,7 +2430,7 @@ static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
/* Get receive timestamp from the skb */
if (priv->hwts_rx_en) {
struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
- u64 *ns = (u64 *) skb->data;
+ __be64 *ns = (__be64 *)skb->data;
memset(shhwtstamps, 0, sizeof(*shhwtstamps));
shhwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
@@ -3181,7 +3142,7 @@ static const struct net_device_ops gfar_netdev_ops = {
.ndo_set_features = gfar_set_features,
.ndo_set_rx_mode = gfar_set_multi,
.ndo_tx_timeout = gfar_timeout,
- .ndo_eth_ioctl = gfar_ioctl,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_get_stats64 = gfar_get_stats64,
.ndo_change_carrier = fixed_phy_change_carrier,
.ndo_set_mac_address = gfar_set_mac_addr,
@@ -3189,6 +3150,8 @@ static const struct net_device_ops gfar_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = gfar_netpoll,
#endif
+ .ndo_hwtstamp_get = gfar_hwtstamp_get,
+ .ndo_hwtstamp_set = gfar_hwtstamp_set,
};
/* Set up the ethernet device structure, private data,
@@ -3640,7 +3603,7 @@ static struct platform_driver gfar_driver = {
.of_match_table = gfar_match,
},
.probe = gfar_probe,
- .remove_new = gfar_remove,
+ .remove = gfar_remove,
};
module_platform_driver(gfar_driver);
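
With the gianfar changes above, the driver implements the kernel_hwtstamp_config callbacks instead of decoding SIOCSHWTSTAMP/SIOCGHWTSTAMP itself; the ifreq copy_from_user()/copy_to_user() moves into the networking core. A minimal sketch of the driver-side shape, using hypothetical foo_* names:

	#include <linux/net_tstamp.h>
	#include <linux/netdevice.h>

	/* Sketch only: the core hands the driver an already-copied config */
	static int foo_hwtstamp_set(struct net_device *ndev,
				    struct kernel_hwtstamp_config *config,
				    struct netlink_ext_ack *extack)
	{
		if (config->tx_type != HWTSTAMP_TX_OFF &&
		    config->tx_type != HWTSTAMP_TX_ON)
			return -ERANGE;

		/* apply config->tx_type / config->rx_filter to the hardware here */
		return 0;
	}

	static int foo_hwtstamp_get(struct net_device *ndev,
				    struct kernel_hwtstamp_config *config)
	{
		config->tx_type = HWTSTAMP_TX_OFF;
		config->rx_filter = HWTSTAMP_FILTER_NONE;
		return 0;
	}

The pair is then hooked up through .ndo_hwtstamp_get/.ndo_hwtstamp_set in net_device_ops, as the gianfar diff does, and the generic phy_do_ioctl_running can take over .ndo_eth_ioctl.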
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 7a15b9245698..6fa752d3b60d 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -115,12 +115,14 @@ static const char stat_gstrings[][ETH_GSTRING_LEN] = {
static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf)
{
struct gfar_private *priv = netdev_priv(dev);
+ int i;
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON)
- memcpy(buf, stat_gstrings, GFAR_STATS_LEN * ETH_GSTRING_LEN);
+ for (i = 0; i < GFAR_STATS_LEN; i++)
+ ethtool_puts(&buf, stat_gstrings[i]);
else
- memcpy(buf, stat_gstrings,
- GFAR_EXTRA_STATS_LEN * ETH_GSTRING_LEN);
+ for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++)
+ ethtool_puts(&buf, stat_gstrings[i]);
}
/* Fill in an array of 64-bit statistics from various sources.
@@ -779,14 +781,26 @@ err:
return ret;
}
-static int gfar_set_hash_opts(struct gfar_private *priv,
- struct ethtool_rxnfc *cmd)
+static int gfar_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
{
+ struct gfar_private *priv = netdev_priv(dev);
+ int ret;
+
+ if (test_bit(GFAR_RESETTING, &priv->state))
+ return -EBUSY;
+
+ mutex_lock(&priv->rx_queue_access);
+
+ ret = 0;
/* write the filer rules here */
if (!gfar_ethflow_to_filer_table(priv, cmd->data, cmd->flow_type))
- return -EINVAL;
+ ret = -EINVAL;
- return 0;
+ mutex_unlock(&priv->rx_queue_access);
+
+ return ret;
}
static int gfar_check_filer_hardware(struct gfar_private *priv)
@@ -1396,9 +1410,6 @@ static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
mutex_lock(&priv->rx_queue_access);
switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- ret = gfar_set_hash_opts(priv, cmd);
- break;
case ETHTOOL_SRXCLSRLINS:
if ((cmd->fs.ring_cookie != RX_CLS_FLOW_DISC &&
cmd->fs.ring_cookie >= priv->num_rx_queues) ||
@@ -1420,6 +1431,13 @@ static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
return ret;
}
+static u32 gfar_get_rx_ring_count(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+
+ return priv->num_rx_queues;
+}
+
static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
@@ -1427,9 +1445,6 @@ static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
int ret = 0;
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = priv->num_rx_queues;
- break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = priv->rx_list.count;
break;
@@ -1448,19 +1463,15 @@ static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
}
static int gfar_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct gfar_private *priv = netdev_priv(dev);
struct platform_device *ptp_dev;
struct device_node *ptp_node;
struct ptp_qoriq *ptp = NULL;
- info->phc_index = -1;
-
if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) {
- info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
return 0;
}
@@ -1468,8 +1479,10 @@ static int gfar_get_ts_info(struct net_device *dev,
if (ptp_node) {
ptp_dev = of_find_device_by_node(ptp_node);
of_node_put(ptp_node);
- if (ptp_dev)
+ if (ptp_dev) {
ptp = platform_get_drvdata(ptp_dev);
+ put_device(&ptp_dev->dev);
+ }
}
if (ptp)
@@ -1478,9 +1491,7 @@ static int gfar_get_ts_info(struct net_device *dev,
info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
+ SOF_TIMESTAMPING_TX_SOFTWARE;
info->tx_types = (1 << HWTSTAMP_TX_OFF) |
(1 << HWTSTAMP_TX_ON);
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
@@ -1512,6 +1523,8 @@ const struct ethtool_ops gfar_ethtool_ops = {
#endif
.set_rxnfc = gfar_set_nfc,
.get_rxnfc = gfar_get_nfc,
+ .get_rx_ring_count = gfar_get_rx_ring_count,
+ .set_rxfh_fields = gfar_set_rxfh_fields,
.get_ts_info = gfar_get_ts_info,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
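
ethtool_puts(), used in gfar_gstrings() above, copies one string into an ETH_GSTRING_LEN slot and advances the buffer pointer, replacing manual offset arithmetic. A short sketch under hypothetical names:

	#include <linux/ethtool.h>
	#include <linux/netdevice.h>

	static const char foo_stat_names[][ETH_GSTRING_LEN] = {
		"rx_packets", "tx_packets", "rx_errors",
	};

	/* Sketch only: ethtool_puts() advances *buf by ETH_GSTRING_LEN per call */
	static void foo_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
	{
		int i;

		if (stringset != ETH_SS_STATS)
			return;

		for (i = 0; i < ARRAY_SIZE(foo_stat_names); i++)
			ethtool_puts(&buf, foo_stat_names[i]);
	}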
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index ab421243a419..affd5a6c44e7 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -26,7 +26,7 @@
#include <linux/dma-mapping.h>
#include <linux/mii.h>
#include <linux/phy.h>
-#include <linux/phy_fixed.h>
+#include <linux/phylink.h>
#include <linux/workqueue.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -34,6 +34,7 @@
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/platform_device.h>
+#include <linux/rtnetlink.h>
#include <linux/uaccess.h>
#include <asm/irq.h>
@@ -132,7 +133,6 @@ static const struct ucc_geth_info ugeth_primary_info = {
.transmitFlowControl = 1,
.maxGroupAddrInHash = 4,
.maxIndAddrInHash = 4,
- .prel = 7,
.maxFrameLength = 1518+16, /* Add extra bytes for VLANs etc. */
.minFrameLength = 64,
.maxD1Length = 1520+16, /* Add extra bytes for VLANs etc. */
@@ -1205,34 +1205,6 @@ static int init_mac_station_addr_regs(u8 address_byte_0,
return 0;
}
-static int init_check_frame_length_mode(int length_check,
- u32 __iomem *maccfg2_register)
-{
- u32 value = 0;
-
- value = in_be32(maccfg2_register);
-
- if (length_check)
- value |= MACCFG2_LC;
- else
- value &= ~MACCFG2_LC;
-
- out_be32(maccfg2_register, value);
- return 0;
-}
-
-static int init_preamble_length(u8 preamble_length,
- u32 __iomem *maccfg2_register)
-{
- if ((preamble_length < 3) || (preamble_length > 7))
- return -EINVAL;
-
- clrsetbits_be32(maccfg2_register, MACCFG2_PREL_MASK,
- preamble_length << MACCFG2_PREL_SHIFT);
-
- return 0;
-}
-
static int init_rx_parameters(int reject_broadcast,
int receive_short_frames,
int promiscuous, u32 __iomem *upsmr_register)
@@ -1287,94 +1259,11 @@ static int init_min_frame_len(u16 min_frame_length,
return 0;
}
-static int adjust_enet_interface(struct ucc_geth_private *ugeth)
+static bool phy_interface_mode_is_reduced(phy_interface_t interface)
{
- struct ucc_geth_info *ug_info;
- struct ucc_geth __iomem *ug_regs;
- struct ucc_fast __iomem *uf_regs;
- int ret_val;
- u32 upsmr, maccfg2;
- u16 value;
-
- ugeth_vdbg("%s: IN", __func__);
-
- ug_info = ugeth->ug_info;
- ug_regs = ugeth->ug_regs;
- uf_regs = ugeth->uccf->uf_regs;
-
- /* Set MACCFG2 */
- maccfg2 = in_be32(&ug_regs->maccfg2);
- maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;
- if ((ugeth->max_speed == SPEED_10) ||
- (ugeth->max_speed == SPEED_100))
- maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
- else if (ugeth->max_speed == SPEED_1000)
- maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
- maccfg2 |= ug_info->padAndCrc;
- out_be32(&ug_regs->maccfg2, maccfg2);
-
- /* Set UPSMR */
- upsmr = in_be32(&uf_regs->upsmr);
- upsmr &= ~(UCC_GETH_UPSMR_RPM | UCC_GETH_UPSMR_R10M |
- UCC_GETH_UPSMR_TBIM | UCC_GETH_UPSMR_RMM);
- if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
- if (ugeth->phy_interface != PHY_INTERFACE_MODE_RMII)
- upsmr |= UCC_GETH_UPSMR_RPM;
- switch (ugeth->max_speed) {
- case SPEED_10:
- upsmr |= UCC_GETH_UPSMR_R10M;
- fallthrough;
- case SPEED_100:
- if (ugeth->phy_interface != PHY_INTERFACE_MODE_RTBI)
- upsmr |= UCC_GETH_UPSMR_RMM;
- }
- }
- if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
- upsmr |= UCC_GETH_UPSMR_TBIM;
- }
- if (ugeth->phy_interface == PHY_INTERFACE_MODE_SGMII)
- upsmr |= UCC_GETH_UPSMR_SGMM;
-
- out_be32(&uf_regs->upsmr, upsmr);
-
- /* Disable autonegotiation in tbi mode, because by default it
- comes up in autonegotiation mode. */
- /* Note that this depends on proper setting in utbipar register. */
- if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
- struct ucc_geth_info *ug_info = ugeth->ug_info;
- struct phy_device *tbiphy;
-
- if (!ug_info->tbi_node)
- pr_warn("TBI mode requires that the device tree specify a tbi-handle\n");
-
- tbiphy = of_phy_find_device(ug_info->tbi_node);
- if (!tbiphy)
- pr_warn("Could not get TBI device\n");
-
- value = phy_read(tbiphy, ENET_TBI_MII_CR);
- value &= ~0x1000; /* Turn off autonegotiation */
- phy_write(tbiphy, ENET_TBI_MII_CR, value);
-
- put_device(&tbiphy->mdio.dev);
- }
-
- init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2);
-
- ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2);
- if (ret_val != 0) {
- if (netif_msg_probe(ugeth))
- pr_err("Preamble length must be between 3 and 7 inclusive\n");
- return ret_val;
- }
-
- return 0;
+ return phy_interface_mode_is_rgmii(interface) ||
+ interface == PHY_INTERFACE_MODE_RMII ||
+ interface == PHY_INTERFACE_MODE_RTBI;
}
static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth)
@@ -1545,108 +1434,7 @@ static void ugeth_activate(struct ucc_geth_private *ugeth)
/* allow to xmit again */
netif_tx_wake_all_queues(ugeth->ndev);
- __netdev_watchdog_up(ugeth->ndev);
-}
-
-/* Called every time the controller might need to be made
- * aware of new link state. The PHY code conveys this
- * information through variables in the ugeth structure, and this
- * function converts those variables into the appropriate
- * register values, and can bring down the device if needed.
- */
-
-static void adjust_link(struct net_device *dev)
-{
- struct ucc_geth_private *ugeth = netdev_priv(dev);
- struct ucc_geth __iomem *ug_regs;
- struct ucc_fast __iomem *uf_regs;
- struct phy_device *phydev = ugeth->phydev;
- int new_state = 0;
-
- ug_regs = ugeth->ug_regs;
- uf_regs = ugeth->uccf->uf_regs;
-
- if (phydev->link) {
- u32 tempval = in_be32(&ug_regs->maccfg2);
- u32 upsmr = in_be32(&uf_regs->upsmr);
- /* Now we make sure that we can be in full duplex mode.
- * If not, we operate in half-duplex mode. */
- if (phydev->duplex != ugeth->oldduplex) {
- new_state = 1;
- if (!(phydev->duplex))
- tempval &= ~(MACCFG2_FDX);
- else
- tempval |= MACCFG2_FDX;
- ugeth->oldduplex = phydev->duplex;
- }
-
- if (phydev->speed != ugeth->oldspeed) {
- new_state = 1;
- switch (phydev->speed) {
- case SPEED_1000:
- tempval = ((tempval &
- ~(MACCFG2_INTERFACE_MODE_MASK)) |
- MACCFG2_INTERFACE_MODE_BYTE);
- break;
- case SPEED_100:
- case SPEED_10:
- tempval = ((tempval &
- ~(MACCFG2_INTERFACE_MODE_MASK)) |
- MACCFG2_INTERFACE_MODE_NIBBLE);
- /* if reduced mode, re-set UPSMR.R10M */
- if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
- if (phydev->speed == SPEED_10)
- upsmr |= UCC_GETH_UPSMR_R10M;
- else
- upsmr &= ~UCC_GETH_UPSMR_R10M;
- }
- break;
- default:
- if (netif_msg_link(ugeth))
- pr_warn(
- "%s: Ack! Speed (%d) is not 10/100/1000!",
- dev->name, phydev->speed);
- break;
- }
- ugeth->oldspeed = phydev->speed;
- }
-
- if (!ugeth->oldlink) {
- new_state = 1;
- ugeth->oldlink = 1;
- }
-
- if (new_state) {
- /*
- * To change the MAC configuration we need to disable
- * the controller. To do so, we have to either grab
- * ugeth->lock, which is a bad idea since 'graceful
- * stop' commands might take quite a while, or we can
- * quiesce driver's activity.
- */
- ugeth_quiesce(ugeth);
- ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
-
- out_be32(&ug_regs->maccfg2, tempval);
- out_be32(&uf_regs->upsmr, upsmr);
-
- ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
- ugeth_activate(ugeth);
- }
- } else if (ugeth->oldlink) {
- new_state = 1;
- ugeth->oldlink = 0;
- ugeth->oldspeed = 0;
- ugeth->oldduplex = -1;
- }
-
- if (new_state && netif_msg_link(ugeth))
- phy_print_status(phydev);
+ netdev_watchdog_up(ugeth->ndev);
}
/* Initialize TBI PHY interface for communicating with the
@@ -1664,8 +1452,7 @@ static void uec_configure_serdes(struct net_device *dev)
struct phy_device *tbiphy;
if (!ug_info->tbi_node) {
- dev_warn(&dev->dev, "SGMII mode requires that the device "
- "tree specify a tbi-handle\n");
+ dev_warn(&dev->dev, "SGMII mode requires that the device tree specify a tbi-handle\n");
return;
}
@@ -1696,34 +1483,145 @@ static void uec_configure_serdes(struct net_device *dev)
put_device(&tbiphy->mdio.dev);
}
-/* Configure the PHY for dev.
- * returns 0 if success. -1 if failure
- */
-static int init_phy(struct net_device *dev)
+static void ugeth_mac_link_up(struct phylink_config *config, struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex, bool tx_pause, bool rx_pause)
{
- struct ucc_geth_private *priv = netdev_priv(dev);
- struct ucc_geth_info *ug_info = priv->ug_info;
- struct phy_device *phydev;
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct ucc_geth_private *ugeth = netdev_priv(ndev);
+ struct ucc_geth_info *ug_info = ugeth->ug_info;
+ struct ucc_geth __iomem *ug_regs = ugeth->ug_regs;
+ struct ucc_fast __iomem *uf_regs = ugeth->uccf->uf_regs;
+ u32 old_maccfg2, maccfg2 = in_be32(&ug_regs->maccfg2);
+ u32 old_upsmr, upsmr = in_be32(&uf_regs->upsmr);
- priv->oldlink = 0;
- priv->oldspeed = 0;
- priv->oldduplex = -1;
+ old_maccfg2 = maccfg2;
+ old_upsmr = upsmr;
- phydev = of_phy_connect(dev, ug_info->phy_node, &adjust_link, 0,
- priv->phy_interface);
- if (!phydev) {
- dev_err(&dev->dev, "Could not attach to PHY\n");
- return -ENODEV;
+ /* No length check */
+ maccfg2 &= ~MACCFG2_LC;
+ maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;
+ upsmr &= ~(UCC_GETH_UPSMR_RPM | UCC_GETH_UPSMR_R10M |
+ UCC_GETH_UPSMR_TBIM | UCC_GETH_UPSMR_RMM);
+
+ if (speed == SPEED_10 || speed == SPEED_100)
+ maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
+ else if (speed == SPEED_1000)
+ maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
+
+ maccfg2 |= ug_info->padAndCrc;
+
+ if (phy_interface_mode_is_reduced(interface)) {
+
+ if (interface != PHY_INTERFACE_MODE_RMII)
+ upsmr |= UCC_GETH_UPSMR_RPM;
+
+ switch (speed) {
+ case SPEED_10:
+ upsmr |= UCC_GETH_UPSMR_R10M;
+ fallthrough;
+ case SPEED_100:
+ if (interface != PHY_INTERFACE_MODE_RTBI)
+ upsmr |= UCC_GETH_UPSMR_RMM;
+ }
}
- if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII)
- uec_configure_serdes(dev);
+ if (interface == PHY_INTERFACE_MODE_TBI ||
+ interface == PHY_INTERFACE_MODE_RTBI)
+ upsmr |= UCC_GETH_UPSMR_TBIM;
- phy_set_max_speed(phydev, priv->max_speed);
+ if (interface == PHY_INTERFACE_MODE_SGMII)
+ upsmr |= UCC_GETH_UPSMR_SGMM;
- priv->phydev = phydev;
+ if (duplex == DUPLEX_HALF)
+ maccfg2 &= ~(MACCFG2_FDX);
+ else
+ maccfg2 |= MACCFG2_FDX;
- return 0;
+ if (maccfg2 != old_maccfg2 || upsmr != old_upsmr) {
+ /*
+ * To change the MAC configuration we need to disable
+ * the controller. To do so, we have to either grab
+ * ugeth->lock, which is a bad idea since 'graceful
+ * stop' commands might take quite a while, or we can
+ * quiesce driver's activity.
+ */
+ ugeth_quiesce(ugeth);
+ ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
+
+ out_be32(&ug_regs->maccfg2, maccfg2);
+ out_be32(&uf_regs->upsmr, upsmr);
+
+ ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
+ ugeth_activate(ugeth);
+ }
+
+ if (interface == PHY_INTERFACE_MODE_SGMII)
+ uec_configure_serdes(ndev);
+
+ if (!phylink_autoneg_inband(mode)) {
+ ug_info->aufc = 0;
+ ug_info->receiveFlowControl = rx_pause;
+ ug_info->transmitFlowControl = tx_pause;
+
+ init_flow_control_params(ug_info->aufc,
+ ug_info->receiveFlowControl,
+ ug_info->transmitFlowControl,
+ ug_info->pausePeriod,
+ ug_info->extensionField,
+ &ugeth->uccf->uf_regs->upsmr,
+ &ugeth->ug_regs->uempr,
+ &ugeth->ug_regs->maccfg1);
+ }
+
+ ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
+}
+
+static void ugeth_mac_link_down(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct ucc_geth_private *ugeth = netdev_priv(ndev);
+
+ ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
+}
+
+static void ugeth_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct ucc_geth_private *ugeth = netdev_priv(ndev);
+ struct ucc_geth_info *ug_info = ugeth->ug_info;
+ u16 value;
+
+ if (state->interface == PHY_INTERFACE_MODE_TBI ||
+ state->interface == PHY_INTERFACE_MODE_RTBI) {
+ struct phy_device *tbiphy;
+
+ if (!ug_info->tbi_node)
+ pr_warn("TBI mode requires that the device tree specify a tbi-handle\n");
+
+ tbiphy = of_phy_find_device(ug_info->tbi_node);
+ if (!tbiphy)
+ pr_warn("Could not get TBI device\n");
+
+ value = phy_read(tbiphy, ENET_TBI_MII_CR);
+ value &= ~0x1000; /* Turn off autonegotiation */
+ phy_write(tbiphy, ENET_TBI_MII_CR, value);
+
+ put_device(&tbiphy->mdio.dev);
+ }
+
+ if (phylink_autoneg_inband(mode)) {
+ ug_info->aufc = 1;
+
+ init_flow_control_params(ug_info->aufc, 1, 1,
+ ug_info->pausePeriod,
+ ug_info->extensionField,
+ &ugeth->uccf->uf_regs->upsmr,
+ &ugeth->ug_regs->uempr,
+ &ugeth->ug_regs->maccfg1);
+ }
}
static void ugeth_dump_regs(struct ucc_geth_private *ugeth)
@@ -1995,7 +1893,6 @@ static void ucc_geth_set_multi(struct net_device *dev)
static void ucc_geth_stop(struct ucc_geth_private *ugeth)
{
struct ucc_geth __iomem *ug_regs = ugeth->ug_regs;
- struct phy_device *phydev = ugeth->phydev;
ugeth_vdbg("%s: IN", __func__);
@@ -2004,7 +1901,7 @@ static void ucc_geth_stop(struct ucc_geth_private *ugeth)
* Must be done before disabling the controller
* or deadlock may happen.
*/
- phy_stop(phydev);
+ phylink_stop(ugeth->phylink);
/* Disable the controller */
ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
@@ -3246,12 +3143,6 @@ static int ucc_geth_init_mac(struct ucc_geth_private *ugeth)
goto err;
}
- err = adjust_enet_interface(ugeth);
- if (err) {
- netif_err(ugeth, ifup, dev, "Cannot configure net device, aborting\n");
- goto err;
- }
-
/* Set MACSTNADDR1, MACSTNADDR2 */
/* For more details see the hardware spec. */
init_mac_station_addr_regs(dev->dev_addr[0],
@@ -3263,12 +3154,6 @@ static int ucc_geth_init_mac(struct ucc_geth_private *ugeth)
&ugeth->ug_regs->macstnaddr1,
&ugeth->ug_regs->macstnaddr2);
- err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
- if (err) {
- netif_err(ugeth, ifup, dev, "Cannot enable net device, aborting\n");
- goto err;
- }
-
return 0;
err:
ucc_geth_stop(ugeth);
@@ -3291,10 +3176,10 @@ static int ucc_geth_open(struct net_device *dev)
return -EINVAL;
}
- err = init_phy(dev);
+ err = phylink_of_phy_connect(ugeth->phylink, ugeth->dev->of_node, 0);
if (err) {
- netif_err(ugeth, ifup, dev, "Cannot initialize PHY, aborting\n");
- return err;
+ dev_err(&dev->dev, "Could not attach to PHY\n");
+ return -ENODEV;
}
err = ucc_geth_init_mac(ugeth);
@@ -3310,13 +3195,13 @@ static int ucc_geth_open(struct net_device *dev)
goto err;
}
- phy_start(ugeth->phydev);
+ phylink_start(ugeth->phylink);
napi_enable(&ugeth->napi);
netdev_reset_queue(dev);
netif_start_queue(dev);
device_set_wakeup_capable(&dev->dev,
- qe_alive_during_sleep() || ugeth->phydev->irq);
+ qe_alive_during_sleep() || dev->phydev->irq);
device_set_wakeup_enable(&dev->dev, ugeth->wol_en);
return err;
@@ -3337,8 +3222,7 @@ static int ucc_geth_close(struct net_device *dev)
cancel_work_sync(&ugeth->timeout_work);
ucc_geth_stop(ugeth);
- phy_disconnect(ugeth->phydev);
- ugeth->phydev = NULL;
+ phylink_disconnect_phy(ugeth->phylink);
free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev);
@@ -3372,7 +3256,7 @@ static void ucc_geth_timeout_work(struct work_struct *work)
ucc_geth_stop(ugeth);
ucc_geth_init_mac(ugeth);
/* Must start PHY here */
- phy_start(ugeth->phydev);
+ phylink_start(ugeth->phylink);
netif_tx_start_all_queues(dev);
}
@@ -3397,6 +3281,7 @@ static int ucc_geth_suspend(struct platform_device *ofdev, pm_message_t state)
{
struct net_device *ndev = platform_get_drvdata(ofdev);
struct ucc_geth_private *ugeth = netdev_priv(ndev);
+ bool mac_wol = false;
if (!netif_running(ndev))
return 0;
@@ -3410,14 +3295,17 @@ static int ucc_geth_suspend(struct platform_device *ofdev, pm_message_t state)
*/
ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
- if (ugeth->wol_en & WAKE_MAGIC) {
+ if (ugeth->wol_en & WAKE_MAGIC && !ugeth->phy_wol_en) {
setbits32(ugeth->uccf->p_uccm, UCC_GETH_UCCE_MPD);
setbits32(&ugeth->ug_regs->maccfg2, MACCFG2_MPE);
ucc_fast_enable(ugeth->uccf, COMM_DIR_RX_AND_TX);
- } else if (!(ugeth->wol_en & WAKE_PHY)) {
- phy_stop(ugeth->phydev);
+ mac_wol = true;
}
+ rtnl_lock();
+ phylink_suspend(ugeth->phylink, mac_wol);
+ rtnl_unlock();
+
return 0;
}
@@ -3451,12 +3339,9 @@ static int ucc_geth_resume(struct platform_device *ofdev)
}
}
- ugeth->oldlink = 0;
- ugeth->oldspeed = 0;
- ugeth->oldduplex = -1;
-
- phy_stop(ugeth->phydev);
- phy_start(ugeth->phydev);
+ rtnl_lock();
+ phylink_resume(ugeth->phylink);
+ rtnl_unlock();
napi_enable(&ugeth->napi);
netif_device_attach(ndev);
@@ -3469,32 +3354,6 @@ static int ucc_geth_resume(struct platform_device *ofdev)
#define ucc_geth_resume NULL
#endif
-static phy_interface_t to_phy_interface(const char *phy_connection_type)
-{
- if (strcasecmp(phy_connection_type, "mii") == 0)
- return PHY_INTERFACE_MODE_MII;
- if (strcasecmp(phy_connection_type, "gmii") == 0)
- return PHY_INTERFACE_MODE_GMII;
- if (strcasecmp(phy_connection_type, "tbi") == 0)
- return PHY_INTERFACE_MODE_TBI;
- if (strcasecmp(phy_connection_type, "rmii") == 0)
- return PHY_INTERFACE_MODE_RMII;
- if (strcasecmp(phy_connection_type, "rgmii") == 0)
- return PHY_INTERFACE_MODE_RGMII;
- if (strcasecmp(phy_connection_type, "rgmii-id") == 0)
- return PHY_INTERFACE_MODE_RGMII_ID;
- if (strcasecmp(phy_connection_type, "rgmii-txid") == 0)
- return PHY_INTERFACE_MODE_RGMII_TXID;
- if (strcasecmp(phy_connection_type, "rgmii-rxid") == 0)
- return PHY_INTERFACE_MODE_RGMII_RXID;
- if (strcasecmp(phy_connection_type, "rtbi") == 0)
- return PHY_INTERFACE_MODE_RTBI;
- if (strcasecmp(phy_connection_type, "sgmii") == 0)
- return PHY_INTERFACE_MODE_SGMII;
-
- return PHY_INTERFACE_MODE_MII;
-}
-
static int ucc_geth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct ucc_geth_private *ugeth = netdev_priv(dev);
@@ -3502,10 +3361,7 @@ static int ucc_geth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
if (!netif_running(dev))
return -EINVAL;
- if (!ugeth->phydev)
- return -ENODEV;
-
- return phy_mii_ioctl(ugeth->phydev, rq, cmd);
+ return phylink_mii_ioctl(ugeth->phylink, rq, cmd);
}
static const struct net_device_ops ucc_geth_netdev_ops = {
@@ -3513,7 +3369,6 @@ static const struct net_device_ops ucc_geth_netdev_ops = {
.ndo_stop = ucc_geth_close,
.ndo_start_xmit = ucc_geth_start_xmit,
.ndo_validate_addr = eth_validate_addr,
- .ndo_change_carrier = fixed_phy_change_carrier,
.ndo_set_mac_address = ucc_geth_set_mac_addr,
.ndo_set_rx_mode = ucc_geth_set_multi,
.ndo_tx_timeout = ucc_geth_timeout,
@@ -3553,6 +3408,12 @@ static int ucc_geth_parse_clock(struct device_node *np, const char *which,
return 0;
}
+static const struct phylink_mac_ops ugeth_mac_ops = {
+ .mac_link_up = ugeth_mac_link_up,
+ .mac_link_down = ugeth_mac_link_down,
+ .mac_config = ugeth_mac_config,
+};
+
static int ucc_geth_probe(struct platform_device* ofdev)
{
struct device *device = &ofdev->dev;
@@ -3560,23 +3421,12 @@ static int ucc_geth_probe(struct platform_device* ofdev)
struct net_device *dev = NULL;
struct ucc_geth_private *ugeth = NULL;
struct ucc_geth_info *ug_info;
+ struct device_node *phy_node;
+ struct phylink *phylink;
struct resource res;
- int err, ucc_num, max_speed = 0;
+ int err, ucc_num;
const unsigned int *prop;
phy_interface_t phy_interface;
- static const int enet_to_speed[] = {
- SPEED_10, SPEED_10, SPEED_10,
- SPEED_100, SPEED_100, SPEED_100,
- SPEED_1000, SPEED_1000, SPEED_1000, SPEED_1000,
- };
- static const phy_interface_t enet_to_phy_interface[] = {
- PHY_INTERFACE_MODE_MII, PHY_INTERFACE_MODE_RMII,
- PHY_INTERFACE_MODE_RGMII, PHY_INTERFACE_MODE_MII,
- PHY_INTERFACE_MODE_RMII, PHY_INTERFACE_MODE_RGMII,
- PHY_INTERFACE_MODE_GMII, PHY_INTERFACE_MODE_RGMII,
- PHY_INTERFACE_MODE_TBI, PHY_INTERFACE_MODE_RTBI,
- PHY_INTERFACE_MODE_SGMII,
- };
ugeth_vdbg("%s: IN", __func__);
@@ -3591,77 +3441,56 @@ static int ucc_geth_probe(struct platform_device* ofdev)
if ((ucc_num < 0) || (ucc_num > 7))
return -ENODEV;
- ug_info = kmemdup(&ugeth_primary_info, sizeof(*ug_info), GFP_KERNEL);
- if (ug_info == NULL)
+ ug_info = devm_kmemdup(&ofdev->dev, &ugeth_primary_info,
+ sizeof(*ug_info), GFP_KERNEL);
+ if (!ug_info)
return -ENOMEM;
ug_info->uf_info.ucc_num = ucc_num;
err = ucc_geth_parse_clock(np, "rx", &ug_info->uf_info.rx_clock);
if (err)
- goto err_free_info;
+ return err;
err = ucc_geth_parse_clock(np, "tx", &ug_info->uf_info.tx_clock);
if (err)
- goto err_free_info;
+ return err;
err = of_address_to_resource(np, 0, &res);
if (err)
- goto err_free_info;
+ return err;
ug_info->uf_info.regs = res.start;
ug_info->uf_info.irq = irq_of_parse_and_map(np, 0);
- ug_info->phy_node = of_parse_phandle(np, "phy-handle", 0);
- if (!ug_info->phy_node && of_phy_is_fixed_link(np)) {
- /*
- * In the case of a fixed PHY, the DT node associated
- * to the PHY is the Ethernet MAC DT node.
- */
- err = of_phy_register_fixed_link(np);
- if (err)
- goto err_free_info;
- ug_info->phy_node = of_node_get(np);
- }
-
/* Find the TBI PHY node. If it's not there, we don't support SGMII */
ug_info->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
- /* get the phy interface type, or default to MII */
- prop = of_get_property(np, "phy-connection-type", NULL);
- if (!prop) {
- /* handle interface property present in old trees */
- prop = of_get_property(ug_info->phy_node, "interface", NULL);
- if (prop != NULL) {
- phy_interface = enet_to_phy_interface[*prop];
- max_speed = enet_to_speed[*prop];
- } else
- phy_interface = PHY_INTERFACE_MODE_MII;
- } else {
- phy_interface = to_phy_interface((const char *)prop);
- }
-
- /* get speed, or derive from PHY interface */
- if (max_speed == 0)
- switch (phy_interface) {
- case PHY_INTERFACE_MODE_GMII:
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- case PHY_INTERFACE_MODE_TBI:
- case PHY_INTERFACE_MODE_RTBI:
- case PHY_INTERFACE_MODE_SGMII:
- max_speed = SPEED_1000;
- break;
- default:
- max_speed = SPEED_100;
- break;
+ phy_node = of_parse_phandle(np, "phy-handle", 0);
+ if (phy_node) {
+ prop = of_get_property(phy_node, "interface", NULL);
+ if (prop) {
+ dev_err(&ofdev->dev,
+ "Device-tree property 'interface' is no longer supported. Please use 'phy-connection-type' instead.");
+ of_node_put(phy_node);
+ err = -EINVAL;
+ goto err_put_tbi;
}
+ of_node_put(phy_node);
+ }
+
+ err = of_get_phy_mode(np, &phy_interface);
+ if (err) {
+ dev_err(&ofdev->dev, "Invalid phy-connection-type");
+ goto err_put_tbi;
+ }
- if (max_speed == SPEED_1000) {
+ if (phy_interface == PHY_INTERFACE_MODE_GMII ||
+ phy_interface_mode_is_rgmii(phy_interface) ||
+ phy_interface == PHY_INTERFACE_MODE_TBI ||
+ phy_interface == PHY_INTERFACE_MODE_RTBI ||
+ phy_interface == PHY_INTERFACE_MODE_SGMII) {
unsigned int snums = qe_get_num_of_snums();
- /* configure muram FIFOs for gigabit operation */
ug_info->uf_info.urfs = UCC_GETH_URFS_GIGA_INIT;
ug_info->uf_info.urfet = UCC_GETH_URFET_GIGA_INIT;
ug_info->uf_info.urfset = UCC_GETH_URFSET_GIGA_INIT;
@@ -3687,11 +3516,10 @@ static int ucc_geth_probe(struct platform_device* ofdev)
ug_info->uf_info.irq);
/* Create an ethernet device instance */
- dev = alloc_etherdev(sizeof(*ugeth));
-
- if (dev == NULL) {
+ dev = devm_alloc_etherdev(&ofdev->dev, sizeof(*ugeth));
+ if (!dev) {
err = -ENOMEM;
- goto err_deregister_fixed_link;
+ goto err_put_tbi;
}
ugeth = netdev_priv(dev);
@@ -3718,21 +3546,50 @@ static int ucc_geth_probe(struct platform_device* ofdev)
dev->max_mtu = 1518;
ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT);
- ugeth->phy_interface = phy_interface;
- ugeth->max_speed = max_speed;
- /* Carrier starts down, phylib will bring it up */
- netif_carrier_off(dev);
+ ugeth->phylink_config.dev = &dev->dev;
+ ugeth->phylink_config.type = PHYLINK_NETDEV;
+
+ ugeth->phylink_config.mac_capabilities =
+ MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD;
+
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ ugeth->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ ugeth->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ ugeth->phylink_config.supported_interfaces);
+ phy_interface_set_rgmii(ugeth->phylink_config.supported_interfaces);
- err = register_netdev(dev);
+ if (ug_info->tbi_node) {
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ ugeth->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_TBI,
+ ugeth->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_RTBI,
+ ugeth->phylink_config.supported_interfaces);
+ }
+
+ phylink = phylink_create(&ugeth->phylink_config, dev_fwnode(&dev->dev),
+ phy_interface, &ugeth_mac_ops);
+ if (IS_ERR(phylink)) {
+ err = PTR_ERR(phylink);
+ goto err_put_tbi;
+ }
+
+ ugeth->phylink = phylink;
+
+ err = devm_register_netdev(&ofdev->dev, dev);
if (err) {
if (netif_msg_probe(ugeth))
pr_err("%s: Cannot register net device, aborting\n",
dev->name);
- goto err_free_netdev;
+ goto err_destroy_phylink;
}
- of_get_ethdev_address(np, dev);
+ err = of_get_ethdev_address(np, dev);
+ if (err == -EPROBE_DEFER)
+ goto err_destroy_phylink;
ugeth->ug_info = ug_info;
ugeth->dev = device;
@@ -3741,15 +3598,10 @@ static int ucc_geth_probe(struct platform_device* ofdev)
return 0;
-err_free_netdev:
- free_netdev(dev);
-err_deregister_fixed_link:
- if (of_phy_is_fixed_link(np))
- of_phy_deregister_fixed_link(np);
+err_destroy_phylink:
+ phylink_destroy(phylink);
+err_put_tbi:
of_node_put(ug_info->tbi_node);
- of_node_put(ug_info->phy_node);
-err_free_info:
- kfree(ug_info);
return err;
}
@@ -3758,16 +3610,10 @@ static void ucc_geth_remove(struct platform_device* ofdev)
{
struct net_device *dev = platform_get_drvdata(ofdev);
struct ucc_geth_private *ugeth = netdev_priv(dev);
- struct device_node *np = ofdev->dev.of_node;
- unregister_netdev(dev);
ucc_geth_memclean(ugeth);
- if (of_phy_is_fixed_link(np))
- of_phy_deregister_fixed_link(np);
+ phylink_destroy(ugeth->phylink);
of_node_put(ugeth->ug_info->tbi_node);
- of_node_put(ugeth->ug_info->phy_node);
- kfree(ugeth->ug_info);
- free_netdev(dev);
}
static const struct of_device_id ucc_geth_match[] = {
@@ -3786,7 +3632,7 @@ static struct platform_driver ucc_geth_driver = {
.of_match_table = ucc_geth_match,
},
.probe = ucc_geth_probe,
- .remove_new = ucc_geth_remove,
+ .remove = ucc_geth_remove,
.suspend = ucc_geth_suspend,
.resume = ucc_geth_resume,
};
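The hunks above replace the driver's private PHY handling with phylink: a struct phylink_mac_ops is registered and a phylink instance is created at probe time, and the open/close, suspend/resume, ioctl and timeout paths are rerouted through phylink_start(), phylink_suspend()/phylink_resume() and phylink_mii_ioctl(). The sketch below shows the general shape of that registration pattern under current phylink callback signatures; the demo_* names are illustrative placeholders, not the ucc_geth implementation.

static void demo_mac_config(struct phylink_config *config, unsigned int mode,
			    const struct phylink_link_state *state)
{
	/* program the MAC for state->interface */
}

static void demo_mac_link_down(struct phylink_config *config, unsigned int mode,
			       phy_interface_t interface)
{
	/* quiesce the MAC transmit and receive paths */
}

static void demo_mac_link_up(struct phylink_config *config, struct phy_device *phy,
			     unsigned int mode, phy_interface_t interface,
			     int speed, int duplex, bool tx_pause, bool rx_pause)
{
	/* apply speed/duplex/pause settings and enable the MAC */
}

static const struct phylink_mac_ops demo_mac_ops = {
	.mac_config	= demo_mac_config,
	.mac_link_down	= demo_mac_link_down,
	.mac_link_up	= demo_mac_link_up,
};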
diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h
index 4294ed096ebb..84f92f6384e7 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.h
+++ b/drivers/net/ethernet/freescale/ucc_geth.h
@@ -16,6 +16,7 @@
#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/phylink.h>
#include <linux/if_ether.h>
#include <soc/fsl/qe/immap_qe.h>
@@ -889,8 +890,6 @@ struct ucc_geth_hardware_statistics {
addresses */
#define TX_TIMEOUT (1*HZ)
-#define PHY_INIT_TIMEOUT 100000
-#define PHY_CHANGE_TIME 2
/* Fast Ethernet (10/100 Mbps) */
#define UCC_GETH_URFS_INIT 512 /* Rx virtual FIFO size
@@ -921,7 +920,8 @@ struct ucc_geth_hardware_statistics {
#define UCC_GETH_UPSMR_INIT UCC_GETH_UPSMR_RES1
#define UCC_GETH_MACCFG1_INIT 0
-#define UCC_GETH_MACCFG2_INIT (MACCFG2_RESERVED_1)
+#define UCC_GETH_MACCFG2_INIT (MACCFG2_RESERVED_1 | \
+ (7 << MACCFG2_PREL_SHIFT))
/* Ethernet Address Type. */
enum enet_addr_type {
@@ -1073,6 +1073,9 @@ struct ucc_geth_tad_params {
u16 vid;
};
+struct phylink;
+struct phylink_config;
+
/* GETH protocol initialization structure */
struct ucc_geth_info {
struct ucc_fast_info uf_info;
@@ -1088,7 +1091,6 @@ struct ucc_geth_info {
u8 miminumInterFrameGapEnforcement;
u8 backToBackInterFrameGap;
int ipAddressAlignment;
- int lengthCheckRx;
u32 mblinterval;
u16 nortsrbytetime;
u8 fracsiz;
@@ -1114,7 +1116,6 @@ struct ucc_geth_info {
int transmitFlowControl;
u8 maxGroupAddrInHash;
u8 maxIndAddrInHash;
- u8 prel;
u16 maxFrameLength;
u16 minFrameLength;
u16 maxD1Length;
@@ -1125,7 +1126,6 @@ struct ucc_geth_info {
u32 eventRegMask;
u16 pausePeriod;
u16 extensionField;
- struct device_node *phy_node;
struct device_node *tbi_node;
u8 weightfactor[NUM_TX_QUEUES];
u8 interruptcoalescingmaxvalue[NUM_RX_QUEUES];
@@ -1210,14 +1210,12 @@ struct ucc_geth_private {
u16 skb_dirtytx[NUM_TX_QUEUES];
struct ugeth_mii_info *mii_info;
- struct phy_device *phydev;
- phy_interface_t phy_interface;
- int max_speed;
uint32_t msg_enable;
- int oldspeed;
- int oldduplex;
- int oldlink;
- int wol_en;
+ u32 wol_en;
+ u32 phy_wol_en;
+
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
struct device_node *node;
};
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index 601beb93d3b3..1fb49e5a414a 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -104,14 +104,8 @@ static int
uec_get_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
- struct phy_device *phydev = ugeth->phydev;
- if (!phydev)
- return -ENODEV;
-
- phy_ethtool_ksettings_get(phydev, cmd);
-
- return 0;
+ return phylink_ethtool_ksettings_get(ugeth->phylink, cmd);
}
static int
@@ -119,12 +113,8 @@ uec_set_ksettings(struct net_device *netdev,
const struct ethtool_link_ksettings *cmd)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
- struct phy_device *phydev = ugeth->phydev;
-
- if (!phydev)
- return -ENODEV;
- return phy_ethtool_ksettings_set(phydev, cmd);
+ return phylink_ethtool_ksettings_set(ugeth->phylink, cmd);
}
static void
@@ -133,12 +123,7 @@ uec_get_pauseparam(struct net_device *netdev,
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
- pause->autoneg = ugeth->phydev->autoneg;
-
- if (ugeth->ug_info->receiveFlowControl)
- pause->rx_pause = 1;
- if (ugeth->ug_info->transmitFlowControl)
- pause->tx_pause = 1;
+ return phylink_ethtool_get_pauseparam(ugeth->phylink, pause);
}
static int
@@ -146,30 +131,11 @@ uec_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
- int ret = 0;
ugeth->ug_info->receiveFlowControl = pause->rx_pause;
ugeth->ug_info->transmitFlowControl = pause->tx_pause;
- if (ugeth->phydev->autoneg) {
- if (netif_running(netdev)) {
- /* FIXME: automatically restart */
- netdev_info(netdev, "Please re-open the interface\n");
- }
- } else {
- struct ucc_geth_info *ug_info = ugeth->ug_info;
-
- ret = init_flow_control_params(ug_info->aufc,
- ug_info->receiveFlowControl,
- ug_info->transmitFlowControl,
- ug_info->pausePeriod,
- ug_info->extensionField,
- &ugeth->uccf->uf_regs->upsmr,
- &ugeth->ug_regs->uempr,
- &ugeth->ug_regs->maccfg1);
- }
-
- return ret;
+ return phylink_ethtool_set_pauseparam(ugeth->phylink, pause);
}
static uint32_t
@@ -287,20 +253,17 @@ static void uec_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
u32 stats_mode = ugeth->ug_info->statisticsMode;
+ int i;
- if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE) {
- memcpy(buf, hw_stat_gstrings, UEC_HW_STATS_LEN *
- ETH_GSTRING_LEN);
- buf += UEC_HW_STATS_LEN * ETH_GSTRING_LEN;
- }
- if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) {
- memcpy(buf, tx_fw_stat_gstrings, UEC_TX_FW_STATS_LEN *
- ETH_GSTRING_LEN);
- buf += UEC_TX_FW_STATS_LEN * ETH_GSTRING_LEN;
- }
+ if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE)
+ for (i = 0; i < UEC_HW_STATS_LEN; i++)
+ ethtool_puts(&buf, hw_stat_gstrings[i]);
+ if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX)
+ for (i = 0; i < UEC_TX_FW_STATS_LEN; i++)
+ ethtool_puts(&buf, tx_fw_stat_gstrings[i]);
if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX)
- memcpy(buf, rx_fw_stat_gstrings, UEC_RX_FW_STATS_LEN *
- ETH_GSTRING_LEN);
+ for (i = 0; i < UEC_RX_FW_STATS_LEN; i++)
+ ethtool_puts(&buf, rx_fw_stat_gstrings[i]);
}
static void uec_get_ethtool_stats(struct net_device *netdev,
@@ -346,28 +309,42 @@ uec_get_drvinfo(struct net_device *netdev,
static void uec_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
- struct phy_device *phydev = ugeth->phydev;
- if (phydev && phydev->irq)
- wol->supported |= WAKE_PHY;
+ phylink_ethtool_get_wol(ugeth->phylink, wol);
+
if (qe_alive_during_sleep())
wol->supported |= WAKE_MAGIC;
- wol->wolopts = ugeth->wol_en;
+ wol->wolopts |= ugeth->wol_en;
}
static int uec_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
- struct phy_device *phydev = ugeth->phydev;
+ int ret = 0;
- if (wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
- return -EINVAL;
- else if (wol->wolopts & WAKE_PHY && (!phydev || !phydev->irq))
+ ret = phylink_ethtool_set_wol(ugeth->phylink, wol);
+ if (ret == -EOPNOTSUPP) {
+ ugeth->phy_wol_en = 0;
+ } else if (ret) {
+ return ret;
+ } else {
+ ugeth->phy_wol_en = wol->wolopts;
+ goto out;
+ }
+
+ /* If the PHY isn't handling the WoL and the MAC is asked for more than
+ * WAKE_MAGIC, error out
+ */
+ if (!ugeth->phy_wol_en &&
+ wol->wolopts & ~WAKE_MAGIC)
return -EINVAL;
- else if (wol->wolopts & WAKE_MAGIC && !qe_alive_during_sleep())
+
+ if (wol->wolopts & WAKE_MAGIC &&
+ !qe_alive_during_sleep())
return -EINVAL;
+out:
ugeth->wol_en = wol->wolopts;
device_set_wakeup_enable(&netdev->dev, ugeth->wol_en);
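uec_get_strings() above switches from open-coded memcpy() plus ETH_GSTRING_LEN pointer arithmetic to ethtool_puts(), which copies one string into the ethtool strings buffer and advances the buffer pointer by ETH_GSTRING_LEN. A self-contained illustration follows; the demo_* names and the stat list are hypothetical, and <linux/ethtool.h> plus <linux/netdevice.h> are assumed.

static void demo_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
{
	static const char * const demo_stat_names[] = { "rx_packets", "tx_packets" };
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	/* each call copies one name and advances buf by ETH_GSTRING_LEN */
	for (i = 0; i < ARRAY_SIZE(demo_stat_names); i++)
		ethtool_puts(&buf, demo_stat_names[i]);
}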
diff --git a/drivers/net/ethernet/fujitsu/Kconfig b/drivers/net/ethernet/fujitsu/Kconfig
index 0a1400cb410a..06a28bce5d27 100644
--- a/drivers/net/ethernet/fujitsu/Kconfig
+++ b/drivers/net/ethernet/fujitsu/Kconfig
@@ -18,7 +18,7 @@ if NET_VENDOR_FUJITSU
config PCMCIA_FMVJ18X
tristate "Fujitsu FMV-J18x PCMCIA support"
- depends on PCMCIA
+ depends on PCMCIA && HAS_IOPORT
select CRC32
help
Say Y here if you intend to attach a Fujitsu FMV-J18x or compatible
diff --git a/drivers/net/ethernet/fungible/funcore/fun_dev.c b/drivers/net/ethernet/fungible/funcore/fun_dev.c
index a7fbd4cd560a..ce97b76f9ae0 100644
--- a/drivers/net/ethernet/fungible/funcore/fun_dev.c
+++ b/drivers/net/ethernet/fungible/funcore/fun_dev.c
@@ -546,17 +546,14 @@ int fun_bind(struct fun_dev *fdev, enum fun_admin_bind_type type0,
unsigned int id0, enum fun_admin_bind_type type1,
unsigned int id1)
{
- struct {
- struct fun_admin_bind_req req;
- struct fun_admin_bind_entry entry[2];
- } cmd = {
- .req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_BIND,
- sizeof(cmd)),
- .entry[0] = FUN_ADMIN_BIND_ENTRY_INIT(type0, id0),
- .entry[1] = FUN_ADMIN_BIND_ENTRY_INIT(type1, id1),
- };
+ DEFINE_RAW_FLEX(struct fun_admin_bind_req, cmd, entry, 2);
+
+ cmd->common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_BIND,
+ __struct_size(cmd));
+ cmd->entry[0] = FUN_ADMIN_BIND_ENTRY_INIT(type0, id0);
+ cmd->entry[1] = FUN_ADMIN_BIND_ENTRY_INIT(type1, id1);
- return fun_submit_admin_sync_cmd(fdev, &cmd.req.common, NULL, 0, 0);
+ return fun_submit_admin_sync_cmd(fdev, &cmd->common, NULL, 0, 0);
}
EXPORT_SYMBOL_GPL(fun_bind);
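fun_bind() above adopts DEFINE_RAW_FLEX() from <linux/overflow.h>, which declares an on-stack object sized for a structure plus a fixed number of trailing flexible-array elements, and __struct_size(), which reports that total size at compile time. A minimal sketch with hypothetical demo_* types:

struct demo_req {
	u32 common;
	struct demo_entry {
		u32 id;
	} entry[];	/* flexible array sized by DEFINE_RAW_FLEX() below */
};

static void demo_build_request(void)
{
	/* storage for struct demo_req plus two entry[] elements */
	DEFINE_RAW_FLEX(struct demo_req, req, entry, 2);

	req->common = __struct_size(req);	/* total size, entries included */
	req->entry[0].id = 1;
	req->entry[1].id = 2;
}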
diff --git a/drivers/net/ethernet/fungible/funcore/fun_queue.c b/drivers/net/ethernet/fungible/funcore/fun_queue.c
index 8ab9f68434f5..d07ee3e4f52a 100644
--- a/drivers/net/ethernet/fungible/funcore/fun_queue.c
+++ b/drivers/net/ethernet/fungible/funcore/fun_queue.c
@@ -482,43 +482,6 @@ free_funq:
return NULL;
}
-/* Create a funq's CQ on the device. */
-static int fun_create_cq(struct fun_queue *funq)
-{
- struct fun_dev *fdev = funq->fdev;
- unsigned int rqid;
- int rc;
-
- rqid = funq->cq_flags & FUN_ADMIN_EPCQ_CREATE_FLAG_RQ ?
- funq->rqid : FUN_HCI_ID_INVALID;
- rc = fun_cq_create(fdev, funq->cq_flags, funq->cqid, rqid,
- funq->cqe_size_log2, funq->cq_depth,
- funq->cq_dma_addr, 0, 0, funq->cq_intcoal_nentries,
- funq->cq_intcoal_usec, funq->cq_vector, 0, 0,
- &funq->cqid, &funq->cq_db);
- if (!rc)
- dev_dbg(fdev->dev, "created CQ %u\n", funq->cqid);
-
- return rc;
-}
-
-/* Create a funq's SQ on the device. */
-static int fun_create_sq(struct fun_queue *funq)
-{
- struct fun_dev *fdev = funq->fdev;
- int rc;
-
- rc = fun_sq_create(fdev, funq->sq_flags, funq->sqid, funq->cqid,
- funq->sqe_size_log2, funq->sq_depth,
- funq->sq_dma_addr, funq->sq_intcoal_nentries,
- funq->sq_intcoal_usec, funq->cq_vector, 0, 0,
- 0, &funq->sqid, &funq->sq_db);
- if (!rc)
- dev_dbg(fdev->dev, "created SQ %u\n", funq->sqid);
-
- return rc;
-}
-
/* Create a funq's RQ on the device. */
int fun_create_rq(struct fun_queue *funq)
{
@@ -561,34 +524,6 @@ int fun_request_irq(struct fun_queue *funq, const char *devname,
return rc;
}
-/* Create all component queues of a funq on the device. */
-int fun_create_queue(struct fun_queue *funq)
-{
- int rc;
-
- rc = fun_create_cq(funq);
- if (rc)
- return rc;
-
- if (funq->rq_depth) {
- rc = fun_create_rq(funq);
- if (rc)
- goto release_cq;
- }
-
- rc = fun_create_sq(funq);
- if (rc)
- goto release_rq;
-
- return 0;
-
-release_rq:
- fun_destroy_sq(funq->fdev, funq->rqid);
-release_cq:
- fun_destroy_cq(funq->fdev, funq->cqid);
- return rc;
-}
-
void fun_free_irq(struct fun_queue *funq)
{
if (funq->irq_handler) {
diff --git a/drivers/net/ethernet/fungible/funcore/fun_queue.h b/drivers/net/ethernet/fungible/funcore/fun_queue.h
index 7fb53d0ae8b0..2d966afb187a 100644
--- a/drivers/net/ethernet/fungible/funcore/fun_queue.h
+++ b/drivers/net/ethernet/fungible/funcore/fun_queue.h
@@ -163,7 +163,6 @@ static inline void fun_set_cq_callback(struct fun_queue *funq, cq_callback_t cb,
}
int fun_create_rq(struct fun_queue *funq);
-int fun_create_queue(struct fun_queue *funq);
void fun_free_irq(struct fun_queue *funq);
int fun_request_irq(struct fun_queue *funq, const char *devname,
diff --git a/drivers/net/ethernet/fungible/funeth/Makefile b/drivers/net/ethernet/fungible/funeth/Makefile
index 646d69595b4f..d51e4c2b4a1a 100644
--- a/drivers/net/ethernet/fungible/funeth/Makefile
+++ b/drivers/net/ethernet/fungible/funeth/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
-ccflags-y += -I$(srctree)/$(src)/../funcore -I$(srctree)/$(src)
+ccflags-y += -I$(src)/../funcore -I$(src)
obj-$(CONFIG_FUN_ETH) += funeth.o
diff --git a/drivers/net/ethernet/fungible/funeth/funeth.h b/drivers/net/ethernet/fungible/funeth/funeth.h
index 1250e10d21db..55e705e239f8 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth.h
+++ b/drivers/net/ethernet/fungible/funeth/funeth.h
@@ -4,7 +4,7 @@
#define _FUNETH_H
#include <uapi/linux/if_ether.h>
-#include <uapi/linux/net_tstamp.h>
+#include <linux/net_tstamp.h>
#include <linux/mutex.h>
#include <linux/seqlock.h>
#include <linux/xarray.h>
@@ -121,7 +121,7 @@ struct funeth_priv {
u8 rx_coal_usec;
u8 rx_coal_count;
- struct hwtstamp_config hwtstamp_cfg;
+ struct kernel_hwtstamp_config hwtstamp_cfg;
/* cumulative queue stats from earlier queue instances */
u64 tx_packets;
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c b/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
index 4edd0adfc6c7..1966dba512f8 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
+++ b/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
@@ -930,7 +930,8 @@ static void fun_get_rmon_stats(struct net_device *netdev,
}
static void fun_get_fec_stats(struct net_device *netdev,
- struct ethtool_fec_stats *stats)
+ struct ethtool_fec_stats *stats,
+ struct ethtool_fec_hist *hist)
{
const struct funeth_priv *fp = netdev_priv(netdev);
@@ -1040,14 +1041,11 @@ static int fun_set_rxfh(struct net_device *netdev,
}
static int fun_get_ts_info(struct net_device *netdev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
- info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_RX_HARDWARE |
+ info->so_timestamping = SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
- info->phc_index = -1;
info->tx_types = BIT(HWTSTAMP_TX_OFF);
info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
return 0;
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_main.c b/drivers/net/ethernet/fungible/funeth/funeth_main.c
index df86770731ad..792cddac6f1b 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth_main.c
+++ b/drivers/net/ethernet/fungible/funeth/funeth_main.c
@@ -927,7 +927,7 @@ static int fun_change_mtu(struct net_device *netdev, int new_mtu)
rc = fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_MTU, new_mtu);
if (!rc)
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
return rc;
}
@@ -1014,26 +1014,25 @@ static int fun_get_port_attributes(struct net_device *netdev)
return 0;
}
-static int fun_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+static int fun_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *config)
{
const struct funeth_priv *fp = netdev_priv(dev);
- return copy_to_user(ifr->ifr_data, &fp->hwtstamp_cfg,
- sizeof(fp->hwtstamp_cfg)) ? -EFAULT : 0;
+ *config = fp->hwtstamp_cfg;
+ return 0;
}
-static int fun_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+static int fun_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct funeth_priv *fp = netdev_priv(dev);
- struct hwtstamp_config cfg;
-
- if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
- return -EFAULT;
/* no TX HW timestamps */
- cfg.tx_type = HWTSTAMP_TX_OFF;
+ config->tx_type = HWTSTAMP_TX_OFF;
- switch (cfg.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
break;
case HWTSTAMP_FILTER_ALL:
@@ -1051,26 +1050,14 @@ static int fun_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_NTP_ALL:
- cfg.rx_filter = HWTSTAMP_FILTER_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
return -ERANGE;
}
- fp->hwtstamp_cfg = cfg;
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
-}
-
-static int fun_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return fun_hwtstamp_set(dev, ifr);
- case SIOCGHWTSTAMP:
- return fun_hwtstamp_get(dev, ifr);
- default:
- return -EOPNOTSUPP;
- }
+ fp->hwtstamp_cfg = *config;
+ return 0;
}
/* Prepare the queues for XDP. */
@@ -1340,7 +1327,6 @@ static const struct net_device_ops fun_netdev_ops = {
.ndo_change_mtu = fun_change_mtu,
.ndo_set_mac_address = fun_set_macaddr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_eth_ioctl = fun_ioctl,
.ndo_uninit = fun_uninit,
.ndo_bpf = fun_xdp,
.ndo_xdp_xmit = fun_xdp_xmit_frames,
@@ -1348,6 +1334,8 @@ static const struct net_device_ops fun_netdev_ops = {
.ndo_set_vf_vlan = fun_set_vf_vlan,
.ndo_set_vf_rate = fun_set_vf_rate,
.ndo_get_vf_config = fun_get_vf_config,
+ .ndo_hwtstamp_get = fun_hwtstamp_get,
+ .ndo_hwtstamp_set = fun_hwtstamp_set,
};
#define GSO_ENCAP_FLAGS (NETIF_F_GSO_GRE | NETIF_F_GSO_IPXIP4 | \
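The funeth change above drops the SIOCGHWTSTAMP/SIOCSHWTSTAMP ioctl plumbing in favour of the dedicated ndo_hwtstamp_get()/ndo_hwtstamp_set() hooks, where the core performs the user-space copies and passes the driver a struct kernel_hwtstamp_config directly. A minimal sketch of that shape, with hypothetical demo_* names and only software defaults:

static int demo_hwtstamp_get(struct net_device *dev,
			     struct kernel_hwtstamp_config *cfg)
{
	cfg->tx_type = HWTSTAMP_TX_OFF;
	cfg->rx_filter = HWTSTAMP_FILTER_NONE;
	return 0;
}

static int demo_hwtstamp_set(struct net_device *dev,
			     struct kernel_hwtstamp_config *cfg,
			     struct netlink_ext_ack *extack)
{
	if (cfg->tx_type != HWTSTAMP_TX_OFF)
		return -ERANGE;	/* no TX hardware timestamps in this sketch */
	return 0;
}

static const struct net_device_ops demo_netdev_ops = {
	.ndo_hwtstamp_get	= demo_hwtstamp_get,
	.ndo_hwtstamp_set	= demo_hwtstamp_set,
};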
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_trace.h b/drivers/net/ethernet/fungible/funeth/funeth_trace.h
index 9e58dfec19d5..b9985900f30b 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth_trace.h
+++ b/drivers/net/ethernet/fungible/funeth/funeth_trace.h
@@ -32,7 +32,7 @@ TRACE_EVENT(funeth_tx,
__entry->len = len;
__entry->sqe_idx = sqe_idx;
__entry->ngle = ngle;
- __assign_str(devname, txq->netdev->name);
+ __assign_str(devname);
),
TP_printk("%s: Txq %u, SQE idx %u, len %u, num GLEs %u",
@@ -62,7 +62,7 @@ TRACE_EVENT(funeth_tx_free,
__entry->sqe_idx = sqe_idx;
__entry->num_sqes = num_sqes;
__entry->hw_head = hw_head;
- __assign_str(devname, txq->netdev->name);
+ __assign_str(devname);
),
TP_printk("%s: Txq %u, SQE idx %u, SQEs %u, HW head %u",
@@ -97,7 +97,7 @@ TRACE_EVENT(funeth_rx,
__entry->len = pkt_len;
__entry->hash = hash;
__entry->cls_vec = cls_vec;
- __assign_str(devname, rxq->netdev->name);
+ __assign_str(devname);
),
TP_printk("%s: Rxq %u, CQ head %u, RQEs %u, len %u, hash %u, CV %#x",
diff --git a/drivers/net/ethernet/google/Kconfig b/drivers/net/ethernet/google/Kconfig
index 8641a00f8e63..14c9431e15e5 100644
--- a/drivers/net/ethernet/google/Kconfig
+++ b/drivers/net/ethernet/google/Kconfig
@@ -18,6 +18,8 @@ if NET_VENDOR_GOOGLE
config GVE
tristate "Google Virtual NIC (gVNIC) support"
depends on (PCI_MSI && (X86 || CPU_LITTLE_ENDIAN))
+ depends on PTP_1588_CLOCK_OPTIONAL
+ select PAGE_POOL
help
This driver supports Google Virtual NIC (gVNIC)"
diff --git a/drivers/net/ethernet/google/gve/Makefile b/drivers/net/ethernet/google/gve/Makefile
index b9a6be76531b..e0ec227a50f7 100644
--- a/drivers/net/ethernet/google/gve/Makefile
+++ b/drivers/net/ethernet/google/gve/Makefile
@@ -1,4 +1,7 @@
# Makefile for the Google virtual Ethernet (gve) driver
obj-$(CONFIG_GVE) += gve.o
-gve-objs := gve_main.o gve_tx.o gve_tx_dqo.o gve_rx.o gve_rx_dqo.o gve_ethtool.o gve_adminq.o gve_utils.o
+gve-y := gve_main.o gve_tx.o gve_tx_dqo.o gve_rx.o gve_rx_dqo.o gve_ethtool.o gve_adminq.o gve_utils.o gve_flow_rule.o \
+ gve_buffer_mgmt_dqo.o
+
+gve-$(CONFIG_PTP_1588_CLOCK) += gve_ptp.o
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 4814c96d5fe7..970d5ca8cdde 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
* Google virtual Ethernet (gve) driver
*
- * Copyright (C) 2015-2021 Google, Inc.
+ * Copyright (C) 2015-2024 Google LLC
*/
#ifndef _GVE_H_
@@ -11,8 +11,11 @@
#include <linux/dmapool.h>
#include <linux/ethtool_netlink.h>
#include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
#include <linux/pci.h>
+#include <linux/ptp_clock_kernel.h>
#include <linux/u64_stats_sync.h>
+#include <net/page_pool/helpers.h>
#include <net/xdp.h>
#include "gve_desc.h"
@@ -50,12 +53,26 @@
/* PTYPEs are always 10 bits. */
#define GVE_NUM_PTYPES 1024
+/* Default minimum ring size */
+#define GVE_DEFAULT_MIN_TX_RING_SIZE 256
+#define GVE_DEFAULT_MIN_RX_RING_SIZE 512
+
#define GVE_DEFAULT_RX_BUFFER_SIZE 2048
-#define GVE_MAX_RX_BUFFER_SIZE 4096
+#define GVE_XDP_RX_BUFFER_SIZE_DQO 4096
#define GVE_DEFAULT_RX_BUFFER_OFFSET 2048
+#define GVE_PAGE_POOL_SIZE_MULTIPLIER 4
+
+#define GVE_FLOW_RULES_CACHE_SIZE \
+ (GVE_ADMINQ_BUFFER_SIZE / sizeof(struct gve_adminq_queried_flow_rule))
+#define GVE_FLOW_RULE_IDS_CACHE_SIZE \
+ (GVE_ADMINQ_BUFFER_SIZE / sizeof(((struct gve_adminq_queried_flow_rule *)0)->location))
+
+#define GVE_RSS_KEY_SIZE 40
+#define GVE_RSS_INDIR_SIZE 128
+
#define GVE_XDP_ACTIONS 5
#define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182
@@ -63,7 +80,6 @@
#define GVE_DEFAULT_HEADER_BUFFER_SIZE 128
#define DQO_QPL_DEFAULT_TX_PAGES 512
-#define DQO_QPL_DEFAULT_RX_PAGES 2048
/* Maximum TSO size supported on DQO */
#define GVE_DQO_TX_MAX 0x3FFFF
@@ -82,6 +98,8 @@
*/
#define GVE_DQO_QPL_ONDEMAND_ALLOC_THRESHOLD 96
+#define GVE_DQO_RX_HWTSTAMP_VALID 0x1
+
/* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
struct gve_rx_desc_queue {
struct gve_rx_desc *desc_ring; /* the descriptor ring */
@@ -91,9 +109,16 @@ struct gve_rx_desc_queue {
/* The page info for a single slot in the RX data queue */
struct gve_rx_slot_page_info {
- struct page *page;
+ /* netmem is used for DQO RDA mode
+ * page is used in all other modes
+ */
+ union {
+ struct page *page;
+ netmem_ref netmem;
+ };
void *page_address;
u32 page_offset; /* offset to write to in page */
+ unsigned int buf_size;
int pagecnt_bias; /* expected pagecnt if only the driver has a ref */
u16 pad; /* adjustment for rx padding */
u8 can_flip; /* tracks if the networking stack is using the page */
@@ -165,6 +190,9 @@ struct gve_rx_buf_state_dqo {
/* The page posted to HW. */
struct gve_rx_slot_page_info page_info;
+ /* XSK buffer */
+ struct xdp_buff *xsk_buff;
+
/* The DMA address corresponding to `page_info`. */
dma_addr_t addr;
@@ -177,6 +205,13 @@ struct gve_rx_buf_state_dqo {
s16 next;
};
+/* Wrapper for XDP Rx metadata */
+struct gve_xdp_buff {
+ struct xdp_buff xdp;
+ struct gve_priv *gve;
+ const struct gve_rx_compl_desc_dqo *compl_desc;
+};
+
/* `head` and `tail` are indices into an array, or -1 if empty. */
struct gve_index_list {
s16 head;
@@ -206,6 +241,11 @@ struct gve_rx_cnts {
/* Contains datapath state used to represent an RX queue. */
struct gve_rx_ring {
struct gve_priv *gve;
+
+ u16 packet_buffer_size; /* Size of buffer posted to NIC */
+ u16 packet_buffer_truesize; /* Total size of RX buffer */
+ u16 rx_headroom;
+
union {
/* GQI fields */
struct {
@@ -214,7 +254,6 @@ struct gve_rx_ring {
/* threshold for posting new buffs and descs */
u32 db_threshold;
- u16 packet_buffer_size;
u32 qpl_copy_pool_mask;
u32 qpl_copy_pool_head;
@@ -265,6 +304,8 @@ struct gve_rx_ring {
/* Address info of the buffers for header-split */
struct gve_header_buf hdr_bufs;
+
+ struct page_pool *page_pool;
} dqo;
};
@@ -300,7 +341,6 @@ struct gve_rx_ring {
/* XDP stuff */
struct xdp_rxq_info xdp_rxq;
- struct xdp_rxq_info xsk_rxq;
struct xsk_buff_pool *xsk_pool;
struct page_frag_cache page_cache; /* Page cache to allocate XDP frames */
};
@@ -369,10 +409,24 @@ enum gve_packet_state {
GVE_PACKET_STATE_PENDING_REINJECT_COMPL,
/* No valid completion received within the specified timeout. */
GVE_PACKET_STATE_TIMED_OUT_COMPL,
+ /* XSK pending packet has received a packet/reinjection completion, or
+ * has timed out. At this point, the pending packet can be counted by
+ * xsk_tx_complete and freed.
+ */
+ GVE_PACKET_STATE_XSK_COMPLETE,
+};
+
+enum gve_tx_pending_packet_dqo_type {
+ GVE_TX_PENDING_PACKET_DQO_SKB,
+ GVE_TX_PENDING_PACKET_DQO_XDP_FRAME,
+ GVE_TX_PENDING_PACKET_DQO_XSK,
};
struct gve_tx_pending_packet_dqo {
- struct sk_buff *skb; /* skb for this packet */
+ union {
+ struct sk_buff *skb;
+ struct xdp_frame *xdpf;
+ };
/* 0th element corresponds to the linear portion of `skb`, should be
* unmapped with `dma_unmap_single`.
@@ -402,7 +456,10 @@ struct gve_tx_pending_packet_dqo {
/* Identifies the current state of the packet as defined in
* `enum gve_packet_state`.
*/
- u8 state;
+ u8 state : 3;
+
+ /* gve_tx_pending_packet_dqo_type */
+ u8 type : 2;
/* If packet is an outstanding miss completion, then the packet is
* freed if the corresponding re-injection completion is not received
@@ -424,6 +481,9 @@ struct gve_tx_ring {
/* DQO fields. */
struct {
+ /* Spinlock for XDP tx traffic */
+ spinlock_t xdp_lock;
+
/* Linked list of gve_tx_pending_packet_dqo. Index into
* pending_packets, or -1 if empty.
*
@@ -468,6 +528,8 @@ struct gve_tx_ring {
/* Cached value of `dqo_compl.free_tx_qpl_buf_cnt` */
u32 free_tx_qpl_buf_cnt;
};
+
+ atomic_t xsk_reorder_queue_tail;
} dqo_tx;
};
@@ -501,6 +563,9 @@ struct gve_tx_ring {
/* Last TX ring index fetched by HW */
atomic_t hw_tx_head;
+ u16 xsk_reorder_queue_head;
+ u16 xsk_reorder_queue_tail;
+
/* List to track pending packets which received a miss
* completion but not a corresponding reinjection.
*/
@@ -554,6 +619,8 @@ struct gve_tx_ring {
struct gve_tx_pending_packet_dqo *pending_packets;
s16 num_pending_packets;
+ u16 *xsk_reorder_queue;
+
u32 complq_mask; /* complq size is complq_mask + 1 */
/* QPL fields */
@@ -590,8 +657,6 @@ struct gve_tx_ring {
dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
struct u64_stats_sync statss; /* sync stats for 32bit archs */
struct xsk_buff_pool *xsk_pool;
- u32 xdp_xsk_wakeup;
- u32 xdp_xsk_done;
u64 xdp_xsk_sent;
u64 xdp_xmit;
u64 xdp_xmit_errors;
@@ -607,12 +672,21 @@ struct gve_notify_block {
struct gve_priv *priv;
struct gve_tx_ring *tx; /* tx rings on this block */
struct gve_rx_ring *rx; /* rx rings on this block */
+ u32 irq;
};
-/* Tracks allowed and current queue settings */
-struct gve_queue_config {
+/* Tracks allowed and current rx queue settings */
+struct gve_rx_queue_config {
u16 max_queues;
- u16 num_queues; /* current */
+ u16 num_queues;
+ u16 packet_buffer_size;
+};
+
+/* Tracks allowed and current tx queue settings */
+struct gve_tx_queue_config {
+ u16 max_queues;
+ u16 num_queues; /* number of TX queues, excluding XDP queues */
+ u16 num_xdp_queues;
};
/* Tracks the available and used qpl IDs */
@@ -621,11 +695,6 @@ struct gve_qpl_config {
unsigned long *qpl_id_map; /* bitmap of used qpl ids */
};
-struct gve_options_dqo_rda {
- u16 tx_comp_ring_entries; /* number of tx_comp descriptors */
- u16 rx_buff_ring_entries; /* number of rx_buff descriptors */
-};
-
struct gve_irq_db {
__be32 index;
} ____cacheline_aligned;
@@ -639,31 +708,13 @@ struct gve_ptype_lut {
struct gve_ptype ptypes[GVE_NUM_PTYPES];
};
-/* Parameters for allocating queue page lists */
-struct gve_qpls_alloc_cfg {
- struct gve_qpl_config *qpl_cfg;
- struct gve_queue_config *tx_cfg;
- struct gve_queue_config *rx_cfg;
-
- u16 num_xdp_queues;
- bool raw_addressing;
- bool is_gqi;
-
- /* Allocated resources are returned here */
- struct gve_queue_page_list *qpls;
-};
-
/* Parameters for allocating resources for tx queues */
struct gve_tx_alloc_rings_cfg {
- struct gve_queue_config *qcfg;
+ struct gve_tx_queue_config *qcfg;
- /* qpls and qpl_cfg must already be allocated */
- struct gve_queue_page_list *qpls;
- struct gve_qpl_config *qpl_cfg;
+ u16 num_xdp_rings;
u16 ring_size;
- u16 start_idx;
- u16 num_rings;
bool raw_addressing;
/* Allocated resources are returned here */
@@ -673,17 +724,15 @@ struct gve_tx_alloc_rings_cfg {
/* Parameters for allocating resources for rx queues */
struct gve_rx_alloc_rings_cfg {
/* tx config is also needed to determine QPL ids */
- struct gve_queue_config *qcfg;
- struct gve_queue_config *qcfg_tx;
-
- /* qpls and qpl_cfg must already be allocated */
- struct gve_queue_page_list *qpls;
- struct gve_qpl_config *qpl_cfg;
+ struct gve_rx_queue_config *qcfg_rx;
+ struct gve_tx_queue_config *qcfg_tx;
u16 ring_size;
u16 packet_buffer_size;
bool raw_addressing;
bool enable_header_split;
+ bool reset_rss;
+ bool xdp;
/* Allocated resources are returned here */
struct gve_rx_ring *rx;
@@ -701,11 +750,54 @@ enum gve_queue_format {
GVE_DQO_QPL_FORMAT = 0x4,
};
+struct gve_flow_spec {
+ __be32 src_ip[4];
+ __be32 dst_ip[4];
+ union {
+ struct {
+ __be16 src_port;
+ __be16 dst_port;
+ };
+ __be32 spi;
+ };
+ union {
+ u8 tos;
+ u8 tclass;
+ };
+};
+
+struct gve_flow_rule {
+ u32 location;
+ u16 flow_type;
+ u16 action;
+ struct gve_flow_spec key;
+ struct gve_flow_spec mask;
+};
+
+struct gve_flow_rules_cache {
+ bool rules_cache_synced; /* False if the driver's rules_cache is outdated */
+ struct gve_adminq_queried_flow_rule *rules_cache;
+ __be32 *rule_ids_cache;
+ /* The total number of queried rules that are stored in the caches */
+ u32 rules_cache_num;
+ u32 rule_ids_cache_num;
+};
+
+struct gve_rss_config {
+ u8 *hash_key;
+ u32 *hash_lut;
+};
+
+struct gve_ptp {
+ struct ptp_clock_info info;
+ struct ptp_clock *clock;
+ struct gve_priv *priv;
+};
+
struct gve_priv {
struct net_device *dev;
struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */
- struct gve_queue_page_list *qpls; /* array of num qpls */
struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */
struct gve_irq_db *irq_db_indices; /* array of num_ntfy_blks */
dma_addr_t irq_db_indices_bus;
@@ -718,20 +810,24 @@ struct gve_priv {
u16 num_event_counters;
u16 tx_desc_cnt; /* num desc per ring */
u16 rx_desc_cnt; /* num desc per ring */
+ u16 max_tx_desc_cnt;
+ u16 max_rx_desc_cnt;
+ u16 min_tx_desc_cnt;
+ u16 min_rx_desc_cnt;
+ bool modify_ring_size_enabled;
+ bool default_min_ring_size;
u16 tx_pages_per_qpl; /* Suggested number of pages per qpl for TX queues by NIC */
- u16 rx_pages_per_qpl; /* Suggested number of pages per qpl for RX queues by NIC */
- u16 rx_data_slot_cnt; /* rx buffer length */
u64 max_registered_pages;
u64 num_registered_pages; /* num pages registered with NIC */
struct bpf_prog *xdp_prog; /* XDP BPF program */
u32 rx_copybreak; /* copy packets smaller than this */
u16 default_num_queues; /* default num queues to set up */
- u16 num_xdp_queues;
- struct gve_queue_config tx_cfg;
- struct gve_queue_config rx_cfg;
- struct gve_qpl_config qpl_cfg; /* map used QPL ids */
- u32 num_ntfy_blks; /* spilt between TX and RX so must be even */
+ struct gve_tx_queue_config tx_cfg;
+ struct gve_rx_queue_config rx_cfg;
+ unsigned long *xsk_pools; /* bitmap of RX queues with XSK pools */
+ u32 num_ntfy_blks; /* split between TX and RX so must be even */
+ int numa_node;
struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
__be32 __iomem *db_bar2; /* "array" of doorbells */
@@ -745,6 +841,7 @@ struct gve_priv {
union gve_adminq_command *adminq;
dma_addr_t adminq_bus_addr;
struct dma_pool *adminq_pool;
+ struct mutex adminq_lock; /* Protects adminq command execution */
u32 adminq_mask; /* masks prod_cnt to adminq size */
u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */
u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */
@@ -762,8 +859,13 @@ struct gve_priv {
u32 adminq_set_driver_parameter_cnt;
u32 adminq_report_stats_cnt;
u32 adminq_report_link_speed_cnt;
+ u32 adminq_report_nic_timestamp_cnt;
u32 adminq_get_ptype_map_cnt;
u32 adminq_verify_driver_compatibility_cnt;
+ u32 adminq_query_flow_rules_cnt;
+ u32 adminq_cfg_flow_rule_cnt;
+ u32 adminq_cfg_rss_cnt;
+ u32 adminq_query_rss_cnt;
/* Global stats */
u32 interface_up_cnt; /* count of times interface turned up since last reset */
@@ -792,11 +894,9 @@ struct gve_priv {
u64 link_speed;
bool up_before_suspend; /* True if dev was up before suspend */
- struct gve_options_dqo_rda options_dqo_rda;
struct gve_ptype_lut *ptype_lut_dqo;
/* Must be a power of two. */
- u16 data_buffer_size_dqo;
u16 max_rx_buffer_size; /* device limit */
enum gve_queue_format queue_format;
@@ -807,6 +907,24 @@ struct gve_priv {
u16 header_buf_size; /* device configured, header-split supported if non-zero */
bool header_split_enabled; /* True if the header split is enabled by the user */
+
+ u32 max_flow_rules;
+ u32 num_flow_rules;
+
+ struct gve_flow_rules_cache flow_rules_cache;
+
+ u16 rss_key_size;
+ u16 rss_lut_size;
+ bool cache_rss_config;
+ struct gve_rss_config rss_config;
+
+ /* True if the device supports reading the nic clock */
+ bool nic_timestamp_supported;
+ struct gve_ptp *ptp;
+ struct kernel_hwtstamp_config ts_config;
+ struct gve_nic_ts_report *nic_ts_report;
+ dma_addr_t nic_ts_report_bus;
+ u64 last_sync_nic_counter; /* Clock counter from last NIC TS report */
};
enum gve_service_task_flags_bit {
@@ -989,27 +1107,16 @@ static inline bool gve_is_qpl(struct gve_priv *priv)
}
/* Returns the number of tx queue page lists */
-static inline u32 gve_num_tx_qpls(const struct gve_queue_config *tx_cfg,
- int num_xdp_queues,
+static inline u32 gve_num_tx_qpls(const struct gve_tx_queue_config *tx_cfg,
bool is_qpl)
{
if (!is_qpl)
return 0;
- return tx_cfg->num_queues + num_xdp_queues;
-}
-
-/* Returns the number of XDP tx queue page lists
- */
-static inline u32 gve_num_xdp_qpls(struct gve_priv *priv)
-{
- if (priv->queue_format != GVE_GQI_QPL_FORMAT)
- return 0;
-
- return priv->num_xdp_queues;
+ return tx_cfg->num_queues + tx_cfg->num_xdp_queues;
}
/* Returns the number of rx queue page lists */
-static inline u32 gve_num_rx_qpls(const struct gve_queue_config *rx_cfg,
+static inline u32 gve_num_rx_qpls(const struct gve_rx_queue_config *rx_cfg,
bool is_qpl)
{
if (!is_qpl)
@@ -1027,8 +1134,8 @@ static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid)
return priv->tx_cfg.max_queues + rx_qid;
}
-/* Returns the index into priv->qpls where a certain rx queue's QPL resides */
-static inline u32 gve_get_rx_qpl_id(const struct gve_queue_config *tx_cfg, int rx_qid)
+static inline u32 gve_get_rx_qpl_id(const struct gve_tx_queue_config *tx_cfg,
+ int rx_qid)
{
return tx_cfg->max_queues + rx_qid;
}
@@ -1038,41 +1145,17 @@ static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv)
return gve_tx_qpl_id(priv, 0);
}
-/* Returns the index into priv->qpls where the first rx queue's QPL resides */
-static inline u32 gve_rx_start_qpl_id(const struct gve_queue_config *tx_cfg)
+static inline u32 gve_rx_start_qpl_id(const struct gve_tx_queue_config *tx_cfg)
{
return gve_get_rx_qpl_id(tx_cfg, 0);
}
-/* Returns a pointer to the next available tx qpl in the list of qpls */
-static inline
-struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_tx_alloc_rings_cfg *cfg,
- int tx_qid)
-{
- /* QPL already in use */
- if (test_bit(tx_qid, cfg->qpl_cfg->qpl_id_map))
- return NULL;
- set_bit(tx_qid, cfg->qpl_cfg->qpl_id_map);
- return &cfg->qpls[tx_qid];
-}
-
-/* Returns a pointer to the next available rx qpl in the list of qpls */
-static inline
-struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_rx_alloc_rings_cfg *cfg,
- int rx_qid)
+static inline u32 gve_get_rx_pages_per_qpl_dqo(u32 rx_desc_cnt)
{
- int id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx_qid);
- /* QPL already in use */
- if (test_bit(id, cfg->qpl_cfg->qpl_id_map))
- return NULL;
- set_bit(id, cfg->qpl_cfg->qpl_id_map);
- return &cfg->qpls[id];
-}
-
-/* Unassigns the qpl with the given id */
-static inline void gve_unassign_qpl(struct gve_qpl_config *qpl_cfg, int id)
-{
- clear_bit(id, qpl_cfg->qpl_id_map);
+ /* For DQO, page count should be more than ring size for
+ * out-of-order completions. Set it to twice the ring size.
+ */
+ return 2 * rx_desc_cnt;
}
/* Returns the correct dma direction for tx and rx qpls */
@@ -1091,9 +1174,15 @@ static inline bool gve_is_gqi(struct gve_priv *priv)
priv->queue_format == GVE_GQI_QPL_FORMAT;
}
+static inline bool gve_is_dqo(struct gve_priv *priv)
+{
+ return priv->queue_format == GVE_DQO_RDA_FORMAT ||
+ priv->queue_format == GVE_DQO_QPL_FORMAT;
+}
+
static inline u32 gve_num_tx_queues(struct gve_priv *priv)
{
- return priv->tx_cfg.num_queues + priv->num_xdp_queues;
+ return priv->tx_cfg.num_queues + priv->tx_cfg.num_xdp_queues;
}
static inline u32 gve_xdp_tx_queue_id(struct gve_priv *priv, u32 queue_id)
@@ -1106,6 +1195,17 @@ static inline u32 gve_xdp_tx_start_queue_id(struct gve_priv *priv)
return gve_xdp_tx_queue_id(priv, 0);
}
+static inline bool gve_supports_xdp_xmit(struct gve_priv *priv)
+{
+ switch (priv->queue_format) {
+ case GVE_GQI_QPL_FORMAT:
+ case GVE_DQO_RDA_FORMAT:
+ return true;
+ default:
+ return false;
+ }
+}
+
/* gqi napi handler defined in gve_main.c */
int gve_napi_poll(struct napi_struct *napi, int budget);
@@ -1115,15 +1215,26 @@ int gve_alloc_page(struct gve_priv *priv, struct device *dev,
enum dma_data_direction, gfp_t gfp_flags);
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
enum dma_data_direction);
+/* qpls */
+struct gve_queue_page_list *gve_alloc_queue_page_list(struct gve_priv *priv,
+ u32 id, int pages);
+void gve_free_queue_page_list(struct gve_priv *priv,
+ struct gve_queue_page_list *qpl,
+ u32 id);
/* tx handling */
netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
-int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
- u32 flags);
+int gve_xdp_xmit_gqi(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags);
+int gve_xdp_xmit_dqo(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags);
int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
void *data, int len, void *frame_p);
void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid);
+int gve_xdp_xmit_one_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
+ struct xdp_frame *xdpf);
bool gve_tx_poll(struct gve_notify_block *block, int budget);
bool gve_xdp_poll(struct gve_notify_block *block, int budget);
+int gve_xsk_tx_poll(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *cfg);
void gve_tx_free_rings_gqi(struct gve_priv *priv,
@@ -1137,22 +1248,94 @@ bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
int gve_rx_poll(struct gve_notify_block *block, int budget);
bool gve_rx_work_pending(struct gve_rx_ring *rx);
-int gve_rx_alloc_rings(struct gve_priv *priv);
+int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg,
+ struct gve_rx_ring *rx,
+ int idx);
+void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct gve_rx_alloc_rings_cfg *cfg);
int gve_rx_alloc_rings_gqi(struct gve_priv *priv,
struct gve_rx_alloc_rings_cfg *cfg);
void gve_rx_free_rings_gqi(struct gve_priv *priv,
struct gve_rx_alloc_rings_cfg *cfg);
void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx);
void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx);
-u16 gve_get_pkt_buf_size(const struct gve_priv *priv, bool enable_hplit);
bool gve_header_split_supported(const struct gve_priv *priv);
-int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split);
+int gve_set_rx_buf_len_config(struct gve_priv *priv, u32 rx_buf_len,
+ struct netlink_ext_ack *extack,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
+int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
+/* rx buffer handling */
+int gve_buf_ref_cnt(struct gve_rx_buf_state_dqo *bs);
+void gve_free_page_dqo(struct gve_priv *priv, struct gve_rx_buf_state_dqo *bs,
+ bool free_page);
+struct gve_rx_buf_state_dqo *gve_alloc_buf_state(struct gve_rx_ring *rx);
+bool gve_buf_state_is_allocated(struct gve_rx_ring *rx,
+ struct gve_rx_buf_state_dqo *buf_state);
+void gve_free_buf_state(struct gve_rx_ring *rx,
+ struct gve_rx_buf_state_dqo *buf_state);
+struct gve_rx_buf_state_dqo *gve_dequeue_buf_state(struct gve_rx_ring *rx,
+ struct gve_index_list *list);
+void gve_enqueue_buf_state(struct gve_rx_ring *rx, struct gve_index_list *list,
+ struct gve_rx_buf_state_dqo *buf_state);
+struct gve_rx_buf_state_dqo *gve_get_recycled_buf_state(struct gve_rx_ring *rx);
+void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct gve_rx_buf_state_dqo *buf_state);
+void gve_free_to_page_pool(struct gve_rx_ring *rx,
+ struct gve_rx_buf_state_dqo *buf_state,
+ bool allow_direct);
+int gve_alloc_qpl_page_dqo(struct gve_rx_ring *rx,
+ struct gve_rx_buf_state_dqo *buf_state);
+void gve_free_qpl_page_dqo(struct gve_rx_buf_state_dqo *buf_state);
+void gve_reuse_buffer(struct gve_rx_ring *rx,
+ struct gve_rx_buf_state_dqo *buf_state);
+void gve_free_buffer(struct gve_rx_ring *rx,
+ struct gve_rx_buf_state_dqo *buf_state);
+int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc);
+struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
+ struct gve_rx_ring *rx,
+ bool xdp);
+
/* Reset */
void gve_schedule_reset(struct gve_priv *priv);
int gve_reset(struct gve_priv *priv, bool attempt_teardown);
+void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
+int gve_adjust_config(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
int gve_adjust_queues(struct gve_priv *priv,
- struct gve_queue_config new_rx_config,
- struct gve_queue_config new_tx_config);
+ struct gve_rx_queue_config new_rx_config,
+ struct gve_tx_queue_config new_tx_config,
+ bool reset_rss);
+/* flow steering rule */
+int gve_get_flow_rule_entry(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
+int gve_get_flow_rule_ids(struct gve_priv *priv, struct ethtool_rxnfc *cmd, u32 *rule_locs);
+int gve_add_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
+int gve_del_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
+int gve_flow_rules_reset(struct gve_priv *priv);
+/* RSS config */
+int gve_init_rss_config(struct gve_priv *priv, u16 num_queues);
+/* PTP and timestamping */
+#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
+int gve_clock_nic_ts_read(struct gve_priv *priv);
+int gve_init_clock(struct gve_priv *priv);
+void gve_teardown_clock(struct gve_priv *priv);
+#else /* CONFIG_PTP_1588_CLOCK */
+static inline int gve_clock_nic_ts_read(struct gve_priv *priv)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int gve_init_clock(struct gve_priv *priv)
+{
+ return 0;
+}
+
+static inline void gve_teardown_clock(struct gve_priv *priv) { }
+#endif /* CONFIG_PTP_1588_CLOCK */
/* report stats handling */
void gve_handle_report_stats(struct gve_priv *priv);
/* exported by ethtool.c */
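The PTP declarations above follow the usual conditional-stub convention: real prototypes when CONFIG_PTP_1588_CLOCK is built in, static inline no-ops otherwise, so callers stay free of #ifdef guards. The same shape for a hypothetical feature:

#if IS_ENABLED(CONFIG_DEMO_FEATURE)
int demo_feature_init(struct demo_priv *priv);
void demo_feature_teardown(struct demo_priv *priv);
#else /* !CONFIG_DEMO_FEATURE */
static inline int demo_feature_init(struct demo_priv *priv)
{
	return 0;	/* feature compiled out; report success */
}

static inline void demo_feature_teardown(struct demo_priv *priv) { }
#endif /* CONFIG_DEMO_FEATURE */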
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index ae12ac38e18b..b72cc0fa2ba2 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -32,6 +32,8 @@ struct gve_device_option *gve_get_next_option(struct gve_device_descriptor *desc
return option_end > descriptor_end ? NULL : (struct gve_device_option *)option_end;
}
+#define GVE_DEVICE_OPTION_NO_MIN_RING_SIZE 8
+
static
void gve_parse_device_option(struct gve_priv *priv,
struct gve_device_descriptor *device_descriptor,
@@ -41,7 +43,11 @@ void gve_parse_device_option(struct gve_priv *priv,
struct gve_device_option_dqo_rda **dev_op_dqo_rda,
struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
- struct gve_device_option_buffer_sizes **dev_op_buffer_sizes)
+ struct gve_device_option_buffer_sizes **dev_op_buffer_sizes,
+ struct gve_device_option_flow_steering **dev_op_flow_steering,
+ struct gve_device_option_rss_config **dev_op_rss_config,
+ struct gve_device_option_nic_timestamp **dev_op_nic_timestamp,
+ struct gve_device_option_modify_ring **dev_op_modify_ring)
{
u32 req_feat_mask = be32_to_cpu(option->required_features_mask);
u16 option_length = be16_to_cpu(option->option_length);
@@ -165,6 +171,78 @@ void gve_parse_device_option(struct gve_priv *priv,
"Buffer Sizes");
*dev_op_buffer_sizes = (void *)(option + 1);
break;
+ case GVE_DEV_OPT_ID_MODIFY_RING:
+ if (option_length < GVE_DEVICE_OPTION_NO_MIN_RING_SIZE ||
+ req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING) {
+ dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+ "Modify Ring", (int)sizeof(**dev_op_modify_ring),
+ GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING,
+ option_length, req_feat_mask);
+ break;
+ }
+
+ if (option_length > sizeof(**dev_op_modify_ring)) {
+ dev_warn(&priv->pdev->dev,
+ GVE_DEVICE_OPTION_TOO_BIG_FMT, "Modify Ring");
+ }
+
+ *dev_op_modify_ring = (void *)(option + 1);
+
+ /* device has not provided min ring size */
+ if (option_length == GVE_DEVICE_OPTION_NO_MIN_RING_SIZE)
+ priv->default_min_ring_size = true;
+ break;
+ case GVE_DEV_OPT_ID_FLOW_STEERING:
+ if (option_length < sizeof(**dev_op_flow_steering) ||
+ req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_FLOW_STEERING) {
+ dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+ "Flow Steering",
+ (int)sizeof(**dev_op_flow_steering),
+ GVE_DEV_OPT_REQ_FEAT_MASK_FLOW_STEERING,
+ option_length, req_feat_mask);
+ break;
+ }
+
+ if (option_length > sizeof(**dev_op_flow_steering))
+ dev_warn(&priv->pdev->dev,
+ GVE_DEVICE_OPTION_TOO_BIG_FMT,
+ "Flow Steering");
+ *dev_op_flow_steering = (void *)(option + 1);
+ break;
+ case GVE_DEV_OPT_ID_RSS_CONFIG:
+ if (option_length < sizeof(**dev_op_rss_config) ||
+ req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_RSS_CONFIG) {
+ dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+ "RSS config",
+ (int)sizeof(**dev_op_rss_config),
+ GVE_DEV_OPT_REQ_FEAT_MASK_RSS_CONFIG,
+ option_length, req_feat_mask);
+ break;
+ }
+
+ if (option_length > sizeof(**dev_op_rss_config))
+ dev_warn(&priv->pdev->dev,
+ GVE_DEVICE_OPTION_TOO_BIG_FMT,
+ "RSS config");
+ *dev_op_rss_config = (void *)(option + 1);
+ break;
+ case GVE_DEV_OPT_ID_NIC_TIMESTAMP:
+ if (option_length < sizeof(**dev_op_nic_timestamp) ||
+ req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_NIC_TIMESTAMP) {
+ dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+ "Nic Timestamp",
+ (int)sizeof(**dev_op_nic_timestamp),
+ GVE_DEV_OPT_REQ_FEAT_MASK_NIC_TIMESTAMP,
+ option_length, req_feat_mask);
+ break;
+ }
+
+ if (option_length > sizeof(**dev_op_nic_timestamp))
+ dev_warn(&priv->pdev->dev,
+ GVE_DEVICE_OPTION_TOO_BIG_FMT,
+ "Nic Timestamp");
+ *dev_op_nic_timestamp = (void *)(option + 1);
+ break;
default:
/* If we don't recognize the option just continue
* without doing anything.
@@ -183,7 +261,11 @@ gve_process_device_options(struct gve_priv *priv,
struct gve_device_option_dqo_rda **dev_op_dqo_rda,
struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
- struct gve_device_option_buffer_sizes **dev_op_buffer_sizes)
+ struct gve_device_option_buffer_sizes **dev_op_buffer_sizes,
+ struct gve_device_option_flow_steering **dev_op_flow_steering,
+ struct gve_device_option_rss_config **dev_op_rss_config,
+ struct gve_device_option_nic_timestamp **dev_op_nic_timestamp,
+ struct gve_device_option_modify_ring **dev_op_modify_ring)
{
const int num_options = be16_to_cpu(descriptor->num_device_options);
struct gve_device_option *dev_opt;
@@ -204,7 +286,10 @@ gve_process_device_options(struct gve_priv *priv,
gve_parse_device_option(priv, descriptor, dev_opt,
dev_op_gqi_rda, dev_op_gqi_qpl,
dev_op_dqo_rda, dev_op_jumbo_frames,
- dev_op_dqo_qpl, dev_op_buffer_sizes);
+ dev_op_dqo_qpl, dev_op_buffer_sizes,
+ dev_op_flow_steering, dev_op_rss_config,
+ dev_op_nic_timestamp,
+ dev_op_modify_ring);
dev_opt = next_opt;
}
@@ -241,7 +326,12 @@ int gve_adminq_alloc(struct device *dev, struct gve_priv *priv)
priv->adminq_set_driver_parameter_cnt = 0;
priv->adminq_report_stats_cnt = 0;
priv->adminq_report_link_speed_cnt = 0;
+ priv->adminq_report_nic_timestamp_cnt = 0;
priv->adminq_get_ptype_map_cnt = 0;
+ priv->adminq_query_flow_rules_cnt = 0;
+ priv->adminq_cfg_flow_rule_cnt = 0;
+ priv->adminq_cfg_rss_cnt = 0;
+ priv->adminq_query_rss_cnt = 0;
/* Setup Admin queue with the device */
if (priv->pdev->revision < 0x1) {
@@ -258,6 +348,7 @@ int gve_adminq_alloc(struct device *dev, struct gve_priv *priv)
&priv->reg_bar0->adminq_base_address_lo);
iowrite32be(GVE_DRIVER_STATUS_RUN_MASK, &priv->reg_bar0->driver_status);
}
+ mutex_init(&priv->adminq_lock);
gve_set_admin_queue_ok(priv);
return 0;
}
@@ -372,6 +463,8 @@ static int gve_adminq_kick_and_wait(struct gve_priv *priv)
int tail, head;
int i;
+ lockdep_assert_held(&priv->adminq_lock);
+
tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
head = priv->adminq_prod_cnt;
@@ -397,9 +490,6 @@ static int gve_adminq_kick_and_wait(struct gve_priv *priv)
return 0;
}
-/* This function is not threadsafe - the caller is responsible for any
- * necessary locks.
- */
static int gve_adminq_issue_cmd(struct gve_priv *priv,
union gve_adminq_command *cmd_orig)
{
@@ -407,6 +497,8 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv,
u32 opcode;
u32 tail;
+ lockdep_assert_held(&priv->adminq_lock);
+
tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
// Check if next command will overflow the buffer.
@@ -434,6 +526,8 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv,
memcpy(cmd, cmd_orig, sizeof(*cmd_orig));
opcode = be32_to_cpu(READ_ONCE(cmd->opcode));
+ if (opcode == GVE_ADMINQ_EXTENDED_COMMAND)
+ opcode = be32_to_cpu(cmd->extended_command.inner_opcode);
switch (opcode) {
case GVE_ADMINQ_DESCRIBE_DEVICE:
@@ -472,46 +566,92 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv,
case GVE_ADMINQ_REPORT_LINK_SPEED:
priv->adminq_report_link_speed_cnt++;
break;
+ case GVE_ADMINQ_REPORT_NIC_TIMESTAMP:
+ priv->adminq_report_nic_timestamp_cnt++;
+ break;
case GVE_ADMINQ_GET_PTYPE_MAP:
priv->adminq_get_ptype_map_cnt++;
break;
case GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY:
priv->adminq_verify_driver_compatibility_cnt++;
break;
+ case GVE_ADMINQ_QUERY_FLOW_RULES:
+ priv->adminq_query_flow_rules_cnt++;
+ break;
+ case GVE_ADMINQ_CONFIGURE_FLOW_RULE:
+ priv->adminq_cfg_flow_rule_cnt++;
+ break;
+ case GVE_ADMINQ_CONFIGURE_RSS:
+ priv->adminq_cfg_rss_cnt++;
+ break;
+ case GVE_ADMINQ_QUERY_RSS:
+ priv->adminq_query_rss_cnt++;
+ break;
default:
dev_err(&priv->pdev->dev, "unknown AQ command opcode %d\n", opcode);
+ return -EINVAL;
}
return 0;
}
-/* This function is not threadsafe - the caller is responsible for any
- * necessary locks.
- * The caller is also responsible for making sure there are no commands
- * waiting to be executed.
- */
static int gve_adminq_execute_cmd(struct gve_priv *priv,
union gve_adminq_command *cmd_orig)
{
u32 tail, head;
int err;
+ mutex_lock(&priv->adminq_lock);
tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
head = priv->adminq_prod_cnt;
- if (tail != head)
- // This is not a valid path
- return -EINVAL;
+ if (tail != head) {
+ err = -EINVAL;
+ goto out;
+ }
err = gve_adminq_issue_cmd(priv, cmd_orig);
if (err)
- return err;
+ goto out;
+
+ err = gve_adminq_kick_and_wait(priv);
- return gve_adminq_kick_and_wait(priv);
+out:
+ mutex_unlock(&priv->adminq_lock);
+ return err;
+}
+
+static int gve_adminq_execute_extended_cmd(struct gve_priv *priv, u32 opcode,
+ size_t cmd_size, void *cmd_orig)
+{
+ union gve_adminq_command cmd;
+ dma_addr_t inner_cmd_bus;
+ void *inner_cmd;
+ int err;
+
+ inner_cmd = dma_alloc_coherent(&priv->pdev->dev, cmd_size,
+ &inner_cmd_bus, GFP_KERNEL);
+ if (!inner_cmd)
+ return -ENOMEM;
+
+ memcpy(inner_cmd, cmd_orig, cmd_size);
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opcode = cpu_to_be32(GVE_ADMINQ_EXTENDED_COMMAND);
+ cmd.extended_command = (struct gve_adminq_extended_command) {
+ .inner_opcode = cpu_to_be32(opcode),
+ .inner_length = cpu_to_be32(cmd_size),
+ .inner_command_addr = cpu_to_be64(inner_cmd_bus),
+ };
+
+ err = gve_adminq_execute_cmd(priv, &cmd);
+
+ dma_free_coherent(&priv->pdev->dev, cmd_size, inner_cmd, inner_cmd_bus);
+ return err;
}
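The helper above is the plumbing for admin commands that exceed the 56-byte inline payload; the flow-rule configuration added later in this patch is its user. A minimal sketch of such a caller (illustrative only, not part of the patch) looks like:

static int gve_send_oversized_cmd_sketch(struct gve_priv *priv,
					 struct gve_adminq_configure_flow_rule *inner)
{
	/* The helper bounces the inner command through DMA memory and points
	 * the outer extended command's inner_command_addr at that buffer.
	 */
	return gve_adminq_execute_extended_cmd(priv,
					       GVE_ADMINQ_CONFIGURE_FLOW_RULE,
					       sizeof(*inner), inner);
}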
/* The device specifies that the management vector can either be the first irq
* or the last irq. ntfy_blk_msix_base_idx indicates the first irq assigned to
- * the ntfy blks. It if is 0 then the management vector is last, if it is 1 then
+ * the ntfy blks. If it is 0 then the management vector is last, if it is 1 then
* the management vector is first.
*
* gve arranges the msix vectors so that the management vector is last.
@@ -565,6 +705,7 @@ static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
cpu_to_be64(tx->q_resources_bus),
.tx_ring_addr = cpu_to_be64(tx->bus),
.ntfy_id = cpu_to_be32(tx->ntfy_id),
+ .tx_ring_size = cpu_to_be16(priv->tx_desc_cnt),
};
if (gve_is_gqi(priv)) {
@@ -573,24 +714,17 @@ static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
cmd.create_tx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
} else {
- u16 comp_ring_size;
u32 qpl_id = 0;
- if (priv->queue_format == GVE_DQO_RDA_FORMAT) {
+ if (priv->queue_format == GVE_DQO_RDA_FORMAT)
qpl_id = GVE_RAW_ADDRESSING_QPL_ID;
- comp_ring_size =
- priv->options_dqo_rda.tx_comp_ring_entries;
- } else {
+ else
qpl_id = tx->dqo.qpl->id;
- comp_ring_size = priv->tx_desc_cnt;
- }
cmd.create_tx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
- cmd.create_tx_queue.tx_ring_size =
- cpu_to_be16(priv->tx_desc_cnt);
cmd.create_tx_queue.tx_comp_ring_addr =
cpu_to_be64(tx->complq_bus_dqo);
cmd.create_tx_queue.tx_comp_ring_size =
- cpu_to_be16(comp_ring_size);
+ cpu_to_be16(priv->tx_desc_cnt);
}
return gve_adminq_issue_cmd(priv, &cmd);
@@ -601,84 +735,104 @@ int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_que
int err;
int i;
+ mutex_lock(&priv->adminq_lock);
+
for (i = start_id; i < start_id + num_queues; i++) {
err = gve_adminq_create_tx_queue(priv, i);
if (err)
- return err;
+ goto out;
}
- return gve_adminq_kick_and_wait(priv);
+ err = gve_adminq_kick_and_wait(priv);
+
+out:
+ mutex_unlock(&priv->adminq_lock);
+ return err;
}
-static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
+static void gve_adminq_get_create_rx_queue_cmd(struct gve_priv *priv,
+ union gve_adminq_command *cmd,
+ u32 queue_index)
{
struct gve_rx_ring *rx = &priv->rx[queue_index];
- union gve_adminq_command cmd;
- memset(&cmd, 0, sizeof(cmd));
- cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_RX_QUEUE);
- cmd.create_rx_queue = (struct gve_adminq_create_rx_queue) {
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->opcode = cpu_to_be32(GVE_ADMINQ_CREATE_RX_QUEUE);
+ cmd->create_rx_queue = (struct gve_adminq_create_rx_queue) {
.queue_id = cpu_to_be32(queue_index),
.ntfy_id = cpu_to_be32(rx->ntfy_id),
.queue_resources_addr = cpu_to_be64(rx->q_resources_bus),
+ .rx_ring_size = cpu_to_be16(priv->rx_desc_cnt),
+ .packet_buffer_size = cpu_to_be16(rx->packet_buffer_size),
};
if (gve_is_gqi(priv)) {
u32 qpl_id = priv->queue_format == GVE_GQI_RDA_FORMAT ?
GVE_RAW_ADDRESSING_QPL_ID : rx->data.qpl->id;
- cmd.create_rx_queue.rx_desc_ring_addr =
- cpu_to_be64(rx->desc.bus),
- cmd.create_rx_queue.rx_data_ring_addr =
- cpu_to_be64(rx->data.data_bus),
- cmd.create_rx_queue.index = cpu_to_be32(queue_index);
- cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
- cmd.create_rx_queue.packet_buffer_size = cpu_to_be16(rx->packet_buffer_size);
+ cmd->create_rx_queue.rx_desc_ring_addr =
+ cpu_to_be64(rx->desc.bus);
+ cmd->create_rx_queue.rx_data_ring_addr =
+ cpu_to_be64(rx->data.data_bus);
+ cmd->create_rx_queue.index = cpu_to_be32(queue_index);
+ cmd->create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
} else {
- u16 rx_buff_ring_entries;
u32 qpl_id = 0;
- if (priv->queue_format == GVE_DQO_RDA_FORMAT) {
+ if (priv->queue_format == GVE_DQO_RDA_FORMAT)
qpl_id = GVE_RAW_ADDRESSING_QPL_ID;
- rx_buff_ring_entries =
- priv->options_dqo_rda.rx_buff_ring_entries;
- } else {
+ else
qpl_id = rx->dqo.qpl->id;
- rx_buff_ring_entries = priv->rx_desc_cnt;
- }
- cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
- cmd.create_rx_queue.rx_ring_size =
- cpu_to_be16(priv->rx_desc_cnt);
- cmd.create_rx_queue.rx_desc_ring_addr =
+ cmd->create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
+ cmd->create_rx_queue.rx_desc_ring_addr =
cpu_to_be64(rx->dqo.complq.bus);
- cmd.create_rx_queue.rx_data_ring_addr =
+ cmd->create_rx_queue.rx_data_ring_addr =
cpu_to_be64(rx->dqo.bufq.bus);
- cmd.create_rx_queue.packet_buffer_size =
- cpu_to_be16(priv->data_buffer_size_dqo);
- cmd.create_rx_queue.rx_buff_ring_size =
- cpu_to_be16(rx_buff_ring_entries);
- cmd.create_rx_queue.enable_rsc =
+ cmd->create_rx_queue.rx_buff_ring_size =
+ cpu_to_be16(priv->rx_desc_cnt);
+ cmd->create_rx_queue.enable_rsc =
!!(priv->dev->features & NETIF_F_LRO);
if (priv->header_split_enabled)
- cmd.create_rx_queue.header_buffer_size =
+ cmd->create_rx_queue.header_buffer_size =
cpu_to_be16(priv->header_buf_size);
}
+}
+static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
+{
+ union gve_adminq_command cmd;
+
+ gve_adminq_get_create_rx_queue_cmd(priv, &cmd, queue_index);
return gve_adminq_issue_cmd(priv, &cmd);
}
+/* Unlike gve_adminq_create_rx_queue, this actually rings the doorbell */
+int gve_adminq_create_single_rx_queue(struct gve_priv *priv, u32 queue_index)
+{
+ union gve_adminq_command cmd;
+
+ gve_adminq_get_create_rx_queue_cmd(priv, &cmd, queue_index);
+ return gve_adminq_execute_cmd(priv, &cmd);
+}
+
int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues)
{
int err;
int i;
+ mutex_lock(&priv->adminq_lock);
+
for (i = 0; i < num_queues; i++) {
err = gve_adminq_create_rx_queue(priv, i);
if (err)
- return err;
+ goto out;
}
- return gve_adminq_kick_and_wait(priv);
+ err = gve_adminq_kick_and_wait(priv);
+
+out:
+ mutex_unlock(&priv->adminq_lock);
+ return err;
}
static int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index)
@@ -704,31 +858,46 @@ int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_qu
int err;
int i;
+ mutex_lock(&priv->adminq_lock);
+
for (i = start_id; i < start_id + num_queues; i++) {
err = gve_adminq_destroy_tx_queue(priv, i);
if (err)
- return err;
+ goto out;
}
- return gve_adminq_kick_and_wait(priv);
+ err = gve_adminq_kick_and_wait(priv);
+
+out:
+ mutex_unlock(&priv->adminq_lock);
+ return err;
+}
+
+static void gve_adminq_make_destroy_rx_queue_cmd(union gve_adminq_command *cmd,
+ u32 queue_index)
+{
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_RX_QUEUE);
+ cmd->destroy_rx_queue = (struct gve_adminq_destroy_rx_queue) {
+ .queue_id = cpu_to_be32(queue_index),
+ };
}
static int gve_adminq_destroy_rx_queue(struct gve_priv *priv, u32 queue_index)
{
union gve_adminq_command cmd;
- int err;
- memset(&cmd, 0, sizeof(cmd));
- cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_RX_QUEUE);
- cmd.destroy_rx_queue = (struct gve_adminq_destroy_rx_queue) {
- .queue_id = cpu_to_be32(queue_index),
- };
+ gve_adminq_make_destroy_rx_queue_cmd(&cmd, queue_index);
+ return gve_adminq_issue_cmd(priv, &cmd);
+}
- err = gve_adminq_issue_cmd(priv, &cmd);
- if (err)
- return err;
+/* Unlike gve_adminq_destroy_rx_queue, this actually rings the doorbell */
+int gve_adminq_destroy_single_rx_queue(struct gve_priv *priv, u32 queue_index)
+{
+ union gve_adminq_command cmd;
- return 0;
+ gve_adminq_make_destroy_rx_queue_cmd(&cmd, queue_index);
+ return gve_adminq_execute_cmd(priv, &cmd);
}
int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues)
@@ -736,40 +905,41 @@ int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues)
int err;
int i;
+ mutex_lock(&priv->adminq_lock);
+
for (i = 0; i < num_queues; i++) {
err = gve_adminq_destroy_rx_queue(priv, i);
if (err)
- return err;
+ goto out;
}
- return gve_adminq_kick_and_wait(priv);
-}
+ err = gve_adminq_kick_and_wait(priv);
-static int gve_set_desc_cnt(struct gve_priv *priv,
- struct gve_device_descriptor *descriptor)
-{
- priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
- priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
- return 0;
+out:
+ mutex_unlock(&priv->adminq_lock);
+ return err;
}
-static int
-gve_set_desc_cnt_dqo(struct gve_priv *priv,
- const struct gve_device_descriptor *descriptor,
- const struct gve_device_option_dqo_rda *dev_op_dqo_rda)
+static void gve_set_default_desc_cnt(struct gve_priv *priv,
+ const struct gve_device_descriptor *descriptor)
{
priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
- if (priv->queue_format == GVE_DQO_QPL_FORMAT)
- return 0;
-
- priv->options_dqo_rda.tx_comp_ring_entries =
- be16_to_cpu(dev_op_dqo_rda->tx_comp_ring_entries);
- priv->options_dqo_rda.rx_buff_ring_entries =
- be16_to_cpu(dev_op_dqo_rda->rx_buff_ring_entries);
+ /* set default ranges */
+ priv->max_tx_desc_cnt = priv->tx_desc_cnt;
+ priv->max_rx_desc_cnt = priv->rx_desc_cnt;
+ priv->min_tx_desc_cnt = priv->tx_desc_cnt;
+ priv->min_rx_desc_cnt = priv->rx_desc_cnt;
+}
- return 0;
+static void gve_set_default_rss_sizes(struct gve_priv *priv)
+{
+ if (!gve_is_gqi(priv)) {
+ priv->rss_key_size = GVE_RSS_KEY_SIZE;
+ priv->rss_lut_size = GVE_RSS_INDIR_SIZE;
+ priv->cache_rss_config = true;
+ }
}
static void gve_enable_supported_features(struct gve_priv *priv,
@@ -779,7 +949,15 @@ static void gve_enable_supported_features(struct gve_priv *priv,
const struct gve_device_option_dqo_qpl
*dev_op_dqo_qpl,
const struct gve_device_option_buffer_sizes
- *dev_op_buffer_sizes)
+ *dev_op_buffer_sizes,
+ const struct gve_device_option_flow_steering
+ *dev_op_flow_steering,
+ const struct gve_device_option_rss_config
+ *dev_op_rss_config,
+ const struct gve_device_option_nic_timestamp
+ *dev_op_nic_timestamp,
+ const struct gve_device_option_modify_ring
+ *dev_op_modify_ring)
{
/* Before control reaches this point, the page-size-capped max MTU from
* the gve_device_descriptor field has already been stored in
@@ -796,12 +974,8 @@ static void gve_enable_supported_features(struct gve_priv *priv,
if (dev_op_dqo_qpl) {
priv->tx_pages_per_qpl =
be16_to_cpu(dev_op_dqo_qpl->tx_pages_per_qpl);
- priv->rx_pages_per_qpl =
- be16_to_cpu(dev_op_dqo_qpl->rx_pages_per_qpl);
if (priv->tx_pages_per_qpl == 0)
priv->tx_pages_per_qpl = DQO_QPL_DEFAULT_TX_PAGES;
- if (priv->rx_pages_per_qpl == 0)
- priv->rx_pages_per_qpl = DQO_QPL_DEFAULT_RX_PAGES;
}
if (dev_op_buffer_sizes &&
@@ -813,13 +987,69 @@ static void gve_enable_supported_features(struct gve_priv *priv,
dev_info(&priv->pdev->dev,
"BUFFER SIZES device option enabled with max_rx_buffer_size of %u, header_buf_size of %u.\n",
priv->max_rx_buffer_size, priv->header_buf_size);
+ if (gve_is_dqo(priv) &&
+ priv->max_rx_buffer_size > GVE_DEFAULT_RX_BUFFER_SIZE)
+ priv->rx_cfg.packet_buffer_size =
+ priv->max_rx_buffer_size;
+ }
+
+ /* Read and store ring size ranges given by device */
+ if (dev_op_modify_ring &&
+ (supported_features_mask & GVE_SUP_MODIFY_RING_MASK)) {
+ priv->modify_ring_size_enabled = true;
+
+		/* The max ring size for DQO QPL should not be overwritten, due to a device limit */
+ if (priv->queue_format != GVE_DQO_QPL_FORMAT) {
+ priv->max_rx_desc_cnt = be16_to_cpu(dev_op_modify_ring->max_rx_ring_size);
+ priv->max_tx_desc_cnt = be16_to_cpu(dev_op_modify_ring->max_tx_ring_size);
+ }
+ if (priv->default_min_ring_size) {
+ /* If device hasn't provided minimums, use default minimums */
+ priv->min_tx_desc_cnt = GVE_DEFAULT_MIN_TX_RING_SIZE;
+ priv->min_rx_desc_cnt = GVE_DEFAULT_MIN_RX_RING_SIZE;
+ } else {
+ priv->min_rx_desc_cnt = be16_to_cpu(dev_op_modify_ring->min_rx_ring_size);
+ priv->min_tx_desc_cnt = be16_to_cpu(dev_op_modify_ring->min_tx_ring_size);
+ }
}
+
+ if (dev_op_flow_steering &&
+ (supported_features_mask & GVE_SUP_FLOW_STEERING_MASK)) {
+ if (dev_op_flow_steering->max_flow_rules) {
+ priv->max_flow_rules =
+ be32_to_cpu(dev_op_flow_steering->max_flow_rules);
+ priv->dev->hw_features |= NETIF_F_NTUPLE;
+ dev_info(&priv->pdev->dev,
+ "FLOW STEERING device option enabled with max rule limit of %u.\n",
+ priv->max_flow_rules);
+ }
+ }
+
+ if (dev_op_rss_config &&
+ (supported_features_mask & GVE_SUP_RSS_CONFIG_MASK)) {
+ priv->rss_key_size =
+ be16_to_cpu(dev_op_rss_config->hash_key_size);
+ priv->rss_lut_size =
+ be16_to_cpu(dev_op_rss_config->hash_lut_size);
+ priv->cache_rss_config = false;
+ dev_dbg(&priv->pdev->dev,
+ "RSS device option enabled with key size of %u, lut size of %u.\n",
+ priv->rss_key_size, priv->rss_lut_size);
+ }
+
+ if (dev_op_nic_timestamp &&
+ (supported_features_mask & GVE_SUP_NIC_TIMESTAMP_MASK))
+ priv->nic_timestamp_supported = true;
}
int gve_adminq_describe_device(struct gve_priv *priv)
{
+ struct gve_device_option_nic_timestamp *dev_op_nic_timestamp = NULL;
+ struct gve_device_option_flow_steering *dev_op_flow_steering = NULL;
struct gve_device_option_buffer_sizes *dev_op_buffer_sizes = NULL;
struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL;
+ struct gve_device_option_modify_ring *dev_op_modify_ring = NULL;
+ struct gve_device_option_rss_config *dev_op_rss_config = NULL;
struct gve_device_option_gqi_rda *dev_op_gqi_rda = NULL;
struct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL;
struct gve_device_option_dqo_rda *dev_op_dqo_rda = NULL;
@@ -851,9 +1081,12 @@ int gve_adminq_describe_device(struct gve_priv *priv)
err = gve_process_device_options(priv, descriptor, &dev_op_gqi_rda,
&dev_op_gqi_qpl, &dev_op_dqo_rda,
- &dev_op_jumbo_frames,
- &dev_op_dqo_qpl,
- &dev_op_buffer_sizes);
+ &dev_op_jumbo_frames, &dev_op_dqo_qpl,
+ &dev_op_buffer_sizes,
+ &dev_op_flow_steering,
+ &dev_op_rss_config,
+ &dev_op_nic_timestamp,
+ &dev_op_modify_ring);
if (err)
goto free_device_descriptor;
@@ -888,15 +1121,15 @@ int gve_adminq_describe_device(struct gve_priv *priv)
dev_info(&priv->pdev->dev,
"Driver is running with GQI QPL queue format.\n");
}
- if (gve_is_gqi(priv)) {
- err = gve_set_desc_cnt(priv, descriptor);
- } else {
- /* DQO supports LRO. */
+
+ /* set default descriptor counts */
+ gve_set_default_desc_cnt(priv, descriptor);
+
+ gve_set_default_rss_sizes(priv);
+
+ /* DQO supports LRO. */
+ if (!gve_is_gqi(priv))
priv->dev->hw_features |= NETIF_F_LRO;
- err = gve_set_desc_cnt_dqo(priv, descriptor, dev_op_dqo_rda);
- }
- if (err)
- goto free_device_descriptor;
priv->max_registered_pages =
be64_to_cpu(descriptor->max_registered_pages);
@@ -912,18 +1145,13 @@ int gve_adminq_describe_device(struct gve_priv *priv)
mac = descriptor->mac;
dev_info(&priv->pdev->dev, "MAC addr: %pM\n", mac);
priv->tx_pages_per_qpl = be16_to_cpu(descriptor->tx_pages_per_qpl);
- priv->rx_data_slot_cnt = be16_to_cpu(descriptor->rx_pages_per_qpl);
-
- if (gve_is_gqi(priv) && priv->rx_data_slot_cnt < priv->rx_desc_cnt) {
- dev_err(&priv->pdev->dev, "rx_data_slot_cnt cannot be smaller than rx_desc_cnt, setting rx_desc_cnt down to %d.\n",
- priv->rx_data_slot_cnt);
- priv->rx_desc_cnt = priv->rx_data_slot_cnt;
- }
priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues);
gve_enable_supported_features(priv, supported_features_mask,
dev_op_jumbo_frames, dev_op_dqo_qpl,
- dev_op_buffer_sizes);
+ dev_op_buffer_sizes, dev_op_flow_steering,
+ dev_op_rss_config, dev_op_nic_timestamp,
+ dev_op_modify_ring);
free_device_descriptor:
dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus);
@@ -976,20 +1204,6 @@ int gve_adminq_unregister_page_list(struct gve_priv *priv, u32 page_list_id)
return gve_adminq_execute_cmd(priv, &cmd);
}
-int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu)
-{
- union gve_adminq_command cmd;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.opcode = cpu_to_be32(GVE_ADMINQ_SET_DRIVER_PARAMETER);
- cmd.set_driver_param = (struct gve_adminq_set_driver_parameter) {
- .parameter_type = cpu_to_be32(GVE_SET_PARAM_MTU),
- .parameter_value = cpu_to_be64(mtu),
- };
-
- return gve_adminq_execute_cmd(priv, &cmd);
-}
-
int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len,
dma_addr_t stats_report_addr, u64 interval)
{
@@ -1049,6 +1263,22 @@ int gve_adminq_report_link_speed(struct gve_priv *priv)
return err;
}
+int gve_adminq_report_nic_ts(struct gve_priv *priv,
+ dma_addr_t nic_ts_report_addr)
+{
+ union gve_adminq_command cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opcode = cpu_to_be32(GVE_ADMINQ_REPORT_NIC_TIMESTAMP);
+ cmd.report_nic_ts = (struct gve_adminq_report_nic_ts) {
+ .nic_ts_report_len =
+ cpu_to_be64(sizeof(struct gve_nic_ts_report)),
+ .nic_ts_report_addr = cpu_to_be64(nic_ts_report_addr),
+ };
+
+ return gve_adminq_execute_cmd(priv, &cmd);
+}
+
int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv,
struct gve_ptype_lut *ptype_lut)
{
@@ -1086,3 +1316,293 @@ err:
ptype_map_bus);
return err;
}
+
+static int
+gve_adminq_configure_flow_rule(struct gve_priv *priv,
+ struct gve_adminq_configure_flow_rule *flow_rule_cmd)
+{
+ int err = gve_adminq_execute_extended_cmd(priv,
+ GVE_ADMINQ_CONFIGURE_FLOW_RULE,
+ sizeof(struct gve_adminq_configure_flow_rule),
+ flow_rule_cmd);
+
+ if (err == -ETIME) {
+		dev_err(&priv->pdev->dev, "Timed out configuring the flow rule, triggering reset\n");
+ gve_reset(priv, true);
+ } else if (!err) {
+ priv->flow_rules_cache.rules_cache_synced = false;
+ }
+
+ return err;
+}
+
+int gve_adminq_add_flow_rule(struct gve_priv *priv, struct gve_adminq_flow_rule *rule, u32 loc)
+{
+ struct gve_adminq_configure_flow_rule flow_rule_cmd = {
+ .opcode = cpu_to_be16(GVE_FLOW_RULE_CFG_ADD),
+ .location = cpu_to_be32(loc),
+ .rule = *rule,
+ };
+
+ return gve_adminq_configure_flow_rule(priv, &flow_rule_cmd);
+}
+
+int gve_adminq_del_flow_rule(struct gve_priv *priv, u32 loc)
+{
+ struct gve_adminq_configure_flow_rule flow_rule_cmd = {
+ .opcode = cpu_to_be16(GVE_FLOW_RULE_CFG_DEL),
+ .location = cpu_to_be32(loc),
+ };
+
+ return gve_adminq_configure_flow_rule(priv, &flow_rule_cmd);
+}
+
+int gve_adminq_reset_flow_rules(struct gve_priv *priv)
+{
+ struct gve_adminq_configure_flow_rule flow_rule_cmd = {
+ .opcode = cpu_to_be16(GVE_FLOW_RULE_CFG_RESET),
+ };
+
+ return gve_adminq_configure_flow_rule(priv, &flow_rule_cmd);
+}
+
+int gve_adminq_configure_rss(struct gve_priv *priv, struct ethtool_rxfh_param *rxfh)
+{
+ const u32 *hash_lut_to_config = NULL;
+ const u8 *hash_key_to_config = NULL;
+ dma_addr_t lut_bus = 0, key_bus = 0;
+ union gve_adminq_command cmd;
+ __be32 *lut = NULL;
+ u8 hash_alg = 0;
+ u8 *key = NULL;
+ int err = 0;
+ u16 i;
+
+ switch (rxfh->hfunc) {
+ case ETH_RSS_HASH_NO_CHANGE:
+ fallthrough;
+ case ETH_RSS_HASH_TOP:
+ hash_alg = ETH_RSS_HASH_TOP;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (rxfh->indir) {
+ if (rxfh->indir_size != priv->rss_lut_size)
+ return -EINVAL;
+
+ hash_lut_to_config = rxfh->indir;
+ } else if (priv->cache_rss_config) {
+ hash_lut_to_config = priv->rss_config.hash_lut;
+ }
+
+ if (hash_lut_to_config) {
+ lut = dma_alloc_coherent(&priv->pdev->dev,
+ priv->rss_lut_size * sizeof(*lut),
+ &lut_bus, GFP_KERNEL);
+ if (!lut)
+ return -ENOMEM;
+
+ for (i = 0; i < priv->rss_lut_size; i++)
+ lut[i] = cpu_to_be32(hash_lut_to_config[i]);
+ }
+
+ if (rxfh->key) {
+ if (rxfh->key_size != priv->rss_key_size) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ hash_key_to_config = rxfh->key;
+ } else if (priv->cache_rss_config) {
+ hash_key_to_config = priv->rss_config.hash_key;
+ }
+
+ if (hash_key_to_config) {
+ key = dma_alloc_coherent(&priv->pdev->dev,
+ priv->rss_key_size,
+ &key_bus, GFP_KERNEL);
+ if (!key) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(key, hash_key_to_config, priv->rss_key_size);
+ }
+
+	/* Zero-valued fields in cmd.configure_rss instruct the device not to
+	 * update those fields.
+	 */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opcode = cpu_to_be32(GVE_ADMINQ_CONFIGURE_RSS);
+ cmd.configure_rss = (struct gve_adminq_configure_rss) {
+ .hash_types = cpu_to_be16(BIT(GVE_RSS_HASH_TCPV4) |
+ BIT(GVE_RSS_HASH_UDPV4) |
+ BIT(GVE_RSS_HASH_TCPV6) |
+ BIT(GVE_RSS_HASH_UDPV6)),
+ .hash_alg = hash_alg,
+ .hash_key_size =
+ cpu_to_be16((key_bus) ? priv->rss_key_size : 0),
+ .hash_lut_size =
+ cpu_to_be16((lut_bus) ? priv->rss_lut_size : 0),
+ .hash_key_addr = cpu_to_be64(key_bus),
+ .hash_lut_addr = cpu_to_be64(lut_bus),
+ };
+
+ err = gve_adminq_execute_cmd(priv, &cmd);
+
+out:
+ if (lut)
+ dma_free_coherent(&priv->pdev->dev,
+ priv->rss_lut_size * sizeof(*lut),
+ lut, lut_bus);
+ if (key)
+ dma_free_coherent(&priv->pdev->dev,
+ priv->rss_key_size, key, key_bus);
+ return err;
+}
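For reference, the indirection table is converted entry by entry to big endian before being handed to the device. A standalone, user-space flavoured sketch of the same conversion, using htonl() as a stand-in for cpu_to_be32():

#include <stdint.h>
#include <arpa/inet.h>

/* Convert a host-order RSS indirection table into the big-endian layout the
 * device expects, mirroring the LUT loop in gve_adminq_configure_rss().
 */
static void rss_lut_to_be(const uint32_t *host_lut, uint32_t *dev_lut,
			  unsigned int entries)
{
	for (unsigned int i = 0; i < entries; i++)
		dev_lut[i] = htonl(host_lut[i]);
}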
+
+/* In the DMA memory that the driver allocated for the flow-rules query, the device first writes
+ * a struct gve_query_flow_rules_descriptor. Immediately after it, the device writes an array of
+ * rules or rule ids whose count is given in the descriptor.
+ * For GVE_FLOW_RULE_QUERY_STATS, the device writes only the descriptor.
+ */
+static int gve_adminq_process_flow_rules_query(struct gve_priv *priv, u16 query_opcode,
+ struct gve_query_flow_rules_descriptor *descriptor)
+{
+ struct gve_flow_rules_cache *flow_rules_cache = &priv->flow_rules_cache;
+ u32 num_queried_rules, total_memory_len, rule_info_len;
+ void *rule_info;
+
+ total_memory_len = be32_to_cpu(descriptor->total_length);
+ num_queried_rules = be32_to_cpu(descriptor->num_queried_rules);
+ rule_info = (void *)(descriptor + 1);
+
+ switch (query_opcode) {
+ case GVE_FLOW_RULE_QUERY_RULES:
+ rule_info_len = num_queried_rules * sizeof(*flow_rules_cache->rules_cache);
+ if (sizeof(*descriptor) + rule_info_len != total_memory_len) {
+ dev_err(&priv->dev->dev, "flow rules query is out of memory.\n");
+ return -ENOMEM;
+ }
+
+ memcpy(flow_rules_cache->rules_cache, rule_info, rule_info_len);
+ flow_rules_cache->rules_cache_num = num_queried_rules;
+ break;
+ case GVE_FLOW_RULE_QUERY_IDS:
+ rule_info_len = num_queried_rules * sizeof(*flow_rules_cache->rule_ids_cache);
+ if (sizeof(*descriptor) + rule_info_len != total_memory_len) {
+ dev_err(&priv->dev->dev, "flow rule ids query is out of memory.\n");
+ return -ENOMEM;
+ }
+
+ memcpy(flow_rules_cache->rule_ids_cache, rule_info, rule_info_len);
+ flow_rules_cache->rule_ids_cache_num = num_queried_rules;
+ break;
+ case GVE_FLOW_RULE_QUERY_STATS:
+ priv->num_flow_rules = be32_to_cpu(descriptor->num_flow_rules);
+ priv->max_flow_rules = be32_to_cpu(descriptor->max_flow_rules);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
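The size checks above reject a reply whose reported total_length does not match the descriptor plus the queried entries. Pulled out as a standalone helper (struct sizes assumed from this patch), the arithmetic is:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* total_length must equal the descriptor followed by exactly
 * num_queried_rules entries of the queried kind (rules or rule ids).
 */
static bool flow_query_reply_len_ok(size_t desc_size, size_t entry_size,
				    uint32_t num_queried_rules,
				    uint32_t total_length)
{
	return desc_size + (size_t)num_queried_rules * entry_size ==
	       (size_t)total_length;
}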
+
+int gve_adminq_query_flow_rules(struct gve_priv *priv, u16 query_opcode, u32 starting_loc)
+{
+ struct gve_query_flow_rules_descriptor *descriptor;
+ union gve_adminq_command cmd;
+ dma_addr_t descriptor_bus;
+ int err = 0;
+
+ memset(&cmd, 0, sizeof(cmd));
+ descriptor = dma_pool_alloc(priv->adminq_pool, GFP_KERNEL, &descriptor_bus);
+ if (!descriptor)
+ return -ENOMEM;
+
+ cmd.opcode = cpu_to_be32(GVE_ADMINQ_QUERY_FLOW_RULES);
+ cmd.query_flow_rules = (struct gve_adminq_query_flow_rules) {
+ .opcode = cpu_to_be16(query_opcode),
+ .starting_rule_id = cpu_to_be32(starting_loc),
+ .available_length = cpu_to_be64(GVE_ADMINQ_BUFFER_SIZE),
+ .rule_descriptor_addr = cpu_to_be64(descriptor_bus),
+ };
+ err = gve_adminq_execute_cmd(priv, &cmd);
+ if (err)
+ goto out;
+
+ err = gve_adminq_process_flow_rules_query(priv, query_opcode, descriptor);
+
+out:
+ dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus);
+ return err;
+}
+
+static int gve_adminq_process_rss_query(struct gve_priv *priv,
+ struct gve_query_rss_descriptor *descriptor,
+ struct ethtool_rxfh_param *rxfh)
+{
+ u32 total_memory_length;
+ u16 hash_lut_length;
+ void *rss_info_addr;
+ __be32 *lut;
+ u16 i;
+
+ total_memory_length = be32_to_cpu(descriptor->total_length);
+ hash_lut_length = priv->rss_lut_size * sizeof(*rxfh->indir);
+
+ if (sizeof(*descriptor) + priv->rss_key_size + hash_lut_length != total_memory_length) {
+ dev_err(&priv->dev->dev,
+ "rss query desc from device has invalid length parameter.\n");
+ return -EINVAL;
+ }
+
+ rxfh->hfunc = descriptor->hash_alg;
+
+ rss_info_addr = (void *)(descriptor + 1);
+ if (rxfh->key) {
+ rxfh->key_size = priv->rss_key_size;
+ memcpy(rxfh->key, rss_info_addr, priv->rss_key_size);
+ }
+
+ rss_info_addr += priv->rss_key_size;
+ lut = (__be32 *)rss_info_addr;
+ if (rxfh->indir) {
+ rxfh->indir_size = priv->rss_lut_size;
+ for (i = 0; i < priv->rss_lut_size; i++)
+ rxfh->indir[i] = be32_to_cpu(lut[i]);
+ }
+
+ return 0;
+}
+
+int gve_adminq_query_rss_config(struct gve_priv *priv, struct ethtool_rxfh_param *rxfh)
+{
+ struct gve_query_rss_descriptor *descriptor;
+ union gve_adminq_command cmd;
+ dma_addr_t descriptor_bus;
+ int err = 0;
+
+ descriptor = dma_pool_alloc(priv->adminq_pool, GFP_KERNEL, &descriptor_bus);
+ if (!descriptor)
+ return -ENOMEM;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opcode = cpu_to_be32(GVE_ADMINQ_QUERY_RSS);
+ cmd.query_rss = (struct gve_adminq_query_rss) {
+ .available_length = cpu_to_be64(GVE_ADMINQ_BUFFER_SIZE),
+ .rss_descriptor_addr = cpu_to_be64(descriptor_bus),
+ };
+ err = gve_adminq_execute_cmd(priv, &cmd);
+ if (err)
+ goto out;
+
+ err = gve_adminq_process_rss_query(priv, descriptor, rxfh);
+
+out:
+ dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus);
+ return err;
+}
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h
index 5ac972e45ff8..22a74b6aa17e 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.h
+++ b/drivers/net/ethernet/google/gve/gve_adminq.h
@@ -20,11 +20,27 @@ enum gve_adminq_opcodes {
GVE_ADMINQ_DESTROY_TX_QUEUE = 0x7,
GVE_ADMINQ_DESTROY_RX_QUEUE = 0x8,
GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES = 0x9,
+ GVE_ADMINQ_CONFIGURE_RSS = 0xA,
GVE_ADMINQ_SET_DRIVER_PARAMETER = 0xB,
GVE_ADMINQ_REPORT_STATS = 0xC,
GVE_ADMINQ_REPORT_LINK_SPEED = 0xD,
GVE_ADMINQ_GET_PTYPE_MAP = 0xE,
GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY = 0xF,
+ GVE_ADMINQ_QUERY_FLOW_RULES = 0x10,
+ GVE_ADMINQ_REPORT_NIC_TIMESTAMP = 0x11,
+ GVE_ADMINQ_QUERY_RSS = 0x12,
+
+ /* For commands that are larger than 56 bytes */
+ GVE_ADMINQ_EXTENDED_COMMAND = 0xFF,
+};
+
+/* A normal adminq command is limited to 56 bytes. Longer commands are wrapped
+ * in a GVE_ADMINQ_EXTENDED_COMMAND carrying an inner opcode from
+ * gve_adminq_extended_cmd_opcodes; the inner command itself is written to DMA
+ * memory allocated for the extended command.
+ */
+enum gve_adminq_extended_cmd_opcodes {
+ GVE_ADMINQ_CONFIGURE_FLOW_RULE = 0x101,
};
/* Admin queue status codes */
@@ -103,8 +119,7 @@ static_assert(sizeof(struct gve_device_option_gqi_qpl) == 4);
struct gve_device_option_dqo_rda {
__be32 supported_features_mask;
- __be16 tx_comp_ring_entries;
- __be16 rx_buff_ring_entries;
+ __be32 reserved;
};
static_assert(sizeof(struct gve_device_option_dqo_rda) == 8);
@@ -134,6 +149,38 @@ struct gve_device_option_buffer_sizes {
static_assert(sizeof(struct gve_device_option_buffer_sizes) == 8);
+struct gve_device_option_modify_ring {
+ __be32 supported_featured_mask;
+ __be16 max_rx_ring_size;
+ __be16 max_tx_ring_size;
+ __be16 min_rx_ring_size;
+ __be16 min_tx_ring_size;
+};
+
+static_assert(sizeof(struct gve_device_option_modify_ring) == 12);
+
+struct gve_device_option_flow_steering {
+ __be32 supported_features_mask;
+ __be32 reserved;
+ __be32 max_flow_rules;
+};
+
+static_assert(sizeof(struct gve_device_option_flow_steering) == 12);
+
+struct gve_device_option_rss_config {
+ __be32 supported_features_mask;
+ __be16 hash_key_size;
+ __be16 hash_lut_size;
+};
+
+static_assert(sizeof(struct gve_device_option_rss_config) == 8);
+
+struct gve_device_option_nic_timestamp {
+ __be32 supported_features_mask;
+};
+
+static_assert(sizeof(struct gve_device_option_nic_timestamp) == 4);
+
/* Terminology:
*
* RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA
@@ -143,28 +190,40 @@ static_assert(sizeof(struct gve_device_option_buffer_sizes) == 8);
* the device for read/write and data is copied from/to SKBs.
*/
enum gve_dev_opt_id {
- GVE_DEV_OPT_ID_GQI_RAW_ADDRESSING = 0x1,
- GVE_DEV_OPT_ID_GQI_RDA = 0x2,
- GVE_DEV_OPT_ID_GQI_QPL = 0x3,
- GVE_DEV_OPT_ID_DQO_RDA = 0x4,
- GVE_DEV_OPT_ID_DQO_QPL = 0x7,
- GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8,
- GVE_DEV_OPT_ID_BUFFER_SIZES = 0xa,
+ GVE_DEV_OPT_ID_GQI_RAW_ADDRESSING = 0x1,
+ GVE_DEV_OPT_ID_GQI_RDA = 0x2,
+ GVE_DEV_OPT_ID_GQI_QPL = 0x3,
+ GVE_DEV_OPT_ID_DQO_RDA = 0x4,
+ GVE_DEV_OPT_ID_MODIFY_RING = 0x6,
+ GVE_DEV_OPT_ID_DQO_QPL = 0x7,
+ GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8,
+ GVE_DEV_OPT_ID_BUFFER_SIZES = 0xa,
+ GVE_DEV_OPT_ID_FLOW_STEERING = 0xb,
+ GVE_DEV_OPT_ID_NIC_TIMESTAMP = 0xd,
+ GVE_DEV_OPT_ID_RSS_CONFIG = 0xe,
};
enum gve_dev_opt_req_feat_mask {
- GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING = 0x0,
- GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA = 0x0,
- GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL = 0x0,
- GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA = 0x0,
- GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES = 0x0,
- GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL = 0x0,
- GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_FLOW_STEERING = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_RSS_CONFIG = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_NIC_TIMESTAMP = 0x0,
};
enum gve_sup_feature_mask {
- GVE_SUP_JUMBO_FRAMES_MASK = 1 << 2,
- GVE_SUP_BUFFER_SIZES_MASK = 1 << 4,
+ GVE_SUP_MODIFY_RING_MASK = 1 << 0,
+ GVE_SUP_JUMBO_FRAMES_MASK = 1 << 2,
+ GVE_SUP_BUFFER_SIZES_MASK = 1 << 4,
+ GVE_SUP_FLOW_STEERING_MASK = 1 << 5,
+ GVE_SUP_RSS_CONFIG_MASK = 1 << 7,
+ GVE_SUP_NIC_TIMESTAMP_MASK = 1 << 8,
};
#define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0
@@ -178,6 +237,7 @@ enum gve_driver_capbility {
gve_driver_capability_dqo_rda = 3,
gve_driver_capability_alt_miss_compl = 4,
gve_driver_capability_flexible_buffer_size = 5,
+ gve_driver_capability_flexible_rss_size = 6,
};
#define GVE_CAP1(a) BIT((int)a)
@@ -190,12 +250,21 @@ enum gve_driver_capbility {
GVE_CAP1(gve_driver_capability_gqi_rda) | \
GVE_CAP1(gve_driver_capability_dqo_rda) | \
GVE_CAP1(gve_driver_capability_alt_miss_compl) | \
- GVE_CAP1(gve_driver_capability_flexible_buffer_size))
+ GVE_CAP1(gve_driver_capability_flexible_buffer_size) | \
+ GVE_CAP1(gve_driver_capability_flexible_rss_size))
#define GVE_DRIVER_CAPABILITY_FLAGS2 0x0
#define GVE_DRIVER_CAPABILITY_FLAGS3 0x0
#define GVE_DRIVER_CAPABILITY_FLAGS4 0x0
+struct gve_adminq_extended_command {
+ __be32 inner_opcode;
+ __be32 inner_length;
+ __be64 inner_command_addr;
+};
+
+static_assert(sizeof(struct gve_adminq_extended_command) == 16);
+
struct gve_driver_info {
u8 os_type; /* 0x01 = Linux */
u8 driver_major;
@@ -333,6 +402,21 @@ struct gve_adminq_report_link_speed {
static_assert(sizeof(struct gve_adminq_report_link_speed) == 8);
+struct gve_adminq_report_nic_ts {
+ __be64 nic_ts_report_len;
+ __be64 nic_ts_report_addr;
+};
+
+static_assert(sizeof(struct gve_adminq_report_nic_ts) == 16);
+
+struct gve_nic_ts_report {
+ __be64 nic_timestamp; /* NIC clock in nanoseconds */
+ __be64 reserved1;
+ __be64 reserved2;
+ __be64 reserved3;
+ __be64 reserved4;
+};
+
struct stats {
__be32 stat_name;
__be32 queue_id;
@@ -392,7 +476,7 @@ struct gve_ptype_entry {
};
struct gve_ptype_map {
- struct gve_ptype_entry ptypes[1 << 10]; /* PTYPES are always 10 bits. */
+ struct gve_ptype_entry ptypes[GVE_NUM_PTYPES]; /* PTYPES are always 10 bits. */
};
struct gve_adminq_get_ptype_map {
@@ -400,6 +484,109 @@ struct gve_adminq_get_ptype_map {
__be64 ptype_map_addr;
};
+/* Flow-steering related definitions */
+enum gve_adminq_flow_rule_cfg_opcode {
+ GVE_FLOW_RULE_CFG_ADD = 0,
+ GVE_FLOW_RULE_CFG_DEL = 1,
+ GVE_FLOW_RULE_CFG_RESET = 2,
+};
+
+enum gve_adminq_flow_rule_query_opcode {
+ GVE_FLOW_RULE_QUERY_RULES = 0,
+ GVE_FLOW_RULE_QUERY_IDS = 1,
+ GVE_FLOW_RULE_QUERY_STATS = 2,
+};
+
+enum gve_adminq_flow_type {
+ GVE_FLOW_TYPE_TCPV4,
+ GVE_FLOW_TYPE_UDPV4,
+ GVE_FLOW_TYPE_SCTPV4,
+ GVE_FLOW_TYPE_AHV4,
+ GVE_FLOW_TYPE_ESPV4,
+ GVE_FLOW_TYPE_TCPV6,
+ GVE_FLOW_TYPE_UDPV6,
+ GVE_FLOW_TYPE_SCTPV6,
+ GVE_FLOW_TYPE_AHV6,
+ GVE_FLOW_TYPE_ESPV6,
+};
+
+/* Flow-steering command */
+struct gve_adminq_flow_rule {
+ __be16 flow_type;
+ __be16 action; /* RX queue id */
+ struct gve_flow_spec key;
+ struct gve_flow_spec mask;
+};
+
+struct gve_adminq_configure_flow_rule {
+ __be16 opcode;
+ u8 padding[2];
+ struct gve_adminq_flow_rule rule;
+ __be32 location;
+};
+
+static_assert(sizeof(struct gve_adminq_configure_flow_rule) == 92);
+
+struct gve_query_flow_rules_descriptor {
+ __be32 num_flow_rules;
+ __be32 max_flow_rules;
+ __be32 num_queried_rules;
+ __be32 total_length;
+};
+
+struct gve_adminq_queried_flow_rule {
+ __be32 location;
+ struct gve_adminq_flow_rule flow_rule;
+};
+
+struct gve_adminq_query_flow_rules {
+ __be16 opcode;
+ u8 padding[2];
+ __be32 starting_rule_id;
+ __be64 available_length; /* The dma memory length that the driver allocated */
+ __be64 rule_descriptor_addr; /* The dma memory address */
+};
+
+static_assert(sizeof(struct gve_adminq_query_flow_rules) == 24);
+
+enum gve_rss_hash_type {
+ GVE_RSS_HASH_IPV4,
+ GVE_RSS_HASH_TCPV4,
+ GVE_RSS_HASH_IPV6,
+ GVE_RSS_HASH_IPV6_EX,
+ GVE_RSS_HASH_TCPV6,
+ GVE_RSS_HASH_TCPV6_EX,
+ GVE_RSS_HASH_UDPV4,
+ GVE_RSS_HASH_UDPV6,
+ GVE_RSS_HASH_UDPV6_EX,
+};
+
+struct gve_adminq_configure_rss {
+ __be16 hash_types;
+ u8 hash_alg;
+ u8 reserved;
+ __be16 hash_key_size;
+ __be16 hash_lut_size;
+ __be64 hash_key_addr;
+ __be64 hash_lut_addr;
+};
+
+static_assert(sizeof(struct gve_adminq_configure_rss) == 24);
+
+struct gve_query_rss_descriptor {
+ __be32 total_length;
+ __be16 hash_types;
+ u8 hash_alg;
+ u8 reserved;
+};
+
+struct gve_adminq_query_rss {
+ __be64 available_length;
+ __be64 rss_descriptor_addr;
+};
+
+static_assert(sizeof(struct gve_adminq_query_rss) == 16);
+
union gve_adminq_command {
struct {
__be32 opcode;
@@ -420,6 +607,11 @@ union gve_adminq_command {
struct gve_adminq_get_ptype_map get_ptype_map;
struct gve_adminq_verify_driver_compatibility
verify_driver_compatibility;
+ struct gve_adminq_query_flow_rules query_flow_rules;
+ struct gve_adminq_configure_rss configure_rss;
+ struct gve_adminq_query_rss query_rss;
+ struct gve_adminq_report_nic_ts report_nic_ts;
+ struct gve_adminq_extended_command extended_command;
};
};
u8 reserved[64];
@@ -439,18 +631,27 @@ int gve_adminq_configure_device_resources(struct gve_priv *priv,
int gve_adminq_deconfigure_device_resources(struct gve_priv *priv);
int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues);
int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues);
+int gve_adminq_create_single_rx_queue(struct gve_priv *priv, u32 queue_index);
int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues);
+int gve_adminq_destroy_single_rx_queue(struct gve_priv *priv, u32 queue_index);
int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 queue_id);
int gve_adminq_register_page_list(struct gve_priv *priv,
struct gve_queue_page_list *qpl);
int gve_adminq_unregister_page_list(struct gve_priv *priv, u32 page_list_id);
-int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu);
int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len,
dma_addr_t stats_report_addr, u64 interval);
int gve_adminq_verify_driver_compatibility(struct gve_priv *priv,
u64 driver_info_len,
dma_addr_t driver_info_addr);
int gve_adminq_report_link_speed(struct gve_priv *priv);
+int gve_adminq_add_flow_rule(struct gve_priv *priv, struct gve_adminq_flow_rule *rule, u32 loc);
+int gve_adminq_del_flow_rule(struct gve_priv *priv, u32 loc);
+int gve_adminq_reset_flow_rules(struct gve_priv *priv);
+int gve_adminq_query_flow_rules(struct gve_priv *priv, u16 query_opcode, u32 starting_loc);
+int gve_adminq_configure_rss(struct gve_priv *priv, struct ethtool_rxfh_param *rxfh);
+int gve_adminq_query_rss_config(struct gve_priv *priv, struct ethtool_rxfh_param *rxfh);
+int gve_adminq_report_nic_ts(struct gve_priv *priv,
+ dma_addr_t nic_ts_report_addr);
struct gve_ptype_lut;
int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv,
diff --git a/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c b/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
new file mode 100644
index 000000000000..0e2b703c673a
--- /dev/null
+++ b/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
@@ -0,0 +1,344 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Google virtual Ethernet (gve) driver
+ *
+ * Copyright (C) 2015-2024 Google, Inc.
+ */
+
+#include <net/xdp_sock_drv.h>
+#include "gve.h"
+#include "gve_utils.h"
+
+int gve_buf_ref_cnt(struct gve_rx_buf_state_dqo *bs)
+{
+ return page_count(bs->page_info.page) - bs->page_info.pagecnt_bias;
+}
+
+struct gve_rx_buf_state_dqo *gve_alloc_buf_state(struct gve_rx_ring *rx)
+{
+ struct gve_rx_buf_state_dqo *buf_state;
+ s16 buffer_id;
+
+ buffer_id = rx->dqo.free_buf_states;
+ if (unlikely(buffer_id == -1))
+ return NULL;
+
+ buf_state = &rx->dqo.buf_states[buffer_id];
+
+ /* Remove buf_state from free list */
+ rx->dqo.free_buf_states = buf_state->next;
+
+ /* Point buf_state to itself to mark it as allocated */
+ buf_state->next = buffer_id;
+
+ /* Clear the buffer pointers */
+ buf_state->page_info.page = NULL;
+ buf_state->xsk_buff = NULL;
+
+ return buf_state;
+}
+
+bool gve_buf_state_is_allocated(struct gve_rx_ring *rx,
+ struct gve_rx_buf_state_dqo *buf_state)
+{
+ s16 buffer_id = buf_state - rx->dqo.buf_states;
+
+ return buf_state->next == buffer_id;
+}
+
+void gve_free_buf_state(struct gve_rx_ring *rx,
+ struct gve_rx_buf_state_dqo *buf_state)
+{
+ s16 buffer_id = buf_state - rx->dqo.buf_states;
+
+ buf_state->next = rx->dqo.free_buf_states;
+ rx->dqo.free_buf_states = buffer_id;
+}
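The helpers above implement an index-based free list over rx->dqo.buf_states: the head index lives in the ring, each state stores the next free index, and an allocated state points at itself (which is what gve_buf_state_is_allocated() tests). A compact standalone model of that scheme, with hypothetical names:

#include <stdint.h>

struct example_buf_state {
	int16_t next;	/* next free index, or own index when allocated */
};

static int16_t example_pop(struct example_buf_state *states, int16_t *head)
{
	int16_t id = *head;

	if (id == -1)
		return -1;
	*head = states[id].next;
	states[id].next = id;		/* self-reference marks "allocated" */
	return id;
}

static void example_push(struct example_buf_state *states, int16_t *head,
			 int16_t id)
{
	states[id].next = *head;
	*head = id;
}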
+
+struct gve_rx_buf_state_dqo *gve_dequeue_buf_state(struct gve_rx_ring *rx,
+ struct gve_index_list *list)
+{
+ struct gve_rx_buf_state_dqo *buf_state;
+ s16 buffer_id;
+
+ buffer_id = list->head;
+ if (unlikely(buffer_id == -1))
+ return NULL;
+
+ buf_state = &rx->dqo.buf_states[buffer_id];
+
+ /* Remove buf_state from list */
+ list->head = buf_state->next;
+ if (buf_state->next == -1)
+ list->tail = -1;
+
+ /* Point buf_state to itself to mark it as allocated */
+ buf_state->next = buffer_id;
+
+ return buf_state;
+}
+
+void gve_enqueue_buf_state(struct gve_rx_ring *rx, struct gve_index_list *list,
+ struct gve_rx_buf_state_dqo *buf_state)
+{
+ s16 buffer_id = buf_state - rx->dqo.buf_states;
+
+ buf_state->next = -1;
+
+ if (list->head == -1) {
+ list->head = buffer_id;
+ list->tail = buffer_id;
+ } else {
+ int tail = list->tail;
+
+ rx->dqo.buf_states[tail].next = buffer_id;
+ list->tail = buffer_id;
+ }
+}
+
+struct gve_rx_buf_state_dqo *gve_get_recycled_buf_state(struct gve_rx_ring *rx)
+{
+ struct gve_rx_buf_state_dqo *buf_state;
+ int i;
+
+ /* Recycled buf states are immediately usable. */
+ buf_state = gve_dequeue_buf_state(rx, &rx->dqo.recycled_buf_states);
+ if (likely(buf_state))
+ return buf_state;
+
+ if (unlikely(rx->dqo.used_buf_states.head == -1))
+ return NULL;
+
+ /* Used buf states are only usable when ref count reaches 0, which means
+ * no SKBs refer to them.
+ *
+ * Search a limited number before giving up.
+ */
+ for (i = 0; i < 5; i++) {
+ buf_state = gve_dequeue_buf_state(rx, &rx->dqo.used_buf_states);
+ if (gve_buf_ref_cnt(buf_state) == 0) {
+ rx->dqo.used_buf_states_cnt--;
+ return buf_state;
+ }
+
+ gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state);
+ }
+
+ return NULL;
+}
+
+int gve_alloc_qpl_page_dqo(struct gve_rx_ring *rx,
+ struct gve_rx_buf_state_dqo *buf_state)
+{
+ struct gve_priv *priv = rx->gve;
+ u32 idx;
+
+ idx = rx->dqo.next_qpl_page_idx;
+ if (idx >= gve_get_rx_pages_per_qpl_dqo(priv->rx_desc_cnt)) {
+ net_err_ratelimited("%s: Out of QPL pages\n",
+ priv->dev->name);
+ return -ENOMEM;
+ }
+ buf_state->page_info.page = rx->dqo.qpl->pages[idx];
+ buf_state->addr = rx->dqo.qpl->page_buses[idx];
+ rx->dqo.next_qpl_page_idx++;
+ buf_state->page_info.page_offset = 0;
+ buf_state->page_info.page_address =
+ page_address(buf_state->page_info.page);
+ buf_state->page_info.buf_size = rx->packet_buffer_truesize;
+ buf_state->page_info.pad = rx->rx_headroom;
+ buf_state->last_single_ref_offset = 0;
+
+ /* The page already has 1 ref. */
+ page_ref_add(buf_state->page_info.page, INT_MAX - 1);
+ buf_state->page_info.pagecnt_bias = INT_MAX;
+
+ return 0;
+}
+
+void gve_free_qpl_page_dqo(struct gve_rx_buf_state_dqo *buf_state)
+{
+ if (!buf_state->page_info.page)
+ return;
+
+ page_ref_sub(buf_state->page_info.page,
+ buf_state->page_info.pagecnt_bias - 1);
+ buf_state->page_info.page = NULL;
+}
+
+void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct gve_rx_buf_state_dqo *buf_state)
+{
+ const u16 data_buffer_size = rx->packet_buffer_truesize;
+ int pagecount;
+
+ /* Can't reuse if we only fit one buffer per page */
+ if (data_buffer_size * 2 > PAGE_SIZE)
+ goto mark_used;
+
+ pagecount = gve_buf_ref_cnt(buf_state);
+
+ /* Record the offset when we have a single remaining reference.
+ *
+ * When this happens, we know all of the other offsets of the page are
+ * usable.
+ */
+ if (pagecount == 1) {
+ buf_state->last_single_ref_offset =
+ buf_state->page_info.page_offset;
+ }
+
+	/* Use the next buffer-sized chunk in the page. */
+ buf_state->page_info.page_offset += data_buffer_size;
+ buf_state->page_info.page_offset &= (PAGE_SIZE - 1);
+
+ /* If we wrap around to the same offset without ever dropping to 1
+ * reference, then we don't know if this offset was ever freed.
+ */
+ if (buf_state->page_info.page_offset ==
+ buf_state->last_single_ref_offset) {
+ goto mark_used;
+ }
+
+ gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, buf_state);
+ return;
+
+mark_used:
+ gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state);
+ rx->dqo.used_buf_states_cnt++;
+}
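gve_try_recycle_buf() walks a page one buffer-sized chunk at a time and wraps the offset within the page. A small runnable model of that rotation (page and buffer sizes chosen for illustration, not taken from the driver):

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE 4096u

static uint32_t next_chunk_offset(uint32_t offset, uint32_t buf_size)
{
	/* Same arithmetic as the driver: advance, then mask within the page */
	return (offset + buf_size) & (EXAMPLE_PAGE_SIZE - 1);
}

int main(void)
{
	uint32_t off = 0;

	/* With 2048-byte buffers the offset alternates between 2048 and 0 */
	for (int i = 0; i < 4; i++) {
		off = next_chunk_offset(off, 2048);
		printf("step %d: offset %u\n", i + 1, off);
	}
	return 0;
}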
+
+void gve_free_to_page_pool(struct gve_rx_ring *rx,
+ struct gve_rx_buf_state_dqo *buf_state,
+ bool allow_direct)
+{
+ netmem_ref netmem = buf_state->page_info.netmem;
+
+ if (!netmem)
+ return;
+
+ page_pool_put_full_netmem(netmem_get_pp(netmem), netmem, allow_direct);
+ buf_state->page_info.netmem = 0;
+}
+
+static int gve_alloc_from_page_pool(struct gve_rx_ring *rx,
+ struct gve_rx_buf_state_dqo *buf_state)
+{
+ netmem_ref netmem;
+
+ buf_state->page_info.buf_size = rx->packet_buffer_truesize;
+ netmem = page_pool_alloc_netmem(rx->dqo.page_pool,
+ &buf_state->page_info.page_offset,
+ &buf_state->page_info.buf_size,
+ GFP_ATOMIC);
+
+ if (!netmem)
+ return -ENOMEM;
+
+ buf_state->page_info.netmem = netmem;
+ buf_state->page_info.page_address = netmem_address(netmem);
+ buf_state->addr = page_pool_get_dma_addr_netmem(netmem);
+ buf_state->page_info.pad = rx->dqo.page_pool->p.offset;
+
+ return 0;
+}
+
+struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
+ struct gve_rx_ring *rx,
+ bool xdp)
+{
+ u32 ntfy_id = gve_rx_idx_to_ntfy(priv, rx->q_num);
+ struct page_pool_params pp = {
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .order = 0,
+ .pool_size = GVE_PAGE_POOL_SIZE_MULTIPLIER * priv->rx_desc_cnt,
+ .nid = priv->numa_node,
+ .dev = &priv->pdev->dev,
+ .netdev = priv->dev,
+ .napi = &priv->ntfy_blocks[ntfy_id].napi,
+ .max_len = PAGE_SIZE,
+ .dma_dir = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
+ .offset = xdp ? XDP_PACKET_HEADROOM : 0,
+ };
+
+ if (priv->header_split_enabled) {
+ pp.flags |= PP_FLAG_ALLOW_UNREADABLE_NETMEM;
+ pp.queue_idx = rx->q_num;
+ }
+
+ return page_pool_create(&pp);
+}
+
+void gve_free_buffer(struct gve_rx_ring *rx,
+ struct gve_rx_buf_state_dqo *buf_state)
+{
+ if (rx->dqo.page_pool) {
+ gve_free_to_page_pool(rx, buf_state, true);
+ gve_free_buf_state(rx, buf_state);
+ } else {
+ gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states,
+ buf_state);
+ }
+}
+
+void gve_reuse_buffer(struct gve_rx_ring *rx,
+ struct gve_rx_buf_state_dqo *buf_state)
+{
+ if (rx->dqo.page_pool) {
+ buf_state->page_info.netmem = 0;
+ gve_free_buf_state(rx, buf_state);
+ } else {
+ gve_dec_pagecnt_bias(&buf_state->page_info);
+ gve_try_recycle_buf(rx->gve, rx, buf_state);
+ }
+}
+
+int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc)
+{
+ struct gve_rx_buf_state_dqo *buf_state;
+
+ if (rx->xsk_pool) {
+ buf_state = gve_alloc_buf_state(rx);
+ if (unlikely(!buf_state))
+ return -ENOMEM;
+
+ buf_state->xsk_buff = xsk_buff_alloc(rx->xsk_pool);
+ if (unlikely(!buf_state->xsk_buff)) {
+ xsk_set_rx_need_wakeup(rx->xsk_pool);
+ gve_free_buf_state(rx, buf_state);
+ return -ENOMEM;
+ }
+ /* Allocated xsk buffer. Clear wakeup in case it was set. */
+ xsk_clear_rx_need_wakeup(rx->xsk_pool);
+ desc->buf_id = cpu_to_le16(buf_state - rx->dqo.buf_states);
+ desc->buf_addr =
+ cpu_to_le64(xsk_buff_xdp_get_dma(buf_state->xsk_buff));
+ return 0;
+ } else if (rx->dqo.page_pool) {
+ buf_state = gve_alloc_buf_state(rx);
+ if (WARN_ON_ONCE(!buf_state))
+ return -ENOMEM;
+
+ if (gve_alloc_from_page_pool(rx, buf_state))
+ goto free_buf_state;
+ } else {
+ buf_state = gve_get_recycled_buf_state(rx);
+ if (unlikely(!buf_state)) {
+ buf_state = gve_alloc_buf_state(rx);
+ if (unlikely(!buf_state))
+ return -ENOMEM;
+
+ if (unlikely(gve_alloc_qpl_page_dqo(rx, buf_state)))
+ goto free_buf_state;
+ }
+ }
+ desc->buf_id = cpu_to_le16(buf_state - rx->dqo.buf_states);
+ desc->buf_addr = cpu_to_le64(buf_state->addr +
+ buf_state->page_info.page_offset +
+ buf_state->page_info.pad);
+
+ return 0;
+
+free_buf_state:
+ gve_free_buf_state(rx, buf_state);
+ return -ENOMEM;
+}
diff --git a/drivers/net/ethernet/google/gve/gve_desc_dqo.h b/drivers/net/ethernet/google/gve/gve_desc_dqo.h
index f79cd0591110..f7786b03c744 100644
--- a/drivers/net/ethernet/google/gve/gve_desc_dqo.h
+++ b/drivers/net/ethernet/google/gve/gve_desc_dqo.h
@@ -236,7 +236,8 @@ struct gve_rx_compl_desc_dqo {
u8 status_error1;
- __le16 reserved5;
+ u8 reserved5;
+ u8 ts_sub_nsecs_low;
__le16 buf_id; /* Buffer ID which was sent on the buffer queue. */
union {
@@ -247,7 +248,8 @@ struct gve_rx_compl_desc_dqo {
};
__le32 hash;
__le32 reserved6;
- __le64 reserved7;
+ __le32 reserved7;
+ __le32 ts; /* timestamp in nanosecs */
} __packed;
static_assert(sizeof(struct gve_rx_compl_desc_dqo) == 32);
diff --git a/drivers/net/ethernet/google/gve/gve_dqo.h b/drivers/net/ethernet/google/gve/gve_dqo.h
index b81584829c40..5871f773f0c7 100644
--- a/drivers/net/ethernet/google/gve/gve_dqo.h
+++ b/drivers/net/ethernet/google/gve/gve_dqo.h
@@ -36,7 +36,10 @@ netdev_tx_t gve_tx_dqo(struct sk_buff *skb, struct net_device *dev);
netdev_features_t gve_features_check_dqo(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features);
+int gve_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp);
bool gve_tx_poll_dqo(struct gve_notify_block *block, bool do_clean);
+bool gve_xdp_poll_dqo(struct gve_notify_block *block);
+bool gve_xsk_tx_poll_dqo(struct gve_notify_block *block, int budget);
int gve_rx_poll_dqo(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings_dqo(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *cfg);
@@ -44,6 +47,12 @@ void gve_tx_free_rings_dqo(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *cfg);
void gve_tx_start_ring_dqo(struct gve_priv *priv, int idx);
void gve_tx_stop_ring_dqo(struct gve_priv *priv, int idx);
+int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg,
+ struct gve_rx_ring *rx,
+ int idx);
+void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct gve_rx_alloc_rings_cfg *cfg);
int gve_rx_alloc_rings_dqo(struct gve_priv *priv,
struct gve_rx_alloc_rings_cfg *cfg);
void gve_rx_free_rings_dqo(struct gve_priv *priv,
@@ -54,6 +63,7 @@ int gve_clean_tx_done_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
struct napi_struct *napi);
void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx);
void gve_rx_write_doorbell_dqo(const struct gve_priv *priv, int queue_idx);
+void gve_xdp_tx_flush_dqo(struct gve_priv *priv, u32 xdp_qid);
static inline void
gve_tx_put_doorbell_dqo(const struct gve_priv *priv,
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index 9aebfb843d9d..52500ae8348e 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -1,13 +1,14 @@
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
*
- * Copyright (C) 2015-2021 Google, Inc.
+ * Copyright (C) 2015-2024 Google LLC
*/
#include <linux/rtnetlink.h>
#include "gve.h"
#include "gve_adminq.h"
#include "gve_dqo.h"
+#include "gve_utils.h"
static void gve_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
@@ -62,18 +63,20 @@ static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = {
static const char gve_gstrings_tx_stats[][ETH_GSTRING_LEN] = {
"tx_posted_desc[%u]", "tx_completed_desc[%u]", "tx_consumed_desc[%u]", "tx_bytes[%u]",
"tx_wake[%u]", "tx_stop[%u]", "tx_event_counter[%u]",
- "tx_dma_mapping_error[%u]", "tx_xsk_wakeup[%u]",
- "tx_xsk_done[%u]", "tx_xsk_sent[%u]", "tx_xdp_xmit[%u]", "tx_xdp_xmit_errors[%u]"
+ "tx_dma_mapping_error[%u]",
+ "tx_xsk_sent[%u]", "tx_xdp_xmit[%u]", "tx_xdp_xmit_errors[%u]"
};
-static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = {
+static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] __nonstring_array = {
"adminq_prod_cnt", "adminq_cmd_fail", "adminq_timeouts",
"adminq_describe_device_cnt", "adminq_cfg_device_resources_cnt",
"adminq_register_page_list_cnt", "adminq_unregister_page_list_cnt",
"adminq_create_tx_queue_cnt", "adminq_create_rx_queue_cnt",
"adminq_destroy_tx_queue_cnt", "adminq_destroy_rx_queue_cnt",
"adminq_dcfg_device_resources_cnt", "adminq_set_driver_parameter_cnt",
- "adminq_report_stats_cnt", "adminq_report_link_speed_cnt"
+ "adminq_report_stats_cnt", "adminq_report_link_speed_cnt", "adminq_get_ptype_map_cnt",
+ "adminq_query_flow_rules", "adminq_cfg_flow_rule", "adminq_cfg_rss_cnt",
+ "adminq_query_rss_cnt", "adminq_report_nic_timestamp_cnt",
};
static const char gve_gstrings_priv_flags[][ETH_GSTRING_LEN] = {
@@ -89,42 +92,34 @@ static const char gve_gstrings_priv_flags[][ETH_GSTRING_LEN] = {
static void gve_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
struct gve_priv *priv = netdev_priv(netdev);
- char *s = (char *)data;
+	u8 *s = data;
int num_tx_queues;
int i, j;
num_tx_queues = gve_num_tx_queues(priv);
switch (stringset) {
case ETH_SS_STATS:
- memcpy(s, *gve_gstrings_main_stats,
- sizeof(gve_gstrings_main_stats));
- s += sizeof(gve_gstrings_main_stats);
-
- for (i = 0; i < priv->rx_cfg.num_queues; i++) {
- for (j = 0; j < NUM_GVE_RX_CNTS; j++) {
- snprintf(s, ETH_GSTRING_LEN,
- gve_gstrings_rx_stats[j], i);
- s += ETH_GSTRING_LEN;
- }
- }
+ for (i = 0; i < ARRAY_SIZE(gve_gstrings_main_stats); i++)
+ ethtool_puts(&s, gve_gstrings_main_stats[i]);
- for (i = 0; i < num_tx_queues; i++) {
- for (j = 0; j < NUM_GVE_TX_CNTS; j++) {
- snprintf(s, ETH_GSTRING_LEN,
- gve_gstrings_tx_stats[j], i);
- s += ETH_GSTRING_LEN;
- }
- }
+ for (i = 0; i < priv->rx_cfg.num_queues; i++)
+ for (j = 0; j < NUM_GVE_RX_CNTS; j++)
+ ethtool_sprintf(&s, gve_gstrings_rx_stats[j],
+ i);
+
+ for (i = 0; i < num_tx_queues; i++)
+ for (j = 0; j < NUM_GVE_TX_CNTS; j++)
+ ethtool_sprintf(&s, gve_gstrings_tx_stats[j],
+ i);
+
+ for (i = 0; i < ARRAY_SIZE(gve_gstrings_adminq_stats); i++)
+ ethtool_cpy(&s, gve_gstrings_adminq_stats[i]);
- memcpy(s, *gve_gstrings_adminq_stats,
- sizeof(gve_gstrings_adminq_stats));
- s += sizeof(gve_gstrings_adminq_stats);
break;
case ETH_SS_PRIV_FLAGS:
- memcpy(s, *gve_gstrings_priv_flags,
- sizeof(gve_gstrings_priv_flags));
- s += sizeof(gve_gstrings_priv_flags);
+ for (i = 0; i < ARRAY_SIZE(gve_gstrings_priv_flags); i++)
+ ethtool_puts(&s, gve_gstrings_priv_flags[i]);
break;
default:
@@ -165,6 +160,8 @@ gve_get_ethtool_stats(struct net_device *netdev,
struct stats *report_stats;
int *rx_qid_to_stats_idx;
int *tx_qid_to_stats_idx;
+ int num_stopped_rxqs = 0;
+ int num_stopped_txqs = 0;
struct gve_priv *priv;
bool skip_nic_stats;
unsigned int start;
@@ -181,12 +178,23 @@ gve_get_ethtool_stats(struct net_device *netdev,
sizeof(int), GFP_KERNEL);
if (!rx_qid_to_stats_idx)
return;
+ for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
+ rx_qid_to_stats_idx[ring] = -1;
+ if (!gve_rx_was_added_to_block(priv, ring))
+ num_stopped_rxqs++;
+ }
tx_qid_to_stats_idx = kmalloc_array(num_tx_queues,
sizeof(int), GFP_KERNEL);
if (!tx_qid_to_stats_idx) {
kfree(rx_qid_to_stats_idx);
return;
}
+ for (ring = 0; ring < num_tx_queues; ring++) {
+ tx_qid_to_stats_idx[ring] = -1;
+ if (!gve_tx_was_added_to_block(priv, ring))
+ num_stopped_txqs++;
+ }
+
for (rx_pkts = 0, rx_bytes = 0, rx_hsplit_pkt = 0,
rx_skb_alloc_fail = 0, rx_buf_alloc_fail = 0,
rx_desc_err_dropped_pkt = 0, rx_hsplit_unsplit_pkt = 0,
@@ -260,7 +268,13 @@ gve_get_ethtool_stats(struct net_device *netdev,
/* For rx cross-reporting stats, start from nic rx stats in report */
base_stats_idx = GVE_TX_STATS_REPORT_NUM * num_tx_queues +
GVE_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues;
- max_stats_idx = NIC_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues +
+ /* The boundary between driver stats and NIC stats shifts if there are
+ * stopped queues.
+ */
+ base_stats_idx += NIC_RX_STATS_REPORT_NUM * num_stopped_rxqs +
+ NIC_TX_STATS_REPORT_NUM * num_stopped_txqs;
+ max_stats_idx = NIC_RX_STATS_REPORT_NUM *
+ (priv->rx_cfg.num_queues - num_stopped_rxqs) +
base_stats_idx;
/* Preprocess the stats report for rx, map queue id to start index */
skip_nic_stats = false;
@@ -274,6 +288,10 @@ gve_get_ethtool_stats(struct net_device *netdev,
skip_nic_stats = true;
break;
}
+ if (queue_id < 0 || queue_id >= priv->rx_cfg.num_queues) {
+ net_err_ratelimited("Invalid rxq id in NIC stats\n");
+ continue;
+ }
rx_qid_to_stats_idx[queue_id] = stats_idx;
}
/* walk RX rings */
@@ -308,11 +326,11 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = rx->rx_copybreak_pkt;
data[i++] = rx->rx_copied_pkt;
/* stats from NIC */
- if (skip_nic_stats) {
+ stats_idx = rx_qid_to_stats_idx[ring];
+ if (skip_nic_stats || stats_idx < 0) {
/* skip NIC rx stats */
i += NIC_RX_STATS_REPORT_NUM;
} else {
- stats_idx = rx_qid_to_stats_idx[ring];
for (j = 0; j < NIC_RX_STATS_REPORT_NUM; j++) {
u64 value =
be64_to_cpu(report_stats[stats_idx + j].value);
@@ -338,7 +356,8 @@ gve_get_ethtool_stats(struct net_device *netdev,
/* For tx cross-reporting stats, start from nic tx stats in report */
base_stats_idx = max_stats_idx;
- max_stats_idx = NIC_TX_STATS_REPORT_NUM * num_tx_queues +
+ max_stats_idx = NIC_TX_STATS_REPORT_NUM *
+ (num_tx_queues - num_stopped_txqs) +
max_stats_idx;
/* Preprocess the stats report for tx, map queue id to start index */
skip_nic_stats = false;
@@ -352,6 +371,10 @@ gve_get_ethtool_stats(struct net_device *netdev,
skip_nic_stats = true;
break;
}
+ if (queue_id < 0 || queue_id >= num_tx_queues) {
+ net_err_ratelimited("Invalid txq id in NIC stats\n");
+ continue;
+ }
tx_qid_to_stats_idx[queue_id] = stats_idx;
}
/* walk TX rings */
@@ -369,7 +392,9 @@ gve_get_ethtool_stats(struct net_device *netdev,
*/
data[i++] = 0;
data[i++] = 0;
- data[i++] = tx->dqo_tx.tail - tx->dqo_tx.head;
+ data[i++] =
+ (tx->dqo_tx.tail - tx->dqo_tx.head) &
+ tx->mask;
}
do {
start =
@@ -383,20 +408,18 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = gve_tx_load_event_counter(priv, tx);
data[i++] = tx->dma_mapping_error;
/* stats from NIC */
- if (skip_nic_stats) {
+ stats_idx = tx_qid_to_stats_idx[ring];
+ if (skip_nic_stats || stats_idx < 0) {
/* skip NIC tx stats */
i += NIC_TX_STATS_REPORT_NUM;
} else {
- stats_idx = tx_qid_to_stats_idx[ring];
for (j = 0; j < NIC_TX_STATS_REPORT_NUM; j++) {
u64 value =
be64_to_cpu(report_stats[stats_idx + j].value);
data[i++] = value;
}
}
- /* XDP xsk counters */
- data[i++] = tx->xdp_xsk_wakeup;
- data[i++] = tx->xdp_xsk_done;
+ /* XDP counters */
do {
start = u64_stats_fetch_begin(&priv->tx[ring].statss);
data[i] = tx->xdp_xsk_sent;
@@ -428,6 +451,12 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = priv->adminq_set_driver_parameter_cnt;
data[i++] = priv->adminq_report_stats_cnt;
data[i++] = priv->adminq_report_link_speed_cnt;
+ data[i++] = priv->adminq_get_ptype_map_cnt;
+ data[i++] = priv->adminq_query_flow_rules_cnt;
+ data[i++] = priv->adminq_cfg_flow_rule_cnt;
+ data[i++] = priv->adminq_cfg_rss_cnt;
+ data[i++] = priv->adminq_query_rss_cnt;
+ data[i++] = priv->adminq_report_nic_timestamp_cnt;
}
static void gve_get_channels(struct net_device *netdev,
@@ -449,11 +478,12 @@ static int gve_set_channels(struct net_device *netdev,
struct ethtool_channels *cmd)
{
struct gve_priv *priv = netdev_priv(netdev);
- struct gve_queue_config new_tx_cfg = priv->tx_cfg;
- struct gve_queue_config new_rx_cfg = priv->rx_cfg;
+ struct gve_tx_queue_config new_tx_cfg = priv->tx_cfg;
+ struct gve_rx_queue_config new_rx_cfg = priv->rx_cfg;
struct ethtool_channels old_settings;
int new_tx = cmd->tx_count;
int new_rx = cmd->rx_count;
+ bool reset_rss = false;
gve_get_channels(netdev, &old_settings);
@@ -464,22 +494,27 @@ static int gve_set_channels(struct net_device *netdev,
if (!new_rx || !new_tx)
return -EINVAL;
- if (priv->num_xdp_queues &&
- (new_tx != new_rx || (2 * new_tx > priv->tx_cfg.max_queues))) {
- dev_err(&priv->pdev->dev, "XDP load failed: The number of configured RX queues should be equal to the number of configured TX queues and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues");
- return -EINVAL;
- }
+ if (priv->xdp_prog) {
+ if (new_tx != new_rx ||
+ (2 * new_tx > priv->tx_cfg.max_queues)) {
+ dev_err(&priv->pdev->dev, "The number of configured RX queues should be equal to the number of configured TX queues and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues when XDP program is installed");
+ return -EINVAL;
+ }
- if (!netif_carrier_ok(netdev)) {
- priv->tx_cfg.num_queues = new_tx;
- priv->rx_cfg.num_queues = new_rx;
- return 0;
+ /* One XDP TX queue per RX queue. */
+ new_tx_cfg.num_xdp_queues = new_rx;
+ } else {
+ new_tx_cfg.num_xdp_queues = 0;
}
+ if (new_rx != priv->rx_cfg.num_queues &&
+ priv->cache_rss_config && !netif_is_rxfh_configured(netdev))
+ reset_rss = true;
+
new_tx_cfg.num_queues = new_tx;
new_rx_cfg.num_queues = new_rx;
- return gve_adjust_queues(priv, new_rx_cfg, new_tx_cfg);
+ return gve_adjust_queues(priv, new_rx_cfg, new_tx_cfg, reset_rss);
}
static void gve_get_ringparam(struct net_device *netdev,
@@ -489,11 +524,13 @@ static void gve_get_ringparam(struct net_device *netdev,
{
struct gve_priv *priv = netdev_priv(netdev);
- cmd->rx_max_pending = priv->rx_desc_cnt;
- cmd->tx_max_pending = priv->tx_desc_cnt;
+ cmd->rx_max_pending = priv->max_rx_desc_cnt;
+ cmd->tx_max_pending = priv->max_tx_desc_cnt;
cmd->rx_pending = priv->rx_desc_cnt;
cmd->tx_pending = priv->tx_desc_cnt;
+ kernel_cmd->rx_buf_len = priv->rx_cfg.packet_buffer_size;
+
if (!gve_header_split_supported(priv))
kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_UNKNOWN;
else if (priv->header_split_enabled)
@@ -502,20 +539,87 @@ static void gve_get_ringparam(struct net_device *netdev,
kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_DISABLED;
}
+static int gve_validate_req_ring_size(struct gve_priv *priv, u16 new_tx_desc_cnt,
+ u16 new_rx_desc_cnt)
+{
+ /* check for valid range */
+ if (new_tx_desc_cnt < priv->min_tx_desc_cnt ||
+ new_tx_desc_cnt > priv->max_tx_desc_cnt ||
+ new_rx_desc_cnt < priv->min_rx_desc_cnt ||
+ new_rx_desc_cnt > priv->max_rx_desc_cnt) {
+ dev_err(&priv->pdev->dev, "Requested descriptor count out of range\n");
+ return -EINVAL;
+ }
+
+ if (!is_power_of_2(new_tx_desc_cnt) || !is_power_of_2(new_rx_desc_cnt)) {
+ dev_err(&priv->pdev->dev, "Requested descriptor count has to be a power of 2\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int gve_set_ring_sizes_config(struct gve_priv *priv, u16 new_tx_desc_cnt,
+ u16 new_rx_desc_cnt,
+ struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+{
+ if (new_tx_desc_cnt == priv->tx_desc_cnt &&
+ new_rx_desc_cnt == priv->rx_desc_cnt)
+ return 0;
+
+ if (!priv->modify_ring_size_enabled) {
+ dev_err(&priv->pdev->dev, "Modify ring size is not supported.\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (gve_validate_req_ring_size(priv, new_tx_desc_cnt, new_rx_desc_cnt))
+ return -EINVAL;
+
+ tx_alloc_cfg->ring_size = new_tx_desc_cnt;
+ rx_alloc_cfg->ring_size = new_rx_desc_cnt;
+ return 0;
+}
+
static int gve_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *cmd,
struct kernel_ethtool_ringparam *kernel_cmd,
struct netlink_ext_ack *extack)
{
+ struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
+ struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
struct gve_priv *priv = netdev_priv(netdev);
+ int err;
- if (priv->tx_desc_cnt != cmd->tx_pending ||
- priv->rx_desc_cnt != cmd->rx_pending) {
- dev_info(&priv->pdev->dev, "Modify ring size is not supported.\n");
- return -EOPNOTSUPP;
- }
+ gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
+
+ err = gve_set_rx_buf_len_config(priv, kernel_cmd->rx_buf_len, extack,
+ &rx_alloc_cfg);
+ if (err)
+ return err;
+
+ err = gve_set_hsplit_config(priv, kernel_cmd->tcp_data_split,
+ &rx_alloc_cfg);
+ if (err)
+ return err;
- return gve_set_hsplit_config(priv, kernel_cmd->tcp_data_split);
+ err = gve_set_ring_sizes_config(priv, cmd->tx_pending, cmd->rx_pending,
+ &tx_alloc_cfg, &rx_alloc_cfg);
+ if (err)
+ return err;
+
+ if (netif_running(priv->dev)) {
+ err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
+ if (err)
+ return err;
+ } else {
+ /* Set ring params for the next up */
+ priv->rx_cfg.packet_buffer_size =
+ rx_alloc_cfg.packet_buffer_size;
+ priv->header_split_enabled = rx_alloc_cfg.enable_header_split;
+ priv->tx_desc_cnt = tx_alloc_cfg.ring_size;
+ priv->rx_desc_cnt = rx_alloc_cfg.ring_size;
+ }
+ return 0;
}
static int gve_user_reset(struct net_device *netdev, u32 *flags)
@@ -554,8 +658,7 @@ static int gve_set_tunable(struct net_device *netdev,
switch (etuna->id) {
case ETHTOOL_RX_COPYBREAK:
{
- u32 max_copybreak = gve_is_gqi(priv) ?
- GVE_DEFAULT_RX_BUFFER_SIZE : priv->data_buffer_size_dqo;
+ u32 max_copybreak = priv->rx_cfg.packet_buffer_size;
len = *(u32 *)value;
if (len > max_copybreak)
@@ -573,7 +676,7 @@ static u32 gve_get_priv_flags(struct net_device *netdev)
struct gve_priv *priv = netdev_priv(netdev);
u32 ret_flags = 0;
- /* Only 1 flag exists currently: report-stats (BIT(O)), so set that flag. */
+ /* Only 1 flag exists currently: report-stats (BIT(0)), so set that flag. */
if (priv->ethtool_flags & BIT(0))
ret_flags |= BIT(0);
return ret_flags;
@@ -611,7 +714,7 @@ static int gve_set_priv_flags(struct net_device *netdev, u32 flags)
memset(priv->stats_report->stats, 0, (tx_stats_num + rx_stats_num) *
sizeof(struct stats));
- del_timer_sync(&priv->stats_report_timer);
+ timer_delete_sync(&priv->stats_report_timer);
}
return 0;
}
@@ -689,9 +792,170 @@ static int gve_set_coalesce(struct net_device *netdev,
return 0;
}
+static int gve_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+ int err = 0;
+
+ if (!(netdev->features & NETIF_F_NTUPLE))
+ return -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ err = gve_add_flow_rule(priv, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ err = gve_del_flow_rule(priv, cmd);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int gve_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, u32 *rule_locs)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+ int err = 0;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = priv->rx_cfg.num_queues;
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ if (!priv->max_flow_rules)
+ return -EOPNOTSUPP;
+
+ err = gve_adminq_query_flow_rules(priv, GVE_FLOW_RULE_QUERY_STATS, 0);
+ if (err)
+ return err;
+
+ cmd->rule_cnt = priv->num_flow_rules;
+ cmd->data = priv->max_flow_rules;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ err = gve_get_flow_rule_entry(priv, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ err = gve_get_flow_rule_ids(priv, cmd, (u32 *)rule_locs);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static u32 gve_get_rxfh_key_size(struct net_device *netdev)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+
+ return priv->rss_key_size;
+}
+
+static u32 gve_get_rxfh_indir_size(struct net_device *netdev)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+
+ return priv->rss_lut_size;
+}
+
+static void gve_get_rss_config_cache(struct gve_priv *priv,
+ struct ethtool_rxfh_param *rxfh)
+{
+ struct gve_rss_config *rss_config = &priv->rss_config;
+
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+
+ if (rxfh->key) {
+ rxfh->key_size = priv->rss_key_size;
+ memcpy(rxfh->key, rss_config->hash_key, priv->rss_key_size);
+ }
+
+ if (rxfh->indir) {
+ rxfh->indir_size = priv->rss_lut_size;
+ memcpy(rxfh->indir, rss_config->hash_lut,
+ priv->rss_lut_size * sizeof(*rxfh->indir));
+ }
+}
+
+static int gve_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+
+ if (!priv->rss_key_size || !priv->rss_lut_size)
+ return -EOPNOTSUPP;
+
+ if (priv->cache_rss_config) {
+ gve_get_rss_config_cache(priv, rxfh);
+ return 0;
+ }
+
+ return gve_adminq_query_rss_config(priv, rxfh);
+}
+
+static void gve_set_rss_config_cache(struct gve_priv *priv,
+ struct ethtool_rxfh_param *rxfh)
+{
+ struct gve_rss_config *rss_config = &priv->rss_config;
+
+ if (rxfh->key)
+ memcpy(rss_config->hash_key, rxfh->key, priv->rss_key_size);
+
+ if (rxfh->indir)
+ memcpy(rss_config->hash_lut, rxfh->indir,
+ priv->rss_lut_size * sizeof(*rxfh->indir));
+}
+
+static int gve_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+ int err;
+
+ if (!priv->rss_key_size || !priv->rss_lut_size)
+ return -EOPNOTSUPP;
+
+ err = gve_adminq_configure_rss(priv, rxfh);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Fail to configure RSS config");
+ return err;
+ }
+
+ if (priv->cache_rss_config)
+ gve_set_rss_config_cache(priv, rxfh);
+
+ return 0;
+}
+
+static int gve_get_ts_info(struct net_device *netdev,
+ struct kernel_ethtool_ts_info *info)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+
+ ethtool_op_get_ts_info(netdev, info);
+
+ if (priv->nic_timestamp_supported) {
+ info->so_timestamping |= SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->rx_filters |= BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_ALL);
+
+ if (priv->ptp)
+ info->phc_index = ptp_clock_index(priv->ptp->clock);
+ }
+
+ return 0;
+}
+
const struct ethtool_ops gve_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
- .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT,
+ .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT |
+ ETHTOOL_RING_USE_RX_BUF_LEN,
.get_drvinfo = gve_get_drvinfo,
.get_strings = gve_get_strings,
.get_sset_count = gve_get_sset_count,
@@ -700,6 +964,12 @@ const struct ethtool_ops gve_ethtool_ops = {
.get_msglevel = gve_get_msglevel,
.set_channels = gve_set_channels,
.get_channels = gve_get_channels,
+ .set_rxnfc = gve_set_rxnfc,
+ .get_rxnfc = gve_get_rxnfc,
+ .get_rxfh_indir_size = gve_get_rxfh_indir_size,
+ .get_rxfh_key_size = gve_get_rxfh_key_size,
+ .get_rxfh = gve_get_rxfh,
+ .set_rxfh = gve_set_rxfh,
.get_link = ethtool_op_get_link,
.get_coalesce = gve_get_coalesce,
.set_coalesce = gve_set_coalesce,
@@ -710,5 +980,6 @@ const struct ethtool_ops gve_ethtool_ops = {
.set_tunable = gve_set_tunable,
.get_priv_flags = gve_get_priv_flags,
.set_priv_flags = gve_set_priv_flags,
- .get_link_ksettings = gve_get_link_ksettings
+ .get_link_ksettings = gve_get_link_ksettings,
+ .get_ts_info = gve_get_ts_info,
};
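(Aside, not part of the patch: the gve_get_strings() rework above leans on the ethtool string helpers, which copy each stat name into a fixed ETH_GSTRING_LEN-sized slot and advance the output cursor by one slot per call. The following is a minimal stand-alone C sketch of that cursor-advancing pattern only; emit_string() and emit_fmt() are illustrative stand-ins, not the kernel's ethtool_puts()/ethtool_sprintf() implementations.)

/* Each stat name occupies one fixed-width slot; the cursor always advances
 * by a full slot, so short names leave the remainder of the slot zeroed.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_GSTRING_LEN 32

static void emit_string(uint8_t **cursor, const char *str)
{
	/* Copy at most one slot, keep it NUL-terminated, advance one slot. */
	strncpy((char *)*cursor, str, ETH_GSTRING_LEN - 1);
	(*cursor)[ETH_GSTRING_LEN - 1] = '\0';
	*cursor += ETH_GSTRING_LEN;
}

static void emit_fmt(uint8_t **cursor, const char *fmt, int qid)
{
	/* Formatted variant, e.g. for per-queue stat names. */
	snprintf((char *)*cursor, ETH_GSTRING_LEN, fmt, qid);
	*cursor += ETH_GSTRING_LEN;
}

int main(void)
{
	static const char main_stats[][ETH_GSTRING_LEN] = {
		"rx_packets", "tx_packets",
	};
	uint8_t buf[8 * ETH_GSTRING_LEN] = { 0 };
	uint8_t *s = buf;
	size_t i, nslots;
	int q;

	for (i = 0; i < sizeof(main_stats) / sizeof(main_stats[0]); i++)
		emit_string(&s, main_stats[i]);
	for (q = 0; q < 2; q++)
		emit_fmt(&s, "rx_queue_%d_bytes", q);

	nslots = (size_t)(s - buf) / ETH_GSTRING_LEN;
	for (i = 0; i < nslots; i++)
		printf("slot %zu: %s\n", i, (char *)buf + i * ETH_GSTRING_LEN);
	return 0;
}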
diff --git a/drivers/net/ethernet/google/gve/gve_flow_rule.c b/drivers/net/ethernet/google/gve/gve_flow_rule.c
new file mode 100644
index 000000000000..0bb8cd1876a3
--- /dev/null
+++ b/drivers/net/ethernet/google/gve/gve_flow_rule.c
@@ -0,0 +1,298 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Google virtual Ethernet (gve) driver
+ *
+ * Copyright (C) 2015-2024 Google LLC
+ */
+
+#include "gve.h"
+#include "gve_adminq.h"
+
+static
+int gve_fill_ethtool_flow_spec(struct ethtool_rx_flow_spec *fsp,
+ struct gve_adminq_queried_flow_rule *rule)
+{
+ struct gve_adminq_flow_rule *flow_rule = &rule->flow_rule;
+ static const u16 flow_type_lut[] = {
+ [GVE_FLOW_TYPE_TCPV4] = TCP_V4_FLOW,
+ [GVE_FLOW_TYPE_UDPV4] = UDP_V4_FLOW,
+ [GVE_FLOW_TYPE_SCTPV4] = SCTP_V4_FLOW,
+ [GVE_FLOW_TYPE_AHV4] = AH_V4_FLOW,
+ [GVE_FLOW_TYPE_ESPV4] = ESP_V4_FLOW,
+ [GVE_FLOW_TYPE_TCPV6] = TCP_V6_FLOW,
+ [GVE_FLOW_TYPE_UDPV6] = UDP_V6_FLOW,
+ [GVE_FLOW_TYPE_SCTPV6] = SCTP_V6_FLOW,
+ [GVE_FLOW_TYPE_AHV6] = AH_V6_FLOW,
+ [GVE_FLOW_TYPE_ESPV6] = ESP_V6_FLOW,
+ };
+
+ if (be16_to_cpu(flow_rule->flow_type) >= ARRAY_SIZE(flow_type_lut))
+ return -EINVAL;
+
+ fsp->flow_type = flow_type_lut[be16_to_cpu(flow_rule->flow_type)];
+
+ memset(&fsp->h_u, 0, sizeof(fsp->h_u));
+ memset(&fsp->h_ext, 0, sizeof(fsp->h_ext));
+ memset(&fsp->m_u, 0, sizeof(fsp->m_u));
+ memset(&fsp->m_ext, 0, sizeof(fsp->m_ext));
+
+ switch (fsp->flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ fsp->h_u.tcp_ip4_spec.ip4src = flow_rule->key.src_ip[0];
+ fsp->h_u.tcp_ip4_spec.ip4dst = flow_rule->key.dst_ip[0];
+ fsp->h_u.tcp_ip4_spec.psrc = flow_rule->key.src_port;
+ fsp->h_u.tcp_ip4_spec.pdst = flow_rule->key.dst_port;
+ fsp->h_u.tcp_ip4_spec.tos = flow_rule->key.tos;
+ fsp->m_u.tcp_ip4_spec.ip4src = flow_rule->mask.src_ip[0];
+ fsp->m_u.tcp_ip4_spec.ip4dst = flow_rule->mask.dst_ip[0];
+ fsp->m_u.tcp_ip4_spec.psrc = flow_rule->mask.src_port;
+ fsp->m_u.tcp_ip4_spec.pdst = flow_rule->mask.dst_port;
+ fsp->m_u.tcp_ip4_spec.tos = flow_rule->mask.tos;
+ break;
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ fsp->h_u.ah_ip4_spec.ip4src = flow_rule->key.src_ip[0];
+ fsp->h_u.ah_ip4_spec.ip4dst = flow_rule->key.dst_ip[0];
+ fsp->h_u.ah_ip4_spec.spi = flow_rule->key.spi;
+ fsp->h_u.ah_ip4_spec.tos = flow_rule->key.tos;
+ fsp->m_u.ah_ip4_spec.ip4src = flow_rule->mask.src_ip[0];
+ fsp->m_u.ah_ip4_spec.ip4dst = flow_rule->mask.dst_ip[0];
+ fsp->m_u.ah_ip4_spec.spi = flow_rule->mask.spi;
+ fsp->m_u.ah_ip4_spec.tos = flow_rule->mask.tos;
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ memcpy(fsp->h_u.tcp_ip6_spec.ip6src, &flow_rule->key.src_ip,
+ sizeof(struct in6_addr));
+ memcpy(fsp->h_u.tcp_ip6_spec.ip6dst, &flow_rule->key.dst_ip,
+ sizeof(struct in6_addr));
+ fsp->h_u.tcp_ip6_spec.psrc = flow_rule->key.src_port;
+ fsp->h_u.tcp_ip6_spec.pdst = flow_rule->key.dst_port;
+ fsp->h_u.tcp_ip6_spec.tclass = flow_rule->key.tclass;
+ memcpy(fsp->m_u.tcp_ip6_spec.ip6src, &flow_rule->mask.src_ip,
+ sizeof(struct in6_addr));
+ memcpy(fsp->m_u.tcp_ip6_spec.ip6dst, &flow_rule->mask.dst_ip,
+ sizeof(struct in6_addr));
+ fsp->m_u.tcp_ip6_spec.psrc = flow_rule->mask.src_port;
+ fsp->m_u.tcp_ip6_spec.pdst = flow_rule->mask.dst_port;
+ fsp->m_u.tcp_ip6_spec.tclass = flow_rule->mask.tclass;
+ break;
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ memcpy(fsp->h_u.ah_ip6_spec.ip6src, &flow_rule->key.src_ip,
+ sizeof(struct in6_addr));
+ memcpy(fsp->h_u.ah_ip6_spec.ip6dst, &flow_rule->key.dst_ip,
+ sizeof(struct in6_addr));
+ fsp->h_u.ah_ip6_spec.spi = flow_rule->key.spi;
+ fsp->h_u.ah_ip6_spec.tclass = flow_rule->key.tclass;
+ memcpy(fsp->m_u.ah_ip6_spec.ip6src, &flow_rule->mask.src_ip,
+ sizeof(struct in6_addr));
+ memcpy(fsp->m_u.ah_ip6_spec.ip6dst, &flow_rule->mask.dst_ip,
+ sizeof(struct in6_addr));
+ fsp->m_u.ah_ip6_spec.spi = flow_rule->mask.spi;
+ fsp->m_u.ah_ip6_spec.tclass = flow_rule->mask.tclass;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ fsp->ring_cookie = be16_to_cpu(flow_rule->action);
+
+ return 0;
+}
+
+static int gve_generate_flow_rule(struct gve_priv *priv, struct ethtool_rx_flow_spec *fsp,
+ struct gve_adminq_flow_rule *rule)
+{
+ static const u16 flow_type_lut[] = {
+ [TCP_V4_FLOW] = GVE_FLOW_TYPE_TCPV4,
+ [UDP_V4_FLOW] = GVE_FLOW_TYPE_UDPV4,
+ [SCTP_V4_FLOW] = GVE_FLOW_TYPE_SCTPV4,
+ [AH_V4_FLOW] = GVE_FLOW_TYPE_AHV4,
+ [ESP_V4_FLOW] = GVE_FLOW_TYPE_ESPV4,
+ [TCP_V6_FLOW] = GVE_FLOW_TYPE_TCPV6,
+ [UDP_V6_FLOW] = GVE_FLOW_TYPE_UDPV6,
+ [SCTP_V6_FLOW] = GVE_FLOW_TYPE_SCTPV6,
+ [AH_V6_FLOW] = GVE_FLOW_TYPE_AHV6,
+ [ESP_V6_FLOW] = GVE_FLOW_TYPE_ESPV6,
+ };
+ u32 flow_type;
+
+ if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
+ return -EOPNOTSUPP;
+
+ if (fsp->ring_cookie >= priv->rx_cfg.num_queues)
+ return -EINVAL;
+
+ rule->action = cpu_to_be16(fsp->ring_cookie);
+
+ flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
+ if (!flow_type || flow_type >= ARRAY_SIZE(flow_type_lut))
+ return -EINVAL;
+
+ rule->flow_type = cpu_to_be16(flow_type_lut[flow_type]);
+
+ switch (flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ rule->key.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
+ rule->key.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
+ rule->key.src_port = fsp->h_u.tcp_ip4_spec.psrc;
+ rule->key.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
+ rule->mask.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
+ rule->mask.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
+ rule->mask.src_port = fsp->m_u.tcp_ip4_spec.psrc;
+ rule->mask.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
+ break;
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ rule->key.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
+ rule->key.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
+ rule->key.spi = fsp->h_u.ah_ip4_spec.spi;
+ rule->mask.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
+ rule->mask.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
+ rule->mask.spi = fsp->m_u.ah_ip4_spec.spi;
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ memcpy(&rule->key.src_ip, fsp->h_u.tcp_ip6_spec.ip6src,
+ sizeof(struct in6_addr));
+ memcpy(&rule->key.dst_ip, fsp->h_u.tcp_ip6_spec.ip6dst,
+ sizeof(struct in6_addr));
+ rule->key.src_port = fsp->h_u.tcp_ip6_spec.psrc;
+ rule->key.dst_port = fsp->h_u.tcp_ip6_spec.pdst;
+ memcpy(&rule->mask.src_ip, fsp->m_u.tcp_ip6_spec.ip6src,
+ sizeof(struct in6_addr));
+ memcpy(&rule->mask.dst_ip, fsp->m_u.tcp_ip6_spec.ip6dst,
+ sizeof(struct in6_addr));
+ rule->mask.src_port = fsp->m_u.tcp_ip6_spec.psrc;
+ rule->mask.dst_port = fsp->m_u.tcp_ip6_spec.pdst;
+ break;
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ memcpy(&rule->key.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
+ sizeof(struct in6_addr));
+ memcpy(&rule->key.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
+ sizeof(struct in6_addr));
+ rule->key.spi = fsp->h_u.ah_ip6_spec.spi;
+ memcpy(&rule->mask.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
+ sizeof(struct in6_addr));
+ memcpy(&rule->mask.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
+ sizeof(struct in6_addr));
+ rule->key.spi = fsp->h_u.ah_ip6_spec.spi;
+ break;
+ default:
+ /* not doing un-parsed flow types */
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int gve_get_flow_rule_entry(struct gve_priv *priv, struct ethtool_rxnfc *cmd)
+{
+ struct gve_adminq_queried_flow_rule *rules_cache = priv->flow_rules_cache.rules_cache;
+ struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+ u32 *cache_num = &priv->flow_rules_cache.rules_cache_num;
+ struct gve_adminq_queried_flow_rule *rule = NULL;
+ int err = 0;
+ u32 i;
+
+ if (!priv->max_flow_rules)
+ return -EOPNOTSUPP;
+
+ if (!priv->flow_rules_cache.rules_cache_synced ||
+ fsp->location < be32_to_cpu(rules_cache[0].location) ||
+ fsp->location > be32_to_cpu(rules_cache[*cache_num - 1].location)) {
+ err = gve_adminq_query_flow_rules(priv, GVE_FLOW_RULE_QUERY_RULES, fsp->location);
+ if (err)
+ return err;
+
+ priv->flow_rules_cache.rules_cache_synced = true;
+ }
+
+ for (i = 0; i < *cache_num; i++) {
+ if (fsp->location == be32_to_cpu(rules_cache[i].location)) {
+ rule = &rules_cache[i];
+ break;
+ }
+ }
+
+ if (!rule)
+ return -EINVAL;
+
+ err = gve_fill_ethtool_flow_spec(fsp, rule);
+
+ return err;
+}
+
+int gve_get_flow_rule_ids(struct gve_priv *priv, struct ethtool_rxnfc *cmd, u32 *rule_locs)
+{
+ __be32 *rule_ids_cache = priv->flow_rules_cache.rule_ids_cache;
+ u32 *cache_num = &priv->flow_rules_cache.rule_ids_cache_num;
+ u32 starting_rule_id = 0;
+ u32 i = 0, j = 0;
+ int err = 0;
+
+ if (!priv->max_flow_rules)
+ return -EOPNOTSUPP;
+
+ do {
+ err = gve_adminq_query_flow_rules(priv, GVE_FLOW_RULE_QUERY_IDS,
+ starting_rule_id);
+ if (err)
+ return err;
+
+ for (i = 0; i < *cache_num; i++) {
+ if (j >= cmd->rule_cnt)
+ return -EMSGSIZE;
+
+ rule_locs[j++] = be32_to_cpu(rule_ids_cache[i]);
+ starting_rule_id = be32_to_cpu(rule_ids_cache[i]) + 1;
+ }
+ } while (*cache_num != 0);
+ cmd->data = priv->max_flow_rules;
+
+ return err;
+}
+
+int gve_add_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp = &cmd->fs;
+ struct gve_adminq_flow_rule *rule = NULL;
+ int err;
+
+ if (!priv->max_flow_rules)
+ return -EOPNOTSUPP;
+
+ rule = kvzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ err = gve_generate_flow_rule(priv, fsp, rule);
+ if (err)
+ goto out;
+
+ err = gve_adminq_add_flow_rule(priv, rule, fsp->location);
+
+out:
+ kvfree(rule);
+ if (err)
+ dev_err(&priv->pdev->dev, "Failed to add the flow rule: %u", fsp->location);
+
+ return err;
+}
+
+int gve_del_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ if (!priv->max_flow_rules)
+ return -EOPNOTSUPP;
+
+ return gve_adminq_del_flow_rule(priv, fsp->location);
+}
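(Aside, not part of the patch: gve_generate_flow_rule() and gve_fill_ethtool_flow_spec() above both translate between ethtool flow types and GVE flow types through a sparse lookup table guarded by an ARRAY_SIZE() range check. Below is a small self-contained C sketch of that table-plus-guard pattern only; the enum values and function names are illustrative, not the driver's.)

/* Table-driven type translation: reject indices outside the table and treat
 * holes in the sparse table (value 0) as unsupported, similar in spirit to
 * the !flow_type / ARRAY_SIZE() checks in the hunks above.
 */
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum hw_flow_type {
	HW_FLOW_UNSUPPORTED = 0,
	HW_FLOW_TCPV4,
	HW_FLOW_UDPV4,
};

/* Keyed by the user-visible type, e.g. ethtool's TCP_V4_FLOW (1), UDP_V4_FLOW (2). */
static const enum hw_flow_type flow_type_lut[] = {
	[1] = HW_FLOW_TCPV4,
	[2] = HW_FLOW_UDPV4,
};

static int translate_flow_type(unsigned int user_type, enum hw_flow_type *out)
{
	if (user_type >= ARRAY_SIZE(flow_type_lut))
		return -1;		/* index past the end of the table */
	if (flow_type_lut[user_type] == HW_FLOW_UNSUPPORTED)
		return -1;		/* hole in the sparse table */
	*out = flow_type_lut[user_type];
	return 0;
}

int main(void)
{
	static const unsigned int probe[] = { 1, 2, 3, 42 };
	enum hw_flow_type hw;
	size_t i;

	for (i = 0; i < ARRAY_SIZE(probe); i++)
		printf("user type %u -> %s\n", probe[i],
		       translate_flow_type(probe[i], &hw) ? "unsupported" : "supported");
	return 0;
}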
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 166bd827a6d7..a5a2b18d309b 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -1,14 +1,16 @@
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
*
- * Copyright (C) 2015-2021 Google, Inc.
+ * Copyright (C) 2015-2024 Google LLC
*/
+#include <linux/bitmap.h>
#include <linux/bpf.h>
#include <linux/cpumask.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sched.h>
@@ -16,6 +18,7 @@
#include <linux/workqueue.h>
#include <linux/utsname.h>
#include <linux/version.h>
+#include <net/netdev_queues.h>
#include <net/sch_generic.h>
#include <net/xdp_sock_drv.h>
#include "gve.h"
@@ -139,6 +142,86 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
}
}
+static int gve_alloc_flow_rule_caches(struct gve_priv *priv)
+{
+ struct gve_flow_rules_cache *flow_rules_cache = &priv->flow_rules_cache;
+ int err = 0;
+
+ if (!priv->max_flow_rules)
+ return 0;
+
+ flow_rules_cache->rules_cache =
+ kvcalloc(GVE_FLOW_RULES_CACHE_SIZE, sizeof(*flow_rules_cache->rules_cache),
+ GFP_KERNEL);
+ if (!flow_rules_cache->rules_cache) {
+ dev_err(&priv->pdev->dev, "Cannot alloc flow rules cache\n");
+ return -ENOMEM;
+ }
+
+ flow_rules_cache->rule_ids_cache =
+ kvcalloc(GVE_FLOW_RULE_IDS_CACHE_SIZE, sizeof(*flow_rules_cache->rule_ids_cache),
+ GFP_KERNEL);
+ if (!flow_rules_cache->rule_ids_cache) {
+ dev_err(&priv->pdev->dev, "Cannot alloc flow rule ids cache\n");
+ err = -ENOMEM;
+ goto free_rules_cache;
+ }
+
+ return 0;
+
+free_rules_cache:
+ kvfree(flow_rules_cache->rules_cache);
+ flow_rules_cache->rules_cache = NULL;
+ return err;
+}
+
+static void gve_free_flow_rule_caches(struct gve_priv *priv)
+{
+ struct gve_flow_rules_cache *flow_rules_cache = &priv->flow_rules_cache;
+
+ kvfree(flow_rules_cache->rule_ids_cache);
+ flow_rules_cache->rule_ids_cache = NULL;
+ kvfree(flow_rules_cache->rules_cache);
+ flow_rules_cache->rules_cache = NULL;
+}
+
+static int gve_alloc_rss_config_cache(struct gve_priv *priv)
+{
+ struct gve_rss_config *rss_config = &priv->rss_config;
+
+ if (!priv->cache_rss_config)
+ return 0;
+
+ rss_config->hash_key = kcalloc(priv->rss_key_size,
+ sizeof(rss_config->hash_key[0]),
+ GFP_KERNEL);
+ if (!rss_config->hash_key)
+ return -ENOMEM;
+
+ rss_config->hash_lut = kcalloc(priv->rss_lut_size,
+ sizeof(rss_config->hash_lut[0]),
+ GFP_KERNEL);
+ if (!rss_config->hash_lut)
+ goto free_rss_key_cache;
+
+ return 0;
+
+free_rss_key_cache:
+ kfree(rss_config->hash_key);
+ rss_config->hash_key = NULL;
+ return -ENOMEM;
+}
+
+static void gve_free_rss_config_cache(struct gve_priv *priv)
+{
+ struct gve_rss_config *rss_config = &priv->rss_config;
+
+ kfree(rss_config->hash_key);
+ kfree(rss_config->hash_lut);
+
+ memset(rss_config, 0, sizeof(*rss_config));
+}
+
static int gve_alloc_counter_array(struct gve_priv *priv)
{
priv->counter_array =
@@ -186,7 +269,8 @@ static void gve_stats_report_schedule(struct gve_priv *priv)
static void gve_stats_report_timer(struct timer_list *t)
{
- struct gve_priv *priv = from_timer(priv, t, stats_report_timer);
+ struct gve_priv *priv = timer_container_of(priv, t,
+ stats_report_timer);
mod_timer(&priv->stats_report_timer,
round_jiffies(jiffies +
@@ -220,7 +304,7 @@ static void gve_free_stats_report(struct gve_priv *priv)
if (!priv->stats_report)
return;
- del_timer_sync(&priv->stats_report_timer);
+ timer_delete_sync(&priv->stats_report_timer);
dma_free_coherent(&priv->pdev->dev, priv->stats_report_len,
priv->stats_report, priv->stats_report_bus);
priv->stats_report = NULL;
@@ -253,6 +337,18 @@ static irqreturn_t gve_intr_dqo(int irq, void *arg)
return IRQ_HANDLED;
}
+static int gve_is_napi_on_home_cpu(struct gve_priv *priv, u32 irq)
+{
+ int cpu_curr = smp_processor_id();
+ const struct cpumask *aff_mask;
+
+ aff_mask = irq_get_effective_affinity_mask(irq);
+ if (unlikely(!aff_mask))
+ return 1;
+
+ return cpumask_test_cpu(cpu_curr, aff_mask);
+}
+
int gve_napi_poll(struct napi_struct *napi, int budget)
{
struct gve_notify_block *block;
@@ -276,6 +372,14 @@ int gve_napi_poll(struct napi_struct *napi, int budget)
if (block->rx) {
work_done = gve_rx_poll(block, budget);
+
+ /* Poll XSK TX as part of RX NAPI. Setup re-poll based on max of
+ * TX and RX work done.
+ */
+ if (priv->xdp_prog)
+ work_done = max_t(int, work_done,
+ gve_xsk_tx_poll(block, budget));
+
reschedule |= work_done == budget;
}
@@ -311,19 +415,42 @@ int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
bool reschedule = false;
int work_done = 0;
- if (block->tx)
- reschedule |= gve_tx_poll_dqo(block, /*do_clean=*/true);
+ if (block->tx) {
+ if (block->tx->q_num < priv->tx_cfg.num_queues)
+ reschedule |= gve_tx_poll_dqo(block, /*do_clean=*/true);
+ else
+ reschedule |= gve_xdp_poll_dqo(block);
+ }
if (!budget)
return 0;
if (block->rx) {
work_done = gve_rx_poll_dqo(block, budget);
+
+ /* Poll XSK TX as part of RX NAPI. Setup re-poll based on if
+ * either datapath has more work to do.
+ */
+ if (priv->xdp_prog)
+ reschedule |= gve_xsk_tx_poll_dqo(block, budget);
reschedule |= work_done == budget;
}
- if (reschedule)
- return budget;
+ if (reschedule) {
+ /* Reschedule by returning budget only if already on the correct
+ * cpu.
+ */
+ if (likely(gve_is_napi_on_home_cpu(priv, block->irq)))
+ return budget;
+
+ /* If not on the cpu with which this queue's irq has affinity
+ * with, we avoid rescheduling napi and arm the irq instead so
+ * that napi gets rescheduled back eventually onto the right
+ * cpu.
+ */
+ if (work_done == budget)
+ work_done--;
+ }
if (likely(napi_complete_done(napi, work_done))) {
/* Enable interrupts again.
@@ -341,10 +468,19 @@ int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
return work_done;
}
+static const struct cpumask *gve_get_node_mask(struct gve_priv *priv)
+{
+ if (priv->numa_node == NUMA_NO_NODE)
+ return cpu_all_mask;
+ else
+ return cpumask_of_node(priv->numa_node);
+}
+
static int gve_alloc_notify_blocks(struct gve_priv *priv)
{
int num_vecs_requested = priv->num_ntfy_blks + 1;
- unsigned int active_cpus;
+ const struct cpumask *node_mask;
+ unsigned int cur_cpu;
int vecs_enabled;
int i, j;
int err;
@@ -383,8 +519,6 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv)
if (priv->rx_cfg.num_queues > priv->rx_cfg.max_queues)
priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
}
- /* Half the notification blocks go to TX and half to RX */
- active_cpus = min_t(int, priv->num_ntfy_blks / 2, num_online_cpus());
/* Setup Management Vector - the last vector */
snprintf(priv->mgmt_msix_name, sizeof(priv->mgmt_msix_name), "gve-mgmnt@pci:%s",
@@ -413,6 +547,8 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv)
}
/* Setup the other blocks - the first n-1 vectors */
+ node_mask = gve_get_node_mask(priv);
+ cur_cpu = cpumask_first(node_mask);
for (i = 0; i < priv->num_ntfy_blks; i++) {
struct gve_notify_block *block = &priv->ntfy_blocks[i];
int msix_idx = i;
@@ -428,9 +564,18 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv)
"Failed to receive msix vector %d\n", i);
goto abort_with_some_ntfy_blocks;
}
- irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
- get_cpu_mask(i % active_cpus));
+ block->irq = priv->msix_vectors[msix_idx].vector;
+ irq_set_affinity_and_hint(block->irq,
+ cpumask_of(cur_cpu));
block->irq_db_index = &priv->irq_db_indices[i].index;
+
+ cur_cpu = cpumask_next(cur_cpu, node_mask);
+ /* Wrap once CPUs in the node have been exhausted, or when
+ * starting RX queue affinities. TX and RX queues of the same
+ * index share affinity.
+ */
+ if (cur_cpu >= nr_cpu_ids || (i + 1) == priv->tx_cfg.max_queues)
+ cur_cpu = cpumask_first(node_mask);
}
return 0;
abort_with_some_ntfy_blocks:
@@ -441,6 +586,7 @@ abort_with_some_ntfy_blocks:
irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
NULL);
free_irq(priv->msix_vectors[msix_idx].vector, block);
+ block->irq = 0;
}
kvfree(priv->ntfy_blocks);
priv->ntfy_blocks = NULL;
@@ -474,6 +620,7 @@ static void gve_free_notify_blocks(struct gve_priv *priv)
irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
NULL);
free_irq(priv->msix_vectors[msix_idx].vector, block);
+ block->irq = 0;
}
free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
kvfree(priv->ntfy_blocks);
@@ -491,12 +638,21 @@ static int gve_setup_device_resources(struct gve_priv *priv)
{
int err;
- err = gve_alloc_counter_array(priv);
+ err = gve_alloc_flow_rule_caches(priv);
if (err)
return err;
- err = gve_alloc_notify_blocks(priv);
+ err = gve_alloc_rss_config_cache(priv);
+ if (err)
+ goto abort_with_flow_rule_caches;
+ err = gve_alloc_counter_array(priv);
+ if (err)
+ goto abort_with_rss_config_cache;
+ err = gve_init_clock(priv);
if (err)
goto abort_with_counter;
+ err = gve_alloc_notify_blocks(priv);
+ if (err)
+ goto abort_with_clock;
err = gve_alloc_stats_report(priv);
if (err)
goto abort_with_ntfy_blocks;
@@ -527,6 +683,12 @@ static int gve_setup_device_resources(struct gve_priv *priv)
}
}
+ err = gve_init_rss_config(priv, priv->rx_cfg.num_queues);
+ if (err) {
+ dev_err(&priv->pdev->dev, "Failed to init RSS config");
+ goto abort_with_ptype_lut;
+ }
+
err = gve_adminq_report_stats(priv, priv->stats_report_len,
priv->stats_report_bus,
GVE_STATS_REPORT_TIMER_PERIOD);
@@ -543,8 +705,14 @@ abort_with_stats_report:
gve_free_stats_report(priv);
abort_with_ntfy_blocks:
gve_free_notify_blocks(priv);
+abort_with_clock:
+ gve_teardown_clock(priv);
abort_with_counter:
gve_free_counter_array(priv);
+abort_with_rss_config_cache:
+ gve_free_rss_config_cache(priv);
+abort_with_flow_rule_caches:
+ gve_free_flow_rule_caches(priv);
return err;
}
@@ -557,6 +725,12 @@ static void gve_teardown_device_resources(struct gve_priv *priv)
/* Tell device its resources are being freed */
if (gve_get_device_resources_ok(priv)) {
+ err = gve_flow_rules_reset(priv);
+ if (err) {
+ dev_err(&priv->pdev->dev,
+ "Failed to reset flow rules: err=%d\n", err);
+ gve_trigger_reset(priv);
+ }
/* detach the stats report */
err = gve_adminq_report_stats(priv, 0, 0x0, GVE_STATS_REPORT_TIMER_PERIOD);
if (err) {
@@ -576,43 +750,45 @@ static void gve_teardown_device_resources(struct gve_priv *priv)
kvfree(priv->ptype_lut_dqo);
priv->ptype_lut_dqo = NULL;
+ gve_free_flow_rule_caches(priv);
+ gve_free_rss_config_cache(priv);
gve_free_counter_array(priv);
gve_free_notify_blocks(priv);
gve_free_stats_report(priv);
+ gve_teardown_clock(priv);
gve_clear_device_resources_ok(priv);
}
-static int gve_unregister_qpl(struct gve_priv *priv, u32 i)
+static int gve_unregister_qpl(struct gve_priv *priv,
+ struct gve_queue_page_list *qpl)
{
int err;
- err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
+ if (!qpl)
+ return 0;
+
+ err = gve_adminq_unregister_page_list(priv, qpl->id);
if (err) {
netif_err(priv, drv, priv->dev,
"Failed to unregister queue page list %d\n",
- priv->qpls[i].id);
+ qpl->id);
return err;
}
- priv->num_registered_pages -= priv->qpls[i].num_entries;
+ priv->num_registered_pages -= qpl->num_entries;
return 0;
}
-static int gve_register_qpl(struct gve_priv *priv, u32 i)
+static int gve_register_qpl(struct gve_priv *priv,
+ struct gve_queue_page_list *qpl)
{
- int num_rx_qpls;
int pages;
int err;
- /* Rx QPLs succeed Tx QPLs in the priv->qpls array. */
- num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));
- if (i >= gve_rx_start_qpl_id(&priv->tx_cfg) + num_rx_qpls) {
- netif_err(priv, drv, priv->dev,
- "Cannot register nonexisting QPL at index %d\n", i);
- return -EINVAL;
- }
+ if (!qpl)
+ return 0;
- pages = priv->qpls[i].num_entries;
+ pages = qpl->num_entries;
if (pages + priv->num_registered_pages > priv->max_registered_pages) {
netif_err(priv, drv, priv->dev,
@@ -622,14 +798,11 @@ static int gve_register_qpl(struct gve_priv *priv, u32 i)
return -EINVAL;
}
- err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
+ err = gve_adminq_register_page_list(priv, qpl);
if (err) {
netif_err(priv, drv, priv->dev,
"failed to register queue page list %d\n",
- priv->qpls[i].id);
- /* This failure will trigger a reset - no need to clean
- * up
- */
+ qpl->id);
return err;
}
@@ -637,43 +810,43 @@ static int gve_register_qpl(struct gve_priv *priv, u32 i)
return 0;
}
-static int gve_register_xdp_qpls(struct gve_priv *priv)
+static struct gve_queue_page_list *gve_tx_get_qpl(struct gve_priv *priv, int idx)
{
- int start_id;
- int err;
- int i;
+ struct gve_tx_ring *tx = &priv->tx[idx];
- start_id = gve_xdp_tx_start_queue_id(priv);
- for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) {
- err = gve_register_qpl(priv, i);
- /* This failure will trigger a reset - no need to clean up */
- if (err)
- return err;
- }
- return 0;
+ if (gve_is_gqi(priv))
+ return tx->tx_fifo.qpl;
+ else
+ return tx->dqo.qpl;
+}
+
+static struct gve_queue_page_list *gve_rx_get_qpl(struct gve_priv *priv, int idx)
+{
+ struct gve_rx_ring *rx = &priv->rx[idx];
+
+ if (gve_is_gqi(priv))
+ return rx->data.qpl;
+ else
+ return rx->dqo.qpl;
}
static int gve_register_qpls(struct gve_priv *priv)
{
int num_tx_qpls, num_rx_qpls;
- int start_id;
int err;
int i;
- num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_num_xdp_qpls(priv),
- gve_is_qpl(priv));
+ num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_is_qpl(priv));
num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));
for (i = 0; i < num_tx_qpls; i++) {
- err = gve_register_qpl(priv, i);
+ err = gve_register_qpl(priv, gve_tx_get_qpl(priv, i));
if (err)
return err;
}
- /* there might be a gap between the tx and rx qpl ids */
- start_id = gve_rx_start_qpl_id(&priv->tx_cfg);
for (i = 0; i < num_rx_qpls; i++) {
- err = gve_register_qpl(priv, start_id + i);
+ err = gve_register_qpl(priv, gve_rx_get_qpl(priv, i));
if (err)
return err;
}
@@ -681,43 +854,24 @@ static int gve_register_qpls(struct gve_priv *priv)
return 0;
}
-static int gve_unregister_xdp_qpls(struct gve_priv *priv)
-{
- int start_id;
- int err;
- int i;
-
- start_id = gve_xdp_tx_start_queue_id(priv);
- for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) {
- err = gve_unregister_qpl(priv, i);
- /* This failure will trigger a reset - no need to clean */
- if (err)
- return err;
- }
- return 0;
-}
-
static int gve_unregister_qpls(struct gve_priv *priv)
{
int num_tx_qpls, num_rx_qpls;
- int start_id;
int err;
int i;
- num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_num_xdp_qpls(priv),
- gve_is_qpl(priv));
+ num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_is_qpl(priv));
num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));
for (i = 0; i < num_tx_qpls; i++) {
- err = gve_unregister_qpl(priv, i);
+ err = gve_unregister_qpl(priv, gve_tx_get_qpl(priv, i));
/* This failure will trigger a reset - no need to clean */
if (err)
return err;
}
- start_id = gve_rx_start_qpl_id(&priv->tx_cfg);
for (i = 0; i < num_rx_qpls; i++) {
- err = gve_unregister_qpl(priv, start_id + i);
+ err = gve_unregister_qpl(priv, gve_rx_get_qpl(priv, i));
/* This failure will trigger a reset - no need to clean */
if (err)
return err;
@@ -725,27 +879,6 @@ static int gve_unregister_qpls(struct gve_priv *priv)
return 0;
}
-static int gve_create_xdp_rings(struct gve_priv *priv)
-{
- int err;
-
- err = gve_adminq_create_tx_queues(priv,
- gve_xdp_tx_start_queue_id(priv),
- priv->num_xdp_queues);
- if (err) {
- netif_err(priv, drv, priv->dev, "failed to create %d XDP tx queues\n",
- priv->num_xdp_queues);
- /* This failure will trigger a reset - no need to clean
- * up
- */
- return err;
- }
- netif_dbg(priv, drv, priv->dev, "created %d XDP tx queues\n",
- priv->num_xdp_queues);
-
- return 0;
-}
-
static int gve_create_rings(struct gve_priv *priv)
{
int num_tx_queues = gve_num_tx_queues(priv);
@@ -801,7 +934,7 @@ static void init_xdp_sync_stats(struct gve_priv *priv)
int i;
/* Init stats */
- for (i = start_id; i < start_id + priv->num_xdp_queues; i++) {
+ for (i = start_id; i < start_id + priv->tx_cfg.num_xdp_queues; i++) {
int ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
u64_stats_init(&priv->tx[i].statss);
@@ -828,22 +961,19 @@ static void gve_tx_get_curr_alloc_cfg(struct gve_priv *priv,
{
cfg->qcfg = &priv->tx_cfg;
cfg->raw_addressing = !gve_is_qpl(priv);
- cfg->qpls = priv->qpls;
- cfg->qpl_cfg = &priv->qpl_cfg;
cfg->ring_size = priv->tx_desc_cnt;
- cfg->start_idx = 0;
- cfg->num_rings = gve_num_tx_queues(priv);
+ cfg->num_xdp_rings = cfg->qcfg->num_xdp_queues;
cfg->tx = priv->tx;
}
-static void gve_tx_stop_rings(struct gve_priv *priv, int start_id, int num_rings)
+static void gve_tx_stop_rings(struct gve_priv *priv, int num_rings)
{
int i;
if (!priv->tx)
return;
- for (i = start_id; i < start_id + num_rings; i++) {
+ for (i = 0; i < num_rings; i++) {
if (gve_is_gqi(priv))
gve_tx_stop_ring_gqi(priv, i);
else
@@ -851,12 +981,11 @@ static void gve_tx_stop_rings(struct gve_priv *priv, int start_id, int num_rings
}
}
-static void gve_tx_start_rings(struct gve_priv *priv, int start_id,
- int num_rings)
+static void gve_tx_start_rings(struct gve_priv *priv, int num_rings)
{
int i;
- for (i = start_id; i < start_id + num_rings; i++) {
+ for (i = 0; i < num_rings; i++) {
if (gve_is_gqi(priv))
gve_tx_start_ring_gqi(priv, i);
else
@@ -864,31 +993,9 @@ static void gve_tx_start_rings(struct gve_priv *priv, int start_id,
}
}
-static int gve_alloc_xdp_rings(struct gve_priv *priv)
-{
- struct gve_tx_alloc_rings_cfg cfg = {0};
- int err = 0;
-
- if (!priv->num_xdp_queues)
- return 0;
-
- gve_tx_get_curr_alloc_cfg(priv, &cfg);
- cfg.start_idx = gve_xdp_tx_start_queue_id(priv);
- cfg.num_rings = priv->num_xdp_queues;
-
- err = gve_tx_alloc_rings_gqi(priv, &cfg);
- if (err)
- return err;
-
- gve_tx_start_rings(priv, cfg.start_idx, cfg.num_rings);
- init_xdp_sync_stats(priv);
-
- return 0;
-}
-
-static int gve_alloc_rings(struct gve_priv *priv,
- struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
- struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+static int gve_queues_mem_alloc(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
{
int err;
@@ -916,26 +1023,6 @@ free_tx:
return err;
}
-static int gve_destroy_xdp_rings(struct gve_priv *priv)
-{
- int start_id;
- int err;
-
- start_id = gve_xdp_tx_start_queue_id(priv);
- err = gve_adminq_destroy_tx_queues(priv,
- start_id,
- priv->num_xdp_queues);
- if (err) {
- netif_err(priv, drv, priv->dev,
- "failed to destroy XDP queues\n");
- /* This failure will trigger a reset - no need to clean up */
- return err;
- }
- netif_dbg(priv, drv, priv->dev, "destroyed XDP queues\n");
-
- return 0;
-}
-
static int gve_destroy_rings(struct gve_priv *priv)
{
int num_tx_queues = gve_num_tx_queues(priv);
@@ -960,23 +1047,9 @@ static int gve_destroy_rings(struct gve_priv *priv)
return 0;
}
-static void gve_free_xdp_rings(struct gve_priv *priv)
-{
- struct gve_tx_alloc_rings_cfg cfg = {0};
-
- gve_tx_get_curr_alloc_cfg(priv, &cfg);
- cfg.start_idx = gve_xdp_tx_start_queue_id(priv);
- cfg.num_rings = priv->num_xdp_queues;
-
- if (priv->tx) {
- gve_tx_stop_rings(priv, cfg.start_idx, cfg.num_rings);
- gve_tx_free_rings_gqi(priv, &cfg);
- }
-}
-
-static void gve_free_rings(struct gve_priv *priv,
- struct gve_tx_alloc_rings_cfg *tx_cfg,
- struct gve_rx_alloc_rings_cfg *rx_cfg)
+static void gve_queues_mem_free(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *tx_cfg,
+ struct gve_rx_alloc_rings_cfg *rx_cfg)
{
if (gve_is_gqi(priv)) {
gve_tx_free_rings_gqi(priv, tx_cfg);
@@ -991,7 +1064,7 @@ int gve_alloc_page(struct gve_priv *priv, struct device *dev,
struct page **page, dma_addr_t *dma,
enum dma_data_direction dir, gfp_t gfp_flags)
{
- *page = alloc_page(gfp_flags);
+ *page = alloc_pages_node(priv->numa_node, gfp_flags, 0);
if (!*page) {
priv->page_alloc_fail++;
return -ENOMEM;
@@ -1005,35 +1078,41 @@ int gve_alloc_page(struct gve_priv *priv, struct device *dev,
return 0;
}
-static int gve_alloc_queue_page_list(struct gve_priv *priv,
- struct gve_queue_page_list *qpl,
- u32 id, int pages)
+struct gve_queue_page_list *gve_alloc_queue_page_list(struct gve_priv *priv,
+ u32 id, int pages)
{
+ struct gve_queue_page_list *qpl;
int err;
int i;
+ qpl = kvzalloc(sizeof(*qpl), GFP_KERNEL);
+ if (!qpl)
+ return NULL;
+
qpl->id = id;
qpl->num_entries = 0;
qpl->pages = kvcalloc(pages, sizeof(*qpl->pages), GFP_KERNEL);
- /* caller handles clean up */
if (!qpl->pages)
- return -ENOMEM;
+ goto abort;
+
qpl->page_buses = kvcalloc(pages, sizeof(*qpl->page_buses), GFP_KERNEL);
- /* caller handles clean up */
if (!qpl->page_buses)
- return -ENOMEM;
+ goto abort;
for (i = 0; i < pages; i++) {
err = gve_alloc_page(priv, &priv->pdev->dev, &qpl->pages[i],
&qpl->page_buses[i],
gve_qpl_dma_dir(priv, id), GFP_KERNEL);
- /* caller handles clean up */
if (err)
- return -ENOMEM;
+ goto abort;
qpl->num_entries++;
}
- return 0;
+ return qpl;
+
+abort:
+ gve_free_queue_page_list(priv, qpl, id);
+ return NULL;
}
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
@@ -1045,14 +1124,16 @@ void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
put_page(page);
}
-static void gve_free_queue_page_list(struct gve_priv *priv,
- struct gve_queue_page_list *qpl,
- int id)
+void gve_free_queue_page_list(struct gve_priv *priv,
+ struct gve_queue_page_list *qpl,
+ u32 id)
{
int i;
- if (!qpl->pages)
+ if (!qpl)
return;
+ if (!qpl->pages)
+ goto free_qpl;
if (!qpl->page_buses)
goto free_pages;
@@ -1065,149 +1146,103 @@ static void gve_free_queue_page_list(struct gve_priv *priv,
free_pages:
kvfree(qpl->pages);
qpl->pages = NULL;
+free_qpl:
+ kvfree(qpl);
}
-static void gve_free_n_qpls(struct gve_priv *priv,
- struct gve_queue_page_list *qpls,
- int start_id,
- int num_qpls)
+/* Use this to schedule a reset when the device is capable of continuing
+ * to handle other requests in its current state. If it is not, do a reset
+ * in thread instead.
+ */
+void gve_schedule_reset(struct gve_priv *priv)
{
- int i;
-
- for (i = start_id; i < start_id + num_qpls; i++)
- gve_free_queue_page_list(priv, &qpls[i], i);
+ gve_set_do_reset(priv);
+ queue_work(priv->gve_wq, &priv->service_task);
}
-static int gve_alloc_n_qpls(struct gve_priv *priv,
- struct gve_queue_page_list *qpls,
- int page_count,
- int start_id,
- int num_qpls)
+static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up);
+static int gve_reset_recovery(struct gve_priv *priv, bool was_up);
+static void gve_turndown(struct gve_priv *priv);
+static void gve_turnup(struct gve_priv *priv);
+
+static void gve_unreg_xsk_pool(struct gve_priv *priv, u16 qid)
{
- int err;
- int i;
+ struct gve_rx_ring *rx;
- for (i = start_id; i < start_id + num_qpls; i++) {
- err = gve_alloc_queue_page_list(priv, &qpls[i], i, page_count);
- if (err)
- goto free_qpls;
- }
+ if (!priv->rx)
+ return;
- return 0;
+ rx = &priv->rx[qid];
+ rx->xsk_pool = NULL;
+ if (xdp_rxq_info_is_reg(&rx->xdp_rxq))
+ xdp_rxq_info_unreg_mem_model(&rx->xdp_rxq);
-free_qpls:
- /* Must include the failing QPL too for gve_alloc_queue_page_list fails
- * without cleaning up.
- */
- gve_free_n_qpls(priv, qpls, start_id, i - start_id + 1);
- return err;
+ if (!priv->tx)
+ return;
+ priv->tx[gve_xdp_tx_queue_id(priv, qid)].xsk_pool = NULL;
}
-static int gve_alloc_qpls(struct gve_priv *priv,
- struct gve_qpls_alloc_cfg *cfg)
+static int gve_reg_xsk_pool(struct gve_priv *priv, struct net_device *dev,
+ struct xsk_buff_pool *pool, u16 qid)
{
- int max_queues = cfg->tx_cfg->max_queues + cfg->rx_cfg->max_queues;
- int rx_start_id, tx_num_qpls, rx_num_qpls;
- struct gve_queue_page_list *qpls;
- int page_count;
+ struct gve_rx_ring *rx;
+ u16 tx_qid;
int err;
- if (cfg->raw_addressing)
- return 0;
-
- qpls = kvcalloc(max_queues, sizeof(*qpls), GFP_KERNEL);
- if (!qpls)
- return -ENOMEM;
-
- cfg->qpl_cfg->qpl_map_size = BITS_TO_LONGS(max_queues) *
- sizeof(unsigned long) * BITS_PER_BYTE;
- cfg->qpl_cfg->qpl_id_map = kvcalloc(BITS_TO_LONGS(max_queues),
- sizeof(unsigned long), GFP_KERNEL);
- if (!cfg->qpl_cfg->qpl_id_map) {
- err = -ENOMEM;
- goto free_qpl_array;
+ rx = &priv->rx[qid];
+ err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
+ MEM_TYPE_XSK_BUFF_POOL, pool);
+ if (err) {
+ gve_unreg_xsk_pool(priv, qid);
+ return err;
}
- /* Allocate TX QPLs */
- page_count = priv->tx_pages_per_qpl;
- tx_num_qpls = gve_num_tx_qpls(cfg->tx_cfg, cfg->num_xdp_queues,
- gve_is_qpl(priv));
- err = gve_alloc_n_qpls(priv, qpls, page_count, 0, tx_num_qpls);
- if (err)
- goto free_qpl_map;
+ rx->xsk_pool = pool;
- /* Allocate RX QPLs */
- rx_start_id = gve_rx_start_qpl_id(cfg->tx_cfg);
- /* For GQI_QPL number of pages allocated have 1:1 relationship with
- * number of descriptors. For DQO, number of pages required are
- * more than descriptors (because of out of order completions).
- */
- page_count = cfg->is_gqi ? priv->rx_data_slot_cnt : priv->rx_pages_per_qpl;
- rx_num_qpls = gve_num_rx_qpls(cfg->rx_cfg, gve_is_qpl(priv));
- err = gve_alloc_n_qpls(priv, qpls, page_count, rx_start_id, rx_num_qpls);
- if (err)
- goto free_tx_qpls;
+ tx_qid = gve_xdp_tx_queue_id(priv, qid);
+ priv->tx[tx_qid].xsk_pool = pool;
- cfg->qpls = qpls;
return 0;
-
-free_tx_qpls:
- gve_free_n_qpls(priv, qpls, 0, tx_num_qpls);
-free_qpl_map:
- kvfree(cfg->qpl_cfg->qpl_id_map);
- cfg->qpl_cfg->qpl_id_map = NULL;
-free_qpl_array:
- kvfree(qpls);
- return err;
}
-static void gve_free_qpls(struct gve_priv *priv,
- struct gve_qpls_alloc_cfg *cfg)
+static void gve_unreg_xdp_info(struct gve_priv *priv)
{
- int max_queues = cfg->tx_cfg->max_queues + cfg->rx_cfg->max_queues;
- struct gve_queue_page_list *qpls = cfg->qpls;
int i;
- if (!qpls)
+ if (!priv->tx_cfg.num_xdp_queues || !priv->rx)
return;
- kvfree(cfg->qpl_cfg->qpl_id_map);
- cfg->qpl_cfg->qpl_id_map = NULL;
+ for (i = 0; i < priv->rx_cfg.num_queues; i++) {
+ struct gve_rx_ring *rx = &priv->rx[i];
- for (i = 0; i < max_queues; i++)
- gve_free_queue_page_list(priv, &qpls[i], i);
+ if (xdp_rxq_info_is_reg(&rx->xdp_rxq))
+ xdp_rxq_info_unreg(&rx->xdp_rxq);
- kvfree(qpls);
- cfg->qpls = NULL;
+ gve_unreg_xsk_pool(priv, i);
+ }
}
-/* Use this to schedule a reset when the device is capable of continuing
- * to handle other requests in its current state. If it is not, do a reset
- * in thread instead.
- */
-void gve_schedule_reset(struct gve_priv *priv)
+static struct xsk_buff_pool *gve_get_xsk_pool(struct gve_priv *priv, int qid)
{
- gve_set_do_reset(priv);
- queue_work(priv->gve_wq, &priv->service_task);
-}
+ if (!test_bit(qid, priv->xsk_pools))
+ return NULL;
-static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up);
-static int gve_reset_recovery(struct gve_priv *priv, bool was_up);
-static void gve_turndown(struct gve_priv *priv);
-static void gve_turnup(struct gve_priv *priv);
+ return xsk_get_pool_from_qid(priv->dev, qid);
+}
static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev)
{
struct napi_struct *napi;
struct gve_rx_ring *rx;
int err = 0;
- int i, j;
- u32 tx_qid;
+ int i;
- if (!priv->num_xdp_queues)
+ if (!priv->tx_cfg.num_xdp_queues)
return 0;
for (i = 0; i < priv->rx_cfg.num_queues; i++) {
+ struct xsk_buff_pool *xsk_pool;
+
rx = &priv->rx[i];
napi = &priv->ntfy_blocks[rx->ntfy_id].napi;
@@ -1215,64 +1250,28 @@ static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev)
napi->napi_id);
if (err)
goto err;
- err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
- MEM_TYPE_PAGE_SHARED, NULL);
+
+ xsk_pool = gve_get_xsk_pool(priv, i);
+ if (xsk_pool)
+ err = gve_reg_xsk_pool(priv, dev, xsk_pool, i);
+ else if (gve_is_qpl(priv))
+ err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED,
+ NULL);
+ else
+ err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
+ MEM_TYPE_PAGE_POOL,
+ rx->dqo.page_pool);
if (err)
goto err;
- rx->xsk_pool = xsk_get_pool_from_qid(dev, i);
- if (rx->xsk_pool) {
- err = xdp_rxq_info_reg(&rx->xsk_rxq, dev, i,
- napi->napi_id);
- if (err)
- goto err;
- err = xdp_rxq_info_reg_mem_model(&rx->xsk_rxq,
- MEM_TYPE_XSK_BUFF_POOL, NULL);
- if (err)
- goto err;
- xsk_pool_set_rxq_info(rx->xsk_pool,
- &rx->xsk_rxq);
- }
- }
-
- for (i = 0; i < priv->num_xdp_queues; i++) {
- tx_qid = gve_xdp_tx_queue_id(priv, i);
- priv->tx[tx_qid].xsk_pool = xsk_get_pool_from_qid(dev, i);
}
return 0;
err:
- for (j = i; j >= 0; j--) {
- rx = &priv->rx[j];
- if (xdp_rxq_info_is_reg(&rx->xdp_rxq))
- xdp_rxq_info_unreg(&rx->xdp_rxq);
- if (xdp_rxq_info_is_reg(&rx->xsk_rxq))
- xdp_rxq_info_unreg(&rx->xsk_rxq);
- }
+ gve_unreg_xdp_info(priv);
return err;
}
-static void gve_unreg_xdp_info(struct gve_priv *priv)
-{
- int i, tx_qid;
-
- if (!priv->num_xdp_queues)
- return;
-
- for (i = 0; i < priv->rx_cfg.num_queues; i++) {
- struct gve_rx_ring *rx = &priv->rx[i];
-
- xdp_rxq_info_unreg(&rx->xdp_rxq);
- if (rx->xsk_pool) {
- xdp_rxq_info_unreg(&rx->xsk_rxq);
- rx->xsk_pool = NULL;
- }
- }
-
- for (i = 0; i < priv->num_xdp_queues; i++) {
- tx_qid = gve_xdp_tx_queue_id(priv, i);
- priv->tx[tx_qid].xsk_pool = NULL;
- }
-}
static void gve_drain_page_cache(struct gve_priv *priv)
{
@@ -1282,118 +1281,69 @@ static void gve_drain_page_cache(struct gve_priv *priv)
page_frag_cache_drain(&priv->rx[i].page_cache);
}
-static void gve_qpls_get_curr_alloc_cfg(struct gve_priv *priv,
- struct gve_qpls_alloc_cfg *cfg)
-{
- cfg->raw_addressing = !gve_is_qpl(priv);
- cfg->is_gqi = gve_is_gqi(priv);
- cfg->num_xdp_queues = priv->num_xdp_queues;
- cfg->qpl_cfg = &priv->qpl_cfg;
- cfg->tx_cfg = &priv->tx_cfg;
- cfg->rx_cfg = &priv->rx_cfg;
- cfg->qpls = priv->qpls;
-}
-
static void gve_rx_get_curr_alloc_cfg(struct gve_priv *priv,
struct gve_rx_alloc_rings_cfg *cfg)
{
- cfg->qcfg = &priv->rx_cfg;
+ cfg->qcfg_rx = &priv->rx_cfg;
cfg->qcfg_tx = &priv->tx_cfg;
cfg->raw_addressing = !gve_is_qpl(priv);
cfg->enable_header_split = priv->header_split_enabled;
- cfg->qpls = priv->qpls;
- cfg->qpl_cfg = &priv->qpl_cfg;
cfg->ring_size = priv->rx_desc_cnt;
- cfg->packet_buffer_size = gve_is_gqi(priv) ?
- GVE_DEFAULT_RX_BUFFER_SIZE :
- priv->data_buffer_size_dqo;
+ cfg->packet_buffer_size = priv->rx_cfg.packet_buffer_size;
cfg->rx = priv->rx;
+ cfg->xdp = !!cfg->qcfg_tx->num_xdp_queues;
}
-static void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
- struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
- struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
- struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
{
- gve_qpls_get_curr_alloc_cfg(priv, qpls_alloc_cfg);
gve_tx_get_curr_alloc_cfg(priv, tx_alloc_cfg);
gve_rx_get_curr_alloc_cfg(priv, rx_alloc_cfg);
}
-static void gve_rx_start_rings(struct gve_priv *priv, int num_rings)
+static void gve_rx_start_ring(struct gve_priv *priv, int i)
{
- int i;
-
- for (i = 0; i < num_rings; i++) {
- if (gve_is_gqi(priv))
- gve_rx_start_ring_gqi(priv, i);
- else
- gve_rx_start_ring_dqo(priv, i);
- }
+ if (gve_is_gqi(priv))
+ gve_rx_start_ring_gqi(priv, i);
+ else
+ gve_rx_start_ring_dqo(priv, i);
}
-static void gve_rx_stop_rings(struct gve_priv *priv, int num_rings)
+static void gve_rx_start_rings(struct gve_priv *priv, int num_rings)
{
int i;
- if (!priv->rx)
- return;
-
- for (i = 0; i < num_rings; i++) {
- if (gve_is_gqi(priv))
- gve_rx_stop_ring_gqi(priv, i);
- else
- gve_rx_stop_ring_dqo(priv, i);
- }
+ for (i = 0; i < num_rings; i++)
+ gve_rx_start_ring(priv, i);
}
-static void gve_queues_mem_free(struct gve_priv *priv,
- struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
- struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
- struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+static void gve_rx_stop_ring(struct gve_priv *priv, int i)
{
- gve_free_rings(priv, tx_alloc_cfg, rx_alloc_cfg);
- gve_free_qpls(priv, qpls_alloc_cfg);
+ if (gve_is_gqi(priv))
+ gve_rx_stop_ring_gqi(priv, i);
+ else
+ gve_rx_stop_ring_dqo(priv, i);
}
-static int gve_queues_mem_alloc(struct gve_priv *priv,
- struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
- struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
- struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+static void gve_rx_stop_rings(struct gve_priv *priv, int num_rings)
{
- int err;
-
- err = gve_alloc_qpls(priv, qpls_alloc_cfg);
- if (err) {
- netif_err(priv, drv, priv->dev, "Failed to alloc QPLs\n");
- return err;
- }
- tx_alloc_cfg->qpls = qpls_alloc_cfg->qpls;
- rx_alloc_cfg->qpls = qpls_alloc_cfg->qpls;
- err = gve_alloc_rings(priv, tx_alloc_cfg, rx_alloc_cfg);
- if (err) {
- netif_err(priv, drv, priv->dev, "Failed to alloc rings\n");
- goto free_qpls;
- }
+ int i;
- return 0;
+ if (!priv->rx)
+ return;
-free_qpls:
- gve_free_qpls(priv, qpls_alloc_cfg);
- return err;
+ for (i = 0; i < num_rings; i++)
+ gve_rx_stop_ring(priv, i);
}
static void gve_queues_mem_remove(struct gve_priv *priv)
{
struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
- struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
- gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
- &tx_alloc_cfg, &rx_alloc_cfg);
- gve_queues_mem_free(priv, &qpls_alloc_cfg,
- &tx_alloc_cfg, &rx_alloc_cfg);
- priv->qpls = NULL;
+ gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
+ gve_queues_mem_free(priv, &tx_alloc_cfg, &rx_alloc_cfg);
priv->tx = NULL;
priv->rx = NULL;
}
@@ -1402,7 +1352,6 @@ static void gve_queues_mem_remove(struct gve_priv *priv)
* No memory is allocated. Passed-in memory is freed on errors.
*/
static int gve_queues_start(struct gve_priv *priv,
- struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
{
@@ -1410,24 +1359,18 @@ static int gve_queues_start(struct gve_priv *priv,
int err;
/* Record new resources into priv */
- priv->qpls = qpls_alloc_cfg->qpls;
priv->tx = tx_alloc_cfg->tx;
priv->rx = rx_alloc_cfg->rx;
/* Record new configs into priv */
- priv->qpl_cfg = *qpls_alloc_cfg->qpl_cfg;
priv->tx_cfg = *tx_alloc_cfg->qcfg;
- priv->rx_cfg = *rx_alloc_cfg->qcfg;
+ priv->tx_cfg.num_xdp_queues = tx_alloc_cfg->num_xdp_rings;
+ priv->rx_cfg = *rx_alloc_cfg->qcfg_rx;
priv->tx_desc_cnt = tx_alloc_cfg->ring_size;
priv->rx_desc_cnt = rx_alloc_cfg->ring_size;
- if (priv->xdp_prog)
- priv->num_xdp_queues = priv->rx_cfg.num_queues;
- else
- priv->num_xdp_queues = 0;
-
- gve_tx_start_rings(priv, 0, tx_alloc_cfg->num_rings);
- gve_rx_start_rings(priv, rx_alloc_cfg->qcfg->num_queues);
+ gve_tx_start_rings(priv, gve_num_tx_queues(priv));
+ gve_rx_start_rings(priv, rx_alloc_cfg->qcfg_rx->num_queues);
gve_init_sync_stats(priv);
err = netif_set_real_num_tx_queues(dev, priv->tx_cfg.num_queues);
@@ -1441,12 +1384,18 @@ static int gve_queues_start(struct gve_priv *priv,
if (err)
goto stop_and_free_rings;
+ if (rx_alloc_cfg->reset_rss) {
+ err = gve_init_rss_config(priv, priv->rx_cfg.num_queues);
+ if (err)
+ goto reset;
+ }
+
err = gve_register_qpls(priv);
if (err)
goto reset;
priv->header_split_enabled = rx_alloc_cfg->enable_header_split;
- priv->data_buffer_size_dqo = rx_alloc_cfg->packet_buffer_size;
+ priv->rx_cfg.packet_buffer_size = rx_alloc_cfg->packet_buffer_size;
err = gve_create_rings(priv);
if (err)
@@ -1473,7 +1422,7 @@ reset:
/* return the original error */
return err;
stop_and_free_rings:
- gve_tx_stop_rings(priv, 0, gve_num_tx_queues(priv));
+ gve_tx_stop_rings(priv, gve_num_tx_queues(priv));
gve_rx_stop_rings(priv, priv->rx_cfg.num_queues);
gve_queues_mem_remove(priv);
return err;
@@ -1483,23 +1432,19 @@ static int gve_open(struct net_device *dev)
{
struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
- struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
struct gve_priv *priv = netdev_priv(dev);
int err;
- gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
- &tx_alloc_cfg, &rx_alloc_cfg);
+ gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
- err = gve_queues_mem_alloc(priv, &qpls_alloc_cfg,
- &tx_alloc_cfg, &rx_alloc_cfg);
+ err = gve_queues_mem_alloc(priv, &tx_alloc_cfg, &rx_alloc_cfg);
if (err)
return err;
/* No need to free on error: ownership of resources is lost after
* calling gve_queues_start.
*/
- err = gve_queues_start(priv, &qpls_alloc_cfg,
- &tx_alloc_cfg, &rx_alloc_cfg);
+ err = gve_queues_start(priv, &tx_alloc_cfg, &rx_alloc_cfg);
if (err)
return err;
@@ -1522,11 +1467,11 @@ static int gve_queues_stop(struct gve_priv *priv)
goto err;
gve_clear_device_rings_ok(priv);
}
- del_timer_sync(&priv->stats_report_timer);
+ timer_delete_sync(&priv->stats_report_timer);
gve_unreg_xdp_info(priv);
- gve_tx_stop_rings(priv, 0, gve_num_tx_queues(priv));
+ gve_tx_stop_rings(priv, gve_num_tx_queues(priv));
gve_rx_stop_rings(priv, priv->rx_cfg.num_queues);
priv->interface_down_cnt++;
@@ -1556,69 +1501,6 @@ static int gve_close(struct net_device *dev)
return 0;
}
-static int gve_remove_xdp_queues(struct gve_priv *priv)
-{
- int qpl_start_id;
- int err;
-
- qpl_start_id = gve_xdp_tx_start_queue_id(priv);
-
- err = gve_destroy_xdp_rings(priv);
- if (err)
- return err;
-
- err = gve_unregister_xdp_qpls(priv);
- if (err)
- return err;
-
- gve_unreg_xdp_info(priv);
- gve_free_xdp_rings(priv);
-
- gve_free_n_qpls(priv, priv->qpls, qpl_start_id, gve_num_xdp_qpls(priv));
- priv->num_xdp_queues = 0;
- return 0;
-}
-
-static int gve_add_xdp_queues(struct gve_priv *priv)
-{
- int start_id;
- int err;
-
- priv->num_xdp_queues = priv->rx_cfg.num_queues;
-
- start_id = gve_xdp_tx_start_queue_id(priv);
- err = gve_alloc_n_qpls(priv, priv->qpls, priv->tx_pages_per_qpl,
- start_id, gve_num_xdp_qpls(priv));
- if (err)
- goto err;
-
- err = gve_alloc_xdp_rings(priv);
- if (err)
- goto free_xdp_qpls;
-
- err = gve_reg_xdp_info(priv, priv->dev);
- if (err)
- goto free_xdp_rings;
-
- err = gve_register_xdp_qpls(priv);
- if (err)
- goto free_xdp_rings;
-
- err = gve_create_xdp_rings(priv);
- if (err)
- goto free_xdp_rings;
-
- return 0;
-
-free_xdp_rings:
- gve_free_xdp_rings(priv);
-free_xdp_qpls:
- gve_free_n_qpls(priv, priv->qpls, start_id, gve_num_xdp_qpls(priv));
-err:
- priv->num_xdp_queues = 0;
- return err;
-}
-
static void gve_handle_link_status(struct gve_priv *priv, bool link_status)
{
if (!gve_get_napi_enabled(priv))
@@ -1636,6 +1518,19 @@ static void gve_handle_link_status(struct gve_priv *priv, bool link_status)
}
}
+static int gve_configure_rings_xdp(struct gve_priv *priv,
+ u16 num_xdp_rings)
+{
+ struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
+ struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
+
+ gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
+ tx_alloc_cfg.num_xdp_rings = num_xdp_rings;
+
+ rx_alloc_cfg.xdp = !!num_xdp_rings;
+ return gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
+}
+
static int gve_set_xdp(struct gve_priv *priv, struct bpf_prog *prog,
struct netlink_ext_ack *extack)
{
@@ -1644,46 +1539,53 @@ static int gve_set_xdp(struct gve_priv *priv, struct bpf_prog *prog,
u32 status;
old_prog = READ_ONCE(priv->xdp_prog);
- if (!netif_carrier_ok(priv->dev)) {
+ if (!netif_running(priv->dev)) {
WRITE_ONCE(priv->xdp_prog, prog);
if (old_prog)
bpf_prog_put(old_prog);
+
+ /* Update priv XDP queue configuration */
+ priv->tx_cfg.num_xdp_queues = priv->xdp_prog ?
+ priv->rx_cfg.num_queues : 0;
return 0;
}
- gve_turndown(priv);
- if (!old_prog && prog) {
- // Allocate XDP TX queues if an XDP program is
- // being installed
- err = gve_add_xdp_queues(priv);
- if (err)
- goto out;
- } else if (old_prog && !prog) {
- // Remove XDP TX queues if an XDP program is
- // being uninstalled
- err = gve_remove_xdp_queues(priv);
- if (err)
- goto out;
- }
+ if (!old_prog && prog)
+ err = gve_configure_rings_xdp(priv, priv->rx_cfg.num_queues);
+ else if (old_prog && !prog)
+ err = gve_configure_rings_xdp(priv, 0);
+
+ if (err)
+ goto out;
+
WRITE_ONCE(priv->xdp_prog, prog);
if (old_prog)
bpf_prog_put(old_prog);
out:
- gve_turnup(priv);
status = ioread32be(&priv->reg_bar0->device_status);
gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
return err;
}
+static int gve_xdp_xmit(struct net_device *dev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+
+ if (priv->queue_format == GVE_GQI_QPL_FORMAT)
+ return gve_xdp_xmit_gqi(dev, n, frames, flags);
+ else if (priv->queue_format == GVE_DQO_RDA_FORMAT)
+ return gve_xdp_xmit_dqo(dev, n, frames, flags);
+
+ return -EOPNOTSUPP;
+}
+
static int gve_xsk_pool_enable(struct net_device *dev,
struct xsk_buff_pool *pool,
u16 qid)
{
struct gve_priv *priv = netdev_priv(dev);
- struct napi_struct *napi;
- struct gve_rx_ring *rx;
- int tx_qid;
int err;
if (qid >= priv->rx_cfg.num_queues) {
@@ -1701,34 +1603,31 @@ static int gve_xsk_pool_enable(struct net_device *dev,
if (err)
return err;
- /* If XDP prog is not installed, return */
- if (!priv->xdp_prog)
- return 0;
+ set_bit(qid, priv->xsk_pools);
- rx = &priv->rx[qid];
- napi = &priv->ntfy_blocks[rx->ntfy_id].napi;
- err = xdp_rxq_info_reg(&rx->xsk_rxq, dev, qid, napi->napi_id);
- if (err)
- goto err;
+ /* If XDP prog is not installed or interface is down, return. */
+ if (!priv->xdp_prog || !netif_running(dev))
+ return 0;
- err = xdp_rxq_info_reg_mem_model(&rx->xsk_rxq,
- MEM_TYPE_XSK_BUFF_POOL, NULL);
+ err = gve_reg_xsk_pool(priv, dev, pool, qid);
if (err)
- goto err;
-
- xsk_pool_set_rxq_info(pool, &rx->xsk_rxq);
- rx->xsk_pool = pool;
-
- tx_qid = gve_xdp_tx_queue_id(priv, qid);
- priv->tx[tx_qid].xsk_pool = pool;
+ goto err_xsk_pool_dma_mapped;
+ /* Stop and start RDA queues to repost buffers. */
+ if (!gve_is_qpl(priv)) {
+ err = gve_configure_rings_xdp(priv, priv->rx_cfg.num_queues);
+ if (err)
+ goto err_xsk_pool_registered;
+ }
return 0;
-err:
- if (xdp_rxq_info_is_reg(&rx->xsk_rxq))
- xdp_rxq_info_unreg(&rx->xsk_rxq);
+err_xsk_pool_registered:
+ gve_unreg_xsk_pool(priv, qid);
+err_xsk_pool_dma_mapped:
+ clear_bit(qid, priv->xsk_pools);
xsk_pool_dma_unmap(pool,
- DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
+ DMA_ATTR_SKIP_CPU_SYNC |
+ DMA_ATTR_WEAK_ORDERING);
return err;
}
@@ -1740,103 +1639,118 @@ static int gve_xsk_pool_disable(struct net_device *dev,
struct napi_struct *napi_tx;
struct xsk_buff_pool *pool;
int tx_qid;
+ int err;
- pool = xsk_get_pool_from_qid(dev, qid);
- if (!pool)
- return -EINVAL;
if (qid >= priv->rx_cfg.num_queues)
return -EINVAL;
- /* If XDP prog is not installed, unmap DMA and return */
- if (!priv->xdp_prog)
- goto done;
+ clear_bit(qid, priv->xsk_pools);
- tx_qid = gve_xdp_tx_queue_id(priv, qid);
- if (!netif_running(dev)) {
- priv->rx[qid].xsk_pool = NULL;
- xdp_rxq_info_unreg(&priv->rx[qid].xsk_rxq);
- priv->tx[tx_qid].xsk_pool = NULL;
- goto done;
+ pool = xsk_get_pool_from_qid(dev, qid);
+ if (pool)
+ xsk_pool_dma_unmap(pool,
+ DMA_ATTR_SKIP_CPU_SYNC |
+ DMA_ATTR_WEAK_ORDERING);
+
+ if (!netif_running(dev) || !priv->tx_cfg.num_xdp_queues)
+ return 0;
+
+ /* Stop and start RDA queues to repost buffers. */
+ if (!gve_is_qpl(priv) && priv->xdp_prog) {
+ err = gve_configure_rings_xdp(priv, priv->rx_cfg.num_queues);
+ if (err)
+ return err;
}
napi_rx = &priv->ntfy_blocks[priv->rx[qid].ntfy_id].napi;
napi_disable(napi_rx); /* make sure current rx poll is done */
+ tx_qid = gve_xdp_tx_queue_id(priv, qid);
napi_tx = &priv->ntfy_blocks[priv->tx[tx_qid].ntfy_id].napi;
napi_disable(napi_tx); /* make sure current tx poll is done */
- priv->rx[qid].xsk_pool = NULL;
- xdp_rxq_info_unreg(&priv->rx[qid].xsk_rxq);
- priv->tx[tx_qid].xsk_pool = NULL;
+ gve_unreg_xsk_pool(priv, qid);
smp_mb(); /* Make sure it is visible to the workers on datapath */
napi_enable(napi_rx);
- if (gve_rx_work_pending(&priv->rx[qid]))
- napi_schedule(napi_rx);
-
napi_enable(napi_tx);
- if (gve_tx_clean_pending(priv, &priv->tx[tx_qid]))
- napi_schedule(napi_tx);
+ if (gve_is_gqi(priv)) {
+ if (gve_rx_work_pending(&priv->rx[qid]))
+ napi_schedule(napi_rx);
+
+ if (gve_tx_clean_pending(priv, &priv->tx[tx_qid]))
+ napi_schedule(napi_tx);
+ }
-done:
- xsk_pool_dma_unmap(pool,
- DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
return 0;
}
static int gve_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
{
struct gve_priv *priv = netdev_priv(dev);
- int tx_queue_id = gve_xdp_tx_queue_id(priv, queue_id);
+ struct napi_struct *napi;
+
+ if (!gve_get_napi_enabled(priv))
+ return -ENETDOWN;
if (queue_id >= priv->rx_cfg.num_queues || !priv->xdp_prog)
return -EINVAL;
- if (flags & XDP_WAKEUP_TX) {
- struct gve_tx_ring *tx = &priv->tx[tx_queue_id];
- struct napi_struct *napi =
- &priv->ntfy_blocks[tx->ntfy_id].napi;
-
- if (!napi_if_scheduled_mark_missed(napi)) {
- /* Call local_bh_enable to trigger SoftIRQ processing */
- local_bh_disable();
- napi_schedule(napi);
- local_bh_enable();
- }
-
- tx->xdp_xsk_wakeup++;
+ napi = &priv->ntfy_blocks[gve_rx_idx_to_ntfy(priv, queue_id)].napi;
+ if (!napi_if_scheduled_mark_missed(napi)) {
+ /* Call local_bh_enable to trigger SoftIRQ processing */
+ local_bh_disable();
+ napi_schedule(napi);
+ local_bh_enable();
}
return 0;
}
-static int verify_xdp_configuration(struct net_device *dev)
+static int gve_verify_xdp_configuration(struct net_device *dev,
+ struct netlink_ext_ack *extack)
{
struct gve_priv *priv = netdev_priv(dev);
+ u16 max_xdp_mtu;
if (dev->features & NETIF_F_LRO) {
- netdev_warn(dev, "XDP is not supported when LRO is on.\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "XDP is not supported when LRO is on.");
return -EOPNOTSUPP;
}
- if (priv->queue_format != GVE_GQI_QPL_FORMAT) {
- netdev_warn(dev, "XDP is not supported in mode %d.\n",
- priv->queue_format);
+ if (priv->header_split_enabled) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "XDP is not supported when header-data split is enabled.");
return -EOPNOTSUPP;
}
- if (dev->mtu > GVE_DEFAULT_RX_BUFFER_SIZE - sizeof(struct ethhdr) - GVE_RX_PAD) {
- netdev_warn(dev, "XDP is not supported for mtu %d.\n",
- dev->mtu);
+ if (priv->rx_cfg.packet_buffer_size != SZ_2K) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "XDP is not supported for Rx buf len %d, only %d supported.",
+ priv->rx_cfg.packet_buffer_size, SZ_2K);
+ return -EOPNOTSUPP;
+ }
+
+ max_xdp_mtu = priv->rx_cfg.packet_buffer_size - sizeof(struct ethhdr);
+ if (priv->queue_format == GVE_GQI_QPL_FORMAT)
+ max_xdp_mtu -= GVE_RX_PAD;
+
+ if (dev->mtu > max_xdp_mtu) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "XDP is not supported for mtu %d.",
+ dev->mtu);
return -EOPNOTSUPP;
}
if (priv->rx_cfg.num_queues != priv->tx_cfg.num_queues ||
(2 * priv->tx_cfg.num_queues > priv->tx_cfg.max_queues)) {
- netdev_warn(dev, "XDP load failed: The number of configured RX queues %d should be equal to the number of configured TX queues %d and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues %d",
- priv->rx_cfg.num_queues,
- priv->tx_cfg.num_queues,
+ netdev_warn(dev,
+ "XDP load failed: The number of configured RX queues %d should be equal to the number of configured TX queues %d and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues %d.",
+ priv->rx_cfg.num_queues, priv->tx_cfg.num_queues,
priv->tx_cfg.max_queues);
+ NL_SET_ERR_MSG_MOD(extack,
+ "XDP load failed: The number of configured RX queues should be equal to the number of configured TX queues and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues");
return -EINVAL;
}
return 0;
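As a rough illustration of the MTU bound computed above (assuming the usual 14-byte struct ethhdr and that GVE_RX_PAD is the 2-byte GQI receive padding): with the 2048-byte Rx buffer that the XDP path requires, max_xdp_mtu works out to 2048 - 14 = 2034 on DQO RDA, and 2048 - 14 - 2 = 2032 on the GQI-QPL format; any larger dev->mtu is rejected with -EOPNOTSUPP.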
@@ -1847,7 +1761,7 @@ static int gve_xdp(struct net_device *dev, struct netdev_bpf *xdp)
struct gve_priv *priv = netdev_priv(dev);
int err;
- err = verify_xdp_configuration(dev);
+ err = gve_verify_xdp_configuration(dev, xdp->extack);
if (err)
return err;
switch (xdp->command) {
@@ -1863,16 +1777,42 @@ static int gve_xdp(struct net_device *dev, struct netdev_bpf *xdp)
}
}
-static int gve_adjust_config(struct gve_priv *priv,
- struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
- struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
- struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+int gve_init_rss_config(struct gve_priv *priv, u16 num_queues)
+{
+ struct gve_rss_config *rss_config = &priv->rss_config;
+ struct ethtool_rxfh_param rxfh = {0};
+ u16 i;
+
+ if (!priv->cache_rss_config)
+ return 0;
+
+ for (i = 0; i < priv->rss_lut_size; i++)
+ rss_config->hash_lut[i] =
+ ethtool_rxfh_indir_default(i, num_queues);
+
+ netdev_rss_key_fill(rss_config->hash_key, priv->rss_key_size);
+
+ rxfh.hfunc = ETH_RSS_HASH_TOP;
+
+ return gve_adminq_configure_rss(priv, &rxfh);
+}
+
+int gve_flow_rules_reset(struct gve_priv *priv)
+{
+ if (!priv->max_flow_rules)
+ return 0;
+
+ return gve_adminq_reset_flow_rules(priv);
+}
+
+int gve_adjust_config(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
{
int err;
- /* Allocate resources for the new confiugration */
- err = gve_queues_mem_alloc(priv, qpls_alloc_cfg,
- tx_alloc_cfg, rx_alloc_cfg);
+ /* Allocate resources for the new configuration */
+ err = gve_queues_mem_alloc(priv, tx_alloc_cfg, rx_alloc_cfg);
if (err) {
netif_err(priv, drv, priv->dev,
"Adjust config failed to alloc new queues");
@@ -1884,14 +1824,12 @@ static int gve_adjust_config(struct gve_priv *priv,
if (err) {
netif_err(priv, drv, priv->dev,
"Adjust config failed to close old queues");
- gve_queues_mem_free(priv, qpls_alloc_cfg,
- tx_alloc_cfg, rx_alloc_cfg);
+ gve_queues_mem_free(priv, tx_alloc_cfg, rx_alloc_cfg);
return err;
}
/* Bring the device back up again with the new resources. */
- err = gve_queues_start(priv, qpls_alloc_cfg,
- tx_alloc_cfg, rx_alloc_cfg);
+ err = gve_queues_start(priv, tx_alloc_cfg, rx_alloc_cfg);
if (err) {
netif_err(priv, drv, priv->dev,
"Adjust config failed to start new queues, !!! DISABLING ALL QUEUES !!!\n");
@@ -1906,40 +1844,32 @@ static int gve_adjust_config(struct gve_priv *priv,
}
int gve_adjust_queues(struct gve_priv *priv,
- struct gve_queue_config new_rx_config,
- struct gve_queue_config new_tx_config)
+ struct gve_rx_queue_config new_rx_config,
+ struct gve_tx_queue_config new_tx_config,
+ bool reset_rss)
{
struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
- struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
- struct gve_qpl_config new_qpl_cfg;
int err;
- gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
- &tx_alloc_cfg, &rx_alloc_cfg);
-
- /* qpl_cfg is not read-only, it contains a map that gets updated as
- * rings are allocated, which is why we cannot use the yet unreleased
- * one in priv.
- */
- qpls_alloc_cfg.qpl_cfg = &new_qpl_cfg;
- tx_alloc_cfg.qpl_cfg = &new_qpl_cfg;
- rx_alloc_cfg.qpl_cfg = &new_qpl_cfg;
+ gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
/* Relay the new config from ethtool */
- qpls_alloc_cfg.tx_cfg = &new_tx_config;
tx_alloc_cfg.qcfg = &new_tx_config;
rx_alloc_cfg.qcfg_tx = &new_tx_config;
- qpls_alloc_cfg.rx_cfg = &new_rx_config;
- rx_alloc_cfg.qcfg = &new_rx_config;
- tx_alloc_cfg.num_rings = new_tx_config.num_queues;
+ rx_alloc_cfg.qcfg_rx = &new_rx_config;
+ rx_alloc_cfg.reset_rss = reset_rss;
- if (netif_carrier_ok(priv->dev)) {
- err = gve_adjust_config(priv, &qpls_alloc_cfg,
- &tx_alloc_cfg, &rx_alloc_cfg);
+ if (netif_running(priv->dev)) {
+ err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
return err;
}
/* Set the config for the next up. */
+ if (reset_rss) {
+ err = gve_init_rss_config(priv, new_rx_config.num_queues);
+ if (err)
+ return err;
+ }
priv->tx_cfg = new_tx_config;
priv->rx_cfg = new_rx_config;
@@ -1961,20 +1891,37 @@ static void gve_turndown(struct gve_priv *priv)
int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
- napi_disable(&block->napi);
+ if (!gve_tx_was_added_to_block(priv, idx))
+ continue;
+
+ if (idx < priv->tx_cfg.num_queues)
+ netif_queue_set_napi(priv->dev, idx,
+ NETDEV_QUEUE_TYPE_TX, NULL);
+
+ napi_disable_locked(&block->napi);
}
for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
- napi_disable(&block->napi);
+ if (!gve_rx_was_added_to_block(priv, idx))
+ continue;
+
+ netif_queue_set_napi(priv->dev, idx, NETDEV_QUEUE_TYPE_RX,
+ NULL);
+ napi_disable_locked(&block->napi);
}
/* Stop tx queues */
netif_tx_disable(priv->dev);
+ xdp_features_clear_redirect_target_locked(priv->dev);
+
gve_clear_napi_enabled(priv);
gve_clear_report_stats(priv);
+
+ /* Make sure that all traffic is finished processing. */
+ synchronize_net();
}
static void gve_turnup(struct gve_priv *priv)
@@ -1989,97 +1936,169 @@ static void gve_turnup(struct gve_priv *priv)
int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
- napi_enable(&block->napi);
+ if (!gve_tx_was_added_to_block(priv, idx))
+ continue;
+
+ napi_enable_locked(&block->napi);
+
+ if (idx < priv->tx_cfg.num_queues)
+ netif_queue_set_napi(priv->dev, idx,
+ NETDEV_QUEUE_TYPE_TX,
+ &block->napi);
+
if (gve_is_gqi(priv)) {
iowrite32be(0, gve_irq_doorbell(priv, block));
} else {
gve_set_itr_coalesce_usecs_dqo(priv, block,
priv->tx_coalesce_usecs);
}
+
+ /* Any descs written by the NIC before this barrier will be
+ * handled by the one-off napi schedule below. Whereas any
+ * descs after the barrier will generate interrupts.
+ */
+ mb();
+ napi_schedule(&block->napi);
}
for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
- napi_enable(&block->napi);
+ if (!gve_rx_was_added_to_block(priv, idx))
+ continue;
+
+ napi_enable_locked(&block->napi);
+ netif_queue_set_napi(priv->dev, idx, NETDEV_QUEUE_TYPE_RX,
+ &block->napi);
+
if (gve_is_gqi(priv)) {
iowrite32be(0, gve_irq_doorbell(priv, block));
} else {
gve_set_itr_coalesce_usecs_dqo(priv, block,
priv->rx_coalesce_usecs);
}
+
+ /* Any descs written by the NIC before this barrier will be
+ * handled by the one-off napi schedule below. Whereas any
+ * descs after the barrier will generate interrupts.
+ */
+ mb();
+ napi_schedule(&block->napi);
}
+ if (priv->tx_cfg.num_xdp_queues && gve_supports_xdp_xmit(priv))
+ xdp_features_set_redirect_target_locked(priv->dev, false);
+
gve_set_napi_enabled(priv);
}
-static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
+static void gve_turnup_and_check_status(struct gve_priv *priv)
+{
+ u32 status;
+
+ gve_turnup(priv);
+ status = ioread32be(&priv->reg_bar0->device_status);
+ gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
+}
+
+static struct gve_notify_block *gve_get_tx_notify_block(struct gve_priv *priv,
+ unsigned int txqueue)
{
- struct gve_notify_block *block;
- struct gve_tx_ring *tx = NULL;
- struct gve_priv *priv;
- u32 last_nic_done;
- u32 current_time;
u32 ntfy_idx;
- netdev_info(dev, "Timeout on tx queue, %d", txqueue);
- priv = netdev_priv(dev);
if (txqueue > priv->tx_cfg.num_queues)
- goto reset;
+ return NULL;
ntfy_idx = gve_tx_idx_to_ntfy(priv, txqueue);
if (ntfy_idx >= priv->num_ntfy_blks)
- goto reset;
+ return NULL;
+
+ return &priv->ntfy_blocks[ntfy_idx];
+}
+
+static bool gve_tx_timeout_try_q_kick(struct gve_priv *priv,
+ unsigned int txqueue)
+{
+ struct gve_notify_block *block;
+ u32 current_time;
+
+ block = gve_get_tx_notify_block(priv, txqueue);
- block = &priv->ntfy_blocks[ntfy_idx];
- tx = block->tx;
+ if (!block)
+ return false;
current_time = jiffies_to_msecs(jiffies);
- if (tx->last_kick_msec + MIN_TX_TIMEOUT_GAP > current_time)
- goto reset;
+ if (block->tx->last_kick_msec + MIN_TX_TIMEOUT_GAP > current_time)
+ return false;
- /* Check to see if there are missed completions, which will allow us to
- * kick the queue.
- */
- last_nic_done = gve_tx_load_event_counter(priv, tx);
- if (last_nic_done - tx->done) {
- netdev_info(dev, "Kicking queue %d", txqueue);
- iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
- napi_schedule(&block->napi);
- tx->last_kick_msec = current_time;
- goto out;
- } // Else reset.
+ netdev_info(priv->dev, "Kicking queue %d", txqueue);
+ napi_schedule(&block->napi);
+ block->tx->last_kick_msec = current_time;
+ return true;
+}
-reset:
- gve_schedule_reset(priv);
+static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ struct gve_notify_block *block;
+ struct gve_priv *priv;
-out:
- if (tx)
- tx->queue_timeout++;
+ netdev_info(dev, "Timeout on tx queue, %d", txqueue);
+ priv = netdev_priv(dev);
+
+ if (!gve_tx_timeout_try_q_kick(priv, txqueue))
+ gve_schedule_reset(priv);
+
+ block = gve_get_tx_notify_block(priv, txqueue);
+ if (block)
+ block->tx->queue_timeout++;
priv->tx_timeo_cnt++;
}
-u16 gve_get_pkt_buf_size(const struct gve_priv *priv, bool enable_hsplit)
+/* Header split is only supported on DQ RDA queue format. If XDP is enabled,
+ * header split is not allowed.
+ */
+bool gve_header_split_supported(const struct gve_priv *priv)
{
- if (enable_hsplit && priv->max_rx_buffer_size >= GVE_MAX_RX_BUFFER_SIZE)
- return GVE_MAX_RX_BUFFER_SIZE;
- else
- return GVE_DEFAULT_RX_BUFFER_SIZE;
+ return priv->header_buf_size &&
+ priv->queue_format == GVE_DQO_RDA_FORMAT && !priv->xdp_prog;
}
-/* header-split is not supported on non-DQO_RDA yet even if device advertises it */
-bool gve_header_split_supported(const struct gve_priv *priv)
+int gve_set_rx_buf_len_config(struct gve_priv *priv, u32 rx_buf_len,
+ struct netlink_ext_ack *extack,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
{
- return priv->header_buf_size && priv->queue_format == GVE_DQO_RDA_FORMAT;
+ u32 old_rx_buf_len = rx_alloc_cfg->packet_buffer_size;
+
+ if (rx_buf_len == old_rx_buf_len)
+ return 0;
+
+ /* device options may not always contain support for 4K buffers */
+ if (!gve_is_dqo(priv) || priv->max_rx_buffer_size < SZ_4K) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Modifying Rx buf len is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (priv->xdp_prog && rx_buf_len != SZ_2K) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Rx buf len can only be 2048 when XDP is on");
+ return -EINVAL;
+ }
+
+ if (rx_buf_len != SZ_2K && rx_buf_len != SZ_4K) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Rx buf len can only be 2048 or 4096");
+ return -EINVAL;
+ }
+ rx_alloc_cfg->packet_buffer_size = rx_buf_len;
+
+ return 0;
}
-int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split)
+int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
{
- struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
- struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
- struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
bool enable_hdr_split;
- int err = 0;
if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN)
return 0;
@@ -2097,16 +2116,9 @@ int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split)
if (enable_hdr_split == priv->header_split_enabled)
return 0;
- gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
- &tx_alloc_cfg, &rx_alloc_cfg);
-
- rx_alloc_cfg.enable_header_split = enable_hdr_split;
- rx_alloc_cfg.packet_buffer_size = gve_get_pkt_buf_size(priv, enable_hdr_split);
+ rx_alloc_cfg->enable_header_split = enable_hdr_split;
- if (netif_running(priv->dev))
- err = gve_adjust_config(priv, &qpls_alloc_cfg,
- &tx_alloc_cfg, &rx_alloc_cfg);
- return err;
+ return 0;
}
static int gve_set_features(struct net_device *netdev,
@@ -2115,33 +2127,70 @@ static int gve_set_features(struct net_device *netdev,
const netdev_features_t orig_features = netdev->features;
struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
- struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
struct gve_priv *priv = netdev_priv(netdev);
- struct gve_qpl_config new_qpl_cfg;
int err;
- gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
- &tx_alloc_cfg, &rx_alloc_cfg);
- /* qpl_cfg is not read-only, it contains a map that gets updated as
- * rings are allocated, which is why we cannot use the yet unreleased
- * one in priv.
- */
- qpls_alloc_cfg.qpl_cfg = &new_qpl_cfg;
- tx_alloc_cfg.qpl_cfg = &new_qpl_cfg;
- rx_alloc_cfg.qpl_cfg = &new_qpl_cfg;
+ gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
if ((netdev->features & NETIF_F_LRO) != (features & NETIF_F_LRO)) {
netdev->features ^= NETIF_F_LRO;
- if (netif_carrier_ok(netdev)) {
- err = gve_adjust_config(priv, &qpls_alloc_cfg,
- &tx_alloc_cfg, &rx_alloc_cfg);
- if (err) {
- /* Revert the change on error. */
- netdev->features = orig_features;
- return err;
- }
+ if (priv->xdp_prog && (netdev->features & NETIF_F_LRO)) {
+ netdev_warn(netdev,
+ "XDP is not supported when LRO is on.\n");
+ err = -EOPNOTSUPP;
+ goto revert_features;
+ }
+ if (netif_running(netdev)) {
+ err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
+ if (err)
+ goto revert_features;
}
}
+ if ((netdev->features & NETIF_F_NTUPLE) && !(features & NETIF_F_NTUPLE)) {
+ err = gve_flow_rules_reset(priv);
+ if (err)
+ goto revert_features;
+ }
+
+ return 0;
+
+revert_features:
+ netdev->features = orig_features;
+ return err;
+}
+
+static int gve_get_ts_config(struct net_device *dev,
+ struct kernel_hwtstamp_config *kernel_config)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+
+ *kernel_config = priv->ts_config;
+ return 0;
+}
+
+static int gve_set_ts_config(struct net_device *dev,
+ struct kernel_hwtstamp_config *kernel_config,
+ struct netlink_ext_ack *extack)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+
+ if (kernel_config->tx_type != HWTSTAMP_TX_OFF) {
+ NL_SET_ERR_MSG_MOD(extack, "TX timestamping is not supported");
+ return -ERANGE;
+ }
+
+ if (kernel_config->rx_filter != HWTSTAMP_FILTER_NONE) {
+ if (!priv->nic_ts_report) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "RX timestamping is not supported");
+ kernel_config->rx_filter = HWTSTAMP_FILTER_NONE;
+ return -EOPNOTSUPP;
+ }
+
+ kernel_config->rx_filter = HWTSTAMP_FILTER_ALL;
+ }
+
+ priv->ts_config.rx_filter = kernel_config->rx_filter;
return 0;
}
@@ -2157,6 +2206,8 @@ static const struct net_device_ops gve_netdev_ops = {
.ndo_bpf = gve_xdp,
.ndo_xdp_xmit = gve_xdp_xmit,
.ndo_xsk_wakeup = gve_xsk_wakeup,
+ .ndo_hwtstamp_get = gve_get_ts_config,
+ .ndo_hwtstamp_set = gve_set_ts_config,
};
static void gve_handle_status(struct gve_priv *priv, u32 status)
@@ -2182,7 +2233,9 @@ static void gve_handle_reset(struct gve_priv *priv)
if (gve_get_do_reset(priv)) {
rtnl_lock();
+ netdev_lock(priv->dev);
gve_reset(priv, false);
+ netdev_unlock(priv->dev);
rtnl_unlock();
}
}
@@ -2256,7 +2309,7 @@ void gve_handle_report_stats(struct gve_priv *priv)
};
stats[stats_idx++] = (struct stats) {
.stat_name = cpu_to_be32(RX_BUFFERS_POSTED),
- .value = cpu_to_be64(priv->rx[0].fill_cnt),
+ .value = cpu_to_be64(priv->rx[idx].fill_cnt),
.queue_id = cpu_to_be32(idx),
};
}
@@ -2278,16 +2331,27 @@ static void gve_service_task(struct work_struct *work)
static void gve_set_netdev_xdp_features(struct gve_priv *priv)
{
+ xdp_features_t xdp_features;
+
if (priv->queue_format == GVE_GQI_QPL_FORMAT) {
- priv->dev->xdp_features = NETDEV_XDP_ACT_BASIC;
- priv->dev->xdp_features |= NETDEV_XDP_ACT_REDIRECT;
- priv->dev->xdp_features |= NETDEV_XDP_ACT_NDO_XMIT;
- priv->dev->xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
+ xdp_features = NETDEV_XDP_ACT_BASIC;
+ xdp_features |= NETDEV_XDP_ACT_REDIRECT;
+ xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
+ } else if (priv->queue_format == GVE_DQO_RDA_FORMAT) {
+ xdp_features = NETDEV_XDP_ACT_BASIC;
+ xdp_features |= NETDEV_XDP_ACT_REDIRECT;
+ xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
} else {
- priv->dev->xdp_features = 0;
+ xdp_features = 0;
}
+
+ xdp_set_features_flag_locked(priv->dev, xdp_features);
}
+static const struct xdp_metadata_ops gve_xdp_metadata_ops = {
+ .xmo_rx_timestamp = gve_xdp_rx_timestamp,
+};
+
static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
{
int num_ntfy;
@@ -2335,7 +2399,7 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
goto err;
}
- /* Big TCP is only supported on DQ*/
+ /* Big TCP is only supported on DQO */
if (!gve_is_gqi(priv))
netif_set_tso_max_size(priv->dev, GVE_DQO_TX_MAX);
@@ -2345,6 +2409,7 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
*/
priv->num_ntfy_blks = (num_ntfy - 1) & ~0x1;
priv->mgmt_msix_idx = priv->num_ntfy_blks;
+ priv->numa_node = dev_to_node(&priv->pdev->dev);
priv->tx_cfg.max_queues =
min_t(int, priv->tx_cfg.max_queues, priv->num_ntfy_blks / 2);
@@ -2359,6 +2424,7 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
priv->rx_cfg.num_queues = min_t(int, priv->default_num_queues,
priv->rx_cfg.num_queues);
}
+ priv->tx_cfg.num_xdp_queues = 0;
dev_info(&priv->pdev->dev, "TX queues %d, RX queues %d\n",
priv->tx_cfg.num_queues, priv->rx_cfg.num_queues);
@@ -2370,11 +2436,29 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
priv->rx_coalesce_usecs = GVE_RX_IRQ_RATELIMIT_US_DQO;
}
+ priv->ts_config.tx_type = HWTSTAMP_TX_OFF;
+ priv->ts_config.rx_filter = HWTSTAMP_FILTER_NONE;
+
setup_device:
+ priv->xsk_pools = bitmap_zalloc(priv->rx_cfg.max_queues, GFP_KERNEL);
+ if (!priv->xsk_pools) {
+ err = -ENOMEM;
+ goto err;
+ }
+
gve_set_netdev_xdp_features(priv);
+ if (!gve_is_gqi(priv))
+ priv->dev->xdp_metadata_ops = &gve_xdp_metadata_ops;
+
err = gve_setup_device_resources(priv);
- if (!err)
- return 0;
+ if (err)
+ goto err_free_xsk_bitmap;
+
+ return 0;
+
+err_free_xsk_bitmap:
+ bitmap_free(priv->xsk_pools);
+ priv->xsk_pools = NULL;
err:
gve_adminq_free(&priv->pdev->dev, priv);
return err;
@@ -2384,6 +2468,8 @@ static void gve_teardown_priv_resources(struct gve_priv *priv)
{
gve_teardown_device_resources(priv);
gve_adminq_free(&priv->pdev->dev, priv);
+ bitmap_free(priv->xsk_pools);
+ priv->xsk_pools = NULL;
}
static void gve_trigger_reset(struct gve_priv *priv)
@@ -2422,7 +2508,7 @@ err:
int gve_reset(struct gve_priv *priv, bool attempt_teardown)
{
- bool was_up = netif_carrier_ok(priv->dev);
+ bool was_up = netif_running(priv->dev);
int err;
dev_info(&priv->pdev->dev, "Performing reset\n");
@@ -2473,6 +2559,188 @@ static void gve_write_version(u8 __iomem *driver_version_register)
writeb('\n', driver_version_register);
}
+static int gve_rx_queue_stop(struct net_device *dev, void *per_q_mem, int idx)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+ struct gve_rx_ring *gve_per_q_mem;
+ int err;
+
+ if (!priv->rx)
+ return -EAGAIN;
+
+ /* Destroying queue 0 while other queues exist is not supported in DQO */
+ if (!gve_is_gqi(priv) && idx == 0)
+ return -ERANGE;
+
+ /* Single-queue destruction requires quiescence on all queues */
+ gve_turndown(priv);
+
+ /* This failure will trigger a reset - no need to clean up */
+ err = gve_adminq_destroy_single_rx_queue(priv, idx);
+ if (err)
+ return err;
+
+ if (gve_is_qpl(priv)) {
+ /* This failure will trigger a reset - no need to clean up */
+ err = gve_unregister_qpl(priv, gve_rx_get_qpl(priv, idx));
+ if (err)
+ return err;
+ }
+
+ gve_rx_stop_ring(priv, idx);
+
+ /* Turn the unstopped queues back up */
+ gve_turnup_and_check_status(priv);
+
+ gve_per_q_mem = (struct gve_rx_ring *)per_q_mem;
+ *gve_per_q_mem = priv->rx[idx];
+ memset(&priv->rx[idx], 0, sizeof(priv->rx[idx]));
+ return 0;
+}
+
+static void gve_rx_queue_mem_free(struct net_device *dev, void *per_q_mem)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+ struct gve_rx_alloc_rings_cfg cfg = {0};
+ struct gve_rx_ring *gve_per_q_mem;
+
+ gve_per_q_mem = (struct gve_rx_ring *)per_q_mem;
+ gve_rx_get_curr_alloc_cfg(priv, &cfg);
+
+ if (gve_is_gqi(priv))
+ gve_rx_free_ring_gqi(priv, gve_per_q_mem, &cfg);
+ else
+ gve_rx_free_ring_dqo(priv, gve_per_q_mem, &cfg);
+}
+
+static int gve_rx_queue_mem_alloc(struct net_device *dev, void *per_q_mem,
+ int idx)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+ struct gve_rx_alloc_rings_cfg cfg = {0};
+ struct gve_rx_ring *gve_per_q_mem;
+ int err;
+
+ if (!priv->rx)
+ return -EAGAIN;
+
+ gve_per_q_mem = (struct gve_rx_ring *)per_q_mem;
+ gve_rx_get_curr_alloc_cfg(priv, &cfg);
+
+ if (gve_is_gqi(priv))
+ err = gve_rx_alloc_ring_gqi(priv, &cfg, gve_per_q_mem, idx);
+ else
+ err = gve_rx_alloc_ring_dqo(priv, &cfg, gve_per_q_mem, idx);
+
+ return err;
+}
+
+static int gve_rx_queue_start(struct net_device *dev, void *per_q_mem, int idx)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+ struct gve_rx_ring *gve_per_q_mem;
+ int err;
+
+ if (!priv->rx)
+ return -EAGAIN;
+
+ gve_per_q_mem = (struct gve_rx_ring *)per_q_mem;
+ priv->rx[idx] = *gve_per_q_mem;
+
+ /* Single-queue creation requires quiescence on all queues */
+ gve_turndown(priv);
+
+ gve_rx_start_ring(priv, idx);
+
+ if (gve_is_qpl(priv)) {
+ /* This failure will trigger a reset - no need to clean up */
+ err = gve_register_qpl(priv, gve_rx_get_qpl(priv, idx));
+ if (err)
+ goto abort;
+ }
+
+ /* This failure will trigger a reset - no need to clean up */
+ err = gve_adminq_create_single_rx_queue(priv, idx);
+ if (err)
+ goto abort;
+
+ if (gve_is_gqi(priv))
+ gve_rx_write_doorbell(priv, &priv->rx[idx]);
+ else
+ gve_rx_post_buffers_dqo(&priv->rx[idx]);
+
+ /* Turn the unstopped queues back up */
+ gve_turnup_and_check_status(priv);
+ return 0;
+
+abort:
+ gve_rx_stop_ring(priv, idx);
+
+ /* All failures in this func result in a reset, by clearing the struct
+ * at idx, we prevent a double free when that reset runs. The reset,
+ * which needs the rtnl lock, will not run till this func returns and
+ * its caller gives up the lock.
+ */
+ memset(&priv->rx[idx], 0, sizeof(priv->rx[idx]));
+ return err;
+}
+
+static const struct netdev_queue_mgmt_ops gve_queue_mgmt_ops = {
+ .ndo_queue_mem_size = sizeof(struct gve_rx_ring),
+ .ndo_queue_mem_alloc = gve_rx_queue_mem_alloc,
+ .ndo_queue_mem_free = gve_rx_queue_mem_free,
+ .ndo_queue_start = gve_rx_queue_start,
+ .ndo_queue_stop = gve_rx_queue_stop,
+};
+
+static void gve_get_rx_queue_stats(struct net_device *dev, int idx,
+ struct netdev_queue_stats_rx *rx_stats)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+ struct gve_rx_ring *rx = &priv->rx[idx];
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin(&rx->statss);
+ rx_stats->packets = rx->rpackets;
+ rx_stats->bytes = rx->rbytes;
+ rx_stats->alloc_fail = rx->rx_skb_alloc_fail +
+ rx->rx_buf_alloc_fail;
+ } while (u64_stats_fetch_retry(&rx->statss, start));
+}
+
+static void gve_get_tx_queue_stats(struct net_device *dev, int idx,
+ struct netdev_queue_stats_tx *tx_stats)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+ struct gve_tx_ring *tx = &priv->tx[idx];
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin(&tx->statss);
+ tx_stats->packets = tx->pkt_done;
+ tx_stats->bytes = tx->bytes_done;
+ } while (u64_stats_fetch_retry(&tx->statss, start));
+}
+
+static void gve_get_base_stats(struct net_device *dev,
+ struct netdev_queue_stats_rx *rx,
+ struct netdev_queue_stats_tx *tx)
+{
+ rx->packets = 0;
+ rx->bytes = 0;
+ rx->alloc_fail = 0;
+
+ tx->packets = 0;
+ tx->bytes = 0;
+}
+
+static const struct netdev_stat_ops gve_stat_ops = {
+ .get_queue_stats_rx = gve_get_rx_queue_stats,
+ .get_queue_stats_tx = gve_get_tx_queue_stats,
+ .get_base_stats = gve_get_base_stats,
+};
+
static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int max_tx_queues, max_rx_queues;
@@ -2527,6 +2795,8 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, dev);
dev->ethtool_ops = &gve_ethtool_ops;
dev->netdev_ops = &gve_netdev_ops;
+ dev->queue_mgmt_ops = &gve_queue_mgmt_ops;
+ dev->stat_ops = &gve_stat_ops;
/* Set default and supported features.
*
@@ -2555,7 +2825,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
priv->service_task_flags = 0x0;
priv->state_flags = 0x0;
priv->ethtool_flags = 0x0;
- priv->data_buffer_size_dqo = GVE_DEFAULT_RX_BUFFER_SIZE;
+ priv->rx_cfg.packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
priv->max_rx_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
gve_set_probe_in_progress(priv);
@@ -2574,6 +2844,9 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto abort_with_wq;
+ if (!gve_is_gqi(priv) && !gve_is_qpl(priv))
+ dev->netmem_tx = true;
+
err = register_netdev(dev);
if (err)
goto abort_with_gve_init;
@@ -2628,9 +2901,12 @@ static void gve_shutdown(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct gve_priv *priv = netdev_priv(netdev);
- bool was_up = netif_carrier_ok(priv->dev);
+ bool was_up = netif_running(priv->dev);
+
+ netif_device_detach(netdev);
rtnl_lock();
+ netdev_lock(netdev);
if (was_up && gve_close(priv->dev)) {
/* If the dev was up, attempt to close, if close fails, reset */
gve_reset_and_teardown(priv, was_up);
@@ -2638,6 +2914,7 @@ static void gve_shutdown(struct pci_dev *pdev)
/* If the dev wasn't up or close worked, finish tearing down */
gve_teardown_priv_resources(priv);
}
+ netdev_unlock(netdev);
rtnl_unlock();
}
@@ -2646,10 +2923,11 @@ static int gve_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct gve_priv *priv = netdev_priv(netdev);
- bool was_up = netif_carrier_ok(priv->dev);
+ bool was_up = netif_running(priv->dev);
priv->suspend_cnt++;
rtnl_lock();
+ netdev_lock(netdev);
if (was_up && gve_close(priv->dev)) {
/* If the dev was up, attempt to close, if close fails, reset */
gve_reset_and_teardown(priv, was_up);
@@ -2658,6 +2936,7 @@ static int gve_suspend(struct pci_dev *pdev, pm_message_t state)
gve_teardown_priv_resources(priv);
}
priv->up_before_suspend = was_up;
+ netdev_unlock(netdev);
rtnl_unlock();
return 0;
}
@@ -2670,7 +2949,9 @@ static int gve_resume(struct pci_dev *pdev)
priv->resume_cnt++;
rtnl_lock();
+ netdev_lock(netdev);
err = gve_reset_recovery(priv, priv->up_before_suspend);
+ netdev_unlock(netdev);
rtnl_unlock();
return err;
}
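The ndo_hwtstamp_get/set callbacks added above accept only HWTSTAMP_TX_OFF and upgrade any non-NONE Rx request to HWTSTAMP_FILTER_ALL. A minimal user-space sketch of how such a device-level setting is typically requested through the standard SIOCSHWTSTAMP ioctl (the interface name "eth0" and the error handling are illustrative, not part of the driver):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_OFF,      /* gve rejects anything else */
		.rx_filter = HWTSTAMP_FILTER_ALL,  /* driver upgrades any Rx filter to ALL */
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);   /* hypothetical interface */
	ifr.ifr_data = (char *)&cfg;
	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	close(fd);
	return 0;
}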
diff --git a/drivers/net/ethernet/google/gve/gve_ptp.c b/drivers/net/ethernet/google/gve/gve_ptp.c
new file mode 100644
index 000000000000..073677d82ee8
--- /dev/null
+++ b/drivers/net/ethernet/google/gve/gve_ptp.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Google virtual Ethernet (gve) driver
+ *
+ * Copyright (C) 2025 Google LLC
+ */
+
+#include "gve.h"
+#include "gve_adminq.h"
+
+/* Interval to schedule a nic timestamp calibration, 250ms. */
+#define GVE_NIC_TS_SYNC_INTERVAL_MS 250
+
+/* Read the nic timestamp from hardware via the admin queue. */
+int gve_clock_nic_ts_read(struct gve_priv *priv)
+{
+ u64 nic_raw;
+ int err;
+
+ err = gve_adminq_report_nic_ts(priv, priv->nic_ts_report_bus);
+ if (err)
+ return err;
+
+ nic_raw = be64_to_cpu(priv->nic_ts_report->nic_timestamp);
+ WRITE_ONCE(priv->last_sync_nic_counter, nic_raw);
+
+ return 0;
+}
+
+static int gve_ptp_gettimex64(struct ptp_clock_info *info,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ return -EOPNOTSUPP;
+}
+
+static int gve_ptp_settime64(struct ptp_clock_info *info,
+ const struct timespec64 *ts)
+{
+ return -EOPNOTSUPP;
+}
+
+static long gve_ptp_do_aux_work(struct ptp_clock_info *info)
+{
+ const struct gve_ptp *ptp = container_of(info, struct gve_ptp, info);
+ struct gve_priv *priv = ptp->priv;
+ int err;
+
+ if (gve_get_reset_in_progress(priv) || !gve_get_admin_queue_ok(priv))
+ goto out;
+
+ err = gve_clock_nic_ts_read(priv);
+ if (err && net_ratelimit())
+ dev_err(&priv->pdev->dev,
+ "%s read err %d\n", __func__, err);
+
+out:
+ return msecs_to_jiffies(GVE_NIC_TS_SYNC_INTERVAL_MS);
+}
+
+static const struct ptp_clock_info gve_ptp_caps = {
+ .owner = THIS_MODULE,
+ .name = "gve clock",
+ .gettimex64 = gve_ptp_gettimex64,
+ .settime64 = gve_ptp_settime64,
+ .do_aux_work = gve_ptp_do_aux_work,
+};
+
+static int gve_ptp_init(struct gve_priv *priv)
+{
+ struct gve_ptp *ptp;
+ int err;
+
+ if (!priv->nic_timestamp_supported) {
+ dev_dbg(&priv->pdev->dev, "Device does not support PTP\n");
+ return -EOPNOTSUPP;
+ }
+
+ priv->ptp = kzalloc(sizeof(*priv->ptp), GFP_KERNEL);
+ if (!priv->ptp)
+ return -ENOMEM;
+
+ ptp = priv->ptp;
+ ptp->info = gve_ptp_caps;
+ ptp->clock = ptp_clock_register(&ptp->info, &priv->pdev->dev);
+
+ if (IS_ERR(ptp->clock)) {
+ dev_err(&priv->pdev->dev, "PTP clock registration failed\n");
+ err = PTR_ERR(ptp->clock);
+ goto free_ptp;
+ }
+
+ ptp->priv = priv;
+ return 0;
+
+free_ptp:
+ kfree(ptp);
+ priv->ptp = NULL;
+ return err;
+}
+
+static void gve_ptp_release(struct gve_priv *priv)
+{
+ struct gve_ptp *ptp = priv->ptp;
+
+ if (!ptp)
+ return;
+
+ if (ptp->clock)
+ ptp_clock_unregister(ptp->clock);
+
+ kfree(ptp);
+ priv->ptp = NULL;
+}
+
+int gve_init_clock(struct gve_priv *priv)
+{
+ int err;
+
+ if (!priv->nic_timestamp_supported)
+ return 0;
+
+ err = gve_ptp_init(priv);
+ if (err)
+ return err;
+
+ priv->nic_ts_report =
+ dma_alloc_coherent(&priv->pdev->dev,
+ sizeof(struct gve_nic_ts_report),
+ &priv->nic_ts_report_bus,
+ GFP_KERNEL);
+ if (!priv->nic_ts_report) {
+ dev_err(&priv->pdev->dev, "%s dma alloc error\n", __func__);
+ err = -ENOMEM;
+ goto release_ptp;
+ }
+ err = gve_clock_nic_ts_read(priv);
+ if (err) {
+ dev_err(&priv->pdev->dev, "failed to read NIC clock %d\n", err);
+ goto release_nic_ts_report;
+ }
+ ptp_schedule_worker(priv->ptp->clock,
+ msecs_to_jiffies(GVE_NIC_TS_SYNC_INTERVAL_MS));
+
+ return 0;
+
+release_nic_ts_report:
+ dma_free_coherent(&priv->pdev->dev,
+ sizeof(struct gve_nic_ts_report),
+ priv->nic_ts_report, priv->nic_ts_report_bus);
+ priv->nic_ts_report = NULL;
+release_ptp:
+ gve_ptp_release(priv);
+ return err;
+}
+
+void gve_teardown_clock(struct gve_priv *priv)
+{
+ gve_ptp_release(priv);
+
+ if (priv->nic_ts_report) {
+ dma_free_coherent(&priv->pdev->dev,
+ sizeof(struct gve_nic_ts_report),
+ priv->nic_ts_report, priv->nic_ts_report_bus);
+ priv->nic_ts_report = NULL;
+ }
+}
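Once the device-level filter is enabled, applications typically read the Rx hardware timestamps that this new clock keeps calibrated through the generic SO_TIMESTAMPING socket interface. A minimal sketch of the consumer side, not gve-specific (the port number and buffer sizes are illustrative):

#include <stdio.h>
#include <string.h>
#include <time.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <linux/net_tstamp.h>
#include <linux/errqueue.h>   /* struct scm_timestamping */

#ifndef SCM_TIMESTAMPING
#define SCM_TIMESTAMPING SO_TIMESTAMPING
#endif

int main(void)
{
	int val = SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE;
	char data[2048], ctrl[512];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = ctrl, .msg_controllen = sizeof(ctrl),
	};
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port = htons(7777),          /* hypothetical port */
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};
	struct cmsghdr *cm;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;
	if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val)) < 0)
		return 1;
	if (recvmsg(fd, &msg, 0) < 0)
		return 1;
	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
		if (cm->cmsg_level == SOL_SOCKET && cm->cmsg_type == SCM_TIMESTAMPING) {
			struct scm_timestamping *ts = (void *)CMSG_DATA(cm);

			/* ts->ts[2] holds the raw hardware timestamp */
			printf("hw ts: %lld.%09ld\n",
			       (long long)ts->ts[2].tv_sec, ts->ts[2].tv_nsec);
		}
	}
	return 0;
}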
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index 20f5a9e7fae9..ec424d2f4f57 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -30,6 +30,9 @@ static void gve_rx_unfill_pages(struct gve_priv *priv,
u32 slots = rx->mask + 1;
int i;
+ if (!rx->data.page_info)
+ return;
+
if (rx->data.raw_addressing) {
for (i = 0; i < slots; i++)
gve_rx_free_buffer(&priv->pdev->dev, &rx->data.page_info[i],
@@ -38,8 +41,6 @@ static void gve_rx_unfill_pages(struct gve_priv *priv,
for (i = 0; i < slots; i++)
page_ref_sub(rx->data.page_info[i].page,
rx->data.page_info[i].pagecnt_bias - 1);
- gve_unassign_qpl(cfg->qpl_cfg, rx->data.qpl->id);
- rx->data.qpl = NULL;
for (i = 0; i < rx->qpl_copy_pool_mask + 1; i++) {
page_ref_sub(rx->qpl_copy_pool[i].page,
@@ -51,6 +52,41 @@ static void gve_rx_unfill_pages(struct gve_priv *priv,
rx->data.page_info = NULL;
}
+static void gve_rx_ctx_clear(struct gve_rx_ctx *ctx)
+{
+ ctx->skb_head = NULL;
+ ctx->skb_tail = NULL;
+ ctx->total_size = 0;
+ ctx->frag_cnt = 0;
+ ctx->drop_pkt = false;
+}
+
+static void gve_rx_init_ring_state_gqi(struct gve_rx_ring *rx)
+{
+ rx->desc.seqno = 1;
+ rx->cnt = 0;
+ gve_rx_ctx_clear(&rx->ctx);
+}
+
+static void gve_rx_reset_ring_gqi(struct gve_priv *priv, int idx)
+{
+ struct gve_rx_ring *rx = &priv->rx[idx];
+ const u32 slots = priv->rx_desc_cnt;
+ size_t size;
+
+ /* Reset desc ring */
+ if (rx->desc.desc_ring) {
+ size = slots * sizeof(rx->desc.desc_ring[0]);
+ memset(rx->desc.desc_ring, 0, size);
+ }
+
+ /* Reset q_resources */
+ if (rx->q_resources)
+ memset(rx->q_resources, 0, sizeof(*rx->q_resources));
+
+ gve_rx_init_ring_state_gqi(rx);
+}
+
void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx)
{
int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
@@ -60,43 +96,60 @@ void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx)
gve_remove_napi(priv, ntfy_idx);
gve_rx_remove_from_block(priv, idx);
+ gve_rx_reset_ring_gqi(priv, idx);
}
-static void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
- struct gve_rx_alloc_rings_cfg *cfg)
+void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct gve_rx_alloc_rings_cfg *cfg)
{
struct device *dev = &priv->pdev->dev;
u32 slots = rx->mask + 1;
int idx = rx->q_num;
size_t bytes;
+ u32 qpl_id;
- bytes = sizeof(struct gve_rx_desc) * cfg->ring_size;
- dma_free_coherent(dev, bytes, rx->desc.desc_ring, rx->desc.bus);
- rx->desc.desc_ring = NULL;
+ if (rx->desc.desc_ring) {
+ bytes = sizeof(struct gve_rx_desc) * cfg->ring_size;
+ dma_free_coherent(dev, bytes, rx->desc.desc_ring, rx->desc.bus);
+ rx->desc.desc_ring = NULL;
+ }
- dma_free_coherent(dev, sizeof(*rx->q_resources),
- rx->q_resources, rx->q_resources_bus);
- rx->q_resources = NULL;
+ if (rx->q_resources) {
+ dma_free_coherent(dev, sizeof(*rx->q_resources),
+ rx->q_resources, rx->q_resources_bus);
+ rx->q_resources = NULL;
+ }
gve_rx_unfill_pages(priv, rx, cfg);
- bytes = sizeof(*rx->data.data_ring) * slots;
- dma_free_coherent(dev, bytes, rx->data.data_ring,
- rx->data.data_bus);
- rx->data.data_ring = NULL;
+ if (rx->data.data_ring) {
+ bytes = sizeof(*rx->data.data_ring) * slots;
+ dma_free_coherent(dev, bytes, rx->data.data_ring,
+ rx->data.data_bus);
+ rx->data.data_ring = NULL;
+ }
kvfree(rx->qpl_copy_pool);
rx->qpl_copy_pool = NULL;
+ if (rx->data.qpl) {
+ qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, idx);
+ gve_free_queue_page_list(priv, rx->data.qpl, qpl_id);
+ rx->data.qpl = NULL;
+ }
+
netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx);
}
-static void gve_setup_rx_buffer(struct gve_rx_slot_page_info *page_info,
- dma_addr_t addr, struct page *page, __be64 *slot_addr)
+static void gve_setup_rx_buffer(struct gve_rx_ring *rx,
+ struct gve_rx_slot_page_info *page_info,
+ dma_addr_t addr, struct page *page,
+ __be64 *slot_addr)
{
page_info->page = page;
page_info->page_offset = 0;
page_info->page_address = page_address(page);
+ page_info->buf_size = rx->packet_buffer_size;
*slot_addr = cpu_to_be64(addr);
/* The page already has 1 ref */
page_ref_add(page, INT_MAX - 1);
@@ -121,7 +174,7 @@ static int gve_rx_alloc_buffer(struct gve_priv *priv, struct device *dev,
return err;
}
- gve_setup_rx_buffer(page_info, dma, page, &data_slot->addr);
+ gve_setup_rx_buffer(rx, page_info, dma, page, &data_slot->addr);
return 0;
}
@@ -139,25 +192,18 @@ static int gve_rx_prefill_pages(struct gve_rx_ring *rx,
*/
slots = rx->mask + 1;
- rx->data.page_info = kvzalloc(slots *
- sizeof(*rx->data.page_info), GFP_KERNEL);
+ rx->data.page_info = kvcalloc_node(slots, sizeof(*rx->data.page_info),
+ GFP_KERNEL, priv->numa_node);
if (!rx->data.page_info)
return -ENOMEM;
- if (!rx->data.raw_addressing) {
- rx->data.qpl = gve_assign_rx_qpl(cfg, rx->q_num);
- if (!rx->data.qpl) {
- kvfree(rx->data.page_info);
- rx->data.page_info = NULL;
- return -ENOMEM;
- }
- }
for (i = 0; i < slots; i++) {
if (!rx->data.raw_addressing) {
struct page *page = rx->data.qpl->pages[i];
dma_addr_t addr = i * PAGE_SIZE;
- gve_setup_rx_buffer(&rx->data.page_info[i], addr, page,
+ gve_setup_rx_buffer(rx, &rx->data.page_info[i], addr,
+ page,
&rx->data.data_ring[i].qpl_offset);
continue;
}
@@ -170,7 +216,8 @@ static int gve_rx_prefill_pages(struct gve_rx_ring *rx,
if (!rx->data.raw_addressing) {
for (j = 0; j < rx->qpl_copy_pool_mask + 1; j++) {
- struct page *page = alloc_page(GFP_KERNEL);
+ struct page *page = alloc_pages_node(priv->numa_node,
+ GFP_KERNEL, 0);
if (!page) {
err = -ENOMEM;
@@ -180,6 +227,7 @@ static int gve_rx_prefill_pages(struct gve_rx_ring *rx,
rx->qpl_copy_pool[j].page = page;
rx->qpl_copy_pool[j].page_offset = 0;
rx->qpl_copy_pool[j].page_address = page_address(page);
+ rx->qpl_copy_pool[j].buf_size = rx->packet_buffer_size;
/* The page already has 1 ref. */
page_ref_add(page, INT_MAX - 1);
@@ -204,9 +252,6 @@ alloc_err_qpl:
page_ref_sub(rx->data.page_info[i].page,
rx->data.page_info[i].pagecnt_bias - 1);
- gve_unassign_qpl(cfg->qpl_cfg, rx->data.qpl->id);
- rx->data.qpl = NULL;
-
return err;
alloc_err_rda:
@@ -217,15 +262,6 @@ alloc_err_rda:
return err;
}
-static void gve_rx_ctx_clear(struct gve_rx_ctx *ctx)
-{
- ctx->skb_head = NULL;
- ctx->skb_tail = NULL;
- ctx->total_size = 0;
- ctx->frag_cnt = 0;
- ctx->drop_pkt = false;
-}
-
void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx)
{
int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
@@ -234,14 +270,16 @@ void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx)
gve_add_napi(priv, ntfy_idx, gve_napi_poll);
}
-static int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
- struct gve_rx_alloc_rings_cfg *cfg,
- struct gve_rx_ring *rx,
- int idx)
+int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg,
+ struct gve_rx_ring *rx,
+ int idx)
{
struct device *hdev = &priv->pdev->dev;
- u32 slots = priv->rx_data_slot_cnt;
+ u32 slots = cfg->ring_size;
int filled_pages;
+ int qpl_page_cnt;
+ u32 qpl_id = 0;
size_t bytes;
int err;
@@ -251,6 +289,7 @@ static int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
rx->gve = priv;
rx->q_num = idx;
+ rx->packet_buffer_size = cfg->packet_buffer_size;
rx->mask = slots - 1;
rx->data.raw_addressing = cfg->raw_addressing;
@@ -265,19 +304,30 @@ static int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
rx->qpl_copy_pool_mask = min_t(u32, U32_MAX, slots * 2) - 1;
rx->qpl_copy_pool_head = 0;
- rx->qpl_copy_pool = kvcalloc(rx->qpl_copy_pool_mask + 1,
- sizeof(rx->qpl_copy_pool[0]),
- GFP_KERNEL);
-
+ rx->qpl_copy_pool = kvcalloc_node(rx->qpl_copy_pool_mask + 1,
+ sizeof(rx->qpl_copy_pool[0]),
+ GFP_KERNEL, priv->numa_node);
if (!rx->qpl_copy_pool) {
err = -ENOMEM;
goto abort_with_slots;
}
+ if (!rx->data.raw_addressing) {
+ qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num);
+ qpl_page_cnt = cfg->ring_size;
+
+ rx->data.qpl = gve_alloc_queue_page_list(priv, qpl_id,
+ qpl_page_cnt);
+ if (!rx->data.qpl) {
+ err = -ENOMEM;
+ goto abort_with_copy_pool;
+ }
+ }
+
filled_pages = gve_rx_prefill_pages(rx, cfg);
if (filled_pages < 0) {
err = -ENOMEM;
- goto abort_with_copy_pool;
+ goto abort_with_qpl;
}
rx->fill_cnt = filled_pages;
/* Ensure data ring slots (packet buffers) are visible. */
@@ -304,11 +354,9 @@ static int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
err = -ENOMEM;
goto abort_with_q_resources;
}
- rx->cnt = 0;
rx->db_threshold = slots / 2;
- rx->desc.seqno = 1;
+ gve_rx_init_ring_state_gqi(rx);
- rx->packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
gve_rx_ctx_clear(&rx->ctx);
return 0;
@@ -319,6 +367,11 @@ abort_with_q_resources:
rx->q_resources = NULL;
abort_filled:
gve_rx_unfill_pages(priv, rx, cfg);
+abort_with_qpl:
+ if (!rx->data.raw_addressing) {
+ gve_free_queue_page_list(priv, rx->data.qpl, qpl_id);
+ rx->data.qpl = NULL;
+ }
abort_with_copy_pool:
kvfree(rx->qpl_copy_pool);
rx->qpl_copy_pool = NULL;
@@ -337,18 +390,12 @@ int gve_rx_alloc_rings_gqi(struct gve_priv *priv,
int err = 0;
int i, j;
- if (!cfg->raw_addressing && !cfg->qpls) {
- netif_err(priv, drv, priv->dev,
- "Cannot alloc QPL ring before allocing QPLs\n");
- return -EINVAL;
- }
-
- rx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_rx_ring),
+ rx = kvcalloc(cfg->qcfg_rx->max_queues, sizeof(struct gve_rx_ring),
GFP_KERNEL);
if (!rx)
return -ENOMEM;
- for (i = 0; i < cfg->qcfg->num_queues; i++) {
+ for (i = 0; i < cfg->qcfg_rx->num_queues; i++) {
err = gve_rx_alloc_ring_gqi(priv, cfg, &rx[i], i);
if (err) {
netif_err(priv, drv, priv->dev,
@@ -377,7 +424,7 @@ void gve_rx_free_rings_gqi(struct gve_priv *priv,
if (!rx)
return;
- for (i = 0; i < cfg->qcfg->num_queues; i++)
+ for (i = 0; i < cfg->qcfg_rx->num_queues; i++)
gve_rx_free_ring_gqi(priv, &rx[i], cfg);
kvfree(rx);
@@ -548,7 +595,7 @@ static struct sk_buff *gve_rx_copy_to_pool(struct gve_rx_ring *rx,
copy_page_info->pad = page_info->pad;
skb = gve_rx_add_frags(napi, copy_page_info,
- rx->packet_buffer_size, len, ctx);
+ copy_page_info->buf_size, len, ctx);
if (unlikely(!skb))
return NULL;
@@ -588,7 +635,8 @@ gve_rx_qpl(struct device *dev, struct net_device *netdev,
* device.
*/
if (page_info->can_flip) {
- skb = gve_rx_add_frags(napi, page_info, rx->packet_buffer_size, len, ctx);
+ skb = gve_rx_add_frags(napi, page_info, page_info->buf_size,
+ len, ctx);
/* No point in recycling if we didn't get the skb */
if (skb) {
/* Make sure that the page isn't freed. */
@@ -638,7 +686,7 @@ static struct sk_buff *gve_rx_skb(struct gve_priv *priv, struct gve_rx_ring *rx,
skb = gve_rx_raw_addressing(&priv->pdev->dev, netdev,
page_info, len, napi,
data_slot,
- rx->packet_buffer_size, ctx);
+ page_info->buf_size, ctx);
} else {
skb = gve_rx_qpl(&priv->pdev->dev, netdev, rx,
page_info, len, napi, data_slot);
@@ -813,7 +861,7 @@ static void gve_rx(struct gve_rx_ring *rx, netdev_features_t feat,
void *old_data;
int xdp_act;
- xdp_init_buff(&xdp, rx->packet_buffer_size, &rx->xdp_rxq);
+ xdp_init_buff(&xdp, page_info->buf_size, &rx->xdp_rxq);
xdp_prepare_buff(&xdp, page_info->page_address +
page_info->page_offset, GVE_RX_PAD,
len, false);
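The allocation changes above pin ring metadata and the QPL copy pages to the device's NUMA node (priv->numa_node, captured from dev_to_node() in gve_init_priv). For readers outside the kernel, a rough user-space analogue of the same locality idea using libnuma (link with -lnuma; the node number and buffer size are illustrative):

#include <stdio.h>
#include <numa.h>

int main(void)
{
	void *buf;
	int node = 0;   /* stand-in for the NIC's NUMA node */

	if (numa_available() < 0) {
		fprintf(stderr, "NUMA not supported on this system\n");
		return 1;
	}
	/* Allocate the descriptor-like buffer on the device-local node,
	 * mirroring kvcalloc_node(..., priv->numa_node) in the driver.
	 */
	buf = numa_alloc_onnode(4096, node);
	if (!buf)
		return 1;
	numa_free(buf, 4096);
	return 0;
}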
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index 8e8071308aeb..f1bd8f5d5732 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -8,6 +8,7 @@
#include "gve_dqo.h"
#include "gve_adminq.h"
#include "gve_utils.h"
+#include <linux/bpf.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/skbuff.h>
@@ -15,221 +16,122 @@
#include <net/ip6_checksum.h>
#include <net/ipv6.h>
#include <net/tcp.h>
+#include <net/xdp_sock_drv.h>
-static int gve_buf_ref_cnt(struct gve_rx_buf_state_dqo *bs)
-{
- return page_count(bs->page_info.page) - bs->page_info.pagecnt_bias;
-}
-
-static void gve_free_page_dqo(struct gve_priv *priv,
- struct gve_rx_buf_state_dqo *bs,
- bool free_page)
-{
- page_ref_sub(bs->page_info.page, bs->page_info.pagecnt_bias - 1);
- if (free_page)
- gve_free_page(&priv->pdev->dev, bs->page_info.page, bs->addr,
- DMA_FROM_DEVICE);
- bs->page_info.page = NULL;
-}
-
-static struct gve_rx_buf_state_dqo *gve_alloc_buf_state(struct gve_rx_ring *rx)
-{
- struct gve_rx_buf_state_dqo *buf_state;
- s16 buffer_id;
-
- buffer_id = rx->dqo.free_buf_states;
- if (unlikely(buffer_id == -1))
- return NULL;
-
- buf_state = &rx->dqo.buf_states[buffer_id];
-
- /* Remove buf_state from free list */
- rx->dqo.free_buf_states = buf_state->next;
-
- /* Point buf_state to itself to mark it as allocated */
- buf_state->next = buffer_id;
-
- return buf_state;
-}
-
-static bool gve_buf_state_is_allocated(struct gve_rx_ring *rx,
- struct gve_rx_buf_state_dqo *buf_state)
-{
- s16 buffer_id = buf_state - rx->dqo.buf_states;
-
- return buf_state->next == buffer_id;
-}
-
-static void gve_free_buf_state(struct gve_rx_ring *rx,
- struct gve_rx_buf_state_dqo *buf_state)
+static void gve_rx_free_hdr_bufs(struct gve_priv *priv, struct gve_rx_ring *rx)
{
- s16 buffer_id = buf_state - rx->dqo.buf_states;
+ struct device *hdev = &priv->pdev->dev;
+ int buf_count = rx->dqo.bufq.mask + 1;
- buf_state->next = rx->dqo.free_buf_states;
- rx->dqo.free_buf_states = buffer_id;
+ if (rx->dqo.hdr_bufs.data) {
+ dma_free_coherent(hdev, priv->header_buf_size * buf_count,
+ rx->dqo.hdr_bufs.data, rx->dqo.hdr_bufs.addr);
+ rx->dqo.hdr_bufs.data = NULL;
+ }
}
-static struct gve_rx_buf_state_dqo *
-gve_dequeue_buf_state(struct gve_rx_ring *rx, struct gve_index_list *list)
+static void gve_rx_init_ring_state_dqo(struct gve_rx_ring *rx,
+ const u32 buffer_queue_slots,
+ const u32 completion_queue_slots)
{
- struct gve_rx_buf_state_dqo *buf_state;
- s16 buffer_id;
-
- buffer_id = list->head;
- if (unlikely(buffer_id == -1))
- return NULL;
-
- buf_state = &rx->dqo.buf_states[buffer_id];
-
- /* Remove buf_state from list */
- list->head = buf_state->next;
- if (buf_state->next == -1)
- list->tail = -1;
-
- /* Point buf_state to itself to mark it as allocated */
- buf_state->next = buffer_id;
-
- return buf_state;
-}
+ int i;
-static void gve_enqueue_buf_state(struct gve_rx_ring *rx,
- struct gve_index_list *list,
- struct gve_rx_buf_state_dqo *buf_state)
-{
- s16 buffer_id = buf_state - rx->dqo.buf_states;
+ /* Set buffer queue state */
+ rx->dqo.bufq.mask = buffer_queue_slots - 1;
+ rx->dqo.bufq.head = 0;
+ rx->dqo.bufq.tail = 0;
- buf_state->next = -1;
+ /* Set completion queue state */
+ rx->dqo.complq.num_free_slots = completion_queue_slots;
+ rx->dqo.complq.mask = completion_queue_slots - 1;
+ rx->dqo.complq.cur_gen_bit = 0;
+ rx->dqo.complq.head = 0;
- if (list->head == -1) {
- list->head = buffer_id;
- list->tail = buffer_id;
- } else {
- int tail = list->tail;
+ /* Set RX SKB context */
+ rx->ctx.skb_head = NULL;
+ rx->ctx.skb_tail = NULL;
- rx->dqo.buf_states[tail].next = buffer_id;
- list->tail = buffer_id;
+ /* Set up linked list of buffer IDs */
+ if (rx->dqo.buf_states) {
+ for (i = 0; i < rx->dqo.num_buf_states - 1; i++)
+ rx->dqo.buf_states[i].next = i + 1;
+ rx->dqo.buf_states[rx->dqo.num_buf_states - 1].next = -1;
}
+
+ rx->dqo.free_buf_states = 0;
+ rx->dqo.recycled_buf_states.head = -1;
+ rx->dqo.recycled_buf_states.tail = -1;
+ rx->dqo.used_buf_states.head = -1;
+ rx->dqo.used_buf_states.tail = -1;
}
-static struct gve_rx_buf_state_dqo *
-gve_get_recycled_buf_state(struct gve_rx_ring *rx)
+static void gve_rx_reset_ring_dqo(struct gve_priv *priv, int idx)
{
- struct gve_rx_buf_state_dqo *buf_state;
+ struct gve_rx_ring *rx = &priv->rx[idx];
+ size_t size;
int i;
- /* Recycled buf states are immediately usable. */
- buf_state = gve_dequeue_buf_state(rx, &rx->dqo.recycled_buf_states);
- if (likely(buf_state))
- return buf_state;
-
- if (unlikely(rx->dqo.used_buf_states.head == -1))
- return NULL;
-
- /* Used buf states are only usable when ref count reaches 0, which means
- * no SKBs refer to them.
- *
- * Search a limited number before giving up.
- */
- for (i = 0; i < 5; i++) {
- buf_state = gve_dequeue_buf_state(rx, &rx->dqo.used_buf_states);
- if (gve_buf_ref_cnt(buf_state) == 0) {
- rx->dqo.used_buf_states_cnt--;
- return buf_state;
- }
+ const u32 buffer_queue_slots = priv->rx_desc_cnt;
+ const u32 completion_queue_slots = priv->rx_desc_cnt;
- gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state);
+ /* Reset buffer queue */
+ if (rx->dqo.bufq.desc_ring) {
+ size = sizeof(rx->dqo.bufq.desc_ring[0]) *
+ buffer_queue_slots;
+ memset(rx->dqo.bufq.desc_ring, 0, size);
}
- /* For QPL, we cannot allocate any new buffers and must
- * wait for the existing ones to be available.
- */
- if (rx->dqo.qpl)
- return NULL;
-
- /* If there are no free buf states discard an entry from
- * `used_buf_states` so it can be used.
- */
- if (unlikely(rx->dqo.free_buf_states == -1)) {
- buf_state = gve_dequeue_buf_state(rx, &rx->dqo.used_buf_states);
- if (gve_buf_ref_cnt(buf_state) == 0)
- return buf_state;
-
- gve_free_page_dqo(rx->gve, buf_state, true);
- gve_free_buf_state(rx, buf_state);
+ /* Reset completion queue */
+ if (rx->dqo.complq.desc_ring) {
+ size = sizeof(rx->dqo.complq.desc_ring[0]) *
+ completion_queue_slots;
+ memset(rx->dqo.complq.desc_ring, 0, size);
}
- return NULL;
-}
-
-static int gve_alloc_page_dqo(struct gve_rx_ring *rx,
- struct gve_rx_buf_state_dqo *buf_state)
-{
- struct gve_priv *priv = rx->gve;
- u32 idx;
+ /* Reset q_resources */
+ if (rx->q_resources)
+ memset(rx->q_resources, 0, sizeof(*rx->q_resources));
- if (!rx->dqo.qpl) {
- int err;
+ /* Reset buf states */
+ if (rx->dqo.buf_states) {
+ for (i = 0; i < rx->dqo.num_buf_states; i++) {
+ struct gve_rx_buf_state_dqo *bs = &rx->dqo.buf_states[i];
- err = gve_alloc_page(priv, &priv->pdev->dev,
- &buf_state->page_info.page,
- &buf_state->addr,
- DMA_FROM_DEVICE, GFP_ATOMIC);
- if (err)
- return err;
- } else {
- idx = rx->dqo.next_qpl_page_idx;
- if (idx >= priv->rx_pages_per_qpl) {
- net_err_ratelimited("%s: Out of QPL pages\n",
- priv->dev->name);
- return -ENOMEM;
+ if (rx->dqo.page_pool)
+ gve_free_to_page_pool(rx, bs, false);
+ else
+ gve_free_qpl_page_dqo(bs);
}
- buf_state->page_info.page = rx->dqo.qpl->pages[idx];
- buf_state->addr = rx->dqo.qpl->page_buses[idx];
- rx->dqo.next_qpl_page_idx++;
}
- buf_state->page_info.page_offset = 0;
- buf_state->page_info.page_address =
- page_address(buf_state->page_info.page);
- buf_state->last_single_ref_offset = 0;
-
- /* The page already has 1 ref. */
- page_ref_add(buf_state->page_info.page, INT_MAX - 1);
- buf_state->page_info.pagecnt_bias = INT_MAX;
-
- return 0;
-}
-
-static void gve_rx_free_hdr_bufs(struct gve_priv *priv, struct gve_rx_ring *rx)
-{
- struct device *hdev = &priv->pdev->dev;
- int buf_count = rx->dqo.bufq.mask + 1;
- if (rx->dqo.hdr_bufs.data) {
- dma_free_coherent(hdev, priv->header_buf_size * buf_count,
- rx->dqo.hdr_bufs.data, rx->dqo.hdr_bufs.addr);
- rx->dqo.hdr_bufs.data = NULL;
- }
+ gve_rx_init_ring_state_dqo(rx, buffer_queue_slots,
+ completion_queue_slots);
}
void gve_rx_stop_ring_dqo(struct gve_priv *priv, int idx)
{
int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
+ struct gve_rx_ring *rx = &priv->rx[idx];
if (!gve_rx_was_added_to_block(priv, idx))
return;
+ if (rx->dqo.page_pool)
+ page_pool_disable_direct_recycling(rx->dqo.page_pool);
gve_remove_napi(priv, ntfy_idx);
gve_rx_remove_from_block(priv, idx);
+ gve_rx_reset_ring_dqo(priv, idx);
}
-static void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
- struct gve_rx_alloc_rings_cfg *cfg)
+void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct gve_rx_alloc_rings_cfg *cfg)
{
struct device *hdev = &priv->pdev->dev;
size_t completion_queue_slots;
size_t buffer_queue_slots;
int idx = rx->q_num;
size_t size;
+ u32 qpl_id;
int i;
completion_queue_slots = rx->dqo.complq.mask + 1;
@@ -243,12 +145,20 @@ static void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
for (i = 0; i < rx->dqo.num_buf_states; i++) {
struct gve_rx_buf_state_dqo *bs = &rx->dqo.buf_states[i];
- /* Only free page for RDA. QPL pages are freed in gve_main. */
- if (bs->page_info.page)
- gve_free_page_dqo(priv, bs, !rx->dqo.qpl);
+
+ if (rx->dqo.page_pool)
+ gve_free_to_page_pool(rx, bs, false);
+ else
+ gve_free_qpl_page_dqo(bs);
+ if (gve_buf_state_is_allocated(rx, bs) && bs->xsk_buff) {
+ xsk_buff_free(bs->xsk_buff);
+ bs->xsk_buff = NULL;
+ }
}
+
if (rx->dqo.qpl) {
- gve_unassign_qpl(cfg->qpl_cfg, rx->dqo.qpl->id);
+ qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num);
+ gve_free_queue_page_list(priv, rx->dqo.qpl, qpl_id);
rx->dqo.qpl = NULL;
}
@@ -270,15 +180,20 @@ static void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
kvfree(rx->dqo.buf_states);
rx->dqo.buf_states = NULL;
+ if (rx->dqo.page_pool) {
+ page_pool_destroy(rx->dqo.page_pool);
+ rx->dqo.page_pool = NULL;
+ }
+
gve_rx_free_hdr_bufs(priv, rx);
netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx);
}
-static int gve_rx_alloc_hdr_bufs(struct gve_priv *priv, struct gve_rx_ring *rx)
+static int gve_rx_alloc_hdr_bufs(struct gve_priv *priv, struct gve_rx_ring *rx,
+ const u32 buf_count)
{
struct device *hdev = &priv->pdev->dev;
- int buf_count = rx->dqo.bufq.mask + 1;
rx->dqo.hdr_bufs.data = dma_alloc_coherent(hdev, priv->header_buf_size * buf_count,
&rx->dqo.hdr_bufs.addr, GFP_KERNEL);
@@ -296,17 +211,18 @@ void gve_rx_start_ring_dqo(struct gve_priv *priv, int idx)
gve_add_napi(priv, ntfy_idx, gve_napi_poll_dqo);
}
-static int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
- struct gve_rx_alloc_rings_cfg *cfg,
- struct gve_rx_ring *rx,
- int idx)
+int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg,
+ struct gve_rx_ring *rx,
+ int idx)
{
struct device *hdev = &priv->pdev->dev;
+ struct page_pool *pool;
+ int qpl_page_cnt;
size_t size;
- int i;
+ u32 qpl_id;
- const u32 buffer_queue_slots = cfg->raw_addressing ?
- priv->options_dqo_rda.rx_buff_ring_entries : cfg->ring_size;
+ const u32 buffer_queue_slots = cfg->ring_size;
const u32 completion_queue_slots = cfg->ring_size;
netif_dbg(priv, drv, priv->dev, "allocating rx ring DQO\n");
@@ -314,36 +230,34 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
memset(rx, 0, sizeof(*rx));
rx->gve = priv;
rx->q_num = idx;
- rx->dqo.bufq.mask = buffer_queue_slots - 1;
- rx->dqo.complq.num_free_slots = completion_queue_slots;
- rx->dqo.complq.mask = completion_queue_slots - 1;
- rx->ctx.skb_head = NULL;
- rx->ctx.skb_tail = NULL;
+ rx->packet_buffer_size = cfg->packet_buffer_size;
+
+ if (cfg->xdp) {
+ rx->packet_buffer_truesize = GVE_XDP_RX_BUFFER_SIZE_DQO;
+ rx->rx_headroom = XDP_PACKET_HEADROOM;
+ } else {
+ rx->packet_buffer_truesize = rx->packet_buffer_size;
+ rx->rx_headroom = 0;
+ }
+
+ /* struct gve_xdp_buff is overlaid on struct xdp_buff_xsk and utilizes
+	 * the 24-byte cb field to store gve-specific data.
+ */
+ XSK_CHECK_PRIV_TYPE(struct gve_xdp_buff);
- rx->dqo.num_buf_states = cfg->raw_addressing ?
- min_t(s16, S16_MAX, buffer_queue_slots * 4) :
- priv->rx_pages_per_qpl;
- rx->dqo.buf_states = kvcalloc(rx->dqo.num_buf_states,
- sizeof(rx->dqo.buf_states[0]),
- GFP_KERNEL);
+ rx->dqo.num_buf_states = cfg->raw_addressing ? buffer_queue_slots :
+ gve_get_rx_pages_per_qpl_dqo(cfg->ring_size);
+ rx->dqo.buf_states = kvcalloc_node(rx->dqo.num_buf_states,
+ sizeof(rx->dqo.buf_states[0]),
+ GFP_KERNEL, priv->numa_node);
if (!rx->dqo.buf_states)
return -ENOMEM;
/* Allocate header buffers for header-split */
if (cfg->enable_header_split)
- if (gve_rx_alloc_hdr_bufs(priv, rx))
+ if (gve_rx_alloc_hdr_bufs(priv, rx, buffer_queue_slots))
goto err;
- /* Set up linked list of buffer IDs */
- for (i = 0; i < rx->dqo.num_buf_states - 1; i++)
- rx->dqo.buf_states[i].next = i + 1;
-
- rx->dqo.buf_states[rx->dqo.num_buf_states - 1].next = -1;
- rx->dqo.recycled_buf_states.head = -1;
- rx->dqo.recycled_buf_states.tail = -1;
- rx->dqo.used_buf_states.head = -1;
- rx->dqo.used_buf_states.tail = -1;
-
/* Allocate RX completion queue */
size = sizeof(rx->dqo.complq.desc_ring[0]) *
completion_queue_slots;
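
The XSK_CHECK_PRIV_TYPE() call earlier in this hunk guards the overlay described in the comment above it: struct gve_xdp_buff reuses the 24-byte cb[] scratch area of struct xdp_buff_xsk for driver data. A kernel-context sketch of the same pattern with hypothetical names (assuming only that the private struct embeds xdp_buff first and fits within cb[]):

	#include <net/xdp.h>
	#include <net/xdp_sock_drv.h>

	struct demo_xdp_buff {
		struct xdp_buff xdp;	/* must stay first */
		const void *compl_desc;	/* driver-private, carried in cb[] */
		void *priv;
	};

	static inline void demo_check_overlay(void)
	{
		/* Build-time check that the overlay fits inside the cb[]
		 * area of struct xdp_buff_xsk.
		 */
		XSK_CHECK_PRIV_TYPE(struct demo_xdp_buff);
	}

Because the check is evaluated at compile time, growing the private struct past the cb[] area breaks the build instead of silently corrupting neighbouring xdp_buff_xsk fields.
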
@@ -359,8 +273,18 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
if (!rx->dqo.bufq.desc_ring)
goto err;
- if (!cfg->raw_addressing) {
- rx->dqo.qpl = gve_assign_rx_qpl(cfg, rx->q_num);
+ if (cfg->raw_addressing) {
+ pool = gve_rx_create_page_pool(priv, rx, cfg->xdp);
+ if (IS_ERR(pool))
+ goto err;
+
+ rx->dqo.page_pool = pool;
+ } else {
+ qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num);
+ qpl_page_cnt = gve_get_rx_pages_per_qpl_dqo(cfg->ring_size);
+
+ rx->dqo.qpl = gve_alloc_queue_page_list(priv, qpl_id,
+ qpl_page_cnt);
if (!rx->dqo.qpl)
goto err;
rx->dqo.next_qpl_page_idx = 0;
@@ -371,6 +295,9 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
if (!rx->q_resources)
goto err;
+ gve_rx_init_ring_state_dqo(rx, buffer_queue_slots,
+ completion_queue_slots);
+
return 0;
err:
@@ -393,18 +320,12 @@ int gve_rx_alloc_rings_dqo(struct gve_priv *priv,
int err;
int i;
- if (!cfg->raw_addressing && !cfg->qpls) {
- netif_err(priv, drv, priv->dev,
- "Cannot alloc QPL ring before allocing QPLs\n");
- return -EINVAL;
- }
-
- rx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_rx_ring),
+ rx = kvcalloc(cfg->qcfg_rx->max_queues, sizeof(struct gve_rx_ring),
GFP_KERNEL);
if (!rx)
return -ENOMEM;
- for (i = 0; i < cfg->qcfg->num_queues; i++) {
+ for (i = 0; i < cfg->qcfg_rx->num_queues; i++) {
err = gve_rx_alloc_ring_dqo(priv, cfg, &rx[i], i);
if (err) {
netif_err(priv, drv, priv->dev,
@@ -433,7 +354,7 @@ void gve_rx_free_rings_dqo(struct gve_priv *priv,
if (!rx)
return;
- for (i = 0; i < cfg->qcfg->num_queues; i++)
+ for (i = 0; i < cfg->qcfg_rx->num_queues; i++)
gve_rx_free_ring_dqo(priv, &rx[i], cfg);
kvfree(rx);
@@ -455,26 +376,14 @@ void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx)
num_avail_slots = min_t(u32, num_avail_slots, complq->num_free_slots);
while (num_posted < num_avail_slots) {
struct gve_rx_desc_dqo *desc = &bufq->desc_ring[bufq->tail];
- struct gve_rx_buf_state_dqo *buf_state;
-
- buf_state = gve_get_recycled_buf_state(rx);
- if (unlikely(!buf_state)) {
- buf_state = gve_alloc_buf_state(rx);
- if (unlikely(!buf_state))
- break;
-
- if (unlikely(gve_alloc_page_dqo(rx, buf_state))) {
- u64_stats_update_begin(&rx->statss);
- rx->rx_buf_alloc_fail++;
- u64_stats_update_end(&rx->statss);
- gve_free_buf_state(rx, buf_state);
- break;
- }
+
+ if (unlikely(gve_alloc_buffer(rx, desc))) {
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_buf_alloc_fail++;
+ u64_stats_update_end(&rx->statss);
+ break;
}
- desc->buf_id = cpu_to_le16(buf_state - rx->dqo.buf_states);
- desc->buf_addr = cpu_to_le64(buf_state->addr +
- buf_state->page_info.page_offset);
if (rx->dqo.hdr_bufs.data)
desc->header_buf_addr =
cpu_to_le64(rx->dqo.hdr_bufs.addr +
@@ -491,48 +400,6 @@ void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx)
rx->fill_cnt += num_posted;
}
-static void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
- struct gve_rx_buf_state_dqo *buf_state)
-{
- const u16 data_buffer_size = priv->data_buffer_size_dqo;
- int pagecount;
-
- /* Can't reuse if we only fit one buffer per page */
- if (data_buffer_size * 2 > PAGE_SIZE)
- goto mark_used;
-
- pagecount = gve_buf_ref_cnt(buf_state);
-
- /* Record the offset when we have a single remaining reference.
- *
- * When this happens, we know all of the other offsets of the page are
- * usable.
- */
- if (pagecount == 1) {
- buf_state->last_single_ref_offset =
- buf_state->page_info.page_offset;
- }
-
- /* Use the next buffer sized chunk in the page. */
- buf_state->page_info.page_offset += data_buffer_size;
- buf_state->page_info.page_offset &= (PAGE_SIZE - 1);
-
- /* If we wrap around to the same offset without ever dropping to 1
- * reference, then we don't know if this offset was ever freed.
- */
- if (buf_state->page_info.page_offset ==
- buf_state->last_single_ref_offset) {
- goto mark_used;
- }
-
- gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, buf_state);
- return;
-
-mark_used:
- gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state);
- rx->dqo.used_buf_states_cnt++;
-}
-
static void gve_rx_skb_csum(struct sk_buff *skb,
const struct gve_rx_compl_desc_dqo *desc,
struct gve_ptype ptype)
@@ -581,11 +448,60 @@ static void gve_rx_skb_hash(struct sk_buff *skb,
skb_set_hash(skb, le32_to_cpu(compl_desc->hash), hash_type);
}
-static void gve_rx_free_skb(struct gve_rx_ring *rx)
+/* Expand the hardware timestamp to its full 64-bit width and add it to the
+ * skb.
+ *
+ * This algorithm works by using the passed hardware timestamp to generate a
+ * diff relative to the last read of the NIC clock. This diff can be positive or
+ * negative, as it is possible that we have read the clock more recently than
+ * the hardware has received this packet. To detect this, we use the high bit of
+ * the diff, and assume that the read is more recent if the high bit is set. In
+ * this case we invert the process.
+ *
+ * Note that this means if the time delta between packet reception and the last
+ * clock read is greater than ~2 seconds, this will provide invalid results.
+ */
+static ktime_t gve_rx_get_hwtstamp(struct gve_priv *gve, u32 hwts)
+{
+ u64 last_read = READ_ONCE(gve->last_sync_nic_counter);
+ u32 low = (u32)last_read;
+ s32 diff = hwts - low;
+
+ return ns_to_ktime(last_read + diff);
+}
+
+static void gve_rx_skb_hwtstamp(struct gve_rx_ring *rx,
+ const struct gve_rx_compl_desc_dqo *desc)
+{
+ struct sk_buff *skb = rx->ctx.skb_head;
+
+ if (desc->ts_sub_nsecs_low & GVE_DQO_RX_HWTSTAMP_VALID)
+ skb_hwtstamps(skb)->hwtstamp =
+ gve_rx_get_hwtstamp(rx->gve, le32_to_cpu(desc->ts));
+}
+
+int gve_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
+{
+ const struct gve_xdp_buff *ctx = (void *)_ctx;
+
+ if (!ctx->gve->nic_ts_report)
+ return -ENODATA;
+
+ if (!(ctx->compl_desc->ts_sub_nsecs_low & GVE_DQO_RX_HWTSTAMP_VALID))
+ return -ENODATA;
+
+ *timestamp = gve_rx_get_hwtstamp(ctx->gve,
+ le32_to_cpu(ctx->compl_desc->ts));
+ return 0;
+}
+
+static void gve_rx_free_skb(struct napi_struct *napi, struct gve_rx_ring *rx)
{
if (!rx->ctx.skb_head)
return;
+ if (rx->ctx.skb_head == napi->skb)
+ napi->skb = NULL;
dev_kfree_skb_any(rx->ctx.skb_head);
rx->ctx.skb_head = NULL;
rx->ctx.skb_tail = NULL;
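
gve_rx_get_hwtstamp() above widens the 32-bit hardware timestamp by taking a signed difference against the low word of the last 64-bit counter read, which works as long as the skew stays within roughly 2^31 ns, as the comment notes. A standalone illustration of the arithmetic (userspace C, illustrative values):

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t expand_hwts(uint64_t last_read, uint32_t hwts)
	{
		/* A signed 32-bit diff covers packets stamped slightly before
		 * or after the last counter read, including 32-bit wraparound.
		 */
		int32_t diff = (int32_t)(hwts - (uint32_t)last_read);

		return last_read + diff;
	}

	int main(void)
	{
		/* Packet stamped just after a 32-bit rollover of the counter. */
		uint64_t last_read = 0x2fffffff80ULL;	/* low word 0xffffff80 */
		uint32_t hwts = 0x00000010;		/* wrapped past zero */

		/* Prints 0x3000000010: the low 32 bits match hwts again. */
		printf("%#llx\n", (unsigned long long)expand_hwts(last_read, hwts));
		return 0;
	}
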
@@ -606,7 +522,7 @@ static int gve_rx_copy_ondemand(struct gve_rx_ring *rx,
struct gve_rx_buf_state_dqo *buf_state,
u16 buf_len)
{
- struct page *page = alloc_page(GFP_ATOMIC);
+ struct page *page = alloc_pages_node(rx->gve->numa_node, GFP_ATOMIC, 0);
int num_frags;
if (!page)
@@ -628,6 +544,25 @@ static int gve_rx_copy_ondemand(struct gve_rx_ring *rx,
return 0;
}
+static void gve_skb_add_rx_frag(struct gve_rx_ring *rx,
+ struct gve_rx_buf_state_dqo *buf_state,
+ int num_frags, u16 buf_len)
+{
+ if (rx->dqo.page_pool) {
+ skb_add_rx_frag_netmem(rx->ctx.skb_tail, num_frags,
+ buf_state->page_info.netmem,
+ buf_state->page_info.page_offset +
+ buf_state->page_info.pad, buf_len,
+ buf_state->page_info.buf_size);
+ } else {
+ skb_add_rx_frag(rx->ctx.skb_tail, num_frags,
+ buf_state->page_info.page,
+ buf_state->page_info.page_offset +
+ buf_state->page_info.pad, buf_len,
+ buf_state->page_info.buf_size);
+ }
+}
+
/* Chains multi skbs for single rx packet.
* Returns 0 if buffer is appended, -1 otherwise.
*/
@@ -645,6 +580,9 @@ static int gve_rx_append_frags(struct napi_struct *napi,
if (!skb)
return -1;
+ if (rx->dqo.page_pool)
+ skb_mark_for_recycle(skb);
+
if (rx->ctx.skb_tail == rx->ctx.skb_head)
skb_shinfo(rx->ctx.skb_head)->frag_list = skb;
else
@@ -655,26 +593,185 @@ static int gve_rx_append_frags(struct napi_struct *napi,
if (rx->ctx.skb_tail != rx->ctx.skb_head) {
rx->ctx.skb_head->len += buf_len;
rx->ctx.skb_head->data_len += buf_len;
- rx->ctx.skb_head->truesize += priv->data_buffer_size_dqo;
+ rx->ctx.skb_head->truesize += buf_state->page_info.buf_size;
}
/* Trigger ondemand page allocation if we are running low on buffers */
if (gve_rx_should_trigger_copy_ondemand(rx))
return gve_rx_copy_ondemand(rx, buf_state, buf_len);
- skb_add_rx_frag(rx->ctx.skb_tail, num_frags,
- buf_state->page_info.page,
- buf_state->page_info.page_offset,
- buf_len, priv->data_buffer_size_dqo);
- gve_dec_pagecnt_bias(&buf_state->page_info);
+ gve_skb_add_rx_frag(rx, buf_state, num_frags, buf_len);
+ gve_reuse_buffer(rx, buf_state);
+ return 0;
+}
- /* Advances buffer page-offset if page is partially used.
- * Marks buffer as used if page is full.
- */
- gve_try_recycle_buf(priv, rx, buf_state);
+static int gve_xdp_tx_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct xdp_buff *xdp)
+{
+ struct gve_tx_ring *tx;
+ struct xdp_frame *xdpf;
+ u32 tx_qid;
+ int err;
+
+ xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf)) {
+ if (rx->xsk_pool)
+ xsk_buff_free(xdp);
+ return -ENOSPC;
+ }
+
+ tx_qid = gve_xdp_tx_queue_id(priv, rx->q_num);
+ tx = &priv->tx[tx_qid];
+ spin_lock(&tx->dqo_tx.xdp_lock);
+ err = gve_xdp_xmit_one_dqo(priv, tx, xdpf);
+ spin_unlock(&tx->dqo_tx.xdp_lock);
+
+ return err;
+}
+
+static void gve_xsk_done_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct xdp_buff *xdp, struct bpf_prog *xprog,
+ int xdp_act)
+{
+ switch (xdp_act) {
+ case XDP_ABORTED:
+ case XDP_DROP:
+ default:
+ xsk_buff_free(xdp);
+ break;
+ case XDP_TX:
+ if (unlikely(gve_xdp_tx_dqo(priv, rx, xdp)))
+ goto err;
+ break;
+ case XDP_REDIRECT:
+ if (unlikely(xdp_do_redirect(priv->dev, xdp, xprog)))
+ goto err;
+ break;
+ }
+
+ u64_stats_update_begin(&rx->statss);
+ if ((u32)xdp_act < GVE_XDP_ACTIONS)
+ rx->xdp_actions[xdp_act]++;
+ u64_stats_update_end(&rx->statss);
+ return;
+
+err:
+ u64_stats_update_begin(&rx->statss);
+ if (xdp_act == XDP_TX)
+ rx->xdp_tx_errors++;
+ if (xdp_act == XDP_REDIRECT)
+ rx->xdp_redirect_errors++;
+ u64_stats_update_end(&rx->statss);
+}
+
+static void gve_xdp_done_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct xdp_buff *xdp, struct bpf_prog *xprog,
+ int xdp_act,
+ struct gve_rx_buf_state_dqo *buf_state)
+{
+ int err;
+	int err;
+
+ case XDP_ABORTED:
+ case XDP_DROP:
+ default:
+ gve_free_buffer(rx, buf_state);
+ break;
+ case XDP_TX:
+ err = gve_xdp_tx_dqo(priv, rx, xdp);
+ if (unlikely(err))
+ goto err;
+ gve_reuse_buffer(rx, buf_state);
+ break;
+ case XDP_REDIRECT:
+ err = xdp_do_redirect(priv->dev, xdp, xprog);
+ if (unlikely(err))
+ goto err;
+ gve_reuse_buffer(rx, buf_state);
+ break;
+ }
+ u64_stats_update_begin(&rx->statss);
+ if ((u32)xdp_act < GVE_XDP_ACTIONS)
+ rx->xdp_actions[xdp_act]++;
+ u64_stats_update_end(&rx->statss);
+ return;
+err:
+ u64_stats_update_begin(&rx->statss);
+ if (xdp_act == XDP_TX)
+ rx->xdp_tx_errors++;
+ else if (xdp_act == XDP_REDIRECT)
+ rx->xdp_redirect_errors++;
+ u64_stats_update_end(&rx->statss);
+ gve_free_buffer(rx, buf_state);
+ return;
+}
+
+static int gve_rx_xsk_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
+ const struct gve_rx_compl_desc_dqo *compl_desc,
+ struct gve_rx_buf_state_dqo *buf_state,
+ struct bpf_prog *xprog)
+{
+ struct xdp_buff *xdp = buf_state->xsk_buff;
+ int buf_len = compl_desc->packet_len;
+ struct gve_priv *priv = rx->gve;
+ struct gve_xdp_buff *gve_xdp;
+ int xdp_act;
+
+ xdp->data_end = xdp->data + buf_len;
+ xsk_buff_dma_sync_for_cpu(xdp);
+
+ gve_xdp = (void *)xdp;
+ gve_xdp->gve = priv;
+ gve_xdp->compl_desc = compl_desc;
+
+ if (xprog) {
+ xdp_act = bpf_prog_run_xdp(xprog, xdp);
+ buf_len = xdp->data_end - xdp->data;
+ if (xdp_act != XDP_PASS) {
+ gve_xsk_done_dqo(priv, rx, xdp, xprog, xdp_act);
+ gve_free_buf_state(rx, buf_state);
+ return 0;
+ }
+ }
+
+ /* Copy the data to skb */
+ rx->ctx.skb_head = gve_rx_copy_data(priv->dev, napi,
+ xdp->data, buf_len);
+ if (unlikely(!rx->ctx.skb_head)) {
+ xsk_buff_free(xdp);
+ gve_free_buf_state(rx, buf_state);
+ return -ENOMEM;
+ }
+ rx->ctx.skb_tail = rx->ctx.skb_head;
+
+ /* Free XSK buffer and Buffer state */
+ xsk_buff_free(xdp);
+ gve_free_buf_state(rx, buf_state);
+
+ /* Update Stats */
+ u64_stats_update_begin(&rx->statss);
+ rx->xdp_actions[XDP_PASS]++;
+ u64_stats_update_end(&rx->statss);
return 0;
}
+static void gve_dma_sync(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct gve_rx_buf_state_dqo *buf_state, u16 buf_len)
+{
+ struct gve_rx_slot_page_info *page_info = &buf_state->page_info;
+
+ if (rx->dqo.page_pool) {
+ page_pool_dma_sync_netmem_for_cpu(rx->dqo.page_pool,
+ page_info->netmem,
+ page_info->page_offset,
+ buf_len);
+ } else {
+ dma_sync_single_range_for_cpu(&priv->pdev->dev, buf_state->addr,
+ page_info->page_offset +
+ page_info->pad,
+ buf_len, DMA_FROM_DEVICE);
+ }
+}
+
/* Returns 0 if descriptor is completed successfully.
* Returns -EINVAL if descriptor is invalid.
* Returns -ENOMEM if data cannot be copied to skb.
@@ -689,6 +786,7 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
const bool hsplit = compl_desc->split_header;
struct gve_rx_buf_state_dqo *buf_state;
struct gve_priv *priv = rx->gve;
+ struct bpf_prog *xprog;
u16 buf_len;
u16 hdr_len;
@@ -705,18 +803,26 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
}
if (unlikely(compl_desc->rx_error)) {
- gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states,
- buf_state);
+ gve_free_buffer(rx, buf_state);
return -EINVAL;
}
buf_len = compl_desc->packet_len;
hdr_len = compl_desc->header_len;
+ xprog = READ_ONCE(priv->xdp_prog);
+ if (buf_state->xsk_buff)
+ return gve_rx_xsk_dqo(napi, rx, compl_desc, buf_state, xprog);
+
	/* Page might not have been used for a while and was likely last written
* by a different thread.
*/
- prefetch(buf_state->page_info.page);
+ if (rx->dqo.page_pool) {
+ if (!netmem_is_net_iov(buf_state->page_info.netmem))
+ prefetch(netmem_to_page(buf_state->page_info.netmem));
+ } else {
+ prefetch(buf_state->page_info.page);
+ }
/* Copy the header into the skb in the case of header split */
if (hsplit) {
@@ -730,6 +836,9 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
if (unlikely(!rx->ctx.skb_head))
goto error;
rx->ctx.skb_tail = rx->ctx.skb_head;
+
+ if (rx->dqo.page_pool)
+ skb_mark_for_recycle(rx->ctx.skb_head);
} else {
unsplit = 1;
}
@@ -738,12 +847,18 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
rx->rx_hsplit_unsplit_pkt += unsplit;
rx->rx_hsplit_bytes += hdr_len;
u64_stats_update_end(&rx->statss);
+ } else if (!rx->ctx.skb_head && rx->dqo.page_pool &&
+ netmem_is_net_iov(buf_state->page_info.netmem)) {
+		/* When header split is disabled, the header lands in the packet
+		 * buffer. If the packet buffer is a net_iov, it cannot easily be
+		 * mapped into kernel space to access the header required to
+		 * process the packet.
+ */
+ goto error;
}
/* Sync the portion of dma buffer for CPU to read. */
- dma_sync_single_range_for_cpu(&priv->pdev->dev, buf_state->addr,
- buf_state->page_info.page_offset,
- buf_len, DMA_FROM_DEVICE);
+ gve_dma_sync(priv, rx, buf_state, buf_len);
/* Append to current skb if one exists. */
if (rx->ctx.skb_head) {
@@ -754,7 +869,39 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
return 0;
}
- if (eop && buf_len <= priv->rx_copybreak) {
+ if (xprog) {
+ struct gve_xdp_buff gve_xdp;
+ void *old_data;
+ int xdp_act;
+
+ xdp_init_buff(&gve_xdp.xdp, buf_state->page_info.buf_size,
+ &rx->xdp_rxq);
+ xdp_prepare_buff(&gve_xdp.xdp,
+ buf_state->page_info.page_address +
+ buf_state->page_info.page_offset,
+ buf_state->page_info.pad,
+ buf_len, false);
+ gve_xdp.gve = priv;
+ gve_xdp.compl_desc = compl_desc;
+
+ old_data = gve_xdp.xdp.data;
+ xdp_act = bpf_prog_run_xdp(xprog, &gve_xdp.xdp);
+ buf_state->page_info.pad += gve_xdp.xdp.data - old_data;
+ buf_len = gve_xdp.xdp.data_end - gve_xdp.xdp.data;
+ if (xdp_act != XDP_PASS) {
+ gve_xdp_done_dqo(priv, rx, &gve_xdp.xdp, xprog, xdp_act,
+ buf_state);
+ return 0;
+ }
+
+ u64_stats_update_begin(&rx->statss);
+ rx->xdp_actions[XDP_PASS]++;
+ u64_stats_update_end(&rx->statss);
+ }
+
+ if (eop && buf_len <= priv->rx_copybreak &&
+ !(rx->dqo.page_pool &&
+ netmem_is_net_iov(buf_state->page_info.netmem))) {
rx->ctx.skb_head = gve_rx_copy(priv->dev, napi,
&buf_state->page_info, buf_len);
if (unlikely(!rx->ctx.skb_head))
@@ -766,8 +913,7 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
rx->rx_copybreak_pkt++;
u64_stats_update_end(&rx->statss);
- gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states,
- buf_state);
+ gve_free_buffer(rx, buf_state);
return 0;
}
@@ -782,16 +928,15 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
return 0;
}
- skb_add_rx_frag(rx->ctx.skb_head, 0, buf_state->page_info.page,
- buf_state->page_info.page_offset, buf_len,
- priv->data_buffer_size_dqo);
- gve_dec_pagecnt_bias(&buf_state->page_info);
+ if (rx->dqo.page_pool)
+ skb_mark_for_recycle(rx->ctx.skb_head);
- gve_try_recycle_buf(priv, rx, buf_state);
+ gve_skb_add_rx_frag(rx, buf_state, 0, buf_len);
+ gve_reuse_buffer(rx, buf_state);
return 0;
error:
- gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, buf_state);
+ gve_free_buffer(rx, buf_state);
return -ENOMEM;
}
@@ -837,6 +982,9 @@ static int gve_rx_complete_skb(struct gve_rx_ring *rx, struct napi_struct *napi,
if (feat & NETIF_F_RXCSUM)
gve_rx_skb_csum(rx->ctx.skb_head, desc, ptype);
+ if (rx->gve->ts_config.rx_filter == HWTSTAMP_FILTER_ALL)
+ gve_rx_skb_hwtstamp(rx, desc);
+
/* RSC packets must set gso_size otherwise the TCP stack will complain
* that packets are larger than MTU.
*/
@@ -856,16 +1004,27 @@ static int gve_rx_complete_skb(struct gve_rx_ring *rx, struct napi_struct *napi,
int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
{
- struct napi_struct *napi = &block->napi;
- netdev_features_t feat = napi->dev->features;
-
- struct gve_rx_ring *rx = block->rx;
- struct gve_rx_compl_queue_dqo *complq = &rx->dqo.complq;
-
+ struct gve_rx_compl_queue_dqo *complq;
+ struct napi_struct *napi;
+ netdev_features_t feat;
+ struct gve_rx_ring *rx;
+ struct gve_priv *priv;
+ u64 xdp_redirects;
u32 work_done = 0;
u64 bytes = 0;
+ u64 xdp_txs;
int err;
+ napi = &block->napi;
+ feat = napi->dev->features;
+
+ rx = block->rx;
+ priv = rx->gve;
+ complq = &rx->dqo.complq;
+
+ xdp_redirects = rx->xdp_actions[XDP_REDIRECT];
+ xdp_txs = rx->xdp_actions[XDP_TX];
+
while (work_done < budget) {
struct gve_rx_compl_desc_dqo *compl_desc =
&complq->desc_ring[complq->head];
@@ -884,7 +1043,7 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
err = gve_rx_dqo(napi, rx, compl_desc, complq->head, rx->q_num);
if (err < 0) {
- gve_rx_free_skb(rx);
+ gve_rx_free_skb(napi, rx);
u64_stats_update_begin(&rx->statss);
if (err == -ENOMEM)
rx->rx_skb_alloc_fail++;
@@ -927,7 +1086,7 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
/* gve_rx_complete_skb() will consume skb if successful */
if (gve_rx_complete_skb(rx, napi, compl_desc, feat) != 0) {
- gve_rx_free_skb(rx);
+ gve_rx_free_skb(napi, rx);
u64_stats_update_begin(&rx->statss);
rx->rx_desc_err_dropped_pkt++;
u64_stats_update_end(&rx->statss);
@@ -939,6 +1098,12 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
rx->ctx.skb_tail = NULL;
}
+ if (xdp_txs != rx->xdp_actions[XDP_TX])
+ gve_xdp_tx_flush_dqo(priv, rx->q_num);
+
+ if (xdp_redirects != rx->xdp_actions[XDP_REDIRECT])
+ xdp_do_flush();
+
gve_rx_post_buffers_dqo(rx);
u64_stats_update_begin(&rx->statss);
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index 4b9853adc113..97efc8d27e6f 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -158,15 +158,16 @@ static int gve_clean_xdp_done(struct gve_priv *priv, struct gve_tx_ring *tx,
u32 to_do)
{
struct gve_tx_buffer_state *info;
- u32 clean_end = tx->done + to_do;
u64 pkts = 0, bytes = 0;
size_t space_freed = 0;
u32 xsk_complete = 0;
u32 idx;
+ int i;
- for (; tx->done < clean_end; tx->done++) {
+ for (i = 0; i < to_do; i++) {
idx = tx->done & tx->mask;
info = &tx->info[idx];
+ tx->done++;
if (unlikely(!info->xdp.size))
continue;
@@ -205,7 +206,10 @@ void gve_tx_stop_ring_gqi(struct gve_priv *priv, int idx)
return;
gve_remove_napi(priv, ntfy_idx);
- gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false);
+ if (tx->q_num < priv->tx_cfg.num_queues)
+ gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false);
+ else
+ gve_clean_xdp_done(priv, tx, priv->tx_desc_cnt);
netdev_tx_reset_queue(tx->netdev_txq);
gve_tx_remove_from_block(priv, idx);
}
@@ -216,6 +220,7 @@ static void gve_tx_free_ring_gqi(struct gve_priv *priv, struct gve_tx_ring *tx,
struct device *hdev = &priv->pdev->dev;
int idx = tx->q_num;
size_t bytes;
+ u32 qpl_id;
u32 slots;
slots = tx->mask + 1;
@@ -223,9 +228,12 @@ static void gve_tx_free_ring_gqi(struct gve_priv *priv, struct gve_tx_ring *tx,
tx->q_resources, tx->q_resources_bus);
tx->q_resources = NULL;
- if (!tx->raw_addressing) {
- gve_tx_fifo_release(priv, &tx->tx_fifo);
- gve_unassign_qpl(cfg->qpl_cfg, tx->tx_fifo.qpl->id);
+ if (tx->tx_fifo.qpl) {
+ if (tx->tx_fifo.base)
+ gve_tx_fifo_release(priv, &tx->tx_fifo);
+
+ qpl_id = gve_tx_qpl_id(priv, tx->q_num);
+ gve_free_queue_page_list(priv, tx->tx_fifo.qpl, qpl_id);
tx->tx_fifo.qpl = NULL;
}
@@ -256,6 +264,8 @@ static int gve_tx_alloc_ring_gqi(struct gve_priv *priv,
int idx)
{
struct device *hdev = &priv->pdev->dev;
+ int qpl_page_cnt;
+ u32 qpl_id = 0;
size_t bytes;
/* Make sure everything is zeroed to start */
@@ -280,9 +290,14 @@ static int gve_tx_alloc_ring_gqi(struct gve_priv *priv,
tx->raw_addressing = cfg->raw_addressing;
tx->dev = hdev;
if (!tx->raw_addressing) {
- tx->tx_fifo.qpl = gve_assign_tx_qpl(cfg, idx);
+ qpl_id = gve_tx_qpl_id(priv, tx->q_num);
+ qpl_page_cnt = priv->tx_pages_per_qpl;
+
+ tx->tx_fifo.qpl = gve_alloc_queue_page_list(priv, qpl_id,
+ qpl_page_cnt);
if (!tx->tx_fifo.qpl)
goto abort_with_desc;
+
/* map Tx FIFO */
if (gve_tx_fifo_init(priv, &tx->tx_fifo))
goto abort_with_qpl;
@@ -302,8 +317,10 @@ abort_with_fifo:
if (!tx->raw_addressing)
gve_tx_fifo_release(priv, &tx->tx_fifo);
abort_with_qpl:
- if (!tx->raw_addressing)
- gve_unassign_qpl(cfg->qpl_cfg, tx->tx_fifo.qpl->id);
+ if (!tx->raw_addressing) {
+ gve_free_queue_page_list(priv, tx->tx_fifo.qpl, qpl_id);
+ tx->tx_fifo.qpl = NULL;
+ }
abort_with_desc:
dma_free_coherent(hdev, bytes, tx->desc, tx->bus);
tx->desc = NULL;
@@ -317,33 +334,23 @@ int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *cfg)
{
struct gve_tx_ring *tx = cfg->tx;
+ int total_queues;
int err = 0;
int i, j;
- if (!cfg->raw_addressing && !cfg->qpls) {
- netif_err(priv, drv, priv->dev,
- "Cannot alloc QPL ring before allocing QPLs\n");
- return -EINVAL;
- }
-
- if (cfg->start_idx + cfg->num_rings > cfg->qcfg->max_queues) {
+ total_queues = cfg->qcfg->num_queues + cfg->num_xdp_rings;
+ if (total_queues > cfg->qcfg->max_queues) {
netif_err(priv, drv, priv->dev,
"Cannot alloc more than the max num of Tx rings\n");
return -EINVAL;
}
- if (cfg->start_idx == 0) {
- tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring),
- GFP_KERNEL);
- if (!tx)
- return -ENOMEM;
- } else if (!tx) {
- netif_err(priv, drv, priv->dev,
- "Cannot alloc tx rings from a nonzero start idx without tx array\n");
- return -EINVAL;
- }
+ tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring),
+ GFP_KERNEL);
+ if (!tx)
+ return -ENOMEM;
- for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++) {
+ for (i = 0; i < total_queues; i++) {
err = gve_tx_alloc_ring_gqi(priv, cfg, &tx[i], i);
if (err) {
netif_err(priv, drv, priv->dev,
@@ -359,8 +366,7 @@ int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
cleanup:
for (j = 0; j < i; j++)
gve_tx_free_ring_gqi(priv, &tx[j], cfg);
- if (cfg->start_idx == 0)
- kvfree(tx);
+ kvfree(tx);
return err;
}
@@ -373,13 +379,11 @@ void gve_tx_free_rings_gqi(struct gve_priv *priv,
if (!tx)
return;
- for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++)
+ for (i = 0; i < cfg->qcfg->num_queues + cfg->qcfg->num_xdp_queues; i++)
gve_tx_free_ring_gqi(priv, &tx[i], cfg);
- if (cfg->start_idx == 0) {
- kvfree(tx);
- cfg->tx = NULL;
- }
+ kvfree(tx);
+ cfg->tx = NULL;
}
/* gve_tx_avail - Calculates the number of slots available in the ring
@@ -726,7 +730,9 @@ unmap_drop:
gve_tx_unmap_buf(tx->dev, &tx->info[idx & tx->mask]);
}
drop:
+ u64_stats_update_begin(&tx->statss);
tx->dropped_pkt++;
+ u64_stats_update_end(&tx->statss);
return 0;
}
@@ -819,18 +825,21 @@ static int gve_tx_fill_xdp(struct gve_priv *priv, struct gve_tx_ring *tx,
return ndescs;
}
-int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
- u32 flags)
+int gve_xdp_xmit_gqi(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags)
{
struct gve_priv *priv = netdev_priv(dev);
struct gve_tx_ring *tx;
int i, err = 0, qid;
- if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK) || !priv->xdp_prog)
return -EINVAL;
+ if (!gve_get_napi_enabled(priv))
+ return -ENETDOWN;
+
qid = gve_xdp_tx_queue_id(priv,
- smp_processor_id() % priv->num_xdp_queues);
+ smp_processor_id() % priv->tx_cfg.num_xdp_queues);
tx = &priv->tx[qid];
@@ -945,14 +954,10 @@ static int gve_xsk_tx(struct gve_priv *priv, struct gve_tx_ring *tx,
spin_lock(&tx->xdp_lock);
while (sent < budget) {
- if (!gve_can_tx(tx, GVE_TX_START_THRESH))
+ if (!gve_can_tx(tx, GVE_TX_START_THRESH) ||
+ !xsk_tx_peek_desc(tx->xsk_pool, &desc))
goto out;
- if (!xsk_tx_peek_desc(tx->xsk_pool, &desc)) {
- tx->xdp_xsk_done = tx->xdp_xsk_wakeup;
- goto out;
- }
-
data = xsk_buff_raw_get_data(tx->xsk_pool, desc.addr);
nsegs = gve_tx_fill_xdp(priv, tx, data, desc.len, NULL, true);
tx->req += nsegs;
@@ -967,33 +972,41 @@ out:
return sent;
}
+int gve_xsk_tx_poll(struct gve_notify_block *rx_block, int budget)
+{
+ struct gve_rx_ring *rx = rx_block->rx;
+ struct gve_priv *priv = rx->gve;
+ struct gve_tx_ring *tx;
+ int sent = 0;
+
+ tx = &priv->tx[gve_xdp_tx_queue_id(priv, rx->q_num)];
+ if (tx->xsk_pool) {
+ sent = gve_xsk_tx(priv, tx, budget);
+
+ u64_stats_update_begin(&tx->statss);
+ tx->xdp_xsk_sent += sent;
+ u64_stats_update_end(&tx->statss);
+ if (xsk_uses_need_wakeup(tx->xsk_pool))
+ xsk_set_tx_need_wakeup(tx->xsk_pool);
+ }
+
+ return sent;
+}
+
bool gve_xdp_poll(struct gve_notify_block *block, int budget)
{
struct gve_priv *priv = block->priv;
struct gve_tx_ring *tx = block->tx;
u32 nic_done;
- bool repoll;
u32 to_do;
/* Find out how much work there is to be done */
nic_done = gve_tx_load_event_counter(priv, tx);
to_do = min_t(u32, (nic_done - tx->done), budget);
gve_clean_xdp_done(priv, tx, to_do);
- repoll = nic_done != tx->done;
-
- if (tx->xsk_pool) {
- int sent = gve_xsk_tx(priv, tx, budget);
-
- u64_stats_update_begin(&tx->statss);
- tx->xdp_xsk_sent += sent;
- u64_stats_update_end(&tx->statss);
- repoll |= (sent == budget);
- if (xsk_uses_need_wakeup(tx->xsk_pool))
- xsk_set_tx_need_wakeup(tx->xsk_pool);
- }
/* If we still have work we want to repoll */
- return repoll;
+ return nic_done != tx->done;
}
bool gve_tx_poll(struct gve_notify_block *block, int budget)
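
gve_xdp_xmit_gqi() above spreads redirected frames across the XDP TX queues by taking the current CPU id modulo the number of XDP queues and handing the result to gve_xdp_tx_queue_id(). A tiny standalone sketch of that mapping (userspace C; the assumption that XDP queues are indexed after the regular TX queues is illustrative, taken from the helper's naming rather than shown here):

	#include <stdio.h>

	/* Map a CPU onto one of the XDP TX queues, which are assumed to sit
	 * after the regular TX queues in the ring array.
	 */
	static int xdp_tx_queue_id(int num_tx_queues, int num_xdp_queues, int cpu)
	{
		return num_tx_queues + (cpu % num_xdp_queues);
	}

	int main(void)
	{
		printf("%d\n", xdp_tx_queue_id(8, 4, 10));	/* 8 + (10 % 4) = 10 */
		return 0;
	}
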
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index bc34b6cd3a3e..40b89b3e5a31 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -9,9 +9,11 @@
#include "gve_utils.h"
#include "gve_dqo.h"
#include <net/ip.h>
+#include <linux/bpf.h>
#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
+#include <net/xdp_sock_drv.h>
/* Returns true if tx_bufs are available. */
static bool gve_has_free_tx_qpl_bufs(struct gve_tx_ring *tx, int count)
@@ -110,6 +112,14 @@ static bool gve_has_pending_packet(struct gve_tx_ring *tx)
return false;
}
+void gve_xdp_tx_flush_dqo(struct gve_priv *priv, u32 xdp_qid)
+{
+ u32 tx_qid = gve_xdp_tx_queue_id(priv, xdp_qid);
+ struct gve_tx_ring *tx = &priv->tx[tx_qid];
+
+ gve_tx_put_doorbell_dqo(priv, tx->q_resources, tx->dqo_tx.tail);
+}
+
static struct gve_tx_pending_packet_dqo *
gve_alloc_pending_packet(struct gve_tx_ring *tx)
{
@@ -198,7 +208,8 @@ void gve_tx_stop_ring_dqo(struct gve_priv *priv, int idx)
gve_remove_napi(priv, ntfy_idx);
gve_clean_tx_done_dqo(priv, tx, /*napi=*/NULL);
- netdev_tx_reset_queue(tx->netdev_txq);
+ if (tx->netdev_txq)
+ netdev_tx_reset_queue(tx->netdev_txq);
gve_tx_clean_pending_packets(tx);
gve_tx_remove_from_block(priv, idx);
}
@@ -209,6 +220,7 @@ static void gve_tx_free_ring_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
struct device *hdev = &priv->pdev->dev;
int idx = tx->q_num;
size_t bytes;
+ u32 qpl_id;
if (tx->q_resources) {
dma_free_coherent(hdev, sizeof(*tx->q_resources),
@@ -230,6 +242,9 @@ static void gve_tx_free_ring_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
tx->dqo.tx_ring = NULL;
}
+ kvfree(tx->dqo.xsk_reorder_queue);
+ tx->dqo.xsk_reorder_queue = NULL;
+
kvfree(tx->dqo.pending_packets);
tx->dqo.pending_packets = NULL;
@@ -237,7 +252,8 @@ static void gve_tx_free_ring_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
tx->dqo.tx_qpl_buf_next = NULL;
if (tx->dqo.qpl) {
- gve_unassign_qpl(cfg->qpl_cfg, tx->dqo.qpl->id);
+ qpl_id = gve_tx_qpl_id(priv, tx->q_num);
+ gve_free_queue_page_list(priv, tx->dqo.qpl, qpl_id);
tx->dqo.qpl = NULL;
}
@@ -274,7 +290,8 @@ void gve_tx_start_ring_dqo(struct gve_priv *priv, int idx)
gve_tx_add_to_block(priv, idx);
- tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx);
+ if (idx < priv->tx_cfg.num_queues)
+ tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx);
gve_add_napi(priv, ntfy_idx, gve_napi_poll_dqo);
}
@@ -285,19 +302,20 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv,
{
struct device *hdev = &priv->pdev->dev;
int num_pending_packets;
+ int qpl_page_cnt;
size_t bytes;
+ u32 qpl_id;
int i;
memset(tx, 0, sizeof(*tx));
tx->q_num = idx;
tx->dev = hdev;
+ spin_lock_init(&tx->dqo_tx.xdp_lock);
atomic_set_release(&tx->dqo_compl.hw_tx_head, 0);
/* Queue sizes must be a power of 2 */
tx->mask = cfg->ring_size - 1;
- tx->dqo.complq_mask = priv->queue_format == GVE_DQO_RDA_FORMAT ?
- priv->options_dqo_rda.tx_comp_ring_entries - 1 :
- tx->mask;
+ tx->dqo.complq_mask = tx->mask;
/* The max number of pending packets determines the maximum number of
	 * descriptors which may be written to the completion queue.
@@ -331,6 +349,17 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv,
tx->dqo.pending_packets[tx->dqo.num_pending_packets - 1].next = -1;
atomic_set_release(&tx->dqo_compl.free_pending_packets, -1);
+
+ /* Only alloc xsk pool for XDP queues */
+ if (idx >= cfg->qcfg->num_queues && cfg->num_xdp_rings) {
+ tx->dqo.xsk_reorder_queue =
+ kvcalloc(tx->dqo.complq_mask + 1,
+ sizeof(tx->dqo.xsk_reorder_queue[0]),
+ GFP_KERNEL);
+ if (!tx->dqo.xsk_reorder_queue)
+ goto err;
+ }
+
tx->dqo_compl.miss_completions.head = -1;
tx->dqo_compl.miss_completions.tail = -1;
tx->dqo_compl.timed_out_completions.head = -1;
@@ -354,7 +383,11 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv,
goto err;
if (!cfg->raw_addressing) {
- tx->dqo.qpl = gve_assign_tx_qpl(cfg, idx);
+ qpl_id = gve_tx_qpl_id(priv, tx->q_num);
+ qpl_page_cnt = priv->tx_pages_per_qpl;
+
+ tx->dqo.qpl = gve_alloc_queue_page_list(priv, qpl_id,
+ qpl_page_cnt);
if (!tx->dqo.qpl)
goto err;
@@ -373,33 +406,23 @@ int gve_tx_alloc_rings_dqo(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *cfg)
{
struct gve_tx_ring *tx = cfg->tx;
+ int total_queues;
int err = 0;
int i, j;
- if (!cfg->raw_addressing && !cfg->qpls) {
- netif_err(priv, drv, priv->dev,
- "Cannot alloc QPL ring before allocing QPLs\n");
- return -EINVAL;
- }
-
- if (cfg->start_idx + cfg->num_rings > cfg->qcfg->max_queues) {
+ total_queues = cfg->qcfg->num_queues + cfg->num_xdp_rings;
+ if (total_queues > cfg->qcfg->max_queues) {
netif_err(priv, drv, priv->dev,
"Cannot alloc more than the max num of Tx rings\n");
return -EINVAL;
}
- if (cfg->start_idx == 0) {
- tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring),
- GFP_KERNEL);
- if (!tx)
- return -ENOMEM;
- } else if (!tx) {
- netif_err(priv, drv, priv->dev,
- "Cannot alloc tx rings from a nonzero start idx without tx array\n");
- return -EINVAL;
- }
+ tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring),
+ GFP_KERNEL);
+ if (!tx)
+ return -ENOMEM;
- for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++) {
+ for (i = 0; i < total_queues; i++) {
err = gve_tx_alloc_ring_dqo(priv, cfg, &tx[i], i);
if (err) {
netif_err(priv, drv, priv->dev,
@@ -415,8 +438,7 @@ int gve_tx_alloc_rings_dqo(struct gve_priv *priv,
err:
for (j = 0; j < i; j++)
gve_tx_free_ring_dqo(priv, &tx[j], cfg);
- if (cfg->start_idx == 0)
- kvfree(tx);
+ kvfree(tx);
return err;
}
@@ -429,13 +451,11 @@ void gve_tx_free_rings_dqo(struct gve_priv *priv,
if (!tx)
return;
- for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++)
+ for (i = 0; i < cfg->qcfg->num_queues + cfg->qcfg->num_xdp_queues; i++)
gve_tx_free_ring_dqo(priv, &tx[i], cfg);
- if (cfg->start_idx == 0) {
- kvfree(tx);
- cfg->tx = NULL;
- }
+ kvfree(tx);
+ cfg->tx = NULL;
}
/* Returns the number of slots available in the ring */
@@ -446,12 +466,28 @@ static u32 num_avail_tx_slots(const struct gve_tx_ring *tx)
return tx->mask - num_used;
}
+/* Checks if the requested number of slots is available in the ring */
+static bool gve_has_tx_slots_available(struct gve_tx_ring *tx, u32 slots_req)
+{
+ u32 num_avail = num_avail_tx_slots(tx);
+
+ slots_req += GVE_TX_MIN_DESC_PREVENT_CACHE_OVERLAP;
+
+ if (num_avail >= slots_req)
+ return true;
+
+ /* Update cached TX head pointer */
+ tx->dqo_tx.head = atomic_read_acquire(&tx->dqo_compl.hw_tx_head);
+
+ return num_avail_tx_slots(tx) >= slots_req;
+}
+
static bool gve_has_avail_slots_tx_dqo(struct gve_tx_ring *tx,
int desc_count, int buf_count)
{
return gve_has_pending_packet(tx) &&
- num_avail_tx_slots(tx) >= desc_count &&
- gve_has_free_tx_qpl_bufs(tx, buf_count);
+ gve_has_tx_slots_available(tx, desc_count) &&
+ gve_has_free_tx_qpl_bufs(tx, buf_count);
}
/* Stops the queue if available descriptors is less than 'count'.
@@ -463,12 +499,6 @@ static int gve_maybe_stop_tx_dqo(struct gve_tx_ring *tx,
if (likely(gve_has_avail_slots_tx_dqo(tx, desc_count, buf_count)))
return 0;
- /* Update cached TX head pointer */
- tx->dqo_tx.head = atomic_read_acquire(&tx->dqo_compl.hw_tx_head);
-
- if (likely(gve_has_avail_slots_tx_dqo(tx, desc_count, buf_count)))
- return 0;
-
/* No space, so stop the queue */
tx->stop_queue++;
netif_tx_stop_queue(tx->netdev_txq);
@@ -479,8 +509,6 @@ static int gve_maybe_stop_tx_dqo(struct gve_tx_ring *tx,
/* After stopping queue, check if we can transmit again in order to
* avoid TOCTOU bug.
*/
- tx->dqo_tx.head = atomic_read_acquire(&tx->dqo_compl.hw_tx_head);
-
if (likely(!gve_has_avail_slots_tx_dqo(tx, desc_count, buf_count)))
return -EBUSY;
@@ -507,11 +535,9 @@ static void gve_extract_tx_metadata_dqo(const struct sk_buff *skb,
}
static void gve_tx_fill_pkt_desc_dqo(struct gve_tx_ring *tx, u32 *desc_idx,
- struct sk_buff *skb, u32 len, u64 addr,
+ bool enable_csum, u32 len, u64 addr,
s16 compl_tag, bool eop, bool is_gso)
{
- const bool checksum_offload_en = skb->ip_summed == CHECKSUM_PARTIAL;
-
while (len > 0) {
struct gve_tx_pkt_desc_dqo *desc =
&tx->dqo.tx_ring[*desc_idx].pkt;
@@ -522,7 +548,7 @@ static void gve_tx_fill_pkt_desc_dqo(struct gve_tx_ring *tx, u32 *desc_idx,
.buf_addr = cpu_to_le64(addr),
.dtype = GVE_TX_PKT_DESC_DTYPE_DQO,
.end_of_packet = cur_eop,
- .checksum_offload_enable = checksum_offload_en,
+ .checksum_offload_enable = enable_csum,
.compl_tag = cpu_to_le16(compl_tag),
.buf_size = cur_len,
};
@@ -555,28 +581,18 @@ static int gve_prep_tso(struct sk_buff *skb)
if (unlikely(skb_shinfo(skb)->gso_size < GVE_TX_MIN_TSO_MSS_DQO))
return -1;
+ if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
+ return -EINVAL;
+
/* Needed because we will modify header. */
err = skb_cow_head(skb, 0);
if (err < 0)
return err;
tcp = tcp_hdr(skb);
-
- /* Remove payload length from checksum. */
paylen = skb->len - skb_transport_offset(skb);
-
- switch (skb_shinfo(skb)->gso_type) {
- case SKB_GSO_TCPV4:
- case SKB_GSO_TCPV6:
- csum_replace_by_diff(&tcp->check,
- (__force __wsum)htonl(paylen));
-
- /* Compute length of segmentation header. */
- header_len = skb_tcp_all_headers(skb);
- break;
- default:
- return -EINVAL;
- }
+ csum_replace_by_diff(&tcp->check, (__force __wsum)htonl(paylen));
+ header_len = skb_tcp_all_headers(skb);
if (unlikely(header_len > GVE_TX_MAX_HDR_SIZE_DQO))
return -EINVAL;
@@ -629,6 +645,25 @@ gve_tx_fill_general_ctx_desc(struct gve_tx_general_context_desc_dqo *desc,
};
}
+static void gve_tx_update_tail(struct gve_tx_ring *tx, u32 desc_idx)
+{
+ u32 last_desc_idx = (desc_idx - 1) & tx->mask;
+ u32 last_report_event_interval =
+ (last_desc_idx - tx->dqo_tx.last_re_idx) & tx->mask;
+
+ /* Commit the changes to our state */
+ tx->dqo_tx.tail = desc_idx;
+
+ /* Request a descriptor completion on the last descriptor of the
+ * packet if we are allowed to by the HW enforced interval.
+ */
+
+ if (unlikely(last_report_event_interval >= GVE_TX_MIN_RE_INTERVAL)) {
+ tx->dqo.tx_ring[last_desc_idx].pkt.report_event = true;
+ tx->dqo_tx.last_re_idx = last_desc_idx;
+ }
+}
+
static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
struct sk_buff *skb,
struct gve_tx_pending_packet_dqo *pkt,
@@ -636,6 +671,7 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
u32 *desc_idx,
bool is_gso)
{
+ bool enable_csum = skb->ip_summed == CHECKSUM_PARTIAL;
const struct skb_shared_info *shinfo = skb_shinfo(skb);
int i;
@@ -661,7 +697,7 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr);
++pkt->num_bufs;
- gve_tx_fill_pkt_desc_dqo(tx, desc_idx, skb, len, addr,
+ gve_tx_fill_pkt_desc_dqo(tx, desc_idx, enable_csum, len, addr,
completion_tag,
/*eop=*/shinfo->nr_frags == 0, is_gso);
}
@@ -677,10 +713,11 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
goto err;
dma_unmap_len_set(pkt, len[pkt->num_bufs], len);
- dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr);
+ netmem_dma_unmap_addr_set(skb_frag_netmem(frag), pkt,
+ dma[pkt->num_bufs], addr);
++pkt->num_bufs;
- gve_tx_fill_pkt_desc_dqo(tx, desc_idx, skb, len, addr,
+ gve_tx_fill_pkt_desc_dqo(tx, desc_idx, enable_csum, len, addr,
completion_tag, is_eop, is_gso);
}
@@ -725,6 +762,7 @@ static int gve_tx_add_skb_copy_dqo(struct gve_tx_ring *tx,
u32 *desc_idx,
bool is_gso)
{
+ bool enable_csum = skb->ip_summed == CHECKSUM_PARTIAL;
u32 copy_offset = 0;
dma_addr_t dma_addr;
u32 copy_len;
@@ -746,7 +784,7 @@ static int gve_tx_add_skb_copy_dqo(struct gve_tx_ring *tx,
copy_offset += copy_len;
dma_sync_single_for_device(tx->dev, dma_addr,
copy_len, DMA_TO_DEVICE);
- gve_tx_fill_pkt_desc_dqo(tx, desc_idx, skb,
+ gve_tx_fill_pkt_desc_dqo(tx, desc_idx, enable_csum,
copy_len,
dma_addr,
completion_tag,
@@ -780,7 +818,11 @@ static int gve_tx_add_skb_dqo(struct gve_tx_ring *tx,
s16 completion_tag;
pkt = gve_alloc_pending_packet(tx);
+ if (!pkt)
+ return -ENOMEM;
+
pkt->skb = skb;
+ pkt->type = GVE_TX_PENDING_PACKET_DQO_SKB;
completion_tag = pkt - tx->dqo.pending_packets;
gve_extract_tx_metadata_dqo(skb, &metadata);
@@ -813,24 +855,7 @@ static int gve_tx_add_skb_dqo(struct gve_tx_ring *tx,
tx->dqo_tx.posted_packet_desc_cnt += pkt->num_bufs;
- /* Commit the changes to our state */
- tx->dqo_tx.tail = desc_idx;
-
- /* Request a descriptor completion on the last descriptor of the
- * packet if we are allowed to by the HW enforced interval.
- */
- {
- u32 last_desc_idx = (desc_idx - 1) & tx->mask;
- u32 last_report_event_interval =
- (last_desc_idx - tx->dqo_tx.last_re_idx) & tx->mask;
-
- if (unlikely(last_report_event_interval >=
- GVE_TX_MIN_RE_INTERVAL)) {
- tx->dqo.tx_ring[last_desc_idx].pkt.report_event = true;
- tx->dqo_tx.last_re_idx = last_desc_idx;
- }
- }
-
+ gve_tx_update_tail(tx, desc_idx);
return 0;
err:
@@ -876,22 +901,42 @@ static bool gve_can_send_tso(const struct sk_buff *skb)
const int header_len = skb_tcp_all_headers(skb);
const int gso_size = shinfo->gso_size;
int cur_seg_num_bufs;
+ int prev_frag_size;
int cur_seg_size;
int i;
cur_seg_size = skb_headlen(skb) - header_len;
+ prev_frag_size = skb_headlen(skb);
cur_seg_num_bufs = cur_seg_size > 0;
for (i = 0; i < shinfo->nr_frags; i++) {
if (cur_seg_size >= gso_size) {
cur_seg_size %= gso_size;
cur_seg_num_bufs = cur_seg_size > 0;
+
+ if (prev_frag_size > GVE_TX_MAX_BUF_SIZE_DQO) {
+ int prev_frag_remain = prev_frag_size %
+ GVE_TX_MAX_BUF_SIZE_DQO;
+
+			/* If the last descriptor of the previous frag
+			 * holds fewer bytes than cur_seg_size, the segment
+			 * will span two descriptors in the previous frag.
+ * Since max gso size (9728) is less than
+ * GVE_TX_MAX_BUF_SIZE_DQO, it is impossible
+ * for the segment to span more than two
+ * descriptors.
+ */
+ if (prev_frag_remain &&
+ cur_seg_size > prev_frag_remain)
+ cur_seg_num_bufs++;
+ }
}
if (unlikely(++cur_seg_num_bufs > max_bufs_per_seg))
return false;
- cur_seg_size += skb_frag_size(&shinfo->frags[i]);
+ prev_frag_size = skb_frag_size(&shinfo->frags[i]);
+ cur_seg_size += prev_frag_size;
}
return true;
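
A worked example of the extra-descriptor check added to gve_can_send_tso() above (standalone C; the per-descriptor limit is an assumed illustrative value, not the driver's actual constant):

	#include <stdbool.h>
	#include <stdio.h>

	#define MAX_BUF_SIZE_ASSUMED	16384	/* illustrative descriptor limit */

	int main(void)
	{
		int prev_frag_size = 20000;	/* frag split into 16384 + 3616 */
		int cur_seg_size = 5000;	/* bytes the TSO segment still needs */
		int prev_frag_remain = prev_frag_size % MAX_BUF_SIZE_ASSUMED; /* 3616 */

		/* The segment consumes the 3616-byte tail descriptor of the
		 * previous frag and spills into the next frag, so it needs one
		 * extra buffer descriptor beyond the usual count.
		 */
		bool extra_desc = prev_frag_remain && cur_seg_size > prev_frag_remain;

		printf("extra descriptor needed: %s\n", extra_desc ? "yes" : "no");
		return 0;
	}
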
@@ -944,9 +989,8 @@ static int gve_try_tx_skb(struct gve_priv *priv, struct gve_tx_ring *tx,
/* Metadata + (optional TSO) + data descriptors. */
total_num_descs = 1 + skb_is_gso(skb) + num_buffer_descs;
- if (unlikely(gve_maybe_stop_tx_dqo(tx, total_num_descs +
- GVE_TX_MIN_DESC_PREVENT_CACHE_OVERLAP,
- num_buffer_descs))) {
+ if (unlikely(gve_maybe_stop_tx_dqo(tx, total_num_descs,
+ num_buffer_descs))) {
return -1;
}
@@ -958,11 +1002,45 @@ static int gve_try_tx_skb(struct gve_priv *priv, struct gve_tx_ring *tx,
return 0;
drop:
+ u64_stats_update_begin(&tx->statss);
tx->dropped_pkt++;
+ u64_stats_update_end(&tx->statss);
dev_kfree_skb_any(skb);
return 0;
}
+static void gve_xsk_reorder_queue_push_dqo(struct gve_tx_ring *tx,
+ u16 completion_tag)
+{
+ u32 tail = atomic_read(&tx->dqo_tx.xsk_reorder_queue_tail);
+
+ tx->dqo.xsk_reorder_queue[tail] = completion_tag;
+ tail = (tail + 1) & tx->dqo.complq_mask;
+ atomic_set_release(&tx->dqo_tx.xsk_reorder_queue_tail, tail);
+}
+
+static struct gve_tx_pending_packet_dqo *
+gve_xsk_reorder_queue_head(struct gve_tx_ring *tx)
+{
+ u32 head = tx->dqo_compl.xsk_reorder_queue_head;
+
+ if (head == tx->dqo_compl.xsk_reorder_queue_tail) {
+ tx->dqo_compl.xsk_reorder_queue_tail =
+ atomic_read_acquire(&tx->dqo_tx.xsk_reorder_queue_tail);
+
+ if (head == tx->dqo_compl.xsk_reorder_queue_tail)
+ return NULL;
+ }
+
+ return &tx->dqo.pending_packets[tx->dqo.xsk_reorder_queue[head]];
+}
+
+static void gve_xsk_reorder_queue_pop_dqo(struct gve_tx_ring *tx)
+{
+ tx->dqo_compl.xsk_reorder_queue_head++;
+ tx->dqo_compl.xsk_reorder_queue_head &= tx->dqo.complq_mask;
+}
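
The XSK reorder queue added above is a single-producer/single-consumer ring: the TX path publishes completion tags with atomic_set_release(), and the completion path refreshes its cached tail with atomic_read_acquire() only when the ring looks empty. A standalone sketch of that ordering pattern (C11 atomics, hypothetical names; a sketch of the idea, not the driver's implementation):

	#include <stdatomic.h>
	#include <stdint.h>

	#define RING_SLOTS	8		/* power of two, illustrative */
	#define RING_MASK	(RING_SLOTS - 1)

	static uint16_t ring[RING_SLOTS];
	static _Atomic uint32_t tail;		/* written by the producer */
	static uint32_t cached_tail;		/* consumer's snapshot */
	static uint32_t head;			/* owned by the consumer */

	/* Producer: fill the slot, then publish it with a release store.
	 * The caller is assumed to bound occupancy (the driver does so via
	 * its pending-packet pool), so overflow is not handled here.
	 */
	static void ring_push(uint16_t tag)
	{
		uint32_t t = atomic_load_explicit(&tail, memory_order_relaxed);

		ring[t] = tag;
		atomic_store_explicit(&tail, (t + 1) & RING_MASK,
				      memory_order_release);
	}

	/* Consumer: re-read the shared tail with acquire ordering only when
	 * the cached view says the ring is empty.
	 */
	static int ring_peek(uint16_t *tag)
	{
		if (head == cached_tail) {
			cached_tail = atomic_load_explicit(&tail,
							   memory_order_acquire);
			if (head == cached_tail)
				return 0;	/* still empty */
		}

		*tag = ring[head];
		return 1;
	}

	static void ring_pop(void)
	{
		head = (head + 1) & RING_MASK;
	}

	int main(void)
	{
		uint16_t tag = 0;

		ring_push(42);
		if (!ring_peek(&tag) || tag != 42)
			return 1;
		ring_pop();
		return 0;
	}
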
+
/* Transmit a given skb and ring the doorbell. */
netdev_tx_t gve_tx_dqo(struct sk_buff *skb, struct net_device *dev)
{
@@ -986,6 +1064,62 @@ netdev_tx_t gve_tx_dqo(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
+static bool gve_xsk_tx_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
+ int budget)
+{
+ struct xsk_buff_pool *pool = tx->xsk_pool;
+ struct xdp_desc desc;
+ bool repoll = false;
+ int sent = 0;
+
+ spin_lock(&tx->dqo_tx.xdp_lock);
+ for (; sent < budget; sent++) {
+ struct gve_tx_pending_packet_dqo *pkt;
+ s16 completion_tag;
+ dma_addr_t addr;
+ u32 desc_idx;
+
+ if (unlikely(!gve_has_avail_slots_tx_dqo(tx, 1, 1))) {
+ repoll = true;
+ break;
+ }
+
+ if (!xsk_tx_peek_desc(pool, &desc))
+ break;
+
+ pkt = gve_alloc_pending_packet(tx);
+ pkt->type = GVE_TX_PENDING_PACKET_DQO_XSK;
+ pkt->num_bufs = 0;
+ completion_tag = pkt - tx->dqo.pending_packets;
+
+ addr = xsk_buff_raw_get_dma(pool, desc.addr);
+ xsk_buff_raw_dma_sync_for_device(pool, addr, desc.len);
+
+ desc_idx = tx->dqo_tx.tail;
+ gve_tx_fill_pkt_desc_dqo(tx, &desc_idx,
+ true, desc.len,
+ addr, completion_tag, true,
+ false);
+ ++pkt->num_bufs;
+ gve_tx_update_tail(tx, desc_idx);
+ tx->dqo_tx.posted_packet_desc_cnt += pkt->num_bufs;
+ gve_xsk_reorder_queue_push_dqo(tx, completion_tag);
+ }
+
+ if (sent) {
+ gve_tx_put_doorbell_dqo(priv, tx->q_resources, tx->dqo_tx.tail);
+ xsk_tx_release(pool);
+ }
+
+ spin_unlock(&tx->dqo_tx.xdp_lock);
+
+ u64_stats_update_begin(&tx->statss);
+ tx->xdp_xsk_sent += sent;
+ u64_stats_update_end(&tx->statss);
+
+ return (sent == budget) || repoll;
+}
+
static void add_to_list(struct gve_tx_ring *tx, struct gve_index_list *list,
struct gve_tx_pending_packet_dqo *pending_packet)
{
@@ -1035,8 +1169,9 @@ static void gve_unmap_packet(struct device *dev,
dma_unmap_single(dev, dma_unmap_addr(pkt, dma[0]),
dma_unmap_len(pkt, len[0]), DMA_TO_DEVICE);
for (i = 1; i < pkt->num_bufs; i++) {
- dma_unmap_page(dev, dma_unmap_addr(pkt, dma[i]),
- dma_unmap_len(pkt, len[i]), DMA_TO_DEVICE);
+ netmem_dma_unmap_page_attrs(dev, dma_unmap_addr(pkt, dma[i]),
+ dma_unmap_len(pkt, len[i]),
+ DMA_TO_DEVICE, 0);
}
pkt->num_bufs = 0;
}
@@ -1099,16 +1234,35 @@ static void gve_handle_packet_completion(struct gve_priv *priv,
}
}
tx->dqo_tx.completed_packet_desc_cnt += pending_packet->num_bufs;
- if (tx->dqo.qpl)
- gve_free_tx_qpl_bufs(tx, pending_packet);
- else
+
+ switch (pending_packet->type) {
+ case GVE_TX_PENDING_PACKET_DQO_SKB:
+ if (tx->dqo.qpl)
+ gve_free_tx_qpl_bufs(tx, pending_packet);
+ else
+ gve_unmap_packet(tx->dev, pending_packet);
+ (*pkts)++;
+ *bytes += pending_packet->skb->len;
+
+ napi_consume_skb(pending_packet->skb, is_napi);
+ pending_packet->skb = NULL;
+ gve_free_pending_packet(tx, pending_packet);
+ break;
+ case GVE_TX_PENDING_PACKET_DQO_XDP_FRAME:
gve_unmap_packet(tx->dev, pending_packet);
+ (*pkts)++;
+ *bytes += pending_packet->xdpf->len;
- *bytes += pending_packet->skb->len;
- (*pkts)++;
- napi_consume_skb(pending_packet->skb, is_napi);
- pending_packet->skb = NULL;
- gve_free_pending_packet(tx, pending_packet);
+ xdp_return_frame(pending_packet->xdpf);
+ pending_packet->xdpf = NULL;
+ gve_free_pending_packet(tx, pending_packet);
+ break;
+ case GVE_TX_PENDING_PACKET_DQO_XSK:
+ pending_packet->state = GVE_PACKET_STATE_XSK_COMPLETE;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
}
static void gve_handle_miss_completion(struct gve_priv *priv,
@@ -1136,8 +1290,7 @@ static void gve_handle_miss_completion(struct gve_priv *priv,
/* jiffies can wraparound but time comparisons can handle overflows. */
pending_packet->timeout_jiffies =
jiffies +
- msecs_to_jiffies(GVE_REINJECT_COMPL_TIMEOUT *
- MSEC_PER_SEC);
+ secs_to_jiffies(GVE_REINJECT_COMPL_TIMEOUT);
add_to_list(tx, &tx->dqo_compl.miss_completions, pending_packet);
*bytes += pending_packet->skb->len;
@@ -1173,7 +1326,11 @@ static void remove_miss_completions(struct gve_priv *priv,
/* This indicates the packet was dropped. */
dev_kfree_skb_any(pending_packet->skb);
pending_packet->skb = NULL;
+
+ u64_stats_update_begin(&tx->statss);
tx->dropped_pkt++;
+ u64_stats_update_end(&tx->statss);
+
net_err_ratelimited("%s: No reinjection completion was received for: %d.\n",
priv->dev->name,
(int)(pending_packet - tx->dqo.pending_packets));
@@ -1181,8 +1338,7 @@ static void remove_miss_completions(struct gve_priv *priv,
pending_packet->state = GVE_PACKET_STATE_TIMED_OUT_COMPL;
pending_packet->timeout_jiffies =
jiffies +
- msecs_to_jiffies(GVE_DEALLOCATE_COMPL_TIMEOUT *
- MSEC_PER_SEC);
+ secs_to_jiffies(GVE_DEALLOCATE_COMPL_TIMEOUT);
/* Maintain pending packet in another list so the packet can be
* unallocated at a later time.
*/
@@ -1207,8 +1363,34 @@ static void remove_timed_out_completions(struct gve_priv *priv,
remove_from_list(tx, &tx->dqo_compl.timed_out_completions,
pending_packet);
+
+ /* Need to count XSK packets in xsk_tx_completed. */
+ if (pending_packet->type == GVE_TX_PENDING_PACKET_DQO_XSK)
+ pending_packet->state = GVE_PACKET_STATE_XSK_COMPLETE;
+ else
+ gve_free_pending_packet(tx, pending_packet);
+ }
+}
+
+static void gve_tx_process_xsk_completions(struct gve_tx_ring *tx)
+{
+ u32 num_xsks = 0;
+
+ while (true) {
+ struct gve_tx_pending_packet_dqo *pending_packet =
+ gve_xsk_reorder_queue_head(tx);
+
+ if (!pending_packet ||
+ pending_packet->state != GVE_PACKET_STATE_XSK_COMPLETE)
+ break;
+
+ num_xsks++;
+ gve_xsk_reorder_queue_pop_dqo(tx);
gve_free_pending_packet(tx, pending_packet);
}
+
+ if (num_xsks)
+ xsk_tx_completed(tx->xsk_pool, num_xsks);
}
int gve_clean_tx_done_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
@@ -1281,13 +1463,17 @@ int gve_clean_tx_done_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
num_descs_cleaned++;
}
- netdev_tx_completed_queue(tx->netdev_txq,
- pkt_compl_pkts + miss_compl_pkts,
- pkt_compl_bytes + miss_compl_bytes);
+ if (tx->netdev_txq)
+ netdev_tx_completed_queue(tx->netdev_txq,
+ pkt_compl_pkts + miss_compl_pkts,
+ pkt_compl_bytes + miss_compl_bytes);
remove_miss_completions(priv, tx);
remove_timed_out_completions(priv, tx);
+ if (tx->xsk_pool)
+ gve_tx_process_xsk_completions(tx);
+
u64_stats_update_begin(&tx->statss);
tx->bytes_done += pkt_compl_bytes + reinject_compl_bytes;
tx->pkt_done += pkt_compl_pkts + reinject_compl_pkts;
@@ -1319,3 +1505,111 @@ bool gve_tx_poll_dqo(struct gve_notify_block *block, bool do_clean)
compl_desc = &tx->dqo.compl_ring[tx->dqo_compl.head];
return compl_desc->generation != tx->dqo_compl.cur_gen_bit;
}
+
+bool gve_xsk_tx_poll_dqo(struct gve_notify_block *rx_block, int budget)
+{
+ struct gve_rx_ring *rx = rx_block->rx;
+ struct gve_priv *priv = rx->gve;
+ struct gve_tx_ring *tx;
+
+ tx = &priv->tx[gve_xdp_tx_queue_id(priv, rx->q_num)];
+ if (tx->xsk_pool)
+ return gve_xsk_tx_dqo(priv, tx, budget);
+
+ return false;
+}
+
+bool gve_xdp_poll_dqo(struct gve_notify_block *block)
+{
+ struct gve_tx_compl_desc *compl_desc;
+ struct gve_tx_ring *tx = block->tx;
+ struct gve_priv *priv = block->priv;
+
+ gve_clean_tx_done_dqo(priv, tx, &block->napi);
+
+ /* Return true if we still have work. */
+ compl_desc = &tx->dqo.compl_ring[tx->dqo_compl.head];
+ return compl_desc->generation != tx->dqo_compl.cur_gen_bit;
+}
+
+int gve_xdp_xmit_one_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
+ struct xdp_frame *xdpf)
+{
+ struct gve_tx_pending_packet_dqo *pkt;
+ u32 desc_idx = tx->dqo_tx.tail;
+ s16 completion_tag;
+ int num_descs = 1;
+ dma_addr_t addr;
+ int err;
+
+ if (unlikely(!gve_has_tx_slots_available(tx, num_descs)))
+ return -EBUSY;
+
+ pkt = gve_alloc_pending_packet(tx);
+ if (unlikely(!pkt))
+ return -EBUSY;
+
+ pkt->type = GVE_TX_PENDING_PACKET_DQO_XDP_FRAME;
+ pkt->num_bufs = 0;
+ pkt->xdpf = xdpf;
+ completion_tag = pkt - tx->dqo.pending_packets;
+
+ /* Generate Packet Descriptor */
+ addr = dma_map_single(tx->dev, xdpf->data, xdpf->len, DMA_TO_DEVICE);
+ err = dma_mapping_error(tx->dev, addr);
+ if (unlikely(err))
+ goto err;
+
+ dma_unmap_len_set(pkt, len[pkt->num_bufs], xdpf->len);
+ dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr);
+ pkt->num_bufs++;
+
+ gve_tx_fill_pkt_desc_dqo(tx, &desc_idx,
+ false, xdpf->len,
+ addr, completion_tag, true,
+ false);
+
+ gve_tx_update_tail(tx, desc_idx);
+ return 0;
+
+err:
+ pkt->xdpf = NULL;
+ pkt->num_bufs = 0;
+ gve_free_pending_packet(tx, pkt);
+ return err;
+}
+
+int gve_xdp_xmit_dqo(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+ struct gve_tx_ring *tx;
+ int i, err = 0, qid;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ qid = gve_xdp_tx_queue_id(priv,
+ smp_processor_id() % priv->tx_cfg.num_xdp_queues);
+
+ tx = &priv->tx[qid];
+
+ spin_lock(&tx->dqo_tx.xdp_lock);
+ for (i = 0; i < n; i++) {
+ err = gve_xdp_xmit_one_dqo(priv, tx, frames[i]);
+ if (err)
+ break;
+ }
+
+ if (flags & XDP_XMIT_FLUSH)
+ gve_tx_put_doorbell_dqo(priv, tx->q_resources, tx->dqo_tx.tail);
+
+ spin_unlock(&tx->dqo_tx.xdp_lock);
+
+ u64_stats_update_begin(&tx->statss);
+ tx->xdp_xmit += n;
+ tx->xdp_xmit_errors += n - i;
+ u64_stats_update_end(&tx->statss);
+
+ return i ? i : err;
+}
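gve_xdp_xmit_dqo() follows the usual ndo_xdp_xmit contract: it returns the number of frames it consumed, and a negative error only when none were sent. A hedged sketch of a hypothetical caller honouring that contract (dev, frames and n are assumed to exist):

	int sent = dev->netdev_ops->ndo_xdp_xmit(dev, n, frames, XDP_XMIT_FLUSH);
	int i;

	/* frames the driver did not accept must be freed by the caller */
	for (i = max(sent, 0); i < n; i++)
		xdp_return_frame_rx_napi(frames[i]);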
diff --git a/drivers/net/ethernet/google/gve/gve_utils.c b/drivers/net/ethernet/google/gve/gve_utils.c
index 2349750075a5..ace9b8698021 100644
--- a/drivers/net/ethernet/google/gve/gve_utils.c
+++ b/drivers/net/ethernet/google/gve/gve_utils.c
@@ -110,12 +110,13 @@ void gve_add_napi(struct gve_priv *priv, int ntfy_idx,
{
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
- netif_napi_add(priv->dev, &block->napi, gve_poll);
+ netif_napi_add_locked(priv->dev, &block->napi, gve_poll);
+ netif_napi_set_irq_locked(&block->napi, block->irq);
}
void gve_remove_napi(struct gve_priv *priv, int ntfy_idx)
{
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
- netif_napi_del(&block->napi);
+ netif_napi_del_locked(&block->napi);
}
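The _locked NAPI helpers used here assume the caller already holds the netdev instance lock, which the callers of gve_add_napi()/gve_remove_napi() are expected to take. A minimal sketch of the assumed locking around these calls (illustrative only, not gve's own code):

	netdev_lock(dev);
	netif_napi_add_locked(dev, &block->napi, gve_poll);
	netif_napi_set_irq_locked(&block->napi, block->irq);
	netdev_unlock(dev);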
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index 3312e1d93c3b..18eca7d12c20 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -7,7 +7,6 @@ config NET_VENDOR_HISILICON
bool "Hisilicon devices"
default y
depends on OF || ACPI
- depends on ARM || ARM64 || COMPILE_TEST
help
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -18,6 +17,8 @@ config NET_VENDOR_HISILICON
if NET_VENDOR_HISILICON
+if ARM || ARM64 || COMPILE_TEST
+
config HIX5HD2_GMAC
tristate "Hisilicon HIX5HD2 Family Network Device Support"
select PHYLIB
@@ -141,4 +142,21 @@ config HNS3_ENET
endif #HNS3
+endif # ARM || ARM64 || COMPILE_TEST
+
+config HIBMCGE
+ tristate "Hisilicon BMC Gigabit Ethernet Device Support"
+ depends on PCI && PCI_MSI
+ select PHYLIB
+ select FIXED_PHY
+ select MOTORCOMM_PHY
+ select REALTEK_PHY
+ select PAGE_POOL
+ help
+ If you wish to compile a kernel for a BMC with an HIBMC-xx_gmac,
+ say Y here. This driver is suitable for use on boards such as
+ the HIBMC-210.
+
+ If you are unsure, say N.
+
endif # NET_VENDOR_HISILICON
diff --git a/drivers/net/ethernet/hisilicon/Makefile b/drivers/net/ethernet/hisilicon/Makefile
index 7f76d412047a..0e2cadfea8ff 100644
--- a/drivers/net/ethernet/hisilicon/Makefile
+++ b/drivers/net/ethernet/hisilicon/Makefile
@@ -9,3 +9,4 @@ obj-$(CONFIG_HNS_MDIO) += hns_mdio.o
obj-$(CONFIG_HNS) += hns/
obj-$(CONFIG_HNS3) += hns3/
obj-$(CONFIG_HISI_FEMAC) += hisi_femac.o
+obj-$(CONFIG_HIBMCGE) += hibmcge/
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/Makefile b/drivers/net/ethernet/hisilicon/hibmcge/Makefile
new file mode 100644
index 000000000000..d6610ba16855
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Makefile for the HISILICON BMC GE network device drivers.
+#
+
+ccflags-y += -I$(src)
+obj-$(CONFIG_HIBMCGE) += hibmcge.o
+
+hibmcge-objs = hbg_main.o hbg_hw.o hbg_mdio.o hbg_irq.o hbg_txrx.o hbg_ethtool.o \
+ hbg_debugfs.o hbg_err.o hbg_diagnose.o
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
new file mode 100644
index 000000000000..8e134da3e217
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
@@ -0,0 +1,294 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2024 Hisilicon Limited. */
+
+#ifndef __HBG_COMMON_H
+#define __HBG_COMMON_H
+
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <net/page_pool/helpers.h>
+#include "hbg_reg.h"
+
+#define HBG_STATUS_DISABLE 0x0
+#define HBG_STATUS_ENABLE 0x1
+#define HBG_RX_SKIP1 0x00
+#define HBG_RX_SKIP2 0x01
+#define HBG_VECTOR_NUM 4
+#define HBG_PCU_CACHE_LINE_SIZE 32
+#define HBG_TX_TIMEOUT_BUF_LEN 1024
+#define HBG_RX_DESCR 0x01
+#define HBG_NO_PHY 0xFF
+
+#define HBG_PACKET_HEAD_SIZE ((HBG_RX_SKIP1 + HBG_RX_SKIP2 + \
+ HBG_RX_DESCR) * HBG_PCU_CACHE_LINE_SIZE)
+
+enum hbg_dir {
+ HBG_DIR_TX = 1 << 0,
+ HBG_DIR_RX = 1 << 1,
+ HBG_DIR_TX_RX = HBG_DIR_TX | HBG_DIR_RX,
+};
+
+enum hbg_tx_state {
+ HBG_TX_STATE_COMPLETE = 0, /* cleared state, must remain 0 */
+ HBG_TX_STATE_START,
+};
+
+enum hbg_nic_state {
+ HBG_NIC_STATE_EVENT_HANDLING = 0,
+ HBG_NIC_STATE_RESETTING,
+ HBG_NIC_STATE_RESET_FAIL,
+ HBG_NIC_STATE_NEED_RESET, /* trigger a reset in scheduled task */
+ HBG_NIC_STATE_NP_LINK_FAIL,
+};
+
+enum hbg_reset_type {
+ HBG_RESET_TYPE_NONE = 0,
+ HBG_RESET_TYPE_FLR,
+ HBG_RESET_TYPE_FUNCTION,
+};
+
+struct hbg_buffer {
+ u32 state;
+ dma_addr_t state_dma;
+
+ struct sk_buff *skb;
+ dma_addr_t skb_dma;
+ u32 skb_len;
+
+ struct page *page;
+ void *page_addr;
+ dma_addr_t page_dma;
+ u32 page_size;
+ u32 page_offset;
+
+ enum hbg_dir dir;
+ struct hbg_ring *ring;
+ struct hbg_priv *priv;
+};
+
+struct hbg_ring {
+ struct hbg_buffer *queue;
+ dma_addr_t queue_dma;
+
+ union {
+ u32 head;
+ u32 ntc;
+ };
+ union {
+ u32 tail;
+ u32 ntu;
+ };
+ u32 len;
+
+ enum hbg_dir dir;
+ struct hbg_priv *priv;
+ struct napi_struct napi;
+ char *tout_log_buf; /* tx timeout log buffer */
+ struct page_pool *page_pool; /* only for rx */
+};
+
+enum hbg_hw_event_type {
+ HBG_HW_EVENT_NONE = 0,
+ HBG_HW_EVENT_INIT, /* driver is loading */
+ HBG_HW_EVENT_RESET,
+ HBG_HW_EVENT_CORE_RESET,
+};
+
+struct hbg_dev_specs {
+ u32 mac_id;
+ struct sockaddr mac_addr;
+ u32 phy_addr;
+ u32 mdio_frequency;
+ u32 rx_fifo_num;
+ u32 tx_fifo_num;
+ u32 vlan_layers;
+ u32 max_mtu;
+ u32 min_mtu;
+ u32 uc_mac_num;
+
+ u32 max_frame_len;
+ u32 rx_buf_size;
+};
+
+struct hbg_irq_info {
+ const char *name;
+ u32 mask;
+ bool re_enable;
+ bool need_print;
+ bool need_reset;
+
+ void (*irq_handle)(struct hbg_priv *priv,
+ const struct hbg_irq_info *info);
+};
+
+struct hbg_vector {
+ char name[HBG_VECTOR_NUM][32];
+
+ u64 *stats_array;
+ const struct hbg_irq_info *info_array;
+ u32 info_array_len;
+};
+
+struct hbg_mac {
+ struct mii_bus *mdio_bus;
+ struct phy_device *phydev;
+ u8 phy_addr;
+
+ u32 speed;
+ u32 duplex;
+ u32 autoneg;
+ u32 link_status;
+ u32 pause_autoneg;
+};
+
+struct hbg_mac_table_entry {
+ u8 addr[ETH_ALEN];
+};
+
+struct hbg_mac_filter {
+ struct hbg_mac_table_entry *mac_table;
+ u32 table_max_len;
+ bool enabled;
+};
+
+/* saved for restore after reset */
+struct hbg_user_def {
+ struct ethtool_pauseparam pause_param;
+};
+
+struct hbg_stats {
+ u64 rx_desc_drop;
+ u64 rx_desc_l2_err_cnt;
+ u64 rx_desc_pkt_len_err_cnt;
+ u64 rx_desc_l3l4_err_cnt;
+ u64 rx_desc_l3_wrong_head_cnt;
+ u64 rx_desc_l3_csum_err_cnt;
+ u64 rx_desc_l3_len_err_cnt;
+ u64 rx_desc_l3_zero_ttl_cnt;
+ u64 rx_desc_l3_other_cnt;
+ u64 rx_desc_l4_err_cnt;
+ u64 rx_desc_l4_wrong_head_cnt;
+ u64 rx_desc_l4_len_err_cnt;
+ u64 rx_desc_l4_csum_err_cnt;
+ u64 rx_desc_l4_zero_port_num_cnt;
+ u64 rx_desc_l4_other_cnt;
+ u64 rx_desc_frag_cnt;
+ u64 rx_desc_ip_ver_err_cnt;
+ u64 rx_desc_ipv4_pkt_cnt;
+ u64 rx_desc_ipv6_pkt_cnt;
+ u64 rx_desc_no_ip_pkt_cnt;
+ u64 rx_desc_ip_pkt_cnt;
+ u64 rx_desc_tcp_pkt_cnt;
+ u64 rx_desc_udp_pkt_cnt;
+ u64 rx_desc_vlan_pkt_cnt;
+ u64 rx_desc_icmp_pkt_cnt;
+ u64 rx_desc_arp_pkt_cnt;
+ u64 rx_desc_rarp_pkt_cnt;
+ u64 rx_desc_multicast_pkt_cnt;
+ u64 rx_desc_broadcast_pkt_cnt;
+ u64 rx_desc_ipsec_pkt_cnt;
+ u64 rx_desc_ip_opt_pkt_cnt;
+ u64 rx_desc_key_not_match_cnt;
+
+ u64 rx_octets_total_ok_cnt;
+ u64 rx_uc_pkt_cnt;
+ u64 rx_mc_pkt_cnt;
+ u64 rx_bc_pkt_cnt;
+ u64 rx_vlan_pkt_cnt;
+ u64 rx_octets_bad_cnt;
+ u64 rx_octets_total_filt_cnt;
+ u64 rx_filt_pkt_cnt;
+ u64 rx_trans_pkt_cnt;
+ u64 rx_framesize_64;
+ u64 rx_framesize_65_127;
+ u64 rx_framesize_128_255;
+ u64 rx_framesize_256_511;
+ u64 rx_framesize_512_1023;
+ u64 rx_framesize_1024_1518;
+ u64 rx_framesize_bt_1518;
+ u64 rx_fcs_error_cnt;
+ u64 rx_data_error_cnt;
+ u64 rx_align_error_cnt;
+ u64 rx_pause_macctl_frame_cnt;
+ u64 rx_unknown_macctl_frame_cnt;
+ /* crc ok, > max_frm_size, < 2 * max_frm_size */
+ u64 rx_frame_long_err_cnt;
+ /* crc fail, > max_frm_size, < 2 * max_frm_size */
+ u64 rx_jabber_err_cnt;
+ /* > 2 * max_frm_size */
+ u64 rx_frame_very_long_err_cnt;
+ /* < 64byte, >= short_runts_thr */
+ u64 rx_frame_runt_err_cnt;
+ /* < short_runts_thr */
+ u64 rx_frame_short_err_cnt;
+ /* PCU: dropped when the RX FIFO is full.*/
+ u64 rx_overflow_cnt;
+ /* GMAC: the count of overflows of the RX FIFO */
+ u64 rx_overrun_cnt;
+ /* PCU: the count of buffer alloc errors in RX */
+ u64 rx_bufrq_err_cnt;
+ /* PCU: the count of write descriptor errors in RX */
+ u64 rx_we_err_cnt;
+ /* GMAC: the count of pkts that contain PAD but length is not 64 */
+ u64 rx_lengthfield_err_cnt;
+ u64 rx_fail_comma_cnt;
+
+ u64 rx_dma_err_cnt;
+ u64 rx_fifo_less_empty_thrsld_cnt;
+
+ u64 tx_octets_total_ok_cnt;
+ u64 tx_uc_pkt_cnt;
+ u64 tx_mc_pkt_cnt;
+ u64 tx_bc_pkt_cnt;
+ u64 tx_vlan_pkt_cnt;
+ u64 tx_octets_bad_cnt;
+ u64 tx_trans_pkt_cnt;
+ u64 tx_pause_frame_cnt;
+ u64 tx_framesize_64;
+ u64 tx_framesize_65_127;
+ u64 tx_framesize_128_255;
+ u64 tx_framesize_256_511;
+ u64 tx_framesize_512_1023;
+ u64 tx_framesize_1024_1518;
+ u64 tx_framesize_bt_1518;
+ /* GMAC: the count of times that frames fail to be transmitted
+ * due to internal errors.
+ */
+ u64 tx_underrun_err_cnt;
+ u64 tx_add_cs_fail_cnt;
+ /* PCU: the count of buffer free errors in TX */
+ u64 tx_bufrl_err_cnt;
+ u64 tx_crc_err_cnt;
+ u64 tx_drop_cnt;
+ u64 tx_excessive_length_drop_cnt;
+
+ u64 tx_timeout_cnt;
+ u64 tx_dma_err_cnt;
+
+ u64 np_link_fail_cnt;
+ u64 reset_fail_cnt;
+};
+
+struct hbg_priv {
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ u8 __iomem *io_base;
+ struct hbg_dev_specs dev_specs;
+ unsigned long state;
+ struct hbg_mac mac;
+ struct hbg_vector vectors;
+ struct hbg_ring tx_ring;
+ struct hbg_ring rx_ring;
+ struct hbg_mac_filter filter;
+ enum hbg_reset_type reset_type;
+ struct hbg_user_def user_def;
+ struct hbg_stats stats;
+ unsigned long last_update_stats_time;
+ struct delayed_work service_task;
+};
+
+void hbg_err_reset_task_schedule(struct hbg_priv *priv);
+void hbg_np_link_fail_task_schedule(struct hbg_priv *priv);
+
+#endif
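struct hbg_ring tracks its indices through the ntc ("next to clean") and ntu ("next to use") aliases of head and tail. The used-entry count queried by the debugfs and TX/RX paths (hbg_get_queue_used_num(), defined elsewhere in the driver) presumably reduces to a sketch like this:

static u32 hbg_ring_used_sketch(const struct hbg_ring *ring)
{
	/* entries currently occupied in a ring of 'len' slots */
	return (ring->ntu - ring->ntc + ring->len) % ring->len;
}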
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c
new file mode 100644
index 000000000000..01ad82d2f5cc
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2024 Hisilicon Limited.
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/etherdevice.h>
+#include <linux/seq_file.h>
+#include <linux/string_choices.h>
+#include "hbg_common.h"
+#include "hbg_debugfs.h"
+#include "hbg_hw.h"
+#include "hbg_irq.h"
+#include "hbg_txrx.h"
+
+static struct dentry *hbg_dbgfs_root;
+
+struct hbg_dbg_info {
+ const char *name;
+ int (*read)(struct seq_file *seq, void *data);
+};
+
+#define state_str_true_false(p, s) str_true_false(test_bit(s, &(p)->state))
+
+static void hbg_dbg_ring(struct hbg_priv *priv, struct hbg_ring *ring,
+ struct seq_file *s)
+{
+ u32 irq_mask = ring->dir == HBG_DIR_TX ? HBG_INT_MSK_TX_B :
+ HBG_INT_MSK_RX_B;
+
+ seq_printf(s, "ring used num: %u\n",
+ hbg_get_queue_used_num(ring));
+ seq_printf(s, "ring max num: %u\n", ring->len);
+ seq_printf(s, "ring head: %u, tail: %u\n", ring->head, ring->tail);
+ seq_printf(s, "fifo used num: %u\n",
+ hbg_hw_get_fifo_used_num(priv, ring->dir));
+ seq_printf(s, "fifo max num: %u\n",
+ hbg_get_spec_fifo_max_num(priv, ring->dir));
+ seq_printf(s, "irq enabled: %s\n",
+ str_true_false(hbg_hw_irq_is_enabled(priv, irq_mask)));
+}
+
+static int hbg_dbg_tx_ring(struct seq_file *s, void *unused)
+{
+ struct net_device *netdev = dev_get_drvdata(s->private);
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ hbg_dbg_ring(priv, &priv->tx_ring, s);
+ return 0;
+}
+
+static int hbg_dbg_rx_ring(struct seq_file *s, void *unused)
+{
+ struct net_device *netdev = dev_get_drvdata(s->private);
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ hbg_dbg_ring(priv, &priv->rx_ring, s);
+ return 0;
+}
+
+static int hbg_dbg_irq_info(struct seq_file *s, void *unused)
+{
+ struct net_device *netdev = dev_get_drvdata(s->private);
+ struct hbg_priv *priv = netdev_priv(netdev);
+ const struct hbg_irq_info *info;
+ u32 i;
+
+ for (i = 0; i < priv->vectors.info_array_len; i++) {
+ info = &priv->vectors.info_array[i];
+ seq_printf(s,
+ "%-20s: enabled: %-5s, reset: %-5s, logged: %-5s, count: %llu\n",
+ info->name,
+ str_true_false(hbg_hw_irq_is_enabled(priv,
+ info->mask)),
+ str_true_false(info->need_reset),
+ str_true_false(info->need_print),
+ priv->vectors.stats_array[i]);
+ }
+
+ return 0;
+}
+
+static int hbg_dbg_mac_table(struct seq_file *s, void *unused)
+{
+ struct net_device *netdev = dev_get_drvdata(s->private);
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_mac_filter *filter;
+ u32 i;
+
+ filter = &priv->filter;
+ seq_printf(s, "mac addr max count: %u\n", filter->table_max_len);
+ seq_printf(s, "filter enabled: %s\n", str_true_false(filter->enabled));
+
+ for (i = 0; i < filter->table_max_len; i++) {
+ if (is_zero_ether_addr(filter->mac_table[i].addr))
+ continue;
+
+ seq_printf(s, "[%u] %pM\n", i, filter->mac_table[i].addr);
+ }
+
+ return 0;
+}
+
+static const char * const reset_type_str[] = {"None", "FLR", "Function"};
+
+static int hbg_dbg_nic_state(struct seq_file *s, void *unused)
+{
+ struct net_device *netdev = dev_get_drvdata(s->private);
+ struct hbg_priv *priv = netdev_priv(netdev);
+ bool np_link_fail;
+
+ seq_printf(s, "event handling state: %s\n",
+ state_str_true_false(priv, HBG_NIC_STATE_EVENT_HANDLING));
+ seq_printf(s, "resetting state: %s\n",
+ state_str_true_false(priv, HBG_NIC_STATE_RESETTING));
+ seq_printf(s, "reset fail state: %s\n",
+ state_str_true_false(priv, HBG_NIC_STATE_RESET_FAIL));
+ seq_printf(s, "last reset type: %s\n",
+ reset_type_str[priv->reset_type]);
+ seq_printf(s, "need reset state: %s\n",
+ state_str_true_false(priv, HBG_NIC_STATE_NEED_RESET));
+
+ np_link_fail = !hbg_reg_read_field(priv, HBG_REG_AN_NEG_STATE_ADDR,
+ HBG_REG_AN_NEG_STATE_NP_LINK_OK_B);
+ seq_printf(s, "np_link fail state: %s\n", str_true_false(np_link_fail));
+
+ return 0;
+}
+
+static const struct hbg_dbg_info hbg_dbg_infos[] = {
+ { "tx_ring", hbg_dbg_tx_ring },
+ { "rx_ring", hbg_dbg_rx_ring },
+ { "irq_info", hbg_dbg_irq_info },
+ { "mac_table", hbg_dbg_mac_table },
+ { "nic_state", hbg_dbg_nic_state },
+};
+
+static void hbg_debugfs_uninit(void *data)
+{
+ debugfs_remove_recursive((struct dentry *)data);
+}
+
+void hbg_debugfs_init(struct hbg_priv *priv)
+{
+ const char *name = pci_name(priv->pdev);
+ struct device *dev = &priv->pdev->dev;
+ struct dentry *root;
+ u32 i;
+
+ root = debugfs_create_dir(name, hbg_dbgfs_root);
+
+ for (i = 0; i < ARRAY_SIZE(hbg_dbg_infos); i++)
+ debugfs_create_devm_seqfile(dev, hbg_dbg_infos[i].name,
+ root, hbg_dbg_infos[i].read);
+
+ /* Ignore the failure because debugfs is not a key feature. */
+ devm_add_action_or_reset(dev, hbg_debugfs_uninit, root);
+}
+
+void hbg_debugfs_register(void)
+{
+ hbg_dbgfs_root = debugfs_create_dir("hibmcge", NULL);
+}
+
+void hbg_debugfs_unregister(void)
+{
+ debugfs_remove_recursive(hbg_dbgfs_root);
+ hbg_dbgfs_root = NULL;
+}
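Each debugfs read callback above recovers its netdev from seq_file->private because debugfs_create_devm_seqfile() hands the struct device to the callback, and probe stores the netdev as driver data. A sketch of adding one more, hypothetical entry following the same pattern:

static int hbg_dbg_mtu(struct seq_file *s, void *unused)
{
	/* hypothetical example entry, not part of the driver */
	struct net_device *netdev = dev_get_drvdata(s->private);

	seq_printf(s, "mtu: %u\n", netdev->mtu);
	return 0;
}

with the matching row added to hbg_dbg_infos[]: { "mtu", hbg_dbg_mtu },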
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.h
new file mode 100644
index 000000000000..80670d66bbeb
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2024 Hisilicon Limited. */
+
+#ifndef __HBG_DEBUGFS_H
+#define __HBG_DEBUGFS_H
+
+void hbg_debugfs_register(void);
+void hbg_debugfs_unregister(void);
+
+void hbg_debugfs_init(struct hbg_priv *priv);
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c
new file mode 100644
index 000000000000..c0ce74cf7382
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2025 Hisilicon Limited.
+
+#include <linux/iopoll.h>
+#include <linux/phy.h>
+#include "hbg_common.h"
+#include "hbg_ethtool.h"
+#include "hbg_hw.h"
+#include "hbg_diagnose.h"
+
+#define HBG_MSG_DATA_MAX_NUM 64
+
+struct hbg_diagnose_message {
+ u32 opcode;
+ u32 status;
+ u32 data_num;
+ struct hbg_priv *priv;
+
+ u32 data[HBG_MSG_DATA_MAX_NUM];
+};
+
+#define HBG_HW_PUSH_WAIT_TIMEOUT_US (2 * 1000 * 1000)
+#define HBG_HW_PUSH_WAIT_INTERVAL_US (1 * 1000)
+
+enum hbg_push_cmd {
+ HBG_PUSH_CMD_IRQ = 0,
+ HBG_PUSH_CMD_STATS,
+ HBG_PUSH_CMD_LINK,
+};
+
+struct hbg_push_stats_info {
+ /* id is used to match the name of the current stats item
+ * and for pretty printing on the BMC.
+ */
+ u32 id;
+ u64 offset;
+};
+
+struct hbg_push_irq_info {
+ /* id is used to match the name of the current irq
+ * and for pretty printing on the BMC.
+ */
+ u32 id;
+ u32 mask;
+};
+
+#define HBG_PUSH_IRQ_I(name, id) {id, HBG_INT_MSK_##name##_B}
+static const struct hbg_push_irq_info hbg_push_irq_list[] = {
+ HBG_PUSH_IRQ_I(RX, 0),
+ HBG_PUSH_IRQ_I(TX, 1),
+ HBG_PUSH_IRQ_I(TX_PKT_CPL, 2),
+ HBG_PUSH_IRQ_I(MAC_MII_FIFO_ERR, 3),
+ HBG_PUSH_IRQ_I(MAC_PCS_RX_FIFO_ERR, 4),
+ HBG_PUSH_IRQ_I(MAC_PCS_TX_FIFO_ERR, 5),
+ HBG_PUSH_IRQ_I(MAC_APP_RX_FIFO_ERR, 6),
+ HBG_PUSH_IRQ_I(MAC_APP_TX_FIFO_ERR, 7),
+ HBG_PUSH_IRQ_I(SRAM_PARITY_ERR, 8),
+ HBG_PUSH_IRQ_I(TX_AHB_ERR, 9),
+ HBG_PUSH_IRQ_I(RX_BUF_AVL, 10),
+ HBG_PUSH_IRQ_I(REL_BUF_ERR, 11),
+ HBG_PUSH_IRQ_I(TXCFG_AVL, 12),
+ HBG_PUSH_IRQ_I(TX_DROP, 13),
+ HBG_PUSH_IRQ_I(RX_DROP, 14),
+ HBG_PUSH_IRQ_I(RX_AHB_ERR, 15),
+ HBG_PUSH_IRQ_I(MAC_FIFO_ERR, 16),
+ HBG_PUSH_IRQ_I(RBREQ_ERR, 17),
+ HBG_PUSH_IRQ_I(WE_ERR, 18),
+};
+
+#define HBG_PUSH_STATS_I(name, id) {id, HBG_STATS_FIELD_OFF(name)}
+static const struct hbg_push_stats_info hbg_push_stats_list[] = {
+ HBG_PUSH_STATS_I(rx_desc_drop, 0),
+ HBG_PUSH_STATS_I(rx_desc_l2_err_cnt, 1),
+ HBG_PUSH_STATS_I(rx_desc_pkt_len_err_cnt, 2),
+ HBG_PUSH_STATS_I(rx_desc_l3_wrong_head_cnt, 3),
+ HBG_PUSH_STATS_I(rx_desc_l3_csum_err_cnt, 4),
+ HBG_PUSH_STATS_I(rx_desc_l3_len_err_cnt, 5),
+ HBG_PUSH_STATS_I(rx_desc_l3_zero_ttl_cnt, 6),
+ HBG_PUSH_STATS_I(rx_desc_l3_other_cnt, 7),
+ HBG_PUSH_STATS_I(rx_desc_l4_err_cnt, 8),
+ HBG_PUSH_STATS_I(rx_desc_l4_wrong_head_cnt, 9),
+ HBG_PUSH_STATS_I(rx_desc_l4_len_err_cnt, 10),
+ HBG_PUSH_STATS_I(rx_desc_l4_csum_err_cnt, 11),
+ HBG_PUSH_STATS_I(rx_desc_l4_zero_port_num_cnt, 12),
+ HBG_PUSH_STATS_I(rx_desc_l4_other_cnt, 13),
+ HBG_PUSH_STATS_I(rx_desc_frag_cnt, 14),
+ HBG_PUSH_STATS_I(rx_desc_ip_ver_err_cnt, 15),
+ HBG_PUSH_STATS_I(rx_desc_ipv4_pkt_cnt, 16),
+ HBG_PUSH_STATS_I(rx_desc_ipv6_pkt_cnt, 17),
+ HBG_PUSH_STATS_I(rx_desc_no_ip_pkt_cnt, 18),
+ HBG_PUSH_STATS_I(rx_desc_ip_pkt_cnt, 19),
+ HBG_PUSH_STATS_I(rx_desc_tcp_pkt_cnt, 20),
+ HBG_PUSH_STATS_I(rx_desc_udp_pkt_cnt, 21),
+ HBG_PUSH_STATS_I(rx_desc_vlan_pkt_cnt, 22),
+ HBG_PUSH_STATS_I(rx_desc_icmp_pkt_cnt, 23),
+ HBG_PUSH_STATS_I(rx_desc_arp_pkt_cnt, 24),
+ HBG_PUSH_STATS_I(rx_desc_rarp_pkt_cnt, 25),
+ HBG_PUSH_STATS_I(rx_desc_multicast_pkt_cnt, 26),
+ HBG_PUSH_STATS_I(rx_desc_broadcast_pkt_cnt, 27),
+ HBG_PUSH_STATS_I(rx_desc_ipsec_pkt_cnt, 28),
+ HBG_PUSH_STATS_I(rx_desc_ip_opt_pkt_cnt, 29),
+ HBG_PUSH_STATS_I(rx_desc_key_not_match_cnt, 30),
+ HBG_PUSH_STATS_I(rx_octets_total_ok_cnt, 31),
+ HBG_PUSH_STATS_I(rx_uc_pkt_cnt, 32),
+ HBG_PUSH_STATS_I(rx_mc_pkt_cnt, 33),
+ HBG_PUSH_STATS_I(rx_bc_pkt_cnt, 34),
+ HBG_PUSH_STATS_I(rx_vlan_pkt_cnt, 35),
+ HBG_PUSH_STATS_I(rx_octets_bad_cnt, 36),
+ HBG_PUSH_STATS_I(rx_octets_total_filt_cnt, 37),
+ HBG_PUSH_STATS_I(rx_filt_pkt_cnt, 38),
+ HBG_PUSH_STATS_I(rx_trans_pkt_cnt, 39),
+ HBG_PUSH_STATS_I(rx_framesize_64, 40),
+ HBG_PUSH_STATS_I(rx_framesize_65_127, 41),
+ HBG_PUSH_STATS_I(rx_framesize_128_255, 42),
+ HBG_PUSH_STATS_I(rx_framesize_256_511, 43),
+ HBG_PUSH_STATS_I(rx_framesize_512_1023, 44),
+ HBG_PUSH_STATS_I(rx_framesize_1024_1518, 45),
+ HBG_PUSH_STATS_I(rx_framesize_bt_1518, 46),
+ HBG_PUSH_STATS_I(rx_fcs_error_cnt, 47),
+ HBG_PUSH_STATS_I(rx_data_error_cnt, 48),
+ HBG_PUSH_STATS_I(rx_align_error_cnt, 49),
+ HBG_PUSH_STATS_I(rx_frame_long_err_cnt, 50),
+ HBG_PUSH_STATS_I(rx_jabber_err_cnt, 51),
+ HBG_PUSH_STATS_I(rx_pause_macctl_frame_cnt, 52),
+ HBG_PUSH_STATS_I(rx_unknown_macctl_frame_cnt, 53),
+ HBG_PUSH_STATS_I(rx_frame_very_long_err_cnt, 54),
+ HBG_PUSH_STATS_I(rx_frame_runt_err_cnt, 55),
+ HBG_PUSH_STATS_I(rx_frame_short_err_cnt, 56),
+ HBG_PUSH_STATS_I(rx_overflow_cnt, 57),
+ HBG_PUSH_STATS_I(rx_bufrq_err_cnt, 58),
+ HBG_PUSH_STATS_I(rx_we_err_cnt, 59),
+ HBG_PUSH_STATS_I(rx_overrun_cnt, 60),
+ HBG_PUSH_STATS_I(rx_lengthfield_err_cnt, 61),
+ HBG_PUSH_STATS_I(rx_fail_comma_cnt, 62),
+ HBG_PUSH_STATS_I(rx_dma_err_cnt, 63),
+ HBG_PUSH_STATS_I(rx_fifo_less_empty_thrsld_cnt, 64),
+ HBG_PUSH_STATS_I(tx_octets_total_ok_cnt, 65),
+ HBG_PUSH_STATS_I(tx_uc_pkt_cnt, 66),
+ HBG_PUSH_STATS_I(tx_mc_pkt_cnt, 67),
+ HBG_PUSH_STATS_I(tx_bc_pkt_cnt, 68),
+ HBG_PUSH_STATS_I(tx_vlan_pkt_cnt, 69),
+ HBG_PUSH_STATS_I(tx_octets_bad_cnt, 70),
+ HBG_PUSH_STATS_I(tx_trans_pkt_cnt, 71),
+ HBG_PUSH_STATS_I(tx_pause_frame_cnt, 72),
+ HBG_PUSH_STATS_I(tx_framesize_64, 73),
+ HBG_PUSH_STATS_I(tx_framesize_65_127, 74),
+ HBG_PUSH_STATS_I(tx_framesize_128_255, 75),
+ HBG_PUSH_STATS_I(tx_framesize_256_511, 76),
+ HBG_PUSH_STATS_I(tx_framesize_512_1023, 77),
+ HBG_PUSH_STATS_I(tx_framesize_1024_1518, 78),
+ HBG_PUSH_STATS_I(tx_framesize_bt_1518, 79),
+ HBG_PUSH_STATS_I(tx_underrun_err_cnt, 80),
+ HBG_PUSH_STATS_I(tx_add_cs_fail_cnt, 81),
+ HBG_PUSH_STATS_I(tx_bufrl_err_cnt, 82),
+ HBG_PUSH_STATS_I(tx_crc_err_cnt, 83),
+ HBG_PUSH_STATS_I(tx_drop_cnt, 84),
+ HBG_PUSH_STATS_I(tx_excessive_length_drop_cnt, 85),
+ HBG_PUSH_STATS_I(tx_dma_err_cnt, 86),
+ HBG_PUSH_STATS_I(reset_fail_cnt, 87),
+};
+
+static int hbg_push_msg_send(struct hbg_priv *priv,
+ struct hbg_diagnose_message *msg)
+{
+ u32 header = 0;
+ u32 i;
+
+ if (msg->data_num == 0)
+ return 0;
+
+ for (i = 0; i < msg->data_num && i < HBG_MSG_DATA_MAX_NUM; i++)
+ hbg_reg_write(priv,
+ HBG_REG_MSG_DATA_BASE_ADDR + i * sizeof(u32),
+ msg->data[i]);
+
+ hbg_field_modify(header, HBG_REG_MSG_HEADER_OPCODE_M, msg->opcode);
+ hbg_field_modify(header, HBG_REG_MSG_HEADER_DATA_NUM_M, msg->data_num);
+ hbg_field_modify(header, HBG_REG_MSG_HEADER_RESP_CODE_M, ETIMEDOUT);
+
+ /* start status */
+ hbg_field_modify(header, HBG_REG_MSG_HEADER_STATUS_M, 1);
+
+ /* write header msg to start push */
+ hbg_reg_write(priv, HBG_REG_MSG_HEADER_ADDR, header);
+
+ /* wait done */
+ readl_poll_timeout(priv->io_base + HBG_REG_MSG_HEADER_ADDR, header,
+ !FIELD_GET(HBG_REG_MSG_HEADER_STATUS_M, header),
+ HBG_HW_PUSH_WAIT_INTERVAL_US,
+ HBG_HW_PUSH_WAIT_TIMEOUT_US);
+
+ msg->status = FIELD_GET(HBG_REG_MSG_HEADER_STATUS_M, header);
+ return -(int)FIELD_GET(HBG_REG_MSG_HEADER_RESP_CODE_M, header);
+}
+
+static int hbg_push_data(struct hbg_priv *priv,
+ u32 opcode, u32 *data, u32 data_num)
+{
+ struct hbg_diagnose_message msg = {0};
+ u32 data_left_num;
+ u32 i, j;
+ int ret;
+
+ msg.priv = priv;
+ msg.opcode = opcode;
+ for (i = 0; i < data_num / HBG_MSG_DATA_MAX_NUM + 1; i++) {
+ if (i * HBG_MSG_DATA_MAX_NUM >= data_num)
+ break;
+
+ data_left_num = data_num - i * HBG_MSG_DATA_MAX_NUM;
+ for (j = 0; j < data_left_num && j < HBG_MSG_DATA_MAX_NUM; j++)
+ msg.data[j] = data[i * HBG_MSG_DATA_MAX_NUM + j];
+
+ msg.data_num = j;
+ ret = hbg_push_msg_send(priv, &msg);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hbg_push_data_u64(struct hbg_priv *priv, u32 opcode,
+ u64 *data, u32 data_num)
+{
+ /* A u64 is twice as long as a u32,
+ * so data_num must be multiplied by 2.
+ */
+ return hbg_push_data(priv, opcode, (u32 *)data, data_num * 2);
+}
+
+static u64 hbg_get_irq_stats(struct hbg_vector *vectors, u32 mask)
+{
+ u32 i = 0;
+
+ for (i = 0; i < vectors->info_array_len; i++)
+ if (vectors->info_array[i].mask == mask)
+ return vectors->stats_array[i];
+
+ return 0;
+}
+
+static int hbg_push_irq_cnt(struct hbg_priv *priv)
+{
+ /* An id is stored alongside each data item,
+ * so data_num must be multiplied by 2.
+ */
+ u32 data_num = ARRAY_SIZE(hbg_push_irq_list) * 2;
+ struct hbg_vector *vectors = &priv->vectors;
+ const struct hbg_push_irq_info *info;
+ u32 i, j = 0;
+ u64 *data;
+ int ret;
+
+ data = kcalloc(data_num, sizeof(u64), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /* An id precedes each data item,
+ * so i advances by 2 per loop iteration.
+ */
+ for (i = 0; i < data_num; i += 2) {
+ info = &hbg_push_irq_list[j++];
+ data[i] = info->id;
+ data[i + 1] = hbg_get_irq_stats(vectors, info->mask);
+ }
+
+ ret = hbg_push_data_u64(priv, HBG_PUSH_CMD_IRQ, data, data_num);
+ kfree(data);
+ return ret;
+}
+
+static int hbg_push_link_status(struct hbg_priv *priv)
+{
+ u32 link_status[2];
+
+ /* phy link status */
+ link_status[0] = priv->mac.phydev->link;
+ /* mac link status */
+ link_status[1] = hbg_reg_read_field(priv, HBG_REG_AN_NEG_STATE_ADDR,
+ HBG_REG_AN_NEG_STATE_NP_LINK_OK_B);
+
+ return hbg_push_data(priv, HBG_PUSH_CMD_LINK,
+ link_status, ARRAY_SIZE(link_status));
+}
+
+static int hbg_push_stats(struct hbg_priv *priv)
+{
+ /* An id is stored alongside each data item,
+ * so data_num must be multiplied by 2.
+ */
+ u64 data_num = ARRAY_SIZE(hbg_push_stats_list) * 2;
+ struct hbg_stats *stats = &priv->stats;
+ const struct hbg_push_stats_info *info;
+ u32 i, j = 0;
+ u64 *data;
+ int ret;
+
+ data = kcalloc(data_num, sizeof(u64), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /* An id precedes each data item,
+ * so i advances by 2 per loop iteration.
+ */
+ for (i = 0; i < data_num; i += 2) {
+ info = &hbg_push_stats_list[j++];
+ data[i] = info->id;
+ data[i + 1] = HBG_STATS_R(stats, info->offset);
+ }
+
+ ret = hbg_push_data_u64(priv, HBG_PUSH_CMD_STATS, data, data_num);
+ kfree(data);
+ return ret;
+}
+
+void hbg_diagnose_message_push(struct hbg_priv *priv)
+{
+ int ret;
+
+ if (test_bit(HBG_NIC_STATE_RESETTING, &priv->state))
+ return;
+
+ /* a push is requested only when this register reads 1 */
+ if (hbg_reg_read(priv, HBG_REG_PUSH_REQ_ADDR) != 1)
+ return;
+
+ ret = hbg_push_irq_cnt(priv);
+ if (ret) {
+ dev_err(&priv->pdev->dev,
+ "failed to push irq cnt, ret = %d\n", ret);
+ goto push_done;
+ }
+
+ ret = hbg_push_link_status(priv);
+ if (ret) {
+ dev_err(&priv->pdev->dev,
+ "failed to push link status, ret = %d\n", ret);
+ goto push_done;
+ }
+
+ ret = hbg_push_stats(priv);
+ if (ret)
+ dev_err(&priv->pdev->dev,
+ "failed to push stats, ret = %d\n", ret);
+
+push_done:
+ hbg_reg_write(priv, HBG_REG_PUSH_REQ_ADDR, 0);
+}
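Both push paths above emit a flat array of id/value pairs (data[0] = id, data[1] = value, data[2] = id, ...), so the BMC can decode the stream without knowing the driver's ordering. A sketch of the assumed consumer-side loop (record_stat() is hypothetical):

	for (i = 0; i + 1 < data_num; i += 2)
		record_stat(data[i], data[i + 1]);	/* id, value */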
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.h
new file mode 100644
index 000000000000..ba04c6d8c03d
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2025 Hisilicon Limited. */
+
+#ifndef __HBG_DIAGNOSE_H
+#define __HBG_DIAGNOSE_H
+
+#include "hbg_common.h"
+
+void hbg_diagnose_message_push(struct hbg_priv *priv);
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
new file mode 100644
index 000000000000..7234618e8e81
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2024 Hisilicon Limited.
+
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/rtnetlink.h>
+#include "hbg_common.h"
+#include "hbg_err.h"
+#include "hbg_hw.h"
+
+static void hbg_restore_mac_table(struct hbg_priv *priv)
+{
+ struct hbg_mac_filter *filter = &priv->filter;
+ u64 addr;
+ u32 i;
+
+ for (i = 0; i < filter->table_max_len; i++)
+ if (!is_zero_ether_addr(filter->mac_table[i].addr)) {
+ addr = ether_addr_to_u64(filter->mac_table[i].addr);
+ hbg_hw_set_uc_addr(priv, addr, i);
+ }
+
+ hbg_hw_set_mac_filter_enable(priv, priv->filter.enabled);
+}
+
+static void hbg_restore_user_def_settings(struct hbg_priv *priv)
+{
+ /* The index of host mac is always 0. */
+ u64 rx_pause_addr = ether_addr_to_u64(priv->filter.mac_table[0].addr);
+ struct ethtool_pauseparam *pause_param = &priv->user_def.pause_param;
+
+ hbg_restore_mac_table(priv);
+ hbg_hw_set_mtu(priv, priv->netdev->mtu);
+ hbg_hw_set_pause_enable(priv, pause_param->tx_pause,
+ pause_param->rx_pause);
+ hbg_hw_set_rx_pause_mac_addr(priv, rx_pause_addr);
+}
+
+int hbg_rebuild(struct hbg_priv *priv)
+{
+ int ret;
+
+ ret = hbg_hw_init(priv);
+ if (ret)
+ return ret;
+
+ hbg_restore_user_def_settings(priv);
+ return 0;
+}
+
+static int hbg_reset_prepare(struct hbg_priv *priv, enum hbg_reset_type type)
+{
+ int ret;
+
+ if (test_and_set_bit(HBG_NIC_STATE_RESETTING, &priv->state))
+ return -EBUSY;
+
+ if (netif_running(priv->netdev)) {
+ clear_bit(HBG_NIC_STATE_RESETTING, &priv->state);
+ dev_warn(&priv->pdev->dev,
+ "failed to reset because port is up\n");
+ return -EBUSY;
+ }
+
+ netif_device_detach(priv->netdev);
+
+ priv->reset_type = type;
+ clear_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state);
+ ret = hbg_hw_event_notify(priv, HBG_HW_EVENT_RESET);
+ if (ret) {
+ priv->stats.reset_fail_cnt++;
+ set_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state);
+ clear_bit(HBG_NIC_STATE_RESETTING, &priv->state);
+ }
+
+ return ret;
+}
+
+static int hbg_reset_done(struct hbg_priv *priv, enum hbg_reset_type type)
+{
+ int ret;
+
+ if (!test_bit(HBG_NIC_STATE_RESETTING, &priv->state) ||
+ type != priv->reset_type)
+ return 0;
+
+ ret = hbg_rebuild(priv);
+ if (ret) {
+ priv->stats.reset_fail_cnt++;
+ set_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state);
+ clear_bit(HBG_NIC_STATE_RESETTING, &priv->state);
+ dev_err(&priv->pdev->dev, "failed to rebuild after reset\n");
+ return ret;
+ }
+
+ netif_device_attach(priv->netdev);
+ clear_bit(HBG_NIC_STATE_RESETTING, &priv->state);
+
+ dev_info(&priv->pdev->dev, "reset done\n");
+ return ret;
+}
+
+int hbg_reset(struct hbg_priv *priv)
+{
+ int ret;
+
+ ret = hbg_reset_prepare(priv, HBG_RESET_TYPE_FUNCTION);
+ if (ret)
+ return ret;
+
+ return hbg_reset_done(priv, HBG_RESET_TYPE_FUNCTION);
+}
+
+void hbg_err_reset(struct hbg_priv *priv)
+{
+ bool running;
+
+ rtnl_lock();
+ running = netif_running(priv->netdev);
+ if (running)
+ dev_close(priv->netdev);
+
+ if (hbg_reset(priv))
+ goto err_unlock;
+
+ if (running)
+ dev_open(priv->netdev, NULL);
+
+err_unlock:
+ rtnl_unlock();
+}
+
+static pci_ers_result_t hbg_pci_err_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+
+ if (state == pci_channel_io_perm_failure) {
+ netif_device_detach(netdev);
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t hbg_pci_err_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ netif_device_detach(netdev);
+ pci_disable_device(pdev);
+
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev,
+ "failed to re-enable PCI device after reset\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+
+ hbg_err_reset(priv);
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void hbg_pci_err_reset_prepare(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ hbg_reset_prepare(priv, HBG_RESET_TYPE_FLR);
+}
+
+static void hbg_pci_err_reset_done(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ hbg_reset_done(priv, HBG_RESET_TYPE_FLR);
+}
+
+static const struct pci_error_handlers hbg_pci_err_handler = {
+ .error_detected = hbg_pci_err_detected,
+ .slot_reset = hbg_pci_err_slot_reset,
+ .reset_prepare = hbg_pci_err_reset_prepare,
+ .reset_done = hbg_pci_err_reset_done,
+};
+
+void hbg_set_pci_err_handler(struct pci_driver *pdrv)
+{
+ pdrv->err_handler = &hbg_pci_err_handler;
+}
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h
new file mode 100644
index 000000000000..fb9fbe7004e8
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2024 Hisilicon Limited. */
+
+#ifndef __HBG_ERR_H
+#define __HBG_ERR_H
+
+#include <linux/pci.h>
+
+void hbg_set_pci_err_handler(struct pci_driver *pdrv);
+int hbg_reset(struct hbg_priv *priv);
+int hbg_rebuild(struct hbg_priv *priv);
+void hbg_err_reset(struct hbg_priv *priv);
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c
new file mode 100644
index 000000000000..1d62ff913737
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c
@@ -0,0 +1,500 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2024 Hisilicon Limited.
+
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+#include <linux/rtnetlink.h>
+#include "hbg_common.h"
+#include "hbg_err.h"
+#include "hbg_ethtool.h"
+#include "hbg_hw.h"
+
+struct hbg_ethtool_stats {
+ char name[ETH_GSTRING_LEN];
+ unsigned long offset;
+ u32 reg; /* 0 if the stat is not read back from a hardware register */
+};
+
+#define HBG_STATS_I(stats) { #stats, HBG_STATS_FIELD_OFF(stats), 0}
+#define HBG_STATS_REG_I(stats, reg) { #stats, HBG_STATS_FIELD_OFF(stats), reg}
+
+static const struct hbg_ethtool_stats hbg_ethtool_stats_info[] = {
+ HBG_STATS_I(rx_desc_l2_err_cnt),
+ HBG_STATS_I(rx_desc_pkt_len_err_cnt),
+ HBG_STATS_I(rx_desc_l3_wrong_head_cnt),
+ HBG_STATS_I(rx_desc_l3_csum_err_cnt),
+ HBG_STATS_I(rx_desc_l3_len_err_cnt),
+ HBG_STATS_I(rx_desc_l3_zero_ttl_cnt),
+ HBG_STATS_I(rx_desc_l3_other_cnt),
+ HBG_STATS_I(rx_desc_l4_wrong_head_cnt),
+ HBG_STATS_I(rx_desc_l4_len_err_cnt),
+ HBG_STATS_I(rx_desc_l4_csum_err_cnt),
+ HBG_STATS_I(rx_desc_l4_zero_port_num_cnt),
+ HBG_STATS_I(rx_desc_l4_other_cnt),
+ HBG_STATS_I(rx_desc_ip_ver_err_cnt),
+ HBG_STATS_I(rx_desc_ipv4_pkt_cnt),
+ HBG_STATS_I(rx_desc_ipv6_pkt_cnt),
+ HBG_STATS_I(rx_desc_no_ip_pkt_cnt),
+ HBG_STATS_I(rx_desc_ip_pkt_cnt),
+ HBG_STATS_I(rx_desc_tcp_pkt_cnt),
+ HBG_STATS_I(rx_desc_udp_pkt_cnt),
+ HBG_STATS_I(rx_desc_vlan_pkt_cnt),
+ HBG_STATS_I(rx_desc_icmp_pkt_cnt),
+ HBG_STATS_I(rx_desc_arp_pkt_cnt),
+ HBG_STATS_I(rx_desc_rarp_pkt_cnt),
+ HBG_STATS_I(rx_desc_multicast_pkt_cnt),
+ HBG_STATS_I(rx_desc_broadcast_pkt_cnt),
+ HBG_STATS_I(rx_desc_ipsec_pkt_cnt),
+ HBG_STATS_I(rx_desc_ip_opt_pkt_cnt),
+ HBG_STATS_I(rx_desc_key_not_match_cnt),
+
+ HBG_STATS_REG_I(rx_octets_bad_cnt, HBG_REG_RX_OCTETS_BAD_ADDR),
+ HBG_STATS_REG_I(rx_octets_total_filt_cnt,
+ HBG_REG_RX_OCTETS_TOTAL_FILT_ADDR),
+ HBG_STATS_REG_I(rx_uc_pkt_cnt, HBG_REG_RX_UC_PKTS_ADDR),
+ HBG_STATS_REG_I(rx_vlan_pkt_cnt, HBG_REG_RX_TAGGED_ADDR),
+ HBG_STATS_REG_I(rx_filt_pkt_cnt, HBG_REG_RX_FILT_PKT_CNT_ADDR),
+ HBG_STATS_REG_I(rx_data_error_cnt, HBG_REG_RX_DATA_ERR_ADDR),
+ HBG_STATS_REG_I(rx_frame_long_err_cnt, HBG_REG_RX_LONG_ERRORS_ADDR),
+ HBG_STATS_REG_I(rx_jabber_err_cnt, HBG_REG_RX_JABBER_ERRORS_ADDR),
+ HBG_STATS_REG_I(rx_frame_very_long_err_cnt,
+ HBG_REG_RX_VERY_LONG_ERR_CNT_ADDR),
+ HBG_STATS_REG_I(rx_frame_runt_err_cnt, HBG_REG_RX_RUNT_ERR_CNT_ADDR),
+ HBG_STATS_REG_I(rx_frame_short_err_cnt, HBG_REG_RX_SHORT_ERR_CNT_ADDR),
+ HBG_STATS_REG_I(rx_overflow_cnt, HBG_REG_RX_OVER_FLOW_CNT_ADDR),
+ HBG_STATS_REG_I(rx_bufrq_err_cnt, HBG_REG_RX_BUFRQ_ERR_CNT_ADDR),
+ HBG_STATS_REG_I(rx_we_err_cnt, HBG_REG_RX_WE_ERR_CNT_ADDR),
+ HBG_STATS_REG_I(rx_overrun_cnt, HBG_REG_RX_OVERRUN_CNT_ADDR),
+ HBG_STATS_REG_I(rx_lengthfield_err_cnt,
+ HBG_REG_RX_LENGTHFIELD_ERR_CNT_ADDR),
+ HBG_STATS_REG_I(rx_fail_comma_cnt, HBG_REG_RX_FAIL_COMMA_CNT_ADDR),
+ HBG_STATS_I(rx_dma_err_cnt),
+ HBG_STATS_I(rx_fifo_less_empty_thrsld_cnt),
+
+ HBG_STATS_REG_I(tx_uc_pkt_cnt, HBG_REG_TX_UC_PKTS_ADDR),
+ HBG_STATS_REG_I(tx_vlan_pkt_cnt, HBG_REG_TX_TAGGED_ADDR),
+ HBG_STATS_REG_I(tx_octets_bad_cnt, HBG_REG_OCTETS_TRANSMITTED_BAD_ADDR),
+
+ HBG_STATS_REG_I(tx_underrun_err_cnt, HBG_REG_TX_UNDERRUN_ADDR),
+ HBG_STATS_REG_I(tx_add_cs_fail_cnt, HBG_REG_TX_CS_FAIL_CNT_ADDR),
+ HBG_STATS_REG_I(tx_bufrl_err_cnt, HBG_REG_TX_BUFRL_ERR_CNT_ADDR),
+ HBG_STATS_REG_I(tx_crc_err_cnt, HBG_REG_TX_CRC_ERROR_ADDR),
+ HBG_STATS_REG_I(tx_drop_cnt, HBG_REG_TX_DROP_CNT_ADDR),
+ HBG_STATS_REG_I(tx_excessive_length_drop_cnt,
+ HBG_REG_TX_EXCESSIVE_LENGTH_DROP_ADDR),
+ HBG_STATS_I(tx_dma_err_cnt),
+ HBG_STATS_I(tx_timeout_cnt),
+ HBG_STATS_I(reset_fail_cnt),
+};
+
+static const struct hbg_ethtool_stats hbg_ethtool_rmon_stats_info[] = {
+ HBG_STATS_I(rx_desc_frag_cnt),
+ HBG_STATS_REG_I(rx_framesize_64, HBG_REG_RX_PKTS_64OCTETS_ADDR),
+ HBG_STATS_REG_I(rx_framesize_65_127,
+ HBG_REG_RX_PKTS_65TO127OCTETS_ADDR),
+ HBG_STATS_REG_I(rx_framesize_128_255,
+ HBG_REG_RX_PKTS_128TO255OCTETS_ADDR),
+ HBG_STATS_REG_I(rx_framesize_256_511,
+ HBG_REG_RX_PKTS_256TO511OCTETS_ADDR),
+ HBG_STATS_REG_I(rx_framesize_512_1023,
+ HBG_REG_RX_PKTS_512TO1023OCTETS_ADDR),
+ HBG_STATS_REG_I(rx_framesize_1024_1518,
+ HBG_REG_RX_PKTS_1024TO1518OCTETS_ADDR),
+ HBG_STATS_REG_I(rx_framesize_bt_1518,
+ HBG_REG_RX_PKTS_1519TOMAXOCTETS_ADDR),
+ HBG_STATS_REG_I(tx_framesize_64, HBG_REG_TX_PKTS_64OCTETS_ADDR),
+ HBG_STATS_REG_I(tx_framesize_65_127,
+ HBG_REG_TX_PKTS_65TO127OCTETS_ADDR),
+ HBG_STATS_REG_I(tx_framesize_128_255,
+ HBG_REG_TX_PKTS_128TO255OCTETS_ADDR),
+ HBG_STATS_REG_I(tx_framesize_256_511,
+ HBG_REG_TX_PKTS_256TO511OCTETS_ADDR),
+ HBG_STATS_REG_I(tx_framesize_512_1023,
+ HBG_REG_TX_PKTS_512TO1023OCTETS_ADDR),
+ HBG_STATS_REG_I(tx_framesize_1024_1518,
+ HBG_REG_TX_PKTS_1024TO1518OCTETS_ADDR),
+ HBG_STATS_REG_I(tx_framesize_bt_1518,
+ HBG_REG_TX_PKTS_1519TOMAXOCTETS_ADDR),
+};
+
+static const struct hbg_ethtool_stats hbg_ethtool_mac_stats_info[] = {
+ HBG_STATS_REG_I(rx_mc_pkt_cnt, HBG_REG_RX_MC_PKTS_ADDR),
+ HBG_STATS_REG_I(rx_bc_pkt_cnt, HBG_REG_RX_BC_PKTS_ADDR),
+ HBG_STATS_REG_I(rx_align_error_cnt, HBG_REG_RX_ALIGN_ERRORS_ADDR),
+ HBG_STATS_REG_I(rx_octets_total_ok_cnt,
+ HBG_REG_RX_OCTETS_TOTAL_OK_ADDR),
+ HBG_STATS_REG_I(rx_trans_pkt_cnt, HBG_REG_RX_TRANS_PKG_CNT_ADDR),
+ HBG_STATS_REG_I(rx_fcs_error_cnt, HBG_REG_RX_FCS_ERRORS_ADDR),
+ HBG_STATS_REG_I(tx_mc_pkt_cnt, HBG_REG_TX_MC_PKTS_ADDR),
+ HBG_STATS_REG_I(tx_bc_pkt_cnt, HBG_REG_TX_BC_PKTS_ADDR),
+ HBG_STATS_REG_I(tx_octets_total_ok_cnt,
+ HBG_REG_OCTETS_TRANSMITTED_OK_ADDR),
+ HBG_STATS_REG_I(tx_trans_pkt_cnt, HBG_REG_TX_TRANS_PKG_CNT_ADDR),
+};
+
+static const struct hbg_ethtool_stats hbg_ethtool_ctrl_stats_info[] = {
+ HBG_STATS_REG_I(rx_pause_macctl_frame_cnt,
+ HBG_REG_RX_PAUSE_MACCTL_FRAMCOUNTER_ADDR),
+ HBG_STATS_REG_I(tx_pause_frame_cnt, HBG_REG_TX_PAUSE_FRAMES_ADDR),
+ HBG_STATS_REG_I(rx_unknown_macctl_frame_cnt,
+ HBG_REG_RX_UNKNOWN_MACCTL_FRAMCOUNTER_ADDR),
+};
+
+enum hbg_reg_dump_type {
+ HBG_DUMP_REG_TYPE_SPEC = 0,
+ HBG_DUMP_REG_TYPE_MDIO,
+ HBG_DUMP_REG_TYPE_GMAC,
+ HBG_DUMP_REG_TYPE_PCU,
+};
+
+struct hbg_reg_info {
+ u32 type;
+ u32 offset;
+ u32 val;
+};
+
+#define HBG_DUMP_SPEC_I(offset) {HBG_DUMP_REG_TYPE_SPEC, offset, 0}
+#define HBG_DUMP_MDIO_I(offset) {HBG_DUMP_REG_TYPE_MDIO, offset, 0}
+#define HBG_DUMP_GMAC_I(offset) {HBG_DUMP_REG_TYPE_GMAC, offset, 0}
+#define HBG_DUMP_PCU_I(offset) {HBG_DUMP_REG_TYPE_PCU, offset, 0}
+
+static const struct hbg_reg_info hbg_dump_reg_infos[] = {
+ /* dev specs */
+ HBG_DUMP_SPEC_I(HBG_REG_SPEC_VALID_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_EVENT_REQ_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_MAC_ID_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_PHY_ID_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_MAC_ADDR_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_MAC_ADDR_HIGH_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_UC_MAC_NUM_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_MDIO_FREQ_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_MAX_MTU_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_MIN_MTU_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_TX_FIFO_NUM_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_RX_FIFO_NUM_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_VLAN_LAYERS_ADDR),
+
+ /* mdio */
+ HBG_DUMP_MDIO_I(HBG_REG_MDIO_COMMAND_ADDR),
+ HBG_DUMP_MDIO_I(HBG_REG_MDIO_ADDR_ADDR),
+ HBG_DUMP_MDIO_I(HBG_REG_MDIO_WDATA_ADDR),
+ HBG_DUMP_MDIO_I(HBG_REG_MDIO_RDATA_ADDR),
+ HBG_DUMP_MDIO_I(HBG_REG_MDIO_STA_ADDR),
+
+ /* gmac */
+ HBG_DUMP_GMAC_I(HBG_REG_DUPLEX_TYPE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_FD_FC_TYPE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_FC_TX_TIMER_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_FD_FC_ADDR_LOW_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_FD_FC_ADDR_HIGH_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_MAX_FRAME_SIZE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_PORT_MODE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_PORT_ENABLE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_PAUSE_ENABLE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_AN_NEG_STATE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_TRANSMIT_CTRL_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_REC_FILT_CTRL_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_LINE_LOOP_BACK_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_CF_CRC_STRIP_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_MODE_CHANGE_EN_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_LOOP_REG_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_RECV_CTRL_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_VLAN_CODE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_0_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_0_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_1_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_1_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_2_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_2_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_3_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_3_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_4_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_4_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_5_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_5_ADDR),
+
+ /* pcu */
+ HBG_DUMP_PCU_I(HBG_REG_TX_FIFO_THRSLD_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_RX_FIFO_THRSLD_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CFG_FIFO_THRSLD_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_INTRPT_MSK_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_INTRPT_STAT_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_INTRPT_CLR_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_TX_BUS_ERR_ADDR_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_RX_BUS_ERR_ADDR_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_MAX_FRAME_LEN_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_DEBUG_ST_MCH_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_FIFO_CURR_STATUS_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_FIFO_HIST_STATUS_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_CFF_DATA_NUM_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_TX_PAUSE_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_RX_CFF_ADDR_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_RX_BUF_SIZE_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_BUS_CTRL_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_RX_CTRL_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_RX_PKT_MODE_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_DBG_ST0_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_DBG_ST1_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_DBG_ST2_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_BUS_RST_EN_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_IND_TXINT_MSK_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_IND_TXINT_STAT_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_IND_TXINT_CLR_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_IND_RXINT_MSK_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_IND_RXINT_STAT_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_IND_RXINT_CLR_ADDR),
+};
+
+static const u32 hbg_dump_type_base_array[] = {
+ [HBG_DUMP_REG_TYPE_SPEC] = 0,
+ [HBG_DUMP_REG_TYPE_MDIO] = HBG_REG_MDIO_BASE,
+ [HBG_DUMP_REG_TYPE_GMAC] = HBG_REG_SGMII_BASE,
+ [HBG_DUMP_REG_TYPE_PCU] = HBG_REG_SGMII_BASE,
+};
+
+static int hbg_ethtool_get_regs_len(struct net_device *netdev)
+{
+ return ARRAY_SIZE(hbg_dump_reg_infos) * sizeof(struct hbg_reg_info);
+}
+
+static void hbg_ethtool_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs, void *data)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_reg_info *info;
+ u32 i, offset = 0;
+
+ regs->version = 0;
+ for (i = 0; i < ARRAY_SIZE(hbg_dump_reg_infos); i++) {
+ info = data + offset;
+
+ *info = hbg_dump_reg_infos[i];
+ info->val = hbg_reg_read(priv, info->offset);
+ info->offset -= hbg_dump_type_base_array[info->type];
+
+ offset += sizeof(*info);
+ }
+}
+
+static void hbg_ethtool_get_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *param)
+{
+ struct hbg_priv *priv = netdev_priv(net_dev);
+
+ param->autoneg = priv->mac.pause_autoneg;
+ hbg_hw_get_pause_enable(priv, &param->tx_pause, &param->rx_pause);
+}
+
+static int hbg_ethtool_set_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *param)
+{
+ struct hbg_priv *priv = netdev_priv(net_dev);
+
+ priv->mac.pause_autoneg = param->autoneg;
+ phy_set_asym_pause(priv->mac.phydev, param->rx_pause, param->tx_pause);
+
+ if (!param->autoneg)
+ hbg_hw_set_pause_enable(priv, param->tx_pause, param->rx_pause);
+
+ priv->user_def.pause_param = *param;
+ return 0;
+}
+
+static int hbg_ethtool_reset(struct net_device *netdev, u32 *flags)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ if (*flags != ETH_RESET_DEDICATED)
+ return -EOPNOTSUPP;
+
+ *flags = 0;
+ return hbg_reset(priv);
+}
+
+static void hbg_update_stats_by_info(struct hbg_priv *priv,
+ const struct hbg_ethtool_stats *info,
+ u32 info_len)
+{
+ const struct hbg_ethtool_stats *stats;
+ u32 i;
+
+ if (test_bit(HBG_NIC_STATE_RESETTING, &priv->state))
+ return;
+
+ for (i = 0; i < info_len; i++) {
+ stats = &info[i];
+ if (!stats->reg)
+ continue;
+
+ HBG_STATS_U(&priv->stats, stats->offset,
+ hbg_reg_read(priv, stats->reg));
+ }
+}
+
+void hbg_update_stats(struct hbg_priv *priv)
+{
+ hbg_update_stats_by_info(priv, hbg_ethtool_stats_info,
+ ARRAY_SIZE(hbg_ethtool_stats_info));
+ hbg_update_stats_by_info(priv, hbg_ethtool_rmon_stats_info,
+ ARRAY_SIZE(hbg_ethtool_rmon_stats_info));
+ hbg_update_stats_by_info(priv, hbg_ethtool_mac_stats_info,
+ ARRAY_SIZE(hbg_ethtool_mac_stats_info));
+ hbg_update_stats_by_info(priv, hbg_ethtool_ctrl_stats_info,
+ ARRAY_SIZE(hbg_ethtool_ctrl_stats_info));
+}
+
+static int hbg_ethtool_get_sset_count(struct net_device *netdev, int stringset)
+{
+ if (stringset != ETH_SS_STATS)
+ return -EOPNOTSUPP;
+
+ return ARRAY_SIZE(hbg_ethtool_stats_info);
+}
+
+static void hbg_ethtool_get_strings(struct net_device *netdev,
+ u32 stringset, u8 *data)
+{
+ u32 i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(hbg_ethtool_stats_info); i++)
+ ethtool_puts(&data, hbg_ethtool_stats_info[i].name);
+}
+
+static void hbg_ethtool_get_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ u32 i;
+
+ hbg_update_stats(priv);
+ for (i = 0; i < ARRAY_SIZE(hbg_ethtool_stats_info); i++)
+ *data++ = HBG_STATS_R(&priv->stats,
+ hbg_ethtool_stats_info[i].offset);
+}
+
+static void hbg_ethtool_get_pause_stats(struct net_device *netdev,
+ struct ethtool_pause_stats *epstats)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_stats *stats = &priv->stats;
+
+ hbg_update_stats(priv);
+ epstats->rx_pause_frames = stats->rx_pause_macctl_frame_cnt;
+ epstats->tx_pause_frames = stats->tx_pause_frame_cnt;
+}
+
+static void hbg_ethtool_get_eth_mac_stats(struct net_device *netdev,
+ struct ethtool_eth_mac_stats *emstats)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_stats *stats = &priv->stats;
+
+ hbg_update_stats(priv);
+ emstats->FramesTransmittedOK = stats->tx_trans_pkt_cnt;
+ emstats->FramesReceivedOK = stats->rx_trans_pkt_cnt;
+ emstats->FrameCheckSequenceErrors = stats->rx_fcs_error_cnt;
+ emstats->AlignmentErrors = stats->rx_align_error_cnt;
+ emstats->OctetsTransmittedOK = stats->tx_octets_total_ok_cnt;
+ emstats->OctetsReceivedOK = stats->rx_octets_total_ok_cnt;
+
+ emstats->MulticastFramesXmittedOK = stats->tx_mc_pkt_cnt;
+ emstats->BroadcastFramesXmittedOK = stats->tx_bc_pkt_cnt;
+ emstats->MulticastFramesReceivedOK = stats->rx_mc_pkt_cnt;
+ emstats->BroadcastFramesReceivedOK = stats->rx_bc_pkt_cnt;
+ emstats->InRangeLengthErrors = stats->rx_fcs_error_cnt +
+ stats->rx_jabber_err_cnt +
+ stats->rx_unknown_macctl_frame_cnt +
+ stats->rx_bufrq_err_cnt +
+ stats->rx_we_err_cnt;
+ emstats->OutOfRangeLengthField = stats->rx_frame_short_err_cnt +
+ stats->rx_frame_runt_err_cnt +
+ stats->rx_lengthfield_err_cnt +
+ stats->rx_frame_long_err_cnt +
+ stats->rx_frame_very_long_err_cnt;
+ emstats->FrameTooLongErrors = stats->rx_frame_long_err_cnt +
+ stats->rx_frame_very_long_err_cnt;
+}
+
+static void
+hbg_ethtool_get_eth_ctrl_stats(struct net_device *netdev,
+ struct ethtool_eth_ctrl_stats *ecstats)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_stats *s = &priv->stats;
+
+ hbg_update_stats(priv);
+ ecstats->MACControlFramesTransmitted = s->tx_pause_frame_cnt;
+ ecstats->MACControlFramesReceived = s->rx_pause_macctl_frame_cnt;
+ ecstats->UnsupportedOpcodesReceived = s->rx_unknown_macctl_frame_cnt;
+}
+
+static const struct ethtool_rmon_hist_range hbg_rmon_ranges[] = {
+ { 0, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, 4095 },
+};
+
+static void
+hbg_ethtool_get_rmon_stats(struct net_device *netdev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_stats *stats = &priv->stats;
+
+ hbg_update_stats(priv);
+ rmon_stats->undersize_pkts = stats->rx_frame_short_err_cnt +
+ stats->rx_frame_runt_err_cnt +
+ stats->rx_lengthfield_err_cnt;
+ rmon_stats->oversize_pkts = stats->rx_frame_long_err_cnt +
+ stats->rx_frame_very_long_err_cnt;
+ rmon_stats->fragments = stats->rx_desc_frag_cnt;
+ rmon_stats->hist[0] = stats->rx_framesize_64;
+ rmon_stats->hist[1] = stats->rx_framesize_65_127;
+ rmon_stats->hist[2] = stats->rx_framesize_128_255;
+ rmon_stats->hist[3] = stats->rx_framesize_256_511;
+ rmon_stats->hist[4] = stats->rx_framesize_512_1023;
+ rmon_stats->hist[5] = stats->rx_framesize_1024_1518;
+ rmon_stats->hist[6] = stats->rx_framesize_bt_1518;
+
+ rmon_stats->hist_tx[0] = stats->tx_framesize_64;
+ rmon_stats->hist_tx[1] = stats->tx_framesize_65_127;
+ rmon_stats->hist_tx[2] = stats->tx_framesize_128_255;
+ rmon_stats->hist_tx[3] = stats->tx_framesize_256_511;
+ rmon_stats->hist_tx[4] = stats->tx_framesize_512_1023;
+ rmon_stats->hist_tx[5] = stats->tx_framesize_1024_1518;
+ rmon_stats->hist_tx[6] = stats->tx_framesize_bt_1518;
+
+ *ranges = hbg_rmon_ranges;
+}
+
+static const struct ethtool_ops hbg_ethtool_ops = {
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_regs_len = hbg_ethtool_get_regs_len,
+ .get_regs = hbg_ethtool_get_regs,
+ .get_pauseparam = hbg_ethtool_get_pauseparam,
+ .set_pauseparam = hbg_ethtool_set_pauseparam,
+ .reset = hbg_ethtool_reset,
+ .nway_reset = phy_ethtool_nway_reset,
+ .get_sset_count = hbg_ethtool_get_sset_count,
+ .get_strings = hbg_ethtool_get_strings,
+ .get_ethtool_stats = hbg_ethtool_get_stats,
+ .get_pause_stats = hbg_ethtool_get_pause_stats,
+ .get_eth_mac_stats = hbg_ethtool_get_eth_mac_stats,
+ .get_eth_ctrl_stats = hbg_ethtool_get_eth_ctrl_stats,
+ .get_rmon_stats = hbg_ethtool_get_rmon_stats,
+};
+
+void hbg_ethtool_set_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &hbg_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h
new file mode 100644
index 000000000000..e173155b146a
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2024 Hisilicon Limited. */
+
+#ifndef __HBG_ETHTOOL_H
+#define __HBG_ETHTOOL_H
+
+#include <linux/netdevice.h>
+
+#define HBG_STATS_FIELD_OFF(f) (offsetof(struct hbg_stats, f))
+#define HBG_STATS_R(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
+#define HBG_STATS_U(p, offset, val) (HBG_STATS_R(p, offset) += (val))
+
+void hbg_ethtool_set_ops(struct net_device *netdev);
+void hbg_update_stats(struct hbg_priv *priv);
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
new file mode 100644
index 000000000000..d6e8ce8e351a
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
@@ -0,0 +1,399 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2024 Hisilicon Limited.
+
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/iopoll.h>
+#include <linux/minmax.h>
+#include "hbg_common.h"
+#include "hbg_hw.h"
+#include "hbg_reg.h"
+
+#define HBG_HW_EVENT_WAIT_TIMEOUT_US (2 * 1000 * 1000)
+#define HBG_HW_EVENT_WAIT_INTERVAL_US (10 * 1000)
+#define HBG_MAC_LINK_WAIT_TIMEOUT_US (500 * 1000)
+#define HBG_MAC_LINK_WAIT_INTERVAL_US (5 * 1000)
+/* Endianness selection for the bus: "ctrl" refers to the packet
+ * descriptors, "data" refers to the skb packet data.
+ */
+#define HBG_ENDIAN_CTRL_LE_DATA_BE 0x0
+#define HBG_PCU_FRAME_LEN_PLUS 4
+
+#define HBG_FIFO_TX_FULL_THRSLD 0x3F0
+#define HBG_FIFO_TX_EMPTY_THRSLD 0x1F0
+#define HBG_FIFO_RX_FULL_THRSLD 0x240
+#define HBG_FIFO_RX_EMPTY_THRSLD 0x190
+#define HBG_CFG_FIFO_FULL_THRSLD 0x10
+#define HBG_CFG_FIFO_EMPTY_THRSLD 0x01
+
+static bool hbg_hw_spec_is_valid(struct hbg_priv *priv)
+{
+ return hbg_reg_read(priv, HBG_REG_SPEC_VALID_ADDR) &&
+ !hbg_reg_read(priv, HBG_REG_EVENT_REQ_ADDR);
+}
+
+int hbg_hw_event_notify(struct hbg_priv *priv,
+ enum hbg_hw_event_type event_type)
+{
+ bool is_valid;
+ int ret;
+
+ if (test_and_set_bit(HBG_NIC_STATE_EVENT_HANDLING, &priv->state))
+ return -EBUSY;
+
+ /* notify */
+ hbg_reg_write(priv, HBG_REG_EVENT_REQ_ADDR, event_type);
+
+ ret = read_poll_timeout(hbg_hw_spec_is_valid, is_valid, is_valid,
+ HBG_HW_EVENT_WAIT_INTERVAL_US,
+ HBG_HW_EVENT_WAIT_TIMEOUT_US,
+ HBG_HW_EVENT_WAIT_INTERVAL_US, priv);
+
+ clear_bit(HBG_NIC_STATE_EVENT_HANDLING, &priv->state);
+
+ if (ret)
+ dev_err(&priv->pdev->dev,
+ "event %d wait timeout\n", event_type);
+
+ return ret;
+}
+
+static int hbg_hw_dev_specs_init(struct hbg_priv *priv)
+{
+ struct hbg_dev_specs *specs = &priv->dev_specs;
+ u64 mac_addr;
+
+ if (!hbg_hw_spec_is_valid(priv)) {
+ dev_err(&priv->pdev->dev, "dev_specs not init\n");
+ return -EINVAL;
+ }
+
+ specs->mac_id = hbg_reg_read(priv, HBG_REG_MAC_ID_ADDR);
+ specs->phy_addr = hbg_reg_read(priv, HBG_REG_PHY_ID_ADDR);
+ specs->mdio_frequency = hbg_reg_read(priv, HBG_REG_MDIO_FREQ_ADDR);
+ specs->max_mtu = hbg_reg_read(priv, HBG_REG_MAX_MTU_ADDR);
+ specs->min_mtu = hbg_reg_read(priv, HBG_REG_MIN_MTU_ADDR);
+ specs->vlan_layers = hbg_reg_read(priv, HBG_REG_VLAN_LAYERS_ADDR);
+ specs->rx_fifo_num = hbg_reg_read(priv, HBG_REG_RX_FIFO_NUM_ADDR);
+ specs->tx_fifo_num = hbg_reg_read(priv, HBG_REG_TX_FIFO_NUM_ADDR);
+ specs->uc_mac_num = hbg_reg_read(priv, HBG_REG_UC_MAC_NUM_ADDR);
+
+ mac_addr = hbg_reg_read64(priv, HBG_REG_MAC_ADDR_ADDR);
+ u64_to_ether_addr(mac_addr, (u8 *)specs->mac_addr.sa_data);
+
+ if (!is_valid_ether_addr((u8 *)specs->mac_addr.sa_data))
+ return -EADDRNOTAVAIL;
+
+ specs->max_frame_len = HBG_PCU_CACHE_LINE_SIZE + specs->max_mtu;
+ specs->rx_buf_size = HBG_PACKET_HEAD_SIZE + specs->max_frame_len;
+ return 0;
+}
+
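+/* TX and RX status live in dedicated indication registers; fold them into
+ * the driver-only TX/RX bits of the common interrupt status word.
+ */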
+u32 hbg_hw_get_irq_status(struct hbg_priv *priv)
+{
+ u32 status;
+
+ status = hbg_reg_read(priv, HBG_REG_CF_INTRPT_STAT_ADDR);
+
+ hbg_field_modify(status, HBG_INT_MSK_TX_B,
+ hbg_reg_read(priv, HBG_REG_CF_IND_TXINT_STAT_ADDR));
+ hbg_field_modify(status, HBG_INT_MSK_RX_B,
+ hbg_reg_read(priv, HBG_REG_CF_IND_RXINT_STAT_ADDR));
+
+ return status;
+}
+
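+/* TX and RX interrupts use dedicated indirect clear/mask registers;
+ * all other sources go through the common interrupt registers.
+ */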
+void hbg_hw_irq_clear(struct hbg_priv *priv, u32 mask)
+{
+ if (FIELD_GET(HBG_INT_MSK_TX_B, mask))
+ return hbg_reg_write(priv, HBG_REG_CF_IND_TXINT_CLR_ADDR, 0x1);
+
+ if (FIELD_GET(HBG_INT_MSK_RX_B, mask))
+ return hbg_reg_write(priv, HBG_REG_CF_IND_RXINT_CLR_ADDR, 0x1);
+
+ return hbg_reg_write(priv, HBG_REG_CF_INTRPT_CLR_ADDR, mask);
+}
+
+bool hbg_hw_irq_is_enabled(struct hbg_priv *priv, u32 mask)
+{
+ if (FIELD_GET(HBG_INT_MSK_TX_B, mask))
+ return hbg_reg_read(priv, HBG_REG_CF_IND_TXINT_MSK_ADDR);
+
+ if (FIELD_GET(HBG_INT_MSK_RX_B, mask))
+ return hbg_reg_read(priv, HBG_REG_CF_IND_RXINT_MSK_ADDR);
+
+ return hbg_reg_read(priv, HBG_REG_CF_INTRPT_MSK_ADDR) & mask;
+}
+
+void hbg_hw_irq_enable(struct hbg_priv *priv, u32 mask, bool enable)
+{
+ u32 value;
+
+ if (FIELD_GET(HBG_INT_MSK_TX_B, mask))
+ return hbg_reg_write(priv,
+ HBG_REG_CF_IND_TXINT_MSK_ADDR, enable);
+
+ if (FIELD_GET(HBG_INT_MSK_RX_B, mask))
+ return hbg_reg_write(priv,
+ HBG_REG_CF_IND_RXINT_MSK_ADDR, enable);
+
+ value = hbg_reg_read(priv, HBG_REG_CF_INTRPT_MSK_ADDR);
+ if (enable)
+ value |= mask;
+ else
+ value &= ~mask;
+
+ hbg_reg_write(priv, HBG_REG_CF_INTRPT_MSK_ADDR, value);
+}
+
+void hbg_hw_set_uc_addr(struct hbg_priv *priv, u64 mac_addr, u32 index)
+{
+ u32 addr;
+
+ /* mac addr is u64, so the addr offset is 0x8 */
+ addr = HBG_REG_STATION_ADDR_LOW_2_ADDR + (index * 0x8);
+ hbg_reg_write64(priv, addr, mac_addr);
+}
+
+static void hbg_hw_set_pcu_max_frame_len(struct hbg_priv *priv,
+ u16 max_frame_len)
+{
+ max_frame_len = max_t(u32, max_frame_len, ETH_DATA_LEN);
+
+ /* lower two bits of value must be set to 0 */
+ max_frame_len = round_up(max_frame_len, HBG_PCU_FRAME_LEN_PLUS);
+
+ hbg_reg_write_field(priv, HBG_REG_MAX_FRAME_LEN_ADDR,
+ HBG_REG_MAX_FRAME_LEN_M, max_frame_len);
+}
+
+static void hbg_hw_set_mac_max_frame_len(struct hbg_priv *priv,
+ u16 max_frame_size)
+{
+ hbg_reg_write_field(priv, HBG_REG_MAX_FRAME_SIZE_ADDR,
+ HBG_REG_MAX_FRAME_LEN_M, max_frame_size);
+}
+
+void hbg_hw_set_mtu(struct hbg_priv *priv, u16 mtu)
+{
+	/* Setting burst_len BIT(29) improves TX performance, but causes
+	 * packet drops when the MTU is larger than 2000 bytes,
+	 * so clear BIT(29) in that case.
+	 */
+ u32 burst_len_bit = (mtu > 2000) ? 0 : 1;
+ u32 frame_len;
+
+ frame_len = mtu + VLAN_HLEN * priv->dev_specs.vlan_layers +
+ ETH_HLEN + ETH_FCS_LEN;
+
+ hbg_hw_set_pcu_max_frame_len(priv, frame_len);
+ hbg_hw_set_mac_max_frame_len(priv, frame_len);
+
+ hbg_reg_write_field(priv, HBG_REG_BRUST_LENGTH_ADDR,
+ HBG_REG_BRUST_LENGTH_B, burst_len_bit);
+}
+
+void hbg_hw_mac_enable(struct hbg_priv *priv, u32 enable)
+{
+ hbg_reg_write_field(priv, HBG_REG_PORT_ENABLE_ADDR,
+ HBG_REG_PORT_ENABLE_TX_B, enable);
+ hbg_reg_write_field(priv, HBG_REG_PORT_ENABLE_ADDR,
+ HBG_REG_PORT_ENABLE_RX_B, enable);
+}
+
+u32 hbg_hw_get_fifo_used_num(struct hbg_priv *priv, enum hbg_dir dir)
+{
+ if (dir & HBG_DIR_TX)
+ return hbg_reg_read_field(priv, HBG_REG_CF_CFF_DATA_NUM_ADDR,
+ HBG_REG_CF_CFF_DATA_NUM_ADDR_TX_M);
+
+ if (dir & HBG_DIR_RX)
+ return hbg_reg_read_field(priv, HBG_REG_CF_CFF_DATA_NUM_ADDR,
+ HBG_REG_CF_CFF_DATA_NUM_ADDR_RX_M);
+
+ return 0;
+}
+
+void hbg_hw_set_tx_desc(struct hbg_priv *priv, struct hbg_tx_desc *tx_desc)
+{
+ hbg_reg_write(priv, HBG_REG_TX_CFF_ADDR_0_ADDR, tx_desc->word0);
+ hbg_reg_write(priv, HBG_REG_TX_CFF_ADDR_1_ADDR, tx_desc->word1);
+ hbg_reg_write(priv, HBG_REG_TX_CFF_ADDR_2_ADDR, tx_desc->word2);
+ hbg_reg_write(priv, HBG_REG_TX_CFF_ADDR_3_ADDR, tx_desc->word3);
+}
+
+void hbg_hw_fill_buffer(struct hbg_priv *priv, u32 buffer_dma_addr)
+{
+ hbg_reg_write(priv, HBG_REG_RX_CFF_ADDR_ADDR, buffer_dma_addr);
+}
+
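+/* Program the MAC speed/duplex: the MAC is disabled and a core reset is
+ * issued around the change; when a PHY is present, wait for the MAC link
+ * to come back up.
+ */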
+void hbg_hw_adjust_link(struct hbg_priv *priv, u32 speed, u32 duplex)
+{
+ u32 link_status;
+ int ret;
+
+ hbg_hw_mac_enable(priv, HBG_STATUS_DISABLE);
+
+ hbg_reg_write_field(priv, HBG_REG_PORT_MODE_ADDR,
+ HBG_REG_PORT_MODE_M, speed);
+ hbg_reg_write_field(priv, HBG_REG_DUPLEX_TYPE_ADDR,
+ HBG_REG_DUPLEX_B, duplex);
+
+ hbg_hw_event_notify(priv, HBG_HW_EVENT_CORE_RESET);
+
+ hbg_hw_mac_enable(priv, HBG_STATUS_ENABLE);
+
+ if (priv->mac.phy_addr == HBG_NO_PHY)
+ return;
+
+	/* wait for the MAC link to come up */
+ ret = readl_poll_timeout(priv->io_base + HBG_REG_AN_NEG_STATE_ADDR,
+ link_status,
+ FIELD_GET(HBG_REG_AN_NEG_STATE_NP_LINK_OK_B,
+ link_status),
+ HBG_MAC_LINK_WAIT_INTERVAL_US,
+ HBG_MAC_LINK_WAIT_TIMEOUT_US);
+ if (ret)
+ hbg_np_link_fail_task_schedule(priv);
+}
+
+/* only the unicast filter is supported */
+void hbg_hw_set_mac_filter_enable(struct hbg_priv *priv, u32 enable)
+{
+ hbg_reg_write_field(priv, HBG_REG_REC_FILT_CTRL_ADDR,
+ HBG_REG_REC_FILT_CTRL_UC_MATCH_EN_B, enable);
+
+ /* only uc filter is supported, so set all bits of mc mask reg to 1 */
+ hbg_reg_write64(priv, HBG_REG_STATION_ADDR_LOW_MSK_0, U64_MAX);
+ hbg_reg_write64(priv, HBG_REG_STATION_ADDR_LOW_MSK_1, U64_MAX);
+}
+
+void hbg_hw_set_pause_enable(struct hbg_priv *priv, u32 tx_en, u32 rx_en)
+{
+ hbg_reg_write_field(priv, HBG_REG_PAUSE_ENABLE_ADDR,
+ HBG_REG_PAUSE_ENABLE_TX_B, tx_en);
+ hbg_reg_write_field(priv, HBG_REG_PAUSE_ENABLE_ADDR,
+ HBG_REG_PAUSE_ENABLE_RX_B, rx_en);
+
+ hbg_reg_write_field(priv, HBG_REG_REC_FILT_CTRL_ADDR,
+ HBG_REG_REC_FILT_CTRL_PAUSE_FRM_PASS_B, rx_en);
+}
+
+void hbg_hw_get_pause_enable(struct hbg_priv *priv, u32 *tx_en, u32 *rx_en)
+{
+ *tx_en = hbg_reg_read_field(priv, HBG_REG_PAUSE_ENABLE_ADDR,
+ HBG_REG_PAUSE_ENABLE_TX_B);
+ *rx_en = hbg_reg_read_field(priv, HBG_REG_PAUSE_ENABLE_ADDR,
+ HBG_REG_PAUSE_ENABLE_RX_B);
+}
+
+void hbg_hw_set_rx_pause_mac_addr(struct hbg_priv *priv, u64 mac_addr)
+{
+ hbg_reg_write64(priv, HBG_REG_FD_FC_ADDR_LOW_ADDR, mac_addr);
+}
+
+static void hbg_hw_set_fifo_thrsld(struct hbg_priv *priv,
+ u32 full, u32 empty, enum hbg_dir dir)
+{
+ u32 value = 0;
+
+ value |= FIELD_PREP(HBG_REG_FIFO_THRSLD_FULL_M, full);
+ value |= FIELD_PREP(HBG_REG_FIFO_THRSLD_EMPTY_M, empty);
+
+ if (dir & HBG_DIR_TX)
+ hbg_reg_write(priv, HBG_REG_TX_FIFO_THRSLD_ADDR, value);
+
+ if (dir & HBG_DIR_RX)
+ hbg_reg_write(priv, HBG_REG_RX_FIFO_THRSLD_ADDR, value);
+}
+
+static void hbg_hw_set_cfg_fifo_thrsld(struct hbg_priv *priv,
+ u32 full, u32 empty, enum hbg_dir dir)
+{
+ u32 value;
+
+ value = hbg_reg_read(priv, HBG_REG_CFG_FIFO_THRSLD_ADDR);
+
+ if (dir & HBG_DIR_TX) {
+ value |= FIELD_PREP(HBG_REG_CFG_FIFO_THRSLD_TX_FULL_M, full);
+ value |= FIELD_PREP(HBG_REG_CFG_FIFO_THRSLD_TX_EMPTY_M, empty);
+ }
+
+ if (dir & HBG_DIR_RX) {
+ value |= FIELD_PREP(HBG_REG_CFG_FIFO_THRSLD_RX_FULL_M, full);
+ value |= FIELD_PREP(HBG_REG_CFG_FIFO_THRSLD_RX_EMPTY_M, empty);
+ }
+
+ hbg_reg_write(priv, HBG_REG_CFG_FIFO_THRSLD_ADDR, value);
+}
+
+static void hbg_hw_init_transmit_ctrl(struct hbg_priv *priv)
+{
+ u32 ctrl = 0;
+
+ ctrl |= FIELD_PREP(HBG_REG_TRANSMIT_CTRL_AN_EN_B, HBG_STATUS_ENABLE);
+ ctrl |= FIELD_PREP(HBG_REG_TRANSMIT_CTRL_CRC_ADD_B, HBG_STATUS_ENABLE);
+ ctrl |= FIELD_PREP(HBG_REG_TRANSMIT_CTRL_PAD_EN_B, HBG_STATUS_ENABLE);
+
+ hbg_reg_write(priv, HBG_REG_TRANSMIT_CTRL_ADDR, ctrl);
+}
+
+static void hbg_hw_init_rx_ctrl(struct hbg_priv *priv)
+{
+ u32 ctrl = 0;
+
+ ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_RX_GET_ADDR_MODE_B,
+ HBG_STATUS_ENABLE);
+ ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_TIME_INF_EN_B, HBG_STATUS_DISABLE);
+ ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_RXBUF_1ST_SKIP_SIZE_M, HBG_RX_SKIP1);
+ ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_RXBUF_1ST_SKIP_SIZE2_M,
+ HBG_RX_SKIP2);
+ ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_RX_ALIGN_NUM_M, NET_IP_ALIGN);
+ ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_PORT_NUM, priv->dev_specs.mac_id);
+
+ hbg_reg_write(priv, HBG_REG_RX_CTRL_ADDR, ctrl);
+}
+
+static void hbg_hw_init_rx_control(struct hbg_priv *priv)
+{
+ hbg_hw_init_rx_ctrl(priv);
+
+ /* parse from L2 layer */
+ hbg_reg_write_field(priv, HBG_REG_RX_PKT_MODE_ADDR,
+ HBG_REG_RX_PKT_MODE_PARSE_MODE_M, 0x1);
+
+ hbg_reg_write_field(priv, HBG_REG_RECV_CTRL_ADDR,
+ HBG_REG_RECV_CTRL_STRIP_PAD_EN_B,
+ HBG_STATUS_ENABLE);
+ hbg_reg_write_field(priv, HBG_REG_RX_BUF_SIZE_ADDR,
+ HBG_REG_RX_BUF_SIZE_M, priv->dev_specs.rx_buf_size);
+ hbg_reg_write_field(priv, HBG_REG_CF_CRC_STRIP_ADDR,
+ HBG_REG_CF_CRC_STRIP_B, HBG_STATUS_DISABLE);
+}
+
+int hbg_hw_init(struct hbg_priv *priv)
+{
+ int ret;
+
+ ret = hbg_hw_dev_specs_init(priv);
+ if (ret)
+ return ret;
+
+ hbg_reg_write_field(priv, HBG_REG_BUS_CTRL_ADDR,
+ HBG_REG_BUS_CTRL_ENDIAN_M,
+ HBG_ENDIAN_CTRL_LE_DATA_BE);
+ hbg_reg_write_field(priv, HBG_REG_MODE_CHANGE_EN_ADDR,
+ HBG_REG_MODE_CHANGE_EN_B, HBG_STATUS_ENABLE);
+
+ hbg_hw_init_rx_control(priv);
+ hbg_hw_init_transmit_ctrl(priv);
+
+ hbg_hw_set_fifo_thrsld(priv, HBG_FIFO_TX_FULL_THRSLD,
+ HBG_FIFO_TX_EMPTY_THRSLD, HBG_DIR_TX);
+ hbg_hw_set_fifo_thrsld(priv, HBG_FIFO_RX_FULL_THRSLD,
+ HBG_FIFO_RX_EMPTY_THRSLD, HBG_DIR_RX);
+ hbg_hw_set_cfg_fifo_thrsld(priv, HBG_CFG_FIFO_FULL_THRSLD,
+ HBG_CFG_FIFO_EMPTY_THRSLD, HBG_DIR_TX_RX);
+ return 0;
+}
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h
new file mode 100644
index 000000000000..a4a049b5121d
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2024 Hisilicon Limited. */
+
+#ifndef __HBG_HW_H
+#define __HBG_HW_H
+
+#include <linux/bitfield.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+
+static inline u32 hbg_reg_read(struct hbg_priv *priv, u32 addr)
+{
+ return readl(priv->io_base + addr);
+}
+
+static inline void hbg_reg_write(struct hbg_priv *priv, u32 addr, u32 value)
+{
+ writel(value, priv->io_base + addr);
+}
+
+static inline u64 hbg_reg_read64(struct hbg_priv *priv, u32 addr)
+{
+ return lo_hi_readq(priv->io_base + addr);
+}
+
+static inline void hbg_reg_write64(struct hbg_priv *priv, u32 addr, u64 value)
+{
+ lo_hi_writeq(value, priv->io_base + addr);
+}
+
+#define hbg_reg_read_field(priv, addr, mask) \
+ FIELD_GET(mask, hbg_reg_read(priv, addr))
+
+#define hbg_field_modify(reg_value, mask, value) ({ \
+ (reg_value) &= ~(mask); \
+ (reg_value) |= FIELD_PREP(mask, value); })
+
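+/* read-modify-write helper: read the register, update the masked field,
+ * then write the value back.
+ */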
+#define hbg_reg_write_field(priv, addr, mask, val) ({ \
+ typeof(priv) _priv = (priv); \
+ typeof(addr) _addr = (addr); \
+ u32 _value = hbg_reg_read(_priv, _addr); \
+ hbg_field_modify(_value, mask, val); \
+ hbg_reg_write(_priv, _addr, _value); })
+
+int hbg_hw_event_notify(struct hbg_priv *priv,
+ enum hbg_hw_event_type event_type);
+int hbg_hw_init(struct hbg_priv *priv);
+void hbg_hw_adjust_link(struct hbg_priv *priv, u32 speed, u32 duplex);
+u32 hbg_hw_get_irq_status(struct hbg_priv *priv);
+void hbg_hw_irq_clear(struct hbg_priv *priv, u32 mask);
+bool hbg_hw_irq_is_enabled(struct hbg_priv *priv, u32 mask);
+void hbg_hw_irq_enable(struct hbg_priv *priv, u32 mask, bool enable);
+void hbg_hw_set_mtu(struct hbg_priv *priv, u16 mtu);
+void hbg_hw_mac_enable(struct hbg_priv *priv, u32 enable);
+void hbg_hw_set_uc_addr(struct hbg_priv *priv, u64 mac_addr, u32 index);
+u32 hbg_hw_get_fifo_used_num(struct hbg_priv *priv, enum hbg_dir dir);
+void hbg_hw_set_tx_desc(struct hbg_priv *priv, struct hbg_tx_desc *tx_desc);
+void hbg_hw_fill_buffer(struct hbg_priv *priv, u32 buffer_dma_addr);
+void hbg_hw_set_mac_filter_enable(struct hbg_priv *priv, u32 enable);
+void hbg_hw_set_pause_enable(struct hbg_priv *priv, u32 tx_en, u32 rx_en);
+void hbg_hw_get_pause_enable(struct hbg_priv *priv, u32 *tx_en, u32 *rx_en);
+void hbg_hw_set_rx_pause_mac_addr(struct hbg_priv *priv, u64 mac_addr);
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c
new file mode 100644
index 000000000000..ae4cb35186d8
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2024 Hisilicon Limited.
+
+#include <linux/interrupt.h>
+#include "hbg_irq.h"
+#include "hbg_hw.h"
+
+static void hbg_irq_handle_err(struct hbg_priv *priv,
+ const struct hbg_irq_info *irq_info)
+{
+ if (irq_info->need_print)
+ dev_err(&priv->pdev->dev,
+ "receive error interrupt: %s\n", irq_info->name);
+
+ if (irq_info->need_reset)
+ hbg_err_reset_task_schedule(priv);
+}
+
+static void hbg_irq_handle_tx(struct hbg_priv *priv,
+ const struct hbg_irq_info *irq_info)
+{
+ napi_schedule(&priv->tx_ring.napi);
+}
+
+static void hbg_irq_handle_rx(struct hbg_priv *priv,
+ const struct hbg_irq_info *irq_info)
+{
+ napi_schedule(&priv->rx_ring.napi);
+}
+
+static void hbg_irq_handle_rx_buf_val(struct hbg_priv *priv,
+ const struct hbg_irq_info *irq_info)
+{
+ priv->stats.rx_fifo_less_empty_thrsld_cnt++;
+ hbg_hw_irq_enable(priv, irq_info->mask, true);
+}
+
+#define HBG_IRQ_I(name, handle) \
+ {#name, HBG_INT_MSK_##name##_B, false, false, false, handle}
+#define HBG_ERR_IRQ_I(name, need_print, need_reset) \
+	{#name, HBG_INT_MSK_##name##_B, true, need_print, \
+	 need_reset, hbg_irq_handle_err}
+
+static const struct hbg_irq_info hbg_irqs[] = {
+ HBG_IRQ_I(RX, hbg_irq_handle_rx),
+ HBG_IRQ_I(TX, hbg_irq_handle_tx),
+ HBG_ERR_IRQ_I(TX_PKT_CPL, true, true),
+ HBG_ERR_IRQ_I(MAC_MII_FIFO_ERR, true, true),
+ HBG_ERR_IRQ_I(MAC_PCS_RX_FIFO_ERR, true, true),
+ HBG_ERR_IRQ_I(MAC_PCS_TX_FIFO_ERR, true, true),
+ HBG_ERR_IRQ_I(MAC_APP_RX_FIFO_ERR, true, true),
+ HBG_ERR_IRQ_I(MAC_APP_TX_FIFO_ERR, true, true),
+ HBG_ERR_IRQ_I(SRAM_PARITY_ERR, true, false),
+ HBG_ERR_IRQ_I(TX_AHB_ERR, true, true),
+ HBG_IRQ_I(RX_BUF_AVL, hbg_irq_handle_rx_buf_val),
+ HBG_ERR_IRQ_I(REL_BUF_ERR, true, false),
+ HBG_ERR_IRQ_I(TXCFG_AVL, false, false),
+ HBG_ERR_IRQ_I(TX_DROP, false, false),
+ HBG_ERR_IRQ_I(RX_DROP, false, false),
+ HBG_ERR_IRQ_I(RX_AHB_ERR, true, false),
+ HBG_ERR_IRQ_I(MAC_FIFO_ERR, true, true),
+ HBG_ERR_IRQ_I(RBREQ_ERR, true, true),
+ HBG_ERR_IRQ_I(WE_ERR, true, true),
+};
+
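+/* Handler shared by the tx/rx/err vectors: each pending source is masked,
+ * cleared and counted before its handler runs; sources flagged re_enable
+ * are unmasked again afterwards.
+ */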
+static irqreturn_t hbg_irq_handle(int irq_num, void *p)
+{
+ const struct hbg_irq_info *info;
+ struct hbg_priv *priv = p;
+ u32 status;
+ u32 i;
+
+ status = hbg_hw_get_irq_status(priv);
+ for (i = 0; i < priv->vectors.info_array_len; i++) {
+ info = &priv->vectors.info_array[i];
+ if (status & info->mask) {
+ if (!hbg_hw_irq_is_enabled(priv, info->mask))
+ continue;
+
+ hbg_hw_irq_enable(priv, info->mask, false);
+ hbg_hw_irq_clear(priv, info->mask);
+
+ priv->vectors.stats_array[i]++;
+ if (info->irq_handle)
+ info->irq_handle(priv, info);
+
+ if (info->re_enable)
+ hbg_hw_irq_enable(priv, info->mask, true);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static const char *irq_names_map[HBG_VECTOR_NUM] = { "tx", "rx",
+ "err", "mdio" };
+
+int hbg_irq_init(struct hbg_priv *priv)
+{
+ struct hbg_vector *vectors = &priv->vectors;
+ struct device *dev = &priv->pdev->dev;
+ int ret, id;
+ u32 i;
+
+	/* pcim_enable_device() was used, so the allocated vectors are
+	 * device managed
+	 */
+ ret = pci_alloc_irq_vectors(priv->pdev, HBG_VECTOR_NUM, HBG_VECTOR_NUM,
+ PCI_IRQ_MSI | PCI_IRQ_MSIX);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to allocate vectors\n");
+
+ if (ret != HBG_VECTOR_NUM)
+ return dev_err_probe(dev, -EINVAL,
+ "requested %u MSI, but allocated %d MSI\n",
+ HBG_VECTOR_NUM, ret);
+
+	/* the mdio irq is not requested, so only HBG_VECTOR_NUM - 1
+	 * interrupts are requested here.
+	 */
+ for (i = 0; i < HBG_VECTOR_NUM - 1; i++) {
+ id = pci_irq_vector(priv->pdev, i);
+ if (id < 0)
+ return dev_err_probe(dev, id, "failed to get irq id\n");
+
+ snprintf(vectors->name[i], sizeof(vectors->name[i]), "%s-%s-%s",
+ dev_driver_string(dev), pci_name(priv->pdev),
+ irq_names_map[i]);
+
+ ret = devm_request_irq(dev, id, hbg_irq_handle, 0,
+ vectors->name[i], priv);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to request irq: %s\n",
+ irq_names_map[i]);
+ }
+
+ vectors->stats_array = devm_kcalloc(&priv->pdev->dev,
+ ARRAY_SIZE(hbg_irqs),
+ sizeof(u64), GFP_KERNEL);
+ if (!vectors->stats_array)
+ return -ENOMEM;
+
+ vectors->info_array = hbg_irqs;
+ vectors->info_array_len = ARRAY_SIZE(hbg_irqs);
+ return 0;
+}
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.h
new file mode 100644
index 000000000000..5c5323cfc751
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2024 Hisilicon Limited. */
+
+#ifndef __HBG_IRQ_H
+#define __HBG_IRQ_H
+
+#include "hbg_common.h"
+
+int hbg_irq_init(struct hbg_priv *priv);
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c
new file mode 100644
index 000000000000..068da2fd1fea
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c
@@ -0,0 +1,528 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2024 Hisilicon Limited.
+
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/phy.h>
+#include "hbg_common.h"
+#include "hbg_diagnose.h"
+#include "hbg_err.h"
+#include "hbg_ethtool.h"
+#include "hbg_hw.h"
+#include "hbg_irq.h"
+#include "hbg_mdio.h"
+#include "hbg_txrx.h"
+#include "hbg_debugfs.h"
+
+#define HBG_SUPPORT_FEATURES (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | \
+ NETIF_F_RXCSUM)
+
+static void hbg_all_irq_enable(struct hbg_priv *priv, bool enabled)
+{
+ const struct hbg_irq_info *info;
+ u32 i;
+
+ for (i = 0; i < priv->vectors.info_array_len; i++) {
+ info = &priv->vectors.info_array[i];
+ hbg_hw_irq_enable(priv, info->mask, enabled);
+ }
+}
+
+static int hbg_net_open(struct net_device *netdev)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ int ret;
+
+ ret = hbg_txrx_init(priv);
+ if (ret)
+ return ret;
+
+ hbg_all_irq_enable(priv, true);
+ hbg_hw_mac_enable(priv, HBG_STATUS_ENABLE);
+ netif_start_queue(netdev);
+ hbg_phy_start(priv);
+
+ return 0;
+}
+
+/* This function can only be called after hbg_txrx_uninit() */
+static int hbg_hw_txrx_clear(struct hbg_priv *priv)
+{
+ int ret;
+
+	/* After the ring buffers have been released, do a reset to release
+	 * the buffers still held in the hardware RX FIFO.
+	 */
+ ret = hbg_hw_event_notify(priv, HBG_HW_EVENT_RESET);
+ if (ret)
+ return ret;
+
+ /* After reset, regs need to be reconfigured */
+ return hbg_rebuild(priv);
+}
+
+static int hbg_net_stop(struct net_device *netdev)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ hbg_phy_stop(priv);
+ netif_stop_queue(netdev);
+ hbg_hw_mac_enable(priv, HBG_STATUS_DISABLE);
+ hbg_all_irq_enable(priv, false);
+ hbg_txrx_uninit(priv);
+ return hbg_hw_txrx_clear(priv);
+}
+
+static void hbg_update_promisc_mode(struct net_device *netdev, bool overflow)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+	/* The MAC filter is enabled only when the address table has not
+	 * overflowed and IFF_PROMISC is not set in netdev->flags.
+	 * Otherwise the filter is disabled.
+	 */
+ priv->filter.enabled = !(overflow || (netdev->flags & IFF_PROMISC));
+ hbg_hw_set_mac_filter_enable(priv, priv->filter.enabled);
+}
+
+static void hbg_set_mac_to_mac_table(struct hbg_priv *priv,
+ u32 index, const u8 *addr)
+{
+ if (addr) {
+ ether_addr_copy(priv->filter.mac_table[index].addr, addr);
+ hbg_hw_set_uc_addr(priv, ether_addr_to_u64(addr), index);
+ } else {
+ eth_zero_addr(priv->filter.mac_table[index].addr);
+ hbg_hw_set_uc_addr(priv, 0, index);
+ }
+}
+
+static int hbg_get_index_from_mac_table(struct hbg_priv *priv,
+ const u8 *addr, u32 *index)
+{
+ u32 i;
+
+ for (i = 0; i < priv->filter.table_max_len; i++)
+ if (ether_addr_equal(priv->filter.mac_table[i].addr, addr)) {
+ *index = i;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int hbg_add_mac_to_filter(struct hbg_priv *priv, const u8 *addr)
+{
+ u32 index;
+
+ /* already exists */
+ if (!hbg_get_index_from_mac_table(priv, addr, &index))
+ return 0;
+
+ for (index = 0; index < priv->filter.table_max_len; index++)
+ if (is_zero_ether_addr(priv->filter.mac_table[index].addr)) {
+ hbg_set_mac_to_mac_table(priv, index, addr);
+ return 0;
+ }
+
+ return -ENOSPC;
+}
+
+static void hbg_del_mac_from_filter(struct hbg_priv *priv, const u8 *addr)
+{
+ u32 index;
+
+	/* does not exist */
+ if (hbg_get_index_from_mac_table(priv, addr, &index))
+ return;
+
+ hbg_set_mac_to_mac_table(priv, index, NULL);
+}
+
+static int hbg_uc_sync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ return hbg_add_mac_to_filter(priv, addr);
+}
+
+static int hbg_uc_unsync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ if (ether_addr_equal(netdev->dev_addr, (u8 *)addr))
+ return 0;
+
+ hbg_del_mac_from_filter(priv, addr);
+ return 0;
+}
+
+static void hbg_net_set_rx_mode(struct net_device *netdev)
+{
+ int ret;
+
+ ret = __dev_uc_sync(netdev, hbg_uc_sync, hbg_uc_unsync);
+
+ /* If ret != 0, overflow has occurred */
+ hbg_update_promisc_mode(netdev, !!ret);
+}
+
+static int hbg_net_set_mac_address(struct net_device *netdev, void *addr)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ u8 *mac_addr;
+ bool exists;
+ u32 index;
+
+ mac_addr = ((struct sockaddr *)addr)->sa_data;
+
+ if (!is_valid_ether_addr(mac_addr))
+ return -EADDRNOTAVAIL;
+
+	/* The host MAC address always uses index 0.
+	 * If the new MAC address already exists in the table,
+	 * delete the existing entry and install the new address
+	 * at index 0.
+	 */
+ exists = !hbg_get_index_from_mac_table(priv, mac_addr, &index);
+ hbg_set_mac_to_mac_table(priv, 0, mac_addr);
+ if (exists)
+ hbg_set_mac_to_mac_table(priv, index, NULL);
+
+ hbg_hw_set_rx_pause_mac_addr(priv, ether_addr_to_u64(mac_addr));
+ dev_addr_set(netdev, mac_addr);
+ return 0;
+}
+
+static int hbg_net_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ if (netif_running(netdev))
+ return -EBUSY;
+
+ dev_dbg(&priv->pdev->dev,
+ "change mtu from %u to %u\n", netdev->mtu, new_mtu);
+
+ hbg_hw_set_mtu(priv, new_mtu);
+ WRITE_ONCE(netdev->mtu, new_mtu);
+
+ return 0;
+}
+
+static void hbg_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_ring *ring = &priv->tx_ring;
+ char *buf = ring->tout_log_buf;
+ u32 pos = 0;
+
+ priv->stats.tx_timeout_cnt++;
+
+ pos += scnprintf(buf + pos, HBG_TX_TIMEOUT_BUF_LEN - pos,
+ "tx_timeout cnt: %llu\n", priv->stats.tx_timeout_cnt);
+ pos += scnprintf(buf + pos, HBG_TX_TIMEOUT_BUF_LEN - pos,
+ "ring used num: %u, fifo used num: %u\n",
+ hbg_get_queue_used_num(ring),
+ hbg_hw_get_fifo_used_num(priv, HBG_DIR_TX));
+ pos += scnprintf(buf + pos, HBG_TX_TIMEOUT_BUF_LEN - pos,
+ "ntc: %u, ntu: %u, irq enabled: %u\n",
+ ring->ntc, ring->ntu,
+ hbg_hw_irq_is_enabled(priv, HBG_INT_MSK_TX_B));
+
+ netdev_info(netdev, "%s", buf);
+}
+
+static void hbg_net_get_stats(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_stats *h_stats = &priv->stats;
+
+ hbg_update_stats(priv);
+ dev_get_tstats64(netdev, stats);
+
+ /* fifo empty */
+ stats->tx_fifo_errors += h_stats->tx_drop_cnt;
+
+ stats->tx_dropped += h_stats->tx_excessive_length_drop_cnt +
+ h_stats->tx_drop_cnt;
+ stats->tx_errors += h_stats->tx_add_cs_fail_cnt +
+ h_stats->tx_bufrl_err_cnt +
+ h_stats->tx_underrun_err_cnt +
+ h_stats->tx_crc_err_cnt;
+ stats->rx_errors += h_stats->rx_data_error_cnt;
+ stats->multicast += h_stats->rx_mc_pkt_cnt;
+ stats->rx_dropped += h_stats->rx_desc_drop;
+ stats->rx_length_errors += h_stats->rx_frame_very_long_err_cnt +
+ h_stats->rx_frame_long_err_cnt +
+ h_stats->rx_frame_runt_err_cnt +
+ h_stats->rx_frame_short_err_cnt +
+ h_stats->rx_lengthfield_err_cnt;
+ stats->rx_frame_errors += h_stats->rx_desc_l2_err_cnt +
+ h_stats->rx_desc_l3l4_err_cnt;
+ stats->rx_fifo_errors += h_stats->rx_overflow_cnt +
+ h_stats->rx_overrun_cnt;
+ stats->rx_crc_errors += h_stats->rx_fcs_error_cnt;
+}
+
+static const struct net_device_ops hbg_netdev_ops = {
+ .ndo_open = hbg_net_open,
+ .ndo_stop = hbg_net_stop,
+ .ndo_start_xmit = hbg_net_start_xmit,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = hbg_net_set_mac_address,
+ .ndo_change_mtu = hbg_net_change_mtu,
+ .ndo_tx_timeout = hbg_net_tx_timeout,
+ .ndo_set_rx_mode = hbg_net_set_rx_mode,
+ .ndo_get_stats64 = hbg_net_get_stats,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
+};
+
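+/* Periodic service work: handle deferred reset and NP link-fail requests,
+ * push diagnose messages and refresh the hardware statistics.
+ */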
+static void hbg_service_task(struct work_struct *work)
+{
+ struct hbg_priv *priv = container_of(work, struct hbg_priv,
+ service_task.work);
+
+ if (test_and_clear_bit(HBG_NIC_STATE_NEED_RESET, &priv->state))
+ hbg_err_reset(priv);
+
+ if (test_and_clear_bit(HBG_NIC_STATE_NP_LINK_FAIL, &priv->state))
+ hbg_fix_np_link_fail(priv);
+
+ hbg_diagnose_message_push(priv);
+
+	/* The hardware statistics registers are u32.
+	 * To prevent them from overflowing,
+	 * the driver dumps the statistics every 30 seconds.
+	 */
+ if (time_after(jiffies, priv->last_update_stats_time + 30 * HZ)) {
+ hbg_update_stats(priv);
+ priv->last_update_stats_time = jiffies;
+ }
+
+ schedule_delayed_work(&priv->service_task,
+ msecs_to_jiffies(MSEC_PER_SEC));
+}
+
+void hbg_err_reset_task_schedule(struct hbg_priv *priv)
+{
+ set_bit(HBG_NIC_STATE_NEED_RESET, &priv->state);
+ schedule_delayed_work(&priv->service_task, 0);
+}
+
+void hbg_np_link_fail_task_schedule(struct hbg_priv *priv)
+{
+ set_bit(HBG_NIC_STATE_NP_LINK_FAIL, &priv->state);
+ schedule_delayed_work(&priv->service_task, 0);
+}
+
+static void hbg_cancel_delayed_work_sync(void *data)
+{
+ cancel_delayed_work_sync(data);
+}
+
+static int hbg_delaywork_init(struct hbg_priv *priv)
+{
+ INIT_DELAYED_WORK(&priv->service_task, hbg_service_task);
+ schedule_delayed_work(&priv->service_task, 0);
+ return devm_add_action_or_reset(&priv->pdev->dev,
+ hbg_cancel_delayed_work_sync,
+ &priv->service_task);
+}
+
+static int hbg_mac_filter_init(struct hbg_priv *priv)
+{
+ struct hbg_dev_specs *dev_specs = &priv->dev_specs;
+ struct hbg_mac_filter *filter = &priv->filter;
+ struct hbg_mac_table_entry *tmp_table;
+
+ tmp_table = devm_kcalloc(&priv->pdev->dev, dev_specs->uc_mac_num,
+ sizeof(*tmp_table), GFP_KERNEL);
+ if (!tmp_table)
+ return -ENOMEM;
+
+ filter->mac_table = tmp_table;
+ filter->table_max_len = dev_specs->uc_mac_num;
+ filter->enabled = true;
+
+ hbg_hw_set_mac_filter_enable(priv, filter->enabled);
+ return 0;
+}
+
+static void hbg_init_user_def(struct hbg_priv *priv)
+{
+ struct ethtool_pauseparam *pause_param = &priv->user_def.pause_param;
+
+ priv->mac.pause_autoneg = HBG_STATUS_ENABLE;
+
+ pause_param->autoneg = priv->mac.pause_autoneg;
+ hbg_hw_get_pause_enable(priv, &pause_param->tx_pause,
+ &pause_param->rx_pause);
+}
+
+static int hbg_init(struct hbg_priv *priv)
+{
+ int ret;
+
+ ret = hbg_hw_event_notify(priv, HBG_HW_EVENT_INIT);
+ if (ret)
+ return ret;
+
+ ret = hbg_hw_init(priv);
+ if (ret)
+ return ret;
+
+ ret = hbg_irq_init(priv);
+ if (ret)
+ return ret;
+
+ ret = hbg_mdio_init(priv);
+ if (ret)
+ return ret;
+
+ ret = hbg_mac_filter_init(priv);
+ if (ret)
+ return ret;
+
+ ret = hbg_delaywork_init(priv);
+ if (ret)
+ return ret;
+
+ hbg_debugfs_init(priv);
+ hbg_init_user_def(priv);
+ return 0;
+}
+
+static int hbg_pci_init(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to enable PCI device\n");
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to set PCI DMA mask\n");
+
+ ret = pcim_iomap_regions(pdev, BIT(0), dev_driver_string(dev));
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to map PCI bar space\n");
+
+ priv->io_base = pcim_iomap_table(pdev)[0];
+ if (!priv->io_base)
+ return -ENOMEM;
+
+ pci_set_master(pdev);
+ return 0;
+}
+
+static int hbg_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct net_device *netdev;
+ struct hbg_priv *priv;
+ int ret;
+
+ netdev = devm_alloc_etherdev(dev, sizeof(struct hbg_priv));
+ if (!netdev)
+ return -ENOMEM;
+
+ pci_set_drvdata(pdev, netdev);
+ SET_NETDEV_DEV(netdev, dev);
+
+ priv = netdev_priv(netdev);
+ priv->netdev = netdev;
+ priv->pdev = pdev;
+
+ ret = hbg_pci_init(pdev);
+ if (ret)
+ return ret;
+
+ ret = hbg_init(priv);
+ if (ret)
+ return ret;
+
+ /* set default features */
+ netdev->features |= HBG_SUPPORT_FEATURES;
+ netdev->hw_features |= HBG_SUPPORT_FEATURES;
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ netdev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
+ netdev->max_mtu = priv->dev_specs.max_mtu;
+ netdev->min_mtu = priv->dev_specs.min_mtu;
+ netdev->netdev_ops = &hbg_netdev_ops;
+ netdev->watchdog_timeo = 5 * HZ;
+
+ hbg_hw_set_mtu(priv, ETH_DATA_LEN);
+ hbg_net_set_mac_address(priv->netdev, &priv->dev_specs.mac_addr);
+ hbg_ethtool_set_ops(netdev);
+
+ ret = devm_register_netdev(dev, netdev);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to register netdev\n");
+
+ netif_carrier_off(netdev);
+ return 0;
+}
+
+static void hbg_shutdown(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+
+ rtnl_lock();
+ if (netif_running(netdev))
+ dev_close(netdev);
+ rtnl_unlock();
+
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+
+ if (system_state == SYSTEM_POWER_OFF)
+ pci_set_power_state(pdev, PCI_D3hot);
+}
+
+static const struct pci_device_id hbg_pci_tbl[] = {
+ {PCI_VDEVICE(HUAWEI, 0x3730), 0},
+ { }
+};
+MODULE_DEVICE_TABLE(pci, hbg_pci_tbl);
+
+static struct pci_driver hbg_driver = {
+ .name = "hibmcge",
+ .id_table = hbg_pci_tbl,
+ .probe = hbg_probe,
+ .shutdown = hbg_shutdown,
+};
+
+static int __init hbg_module_init(void)
+{
+ int ret;
+
+ hbg_debugfs_register();
+ hbg_set_pci_err_handler(&hbg_driver);
+ ret = pci_register_driver(&hbg_driver);
+ if (ret)
+ hbg_debugfs_unregister();
+
+ return ret;
+}
+module_init(hbg_module_init);
+
+static void __exit hbg_module_exit(void)
+{
+ pci_unregister_driver(&hbg_driver);
+ hbg_debugfs_unregister();
+}
+module_exit(hbg_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
+MODULE_DESCRIPTION("hibmcge driver");
+MODULE_VERSION("1.0");
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
new file mode 100644
index 000000000000..b6f0a2780ea8
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
@@ -0,0 +1,304 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2024 Hisilicon Limited.
+
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
+#include <linux/rtnetlink.h>
+#include "hbg_common.h"
+#include "hbg_hw.h"
+#include "hbg_mdio.h"
+#include "hbg_reg.h"
+
+#define HBG_MAC_GET_PRIV(mac) ((struct hbg_priv *)(mac)->mdio_bus->priv)
+#define HBG_MII_BUS_GET_MAC(bus) (&((struct hbg_priv *)(bus)->priv)->mac)
+
+#define HBG_MDIO_C22_MODE 0x1
+#define HBG_MDIO_C22_REG_WRITE 0x1
+#define HBG_MDIO_C22_REG_READ 0x2
+
+#define HBG_MDIO_OP_TIMEOUT_US (1 * 1000 * 1000)
+#define HBG_MDIO_OP_INTERVAL_US (5 * 1000)
+
+#define HBG_NP_LINK_FAIL_RETRY_TIMES 5
+
+static void hbg_mdio_set_command(struct hbg_mac *mac, u32 cmd)
+{
+ hbg_reg_write(HBG_MAC_GET_PRIV(mac), HBG_REG_MDIO_COMMAND_ADDR, cmd);
+}
+
+static void hbg_mdio_get_command(struct hbg_mac *mac, u32 *cmd)
+{
+ *cmd = hbg_reg_read(HBG_MAC_GET_PRIV(mac), HBG_REG_MDIO_COMMAND_ADDR);
+}
+
+static void hbg_mdio_set_wdata_reg(struct hbg_mac *mac, u16 wdata_value)
+{
+ hbg_reg_write_field(HBG_MAC_GET_PRIV(mac), HBG_REG_MDIO_WDATA_ADDR,
+ HBG_REG_MDIO_WDATA_M, wdata_value);
+}
+
+static u32 hbg_mdio_get_rdata_reg(struct hbg_mac *mac)
+{
+ return hbg_reg_read_field(HBG_MAC_GET_PRIV(mac),
+ HBG_REG_MDIO_RDATA_ADDR,
+ HBG_REG_MDIO_WDATA_M);
+}
+
+static int hbg_mdio_wait_ready(struct hbg_mac *mac)
+{
+ struct hbg_priv *priv = HBG_MAC_GET_PRIV(mac);
+ u32 cmd = 0;
+ int ret;
+
+ ret = readl_poll_timeout(priv->io_base + HBG_REG_MDIO_COMMAND_ADDR, cmd,
+ !FIELD_GET(HBG_REG_MDIO_COMMAND_START_B, cmd),
+ HBG_MDIO_OP_INTERVAL_US,
+ HBG_MDIO_OP_TIMEOUT_US);
+
+ return ret ? -ETIMEDOUT : 0;
+}
+
+static int hbg_mdio_cmd_send(struct hbg_mac *mac, u32 prt_addr, u32 dev_addr,
+ u32 type, u32 op_code)
+{
+ u32 cmd = 0;
+
+ hbg_mdio_get_command(mac, &cmd);
+ hbg_field_modify(cmd, HBG_REG_MDIO_COMMAND_ST_M, type);
+ hbg_field_modify(cmd, HBG_REG_MDIO_COMMAND_OP_M, op_code);
+ hbg_field_modify(cmd, HBG_REG_MDIO_COMMAND_PRTAD_M, prt_addr);
+ hbg_field_modify(cmd, HBG_REG_MDIO_COMMAND_DEVAD_M, dev_addr);
+
+	/* if auto scan is enabled, this value needs to be fixed to 0 */
+ hbg_field_modify(cmd, HBG_REG_MDIO_COMMAND_START_B, 0x1);
+
+ hbg_mdio_set_command(mac, cmd);
+
+	/* wait for the operation to complete and check the result */
+ return hbg_mdio_wait_ready(mac);
+}
+
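+/* clause 22 read: issue the read command, then fetch the result from the
+ * RDATA register.
+ */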
+static int hbg_mdio_read22(struct mii_bus *bus, int phy_addr, int regnum)
+{
+ struct hbg_mac *mac = HBG_MII_BUS_GET_MAC(bus);
+ int ret;
+
+ ret = hbg_mdio_cmd_send(mac, phy_addr, regnum, HBG_MDIO_C22_MODE,
+ HBG_MDIO_C22_REG_READ);
+ if (ret)
+ return ret;
+
+ return hbg_mdio_get_rdata_reg(mac);
+}
+
+static int hbg_mdio_write22(struct mii_bus *bus, int phy_addr, int regnum,
+ u16 val)
+{
+ struct hbg_mac *mac = HBG_MII_BUS_GET_MAC(bus);
+
+ hbg_mdio_set_wdata_reg(mac, val);
+ return hbg_mdio_cmd_send(mac, phy_addr, regnum, HBG_MDIO_C22_MODE,
+ HBG_MDIO_C22_REG_WRITE);
+}
+
+static void hbg_mdio_init_hw(struct hbg_priv *priv)
+{
+ u32 freq = priv->dev_specs.mdio_frequency;
+ struct hbg_mac *mac = &priv->mac;
+ u32 cmd = 0;
+
+ cmd |= FIELD_PREP(HBG_REG_MDIO_COMMAND_ST_M, HBG_MDIO_C22_MODE);
+ cmd |= FIELD_PREP(HBG_REG_MDIO_COMMAND_AUTO_SCAN_B, HBG_STATUS_DISABLE);
+
+	/* the frequency uses two bits, stored in clk_sel and clk_sel_exp */
+ cmd |= FIELD_PREP(HBG_REG_MDIO_COMMAND_CLK_SEL_B, freq & 0x1);
+ cmd |= FIELD_PREP(HBG_REG_MDIO_COMMAND_CLK_SEL_EXP_B,
+ (freq >> 1) & 0x1);
+
+ hbg_mdio_set_command(mac, cmd);
+}
+
+static void hbg_flowctrl_cfg(struct hbg_priv *priv)
+{
+ struct phy_device *phydev = priv->mac.phydev;
+ bool rx_pause;
+ bool tx_pause;
+
+ if (!priv->mac.pause_autoneg)
+ return;
+
+ phy_get_pause(phydev, &tx_pause, &rx_pause);
+ hbg_hw_set_pause_enable(priv, tx_pause, rx_pause);
+}
+
+void hbg_fix_np_link_fail(struct hbg_priv *priv)
+{
+ struct device *dev = &priv->pdev->dev;
+
+ rtnl_lock();
+
+ if (priv->stats.np_link_fail_cnt >= HBG_NP_LINK_FAIL_RETRY_TIMES) {
+ dev_err(dev, "failed to fix the MAC link status\n");
+ priv->stats.np_link_fail_cnt = 0;
+ goto unlock;
+ }
+
+ if (!priv->mac.phydev->link)
+ goto unlock;
+
+ priv->stats.np_link_fail_cnt++;
+ dev_err(dev, "failed to link between MAC and PHY, try to fix...\n");
+
+ /* Replace phy_reset() with phy_stop() and phy_start(),
+ * as suggested by Andrew.
+ */
+ hbg_phy_stop(priv);
+ hbg_phy_start(priv);
+
+unlock:
+ rtnl_unlock();
+}
+
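+/* phylib adjust_link callback: when the link comes up, program the MAC
+ * speed/duplex and refresh the flow control configuration.
+ */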
+static void hbg_phy_adjust_link(struct net_device *netdev)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct phy_device *phydev = netdev->phydev;
+ u32 speed;
+
+ if (phydev->link != priv->mac.link_status) {
+ if (phydev->link) {
+ switch (phydev->speed) {
+ case SPEED_10:
+ speed = HBG_PORT_MODE_SGMII_10M;
+ break;
+ case SPEED_100:
+ speed = HBG_PORT_MODE_SGMII_100M;
+ break;
+ case SPEED_1000:
+ speed = HBG_PORT_MODE_SGMII_1000M;
+ break;
+ default:
+ return;
+ }
+
+ priv->mac.speed = speed;
+ priv->mac.duplex = phydev->duplex;
+ priv->mac.autoneg = phydev->autoneg;
+ hbg_hw_adjust_link(priv, speed, phydev->duplex);
+ hbg_flowctrl_cfg(priv);
+ }
+
+ priv->mac.link_status = phydev->link;
+ phy_print_status(phydev);
+ }
+}
+
+static void hbg_phy_disconnect(void *data)
+{
+ phy_disconnect((struct phy_device *)data);
+}
+
+static int hbg_phy_connect(struct hbg_priv *priv)
+{
+ struct phy_device *phydev = priv->mac.phydev;
+ struct device *dev = &priv->pdev->dev;
+ int ret;
+
+ ret = phy_connect_direct(priv->netdev, phydev, hbg_phy_adjust_link,
+ PHY_INTERFACE_MODE_SGMII);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to connect phy\n");
+
+ ret = devm_add_action_or_reset(dev, hbg_phy_disconnect, phydev);
+ if (ret)
+ return ret;
+
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+ phy_support_asym_pause(phydev);
+ phy_attached_info(phydev);
+
+ return 0;
+}
+
+void hbg_phy_start(struct hbg_priv *priv)
+{
+ phy_start(priv->mac.phydev);
+}
+
+void hbg_phy_stop(struct hbg_priv *priv)
+{
+ phy_stop(priv->mac.phydev);
+}
+
+static void hbg_fixed_phy_uninit(void *data)
+{
+ fixed_phy_unregister((struct phy_device *)data);
+}
+
+static int hbg_fixed_phy_init(struct hbg_priv *priv)
+{
+ struct fixed_phy_status hbg_fixed_phy_status = {
+ .link = 1,
+ .speed = SPEED_1000,
+ .duplex = DUPLEX_FULL,
+ .pause = 1,
+ .asym_pause = 1,
+ };
+ struct device *dev = &priv->pdev->dev;
+ struct phy_device *phydev;
+ int ret;
+
+ phydev = fixed_phy_register(&hbg_fixed_phy_status, NULL);
+ if (IS_ERR(phydev)) {
+ dev_err_probe(dev, PTR_ERR(phydev),
+ "failed to register fixed PHY device\n");
+ return PTR_ERR(phydev);
+ }
+
+ ret = devm_add_action_or_reset(dev, hbg_fixed_phy_uninit, phydev);
+ if (ret)
+ return ret;
+
+ priv->mac.phydev = phydev;
+ return hbg_phy_connect(priv);
+}
+
+int hbg_mdio_init(struct hbg_priv *priv)
+{
+ struct device *dev = &priv->pdev->dev;
+ struct hbg_mac *mac = &priv->mac;
+ struct phy_device *phydev;
+ struct mii_bus *mdio_bus;
+ int ret;
+
+ mac->phy_addr = priv->dev_specs.phy_addr;
+ if (mac->phy_addr == HBG_NO_PHY)
+ return hbg_fixed_phy_init(priv);
+
+ mdio_bus = devm_mdiobus_alloc(dev);
+ if (!mdio_bus)
+ return -ENOMEM;
+
+ mdio_bus->parent = dev;
+ mdio_bus->priv = priv;
+ mdio_bus->phy_mask = ~(1 << mac->phy_addr);
+ mdio_bus->name = "hibmcge mii bus";
+ mac->mdio_bus = mdio_bus;
+
+ mdio_bus->read = hbg_mdio_read22;
+ mdio_bus->write = hbg_mdio_write22;
+ snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%s", "mii", dev_name(dev));
+
+ ret = devm_mdiobus_register(dev, mdio_bus);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to register MDIO bus\n");
+
+ phydev = mdiobus_get_phy(mdio_bus, mac->phy_addr);
+ if (!phydev)
+ return dev_err_probe(dev, -ENODEV,
+ "failed to get phy device\n");
+
+ mac->phydev = phydev;
+ hbg_mdio_init_hw(priv);
+ return hbg_phy_connect(priv);
+}
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h
new file mode 100644
index 000000000000..f3771c1bbd34
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2024 Hisilicon Limited. */
+
+#ifndef __HBG_MDIO_H
+#define __HBG_MDIO_H
+
+#include "hbg_common.h"
+
+int hbg_mdio_init(struct hbg_priv *priv);
+void hbg_phy_start(struct hbg_priv *priv);
+void hbg_phy_stop(struct hbg_priv *priv);
+void hbg_fix_np_link_fail(struct hbg_priv *priv);
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
new file mode 100644
index 000000000000..30b3903c8f2d
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
@@ -0,0 +1,302 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2024 Hisilicon Limited. */
+
+#ifndef __HBG_REG_H
+#define __HBG_REG_H
+
+/* DEV SPEC */
+#define HBG_REG_SPEC_VALID_ADDR 0x0000
+#define HBG_REG_EVENT_REQ_ADDR 0x0004
+#define HBG_REG_MAC_ID_ADDR 0x0008
+#define HBG_REG_PHY_ID_ADDR 0x000C
+#define HBG_REG_MAC_ADDR_ADDR 0x0010
+#define HBG_REG_MAC_ADDR_HIGH_ADDR 0x0014
+#define HBG_REG_UC_MAC_NUM_ADDR 0x0018
+#define HBG_REG_MDIO_FREQ_ADDR 0x0024
+#define HBG_REG_MAX_MTU_ADDR 0x0028
+#define HBG_REG_MIN_MTU_ADDR 0x002C
+#define HBG_REG_TX_FIFO_NUM_ADDR 0x0030
+#define HBG_REG_RX_FIFO_NUM_ADDR 0x0034
+#define HBG_REG_VLAN_LAYERS_ADDR 0x0038
+#define HBG_REG_PUSH_REQ_ADDR 0x00F0
+#define HBG_REG_MSG_HEADER_ADDR 0x00F4
+#define HBG_REG_MSG_HEADER_OPCODE_M GENMASK(7, 0)
+#define HBG_REG_MSG_HEADER_STATUS_M GENMASK(11, 8)
+#define HBG_REG_MSG_HEADER_DATA_NUM_M GENMASK(19, 12)
+#define HBG_REG_MSG_HEADER_RESP_CODE_M GENMASK(27, 20)
+#define HBG_REG_MSG_DATA_BASE_ADDR 0x0100
+
+/* MDIO */
+#define HBG_REG_MDIO_BASE 0x8000
+#define HBG_REG_MDIO_COMMAND_ADDR (HBG_REG_MDIO_BASE + 0x0000)
+#define HBG_REG_MDIO_COMMAND_CLK_SEL_EXP_B BIT(17)
+#define HBG_REG_MDIO_COMMAND_AUTO_SCAN_B BIT(16)
+#define HBG_REG_MDIO_COMMAND_CLK_SEL_B BIT(15)
+#define HBG_REG_MDIO_COMMAND_START_B BIT(14)
+#define HBG_REG_MDIO_COMMAND_ST_M GENMASK(13, 12)
+#define HBG_REG_MDIO_COMMAND_OP_M GENMASK(11, 10)
+#define HBG_REG_MDIO_COMMAND_PRTAD_M GENMASK(9, 5)
+#define HBG_REG_MDIO_COMMAND_DEVAD_M GENMASK(4, 0)
+#define HBG_REG_MDIO_ADDR_ADDR (HBG_REG_MDIO_BASE + 0x0004)
+#define HBG_REG_MDIO_WDATA_ADDR (HBG_REG_MDIO_BASE + 0x0008)
+#define HBG_REG_MDIO_WDATA_M GENMASK(15, 0)
+#define HBG_REG_MDIO_RDATA_ADDR (HBG_REG_MDIO_BASE + 0x000C)
+#define HBG_REG_MDIO_STA_ADDR (HBG_REG_MDIO_BASE + 0x0010)
+
+/* GMAC */
+#define HBG_REG_SGMII_BASE 0x10000
+#define HBG_REG_DUPLEX_TYPE_ADDR (HBG_REG_SGMII_BASE + 0x0008)
+#define HBG_REG_FD_FC_TYPE_ADDR (HBG_REG_SGMII_BASE + 0x000C)
+#define HBG_REG_FC_TX_TIMER_ADDR (HBG_REG_SGMII_BASE + 0x001C)
+#define HBG_REG_FD_FC_ADDR_LOW_ADDR (HBG_REG_SGMII_BASE + 0x0020)
+#define HBG_REG_FD_FC_ADDR_HIGH_ADDR (HBG_REG_SGMII_BASE + 0x0024)
+#define HBG_REG_DUPLEX_B BIT(0)
+#define HBG_REG_MAX_FRAME_SIZE_ADDR (HBG_REG_SGMII_BASE + 0x003C)
+#define HBG_REG_PORT_MODE_ADDR (HBG_REG_SGMII_BASE + 0x0040)
+#define HBG_REG_PORT_MODE_M GENMASK(3, 0)
+#define HBG_REG_PORT_ENABLE_ADDR (HBG_REG_SGMII_BASE + 0x0044)
+#define HBG_REG_PORT_ENABLE_RX_B BIT(1)
+#define HBG_REG_PORT_ENABLE_TX_B BIT(2)
+#define HBG_REG_PAUSE_ENABLE_ADDR (HBG_REG_SGMII_BASE + 0x0048)
+#define HBG_REG_PAUSE_ENABLE_RX_B BIT(0)
+#define HBG_REG_PAUSE_ENABLE_TX_B BIT(1)
+#define HBG_REG_AN_NEG_STATE_ADDR (HBG_REG_SGMII_BASE + 0x0058)
+#define HBG_REG_AN_NEG_STATE_NP_LINK_OK_B BIT(15)
+#define HBG_REG_TRANSMIT_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x0060)
+#define HBG_REG_TRANSMIT_CTRL_PAD_EN_B BIT(7)
+#define HBG_REG_TRANSMIT_CTRL_CRC_ADD_B BIT(6)
+#define HBG_REG_TRANSMIT_CTRL_AN_EN_B BIT(5)
+#define HBG_REG_REC_FILT_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x0064)
+#define HBG_REG_REC_FILT_CTRL_UC_MATCH_EN_B BIT(0)
+#define HBG_REG_REC_FILT_CTRL_PAUSE_FRM_PASS_B BIT(4)
+#define HBG_REG_RX_OCTETS_TOTAL_OK_ADDR (HBG_REG_SGMII_BASE + 0x0080)
+#define HBG_REG_RX_OCTETS_BAD_ADDR (HBG_REG_SGMII_BASE + 0x0084)
+#define HBG_REG_RX_UC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x0088)
+#define HBG_REG_RX_MC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x008C)
+#define HBG_REG_RX_BC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x0090)
+#define HBG_REG_RX_PKTS_64OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0094)
+#define HBG_REG_RX_PKTS_65TO127OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0098)
+#define HBG_REG_RX_PKTS_128TO255OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x009C)
+#define HBG_REG_RX_PKTS_256TO511OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x00A0)
+#define HBG_REG_RX_PKTS_512TO1023OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x00A4)
+#define HBG_REG_RX_PKTS_1024TO1518OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x00A8)
+#define HBG_REG_RX_PKTS_1519TOMAXOCTETS_ADDR (HBG_REG_SGMII_BASE + 0x00AC)
+#define HBG_REG_RX_FCS_ERRORS_ADDR (HBG_REG_SGMII_BASE + 0x00B0)
+#define HBG_REG_RX_TAGGED_ADDR (HBG_REG_SGMII_BASE + 0x00B4)
+#define HBG_REG_RX_DATA_ERR_ADDR (HBG_REG_SGMII_BASE + 0x00B8)
+#define HBG_REG_RX_ALIGN_ERRORS_ADDR (HBG_REG_SGMII_BASE + 0x00BC)
+#define HBG_REG_RX_LONG_ERRORS_ADDR (HBG_REG_SGMII_BASE + 0x00C0)
+#define HBG_REG_RX_JABBER_ERRORS_ADDR (HBG_REG_SGMII_BASE + 0x00C4)
+#define HBG_REG_RX_PAUSE_MACCTL_FRAMCOUNTER_ADDR (HBG_REG_SGMII_BASE + 0x00C8)
+#define HBG_REG_RX_UNKNOWN_MACCTL_FRAMCOUNTER_ADDR (HBG_REG_SGMII_BASE + 0x00CC)
+#define HBG_REG_RX_VERY_LONG_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x00D0)
+#define HBG_REG_RX_RUNT_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x00D4)
+#define HBG_REG_RX_SHORT_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x00D8)
+#define HBG_REG_RX_FILT_PKT_CNT_ADDR (HBG_REG_SGMII_BASE + 0x00E8)
+#define HBG_REG_RX_OCTETS_TOTAL_FILT_ADDR (HBG_REG_SGMII_BASE + 0x00EC)
+#define HBG_REG_OCTETS_TRANSMITTED_OK_ADDR (HBG_REG_SGMII_BASE + 0x0100)
+#define HBG_REG_OCTETS_TRANSMITTED_BAD_ADDR (HBG_REG_SGMII_BASE + 0x0104)
+#define HBG_REG_TX_UC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x0108)
+#define HBG_REG_TX_MC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x010C)
+#define HBG_REG_TX_BC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x0110)
+#define HBG_REG_TX_PKTS_64OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0114)
+#define HBG_REG_TX_PKTS_65TO127OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0118)
+#define HBG_REG_TX_PKTS_128TO255OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x011C)
+#define HBG_REG_TX_PKTS_256TO511OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0120)
+#define HBG_REG_TX_PKTS_512TO1023OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0124)
+#define HBG_REG_TX_PKTS_1024TO1518OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0128)
+#define HBG_REG_TX_PKTS_1519TOMAXOCTETS_ADDR (HBG_REG_SGMII_BASE + 0x012C)
+#define HBG_REG_TX_EXCESSIVE_LENGTH_DROP_ADDR (HBG_REG_SGMII_BASE + 0x014C)
+#define HBG_REG_TX_UNDERRUN_ADDR (HBG_REG_SGMII_BASE + 0x0150)
+#define HBG_REG_TX_TAGGED_ADDR (HBG_REG_SGMII_BASE + 0x0154)
+#define HBG_REG_TX_CRC_ERROR_ADDR (HBG_REG_SGMII_BASE + 0x0158)
+#define HBG_REG_TX_PAUSE_FRAMES_ADDR (HBG_REG_SGMII_BASE + 0x015C)
+#define HBG_REG_LINE_LOOP_BACK_ADDR (HBG_REG_SGMII_BASE + 0x01A8)
+#define HBG_REG_CF_CRC_STRIP_ADDR (HBG_REG_SGMII_BASE + 0x01B0)
+#define HBG_REG_CF_CRC_STRIP_B BIT(0)
+#define HBG_REG_MODE_CHANGE_EN_ADDR (HBG_REG_SGMII_BASE + 0x01B4)
+#define HBG_REG_MODE_CHANGE_EN_B BIT(0)
+#define HBG_REG_LOOP_REG_ADDR (HBG_REG_SGMII_BASE + 0x01DC)
+#define HBG_REG_RECV_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x01E0)
+#define HBG_REG_RECV_CTRL_STRIP_PAD_EN_B BIT(3)
+#define HBG_REG_VLAN_CODE_ADDR (HBG_REG_SGMII_BASE + 0x01E8)
+#define HBG_REG_RX_OVERRUN_CNT_ADDR (HBG_REG_SGMII_BASE + 0x01EC)
+#define HBG_REG_RX_LENGTHFIELD_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x01F4)
+#define HBG_REG_RX_FAIL_COMMA_CNT_ADDR (HBG_REG_SGMII_BASE + 0x01F8)
+#define HBG_REG_STATION_ADDR_LOW_0_ADDR (HBG_REG_SGMII_BASE + 0x0200)
+#define HBG_REG_STATION_ADDR_HIGH_0_ADDR (HBG_REG_SGMII_BASE + 0x0204)
+#define HBG_REG_STATION_ADDR_LOW_1_ADDR (HBG_REG_SGMII_BASE + 0x0208)
+#define HBG_REG_STATION_ADDR_HIGH_1_ADDR (HBG_REG_SGMII_BASE + 0x020C)
+#define HBG_REG_STATION_ADDR_LOW_2_ADDR (HBG_REG_SGMII_BASE + 0x0210)
+#define HBG_REG_STATION_ADDR_HIGH_2_ADDR (HBG_REG_SGMII_BASE + 0x0214)
+#define HBG_REG_STATION_ADDR_LOW_3_ADDR (HBG_REG_SGMII_BASE + 0x0218)
+#define HBG_REG_STATION_ADDR_HIGH_3_ADDR (HBG_REG_SGMII_BASE + 0x021C)
+#define HBG_REG_STATION_ADDR_LOW_4_ADDR (HBG_REG_SGMII_BASE + 0x0220)
+#define HBG_REG_STATION_ADDR_HIGH_4_ADDR (HBG_REG_SGMII_BASE + 0x0224)
+#define HBG_REG_STATION_ADDR_LOW_5_ADDR (HBG_REG_SGMII_BASE + 0x0228)
+#define HBG_REG_STATION_ADDR_HIGH_5_ADDR (HBG_REG_SGMII_BASE + 0x022C)
+#define HBG_REG_STATION_ADDR_LOW_MSK_0 (HBG_REG_SGMII_BASE + 0x0230)
+#define HBG_REG_STATION_ADDR_LOW_MSK_1 (HBG_REG_SGMII_BASE + 0x0238)
+
+/* PCU */
+#define HBG_REG_TX_FIFO_THRSLD_ADDR (HBG_REG_SGMII_BASE + 0x0420)
+#define HBG_REG_RX_FIFO_THRSLD_ADDR (HBG_REG_SGMII_BASE + 0x0424)
+#define HBG_REG_FIFO_THRSLD_FULL_M GENMASK(25, 16)
+#define HBG_REG_FIFO_THRSLD_EMPTY_M GENMASK(9, 0)
+#define HBG_REG_CFG_FIFO_THRSLD_ADDR (HBG_REG_SGMII_BASE + 0x0428)
+#define HBG_REG_CFG_FIFO_THRSLD_TX_FULL_M GENMASK(31, 24)
+#define HBG_REG_CFG_FIFO_THRSLD_TX_EMPTY_M GENMASK(23, 16)
+#define HBG_REG_CFG_FIFO_THRSLD_RX_FULL_M GENMASK(15, 8)
+#define HBG_REG_CFG_FIFO_THRSLD_RX_EMPTY_M GENMASK(7, 0)
+#define HBG_REG_CF_INTRPT_MSK_ADDR (HBG_REG_SGMII_BASE + 0x042C)
+#define HBG_INT_MSK_WE_ERR_B BIT(31)
+#define HBG_INT_MSK_RBREQ_ERR_B BIT(30)
+#define HBG_INT_MSK_MAC_FIFO_ERR_B BIT(29)
+#define HBG_INT_MSK_RX_AHB_ERR_B BIT(28)
+#define HBG_INT_MSK_RX_DROP_B BIT(26)
+#define HBG_INT_MSK_TX_DROP_B BIT(25)
+#define HBG_INT_MSK_TXCFG_AVL_B BIT(24)
+#define HBG_INT_MSK_REL_BUF_ERR_B BIT(23)
+#define HBG_INT_MSK_RX_BUF_AVL_B BIT(22)
+#define HBG_INT_MSK_TX_AHB_ERR_B BIT(21)
+#define HBG_INT_MSK_SRAM_PARITY_ERR_B BIT(20)
+#define HBG_INT_MSK_MAC_APP_TX_FIFO_ERR_B BIT(19)
+#define HBG_INT_MSK_MAC_APP_RX_FIFO_ERR_B BIT(18)
+#define HBG_INT_MSK_MAC_PCS_TX_FIFO_ERR_B BIT(17)
+#define HBG_INT_MSK_MAC_PCS_RX_FIFO_ERR_B BIT(16)
+#define HBG_INT_MSK_MAC_MII_FIFO_ERR_B BIT(15)
+#define HBG_INT_MSK_TX_PKT_CPL_B BIT(14)
+#define HBG_INT_MSK_TX_B BIT(1) /* just used in driver */
+#define HBG_INT_MSK_RX_B BIT(0) /* just used in driver */
+#define HBG_REG_CF_INTRPT_STAT_ADDR (HBG_REG_SGMII_BASE + 0x0434)
+#define HBG_REG_CF_INTRPT_CLR_ADDR (HBG_REG_SGMII_BASE + 0x0438)
+#define HBG_REG_TX_BUS_ERR_ADDR_ADDR (HBG_REG_SGMII_BASE + 0x043C)
+#define HBG_REG_RX_BUS_ERR_ADDR_ADDR (HBG_REG_SGMII_BASE + 0x0440)
+#define HBG_REG_MAX_FRAME_LEN_ADDR (HBG_REG_SGMII_BASE + 0x0444)
+#define HBG_REG_MAX_FRAME_LEN_M GENMASK(15, 0)
+#define HBG_REG_TX_DROP_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0448)
+#define HBG_REG_RX_OVER_FLOW_CNT_ADDR (HBG_REG_SGMII_BASE + 0x044C)
+#define HBG_REG_DEBUG_ST_MCH_ADDR (HBG_REG_SGMII_BASE + 0x0450)
+#define HBG_REG_FIFO_CURR_STATUS_ADDR (HBG_REG_SGMII_BASE + 0x0454)
+#define HBG_REG_FIFO_HIST_STATUS_ADDR (HBG_REG_SGMII_BASE + 0x0458)
+#define HBG_REG_CF_CFF_DATA_NUM_ADDR (HBG_REG_SGMII_BASE + 0x045C)
+#define HBG_REG_CF_CFF_DATA_NUM_ADDR_TX_M GENMASK(8, 0)
+#define HBG_REG_CF_CFF_DATA_NUM_ADDR_RX_M GENMASK(24, 16)
+#define HBG_REG_TX_CS_FAIL_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0460)
+#define HBG_REG_RX_TRANS_PKG_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0464)
+#define HBG_REG_TX_TRANS_PKG_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0468)
+#define HBG_REG_CF_TX_PAUSE_ADDR (HBG_REG_SGMII_BASE + 0x0470)
+#define HBG_REG_TX_CFF_ADDR_0_ADDR (HBG_REG_SGMII_BASE + 0x0488)
+#define HBG_REG_TX_CFF_ADDR_1_ADDR (HBG_REG_SGMII_BASE + 0x048C)
+#define HBG_REG_TX_CFF_ADDR_2_ADDR (HBG_REG_SGMII_BASE + 0x0490)
+#define HBG_REG_TX_CFF_ADDR_3_ADDR (HBG_REG_SGMII_BASE + 0x0494)
+#define HBG_REG_RX_CFF_ADDR_ADDR (HBG_REG_SGMII_BASE + 0x04A0)
+#define HBG_REG_BRUST_LENGTH_ADDR (HBG_REG_SGMII_BASE + 0x04C4)
+#define HBG_REG_BRUST_LENGTH_B BIT(29)
+#define HBG_REG_RX_BUF_SIZE_ADDR (HBG_REG_SGMII_BASE + 0x04E4)
+#define HBG_REG_RX_BUF_SIZE_M GENMASK(15, 0)
+#define HBG_REG_BUS_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x04E8)
+#define HBG_REG_BUS_CTRL_ENDIAN_M GENMASK(2, 1)
+#define HBG_REG_RX_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x04F0)
+#define HBG_REG_RX_CTRL_RXBUF_1ST_SKIP_SIZE_M GENMASK(31, 28)
+#define HBG_REG_RX_CTRL_TIME_INF_EN_B BIT(23)
+#define HBG_REG_RX_CTRL_RX_ALIGN_NUM_M GENMASK(18, 17)
+#define HBG_REG_RX_CTRL_PORT_NUM GENMASK(16, 13)
+#define HBG_REG_RX_CTRL_RX_GET_ADDR_MODE_B BIT(12)
+#define HBG_REG_RX_CTRL_RXBUF_1ST_SKIP_SIZE2_M GENMASK(3, 0)
+#define HBG_REG_RX_PKT_MODE_ADDR (HBG_REG_SGMII_BASE + 0x04F4)
+#define HBG_REG_RX_PKT_MODE_PARSE_MODE_M GENMASK(22, 21)
+#define HBG_REG_RX_BUFRQ_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x058C)
+#define HBG_REG_TX_BUFRL_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0590)
+#define HBG_REG_RX_WE_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0594)
+#define HBG_REG_DBG_ST0_ADDR (HBG_REG_SGMII_BASE + 0x05E4)
+#define HBG_REG_DBG_ST1_ADDR (HBG_REG_SGMII_BASE + 0x05E8)
+#define HBG_REG_DBG_ST2_ADDR (HBG_REG_SGMII_BASE + 0x05EC)
+#define HBG_REG_BUS_RST_EN_ADDR (HBG_REG_SGMII_BASE + 0x0688)
+#define HBG_REG_CF_IND_TXINT_MSK_ADDR (HBG_REG_SGMII_BASE + 0x0694)
+#define HBG_REG_IND_INTR_MASK_B BIT(0)
+#define HBG_REG_CF_IND_TXINT_STAT_ADDR (HBG_REG_SGMII_BASE + 0x0698)
+#define HBG_REG_CF_IND_TXINT_CLR_ADDR (HBG_REG_SGMII_BASE + 0x069C)
+#define HBG_REG_CF_IND_RXINT_MSK_ADDR (HBG_REG_SGMII_BASE + 0x06a0)
+#define HBG_REG_CF_IND_RXINT_STAT_ADDR (HBG_REG_SGMII_BASE + 0x06a4)
+#define HBG_REG_CF_IND_RXINT_CLR_ADDR (HBG_REG_SGMII_BASE + 0x06a8)
+
+enum hbg_port_mode {
+ /* 0x0 ~ 0x5 are reserved */
+ HBG_PORT_MODE_SGMII_10M = 0x6,
+ HBG_PORT_MODE_SGMII_100M = 0x7,
+ HBG_PORT_MODE_SGMII_1000M = 0x8,
+};
+
+struct hbg_tx_desc {
+ u32 word0;
+ u32 word1;
+ u32 word2; /* pkt_addr */
+ u32 word3; /* clear_addr */
+};
+
+#define HBG_TX_DESC_W0_IP_OFF_M GENMASK(30, 26)
+#define HBG_TX_DESC_W0_l3_CS_B BIT(2)
+#define HBG_TX_DESC_W0_WB_B BIT(1)
+#define HBG_TX_DESC_W0_l4_CS_B BIT(0)
+#define HBG_TX_DESC_W1_SEND_LEN_M GENMASK(19, 4)
+
+struct hbg_rx_desc {
+ u32 word0;
+ u32 word1; /* tag */
+ u32 word2;
+ u32 word3;
+ u32 word4;
+ u32 word5;
+};
+
+#define HBG_RX_DESC_W2_PKT_LEN_M GENMASK(31, 16)
+#define HBG_RX_DESC_W2_PORT_NUM_M GENMASK(15, 12)
+#define HBG_RX_DESC_W3_IP_OFFSET_M GENMASK(23, 16)
+#define HBG_RX_DESC_W3_VLAN_M GENMASK(15, 0)
+#define HBG_RX_DESC_W4_IP_TCP_UDP_M GENMASK(31, 30)
+#define HBG_RX_DESC_W4_IPSEC_B BIT(29)
+#define HBG_RX_DESC_W4_IP_VERSION_B BIT(28)
+#define HBG_RX_DESC_W4_L4_ERR_CODE_M GENMASK(26, 23)
+#define HBG_RX_DESC_W4_FRAG_B BIT(22)
+#define HBG_RX_DESC_W4_OPT_B BIT(21)
+#define HBG_RX_DESC_W4_IP_VERSION_ERR_B BIT(20)
+#define HBG_RX_DESC_W4_BRD_CST_B BIT(19)
+#define HBG_RX_DESC_W4_MUL_CST_B BIT(18)
+#define HBG_RX_DESC_W4_ARP_B BIT(17)
+#define HBG_RX_DESC_W4_RARP_B BIT(16)
+#define HBG_RX_DESC_W4_ICMP_B BIT(15)
+#define HBG_RX_DESC_W4_VLAN_FLAG_B BIT(14)
+#define HBG_RX_DESC_W4_DROP_B BIT(13)
+#define HBG_RX_DESC_W4_L3_ERR_CODE_M GENMASK(12, 9)
+#define HBG_RX_DESC_W4_L2_ERR_B BIT(8)
+#define HBG_RX_DESC_W4_IDX_MATCH_B BIT(7)
+#define HBG_RX_DESC_W4_PARSE_MODE_M GENMASK(6, 5)
+#define HBG_RX_DESC_W5_VALID_SIZE_M GENMASK(15, 0)
+
+enum hbg_l3_err_code {
+ HBG_L3_OK = 0,
+ HBG_L3_WRONG_HEAD,
+ HBG_L3_CSUM_ERR,
+ HBG_L3_LEN_ERR,
+ HBG_L3_ZERO_TTL,
+ HBG_L3_RSVD,
+};
+
+enum hbg_l4_err_code {
+ HBG_L4_OK = 0,
+ HBG_L4_WRONG_HEAD,
+ HBG_L4_LEN_ERR,
+ HBG_L4_CSUM_ERR,
+ HBG_L4_ZERO_PORT_NUM,
+ HBG_L4_RSVD,
+};
+
+enum hbg_pkt_type_code {
+ HBG_NO_IP_PKT = 0,
+ HBG_IP_PKT,
+ HBG_TCP_PKT,
+ HBG_UDP_PKT,
+};
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_trace.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_trace.h
new file mode 100644
index 000000000000..b70fd960da8d
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_trace.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2025 Hisilicon Limited. */
+
+/* This must be outside ifdef _HBG_TRACE_H_ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hibmcge
+
+#if !defined(_HBG_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _HBG_TRACE_H_
+
+#include <linux/bitfield.h>
+#include <linux/pci.h>
+#include <linux/tracepoint.h>
+#include <linux/types.h>
+#include "hbg_reg.h"
+
+TRACE_EVENT(hbg_rx_desc,
+ TP_PROTO(struct hbg_priv *priv, u32 index,
+ struct hbg_rx_desc *rx_desc),
+ TP_ARGS(priv, index, rx_desc),
+
+ TP_STRUCT__entry(__field(u32, index)
+ __field(u8, port_num)
+ __field(u8, ip_offset)
+ __field(u8, parse_mode)
+ __field(u8, l4_error_code)
+ __field(u8, l3_error_code)
+ __field(u8, l2_error_code)
+ __field(u16, packet_len)
+ __field(u16, valid_size)
+ __field(u16, vlan)
+ __string(pciname, pci_name(priv->pdev))
+ __string(devname, priv->netdev->name)
+ ),
+
+ TP_fast_assign(__entry->index = index,
+ __entry->packet_len =
+ FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M,
+ rx_desc->word2);
+ __entry->port_num =
+ FIELD_GET(HBG_RX_DESC_W2_PORT_NUM_M,
+ rx_desc->word2);
+ __entry->ip_offset =
+ FIELD_GET(HBG_RX_DESC_W3_IP_OFFSET_M,
+ rx_desc->word3);
+ __entry->vlan =
+ FIELD_GET(HBG_RX_DESC_W3_VLAN_M,
+ rx_desc->word3);
+ __entry->parse_mode =
+ FIELD_GET(HBG_RX_DESC_W4_PARSE_MODE_M,
+ rx_desc->word4);
+ __entry->l4_error_code =
+ FIELD_GET(HBG_RX_DESC_W4_L4_ERR_CODE_M,
+ rx_desc->word4);
+ __entry->l3_error_code =
+ FIELD_GET(HBG_RX_DESC_W4_L3_ERR_CODE_M,
+ rx_desc->word4);
+ __entry->l2_error_code =
+ FIELD_GET(HBG_RX_DESC_W4_L2_ERR_B,
+ rx_desc->word4);
+ __entry->valid_size =
+ FIELD_GET(HBG_RX_DESC_W5_VALID_SIZE_M,
+ rx_desc->word5);
+ __assign_str(pciname);
+ __assign_str(devname);
+ ),
+
+ TP_printk("%s %s index:%u, port num:%u, len:%u, valid size:%u, ip_offset:%u, vlan:0x%04x, parse mode:%u, l4_err:0x%x, l3_err:0x%x, l2_err:0x%x",
+ __get_str(pciname), __get_str(devname), __entry->index,
+ __entry->port_num, __entry->packet_len,
+ __entry->valid_size, __entry->ip_offset, __entry->vlan,
+ __entry->parse_mode, __entry->l4_error_code,
+ __entry->l3_error_code, __entry->l2_error_code
+ )
+);
+
+#endif /* _HBG_TRACE_H_ */
+
+/* This must be outside ifdef _HBG_TRACE_H_ */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE hbg_trace
+#include <trace/define_trace.h>
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c
new file mode 100644
index 000000000000..a4ea92c31c2f
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c
@@ -0,0 +1,717 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2024 Hisilicon Limited.
+
+#include <net/netdev_queues.h>
+#include "hbg_common.h"
+#include "hbg_irq.h"
+#include "hbg_reg.h"
+#include "hbg_txrx.h"
+
+#define CREATE_TRACE_POINTS
+#include "hbg_trace.h"
+
+#define netdev_get_tx_ring(netdev) \
+ (&(((struct hbg_priv *)netdev_priv(netdev))->tx_ring))
+
+#define buffer_to_dma_dir(buffer) (((buffer)->dir == HBG_DIR_RX) ? \
+ DMA_FROM_DEVICE : DMA_TO_DEVICE)
+
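+
+/* Ring index helpers: ntc/ntu wrap modulo ring->len, and one entry is always
+ * left unused (hbg_queue_left_num() subtracts one) so that a full queue can
+ * be distinguished from an empty one.
+ */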
+#define hbg_queue_used_num(head, tail, ring) ({ \
+ typeof(ring) _ring = (ring); \
+ ((tail) + _ring->len - (head)) % _ring->len; })
+#define hbg_queue_left_num(head, tail, ring) ({ \
+ typeof(ring) _r = (ring); \
+ _r->len - hbg_queue_used_num((head), (tail), _r) - 1; })
+#define hbg_queue_is_empty(head, tail, ring) \
+ (hbg_queue_used_num((head), (tail), (ring)) == 0)
+#define hbg_queue_is_full(head, tail, ring) \
+ (hbg_queue_left_num((head), (tail), (ring)) == 0)
+#define hbg_queue_next_prt(p, ring) (((p) + 1) % (ring)->len)
+#define hbg_queue_move_next(p, ring) ({ \
+ typeof(ring) _ring = (ring); \
+ _ring->p = hbg_queue_next_prt(_ring->p, _ring); })
+
+#define hbg_get_page_order(ring) ({ \
+ typeof(ring) _ring = (ring); \
+ get_order(hbg_spec_max_frame_len(_ring->priv, _ring->dir)); })
+#define hbg_get_page_size(ring) (PAGE_SIZE << hbg_get_page_order((ring)))
+
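+/* Stop the TX queue when fewer than HBG_TX_STOP_THRS free descriptors remain
+ * and restart it once at least HBG_TX_START_THRS are free again; see the
+ * netif_subqueue_maybe_stop() call in hbg_net_start_xmit().
+ */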
+#define HBG_TX_STOP_THRS 2
+#define HBG_TX_START_THRS (2 * HBG_TX_STOP_THRS)
+
+static int hbg_dma_map(struct hbg_buffer *buffer)
+{
+ struct hbg_priv *priv = buffer->priv;
+
+ buffer->skb_dma = dma_map_single(&priv->pdev->dev,
+ buffer->skb->data, buffer->skb_len,
+ buffer_to_dma_dir(buffer));
+ if (unlikely(dma_mapping_error(&priv->pdev->dev, buffer->skb_dma))) {
+ if (buffer->dir == HBG_DIR_RX)
+ priv->stats.rx_dma_err_cnt++;
+ else
+ priv->stats.tx_dma_err_cnt++;
+
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void hbg_dma_unmap(struct hbg_buffer *buffer)
+{
+ struct hbg_priv *priv = buffer->priv;
+
+ if (unlikely(!buffer->skb_dma))
+ return;
+
+ dma_unmap_single(&priv->pdev->dev, buffer->skb_dma, buffer->skb_len,
+ buffer_to_dma_dir(buffer));
+ buffer->skb_dma = 0;
+}
+
+static void hbg_buffer_free_page(struct hbg_buffer *buffer)
+{
+ struct hbg_ring *ring = buffer->ring;
+
+ if (unlikely(!buffer->page))
+ return;
+
+ page_pool_put_full_page(ring->page_pool, buffer->page, false);
+
+ buffer->page = NULL;
+ buffer->page_dma = 0;
+ buffer->page_addr = NULL;
+ buffer->page_size = 0;
+ buffer->page_offset = 0;
+}
+
+static int hbg_buffer_alloc_page(struct hbg_buffer *buffer)
+{
+ struct hbg_ring *ring = buffer->ring;
+ u32 len = hbg_get_page_size(ring);
+ u32 offset;
+
+ if (unlikely(!ring->page_pool))
+ return 0;
+
+ buffer->page = page_pool_dev_alloc_frag(ring->page_pool, &offset, len);
+ if (unlikely(!buffer->page))
+ return -ENOMEM;
+
+ buffer->page_dma = page_pool_get_dma_addr(buffer->page) + offset;
+ buffer->page_addr = page_address(buffer->page) + offset;
+ buffer->page_size = len;
+ buffer->page_offset = offset;
+
+ return 0;
+}
+
+static void hbg_init_tx_desc(struct hbg_buffer *buffer,
+ struct hbg_tx_desc *tx_desc)
+{
+ u32 ip_offset = buffer->skb->network_header - buffer->skb->mac_header;
+ u32 word0 = 0;
+
+ word0 |= FIELD_PREP(HBG_TX_DESC_W0_WB_B, HBG_STATUS_ENABLE);
+ word0 |= FIELD_PREP(HBG_TX_DESC_W0_IP_OFF_M, ip_offset);
+ if (likely(buffer->skb->ip_summed == CHECKSUM_PARTIAL)) {
+ word0 |= FIELD_PREP(HBG_TX_DESC_W0_l3_CS_B, HBG_STATUS_ENABLE);
+ word0 |= FIELD_PREP(HBG_TX_DESC_W0_l4_CS_B, HBG_STATUS_ENABLE);
+ }
+
+ tx_desc->word0 = word0;
+ tx_desc->word1 = FIELD_PREP(HBG_TX_DESC_W1_SEND_LEN_M,
+ buffer->skb->len);
+ tx_desc->word2 = buffer->skb_dma;
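+ /* word3 carries the DMA address used for the write-back of the TX
+ * completion state, which hbg_napi_tx_recycle() later checks through
+ * buffer->state.
+ */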
+ tx_desc->word3 = buffer->state_dma;
+}
+
+netdev_tx_t hbg_net_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct hbg_ring *ring = netdev_get_tx_ring(netdev);
+ struct hbg_priv *priv = netdev_priv(netdev);
+ /* This smp_load_acquire() pairs with smp_store_release() in
+ * hbg_napi_tx_recycle(), called in the TX interrupt handling path.
+ */
+ u32 ntc = smp_load_acquire(&ring->ntc);
+ struct hbg_buffer *buffer;
+ struct hbg_tx_desc tx_desc;
+ u32 ntu = ring->ntu;
+
+ if (unlikely(!skb->len ||
+ skb->len > hbg_spec_max_frame_len(priv, HBG_DIR_TX))) {
+ dev_kfree_skb_any(skb);
+ netdev->stats.tx_errors++;
+ return NETDEV_TX_OK;
+ }
+
+ if (!netif_subqueue_maybe_stop(netdev, 0,
+ hbg_queue_left_num(ntc, ntu, ring),
+ HBG_TX_STOP_THRS, HBG_TX_START_THRS))
+ return NETDEV_TX_BUSY;
+
+ buffer = &ring->queue[ntu];
+ buffer->skb = skb;
+ buffer->skb_len = skb->len;
+ if (unlikely(hbg_dma_map(buffer))) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ buffer->state = HBG_TX_STATE_START;
+ hbg_init_tx_desc(buffer, &tx_desc);
+ hbg_hw_set_tx_desc(priv, &tx_desc);
+
+ /* This smp_store_release() pairs with smp_load_acquire() in
+ * hbg_napi_tx_recycle(), called in the TX interrupt handling path.
+ */
+ smp_store_release(&ring->ntu, hbg_queue_next_prt(ntu, ring));
+ dev_sw_netstats_tx_add(netdev, 1, skb->len);
+ return NETDEV_TX_OK;
+}
+
+static void hbg_buffer_free_skb(struct hbg_buffer *buffer)
+{
+ if (unlikely(!buffer->skb))
+ return;
+
+ dev_kfree_skb_any(buffer->skb);
+ buffer->skb = NULL;
+}
+
+static void hbg_buffer_free(struct hbg_buffer *buffer)
+{
+ if (buffer->skb) {
+ hbg_dma_unmap(buffer);
+ return hbg_buffer_free_skb(buffer);
+ }
+
+ hbg_buffer_free_page(buffer);
+}
+
+static int hbg_napi_tx_recycle(struct napi_struct *napi, int budget)
+{
+ struct hbg_ring *ring = container_of(napi, struct hbg_ring, napi);
+ /* This smp_load_acquire() pairs with smp_store_release() in
+ * hbg_net_start_xmit() called in xmit process.
+ */
+ u32 ntu = smp_load_acquire(&ring->ntu);
+ struct hbg_priv *priv = ring->priv;
+ struct hbg_buffer *buffer;
+ u32 ntc = ring->ntc;
+ int packet_done = 0;
+
+ /* We need to do cleanup even if budget is 0.
+ * Per the NAPI documentation, budget is for Rx only,
+ * so we hardcode the amount of work Tx NAPI does to 128.
+ */
+ budget = 128;
+ while (packet_done < budget) {
+ if (unlikely(hbg_queue_is_empty(ntc, ntu, ring)))
+ break;
+
+ /* make sure HW write desc complete */
+ dma_rmb();
+
+ buffer = &ring->queue[ntc];
+ if (buffer->state != HBG_TX_STATE_COMPLETE)
+ break;
+
+ hbg_buffer_free(buffer);
+ ntc = hbg_queue_next_prt(ntc, ring);
+ packet_done++;
+ }
+
+ /* This smp_store_release() pairs with smp_load_acquire() in
+ * hbg_net_start_xmit() called in xmit process.
+ */
+ smp_store_release(&ring->ntc, ntc);
+ netif_wake_queue(priv->netdev);
+
+ if (likely(packet_done < budget &&
+ napi_complete_done(napi, packet_done)))
+ hbg_hw_irq_enable(priv, HBG_INT_MSK_TX_B, true);
+
+ return packet_done;
+}
+
+static bool hbg_rx_check_l3l4_error(struct hbg_priv *priv,
+ struct hbg_rx_desc *desc,
+ struct sk_buff *skb)
+{
+ bool rx_checksum_offload = !!(priv->netdev->features & NETIF_F_RXCSUM);
+
+ skb->ip_summed = rx_checksum_offload ?
+ CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
+
+ if (likely(!FIELD_GET(HBG_RX_DESC_W4_L3_ERR_CODE_M, desc->word4) &&
+ !FIELD_GET(HBG_RX_DESC_W4_L4_ERR_CODE_M, desc->word4)))
+ return true;
+
+ switch (FIELD_GET(HBG_RX_DESC_W4_L3_ERR_CODE_M, desc->word4)) {
+ case HBG_L3_OK:
+ break;
+ case HBG_L3_WRONG_HEAD:
+ priv->stats.rx_desc_l3_wrong_head_cnt++;
+ return false;
+ case HBG_L3_CSUM_ERR:
+ skb->ip_summed = CHECKSUM_NONE;
+ priv->stats.rx_desc_l3_csum_err_cnt++;
+
+ /* Don't drop packets on csum validation failure,
+ * as suggested by Jakub
+ */
+ break;
+ case HBG_L3_LEN_ERR:
+ priv->stats.rx_desc_l3_len_err_cnt++;
+ return false;
+ case HBG_L3_ZERO_TTL:
+ priv->stats.rx_desc_l3_zero_ttl_cnt++;
+ return false;
+ default:
+ priv->stats.rx_desc_l3_other_cnt++;
+ return false;
+ }
+
+ switch (FIELD_GET(HBG_RX_DESC_W4_L4_ERR_CODE_M, desc->word4)) {
+ case HBG_L4_OK:
+ break;
+ case HBG_L4_WRONG_HEAD:
+ priv->stats.rx_desc_l4_wrong_head_cnt++;
+ return false;
+ case HBG_L4_LEN_ERR:
+ priv->stats.rx_desc_l4_len_err_cnt++;
+ return false;
+ case HBG_L4_CSUM_ERR:
+ skb->ip_summed = CHECKSUM_NONE;
+ priv->stats.rx_desc_l4_csum_err_cnt++;
+
+ /* Don't drop packets on csum validation failure,
+ * as suggested by Jakub
+ */
+ break;
+ case HBG_L4_ZERO_PORT_NUM:
+ priv->stats.rx_desc_l4_zero_port_num_cnt++;
+ return false;
+ default:
+ priv->stats.rx_desc_l4_other_cnt++;
+ return false;
+ }
+
+ return true;
+}
+
+static void hbg_update_rx_ip_protocol_stats(struct hbg_priv *priv,
+ struct hbg_rx_desc *desc)
+{
+ if (unlikely(!FIELD_GET(HBG_RX_DESC_W4_IP_TCP_UDP_M, desc->word4))) {
+ priv->stats.rx_desc_no_ip_pkt_cnt++;
+ return;
+ }
+
+ if (unlikely(FIELD_GET(HBG_RX_DESC_W4_IP_VERSION_ERR_B, desc->word4))) {
+ priv->stats.rx_desc_ip_ver_err_cnt++;
+ return;
+ }
+
+ /* 0:ipv4, 1:ipv6 */
+ if (FIELD_GET(HBG_RX_DESC_W4_IP_VERSION_B, desc->word4))
+ priv->stats.rx_desc_ipv6_pkt_cnt++;
+ else
+ priv->stats.rx_desc_ipv4_pkt_cnt++;
+
+ switch (FIELD_GET(HBG_RX_DESC_W4_IP_TCP_UDP_M, desc->word4)) {
+ case HBG_IP_PKT:
+ priv->stats.rx_desc_ip_pkt_cnt++;
+ if (FIELD_GET(HBG_RX_DESC_W4_OPT_B, desc->word4))
+ priv->stats.rx_desc_ip_opt_pkt_cnt++;
+ if (FIELD_GET(HBG_RX_DESC_W4_FRAG_B, desc->word4))
+ priv->stats.rx_desc_frag_cnt++;
+
+ if (FIELD_GET(HBG_RX_DESC_W4_ICMP_B, desc->word4))
+ priv->stats.rx_desc_icmp_pkt_cnt++;
+ else if (FIELD_GET(HBG_RX_DESC_W4_IPSEC_B, desc->word4))
+ priv->stats.rx_desc_ipsec_pkt_cnt++;
+ break;
+ case HBG_TCP_PKT:
+ priv->stats.rx_desc_tcp_pkt_cnt++;
+ break;
+ case HBG_UDP_PKT:
+ priv->stats.rx_desc_udp_pkt_cnt++;
+ break;
+ default:
+ priv->stats.rx_desc_no_ip_pkt_cnt++;
+ break;
+ }
+}
+
+static void hbg_update_rx_protocol_stats(struct hbg_priv *priv,
+ struct hbg_rx_desc *desc)
+{
+ if (unlikely(!FIELD_GET(HBG_RX_DESC_W4_IDX_MATCH_B, desc->word4))) {
+ priv->stats.rx_desc_key_not_match_cnt++;
+ return;
+ }
+
+ if (FIELD_GET(HBG_RX_DESC_W4_BRD_CST_B, desc->word4))
+ priv->stats.rx_desc_broadcast_pkt_cnt++;
+ else if (FIELD_GET(HBG_RX_DESC_W4_MUL_CST_B, desc->word4))
+ priv->stats.rx_desc_multicast_pkt_cnt++;
+
+ if (FIELD_GET(HBG_RX_DESC_W4_VLAN_FLAG_B, desc->word4))
+ priv->stats.rx_desc_vlan_pkt_cnt++;
+
+ if (FIELD_GET(HBG_RX_DESC_W4_ARP_B, desc->word4)) {
+ priv->stats.rx_desc_arp_pkt_cnt++;
+ return;
+ } else if (FIELD_GET(HBG_RX_DESC_W4_RARP_B, desc->word4)) {
+ priv->stats.rx_desc_rarp_pkt_cnt++;
+ return;
+ }
+
+ hbg_update_rx_ip_protocol_stats(priv, desc);
+}
+
+static bool hbg_rx_pkt_check(struct hbg_priv *priv, struct hbg_rx_desc *desc,
+ struct sk_buff *skb)
+{
+ if (unlikely(FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, desc->word2) >
+ priv->dev_specs.max_frame_len)) {
+ priv->stats.rx_desc_pkt_len_err_cnt++;
+ return false;
+ }
+
+ if (unlikely(FIELD_GET(HBG_RX_DESC_W2_PORT_NUM_M, desc->word2) !=
+ priv->dev_specs.mac_id ||
+ FIELD_GET(HBG_RX_DESC_W4_DROP_B, desc->word4))) {
+ priv->stats.rx_desc_drop++;
+ return false;
+ }
+
+ if (unlikely(FIELD_GET(HBG_RX_DESC_W4_L2_ERR_B, desc->word4))) {
+ priv->stats.rx_desc_l2_err_cnt++;
+ return false;
+ }
+
+ if (unlikely(!hbg_rx_check_l3l4_error(priv, desc, skb))) {
+ priv->stats.rx_desc_l3l4_err_cnt++;
+ return false;
+ }
+
+ hbg_update_rx_protocol_stats(priv, desc);
+ return true;
+}
+
+static int hbg_rx_fill_one_buffer(struct hbg_priv *priv)
+{
+ struct hbg_ring *ring = &priv->rx_ring;
+ struct hbg_buffer *buffer;
+ int ret;
+
+ if (hbg_queue_is_full(ring->ntc, ring->ntu, ring) ||
+ hbg_fifo_is_full(priv, ring->dir))
+ return 0;
+
+ buffer = &ring->queue[ring->ntu];
+ ret = hbg_buffer_alloc_page(buffer);
+ if (unlikely(ret))
+ return ret;
+
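+ /* Clear the descriptor area at the head of the buffer and flush it to
+ * the device, so a stale packet length is never mistaken for a
+ * completed receive (see hbg_sync_data_from_hw()).
+ */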
+ memset(buffer->page_addr, 0, HBG_PACKET_HEAD_SIZE);
+ dma_sync_single_for_device(&priv->pdev->dev, buffer->page_dma,
+ HBG_PACKET_HEAD_SIZE, DMA_TO_DEVICE);
+
+ hbg_hw_fill_buffer(priv, buffer->page_dma);
+ hbg_queue_move_next(ntu, ring);
+ return 0;
+}
+
+static int hbg_rx_fill_buffers(struct hbg_priv *priv)
+{
+ u32 remained = hbg_hw_get_fifo_used_num(priv, HBG_DIR_RX);
+ u32 max_count = priv->dev_specs.rx_fifo_num;
+ u32 refill_count;
+ int ret;
+
+ if (unlikely(remained >= max_count))
+ return 0;
+
+ refill_count = max_count - remained;
+ while (refill_count--) {
+ ret = hbg_rx_fill_one_buffer(priv);
+ if (unlikely(ret))
+ break;
+ }
+
+ return ret;
+}
+
+static bool hbg_sync_data_from_hw(struct hbg_priv *priv,
+ struct hbg_buffer *buffer)
+{
+ struct hbg_rx_desc *rx_desc;
+
+ /* make sure HW write desc complete */
+ dma_rmb();
+
+ dma_sync_single_for_cpu(&priv->pdev->dev, buffer->page_dma,
+ buffer->page_size, DMA_FROM_DEVICE);
+
+ rx_desc = (struct hbg_rx_desc *)buffer->page_addr;
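+ /* A non-zero packet length written back by the hardware means the
+ * buffer now holds a complete frame.
+ */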
+ return FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, rx_desc->word2) != 0;
+}
+
+static int hbg_build_skb(struct hbg_priv *priv,
+ struct hbg_buffer *buffer, u32 pkt_len)
+{
+ net_prefetch(buffer->page_addr);
+
+ buffer->skb = napi_build_skb(buffer->page_addr, buffer->page_size);
+ if (unlikely(!buffer->skb))
+ return -ENOMEM;
+ skb_mark_for_recycle(buffer->skb);
+
+ /* page will be freed together with the skb */
+ buffer->page = NULL;
+
+ return 0;
+}
+
+static int hbg_napi_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct hbg_ring *ring = container_of(napi, struct hbg_ring, napi);
+ struct hbg_priv *priv = ring->priv;
+ struct hbg_rx_desc *rx_desc;
+ struct hbg_buffer *buffer;
+ u32 packet_done = 0;
+ u32 pkt_len;
+
+ hbg_rx_fill_buffers(priv);
+ while (packet_done < budget) {
+ if (unlikely(hbg_queue_is_empty(ring->ntc, ring->ntu, ring)))
+ break;
+
+ buffer = &ring->queue[ring->ntc];
+ if (unlikely(!buffer->page))
+ goto next_buffer;
+
+ if (unlikely(!hbg_sync_data_from_hw(priv, buffer)))
+ break;
+ rx_desc = (struct hbg_rx_desc *)buffer->page_addr;
+ pkt_len = FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, rx_desc->word2);
+ trace_hbg_rx_desc(priv, ring->ntc, rx_desc);
+
+ if (unlikely(hbg_build_skb(priv, buffer, pkt_len))) {
+ hbg_buffer_free_page(buffer);
+ goto next_buffer;
+ }
+
+ if (unlikely(!hbg_rx_pkt_check(priv, rx_desc, buffer->skb))) {
+ hbg_buffer_free_skb(buffer);
+ goto next_buffer;
+ }
+
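+ /* The hardware places its RX descriptor at the head of the buffer,
+ * so skip over it (plus NET_IP_ALIGN) before exposing the frame.
+ */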
+ skb_reserve(buffer->skb, HBG_PACKET_HEAD_SIZE + NET_IP_ALIGN);
+ skb_put(buffer->skb, pkt_len);
+ buffer->skb->protocol = eth_type_trans(buffer->skb,
+ priv->netdev);
+ dev_sw_netstats_rx_add(priv->netdev, pkt_len);
+ napi_gro_receive(napi, buffer->skb);
+ buffer->skb = NULL;
+ buffer->page = NULL;
+
+next_buffer:
+ hbg_rx_fill_one_buffer(priv);
+ hbg_queue_move_next(ntc, ring);
+ packet_done++;
+ }
+
+ if (likely(packet_done < budget &&
+ napi_complete_done(napi, packet_done)))
+ hbg_hw_irq_enable(priv, HBG_INT_MSK_RX_B, true);
+
+ return packet_done;
+}
+
+static void hbg_ring_page_pool_destroy(struct hbg_ring *ring)
+{
+ if (!ring->page_pool)
+ return;
+
+ page_pool_destroy(ring->page_pool);
+ ring->page_pool = NULL;
+}
+
+static int hbg_ring_page_pool_init(struct hbg_priv *priv, struct hbg_ring *ring)
+{
+ u32 buf_size = hbg_spec_max_frame_len(priv, ring->dir);
+ struct page_pool_params pp_params = {
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .order = hbg_get_page_order(ring),
+ .pool_size = ring->len * buf_size / hbg_get_page_size(ring),
+ .nid = dev_to_node(&priv->pdev->dev),
+ .dev = &priv->pdev->dev,
+ .napi = &ring->napi,
+ .dma_dir = DMA_FROM_DEVICE,
+ .offset = 0,
+ .max_len = hbg_get_page_size(ring),
+ };
+ int ret = 0;
+
+ ring->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(ring->page_pool)) {
+ ret = PTR_ERR(ring->page_pool);
+ dev_err(&priv->pdev->dev,
+ "failed to create page pool, ret = %d\n", ret);
+ ring->page_pool = NULL;
+ }
+
+ return ret;
+}
+
+static void hbg_ring_uninit(struct hbg_ring *ring)
+{
+ struct hbg_buffer *buffer;
+ u32 i;
+
+ if (!ring->queue)
+ return;
+
+ napi_disable(&ring->napi);
+ netif_napi_del(&ring->napi);
+
+ for (i = 0; i < ring->len; i++) {
+ buffer = &ring->queue[i];
+ hbg_buffer_free(buffer);
+ buffer->ring = NULL;
+ buffer->priv = NULL;
+ }
+
+ hbg_ring_page_pool_destroy(ring);
+ dma_free_coherent(&ring->priv->pdev->dev,
+ ring->len * sizeof(*ring->queue),
+ ring->queue, ring->queue_dma);
+ ring->queue = NULL;
+ ring->queue_dma = 0;
+ ring->len = 0;
+ ring->priv = NULL;
+}
+
+static int hbg_ring_init(struct hbg_priv *priv, struct hbg_ring *ring,
+ int (*napi_poll)(struct napi_struct *, int),
+ enum hbg_dir dir)
+{
+ struct hbg_buffer *buffer;
+ u32 i, len;
+ int ret;
+
+ len = hbg_get_spec_fifo_max_num(priv, dir) + 1;
+ /* To improve receive performance under high-stress scenarios,
+ * hbg_napi_rx_poll() first hands the spare half of the ring
+ * entries to the hardware via hbg_rx_fill_buffers(), and then
+ * processes the packets in the original half, to avoid packet
+ * loss caused by hardware overflow as much as possible.
+ */
+ if (dir == HBG_DIR_RX)
+ len += hbg_get_spec_fifo_max_num(priv, dir);
+
+ ring->queue = dma_alloc_coherent(&priv->pdev->dev,
+ len * sizeof(*ring->queue),
+ &ring->queue_dma, GFP_KERNEL);
+ if (!ring->queue)
+ return -ENOMEM;
+
+ for (i = 0; i < len; i++) {
+ buffer = &ring->queue[i];
+ buffer->skb_len = 0;
+ buffer->dir = dir;
+ buffer->ring = ring;
+ buffer->priv = priv;
+ buffer->state_dma = ring->queue_dma + (i * sizeof(*buffer));
+ }
+
+ ring->dir = dir;
+ ring->priv = priv;
+ ring->ntc = 0;
+ ring->ntu = 0;
+ ring->len = len;
+
+ if (dir == HBG_DIR_TX) {
+ netif_napi_add_tx(priv->netdev, &ring->napi, napi_poll);
+ } else {
+ netif_napi_add(priv->netdev, &ring->napi, napi_poll);
+
+ ret = hbg_ring_page_pool_init(priv, ring);
+ if (ret) {
+ netif_napi_del(&ring->napi);
+ dma_free_coherent(&ring->priv->pdev->dev,
+ ring->len * sizeof(*ring->queue),
+ ring->queue, ring->queue_dma);
+ ring->queue = NULL;
+ ring->len = 0;
+ return ret;
+ }
+ }
+
+ napi_enable(&ring->napi);
+ return 0;
+}
+
+static int hbg_tx_ring_init(struct hbg_priv *priv)
+{
+ struct hbg_ring *tx_ring = &priv->tx_ring;
+
+ if (!tx_ring->tout_log_buf)
+ tx_ring->tout_log_buf = devm_kmalloc(&priv->pdev->dev,
+ HBG_TX_TIMEOUT_BUF_LEN,
+ GFP_KERNEL);
+
+ if (!tx_ring->tout_log_buf)
+ return -ENOMEM;
+
+ return hbg_ring_init(priv, tx_ring, hbg_napi_tx_recycle, HBG_DIR_TX);
+}
+
+static int hbg_rx_ring_init(struct hbg_priv *priv)
+{
+ int ret;
+
+ ret = hbg_ring_init(priv, &priv->rx_ring, hbg_napi_rx_poll, HBG_DIR_RX);
+ if (ret)
+ return ret;
+
+ ret = hbg_rx_fill_buffers(priv);
+ if (ret)
+ hbg_ring_uninit(&priv->rx_ring);
+
+ return ret;
+}
+
+int hbg_txrx_init(struct hbg_priv *priv)
+{
+ int ret;
+
+ ret = hbg_tx_ring_init(priv);
+ if (ret) {
+ dev_err(&priv->pdev->dev,
+ "failed to init tx ring, ret = %d\n", ret);
+ return ret;
+ }
+
+ ret = hbg_rx_ring_init(priv);
+ if (ret) {
+ dev_err(&priv->pdev->dev,
+ "failed to init rx ring, ret = %d\n", ret);
+ hbg_ring_uninit(&priv->tx_ring);
+ }
+
+ return ret;
+}
+
+void hbg_txrx_uninit(struct hbg_priv *priv)
+{
+ hbg_ring_uninit(&priv->tx_ring);
+ hbg_ring_uninit(&priv->rx_ring);
+}
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.h
new file mode 100644
index 000000000000..8b6110599e10
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2024 Hisilicon Limited. */
+
+#ifndef __HBG_TXRX_H
+#define __HBG_TXRX_H
+
+#include <linux/etherdevice.h>
+#include "hbg_hw.h"
+
+static inline u32 hbg_spec_max_frame_len(struct hbg_priv *priv,
+ enum hbg_dir dir)
+{
+ return (dir == HBG_DIR_TX) ? priv->dev_specs.max_frame_len :
+ priv->dev_specs.rx_buf_size;
+}
+
+static inline u32 hbg_get_spec_fifo_max_num(struct hbg_priv *priv,
+ enum hbg_dir dir)
+{
+ return (dir == HBG_DIR_TX) ? priv->dev_specs.tx_fifo_num :
+ priv->dev_specs.rx_fifo_num;
+}
+
+static inline bool hbg_fifo_is_full(struct hbg_priv *priv, enum hbg_dir dir)
+{
+ return hbg_hw_get_fifo_used_num(priv, dir) >=
+ hbg_get_spec_fifo_max_num(priv, dir);
+}
+
+static inline u32 hbg_get_queue_used_num(struct hbg_ring *ring)
+{
+ u32 len = READ_ONCE(ring->len);
+
+ if (!len)
+ return 0;
+
+ return (READ_ONCE(ring->ntu) + len - READ_ONCE(ring->ntc)) % len;
+}
+
+netdev_tx_t hbg_net_start_xmit(struct sk_buff *skb, struct net_device *netdev);
+int hbg_txrx_init(struct hbg_priv *priv);
+void hbg_txrx_uninit(struct hbg_priv *priv);
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index b91e7a06b97f..18376bcc718a 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -934,8 +934,6 @@ static int hip04_mac_probe(struct platform_device *pdev)
priv->chan = arg.args[1] * RX_DESC_NUM;
priv->group = arg.args[2];
- hrtimer_init(&priv->tx_coalesce_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-
/* BQL will try to keep the TX queue as short as possible, but it can't
* be faster than tx_coalesce_usecs, so we need a fast timeout here,
* but also long enough to gather up enough frames to ensure we don't
@@ -944,9 +942,10 @@ static int hip04_mac_probe(struct platform_device *pdev)
*/
priv->tx_coalesce_frames = TX_DESC_NUM * 3 / 4;
priv->tx_coalesce_usecs = 200;
- priv->tx_coalesce_timer.function = tx_done;
+ hrtimer_setup(&priv->tx_coalesce_timer, tx_done, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
priv->map = syscon_node_to_regmap(arg.np);
+ of_node_put(arg.np);
if (IS_ERR(priv->map)) {
dev_warn(d, "no syscon hisilicon,hip04-ppe\n");
ret = PTR_ERR(priv->map);
@@ -1046,7 +1045,7 @@ MODULE_DEVICE_TABLE(of, hip04_mac_match);
static struct platform_driver hip04_mac_driver = {
.probe = hip04_mac_probe,
- .remove_new = hip04_remove,
+ .remove = hip04_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = hip04_mac_match,
diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c
index 2406263c9dd3..d244a40df430 100644
--- a/drivers/net/ethernet/hisilicon/hisi_femac.c
+++ b/drivers/net/ethernet/hisilicon/hisi_femac.c
@@ -959,7 +959,7 @@ static struct platform_driver hisi_femac_driver = {
.of_match_table = hisi_femac_match,
},
.probe = hisi_femac_drv_probe,
- .remove_new = hisi_femac_drv_remove,
+ .remove = hisi_femac_drv_remove,
#ifdef CONFIG_PM
.suspend = hisi_femac_drv_suspend,
.resume = hisi_femac_drv_resume,
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index 1a972b093a42..e3e7f2270560 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -1312,7 +1312,7 @@ static struct platform_driver hix5hd2_dev_driver = {
.of_match_table = hix5hd2_of_match,
},
.probe = hix5hd2_dev_probe,
- .remove_new = hix5hd2_dev_remove,
+ .remove = hix5hd2_dev_remove,
};
module_platform_driver(hix5hd2_dev_driver);
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index d72657444ef3..2ae34d01fd36 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -512,7 +512,7 @@ struct hnae_ae_ops {
struct net_device_stats *net_stats);
void (*get_stats)(struct hnae_handle *handle, u64 *data);
void (*get_strings)(struct hnae_handle *handle,
- u32 stringset, u8 *data);
+ u32 stringset, u8 **data);
int (*get_sset_count)(struct hnae_handle *handle, int stringset);
void (*update_led_status)(struct hnae_handle *handle);
int (*set_led_id)(struct hnae_handle *handle,
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index bc3e406f0139..8ce910f8d0cc 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -730,15 +730,14 @@ static void hns_ae_get_stats(struct hnae_handle *handle, u64 *data)
hns_dsaf_get_stats(vf_cb->dsaf_dev, p, vf_cb->port_index);
}
-static void hns_ae_get_strings(struct hnae_handle *handle,
- u32 stringset, u8 *data)
+static void hns_ae_get_strings(struct hnae_handle *handle, u32 stringset,
+ u8 **data)
{
int port;
int idx;
struct hns_mac_cb *mac_cb;
struct hns_ppe_cb *ppe_cb;
struct dsaf_device *dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
- u8 *p = data;
struct hnae_vf_cb *vf_cb;
assert(handle);
@@ -748,19 +747,14 @@ static void hns_ae_get_strings(struct hnae_handle *handle,
mac_cb = hns_get_mac_cb(handle);
ppe_cb = hns_get_ppe_cb(handle);
- for (idx = 0; idx < handle->q_num; idx++) {
- hns_rcb_get_strings(stringset, p, idx);
- p += ETH_GSTRING_LEN * hns_rcb_get_ring_sset_count(stringset);
- }
-
- hns_ppe_get_strings(ppe_cb, stringset, p);
- p += ETH_GSTRING_LEN * hns_ppe_get_sset_count(stringset);
+ for (idx = 0; idx < handle->q_num; idx++)
+ hns_rcb_get_strings(stringset, data, idx);
- hns_mac_get_strings(mac_cb, stringset, p);
- p += ETH_GSTRING_LEN * hns_mac_get_sset_count(mac_cb, stringset);
+ hns_ppe_get_strings(ppe_cb, stringset, data);
+ hns_mac_get_strings(mac_cb, stringset, data);
if (mac_cb->mac_type == HNAE_PORT_SERVICE)
- hns_dsaf_get_strings(stringset, p, port, dsaf_dev);
+ hns_dsaf_get_strings(stringset, data, port, dsaf_dev);
}
static int hns_ae_get_sset_count(struct hnae_handle *handle, int stringset)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
index bdb7afaabdd0..400933ca1a29 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -669,16 +669,15 @@ static void hns_gmac_get_stats(void *mac_drv, u64 *data)
}
}
-static void hns_gmac_get_strings(u32 stringset, u8 *data)
+static void hns_gmac_get_strings(u32 stringset, u8 **data)
{
- u8 *buff = data;
u32 i;
if (stringset != ETH_SS_STATS)
return;
for (i = 0; i < ARRAY_SIZE(g_gmac_stats_string); i++)
- ethtool_puts(&buff, g_gmac_stats_string[i].desc);
+ ethtool_puts(data, g_gmac_stats_string[i].desc);
}
static int hns_gmac_get_sset_count(int stringset)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index f75668c47935..bc6b269be299 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -734,7 +734,7 @@ hns_mac_register_phydev(struct mii_bus *mdio, struct hns_mac_cb *mac_cb,
return -ENODATA;
phy = get_phy_device(mdio, addr, is_c45);
- if (!phy || IS_ERR(phy))
+ if (IS_ERR_OR_NULL(phy))
return -EIO;
phy->irq = mdio->irq[addr];
@@ -933,6 +933,7 @@ static int hns_mac_get_info(struct hns_mac_cb *mac_cb)
mac_cb->cpld_ctrl = NULL;
} else {
syscon = syscon_node_to_regmap(cpld_args.np);
+ of_node_put(cpld_args.np);
if (IS_ERR_OR_NULL(syscon)) {
dev_dbg(mac_cb->dev, "no cpld-syscon found!\n");
mac_cb->cpld_ctrl = NULL;
@@ -1089,28 +1090,24 @@ int hns_mac_init(struct dsaf_device *dsaf_dev)
u32 port_id;
int max_port_num = hns_mac_get_max_port_num(dsaf_dev);
struct hns_mac_cb *mac_cb;
- struct fwnode_handle *child;
- device_for_each_child_node(dsaf_dev->dev, child) {
+ device_for_each_child_node_scoped(dsaf_dev->dev, child) {
ret = fwnode_property_read_u32(child, "reg", &port_id);
if (ret) {
- fwnode_handle_put(child);
dev_err(dsaf_dev->dev,
"get reg fail, ret=%d!\n", ret);
return ret;
}
if (port_id >= max_port_num) {
- fwnode_handle_put(child);
dev_err(dsaf_dev->dev,
"reg(%u) out of range!\n", port_id);
return -EINVAL;
}
mac_cb = devm_kzalloc(dsaf_dev->dev, sizeof(*mac_cb),
GFP_KERNEL);
- if (!mac_cb) {
- fwnode_handle_put(child);
+ if (!mac_cb)
return -ENOMEM;
- }
+
mac_cb->fw_port = child;
mac_cb->mac_id = (u8)port_id;
dsaf_dev->mac_cb[port_id] = mac_cb;
@@ -1193,8 +1190,7 @@ void hns_mac_get_stats(struct hns_mac_cb *mac_cb, u64 *data)
mac_ctrl_drv->get_ethtool_stats(mac_ctrl_drv, data);
}
-void hns_mac_get_strings(struct hns_mac_cb *mac_cb,
- int stringset, u8 *data)
+void hns_mac_get_strings(struct hns_mac_cb *mac_cb, int stringset, u8 **data)
{
struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
index edf0bcf76ac9..630f01cf7a71 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
@@ -378,7 +378,7 @@ struct mac_driver {
void (*get_regs)(void *mac_drv, void *data);
int (*get_regs_count)(void);
/* get strings name for ethtool statistic */
- void (*get_strings)(u32 stringset, u8 *data);
+ void (*get_strings)(u32 stringset, u8 **data);
/* get the number of strings*/
int (*get_sset_count)(int stringset);
@@ -445,7 +445,7 @@ int hns_mac_config_mac_loopback(struct hns_mac_cb *mac_cb,
enum hnae_loop loop, int en);
void hns_mac_update_stats(struct hns_mac_cb *mac_cb);
void hns_mac_get_stats(struct hns_mac_cb *mac_cb, u64 *data);
-void hns_mac_get_strings(struct hns_mac_cb *mac_cb, int stringset, u8 *data);
+void hns_mac_get_strings(struct hns_mac_cb *mac_cb, int stringset, u8 **data);
int hns_mac_get_sset_count(struct hns_mac_cb *mac_cb, int stringset);
void hns_mac_get_regs(struct hns_mac_cb *mac_cb, void *data);
int hns_mac_get_regs_count(struct hns_mac_cb *mac_cb);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 1b67da1f6fa8..6b6ced37e490 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -2590,55 +2590,34 @@ void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data)
p[i] = 0xdddddddd;
}
-static char *hns_dsaf_get_node_stats_strings(char *data, int node,
- struct dsaf_device *dsaf_dev)
+static void hns_dsaf_get_node_stats_strings(u8 **data, int node,
+ struct dsaf_device *dsaf_dev)
{
- char *buff = data;
- int i;
bool is_ver1 = AE_IS_VER1(dsaf_dev->dsaf_ver);
+ int i;
- snprintf(buff, ETH_GSTRING_LEN, "innod%d_pad_drop_pkts", node);
- buff += ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "innod%d_manage_pkts", node);
- buff += ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pkts", node);
- buff += ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pkt_id", node);
- buff += ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pause_frame", node);
- buff += ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "innod%d_release_buf_num", node);
- buff += ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "innod%d_sbm_drop_pkts", node);
- buff += ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "innod%d_crc_false_pkts", node);
- buff += ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "innod%d_bp_drop_pkts", node);
- buff += ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "innod%d_lookup_rslt_drop_pkts", node);
- buff += ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "innod%d_local_rslt_fail_pkts", node);
- buff += ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "innod%d_vlan_drop_pkts", node);
- buff += ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "innod%d_stp_drop_pkts", node);
- buff += ETH_GSTRING_LEN;
+ ethtool_sprintf(data, "innod%d_pad_drop_pkts", node);
+ ethtool_sprintf(data, "innod%d_manage_pkts", node);
+ ethtool_sprintf(data, "innod%d_rx_pkts", node);
+ ethtool_sprintf(data, "innod%d_rx_pkt_id", node);
+ ethtool_sprintf(data, "innod%d_rx_pause_frame", node);
+ ethtool_sprintf(data, "innod%d_release_buf_num", node);
+ ethtool_sprintf(data, "innod%d_sbm_drop_pkts", node);
+ ethtool_sprintf(data, "innod%d_crc_false_pkts", node);
+ ethtool_sprintf(data, "innod%d_bp_drop_pkts", node);
+ ethtool_sprintf(data, "innod%d_lookup_rslt_drop_pkts", node);
+ ethtool_sprintf(data, "innod%d_local_rslt_fail_pkts", node);
+ ethtool_sprintf(data, "innod%d_vlan_drop_pkts", node);
+ ethtool_sprintf(data, "innod%d_stp_drop_pkts", node);
if (node < DSAF_SERVICE_NW_NUM && !is_ver1) {
for (i = 0; i < DSAF_PRIO_NR; i++) {
- snprintf(buff + 0 * ETH_GSTRING_LEN * DSAF_PRIO_NR,
- ETH_GSTRING_LEN, "inod%d_pfc_prio%d_pkts",
- node, i);
- snprintf(buff + 1 * ETH_GSTRING_LEN * DSAF_PRIO_NR,
- ETH_GSTRING_LEN, "onod%d_pfc_prio%d_pkts",
- node, i);
- buff += ETH_GSTRING_LEN;
+ ethtool_sprintf(data, "inod%d_pfc_prio%d_pkts", node,
+ i);
+ ethtool_sprintf(data, "onod%d_pfc_prio%d_pkts", node,
+ i);
}
- buff += 1 * DSAF_PRIO_NR * ETH_GSTRING_LEN;
}
- snprintf(buff, ETH_GSTRING_LEN, "onnod%d_tx_pkts", node);
- buff += ETH_GSTRING_LEN;
-
- return buff;
+ ethtool_sprintf(data, "onnod%d_tx_pkts", node);
}
static u64 *hns_dsaf_get_node_stats(struct dsaf_device *ddev, u64 *data,
@@ -2720,21 +2699,20 @@ int hns_dsaf_get_sset_count(struct dsaf_device *dsaf_dev, int stringset)
*@port:port index
*@dsaf_dev: dsaf device
*/
-void hns_dsaf_get_strings(int stringset, u8 *data, int port,
+void hns_dsaf_get_strings(int stringset, u8 **data, int port,
struct dsaf_device *dsaf_dev)
{
- char *buff = (char *)data;
int node = port;
if (stringset != ETH_SS_STATS)
return;
/* for ge/xge node info */
- buff = hns_dsaf_get_node_stats_strings(buff, node, dsaf_dev);
+ hns_dsaf_get_node_stats_strings(data, node, dsaf_dev);
/* for ppe node info */
node = port + DSAF_PPE_INODE_BASE;
- (void)hns_dsaf_get_node_stats_strings(buff, node, dsaf_dev);
+ hns_dsaf_get_node_stats_strings(data, node, dsaf_dev);
}
/**
@@ -3031,7 +3009,7 @@ MODULE_DEVICE_TABLE(of, g_dsaf_match);
static struct platform_driver g_dsaf_driver = {
.probe = hns_dsaf_probe,
- .remove_new = hns_dsaf_remove,
+ .remove = hns_dsaf_remove,
.driver = {
.name = DSAF_DRV_NAME,
.of_match_table = g_dsaf_match,
@@ -3041,115 +3019,6 @@ static struct platform_driver g_dsaf_driver = {
module_platform_driver(g_dsaf_driver);
-/**
- * hns_dsaf_roce_reset - reset dsaf and roce
- * @dsaf_fwnode: Pointer to framework node for the dasf
- * @dereset: false - request reset , true - drop reset
- * return 0 - success , negative -fail
- */
-int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset)
-{
- struct dsaf_device *dsaf_dev;
- struct platform_device *pdev;
- u32 mp;
- u32 sl;
- u32 credit;
- int i;
- static const u32 port_map[DSAF_ROCE_CREDIT_CHN][DSAF_ROCE_CHAN_MODE_NUM] = {
- {DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0},
- {DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0},
- {DSAF_ROCE_PORT_2, DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0},
- {DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0},
- {DSAF_ROCE_PORT_4, DSAF_ROCE_PORT_2, DSAF_ROCE_PORT_1},
- {DSAF_ROCE_PORT_4, DSAF_ROCE_PORT_2, DSAF_ROCE_PORT_1},
- {DSAF_ROCE_PORT_5, DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1},
- {DSAF_ROCE_PORT_5, DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1},
- };
- static const u32 sl_map[DSAF_ROCE_CREDIT_CHN][DSAF_ROCE_CHAN_MODE_NUM] = {
- {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_0},
- {DSAF_ROCE_SL_0, DSAF_ROCE_SL_1, DSAF_ROCE_SL_1},
- {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_2},
- {DSAF_ROCE_SL_0, DSAF_ROCE_SL_1, DSAF_ROCE_SL_3},
- {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_0},
- {DSAF_ROCE_SL_1, DSAF_ROCE_SL_1, DSAF_ROCE_SL_1},
- {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_2},
- {DSAF_ROCE_SL_1, DSAF_ROCE_SL_1, DSAF_ROCE_SL_3},
- };
-
- /* find the platform device corresponding to fwnode */
- if (is_of_node(dsaf_fwnode)) {
- pdev = of_find_device_by_node(to_of_node(dsaf_fwnode));
- } else if (is_acpi_device_node(dsaf_fwnode)) {
- pdev = hns_dsaf_find_platform_device(dsaf_fwnode);
- } else {
- pr_err("fwnode is neither OF or ACPI type\n");
- return -EINVAL;
- }
-
- /* check if we were a success in fetching pdev */
- if (!pdev) {
- pr_err("couldn't find platform device for node\n");
- return -ENODEV;
- }
-
- /* retrieve the dsaf_device from the driver data */
- dsaf_dev = dev_get_drvdata(&pdev->dev);
- if (!dsaf_dev) {
- dev_err(&pdev->dev, "dsaf_dev is NULL\n");
- put_device(&pdev->dev);
- return -ENODEV;
- }
-
- /* now, make sure we are running on compatible SoC */
- if (AE_IS_VER1(dsaf_dev->dsaf_ver)) {
- dev_err(dsaf_dev->dev, "%s v1 chip doesn't support RoCE!\n",
- dsaf_dev->ae_dev.name);
- put_device(&pdev->dev);
- return -ENODEV;
- }
-
- /* do reset or de-reset according to the flag */
- if (!dereset) {
- /* reset rocee-channels in dsaf and rocee */
- dsaf_dev->misc_op->hns_dsaf_srst_chns(dsaf_dev, DSAF_CHNS_MASK,
- false);
- dsaf_dev->misc_op->hns_dsaf_roce_srst(dsaf_dev, false);
- } else {
- /* configure dsaf tx roce correspond to port map and sl map */
- mp = dsaf_read_dev(dsaf_dev, DSAF_ROCE_PORT_MAP_REG);
- for (i = 0; i < DSAF_ROCE_CREDIT_CHN; i++)
- dsaf_set_field(mp, 7 << i * 3, i * 3,
- port_map[i][DSAF_ROCE_6PORT_MODE]);
- dsaf_set_field(mp, 3 << i * 3, i * 3, 0);
- dsaf_write_dev(dsaf_dev, DSAF_ROCE_PORT_MAP_REG, mp);
-
- sl = dsaf_read_dev(dsaf_dev, DSAF_ROCE_SL_MAP_REG);
- for (i = 0; i < DSAF_ROCE_CREDIT_CHN; i++)
- dsaf_set_field(sl, 3 << i * 2, i * 2,
- sl_map[i][DSAF_ROCE_6PORT_MODE]);
- dsaf_write_dev(dsaf_dev, DSAF_ROCE_SL_MAP_REG, sl);
-
- /* de-reset rocee-channels in dsaf and rocee */
- dsaf_dev->misc_op->hns_dsaf_srst_chns(dsaf_dev, DSAF_CHNS_MASK,
- true);
- msleep(SRST_TIME_INTERVAL);
- dsaf_dev->misc_op->hns_dsaf_roce_srst(dsaf_dev, true);
-
- /* enable dsaf channel rocee credit */
- credit = dsaf_read_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG);
- dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 0);
- dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit);
-
- dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 1);
- dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit);
- }
-
- put_device(&pdev->dev);
-
- return 0;
-}
-EXPORT_SYMBOL(hns_dsaf_roce_reset);
-
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HNS DSAF driver");
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
index 5526a10caac5..653dfbb25d1b 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -42,29 +42,6 @@ struct hns_mac_cb;
#define HNS_MAX_WAIT_CNT 10000
-enum dsaf_roce_port_mode {
- DSAF_ROCE_6PORT_MODE,
- DSAF_ROCE_4PORT_MODE,
- DSAF_ROCE_2PORT_MODE,
- DSAF_ROCE_CHAN_MODE_NUM,
-};
-
-enum dsaf_roce_port_num {
- DSAF_ROCE_PORT_0,
- DSAF_ROCE_PORT_1,
- DSAF_ROCE_PORT_2,
- DSAF_ROCE_PORT_3,
- DSAF_ROCE_PORT_4,
- DSAF_ROCE_PORT_5,
-};
-
-enum dsaf_roce_qos_sl {
- DSAF_ROCE_SL_0,
- DSAF_ROCE_SL_1,
- DSAF_ROCE_SL_2,
- DSAF_ROCE_SL_3,
-};
-
#define DSAF_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HNS_DSAF_IS_DEBUG(dev) ((dev)->dsaf_mode == DSAF_MODE_DISABLE_SP)
@@ -307,9 +284,6 @@ struct dsaf_misc_op {
void (*ge_srst)(struct dsaf_device *dsaf_dev, u32 port, bool dereset);
void (*ppe_srst)(struct dsaf_device *dsaf_dev, u32 port, bool dereset);
void (*ppe_comm_srst)(struct dsaf_device *dsaf_dev, bool dereset);
- void (*hns_dsaf_srst_chns)(struct dsaf_device *dsaf_dev, u32 msk,
- bool dereset);
- void (*hns_dsaf_roce_srst)(struct dsaf_device *dsaf_dev, bool dereset);
phy_interface_t (*get_phy_if)(struct hns_mac_cb *mac_cb);
int (*get_sfp_prsnt)(struct hns_mac_cb *mac_cb, int *sfp_prsnt);
@@ -442,7 +416,7 @@ void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 inode_num);
int hns_dsaf_get_sset_count(struct dsaf_device *dsaf_dev, int stringset);
void hns_dsaf_get_stats(struct dsaf_device *ddev, u64 *data, int port);
-void hns_dsaf_get_strings(int stringset, u8 *data, int port,
+void hns_dsaf_get_strings(int stringset, u8 **data, int port,
struct dsaf_device *dsaf_dev);
void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data);
@@ -463,6 +437,4 @@ int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev,
u8 mac_id, u8 port_num);
int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port);
-int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset);
-
#endif /* __HNS_DSAF_MAIN_H__ */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index 5df19c604d09..91391a49fcea 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -326,69 +326,6 @@ static void hns_dsaf_xge_srst_by_port_acpi(struct dsaf_device *dsaf_dev,
HNS_XGE_RESET_FUNC, port, dereset);
}
-/**
- * hns_dsaf_srst_chns - reset dsaf channels
- * @dsaf_dev: dsaf device struct pointer
- * @msk: xbar channels mask value:
- * @dereset: false - request reset , true - drop reset
- *
- * bit0-5 for xge0-5
- * bit6-11 for ppe0-5
- * bit12-17 for roce0-5
- * bit18-19 for com/dfx
- */
-static void
-hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
-{
- u32 reg_addr;
-
- if (!dereset)
- reg_addr = DSAF_SUB_SC_DSAF_RESET_REQ_REG;
- else
- reg_addr = DSAF_SUB_SC_DSAF_RESET_DREQ_REG;
-
- dsaf_write_sub(dsaf_dev, reg_addr, msk);
-}
-
-/**
- * hns_dsaf_srst_chns_acpi - reset dsaf channels
- * @dsaf_dev: dsaf device struct pointer
- * @msk: xbar channels mask value:
- * @dereset: false - request reset , true - drop reset
- *
- * bit0-5 for xge0-5
- * bit6-11 for ppe0-5
- * bit12-17 for roce0-5
- * bit18-19 for com/dfx
- */
-static void
-hns_dsaf_srst_chns_acpi(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
-{
- hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
- HNS_DSAF_CHN_RESET_FUNC,
- msk, dereset);
-}
-
-static void hns_dsaf_roce_srst(struct dsaf_device *dsaf_dev, bool dereset)
-{
- if (!dereset) {
- dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_ROCEE_RESET_REQ_REG, 1);
- } else {
- dsaf_write_sub(dsaf_dev,
- DSAF_SUB_SC_ROCEE_CLK_DIS_REG, 1);
- dsaf_write_sub(dsaf_dev,
- DSAF_SUB_SC_ROCEE_RESET_DREQ_REG, 1);
- msleep(20);
- dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_ROCEE_CLK_EN_REG, 1);
- }
-}
-
-static void hns_dsaf_roce_srst_acpi(struct dsaf_device *dsaf_dev, bool dereset)
-{
- hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
- HNS_ROCE_RESET_FUNC, 0, dereset);
-}
-
static void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port,
bool dereset)
{
@@ -729,8 +666,6 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev)
misc_op->ge_srst = hns_dsaf_ge_srst_by_port;
misc_op->ppe_srst = hns_ppe_srst_by_port;
misc_op->ppe_comm_srst = hns_ppe_com_srst;
- misc_op->hns_dsaf_srst_chns = hns_dsaf_srst_chns;
- misc_op->hns_dsaf_roce_srst = hns_dsaf_roce_srst;
misc_op->get_phy_if = hns_mac_get_phy_if;
misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt;
@@ -746,8 +681,6 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev)
misc_op->ge_srst = hns_dsaf_ge_srst_by_port_acpi;
misc_op->ppe_srst = hns_ppe_srst_by_port_acpi;
misc_op->ppe_comm_srst = hns_ppe_com_srst;
- misc_op->hns_dsaf_srst_chns = hns_dsaf_srst_chns_acpi;
- misc_op->hns_dsaf_roce_srst = hns_dsaf_roce_srst_acpi;
misc_op->get_phy_if = hns_mac_get_phy_if_acpi;
misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt_acpi;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
index a08d1f0a5a16..5013beb4d282 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
@@ -457,24 +457,23 @@ int hns_ppe_get_regs_count(void)
* @stringset: string set type
* @data: output string
*/
-void hns_ppe_get_strings(struct hns_ppe_cb *ppe_cb, int stringset, u8 *data)
+void hns_ppe_get_strings(struct hns_ppe_cb *ppe_cb, int stringset, u8 **data)
{
int index = ppe_cb->index;
- u8 *buff = data;
-
- ethtool_sprintf(&buff, "ppe%d_rx_sw_pkt", index);
- ethtool_sprintf(&buff, "ppe%d_rx_pkt_ok", index);
- ethtool_sprintf(&buff, "ppe%d_rx_drop_pkt_no_bd", index);
- ethtool_sprintf(&buff, "ppe%d_rx_alloc_buf_fail", index);
- ethtool_sprintf(&buff, "ppe%d_rx_alloc_buf_wait", index);
- ethtool_sprintf(&buff, "ppe%d_rx_pkt_drop_no_buf", index);
- ethtool_sprintf(&buff, "ppe%d_rx_pkt_err_fifo_full", index);
-
- ethtool_sprintf(&buff, "ppe%d_tx_bd", index);
- ethtool_sprintf(&buff, "ppe%d_tx_pkt", index);
- ethtool_sprintf(&buff, "ppe%d_tx_pkt_ok", index);
- ethtool_sprintf(&buff, "ppe%d_tx_pkt_err_fifo_empty", index);
- ethtool_sprintf(&buff, "ppe%d_tx_pkt_err_csum_fail", index);
+
+ ethtool_sprintf(data, "ppe%d_rx_sw_pkt", index);
+ ethtool_sprintf(data, "ppe%d_rx_pkt_ok", index);
+ ethtool_sprintf(data, "ppe%d_rx_drop_pkt_no_bd", index);
+ ethtool_sprintf(data, "ppe%d_rx_alloc_buf_fail", index);
+ ethtool_sprintf(data, "ppe%d_rx_alloc_buf_wait", index);
+ ethtool_sprintf(data, "ppe%d_rx_pkt_drop_no_buf", index);
+ ethtool_sprintf(data, "ppe%d_rx_pkt_err_fifo_full", index);
+
+ ethtool_sprintf(data, "ppe%d_tx_bd", index);
+ ethtool_sprintf(data, "ppe%d_tx_pkt", index);
+ ethtool_sprintf(data, "ppe%d_tx_pkt_ok", index);
+ ethtool_sprintf(data, "ppe%d_tx_pkt_err_fifo_empty", index);
+ ethtool_sprintf(data, "ppe%d_tx_pkt_err_csum_fail", index);
}
void hns_ppe_get_stats(struct hns_ppe_cb *ppe_cb, u64 *data)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
index 7e00231c1acf..602c8e971fe4 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
@@ -109,7 +109,7 @@ int hns_ppe_get_sset_count(int stringset);
int hns_ppe_get_regs_count(void);
void hns_ppe_get_regs(struct hns_ppe_cb *ppe_cb, void *data);
-void hns_ppe_get_strings(struct hns_ppe_cb *ppe_cb, int stringset, u8 *data);
+void hns_ppe_get_strings(struct hns_ppe_cb *ppe_cb, int stringset, u8 **data);
void hns_ppe_get_stats(struct hns_ppe_cb *ppe_cb, u64 *data);
void hns_ppe_set_tso_enable(struct hns_ppe_cb *ppe_cb, u32 value);
void hns_ppe_set_rss_key(struct hns_ppe_cb *ppe_cb,
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index 93344563a259..635b3a95dd82 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -195,11 +195,6 @@ void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val)
dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, !!val);
}
-void hns_rcb_start(struct hnae_queue *q, u32 val)
-{
- hns_rcb_ring_enable_hw(q, val);
-}
-
/**
*hns_rcb_common_init_commit_hw - make rcb common init completed
*@rcb_common: rcb common device
@@ -923,44 +918,42 @@ int hns_rcb_get_ring_regs_count(void)
*@data:strings name value
*@index:queue index
*/
-void hns_rcb_get_strings(int stringset, u8 *data, int index)
+void hns_rcb_get_strings(int stringset, u8 **data, int index)
{
- u8 *buff = data;
-
if (stringset != ETH_SS_STATS)
return;
- ethtool_sprintf(&buff, "tx_ring%d_rcb_pkt_num", index);
- ethtool_sprintf(&buff, "tx_ring%d_ppe_tx_pkt_num", index);
- ethtool_sprintf(&buff, "tx_ring%d_ppe_drop_pkt_num", index);
- ethtool_sprintf(&buff, "tx_ring%d_fbd_num", index);
-
- ethtool_sprintf(&buff, "tx_ring%d_pkt_num", index);
- ethtool_sprintf(&buff, "tx_ring%d_bytes", index);
- ethtool_sprintf(&buff, "tx_ring%d_err_cnt", index);
- ethtool_sprintf(&buff, "tx_ring%d_io_err", index);
- ethtool_sprintf(&buff, "tx_ring%d_sw_err", index);
- ethtool_sprintf(&buff, "tx_ring%d_seg_pkt", index);
- ethtool_sprintf(&buff, "tx_ring%d_restart_queue", index);
- ethtool_sprintf(&buff, "tx_ring%d_tx_busy", index);
-
- ethtool_sprintf(&buff, "rx_ring%d_rcb_pkt_num", index);
- ethtool_sprintf(&buff, "rx_ring%d_ppe_pkt_num", index);
- ethtool_sprintf(&buff, "rx_ring%d_ppe_drop_pkt_num", index);
- ethtool_sprintf(&buff, "rx_ring%d_fbd_num", index);
-
- ethtool_sprintf(&buff, "rx_ring%d_pkt_num", index);
- ethtool_sprintf(&buff, "rx_ring%d_bytes", index);
- ethtool_sprintf(&buff, "rx_ring%d_err_cnt", index);
- ethtool_sprintf(&buff, "rx_ring%d_io_err", index);
- ethtool_sprintf(&buff, "rx_ring%d_sw_err", index);
- ethtool_sprintf(&buff, "rx_ring%d_seg_pkt", index);
- ethtool_sprintf(&buff, "rx_ring%d_reuse_pg", index);
- ethtool_sprintf(&buff, "rx_ring%d_len_err", index);
- ethtool_sprintf(&buff, "rx_ring%d_non_vld_desc_err", index);
- ethtool_sprintf(&buff, "rx_ring%d_bd_num_err", index);
- ethtool_sprintf(&buff, "rx_ring%d_l2_err", index);
- ethtool_sprintf(&buff, "rx_ring%d_l3l4csum_err", index);
+ ethtool_sprintf(data, "tx_ring%d_rcb_pkt_num", index);
+ ethtool_sprintf(data, "tx_ring%d_ppe_tx_pkt_num", index);
+ ethtool_sprintf(data, "tx_ring%d_ppe_drop_pkt_num", index);
+ ethtool_sprintf(data, "tx_ring%d_fbd_num", index);
+
+ ethtool_sprintf(data, "tx_ring%d_pkt_num", index);
+ ethtool_sprintf(data, "tx_ring%d_bytes", index);
+ ethtool_sprintf(data, "tx_ring%d_err_cnt", index);
+ ethtool_sprintf(data, "tx_ring%d_io_err", index);
+ ethtool_sprintf(data, "tx_ring%d_sw_err", index);
+ ethtool_sprintf(data, "tx_ring%d_seg_pkt", index);
+ ethtool_sprintf(data, "tx_ring%d_restart_queue", index);
+ ethtool_sprintf(data, "tx_ring%d_tx_busy", index);
+
+ ethtool_sprintf(data, "rx_ring%d_rcb_pkt_num", index);
+ ethtool_sprintf(data, "rx_ring%d_ppe_pkt_num", index);
+ ethtool_sprintf(data, "rx_ring%d_ppe_drop_pkt_num", index);
+ ethtool_sprintf(data, "rx_ring%d_fbd_num", index);
+
+ ethtool_sprintf(data, "rx_ring%d_pkt_num", index);
+ ethtool_sprintf(data, "rx_ring%d_bytes", index);
+ ethtool_sprintf(data, "rx_ring%d_err_cnt", index);
+ ethtool_sprintf(data, "rx_ring%d_io_err", index);
+ ethtool_sprintf(data, "rx_ring%d_sw_err", index);
+ ethtool_sprintf(data, "rx_ring%d_seg_pkt", index);
+ ethtool_sprintf(data, "rx_ring%d_reuse_pg", index);
+ ethtool_sprintf(data, "rx_ring%d_len_err", index);
+ ethtool_sprintf(data, "rx_ring%d_non_vld_desc_err", index);
+ ethtool_sprintf(data, "rx_ring%d_bd_num_err", index);
+ ethtool_sprintf(data, "rx_ring%d_l2_err", index);
+ ethtool_sprintf(data, "rx_ring%d_l3l4csum_err", index);
}
void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
index c1e9b6997853..68f81547dfb4 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
@@ -116,7 +116,6 @@ int hns_rcb_buf_size2type(u32 buf_size);
int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index);
void hns_rcb_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index);
int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common);
-void hns_rcb_start(struct hnae_queue *q, u32 val);
int hns_rcb_get_cfg(struct rcb_common_cb *rcb_common);
void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode,
u16 *max_vfn, u16 *max_q_per_vf);
@@ -157,7 +156,7 @@ int hns_rcb_get_ring_regs_count(void);
void hns_rcb_get_ring_regs(struct hnae_queue *queue, void *data);
-void hns_rcb_get_strings(int stringset, u8 *data, int index);
+void hns_rcb_get_strings(int stringset, u8 **data, int index);
void hns_rcb_set_rx_ring_bs(struct hnae_queue *q, u32 buf_size);
void hns_rcb_set_tx_ring_bs(struct hnae_queue *q, u32 buf_size);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
index c58833eb4830..dbc44c2c26c2 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
@@ -743,16 +743,15 @@ static void hns_xgmac_get_stats(void *mac_drv, u64 *data)
*@stringset: type of values in data
*@data:data for value of string name
*/
-static void hns_xgmac_get_strings(u32 stringset, u8 *data)
+static void hns_xgmac_get_strings(u32 stringset, u8 **data)
{
- u8 *buff = data;
u32 i;
if (stringset != ETH_SS_STATS)
return;
for (i = 0; i < ARRAY_SIZE(g_xgmac_stats_string); i++)
- ethtool_puts(&buff, g_xgmac_stats_string[i].desc);
+ ethtool_puts(data, g_xgmac_stats_string[i].desc);
}
/**
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 8a713eed4465..e905f10b894e 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -1402,7 +1402,7 @@ static void hns_nic_net_down(struct net_device *ndev)
if (test_and_set_bit(NIC_STATE_DOWN, &priv->state))
return;
- (void)del_timer_sync(&priv->service_timer);
+ (void) timer_delete_sync(&priv->service_timer);
netif_tx_stop_all_queues(ndev);
netif_carrier_off(ndev);
netif_tx_disable(ndev);
@@ -1777,7 +1777,7 @@ static int hns_nic_change_mtu(struct net_device *ndev, int new_mtu)
}
/* finally, set new mtu to netdevice */
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
out:
if (if_running) {
@@ -2075,7 +2075,7 @@ static void hns_nic_task_schedule(struct hns_nic_priv *priv)
static void hns_nic_service_timer(struct timer_list *t)
{
- struct hns_nic_priv *priv = from_timer(priv, t, service_timer);
+ struct hns_nic_priv *priv = timer_container_of(priv, t, service_timer);
(void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);
@@ -2439,7 +2439,7 @@ static struct platform_driver hns_nic_dev_driver = {
.acpi_match_table = ACPI_PTR(hns_enet_acpi_match),
},
.probe = hns_nic_dev_probe,
- .remove_new = hns_nic_dev_remove,
+ .remove = hns_nic_dev_remove,
};
module_platform_driver(hns_nic_dev_driver);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index a5bb306b2cf1..60a586a951a0 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -266,9 +266,9 @@ static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en)
if (err)
goto out;
- err = phy_loopback(phy_dev, true);
+ err = phy_loopback(phy_dev, true, 0);
} else {
- err = phy_loopback(phy_dev, false);
+ err = phy_loopback(phy_dev, false, 0);
if (err)
goto out;
@@ -903,7 +903,6 @@ static void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
struct hnae_handle *h = priv->ae_handle;
- u8 *buff = data;
if (!h->dev->ops->get_strings) {
netdev_err(netdev, "h->dev->ops->get_strings is null!\n");
@@ -912,43 +911,43 @@ static void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
if (stringset == ETH_SS_TEST) {
if (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII)
- ethtool_puts(&buff,
+ ethtool_puts(&data,
hns_nic_test_strs[MAC_INTERNALLOOP_MAC]);
- ethtool_puts(&buff, hns_nic_test_strs[MAC_INTERNALLOOP_SERDES]);
+ ethtool_puts(&data, hns_nic_test_strs[MAC_INTERNALLOOP_SERDES]);
if ((netdev->phydev) && (!netdev->phydev->is_c45))
- ethtool_puts(&buff,
+ ethtool_puts(&data,
hns_nic_test_strs[MAC_INTERNALLOOP_PHY]);
} else {
- ethtool_puts(&buff, "rx_packets");
- ethtool_puts(&buff, "tx_packets");
- ethtool_puts(&buff, "rx_bytes");
- ethtool_puts(&buff, "tx_bytes");
- ethtool_puts(&buff, "rx_errors");
- ethtool_puts(&buff, "tx_errors");
- ethtool_puts(&buff, "rx_dropped");
- ethtool_puts(&buff, "tx_dropped");
- ethtool_puts(&buff, "multicast");
- ethtool_puts(&buff, "collisions");
- ethtool_puts(&buff, "rx_over_errors");
- ethtool_puts(&buff, "rx_crc_errors");
- ethtool_puts(&buff, "rx_frame_errors");
- ethtool_puts(&buff, "rx_fifo_errors");
- ethtool_puts(&buff, "rx_missed_errors");
- ethtool_puts(&buff, "tx_aborted_errors");
- ethtool_puts(&buff, "tx_carrier_errors");
- ethtool_puts(&buff, "tx_fifo_errors");
- ethtool_puts(&buff, "tx_heartbeat_errors");
- ethtool_puts(&buff, "rx_length_errors");
- ethtool_puts(&buff, "tx_window_errors");
- ethtool_puts(&buff, "rx_compressed");
- ethtool_puts(&buff, "tx_compressed");
- ethtool_puts(&buff, "netdev_rx_dropped");
- ethtool_puts(&buff, "netdev_tx_dropped");
-
- ethtool_puts(&buff, "netdev_tx_timeout");
-
- h->dev->ops->get_strings(h, stringset, buff);
+ ethtool_puts(&data, "rx_packets");
+ ethtool_puts(&data, "tx_packets");
+ ethtool_puts(&data, "rx_bytes");
+ ethtool_puts(&data, "tx_bytes");
+ ethtool_puts(&data, "rx_errors");
+ ethtool_puts(&data, "tx_errors");
+ ethtool_puts(&data, "rx_dropped");
+ ethtool_puts(&data, "tx_dropped");
+ ethtool_puts(&data, "multicast");
+ ethtool_puts(&data, "collisions");
+ ethtool_puts(&data, "rx_over_errors");
+ ethtool_puts(&data, "rx_crc_errors");
+ ethtool_puts(&data, "rx_frame_errors");
+ ethtool_puts(&data, "rx_fifo_errors");
+ ethtool_puts(&data, "rx_missed_errors");
+ ethtool_puts(&data, "tx_aborted_errors");
+ ethtool_puts(&data, "tx_carrier_errors");
+ ethtool_puts(&data, "tx_fifo_errors");
+ ethtool_puts(&data, "tx_heartbeat_errors");
+ ethtool_puts(&data, "rx_length_errors");
+ ethtool_puts(&data, "tx_window_errors");
+ ethtool_puts(&data, "rx_compressed");
+ ethtool_puts(&data, "tx_compressed");
+ ethtool_puts(&data, "netdev_rx_dropped");
+ ethtool_puts(&data, "netdev_tx_dropped");
+
+ ethtool_puts(&data, "netdev_tx_timeout");
+
+ h->dev->ops->get_strings(h, stringset, &data);
}
}
@@ -970,7 +969,7 @@ static int hns_get_sset_count(struct net_device *netdev, int stringset)
return -EOPNOTSUPP;
}
if (stringset == ETH_SS_TEST) {
- u32 cnt = (sizeof(hns_nic_test_strs) / ETH_GSTRING_LEN);
+ u32 cnt = ARRAY_SIZE(hns_nic_test_strs);
if (priv->ae_handle->phy_if == PHY_INTERFACE_MODE_XGMII)
cnt--;
diff --git a/drivers/net/ethernet/hisilicon/hns3/Makefile b/drivers/net/ethernet/hisilicon/hns3/Makefile
index e214bfaece1f..e8af26da1fc1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/Makefile
@@ -3,7 +3,7 @@
# Makefile for the HISILICON network device drivers.
#
-ccflags-y += -I$(srctree)/$(src)
+ccflags-y += -I$(src)
ccflags-y += -I$(srctree)/drivers/net/ethernet/hisilicon/hns3/hns3pf
ccflags-y += -I$(srctree)/drivers/net/ethernet/hisilicon/hns3/hns3vf
ccflags-y += -I$(srctree)/drivers/net/ethernet/hisilicon/hns3/hns3_common
@@ -15,15 +15,14 @@ hns3-objs = hns3_enet.o hns3_ethtool.o hns3_debugfs.o
hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o
-obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o
+obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o hclge-common.o
-hclgevf-objs = hns3vf/hclgevf_main.o hns3vf/hclgevf_mbx.o hns3vf/hclgevf_devlink.o hns3vf/hclgevf_regs.o \
- hns3_common/hclge_comm_cmd.o hns3_common/hclge_comm_rss.o hns3_common/hclge_comm_tqp_stats.o
+hclge-common-objs += hns3_common/hclge_comm_cmd.o hns3_common/hclge_comm_rss.o hns3_common/hclge_comm_tqp_stats.o
-obj-$(CONFIG_HNS3_HCLGE) += hclge.o
+hclgevf-objs = hns3vf/hclgevf_main.o hns3vf/hclgevf_mbx.o hns3vf/hclgevf_devlink.o hns3vf/hclgevf_regs.o
+
+obj-$(CONFIG_HNS3_HCLGE) += hclge.o hclge-common.o
hclge-objs = hns3pf/hclge_main.o hns3pf/hclge_mdio.o hns3pf/hclge_tm.o hns3pf/hclge_regs.o \
hns3pf/hclge_mbx.o hns3pf/hclge_err.o hns3pf/hclge_debugfs.o hns3pf/hclge_ptp.o hns3pf/hclge_devlink.o \
- hns3_common/hclge_comm_cmd.o hns3_common/hclge_comm_rss.o hns3_common/hclge_comm_tqp_stats.o
-
hclge-$(CONFIG_HNS3_DCB) += hns3pf/hclge_dcb.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
index 67b0bf310daa..b25fb400f476 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
@@ -25,8 +25,11 @@ void hnae3_unregister_ae_algo_prepare(struct hnae3_ae_algo *ae_algo)
pci_id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
if (!pci_id)
continue;
- if (IS_ENABLED(CONFIG_PCI_IOV))
+ if (IS_ENABLED(CONFIG_PCI_IOV)) {
+ device_lock(&ae_dev->pdev->dev);
pci_disable_sriov(ae_dev->pdev);
+ device_unlock(&ae_dev->pdev->dev);
+ }
}
}
EXPORT_SYMBOL(hnae3_unregister_ae_algo_prepare);
@@ -37,6 +40,21 @@ EXPORT_SYMBOL(hnae3_unregister_ae_algo_prepare);
*/
static DEFINE_MUTEX(hnae3_common_lock);
+/* ensure the drivers are unloaded one by one */
+static DEFINE_MUTEX(hnae3_unload_lock);
+
+void hnae3_acquire_unload_lock(void)
+{
+ mutex_lock(&hnae3_unload_lock);
+}
+EXPORT_SYMBOL(hnae3_acquire_unload_lock);
+
+void hnae3_release_unload_lock(void)
+{
+ mutex_unlock(&hnae3_unload_lock);
+}
+EXPORT_SYMBOL(hnae3_release_unload_lock);
+
static bool hnae3_client_match(enum hnae3_client_type client_type)
{
if (client_type == HNAE3_CLIENT_KNIC ||
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index f19f1e1d1f9f..d7c3df1958f3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -104,6 +104,7 @@ enum HNAE3_DEV_CAP_BITS {
HNAE3_DEV_SUPPORT_WOL_B,
HNAE3_DEV_SUPPORT_TM_FLUSH_B,
HNAE3_DEV_SUPPORT_VF_FAULT_B,
+ HNAE3_DEV_SUPPORT_ERR_MOD_GEN_REG_B,
};
#define hnae3_ae_dev_fd_supported(ae_dev) \
@@ -181,6 +182,9 @@ enum HNAE3_DEV_CAP_BITS {
#define hnae3_ae_dev_vf_fault_supported(ae_dev) \
test_bit(HNAE3_DEV_SUPPORT_VF_FAULT_B, (ae_dev)->caps)
+#define hnae3_ae_dev_gen_reg_dfx_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_ERR_MOD_GEN_REG_B, (hdev)->ae_dev->caps)
+
enum HNAE3_PF_CAP_BITS {
HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B = 0,
};
@@ -335,6 +339,10 @@ enum hnae3_dbg_cmd {
HNAE3_DBG_CMD_UNKNOWN,
};
+#define hnae3_seq_file_to_ae_dev(s) (dev_get_drvdata((s)->private))
+#define hnae3_seq_file_to_handle(s) \
+ (((struct hnae3_ae_dev *)hnae3_seq_file_to_ae_dev(s))->handle)
+
enum hnae3_tc_map_mode {
HNAE3_TC_MAP_MODE_PRIO,
HNAE3_TC_MAP_MODE_DSCP,
@@ -362,6 +370,15 @@ struct hnae3_vector_info {
#define HNAE3_FW_VERSION_BYTE0_SHIFT 0
#define HNAE3_FW_VERSION_BYTE0_MASK GENMASK(7, 0)
+#define HNAE3_SCC_VERSION_BYTE3_SHIFT 24
+#define HNAE3_SCC_VERSION_BYTE3_MASK GENMASK(31, 24)
+#define HNAE3_SCC_VERSION_BYTE2_SHIFT 16
+#define HNAE3_SCC_VERSION_BYTE2_MASK GENMASK(23, 16)
+#define HNAE3_SCC_VERSION_BYTE1_SHIFT 8
+#define HNAE3_SCC_VERSION_BYTE1_MASK GENMASK(15, 8)
+#define HNAE3_SCC_VERSION_BYTE0_SHIFT 0
+#define HNAE3_SCC_VERSION_BYTE0_MASK GENMASK(7, 0)
+
struct hnae3_ring_chain_node {
struct hnae3_ring_chain_node *next;
u32 tqp_index;
@@ -421,8 +438,11 @@ struct hnae3_ae_dev {
u32 dev_version;
DECLARE_BITMAP(caps, HNAE3_DEV_CAPS_MAX_NUM);
void *priv;
+ struct hnae3_handle *handle;
};
+typedef int (*read_func)(struct seq_file *s, void *data);
+
/* This struct defines the operation on the handle.
*
* init_ae_dev(): (mandatory)
@@ -567,8 +587,6 @@ struct hnae3_ae_dev {
* Delete clsflower rule
* cls_flower_active
* Check if any cls flower rule exist
- * dbg_read_cmd
- * Execute debugfs read command.
* set_tx_hwts_info
* Save information for 1588 tx packet
* get_rx_hwts
@@ -581,6 +599,8 @@ struct hnae3_ae_dev {
* Get wake on lan info
* set_wol
* Config wake on lan
+ * dbg_get_read_func
+ * Return the read func for debugfs seq file
*/
struct hnae3_ae_ops {
int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
@@ -664,7 +684,7 @@ struct hnae3_ae_ops {
void (*get_mac_stats)(struct hnae3_handle *handle,
struct hns3_mac_stats *mac_stats);
void (*get_strings)(struct hnae3_handle *handle,
- u32 stringset, u8 *data);
+ u32 stringset, u8 **data);
int (*get_sset_count)(struct hnae3_handle *handle, int stringset);
void (*get_regs)(struct hnae3_handle *handle, u32 *version,
@@ -677,9 +697,9 @@ struct hnae3_ae_ops {
int (*set_rss)(struct hnae3_handle *handle, const u32 *indir,
const u8 *key, const u8 hfunc);
int (*set_rss_tuple)(struct hnae3_handle *handle,
- struct ethtool_rxnfc *cmd);
+ const struct ethtool_rxfh_fields *cmd);
int (*get_rss_tuple)(struct hnae3_handle *handle,
- struct ethtool_rxnfc *cmd);
+ struct ethtool_rxfh_fields *cmd);
int (*get_tc_size)(struct hnae3_handle *handle);
@@ -735,8 +755,6 @@ struct hnae3_ae_ops {
void (*enable_fd)(struct hnae3_handle *handle, bool enable);
int (*add_arfs_entry)(struct hnae3_handle *handle, u16 queue_id,
u16 flow_id, struct flow_keys *fkeys);
- int (*dbg_read_cmd)(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
- char *buf, int len);
pci_ers_result_t (*handle_hw_ras_error)(struct hnae3_ae_dev *ae_dev);
bool (*get_hw_reset_stat)(struct hnae3_handle *handle);
bool (*ae_dev_resetting)(struct hnae3_handle *handle);
@@ -773,7 +791,7 @@ struct hnae3_ae_ops {
void (*get_rx_hwts)(struct hnae3_handle *handle, struct sk_buff *skb,
u32 nsec, u32 sec);
int (*get_ts_info)(struct hnae3_handle *handle,
- struct ethtool_ts_info *info);
+ struct kernel_ethtool_ts_info *info);
int (*get_link_diagnosis_info)(struct hnae3_handle *handle,
u32 *status_code);
void (*clean_vf_config)(struct hnae3_ae_dev *ae_dev, int num_vfs);
@@ -783,6 +801,14 @@ struct hnae3_ae_ops {
struct ethtool_wolinfo *wol);
int (*set_wol)(struct hnae3_handle *handle,
struct ethtool_wolinfo *wol);
+ int (*dbg_get_read_func)(struct hnae3_handle *handle,
+ enum hnae3_dbg_cmd cmd,
+ read_func *func);
+ int (*hwtstamp_get)(struct hnae3_handle *handle,
+ struct kernel_hwtstamp_config *config);
+ int (*hwtstamp_set)(struct hnae3_handle *handle,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
};
struct hnae3_dcb_ops {
@@ -897,15 +923,12 @@ struct hnae3_handle {
struct hnae3_roce_private_info rinfo;
};
- u32 numa_node_mask; /* for multi-chip support */
+ nodemask_t numa_node_mask; /* for multi-chip support */
enum hnae3_port_base_vlan_state port_base_vlan_state;
u8 netdev_flags;
struct dentry *hnae3_dbgfs;
- /* protects concurrent contention between debugfs commands */
- struct mutex dbgfs_lock;
- char **dbgfs_buf;
/* Network interface message level enabled bits */
u32 msg_enable;
@@ -953,4 +976,6 @@ int hnae3_register_client(struct hnae3_client *client);
void hnae3_set_client_init_flag(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev,
unsigned int inited);
+void hnae3_acquire_unload_lock(void);
+void hnae3_release_unload_lock(void);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
index 652d71326231..37396ca4ecfc 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
@@ -48,6 +48,7 @@ void hclge_comm_cmd_reuse_desc(struct hclge_desc *desc, bool is_read)
else
desc->flag &= cpu_to_le16(~HCLGE_COMM_CMD_FLAG_WR);
}
+EXPORT_SYMBOL_GPL(hclge_comm_cmd_reuse_desc);
static void hclge_comm_set_default_capability(struct hnae3_ae_dev *ae_dev,
bool is_pf)
@@ -72,6 +73,7 @@ void hclge_comm_cmd_setup_basic_desc(struct hclge_desc *desc,
if (is_read)
desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_WR);
}
+EXPORT_SYMBOL_GPL(hclge_comm_cmd_setup_basic_desc);
int hclge_comm_firmware_compat_config(struct hnae3_ae_dev *ae_dev,
struct hclge_comm_hw *hw, bool en)
@@ -158,6 +160,7 @@ static const struct hclge_comm_caps_bit_map hclge_pf_cmd_caps[] = {
{HCLGE_COMM_CAP_WOL_B, HNAE3_DEV_SUPPORT_WOL_B},
{HCLGE_COMM_CAP_TM_FLUSH_B, HNAE3_DEV_SUPPORT_TM_FLUSH_B},
{HCLGE_COMM_CAP_VF_FAULT_B, HNAE3_DEV_SUPPORT_VF_FAULT_B},
+ {HCLGE_COMM_CAP_ERR_MOD_GEN_REG_B, HNAE3_DEV_SUPPORT_ERR_MOD_GEN_REG_B},
};
static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = {
@@ -345,7 +348,7 @@ static int hclge_comm_cmd_csq_clean(struct hclge_comm_hw *hw)
static int hclge_comm_cmd_csq_done(struct hclge_comm_hw *hw)
{
u32 head = hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG);
- return head == hw->cmq.csq.next_to_use;
+ return head == (u32)hw->cmq.csq.next_to_use;
}
static u32 hclge_get_cmdq_tx_timeout(u16 opcode, u32 tx_timeout)
@@ -470,10 +473,14 @@ static int hclge_comm_cmd_check_result(struct hclge_comm_hw *hw,
int hclge_comm_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
int num)
{
+ bool is_special = hclge_comm_is_special_opcode(le16_to_cpu(desc->opcode));
struct hclge_comm_cmq_ring *csq = &hw->cmq.csq;
int ret;
int ntc;
+ if (hw->cmq.ops.trace_cmd_send)
+ hw->cmq.ops.trace_cmd_send(hw, desc, num, is_special);
+
spin_lock_bh(&hw->cmq.csq.lock);
if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state)) {
@@ -507,8 +514,12 @@ int hclge_comm_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
spin_unlock_bh(&hw->cmq.csq.lock);
+ if (hw->cmq.ops.trace_cmd_get)
+ hw->cmq.ops.trace_cmd_get(hw, desc, num, is_special);
+
return ret;
}
+EXPORT_SYMBOL_GPL(hclge_comm_cmd_send);
static void hclge_comm_cmd_uninit_regs(struct hclge_comm_hw *hw)
{
@@ -545,6 +556,7 @@ void hclge_comm_cmd_uninit(struct hnae3_ae_dev *ae_dev,
hclge_comm_free_cmd_desc(&cmdq->csq);
hclge_comm_free_cmd_desc(&cmdq->crq);
}
+EXPORT_SYMBOL_GPL(hclge_comm_cmd_uninit);
int hclge_comm_cmd_queue_init(struct pci_dev *pdev, struct hclge_comm_hw *hw)
{
@@ -583,6 +595,19 @@ err_csq:
hclge_comm_free_cmd_desc(&hw->cmq.csq);
return ret;
}
+EXPORT_SYMBOL_GPL(hclge_comm_cmd_queue_init);
+
+void hclge_comm_cmd_init_ops(struct hclge_comm_hw *hw,
+ const struct hclge_comm_cmq_ops *ops)
+{
+ struct hclge_comm_cmq *cmdq = &hw->cmq;
+
+ if (ops) {
+ cmdq->ops.trace_cmd_send = ops->trace_cmd_send;
+ cmdq->ops.trace_cmd_get = ops->trace_cmd_get;
+ }
+}
+EXPORT_SYMBOL_GPL(hclge_comm_cmd_init_ops);
int hclge_comm_cmd_init(struct hnae3_ae_dev *ae_dev, struct hclge_comm_hw *hw,
u32 *fw_version, bool is_pf,
@@ -653,3 +678,8 @@ err_cmd_init:
return ret;
}
+EXPORT_SYMBOL_GPL(hclge_comm_cmd_init);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet PF/VF Common Library");
+MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
index 552396518e08..2c2a2f1e0d7a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
@@ -91,6 +91,7 @@ enum hclge_opcode_type {
HCLGE_OPC_DFX_RCB_REG = 0x004D,
HCLGE_OPC_DFX_TQP_REG = 0x004E,
HCLGE_OPC_DFX_SSU_REG_2 = 0x004F,
+ HCLGE_OPC_DFX_GEN_REG = 0x7038,
HCLGE_OPC_QUERY_DEV_SPECS = 0x0050,
HCLGE_OPC_GET_QUEUE_ERR_VF = 0x0067,
@@ -246,6 +247,9 @@ enum hclge_opcode_type {
HCLGE_OPC_QCN_AJUST_INIT = 0x1A07,
HCLGE_OPC_QCN_DFX_CNT_STATUS = 0x1A08,
+ /* SCC commands */
+ HCLGE_OPC_QUERY_SCC_VER = 0x1A84,
+
/* Mailbox command */
HCLGEVF_OPC_MBX_PF_TO_VF = 0x2000,
HCLGEVF_OPC_MBX_VF_TO_PF = 0x2001,
@@ -353,6 +357,7 @@ enum HCLGE_COMM_CAP_BITS {
HCLGE_COMM_CAP_LANE_NUM_B = 27,
HCLGE_COMM_CAP_WOL_B = 28,
HCLGE_COMM_CAP_TM_FLUSH_B = 31,
+ HCLGE_COMM_CAP_ERR_MOD_GEN_REG_B = 32,
};
enum HCLGE_COMM_API_CAP_BITS {
@@ -392,6 +397,11 @@ struct hclge_comm_query_version_cmd {
__le32 caps[HCLGE_COMM_QUERY_CAP_LENGTH]; /* capabilities of device */
};
+struct hclge_comm_query_scc_cmd {
+ __le32 scc_version;
+ u8 rsv[20];
+};
+
#define HCLGE_DESC_DATA_LEN 6
struct hclge_desc {
__le16 opcode;
@@ -423,11 +433,22 @@ enum hclge_comm_cmd_status {
HCLGE_COMM_ERR_CSQ_ERROR = -3,
};
+struct hclge_comm_hw;
+struct hclge_comm_cmq_ops {
+ void (*trace_cmd_send)(struct hclge_comm_hw *hw,
+ struct hclge_desc *desc,
+ int num, bool is_special);
+ void (*trace_cmd_get)(struct hclge_comm_hw *hw,
+ struct hclge_desc *desc,
+ int num, bool is_special);
+};
+
struct hclge_comm_cmq {
struct hclge_comm_cmq_ring csq;
struct hclge_comm_cmq_ring crq;
u16 tx_timeout;
enum hclge_comm_cmd_status last_status;
+ struct hclge_comm_cmq_ops ops;
};
struct hclge_comm_hw {
@@ -474,5 +495,6 @@ int hclge_comm_cmd_queue_init(struct pci_dev *pdev, struct hclge_comm_hw *hw);
int hclge_comm_cmd_init(struct hnae3_ae_dev *ae_dev, struct hclge_comm_hw *hw,
u32 *fw_version, bool is_pf,
unsigned long reset_pending);
-
+void hclge_comm_cmd_init_ops(struct hclge_comm_hw *hw,
+ const struct hclge_comm_cmq_ops *ops);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c
index b4ae2160aff4..1eca53aaf598 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c
@@ -62,6 +62,7 @@ int hclge_comm_rss_init_cfg(struct hnae3_handle *nic,
return 0;
}
+EXPORT_SYMBOL_GPL(hclge_comm_rss_init_cfg);
void hclge_comm_get_rss_tc_info(u16 rss_size, u8 hw_tc_map, u16 *tc_offset,
u16 *tc_valid, u16 *tc_size)
@@ -78,6 +79,7 @@ void hclge_comm_get_rss_tc_info(u16 rss_size, u8 hw_tc_map, u16 *tc_offset,
tc_offset[i] = (hw_tc_map & BIT(i)) ? rss_size * i : 0;
}
}
+EXPORT_SYMBOL_GPL(hclge_comm_get_rss_tc_info);
int hclge_comm_set_rss_tc_mode(struct hclge_comm_hw *hw, u16 *tc_offset,
u16 *tc_valid, u16 *tc_size)
@@ -113,6 +115,7 @@ int hclge_comm_set_rss_tc_mode(struct hclge_comm_hw *hw, u16 *tc_offset,
return ret;
}
+EXPORT_SYMBOL_GPL(hclge_comm_set_rss_tc_mode);
int hclge_comm_set_rss_hash_key(struct hclge_comm_rss_cfg *rss_cfg,
struct hclge_comm_hw *hw, const u8 *key,
@@ -143,11 +146,12 @@ int hclge_comm_set_rss_hash_key(struct hclge_comm_rss_cfg *rss_cfg,
return 0;
}
+EXPORT_SYMBOL_GPL(hclge_comm_set_rss_hash_key);
int hclge_comm_set_rss_tuple(struct hnae3_ae_dev *ae_dev,
struct hclge_comm_hw *hw,
struct hclge_comm_rss_cfg *rss_cfg,
- struct ethtool_rxnfc *nfc)
+ const struct ethtool_rxfh_fields *nfc)
{
struct hclge_comm_rss_input_tuple_cmd *req;
struct hclge_desc desc;
@@ -185,11 +189,13 @@ int hclge_comm_set_rss_tuple(struct hnae3_ae_dev *ae_dev,
rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
return 0;
}
+EXPORT_SYMBOL_GPL(hclge_comm_set_rss_tuple);
u32 hclge_comm_get_rss_key_size(struct hnae3_handle *handle)
{
return HCLGE_COMM_RSS_KEY_SIZE;
}
+EXPORT_SYMBOL_GPL(hclge_comm_get_rss_key_size);
int hclge_comm_parse_rss_hfunc(struct hclge_comm_rss_cfg *rss_cfg,
const u8 hfunc, u8 *hash_algo)
@@ -217,6 +223,7 @@ void hclge_comm_rss_indir_init_cfg(struct hnae3_ae_dev *ae_dev,
for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size;
}
+EXPORT_SYMBOL_GPL(hclge_comm_rss_indir_init_cfg);
int hclge_comm_get_rss_tuple(struct hclge_comm_rss_cfg *rss_cfg, int flow_type,
u8 *tuple_sets)
@@ -250,6 +257,7 @@ int hclge_comm_get_rss_tuple(struct hclge_comm_rss_cfg *rss_cfg, int flow_type,
return 0;
}
+EXPORT_SYMBOL_GPL(hclge_comm_get_rss_tuple);
static void
hclge_comm_append_rss_msb_info(struct hclge_comm_rss_ind_tbl_cmd *req,
@@ -304,6 +312,7 @@ int hclge_comm_set_rss_indir_table(struct hnae3_ae_dev *ae_dev,
}
return 0;
}
+EXPORT_SYMBOL_GPL(hclge_comm_set_rss_indir_table);
int hclge_comm_set_rss_input_tuple(struct hclge_comm_hw *hw,
struct hclge_comm_rss_cfg *rss_cfg)
@@ -332,6 +341,7 @@ int hclge_comm_set_rss_input_tuple(struct hclge_comm_hw *hw,
"failed to configure rss input, ret = %d.\n", ret);
return ret;
}
+EXPORT_SYMBOL_GPL(hclge_comm_set_rss_input_tuple);
void hclge_comm_get_rss_hash_info(struct hclge_comm_rss_cfg *rss_cfg, u8 *key,
u8 *hfunc)
@@ -355,6 +365,7 @@ void hclge_comm_get_rss_hash_info(struct hclge_comm_rss_cfg *rss_cfg, u8 *key,
if (key)
memcpy(key, rss_cfg->rss_hash_key, HCLGE_COMM_RSS_KEY_SIZE);
}
+EXPORT_SYMBOL_GPL(hclge_comm_get_rss_hash_info);
void hclge_comm_get_rss_indir_tbl(struct hclge_comm_rss_cfg *rss_cfg,
u32 *indir, u16 rss_ind_tbl_size)
@@ -367,6 +378,7 @@ void hclge_comm_get_rss_indir_tbl(struct hclge_comm_rss_cfg *rss_cfg,
for (i = 0; i < rss_ind_tbl_size; i++)
indir[i] = rss_cfg->rss_indirection_tbl[i];
}
+EXPORT_SYMBOL_GPL(hclge_comm_get_rss_indir_tbl);
int hclge_comm_set_rss_algo_key(struct hclge_comm_hw *hw, const u8 hfunc,
const u8 *key)
@@ -408,8 +420,9 @@ int hclge_comm_set_rss_algo_key(struct hclge_comm_hw *hw, const u8 hfunc,
return 0;
}
+EXPORT_SYMBOL_GPL(hclge_comm_set_rss_algo_key);
-static u8 hclge_comm_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
+static u8 hclge_comm_get_rss_hash_bits(const struct ethtool_rxfh_fields *nfc)
{
u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_COMM_S_PORT_BIT : 0;
@@ -435,7 +448,7 @@ static u8 hclge_comm_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
}
int hclge_comm_init_rss_tuple_cmd(struct hclge_comm_rss_cfg *rss_cfg,
- struct ethtool_rxnfc *nfc,
+ const struct ethtool_rxfh_fields *nfc,
struct hnae3_ae_dev *ae_dev,
struct hclge_comm_rss_input_tuple_cmd *req)
{
@@ -502,3 +515,4 @@ u64 hclge_comm_convert_rss_tuple(u8 tuple_sets)
return tuple_data;
}
+EXPORT_SYMBOL_GPL(hclge_comm_convert_rss_tuple);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h
index cdafa63fe38b..cbc02b50c6e7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h
@@ -108,7 +108,7 @@ void hclge_comm_get_rss_indir_tbl(struct hclge_comm_rss_cfg *rss_cfg,
int hclge_comm_set_rss_algo_key(struct hclge_comm_hw *hw, const u8 hfunc,
const u8 *key);
int hclge_comm_init_rss_tuple_cmd(struct hclge_comm_rss_cfg *rss_cfg,
- struct ethtool_rxnfc *nfc,
+ const struct ethtool_rxfh_fields *nfc,
struct hnae3_ae_dev *ae_dev,
struct hclge_comm_rss_input_tuple_cmd *req);
u64 hclge_comm_convert_rss_tuple(u8 tuple_sets);
@@ -129,5 +129,5 @@ int hclge_comm_set_rss_hash_key(struct hclge_comm_rss_cfg *rss_cfg,
int hclge_comm_set_rss_tuple(struct hnae3_ae_dev *ae_dev,
struct hclge_comm_hw *hw,
struct hclge_comm_rss_cfg *rss_cfg,
- struct ethtool_rxnfc *nfc);
+ const struct ethtool_rxfh_fields *nfc);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
index 618f66d9586b..f9a3d6fc4416 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
@@ -26,6 +26,7 @@ u64 *hclge_comm_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
return buff;
}
+EXPORT_SYMBOL_GPL(hclge_comm_tqps_get_stats);
int hclge_comm_tqps_get_sset_count(struct hnae3_handle *handle)
{
@@ -33,29 +34,26 @@ int hclge_comm_tqps_get_sset_count(struct hnae3_handle *handle)
return kinfo->num_tqps * HCLGE_COMM_QUEUE_PAIR_SIZE;
}
+EXPORT_SYMBOL_GPL(hclge_comm_tqps_get_sset_count);
-u8 *hclge_comm_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
+void hclge_comm_tqps_get_strings(struct hnae3_handle *handle, u8 **data)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
- u8 *buff = data;
u16 i;
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclge_comm_tqp *tqp =
container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd", tqp->index);
- buff += ETH_GSTRING_LEN;
+ ethtool_sprintf(data, "txq%u_pktnum_rcd", tqp->index);
}
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclge_comm_tqp *tqp =
container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd", tqp->index);
- buff += ETH_GSTRING_LEN;
+ ethtool_sprintf(data, "rxq%u_pktnum_rcd", tqp->index);
}
-
- return buff;
}
+EXPORT_SYMBOL_GPL(hclge_comm_tqps_get_strings);
int hclge_comm_tqps_update_stats(struct hnae3_handle *handle,
struct hclge_comm_hw *hw)
@@ -99,6 +97,7 @@ int hclge_comm_tqps_update_stats(struct hnae3_handle *handle,
return 0;
}
+EXPORT_SYMBOL_GPL(hclge_comm_tqps_update_stats);
void hclge_comm_reset_tqp_stats(struct hnae3_handle *handle)
{
@@ -113,3 +112,4 @@ void hclge_comm_reset_tqp_stats(struct hnae3_handle *handle)
memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
}
}
+EXPORT_SYMBOL_GPL(hclge_comm_reset_tqp_stats);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.h
index a46350162ee8..b9ff424c0bc2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.h
@@ -32,7 +32,7 @@ struct hclge_comm_tqp {
u64 *hclge_comm_tqps_get_stats(struct hnae3_handle *handle, u64 *data);
int hclge_comm_tqps_get_sset_count(struct hnae3_handle *handle);
-u8 *hclge_comm_tqps_get_strings(struct hnae3_handle *handle, u8 *data);
+void hclge_comm_tqps_get_strings(struct hnae3_handle *handle, u8 **data);
void hclge_comm_reset_tqp_stats(struct hnae3_handle *handle);
int hclge_comm_tqps_update_stats(struct hnae3_handle *handle,
struct hclge_comm_hw *hw);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 807eb3bbb11c..4cce4f4ba6b0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -3,6 +3,8 @@
#include <linux/debugfs.h>
#include <linux/device.h>
+#include <linux/seq_file.h>
+#include <linux/string_choices.h>
#include "hnae3.h"
#include "hns3_debugfs.h"
@@ -39,323 +41,279 @@ static struct hns3_dbg_dentry_info hns3_dbg_dentry[] = {
};
static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, u32 cmd);
-static int hns3_dbg_common_file_init(struct hnae3_handle *handle, u32 cmd);
+static int hns3_dbg_common_init_t1(struct hnae3_handle *handle, u32 cmd);
+static int hns3_dbg_common_init_t2(struct hnae3_handle *handle, u32 cmd);
static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
{
.name = "tm_nodes",
.cmd = HNAE3_DBG_CMD_TM_NODES,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "tm_priority",
.cmd = HNAE3_DBG_CMD_TM_PRI,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "tm_qset",
.cmd = HNAE3_DBG_CMD_TM_QSET,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "tm_map",
.cmd = HNAE3_DBG_CMD_TM_MAP,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN_1MB,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "tm_pg",
.cmd = HNAE3_DBG_CMD_TM_PG,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "tm_port",
.cmd = HNAE3_DBG_CMD_TM_PORT,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "tc_sch_info",
.cmd = HNAE3_DBG_CMD_TC_SCH_INFO,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "qos_pause_cfg",
.cmd = HNAE3_DBG_CMD_QOS_PAUSE_CFG,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "qos_pri_map",
.cmd = HNAE3_DBG_CMD_QOS_PRI_MAP,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "qos_dscp_map",
.cmd = HNAE3_DBG_CMD_QOS_DSCP_MAP,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "qos_buf_cfg",
.cmd = HNAE3_DBG_CMD_QOS_BUF_CFG,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "dev_info",
.cmd = HNAE3_DBG_CMD_DEV_INFO,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t1,
},
{
.name = "tx_bd_queue",
.cmd = HNAE3_DBG_CMD_TX_BD,
.dentry = HNS3_DBG_DENTRY_TX_BD,
- .buf_len = HNS3_DBG_READ_LEN_5MB,
.init = hns3_dbg_bd_file_init,
},
{
.name = "rx_bd_queue",
.cmd = HNAE3_DBG_CMD_RX_BD,
.dentry = HNS3_DBG_DENTRY_RX_BD,
- .buf_len = HNS3_DBG_READ_LEN_4MB,
.init = hns3_dbg_bd_file_init,
},
{
.name = "uc",
.cmd = HNAE3_DBG_CMD_MAC_UC,
.dentry = HNS3_DBG_DENTRY_MAC,
- .buf_len = HNS3_DBG_READ_LEN_128KB,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "mc",
.cmd = HNAE3_DBG_CMD_MAC_MC,
.dentry = HNS3_DBG_DENTRY_MAC,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "mng_tbl",
.cmd = HNAE3_DBG_CMD_MNG_TBL,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "loopback",
.cmd = HNAE3_DBG_CMD_LOOPBACK,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "interrupt_info",
.cmd = HNAE3_DBG_CMD_INTERRUPT_INFO,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "reset_info",
.cmd = HNAE3_DBG_CMD_RESET_INFO,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "imp_info",
.cmd = HNAE3_DBG_CMD_IMP_INFO,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "ncl_config",
.cmd = HNAE3_DBG_CMD_NCL_CONFIG,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN_128KB,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "mac_tnl_status",
.cmd = HNAE3_DBG_CMD_MAC_TNL_STATUS,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "bios_common",
.cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "ssu",
.cmd = HNAE3_DBG_CMD_REG_SSU,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "igu_egu",
.cmd = HNAE3_DBG_CMD_REG_IGU_EGU,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "rpu",
.cmd = HNAE3_DBG_CMD_REG_RPU,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "ncsi",
.cmd = HNAE3_DBG_CMD_REG_NCSI,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "rtc",
.cmd = HNAE3_DBG_CMD_REG_RTC,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "ppp",
.cmd = HNAE3_DBG_CMD_REG_PPP,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "rcb",
.cmd = HNAE3_DBG_CMD_REG_RCB,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "tqp",
.cmd = HNAE3_DBG_CMD_REG_TQP,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN_128KB,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "mac",
.cmd = HNAE3_DBG_CMD_REG_MAC,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "dcb",
.cmd = HNAE3_DBG_CMD_REG_DCB,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "queue_map",
.cmd = HNAE3_DBG_CMD_QUEUE_MAP,
.dentry = HNS3_DBG_DENTRY_QUEUE,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t1,
},
{
.name = "rx_queue_info",
.cmd = HNAE3_DBG_CMD_RX_QUEUE_INFO,
.dentry = HNS3_DBG_DENTRY_QUEUE,
- .buf_len = HNS3_DBG_READ_LEN_1MB,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t1,
},
{
.name = "tx_queue_info",
.cmd = HNAE3_DBG_CMD_TX_QUEUE_INFO,
.dentry = HNS3_DBG_DENTRY_QUEUE,
- .buf_len = HNS3_DBG_READ_LEN_1MB,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t1,
},
{
.name = "fd_tcam",
.cmd = HNAE3_DBG_CMD_FD_TCAM,
.dentry = HNS3_DBG_DENTRY_FD,
- .buf_len = HNS3_DBG_READ_LEN_1MB,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "service_task_info",
.cmd = HNAE3_DBG_CMD_SERV_INFO,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "vlan_config",
.cmd = HNAE3_DBG_CMD_VLAN_CONFIG,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "ptp_info",
.cmd = HNAE3_DBG_CMD_PTP_INFO,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "fd_counter",
.cmd = HNAE3_DBG_CMD_FD_COUNTER,
.dentry = HNS3_DBG_DENTRY_FD,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "umv_info",
.cmd = HNAE3_DBG_CMD_UMV_INFO,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "page_pool_info",
.cmd = HNAE3_DBG_CMD_PAGE_POOL_INFO,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t1,
},
{
.name = "coalesce_info",
.cmd = HNAE3_DBG_CMD_COAL_INFO,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN_1MB,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t1,
},
};
@@ -420,71 +378,17 @@ static struct hns3_dbg_cap_info hns3_dbg_cap[] = {
}
};
-static const struct hns3_dbg_item coal_info_items[] = {
- { "VEC_ID", 2 },
- { "ALGO_STATE", 2 },
- { "PROFILE_ID", 2 },
- { "CQE_MODE", 2 },
- { "TUNE_STATE", 2 },
- { "STEPS_LEFT", 2 },
- { "STEPS_RIGHT", 2 },
- { "TIRED", 2 },
- { "SW_GL", 2 },
- { "SW_QL", 2 },
- { "HW_GL", 2 },
- { "HW_QL", 2 },
-};
-
static const char * const dim_cqe_mode_str[] = { "EQE", "CQE" };
static const char * const dim_state_str[] = { "START", "IN_PROG", "APPLY" };
static const char * const
dim_tune_stat_str[] = { "ON_TOP", "TIRED", "RIGHT", "LEFT" };
-static void hns3_dbg_fill_content(char *content, u16 len,
- const struct hns3_dbg_item *items,
- const char **result, u16 size)
-{
-#define HNS3_DBG_LINE_END_LEN 2
- char *pos = content;
- u16 item_len;
- u16 i;
-
- if (!len) {
- return;
- } else if (len <= HNS3_DBG_LINE_END_LEN) {
- *pos++ = '\0';
- return;
- }
-
- memset(content, ' ', len);
- len -= HNS3_DBG_LINE_END_LEN;
-
- for (i = 0; i < size; i++) {
- item_len = strlen(items[i].name) + items[i].interval;
- if (len < item_len)
- break;
-
- if (result) {
- if (item_len < strlen(result[i]))
- break;
- memcpy(pos, result[i], strlen(result[i]));
- } else {
- memcpy(pos, items[i].name, strlen(items[i].name));
- }
- pos += item_len;
- len -= item_len;
- }
- *pos++ = '\n';
- *pos++ = '\0';
-}
-
static void hns3_get_coal_info(struct hns3_enet_tqp_vector *tqp_vector,
- char **result, int i, bool is_tx)
+ struct seq_file *s, int i, bool is_tx)
{
unsigned int gl_offset, ql_offset;
struct hns3_enet_coalesce *coal;
unsigned int reg_val;
- unsigned int j = 0;
struct dim *dim;
bool ql_enable;
@@ -502,191 +406,96 @@ static void hns3_get_coal_info(struct hns3_enet_tqp_vector *tqp_vector,
ql_enable = tqp_vector->rx_group.coal.ql_enable;
}
- sprintf(result[j++], "%d", i);
- sprintf(result[j++], "%s", dim->state < ARRAY_SIZE(dim_state_str) ?
- dim_state_str[dim->state] : "unknown");
- sprintf(result[j++], "%u", dim->profile_ix);
- sprintf(result[j++], "%s", dim->mode < ARRAY_SIZE(dim_cqe_mode_str) ?
- dim_cqe_mode_str[dim->mode] : "unknown");
- sprintf(result[j++], "%s",
- dim->tune_state < ARRAY_SIZE(dim_tune_stat_str) ?
- dim_tune_stat_str[dim->tune_state] : "unknown");
- sprintf(result[j++], "%u", dim->steps_left);
- sprintf(result[j++], "%u", dim->steps_right);
- sprintf(result[j++], "%u", dim->tired);
- sprintf(result[j++], "%u", coal->int_gl);
- sprintf(result[j++], "%u", coal->int_ql);
+ seq_printf(s, "%-8d", i);
+ seq_printf(s, "%-12s", dim->state < ARRAY_SIZE(dim_state_str) ?
+ dim_state_str[dim->state] : "unknown");
+ seq_printf(s, "%-12u", dim->profile_ix);
+ seq_printf(s, "%-10s", dim->mode < ARRAY_SIZE(dim_cqe_mode_str) ?
+ dim_cqe_mode_str[dim->mode] : "unknown");
+ seq_printf(s, "%-12s", dim->tune_state < ARRAY_SIZE(dim_tune_stat_str) ?
+ dim_tune_stat_str[dim->tune_state] : "unknown");
+ seq_printf(s, "%-12u%-13u%-7u%-7u%-7u", dim->steps_left,
+ dim->steps_right, dim->tired, coal->int_gl, coal->int_ql);
reg_val = readl(tqp_vector->mask_addr + gl_offset) &
HNS3_VECTOR_GL_MASK;
- sprintf(result[j++], "%u", reg_val);
+ seq_printf(s, "%-7u", reg_val);
if (ql_enable) {
reg_val = readl(tqp_vector->mask_addr + ql_offset) &
HNS3_VECTOR_QL_MASK;
- sprintf(result[j++], "%u", reg_val);
+ seq_printf(s, "%u\n", reg_val);
} else {
- sprintf(result[j++], "NA");
+ seq_puts(s, "NA\n");
}
}
-static void hns3_dump_coal_info(struct hnae3_handle *h, char *buf, int len,
- int *pos, bool is_tx)
+static void hns3_dump_coal_info(struct seq_file *s, bool is_tx)
{
- char data_str[ARRAY_SIZE(coal_info_items)][HNS3_DBG_DATA_STR_LEN];
- char *result[ARRAY_SIZE(coal_info_items)];
+ struct hnae3_handle *h = hnae3_seq_file_to_handle(s);
struct hns3_enet_tqp_vector *tqp_vector;
struct hns3_nic_priv *priv = h->priv;
- char content[HNS3_DBG_INFO_LEN];
unsigned int i;
- for (i = 0; i < ARRAY_SIZE(coal_info_items); i++)
- result[i] = &data_str[i][0];
+ seq_printf(s, "%s interrupt coalesce info:\n", is_tx ? "tx" : "rx");
- *pos += scnprintf(buf + *pos, len - *pos,
- "%s interrupt coalesce info:\n",
- is_tx ? "tx" : "rx");
- hns3_dbg_fill_content(content, sizeof(content), coal_info_items,
- NULL, ARRAY_SIZE(coal_info_items));
- *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+ seq_puts(s, "VEC_ID ALGO_STATE PROFILE_ID CQE_MODE TUNE_STATE ");
+ seq_puts(s, "STEPS_LEFT STEPS_RIGHT TIRED SW_GL SW_QL ");
+ seq_puts(s, "HW_GL HW_QL\n");
for (i = 0; i < priv->vector_num; i++) {
tqp_vector = &priv->tqp_vector[i];
- hns3_get_coal_info(tqp_vector, result, i, is_tx);
- hns3_dbg_fill_content(content, sizeof(content), coal_info_items,
- (const char **)result,
- ARRAY_SIZE(coal_info_items));
- *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+ hns3_get_coal_info(tqp_vector, s, i, is_tx);
}
}
-static int hns3_dbg_coal_info(struct hnae3_handle *h, char *buf, int len)
+static int hns3_dbg_coal_info(struct seq_file *s, void *data)
{
- int pos = 0;
-
- hns3_dump_coal_info(h, buf, len, &pos, true);
- pos += scnprintf(buf + pos, len - pos, "\n");
- hns3_dump_coal_info(h, buf, len, &pos, false);
+ hns3_dump_coal_info(s, true);
+ seq_puts(s, "\n");
+ hns3_dump_coal_info(s, false);
return 0;
}
-static const struct hns3_dbg_item tx_spare_info_items[] = {
- { "QUEUE_ID", 2 },
- { "COPYBREAK", 2 },
- { "LEN", 7 },
- { "NTU", 4 },
- { "NTC", 4 },
- { "LTC", 4 },
- { "DMA", 17 },
-};
-
-static void hns3_dbg_tx_spare_info(struct hns3_enet_ring *ring, char *buf,
- int len, u32 ring_num, int *pos)
-{
- char data_str[ARRAY_SIZE(tx_spare_info_items)][HNS3_DBG_DATA_STR_LEN];
- struct hns3_tx_spare *tx_spare = ring->tx_spare;
- char *result[ARRAY_SIZE(tx_spare_info_items)];
- char content[HNS3_DBG_INFO_LEN];
- u32 i, j;
-
- if (!tx_spare) {
- *pos += scnprintf(buf + *pos, len - *pos,
- "tx spare buffer is not enabled\n");
- return;
- }
-
- for (i = 0; i < ARRAY_SIZE(tx_spare_info_items); i++)
- result[i] = &data_str[i][0];
-
- *pos += scnprintf(buf + *pos, len - *pos, "tx spare buffer info\n");
- hns3_dbg_fill_content(content, sizeof(content), tx_spare_info_items,
- NULL, ARRAY_SIZE(tx_spare_info_items));
- *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
-
- for (i = 0; i < ring_num; i++) {
- j = 0;
- sprintf(result[j++], "%u", i);
- sprintf(result[j++], "%u", ring->tx_copybreak);
- sprintf(result[j++], "%u", tx_spare->len);
- sprintf(result[j++], "%u", tx_spare->next_to_use);
- sprintf(result[j++], "%u", tx_spare->next_to_clean);
- sprintf(result[j++], "%u", tx_spare->last_to_clean);
- sprintf(result[j++], "%pad", &tx_spare->dma);
- hns3_dbg_fill_content(content, sizeof(content),
- tx_spare_info_items,
- (const char **)result,
- ARRAY_SIZE(tx_spare_info_items));
- *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
- }
-}
-
-static const struct hns3_dbg_item rx_queue_info_items[] = {
- { "QUEUE_ID", 2 },
- { "BD_NUM", 2 },
- { "BD_LEN", 2 },
- { "TAIL", 2 },
- { "HEAD", 2 },
- { "FBDNUM", 2 },
- { "PKTNUM", 5 },
- { "COPYBREAK", 2 },
- { "RING_EN", 2 },
- { "RX_RING_EN", 2 },
- { "BASE_ADDR", 10 },
-};
-
static void hns3_dump_rx_queue_info(struct hns3_enet_ring *ring,
- struct hnae3_ae_dev *ae_dev, char **result,
- u32 index)
+ struct seq_file *s, u32 index)
{
+ struct hnae3_ae_dev *ae_dev = hnae3_seq_file_to_ae_dev(s);
+ void __iomem *base = ring->tqp->io_base;
u32 base_add_l, base_add_h;
- u32 j = 0;
-
- sprintf(result[j++], "%u", index);
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_RING_BD_NUM_REG));
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_RING_BD_LEN_REG));
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_RING_TAIL_REG));
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_RING_HEAD_REG));
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_RING_FBDNUM_REG));
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_RING_PKTNUM_RECORD_REG));
- sprintf(result[j++], "%u", ring->rx_copybreak);
- sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_EN_REG) ? "on" : "off");
+ seq_printf(s, "%-10u", index);
+ seq_printf(s, "%-8u",
+ readl_relaxed(base + HNS3_RING_RX_RING_BD_NUM_REG));
+ seq_printf(s, "%-8u",
+ readl_relaxed(base + HNS3_RING_RX_RING_BD_LEN_REG));
+ seq_printf(s, "%-6u",
+ readl_relaxed(base + HNS3_RING_RX_RING_TAIL_REG));
+ seq_printf(s, "%-6u",
+ readl_relaxed(base + HNS3_RING_RX_RING_HEAD_REG));
+ seq_printf(s, "%-8u",
+ readl_relaxed(base + HNS3_RING_RX_RING_FBDNUM_REG));
+ seq_printf(s, "%-11u", readl_relaxed(base +
+ HNS3_RING_RX_RING_PKTNUM_RECORD_REG));
+ seq_printf(s, "%-11u", ring->rx_copybreak);
+ seq_printf(s, "%-9s",
+ str_on_off(readl_relaxed(base + HNS3_RING_EN_REG)));
if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev))
- sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_EN_REG) ? "on" : "off");
+ seq_printf(s, "%-12s", str_on_off(readl_relaxed(base +
+ HNS3_RING_RX_EN_REG)));
else
- sprintf(result[j++], "%s", "NA");
+ seq_printf(s, "%-12s", "NA");
- base_add_h = readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_RING_BASEADDR_H_REG);
- base_add_l = readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_RING_BASEADDR_L_REG);
- sprintf(result[j++], "0x%08x%08x", base_add_h, base_add_l);
+ base_add_h = readl_relaxed(base + HNS3_RING_RX_RING_BASEADDR_H_REG);
+ base_add_l = readl_relaxed(base + HNS3_RING_RX_RING_BASEADDR_L_REG);
+ seq_printf(s, "0x%08x%08x\n", base_add_h, base_add_l);
}
-static int hns3_dbg_rx_queue_info(struct hnae3_handle *h,
- char *buf, int len)
+static int hns3_dbg_rx_queue_info(struct seq_file *s, void *data)
{
- char data_str[ARRAY_SIZE(rx_queue_info_items)][HNS3_DBG_DATA_STR_LEN];
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
- char *result[ARRAY_SIZE(rx_queue_info_items)];
+ struct hnae3_handle *h = hnae3_seq_file_to_handle(s);
struct hns3_nic_priv *priv = h->priv;
- char content[HNS3_DBG_INFO_LEN];
struct hns3_enet_ring *ring;
- int pos = 0;
u32 i;
if (!priv->ring) {
@@ -694,12 +503,9 @@ static int hns3_dbg_rx_queue_info(struct hnae3_handle *h,
return -EFAULT;
}
- for (i = 0; i < ARRAY_SIZE(rx_queue_info_items); i++)
- result[i] = &data_str[i][0];
+ seq_puts(s, "QUEUE_ID BD_NUM BD_LEN TAIL HEAD FBDNUM ");
+ seq_puts(s, "PKTNUM COPYBREAK RING_EN RX_RING_EN BASE_ADDR\n");
- hns3_dbg_fill_content(content, sizeof(content), rx_queue_info_items,
- NULL, ARRAY_SIZE(rx_queue_info_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
for (i = 0; i < h->kinfo.num_tqps; i++) {
/* Each cycle needs to determine whether the instance is reset,
* to prevent reference to invalid memory. And need to ensure
@@ -710,86 +516,51 @@ static int hns3_dbg_rx_queue_info(struct hnae3_handle *h,
return -EPERM;
ring = &priv->ring[(u32)(i + h->kinfo.num_tqps)];
- hns3_dump_rx_queue_info(ring, ae_dev, result, i);
- hns3_dbg_fill_content(content, sizeof(content),
- rx_queue_info_items,
- (const char **)result,
- ARRAY_SIZE(rx_queue_info_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ hns3_dump_rx_queue_info(ring, s, i);
}
return 0;
}
-static const struct hns3_dbg_item tx_queue_info_items[] = {
- { "QUEUE_ID", 2 },
- { "BD_NUM", 2 },
- { "TC", 2 },
- { "TAIL", 2 },
- { "HEAD", 2 },
- { "FBDNUM", 2 },
- { "OFFSET", 2 },
- { "PKTNUM", 5 },
- { "RING_EN", 2 },
- { "TX_RING_EN", 2 },
- { "BASE_ADDR", 10 },
-};
-
static void hns3_dump_tx_queue_info(struct hns3_enet_ring *ring,
- struct hnae3_ae_dev *ae_dev, char **result,
- u32 index)
+ struct seq_file *s, u32 index)
{
+ struct hnae3_ae_dev *ae_dev = hnae3_seq_file_to_ae_dev(s);
+ void __iomem *base = ring->tqp->io_base;
u32 base_add_l, base_add_h;
- u32 j = 0;
-
- sprintf(result[j++], "%u", index);
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_BD_NUM_REG));
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_TC_REG));
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_TAIL_REG));
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_HEAD_REG));
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_FBDNUM_REG));
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_OFFSET_REG));
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_PKTNUM_RECORD_REG));
-
- sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_EN_REG) ? "on" : "off");
+ seq_printf(s, "%-10u", index);
+ seq_printf(s, "%-8u",
+ readl_relaxed(base + HNS3_RING_TX_RING_BD_NUM_REG));
+ seq_printf(s, "%-4u", readl_relaxed(base + HNS3_RING_TX_RING_TC_REG));
+ seq_printf(s, "%-6u", readl_relaxed(base + HNS3_RING_TX_RING_TAIL_REG));
+ seq_printf(s, "%-6u", readl_relaxed(base + HNS3_RING_TX_RING_HEAD_REG));
+ seq_printf(s, "%-8u",
+ readl_relaxed(base + HNS3_RING_TX_RING_FBDNUM_REG));
+ seq_printf(s, "%-8u",
+ readl_relaxed(base + HNS3_RING_TX_RING_OFFSET_REG));
+ seq_printf(s, "%-11u",
+ readl_relaxed(base + HNS3_RING_TX_RING_PKTNUM_RECORD_REG));
+ seq_printf(s, "%-9s",
+ str_on_off(readl_relaxed(base + HNS3_RING_EN_REG)));
if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev))
- sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_EN_REG) ? "on" : "off");
+ seq_printf(s, "%-12s",
+ str_on_off(readl_relaxed(base +
+ HNS3_RING_TX_EN_REG)));
else
- sprintf(result[j++], "%s", "NA");
+ seq_printf(s, "%-12s", "NA");
- base_add_h = readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_BASEADDR_H_REG);
- base_add_l = readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_BASEADDR_L_REG);
- sprintf(result[j++], "0x%08x%08x", base_add_h, base_add_l);
+ base_add_h = readl_relaxed(base + HNS3_RING_TX_RING_BASEADDR_H_REG);
+ base_add_l = readl_relaxed(base + HNS3_RING_TX_RING_BASEADDR_L_REG);
+ seq_printf(s, "0x%08x%08x\n", base_add_h, base_add_l);
}
-static int hns3_dbg_tx_queue_info(struct hnae3_handle *h,
- char *buf, int len)
+static int hns3_dbg_tx_queue_info(struct seq_file *s, void *data)
{
- char data_str[ARRAY_SIZE(tx_queue_info_items)][HNS3_DBG_DATA_STR_LEN];
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
- char *result[ARRAY_SIZE(tx_queue_info_items)];
+ struct hnae3_handle *h = hnae3_seq_file_to_handle(s);
struct hns3_nic_priv *priv = h->priv;
- char content[HNS3_DBG_INFO_LEN];
struct hns3_enet_ring *ring;
- int pos = 0;
u32 i;
if (!priv->ring) {
@@ -797,12 +568,8 @@ static int hns3_dbg_tx_queue_info(struct hnae3_handle *h,
return -EFAULT;
}
- for (i = 0; i < ARRAY_SIZE(tx_queue_info_items); i++)
- result[i] = &data_str[i][0];
-
- hns3_dbg_fill_content(content, sizeof(content), tx_queue_info_items,
- NULL, ARRAY_SIZE(tx_queue_info_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ seq_puts(s, "QUEUE_ID BD_NUM TC TAIL HEAD FBDNUM OFFSET ");
+ seq_puts(s, "PKTNUM RING_EN TX_RING_EN BASE_ADDR\n");
for (i = 0; i < h->kinfo.num_tqps; i++) {
/* Each cycle needs to determine whether the instance is reset,
@@ -814,339 +581,213 @@ static int hns3_dbg_tx_queue_info(struct hnae3_handle *h,
return -EPERM;
ring = &priv->ring[i];
- hns3_dump_tx_queue_info(ring, ae_dev, result, i);
- hns3_dbg_fill_content(content, sizeof(content),
- tx_queue_info_items,
- (const char **)result,
- ARRAY_SIZE(tx_queue_info_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ hns3_dump_tx_queue_info(ring, s, i);
}
- hns3_dbg_tx_spare_info(ring, buf, len, h->kinfo.num_tqps, &pos);
-
return 0;
}
-static const struct hns3_dbg_item queue_map_items[] = {
- { "local_queue_id", 2 },
- { "global_queue_id", 2 },
- { "vector_id", 2 },
-};
-
-static int hns3_dbg_queue_map(struct hnae3_handle *h, char *buf, int len)
+static int hns3_dbg_queue_map(struct seq_file *s, void *data)
{
- char data_str[ARRAY_SIZE(queue_map_items)][HNS3_DBG_DATA_STR_LEN];
- char *result[ARRAY_SIZE(queue_map_items)];
+ struct hnae3_handle *h = hnae3_seq_file_to_handle(s);
struct hns3_nic_priv *priv = h->priv;
- char content[HNS3_DBG_INFO_LEN];
- int pos = 0;
- int j;
u32 i;
if (!h->ae_algo->ops->get_global_queue_id)
return -EOPNOTSUPP;
- for (i = 0; i < ARRAY_SIZE(queue_map_items); i++)
- result[i] = &data_str[i][0];
+ seq_puts(s, "local_queue_id global_queue_id vector_id\n");
- hns3_dbg_fill_content(content, sizeof(content), queue_map_items,
- NULL, ARRAY_SIZE(queue_map_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
for (i = 0; i < h->kinfo.num_tqps; i++) {
if (!priv->ring || !priv->ring[i].tqp_vector)
continue;
- j = 0;
- sprintf(result[j++], "%u", i);
- sprintf(result[j++], "%u",
- h->ae_algo->ops->get_global_queue_id(h, i));
- sprintf(result[j++], "%d",
- priv->ring[i].tqp_vector->vector_irq);
- hns3_dbg_fill_content(content, sizeof(content), queue_map_items,
- (const char **)result,
- ARRAY_SIZE(queue_map_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ seq_printf(s, "%-16u%-17u%d\n", i,
+ h->ae_algo->ops->get_global_queue_id(h, i),
+ priv->ring[i].tqp_vector->vector_irq);
}
return 0;
}
-static const struct hns3_dbg_item rx_bd_info_items[] = {
- { "BD_IDX", 3 },
- { "L234_INFO", 2 },
- { "PKT_LEN", 3 },
- { "SIZE", 4 },
- { "RSS_HASH", 4 },
- { "FD_ID", 2 },
- { "VLAN_TAG", 2 },
- { "O_DM_VLAN_ID_FB", 2 },
- { "OT_VLAN_TAG", 2 },
- { "BD_BASE_INFO", 2 },
- { "PTYPE", 2 },
- { "HW_CSUM", 2 },
-};
-
static void hns3_dump_rx_bd_info(struct hns3_nic_priv *priv,
- struct hns3_desc *desc, char **result, int idx)
+ struct hns3_desc *desc, struct seq_file *s,
+ int idx)
{
- unsigned int j = 0;
-
- sprintf(result[j++], "%d", idx);
- sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.l234_info));
- sprintf(result[j++], "%u", le16_to_cpu(desc->rx.pkt_len));
- sprintf(result[j++], "%u", le16_to_cpu(desc->rx.size));
- sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.rss_hash));
- sprintf(result[j++], "%u", le16_to_cpu(desc->rx.fd_id));
- sprintf(result[j++], "%u", le16_to_cpu(desc->rx.vlan_tag));
- sprintf(result[j++], "%u", le16_to_cpu(desc->rx.o_dm_vlan_id_fb));
- sprintf(result[j++], "%u", le16_to_cpu(desc->rx.ot_vlan_tag));
- sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.bd_base_info));
+ seq_printf(s, "%-9d%#-11x%-10u%-8u%#-12x%-7u%-10u%-17u%-13u%#-14x",
+ idx, le32_to_cpu(desc->rx.l234_info),
+ le16_to_cpu(desc->rx.pkt_len), le16_to_cpu(desc->rx.size),
+ le32_to_cpu(desc->rx.rss_hash), le16_to_cpu(desc->rx.fd_id),
+ le16_to_cpu(desc->rx.vlan_tag),
+ le16_to_cpu(desc->rx.o_dm_vlan_id_fb),
+ le16_to_cpu(desc->rx.ot_vlan_tag),
+ le32_to_cpu(desc->rx.bd_base_info));
+
if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) {
u32 ol_info = le32_to_cpu(desc->rx.ol_info);
- sprintf(result[j++], "%5lu", hnae3_get_field(ol_info,
- HNS3_RXD_PTYPE_M,
- HNS3_RXD_PTYPE_S));
- sprintf(result[j++], "%7u", le16_to_cpu(desc->csum));
+ seq_printf(s, "%-7lu%-9u\n",
+ hnae3_get_field(ol_info, HNS3_RXD_PTYPE_M,
+ HNS3_RXD_PTYPE_S),
+ le16_to_cpu(desc->csum));
} else {
- sprintf(result[j++], "NA");
- sprintf(result[j++], "NA");
+ seq_puts(s, "NA NA\n");
}
}
-static int hns3_dbg_rx_bd_info(struct hns3_dbg_data *d, char *buf, int len)
+static int hns3_dbg_rx_bd_info(struct seq_file *s, void *private)
{
- char data_str[ARRAY_SIZE(rx_bd_info_items)][HNS3_DBG_DATA_STR_LEN];
- struct hns3_nic_priv *priv = d->handle->priv;
- char *result[ARRAY_SIZE(rx_bd_info_items)];
- char content[HNS3_DBG_INFO_LEN];
+ struct hns3_dbg_data *data = s->private;
+ struct hnae3_handle *h = data->handle;
+ struct hns3_nic_priv *priv = h->priv;
struct hns3_enet_ring *ring;
struct hns3_desc *desc;
unsigned int i;
- int pos = 0;
- if (d->qid >= d->handle->kinfo.num_tqps) {
- dev_err(&d->handle->pdev->dev,
- "queue%u is not in use\n", d->qid);
+ if (data->qid >= h->kinfo.num_tqps) {
+ dev_err(&h->pdev->dev, "queue%u is not in use\n", data->qid);
return -EINVAL;
}
- for (i = 0; i < ARRAY_SIZE(rx_bd_info_items); i++)
- result[i] = &data_str[i][0];
+ seq_printf(s, "Queue %u rx bd info:\n", data->qid);
+ seq_puts(s, "BD_IDX L234_INFO PKT_LEN SIZE ");
+ seq_puts(s, "RSS_HASH FD_ID VLAN_TAG O_DM_VLAN_ID_FB ");
+ seq_puts(s, "OT_VLAN_TAG BD_BASE_INFO PTYPE HW_CSUM\n");
- pos += scnprintf(buf + pos, len - pos,
- "Queue %u rx bd info:\n", d->qid);
- hns3_dbg_fill_content(content, sizeof(content), rx_bd_info_items,
- NULL, ARRAY_SIZE(rx_bd_info_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
-
- ring = &priv->ring[d->qid + d->handle->kinfo.num_tqps];
+ ring = &priv->ring[data->qid + data->handle->kinfo.num_tqps];
for (i = 0; i < ring->desc_num; i++) {
desc = &ring->desc[i];
- hns3_dump_rx_bd_info(priv, desc, result, i);
- hns3_dbg_fill_content(content, sizeof(content),
- rx_bd_info_items, (const char **)result,
- ARRAY_SIZE(rx_bd_info_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ hns3_dump_rx_bd_info(priv, desc, s, i);
}
return 0;
}
-static const struct hns3_dbg_item tx_bd_info_items[] = {
- { "BD_IDX", 2 },
- { "ADDRESS", 13 },
- { "VLAN_TAG", 2 },
- { "SIZE", 2 },
- { "T_CS_VLAN_TSO", 2 },
- { "OT_VLAN_TAG", 3 },
- { "TV", 5 },
- { "OLT_VLAN_LEN", 2 },
- { "PAYLEN_OL4CS", 2 },
- { "BD_FE_SC_VLD", 2 },
- { "MSS_HW_CSUM", 0 },
-};
-
-static void hns3_dump_tx_bd_info(struct hns3_desc *desc, char **result, int idx)
+static void hns3_dump_tx_bd_info(struct hns3_desc *desc, struct seq_file *s,
+ int idx)
{
- unsigned int j = 0;
-
- sprintf(result[j++], "%d", idx);
- sprintf(result[j++], "%#llx", le64_to_cpu(desc->addr));
- sprintf(result[j++], "%u", le16_to_cpu(desc->tx.vlan_tag));
- sprintf(result[j++], "%u", le16_to_cpu(desc->tx.send_size));
- sprintf(result[j++], "%#x",
- le32_to_cpu(desc->tx.type_cs_vlan_tso_len));
- sprintf(result[j++], "%u", le16_to_cpu(desc->tx.outer_vlan_tag));
- sprintf(result[j++], "%u", le16_to_cpu(desc->tx.tv));
- sprintf(result[j++], "%u",
- le32_to_cpu(desc->tx.ol_type_vlan_len_msec));
- sprintf(result[j++], "%#x", le32_to_cpu(desc->tx.paylen_ol4cs));
- sprintf(result[j++], "%#x", le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri));
- sprintf(result[j++], "%u", le16_to_cpu(desc->tx.mss_hw_csum));
+ seq_printf(s, "%-8d%#-20llx%-10u%-6u%#-15x%-14u%-7u%-16u%#-14x%#-14x%-11u\n",
+ idx, le64_to_cpu(desc->addr),
+ le16_to_cpu(desc->tx.vlan_tag),
+ le16_to_cpu(desc->tx.send_size),
+ le32_to_cpu(desc->tx.type_cs_vlan_tso_len),
+ le16_to_cpu(desc->tx.outer_vlan_tag),
+ le16_to_cpu(desc->tx.tv),
+ le32_to_cpu(desc->tx.ol_type_vlan_len_msec),
+ le32_to_cpu(desc->tx.paylen_ol4cs),
+ le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri),
+ le16_to_cpu(desc->tx.mss_hw_csum));
}
-static int hns3_dbg_tx_bd_info(struct hns3_dbg_data *d, char *buf, int len)
+static int hns3_dbg_tx_bd_info(struct seq_file *s, void *private)
{
- char data_str[ARRAY_SIZE(tx_bd_info_items)][HNS3_DBG_DATA_STR_LEN];
- struct hns3_nic_priv *priv = d->handle->priv;
- char *result[ARRAY_SIZE(tx_bd_info_items)];
- char content[HNS3_DBG_INFO_LEN];
+ struct hns3_dbg_data *data = s->private;
+ struct hnae3_handle *h = data->handle;
+ struct hns3_nic_priv *priv = h->priv;
struct hns3_enet_ring *ring;
struct hns3_desc *desc;
unsigned int i;
- int pos = 0;
- if (d->qid >= d->handle->kinfo.num_tqps) {
- dev_err(&d->handle->pdev->dev,
- "queue%u is not in use\n", d->qid);
+ if (data->qid >= h->kinfo.num_tqps) {
+ dev_err(&h->pdev->dev, "queue%u is not in use\n", data->qid);
return -EINVAL;
}
- for (i = 0; i < ARRAY_SIZE(tx_bd_info_items); i++)
- result[i] = &data_str[i][0];
-
- pos += scnprintf(buf + pos, len - pos,
- "Queue %u tx bd info:\n", d->qid);
- hns3_dbg_fill_content(content, sizeof(content), tx_bd_info_items,
- NULL, ARRAY_SIZE(tx_bd_info_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ seq_printf(s, "Queue %u tx bd info:\n", data->qid);
+ seq_puts(s, "BD_IDX ADDRESS VLAN_TAG SIZE ");
+ seq_puts(s, "T_CS_VLAN_TSO OT_VLAN_TAG TV OLT_VLAN_LEN ");
+ seq_puts(s, "PAYLEN_OL4CS BD_FE_SC_VLD MSS_HW_CSUM\n");
- ring = &priv->ring[d->qid];
+ ring = &priv->ring[data->qid];
for (i = 0; i < ring->desc_num; i++) {
desc = &ring->desc[i];
- hns3_dump_tx_bd_info(desc, result, i);
- hns3_dbg_fill_content(content, sizeof(content),
- tx_bd_info_items, (const char **)result,
- ARRAY_SIZE(tx_bd_info_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ hns3_dump_tx_bd_info(desc, s, i);
}
return 0;
}
-static void
-hns3_dbg_dev_caps(struct hnae3_handle *h, char *buf, int len, int *pos)
+static void hns3_dbg_dev_caps(struct hnae3_handle *h, struct seq_file *s)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
- const char * const str[] = {"no", "yes"};
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
unsigned long *caps = ae_dev->caps;
u32 i, state;
- *pos += scnprintf(buf + *pos, len - *pos, "dev capability:\n");
+ seq_puts(s, "dev capability:\n");
for (i = 0; i < ARRAY_SIZE(hns3_dbg_cap); i++) {
state = test_bit(hns3_dbg_cap[i].cap_bit, caps);
- *pos += scnprintf(buf + *pos, len - *pos, "%s: %s\n",
- hns3_dbg_cap[i].name, str[state]);
+ seq_printf(s, "%s: %s\n", hns3_dbg_cap[i].name,
+ str_yes_no(state));
}
- *pos += scnprintf(buf + *pos, len - *pos, "\n");
+ seq_puts(s, "\n");
}
-static void
-hns3_dbg_dev_specs(struct hnae3_handle *h, char *buf, int len, int *pos)
+static void hns3_dbg_dev_specs(struct hnae3_handle *h, struct seq_file *s)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
struct hnae3_dev_specs *dev_specs = &ae_dev->dev_specs;
struct hnae3_knic_private_info *kinfo = &h->kinfo;
struct net_device *dev = kinfo->netdev;
- *pos += scnprintf(buf + *pos, len - *pos, "dev_spec:\n");
- *pos += scnprintf(buf + *pos, len - *pos, "MAC entry num: %u\n",
- dev_specs->mac_entry_num);
- *pos += scnprintf(buf + *pos, len - *pos, "MNG entry num: %u\n",
- dev_specs->mng_entry_num);
- *pos += scnprintf(buf + *pos, len - *pos, "MAX non tso bd num: %u\n",
- dev_specs->max_non_tso_bd_num);
- *pos += scnprintf(buf + *pos, len - *pos, "RSS ind tbl size: %u\n",
- dev_specs->rss_ind_tbl_size);
- *pos += scnprintf(buf + *pos, len - *pos, "RSS key size: %u\n",
- dev_specs->rss_key_size);
- *pos += scnprintf(buf + *pos, len - *pos, "RSS size: %u\n",
- kinfo->rss_size);
- *pos += scnprintf(buf + *pos, len - *pos, "Allocated RSS size: %u\n",
- kinfo->req_rss_size);
- *pos += scnprintf(buf + *pos, len - *pos,
- "Task queue pairs numbers: %u\n",
- kinfo->num_tqps);
- *pos += scnprintf(buf + *pos, len - *pos, "RX buffer length: %u\n",
- kinfo->rx_buf_len);
- *pos += scnprintf(buf + *pos, len - *pos, "Desc num per TX queue: %u\n",
- kinfo->num_tx_desc);
- *pos += scnprintf(buf + *pos, len - *pos, "Desc num per RX queue: %u\n",
- kinfo->num_rx_desc);
- *pos += scnprintf(buf + *pos, len - *pos,
- "Total number of enabled TCs: %u\n",
- kinfo->tc_info.num_tc);
- *pos += scnprintf(buf + *pos, len - *pos, "MAX INT QL: %u\n",
- dev_specs->int_ql_max);
- *pos += scnprintf(buf + *pos, len - *pos, "MAX INT GL: %u\n",
- dev_specs->max_int_gl);
- *pos += scnprintf(buf + *pos, len - *pos, "MAX TM RATE: %u\n",
- dev_specs->max_tm_rate);
- *pos += scnprintf(buf + *pos, len - *pos, "MAX QSET number: %u\n",
- dev_specs->max_qset_num);
- *pos += scnprintf(buf + *pos, len - *pos, "umv size: %u\n",
- dev_specs->umv_size);
- *pos += scnprintf(buf + *pos, len - *pos, "mc mac size: %u\n",
- dev_specs->mc_mac_size);
- *pos += scnprintf(buf + *pos, len - *pos, "MAC statistics number: %u\n",
- dev_specs->mac_stats_num);
- *pos += scnprintf(buf + *pos, len - *pos,
- "TX timeout threshold: %d seconds\n",
- dev->watchdog_timeo / HZ);
- *pos += scnprintf(buf + *pos, len - *pos, "Hilink Version: %u\n",
- dev_specs->hilink_version);
+ seq_puts(s, "dev_spec:\n");
+ seq_printf(s, "MAC entry num: %u\n", dev_specs->mac_entry_num);
+ seq_printf(s, "MNG entry num: %u\n", dev_specs->mng_entry_num);
+ seq_printf(s, "MAX non tso bd num: %u\n",
+ dev_specs->max_non_tso_bd_num);
+ seq_printf(s, "RSS ind tbl size: %u\n", dev_specs->rss_ind_tbl_size);
+ seq_printf(s, "RSS key size: %u\n", dev_specs->rss_key_size);
+ seq_printf(s, "RSS size: %u\n", kinfo->rss_size);
+ seq_printf(s, "Allocated RSS size: %u\n", kinfo->req_rss_size);
+ seq_printf(s, "Task queue pairs numbers: %u\n", kinfo->num_tqps);
+ seq_printf(s, "RX buffer length: %u\n", kinfo->rx_buf_len);
+ seq_printf(s, "Desc num per TX queue: %u\n", kinfo->num_tx_desc);
+ seq_printf(s, "Desc num per RX queue: %u\n", kinfo->num_rx_desc);
+ seq_printf(s, "Total number of enabled TCs: %u\n",
+ kinfo->tc_info.num_tc);
+ seq_printf(s, "MAX INT QL: %u\n", dev_specs->int_ql_max);
+ seq_printf(s, "MAX INT GL: %u\n", dev_specs->max_int_gl);
+ seq_printf(s, "MAX TM RATE: %u\n", dev_specs->max_tm_rate);
+ seq_printf(s, "MAX QSET number: %u\n", dev_specs->max_qset_num);
+ seq_printf(s, "umv size: %u\n", dev_specs->umv_size);
+ seq_printf(s, "mc mac size: %u\n", dev_specs->mc_mac_size);
+ seq_printf(s, "MAC statistics number: %u\n", dev_specs->mac_stats_num);
+ seq_printf(s, "TX timeout threshold: %d seconds\n",
+ dev->watchdog_timeo / HZ);
+ seq_printf(s, "mac tunnel number: %u\n", dev_specs->tnl_num);
+ seq_printf(s, "Hilink Version: %u\n", dev_specs->hilink_version);
}
-static int hns3_dbg_dev_info(struct hnae3_handle *h, char *buf, int len)
+static int hns3_dbg_dev_info(struct seq_file *s, void *data)
{
- int pos = 0;
-
- hns3_dbg_dev_caps(h, buf, len, &pos);
+ struct hnae3_handle *h = hnae3_seq_file_to_handle(s);
- hns3_dbg_dev_specs(h, buf, len, &pos);
+ hns3_dbg_dev_caps(h, s);
+ hns3_dbg_dev_specs(h, s);
return 0;
}
-static const struct hns3_dbg_item page_pool_info_items[] = {
- { "QUEUE_ID", 2 },
- { "ALLOCATE_CNT", 2 },
- { "FREE_CNT", 6 },
- { "POOL_SIZE(PAGE_NUM)", 2 },
- { "ORDER", 2 },
- { "NUMA_ID", 2 },
- { "MAX_LEN", 2 },
-};
-
static void hns3_dump_page_pool_info(struct hns3_enet_ring *ring,
- char **result, u32 index)
+ struct seq_file *s, u32 index)
{
- u32 j = 0;
-
- sprintf(result[j++], "%u", index);
- sprintf(result[j++], "%u",
- READ_ONCE(ring->page_pool->pages_state_hold_cnt));
- sprintf(result[j++], "%d",
- atomic_read(&ring->page_pool->pages_state_release_cnt));
- sprintf(result[j++], "%u", ring->page_pool->p.pool_size);
- sprintf(result[j++], "%u", ring->page_pool->p.order);
- sprintf(result[j++], "%d", ring->page_pool->p.nid);
- sprintf(result[j++], "%uK", ring->page_pool->p.max_len / 1024);
+ seq_printf(s, "%-10u%-14u%-14d%-21u%-7u%-9d%uK\n",
+ index,
+ READ_ONCE(ring->page_pool->pages_state_hold_cnt),
+ atomic_read(&ring->page_pool->pages_state_release_cnt),
+ ring->page_pool->p.pool_size,
+ ring->page_pool->p.order,
+ ring->page_pool->p.nid,
+ ring->page_pool->p.max_len / 1024);
}
-static int
-hns3_dbg_page_pool_info(struct hnae3_handle *h, char *buf, int len)
+static int hns3_dbg_page_pool_info(struct seq_file *s, void *data)
{
- char data_str[ARRAY_SIZE(page_pool_info_items)][HNS3_DBG_DATA_STR_LEN];
- char *result[ARRAY_SIZE(page_pool_info_items)];
+ struct hnae3_handle *h = hnae3_seq_file_to_handle(s);
struct hns3_nic_priv *priv = h->priv;
- char content[HNS3_DBG_INFO_LEN];
struct hns3_enet_ring *ring;
- int pos = 0;
u32 i;
if (!priv->ring) {
@@ -1159,182 +800,50 @@ hns3_dbg_page_pool_info(struct hnae3_handle *h, char *buf, int len)
return -EFAULT;
}
- for (i = 0; i < ARRAY_SIZE(page_pool_info_items); i++)
- result[i] = &data_str[i][0];
+ seq_puts(s, "QUEUE_ID ALLOCATE_CNT FREE_CNT ");
+ seq_puts(s, "POOL_SIZE(PAGE_NUM) ORDER NUMA_ID MAX_LEN\n");
- hns3_dbg_fill_content(content, sizeof(content), page_pool_info_items,
- NULL, ARRAY_SIZE(page_pool_info_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
for (i = 0; i < h->kinfo.num_tqps; i++) {
if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
return -EPERM;
+
ring = &priv->ring[(u32)(i + h->kinfo.num_tqps)];
- hns3_dump_page_pool_info(ring, result, i);
- hns3_dbg_fill_content(content, sizeof(content),
- page_pool_info_items,
- (const char **)result,
- ARRAY_SIZE(page_pool_info_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ hns3_dump_page_pool_info(ring, s, i);
}
return 0;
}
-static int hns3_dbg_get_cmd_index(struct hns3_dbg_data *dbg_data, u32 *index)
-{
- u32 i;
-
- for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++) {
- if (hns3_dbg_cmd[i].cmd == dbg_data->cmd) {
- *index = i;
- return 0;
- }
- }
-
- dev_err(&dbg_data->handle->pdev->dev, "unknown command(%d)\n",
- dbg_data->cmd);
- return -EINVAL;
-}
-
-static const struct hns3_dbg_func hns3_dbg_cmd_func[] = {
- {
- .cmd = HNAE3_DBG_CMD_QUEUE_MAP,
- .dbg_dump = hns3_dbg_queue_map,
- },
- {
- .cmd = HNAE3_DBG_CMD_DEV_INFO,
- .dbg_dump = hns3_dbg_dev_info,
- },
- {
- .cmd = HNAE3_DBG_CMD_TX_BD,
- .dbg_dump_bd = hns3_dbg_tx_bd_info,
- },
- {
- .cmd = HNAE3_DBG_CMD_RX_BD,
- .dbg_dump_bd = hns3_dbg_rx_bd_info,
- },
- {
- .cmd = HNAE3_DBG_CMD_RX_QUEUE_INFO,
- .dbg_dump = hns3_dbg_rx_queue_info,
- },
- {
- .cmd = HNAE3_DBG_CMD_TX_QUEUE_INFO,
- .dbg_dump = hns3_dbg_tx_queue_info,
- },
- {
- .cmd = HNAE3_DBG_CMD_PAGE_POOL_INFO,
- .dbg_dump = hns3_dbg_page_pool_info,
- },
- {
- .cmd = HNAE3_DBG_CMD_COAL_INFO,
- .dbg_dump = hns3_dbg_coal_info,
- },
-};
-
-static int hns3_dbg_read_cmd(struct hns3_dbg_data *dbg_data,
- enum hnae3_dbg_cmd cmd, char *buf, int len)
-{
- const struct hnae3_ae_ops *ops = dbg_data->handle->ae_algo->ops;
- const struct hns3_dbg_func *cmd_func;
- u32 i;
-
- for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd_func); i++) {
- if (cmd == hns3_dbg_cmd_func[i].cmd) {
- cmd_func = &hns3_dbg_cmd_func[i];
- if (cmd_func->dbg_dump)
- return cmd_func->dbg_dump(dbg_data->handle, buf,
- len);
- else
- return cmd_func->dbg_dump_bd(dbg_data, buf,
- len);
- }
- }
-
- if (!ops->dbg_read_cmd)
- return -EOPNOTSUPP;
-
- return ops->dbg_read_cmd(dbg_data->handle, cmd, buf, len);
-}
-
-static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer,
- size_t count, loff_t *ppos)
+static int hns3_dbg_bd_info_show(struct seq_file *s, void *private)
{
- struct hns3_dbg_data *dbg_data = filp->private_data;
- struct hnae3_handle *handle = dbg_data->handle;
- struct hns3_nic_priv *priv = handle->priv;
- ssize_t size = 0;
- char **save_buf;
- char *read_buf;
- u32 index;
- int ret;
-
- ret = hns3_dbg_get_cmd_index(dbg_data, &index);
- if (ret)
- return ret;
-
- mutex_lock(&handle->dbgfs_lock);
- save_buf = &handle->dbgfs_buf[index];
+ struct hns3_dbg_data *data = s->private;
+ struct hnae3_handle *h = data->handle;
+ struct hns3_nic_priv *priv = h->priv;
if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
- test_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) {
- ret = -EBUSY;
- goto out;
- }
-
- if (*save_buf) {
- read_buf = *save_buf;
- } else {
- read_buf = kvzalloc(hns3_dbg_cmd[index].buf_len, GFP_KERNEL);
- if (!read_buf) {
- ret = -ENOMEM;
- goto out;
- }
+ test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
+ return -EBUSY;
- /* save the buffer addr until the last read operation */
- *save_buf = read_buf;
+ if (data->cmd == HNAE3_DBG_CMD_TX_BD)
+ return hns3_dbg_tx_bd_info(s, private);
+ else if (data->cmd == HNAE3_DBG_CMD_RX_BD)
+ return hns3_dbg_rx_bd_info(s, private);
- /* get data ready for the first time to read */
- ret = hns3_dbg_read_cmd(dbg_data, hns3_dbg_cmd[index].cmd,
- read_buf, hns3_dbg_cmd[index].buf_len);
- if (ret)
- goto out;
- }
-
- size = simple_read_from_buffer(buffer, count, ppos, read_buf,
- strlen(read_buf));
- if (size > 0) {
- mutex_unlock(&handle->dbgfs_lock);
- return size;
- }
-
-out:
- /* free the buffer for the last read operation */
- if (*save_buf) {
- kvfree(*save_buf);
- *save_buf = NULL;
- }
-
- mutex_unlock(&handle->dbgfs_lock);
- return ret;
+ return -EOPNOTSUPP;
}
-
-static const struct file_operations hns3_dbg_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = hns3_dbg_read,
-};
+DEFINE_SHOW_ATTRIBUTE(hns3_dbg_bd_info);
static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, u32 cmd)
{
- struct dentry *entry_dir;
struct hns3_dbg_data *data;
+ struct dentry *entry_dir;
u16 max_queue_num;
unsigned int i;
entry_dir = hns3_dbg_dentry[hns3_dbg_cmd[cmd].dentry].dentry;
max_queue_num = hns3_get_max_available_channels(handle);
- data = devm_kzalloc(&handle->pdev->dev, max_queue_num * sizeof(*data),
+ data = devm_kcalloc(&handle->pdev->dev, max_queue_num, sizeof(*data),
GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -1347,45 +856,77 @@ static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, u32 cmd)
data[i].qid = i;
sprintf(name, "%s%u", hns3_dbg_cmd[cmd].name, i);
debugfs_create_file(name, 0400, entry_dir, &data[i],
- &hns3_dbg_fops);
+ &hns3_dbg_bd_info_fops);
}
return 0;
}
-static int
-hns3_dbg_common_file_init(struct hnae3_handle *handle, u32 cmd)
+static int hns3_dbg_common_init_t1(struct hnae3_handle *handle, u32 cmd)
{
- struct hns3_dbg_data *data;
+ struct device *dev = &handle->pdev->dev;
struct dentry *entry_dir;
+ read_func func = NULL;
+
+ switch (hns3_dbg_cmd[cmd].cmd) {
+ case HNAE3_DBG_CMD_TX_QUEUE_INFO:
+ func = hns3_dbg_tx_queue_info;
+ break;
+ case HNAE3_DBG_CMD_RX_QUEUE_INFO:
+ func = hns3_dbg_rx_queue_info;
+ break;
+ case HNAE3_DBG_CMD_QUEUE_MAP:
+ func = hns3_dbg_queue_map;
+ break;
+ case HNAE3_DBG_CMD_PAGE_POOL_INFO:
+ func = hns3_dbg_page_pool_info;
+ break;
+ case HNAE3_DBG_CMD_COAL_INFO:
+ func = hns3_dbg_coal_info;
+ break;
+ case HNAE3_DBG_CMD_DEV_INFO:
+ func = hns3_dbg_dev_info;
+ break;
+ default:
+ return -EINVAL;
+ }
- data = devm_kzalloc(&handle->pdev->dev, sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
+ entry_dir = hns3_dbg_dentry[hns3_dbg_cmd[cmd].dentry].dentry;
+ debugfs_create_devm_seqfile(dev, hns3_dbg_cmd[cmd].name, entry_dir,
+ func);
+
+ return 0;
+}
+
+static int hns3_dbg_common_init_t2(struct hnae3_handle *handle, u32 cmd)
+{
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
+ struct device *dev = &handle->pdev->dev;
+ struct dentry *entry_dir;
+ read_func func;
+ int ret;
+
+ if (!ops->dbg_get_read_func)
+ return 0;
+
+ ret = ops->dbg_get_read_func(handle, hns3_dbg_cmd[cmd].cmd, &func);
+ if (ret)
+ return ret;
- data->handle = handle;
- data->cmd = hns3_dbg_cmd[cmd].cmd;
entry_dir = hns3_dbg_dentry[hns3_dbg_cmd[cmd].dentry].dentry;
- debugfs_create_file(hns3_dbg_cmd[cmd].name, 0400, entry_dir,
- data, &hns3_dbg_fops);
+ debugfs_create_devm_seqfile(dev, hns3_dbg_cmd[cmd].name, entry_dir,
+ func);
return 0;
}
int hns3_dbg_init(struct hnae3_handle *handle)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
const char *name = pci_name(handle->pdev);
int ret;
u32 i;
- handle->dbgfs_buf = devm_kcalloc(&handle->pdev->dev,
- ARRAY_SIZE(hns3_dbg_cmd),
- sizeof(*handle->dbgfs_buf),
- GFP_KERNEL);
- if (!handle->dbgfs_buf)
- return -ENOMEM;
-
hns3_dbg_dentry[HNS3_DBG_DENTRY_COMMON].dentry =
debugfs_create_dir(name, hns3_dbgfs_root);
handle->hnae3_dbgfs = hns3_dbg_dentry[HNS3_DBG_DENTRY_COMMON].dentry;
@@ -1395,8 +936,6 @@ int hns3_dbg_init(struct hnae3_handle *handle)
debugfs_create_dir(hns3_dbg_dentry[i].name,
handle->hnae3_dbgfs);
- mutex_init(&handle->dbgfs_lock);
-
for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++) {
if ((hns3_dbg_cmd[i].cmd == HNAE3_DBG_CMD_TM_NODES &&
ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) ||
@@ -1425,24 +964,13 @@ int hns3_dbg_init(struct hnae3_handle *handle)
out:
debugfs_remove_recursive(handle->hnae3_dbgfs);
handle->hnae3_dbgfs = NULL;
- mutex_destroy(&handle->dbgfs_lock);
return ret;
}
void hns3_dbg_uninit(struct hnae3_handle *handle)
{
- u32 i;
-
debugfs_remove_recursive(handle->hnae3_dbgfs);
handle->hnae3_dbgfs = NULL;
-
- for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++)
- if (handle->dbgfs_buf[i]) {
- kvfree(handle->dbgfs_buf[i]);
- handle->dbgfs_buf[i] = NULL;
- }
-
- mutex_destroy(&handle->dbgfs_lock);
}
void hns3_dbg_register_debugfs(const char *debugfs_dir_name)
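
The hns3_debugfs.c changes above replace hand-rolled dump buffers (scnprintf() into kvzalloc()'d memory, guarded by dbgfs_lock) with seq_file-backed debugfs entries. A minimal, self-contained sketch of that pattern, independent of hns3 (the demo_* names are invented for illustration):

#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

static struct dentry *demo_dir;

static int demo_stats_show(struct seq_file *s, void *unused)
{
	/* seq_printf() grows the output as needed, so no preallocated
	 * dump buffer, length accounting or locking around reads is
	 * required in the driver.
	 */
	seq_printf(s, "counter: %u\n", 42U);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(demo_stats);	/* generates demo_stats_fops */

static int __init demo_init(void)
{
	demo_dir = debugfs_create_dir("demo", NULL);
	debugfs_create_file("stats", 0400, demo_dir, NULL, &demo_stats_fops);
	return 0;
}

static void __exit demo_exit(void)
{
	debugfs_remove_recursive(demo_dir);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
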
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
index 4a5ef8a90a10..57c9d3fc1b27 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
@@ -6,15 +6,6 @@
#include "hnae3.h"
-#define HNS3_DBG_READ_LEN 65536
-#define HNS3_DBG_READ_LEN_128KB 0x20000
-#define HNS3_DBG_READ_LEN_1MB 0x100000
-#define HNS3_DBG_READ_LEN_4MB 0x400000
-#define HNS3_DBG_READ_LEN_5MB 0x500000
-#define HNS3_DBG_WRITE_LEN 1024
-
-#define HNS3_DBG_DATA_STR_LEN 32
-#define HNS3_DBG_INFO_LEN 256
#define HNS3_DBG_ITEM_NAME_LEN 32
#define HNS3_DBG_FILE_NAME_LEN 16
@@ -49,16 +40,9 @@ struct hns3_dbg_cmd_info {
const char *name;
enum hnae3_dbg_cmd cmd;
enum hns3_dbg_dentry_type dentry;
- u32 buf_len;
int (*init)(struct hnae3_handle *handle, unsigned int cmd);
};
-struct hns3_dbg_func {
- enum hnae3_dbg_cmd cmd;
- int (*dbg_dump)(struct hnae3_handle *handle, char *buf, int len);
- int (*dbg_dump_bd)(struct hns3_dbg_data *data, char *buf, int len);
-};
-
struct hns3_dbg_cap_info {
const char *name;
enum HNAE3_DEV_CAP_BITS cap_bit;
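
For the simpler command files the patch relies on debugfs_create_devm_seqfile(), which ties the file's lifetime to the owning device and is why the buf_len bookkeeping removed above is no longer needed. A rough sketch of wiring up such a callback (demo_* names are hypothetical; the helper hands the struct device back through s->private):

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/seq_file.h>

static int demo_regs_read(struct seq_file *s, void *data)
{
	struct device *dev = s->private;	/* provided by the devm helper */

	seq_printf(s, "device: %s\n", dev_name(dev));
	return 0;
}

static void demo_debugfs_init(struct device *dev, struct dentry *parent)
{
	/* The entry (and its seq_file state) is torn down automatically
	 * when @dev is unbound, so no explicit cleanup path is needed.
	 */
	debugfs_create_devm_seqfile(dev, "regs", parent, demo_regs_read);
}
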
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 19668a8d22f7..7a0654e2d3dd 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -11,6 +11,7 @@
#include <linux/irq.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
@@ -473,20 +474,14 @@ static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
writel(mask_en, tqp_vector->mask_addr);
}
-static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
+static void hns3_irq_enable(struct hns3_enet_tqp_vector *tqp_vector)
{
napi_enable(&tqp_vector->napi);
enable_irq(tqp_vector->vector_irq);
-
- /* enable vector */
- hns3_mask_vector_irq(tqp_vector, 1);
}
-static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
+static void hns3_irq_disable(struct hns3_enet_tqp_vector *tqp_vector)
{
- /* disable vector */
- hns3_mask_vector_irq(tqp_vector, 0);
-
disable_irq(tqp_vector->vector_irq);
napi_disable(&tqp_vector->napi);
cancel_work_sync(&tqp_vector->rx_group.dim.work);
@@ -553,9 +548,9 @@ void hns3_set_vector_coalesce_rx_ql(struct hns3_enet_tqp_vector *tqp_vector,
static void hns3_vector_coalesce_init(struct hns3_enet_tqp_vector *tqp_vector,
struct hns3_nic_priv *priv)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal;
struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal;
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle);
struct hns3_enet_coalesce *ptx_coal = &priv->tx_coal;
struct hns3_enet_coalesce *prx_coal = &priv->rx_coal;
@@ -707,11 +702,42 @@ static int hns3_set_rx_cpu_rmap(struct net_device *netdev)
return 0;
}
+static void hns3_enable_irqs_and_tqps(struct net_device *netdev)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+ u16 i;
+
+ for (i = 0; i < priv->vector_num; i++)
+ hns3_irq_enable(&priv->tqp_vector[i]);
+
+ for (i = 0; i < priv->vector_num; i++)
+ hns3_mask_vector_irq(&priv->tqp_vector[i], 1);
+
+ for (i = 0; i < h->kinfo.num_tqps; i++)
+ hns3_tqp_enable(h->kinfo.tqp[i]);
+}
+
+static void hns3_disable_irqs_and_tqps(struct net_device *netdev)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+ u16 i;
+
+ for (i = 0; i < h->kinfo.num_tqps; i++)
+ hns3_tqp_disable(h->kinfo.tqp[i]);
+
+ for (i = 0; i < priv->vector_num; i++)
+ hns3_mask_vector_irq(&priv->tqp_vector[i], 0);
+
+ for (i = 0; i < priv->vector_num; i++)
+ hns3_irq_disable(&priv->tqp_vector[i]);
+}
+
static int hns3_nic_net_up(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = priv->ae_handle;
- int i, j;
int ret;
ret = hns3_nic_reset_all_ring(h);
@@ -720,23 +746,13 @@ static int hns3_nic_net_up(struct net_device *netdev)
clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
- /* enable the vectors */
- for (i = 0; i < priv->vector_num; i++)
- hns3_vector_enable(&priv->tqp_vector[i]);
-
- /* enable rcb */
- for (j = 0; j < h->kinfo.num_tqps; j++)
- hns3_tqp_enable(h->kinfo.tqp[j]);
+ hns3_enable_irqs_and_tqps(netdev);
/* start the ae_dev */
ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
if (ret) {
set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
- while (j--)
- hns3_tqp_disable(h->kinfo.tqp[j]);
-
- for (j = i - 1; j >= 0; j--)
- hns3_vector_disable(&priv->tqp_vector[j]);
+ hns3_disable_irqs_and_tqps(netdev);
}
return ret;
@@ -823,17 +839,9 @@ static void hns3_reset_tx_queue(struct hnae3_handle *h)
static void hns3_nic_net_down(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = hns3_get_handle(netdev);
const struct hnae3_ae_ops *ops;
- int i;
-
- /* disable vectors */
- for (i = 0; i < priv->vector_num; i++)
- hns3_vector_disable(&priv->tqp_vector[i]);
- /* disable rcb */
- for (i = 0; i < h->kinfo.num_tqps; i++)
- hns3_tqp_disable(h->kinfo.tqp[i]);
+ hns3_disable_irqs_and_tqps(netdev);
/* stop ae_dev */
ops = priv->ae_handle->ae_algo->ops;
@@ -953,7 +961,7 @@ static void hns3_nic_set_rx_mode(struct net_device *netdev)
void hns3_request_update_promisc_mode(struct hnae3_handle *handle)
{
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
if (ops->request_update_promisc_mode)
ops->request_update_promisc_mode(handle);
@@ -1032,6 +1040,8 @@ static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring,
static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
{
u32 alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size;
+ struct net_device *netdev = ring_to_netdev(ring);
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hns3_tx_spare *tx_spare;
struct page *page;
dma_addr_t dma;
@@ -1073,6 +1083,7 @@ static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
tx_spare->buf = page_address(page);
tx_spare->len = PAGE_SIZE << order;
ring->tx_spare = tx_spare;
+ ring->tx_copybreak = priv->tx_copybreak;
return;
dma_mapping_error:
@@ -1297,7 +1308,7 @@ static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
{
struct hns3_nic_priv *priv = netdev_priv(skb->dev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle);
union l4_hdr_info l4;
/* device version above V3(include V3), the hardware can
@@ -1497,7 +1508,7 @@ static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring,
* VLAN enabled, only one VLAN header is allowed in skb, otherwise it
* will cause RAS error.
*/
- ae_dev = pci_get_drvdata(handle->pdev);
+ ae_dev = hns3_get_ae_dev(handle);
if (unlikely(skb_vlan_tagged_multi(skb) &&
ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
handle->port_base_vlan_state ==
@@ -1683,8 +1694,8 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, dma_addr_t dma,
#define HNS3_LIKELY_BD_NUM 1
struct hns3_desc *desc = &ring->desc[ring->next_to_use];
- unsigned int frag_buf_num;
- int k, sizeoflast;
+ unsigned int frag_buf_num, k;
+ int sizeoflast;
if (likely(size <= HNS3_MAX_BD_SIZE)) {
desc->addr = cpu_to_le64(dma);
@@ -1856,7 +1867,7 @@ static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size,
unsigned int bd_num, u8 max_non_tso_bd_num)
{
unsigned int tot_len = 0;
- int i;
+ unsigned int i;
for (i = 0; i < max_non_tso_bd_num - 1U; i++)
tot_len += bd_size[i];
@@ -1884,7 +1895,7 @@ static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size,
void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size)
{
- int i;
+ u32 i;
for (i = 0; i < MAX_SKB_FRAGS; i++)
size[i] = skb_frag_size(&shinfo->frags[i]);
@@ -2068,8 +2079,6 @@ static void hns3_tx_push_bd(struct hns3_enet_ring *ring, int num)
__iowrite64_copy(ring->tqp->mem_base, desc,
(sizeof(struct hns3_desc) * HNS3_MAX_PUSH_BD_NUM) /
HNS3_BYTES_PER_64BIT);
-
- io_stop_wc();
}
static void hns3_tx_mem_doorbell(struct hns3_enet_ring *ring)
@@ -2088,8 +2097,6 @@ static void hns3_tx_mem_doorbell(struct hns3_enet_ring *ring)
u64_stats_update_begin(&ring->syncp);
ring->stats.tx_mem_doorbell += ring->pending_buf;
u64_stats_update_end(&ring->syncp);
-
- io_stop_wc();
}
static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
@@ -2103,7 +2110,7 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
*/
if (test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state) && num &&
!ring->pending_buf && num <= HNS3_MAX_PUSH_BD_NUM && doorbell) {
- /* This smp_store_release() pairs with smp_load_aquire() in
+ /* This smp_store_release() pairs with smp_load_acquire() in
* hns3_nic_reclaim_desc(). Ensure that the BD valid bit
* is updated.
*/
@@ -2119,7 +2126,7 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
return;
}
- /* This smp_store_release() pairs with smp_load_aquire() in
+ /* This smp_store_release() pairs with smp_load_acquire() in
* hns3_nic_reclaim_desc(). Ensure that the BD valid bit is updated.
*/
smp_store_release(&ring->last_to_use, ring->next_to_use);
@@ -2204,9 +2211,9 @@ static int hns3_handle_tx_sgl(struct hns3_enet_ring *ring,
struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
u32 nfrag = skb_shinfo(skb)->nr_frags + 1;
struct sg_table *sgt;
- int i, bd_num = 0;
+ int bd_num = 0;
dma_addr_t dma;
- u32 cb_len;
+ u32 cb_len, i;
int nents;
if (skb_has_frag_list(skb))
@@ -2412,6 +2419,35 @@ static int hns3_nic_do_ioctl(struct net_device *netdev,
return h->ae_algo->ops->do_ioctl(h, ifr, cmd);
}
+static int hns3_nic_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (!netif_running(netdev))
+ return -EINVAL;
+
+ if (!h->ae_algo->ops->hwtstamp_get)
+ return -EOPNOTSUPP;
+
+ return h->ae_algo->ops->hwtstamp_get(h, config);
+}
+
+static int hns3_nic_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (!netif_running(netdev))
+ return -EINVAL;
+
+ if (!h->ae_algo->ops->hwtstamp_set)
+ return -EOPNOTSUPP;
+
+ return h->ae_algo->ops->hwtstamp_set(h, config, extack);
+}
+
static int hns3_nic_set_features(struct net_device *netdev,
netdev_features_t features)
{
@@ -2444,7 +2480,7 @@ static int hns3_nic_set_features(struct net_device *netdev,
if ((netdev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) &&
h->ae_algo->ops->cls_flower_active(h)) {
netdev_err(netdev,
- "there are offloaded TC filters active, cannot disable HW TC offload");
+ "there are offloaded TC filters active, cannot disable HW TC offload\n");
return -EINVAL;
}
@@ -2456,7 +2492,6 @@ static int hns3_nic_set_features(struct net_device *netdev,
return ret;
}
- netdev->features = features;
return 0;
}
@@ -2542,7 +2577,7 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
struct hnae3_handle *handle = priv->ae_handle;
struct rtnl_link_stats64 ring_total_stats;
struct hns3_enet_ring *ring;
- unsigned int idx;
+ int idx;
if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
return;
@@ -2761,14 +2796,14 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
netdev_err(netdev, "failed to change MTU in hardware %d\n",
ret);
else
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
return ret;
}
static int hns3_get_timeout_queue(struct net_device *ndev)
{
- int i;
+ unsigned int i;
/* Find the stopped queue the same way the stack does */
for (i = 0; i < ndev->num_tx_queues; i++) {
@@ -2849,7 +2884,7 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = hns3_get_handle(ndev);
struct hns3_enet_ring *tx_ring;
- int timeout_queue;
+ u32 timeout_queue;
timeout_queue = hns3_get_timeout_queue(ndev);
if (timeout_queue >= ndev->num_tx_queues) {
@@ -3042,6 +3077,8 @@ static const struct net_device_ops hns3_nic_netdev_ops = {
.ndo_set_vf_rate = hns3_nic_set_vf_rate,
.ndo_set_vf_mac = hns3_nic_set_vf_mac,
.ndo_select_queue = hns3_nic_select_queue,
+ .ndo_hwtstamp_get = hns3_nic_hwtstamp_get,
+ .ndo_hwtstamp_set = hns3_nic_hwtstamp_set,
};
bool hns3_is_phys_func(struct pci_dev *pdev)
@@ -3539,6 +3576,9 @@ static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
ret = hns3_alloc_and_attach_buffer(ring, i);
if (ret)
goto out_buffer_fail;
+
+ if (!(i % HNS3_RESCHED_BD_NUM))
+ cond_resched();
}
return 0;
@@ -3816,7 +3856,7 @@ static int hns3_gro_complete(struct sk_buff *skb, u32 l234info)
{
__be16 type = skb->protocol;
struct tcphdr *th;
- int depth = 0;
+ u32 depth = 0;
while (eth_type_vlan(type)) {
struct vlan_hdr *vh;
@@ -4449,7 +4489,7 @@ static void hns3_update_rx_int_coalesce(struct hns3_enet_tqp_vector *tqp_vector)
dim_update_sample(tqp_vector->event_cnt, rx_group->total_packets,
rx_group->total_bytes, &sample);
- net_dim(&rx_group->dim, sample);
+ net_dim(&rx_group->dim, &sample);
}
static void hns3_update_tx_int_coalesce(struct hns3_enet_tqp_vector *tqp_vector)
@@ -4462,7 +4502,7 @@ static void hns3_update_tx_int_coalesce(struct hns3_enet_tqp_vector *tqp_vector)
dim_update_sample(tqp_vector->event_cnt, tx_group->total_packets,
tx_group->total_bytes, &sample);
- net_dim(&tx_group->dim, sample);
+ net_dim(&tx_group->dim, &sample);
}
static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
@@ -4742,7 +4782,7 @@ map_ring_fail:
static void hns3_nic_init_coal_cfg(struct hns3_nic_priv *priv)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle);
struct hns3_enet_coalesce *tx_coal = &priv->tx_coal;
struct hns3_enet_coalesce *rx_coal = &priv->rx_coal;
@@ -4869,6 +4909,30 @@ static void hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
devm_kfree(&pdev->dev, priv->tqp_vector);
}
+static void hns3_update_tx_spare_buf_config(struct hns3_nic_priv *priv)
+{
+#define HNS3_MIN_SPARE_BUF_SIZE (2 * 1024 * 1024)
+#define HNS3_MAX_PACKET_SIZE (64 * 1024)
+
+ struct iommu_domain *domain = iommu_get_domain_for_dev(priv->dev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle);
+ struct hnae3_handle *handle = priv->ae_handle;
+
+ if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3)
+ return;
+
+ if (!(domain && iommu_is_dma_domain(domain)))
+ return;
+
+ priv->min_tx_copybreak = HNS3_MAX_PACKET_SIZE;
+ priv->min_tx_spare_buf_size = HNS3_MIN_SPARE_BUF_SIZE;
+
+ if (priv->tx_copybreak < priv->min_tx_copybreak)
+ priv->tx_copybreak = priv->min_tx_copybreak;
+ if (handle->kinfo.tx_spare_buf_size < priv->min_tx_spare_buf_size)
+ handle->kinfo.tx_spare_buf_size = priv->min_tx_spare_buf_size;
+}
+
static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
unsigned int ring_type)
{
@@ -5102,6 +5166,7 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv)
int i, j;
int ret;
+ hns3_update_tx_spare_buf_config(priv);
for (i = 0; i < ring_num; i++) {
ret = hns3_alloc_ring_memory(&priv->ring[i]);
if (ret) {
@@ -5111,6 +5176,7 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv)
}
u64_stats_init(&priv->ring[i].syncp);
+ cond_resched();
}
return 0;
@@ -5220,7 +5286,7 @@ static void hns3_info_show(struct hns3_nic_priv *priv)
static void hns3_set_cq_period_mode(struct hns3_nic_priv *priv,
enum dim_cq_period_mode mode, bool is_tx)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle);
struct hnae3_handle *handle = priv->ae_handle;
int i;
@@ -5258,7 +5324,7 @@ void hns3_cq_period_mode_init(struct hns3_nic_priv *priv,
static void hns3_state_init(struct hnae3_handle *handle)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
struct net_device *netdev = handle->kinfo.netdev;
struct hns3_nic_priv *priv = netdev_priv(netdev);
@@ -5293,6 +5359,8 @@ static int hns3_client_init(struct hnae3_handle *handle)
struct net_device *netdev;
int ret;
+ ae_dev->handle = handle;
+
handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps,
&max_rss_size);
netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), alloc_tqps);
@@ -5305,6 +5373,8 @@ static int hns3_client_init(struct hnae3_handle *handle)
priv->ae_handle = handle;
priv->tx_timeout_count = 0;
priv->max_non_tso_bd_num = ae_dev->dev_specs.max_non_tso_bd_num;
+ priv->min_tx_copybreak = 0;
+ priv->min_tx_spare_buf_size = 0;
set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL);
@@ -5724,6 +5794,9 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
struct net_device *netdev = handle->kinfo.netdev;
struct hns3_nic_priv *priv = netdev_priv(netdev);
+ if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
+ hns3_nic_net_stop(netdev);
+
if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
netdev_warn(netdev, "already uninitialized\n");
return 0;
@@ -5862,8 +5935,6 @@ int hns3_set_channels(struct net_device *netdev,
void hns3_external_lb_prepare(struct net_device *ndev, bool if_running)
{
struct hns3_nic_priv *priv = netdev_priv(ndev);
- struct hnae3_handle *h = priv->ae_handle;
- int i;
if (!if_running)
return;
@@ -5874,11 +5945,7 @@ void hns3_external_lb_prepare(struct net_device *ndev, bool if_running)
netif_carrier_off(ndev);
netif_tx_disable(ndev);
- for (i = 0; i < priv->vector_num; i++)
- hns3_vector_disable(&priv->tqp_vector[i]);
-
- for (i = 0; i < h->kinfo.num_tqps; i++)
- hns3_tqp_disable(h->kinfo.tqp[i]);
+ hns3_disable_irqs_and_tqps(ndev);
/* delay ring buffer clearing to hns3_reset_notify_uninit_enet
* during reset process, because driver may not be able
@@ -5894,7 +5961,6 @@ void hns3_external_lb_restore(struct net_device *ndev, bool if_running)
{
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = priv->ae_handle;
- int i;
if (!if_running)
return;
@@ -5910,11 +5976,7 @@ void hns3_external_lb_restore(struct net_device *ndev, bool if_running)
clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
- for (i = 0; i < priv->vector_num; i++)
- hns3_vector_enable(&priv->tqp_vector[i]);
-
- for (i = 0; i < h->kinfo.num_tqps; i++)
- hns3_tqp_enable(h->kinfo.tqp[i]);
+ hns3_enable_irqs_and_tqps(ndev);
netif_tx_wake_all_queues(ndev);
@@ -5936,7 +5998,7 @@ static const struct hns3_hw_error_info hns3_hw_err[] = {
static void hns3_process_hw_error(struct hnae3_handle *handle,
enum hnae3_hw_error_type type)
{
- int i;
+ u32 i;
for (i = 0; i < ARRAY_SIZE(hns3_hw_err); i++) {
if (hns3_hw_err[i].type == type) {
@@ -5963,8 +6025,8 @@ static int __init hns3_init_module(void)
{
int ret;
- pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
- pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);
+ pr_debug("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
+ pr_debug("%s: %s\n", hns3_driver_name, hns3_copyright);
client.type = HNAE3_CLIENT_KNIC;
snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH, "%s",
@@ -6000,9 +6062,11 @@ module_init(hns3_init_module);
*/
static void __exit hns3_exit_module(void)
{
+ hnae3_acquire_unload_lock();
pci_unregister_driver(&hns3_driver);
hnae3_unregister_client(&client);
hns3_dbg_unregister_debugfs();
+ hnae3_release_unload_lock();
}
module_exit(hns3_exit_module);
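
Among the hns3_enet.c changes is the addition of the dedicated ndo_hwtstamp_get/ndo_hwtstamp_set callbacks based on struct kernel_hwtstamp_config. A hedged, stand-alone sketch of that callback shape (demo_priv and its single field are made up; a real driver would program the MAC/PHY timestamping unit in the set path):

#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/net_tstamp.h>

struct demo_priv {
	struct kernel_hwtstamp_config tstamp_cfg;	/* last accepted config */
};

static int demo_hwtstamp_get(struct net_device *ndev,
			     struct kernel_hwtstamp_config *cfg)
{
	struct demo_priv *priv = netdev_priv(ndev);

	*cfg = priv->tstamp_cfg;
	return 0;
}

static int demo_hwtstamp_set(struct net_device *ndev,
			     struct kernel_hwtstamp_config *cfg,
			     struct netlink_ext_ack *extack)
{
	struct demo_priv *priv = netdev_priv(ndev);

	if (cfg->tx_type != HWTSTAMP_TX_OFF && cfg->tx_type != HWTSTAMP_TX_ON) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported tx_type");
		return -ERANGE;
	}

	/* Hardware configuration would happen here. */
	priv->tstamp_cfg = *cfg;
	return 0;
}

static const struct net_device_ops demo_netdev_ops = {
	.ndo_hwtstamp_get	= demo_hwtstamp_get,
	.ndo_hwtstamp_set	= demo_hwtstamp_set,
};
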
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index acd756b0c7c9..933e3527ed82 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -214,6 +214,8 @@ enum hns3_nic_state {
#define HNS3_CQ_MODE_EQE 1U
#define HNS3_CQ_MODE_CQE 0U
+#define HNS3_RESCHED_BD_NUM 1024
+
enum hns3_pkt_l2t_type {
HNS3_L2_TYPE_UNICAST,
HNS3_L2_TYPE_MULTICAST,
@@ -594,6 +596,8 @@ struct hns3_nic_priv {
struct hns3_enet_coalesce rx_coal;
u32 tx_copybreak;
u32 rx_copybreak;
+ u32 min_tx_copybreak;
+ u32 min_tx_spare_buf_size;
};
union l3_hdr_info {
@@ -619,7 +623,7 @@ struct hns3_reset_type_map {
enum hnae3_reset_type rst_type;
};
-static inline int ring_space(struct hns3_enet_ring *ring)
+static inline u32 ring_space(struct hns3_enet_ring *ring)
{
/* This smp_load_acquire() pairs with smp_store_release() in
* hns3_nic_reclaim_one_desc called by hns3_clean_tx_ring.
@@ -690,7 +694,7 @@ static inline unsigned int hns3_page_order(struct hns3_enet_ring *ring)
/* iterator for handling rings in ring group */
#define hns3_for_each_ring(pos, head) \
- for (pos = (head).ring; (pos); pos = (pos)->next)
+ for ((pos) = (head).ring; (pos); (pos) = (pos)->next)
#define hns3_get_handle(ndev) \
(((struct hns3_nic_priv *)netdev_priv(ndev))->ae_handle)
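
The hns3_enet.h hunk keeps ring_space() built on an smp_load_acquire() that pairs with the smp_store_release() in the TX doorbell path. A small illustrative sketch of that producer/consumer pairing (demo_ring is invented, not the hns3 layout; the mask arithmetic assumes a power-of-two descriptor count):

#include <linux/compiler.h>
#include <asm/barrier.h>

struct demo_ring {
	unsigned int desc_num;		/* power of two, by assumption */
	unsigned int next_to_use;	/* producer index */
	unsigned int last_to_use;	/* producer index as published */
	unsigned int next_to_clean;	/* consumer index */
};

/* Producer: publish the new tail only after the descriptors it covers
 * have been fully written.
 */
static inline void demo_ring_publish(struct demo_ring *ring)
{
	smp_store_release(&ring->last_to_use, ring->next_to_use);
}

/* Consumer: an acquire load of the tail guarantees that descriptor
 * contents written before the paired release are visible.
 */
static inline unsigned int demo_ring_pending(struct demo_ring *ring)
{
	unsigned int tail = smp_load_acquire(&ring->last_to_use);

	return (tail - ring->next_to_clean) & (ring->desc_num - 1);
}
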
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 941cb529d671..a5eefa28454c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -3,6 +3,7 @@
#include <linux/etherdevice.h>
#include <linux/string.h>
+#include <linux/string_choices.h>
#include <linux/phy.h>
#include <linux/sfp.h>
@@ -85,7 +86,7 @@ static int hns3_get_sset_count(struct net_device *netdev, int stringset);
static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
int ret;
if (!h->ae_algo->ops->set_loopback ||
@@ -170,7 +171,7 @@ static void hns3_lp_setup_skb(struct sk_buff *skb)
* the purpose of mac or serdes selftest.
*/
handle = hns3_get_handle(ndev);
- ae_dev = pci_get_drvdata(handle->pdev);
+ ae_dev = hns3_get_ae_dev(handle);
if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
ethh->h_dest[5] += HNS3_NIC_LB_DST_MAC_ADDR;
eth_zero_addr(ethh->h_source);
@@ -435,7 +436,7 @@ static void hns3_self_test(struct net_device *ndev,
data[i] = HNS3_NIC_LB_TEST_UNEXECUTED;
if (hns3_nic_resetting(ndev)) {
- netdev_err(ndev, "dev resetting!");
+ netdev_err(ndev, "dev resetting!\n");
goto failure;
}
@@ -488,7 +489,7 @@ static const struct hns3_pflag_desc hns3_priv_flags[HNAE3_PFLAG_MAX] = {
static int hns3_get_sset_count(struct net_device *netdev, int stringset)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- const struct hnae3_ae_ops *ops = h->ae_algo->ops;
+ const struct hnae3_ae_ops *ops = hns3_get_ops(h);
if (!ops->get_sset_count)
return -EOPNOTSUPP;
@@ -509,73 +510,53 @@ static int hns3_get_sset_count(struct net_device *netdev, int stringset)
}
}
-static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats,
- u32 stat_count, u32 num_tqps, const char *prefix)
+static void hns3_update_strings(u8 **data, const struct hns3_stats *stats,
+ u32 stat_count, u32 num_tqps,
+ const char *prefix)
{
-#define MAX_PREFIX_SIZE (6 + 4)
- u32 size_left;
u32 i, j;
- u32 n1;
- for (i = 0; i < num_tqps; i++) {
- for (j = 0; j < stat_count; j++) {
- data[ETH_GSTRING_LEN - 1] = '\0';
-
- /* first, prepend the prefix string */
- n1 = scnprintf(data, MAX_PREFIX_SIZE, "%s%u_",
- prefix, i);
- size_left = (ETH_GSTRING_LEN - 1) - n1;
-
- /* now, concatenate the stats string to it */
- strncat(data, stats[j].stats_string, size_left);
- data += ETH_GSTRING_LEN;
- }
- }
-
- return data;
+ for (i = 0; i < num_tqps; i++)
+ for (j = 0; j < stat_count; j++)
+ ethtool_sprintf(data, "%s%u_%s", prefix, i,
+ stats[j].stats_string);
}
-static u8 *hns3_get_strings_tqps(struct hnae3_handle *handle, u8 *data)
+static void hns3_get_strings_tqps(struct hnae3_handle *handle, u8 **data)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
const char tx_prefix[] = "txq";
const char rx_prefix[] = "rxq";
/* get strings for Tx */
- data = hns3_update_strings(data, hns3_txq_stats, HNS3_TXQ_STATS_COUNT,
- kinfo->num_tqps, tx_prefix);
+ hns3_update_strings(data, hns3_txq_stats, HNS3_TXQ_STATS_COUNT,
+ kinfo->num_tqps, tx_prefix);
/* get strings for Rx */
- data = hns3_update_strings(data, hns3_rxq_stats, HNS3_RXQ_STATS_COUNT,
- kinfo->num_tqps, rx_prefix);
-
- return data;
+ hns3_update_strings(data, hns3_rxq_stats, HNS3_RXQ_STATS_COUNT,
+ kinfo->num_tqps, rx_prefix);
}
static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- const struct hnae3_ae_ops *ops = h->ae_algo->ops;
- char *buff = (char *)data;
- int i;
+ const struct hnae3_ae_ops *ops = hns3_get_ops(h);
+ u32 i;
if (!ops->get_strings)
return;
switch (stringset) {
case ETH_SS_STATS:
- buff = hns3_get_strings_tqps(h, buff);
- ops->get_strings(h, stringset, (u8 *)buff);
+ hns3_get_strings_tqps(h, &data);
+ ops->get_strings(h, stringset, &data);
break;
case ETH_SS_TEST:
- ops->get_strings(h, stringset, data);
+ ops->get_strings(h, stringset, &data);
break;
case ETH_SS_PRIV_FLAGS:
- for (i = 0; i < HNS3_PRIV_FLAGS_LEN; i++) {
- snprintf(buff, ETH_GSTRING_LEN, "%s",
- hns3_priv_flags[i].name);
- buff += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < HNS3_PRIV_FLAGS_LEN; i++)
+ ethtool_puts(&data, hns3_priv_flags[i].name);
break;
default:
break;
@@ -588,7 +569,7 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
struct hns3_nic_priv *nic_priv = handle->priv;
struct hns3_enet_ring *ring;
u8 *stat;
- int i, j;
+ u32 i, j;
/* get stats for Tx */
for (i = 0; i < kinfo->num_tqps; i++) {
@@ -711,7 +692,7 @@ static void hns3_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *param)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
if (!test_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps))
return;
@@ -725,7 +706,7 @@ static int hns3_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *param)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
if (!test_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps))
return -EOPNOTSUPP;
@@ -744,7 +725,7 @@ static int hns3_set_pauseparam(struct net_device *netdev,
static void hns3_get_ksettings(struct hnae3_handle *h,
struct ethtool_link_ksettings *cmd)
{
- const struct hnae3_ae_ops *ops = h->ae_algo->ops;
+ const struct hnae3_ae_ops *ops = hns3_get_ops(h);
/* 1.auto_neg & speed & duplex from cmd */
if (ops->get_ksettings_an_result)
@@ -770,7 +751,7 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
const struct hnae3_ae_ops *ops;
u8 module_type;
u8 media_type;
@@ -813,7 +794,7 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
break;
default:
- netdev_warn(netdev, "Unknown media type");
+ netdev_warn(netdev, "Unknown media type\n");
return 0;
}
@@ -833,7 +814,7 @@ static int hns3_check_ksettings_param(const struct net_device *netdev,
const struct ethtool_link_ksettings *cmd)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
u8 module_type = HNAE3_MODULE_TYPE_UNKNOWN;
u8 media_type = HNAE3_MEDIA_TYPE_UNKNOWN;
u32 lane_num;
@@ -861,7 +842,7 @@ static int hns3_check_ksettings_param(const struct net_device *netdev,
if (cmd->base.duplex == DUPLEX_HALF &&
media_type != HNAE3_MEDIA_TYPE_COPPER) {
netdev_err(netdev,
- "only copper port supports half duplex!");
+ "only copper port supports half duplex!\n");
return -EINVAL;
}
@@ -880,8 +861,8 @@ static int hns3_set_link_ksettings(struct net_device *netdev,
const struct ethtool_link_ksettings *cmd)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
int ret;
/* Chip don't support this mode. */
@@ -951,7 +932,7 @@ static u32 hns3_get_rss_key_size(struct net_device *netdev)
static u32 hns3_get_rss_indir_size(struct net_device *netdev)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
return ae_dev->dev_specs.rss_ind_tbl_size;
}
@@ -973,7 +954,7 @@ static int hns3_set_rss(struct net_device *netdev,
struct netlink_ext_ack *extack)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
if (!h->ae_algo->ops->set_rss)
return -EOPNOTSUPP;
@@ -997,6 +978,16 @@ static int hns3_set_rss(struct net_device *netdev,
rxfh->hfunc);
}
+static int hns3_get_rxfh_fields(struct net_device *netdev,
+ struct ethtool_rxfh_fields *cmd)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (h->ae_algo->ops->get_rss_tuple)
+ return h->ae_algo->ops->get_rss_tuple(h, cmd);
+ return -EOPNOTSUPP;
+}
+
static int hns3_get_rxnfc(struct net_device *netdev,
struct ethtool_rxnfc *cmd,
u32 *rule_locs)
@@ -1007,10 +998,6 @@ static int hns3_get_rxnfc(struct net_device *netdev,
case ETHTOOL_GRXRINGS:
cmd->data = h->kinfo.num_tqps;
return 0;
- case ETHTOOL_GRXFH:
- if (h->ae_algo->ops->get_rss_tuple)
- return h->ae_algo->ops->get_rss_tuple(h, cmd);
- return -EOPNOTSUPP;
case ETHTOOL_GRXCLSRLCNT:
if (h->ae_algo->ops->get_fd_rule_cnt)
return h->ae_algo->ops->get_fd_rule_cnt(h, cmd);
@@ -1043,8 +1030,8 @@ static int hns3_set_reset(struct net_device *netdev, u32 *flags)
{
enum hnae3_reset_type rst_type = HNAE3_NONE_RESET;
struct hnae3_handle *h = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
- const struct hnae3_ae_ops *ops = h->ae_algo->ops;
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
+ const struct hnae3_ae_ops *ops = hns3_get_ops(h);
const struct hns3_reset_type_map *rst_type_map;
enum ethtool_reset_flags rst_flags;
u32 i, size;
@@ -1208,7 +1195,7 @@ static int hns3_set_tx_push(struct net_device *netdev, u32 tx_push)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
u32 old_state = test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state);
if (!test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps) && tx_push)
@@ -1218,7 +1205,7 @@ static int hns3_set_tx_push(struct net_device *netdev, u32 tx_push)
return 0;
netdev_dbg(netdev, "Changing tx push from %s to %s\n",
- old_state ? "on" : "off", tx_push ? "on" : "off");
+ str_on_off(old_state), str_on_off(tx_push));
if (tx_push)
set_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state);
@@ -1294,15 +1281,22 @@ static int hns3_set_ringparam(struct net_device *ndev,
return ret;
}
+static int hns3_set_rxfh_fields(struct net_device *netdev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (h->ae_algo->ops->set_rss_tuple)
+ return h->ae_algo->ops->set_rss_tuple(h, cmd);
+ return -EOPNOTSUPP;
+}
+
static int hns3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- if (h->ae_algo->ops->set_rss_tuple)
- return h->ae_algo->ops->set_rss_tuple(h, cmd);
- return -EOPNOTSUPP;
case ETHTOOL_SRXCLSRLINS:
if (h->ae_algo->ops->add_fd_entry)
return h->ae_algo->ops->add_fd_entry(h, cmd);
@@ -1319,7 +1313,7 @@ static int hns3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
static int hns3_nway_reset(struct net_device *netdev)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
struct phy_device *phy = netdev->phydev;
int autoneg;
@@ -1327,7 +1321,7 @@ static int hns3_nway_reset(struct net_device *netdev)
return 0;
if (hns3_nic_resetting(netdev)) {
- netdev_err(netdev, "dev resetting!");
+ netdev_err(netdev, "dev resetting!\n");
return -EBUSY;
}
@@ -1396,7 +1390,7 @@ static int hns3_check_gl_coalesce_para(struct net_device *netdev,
struct ethtool_coalesce *cmd)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
u32 rx_gl, tx_gl;
if (cmd->rx_coalesce_usecs > ae_dev->dev_specs.max_int_gl) {
@@ -1468,7 +1462,7 @@ static int hns3_check_ql_coalesce_param(struct net_device *netdev,
struct ethtool_coalesce *cmd)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
if ((cmd->tx_max_coalesced_frames || cmd->rx_max_coalesced_frames) &&
!ae_dev->dev_specs.int_ql_max) {
@@ -1492,7 +1486,7 @@ hns3_check_cqe_coalesce_param(struct net_device *netdev,
struct kernel_ethtool_coalesce *kernel_coal)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
if ((kernel_coal->use_cqe_mode_tx || kernel_coal->use_cqe_mode_rx) &&
!hnae3_ae_dev_cq_supported(ae_dev)) {
@@ -1665,11 +1659,12 @@ static void hns3_set_msglevel(struct net_device *netdev, u32 msg_level)
}
static void hns3_get_fec_stats(struct net_device *netdev,
- struct ethtool_fec_stats *fec_stats)
+ struct ethtool_fec_stats *fec_stats,
+ struct ethtool_fec_hist *hist)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
if (!hnae3_ae_dev_fec_stats_supported(ae_dev) || !ops->get_fec_stats)
return;
@@ -1719,8 +1714,8 @@ static int hns3_get_fecparam(struct net_device *netdev,
struct ethtool_fecparam *fec)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
u8 fec_ability;
u8 fec_mode;
@@ -1744,8 +1739,8 @@ static int hns3_set_fecparam(struct net_device *netdev,
struct ethtool_fecparam *fec)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
u32 fec_mode;
if (!test_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps))
@@ -1766,8 +1761,8 @@ static int hns3_get_module_info(struct net_device *netdev,
#define HNS3_SFF_8636_V1_3 0x03
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
struct hns3_sfp_type sfp_type;
int ret;
@@ -1816,8 +1811,8 @@ static int hns3_get_module_eeprom(struct net_device *netdev,
struct ethtool_eeprom *ee, u8 *data)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2 ||
!ops->get_module_eeprom)
@@ -1933,6 +1928,31 @@ static int hns3_set_tx_spare_buf_size(struct net_device *netdev,
return ret;
}
+static int hns3_check_tx_copybreak(struct net_device *netdev, u32 copybreak)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+
+ if (copybreak < priv->min_tx_copybreak) {
+ netdev_err(netdev, "tx copybreak %u should be no less than %u!\n",
+ copybreak, priv->min_tx_copybreak);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int hns3_check_tx_spare_buf_size(struct net_device *netdev, u32 buf_size)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+
+ if (buf_size < priv->min_tx_spare_buf_size) {
+ netdev_err(netdev,
+ "tx spare buf size %u should be no less than %u!\n",
+ buf_size, priv->min_tx_spare_buf_size);
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int hns3_set_tunable(struct net_device *netdev,
const struct ethtool_tunable *tuna,
const void *data)
@@ -1943,12 +1963,16 @@ static int hns3_set_tunable(struct net_device *netdev,
int i, ret = 0;
if (hns3_nic_resetting(netdev) || !priv->ring) {
- netdev_err(netdev, "failed to set tunable value, dev resetting!");
+ netdev_err(netdev, "failed to set tunable value, dev resetting!\n");
return -EBUSY;
}
switch (tuna->id) {
case ETHTOOL_TX_COPYBREAK:
+ ret = hns3_check_tx_copybreak(netdev, *(u32 *)data);
+ if (ret)
+ return ret;
+
priv->tx_copybreak = *(u32 *)data;
for (i = 0; i < h->kinfo.num_tqps; i++)
@@ -1963,6 +1987,10 @@ static int hns3_set_tunable(struct net_device *netdev,
break;
case ETHTOOL_TX_COPYBREAK_BUF_SIZE:
+ ret = hns3_check_tx_spare_buf_size(netdev, *(u32 *)data);
+ if (ret)
+ return ret;
+
old_tx_spare_buf_size = h->kinfo.tx_spare_buf_size;
new_tx_spare_buf_size = *(u32 *)data;
netdev_info(netdev, "request to set tx spare buf size from %u to %u\n",
@@ -2009,7 +2037,7 @@ static int hns3_set_tunable(struct net_device *netdev,
ETHTOOL_RING_USE_TX_PUSH)
static int hns3_get_ts_info(struct net_device *netdev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
@@ -2124,6 +2152,8 @@ static const struct ethtool_ops hns3vf_ethtool_ops = {
.get_rxfh_indir_size = hns3_get_rss_indir_size,
.get_rxfh = hns3_get_rss,
.set_rxfh = hns3_set_rss,
+ .get_rxfh_fields = hns3_get_rxfh_fields,
+ .set_rxfh_fields = hns3_set_rxfh_fields,
.get_link_ksettings = hns3_get_link_ksettings,
.get_channels = hns3_get_channels,
.set_channels = hns3_set_channels,
@@ -2161,6 +2191,8 @@ static const struct ethtool_ops hns3_ethtool_ops = {
.get_rxfh_indir_size = hns3_get_rss_indir_size,
.get_rxfh = hns3_get_rss,
.set_rxfh = hns3_set_rss,
+ .get_rxfh_fields = hns3_get_rxfh_fields,
+ .set_rxfh_fields = hns3_set_rxfh_fields,
.get_link_ksettings = hns3_get_link_ksettings,
.set_link_ksettings = hns3_set_link_ksettings,
.nway_reset = hns3_nway_reset,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h
index b8a1ecb4b8fb..3362b8d14d4f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h
@@ -84,7 +84,7 @@ TRACE_EVENT(hns3_tx_desc,
__entry->desc_dma = ring->desc_dma_addr,
memcpy(__entry->desc, &ring->desc[cur_ntu],
sizeof(struct hns3_desc));
- __assign_str(devname, ring->tqp->handle->kinfo.netdev->name);
+ __assign_str(devname);
),
TP_printk(
@@ -117,7 +117,7 @@ TRACE_EVENT(hns3_rx_desc,
__entry->buf_dma = ring->desc_cb[ring->next_to_clean].dma;
memcpy(__entry->desc, &ring->desc[ring->next_to_clean],
sizeof(struct hns3_desc));
- __assign_str(devname, ring->tqp->handle->kinfo.netdev->name);
+ __assign_str(devname);
),
TP_printk(
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 9ec471ced3d6..b76d25074e99 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -3,6 +3,7 @@
#include <linux/device.h>
#include <linux/sched/clock.h>
+#include <linux/string_choices.h>
#include "hclge_debugfs.h"
#include "hclge_err.h"
@@ -11,13 +12,655 @@
#include "hclge_tm.h"
#include "hnae3.h"
-static const char * const state_str[] = { "off", "on" };
+#define hclge_seq_file_to_hdev(s) \
+ (((struct hnae3_ae_dev *)hnae3_seq_file_to_ae_dev(s))->priv)
+
static const char * const hclge_mac_state_str[] = {
"TO_ADD", "TO_DEL", "ACTIVE"
};
static const char * const tc_map_mode_str[] = { "PRIO", "DSCP" };
+static const struct hclge_dbg_dfx_message hclge_dbg_bios_common_reg[] = {
+ {false, "Reserved"},
+ {true, "BP_CPU_STATE"},
+ {true, "DFX_MSIX_INFO_NIC_0"},
+ {true, "DFX_MSIX_INFO_NIC_1"},
+ {true, "DFX_MSIX_INFO_NIC_2"},
+ {true, "DFX_MSIX_INFO_NIC_3"},
+
+ {true, "DFX_MSIX_INFO_ROC_0"},
+ {true, "DFX_MSIX_INFO_ROC_1"},
+ {true, "DFX_MSIX_INFO_ROC_2"},
+ {true, "DFX_MSIX_INFO_ROC_3"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_0[] = {
+ {false, "Reserved"},
+ {true, "SSU_ETS_PORT_STATUS"},
+ {true, "SSU_ETS_TCG_STATUS"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {true, "SSU_BP_STATUS_0"},
+
+ {true, "SSU_BP_STATUS_1"},
+ {true, "SSU_BP_STATUS_2"},
+ {true, "SSU_BP_STATUS_3"},
+ {true, "SSU_BP_STATUS_4"},
+ {true, "SSU_BP_STATUS_5"},
+ {true, "SSU_MAC_TX_PFC_IND"},
+
+ {true, "MAC_SSU_RX_PFC_IND"},
+ {true, "BTMP_AGEING_ST_B0"},
+ {true, "BTMP_AGEING_ST_B1"},
+ {true, "BTMP_AGEING_ST_B2"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+
+ {true, "FULL_DROP_NUM"},
+ {true, "PART_DROP_NUM"},
+ {true, "PPP_KEY_DROP_NUM"},
+ {true, "PPP_RLT_DROP_NUM"},
+ {true, "LO_PRI_UNICAST_RLT_DROP_NUM"},
+ {true, "HI_PRI_MULTICAST_RLT_DROP_NUM"},
+
+ {true, "LO_PRI_MULTICAST_RLT_DROP_NUM"},
+ {true, "NCSI_PACKET_CURR_BUFFER_CNT"},
+ {true, "BTMP_AGEING_RLS_CNT_BANK0"},
+ {true, "BTMP_AGEING_RLS_CNT_BANK1"},
+ {true, "BTMP_AGEING_RLS_CNT_BANK2"},
+ {true, "SSU_MB_RD_RLT_DROP_CNT"},
+
+ {true, "SSU_PPP_MAC_KEY_NUM_L"},
+ {true, "SSU_PPP_MAC_KEY_NUM_H"},
+ {true, "SSU_PPP_HOST_KEY_NUM_L"},
+ {true, "SSU_PPP_HOST_KEY_NUM_H"},
+ {true, "PPP_SSU_MAC_RLT_NUM_L"},
+ {true, "PPP_SSU_MAC_RLT_NUM_H"},
+
+ {true, "PPP_SSU_HOST_RLT_NUM_L"},
+ {true, "PPP_SSU_HOST_RLT_NUM_H"},
+ {true, "NCSI_RX_PACKET_IN_CNT_L"},
+ {true, "NCSI_RX_PACKET_IN_CNT_H"},
+ {true, "NCSI_TX_PACKET_OUT_CNT_L"},
+ {true, "NCSI_TX_PACKET_OUT_CNT_H"},
+
+ {true, "SSU_KEY_DROP_NUM"},
+ {true, "MB_UNCOPY_NUM"},
+ {true, "RX_OQ_DROP_PKT_CNT"},
+ {true, "TX_OQ_DROP_PKT_CNT"},
+ {true, "BANK_UNBALANCE_DROP_CNT"},
+ {true, "BANK_UNBALANCE_RX_DROP_CNT"},
+
+ {true, "NIC_L2_ERR_DROP_PKT_CNT"},
+ {true, "ROC_L2_ERR_DROP_PKT_CNT"},
+ {true, "NIC_L2_ERR_DROP_PKT_CNT_RX"},
+ {true, "ROC_L2_ERR_DROP_PKT_CNT_RX"},
+ {true, "RX_OQ_GLB_DROP_PKT_CNT"},
+ {false, "Reserved"},
+
+ {true, "LO_PRI_UNICAST_CUR_CNT"},
+ {true, "HI_PRI_MULTICAST_CUR_CNT"},
+ {true, "LO_PRI_MULTICAST_CUR_CNT"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_1[] = {
+ {true, "prt_id"},
+ {true, "PACKET_TC_CURR_BUFFER_CNT_0"},
+ {true, "PACKET_TC_CURR_BUFFER_CNT_1"},
+ {true, "PACKET_TC_CURR_BUFFER_CNT_2"},
+ {true, "PACKET_TC_CURR_BUFFER_CNT_3"},
+ {true, "PACKET_TC_CURR_BUFFER_CNT_4"},
+
+ {true, "PACKET_TC_CURR_BUFFER_CNT_5"},
+ {true, "PACKET_TC_CURR_BUFFER_CNT_6"},
+ {true, "PACKET_TC_CURR_BUFFER_CNT_7"},
+ {true, "PACKET_CURR_BUFFER_CNT"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+
+ {true, "RX_PACKET_IN_CNT_L"},
+ {true, "RX_PACKET_IN_CNT_H"},
+ {true, "RX_PACKET_OUT_CNT_L"},
+ {true, "RX_PACKET_OUT_CNT_H"},
+ {true, "TX_PACKET_IN_CNT_L"},
+ {true, "TX_PACKET_IN_CNT_H"},
+
+ {true, "TX_PACKET_OUT_CNT_L"},
+ {true, "TX_PACKET_OUT_CNT_H"},
+ {true, "ROC_RX_PACKET_IN_CNT_L"},
+ {true, "ROC_RX_PACKET_IN_CNT_H"},
+ {true, "ROC_TX_PACKET_OUT_CNT_L"},
+ {true, "ROC_TX_PACKET_OUT_CNT_H"},
+
+ {true, "RX_PACKET_TC_IN_CNT_0_L"},
+ {true, "RX_PACKET_TC_IN_CNT_0_H"},
+ {true, "RX_PACKET_TC_IN_CNT_1_L"},
+ {true, "RX_PACKET_TC_IN_CNT_1_H"},
+ {true, "RX_PACKET_TC_IN_CNT_2_L"},
+ {true, "RX_PACKET_TC_IN_CNT_2_H"},
+
+ {true, "RX_PACKET_TC_IN_CNT_3_L"},
+ {true, "RX_PACKET_TC_IN_CNT_3_H"},
+ {true, "RX_PACKET_TC_IN_CNT_4_L"},
+ {true, "RX_PACKET_TC_IN_CNT_4_H"},
+ {true, "RX_PACKET_TC_IN_CNT_5_L"},
+ {true, "RX_PACKET_TC_IN_CNT_5_H"},
+
+ {true, "RX_PACKET_TC_IN_CNT_6_L"},
+ {true, "RX_PACKET_TC_IN_CNT_6_H"},
+ {true, "RX_PACKET_TC_IN_CNT_7_L"},
+ {true, "RX_PACKET_TC_IN_CNT_7_H"},
+ {true, "RX_PACKET_TC_OUT_CNT_0_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_0_H"},
+
+ {true, "RX_PACKET_TC_OUT_CNT_1_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_1_H"},
+ {true, "RX_PACKET_TC_OUT_CNT_2_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_2_H"},
+ {true, "RX_PACKET_TC_OUT_CNT_3_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_3_H"},
+
+ {true, "RX_PACKET_TC_OUT_CNT_4_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_4_H"},
+ {true, "RX_PACKET_TC_OUT_CNT_5_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_5_H"},
+ {true, "RX_PACKET_TC_OUT_CNT_6_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_6_H"},
+
+ {true, "RX_PACKET_TC_OUT_CNT_7_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_7_H"},
+ {true, "TX_PACKET_TC_IN_CNT_0_L"},
+ {true, "TX_PACKET_TC_IN_CNT_0_H"},
+ {true, "TX_PACKET_TC_IN_CNT_1_L"},
+ {true, "TX_PACKET_TC_IN_CNT_1_H"},
+
+ {true, "TX_PACKET_TC_IN_CNT_2_L"},
+ {true, "TX_PACKET_TC_IN_CNT_2_H"},
+ {true, "TX_PACKET_TC_IN_CNT_3_L"},
+ {true, "TX_PACKET_TC_IN_CNT_3_H"},
+ {true, "TX_PACKET_TC_IN_CNT_4_L"},
+ {true, "TX_PACKET_TC_IN_CNT_4_H"},
+
+ {true, "TX_PACKET_TC_IN_CNT_5_L"},
+ {true, "TX_PACKET_TC_IN_CNT_5_H"},
+ {true, "TX_PACKET_TC_IN_CNT_6_L"},
+ {true, "TX_PACKET_TC_IN_CNT_6_H"},
+ {true, "TX_PACKET_TC_IN_CNT_7_L"},
+ {true, "TX_PACKET_TC_IN_CNT_7_H"},
+
+ {true, "TX_PACKET_TC_OUT_CNT_0_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_0_H"},
+ {true, "TX_PACKET_TC_OUT_CNT_1_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_1_H"},
+ {true, "TX_PACKET_TC_OUT_CNT_2_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_2_H"},
+
+ {true, "TX_PACKET_TC_OUT_CNT_3_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_3_H"},
+ {true, "TX_PACKET_TC_OUT_CNT_4_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_4_H"},
+ {true, "TX_PACKET_TC_OUT_CNT_5_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_5_H"},
+
+ {true, "TX_PACKET_TC_OUT_CNT_6_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_6_H"},
+ {true, "TX_PACKET_TC_OUT_CNT_7_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_7_H"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_2[] = {
+ {true, "OQ_INDEX"},
+ {true, "QUEUE_CNT"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_igu_egu_reg[] = {
+ {true, "prt_id"},
+ {true, "IGU_RX_ERR_PKT"},
+ {true, "IGU_RX_NO_SOF_PKT"},
+ {true, "EGU_TX_1588_SHORT_PKT"},
+ {true, "EGU_TX_1588_PKT"},
+ {true, "EGU_TX_ERR_PKT"},
+
+ {true, "IGU_RX_OUT_L2_PKT"},
+ {true, "IGU_RX_OUT_L3_PKT"},
+ {true, "IGU_RX_OUT_L4_PKT"},
+ {true, "IGU_RX_IN_L2_PKT"},
+ {true, "IGU_RX_IN_L3_PKT"},
+ {true, "IGU_RX_IN_L4_PKT"},
+
+ {true, "IGU_RX_EL3E_PKT"},
+ {true, "IGU_RX_EL4E_PKT"},
+ {true, "IGU_RX_L3E_PKT"},
+ {true, "IGU_RX_L4E_PKT"},
+ {true, "IGU_RX_ROCEE_PKT"},
+ {true, "IGU_RX_OUT_UDP0_PKT"},
+
+ {true, "IGU_RX_IN_UDP0_PKT"},
+ {true, "IGU_MC_CAR_DROP_PKT_L"},
+ {true, "IGU_MC_CAR_DROP_PKT_H"},
+ {true, "IGU_BC_CAR_DROP_PKT_L"},
+ {true, "IGU_BC_CAR_DROP_PKT_H"},
+ {false, "Reserved"},
+
+ {true, "IGU_RX_OVERSIZE_PKT_L"},
+ {true, "IGU_RX_OVERSIZE_PKT_H"},
+ {true, "IGU_RX_UNDERSIZE_PKT_L"},
+ {true, "IGU_RX_UNDERSIZE_PKT_H"},
+ {true, "IGU_RX_OUT_ALL_PKT_L"},
+ {true, "IGU_RX_OUT_ALL_PKT_H"},
+
+ {true, "IGU_TX_OUT_ALL_PKT_L"},
+ {true, "IGU_TX_OUT_ALL_PKT_H"},
+ {true, "IGU_RX_UNI_PKT_L"},
+ {true, "IGU_RX_UNI_PKT_H"},
+ {true, "IGU_RX_MULTI_PKT_L"},
+ {true, "IGU_RX_MULTI_PKT_H"},
+
+ {true, "IGU_RX_BROAD_PKT_L"},
+ {true, "IGU_RX_BROAD_PKT_H"},
+ {true, "EGU_TX_OUT_ALL_PKT_L"},
+ {true, "EGU_TX_OUT_ALL_PKT_H"},
+ {true, "EGU_TX_UNI_PKT_L"},
+ {true, "EGU_TX_UNI_PKT_H"},
+
+ {true, "EGU_TX_MULTI_PKT_L"},
+ {true, "EGU_TX_MULTI_PKT_H"},
+ {true, "EGU_TX_BROAD_PKT_L"},
+ {true, "EGU_TX_BROAD_PKT_H"},
+ {true, "IGU_TX_KEY_NUM_L"},
+ {true, "IGU_TX_KEY_NUM_H"},
+
+ {true, "IGU_RX_NON_TUN_PKT_L"},
+ {true, "IGU_RX_NON_TUN_PKT_H"},
+ {true, "IGU_RX_TUN_PKT_L"},
+ {true, "IGU_RX_TUN_PKT_H"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_0[] = {
+ {true, "tc_queue_num"},
+ {true, "FSM_DFX_ST0"},
+ {true, "FSM_DFX_ST1"},
+ {true, "RPU_RX_PKT_DROP_CNT"},
+ {true, "BUF_WAIT_TIMEOUT"},
+ {true, "BUF_WAIT_TIMEOUT_QID"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_1[] = {
+ {false, "Reserved"},
+ {true, "FIFO_DFX_ST0"},
+ {true, "FIFO_DFX_ST1"},
+ {true, "FIFO_DFX_ST2"},
+ {true, "FIFO_DFX_ST3"},
+ {true, "FIFO_DFX_ST4"},
+
+ {true, "FIFO_DFX_ST5"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_ncsi_reg[] = {
+ {false, "Reserved"},
+ {true, "NCSI_EGU_TX_FIFO_STS"},
+ {true, "NCSI_PAUSE_STATUS"},
+ {true, "NCSI_RX_CTRL_DMAC_ERR_CNT"},
+ {true, "NCSI_RX_CTRL_SMAC_ERR_CNT"},
+ {true, "NCSI_RX_CTRL_CKS_ERR_CNT"},
+
+ {true, "NCSI_RX_CTRL_PKT_CNT"},
+ {true, "NCSI_RX_PT_DMAC_ERR_CNT"},
+ {true, "NCSI_RX_PT_SMAC_ERR_CNT"},
+ {true, "NCSI_RX_PT_PKT_CNT"},
+ {true, "NCSI_RX_FCS_ERR_CNT"},
+ {true, "NCSI_TX_CTRL_DMAC_ERR_CNT"},
+
+ {true, "NCSI_TX_CTRL_SMAC_ERR_CNT"},
+ {true, "NCSI_TX_CTRL_PKT_CNT"},
+ {true, "NCSI_TX_PT_DMAC_ERR_CNT"},
+ {true, "NCSI_TX_PT_SMAC_ERR_CNT"},
+ {true, "NCSI_TX_PT_PKT_CNT"},
+ {true, "NCSI_TX_PT_PKT_TRUNC_CNT"},
+
+ {true, "NCSI_TX_PT_PKT_ERR_CNT"},
+ {true, "NCSI_TX_CTRL_PKT_ERR_CNT"},
+ {true, "NCSI_RX_CTRL_PKT_TRUNC_CNT"},
+ {true, "NCSI_RX_CTRL_PKT_CFLIT_CNT"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+
+ {true, "NCSI_MAC_RX_OCTETS_OK"},
+ {true, "NCSI_MAC_RX_OCTETS_BAD"},
+ {true, "NCSI_MAC_RX_UC_PKTS"},
+ {true, "NCSI_MAC_RX_MC_PKTS"},
+ {true, "NCSI_MAC_RX_BC_PKTS"},
+ {true, "NCSI_MAC_RX_PKTS_64OCTETS"},
+
+ {true, "NCSI_MAC_RX_PKTS_65TO127OCTETS"},
+ {true, "NCSI_MAC_RX_PKTS_128TO255OCTETS"},
+ {true, "NCSI_MAC_RX_PKTS_255TO511OCTETS"},
+ {true, "NCSI_MAC_RX_PKTS_512TO1023OCTETS"},
+ {true, "NCSI_MAC_RX_PKTS_1024TO1518OCTETS"},
+ {true, "NCSI_MAC_RX_PKTS_1519TOMAXOCTETS"},
+
+ {true, "NCSI_MAC_RX_FCS_ERRORS"},
+ {true, "NCSI_MAC_RX_LONG_ERRORS"},
+ {true, "NCSI_MAC_RX_JABBER_ERRORS"},
+ {true, "NCSI_MAC_RX_RUNT_ERR_CNT"},
+ {true, "NCSI_MAC_RX_SHORT_ERR_CNT"},
+ {true, "NCSI_MAC_RX_FILT_PKT_CNT"},
+
+ {true, "NCSI_MAC_RX_OCTETS_TOTAL_FILT"},
+ {true, "NCSI_MAC_TX_OCTETS_OK"},
+ {true, "NCSI_MAC_TX_OCTETS_BAD"},
+ {true, "NCSI_MAC_TX_UC_PKTS"},
+ {true, "NCSI_MAC_TX_MC_PKTS"},
+ {true, "NCSI_MAC_TX_BC_PKTS"},
+
+ {true, "NCSI_MAC_TX_PKTS_64OCTETS"},
+ {true, "NCSI_MAC_TX_PKTS_65TO127OCTETS"},
+ {true, "NCSI_MAC_TX_PKTS_128TO255OCTETS"},
+ {true, "NCSI_MAC_TX_PKTS_256TO511OCTETS"},
+ {true, "NCSI_MAC_TX_PKTS_512TO1023OCTETS"},
+ {true, "NCSI_MAC_TX_PKTS_1024TO1518OCTETS"},
+
+ {true, "NCSI_MAC_TX_PKTS_1519TOMAXOCTETS"},
+ {true, "NCSI_MAC_TX_UNDERRUN"},
+ {true, "NCSI_MAC_TX_CRC_ERROR"},
+ {true, "NCSI_MAC_TX_PAUSE_FRAMES"},
+ {true, "NCSI_MAC_RX_PAD_PKTS"},
+ {true, "NCSI_MAC_RX_PAUSE_FRAMES"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_rtc_reg[] = {
+ {false, "Reserved"},
+ {true, "LGE_IGU_AFIFO_DFX_0"},
+ {true, "LGE_IGU_AFIFO_DFX_1"},
+ {true, "LGE_IGU_AFIFO_DFX_2"},
+ {true, "LGE_IGU_AFIFO_DFX_3"},
+ {true, "LGE_IGU_AFIFO_DFX_4"},
+
+ {true, "LGE_IGU_AFIFO_DFX_5"},
+ {true, "LGE_IGU_AFIFO_DFX_6"},
+ {true, "LGE_IGU_AFIFO_DFX_7"},
+ {true, "LGE_EGU_AFIFO_DFX_0"},
+ {true, "LGE_EGU_AFIFO_DFX_1"},
+ {true, "LGE_EGU_AFIFO_DFX_2"},
+
+ {true, "LGE_EGU_AFIFO_DFX_3"},
+ {true, "LGE_EGU_AFIFO_DFX_4"},
+ {true, "LGE_EGU_AFIFO_DFX_5"},
+ {true, "LGE_EGU_AFIFO_DFX_6"},
+ {true, "LGE_EGU_AFIFO_DFX_7"},
+ {true, "CGE_IGU_AFIFO_DFX_0"},
+
+ {true, "CGE_IGU_AFIFO_DFX_1"},
+ {true, "CGE_EGU_AFIFO_DFX_0"},
+ {true, "CGE_EGU_AFIFO_DFX_1"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_ppp_reg[] = {
+ {false, "Reserved"},
+ {true, "DROP_FROM_PRT_PKT_CNT"},
+ {true, "DROP_FROM_HOST_PKT_CNT"},
+ {true, "DROP_TX_VLAN_PROC_CNT"},
+ {true, "DROP_MNG_CNT"},
+ {true, "DROP_FD_CNT"},
+
+ {true, "DROP_NO_DST_CNT"},
+ {true, "DROP_MC_MBID_FULL_CNT"},
+ {true, "DROP_SC_FILTERED"},
+ {true, "PPP_MC_DROP_PKT_CNT"},
+ {true, "DROP_PT_CNT"},
+ {true, "DROP_MAC_ANTI_SPOOF_CNT"},
+
+ {true, "DROP_IG_VFV_CNT"},
+ {true, "DROP_IG_PRTV_CNT"},
+ {true, "DROP_CNM_PFC_PAUSE_CNT"},
+ {true, "DROP_TORUS_TC_CNT"},
+ {true, "DROP_TORUS_LPBK_CNT"},
+ {true, "PPP_HFS_STS"},
+
+ {true, "PPP_MC_RSLT_STS"},
+ {true, "PPP_P3U_STS"},
+ {true, "PPP_RSLT_DESCR_STS"},
+ {true, "PPP_UMV_STS_0"},
+ {true, "PPP_UMV_STS_1"},
+ {true, "PPP_VFV_STS"},
+
+ {true, "PPP_GRO_KEY_CNT"},
+ {true, "PPP_GRO_INFO_CNT"},
+ {true, "PPP_GRO_DROP_CNT"},
+ {true, "PPP_GRO_OUT_CNT"},
+ {true, "PPP_GRO_KEY_MATCH_DATA_CNT"},
+ {true, "PPP_GRO_KEY_MATCH_TCAM_CNT"},
+
+ {true, "PPP_GRO_INFO_MATCH_CNT"},
+ {true, "PPP_GRO_FREE_ENTRY_CNT"},
+ {true, "PPP_GRO_INNER_DFX_SIGNAL"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+
+ {true, "GET_RX_PKT_CNT_L"},
+ {true, "GET_RX_PKT_CNT_H"},
+ {true, "GET_TX_PKT_CNT_L"},
+ {true, "GET_TX_PKT_CNT_H"},
+ {true, "SEND_UC_PRT2HOST_PKT_CNT_L"},
+ {true, "SEND_UC_PRT2HOST_PKT_CNT_H"},
+
+ {true, "SEND_UC_PRT2PRT_PKT_CNT_L"},
+ {true, "SEND_UC_PRT2PRT_PKT_CNT_H"},
+ {true, "SEND_UC_HOST2HOST_PKT_CNT_L"},
+ {true, "SEND_UC_HOST2HOST_PKT_CNT_H"},
+ {true, "SEND_UC_HOST2PRT_PKT_CNT_L"},
+ {true, "SEND_UC_HOST2PRT_PKT_CNT_H"},
+
+ {true, "SEND_MC_FROM_PRT_CNT_L"},
+ {true, "SEND_MC_FROM_PRT_CNT_H"},
+ {true, "SEND_MC_FROM_HOST_CNT_L"},
+ {true, "SEND_MC_FROM_HOST_CNT_H"},
+ {true, "SSU_MC_RD_CNT_L"},
+ {true, "SSU_MC_RD_CNT_H"},
+
+ {true, "SSU_MC_DROP_CNT_L"},
+ {true, "SSU_MC_DROP_CNT_H"},
+ {true, "SSU_MC_RD_PKT_CNT_L"},
+ {true, "SSU_MC_RD_PKT_CNT_H"},
+ {true, "PPP_MC_2HOST_PKT_CNT_L"},
+ {true, "PPP_MC_2HOST_PKT_CNT_H"},
+
+ {true, "PPP_MC_2PRT_PKT_CNT_L"},
+ {true, "PPP_MC_2PRT_PKT_CNT_H"},
+ {true, "NTSNOS_PKT_CNT_L"},
+ {true, "NTSNOS_PKT_CNT_H"},
+ {true, "NTUP_PKT_CNT_L"},
+ {true, "NTUP_PKT_CNT_H"},
+
+ {true, "NTLCL_PKT_CNT_L"},
+ {true, "NTLCL_PKT_CNT_H"},
+ {true, "NTTGT_PKT_CNT_L"},
+ {true, "NTTGT_PKT_CNT_H"},
+ {true, "RTNS_PKT_CNT_L"},
+ {true, "RTNS_PKT_CNT_H"},
+
+ {true, "RTLPBK_PKT_CNT_L"},
+ {true, "RTLPBK_PKT_CNT_H"},
+ {true, "NR_PKT_CNT_L"},
+ {true, "NR_PKT_CNT_H"},
+ {true, "RR_PKT_CNT_L"},
+ {true, "RR_PKT_CNT_H"},
+
+ {true, "MNG_TBL_HIT_CNT_L"},
+ {true, "MNG_TBL_HIT_CNT_H"},
+ {true, "FD_TBL_HIT_CNT_L"},
+ {true, "FD_TBL_HIT_CNT_H"},
+ {true, "FD_LKUP_CNT_L"},
+ {true, "FD_LKUP_CNT_H"},
+
+ {true, "BC_HIT_CNT_L"},
+ {true, "BC_HIT_CNT_H"},
+ {true, "UM_TBL_UC_HIT_CNT_L"},
+ {true, "UM_TBL_UC_HIT_CNT_H"},
+ {true, "UM_TBL_MC_HIT_CNT_L"},
+ {true, "UM_TBL_MC_HIT_CNT_H"},
+
+ {true, "UM_TBL_VMDQ1_HIT_CNT_L"},
+ {true, "UM_TBL_VMDQ1_HIT_CNT_H"},
+ {true, "MTA_TBL_HIT_CNT_L"},
+ {true, "MTA_TBL_HIT_CNT_H"},
+ {true, "FWD_BONDING_HIT_CNT_L"},
+ {true, "FWD_BONDING_HIT_CNT_H"},
+
+ {true, "PROMIS_TBL_HIT_CNT_L"},
+ {true, "PROMIS_TBL_HIT_CNT_H"},
+ {true, "GET_TUNL_PKT_CNT_L"},
+ {true, "GET_TUNL_PKT_CNT_H"},
+ {true, "GET_BMC_PKT_CNT_L"},
+ {true, "GET_BMC_PKT_CNT_H"},
+
+ {true, "SEND_UC_PRT2BMC_PKT_CNT_L"},
+ {true, "SEND_UC_PRT2BMC_PKT_CNT_H"},
+ {true, "SEND_UC_HOST2BMC_PKT_CNT_L"},
+ {true, "SEND_UC_HOST2BMC_PKT_CNT_H"},
+ {true, "SEND_UC_BMC2HOST_PKT_CNT_L"},
+ {true, "SEND_UC_BMC2HOST_PKT_CNT_H"},
+
+ {true, "SEND_UC_BMC2PRT_PKT_CNT_L"},
+ {true, "SEND_UC_BMC2PRT_PKT_CNT_H"},
+ {true, "PPP_MC_2BMC_PKT_CNT_L"},
+ {true, "PPP_MC_2BMC_PKT_CNT_H"},
+ {true, "VLAN_MIRR_CNT_L"},
+ {true, "VLAN_MIRR_CNT_H"},
+
+ {true, "IG_MIRR_CNT_L"},
+ {true, "IG_MIRR_CNT_H"},
+ {true, "EG_MIRR_CNT_L"},
+ {true, "EG_MIRR_CNT_H"},
+ {true, "RX_DEFAULT_HOST_HIT_CNT_L"},
+ {true, "RX_DEFAULT_HOST_HIT_CNT_H"},
+
+ {true, "LAN_PAIR_CNT_L"},
+ {true, "LAN_PAIR_CNT_H"},
+ {true, "UM_TBL_MC_HIT_PKT_CNT_L"},
+ {true, "UM_TBL_MC_HIT_PKT_CNT_H"},
+ {true, "MTA_TBL_HIT_PKT_CNT_L"},
+ {true, "MTA_TBL_HIT_PKT_CNT_H"},
+
+ {true, "PROMIS_TBL_HIT_PKT_CNT_L"},
+ {true, "PROMIS_TBL_HIT_PKT_CNT_H"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_rcb_reg[] = {
+ {false, "Reserved"},
+ {true, "FSM_DFX_ST0"},
+ {true, "FSM_DFX_ST1"},
+ {true, "FSM_DFX_ST2"},
+ {true, "FIFO_DFX_ST0"},
+ {true, "FIFO_DFX_ST1"},
+
+ {true, "FIFO_DFX_ST2"},
+ {true, "FIFO_DFX_ST3"},
+ {true, "FIFO_DFX_ST4"},
+ {true, "FIFO_DFX_ST5"},
+ {true, "FIFO_DFX_ST6"},
+ {true, "FIFO_DFX_ST7"},
+
+ {true, "FIFO_DFX_ST8"},
+ {true, "FIFO_DFX_ST9"},
+ {true, "FIFO_DFX_ST10"},
+ {true, "FIFO_DFX_ST11"},
+ {true, "Q_CREDIT_VLD_0"},
+ {true, "Q_CREDIT_VLD_1"},
+
+ {true, "Q_CREDIT_VLD_2"},
+ {true, "Q_CREDIT_VLD_3"},
+ {true, "Q_CREDIT_VLD_4"},
+ {true, "Q_CREDIT_VLD_5"},
+ {true, "Q_CREDIT_VLD_6"},
+ {true, "Q_CREDIT_VLD_7"},
+
+ {true, "Q_CREDIT_VLD_8"},
+ {true, "Q_CREDIT_VLD_9"},
+ {true, "Q_CREDIT_VLD_10"},
+ {true, "Q_CREDIT_VLD_11"},
+ {true, "Q_CREDIT_VLD_12"},
+ {true, "Q_CREDIT_VLD_13"},
+
+ {true, "Q_CREDIT_VLD_14"},
+ {true, "Q_CREDIT_VLD_15"},
+ {true, "Q_CREDIT_VLD_16"},
+ {true, "Q_CREDIT_VLD_17"},
+ {true, "Q_CREDIT_VLD_18"},
+ {true, "Q_CREDIT_VLD_19"},
+
+ {true, "Q_CREDIT_VLD_20"},
+ {true, "Q_CREDIT_VLD_21"},
+ {true, "Q_CREDIT_VLD_22"},
+ {true, "Q_CREDIT_VLD_23"},
+ {true, "Q_CREDIT_VLD_24"},
+ {true, "Q_CREDIT_VLD_25"},
+
+ {true, "Q_CREDIT_VLD_26"},
+ {true, "Q_CREDIT_VLD_27"},
+ {true, "Q_CREDIT_VLD_28"},
+ {true, "Q_CREDIT_VLD_29"},
+ {true, "Q_CREDIT_VLD_30"},
+ {true, "Q_CREDIT_VLD_31"},
+
+ {true, "GRO_BD_SERR_CNT"},
+ {true, "GRO_CONTEXT_SERR_CNT"},
+ {true, "RX_STASH_CFG_SERR_CNT"},
+ {true, "AXI_RD_FBD_SERR_CNT"},
+ {true, "GRO_BD_MERR_CNT"},
+ {true, "GRO_CONTEXT_MERR_CNT"},
+
+ {true, "RX_STASH_CFG_MERR_CNT"},
+ {true, "AXI_RD_FBD_MERR_CNT"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_tqp_reg[] = {
+ {true, "q_num"},
+ {true, "RCB_CFG_RX_RING_TAIL"},
+ {true, "RCB_CFG_RX_RING_HEAD"},
+ {true, "RCB_CFG_RX_RING_FBDNUM"},
+ {true, "RCB_CFG_RX_RING_OFFSET"},
+ {true, "RCB_CFG_RX_RING_FBDOFFSET"},
+
+ {true, "RCB_CFG_RX_RING_PKTNUM_RECORD"},
+ {true, "RCB_CFG_TX_RING_TAIL"},
+ {true, "RCB_CFG_TX_RING_HEAD"},
+ {true, "RCB_CFG_TX_RING_FBDNUM"},
+ {true, "RCB_CFG_TX_RING_OFFSET"},
+ {true, "RCB_CFG_TX_RING_EBDNUM"},
+};
+
static const struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
{ .cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
.dfx_msg = &hclge_dbg_bios_common_reg[0],
@@ -81,48 +724,6 @@ static const struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
.cmd = HCLGE_OPC_DFX_TQP_REG } },
};
-/* make sure: len(name) + interval >= maxlen(item data) + 2,
- * for example, name = "pkt_num"(len: 7), the prototype of item data is u32,
- * and print as "%u"(maxlen: 10), so the interval should be at least 5.
- */
-static void hclge_dbg_fill_content(char *content, u16 len,
- const struct hclge_dbg_item *items,
- const char **result, u16 size)
-{
-#define HCLGE_DBG_LINE_END_LEN 2
- char *pos = content;
- u16 item_len;
- u16 i;
-
- if (!len) {
- return;
- } else if (len <= HCLGE_DBG_LINE_END_LEN) {
- *pos++ = '\0';
- return;
- }
-
- memset(content, ' ', len);
- len -= HCLGE_DBG_LINE_END_LEN;
-
- for (i = 0; i < size; i++) {
- item_len = strlen(items[i].name) + items[i].interval;
- if (len < item_len)
- break;
-
- if (result) {
- if (item_len < strlen(result[i]))
- break;
- memcpy(pos, result[i], strlen(result[i]));
- } else {
- memcpy(pos, items[i].name, strlen(items[i].name));
- }
- pos += item_len;
- len -= item_len;
- }
- *pos++ = '\n';
- *pos++ = '\0';
-}
-
static char *hclge_dbg_get_func_id_str(char *buf, u8 id)
{
if (id)
@@ -161,10 +762,8 @@ static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset,
return 0;
}
-static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
- struct hclge_desc *desc_src,
- int index, int bd_num,
- enum hclge_opcode_type cmd)
+int hclge_dbg_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc_src,
+ int index, int bd_num, enum hclge_opcode_type cmd)
{
struct hclge_desc *desc = desc_src;
int ret, i;
@@ -188,14 +787,14 @@ static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
static int
hclge_dbg_dump_reg_tqp(struct hclge_dev *hdev,
const struct hclge_dbg_reg_type_info *reg_info,
- char *buf, int len, int *pos)
+ struct seq_file *s)
{
const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
const struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
+ u32 index, entry, i, cnt, min_num;
struct hclge_desc *desc_src;
- u32 index, entry, i, cnt;
- int bd_num, min_num, ret;
struct hclge_desc *desc;
+ int bd_num, ret;
ret = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset, &bd_num);
if (ret)
@@ -208,13 +807,12 @@ hclge_dbg_dump_reg_tqp(struct hclge_dev *hdev,
min_num = min_t(int, bd_num * HCLGE_DESC_DATA_LEN, reg_msg->msg_num);
for (i = 0, cnt = 0; i < min_num; i++, dfx_message++)
- *pos += scnprintf(buf + *pos, len - *pos, "item%u = %s\n",
- cnt++, dfx_message->message);
+ seq_printf(s, "item%u = %s\n", cnt++, dfx_message->message);
for (i = 0; i < cnt; i++)
- *pos += scnprintf(buf + *pos, len - *pos, "item%u\t", i);
+ seq_printf(s, "item%u\t", i);
- *pos += scnprintf(buf + *pos, len - *pos, "\n");
+ seq_puts(s, "\n");
for (index = 0; index < hdev->vport[0].alloc_tqps; index++) {
dfx_message = reg_info->dfx_msg;
@@ -229,10 +827,9 @@ hclge_dbg_dump_reg_tqp(struct hclge_dev *hdev,
if (i > 0 && !entry)
desc++;
- *pos += scnprintf(buf + *pos, len - *pos, "%#x\t",
- le32_to_cpu(desc->data[entry]));
+ seq_printf(s, "%#x\t", le32_to_cpu(desc->data[entry]));
}
- *pos += scnprintf(buf + *pos, len - *pos, "\n");
+ seq_puts(s, "\n");
}
kfree(desc_src);
@@ -242,14 +839,14 @@ hclge_dbg_dump_reg_tqp(struct hclge_dev *hdev,
static int
hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
const struct hclge_dbg_reg_type_info *reg_info,
- char *buf, int len, int *pos)
+ struct seq_file *s)
{
const struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
struct hclge_desc *desc_src;
- int bd_num, min_num, ret;
+ int bd_num, min_num, ret, i;
struct hclge_desc *desc;
- u32 entry, i;
+ u32 entry;
ret = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset, &bd_num);
if (ret)
@@ -276,9 +873,8 @@ hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
if (!dfx_message->flag)
continue;
- *pos += scnprintf(buf + *pos, len - *pos, "%s: %#x\n",
- dfx_message->message,
- le32_to_cpu(desc->data[entry]));
+ seq_printf(s, "%s: %#x\n", dfx_message->message,
+ le32_to_cpu(desc->data[entry]));
}
kfree(desc_src);
@@ -302,8 +898,8 @@ static const struct hclge_dbg_status_dfx_info hclge_dbg_mac_en_status[] = {
{HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, "mac_tx_oversize_truncate_en"}
};
-static int hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev, char *buf,
- int len, int *pos)
+static int hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev,
+ struct seq_file *s)
{
struct hclge_config_mac_mode_cmd *req;
struct hclge_desc desc;
@@ -324,16 +920,15 @@ static int hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev, char *buf,
for (i = 0; i < ARRAY_SIZE(hclge_dbg_mac_en_status); i++) {
offset = hclge_dbg_mac_en_status[i].offset;
- *pos += scnprintf(buf + *pos, len - *pos, "%s: %#x\n",
- hclge_dbg_mac_en_status[i].message,
- hnae3_get_bit(loop_en, offset));
+ seq_printf(s, "%s: %#x\n", hclge_dbg_mac_en_status[i].message,
+ hnae3_get_bit(loop_en, offset));
}
return 0;
}
-static int hclge_dbg_dump_mac_frame_size(struct hclge_dev *hdev, char *buf,
- int len, int *pos)
+static int hclge_dbg_dump_mac_frame_size(struct hclge_dev *hdev,
+ struct seq_file *s)
{
struct hclge_config_max_frm_size_cmd *req;
struct hclge_desc desc;
@@ -350,16 +945,14 @@ static int hclge_dbg_dump_mac_frame_size(struct hclge_dev *hdev, char *buf,
req = (struct hclge_config_max_frm_size_cmd *)desc.data;
- *pos += scnprintf(buf + *pos, len - *pos, "max_frame_size: %u\n",
- le16_to_cpu(req->max_frm_size));
- *pos += scnprintf(buf + *pos, len - *pos, "min_frame_size: %u\n",
- req->min_frm_size);
+ seq_printf(s, "max_frame_size: %u\n", le16_to_cpu(req->max_frm_size));
+ seq_printf(s, "min_frame_size: %u\n", req->min_frm_size);
return 0;
}
-static int hclge_dbg_dump_mac_speed_duplex(struct hclge_dev *hdev, char *buf,
- int len, int *pos)
+static int hclge_dbg_dump_mac_speed_duplex(struct hclge_dev *hdev,
+ struct seq_file *s)
{
#define HCLGE_MAC_SPEED_SHIFT 0
#define HCLGE_MAC_SPEED_MASK GENMASK(5, 0)
@@ -380,33 +973,31 @@ static int hclge_dbg_dump_mac_speed_duplex(struct hclge_dev *hdev, char *buf,
req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
- *pos += scnprintf(buf + *pos, len - *pos, "speed: %#lx\n",
- hnae3_get_field(req->speed_dup, HCLGE_MAC_SPEED_MASK,
- HCLGE_MAC_SPEED_SHIFT));
- *pos += scnprintf(buf + *pos, len - *pos, "duplex: %#x\n",
- hnae3_get_bit(req->speed_dup,
- HCLGE_MAC_DUPLEX_SHIFT));
+ seq_printf(s, "speed: %#lx\n",
+ hnae3_get_field(req->speed_dup, HCLGE_MAC_SPEED_MASK,
+ HCLGE_MAC_SPEED_SHIFT));
+ seq_printf(s, "duplex: %#x\n",
+ hnae3_get_bit(req->speed_dup, HCLGE_MAC_DUPLEX_SHIFT));
return 0;
}
-static int hclge_dbg_dump_mac(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_mac(struct seq_file *s, void *data)
{
- int pos = 0;
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
int ret;
- ret = hclge_dbg_dump_mac_enable_status(hdev, buf, len, &pos);
+ ret = hclge_dbg_dump_mac_enable_status(hdev, s);
if (ret)
return ret;
- ret = hclge_dbg_dump_mac_frame_size(hdev, buf, len, &pos);
+ ret = hclge_dbg_dump_mac_frame_size(hdev, s);
if (ret)
return ret;
- return hclge_dbg_dump_mac_speed_duplex(hdev, buf, len, &pos);
+ return hclge_dbg_dump_mac_speed_duplex(hdev, s);
}
-static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
- int *pos)
+static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, struct seq_file *s)
{
struct hclge_dbg_bitmap_cmd req;
struct hclge_desc desc;
@@ -417,8 +1008,8 @@ static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
if (ret)
return ret;
- *pos += scnprintf(buf + *pos, len - *pos,
- "qset_id roce_qset_mask nic_qset_mask qset_shaping_pass qset_bp_status\n");
+ seq_puts(s, "qset_id roce_qset_mask nic_qset_mask ");
+ seq_puts(s, "qset_shaping_pass qset_bp_status\n");
for (qset_id = 0; qset_id < qset_num; qset_id++) {
ret = hclge_dbg_cmd_send(hdev, &desc, qset_id, 1,
HCLGE_OPC_QSET_DFX_STS);
@@ -427,17 +1018,14 @@ static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
req.bitmap = (u8)le32_to_cpu(desc.data[1]);
- *pos += scnprintf(buf + *pos, len - *pos,
- "%04u %#x %#x %#x %#x\n",
- qset_id, req.bit0, req.bit1, req.bit2,
- req.bit3);
+ seq_printf(s, "%04u %#-16x%#-15x%#-19x%#-x\n",
+ qset_id, req.bit0, req.bit1, req.bit2, req.bit3);
}
return 0;
}
-static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
- int *pos)
+static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, struct seq_file *s)
{
struct hclge_dbg_bitmap_cmd req;
struct hclge_desc desc;
@@ -448,8 +1036,7 @@ static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
if (ret)
return ret;
- *pos += scnprintf(buf + *pos, len - *pos,
- "pri_id pri_mask pri_cshaping_pass pri_pshaping_pass\n");
+ seq_puts(s, "pri_id pri_mask pri_cshaping_pass pri_pshaping_pass\n");
for (pri_id = 0; pri_id < pri_num; pri_id++) {
ret = hclge_dbg_cmd_send(hdev, &desc, pri_id, 1,
HCLGE_OPC_PRI_DFX_STS);
@@ -458,24 +1045,21 @@ static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
req.bitmap = (u8)le32_to_cpu(desc.data[1]);
- *pos += scnprintf(buf + *pos, len - *pos,
- "%03u %#x %#x %#x\n",
- pri_id, req.bit0, req.bit1, req.bit2);
+ seq_printf(s, "%03u %#-10x%#-19x%#-x\n",
+ pri_id, req.bit0, req.bit1, req.bit2);
}
return 0;
}
-static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len,
- int *pos)
+static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, struct seq_file *s)
{
struct hclge_dbg_bitmap_cmd req;
struct hclge_desc desc;
u8 pg_id;
int ret;
- *pos += scnprintf(buf + *pos, len - *pos,
- "pg_id pg_mask pg_cshaping_pass pg_pshaping_pass\n");
+ seq_puts(s, "pg_id pg_mask pg_cshaping_pass pg_pshaping_pass\n");
for (pg_id = 0; pg_id < hdev->tm_info.num_pg; pg_id++) {
ret = hclge_dbg_cmd_send(hdev, &desc, pg_id, 1,
HCLGE_OPC_PG_DFX_STS);
@@ -484,47 +1068,41 @@ static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len,
req.bitmap = (u8)le32_to_cpu(desc.data[1]);
- *pos += scnprintf(buf + *pos, len - *pos,
- "%03u %#x %#x %#x\n",
- pg_id, req.bit0, req.bit1, req.bit2);
+ seq_printf(s, "%03u %#-9x%#-18x%#-x\n",
+ pg_id, req.bit0, req.bit1, req.bit2);
}
return 0;
}
-static int hclge_dbg_dump_dcb_queue(struct hclge_dev *hdev, char *buf, int len,
- int *pos)
+static int hclge_dbg_dump_dcb_queue(struct hclge_dev *hdev, struct seq_file *s)
{
struct hclge_desc desc;
u16 nq_id;
int ret;
- *pos += scnprintf(buf + *pos, len - *pos,
- "nq_id sch_nic_queue_cnt sch_roce_queue_cnt\n");
+ seq_puts(s, "nq_id sch_nic_queue_cnt sch_roce_queue_cnt\n");
for (nq_id = 0; nq_id < hdev->num_tqps; nq_id++) {
ret = hclge_dbg_cmd_send(hdev, &desc, nq_id, 1,
HCLGE_OPC_SCH_NQ_CNT);
if (ret)
return ret;
- *pos += scnprintf(buf + *pos, len - *pos, "%04u %#x",
- nq_id, le32_to_cpu(desc.data[1]));
+ seq_printf(s, "%04u %#-19x",
+ nq_id, le32_to_cpu(desc.data[1]));
ret = hclge_dbg_cmd_send(hdev, &desc, nq_id, 1,
HCLGE_OPC_SCH_RQ_CNT);
if (ret)
return ret;
- *pos += scnprintf(buf + *pos, len - *pos,
- " %#x\n",
- le32_to_cpu(desc.data[1]));
+ seq_printf(s, "%#-x\n", le32_to_cpu(desc.data[1]));
}
return 0;
}
-static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len,
- int *pos)
+static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, struct seq_file *s)
{
struct hclge_dbg_bitmap_cmd req;
struct hclge_desc desc;
@@ -538,16 +1116,13 @@ static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len,
req.bitmap = (u8)le32_to_cpu(desc.data[1]);
- *pos += scnprintf(buf + *pos, len - *pos, "port_mask: %#x\n",
- req.bit0);
- *pos += scnprintf(buf + *pos, len - *pos, "port_shaping_pass: %#x\n",
- req.bit1);
+ seq_printf(s, "port_mask: %#x\n", req.bit0);
+ seq_printf(s, "port_shaping_pass: %#x\n", req.bit1);
return 0;
}
-static int hclge_dbg_dump_dcb_tm(struct hclge_dev *hdev, char *buf, int len,
- int *pos)
+static int hclge_dbg_dump_dcb_tm(struct hclge_dev *hdev, struct seq_file *s)
{
struct hclge_desc desc[2];
u8 port_id = 0;
@@ -558,32 +1133,23 @@ static int hclge_dbg_dump_dcb_tm(struct hclge_dev *hdev, char *buf, int len,
if (ret)
return ret;
- *pos += scnprintf(buf + *pos, len - *pos, "SCH_NIC_NUM: %#x\n",
- le32_to_cpu(desc[0].data[1]));
- *pos += scnprintf(buf + *pos, len - *pos, "SCH_ROCE_NUM: %#x\n",
- le32_to_cpu(desc[0].data[2]));
+ seq_printf(s, "SCH_NIC_NUM: %#x\n", le32_to_cpu(desc[0].data[1]));
+ seq_printf(s, "SCH_ROCE_NUM: %#x\n", le32_to_cpu(desc[0].data[2]));
ret = hclge_dbg_cmd_send(hdev, desc, port_id, 2,
HCLGE_OPC_TM_INTERNAL_STS);
if (ret)
return ret;
- *pos += scnprintf(buf + *pos, len - *pos, "pri_bp: %#x\n",
- le32_to_cpu(desc[0].data[1]));
- *pos += scnprintf(buf + *pos, len - *pos, "fifo_dfx_info: %#x\n",
- le32_to_cpu(desc[0].data[2]));
- *pos += scnprintf(buf + *pos, len - *pos,
- "sch_roce_fifo_afull_gap: %#x\n",
- le32_to_cpu(desc[0].data[3]));
- *pos += scnprintf(buf + *pos, len - *pos,
- "tx_private_waterline: %#x\n",
- le32_to_cpu(desc[0].data[4]));
- *pos += scnprintf(buf + *pos, len - *pos, "tm_bypass_en: %#x\n",
- le32_to_cpu(desc[0].data[5]));
- *pos += scnprintf(buf + *pos, len - *pos, "SSU_TM_BYPASS_EN: %#x\n",
- le32_to_cpu(desc[1].data[0]));
- *pos += scnprintf(buf + *pos, len - *pos, "SSU_RESERVE_CFG: %#x\n",
- le32_to_cpu(desc[1].data[1]));
+ seq_printf(s, "pri_bp: %#x\n", le32_to_cpu(desc[0].data[1]));
+ seq_printf(s, "fifo_dfx_info: %#x\n", le32_to_cpu(desc[0].data[2]));
+ seq_printf(s, "sch_roce_fifo_afull_gap: %#x\n",
+ le32_to_cpu(desc[0].data[3]));
+ seq_printf(s, "tx_private_waterline: %#x\n",
+ le32_to_cpu(desc[0].data[4]));
+ seq_printf(s, "tm_bypass_en: %#x\n", le32_to_cpu(desc[0].data[5]));
+ seq_printf(s, "SSU_TM_BYPASS_EN: %#x\n", le32_to_cpu(desc[1].data[0]));
+ seq_printf(s, "SSU_RESERVE_CFG: %#x\n", le32_to_cpu(desc[1].data[1]));
if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER)
return 0;
@@ -593,65 +1159,60 @@ static int hclge_dbg_dump_dcb_tm(struct hclge_dev *hdev, char *buf, int len,
if (ret)
return ret;
- *pos += scnprintf(buf + *pos, len - *pos, "TC_MAP_SEL: %#x\n",
- le32_to_cpu(desc[0].data[1]));
- *pos += scnprintf(buf + *pos, len - *pos, "IGU_PFC_PRI_EN: %#x\n",
- le32_to_cpu(desc[0].data[2]));
- *pos += scnprintf(buf + *pos, len - *pos, "MAC_PFC_PRI_EN: %#x\n",
- le32_to_cpu(desc[0].data[3]));
- *pos += scnprintf(buf + *pos, len - *pos, "IGU_PRI_MAP_TC_CFG: %#x\n",
- le32_to_cpu(desc[0].data[4]));
- *pos += scnprintf(buf + *pos, len - *pos,
- "IGU_TX_PRI_MAP_TC_CFG: %#x\n",
- le32_to_cpu(desc[0].data[5]));
+ seq_printf(s, "TC_MAP_SEL: %#x\n", le32_to_cpu(desc[0].data[1]));
+ seq_printf(s, "IGU_PFC_PRI_EN: %#x\n", le32_to_cpu(desc[0].data[2]));
+ seq_printf(s, "MAC_PFC_PRI_EN: %#x\n", le32_to_cpu(desc[0].data[3]));
+ seq_printf(s, "IGU_PRI_MAP_TC_CFG: %#x\n",
+ le32_to_cpu(desc[0].data[4]));
+ seq_printf(s, "IGU_TX_PRI_MAP_TC_CFG: %#x\n",
+ le32_to_cpu(desc[0].data[5]));
return 0;
}
-static int hclge_dbg_dump_dcb(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_dcb(struct seq_file *s, void *data)
{
- int pos = 0;
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
int ret;
- ret = hclge_dbg_dump_dcb_qset(hdev, buf, len, &pos);
+ ret = hclge_dbg_dump_dcb_qset(hdev, s);
if (ret)
return ret;
- ret = hclge_dbg_dump_dcb_pri(hdev, buf, len, &pos);
+ ret = hclge_dbg_dump_dcb_pri(hdev, s);
if (ret)
return ret;
- ret = hclge_dbg_dump_dcb_pg(hdev, buf, len, &pos);
+ ret = hclge_dbg_dump_dcb_pg(hdev, s);
if (ret)
return ret;
- ret = hclge_dbg_dump_dcb_queue(hdev, buf, len, &pos);
+ ret = hclge_dbg_dump_dcb_queue(hdev, s);
if (ret)
return ret;
- ret = hclge_dbg_dump_dcb_port(hdev, buf, len, &pos);
+ ret = hclge_dbg_dump_dcb_port(hdev, s);
if (ret)
return ret;
- return hclge_dbg_dump_dcb_tm(hdev, buf, len, &pos);
+ return hclge_dbg_dump_dcb_tm(hdev, s);
}
-static int hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev,
- enum hnae3_dbg_cmd cmd, char *buf, int len)
+static int hclge_dbg_dump_reg_cmd(enum hnae3_dbg_cmd cmd, struct seq_file *s)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
const struct hclge_dbg_reg_type_info *reg_info;
- int pos = 0, ret = 0;
- int i;
+ int ret = 0;
+ u32 i;
for (i = 0; i < ARRAY_SIZE(hclge_dbg_reg_info); i++) {
reg_info = &hclge_dbg_reg_info[i];
if (cmd == reg_info->cmd) {
if (cmd == HNAE3_DBG_CMD_REG_TQP)
- return hclge_dbg_dump_reg_tqp(hdev, reg_info,
- buf, len, &pos);
+ return hclge_dbg_dump_reg_tqp(hdev,
+ reg_info, s);
- ret = hclge_dbg_dump_reg_common(hdev, reg_info, buf,
- len, &pos);
+ ret = hclge_dbg_dump_reg_common(hdev, reg_info, s);
if (ret)
break;
}
@@ -660,12 +1221,57 @@ static int hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev,
return ret;
}
-static int hclge_dbg_dump_tc(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_bios_reg_cmd(struct seq_file *s, void *data)
{
+ return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_BIOS_COMMON, s);
+}
+
+static int hclge_dbg_dump_ssu_reg_cmd(struct seq_file *s, void *data)
+{
+ return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_SSU, s);
+}
+
+static int hclge_dbg_dump_igu_egu_reg_cmd(struct seq_file *s, void *data)
+{
+ return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_IGU_EGU, s);
+}
+
+static int hclge_dbg_dump_rpu_reg_cmd(struct seq_file *s, void *data)
+{
+ return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_RPU, s);
+}
+
+static int hclge_dbg_dump_ncsi_reg_cmd(struct seq_file *s, void *data)
+{
+ return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_NCSI, s);
+}
+
+static int hclge_dbg_dump_rtc_reg_cmd(struct seq_file *s, void *data)
+{
+ return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_RTC, s);
+}
+
+static int hclge_dbg_dump_ppp_reg_cmd(struct seq_file *s, void *data)
+{
+ return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_PPP, s);
+}
+
+static int hclge_dbg_dump_rcb_reg_cmd(struct seq_file *s, void *data)
+{
+ return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_RCB, s);
+}
+
+static int hclge_dbg_dump_tqp_reg_cmd(struct seq_file *s, void *data)
+{
+ return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_TQP, s);
+}
+
+static int hclge_dbg_dump_tc(struct seq_file *s, void *data)
+{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_ets_tc_weight_cmd *ets_weight;
+ const char *sch_mode_str;
struct hclge_desc desc;
- char *sch_mode_str;
- int pos = 0;
int ret;
u8 i;
@@ -685,72 +1291,37 @@ static int hclge_dbg_dump_tc(struct hclge_dev *hdev, char *buf, int len)
ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;
- pos += scnprintf(buf + pos, len - pos, "enabled tc number: %u\n",
- hdev->tm_info.num_tc);
- pos += scnprintf(buf + pos, len - pos, "weight_offset: %u\n",
- ets_weight->weight_offset);
+ seq_printf(s, "enabled tc number: %u\n", hdev->tm_info.num_tc);
+ seq_printf(s, "weight_offset: %u\n", ets_weight->weight_offset);
- pos += scnprintf(buf + pos, len - pos, "TC MODE WEIGHT\n");
+ seq_puts(s, "TC MODE WEIGHT\n");
for (i = 0; i < HNAE3_MAX_TC; i++) {
sch_mode_str = ets_weight->tc_weight[i] ? "dwrr" : "sp";
- pos += scnprintf(buf + pos, len - pos, "%u %4s %3u\n",
- i, sch_mode_str, ets_weight->tc_weight[i]);
+ seq_printf(s, "%u %4s %3u\n", i, sch_mode_str,
+ ets_weight->tc_weight[i]);
}
return 0;
}
-static const struct hclge_dbg_item tm_pg_items[] = {
- { "ID", 2 },
- { "PRI_MAP", 2 },
- { "MODE", 2 },
- { "DWRR", 2 },
- { "C_IR_B", 2 },
- { "C_IR_U", 2 },
- { "C_IR_S", 2 },
- { "C_BS_B", 2 },
- { "C_BS_S", 2 },
- { "C_FLAG", 2 },
- { "C_RATE(Mbps)", 2 },
- { "P_IR_B", 2 },
- { "P_IR_U", 2 },
- { "P_IR_S", 2 },
- { "P_BS_B", 2 },
- { "P_BS_S", 2 },
- { "P_FLAG", 2 },
- { "P_RATE(Mbps)", 0 }
-};
-
-static void hclge_dbg_fill_shaper_content(struct hclge_tm_shaper_para *para,
- char **result, u8 *index)
+static void hclge_dbg_fill_shaper_content(struct seq_file *s,
+ struct hclge_tm_shaper_para *para)
{
- sprintf(result[(*index)++], "%3u", para->ir_b);
- sprintf(result[(*index)++], "%3u", para->ir_u);
- sprintf(result[(*index)++], "%3u", para->ir_s);
- sprintf(result[(*index)++], "%3u", para->bs_b);
- sprintf(result[(*index)++], "%3u", para->bs_s);
- sprintf(result[(*index)++], "%3u", para->flag);
- sprintf(result[(*index)++], "%6u", para->rate);
+ seq_printf(s, "%-8u%-8u%-8u%-8u%-8u%-8u%-14u", para->ir_b, para->ir_u,
+ para->ir_s, para->bs_b, para->bs_s, para->flag, para->rate);
}
-static int __hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *data_str,
- char *buf, int len)
+static int hclge_dbg_dump_tm_pg(struct seq_file *s, void *data)
{
struct hclge_tm_shaper_para c_shaper_para, p_shaper_para;
- char *result[ARRAY_SIZE(tm_pg_items)], *sch_mode_str;
- u8 pg_id, sch_mode, weight, pri_bit_map, i, j;
- char content[HCLGE_DBG_TM_INFO_LEN];
- int pos = 0;
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
+ u8 pg_id, sch_mode, weight, pri_bit_map;
+ const char *sch_mode_str;
int ret;
- for (i = 0; i < ARRAY_SIZE(tm_pg_items); i++) {
- result[i] = data_str;
- data_str += HCLGE_DBG_DATA_STR_LEN;
- }
-
- hclge_dbg_fill_content(content, sizeof(content), tm_pg_items,
- NULL, ARRAY_SIZE(tm_pg_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ seq_puts(s, "ID PRI_MAP MODE DWRR C_IR_B C_IR_U C_IR_S C_BS_B ");
+ seq_puts(s, "C_BS_S C_FLAG C_RATE(Mbps) P_IR_B P_IR_U P_IR_S ");
+ seq_puts(s, "P_BS_B P_BS_S P_FLAG P_RATE(Mbps)\n");
for (pg_id = 0; pg_id < hdev->tm_info.num_pg; pg_id++) {
ret = hclge_tm_get_pg_to_pri_map(hdev, pg_id, &pri_bit_map);
@@ -780,68 +1351,41 @@ static int __hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *data_str,
sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
"sp";
- j = 0;
- sprintf(result[j++], "%02u", pg_id);
- sprintf(result[j++], "0x%02x", pri_bit_map);
- sprintf(result[j++], "%4s", sch_mode_str);
- sprintf(result[j++], "%3u", weight);
- hclge_dbg_fill_shaper_content(&c_shaper_para, result, &j);
- hclge_dbg_fill_shaper_content(&p_shaper_para, result, &j);
-
- hclge_dbg_fill_content(content, sizeof(content), tm_pg_items,
- (const char **)result,
- ARRAY_SIZE(tm_pg_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ seq_printf(s, "%02u 0x%-7x%-6s%-6u", pg_id, pri_bit_map,
+ sch_mode_str, weight);
+ hclge_dbg_fill_shaper_content(s, &c_shaper_para);
+ hclge_dbg_fill_shaper_content(s, &p_shaper_para);
+ seq_puts(s, "\n");
}
return 0;
}
-static int hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *buf, int len)
-{
- char *data_str;
- int ret;
-
- data_str = kcalloc(ARRAY_SIZE(tm_pg_items),
- HCLGE_DBG_DATA_STR_LEN, GFP_KERNEL);
- if (!data_str)
- return -ENOMEM;
-
- ret = __hclge_dbg_dump_tm_pg(hdev, data_str, buf, len);
-
- kfree(data_str);
-
- return ret;
-}
-
-static int hclge_dbg_dump_tm_port(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_tm_port(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_tm_shaper_para shaper_para;
- int pos = 0;
int ret;
ret = hclge_tm_get_port_shaper(hdev, &shaper_para);
if (ret)
return ret;
- pos += scnprintf(buf + pos, len - pos,
- "IR_B IR_U IR_S BS_B BS_S FLAG RATE(Mbps)\n");
- pos += scnprintf(buf + pos, len - pos,
- "%3u %3u %3u %3u %3u %1u %6u\n",
- shaper_para.ir_b, shaper_para.ir_u, shaper_para.ir_s,
- shaper_para.bs_b, shaper_para.bs_s, shaper_para.flag,
- shaper_para.rate);
+ seq_puts(s, "IR_B IR_U IR_S BS_B BS_S FLAG RATE(Mbps)\n");
+ seq_printf(s, "%3u %3u %3u %3u %3u %1u %6u\n",
+ shaper_para.ir_b, shaper_para.ir_u, shaper_para.ir_s,
+ shaper_para.bs_b, shaper_para.bs_s, shaper_para.flag,
+ shaper_para.rate);
return 0;
}
static int hclge_dbg_dump_tm_bp_qset_map(struct hclge_dev *hdev, u8 tc_id,
- char *buf, int len)
+ struct seq_file *s)
{
u32 qset_mapping[HCLGE_BP_EXT_GRP_NUM];
struct hclge_bp_to_qs_map_cmd *map;
struct hclge_desc desc;
- int pos = 0;
u8 group_id;
u8 grp_num;
u16 i = 0;
@@ -867,27 +1411,27 @@ static int hclge_dbg_dump_tm_bp_qset_map(struct hclge_dev *hdev, u8 tc_id,
qset_mapping[group_id] = le32_to_cpu(map->qs_bit_map);
}
- pos += scnprintf(buf + pos, len - pos, "INDEX | TM BP QSET MAPPING:\n");
+ seq_puts(s, "INDEX | TM BP QSET MAPPING:\n");
for (group_id = 0; group_id < grp_num / 8; group_id++) {
- pos += scnprintf(buf + pos, len - pos,
- "%04d | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n",
- group_id * 256, qset_mapping[i + 7],
- qset_mapping[i + 6], qset_mapping[i + 5],
- qset_mapping[i + 4], qset_mapping[i + 3],
- qset_mapping[i + 2], qset_mapping[i + 1],
- qset_mapping[i]);
+ seq_printf(s,
+ "%04d | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n",
+ group_id * 256, qset_mapping[i + 7],
+ qset_mapping[i + 6], qset_mapping[i + 5],
+ qset_mapping[i + 4], qset_mapping[i + 3],
+ qset_mapping[i + 2], qset_mapping[i + 1],
+ qset_mapping[i]);
i += 8;
}
- return pos;
+ return 0;
}
-static int hclge_dbg_dump_tm_map(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_tm_map(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
u16 queue_id;
u16 qset_id;
u8 link_vld;
- int pos = 0;
u8 pri_id;
u8 tc_id;
int ret;
@@ -906,32 +1450,28 @@ static int hclge_dbg_dump_tm_map(struct hclge_dev *hdev, char *buf, int len)
if (ret)
return ret;
- pos += scnprintf(buf + pos, len - pos,
- "QUEUE_ID QSET_ID PRI_ID TC_ID\n");
- pos += scnprintf(buf + pos, len - pos,
- "%04u %4u %3u %2u\n",
- queue_id, qset_id, pri_id, tc_id);
+ seq_puts(s, "QUEUE_ID QSET_ID PRI_ID TC_ID\n");
+ seq_printf(s, "%04u %4u %3u %2u\n",
+ queue_id, qset_id, pri_id, tc_id);
if (!hnae3_dev_dcb_supported(hdev))
continue;
- ret = hclge_dbg_dump_tm_bp_qset_map(hdev, tc_id, buf + pos,
- len - pos);
+ ret = hclge_dbg_dump_tm_bp_qset_map(hdev, tc_id, s);
if (ret < 0)
return ret;
- pos += ret;
- pos += scnprintf(buf + pos, len - pos, "\n");
+ seq_puts(s, "\n");
}
return 0;
}
-static int hclge_dbg_dump_tm_nodes(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_tm_nodes(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_tm_nodes_cmd *nodes;
struct hclge_desc desc;
- int pos = 0;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
@@ -944,65 +1484,36 @@ static int hclge_dbg_dump_tm_nodes(struct hclge_dev *hdev, char *buf, int len)
nodes = (struct hclge_tm_nodes_cmd *)desc.data;
- pos += scnprintf(buf + pos, len - pos, " BASE_ID MAX_NUM\n");
- pos += scnprintf(buf + pos, len - pos, "PG %4u %4u\n",
- nodes->pg_base_id, nodes->pg_num);
- pos += scnprintf(buf + pos, len - pos, "PRI %4u %4u\n",
- nodes->pri_base_id, nodes->pri_num);
- pos += scnprintf(buf + pos, len - pos, "QSET %4u %4u\n",
- le16_to_cpu(nodes->qset_base_id),
- le16_to_cpu(nodes->qset_num));
- pos += scnprintf(buf + pos, len - pos, "QUEUE %4u %4u\n",
- le16_to_cpu(nodes->queue_base_id),
- le16_to_cpu(nodes->queue_num));
+ seq_puts(s, " BASE_ID MAX_NUM\n");
+ seq_printf(s, "PG %4u %4u\n", nodes->pg_base_id,
+ nodes->pg_num);
+ seq_printf(s, "PRI %4u %4u\n", nodes->pri_base_id,
+ nodes->pri_num);
+ seq_printf(s, "QSET %4u %4u\n",
+ le16_to_cpu(nodes->qset_base_id),
+ le16_to_cpu(nodes->qset_num));
+ seq_printf(s, "QUEUE %4u %4u\n",
+ le16_to_cpu(nodes->queue_base_id),
+ le16_to_cpu(nodes->queue_num));
return 0;
}
-static const struct hclge_dbg_item tm_pri_items[] = {
- { "ID", 4 },
- { "MODE", 2 },
- { "DWRR", 2 },
- { "C_IR_B", 2 },
- { "C_IR_U", 2 },
- { "C_IR_S", 2 },
- { "C_BS_B", 2 },
- { "C_BS_S", 2 },
- { "C_FLAG", 2 },
- { "C_RATE(Mbps)", 2 },
- { "P_IR_B", 2 },
- { "P_IR_U", 2 },
- { "P_IR_S", 2 },
- { "P_BS_B", 2 },
- { "P_BS_S", 2 },
- { "P_FLAG", 2 },
- { "P_RATE(Mbps)", 0 }
-};
-
-static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_tm_pri(struct seq_file *s, void *data)
{
struct hclge_tm_shaper_para c_shaper_para, p_shaper_para;
- char *result[ARRAY_SIZE(tm_pri_items)], *sch_mode_str;
- char content[HCLGE_DBG_TM_INFO_LEN];
- u8 pri_num, sch_mode, weight, i, j;
- char *data_str;
- int pos, ret;
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
+ u8 pri_num, sch_mode, weight, i;
+ const char *sch_mode_str;
+ int ret;
ret = hclge_tm_get_pri_num(hdev, &pri_num);
if (ret)
return ret;
- data_str = kcalloc(ARRAY_SIZE(tm_pri_items), HCLGE_DBG_DATA_STR_LEN,
- GFP_KERNEL);
- if (!data_str)
- return -ENOMEM;
-
- for (i = 0; i < ARRAY_SIZE(tm_pri_items); i++)
- result[i] = &data_str[i * HCLGE_DBG_DATA_STR_LEN];
-
- hclge_dbg_fill_content(content, sizeof(content), tm_pri_items,
- NULL, ARRAY_SIZE(tm_pri_items));
- pos = scnprintf(buf, len, "%s", content);
+ seq_puts(s, "ID MODE DWRR C_IR_B C_IR_U C_IR_S C_BS_B ");
+ seq_puts(s, "C_BS_S C_FLAG C_RATE(Mbps) P_IR_B P_IR_U P_IR_S ");
+ seq_puts(s, "P_BS_B P_BS_S P_FLAG P_RATE(Mbps)\n");
for (i = 0; i < pri_num; i++) {
ret = hclge_tm_get_pri_sch_mode(hdev, i, &sch_mode);
@@ -1028,59 +1539,31 @@ static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len)
sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
"sp";
- j = 0;
- sprintf(result[j++], "%04u", i);
- sprintf(result[j++], "%4s", sch_mode_str);
- sprintf(result[j++], "%3u", weight);
- hclge_dbg_fill_shaper_content(&c_shaper_para, result, &j);
- hclge_dbg_fill_shaper_content(&p_shaper_para, result, &j);
- hclge_dbg_fill_content(content, sizeof(content), tm_pri_items,
- (const char **)result,
- ARRAY_SIZE(tm_pri_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ seq_printf(s, "%04u %-6s%-6u", i, sch_mode_str, weight);
+ hclge_dbg_fill_shaper_content(s, &c_shaper_para);
+ hclge_dbg_fill_shaper_content(s, &p_shaper_para);
+ seq_puts(s, "\n");
}
out:
- kfree(data_str);
return ret;
}
-static const struct hclge_dbg_item tm_qset_items[] = {
- { "ID", 4 },
- { "MAP_PRI", 2 },
- { "LINK_VLD", 2 },
- { "MODE", 2 },
- { "DWRR", 2 },
- { "IR_B", 2 },
- { "IR_U", 2 },
- { "IR_S", 2 },
- { "BS_B", 2 },
- { "BS_S", 2 },
- { "FLAG", 2 },
- { "RATE(Mbps)", 0 }
-};
-
-static int hclge_dbg_dump_tm_qset(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_tm_qset(struct seq_file *s, void *data)
{
- char data_str[ARRAY_SIZE(tm_qset_items)][HCLGE_DBG_DATA_STR_LEN];
- char *result[ARRAY_SIZE(tm_qset_items)], *sch_mode_str;
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
u8 priority, link_vld, sch_mode, weight;
struct hclge_tm_shaper_para shaper_para;
- char content[HCLGE_DBG_TM_INFO_LEN];
+ const char *sch_mode_str;
u16 qset_num, i;
- int ret, pos;
- u8 j;
+ int ret;
ret = hclge_tm_get_qset_num(hdev, &qset_num);
if (ret)
return ret;
- for (i = 0; i < ARRAY_SIZE(tm_qset_items); i++)
- result[i] = &data_str[i][0];
-
- hclge_dbg_fill_content(content, sizeof(content), tm_qset_items,
- NULL, ARRAY_SIZE(tm_qset_items));
- pos = scnprintf(buf, len, "%s", content);
+ seq_puts(s, "ID MAP_PRI LINK_VLD MODE DWRR IR_B IR_U IR_S ");
+ seq_puts(s, "BS_B BS_S FLAG RATE(Mbps)\n");
for (i = 0; i < qset_num; i++) {
ret = hclge_tm_get_qset_map_pri(hdev, i, &priority, &link_vld);
@@ -1102,29 +1585,22 @@ static int hclge_dbg_dump_tm_qset(struct hclge_dev *hdev, char *buf, int len)
sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
"sp";
- j = 0;
- sprintf(result[j++], "%04u", i);
- sprintf(result[j++], "%4u", priority);
- sprintf(result[j++], "%4u", link_vld);
- sprintf(result[j++], "%4s", sch_mode_str);
- sprintf(result[j++], "%3u", weight);
- hclge_dbg_fill_shaper_content(&shaper_para, result, &j);
-
- hclge_dbg_fill_content(content, sizeof(content), tm_qset_items,
- (const char **)result,
- ARRAY_SIZE(tm_qset_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ seq_printf(s, "%04u %-9u%-10u%-6s%-6u", i, priority, link_vld,
+ sch_mode_str, weight);
+ seq_printf(s, "%-6u%-6u%-6u%-6u%-6u%-6u%-14u\n",
+ shaper_para.ir_b, shaper_para.ir_u, shaper_para.ir_s,
+ shaper_para.bs_b, shaper_para.bs_s, shaper_para.flag,
+ shaper_para.rate);
}
return 0;
}
-static int hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev, char *buf,
- int len)
+static int hclge_dbg_dump_qos_pause_cfg(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_cfg_pause_param_cmd *pause_param;
struct hclge_desc desc;
- int pos = 0;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);
@@ -1137,23 +1613,21 @@ static int hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev, char *buf,
pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
- pos += scnprintf(buf + pos, len - pos, "pause_trans_gap: 0x%x\n",
- pause_param->pause_trans_gap);
- pos += scnprintf(buf + pos, len - pos, "pause_trans_time: 0x%x\n",
- le16_to_cpu(pause_param->pause_trans_time));
+ seq_printf(s, "pause_trans_gap: 0x%x\n", pause_param->pause_trans_gap);
+ seq_printf(s, "pause_trans_time: 0x%x\n",
+ le16_to_cpu(pause_param->pause_trans_time));
return 0;
}
#define HCLGE_DBG_TC_MASK 0x0F
-static int hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev, char *buf,
- int len)
+static int hclge_dbg_dump_qos_pri_map(struct seq_file *s, void *data)
{
#define HCLGE_DBG_TC_BIT_WIDTH 4
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_qos_pri_map_cmd *pri_map;
struct hclge_desc desc;
- int pos = 0;
u8 *pri_tc;
u8 tc, i;
int ret;
@@ -1168,33 +1642,33 @@ static int hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev, char *buf,
pri_map = (struct hclge_qos_pri_map_cmd *)desc.data;
- pos += scnprintf(buf + pos, len - pos, "vlan_to_pri: 0x%x\n",
- pri_map->vlan_pri);
- pos += scnprintf(buf + pos, len - pos, "PRI TC\n");
+ seq_printf(s, "vlan_to_pri: 0x%x\n", pri_map->vlan_pri);
+ seq_puts(s, "PRI TC\n");
pri_tc = (u8 *)pri_map;
for (i = 0; i < HNAE3_MAX_TC; i++) {
tc = pri_tc[i >> 1] >> ((i & 1) * HCLGE_DBG_TC_BIT_WIDTH);
tc &= HCLGE_DBG_TC_MASK;
- pos += scnprintf(buf + pos, len - pos, "%u %u\n", i, tc);
+ seq_printf(s, "%u %u\n", i, tc);
}
return 0;
}
-static int hclge_dbg_dump_qos_dscp_map(struct hclge_dev *hdev, char *buf,
- int len)
+static int hclge_dbg_dump_qos_dscp_map(struct seq_file *s, void *data)
{
- struct hnae3_knic_private_info *kinfo = &hdev->vport[0].nic.kinfo;
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_desc desc[HCLGE_DSCP_MAP_TC_BD_NUM];
+ struct hnae3_knic_private_info *kinfo;
u8 *req0 = (u8 *)desc[0].data;
u8 *req1 = (u8 *)desc[1].data;
u8 dscp_tc[HNAE3_MAX_DSCP];
- int pos, ret;
+ int ret;
u8 i, j;
- pos = scnprintf(buf, len, "tc map mode: %s\n",
- tc_map_mode_str[kinfo->tc_map_mode]);
+ kinfo = &hdev->vport[0].nic.kinfo;
+
+ seq_printf(s, "tc map mode: %s\n", tc_map_mode_str[kinfo->tc_map_mode]);
if (kinfo->tc_map_mode != HNAE3_TC_MAP_MODE_DSCP)
return 0;
@@ -1209,7 +1683,7 @@ static int hclge_dbg_dump_qos_dscp_map(struct hclge_dev *hdev, char *buf,
return ret;
}
- pos += scnprintf(buf + pos, len - pos, "\nDSCP PRIO TC\n");
+ seq_puts(s, "\nDSCP PRIO TC\n");
/* The low 32 dscp setting use bd0, high 32 dscp setting use bd1 */
for (i = 0; i < HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM; i++) {
@@ -1227,18 +1701,17 @@ static int hclge_dbg_dump_qos_dscp_map(struct hclge_dev *hdev, char *buf,
if (kinfo->dscp_prio[i] == HNAE3_PRIO_ID_INVALID)
continue;
- pos += scnprintf(buf + pos, len - pos, " %2u %u %u\n",
- i, kinfo->dscp_prio[i], dscp_tc[i]);
+ seq_printf(s, " %2u %u %u\n", i, kinfo->dscp_prio[i],
+ dscp_tc[i]);
}
return 0;
}
-static int hclge_dbg_dump_tx_buf_cfg(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_tx_buf_cfg(struct hclge_dev *hdev, struct seq_file *s)
{
struct hclge_tx_buff_alloc_cmd *tx_buf_cmd;
struct hclge_desc desc;
- int pos = 0;
int i, ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, true);
@@ -1251,19 +1724,17 @@ static int hclge_dbg_dump_tx_buf_cfg(struct hclge_dev *hdev, char *buf, int len)
tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc.data;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
- pos += scnprintf(buf + pos, len - pos,
- "tx_packet_buf_tc_%d: 0x%x\n", i,
- le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i]));
+ seq_printf(s, "tx_packet_buf_tc_%d: 0x%x\n", i,
+ le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i]));
- return pos;
+ return 0;
}
-static int hclge_dbg_dump_rx_priv_buf_cfg(struct hclge_dev *hdev, char *buf,
- int len)
+static int hclge_dbg_dump_rx_priv_buf_cfg(struct hclge_dev *hdev,
+ struct seq_file *s)
{
struct hclge_rx_priv_buff_cmd *rx_buf_cmd;
struct hclge_desc desc;
- int pos = 0;
int i, ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, true);
@@ -1274,26 +1745,24 @@ static int hclge_dbg_dump_rx_priv_buf_cfg(struct hclge_dev *hdev, char *buf,
return ret;
}
- pos += scnprintf(buf + pos, len - pos, "\n");
+ seq_puts(s, "\n");
rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc.data;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
- pos += scnprintf(buf + pos, len - pos,
- "rx_packet_buf_tc_%d: 0x%x\n", i,
- le16_to_cpu(rx_buf_cmd->buf_num[i]));
+ seq_printf(s, "rx_packet_buf_tc_%d: 0x%x\n", i,
+ le16_to_cpu(rx_buf_cmd->buf_num[i]));
- pos += scnprintf(buf + pos, len - pos, "rx_share_buf: 0x%x\n",
- le16_to_cpu(rx_buf_cmd->shared_buf));
+ seq_printf(s, "rx_share_buf: 0x%x\n",
+ le16_to_cpu(rx_buf_cmd->shared_buf));
- return pos;
+ return 0;
}
-static int hclge_dbg_dump_rx_common_wl_cfg(struct hclge_dev *hdev, char *buf,
- int len)
+static int hclge_dbg_dump_rx_common_wl_cfg(struct hclge_dev *hdev,
+ struct seq_file *s)
{
struct hclge_rx_com_wl *rx_com_wl;
struct hclge_desc desc;
- int pos = 0;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, true);
@@ -1305,21 +1774,19 @@ static int hclge_dbg_dump_rx_common_wl_cfg(struct hclge_dev *hdev, char *buf,
}
rx_com_wl = (struct hclge_rx_com_wl *)desc.data;
- pos += scnprintf(buf + pos, len - pos, "\n");
- pos += scnprintf(buf + pos, len - pos,
- "rx_com_wl: high: 0x%x, low: 0x%x\n",
- le16_to_cpu(rx_com_wl->com_wl.high),
- le16_to_cpu(rx_com_wl->com_wl.low));
+ seq_puts(s, "\n");
+ seq_printf(s, "rx_com_wl: high: 0x%x, low: 0x%x\n",
+ le16_to_cpu(rx_com_wl->com_wl.high),
+ le16_to_cpu(rx_com_wl->com_wl.low));
- return pos;
+ return 0;
}
-static int hclge_dbg_dump_rx_global_pkt_cnt(struct hclge_dev *hdev, char *buf,
- int len)
+static int hclge_dbg_dump_rx_global_pkt_cnt(struct hclge_dev *hdev,
+ struct seq_file *s)
{
struct hclge_rx_com_wl *rx_packet_cnt;
struct hclge_desc desc;
- int pos = 0;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_GBL_PKT_CNT, true);
@@ -1331,20 +1798,18 @@ static int hclge_dbg_dump_rx_global_pkt_cnt(struct hclge_dev *hdev, char *buf,
}
rx_packet_cnt = (struct hclge_rx_com_wl *)desc.data;
- pos += scnprintf(buf + pos, len - pos,
- "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
- le16_to_cpu(rx_packet_cnt->com_wl.high),
- le16_to_cpu(rx_packet_cnt->com_wl.low));
+ seq_printf(s, "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
+ le16_to_cpu(rx_packet_cnt->com_wl.high),
+ le16_to_cpu(rx_packet_cnt->com_wl.low));
- return pos;
+ return 0;
}
-static int hclge_dbg_dump_rx_priv_wl_buf_cfg(struct hclge_dev *hdev, char *buf,
- int len)
+static int hclge_dbg_dump_rx_priv_wl_buf_cfg(struct hclge_dev *hdev,
+ struct seq_file *s)
{
struct hclge_rx_priv_wl_buf *rx_priv_wl;
struct hclge_desc desc[2];
- int pos = 0;
int i, ret;
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
@@ -1359,28 +1824,25 @@ static int hclge_dbg_dump_rx_priv_wl_buf_cfg(struct hclge_dev *hdev, char *buf,
rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
- pos += scnprintf(buf + pos, len - pos,
- "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i,
- le16_to_cpu(rx_priv_wl->tc_wl[i].high),
- le16_to_cpu(rx_priv_wl->tc_wl[i].low));
+ seq_printf(s, "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i,
+ le16_to_cpu(rx_priv_wl->tc_wl[i].high),
+ le16_to_cpu(rx_priv_wl->tc_wl[i].low));
rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
- pos += scnprintf(buf + pos, len - pos,
- "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n",
- i + HCLGE_TC_NUM_ONE_DESC,
- le16_to_cpu(rx_priv_wl->tc_wl[i].high),
- le16_to_cpu(rx_priv_wl->tc_wl[i].low));
+ seq_printf(s, "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n",
+ i + HCLGE_TC_NUM_ONE_DESC,
+ le16_to_cpu(rx_priv_wl->tc_wl[i].high),
+ le16_to_cpu(rx_priv_wl->tc_wl[i].low));
- return pos;
+ return 0;
}
static int hclge_dbg_dump_rx_common_threshold_cfg(struct hclge_dev *hdev,
- char *buf, int len)
+ struct seq_file *s)
{
struct hclge_rx_com_thrd *rx_com_thrd;
struct hclge_desc desc[2];
- int pos = 0;
int i, ret;
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
@@ -1393,86 +1855,75 @@ static int hclge_dbg_dump_rx_common_threshold_cfg(struct hclge_dev *hdev,
return ret;
}
- pos += scnprintf(buf + pos, len - pos, "\n");
+ seq_puts(s, "\n");
rx_com_thrd = (struct hclge_rx_com_thrd *)desc[0].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
- pos += scnprintf(buf + pos, len - pos,
- "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i,
- le16_to_cpu(rx_com_thrd->com_thrd[i].high),
- le16_to_cpu(rx_com_thrd->com_thrd[i].low));
+ seq_printf(s, "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i,
+ le16_to_cpu(rx_com_thrd->com_thrd[i].high),
+ le16_to_cpu(rx_com_thrd->com_thrd[i].low));
rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
- pos += scnprintf(buf + pos, len - pos,
- "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n",
- i + HCLGE_TC_NUM_ONE_DESC,
- le16_to_cpu(rx_com_thrd->com_thrd[i].high),
- le16_to_cpu(rx_com_thrd->com_thrd[i].low));
+ seq_printf(s, "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n",
+ i + HCLGE_TC_NUM_ONE_DESC,
+ le16_to_cpu(rx_com_thrd->com_thrd[i].high),
+ le16_to_cpu(rx_com_thrd->com_thrd[i].low));
- return pos;
+ return 0;
}
-static int hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev, char *buf,
- int len)
+static int hclge_dbg_dump_qos_buf_cfg(struct seq_file *s, void *data)
{
- int pos = 0;
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
int ret;
- ret = hclge_dbg_dump_tx_buf_cfg(hdev, buf + pos, len - pos);
+ ret = hclge_dbg_dump_tx_buf_cfg(hdev, s);
if (ret < 0)
return ret;
- pos += ret;
- ret = hclge_dbg_dump_rx_priv_buf_cfg(hdev, buf + pos, len - pos);
+ ret = hclge_dbg_dump_rx_priv_buf_cfg(hdev, s);
if (ret < 0)
return ret;
- pos += ret;
- ret = hclge_dbg_dump_rx_common_wl_cfg(hdev, buf + pos, len - pos);
+ ret = hclge_dbg_dump_rx_common_wl_cfg(hdev, s);
if (ret < 0)
return ret;
- pos += ret;
- ret = hclge_dbg_dump_rx_global_pkt_cnt(hdev, buf + pos, len - pos);
+ ret = hclge_dbg_dump_rx_global_pkt_cnt(hdev, s);
if (ret < 0)
return ret;
- pos += ret;
- pos += scnprintf(buf + pos, len - pos, "\n");
+ seq_puts(s, "\n");
if (!hnae3_dev_dcb_supported(hdev))
return 0;
- ret = hclge_dbg_dump_rx_priv_wl_buf_cfg(hdev, buf + pos, len - pos);
+ ret = hclge_dbg_dump_rx_priv_wl_buf_cfg(hdev, s);
if (ret < 0)
return ret;
- pos += ret;
- ret = hclge_dbg_dump_rx_common_threshold_cfg(hdev, buf + pos,
- len - pos);
+ ret = hclge_dbg_dump_rx_common_threshold_cfg(hdev, s);
if (ret < 0)
return ret;
return 0;
}
-static int hclge_dbg_dump_mng_table(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_mng_table(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_mac_ethertype_idx_rd_cmd *req0;
struct hclge_desc desc;
u32 msg_egress_port;
- int pos = 0;
int ret, i;
- pos += scnprintf(buf + pos, len - pos,
- "entry mac_addr mask ether ");
- pos += scnprintf(buf + pos, len - pos,
- "mask vlan mask i_map i_dir e_type ");
- pos += scnprintf(buf + pos, len - pos, "pf_id vf_id q_id drop\n");
+ seq_puts(s, "entry mac_addr mask ether ");
+ seq_puts(s, "mask vlan mask i_map i_dir e_type ");
+ seq_puts(s, "pf_id vf_id q_id drop\n");
for (i = 0; i < HCLGE_DBG_MNG_TBL_MAX; i++) {
hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_ETHERTYPE_IDX_RD,
true);
- req0 = (struct hclge_mac_ethertype_idx_rd_cmd *)&desc.data;
+ req0 = (struct hclge_mac_ethertype_idx_rd_cmd *)desc.data;
req0->index = cpu_to_le16(i);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -1485,46 +1936,40 @@ static int hclge_dbg_dump_mng_table(struct hclge_dev *hdev, char *buf, int len)
if (!req0->resp_code)
continue;
- pos += scnprintf(buf + pos, len - pos, "%02u %pM ",
- le16_to_cpu(req0->index), req0->mac_addr);
+ seq_printf(s, "%02u %pM ",
+ le16_to_cpu(req0->index), req0->mac_addr);
- pos += scnprintf(buf + pos, len - pos,
- "%x %04x %x %04x ",
- !!(req0->flags & HCLGE_DBG_MNG_MAC_MASK_B),
- le16_to_cpu(req0->ethter_type),
- !!(req0->flags & HCLGE_DBG_MNG_ETHER_MASK_B),
- le16_to_cpu(req0->vlan_tag) &
- HCLGE_DBG_MNG_VLAN_TAG);
+ seq_printf(s, "%x %04x %x %04x ",
+ !!(req0->flags & HCLGE_DBG_MNG_MAC_MASK_B),
+ le16_to_cpu(req0->ethter_type),
+ !!(req0->flags & HCLGE_DBG_MNG_ETHER_MASK_B),
+ le16_to_cpu(req0->vlan_tag) &
+ HCLGE_DBG_MNG_VLAN_TAG);
- pos += scnprintf(buf + pos, len - pos,
- "%x %02x %02x ",
- !!(req0->flags & HCLGE_DBG_MNG_VLAN_MASK_B),
- req0->i_port_bitmap, req0->i_port_direction);
+ seq_printf(s, "%x %02x %02x ",
+ !!(req0->flags & HCLGE_DBG_MNG_VLAN_MASK_B),
+ req0->i_port_bitmap, req0->i_port_direction);
msg_egress_port = le16_to_cpu(req0->egress_port);
- pos += scnprintf(buf + pos, len - pos,
- "%x %x %02x %04x %x\n",
- !!(msg_egress_port & HCLGE_DBG_MNG_E_TYPE_B),
- msg_egress_port & HCLGE_DBG_MNG_PF_ID,
- (msg_egress_port >> 3) & HCLGE_DBG_MNG_VF_ID,
- le16_to_cpu(req0->egress_queue),
- !!(msg_egress_port & HCLGE_DBG_MNG_DROP_B));
+ seq_printf(s, "%x %x %02x %04x %x\n",
+ !!(msg_egress_port & HCLGE_DBG_MNG_E_TYPE_B),
+ msg_egress_port & HCLGE_DBG_MNG_PF_ID,
+ (msg_egress_port >> 3) & HCLGE_DBG_MNG_VF_ID,
+ le16_to_cpu(req0->egress_queue),
+ !!(msg_egress_port & HCLGE_DBG_MNG_DROP_B));
}
return 0;
}
-#define HCLGE_DBG_TCAM_BUF_SIZE 256
-
static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x,
- char *tcam_buf,
+ struct seq_file *s,
struct hclge_dbg_tcam_msg tcam_msg)
{
struct hclge_fd_tcam_config_1_cmd *req1;
struct hclge_fd_tcam_config_2_cmd *req2;
struct hclge_fd_tcam_config_3_cmd *req3;
struct hclge_desc desc[3];
- int pos = 0;
int ret, i;
__le32 *req;
@@ -1546,27 +1991,23 @@ static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x,
if (ret)
return ret;
- pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
- "read result tcam key %s(%u):\n", sel_x ? "x" : "y",
- tcam_msg.loc);
+ seq_printf(s, "read result tcam key %s(%u):\n",
+ sel_x ? "x" : "y", tcam_msg.loc);
/* tcam_data0 ~ tcam_data1 */
req = (__le32 *)req1->tcam_data;
for (i = 0; i < 2; i++)
- pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
- "%08x\n", le32_to_cpu(*req++));
+ seq_printf(s, "%08x\n", le32_to_cpu(*req++));
/* tcam_data2 ~ tcam_data7 */
req = (__le32 *)req2->tcam_data;
for (i = 0; i < 6; i++)
- pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
- "%08x\n", le32_to_cpu(*req++));
+ seq_printf(s, "%08x\n", le32_to_cpu(*req++));
/* tcam_data8 ~ tcam_data12 */
req = (__le32 *)req3->tcam_data;
for (i = 0; i < 5; i++)
- pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
- "%08x\n", le32_to_cpu(*req++));
+ seq_printf(s, "%08x\n", le32_to_cpu(*req++));
return ret;
}
@@ -1590,14 +2031,13 @@ static int hclge_dbg_get_rules_location(struct hclge_dev *hdev, u16 *rule_locs)
return cnt;
}
-static int hclge_dbg_dump_fd_tcam(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_fd_tcam(struct seq_file *s, void *data)
{
- u32 rule_num = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_dbg_tcam_msg tcam_msg;
int i, ret, rule_cnt;
u16 *rule_locs;
- char *tcam_buf;
- int pos = 0;
+ u32 rule_num;
if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
dev_err(&hdev->pdev->dev,
@@ -1605,6 +2045,7 @@ static int hclge_dbg_dump_fd_tcam(struct hclge_dev *hdev, char *buf, int len)
return -EOPNOTSUPP;
}
+ rule_num = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
if (!hdev->hclge_fd_rule_num || !rule_num)
return 0;
@@ -1612,12 +2053,6 @@ static int hclge_dbg_dump_fd_tcam(struct hclge_dev *hdev, char *buf, int len)
if (!rule_locs)
return -ENOMEM;
- tcam_buf = kzalloc(HCLGE_DBG_TCAM_BUF_SIZE, GFP_KERNEL);
- if (!tcam_buf) {
- kfree(rule_locs);
- return -ENOMEM;
- }
-
rule_cnt = hclge_dbg_get_rules_location(hdev, rule_locs);
if (rule_cnt < 0) {
ret = rule_cnt;
@@ -1631,38 +2066,34 @@ static int hclge_dbg_dump_fd_tcam(struct hclge_dev *hdev, char *buf, int len)
tcam_msg.stage = HCLGE_FD_STAGE_1;
tcam_msg.loc = rule_locs[i];
- ret = hclge_dbg_fd_tcam_read(hdev, true, tcam_buf, tcam_msg);
+ ret = hclge_dbg_fd_tcam_read(hdev, true, s, tcam_msg);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to get fd tcam key x, ret = %d\n", ret);
goto out;
}
- pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf);
-
- ret = hclge_dbg_fd_tcam_read(hdev, false, tcam_buf, tcam_msg);
+ ret = hclge_dbg_fd_tcam_read(hdev, false, s, tcam_msg);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to get fd tcam key y, ret = %d\n", ret);
goto out;
}
- pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf);
}
out:
- kfree(tcam_buf);
kfree(rule_locs);
return ret;
}
-static int hclge_dbg_dump_fd_counter(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_fd_counter(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
struct hclge_fd_ad_cnt_read_cmd *req;
char str_id[HCLGE_DBG_ID_LEN];
struct hclge_desc desc;
- int pos = 0;
int ret;
u64 cnt;
u8 i;
@@ -1670,8 +2101,7 @@ static int hclge_dbg_dump_fd_counter(struct hclge_dev *hdev, char *buf, int len)
if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return -EOPNOTSUPP;
- pos += scnprintf(buf + pos, len - pos,
- "func_id\thit_times\n");
+ seq_puts(s, "func_id\thit_times\n");
for (i = 0; i < func_num; i++) {
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_CNT_OP, true);
@@ -1685,8 +2115,7 @@ static int hclge_dbg_dump_fd_counter(struct hclge_dev *hdev, char *buf, int len)
}
cnt = le64_to_cpu(req->cnt);
hclge_dbg_get_func_id_str(str_id, i);
- pos += scnprintf(buf + pos, len - pos,
- "%s\t%llu\n", str_id, cnt);
+ seq_printf(s, "%s\t%llu\n", str_id, cnt);
}
return 0;
@@ -1737,74 +2166,95 @@ int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len)
return 0;
}
-static int hclge_dbg_dump_serv_info(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_seq_dump_rst_info(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
+ u32 i, offset;
+
+ seq_printf(s, "PF reset count: %u\n", hdev->rst_stats.pf_rst_cnt);
+ seq_printf(s, "FLR reset count: %u\n", hdev->rst_stats.flr_rst_cnt);
+ seq_printf(s, "GLOBAL reset count: %u\n",
+ hdev->rst_stats.global_rst_cnt);
+ seq_printf(s, "IMP reset count: %u\n", hdev->rst_stats.imp_rst_cnt);
+ seq_printf(s, "reset done count: %u\n", hdev->rst_stats.reset_done_cnt);
+ seq_printf(s, "HW reset done count: %u\n",
+ hdev->rst_stats.hw_reset_done_cnt);
+ seq_printf(s, "reset count: %u\n", hdev->rst_stats.reset_cnt);
+ seq_printf(s, "reset fail count: %u\n", hdev->rst_stats.reset_fail_cnt);
+
+ for (i = 0; i < ARRAY_SIZE(hclge_dbg_rst_info); i++) {
+ offset = hclge_dbg_rst_info[i].offset;
+ seq_printf(s, "%s: 0x%x\n",
+ hclge_dbg_rst_info[i].message,
+ hclge_read_dev(&hdev->hw, offset));
+ }
+
+ seq_printf(s, "hdev state: 0x%lx\n", hdev->state);
+
+ return 0;
+}
+
+static int hclge_dbg_dump_serv_info(struct seq_file *s, void *data)
+{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
unsigned long rem_nsec;
- int pos = 0;
u64 lc;
lc = local_clock();
rem_nsec = do_div(lc, HCLGE_BILLION_NANO_SECONDS);
- pos += scnprintf(buf + pos, len - pos, "local_clock: [%5lu.%06lu]\n",
- (unsigned long)lc, rem_nsec / 1000);
- pos += scnprintf(buf + pos, len - pos, "delta: %u(ms)\n",
- jiffies_to_msecs(jiffies - hdev->last_serv_processed));
- pos += scnprintf(buf + pos, len - pos,
- "last_service_task_processed: %lu(jiffies)\n",
- hdev->last_serv_processed);
- pos += scnprintf(buf + pos, len - pos, "last_service_task_cnt: %lu\n",
- hdev->serv_processed_cnt);
+ seq_printf(s, "local_clock: [%5lu.%06lu]\n",
+ (unsigned long)lc, rem_nsec / 1000);
+ seq_printf(s, "delta: %u(ms)\n",
+ jiffies_to_msecs(jiffies - hdev->last_serv_processed));
+ seq_printf(s, "last_service_task_processed: %lu(jiffies)\n",
+ hdev->last_serv_processed);
+ seq_printf(s, "last_service_task_cnt: %lu\n", hdev->serv_processed_cnt);
return 0;
}
-static int hclge_dbg_dump_interrupt(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_interrupt(struct seq_file *s, void *data)
{
- int pos = 0;
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
- pos += scnprintf(buf + pos, len - pos, "num_nic_msi: %u\n",
- hdev->num_nic_msi);
- pos += scnprintf(buf + pos, len - pos, "num_roce_msi: %u\n",
- hdev->num_roce_msi);
- pos += scnprintf(buf + pos, len - pos, "num_msi_used: %u\n",
- hdev->num_msi_used);
- pos += scnprintf(buf + pos, len - pos, "num_msi_left: %u\n",
- hdev->num_msi_left);
+ seq_printf(s, "num_nic_msi: %u\n", hdev->num_nic_msi);
+ seq_printf(s, "num_roce_msi: %u\n", hdev->num_roce_msi);
+ seq_printf(s, "num_msi_used: %u\n", hdev->num_msi_used);
+ seq_printf(s, "num_msi_left: %u\n", hdev->num_msi_left);
return 0;
}
-static void hclge_dbg_imp_info_data_print(struct hclge_desc *desc_src,
- char *buf, int len, u32 bd_num)
+static void hclge_dbg_imp_info_data_print(struct seq_file *s,
+ struct hclge_desc *desc_src,
+ u32 bd_num)
{
#define HCLGE_DBG_IMP_INFO_PRINT_OFFSET 0x2
struct hclge_desc *desc_index = desc_src;
u32 offset = 0;
- int pos = 0;
u32 i, j;
- pos += scnprintf(buf + pos, len - pos, "offset | data\n");
+ seq_puts(s, "offset | data\n");
for (i = 0; i < bd_num; i++) {
j = 0;
while (j < HCLGE_DESC_DATA_LEN - 1) {
- pos += scnprintf(buf + pos, len - pos, "0x%04x | ",
- offset);
- pos += scnprintf(buf + pos, len - pos, "0x%08x ",
- le32_to_cpu(desc_index->data[j++]));
- pos += scnprintf(buf + pos, len - pos, "0x%08x\n",
- le32_to_cpu(desc_index->data[j++]));
+ seq_printf(s, "0x%04x | ", offset);
+ seq_printf(s, "0x%08x ",
+ le32_to_cpu(desc_index->data[j++]));
+ seq_printf(s, "0x%08x\n",
+ le32_to_cpu(desc_index->data[j++]));
offset += sizeof(u32) * HCLGE_DBG_IMP_INFO_PRINT_OFFSET;
}
desc_index++;
}
}
-static int
-hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_get_imp_stats_info(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_get_imp_bd_cmd *req;
struct hclge_desc *desc_src;
struct hclge_desc desc;
@@ -1841,7 +2291,7 @@ hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len)
return ret;
}
- hclge_dbg_imp_info_data_print(desc_src, buf, len, bd_num);
+ hclge_dbg_imp_info_data_print(s, desc_src, bd_num);
kfree(desc_src);
@@ -1852,7 +2302,7 @@ hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len)
#define HCLGE_MAX_NCL_CONFIG_LENGTH 16384
static void hclge_ncl_config_data_print(struct hclge_desc *desc, int *index,
- char *buf, int len, int *pos)
+ struct seq_file *s)
{
#define HCLGE_CMD_DATA_NUM 6
@@ -1864,9 +2314,8 @@ static void hclge_ncl_config_data_print(struct hclge_desc *desc, int *index,
if (i == 0 && j == 0)
continue;
- *pos += scnprintf(buf + *pos, len - *pos,
- "0x%04x | 0x%08x\n", offset,
- le32_to_cpu(desc[i].data[j]));
+ seq_printf(s, "0x%04x | 0x%08x\n", offset,
+ le32_to_cpu(desc[i].data[j]));
offset += sizeof(u32);
*index -= sizeof(u32);
@@ -1877,19 +2326,18 @@ static void hclge_ncl_config_data_print(struct hclge_desc *desc, int *index,
}
}
-static int
-hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_ncl_config(struct seq_file *s, void *data)
{
#define HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD (20 + 24 * 4)
struct hclge_desc desc[HCLGE_CMD_NCL_CONFIG_BD_NUM];
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
int bd_num = HCLGE_CMD_NCL_CONFIG_BD_NUM;
int index = HCLGE_MAX_NCL_CONFIG_LENGTH;
- int pos = 0;
u32 data0;
int ret;
- pos += scnprintf(buf + pos, len - pos, "offset | data\n");
+ seq_puts(s, "offset | data\n");
while (index > 0) {
data0 = HCLGE_MAX_NCL_CONFIG_LENGTH - index;
@@ -1902,27 +2350,26 @@ hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, char *buf, int len)
if (ret)
return ret;
- hclge_ncl_config_data_print(desc, &index, buf, len, &pos);
+ hclge_ncl_config_data_print(desc, &index, s);
}
return 0;
}
-static int hclge_dbg_dump_loopback(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_loopback(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct phy_device *phydev = hdev->hw.mac.phydev;
struct hclge_config_mac_mode_cmd *req_app;
struct hclge_common_lb_cmd *req_common;
struct hclge_desc desc;
u8 loopback_en;
- int pos = 0;
int ret;
req_app = (struct hclge_config_mac_mode_cmd *)desc.data;
req_common = (struct hclge_common_lb_cmd *)desc.data;
- pos += scnprintf(buf + pos, len - pos, "mac id: %u\n",
- hdev->hw.mac.mac_id);
+ seq_printf(s, "mac id: %u\n", hdev->hw.mac.mac_id);
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -1934,8 +2381,7 @@ static int hclge_dbg_dump_loopback(struct hclge_dev *hdev, char *buf, int len)
loopback_en = hnae3_get_bit(le32_to_cpu(req_app->txrx_pad_fcs_loop_en),
HCLGE_MAC_APP_LP_B);
- pos += scnprintf(buf + pos, len - pos, "app loopback: %s\n",
- state_str[loopback_en]);
+ seq_printf(s, "app loopback: %s\n", str_on_off(loopback_en));
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -1946,24 +2392,22 @@ static int hclge_dbg_dump_loopback(struct hclge_dev *hdev, char *buf, int len)
return ret;
}
- loopback_en = req_common->enable & HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
- pos += scnprintf(buf + pos, len - pos, "serdes serial loopback: %s\n",
- state_str[loopback_en]);
+ loopback_en = req_common->enable &
+ HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
+ seq_printf(s, "serdes serial loopback: %s\n", str_on_off(loopback_en));
loopback_en = req_common->enable &
- HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B ? 1 : 0;
- pos += scnprintf(buf + pos, len - pos, "serdes parallel loopback: %s\n",
- state_str[loopback_en]);
+ HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B ? 1 : 0;
+ seq_printf(s, "serdes parallel loopback: %s\n",
+ str_on_off(loopback_en));
if (phydev) {
loopback_en = phydev->loopback_enabled;
- pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n",
- state_str[loopback_en]);
+ seq_printf(s, "phy loopback: %s\n", str_on_off(loopback_en));
} else if (hnae3_dev_phy_imp_supported(hdev)) {
loopback_en = req_common->enable &
HCLGE_CMD_GE_PHY_INNER_LOOP_B;
- pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n",
- state_str[loopback_en]);
+ seq_printf(s, "phy loopback: %s\n", str_on_off(loopback_en));
}
return 0;
@@ -1972,107 +2416,75 @@ static int hclge_dbg_dump_loopback(struct hclge_dev *hdev, char *buf, int len)
/* hclge_dbg_dump_mac_tnl_status: print message about mac tnl interrupt
* @hdev: pointer to struct hclge_dev
*/
-static int
-hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_mac_tnl_status(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_mac_tnl_stats stats;
unsigned long rem_nsec;
- int pos = 0;
- pos += scnprintf(buf + pos, len - pos,
- "Recently generated mac tnl interruption:\n");
+ seq_puts(s, "Recently generated mac tnl interruption:\n");
while (kfifo_get(&hdev->mac_tnl_log, &stats)) {
rem_nsec = do_div(stats.time, HCLGE_BILLION_NANO_SECONDS);
- pos += scnprintf(buf + pos, len - pos,
- "[%07lu.%03lu] status = 0x%x\n",
- (unsigned long)stats.time, rem_nsec / 1000,
- stats.status);
+ seq_printf(s, "[%07lu.%03lu] status = 0x%x\n",
+ (unsigned long)stats.time, rem_nsec / 1000,
+ stats.status);
}
return 0;
}
-
-static const struct hclge_dbg_item mac_list_items[] = {
- { "FUNC_ID", 2 },
- { "MAC_ADDR", 12 },
- { "STATE", 2 },
-};
-
-static void hclge_dbg_dump_mac_list(struct hclge_dev *hdev, char *buf, int len,
- bool is_unicast)
+static void hclge_dbg_dump_mac_list(struct seq_file *s, bool is_unicast)
{
- char data_str[ARRAY_SIZE(mac_list_items)][HCLGE_DBG_DATA_STR_LEN];
- char content[HCLGE_DBG_INFO_LEN], str_id[HCLGE_DBG_ID_LEN];
- char *result[ARRAY_SIZE(mac_list_items)];
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_mac_node *mac_node, *tmp;
struct hclge_vport *vport;
struct list_head *list;
u32 func_id;
- int pos = 0;
- int i;
- for (i = 0; i < ARRAY_SIZE(mac_list_items); i++)
- result[i] = &data_str[i][0];
-
- pos += scnprintf(buf + pos, len - pos, "%s MAC_LIST:\n",
- is_unicast ? "UC" : "MC");
- hclge_dbg_fill_content(content, sizeof(content), mac_list_items,
- NULL, ARRAY_SIZE(mac_list_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ seq_printf(s, "%s MAC_LIST:\n", is_unicast ? "UC" : "MC");
+ seq_puts(s, "FUNC_ID MAC_ADDR STATE\n");
for (func_id = 0; func_id < hdev->num_alloc_vport; func_id++) {
vport = &hdev->vport[func_id];
list = is_unicast ? &vport->uc_mac_list : &vport->mc_mac_list;
spin_lock_bh(&vport->mac_list_lock);
list_for_each_entry_safe(mac_node, tmp, list, node) {
- i = 0;
- result[i++] = hclge_dbg_get_func_id_str(str_id,
- func_id);
- sprintf(result[i++], "%pM", mac_node->mac_addr);
- sprintf(result[i++], "%5s",
- hclge_mac_state_str[mac_node->state]);
- hclge_dbg_fill_content(content, sizeof(content),
- mac_list_items,
- (const char **)result,
- ARRAY_SIZE(mac_list_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ if (func_id)
+ seq_printf(s, "vf%-7u", func_id - 1U);
+ else
+ seq_puts(s, "pf ");
+ seq_printf(s, "%pM ", mac_node->mac_addr);
+ seq_printf(s, "%5s\n",
+ hclge_mac_state_str[mac_node->state]);
}
spin_unlock_bh(&vport->mac_list_lock);
}
}
-static int hclge_dbg_dump_umv_info(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_umv_info(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
u8 func_num = pci_num_vf(hdev->pdev) + 1;
struct hclge_vport *vport;
- int pos = 0;
u8 i;
- pos += scnprintf(buf, len, "num_alloc_vport : %u\n",
- hdev->num_alloc_vport);
- pos += scnprintf(buf + pos, len - pos, "max_umv_size : %u\n",
- hdev->max_umv_size);
- pos += scnprintf(buf + pos, len - pos, "wanted_umv_size : %u\n",
- hdev->wanted_umv_size);
- pos += scnprintf(buf + pos, len - pos, "priv_umv_size : %u\n",
- hdev->priv_umv_size);
+ seq_printf(s, "num_alloc_vport : %u\n", hdev->num_alloc_vport);
+ seq_printf(s, "max_umv_size : %u\n", hdev->max_umv_size);
+ seq_printf(s, "wanted_umv_size : %u\n", hdev->wanted_umv_size);
+ seq_printf(s, "priv_umv_size : %u\n", hdev->priv_umv_size);
mutex_lock(&hdev->vport_lock);
- pos += scnprintf(buf + pos, len - pos, "share_umv_size : %u\n",
- hdev->share_umv_size);
+ seq_printf(s, "share_umv_size : %u\n", hdev->share_umv_size);
for (i = 0; i < func_num; i++) {
vport = &hdev->vport[i];
- pos += scnprintf(buf + pos, len - pos,
- "vport(%u) used_umv_num : %u\n",
- i, vport->used_umv_num);
+ seq_printf(s, "vport(%u) used_umv_num : %u\n",
+ i, vport->used_umv_num);
}
mutex_unlock(&hdev->vport_lock);
- pos += scnprintf(buf + pos, len - pos, "used_mc_mac_num : %u\n",
- hdev->used_mc_mac_num);
+ seq_printf(s, "used_mc_mac_num : %u\n", hdev->used_mc_mac_num);
return 0;
}
@@ -2214,38 +2626,12 @@ static int hclge_get_port_vlan_filter_bypass_state(struct hclge_dev *hdev,
return 0;
}
-static const struct hclge_dbg_item vlan_filter_items[] = {
- { "FUNC_ID", 2 },
- { "I_VF_VLAN_FILTER", 2 },
- { "E_VF_VLAN_FILTER", 2 },
- { "PORT_VLAN_FILTER_BYPASS", 0 }
-};
-
-static const struct hclge_dbg_item vlan_offload_items[] = {
- { "FUNC_ID", 2 },
- { "PVID", 4 },
- { "ACCEPT_TAG1", 2 },
- { "ACCEPT_TAG2", 2 },
- { "ACCEPT_UNTAG1", 2 },
- { "ACCEPT_UNTAG2", 2 },
- { "INSERT_TAG1", 2 },
- { "INSERT_TAG2", 2 },
- { "SHIFT_TAG", 2 },
- { "STRIP_TAG1", 2 },
- { "STRIP_TAG2", 2 },
- { "DROP_TAG1", 2 },
- { "DROP_TAG2", 2 },
- { "PRI_ONLY_TAG1", 2 },
- { "PRI_ONLY_TAG2", 0 }
-};
-
-static int hclge_dbg_dump_vlan_filter_config(struct hclge_dev *hdev, char *buf,
- int len, int *pos)
+static int hclge_dbg_dump_vlan_filter_config(struct hclge_dev *hdev,
+ struct seq_file *s)
{
- char content[HCLGE_DBG_VLAN_FLTR_INFO_LEN], str_id[HCLGE_DBG_ID_LEN];
- const char *result[ARRAY_SIZE(vlan_filter_items)];
- u8 i, j, vlan_fe, bypass, ingress, egress;
u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
+ u8 i, vlan_fe, bypass, ingress, egress;
+ char str_id[HCLGE_DBG_ID_LEN];
int ret;
ret = hclge_get_vlan_filter_state(hdev, HCLGE_FILTER_TYPE_PORT, 0,
@@ -2255,14 +2641,11 @@ static int hclge_dbg_dump_vlan_filter_config(struct hclge_dev *hdev, char *buf,
ingress = vlan_fe & HCLGE_FILTER_FE_NIC_INGRESS_B;
egress = vlan_fe & HCLGE_FILTER_FE_NIC_EGRESS_B ? 1 : 0;
- *pos += scnprintf(buf, len, "I_PORT_VLAN_FILTER: %s\n",
- state_str[ingress]);
- *pos += scnprintf(buf + *pos, len - *pos, "E_PORT_VLAN_FILTER: %s\n",
- state_str[egress]);
+ seq_printf(s, "I_PORT_VLAN_FILTER: %s\n", str_on_off(ingress));
+ seq_printf(s, "E_PORT_VLAN_FILTER: %s\n", str_on_off(egress));
- hclge_dbg_fill_content(content, sizeof(content), vlan_filter_items,
- NULL, ARRAY_SIZE(vlan_filter_items));
- *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+ seq_puts(s, "FUNC_ID I_VF_VLAN_FILTER E_VF_VLAN_FILTER ");
+ seq_puts(s, "PORT_VLAN_FILTER_BYPASS\n");
for (i = 0; i < func_num; i++) {
ret = hclge_get_vlan_filter_state(hdev, HCLGE_FILTER_TYPE_VF, i,
@@ -2275,37 +2658,32 @@ static int hclge_dbg_dump_vlan_filter_config(struct hclge_dev *hdev, char *buf,
ret = hclge_get_port_vlan_filter_bypass_state(hdev, i, &bypass);
if (ret)
return ret;
- j = 0;
- result[j++] = hclge_dbg_get_func_id_str(str_id, i);
- result[j++] = state_str[ingress];
- result[j++] = state_str[egress];
- result[j++] =
- test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
- hdev->ae_dev->caps) ? state_str[bypass] : "NA";
- hclge_dbg_fill_content(content, sizeof(content),
- vlan_filter_items, result,
- ARRAY_SIZE(vlan_filter_items));
- *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+
+ seq_printf(s, "%-9s%-18s%-18s%s\n",
+ hclge_dbg_get_func_id_str(str_id, i),
+ str_on_off(ingress), str_on_off(egress),
+ test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
+ hdev->ae_dev->caps) ?
+ str_on_off(bypass) : "NA");
}
- *pos += scnprintf(buf + *pos, len - *pos, "\n");
+ seq_puts(s, "\n");
return 0;
}
-static int hclge_dbg_dump_vlan_offload_config(struct hclge_dev *hdev, char *buf,
- int len, int *pos)
+static int hclge_dbg_dump_vlan_offload_config(struct hclge_dev *hdev,
+ struct seq_file *s)
{
- char str_id[HCLGE_DBG_ID_LEN], str_pvid[HCLGE_DBG_ID_LEN];
- const char *result[ARRAY_SIZE(vlan_offload_items)];
- char content[HCLGE_DBG_VLAN_OFFLOAD_INFO_LEN];
u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
struct hclge_dbg_vlan_cfg vlan_cfg;
+ char str_id[HCLGE_DBG_ID_LEN];
int ret;
- u8 i, j;
+ u8 i;
- hclge_dbg_fill_content(content, sizeof(content), vlan_offload_items,
- NULL, ARRAY_SIZE(vlan_offload_items));
- *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+ seq_puts(s, "FUNC_ID PVID ACCEPT_TAG1 ACCEPT_TAG2 ACCEPT_UNTAG1 ");
+ seq_puts(s, "ACCEPT_UNTAG2 INSERT_TAG1 INSERT_TAG2 SHIFT_TAG ");
+ seq_puts(s, "STRIP_TAG1 STRIP_TAG2 DROP_TAG1 DROP_TAG2 ");
+ seq_puts(s, "PRI_ONLY_TAG1 PRI_ONLY_TAG2\n");
for (i = 0; i < func_num; i++) {
ret = hclge_get_vlan_tx_offload_cfg(hdev, i, &vlan_cfg);
@@ -2316,107 +2694,92 @@ static int hclge_dbg_dump_vlan_offload_config(struct hclge_dev *hdev, char *buf,
if (ret)
return ret;
- sprintf(str_pvid, "%u", vlan_cfg.pvid);
- j = 0;
- result[j++] = hclge_dbg_get_func_id_str(str_id, i);
- result[j++] = str_pvid;
- result[j++] = state_str[vlan_cfg.accept_tag1];
- result[j++] = state_str[vlan_cfg.accept_tag2];
- result[j++] = state_str[vlan_cfg.accept_untag1];
- result[j++] = state_str[vlan_cfg.accept_untag2];
- result[j++] = state_str[vlan_cfg.insert_tag1];
- result[j++] = state_str[vlan_cfg.insert_tag2];
- result[j++] = state_str[vlan_cfg.shift_tag];
- result[j++] = state_str[vlan_cfg.strip_tag1];
- result[j++] = state_str[vlan_cfg.strip_tag2];
- result[j++] = state_str[vlan_cfg.drop_tag1];
- result[j++] = state_str[vlan_cfg.drop_tag2];
- result[j++] = state_str[vlan_cfg.pri_only1];
- result[j++] = state_str[vlan_cfg.pri_only2];
-
- hclge_dbg_fill_content(content, sizeof(content),
- vlan_offload_items, result,
- ARRAY_SIZE(vlan_offload_items));
- *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+ seq_printf(s, "%-9s", hclge_dbg_get_func_id_str(str_id, i));
+ seq_printf(s, "%-6u", vlan_cfg.pvid);
+ seq_printf(s, "%-13s", str_on_off(vlan_cfg.accept_tag1));
+ seq_printf(s, "%-12s", str_on_off(vlan_cfg.accept_tag2));
+ seq_printf(s, "%-15s", str_on_off(vlan_cfg.accept_untag1));
+ seq_printf(s, "%-15s", str_on_off(vlan_cfg.accept_untag2));
+ seq_printf(s, "%-13s", str_on_off(vlan_cfg.insert_tag1));
+ seq_printf(s, "%-13s", str_on_off(vlan_cfg.insert_tag2));
+ seq_printf(s, "%-11s", str_on_off(vlan_cfg.shift_tag));
+ seq_printf(s, "%-12s", str_on_off(vlan_cfg.strip_tag1));
+ seq_printf(s, "%-12s", str_on_off(vlan_cfg.strip_tag2));
+ seq_printf(s, "%-11s", str_on_off(vlan_cfg.drop_tag1));
+ seq_printf(s, "%-11s", str_on_off(vlan_cfg.drop_tag2));
+ seq_printf(s, "%-15s", str_on_off(vlan_cfg.pri_only1));
+ seq_printf(s, "%s\n", str_on_off(vlan_cfg.pri_only2));
}
return 0;
}
-static int hclge_dbg_dump_vlan_config(struct hclge_dev *hdev, char *buf,
- int len)
+static int hclge_dbg_dump_vlan_config(struct seq_file *s, void *data)
{
- int pos = 0;
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
int ret;
- ret = hclge_dbg_dump_vlan_filter_config(hdev, buf, len, &pos);
+ ret = hclge_dbg_dump_vlan_filter_config(hdev, s);
if (ret)
return ret;
- return hclge_dbg_dump_vlan_offload_config(hdev, buf, len, &pos);
+ return hclge_dbg_dump_vlan_offload_config(hdev, s);
}
-static int hclge_dbg_dump_ptp_info(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_ptp_info(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_ptp *ptp = hdev->ptp;
u32 sw_cfg = ptp->ptp_cfg;
unsigned int tx_start;
unsigned int last_rx;
- int pos = 0;
u32 hw_cfg;
int ret;
- pos += scnprintf(buf + pos, len - pos, "phc %s's debug info:\n",
- ptp->info.name);
- pos += scnprintf(buf + pos, len - pos, "ptp enable: %s\n",
- test_bit(HCLGE_PTP_FLAG_EN, &ptp->flags) ?
- "yes" : "no");
- pos += scnprintf(buf + pos, len - pos, "ptp tx enable: %s\n",
- test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) ?
- "yes" : "no");
- pos += scnprintf(buf + pos, len - pos, "ptp rx enable: %s\n",
- test_bit(HCLGE_PTP_FLAG_RX_EN, &ptp->flags) ?
- "yes" : "no");
+ seq_printf(s, "phc %s's debug info:\n", ptp->info.name);
+ seq_printf(s, "ptp enable: %s\n",
+ str_yes_no(test_bit(HCLGE_PTP_FLAG_EN, &ptp->flags)));
+ seq_printf(s, "ptp tx enable: %s\n",
+ str_yes_no(test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags)));
+ seq_printf(s, "ptp rx enable: %s\n",
+ str_yes_no(test_bit(HCLGE_PTP_FLAG_RX_EN, &ptp->flags)));
last_rx = jiffies_to_msecs(ptp->last_rx);
- pos += scnprintf(buf + pos, len - pos, "last rx time: %lu.%lu\n",
- last_rx / MSEC_PER_SEC, last_rx % MSEC_PER_SEC);
- pos += scnprintf(buf + pos, len - pos, "rx count: %lu\n", ptp->rx_cnt);
+ seq_printf(s, "last rx time: %lu.%lu\n",
+ last_rx / MSEC_PER_SEC, last_rx % MSEC_PER_SEC);
+ seq_printf(s, "rx count: %lu\n", ptp->rx_cnt);
tx_start = jiffies_to_msecs(ptp->tx_start);
- pos += scnprintf(buf + pos, len - pos, "last tx start time: %lu.%lu\n",
- tx_start / MSEC_PER_SEC, tx_start % MSEC_PER_SEC);
- pos += scnprintf(buf + pos, len - pos, "tx count: %lu\n", ptp->tx_cnt);
- pos += scnprintf(buf + pos, len - pos, "tx skipped count: %lu\n",
- ptp->tx_skipped);
- pos += scnprintf(buf + pos, len - pos, "tx timeout count: %lu\n",
- ptp->tx_timeout);
- pos += scnprintf(buf + pos, len - pos, "last tx seqid: %u\n",
- ptp->last_tx_seqid);
+ seq_printf(s, "last tx start time: %lu.%lu\n",
+ tx_start / MSEC_PER_SEC, tx_start % MSEC_PER_SEC);
+ seq_printf(s, "tx count: %lu\n", ptp->tx_cnt);
+ seq_printf(s, "tx skipped count: %lu\n", ptp->tx_skipped);
+ seq_printf(s, "tx timeout count: %lu\n", ptp->tx_timeout);
+ seq_printf(s, "last tx seqid: %u\n", ptp->last_tx_seqid);
+
ret = hclge_ptp_cfg_qry(hdev, &hw_cfg);
if (ret)
return ret;
- pos += scnprintf(buf + pos, len - pos, "sw_cfg: %#x, hw_cfg: %#x\n",
- sw_cfg, hw_cfg);
+ seq_printf(s, "sw_cfg: %#x, hw_cfg: %#x\n", sw_cfg, hw_cfg);
- pos += scnprintf(buf + pos, len - pos, "tx type: %d, rx filter: %d\n",
- ptp->ts_cfg.tx_type, ptp->ts_cfg.rx_filter);
+ seq_printf(s, "tx type: %d, rx filter: %d\n",
+ ptp->ts_cfg.tx_type, ptp->ts_cfg.rx_filter);
return 0;
}
-static int hclge_dbg_dump_mac_uc(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_mac_uc(struct seq_file *s, void *data)
{
- hclge_dbg_dump_mac_list(hdev, buf, len, true);
+ hclge_dbg_dump_mac_list(s, true);
return 0;
}
-static int hclge_dbg_dump_mac_mc(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_mac_mc(struct seq_file *s, void *data)
{
- hclge_dbg_dump_mac_list(hdev, buf, len, false);
+ hclge_dbg_dump_mac_list(s, false);
return 0;
}
@@ -2424,156 +2787,156 @@ static int hclge_dbg_dump_mac_mc(struct hclge_dev *hdev, char *buf, int len)
static const struct hclge_dbg_func hclge_dbg_cmd_func[] = {
{
.cmd = HNAE3_DBG_CMD_TM_NODES,
- .dbg_dump = hclge_dbg_dump_tm_nodes,
+ .dbg_read_func = hclge_dbg_dump_tm_nodes,
},
{
.cmd = HNAE3_DBG_CMD_TM_PRI,
- .dbg_dump = hclge_dbg_dump_tm_pri,
+ .dbg_read_func = hclge_dbg_dump_tm_pri,
},
{
.cmd = HNAE3_DBG_CMD_TM_QSET,
- .dbg_dump = hclge_dbg_dump_tm_qset,
+ .dbg_read_func = hclge_dbg_dump_tm_qset,
},
{
.cmd = HNAE3_DBG_CMD_TM_MAP,
- .dbg_dump = hclge_dbg_dump_tm_map,
+ .dbg_read_func = hclge_dbg_dump_tm_map,
},
{
.cmd = HNAE3_DBG_CMD_TM_PG,
- .dbg_dump = hclge_dbg_dump_tm_pg,
+ .dbg_read_func = hclge_dbg_dump_tm_pg,
},
{
.cmd = HNAE3_DBG_CMD_TM_PORT,
- .dbg_dump = hclge_dbg_dump_tm_port,
+ .dbg_read_func = hclge_dbg_dump_tm_port,
},
{
.cmd = HNAE3_DBG_CMD_TC_SCH_INFO,
- .dbg_dump = hclge_dbg_dump_tc,
+ .dbg_read_func = hclge_dbg_dump_tc,
},
{
.cmd = HNAE3_DBG_CMD_QOS_PAUSE_CFG,
- .dbg_dump = hclge_dbg_dump_qos_pause_cfg,
+ .dbg_read_func = hclge_dbg_dump_qos_pause_cfg,
},
{
.cmd = HNAE3_DBG_CMD_QOS_PRI_MAP,
- .dbg_dump = hclge_dbg_dump_qos_pri_map,
+ .dbg_read_func = hclge_dbg_dump_qos_pri_map,
},
{
.cmd = HNAE3_DBG_CMD_QOS_DSCP_MAP,
- .dbg_dump = hclge_dbg_dump_qos_dscp_map,
+ .dbg_read_func = hclge_dbg_dump_qos_dscp_map,
},
{
.cmd = HNAE3_DBG_CMD_QOS_BUF_CFG,
- .dbg_dump = hclge_dbg_dump_qos_buf_cfg,
+ .dbg_read_func = hclge_dbg_dump_qos_buf_cfg,
},
{
.cmd = HNAE3_DBG_CMD_MAC_UC,
- .dbg_dump = hclge_dbg_dump_mac_uc,
+ .dbg_read_func = hclge_dbg_dump_mac_uc,
},
{
.cmd = HNAE3_DBG_CMD_MAC_MC,
- .dbg_dump = hclge_dbg_dump_mac_mc,
+ .dbg_read_func = hclge_dbg_dump_mac_mc,
},
{
.cmd = HNAE3_DBG_CMD_MNG_TBL,
- .dbg_dump = hclge_dbg_dump_mng_table,
+ .dbg_read_func = hclge_dbg_dump_mng_table,
},
{
.cmd = HNAE3_DBG_CMD_LOOPBACK,
- .dbg_dump = hclge_dbg_dump_loopback,
+ .dbg_read_func = hclge_dbg_dump_loopback,
},
{
.cmd = HNAE3_DBG_CMD_PTP_INFO,
- .dbg_dump = hclge_dbg_dump_ptp_info,
+ .dbg_read_func = hclge_dbg_dump_ptp_info,
},
{
.cmd = HNAE3_DBG_CMD_INTERRUPT_INFO,
- .dbg_dump = hclge_dbg_dump_interrupt,
+ .dbg_read_func = hclge_dbg_dump_interrupt,
},
{
.cmd = HNAE3_DBG_CMD_RESET_INFO,
- .dbg_dump = hclge_dbg_dump_rst_info,
+ .dbg_read_func = hclge_dbg_seq_dump_rst_info,
},
{
.cmd = HNAE3_DBG_CMD_IMP_INFO,
- .dbg_dump = hclge_dbg_get_imp_stats_info,
+ .dbg_read_func = hclge_dbg_get_imp_stats_info,
},
{
.cmd = HNAE3_DBG_CMD_NCL_CONFIG,
- .dbg_dump = hclge_dbg_dump_ncl_config,
+ .dbg_read_func = hclge_dbg_dump_ncl_config,
},
{
.cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
- .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ .dbg_read_func = hclge_dbg_dump_bios_reg_cmd,
},
{
.cmd = HNAE3_DBG_CMD_REG_SSU,
- .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ .dbg_read_func = hclge_dbg_dump_ssu_reg_cmd,
},
{
.cmd = HNAE3_DBG_CMD_REG_IGU_EGU,
- .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ .dbg_read_func = hclge_dbg_dump_igu_egu_reg_cmd,
},
{
.cmd = HNAE3_DBG_CMD_REG_RPU,
- .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ .dbg_read_func = hclge_dbg_dump_rpu_reg_cmd,
},
{
.cmd = HNAE3_DBG_CMD_REG_NCSI,
- .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ .dbg_read_func = hclge_dbg_dump_ncsi_reg_cmd,
},
{
.cmd = HNAE3_DBG_CMD_REG_RTC,
- .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ .dbg_read_func = hclge_dbg_dump_rtc_reg_cmd,
},
{
.cmd = HNAE3_DBG_CMD_REG_PPP,
- .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ .dbg_read_func = hclge_dbg_dump_ppp_reg_cmd,
},
{
.cmd = HNAE3_DBG_CMD_REG_RCB,
- .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ .dbg_read_func = hclge_dbg_dump_rcb_reg_cmd,
},
{
.cmd = HNAE3_DBG_CMD_REG_TQP,
- .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ .dbg_read_func = hclge_dbg_dump_tqp_reg_cmd,
},
{
.cmd = HNAE3_DBG_CMD_REG_MAC,
- .dbg_dump = hclge_dbg_dump_mac,
+ .dbg_read_func = hclge_dbg_dump_mac,
},
{
.cmd = HNAE3_DBG_CMD_REG_DCB,
- .dbg_dump = hclge_dbg_dump_dcb,
+ .dbg_read_func = hclge_dbg_dump_dcb,
},
{
.cmd = HNAE3_DBG_CMD_FD_TCAM,
- .dbg_dump = hclge_dbg_dump_fd_tcam,
+ .dbg_read_func = hclge_dbg_dump_fd_tcam,
},
{
.cmd = HNAE3_DBG_CMD_MAC_TNL_STATUS,
- .dbg_dump = hclge_dbg_dump_mac_tnl_status,
+ .dbg_read_func = hclge_dbg_dump_mac_tnl_status,
},
{
.cmd = HNAE3_DBG_CMD_SERV_INFO,
- .dbg_dump = hclge_dbg_dump_serv_info,
+ .dbg_read_func = hclge_dbg_dump_serv_info,
},
{
.cmd = HNAE3_DBG_CMD_VLAN_CONFIG,
- .dbg_dump = hclge_dbg_dump_vlan_config,
+ .dbg_read_func = hclge_dbg_dump_vlan_config,
},
{
.cmd = HNAE3_DBG_CMD_FD_COUNTER,
- .dbg_dump = hclge_dbg_dump_fd_counter,
+ .dbg_read_func = hclge_dbg_dump_fd_counter,
},
{
.cmd = HNAE3_DBG_CMD_UMV_INFO,
- .dbg_dump = hclge_dbg_dump_umv_info,
+ .dbg_read_func = hclge_dbg_dump_umv_info,
},
};
-int hclge_dbg_read_cmd(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
- char *buf, int len)
+int hclge_dbg_get_read_func(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
+ read_func *func)
{
struct hclge_vport *vport = hclge_get_vport(handle);
const struct hclge_dbg_func *cmd_func;
@@ -2583,11 +2946,8 @@ int hclge_dbg_read_cmd(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
for (i = 0; i < ARRAY_SIZE(hclge_dbg_cmd_func); i++) {
if (cmd == hclge_dbg_cmd_func[i].cmd) {
cmd_func = &hclge_dbg_cmd_func[i];
- if (cmd_func->dbg_dump)
- return cmd_func->dbg_dump(hdev, buf, len);
- else
- return cmd_func->dbg_dump_reg(hdev, cmd, buf,
- len);
+ *func = cmd_func->dbg_read_func;
+ return 0;
}
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
index 724052928b88..317f79efd54c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
@@ -92,6 +92,7 @@ struct hclge_dbg_func {
int (*dbg_dump)(struct hclge_dev *hdev, char *buf, int len);
int (*dbg_dump_reg)(struct hclge_dev *hdev, enum hnae3_dbg_cmd cmd,
char *buf, int len);
+ read_func dbg_read_func;
};
struct hclge_dbg_status_dfx_info {
@@ -99,646 +100,6 @@ struct hclge_dbg_status_dfx_info {
char message[HCLGE_DBG_MAX_DFX_MSG_LEN];
};
-static const struct hclge_dbg_dfx_message hclge_dbg_bios_common_reg[] = {
- {false, "Reserved"},
- {true, "BP_CPU_STATE"},
- {true, "DFX_MSIX_INFO_NIC_0"},
- {true, "DFX_MSIX_INFO_NIC_1"},
- {true, "DFX_MSIX_INFO_NIC_2"},
- {true, "DFX_MSIX_INFO_NIC_3"},
-
- {true, "DFX_MSIX_INFO_ROC_0"},
- {true, "DFX_MSIX_INFO_ROC_1"},
- {true, "DFX_MSIX_INFO_ROC_2"},
- {true, "DFX_MSIX_INFO_ROC_3"},
- {false, "Reserved"},
- {false, "Reserved"},
-};
-
-static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_0[] = {
- {false, "Reserved"},
- {true, "SSU_ETS_PORT_STATUS"},
- {true, "SSU_ETS_TCG_STATUS"},
- {false, "Reserved"},
- {false, "Reserved"},
- {true, "SSU_BP_STATUS_0"},
-
- {true, "SSU_BP_STATUS_1"},
- {true, "SSU_BP_STATUS_2"},
- {true, "SSU_BP_STATUS_3"},
- {true, "SSU_BP_STATUS_4"},
- {true, "SSU_BP_STATUS_5"},
- {true, "SSU_MAC_TX_PFC_IND"},
-
- {true, "MAC_SSU_RX_PFC_IND"},
- {true, "BTMP_AGEING_ST_B0"},
- {true, "BTMP_AGEING_ST_B1"},
- {true, "BTMP_AGEING_ST_B2"},
- {false, "Reserved"},
- {false, "Reserved"},
-
- {true, "FULL_DROP_NUM"},
- {true, "PART_DROP_NUM"},
- {true, "PPP_KEY_DROP_NUM"},
- {true, "PPP_RLT_DROP_NUM"},
- {true, "LO_PRI_UNICAST_RLT_DROP_NUM"},
- {true, "HI_PRI_MULTICAST_RLT_DROP_NUM"},
-
- {true, "LO_PRI_MULTICAST_RLT_DROP_NUM"},
- {true, "NCSI_PACKET_CURR_BUFFER_CNT"},
- {true, "BTMP_AGEING_RLS_CNT_BANK0"},
- {true, "BTMP_AGEING_RLS_CNT_BANK1"},
- {true, "BTMP_AGEING_RLS_CNT_BANK2"},
- {true, "SSU_MB_RD_RLT_DROP_CNT"},
-
- {true, "SSU_PPP_MAC_KEY_NUM_L"},
- {true, "SSU_PPP_MAC_KEY_NUM_H"},
- {true, "SSU_PPP_HOST_KEY_NUM_L"},
- {true, "SSU_PPP_HOST_KEY_NUM_H"},
- {true, "PPP_SSU_MAC_RLT_NUM_L"},
- {true, "PPP_SSU_MAC_RLT_NUM_H"},
-
- {true, "PPP_SSU_HOST_RLT_NUM_L"},
- {true, "PPP_SSU_HOST_RLT_NUM_H"},
- {true, "NCSI_RX_PACKET_IN_CNT_L"},
- {true, "NCSI_RX_PACKET_IN_CNT_H"},
- {true, "NCSI_TX_PACKET_OUT_CNT_L"},
- {true, "NCSI_TX_PACKET_OUT_CNT_H"},
-
- {true, "SSU_KEY_DROP_NUM"},
- {true, "MB_UNCOPY_NUM"},
- {true, "RX_OQ_DROP_PKT_CNT"},
- {true, "TX_OQ_DROP_PKT_CNT"},
- {true, "BANK_UNBALANCE_DROP_CNT"},
- {true, "BANK_UNBALANCE_RX_DROP_CNT"},
-
- {true, "NIC_L2_ERR_DROP_PKT_CNT"},
- {true, "ROC_L2_ERR_DROP_PKT_CNT"},
- {true, "NIC_L2_ERR_DROP_PKT_CNT_RX"},
- {true, "ROC_L2_ERR_DROP_PKT_CNT_RX"},
- {true, "RX_OQ_GLB_DROP_PKT_CNT"},
- {false, "Reserved"},
-
- {true, "LO_PRI_UNICAST_CUR_CNT"},
- {true, "HI_PRI_MULTICAST_CUR_CNT"},
- {true, "LO_PRI_MULTICAST_CUR_CNT"},
- {false, "Reserved"},
- {false, "Reserved"},
- {false, "Reserved"},
-};
-
-static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_1[] = {
- {true, "prt_id"},
- {true, "PACKET_TC_CURR_BUFFER_CNT_0"},
- {true, "PACKET_TC_CURR_BUFFER_CNT_1"},
- {true, "PACKET_TC_CURR_BUFFER_CNT_2"},
- {true, "PACKET_TC_CURR_BUFFER_CNT_3"},
- {true, "PACKET_TC_CURR_BUFFER_CNT_4"},
-
- {true, "PACKET_TC_CURR_BUFFER_CNT_5"},
- {true, "PACKET_TC_CURR_BUFFER_CNT_6"},
- {true, "PACKET_TC_CURR_BUFFER_CNT_7"},
- {true, "PACKET_CURR_BUFFER_CNT"},
- {false, "Reserved"},
- {false, "Reserved"},
-
- {true, "RX_PACKET_IN_CNT_L"},
- {true, "RX_PACKET_IN_CNT_H"},
- {true, "RX_PACKET_OUT_CNT_L"},
- {true, "RX_PACKET_OUT_CNT_H"},
- {true, "TX_PACKET_IN_CNT_L"},
- {true, "TX_PACKET_IN_CNT_H"},
-
- {true, "TX_PACKET_OUT_CNT_L"},
- {true, "TX_PACKET_OUT_CNT_H"},
- {true, "ROC_RX_PACKET_IN_CNT_L"},
- {true, "ROC_RX_PACKET_IN_CNT_H"},
- {true, "ROC_TX_PACKET_OUT_CNT_L"},
- {true, "ROC_TX_PACKET_OUT_CNT_H"},
-
- {true, "RX_PACKET_TC_IN_CNT_0_L"},
- {true, "RX_PACKET_TC_IN_CNT_0_H"},
- {true, "RX_PACKET_TC_IN_CNT_1_L"},
- {true, "RX_PACKET_TC_IN_CNT_1_H"},
- {true, "RX_PACKET_TC_IN_CNT_2_L"},
- {true, "RX_PACKET_TC_IN_CNT_2_H"},
-
- {true, "RX_PACKET_TC_IN_CNT_3_L"},
- {true, "RX_PACKET_TC_IN_CNT_3_H"},
- {true, "RX_PACKET_TC_IN_CNT_4_L"},
- {true, "RX_PACKET_TC_IN_CNT_4_H"},
- {true, "RX_PACKET_TC_IN_CNT_5_L"},
- {true, "RX_PACKET_TC_IN_CNT_5_H"},
-
- {true, "RX_PACKET_TC_IN_CNT_6_L"},
- {true, "RX_PACKET_TC_IN_CNT_6_H"},
- {true, "RX_PACKET_TC_IN_CNT_7_L"},
- {true, "RX_PACKET_TC_IN_CNT_7_H"},
- {true, "RX_PACKET_TC_OUT_CNT_0_L"},
- {true, "RX_PACKET_TC_OUT_CNT_0_H"},
-
- {true, "RX_PACKET_TC_OUT_CNT_1_L"},
- {true, "RX_PACKET_TC_OUT_CNT_1_H"},
- {true, "RX_PACKET_TC_OUT_CNT_2_L"},
- {true, "RX_PACKET_TC_OUT_CNT_2_H"},
- {true, "RX_PACKET_TC_OUT_CNT_3_L"},
- {true, "RX_PACKET_TC_OUT_CNT_3_H"},
-
- {true, "RX_PACKET_TC_OUT_CNT_4_L"},
- {true, "RX_PACKET_TC_OUT_CNT_4_H"},
- {true, "RX_PACKET_TC_OUT_CNT_5_L"},
- {true, "RX_PACKET_TC_OUT_CNT_5_H"},
- {true, "RX_PACKET_TC_OUT_CNT_6_L"},
- {true, "RX_PACKET_TC_OUT_CNT_6_H"},
-
- {true, "RX_PACKET_TC_OUT_CNT_7_L"},
- {true, "RX_PACKET_TC_OUT_CNT_7_H"},
- {true, "TX_PACKET_TC_IN_CNT_0_L"},
- {true, "TX_PACKET_TC_IN_CNT_0_H"},
- {true, "TX_PACKET_TC_IN_CNT_1_L"},
- {true, "TX_PACKET_TC_IN_CNT_1_H"},
-
- {true, "TX_PACKET_TC_IN_CNT_2_L"},
- {true, "TX_PACKET_TC_IN_CNT_2_H"},
- {true, "TX_PACKET_TC_IN_CNT_3_L"},
- {true, "TX_PACKET_TC_IN_CNT_3_H"},
- {true, "TX_PACKET_TC_IN_CNT_4_L"},
- {true, "TX_PACKET_TC_IN_CNT_4_H"},
-
- {true, "TX_PACKET_TC_IN_CNT_5_L"},
- {true, "TX_PACKET_TC_IN_CNT_5_H"},
- {true, "TX_PACKET_TC_IN_CNT_6_L"},
- {true, "TX_PACKET_TC_IN_CNT_6_H"},
- {true, "TX_PACKET_TC_IN_CNT_7_L"},
- {true, "TX_PACKET_TC_IN_CNT_7_H"},
-
- {true, "TX_PACKET_TC_OUT_CNT_0_L"},
- {true, "TX_PACKET_TC_OUT_CNT_0_H"},
- {true, "TX_PACKET_TC_OUT_CNT_1_L"},
- {true, "TX_PACKET_TC_OUT_CNT_1_H"},
- {true, "TX_PACKET_TC_OUT_CNT_2_L"},
- {true, "TX_PACKET_TC_OUT_CNT_2_H"},
-
- {true, "TX_PACKET_TC_OUT_CNT_3_L"},
- {true, "TX_PACKET_TC_OUT_CNT_3_H"},
- {true, "TX_PACKET_TC_OUT_CNT_4_L"},
- {true, "TX_PACKET_TC_OUT_CNT_4_H"},
- {true, "TX_PACKET_TC_OUT_CNT_5_L"},
- {true, "TX_PACKET_TC_OUT_CNT_5_H"},
-
- {true, "TX_PACKET_TC_OUT_CNT_6_L"},
- {true, "TX_PACKET_TC_OUT_CNT_6_H"},
- {true, "TX_PACKET_TC_OUT_CNT_7_L"},
- {true, "TX_PACKET_TC_OUT_CNT_7_H"},
- {false, "Reserved"},
- {false, "Reserved"},
-};
-
-static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_2[] = {
- {true, "OQ_INDEX"},
- {true, "QUEUE_CNT"},
- {false, "Reserved"},
- {false, "Reserved"},
- {false, "Reserved"},
- {false, "Reserved"},
-};
-
-static const struct hclge_dbg_dfx_message hclge_dbg_igu_egu_reg[] = {
- {true, "prt_id"},
- {true, "IGU_RX_ERR_PKT"},
- {true, "IGU_RX_NO_SOF_PKT"},
- {true, "EGU_TX_1588_SHORT_PKT"},
- {true, "EGU_TX_1588_PKT"},
- {true, "EGU_TX_ERR_PKT"},
-
- {true, "IGU_RX_OUT_L2_PKT"},
- {true, "IGU_RX_OUT_L3_PKT"},
- {true, "IGU_RX_OUT_L4_PKT"},
- {true, "IGU_RX_IN_L2_PKT"},
- {true, "IGU_RX_IN_L3_PKT"},
- {true, "IGU_RX_IN_L4_PKT"},
-
- {true, "IGU_RX_EL3E_PKT"},
- {true, "IGU_RX_EL4E_PKT"},
- {true, "IGU_RX_L3E_PKT"},
- {true, "IGU_RX_L4E_PKT"},
- {true, "IGU_RX_ROCEE_PKT"},
- {true, "IGU_RX_OUT_UDP0_PKT"},
-
- {true, "IGU_RX_IN_UDP0_PKT"},
- {true, "IGU_MC_CAR_DROP_PKT_L"},
- {true, "IGU_MC_CAR_DROP_PKT_H"},
- {true, "IGU_BC_CAR_DROP_PKT_L"},
- {true, "IGU_BC_CAR_DROP_PKT_H"},
- {false, "Reserved"},
-
- {true, "IGU_RX_OVERSIZE_PKT_L"},
- {true, "IGU_RX_OVERSIZE_PKT_H"},
- {true, "IGU_RX_UNDERSIZE_PKT_L"},
- {true, "IGU_RX_UNDERSIZE_PKT_H"},
- {true, "IGU_RX_OUT_ALL_PKT_L"},
- {true, "IGU_RX_OUT_ALL_PKT_H"},
-
- {true, "IGU_TX_OUT_ALL_PKT_L"},
- {true, "IGU_TX_OUT_ALL_PKT_H"},
- {true, "IGU_RX_UNI_PKT_L"},
- {true, "IGU_RX_UNI_PKT_H"},
- {true, "IGU_RX_MULTI_PKT_L"},
- {true, "IGU_RX_MULTI_PKT_H"},
-
- {true, "IGU_RX_BROAD_PKT_L"},
- {true, "IGU_RX_BROAD_PKT_H"},
- {true, "EGU_TX_OUT_ALL_PKT_L"},
- {true, "EGU_TX_OUT_ALL_PKT_H"},
- {true, "EGU_TX_UNI_PKT_L"},
- {true, "EGU_TX_UNI_PKT_H"},
-
- {true, "EGU_TX_MULTI_PKT_L"},
- {true, "EGU_TX_MULTI_PKT_H"},
- {true, "EGU_TX_BROAD_PKT_L"},
- {true, "EGU_TX_BROAD_PKT_H"},
- {true, "IGU_TX_KEY_NUM_L"},
- {true, "IGU_TX_KEY_NUM_H"},
-
- {true, "IGU_RX_NON_TUN_PKT_L"},
- {true, "IGU_RX_NON_TUN_PKT_H"},
- {true, "IGU_RX_TUN_PKT_L"},
- {true, "IGU_RX_TUN_PKT_H"},
- {false, "Reserved"},
- {false, "Reserved"},
-};
-
-static const struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_0[] = {
- {true, "tc_queue_num"},
- {true, "FSM_DFX_ST0"},
- {true, "FSM_DFX_ST1"},
- {true, "RPU_RX_PKT_DROP_CNT"},
- {true, "BUF_WAIT_TIMEOUT"},
- {true, "BUF_WAIT_TIMEOUT_QID"},
-};
-
-static const struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_1[] = {
- {false, "Reserved"},
- {true, "FIFO_DFX_ST0"},
- {true, "FIFO_DFX_ST1"},
- {true, "FIFO_DFX_ST2"},
- {true, "FIFO_DFX_ST3"},
- {true, "FIFO_DFX_ST4"},
-
- {true, "FIFO_DFX_ST5"},
- {false, "Reserved"},
- {false, "Reserved"},
- {false, "Reserved"},
- {false, "Reserved"},
- {false, "Reserved"},
-};
-
-static const struct hclge_dbg_dfx_message hclge_dbg_ncsi_reg[] = {
- {false, "Reserved"},
- {true, "NCSI_EGU_TX_FIFO_STS"},
- {true, "NCSI_PAUSE_STATUS"},
- {true, "NCSI_RX_CTRL_DMAC_ERR_CNT"},
- {true, "NCSI_RX_CTRL_SMAC_ERR_CNT"},
- {true, "NCSI_RX_CTRL_CKS_ERR_CNT"},
-
- {true, "NCSI_RX_CTRL_PKT_CNT"},
- {true, "NCSI_RX_PT_DMAC_ERR_CNT"},
- {true, "NCSI_RX_PT_SMAC_ERR_CNT"},
- {true, "NCSI_RX_PT_PKT_CNT"},
- {true, "NCSI_RX_FCS_ERR_CNT"},
- {true, "NCSI_TX_CTRL_DMAC_ERR_CNT"},
-
- {true, "NCSI_TX_CTRL_SMAC_ERR_CNT"},
- {true, "NCSI_TX_CTRL_PKT_CNT"},
- {true, "NCSI_TX_PT_DMAC_ERR_CNT"},
- {true, "NCSI_TX_PT_SMAC_ERR_CNT"},
- {true, "NCSI_TX_PT_PKT_CNT"},
- {true, "NCSI_TX_PT_PKT_TRUNC_CNT"},
-
- {true, "NCSI_TX_PT_PKT_ERR_CNT"},
- {true, "NCSI_TX_CTRL_PKT_ERR_CNT"},
- {true, "NCSI_RX_CTRL_PKT_TRUNC_CNT"},
- {true, "NCSI_RX_CTRL_PKT_CFLIT_CNT"},
- {false, "Reserved"},
- {false, "Reserved"},
-
- {true, "NCSI_MAC_RX_OCTETS_OK"},
- {true, "NCSI_MAC_RX_OCTETS_BAD"},
- {true, "NCSI_MAC_RX_UC_PKTS"},
- {true, "NCSI_MAC_RX_MC_PKTS"},
- {true, "NCSI_MAC_RX_BC_PKTS"},
- {true, "NCSI_MAC_RX_PKTS_64OCTETS"},
-
- {true, "NCSI_MAC_RX_PKTS_65TO127OCTETS"},
- {true, "NCSI_MAC_RX_PKTS_128TO255OCTETS"},
- {true, "NCSI_MAC_RX_PKTS_255TO511OCTETS"},
- {true, "NCSI_MAC_RX_PKTS_512TO1023OCTETS"},
- {true, "NCSI_MAC_RX_PKTS_1024TO1518OCTETS"},
- {true, "NCSI_MAC_RX_PKTS_1519TOMAXOCTETS"},
-
- {true, "NCSI_MAC_RX_FCS_ERRORS"},
- {true, "NCSI_MAC_RX_LONG_ERRORS"},
- {true, "NCSI_MAC_RX_JABBER_ERRORS"},
- {true, "NCSI_MAC_RX_RUNT_ERR_CNT"},
- {true, "NCSI_MAC_RX_SHORT_ERR_CNT"},
- {true, "NCSI_MAC_RX_FILT_PKT_CNT"},
-
- {true, "NCSI_MAC_RX_OCTETS_TOTAL_FILT"},
- {true, "NCSI_MAC_TX_OCTETS_OK"},
- {true, "NCSI_MAC_TX_OCTETS_BAD"},
- {true, "NCSI_MAC_TX_UC_PKTS"},
- {true, "NCSI_MAC_TX_MC_PKTS"},
- {true, "NCSI_MAC_TX_BC_PKTS"},
-
- {true, "NCSI_MAC_TX_PKTS_64OCTETS"},
- {true, "NCSI_MAC_TX_PKTS_65TO127OCTETS"},
- {true, "NCSI_MAC_TX_PKTS_128TO255OCTETS"},
- {true, "NCSI_MAC_TX_PKTS_256TO511OCTETS"},
- {true, "NCSI_MAC_TX_PKTS_512TO1023OCTETS"},
- {true, "NCSI_MAC_TX_PKTS_1024TO1518OCTETS"},
-
- {true, "NCSI_MAC_TX_PKTS_1519TOMAXOCTETS"},
- {true, "NCSI_MAC_TX_UNDERRUN"},
- {true, "NCSI_MAC_TX_CRC_ERROR"},
- {true, "NCSI_MAC_TX_PAUSE_FRAMES"},
- {true, "NCSI_MAC_RX_PAD_PKTS"},
- {true, "NCSI_MAC_RX_PAUSE_FRAMES"},
-};
-
-static const struct hclge_dbg_dfx_message hclge_dbg_rtc_reg[] = {
- {false, "Reserved"},
- {true, "LGE_IGU_AFIFO_DFX_0"},
- {true, "LGE_IGU_AFIFO_DFX_1"},
- {true, "LGE_IGU_AFIFO_DFX_2"},
- {true, "LGE_IGU_AFIFO_DFX_3"},
- {true, "LGE_IGU_AFIFO_DFX_4"},
-
- {true, "LGE_IGU_AFIFO_DFX_5"},
- {true, "LGE_IGU_AFIFO_DFX_6"},
- {true, "LGE_IGU_AFIFO_DFX_7"},
- {true, "LGE_EGU_AFIFO_DFX_0"},
- {true, "LGE_EGU_AFIFO_DFX_1"},
- {true, "LGE_EGU_AFIFO_DFX_2"},
-
- {true, "LGE_EGU_AFIFO_DFX_3"},
- {true, "LGE_EGU_AFIFO_DFX_4"},
- {true, "LGE_EGU_AFIFO_DFX_5"},
- {true, "LGE_EGU_AFIFO_DFX_6"},
- {true, "LGE_EGU_AFIFO_DFX_7"},
- {true, "CGE_IGU_AFIFO_DFX_0"},
-
- {true, "CGE_IGU_AFIFO_DFX_1"},
- {true, "CGE_EGU_AFIFO_DFX_0"},
- {true, "CGE_EGU_AFIFO_DFX_1"},
- {false, "Reserved"},
- {false, "Reserved"},
- {false, "Reserved"},
-};
-
-static const struct hclge_dbg_dfx_message hclge_dbg_ppp_reg[] = {
- {false, "Reserved"},
- {true, "DROP_FROM_PRT_PKT_CNT"},
- {true, "DROP_FROM_HOST_PKT_CNT"},
- {true, "DROP_TX_VLAN_PROC_CNT"},
- {true, "DROP_MNG_CNT"},
- {true, "DROP_FD_CNT"},
-
- {true, "DROP_NO_DST_CNT"},
- {true, "DROP_MC_MBID_FULL_CNT"},
- {true, "DROP_SC_FILTERED"},
- {true, "PPP_MC_DROP_PKT_CNT"},
- {true, "DROP_PT_CNT"},
- {true, "DROP_MAC_ANTI_SPOOF_CNT"},
-
- {true, "DROP_IG_VFV_CNT"},
- {true, "DROP_IG_PRTV_CNT"},
- {true, "DROP_CNM_PFC_PAUSE_CNT"},
- {true, "DROP_TORUS_TC_CNT"},
- {true, "DROP_TORUS_LPBK_CNT"},
- {true, "PPP_HFS_STS"},
-
- {true, "PPP_MC_RSLT_STS"},
- {true, "PPP_P3U_STS"},
- {true, "PPP_RSLT_DESCR_STS"},
- {true, "PPP_UMV_STS_0"},
- {true, "PPP_UMV_STS_1"},
- {true, "PPP_VFV_STS"},
-
- {true, "PPP_GRO_KEY_CNT"},
- {true, "PPP_GRO_INFO_CNT"},
- {true, "PPP_GRO_DROP_CNT"},
- {true, "PPP_GRO_OUT_CNT"},
- {true, "PPP_GRO_KEY_MATCH_DATA_CNT"},
- {true, "PPP_GRO_KEY_MATCH_TCAM_CNT"},
-
- {true, "PPP_GRO_INFO_MATCH_CNT"},
- {true, "PPP_GRO_FREE_ENTRY_CNT"},
- {true, "PPP_GRO_INNER_DFX_SIGNAL"},
- {false, "Reserved"},
- {false, "Reserved"},
- {false, "Reserved"},
-
- {true, "GET_RX_PKT_CNT_L"},
- {true, "GET_RX_PKT_CNT_H"},
- {true, "GET_TX_PKT_CNT_L"},
- {true, "GET_TX_PKT_CNT_H"},
- {true, "SEND_UC_PRT2HOST_PKT_CNT_L"},
- {true, "SEND_UC_PRT2HOST_PKT_CNT_H"},
-
- {true, "SEND_UC_PRT2PRT_PKT_CNT_L"},
- {true, "SEND_UC_PRT2PRT_PKT_CNT_H"},
- {true, "SEND_UC_HOST2HOST_PKT_CNT_L"},
- {true, "SEND_UC_HOST2HOST_PKT_CNT_H"},
- {true, "SEND_UC_HOST2PRT_PKT_CNT_L"},
- {true, "SEND_UC_HOST2PRT_PKT_CNT_H"},
-
- {true, "SEND_MC_FROM_PRT_CNT_L"},
- {true, "SEND_MC_FROM_PRT_CNT_H"},
- {true, "SEND_MC_FROM_HOST_CNT_L"},
- {true, "SEND_MC_FROM_HOST_CNT_H"},
- {true, "SSU_MC_RD_CNT_L"},
- {true, "SSU_MC_RD_CNT_H"},
-
- {true, "SSU_MC_DROP_CNT_L"},
- {true, "SSU_MC_DROP_CNT_H"},
- {true, "SSU_MC_RD_PKT_CNT_L"},
- {true, "SSU_MC_RD_PKT_CNT_H"},
- {true, "PPP_MC_2HOST_PKT_CNT_L"},
- {true, "PPP_MC_2HOST_PKT_CNT_H"},
-
- {true, "PPP_MC_2PRT_PKT_CNT_L"},
- {true, "PPP_MC_2PRT_PKT_CNT_H"},
- {true, "NTSNOS_PKT_CNT_L"},
- {true, "NTSNOS_PKT_CNT_H"},
- {true, "NTUP_PKT_CNT_L"},
- {true, "NTUP_PKT_CNT_H"},
-
- {true, "NTLCL_PKT_CNT_L"},
- {true, "NTLCL_PKT_CNT_H"},
- {true, "NTTGT_PKT_CNT_L"},
- {true, "NTTGT_PKT_CNT_H"},
- {true, "RTNS_PKT_CNT_L"},
- {true, "RTNS_PKT_CNT_H"},
-
- {true, "RTLPBK_PKT_CNT_L"},
- {true, "RTLPBK_PKT_CNT_H"},
- {true, "NR_PKT_CNT_L"},
- {true, "NR_PKT_CNT_H"},
- {true, "RR_PKT_CNT_L"},
- {true, "RR_PKT_CNT_H"},
-
- {true, "MNG_TBL_HIT_CNT_L"},
- {true, "MNG_TBL_HIT_CNT_H"},
- {true, "FD_TBL_HIT_CNT_L"},
- {true, "FD_TBL_HIT_CNT_H"},
- {true, "FD_LKUP_CNT_L"},
- {true, "FD_LKUP_CNT_H"},
-
- {true, "BC_HIT_CNT_L"},
- {true, "BC_HIT_CNT_H"},
- {true, "UM_TBL_UC_HIT_CNT_L"},
- {true, "UM_TBL_UC_HIT_CNT_H"},
- {true, "UM_TBL_MC_HIT_CNT_L"},
- {true, "UM_TBL_MC_HIT_CNT_H"},
-
- {true, "UM_TBL_VMDQ1_HIT_CNT_L"},
- {true, "UM_TBL_VMDQ1_HIT_CNT_H"},
- {true, "MTA_TBL_HIT_CNT_L"},
- {true, "MTA_TBL_HIT_CNT_H"},
- {true, "FWD_BONDING_HIT_CNT_L"},
- {true, "FWD_BONDING_HIT_CNT_H"},
-
- {true, "PROMIS_TBL_HIT_CNT_L"},
- {true, "PROMIS_TBL_HIT_CNT_H"},
- {true, "GET_TUNL_PKT_CNT_L"},
- {true, "GET_TUNL_PKT_CNT_H"},
- {true, "GET_BMC_PKT_CNT_L"},
- {true, "GET_BMC_PKT_CNT_H"},
-
- {true, "SEND_UC_PRT2BMC_PKT_CNT_L"},
- {true, "SEND_UC_PRT2BMC_PKT_CNT_H"},
- {true, "SEND_UC_HOST2BMC_PKT_CNT_L"},
- {true, "SEND_UC_HOST2BMC_PKT_CNT_H"},
- {true, "SEND_UC_BMC2HOST_PKT_CNT_L"},
- {true, "SEND_UC_BMC2HOST_PKT_CNT_H"},
-
- {true, "SEND_UC_BMC2PRT_PKT_CNT_L"},
- {true, "SEND_UC_BMC2PRT_PKT_CNT_H"},
- {true, "PPP_MC_2BMC_PKT_CNT_L"},
- {true, "PPP_MC_2BMC_PKT_CNT_H"},
- {true, "VLAN_MIRR_CNT_L"},
- {true, "VLAN_MIRR_CNT_H"},
-
- {true, "IG_MIRR_CNT_L"},
- {true, "IG_MIRR_CNT_H"},
- {true, "EG_MIRR_CNT_L"},
- {true, "EG_MIRR_CNT_H"},
- {true, "RX_DEFAULT_HOST_HIT_CNT_L"},
- {true, "RX_DEFAULT_HOST_HIT_CNT_H"},
-
- {true, "LAN_PAIR_CNT_L"},
- {true, "LAN_PAIR_CNT_H"},
- {true, "UM_TBL_MC_HIT_PKT_CNT_L"},
- {true, "UM_TBL_MC_HIT_PKT_CNT_H"},
- {true, "MTA_TBL_HIT_PKT_CNT_L"},
- {true, "MTA_TBL_HIT_PKT_CNT_H"},
-
- {true, "PROMIS_TBL_HIT_PKT_CNT_L"},
- {true, "PROMIS_TBL_HIT_PKT_CNT_H"},
- {false, "Reserved"},
- {false, "Reserved"},
- {false, "Reserved"},
- {false, "Reserved"},
-};
-
-static const struct hclge_dbg_dfx_message hclge_dbg_rcb_reg[] = {
- {false, "Reserved"},
- {true, "FSM_DFX_ST0"},
- {true, "FSM_DFX_ST1"},
- {true, "FSM_DFX_ST2"},
- {true, "FIFO_DFX_ST0"},
- {true, "FIFO_DFX_ST1"},
-
- {true, "FIFO_DFX_ST2"},
- {true, "FIFO_DFX_ST3"},
- {true, "FIFO_DFX_ST4"},
- {true, "FIFO_DFX_ST5"},
- {true, "FIFO_DFX_ST6"},
- {true, "FIFO_DFX_ST7"},
-
- {true, "FIFO_DFX_ST8"},
- {true, "FIFO_DFX_ST9"},
- {true, "FIFO_DFX_ST10"},
- {true, "FIFO_DFX_ST11"},
- {true, "Q_CREDIT_VLD_0"},
- {true, "Q_CREDIT_VLD_1"},
-
- {true, "Q_CREDIT_VLD_2"},
- {true, "Q_CREDIT_VLD_3"},
- {true, "Q_CREDIT_VLD_4"},
- {true, "Q_CREDIT_VLD_5"},
- {true, "Q_CREDIT_VLD_6"},
- {true, "Q_CREDIT_VLD_7"},
-
- {true, "Q_CREDIT_VLD_8"},
- {true, "Q_CREDIT_VLD_9"},
- {true, "Q_CREDIT_VLD_10"},
- {true, "Q_CREDIT_VLD_11"},
- {true, "Q_CREDIT_VLD_12"},
- {true, "Q_CREDIT_VLD_13"},
-
- {true, "Q_CREDIT_VLD_14"},
- {true, "Q_CREDIT_VLD_15"},
- {true, "Q_CREDIT_VLD_16"},
- {true, "Q_CREDIT_VLD_17"},
- {true, "Q_CREDIT_VLD_18"},
- {true, "Q_CREDIT_VLD_19"},
-
- {true, "Q_CREDIT_VLD_20"},
- {true, "Q_CREDIT_VLD_21"},
- {true, "Q_CREDIT_VLD_22"},
- {true, "Q_CREDIT_VLD_23"},
- {true, "Q_CREDIT_VLD_24"},
- {true, "Q_CREDIT_VLD_25"},
-
- {true, "Q_CREDIT_VLD_26"},
- {true, "Q_CREDIT_VLD_27"},
- {true, "Q_CREDIT_VLD_28"},
- {true, "Q_CREDIT_VLD_29"},
- {true, "Q_CREDIT_VLD_30"},
- {true, "Q_CREDIT_VLD_31"},
-
- {true, "GRO_BD_SERR_CNT"},
- {true, "GRO_CONTEXT_SERR_CNT"},
- {true, "RX_STASH_CFG_SERR_CNT"},
- {true, "AXI_RD_FBD_SERR_CNT"},
- {true, "GRO_BD_MERR_CNT"},
- {true, "GRO_CONTEXT_MERR_CNT"},
-
- {true, "RX_STASH_CFG_MERR_CNT"},
- {true, "AXI_RD_FBD_MERR_CNT"},
- {false, "Reserved"},
- {false, "Reserved"},
- {false, "Reserved"},
- {false, "Reserved"},
-};
-
-static const struct hclge_dbg_dfx_message hclge_dbg_tqp_reg[] = {
- {true, "q_num"},
- {true, "RCB_CFG_RX_RING_TAIL"},
- {true, "RCB_CFG_RX_RING_HEAD"},
- {true, "RCB_CFG_RX_RING_FBDNUM"},
- {true, "RCB_CFG_RX_RING_OFFSET"},
- {true, "RCB_CFG_RX_RING_FBDOFFSET"},
-
- {true, "RCB_CFG_RX_RING_PKTNUM_RECORD"},
- {true, "RCB_CFG_TX_RING_TAIL"},
- {true, "RCB_CFG_TX_RING_HEAD"},
- {true, "RCB_CFG_TX_RING_FBDNUM"},
- {true, "RCB_CFG_TX_RING_OFFSET"},
- {true, "RCB_CFG_TX_RING_EBDNUM"},
-};
-
#define HCLGE_DBG_INFO_LEN 256
#define HCLGE_DBG_VLAN_FLTR_INFO_LEN 256
#define HCLGE_DBG_VLAN_OFFLOAD_INFO_LEN 512
@@ -771,4 +132,7 @@ struct hclge_dbg_vlan_cfg {
u8 pri_only2;
};
+int hclge_dbg_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc_src,
+ int index, int bd_num, enum hclge_opcode_type cmd);
+
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c
index 9a939c0b217f..a1571c108678 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c
@@ -5,6 +5,34 @@
#include "hclge_devlink.h"
+static int hclge_devlink_scc_info_get(struct devlink *devlink,
+ struct devlink_info_req *req)
+{
+ struct hclge_devlink_priv *priv = devlink_priv(devlink);
+ char scc_version[HCLGE_DEVLINK_FW_SCC_LEN];
+ struct hclge_dev *hdev = priv->hdev;
+ u32 scc_version_tmp;
+ int ret;
+
+ ret = hclge_query_scc_version(hdev, &scc_version_tmp);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get scc version, ret = %d\n", ret);
+ return ret;
+ }
+
+ snprintf(scc_version, sizeof(scc_version), "%lu.%lu.%lu.%lu",
+ hnae3_get_field(scc_version_tmp, HNAE3_SCC_VERSION_BYTE3_MASK,
+ HNAE3_FW_VERSION_BYTE3_SHIFT),
+ hnae3_get_field(scc_version_tmp, HNAE3_SCC_VERSION_BYTE2_MASK,
+ HNAE3_FW_VERSION_BYTE2_SHIFT),
+ hnae3_get_field(scc_version_tmp, HNAE3_SCC_VERSION_BYTE1_MASK,
+ HNAE3_FW_VERSION_BYTE1_SHIFT),
+ hnae3_get_field(scc_version_tmp, HNAE3_SCC_VERSION_BYTE0_MASK,
+ HNAE3_FW_VERSION_BYTE0_SHIFT));
+ return devlink_info_version_running_put(req, "fw.scc", scc_version);
+}
+
static int hclge_devlink_info_get(struct devlink *devlink,
struct devlink_info_req *req,
struct netlink_ext_ack *extack)
@@ -13,6 +41,7 @@ static int hclge_devlink_info_get(struct devlink *devlink,
struct hclge_devlink_priv *priv = devlink_priv(devlink);
char version_str[HCLGE_DEVLINK_FW_STRING_LEN];
struct hclge_dev *hdev = priv->hdev;
+ int ret;
snprintf(version_str, sizeof(version_str), "%lu.%lu.%lu.%lu",
hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
@@ -24,9 +53,18 @@ static int hclge_devlink_info_get(struct devlink *devlink,
hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
HNAE3_FW_VERSION_BYTE0_SHIFT));
- return devlink_info_version_running_put(req,
- DEVLINK_INFO_VERSION_GENERIC_FW,
- version_str);
+ ret = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ version_str);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "failed to set running version of fw\n");
+ return ret;
+ }
+
+ if (hdev->pdev->revision > HNAE3_DEVICE_VERSION_V2)
+ ret = hclge_devlink_scc_info_get(devlink, req);
+
+ return ret;
}
static int hclge_devlink_reload_down(struct devlink *devlink, bool netns_change,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h
index 918be04507a5..148effa5ea89 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h
@@ -6,6 +6,8 @@
#include "hclge_main.h"
+#define HCLGE_DEVLINK_FW_SCC_LEN 32
+
struct hclge_devlink_priv {
struct hclge_dev *hdev;
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index d63e114f93d0..cc7f46c0b35f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -1198,6 +1198,425 @@ static const struct hclge_hw_error hclge_rocee_qmm_ovf_err_int[] = {
}
};
+static const struct hclge_mod_reg_info hclge_ssu_reg_0_info[] = {
+ {
+ .reg_name = "SSU_BP_STATUS_0~5",
+ .reg_offset_group = { 5, 6, 7, 8, 9, 10},
+ .group_size = 6
+ }, {
+ .reg_name = "LO_PRI_UNICAST_CUR_CNT",
+ .reg_offset_group = {54},
+ .group_size = 1
+ }, {
+ .reg_name = "HI/LO_PRI_MULTICAST_CUR_CNT",
+ .reg_offset_group = {55, 56},
+ .group_size = 2
+ }, {
+ .reg_name = "SSU_MB_RD_RLT_DROP_CNT",
+ .reg_offset_group = {29},
+ .group_size = 1
+ }, {
+ .reg_name = "SSU_PPP_MAC_KEY_NUM",
+ .reg_offset_group = {31, 30},
+ .group_size = 2
+ }, {
+ .reg_name = "SSU_PPP_HOST_KEY_NUM",
+ .reg_offset_group = {33, 32},
+ .group_size = 2
+ }, {
+ .reg_name = "PPP_SSU_MAC/HOST_RLT_NUM",
+ .reg_offset_group = {35, 34, 37, 36},
+ .group_size = 4
+ }, {
+ .reg_name = "FULL/PART_DROP_NUM",
+ .reg_offset_group = {18, 19},
+ .group_size = 2
+ }, {
+ .reg_name = "PPP_KEY/RLT_DROP_NUM",
+ .reg_offset_group = {20, 21},
+ .group_size = 2
+ }, {
+ .reg_name = "NIC/ROC_L2_ERR_DROP_PKT_CNT",
+ .reg_offset_group = {48, 49},
+ .group_size = 2
+ }, {
+ .reg_name = "NIC/ROC_L2_ERR_DROP_PKT_CNT_RX",
+ .reg_offset_group = {50, 51},
+ .group_size = 2
+ },
+};
+
+static const struct hclge_mod_reg_info hclge_ssu_reg_1_info[] = {
+ {
+ .reg_name = "RX_PACKET_IN/OUT_CNT",
+ .reg_offset_group = {13, 12, 15, 14},
+ .group_size = 4
+ }, {
+ .reg_name = "TX_PACKET_IN/OUT_CNT",
+ .reg_offset_group = {17, 16, 19, 18},
+ .group_size = 4
+ }, {
+ .reg_name = "RX_PACKET_TC0_IN/OUT_CNT",
+ .reg_offset_group = {25, 24, 41, 40},
+ .group_size = 4
+ }, {
+ .reg_name = "RX_PACKET_TC1_IN/OUT_CNT",
+ .reg_offset_group = {27, 26, 43, 42},
+ .group_size = 4
+ }, {
+ .reg_name = "RX_PACKET_TC2_IN/OUT_CNT",
+ .reg_offset_group = {29, 28, 45, 44},
+ .group_size = 4
+ }, {
+ .reg_name = "RX_PACKET_TC3_IN/OUT_CNT",
+ .reg_offset_group = {31, 30, 47, 46},
+ .group_size = 4
+ }, {
+ .reg_name = "RX_PACKET_TC4_IN/OUT_CNT",
+ .reg_offset_group = {33, 32, 49, 48},
+ .group_size = 4
+ }, {
+ .reg_name = "RX_PACKET_TC5_IN/OUT_CNT",
+ .reg_offset_group = {35, 34, 51, 50},
+ .group_size = 4
+ }, {
+ .reg_name = "RX_PACKET_TC6_IN/OUT_CNT",
+ .reg_offset_group = {37, 36, 53, 52},
+ .group_size = 4
+ }, {
+ .reg_name = "RX_PACKET_TC7_IN/OUT_CNT",
+ .reg_offset_group = {39, 38, 55, 54},
+ .group_size = 4
+ }, {
+ .reg_name = "TX_PACKET_TC0_IN/OUT_CNT",
+ .reg_offset_group = {57, 56, 73, 72},
+ .group_size = 4
+ }, {
+ .reg_name = "TX_PACKET_TC1_IN/OUT_CNT",
+ .reg_offset_group = {59, 58, 75, 74},
+ .group_size = 4
+ }, {
+ .reg_name = "TX_PACKET_TC2_IN/OUT_CNT",
+ .reg_offset_group = {61, 60, 77, 76},
+ .group_size = 4
+ }, {
+ .reg_name = "TX_PACKET_TC3_IN/OUT_CNT",
+ .reg_offset_group = {63, 62, 79, 78},
+ .group_size = 4
+ }, {
+ .reg_name = "TX_PACKET_TC4_IN/OUT_CNT",
+ .reg_offset_group = {65, 64, 81, 80},
+ .group_size = 4
+ }, {
+ .reg_name = "TX_PACKET_TC5_IN/OUT_CNT",
+ .reg_offset_group = {67, 66, 83, 82},
+ .group_size = 4
+ }, {
+ .reg_name = "TX_PACKET_TC6_IN/OUT_CNT",
+ .reg_offset_group = {69, 68, 85, 84},
+ .group_size = 4
+ }, {
+ .reg_name = "TX_PACKET_TC7_IN/OUT_CNT",
+ .reg_offset_group = {71, 70, 87, 86},
+ .group_size = 4
+ }, {
+ .reg_name = "PACKET_TC0~3_CURR_BUFFER_CNT",
+ .reg_offset_group = {1, 2, 3, 4},
+ .group_size = 4
+ }, {
+ .reg_name = "PACKET_TC4~7_CURR_BUFFER_CNT",
+ .reg_offset_group = {5, 6, 7, 8},
+ .group_size = 4
+ }, {
+ .reg_name = "ROC_RX_PACKET_IN_CNT",
+ .reg_offset_group = {21, 20},
+ .group_size = 2
+ }, {
+ .reg_name = "ROC_TX_PACKET_OUT_CNT",
+ .reg_offset_group = {23, 22},
+ .group_size = 2
+ }
+};
+
+static const struct hclge_mod_reg_info hclge_rpu_reg_0_info[] = {
+ {
+ .reg_name = "RPU_FSM_DFX_ST0/ST1_TNL",
+ .has_suffix = true,
+ .reg_offset_group = {1, 2},
+ .group_size = 2
+ }, {
+ .reg_name = "RPU_RX_PKT_DROP_CNT_TNL",
+ .has_suffix = true,
+ .reg_offset_group = {3},
+ .group_size = 1
+ }
+};
+
+static const struct hclge_mod_reg_info hclge_rpu_reg_1_info[] = {
+ {
+ .reg_name = "FIFO_DFX_ST0_1_2_4",
+ .reg_offset_group = {1, 2, 3, 5},
+ .group_size = 4
+ }
+};
+
+static const struct hclge_mod_reg_info hclge_igu_egu_reg_info[] = {
+ {
+ .reg_name = "IGU_RX_ERR_PKT",
+ .reg_offset_group = {1},
+ .group_size = 1
+ }, {
+ .reg_name = "IGU_RX_OUT_ALL_PKT",
+ .reg_offset_group = {29, 28},
+ .group_size = 2
+ }, {
+ .reg_name = "EGU_TX_OUT_ALL_PKT",
+ .reg_offset_group = {39, 38},
+ .group_size = 2
+ }, {
+ .reg_name = "EGU_TX_ERR_PKT",
+ .reg_offset_group = {5},
+ .group_size = 1
+ }
+};
+
+static const struct hclge_mod_reg_info hclge_gen_reg_info_tnl[] = {
+ {
+ .reg_name = "SSU2RPU_TNL_WR_PKT_CNT_TNL",
+ .has_suffix = true,
+ .reg_offset_group = {1},
+ .group_size = 1
+ }, {
+ .reg_name = "RPU2HST_TNL_WR_PKT_CNT_TNL",
+ .has_suffix = true,
+ .reg_offset_group = {12},
+ .group_size = 1
+ }
+};
+
+static const struct hclge_mod_reg_info hclge_gen_reg_info[] = {
+ {
+ .reg_name = "SSU_OVERSIZE_DROP_CNT",
+ .reg_offset_group = {12},
+ .group_size = 1
+ }, {
+ .reg_name = "ROCE_RX_BYPASS_5NS_DROP_NUM",
+ .reg_offset_group = {13},
+ .group_size = 1
+ }, {
+ .reg_name = "RX_PKT_IN/OUT_ERR_CNT",
+ .reg_offset_group = {15, 14, 19, 18},
+ .group_size = 4
+ }, {
+ .reg_name = "TX_PKT_IN/OUT_ERR_CNT",
+ .reg_offset_group = {17, 16, 21, 20},
+ .group_size = 4
+ }, {
+ .reg_name = "ETS_TC_READY",
+ .reg_offset_group = {22},
+ .group_size = 1
+ }, {
+ .reg_name = "MIB_TX/RX_BAD_PKTS",
+ .reg_offset_group = {19, 18, 29, 28},
+ .group_size = 4
+ }, {
+ .reg_name = "MIB_TX/RX_GOOD_PKTS",
+ .reg_offset_group = {21, 20, 31, 30},
+ .group_size = 4
+ }, {
+ .reg_name = "MIB_TX/RX_TOTAL_PKTS",
+ .reg_offset_group = {23, 22, 33, 32},
+ .group_size = 4
+ }, {
+ .reg_name = "MIB_TX/RX_PAUSE_PKTS",
+ .reg_offset_group = {25, 24, 35, 34},
+ .group_size = 4
+ }, {
+ .reg_name = "MIB_TX_ERR_ALL_PKTS",
+ .reg_offset_group = {27, 26},
+ .group_size = 2
+ }, {
+ .reg_name = "MIB_RX_FCS_ERR_PKTS",
+ .reg_offset_group = {37, 36},
+ .group_size = 2
+ }, {
+ .reg_name = "IGU_EGU_AUTO_GATE_EN",
+ .reg_offset_group = {42},
+ .group_size = 1
+ }, {
+ .reg_name = "IGU_EGU_INT_SRC",
+ .reg_offset_group = {43},
+ .group_size = 1
+ }, {
+ .reg_name = "EGU_READY_NUM_CFG",
+ .reg_offset_group = {44},
+ .group_size = 1
+ }, {
+ .reg_name = "IGU_EGU_TNL_DFX",
+ .reg_offset_group = {45},
+ .group_size = 1
+ }, {
+ .reg_name = "TX_TNL_NOTE_PKT",
+ .reg_offset_group = {46},
+ .group_size = 1
+ }
+};
+
+static const struct hclge_mod_reg_common_msg hclge_ssu_reg_common_msg[] = {
+ {
+ .cmd = HCLGE_OPC_DFX_SSU_REG_0,
+ .result_regs = hclge_ssu_reg_0_info,
+ .bd_num = HCLGE_BD_NUM_SSU_REG_0,
+ .result_regs_size = ARRAY_SIZE(hclge_ssu_reg_0_info)
+ }, {
+ .cmd = HCLGE_OPC_DFX_SSU_REG_1,
+ .result_regs = hclge_ssu_reg_1_info,
+ .bd_num = HCLGE_BD_NUM_SSU_REG_1,
+ .result_regs_size = ARRAY_SIZE(hclge_ssu_reg_1_info)
+ }, {
+ .cmd = HCLGE_OPC_DFX_RPU_REG_0,
+ .result_regs = hclge_rpu_reg_0_info,
+ .bd_num = HCLGE_BD_NUM_RPU_REG_0,
+ .result_regs_size = ARRAY_SIZE(hclge_rpu_reg_0_info),
+ .need_para = true
+ }, {
+ .cmd = HCLGE_OPC_DFX_RPU_REG_1,
+ .result_regs = hclge_rpu_reg_1_info,
+ .bd_num = HCLGE_BD_NUM_RPU_REG_1,
+ .result_regs_size = ARRAY_SIZE(hclge_rpu_reg_1_info)
+ }, {
+ .cmd = HCLGE_OPC_DFX_IGU_EGU_REG,
+ .result_regs = hclge_igu_egu_reg_info,
+ .bd_num = HCLGE_BD_NUM_IGU_EGU_REG,
+ .result_regs_size = ARRAY_SIZE(hclge_igu_egu_reg_info)
+ }, {
+ .cmd = HCLGE_OPC_DFX_GEN_REG,
+ .result_regs = hclge_gen_reg_info_tnl,
+ .bd_num = HCLGE_BD_NUM_GEN_REG,
+ .result_regs_size = ARRAY_SIZE(hclge_gen_reg_info_tnl),
+ .need_para = true
+ }, {
+ .cmd = HCLGE_OPC_DFX_GEN_REG,
+ .result_regs = hclge_gen_reg_info,
+ .bd_num = HCLGE_BD_NUM_GEN_REG,
+ .result_regs_size = ARRAY_SIZE(hclge_gen_reg_info)
+ }
+};
+
+static int
+hclge_print_mod_reg_info(struct device *dev, struct hclge_desc *desc,
+ const struct hclge_mod_reg_info *reg_info, int size)
+{
+ int i, j, pos, actual_len;
+ u8 offset, bd_idx, index;
+ char *buf;
+
+ buf = kzalloc(HCLGE_MOD_REG_INFO_LEN_MAX, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ for (i = 0; i < size; i++) {
+ actual_len = strlen(reg_info[i].reg_name) +
+ HCLGE_MOD_REG_EXTRA_LEN +
+ HCLGE_MOD_REG_VALUE_LEN * reg_info[i].group_size;
+ if (actual_len > HCLGE_MOD_REG_INFO_LEN_MAX) {
+ dev_info(dev, "length of reg(%s) is invalid, len=%d\n",
+ reg_info[i].reg_name, actual_len);
+ continue;
+ }
+
+ pos = scnprintf(buf, HCLGE_MOD_REG_INFO_LEN_MAX, "%s",
+ reg_info[i].reg_name);
+ if (reg_info[i].has_suffix)
+ pos += scnprintf(buf + pos,
+ HCLGE_MOD_REG_INFO_LEN_MAX - pos, "%u",
+ le32_to_cpu(desc->data[0]));
+ pos += scnprintf(buf + pos,
+ HCLGE_MOD_REG_INFO_LEN_MAX - pos,
+ ":");
+ for (j = 0; j < reg_info[i].group_size; j++) {
+ offset = reg_info[i].reg_offset_group[j];
+ index = offset % HCLGE_DESC_DATA_LEN;
+ bd_idx = offset / HCLGE_DESC_DATA_LEN;
+ pos += scnprintf(buf + pos,
+ HCLGE_MOD_REG_INFO_LEN_MAX - pos,
+ " %08x",
+ le32_to_cpu(desc[bd_idx].data[index]));
+ }
+ dev_info(dev, "%s\n", buf);
+ }
+
+ kfree(buf);
+ return 0;
+}
+
+static bool hclge_err_mod_check_support_cmd(enum hclge_opcode_type opcode,
+ struct hclge_dev *hdev)
+{
+ if (opcode == HCLGE_OPC_DFX_GEN_REG &&
+ !hnae3_ae_dev_gen_reg_dfx_supported(hdev))
+ return false;
+ return true;
+}
+
+/* For each common msg, send a cmdq command to the IMP and print the result
+ * reg info. If there is a parameter, loop over it and request each value.
+ */
+static void
+hclge_query_reg_info(struct hclge_dev *hdev,
+ struct hclge_mod_reg_common_msg *msg, u32 loop_time,
+ u32 *loop_para)
+{
+ int desc_len, i, ret;
+
+ desc_len = msg->bd_num * sizeof(struct hclge_desc);
+ msg->desc = kzalloc(desc_len, GFP_KERNEL);
+ if (!msg->desc) {
+ dev_err(&hdev->pdev->dev, "failed to query reg info, ret=%d\n",
+ -ENOMEM);
+ return;
+ }
+
+ for (i = 0; i < loop_time; i++) {
+ ret = hclge_dbg_cmd_send(hdev, msg->desc, *loop_para,
+ msg->bd_num, msg->cmd);
+ loop_para++;
+ if (ret)
+ continue;
+ ret = hclge_print_mod_reg_info(&hdev->pdev->dev, msg->desc,
+ msg->result_regs,
+ msg->result_regs_size);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "failed to print mod reg info, ret=%d\n",
+ ret);
+ }
+
+ kfree(msg->desc);
+}
+
+static void hclge_query_reg_info_of_ssu(struct hclge_dev *hdev)
+{
+ u32 loop_para[HCLGE_MOD_MSG_PARA_ARRAY_MAX_SIZE] = {0};
+ struct hclge_mod_reg_common_msg msg;
+ u8 i, j, num, loop_time;
+
+ num = ARRAY_SIZE(hclge_ssu_reg_common_msg);
+ for (i = 0; i < num; i++) {
+ msg = hclge_ssu_reg_common_msg[i];
+ if (!hclge_err_mod_check_support_cmd(msg.cmd, hdev))
+ continue;
+ loop_time = 1;
+ loop_para[0] = 0;
+ if (msg.need_para) {
+ loop_time = min(hdev->ae_dev->dev_specs.tnl_num,
+ HCLGE_MOD_MSG_PARA_ARRAY_MAX_SIZE);
+ for (j = 0; j < loop_time; j++)
+ loop_para[j] = j + 1;
+ }
+ hclge_query_reg_info(hdev, &msg, loop_time, loop_para);
+ }
+}
+
static const struct hclge_hw_module_id hclge_hw_module_id_st[] = {
{
.module_id = MODULE_NONE,
@@ -1210,7 +1629,8 @@ static const struct hclge_hw_module_id hclge_hw_module_id_st[] = {
.msg = "MODULE_GE"
}, {
.module_id = MODULE_IGU_EGU,
- .msg = "MODULE_IGU_EGU"
+ .msg = "MODULE_IGU_EGU",
+ .query_reg_info = hclge_query_reg_info_of_ssu
}, {
.module_id = MODULE_LGE,
.msg = "MODULE_LGE"
@@ -1231,7 +1651,8 @@ static const struct hclge_hw_module_id hclge_hw_module_id_st[] = {
.msg = "MODULE_RTC"
}, {
.module_id = MODULE_SSU,
- .msg = "MODULE_SSU"
+ .msg = "MODULE_SSU",
+ .query_reg_info = hclge_query_reg_info_of_ssu
}, {
.module_id = MODULE_TM,
.msg = "MODULE_TM"
@@ -2762,7 +3183,7 @@ void hclge_handle_occurred_error(struct hclge_dev *hdev)
}
static bool
-hclge_handle_error_type_reg_log(struct device *dev,
+hclge_handle_error_type_reg_log(struct hclge_dev *hdev,
struct hclge_mod_err_info *mod_info,
struct hclge_type_reg_err_info *type_reg_info)
{
@@ -2770,6 +3191,7 @@ hclge_handle_error_type_reg_log(struct device *dev,
#define HCLGE_ERR_TYPE_IS_RAS_OFFSET 7
u8 mod_id, total_module, type_id, total_type, i, is_ras;
+ struct device *dev = &hdev->pdev->dev;
u8 index_module = MODULE_NONE;
u8 index_type = NONE_ERROR;
bool cause_by_vf = false;
@@ -2810,6 +3232,9 @@ hclge_handle_error_type_reg_log(struct device *dev,
for (i = 0; i < type_reg_info->reg_num; i++)
dev_err(dev, "0x%08x\n", type_reg_info->hclge_reg[i]);
+ if (hclge_hw_module_id_st[index_module].query_reg_info)
+ hclge_hw_module_id_st[index_module].query_reg_info(hdev);
+
return cause_by_vf;
}
@@ -2850,7 +3275,7 @@ static void hclge_handle_error_module_log(struct hnae3_ae_dev *ae_dev,
type_reg_info = (struct hclge_type_reg_err_info *)
&buf[offset++];
- if (hclge_handle_error_type_reg_log(dev, mod_info,
+ if (hclge_handle_error_type_reg_log(hdev, mod_info,
type_reg_info))
cause_by_vf = true;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
index 68b738affa66..45a783a50643 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
@@ -5,6 +5,7 @@
#define __HCLGE_ERR_H
#include "hclge_main.h"
+#include "hclge_debugfs.h"
#include "hnae3.h"
#define HCLGE_MPF_RAS_INT_MIN_BD_NUM 10
@@ -115,6 +116,18 @@
#define HCLGE_REG_NUM_MAX 256
#define HCLGE_DESC_NO_DATA_LEN 8
+#define HCLGE_BD_NUM_SSU_REG_0 10
+#define HCLGE_BD_NUM_SSU_REG_1 15
+#define HCLGE_BD_NUM_RPU_REG_0 1
+#define HCLGE_BD_NUM_RPU_REG_1 2
+#define HCLGE_BD_NUM_IGU_EGU_REG 9
+#define HCLGE_BD_NUM_GEN_REG 8
+#define HCLGE_MOD_REG_INFO_LEN_MAX 256
+#define HCLGE_MOD_REG_EXTRA_LEN 11
+#define HCLGE_MOD_REG_VALUE_LEN 9
+#define HCLGE_MOD_REG_GROUP_MAX_SIZE 6
+#define HCLGE_MOD_MSG_PARA_ARRAY_MAX_SIZE 8
+
enum hclge_err_int_type {
HCLGE_ERR_INT_MSIX = 0,
HCLGE_ERR_INT_RAS_CE = 1,
@@ -191,6 +204,7 @@ struct hclge_hw_error {
struct hclge_hw_module_id {
enum hclge_mod_name_list module_id;
const char *msg;
+ void (*query_reg_info)(struct hclge_dev *hdev);
};
struct hclge_hw_type_id {
@@ -218,6 +232,28 @@ struct hclge_type_reg_err_info {
u32 hclge_reg[HCLGE_REG_NUM_MAX];
};
+struct hclge_mod_reg_info {
+ const char *reg_name;
+ bool has_suffix; /* add suffix for register name */
+ /* the positions of reg values in hclge_desc.data */
+ u8 reg_offset_group[HCLGE_MOD_REG_GROUP_MAX_SIZE];
+ u8 group_size;
+};
+
+/* This structure defines the cmdq used to query the hardware module debug
+ * registers.
+ */
+struct hclge_mod_reg_common_msg {
+ enum hclge_opcode_type cmd;
+ struct hclge_desc *desc;
+ u8 bd_num; /* the bd number of hclge_desc used */
+ bool need_para; /* whether this cmdq needs an extra parameter */
+
+ /* the regs that need to be printed */
+ const struct hclge_mod_reg_info *result_regs;
+ u16 result_regs_size;
+};
+
int hclge_config_mac_tnl_int(struct hclge_dev *hdev, bool en);
int hclge_config_nic_hw_error(struct hclge_dev *hdev, bool state);
int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index ff6a2ed23ddb..cf8abbe01840 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -6,6 +6,7 @@
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
@@ -13,8 +14,9 @@
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
-#include <net/ipv6.h>
+
#include <net/rtnetlink.h>
+
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
@@ -27,6 +29,8 @@
#include "hclge_devlink.h"
#include "hclge_comm_cmd.h"
+#include "hclge_trace.h"
+
#define HCLGE_NAME "hclge"
#define HCLGE_BUF_SIZE_UNIT 256U
@@ -391,6 +395,48 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
return hclge_comm_cmd_send(&hw->hw, desc, num);
}
+static void hclge_trace_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
+ int num, bool is_special)
+{
+ int i;
+
+ trace_hclge_pf_cmd_send(hw, desc, 0, num);
+
+ if (!is_special) {
+ for (i = 1; i < num; i++)
+ trace_hclge_pf_cmd_send(hw, &desc[i], i, num);
+ } else {
+ for (i = 1; i < num; i++)
+ trace_hclge_pf_special_cmd_send(hw, (__le32 *)&desc[i],
+ i, num);
+ }
+}
+
+static void hclge_trace_cmd_get(struct hclge_comm_hw *hw, struct hclge_desc *desc,
+ int num, bool is_special)
+{
+ int i;
+
+ if (!HCLGE_COMM_SEND_SYNC(le16_to_cpu(desc->flag)))
+ return;
+
+ trace_hclge_pf_cmd_get(hw, desc, 0, num);
+
+ if (!is_special) {
+ for (i = 1; i < num; i++)
+ trace_hclge_pf_cmd_get(hw, &desc[i], i, num);
+ } else {
+ for (i = 1; i < num; i++)
+ trace_hclge_pf_special_cmd_get(hw, (__le32 *)&desc[i],
+ i, num);
+ }
+}
+
+static const struct hclge_comm_cmq_ops hclge_cmq_ops = {
+ .trace_cmd_send = hclge_trace_cmd_send,
+ .trace_cmd_get = hclge_trace_cmd_get,
+};
+
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21
@@ -444,7 +490,7 @@ static int hclge_mac_update_stats_complete(struct hclge_dev *hdev)
desc_num = reg_num / HCLGE_REG_NUM_PER_DESC + 1;
/* This may be called inside atomic sections,
- * so GFP_ATOMIC is more suitalbe here
+ * so GFP_ATOMIC is more suitable here
*/
desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
if (!desc)
@@ -536,7 +582,7 @@ static u64 *hclge_comm_get_stats(struct hclge_dev *hdev,
int size, u64 *data)
{
u64 *buf = data;
- u32 i;
+ int i;
for (i = 0; i < size; i++) {
if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
@@ -549,25 +595,21 @@ static u64 *hclge_comm_get_stats(struct hclge_dev *hdev,
return buf;
}
-static u8 *hclge_comm_get_strings(struct hclge_dev *hdev, u32 stringset,
- const struct hclge_comm_stats_str strs[],
- int size, u8 *data)
+static void hclge_comm_get_strings(struct hclge_dev *hdev, u32 stringset,
+ const struct hclge_comm_stats_str strs[],
+ int size, u8 **data)
{
- char *buff = (char *)data;
- u32 i;
+ int i;
if (stringset != ETH_SS_STATS)
- return buff;
+ return;
for (i = 0; i < size; i++) {
if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
continue;
- snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
- buff = buff + ETH_GSTRING_LEN;
+ ethtool_puts(data, strs[i].desc);
}
-
- return (u8 *)buff;
}
static void hclge_update_stats_for_all(struct hclge_dev *hdev)
@@ -672,44 +714,38 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
}
static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
- u8 *data)
+ u8 **data)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- u8 *p = (char *)data;
+ const char *str;
int size;
if (stringset == ETH_SS_STATS) {
size = ARRAY_SIZE(g_mac_stats_string);
- p = hclge_comm_get_strings(hdev, stringset, g_mac_stats_string,
- size, p);
- p = hclge_comm_tqps_get_strings(handle, p);
+ hclge_comm_get_strings(hdev, stringset, g_mac_stats_string,
+ size, data);
+ hclge_comm_tqps_get_strings(handle, data);
} else if (stringset == ETH_SS_TEST) {
if (handle->flags & HNAE3_SUPPORT_EXTERNAL_LOOPBACK) {
- memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_EXTERNAL],
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
+ str = hns3_nic_test_strs[HNAE3_LOOP_EXTERNAL];
+ ethtool_puts(data, str);
}
if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
- memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
+ str = hns3_nic_test_strs[HNAE3_LOOP_APP];
+ ethtool_puts(data, str);
}
if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
- memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
+ str = hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES];
+ ethtool_puts(data, str);
}
if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
- memcpy(p,
- hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
+ str = hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES];
+ ethtool_puts(data, str);
}
if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
- memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
+ str = hns3_nic_test_strs[HNAE3_LOOP_PHY];
+ ethtool_puts(data, str);
}
}
}
@@ -1537,6 +1573,9 @@ static int hclge_configure(struct hclge_dev *hdev)
cfg.default_speed, ret);
return ret;
}
+ hdev->hw.mac.req_speed = hdev->hw.mac.speed;
+ hdev->hw.mac.req_autoneg = AUTONEG_ENABLE;
+ hdev->hw.mac.req_duplex = DUPLEX_FULL;
hclge_parse_link_mode(hdev, cfg.speed_ability);
@@ -1766,7 +1805,8 @@ static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
nic->pdev = hdev->pdev;
nic->ae_algo = &ae_algo;
- nic->numa_node_mask = hdev->numa_node_mask;
+ bitmap_copy(nic->numa_node_mask.bits, hdev->numa_node_mask.bits,
+ MAX_NUMNODES);
nic->kinfo.io_base = hdev->hw.hw.io_base;
ret = hclge_knic_setup(vport, num_tqps,
@@ -2142,8 +2182,8 @@ static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
-static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
- struct hclge_pkt_buf_alloc *buf_alloc)
+static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
+ struct hclge_pkt_buf_alloc *buf_alloc)
{
#define COMPENSATE_BUFFER 0x3C00
#define COMPENSATE_HALF_MPS_NUM 5
@@ -2318,7 +2358,7 @@ static int hclge_common_thrd_config(struct hclge_dev *hdev,
for (i = 0; i < 2; i++) {
hclge_cmd_setup_basic_desc(&desc[i],
HCLGE_OPC_RX_COM_THRD_ALLOC, false);
- req = (struct hclge_rx_com_thrd *)&desc[i].data;
+ req = (struct hclge_rx_com_thrd *)desc[i].data;
/* The first descriptor set the NEXT bit to 1 */
if (i == 0)
@@ -2458,7 +2498,8 @@ static int hclge_init_roce_base_info(struct hclge_vport *vport)
roce->pdev = nic->pdev;
roce->ae_algo = nic->ae_algo;
- roce->numa_node_mask = nic->numa_node_mask;
+ bitmap_copy(roce->numa_node_mask.bits, nic->numa_node_mask.bits,
+ MAX_NUMNODES);
return 0;
}
@@ -2583,7 +2624,7 @@ int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex, u8 lan
int ret;
duplex = hclge_check_speed_dup(duplex, speed);
- if (!mac->support_autoneg && mac->speed == speed &&
+ if (!mac->support_autoneg && mac->speed == (u32)speed &&
mac->duplex == duplex && (mac->lane_num == lane_num || lane_num == 0))
return 0;
@@ -2604,8 +2645,17 @@ static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex, lane_num);
+
+ if (ret)
+ return ret;
+
+ hdev->hw.mac.req_speed = (u32)speed;
+ hdev->hw.mac.req_duplex = duplex;
- return hclge_cfg_mac_speed_dup(hdev, speed, duplex, lane_num);
+ return 0;
}
static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
@@ -2907,17 +2957,20 @@ static int hclge_mac_init(struct hclge_dev *hdev)
if (!test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
hdev->hw.mac.duplex = HCLGE_MAC_FULL;
- ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
- hdev->hw.mac.duplex, hdev->hw.mac.lane_num);
- if (ret)
- return ret;
-
if (hdev->hw.mac.support_autoneg) {
ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
if (ret)
return ret;
}
+ if (!hdev->hw.mac.autoneg) {
+ ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.req_speed,
+ hdev->hw.mac.req_duplex,
+ hdev->hw.mac.lane_num);
+ if (ret)
+ return ret;
+ }
+
mac->link = 0;
if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
@@ -3037,9 +3090,7 @@ static void hclge_push_link_status(struct hclge_dev *hdev)
static void hclge_update_link_status(struct hclge_dev *hdev)
{
- struct hnae3_handle *rhandle = &hdev->vport[0].roce;
struct hnae3_handle *handle = &hdev->vport[0].nic;
- struct hnae3_client *rclient = hdev->roce_client;
struct hnae3_client *client = hdev->nic_client;
int state;
int ret;
@@ -3063,8 +3114,15 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
client->ops->link_status_change(handle, state);
hclge_config_mac_tnl_int(hdev, state);
- if (rclient && rclient->ops->link_status_change)
- rclient->ops->link_status_change(rhandle, state);
+
+ if (test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state)) {
+ struct hnae3_handle *rhandle = &hdev->vport[0].roce;
+ struct hnae3_client *rclient = hdev->roce_client;
+
+ if (rclient && rclient->ops->link_status_change)
+ rclient->ops->link_status_change(rhandle,
+ state);
+ }
hclge_push_link_status(hdev);
}
@@ -3342,9 +3400,9 @@ hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
return ret;
}
- hdev->hw.mac.autoneg = cmd->base.autoneg;
- hdev->hw.mac.speed = cmd->base.speed;
- hdev->hw.mac.duplex = cmd->base.duplex;
+ hdev->hw.mac.req_autoneg = cmd->base.autoneg;
+ hdev->hw.mac.req_speed = cmd->base.speed;
+ hdev->hw.mac.req_duplex = cmd->base.duplex;
linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
return 0;
@@ -3377,9 +3435,9 @@ static int hclge_tp_port_init(struct hclge_dev *hdev)
if (!hnae3_dev_phy_imp_supported(hdev))
return 0;
- cmd.base.autoneg = hdev->hw.mac.autoneg;
- cmd.base.speed = hdev->hw.mac.speed;
- cmd.base.duplex = hdev->hw.mac.duplex;
+ cmd.base.autoneg = hdev->hw.mac.req_autoneg;
+ cmd.base.speed = hdev->hw.mac.req_speed;
+ cmd.base.duplex = hdev->hw.mac.req_duplex;
linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
@@ -3388,7 +3446,7 @@ static int hclge_tp_port_init(struct hclge_dev *hdev)
static int hclge_update_port_info(struct hclge_dev *hdev)
{
struct hclge_mac *mac = &hdev->hw.mac;
- int speed;
+ u32 speed;
int ret;
/* get the port info from SFP cmd if not copper port */
@@ -3517,6 +3575,17 @@ static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
return ret;
}
+static void hclge_set_reset_pending(struct hclge_dev *hdev,
+ enum hnae3_reset_type reset_type)
+{
+ /* When an incorrect reset type is executed, the get_reset_level
+ * function generates the HNAE3_NONE_RESET flag. As a result, this
+ * type does not need to be set as pending.
+ */
+ if (reset_type != HNAE3_NONE_RESET)
+ set_bit(reset_type, &hdev->reset_pending);
+}
+
static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
{
u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg;
@@ -3537,7 +3606,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
*/
if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
- set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
+ hclge_set_reset_pending(hdev, HNAE3_IMP_RESET);
set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
hdev->rst_stats.imp_rst_cnt++;
@@ -3547,7 +3616,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
dev_info(&hdev->pdev->dev, "global reset interrupt\n");
set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
- set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
+ hclge_set_reset_pending(hdev, HNAE3_GLOBAL_RESET);
*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
hdev->rst_stats.global_rst_cnt++;
return HCLGE_VECTOR0_EVENT_RST;
@@ -3702,7 +3771,7 @@ static int hclge_misc_irq_init(struct hclge_dev *hdev)
snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
HCLGE_NAME, pci_name(hdev->pdev));
ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
- 0, hdev->misc_vector.name, hdev);
+ IRQF_NO_AUTOEN, hdev->misc_vector.name, hdev);
if (ret) {
hclge_free_vector(hdev, 0);
dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
@@ -3995,7 +4064,7 @@ static void hclge_do_reset(struct hclge_dev *hdev)
case HNAE3_FUNC_RESET:
dev_info(&pdev->dev, "PF reset requested\n");
/* schedule again to check later */
- set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
+ hclge_set_reset_pending(hdev, HNAE3_FUNC_RESET);
hclge_reset_task_schedule(hdev);
break;
default:
@@ -4029,6 +4098,8 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
clear_bit(HNAE3_FLR_RESET, addr);
}
+ clear_bit(HNAE3_NONE_RESET, addr);
+
if (hdev->reset_type != HNAE3_NONE_RESET &&
rst_level < hdev->reset_type)
return HNAE3_NONE_RESET;
@@ -4170,7 +4241,7 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev)
return false;
} else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
hdev->rst_stats.reset_fail_cnt++;
- set_bit(hdev->reset_type, &hdev->reset_pending);
+ hclge_set_reset_pending(hdev, hdev->reset_type);
dev_info(&hdev->pdev->dev,
"re-schedule reset task(%u)\n",
hdev->rst_stats.reset_fail_cnt);
@@ -4413,14 +4484,26 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
enum hnae3_reset_type rst_type)
{
+#define HCLGE_SUPPORT_RESET_TYPE \
+ (BIT(HNAE3_FLR_RESET) | BIT(HNAE3_FUNC_RESET) | \
+ BIT(HNAE3_GLOBAL_RESET) | BIT(HNAE3_IMP_RESET))
+
struct hclge_dev *hdev = ae_dev->priv;
+ if (!(BIT(rst_type) & HCLGE_SUPPORT_RESET_TYPE)) {
+ /* To prevent reset triggered by hclge_reset_event */
+ set_bit(HNAE3_NONE_RESET, &hdev->default_reset_request);
+ dev_warn(&hdev->pdev->dev, "unsupported reset type %d\n",
+ rst_type);
+ return;
+ }
+
set_bit(rst_type, &hdev->default_reset_request);
}
static void hclge_reset_timer(struct timer_list *t)
{
- struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
+ struct hclge_dev *hdev = timer_container_of(hdev, t, reset_timer);
/* if default_reset_request has no value, it means that this reset
* request has already be handled, so just return here
@@ -4789,7 +4872,7 @@ static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
}
static int hclge_set_rss_tuple(struct hnae3_handle *handle,
- struct ethtool_rxnfc *nfc)
+ const struct ethtool_rxfh_fields *nfc)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -4807,7 +4890,7 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle,
}
static int hclge_get_rss_tuple(struct hnae3_handle *handle,
- struct ethtool_rxnfc *nfc)
+ struct ethtool_rxfh_fields *nfc)
{
struct hclge_vport *vport = hclge_get_vport(handle);
u8 tuple_sets;
@@ -6224,15 +6307,15 @@ static void hclge_fd_get_ip4_tuple(struct ethtool_rx_flow_spec *fs,
static void hclge_fd_get_tcpip6_tuple(struct ethtool_rx_flow_spec *fs,
struct hclge_fd_rule *rule, u8 ip_proto)
{
- be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src,
- IPV6_SIZE);
- be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src,
- IPV6_SIZE);
+ ipv6_addr_be32_to_cpu(rule->tuples.src_ip,
+ fs->h_u.tcp_ip6_spec.ip6src);
+ ipv6_addr_be32_to_cpu(rule->tuples_mask.src_ip,
+ fs->m_u.tcp_ip6_spec.ip6src);
- be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst,
- IPV6_SIZE);
- be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst,
- IPV6_SIZE);
+ ipv6_addr_be32_to_cpu(rule->tuples.dst_ip,
+ fs->h_u.tcp_ip6_spec.ip6dst);
+ ipv6_addr_be32_to_cpu(rule->tuples_mask.dst_ip,
+ fs->m_u.tcp_ip6_spec.ip6dst);
rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
@@ -6253,15 +6336,15 @@ static void hclge_fd_get_tcpip6_tuple(struct ethtool_rx_flow_spec *fs,
static void hclge_fd_get_ip6_tuple(struct ethtool_rx_flow_spec *fs,
struct hclge_fd_rule *rule)
{
- be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src,
- IPV6_SIZE);
- be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src,
- IPV6_SIZE);
+ ipv6_addr_be32_to_cpu(rule->tuples.src_ip,
+ fs->h_u.usr_ip6_spec.ip6src);
+ ipv6_addr_be32_to_cpu(rule->tuples_mask.src_ip,
+ fs->m_u.usr_ip6_spec.ip6src);
- be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst,
- IPV6_SIZE);
- be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst,
- IPV6_SIZE);
+ ipv6_addr_be32_to_cpu(rule->tuples.dst_ip,
+ fs->h_u.usr_ip6_spec.ip6dst);
+ ipv6_addr_be32_to_cpu(rule->tuples_mask.dst_ip,
+ fs->m_u.usr_ip6_spec.ip6dst);
rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
@@ -6690,21 +6773,19 @@ static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
struct ethtool_tcpip6_spec *spec,
struct ethtool_tcpip6_spec *spec_mask)
{
- cpu_to_be32_array(spec->ip6src,
- rule->tuples.src_ip, IPV6_SIZE);
- cpu_to_be32_array(spec->ip6dst,
- rule->tuples.dst_ip, IPV6_SIZE);
+ ipv6_addr_cpu_to_be32(spec->ip6src, rule->tuples.src_ip);
+ ipv6_addr_cpu_to_be32(spec->ip6dst, rule->tuples.dst_ip);
if (rule->unused_tuple & BIT(INNER_SRC_IP))
memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
else
- cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
- IPV6_SIZE);
+ ipv6_addr_cpu_to_be32(spec_mask->ip6src,
+ rule->tuples_mask.src_ip);
if (rule->unused_tuple & BIT(INNER_DST_IP))
memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
else
- cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
- IPV6_SIZE);
+ ipv6_addr_cpu_to_be32(spec_mask->ip6dst,
+ rule->tuples_mask.dst_ip);
spec->tclass = rule->tuples.ip_tos;
spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
@@ -6723,19 +6804,19 @@ static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
struct ethtool_usrip6_spec *spec,
struct ethtool_usrip6_spec *spec_mask)
{
- cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
- cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
+ ipv6_addr_cpu_to_be32(spec->ip6src, rule->tuples.src_ip);
+ ipv6_addr_cpu_to_be32(spec->ip6dst, rule->tuples.dst_ip);
if (rule->unused_tuple & BIT(INNER_SRC_IP))
memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
else
- cpu_to_be32_array(spec_mask->ip6src,
- rule->tuples_mask.src_ip, IPV6_SIZE);
+ ipv6_addr_cpu_to_be32(spec_mask->ip6src,
+ rule->tuples_mask.src_ip);
if (rule->unused_tuple & BIT(INNER_DST_IP))
memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
else
- cpu_to_be32_array(spec_mask->ip6dst,
- rule->tuples_mask.dst_ip, IPV6_SIZE);
+ ipv6_addr_cpu_to_be32(spec_mask->ip6dst,
+ rule->tuples_mask.dst_ip);
spec->tclass = rule->tuples.ip_tos;
spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
@@ -6908,7 +6989,7 @@ static int hclge_get_all_rules(struct hnae3_handle *handle,
struct hclge_dev *hdev = vport->back;
struct hclge_fd_rule *rule;
struct hlist_node *node2;
- int cnt = 0;
+ u32 cnt = 0;
if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return -EOPNOTSUPP;
@@ -6953,7 +7034,7 @@ static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
} else {
int i;
- for (i = 0; i < IPV6_SIZE; i++) {
+ for (i = 0; i < IPV6_ADDR_WORDS; i++) {
tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
}
@@ -7178,8 +7259,9 @@ static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
}
}
-static void hclge_get_cls_key_ip(const struct flow_rule *flow,
- struct hclge_fd_rule *rule)
+static int hclge_get_cls_key_ip(const struct flow_rule *flow,
+ struct hclge_fd_rule *rule,
+ struct netlink_ext_ack *extack)
{
u16 addr_type = 0;
@@ -7188,6 +7270,9 @@ static void hclge_get_cls_key_ip(const struct flow_rule *flow,
flow_rule_match_control(flow, &match);
addr_type = match.key->addr_type;
+
+ if (flow_rule_has_control_flags(match.mask->flags, extack))
+ return -EOPNOTSUPP;
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
@@ -7204,18 +7289,20 @@ static void hclge_get_cls_key_ip(const struct flow_rule *flow,
struct flow_match_ipv6_addrs match;
flow_rule_match_ipv6_addrs(flow, &match);
- be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
- IPV6_SIZE);
- be32_to_cpu_array(rule->tuples_mask.src_ip,
- match.mask->src.s6_addr32, IPV6_SIZE);
- be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
- IPV6_SIZE);
- be32_to_cpu_array(rule->tuples_mask.dst_ip,
- match.mask->dst.s6_addr32, IPV6_SIZE);
+ ipv6_addr_be32_to_cpu(rule->tuples.src_ip,
+ match.key->src.s6_addr32);
+ ipv6_addr_be32_to_cpu(rule->tuples_mask.src_ip,
+ match.mask->src.s6_addr32);
+ ipv6_addr_be32_to_cpu(rule->tuples.dst_ip,
+ match.key->dst.s6_addr32);
+ ipv6_addr_be32_to_cpu(rule->tuples_mask.dst_ip,
+ match.mask->dst.s6_addr32);
} else {
rule->unused_tuple |= BIT(INNER_SRC_IP);
rule->unused_tuple |= BIT(INNER_DST_IP);
}
+
+ return 0;
}
static void hclge_get_cls_key_port(const struct flow_rule *flow,
@@ -7241,7 +7328,9 @@ static int hclge_parse_cls_flower(struct hclge_dev *hdev,
struct hclge_fd_rule *rule)
{
struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
+ struct netlink_ext_ack *extack = cls_flower->common.extack;
struct flow_dissector *dissector = flow->match.dissector;
+ int ret;
if (dissector->used_keys &
~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
@@ -7259,7 +7348,11 @@ static int hclge_parse_cls_flower(struct hclge_dev *hdev,
hclge_get_cls_key_basic(flow, rule);
hclge_get_cls_key_mac(flow, rule);
hclge_get_cls_key_vlan(flow, rule);
- hclge_get_cls_key_ip(flow, rule);
+
+ ret = hclge_get_cls_key_ip(flow, rule, extack);
+ if (ret)
+ return ret;
+
hclge_get_cls_key_port(flow, rule);
return 0;
@@ -7782,7 +7875,7 @@ static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
if (ret)
return ret;
- return phy_loopback(phydev, true);
+ return phy_loopback(phydev, true, 0);
}
static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
@@ -7790,7 +7883,7 @@ static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
{
int ret;
- ret = phy_loopback(phydev, false);
+ ret = phy_loopback(phydev, false, 0);
if (ret)
return ret;
@@ -7907,7 +8000,7 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
ret = hclge_tqp_enable(handle, en);
if (ret)
dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n",
- en ? "enable" : "disable", ret);
+ str_enable_disable(en), ret);
return ret;
}
@@ -7952,8 +8045,7 @@ static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
/* Set the DOWN flag here to disable link updating */
set_bit(HCLGE_STATE_DOWN, &hdev->state);
- /* flush memory to make sure DOWN is seen by service task */
- smp_mb__before_atomic();
+ smp_mb__after_atomic(); /* flush memory to make sure DOWN is seen by service task */
hclge_flush_link_update(hdev);
}
}
@@ -8131,14 +8223,14 @@ static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
word_num = vfid / 32;
bit_num = vfid % 32;
if (clr)
- desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
+ desc[1].data[word_num] &= cpu_to_le32(~(1U << bit_num));
else
desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
} else {
word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
bit_num = vfid % 32;
if (clr)
- desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
+ desc[2].data[word_num] &= cpu_to_le32(~(1U << bit_num));
else
desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
}
@@ -9200,7 +9292,7 @@ static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
static int init_mgr_tbl(struct hclge_dev *hdev)
{
int ret;
- int i;
+ u32 i;
for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
@@ -9337,8 +9429,7 @@ static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
/* this command reads phy id and register at the same time */
fallthrough;
case SIOCGMIIREG:
- data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
- return 0;
+ return hclge_read_phy_reg(hdev, data->reg_num, &data->val_out);
case SIOCSMIIREG:
return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
@@ -9353,15 +9444,8 @@ static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- switch (cmd) {
- case SIOCGHWTSTAMP:
- return hclge_ptp_get_cfg(hdev, ifr);
- case SIOCSHWTSTAMP:
- return hclge_ptp_set_cfg(hdev, ifr);
- default:
- if (!hdev->hw.mac.phydev)
- return hclge_mii_ioctl(hdev, ifr, cmd);
- }
+ if (!hdev->hw.mac.phydev)
+ return hclge_mii_ioctl(hdev, ifr, cmd);
return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
}
@@ -9484,33 +9568,36 @@ static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport)
return false;
}
-int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
+static int __hclge_enable_vport_vlan_filter(struct hclge_vport *vport,
+ bool request_en)
{
- struct hclge_dev *hdev = vport->back;
bool need_en;
int ret;
- mutex_lock(&hdev->vport_lock);
-
- vport->req_vlan_fltr_en = request_en;
-
need_en = hclge_need_enable_vport_vlan_filter(vport);
- if (need_en == vport->cur_vlan_fltr_en) {
- mutex_unlock(&hdev->vport_lock);
+ if (need_en == vport->cur_vlan_fltr_en)
return 0;
- }
ret = hclge_set_vport_vlan_filter(vport, need_en);
- if (ret) {
- mutex_unlock(&hdev->vport_lock);
+ if (ret)
return ret;
- }
vport->cur_vlan_fltr_en = need_en;
+ return 0;
+}
+
+int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
+{
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ mutex_lock(&hdev->vport_lock);
+ vport->req_vlan_fltr_en = request_en;
+ ret = __hclge_enable_vport_vlan_filter(vport, request_en);
mutex_unlock(&hdev->vport_lock);
- return 0;
+ return ret;
}
static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
@@ -9906,6 +9993,7 @@ static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
static int hclge_init_vlan_filter(struct hclge_dev *hdev)
{
struct hclge_vport *vport;
+ bool enable = true;
int ret;
int i;
@@ -9925,8 +10013,12 @@ static int hclge_init_vlan_filter(struct hclge_dev *hdev)
vport->cur_vlan_fltr_en = true;
}
+ if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps) &&
+ !test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, hdev->ae_dev->caps))
+ enable = false;
+
return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
- HCLGE_FILTER_FE_INGRESS, true, 0);
+ HCLGE_FILTER_FE_INGRESS, enable, 0);
}
static int hclge_init_vlan_type(struct hclge_dev *hdev)
@@ -10526,16 +10618,19 @@ static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev)
&vport->state))
continue;
- ret = hclge_enable_vport_vlan_filter(vport,
- vport->req_vlan_fltr_en);
+ mutex_lock(&hdev->vport_lock);
+ ret = __hclge_enable_vport_vlan_filter(vport,
+ vport->req_vlan_fltr_en);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to sync vlan filter state for vport%u, ret = %d\n",
vport->vport_id, ret);
set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
&vport->state);
+ mutex_unlock(&hdev->vport_lock);
return;
}
+ mutex_unlock(&hdev->vport_lock);
}
}
@@ -10616,7 +10711,7 @@ int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
mutex_lock(&hdev->vport_lock);
/* VF's mps must fit within hdev->mps */
- if (vport->vport_id && max_frm_size > hdev->mps) {
+ if (vport->vport_id && (u32)max_frm_size > hdev->mps) {
mutex_unlock(&hdev->vport_lock);
return -EINVAL;
} else if (vport->vport_id) {
@@ -10627,7 +10722,7 @@ int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
/* PF's mps must be greater then VF's mps */
for (i = 1; i < hdev->num_alloc_vport; i++)
- if (max_frm_size < hdev->vport[i].mps) {
+ if ((u32)max_frm_size < hdev->vport[i].mps) {
dev_err(&hdev->pdev->dev,
"failed to set pf mtu for less than vport %d, mps = %u.\n",
i, hdev->vport[i].mps);
@@ -10839,6 +10934,24 @@ static u32 hclge_get_fw_version(struct hnae3_handle *handle)
return hdev->fw_version;
}
+int hclge_query_scc_version(struct hclge_dev *hdev, u32 *scc_version)
+{
+ struct hclge_comm_query_scc_cmd *resp;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_SCC_VER, 1);
+ resp = (struct hclge_comm_query_scc_cmd *)desc.data;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ return ret;
+
+ *scc_version = le32_to_cpu(resp->scc_version);
+
+ return 0;
+}
+
static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
struct phy_device *phydev = hdev->hw.mac.phydev;
@@ -11085,9 +11198,9 @@ static void hclge_info_show(struct hclge_dev *hdev)
dev_info(dev, "This is %s PF\n",
hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
dev_info(dev, "DCB %s\n",
- handle->kinfo.tc_info.dcb_ets_active ? "enable" : "disable");
+ str_enable_disable(handle->kinfo.tc_info.dcb_ets_active));
dev_info(dev, "MQPRIO %s\n",
- handle->kinfo.tc_info.mqprio_active ? "enable" : "disable");
+ str_enable_disable(handle->kinfo.tc_info.mqprio_active));
dev_info(dev, "Default tx spare buffer size: %u\n",
hdev->tx_spare_buf_size);
@@ -11099,7 +11212,7 @@ static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
{
struct hnae3_client *client = vport->nic.client;
struct hclge_dev *hdev = ae_dev->priv;
- int rst_cnt = hdev->rst_stats.reset_cnt;
+ u32 rst_cnt = hdev->rst_stats.reset_cnt;
int ret;
ret = client->ops->init_instance(&vport->nic);
@@ -11143,7 +11256,7 @@ static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
{
struct hclge_dev *hdev = ae_dev->priv;
struct hnae3_client *client;
- int rst_cnt;
+ u32 rst_cnt;
int ret;
if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
@@ -11236,6 +11349,12 @@ clear_roce:
return ret;
}
+static bool hclge_uninit_need_wait(struct hclge_dev *hdev)
+{
+ return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
+ test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
+}
+
static void hclge_uninit_client_instance(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev)
{
@@ -11244,7 +11363,7 @@ static void hclge_uninit_client_instance(struct hnae3_client *client,
if (hdev->roce_client) {
clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
- while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ while (hclge_uninit_need_wait(hdev))
msleep(HCLGE_WAIT_RESET_DONE);
hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
@@ -11302,7 +11421,7 @@ static int hclge_pci_init(struct hclge_dev *hdev)
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(&pdev->dev,
- "can't set consistent PCI DMA");
+ "can't set consistent PCI DMA\n");
goto err_disable_device;
}
dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
@@ -11350,7 +11469,7 @@ static void hclge_pci_uninit(struct hclge_dev *hdev)
pcim_iounmap(pdev, hdev->hw.hw.io_base);
pci_free_irq_vectors(pdev);
- pci_release_mem_regions(pdev);
+ pci_release_regions(pdev);
pci_disable_device(pdev);
}
@@ -11371,7 +11490,7 @@ static void hclge_state_uninit(struct hclge_dev *hdev)
set_bit(HCLGE_STATE_REMOVING, &hdev->state);
if (hdev->reset_timer.function)
- del_timer_sync(&hdev->reset_timer);
+ timer_delete_sync(&hdev->reset_timer);
if (hdev->service_task.work.func)
cancel_delayed_work_sync(&hdev->service_task);
}
@@ -11422,8 +11541,8 @@ static void hclge_reset_done(struct hnae3_ae_dev *ae_dev)
dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
hdev->reset_type = HNAE3_NONE_RESET;
- clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
- up(&hdev->reset_sem);
+ if (test_and_clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ up(&hdev->reset_sem);
}
static void hclge_clear_resetting_state(struct hclge_dev *hdev)
@@ -11622,18 +11741,13 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
if (ret)
goto out;
- ret = hclge_devlink_init(hdev);
- if (ret)
- goto err_pci_uninit;
-
- devl_lock(hdev->devlink);
-
/* Firmware command queue initialize */
ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw);
if (ret)
- goto err_devlink_uninit;
+ goto err_pci_uninit;
/* Firmware command initialize */
+ hclge_comm_cmd_init_ops(&hdev->hw.hw, &hclge_cmq_ops);
ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version,
true, hdev->reset_pending);
if (ret)
@@ -11759,7 +11873,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
ret = hclge_update_port_info(hdev);
if (ret)
- goto err_mdiobus_unreg;
+ goto err_ptp_uninit;
INIT_KFIFO(hdev->mac_tnl_log);
@@ -11791,25 +11905,30 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_init_rxd_adv_layout(hdev);
- /* Enable MISC vector(vector0) */
- hclge_enable_vector(&hdev->misc_vector, true);
-
ret = hclge_init_wol(hdev);
if (ret)
dev_warn(&pdev->dev,
"failed to wake on lan init, ret = %d\n", ret);
+ ret = hclge_devlink_init(hdev);
+ if (ret)
+ goto err_ptp_uninit;
+
hclge_state_init(hdev);
hdev->last_reset_time = jiffies;
+ /* Enable MISC vector(vector0) */
+ enable_irq(hdev->misc_vector.vector_irq);
+ hclge_enable_vector(&hdev->misc_vector, true);
+
dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
HCLGE_DRIVER_NAME);
hclge_task_schedule(hdev, round_jiffies_relative(HZ));
-
- devl_unlock(hdev->devlink);
return 0;
+err_ptp_uninit:
+ hclge_ptp_uninit(hdev);
err_mdiobus_unreg:
if (hdev->hw.mac.phydev)
mdiobus_unregister(hdev->hw.mac.mdio_bus);
@@ -11819,9 +11938,6 @@ err_msi_uninit:
pci_free_irq_vectors(pdev);
err_cmd_uninit:
hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
-err_devlink_uninit:
- devl_unlock(hdev->devlink);
- hclge_devlink_uninit(hdev);
err_pci_uninit:
pcim_iounmap(pdev, hdev->hw.hw.io_base);
pci_release_regions(pdev);
@@ -11858,7 +11974,7 @@ static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
if (ret) {
dev_err(&hdev->pdev->dev,
"Set vf %d mac spoof check %s failed, ret=%d\n",
- vf, enable ? "on" : "off", ret);
+ vf, str_on_off(enable), ret);
return ret;
}
@@ -11866,7 +11982,7 @@ static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
if (ret)
dev_err(&hdev->pdev->dev,
"Set vf %d vlan spoof check %s failed, ret=%d\n",
- vf, enable ? "on" : "off", ret);
+ vf, str_on_off(enable), ret);
return ret;
}
@@ -11970,7 +12086,7 @@ static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
int min_tx_rate, int max_tx_rate)
{
if (min_tx_rate != 0 ||
- max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
+ max_tx_rate < 0 || (u32)max_tx_rate > hdev->hw.mac.max_speed) {
dev_err(&hdev->pdev->dev,
"min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
@@ -11995,7 +12111,7 @@ static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
if (!vport)
return -EINVAL;
- if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
+ if (!force && (u32)max_tx_rate == vport->vf_info.max_tx_rate)
return 0;
ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
@@ -12210,7 +12326,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
/* Disable MISC vector(vector0) */
hclge_enable_vector(&hdev->misc_vector, false);
- synchronize_irq(hdev->misc_vector.vector_irq);
+ disable_irq(hdev->misc_vector.vector_irq);
/* Disable all hw interrupts */
hclge_config_mac_tnl_int(hdev, false);
@@ -12746,7 +12862,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_fd_all_rules = hclge_get_all_rules,
.enable_fd = hclge_enable_fd,
.add_arfs_entry = hclge_add_fd_entry_by_arfs,
- .dbg_read_cmd = hclge_dbg_read_cmd,
+ .dbg_get_read_func = hclge_dbg_get_read_func,
.handle_hw_ras_error = hclge_handle_hw_ras_error,
.get_hw_reset_stat = hclge_get_hw_reset_stat,
.ae_dev_resetting = hclge_ae_dev_resetting,
@@ -12777,6 +12893,8 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_dscp_prio = hclge_get_dscp_prio,
.get_wol = hclge_get_wol,
.set_wol = hclge_set_wol,
+ .hwtstamp_get = hclge_ptp_get_cfg,
+ .hwtstamp_set = hclge_ptp_set_cfg,
};
static struct hnae3_ae_algo ae_algo = {
@@ -12786,9 +12904,10 @@ static struct hnae3_ae_algo ae_algo = {
static int __init hclge_init(void)
{
- pr_info("%s is initializing\n", HCLGE_NAME);
+ pr_debug("%s is initializing\n", HCLGE_NAME);
- hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGE_NAME);
+ hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0,
+ HCLGE_NAME);
if (!hclge_wq) {
pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
return -ENOMEM;
@@ -12801,9 +12920,11 @@ static int __init hclge_init(void)
static void __exit hclge_exit(void)
{
+ hnae3_acquire_unload_lock();
hnae3_unregister_ae_algo_prepare(&ae_algo);
hnae3_unregister_ae_algo(&ae_algo);
destroy_workqueue(hclge_wq);
+ hnae3_release_unload_lock();
}
module_init(hclge_init);
module_exit(hclge_exit);
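The hclge_main.c hunks above pair two related changes: hclge_uninit_need_wait() makes the client teardown path also wait while a link update is in flight, and hclge_reset_done() now releases the reset semaphore only if it still owns HCLGE_STATE_RST_HANDLING. A minimal sketch of that release-once pattern, using hypothetical names (my_dev, MY_RST_HANDLING and reset_sem are placeholders, not symbols from this driver):

static void my_reset_done(struct my_dev *mdev)
{
	/* ... rebuild hardware state ... */

	/* Release the semaphore only if this path still owns the
	 * RST_HANDLING bit; a concurrent teardown that already cleared
	 * the bit must not trigger a second up().
	 */
	if (test_and_clear_bit(MY_RST_HANDLING, &mdev->state))
		up(&mdev->reset_sem);
}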
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index e821dd2f1528..032b472d2368 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -8,7 +8,9 @@
#include <linux/phy.h>
#include <linux/if_vlan.h>
#include <linux/kfifo.h>
+
#include <net/devlink.h>
+#include <net/ipv6.h>
#include "hclge_cmd.h"
#include "hclge_ptp.h"
@@ -279,11 +281,14 @@ struct hclge_mac {
u8 media_type; /* port media type, e.g. fibre/copper/backplane */
u8 mac_addr[ETH_ALEN];
u8 autoneg;
+ u8 req_autoneg;
u8 duplex;
+ u8 req_duplex;
u8 support_autoneg;
u8 speed_type; /* 0: sfp speed, 1: active speed */
u8 lane_num;
u32 speed;
+ u32 req_speed;
u32 max_speed;
u32 speed_ability; /* speed ability supported by current media */
u32 module_type; /* sub media type, e.g. kr/cr/sr/lr */
@@ -715,15 +720,15 @@ struct hclge_fd_cfg {
};
#define IPV4_INDEX 3
-#define IPV6_SIZE 4
+
struct hclge_fd_rule_tuples {
u8 src_mac[ETH_ALEN];
u8 dst_mac[ETH_ALEN];
/* Be compatible for ip address of both ipv4 and ipv6.
* For ipv4 address, we store it in src/dst_ip[3].
*/
- u32 src_ip[IPV6_SIZE];
- u32 dst_ip[IPV6_SIZE];
+ u32 src_ip[IPV6_ADDR_WORDS];
+ u32 dst_ip[IPV6_ADDR_WORDS];
u16 src_port;
u16 dst_port;
u16 vlan_tag1;
@@ -891,7 +896,7 @@ struct hclge_dev {
u16 fdir_pf_filter_count; /* Num of guaranteed filters for this PF */
u16 num_alloc_vport; /* Num vports this driver supports */
- u32 numa_node_mask;
+ nodemask_t numa_node_mask;
u16 rx_buf_len;
u16 num_tx_desc; /* desc num of per tx queue */
u16 num_rx_desc; /* desc num of per rx queue */
@@ -1137,8 +1142,8 @@ int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id);
int hclge_vport_start(struct hclge_vport *vport);
void hclge_vport_stop(struct hclge_vport *vport);
int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu);
-int hclge_dbg_read_cmd(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
- char *buf, int len);
+int hclge_dbg_get_read_func(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
+ read_func *func);
u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id);
int hclge_notify_client(struct hclge_dev *hdev,
enum hnae3_reset_notify_type type);
@@ -1169,4 +1174,5 @@ int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en);
int hclge_mac_update_stats(struct hclge_dev *hdev);
struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf);
int hclge_inform_vf_reset(struct hclge_vport *vport, u16 reset_type);
+int hclge_query_scc_version(struct hclge_dev *hdev, u32 *scc_version);
#endif
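In the header above, numa_node_mask changes from a plain u32 to nodemask_t; the VF hunks later in this patch copy it with bitmap_copy() over MAX_NUMNODES bits instead of a scalar assignment. A small sketch of that copy, with illustrative function and variable names:

#include <linux/nodemask.h>

static void copy_numa_mask(nodemask_t *dst, const nodemask_t *src)
{
	/* nodemask_t wraps a bitmap; copy all MAX_NUMNODES bits */
	bitmap_copy(dst->bits, src->bits, MAX_NUMNODES);
}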
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index d4a0e0be7a72..c7ff12a6c076 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -749,16 +749,17 @@ static int hclge_get_rss_key(struct hclge_vport *vport,
#define HCLGE_RSS_MBX_RESP_LEN 8
struct hclge_dev *hdev = vport->back;
struct hclge_comm_rss_cfg *rss_cfg;
+ int rss_hash_key_size;
u8 index;
index = mbx_req->msg.data[0];
rss_cfg = &hdev->rss_cfg;
+ rss_hash_key_size = sizeof(rss_cfg->rss_hash_key);
/* Check the query index of rss_hash_key from VF, make sure no
* more than the size of rss_hash_key.
*/
- if (((index + 1) * HCLGE_RSS_MBX_RESP_LEN) >
- sizeof(rss_cfg->rss_hash_key)) {
+ if (((index + 1) * HCLGE_RSS_MBX_RESP_LEN) > rss_hash_key_size) {
dev_warn(&hdev->pdev->dev,
"failed to get the rss hash key, the index(%u) invalid !\n",
index);
@@ -800,7 +801,7 @@ static void hclge_handle_link_change_event(struct hclge_dev *hdev,
static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
{
- u32 tail = hclge_read_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG);
+ int tail = hclge_read_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG);
return tail == hw->hw.cmq.crq.next_to_use;
}
@@ -1077,12 +1078,13 @@ static void hclge_mbx_request_handling(struct hclge_mbx_ops_param *param)
hdev = param->vport->back;
cmd_func = hclge_mbx_ops_list[param->req->msg.code];
- if (cmd_func)
- ret = cmd_func(param);
- else
+ if (!cmd_func) {
dev_err(&hdev->pdev->dev,
"un-supported mailbox message, code = %u\n",
param->req->msg.code);
+ return;
+ }
+ ret = cmd_func(param);
/* PF driver should not reply IMP */
if (hnae3_get_bit(param->req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B) &&
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 85fb11de43a1..cf881108fa57 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -151,7 +151,7 @@ int hclge_mac_mdio_config(struct hclge_dev *hdev)
mdio_bus->parent = &hdev->pdev->dev;
mdio_bus->priv = hdev;
- mdio_bus->phy_mask = ~(1 << mac->phy_addr);
+ mdio_bus->phy_mask = ~(1U << mac->phy_addr);
ret = mdiobus_register(mdio_bus);
if (ret) {
dev_err(mdio_bus->parent,
@@ -191,6 +191,9 @@ static void hclge_mac_adjust_link(struct net_device *netdev)
if (ret)
netdev_err(netdev, "failed to adjust link.\n");
+ hdev->hw.mac.req_speed = (u32)speed;
+ hdev->hw.mac.req_duplex = (u8)duplex;
+
ret = hclge_cfg_flowctrl(hdev);
if (ret)
netdev_err(netdev, "failed to configure flow control.\n");
@@ -255,7 +258,7 @@ void hclge_mac_start_phy(struct hclge_dev *hdev)
if (!phydev)
return;
- phy_loopback(phydev, false);
+ phy_loopback(phydev, false, 0);
phy_start(phydev);
}
@@ -271,7 +274,7 @@ void hclge_mac_stop_phy(struct hclge_dev *hdev)
phy_stop(phydev);
}
-u16 hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr)
+int hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 *val)
{
struct hclge_phy_reg_cmd *req;
struct hclge_desc desc;
@@ -283,11 +286,14 @@ u16 hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr)
req->reg_addr = cpu_to_le16(reg_addr);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
+ if (ret) {
dev_err(&hdev->pdev->dev,
"failed to read phy reg, ret = %d.\n", ret);
+ return ret;
+ }
- return le16_to_cpu(req->reg_val);
+ *val = le16_to_cpu(req->reg_val);
+ return 0;
}
int hclge_write_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 val)
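hclge_read_phy_reg() changes from returning the register value directly to returning an error code and handing the value back through a pointer, so callers can tell a failed firmware command apart from a legitimate 0xffff read. A hypothetical caller adapted to the new signature (the surrounding context is illustrative only):

	u16 val;
	int ret;

	ret = hclge_read_phy_reg(hdev, reg_addr, &val);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to read phy reg 0x%x, ret = %d\n",
			reg_addr, ret);
		return ret;
	}
	/* use val only after the command has succeeded */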
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
index 4200d0b6d931..21d434c82475 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
@@ -13,7 +13,7 @@ int hclge_mac_connect_phy(struct hnae3_handle *handle);
void hclge_mac_disconnect_phy(struct hnae3_handle *handle);
void hclge_mac_start_phy(struct hclge_dev *hdev);
void hclge_mac_stop_phy(struct hclge_dev *hdev);
-u16 hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr);
+int hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 *val);
int hclge_write_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 val);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
index 507d7ce26d83..0081c5281455 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
@@ -2,6 +2,7 @@
// Copyright (c) 2021 Hisilicon Limited.
#include <linux/skbuff.h>
+#include <linux/string_choices.h>
#include "hclge_main.h"
#include "hnae3.h"
@@ -58,6 +59,9 @@ bool hclge_ptp_set_tx_info(struct hnae3_handle *handle, struct sk_buff *skb)
struct hclge_dev *hdev = vport->back;
struct hclge_ptp *ptp = hdev->ptp;
+ if (!ptp)
+ return false;
+
if (!test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) ||
test_and_set_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state)) {
ptp->tx_skipped++;
@@ -200,13 +204,17 @@ static int hclge_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
return 0;
}
-int hclge_ptp_get_cfg(struct hclge_dev *hdev, struct ifreq *ifr)
+int hclge_ptp_get_cfg(struct hnae3_handle *handle,
+ struct kernel_hwtstamp_config *config)
{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state))
return -EOPNOTSUPP;
- return copy_to_user(ifr->ifr_data, &hdev->ptp->ts_cfg,
- sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
+ *config = hdev->ptp->ts_cfg;
+ return 0;
}
static int hclge_ptp_int_en(struct hclge_dev *hdev, bool en)
@@ -223,7 +231,7 @@ static int hclge_ptp_int_en(struct hclge_dev *hdev, bool en)
if (ret)
dev_err(&hdev->pdev->dev,
"failed to %s ptp interrupt, ret = %d\n",
- en ? "enable" : "disable", ret);
+ str_enable_disable(en), ret);
return ret;
}
@@ -265,7 +273,7 @@ static int hclge_ptp_cfg(struct hclge_dev *hdev, u32 cfg)
return ret;
}
-static int hclge_ptp_set_tx_mode(struct hwtstamp_config *cfg,
+static int hclge_ptp_set_tx_mode(struct kernel_hwtstamp_config *cfg,
unsigned long *flags, u32 *ptp_cfg)
{
switch (cfg->tx_type) {
@@ -283,7 +291,7 @@ static int hclge_ptp_set_tx_mode(struct hwtstamp_config *cfg,
return 0;
}
-static int hclge_ptp_set_rx_mode(struct hwtstamp_config *cfg,
+static int hclge_ptp_set_rx_mode(struct kernel_hwtstamp_config *cfg,
unsigned long *flags, u32 *ptp_cfg)
{
int rx_filter = cfg->rx_filter;
@@ -328,7 +336,7 @@ static int hclge_ptp_set_rx_mode(struct hwtstamp_config *cfg,
}
static int hclge_ptp_set_ts_mode(struct hclge_dev *hdev,
- struct hwtstamp_config *cfg)
+ struct kernel_hwtstamp_config *cfg)
{
unsigned long flags = hdev->ptp->flags;
u32 ptp_cfg = 0;
@@ -355,9 +363,12 @@ static int hclge_ptp_set_ts_mode(struct hclge_dev *hdev,
return 0;
}
-int hclge_ptp_set_cfg(struct hclge_dev *hdev, struct ifreq *ifr)
+int hclge_ptp_set_cfg(struct hnae3_handle *handle,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config cfg;
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
int ret;
if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state)) {
@@ -365,20 +376,17 @@ int hclge_ptp_set_cfg(struct hclge_dev *hdev, struct ifreq *ifr)
return -EOPNOTSUPP;
}
- if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
- return -EFAULT;
-
- ret = hclge_ptp_set_ts_mode(hdev, &cfg);
+ ret = hclge_ptp_set_ts_mode(hdev, config);
if (ret)
return ret;
- hdev->ptp->ts_cfg = cfg;
+ hdev->ptp->ts_cfg = *config;
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+ return 0;
}
int hclge_ptp_get_ts_info(struct hnae3_handle *handle,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -389,16 +397,12 @@ int hclge_ptp_get_ts_info(struct hnae3_handle *handle,
}
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
if (hdev->ptp->clock)
info->phc_index = ptp_clock_index(hdev->ptp->clock);
- else
- info->phc_index = -1;
info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
@@ -440,6 +444,13 @@ static int hclge_ptp_create_clock(struct hclge_dev *hdev)
ptp->info.settime64 = hclge_ptp_settime;
ptp->info.n_alarm = 0;
+
+ spin_lock_init(&ptp->lock);
+ ptp->io_base = hdev->hw.hw.io_base + HCLGE_PTP_REG_OFFSET;
+ ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
+ ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF;
+ hdev->ptp = ptp;
+
ptp->clock = ptp_clock_register(&ptp->info, &hdev->pdev->dev);
if (IS_ERR(ptp->clock)) {
dev_err(&hdev->pdev->dev,
@@ -451,12 +462,6 @@ static int hclge_ptp_create_clock(struct hclge_dev *hdev)
return -ENODEV;
}
- spin_lock_init(&ptp->lock);
- ptp->io_base = hdev->hw.hw.io_base + HCLGE_PTP_REG_OFFSET;
- ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
- ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF;
- hdev->ptp = ptp;
-
return 0;
}
@@ -484,7 +489,7 @@ int hclge_ptp_init(struct hclge_dev *hdev)
ret = hclge_ptp_get_cycle(hdev);
if (ret)
- return ret;
+ goto out;
}
ret = hclge_ptp_int_en(hdev, true);
@@ -496,14 +501,14 @@ int hclge_ptp_init(struct hclge_dev *hdev)
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to init freq, ret = %d\n", ret);
- goto out;
+ goto out_clear_int;
}
ret = hclge_ptp_set_ts_mode(hdev, &hdev->ptp->ts_cfg);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to init ts mode, ret = %d\n", ret);
- goto out;
+ goto out_clear_int;
}
ktime_get_real_ts64(&ts);
@@ -511,7 +516,7 @@ int hclge_ptp_init(struct hclge_dev *hdev)
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to init ts time, ret = %d\n", ret);
- goto out;
+ goto out_clear_int;
}
set_bit(HCLGE_STATE_PTP_EN, &hdev->state);
@@ -519,6 +524,9 @@ int hclge_ptp_init(struct hclge_dev *hdev)
return 0;
+out_clear_int:
+ clear_bit(HCLGE_PTP_FLAG_EN, &hdev->ptp->flags);
+ hclge_ptp_int_en(hdev, false);
out:
hclge_ptp_destroy_clock(hdev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
index bbee74cd8404..0162fa5ac146 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
@@ -25,7 +25,7 @@ struct ifreq;
#define HCLGE_PTP_TIME_SEC_H_MASK GENMASK(15, 0)
#define HCLGE_PTP_TIME_SEC_L_REG 0x54
#define HCLGE_PTP_TIME_NSEC_REG 0x58
-#define HCLGE_PTP_TIME_NSEC_MASK GENMASK(29, 0)
+#define HCLGE_PTP_TIME_NSEC_MASK 0x3fffffffLL
#define HCLGE_PTP_TIME_NSEC_NEG BIT(31)
#define HCLGE_PTP_TIME_SYNC_REG 0x5C
#define HCLGE_PTP_TIME_SYNC_EN BIT(0)
@@ -62,7 +62,7 @@ struct hclge_ptp {
unsigned long flags;
void __iomem *io_base;
struct ptp_clock_info info;
- struct hwtstamp_config ts_cfg;
+ struct kernel_hwtstamp_config ts_cfg;
spinlock_t lock; /* protects ptp registers */
u32 ptp_cfg;
u32 last_tx_seqid;
@@ -133,11 +133,14 @@ bool hclge_ptp_set_tx_info(struct hnae3_handle *handle, struct sk_buff *skb);
void hclge_ptp_clean_tx_hwts(struct hclge_dev *hdev);
void hclge_ptp_get_rx_hwts(struct hnae3_handle *handle, struct sk_buff *skb,
u32 nsec, u32 sec);
-int hclge_ptp_get_cfg(struct hclge_dev *hdev, struct ifreq *ifr);
-int hclge_ptp_set_cfg(struct hclge_dev *hdev, struct ifreq *ifr);
+int hclge_ptp_get_cfg(struct hnae3_handle *handle,
+ struct kernel_hwtstamp_config *config);
+int hclge_ptp_set_cfg(struct hnae3_handle *handle,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
int hclge_ptp_init(struct hclge_dev *hdev);
void hclge_ptp_uninit(struct hclge_dev *hdev);
int hclge_ptp_get_ts_info(struct hnae3_handle *handle,
- struct ethtool_ts_info *info);
+ struct kernel_ethtool_ts_info *info);
int hclge_ptp_cfg_qry(struct hclge_dev *hdev, u32 *cfg);
#endif
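With hclge_ptp_get_cfg()/hclge_ptp_set_cfg() now taking struct kernel_hwtstamp_config instead of copying struct hwtstamp_config to and from user space, the ioctl plumbing leaves the PTP code and the handlers are exposed through the .hwtstamp_get/.hwtstamp_set ae_ops added earlier in this patch. A rough sketch of how an upper layer might bridge the netdev hwtstamp callbacks to these ops; hns3_hwtstamp_set() as written here is an assumption for illustration, not part of this diff:

static int hns3_hwtstamp_set(struct net_device *netdev,
			     struct kernel_hwtstamp_config *cfg,
			     struct netlink_ext_ack *extack)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (!h->ae_algo->ops->hwtstamp_set)
		return -EOPNOTSUPP;

	/* no copy_from_user()/copy_to_user() here; the core passes a
	 * kernel-space config and writes it back on success
	 */
	return h->ae_algo->ops->hwtstamp_set(h, cfg, extack);
}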
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c
index 43c1c18fa81f..8c057192aae6 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c
@@ -510,9 +510,9 @@ out:
static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
struct hnae3_knic_private_info *kinfo)
{
-#define HCLGE_RING_REG_OFFSET 0x200
#define HCLGE_RING_INT_REG_OFFSET 0x4
+ struct hnae3_queue *tqp;
int i, j, reg_num;
int data_num_sum;
u32 *reg = data;
@@ -533,10 +533,11 @@ static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
reg_num = ARRAY_SIZE(ring_reg_addr_list);
for (j = 0; j < kinfo->num_tqps; j++) {
reg += hclge_reg_get_tlv(HCLGE_REG_TAG_RING, reg_num, reg);
+ tqp = kinfo->tqp[j];
for (i = 0; i < reg_num; i++)
- *reg++ = hclge_read_dev(&hdev->hw,
- ring_reg_addr_list[i] +
- HCLGE_RING_REG_OFFSET * j);
+ *reg++ = readl_relaxed(tqp->io_base -
+ HCLGE_TQP_REG_OFFSET +
+ ring_reg_addr_list[i]);
}
data_num_sum += (reg_num + HCLGE_REG_TLV_SPACE) * kinfo->num_tqps;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h
index f3cd5a376eca..7103cf04bffc 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h
@@ -10,6 +10,7 @@
#include <linux/tracepoint.h>
+#define PF_DESC_LEN (sizeof(struct hclge_desc) / sizeof(u32))
#define PF_GET_MBX_LEN (sizeof(struct hclge_mbx_vf_to_pf_cmd) / sizeof(u32))
#define PF_SEND_MBX_LEN (sizeof(struct hclge_mbx_pf_to_vf_cmd) / sizeof(u32))
@@ -32,8 +33,8 @@ TRACE_EVENT(hclge_pf_mbx_get,
__entry->vfid = req->mbx_src_vfid;
__entry->code = req->msg.code;
__entry->subcode = req->msg.subcode;
- __assign_str(pciname, pci_name(hdev->pdev));
- __assign_str(devname, hdev->vport[0].nic.kinfo.netdev->name);
+ __assign_str(pciname);
+ __assign_str(devname);
memcpy(__entry->mbx_data, req,
sizeof(struct hclge_mbx_vf_to_pf_cmd));
),
@@ -63,8 +64,8 @@ TRACE_EVENT(hclge_pf_mbx_send,
TP_fast_assign(
__entry->vfid = req->dest_vfid;
__entry->code = le16_to_cpu(req->msg.code);
- __assign_str(pciname, pci_name(hdev->pdev));
- __assign_str(devname, hdev->vport[0].nic.kinfo.netdev->name);
+ __assign_str(pciname);
+ __assign_str(devname);
memcpy(__entry->mbx_data, req,
sizeof(struct hclge_mbx_pf_to_vf_cmd));
),
@@ -77,6 +78,99 @@ TRACE_EVENT(hclge_pf_mbx_send,
)
);
+DECLARE_EVENT_CLASS(hclge_pf_cmd_template,
+ TP_PROTO(struct hclge_comm_hw *hw,
+ struct hclge_desc *desc,
+ int index,
+ int num),
+ TP_ARGS(hw, desc, index, num),
+
+ TP_STRUCT__entry(__field(u16, opcode)
+ __field(u16, flag)
+ __field(u16, retval)
+ __field(u16, rsv)
+ __field(int, index)
+ __field(int, num)
+ __string(pciname, pci_name(hw->cmq.csq.pdev))
+ __array(u32, data, HCLGE_DESC_DATA_LEN)),
+
+ TP_fast_assign(int i;
+ __entry->opcode = le16_to_cpu(desc->opcode);
+ __entry->flag = le16_to_cpu(desc->flag);
+ __entry->retval = le16_to_cpu(desc->retval);
+ __entry->rsv = le16_to_cpu(desc->rsv);
+ __entry->index = index;
+ __entry->num = num;
+ __assign_str(pciname);
+ for (i = 0; i < HCLGE_DESC_DATA_LEN; i++)
+ __entry->data[i] = le32_to_cpu(desc->data[i]);),
+
+ TP_printk("%s opcode:0x%04x %d-%d flag:0x%04x retval:0x%04x rsv:0x%04x data:%s",
+ __get_str(pciname), __entry->opcode,
+ __entry->index, __entry->num,
+ __entry->flag, __entry->retval, __entry->rsv,
+ __print_array(__entry->data,
+ HCLGE_DESC_DATA_LEN, sizeof(u32)))
+);
+
+DEFINE_EVENT(hclge_pf_cmd_template, hclge_pf_cmd_send,
+ TP_PROTO(struct hclge_comm_hw *hw,
+ struct hclge_desc *desc,
+ int index,
+ int num),
+ TP_ARGS(hw, desc, index, num)
+);
+
+DEFINE_EVENT(hclge_pf_cmd_template, hclge_pf_cmd_get,
+ TP_PROTO(struct hclge_comm_hw *hw,
+ struct hclge_desc *desc,
+ int index,
+ int num),
+ TP_ARGS(hw, desc, index, num)
+);
+
+DECLARE_EVENT_CLASS(hclge_pf_special_cmd_template,
+ TP_PROTO(struct hclge_comm_hw *hw,
+ __le32 *data,
+ int index,
+ int num),
+ TP_ARGS(hw, data, index, num),
+
+ TP_STRUCT__entry(__field(int, index)
+ __field(int, num)
+ __string(pciname, pci_name(hw->cmq.csq.pdev))
+ __array(u32, data, PF_DESC_LEN)),
+
+ TP_fast_assign(int i;
+ __entry->index = index;
+ __entry->num = num;
+ __assign_str(pciname);
+ for (i = 0; i < PF_DESC_LEN; i++)
+ __entry->data[i] = le32_to_cpu(data[i]);
+ ),
+
+ TP_printk("%s %d-%d data:%s",
+ __get_str(pciname),
+ __entry->index, __entry->num,
+ __print_array(__entry->data,
+ PF_DESC_LEN, sizeof(u32)))
+);
+
+DEFINE_EVENT(hclge_pf_special_cmd_template, hclge_pf_special_cmd_send,
+ TP_PROTO(struct hclge_comm_hw *hw,
+ __le32 *desc,
+ int index,
+ int num),
+ TP_ARGS(hw, desc, index, num));
+
+DEFINE_EVENT(hclge_pf_special_cmd_template, hclge_pf_special_cmd_get,
+ TP_PROTO(struct hclge_comm_hw *hw,
+ __le32 *desc,
+ int index,
+ int num),
+ TP_ARGS(hw, desc, index, num)
+);
+
#endif /* _HCLGE_TRACE_H_ */
/* This must be outside ifdef _HCLGE_TRACE_H */
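The new hclge_pf_cmd_send/hclge_pf_cmd_get events mirror the VF-side events defined further below, and the hclgevf_main.c hunks show how those are wired up: per-descriptor trace calls registered through a struct hclge_comm_cmq_ops. A sketch of the presumed PF-side hook, mirroring the VF code in this patch (only hclge_cmq_ops itself appears in the hclge_main.c hunk; the body here is an assumption):

static void hclge_trace_cmd_send(struct hclge_comm_hw *hw,
				 struct hclge_desc *desc,
				 int num, bool is_special)
{
	int i;

	trace_hclge_pf_cmd_send(hw, desc, 0, num);

	if (is_special)
		return;

	for (i = 1; i < num; i++)
		trace_hclge_pf_cmd_send(hw, &desc[i], i, num);
}

static const struct hclge_comm_cmq_ops hclge_cmq_ops = {
	.trace_cmd_send = hclge_trace_cmd_send,
	/* .trace_cmd_get would mirror the VF variant */
};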
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 0aa9beefd1c7..8fcf220a120d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -11,6 +11,7 @@
#include "hnae3.h"
#include "hclgevf_devlink.h"
#include "hclge_comm_rss.h"
+#include "hclgevf_trace.h"
#define HCLGEVF_NAME "hclgevf"
@@ -47,6 +48,42 @@ int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclge_desc *desc, int num)
return hclge_comm_cmd_send(&hw->hw, desc, num);
}
+static void hclgevf_trace_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
+ int num, bool is_special)
+{
+ int i;
+
+ trace_hclge_vf_cmd_send(hw, desc, 0, num);
+
+ if (is_special)
+ return;
+
+ for (i = 1; i < num; i++)
+ trace_hclge_vf_cmd_send(hw, &desc[i], i, num);
+}
+
+static void hclgevf_trace_cmd_get(struct hclge_comm_hw *hw, struct hclge_desc *desc,
+ int num, bool is_special)
+{
+ int i;
+
+ if (!HCLGE_COMM_SEND_SYNC(le16_to_cpu(desc->flag)))
+ return;
+
+ trace_hclge_vf_cmd_get(hw, desc, 0, num);
+
+ if (is_special)
+ return;
+
+ for (i = 1; i < num; i++)
+ trace_hclge_vf_cmd_get(hw, &desc[i], i, num);
+}
+
+static const struct hclge_comm_cmq_ops hclgevf_cmq_ops = {
+ .trace_cmd_send = hclgevf_trace_cmd_send,
+ .trace_cmd_get = hclgevf_trace_cmd_get,
+};
+
void hclgevf_arq_init(struct hclgevf_dev *hdev)
{
struct hclge_comm_cmq *cmdq = &hdev->hw.hw.cmq;
@@ -93,12 +130,10 @@ static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
}
static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
- u8 *data)
+ u8 **data)
{
- u8 *p = (char *)data;
-
if (strset == ETH_SS_STATS)
- p = hclge_comm_tqps_get_strings(handle, p);
+ hclge_comm_tqps_get_strings(handle, data);
}
static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
@@ -412,7 +447,8 @@ static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
nic->ae_algo = &ae_algovf;
nic->pdev = hdev->pdev;
- nic->numa_node_mask = hdev->numa_node_mask;
+ bitmap_copy(nic->numa_node_mask.bits, hdev->numa_node_mask.bits,
+ MAX_NUMNODES);
nic->flags |= HNAE3_SUPPORT_VF;
nic->kinfo.io_base = hdev->hw.hw.io_base;
@@ -570,7 +606,7 @@ static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
}
static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
- struct ethtool_rxnfc *nfc)
+ const struct ethtool_rxfh_fields *nfc)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
int ret;
@@ -588,7 +624,7 @@ static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
}
static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
- struct ethtool_rxnfc *nfc)
+ struct ethtool_rxfh_fields *nfc)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
u8 tuple_sets;
@@ -1256,9 +1292,8 @@ static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
rtnl_unlock();
}
-static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
+static int hclgevf_en_hw_strip_rxvtag_cmd(struct hclgevf_dev *hdev, bool enable)
{
- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
struct hclge_vf_to_pf_msg send_msg;
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
@@ -1267,6 +1302,19 @@ static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}
+static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int ret;
+
+ ret = hclgevf_en_hw_strip_rxvtag_cmd(hdev, enable);
+ if (ret)
+ return ret;
+
+ hdev->rxvtag_strip_en = enable;
+ return 0;
+}
+
static int hclgevf_reset_tqp(struct hnae3_handle *handle)
{
#define HCLGEVF_RESET_ALL_QUEUE_DONE 1U
@@ -1357,6 +1405,17 @@ static int hclgevf_notify_roce_client(struct hclgevf_dev *hdev,
return ret;
}
+static void hclgevf_set_reset_pending(struct hclgevf_dev *hdev,
+ enum hnae3_reset_type reset_type)
+{
+ /* When an incorrect reset type is executed, the get_reset_level
+ * function generates the HNAE3_NONE_RESET flag. As a result, this
+	 * type does not need to be marked as pending.
+ */
+ if (reset_type != HNAE3_NONE_RESET)
+ set_bit(reset_type, &hdev->reset_pending);
+}
+
static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_US 20000
@@ -1506,7 +1565,7 @@ static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
hdev->rst_stats.rst_fail_cnt);
if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT)
- set_bit(hdev->reset_type, &hdev->reset_pending);
+ hclgevf_set_reset_pending(hdev, hdev->reset_type);
if (hclgevf_is_reset_pending(hdev)) {
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
@@ -1626,6 +1685,8 @@ static enum hnae3_reset_type hclgevf_get_reset_level(unsigned long *addr)
clear_bit(HNAE3_FLR_RESET, addr);
}
+ clear_bit(HNAE3_NONE_RESET, addr);
+
return rst_level;
}
@@ -1635,14 +1696,15 @@ static void hclgevf_reset_event(struct pci_dev *pdev,
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
struct hclgevf_dev *hdev = ae_dev->priv;
- dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");
-
if (hdev->default_reset_request)
hdev->reset_level =
hclgevf_get_reset_level(&hdev->default_reset_request);
else
hdev->reset_level = HNAE3_VF_FUNC_RESET;
+ dev_info(&hdev->pdev->dev, "received reset request from VF enet, reset level is %d\n",
+ hdev->reset_level);
+
/* reset of this VF requested */
set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
hclgevf_reset_task_schedule(hdev);
@@ -1653,8 +1715,20 @@ static void hclgevf_reset_event(struct pci_dev *pdev,
static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
enum hnae3_reset_type rst_type)
{
+#define HCLGEVF_SUPPORT_RESET_TYPE \
+ (BIT(HNAE3_VF_RESET) | BIT(HNAE3_VF_FUNC_RESET) | \
+ BIT(HNAE3_VF_PF_FUNC_RESET) | BIT(HNAE3_VF_FULL_RESET) | \
+ BIT(HNAE3_FLR_RESET) | BIT(HNAE3_VF_EXP_RESET))
+
struct hclgevf_dev *hdev = ae_dev->priv;
+ if (!(BIT(rst_type) & HCLGEVF_SUPPORT_RESET_TYPE)) {
+ /* To prevent reset triggered by hclge_reset_event */
+ set_bit(HNAE3_NONE_RESET, &hdev->default_reset_request);
+ dev_info(&hdev->pdev->dev, "unsupported reset type %d\n",
+ rst_type);
+ return;
+ }
set_bit(rst_type, &hdev->default_reset_request);
}
@@ -1709,8 +1783,8 @@ static void hclgevf_reset_done(struct hnae3_ae_dev *ae_dev)
ret);
hdev->reset_type = HNAE3_NONE_RESET;
- clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
- up(&hdev->reset_sem);
+ if (test_and_clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
+ up(&hdev->reset_sem);
}
static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
@@ -1811,14 +1885,14 @@ static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
*/
if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) {
/* prepare for full reset of stack + pcie interface */
- set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);
+ hclgevf_set_reset_pending(hdev, HNAE3_VF_FULL_RESET);
/* "defer" schedule the reset task again */
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
} else {
hdev->reset_attempts++;
- set_bit(hdev->reset_level, &hdev->reset_pending);
+ hclgevf_set_reset_pending(hdev, hdev->reset_level);
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
}
hclgevf_reset_task_schedule(hdev);
@@ -1941,7 +2015,7 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
dev_info(&hdev->pdev->dev,
"receive reset interrupt 0x%x!\n", rst_ing_reg);
- set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
+ hclgevf_set_reset_pending(hdev, HNAE3_VF_RESET);
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
*clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B);
@@ -1983,7 +2057,7 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
static void hclgevf_reset_timer(struct timer_list *t)
{
- struct hclgevf_dev *hdev = from_timer(hdev, t, reset_timer);
+ struct hclgevf_dev *hdev = timer_container_of(hdev, t, reset_timer);
hclgevf_clear_event_cause(hdev, HCLGEVF_VECTOR0_EVENT_RST);
hclgevf_reset_task_schedule(hdev);
@@ -2082,8 +2156,8 @@ static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
roce->pdev = nic->pdev;
roce->ae_algo = nic->ae_algo;
- roce->numa_node_mask = nic->numa_node_mask;
-
+ bitmap_copy(roce->numa_node_mask.bits, nic->numa_node_mask.bits,
+ MAX_NUMNODES);
return 0;
}
@@ -2142,12 +2216,13 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
tc_valid, tc_size);
}
-static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
+static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev,
+ bool rxvtag_strip_en)
{
struct hnae3_handle *nic = &hdev->nic;
int ret;
- ret = hclgevf_en_hw_strip_rxvtag(nic, true);
+ ret = hclgevf_en_hw_strip_rxvtag(nic, rxvtag_strip_en);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to enable rx vlan offload, ret = %d\n", ret);
@@ -2180,8 +2255,7 @@ static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable)
} else {
set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
- /* flush memory to make sure DOWN is seen by service task */
- smp_mb__before_atomic();
+ smp_mb__after_atomic(); /* flush memory to make sure DOWN is seen by service task */
hclgevf_flush_link_update(hdev);
}
}
@@ -2252,6 +2326,8 @@ static void hclgevf_state_init(struct hclgevf_dev *hdev)
clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task);
+ /* timer needs to be initialized before misc irq */
+ timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0);
mutex_init(&hdev->mbx_resp.mbx_mutex);
sema_init(&hdev->reset_sem, 1);
@@ -2389,7 +2465,7 @@ static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
struct hnae3_client *client)
{
struct hclgevf_dev *hdev = ae_dev->priv;
- int rst_cnt = hdev->rst_stats.rst_cnt;
+ u32 rst_cnt = hdev->rst_stats.rst_cnt;
int ret;
ret = client->ops->init_instance(&hdev->nic);
@@ -2549,7 +2625,7 @@ static int hclgevf_pci_init(struct hclgevf_dev *hdev)
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (ret) {
- dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting");
+ dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting\n");
goto err_disable_device;
}
@@ -2796,6 +2872,7 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
}
hclgevf_arq_init(hdev);
+
ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw,
&hdev->fw_version, false,
hdev->reset_pending);
@@ -2815,7 +2892,7 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
if (ret)
return ret;
- ret = hclgevf_init_vlan_config(hdev);
+ ret = hclgevf_init_vlan_config(hdev, hdev->rxvtag_strip_en);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed(%d) to initialize VLAN config\n", ret);
@@ -2845,15 +2922,13 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
if (ret)
return ret;
- ret = hclgevf_devlink_init(hdev);
- if (ret)
- goto err_devlink_init;
-
ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw);
if (ret)
goto err_cmd_queue_init;
hclgevf_arq_init(hdev);
+
+ hclge_comm_cmd_init_ops(&hdev->hw.hw, &hclgevf_cmq_ops);
ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw,
&hdev->fw_version, false,
hdev->reset_pending);
@@ -2932,7 +3007,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_config;
}
- ret = hclgevf_init_vlan_config(hdev);
+ ret = hclgevf_init_vlan_config(hdev, true);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed(%d) to initialize VLAN config\n", ret);
@@ -2941,6 +3016,10 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
hclgevf_init_rxd_adv_layout(hdev);
+ ret = hclgevf_devlink_init(hdev);
+ if (ret)
+ goto err_config;
+
set_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state);
hdev->last_reset_time = jiffies;
@@ -2948,7 +3027,6 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
HCLGEVF_DRIVER_NAME);
hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));
- timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0);
return 0;
@@ -2960,8 +3038,6 @@ err_misc_irq_init:
err_cmd_init:
hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
err_cmd_queue_init:
- hclgevf_devlink_uninit(hdev);
-err_devlink_init:
hclgevf_pci_uninit(hdev);
clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
return ret;
@@ -3018,11 +3094,7 @@ static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
{
- struct hnae3_handle *nic = &hdev->nic;
- struct hnae3_knic_private_info *kinfo = &nic->kinfo;
-
- return min_t(u32, hdev->rss_size_max,
- hdev->num_tqps / kinfo->tc_info.num_tc);
+ return min(hdev->rss_size_max, hdev->num_tqps);
}
/**
@@ -3347,8 +3419,10 @@ static int __init hclgevf_init(void)
static void __exit hclgevf_exit(void)
{
+ hnae3_acquire_unload_lock();
hnae3_unregister_ae_algo(&ae_algovf);
destroy_workqueue(hclgevf_wq);
+ hnae3_release_unload_lock();
}
module_init(hclgevf_init);
module_exit(hclgevf_exit);
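Among the smaller conversions in hclgevf_main.c above, the reset timer callback switches from from_timer() to timer_container_of(); both recover the embedding structure from the timer_list pointer, the newer name simply following container_of() conventions. A minimal sketch with placeholder names (my_dev, my_timer and work are illustrative):

static void my_timer_fn(struct timer_list *t)
{
	/* map the timer_list back to the structure that embeds it */
	struct my_dev *mdev = timer_container_of(mdev, t, my_timer);

	schedule_work(&mdev->work);
}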
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index a73f2bf3a56a..0208425ab594 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -236,7 +236,7 @@ struct hclgevf_dev {
u16 rss_size_max; /* HW defined max RSS task queue */
u16 num_alloc_vport; /* num vports this driver supports */
- u32 numa_node_mask;
+ nodemask_t numa_node_mask;
u16 rx_buf_len;
u16 num_tx_desc; /* desc num of per tx queue */
u16 num_rx_desc; /* desc num of per rx queue */
@@ -253,6 +253,7 @@ struct hclgevf_dev {
int *vector_irq;
bool gro_en;
+ bool rxvtag_strip_en;
unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)];
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index 85c2a634c8f9..f5c99ca54369 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -159,7 +159,7 @@ static bool hclgevf_cmd_crq_empty(struct hclgevf_hw *hw)
{
u32 tail = hclgevf_read_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG);
- return tail == hw->hw.cmq.crq.next_to_use;
+ return tail == (u32)hw->hw.cmq.crq.next_to_use;
}
static void hclgevf_handle_mbx_response(struct hclgevf_dev *hdev,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
index 65b9dcd38137..9de01e344e27 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
@@ -123,40 +123,42 @@ int hclgevf_get_regs_len(struct hnae3_handle *handle)
void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
void *data)
{
-#define HCLGEVF_RING_REG_OFFSET 0x200
#define HCLGEVF_RING_INT_REG_OFFSET 0x4
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- int i, j, reg_um;
+ struct hnae3_queue *tqp;
+ int i, j, reg_num;
u32 *reg = data;
*version = hdev->fw_version;
reg += hclgevf_reg_get_header(reg);
/* fetching per-VF registers values from VF PCIe register space */
- reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
- reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_CMDQ, reg_um, reg);
- for (i = 0; i < reg_um; i++)
+ reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
+ reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_CMDQ, reg_num, reg);
+ for (i = 0; i < reg_num; i++)
*reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
- reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
- reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_COMMON, reg_um, reg);
- for (i = 0; i < reg_um; i++)
+ reg_num = ARRAY_SIZE(common_reg_addr_list);
+ reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_COMMON, reg_num, reg);
+ for (i = 0; i < reg_num; i++)
*reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]);
- reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
+ reg_num = ARRAY_SIZE(ring_reg_addr_list);
for (j = 0; j < hdev->num_tqps; j++) {
- reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_RING, reg_um, reg);
- for (i = 0; i < reg_um; i++)
- *reg++ = hclgevf_read_dev(&hdev->hw,
- ring_reg_addr_list[i] +
- HCLGEVF_RING_REG_OFFSET * j);
+ reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_RING, reg_num, reg);
+ tqp = &hdev->htqp[j].q;
+ for (i = 0; i < reg_num; i++)
+ *reg++ = readl_relaxed(tqp->io_base -
+ HCLGEVF_TQP_REG_OFFSET +
+ ring_reg_addr_list[i]);
}
- reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
+ reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
for (j = 0; j < hdev->num_msi_used - 1; j++) {
- reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_TQP_INTR, reg_um, reg);
- for (i = 0; i < reg_um; i++)
+ reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_TQP_INTR,
+ reg_num, reg);
+ for (i = 0; i < reg_num; i++)
*reg++ = hclgevf_read_dev(&hdev->hw,
tqp_intr_reg_addr_list[i] +
HCLGEVF_RING_INT_REG_OFFSET * j);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h
index b259e95dd53c..66b084309c91 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h
@@ -30,8 +30,8 @@ TRACE_EVENT(hclge_vf_mbx_get,
TP_fast_assign(
__entry->vfid = req->dest_vfid;
__entry->code = le16_to_cpu(req->msg.code);
- __assign_str(pciname, pci_name(hdev->pdev));
- __assign_str(devname, hdev->nic.kinfo.netdev->name);
+ __assign_str(pciname);
+ __assign_str(devname);
memcpy(__entry->mbx_data, req,
sizeof(struct hclge_mbx_pf_to_vf_cmd));
),
@@ -63,8 +63,8 @@ TRACE_EVENT(hclge_vf_mbx_send,
__entry->vfid = req->mbx_src_vfid;
__entry->code = req->msg.code;
__entry->subcode = req->msg.subcode;
- __assign_str(pciname, pci_name(hdev->pdev));
- __assign_str(devname, hdev->nic.kinfo.netdev->name);
+ __assign_str(pciname);
+ __assign_str(devname);
memcpy(__entry->mbx_data, req,
sizeof(struct hclge_mbx_vf_to_pf_cmd));
),
@@ -77,6 +77,56 @@ TRACE_EVENT(hclge_vf_mbx_send,
)
);
+DECLARE_EVENT_CLASS(hclge_vf_cmd_template,
+ TP_PROTO(struct hclge_comm_hw *hw,
+ struct hclge_desc *desc,
+ int index,
+ int num),
+
+ TP_ARGS(hw, desc, index, num),
+
+ TP_STRUCT__entry(__field(u16, opcode)
+ __field(u16, flag)
+ __field(u16, retval)
+ __field(u16, rsv)
+ __field(int, index)
+ __field(int, num)
+ __string(pciname, pci_name(hw->cmq.csq.pdev))
+ __array(u32, data, HCLGE_DESC_DATA_LEN)),
+
+ TP_fast_assign(int i;
+ __entry->opcode = le16_to_cpu(desc->opcode);
+ __entry->flag = le16_to_cpu(desc->flag);
+ __entry->retval = le16_to_cpu(desc->retval);
+ __entry->rsv = le16_to_cpu(desc->rsv);
+ __entry->index = index;
+ __entry->num = num;
+ __assign_str(pciname);
+ for (i = 0; i < HCLGE_DESC_DATA_LEN; i++)
+ __entry->data[i] = le32_to_cpu(desc->data[i]);),
+
+ TP_printk("%s opcode:0x%04x %d-%d flag:0x%04x retval:0x%04x rsv:0x%04x data:%s",
+ __get_str(pciname), __entry->opcode,
+ __entry->index, __entry->num,
+ __entry->flag, __entry->retval, __entry->rsv,
+ __print_array(__entry->data,
+ HCLGE_DESC_DATA_LEN, sizeof(u32)))
+);
+
+DEFINE_EVENT(hclge_vf_cmd_template, hclge_vf_cmd_send,
+ TP_PROTO(struct hclge_comm_hw *hw,
+ struct hclge_desc *desc,
+ int index,
+ int num),
+ TP_ARGS(hw, desc, index, num));
+
+DEFINE_EVENT(hclge_vf_cmd_template, hclge_vf_cmd_get,
+ TP_PROTO(struct hclge_comm_hw *hw,
+ struct hclge_desc *desc,
+ int index,
+ int num),
+ TP_ARGS(hw, desc, index, num));
+
#endif /* _HCLGEVF_TRACE_H_ */
/* This must be outside ifdef _HCLGEVF_TRACE_H */
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index ed73707176c1..6812be8dc64f 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -575,6 +575,7 @@ static int hns_mdio_probe(struct platform_device *pdev)
MDIO_SC_RESET_ST;
}
}
+ of_node_put(reg_args.np);
} else {
dev_warn(&pdev->dev, "find syscon ret = %#x\n", ret);
mdio_dev->subctrl_vbase = NULL;
@@ -635,11 +636,11 @@ MODULE_DEVICE_TABLE(acpi, hns_mdio_acpi_match);
static struct platform_driver hns_mdio_driver = {
.probe = hns_mdio_probe,
- .remove_new = hns_mdio_remove,
+ .remove = hns_mdio_remove,
.driver = {
.name = MDIO_DRV_NAME,
.of_match_table = hns_mdio_match,
- .acpi_match_table = ACPI_PTR(hns_mdio_acpi_match),
+ .acpi_match_table = hns_mdio_acpi_match,
},
};
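The hns_mdio changes drop the transitional .remove_new name now that platform_driver's .remove callback takes the void-returning prototype, and register the ACPI match table directly instead of through ACPI_PTR(). A compact sketch of the resulting declaration; the empty remove body is a placeholder, not the driver's actual teardown:

static void hns_mdio_remove(struct platform_device *pdev)
{
	/* teardown; no return value with the void .remove prototype */
}

static struct platform_driver hns_mdio_driver = {
	.probe = hns_mdio_probe,
	.remove = hns_mdio_remove,
	.driver = {
		.name = MDIO_DRV_NAME,
		.of_match_table = hns_mdio_match,
		.acpi_match_table = hns_mdio_acpi_match,
	},
};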
diff --git a/drivers/net/ethernet/huawei/Kconfig b/drivers/net/ethernet/huawei/Kconfig
index c05fce15eb51..7d0feb1da158 100644
--- a/drivers/net/ethernet/huawei/Kconfig
+++ b/drivers/net/ethernet/huawei/Kconfig
@@ -16,5 +16,6 @@ config NET_VENDOR_HUAWEI
if NET_VENDOR_HUAWEI
source "drivers/net/ethernet/huawei/hinic/Kconfig"
+source "drivers/net/ethernet/huawei/hinic3/Kconfig"
endif # NET_VENDOR_HUAWEI
diff --git a/drivers/net/ethernet/huawei/Makefile b/drivers/net/ethernet/huawei/Makefile
index 2549ad5afe6d..59865b882879 100644
--- a/drivers/net/ethernet/huawei/Makefile
+++ b/drivers/net/ethernet/huawei/Makefile
@@ -4,3 +4,4 @@
#
obj-$(CONFIG_HINIC) += hinic/
+obj-$(CONFIG_HINIC3) += hinic3/
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
index 03e42512a2d5..300bc267a259 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
@@ -443,8 +443,9 @@ int hinic_health_reporters_create(struct hinic_devlink_priv *priv)
struct devlink *devlink = priv_to_devlink(priv);
priv->hw_fault_reporter =
- devlink_health_reporter_create(devlink, &hinic_hw_fault_reporter_ops,
- 0, priv);
+ devlink_health_reporter_create(devlink,
+ &hinic_hw_fault_reporter_ops,
+ priv);
if (IS_ERR(priv->hw_fault_reporter)) {
dev_warn(&priv->hwdev->hwif->pdev->dev, "Failed to create hw fault reporter, err: %ld\n",
PTR_ERR(priv->hw_fault_reporter));
@@ -452,8 +453,9 @@ int hinic_health_reporters_create(struct hinic_devlink_priv *priv)
}
priv->fw_fault_reporter =
- devlink_health_reporter_create(devlink, &hinic_fw_fault_reporter_ops,
- 0, priv);
+ devlink_health_reporter_create(devlink,
+ &hinic_fw_fault_reporter_ops,
+ priv);
if (IS_ERR(priv->fw_fault_reporter)) {
dev_warn(&priv->hwdev->hwif->pdev->dev, "Failed to create fw fault reporter, err: %ld\n",
PTR_ERR(priv->fw_fault_reporter));
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
index 0304f03d4093..e9f338e9dbe7 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
@@ -919,9 +919,10 @@ static int hinic_set_channels(struct net_device *netdev,
return 0;
}
-static int hinic_get_rss_hash_opts(struct hinic_dev *nic_dev,
- struct ethtool_rxnfc *cmd)
+static int hinic_get_rxfh_fields(struct net_device *netdev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
struct hinic_rss_type rss_type = { 0 };
int err;
@@ -964,7 +965,7 @@ static int hinic_get_rss_hash_opts(struct hinic_dev *nic_dev,
return 0;
}
-static int set_l4_rss_hash_ops(struct ethtool_rxnfc *cmd,
+static int set_l4_rss_hash_ops(const struct ethtool_rxfh_fields *cmd,
struct hinic_rss_type *rss_type)
{
u8 rss_l4_en = 0;
@@ -1000,16 +1001,18 @@ static int set_l4_rss_hash_ops(struct ethtool_rxnfc *cmd,
return 0;
}
-static int hinic_set_rss_hash_opts(struct hinic_dev *nic_dev,
- struct ethtool_rxnfc *cmd)
+static int hinic_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
{
- struct hinic_rss_type *rss_type = &nic_dev->rss_type;
+ struct hinic_dev *nic_dev = netdev_priv(dev);
+ struct hinic_rss_type *rss_type;
int err;
- if (!(nic_dev->flags & HINIC_RSS_ENABLE)) {
- cmd->data = 0;
+ rss_type = &nic_dev->rss_type;
+
+ if (!(nic_dev->flags & HINIC_RSS_ENABLE))
return -EOPNOTSUPP;
- }
/* RSS does not support anything other than hashing
* to queues on src and dst IPs and ports
@@ -1108,26 +1111,6 @@ static int hinic_get_rxnfc(struct net_device *netdev,
case ETHTOOL_GRXRINGS:
cmd->data = nic_dev->num_qps;
break;
- case ETHTOOL_GRXFH:
- err = hinic_get_rss_hash_opts(nic_dev, cmd);
- break;
- default:
- err = -EOPNOTSUPP;
- break;
- }
-
- return err;
-}
-
-static int hinic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
-{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- int err = 0;
-
- switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- err = hinic_set_rss_hash_opts(nic_dev, cmd);
- break;
default:
err = -EOPNOTSUPP;
break;
@@ -1471,7 +1454,6 @@ static void hinic_get_strings(struct net_device *netdev,
u32 stringset, u8 *data)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
- char *p = (char *)data;
u16 i, j;
switch (stringset) {
@@ -1479,31 +1461,19 @@ static void hinic_get_strings(struct net_device *netdev,
memcpy(data, *hinic_test_strings, sizeof(hinic_test_strings));
return;
case ETH_SS_STATS:
- for (i = 0; i < ARRAY_SIZE(hinic_function_stats); i++) {
- memcpy(p, hinic_function_stats[i].name,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < ARRAY_SIZE(hinic_function_stats); i++)
+ ethtool_puts(&data, hinic_function_stats[i].name);
- for (i = 0; i < ARRAY_SIZE(hinic_port_stats); i++) {
- memcpy(p, hinic_port_stats[i].name,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < ARRAY_SIZE(hinic_port_stats); i++)
+ ethtool_puts(&data, hinic_port_stats[i].name);
- for (i = 0; i < nic_dev->num_qps; i++) {
- for (j = 0; j < ARRAY_SIZE(hinic_tx_queue_stats); j++) {
- sprintf(p, hinic_tx_queue_stats[j].name, i);
- p += ETH_GSTRING_LEN;
- }
- }
+ for (i = 0; i < nic_dev->num_qps; i++)
+ for (j = 0; j < ARRAY_SIZE(hinic_tx_queue_stats); j++)
+ ethtool_sprintf(&data, hinic_tx_queue_stats[j].name, i);
- for (i = 0; i < nic_dev->num_qps; i++) {
- for (j = 0; j < ARRAY_SIZE(hinic_rx_queue_stats); j++) {
- sprintf(p, hinic_rx_queue_stats[j].name, i);
- p += ETH_GSTRING_LEN;
- }
- }
+ for (i = 0; i < nic_dev->num_qps; i++)
+ for (j = 0; j < ARRAY_SIZE(hinic_rx_queue_stats); j++)
+ ethtool_sprintf(&data, hinic_rx_queue_stats[j].name, i);
return;
default:
@@ -1810,11 +1780,12 @@ static const struct ethtool_ops hinic_ethtool_ops = {
.get_channels = hinic_get_channels,
.set_channels = hinic_set_channels,
.get_rxnfc = hinic_get_rxnfc,
- .set_rxnfc = hinic_set_rxnfc,
.get_rxfh_key_size = hinic_get_rxfh_key_size,
.get_rxfh_indir_size = hinic_get_rxfh_indir_size,
.get_rxfh = hinic_get_rxfh,
.set_rxfh = hinic_set_rxfh,
+ .get_rxfh_fields = hinic_get_rxfh_fields,
+ .set_rxfh_fields = hinic_set_rxfh_fields,
.get_sset_count = hinic_get_sset_count,
.get_ethtool_stats = hinic_get_ethtool_stats,
.get_strings = hinic_get_strings,
@@ -1842,11 +1813,12 @@ static const struct ethtool_ops hinicvf_ethtool_ops = {
.get_channels = hinic_get_channels,
.set_channels = hinic_set_channels,
.get_rxnfc = hinic_get_rxnfc,
- .set_rxnfc = hinic_set_rxnfc,
.get_rxfh_key_size = hinic_get_rxfh_key_size,
.get_rxfh_indir_size = hinic_get_rxfh_indir_size,
.get_rxfh = hinic_get_rxfh,
.set_rxfh = hinic_set_rxfh,
+ .get_rxfh_fields = hinic_get_rxfh_fields,
+ .set_rxfh_fields = hinic_set_rxfh_fields,
.get_sset_count = hinic_get_sset_count,
.get_ethtool_stats = hinic_get_ethtool_stats,
.get_strings = hinic_get_strings,
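The string-table rework in hinic_ethtool.c replaces manual pointer arithmetic with ethtool_puts()/ethtool_sprintf(), which copy one ETH_GSTRING_LEN entry and advance the data cursor themselves. A tiny illustration of that cursor behaviour (stat_names and fill_strings are made-up names):

static const char * const stat_names[] = { "rx_ok", "tx_ok" };

static void fill_strings(u8 *data)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(stat_names); i++)
		ethtool_puts(&data, stat_names[i]);	/* advances data */
}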
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
index 045c47786a04..28114a59347e 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
@@ -605,7 +605,7 @@ static void aeq_elements_init(struct hinic_eq *eq, u32 init_val)
/**
* ceq_elements_init - Initialize all the elements in the ceq
* @eq: the event queue
- * @init_val: value to init with it the elements
+ * @init_val: value to init the elements with
**/
static void ceq_elements_init(struct hinic_eq *eq, u32 init_val)
{
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
index 3f9c31d29215..97c1584dc05b 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
@@ -861,7 +861,7 @@ static int send_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func,
HINIC_MBOX_HEADER_SET(NOT_LAST_SEG, LAST) |
HINIC_MBOX_HEADER_SET(direction, DIRECTION) |
HINIC_MBOX_HEADER_SET(cmd, CMD) |
- /* The vf's offset to it's associated pf */
+ /* The vf's offset to its associated pf */
HINIC_MBOX_HEADER_SET(msg_info->msg_id, MSG_ID) |
HINIC_MBOX_HEADER_SET(msg_info->status, STATUS) |
HINIC_MBOX_HEADER_SET(hinic_global_func_id_hw(hwdev->hwif),
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index 499c657d37a9..ae1f523d6841 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -172,6 +172,7 @@ err_init_txq:
hinic_sq_dbgfs_uninit(nic_dev);
devm_kfree(&netdev->dev, nic_dev->txqs);
+ nic_dev->txqs = NULL;
return err;
}
@@ -268,6 +269,7 @@ err_init_rxq:
hinic_rq_dbgfs_uninit(nic_dev);
devm_kfree(&netdev->dev, nic_dev->rxqs);
+ nic_dev->rxqs = NULL;
return err;
}
@@ -581,7 +583,7 @@ static int hinic_change_mtu(struct net_device *netdev, int new_mtu)
if (err)
netif_err(nic_dev, drv, netdev, "Failed to set port mtu\n");
else
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
return err;
}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.c b/drivers/net/ethernet/huawei/hinic/hinic_port.c
index f81a43d2cdfc..486fb0e20bef 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_port.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_port.c
@@ -469,7 +469,7 @@ int hinic_set_vlan_fliter(struct hinic_dev *nic_dev, u32 en)
err = HINIC_MGMT_CMD_UNSUPPORTED;
} else if (err || !out_size || vlan_filter.status) {
dev_err(&pdev->dev,
- "Failed to set vlan fliter, err: %d, status: 0x%x, out size: 0x%x\n",
+ "Failed to set vlan filter, err: %d, status: 0x%x, out size: 0x%x\n",
err, vlan_filter.status, out_size);
err = -EINVAL;
}
diff --git a/drivers/net/ethernet/huawei/hinic3/Kconfig b/drivers/net/ethernet/huawei/hinic3/Kconfig
new file mode 100644
index 000000000000..ce4331d1387b
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/Kconfig
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Huawei driver configuration
+#
+
+config HINIC3
+ tristate "Huawei 3rd generation network adapters (HINIC3) support"
+ # Fields of HW and management structures are little endian and are
+ # currently not converted
+ depends on !CPU_BIG_ENDIAN
+ depends on X86 || ARM64 || COMPILE_TEST
+ depends on PCI_MSI && 64BIT
+ select AUXILIARY_BUS
+ select PAGE_POOL
+ help
+ This driver supports HiNIC 3rd gen Network Adapter (HINIC3).
+ The driver is supported on X86_64 and ARM64 little endian.
+
+ To compile this driver as a module, choose M here.
+ The module will be called hinic3.
diff --git a/drivers/net/ethernet/huawei/hinic3/Makefile b/drivers/net/ethernet/huawei/hinic3/Makefile
new file mode 100644
index 000000000000..c3efa45a6a42
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/Makefile
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+obj-$(CONFIG_HINIC3) += hinic3.o
+
+hinic3-objs := hinic3_cmdq.o \
+ hinic3_common.o \
+ hinic3_eqs.o \
+ hinic3_hw_cfg.o \
+ hinic3_hw_comm.o \
+ hinic3_hwdev.o \
+ hinic3_hwif.o \
+ hinic3_irq.o \
+ hinic3_lld.o \
+ hinic3_main.o \
+ hinic3_mbox.o \
+ hinic3_mgmt.o \
+ hinic3_netdev_ops.o \
+ hinic3_nic_cfg.o \
+ hinic3_nic_io.o \
+ hinic3_queue_common.o \
+ hinic3_rss.o \
+ hinic3_rx.o \
+ hinic3_tx.o \
+ hinic3_wq.o
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.c b/drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.c
new file mode 100644
index 000000000000..ef539d1b69a3
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.c
@@ -0,0 +1,915 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+
+#include "hinic3_cmdq.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+#include "hinic3_mbox.h"
+
+#define CMDQ_BUF_SIZE 2048
+#define CMDQ_WQEBB_SIZE 64
+
+#define CMDQ_CMD_TIMEOUT 5000
+#define CMDQ_ENABLE_WAIT_TIMEOUT 300
+
+#define CMDQ_CTXT_CURR_WQE_PAGE_PFN_MASK GENMASK_ULL(51, 0)
+#define CMDQ_CTXT_EQ_ID_MASK GENMASK_ULL(60, 53)
+#define CMDQ_CTXT_CEQ_ARM_MASK BIT_ULL(61)
+#define CMDQ_CTXT_CEQ_EN_MASK BIT_ULL(62)
+#define CMDQ_CTXT_HW_BUSY_BIT_MASK BIT_ULL(63)
+
+#define CMDQ_CTXT_WQ_BLOCK_PFN_MASK GENMASK_ULL(51, 0)
+#define CMDQ_CTXT_CI_MASK GENMASK_ULL(63, 52)
+#define CMDQ_CTXT_SET(val, member) \
+ FIELD_PREP(CMDQ_CTXT_##member##_MASK, val)
+
+#define CMDQ_WQE_HDR_BUFDESC_LEN_MASK GENMASK(7, 0)
+#define CMDQ_WQE_HDR_COMPLETE_FMT_MASK BIT(15)
+#define CMDQ_WQE_HDR_DATA_FMT_MASK BIT(22)
+#define CMDQ_WQE_HDR_COMPLETE_REQ_MASK BIT(23)
+#define CMDQ_WQE_HDR_COMPLETE_SECT_LEN_MASK GENMASK(28, 27)
+#define CMDQ_WQE_HDR_CTRL_LEN_MASK GENMASK(30, 29)
+#define CMDQ_WQE_HDR_HW_BUSY_BIT_MASK BIT(31)
+#define CMDQ_WQE_HDR_SET(val, member) \
+ FIELD_PREP(CMDQ_WQE_HDR_##member##_MASK, val)
+#define CMDQ_WQE_HDR_GET(val, member) \
+ FIELD_GET(CMDQ_WQE_HDR_##member##_MASK, le32_to_cpu(val))
+
+#define CMDQ_CTRL_PI_MASK GENMASK(15, 0)
+#define CMDQ_CTRL_CMD_MASK GENMASK(23, 16)
+#define CMDQ_CTRL_MOD_MASK GENMASK(28, 24)
+#define CMDQ_CTRL_HW_BUSY_BIT_MASK BIT(31)
+#define CMDQ_CTRL_SET(val, member) \
+ FIELD_PREP(CMDQ_CTRL_##member##_MASK, val)
+#define CMDQ_CTRL_GET(val, member) \
+ FIELD_GET(CMDQ_CTRL_##member##_MASK, val)
+
+#define CMDQ_WQE_ERRCODE_VAL_MASK GENMASK(30, 0)
+#define CMDQ_WQE_ERRCODE_GET(val, member) \
+ FIELD_GET(CMDQ_WQE_ERRCODE_##member##_MASK, le32_to_cpu(val))
+
+#define CMDQ_DB_INFO_HI_PROD_IDX_MASK GENMASK(7, 0)
+#define CMDQ_DB_INFO_SET(val, member) \
+ FIELD_PREP(CMDQ_DB_INFO_##member##_MASK, val)
+
+#define CMDQ_DB_HEAD_QUEUE_TYPE_MASK BIT(23)
+#define CMDQ_DB_HEAD_CMDQ_TYPE_MASK GENMASK(26, 24)
+#define CMDQ_DB_HEAD_SET(val, member) \
+ FIELD_PREP(CMDQ_DB_HEAD_##member##_MASK, val)
+
+#define CMDQ_CEQE_TYPE_MASK GENMASK(2, 0)
+#define CMDQ_CEQE_GET(val, member) \
+ FIELD_GET(CMDQ_CEQE_##member##_MASK, le32_to_cpu(val))
+
+#define CMDQ_WQE_HEADER(wqe) ((struct cmdq_header *)(wqe))
+#define CMDQ_WQE_COMPLETED(ctrl_info) \
+ CMDQ_CTRL_GET(le32_to_cpu(ctrl_info), HW_BUSY_BIT)
+
+#define CMDQ_PFN(addr) ((addr) >> 12)
+
+/* cmdq work queue's chip logical address table is up to 512B */
+#define CMDQ_WQ_CLA_SIZE 512
+
+/* Completion codes: send, direct sync, force stop */
+#define CMDQ_SEND_CMPT_CODE 10
+#define CMDQ_DIRECT_SYNC_CMPT_CODE 11
+#define CMDQ_FORCE_STOP_CMPT_CODE 12
+
+enum cmdq_data_format {
+ CMDQ_DATA_SGE = 0,
+ CMDQ_DATA_DIRECT = 1,
+};
+
+enum cmdq_ctrl_sect_len {
+ CMDQ_CTRL_SECT_LEN = 1,
+ CMDQ_CTRL_DIRECT_SECT_LEN = 2,
+};
+
+enum cmdq_bufdesc_len {
+ CMDQ_BUFDESC_LCMD_LEN = 2,
+ CMDQ_BUFDESC_SCMD_LEN = 3,
+};
+
+enum cmdq_completion_format {
+ CMDQ_COMPLETE_DIRECT = 0,
+ CMDQ_COMPLETE_SGE = 1,
+};
+
+enum cmdq_cmd_type {
+ CMDQ_CMD_DIRECT_RESP,
+ CMDQ_CMD_SGE_RESP,
+};
+
+#define CMDQ_WQE_NUM_WQEBBS 1
+
+static struct cmdq_wqe *cmdq_read_wqe(struct hinic3_wq *wq, u16 *ci)
+{
+ if (hinic3_wq_get_used(wq) == 0)
+ return NULL;
+
+ *ci = wq->cons_idx & wq->idx_mask;
+
+ return get_q_element(&wq->qpages, wq->cons_idx, NULL);
+}
+
+struct hinic3_cmd_buf *hinic3_alloc_cmd_buf(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_cmd_buf *cmd_buf;
+ struct hinic3_cmdqs *cmdqs;
+
+ cmdqs = hwdev->cmdqs;
+
+ cmd_buf = kmalloc(sizeof(*cmd_buf), GFP_ATOMIC);
+ if (!cmd_buf)
+ return NULL;
+
+ cmd_buf->buf = dma_pool_alloc(cmdqs->cmd_buf_pool, GFP_ATOMIC,
+ &cmd_buf->dma_addr);
+ if (!cmd_buf->buf) {
+ dev_err(hwdev->dev, "Failed to allocate cmdq cmd buf from the pool\n");
+ goto err_free_cmd_buf;
+ }
+
+ cmd_buf->size = cpu_to_le16(CMDQ_BUF_SIZE);
+ refcount_set(&cmd_buf->ref_cnt, 1);
+
+ return cmd_buf;
+
+err_free_cmd_buf:
+ kfree(cmd_buf);
+
+ return NULL;
+}
+
+void hinic3_free_cmd_buf(struct hinic3_hwdev *hwdev,
+ struct hinic3_cmd_buf *cmd_buf)
+{
+ struct hinic3_cmdqs *cmdqs;
+
+ if (!refcount_dec_and_test(&cmd_buf->ref_cnt))
+ return;
+
+ cmdqs = hwdev->cmdqs;
+
+ dma_pool_free(cmdqs->cmd_buf_pool, cmd_buf->buf, cmd_buf->dma_addr);
+ kfree(cmd_buf);
+}
+
+static void cmdq_clear_cmd_buf(struct hinic3_cmdq_cmd_info *cmd_info,
+ struct hinic3_hwdev *hwdev)
+{
+ if (cmd_info->buf_in) {
+ hinic3_free_cmd_buf(hwdev, cmd_info->buf_in);
+ cmd_info->buf_in = NULL;
+ }
+}
+
+static void clear_wqe_complete_bit(struct hinic3_cmdq *cmdq,
+ struct cmdq_wqe *wqe, u16 ci)
+{
+ struct cmdq_header *hdr = CMDQ_WQE_HEADER(wqe);
+ __le32 header_info = hdr->header_info;
+ enum cmdq_data_format df;
+ struct cmdq_ctrl *ctrl;
+
+ df = CMDQ_WQE_HDR_GET(header_info, DATA_FMT);
+ if (df == CMDQ_DATA_SGE)
+ ctrl = &wqe->wqe_lcmd.ctrl;
+ else
+ ctrl = &wqe->wqe_scmd.ctrl;
+
+ /* clear HW busy bit */
+ ctrl->ctrl_info = 0;
+ cmdq->cmd_infos[ci].cmd_type = HINIC3_CMD_TYPE_NONE;
+ wmb(); /* ensure wqe is cleared before updating ci */
+ hinic3_wq_put_wqebbs(&cmdq->wq, CMDQ_WQE_NUM_WQEBBS);
+}
+
+static void cmdq_update_cmd_status(struct hinic3_cmdq *cmdq, u16 prod_idx,
+ struct cmdq_wqe *wqe)
+{
+ struct hinic3_cmdq_cmd_info *cmd_info;
+ struct cmdq_wqe_lcmd *wqe_lcmd;
+ __le32 status_info;
+
+ wqe_lcmd = &wqe->wqe_lcmd;
+ cmd_info = &cmdq->cmd_infos[prod_idx];
+ if (cmd_info->errcode) {
+ status_info = wqe_lcmd->status.status_info;
+ *cmd_info->errcode = CMDQ_WQE_ERRCODE_GET(status_info, VAL);
+ }
+
+ if (cmd_info->direct_resp)
+ *cmd_info->direct_resp = wqe_lcmd->completion.resp.direct.val;
+}
+
+static void cmdq_sync_cmd_handler(struct hinic3_cmdq *cmdq,
+ struct cmdq_wqe *wqe, u16 ci)
+{
+ spin_lock(&cmdq->cmdq_lock);
+ cmdq_update_cmd_status(cmdq, ci, wqe);
+ if (cmdq->cmd_infos[ci].cmpt_code) {
+ *cmdq->cmd_infos[ci].cmpt_code = CMDQ_DIRECT_SYNC_CMPT_CODE;
+ cmdq->cmd_infos[ci].cmpt_code = NULL;
+ }
+
+ /* Ensure that completion code has been updated before updating done */
+ smp_wmb();
+ if (cmdq->cmd_infos[ci].done) {
+ complete(cmdq->cmd_infos[ci].done);
+ cmdq->cmd_infos[ci].done = NULL;
+ }
+ spin_unlock(&cmdq->cmdq_lock);
+
+ cmdq_clear_cmd_buf(&cmdq->cmd_infos[ci], cmdq->hwdev);
+ clear_wqe_complete_bit(cmdq, wqe, ci);
+}
+
+void hinic3_cmdq_ceq_handler(struct hinic3_hwdev *hwdev, __le32 ceqe_data)
+{
+ enum hinic3_cmdq_type cmdq_type = CMDQ_CEQE_GET(ceqe_data, TYPE);
+ struct hinic3_cmdqs *cmdqs = hwdev->cmdqs;
+ struct hinic3_cmdq_cmd_info *cmd_info;
+ struct cmdq_wqe_lcmd *wqe_lcmd;
+ struct hinic3_cmdq *cmdq;
+ struct cmdq_wqe *wqe;
+ __le32 ctrl_info;
+ u16 ci;
+
+ if (unlikely(cmdq_type >= ARRAY_SIZE(cmdqs->cmdq)))
+ return;
+
+ cmdq = &cmdqs->cmdq[cmdq_type];
+ while ((wqe = cmdq_read_wqe(&cmdq->wq, &ci)) != NULL) {
+ cmd_info = &cmdq->cmd_infos[ci];
+ switch (cmd_info->cmd_type) {
+ case HINIC3_CMD_TYPE_NONE:
+ return;
+ case HINIC3_CMD_TYPE_TIMEOUT:
+ dev_warn(hwdev->dev, "Cmdq timeout, q_id: %u, ci: %u\n",
+ cmdq_type, ci);
+ fallthrough;
+ case HINIC3_CMD_TYPE_FAKE_TIMEOUT:
+ cmdq_clear_cmd_buf(cmd_info, hwdev);
+ clear_wqe_complete_bit(cmdq, wqe, ci);
+ break;
+ default:
+ /* only the arm bit uses an scmd wqe,
+ * all other wqes are lcmd
+ */
+ wqe_lcmd = &wqe->wqe_lcmd;
+ ctrl_info = wqe_lcmd->ctrl.ctrl_info;
+ if (!CMDQ_WQE_COMPLETED(ctrl_info))
+ return;
+
+ dma_rmb();
+ /* For FORCE_STOP cmd_type, we also need to wait for
+ * the firmware processing to complete to prevent the
+ * firmware from accessing the released cmd_buf
+ */
+ if (cmd_info->cmd_type == HINIC3_CMD_TYPE_FORCE_STOP) {
+ cmdq_clear_cmd_buf(cmd_info, hwdev);
+ clear_wqe_complete_bit(cmdq, wqe, ci);
+ } else {
+ cmdq_sync_cmd_handler(cmdq, wqe, ci);
+ }
+
+ break;
+ }
+ }
+}
+
+static int wait_cmdqs_enable(struct hinic3_cmdqs *cmdqs)
+{
+ unsigned long end;
+
+ end = jiffies + msecs_to_jiffies(CMDQ_ENABLE_WAIT_TIMEOUT);
+ do {
+ if (cmdqs->status & HINIC3_CMDQ_ENABLE)
+ return 0;
+ usleep_range(1000, 2000);
+ } while (time_before(jiffies, end) && !cmdqs->disable_flag);
+
+ cmdqs->disable_flag = 1;
+
+ return -EBUSY;
+}
+
+static void cmdq_set_completion(struct cmdq_completion *complete,
+ struct hinic3_cmd_buf *buf_out)
+{
+ struct hinic3_sge *sge = &complete->resp.sge;
+
+ hinic3_set_sge(sge, buf_out->dma_addr, cpu_to_le32(CMDQ_BUF_SIZE));
+}
+
+static struct cmdq_wqe *cmdq_get_wqe(struct hinic3_wq *wq, u16 *pi)
+{
+ if (!hinic3_wq_free_wqebbs(wq))
+ return NULL;
+
+ return hinic3_wq_get_one_wqebb(wq, pi);
+}
+
+static void cmdq_set_lcmd_bufdesc(struct cmdq_wqe_lcmd *wqe,
+ struct hinic3_cmd_buf *buf_in)
+{
+ hinic3_set_sge(&wqe->buf_desc.sge, buf_in->dma_addr,
+ (__force __le32)buf_in->size);
+}
+
+static void cmdq_set_db(struct hinic3_cmdq *cmdq,
+ enum hinic3_cmdq_type cmdq_type, u16 prod_idx)
+{
+ u8 __iomem *db_base = cmdq->hwdev->cmdqs->cmdqs_db_base;
+ u16 db_ofs = (prod_idx & 0xFF) << 3;
+ struct cmdq_db db;
+
+ db.db_info = cpu_to_le32(CMDQ_DB_INFO_SET(prod_idx >> 8, HI_PROD_IDX));
+ db.db_head = cpu_to_le32(CMDQ_DB_HEAD_SET(1, QUEUE_TYPE) |
+ CMDQ_DB_HEAD_SET(cmdq_type, CMDQ_TYPE));
+ writeq(*(u64 *)&db, db_base + db_ofs);
+}
+
+static void cmdq_wqe_fill(struct cmdq_wqe *hw_wqe,
+ const struct cmdq_wqe *shadow_wqe)
+{
+ const struct cmdq_header *src = (struct cmdq_header *)shadow_wqe;
+ struct cmdq_header *dst = (struct cmdq_header *)hw_wqe;
+ size_t len;
+
+ len = sizeof(struct cmdq_wqe) - sizeof(struct cmdq_header);
+ memcpy(dst + 1, src + 1, len);
+ /* Ensure the wqe body is written before updating the header */
+ wmb();
+ WRITE_ONCE(*dst, *src);
+}
+
+static void cmdq_prepare_wqe_ctrl(struct cmdq_wqe *wqe, u8 wrapped,
+ u8 mod, u8 cmd, u16 prod_idx,
+ enum cmdq_completion_format complete_format,
+ enum cmdq_data_format data_format,
+ enum cmdq_bufdesc_len buf_len)
+{
+ struct cmdq_header *hdr = CMDQ_WQE_HEADER(wqe);
+ enum cmdq_ctrl_sect_len ctrl_len;
+ struct cmdq_wqe_lcmd *wqe_lcmd;
+ struct cmdq_wqe_scmd *wqe_scmd;
+ struct cmdq_ctrl *ctrl;
+
+ if (data_format == CMDQ_DATA_SGE) {
+ wqe_lcmd = &wqe->wqe_lcmd;
+ wqe_lcmd->status.status_info = 0;
+ ctrl = &wqe_lcmd->ctrl;
+ ctrl_len = CMDQ_CTRL_SECT_LEN;
+ } else {
+ wqe_scmd = &wqe->wqe_scmd;
+ wqe_scmd->status.status_info = 0;
+ ctrl = &wqe_scmd->ctrl;
+ ctrl_len = CMDQ_CTRL_DIRECT_SECT_LEN;
+ }
+
+ ctrl->ctrl_info =
+ cpu_to_le32(CMDQ_CTRL_SET(prod_idx, PI) |
+ CMDQ_CTRL_SET(cmd, CMD) |
+ CMDQ_CTRL_SET(mod, MOD));
+
+ hdr->header_info =
+ cpu_to_le32(CMDQ_WQE_HDR_SET(buf_len, BUFDESC_LEN) |
+ CMDQ_WQE_HDR_SET(complete_format, COMPLETE_FMT) |
+ CMDQ_WQE_HDR_SET(data_format, DATA_FMT) |
+ CMDQ_WQE_HDR_SET(1, COMPLETE_REQ) |
+ CMDQ_WQE_HDR_SET(3, COMPLETE_SECT_LEN) |
+ CMDQ_WQE_HDR_SET(ctrl_len, CTRL_LEN) |
+ CMDQ_WQE_HDR_SET(wrapped, HW_BUSY_BIT));
+}
+
+static void cmdq_set_lcmd_wqe(struct cmdq_wqe *wqe,
+ enum cmdq_cmd_type cmd_type,
+ struct hinic3_cmd_buf *buf_in,
+ struct hinic3_cmd_buf *buf_out,
+ u8 wrapped, u8 mod, u8 cmd, u16 prod_idx)
+{
+ enum cmdq_completion_format complete_format = CMDQ_COMPLETE_DIRECT;
+ struct cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd;
+
+ switch (cmd_type) {
+ case CMDQ_CMD_DIRECT_RESP:
+ wqe_lcmd->completion.resp.direct.val = 0;
+ break;
+ case CMDQ_CMD_SGE_RESP:
+ if (buf_out) {
+ complete_format = CMDQ_COMPLETE_SGE;
+ cmdq_set_completion(&wqe_lcmd->completion, buf_out);
+ }
+ break;
+ }
+
+ cmdq_prepare_wqe_ctrl(wqe, wrapped, mod, cmd, prod_idx, complete_format,
+ CMDQ_DATA_SGE, CMDQ_BUFDESC_LCMD_LEN);
+ cmdq_set_lcmd_bufdesc(wqe_lcmd, buf_in);
+}
+
+static int hinic3_cmdq_sync_timeout_check(struct hinic3_cmdq *cmdq,
+ struct cmdq_wqe *wqe, u16 pi)
+{
+ struct cmdq_wqe_lcmd *wqe_lcmd;
+ struct cmdq_ctrl *ctrl;
+ __le32 ctrl_info;
+
+ wqe_lcmd = &wqe->wqe_lcmd;
+ ctrl = &wqe_lcmd->ctrl;
+ ctrl_info = ctrl->ctrl_info;
+ if (!CMDQ_WQE_COMPLETED(ctrl_info)) {
+ dev_dbg(cmdq->hwdev->dev, "Cmdq sync command check busy bit not set\n");
+ return -EFAULT;
+ }
+ cmdq_update_cmd_status(cmdq, pi, wqe);
+
+ return 0;
+}
+
+static void clear_cmd_info(struct hinic3_cmdq_cmd_info *cmd_info,
+ const struct hinic3_cmdq_cmd_info *saved_cmd_info)
+{
+ if (cmd_info->errcode == saved_cmd_info->errcode)
+ cmd_info->errcode = NULL;
+
+ if (cmd_info->done == saved_cmd_info->done)
+ cmd_info->done = NULL;
+
+ if (cmd_info->direct_resp == saved_cmd_info->direct_resp)
+ cmd_info->direct_resp = NULL;
+}
+
+static int wait_cmdq_sync_cmd_completion(struct hinic3_cmdq *cmdq,
+ struct hinic3_cmdq_cmd_info *cmd_info,
+ struct hinic3_cmdq_cmd_info *saved_cmd_info,
+ u64 curr_msg_id, u16 curr_prod_idx,
+ struct cmdq_wqe *curr_wqe,
+ u32 timeout)
+{
+ ulong timeo = msecs_to_jiffies(timeout);
+ int err;
+
+ if (wait_for_completion_timeout(saved_cmd_info->done, timeo))
+ return 0;
+
+ spin_lock_bh(&cmdq->cmdq_lock);
+ if (cmd_info->cmpt_code == saved_cmd_info->cmpt_code)
+ cmd_info->cmpt_code = NULL;
+
+ if (*saved_cmd_info->cmpt_code == CMDQ_DIRECT_SYNC_CMPT_CODE) {
+ dev_dbg(cmdq->hwdev->dev, "Cmdq direct sync command has been completed\n");
+ spin_unlock_bh(&cmdq->cmdq_lock);
+ return 0;
+ }
+
+ if (curr_msg_id == cmd_info->cmdq_msg_id) {
+ err = hinic3_cmdq_sync_timeout_check(cmdq, curr_wqe,
+ curr_prod_idx);
+ if (err)
+ cmd_info->cmd_type = HINIC3_CMD_TYPE_TIMEOUT;
+ else
+ cmd_info->cmd_type = HINIC3_CMD_TYPE_FAKE_TIMEOUT;
+ } else {
+ err = -ETIMEDOUT;
+ dev_err(cmdq->hwdev->dev,
+ "Cmdq sync command current msg id mismatch cmd_info msg id\n");
+ }
+
+ clear_cmd_info(cmd_info, saved_cmd_info);
+ spin_unlock_bh(&cmdq->cmdq_lock);
+
+ return err;
+}
+
+static int cmdq_sync_cmd_direct_resp(struct hinic3_cmdq *cmdq, u8 mod, u8 cmd,
+ struct hinic3_cmd_buf *buf_in,
+ __le64 *out_param)
+{
+ struct hinic3_cmdq_cmd_info *cmd_info, saved_cmd_info;
+ int cmpt_code = CMDQ_SEND_CMPT_CODE;
+ struct cmdq_wqe *curr_wqe, wqe = {};
+ struct hinic3_wq *wq = &cmdq->wq;
+ u16 curr_prod_idx, next_prod_idx;
+ struct completion done;
+ u64 curr_msg_id;
+ int errcode;
+ u8 wrapped;
+ int err;
+
+ spin_lock_bh(&cmdq->cmdq_lock);
+ curr_wqe = cmdq_get_wqe(wq, &curr_prod_idx);
+ if (!curr_wqe) {
+ spin_unlock_bh(&cmdq->cmdq_lock);
+ return -EBUSY;
+ }
+
+ wrapped = cmdq->wrapped;
+ next_prod_idx = curr_prod_idx + CMDQ_WQE_NUM_WQEBBS;
+ if (next_prod_idx >= wq->q_depth) {
+ cmdq->wrapped ^= 1;
+ next_prod_idx -= wq->q_depth;
+ }
+
+ cmd_info = &cmdq->cmd_infos[curr_prod_idx];
+ init_completion(&done);
+ refcount_inc(&buf_in->ref_cnt);
+ cmd_info->cmd_type = HINIC3_CMD_TYPE_DIRECT_RESP;
+ cmd_info->done = &done;
+ cmd_info->errcode = &errcode;
+ cmd_info->direct_resp = out_param;
+ cmd_info->cmpt_code = &cmpt_code;
+ cmd_info->buf_in = buf_in;
+ saved_cmd_info = *cmd_info;
+ cmdq_set_lcmd_wqe(&wqe, CMDQ_CMD_DIRECT_RESP, buf_in, NULL,
+ wrapped, mod, cmd, curr_prod_idx);
+
+ cmdq_wqe_fill(curr_wqe, &wqe);
+ (cmd_info->cmdq_msg_id)++;
+ curr_msg_id = cmd_info->cmdq_msg_id;
+ cmdq_set_db(cmdq, HINIC3_CMDQ_SYNC, next_prod_idx);
+ spin_unlock_bh(&cmdq->cmdq_lock);
+
+ err = wait_cmdq_sync_cmd_completion(cmdq, cmd_info, &saved_cmd_info,
+ curr_msg_id, curr_prod_idx,
+ curr_wqe, CMDQ_CMD_TIMEOUT);
+ if (err) {
+ dev_err(cmdq->hwdev->dev,
+ "Cmdq sync command timeout, mod: %u, cmd: %u, prod idx: 0x%x\n",
+ mod, cmd, curr_prod_idx);
+ err = -ETIMEDOUT;
+ }
+
+ if (cmpt_code == CMDQ_FORCE_STOP_CMPT_CODE) {
+ dev_dbg(cmdq->hwdev->dev,
+ "Force stop cmdq cmd, mod: %u, cmd: %u\n", mod, cmd);
+ err = -EAGAIN;
+ }
+
+ smp_rmb(); /* read error code after completion */
+
+ return err ? err : errcode;
+}
+
+int hinic3_cmdq_direct_resp(struct hinic3_hwdev *hwdev, u8 mod, u8 cmd,
+ struct hinic3_cmd_buf *buf_in, __le64 *out_param)
+{
+ struct hinic3_cmdqs *cmdqs;
+ int err;
+
+ cmdqs = hwdev->cmdqs;
+ err = wait_cmdqs_enable(cmdqs);
+ if (err) {
+ dev_err(hwdev->dev, "Cmdq is disabled\n");
+ return err;
+ }
+
+ err = cmdq_sync_cmd_direct_resp(&cmdqs->cmdq[HINIC3_CMDQ_SYNC],
+ mod, cmd, buf_in, out_param);
+
+ return err;
+}
+
+static void cmdq_init_queue_ctxt(struct hinic3_hwdev *hwdev, u8 cmdq_id,
+ struct comm_cmdq_ctxt_info *ctxt_info)
+{
+ const struct hinic3_cmdqs *cmdqs;
+ u64 cmdq_first_block_paddr, pfn;
+ const struct hinic3_wq *wq;
+
+ cmdqs = hwdev->cmdqs;
+ wq = &cmdqs->cmdq[cmdq_id].wq;
+ pfn = CMDQ_PFN(hinic3_wq_get_first_wqe_page_addr(wq));
+
+ ctxt_info->curr_wqe_page_pfn =
+ cpu_to_le64(CMDQ_CTXT_SET(1, HW_BUSY_BIT) |
+ CMDQ_CTXT_SET(1, CEQ_EN) |
+ CMDQ_CTXT_SET(1, CEQ_ARM) |
+ CMDQ_CTXT_SET(0, EQ_ID) |
+ CMDQ_CTXT_SET(pfn, CURR_WQE_PAGE_PFN));
+
+ if (!hinic3_wq_is_0_level_cla(wq)) {
+ cmdq_first_block_paddr = cmdqs->wq_block_paddr;
+ pfn = CMDQ_PFN(cmdq_first_block_paddr);
+ }
+
+ ctxt_info->wq_block_pfn = cpu_to_le64(CMDQ_CTXT_SET(wq->cons_idx, CI) |
+ CMDQ_CTXT_SET(pfn, WQ_BLOCK_PFN));
+}
+
+static int init_cmdq(struct hinic3_cmdq *cmdq, struct hinic3_hwdev *hwdev,
+ enum hinic3_cmdq_type q_type)
+{
+ int err;
+
+ cmdq->cmdq_type = q_type;
+ cmdq->wrapped = 1;
+ cmdq->hwdev = hwdev;
+
+ spin_lock_init(&cmdq->cmdq_lock);
+
+ cmdq->cmd_infos = kcalloc(cmdq->wq.q_depth, sizeof(*cmdq->cmd_infos),
+ GFP_KERNEL);
+ if (!cmdq->cmd_infos) {
+ err = -ENOMEM;
+ return err;
+ }
+
+ return 0;
+}
+
+static int hinic3_set_cmdq_ctxt(struct hinic3_hwdev *hwdev, u8 cmdq_id)
+{
+ struct comm_cmd_set_cmdq_ctxt cmdq_ctxt = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ cmdq_init_queue_ctxt(hwdev, cmdq_id, &cmdq_ctxt.ctxt);
+ cmdq_ctxt.func_id = hinic3_global_func_id(hwdev);
+ cmdq_ctxt.cmdq_id = cmdq_id;
+
+ mgmt_msg_params_init_default(&msg_params, &cmdq_ctxt,
+ sizeof(cmdq_ctxt));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_SET_CMDQ_CTXT, &msg_params);
+ if (err || cmdq_ctxt.head.status) {
+ dev_err(hwdev->dev, "Failed to set cmdq ctxt, err: %d, status: 0x%x\n",
+ err, cmdq_ctxt.head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int hinic3_set_cmdq_ctxts(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_cmdqs *cmdqs = hwdev->cmdqs;
+ u8 cmdq_type;
+ int err;
+
+ for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++) {
+ err = hinic3_set_cmdq_ctxt(hwdev, cmdq_type);
+ if (err)
+ return err;
+ }
+
+ cmdqs->status |= HINIC3_CMDQ_ENABLE;
+ cmdqs->disable_flag = 0;
+
+ return 0;
+}
+
+static int create_cmdq_wq(struct hinic3_hwdev *hwdev,
+ struct hinic3_cmdqs *cmdqs)
+{
+ u8 cmdq_type;
+ int err;
+
+ for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++) {
+ err = hinic3_wq_create(hwdev, &cmdqs->cmdq[cmdq_type].wq,
+ CMDQ_DEPTH, CMDQ_WQEBB_SIZE);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to create cmdq wq\n");
+ goto err_destroy_wq;
+ }
+ }
+
+ /* With 1-level Chip Logical Address (CLA), all cmdq wq page
+ * addresses must be placed in one wq block
+ */
+ if (!hinic3_wq_is_0_level_cla(&cmdqs->cmdq[HINIC3_CMDQ_SYNC].wq)) {
+ if (cmdqs->cmdq[HINIC3_CMDQ_SYNC].wq.qpages.num_pages >
+ CMDQ_WQ_CLA_SIZE / sizeof(u64)) {
+ err = -EINVAL;
+ dev_err(hwdev->dev,
+ "Cmdq number of wq pages exceeds limit: %lu\n",
+ CMDQ_WQ_CLA_SIZE / sizeof(u64));
+ goto err_destroy_wq;
+ }
+
+ cmdqs->wq_block_vaddr =
+ dma_alloc_coherent(hwdev->dev, HINIC3_MIN_PAGE_SIZE,
+ &cmdqs->wq_block_paddr, GFP_KERNEL);
+ if (!cmdqs->wq_block_vaddr) {
+ err = -ENOMEM;
+ goto err_destroy_wq;
+ }
+
+ for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++)
+ memcpy((u8 *)cmdqs->wq_block_vaddr +
+ CMDQ_WQ_CLA_SIZE * cmdq_type,
+ cmdqs->cmdq[cmdq_type].wq.wq_block_vaddr,
+ cmdqs->cmdq[cmdq_type].wq.qpages.num_pages *
+ sizeof(__be64));
+ }
+
+ return 0;
+
+err_destroy_wq:
+ while (cmdq_type > 0) {
+ cmdq_type--;
+ hinic3_wq_destroy(hwdev, &cmdqs->cmdq[cmdq_type].wq);
+ }
+
+ return err;
+}
+
+static void destroy_cmdq_wq(struct hinic3_hwdev *hwdev,
+ struct hinic3_cmdqs *cmdqs)
+{
+ u8 cmdq_type;
+
+ if (cmdqs->wq_block_vaddr)
+ dma_free_coherent(hwdev->dev, HINIC3_MIN_PAGE_SIZE,
+ cmdqs->wq_block_vaddr, cmdqs->wq_block_paddr);
+
+ for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++)
+ hinic3_wq_destroy(hwdev, &cmdqs->cmdq[cmdq_type].wq);
+}
+
+static int init_cmdqs(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_cmdqs *cmdqs;
+
+ cmdqs = kzalloc(sizeof(*cmdqs), GFP_KERNEL);
+ if (!cmdqs)
+ return -ENOMEM;
+
+ hwdev->cmdqs = cmdqs;
+ cmdqs->hwdev = hwdev;
+ cmdqs->cmdq_num = hwdev->max_cmdq;
+
+ cmdqs->cmd_buf_pool = dma_pool_create("hinic3_cmdq", hwdev->dev,
+ CMDQ_BUF_SIZE, CMDQ_BUF_SIZE, 0);
+ if (!cmdqs->cmd_buf_pool) {
+ dev_err(hwdev->dev, "Failed to create cmdq buffer pool\n");
+ kfree(cmdqs);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void cmdq_flush_sync_cmd(struct hinic3_cmdq_cmd_info *cmd_info)
+{
+ if (cmd_info->cmd_type != HINIC3_CMD_TYPE_DIRECT_RESP)
+ return;
+
+ cmd_info->cmd_type = HINIC3_CMD_TYPE_FORCE_STOP;
+
+ if (cmd_info->cmpt_code &&
+ *cmd_info->cmpt_code == CMDQ_SEND_CMPT_CODE)
+ *cmd_info->cmpt_code = CMDQ_FORCE_STOP_CMPT_CODE;
+
+ if (cmd_info->done) {
+ complete(cmd_info->done);
+ cmd_info->done = NULL;
+ cmd_info->cmpt_code = NULL;
+ cmd_info->direct_resp = NULL;
+ cmd_info->errcode = NULL;
+ }
+}
+
+static void hinic3_cmdq_flush_cmd(struct hinic3_cmdq *cmdq)
+{
+ struct hinic3_cmdq_cmd_info *cmd_info;
+ u16 ci;
+
+ spin_lock_bh(&cmdq->cmdq_lock);
+ while (cmdq_read_wqe(&cmdq->wq, &ci)) {
+ hinic3_wq_put_wqebbs(&cmdq->wq, CMDQ_WQE_NUM_WQEBBS);
+ cmd_info = &cmdq->cmd_infos[ci];
+ if (cmd_info->cmd_type == HINIC3_CMD_TYPE_DIRECT_RESP)
+ cmdq_flush_sync_cmd(cmd_info);
+ }
+ spin_unlock_bh(&cmdq->cmdq_lock);
+}
+
+void hinic3_cmdq_flush_sync_cmd(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_cmdq *cmdq;
+ u16 wqe_cnt, wqe_idx, i;
+ struct hinic3_wq *wq;
+
+ cmdq = &hwdev->cmdqs->cmdq[HINIC3_CMDQ_SYNC];
+ spin_lock_bh(&cmdq->cmdq_lock);
+ wq = &cmdq->wq;
+ wqe_cnt = hinic3_wq_get_used(wq);
+ for (i = 0; i < wqe_cnt; i++) {
+ wqe_idx = (wq->cons_idx + i) & wq->idx_mask;
+ cmdq_flush_sync_cmd(cmdq->cmd_infos + wqe_idx);
+ }
+ spin_unlock_bh(&cmdq->cmdq_lock);
+}
+
+static void hinic3_cmdq_reset_all_cmd_buf(struct hinic3_cmdq *cmdq)
+{
+ u16 i;
+
+ for (i = 0; i < cmdq->wq.q_depth; i++)
+ cmdq_clear_cmd_buf(&cmdq->cmd_infos[i], cmdq->hwdev);
+}
+
+int hinic3_reinit_cmdq_ctxts(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_cmdqs *cmdqs = hwdev->cmdqs;
+ u8 cmdq_type;
+
+ for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++) {
+ hinic3_cmdq_flush_cmd(&cmdqs->cmdq[cmdq_type]);
+ hinic3_cmdq_reset_all_cmd_buf(&cmdqs->cmdq[cmdq_type]);
+ cmdqs->cmdq[cmdq_type].wrapped = 1;
+ hinic3_wq_reset(&cmdqs->cmdq[cmdq_type].wq);
+ }
+
+ return hinic3_set_cmdq_ctxts(hwdev);
+}
+
+int hinic3_cmdqs_init(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_cmdqs *cmdqs;
+ void __iomem *db_base;
+ u8 cmdq_type;
+ int err;
+
+ err = init_cmdqs(hwdev);
+ if (err)
+ goto err_out;
+
+ cmdqs = hwdev->cmdqs;
+ err = create_cmdq_wq(hwdev, cmdqs);
+ if (err)
+ goto err_free_cmdqs;
+
+ err = hinic3_alloc_db_addr(hwdev, &db_base, NULL);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to allocate doorbell address\n");
+ goto err_destroy_cmdq_wq;
+ }
+ cmdqs->cmdqs_db_base = db_base;
+
+ for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++) {
+ err = init_cmdq(&cmdqs->cmdq[cmdq_type], hwdev, cmdq_type);
+ if (err) {
+ dev_err(hwdev->dev,
+ "Failed to initialize cmdq type : %d\n",
+ cmdq_type);
+ goto err_free_cmd_infos;
+ }
+ }
+
+ err = hinic3_set_cmdq_ctxts(hwdev);
+ if (err)
+ goto err_free_cmd_infos;
+
+ return 0;
+
+err_free_cmd_infos:
+ while (cmdq_type > 0) {
+ cmdq_type--;
+ kfree(cmdqs->cmdq[cmdq_type].cmd_infos);
+ }
+
+ hinic3_free_db_addr(hwdev, cmdqs->cmdqs_db_base);
+
+err_destroy_cmdq_wq:
+ destroy_cmdq_wq(hwdev, cmdqs);
+
+err_free_cmdqs:
+ dma_pool_destroy(cmdqs->cmd_buf_pool);
+ kfree(cmdqs);
+
+err_out:
+ return err;
+}
+
+void hinic3_cmdqs_free(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_cmdqs *cmdqs = hwdev->cmdqs;
+ u8 cmdq_type;
+
+ cmdqs->status &= ~HINIC3_CMDQ_ENABLE;
+
+ for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++) {
+ hinic3_cmdq_flush_cmd(&cmdqs->cmdq[cmdq_type]);
+ hinic3_cmdq_reset_all_cmd_buf(&cmdqs->cmdq[cmdq_type]);
+ kfree(cmdqs->cmdq[cmdq_type].cmd_infos);
+ }
+
+ hinic3_free_db_addr(hwdev, cmdqs->cmdqs_db_base);
+ destroy_cmdq_wq(hwdev, cmdqs);
+ dma_pool_destroy(cmdqs->cmd_buf_pool);
+ kfree(cmdqs);
+}
+
+bool hinic3_cmdq_idle(struct hinic3_cmdq *cmdq)
+{
+ return hinic3_wq_get_used(&cmdq->wq) == 0;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.h b/drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.h
new file mode 100644
index 000000000000..f99c386a2780
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_CMDQ_H_
+#define _HINIC3_CMDQ_H_
+
+#include <linux/dmapool.h>
+
+#include "hinic3_hw_intf.h"
+#include "hinic3_wq.h"
+
+#define CMDQ_DEPTH 4096
+
+struct cmdq_db {
+ __le32 db_head;
+ __le32 db_info;
+};
+
+/* hw defined cmdq wqe header */
+struct cmdq_header {
+ __le32 header_info;
+ __le32 saved_data;
+};
+
+struct cmdq_lcmd_bufdesc {
+ struct hinic3_sge sge;
+ __le64 rsvd2;
+ __le64 rsvd3;
+};
+
+struct cmdq_status {
+ __le32 status_info;
+};
+
+struct cmdq_ctrl {
+ __le32 ctrl_info;
+};
+
+struct cmdq_direct_resp {
+ __le64 val;
+ __le64 rsvd;
+};
+
+struct cmdq_completion {
+ union {
+ struct hinic3_sge sge;
+ struct cmdq_direct_resp direct;
+ } resp;
+};
+
+struct cmdq_wqe_scmd {
+ struct cmdq_header header;
+ __le64 rsvd3;
+ struct cmdq_status status;
+ struct cmdq_ctrl ctrl;
+ struct cmdq_completion completion;
+ __le32 rsvd10[6];
+};
+
+struct cmdq_wqe_lcmd {
+ struct cmdq_header header;
+ struct cmdq_status status;
+ struct cmdq_ctrl ctrl;
+ struct cmdq_completion completion;
+ struct cmdq_lcmd_bufdesc buf_desc;
+};
+
+struct cmdq_wqe {
+ union {
+ struct cmdq_wqe_scmd wqe_scmd;
+ struct cmdq_wqe_lcmd wqe_lcmd;
+ };
+};
+
+static_assert(sizeof(struct cmdq_wqe) == 64);
+
+enum hinic3_cmdq_type {
+ HINIC3_CMDQ_SYNC = 0,
+ HINIC3_MAX_CMDQ_TYPES = 4
+};
+
+enum hinic3_cmdq_status {
+ HINIC3_CMDQ_ENABLE = BIT(0),
+};
+
+enum hinic3_cmdq_cmd_type {
+ HINIC3_CMD_TYPE_NONE,
+ HINIC3_CMD_TYPE_DIRECT_RESP,
+ HINIC3_CMD_TYPE_FAKE_TIMEOUT,
+ HINIC3_CMD_TYPE_TIMEOUT,
+ HINIC3_CMD_TYPE_FORCE_STOP,
+};
+
+struct hinic3_cmd_buf {
+ void *buf;
+ dma_addr_t dma_addr;
+ __le16 size;
+ refcount_t ref_cnt;
+};
+
+struct hinic3_cmdq_cmd_info {
+ enum hinic3_cmdq_cmd_type cmd_type;
+ struct completion *done;
+ int *errcode;
+ /* completion code */
+ int *cmpt_code;
+ __le64 *direct_resp;
+ u64 cmdq_msg_id;
+ struct hinic3_cmd_buf *buf_in;
+};
+
+struct hinic3_cmdq {
+ struct hinic3_wq wq;
+ enum hinic3_cmdq_type cmdq_type;
+ u8 wrapped;
+ /* synchronize command submission with completions via event queue */
+ spinlock_t cmdq_lock;
+ struct hinic3_cmdq_cmd_info *cmd_infos;
+ struct hinic3_hwdev *hwdev;
+};
+
+struct hinic3_cmdqs {
+ struct hinic3_hwdev *hwdev;
+ struct hinic3_cmdq cmdq[HINIC3_MAX_CMDQ_TYPES];
+ struct dma_pool *cmd_buf_pool;
+ /* doorbell area */
+ u8 __iomem *cmdqs_db_base;
+
+ /* When a command queue uses multiple memory pages (1-level CLA), this
+ * block holds the aggregated indirection table for all command queues
+ * of cmdqs. Not used for small cmdqs (0-level CLA).
+ */
+ dma_addr_t wq_block_paddr;
+ void *wq_block_vaddr;
+
+ u32 status;
+ u32 disable_flag;
+ u8 cmdq_num;
+};
+
+int hinic3_cmdqs_init(struct hinic3_hwdev *hwdev);
+void hinic3_cmdqs_free(struct hinic3_hwdev *hwdev);
+
+struct hinic3_cmd_buf *hinic3_alloc_cmd_buf(struct hinic3_hwdev *hwdev);
+void hinic3_free_cmd_buf(struct hinic3_hwdev *hwdev,
+ struct hinic3_cmd_buf *cmd_buf);
+void hinic3_cmdq_ceq_handler(struct hinic3_hwdev *hwdev, __le32 ceqe_data);
+
+int hinic3_cmdq_direct_resp(struct hinic3_hwdev *hwdev, u8 mod, u8 cmd,
+ struct hinic3_cmd_buf *buf_in, __le64 *out_param);
+
+void hinic3_cmdq_flush_sync_cmd(struct hinic3_hwdev *hwdev);
+int hinic3_reinit_cmdq_ctxts(struct hinic3_hwdev *hwdev);
+bool hinic3_cmdq_idle(struct hinic3_cmdq *cmdq);
+
+#endif
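
For context, a minimal caller of the interface above could look roughly like the
sketch below. This is not part of the patch: MY_MOD, MY_CMD and struct
my_cmd_req are illustrative placeholders; only hinic3_alloc_cmd_buf(),
hinic3_cmdq_buf_swab32(), hinic3_cmdq_direct_resp() and hinic3_free_cmd_buf()
are taken from the headers shown here.

/* Hypothetical caller sketch -- not part of the patch. */
#include <linux/string.h>

#include "hinic3_cmdq.h"
#include "hinic3_common.h"

static int my_send_cmd(struct hinic3_hwdev *hwdev,
		       const struct my_cmd_req *req, __le64 *out_param)
{
	struct hinic3_cmd_buf *buf;
	int err;

	buf = hinic3_alloc_cmd_buf(hwdev);
	if (!buf)
		return -ENOMEM;

	/* Copy the request and pre-swap every dword, since HW swaps each
	 * 32-bit word again when it copies the buffer from host memory.
	 */
	memcpy(buf->buf, req, sizeof(*req));
	buf->size = cpu_to_le16(sizeof(*req));
	hinic3_cmdq_buf_swab32(buf->buf, sizeof(*req));

	err = hinic3_cmdq_direct_resp(hwdev, MY_MOD, MY_CMD, buf, out_param);
	hinic3_free_cmd_buf(hwdev, buf);

	return err;
}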
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_common.c b/drivers/net/ethernet/huawei/hinic3/hinic3_common.c
new file mode 100644
index 000000000000..fe4778d152cf
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_common.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/iopoll.h>
+
+#include "hinic3_common.h"
+
+int hinic3_dma_zalloc_coherent_align(struct device *dev, u32 size, u32 align,
+ gfp_t flag,
+ struct hinic3_dma_addr_align *mem_align)
+{
+ dma_addr_t paddr, align_paddr;
+ void *vaddr, *align_vaddr;
+ u32 real_size = size;
+
+ vaddr = dma_alloc_coherent(dev, real_size, &paddr, flag);
+ if (!vaddr)
+ return -ENOMEM;
+
+ align_paddr = ALIGN(paddr, align);
+ if (align_paddr == paddr) {
+ align_vaddr = vaddr;
+ goto out;
+ }
+
+ dma_free_coherent(dev, real_size, vaddr, paddr);
+
+ /* realloc memory for align */
+ real_size = size + align;
+ vaddr = dma_alloc_coherent(dev, real_size, &paddr, flag);
+ if (!vaddr)
+ return -ENOMEM;
+
+ align_paddr = ALIGN(paddr, align);
+ align_vaddr = vaddr + (align_paddr - paddr);
+
+out:
+ mem_align->real_size = real_size;
+ mem_align->ori_vaddr = vaddr;
+ mem_align->ori_paddr = paddr;
+ mem_align->align_vaddr = align_vaddr;
+ mem_align->align_paddr = align_paddr;
+
+ return 0;
+}
+
+void hinic3_dma_free_coherent_align(struct device *dev,
+ struct hinic3_dma_addr_align *mem_align)
+{
+ dma_free_coherent(dev, mem_align->real_size,
+ mem_align->ori_vaddr, mem_align->ori_paddr);
+}
+
+int hinic3_wait_for_timeout(void *priv_data, wait_cpl_handler handler,
+ u32 wait_total_ms, u32 wait_once_us)
+{
+ enum hinic3_wait_return ret;
+ int err;
+
+ err = read_poll_timeout(handler, ret, ret == HINIC3_WAIT_PROCESS_CPL,
+ wait_once_us, wait_total_ms * USEC_PER_MSEC,
+ false, priv_data);
+
+ return err;
+}
+
+/* Data provided to/by cmdq is arranged in structs with little endian fields but
+ * every dword (32bits) should be swapped since HW swaps it again when it
+ * copies it from/to host memory.
+ */
+void hinic3_cmdq_buf_swab32(void *data, int len)
+{
+ swab32_array(data, len / sizeof(u32));
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_common.h b/drivers/net/ethernet/huawei/hinic3/hinic3_common.h
new file mode 100644
index 000000000000..a8fabfae90fb
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_common.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_COMMON_H_
+#define _HINIC3_COMMON_H_
+
+#include <linux/device.h>
+
+#define HINIC3_MIN_PAGE_SIZE 0x1000
+
+struct hinic3_dma_addr_align {
+ u32 real_size;
+
+ void *ori_vaddr;
+ dma_addr_t ori_paddr;
+
+ void *align_vaddr;
+ dma_addr_t align_paddr;
+};
+
+enum hinic3_wait_return {
+ HINIC3_WAIT_PROCESS_CPL = 0,
+ HINIC3_WAIT_PROCESS_WAITING = 1,
+};
+
+struct hinic3_sge {
+ __le32 hi_addr;
+ __le32 lo_addr;
+ __le32 len;
+ __le32 rsvd;
+};
+
+static inline void hinic3_set_sge(struct hinic3_sge *sge, dma_addr_t addr,
+ __le32 len)
+{
+ sge->hi_addr = cpu_to_le32(upper_32_bits(addr));
+ sge->lo_addr = cpu_to_le32(lower_32_bits(addr));
+ sge->len = len;
+ sge->rsvd = 0;
+}
+
+int hinic3_dma_zalloc_coherent_align(struct device *dev, u32 size, u32 align,
+ gfp_t flag,
+ struct hinic3_dma_addr_align *mem_align);
+void hinic3_dma_free_coherent_align(struct device *dev,
+ struct hinic3_dma_addr_align *mem_align);
+
+typedef enum hinic3_wait_return (*wait_cpl_handler)(void *priv_data);
+int hinic3_wait_for_timeout(void *priv_data, wait_cpl_handler handler,
+ u32 wait_total_ms, u32 wait_once_us);
+
+void hinic3_cmdq_buf_swab32(void *data, int len);
+
+#endif
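
A small usage sketch for hinic3_wait_for_timeout() (hypothetical, not part of
the patch): the handler is polled every wait_once_us microseconds until it
reports completion or wait_total_ms milliseconds elapse; my_hw_ready() stands in
for whatever condition the caller really checks.

/* Hypothetical polling sketch -- not part of the patch. */
#include "hinic3_common.h"

static enum hinic3_wait_return my_check_ready(void *priv_data)
{
	struct hinic3_hwdev *hwdev = priv_data;

	return my_hw_ready(hwdev) ? HINIC3_WAIT_PROCESS_CPL :
				    HINIC3_WAIT_PROCESS_WAITING;
}

static int my_wait_hw_ready(struct hinic3_hwdev *hwdev)
{
	/* poll every 1000us, give up after 100ms */
	return hinic3_wait_for_timeout(hwdev, my_check_ready, 100, 1000);
}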
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_csr.h b/drivers/net/ethernet/huawei/hinic3/hinic3_csr.h
new file mode 100644
index 000000000000..e7417e8efa99
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_csr.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_CSR_H_
+#define _HINIC3_CSR_H_
+
+#define HINIC3_CFG_REGS_FLAG 0x40000000
+#define HINIC3_REGS_FLAG_MASK 0x3FFFFFFF
+
+#define HINIC3_VF_CFG_REG_OFFSET 0x2000
+
+/* HW interface registers */
+#define HINIC3_CSR_FUNC_ATTR0_ADDR (HINIC3_CFG_REGS_FLAG + 0x0)
+#define HINIC3_CSR_FUNC_ATTR1_ADDR (HINIC3_CFG_REGS_FLAG + 0x4)
+#define HINIC3_CSR_FUNC_ATTR2_ADDR (HINIC3_CFG_REGS_FLAG + 0x8)
+#define HINIC3_CSR_FUNC_ATTR3_ADDR (HINIC3_CFG_REGS_FLAG + 0xC)
+#define HINIC3_CSR_FUNC_ATTR4_ADDR (HINIC3_CFG_REGS_FLAG + 0x10)
+#define HINIC3_CSR_FUNC_ATTR5_ADDR (HINIC3_CFG_REGS_FLAG + 0x14)
+#define HINIC3_CSR_FUNC_ATTR6_ADDR (HINIC3_CFG_REGS_FLAG + 0x18)
+
+#define HINIC3_FUNC_CSR_MAILBOX_DATA_OFF 0x80
+#define HINIC3_FUNC_CSR_MAILBOX_CONTROL_OFF (HINIC3_CFG_REGS_FLAG + 0x0100)
+#define HINIC3_FUNC_CSR_MAILBOX_INT_OFF (HINIC3_CFG_REGS_FLAG + 0x0104)
+#define HINIC3_FUNC_CSR_MAILBOX_RESULT_H_OFF (HINIC3_CFG_REGS_FLAG + 0x0108)
+#define HINIC3_FUNC_CSR_MAILBOX_RESULT_L_OFF (HINIC3_CFG_REGS_FLAG + 0x010C)
+
+#define HINIC3_CSR_DMA_ATTR_TBL_ADDR (HINIC3_CFG_REGS_FLAG + 0x380)
+#define HINIC3_CSR_DMA_ATTR_INDIR_IDX_ADDR (HINIC3_CFG_REGS_FLAG + 0x390)
+
+/* MSI-X registers */
+#define HINIC3_CSR_FUNC_MSI_CLR_WR_ADDR (HINIC3_CFG_REGS_FLAG + 0x58)
+
+#define HINIC3_MSI_CLR_INDIR_RESEND_TIMER_CLR_MASK BIT(0)
+#define HINIC3_MSI_CLR_INDIR_INT_MSK_SET_MASK BIT(1)
+#define HINIC3_MSI_CLR_INDIR_INT_MSK_CLR_MASK BIT(2)
+#define HINIC3_MSI_CLR_INDIR_AUTO_MSK_SET_MASK BIT(3)
+#define HINIC3_MSI_CLR_INDIR_AUTO_MSK_CLR_MASK BIT(4)
+#define HINIC3_MSI_CLR_INDIR_SIMPLE_INDIR_IDX_MASK GENMASK(31, 22)
+#define HINIC3_MSI_CLR_INDIR_SET(val, member) \
+ FIELD_PREP(HINIC3_MSI_CLR_INDIR_##member##_MASK, val)
+
+/* EQ registers */
+#define HINIC3_AEQ_INDIR_IDX_ADDR (HINIC3_CFG_REGS_FLAG + 0x210)
+#define HINIC3_CEQ_INDIR_IDX_ADDR (HINIC3_CFG_REGS_FLAG + 0x290)
+
+#define HINIC3_EQ_INDIR_IDX_ADDR(type) \
+ ((type == HINIC3_AEQ) ? HINIC3_AEQ_INDIR_IDX_ADDR : \
+ HINIC3_CEQ_INDIR_IDX_ADDR)
+
+#define HINIC3_AEQ_MTT_OFF_BASE_ADDR (HINIC3_CFG_REGS_FLAG + 0x240)
+#define HINIC3_CEQ_MTT_OFF_BASE_ADDR (HINIC3_CFG_REGS_FLAG + 0x2C0)
+
+#define HINIC3_CSR_EQ_PAGE_OFF_STRIDE 8
+
+#define HINIC3_AEQ_HI_PHYS_ADDR_REG(pg_num) \
+ (HINIC3_AEQ_MTT_OFF_BASE_ADDR + (pg_num) * \
+ HINIC3_CSR_EQ_PAGE_OFF_STRIDE)
+
+#define HINIC3_AEQ_LO_PHYS_ADDR_REG(pg_num) \
+ (HINIC3_AEQ_MTT_OFF_BASE_ADDR + (pg_num) * \
+ HINIC3_CSR_EQ_PAGE_OFF_STRIDE + 4)
+
+#define HINIC3_CEQ_HI_PHYS_ADDR_REG(pg_num) \
+ (HINIC3_CEQ_MTT_OFF_BASE_ADDR + (pg_num) * \
+ HINIC3_CSR_EQ_PAGE_OFF_STRIDE)
+
+#define HINIC3_CEQ_LO_PHYS_ADDR_REG(pg_num) \
+ (HINIC3_CEQ_MTT_OFF_BASE_ADDR + (pg_num) * \
+ HINIC3_CSR_EQ_PAGE_OFF_STRIDE + 4)
+
+#define HINIC3_CSR_AEQ_CTRL_0_ADDR (HINIC3_CFG_REGS_FLAG + 0x200)
+#define HINIC3_CSR_AEQ_CTRL_1_ADDR (HINIC3_CFG_REGS_FLAG + 0x204)
+#define HINIC3_CSR_AEQ_PROD_IDX_ADDR (HINIC3_CFG_REGS_FLAG + 0x20C)
+#define HINIC3_CSR_AEQ_CI_SIMPLE_INDIR_ADDR (HINIC3_CFG_REGS_FLAG + 0x50)
+
+#define HINIC3_CSR_CEQ_PROD_IDX_ADDR (HINIC3_CFG_REGS_FLAG + 0x28c)
+#define HINIC3_CSR_CEQ_CI_SIMPLE_INDIR_ADDR (HINIC3_CFG_REGS_FLAG + 0x54)
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_eqs.c b/drivers/net/ethernet/huawei/hinic3/hinic3_eqs.c
new file mode 100644
index 000000000000..01686472985b
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_eqs.c
@@ -0,0 +1,776 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/delay.h>
+
+#include "hinic3_csr.h"
+#include "hinic3_eqs.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+#include "hinic3_mbox.h"
+
+#define AEQ_CTRL_0_INTR_IDX_MASK GENMASK(9, 0)
+#define AEQ_CTRL_0_DMA_ATTR_MASK GENMASK(17, 12)
+#define AEQ_CTRL_0_PCI_INTF_IDX_MASK GENMASK(22, 20)
+#define AEQ_CTRL_0_INTR_MODE_MASK BIT(31)
+#define AEQ_CTRL_0_SET(val, member) \
+ FIELD_PREP(AEQ_CTRL_0_##member##_MASK, val)
+
+#define AEQ_CTRL_1_LEN_MASK GENMASK(20, 0)
+#define AEQ_CTRL_1_ELEM_SIZE_MASK GENMASK(25, 24)
+#define AEQ_CTRL_1_PAGE_SIZE_MASK GENMASK(31, 28)
+#define AEQ_CTRL_1_SET(val, member) \
+ FIELD_PREP(AEQ_CTRL_1_##member##_MASK, val)
+
+#define CEQ_CTRL_0_INTR_IDX_MASK GENMASK(9, 0)
+#define CEQ_CTRL_0_DMA_ATTR_MASK GENMASK(17, 12)
+#define CEQ_CTRL_0_LIMIT_KICK_MASK GENMASK(23, 20)
+#define CEQ_CTRL_0_PCI_INTF_IDX_MASK GENMASK(25, 24)
+#define CEQ_CTRL_0_PAGE_SIZE_MASK GENMASK(30, 27)
+#define CEQ_CTRL_0_INTR_MODE_MASK BIT(31)
+#define CEQ_CTRL_0_SET(val, member) \
+ FIELD_PREP(CEQ_CTRL_0_##member##_MASK, val)
+
+#define CEQ_CTRL_1_LEN_MASK GENMASK(19, 0)
+#define CEQ_CTRL_1_SET(val, member) \
+ FIELD_PREP(CEQ_CTRL_1_##member##_MASK, val)
+
+#define CEQE_TYPE_MASK GENMASK(25, 23)
+#define CEQE_TYPE(type) \
+ FIELD_GET(CEQE_TYPE_MASK, le32_to_cpu(type))
+
+#define CEQE_DATA_MASK GENMASK(25, 0)
+#define CEQE_DATA(data) ((data) & cpu_to_le32(CEQE_DATA_MASK))
+
+#define EQ_ELEM_DESC_TYPE_MASK GENMASK(6, 0)
+#define EQ_ELEM_DESC_SRC_MASK BIT(7)
+#define EQ_ELEM_DESC_SIZE_MASK GENMASK(15, 8)
+#define EQ_ELEM_DESC_WRAPPED_MASK BIT(31)
+#define EQ_ELEM_DESC_GET(val, member) \
+ FIELD_GET(EQ_ELEM_DESC_##member##_MASK, le32_to_cpu(val))
+
+#define EQ_CI_SIMPLE_INDIR_CI_MASK GENMASK(20, 0)
+#define EQ_CI_SIMPLE_INDIR_ARMED_MASK BIT(21)
+#define EQ_CI_SIMPLE_INDIR_AEQ_IDX_MASK GENMASK(31, 30)
+#define EQ_CI_SIMPLE_INDIR_CEQ_IDX_MASK GENMASK(31, 24)
+#define EQ_CI_SIMPLE_INDIR_SET(val, member) \
+ FIELD_PREP(EQ_CI_SIMPLE_INDIR_##member##_MASK, val)
+
+#define EQ_CI_SIMPLE_INDIR_REG_ADDR(eq) \
+ (((eq)->type == HINIC3_AEQ) ? \
+ HINIC3_CSR_AEQ_CI_SIMPLE_INDIR_ADDR : \
+ HINIC3_CSR_CEQ_CI_SIMPLE_INDIR_ADDR)
+
+#define EQ_PROD_IDX_REG_ADDR(eq) \
+ (((eq)->type == HINIC3_AEQ) ? \
+ HINIC3_CSR_AEQ_PROD_IDX_ADDR : HINIC3_CSR_CEQ_PROD_IDX_ADDR)
+
+#define EQ_HI_PHYS_ADDR_REG(type, pg_num) \
+ (((type) == HINIC3_AEQ) ? \
+ HINIC3_AEQ_HI_PHYS_ADDR_REG(pg_num) : \
+ HINIC3_CEQ_HI_PHYS_ADDR_REG(pg_num))
+
+#define EQ_LO_PHYS_ADDR_REG(type, pg_num) \
+ (((type) == HINIC3_AEQ) ? \
+ HINIC3_AEQ_LO_PHYS_ADDR_REG(pg_num) : \
+ HINIC3_CEQ_LO_PHYS_ADDR_REG(pg_num))
+
+#define EQ_MSIX_RESEND_TIMER_CLEAR 1
+
+#define HINIC3_EQ_MAX_PAGES(eq) \
+ ((eq)->type == HINIC3_AEQ ? \
+ HINIC3_AEQ_MAX_PAGES : HINIC3_CEQ_MAX_PAGES)
+
+#define HINIC3_TASK_PROCESS_EQE_LIMIT 1024
+#define HINIC3_EQ_UPDATE_CI_STEP 64
+#define HINIC3_EQS_WQ_NAME "hinic3_eqs"
+
+#define HINIC3_EQ_VALID_SHIFT 31
+#define HINIC3_EQ_WRAPPED(eq) \
+ ((eq)->wrapped << HINIC3_EQ_VALID_SHIFT)
+
+#define HINIC3_EQ_WRAPPED_SHIFT 20
+#define HINIC3_EQ_CONS_IDX(eq) \
+ ((eq)->cons_idx | ((eq)->wrapped << HINIC3_EQ_WRAPPED_SHIFT))
+
+static const struct hinic3_aeq_elem *get_curr_aeq_elem(const struct hinic3_eq *eq)
+{
+ return get_q_element(&eq->qpages, eq->cons_idx, NULL);
+}
+
+static const __be32 *get_curr_ceq_elem(const struct hinic3_eq *eq)
+{
+ return get_q_element(&eq->qpages, eq->cons_idx, NULL);
+}
+
+int hinic3_aeq_register_cb(struct hinic3_hwdev *hwdev,
+ enum hinic3_aeq_type event,
+ hinic3_aeq_event_cb hwe_cb)
+{
+ struct hinic3_aeqs *aeqs;
+
+ aeqs = hwdev->aeqs;
+ aeqs->aeq_cb[event] = hwe_cb;
+ spin_lock_init(&aeqs->aeq_lock);
+
+ return 0;
+}
+
+void hinic3_aeq_unregister_cb(struct hinic3_hwdev *hwdev,
+ enum hinic3_aeq_type event)
+{
+ struct hinic3_aeqs *aeqs;
+
+ aeqs = hwdev->aeqs;
+
+ spin_lock_bh(&aeqs->aeq_lock);
+ aeqs->aeq_cb[event] = NULL;
+ spin_unlock_bh(&aeqs->aeq_lock);
+}
+
+int hinic3_ceq_register_cb(struct hinic3_hwdev *hwdev,
+ enum hinic3_ceq_event event,
+ hinic3_ceq_event_cb callback)
+{
+ struct hinic3_ceqs *ceqs;
+
+ ceqs = hwdev->ceqs;
+ ceqs->ceq_cb[event] = callback;
+ spin_lock_init(&ceqs->ceq_lock);
+
+ return 0;
+}
+
+void hinic3_ceq_unregister_cb(struct hinic3_hwdev *hwdev,
+ enum hinic3_ceq_event event)
+{
+ struct hinic3_ceqs *ceqs;
+
+ ceqs = hwdev->ceqs;
+
+ spin_lock_bh(&ceqs->ceq_lock);
+ ceqs->ceq_cb[event] = NULL;
+ spin_unlock_bh(&ceqs->ceq_lock);
+}
+
+/* Set consumer index in the hw. */
+static void set_eq_cons_idx(struct hinic3_eq *eq, u32 arm_state)
+{
+ u32 addr = EQ_CI_SIMPLE_INDIR_REG_ADDR(eq);
+ u32 eq_wrap_ci, val;
+
+ eq_wrap_ci = HINIC3_EQ_CONS_IDX(eq);
+ val = EQ_CI_SIMPLE_INDIR_SET(arm_state, ARMED);
+ if (eq->type == HINIC3_AEQ) {
+ val = val |
+ EQ_CI_SIMPLE_INDIR_SET(eq_wrap_ci, CI) |
+ EQ_CI_SIMPLE_INDIR_SET(eq->q_id, AEQ_IDX);
+ } else {
+ val = val |
+ EQ_CI_SIMPLE_INDIR_SET(eq_wrap_ci, CI) |
+ EQ_CI_SIMPLE_INDIR_SET(eq->q_id, CEQ_IDX);
+ }
+
+ hinic3_hwif_write_reg(eq->hwdev->hwif, addr, val);
+}
+
+static struct hinic3_ceqs *ceq_to_ceqs(const struct hinic3_eq *eq)
+{
+ return container_of(eq, struct hinic3_ceqs, ceq[eq->q_id]);
+}
+
+static void ceq_event_handler(struct hinic3_ceqs *ceqs, __le32 ceqe)
+{
+ enum hinic3_ceq_event event = CEQE_TYPE(ceqe);
+ struct hinic3_hwdev *hwdev = ceqs->hwdev;
+ __le32 ceqe_data = CEQE_DATA(ceqe);
+
+ if (event >= HINIC3_MAX_CEQ_EVENTS) {
+ dev_warn(hwdev->dev, "Ceq unknown event:%d, ceqe data: 0x%x\n",
+ event, ceqe_data);
+ return;
+ }
+
+ spin_lock_bh(&ceqs->ceq_lock);
+ if (ceqs->ceq_cb[event])
+ ceqs->ceq_cb[event](hwdev, ceqe_data);
+
+ spin_unlock_bh(&ceqs->ceq_lock);
+}
+
+static struct hinic3_aeqs *aeq_to_aeqs(const struct hinic3_eq *eq)
+{
+ return container_of(eq, struct hinic3_aeqs, aeq[eq->q_id]);
+}
+
+static void aeq_event_handler(struct hinic3_aeqs *aeqs, __le32 aeqe,
+ const struct hinic3_aeq_elem *aeqe_pos)
+{
+ struct hinic3_hwdev *hwdev = aeqs->hwdev;
+ u8 data[HINIC3_AEQE_DATA_SIZE], size;
+ enum hinic3_aeq_type event;
+ hinic3_aeq_event_cb hwe_cb;
+
+ if (EQ_ELEM_DESC_GET(aeqe, SRC))
+ return;
+
+ event = EQ_ELEM_DESC_GET(aeqe, TYPE);
+ if (event >= HINIC3_MAX_AEQ_EVENTS) {
+ dev_warn(hwdev->dev, "Aeq unknown event:%d\n", event);
+ return;
+ }
+
+ memcpy(data, aeqe_pos->aeqe_data, HINIC3_AEQE_DATA_SIZE);
+ swab32_array((u32 *)data, HINIC3_AEQE_DATA_SIZE / sizeof(u32));
+ size = EQ_ELEM_DESC_GET(aeqe, SIZE);
+
+ spin_lock_bh(&aeqs->aeq_lock);
+ hwe_cb = aeqs->aeq_cb[event];
+ if (hwe_cb)
+ hwe_cb(aeqs->hwdev, data, size);
+ spin_unlock_bh(&aeqs->aeq_lock);
+}
+
+static int aeq_irq_handler(struct hinic3_eq *eq)
+{
+ const struct hinic3_aeq_elem *aeqe_pos;
+ struct hinic3_aeqs *aeqs;
+ u32 i, eqe_cnt = 0;
+ __le32 aeqe;
+
+ aeqs = aeq_to_aeqs(eq);
+ for (i = 0; i < HINIC3_TASK_PROCESS_EQE_LIMIT; i++) {
+ aeqe_pos = get_curr_aeq_elem(eq);
+ aeqe = (__force __le32)swab32((__force __u32)aeqe_pos->desc);
+ /* HW toggles the wrapped bit when it adds an eq element */
+ if (EQ_ELEM_DESC_GET(aeqe, WRAPPED) == eq->wrapped)
+ return 0;
+
+ /* Prevent speculative reads from element */
+ dma_rmb();
+ aeq_event_handler(aeqs, aeqe, aeqe_pos);
+ eq->cons_idx++;
+ if (eq->cons_idx == eq->eq_len) {
+ eq->cons_idx = 0;
+ eq->wrapped = !eq->wrapped;
+ }
+
+ if (++eqe_cnt >= HINIC3_EQ_UPDATE_CI_STEP) {
+ eqe_cnt = 0;
+ set_eq_cons_idx(eq, HINIC3_EQ_NOT_ARMED);
+ }
+ }
+
+ return -EAGAIN;
+}
+
+static int ceq_irq_handler(struct hinic3_eq *eq)
+{
+ struct hinic3_ceqs *ceqs;
+ u32 eqe_cnt = 0;
+ __be32 ceqe_raw;
+ __le32 ceqe;
+ u32 i;
+
+ ceqs = ceq_to_ceqs(eq);
+ for (i = 0; i < HINIC3_TASK_PROCESS_EQE_LIMIT; i++) {
+ ceqe_raw = *get_curr_ceq_elem(eq);
+ ceqe = (__force __le32)swab32((__force __u32)ceqe_raw);
+
+ /* HW toggles the wrapped bit when it adds an eq element */
+ if (EQ_ELEM_DESC_GET(ceqe, WRAPPED) == eq->wrapped)
+ return 0;
+
+ ceq_event_handler(ceqs, ceqe);
+ eq->cons_idx++;
+ if (eq->cons_idx == eq->eq_len) {
+ eq->cons_idx = 0;
+ eq->wrapped = !eq->wrapped;
+ }
+
+ if (++eqe_cnt >= HINIC3_EQ_UPDATE_CI_STEP) {
+ eqe_cnt = 0;
+ set_eq_cons_idx(eq, HINIC3_EQ_NOT_ARMED);
+ }
+ }
+
+ return -EAGAIN;
+}
+
+static void reschedule_aeq_handler(struct hinic3_eq *eq)
+{
+ struct hinic3_aeqs *aeqs = aeq_to_aeqs(eq);
+
+ queue_work(aeqs->workq, &eq->aeq_work);
+}
+
+static int eq_irq_handler(struct hinic3_eq *eq)
+{
+ int err;
+
+ if (eq->type == HINIC3_AEQ)
+ err = aeq_irq_handler(eq);
+ else
+ err = ceq_irq_handler(eq);
+
+ set_eq_cons_idx(eq, err ? HINIC3_EQ_NOT_ARMED :
+ HINIC3_EQ_ARMED);
+
+ return err;
+}
+
+static void aeq_irq_work(struct work_struct *work)
+{
+ struct hinic3_eq *eq = container_of(work, struct hinic3_eq, aeq_work);
+ int err;
+
+ err = eq_irq_handler(eq);
+ if (err)
+ reschedule_aeq_handler(eq);
+}
+
+static irqreturn_t aeq_interrupt(int irq, void *data)
+{
+ struct workqueue_struct *workq;
+ struct hinic3_eq *aeq = data;
+ struct hinic3_hwdev *hwdev;
+ struct hinic3_aeqs *aeqs;
+
+ aeqs = aeq_to_aeqs(aeq);
+ hwdev = aeq->hwdev;
+
+ /* clear resend timer cnt register */
+ workq = aeqs->workq;
+ hinic3_msix_intr_clear_resend_bit(hwdev, aeq->msix_entry_idx,
+ EQ_MSIX_RESEND_TIMER_CLEAR);
+ queue_work(workq, &aeq->aeq_work);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ceq_interrupt(int irq, void *data)
+{
+ struct hinic3_eq *ceq = data;
+ int err;
+
+ /* clear resend timer counters */
+ hinic3_msix_intr_clear_resend_bit(ceq->hwdev, ceq->msix_entry_idx,
+ EQ_MSIX_RESEND_TIMER_CLEAR);
+ err = eq_irq_handler(ceq);
+ if (err)
+ return IRQ_NONE;
+
+ return IRQ_HANDLED;
+}
+
+static int hinic3_set_ceq_ctrl_reg(struct hinic3_hwdev *hwdev, u16 q_id,
+ u32 ctrl0, u32 ctrl1)
+{
+ struct comm_cmd_set_ceq_ctrl_reg ceq_ctrl = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ ceq_ctrl.func_id = hinic3_global_func_id(hwdev);
+ ceq_ctrl.q_id = q_id;
+ ceq_ctrl.ctrl0 = ctrl0;
+ ceq_ctrl.ctrl1 = ctrl1;
+
+ mgmt_msg_params_init_default(&msg_params, &ceq_ctrl, sizeof(ceq_ctrl));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_SET_CEQ_CTRL_REG, &msg_params);
+ if (err || ceq_ctrl.head.status) {
+ dev_err(hwdev->dev, "Failed to set ceq %u ctrl reg, err: %d status: 0x%x\n",
+ q_id, err, ceq_ctrl.head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int set_eq_ctrls(struct hinic3_eq *eq)
+{
+ struct hinic3_hwif *hwif = eq->hwdev->hwif;
+ struct hinic3_queue_pages *qpages;
+ u8 pci_intf_idx, elem_size;
+ u32 mask, ctrl0, ctrl1;
+ u32 page_size_val;
+ int err;
+
+ qpages = &eq->qpages;
+ page_size_val = ilog2(qpages->page_size / HINIC3_MIN_PAGE_SIZE);
+ pci_intf_idx = hwif->attr.pci_intf_idx;
+
+ if (eq->type == HINIC3_AEQ) {
+ /* set ctrl0 using read-modify-write */
+ mask = AEQ_CTRL_0_INTR_IDX_MASK |
+ AEQ_CTRL_0_DMA_ATTR_MASK |
+ AEQ_CTRL_0_PCI_INTF_IDX_MASK |
+ AEQ_CTRL_0_INTR_MODE_MASK;
+ ctrl0 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_AEQ_CTRL_0_ADDR);
+ ctrl0 = (ctrl0 & ~mask) |
+ AEQ_CTRL_0_SET(eq->msix_entry_idx, INTR_IDX) |
+ AEQ_CTRL_0_SET(0, DMA_ATTR) |
+ AEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX) |
+ AEQ_CTRL_0_SET(HINIC3_INTR_MODE_ARMED, INTR_MODE);
+ hinic3_hwif_write_reg(hwif, HINIC3_CSR_AEQ_CTRL_0_ADDR, ctrl0);
+
+ /* HW expects log2(number of 32 byte units). */
+ elem_size = qpages->elem_size_shift - 5;
+ ctrl1 = AEQ_CTRL_1_SET(eq->eq_len, LEN) |
+ AEQ_CTRL_1_SET(elem_size, ELEM_SIZE) |
+ AEQ_CTRL_1_SET(page_size_val, PAGE_SIZE);
+ hinic3_hwif_write_reg(hwif, HINIC3_CSR_AEQ_CTRL_1_ADDR, ctrl1);
+ } else {
+ ctrl0 = CEQ_CTRL_0_SET(eq->msix_entry_idx, INTR_IDX) |
+ CEQ_CTRL_0_SET(0, DMA_ATTR) |
+ CEQ_CTRL_0_SET(0, LIMIT_KICK) |
+ CEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX) |
+ CEQ_CTRL_0_SET(page_size_val, PAGE_SIZE) |
+ CEQ_CTRL_0_SET(HINIC3_INTR_MODE_ARMED, INTR_MODE);
+
+ ctrl1 = CEQ_CTRL_1_SET(eq->eq_len, LEN);
+
+ /* set ceq ctrl reg through mgmt cpu */
+ err = hinic3_set_ceq_ctrl_reg(eq->hwdev, eq->q_id, ctrl0,
+ ctrl1);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void ceq_elements_init(struct hinic3_eq *eq, u32 init_val)
+{
+ __be32 *ceqe;
+ u32 i;
+
+ for (i = 0; i < eq->eq_len; i++) {
+ ceqe = get_q_element(&eq->qpages, i, NULL);
+ *ceqe = cpu_to_be32(init_val);
+ }
+
+ wmb(); /* ensure ceq element init writes complete before HW access */
+}
+
+static void aeq_elements_init(struct hinic3_eq *eq, u32 init_val)
+{
+ struct hinic3_aeq_elem *aeqe;
+ u32 i;
+
+ for (i = 0; i < eq->eq_len; i++) {
+ aeqe = get_q_element(&eq->qpages, i, NULL);
+ aeqe->desc = cpu_to_be32(init_val);
+ }
+
+ wmb(); /* ensure aeq element init writes complete before HW access */
+}
+
+static void eq_elements_init(struct hinic3_eq *eq, u32 init_val)
+{
+ if (eq->type == HINIC3_AEQ)
+ aeq_elements_init(eq, init_val);
+ else
+ ceq_elements_init(eq, init_val);
+}
+
+static int alloc_eq_pages(struct hinic3_eq *eq)
+{
+ struct hinic3_hwif *hwif = eq->hwdev->hwif;
+ struct hinic3_queue_pages *qpages;
+ dma_addr_t page_paddr;
+ u32 reg, init_val;
+ u16 pg_idx;
+ int err;
+
+ qpages = &eq->qpages;
+ err = hinic3_queue_pages_alloc(eq->hwdev, qpages, HINIC3_MIN_PAGE_SIZE);
+ if (err)
+ return err;
+
+ for (pg_idx = 0; pg_idx < qpages->num_pages; pg_idx++) {
+ page_paddr = qpages->pages[pg_idx].align_paddr;
+ reg = EQ_HI_PHYS_ADDR_REG(eq->type, pg_idx);
+ hinic3_hwif_write_reg(hwif, reg, upper_32_bits(page_paddr));
+ reg = EQ_LO_PHYS_ADDR_REG(eq->type, pg_idx);
+ hinic3_hwif_write_reg(hwif, reg, lower_32_bits(page_paddr));
+ }
+
+ init_val = HINIC3_EQ_WRAPPED(eq);
+ eq_elements_init(eq, init_val);
+
+ return 0;
+}
+
+static void eq_calc_page_size_and_num(struct hinic3_eq *eq, u32 elem_size)
+{
+ u32 max_pages, min_page_size, page_size, total_size;
+
+ /* No complicated arithmetic is needed: all values are powers of 2, so
+ * multiplications and divisions also yield powers of 2 with no
+ * remainder.
+ */
+ max_pages = HINIC3_EQ_MAX_PAGES(eq);
+ min_page_size = HINIC3_MIN_PAGE_SIZE;
+ total_size = eq->eq_len * elem_size;
+
+ if (total_size <= max_pages * min_page_size)
+ page_size = min_page_size;
+ else
+ page_size = total_size / max_pages;
+
+ hinic3_queue_pages_init(&eq->qpages, eq->eq_len, page_size, elem_size);
+}
+
+static int request_eq_irq(struct hinic3_eq *eq)
+{
+ int err;
+
+ if (eq->type == HINIC3_AEQ) {
+ INIT_WORK(&eq->aeq_work, aeq_irq_work);
+ snprintf(eq->irq_name, sizeof(eq->irq_name),
+ "hinic3_aeq%u@pci:%s", eq->q_id,
+ pci_name(eq->hwdev->pdev));
+ err = request_irq(eq->irq_id, aeq_interrupt, 0,
+ eq->irq_name, eq);
+ } else {
+ snprintf(eq->irq_name, sizeof(eq->irq_name),
+ "hinic3_ceq%u@pci:%s", eq->q_id,
+ pci_name(eq->hwdev->pdev));
+ err = request_threaded_irq(eq->irq_id, NULL, ceq_interrupt,
+ IRQF_ONESHOT, eq->irq_name, eq);
+ }
+
+ return err;
+}
+
+static void reset_eq(struct hinic3_eq *eq)
+{
+ /* clear eq_len to force eqe drop in hardware */
+ if (eq->type == HINIC3_AEQ)
+ hinic3_hwif_write_reg(eq->hwdev->hwif,
+ HINIC3_CSR_AEQ_CTRL_1_ADDR, 0);
+ else
+ hinic3_set_ceq_ctrl_reg(eq->hwdev, eq->q_id, 0, 0);
+
+ hinic3_hwif_write_reg(eq->hwdev->hwif, EQ_PROD_IDX_REG_ADDR(eq), 0);
+}
+
+static int init_eq(struct hinic3_eq *eq, struct hinic3_hwdev *hwdev, u16 q_id,
+ u32 q_len, enum hinic3_eq_type type,
+ struct msix_entry *msix_entry)
+{
+ u32 elem_size;
+ int err;
+
+ eq->hwdev = hwdev;
+ eq->q_id = q_id;
+ eq->type = type;
+ eq->eq_len = q_len;
+
+ /* Indirect access should set q_id first */
+ hinic3_hwif_write_reg(hwdev->hwif, HINIC3_EQ_INDIR_IDX_ADDR(eq->type),
+ eq->q_id);
+
+ reset_eq(eq);
+
+ eq->cons_idx = 0;
+ eq->wrapped = 0;
+
+ elem_size = (type == HINIC3_AEQ) ? HINIC3_AEQE_SIZE : HINIC3_CEQE_SIZE;
+ eq_calc_page_size_and_num(eq, elem_size);
+
+ err = alloc_eq_pages(eq);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to allocate pages for eq\n");
+ return err;
+ }
+
+ eq->msix_entry_idx = msix_entry->entry;
+ eq->irq_id = msix_entry->vector;
+
+ err = set_eq_ctrls(eq);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to set ctrls for eq\n");
+ goto err_free_queue_pages;
+ }
+
+ set_eq_cons_idx(eq, HINIC3_EQ_ARMED);
+
+ err = request_eq_irq(eq);
+ if (err) {
+ dev_err(hwdev->dev,
+ "Failed to request irq for the eq, err: %d\n", err);
+ goto err_free_queue_pages;
+ }
+
+ hinic3_set_msix_state(hwdev, eq->msix_entry_idx, HINIC3_MSIX_DISABLE);
+
+ return 0;
+
+err_free_queue_pages:
+ hinic3_queue_pages_free(hwdev, &eq->qpages);
+
+ return err;
+}
+
+static void remove_eq(struct hinic3_eq *eq)
+{
+ hinic3_set_msix_state(eq->hwdev, eq->msix_entry_idx,
+ HINIC3_MSIX_DISABLE);
+ free_irq(eq->irq_id, eq);
+ /* Indirect access should set q_id first */
+ hinic3_hwif_write_reg(eq->hwdev->hwif,
+ HINIC3_EQ_INDIR_IDX_ADDR(eq->type),
+ eq->q_id);
+
+ if (eq->type == HINIC3_AEQ) {
+ disable_work_sync(&eq->aeq_work);
+ /* clear eq_len to avoid hw access host memory */
+ hinic3_hwif_write_reg(eq->hwdev->hwif,
+ HINIC3_CSR_AEQ_CTRL_1_ADDR, 0);
+ } else {
+ hinic3_set_ceq_ctrl_reg(eq->hwdev, eq->q_id, 0, 0);
+ }
+
+ /* update consumer index to avoid invalid interrupt */
+ eq->cons_idx = hinic3_hwif_read_reg(eq->hwdev->hwif,
+ EQ_PROD_IDX_REG_ADDR(eq));
+ set_eq_cons_idx(eq, HINIC3_EQ_NOT_ARMED);
+ hinic3_queue_pages_free(eq->hwdev, &eq->qpages);
+}
+
+int hinic3_aeqs_init(struct hinic3_hwdev *hwdev, u16 num_aeqs,
+ struct msix_entry *msix_entries)
+{
+ struct hinic3_aeqs *aeqs;
+ u16 q_id;
+ int err;
+
+ aeqs = kzalloc(sizeof(*aeqs), GFP_KERNEL);
+ if (!aeqs)
+ return -ENOMEM;
+
+ hwdev->aeqs = aeqs;
+ aeqs->hwdev = hwdev;
+ aeqs->num_aeqs = num_aeqs;
+ aeqs->workq = alloc_workqueue(HINIC3_EQS_WQ_NAME, WQ_MEM_RECLAIM,
+ HINIC3_MAX_AEQS);
+ if (!aeqs->workq) {
+ dev_err(hwdev->dev, "Failed to initialize aeq workqueue\n");
+ err = -ENOMEM;
+ goto err_free_aeqs;
+ }
+
+ for (q_id = 0; q_id < num_aeqs; q_id++) {
+ err = init_eq(&aeqs->aeq[q_id], hwdev, q_id,
+ HINIC3_DEFAULT_AEQ_LEN, HINIC3_AEQ,
+ &msix_entries[q_id]);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init aeq %u\n",
+ q_id);
+ goto err_remove_eqs;
+ }
+ }
+ for (q_id = 0; q_id < num_aeqs; q_id++)
+ hinic3_set_msix_state(hwdev, aeqs->aeq[q_id].msix_entry_idx,
+ HINIC3_MSIX_ENABLE);
+
+ return 0;
+
+err_remove_eqs:
+ while (q_id > 0) {
+ q_id--;
+ remove_eq(&aeqs->aeq[q_id]);
+ }
+
+ destroy_workqueue(aeqs->workq);
+
+err_free_aeqs:
+ kfree(aeqs);
+
+ return err;
+}
+
+void hinic3_aeqs_free(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_aeqs *aeqs = hwdev->aeqs;
+ enum hinic3_aeq_type aeq_event;
+ struct hinic3_eq *eq;
+ u16 q_id;
+
+ for (q_id = 0; q_id < aeqs->num_aeqs; q_id++) {
+ eq = aeqs->aeq + q_id;
+ remove_eq(eq);
+ hinic3_free_irq(hwdev, eq->irq_id);
+ }
+
+ for (aeq_event = 0; aeq_event < HINIC3_MAX_AEQ_EVENTS; aeq_event++)
+ hinic3_aeq_unregister_cb(hwdev, aeq_event);
+
+ destroy_workqueue(aeqs->workq);
+
+ kfree(aeqs);
+}
+
+int hinic3_ceqs_init(struct hinic3_hwdev *hwdev, u16 num_ceqs,
+ struct msix_entry *msix_entries)
+{
+ struct hinic3_ceqs *ceqs;
+ u16 q_id;
+ int err;
+
+ ceqs = kzalloc(sizeof(*ceqs), GFP_KERNEL);
+ if (!ceqs)
+ return -ENOMEM;
+
+ hwdev->ceqs = ceqs;
+ ceqs->hwdev = hwdev;
+ ceqs->num_ceqs = num_ceqs;
+
+ for (q_id = 0; q_id < num_ceqs; q_id++) {
+ err = init_eq(&ceqs->ceq[q_id], hwdev, q_id,
+ HINIC3_DEFAULT_CEQ_LEN, HINIC3_CEQ,
+ &msix_entries[q_id]);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init ceq %u\n",
+ q_id);
+ goto err_free_ceqs;
+ }
+ }
+ for (q_id = 0; q_id < num_ceqs; q_id++)
+ hinic3_set_msix_state(hwdev, ceqs->ceq[q_id].msix_entry_idx,
+ HINIC3_MSIX_ENABLE);
+
+ return 0;
+
+err_free_ceqs:
+ while (q_id > 0) {
+ q_id--;
+ remove_eq(&ceqs->ceq[q_id]);
+ }
+
+ kfree(ceqs);
+
+ return err;
+}
+
+void hinic3_ceqs_free(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_ceqs *ceqs = hwdev->ceqs;
+ enum hinic3_ceq_event ceq_event;
+ struct hinic3_eq *eq;
+ u16 q_id;
+
+ for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) {
+ eq = ceqs->ceq + q_id;
+ remove_eq(eq);
+ hinic3_free_irq(hwdev, eq->irq_id);
+ }
+
+ for (ceq_event = 0; ceq_event < HINIC3_MAX_CEQ_EVENTS; ceq_event++)
+ hinic3_ceq_unregister_cb(hwdev, ceq_event);
+
+ kfree(ceqs);
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_eqs.h b/drivers/net/ethernet/huawei/hinic3/hinic3_eqs.h
new file mode 100644
index 000000000000..005a6e0745b3
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_eqs.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_EQS_H_
+#define _HINIC3_EQS_H_
+
+#include <linux/interrupt.h>
+
+#include "hinic3_hw_cfg.h"
+#include "hinic3_queue_common.h"
+
+#define HINIC3_MAX_AEQS 4
+#define HINIC3_MAX_CEQS 32
+
+#define HINIC3_AEQ_MAX_PAGES 4
+#define HINIC3_CEQ_MAX_PAGES 8
+
+#define HINIC3_AEQE_SIZE 64
+#define HINIC3_CEQE_SIZE 4
+
+#define HINIC3_AEQE_DESC_SIZE 4
+#define HINIC3_AEQE_DATA_SIZE (HINIC3_AEQE_SIZE - HINIC3_AEQE_DESC_SIZE)
+
+#define HINIC3_DEFAULT_AEQ_LEN 0x10000
+#define HINIC3_DEFAULT_CEQ_LEN 0x10000
+
+#define HINIC3_EQ_IRQ_NAME_LEN 64
+
+#define HINIC3_EQ_USLEEP_LOW_BOUND 900
+#define HINIC3_EQ_USLEEP_HIGH_BOUND 1000
+
+enum hinic3_eq_type {
+ HINIC3_AEQ = 0,
+ HINIC3_CEQ = 1,
+};
+
+enum hinic3_eq_intr_mode {
+ HINIC3_INTR_MODE_ARMED = 0,
+ HINIC3_INTR_MODE_ALWAYS = 1,
+};
+
+enum hinic3_eq_ci_arm_state {
+ HINIC3_EQ_NOT_ARMED = 0,
+ HINIC3_EQ_ARMED = 1,
+};
+
+struct hinic3_eq {
+ struct hinic3_hwdev *hwdev;
+ struct hinic3_queue_pages qpages;
+ u16 q_id;
+ enum hinic3_eq_type type;
+ u32 eq_len;
+ u32 cons_idx;
+ u8 wrapped;
+ u32 irq_id;
+ u16 msix_entry_idx;
+ char irq_name[HINIC3_EQ_IRQ_NAME_LEN];
+ struct work_struct aeq_work;
+};
+
+struct hinic3_aeq_elem {
+ u8 aeqe_data[HINIC3_AEQE_DATA_SIZE];
+ __be32 desc;
+};
+
+enum hinic3_aeq_type {
+ HINIC3_HW_INTER_INT = 0,
+ HINIC3_MBX_FROM_FUNC = 1,
+ HINIC3_MSG_FROM_FW = 2,
+ HINIC3_MAX_AEQ_EVENTS = 6,
+};
+
+typedef void (*hinic3_aeq_event_cb)(struct hinic3_hwdev *hwdev, u8 *data,
+ u8 size);
+
+struct hinic3_aeqs {
+ struct hinic3_hwdev *hwdev;
+ hinic3_aeq_event_cb aeq_cb[HINIC3_MAX_AEQ_EVENTS];
+ struct hinic3_eq aeq[HINIC3_MAX_AEQS];
+ u16 num_aeqs;
+ struct workqueue_struct *workq;
+ /* lock for aeq event flag */
+ spinlock_t aeq_lock;
+};
+
+enum hinic3_ceq_event {
+ HINIC3_CMDQ = 3,
+ HINIC3_MAX_CEQ_EVENTS = 6,
+};
+
+typedef void (*hinic3_ceq_event_cb)(struct hinic3_hwdev *hwdev,
+ __le32 ceqe_data);
+
+struct hinic3_ceqs {
+ struct hinic3_hwdev *hwdev;
+
+ hinic3_ceq_event_cb ceq_cb[HINIC3_MAX_CEQ_EVENTS];
+
+ struct hinic3_eq ceq[HINIC3_MAX_CEQS];
+ u16 num_ceqs;
+ /* lock for ceq event flag */
+ spinlock_t ceq_lock;
+};
+
+int hinic3_aeqs_init(struct hinic3_hwdev *hwdev, u16 num_aeqs,
+ struct msix_entry *msix_entries);
+void hinic3_aeqs_free(struct hinic3_hwdev *hwdev);
+int hinic3_aeq_register_cb(struct hinic3_hwdev *hwdev,
+ enum hinic3_aeq_type event,
+ hinic3_aeq_event_cb hwe_cb);
+void hinic3_aeq_unregister_cb(struct hinic3_hwdev *hwdev,
+ enum hinic3_aeq_type event);
+int hinic3_ceqs_init(struct hinic3_hwdev *hwdev, u16 num_ceqs,
+ struct msix_entry *msix_entries);
+void hinic3_ceqs_free(struct hinic3_hwdev *hwdev);
+int hinic3_ceq_register_cb(struct hinic3_hwdev *hwdev,
+ enum hinic3_ceq_event event,
+ hinic3_ceq_event_cb callback);
+void hinic3_ceq_unregister_cb(struct hinic3_hwdev *hwdev,
+ enum hinic3_ceq_event event);
+
+#endif
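
The cmdq code earlier in this patch consumes these hooks by attaching its
completion handler to the HINIC3_CMDQ event. A sketch of that registration (an
assumption about how the driver wires it up elsewhere in the series, not shown
in this hunk):

/* Sketch (assumption) -- not part of this hunk. */
#include "hinic3_cmdq.h"
#include "hinic3_eqs.h"

static int my_register_cmdq_ceq_cb(struct hinic3_hwdev *hwdev)
{
	/* hinic3_cmdq_ceq_handler() matches the hinic3_ceq_event_cb type */
	return hinic3_ceq_register_cb(hwdev, HINIC3_CMDQ,
				      hinic3_cmdq_ceq_handler);
}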
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.c b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.c
new file mode 100644
index 000000000000..7827c1f626db
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.c
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/device.h>
+
+#include "hinic3_hw_cfg.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+#include "hinic3_mbox.h"
+
+#define HINIC3_CFG_MAX_QP 256
+
+static void hinic3_parse_pub_res_cap(struct hinic3_hwdev *hwdev,
+ struct hinic3_dev_cap *cap,
+ const struct cfg_cmd_dev_cap *dev_cap,
+ enum hinic3_func_type type)
+{
+ cap->port_id = dev_cap->port_id;
+ cap->supp_svcs_bitmap = dev_cap->svc_cap_en;
+}
+
+static void hinic3_parse_l2nic_res_cap(struct hinic3_hwdev *hwdev,
+ struct hinic3_dev_cap *cap,
+ const struct cfg_cmd_dev_cap *dev_cap,
+ enum hinic3_func_type type)
+{
+ struct hinic3_nic_service_cap *nic_svc_cap = &cap->nic_svc_cap;
+
+ nic_svc_cap->max_sqs = min(dev_cap->nic_max_sq_id + 1,
+ HINIC3_CFG_MAX_QP);
+}
+
+static void hinic3_parse_dev_cap(struct hinic3_hwdev *hwdev,
+ const struct cfg_cmd_dev_cap *dev_cap,
+ enum hinic3_func_type type)
+{
+ struct hinic3_dev_cap *cap = &hwdev->cfg_mgmt->cap;
+
+ /* Public resource */
+ hinic3_parse_pub_res_cap(hwdev, cap, dev_cap, type);
+
+ /* L2 NIC resource */
+ if (hinic3_support_nic(hwdev))
+ hinic3_parse_l2nic_res_cap(hwdev, cap, dev_cap, type);
+}
+
+static int get_cap_from_fw(struct hinic3_hwdev *hwdev,
+ enum hinic3_func_type type)
+{
+ struct mgmt_msg_params msg_params = {};
+ struct cfg_cmd_dev_cap dev_cap = {};
+ int err;
+
+ dev_cap.func_id = hinic3_global_func_id(hwdev);
+
+ mgmt_msg_params_init_default(&msg_params, &dev_cap, sizeof(dev_cap));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_CFGM,
+ CFG_CMD_GET_DEV_CAP, &msg_params);
+ if (err || dev_cap.head.status) {
+ dev_err(hwdev->dev,
+ "Failed to get capability from FW, err: %d, status: 0x%x\n",
+ err, dev_cap.head.status);
+ return -EIO;
+ }
+
+ hinic3_parse_dev_cap(hwdev, &dev_cap, type);
+
+ return 0;
+}
+
+static int hinic3_init_irq_info(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_cfg_mgmt_info *cfg_mgmt = hwdev->cfg_mgmt;
+ struct hinic3_hwif *hwif = hwdev->hwif;
+ u16 intr_num = hwif->attr.num_irqs;
+ struct hinic3_irq_info *irq_info;
+ u16 intr_needed;
+
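+	/* With msix_flex_en the function needs one vector per AEQ, CEQ and
+	 * SQ; otherwise every HW-advertised vector is used.
+	 */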
+ intr_needed = hwif->attr.msix_flex_en ? (hwif->attr.num_aeqs +
+ hwif->attr.num_ceqs + hwif->attr.num_sq) : intr_num;
+ if (intr_needed > intr_num) {
+ dev_warn(hwdev->dev, "Irq num cfg %d is less than the needed irq num %d msix_flex_en %d\n",
+ intr_num, intr_needed, hwdev->hwif->attr.msix_flex_en);
+ intr_needed = intr_num;
+ }
+
+ irq_info = &cfg_mgmt->irq_info;
+ irq_info->irq = kcalloc(intr_num, sizeof(struct hinic3_irq),
+ GFP_KERNEL);
+ if (!irq_info->irq)
+ return -ENOMEM;
+
+ irq_info->num_irq_hw = intr_needed;
+ mutex_init(&irq_info->irq_mutex);
+
+ return 0;
+}
+
+static int hinic3_init_irq_alloc_info(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_cfg_mgmt_info *cfg_mgmt = hwdev->cfg_mgmt;
+ struct hinic3_irq *irq = cfg_mgmt->irq_info.irq;
+ u16 nreq = cfg_mgmt->irq_info.num_irq_hw;
+ struct pci_dev *pdev = hwdev->pdev;
+ int actual_irq;
+ u16 i;
+
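+	/* Request up to num_irq_hw MSI-X vectors; accept any partial grant of
+	 * at least two vectors.
+	 */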
+ actual_irq = pci_alloc_irq_vectors(pdev, 2, nreq, PCI_IRQ_MSIX);
+ if (actual_irq < 0) {
+ dev_err(hwdev->dev, "Alloc msix entries with threshold 2 failed. actual_irq: %d\n",
+ actual_irq);
+ return -ENOMEM;
+ }
+
+ nreq = actual_irq;
+ cfg_mgmt->irq_info.num_irq = nreq;
+
+ for (i = 0; i < nreq; ++i) {
+ irq[i].msix_entry_idx = i;
+ irq[i].irq_id = pci_irq_vector(pdev, i);
+ irq[i].allocated = false;
+ }
+
+ return 0;
+}
+
+int hinic3_init_cfg_mgmt(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_cfg_mgmt_info *cfg_mgmt;
+ int err;
+
+ cfg_mgmt = kzalloc(sizeof(*cfg_mgmt), GFP_KERNEL);
+ if (!cfg_mgmt)
+ return -ENOMEM;
+
+ hwdev->cfg_mgmt = cfg_mgmt;
+
+ err = hinic3_init_irq_info(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init hinic3_irq_mgmt_info, err: %d\n",
+ err);
+ goto err_free_cfg_mgmt;
+ }
+
+ err = hinic3_init_irq_alloc_info(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init hinic3_irq_info, err: %d\n",
+ err);
+ goto err_free_irq_info;
+ }
+
+ return 0;
+
+err_free_irq_info:
+ kfree(cfg_mgmt->irq_info.irq);
+ cfg_mgmt->irq_info.irq = NULL;
+err_free_cfg_mgmt:
+ kfree(cfg_mgmt);
+
+ return err;
+}
+
+void hinic3_free_cfg_mgmt(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_cfg_mgmt_info *cfg_mgmt = hwdev->cfg_mgmt;
+
+ pci_free_irq_vectors(hwdev->pdev);
+ kfree(cfg_mgmt->irq_info.irq);
+ cfg_mgmt->irq_info.irq = NULL;
+ kfree(cfg_mgmt);
+}
+
+int hinic3_alloc_irqs(struct hinic3_hwdev *hwdev, u16 num,
+ struct msix_entry *alloc_arr, u16 *act_num)
+{
+ struct hinic3_irq_info *irq_info;
+ struct hinic3_irq *curr;
+ u16 i, found = 0;
+
+ irq_info = &hwdev->cfg_mgmt->irq_info;
+ mutex_lock(&irq_info->irq_mutex);
+ for (i = 0; i < irq_info->num_irq && found < num; i++) {
+ curr = irq_info->irq + i;
+ if (curr->allocated)
+ continue;
+ curr->allocated = true;
+ alloc_arr[found].vector = curr->irq_id;
+ alloc_arr[found].entry = curr->msix_entry_idx;
+ found++;
+ }
+ mutex_unlock(&irq_info->irq_mutex);
+
+ *act_num = found;
+
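+	/* A partial allocation is reported through act_num; only failing to
+	 * reserve any vector at all is treated as an error.
+	 */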
+ return found == 0 ? -ENOMEM : 0;
+}
+
+void hinic3_free_irq(struct hinic3_hwdev *hwdev, u32 irq_id)
+{
+ struct hinic3_irq_info *irq_info;
+ struct hinic3_irq *curr;
+ u16 i;
+
+ irq_info = &hwdev->cfg_mgmt->irq_info;
+ mutex_lock(&irq_info->irq_mutex);
+ for (i = 0; i < irq_info->num_irq; i++) {
+ curr = irq_info->irq + i;
+ if (curr->irq_id == irq_id) {
+ curr->allocated = false;
+ break;
+ }
+ }
+ mutex_unlock(&irq_info->irq_mutex);
+}
+
+int hinic3_init_capability(struct hinic3_hwdev *hwdev)
+{
+ return get_cap_from_fw(hwdev, HINIC3_FUNC_TYPE_VF);
+}
+
+bool hinic3_support_nic(struct hinic3_hwdev *hwdev)
+{
+ return hwdev->cfg_mgmt->cap.supp_svcs_bitmap &
+ BIT(HINIC3_SERVICE_T_NIC);
+}
+
+u16 hinic3_func_max_qnum(struct hinic3_hwdev *hwdev)
+{
+ return hwdev->cfg_mgmt->cap.nic_svc_cap.max_sqs;
+}
+
+u8 hinic3_physical_port_id(struct hinic3_hwdev *hwdev)
+{
+ return hwdev->cfg_mgmt->cap.port_id;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.h
new file mode 100644
index 000000000000..58806199bf54
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_HW_CFG_H_
+#define _HINIC3_HW_CFG_H_
+
+#include <linux/mutex.h>
+#include <linux/pci.h>
+
+struct hinic3_hwdev;
+
+struct hinic3_irq {
+ u32 irq_id;
+ u16 msix_entry_idx;
+ bool allocated;
+};
+
+struct hinic3_irq_info {
+ struct hinic3_irq *irq;
+ u16 num_irq;
+ /* device max irq number */
+ u16 num_irq_hw;
+ /* protect irq alloc and free */
+ struct mutex irq_mutex;
+};
+
+struct hinic3_nic_service_cap {
+ u16 max_sqs;
+};
+
+/* Device capabilities */
+struct hinic3_dev_cap {
+ /* Bitmasks of services supported by device */
+ u16 supp_svcs_bitmap;
+ /* Physical port */
+ u8 port_id;
+ struct hinic3_nic_service_cap nic_svc_cap;
+};
+
+struct hinic3_cfg_mgmt_info {
+ struct hinic3_irq_info irq_info;
+ struct hinic3_dev_cap cap;
+};
+
+int hinic3_init_cfg_mgmt(struct hinic3_hwdev *hwdev);
+void hinic3_free_cfg_mgmt(struct hinic3_hwdev *hwdev);
+
+int hinic3_alloc_irqs(struct hinic3_hwdev *hwdev, u16 num,
+ struct msix_entry *alloc_arr, u16 *act_num);
+void hinic3_free_irq(struct hinic3_hwdev *hwdev, u32 irq_id);
+
+int hinic3_init_capability(struct hinic3_hwdev *hwdev);
+bool hinic3_support_nic(struct hinic3_hwdev *hwdev);
+u16 hinic3_func_max_qnum(struct hinic3_hwdev *hwdev);
+u8 hinic3_physical_port_id(struct hinic3_hwdev *hwdev);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c
new file mode 100644
index 000000000000..89638813df40
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c
@@ -0,0 +1,426 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/delay.h>
+
+#include "hinic3_cmdq.h"
+#include "hinic3_hw_comm.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+#include "hinic3_mbox.h"
+
+int hinic3_set_interrupt_cfg_direct(struct hinic3_hwdev *hwdev,
+ const struct hinic3_interrupt_info *info)
+{
+ struct comm_cmd_cfg_msix_ctrl_reg msix_cfg = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ msix_cfg.func_id = hinic3_global_func_id(hwdev);
+ msix_cfg.msix_index = info->msix_index;
+ msix_cfg.opcode = MGMT_MSG_CMD_OP_SET;
+
+ msix_cfg.lli_credit_cnt = info->lli_credit_limit;
+ msix_cfg.lli_timer_cnt = info->lli_timer_cfg;
+ msix_cfg.pending_cnt = info->pending_limit;
+ msix_cfg.coalesce_timer_cnt = info->coalesc_timer_cfg;
+ msix_cfg.resend_timer_cnt = info->resend_timer_cfg;
+
+ mgmt_msg_params_init_default(&msg_params, &msix_cfg, sizeof(msix_cfg));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_CFG_MSIX_CTRL_REG, &msg_params);
+ if (err || msix_cfg.head.status) {
+ dev_err(hwdev->dev,
+ "Failed to set interrupt config, err: %d, status: 0x%x\n",
+ err, msix_cfg.head.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic3_func_reset(struct hinic3_hwdev *hwdev, u16 func_id, u64 reset_flag)
+{
+ struct comm_cmd_func_reset func_reset = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ func_reset.func_id = func_id;
+ func_reset.reset_flag = reset_flag;
+
+ mgmt_msg_params_init_default(&msg_params, &func_reset,
+ sizeof(func_reset));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_FUNC_RESET, &msg_params);
+ if (err || func_reset.head.status) {
+ dev_err(hwdev->dev, "Failed to reset func resources, reset_flag 0x%llx, err: %d, status: 0x%x\n",
+ reset_flag, err, func_reset.head.status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int hinic3_comm_features_nego(struct hinic3_hwdev *hwdev, u8 opcode,
+ u64 *s_feature, u16 size)
+{
+ struct comm_cmd_feature_nego feature_nego = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ feature_nego.func_id = hinic3_global_func_id(hwdev);
+ feature_nego.opcode = opcode;
+ if (opcode == MGMT_MSG_CMD_OP_SET)
+ memcpy(feature_nego.s_feature, s_feature,
+ array_size(size, sizeof(u64)));
+
+ mgmt_msg_params_init_default(&msg_params, &feature_nego,
+ sizeof(feature_nego));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_FEATURE_NEGO, &msg_params);
+ if (err || feature_nego.head.status) {
+ dev_err(hwdev->dev, "Failed to negotiate feature, err: %d, status: 0x%x\n",
+ err, feature_nego.head.status);
+ return -EINVAL;
+ }
+
+ if (opcode == MGMT_MSG_CMD_OP_GET)
+ memcpy(s_feature, feature_nego.s_feature,
+ array_size(size, sizeof(u64)));
+
+ return 0;
+}
+
+int hinic3_get_comm_features(struct hinic3_hwdev *hwdev, u64 *s_feature,
+ u16 size)
+{
+ return hinic3_comm_features_nego(hwdev, MGMT_MSG_CMD_OP_GET, s_feature,
+ size);
+}
+
+int hinic3_set_comm_features(struct hinic3_hwdev *hwdev, u64 *s_feature,
+ u16 size)
+{
+ return hinic3_comm_features_nego(hwdev, MGMT_MSG_CMD_OP_SET, s_feature,
+ size);
+}
+
+int hinic3_get_global_attr(struct hinic3_hwdev *hwdev,
+ struct comm_global_attr *attr)
+{
+ struct comm_cmd_get_glb_attr get_attr = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ mgmt_msg_params_init_default(&msg_params, &get_attr, sizeof(get_attr));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_GET_GLOBAL_ATTR, &msg_params);
+ if (err || get_attr.head.status) {
+ dev_err(hwdev->dev,
+ "Failed to get global attribute, err: %d, status: 0x%x\n",
+ err, get_attr.head.status);
+ return -EIO;
+ }
+
+ memcpy(attr, &get_attr.attr, sizeof(*attr));
+
+ return 0;
+}
+
+int hinic3_set_func_svc_used_state(struct hinic3_hwdev *hwdev, u16 svc_type,
+ u8 state)
+{
+ struct comm_cmd_set_func_svc_used_state used_state = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ used_state.func_id = hinic3_global_func_id(hwdev);
+ used_state.svc_type = svc_type;
+ used_state.used_state = state;
+
+ mgmt_msg_params_init_default(&msg_params, &used_state,
+ sizeof(used_state));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_SET_FUNC_SVC_USED_STATE,
+ &msg_params);
+ if (err || used_state.head.status) {
+ dev_err(hwdev->dev,
+ "Failed to set func service used state, err: %d, status: 0x%x\n",
+ err, used_state.head.status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int hinic3_set_dma_attr_tbl(struct hinic3_hwdev *hwdev, u8 entry_idx, u8 st,
+ u8 at, u8 ph, u8 no_snooping, u8 tph_en)
+{
+ struct comm_cmd_set_dma_attr dma_attr = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ dma_attr.func_id = hinic3_global_func_id(hwdev);
+ dma_attr.entry_idx = entry_idx;
+ dma_attr.st = st;
+ dma_attr.at = at;
+ dma_attr.ph = ph;
+ dma_attr.no_snooping = no_snooping;
+ dma_attr.tph_en = tph_en;
+
+ mgmt_msg_params_init_default(&msg_params, &dma_attr, sizeof(dma_attr));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_SET_DMA_ATTR, &msg_params);
+ if (err || dma_attr.head.status) {
+ dev_err(hwdev->dev, "Failed to set dma attr, err: %d, status: 0x%x\n",
+ err, dma_attr.head.status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int hinic3_set_wq_page_size(struct hinic3_hwdev *hwdev, u16 func_idx,
+ u32 page_size)
+{
+ struct comm_cmd_cfg_wq_page_size page_size_info = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ page_size_info.func_id = func_idx;
+ page_size_info.page_size = ilog2(page_size / HINIC3_MIN_PAGE_SIZE);
+ page_size_info.opcode = MGMT_MSG_CMD_OP_SET;
+
+ mgmt_msg_params_init_default(&msg_params, &page_size_info,
+ sizeof(page_size_info));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_CFG_PAGESIZE, &msg_params);
+ if (err || page_size_info.head.status) {
+ dev_err(hwdev->dev,
+ "Failed to set wq page size, err: %d, status: 0x%x\n",
+ err, page_size_info.head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int hinic3_set_cmdq_depth(struct hinic3_hwdev *hwdev, u16 cmdq_depth)
+{
+ struct comm_cmd_set_root_ctxt root_ctxt = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ root_ctxt.func_id = hinic3_global_func_id(hwdev);
+
+ root_ctxt.set_cmdq_depth = 1;
+ root_ctxt.cmdq_depth = ilog2(cmdq_depth);
+
+ mgmt_msg_params_init_default(&msg_params, &root_ctxt,
+ sizeof(root_ctxt));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_SET_VAT, &msg_params);
+ if (err || root_ctxt.head.status) {
+ dev_err(hwdev->dev,
+ "Failed to set cmdq depth, err: %d, status: 0x%x\n",
+ err, root_ctxt.head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+#define HINIC3_WAIT_CMDQ_IDLE_TIMEOUT 5000
+
+static enum hinic3_wait_return check_cmdq_stop_handler(void *priv_data)
+{
+ struct hinic3_hwdev *hwdev = priv_data;
+ enum hinic3_cmdq_type cmdq_type;
+ struct hinic3_cmdqs *cmdqs;
+
+ cmdqs = hwdev->cmdqs;
+ for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++) {
+ if (!hinic3_cmdq_idle(&cmdqs->cmdq[cmdq_type]))
+ return HINIC3_WAIT_PROCESS_WAITING;
+ }
+
+ return HINIC3_WAIT_PROCESS_CPL;
+}
+
+static int wait_cmdq_stop(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_cmdqs *cmdqs = hwdev->cmdqs;
+ enum hinic3_cmdq_type cmdq_type;
+ int err;
+
+ if (!(cmdqs->status & HINIC3_CMDQ_ENABLE))
+ return 0;
+
+ cmdqs->status &= ~HINIC3_CMDQ_ENABLE;
+ err = hinic3_wait_for_timeout(hwdev, check_cmdq_stop_handler,
+ HINIC3_WAIT_CMDQ_IDLE_TIMEOUT,
+ USEC_PER_MSEC);
+
+ if (err)
+ goto err_reenable_cmdq;
+
+ return 0;
+
+err_reenable_cmdq:
+ for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++) {
+ if (!hinic3_cmdq_idle(&cmdqs->cmdq[cmdq_type]))
+ dev_err(hwdev->dev, "Cmdq %d is busy\n", cmdq_type);
+ }
+ cmdqs->status |= HINIC3_CMDQ_ENABLE;
+
+ return err;
+}
+
+int hinic3_func_rx_tx_flush(struct hinic3_hwdev *hwdev)
+{
+ struct comm_cmd_clear_resource clear_db = {};
+ struct comm_cmd_clear_resource clr_res = {};
+ struct hinic3_hwif *hwif = hwdev->hwif;
+ struct mgmt_msg_params msg_params = {};
+ int ret = 0;
+ int err;
+
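+	/* Stop the cmdqs, disable doorbells while FW flushes them and the
+	 * function resources, then re-enable doorbells and re-init the cmdq
+	 * contexts. Errors are reported but do not abort the sequence.
+	 */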
+ err = wait_cmdq_stop(hwdev);
+ if (err) {
+ dev_warn(hwdev->dev, "CMDQ is still working, CMDQ timeout value is unreasonable\n");
+ ret = err;
+ }
+
+ hinic3_toggle_doorbell(hwif, DISABLE_DOORBELL);
+
+ clear_db.func_id = hwif->attr.func_global_idx;
+ mgmt_msg_params_init_default(&msg_params, &clear_db, sizeof(clear_db));
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_FLUSH_DOORBELL, &msg_params);
+ if (err || clear_db.head.status) {
+ dev_warn(hwdev->dev, "Failed to flush doorbell, err: %d, status: 0x%x\n",
+ err, clear_db.head.status);
+ if (err)
+ ret = err;
+ else
+ ret = -EFAULT;
+ }
+
+ clr_res.func_id = hwif->attr.func_global_idx;
+ msg_params.buf_in = &clr_res;
+ msg_params.in_size = sizeof(clr_res);
+ err = hinic3_send_mbox_to_mgmt_no_ack(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_START_FLUSH,
+ &msg_params);
+ if (err) {
+		dev_warn(hwdev->dev, "Failed to notify flush message, err: %d\n",
+ err);
+ ret = err;
+ }
+
+ hinic3_toggle_doorbell(hwif, ENABLE_DOORBELL);
+
+ err = hinic3_reinit_cmdq_ctxts(hwdev);
+ if (err) {
+ dev_warn(hwdev->dev, "Failed to reinit cmdq\n");
+ ret = err;
+ }
+
+ return ret;
+}
+
+static int get_hw_rx_buf_size_idx(int rx_buf_sz, u16 *buf_sz_idx)
+{
+ /* Supported RX buffer sizes in bytes. Configured by array index. */
+ static const int supported_sizes[16] = {
+ [0] = 32, [1] = 64, [2] = 96, [3] = 128,
+ [4] = 192, [5] = 256, [6] = 384, [7] = 512,
+ [8] = 768, [9] = 1024, [10] = 1536, [11] = 2048,
+ [12] = 3072, [13] = 4096, [14] = 8192, [15] = 16384,
+ };
+ u16 idx;
+
+	/* Scan from biggest to smallest and choose the largest supported size
+	 * that is equal to or smaller than rx_buf_sz. With a smaller value HW
+	 * under-utilizes the posted buffers; with a bigger value HW may overrun
+	 * them.
+	 */
+ idx = ARRAY_SIZE(supported_sizes);
+ while (idx > 0) {
+ idx--;
+ if (supported_sizes[idx] <= rx_buf_sz) {
+ *buf_sz_idx = idx;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+int hinic3_set_root_ctxt(struct hinic3_hwdev *hwdev, u32 rq_depth, u32 sq_depth,
+ int rx_buf_sz)
+{
+ struct comm_cmd_set_root_ctxt root_ctxt = {};
+ struct mgmt_msg_params msg_params = {};
+ u16 buf_sz_idx;
+ int err;
+
+ err = get_hw_rx_buf_size_idx(rx_buf_sz, &buf_sz_idx);
+ if (err)
+ return err;
+
+ root_ctxt.func_id = hinic3_global_func_id(hwdev);
+
+ root_ctxt.set_cmdq_depth = 0;
+ root_ctxt.cmdq_depth = 0;
+
+ root_ctxt.lro_en = 1;
+
+ root_ctxt.rq_depth = ilog2(rq_depth);
+ root_ctxt.rx_buf_sz = buf_sz_idx;
+ root_ctxt.sq_depth = ilog2(sq_depth);
+
+ mgmt_msg_params_init_default(&msg_params, &root_ctxt,
+ sizeof(root_ctxt));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_SET_VAT, &msg_params);
+ if (err || root_ctxt.head.status) {
+ dev_err(hwdev->dev,
+ "Failed to set root context, err: %d, status: 0x%x\n",
+ err, root_ctxt.head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int hinic3_clean_root_ctxt(struct hinic3_hwdev *hwdev)
+{
+ struct comm_cmd_set_root_ctxt root_ctxt = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ root_ctxt.func_id = hinic3_global_func_id(hwdev);
+
+ mgmt_msg_params_init_default(&msg_params, &root_ctxt,
+ sizeof(root_ctxt));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_SET_VAT, &msg_params);
+ if (err || root_ctxt.head.status) {
+ dev_err(hwdev->dev,
+ "Failed to set root context, err: %d, status: 0x%x\n",
+ err, root_ctxt.head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h
new file mode 100644
index 000000000000..304f5691f0c2
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_HW_COMM_H_
+#define _HINIC3_HW_COMM_H_
+
+#include "hinic3_hw_intf.h"
+
+struct hinic3_hwdev;
+
+#define HINIC3_WQ_PAGE_SIZE_ORDER 8
+
+struct hinic3_interrupt_info {
+ u32 lli_set;
+ u32 interrupt_coalesc_set;
+ u16 msix_index;
+ u8 lli_credit_limit;
+ u8 lli_timer_cfg;
+ u8 pending_limit;
+ u8 coalesc_timer_cfg;
+ u8 resend_timer_cfg;
+};
+
+int hinic3_set_interrupt_cfg_direct(struct hinic3_hwdev *hwdev,
+ const struct hinic3_interrupt_info *info);
+int hinic3_func_reset(struct hinic3_hwdev *hwdev, u16 func_id, u64 reset_flag);
+
+int hinic3_get_comm_features(struct hinic3_hwdev *hwdev, u64 *s_feature,
+ u16 size);
+int hinic3_set_comm_features(struct hinic3_hwdev *hwdev, u64 *s_feature,
+ u16 size);
+int hinic3_get_global_attr(struct hinic3_hwdev *hwdev,
+ struct comm_global_attr *attr);
+int hinic3_set_func_svc_used_state(struct hinic3_hwdev *hwdev, u16 svc_type,
+ u8 state);
+int hinic3_set_dma_attr_tbl(struct hinic3_hwdev *hwdev, u8 entry_idx, u8 st,
+ u8 at, u8 ph, u8 no_snooping, u8 tph_en);
+
+int hinic3_set_wq_page_size(struct hinic3_hwdev *hwdev, u16 func_idx,
+ u32 page_size);
+int hinic3_set_cmdq_depth(struct hinic3_hwdev *hwdev, u16 cmdq_depth);
+int hinic3_func_rx_tx_flush(struct hinic3_hwdev *hwdev);
+int hinic3_set_root_ctxt(struct hinic3_hwdev *hwdev, u32 rq_depth, u32 sq_depth,
+ int rx_buf_sz);
+int hinic3_clean_root_ctxt(struct hinic3_hwdev *hwdev);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h
new file mode 100644
index 000000000000..623cf2d14cbc
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h
@@ -0,0 +1,264 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_HW_INTF_H_
+#define _HINIC3_HW_INTF_H_
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+#define MGMT_MSG_CMD_OP_SET 1
+#define MGMT_MSG_CMD_OP_GET 0
+
+#define MGMT_STATUS_PF_SET_VF_ALREADY 0x4
+#define MGMT_STATUS_EXIST 0x6
+#define MGMT_STATUS_CMD_UNSUPPORTED 0xFF
+
+#define MGMT_MSG_POLLING_TIMEOUT 0
+
+struct mgmt_msg_head {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+};
+
+struct mgmt_msg_params {
+ const void *buf_in;
+ u32 in_size;
+ void *buf_out;
+ u32 expected_out_size;
+ u32 timeout_ms;
+};
+
+/* CMDQ MODULE_TYPE */
+enum mgmt_mod_type {
+ /* HW communication module */
+ MGMT_MOD_COMM = 0,
+ /* L2NIC module */
+ MGMT_MOD_L2NIC = 1,
+ /* Configuration module */
+ MGMT_MOD_CFGM = 7,
+ MGMT_MOD_HILINK = 14,
+};
+
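+/* Initialize message params so that a single buffer of buf_size bytes is
+ * used both for the request and for its response.
+ */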
+static inline void mgmt_msg_params_init_default(struct mgmt_msg_params *msg_params,
+ void *inout_buf, u32 buf_size)
+{
+ msg_params->buf_in = inout_buf;
+ msg_params->buf_out = inout_buf;
+ msg_params->in_size = buf_size;
+ msg_params->expected_out_size = buf_size;
+ msg_params->timeout_ms = 0;
+}
+
+enum cfg_cmd {
+ CFG_CMD_GET_DEV_CAP = 0,
+};
+
+/* Device capabilities, defined by hw */
+struct cfg_cmd_dev_cap {
+ struct mgmt_msg_head head;
+
+ u16 func_id;
+ u16 rsvd1;
+
+ /* Public resources */
+ u8 host_id;
+ u8 ep_id;
+ u8 er_id;
+ u8 port_id;
+
+ u16 host_total_func;
+ u8 host_pf_num;
+ u8 pf_id_start;
+ u16 host_vf_num;
+ u16 vf_id_start;
+ u8 host_oq_id_mask_val;
+ u8 timer_en;
+ u8 host_valid_bitmap;
+ u8 rsvd_host;
+
+ u16 svc_cap_en;
+ u16 max_vf;
+ u8 flexq_en;
+ u8 valid_cos_bitmap;
+ u8 port_cos_valid_bitmap;
+ u8 rsvd2[45];
+
+ /* l2nic */
+ u16 nic_max_sq_id;
+ u16 nic_max_rq_id;
+ u16 nic_default_num_queues;
+
+ u8 rsvd3[250];
+};
+
+/* COMM commands between the driver and the FW */
+enum comm_cmd {
+ /* Commands for clearing FLR and resources */
+ COMM_CMD_FUNC_RESET = 0,
+ COMM_CMD_FEATURE_NEGO = 1,
+ COMM_CMD_FLUSH_DOORBELL = 2,
+ COMM_CMD_START_FLUSH = 3,
+ COMM_CMD_GET_GLOBAL_ATTR = 5,
+ COMM_CMD_SET_FUNC_SVC_USED_STATE = 7,
+
+ /* Driver Configuration Commands */
+ COMM_CMD_SET_CMDQ_CTXT = 20,
+ COMM_CMD_SET_VAT = 21,
+ COMM_CMD_CFG_PAGESIZE = 22,
+ COMM_CMD_CFG_MSIX_CTRL_REG = 23,
+ COMM_CMD_SET_CEQ_CTRL_REG = 24,
+ COMM_CMD_SET_DMA_ATTR = 25,
+};
+
+struct comm_cmd_cfg_msix_ctrl_reg {
+ struct mgmt_msg_head head;
+ u16 func_id;
+ u8 opcode;
+ u8 rsvd1;
+ u16 msix_index;
+ u8 pending_cnt;
+ u8 coalesce_timer_cnt;
+ u8 resend_timer_cnt;
+ u8 lli_timer_cnt;
+ u8 lli_credit_cnt;
+ u8 rsvd2[5];
+};
+
+enum comm_func_reset_bits {
+ COMM_FUNC_RESET_BIT_FLUSH = BIT(0),
+ COMM_FUNC_RESET_BIT_MQM = BIT(1),
+ COMM_FUNC_RESET_BIT_SMF = BIT(2),
+ COMM_FUNC_RESET_BIT_PF_BW_CFG = BIT(3),
+
+ COMM_FUNC_RESET_BIT_COMM = BIT(10),
+	/* Clears mbox and aeq; COMM_FUNC_RESET_BIT_COMM must also be set */
+ COMM_FUNC_RESET_BIT_COMM_MGMT_CH = BIT(11),
+	/* Clears cmdq and ceq; COMM_FUNC_RESET_BIT_COMM must also be set */
+ COMM_FUNC_RESET_BIT_COMM_CMD_CH = BIT(12),
+ COMM_FUNC_RESET_BIT_NIC = BIT(13),
+};
+
+#define COMM_FUNC_RESET_FLAG \
+ (COMM_FUNC_RESET_BIT_COMM | COMM_FUNC_RESET_BIT_COMM_CMD_CH | \
+ COMM_FUNC_RESET_BIT_FLUSH | COMM_FUNC_RESET_BIT_MQM | \
+ COMM_FUNC_RESET_BIT_SMF | COMM_FUNC_RESET_BIT_PF_BW_CFG)
+
+struct comm_cmd_func_reset {
+ struct mgmt_msg_head head;
+ u16 func_id;
+ u16 rsvd1[3];
+ u64 reset_flag;
+};
+
+#define COMM_MAX_FEATURE_QWORD 4
+struct comm_cmd_feature_nego {
+ struct mgmt_msg_head head;
+ u16 func_id;
+ u8 opcode;
+ u8 rsvd;
+ u64 s_feature[COMM_MAX_FEATURE_QWORD];
+};
+
+struct comm_global_attr {
+ u8 max_host_num;
+ u8 max_pf_num;
+ u16 vf_id_start;
+ /* for api cmd to mgmt cpu */
+ u8 mgmt_host_node_id;
+ u8 cmdq_num;
+ u8 rsvd1[34];
+};
+
+struct comm_cmd_get_glb_attr {
+ struct mgmt_msg_head head;
+ struct comm_global_attr attr;
+};
+
+enum comm_func_svc_type {
+ COMM_FUNC_SVC_T_COMM = 0,
+ COMM_FUNC_SVC_T_NIC = 1,
+};
+
+struct comm_cmd_set_func_svc_used_state {
+ struct mgmt_msg_head head;
+ u16 func_id;
+ u16 svc_type;
+ u8 used_state;
+ u8 rsvd[35];
+};
+
+struct comm_cmd_set_dma_attr {
+ struct mgmt_msg_head head;
+ u16 func_id;
+ u8 entry_idx;
+ u8 st;
+ u8 at;
+ u8 ph;
+ u8 no_snooping;
+ u8 tph_en;
+ u32 resv1;
+};
+
+struct comm_cmd_set_ceq_ctrl_reg {
+ struct mgmt_msg_head head;
+ u16 func_id;
+ u16 q_id;
+ u32 ctrl0;
+ u32 ctrl1;
+ u32 rsvd1;
+};
+
+struct comm_cmd_cfg_wq_page_size {
+ struct mgmt_msg_head head;
+ u16 func_id;
+ u8 opcode;
+	/* real_size = 4KB * 2^page_size; range (0-20) must be checked by driver */
+ u8 page_size;
+ u32 rsvd1;
+};
+
+struct comm_cmd_set_root_ctxt {
+ struct mgmt_msg_head head;
+ u16 func_id;
+ u8 set_cmdq_depth;
+ u8 cmdq_depth;
+ u16 rx_buf_sz;
+ u8 lro_en;
+ u8 rsvd1;
+ u16 sq_depth;
+ u16 rq_depth;
+ u64 rsvd2;
+};
+
+struct comm_cmdq_ctxt_info {
+ __le64 curr_wqe_page_pfn;
+ __le64 wq_block_pfn;
+};
+
+struct comm_cmd_set_cmdq_ctxt {
+ struct mgmt_msg_head head;
+ u16 func_id;
+ u8 cmdq_id;
+ u8 rsvd1[5];
+ struct comm_cmdq_ctxt_info ctxt;
+};
+
+struct comm_cmd_clear_resource {
+ struct mgmt_msg_head head;
+ u16 func_id;
+ u16 rsvd1[3];
+};
+
+/* Services supported by HW. HW uses these values when delivering events.
+ * HW supports additional services (e.g. RoCE) that the driver does not yet
+ * support.
+ */
+enum hinic3_service_type {
+ HINIC3_SERVICE_T_NIC = 0,
+ /* MAX is only used by SW for array sizes. */
+ HINIC3_SERVICE_T_MAX = 1,
+};
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c b/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c
new file mode 100644
index 000000000000..95a213133be9
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c
@@ -0,0 +1,557 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include "hinic3_cmdq.h"
+#include "hinic3_csr.h"
+#include "hinic3_eqs.h"
+#include "hinic3_hw_comm.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+#include "hinic3_mbox.h"
+#include "hinic3_mgmt.h"
+
+#define HINIC3_PCIE_SNOOP 0
+#define HINIC3_PCIE_TPH_DISABLE 0
+
+#define HINIC3_DMA_ATTR_INDIR_IDX_MASK GENMASK(9, 0)
+#define HINIC3_DMA_ATTR_INDIR_IDX_SET(val, member) \
+ FIELD_PREP(HINIC3_DMA_ATTR_INDIR_##member##_MASK, val)
+
+#define HINIC3_DMA_ATTR_ENTRY_ST_MASK GENMASK(7, 0)
+#define HINIC3_DMA_ATTR_ENTRY_AT_MASK GENMASK(9, 8)
+#define HINIC3_DMA_ATTR_ENTRY_PH_MASK GENMASK(11, 10)
+#define HINIC3_DMA_ATTR_ENTRY_NO_SNOOPING_MASK BIT(12)
+#define HINIC3_DMA_ATTR_ENTRY_TPH_EN_MASK BIT(13)
+#define HINIC3_DMA_ATTR_ENTRY_SET(val, member) \
+ FIELD_PREP(HINIC3_DMA_ATTR_ENTRY_##member##_MASK, val)
+
+#define HINIC3_PCIE_ST_DISABLE 0
+#define HINIC3_PCIE_AT_DISABLE 0
+#define HINIC3_PCIE_PH_DISABLE 0
+#define HINIC3_PCIE_MSIX_ATTR_ENTRY 0
+
+#define HINIC3_DEFAULT_EQ_MSIX_PENDING_LIMIT 0
+#define HINIC3_DEFAULT_EQ_MSIX_COALESC_TIMER_CFG 0xFF
+#define HINIC3_DEFAULT_EQ_MSIX_RESEND_TIMER_CFG 7
+
+#define HINIC3_HWDEV_WQ_NAME "hinic3_hardware"
+#define HINIC3_WQ_MAX_REQ 10
+
+enum hinic3_hwdev_init_state {
+ HINIC3_HWDEV_MBOX_INITED = 2,
+ HINIC3_HWDEV_CMDQ_INITED = 3,
+};
+
+static int hinic3_comm_aeqs_init(struct hinic3_hwdev *hwdev)
+{
+ struct msix_entry aeq_msix_entries[HINIC3_MAX_AEQS];
+ u16 num_aeqs, resp_num_irq, i;
+ int err;
+
+ num_aeqs = hwdev->hwif->attr.num_aeqs;
+ if (num_aeqs > HINIC3_MAX_AEQS) {
+ dev_warn(hwdev->dev, "Adjust aeq num to %d\n",
+ HINIC3_MAX_AEQS);
+ num_aeqs = HINIC3_MAX_AEQS;
+ }
+ err = hinic3_alloc_irqs(hwdev, num_aeqs, aeq_msix_entries,
+ &resp_num_irq);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to alloc aeq irqs, num_aeqs: %u\n",
+ num_aeqs);
+ return err;
+ }
+
+ if (resp_num_irq < num_aeqs) {
+ dev_warn(hwdev->dev, "Adjust aeq num to %u\n",
+ resp_num_irq);
+ num_aeqs = resp_num_irq;
+ }
+
+ err = hinic3_aeqs_init(hwdev, num_aeqs, aeq_msix_entries);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init aeqs\n");
+ goto err_free_irqs;
+ }
+
+ return 0;
+
+err_free_irqs:
+ for (i = 0; i < num_aeqs; i++)
+ hinic3_free_irq(hwdev, aeq_msix_entries[i].vector);
+
+ return err;
+}
+
+static int hinic3_comm_ceqs_init(struct hinic3_hwdev *hwdev)
+{
+ struct msix_entry ceq_msix_entries[HINIC3_MAX_CEQS];
+ u16 num_ceqs, resp_num_irq, i;
+ int err;
+
+ num_ceqs = hwdev->hwif->attr.num_ceqs;
+ if (num_ceqs > HINIC3_MAX_CEQS) {
+ dev_warn(hwdev->dev, "Adjust ceq num to %d\n",
+ HINIC3_MAX_CEQS);
+ num_ceqs = HINIC3_MAX_CEQS;
+ }
+
+ err = hinic3_alloc_irqs(hwdev, num_ceqs, ceq_msix_entries,
+ &resp_num_irq);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to alloc ceq irqs, num_ceqs: %u\n",
+ num_ceqs);
+ return err;
+ }
+
+ if (resp_num_irq < num_ceqs) {
+ dev_warn(hwdev->dev, "Adjust ceq num to %u\n",
+ resp_num_irq);
+ num_ceqs = resp_num_irq;
+ }
+
+ err = hinic3_ceqs_init(hwdev, num_ceqs, ceq_msix_entries);
+ if (err) {
+ dev_err(hwdev->dev,
+ "Failed to init ceqs, err:%d\n", err);
+ goto err_free_irqs;
+ }
+
+ return 0;
+
+err_free_irqs:
+ for (i = 0; i < num_ceqs; i++)
+ hinic3_free_irq(hwdev, ceq_msix_entries[i].vector);
+
+ return err;
+}
+
+static int hinic3_comm_mbox_init(struct hinic3_hwdev *hwdev)
+{
+ int err;
+
+ err = hinic3_init_mbox(hwdev);
+ if (err)
+ return err;
+
+ hinic3_aeq_register_cb(hwdev, HINIC3_MBX_FROM_FUNC,
+ hinic3_mbox_func_aeqe_handler);
+ hinic3_aeq_register_cb(hwdev, HINIC3_MSG_FROM_FW,
+ hinic3_mgmt_msg_aeqe_handler);
+
+ set_bit(HINIC3_HWDEV_MBOX_INITED, &hwdev->func_state);
+
+ return 0;
+}
+
+static void hinic3_comm_mbox_free(struct hinic3_hwdev *hwdev)
+{
+ spin_lock_bh(&hwdev->channel_lock);
+ clear_bit(HINIC3_HWDEV_MBOX_INITED, &hwdev->func_state);
+ spin_unlock_bh(&hwdev->channel_lock);
+ hinic3_aeq_unregister_cb(hwdev, HINIC3_MBX_FROM_FUNC);
+ hinic3_aeq_unregister_cb(hwdev, HINIC3_MSG_FROM_FW);
+ hinic3_free_mbox(hwdev);
+}
+
+static int init_aeqs_msix_attr(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_aeqs *aeqs = hwdev->aeqs;
+ struct hinic3_interrupt_info info = {};
+ struct hinic3_eq *eq;
+ u16 q_id;
+ int err;
+
+ info.interrupt_coalesc_set = 1;
+ info.pending_limit = HINIC3_DEFAULT_EQ_MSIX_PENDING_LIMIT;
+ info.coalesc_timer_cfg = HINIC3_DEFAULT_EQ_MSIX_COALESC_TIMER_CFG;
+ info.resend_timer_cfg = HINIC3_DEFAULT_EQ_MSIX_RESEND_TIMER_CFG;
+
+ for (q_id = 0; q_id < aeqs->num_aeqs; q_id++) {
+ eq = &aeqs->aeq[q_id];
+ info.msix_index = eq->msix_entry_idx;
+ err = hinic3_set_interrupt_cfg_direct(hwdev, &info);
+ if (err) {
+ dev_err(hwdev->dev, "Set msix attr for aeq %d failed\n",
+ q_id);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int init_ceqs_msix_attr(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_ceqs *ceqs = hwdev->ceqs;
+ struct hinic3_interrupt_info info = {};
+ struct hinic3_eq *eq;
+ u16 q_id;
+ int err;
+
+ info.interrupt_coalesc_set = 1;
+ info.pending_limit = HINIC3_DEFAULT_EQ_MSIX_PENDING_LIMIT;
+ info.coalesc_timer_cfg = HINIC3_DEFAULT_EQ_MSIX_COALESC_TIMER_CFG;
+ info.resend_timer_cfg = HINIC3_DEFAULT_EQ_MSIX_RESEND_TIMER_CFG;
+
+ for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) {
+ eq = &ceqs->ceq[q_id];
+ info.msix_index = eq->msix_entry_idx;
+ err = hinic3_set_interrupt_cfg_direct(hwdev, &info);
+ if (err) {
+ dev_err(hwdev->dev, "Set msix attr for ceq %u failed\n",
+ q_id);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int init_basic_mgmt_channel(struct hinic3_hwdev *hwdev)
+{
+ int err;
+
+ err = hinic3_comm_aeqs_init(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init async event queues\n");
+ return err;
+ }
+
+ err = hinic3_comm_mbox_init(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init mailbox\n");
+ goto err_free_comm_aeqs;
+ }
+
+ err = init_aeqs_msix_attr(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init aeqs msix attr\n");
+ goto err_free_comm_mbox;
+ }
+
+ return 0;
+
+err_free_comm_mbox:
+ hinic3_comm_mbox_free(hwdev);
+err_free_comm_aeqs:
+ hinic3_aeqs_free(hwdev);
+
+ return err;
+}
+
+static void free_base_mgmt_channel(struct hinic3_hwdev *hwdev)
+{
+ hinic3_comm_mbox_free(hwdev);
+ hinic3_aeqs_free(hwdev);
+}
+
+static int dma_attr_table_init(struct hinic3_hwdev *hwdev)
+{
+ u32 addr, val, dst_attr;
+
+ /* Indirect access, set entry_idx first */
+ addr = HINIC3_CSR_DMA_ATTR_INDIR_IDX_ADDR;
+ val = hinic3_hwif_read_reg(hwdev->hwif, addr);
+ val &= ~HINIC3_DMA_ATTR_ENTRY_AT_MASK;
+ val |= HINIC3_DMA_ATTR_INDIR_IDX_SET(HINIC3_PCIE_MSIX_ATTR_ENTRY, IDX);
+ hinic3_hwif_write_reg(hwdev->hwif, addr, val);
+
+ addr = HINIC3_CSR_DMA_ATTR_TBL_ADDR;
+ val = hinic3_hwif_read_reg(hwdev->hwif, addr);
+
+ dst_attr = HINIC3_DMA_ATTR_ENTRY_SET(HINIC3_PCIE_ST_DISABLE, ST) |
+ HINIC3_DMA_ATTR_ENTRY_SET(HINIC3_PCIE_AT_DISABLE, AT) |
+ HINIC3_DMA_ATTR_ENTRY_SET(HINIC3_PCIE_PH_DISABLE, PH) |
+ HINIC3_DMA_ATTR_ENTRY_SET(HINIC3_PCIE_SNOOP, NO_SNOOPING) |
+ HINIC3_DMA_ATTR_ENTRY_SET(HINIC3_PCIE_TPH_DISABLE, TPH_EN);
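+	/* Nothing to do if the entry already holds the desired attributes. */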
+ if (val == dst_attr)
+ return 0;
+
+ return hinic3_set_dma_attr_tbl(hwdev,
+ HINIC3_PCIE_MSIX_ATTR_ENTRY,
+ HINIC3_PCIE_ST_DISABLE,
+ HINIC3_PCIE_AT_DISABLE,
+ HINIC3_PCIE_PH_DISABLE,
+ HINIC3_PCIE_SNOOP,
+ HINIC3_PCIE_TPH_DISABLE);
+}
+
+static int init_basic_attributes(struct hinic3_hwdev *hwdev)
+{
+ struct comm_global_attr glb_attr;
+ int err;
+
+ err = hinic3_func_reset(hwdev, hinic3_global_func_id(hwdev),
+ COMM_FUNC_RESET_FLAG);
+ if (err)
+ return err;
+
+ err = hinic3_get_comm_features(hwdev, hwdev->features,
+ COMM_MAX_FEATURE_QWORD);
+ if (err)
+ return err;
+
+ dev_dbg(hwdev->dev, "Comm hw features: 0x%llx\n", hwdev->features[0]);
+
+ err = hinic3_get_global_attr(hwdev, &glb_attr);
+ if (err)
+ return err;
+
+ err = hinic3_set_func_svc_used_state(hwdev, COMM_FUNC_SVC_T_COMM, 1);
+ if (err)
+ return err;
+
+ err = dma_attr_table_init(hwdev);
+ if (err)
+ return err;
+
+ hwdev->max_cmdq = min(glb_attr.cmdq_num, HINIC3_MAX_CMDQ_TYPES);
+ dev_dbg(hwdev->dev,
+ "global attribute: max_host: 0x%x, max_pf: 0x%x, vf_id_start: 0x%x, mgmt node id: 0x%x, cmdq_num: 0x%x\n",
+ glb_attr.max_host_num, glb_attr.max_pf_num,
+ glb_attr.vf_id_start, glb_attr.mgmt_host_node_id,
+ glb_attr.cmdq_num);
+
+ return 0;
+}
+
+static int hinic3_comm_cmdqs_init(struct hinic3_hwdev *hwdev)
+{
+ int err;
+
+ err = hinic3_cmdqs_init(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init cmd queues\n");
+ return err;
+ }
+
+ hinic3_ceq_register_cb(hwdev, HINIC3_CMDQ, hinic3_cmdq_ceq_handler);
+
+ err = hinic3_set_cmdq_depth(hwdev, CMDQ_DEPTH);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to set cmdq depth\n");
+ goto err_free_cmdqs;
+ }
+
+ set_bit(HINIC3_HWDEV_CMDQ_INITED, &hwdev->func_state);
+
+ return 0;
+
+err_free_cmdqs:
+ hinic3_cmdqs_free(hwdev);
+
+ return err;
+}
+
+static void hinic3_comm_cmdqs_free(struct hinic3_hwdev *hwdev)
+{
+ spin_lock_bh(&hwdev->channel_lock);
+ clear_bit(HINIC3_HWDEV_CMDQ_INITED, &hwdev->func_state);
+ spin_unlock_bh(&hwdev->channel_lock);
+
+ hinic3_ceq_unregister_cb(hwdev, HINIC3_CMDQ);
+ hinic3_cmdqs_free(hwdev);
+}
+
+static int init_cmdqs_channel(struct hinic3_hwdev *hwdev)
+{
+ int err;
+
+ err = hinic3_comm_ceqs_init(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init completion event queues\n");
+ return err;
+ }
+
+ err = init_ceqs_msix_attr(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init ceqs msix attr\n");
+ goto err_free_ceqs;
+ }
+
+ hwdev->wq_page_size = HINIC3_MIN_PAGE_SIZE << HINIC3_WQ_PAGE_SIZE_ORDER;
+ err = hinic3_set_wq_page_size(hwdev, hinic3_global_func_id(hwdev),
+ hwdev->wq_page_size);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to set wq page size\n");
+ goto err_free_ceqs;
+ }
+
+ err = hinic3_comm_cmdqs_init(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init cmd queues\n");
+ goto err_reset_wq_page_size;
+ }
+
+ return 0;
+
+err_reset_wq_page_size:
+ hinic3_set_wq_page_size(hwdev, hinic3_global_func_id(hwdev),
+ HINIC3_MIN_PAGE_SIZE);
+err_free_ceqs:
+ hinic3_ceqs_free(hwdev);
+
+ return err;
+}
+
+static void hinic3_free_cmdqs_channel(struct hinic3_hwdev *hwdev)
+{
+ hinic3_comm_cmdqs_free(hwdev);
+ hinic3_ceqs_free(hwdev);
+}
+
+static int hinic3_init_comm_ch(struct hinic3_hwdev *hwdev)
+{
+ int err;
+
+ err = init_basic_mgmt_channel(hwdev);
+ if (err)
+ return err;
+
+ err = init_basic_attributes(hwdev);
+ if (err)
+ goto err_free_basic_mgmt_ch;
+
+ err = init_cmdqs_channel(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init cmdq channel\n");
+ goto err_clear_func_svc_used_state;
+ }
+
+ return 0;
+
+err_clear_func_svc_used_state:
+ hinic3_set_func_svc_used_state(hwdev, COMM_FUNC_SVC_T_COMM, 0);
+err_free_basic_mgmt_ch:
+ free_base_mgmt_channel(hwdev);
+
+ return err;
+}
+
+static void hinic3_uninit_comm_ch(struct hinic3_hwdev *hwdev)
+{
+ hinic3_free_cmdqs_channel(hwdev);
+ hinic3_set_func_svc_used_state(hwdev, COMM_FUNC_SVC_T_COMM, 0);
+ free_base_mgmt_channel(hwdev);
+}
+
+static DEFINE_IDA(hinic3_adev_ida);
+
+static int hinic3_adev_idx_alloc(void)
+{
+ return ida_alloc(&hinic3_adev_ida, GFP_KERNEL);
+}
+
+static void hinic3_adev_idx_free(int id)
+{
+ ida_free(&hinic3_adev_ida, id);
+}
+
+int hinic3_init_hwdev(struct pci_dev *pdev)
+{
+ struct hinic3_pcidev *pci_adapter = pci_get_drvdata(pdev);
+ struct hinic3_hwdev *hwdev;
+ int err;
+
+ hwdev = kzalloc(sizeof(*hwdev), GFP_KERNEL);
+ if (!hwdev)
+ return -ENOMEM;
+
+ pci_adapter->hwdev = hwdev;
+ hwdev->adapter = pci_adapter;
+ hwdev->pdev = pci_adapter->pdev;
+ hwdev->dev = &pci_adapter->pdev->dev;
+ hwdev->func_state = 0;
+ hwdev->dev_id = hinic3_adev_idx_alloc();
+ spin_lock_init(&hwdev->channel_lock);
+
+ err = hinic3_init_hwif(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init hwif\n");
+ goto err_free_hwdev;
+ }
+
+ hwdev->workq = alloc_workqueue(HINIC3_HWDEV_WQ_NAME, WQ_MEM_RECLAIM,
+ HINIC3_WQ_MAX_REQ);
+ if (!hwdev->workq) {
+ dev_err(hwdev->dev, "Failed to alloc hardware workq\n");
+ err = -ENOMEM;
+ goto err_free_hwif;
+ }
+
+ err = hinic3_init_cfg_mgmt(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init config mgmt\n");
+ goto err_destroy_workqueue;
+ }
+
+ err = hinic3_init_comm_ch(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init communication channel\n");
+ goto err_free_cfg_mgmt;
+ }
+
+ err = hinic3_init_capability(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init capability\n");
+ goto err_uninit_comm_ch;
+ }
+
+ err = hinic3_set_comm_features(hwdev, hwdev->features,
+ COMM_MAX_FEATURE_QWORD);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to set comm features\n");
+ goto err_uninit_comm_ch;
+ }
+
+ return 0;
+
+err_uninit_comm_ch:
+ hinic3_uninit_comm_ch(hwdev);
+err_free_cfg_mgmt:
+ hinic3_free_cfg_mgmt(hwdev);
+err_destroy_workqueue:
+ destroy_workqueue(hwdev->workq);
+err_free_hwif:
+ hinic3_free_hwif(hwdev);
+err_free_hwdev:
+ pci_adapter->hwdev = NULL;
+ hinic3_adev_idx_free(hwdev->dev_id);
+ kfree(hwdev);
+
+ return err;
+}
+
+void hinic3_free_hwdev(struct hinic3_hwdev *hwdev)
+{
+ u64 drv_features[COMM_MAX_FEATURE_QWORD] = {};
+
+ hinic3_set_comm_features(hwdev, drv_features, COMM_MAX_FEATURE_QWORD);
+ hinic3_func_rx_tx_flush(hwdev);
+ hinic3_uninit_comm_ch(hwdev);
+ hinic3_free_cfg_mgmt(hwdev);
+ destroy_workqueue(hwdev->workq);
+ hinic3_free_hwif(hwdev);
+ hinic3_adev_idx_free(hwdev->dev_id);
+ kfree(hwdev);
+}
+
+void hinic3_set_api_stop(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_mbox *mbox;
+
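+	/* Mark an in-flight mailbox event as timed out and flush synchronous
+	 * cmdq commands so that blocked senders return instead of waiting.
+	 */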
+ spin_lock_bh(&hwdev->channel_lock);
+ if (test_bit(HINIC3_HWDEV_MBOX_INITED, &hwdev->func_state)) {
+ mbox = hwdev->mbox;
+ spin_lock(&mbox->mbox_lock);
+ if (mbox->event_flag == MBOX_EVENT_START)
+ mbox->event_flag = MBOX_EVENT_TIMEOUT;
+ spin_unlock(&mbox->mbox_lock);
+ }
+
+ if (test_bit(HINIC3_HWDEV_CMDQ_INITED, &hwdev->func_state))
+ hinic3_cmdq_flush_sync_cmd(hwdev);
+
+ spin_unlock_bh(&hwdev->channel_lock);
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.h
new file mode 100644
index 000000000000..62e2745e9316
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_HWDEV_H_
+#define _HINIC3_HWDEV_H_
+
+#include <linux/auxiliary_bus.h>
+#include <linux/pci.h>
+
+#include "hinic3_hw_intf.h"
+
+struct hinic3_cmdqs;
+struct hinic3_hwif;
+
+enum hinic3_event_service_type {
+ HINIC3_EVENT_SRV_COMM = 0,
+ HINIC3_EVENT_SRV_NIC = 1
+};
+
+#define HINIC3_SRV_EVENT_TYPE(svc, type) (((svc) << 16) | (type))
+
+/* driver-specific data of pci_dev */
+struct hinic3_pcidev {
+ struct pci_dev *pdev;
+ struct hinic3_hwdev *hwdev;
+ /* Auxiliary devices */
+ struct hinic3_adev *hadev[HINIC3_SERVICE_T_MAX];
+
+ void __iomem *cfg_reg_base;
+ void __iomem *intr_reg_base;
+ void __iomem *db_base;
+ u64 db_dwqe_len;
+ u64 db_base_phy;
+
+ /* lock for attach/detach uld */
+ struct mutex pdev_mutex;
+ unsigned long state;
+};
+
+struct hinic3_hwdev {
+ struct hinic3_pcidev *adapter;
+ struct pci_dev *pdev;
+ struct device *dev;
+ int dev_id;
+ struct hinic3_hwif *hwif;
+ struct hinic3_cfg_mgmt_info *cfg_mgmt;
+ struct hinic3_aeqs *aeqs;
+ struct hinic3_ceqs *ceqs;
+ struct hinic3_mbox *mbox;
+ struct hinic3_cmdqs *cmdqs;
+ struct workqueue_struct *workq;
+ /* protect channel init and uninit */
+ spinlock_t channel_lock;
+ u64 features[COMM_MAX_FEATURE_QWORD];
+ u32 wq_page_size;
+ u8 max_cmdq;
+ ulong func_state;
+};
+
+struct hinic3_event_info {
+ /* enum hinic3_event_service_type */
+ u16 service;
+ u16 type;
+ u8 event_data[104];
+};
+
+struct hinic3_adev {
+ struct auxiliary_device adev;
+ struct hinic3_hwdev *hwdev;
+ enum hinic3_service_type svc_type;
+
+ void (*event)(struct auxiliary_device *adev,
+ struct hinic3_event_info *event);
+};
+
+int hinic3_init_hwdev(struct pci_dev *pdev);
+void hinic3_free_hwdev(struct hinic3_hwdev *hwdev);
+
+void hinic3_set_api_stop(struct hinic3_hwdev *hwdev);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c b/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c
new file mode 100644
index 000000000000..f76f140fb6f7
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c
@@ -0,0 +1,436 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/io.h>
+
+#include "hinic3_common.h"
+#include "hinic3_csr.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+
+#define HINIC3_HWIF_READY_TIMEOUT 10000
+#define HINIC3_DB_AND_OUTBOUND_EN_TIMEOUT 60000
+#define HINIC3_PCIE_LINK_DOWN 0xFFFFFFFF
+
+/* config BAR4/5 4MB, DB & DWQE both 2MB */
+#define HINIC3_DB_DWQE_SIZE 0x00400000
+
+/* db/dwqe page size: 4K */
+#define HINIC3_DB_PAGE_SIZE 0x00001000
+#define HINIC3_DWQE_OFFSET 0x00000800
+#define HINIC3_DB_MAX_AREAS (HINIC3_DB_DWQE_SIZE / HINIC3_DB_PAGE_SIZE)
+
+#define HINIC3_MAX_MSIX_ENTRY 2048
+
+#define HINIC3_AF0_FUNC_GLOBAL_IDX_MASK GENMASK(11, 0)
+#define HINIC3_AF0_P2P_IDX_MASK GENMASK(16, 12)
+#define HINIC3_AF0_PCI_INTF_IDX_MASK GENMASK(19, 17)
+#define HINIC3_AF0_FUNC_TYPE_MASK BIT(28)
+#define HINIC3_AF0_GET(val, member) \
+ FIELD_GET(HINIC3_AF0_##member##_MASK, val)
+
+#define HINIC3_AF1_AEQS_PER_FUNC_MASK GENMASK(9, 8)
+#define HINIC3_AF1_MGMT_INIT_STATUS_MASK BIT(30)
+#define HINIC3_AF1_GET(val, member) \
+ FIELD_GET(HINIC3_AF1_##member##_MASK, val)
+
+#define HINIC3_AF2_CEQS_PER_FUNC_MASK GENMASK(8, 0)
+#define HINIC3_AF2_IRQS_PER_FUNC_MASK GENMASK(26, 16)
+#define HINIC3_AF2_GET(val, member) \
+ FIELD_GET(HINIC3_AF2_##member##_MASK, val)
+
+#define HINIC3_AF4_DOORBELL_CTRL_MASK BIT(0)
+#define HINIC3_AF4_GET(val, member) \
+ FIELD_GET(HINIC3_AF4_##member##_MASK, val)
+#define HINIC3_AF4_SET(val, member) \
+ FIELD_PREP(HINIC3_AF4_##member##_MASK, val)
+
+#define HINIC3_AF5_OUTBOUND_CTRL_MASK BIT(0)
+#define HINIC3_AF5_GET(val, member) \
+ FIELD_GET(HINIC3_AF5_##member##_MASK, val)
+
+#define HINIC3_AF6_PF_STATUS_MASK GENMASK(15, 0)
+#define HINIC3_AF6_FUNC_MAX_SQ_MASK GENMASK(31, 23)
+#define HINIC3_AF6_MSIX_FLEX_EN_MASK BIT(22)
+#define HINIC3_AF6_GET(val, member) \
+ FIELD_GET(HINIC3_AF6_##member##_MASK, val)
+
+#define HINIC3_GET_REG_ADDR(reg) ((reg) & (HINIC3_REGS_FLAG_MASK))
+
+static void __iomem *hinic3_reg_addr(struct hinic3_hwif *hwif, u32 reg)
+{
+ return hwif->cfg_regs_base + HINIC3_GET_REG_ADDR(reg);
+}
+
+u32 hinic3_hwif_read_reg(struct hinic3_hwif *hwif, u32 reg)
+{
+ void __iomem *addr = hinic3_reg_addr(hwif, reg);
+
+ return ioread32be(addr);
+}
+
+void hinic3_hwif_write_reg(struct hinic3_hwif *hwif, u32 reg, u32 val)
+{
+ void __iomem *addr = hinic3_reg_addr(hwif, reg);
+
+ iowrite32be(val, addr);
+}
+
+static enum hinic3_wait_return check_hwif_ready_handler(void *priv_data)
+{
+ struct hinic3_hwdev *hwdev = priv_data;
+ u32 attr1;
+
+ attr1 = hinic3_hwif_read_reg(hwdev->hwif, HINIC3_CSR_FUNC_ATTR1_ADDR);
+
+ return HINIC3_AF1_GET(attr1, MGMT_INIT_STATUS) ?
+ HINIC3_WAIT_PROCESS_CPL : HINIC3_WAIT_PROCESS_WAITING;
+}
+
+static int wait_hwif_ready(struct hinic3_hwdev *hwdev)
+{
+ return hinic3_wait_for_timeout(hwdev, check_hwif_ready_handler,
+ HINIC3_HWIF_READY_TIMEOUT,
+ USEC_PER_MSEC);
+}
+
+/* Set attr struct from HW attr values. */
+static void set_hwif_attr(struct hinic3_func_attr *attr, u32 attr0, u32 attr1,
+ u32 attr2, u32 attr3, u32 attr6)
+{
+ attr->func_global_idx = HINIC3_AF0_GET(attr0, FUNC_GLOBAL_IDX);
+ attr->port_to_port_idx = HINIC3_AF0_GET(attr0, P2P_IDX);
+ attr->pci_intf_idx = HINIC3_AF0_GET(attr0, PCI_INTF_IDX);
+ attr->func_type = HINIC3_AF0_GET(attr0, FUNC_TYPE);
+
+ attr->num_aeqs = BIT(HINIC3_AF1_GET(attr1, AEQS_PER_FUNC));
+ attr->num_ceqs = HINIC3_AF2_GET(attr2, CEQS_PER_FUNC);
+ attr->num_irqs = HINIC3_AF2_GET(attr2, IRQS_PER_FUNC);
+ if (attr->num_irqs > HINIC3_MAX_MSIX_ENTRY)
+ attr->num_irqs = HINIC3_MAX_MSIX_ENTRY;
+
+ attr->num_sq = HINIC3_AF6_GET(attr6, FUNC_MAX_SQ);
+ attr->msix_flex_en = HINIC3_AF6_GET(attr6, MSIX_FLEX_EN);
+}
+
+/* Read attributes from HW and set attribute struct. */
+static int init_hwif_attr(struct hinic3_hwdev *hwdev)
+{
+ u32 attr0, attr1, attr2, attr3, attr6;
+ struct hinic3_hwif *hwif;
+
+ hwif = hwdev->hwif;
+ attr0 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR0_ADDR);
+ if (attr0 == HINIC3_PCIE_LINK_DOWN)
+ return -EFAULT;
+
+ attr1 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR1_ADDR);
+ if (attr1 == HINIC3_PCIE_LINK_DOWN)
+ return -EFAULT;
+
+ attr2 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR2_ADDR);
+ if (attr2 == HINIC3_PCIE_LINK_DOWN)
+ return -EFAULT;
+
+ attr3 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR3_ADDR);
+ if (attr3 == HINIC3_PCIE_LINK_DOWN)
+ return -EFAULT;
+
+ attr6 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR6_ADDR);
+ if (attr6 == HINIC3_PCIE_LINK_DOWN)
+ return -EFAULT;
+
+ set_hwif_attr(&hwif->attr, attr0, attr1, attr2, attr3, attr6);
+
+ if (!hwif->attr.num_ceqs) {
+ dev_err(hwdev->dev, "Ceq num cfg in fw is zero\n");
+ return -EFAULT;
+ }
+
+ if (!hwif->attr.num_irqs) {
+ dev_err(hwdev->dev,
+ "Irq num cfg in fw is zero, msix_flex_en %d\n",
+ hwif->attr.msix_flex_en);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static enum hinic3_doorbell_ctrl hinic3_get_doorbell_ctrl_status(struct hinic3_hwif *hwif)
+{
+ u32 attr4 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR4_ADDR);
+
+ return HINIC3_AF4_GET(attr4, DOORBELL_CTRL);
+}
+
+static enum hinic3_outbound_ctrl hinic3_get_outbound_ctrl_status(struct hinic3_hwif *hwif)
+{
+ u32 attr5 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR5_ADDR);
+
+ return HINIC3_AF5_GET(attr5, OUTBOUND_CTRL);
+}
+
+void hinic3_toggle_doorbell(struct hinic3_hwif *hwif,
+ enum hinic3_doorbell_ctrl flag)
+{
+ u32 addr, attr4;
+
+ addr = HINIC3_CSR_FUNC_ATTR4_ADDR;
+ attr4 = hinic3_hwif_read_reg(hwif, addr);
+
+ attr4 &= ~HINIC3_AF4_DOORBELL_CTRL_MASK;
+ attr4 |= HINIC3_AF4_SET(flag, DOORBELL_CTRL);
+
+ hinic3_hwif_write_reg(hwif, addr, attr4);
+}
+
+static int db_area_idx_init(struct hinic3_hwif *hwif, u64 db_base_phy,
+ u8 __iomem *db_base, u64 db_dwqe_len)
+{
+ struct hinic3_db_area *db_area = &hwif->db_area;
+ u32 db_max_areas;
+
+ hwif->db_base_phy = db_base_phy;
+ hwif->db_base = db_base;
+ hwif->db_dwqe_len = db_dwqe_len;
+
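+	/* One bit per 4K doorbell page; cap the bitmap at HINIC3_DB_MAX_AREAS
+	 * pages.
+	 */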
+ db_max_areas = db_dwqe_len > HINIC3_DB_DWQE_SIZE ?
+ HINIC3_DB_MAX_AREAS : db_dwqe_len / HINIC3_DB_PAGE_SIZE;
+ db_area->db_bitmap_array = bitmap_zalloc(db_max_areas, GFP_KERNEL);
+ if (!db_area->db_bitmap_array)
+ return -ENOMEM;
+
+ db_area->db_max_areas = db_max_areas;
+ spin_lock_init(&db_area->idx_lock);
+
+ return 0;
+}
+
+static void db_area_idx_free(struct hinic3_db_area *db_area)
+{
+ bitmap_free(db_area->db_bitmap_array);
+}
+
+static int get_db_idx(struct hinic3_hwif *hwif, u32 *idx)
+{
+ struct hinic3_db_area *db_area = &hwif->db_area;
+ u32 pg_idx;
+
+ spin_lock(&db_area->idx_lock);
+ pg_idx = find_first_zero_bit(db_area->db_bitmap_array,
+ db_area->db_max_areas);
+ if (pg_idx == db_area->db_max_areas) {
+ spin_unlock(&db_area->idx_lock);
+ return -ENOMEM;
+ }
+ set_bit(pg_idx, db_area->db_bitmap_array);
+ spin_unlock(&db_area->idx_lock);
+
+ *idx = pg_idx;
+
+ return 0;
+}
+
+static void free_db_idx(struct hinic3_hwif *hwif, u32 idx)
+{
+ struct hinic3_db_area *db_area = &hwif->db_area;
+
+ spin_lock(&db_area->idx_lock);
+ clear_bit(idx, db_area->db_bitmap_array);
+ spin_unlock(&db_area->idx_lock);
+}
+
+void hinic3_free_db_addr(struct hinic3_hwdev *hwdev, const u8 __iomem *db_base)
+{
+ struct hinic3_hwif *hwif;
+ uintptr_t distance;
+ u32 idx;
+
+ hwif = hwdev->hwif;
+ distance = db_base - hwif->db_base;
+ idx = distance / HINIC3_DB_PAGE_SIZE;
+
+ free_db_idx(hwif, idx);
+}
+
+int hinic3_alloc_db_addr(struct hinic3_hwdev *hwdev, void __iomem **db_base,
+ void __iomem **dwqe_base)
+{
+ struct hinic3_hwif *hwif;
+ u8 __iomem *addr;
+ u32 idx;
+ int err;
+
+ hwif = hwdev->hwif;
+
+ err = get_db_idx(hwif, &idx);
+ if (err)
+ return err;
+
+ addr = hwif->db_base + idx * HINIC3_DB_PAGE_SIZE;
+ *db_base = addr;
+
+ if (dwqe_base)
+ *dwqe_base = addr + HINIC3_DWQE_OFFSET;
+
+ return 0;
+}
+
+void hinic3_set_msix_state(struct hinic3_hwdev *hwdev, u16 msix_idx,
+ enum hinic3_msix_state flag)
+{
+ struct hinic3_hwif *hwif;
+ u8 int_msk = 1;
+ u32 mask_bits;
+ u32 addr;
+
+ hwif = hwdev->hwif;
+
+ if (flag)
+ mask_bits = HINIC3_MSI_CLR_INDIR_SET(int_msk, INT_MSK_SET);
+ else
+ mask_bits = HINIC3_MSI_CLR_INDIR_SET(int_msk, INT_MSK_CLR);
+ mask_bits = mask_bits |
+ HINIC3_MSI_CLR_INDIR_SET(msix_idx, SIMPLE_INDIR_IDX);
+
+ addr = HINIC3_CSR_FUNC_MSI_CLR_WR_ADDR;
+ hinic3_hwif_write_reg(hwif, addr, mask_bits);
+}
+
+static void disable_all_msix(struct hinic3_hwdev *hwdev)
+{
+ u16 num_irqs = hwdev->hwif->attr.num_irqs;
+ u16 i;
+
+ for (i = 0; i < num_irqs; i++)
+ hinic3_set_msix_state(hwdev, i, HINIC3_MSIX_DISABLE);
+}
+
+void hinic3_msix_intr_clear_resend_bit(struct hinic3_hwdev *hwdev, u16 msix_idx,
+ u8 clear_resend_en)
+{
+ struct hinic3_hwif *hwif;
+ u32 msix_ctrl, addr;
+
+ hwif = hwdev->hwif;
+
+ msix_ctrl = HINIC3_MSI_CLR_INDIR_SET(msix_idx, SIMPLE_INDIR_IDX) |
+ HINIC3_MSI_CLR_INDIR_SET(clear_resend_en, RESEND_TIMER_CLR);
+
+ addr = HINIC3_CSR_FUNC_MSI_CLR_WR_ADDR;
+ hinic3_hwif_write_reg(hwif, addr, msix_ctrl);
+}
+
+void hinic3_set_msix_auto_mask_state(struct hinic3_hwdev *hwdev, u16 msix_idx,
+ enum hinic3_msix_auto_mask flag)
+{
+ struct hinic3_hwif *hwif;
+ u32 mask_bits;
+ u32 addr;
+
+ hwif = hwdev->hwif;
+
+ if (flag)
+ mask_bits = HINIC3_MSI_CLR_INDIR_SET(1, AUTO_MSK_SET);
+ else
+ mask_bits = HINIC3_MSI_CLR_INDIR_SET(1, AUTO_MSK_CLR);
+
+ mask_bits = mask_bits |
+ HINIC3_MSI_CLR_INDIR_SET(msix_idx, SIMPLE_INDIR_IDX);
+
+ addr = HINIC3_CSR_FUNC_MSI_CLR_WR_ADDR;
+ hinic3_hwif_write_reg(hwif, addr, mask_bits);
+}
+
+static enum hinic3_wait_return check_db_outbound_enable_handler(void *priv_data)
+{
+ enum hinic3_outbound_ctrl outbound_ctrl;
+ struct hinic3_hwif *hwif = priv_data;
+ enum hinic3_doorbell_ctrl db_ctrl;
+
+ db_ctrl = hinic3_get_doorbell_ctrl_status(hwif);
+ outbound_ctrl = hinic3_get_outbound_ctrl_status(hwif);
+ if (outbound_ctrl == ENABLE_OUTBOUND && db_ctrl == ENABLE_DOORBELL)
+ return HINIC3_WAIT_PROCESS_CPL;
+
+ return HINIC3_WAIT_PROCESS_WAITING;
+}
+
+static int wait_until_doorbell_and_outbound_enabled(struct hinic3_hwif *hwif)
+{
+ return hinic3_wait_for_timeout(hwif, check_db_outbound_enable_handler,
+ HINIC3_DB_AND_OUTBOUND_EN_TIMEOUT,
+ USEC_PER_MSEC);
+}
+
+int hinic3_init_hwif(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_pcidev *pci_adapter = hwdev->adapter;
+ struct hinic3_hwif *hwif;
+ u32 attr1, attr4, attr5;
+ int err;
+
+ hwif = kzalloc(sizeof(*hwif), GFP_KERNEL);
+ if (!hwif)
+ return -ENOMEM;
+
+ hwdev->hwif = hwif;
+ hwif->cfg_regs_base = (u8 __iomem *)pci_adapter->cfg_reg_base +
+ HINIC3_VF_CFG_REG_OFFSET;
+
+ err = db_area_idx_init(hwif, pci_adapter->db_base_phy,
+ pci_adapter->db_base,
+ pci_adapter->db_dwqe_len);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init db area.\n");
+ goto err_free_hwif;
+ }
+
+ err = wait_hwif_ready(hwdev);
+ if (err) {
+ attr1 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR1_ADDR);
+ dev_err(hwdev->dev, "Chip status is not ready, attr1:0x%x\n",
+ attr1);
+ goto err_free_db_area_idx;
+ }
+
+ err = init_hwif_attr(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Init hwif attr failed\n");
+ goto err_free_db_area_idx;
+ }
+
+ err = wait_until_doorbell_and_outbound_enabled(hwif);
+ if (err) {
+ attr4 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR4_ADDR);
+ attr5 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR5_ADDR);
+ dev_err(hwdev->dev, "HW doorbell/outbound is disabled, attr4 0x%x attr5 0x%x\n",
+ attr4, attr5);
+ goto err_free_db_area_idx;
+ }
+
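+	/* Mask every MSI-X vector; consumers unmask their own vector once
+	 * they are ready to handle interrupts.
+	 */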
+ disable_all_msix(hwdev);
+
+ return 0;
+
+err_free_db_area_idx:
+ db_area_idx_free(&hwif->db_area);
+err_free_hwif:
+ kfree(hwif);
+
+ return err;
+}
+
+void hinic3_free_hwif(struct hinic3_hwdev *hwdev)
+{
+ db_area_idx_free(&hwdev->hwif->db_area);
+ kfree(hwdev->hwif);
+}
+
+u16 hinic3_global_func_id(struct hinic3_hwdev *hwdev)
+{
+ return hwdev->hwif->attr.func_global_idx;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h
new file mode 100644
index 000000000000..c02904e861cc
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_HWIF_H_
+#define _HINIC3_HWIF_H_
+
+#include <linux/build_bug.h>
+#include <linux/spinlock_types.h>
+
+struct hinic3_hwdev;
+
+enum hinic3_func_type {
+ HINIC3_FUNC_TYPE_VF = 1,
+};
+
+struct hinic3_db_area {
+ unsigned long *db_bitmap_array;
+ u32 db_max_areas;
+ /* protect doorbell area alloc and free */
+ spinlock_t idx_lock;
+};
+
+struct hinic3_func_attr {
+ enum hinic3_func_type func_type;
+ u16 func_global_idx;
+ u16 global_vf_id_of_pf;
+ u16 num_irqs;
+ u16 num_sq;
+ u8 port_to_port_idx;
+ u8 pci_intf_idx;
+ u8 ppf_idx;
+ u8 num_aeqs;
+ u8 num_ceqs;
+ u8 msix_flex_en;
+};
+
+static_assert(sizeof(struct hinic3_func_attr) == 20);
+
+struct hinic3_hwif {
+ u8 __iomem *cfg_regs_base;
+ u64 db_base_phy;
+ u64 db_dwqe_len;
+ u8 __iomem *db_base;
+ struct hinic3_db_area db_area;
+ struct hinic3_func_attr attr;
+};
+
+enum hinic3_outbound_ctrl {
+ ENABLE_OUTBOUND = 0x0,
+ DISABLE_OUTBOUND = 0x1,
+};
+
+enum hinic3_doorbell_ctrl {
+ ENABLE_DOORBELL = 0,
+ DISABLE_DOORBELL = 1,
+};
+
+enum hinic3_msix_state {
+ HINIC3_MSIX_ENABLE,
+ HINIC3_MSIX_DISABLE,
+};
+
+enum hinic3_msix_auto_mask {
+ HINIC3_CLR_MSIX_AUTO_MASK,
+ HINIC3_SET_MSIX_AUTO_MASK,
+};
+
+u32 hinic3_hwif_read_reg(struct hinic3_hwif *hwif, u32 reg);
+void hinic3_hwif_write_reg(struct hinic3_hwif *hwif, u32 reg, u32 val);
+
+void hinic3_toggle_doorbell(struct hinic3_hwif *hwif,
+ enum hinic3_doorbell_ctrl flag);
+
+int hinic3_alloc_db_addr(struct hinic3_hwdev *hwdev, void __iomem **db_base,
+ void __iomem **dwqe_base);
+void hinic3_free_db_addr(struct hinic3_hwdev *hwdev, const u8 __iomem *db_base);
+
+int hinic3_init_hwif(struct hinic3_hwdev *hwdev);
+void hinic3_free_hwif(struct hinic3_hwdev *hwdev);
+
+void hinic3_set_msix_state(struct hinic3_hwdev *hwdev, u16 msix_idx,
+ enum hinic3_msix_state flag);
+void hinic3_msix_intr_clear_resend_bit(struct hinic3_hwdev *hwdev, u16 msix_idx,
+ u8 clear_resend_en);
+void hinic3_set_msix_auto_mask_state(struct hinic3_hwdev *hwdev, u16 msix_idx,
+ enum hinic3_msix_auto_mask flag);
+
+u16 hinic3_global_func_id(struct hinic3_hwdev *hwdev);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c b/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c
new file mode 100644
index 000000000000..a69b361225e9
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/netdevice.h>
+
+#include "hinic3_hw_comm.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+#include "hinic3_nic_dev.h"
+#include "hinic3_rx.h"
+#include "hinic3_tx.h"
+
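+/* NAPI poll: drain TX completions first, then receive up to @budget packets.
+ * Only when both queues are idle is the poll completed and the MSI-X vector
+ * re-enabled.
+ */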
+static int hinic3_poll(struct napi_struct *napi, int budget)
+{
+ struct hinic3_irq_cfg *irq_cfg =
+ container_of(napi, struct hinic3_irq_cfg, napi);
+ struct hinic3_nic_dev *nic_dev;
+ bool busy = false;
+ int work_done;
+
+ nic_dev = netdev_priv(irq_cfg->netdev);
+
+ busy |= hinic3_tx_poll(irq_cfg->txq, budget);
+
+ if (unlikely(!budget))
+ return 0;
+
+ work_done = hinic3_rx_poll(irq_cfg->rxq, budget);
+ busy |= work_done >= budget;
+
+ if (busy)
+ return budget;
+
+ if (likely(napi_complete_done(napi, work_done)))
+ hinic3_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx,
+ HINIC3_MSIX_ENABLE);
+
+ return work_done;
+}
+
+static void qp_add_napi(struct hinic3_irq_cfg *irq_cfg)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev);
+
+ netif_napi_add(nic_dev->netdev, &irq_cfg->napi, hinic3_poll);
+ netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id,
+ NETDEV_QUEUE_TYPE_RX, &irq_cfg->napi);
+ netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id,
+ NETDEV_QUEUE_TYPE_TX, &irq_cfg->napi);
+ napi_enable(&irq_cfg->napi);
+}
+
+static void qp_del_napi(struct hinic3_irq_cfg *irq_cfg)
+{
+ napi_disable(&irq_cfg->napi);
+ netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id,
+ NETDEV_QUEUE_TYPE_RX, NULL);
+ netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id,
+ NETDEV_QUEUE_TYPE_TX, NULL);
+ netif_stop_subqueue(irq_cfg->netdev, irq_cfg->irq_id);
+ netif_napi_del(&irq_cfg->napi);
+}
+
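+/* Queue pair interrupt handler: acknowledge the vector by clearing its resend
+ * bit and defer the rest of the work to NAPI. The auto-masked vector is
+ * re-enabled by hinic3_poll() once the queues are drained.
+ */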
+static irqreturn_t qp_irq(int irq, void *data)
+{
+ struct hinic3_irq_cfg *irq_cfg = data;
+ struct hinic3_nic_dev *nic_dev;
+
+ nic_dev = netdev_priv(irq_cfg->netdev);
+ hinic3_msix_intr_clear_resend_bit(nic_dev->hwdev,
+ irq_cfg->msix_entry_idx, 1);
+
+ napi_schedule(&irq_cfg->napi);
+
+ return IRQ_HANDLED;
+}
+
+static int hinic3_request_irq(struct hinic3_irq_cfg *irq_cfg, u16 q_id)
+{
+ struct hinic3_interrupt_info info = {};
+ struct hinic3_nic_dev *nic_dev;
+ struct net_device *netdev;
+ int err;
+
+ netdev = irq_cfg->netdev;
+ nic_dev = netdev_priv(netdev);
+ qp_add_napi(irq_cfg);
+
+ info.msix_index = irq_cfg->msix_entry_idx;
+ info.interrupt_coalesc_set = 1;
+ info.pending_limit = nic_dev->intr_coalesce[q_id].pending_limit;
+ info.coalesc_timer_cfg =
+ nic_dev->intr_coalesce[q_id].coalesce_timer_cfg;
+ info.resend_timer_cfg = nic_dev->intr_coalesce[q_id].resend_timer_cfg;
+ err = hinic3_set_interrupt_cfg_direct(nic_dev->hwdev, &info);
+ if (err) {
+ netdev_err(netdev, "Failed to set RX interrupt coalescing attribute.\n");
+ qp_del_napi(irq_cfg);
+ return err;
+ }
+
+ err = request_irq(irq_cfg->irq_id, qp_irq, 0, irq_cfg->irq_name,
+ irq_cfg);
+ if (err) {
+ qp_del_napi(irq_cfg);
+ return err;
+ }
+
+ irq_set_affinity_hint(irq_cfg->irq_id, &irq_cfg->affinity_mask);
+
+ return 0;
+}
+
+static void hinic3_release_irq(struct hinic3_irq_cfg *irq_cfg)
+{
+ irq_set_affinity_hint(irq_cfg->irq_id, NULL);
+ free_irq(irq_cfg->irq_id, irq_cfg);
+}
+
+int hinic3_qps_irq_init(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct pci_dev *pdev = nic_dev->pdev;
+ struct hinic3_irq_cfg *irq_cfg;
+ struct msix_entry *msix_entry;
+ u32 local_cpu;
+ u16 q_id;
+ int err;
+
+ for (q_id = 0; q_id < nic_dev->q_params.num_qps; q_id++) {
+ msix_entry = &nic_dev->qps_msix_entries[q_id];
+ irq_cfg = &nic_dev->q_params.irq_cfg[q_id];
+
+ irq_cfg->irq_id = msix_entry->vector;
+ irq_cfg->msix_entry_idx = msix_entry->entry;
+ irq_cfg->netdev = netdev;
+ irq_cfg->txq = &nic_dev->txqs[q_id];
+ irq_cfg->rxq = &nic_dev->rxqs[q_id];
+ nic_dev->rxqs[q_id].irq_cfg = irq_cfg;
+
+ local_cpu = cpumask_local_spread(q_id, dev_to_node(&pdev->dev));
+ cpumask_set_cpu(local_cpu, &irq_cfg->affinity_mask);
+
+ snprintf(irq_cfg->irq_name, sizeof(irq_cfg->irq_name),
+ "%s_qp%u", netdev->name, q_id);
+
+ err = hinic3_request_irq(irq_cfg, q_id);
+ if (err) {
+ netdev_err(netdev, "Failed to request Rx irq\n");
+ goto err_release_irqs;
+ }
+
+ hinic3_set_msix_auto_mask_state(nic_dev->hwdev,
+ irq_cfg->msix_entry_idx,
+ HINIC3_SET_MSIX_AUTO_MASK);
+ hinic3_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx,
+ HINIC3_MSIX_ENABLE);
+ }
+
+ return 0;
+
+err_release_irqs:
+ while (q_id > 0) {
+ q_id--;
+ irq_cfg = &nic_dev->q_params.irq_cfg[q_id];
+ qp_del_napi(irq_cfg);
+ hinic3_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx,
+ HINIC3_MSIX_DISABLE);
+ hinic3_set_msix_auto_mask_state(nic_dev->hwdev,
+ irq_cfg->msix_entry_idx,
+ HINIC3_CLR_MSIX_AUTO_MASK);
+ hinic3_release_irq(irq_cfg);
+ }
+
+ return err;
+}
+
+void hinic3_qps_irq_uninit(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_irq_cfg *irq_cfg;
+ u16 q_id;
+
+ for (q_id = 0; q_id < nic_dev->q_params.num_qps; q_id++) {
+ irq_cfg = &nic_dev->q_params.irq_cfg[q_id];
+ qp_del_napi(irq_cfg);
+ hinic3_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx,
+ HINIC3_MSIX_DISABLE);
+ hinic3_set_msix_auto_mask_state(nic_dev->hwdev,
+ irq_cfg->msix_entry_idx,
+ HINIC3_CLR_MSIX_AUTO_MASK);
+ hinic3_release_irq(irq_cfg);
+ }
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_lld.c b/drivers/net/ethernet/huawei/hinic3/hinic3_lld.c
new file mode 100644
index 000000000000..3db8241a3b0c
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_lld.c
@@ -0,0 +1,421 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+
+#include "hinic3_hw_cfg.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_lld.h"
+#include "hinic3_mgmt.h"
+#include "hinic3_pci_id_tbl.h"
+
+#define HINIC3_VF_PCI_CFG_REG_BAR 0
+#define HINIC3_PCI_INTR_REG_BAR 2
+#define HINIC3_PCI_DB_BAR 4
+
+#define HINIC3_EVENT_POLL_SLEEP_US 1000
+#define HINIC3_EVENT_POLL_TIMEOUT_US 10000000
+
+static struct hinic3_adev_device {
+ const char *name;
+} hinic3_adev_devices[HINIC3_SERVICE_T_MAX] = {
+ [HINIC3_SERVICE_T_NIC] = {
+ .name = "nic",
+ },
+};
+
+static bool hinic3_adev_svc_supported(struct hinic3_hwdev *hwdev,
+ enum hinic3_service_type svc_type)
+{
+ switch (svc_type) {
+ case HINIC3_SERVICE_T_NIC:
+ return hinic3_support_nic(hwdev);
+ default:
+ break;
+ }
+
+ return false;
+}
+
+static void hinic3_comm_adev_release(struct device *dev)
+{
+ struct hinic3_adev *hadev = container_of(dev, struct hinic3_adev,
+ adev.dev);
+
+ kfree(hadev);
+}
+
+static struct hinic3_adev *hinic3_add_one_adev(struct hinic3_hwdev *hwdev,
+ enum hinic3_service_type svc_type)
+{
+ struct hinic3_adev *hadev;
+ const char *svc_name;
+ int ret;
+
+ hadev = kzalloc(sizeof(*hadev), GFP_KERNEL);
+ if (!hadev)
+ return NULL;
+
+ svc_name = hinic3_adev_devices[svc_type].name;
+ hadev->adev.name = svc_name;
+ hadev->adev.id = hwdev->dev_id;
+ hadev->adev.dev.parent = hwdev->dev;
+ hadev->adev.dev.release = hinic3_comm_adev_release;
+ hadev->svc_type = svc_type;
+ hadev->hwdev = hwdev;
+
+ ret = auxiliary_device_init(&hadev->adev);
+ if (ret) {
+ dev_err(hwdev->dev, "failed init adev %s %u\n",
+ svc_name, hwdev->dev_id);
+ kfree(hadev);
+ return NULL;
+ }
+
+ ret = auxiliary_device_add(&hadev->adev);
+ if (ret) {
+ dev_err(hwdev->dev, "failed to add adev %s %u\n",
+ svc_name, hwdev->dev_id);
+ auxiliary_device_uninit(&hadev->adev);
+ return NULL;
+ }
+
+ return hadev;
+}
+
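+/* Remove one auxiliary device. Poll (for up to 10 seconds) until the
+ * per-service state bit can be taken, delete the device and release the bit
+ * again if it was acquired here.
+ */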
+static void hinic3_del_one_adev(struct hinic3_hwdev *hwdev,
+ enum hinic3_service_type svc_type)
+{
+ struct hinic3_pcidev *pci_adapter = hwdev->adapter;
+ struct hinic3_adev *hadev;
+ int timeout;
+ bool state;
+
+ timeout = read_poll_timeout(test_and_set_bit, state, !state,
+ HINIC3_EVENT_POLL_SLEEP_US,
+ HINIC3_EVENT_POLL_TIMEOUT_US,
+ false, svc_type, &pci_adapter->state);
+
+ hadev = pci_adapter->hadev[svc_type];
+ auxiliary_device_delete(&hadev->adev);
+ auxiliary_device_uninit(&hadev->adev);
+ pci_adapter->hadev[svc_type] = NULL;
+ if (!timeout)
+ clear_bit(svc_type, &pci_adapter->state);
+}
+
+static int hinic3_attach_aux_devices(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_pcidev *pci_adapter = hwdev->adapter;
+ enum hinic3_service_type svc_type;
+
+ mutex_lock(&pci_adapter->pdev_mutex);
+
+ for (svc_type = 0; svc_type < HINIC3_SERVICE_T_MAX; svc_type++) {
+ if (!hinic3_adev_svc_supported(hwdev, svc_type))
+ continue;
+
+ pci_adapter->hadev[svc_type] = hinic3_add_one_adev(hwdev,
+ svc_type);
+ if (!pci_adapter->hadev[svc_type])
+ goto err_del_adevs;
+ }
+ mutex_unlock(&pci_adapter->pdev_mutex);
+
+ return 0;
+
+err_del_adevs:
+ while (svc_type > 0) {
+ svc_type--;
+ if (pci_adapter->hadev[svc_type]) {
+ hinic3_del_one_adev(hwdev, svc_type);
+ pci_adapter->hadev[svc_type] = NULL;
+ }
+ }
+ mutex_unlock(&pci_adapter->pdev_mutex);
+
+ return -ENOMEM;
+}
+
+static void hinic3_detach_aux_devices(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_pcidev *pci_adapter = hwdev->adapter;
+ int i;
+
+ mutex_lock(&pci_adapter->pdev_mutex);
+ for (i = 0; i < ARRAY_SIZE(hinic3_adev_devices); i++) {
+ if (pci_adapter->hadev[i])
+ hinic3_del_one_adev(hwdev, i);
+ }
+ mutex_unlock(&pci_adapter->pdev_mutex);
+}
+
+struct hinic3_hwdev *hinic3_adev_get_hwdev(struct auxiliary_device *adev)
+{
+ struct hinic3_adev *hadev;
+
+ hadev = container_of(adev, struct hinic3_adev, adev);
+
+ return hadev->hwdev;
+}
+
+void hinic3_adev_event_register(struct auxiliary_device *adev,
+ void (*event_handler)(struct auxiliary_device *adev,
+ struct hinic3_event_info *event))
+{
+ struct hinic3_adev *hadev;
+
+ hadev = container_of(adev, struct hinic3_adev, adev);
+ hadev->event = event_handler;
+}
+
+void hinic3_adev_event_unregister(struct auxiliary_device *adev)
+{
+ struct hinic3_adev *hadev;
+
+ hadev = container_of(adev, struct hinic3_adev, adev);
+ hadev->event = NULL;
+}
+
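+/* Map the three BARs used by the VF: BAR0 for configuration registers, BAR2
+ * for interrupt control registers and BAR4 for the doorbell/DWQE area.
+ */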
+static int hinic3_mapping_bar(struct pci_dev *pdev,
+ struct hinic3_pcidev *pci_adapter)
+{
+ pci_adapter->cfg_reg_base = pci_ioremap_bar(pdev,
+ HINIC3_VF_PCI_CFG_REG_BAR);
+ if (!pci_adapter->cfg_reg_base) {
+ dev_err(&pdev->dev, "Failed to map configuration regs\n");
+ return -ENOMEM;
+ }
+
+ pci_adapter->intr_reg_base = pci_ioremap_bar(pdev,
+ HINIC3_PCI_INTR_REG_BAR);
+ if (!pci_adapter->intr_reg_base) {
+ dev_err(&pdev->dev, "Failed to map interrupt regs\n");
+ goto err_unmap_cfg_reg_base;
+ }
+
+ pci_adapter->db_base_phy = pci_resource_start(pdev, HINIC3_PCI_DB_BAR);
+ pci_adapter->db_dwqe_len = pci_resource_len(pdev, HINIC3_PCI_DB_BAR);
+ pci_adapter->db_base = pci_ioremap_bar(pdev, HINIC3_PCI_DB_BAR);
+ if (!pci_adapter->db_base) {
+ dev_err(&pdev->dev, "Failed to map doorbell regs\n");
+ goto err_unmap_intr_reg_base;
+ }
+
+ return 0;
+
+err_unmap_intr_reg_base:
+ iounmap(pci_adapter->intr_reg_base);
+
+err_unmap_cfg_reg_base:
+ iounmap(pci_adapter->cfg_reg_base);
+
+ return -ENOMEM;
+}
+
+static void hinic3_unmapping_bar(struct hinic3_pcidev *pci_adapter)
+{
+ iounmap(pci_adapter->db_base);
+ iounmap(pci_adapter->intr_reg_base);
+ iounmap(pci_adapter->cfg_reg_base);
+}
+
+static int hinic3_pci_init(struct pci_dev *pdev)
+{
+ struct hinic3_pcidev *pci_adapter;
+ int err;
+
+ pci_adapter = kzalloc(sizeof(*pci_adapter), GFP_KERNEL);
+ if (!pci_adapter)
+ return -ENOMEM;
+
+ pci_adapter->pdev = pdev;
+ mutex_init(&pci_adapter->pdev_mutex);
+
+ pci_set_drvdata(pdev, pci_adapter);
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to enable PCI device\n");
+ goto err_free_pci_adapter;
+ }
+
+ err = pci_request_regions(pdev, HINIC3_NIC_DRV_NAME);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to request regions\n");
+ goto err_disable_device;
+ }
+
+ pci_set_master(pdev);
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "Failed to set DMA mask\n");
+ goto err_release_regions;
+ }
+
+ return 0;
+
+err_release_regions:
+ pci_clear_master(pdev);
+ pci_release_regions(pdev);
+
+err_disable_device:
+ pci_disable_device(pdev);
+
+err_free_pci_adapter:
+ pci_set_drvdata(pdev, NULL);
+ mutex_destroy(&pci_adapter->pdev_mutex);
+ kfree(pci_adapter);
+
+ return err;
+}
+
+static void hinic3_pci_uninit(struct pci_dev *pdev)
+{
+ struct hinic3_pcidev *pci_adapter = pci_get_drvdata(pdev);
+
+ pci_clear_master(pdev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ mutex_destroy(&pci_adapter->pdev_mutex);
+ kfree(pci_adapter);
+}
+
+static int hinic3_func_init(struct pci_dev *pdev,
+ struct hinic3_pcidev *pci_adapter)
+{
+ int err;
+
+ err = hinic3_init_hwdev(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize hardware device\n");
+ return err;
+ }
+
+ err = hinic3_attach_aux_devices(pci_adapter->hwdev);
+ if (err)
+ goto err_free_hwdev;
+
+ return 0;
+
+err_free_hwdev:
+ hinic3_free_hwdev(pci_adapter->hwdev);
+
+ return err;
+}
+
+static void hinic3_func_uninit(struct pci_dev *pdev)
+{
+ struct hinic3_pcidev *pci_adapter = pci_get_drvdata(pdev);
+
+ hinic3_flush_mgmt_workq(pci_adapter->hwdev);
+ hinic3_detach_aux_devices(pci_adapter->hwdev);
+ hinic3_free_hwdev(pci_adapter->hwdev);
+}
+
+static int hinic3_probe_func(struct hinic3_pcidev *pci_adapter)
+{
+ struct pci_dev *pdev = pci_adapter->pdev;
+ int err;
+
+ err = hinic3_mapping_bar(pdev, pci_adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to map bar\n");
+ goto err_out;
+ }
+
+ err = hinic3_func_init(pdev, pci_adapter);
+ if (err)
+ goto err_unmap_bar;
+
+ return 0;
+
+err_unmap_bar:
+ hinic3_unmapping_bar(pci_adapter);
+
+err_out:
+ dev_err(&pdev->dev, "PCIe device probe function failed\n");
+
+ return err;
+}
+
+static void hinic3_remove_func(struct hinic3_pcidev *pci_adapter)
+{
+ struct pci_dev *pdev = pci_adapter->pdev;
+
+ hinic3_func_uninit(pdev);
+ hinic3_unmapping_bar(pci_adapter);
+}
+
+static int hinic3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct hinic3_pcidev *pci_adapter;
+ int err;
+
+ err = hinic3_pci_init(pdev);
+ if (err)
+ goto err_out;
+
+ pci_adapter = pci_get_drvdata(pdev);
+ err = hinic3_probe_func(pci_adapter);
+ if (err)
+ goto err_uninit_pci;
+
+ return 0;
+
+err_uninit_pci:
+ hinic3_pci_uninit(pdev);
+
+err_out:
+ dev_err(&pdev->dev, "PCIe device probe failed\n");
+
+ return err;
+}
+
+static void hinic3_remove(struct pci_dev *pdev)
+{
+ struct hinic3_pcidev *pci_adapter = pci_get_drvdata(pdev);
+
+ hinic3_remove_func(pci_adapter);
+ hinic3_pci_uninit(pdev);
+}
+
+static const struct pci_device_id hinic3_pci_table[] = {
+ {PCI_VDEVICE(HUAWEI, PCI_DEV_ID_HINIC3_VF), 0},
+ {0, 0}
+};
+
+MODULE_DEVICE_TABLE(pci, hinic3_pci_table);
+
+static void hinic3_shutdown(struct pci_dev *pdev)
+{
+ struct hinic3_pcidev *pci_adapter = pci_get_drvdata(pdev);
+
+ pci_disable_device(pdev);
+
+ if (pci_adapter)
+ hinic3_set_api_stop(pci_adapter->hwdev);
+}
+
+static struct pci_driver hinic3_driver = {
+ .name = HINIC3_NIC_DRV_NAME,
+ .id_table = hinic3_pci_table,
+ .probe = hinic3_probe,
+ .remove = hinic3_remove,
+ .shutdown = hinic3_shutdown,
+ .sriov_configure = pci_sriov_configure_simple
+};
+
+int hinic3_lld_init(void)
+{
+ return pci_register_driver(&hinic3_driver);
+}
+
+void hinic3_lld_exit(void)
+{
+ pci_unregister_driver(&hinic3_driver);
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_lld.h b/drivers/net/ethernet/huawei/hinic3/hinic3_lld.h
new file mode 100644
index 000000000000..322b44803476
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_lld.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_LLD_H_
+#define _HINIC3_LLD_H_
+
+#include <linux/auxiliary_bus.h>
+
+struct hinic3_event_info;
+
+#define HINIC3_NIC_DRV_NAME "hinic3"
+
+int hinic3_lld_init(void);
+void hinic3_lld_exit(void);
+void hinic3_adev_event_register(struct auxiliary_device *adev,
+ void (*event_handler)(struct auxiliary_device *adev,
+ struct hinic3_event_info *event));
+void hinic3_adev_event_unregister(struct auxiliary_device *adev);
+struct hinic3_hwdev *hinic3_adev_get_hwdev(struct auxiliary_device *adev);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_main.c b/drivers/net/ethernet/huawei/hinic3/hinic3_main.c
new file mode 100644
index 000000000000..6d87d4d895ba
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_main.c
@@ -0,0 +1,409 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+
+#include "hinic3_common.h"
+#include "hinic3_hw_comm.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+#include "hinic3_lld.h"
+#include "hinic3_nic_cfg.h"
+#include "hinic3_nic_dev.h"
+#include "hinic3_nic_io.h"
+#include "hinic3_rss.h"
+#include "hinic3_rx.h"
+#include "hinic3_tx.h"
+
+#define HINIC3_NIC_DRV_DESC "Intelligent Network Interface Card Driver"
+
+#define HINIC3_RX_BUF_LEN 2048
+#define HINIC3_LRO_REPLENISH_THLD 256
+#define HINIC3_NIC_DEV_WQ_NAME "hinic3_nic_dev_wq"
+
+#define HINIC3_SQ_DEPTH 1024
+#define HINIC3_RQ_DEPTH 1024
+
+#define HINIC3_DEFAULT_TXRX_MSIX_PENDING_LIMIT 2
+#define HINIC3_DEFAULT_TXRX_MSIX_COALESC_TIMER_CFG 25
+#define HINIC3_DEFAULT_TXRX_MSIX_RESEND_TIMER_CFG 7
+
+static void init_intr_coal_param(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_intr_coal_info *info;
+ u16 i;
+
+ for (i = 0; i < nic_dev->max_qps; i++) {
+ info = &nic_dev->intr_coalesce[i];
+ info->pending_limit = HINIC3_DEFAULT_TXRX_MSIX_PENDING_LIMIT;
+ info->coalesce_timer_cfg = HINIC3_DEFAULT_TXRX_MSIX_COALESC_TIMER_CFG;
+ info->resend_timer_cfg = HINIC3_DEFAULT_TXRX_MSIX_RESEND_TIMER_CFG;
+ }
+}
+
+static int hinic3_init_intr_coalesce(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ nic_dev->intr_coalesce = kcalloc(nic_dev->max_qps,
+ sizeof(*nic_dev->intr_coalesce),
+ GFP_KERNEL);
+
+ if (!nic_dev->intr_coalesce)
+ return -ENOMEM;
+
+ init_intr_coal_param(netdev);
+
+ return 0;
+}
+
+static void hinic3_free_intr_coalesce(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ kfree(nic_dev->intr_coalesce);
+}
+
+static int hinic3_alloc_txrxqs(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ int err;
+
+ err = hinic3_alloc_txqs(netdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to alloc txqs\n");
+ return err;
+ }
+
+ err = hinic3_alloc_rxqs(netdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to alloc rxqs\n");
+ goto err_free_txqs;
+ }
+
+ err = hinic3_init_intr_coalesce(netdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init_intr_coalesce\n");
+ goto err_free_rxqs;
+ }
+
+ return 0;
+
+err_free_rxqs:
+ hinic3_free_rxqs(netdev);
+
+err_free_txqs:
+ hinic3_free_txqs(netdev);
+
+ return err;
+}
+
+static void hinic3_free_txrxqs(struct net_device *netdev)
+{
+ hinic3_free_intr_coalesce(netdev);
+ hinic3_free_rxqs(netdev);
+ hinic3_free_txqs(netdev);
+}
+
+static int hinic3_init_nic_dev(struct net_device *netdev,
+ struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct pci_dev *pdev = hwdev->pdev;
+
+ nic_dev->netdev = netdev;
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ nic_dev->hwdev = hwdev;
+ nic_dev->pdev = pdev;
+
+ nic_dev->rx_buf_len = HINIC3_RX_BUF_LEN;
+ nic_dev->lro_replenish_thld = HINIC3_LRO_REPLENISH_THLD;
+ nic_dev->nic_svc_cap = hwdev->cfg_mgmt->cap.nic_svc_cap;
+
+ return 0;
+}
+
+static int hinic3_sw_init(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ int err;
+
+ nic_dev->q_params.sq_depth = HINIC3_SQ_DEPTH;
+ nic_dev->q_params.rq_depth = HINIC3_RQ_DEPTH;
+
+ hinic3_try_to_enable_rss(netdev);
+
+ /* The VF driver always uses a random MAC address. During VM migration
+ * to a new device, the new device should learn the VM's old MAC rather
+ * than provide its own MAC. The product design assumes that every VF
+ * is susceptible to migration, so the device avoids offering a MAC
+ * address to VFs.
+ */
+ eth_hw_addr_random(netdev);
+ err = hinic3_set_mac(hwdev, netdev->dev_addr, 0,
+ hinic3_global_func_id(hwdev));
+ if (err) {
+ dev_err(hwdev->dev, "Failed to set default MAC\n");
+ goto err_clear_rss_config;
+ }
+
+ err = hinic3_alloc_txrxqs(netdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to alloc qps\n");
+ goto err_del_mac;
+ }
+
+ return 0;
+
+err_del_mac:
+ hinic3_del_mac(hwdev, netdev->dev_addr, 0,
+ hinic3_global_func_id(hwdev));
+err_clear_rss_config:
+ hinic3_clear_rss_config(netdev);
+
+ return err;
+}
+
+static void hinic3_sw_uninit(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ hinic3_free_txrxqs(netdev);
+ hinic3_del_mac(nic_dev->hwdev, netdev->dev_addr, 0,
+ hinic3_global_func_id(nic_dev->hwdev));
+ hinic3_clear_rss_config(netdev);
+}
+
+static void hinic3_assign_netdev_ops(struct net_device *netdev)
+{
+ hinic3_set_netdev_ops(netdev);
+}
+
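+/* SG and high-DMA are always advertised; checksum, SCTP CRC and TSO offloads
+ * are advertised only when the device reports support for them.
+ */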
+static void netdev_feature_init(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ netdev_features_t cso_fts = 0;
+ netdev_features_t tso_fts = 0;
+ netdev_features_t dft_fts;
+
+ dft_fts = NETIF_F_SG | NETIF_F_HIGHDMA;
+ if (hinic3_test_support(nic_dev, HINIC3_NIC_F_CSUM))
+ cso_fts |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
+ if (hinic3_test_support(nic_dev, HINIC3_NIC_F_SCTP_CRC))
+ cso_fts |= NETIF_F_SCTP_CRC;
+ if (hinic3_test_support(nic_dev, HINIC3_NIC_F_TSO))
+ tso_fts |= NETIF_F_TSO | NETIF_F_TSO6;
+
+ netdev->features |= dft_fts | cso_fts | tso_fts;
+}
+
+static int hinic3_set_default_hw_feature(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ int err;
+
+ err = hinic3_set_nic_feature_to_hw(nic_dev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to set nic features\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static void hinic3_link_status_change(struct net_device *netdev,
+ bool link_status_up)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ if (link_status_up) {
+ if (netif_carrier_ok(netdev))
+ return;
+
+ nic_dev->link_status_up = true;
+ netif_carrier_on(netdev);
+ netdev_dbg(netdev, "Link is up\n");
+ } else {
+ if (!netif_carrier_ok(netdev))
+ return;
+
+ nic_dev->link_status_up = false;
+ netif_carrier_off(netdev);
+ netdev_dbg(netdev, "Link is down\n");
+ }
+}
+
+static void hinic3_nic_event(struct auxiliary_device *adev,
+ struct hinic3_event_info *event)
+{
+ struct hinic3_nic_dev *nic_dev = dev_get_drvdata(&adev->dev);
+ struct net_device *netdev;
+
+ netdev = nic_dev->netdev;
+
+ switch (HINIC3_SRV_EVENT_TYPE(event->service, event->type)) {
+ case HINIC3_SRV_EVENT_TYPE(HINIC3_EVENT_SRV_NIC,
+ HINIC3_NIC_EVENT_LINK_UP):
+ hinic3_link_status_change(netdev, true);
+ break;
+ case HINIC3_SRV_EVENT_TYPE(HINIC3_EVENT_SRV_NIC,
+ HINIC3_NIC_EVENT_LINK_DOWN):
+ hinic3_link_status_change(netdev, false);
+ break;
+ default:
+ break;
+ }
+}
+
+static int hinic3_nic_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct hinic3_hwdev *hwdev = hinic3_adev_get_hwdev(adev);
+ struct pci_dev *pdev = hwdev->pdev;
+ struct hinic3_nic_dev *nic_dev;
+ struct net_device *netdev;
+ u16 max_qps, glb_func_id;
+ int err;
+
+ if (!hinic3_support_nic(hwdev)) {
+ dev_dbg(&adev->dev, "HW doesn't support nic\n");
+ return 0;
+ }
+
+ hinic3_adev_event_register(adev, hinic3_nic_event);
+
+ glb_func_id = hinic3_global_func_id(hwdev);
+ err = hinic3_func_reset(hwdev, glb_func_id, COMM_FUNC_RESET_BIT_NIC);
+ if (err) {
+ dev_err(&adev->dev, "Failed to reset function\n");
+ goto err_unregister_adev_event;
+ }
+
+ max_qps = hinic3_func_max_qnum(hwdev);
+ netdev = alloc_etherdev_mq(sizeof(*nic_dev), max_qps);
+ if (!netdev) {
+ dev_err(&adev->dev, "Failed to allocate netdev\n");
+ err = -ENOMEM;
+ goto err_unregister_adev_event;
+ }
+
+ nic_dev = netdev_priv(netdev);
+ dev_set_drvdata(&adev->dev, nic_dev);
+ err = hinic3_init_nic_dev(netdev, hwdev);
+ if (err)
+ goto err_free_netdev;
+
+ err = hinic3_init_nic_io(nic_dev);
+ if (err)
+ goto err_free_netdev;
+
+ err = hinic3_sw_init(netdev);
+ if (err)
+ goto err_free_nic_io;
+
+ hinic3_assign_netdev_ops(netdev);
+
+ netdev_feature_init(netdev);
+ err = hinic3_set_default_hw_feature(netdev);
+ if (err)
+ goto err_uninit_sw;
+
+ netif_carrier_off(netdev);
+
+ err = register_netdev(netdev);
+ if (err)
+ goto err_uninit_nic_feature;
+
+ return 0;
+
+err_uninit_nic_feature:
+ hinic3_update_nic_feature(nic_dev, 0);
+ hinic3_set_nic_feature_to_hw(nic_dev);
+
+err_uninit_sw:
+ hinic3_sw_uninit(netdev);
+
+err_free_nic_io:
+ hinic3_free_nic_io(nic_dev);
+
+err_free_netdev:
+ free_netdev(netdev);
+
+err_unregister_adev_event:
+ hinic3_adev_event_unregister(adev);
+ dev_err(&pdev->dev, "NIC service probe failed\n");
+
+ return err;
+}
+
+static void hinic3_nic_remove(struct auxiliary_device *adev)
+{
+ struct hinic3_nic_dev *nic_dev = dev_get_drvdata(&adev->dev);
+ struct net_device *netdev;
+
+ if (!hinic3_support_nic(nic_dev->hwdev))
+ return;
+
+ netdev = nic_dev->netdev;
+ unregister_netdev(netdev);
+
+ hinic3_update_nic_feature(nic_dev, 0);
+ hinic3_set_nic_feature_to_hw(nic_dev);
+ hinic3_sw_uninit(netdev);
+
+ hinic3_free_nic_io(nic_dev);
+
+ free_netdev(netdev);
+}
+
+static const struct auxiliary_device_id hinic3_nic_id_table[] = {
+ {
+ .name = HINIC3_NIC_DRV_NAME ".nic",
+ },
+ {}
+};
+
+static struct auxiliary_driver hinic3_nic_driver = {
+ .probe = hinic3_nic_probe,
+ .remove = hinic3_nic_remove,
+ .suspend = NULL,
+ .resume = NULL,
+ .name = "nic",
+ .id_table = hinic3_nic_id_table,
+};
+
+static __init int hinic3_nic_lld_init(void)
+{
+ int err;
+
+ err = hinic3_lld_init();
+ if (err)
+ return err;
+
+ err = auxiliary_driver_register(&hinic3_nic_driver);
+ if (err) {
+ hinic3_lld_exit();
+ return err;
+ }
+
+ return 0;
+}
+
+static __exit void hinic3_nic_lld_exit(void)
+{
+ auxiliary_driver_unregister(&hinic3_nic_driver);
+
+ hinic3_lld_exit();
+}
+
+module_init(hinic3_nic_lld_init);
+module_exit(hinic3_nic_lld_exit);
+
+MODULE_AUTHOR("Huawei Technologies CO., Ltd");
+MODULE_DESCRIPTION(HINIC3_NIC_DRV_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c
new file mode 100644
index 000000000000..cf67e26acece
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c
@@ -0,0 +1,860 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/dma-mapping.h>
+
+#include "hinic3_common.h"
+#include "hinic3_csr.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+#include "hinic3_mbox.h"
+
+#define MBOX_INT_DST_AEQN_MASK GENMASK(11, 10)
+#define MBOX_INT_SRC_RESP_AEQN_MASK GENMASK(13, 12)
+#define MBOX_INT_STAT_DMA_MASK GENMASK(19, 14)
+/* TX size, expressed in 4-byte units */
+#define MBOX_INT_TX_SIZE_MASK GENMASK(24, 20)
+/* SO_RO == strong order, relaxed order */
+#define MBOX_INT_STAT_DMA_SO_RO_MASK GENMASK(26, 25)
+#define MBOX_INT_WB_EN_MASK BIT(28)
+#define MBOX_INT_SET(val, field) \
+ FIELD_PREP(MBOX_INT_##field##_MASK, val)
+
+#define MBOX_CTRL_TRIGGER_AEQE_MASK BIT(0)
+#define MBOX_CTRL_TX_STATUS_MASK BIT(1)
+#define MBOX_CTRL_DST_FUNC_MASK GENMASK(28, 16)
+#define MBOX_CTRL_SET(val, field) \
+ FIELD_PREP(MBOX_CTRL_##field##_MASK, val)
+
+#define MBOX_MSG_POLLING_TIMEOUT_MS 8000 /* send one message segment */
+#define MBOX_COMP_POLLING_TIMEOUT_MS 40000 /* wait for response */
+
+#define MBOX_MAX_BUF_SZ 2048
+#define MBOX_HEADER_SZ 8
+
+/* A mailbox slot is 64B: 8B header, 48B payload, 8B reserved */
+#define MBOX_SEG_LEN 48
+#define MBOX_SEG_LEN_ALIGN 4
+#define MBOX_WB_STATUS_LEN 16
+
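+/* A maximum sized (2048B) message is carried in 43 segments: seq ids 0..41
+ * hold MBOX_SEG_LEN (48) bytes each and the final seq id 42 holds at most the
+ * remaining 32 bytes.
+ */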
+#define MBOX_SEQ_ID_START_VAL 0
+#define MBOX_SEQ_ID_MAX_VAL 42
+#define MBOX_LAST_SEG_MAX_LEN \
+ (MBOX_MAX_BUF_SZ - MBOX_SEQ_ID_MAX_VAL * MBOX_SEG_LEN)
+
+/* mbox write back status is 16B, only the first 4B are used */
+#define MBOX_WB_STATUS_ERRCODE_MASK 0xFFFF
+#define MBOX_WB_STATUS_MASK 0xFF
+#define MBOX_WB_ERROR_CODE_MASK 0xFF00
+#define MBOX_WB_STATUS_FINISHED_SUCCESS 0xFF
+#define MBOX_WB_STATUS_NOT_FINISHED 0x00
+
+#define MBOX_STATUS_FINISHED(wb) \
+ ((FIELD_PREP(MBOX_WB_STATUS_MASK, (wb))) != MBOX_WB_STATUS_NOT_FINISHED)
+#define MBOX_STATUS_SUCCESS(wb) \
+ ((FIELD_PREP(MBOX_WB_STATUS_MASK, (wb))) == \
+ MBOX_WB_STATUS_FINISHED_SUCCESS)
+#define MBOX_STATUS_ERRCODE(wb) \
+ ((wb) & MBOX_WB_ERROR_CODE_MASK)
+
+#define MBOX_DMA_MSG_QUEUE_DEPTH 32
+#define MBOX_AREA(hwif) \
+ ((hwif)->cfg_regs_base + HINIC3_FUNC_CSR_MAILBOX_DATA_OFF)
+
+#define MBOX_MQ_CI_OFFSET \
+ (HINIC3_CFG_REGS_FLAG + HINIC3_FUNC_CSR_MAILBOX_DATA_OFF + \
+ MBOX_HEADER_SZ + MBOX_SEG_LEN)
+
+#define MBOX_MQ_SYNC_CI_MASK GENMASK(7, 0)
+#define MBOX_MQ_ASYNC_CI_MASK GENMASK(15, 8)
+#define MBOX_MQ_CI_GET(val, field) \
+ FIELD_GET(MBOX_MQ_##field##_CI_MASK, val)
+
+#define MBOX_MGMT_FUNC_ID 0x1FFF
+#define MBOX_COMM_F_MBOX_SEGMENT BIT(3)
+
+static u8 *get_mbox_body_from_hdr(u8 *header)
+{
+ return header + MBOX_HEADER_SZ;
+}
+
+static struct hinic3_msg_desc *get_mbox_msg_desc(struct hinic3_mbox *mbox,
+ enum mbox_msg_direction_type dir,
+ u16 src_func_id)
+{
+ struct hinic3_msg_channel *msg_ch;
+
+ msg_ch = (src_func_id == MBOX_MGMT_FUNC_ID) ?
+ &mbox->mgmt_msg : mbox->func_msg;
+
+ return (dir == MBOX_MSG_SEND) ?
+ &msg_ch->recv_msg : &msg_ch->resp_msg;
+}
+
+static void resp_mbox_handler(struct hinic3_mbox *mbox,
+ const struct hinic3_msg_desc *msg_desc)
+{
+ spin_lock(&mbox->mbox_lock);
+ if (msg_desc->msg_info.msg_id == mbox->send_msg_id &&
+ mbox->event_flag == MBOX_EVENT_START)
+ mbox->event_flag = MBOX_EVENT_SUCCESS;
+ spin_unlock(&mbox->mbox_lock);
+}
+
+static bool mbox_segment_valid(struct hinic3_mbox *mbox,
+ struct hinic3_msg_desc *msg_desc,
+ __le64 mbox_header)
+{
+ u8 seq_id, seg_len, msg_id, mod;
+ __le16 src_func_idx, cmd;
+
+ seq_id = MBOX_MSG_HEADER_GET(mbox_header, SEQID);
+ seg_len = MBOX_MSG_HEADER_GET(mbox_header, SEG_LEN);
+ msg_id = MBOX_MSG_HEADER_GET(mbox_header, MSG_ID);
+ mod = MBOX_MSG_HEADER_GET(mbox_header, MODULE);
+ cmd = cpu_to_le16(MBOX_MSG_HEADER_GET(mbox_header, CMD));
+ src_func_idx = cpu_to_le16(MBOX_MSG_HEADER_GET(mbox_header,
+ SRC_GLB_FUNC_IDX));
+
+ if (seq_id > MBOX_SEQ_ID_MAX_VAL || seg_len > MBOX_SEG_LEN ||
+ (seq_id == MBOX_SEQ_ID_MAX_VAL && seg_len > MBOX_LAST_SEG_MAX_LEN))
+ goto err_seg;
+
+ if (seq_id == 0) {
+ msg_desc->seq_id = seq_id;
+ msg_desc->msg_info.msg_id = msg_id;
+ msg_desc->mod = mod;
+ msg_desc->cmd = cmd;
+ } else {
+ if (seq_id != msg_desc->seq_id + 1 ||
+ msg_id != msg_desc->msg_info.msg_id ||
+ mod != msg_desc->mod || cmd != msg_desc->cmd)
+ goto err_seg;
+
+ msg_desc->seq_id = seq_id;
+ }
+
+ return true;
+
+err_seg:
+ dev_err(mbox->hwdev->dev,
+ "Mailbox segment check failed, src func id: 0x%x, front seg info: seq id: 0x%x, msg id: 0x%x, mod: 0x%x, cmd: 0x%x\n",
+ src_func_idx, msg_desc->seq_id, msg_desc->msg_info.msg_id,
+ msg_desc->mod, msg_desc->cmd);
+ dev_err(mbox->hwdev->dev,
+ "Current seg info: seg len: 0x%x, seq id: 0x%x, msg id: 0x%x, mod: 0x%x, cmd: 0x%x\n",
+ seg_len, seq_id, msg_id, mod, cmd);
+
+ return false;
+}
+
+static void recv_mbox_handler(struct hinic3_mbox *mbox,
+ u8 *header, struct hinic3_msg_desc *msg_desc)
+{
+ __le64 mbox_header = *((__force __le64 *)header);
+ u8 *mbox_body = get_mbox_body_from_hdr(header);
+ u8 seq_id, seg_len;
+ int pos;
+
+ if (!mbox_segment_valid(mbox, msg_desc, mbox_header)) {
+ msg_desc->seq_id = MBOX_SEQ_ID_MAX_VAL;
+ return;
+ }
+
+ seq_id = MBOX_MSG_HEADER_GET(mbox_header, SEQID);
+ seg_len = MBOX_MSG_HEADER_GET(mbox_header, SEG_LEN);
+
+ pos = seq_id * MBOX_SEG_LEN;
+ memcpy(msg_desc->msg + pos, mbox_body, seg_len);
+
+ if (!MBOX_MSG_HEADER_GET(mbox_header, LAST))
+ return;
+
+ msg_desc->msg_len = cpu_to_le16(MBOX_MSG_HEADER_GET(mbox_header,
+ MSG_LEN));
+ msg_desc->msg_info.status = MBOX_MSG_HEADER_GET(mbox_header, STATUS);
+
+ if (MBOX_MSG_HEADER_GET(mbox_header, DIRECTION) == MBOX_MSG_RESP)
+ resp_mbox_handler(mbox, msg_desc);
+}
+
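+/* AEQ handler for incoming mailbox segments: select the mgmt or VF-to-PF
+ * channel based on the source function id, select the recv/resp descriptor
+ * based on the message direction and reassemble the segment into it.
+ */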
+void hinic3_mbox_func_aeqe_handler(struct hinic3_hwdev *hwdev, u8 *header,
+ u8 size)
+{
+ __le64 mbox_header = *((__force __le64 *)header);
+ enum mbox_msg_direction_type dir;
+ struct hinic3_msg_desc *msg_desc;
+ struct hinic3_mbox *mbox;
+ u16 src_func_id;
+
+ mbox = hwdev->mbox;
+ dir = MBOX_MSG_HEADER_GET(mbox_header, DIRECTION);
+ src_func_id = MBOX_MSG_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX);
+ msg_desc = get_mbox_msg_desc(mbox, dir, src_func_id);
+ recv_mbox_handler(mbox, header, msg_desc);
+}
+
+static int init_mbox_dma_queue(struct hinic3_hwdev *hwdev,
+ struct mbox_dma_queue *mq)
+{
+ u32 size;
+
+ mq->depth = MBOX_DMA_MSG_QUEUE_DEPTH;
+ mq->prod_idx = 0;
+ mq->cons_idx = 0;
+
+ size = mq->depth * MBOX_MAX_BUF_SZ;
+ mq->dma_buf_vaddr = dma_alloc_coherent(hwdev->dev, size,
+ &mq->dma_buf_paddr,
+ GFP_KERNEL);
+ if (!mq->dma_buf_vaddr)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void uninit_mbox_dma_queue(struct hinic3_hwdev *hwdev,
+ struct mbox_dma_queue *mq)
+{
+ dma_free_coherent(hwdev->dev, mq->depth * MBOX_MAX_BUF_SZ,
+ mq->dma_buf_vaddr, mq->dma_buf_paddr);
+}
+
+static int hinic3_init_mbox_dma_queue(struct hinic3_mbox *mbox)
+{
+ u32 val;
+ int err;
+
+ err = init_mbox_dma_queue(mbox->hwdev, &mbox->sync_msg_queue);
+ if (err)
+ return err;
+
+ err = init_mbox_dma_queue(mbox->hwdev, &mbox->async_msg_queue);
+ if (err) {
+ uninit_mbox_dma_queue(mbox->hwdev, &mbox->sync_msg_queue);
+ return err;
+ }
+
+ val = hinic3_hwif_read_reg(mbox->hwdev->hwif, MBOX_MQ_CI_OFFSET);
+ val &= ~MBOX_MQ_SYNC_CI_MASK;
+ val &= ~MBOX_MQ_ASYNC_CI_MASK;
+ hinic3_hwif_write_reg(mbox->hwdev->hwif, MBOX_MQ_CI_OFFSET, val);
+
+ return 0;
+}
+
+static void hinic3_uninit_mbox_dma_queue(struct hinic3_mbox *mbox)
+{
+ uninit_mbox_dma_queue(mbox->hwdev, &mbox->sync_msg_queue);
+ uninit_mbox_dma_queue(mbox->hwdev, &mbox->async_msg_queue);
+}
+
+static int alloc_mbox_msg_channel(struct hinic3_msg_channel *msg_ch)
+{
+ msg_ch->resp_msg.msg = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL);
+ if (!msg_ch->resp_msg.msg)
+ return -ENOMEM;
+
+ msg_ch->recv_msg.msg = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL);
+ if (!msg_ch->recv_msg.msg) {
+ kfree(msg_ch->resp_msg.msg);
+ return -ENOMEM;
+ }
+
+ msg_ch->resp_msg.seq_id = MBOX_SEQ_ID_MAX_VAL;
+ msg_ch->recv_msg.seq_id = MBOX_SEQ_ID_MAX_VAL;
+
+ return 0;
+}
+
+static void free_mbox_msg_channel(struct hinic3_msg_channel *msg_ch)
+{
+ kfree(msg_ch->recv_msg.msg);
+ kfree(msg_ch->resp_msg.msg);
+}
+
+static int init_mgmt_msg_channel(struct hinic3_mbox *mbox)
+{
+ int err;
+
+ err = alloc_mbox_msg_channel(&mbox->mgmt_msg);
+ if (err) {
+ dev_err(mbox->hwdev->dev, "Failed to alloc mgmt message channel\n");
+ return err;
+ }
+
+ err = hinic3_init_mbox_dma_queue(mbox);
+ if (err) {
+ dev_err(mbox->hwdev->dev, "Failed to init mbox dma queue\n");
+ free_mbox_msg_channel(&mbox->mgmt_msg);
+ return err;
+ }
+
+ return 0;
+}
+
+static void uninit_mgmt_msg_channel(struct hinic3_mbox *mbox)
+{
+ hinic3_uninit_mbox_dma_queue(mbox);
+ free_mbox_msg_channel(&mbox->mgmt_msg);
+}
+
+static int hinic3_init_func_mbox_msg_channel(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_mbox *mbox;
+ int err;
+
+ mbox = hwdev->mbox;
+ mbox->func_msg = kzalloc(sizeof(*mbox->func_msg), GFP_KERNEL);
+ if (!mbox->func_msg)
+ return -ENOMEM;
+
+ err = alloc_mbox_msg_channel(mbox->func_msg);
+ if (err)
+ goto err_free_func_msg;
+
+ return 0;
+
+err_free_func_msg:
+ kfree(mbox->func_msg);
+ mbox->func_msg = NULL;
+
+ return err;
+}
+
+static void hinic3_uninit_func_mbox_msg_channel(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_mbox *mbox = hwdev->mbox;
+
+ free_mbox_msg_channel(mbox->func_msg);
+ kfree(mbox->func_msg);
+ mbox->func_msg = NULL;
+}
+
+static void prepare_send_mbox(struct hinic3_mbox *mbox)
+{
+ struct hinic3_send_mbox *send_mbox = &mbox->send_mbox;
+
+ send_mbox->data = MBOX_AREA(mbox->hwdev->hwif);
+}
+
+static int alloc_mbox_wb_status(struct hinic3_mbox *mbox)
+{
+ struct hinic3_send_mbox *send_mbox = &mbox->send_mbox;
+ struct hinic3_hwdev *hwdev = mbox->hwdev;
+ u32 addr_h, addr_l;
+
+ send_mbox->wb_vaddr = dma_alloc_coherent(hwdev->dev,
+ MBOX_WB_STATUS_LEN,
+ &send_mbox->wb_paddr,
+ GFP_KERNEL);
+ if (!send_mbox->wb_vaddr)
+ return -ENOMEM;
+
+ addr_h = upper_32_bits(send_mbox->wb_paddr);
+ addr_l = lower_32_bits(send_mbox->wb_paddr);
+ hinic3_hwif_write_reg(hwdev->hwif, HINIC3_FUNC_CSR_MAILBOX_RESULT_H_OFF,
+ addr_h);
+ hinic3_hwif_write_reg(hwdev->hwif, HINIC3_FUNC_CSR_MAILBOX_RESULT_L_OFF,
+ addr_l);
+
+ return 0;
+}
+
+static void free_mbox_wb_status(struct hinic3_mbox *mbox)
+{
+ struct hinic3_send_mbox *send_mbox = &mbox->send_mbox;
+ struct hinic3_hwdev *hwdev = mbox->hwdev;
+
+ hinic3_hwif_write_reg(hwdev->hwif, HINIC3_FUNC_CSR_MAILBOX_RESULT_H_OFF,
+ 0);
+ hinic3_hwif_write_reg(hwdev->hwif, HINIC3_FUNC_CSR_MAILBOX_RESULT_L_OFF,
+ 0);
+
+ dma_free_coherent(hwdev->dev, MBOX_WB_STATUS_LEN,
+ send_mbox->wb_vaddr, send_mbox->wb_paddr);
+}
+
+static int hinic3_mbox_pre_init(struct hinic3_hwdev *hwdev,
+ struct hinic3_mbox *mbox)
+{
+ mbox->hwdev = hwdev;
+ mutex_init(&mbox->mbox_send_lock);
+ spin_lock_init(&mbox->mbox_lock);
+
+ mbox->workq = create_singlethread_workqueue(HINIC3_MBOX_WQ_NAME);
+ if (!mbox->workq) {
+ dev_err(hwdev->dev, "Failed to initialize MBOX workqueue\n");
+ return -ENOMEM;
+ }
+ hwdev->mbox = mbox;
+
+ return 0;
+}
+
+int hinic3_init_mbox(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_mbox *mbox;
+ int err;
+
+ mbox = kzalloc(sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ err = hinic3_mbox_pre_init(hwdev, mbox);
+ if (err)
+ goto err_free_mbox;
+
+ err = init_mgmt_msg_channel(mbox);
+ if (err)
+ goto err_destroy_workqueue;
+
+ err = hinic3_init_func_mbox_msg_channel(hwdev);
+ if (err)
+ goto err_uninit_mgmt_msg_ch;
+
+ err = alloc_mbox_wb_status(mbox);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to alloc mbox write back status\n");
+ goto err_uninit_func_mbox_msg_ch;
+ }
+
+ prepare_send_mbox(mbox);
+
+ return 0;
+
+err_uninit_func_mbox_msg_ch:
+ hinic3_uninit_func_mbox_msg_channel(hwdev);
+
+err_uninit_mgmt_msg_ch:
+ uninit_mgmt_msg_channel(mbox);
+
+err_destroy_workqueue:
+ destroy_workqueue(mbox->workq);
+
+err_free_mbox:
+ kfree(mbox);
+
+ return err;
+}
+
+void hinic3_free_mbox(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_mbox *mbox = hwdev->mbox;
+
+ destroy_workqueue(mbox->workq);
+ free_mbox_wb_status(mbox);
+ hinic3_uninit_func_mbox_msg_channel(hwdev);
+ uninit_mgmt_msg_channel(mbox);
+ kfree(mbox);
+}
+
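+/* DMA mailbox messages carry a 32-bit XOR checksum, seeded with a fixed
+ * pattern and computed over the 4-byte aligned payload.
+ */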
+#define MBOX_DMA_MSG_INIT_XOR_VAL 0x5a5a5a5a
+#define MBOX_XOR_DATA_ALIGN 4
+static u32 mbox_dma_msg_xor(u32 *data, u32 msg_len)
+{
+ u32 xor = MBOX_DMA_MSG_INIT_XOR_VAL;
+ u32 dw_len = msg_len / sizeof(u32);
+ u32 i;
+
+ for (i = 0; i < dw_len; i++)
+ xor ^= data[i];
+
+ return xor;
+}
+
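+/* Producer/consumer indices are masked against the queue depth, which must
+ * therefore be a power of two (MBOX_DMA_MSG_QUEUE_DEPTH is 32).
+ */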
+#define MBOX_MQ_ID_MASK(mq, idx) ((idx) & ((mq)->depth - 1))
+
+static bool is_msg_queue_full(struct mbox_dma_queue *mq)
+{
+ return MBOX_MQ_ID_MASK(mq, (mq)->prod_idx + 1) ==
+ MBOX_MQ_ID_MASK(mq, (mq)->cons_idx);
+}
+
+static int mbox_prepare_dma_entry(struct hinic3_mbox *mbox,
+ struct mbox_dma_queue *mq,
+ struct mbox_dma_msg *dma_msg,
+ const void *msg, u32 msg_len)
+{
+ u64 dma_addr, offset;
+ void *dma_vaddr;
+
+ if (is_msg_queue_full(mq)) {
+ dev_err(mbox->hwdev->dev, "Mbox message queue is busy, pi: %u, ci: %u\n",
+ mq->prod_idx, MBOX_MQ_ID_MASK(mq, mq->cons_idx));
+ return -EBUSY;
+ }
+
+ /* copy data to DMA buffer */
+ offset = mq->prod_idx * MBOX_MAX_BUF_SZ;
+ dma_vaddr = (u8 *)mq->dma_buf_vaddr + offset;
+ memcpy(dma_vaddr, msg, msg_len);
+ dma_addr = mq->dma_buf_paddr + offset;
+ dma_msg->dma_addr_high = cpu_to_le32(upper_32_bits(dma_addr));
+ dma_msg->dma_addr_low = cpu_to_le32(lower_32_bits(dma_addr));
+ dma_msg->msg_len = cpu_to_le32(msg_len);
+ /* The firmware reads the message in 4B units, so checksum the aligned length. */
+ dma_msg->xor = cpu_to_le32(mbox_dma_msg_xor(dma_vaddr,
+ ALIGN(msg_len, MBOX_XOR_DATA_ALIGN)));
+ mq->prod_idx++;
+ mq->prod_idx = MBOX_MQ_ID_MASK(mq, mq->prod_idx);
+
+ return 0;
+}
+
+static int mbox_prepare_dma_msg(struct hinic3_mbox *mbox,
+ enum mbox_msg_ack_type ack_type,
+ struct mbox_dma_msg *dma_msg, const void *msg,
+ u32 msg_len)
+{
+ struct mbox_dma_queue *mq;
+ u32 val;
+
+ val = hinic3_hwif_read_reg(mbox->hwdev->hwif, MBOX_MQ_CI_OFFSET);
+ if (ack_type == MBOX_MSG_ACK) {
+ mq = &mbox->sync_msg_queue;
+ mq->cons_idx = MBOX_MQ_CI_GET(val, SYNC);
+ } else {
+ mq = &mbox->async_msg_queue;
+ mq->cons_idx = MBOX_MQ_CI_GET(val, ASYNC);
+ }
+
+ return mbox_prepare_dma_entry(mbox, mq, dma_msg, msg, msg_len);
+}
+
+static void clear_mbox_status(struct hinic3_send_mbox *mbox)
+{
+ __be64 *wb_status = mbox->wb_vaddr;
+
+ *wb_status = 0;
+ /* clear mailbox write back status */
+ wmb();
+}
+
+static void mbox_dword_write(const void *src, void __iomem *dst, u32 count)
+{
+ const __le32 *src32 = src;
+ u32 __iomem *dst32 = dst;
+ u32 i;
+
+ /* Data written to the mbox is arranged in structs with little-endian
+ * fields, but every dword (32 bits) must be swapped when written to
+ * HW since the HW will swap it again.
+ */
+ for (i = 0; i < count; i++)
+ __raw_writel(swab32((__force __u32)src32[i]), dst32 + i);
+}
+
+static void mbox_copy_header(struct hinic3_hwdev *hwdev,
+ struct hinic3_send_mbox *mbox, __le64 *header)
+{
+ mbox_dword_write(header, mbox->data, MBOX_HEADER_SZ / sizeof(__le32));
+}
+
+static void mbox_copy_send_data(struct hinic3_hwdev *hwdev,
+ struct hinic3_send_mbox *mbox, void *seg,
+ u32 seg_len)
+{
+ u32 __iomem *dst = (u32 __iomem *)(mbox->data + MBOX_HEADER_SZ);
+ u32 count, leftover, last_dword;
+ const __le32 *src = seg;
+
+ count = seg_len / sizeof(u32);
+ leftover = seg_len % sizeof(u32);
+ if (count > 0)
+ mbox_dword_write(src, dst, count);
+
+ if (leftover > 0) {
+ last_dword = 0;
+ memcpy(&last_dword, src + count, leftover);
+ mbox_dword_write(&last_dword, dst + count, 1);
+ }
+}
+
+static void write_mbox_msg_attr(struct hinic3_mbox *mbox,
+ u16 dst_func, u16 dst_aeqn, u32 seg_len)
+{
+ struct hinic3_hwif *hwif = mbox->hwdev->hwif;
+ u32 mbox_int, mbox_ctrl, tx_size;
+
+ tx_size = ALIGN(seg_len + MBOX_HEADER_SZ, MBOX_SEG_LEN_ALIGN) >> 2;
+
+ mbox_int = MBOX_INT_SET(dst_aeqn, DST_AEQN) |
+ MBOX_INT_SET(0, STAT_DMA) |
+ MBOX_INT_SET(tx_size, TX_SIZE) |
+ MBOX_INT_SET(0, STAT_DMA_SO_RO) |
+ MBOX_INT_SET(1, WB_EN);
+
+ mbox_ctrl = MBOX_CTRL_SET(1, TX_STATUS) |
+ MBOX_CTRL_SET(0, TRIGGER_AEQE) |
+ MBOX_CTRL_SET(dst_func, DST_FUNC);
+
+ hinic3_hwif_write_reg(hwif, HINIC3_FUNC_CSR_MAILBOX_INT_OFF, mbox_int);
+ hinic3_hwif_write_reg(hwif, HINIC3_FUNC_CSR_MAILBOX_CONTROL_OFF,
+ mbox_ctrl);
+}
+
+static u16 get_mbox_status(const struct hinic3_send_mbox *mbox)
+{
+ __be64 *wb_status = mbox->wb_vaddr;
+ u64 wb_val;
+
+ wb_val = be64_to_cpu(*wb_status);
+ /* ensure the status is read before it is checked */
+ rmb();
+
+ return wb_val & MBOX_WB_STATUS_ERRCODE_MASK;
+}
+
+static enum hinic3_wait_return check_mbox_wb_status(void *priv_data)
+{
+ struct hinic3_mbox *mbox = priv_data;
+ u16 wb_status;
+
+ wb_status = get_mbox_status(&mbox->send_mbox);
+
+ return MBOX_STATUS_FINISHED(wb_status) ?
+ HINIC3_WAIT_PROCESS_CPL : HINIC3_WAIT_PROCESS_WAITING;
+}
+
+static int send_mbox_seg(struct hinic3_mbox *mbox, __le64 header,
+ u16 dst_func, void *seg, u32 seg_len, void *msg_info)
+{
+ struct hinic3_send_mbox *send_mbox = &mbox->send_mbox;
+ struct hinic3_hwdev *hwdev = mbox->hwdev;
+ u8 num_aeqs = hwdev->hwif->attr.num_aeqs;
+ enum mbox_msg_direction_type dir;
+ u16 dst_aeqn, wb_status, errcode;
+ int err;
+
+ /* mbox to mgmt cpu, hardware doesn't care about dst aeq id */
+ if (num_aeqs > MBOX_MSG_AEQ_FOR_MBOX) {
+ dir = MBOX_MSG_HEADER_GET(header, DIRECTION);
+ dst_aeqn = (dir == MBOX_MSG_SEND) ?
+ MBOX_MSG_AEQ_FOR_EVENT : MBOX_MSG_AEQ_FOR_MBOX;
+ } else {
+ dst_aeqn = 0;
+ }
+
+ clear_mbox_status(send_mbox);
+ mbox_copy_header(hwdev, send_mbox, &header);
+ mbox_copy_send_data(hwdev, send_mbox, seg, seg_len);
+ write_mbox_msg_attr(mbox, dst_func, dst_aeqn, seg_len);
+
+ err = hinic3_wait_for_timeout(mbox, check_mbox_wb_status,
+ MBOX_MSG_POLLING_TIMEOUT_MS,
+ USEC_PER_MSEC);
+ wb_status = get_mbox_status(send_mbox);
+ if (err) {
+ dev_err(hwdev->dev, "Send mailbox segment timeout, wb status: 0x%x\n",
+ wb_status);
+ return err;
+ }
+
+ if (!MBOX_STATUS_SUCCESS(wb_status)) {
+ dev_err(hwdev->dev,
+ "Send mailbox segment to function %u error, wb status: 0x%x\n",
+ dst_func, wb_status);
+ errcode = MBOX_STATUS_ERRCODE(wb_status);
+ return errcode ? errcode : -EFAULT;
+ }
+
+ return 0;
+}
+
+static int send_mbox_msg(struct hinic3_mbox *mbox, u8 mod, u16 cmd,
+ const void *msg, u32 msg_len, u16 dst_func,
+ enum mbox_msg_direction_type direction,
+ enum mbox_msg_ack_type ack_type,
+ struct mbox_msg_info *msg_info)
+{
+ enum mbox_msg_data_type data_type = MBOX_MSG_DATA_INLINE;
+ struct hinic3_hwdev *hwdev = mbox->hwdev;
+ struct mbox_dma_msg dma_msg;
+ u32 seg_len = MBOX_SEG_LEN;
+ __le64 header = 0;
+ u32 seq_id = 0;
+ u16 rsp_aeq_id;
+ u8 *msg_seg;
+ int err = 0;
+ u32 left;
+
+ if (hwdev->hwif->attr.num_aeqs > MBOX_MSG_AEQ_FOR_MBOX)
+ rsp_aeq_id = MBOX_MSG_AEQ_FOR_MBOX;
+ else
+ rsp_aeq_id = 0;
+
+ if (dst_func == MBOX_MGMT_FUNC_ID &&
+ !(hwdev->features[0] & MBOX_COMM_F_MBOX_SEGMENT)) {
+ err = mbox_prepare_dma_msg(mbox, ack_type, &dma_msg,
+ msg, msg_len);
+ if (err)
+ goto err_send;
+
+ msg = &dma_msg;
+ msg_len = sizeof(dma_msg);
+ data_type = MBOX_MSG_DATA_DMA;
+ }
+
+ msg_seg = (u8 *)msg;
+ left = msg_len;
+
+ header = cpu_to_le64(MBOX_MSG_HEADER_SET(msg_len, MSG_LEN) |
+ MBOX_MSG_HEADER_SET(mod, MODULE) |
+ MBOX_MSG_HEADER_SET(seg_len, SEG_LEN) |
+ MBOX_MSG_HEADER_SET(ack_type, NO_ACK) |
+ MBOX_MSG_HEADER_SET(data_type, DATA_TYPE) |
+ MBOX_MSG_HEADER_SET(MBOX_SEQ_ID_START_VAL, SEQID) |
+ MBOX_MSG_HEADER_SET(direction, DIRECTION) |
+ MBOX_MSG_HEADER_SET(cmd, CMD) |
+ MBOX_MSG_HEADER_SET(msg_info->msg_id, MSG_ID) |
+ MBOX_MSG_HEADER_SET(rsp_aeq_id, AEQ_ID) |
+ MBOX_MSG_HEADER_SET(MBOX_MSG_FROM_MBOX, SOURCE) |
+ MBOX_MSG_HEADER_SET(!!msg_info->status, STATUS));
+
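+ /* Transmit the message MBOX_SEG_LEN bytes at a time; the final
+ * segment shrinks SEG_LEN to the remainder and sets the LAST flag.
+ */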
+ while (!(MBOX_MSG_HEADER_GET(header, LAST))) {
+ if (left <= MBOX_SEG_LEN) {
+ header &= cpu_to_le64(~MBOX_MSG_HEADER_SEG_LEN_MASK);
+ header |=
+ cpu_to_le64(MBOX_MSG_HEADER_SET(left, SEG_LEN) |
+ MBOX_MSG_HEADER_SET(1, LAST));
+ seg_len = left;
+ }
+
+ err = send_mbox_seg(mbox, header, dst_func, msg_seg,
+ seg_len, msg_info);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to send mbox seg, seq_id=0x%llx\n",
+ MBOX_MSG_HEADER_GET(header, SEQID));
+ goto err_send;
+ }
+
+ left -= MBOX_SEG_LEN;
+ msg_seg += MBOX_SEG_LEN;
+ seq_id++;
+ header &= cpu_to_le64(~MBOX_MSG_HEADER_SEG_LEN_MASK);
+ header |= cpu_to_le64(MBOX_MSG_HEADER_SET(seq_id, SEQID));
+ }
+
+err_send:
+ return err;
+}
+
+static void set_mbox_to_func_event(struct hinic3_mbox *mbox,
+ enum mbox_event_state event_flag)
+{
+ spin_lock(&mbox->mbox_lock);
+ mbox->event_flag = event_flag;
+ spin_unlock(&mbox->mbox_lock);
+}
+
+static enum hinic3_wait_return check_mbox_msg_finish(void *priv_data)
+{
+ struct hinic3_mbox *mbox = priv_data;
+
+ return (mbox->event_flag == MBOX_EVENT_SUCCESS) ?
+ HINIC3_WAIT_PROCESS_CPL : HINIC3_WAIT_PROCESS_WAITING;
+}
+
+static int wait_mbox_msg_completion(struct hinic3_mbox *mbox,
+ u32 timeout)
+{
+ u32 wait_time;
+ int err;
+
+ wait_time = (timeout != 0) ? timeout : MBOX_COMP_POLLING_TIMEOUT_MS;
+ err = hinic3_wait_for_timeout(mbox, check_mbox_msg_finish,
+ wait_time, USEC_PER_MSEC);
+ if (err) {
+ set_mbox_to_func_event(mbox, MBOX_EVENT_TIMEOUT);
+ return err;
+ }
+ set_mbox_to_func_event(mbox, MBOX_EVENT_END);
+
+ return 0;
+}
+
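+/* Send a synchronous mailbox message to the management CPU and wait for the
+ * response. The response payload is written by the AEQ handler into the mgmt
+ * channel's resp_msg descriptor and copied from there into buf_out.
+ */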
+int hinic3_send_mbox_to_mgmt(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd,
+ const struct mgmt_msg_params *msg_params)
+{
+ struct hinic3_mbox *mbox = hwdev->mbox;
+ struct mbox_msg_info msg_info = {};
+ struct hinic3_msg_desc *msg_desc;
+ u32 msg_len;
+ int err;
+
+ /* expect response message */
+ msg_desc = get_mbox_msg_desc(mbox, MBOX_MSG_RESP, MBOX_MGMT_FUNC_ID);
+ mutex_lock(&mbox->mbox_send_lock);
+ msg_info.msg_id = (mbox->send_msg_id + 1) & 0xF;
+ mbox->send_msg_id = msg_info.msg_id;
+ set_mbox_to_func_event(mbox, MBOX_EVENT_START);
+
+ err = send_mbox_msg(mbox, mod, cmd, msg_params->buf_in,
+ msg_params->in_size, MBOX_MGMT_FUNC_ID,
+ MBOX_MSG_SEND, MBOX_MSG_ACK, &msg_info);
+ if (err) {
+ dev_err(hwdev->dev, "Send mailbox mod %u, cmd %u failed, msg_id: %u, err: %d\n",
+ mod, cmd, msg_info.msg_id, err);
+ set_mbox_to_func_event(mbox, MBOX_EVENT_FAIL);
+ goto err_send;
+ }
+
+ if (wait_mbox_msg_completion(mbox, msg_params->timeout_ms)) {
+ dev_err(hwdev->dev,
+ "Send mbox msg timeout, msg_id: %u\n", msg_info.msg_id);
+ err = -ETIMEDOUT;
+ goto err_send;
+ }
+
+ if (mod != msg_desc->mod || cmd != le16_to_cpu(msg_desc->cmd)) {
+ dev_err(hwdev->dev,
+ "Invalid response mbox message, mod: 0x%x, cmd: 0x%x, expect mod: 0x%x, cmd: 0x%x\n",
+ msg_desc->mod, msg_desc->cmd, mod, cmd);
+ err = -EFAULT;
+ goto err_send;
+ }
+
+ if (msg_desc->msg_info.status) {
+ err = msg_desc->msg_info.status;
+ goto err_send;
+ }
+
+ if (msg_params->buf_out) {
+ msg_len = le16_to_cpu(msg_desc->msg_len);
+ if (msg_len != msg_params->expected_out_size) {
+ dev_err(hwdev->dev,
+ "Invalid response mbox message length: %u for mod %d cmd %u, expected length: %u\n",
+ msg_desc->msg_len, mod, cmd,
+ msg_params->expected_out_size);
+ err = -EFAULT;
+ goto err_send;
+ }
+
+ memcpy(msg_params->buf_out, msg_desc->msg, msg_len);
+ }
+
+err_send:
+ mutex_unlock(&mbox->mbox_send_lock);
+
+ return err;
+}
+
+int hinic3_send_mbox_to_mgmt_no_ack(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd,
+ const struct mgmt_msg_params *msg_params)
+{
+ struct hinic3_mbox *mbox = hwdev->mbox;
+ struct mbox_msg_info msg_info = {};
+ int err;
+
+ mutex_lock(&mbox->mbox_send_lock);
+ err = send_mbox_msg(mbox, mod, cmd, msg_params->buf_in,
+ msg_params->in_size, MBOX_MGMT_FUNC_ID,
+ MBOX_MSG_SEND, MBOX_MSG_NO_ACK, &msg_info);
+ if (err)
+ dev_err(hwdev->dev, "Send mailbox no ack failed\n");
+
+ mutex_unlock(&mbox->mbox_send_lock);
+
+ return err;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h
new file mode 100644
index 000000000000..e71629e95086
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h
@@ -0,0 +1,141 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_MBOX_H_
+#define _HINIC3_MBOX_H_
+
+#include <linux/bitfield.h>
+#include <linux/mutex.h>
+
+struct hinic3_hwdev;
+struct mgmt_msg_params;
+
+#define MBOX_MSG_HEADER_SRC_GLB_FUNC_IDX_MASK GENMASK_ULL(12, 0)
+#define MBOX_MSG_HEADER_STATUS_MASK BIT_ULL(13)
+#define MBOX_MSG_HEADER_SOURCE_MASK BIT_ULL(15)
+#define MBOX_MSG_HEADER_AEQ_ID_MASK GENMASK_ULL(17, 16)
+#define MBOX_MSG_HEADER_MSG_ID_MASK GENMASK_ULL(21, 18)
+#define MBOX_MSG_HEADER_CMD_MASK GENMASK_ULL(31, 22)
+#define MBOX_MSG_HEADER_MSG_LEN_MASK GENMASK_ULL(42, 32)
+#define MBOX_MSG_HEADER_MODULE_MASK GENMASK_ULL(47, 43)
+#define MBOX_MSG_HEADER_SEG_LEN_MASK GENMASK_ULL(53, 48)
+#define MBOX_MSG_HEADER_NO_ACK_MASK BIT_ULL(54)
+#define MBOX_MSG_HEADER_DATA_TYPE_MASK BIT_ULL(55)
+#define MBOX_MSG_HEADER_SEQID_MASK GENMASK_ULL(61, 56)
+#define MBOX_MSG_HEADER_LAST_MASK BIT_ULL(62)
+#define MBOX_MSG_HEADER_DIRECTION_MASK BIT_ULL(63)
+
+#define MBOX_MSG_HEADER_SET(val, member) \
+ FIELD_PREP(MBOX_MSG_HEADER_##member##_MASK, val)
+#define MBOX_MSG_HEADER_GET(val, member) \
+ FIELD_GET(MBOX_MSG_HEADER_##member##_MASK, le64_to_cpu(val))
+
+/* Identifies whether a segment belongs to a message or to a response. A VF is
+ * only expected to send messages and receive responses. The PF driver can
+ * receive messages and send responses.
+ */
+enum mbox_msg_direction_type {
+ MBOX_MSG_SEND = 0,
+ MBOX_MSG_RESP = 1,
+};
+
+/* Indicates if mbox message expects a response (ack) or not */
+enum mbox_msg_ack_type {
+ MBOX_MSG_ACK = 0,
+ MBOX_MSG_NO_ACK = 1,
+};
+
+enum mbox_msg_data_type {
+ MBOX_MSG_DATA_INLINE = 0,
+ MBOX_MSG_DATA_DMA = 1,
+};
+
+enum mbox_msg_src_type {
+ MBOX_MSG_FROM_MBOX = 1,
+};
+
+enum mbox_msg_aeq_type {
+ MBOX_MSG_AEQ_FOR_EVENT = 0,
+ MBOX_MSG_AEQ_FOR_MBOX = 1,
+};
+
+#define HINIC3_MBOX_WQ_NAME "hinic3_mbox"
+
+struct mbox_msg_info {
+ u8 msg_id;
+ u8 status;
+};
+
+struct hinic3_msg_desc {
+ u8 *msg;
+ __le16 msg_len;
+ u8 seq_id;
+ u8 mod;
+ __le16 cmd;
+ struct mbox_msg_info msg_info;
+};
+
+struct hinic3_msg_channel {
+ struct hinic3_msg_desc resp_msg;
+ struct hinic3_msg_desc recv_msg;
+};
+
+struct hinic3_send_mbox {
+ u8 __iomem *data;
+ void *wb_vaddr;
+ dma_addr_t wb_paddr;
+};
+
+enum mbox_event_state {
+ MBOX_EVENT_START = 0,
+ MBOX_EVENT_FAIL = 1,
+ MBOX_EVENT_SUCCESS = 2,
+ MBOX_EVENT_TIMEOUT = 3,
+ MBOX_EVENT_END = 4,
+};
+
+struct mbox_dma_msg {
+ __le32 xor;
+ __le32 dma_addr_high;
+ __le32 dma_addr_low;
+ __le32 msg_len;
+ __le64 rsvd;
+};
+
+struct mbox_dma_queue {
+ void *dma_buf_vaddr;
+ dma_addr_t dma_buf_paddr;
+ u16 depth;
+ u16 prod_idx;
+ u16 cons_idx;
+};
+
+struct hinic3_mbox {
+ struct hinic3_hwdev *hwdev;
+ /* lock for send mbox message and ack message */
+ struct mutex mbox_send_lock;
+ struct hinic3_send_mbox send_mbox;
+ struct mbox_dma_queue sync_msg_queue;
+ struct mbox_dma_queue async_msg_queue;
+ struct workqueue_struct *workq;
+ /* driver and MGMT CPU */
+ struct hinic3_msg_channel mgmt_msg;
+ /* VF to PF */
+ struct hinic3_msg_channel *func_msg;
+ u8 send_msg_id;
+ enum mbox_event_state event_flag;
+ /* lock for mbox event flag */
+ spinlock_t mbox_lock;
+};
+
+void hinic3_mbox_func_aeqe_handler(struct hinic3_hwdev *hwdev, u8 *header,
+ u8 size);
+int hinic3_init_mbox(struct hinic3_hwdev *hwdev);
+void hinic3_free_mbox(struct hinic3_hwdev *hwdev);
+
+int hinic3_send_mbox_to_mgmt(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd,
+ const struct mgmt_msg_params *msg_params);
+int hinic3_send_mbox_to_mgmt_no_ack(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd,
+ const struct mgmt_msg_params *msg_params);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.c b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.c
new file mode 100644
index 000000000000..c38d10cd7fac
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include "hinic3_eqs.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_mbox.h"
+#include "hinic3_mgmt.h"
+
+void hinic3_flush_mgmt_workq(struct hinic3_hwdev *hwdev)
+{
+ if (hwdev->aeqs)
+ flush_workqueue(hwdev->aeqs->workq);
+}
+
+void hinic3_mgmt_msg_aeqe_handler(struct hinic3_hwdev *hwdev, u8 *header,
+ u8 size)
+{
+ if (MBOX_MSG_HEADER_GET(*(__force __le64 *)header, SOURCE) ==
+ MBOX_MSG_FROM_MBOX)
+ hinic3_mbox_func_aeqe_handler(hwdev, header, size);
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h
new file mode 100644
index 000000000000..bbef3b32a6ec
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_MGMT_H_
+#define _HINIC3_MGMT_H_
+
+#include <linux/types.h>
+
+struct hinic3_hwdev;
+
+void hinic3_flush_mgmt_workq(struct hinic3_hwdev *hwdev);
+void hinic3_mgmt_msg_aeqe_handler(struct hinic3_hwdev *hwdev,
+ u8 *header, u8 size);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h
new file mode 100644
index 000000000000..6cc0345c39e4
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h
@@ -0,0 +1,224 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_MGMT_INTERFACE_H_
+#define _HINIC3_MGMT_INTERFACE_H_
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/if_ether.h>
+
+#include "hinic3_hw_intf.h"
+
+struct l2nic_cmd_feature_nego {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u8 opcode;
+ u8 rsvd;
+ u64 s_feature[4];
+};
+
+enum l2nic_func_tbl_cfg_bitmap {
+ L2NIC_FUNC_TBL_CFG_INIT = 0,
+ L2NIC_FUNC_TBL_CFG_RX_BUF_SIZE = 1,
+ L2NIC_FUNC_TBL_CFG_MTU = 2,
+};
+
+struct l2nic_func_tbl_cfg {
+ u16 rx_wqe_buf_size;
+ u16 mtu;
+ u32 rsvd[9];
+};
+
+struct l2nic_cmd_set_func_tbl {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u16 rsvd;
+ u32 cfg_bitmap;
+ struct l2nic_func_tbl_cfg tbl_cfg;
+};
+
+struct l2nic_cmd_set_mac {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u16 vlan_id;
+ u16 rsvd1;
+ u8 mac[ETH_ALEN];
+};
+
+struct l2nic_cmd_update_mac {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u16 vlan_id;
+ u16 rsvd1;
+ u8 old_mac[ETH_ALEN];
+ u16 rsvd2;
+ u8 new_mac[ETH_ALEN];
+};
+
+struct l2nic_cmd_set_ci_attr {
+ struct mgmt_msg_head msg_head;
+ u16 func_idx;
+ u8 dma_attr_off;
+ u8 pending_limit;
+ u8 coalescing_time;
+ u8 intr_en;
+ u16 intr_idx;
+ u32 l2nic_sqn;
+ u32 rsvd;
+ u64 ci_addr;
+};
+
+struct l2nic_cmd_clear_qp_resource {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u16 rsvd1;
+};
+
+struct l2nic_cmd_force_pkt_drop {
+ struct mgmt_msg_head msg_head;
+ u8 port;
+ u8 rsvd1[3];
+};
+
+struct l2nic_cmd_set_vport_state {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u16 rsvd1;
+ /* 0--disable, 1--enable */
+ u8 state;
+ u8 rsvd2[3];
+};
+
+struct l2nic_cmd_set_dcb_state {
+ struct mgmt_msg_head head;
+ u16 func_id;
+ /* 0 - get dcb state, 1 - set dcb state */
+ u8 op_code;
+ /* 0 - disable, 1 - enable dcb */
+ u8 state;
+ /* 0 - disable, 1 - enable dcb */
+ u8 port_state;
+ u8 rsvd[7];
+};
+
+#define L2NIC_RSS_TYPE_VALID_MASK BIT(23)
+#define L2NIC_RSS_TYPE_TCP_IPV6_EXT_MASK BIT(24)
+#define L2NIC_RSS_TYPE_IPV6_EXT_MASK BIT(25)
+#define L2NIC_RSS_TYPE_TCP_IPV6_MASK BIT(26)
+#define L2NIC_RSS_TYPE_IPV6_MASK BIT(27)
+#define L2NIC_RSS_TYPE_TCP_IPV4_MASK BIT(28)
+#define L2NIC_RSS_TYPE_IPV4_MASK BIT(29)
+#define L2NIC_RSS_TYPE_UDP_IPV6_MASK BIT(30)
+#define L2NIC_RSS_TYPE_UDP_IPV4_MASK BIT(31)
+#define L2NIC_RSS_TYPE_SET(val, member) \
+ FIELD_PREP(L2NIC_RSS_TYPE_##member##_MASK, val)
+#define L2NIC_RSS_TYPE_GET(val, member) \
+ FIELD_GET(L2NIC_RSS_TYPE_##member##_MASK, val)
+
+#define L2NIC_RSS_INDIR_SIZE 256
+#define L2NIC_RSS_KEY_SIZE 40
+
+/* IEEE 802.1Qaz std */
+#define L2NIC_DCB_COS_MAX 0x8
+
+struct l2nic_cmd_set_rss_ctx_tbl {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u16 rsvd1;
+ u32 context;
+};
+
+struct l2nic_cmd_cfg_rss_engine {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u8 opcode;
+ u8 hash_engine;
+ u8 rsvd1[4];
+};
+
+struct l2nic_cmd_cfg_rss_hash_key {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u8 opcode;
+ u8 rsvd1;
+ u8 key[L2NIC_RSS_KEY_SIZE];
+};
+
+struct l2nic_cmd_cfg_rss {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u8 rss_en;
+ u8 rq_priority_number;
+ u8 prio_tc[L2NIC_DCB_COS_MAX];
+ u16 num_qps;
+ u16 rsvd1;
+};
+
+/* Commands between NIC driver and fw */
+enum l2nic_cmd {
+ /* FUNC CFG */
+ L2NIC_CMD_SET_FUNC_TBL = 5,
+ L2NIC_CMD_SET_VPORT_ENABLE = 6,
+ L2NIC_CMD_SET_SQ_CI_ATTR = 8,
+ L2NIC_CMD_CLEAR_QP_RESOURCE = 11,
+ L2NIC_CMD_FEATURE_NEGO = 15,
+ L2NIC_CMD_SET_MAC = 21,
+ L2NIC_CMD_DEL_MAC = 22,
+ L2NIC_CMD_UPDATE_MAC = 23,
+ L2NIC_CMD_CFG_RSS = 60,
+ L2NIC_CMD_CFG_RSS_HASH_KEY = 63,
+ L2NIC_CMD_CFG_RSS_HASH_ENGINE = 64,
+ L2NIC_CMD_SET_RSS_CTX_TBL = 65,
+ L2NIC_CMD_QOS_DCB_STATE = 110,
+ L2NIC_CMD_FORCE_PKT_DROP = 113,
+ L2NIC_CMD_MAX = 256,
+};
+
+struct l2nic_cmd_rss_set_indir_tbl {
+ __le32 rsvd[4];
+ __le16 entry[L2NIC_RSS_INDIR_SIZE];
+};
+
+/* NIC CMDQ MODE */
+enum l2nic_ucode_cmd {
+ L2NIC_UCODE_CMD_MODIFY_QUEUE_CTX = 0,
+ L2NIC_UCODE_CMD_CLEAN_QUEUE_CTX = 1,
+ L2NIC_UCODE_CMD_SET_RSS_INDIR_TBL = 4,
+};
+
+/* hilink mac group command */
+enum mag_cmd {
+ MAG_CMD_GET_LINK_STATUS = 7,
+};
+
+/* firmware also uses this cmd to report link events to the driver */
+struct mag_cmd_get_link_status {
+ struct mgmt_msg_head head;
+ u8 port_id;
+ /* 0:link down 1:link up */
+ u8 status;
+ u8 rsvd0[2];
+};
+
+enum hinic3_nic_feature_cap {
+ HINIC3_NIC_F_CSUM = BIT(0),
+ HINIC3_NIC_F_SCTP_CRC = BIT(1),
+ HINIC3_NIC_F_TSO = BIT(2),
+ HINIC3_NIC_F_LRO = BIT(3),
+ HINIC3_NIC_F_UFO = BIT(4),
+ HINIC3_NIC_F_RSS = BIT(5),
+ HINIC3_NIC_F_RX_VLAN_FILTER = BIT(6),
+ HINIC3_NIC_F_RX_VLAN_STRIP = BIT(7),
+ HINIC3_NIC_F_TX_VLAN_INSERT = BIT(8),
+ HINIC3_NIC_F_VXLAN_OFFLOAD = BIT(9),
+ HINIC3_NIC_F_FDIR = BIT(11),
+ HINIC3_NIC_F_PROMISC = BIT(12),
+ HINIC3_NIC_F_ALLMULTI = BIT(13),
+ HINIC3_NIC_F_RATE_LIMIT = BIT(16),
+};
+
+#define HINIC3_NIC_F_ALL_MASK 0x33bff
+#define HINIC3_NIC_DRV_DEFAULT_FEATURE 0x3f03f
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c b/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c
new file mode 100644
index 000000000000..bbf22811a029
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c
@@ -0,0 +1,496 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+
+#include "hinic3_hwif.h"
+#include "hinic3_nic_cfg.h"
+#include "hinic3_nic_dev.h"
+#include "hinic3_nic_io.h"
+#include "hinic3_rss.h"
+#include "hinic3_rx.h"
+#include "hinic3_tx.h"
+
+/* Try to modify the number of IRQs to the target number and return the
+ * actual number of IRQs.
+ */
+static u16 hinic3_qp_irq_change(struct net_device *netdev,
+ u16 dst_num_qp_irq)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct msix_entry *qps_msix_entries;
+ u16 resp_irq_num, irq_num_gap, i;
+ u16 idx;
+ int err;
+
+ qps_msix_entries = nic_dev->qps_msix_entries;
+ if (dst_num_qp_irq > nic_dev->num_qp_irq) {
+ irq_num_gap = dst_num_qp_irq - nic_dev->num_qp_irq;
+ err = hinic3_alloc_irqs(nic_dev->hwdev, irq_num_gap,
+ &qps_msix_entries[nic_dev->num_qp_irq],
+ &resp_irq_num);
+ if (err) {
+ netdev_err(netdev, "Failed to alloc irqs\n");
+ return nic_dev->num_qp_irq;
+ }
+
+ nic_dev->num_qp_irq += resp_irq_num;
+ } else if (dst_num_qp_irq < nic_dev->num_qp_irq) {
+ irq_num_gap = nic_dev->num_qp_irq - dst_num_qp_irq;
+ for (i = 0; i < irq_num_gap; i++) {
+ idx = (nic_dev->num_qp_irq - i) - 1;
+ hinic3_free_irq(nic_dev->hwdev,
+ qps_msix_entries[idx].vector);
+ qps_msix_entries[idx].vector = 0;
+ qps_msix_entries[idx].entry = 0;
+ }
+ nic_dev->num_qp_irq = dst_num_qp_irq;
+ }
+
+ return nic_dev->num_qp_irq;
+}
+
+static void hinic3_config_num_qps(struct net_device *netdev,
+ struct hinic3_dyna_txrxq_params *q_params)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ u16 alloc_num_irq, cur_num_irq;
+ u16 dst_num_irq;
+
+ if (!test_bit(HINIC3_RSS_ENABLE, &nic_dev->flags))
+ q_params->num_qps = 1;
+
+ if (nic_dev->num_qp_irq >= q_params->num_qps)
+ goto out;
+
+ cur_num_irq = nic_dev->num_qp_irq;
+
+ alloc_num_irq = hinic3_qp_irq_change(netdev, q_params->num_qps);
+ if (alloc_num_irq < q_params->num_qps) {
+ q_params->num_qps = alloc_num_irq;
+ netdev_warn(netdev, "Can not get enough irqs, adjust num_qps to %u\n",
+ q_params->num_qps);
+
+ /* The current irq may be in use, we must keep it */
+ dst_num_irq = max_t(u16, cur_num_irq, q_params->num_qps);
+ hinic3_qp_irq_change(netdev, dst_num_irq);
+ }
+
+out:
+ netdev_dbg(netdev, "No need to change irqs, num_qps is %u\n",
+ q_params->num_qps);
+}
+
+static int hinic3_setup_num_qps(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ nic_dev->num_qp_irq = 0;
+
+ nic_dev->qps_msix_entries = kcalloc(nic_dev->max_qps,
+ sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!nic_dev->qps_msix_entries)
+ return -ENOMEM;
+
+ hinic3_config_num_qps(netdev, &nic_dev->q_params);
+
+ return 0;
+}
+
+static void hinic3_destroy_num_qps(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ u16 i;
+
+ for (i = 0; i < nic_dev->num_qp_irq; i++)
+ hinic3_free_irq(nic_dev->hwdev,
+ nic_dev->qps_msix_entries[i].vector);
+
+ kfree(nic_dev->qps_msix_entries);
+}
+
+static int hinic3_alloc_txrxq_resources(struct net_device *netdev,
+ struct hinic3_dyna_txrxq_params *q_params)
+{
+ int err;
+
+ q_params->txqs_res = kcalloc(q_params->num_qps,
+ sizeof(*q_params->txqs_res), GFP_KERNEL);
+ if (!q_params->txqs_res)
+ return -ENOMEM;
+
+ q_params->rxqs_res = kcalloc(q_params->num_qps,
+ sizeof(*q_params->rxqs_res), GFP_KERNEL);
+ if (!q_params->rxqs_res) {
+ err = -ENOMEM;
+ goto err_free_txqs_res_arr;
+ }
+
+ q_params->irq_cfg = kcalloc(q_params->num_qps,
+ sizeof(*q_params->irq_cfg), GFP_KERNEL);
+ if (!q_params->irq_cfg) {
+ err = -ENOMEM;
+ goto err_free_rxqs_res_arr;
+ }
+
+ err = hinic3_alloc_txqs_res(netdev, q_params->num_qps,
+ q_params->sq_depth, q_params->txqs_res);
+ if (err) {
+ netdev_err(netdev, "Failed to alloc txqs resource\n");
+ goto err_free_irq_cfg;
+ }
+
+ err = hinic3_alloc_rxqs_res(netdev, q_params->num_qps,
+ q_params->rq_depth, q_params->rxqs_res);
+ if (err) {
+ netdev_err(netdev, "Failed to alloc rxqs resource\n");
+ goto err_free_txqs_res;
+ }
+
+ return 0;
+
+err_free_txqs_res:
+ hinic3_free_txqs_res(netdev, q_params->num_qps, q_params->sq_depth,
+ q_params->txqs_res);
+err_free_irq_cfg:
+ kfree(q_params->irq_cfg);
+ q_params->irq_cfg = NULL;
+err_free_rxqs_res_arr:
+ kfree(q_params->rxqs_res);
+ q_params->rxqs_res = NULL;
+err_free_txqs_res_arr:
+ kfree(q_params->txqs_res);
+ q_params->txqs_res = NULL;
+
+ return err;
+}
+
+static void hinic3_free_txrxq_resources(struct net_device *netdev,
+ struct hinic3_dyna_txrxq_params *q_params)
+{
+ hinic3_free_rxqs_res(netdev, q_params->num_qps, q_params->rq_depth,
+ q_params->rxqs_res);
+ hinic3_free_txqs_res(netdev, q_params->num_qps, q_params->sq_depth,
+ q_params->txqs_res);
+
+ kfree(q_params->irq_cfg);
+ q_params->irq_cfg = NULL;
+
+ kfree(q_params->rxqs_res);
+ q_params->rxqs_res = NULL;
+
+ kfree(q_params->txqs_res);
+ q_params->txqs_res = NULL;
+}
+
+static int hinic3_configure_txrxqs(struct net_device *netdev,
+ struct hinic3_dyna_txrxq_params *q_params)
+{
+ int err;
+
+ err = hinic3_configure_txqs(netdev, q_params->num_qps,
+ q_params->sq_depth, q_params->txqs_res);
+ if (err) {
+ netdev_err(netdev, "Failed to configure txqs\n");
+ return err;
+ }
+
+ err = hinic3_configure_rxqs(netdev, q_params->num_qps,
+ q_params->rq_depth, q_params->rxqs_res);
+ if (err) {
+ netdev_err(netdev, "Failed to configure rxqs\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int hinic3_configure(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ int err;
+
+ netdev->min_mtu = HINIC3_MIN_MTU_SIZE;
+ netdev->max_mtu = HINIC3_MAX_JUMBO_FRAME_SIZE;
+ err = hinic3_set_port_mtu(netdev, netdev->mtu);
+ if (err) {
+ netdev_err(netdev, "Failed to set mtu\n");
+ return err;
+ }
+
+ /* Ensure DCB is disabled */
+ hinic3_sync_dcb_state(nic_dev->hwdev, 1, 0);
+
+ if (test_bit(HINIC3_RSS_ENABLE, &nic_dev->flags)) {
+ err = hinic3_rss_init(netdev);
+ if (err) {
+ netdev_err(netdev, "Failed to init rss\n");
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void hinic3_remove_configure(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ if (test_bit(HINIC3_RSS_ENABLE, &nic_dev->flags))
+ hinic3_rss_uninit(netdev);
+}
+
+static int hinic3_alloc_channel_resources(struct net_device *netdev,
+ struct hinic3_dyna_qp_params *qp_params,
+ struct hinic3_dyna_txrxq_params *trxq_params)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ int err;
+
+ qp_params->num_qps = trxq_params->num_qps;
+ qp_params->sq_depth = trxq_params->sq_depth;
+ qp_params->rq_depth = trxq_params->rq_depth;
+
+ err = hinic3_alloc_qps(nic_dev, qp_params);
+ if (err) {
+ netdev_err(netdev, "Failed to alloc qps\n");
+ return err;
+ }
+
+ err = hinic3_alloc_txrxq_resources(netdev, trxq_params);
+ if (err) {
+ netdev_err(netdev, "Failed to alloc txrxq resources\n");
+ hinic3_free_qps(nic_dev, qp_params);
+ return err;
+ }
+
+ return 0;
+}
+
+static void hinic3_free_channel_resources(struct net_device *netdev,
+ struct hinic3_dyna_qp_params *qp_params,
+ struct hinic3_dyna_txrxq_params *trxq_params)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ hinic3_free_txrxq_resources(netdev, trxq_params);
+ hinic3_free_qps(nic_dev, qp_params);
+}
+
+static int hinic3_open_channel(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ int err;
+
+ err = hinic3_init_qp_ctxts(nic_dev);
+ if (err) {
+ netdev_err(netdev, "Failed to init qps\n");
+ return err;
+ }
+
+ err = hinic3_configure_txrxqs(netdev, &nic_dev->q_params);
+ if (err) {
+ netdev_err(netdev, "Failed to configure txrxqs\n");
+ goto err_free_qp_ctxts;
+ }
+
+ err = hinic3_qps_irq_init(netdev);
+ if (err) {
+ netdev_err(netdev, "Failed to init txrxq irq\n");
+ goto err_free_qp_ctxts;
+ }
+
+ err = hinic3_configure(netdev);
+ if (err) {
+ netdev_err(netdev, "Failed to configure device resources\n");
+ goto err_uninit_qps_irq;
+ }
+
+ return 0;
+
+err_uninit_qps_irq:
+ hinic3_qps_irq_uninit(netdev);
+err_free_qp_ctxts:
+ hinic3_free_qp_ctxts(nic_dev);
+
+ return err;
+}
+
+static void hinic3_close_channel(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ hinic3_remove_configure(netdev);
+ hinic3_qps_irq_uninit(netdev);
+ hinic3_free_qp_ctxts(nic_dev);
+}
+
+static int hinic3_vport_up(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ bool link_status_up;
+ u16 glb_func_id;
+ int err;
+
+ glb_func_id = hinic3_global_func_id(nic_dev->hwdev);
+ err = hinic3_set_vport_enable(nic_dev->hwdev, glb_func_id, true);
+ if (err) {
+ netdev_err(netdev, "Failed to enable vport\n");
+ goto err_flush_qps_res;
+ }
+
+ err = netif_set_real_num_queues(netdev, nic_dev->q_params.num_qps,
+ nic_dev->q_params.num_qps);
+ if (err) {
+ netdev_err(netdev, "Failed to set real number of queues\n");
+ goto err_flush_qps_res;
+ }
+ netif_tx_start_all_queues(netdev);
+
+ err = hinic3_get_link_status(nic_dev->hwdev, &link_status_up);
+ if (!err && link_status_up)
+ netif_carrier_on(netdev);
+
+ return 0;
+
+err_flush_qps_res:
+ hinic3_flush_qps_res(nic_dev->hwdev);
+ /* wait to guarantee that no packets will be sent to host */
+ msleep(100);
+
+ return err;
+}
+
+static void hinic3_vport_down(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ u16 glb_func_id;
+
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+
+ glb_func_id = hinic3_global_func_id(nic_dev->hwdev);
+ hinic3_set_vport_enable(nic_dev->hwdev, glb_func_id, false);
+
+ hinic3_flush_txqs(netdev);
+ /* wait to guarantee that no packets will be sent to host */
+ msleep(100);
+ hinic3_flush_qps_res(nic_dev->hwdev);
+}
+
+static int hinic3_open(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_dyna_qp_params qp_params;
+ int err;
+
+ err = hinic3_init_nicio_res(nic_dev);
+ if (err) {
+ netdev_err(netdev, "Failed to init nicio resources\n");
+ return err;
+ }
+
+ err = hinic3_setup_num_qps(netdev);
+ if (err) {
+ netdev_err(netdev, "Failed to setup num_qps\n");
+ goto err_free_nicio_res;
+ }
+
+ err = hinic3_alloc_channel_resources(netdev, &qp_params,
+ &nic_dev->q_params);
+ if (err)
+ goto err_destroy_num_qps;
+
+ hinic3_init_qps(nic_dev, &qp_params);
+
+ err = hinic3_open_channel(netdev);
+ if (err)
+ goto err_uninit_qps;
+
+ err = hinic3_vport_up(netdev);
+ if (err)
+ goto err_close_channel;
+
+ return 0;
+
+err_close_channel:
+ hinic3_close_channel(netdev);
+err_uninit_qps:
+ hinic3_uninit_qps(nic_dev, &qp_params);
+ hinic3_free_channel_resources(netdev, &qp_params, &nic_dev->q_params);
+err_destroy_num_qps:
+ hinic3_destroy_num_qps(netdev);
+err_free_nicio_res:
+ hinic3_free_nicio_res(nic_dev);
+
+ return err;
+}
+
+static int hinic3_close(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_dyna_qp_params qp_params;
+
+ hinic3_vport_down(netdev);
+ hinic3_close_channel(netdev);
+ hinic3_uninit_qps(nic_dev, &qp_params);
+ hinic3_free_channel_resources(netdev, &qp_params, &nic_dev->q_params);
+
+ return 0;
+}
+
+static int hinic3_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ int err;
+
+ err = hinic3_set_port_mtu(netdev, new_mtu);
+ if (err) {
+ netdev_err(netdev, "Failed to change port mtu to %d\n",
+ new_mtu);
+ return err;
+ }
+
+ netdev_dbg(netdev, "Change mtu from %u to %d\n", netdev->mtu, new_mtu);
+ WRITE_ONCE(netdev->mtu, new_mtu);
+
+ return 0;
+}
+
+static int hinic3_set_mac_addr(struct net_device *netdev, void *addr)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct sockaddr *saddr = addr;
+ int err;
+
+ if (!is_valid_ether_addr(saddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ if (ether_addr_equal(netdev->dev_addr, saddr->sa_data))
+ return 0;
+
+ err = hinic3_update_mac(nic_dev->hwdev, netdev->dev_addr,
+ saddr->sa_data, 0,
+ hinic3_global_func_id(nic_dev->hwdev));
+
+ if (err)
+ return err;
+
+ eth_hw_addr_set(netdev, saddr->sa_data);
+
+ return 0;
+}
+
+static const struct net_device_ops hinic3_netdev_ops = {
+ .ndo_open = hinic3_open,
+ .ndo_stop = hinic3_close,
+ .ndo_change_mtu = hinic3_change_mtu,
+ .ndo_set_mac_address = hinic3_set_mac_addr,
+ .ndo_start_xmit = hinic3_xmit_frame,
+};
+
+void hinic3_set_netdev_ops(struct net_device *netdev)
+{
+ netdev->netdev_ops = &hinic3_netdev_ops;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c
new file mode 100644
index 000000000000..979f47ca77f9
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c
@@ -0,0 +1,385 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/if_vlan.h>
+
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+#include "hinic3_mbox.h"
+#include "hinic3_nic_cfg.h"
+#include "hinic3_nic_dev.h"
+#include "hinic3_nic_io.h"
+
+static int hinic3_feature_nego(struct hinic3_hwdev *hwdev, u8 opcode,
+ u64 *s_feature, u16 size)
+{
+ struct l2nic_cmd_feature_nego feature_nego = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ feature_nego.func_id = hinic3_global_func_id(hwdev);
+ feature_nego.opcode = opcode;
+ if (opcode == MGMT_MSG_CMD_OP_SET)
+ memcpy(feature_nego.s_feature, s_feature, size * sizeof(u64));
+
+ mgmt_msg_params_init_default(&msg_params, &feature_nego,
+ sizeof(feature_nego));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_FEATURE_NEGO, &msg_params);
+ if (err || feature_nego.msg_head.status) {
+ dev_err(hwdev->dev, "Failed to negotiate nic feature, err:%d, status: 0x%x\n",
+ err, feature_nego.msg_head.status);
+ return -EIO;
+ }
+
+ if (opcode == MGMT_MSG_CMD_OP_GET)
+ memcpy(s_feature, feature_nego.s_feature, size * sizeof(u64));
+
+ return 0;
+}
+
+int hinic3_get_nic_feature_from_hw(struct hinic3_nic_dev *nic_dev)
+{
+ return hinic3_feature_nego(nic_dev->hwdev, MGMT_MSG_CMD_OP_GET,
+ &nic_dev->nic_io->feature_cap, 1);
+}
+
+int hinic3_set_nic_feature_to_hw(struct hinic3_nic_dev *nic_dev)
+{
+ return hinic3_feature_nego(nic_dev->hwdev, MGMT_MSG_CMD_OP_SET,
+ &nic_dev->nic_io->feature_cap, 1);
+}
+
+bool hinic3_test_support(struct hinic3_nic_dev *nic_dev,
+ enum hinic3_nic_feature_cap feature_bits)
+{
+ return (nic_dev->nic_io->feature_cap & feature_bits) == feature_bits;
+}
+
+void hinic3_update_nic_feature(struct hinic3_nic_dev *nic_dev, u64 feature_cap)
+{
+ nic_dev->nic_io->feature_cap = feature_cap;
+}
+
+static int hinic3_set_function_table(struct hinic3_hwdev *hwdev, u32 cfg_bitmap,
+ const struct l2nic_func_tbl_cfg *cfg)
+{
+ struct l2nic_cmd_set_func_tbl cmd_func_tbl = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ cmd_func_tbl.func_id = hinic3_global_func_id(hwdev);
+ cmd_func_tbl.cfg_bitmap = cfg_bitmap;
+ cmd_func_tbl.tbl_cfg = *cfg;
+
+ mgmt_msg_params_init_default(&msg_params, &cmd_func_tbl,
+ sizeof(cmd_func_tbl));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_SET_FUNC_TBL, &msg_params);
+ if (err || cmd_func_tbl.msg_head.status) {
+ dev_err(hwdev->dev,
+ "Failed to set func table, bitmap: 0x%x, err: %d, status: 0x%x\n",
+ cfg_bitmap, err, cmd_func_tbl.msg_head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int hinic3_init_function_table(struct hinic3_nic_dev *nic_dev)
+{
+ struct hinic3_nic_io *nic_io = nic_dev->nic_io;
+ struct l2nic_func_tbl_cfg func_tbl_cfg = {};
+ u32 cfg_bitmap;
+
+ func_tbl_cfg.mtu = 0x3FFF; /* default, max mtu */
+ func_tbl_cfg.rx_wqe_buf_size = nic_io->rx_buf_len;
+
+ cfg_bitmap = BIT(L2NIC_FUNC_TBL_CFG_INIT) |
+ BIT(L2NIC_FUNC_TBL_CFG_MTU) |
+ BIT(L2NIC_FUNC_TBL_CFG_RX_BUF_SIZE);
+
+ return hinic3_set_function_table(nic_dev->hwdev, cfg_bitmap,
+ &func_tbl_cfg);
+}
+
+int hinic3_set_port_mtu(struct net_device *netdev, u16 new_mtu)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct l2nic_func_tbl_cfg func_tbl_cfg = {};
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+
+ func_tbl_cfg.mtu = new_mtu;
+
+ return hinic3_set_function_table(hwdev, BIT(L2NIC_FUNC_TBL_CFG_MTU),
+ &func_tbl_cfg);
+}
+
+static int hinic3_check_mac_info(struct hinic3_hwdev *hwdev, u8 status,
+ u16 vlan_id)
+{
+ if ((status && status != MGMT_STATUS_EXIST) ||
+ ((vlan_id & BIT(15)) && status == MGMT_STATUS_EXIST)) {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic3_set_mac(struct hinic3_hwdev *hwdev, const u8 *mac_addr, u16 vlan_id,
+ u16 func_id)
+{
+ struct l2nic_cmd_set_mac mac_info = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ if ((vlan_id & HINIC3_VLAN_ID_MASK) >= VLAN_N_VID) {
+ dev_err(hwdev->dev, "Invalid VLAN number: %d\n",
+ (vlan_id & HINIC3_VLAN_ID_MASK));
+ return -EINVAL;
+ }
+
+ mac_info.func_id = func_id;
+ mac_info.vlan_id = vlan_id;
+ ether_addr_copy(mac_info.mac, mac_addr);
+
+ mgmt_msg_params_init_default(&msg_params, &mac_info, sizeof(mac_info));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_SET_MAC, &msg_params);
+ if (err || hinic3_check_mac_info(hwdev, mac_info.msg_head.status,
+ mac_info.vlan_id)) {
+ dev_err(hwdev->dev,
+ "Failed to update MAC, err: %d, status: 0x%x\n",
+ err, mac_info.msg_head.status);
+ return -EIO;
+ }
+
+ if (mac_info.msg_head.status == MGMT_STATUS_PF_SET_VF_ALREADY) {
+ dev_warn(hwdev->dev, "PF has already set VF mac, ignore set operation\n");
+ return 0;
+ }
+
+ if (mac_info.msg_head.status == MGMT_STATUS_EXIST) {
+ dev_warn(hwdev->dev, "MAC is repeated. Ignore update operation\n");
+ return 0;
+ }
+
+ return 0;
+}
+
+int hinic3_del_mac(struct hinic3_hwdev *hwdev, const u8 *mac_addr, u16 vlan_id,
+ u16 func_id)
+{
+ struct l2nic_cmd_set_mac mac_info = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ if ((vlan_id & HINIC3_VLAN_ID_MASK) >= VLAN_N_VID) {
+ dev_err(hwdev->dev, "Invalid VLAN number: %d\n",
+ (vlan_id & HINIC3_VLAN_ID_MASK));
+ return -EINVAL;
+ }
+
+ mac_info.func_id = func_id;
+ mac_info.vlan_id = vlan_id;
+ ether_addr_copy(mac_info.mac, mac_addr);
+
+ mgmt_msg_params_init_default(&msg_params, &mac_info, sizeof(mac_info));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_DEL_MAC, &msg_params);
+ if (err) {
+ dev_err(hwdev->dev,
+ "Failed to delete MAC, err: %d, status: 0x%x\n",
+ err, mac_info.msg_head.status);
+ return err;
+ }
+
+ return 0;
+}
+
+int hinic3_update_mac(struct hinic3_hwdev *hwdev, const u8 *old_mac,
+ u8 *new_mac, u16 vlan_id, u16 func_id)
+{
+ struct l2nic_cmd_update_mac mac_info = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ if ((vlan_id & HINIC3_VLAN_ID_MASK) >= VLAN_N_VID) {
+ dev_err(hwdev->dev, "Invalid VLAN number: %d\n",
+ (vlan_id & HINIC3_VLAN_ID_MASK));
+ return -EINVAL;
+ }
+
+ mac_info.func_id = func_id;
+ mac_info.vlan_id = vlan_id;
+ ether_addr_copy(mac_info.old_mac, old_mac);
+ ether_addr_copy(mac_info.new_mac, new_mac);
+
+ mgmt_msg_params_init_default(&msg_params, &mac_info, sizeof(mac_info));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_UPDATE_MAC, &msg_params);
+ if (err || hinic3_check_mac_info(hwdev, mac_info.msg_head.status,
+ mac_info.vlan_id)) {
+ dev_err(hwdev->dev,
+ "Failed to update MAC, err: %d, status: 0x%x\n",
+ err, mac_info.msg_head.status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int hinic3_set_ci_table(struct hinic3_hwdev *hwdev, struct hinic3_sq_attr *attr)
+{
+ struct l2nic_cmd_set_ci_attr cons_idx_attr = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ cons_idx_attr.func_idx = hinic3_global_func_id(hwdev);
+ cons_idx_attr.dma_attr_off = attr->dma_attr_off;
+ cons_idx_attr.pending_limit = attr->pending_limit;
+ cons_idx_attr.coalescing_time = attr->coalescing_time;
+
+ if (attr->intr_en) {
+ cons_idx_attr.intr_en = attr->intr_en;
+ cons_idx_attr.intr_idx = attr->intr_idx;
+ }
+
+ cons_idx_attr.l2nic_sqn = attr->l2nic_sqn;
+ cons_idx_attr.ci_addr = attr->ci_dma_base;
+
+ mgmt_msg_params_init_default(&msg_params, &cons_idx_attr,
+ sizeof(cons_idx_attr));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_SET_SQ_CI_ATTR, &msg_params);
+ if (err || cons_idx_attr.msg_head.status) {
+ dev_err(hwdev->dev,
+ "Failed to set ci attribute table, err: %d, status: 0x%x\n",
+ err, cons_idx_attr.msg_head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int hinic3_flush_qps_res(struct hinic3_hwdev *hwdev)
+{
+ struct l2nic_cmd_clear_qp_resource sq_res = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ sq_res.func_id = hinic3_global_func_id(hwdev);
+
+ mgmt_msg_params_init_default(&msg_params, &sq_res, sizeof(sq_res));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_CLEAR_QP_RESOURCE,
+ &msg_params);
+ if (err || sq_res.msg_head.status) {
+ dev_err(hwdev->dev, "Failed to clear sq resources, err: %d, status: 0x%x\n",
+ err, sq_res.msg_head.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic3_force_drop_tx_pkt(struct hinic3_hwdev *hwdev)
+{
+ struct l2nic_cmd_force_pkt_drop pkt_drop = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ pkt_drop.port = hinic3_physical_port_id(hwdev);
+
+ mgmt_msg_params_init_default(&msg_params, &pkt_drop, sizeof(pkt_drop));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_FORCE_PKT_DROP, &msg_params);
+ if ((pkt_drop.msg_head.status != MGMT_STATUS_CMD_UNSUPPORTED &&
+ pkt_drop.msg_head.status) || err) {
+ dev_err(hwdev->dev,
+ "Failed to set force tx packets drop, err: %d, status: 0x%x\n",
+ err, pkt_drop.msg_head.status);
+ return -EFAULT;
+ }
+
+ return pkt_drop.msg_head.status;
+}
+
+int hinic3_sync_dcb_state(struct hinic3_hwdev *hwdev, u8 op_code, u8 state)
+{
+ struct l2nic_cmd_set_dcb_state dcb_state = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ dcb_state.op_code = op_code;
+ dcb_state.state = state;
+ dcb_state.func_id = hinic3_global_func_id(hwdev);
+
+ mgmt_msg_params_init_default(&msg_params, &dcb_state,
+ sizeof(dcb_state));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_QOS_DCB_STATE, &msg_params);
+ if (err || dcb_state.head.status) {
+ dev_err(hwdev->dev,
+ "Failed to set dcb state, err: %d, status: 0x%x\n",
+ err, dcb_state.head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int hinic3_get_link_status(struct hinic3_hwdev *hwdev, bool *link_status_up)
+{
+ struct mag_cmd_get_link_status get_link = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ get_link.port_id = hinic3_physical_port_id(hwdev);
+
+ mgmt_msg_params_init_default(&msg_params, &get_link, sizeof(get_link));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_HILINK,
+ MAG_CMD_GET_LINK_STATUS, &msg_params);
+ if (err || get_link.head.status) {
+ dev_err(hwdev->dev, "Failed to get link state, err: %d, status: 0x%x\n",
+ err, get_link.head.status);
+ return -EIO;
+ }
+
+ *link_status_up = !!get_link.status;
+
+ return 0;
+}
+
+int hinic3_set_vport_enable(struct hinic3_hwdev *hwdev, u16 func_id,
+ bool enable)
+{
+ struct l2nic_cmd_set_vport_state en_state = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ en_state.func_id = func_id;
+ en_state.state = enable ? 1 : 0;
+
+ mgmt_msg_params_init_default(&msg_params, &en_state, sizeof(en_state));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_SET_VPORT_ENABLE, &msg_params);
+ if (err || en_state.msg_head.status) {
+ dev_err(hwdev->dev, "Failed to set vport state, err: %d, status: 0x%x\n",
+ err, en_state.msg_head.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h
new file mode 100644
index 000000000000..b83b567fa542
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_NIC_CFG_H_
+#define _HINIC3_NIC_CFG_H_
+
+#include <linux/types.h>
+
+#include "hinic3_hw_intf.h"
+#include "hinic3_mgmt_interface.h"
+
+struct hinic3_hwdev;
+struct hinic3_nic_dev;
+
+#define HINIC3_MIN_MTU_SIZE 256
+#define HINIC3_MAX_JUMBO_FRAME_SIZE 9600
+
+#define HINIC3_VLAN_ID_MASK 0x7FFF
+
+enum hinic3_nic_event_type {
+ HINIC3_NIC_EVENT_LINK_DOWN = 0,
+ HINIC3_NIC_EVENT_LINK_UP = 1,
+};
+
+struct hinic3_sq_attr {
+ u8 dma_attr_off;
+ u8 pending_limit;
+ u8 coalescing_time;
+ u8 intr_en;
+ u16 intr_idx;
+ u32 l2nic_sqn;
+ u64 ci_dma_base;
+};
+
+int hinic3_get_nic_feature_from_hw(struct hinic3_nic_dev *nic_dev);
+int hinic3_set_nic_feature_to_hw(struct hinic3_nic_dev *nic_dev);
+bool hinic3_test_support(struct hinic3_nic_dev *nic_dev,
+ enum hinic3_nic_feature_cap feature_bits);
+void hinic3_update_nic_feature(struct hinic3_nic_dev *nic_dev, u64 feature_cap);
+
+int hinic3_init_function_table(struct hinic3_nic_dev *nic_dev);
+int hinic3_set_port_mtu(struct net_device *netdev, u16 new_mtu);
+
+int hinic3_set_mac(struct hinic3_hwdev *hwdev, const u8 *mac_addr, u16 vlan_id,
+ u16 func_id);
+int hinic3_del_mac(struct hinic3_hwdev *hwdev, const u8 *mac_addr, u16 vlan_id,
+ u16 func_id);
+int hinic3_update_mac(struct hinic3_hwdev *hwdev, const u8 *old_mac,
+ u8 *new_mac, u16 vlan_id, u16 func_id);
+
+int hinic3_set_ci_table(struct hinic3_hwdev *hwdev,
+ struct hinic3_sq_attr *attr);
+int hinic3_flush_qps_res(struct hinic3_hwdev *hwdev);
+int hinic3_force_drop_tx_pkt(struct hinic3_hwdev *hwdev);
+
+int hinic3_sync_dcb_state(struct hinic3_hwdev *hwdev, u8 op_code, u8 state);
+int hinic3_get_link_status(struct hinic3_hwdev *hwdev, bool *link_status_up);
+int hinic3_set_vport_enable(struct hinic3_hwdev *hwdev, u16 func_id,
+ bool enable);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h
new file mode 100644
index 000000000000..5ba83261616c
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_NIC_DEV_H_
+#define _HINIC3_NIC_DEV_H_
+
+#include <linux/netdevice.h>
+
+#include "hinic3_hw_cfg.h"
+#include "hinic3_mgmt_interface.h"
+
+enum hinic3_flags {
+ HINIC3_RSS_ENABLE,
+};
+
+enum hinic3_rss_hash_type {
+ HINIC3_RSS_HASH_ENGINE_TYPE_XOR = 0,
+ HINIC3_RSS_HASH_ENGINE_TYPE_TOEP = 1,
+};
+
+struct hinic3_rss_type {
+ u8 tcp_ipv6_ext;
+ u8 ipv6_ext;
+ u8 tcp_ipv6;
+ u8 ipv6;
+ u8 tcp_ipv4;
+ u8 ipv4;
+ u8 udp_ipv6;
+ u8 udp_ipv4;
+};
+
+struct hinic3_irq_cfg {
+ struct net_device *netdev;
+ u16 msix_entry_idx;
+ /* provided by OS */
+ u32 irq_id;
+ char irq_name[IFNAMSIZ + 16];
+ struct napi_struct napi;
+ cpumask_t affinity_mask;
+ struct hinic3_txq *txq;
+ struct hinic3_rxq *rxq;
+};
+
+struct hinic3_dyna_txrxq_params {
+ u16 num_qps;
+ u32 sq_depth;
+ u32 rq_depth;
+
+ struct hinic3_dyna_txq_res *txqs_res;
+ struct hinic3_dyna_rxq_res *rxqs_res;
+ struct hinic3_irq_cfg *irq_cfg;
+};
+
+struct hinic3_intr_coal_info {
+ u8 pending_limit;
+ u8 coalesce_timer_cfg;
+ u8 resend_timer_cfg;
+};
+
+struct hinic3_nic_dev {
+ struct pci_dev *pdev;
+ struct net_device *netdev;
+ struct hinic3_hwdev *hwdev;
+ struct hinic3_nic_io *nic_io;
+
+ u16 max_qps;
+ u16 rx_buf_len;
+ u32 lro_replenish_thld;
+ unsigned long flags;
+ struct hinic3_nic_service_cap nic_svc_cap;
+
+ struct hinic3_dyna_txrxq_params q_params;
+ struct hinic3_txq *txqs;
+ struct hinic3_rxq *rxqs;
+
+ enum hinic3_rss_hash_type rss_hash_type;
+ struct hinic3_rss_type rss_type;
+ u8 *rss_hkey;
+ u16 *rss_indir;
+
+ u16 num_qp_irq;
+ struct msix_entry *qps_msix_entries;
+
+ struct hinic3_intr_coal_info *intr_coalesce;
+
+ bool link_status_up;
+};
+
+void hinic3_set_netdev_ops(struct net_device *netdev);
+int hinic3_qps_irq_init(struct net_device *netdev);
+void hinic3_qps_irq_uninit(struct net_device *netdev);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c
new file mode 100644
index 000000000000..d86cd1ba4605
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c
@@ -0,0 +1,885 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include "hinic3_cmdq.h"
+#include "hinic3_hw_comm.h"
+#include "hinic3_hw_intf.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+#include "hinic3_nic_cfg.h"
+#include "hinic3_nic_dev.h"
+#include "hinic3_nic_io.h"
+
+#define HINIC3_DEFAULT_TX_CI_PENDING_LIMIT 1
+#define HINIC3_DEFAULT_TX_CI_COALESCING_TIME 1
+#define HINIC3_DEFAULT_DROP_THD_ON (0xFFFF)
+#define HINIC3_DEFAULT_DROP_THD_OFF 0
+
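+/* Each queue owns a 64-byte slot in the CI table; hardware writes the
+ * queue's consumer index back into its slot.
+ */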
+#define HINIC3_CI_Q_ADDR_SIZE (64)
+
+#define HINIC3_CI_TABLE_SIZE(num_qps) \
+ (ALIGN((num_qps) * HINIC3_CI_Q_ADDR_SIZE, HINIC3_MIN_PAGE_SIZE))
+
+#define HINIC3_CI_VADDR(base_addr, q_id) \
+ ((u8 *)(base_addr) + (q_id) * HINIC3_CI_Q_ADDR_SIZE)
+
+#define HINIC3_CI_PADDR(base_paddr, q_id) \
+ ((base_paddr) + (q_id) * HINIC3_CI_Q_ADDR_SIZE)
+
+#define SQ_WQ_PREFETCH_MAX 1
+#define SQ_WQ_PREFETCH_MIN 1
+#define SQ_WQ_PREFETCH_THRESHOLD 16
+
+#define RQ_WQ_PREFETCH_MAX 4
+#define RQ_WQ_PREFETCH_MIN 1
+#define RQ_WQ_PREFETCH_THRESHOLD 256
+
+/* Max contexts per cmd buffer:
+ * (2048 byte buffer - 8 byte cmdq header) / 64 bytes per queue context
+ */
+#define HINIC3_Q_CTXT_MAX 31
+
+enum hinic3_qp_ctxt_type {
+ HINIC3_QP_CTXT_TYPE_SQ = 0,
+ HINIC3_QP_CTXT_TYPE_RQ = 1,
+};
+
+struct hinic3_qp_ctxt_hdr {
+ __le16 num_queues;
+ __le16 queue_type;
+ __le16 start_qid;
+ __le16 rsvd;
+};
+
+struct hinic3_sq_ctxt {
+ __le32 ci_pi;
+ __le32 drop_mode_sp;
+ __le32 wq_pfn_hi_owner;
+ __le32 wq_pfn_lo;
+
+ __le32 rsvd0;
+ __le32 pkt_drop_thd;
+ __le32 global_sq_id;
+ __le32 vlan_ceq_attr;
+
+ __le32 pref_cache;
+ __le32 pref_ci_owner;
+ __le32 pref_wq_pfn_hi_ci;
+ __le32 pref_wq_pfn_lo;
+
+ __le32 rsvd8;
+ __le32 rsvd9;
+ __le32 wq_block_pfn_hi;
+ __le32 wq_block_pfn_lo;
+};
+
+struct hinic3_rq_ctxt {
+ __le32 ci_pi;
+ __le32 ceq_attr;
+ __le32 wq_pfn_hi_type_owner;
+ __le32 wq_pfn_lo;
+
+ __le32 rsvd[3];
+ __le32 cqe_sge_len;
+
+ __le32 pref_cache;
+ __le32 pref_ci_owner;
+ __le32 pref_wq_pfn_hi_ci;
+ __le32 pref_wq_pfn_lo;
+
+ __le32 pi_paddr_hi;
+ __le32 pi_paddr_lo;
+ __le32 wq_block_pfn_hi;
+ __le32 wq_block_pfn_lo;
+};
+
+struct hinic3_sq_ctxt_block {
+ struct hinic3_qp_ctxt_hdr cmdq_hdr;
+ struct hinic3_sq_ctxt sq_ctxt[HINIC3_Q_CTXT_MAX];
+};
+
+struct hinic3_rq_ctxt_block {
+ struct hinic3_qp_ctxt_hdr cmdq_hdr;
+ struct hinic3_rq_ctxt rq_ctxt[HINIC3_Q_CTXT_MAX];
+};
+
+struct hinic3_clean_queue_ctxt {
+ struct hinic3_qp_ctxt_hdr cmdq_hdr;
+ __le32 rsvd;
+};
+
+#define SQ_CTXT_SIZE(num_sqs) \
+ (sizeof(struct hinic3_qp_ctxt_hdr) + \
+ (num_sqs) * sizeof(struct hinic3_sq_ctxt))
+
+#define RQ_CTXT_SIZE(num_rqs) \
+ (sizeof(struct hinic3_qp_ctxt_hdr) + \
+ (num_rqs) * sizeof(struct hinic3_rq_ctxt))
+
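+/* The prefetch consumer index is split across two context fields: bits
+ * [11:0] go into CI_LOW and the remaining high bits into CI_HI.
+ */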
+#define SQ_CTXT_PREF_CI_HI_SHIFT 12
+#define SQ_CTXT_PREF_CI_HI(val) ((val) >> SQ_CTXT_PREF_CI_HI_SHIFT)
+
+#define SQ_CTXT_PI_IDX_MASK GENMASK(15, 0)
+#define SQ_CTXT_CI_IDX_MASK GENMASK(31, 16)
+#define SQ_CTXT_CI_PI_SET(val, member) \
+ FIELD_PREP(SQ_CTXT_##member##_MASK, val)
+
+#define SQ_CTXT_MODE_SP_FLAG_MASK BIT(0)
+#define SQ_CTXT_MODE_PKT_DROP_MASK BIT(1)
+#define SQ_CTXT_MODE_SET(val, member) \
+ FIELD_PREP(SQ_CTXT_MODE_##member##_MASK, val)
+
+#define SQ_CTXT_WQ_PAGE_HI_PFN_MASK GENMASK(19, 0)
+#define SQ_CTXT_WQ_PAGE_OWNER_MASK BIT(23)
+#define SQ_CTXT_WQ_PAGE_SET(val, member) \
+ FIELD_PREP(SQ_CTXT_WQ_PAGE_##member##_MASK, val)
+
+#define SQ_CTXT_PKT_DROP_THD_ON_MASK GENMASK(15, 0)
+#define SQ_CTXT_PKT_DROP_THD_OFF_MASK GENMASK(31, 16)
+#define SQ_CTXT_PKT_DROP_THD_SET(val, member) \
+ FIELD_PREP(SQ_CTXT_PKT_DROP_##member##_MASK, val)
+
+#define SQ_CTXT_GLOBAL_SQ_ID_MASK GENMASK(12, 0)
+#define SQ_CTXT_GLOBAL_QUEUE_ID_SET(val, member) \
+ FIELD_PREP(SQ_CTXT_##member##_MASK, val)
+
+#define SQ_CTXT_VLAN_INSERT_MODE_MASK GENMASK(20, 19)
+#define SQ_CTXT_VLAN_CEQ_EN_MASK BIT(23)
+#define SQ_CTXT_VLAN_CEQ_SET(val, member) \
+ FIELD_PREP(SQ_CTXT_VLAN_##member##_MASK, val)
+
+#define SQ_CTXT_PREF_CACHE_THRESHOLD_MASK GENMASK(13, 0)
+#define SQ_CTXT_PREF_CACHE_MAX_MASK GENMASK(24, 14)
+#define SQ_CTXT_PREF_CACHE_MIN_MASK GENMASK(31, 25)
+
+#define SQ_CTXT_PREF_CI_HI_MASK GENMASK(3, 0)
+#define SQ_CTXT_PREF_OWNER_MASK BIT(4)
+
+#define SQ_CTXT_PREF_WQ_PFN_HI_MASK GENMASK(19, 0)
+#define SQ_CTXT_PREF_CI_LOW_MASK GENMASK(31, 20)
+#define SQ_CTXT_PREF_SET(val, member) \
+ FIELD_PREP(SQ_CTXT_PREF_##member##_MASK, val)
+
+#define SQ_CTXT_WQ_BLOCK_PFN_HI_MASK GENMASK(22, 0)
+#define SQ_CTXT_WQ_BLOCK_SET(val, member) \
+ FIELD_PREP(SQ_CTXT_WQ_BLOCK_##member##_MASK, val)
+
+#define RQ_CTXT_PI_IDX_MASK GENMASK(15, 0)
+#define RQ_CTXT_CI_IDX_MASK GENMASK(31, 16)
+#define RQ_CTXT_CI_PI_SET(val, member) \
+ FIELD_PREP(RQ_CTXT_##member##_MASK, val)
+
+#define RQ_CTXT_CEQ_ATTR_INTR_MASK GENMASK(30, 21)
+#define RQ_CTXT_CEQ_ATTR_EN_MASK BIT(31)
+#define RQ_CTXT_CEQ_ATTR_SET(val, member) \
+ FIELD_PREP(RQ_CTXT_CEQ_ATTR_##member##_MASK, val)
+
+#define RQ_CTXT_WQ_PAGE_HI_PFN_MASK GENMASK(19, 0)
+#define RQ_CTXT_WQ_PAGE_WQE_TYPE_MASK GENMASK(29, 28)
+#define RQ_CTXT_WQ_PAGE_OWNER_MASK BIT(31)
+#define RQ_CTXT_WQ_PAGE_SET(val, member) \
+ FIELD_PREP(RQ_CTXT_WQ_PAGE_##member##_MASK, val)
+
+#define RQ_CTXT_CQE_LEN_MASK GENMASK(29, 28)
+#define RQ_CTXT_CQE_LEN_SET(val, member) \
+ FIELD_PREP(RQ_CTXT_##member##_MASK, val)
+
+#define RQ_CTXT_PREF_CACHE_THRESHOLD_MASK GENMASK(13, 0)
+#define RQ_CTXT_PREF_CACHE_MAX_MASK GENMASK(24, 14)
+#define RQ_CTXT_PREF_CACHE_MIN_MASK GENMASK(31, 25)
+
+#define RQ_CTXT_PREF_CI_HI_MASK GENMASK(3, 0)
+#define RQ_CTXT_PREF_OWNER_MASK BIT(4)
+
+#define RQ_CTXT_PREF_WQ_PFN_HI_MASK GENMASK(19, 0)
+#define RQ_CTXT_PREF_CI_LOW_MASK GENMASK(31, 20)
+#define RQ_CTXT_PREF_SET(val, member) \
+ FIELD_PREP(RQ_CTXT_PREF_##member##_MASK, val)
+
+#define RQ_CTXT_WQ_BLOCK_PFN_HI_MASK GENMASK(22, 0)
+#define RQ_CTXT_WQ_BLOCK_SET(val, member) \
+ FIELD_PREP(RQ_CTXT_WQ_BLOCK_##member##_MASK, val)
+
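+/* Hardware takes page frame numbers: WQ pages are expressed in 4K units and
+ * WQ blocks in 512-byte units.
+ */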
+#define WQ_PAGE_PFN_SHIFT 12
+#define WQ_BLOCK_PFN_SHIFT 9
+#define WQ_PAGE_PFN(page_addr) ((page_addr) >> WQ_PAGE_PFN_SHIFT)
+#define WQ_BLOCK_PFN(page_addr) ((page_addr) >> WQ_BLOCK_PFN_SHIFT)
+
+int hinic3_init_nic_io(struct hinic3_nic_dev *nic_dev)
+{
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic3_nic_io *nic_io;
+ int err;
+
+ nic_io = kzalloc(sizeof(*nic_io), GFP_KERNEL);
+ if (!nic_io)
+ return -ENOMEM;
+
+ nic_dev->nic_io = nic_io;
+
+ err = hinic3_set_func_svc_used_state(hwdev, COMM_FUNC_SVC_T_NIC, 1);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to set function svc used state\n");
+ goto err_free_nicio;
+ }
+
+ err = hinic3_init_function_table(nic_dev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init function table\n");
+ goto err_clear_func_svc_used_state;
+ }
+
+ nic_io->rx_buf_len = nic_dev->rx_buf_len;
+
+ err = hinic3_get_nic_feature_from_hw(nic_dev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to get nic features\n");
+ goto err_clear_func_svc_used_state;
+ }
+
+ nic_io->feature_cap &= HINIC3_NIC_F_ALL_MASK;
+ nic_io->feature_cap &= HINIC3_NIC_DRV_DEFAULT_FEATURE;
+ dev_dbg(hwdev->dev, "nic features: 0x%llx\n", nic_io->feature_cap);
+
+ return 0;
+
+err_clear_func_svc_used_state:
+ hinic3_set_func_svc_used_state(hwdev, COMM_FUNC_SVC_T_NIC, 0);
+err_free_nicio:
+ nic_dev->nic_io = NULL;
+ kfree(nic_io);
+
+ return err;
+}
+
+void hinic3_free_nic_io(struct hinic3_nic_dev *nic_dev)
+{
+ struct hinic3_nic_io *nic_io = nic_dev->nic_io;
+
+ hinic3_set_func_svc_used_state(nic_dev->hwdev, COMM_FUNC_SVC_T_NIC, 0);
+ nic_dev->nic_io = NULL;
+ kfree(nic_io);
+}
+
+int hinic3_init_nicio_res(struct hinic3_nic_dev *nic_dev)
+{
+ struct hinic3_nic_io *nic_io = nic_dev->nic_io;
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ void __iomem *db_base;
+ int err;
+
+ nic_io->max_qps = hinic3_func_max_qnum(hwdev);
+
+ err = hinic3_alloc_db_addr(hwdev, &db_base, NULL);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to allocate doorbell for sqs\n");
+ return err;
+ }
+ nic_io->sqs_db_addr = db_base;
+
+ err = hinic3_alloc_db_addr(hwdev, &db_base, NULL);
+ if (err) {
+ hinic3_free_db_addr(hwdev, nic_io->sqs_db_addr);
+ dev_err(hwdev->dev, "Failed to allocate doorbell for rqs\n");
+ return err;
+ }
+ nic_io->rqs_db_addr = db_base;
+
+ nic_io->ci_vaddr_base =
+ dma_alloc_coherent(hwdev->dev,
+ HINIC3_CI_TABLE_SIZE(nic_io->max_qps),
+ &nic_io->ci_dma_base,
+ GFP_KERNEL);
+ if (!nic_io->ci_vaddr_base) {
+ hinic3_free_db_addr(hwdev, nic_io->sqs_db_addr);
+ hinic3_free_db_addr(hwdev, nic_io->rqs_db_addr);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void hinic3_free_nicio_res(struct hinic3_nic_dev *nic_dev)
+{
+ struct hinic3_nic_io *nic_io = nic_dev->nic_io;
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+
+ dma_free_coherent(hwdev->dev,
+ HINIC3_CI_TABLE_SIZE(nic_io->max_qps),
+ nic_io->ci_vaddr_base, nic_io->ci_dma_base);
+
+ hinic3_free_db_addr(hwdev, nic_io->sqs_db_addr);
+ hinic3_free_db_addr(hwdev, nic_io->rqs_db_addr);
+}
+
+static int hinic3_create_sq(struct hinic3_hwdev *hwdev,
+ struct hinic3_io_queue *sq,
+ u16 q_id, u32 sq_depth, u16 sq_msix_idx)
+{
+ int err;
+
+ /* hardware requires the sq owner bit to be initialized to 1 */
+ sq->owner = 1;
+
+ sq->q_id = q_id;
+ sq->msix_entry_idx = sq_msix_idx;
+
+ err = hinic3_wq_create(hwdev, &sq->wq, sq_depth,
+ BIT(HINIC3_SQ_WQEBB_SHIFT));
+ if (err) {
+ dev_err(hwdev->dev, "Failed to create tx queue %u wq\n",
+ q_id);
+ return err;
+ }
+
+ return 0;
+}
+
+static int hinic3_create_rq(struct hinic3_hwdev *hwdev,
+ struct hinic3_io_queue *rq,
+ u16 q_id, u32 rq_depth, u16 rq_msix_idx)
+{
+ int err;
+
+ rq->q_id = q_id;
+ rq->msix_entry_idx = rq_msix_idx;
+
+ err = hinic3_wq_create(hwdev, &rq->wq, rq_depth,
+ BIT(HINIC3_RQ_WQEBB_SHIFT +
+ HINIC3_NORMAL_RQ_WQE));
+ if (err) {
+ dev_err(hwdev->dev, "Failed to create rx queue %u wq\n",
+ q_id);
+ return err;
+ }
+
+ return 0;
+}
+
+static int hinic3_create_qp(struct hinic3_hwdev *hwdev,
+ struct hinic3_io_queue *sq,
+ struct hinic3_io_queue *rq, u16 q_id, u32 sq_depth,
+ u32 rq_depth, u16 qp_msix_idx)
+{
+ int err;
+
+ err = hinic3_create_sq(hwdev, sq, q_id, sq_depth, qp_msix_idx);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to create sq, qid: %u\n",
+ q_id);
+ return err;
+ }
+
+ err = hinic3_create_rq(hwdev, rq, q_id, rq_depth, qp_msix_idx);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to create rq, qid: %u\n",
+ q_id);
+ goto err_destroy_sq_wq;
+ }
+
+ return 0;
+
+err_destroy_sq_wq:
+ hinic3_wq_destroy(hwdev, &sq->wq);
+
+ return err;
+}
+
+static void hinic3_destroy_qp(struct hinic3_hwdev *hwdev,
+ struct hinic3_io_queue *sq,
+ struct hinic3_io_queue *rq)
+{
+ hinic3_wq_destroy(hwdev, &sq->wq);
+ hinic3_wq_destroy(hwdev, &rq->wq);
+}
+
+int hinic3_alloc_qps(struct hinic3_nic_dev *nic_dev,
+ struct hinic3_dyna_qp_params *qp_params)
+{
+ struct msix_entry *qps_msix_entries = nic_dev->qps_msix_entries;
+ struct hinic3_nic_io *nic_io = nic_dev->nic_io;
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic3_io_queue *sqs;
+ struct hinic3_io_queue *rqs;
+ u16 q_id;
+ int err;
+
+ if (qp_params->num_qps > nic_io->max_qps || !qp_params->num_qps)
+ return -EINVAL;
+
+ sqs = kcalloc(qp_params->num_qps, sizeof(*sqs), GFP_KERNEL);
+ if (!sqs) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ rqs = kcalloc(qp_params->num_qps, sizeof(*rqs), GFP_KERNEL);
+ if (!rqs) {
+ err = -ENOMEM;
+ goto err_free_sqs;
+ }
+
+ for (q_id = 0; q_id < qp_params->num_qps; q_id++) {
+ err = hinic3_create_qp(hwdev, &sqs[q_id], &rqs[q_id], q_id,
+ qp_params->sq_depth, qp_params->rq_depth,
+ qps_msix_entries[q_id].entry);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to allocate qp %u, err: %d\n",
+ q_id, err);
+ goto err_destroy_qp;
+ }
+ }
+
+ qp_params->sqs = sqs;
+ qp_params->rqs = rqs;
+
+ return 0;
+
+err_destroy_qp:
+ while (q_id > 0) {
+ q_id--;
+ hinic3_destroy_qp(hwdev, &sqs[q_id], &rqs[q_id]);
+ }
+ kfree(rqs);
+err_free_sqs:
+ kfree(sqs);
+err_out:
+ return err;
+}
+
+void hinic3_free_qps(struct hinic3_nic_dev *nic_dev,
+ struct hinic3_dyna_qp_params *qp_params)
+{
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ u16 q_id;
+
+ for (q_id = 0; q_id < qp_params->num_qps; q_id++)
+ hinic3_destroy_qp(hwdev, &qp_params->sqs[q_id],
+ &qp_params->rqs[q_id]);
+
+ kfree(qp_params->sqs);
+ kfree(qp_params->rqs);
+}
+
+void hinic3_init_qps(struct hinic3_nic_dev *nic_dev,
+ struct hinic3_dyna_qp_params *qp_params)
+{
+ struct hinic3_nic_io *nic_io = nic_dev->nic_io;
+ struct hinic3_io_queue *sqs = qp_params->sqs;
+ struct hinic3_io_queue *rqs = qp_params->rqs;
+ u16 q_id;
+
+ nic_io->num_qps = qp_params->num_qps;
+ nic_io->sq = qp_params->sqs;
+ nic_io->rq = qp_params->rqs;
+ for (q_id = 0; q_id < nic_io->num_qps; q_id++) {
+ sqs[q_id].cons_idx_addr =
+ (u16 *)HINIC3_CI_VADDR(nic_io->ci_vaddr_base, q_id);
+ /* clear ci value */
+ WRITE_ONCE(*sqs[q_id].cons_idx_addr, 0);
+
+ sqs[q_id].db_addr = nic_io->sqs_db_addr;
+ rqs[q_id].db_addr = nic_io->rqs_db_addr;
+ }
+}
+
+void hinic3_uninit_qps(struct hinic3_nic_dev *nic_dev,
+ struct hinic3_dyna_qp_params *qp_params)
+{
+ struct hinic3_nic_io *nic_io = nic_dev->nic_io;
+
+ qp_params->sqs = nic_io->sq;
+ qp_params->rqs = nic_io->rq;
+ qp_params->num_qps = nic_io->num_qps;
+}
+
+static void hinic3_qp_prepare_cmdq_header(struct hinic3_qp_ctxt_hdr *qp_ctxt_hdr,
+ enum hinic3_qp_ctxt_type ctxt_type,
+ u16 num_queues, u16 q_id)
+{
+ qp_ctxt_hdr->queue_type = cpu_to_le16(ctxt_type);
+ qp_ctxt_hdr->num_queues = cpu_to_le16(num_queues);
+ qp_ctxt_hdr->start_qid = cpu_to_le16(q_id);
+ qp_ctxt_hdr->rsvd = 0;
+}
+
+static void hinic3_sq_prepare_ctxt(struct hinic3_io_queue *sq, u16 sq_id,
+ struct hinic3_sq_ctxt *sq_ctxt)
+{
+ u64 wq_page_addr, wq_page_pfn, wq_block_pfn;
+ u32 wq_block_pfn_hi, wq_block_pfn_lo;
+ u32 wq_page_pfn_hi, wq_page_pfn_lo;
+ u16 pi_start, ci_start;
+
+ ci_start = hinic3_get_sq_local_ci(sq);
+ pi_start = hinic3_get_sq_local_pi(sq);
+
+ wq_page_addr = hinic3_wq_get_first_wqe_page_addr(&sq->wq);
+
+ wq_page_pfn = WQ_PAGE_PFN(wq_page_addr);
+ wq_page_pfn_hi = upper_32_bits(wq_page_pfn);
+ wq_page_pfn_lo = lower_32_bits(wq_page_pfn);
+
+ wq_block_pfn = WQ_BLOCK_PFN(sq->wq.wq_block_paddr);
+ wq_block_pfn_hi = upper_32_bits(wq_block_pfn);
+ wq_block_pfn_lo = lower_32_bits(wq_block_pfn);
+
+ sq_ctxt->ci_pi =
+ cpu_to_le32(SQ_CTXT_CI_PI_SET(ci_start, CI_IDX) |
+ SQ_CTXT_CI_PI_SET(pi_start, PI_IDX));
+
+ sq_ctxt->drop_mode_sp =
+ cpu_to_le32(SQ_CTXT_MODE_SET(0, SP_FLAG) |
+ SQ_CTXT_MODE_SET(0, PKT_DROP));
+
+ sq_ctxt->wq_pfn_hi_owner =
+ cpu_to_le32(SQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) |
+ SQ_CTXT_WQ_PAGE_SET(1, OWNER));
+
+ sq_ctxt->wq_pfn_lo = cpu_to_le32(wq_page_pfn_lo);
+
+ sq_ctxt->pkt_drop_thd =
+ cpu_to_le32(SQ_CTXT_PKT_DROP_THD_SET(HINIC3_DEFAULT_DROP_THD_ON, THD_ON) |
+ SQ_CTXT_PKT_DROP_THD_SET(HINIC3_DEFAULT_DROP_THD_OFF, THD_OFF));
+
+ sq_ctxt->global_sq_id =
+ cpu_to_le32(SQ_CTXT_GLOBAL_QUEUE_ID_SET((u32)sq_id,
+ GLOBAL_SQ_ID));
+
+ /* enable insert c-vlan by default */
+ sq_ctxt->vlan_ceq_attr =
+ cpu_to_le32(SQ_CTXT_VLAN_CEQ_SET(0, CEQ_EN) |
+ SQ_CTXT_VLAN_CEQ_SET(1, INSERT_MODE));
+
+ sq_ctxt->rsvd0 = 0;
+
+ sq_ctxt->pref_cache =
+ cpu_to_le32(SQ_CTXT_PREF_SET(SQ_WQ_PREFETCH_MIN, CACHE_MIN) |
+ SQ_CTXT_PREF_SET(SQ_WQ_PREFETCH_MAX, CACHE_MAX) |
+ SQ_CTXT_PREF_SET(SQ_WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD));
+
+ sq_ctxt->pref_ci_owner =
+ cpu_to_le32(SQ_CTXT_PREF_SET(SQ_CTXT_PREF_CI_HI(ci_start), CI_HI) |
+ SQ_CTXT_PREF_SET(1, OWNER));
+
+ sq_ctxt->pref_wq_pfn_hi_ci =
+ cpu_to_le32(SQ_CTXT_PREF_SET(ci_start, CI_LOW) |
+ SQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_PFN_HI));
+
+ sq_ctxt->pref_wq_pfn_lo = cpu_to_le32(wq_page_pfn_lo);
+
+ sq_ctxt->wq_block_pfn_hi =
+ cpu_to_le32(SQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, PFN_HI));
+
+ sq_ctxt->wq_block_pfn_lo = cpu_to_le32(wq_block_pfn_lo);
+}
+
+static void hinic3_rq_prepare_ctxt_get_wq_info(struct hinic3_io_queue *rq,
+ u32 *wq_page_pfn_hi,
+ u32 *wq_page_pfn_lo,
+ u32 *wq_block_pfn_hi,
+ u32 *wq_block_pfn_lo)
+{
+ u64 wq_page_addr, wq_page_pfn, wq_block_pfn;
+
+ wq_page_addr = hinic3_wq_get_first_wqe_page_addr(&rq->wq);
+
+ wq_page_pfn = WQ_PAGE_PFN(wq_page_addr);
+ *wq_page_pfn_hi = upper_32_bits(wq_page_pfn);
+ *wq_page_pfn_lo = lower_32_bits(wq_page_pfn);
+
+ wq_block_pfn = WQ_BLOCK_PFN(rq->wq.wq_block_paddr);
+ *wq_block_pfn_hi = upper_32_bits(wq_block_pfn);
+ *wq_block_pfn_lo = lower_32_bits(wq_block_pfn);
+}
+
+static void hinic3_rq_prepare_ctxt(struct hinic3_io_queue *rq,
+ struct hinic3_rq_ctxt *rq_ctxt)
+{
+ u32 wq_block_pfn_hi, wq_block_pfn_lo;
+ u32 wq_page_pfn_hi, wq_page_pfn_lo;
+ u16 pi_start, ci_start;
+
+ ci_start = (rq->wq.cons_idx & rq->wq.idx_mask) << HINIC3_NORMAL_RQ_WQE;
+ pi_start = (rq->wq.prod_idx & rq->wq.idx_mask) << HINIC3_NORMAL_RQ_WQE;
+
+ hinic3_rq_prepare_ctxt_get_wq_info(rq, &wq_page_pfn_hi, &wq_page_pfn_lo,
+ &wq_block_pfn_hi, &wq_block_pfn_lo);
+
+ rq_ctxt->ci_pi =
+ cpu_to_le32(RQ_CTXT_CI_PI_SET(ci_start, CI_IDX) |
+ RQ_CTXT_CI_PI_SET(pi_start, PI_IDX));
+
+ rq_ctxt->ceq_attr =
+ cpu_to_le32(RQ_CTXT_CEQ_ATTR_SET(0, EN) |
+ RQ_CTXT_CEQ_ATTR_SET(rq->msix_entry_idx, INTR));
+
+ rq_ctxt->wq_pfn_hi_type_owner =
+ cpu_to_le32(RQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) |
+ RQ_CTXT_WQ_PAGE_SET(1, OWNER));
+
+ /* use 16Byte WQE */
+ rq_ctxt->wq_pfn_hi_type_owner |=
+ cpu_to_le32(RQ_CTXT_WQ_PAGE_SET(2, WQE_TYPE));
+ rq_ctxt->cqe_sge_len = cpu_to_le32(RQ_CTXT_CQE_LEN_SET(1, CQE_LEN));
+
+ rq_ctxt->wq_pfn_lo = cpu_to_le32(wq_page_pfn_lo);
+
+ rq_ctxt->pref_cache =
+ cpu_to_le32(RQ_CTXT_PREF_SET(RQ_WQ_PREFETCH_MIN, CACHE_MIN) |
+ RQ_CTXT_PREF_SET(RQ_WQ_PREFETCH_MAX, CACHE_MAX) |
+ RQ_CTXT_PREF_SET(RQ_WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD));
+
+ rq_ctxt->pref_ci_owner =
+ cpu_to_le32(RQ_CTXT_PREF_SET(SQ_CTXT_PREF_CI_HI(ci_start), CI_HI) |
+ RQ_CTXT_PREF_SET(1, OWNER));
+
+ rq_ctxt->pref_wq_pfn_hi_ci =
+ cpu_to_le32(RQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_PFN_HI) |
+ RQ_CTXT_PREF_SET(ci_start, CI_LOW));
+
+ rq_ctxt->pref_wq_pfn_lo = cpu_to_le32(wq_page_pfn_lo);
+
+ rq_ctxt->wq_block_pfn_hi =
+ cpu_to_le32(RQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, PFN_HI));
+
+ rq_ctxt->wq_block_pfn_lo = cpu_to_le32(wq_block_pfn_lo);
+}
+
+static int init_sq_ctxts(struct hinic3_nic_dev *nic_dev)
+{
+ struct hinic3_nic_io *nic_io = nic_dev->nic_io;
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic3_sq_ctxt_block *sq_ctxt_block;
+ u16 q_id, curr_id, max_ctxts, i;
+ struct hinic3_sq_ctxt *sq_ctxt;
+ struct hinic3_cmd_buf *cmd_buf;
+ struct hinic3_io_queue *sq;
+ __le64 out_param;
+ int err = 0;
+
+ cmd_buf = hinic3_alloc_cmd_buf(hwdev);
+ if (!cmd_buf) {
+ dev_err(hwdev->dev, "Failed to allocate cmd buf\n");
+ return -ENOMEM;
+ }
+
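+ /* load SQ contexts in batches of at most HINIC3_Q_CTXT_MAX per cmd buf */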
+ q_id = 0;
+ while (q_id < nic_io->num_qps) {
+ sq_ctxt_block = cmd_buf->buf;
+ sq_ctxt = sq_ctxt_block->sq_ctxt;
+
+ max_ctxts = (nic_io->num_qps - q_id) > HINIC3_Q_CTXT_MAX ?
+ HINIC3_Q_CTXT_MAX : (nic_io->num_qps - q_id);
+
+ hinic3_qp_prepare_cmdq_header(&sq_ctxt_block->cmdq_hdr,
+ HINIC3_QP_CTXT_TYPE_SQ, max_ctxts,
+ q_id);
+
+ for (i = 0; i < max_ctxts; i++) {
+ curr_id = q_id + i;
+ sq = &nic_io->sq[curr_id];
+ hinic3_sq_prepare_ctxt(sq, curr_id, &sq_ctxt[i]);
+ }
+
+ hinic3_cmdq_buf_swab32(sq_ctxt_block, sizeof(*sq_ctxt_block));
+
+ cmd_buf->size = cpu_to_le16(SQ_CTXT_SIZE(max_ctxts));
+ err = hinic3_cmdq_direct_resp(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_UCODE_CMD_MODIFY_QUEUE_CTX,
+ cmd_buf, &out_param);
+ if (err || out_param) {
+ dev_err(hwdev->dev, "Failed to set SQ ctxts, err: %d, out_param: 0x%llx\n",
+ err, out_param);
+ err = -EFAULT;
+ break;
+ }
+
+ q_id += max_ctxts;
+ }
+
+ hinic3_free_cmd_buf(hwdev, cmd_buf);
+
+ return err;
+}
+
+static int init_rq_ctxts(struct hinic3_nic_dev *nic_dev)
+{
+ struct hinic3_nic_io *nic_io = nic_dev->nic_io;
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic3_rq_ctxt_block *rq_ctxt_block;
+ u16 q_id, curr_id, max_ctxts, i;
+ struct hinic3_rq_ctxt *rq_ctxt;
+ struct hinic3_cmd_buf *cmd_buf;
+ struct hinic3_io_queue *rq;
+ __le64 out_param;
+ int err = 0;
+
+ cmd_buf = hinic3_alloc_cmd_buf(hwdev);
+ if (!cmd_buf) {
+ dev_err(hwdev->dev, "Failed to allocate cmd buf\n");
+ return -ENOMEM;
+ }
+
+ q_id = 0;
+ while (q_id < nic_io->num_qps) {
+ rq_ctxt_block = cmd_buf->buf;
+ rq_ctxt = rq_ctxt_block->rq_ctxt;
+
+ max_ctxts = (nic_io->num_qps - q_id) > HINIC3_Q_CTXT_MAX ?
+ HINIC3_Q_CTXT_MAX : (nic_io->num_qps - q_id);
+
+ hinic3_qp_prepare_cmdq_header(&rq_ctxt_block->cmdq_hdr,
+ HINIC3_QP_CTXT_TYPE_RQ, max_ctxts,
+ q_id);
+
+ for (i = 0; i < max_ctxts; i++) {
+ curr_id = q_id + i;
+ rq = &nic_io->rq[curr_id];
+ hinic3_rq_prepare_ctxt(rq, &rq_ctxt[i]);
+ }
+
+ hinic3_cmdq_buf_swab32(rq_ctxt_block, sizeof(*rq_ctxt_block));
+
+ cmd_buf->size = cpu_to_le16(RQ_CTXT_SIZE(max_ctxts));
+
+ err = hinic3_cmdq_direct_resp(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_UCODE_CMD_MODIFY_QUEUE_CTX,
+ cmd_buf, &out_param);
+ if (err || out_param) {
+ dev_err(hwdev->dev, "Failed to set RQ ctxts, err: %d, out_param: 0x%llx\n",
+ err, out_param);
+ err = -EFAULT;
+ break;
+ }
+
+ q_id += max_ctxts;
+ }
+
+ hinic3_free_cmd_buf(hwdev, cmd_buf);
+
+ return err;
+}
+
+static int init_qp_ctxts(struct hinic3_nic_dev *nic_dev)
+{
+ int err;
+
+ err = init_sq_ctxts(nic_dev);
+ if (err)
+ return err;
+
+ err = init_rq_ctxts(nic_dev);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int clean_queue_offload_ctxt(struct hinic3_nic_dev *nic_dev,
+ enum hinic3_qp_ctxt_type ctxt_type)
+{
+ struct hinic3_nic_io *nic_io = nic_dev->nic_io;
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic3_clean_queue_ctxt *ctxt_block;
+ struct hinic3_cmd_buf *cmd_buf;
+ __le64 out_param;
+ int err;
+
+ cmd_buf = hinic3_alloc_cmd_buf(hwdev);
+ if (!cmd_buf) {
+ dev_err(hwdev->dev, "Failed to allocate cmd buf\n");
+ return -ENOMEM;
+ }
+
+ ctxt_block = cmd_buf->buf;
+ ctxt_block->cmdq_hdr.num_queues = cpu_to_le16(nic_io->max_qps);
+ ctxt_block->cmdq_hdr.queue_type = cpu_to_le16(ctxt_type);
+ ctxt_block->cmdq_hdr.start_qid = 0;
+ ctxt_block->cmdq_hdr.rsvd = 0;
+ ctxt_block->rsvd = 0;
+
+ hinic3_cmdq_buf_swab32(ctxt_block, sizeof(*ctxt_block));
+
+ cmd_buf->size = cpu_to_le16(sizeof(*ctxt_block));
+
+ err = hinic3_cmdq_direct_resp(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_UCODE_CMD_CLEAN_QUEUE_CTX,
+ cmd_buf, &out_param);
+ if (err || out_param) {
+ dev_err(hwdev->dev, "Failed to clean queue offload ctxts, err: %d, out_param: 0x%llx\n",
+ err, out_param);
+
+ err = -EFAULT;
+ }
+
+ hinic3_free_cmd_buf(hwdev, cmd_buf);
+
+ return err;
+}
+
+static int clean_qp_offload_ctxt(struct hinic3_nic_dev *nic_dev)
+{
+ /* clean LRO/TSO context space */
+ return clean_queue_offload_ctxt(nic_dev, HINIC3_QP_CTXT_TYPE_SQ) ||
+ clean_queue_offload_ctxt(nic_dev, HINIC3_QP_CTXT_TYPE_RQ);
+}
+
+/* Init the qp ctxts, set the sq ci attributes and arm all sqs */
+int hinic3_init_qp_ctxts(struct hinic3_nic_dev *nic_dev)
+{
+ struct hinic3_nic_io *nic_io = nic_dev->nic_io;
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic3_sq_attr sq_attr;
+ u32 rq_depth;
+ u16 q_id;
+ int err;
+
+ err = init_qp_ctxts(nic_dev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init QP ctxts\n");
+ return err;
+ }
+
+ /* clean LRO/TSO context space */
+ err = clean_qp_offload_ctxt(nic_dev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to clean qp offload ctxts\n");
+ return err;
+ }
+
+ rq_depth = nic_io->rq[0].wq.q_depth << HINIC3_NORMAL_RQ_WQE;
+
+ err = hinic3_set_root_ctxt(hwdev, rq_depth, nic_io->sq[0].wq.q_depth,
+ nic_io->rx_buf_len);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to set root context\n");
+ return err;
+ }
+
+ for (q_id = 0; q_id < nic_io->num_qps; q_id++) {
+ sq_attr.ci_dma_base =
+ HINIC3_CI_PADDR(nic_io->ci_dma_base, q_id) >> 0x2;
+ sq_attr.pending_limit = HINIC3_DEFAULT_TX_CI_PENDING_LIMIT;
+ sq_attr.coalescing_time = HINIC3_DEFAULT_TX_CI_COALESCING_TIME;
+ sq_attr.intr_en = 1;
+ sq_attr.intr_idx = nic_io->sq[q_id].msix_entry_idx;
+ sq_attr.l2nic_sqn = q_id;
+ sq_attr.dma_attr_off = 0;
+ err = hinic3_set_ci_table(hwdev, &sq_attr);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to set ci table\n");
+ goto err_clean_root_ctxt;
+ }
+ }
+
+ return 0;
+
+err_clean_root_ctxt:
+ hinic3_clean_root_ctxt(hwdev);
+
+ return err;
+}
+
+void hinic3_free_qp_ctxts(struct hinic3_nic_dev *nic_dev)
+{
+ hinic3_clean_root_ctxt(nic_dev->hwdev);
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h
new file mode 100644
index 000000000000..12eefabcf1db
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_NIC_IO_H_
+#define _HINIC3_NIC_IO_H_
+
+#include <linux/bitfield.h>
+
+#include "hinic3_wq.h"
+
+struct hinic3_nic_dev;
+
+#define HINIC3_SQ_WQEBB_SHIFT 4
+#define HINIC3_RQ_WQEBB_SHIFT 3
+#define HINIC3_SQ_WQEBB_SIZE BIT(HINIC3_SQ_WQEBB_SHIFT)
+
+/* ******************** RQ_CTRL ******************** */
+enum hinic3_rq_wqe_type {
+ HINIC3_NORMAL_RQ_WQE = 1,
+};
+
+/* ******************** SQ_CTRL ******************** */
+#define HINIC3_TX_MSS_DEFAULT 0x3E00
+#define HINIC3_TX_MSS_MIN 0x50
+#define HINIC3_MAX_SQ_SGE 18
+
+struct hinic3_io_queue {
+ struct hinic3_wq wq;
+ u8 owner;
+ u16 q_id;
+ u16 msix_entry_idx;
+ u8 __iomem *db_addr;
+ u16 *cons_idx_addr;
+} ____cacheline_aligned;
+
+static inline u16 hinic3_get_sq_local_ci(const struct hinic3_io_queue *sq)
+{
+ const struct hinic3_wq *wq = &sq->wq;
+
+ return wq->cons_idx & wq->idx_mask;
+}
+
+static inline u16 hinic3_get_sq_local_pi(const struct hinic3_io_queue *sq)
+{
+ const struct hinic3_wq *wq = &sq->wq;
+
+ return wq->prod_idx & wq->idx_mask;
+}
+
+static inline u16 hinic3_get_sq_hw_ci(const struct hinic3_io_queue *sq)
+{
+ const struct hinic3_wq *wq = &sq->wq;
+
+ return READ_ONCE(*sq->cons_idx_addr) & wq->idx_mask;
+}
+
+/* ******************** DB INFO ******************** */
+#define DB_INFO_QID_MASK GENMASK(12, 0)
+#define DB_INFO_CFLAG_MASK BIT(23)
+#define DB_INFO_COS_MASK GENMASK(26, 24)
+#define DB_INFO_TYPE_MASK GENMASK(31, 27)
+#define DB_INFO_SET(val, member) \
+ FIELD_PREP(DB_INFO_##member##_MASK, val)
+
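+/* The producer index is split across the doorbell: its low 8 bits select the
+ * doorbell address offset, the remaining high bits are carried in the
+ * doorbell payload (pi_hi).
+ */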
+#define DB_PI_LOW_MASK 0xFFU
+#define DB_PI_HIGH_MASK 0xFFU
+#define DB_PI_HI_SHIFT 8
+#define DB_PI_LOW(pi) ((pi) & DB_PI_LOW_MASK)
+#define DB_PI_HIGH(pi) (((pi) >> DB_PI_HI_SHIFT) & DB_PI_HIGH_MASK)
+#define DB_ADDR(q, pi) ((u64 __iomem *)((q)->db_addr) + DB_PI_LOW(pi))
+#define DB_SRC_TYPE 1
+
+/* CFLAG_DATA_PATH */
+#define DB_CFLAG_DP_SQ 0
+#define DB_CFLAG_DP_RQ 1
+
+struct hinic3_nic_db {
+ __le32 db_info;
+ __le32 pi_hi;
+};
+
+static inline void hinic3_write_db(struct hinic3_io_queue *queue, int cos,
+ u8 cflag, u16 pi)
+{
+ struct hinic3_nic_db db;
+
+ db.db_info =
+ cpu_to_le32(DB_INFO_SET(DB_SRC_TYPE, TYPE) |
+ DB_INFO_SET(cflag, CFLAG) |
+ DB_INFO_SET(cos, COS) |
+ DB_INFO_SET(queue->q_id, QID));
+ db.pi_hi = cpu_to_le32(DB_PI_HIGH(pi));
+
+ writeq(*((u64 *)&db), DB_ADDR(queue, pi));
+}
+
+struct hinic3_dyna_qp_params {
+ u16 num_qps;
+ u32 sq_depth;
+ u32 rq_depth;
+
+ struct hinic3_io_queue *sqs;
+ struct hinic3_io_queue *rqs;
+};
+
+struct hinic3_nic_io {
+ struct hinic3_io_queue *sq;
+ struct hinic3_io_queue *rq;
+
+ u16 num_qps;
+ u16 max_qps;
+
+ /* Base address for consumer index of all tx queues. Each queue is
+ * given a full cache line to hold its consumer index. HW updates
+ * current consumer index as it consumes tx WQEs.
+ */
+ void *ci_vaddr_base;
+ dma_addr_t ci_dma_base;
+
+ u8 __iomem *sqs_db_addr;
+ u8 __iomem *rqs_db_addr;
+
+ u16 rx_buf_len;
+ u64 feature_cap;
+};
+
+int hinic3_init_nic_io(struct hinic3_nic_dev *nic_dev);
+void hinic3_free_nic_io(struct hinic3_nic_dev *nic_dev);
+
+int hinic3_init_nicio_res(struct hinic3_nic_dev *nic_dev);
+void hinic3_free_nicio_res(struct hinic3_nic_dev *nic_dev);
+
+int hinic3_alloc_qps(struct hinic3_nic_dev *nic_dev,
+ struct hinic3_dyna_qp_params *qp_params);
+void hinic3_free_qps(struct hinic3_nic_dev *nic_dev,
+ struct hinic3_dyna_qp_params *qp_params);
+void hinic3_init_qps(struct hinic3_nic_dev *nic_dev,
+ struct hinic3_dyna_qp_params *qp_params);
+void hinic3_uninit_qps(struct hinic3_nic_dev *nic_dev,
+ struct hinic3_dyna_qp_params *qp_params);
+
+int hinic3_init_qp_ctxts(struct hinic3_nic_dev *nic_dev);
+void hinic3_free_qp_ctxts(struct hinic3_nic_dev *nic_dev);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_pci_id_tbl.h b/drivers/net/ethernet/huawei/hinic3/hinic3_pci_id_tbl.h
new file mode 100644
index 000000000000..86c88d0bb4bd
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_pci_id_tbl.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_PCI_ID_TBL_H_
+#define _HINIC3_PCI_ID_TBL_H_
+
+#define PCI_DEV_ID_HINIC3_VF 0x375F
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.c b/drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.c
new file mode 100644
index 000000000000..fab9011de9ad
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/device.h>
+
+#include "hinic3_hwdev.h"
+#include "hinic3_queue_common.h"
+
+void hinic3_queue_pages_init(struct hinic3_queue_pages *qpages, u32 q_depth,
+ u32 page_size, u32 elem_size)
+{
+ u32 elem_per_page;
+
+ elem_per_page = min(page_size / elem_size, q_depth);
+
+ qpages->pages = NULL;
+ qpages->page_size = page_size;
+ qpages->num_pages = max(q_depth / elem_per_page, 1);
+ qpages->elem_size_shift = ilog2(elem_size);
+ qpages->elem_per_pg_shift = ilog2(elem_per_page);
+}
+
+static void __queue_pages_free(struct hinic3_hwdev *hwdev,
+ struct hinic3_queue_pages *qpages, u32 pg_cnt)
+{
+ while (pg_cnt > 0) {
+ pg_cnt--;
+ hinic3_dma_free_coherent_align(hwdev->dev,
+ qpages->pages + pg_cnt);
+ }
+ kfree(qpages->pages);
+ qpages->pages = NULL;
+}
+
+void hinic3_queue_pages_free(struct hinic3_hwdev *hwdev,
+ struct hinic3_queue_pages *qpages)
+{
+ __queue_pages_free(hwdev, qpages, qpages->num_pages);
+}
+
+int hinic3_queue_pages_alloc(struct hinic3_hwdev *hwdev,
+ struct hinic3_queue_pages *qpages, u32 align)
+{
+ u32 pg_idx;
+ int err;
+
+ qpages->pages = kcalloc(qpages->num_pages, sizeof(qpages->pages[0]),
+ GFP_KERNEL);
+ if (!qpages->pages)
+ return -ENOMEM;
+
+ if (align == 0)
+ align = qpages->page_size;
+
+ for (pg_idx = 0; pg_idx < qpages->num_pages; pg_idx++) {
+ err = hinic3_dma_zalloc_coherent_align(hwdev->dev,
+ qpages->page_size,
+ align,
+ GFP_KERNEL,
+ qpages->pages + pg_idx);
+ if (err) {
+ __queue_pages_free(hwdev, qpages, pg_idx);
+ return err;
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.h b/drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.h
new file mode 100644
index 000000000000..ec4cae0a0929
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_QUEUE_COMMON_H_
+#define _HINIC3_QUEUE_COMMON_H_
+
+#include <linux/types.h>
+
+#include "hinic3_common.h"
+
+struct hinic3_hwdev;
+
+struct hinic3_queue_pages {
+ /* Array of DMA-able pages that actually holds the queue entries. */
+ struct hinic3_dma_addr_align *pages;
+ /* Page size in bytes. */
+ u32 page_size;
+ /* Number of pages, must be power of 2. */
+ u16 num_pages;
+ u8 elem_size_shift;
+ u8 elem_per_pg_shift;
+};
+
+void hinic3_queue_pages_init(struct hinic3_queue_pages *qpages, u32 q_depth,
+ u32 page_size, u32 elem_size);
+int hinic3_queue_pages_alloc(struct hinic3_hwdev *hwdev,
+ struct hinic3_queue_pages *qpages, u32 align);
+void hinic3_queue_pages_free(struct hinic3_hwdev *hwdev,
+ struct hinic3_queue_pages *qpages);
+
+/* Get pointer to queue entry at the specified index. Index does not have to be
+ * masked to queue depth, only least significant bits will be used. Also
+ * provides remaining elements in same page (including the first one) in case
+ * caller needs multiple entries.
+ */
+static inline void *get_q_element(const struct hinic3_queue_pages *qpages,
+ u32 idx, u32 *remaining_in_page)
+{
+ const struct hinic3_dma_addr_align *page;
+ u32 page_idx, elem_idx, elem_per_pg, ofs;
+ u8 shift;
+
+ shift = qpages->elem_per_pg_shift;
+ page_idx = (idx >> shift) & (qpages->num_pages - 1);
+ elem_per_pg = 1 << shift;
+ elem_idx = idx & (elem_per_pg - 1);
+ if (remaining_in_page)
+ *remaining_in_page = elem_per_pg - elem_idx;
+ ofs = elem_idx << qpages->elem_size_shift;
+ page = qpages->pages + page_idx;
+ return (char *)page->align_vaddr + ofs;
+}
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rss.c b/drivers/net/ethernet/huawei/hinic3/hinic3_rss.c
new file mode 100644
index 000000000000..4ff1b2f79838
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rss.c
@@ -0,0 +1,336 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/ethtool.h>
+
+#include "hinic3_cmdq.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+#include "hinic3_mbox.h"
+#include "hinic3_nic_cfg.h"
+#include "hinic3_nic_dev.h"
+#include "hinic3_rss.h"
+
+static void hinic3_fillout_indir_tbl(struct net_device *netdev, u16 *indir)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ u16 i, num_qps;
+
+ num_qps = nic_dev->q_params.num_qps;
+ for (i = 0; i < L2NIC_RSS_INDIR_SIZE; i++)
+ indir[i] = ethtool_rxfh_indir_default(i, num_qps);
+}
+
+static int hinic3_rss_cfg(struct hinic3_hwdev *hwdev, u8 rss_en, u16 num_qps)
+{
+ struct mgmt_msg_params msg_params = {};
+ struct l2nic_cmd_cfg_rss rss_cfg = {};
+ int err;
+
+ rss_cfg.func_id = hinic3_global_func_id(hwdev);
+ rss_cfg.rss_en = rss_en;
+ rss_cfg.rq_priority_number = 0;
+ rss_cfg.num_qps = num_qps;
+
+ mgmt_msg_params_init_default(&msg_params, &rss_cfg, sizeof(rss_cfg));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_CFG_RSS, &msg_params);
+ if (err || rss_cfg.msg_head.status) {
+ dev_err(hwdev->dev, "Failed to set rss cfg, err: %d, status: 0x%x\n",
+ err, rss_cfg.msg_head.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void hinic3_init_rss_parameters(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ nic_dev->rss_hash_type = HINIC3_RSS_HASH_ENGINE_TYPE_XOR;
+ nic_dev->rss_type.tcp_ipv6_ext = 1;
+ nic_dev->rss_type.ipv6_ext = 1;
+ nic_dev->rss_type.tcp_ipv6 = 1;
+ nic_dev->rss_type.ipv6 = 1;
+ nic_dev->rss_type.tcp_ipv4 = 1;
+ nic_dev->rss_type.ipv4 = 1;
+ nic_dev->rss_type.udp_ipv6 = 1;
+ nic_dev->rss_type.udp_ipv4 = 1;
+}
+
+static void decide_num_qps(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ unsigned int dev_cpus;
+
+ dev_cpus = netif_get_num_default_rss_queues();
+ nic_dev->q_params.num_qps = min(dev_cpus, nic_dev->max_qps);
+}
+
+static int alloc_rss_resource(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ nic_dev->rss_hkey = kmalloc_array(L2NIC_RSS_KEY_SIZE,
+ sizeof(nic_dev->rss_hkey[0]),
+ GFP_KERNEL);
+ if (!nic_dev->rss_hkey)
+ return -ENOMEM;
+
+ netdev_rss_key_fill(nic_dev->rss_hkey, L2NIC_RSS_KEY_SIZE);
+
+ nic_dev->rss_indir = kcalloc(L2NIC_RSS_INDIR_SIZE, sizeof(u16),
+ GFP_KERNEL);
+ if (!nic_dev->rss_indir) {
+ kfree(nic_dev->rss_hkey);
+ nic_dev->rss_hkey = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int hinic3_rss_set_indir_tbl(struct hinic3_hwdev *hwdev,
+ const u16 *indir_table)
+{
+ struct l2nic_cmd_rss_set_indir_tbl *indir_tbl;
+ struct hinic3_cmd_buf *cmd_buf;
+ __le64 out_param;
+ int err;
+ u32 i;
+
+ cmd_buf = hinic3_alloc_cmd_buf(hwdev);
+ if (!cmd_buf) {
+ dev_err(hwdev->dev, "Failed to allocate cmd buf\n");
+ return -ENOMEM;
+ }
+
+ cmd_buf->size = cpu_to_le16(sizeof(struct l2nic_cmd_rss_set_indir_tbl));
+ indir_tbl = cmd_buf->buf;
+ memset(indir_tbl, 0, sizeof(*indir_tbl));
+
+ for (i = 0; i < L2NIC_RSS_INDIR_SIZE; i++)
+ indir_tbl->entry[i] = cpu_to_le16(indir_table[i]);
+
+ hinic3_cmdq_buf_swab32(indir_tbl, sizeof(*indir_tbl));
+
+ err = hinic3_cmdq_direct_resp(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_UCODE_CMD_SET_RSS_INDIR_TBL,
+ cmd_buf, &out_param);
+ if (err || out_param) {
+ dev_err(hwdev->dev, "Failed to set rss indir table\n");
+ err = -EFAULT;
+ }
+
+ hinic3_free_cmd_buf(hwdev, cmd_buf);
+
+ return err;
+}
+
+static int hinic3_set_rss_type(struct hinic3_hwdev *hwdev,
+ struct hinic3_rss_type rss_type)
+{
+ struct l2nic_cmd_set_rss_ctx_tbl ctx_tbl = {};
+ struct mgmt_msg_params msg_params = {};
+ u32 ctx;
+ int err;
+
+ ctx_tbl.func_id = hinic3_global_func_id(hwdev);
+ ctx = L2NIC_RSS_TYPE_SET(1, VALID) |
+ L2NIC_RSS_TYPE_SET(rss_type.ipv4, IPV4) |
+ L2NIC_RSS_TYPE_SET(rss_type.ipv6, IPV6) |
+ L2NIC_RSS_TYPE_SET(rss_type.ipv6_ext, IPV6_EXT) |
+ L2NIC_RSS_TYPE_SET(rss_type.tcp_ipv4, TCP_IPV4) |
+ L2NIC_RSS_TYPE_SET(rss_type.tcp_ipv6, TCP_IPV6) |
+ L2NIC_RSS_TYPE_SET(rss_type.tcp_ipv6_ext, TCP_IPV6_EXT) |
+ L2NIC_RSS_TYPE_SET(rss_type.udp_ipv4, UDP_IPV4) |
+ L2NIC_RSS_TYPE_SET(rss_type.udp_ipv6, UDP_IPV6);
+ ctx_tbl.context = ctx;
+
+ mgmt_msg_params_init_default(&msg_params, &ctx_tbl, sizeof(ctx_tbl));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_SET_RSS_CTX_TBL, &msg_params);
+
+ if (ctx_tbl.msg_head.status == MGMT_STATUS_CMD_UNSUPPORTED) {
+ return MGMT_STATUS_CMD_UNSUPPORTED;
+ } else if (err || ctx_tbl.msg_head.status) {
+		dev_err(hwdev->dev, "Failed to set rss context table, err: %d, status: 0x%x\n",
+ err, ctx_tbl.msg_head.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hinic3_rss_cfg_hash_type(struct hinic3_hwdev *hwdev, u8 opcode,
+ enum hinic3_rss_hash_type *type)
+{
+ struct l2nic_cmd_cfg_rss_engine hash_type_cmd = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ hash_type_cmd.func_id = hinic3_global_func_id(hwdev);
+ hash_type_cmd.opcode = opcode;
+
+ if (opcode == MGMT_MSG_CMD_OP_SET)
+ hash_type_cmd.hash_engine = *type;
+
+ mgmt_msg_params_init_default(&msg_params, &hash_type_cmd,
+ sizeof(hash_type_cmd));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_CFG_RSS_HASH_ENGINE,
+ &msg_params);
+ if (err || hash_type_cmd.msg_head.status) {
+ dev_err(hwdev->dev, "Failed to %s hash engine, err: %d, status: 0x%x\n",
+ opcode == MGMT_MSG_CMD_OP_SET ? "set" : "get",
+ err, hash_type_cmd.msg_head.status);
+ return -EIO;
+ }
+
+ if (opcode == MGMT_MSG_CMD_OP_GET)
+ *type = hash_type_cmd.hash_engine;
+
+ return 0;
+}
+
+static int hinic3_rss_set_hash_type(struct hinic3_hwdev *hwdev,
+ enum hinic3_rss_hash_type type)
+{
+ return hinic3_rss_cfg_hash_type(hwdev, MGMT_MSG_CMD_OP_SET, &type);
+}
+
+static int hinic3_config_rss_hw_resource(struct net_device *netdev,
+ u16 *indir_tbl)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ int err;
+
+ err = hinic3_rss_set_indir_tbl(nic_dev->hwdev, indir_tbl);
+ if (err)
+ return err;
+
+ err = hinic3_set_rss_type(nic_dev->hwdev, nic_dev->rss_type);
+ if (err)
+ return err;
+
+ return hinic3_rss_set_hash_type(nic_dev->hwdev, nic_dev->rss_hash_type);
+}
+
+static int hinic3_rss_cfg_hash_key(struct hinic3_hwdev *hwdev, u8 opcode,
+ u8 *key)
+{
+ struct l2nic_cmd_cfg_rss_hash_key hash_key = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ hash_key.func_id = hinic3_global_func_id(hwdev);
+ hash_key.opcode = opcode;
+
+ if (opcode == MGMT_MSG_CMD_OP_SET)
+ memcpy(hash_key.key, key, L2NIC_RSS_KEY_SIZE);
+
+ mgmt_msg_params_init_default(&msg_params, &hash_key, sizeof(hash_key));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_CFG_RSS_HASH_KEY, &msg_params);
+ if (err || hash_key.msg_head.status) {
+ dev_err(hwdev->dev, "Failed to %s hash key, err: %d, status: 0x%x\n",
+ opcode == MGMT_MSG_CMD_OP_SET ? "set" : "get",
+ err, hash_key.msg_head.status);
+ return -EINVAL;
+ }
+
+ if (opcode == MGMT_MSG_CMD_OP_GET)
+ memcpy(key, hash_key.key, L2NIC_RSS_KEY_SIZE);
+
+ return 0;
+}
+
+static int hinic3_rss_set_hash_key(struct hinic3_hwdev *hwdev, u8 *key)
+{
+ return hinic3_rss_cfg_hash_key(hwdev, MGMT_MSG_CMD_OP_SET, key);
+}
+
+static int hinic3_set_hw_rss_parameters(struct net_device *netdev, u8 rss_en)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ int err;
+
+ err = hinic3_rss_set_hash_key(nic_dev->hwdev, nic_dev->rss_hkey);
+ if (err)
+ return err;
+
+ hinic3_fillout_indir_tbl(netdev, nic_dev->rss_indir);
+
+ err = hinic3_config_rss_hw_resource(netdev, nic_dev->rss_indir);
+ if (err)
+ return err;
+
+ err = hinic3_rss_cfg(nic_dev->hwdev, rss_en, nic_dev->q_params.num_qps);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int hinic3_rss_init(struct net_device *netdev)
+{
+ return hinic3_set_hw_rss_parameters(netdev, 1);
+}
+
+void hinic3_rss_uninit(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ hinic3_rss_cfg(nic_dev->hwdev, 0, 0);
+}
+
+void hinic3_clear_rss_config(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ kfree(nic_dev->rss_hkey);
+ nic_dev->rss_hkey = NULL;
+
+ kfree(nic_dev->rss_indir);
+ nic_dev->rss_indir = NULL;
+}
+
+void hinic3_try_to_enable_rss(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ int err;
+
+ nic_dev->max_qps = hinic3_func_max_qnum(hwdev);
+ if (nic_dev->max_qps <= 1 ||
+ !hinic3_test_support(nic_dev, HINIC3_NIC_F_RSS))
+ goto err_reset_q_params;
+
+ err = alloc_rss_resource(netdev);
+ if (err) {
+ nic_dev->max_qps = 1;
+ goto err_reset_q_params;
+ }
+
+ set_bit(HINIC3_RSS_ENABLE, &nic_dev->flags);
+ decide_num_qps(netdev);
+ hinic3_init_rss_parameters(netdev);
+ err = hinic3_set_hw_rss_parameters(netdev, 0);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to set hardware rss parameters\n");
+ hinic3_clear_rss_config(netdev);
+ nic_dev->max_qps = 1;
+ goto err_reset_q_params;
+ }
+
+ return;
+
+err_reset_q_params:
+ clear_bit(HINIC3_RSS_ENABLE, &nic_dev->flags);
+ nic_dev->q_params.num_qps = nic_dev->max_qps;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rss.h b/drivers/net/ethernet/huawei/hinic3/hinic3_rss.h
new file mode 100644
index 000000000000..78d82c2aca06
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rss.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_RSS_H_
+#define _HINIC3_RSS_H_
+
+#include <linux/netdevice.h>
+
+int hinic3_rss_init(struct net_device *netdev);
+void hinic3_rss_uninit(struct net_device *netdev);
+void hinic3_try_to_enable_rss(struct net_device *netdev);
+void hinic3_clear_rss_config(struct net_device *netdev);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
new file mode 100644
index 000000000000..16c00c3bb1ed
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
@@ -0,0 +1,551 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/netdevice.h>
+#include <net/gro.h>
+#include <net/page_pool/helpers.h>
+
+#include "hinic3_hwdev.h"
+#include "hinic3_nic_dev.h"
+#include "hinic3_nic_io.h"
+#include "hinic3_rx.h"
+
+#define HINIC3_RX_HDR_SIZE 256
+#define HINIC3_RX_BUFFER_WRITE 16
+
+#define HINIC3_RX_TCP_PKT 0x3
+#define HINIC3_RX_UDP_PKT 0x4
+#define HINIC3_RX_SCTP_PKT 0x7
+
+#define HINIC3_RX_IPV4_PKT 0
+#define HINIC3_RX_IPV6_PKT 1
+#define HINIC3_RX_INVALID_IP_TYPE 2
+
+#define HINIC3_RX_PKT_FORMAT_NON_TUNNEL 0
+#define HINIC3_RX_PKT_FORMAT_VXLAN 1
+
+#define HINIC3_LRO_PKT_HDR_LEN_IPV4 66
+#define HINIC3_LRO_PKT_HDR_LEN_IPV6 86
+#define HINIC3_LRO_PKT_HDR_LEN(cqe) \
+ (RQ_CQE_OFFOLAD_TYPE_GET((cqe)->offload_type, IP_TYPE) == \
+ HINIC3_RX_IPV6_PKT ? HINIC3_LRO_PKT_HDR_LEN_IPV6 : \
+ HINIC3_LRO_PKT_HDR_LEN_IPV4)
+
+int hinic3_alloc_rxqs(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct pci_dev *pdev = nic_dev->pdev;
+ u16 num_rxqs = nic_dev->max_qps;
+ struct hinic3_rxq *rxq;
+ u16 q_id;
+
+ nic_dev->rxqs = kcalloc(num_rxqs, sizeof(*nic_dev->rxqs), GFP_KERNEL);
+ if (!nic_dev->rxqs)
+ return -ENOMEM;
+
+ for (q_id = 0; q_id < num_rxqs; q_id++) {
+ rxq = &nic_dev->rxqs[q_id];
+ rxq->netdev = netdev;
+ rxq->dev = &pdev->dev;
+ rxq->q_id = q_id;
+ rxq->buf_len = nic_dev->rx_buf_len;
+ rxq->buf_len_shift = ilog2(nic_dev->rx_buf_len);
+ rxq->q_depth = nic_dev->q_params.rq_depth;
+ rxq->q_mask = nic_dev->q_params.rq_depth - 1;
+ }
+
+ return 0;
+}
+
+void hinic3_free_rxqs(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ kfree(nic_dev->rxqs);
+}
+
+static int rx_alloc_mapped_page(struct page_pool *page_pool,
+ struct hinic3_rx_info *rx_info, u16 buf_len)
+{
+ struct page *page;
+ u32 page_offset;
+
+ if (likely(rx_info->page))
+ return 0;
+
+ page = page_pool_dev_alloc_frag(page_pool, &page_offset, buf_len);
+ if (unlikely(!page))
+ return -ENOMEM;
+
+ rx_info->page = page;
+ rx_info->page_offset = page_offset;
+
+ return 0;
+}
+
+/* Associate fixed completion element to every wqe in the rq. Every rq wqe will
+ * always post completion to the same place.
+ */
+static void rq_associate_cqes(struct hinic3_rxq *rxq)
+{
+ struct hinic3_queue_pages *qpages;
+ struct hinic3_rq_wqe *rq_wqe;
+ dma_addr_t cqe_dma;
+ u32 i;
+
+ qpages = &rxq->rq->wq.qpages;
+
+ for (i = 0; i < rxq->q_depth; i++) {
+ rq_wqe = get_q_element(qpages, i, NULL);
+ cqe_dma = rxq->cqe_start_paddr +
+ i * sizeof(struct hinic3_rq_cqe);
+ rq_wqe->cqe_hi_addr = cpu_to_le32(upper_32_bits(cqe_dma));
+ rq_wqe->cqe_lo_addr = cpu_to_le32(lower_32_bits(cqe_dma));
+ }
+}
+
+static void rq_wqe_buf_set(struct hinic3_io_queue *rq, u32 wqe_idx,
+			   dma_addr_t dma_addr, u16 len)
+{
+ struct hinic3_rq_wqe *rq_wqe;
+
+ rq_wqe = get_q_element(&rq->wq.qpages, wqe_idx, NULL);
+ rq_wqe->buf_hi_addr = cpu_to_le32(upper_32_bits(dma_addr));
+ rq_wqe->buf_lo_addr = cpu_to_le32(lower_32_bits(dma_addr));
+}
+
+static u32 hinic3_rx_fill_buffers(struct hinic3_rxq *rxq)
+{
+ u32 i, free_wqebbs = rxq->delta - 1;
+ struct hinic3_rx_info *rx_info;
+ dma_addr_t dma_addr;
+ int err;
+
+ for (i = 0; i < free_wqebbs; i++) {
+ rx_info = &rxq->rx_info[rxq->next_to_update];
+
+ err = rx_alloc_mapped_page(rxq->page_pool, rx_info,
+ rxq->buf_len);
+ if (unlikely(err))
+ break;
+
+ dma_addr = page_pool_get_dma_addr(rx_info->page) +
+ rx_info->page_offset;
+ rq_wqe_buf_set(rxq->rq, rxq->next_to_update, dma_addr,
+ rxq->buf_len);
+ rxq->next_to_update = (rxq->next_to_update + 1) & rxq->q_mask;
+ }
+
+ if (likely(i)) {
+ hinic3_write_db(rxq->rq, rxq->q_id & 3, DB_CFLAG_DP_RQ,
+ rxq->next_to_update << HINIC3_NORMAL_RQ_WQE);
+ rxq->delta -= i;
+ rxq->next_to_alloc = rxq->next_to_update;
+ }
+
+ return i;
+}
+
+static u32 hinic3_alloc_rx_buffers(struct hinic3_dyna_rxq_res *rqres,
+ u32 rq_depth, u16 buf_len)
+{
+ u32 free_wqebbs = rq_depth - 1;
+ u32 idx;
+ int err;
+
+ for (idx = 0; idx < free_wqebbs; idx++) {
+ err = rx_alloc_mapped_page(rqres->page_pool,
+ &rqres->rx_info[idx], buf_len);
+ if (err)
+ break;
+ }
+
+ return idx;
+}
+
+static void hinic3_free_rx_buffers(struct hinic3_dyna_rxq_res *rqres,
+ u32 q_depth)
+{
+ struct hinic3_rx_info *rx_info;
+ u32 i;
+
+ /* Free all the Rx ring sk_buffs */
+ for (i = 0; i < q_depth; i++) {
+ rx_info = &rqres->rx_info[i];
+
+ if (rx_info->page) {
+ page_pool_put_full_page(rqres->page_pool,
+ rx_info->page, false);
+ rx_info->page = NULL;
+ }
+ }
+}
+
+static void hinic3_add_rx_frag(struct hinic3_rxq *rxq,
+ struct hinic3_rx_info *rx_info,
+ struct sk_buff *skb, u32 size)
+{
+ struct page *page;
+ u8 *va;
+
+ page = rx_info->page;
+ va = (u8 *)page_address(page) + rx_info->page_offset;
+ net_prefetch(va);
+
+ page_pool_dma_sync_for_cpu(rxq->page_pool, page, rx_info->page_offset,
+ rxq->buf_len);
+
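+	/* Small frames are copied into the skb linear area and the page is
+	 * returned to the pool; larger frames are attached as a page frag.
+	 */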
+ if (size <= HINIC3_RX_HDR_SIZE && !skb_is_nonlinear(skb)) {
+ memcpy(__skb_put(skb, size), va,
+ ALIGN(size, sizeof(long)));
+ page_pool_put_full_page(rxq->page_pool, page, false);
+
+ return;
+ }
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ rx_info->page_offset, size, rxq->buf_len);
+ skb_mark_for_recycle(skb);
+}
+
+static void packaging_skb(struct hinic3_rxq *rxq, struct sk_buff *skb,
+ u32 sge_num, u32 pkt_len)
+{
+ struct hinic3_rx_info *rx_info;
+ u32 temp_pkt_len = pkt_len;
+ u32 temp_sge_num = sge_num;
+ u32 sw_ci;
+ u32 size;
+
+ sw_ci = rxq->cons_idx & rxq->q_mask;
+ while (temp_sge_num) {
+ rx_info = &rxq->rx_info[sw_ci];
+ sw_ci = (sw_ci + 1) & rxq->q_mask;
+ if (unlikely(temp_pkt_len > rxq->buf_len)) {
+ size = rxq->buf_len;
+ temp_pkt_len -= rxq->buf_len;
+ } else {
+ size = temp_pkt_len;
+ }
+
+ hinic3_add_rx_frag(rxq, rx_info, skb, size);
+
+ /* clear contents of buffer_info */
+ rx_info->page = NULL;
+ temp_sge_num--;
+ }
+}
+
+static u32 hinic3_get_sge_num(struct hinic3_rxq *rxq, u32 pkt_len)
+{
+ u32 sge_num;
+
+ sge_num = pkt_len >> rxq->buf_len_shift;
+ sge_num += (pkt_len & (rxq->buf_len - 1)) ? 1 : 0;
+
+ return sge_num;
+}
+
+static struct sk_buff *hinic3_fetch_rx_buffer(struct hinic3_rxq *rxq,
+ u32 pkt_len)
+{
+ struct sk_buff *skb;
+ u32 sge_num;
+
+ skb = napi_alloc_skb(&rxq->irq_cfg->napi, HINIC3_RX_HDR_SIZE);
+ if (unlikely(!skb))
+ return NULL;
+
+ sge_num = hinic3_get_sge_num(rxq, pkt_len);
+
+ net_prefetchw(skb->data);
+ packaging_skb(rxq, skb, sge_num, pkt_len);
+
+ rxq->cons_idx += sge_num;
+ rxq->delta += sge_num;
+
+ return skb;
+}
+
+static void hinic3_pull_tail(struct sk_buff *skb)
+{
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
+ unsigned int pull_len;
+ unsigned char *va;
+
+ va = skb_frag_address(frag);
+
+ /* we need the header to contain the greater of either ETH_HLEN or
+ * 60 bytes if the skb->len is less than 60 for skb_pad.
+ */
+ pull_len = eth_get_headlen(skb->dev, va, HINIC3_RX_HDR_SIZE);
+
+ /* align pull length to size of long to optimize memcpy performance */
+ skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
+
+ /* update all of the pointers */
+ skb_frag_size_sub(frag, pull_len);
+ skb_frag_off_add(frag, pull_len);
+
+ skb->data_len -= pull_len;
+ skb->tail += pull_len;
+}
+
+static void hinic3_rx_csum(struct hinic3_rxq *rxq, u32 offload_type,
+ u32 status, struct sk_buff *skb)
+{
+ u32 pkt_fmt = RQ_CQE_OFFOLAD_TYPE_GET(offload_type, TUNNEL_PKT_FORMAT);
+ u32 pkt_type = RQ_CQE_OFFOLAD_TYPE_GET(offload_type, PKT_TYPE);
+ u32 ip_type = RQ_CQE_OFFOLAD_TYPE_GET(offload_type, IP_TYPE);
+ u32 csum_err = RQ_CQE_STATUS_GET(status, CSUM_ERR);
+ struct net_device *netdev = rxq->netdev;
+
+ if (!(netdev->features & NETIF_F_RXCSUM))
+ return;
+
+ if (unlikely(csum_err)) {
+ /* pkt type is recognized by HW, and csum is wrong */
+ skb->ip_summed = CHECKSUM_NONE;
+ return;
+ }
+
+ if (ip_type == HINIC3_RX_INVALID_IP_TYPE ||
+ !(pkt_fmt == HINIC3_RX_PKT_FORMAT_NON_TUNNEL ||
+ pkt_fmt == HINIC3_RX_PKT_FORMAT_VXLAN)) {
+ skb->ip_summed = CHECKSUM_NONE;
+ return;
+ }
+
+ switch (pkt_type) {
+ case HINIC3_RX_TCP_PKT:
+ case HINIC3_RX_UDP_PKT:
+ case HINIC3_RX_SCTP_PKT:
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ break;
+ default:
+ skb->ip_summed = CHECKSUM_NONE;
+ break;
+ }
+}
+
+static void hinic3_lro_set_gso_params(struct sk_buff *skb, u16 num_lro)
+{
+ struct ethhdr *eth = (struct ethhdr *)(skb->data);
+ __be16 proto;
+
+ proto = __vlan_get_protocol(skb, eth->h_proto, NULL);
+
+ skb_shinfo(skb)->gso_size = DIV_ROUND_UP(skb->len - skb_headlen(skb),
+ num_lro);
+ skb_shinfo(skb)->gso_type = proto == htons(ETH_P_IP) ?
+ SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
+ skb_shinfo(skb)->gso_segs = num_lro;
+}
+
+static int recv_one_pkt(struct hinic3_rxq *rxq, struct hinic3_rq_cqe *rx_cqe,
+ u32 pkt_len, u32 vlan_len, u32 status)
+{
+ struct net_device *netdev = rxq->netdev;
+ struct sk_buff *skb;
+ u32 offload_type;
+ u16 num_lro;
+
+ skb = hinic3_fetch_rx_buffer(rxq, pkt_len);
+ if (unlikely(!skb))
+ return -ENOMEM;
+
+ /* place header in linear portion of buffer */
+ if (skb_is_nonlinear(skb))
+ hinic3_pull_tail(skb);
+
+ offload_type = le32_to_cpu(rx_cqe->offload_type);
+ hinic3_rx_csum(rxq, offload_type, status, skb);
+
+ num_lro = RQ_CQE_STATUS_GET(status, NUM_LRO);
+ if (num_lro)
+ hinic3_lro_set_gso_params(skb, num_lro);
+
+ skb_record_rx_queue(skb, rxq->q_id);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ if (skb_has_frag_list(skb)) {
+ napi_gro_flush(&rxq->irq_cfg->napi, false);
+ netif_receive_skb(skb);
+ } else {
+ napi_gro_receive(&rxq->irq_cfg->napi, skb);
+ }
+
+ return 0;
+}
+
+int hinic3_alloc_rxqs_res(struct net_device *netdev, u16 num_rq,
+ u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res)
+{
+ u64 cqe_mem_size = sizeof(struct hinic3_rq_cqe) * rq_depth;
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct page_pool_params pp_params = {};
+ struct hinic3_dyna_rxq_res *rqres;
+ u32 pkt_idx;
+ int idx;
+
+ for (idx = 0; idx < num_rq; idx++) {
+ rqres = &rxqs_res[idx];
+ rqres->rx_info = kcalloc(rq_depth, sizeof(*rqres->rx_info),
+ GFP_KERNEL);
+ if (!rqres->rx_info)
+ goto err_free_rqres;
+
+ rqres->cqe_start_vaddr =
+ dma_alloc_coherent(&nic_dev->pdev->dev, cqe_mem_size,
+ &rqres->cqe_start_paddr, GFP_KERNEL);
+ if (!rqres->cqe_start_vaddr) {
+ netdev_err(netdev, "Failed to alloc rxq%d rx cqe\n",
+ idx);
+ goto err_free_rx_info;
+ }
+
+ pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pp_params.pool_size = rq_depth * nic_dev->rx_buf_len /
+ PAGE_SIZE;
+ pp_params.nid = dev_to_node(&nic_dev->pdev->dev);
+ pp_params.dev = &nic_dev->pdev->dev;
+ pp_params.dma_dir = DMA_FROM_DEVICE;
+ pp_params.max_len = PAGE_SIZE;
+ rqres->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(rqres->page_pool)) {
+ netdev_err(netdev, "Failed to create rxq%d page pool\n",
+ idx);
+ goto err_free_cqe;
+ }
+
+ pkt_idx = hinic3_alloc_rx_buffers(rqres, rq_depth,
+ nic_dev->rx_buf_len);
+ if (!pkt_idx) {
+ netdev_err(netdev, "Failed to alloc rxq%d rx buffers\n",
+ idx);
+ goto err_destroy_page_pool;
+ }
+ rqres->next_to_alloc = pkt_idx;
+ }
+
+ return 0;
+
+err_destroy_page_pool:
+ page_pool_destroy(rqres->page_pool);
+err_free_cqe:
+ dma_free_coherent(&nic_dev->pdev->dev, cqe_mem_size,
+ rqres->cqe_start_vaddr,
+ rqres->cqe_start_paddr);
+err_free_rx_info:
+ kfree(rqres->rx_info);
+err_free_rqres:
+ hinic3_free_rxqs_res(netdev, idx, rq_depth, rxqs_res);
+
+ return -ENOMEM;
+}
+
+void hinic3_free_rxqs_res(struct net_device *netdev, u16 num_rq,
+ u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res)
+{
+ u64 cqe_mem_size = sizeof(struct hinic3_rq_cqe) * rq_depth;
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_dyna_rxq_res *rqres;
+ int idx;
+
+ for (idx = 0; idx < num_rq; idx++) {
+ rqres = &rxqs_res[idx];
+
+ hinic3_free_rx_buffers(rqres, rq_depth);
+ page_pool_destroy(rqres->page_pool);
+ dma_free_coherent(&nic_dev->pdev->dev, cqe_mem_size,
+ rqres->cqe_start_vaddr,
+ rqres->cqe_start_paddr);
+ kfree(rqres->rx_info);
+ }
+}
+
+int hinic3_configure_rxqs(struct net_device *netdev, u16 num_rq,
+ u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_dyna_rxq_res *rqres;
+ struct msix_entry *msix_entry;
+ struct hinic3_rxq *rxq;
+ u16 q_id;
+ u32 pkts;
+
+ for (q_id = 0; q_id < num_rq; q_id++) {
+ rxq = &nic_dev->rxqs[q_id];
+ rqres = &rxqs_res[q_id];
+ msix_entry = &nic_dev->qps_msix_entries[q_id];
+
+ rxq->irq_id = msix_entry->vector;
+ rxq->msix_entry_idx = msix_entry->entry;
+ rxq->next_to_update = 0;
+ rxq->next_to_alloc = rqres->next_to_alloc;
+ rxq->q_depth = rq_depth;
+ rxq->delta = rxq->q_depth;
+ rxq->q_mask = rxq->q_depth - 1;
+ rxq->cons_idx = 0;
+
+ rxq->cqe_arr = rqres->cqe_start_vaddr;
+ rxq->cqe_start_paddr = rqres->cqe_start_paddr;
+ rxq->rx_info = rqres->rx_info;
+ rxq->page_pool = rqres->page_pool;
+
+ rxq->rq = &nic_dev->nic_io->rq[rxq->q_id];
+
+ rq_associate_cqes(rxq);
+
+ pkts = hinic3_rx_fill_buffers(rxq);
+ if (!pkts) {
+ netdev_err(netdev, "Failed to fill Rx buffer\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+int hinic3_rx_poll(struct hinic3_rxq *rxq, int budget)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(rxq->netdev);
+ u32 sw_ci, status, pkt_len, vlan_len;
+ struct hinic3_rq_cqe *rx_cqe;
+ u32 num_wqe = 0;
+ int nr_pkts = 0;
+ u16 num_lro;
+
+ while (likely(nr_pkts < budget)) {
+ sw_ci = rxq->cons_idx & rxq->q_mask;
+ rx_cqe = rxq->cqe_arr + sw_ci;
+ status = le32_to_cpu(rx_cqe->status);
+ if (!RQ_CQE_STATUS_GET(status, RXDONE))
+ break;
+
+ /* make sure we read rx_done before packet length */
+ rmb();
+
+ vlan_len = le32_to_cpu(rx_cqe->vlan_len);
+ pkt_len = RQ_CQE_SGE_GET(vlan_len, LEN);
+ if (recv_one_pkt(rxq, rx_cqe, pkt_len, vlan_len, status))
+ break;
+
+ nr_pkts++;
+ num_lro = RQ_CQE_STATUS_GET(status, NUM_LRO);
+ if (num_lro)
+ num_wqe += hinic3_get_sge_num(rxq, pkt_len);
+
+ rx_cqe->status = 0;
+
+ if (num_wqe >= nic_dev->lro_replenish_thld)
+ break;
+ }
+
+ if (rxq->delta >= HINIC3_RX_BUFFER_WRITE)
+ hinic3_rx_fill_buffers(rxq);
+
+ return nr_pkts;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h
new file mode 100644
index 000000000000..44ae841a3648
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_RX_H_
+#define _HINIC3_RX_H_
+
+#include <linux/bitfield.h>
+#include <linux/netdevice.h>
+
+#define RQ_CQE_OFFOLAD_TYPE_PKT_TYPE_MASK GENMASK(4, 0)
+#define RQ_CQE_OFFOLAD_TYPE_IP_TYPE_MASK GENMASK(6, 5)
+#define RQ_CQE_OFFOLAD_TYPE_TUNNEL_PKT_FORMAT_MASK GENMASK(11, 8)
+#define RQ_CQE_OFFOLAD_TYPE_VLAN_EN_MASK BIT(21)
+#define RQ_CQE_OFFOLAD_TYPE_GET(val, member) \
+ FIELD_GET(RQ_CQE_OFFOLAD_TYPE_##member##_MASK, val)
+
+#define RQ_CQE_SGE_VLAN_MASK GENMASK(15, 0)
+#define RQ_CQE_SGE_LEN_MASK GENMASK(31, 16)
+#define RQ_CQE_SGE_GET(val, member) \
+ FIELD_GET(RQ_CQE_SGE_##member##_MASK, val)
+
+#define RQ_CQE_STATUS_CSUM_ERR_MASK GENMASK(15, 0)
+#define RQ_CQE_STATUS_NUM_LRO_MASK GENMASK(23, 16)
+#define RQ_CQE_STATUS_RXDONE_MASK BIT(31)
+#define RQ_CQE_STATUS_GET(val, member) \
+ FIELD_GET(RQ_CQE_STATUS_##member##_MASK, val)
+
+/* RX Completion information that is provided by HW for a specific RX WQE */
+struct hinic3_rq_cqe {
+ __le32 status;
+ __le32 vlan_len;
+ __le32 offload_type;
+ __le32 rsvd3;
+ __le32 rsvd4;
+ __le32 rsvd5;
+ __le32 rsvd6;
+ __le32 pkt_info;
+};
+
+struct hinic3_rq_wqe {
+ __le32 buf_hi_addr;
+ __le32 buf_lo_addr;
+ __le32 cqe_hi_addr;
+ __le32 cqe_lo_addr;
+};
+
+struct hinic3_rx_info {
+ struct page *page;
+ u32 page_offset;
+};
+
+struct hinic3_rxq {
+ struct net_device *netdev;
+
+ u16 q_id;
+ u32 q_depth;
+ u32 q_mask;
+
+ u16 buf_len;
+ u32 buf_len_shift;
+
+ u32 cons_idx;
+ u32 delta;
+
+ u32 irq_id;
+ u16 msix_entry_idx;
+
+ /* cqe_arr and rx_info are arrays of rq_depth elements. Each element is
+ * statically associated (by index) to a specific rq_wqe.
+ */
+ struct hinic3_rq_cqe *cqe_arr;
+ struct hinic3_rx_info *rx_info;
+ struct page_pool *page_pool;
+
+ struct hinic3_io_queue *rq;
+
+ struct hinic3_irq_cfg *irq_cfg;
+ u16 next_to_alloc;
+ u16 next_to_update;
+ struct device *dev; /* device for DMA mapping */
+
+ dma_addr_t cqe_start_paddr;
+} ____cacheline_aligned;
+
+struct hinic3_dyna_rxq_res {
+ u16 next_to_alloc;
+ struct hinic3_rx_info *rx_info;
+ dma_addr_t cqe_start_paddr;
+ void *cqe_start_vaddr;
+ struct page_pool *page_pool;
+};
+
+int hinic3_alloc_rxqs(struct net_device *netdev);
+void hinic3_free_rxqs(struct net_device *netdev);
+
+int hinic3_alloc_rxqs_res(struct net_device *netdev, u16 num_rq,
+ u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res);
+void hinic3_free_rxqs_res(struct net_device *netdev, u16 num_rq,
+ u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res);
+int hinic3_configure_rxqs(struct net_device *netdev, u16 num_rq,
+ u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res);
+int hinic3_rx_poll(struct hinic3_rxq *rxq, int budget);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
new file mode 100644
index 000000000000..92c43c05e3f2
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
@@ -0,0 +1,779 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/if_vlan.h>
+#include <linux/iopoll.h>
+#include <net/ip6_checksum.h>
+#include <net/ipv6.h>
+#include <net/netdev_queues.h>
+
+#include "hinic3_hwdev.h"
+#include "hinic3_nic_cfg.h"
+#include "hinic3_nic_dev.h"
+#include "hinic3_nic_io.h"
+#include "hinic3_tx.h"
+#include "hinic3_wq.h"
+
+#define MIN_SKB_LEN 32
+
+int hinic3_alloc_txqs(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ u16 q_id, num_txqs = nic_dev->max_qps;
+ struct pci_dev *pdev = nic_dev->pdev;
+ struct hinic3_txq *txq;
+
+ if (!num_txqs) {
+ dev_err(hwdev->dev, "Cannot allocate zero size txqs\n");
+ return -EINVAL;
+ }
+
+ nic_dev->txqs = kcalloc(num_txqs, sizeof(*nic_dev->txqs), GFP_KERNEL);
+ if (!nic_dev->txqs)
+ return -ENOMEM;
+
+ for (q_id = 0; q_id < num_txqs; q_id++) {
+ txq = &nic_dev->txqs[q_id];
+ txq->netdev = netdev;
+ txq->q_id = q_id;
+ txq->q_depth = nic_dev->q_params.sq_depth;
+ txq->q_mask = nic_dev->q_params.sq_depth - 1;
+ txq->dev = &pdev->dev;
+ }
+
+ return 0;
+}
+
+void hinic3_free_txqs(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ kfree(nic_dev->txqs);
+}
+
+static void hinic3_set_buf_desc(struct hinic3_sq_bufdesc *buf_descs,
+ dma_addr_t addr, u32 len)
+{
+ buf_descs->hi_addr = cpu_to_le32(upper_32_bits(addr));
+ buf_descs->lo_addr = cpu_to_le32(lower_32_bits(addr));
+ buf_descs->len = cpu_to_le32(len);
+}
+
+static int hinic3_tx_map_skb(struct net_device *netdev, struct sk_buff *skb,
+ struct hinic3_txq *txq,
+ struct hinic3_tx_info *tx_info,
+ struct hinic3_sq_wqe_combo *wqe_combo)
+{
+ struct hinic3_sq_wqe_desc *wqe_desc = wqe_combo->ctrl_bd0;
+ struct hinic3_sq_bufdesc *buf_desc = wqe_combo->bds_head;
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_dma_info *dma_info = tx_info->dma_info;
+ struct pci_dev *pdev = nic_dev->pdev;
+ skb_frag_t *frag;
+ u32 i, idx;
+ int err;
+
+ dma_info[0].dma = dma_map_single(&pdev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, dma_info[0].dma))
+ return -EFAULT;
+
+ dma_info[0].len = skb_headlen(skb);
+
+ wqe_desc->hi_addr = cpu_to_le32(upper_32_bits(dma_info[0].dma));
+ wqe_desc->lo_addr = cpu_to_le32(lower_32_bits(dma_info[0].dma));
+
+ wqe_desc->ctrl_len = cpu_to_le32(dma_info[0].len);
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ frag = &(skb_shinfo(skb)->frags[i]);
+ if (unlikely(i == wqe_combo->first_bds_num))
+ buf_desc = wqe_combo->bds_sec2;
+
+ idx = i + 1;
+ dma_info[idx].dma = skb_frag_dma_map(&pdev->dev, frag, 0,
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, dma_info[idx].dma)) {
+ err = -EFAULT;
+ goto err_unmap_page;
+ }
+ dma_info[idx].len = skb_frag_size(frag);
+
+ hinic3_set_buf_desc(buf_desc, dma_info[idx].dma,
+ dma_info[idx].len);
+ buf_desc++;
+ }
+
+ return 0;
+
+err_unmap_page:
+ while (idx > 1) {
+ idx--;
+ dma_unmap_page(&pdev->dev, dma_info[idx].dma,
+ dma_info[idx].len, DMA_TO_DEVICE);
+ }
+ dma_unmap_single(&pdev->dev, dma_info[0].dma, dma_info[0].len,
+ DMA_TO_DEVICE);
+
+ return err;
+}
+
+static void hinic3_tx_unmap_skb(struct net_device *netdev,
+ struct sk_buff *skb,
+ struct hinic3_dma_info *dma_info)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct pci_dev *pdev = nic_dev->pdev;
+ int i;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags;) {
+ i++;
+ dma_unmap_page(&pdev->dev,
+ dma_info[i].dma,
+ dma_info[i].len, DMA_TO_DEVICE);
+ }
+
+ dma_unmap_single(&pdev->dev, dma_info[0].dma,
+ dma_info[0].len, DMA_TO_DEVICE);
+}
+
+static void free_all_tx_skbs(struct net_device *netdev, u32 sq_depth,
+ struct hinic3_tx_info *tx_info_arr)
+{
+ struct hinic3_tx_info *tx_info;
+ u32 idx;
+
+ for (idx = 0; idx < sq_depth; idx++) {
+ tx_info = &tx_info_arr[idx];
+ if (tx_info->skb) {
+ hinic3_tx_unmap_skb(netdev, tx_info->skb,
+ tx_info->dma_info);
+ dev_kfree_skb_any(tx_info->skb);
+ tx_info->skb = NULL;
+ }
+ }
+}
+
+union hinic3_ip {
+ struct iphdr *v4;
+ struct ipv6hdr *v6;
+ unsigned char *hdr;
+};
+
+union hinic3_l4 {
+ struct tcphdr *tcp;
+ struct udphdr *udp;
+ unsigned char *hdr;
+};
+
+enum hinic3_l3_type {
+ HINIC3_L3_UNKNOWN = 0,
+ HINIC3_L3_IP6_PKT = 1,
+ HINIC3_L3_IP4_PKT_NO_CSUM = 2,
+ HINIC3_L3_IP4_PKT_CSUM = 3,
+};
+
+enum hinic3_l4_offload_type {
+ HINIC3_L4_OFFLOAD_DISABLE = 0,
+ HINIC3_L4_OFFLOAD_TCP = 1,
+ HINIC3_L4_OFFLOAD_STCP = 2,
+ HINIC3_L4_OFFLOAD_UDP = 3,
+};
+
+/* initialize l4 offset and offload */
+static void get_inner_l4_info(struct sk_buff *skb, union hinic3_l4 *l4,
+ u8 l4_proto, u32 *offset,
+ enum hinic3_l4_offload_type *l4_offload)
+{
+ switch (l4_proto) {
+ case IPPROTO_TCP:
+ *l4_offload = HINIC3_L4_OFFLOAD_TCP;
+		/* Match TSO: payload offset points to the start of the payload */
+ *offset = (l4->tcp->doff << TCP_HDR_DATA_OFF_UNIT_SHIFT) +
+ TRANSPORT_OFFSET(l4->hdr, skb);
+ break;
+
+ case IPPROTO_UDP:
+ *l4_offload = HINIC3_L4_OFFLOAD_UDP;
+ *offset = TRANSPORT_OFFSET(l4->hdr, skb);
+ break;
+ default:
+ *l4_offload = HINIC3_L4_OFFLOAD_DISABLE;
+ *offset = 0;
+ }
+}
+
+static int hinic3_tx_csum(struct hinic3_txq *txq, struct hinic3_sq_task *task,
+ struct sk_buff *skb)
+{
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ if (skb->encapsulation) {
+ union hinic3_ip ip;
+ u8 l4_proto;
+
+ task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1,
+ TUNNEL_FLAG));
+
+ ip.hdr = skb_network_header(skb);
+ if (ip.v4->version == 4) {
+ l4_proto = ip.v4->protocol;
+ } else if (ip.v4->version == 6) {
+ union hinic3_l4 l4;
+ unsigned char *exthdr;
+ __be16 frag_off;
+
+ exthdr = ip.hdr + sizeof(*ip.v6);
+ l4_proto = ip.v6->nexthdr;
+ l4.hdr = skb_transport_header(skb);
+ if (l4.hdr != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data,
+ &l4_proto, &frag_off);
+ } else {
+ l4_proto = IPPROTO_RAW;
+ }
+
+ if (l4_proto != IPPROTO_UDP ||
+ ((struct udphdr *)skb_transport_header(skb))->dest !=
+ VXLAN_OFFLOAD_PORT_LE) {
+ /* Unsupported tunnel packet, disable csum offload */
+ skb_checksum_help(skb);
+ return 0;
+ }
+ }
+
+ task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1, INNER_L4_EN));
+
+ return 1;
+}
+
+static void get_inner_l3_l4_type(struct sk_buff *skb, union hinic3_ip *ip,
+ union hinic3_l4 *l4,
+ enum hinic3_l3_type *l3_type, u8 *l4_proto)
+{
+ unsigned char *exthdr;
+ __be16 frag_off;
+
+ if (ip->v4->version == 4) {
+ *l3_type = HINIC3_L3_IP4_PKT_CSUM;
+ *l4_proto = ip->v4->protocol;
+ } else if (ip->v4->version == 6) {
+ *l3_type = HINIC3_L3_IP6_PKT;
+ exthdr = ip->hdr + sizeof(*ip->v6);
+ *l4_proto = ip->v6->nexthdr;
+ if (exthdr != l4->hdr) {
+ ipv6_skip_exthdr(skb, exthdr - skb->data,
+ l4_proto, &frag_off);
+ }
+ } else {
+ *l3_type = HINIC3_L3_UNKNOWN;
+ *l4_proto = 0;
+ }
+}
+
+static void hinic3_set_tso_info(struct hinic3_sq_task *task, __le32 *queue_info,
+ enum hinic3_l4_offload_type l4_offload,
+ u32 offset, u32 mss)
+{
+ if (l4_offload == HINIC3_L4_OFFLOAD_TCP) {
+ *queue_info |= cpu_to_le32(SQ_CTRL_QUEUE_INFO_SET(1, TSO));
+ task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1,
+ INNER_L4_EN));
+ } else if (l4_offload == HINIC3_L4_OFFLOAD_UDP) {
+ *queue_info |= cpu_to_le32(SQ_CTRL_QUEUE_INFO_SET(1, UFO));
+ task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1,
+ INNER_L4_EN));
+ }
+
+ /* enable L3 calculation */
+ task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1, INNER_L3_EN));
+
+ *queue_info |= cpu_to_le32(SQ_CTRL_QUEUE_INFO_SET(offset >> 1, PLDOFF));
+
+ /* set MSS value */
+ *queue_info &= cpu_to_le32(~SQ_CTRL_QUEUE_INFO_MSS_MASK);
+ *queue_info |= cpu_to_le32(SQ_CTRL_QUEUE_INFO_SET(mss, MSS));
+}
+
+static __sum16 csum_magic(union hinic3_ip *ip, unsigned short proto)
+{
+ return (ip->v4->version == 4) ?
+ csum_tcpudp_magic(ip->v4->saddr, ip->v4->daddr, 0, proto, 0) :
+ csum_ipv6_magic(&ip->v6->saddr, &ip->v6->daddr, 0, proto, 0);
+}
+
+static int hinic3_tso(struct hinic3_sq_task *task, __le32 *queue_info,
+ struct sk_buff *skb)
+{
+ enum hinic3_l4_offload_type l4_offload;
+ enum hinic3_l3_type l3_type;
+ union hinic3_ip ip;
+ union hinic3_l4 l4;
+ u8 l4_proto;
+ u32 offset;
+ int err;
+
+ if (!skb_is_gso(skb))
+ return 0;
+
+ err = skb_cow_head(skb, 0);
+ if (err < 0)
+ return err;
+
+ if (skb->encapsulation) {
+		u32 gso_type = skb_shinfo(skb)->gso_type;
+
+		/* L3 checksum is always enabled */
+ task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1, OUT_L3_EN));
+ task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1,
+ TUNNEL_FLAG));
+
+ l4.hdr = skb_transport_header(skb);
+ ip.hdr = skb_network_header(skb);
+
+ if (gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
+ l4.udp->check = ~csum_magic(&ip, IPPROTO_UDP);
+ task->pkt_info0 |=
+ cpu_to_le32(SQ_TASK_INFO0_SET(1, OUT_L4_EN));
+ }
+
+ ip.hdr = skb_inner_network_header(skb);
+ l4.hdr = skb_inner_transport_header(skb);
+ } else {
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_transport_header(skb);
+ }
+
+ get_inner_l3_l4_type(skb, &ip, &l4, &l3_type, &l4_proto);
+
+ if (l4_proto == IPPROTO_TCP)
+ l4.tcp->check = ~csum_magic(&ip, IPPROTO_TCP);
+
+ get_inner_l4_info(skb, &l4, l4_proto, &offset, &l4_offload);
+
+ hinic3_set_tso_info(task, queue_info, l4_offload, offset,
+ skb_shinfo(skb)->gso_size);
+
+ return 1;
+}
+
+static void hinic3_set_vlan_tx_offload(struct hinic3_sq_task *task,
+ u16 vlan_tag, u8 vlan_tpid)
+{
+	/* vlan_tpid: 0=select TPID0 in IPSU, 1=select TPID1 in IPSU,
+ * 2=select TPID2 in IPSU, 3=select TPID3 in IPSU,
+ * 4=select TPID4 in IPSU
+ */
+ task->vlan_offload =
+ cpu_to_le32(SQ_TASK_INFO3_SET(vlan_tag, VLAN_TAG) |
+ SQ_TASK_INFO3_SET(vlan_tpid, VLAN_TPID) |
+ SQ_TASK_INFO3_SET(1, VLAN_TAG_VALID));
+}
+
+static u32 hinic3_tx_offload(struct sk_buff *skb, struct hinic3_sq_task *task,
+ __le32 *queue_info, struct hinic3_txq *txq)
+{
+ u32 offload = 0;
+ int tso_cs_en;
+
+ task->pkt_info0 = 0;
+ task->ip_identify = 0;
+ task->rsvd = 0;
+ task->vlan_offload = 0;
+
+ tso_cs_en = hinic3_tso(task, queue_info, skb);
+ if (tso_cs_en < 0) {
+ offload = HINIC3_TX_OFFLOAD_INVALID;
+ return offload;
+ } else if (tso_cs_en) {
+ offload |= HINIC3_TX_OFFLOAD_TSO;
+ } else {
+ tso_cs_en = hinic3_tx_csum(txq, task, skb);
+ if (tso_cs_en)
+ offload |= HINIC3_TX_OFFLOAD_CSUM;
+ }
+
+#define VLAN_INSERT_MODE_MAX 5
+ if (unlikely(skb_vlan_tag_present(skb))) {
+ /* select vlan insert mode by qid, default 802.1Q Tag type */
+ hinic3_set_vlan_tx_offload(task, skb_vlan_tag_get(skb),
+ txq->q_id % VLAN_INSERT_MODE_MAX);
+ offload |= HINIC3_TX_OFFLOAD_VLAN;
+ }
+
+ if (unlikely(SQ_CTRL_QUEUE_INFO_GET(*queue_info, PLDOFF) >
+ SQ_CTRL_MAX_PLDOFF)) {
+ offload = HINIC3_TX_OFFLOAD_INVALID;
+ return offload;
+ }
+
+ return offload;
+}
+
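+/* The SQ owner bit flips each time the producer index wraps around the ring,
+ * letting hardware tell newly posted WQEBBs apart from stale ones.
+ */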
+static u16 hinic3_get_and_update_sq_owner(struct hinic3_io_queue *sq,
+ u16 curr_pi, u16 wqebb_cnt)
+{
+ u16 owner = sq->owner;
+
+ if (unlikely(curr_pi + wqebb_cnt >= sq->wq.q_depth))
+ sq->owner = !sq->owner;
+
+ return owner;
+}
+
+static u16 hinic3_set_wqe_combo(struct hinic3_txq *txq,
+ struct hinic3_sq_wqe_combo *wqe_combo,
+ u32 offload, u16 num_sge, u16 *curr_pi)
+{
+ struct hinic3_sq_bufdesc *first_part_wqebbs, *second_part_wqebbs;
+ u16 first_part_wqebbs_num, tmp_pi;
+
+ wqe_combo->ctrl_bd0 = hinic3_wq_get_one_wqebb(&txq->sq->wq, curr_pi);
+ if (!offload && num_sge == 1) {
+ wqe_combo->wqe_type = SQ_WQE_COMPACT_TYPE;
+ return hinic3_get_and_update_sq_owner(txq->sq, *curr_pi, 1);
+ }
+
+ wqe_combo->wqe_type = SQ_WQE_EXTENDED_TYPE;
+
+ if (offload) {
+ wqe_combo->task = hinic3_wq_get_one_wqebb(&txq->sq->wq,
+ &tmp_pi);
+ wqe_combo->task_type = SQ_WQE_TASKSECT_16BYTES;
+ } else {
+ wqe_combo->task_type = SQ_WQE_TASKSECT_46BITS;
+ }
+
+ if (num_sge > 1) {
+		/* The first wqebb contains bd0, and a buf desc is the same
+		 * size as an sq wqebb, so we use (num_sge - 1) as the wanted
+		 * wqebb_cnt
+		 */
+ hinic3_wq_get_multi_wqebbs(&txq->sq->wq, num_sge - 1, &tmp_pi,
+ &first_part_wqebbs,
+ &second_part_wqebbs,
+ &first_part_wqebbs_num);
+ wqe_combo->bds_head = first_part_wqebbs;
+ wqe_combo->bds_sec2 = second_part_wqebbs;
+ wqe_combo->first_bds_num = first_part_wqebbs_num;
+ }
+
+ return hinic3_get_and_update_sq_owner(txq->sq, *curr_pi,
+ num_sge + !!offload);
+}
+
+static void hinic3_prepare_sq_ctrl(struct hinic3_sq_wqe_combo *wqe_combo,
+ __le32 queue_info, int nr_descs, u16 owner)
+{
+ struct hinic3_sq_wqe_desc *wqe_desc = wqe_combo->ctrl_bd0;
+
+ if (wqe_combo->wqe_type == SQ_WQE_COMPACT_TYPE) {
+ wqe_desc->ctrl_len |=
+ cpu_to_le32(SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) |
+ SQ_CTRL_SET(wqe_combo->wqe_type, EXTENDED) |
+ SQ_CTRL_SET(owner, OWNER));
+
+ /* compact wqe queue_info will transfer to chip */
+ wqe_desc->queue_info = 0;
+ return;
+ }
+
+ wqe_desc->ctrl_len |=
+ cpu_to_le32(SQ_CTRL_SET(nr_descs, BUFDESC_NUM) |
+ SQ_CTRL_SET(wqe_combo->task_type, TASKSECT_LEN) |
+ SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) |
+ SQ_CTRL_SET(wqe_combo->wqe_type, EXTENDED) |
+ SQ_CTRL_SET(owner, OWNER));
+
+ wqe_desc->queue_info = queue_info;
+ wqe_desc->queue_info |= cpu_to_le32(SQ_CTRL_QUEUE_INFO_SET(1, UC));
+
+ if (!SQ_CTRL_QUEUE_INFO_GET(wqe_desc->queue_info, MSS)) {
+ wqe_desc->queue_info |=
+ cpu_to_le32(SQ_CTRL_QUEUE_INFO_SET(HINIC3_TX_MSS_DEFAULT, MSS));
+ } else if (SQ_CTRL_QUEUE_INFO_GET(wqe_desc->queue_info, MSS) <
+ HINIC3_TX_MSS_MIN) {
+ /* mss should not be less than 80 */
+ wqe_desc->queue_info &=
+ cpu_to_le32(~SQ_CTRL_QUEUE_INFO_MSS_MASK);
+ wqe_desc->queue_info |=
+ cpu_to_le32(SQ_CTRL_QUEUE_INFO_SET(HINIC3_TX_MSS_MIN, MSS));
+ }
+}
+
+static netdev_tx_t hinic3_send_one_skb(struct sk_buff *skb,
+ struct net_device *netdev,
+ struct hinic3_txq *txq)
+{
+ struct hinic3_sq_wqe_combo wqe_combo = {};
+ struct hinic3_tx_info *tx_info;
+ struct hinic3_sq_task task;
+ u16 wqebb_cnt, num_sge;
+ __le32 queue_info = 0;
+ u16 saved_wq_prod_idx;
+ u16 owner, pi = 0;
+ u8 saved_sq_owner;
+ u32 offload;
+ int err;
+
+ if (unlikely(skb->len < MIN_SKB_LEN)) {
+ if (skb_pad(skb, MIN_SKB_LEN - skb->len))
+ goto err_out;
+
+ skb->len = MIN_SKB_LEN;
+ }
+
+ num_sge = skb_shinfo(skb)->nr_frags + 1;
+ /* assume normal wqe format + 1 wqebb for task info */
+ wqebb_cnt = num_sge + 1;
+
+ if (unlikely(hinic3_wq_free_wqebbs(&txq->sq->wq) < wqebb_cnt)) {
+ if (likely(wqebb_cnt > txq->tx_stop_thrs))
+ txq->tx_stop_thrs = min(wqebb_cnt, txq->tx_start_thrs);
+
+ netif_subqueue_try_stop(netdev, txq->sq->q_id,
+ hinic3_wq_free_wqebbs(&txq->sq->wq),
+ txq->tx_start_thrs);
+
+ return NETDEV_TX_BUSY;
+ }
+
+ offload = hinic3_tx_offload(skb, &task, &queue_info, txq);
+ if (unlikely(offload == HINIC3_TX_OFFLOAD_INVALID)) {
+ goto err_drop_pkt;
+ } else if (!offload) {
+ wqebb_cnt -= 1;
+ if (unlikely(num_sge == 1 &&
+ skb->len > HINIC3_COMPACT_WQEE_SKB_MAX_LEN))
+ goto err_drop_pkt;
+ }
+
+ saved_wq_prod_idx = txq->sq->wq.prod_idx;
+ saved_sq_owner = txq->sq->owner;
+
+ owner = hinic3_set_wqe_combo(txq, &wqe_combo, offload, num_sge, &pi);
+ if (offload)
+ *wqe_combo.task = task;
+
+ tx_info = &txq->tx_info[pi];
+ tx_info->skb = skb;
+ tx_info->wqebb_cnt = wqebb_cnt;
+
+ err = hinic3_tx_map_skb(netdev, skb, txq, tx_info, &wqe_combo);
+ if (err) {
+ /* Rollback work queue to reclaim the wqebb we did not use */
+ txq->sq->wq.prod_idx = saved_wq_prod_idx;
+ txq->sq->owner = saved_sq_owner;
+ goto err_drop_pkt;
+ }
+
+ netif_subqueue_sent(netdev, txq->sq->q_id, skb->len);
+ netif_subqueue_maybe_stop(netdev, txq->sq->q_id,
+ hinic3_wq_free_wqebbs(&txq->sq->wq),
+ txq->tx_stop_thrs,
+ txq->tx_start_thrs);
+
+ hinic3_prepare_sq_ctrl(&wqe_combo, queue_info, num_sge, owner);
+ hinic3_write_db(txq->sq, 0, DB_CFLAG_DP_SQ,
+ hinic3_get_sq_local_pi(txq->sq));
+
+ return NETDEV_TX_OK;
+
+err_drop_pkt:
+ dev_kfree_skb_any(skb);
+
+err_out:
+ return NETDEV_TX_OK;
+}
+
+netdev_tx_t hinic3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ u16 q_id = skb_get_queue_mapping(skb);
+
+ if (unlikely(!netif_carrier_ok(netdev)))
+ goto err_drop_pkt;
+
+ if (unlikely(q_id >= nic_dev->q_params.num_qps))
+ goto err_drop_pkt;
+
+ return hinic3_send_one_skb(skb, netdev, &nic_dev->txqs[q_id]);
+
+err_drop_pkt:
+ dev_kfree_skb_any(skb);
+
+ return NETDEV_TX_OK;
+}
+
+static bool is_hw_complete_sq_process(struct hinic3_io_queue *sq)
+{
+ u16 sw_pi, hw_ci;
+
+ sw_pi = hinic3_get_sq_local_pi(sq);
+ hw_ci = hinic3_get_sq_hw_ci(sq);
+
+ return sw_pi == hw_ci;
+}
+
+#define HINIC3_FLUSH_QUEUE_POLL_SLEEP_US 10000
+#define HINIC3_FLUSH_QUEUE_POLL_TIMEOUT_US 10000000
+static int hinic3_stop_sq(struct hinic3_txq *txq)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(txq->netdev);
+ int err, rc;
+
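+	/* Keep asking HW to drop pending TX packets until the SQ drains
+	 * (local PI catches up with the HW CI) or the poll times out.
+	 */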
+ err = read_poll_timeout(hinic3_force_drop_tx_pkt, rc,
+ is_hw_complete_sq_process(txq->sq) || rc,
+ HINIC3_FLUSH_QUEUE_POLL_SLEEP_US,
+ HINIC3_FLUSH_QUEUE_POLL_TIMEOUT_US,
+ true, nic_dev->hwdev);
+	if (rc)
+		return rc;
+
+	return err;
+}
+
+/* packet transmission should be stopped before calling this function */
+void hinic3_flush_txqs(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ u16 qid;
+ int err;
+
+ for (qid = 0; qid < nic_dev->q_params.num_qps; qid++) {
+ err = hinic3_stop_sq(&nic_dev->txqs[qid]);
+ netdev_tx_reset_subqueue(netdev, qid);
+ if (err)
+ netdev_err(netdev, "Failed to stop sq%u\n", qid);
+ }
+}
+
+#define HINIC3_BDS_PER_SQ_WQEBB \
+ (HINIC3_SQ_WQEBB_SIZE / sizeof(struct hinic3_sq_bufdesc))
+
+int hinic3_alloc_txqs_res(struct net_device *netdev, u16 num_sq,
+ u32 sq_depth, struct hinic3_dyna_txq_res *txqs_res)
+{
+ struct hinic3_dyna_txq_res *tqres;
+ int idx;
+
+ for (idx = 0; idx < num_sq; idx++) {
+ tqres = &txqs_res[idx];
+
+ tqres->tx_info = kcalloc(sq_depth, sizeof(*tqres->tx_info),
+ GFP_KERNEL);
+ if (!tqres->tx_info)
+ goto err_free_tqres;
+
+ tqres->bds = kcalloc(sq_depth * HINIC3_BDS_PER_SQ_WQEBB +
+ HINIC3_MAX_SQ_SGE, sizeof(*tqres->bds),
+ GFP_KERNEL);
+ if (!tqres->bds) {
+ kfree(tqres->tx_info);
+ goto err_free_tqres;
+ }
+ }
+
+ return 0;
+
+err_free_tqres:
+ while (idx > 0) {
+ idx--;
+ tqres = &txqs_res[idx];
+
+ kfree(tqres->bds);
+ kfree(tqres->tx_info);
+ }
+
+ return -ENOMEM;
+}
+
+void hinic3_free_txqs_res(struct net_device *netdev, u16 num_sq,
+ u32 sq_depth, struct hinic3_dyna_txq_res *txqs_res)
+{
+ struct hinic3_dyna_txq_res *tqres;
+ int idx;
+
+ for (idx = 0; idx < num_sq; idx++) {
+ tqres = &txqs_res[idx];
+
+ free_all_tx_skbs(netdev, sq_depth, tqres->tx_info);
+ kfree(tqres->bds);
+ kfree(tqres->tx_info);
+ }
+}
+
+int hinic3_configure_txqs(struct net_device *netdev, u16 num_sq,
+ u32 sq_depth, struct hinic3_dyna_txq_res *txqs_res)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_dyna_txq_res *tqres;
+ struct hinic3_txq *txq;
+ u16 q_id;
+ u32 idx;
+
+ for (q_id = 0; q_id < num_sq; q_id++) {
+ txq = &nic_dev->txqs[q_id];
+ tqres = &txqs_res[q_id];
+
+ txq->q_depth = sq_depth;
+ txq->q_mask = sq_depth - 1;
+
+ txq->tx_stop_thrs = min(HINIC3_DEFAULT_STOP_THRS,
+ sq_depth / 20);
+ txq->tx_start_thrs = min(HINIC3_DEFAULT_START_THRS,
+ sq_depth / 10);
+
+ txq->tx_info = tqres->tx_info;
+ for (idx = 0; idx < sq_depth; idx++)
+ txq->tx_info[idx].dma_info =
+ &tqres->bds[idx * HINIC3_BDS_PER_SQ_WQEBB];
+
+ txq->sq = &nic_dev->nic_io->sq[q_id];
+ }
+
+ return 0;
+}
+
+bool hinic3_tx_poll(struct hinic3_txq *txq, int budget)
+{
+ struct net_device *netdev = txq->netdev;
+ u16 hw_ci, sw_ci, q_id = txq->sq->q_id;
+ struct hinic3_tx_info *tx_info;
+ unsigned int bytes_compl = 0;
+ unsigned int pkts = 0;
+ u16 wqebb_cnt = 0;
+
+ hw_ci = hinic3_get_sq_hw_ci(txq->sq);
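+	/* Ensure the HW consumer index is read before the tx_info entries
+	 * it refers to.
+	 */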
+ dma_rmb();
+ sw_ci = hinic3_get_sq_local_ci(txq->sq);
+
+ do {
+ tx_info = &txq->tx_info[sw_ci];
+
+ /* Did all wqebb of this wqe complete? */
+ if (hw_ci == sw_ci ||
+ ((hw_ci - sw_ci) & txq->q_mask) < tx_info->wqebb_cnt)
+ break;
+
+ sw_ci = (sw_ci + tx_info->wqebb_cnt) & txq->q_mask;
+ net_prefetch(&txq->tx_info[sw_ci]);
+
+ wqebb_cnt += tx_info->wqebb_cnt;
+ bytes_compl += tx_info->skb->len;
+ pkts++;
+
+ hinic3_tx_unmap_skb(netdev, tx_info->skb, tx_info->dma_info);
+ napi_consume_skb(tx_info->skb, budget);
+ tx_info->skb = NULL;
+ } while (likely(pkts < HINIC3_TX_POLL_WEIGHT));
+
+ hinic3_wq_put_wqebbs(&txq->sq->wq, wqebb_cnt);
+
+ netif_subqueue_completed_wake(netdev, q_id, pkts, bytes_compl,
+ hinic3_wq_free_wqebbs(&txq->sq->wq),
+ txq->tx_start_thrs);
+
+ return pkts == HINIC3_TX_POLL_WEIGHT;
+}
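The completion loop above compares the hardware consumer index with the driver's local one under the queue mask, so a multi-WQEBB send is only reclaimed once all of its WQEBBs have completed, even when the indices wrap. A minimal user-space sketch of that masked-difference test, with a hypothetical queue depth and sample values (not part of the driver):

/*
 * Hypothetical sketch of the masked-difference completion test used in
 * hinic3_tx_poll(); Q_DEPTH and the sample values are made up.
 */
#include <stdio.h>

#define Q_DEPTH 256u
#define Q_MASK  (Q_DEPTH - 1)

static int wqe_completed(unsigned int hw_ci, unsigned int sw_ci,
			 unsigned int wqebb_cnt)
{
	/* WQEBBs the hardware has consumed past sw_ci, modulo queue depth. */
	unsigned int done = (hw_ci - sw_ci) & Q_MASK;

	return hw_ci != sw_ci && done >= wqebb_cnt;
}

int main(void)
{
	/* A 3-WQEBB send starting at slot 254 occupies slots 254, 255 and 0. */
	printf("%d\n", wqe_completed(1, 254, 3)); /* 1: (1 - 254) & 255 == 3 */
	printf("%d\n", wqe_completed(0, 254, 3)); /* 0: only 2 WQEBBs done   */
	return 0;
}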
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h
new file mode 100644
index 000000000000..7e1b872ba752
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_TX_H_
+#define _HINIC3_TX_H_
+
+#include <linux/bitops.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/netdevice.h>
+#include <net/checksum.h>
+
+#define VXLAN_OFFLOAD_PORT_LE cpu_to_be16(4789)
+#define TCP_HDR_DATA_OFF_UNIT_SHIFT 2
+#define TRANSPORT_OFFSET(l4_hdr, skb) ((l4_hdr) - (skb)->data)
+
+#define HINIC3_COMPACT_WQEE_SKB_MAX_LEN 16383
+#define HINIC3_TX_POLL_WEIGHT 64
+#define HINIC3_DEFAULT_STOP_THRS 6
+#define HINIC3_DEFAULT_START_THRS 24
+
+enum sq_wqe_data_format {
+ SQ_NORMAL_WQE = 0,
+};
+
+enum sq_wqe_ec_type {
+ SQ_WQE_COMPACT_TYPE = 0,
+ SQ_WQE_EXTENDED_TYPE = 1,
+};
+
+enum sq_wqe_tasksect_len_type {
+ SQ_WQE_TASKSECT_46BITS = 0,
+ SQ_WQE_TASKSECT_16BYTES = 1,
+};
+
+enum hinic3_tx_offload_type {
+ HINIC3_TX_OFFLOAD_TSO = BIT(0),
+ HINIC3_TX_OFFLOAD_CSUM = BIT(1),
+ HINIC3_TX_OFFLOAD_VLAN = BIT(2),
+ HINIC3_TX_OFFLOAD_INVALID = BIT(3),
+ HINIC3_TX_OFFLOAD_ESP = BIT(4),
+};
+
+#define SQ_CTRL_BUFDESC_NUM_MASK GENMASK(26, 19)
+#define SQ_CTRL_TASKSECT_LEN_MASK BIT(27)
+#define SQ_CTRL_DATA_FORMAT_MASK BIT(28)
+#define SQ_CTRL_EXTENDED_MASK BIT(30)
+#define SQ_CTRL_OWNER_MASK BIT(31)
+#define SQ_CTRL_SET(val, member) \
+ FIELD_PREP(SQ_CTRL_##member##_MASK, val)
+
+#define SQ_CTRL_QUEUE_INFO_PLDOFF_MASK GENMASK(9, 2)
+#define SQ_CTRL_QUEUE_INFO_UFO_MASK BIT(10)
+#define SQ_CTRL_QUEUE_INFO_TSO_MASK BIT(11)
+#define SQ_CTRL_QUEUE_INFO_MSS_MASK GENMASK(26, 13)
+#define SQ_CTRL_QUEUE_INFO_UC_MASK BIT(28)
+
+#define SQ_CTRL_QUEUE_INFO_SET(val, member) \
+ FIELD_PREP(SQ_CTRL_QUEUE_INFO_##member##_MASK, val)
+#define SQ_CTRL_QUEUE_INFO_GET(val, member) \
+ FIELD_GET(SQ_CTRL_QUEUE_INFO_##member##_MASK, le32_to_cpu(val))
+
+#define SQ_CTRL_MAX_PLDOFF 221
+
+#define SQ_TASK_INFO0_TUNNEL_FLAG_MASK BIT(19)
+#define SQ_TASK_INFO0_INNER_L4_EN_MASK BIT(24)
+#define SQ_TASK_INFO0_INNER_L3_EN_MASK BIT(25)
+#define SQ_TASK_INFO0_OUT_L4_EN_MASK BIT(27)
+#define SQ_TASK_INFO0_OUT_L3_EN_MASK BIT(28)
+#define SQ_TASK_INFO0_SET(val, member) \
+ FIELD_PREP(SQ_TASK_INFO0_##member##_MASK, val)
+
+#define SQ_TASK_INFO3_VLAN_TAG_MASK GENMASK(15, 0)
+#define SQ_TASK_INFO3_VLAN_TPID_MASK GENMASK(18, 16)
+#define SQ_TASK_INFO3_VLAN_TAG_VALID_MASK BIT(19)
+#define SQ_TASK_INFO3_SET(val, member) \
+ FIELD_PREP(SQ_TASK_INFO3_##member##_MASK, val)
+
+struct hinic3_sq_wqe_desc {
+ __le32 ctrl_len;
+ __le32 queue_info;
+ __le32 hi_addr;
+ __le32 lo_addr;
+};
+
+struct hinic3_sq_task {
+ __le32 pkt_info0;
+ __le32 ip_identify;
+ __le32 rsvd;
+ __le32 vlan_offload;
+};
+
+struct hinic3_sq_wqe_combo {
+ struct hinic3_sq_wqe_desc *ctrl_bd0;
+ struct hinic3_sq_task *task;
+ struct hinic3_sq_bufdesc *bds_head;
+ struct hinic3_sq_bufdesc *bds_sec2;
+ u16 first_bds_num;
+ u32 wqe_type;
+ u32 task_type;
+};
+
+struct hinic3_dma_info {
+ dma_addr_t dma;
+ u32 len;
+};
+
+struct hinic3_tx_info {
+ struct sk_buff *skb;
+ u16 wqebb_cnt;
+ struct hinic3_dma_info *dma_info;
+};
+
+struct hinic3_txq {
+ struct net_device *netdev;
+ struct device *dev;
+
+ u16 q_id;
+ u16 tx_stop_thrs;
+ u16 tx_start_thrs;
+ u32 q_mask;
+ u32 q_depth;
+
+ struct hinic3_tx_info *tx_info;
+ struct hinic3_io_queue *sq;
+} ____cacheline_aligned;
+
+struct hinic3_dyna_txq_res {
+ struct hinic3_tx_info *tx_info;
+ struct hinic3_dma_info *bds;
+};
+
+int hinic3_alloc_txqs(struct net_device *netdev);
+void hinic3_free_txqs(struct net_device *netdev);
+
+int hinic3_alloc_txqs_res(struct net_device *netdev, u16 num_sq,
+ u32 sq_depth, struct hinic3_dyna_txq_res *txqs_res);
+void hinic3_free_txqs_res(struct net_device *netdev, u16 num_sq,
+ u32 sq_depth, struct hinic3_dyna_txq_res *txqs_res);
+int hinic3_configure_txqs(struct net_device *netdev, u16 num_sq,
+ u32 sq_depth, struct hinic3_dyna_txq_res *txqs_res);
+
+netdev_tx_t hinic3_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
+bool hinic3_tx_poll(struct hinic3_txq *txq, int budget);
+void hinic3_flush_txqs(struct net_device *netdev);
+
+#endif
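The SQ_CTRL_*_SET helpers above are thin FIELD_PREP wrappers, so a WQE control dword is built by OR-ing the per-field values and converting to little-endian once. A hedged sketch of such a composition follows; the helper function and its field choices are illustrative, since the driver's actual transmit path is not part of this hunk:

/*
 * Hypothetical example of composing the ctrl_len word of a
 * hinic3_sq_wqe_desc from the SET helpers defined above. Bits 18:0
 * presumably carry the data length in the real driver; omitted here.
 */
static __le32 example_build_ctrl_len(u8 bufdesc_num, u8 owner)
{
	u32 ctrl;

	ctrl = SQ_CTRL_SET(bufdesc_num, BUFDESC_NUM) |
	       SQ_CTRL_SET(SQ_WQE_TASKSECT_16BYTES, TASKSECT_LEN) |
	       SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) |
	       SQ_CTRL_SET(SQ_WQE_EXTENDED_TYPE, EXTENDED) |
	       SQ_CTRL_SET(owner, OWNER);

	return cpu_to_le32(ctrl);
}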
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_wq.c b/drivers/net/ethernet/huawei/hinic3/hinic3_wq.c
new file mode 100644
index 000000000000..bc3ffdc25cf6
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_wq.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/dma-mapping.h>
+
+#include "hinic3_hwdev.h"
+#include "hinic3_wq.h"
+
+#define WQ_MIN_DEPTH 64
+#define WQ_MAX_DEPTH 65536
+#define WQ_PAGE_ADDR_SIZE sizeof(u64)
+#define WQ_MAX_NUM_PAGES (HINIC3_MIN_PAGE_SIZE / WQ_PAGE_ADDR_SIZE)
+
+static int wq_init_wq_block(struct hinic3_hwdev *hwdev, struct hinic3_wq *wq)
+{
+ struct hinic3_queue_pages *qpages = &wq->qpages;
+ int i;
+
+ if (hinic3_wq_is_0_level_cla(wq)) {
+ wq->wq_block_paddr = qpages->pages[0].align_paddr;
+ wq->wq_block_vaddr = qpages->pages[0].align_vaddr;
+
+ return 0;
+ }
+
+ if (wq->qpages.num_pages > WQ_MAX_NUM_PAGES) {
+ dev_err(hwdev->dev, "wq num_pages exceeds limit: %lu\n",
+ WQ_MAX_NUM_PAGES);
+ return -EFAULT;
+ }
+
+ wq->wq_block_vaddr = dma_alloc_coherent(hwdev->dev,
+ HINIC3_MIN_PAGE_SIZE,
+ &wq->wq_block_paddr,
+ GFP_KERNEL);
+ if (!wq->wq_block_vaddr)
+ return -ENOMEM;
+
+ for (i = 0; i < qpages->num_pages; i++)
+ wq->wq_block_vaddr[i] = cpu_to_be64(qpages->pages[i].align_paddr);
+
+ return 0;
+}
+
+static int wq_alloc_pages(struct hinic3_hwdev *hwdev, struct hinic3_wq *wq)
+{
+ int err;
+
+ err = hinic3_queue_pages_alloc(hwdev, &wq->qpages, 0);
+ if (err)
+ return err;
+
+ err = wq_init_wq_block(hwdev, wq);
+ if (err) {
+ hinic3_queue_pages_free(hwdev, &wq->qpages);
+ return err;
+ }
+
+ return 0;
+}
+
+static void wq_free_pages(struct hinic3_hwdev *hwdev, struct hinic3_wq *wq)
+{
+ if (!hinic3_wq_is_0_level_cla(wq))
+ dma_free_coherent(hwdev->dev,
+ HINIC3_MIN_PAGE_SIZE,
+ wq->wq_block_vaddr,
+ wq->wq_block_paddr);
+
+ hinic3_queue_pages_free(hwdev, &wq->qpages);
+}
+
+int hinic3_wq_create(struct hinic3_hwdev *hwdev, struct hinic3_wq *wq,
+ u32 q_depth, u16 wqebb_size)
+{
+ u32 wq_page_size;
+
+ if (q_depth < WQ_MIN_DEPTH || q_depth > WQ_MAX_DEPTH ||
+ !is_power_of_2(q_depth) || !is_power_of_2(wqebb_size)) {
+ dev_err(hwdev->dev, "Invalid WQ: q_depth %u, wqebb_size %u\n",
+ q_depth, wqebb_size);
+ return -EINVAL;
+ }
+
+ wq_page_size = ALIGN(hwdev->wq_page_size, HINIC3_MIN_PAGE_SIZE);
+
+ memset(wq, 0, sizeof(*wq));
+ wq->q_depth = q_depth;
+ wq->idx_mask = q_depth - 1;
+
+ hinic3_queue_pages_init(&wq->qpages, q_depth, wq_page_size, wqebb_size);
+
+ return wq_alloc_pages(hwdev, wq);
+}
+
+void hinic3_wq_destroy(struct hinic3_hwdev *hwdev, struct hinic3_wq *wq)
+{
+ wq_free_pages(hwdev, wq);
+}
+
+void hinic3_wq_reset(struct hinic3_wq *wq)
+{
+ struct hinic3_queue_pages *qpages = &wq->qpages;
+ u16 pg_idx;
+
+ wq->cons_idx = 0;
+ wq->prod_idx = 0;
+
+ for (pg_idx = 0; pg_idx < qpages->num_pages; pg_idx++)
+ memset(qpages->pages[pg_idx].align_vaddr, 0, qpages->page_size);
+}
+
+void hinic3_wq_get_multi_wqebbs(struct hinic3_wq *wq,
+ u16 num_wqebbs, u16 *prod_idx,
+ struct hinic3_sq_bufdesc **first_part_wqebbs,
+ struct hinic3_sq_bufdesc **second_part_wqebbs,
+ u16 *first_part_wqebbs_num)
+{
+ u32 idx, remaining;
+
+ idx = wq->prod_idx & wq->idx_mask;
+ wq->prod_idx += num_wqebbs;
+ *prod_idx = idx;
+ *first_part_wqebbs = get_q_element(&wq->qpages, idx, &remaining);
+ if (likely(remaining >= num_wqebbs)) {
+ *first_part_wqebbs_num = num_wqebbs;
+ *second_part_wqebbs = NULL;
+ } else {
+ *first_part_wqebbs_num = remaining;
+ idx += remaining;
+ *second_part_wqebbs = get_q_element(&wq->qpages, idx, NULL);
+ }
+}
+
+bool hinic3_wq_is_0_level_cla(const struct hinic3_wq *wq)
+{
+ return wq->qpages.num_pages == 1;
+}
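hinic3_wq_get_multi_wqebbs() above hands the allocation back in up to two contiguous chunks because WQEBBs live in fixed-size queue pages: when a request crosses a page boundary, the first chunk finishes the current page and the second starts at the top of the next one. A hypothetical caller sketch (not part of the patch) showing how the split is consumed:

/*
 * Hypothetical caller of hinic3_wq_get_multi_wqebbs(): copies num_wqebbs
 * buffer descriptors into the queue, handling the two-part return when
 * the allocation wraps across a WQ page.
 */
static void example_fill_bufdescs(struct hinic3_wq *wq,
				  const struct hinic3_sq_bufdesc *src,
				  u16 num_wqebbs)
{
	struct hinic3_sq_bufdesc *part1, *part2;
	u16 pi, part1_num, i;

	hinic3_wq_get_multi_wqebbs(wq, num_wqebbs, &pi, &part1, &part2,
				   &part1_num);

	/* First chunk ends the current WQ page... */
	for (i = 0; i < part1_num; i++)
		part1[i] = src[i];

	/* ...any remainder starts at the beginning of the next page. */
	for (i = part1_num; i < num_wqebbs; i++)
		part2[i - part1_num] = src[i];
}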
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_wq.h b/drivers/net/ethernet/huawei/hinic3/hinic3_wq.h
new file mode 100644
index 000000000000..9b3f012bec80
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_wq.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_WQ_H_
+#define _HINIC3_WQ_H_
+
+#include <linux/io.h>
+
+#include "hinic3_queue_common.h"
+
+struct hinic3_sq_bufdesc {
+ /* 31-bit length; L2NIC only uses length[17:0] */
+ __le32 len;
+ __le32 rsvd;
+ __le32 hi_addr;
+ __le32 lo_addr;
+};
+
+/* Work queue is used to submit elements (tx, rx, cmd) to hw.
+ * Driver is the producer that advances prod_idx. cons_idx is advanced when
+ * HW reports completions of previously submitted elements.
+ */
+struct hinic3_wq {
+ struct hinic3_queue_pages qpages;
+ /* Unmasked producer/consumer indices that are advanced to natural
+ * integer overflow regardless of queue depth.
+ */
+ u16 cons_idx;
+ u16 prod_idx;
+
+ u32 q_depth;
+ u16 idx_mask;
+
+ /* Work Queue (logical WQEBB array) is mapped to hw via Chip Logical
+ * Address (CLA) using 1 of 2 levels:
+ * level 0 - direct mapping of single wq page
+ * level 1 - indirect mapping of multiple pages via additional page
+ * table.
+ * When wq uses level 1, wq_block will hold the allocated indirection
+ * table.
+ */
+ dma_addr_t wq_block_paddr;
+ __be64 *wq_block_vaddr;
+} ____cacheline_aligned;
+
+/* Get number of elements in work queue that are in-use. */
+static inline u16 hinic3_wq_get_used(const struct hinic3_wq *wq)
+{
+ return READ_ONCE(wq->prod_idx) - READ_ONCE(wq->cons_idx);
+}
+
+static inline u16 hinic3_wq_free_wqebbs(struct hinic3_wq *wq)
+{
+ /* Don't allow queue to become completely full, report (free - 1). */
+ return wq->q_depth - hinic3_wq_get_used(wq) - 1;
+}
+
+static inline void *hinic3_wq_get_one_wqebb(struct hinic3_wq *wq, u16 *pi)
+{
+ *pi = wq->prod_idx & wq->idx_mask;
+ wq->prod_idx++;
+
+ return get_q_element(&wq->qpages, *pi, NULL);
+}
+
+static inline void hinic3_wq_put_wqebbs(struct hinic3_wq *wq, u16 num_wqebbs)
+{
+ wq->cons_idx += num_wqebbs;
+}
+
+static inline u64 hinic3_wq_get_first_wqe_page_addr(const struct hinic3_wq *wq)
+{
+ return wq->qpages.pages[0].align_paddr;
+}
+
+int hinic3_wq_create(struct hinic3_hwdev *hwdev, struct hinic3_wq *wq,
+ u32 q_depth, u16 wqebb_size);
+void hinic3_wq_destroy(struct hinic3_hwdev *hwdev, struct hinic3_wq *wq);
+void hinic3_wq_reset(struct hinic3_wq *wq);
+void hinic3_wq_get_multi_wqebbs(struct hinic3_wq *wq,
+ u16 num_wqebbs, u16 *prod_idx,
+ struct hinic3_sq_bufdesc **first_part_wqebbs,
+ struct hinic3_sq_bufdesc **second_part_wqebbs,
+ u16 *first_part_wqebbs_num);
+bool hinic3_wq_is_0_level_cla(const struct hinic3_wq *wq);
+
+#endif
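As the comment in struct hinic3_wq notes, prod_idx and cons_idx are deliberately left unmasked and simply overflow as 16-bit counters; the in-use count is their plain u16 difference, which stays correct across the wrap, and one slot is reported as unavailable so the queue never becomes completely full (as noted on hinic3_wq_free_wqebbs()). A small user-space sketch of that arithmetic, with made-up values:

/* Hypothetical demonstration of the unmasked u16 index arithmetic. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t prod = 10;	/* producer has wrapped past 65535 back to 10  */
	uint16_t cons = 65530;	/* consumer is still just below the wrap point */
	uint16_t q_depth = 4096;

	uint16_t used = (uint16_t)(prod - cons);	/* 16 entries in flight */
	uint16_t free_slots = q_depth - used - 1;	/* one slot kept unused */

	printf("used=%u free=%u\n", used, free_slots);	/* used=16 free=4079 */
	return 0;
}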
diff --git a/drivers/net/ethernet/i825xx/sni_82596.c b/drivers/net/ethernet/i825xx/sni_82596.c
index 813403c2628f..baa598988f47 100644
--- a/drivers/net/ethernet/i825xx/sni_82596.c
+++ b/drivers/net/ethernet/i825xx/sni_82596.c
@@ -168,7 +168,7 @@ static void sni_82596_driver_remove(struct platform_device *pdev)
static struct platform_driver sni_82596_driver = {
.probe = sni_82596_probe,
- .remove_new = sni_82596_driver_remove,
+ .remove = sni_82596_driver_remove,
.driver = {
.name = sni_82596_string,
},
diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c
index f2d4669c81cf..58a3d28d938c 100644
--- a/drivers/net/ethernet/i825xx/sun3_82586.c
+++ b/drivers/net/ethernet/i825xx/sun3_82586.c
@@ -1012,6 +1012,7 @@ sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev)
if(skb->len > XMIT_BUFF_SIZE)
{
printk("%s: Sorry, max. framelength is %d bytes. The length of your frame is %d bytes.\n",dev->name,XMIT_BUFF_SIZE,skb->len);
+ dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/ibm/Kconfig b/drivers/net/ethernet/ibm/Kconfig
index c0c112d95b89..4f4b23465c47 100644
--- a/drivers/net/ethernet/ibm/Kconfig
+++ b/drivers/net/ethernet/ibm/Kconfig
@@ -27,6 +27,19 @@ config IBMVETH
To compile this driver as a module, choose M here. The module will
be called ibmveth.
+config IBMVETH_KUNIT_TEST
+ bool "KUnit test for IBM LAN Virtual Ethernet support" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ depends on KUNIT=y && IBMVETH=y
+ default KUNIT_ALL_TESTS
+ help
+ This builds unit tests for the IBM LAN Virtual Ethernet driver.
+
+ For more information on KUnit and unit tests in general, please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
source "drivers/net/ethernet/ibm/emac/Kconfig"
config EHEA
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 1e29e5c9a2df..9b006bc353a1 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -121,7 +121,7 @@ static struct platform_driver ehea_driver = {
.of_match_table = ehea_device_table,
},
.probe = ehea_probe_adapter,
- .remove_new = ehea_remove,
+ .remove = ehea_remove,
};
void ehea_dump(void *adr, int len, char *msg)
@@ -3063,14 +3063,13 @@ static void ehea_shutdown_single_port(struct ehea_port *port)
static int ehea_setup_ports(struct ehea_adapter *adapter)
{
struct device_node *lhea_dn;
- struct device_node *eth_dn = NULL;
+ struct device_node *eth_dn;
const u32 *dn_log_port_id;
int i = 0;
lhea_dn = adapter->ofdev->dev.of_node;
- while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
-
+ for_each_child_of_node(lhea_dn, eth_dn) {
dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
NULL);
if (!dn_log_port_id) {
@@ -3102,12 +3101,11 @@ static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
u32 logical_port_id)
{
struct device_node *lhea_dn;
- struct device_node *eth_dn = NULL;
+ struct device_node *eth_dn;
const u32 *dn_log_port_id;
lhea_dn = adapter->ofdev->dev.of_node;
- while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
-
+ for_each_child_of_node(lhea_dn, eth_dn) {
dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
NULL);
if (dn_log_port_id)
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index e6e47b1842ea..417dfa18daae 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -32,7 +32,6 @@
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/bitops.h>
-#include <linux/workqueue.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
@@ -96,11 +95,6 @@ MODULE_LICENSE("GPL");
static u32 busy_phy_map;
static DEFINE_MUTEX(emac_phy_map_lock);
-/* This is the wait queue used to wait on any event related to probe, that
- * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
- */
-static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
-
/* Having stable interface names is a doomed idea. However, it would be nice
* if we didn't have completely random interface names at boot too :-) It's
* just a matter of making everybody's life easier. Since we are doing
@@ -116,9 +110,6 @@ static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
#define EMAC_BOOT_LIST_SIZE 4
static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];
-/* How long should I wait for dependent devices ? */
-#define EMAC_PROBE_DEP_TIMEOUT (HZ * 5)
-
/* I don't want to litter system log with timeout errors
* when we have brain-damaged PHY.
*/
@@ -418,8 +409,8 @@ do_retry:
static void emac_hash_mc(struct emac_instance *dev)
{
+ u32 __iomem *gaht_base = emac_gaht_base(dev);
const int regs = EMAC_XAHT_REGS(dev);
- u32 *gaht_base = emac_gaht_base(dev);
u32 gaht_temp[EMAC_XAHT_MAX_REGS];
struct netdev_hw_addr *ha;
int i;
@@ -973,8 +964,6 @@ static void __emac_set_multicast_list(struct emac_instance *dev)
* we need is just to stop RX channel. This seems to work on all
* tested SoCs. --ebs
*
- * If we need the full reset, we might just trigger the workqueue
- * and do it async... a bit nasty but should work --BenH
*/
dev->mcast_pending = 0;
emac_rx_disable(dev);
@@ -1098,7 +1087,7 @@ static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
/* This is to prevent starting RX channel in emac_rx_enable() */
set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
- dev->ndev->mtu = new_mtu;
+ WRITE_ONCE(dev->ndev->mtu, new_mtu);
emac_full_tx_reset(dev);
}
@@ -1130,7 +1119,7 @@ static int emac_change_mtu(struct net_device *ndev, int new_mtu)
}
if (!ret) {
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
dev->rx_skb_size = emac_rx_skb_size(new_mtu);
dev->rx_sync_size = emac_rx_sync_size(new_mtu);
}
@@ -1228,18 +1217,10 @@ static void emac_print_link_status(struct emac_instance *dev)
static int emac_open(struct net_device *ndev)
{
struct emac_instance *dev = netdev_priv(ndev);
- int err, i;
+ int i;
DBG(dev, "open" NL);
- /* Setup error IRQ handler */
- err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
- if (err) {
- printk(KERN_ERR "%s: failed to request IRQ %d\n",
- ndev->name, dev->emac_irq);
- return err;
- }
-
/* Allocate RX ring */
for (i = 0; i < NUM_RX_BUFF; ++i)
if (emac_alloc_rx_skb(dev, i)) {
@@ -1293,8 +1274,6 @@ static int emac_open(struct net_device *ndev)
return 0;
oom:
emac_clean_rx_ring(dev);
- free_irq(dev->emac_irq, dev);
-
return -ENOMEM;
}
@@ -1408,8 +1387,6 @@ static int emac_close(struct net_device *ndev)
emac_clean_tx_ring(dev);
emac_clean_rx_ring(dev);
- free_irq(dev->emac_irq, dev);
-
netif_carrier_off(ndev);
return 0;
@@ -1750,6 +1727,7 @@ static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
/* NAPI poll context */
static int emac_poll_rx(void *param, int budget)
{
+ LIST_HEAD(rx_list);
struct emac_instance *dev = param;
int slot = dev->rx_slot, received = 0;
@@ -1806,8 +1784,7 @@ static int emac_poll_rx(void *param, int budget)
skb->protocol = eth_type_trans(skb, dev->ndev);
emac_rx_csum(dev, skb, ctrl);
- if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
- ++dev->estats.rx_dropped_stack;
+ list_add_tail(&skb->list, &rx_list);
next:
++dev->stats.rx_packets;
skip:
@@ -1851,6 +1828,8 @@ static int emac_poll_rx(void *param, int budget)
goto next;
}
+ netif_receive_skb_list(&rx_list);
+
if (received) {
DBG2(dev, "rx %d BDs" NL, received);
dev->rx_slot = slot;
@@ -2390,7 +2369,9 @@ static int emac_check_deps(struct emac_instance *dev,
if (deps[i].drvdata != NULL)
there++;
}
- return there == EMAC_DEP_COUNT;
+ if (there != EMAC_DEP_COUNT)
+ return -EPROBE_DEFER;
+ return 0;
}
static void emac_put_deps(struct emac_instance *dev)
@@ -2402,19 +2383,6 @@ static void emac_put_deps(struct emac_instance *dev)
platform_device_put(dev->tah_dev);
}
-static int emac_of_bus_notify(struct notifier_block *nb, unsigned long action,
- void *data)
-{
- /* We are only intereted in device addition */
- if (action == BUS_NOTIFY_BOUND_DRIVER)
- wake_up_all(&emac_probe_wait);
- return 0;
-}
-
-static struct notifier_block emac_of_bus_notifier = {
- .notifier_call = emac_of_bus_notify
-};
-
static int emac_wait_deps(struct emac_instance *dev)
{
struct emac_depentry deps[EMAC_DEP_COUNT];
@@ -2431,18 +2399,13 @@ static int emac_wait_deps(struct emac_instance *dev)
deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
if (dev->blist && dev->blist > emac_boot_list)
deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
- bus_register_notifier(&platform_bus_type, &emac_of_bus_notifier);
- wait_event_timeout(emac_probe_wait,
- emac_check_deps(dev, deps),
- EMAC_PROBE_DEP_TIMEOUT);
- bus_unregister_notifier(&platform_bus_type, &emac_of_bus_notifier);
- err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
+ err = emac_check_deps(dev, deps);
for (i = 0; i < EMAC_DEP_COUNT; i++) {
of_node_put(deps[i].node);
if (err)
platform_device_put(deps[i].ofdev);
}
- if (err == 0) {
+ if (!err) {
dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
@@ -2456,22 +2419,21 @@ static int emac_wait_deps(struct emac_instance *dev)
static int emac_read_uint_prop(struct device_node *np, const char *name,
u32 *val, int fatal)
{
- int len;
- const u32 *prop = of_get_property(np, name, &len);
- if (prop == NULL || len < sizeof(u32)) {
+ int err;
+
+ err = of_property_read_u32(np, name, val);
+ if (err) {
if (fatal)
- printk(KERN_ERR "%pOF: missing %s property\n",
- np, name);
- return -ENODEV;
+ pr_err("%pOF: missing %s property", np, name);
+ return err;
}
- *val = *prop;
return 0;
}
static void emac_adjust_link(struct net_device *ndev)
{
struct emac_instance *dev = netdev_priv(ndev);
- struct phy_device *phy = dev->phy_dev;
+ struct phy_device *phy = ndev->phydev;
dev->phy.autoneg = phy->autoneg;
dev->phy.speed = phy->speed;
@@ -2522,22 +2484,20 @@ static int emac_mdio_phy_start_aneg(struct mii_phy *phy,
static int emac_mdio_setup_aneg(struct mii_phy *phy, u32 advertise)
{
struct net_device *ndev = phy->dev;
- struct emac_instance *dev = netdev_priv(ndev);
phy->autoneg = AUTONEG_ENABLE;
phy->advertising = advertise;
- return emac_mdio_phy_start_aneg(phy, dev->phy_dev);
+ return emac_mdio_phy_start_aneg(phy, ndev->phydev);
}
static int emac_mdio_setup_forced(struct mii_phy *phy, int speed, int fd)
{
struct net_device *ndev = phy->dev;
- struct emac_instance *dev = netdev_priv(ndev);
phy->autoneg = AUTONEG_DISABLE;
phy->speed = speed;
phy->duplex = fd;
- return emac_mdio_phy_start_aneg(phy, dev->phy_dev);
+ return emac_mdio_phy_start_aneg(phy, ndev->phydev);
}
static int emac_mdio_poll_link(struct mii_phy *phy)
@@ -2546,20 +2506,19 @@ static int emac_mdio_poll_link(struct mii_phy *phy)
struct emac_instance *dev = netdev_priv(ndev);
int res;
- res = phy_read_status(dev->phy_dev);
+ res = phy_read_status(ndev->phydev);
if (res) {
dev_err(&dev->ofdev->dev, "link update failed (%d).", res);
return ethtool_op_get_link(ndev);
}
- return dev->phy_dev->link;
+ return ndev->phydev->link;
}
static int emac_mdio_read_link(struct mii_phy *phy)
{
struct net_device *ndev = phy->dev;
- struct emac_instance *dev = netdev_priv(ndev);
- struct phy_device *phy_dev = dev->phy_dev;
+ struct phy_device *phy_dev = ndev->phydev;
int res;
res = phy_read_status(phy_dev);
@@ -2576,10 +2535,9 @@ static int emac_mdio_read_link(struct mii_phy *phy)
static int emac_mdio_init_phy(struct mii_phy *phy)
{
struct net_device *ndev = phy->dev;
- struct emac_instance *dev = netdev_priv(ndev);
- phy_start(dev->phy_dev);
- return phy_init_hw(dev->phy_dev);
+ phy_start(ndev->phydev);
+ return phy_init_hw(ndev->phydev);
}
static const struct mii_phy_ops emac_dt_mdio_phy_ops = {
@@ -2593,36 +2551,32 @@ static const struct mii_phy_ops emac_dt_mdio_phy_ops = {
static int emac_dt_mdio_probe(struct emac_instance *dev)
{
struct device_node *mii_np;
+ struct mii_bus *bus;
int res;
- mii_np = of_get_child_by_name(dev->ofdev->dev.of_node, "mdio");
+ mii_np = of_get_available_child_by_name(dev->ofdev->dev.of_node, "mdio");
if (!mii_np) {
dev_err(&dev->ofdev->dev, "no mdio definition found.");
return -ENODEV;
}
- if (!of_device_is_available(mii_np)) {
- res = -ENODEV;
- goto put_node;
- }
-
- dev->mii_bus = devm_mdiobus_alloc(&dev->ofdev->dev);
- if (!dev->mii_bus) {
+ bus = devm_mdiobus_alloc(&dev->ofdev->dev);
+ if (!bus) {
res = -ENOMEM;
goto put_node;
}
- dev->mii_bus->priv = dev->ndev;
- dev->mii_bus->parent = dev->ndev->dev.parent;
- dev->mii_bus->name = "emac_mdio";
- dev->mii_bus->read = &emac_mii_bus_read;
- dev->mii_bus->write = &emac_mii_bus_write;
- dev->mii_bus->reset = &emac_mii_bus_reset;
- snprintf(dev->mii_bus->id, MII_BUS_ID_SIZE, "%s", dev->ofdev->name);
- res = of_mdiobus_register(dev->mii_bus, mii_np);
+ bus->priv = dev->ndev;
+ bus->parent = dev->ndev->dev.parent;
+ bus->name = "emac_mdio";
+ bus->read = &emac_mii_bus_read;
+ bus->write = &emac_mii_bus_write;
+ bus->reset = &emac_mii_bus_reset;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev->ofdev->name);
+ res = devm_of_mdiobus_register(&dev->ofdev->dev, bus, mii_np);
if (res) {
dev_err(&dev->ofdev->dev, "cannot register MDIO bus %s (%d)",
- dev->mii_bus->name, res);
+ bus->name, res);
}
put_node:
@@ -2633,26 +2587,28 @@ static int emac_dt_mdio_probe(struct emac_instance *dev)
static int emac_dt_phy_connect(struct emac_instance *dev,
struct device_node *phy_handle)
{
+ struct phy_device *phy_dev;
+
dev->phy.def = devm_kzalloc(&dev->ofdev->dev, sizeof(*dev->phy.def),
GFP_KERNEL);
if (!dev->phy.def)
return -ENOMEM;
- dev->phy_dev = of_phy_connect(dev->ndev, phy_handle, &emac_adjust_link,
- 0, dev->phy_mode);
- if (!dev->phy_dev) {
+ phy_dev = of_phy_connect(dev->ndev, phy_handle, &emac_adjust_link, 0,
+ dev->phy_mode);
+ if (!phy_dev) {
dev_err(&dev->ofdev->dev, "failed to connect to PHY.\n");
return -ENODEV;
}
- dev->phy.def->phy_id = dev->phy_dev->drv->phy_id;
- dev->phy.def->phy_id_mask = dev->phy_dev->drv->phy_id_mask;
- dev->phy.def->name = dev->phy_dev->drv->name;
+ dev->phy.def->phy_id = phy_dev->drv->phy_id;
+ dev->phy.def->phy_id_mask = phy_dev->drv->phy_id_mask;
+ dev->phy.def->name = phy_dev->drv->name;
dev->phy.def->ops = &emac_dt_mdio_phy_ops;
ethtool_convert_link_mode_to_legacy_u32(&dev->phy.features,
- dev->phy_dev->supported);
- dev->phy.address = dev->phy_dev->mdio.addr;
- dev->phy.mode = dev->phy_dev->interface;
+ phy_dev->supported);
+ dev->phy.address = phy_dev->mdio.addr;
+ dev->phy.mode = phy_dev->interface;
return 0;
}
@@ -2668,8 +2624,6 @@ static int emac_dt_phy_probe(struct emac_instance *dev)
res = emac_dt_mdio_probe(dev);
if (!res) {
res = emac_dt_phy_connect(dev, phy_handle);
- if (res)
- mdiobus_unregister(dev->mii_bus);
}
}
@@ -2708,13 +2662,11 @@ static int emac_init_phy(struct emac_instance *dev)
return res;
res = of_phy_register_fixed_link(np);
- dev->phy_dev = of_phy_find_device(np);
- if (res || !dev->phy_dev) {
- mdiobus_unregister(dev->mii_bus);
+ ndev->phydev = of_phy_find_device(np);
+ if (res || !ndev->phydev)
return res ? res : -EINVAL;
- }
emac_adjust_link(dev->ndev);
- put_device(&dev->phy_dev->mdio.dev);
+ put_device(&ndev->phydev->mdio.dev);
}
return 0;
}
@@ -2980,9 +2932,12 @@ static int emac_init_config(struct emac_instance *dev)
/* Read MAC-address */
err = of_get_ethdev_address(np, dev->ndev);
- if (err)
- return dev_err_probe(&dev->ofdev->dev, err,
- "Can't get valid [local-]mac-address from OF !\n");
+ if (err == -EPROBE_DEFER)
+ return err;
+ if (err) {
+ dev_warn(&dev->ofdev->dev, "Can't get valid mac-address. Generating random.");
+ eth_hw_addr_random(dev->ndev);
+ }
/* IAHT and GAHT filter parameterization */
if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
@@ -3053,7 +3008,7 @@ static int emac_probe(struct platform_device *ofdev)
/* Allocate our net_device structure */
err = -ENOMEM;
- ndev = alloc_etherdev(sizeof(struct emac_instance));
+ ndev = devm_alloc_etherdev(&ofdev->dev, sizeof(struct emac_instance));
if (!ndev)
goto err_gone;
@@ -3064,43 +3019,45 @@ static int emac_probe(struct platform_device *ofdev)
SET_NETDEV_DEV(ndev, &ofdev->dev);
/* Initialize some embedded data structures */
- mutex_init(&dev->mdio_lock);
- mutex_init(&dev->link_lock);
+ err = devm_mutex_init(&ofdev->dev, &dev->mdio_lock);
+ if (err)
+ goto err_gone;
+
+ err = devm_mutex_init(&ofdev->dev, &dev->link_lock);
+ if (err)
+ goto err_gone;
+
spin_lock_init(&dev->lock);
INIT_WORK(&dev->reset_work, emac_reset_work);
/* Init various config data based on device-tree */
err = emac_init_config(dev);
if (err)
- goto err_free;
+ goto err_gone;
- /* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
- dev->emac_irq = irq_of_parse_and_map(np, 0);
- dev->wol_irq = irq_of_parse_and_map(np, 1);
- if (!dev->emac_irq) {
- printk(KERN_ERR "%pOF: Can't map main interrupt\n", np);
- err = -ENODEV;
- goto err_free;
+ /* Setup error IRQ handler */
+ dev->emac_irq = platform_get_irq(ofdev, 0);
+ err = devm_request_irq(&ofdev->dev, dev->emac_irq, emac_irq, 0, "EMAC",
+ dev);
+ if (err) {
+ dev_err_probe(&ofdev->dev, err, "failed to request IRQ %d",
+ dev->emac_irq);
+ goto err_gone;
}
+
ndev->irq = dev->emac_irq;
- /* Map EMAC regs */
- // TODO : platform_get_resource() and devm_ioremap_resource()
- dev->emacp = of_iomap(np, 0);
- if (dev->emacp == NULL) {
- printk(KERN_ERR "%pOF: Can't map device registers!\n", np);
- err = -ENOMEM;
- goto err_irq_unmap;
+ dev->emacp = devm_platform_ioremap_resource(ofdev, 0);
+ if (IS_ERR(dev->emacp)) {
+ dev_err(&ofdev->dev, "can't map device registers");
+ err = PTR_ERR(dev->emacp);
+ goto err_gone;
}
/* Wait for dependent devices */
err = emac_wait_deps(dev);
- if (err) {
- printk(KERN_ERR
- "%pOF: Timeout waiting for dependent devices\n", np);
- /* display more info about what's missing ? */
- goto err_reg_unmap;
- }
+ if (err)
+ goto err_gone;
dev->mal = platform_get_drvdata(dev->mal_dev);
if (dev->mdio_dev != NULL)
dev->mdio_instance = platform_get_drvdata(dev->mdio_dev);
@@ -3187,7 +3144,7 @@ static int emac_probe(struct platform_device *ofdev)
netif_carrier_off(ndev);
- err = register_netdev(ndev);
+ err = devm_register_netdev(&ofdev->dev, ndev);
if (err) {
printk(KERN_ERR "%pOF: failed to register net device (%d)!\n",
np, err);
@@ -3200,10 +3157,6 @@ static int emac_probe(struct platform_device *ofdev)
wmb();
platform_set_drvdata(ofdev, dev);
- /* There's a new kid in town ! Let's tell everybody */
- wake_up_all(&emac_probe_wait);
-
-
printk(KERN_INFO "%s: EMAC-%d %pOF, MAC %pM\n",
ndev->name, dev->cell_index, np, ndev->dev_addr);
@@ -3232,24 +3185,9 @@ static int emac_probe(struct platform_device *ofdev)
mal_unregister_commac(dev->mal, &dev->commac);
err_rel_deps:
emac_put_deps(dev);
- err_reg_unmap:
- iounmap(dev->emacp);
- err_irq_unmap:
- if (dev->wol_irq)
- irq_dispose_mapping(dev->wol_irq);
- if (dev->emac_irq)
- irq_dispose_mapping(dev->emac_irq);
- err_free:
- free_netdev(ndev);
err_gone:
- /* if we were on the bootlist, remove us as we won't show up and
- * wake up all waiters to notify them in case they were waiting
- * on us
- */
- if (blist) {
+ if (blist)
*blist = NULL;
- wake_up_all(&emac_probe_wait);
- }
return err;
}
@@ -3259,8 +3197,6 @@ static void emac_remove(struct platform_device *ofdev)
DBG(dev, "remove" NL);
- unregister_netdev(dev->ndev);
-
cancel_work_sync(&dev->reset_work);
if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
@@ -3270,26 +3206,11 @@ static void emac_remove(struct platform_device *ofdev)
if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
zmii_detach(dev->zmii_dev, dev->zmii_port);
- if (dev->phy_dev)
- phy_disconnect(dev->phy_dev);
-
- if (dev->mii_bus)
- mdiobus_unregister(dev->mii_bus);
-
busy_phy_map &= ~(1 << dev->phy.address);
DBG(dev, "busy_phy_map now %#x" NL, busy_phy_map);
mal_unregister_commac(dev->mal, &dev->commac);
emac_put_deps(dev);
-
- iounmap(dev->emacp);
-
- if (dev->wol_irq)
- irq_dispose_mapping(dev->wol_irq);
- if (dev->emac_irq)
- irq_dispose_mapping(dev->emac_irq);
-
- free_netdev(dev->ndev);
}
/* XXX Features in here should be replaced by properties... */
@@ -3317,7 +3238,7 @@ static struct platform_driver emac_driver = {
.of_match_table = emac_match,
},
.probe = emac_probe,
- .remove_new = emac_remove,
+ .remove = emac_remove,
};
static void __init emac_make_bootlist(void)
@@ -3328,16 +3249,15 @@ static void __init emac_make_bootlist(void)
/* Collect EMACs */
while((np = of_find_all_nodes(np)) != NULL) {
- const u32 *idx;
+ u32 idx;
if (of_match_node(emac_match, np) == NULL)
continue;
if (of_property_read_bool(np, "unused"))
continue;
- idx = of_get_property(np, "cell-index", NULL);
- if (idx == NULL)
+ if (of_property_read_u32(np, "cell-index", &idx))
continue;
- cell_indices[i] = *idx;
+ cell_indices[i] = idx;
emac_boot_list[i++] = of_node_get(np);
if (i >= EMAC_BOOT_LIST_SIZE) {
of_node_put(np);
diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h
index 295516b07662..89fa1683ec3c 100644
--- a/drivers/net/ethernet/ibm/emac/core.h
+++ b/drivers/net/ethernet/ibm/emac/core.h
@@ -188,10 +188,6 @@ struct emac_instance {
struct emac_instance *mdio_instance;
struct mutex mdio_lock;
- /* Device-tree based phy configuration */
- struct mii_bus *mii_bus;
- struct phy_device *phy_dev;
-
/* ZMII infos if any */
u32 zmii_ph;
u32 zmii_port;
@@ -400,7 +396,7 @@ static inline int emac_has_feature(struct emac_instance *dev,
((u32)(1 << (EMAC_XAHT_WIDTH(dev) - 1)) >> \
((slot) & (u32)(EMAC_XAHT_WIDTH(dev) - 1)))
-static inline u32 *emac_xaht_base(struct emac_instance *dev)
+static inline u32 __iomem *emac_xaht_base(struct emac_instance *dev)
{
struct emac_regs __iomem *p = dev->emacp;
int offset;
@@ -413,10 +409,10 @@ static inline u32 *emac_xaht_base(struct emac_instance *dev)
else
offset = offsetof(struct emac_regs, u0.emac4.iaht1);
- return (u32 *)((ptrdiff_t)p + offset);
+ return (u32 __iomem *)((__force ptrdiff_t)p + offset);
}
-static inline u32 *emac_gaht_base(struct emac_instance *dev)
+static inline u32 __iomem *emac_gaht_base(struct emac_instance *dev)
{
/* GAHT registers always come after an identical number of
* IAHT registers.
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index 2439f7e96e05..7d70056e9008 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -524,7 +524,8 @@ static int mal_probe(struct platform_device *ofdev)
unsigned long irqflags;
irq_handler_t hdlr_serr, hdlr_txde, hdlr_rxde;
- mal = kzalloc(sizeof(struct mal_instance), GFP_KERNEL);
+ mal = devm_kzalloc(&ofdev->dev, sizeof(struct mal_instance),
+ GFP_KERNEL);
if (!mal)
return -ENOMEM;
@@ -539,8 +540,7 @@ static int mal_probe(struct platform_device *ofdev)
printk(KERN_ERR
"mal%d: can't find MAL num-tx-chans property!\n",
index);
- err = -ENODEV;
- goto fail;
+ return -ENODEV;
}
mal->num_tx_chans = prop[0];
@@ -549,8 +549,7 @@ static int mal_probe(struct platform_device *ofdev)
printk(KERN_ERR
"mal%d: can't find MAL num-rx-chans property!\n",
index);
- err = -ENODEV;
- goto fail;
+ return -ENODEV;
}
mal->num_rx_chans = prop[0];
@@ -558,15 +557,13 @@ static int mal_probe(struct platform_device *ofdev)
if (dcr_base == 0) {
printk(KERN_ERR
"mal%d: can't find DCR resource!\n", index);
- err = -ENODEV;
- goto fail;
+ return -ENODEV;
}
mal->dcr_host = dcr_map(ofdev->dev.of_node, dcr_base, 0x100);
if (!DCR_MAP_OK(mal->dcr_host)) {
printk(KERN_ERR
"mal%d: failed to map DCRs !\n", index);
- err = -ENODEV;
- goto fail;
+ return -ENODEV;
}
if (of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal-405ez")) {
@@ -578,36 +575,21 @@ static int mal_probe(struct platform_device *ofdev)
printk(KERN_ERR "%pOF: Support for 405EZ not enabled!\n",
ofdev->dev.of_node);
err = -ENODEV;
- goto fail;
-#endif
- }
-
- mal->txeob_irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
- mal->rxeob_irq = irq_of_parse_and_map(ofdev->dev.of_node, 1);
- mal->serr_irq = irq_of_parse_and_map(ofdev->dev.of_node, 2);
-
- if (mal_has_feature(mal, MAL_FTR_COMMON_ERR_INT)) {
- mal->txde_irq = mal->rxde_irq = mal->serr_irq;
- } else {
- mal->txde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 3);
- mal->rxde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 4);
- }
-
- if (!mal->txeob_irq || !mal->rxeob_irq || !mal->serr_irq ||
- !mal->txde_irq || !mal->rxde_irq) {
- printk(KERN_ERR
- "mal%d: failed to map interrupts !\n", index);
- err = -ENODEV;
goto fail_unmap;
+#endif
}
INIT_LIST_HEAD(&mal->poll_list);
INIT_LIST_HEAD(&mal->list);
spin_lock_init(&mal->lock);
- init_dummy_netdev(&mal->dummy_dev);
+ mal->dummy_dev = alloc_netdev_dummy(0);
+ if (!mal->dummy_dev) {
+ err = -ENOMEM;
+ goto fail_unmap;
+ }
- netif_napi_add_weight(&mal->dummy_dev, &mal->napi, mal_poll,
+ netif_napi_add_weight(mal->dummy_dev, &mal->napi, mal_poll,
CONFIG_IBM_EMAC_POLL_WEIGHT);
/* Load power-on reset defaults */
@@ -637,7 +619,7 @@ static int mal_probe(struct platform_device *ofdev)
GFP_KERNEL);
if (mal->bd_virt == NULL) {
err = -ENOMEM;
- goto fail_unmap;
+ goto fail_dummy;
}
for (i = 0; i < mal->num_tx_chans; ++i)
@@ -650,31 +632,43 @@ static int mal_probe(struct platform_device *ofdev)
sizeof(struct mal_descriptor) *
mal_rx_bd_offset(mal, i));
+ mal->txeob_irq = platform_get_irq(ofdev, 0);
+ mal->rxeob_irq = platform_get_irq(ofdev, 1);
+ mal->serr_irq = platform_get_irq(ofdev, 2);
+
if (mal_has_feature(mal, MAL_FTR_COMMON_ERR_INT)) {
+ mal->txde_irq = mal->rxde_irq = mal->serr_irq;
irqflags = IRQF_SHARED;
hdlr_serr = hdlr_txde = hdlr_rxde = mal_int;
} else {
+ mal->txde_irq = platform_get_irq(ofdev, 3);
+ mal->rxde_irq = platform_get_irq(ofdev, 4);
irqflags = 0;
hdlr_serr = mal_serr;
hdlr_txde = mal_txde;
hdlr_rxde = mal_rxde;
}
- err = request_irq(mal->serr_irq, hdlr_serr, irqflags, "MAL SERR", mal);
+ err = devm_request_irq(&ofdev->dev, mal->serr_irq, hdlr_serr, irqflags,
+ "MAL SERR", mal);
if (err)
goto fail2;
- err = request_irq(mal->txde_irq, hdlr_txde, irqflags, "MAL TX DE", mal);
+ err = devm_request_irq(&ofdev->dev, mal->txde_irq, hdlr_txde, irqflags,
+ "MAL TX DE", mal);
if (err)
- goto fail3;
- err = request_irq(mal->txeob_irq, mal_txeob, 0, "MAL TX EOB", mal);
+ goto fail2;
+ err = devm_request_irq(&ofdev->dev, mal->txeob_irq, mal_txeob, 0,
+ "MAL TX EOB", mal);
if (err)
- goto fail4;
- err = request_irq(mal->rxde_irq, hdlr_rxde, irqflags, "MAL RX DE", mal);
+ goto fail2;
+ err = devm_request_irq(&ofdev->dev, mal->rxde_irq, hdlr_rxde, irqflags,
+ "MAL RX DE", mal);
if (err)
- goto fail5;
- err = request_irq(mal->rxeob_irq, mal_rxeob, 0, "MAL RX EOB", mal);
+ goto fail2;
+ err = devm_request_irq(&ofdev->dev, mal->rxeob_irq, mal_rxeob, 0,
+ "MAL RX EOB", mal);
if (err)
- goto fail6;
+ goto fail2;
/* Enable all MAL SERR interrupt sources */
set_mal_dcrn(mal, MAL_IER, MAL_IER_EVENTS);
@@ -693,21 +687,12 @@ static int mal_probe(struct platform_device *ofdev)
return 0;
- fail6:
- free_irq(mal->rxde_irq, mal);
- fail5:
- free_irq(mal->txeob_irq, mal);
- fail4:
- free_irq(mal->txde_irq, mal);
- fail3:
- free_irq(mal->serr_irq, mal);
fail2:
dma_free_coherent(&ofdev->dev, bd_size, mal->bd_virt, mal->bd_dma);
+ fail_dummy:
+ free_netdev(mal->dummy_dev);
fail_unmap:
dcr_unmap(mal->dcr_host, 0x100);
- fail:
- kfree(mal);
-
return err;
}
@@ -726,20 +711,17 @@ static void mal_remove(struct platform_device *ofdev)
"mal%d: commac list is not empty on remove!\n",
mal->index);
- free_irq(mal->serr_irq, mal);
- free_irq(mal->txde_irq, mal);
- free_irq(mal->txeob_irq, mal);
- free_irq(mal->rxde_irq, mal);
- free_irq(mal->rxeob_irq, mal);
-
mal_reset(mal);
+ free_netdev(mal->dummy_dev);
+
+ dcr_unmap(mal->dcr_host, 0x100);
+
dma_free_coherent(&ofdev->dev,
sizeof(struct mal_descriptor) *
- (NUM_TX_BUFF * mal->num_tx_chans +
- NUM_RX_BUFF * mal->num_rx_chans), mal->bd_virt,
- mal->bd_dma);
- kfree(mal);
+ (NUM_TX_BUFF * mal->num_tx_chans +
+ NUM_RX_BUFF * mal->num_rx_chans),
+ mal->bd_virt, mal->bd_dma);
}
static const struct of_device_id mal_platform_match[] =
@@ -768,7 +750,7 @@ static struct platform_driver mal_of_driver = {
.of_match_table = mal_platform_match,
},
.probe = mal_probe,
- .remove_new = mal_remove,
+ .remove = mal_remove,
};
int __init mal_init(void)
diff --git a/drivers/net/ethernet/ibm/emac/mal.h b/drivers/net/ethernet/ibm/emac/mal.h
index d212373a72e7..e0ddc41186a2 100644
--- a/drivers/net/ethernet/ibm/emac/mal.h
+++ b/drivers/net/ethernet/ibm/emac/mal.h
@@ -205,7 +205,7 @@ struct mal_instance {
int index;
spinlock_t lock;
- struct net_device dummy_dev;
+ struct net_device *dummy_dev;
unsigned int features;
};
diff --git a/drivers/net/ethernet/ibm/emac/rgmii.c b/drivers/net/ethernet/ibm/emac/rgmii.c
index e1712fdc3c31..b544dd8633b7 100644
--- a/drivers/net/ethernet/ibm/emac/rgmii.c
+++ b/drivers/net/ethernet/ibm/emac/rgmii.c
@@ -216,31 +216,24 @@ void *rgmii_dump_regs(struct platform_device *ofdev, void *buf)
static int rgmii_probe(struct platform_device *ofdev)
{
- struct device_node *np = ofdev->dev.of_node;
struct rgmii_instance *dev;
- struct resource regs;
- int rc;
+ int err;
- rc = -ENOMEM;
- dev = kzalloc(sizeof(struct rgmii_instance), GFP_KERNEL);
- if (dev == NULL)
- goto err_gone;
+ dev = devm_kzalloc(&ofdev->dev, sizeof(struct rgmii_instance),
+ GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
- mutex_init(&dev->lock);
- dev->ofdev = ofdev;
+ err = devm_mutex_init(&ofdev->dev, &dev->lock);
+ if (err)
+ return err;
- rc = -ENXIO;
- if (of_address_to_resource(np, 0, &regs)) {
- printk(KERN_ERR "%pOF: Can't get registers address\n", np);
- goto err_free;
- }
+ dev->ofdev = ofdev;
- rc = -ENOMEM;
- dev->base = (struct rgmii_regs __iomem *)ioremap(regs.start,
- sizeof(struct rgmii_regs));
- if (dev->base == NULL) {
- printk(KERN_ERR "%pOF: Can't map device registers!\n", np);
- goto err_free;
+ dev->base = devm_platform_ioremap_resource(ofdev, 0);
+ if (IS_ERR(dev->base)) {
+ dev_err(&ofdev->dev, "can't map device registers");
+ return PTR_ERR(dev->base);
}
/* Check for RGMII flags */
@@ -266,21 +259,6 @@ static int rgmii_probe(struct platform_device *ofdev)
platform_set_drvdata(ofdev, dev);
return 0;
-
- err_free:
- kfree(dev);
- err_gone:
- return rc;
-}
-
-static void rgmii_remove(struct platform_device *ofdev)
-{
- struct rgmii_instance *dev = platform_get_drvdata(ofdev);
-
- WARN_ON(dev->users != 0);
-
- iounmap(dev->base);
- kfree(dev);
}
static const struct of_device_id rgmii_match[] =
@@ -300,7 +278,6 @@ static struct platform_driver rgmii_driver = {
.of_match_table = rgmii_match,
},
.probe = rgmii_probe,
- .remove_new = rgmii_remove,
};
int __init rgmii_init(void)
diff --git a/drivers/net/ethernet/ibm/emac/tah.c b/drivers/net/ethernet/ibm/emac/tah.c
index fa3488258ca2..09f6373ed2f9 100644
--- a/drivers/net/ethernet/ibm/emac/tah.c
+++ b/drivers/net/ethernet/ibm/emac/tah.c
@@ -87,31 +87,24 @@ void *tah_dump_regs(struct platform_device *ofdev, void *buf)
static int tah_probe(struct platform_device *ofdev)
{
- struct device_node *np = ofdev->dev.of_node;
struct tah_instance *dev;
- struct resource regs;
- int rc;
+ int err;
- rc = -ENOMEM;
- dev = kzalloc(sizeof(struct tah_instance), GFP_KERNEL);
- if (dev == NULL)
- goto err_gone;
+ dev = devm_kzalloc(&ofdev->dev, sizeof(struct tah_instance),
+ GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
- mutex_init(&dev->lock);
- dev->ofdev = ofdev;
+ err = devm_mutex_init(&ofdev->dev, &dev->lock);
+ if (err)
+ return err;
- rc = -ENXIO;
- if (of_address_to_resource(np, 0, &regs)) {
- printk(KERN_ERR "%pOF: Can't get registers address\n", np);
- goto err_free;
- }
+ dev->ofdev = ofdev;
- rc = -ENOMEM;
- dev->base = (struct tah_regs __iomem *)ioremap(regs.start,
- sizeof(struct tah_regs));
- if (dev->base == NULL) {
- printk(KERN_ERR "%pOF: Can't map device registers!\n", np);
- goto err_free;
+ dev->base = devm_platform_ioremap_resource(ofdev, 0);
+ if (IS_ERR(dev->base)) {
+ dev_err(&ofdev->dev, "can't map device registers");
+ return PTR_ERR(dev->base);
}
platform_set_drvdata(ofdev, dev);
@@ -123,21 +116,6 @@ static int tah_probe(struct platform_device *ofdev)
wmb();
return 0;
-
- err_free:
- kfree(dev);
- err_gone:
- return rc;
-}
-
-static void tah_remove(struct platform_device *ofdev)
-{
- struct tah_instance *dev = platform_get_drvdata(ofdev);
-
- WARN_ON(dev->users != 0);
-
- iounmap(dev->base);
- kfree(dev);
}
static const struct of_device_id tah_match[] =
@@ -158,7 +136,6 @@ static struct platform_driver tah_driver = {
.of_match_table = tah_match,
},
.probe = tah_probe,
- .remove_new = tah_remove,
};
int __init tah_init(void)
diff --git a/drivers/net/ethernet/ibm/emac/zmii.c b/drivers/net/ethernet/ibm/emac/zmii.c
index 26e86cdee2f6..69ca6065de1c 100644
--- a/drivers/net/ethernet/ibm/emac/zmii.c
+++ b/drivers/net/ethernet/ibm/emac/zmii.c
@@ -232,32 +232,25 @@ void *zmii_dump_regs(struct platform_device *ofdev, void *buf)
static int zmii_probe(struct platform_device *ofdev)
{
- struct device_node *np = ofdev->dev.of_node;
struct zmii_instance *dev;
- struct resource regs;
- int rc;
+ int err;
- rc = -ENOMEM;
- dev = kzalloc(sizeof(struct zmii_instance), GFP_KERNEL);
- if (dev == NULL)
- goto err_gone;
+ dev = devm_kzalloc(&ofdev->dev, sizeof(struct zmii_instance),
+ GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ err = devm_mutex_init(&ofdev->dev, &dev->lock);
+ if (err)
+ return err;
- mutex_init(&dev->lock);
dev->ofdev = ofdev;
dev->mode = PHY_INTERFACE_MODE_NA;
- rc = -ENXIO;
- if (of_address_to_resource(np, 0, &regs)) {
- printk(KERN_ERR "%pOF: Can't get registers address\n", np);
- goto err_free;
- }
-
- rc = -ENOMEM;
- dev->base = (struct zmii_regs __iomem *)ioremap(regs.start,
- sizeof(struct zmii_regs));
- if (dev->base == NULL) {
- printk(KERN_ERR "%pOF: Can't map device registers!\n", np);
- goto err_free;
+ dev->base = devm_platform_ioremap_resource(ofdev, 0);
+ if (IS_ERR(dev->base)) {
+ dev_err(&ofdev->dev, "can't map device registers");
+ return PTR_ERR(dev->base);
}
/* We may need FER value for autodetection later */
@@ -271,21 +264,6 @@ static int zmii_probe(struct platform_device *ofdev)
platform_set_drvdata(ofdev, dev);
return 0;
-
- err_free:
- kfree(dev);
- err_gone:
- return rc;
-}
-
-static void zmii_remove(struct platform_device *ofdev)
-{
- struct zmii_instance *dev = platform_get_drvdata(ofdev);
-
- WARN_ON(dev->users != 0);
-
- iounmap(dev->base);
- kfree(dev);
}
static const struct of_device_id zmii_match[] =
@@ -306,7 +284,6 @@ static struct platform_driver zmii_driver = {
.of_match_table = zmii_match,
},
.probe = zmii_probe,
- .remove_new = zmii_remove,
};
int __init zmii_init(void)
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index b5aef0b29efe..6f0821f1e798 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -39,7 +39,6 @@
#include "ibmveth.h"
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
-static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);
static struct kobj_type ktype_veth_pool;
@@ -212,88 +211,169 @@ static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
struct ibmveth_buff_pool *pool)
{
- u32 i;
- u32 count = pool->size - atomic_read(&pool->available);
- u32 buffers_added = 0;
- struct sk_buff *skb;
- unsigned int free_index, index;
- u64 correlator;
+ union ibmveth_buf_desc descs[IBMVETH_MAX_RX_PER_HCALL] = {0};
+ u32 remaining = pool->size - atomic_read(&pool->available);
+ u64 correlators[IBMVETH_MAX_RX_PER_HCALL] = {0};
unsigned long lpar_rc;
+ u32 buffers_added = 0;
+ u32 i, filled, batch;
+ struct vio_dev *vdev;
dma_addr_t dma_addr;
+ struct device *dev;
+ u32 index;
- mb();
+ vdev = adapter->vdev;
+ dev = &vdev->dev;
- for (i = 0; i < count; ++i) {
- union ibmveth_buf_desc desc;
+ mb();
- skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
+ batch = adapter->rx_buffers_per_hcall;
+
+ while (remaining > 0) {
+ unsigned int free_index = pool->consumer_index;
+
+ /* Fill a batch of descriptors */
+ for (filled = 0; filled < min(remaining, batch); filled++) {
+ index = pool->free_map[free_index];
+ if (WARN_ON(index == IBM_VETH_INVALID_MAP)) {
+ adapter->replenish_add_buff_failure++;
+ netdev_info(adapter->netdev,
+ "Invalid map index %u, reset\n",
+ index);
+ schedule_work(&adapter->work);
+ break;
+ }
- if (!skb) {
- netdev_dbg(adapter->netdev,
- "replenish: unable to allocate skb\n");
- adapter->replenish_no_mem++;
- break;
- }
+ if (!pool->skbuff[index]) {
+ struct sk_buff *skb = NULL;
- free_index = pool->consumer_index;
- pool->consumer_index++;
- if (pool->consumer_index >= pool->size)
- pool->consumer_index = 0;
- index = pool->free_map[free_index];
+ skb = netdev_alloc_skb(adapter->netdev,
+ pool->buff_size);
+ if (!skb) {
+ adapter->replenish_no_mem++;
+ adapter->replenish_add_buff_failure++;
+ break;
+ }
- BUG_ON(index == IBM_VETH_INVALID_MAP);
- BUG_ON(pool->skbuff[index] != NULL);
+ dma_addr = dma_map_single(dev, skb->data,
+ pool->buff_size,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, dma_addr)) {
+ dev_kfree_skb_any(skb);
+ adapter->replenish_add_buff_failure++;
+ break;
+ }
- dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
- pool->buff_size, DMA_FROM_DEVICE);
+ pool->dma_addr[index] = dma_addr;
+ pool->skbuff[index] = skb;
+ } else {
+ /* re-use case */
+ dma_addr = pool->dma_addr[index];
+ }
- if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
- goto failure;
+ if (rx_flush) {
+ unsigned int len;
- pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
- pool->dma_addr[index] = dma_addr;
- pool->skbuff[index] = skb;
+ len = adapter->netdev->mtu + IBMVETH_BUFF_OH;
+ len = min(pool->buff_size, len);
+ ibmveth_flush_buffer(pool->skbuff[index]->data,
+ len);
+ }
- correlator = ((u64)pool->index << 32) | index;
- *(u64 *)skb->data = correlator;
+ descs[filled].fields.flags_len = IBMVETH_BUF_VALID |
+ pool->buff_size;
+ descs[filled].fields.address = dma_addr;
- desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
- desc.fields.address = dma_addr;
+ correlators[filled] = ((u64)pool->index << 32) | index;
+ *(u64 *)pool->skbuff[index]->data = correlators[filled];
- if (rx_flush) {
- unsigned int len = min(pool->buff_size,
- adapter->netdev->mtu +
- IBMVETH_BUFF_OH);
- ibmveth_flush_buffer(skb->data, len);
+ free_index++;
+ if (free_index >= pool->size)
+ free_index = 0;
}
- lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address,
- desc.desc);
+ if (!filled)
+ break;
+
+ /* single-buffer case */
+ if (filled == 1)
+ lpar_rc = h_add_logical_lan_buffer(vdev->unit_address,
+ descs[0].desc);
+ else
+ /* Multi-buffer hcall */
+ lpar_rc = h_add_logical_lan_buffers(vdev->unit_address,
+ descs[0].desc,
+ descs[1].desc,
+ descs[2].desc,
+ descs[3].desc,
+ descs[4].desc,
+ descs[5].desc,
+ descs[6].desc,
+ descs[7].desc);
if (lpar_rc != H_SUCCESS) {
- goto failure;
- } else {
- buffers_added++;
- adapter->replenish_add_buff_success++;
+ dev_warn_ratelimited(dev,
+ "RX h_add_logical_lan failed: filled=%u, rc=%lu, batch=%u\n",
+ filled, lpar_rc, batch);
+ goto hcall_failure;
}
- }
- mb();
- atomic_add(buffers_added, &(pool->available));
- return;
+ /* Only update pool state after hcall succeeds */
+ for (i = 0; i < filled; i++) {
+ free_index = pool->consumer_index;
+ pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
-failure:
- pool->free_map[free_index] = index;
- pool->skbuff[index] = NULL;
- if (pool->consumer_index == 0)
- pool->consumer_index = pool->size - 1;
- else
- pool->consumer_index--;
- if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
- dma_unmap_single(&adapter->vdev->dev,
- pool->dma_addr[index], pool->buff_size,
- DMA_FROM_DEVICE);
- dev_kfree_skb_any(skb);
- adapter->replenish_add_buff_failure++;
+ pool->consumer_index++;
+ if (pool->consumer_index >= pool->size)
+ pool->consumer_index = 0;
+ }
+
+ buffers_added += filled;
+ adapter->replenish_add_buff_success += filled;
+ remaining -= filled;
+
+ memset(&descs, 0, sizeof(descs));
+ memset(&correlators, 0, sizeof(correlators));
+ continue;
+
+hcall_failure:
+ for (i = 0; i < filled; i++) {
+ index = correlators[i] & 0xffffffffUL;
+ dma_addr = pool->dma_addr[index];
+
+ if (pool->skbuff[index]) {
+ if (dma_addr &&
+ !dma_mapping_error(dev, dma_addr))
+ dma_unmap_single(dev, dma_addr,
+ pool->buff_size,
+ DMA_FROM_DEVICE);
+
+ dev_kfree_skb_any(pool->skbuff[index]);
+ pool->skbuff[index] = NULL;
+ }
+ }
+ adapter->replenish_add_buff_failure += filled;
+
+ /*
+ * The multi-buffer RX hcall may no longer be supported by FW,
+ * e.g. after a Live Partition Migration
+ */
+ if (batch > 1 && lpar_rc == H_FUNCTION) {
+ /*
+ * Instead of retrying the submission one buffer at a time
+ * here, just set the max rx buffers per hcall to 1; the
+ * buffers will be replenished with single-buffer hcalls the
+ * next time ibmveth_replenish_buffer_pool() is called.
+ */
+ netdev_info(adapter->netdev,
+ "RX Multi buffers not supported by FW, rc=%lu\n",
+ lpar_rc);
+ adapter->rx_buffers_per_hcall = 1;
+ netdev_info(adapter->netdev,
+ "Next rx replesh will fall back to single-buffer hcall\n");
+ }
+ break;
+ }
mb();
atomic_add(buffers_added, &(pool->available));
@@ -363,28 +443,52 @@ static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
}
}
-/* remove a buffer from a pool */
-static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
- u64 correlator)
+/**
+ * ibmveth_remove_buffer_from_pool - remove a buffer from a pool
+ * @adapter: adapter instance
+ * @correlator: identifies pool and index
+ * @reuse: whether to reuse buffer
+ *
+ * Return:
+ * * %0 - success
+ * * %-EINVAL - correlator maps to a pool or index that is out of range
+ * * %-EFAULT - pool and index map to a NULL skb
+ */
+static int ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
+ u64 correlator, bool reuse)
{
unsigned int pool = correlator >> 32;
unsigned int index = correlator & 0xffffffffUL;
unsigned int free_index;
struct sk_buff *skb;
- BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
- BUG_ON(index >= adapter->rx_buff_pool[pool].size);
+ if (WARN_ON(pool >= IBMVETH_NUM_BUFF_POOLS) ||
+ WARN_ON(index >= adapter->rx_buff_pool[pool].size)) {
+ schedule_work(&adapter->work);
+ return -EINVAL;
+ }
skb = adapter->rx_buff_pool[pool].skbuff[index];
+ if (WARN_ON(!skb)) {
+ schedule_work(&adapter->work);
+ return -EFAULT;
+ }
- BUG_ON(skb == NULL);
-
- adapter->rx_buff_pool[pool].skbuff[index] = NULL;
+ /* if we are going to reuse the buffer then keep the pointers around
+ * but mark index as available. replenish will see the skb pointer and
+ * assume it is to be recycled.
+ */
+ if (!reuse) {
+ /* remove the skb pointer to mark it free. actual freeing is done
+ * by the upper-level networking stack after gro_receive
+ */
+ adapter->rx_buff_pool[pool].skbuff[index] = NULL;
- dma_unmap_single(&adapter->vdev->dev,
- adapter->rx_buff_pool[pool].dma_addr[index],
- adapter->rx_buff_pool[pool].buff_size,
- DMA_FROM_DEVICE);
+ dma_unmap_single(&adapter->vdev->dev,
+ adapter->rx_buff_pool[pool].dma_addr[index],
+ adapter->rx_buff_pool[pool].buff_size,
+ DMA_FROM_DEVICE);
+ }
free_index = adapter->rx_buff_pool[pool].producer_index;
adapter->rx_buff_pool[pool].producer_index++;
@@ -396,6 +500,8 @@ static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
mb();
atomic_dec(&(adapter->rx_buff_pool[pool].available));
+
+ return 0;
}
/* get the current buffer on the rx queue */
@@ -405,62 +511,44 @@ static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *ada
unsigned int pool = correlator >> 32;
unsigned int index = correlator & 0xffffffffUL;
- BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
- BUG_ON(index >= adapter->rx_buff_pool[pool].size);
+ if (WARN_ON(pool >= IBMVETH_NUM_BUFF_POOLS) ||
+ WARN_ON(index >= adapter->rx_buff_pool[pool].size)) {
+ schedule_work(&adapter->work);
+ return NULL;
+ }
return adapter->rx_buff_pool[pool].skbuff[index];
}
-/* recycle the current buffer on the rx queue */
-static int ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
+/**
+ * ibmveth_rxq_harvest_buffer - Harvest buffer from pool
+ *
+ * @adapter: pointer to adapter
+ * @reuse: whether to reuse buffer
+ *
+ * Context: called from ibmveth_poll
+ *
+ * Return:
+ * * %0 - success
+ * * other - non-zero return from ibmveth_remove_buffer_from_pool
+ */
+static int ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter,
+ bool reuse)
{
- u32 q_index = adapter->rx_queue.index;
- u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
- unsigned int pool = correlator >> 32;
- unsigned int index = correlator & 0xffffffffUL;
- union ibmveth_buf_desc desc;
- unsigned long lpar_rc;
- int ret = 1;
-
- BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
- BUG_ON(index >= adapter->rx_buff_pool[pool].size);
-
- if (!adapter->rx_buff_pool[pool].active) {
- ibmveth_rxq_harvest_buffer(adapter);
- ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
- goto out;
- }
-
- desc.fields.flags_len = IBMVETH_BUF_VALID |
- adapter->rx_buff_pool[pool].buff_size;
- desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];
-
- lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
+ u64 cor;
+ int rc;
- if (lpar_rc != H_SUCCESS) {
- netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed "
- "during recycle rc=%ld", lpar_rc);
- ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
- ret = 0;
- }
+ cor = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
+ rc = ibmveth_remove_buffer_from_pool(adapter, cor, reuse);
+ if (unlikely(rc))
+ return rc;
if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
adapter->rx_queue.index = 0;
adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
}
-out:
- return ret;
-}
-
-static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
-{
- ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
-
- if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
- adapter->rx_queue.index = 0;
- adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
- }
+ return 0;
}
static void ibmveth_free_tx_ltb(struct ibmveth_adapter *adapter, int idx)
@@ -732,6 +820,35 @@ static int ibmveth_close(struct net_device *netdev)
return 0;
}
+/**
+ * ibmveth_reset - Handle scheduled reset work
+ *
+ * @w: pointer to work_struct embedded in adapter structure
+ *
+ * Context: This routine acquires rtnl_mutex and disables its NAPI through
+ * ibmveth_close. It can't be called directly in a context that has
+ * already acquired rtnl_mutex or disabled its NAPI, or directly from
+ * a poll routine.
+ *
+ * Return: void
+ */
+static void ibmveth_reset(struct work_struct *w)
+{
+ struct ibmveth_adapter *adapter = container_of(w, struct ibmveth_adapter, work);
+ struct net_device *netdev = adapter->netdev;
+
+ netdev_dbg(netdev, "reset starting\n");
+
+ rtnl_lock();
+
+ dev_close(adapter->netdev);
+ dev_open(adapter->netdev, NULL);
+
+ rtnl_unlock();
+
+ netdev_dbg(netdev, "reset complete\n");
+}
+
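The data-path changes throughout this patch share one recovery pattern built on this worker: warn once, schedule the work item, and bail out of the fast path, leaving the close/open cycle to process context. A condensed sketch, where bad_state stands in for whichever consistency check failed:

	if (WARN_ON(bad_state)) {
		/* defer recovery; ibmveth_reset() runs later under rtnl_lock
		 * and bounces the interface via dev_close()/dev_open()
		 */
		schedule_work(&adapter->work);
		return -EFAULT;
	}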
static int ibmveth_set_link_ksettings(struct net_device *dev,
const struct ethtool_link_ksettings *cmd)
{
@@ -1337,6 +1454,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
unsigned long lpar_rc;
u16 mss = 0;
+restart_poll:
while (frames_processed < budget) {
if (!ibmveth_rxq_pending_buffer(adapter))
break;
@@ -1346,7 +1464,8 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
wmb(); /* suggested by larson1 */
adapter->rx_invalid_buffer++;
netdev_dbg(netdev, "recycling invalid buffer\n");
- ibmveth_rxq_recycle_buffer(adapter);
+ if (unlikely(ibmveth_rxq_harvest_buffer(adapter, true)))
+ break;
} else {
struct sk_buff *skb, *new_skb;
int length = ibmveth_rxq_frame_length(adapter);
@@ -1356,6 +1475,8 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
__sum16 iph_check = 0;
skb = ibmveth_rxq_get_buffer(adapter);
+ if (unlikely(!skb))
+ break;
/* if the large packet bit is set in the rx queue
* descriptor, the mss will be written by PHYP eight
@@ -1379,11 +1500,12 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
if (rx_flush)
ibmveth_flush_buffer(skb->data,
length + offset);
- if (!ibmveth_rxq_recycle_buffer(adapter))
- kfree_skb(skb);
+ if (unlikely(ibmveth_rxq_harvest_buffer(adapter, true)))
+ break;
skb = new_skb;
} else {
- ibmveth_rxq_harvest_buffer(adapter);
+ if (unlikely(ibmveth_rxq_harvest_buffer(adapter, false)))
+ break;
skb_reserve(skb, offset);
}
@@ -1420,24 +1542,28 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
ibmveth_replenish_task(adapter);
- if (frames_processed < budget) {
- napi_complete_done(napi, frames_processed);
+ if (frames_processed == budget)
+ goto out;
- /* We think we are done - reenable interrupts,
- * then check once more to make sure we are done.
- */
- lpar_rc = h_vio_signal(adapter->vdev->unit_address,
- VIO_IRQ_ENABLE);
+ if (!napi_complete_done(napi, frames_processed))
+ goto out;
- BUG_ON(lpar_rc != H_SUCCESS);
+ /* We think we are done - reenable interrupts,
+ * then check once more to make sure we are done.
+ */
+ lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_ENABLE);
+ if (WARN_ON(lpar_rc != H_SUCCESS)) {
+ schedule_work(&adapter->work);
+ goto out;
+ }
- if (ibmveth_rxq_pending_buffer(adapter) &&
- napi_schedule(napi)) {
- lpar_rc = h_vio_signal(adapter->vdev->unit_address,
- VIO_IRQ_DISABLE);
- }
+ if (ibmveth_rxq_pending_buffer(adapter) && napi_schedule(napi)) {
+ lpar_rc = h_vio_signal(adapter->vdev->unit_address,
+ VIO_IRQ_DISABLE);
+ goto restart_poll;
}
+out:
return frames_processed;
}
@@ -1450,7 +1576,7 @@ static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
if (napi_schedule_prep(&adapter->napi)) {
lpar_rc = h_vio_signal(adapter->vdev->unit_address,
VIO_IRQ_DISABLE);
- BUG_ON(lpar_rc != H_SUCCESS);
+ WARN_ON(lpar_rc != H_SUCCESS);
__napi_schedule(&adapter->napi);
}
return IRQ_HANDLED;
@@ -1537,7 +1663,7 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
adapter->rx_buff_pool[i].active = 1;
if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size) {
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
vio_cmo_set_dev_desired(viodev,
ibmveth_get_desired_dma
(viodev));
@@ -1692,6 +1818,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
adapter->vdev = dev;
adapter->netdev = netdev;
+ INIT_WORK(&adapter->work, ibmveth_reset);
adapter->mcastFilterSize = be32_to_cpu(*mcastFilterSize_p);
ibmveth_init_link_settings(netdev);
@@ -1727,6 +1854,19 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
netdev->features |= NETIF_F_FRAGLIST;
}
+ if (ret == H_SUCCESS &&
+ (ret_attr & IBMVETH_ILLAN_RX_MULTI_BUFF_SUPPORT)) {
+ adapter->rx_buffers_per_hcall = IBMVETH_MAX_RX_PER_HCALL;
+ netdev_dbg(netdev,
+ "RX Multi-buffer hcall supported by FW, batch set to %u\n",
+ adapter->rx_buffers_per_hcall);
+ } else {
+ adapter->rx_buffers_per_hcall = 1;
+ netdev_dbg(netdev,
+ "RX Single-buffer hcall mode, batch set to %u\n",
+ adapter->rx_buffers_per_hcall);
+ }
+
netdev->min_mtu = IBMVETH_MIN_MTU;
netdev->max_mtu = ETH_MAX_MTU - IBMVETH_BUFF_OH;
@@ -1784,6 +1924,8 @@ static void ibmveth_remove(struct vio_dev *dev)
struct ibmveth_adapter *adapter = netdev_priv(netdev);
int i;
+ cancel_work_sync(&adapter->work);
+
for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
kobject_put(&adapter->rx_buff_pool[i].kobj);
@@ -1813,6 +1955,26 @@ static ssize_t veth_pool_show(struct kobject *kobj,
return 0;
}
+/**
+ * veth_pool_store - sysfs store handler for pool attributes
+ * @kobj: kobject embedded in pool
+ * @attr: attribute being changed
+ * @buf: value being stored
+ * @count: length of @buf in bytes
+ *
+ * Stores new value in pool attribute. Verifies the range of the new value for
+ * size and buff_size. Verifies that at least one pool remains available to
+ * receive MTU-sized packets.
+ *
+ * Context: Process context.
+ * Takes and releases rtnl_mutex to ensure correct ordering of close
+ * and open calls.
+ * Return:
+ * * %-EPERM - Not allowed to disable all MTU-sized buffer pools
+ * * %-EINVAL - New pool size or buffer size is out of range
+ * * count - Return count for success
+ * * other - Return value from a failed ibmveth_open call
+ */
static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
const char *buf, size_t count)
{
@@ -1822,24 +1984,30 @@ static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
struct net_device *netdev = dev_get_drvdata(kobj_to_dev(kobj->parent));
struct ibmveth_adapter *adapter = netdev_priv(netdev);
long value = simple_strtol(buf, NULL, 10);
+ bool change = false;
+ u32 newbuff_size;
+ u32 oldbuff_size;
+ int newactive;
+ int oldactive;
+ u32 newsize;
+ u32 oldsize;
long rc;
+ rtnl_lock();
+
+ oldbuff_size = pool->buff_size;
+ oldactive = pool->active;
+ oldsize = pool->size;
+
+ newbuff_size = oldbuff_size;
+ newactive = oldactive;
+ newsize = oldsize;
+
if (attr == &veth_active_attr) {
- if (value && !pool->active) {
- if (netif_running(netdev)) {
- if (ibmveth_alloc_buffer_pool(pool)) {
- netdev_err(netdev,
- "unable to alloc pool\n");
- return -ENOMEM;
- }
- pool->active = 1;
- ibmveth_close(netdev);
- if ((rc = ibmveth_open(netdev)))
- return rc;
- } else {
- pool->active = 1;
- }
- } else if (!value && pool->active) {
+ if (value && !oldactive) {
+ newactive = 1;
+ change = true;
+ } else if (!value && oldactive) {
int mtu = netdev->mtu + IBMVETH_BUFF_OH;
int i;
/* Make sure there is a buffer pool with buffers that
@@ -1855,48 +2023,60 @@ static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
if (i == IBMVETH_NUM_BUFF_POOLS) {
netdev_err(netdev, "no active pool >= MTU\n");
- return -EPERM;
+ rc = -EPERM;
+ goto unlock_err;
}
- if (netif_running(netdev)) {
- ibmveth_close(netdev);
- pool->active = 0;
- if ((rc = ibmveth_open(netdev)))
- return rc;
- }
- pool->active = 0;
+ newactive = 0;
+ change = true;
}
} else if (attr == &veth_num_attr) {
if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) {
- return -EINVAL;
- } else {
- if (netif_running(netdev)) {
- ibmveth_close(netdev);
- pool->size = value;
- if ((rc = ibmveth_open(netdev)))
- return rc;
- } else {
- pool->size = value;
- }
+ rc = -EINVAL;
+ goto unlock_err;
+ }
+ if (value != oldsize) {
+ newsize = value;
+ change = true;
}
} else if (attr == &veth_size_attr) {
if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) {
- return -EINVAL;
- } else {
- if (netif_running(netdev)) {
- ibmveth_close(netdev);
- pool->buff_size = value;
- if ((rc = ibmveth_open(netdev)))
- return rc;
- } else {
- pool->buff_size = value;
+ rc = -EINVAL;
+ goto unlock_err;
+ }
+ if (value != oldbuff_size) {
+ newbuff_size = value;
+ change = true;
+ }
+ }
+
+ if (change) {
+ if (netif_running(netdev))
+ ibmveth_close(netdev);
+
+ pool->active = newactive;
+ pool->buff_size = newbuff_size;
+ pool->size = newsize;
+
+ if (netif_running(netdev)) {
+ rc = ibmveth_open(netdev);
+ if (rc) {
+ pool->active = oldactive;
+ pool->buff_size = oldbuff_size;
+ pool->size = oldsize;
+ goto unlock_err;
}
}
}
+ rtnl_unlock();
/* kick the interrupt handler to allocate/deallocate pools */
ibmveth_interrupt(netdev->irq, netdev);
return count;
+
+unlock_err:
+ rtnl_unlock();
+ return rc;
}
@@ -1969,3 +2149,132 @@ static void __exit ibmveth_module_exit(void)
module_init(ibmveth_module_init);
module_exit(ibmveth_module_exit);
+
+#ifdef CONFIG_IBMVETH_KUNIT_TEST
+#include <kunit/test.h>
+
+/**
+ * ibmveth_reset_kunit - reset routine for running in KUnit environment
+ *
+ * @w: pointer to work_struct embedded in adapter structure
+ *
+ * Context: Called in the KUnit environment. Does nothing.
+ *
+ * Return: void
+ */
+static void ibmveth_reset_kunit(struct work_struct *w)
+{
+ netdev_dbg(NULL, "reset_kunit starting\n");
+ netdev_dbg(NULL, "reset_kunit complete\n");
+}
+
+/**
+ * ibmveth_remove_buffer_from_pool_test - unit test for error paths of
+ *	ibmveth_remove_buffer_from_pool
+ * @test: pointer to kunit structure
+ *
+ * Tests the error returns from ibmveth_remove_buffer_from_pool.
+ * ibmveth_remove_buffer_from_pool also calls WARN_ON, so dmesg should be
+ * checked to see that these warnings happened.
+ *
+ * Return: void
+ */
+static void ibmveth_remove_buffer_from_pool_test(struct kunit *test)
+{
+ struct ibmveth_adapter *adapter = kunit_kzalloc(test, sizeof(*adapter), GFP_KERNEL);
+ struct ibmveth_buff_pool *pool;
+ u64 correlator;
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, adapter);
+
+ INIT_WORK(&adapter->work, ibmveth_reset_kunit);
+
+ /* Set sane values for buffer pools */
+ for (int i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
+ ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
+ pool_count[i], pool_size[i],
+ pool_active[i]);
+
+ pool = &adapter->rx_buff_pool[0];
+ pool->skbuff = kunit_kcalloc(test, pool->size, sizeof(void *), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pool->skbuff);
+
+ correlator = ((u64)IBMVETH_NUM_BUFF_POOLS << 32) | 0;
+ KUNIT_EXPECT_EQ(test, -EINVAL, ibmveth_remove_buffer_from_pool(adapter, correlator, false));
+ KUNIT_EXPECT_EQ(test, -EINVAL, ibmveth_remove_buffer_from_pool(adapter, correlator, true));
+
+ correlator = ((u64)0 << 32) | adapter->rx_buff_pool[0].size;
+ KUNIT_EXPECT_EQ(test, -EINVAL, ibmveth_remove_buffer_from_pool(adapter, correlator, false));
+ KUNIT_EXPECT_EQ(test, -EINVAL, ibmveth_remove_buffer_from_pool(adapter, correlator, true));
+
+ correlator = (u64)0 | 0;
+ pool->skbuff[0] = NULL;
+ KUNIT_EXPECT_EQ(test, -EFAULT, ibmveth_remove_buffer_from_pool(adapter, correlator, false));
+ KUNIT_EXPECT_EQ(test, -EFAULT, ibmveth_remove_buffer_from_pool(adapter, correlator, true));
+
+ flush_work(&adapter->work);
+}
+
+/**
+ * ibmveth_rxq_get_buffer_test - unit test for ibmveth_rxq_get_buffer
+ * @test: pointer to kunit structure
+ *
+ * Tests ibmveth_rxq_get_buffer. ibmveth_rxq_get_buffer also calls WARN_ON for
+ * the NULL returns, so dmesg should be checked to see that these warnings
+ * happened.
+ *
+ * Return: void
+ */
+static void ibmveth_rxq_get_buffer_test(struct kunit *test)
+{
+ struct ibmveth_adapter *adapter = kunit_kzalloc(test, sizeof(*adapter), GFP_KERNEL);
+ struct sk_buff *skb = kunit_kzalloc(test, sizeof(*skb), GFP_KERNEL);
+ struct ibmveth_buff_pool *pool;
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, adapter);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb);
+
+ INIT_WORK(&adapter->work, ibmveth_reset_kunit);
+
+ adapter->rx_queue.queue_len = 1;
+ adapter->rx_queue.index = 0;
+ adapter->rx_queue.queue_addr = kunit_kzalloc(test, sizeof(struct ibmveth_rx_q_entry),
+ GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, adapter->rx_queue.queue_addr);
+
+ /* Set sane values for buffer pools */
+ for (int i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
+ ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
+ pool_count[i], pool_size[i],
+ pool_active[i]);
+
+ pool = &adapter->rx_buff_pool[0];
+ pool->skbuff = kunit_kcalloc(test, pool->size, sizeof(void *), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pool->skbuff);
+
+ adapter->rx_queue.queue_addr[0].correlator = (u64)IBMVETH_NUM_BUFF_POOLS << 32 | 0;
+ KUNIT_EXPECT_PTR_EQ(test, NULL, ibmveth_rxq_get_buffer(adapter));
+
+ adapter->rx_queue.queue_addr[0].correlator = (u64)0 << 32 | adapter->rx_buff_pool[0].size;
+ KUNIT_EXPECT_PTR_EQ(test, NULL, ibmveth_rxq_get_buffer(adapter));
+
+ pool->skbuff[0] = skb;
+ adapter->rx_queue.queue_addr[0].correlator = (u64)0 << 32 | 0;
+ KUNIT_EXPECT_PTR_EQ(test, skb, ibmveth_rxq_get_buffer(adapter));
+
+ flush_work(&adapter->work);
+}
+
+static struct kunit_case ibmveth_test_cases[] = {
+ KUNIT_CASE(ibmveth_remove_buffer_from_pool_test),
+ KUNIT_CASE(ibmveth_rxq_get_buffer_test),
+ {}
+};
+
+static struct kunit_suite ibmveth_test_suite = {
+ .name = "ibmveth-kunit-test",
+ .test_cases = ibmveth_test_cases,
+};
+
+kunit_test_suite(ibmveth_test_suite);
+#endif
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index 8468e2c59d7a..068f99df133e 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -28,6 +28,7 @@
#define IbmVethMcastRemoveFilter 0x2UL
#define IbmVethMcastClearFilterTable 0x3UL
+#define IBMVETH_ILLAN_RX_MULTI_BUFF_SUPPORT 0x0000000000040000UL
#define IBMVETH_ILLAN_LRG_SR_ENABLED 0x0000000000010000UL
#define IBMVETH_ILLAN_LRG_SND_SUPPORT 0x0000000000008000UL
#define IBMVETH_ILLAN_PADDED_PKT_CSUM 0x0000000000002000UL
@@ -46,6 +47,24 @@
#define h_add_logical_lan_buffer(ua, buf) \
plpar_hcall_norets(H_ADD_LOGICAL_LAN_BUFFER, ua, buf)
+static inline long h_add_logical_lan_buffers(unsigned long unit_address,
+ unsigned long desc1,
+ unsigned long desc2,
+ unsigned long desc3,
+ unsigned long desc4,
+ unsigned long desc5,
+ unsigned long desc6,
+ unsigned long desc7,
+ unsigned long desc8)
+{
+ unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+
+ return plpar_hcall9(H_ADD_LOGICAL_LAN_BUFFERS,
+ retbuf, unit_address,
+ desc1, desc2, desc3, desc4,
+ desc5, desc6, desc7, desc8);
+}
+
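A hedged sketch of how a replenish path could use the batched hcall above; only IBMVETH_MAX_RX_PER_HCALL, union ibmveth_buf_desc and the wrapper itself come from the driver, the surrounding fragment is an assumption:

	union ibmveth_buf_desc descs[IBMVETH_MAX_RX_PER_HCALL];
	long rc;

	/* after filling all eight descriptors, submit them in a single
	 * H_ADD_LOGICAL_LAN_BUFFERS call instead of eight round trips
	 * through h_add_logical_lan_buffer()
	 */
	rc = h_add_logical_lan_buffers(adapter->vdev->unit_address,
				       descs[0].desc, descs[1].desc,
				       descs[2].desc, descs[3].desc,
				       descs[4].desc, descs[5].desc,
				       descs[6].desc, descs[7].desc);
	if (rc != H_SUCCESS)
		netdev_dbg(adapter->netdev,
			   "batched replenish failed, rc=%ld\n", rc);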
/* FW allows us to send 6 descriptors but we only use one so mark
* the other 5 as unused (0)
*/
@@ -101,6 +120,7 @@ static inline long h_illan_attributes(unsigned long unit_address,
#define IBMVETH_MAX_TX_BUF_SIZE (1024 * 64)
#define IBMVETH_MAX_QUEUES 16U
#define IBMVETH_DEFAULT_QUEUES 8U
+#define IBMVETH_MAX_RX_PER_HCALL 8U
static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };
static int pool_count[] = { 256, 512, 256, 256, 256 };
@@ -134,38 +154,40 @@ struct ibmveth_rx_q {
};
struct ibmveth_adapter {
- struct vio_dev *vdev;
- struct net_device *netdev;
- struct napi_struct napi;
- unsigned int mcastFilterSize;
- void * buffer_list_addr;
- void * filter_list_addr;
- void *tx_ltb_ptr[IBMVETH_MAX_QUEUES];
- unsigned int tx_ltb_size;
- dma_addr_t tx_ltb_dma[IBMVETH_MAX_QUEUES];
- dma_addr_t buffer_list_dma;
- dma_addr_t filter_list_dma;
- struct ibmveth_buff_pool rx_buff_pool[IBMVETH_NUM_BUFF_POOLS];
- struct ibmveth_rx_q rx_queue;
- int rx_csum;
- int large_send;
- bool is_active_trunk;
-
- u64 fw_ipv6_csum_support;
- u64 fw_ipv4_csum_support;
- u64 fw_large_send_support;
- /* adapter specific stats */
- u64 replenish_task_cycles;
- u64 replenish_no_mem;
- u64 replenish_add_buff_failure;
- u64 replenish_add_buff_success;
- u64 rx_invalid_buffer;
- u64 rx_no_buffer;
- u64 tx_map_failed;
- u64 tx_send_failed;
- u64 tx_large_packets;
- u64 rx_large_packets;
- /* Ethtool settings */
+ struct vio_dev *vdev;
+ struct net_device *netdev;
+ struct napi_struct napi;
+ struct work_struct work;
+ unsigned int mcastFilterSize;
+ void *buffer_list_addr;
+ void *filter_list_addr;
+ void *tx_ltb_ptr[IBMVETH_MAX_QUEUES];
+ unsigned int tx_ltb_size;
+ dma_addr_t tx_ltb_dma[IBMVETH_MAX_QUEUES];
+ dma_addr_t buffer_list_dma;
+ dma_addr_t filter_list_dma;
+ struct ibmveth_buff_pool rx_buff_pool[IBMVETH_NUM_BUFF_POOLS];
+ struct ibmveth_rx_q rx_queue;
+ int rx_csum;
+ int large_send;
+ bool is_active_trunk;
+ unsigned int rx_buffers_per_hcall;
+
+ u64 fw_ipv6_csum_support;
+ u64 fw_ipv4_csum_support;
+ u64 fw_large_send_support;
+ /* adapter specific stats */
+ u64 replenish_task_cycles;
+ u64 replenish_no_mem;
+ u64 replenish_add_buff_failure;
+ u64 replenish_add_buff_success;
+ u64 rx_invalid_buffer;
+ u64 rx_no_buffer;
+ u64 tx_map_failed;
+ u64 tx_send_failed;
+ u64 tx_large_packets;
+ u64 rx_large_packets;
+ /* Ethtool settings */
u8 duplex;
u32 speed;
};
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 30c47b8470ad..3808148c1fc7 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -117,6 +117,7 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter,
struct ibmvnic_long_term_buff *ltb);
static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter);
static void flush_reset_queue(struct ibmvnic_adapter *adapter);
+static void print_subcrq_error(struct device *dev, int rc, const char *func);
struct ibmvnic_stat {
char name[ETH_GSTRING_LEN];
@@ -233,11 +234,17 @@ static int ibmvnic_set_queue_affinity(struct ibmvnic_sub_crq_queue *queue,
(*stragglers)--;
}
/* atomic write is safer than writing bit by bit directly */
- for (i = 0; i < stride; i++) {
- cpumask_set_cpu(*cpu, mask);
- *cpu = cpumask_next_wrap(*cpu, cpu_online_mask,
- nr_cpu_ids, false);
+ for_each_online_cpu_wrap(i, *cpu) {
+ if (!stride--) {
+ /* For the next queue we start from the first
+ * unused CPU in this queue
+ */
+ *cpu = i;
+ break;
+ }
+ cpumask_set_cpu(i, mask);
}
+
/* set queue affinity mask */
cpumask_copy(queue->affinity_mask, mask);
rc = irq_set_affinity_and_hint(queue->irq, queue->affinity_mask);
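As a worked example of the distribution above (numbers purely illustrative): with 16 online CPUs and 6 queues, stride = max(16 / 6, 1) = 2 and stragglers = 16 % 6 = 4, so the first four queues each take one extra CPU (3 apiece) and the last two take 2 apiece, 4 * 3 + 2 * 2 = 16, with *cpu left at the first CPU not yet consumed so the next queue continues from there.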
@@ -255,7 +262,7 @@ static void ibmvnic_set_affinity(struct ibmvnic_adapter *adapter)
int num_rxqs = adapter->num_active_rx_scrqs, i_rxqs = 0;
int num_txqs = adapter->num_active_tx_scrqs, i_txqs = 0;
int total_queues, stride, stragglers, i;
- unsigned int num_cpu, cpu;
+ unsigned int num_cpu, cpu = 0;
bool is_rx_queue;
int rc = 0;
@@ -273,8 +280,6 @@ static void ibmvnic_set_affinity(struct ibmvnic_adapter *adapter)
stride = max_t(int, num_cpu / total_queues, 1);
/* number of leftover cpu's */
stragglers = num_cpu >= total_queues ? num_cpu % total_queues : 0;
- /* next available cpu to assign irq to */
- cpu = cpumask_next(-1, cpu_online_mask);
for (i = 0; i < total_queues; i++) {
is_rx_queue = false;
@@ -751,6 +756,17 @@ static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
adapter->rx_pool[i].active = 0;
}
+static void ibmvnic_set_safe_max_ind_descs(struct ibmvnic_adapter *adapter)
+{
+ if (adapter->cur_max_ind_descs > IBMVNIC_SAFE_IND_DESC) {
+ netdev_info(adapter->netdev,
+ "set max ind descs from %u to safe limit %u\n",
+ adapter->cur_max_ind_descs,
+ IBMVNIC_SAFE_IND_DESC);
+ adapter->cur_max_ind_descs = IBMVNIC_SAFE_IND_DESC;
+ }
+}
+
static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
struct ibmvnic_rx_pool *pool)
{
@@ -838,7 +854,7 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift);
/* if send_subcrq_indirect queue is full, flush to VIOS */
- if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS ||
+ if (ind_bufp->index == adapter->cur_max_ind_descs ||
i == count - 1) {
lpar_rc =
send_subcrq_indirect(adapter, handle,
@@ -857,6 +873,14 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
failure:
if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
+
+ /* Detect platform limit H_PARAMETER */
+ if (lpar_rc == H_PARAMETER)
+ ibmvnic_set_safe_max_ind_descs(adapter);
+
+	/* For all error cases, temporarily drop only this batch.
+	 * Rely on TCP/IP retransmissions to retry and recover.
+	 */
for (i = ind_bufp->index - 1; i >= 0; --i) {
struct ibmvnic_rx_buff *rx_buff;
@@ -2140,63 +2164,49 @@ static int ibmvnic_close(struct net_device *netdev)
}
/**
- * build_hdr_data - creates L2/L3/L4 header data buffer
+ * get_hdr_lens - fills list of L2/L3/L4 hdr lens
* @hdr_field: bitfield determining needed headers
* @skb: socket buffer
- * @hdr_len: array of header lengths
- * @hdr_data: buffer to write the header to
+ * @hdr_len: array of header lengths to be filled
*
 * Reads hdr_field to determine which headers are needed by firmware and
 * saves the individual header lengths. The summed length of the requested
 * headers is returned and later used to build descriptors.
+ *
+ * Return: total length of all requested headers
*/
-static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
- int *hdr_len, u8 *hdr_data)
+static int get_hdr_lens(u8 hdr_field, struct sk_buff *skb,
+ int *hdr_len)
{
int len = 0;
- u8 *hdr;
- if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
- hdr_len[0] = sizeof(struct vlan_ethhdr);
- else
- hdr_len[0] = sizeof(struct ethhdr);
+
+ if ((hdr_field >> 6) & 1) {
+ hdr_len[0] = skb_mac_header_len(skb);
+ len += hdr_len[0];
+ }
+
+ if ((hdr_field >> 5) & 1) {
+ hdr_len[1] = skb_network_header_len(skb);
+ len += hdr_len[1];
+ }
+
+ if (!((hdr_field >> 4) & 1))
+ return len;
if (skb->protocol == htons(ETH_P_IP)) {
- hdr_len[1] = ip_hdr(skb)->ihl * 4;
if (ip_hdr(skb)->protocol == IPPROTO_TCP)
hdr_len[2] = tcp_hdrlen(skb);
else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
hdr_len[2] = sizeof(struct udphdr);
} else if (skb->protocol == htons(ETH_P_IPV6)) {
- hdr_len[1] = sizeof(struct ipv6hdr);
if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
hdr_len[2] = tcp_hdrlen(skb);
else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
hdr_len[2] = sizeof(struct udphdr);
- } else if (skb->protocol == htons(ETH_P_ARP)) {
- hdr_len[1] = arp_hdr_len(skb->dev);
- hdr_len[2] = 0;
}
- memset(hdr_data, 0, 120);
- if ((hdr_field >> 6) & 1) {
- hdr = skb_mac_header(skb);
- memcpy(hdr_data, hdr, hdr_len[0]);
- len += hdr_len[0];
- }
-
- if ((hdr_field >> 5) & 1) {
- hdr = skb_network_header(skb);
- memcpy(hdr_data + len, hdr, hdr_len[1]);
- len += hdr_len[1];
- }
-
- if ((hdr_field >> 4) & 1) {
- hdr = skb_transport_header(skb);
- memcpy(hdr_data + len, hdr, hdr_len[2]);
- len += hdr_len[2];
- }
- return len;
+ return len + hdr_len[2];
}
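The shifts above imply the following layout of hdr_field; the macro names below are purely illustrative, the driver open-codes the bit tests:

/* hypothetical names for the bits tested via (hdr_field >> n) & 1 above */
#define IBMVNIC_HDR_WANT_L2	BIT(6)	/* MAC header requested */
#define IBMVNIC_HDR_WANT_L3	BIT(5)	/* network header requested */
#define IBMVNIC_HDR_WANT_L4	BIT(4)	/* transport header requested */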
/**
@@ -2209,12 +2219,14 @@ static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
*
* Creates header and, if needed, header extension descriptors and
* places them in a descriptor array, scrq_arr
+ *
+ * Return: Number of header descs
*/
static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
union sub_crq *scrq_arr)
{
- union sub_crq hdr_desc;
+ union sub_crq *hdr_desc;
int tmp_len = len;
int num_descs = 0;
u8 *data, *cur;
@@ -2223,28 +2235,26 @@ static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
while (tmp_len > 0) {
cur = hdr_data + len - tmp_len;
- memset(&hdr_desc, 0, sizeof(hdr_desc));
- if (cur != hdr_data) {
- data = hdr_desc.hdr_ext.data;
+ hdr_desc = &scrq_arr[num_descs];
+ if (num_descs) {
+ data = hdr_desc->hdr_ext.data;
tmp = tmp_len > 29 ? 29 : tmp_len;
- hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
- hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
- hdr_desc.hdr_ext.len = tmp;
+ hdr_desc->hdr_ext.first = IBMVNIC_CRQ_CMD;
+ hdr_desc->hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
+ hdr_desc->hdr_ext.len = tmp;
} else {
- data = hdr_desc.hdr.data;
+ data = hdr_desc->hdr.data;
tmp = tmp_len > 24 ? 24 : tmp_len;
- hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
- hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
- hdr_desc.hdr.len = tmp;
- hdr_desc.hdr.l2_len = (u8)hdr_len[0];
- hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
- hdr_desc.hdr.l4_len = (u8)hdr_len[2];
- hdr_desc.hdr.flag = hdr_field << 1;
+ hdr_desc->hdr.first = IBMVNIC_CRQ_CMD;
+ hdr_desc->hdr.type = IBMVNIC_HDR_DESC;
+ hdr_desc->hdr.len = tmp;
+ hdr_desc->hdr.l2_len = (u8)hdr_len[0];
+ hdr_desc->hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
+ hdr_desc->hdr.l4_len = (u8)hdr_len[2];
+ hdr_desc->hdr.flag = hdr_field << 1;
}
memcpy(data, cur, tmp);
tmp_len -= tmp;
- *scrq_arr = hdr_desc;
- scrq_arr++;
num_descs++;
}
@@ -2267,13 +2277,11 @@ static void build_hdr_descs_arr(struct sk_buff *skb,
int *num_entries, u8 hdr_field)
{
int hdr_len[3] = {0, 0, 0};
- u8 hdr_data[140] = {0};
int tot_len;
- tot_len = build_hdr_data(hdr_field, skb, hdr_len,
- hdr_data);
- *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
- indir_arr + 1);
+ tot_len = get_hdr_lens(hdr_field, skb, hdr_len);
+ *num_entries += create_hdr_descs(hdr_field, skb_mac_header(skb),
+ tot_len, hdr_len, indir_arr + 1);
}
static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
@@ -2323,9 +2331,7 @@ static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
tx_pool->num_buffers - 1 :
tx_pool->consumer_index - 1;
tx_buff = &tx_pool->tx_buff[index];
- adapter->netdev->stats.tx_packets--;
- adapter->netdev->stats.tx_bytes -= tx_buff->skb->len;
- adapter->tx_stats_buffers[queue_num].packets--;
+ adapter->tx_stats_buffers[queue_num].batched_packets--;
adapter->tx_stats_buffers[queue_num].bytes -=
tx_buff->skb->len;
dev_kfree_skb_any(tx_buff->skb);
@@ -2350,8 +2356,29 @@ static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
}
}
+static int send_subcrq_direct(struct ibmvnic_adapter *adapter,
+ u64 remote_handle, u64 *entry)
+{
+ unsigned int ua = adapter->vdev->unit_address;
+ struct device *dev = &adapter->vdev->dev;
+ int rc;
+
+ /* Make sure the hypervisor sees the complete request */
+ dma_wmb();
+ rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
+ cpu_to_be64(remote_handle),
+ cpu_to_be64(entry[0]), cpu_to_be64(entry[1]),
+ cpu_to_be64(entry[2]), cpu_to_be64(entry[3]));
+
+ if (rc)
+ print_subcrq_error(dev, rc, __func__);
+
+ return rc;
+}
+
static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter,
- struct ibmvnic_sub_crq_queue *tx_scrq)
+ struct ibmvnic_sub_crq_queue *tx_scrq,
+ bool indirect)
{
struct ibmvnic_ind_xmit_queue *ind_bufp;
u64 dma_addr;
@@ -2366,17 +2393,35 @@ static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter,
if (!entries)
return 0;
- rc = send_subcrq_indirect(adapter, handle, dma_addr, entries);
- if (rc)
- ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq);
+
+ if (indirect)
+ rc = send_subcrq_indirect(adapter, handle, dma_addr, entries);
else
+ rc = send_subcrq_direct(adapter, handle,
+ (u64 *)ind_bufp->indir_arr);
+
+ if (rc) {
+ dev_err_ratelimited(&adapter->vdev->dev,
+ "tx_flush failed, rc=%u (%llu entries dma=%pad handle=%llx)\n",
+ rc, entries, &dma_addr, handle);
+ /* Detect platform limit H_PARAMETER */
+ if (rc == H_PARAMETER)
+ ibmvnic_set_safe_max_ind_descs(adapter);
+
+		/* For all error cases, temporarily drop only this batch.
+		 * Rely on TCP/IP retransmissions to retry and recover.
+		 */
+ ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq);
+ } else {
ind_bufp->index = 0;
- return 0;
+ }
+ return rc;
}
static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+ u32 cur_max_ind_descs = adapter->cur_max_ind_descs;
int queue_num = skb_get_queue_mapping(skb);
u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
struct device *dev = &adapter->vdev->dev;
@@ -2390,13 +2435,16 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
unsigned int tx_map_failed = 0;
union sub_crq indir_arr[16];
unsigned int tx_dropped = 0;
- unsigned int tx_packets = 0;
+ unsigned int tx_dpackets = 0;
+ unsigned int tx_bpackets = 0;
unsigned int tx_bytes = 0;
dma_addr_t data_dma_addr;
struct netdev_queue *txq;
unsigned long lpar_rc;
+ unsigned int skblen;
union sub_crq tx_crq;
unsigned int offset;
+ bool use_scrq_send_direct = false;
int num_entries = 1;
unsigned char *dst;
int bufidx = 0;
@@ -2424,7 +2472,9 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_dropped++;
tx_send_failed++;
ret = NETDEV_TX_OK;
- ibmvnic_tx_scrq_flush(adapter, tx_scrq);
+ lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true);
+ if (lpar_rc != H_SUCCESS)
+ goto tx_err;
goto out;
}
@@ -2439,8 +2489,10 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
dev_kfree_skb_any(skb);
tx_send_failed++;
tx_dropped++;
- ibmvnic_tx_scrq_flush(adapter, tx_scrq);
ret = NETDEV_TX_OK;
+ lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true);
+ if (lpar_rc != H_SUCCESS)
+ goto tx_err;
goto out;
}
@@ -2452,6 +2504,20 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
memset(dst, 0, tx_pool->buf_size);
data_dma_addr = ltb->addr + offset;
+	/* if we are going to send this via send_subcrq_direct then we need to
+	 * update the checksum before copying the data into the ltb. Essentially
+	 * these packets force-disable CSO so that we can guarantee that
+	 * FW does not need header info and we can send direct. Also, the vnic
+	 * server must be able to xmit standard packets without header data
+	 */
+ if (*hdrs == 0 && !skb_is_gso(skb) &&
+ !ind_bufp->index && !netdev_xmit_more()) {
+ use_scrq_send_direct = true;
+ if (skb->ip_summed == CHECKSUM_PARTIAL &&
+ skb_checksum_help(skb))
+ use_scrq_send_direct = false;
+ }
+
if (skb_shinfo(skb)->nr_frags) {
int cur, i;
@@ -2471,16 +2537,26 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
skb_copy_from_linear_data(skb, dst, skb->len);
}
- /* post changes to long_term_buff *dst before VIOS accessing it */
- dma_wmb();
-
tx_pool->consumer_index =
(tx_pool->consumer_index + 1) % tx_pool->num_buffers;
tx_buff = &tx_pool->tx_buff[bufidx];
+
+ /* Sanity checks on our free map to make sure it points to an index
+ * that is not being occupied by another skb. If skb memory is
+ * not freed then we see congestion control kick in and halt tx.
+ */
+ if (unlikely(tx_buff->skb)) {
+ dev_warn_ratelimited(dev, "TX free map points to untracked skb (%s %d idx=%d)\n",
+ skb_is_gso(skb) ? "tso_pool" : "tx_pool",
+ queue_num, bufidx);
+ dev_kfree_skb_any(tx_buff->skb);
+ }
+
tx_buff->skb = skb;
tx_buff->index = bufidx;
tx_buff->pool_index = queue_num;
+ skblen = skb->len;
memset(&tx_crq, 0, sizeof(tx_crq));
tx_crq.v1.first = IBMVNIC_CRQ_CMD;
@@ -2524,6 +2600,19 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
hdrs += 2;
+ } else if (use_scrq_send_direct) {
+ /* See above comment, CSO disabled with direct xmit */
+ tx_crq.v1.flags1 &= ~(IBMVNIC_TX_CHKSUM_OFFLOAD);
+ ind_bufp->index = 1;
+ tx_buff->num_entries = 1;
+ netdev_tx_sent_queue(txq, skb->len);
+ ind_bufp->indir_arr[0] = tx_crq;
+ lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, false);
+ if (lpar_rc != H_SUCCESS)
+ goto tx_err;
+
+ tx_dpackets++;
+ goto early_exit;
}
if ((*hdrs >> 7) & 1)
@@ -2532,8 +2621,8 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_crq.v1.n_crq_elem = num_entries;
tx_buff->num_entries = num_entries;
/* flush buffer if current entry can not fit */
- if (num_entries + ind_bufp->index > IBMVNIC_MAX_IND_DESCS) {
- lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
+ if (num_entries + ind_bufp->index > cur_max_ind_descs) {
+ lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true);
if (lpar_rc != H_SUCCESS)
goto tx_flush_err;
}
@@ -2541,23 +2630,26 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
indir_arr[0] = tx_crq;
memcpy(&ind_bufp->indir_arr[ind_bufp->index], &indir_arr[0],
num_entries * sizeof(struct ibmvnic_generic_scrq));
+
ind_bufp->index += num_entries;
if (__netdev_tx_sent_queue(txq, skb->len,
netdev_xmit_more() &&
- ind_bufp->index < IBMVNIC_MAX_IND_DESCS)) {
- lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
+ ind_bufp->index < cur_max_ind_descs)) {
+ lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true);
if (lpar_rc != H_SUCCESS)
goto tx_err;
}
+ tx_bpackets++;
+
+early_exit:
if (atomic_add_return(num_entries, &tx_scrq->used)
>= adapter->req_tx_entries_per_subcrq) {
netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
netif_stop_subqueue(netdev, queue_num);
}
- tx_packets++;
- tx_bytes += skb->len;
+ tx_bytes += skblen;
txq_trans_cond_update(txq);
ret = NETDEV_TX_OK;
goto out;
@@ -2584,12 +2676,10 @@ tx_err:
}
out:
rcu_read_unlock();
- netdev->stats.tx_dropped += tx_dropped;
- netdev->stats.tx_bytes += tx_bytes;
- netdev->stats.tx_packets += tx_packets;
adapter->tx_send_failed += tx_send_failed;
adapter->tx_map_failed += tx_map_failed;
- adapter->tx_stats_buffers[queue_num].packets += tx_packets;
+ adapter->tx_stats_buffers[queue_num].batched_packets += tx_bpackets;
+ adapter->tx_stats_buffers[queue_num].direct_packets += tx_dpackets;
adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;
@@ -3388,6 +3478,25 @@ err:
return -ret;
}
+static void ibmvnic_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+ int i;
+
+ for (i = 0; i < adapter->req_rx_queues; i++) {
+ stats->rx_packets += adapter->rx_stats_buffers[i].packets;
+ stats->rx_bytes += adapter->rx_stats_buffers[i].bytes;
+ }
+
+ for (i = 0; i < adapter->req_tx_queues; i++) {
+ stats->tx_packets += adapter->tx_stats_buffers[i].batched_packets;
+ stats->tx_packets += adapter->tx_stats_buffers[i].direct_packets;
+ stats->tx_bytes += adapter->tx_stats_buffers[i].bytes;
+ stats->tx_dropped += adapter->tx_stats_buffers[i].dropped_packets;
+ }
+}
+
static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct ibmvnic_adapter *adapter = netdev_priv(dev);
@@ -3503,17 +3612,14 @@ restart_poll:
length = skb->len;
napi_gro_receive(napi, skb); /* send it up */
- netdev->stats.rx_packets++;
- netdev->stats.rx_bytes += length;
adapter->rx_stats_buffers[scrq_num].packets++;
adapter->rx_stats_buffers[scrq_num].bytes += length;
frames_processed++;
}
if (adapter->state != VNIC_CLOSING &&
- ((atomic_read(&adapter->rx_pool[scrq_num].available) <
- adapter->req_rx_add_entries_per_subcrq / 2) ||
- frames_processed < budget))
+ (atomic_read(&adapter->rx_pool[scrq_num].available) <
+ adapter->req_rx_add_entries_per_subcrq / 2))
replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
if (frames_processed < budget) {
if (napi_complete_done(napi, frames_processed)) {
@@ -3615,6 +3721,7 @@ static const struct net_device_ops ibmvnic_netdev_ops = {
.ndo_set_rx_mode = ibmvnic_set_multi,
.ndo_set_mac_address = ibmvnic_set_mac,
.ndo_validate_addr = eth_validate_addr,
+ .ndo_get_stats64 = ibmvnic_get_stats64,
.ndo_tx_timeout = ibmvnic_tx_timeout,
.ndo_change_mtu = ibmvnic_change_mtu,
.ndo_features_check = ibmvnic_features_check,
@@ -3751,29 +3858,20 @@ static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
if (stringset != ETH_SS_STATS)
return;
- for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
- memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
+ for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
+ ethtool_puts(&data, ibmvnic_stats[i].name);
for (i = 0; i < adapter->req_tx_queues; i++) {
- snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
- data += ETH_GSTRING_LEN;
-
- snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
- data += ETH_GSTRING_LEN;
-
- snprintf(data, ETH_GSTRING_LEN, "tx%d_dropped_packets", i);
- data += ETH_GSTRING_LEN;
+ ethtool_sprintf(&data, "tx%d_batched_packets", i);
+ ethtool_sprintf(&data, "tx%d_direct_packets", i);
+ ethtool_sprintf(&data, "tx%d_bytes", i);
+ ethtool_sprintf(&data, "tx%d_dropped_packets", i);
}
for (i = 0; i < adapter->req_rx_queues; i++) {
- snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
- data += ETH_GSTRING_LEN;
-
- snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
- data += ETH_GSTRING_LEN;
-
- snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
- data += ETH_GSTRING_LEN;
+ ethtool_sprintf(&data, "rx%d_packets", i);
+ ethtool_sprintf(&data, "rx%d_bytes", i);
+ ethtool_sprintf(&data, "rx%d_interrupts", i);
}
}
@@ -3820,7 +3918,9 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev,
(adapter, ibmvnic_stats[i].offset));
for (j = 0; j < adapter->req_tx_queues; j++) {
- data[i] = adapter->tx_stats_buffers[j].packets;
+ data[i] = adapter->tx_stats_buffers[j].batched_packets;
+ i++;
+ data[i] = adapter->tx_stats_buffers[j].direct_packets;
i++;
data[i] = adapter->tx_stats_buffers[j].bytes;
i++;
@@ -3937,7 +4037,7 @@ static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
}
dma_free_coherent(dev,
- IBMVNIC_IND_ARR_SZ,
+ IBMVNIC_IND_MAX_ARR_SZ,
scrq->ind_buf.indir_arr,
scrq->ind_buf.indir_dma);
@@ -3994,7 +4094,7 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
scrq->ind_buf.indir_arr =
dma_alloc_coherent(dev,
- IBMVNIC_IND_ARR_SZ,
+ IBMVNIC_IND_MAX_ARR_SZ,
&scrq->ind_buf.indir_dma,
GFP_KERNEL);
@@ -4057,6 +4157,12 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
adapter->num_active_tx_scrqs = 0;
}
+	/* Clean any remaining outstanding SKBs;
+	 * we freed the IRQ, so we won't be hearing
+	 * from them.
+	 */
+ clean_tx_pools(adapter);
+
if (adapter->rx_scrq) {
for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
if (!adapter->rx_scrq[i])
@@ -4147,20 +4253,17 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
struct ibmvnic_sub_crq_queue *scrq)
{
struct device *dev = &adapter->vdev->dev;
+ int num_packets = 0, total_bytes = 0;
struct ibmvnic_tx_pool *tx_pool;
struct ibmvnic_tx_buff *txbuff;
struct netdev_queue *txq;
union sub_crq *next;
- int index;
- int i;
+ int index, i;
restart_loop:
while (pending_scrq(adapter, scrq)) {
unsigned int pool = scrq->pool_index;
int num_entries = 0;
- int total_bytes = 0;
- int num_packets = 0;
-
next = ibmvnic_next_scrq(adapter, scrq);
for (i = 0; i < next->tx_comp.num_comps; i++) {
index = be32_to_cpu(next->tx_comp.correlators[i]);
@@ -4196,8 +4299,6 @@ restart_loop:
/* remove tx_comp scrq*/
next->tx_comp.first = 0;
- txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index);
- netdev_tx_completed_queue(txq, num_packets, total_bytes);
if (atomic_sub_return(num_entries, &scrq->used) <=
(adapter->req_tx_entries_per_subcrq / 2) &&
@@ -4222,6 +4323,9 @@ restart_loop:
goto restart_loop;
}
+ txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index);
+ netdev_tx_completed_queue(txq, num_packets, total_bytes);
+
return 0;
}
@@ -4773,6 +4877,18 @@ static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
strscpy(vlcd->name, adapter->netdev->name, len);
}
+static void ibmvnic_print_hex_dump(struct net_device *dev, void *buf,
+ size_t len)
+{
+ unsigned char hex_str[16 * 3];
+
+ for (size_t i = 0; i < len; i += 16) {
+ hex_dump_to_buffer((unsigned char *)buf + i, len - i, 16, 8,
+ hex_str, sizeof(hex_str), false);
+ netdev_dbg(dev, "%s\n", hex_str);
+ }
+}
+
static int send_login(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
@@ -4883,10 +4999,8 @@ static int send_login(struct ibmvnic_adapter *adapter)
vnic_add_client_data(adapter, vlcd);
netdev_dbg(adapter->netdev, "Login Buffer:\n");
- for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
- netdev_dbg(adapter->netdev, "%016lx\n",
- ((unsigned long *)(adapter->login_buf))[i]);
- }
+ ibmvnic_print_hex_dump(adapter->netdev, adapter->login_buf,
+ adapter->login_buf_sz);
memset(&crq, 0, sizeof(crq));
crq.login.first = IBMVNIC_CRQ_CMD;
@@ -5263,15 +5377,13 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
{
struct device *dev = &adapter->vdev->dev;
struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
- int i;
dma_unmap_single(dev, adapter->ip_offload_tok,
sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
- for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
- netdev_dbg(adapter->netdev, "%016lx\n",
- ((unsigned long *)(buf))[i]);
+ ibmvnic_print_hex_dump(adapter->netdev, buf,
+ sizeof(adapter->ip_offload_buf));
netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
@@ -5502,10 +5614,8 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
netdev->mtu = adapter->req_mtu - ETH_HLEN;
netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
- for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
- netdev_dbg(adapter->netdev, "%016lx\n",
- ((unsigned long *)(adapter->login_rsp_buf))[i]);
- }
+ ibmvnic_print_hex_dump(netdev, adapter->login_rsp_buf,
+ adapter->login_rsp_buf_sz);
/* Sanity checks */
if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
@@ -6290,6 +6400,19 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
rc = reset_sub_crq_queues(adapter);
}
} else {
+ if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
+			/* After an LPM, reset the max number of indirect
+			 * subcrq descriptors per H_SEND_SUB_CRQ_INDIRECT
+			 * hcall to the default max (e.g. POWER8 -> POWER10).
+			 *
+			 * If the new destination platform does not support
+			 * the higher maximum (e.g. POWER10 -> POWER8 LPM),
+			 * H_PARAMETER will trigger automatic fallback to the
+			 * safe minimum limit.
+			 */
+ adapter->cur_max_ind_descs = IBMVNIC_MAX_IND_DESCS;
+ }
+
rc = init_sub_crqs(adapter);
}
@@ -6441,6 +6564,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
adapter->wait_for_reset = false;
adapter->last_reset_time = jiffies;
+ adapter->cur_max_ind_descs = IBMVNIC_MAX_IND_DESCS;
rc = register_netdev(netdev);
if (rc) {
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 94ac36b1408b..480dc587078f 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -29,8 +29,9 @@
#define IBMVNIC_BUFFS_PER_POOL 100
#define IBMVNIC_MAX_QUEUES 16
#define IBMVNIC_MAX_QUEUE_SZ 4096
-#define IBMVNIC_MAX_IND_DESCS 16
-#define IBMVNIC_IND_ARR_SZ (IBMVNIC_MAX_IND_DESCS * 32)
+#define IBMVNIC_MAX_IND_DESCS 128
+#define IBMVNIC_SAFE_IND_DESC 16
+#define IBMVNIC_IND_MAX_ARR_SZ (IBMVNIC_MAX_IND_DESCS * 32)
#define IBMVNIC_TSO_BUF_SZ 65536
#define IBMVNIC_TSO_BUFS 64
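The arithmetic behind the new limits, as an illustrative compile-time sketch (assuming <linux/build_bug.h> for static_assert; the 32-byte descriptor stride is taken from the IBMVNIC_IND_MAX_ARR_SZ definition above):

/* 128 descriptors * 32 bytes = 4 KiB per sub-CRQ indirect buffer; the
 * H_PARAMETER fallback only shrinks how many of those slots are used (16),
 * it does not reallocate the buffer.
 */
static_assert(IBMVNIC_IND_MAX_ARR_SZ == 128 * 32);
static_assert(IBMVNIC_SAFE_IND_DESC <= IBMVNIC_MAX_IND_DESCS);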
@@ -211,20 +212,25 @@ struct ibmvnic_statistics {
u8 reserved[72];
} __packed __aligned(8);
-#define NUM_TX_STATS 3
struct ibmvnic_tx_queue_stats {
- u64 packets;
+ u64 batched_packets;
+ u64 direct_packets;
u64 bytes;
u64 dropped_packets;
};
-#define NUM_RX_STATS 3
+#define NUM_TX_STATS \
+ (sizeof(struct ibmvnic_tx_queue_stats) / sizeof(u64))
+
struct ibmvnic_rx_queue_stats {
u64 packets;
u64 bytes;
u64 interrupts;
};
+#define NUM_RX_STATS \
+ (sizeof(struct ibmvnic_rx_queue_stats) / sizeof(u64))
+
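Deriving the counts from the struct layout keeps them in step with future members; a quick illustrative check of the current values (assuming <linux/build_bug.h> for static_assert):

/* four u64 members in ibmvnic_tx_queue_stats (batched_packets,
 * direct_packets, bytes, dropped_packets), three in ibmvnic_rx_queue_stats
 * (packets, bytes, interrupts)
 */
static_assert(NUM_TX_STATS == 4);
static_assert(NUM_RX_STATS == 3);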
struct ibmvnic_acl_buffer {
__be32 len;
__be32 version;
@@ -925,6 +931,7 @@ struct ibmvnic_adapter {
struct ibmvnic_control_ip_offload_buffer ip_offload_ctrl;
dma_addr_t ip_offload_ctrl_tok;
u32 msg_enable;
+ u32 cur_max_ind_descs;
/* Vital Product Data (VPD) */
struct ibmvnic_vpd *vpd;
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 639fbb12bd35..288fa8ce53af 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -16,6 +16,9 @@ config NET_VENDOR_INTEL
if NET_VENDOR_INTEL
+source "drivers/net/ethernet/intel/libeth/Kconfig"
+source "drivers/net/ethernet/intel/libie/Kconfig"
+
config E100
tristate "Intel(R) PRO/100+ support"
depends on PCI
@@ -41,7 +44,7 @@ config E100
config E1000
tristate "Intel(R) PRO/1000 Gigabit Ethernet support"
- depends on PCI
+ depends on PCI && HAS_IOPORT
help
This driver supports Intel(R) PRO/1000 gigabit ethernet family of
adapters. For more information on how to identify your adapter, go
@@ -143,7 +146,10 @@ config IXGBE
tristate "Intel(R) 10GbE PCI Express adapters support"
depends on PCI
depends on PTP_1588_CLOCK_OPTIONAL
+ select LIBIE_FWLOG if DEBUG_FS
select MDIO
+ select NET_DEVLINK
+ select PLDMFW
select PHYLIB
help
This driver supports Intel(R) 10GbE PCI Express family of
@@ -225,6 +231,8 @@ config I40E
depends on PTP_1588_CLOCK_OPTIONAL
depends on PCI
select AUXILIARY_BUS
+ select LIBIE
+ select LIBIE_ADMINQ
select NET_DEVLINK
help
This driver supports Intel(R) Ethernet Controller XL710 Family of
@@ -253,10 +261,15 @@ config I40E_DCB
# so that CONFIG_IAVF symbol will always mirror the state of CONFIG_I40EVF
config IAVF
tristate
+ select LIBIE
+ select LIBIE_ADMINQ
+ select NET_SHAPER
+
config I40EVF
tristate "Intel(R) Ethernet Adaptive Virtual Function support"
select IAVF
depends on PCI_MSI
+ depends on PTP_1588_CLOCK_OPTIONAL
help
This driver supports virtual functions for Intel XL710,
X710, X722, XXV710, and all devices advertising support for
@@ -283,7 +296,12 @@ config ICE
depends on GNSS || GNSS = n
select AUXILIARY_BUS
select DIMLIB
+ select LIBETH_XDP
+ select LIBIE
+ select LIBIE_ADMINQ
+ select LIBIE_FWLOG if DEBUG_FS
select NET_DEVLINK
+ select PACKING
select PLDMFW
select DPLL
help
@@ -327,7 +345,7 @@ config ICE_SWITCHDEV
config ICE_HWTS
bool "Support HW cross-timestamp on platforms with PTM support"
default y
- depends on ICE && X86
+ depends on ICE && X86 && PCIE_PTM
help
Say Y to enable hardware supported cross-timestamping on platforms
with PCIe PTM support. The cross-timestamp is available through
@@ -357,6 +375,7 @@ config IGC
default n
depends on PCI
depends on PTP_1588_CLOCK_OPTIONAL
+ depends on ETHTOOL_NETLINK
help
This driver supports Intel(R) Ethernet Controller I225-LM/I225-V
family of adapters.
@@ -377,17 +396,6 @@ config IGC_LEDS
Optional support for controlling the NIC LED's with the netdev
LED trigger.
-config IDPF
- tristate "Intel(R) Infrastructure Data Path Function Support"
- depends on PCI_MSI
- select DIMLIB
- select PAGE_POOL
- select PAGE_POOL_STATS
- help
- This driver supports Intel(R) Infrastructure Data Path Function
- devices.
-
- To compile this driver as a module, choose M here. The module
- will be called idpf.
+source "drivers/net/ethernet/intel/idpf/Kconfig"
endif # NET_VENDOR_INTEL
diff --git a/drivers/net/ethernet/intel/Makefile b/drivers/net/ethernet/intel/Makefile
index dacb481ee5b1..9a37dc76aef0 100644
--- a/drivers/net/ethernet/intel/Makefile
+++ b/drivers/net/ethernet/intel/Makefile
@@ -3,6 +3,9 @@
# Makefile for the Intel network device drivers.
#
+obj-$(CONFIG_LIBETH) += libeth/
+obj-y += libie/
+
obj-$(CONFIG_E100) += e100.o
obj-$(CONFIG_E1000) += e1000/
obj-$(CONFIG_E1000E) += e1000e/
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 3fcb8daaa243..5c56c1edd492 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -146,7 +146,7 @@
#include <linux/string.h>
#include <linux/firmware.h>
#include <linux/rtnetlink.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#define DRV_NAME "e100"
@@ -161,7 +161,6 @@
#define FIRMWARE_D102E "e100/d102e_ucode.bin"
MODULE_DESCRIPTION(DRV_DESCRIPTION);
-MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(FIRMWARE_D101M);
MODULE_FIRMWARE(FIRMWARE_D101S);
@@ -1683,7 +1682,7 @@ static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
static void e100_watchdog(struct timer_list *t)
{
- struct nic *nic = from_timer(nic, t, watchdog);
+ struct nic *nic = timer_container_of(nic, t, watchdog);
struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
u32 speed;
@@ -2294,7 +2293,7 @@ static int e100_up(struct nic *nic)
return 0;
err_no_irq:
- del_timer_sync(&nic->watchdog);
+ timer_delete_sync(&nic->watchdog);
err_clean_cbs:
e100_clean_cbs(nic);
err_rx_clean_list:
@@ -2309,7 +2308,7 @@ static void e100_down(struct nic *nic)
netif_stop_queue(nic->netdev);
e100_hw_reset(nic);
free_irq(nic->pdev->irq, nic->netdev);
- del_timer_sync(&nic->watchdog);
+ timer_delete_sync(&nic->watchdog);
netif_carrier_off(nic->netdev);
e100_clean_cbs(nic);
e100_rx_clean_list(nic);
@@ -3037,7 +3036,7 @@ static int __e100_power_off(struct pci_dev *pdev, bool wake)
return 0;
}
-static int __maybe_unused e100_suspend(struct device *dev_d)
+static int e100_suspend(struct device *dev_d)
{
bool wake;
@@ -3046,7 +3045,7 @@ static int __maybe_unused e100_suspend(struct device *dev_d)
return 0;
}
-static int __maybe_unused e100_resume(struct device *dev_d)
+static int e100_resume(struct device *dev_d)
{
struct net_device *netdev = dev_get_drvdata(dev_d);
struct nic *nic = netdev_priv(netdev);
@@ -3163,7 +3162,7 @@ static const struct pci_error_handlers e100_err_handler = {
.resume = e100_io_resume,
};
-static SIMPLE_DEV_PM_OPS(e100_pm_ops, e100_suspend, e100_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(e100_pm_ops, e100_suspend, e100_resume);
static struct pci_driver e100_driver = {
.name = DRV_NAME,
@@ -3172,7 +3171,7 @@ static struct pci_driver e100_driver = {
.remove = e100_remove,
/* Power Management hooks */
- .driver.pm = &e100_pm_ops,
+ .driver.pm = pm_sleep_ptr(&e100_pm_ops),
.shutdown = e100_shutdown,
.err_handler = &e100_err_handler,
diff --git a/drivers/net/ethernet/intel/e1000/Makefile b/drivers/net/ethernet/intel/e1000/Makefile
index 314c52d44b7c..79491dec47e1 100644
--- a/drivers/net/ethernet/intel/e1000/Makefile
+++ b/drivers/net/ethernet/intel/e1000/Makefile
@@ -7,4 +7,4 @@
obj-$(CONFIG_E1000) += e1000.o
-e1000-objs := e1000_main.o e1000_hw.o e1000_ethtool.o e1000_param.o
+e1000-y := e1000_main.o e1000_hw.o e1000_ethtool.o e1000_param.o
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index 75f3fd1d8d6e..ea6ccf4b728b 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -116,7 +116,7 @@ struct e1000_adapter;
#define E1000_MASTER_SLAVE e1000_ms_hw_default
#endif
-#define E1000_MNG_VLAN_NONE (-1)
+#define E1000_MNG_VLAN_NONE 0xFFFF
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index d06d29c6c037..726365c567ef 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -806,7 +806,7 @@ static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
}
/* If Checksum is not Correct return error else test passed */
- if ((checksum != (u16)EEPROM_SUM) && !(*data))
+ if (checksum != EEPROM_SUM && !(*data))
*data = 2;
return *data;
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index f9328f2e669f..0e5de52b1067 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -3970,7 +3970,7 @@ s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw)
return E1000_SUCCESS;
#endif
- if (checksum == (u16)EEPROM_SUM)
+ if (checksum == EEPROM_SUM)
return E1000_SUCCESS;
else {
e_dbg("EEPROM Checksum Invalid\n");
@@ -3997,7 +3997,7 @@ s32 e1000_update_eeprom_checksum(struct e1000_hw *hw)
}
checksum += eeprom_data;
}
- checksum = (u16)EEPROM_SUM - checksum;
+ checksum = EEPROM_SUM - checksum;
if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) {
e_dbg("EEPROM Write Error\n");
return -E1000_ERR_EEPROM;
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 1d1e93686af2..292389aceb2d 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -149,8 +149,8 @@ static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
__be16 proto, u16 vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);
-static int __maybe_unused e1000_suspend(struct device *dev);
-static int __maybe_unused e1000_resume(struct device *dev);
+static int e1000_suspend(struct device *dev);
+static int e1000_resume(struct device *dev);
static void e1000_shutdown(struct pci_dev *pdev);
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -175,21 +175,18 @@ static const struct pci_error_handlers e1000_err_handler = {
.resume = e1000_io_resume,
};
-static SIMPLE_DEV_PM_OPS(e1000_pm_ops, e1000_suspend, e1000_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(e1000_pm_ops, e1000_suspend, e1000_resume);
static struct pci_driver e1000_driver = {
.name = e1000_driver_name,
.id_table = e1000_pci_tbl,
.probe = e1000_probe,
.remove = e1000_remove,
- .driver = {
- .pm = &e1000_pm_ops,
- },
+ .driver.pm = pm_sleep_ptr(&e1000_pm_ops),
.shutdown = e1000_shutdown,
.err_handler = &e1000_err_handler
};
-MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL v2");
@@ -316,8 +313,7 @@ static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
} else {
adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
}
- if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
- (vid != old_vid) &&
+ if (old_vid != E1000_MNG_VLAN_NONE && vid != old_vid &&
!test_bit(old_vid, adapter->active_vlans))
e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
old_vid);
@@ -480,10 +476,6 @@ static void e1000_down_and_stop(struct e1000_adapter *adapter)
cancel_delayed_work_sync(&adapter->phy_info_task);
cancel_delayed_work_sync(&adapter->fifo_stall_task);
-
- /* Only kill reset task if adapter is not resetting */
- if (!test_bit(__E1000_RESETTING, &adapter->flags))
- cancel_work_sync(&adapter->reset_task);
}
void e1000_down(struct e1000_adapter *adapter)
@@ -516,6 +508,8 @@ void e1000_down(struct e1000_adapter *adapter)
*/
netif_carrier_off(netdev);
+ netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_RX, NULL);
+ netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_TX, NULL);
napi_disable(&adapter->napi);
e1000_irq_disable(adapter);
@@ -1267,6 +1261,10 @@ static void e1000_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
+ /* Only kill reset task if adapter is not resetting */
+ if (!test_bit(__E1000_RESETTING, &adapter->flags))
+ cancel_work_sync(&adapter->reset_task);
+
e1000_phy_hw_reset(hw);
kfree(adapter->tx_ring);
@@ -1395,7 +1393,10 @@ int e1000_open(struct net_device *netdev)
/* From here on the code is the same as e1000_up() */
clear_bit(__E1000_DOWN, &adapter->flags);
+ netif_napi_set_irq(&adapter->napi, adapter->pdev->irq);
napi_enable(&adapter->napi);
+ netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_RX, &adapter->napi);
+ netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_TX, &adapter->napi);
e1000_irq_enable(adapter);
@@ -3507,7 +3508,9 @@ static void e1000_reset_task(struct work_struct *work)
container_of(work, struct e1000_adapter, reset_task);
e_err(drv, "Reset adapter\n");
+ rtnl_lock();
e1000_reinit_locked(adapter);
+ rtnl_unlock();
}
/**
@@ -3571,7 +3574,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
netdev_dbg(netdev, "changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (netif_running(netdev))
e1000_up(adapter);
@@ -5072,7 +5075,9 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
usleep_range(10000, 20000);
WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
+ rtnl_lock();
e1000_down(adapter);
+ rtnl_unlock();
}
status = er32(STATUS);
@@ -5135,7 +5140,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
return 0;
}
-static int __maybe_unused e1000_suspend(struct device *dev)
+static int e1000_suspend(struct device *dev)
{
int retval;
struct pci_dev *pdev = to_pci_dev(dev);
@@ -5147,7 +5152,7 @@ static int __maybe_unused e1000_suspend(struct device *dev)
return retval;
}
-static int __maybe_unused e1000_resume(struct device *dev)
+static int e1000_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -5233,16 +5238,20 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
+ rtnl_lock();
netif_device_detach(netdev);
- if (state == pci_channel_io_perm_failure)
+ if (state == pci_channel_io_perm_failure) {
+ rtnl_unlock();
return PCI_ERS_RESULT_DISCONNECT;
+ }
if (netif_running(netdev))
e1000_down(adapter);
if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
pci_disable_device(pdev);
+ rtnl_unlock();
/* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
diff --git a/drivers/net/ethernet/intel/e1000e/Makefile b/drivers/net/ethernet/intel/e1000e/Makefile
index 0baa15503c38..18f22b6374d5 100644
--- a/drivers/net/ethernet/intel/e1000e/Makefile
+++ b/drivers/net/ethernet/intel/e1000e/Makefile
@@ -10,7 +10,6 @@ subdir-ccflags-y += -I$(src)
obj-$(CONFIG_E1000E) += e1000e.o
-e1000e-objs := 82571.o ich8lan.o 80003es2lan.o \
- mac.o manage.o nvm.o phy.o \
- param.o ethtool.o netdev.o ptp.o
-
+e1000e-y := 82571.o ich8lan.o 80003es2lan.o \
+ mac.o manage.o nvm.o phy.o \
+ param.o ethtool.o netdev.o ptp.o
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index 23a58cada43a..ba331899d186 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -638,6 +638,9 @@
/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
#define NVM_SUM 0xBABA
+/* Uninitialized ("empty") checksum word value */
+#define NVM_CHECKSUM_UNINITIALIZED 0xFFFF
+
/* PBA (printed board assembly) number words */
#define NVM_PBA_OFFSET_0 8
#define NVM_PBA_OFFSET_1 9
@@ -679,8 +682,6 @@
/* PCI/PCI-X/PCI-EX Config space */
#define PCI_HEADER_TYPE_REGISTER 0x0E
-#define PCI_HEADER_TYPE_MULTIFUNC 0x80
-
#define PHY_REVISION_MASK 0xFFFFFFF0
#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
#define MAX_PHY_MULTI_PAGE_REG 0xF
@@ -805,4 +806,7 @@
/* SerDes Control */
#define E1000_GEN_POLL_TIMEOUT 640
+#define E1000_FEXTNVM12_PHYPD_CTRL_MASK 0x00C00000
+#define E1000_FEXTNVM12_PHYPD_CTRL_P1 0x00800000
+
#endif /* _E1000_DEFINES_H_ */
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index ba9c19e6994c..aa08f397988e 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -64,7 +64,7 @@ struct e1000_info;
#define AUTO_ALL_MODES 0
#define E1000_EEPROM_APME 0x0400
-#define E1000_MNG_VLAN_NONE (-1)
+#define E1000_MNG_VLAN_NONE 0xFFFF
#define DEFAULT_JUMBO 9234
@@ -319,7 +319,7 @@ struct e1000_adapter {
u16 tx_ring_count;
u16 rx_ring_count;
- struct hwtstamp_config hwtstamp_config;
+ struct kernel_hwtstamp_config hwtstamp_config;
struct delayed_work systim_overflow_work;
struct sk_buff *tx_hwtstamp_skb;
unsigned long tx_hwtstamp_start;
@@ -461,6 +461,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
#define FLAG2_CHECK_RX_HWTSTAMP BIT(13)
#define FLAG2_CHECK_SYSTIM_OVERFLOW BIT(14)
#define FLAG2_ENABLE_S0IX_FLOWS BIT(15)
+#define FLAG2_DISABLE_K1 BIT(16)
#define E1000_RX_DESC_PS(R, i) \
(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
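
[Editor's sketch, not part of the patch] The redefinition of E1000_MNG_VLAN_NONE from (-1) to 0xFFFF is what lets the (u16) casts in the comparison sites above be dropped. A minimal standalone C program illustrating the integer-promotion pitfall behind the old definition:

#include <stdio.h>
#include <stdint.h>

#define OLD_MNG_VLAN_NONE (-1)
#define NEW_MNG_VLAN_NONE 0xFFFF

int main(void)
{
	uint16_t vid = 0xFFFF;	/* "no management VLAN" sentinel */

	/* vid promotes to int 65535, so this prints 0 (never matches -1) */
	printf("%d\n", vid == OLD_MNG_VLAN_NONE);
	/* the (u16) cast was needed to make the old comparison work */
	printf("%d\n", vid == (uint16_t)OLD_MNG_VLAN_NONE);
	/* with an unsigned 16-bit sentinel the plain comparison is correct */
	printf("%d\n", vid == NEW_MNG_VLAN_NONE);
	return 0;
}
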
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index dc553c51d79a..7b1ac90b3de4 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -26,6 +26,8 @@ struct e1000_stats {
static const char e1000e_priv_flags_strings[][ETH_GSTRING_LEN] = {
#define E1000E_PRIV_FLAGS_S0IX_ENABLED BIT(0)
"s0ix-enabled",
+#define E1000E_PRIV_FLAGS_DISABLE_K1 BIT(1)
+ "disable-k1",
};
#define E1000E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(e1000e_priv_flags_strings)
@@ -156,7 +158,7 @@ static int e1000_get_link_ksettings(struct net_device *netdev,
speed = adapter->link_speed;
cmd->base.duplex = adapter->link_duplex - 1;
}
- } else if (!pm_runtime_suspended(netdev->dev.parent)) {
+ } else {
u32 status = er32(STATUS);
if (status & E1000_STATUS_LU) {
@@ -274,16 +276,13 @@ static int e1000_set_link_ksettings(struct net_device *netdev,
ethtool_convert_link_mode_to_legacy_u32(&advertising,
cmd->link_modes.advertising);
- pm_runtime_get_sync(netdev->dev.parent);
-
/* When SoL/IDER sessions are active, autoneg/speed/duplex
* cannot be changed
*/
if (hw->phy.ops.check_reset_block &&
hw->phy.ops.check_reset_block(hw)) {
e_err("Cannot change link characteristics when SoL/IDER is active.\n");
- ret_val = -EINVAL;
- goto out;
+ return -EINVAL;
}
/* MDI setting is only allowed when autoneg enabled because
@@ -291,16 +290,13 @@ static int e1000_set_link_ksettings(struct net_device *netdev,
* duplex is forced.
*/
if (cmd->base.eth_tp_mdix_ctrl) {
- if (hw->phy.media_type != e1000_media_type_copper) {
- ret_val = -EOPNOTSUPP;
- goto out;
- }
+ if (hw->phy.media_type != e1000_media_type_copper)
+ return -EOPNOTSUPP;
if ((cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
(cmd->base.autoneg != AUTONEG_ENABLE)) {
e_err("forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
- ret_val = -EINVAL;
- goto out;
+ return -EINVAL;
}
}
@@ -347,7 +343,6 @@ static int e1000_set_link_ksettings(struct net_device *netdev,
}
out:
- pm_runtime_put_sync(netdev->dev.parent);
clear_bit(__E1000_RESETTING, &adapter->state);
return ret_val;
}
@@ -383,8 +378,6 @@ static int e1000_set_pauseparam(struct net_device *netdev,
while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
usleep_range(1000, 2000);
- pm_runtime_get_sync(netdev->dev.parent);
-
if (adapter->fc_autoneg == AUTONEG_ENABLE) {
hw->fc.requested_mode = e1000_fc_default;
if (netif_running(adapter->netdev)) {
@@ -417,7 +410,6 @@ static int e1000_set_pauseparam(struct net_device *netdev,
}
out:
- pm_runtime_put_sync(netdev->dev.parent);
clear_bit(__E1000_RESETTING, &adapter->state);
return retval;
}
@@ -448,8 +440,6 @@ static void e1000_get_regs(struct net_device *netdev,
u32 *regs_buff = p;
u16 phy_data;
- pm_runtime_get_sync(netdev->dev.parent);
-
memset(p, 0, E1000_REGS_LEN * sizeof(u32));
regs->version = (1u << 24) |
@@ -495,8 +485,6 @@ static void e1000_get_regs(struct net_device *netdev,
e1e_rphy(hw, MII_STAT1000, &phy_data);
regs_buff[24] = (u32)phy_data; /* phy local receiver status */
regs_buff[25] = regs_buff[24]; /* phy remote receiver status */
-
- pm_runtime_put_sync(netdev->dev.parent);
}
static int e1000_get_eeprom_len(struct net_device *netdev)
@@ -529,8 +517,6 @@ static int e1000_get_eeprom(struct net_device *netdev,
if (!eeprom_buff)
return -ENOMEM;
- pm_runtime_get_sync(netdev->dev.parent);
-
if (hw->nvm.type == e1000_nvm_eeprom_spi) {
ret_val = e1000_read_nvm(hw, first_word,
last_word - first_word + 1,
@@ -544,8 +530,6 @@ static int e1000_get_eeprom(struct net_device *netdev,
}
}
- pm_runtime_put_sync(netdev->dev.parent);
-
if (ret_val) {
/* a read error occurred, throw away the result */
memset(eeprom_buff, 0xff, sizeof(u16) *
@@ -568,11 +552,11 @@ static int e1000_set_eeprom(struct net_device *netdev,
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
u16 *eeprom_buff;
- void *ptr;
- int max_len;
+ int ret_val = 0;
+ size_t max_len;
int first_word;
int last_word;
- int ret_val = 0;
+ void *ptr;
u16 i;
if (eeprom->len == 0)
@@ -595,8 +579,6 @@ static int e1000_set_eeprom(struct net_device *netdev,
ptr = (void *)eeprom_buff;
- pm_runtime_get_sync(netdev->dev.parent);
-
if (eeprom->offset & 1) {
/* need read/modify/write of first changed EEPROM word */
/* only the second byte of the word is being modified */
@@ -637,7 +619,6 @@ static int e1000_set_eeprom(struct net_device *netdev,
ret_val = e1000e_update_nvm_checksum(hw);
out:
- pm_runtime_put_sync(netdev->dev.parent);
kfree(eeprom_buff);
return ret_val;
}
@@ -733,8 +714,6 @@ static int e1000_set_ringparam(struct net_device *netdev,
}
}
- pm_runtime_get_sync(netdev->dev.parent);
-
e1000e_down(adapter, true);
/* We can't just free everything and then setup again, because the
@@ -773,7 +752,6 @@ err_setup_rx:
e1000e_free_tx_resources(temp_tx);
err_setup:
e1000e_up(adapter);
- pm_runtime_put_sync(netdev->dev.parent);
free_temp:
vfree(temp_tx);
vfree(temp_rx);
@@ -983,7 +961,7 @@ static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
}
/* If Checksum is not Correct return error else test passed */
- if ((checksum != (u16)NVM_SUM) && !(*data))
+ if (checksum != NVM_SUM && !(*data))
*data = 2;
return *data;
@@ -1816,8 +1794,6 @@ static void e1000_diag_test(struct net_device *netdev,
u8 autoneg;
bool if_running = netif_running(netdev);
- pm_runtime_get_sync(netdev->dev.parent);
-
set_bit(__E1000_TESTING, &adapter->state);
if (!if_running) {
@@ -1903,8 +1879,6 @@ static void e1000_diag_test(struct net_device *netdev,
}
msleep_interruptible(4 * 1000);
-
- pm_runtime_put_sync(netdev->dev.parent);
}
static void e1000_get_wol(struct net_device *netdev,
@@ -2046,15 +2020,11 @@ static int e1000_set_coalesce(struct net_device *netdev,
adapter->itr_setting = adapter->itr & ~3;
}
- pm_runtime_get_sync(netdev->dev.parent);
-
if (adapter->itr_setting != 0)
e1000e_write_itr(adapter, adapter->itr);
else
e1000e_write_itr(adapter, 0);
- pm_runtime_put_sync(netdev->dev.parent);
-
return 0;
}
@@ -2068,9 +2038,7 @@ static int e1000_nway_reset(struct net_device *netdev)
if (!adapter->hw.mac.autoneg)
return -EINVAL;
- pm_runtime_get_sync(netdev->dev.parent);
e1000e_reinit_locked(adapter);
- pm_runtime_put_sync(netdev->dev.parent);
return 0;
}
@@ -2084,12 +2052,8 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
int i;
char *p = NULL;
- pm_runtime_get_sync(netdev->dev.parent);
-
dev_get_stats(netdev, &net_stats);
- pm_runtime_put_sync(netdev->dev.parent);
-
for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
switch (e1000_gstrings_stats[i].type) {
case NETDEV_STATS:
@@ -2134,56 +2098,47 @@ static void e1000_get_strings(struct net_device __always_unused *netdev,
}
}
-static int e1000_get_rxnfc(struct net_device *netdev,
- struct ethtool_rxnfc *info,
- u32 __always_unused *rule_locs)
+static int e1000_get_rxfh_fields(struct net_device *netdev,
+ struct ethtool_rxfh_fields *info)
{
- info->data = 0;
-
- switch (info->cmd) {
- case ETHTOOL_GRXFH: {
- struct e1000_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- u32 mrqc;
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 mrqc;
- pm_runtime_get_sync(netdev->dev.parent);
- mrqc = er32(MRQC);
- pm_runtime_put_sync(netdev->dev.parent);
+ info->data = 0;
- if (!(mrqc & E1000_MRQC_RSS_FIELD_MASK))
- return 0;
+ mrqc = er32(MRQC);
- switch (info->flow_type) {
- case TCP_V4_FLOW:
- if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
- info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- fallthrough;
- case UDP_V4_FLOW:
- case SCTP_V4_FLOW:
- case AH_ESP_V4_FLOW:
- case IPV4_FLOW:
- if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
- info->data |= RXH_IP_SRC | RXH_IP_DST;
- break;
- case TCP_V6_FLOW:
- if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
- info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- fallthrough;
- case UDP_V6_FLOW:
- case SCTP_V6_FLOW:
- case AH_ESP_V6_FLOW:
- case IPV6_FLOW:
- if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
- info->data |= RXH_IP_SRC | RXH_IP_DST;
- break;
- default:
- break;
- }
+ if (!(mrqc & E1000_MRQC_RSS_FIELD_MASK))
return 0;
- }
+
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ fallthrough;
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case IPV4_FLOW:
+ if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
+ info->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case TCP_V6_FLOW:
+ if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ fallthrough;
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case IPV6_FLOW:
+ if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
+ info->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
default:
- return -EOPNOTSUPP;
+ break;
}
+ return 0;
}
static int e1000e_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
@@ -2211,13 +2166,9 @@ static int e1000e_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
return -EOPNOTSUPP;
}
- pm_runtime_get_sync(netdev->dev.parent);
-
ret_val = hw->phy.ops.acquire(hw);
- if (ret_val) {
- pm_runtime_put_sync(netdev->dev.parent);
+ if (ret_val)
return -EBUSY;
- }
/* EEE Capability */
ret_val = e1000_read_emi_reg_locked(hw, cap_addr, &phy_data);
@@ -2257,8 +2208,6 @@ release:
if (ret_val)
ret_val = -ENODATA;
- pm_runtime_put_sync(netdev->dev.parent);
-
return ret_val;
}
@@ -2299,21 +2248,17 @@ static int e1000e_set_eee(struct net_device *netdev, struct ethtool_keee *edata)
hw->dev_spec.ich8lan.eee_disable = !edata->eee_enabled;
- pm_runtime_get_sync(netdev->dev.parent);
-
/* reset the link */
if (netif_running(netdev))
e1000e_reinit_locked(adapter);
else
e1000e_reset(adapter);
- pm_runtime_put_sync(netdev->dev.parent);
-
return 0;
}
static int e1000e_get_ts_info(struct net_device *netdev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -2354,26 +2299,59 @@ static u32 e1000e_get_priv_flags(struct net_device *netdev)
if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)
priv_flags |= E1000E_PRIV_FLAGS_S0IX_ENABLED;
+ if (adapter->flags2 & FLAG2_DISABLE_K1)
+ priv_flags |= E1000E_PRIV_FLAGS_DISABLE_K1;
+
return priv_flags;
}
static int e1000e_set_priv_flags(struct net_device *netdev, u32 priv_flags)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
unsigned int flags2 = adapter->flags2;
+ unsigned int changed;
- flags2 &= ~FLAG2_ENABLE_S0IX_FLOWS;
- if (priv_flags & E1000E_PRIV_FLAGS_S0IX_ENABLED) {
- struct e1000_hw *hw = &adapter->hw;
+ flags2 &= ~(FLAG2_ENABLE_S0IX_FLOWS | FLAG2_DISABLE_K1);
- if (hw->mac.type < e1000_pch_cnp)
+ if (priv_flags & E1000E_PRIV_FLAGS_S0IX_ENABLED) {
+ if (hw->mac.type < e1000_pch_cnp) {
+ e_err("S0ix is not supported on this device\n");
return -EINVAL;
+ }
+
flags2 |= FLAG2_ENABLE_S0IX_FLOWS;
}
- if (flags2 != adapter->flags2)
+ if (priv_flags & E1000E_PRIV_FLAGS_DISABLE_K1) {
+ if (hw->mac.type < e1000_ich8lan) {
+ e_err("Disabling K1 is not supported on this device\n");
+ return -EINVAL;
+ }
+
+ flags2 |= FLAG2_DISABLE_K1;
+ }
+
+ changed = adapter->flags2 ^ flags2;
+ if (changed)
adapter->flags2 = flags2;
+ if (changed & FLAG2_DISABLE_K1) {
+ /* reset the hardware to apply the changes */
+ while (test_and_set_bit(__E1000_RESETTING,
+ &adapter->state))
+ usleep_range(1000, 2000);
+
+ if (netif_running(adapter->netdev)) {
+ e1000e_down(adapter, true);
+ e1000e_up(adapter);
+ } else {
+ e1000e_reset(adapter);
+ }
+
+ clear_bit(__E1000_RESETTING, &adapter->state);
+ }
+
return 0;
}
@@ -2402,7 +2380,7 @@ static const struct ethtool_ops e1000_ethtool_ops = {
.get_sset_count = e1000e_get_sset_count,
.get_coalesce = e1000_get_coalesce,
.set_coalesce = e1000_set_coalesce,
- .get_rxnfc = e1000_get_rxnfc,
+ .get_rxfh_fields = e1000_get_rxfh_fields,
.get_ts_info = e1000e_get_ts_info,
.get_eee = e1000e_get_eee,
.set_eee = e1000e_set_eee,
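
[Editor's sketch, not part of the patch] The ETHTOOL_GRXFH handling moves out of get_rxnfc into the dedicated get_rxfh_fields op. A minimal sketch of that op's contract for a hypothetical "mydrv" driver that hashes only IPv4 TCP on addresses and ports (names other than the RXH_* flags and the op signature used above are assumptions):

#include <linux/ethtool.h>
#include <linux/netdevice.h>

/* Report the RXH_* fields the hardware hashes for the queried flow type. */
static int mydrv_get_rxfh_fields(struct net_device *netdev,
				 struct ethtool_rxfh_fields *info)
{
	info->data = 0;

	switch (info->flow_type) {
	case TCP_V4_FLOW:
		/* hash on src/dst IP address and src/dst TCP port */
		info->data = RXH_IP_SRC | RXH_IP_DST |
			     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	default:
		break;
	}
	return 0;
}

It is wired up through struct ethtool_ops as .get_rxfh_fields = mydrv_get_rxfh_fields, and the result is what userspace sees via "ethtool -n <iface> rx-flow-hash tcp4".
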
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index 4b6e7536170a..fc8ed38aa095 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -108,8 +108,8 @@ struct e1000_hw;
#define E1000_DEV_ID_PCH_RPL_I219_V22 0x0DC8
#define E1000_DEV_ID_PCH_MTP_I219_LM18 0x550A
#define E1000_DEV_ID_PCH_MTP_I219_V18 0x550B
-#define E1000_DEV_ID_PCH_MTP_I219_LM19 0x550C
-#define E1000_DEV_ID_PCH_MTP_I219_V19 0x550D
+#define E1000_DEV_ID_PCH_ADP_I219_LM19 0x550C
+#define E1000_DEV_ID_PCH_ADP_I219_V19 0x550D
#define E1000_DEV_ID_PCH_LNP_I219_LM20 0x550E
#define E1000_DEV_ID_PCH_LNP_I219_V20 0x550F
#define E1000_DEV_ID_PCH_LNP_I219_LM21 0x5510
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index f9e94be36e97..0ff8688ac3b8 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -286,6 +286,52 @@ static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
}
/**
+ * e1000_reconfigure_k1_params - reconfigure Kumeran K1 parameters.
+ * @hw: pointer to the HW structure
+ *
+ * By default K1 is enabled after MAC reset, so this function only
+ * disables it.
+ *
+ * Context: PHY semaphore must be held by caller.
+ * Return: 0 on success, negative on failure
+ */
+static s32 e1000_reconfigure_k1_params(struct e1000_hw *hw)
+{
+ u16 phy_timeout;
+ u32 fextnvm12;
+ s32 ret_val;
+
+ if (hw->mac.type < e1000_pch_mtp) {
+ if (hw->adapter->flags2 & FLAG2_DISABLE_K1)
+ return e1000_configure_k1_ich8lan(hw, false);
+ return 0;
+ }
+
+ /* Change Kumeran K1 power down state from P0s to P1 */
+ fextnvm12 = er32(FEXTNVM12);
+ fextnvm12 &= ~E1000_FEXTNVM12_PHYPD_CTRL_MASK;
+ fextnvm12 |= E1000_FEXTNVM12_PHYPD_CTRL_P1;
+ ew32(FEXTNVM12, fextnvm12);
+
+ /* Wait for the interface to settle */
+ usleep_range(1000, 1100);
+ if (hw->adapter->flags2 & FLAG2_DISABLE_K1)
+ return e1000_configure_k1_ich8lan(hw, false);
+
+ /* Change K1 exit timeout */
+ ret_val = e1e_rphy_locked(hw, I217_PHY_TIMEOUTS_REG,
+ &phy_timeout);
+ if (ret_val)
+ return ret_val;
+
+ phy_timeout &= ~I217_PHY_TIMEOUTS_K1_EXIT_TO_MASK;
+ phy_timeout |= 0xF00;
+
+ return e1e_wphy_locked(hw, I217_PHY_TIMEOUTS_REG,
+ phy_timeout);
+}
+
+/**
* e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
* @hw: pointer to the HW structure
*
@@ -327,15 +373,22 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
* LANPHYPC Value bit to force the interconnect to PCIe mode.
*/
switch (hw->mac.type) {
+ case e1000_pch_mtp:
+ case e1000_pch_lnp:
+ case e1000_pch_ptp:
+ case e1000_pch_nvp:
+ /* At this point the PHY might be inaccessible so don't
+ * propagate the failure
+ */
+ if (e1000_reconfigure_k1_params(hw))
+ e_dbg("Failed to reconfigure K1 parameters\n");
+
+ fallthrough;
case e1000_pch_lpt:
case e1000_pch_spt:
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
- case e1000_pch_mtp:
- case e1000_pch_lnp:
- case e1000_pch_ptp:
- case e1000_pch_nvp:
if (e1000_phy_is_accessible_pchlan(hw))
break;
@@ -419,8 +472,20 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
* the PHY is in.
*/
ret_val = hw->phy.ops.check_reset_block(hw);
- if (ret_val)
+ if (ret_val) {
e_err("ME blocked access to PHY after reset\n");
+ goto out;
+ }
+
+ if (hw->mac.type >= e1000_pch_mtp) {
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val) {
+ e_err("Failed to reconfigure K1 parameters\n");
+ goto out;
+ }
+ ret_val = e1000_reconfigure_k1_params(hw);
+ hw->phy.ops.release(hw);
+ }
}
out:
@@ -1109,6 +1174,46 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
}
/**
+ * e1000e_force_smbus - Force interfaces to transition to SMBUS mode.
+ * @hw: pointer to the HW structure
+ *
+ * Force the MAC and the PHY to SMBUS mode. Assumes semaphore already
+ * acquired.
+ *
+ * Return: 0 on success, negative errno on failure.
+ **/
+static s32 e1000e_force_smbus(struct e1000_hw *hw)
+{
+ u16 smb_ctrl = 0;
+ u32 ctrl_ext;
+ s32 ret_val;
+
+ /* Switching PHY interface always returns MDI error
+ * so disable retry mechanism to avoid wasting time
+ */
+ e1000e_disable_phy_retry(hw);
+
+ /* Force SMBus mode in the PHY */
+ ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &smb_ctrl);
+ if (ret_val) {
+ e1000e_enable_phy_retry(hw);
+ return ret_val;
+ }
+
+ smb_ctrl |= CV_SMB_CTRL_FORCE_SMBUS;
+ e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, smb_ctrl);
+
+ e1000e_enable_phy_retry(hw);
+
+ /* Force SMBus mode in the MAC */
+ ctrl_ext = er32(CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_FORCE_SMBUS;
+ ew32(CTRL_EXT, ctrl_ext);
+
+ return 0;
+}
+
+/**
* e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
* @hw: pointer to the HW structure
* @to_sx: boolean indicating a system power state transition to Sx
@@ -1165,6 +1270,12 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
if (ret_val)
goto out;
+ ret_val = e1000e_force_smbus(hw);
+ if (ret_val) {
+ e_dbg("Failed to force SMBUS: %d\n", ret_val);
+ goto release;
+ }
+
/* Si workaround for ULP entry flow on i127/rev6 h/w. Enable
* LPLU and disable Gig speed when entering ULP
*/
@@ -4170,6 +4281,8 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
ret_val = e1000e_update_nvm_checksum(hw);
if (ret_val)
return ret_val;
+ } else if (hw->mac.type == e1000_pch_tgp) {
+ return 0;
}
}
@@ -4842,6 +4955,16 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
u16 i;
e1000_initialize_hw_bits_ich8lan(hw);
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_reconfigure_k1_params(hw);
+ hw->phy.ops.release(hw);
+ if (ret_val) {
+ e_dbg("Error failed to reconfigure K1 parameters\n");
+ return ret_val;
+ }
/* Initialize identification LED */
ret_val = mac->ops.id_led_init(hw);
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 2504b11c3169..5feb589a9b5f 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -219,6 +219,10 @@
#define I217_PLL_CLOCK_GATE_REG PHY_REG(772, 28)
#define I217_PLL_CLOCK_GATE_MASK 0x07FF
+/* PHY Timeouts */
+#define I217_PHY_TIMEOUTS_REG PHY_REG(770, 21)
+#define I217_PHY_TIMEOUTS_K1_EXIT_TO_MASK 0x0FC0
+
#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in ms */
/* Inband Control */
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index d7df2a0ed629..44249dd91bd6 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -331,8 +331,21 @@ void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
}
/* replace the entire MTA table */
- for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
+ for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) {
E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]);
+
+ if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
+ /*
+ * Do not queue up too many posted writes to prevent
+ * increased latency for other devices on the
+ * interconnect. Flush after each 8th posted write,
+ * to keep additional execution time low while still
+ * preventing increased latency.
+ */
+ if (!(i % 8) && i)
+ e1e_flush();
+ }
+ }
e1e_flush();
}
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 3692fce20195..ddbe2f7d8112 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -2761,7 +2761,7 @@ static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter)
rctl &= ~(E1000_RCTL_VFE | E1000_RCTL_CFIEN);
ew32(RCTL, rctl);
- if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) {
+ if (adapter->mng_vlan_id != E1000_MNG_VLAN_NONE) {
e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
adapter->mng_vlan_id);
adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
@@ -2828,7 +2828,7 @@ static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
adapter->mng_vlan_id = vid;
}
- if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid))
+ if (old_vid != E1000_MNG_VLAN_NONE && vid != old_vid)
e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), old_vid);
}
@@ -2928,11 +2928,8 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
tx_ring->head = adapter->hw.hw_addr + E1000_TDH(0);
tx_ring->tail = adapter->hw.hw_addr + E1000_TDT(0);
- writel(0, tx_ring->head);
if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
e1000e_update_tdt_wa(tx_ring, 0);
- else
- writel(0, tx_ring->tail);
/* Set the Tx Interrupt Delay register */
ew32(TIDV, adapter->tx_int_delay);
@@ -3253,11 +3250,8 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
rx_ring->head = adapter->hw.hw_addr + E1000_RDH(0);
rx_ring->tail = adapter->hw.hw_addr + E1000_RDT(0);
- writel(0, rx_ring->head);
if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
e1000e_update_rdt_wa(rx_ring, 0);
- else
- writel(0, rx_ring->tail);
/* Enable Receive Checksum Offload for TCP and UDP */
rxcsum = er32(RXCSUM);
@@ -3540,9 +3534,6 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
- case e1000_pch_mtp:
- case e1000_pch_lnp:
- case e1000_pch_ptp:
case e1000_pch_nvp:
if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
/* Stable 24MHz frequency */
@@ -3558,6 +3549,17 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
adapter->cc.shift = shift;
}
break;
+ case e1000_pch_mtp:
+ case e1000_pch_lnp:
+ case e1000_pch_ptp:
+ /* System firmware can misreport this value, so set it to a
+ * stable 38400KHz frequency.
+ */
+ incperiod = INCPERIOD_38400KHZ;
+ incvalue = INCVALUE_38400KHZ;
+ shift = INCVALUE_SHIFT_38400KHZ;
+ adapter->cc.shift = shift;
+ break;
case e1000_82574:
case e1000_82583:
/* Stable 25MHz frequency */
@@ -3580,6 +3582,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
* e1000e_config_hwtstamp - configure the hwtstamp registers and enable/disable
* @adapter: board private structure
* @config: timestamp configuration
+ * @extack: netlink extended ACK for error report
*
* Outgoing time stamping can be enabled and disabled. Play nice and
* disable it when requested, although it shouldn't cause any overhead
@@ -3593,7 +3596,8 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
* exception of "all V2 events regardless of level 2 or 4".
**/
static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct e1000_hw *hw = &adapter->hw;
u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
@@ -3604,8 +3608,10 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
bool is_l2 = false;
u32 regval;
- if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
+ if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) {
+ NL_SET_ERR_MSG(extack, "No HW timestamp support");
return -EINVAL;
+ }
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
@@ -3614,6 +3620,7 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
case HWTSTAMP_TX_ON:
break;
default:
+ NL_SET_ERR_MSG(extack, "Unsupported TX HW timestamp type");
return -ERANGE;
}
@@ -3687,6 +3694,7 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
config->rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
+ NL_SET_ERR_MSG(extack, "Unsupported RX HW timestamp filter");
return -ERANGE;
}
@@ -3699,7 +3707,8 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
ew32(TSYNCTXCTL, regval);
if ((er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) !=
(regval & E1000_TSYNCTXCTL_ENABLED)) {
- e_err("Timesync Tx Control register not set as expected\n");
+ NL_SET_ERR_MSG(extack,
+ "Timesync Tx Control register not set as expected");
return -EAGAIN;
}
@@ -3712,7 +3721,8 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
E1000_TSYNCRXCTL_TYPE_MASK)) !=
(regval & (E1000_TSYNCRXCTL_ENABLED |
E1000_TSYNCRXCTL_TYPE_MASK))) {
- e_err("Timesync Rx Control register not set as expected\n");
+ NL_SET_ERR_MSG(extack,
+ "Timesync Rx Control register not set as expected");
return -EAGAIN;
}
@@ -3907,6 +3917,7 @@ static void e1000e_systim_reset(struct e1000_adapter *adapter)
{
struct ptp_clock_info *info = &adapter->ptp_clock_info;
struct e1000_hw *hw = &adapter->hw;
+ struct netlink_ext_ack extack = {};
unsigned long flags;
u32 timinca;
s32 ret_val;
@@ -3938,7 +3949,12 @@ static void e1000e_systim_reset(struct e1000_adapter *adapter)
spin_unlock_irqrestore(&adapter->systim_lock, flags);
/* restore the previous hwtstamp configuration settings */
- e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config);
+ ret_val = e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config,
+ &extack);
+ if (ret_val) {
+ if (extack._msg)
+ e_err("%s\n", extack._msg);
+ }
}
/**
@@ -4293,8 +4309,8 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset)
napi_synchronize(&adapter->napi);
- del_timer_sync(&adapter->watchdog_timer);
- del_timer_sync(&adapter->phy_info_timer);
+ timer_delete_sync(&adapter->watchdog_timer);
+ timer_delete_sync(&adapter->phy_info_timer);
spin_lock(&adapter->stats64_lock);
e1000e_update_stats(adapter);
@@ -4420,7 +4436,7 @@ u64 e1000e_read_systim(struct e1000_adapter *adapter,
* e1000e_cyclecounter_read - read raw cycle counter (used by time counter)
* @cc: cyclecounter structure
**/
-static u64 e1000e_cyclecounter_read(const struct cyclecounter *cc)
+static u64 e1000e_cyclecounter_read(struct cyclecounter *cc)
{
struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter,
cc);
@@ -4613,6 +4629,7 @@ int e1000e_open(struct net_device *netdev)
struct e1000_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
int err;
+ int irq;
/* disallow open during test */
if (test_bit(__E1000_TESTING, &adapter->state))
@@ -4676,7 +4693,15 @@ int e1000e_open(struct net_device *netdev)
/* From here on the code is the same as e1000e_up() */
clear_bit(__E1000_DOWN, &adapter->state);
+ if (adapter->int_mode == E1000E_INT_MODE_MSIX)
+ irq = adapter->msix_entries[0].vector;
+ else
+ irq = adapter->pdev->irq;
+
+ netif_napi_set_irq(&adapter->napi, irq);
napi_enable(&adapter->napi);
+ netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_RX, &adapter->napi);
+ netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_TX, &adapter->napi);
e1000_irq_enable(adapter);
@@ -4735,6 +4760,8 @@ int e1000e_close(struct net_device *netdev)
netdev_info(netdev, "NIC Link is Down\n");
}
+ netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_RX, NULL);
+ netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_TX, NULL);
napi_disable(&adapter->napi);
e1000e_free_tx_resources(adapter->tx_ring);
@@ -4834,7 +4861,8 @@ static void e1000e_update_phy_task(struct work_struct *work)
**/
static void e1000_update_phy_info(struct timer_list *t)
{
- struct e1000_adapter *adapter = from_timer(adapter, t, phy_info_timer);
+ struct e1000_adapter *adapter = timer_container_of(adapter, t,
+ phy_info_timer);
if (test_bit(__E1000_DOWN, &adapter->state))
return;
@@ -5170,7 +5198,8 @@ static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
**/
static void e1000_watchdog(struct timer_list *t)
{
- struct e1000_adapter *adapter = from_timer(adapter, t, watchdog_timer);
+ struct e1000_adapter *adapter = timer_container_of(adapter, t,
+ watchdog_timer);
/* Do the rest outside of interrupt context */
schedule_work(&adapter->watchdog_task);
@@ -6038,7 +6067,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
adapter->max_frame_size = max_frame;
netdev_dbg(netdev, "changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
pm_runtime_get_sync(netdev->dev.parent);
@@ -6074,8 +6103,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
-static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
- int cmd)
+static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct mii_ioctl_data *data = if_mii(ifr);
@@ -6135,7 +6163,8 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
/**
* e1000e_hwtstamp_set - control hardware time stamping
* @netdev: network interface device structure
- * @ifr: interface request
+ * @config: timestamp configuration
+ * @extack: netlink extended ACK report
*
* Outgoing time stamping can be enabled and disabled. Play nice and
* disable it when requested, although it shouldn't cause any overhead
@@ -6148,20 +6177,18 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
* specified. Matching the kind of event packet is not supported, with the
* exception of "all V2 events regardless of level 2 or 4".
**/
-static int e1000e_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
+static int e1000e_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- struct hwtstamp_config config;
int ret_val;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- ret_val = e1000e_config_hwtstamp(adapter, &config);
+ ret_val = e1000e_config_hwtstamp(adapter, config, extack);
if (ret_val)
return ret_val;
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
@@ -6173,38 +6200,23 @@ static int e1000e_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
* by hardware so notify the caller the requested packets plus
* some others are time stamped.
*/
- config.rx_filter = HWTSTAMP_FILTER_SOME;
+ config->rx_filter = HWTSTAMP_FILTER_SOME;
break;
default:
break;
}
- return copy_to_user(ifr->ifr_data, &config,
- sizeof(config)) ? -EFAULT : 0;
+ return 0;
}
-static int e1000e_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
+static int e1000e_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *kernel_config)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- return copy_to_user(ifr->ifr_data, &adapter->hwtstamp_config,
- sizeof(adapter->hwtstamp_config)) ? -EFAULT : 0;
-}
+ *kernel_config = adapter->hwtstamp_config;
-static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
- switch (cmd) {
- case SIOCGMIIPHY:
- case SIOCGMIIREG:
- case SIOCSMIIREG:
- return e1000_mii_ioctl(netdev, ifr, cmd);
- case SIOCSHWTSTAMP:
- return e1000e_hwtstamp_set(netdev, ifr);
- case SIOCGHWTSTAMP:
- return e1000e_hwtstamp_get(netdev, ifr);
- default:
- return -EOPNOTSUPP;
- }
+ return 0;
}
static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
@@ -6363,49 +6375,49 @@ static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter)
mac_data |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
ew32(EXTCNF_CTRL, mac_data);
- /* Enable the Dynamic Power Gating in the MAC */
- mac_data = er32(FEXTNVM7);
- mac_data |= BIT(22);
- ew32(FEXTNVM7, mac_data);
-
/* Disable disconnected cable conditioning for Power Gating */
mac_data = er32(DPGFR);
mac_data |= BIT(2);
ew32(DPGFR, mac_data);
- /* Don't wake from dynamic Power Gating with clock request */
- mac_data = er32(FEXTNVM12);
- mac_data |= BIT(12);
- ew32(FEXTNVM12, mac_data);
-
- /* Ungate PGCB clock */
- mac_data = er32(FEXTNVM9);
- mac_data &= ~BIT(28);
- ew32(FEXTNVM9, mac_data);
-
- /* Enable K1 off to enable mPHY Power Gating */
- mac_data = er32(FEXTNVM6);
- mac_data |= BIT(31);
- ew32(FEXTNVM6, mac_data);
-
- /* Enable mPHY power gating for any link and speed */
- mac_data = er32(FEXTNVM8);
- mac_data |= BIT(9);
- ew32(FEXTNVM8, mac_data);
-
/* Enable the Dynamic Clock Gating in the DMA and MAC */
mac_data = er32(CTRL_EXT);
mac_data |= E1000_CTRL_EXT_DMA_DYN_CLK_EN;
ew32(CTRL_EXT, mac_data);
-
- /* No MAC DPG gating SLP_S0 in modern standby
- * Switch the logic of the lanphypc to use PMC counter
- */
- mac_data = er32(FEXTNVM5);
- mac_data |= BIT(7);
- ew32(FEXTNVM5, mac_data);
}
+ /* Enable the Dynamic Power Gating in the MAC */
+ mac_data = er32(FEXTNVM7);
+ mac_data |= BIT(22);
+ ew32(FEXTNVM7, mac_data);
+
+ /* Don't wake from dynamic Power Gating with clock request */
+ mac_data = er32(FEXTNVM12);
+ mac_data |= BIT(12);
+ ew32(FEXTNVM12, mac_data);
+
+ /* Ungate PGCB clock */
+ mac_data = er32(FEXTNVM9);
+ mac_data &= ~BIT(28);
+ ew32(FEXTNVM9, mac_data);
+
+ /* Enable K1 off to enable mPHY Power Gating */
+ mac_data = er32(FEXTNVM6);
+ mac_data |= BIT(31);
+ ew32(FEXTNVM6, mac_data);
+
+ /* Enable mPHY power gating for any link and speed */
+ mac_data = er32(FEXTNVM8);
+ mac_data |= BIT(9);
+ ew32(FEXTNVM8, mac_data);
+
+ /* No MAC DPG gating SLP_S0 in modern standby
+ * Switch the logic of the lanphypc to use PMC counter
+ */
+ mac_data = er32(FEXTNVM5);
+ mac_data |= BIT(7);
+ ew32(FEXTNVM5, mac_data);
+
/* Disable the time synchronization clock */
mac_data = er32(FEXTNVM7);
mac_data |= BIT(31);
@@ -6498,33 +6510,6 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
} else {
/* Request driver unconfigure the device from S0ix */
- /* Disable the Dynamic Power Gating in the MAC */
- mac_data = er32(FEXTNVM7);
- mac_data &= 0xFFBFFFFF;
- ew32(FEXTNVM7, mac_data);
-
- /* Disable mPHY power gating for any link and speed */
- mac_data = er32(FEXTNVM8);
- mac_data &= ~BIT(9);
- ew32(FEXTNVM8, mac_data);
-
- /* Disable K1 off */
- mac_data = er32(FEXTNVM6);
- mac_data &= ~BIT(31);
- ew32(FEXTNVM6, mac_data);
-
- /* Disable Ungate PGCB clock */
- mac_data = er32(FEXTNVM9);
- mac_data |= BIT(28);
- ew32(FEXTNVM9, mac_data);
-
- /* Cancel not waking from dynamic
- * Power Gating with clock request
- */
- mac_data = er32(FEXTNVM12);
- mac_data &= ~BIT(12);
- ew32(FEXTNVM12, mac_data);
-
/* Cancel disable disconnected cable conditioning
* for Power Gating
*/
@@ -6537,13 +6522,6 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
mac_data &= 0xFFF7FFFF;
ew32(CTRL_EXT, mac_data);
- /* Revert the lanphypc logic to use the internal Gbe counter
- * and not the PMC counter
- */
- mac_data = er32(FEXTNVM5);
- mac_data &= 0xFFFFFF7F;
- ew32(FEXTNVM5, mac_data);
-
/* Enable the periodic inband message,
* Request PCIe clock in K1 page770_17[10:9] =01b
*/
@@ -6581,6 +6559,40 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
mac_data &= ~BIT(31);
mac_data |= BIT(0);
ew32(FEXTNVM7, mac_data);
+
+ /* Disable the Dynamic Power Gating in the MAC */
+ mac_data = er32(FEXTNVM7);
+ mac_data &= 0xFFBFFFFF;
+ ew32(FEXTNVM7, mac_data);
+
+ /* Disable mPHY power gating for any link and speed */
+ mac_data = er32(FEXTNVM8);
+ mac_data &= ~BIT(9);
+ ew32(FEXTNVM8, mac_data);
+
+ /* Disable K1 off */
+ mac_data = er32(FEXTNVM6);
+ mac_data &= ~BIT(31);
+ ew32(FEXTNVM6, mac_data);
+
+ /* Disable Ungate PGCB clock */
+ mac_data = er32(FEXTNVM9);
+ mac_data |= BIT(28);
+ ew32(FEXTNVM9, mac_data);
+
+ /* Cancel not waking from dynamic
+ * Power Gating with clock request
+ */
+ mac_data = er32(FEXTNVM12);
+ mac_data &= ~BIT(12);
+ ew32(FEXTNVM12, mac_data);
+
+ /* Revert the lanphypc logic to use the internal Gbe counter
+ * and not the PMC counter
+ */
+ mac_data = er32(FEXTNVM5);
+ mac_data &= 0xFFFFFF7F;
+ ew32(FEXTNVM5, mac_data);
}
static int e1000e_pm_freeze(struct device *dev)
@@ -6623,7 +6635,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
struct e1000_hw *hw = &adapter->hw;
u32 ctrl, ctrl_ext, rctl, status, wufc;
int retval = 0;
- u16 smb_ctrl;
/* Runtime suspend should only enable wakeup for link changes */
if (runtime)
@@ -6672,8 +6683,10 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
/* enable wakeup by the PHY */
retval = e1000_init_phy_wakeup(adapter, wufc);
- if (retval)
- return retval;
+ if (retval) {
+ e_err("Failed to enable wakeup\n");
+ goto skip_phy_configurations;
+ }
} else {
/* enable wakeup by the MAC */
ew32(WUFC, wufc);
@@ -6694,26 +6707,11 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
* or broadcast.
*/
retval = e1000_enable_ulp_lpt_lp(hw, !runtime);
- if (retval)
- return retval;
+ if (retval) {
+ e_err("Failed to enable ULP\n");
+ goto skip_phy_configurations;
+ }
}
-
- /* Force SMBUS to allow WOL */
- /* Switching PHY interface always returns MDI error
- * so disable retry mechanism to avoid wasting time
- */
- e1000e_disable_phy_retry(hw);
-
- e1e_rphy(hw, CV_SMB_CTRL, &smb_ctrl);
- smb_ctrl |= CV_SMB_CTRL_FORCE_SMBUS;
- e1e_wphy(hw, CV_SMB_CTRL, smb_ctrl);
-
- e1000e_enable_phy_retry(hw);
-
- /* Force SMBus mode in MAC */
- ctrl_ext = er32(CTRL_EXT);
- ctrl_ext |= E1000_CTRL_EXT_FORCE_SMBUS;
- ew32(CTRL_EXT, ctrl_ext);
}
/* Ensure that the appropriate bits are set in LPI_CTRL
@@ -6744,6 +6742,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
hw->phy.ops.release(hw);
}
+skip_phy_configurations:
/* Release control of h/w to f/w. If f/w is AMT enabled, this
* would have already happened in close and is redundant.
*/
@@ -6968,13 +6967,13 @@ static int __e1000_resume(struct pci_dev *pdev)
return 0;
}
-static __maybe_unused int e1000e_pm_prepare(struct device *dev)
+static int e1000e_pm_prepare(struct device *dev)
{
return pm_runtime_suspended(dev) &&
pm_suspend_via_firmware();
}
-static __maybe_unused int e1000e_pm_suspend(struct device *dev)
+static int e1000e_pm_suspend(struct device *dev)
{
struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -6986,18 +6985,16 @@ static __maybe_unused int e1000e_pm_suspend(struct device *dev)
e1000e_pm_freeze(dev);
rc = __e1000_shutdown(pdev, false);
- if (rc) {
- e1000e_pm_thaw(dev);
- } else {
+ if (!rc) {
/* Introduce S0ix implementation */
if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)
e1000e_s0ix_entry_flow(adapter);
}
- return rc;
+ return 0;
}
-static __maybe_unused int e1000e_pm_resume(struct device *dev)
+static int e1000e_pm_resume(struct device *dev)
{
struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -7031,7 +7028,7 @@ static __maybe_unused int e1000e_pm_runtime_idle(struct device *dev)
return -EBUSY;
}
-static __maybe_unused int e1000e_pm_runtime_resume(struct device *dev)
+static int e1000e_pm_runtime_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -7050,7 +7047,7 @@ static __maybe_unused int e1000e_pm_runtime_resume(struct device *dev)
return rc;
}
-static __maybe_unused int e1000e_pm_runtime_suspend(struct device *dev)
+static int e1000e_pm_runtime_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -7198,7 +7195,6 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
"Cannot re-enable PCI device after reset.\n");
result = PCI_ERS_RESULT_DISCONNECT;
} else {
- pdev->state_saved = true;
pci_restore_state(pdev);
pci_set_master(pdev);
@@ -7356,9 +7352,11 @@ static const struct net_device_ops e1000e_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = e1000_netpoll,
#endif
- .ndo_set_features = e1000_set_features,
- .ndo_fix_features = e1000_fix_features,
+ .ndo_set_features = e1000_set_features,
+ .ndo_fix_features = e1000_fix_features,
.ndo_features_check = passthru_features_check,
+ .ndo_hwtstamp_get = e1000e_hwtstamp_get,
+ .ndo_hwtstamp_set = e1000e_hwtstamp_set,
};
/**
@@ -7676,6 +7674,9 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* init PTP hardware clock */
e1000e_ptp_init(adapter);
+ if (hw->mac.type >= e1000_pch_mtp)
+ adapter->flags2 |= FLAG2_DISABLE_K1;
+
/* reset the hardware with the new settings */
e1000e_reset(adapter);
@@ -7751,8 +7752,8 @@ static void e1000_remove(struct pci_dev *pdev)
* from being rescheduled.
*/
set_bit(__E1000_DOWN, &adapter->state);
- del_timer_sync(&adapter->watchdog_timer);
- del_timer_sync(&adapter->phy_info_timer);
+ timer_delete_sync(&adapter->watchdog_timer);
+ timer_delete_sync(&adapter->phy_info_timer);
cancel_work_sync(&adapter->reset_task);
cancel_work_sync(&adapter->watchdog_task);
@@ -7914,10 +7915,10 @@ static const struct pci_device_id e1000_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_adp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM22), board_pch_adp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V22), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM19), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V19), board_pch_adp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_mtp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_mtp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_mtp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_mtp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_mtp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_mtp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_mtp },
@@ -7937,8 +7938,7 @@ static const struct pci_device_id e1000_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
-static const struct dev_pm_ops e1000_pm_ops = {
-#ifdef CONFIG_PM_SLEEP
+static const struct dev_pm_ops e1000e_pm_ops = {
.prepare = e1000e_pm_prepare,
.suspend = e1000e_pm_suspend,
.resume = e1000e_pm_resume,
@@ -7946,9 +7946,8 @@ static const struct dev_pm_ops e1000_pm_ops = {
.thaw = e1000e_pm_thaw,
.poweroff = e1000e_pm_suspend,
.restore = e1000e_pm_resume,
-#endif
- SET_RUNTIME_PM_OPS(e1000e_pm_runtime_suspend, e1000e_pm_runtime_resume,
- e1000e_pm_runtime_idle)
+ RUNTIME_PM_OPS(e1000e_pm_runtime_suspend, e1000e_pm_runtime_resume,
+ e1000e_pm_runtime_idle)
};
/* PCI Device API Driver */
@@ -7957,9 +7956,7 @@ static struct pci_driver e1000_driver = {
.id_table = e1000_pci_tbl,
.probe = e1000_probe,
.remove = e1000_remove,
- .driver = {
- .pm = &e1000_pm_ops,
- },
+ .driver.pm = pm_ptr(&e1000e_pm_ops),
.shutdown = e1000_shutdown,
.err_handler = &e1000_err_handler
};
@@ -7991,7 +7988,6 @@ static void __exit e1000_exit_module(void)
}
module_exit(e1000_exit_module);
-MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c
index e609f4df86f4..4bde1c9de1b9 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.c
+++ b/drivers/net/ethernet/intel/e1000e/nvm.c
@@ -558,7 +558,13 @@ s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw)
checksum += nvm_data;
}
- if (checksum != (u16)NVM_SUM) {
+ if (hw->mac.type == e1000_pch_tgp &&
+ nvm_data == NVM_CHECKSUM_UNINITIALIZED) {
+ e_dbg("Uninitialized NVM Checksum on TGP platform - ignoring\n");
+ return 0;
+ }
+
+ if (checksum != NVM_SUM) {
e_dbg("NVM Checksum Invalid\n");
return -E1000_ERR_NVM;
}
@@ -588,7 +594,7 @@ s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw)
}
checksum += nvm_data;
}
- checksum = (u16)NVM_SUM - checksum;
+ checksum = NVM_SUM - checksum;
ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum);
if (ret_val)
e_dbg("NVM Write Error while updating checksum.\n");
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 93544f1cc2a5..f7ae0e0aa4a4 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -157,7 +157,7 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
* the lower time out
*/
for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
- usleep_range(50, 60);
+ udelay(50);
mdic = er32(MDIC);
if (mdic & E1000_MDIC_READY)
break;
@@ -181,7 +181,7 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
* reading duplicate data in the next MDIC transaction.
*/
if (hw->mac.type == e1000_pch2lan)
- usleep_range(100, 150);
+ udelay(100);
if (success) {
*data = (u16)mdic;
@@ -237,7 +237,7 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
* the lower time out
*/
for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
- usleep_range(50, 60);
+ udelay(50);
mdic = er32(MDIC);
if (mdic & E1000_MDIC_READY)
break;
@@ -261,7 +261,7 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
* reading duplicate data in the next MDIC transaction.
*/
if (hw->mac.type == e1000_pch2lan)
- usleep_range(100, 150);
+ udelay(100);
if (success)
return 0;
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index bbcfd529399b..ec39e35f3857 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -124,7 +124,8 @@ static int e1000e_phc_get_syncdevicetime(ktime_t *device,
sys_cycles = er32(PLTSTMPH);
sys_cycles <<= 32;
sys_cycles |= er32(PLTSTMPL);
- *system = convert_art_to_tsc(sys_cycles);
+ system->cycles = sys_cycles;
+ system->cs_id = CSID_X86_ART;
return 0;
}
@@ -228,14 +229,11 @@ static void e1000e_systim_overflow_work(struct work_struct *work)
systim_overflow_work.work);
struct e1000_hw *hw = &adapter->hw;
struct timespec64 ts;
- u64 ns;
/* Update the timecounter */
- ns = timecounter_read(&adapter->tc);
+ ts = ns_to_timespec64(timecounter_read(&adapter->tc));
- ts = ns_to_timespec64(ns);
- e_dbg("SYSTIM overflow check at %lld.%09lu\n",
- (long long) ts.tv_sec, ts.tv_nsec);
+ e_dbg("SYSTIM overflow check at %ptSp\n", &ts);
schedule_delayed_work(&adapter->systim_overflow_work,
E1000_SYSTIM_OVERFLOW_PERIOD);
@@ -294,15 +292,17 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
- case e1000_pch_mtp:
- case e1000_pch_lnp:
- case e1000_pch_ptp:
case e1000_pch_nvp:
if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)
adapter->ptp_clock_info.max_adj = MAX_PPB_24MHZ;
else
adapter->ptp_clock_info.max_adj = MAX_PPB_38400KHZ;
break;
+ case e1000_pch_mtp:
+ case e1000_pch_lnp:
+ case e1000_pch_ptp:
+ adapter->ptp_clock_info.max_adj = MAX_PPB_38400KHZ;
+ break;
case e1000_82574:
case e1000_82583:
adapter->ptp_clock_info.max_adj = MAX_PPB_25MHZ;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index 6119a4108838..65a2816142d9 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -189,13 +189,14 @@ struct fm10k_q_vector {
struct fm10k_ring_container rx, tx;
struct napi_struct napi;
+ struct rcu_head rcu; /* to avoid race with update stats on free */
+
cpumask_t affinity_mask;
char name[IFNAMSIZ + 9];
#ifdef CONFIG_DEBUG_FS
struct dentry *dbg_q_vector;
#endif /* CONFIG_DEBUG_FS */
- struct rcu_head rcu; /* to avoid race with update stats on free */
/* for dynamic allocation of rings associated with this q_vector */
struct fm10k_ring ring[] ____cacheline_internodealigned_in_smp;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.c b/drivers/net/ethernet/intel/fm10k/fm10k_common.c
index f51a63fca513..1f919a50c765 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_common.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.c
@@ -447,17 +447,16 @@ void fm10k_update_hw_stats_q(struct fm10k_hw *hw, struct fm10k_hw_stats_q *q,
/**
* fm10k_unbind_hw_stats_q - Unbind the queue counters from their queues
* @q: pointer to the ring of hardware statistics queue
- * @idx: index pointing to the start of the ring iteration
* @count: number of queues to iterate over
*
* Function invalidates the index values for the queues so any updates that
* may have happened are ignored and the base for the queue stats is reset.
**/
-void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 idx, u32 count)
+void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 count)
{
u32 i;
- for (i = 0; i < count; i++, idx++, q++) {
+ for (i = 0; i < count; i++, q++) {
q->rx_stats_idx = 0;
q->tx_stats_idx = 0;
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.h b/drivers/net/ethernet/intel/fm10k/fm10k_common.h
index 4c48fb73b3e7..13fca6a91a01 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_common.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.h
@@ -43,6 +43,6 @@ u32 fm10k_read_hw_stats_32b(struct fm10k_hw *hw, u32 addr,
void fm10k_update_hw_stats_q(struct fm10k_hw *hw, struct fm10k_hw_stats_q *q,
u32 idx, u32 count);
#define fm10k_unbind_hw_stats_32b(s) ((s)->base_h = 0)
-void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 idx, u32 count);
+void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 count);
s32 fm10k_get_host_state_generic(struct fm10k_hw *hw, bool *host_ready);
#endif /* _FM10K_COMMON_H_ */
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index 1bc5b6c0b897..76e42abca965 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -560,7 +560,7 @@ static int fm10k_set_ringparam(struct net_device *netdev,
/* allocate temporary buffer to store rings in */
i = max_t(int, interface->num_tx_queues, interface->num_rx_queues);
- temp_ring = vmalloc(array_size(i, sizeof(struct fm10k_ring)));
+ temp_ring = vmalloc_array(i, sizeof(struct fm10k_ring));
if (!temp_ring) {
err = -ENOMEM;
@@ -691,9 +691,11 @@ static int fm10k_set_coalesce(struct net_device *dev,
return 0;
}
-static int fm10k_get_rss_hash_opts(struct fm10k_intfc *interface,
- struct ethtool_rxnfc *cmd)
+static int fm10k_get_rssh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct fm10k_intfc *interface = netdev_priv(dev);
+
cmd->data = 0;
/* Report default options for RSS on fm10k */
@@ -732,30 +734,18 @@ static int fm10k_get_rss_hash_opts(struct fm10k_intfc *interface,
return 0;
}
-static int fm10k_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
- u32 __always_unused *rule_locs)
+static u32 fm10k_get_rx_ring_count(struct net_device *dev)
{
struct fm10k_intfc *interface = netdev_priv(dev);
- int ret = -EOPNOTSUPP;
-
- switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = interface->num_rx_queues;
- ret = 0;
- break;
- case ETHTOOL_GRXFH:
- ret = fm10k_get_rss_hash_opts(interface, cmd);
- break;
- default:
- break;
- }
- return ret;
+ return interface->num_rx_queues;
}
-static int fm10k_set_rss_hash_opt(struct fm10k_intfc *interface,
- struct ethtool_rxnfc *nfc)
+static int fm10k_set_rssh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
{
+ struct fm10k_intfc *interface = netdev_priv(dev);
int rss_ipv4_udp = test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
interface->flags);
int rss_ipv6_udp = test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
@@ -871,22 +861,6 @@ static int fm10k_set_rss_hash_opt(struct fm10k_intfc *interface,
return 0;
}
-static int fm10k_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
-{
- struct fm10k_intfc *interface = netdev_priv(dev);
- int ret = -EOPNOTSUPP;
-
- switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- ret = fm10k_set_rss_hash_opt(interface, cmd);
- break;
- default:
- break;
- }
-
- return ret;
-}
-
static int fm10k_mbx_test(struct fm10k_intfc *interface, u64 *data)
{
struct fm10k_hw *hw = &interface->hw;
@@ -1175,8 +1149,7 @@ static const struct ethtool_ops fm10k_ethtool_ops = {
.set_ringparam = fm10k_set_ringparam,
.get_coalesce = fm10k_get_coalesce,
.set_coalesce = fm10k_set_coalesce,
- .get_rxnfc = fm10k_get_rxnfc,
- .set_rxnfc = fm10k_set_rxnfc,
+ .get_rx_ring_count = fm10k_get_rx_ring_count,
.get_regs = fm10k_get_regs,
.get_regs_len = fm10k_get_regs_len,
.self_test = fm10k_self_test,
@@ -1186,6 +1159,8 @@ static const struct ethtool_ops fm10k_ethtool_ops = {
.get_rxfh_key_size = fm10k_get_rssrk_size,
.get_rxfh = fm10k_get_rssh,
.set_rxfh = fm10k_set_rssh,
+ .get_rxfh_fields = fm10k_get_rssh_fields,
+ .set_rxfh_fields = fm10k_set_rssh_fields,
.get_channels = fm10k_get_channels,
.set_channels = fm10k_set_channels,
.get_ts_info = ethtool_op_get_ts_info,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index fc373472e4e1..b8c15b837fda 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -17,7 +17,6 @@ static const char fm10k_driver_string[] = DRV_SUMMARY;
static const char fm10k_copyright[] =
"Copyright(c) 2013 - 2019 Intel Corporation.";
-MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
@@ -38,7 +37,7 @@ static int __init fm10k_init_module(void)
pr_info("%s\n", fm10k_copyright);
/* create driver workqueue */
- fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
+ fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_PERCPU, 0,
fm10k_driver_name);
if (!fm10k_workqueue)
return -ENOMEM;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index d748b98274e7..d75b8a50413d 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -199,8 +199,8 @@ static void fm10k_start_service_event(struct fm10k_intfc *interface)
**/
static void fm10k_service_timer(struct timer_list *t)
{
- struct fm10k_intfc *interface = from_timer(interface, t,
- service_timer);
+ struct fm10k_intfc *interface = timer_container_of(interface, t,
+ service_timer);
/* Reset the timer */
mod_timer(&interface->service_timer, (HZ * 2) + jiffies);
@@ -2245,7 +2245,7 @@ static void fm10k_remove(struct pci_dev *pdev)
struct fm10k_intfc *interface = pci_get_drvdata(pdev);
struct net_device *netdev = interface->netdev;
- del_timer_sync(&interface->service_timer);
+ timer_delete_sync(&interface->service_timer);
fm10k_stop_service_event(interface);
fm10k_stop_macvlan_task(interface);
@@ -2342,7 +2342,7 @@ static int fm10k_handle_resume(struct fm10k_intfc *interface)
* suspend or hibernation. This function does not need to handle lower PCIe
* device state as the stack takes care of that for us.
**/
-static int __maybe_unused fm10k_resume(struct device *dev)
+static int fm10k_resume(struct device *dev)
{
struct fm10k_intfc *interface = dev_get_drvdata(dev);
struct net_device *netdev = interface->netdev;
@@ -2369,7 +2369,7 @@ static int __maybe_unused fm10k_resume(struct device *dev)
* system suspend or hibernation. This function does not need to handle lower
* PCIe device state as the stack takes care of that for us.
**/
-static int __maybe_unused fm10k_suspend(struct device *dev)
+static int fm10k_suspend(struct device *dev)
{
struct fm10k_intfc *interface = dev_get_drvdata(dev);
struct net_device *netdev = interface->netdev;
@@ -2423,12 +2423,6 @@ static pci_ers_result_t fm10k_io_slot_reset(struct pci_dev *pdev)
} else {
pci_set_master(pdev);
pci_restore_state(pdev);
-
- /* After second error pci->state_saved is false, this
- * resets it so EEH doesn't break.
- */
- pci_save_state(pdev);
-
pci_wake_from_d3(pdev, false);
result = PCI_ERS_RESULT_RECOVERED;
@@ -2502,16 +2496,14 @@ static const struct pci_error_handlers fm10k_err_handler = {
.reset_done = fm10k_io_reset_done,
};
-static SIMPLE_DEV_PM_OPS(fm10k_pm_ops, fm10k_suspend, fm10k_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(fm10k_pm_ops, fm10k_suspend, fm10k_resume);
static struct pci_driver fm10k_driver = {
.name = fm10k_driver_name,
.id_table = fm10k_pci_tbl,
.probe = fm10k_probe,
.remove = fm10k_remove,
- .driver = {
- .pm = &fm10k_pm_ops,
- },
+ .driver.pm = pm_sleep_ptr(&fm10k_pm_ops),
.sriov_configure = fm10k_iov_configure,
.err_handler = &fm10k_err_handler
};
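
The fm10k_pci.c hunks above move to timer_container_of()/timer_delete_sync() and to DEFINE_SIMPLE_DEV_PM_OPS() referenced through pm_sleep_ptr(), which is what makes the __maybe_unused annotations unnecessary. A condensed sketch of that combined pattern, with mydrv_* as placeholder names:

#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/pm.h>
#include <linux/timer.h>

struct mydrv_priv {
        struct timer_list watchdog;
};

static void mydrv_watchdog(struct timer_list *t)
{
        struct mydrv_priv *priv = timer_container_of(priv, t, watchdog);

        /* ... periodic work ... */
        mod_timer(&priv->watchdog, jiffies + 2 * HZ);
}

static void mydrv_stop(struct mydrv_priv *priv)
{
        timer_delete_sync(&priv->watchdog);
}

static int mydrv_suspend(struct device *dev) { return 0; }
static int mydrv_resume(struct device *dev) { return 0; }

/* Referenced only when CONFIG_PM_SLEEP is set; pm_sleep_ptr() lets the
 * compiler drop the ops otherwise, so __maybe_unused is not needed.
 */
static DEFINE_SIMPLE_DEV_PM_OPS(mydrv_pm_ops, mydrv_suspend, mydrv_resume);
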
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index 98861cc6df7c..3394645a18fe 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -1180,126 +1180,6 @@ s32 fm10k_iov_select_vid(struct fm10k_vf_info *vf_info, u16 vid)
}
/**
- * fm10k_iov_msg_mac_vlan_pf - Message handler for MAC/VLAN request from VF
- * @hw: Pointer to hardware structure
- * @results: Pointer array to message, results[0] is pointer to message
- * @mbx: Pointer to mailbox information structure
- *
- * This function is a default handler for MAC/VLAN requests from the VF.
- * The assumption is that in this case it is acceptable to just directly
- * hand off the message from the VF to the underlying shared code.
- **/
-s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
- struct fm10k_mbx_info *mbx)
-{
- struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
- u8 mac[ETH_ALEN];
- u32 *result;
- int err = 0;
- bool set;
- u16 vlan;
- u32 vid;
-
- /* we shouldn't be updating rules on a disabled interface */
- if (!FM10K_VF_FLAG_ENABLED(vf_info))
- err = FM10K_ERR_PARAM;
-
- if (!err && !!results[FM10K_MAC_VLAN_MSG_VLAN]) {
- result = results[FM10K_MAC_VLAN_MSG_VLAN];
-
- /* record VLAN id requested */
- err = fm10k_tlv_attr_get_u32(result, &vid);
- if (err)
- return err;
-
- set = !(vid & FM10K_VLAN_CLEAR);
- vid &= ~FM10K_VLAN_CLEAR;
-
- /* if the length field has been set, this is a multi-bit
- * update request. For multi-bit requests, simply disallow
- * them when the pf_vid has been set. In this case, the PF
- * should have already cleared the VLAN_TABLE, and if we
- * allowed them, it could allow a rogue VF to receive traffic
- * on a VLAN it was not assigned. In the single-bit case, we
- * need to modify requests for VLAN 0 to use the default PF or
- * SW vid when assigned.
- */
-
- if (vid >> 16) {
- /* prevent multi-bit requests when PF has
- * administratively set the VLAN for this VF
- */
- if (vf_info->pf_vid)
- return FM10K_ERR_PARAM;
- } else {
- err = fm10k_iov_select_vid(vf_info, (u16)vid);
- if (err < 0)
- return err;
-
- vid = err;
- }
-
- /* update VSI info for VF in regards to VLAN table */
- err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set);
- }
-
- if (!err && !!results[FM10K_MAC_VLAN_MSG_MAC]) {
- result = results[FM10K_MAC_VLAN_MSG_MAC];
-
- /* record unicast MAC address requested */
- err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
- if (err)
- return err;
-
- /* block attempts to set MAC for a locked device */
- if (is_valid_ether_addr(vf_info->mac) &&
- !ether_addr_equal(mac, vf_info->mac))
- return FM10K_ERR_PARAM;
-
- set = !(vlan & FM10K_VLAN_CLEAR);
- vlan &= ~FM10K_VLAN_CLEAR;
-
- err = fm10k_iov_select_vid(vf_info, vlan);
- if (err < 0)
- return err;
-
- vlan = (u16)err;
-
- /* notify switch of request for new unicast address */
- err = hw->mac.ops.update_uc_addr(hw, vf_info->glort,
- mac, vlan, set, 0);
- }
-
- if (!err && !!results[FM10K_MAC_VLAN_MSG_MULTICAST]) {
- result = results[FM10K_MAC_VLAN_MSG_MULTICAST];
-
- /* record multicast MAC address requested */
- err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
- if (err)
- return err;
-
- /* verify that the VF is allowed to request multicast */
- if (!(vf_info->vf_flags & FM10K_VF_FLAG_MULTI_ENABLED))
- return FM10K_ERR_PARAM;
-
- set = !(vlan & FM10K_VLAN_CLEAR);
- vlan &= ~FM10K_VLAN_CLEAR;
-
- err = fm10k_iov_select_vid(vf_info, vlan);
- if (err < 0)
- return err;
-
- vlan = (u16)err;
-
- /* notify switch of request for new multicast address */
- err = hw->mac.ops.update_mc_addr(hw, vf_info->glort,
- mac, vlan, set);
- }
-
- return err;
-}
-
-/**
* fm10k_iov_supported_xcast_mode_pf - Determine best match for xcast mode
* @vf_info: VF info structure containing capability flags
* @mode: Requested xcast mode
@@ -1509,7 +1389,7 @@ static void fm10k_rebind_hw_stats_pf(struct fm10k_hw *hw,
fm10k_unbind_hw_stats_32b(&stats->nodesc_drop);
/* Unbind Queue Statistics */
- fm10k_unbind_hw_stats_q(stats->q, 0, hw->mac.max_queues);
+ fm10k_unbind_hw_stats_q(stats->q, hw->mac.max_queues);
/* Reinitialize bases for all stats */
fm10k_update_hw_stats_pf(hw, stats);
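
The removed fm10k_iov_msg_mac_vlan_pf() handler decoded a set/clear flag that the VF packs into the VLAN word before acting on the request. A standalone illustration of that decode step; the FM10K_VLAN_CLEAR value below is a stand-in for illustration, not the driver's real definition:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FM10K_VLAN_CLEAR (1u << 15)     /* stand-in value for illustration */

int main(void)
{
        uint32_t vid = 100 | FM10K_VLAN_CLEAR;  /* VF asks to clear VLAN 100 */
        bool set = !(vid & FM10K_VLAN_CLEAR);

        vid &= ~FM10K_VLAN_CLEAR;
        printf("%s VLAN %u\n", set ? "set" : "clear", (unsigned int)vid);
        return 0;
}
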
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
index 8e814df709d2..ad3696893cb1 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
@@ -99,8 +99,6 @@ extern const struct fm10k_tlv_attr fm10k_err_msg_attr[];
s32 fm10k_iov_select_vid(struct fm10k_vf_info *vf_info, u16 vid);
s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *);
-s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *, u32 **,
- struct fm10k_mbx_info *);
s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *, u32 **,
struct fm10k_mbx_info *);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
index 7fb1961f2921..6861a0bdc14e 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
@@ -465,7 +465,7 @@ static void fm10k_rebind_hw_stats_vf(struct fm10k_hw *hw,
struct fm10k_hw_stats *stats)
{
/* Unbind Queue Statistics */
- fm10k_unbind_hw_stats_q(stats->q, 0, hw->mac.max_queues);
+ fm10k_unbind_hw_stats_q(stats->q, hw->mac.max_queues);
/* Reinitialize bases for all stats */
fm10k_update_hw_stats_vf(hw, stats);
diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile
index cad93f323bd5..9faa4339a76c 100644
--- a/drivers/net/ethernet/intel/i40e/Makefile
+++ b/drivers/net/ethernet/intel/i40e/Makefile
@@ -10,7 +10,7 @@ subdir-ccflags-y += -I$(src)
obj-$(CONFIG_I40E) += i40e.o
-i40e-objs := i40e_main.o \
+i40e-y := i40e_main.o \
i40e_ethtool.o \
i40e_adminq.o \
i40e_common.o \
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 2fbabcdb5bb5..d2d03db2acec 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -4,6 +4,7 @@
#ifndef _I40E_H_
#define _I40E_H_
+#include <linux/linkmode.h>
#include <linux/pci.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/types.h>
@@ -87,6 +88,7 @@ enum i40e_state {
__I40E_SERVICE_SCHED,
__I40E_ADMINQ_EVENT_PENDING,
__I40E_MDD_EVENT_PENDING,
+ __I40E_MDD_VF_PRINT_PENDING,
__I40E_VFLR_EVENT_PENDING,
__I40E_RESET_RECOVERY_PENDING,
__I40E_TIMEOUT_RECOVERY_PENDING,
@@ -190,6 +192,7 @@ enum i40e_pf_flags {
*/
I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA,
I40E_FLAG_VF_VLAN_PRUNING_ENA,
+ I40E_FLAG_MDD_AUTO_RESET_VF,
I40E_PF_FLAGS_NBITS, /* must be last */
};
@@ -545,6 +548,7 @@ struct i40e_pf {
u16 empr_count; /* EMP reset count */
u16 pfr_count; /* PF reset count */
u16 sw_int_count; /* SW interrupt count */
+ u32 link_down_events;
struct mutex switch_mutex;
u16 lan_vsi; /* our default LAN VSI */
@@ -570,8 +574,12 @@ struct i40e_pf {
struct i40e_vf *vf;
int num_alloc_vfs; /* actual number of VFs allocated */
u32 vf_aq_requests;
+ /* If set to non-zero, the device uses this value
+ * as maximum number of MAC filters per VF.
+ */
+ u32 max_mac_per_vf;
u32 arq_overflows; /* Not fatal, possibly indicative of problems */
-
+ struct ratelimit_state mdd_message_rate_limit;
/* DCBx/DCBNL capability for PF that indicates
* whether DCBx is managed by firmware or host
* based agent (LLDPAD). Also, indicates what
@@ -657,7 +665,7 @@ struct i40e_pf {
struct ptp_clock_info ptp_caps;
struct sk_buff *ptp_tx_skb;
unsigned long ptp_tx_start;
- struct hwtstamp_config tstamp_config;
+ struct kernel_hwtstamp_config tstamp_config;
struct timespec64 ptp_prev_hw_time;
struct work_struct ptp_extts0_work;
ktime_t ptp_reset_start;
@@ -735,7 +743,7 @@ __i40e_pf_next_veb(struct i40e_pf *pf, int *idx)
_i++, _veb = __i40e_pf_next_veb(_pf, &_i))
/**
- * i40e_mac_to_hkey - Convert a 6-byte MAC Address to a u64 hash key
+ * i40e_addr_to_hkey - Convert a 6-byte MAC Address to a u64 hash key
* @macaddr: the MAC Address as the base key
*
* Simply copies the address and returns it as a u64 for hashing
@@ -754,6 +762,7 @@ enum i40e_filter_state {
I40E_FILTER_ACTIVE, /* Added to switch by FW */
I40E_FILTER_FAILED, /* Rejected by FW */
I40E_FILTER_REMOVE, /* To be removed */
+ I40E_FILTER_NEW_SYNC, /* New, not sent yet, is in i40e_sync_vsi_filters() */
/* There is no 'removed' state; the filter struct is freed */
};
struct i40e_mac_filter {
@@ -788,7 +797,6 @@ struct i40e_veb {
u16 stats_idx; /* index of VEB parent */
u8 enabled_tc;
u16 bridge_mode; /* Bridge Mode (VEB/VEPA) */
- u16 flags;
u16 bw_limit;
u8 bw_max_quanta;
bool is_abs_credits;
@@ -942,6 +950,7 @@ struct i40e_q_vector {
u16 reg_idx; /* register index of the interrupt */
struct napi_struct napi;
+ struct rcu_head rcu; /* to avoid race with update stats on free */
struct i40e_ring_container rx;
struct i40e_ring_container tx;
@@ -952,7 +961,6 @@ struct i40e_q_vector {
cpumask_t affinity_mask;
struct irq_affinity_notify affinity_notify;
- struct rcu_head rcu; /* to avoid race with update stats on free */
char name[I40E_INT_NAME_STR_LEN];
bool arm_wb_state;
bool in_busy_poll;
@@ -1188,7 +1196,6 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi,
struct i40e_fdir_filter *input, bool add);
void i40e_fdir_check_and_reenable(struct i40e_pf *pf);
u32 i40e_get_current_fd_count(struct i40e_pf *pf);
-u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf);
u32 i40e_get_current_atr_cnt(struct i40e_pf *pf);
u32 i40e_get_global_fd_count(struct i40e_pf *pf);
bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features);
@@ -1196,7 +1203,6 @@ void i40e_set_ethtool_ops(struct net_device *netdev);
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
const u8 *macaddr, s16 vlan);
void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f);
-void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan);
int i40e_sync_vsi_filters(struct i40e_vsi *vsi);
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
u16 uplink, u32 param1);
@@ -1213,7 +1219,7 @@ void i40e_vsi_stop_rings(struct i40e_vsi *vsi);
void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi);
int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi);
int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count);
-struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid,
+struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 uplink_seid,
u16 downlink_seid, u8 enabled_tc);
void i40e_veb_release(struct i40e_veb *veb);
@@ -1237,8 +1243,8 @@ static inline void i40e_dbg_exit(void) {}
int i40e_lan_add_device(struct i40e_pf *pf);
int i40e_lan_del_device(struct i40e_pf *pf);
void i40e_client_subtask(struct i40e_pf *pf);
-void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi);
-void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset);
+void i40e_notify_client_of_l2_param_changes(struct i40e_pf *pf);
+void i40e_notify_client_of_netdev_close(struct i40e_pf *pf, bool reset);
void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs);
void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id);
void i40e_client_update_msix_info(struct i40e_pf *pf);
@@ -1276,7 +1282,8 @@ struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
const u8 *macaddr);
int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr);
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
-int i40e_count_filters(struct i40e_vsi *vsi);
+int i40e_count_all_filters(struct i40e_vsi *vsi);
+int i40e_count_active_filters(struct i40e_vsi *vsi);
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr);
void i40e_vlan_stripping_enable(struct i40e_vsi *vsi);
static inline bool i40e_is_sw_dcb(struct i40e_pf *pf)
@@ -1301,8 +1308,11 @@ void i40e_ptp_tx_hang(struct i40e_pf *pf);
void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf);
void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index);
void i40e_ptp_set_increment(struct i40e_pf *pf);
-int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr);
-int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr);
+int i40e_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config);
+int i40e_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
void i40e_ptp_save_hw_time(struct i40e_pf *pf);
void i40e_ptp_restore_hw_time(struct i40e_pf *pf);
void i40e_ptp_init(struct i40e_pf *pf);
@@ -1312,7 +1322,6 @@ int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset);
int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi);
int i40e_get_partition_bw_setting(struct i40e_pf *pf);
int i40e_set_partition_bw_setting(struct i40e_pf *pf);
-int i40e_commit_partition_bw_setting(struct i40e_pf *pf);
void i40e_print_link_message(struct i40e_vsi *vsi, bool isup);
void i40e_set_fec_in_flags(u8 fec_cfg, unsigned long *flags);
@@ -1374,6 +1383,17 @@ i40e_pf_get_vsi_by_seid(struct i40e_pf *pf, u16 seid)
}
/**
+ * i40e_pf_get_main_vsi - get pointer to main VSI
+ * @pf: pointer to a PF
+ *
+ * Return: pointer to main VSI or NULL if it does not exist
+ **/
+static inline struct i40e_vsi *i40e_pf_get_main_vsi(struct i40e_pf *pf)
+{
+ return (pf->lan_vsi != I40E_NO_VSI) ? pf->vsi[pf->lan_vsi] : NULL;
+}
+
+/**
* i40e_pf_get_veb_by_seid - find VEB by SEID
* @pf: pointer to a PF
* @seid: SEID of the VSI
@@ -1391,4 +1411,15 @@ i40e_pf_get_veb_by_seid(struct i40e_pf *pf, u16 seid)
return NULL;
}
+/**
+ * i40e_pf_get_main_veb - get pointer to main VEB
+ * @pf: pointer to a PF
+ *
+ * Return: pointer to main VEB or NULL if it does not exist
+ **/
+static inline struct i40e_veb *i40e_pf_get_main_veb(struct i40e_pf *pf)
+{
+ return (pf->lan_veb != I40E_NO_VEB) ? pf->veb[pf->lan_veb] : NULL;
+}
+
#endif /* _I40E_H_ */
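
The new i40e_pf_get_main_vsi()/i40e_pf_get_main_veb() accessors replace open-coded pf->vsi[pf->lan_vsi] and pf->veb[pf->lan_veb] lookups and make the missing-resource case explicit, as the i40e_client.c hunks later in this patch show. A sketch of the intended call pattern; the example_* wrapper is illustrative and assumes i40e.h is in scope:

static void example_use_main_vsi(struct i40e_pf *pf)
{
        struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);

        if (!vsi)
                return;         /* main VSI not allocated (yet) */

        /* ... operate on the main VSI ... */
}
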
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index f73f5930fc58..096ec46bb619 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -18,7 +18,7 @@ static int i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
(hw->aq.num_asq_entries *
- sizeof(struct i40e_aq_desc)),
+ sizeof(struct libie_aq_desc)),
I40E_ADMINQ_DESC_ALIGNMENT);
if (ret_code)
return ret_code;
@@ -44,7 +44,7 @@ static int i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
(hw->aq.num_arq_entries *
- sizeof(struct i40e_aq_desc)),
+ sizeof(struct libie_aq_desc)),
I40E_ADMINQ_DESC_ALIGNMENT);
return ret_code;
@@ -80,7 +80,7 @@ static void i40e_free_adminq_arq(struct i40e_hw *hw)
**/
static int i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
- struct i40e_aq_desc *desc;
+ struct libie_aq_desc *desc;
struct i40e_dma_mem *bi;
int ret_code;
int i;
@@ -108,9 +108,9 @@ static int i40e_alloc_arq_bufs(struct i40e_hw *hw)
/* now configure the descriptors for use */
desc = I40E_ADMINQ_DESC(hw->aq.arq, i);
- desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
+ desc->flags = cpu_to_le16(LIBIE_AQ_FLAG_BUF);
if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
- desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
+ desc->flags |= cpu_to_le16(LIBIE_AQ_FLAG_LB);
desc->opcode = 0;
/* This is in accordance with Admin queue design, there is no
* register for buffer size configuration
@@ -119,12 +119,12 @@ static int i40e_alloc_arq_bufs(struct i40e_hw *hw)
desc->retval = 0;
desc->cookie_high = 0;
desc->cookie_low = 0;
- desc->params.external.addr_high =
+ desc->params.generic.addr_high =
cpu_to_le32(upper_32_bits(bi->pa));
- desc->params.external.addr_low =
+ desc->params.generic.addr_low =
cpu_to_le32(lower_32_bits(bi->pa));
- desc->params.external.param0 = 0;
- desc->params.external.param1 = 0;
+ desc->params.generic.param0 = 0;
+ desc->params.generic.param1 = 0;
}
alloc_arq_bufs:
@@ -691,8 +691,8 @@ static u16 i40e_clean_asq(struct i40e_hw *hw)
struct i40e_adminq_ring *asq = &(hw->aq.asq);
struct i40e_asq_cmd_details *details;
u16 ntc = asq->next_to_clean;
- struct i40e_aq_desc desc_cb;
- struct i40e_aq_desc *desc;
+ struct libie_aq_desc desc_cb;
+ struct libie_aq_desc *desc;
desc = I40E_ADMINQ_DESC(*asq, ntc);
details = I40E_ADMINQ_DETAILS(*asq, ntc);
@@ -750,7 +750,7 @@ static bool i40e_asq_done(struct i40e_hw *hw)
**/
static int
i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
- struct i40e_aq_desc *desc,
+ struct libie_aq_desc *desc,
void *buff, /* can be NULL */
u16 buff_size,
struct i40e_asq_cmd_details *cmd_details,
@@ -758,7 +758,7 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
{
struct i40e_dma_mem *dma_buff = NULL;
struct i40e_asq_cmd_details *details;
- struct i40e_aq_desc *desc_on_ring;
+ struct libie_aq_desc *desc_on_ring;
bool cmd_completed = false;
u16 retval = 0;
int status = 0;
@@ -771,7 +771,7 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
goto asq_send_command_error;
}
- hw->aq.asq_last_status = I40E_AQ_RC_OK;
+ hw->aq.asq_last_status = LIBIE_AQ_RC_OK;
val = rd32(hw, I40E_PF_ATQH);
if (val >= hw->aq.num_asq_entries) {
@@ -851,9 +851,9 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
/* Update the address values in the desc with the pa value
* for respective buffer
*/
- desc_on_ring->params.external.addr_high =
+ desc_on_ring->params.generic.addr_high =
cpu_to_le32(upper_32_bits(dma_buff->pa));
- desc_on_ring->params.external.addr_low =
+ desc_on_ring->params.generic.addr_low =
cpu_to_le32(lower_32_bits(dma_buff->pa));
}
@@ -905,13 +905,13 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
retval &= 0xff;
}
cmd_completed = true;
- if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
+ if ((enum libie_aq_err)retval == LIBIE_AQ_RC_OK)
status = 0;
- else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY)
+ else if ((enum libie_aq_err)retval == LIBIE_AQ_RC_EBUSY)
status = -EBUSY;
else
status = -EIO;
- hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
+ hw->aq.asq_last_status = (enum libie_aq_err)retval;
}
i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
@@ -954,7 +954,7 @@ asq_send_command_error:
**/
int
i40e_asq_send_command_atomic(struct i40e_hw *hw,
- struct i40e_aq_desc *desc,
+ struct libie_aq_desc *desc,
void *buff, /* can be NULL */
u16 buff_size,
struct i40e_asq_cmd_details *cmd_details,
@@ -972,7 +972,7 @@ i40e_asq_send_command_atomic(struct i40e_hw *hw,
}
int
-i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
+i40e_asq_send_command(struct i40e_hw *hw, struct libie_aq_desc *desc,
void *buff, /* can be NULL */ u16 buff_size,
struct i40e_asq_cmd_details *cmd_details)
{
@@ -996,12 +996,12 @@ i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
**/
int
i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
- struct i40e_aq_desc *desc,
+ struct libie_aq_desc *desc,
void *buff, /* can be NULL */
u16 buff_size,
struct i40e_asq_cmd_details *cmd_details,
bool is_atomic_context,
- enum i40e_admin_queue_err *aq_status)
+ enum libie_aq_err *aq_status)
{
int status;
@@ -1016,16 +1016,6 @@ i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
return status;
}
-int
-i40e_asq_send_command_v2(struct i40e_hw *hw, struct i40e_aq_desc *desc,
- void *buff, /* can be NULL */ u16 buff_size,
- struct i40e_asq_cmd_details *cmd_details,
- enum i40e_admin_queue_err *aq_status)
-{
- return i40e_asq_send_command_atomic_v2(hw, desc, buff, buff_size,
- cmd_details, true, aq_status);
-}
-
/**
* i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
* @desc: pointer to the temp descriptor (non DMA mem)
@@ -1033,13 +1023,13 @@ i40e_asq_send_command_v2(struct i40e_hw *hw, struct i40e_aq_desc *desc,
*
* Fill the desc with default values
**/
-void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
+void i40e_fill_default_direct_cmd_desc(struct libie_aq_desc *desc,
u16 opcode)
{
/* zero out the desc */
- memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+ memset((void *)desc, 0, sizeof(struct libie_aq_desc));
desc->opcode = cpu_to_le16(opcode);
- desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI);
+ desc->flags = cpu_to_le16(LIBIE_AQ_FLAG_SI);
}
/**
@@ -1057,7 +1047,7 @@ int i40e_clean_arq_element(struct i40e_hw *hw,
u16 *pending)
{
u16 ntc = hw->aq.arq.next_to_clean;
- struct i40e_aq_desc *desc;
+ struct libie_aq_desc *desc;
struct i40e_dma_mem *bi;
int ret_code = 0;
u16 desc_idx;
@@ -1091,9 +1081,9 @@ int i40e_clean_arq_element(struct i40e_hw *hw,
desc_idx = ntc;
hw->aq.arq_last_status =
- (enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
+ (enum libie_aq_err)le16_to_cpu(desc->retval);
flags = le16_to_cpu(desc->flags);
- if (flags & I40E_AQ_FLAG_ERR) {
+ if (flags & LIBIE_AQ_FLAG_ERR) {
ret_code = -EIO;
i40e_debug(hw,
I40E_DEBUG_AQ_MESSAGE,
@@ -1117,14 +1107,14 @@ int i40e_clean_arq_element(struct i40e_hw *hw,
* size
*/
bi = &hw->aq.arq.r.arq_bi[ntc];
- memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+ memset((void *)desc, 0, sizeof(struct libie_aq_desc));
- desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
+ desc->flags = cpu_to_le16(LIBIE_AQ_FLAG_BUF);
if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
- desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
+ desc->flags |= cpu_to_le16(LIBIE_AQ_FLAG_LB);
desc->datalen = cpu_to_le16((u16)bi->size);
- desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
- desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
+ desc->params.generic.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
+ desc->params.generic.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
/* set tail = the last cleaned desc index. */
wr32(hw, I40E_PF_ARQT, ntc);
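
The adminq conversion above swaps struct i40e_aq_desc for the shared struct libie_aq_desc and addresses buffers through params.generic rather than params.external. A sketch of the buffer-attach pattern the converted ring code uses, as a kernel-context fragment; the helper name and the threshold parameter are illustrative:

#include <linux/net/intel/libie/adminq.h>

static void example_attach_buf(struct libie_aq_desc *desc, dma_addr_t pa,
                               u16 size, u16 large_buf_threshold)
{
        desc->flags = cpu_to_le16(LIBIE_AQ_FLAG_BUF);
        if (size > large_buf_threshold)
                desc->flags |= cpu_to_le16(LIBIE_AQ_FLAG_LB);

        desc->datalen = cpu_to_le16(size);
        desc->params.generic.addr_high = cpu_to_le32(upper_32_bits(pa));
        desc->params.generic.addr_low = cpu_to_le32(lower_32_bits(pa));
}
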
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index ee86d2c53079..1be97a3a86ce 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -9,7 +9,7 @@
#include "i40e_adminq_cmd.h"
#define I40E_ADMINQ_DESC(R, i) \
- (&(((struct i40e_aq_desc *)((R).desc_buf.va))[i]))
+ (&(((struct libie_aq_desc *)((R).desc_buf.va))[i]))
#define I40E_ADMINQ_DESC_ALIGNMENT 4096
@@ -39,7 +39,7 @@ struct i40e_asq_cmd_details {
u16 flags_dis;
bool async;
bool postpone;
- struct i40e_aq_desc *wb_desc;
+ struct libie_aq_desc *wb_desc;
};
#define I40E_ADMINQ_DETAILS(R, i) \
@@ -47,7 +47,7 @@ struct i40e_asq_cmd_details {
/* ARQ event information */
struct i40e_arq_event_info {
- struct i40e_aq_desc desc;
+ struct libie_aq_desc desc;
u16 msg_len;
u16 buf_len;
u8 *msg_buf;
@@ -72,8 +72,8 @@ struct i40e_adminq_info {
struct mutex arq_mutex; /* Receive queue lock */
/* last status values on send and receive queues */
- enum i40e_admin_queue_err asq_last_status;
- enum i40e_admin_queue_err arq_last_status;
+ enum libie_aq_err asq_last_status;
+ enum libie_aq_err arq_last_status;
};
/**
@@ -109,10 +109,6 @@ static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
-EFBIG, /* I40E_AQ_RC_EFBIG */
};
- /* aq_rc is invalid if AQ timed out */
- if (aq_ret == -EIO)
- return -EAGAIN;
-
if (!((u32)aq_rc < (sizeof(aq_to_posix) / sizeof((aq_to_posix)[0]))))
return -ERANGE;
@@ -123,7 +119,7 @@ static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
#define I40E_AQ_LARGE_BUF 512
#define I40E_ASQ_CMD_TIMEOUT 250000 /* usecs */
-void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
+void i40e_fill_default_direct_cmd_desc(struct libie_aq_desc *desc,
u16 opcode);
#endif /* _I40E_ADMINQ_H_ */
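
With the special-cased -EIO handling removed above, i40e_aq_rc_to_posix() is left as a plain bounds-checked table lookup from AQ return codes to negative errnos. A self-contained illustration of that style of mapping, using only the first few codes from the (removed) enum; the real driver table covers all 23 codes:

#include <errno.h>
#include <stdio.h>

/* Indices follow the AQ return codes: 0 OK, 1 EPERM, 2 ENOENT, 3 ESRCH,
 * 4 EINTR, 5 EIO.
 */
static int rc_to_posix(unsigned int rc)
{
        static const int map[] = {
                0, -EPERM, -ENOENT, -ESRCH, -EINTR, -EIO,
        };

        if (rc >= sizeof(map) / sizeof(map[0]))
                return -ERANGE;
        return map[rc];
}

int main(void)
{
        printf("%d\n", rc_to_posix(5)); /* prints the negative EIO value */
        return 0;
}
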
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index c8f35d4de271..cc02a85ad42b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -4,6 +4,8 @@
#ifndef _I40E_ADMINQ_CMD_H_
#define _I40E_ADMINQ_CMD_H_
+#include <linux/net/intel/libie/adminq.h>
+
#include <linux/bits.h>
#include <linux/types.h>
@@ -30,75 +32,6 @@
/* API version 1.10 for X722 devices adds ability to request FEC encoding */
#define I40E_MINOR_VER_FW_REQUEST_FEC_X722 0x000A
-struct i40e_aq_desc {
- __le16 flags;
- __le16 opcode;
- __le16 datalen;
- __le16 retval;
- __le32 cookie_high;
- __le32 cookie_low;
- union {
- struct {
- __le32 param0;
- __le32 param1;
- __le32 param2;
- __le32 param3;
- } internal;
- struct {
- __le32 param0;
- __le32 param1;
- __le32 addr_high;
- __le32 addr_low;
- } external;
- u8 raw[16];
- } params;
-};
-
-/* Flags sub-structure
- * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
- * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
- */
-
-/* command flags and offsets*/
-#define I40E_AQ_FLAG_ERR_SHIFT 2
-#define I40E_AQ_FLAG_LB_SHIFT 9
-#define I40E_AQ_FLAG_RD_SHIFT 10
-#define I40E_AQ_FLAG_BUF_SHIFT 12
-#define I40E_AQ_FLAG_SI_SHIFT 13
-
-#define I40E_AQ_FLAG_ERR BIT(I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
-#define I40E_AQ_FLAG_LB BIT(I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
-#define I40E_AQ_FLAG_RD BIT(I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
-#define I40E_AQ_FLAG_BUF BIT(I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
-#define I40E_AQ_FLAG_SI BIT(I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
-
-/* error codes */
-enum i40e_admin_queue_err {
- I40E_AQ_RC_OK = 0, /* success */
- I40E_AQ_RC_EPERM = 1, /* Operation not permitted */
- I40E_AQ_RC_ENOENT = 2, /* No such element */
- I40E_AQ_RC_ESRCH = 3, /* Bad opcode */
- I40E_AQ_RC_EINTR = 4, /* operation interrupted */
- I40E_AQ_RC_EIO = 5, /* I/O error */
- I40E_AQ_RC_ENXIO = 6, /* No such resource */
- I40E_AQ_RC_E2BIG = 7, /* Arg too long */
- I40E_AQ_RC_EAGAIN = 8, /* Try again */
- I40E_AQ_RC_ENOMEM = 9, /* Out of memory */
- I40E_AQ_RC_EACCES = 10, /* Permission denied */
- I40E_AQ_RC_EFAULT = 11, /* Bad address */
- I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */
- I40E_AQ_RC_EEXIST = 13, /* object already exists */
- I40E_AQ_RC_EINVAL = 14, /* Invalid argument */
- I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */
- I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */
- I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */
- I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */
- I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */
- I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
- I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
- I40E_AQ_RC_EFBIG = 22, /* File too large */
-};
-
/* Admin Queue command opcodes */
enum i40e_admin_queue_opc {
/* aq commands */
@@ -320,21 +253,6 @@ struct i40e_aqc_get_version {
__le16 api_minor;
};
-I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version);
-
-/* Send driver version (indirect 0x0002) */
-struct i40e_aqc_driver_version {
- u8 driver_major_ver;
- u8 driver_minor_ver;
- u8 driver_build_ver;
- u8 driver_subbuild_ver;
- u8 reserved[4];
- __le32 address_high;
- __le32 address_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version);
-
/* Queue Shutdown (direct 0x0003) */
struct i40e_aqc_queue_shutdown {
__le32 driver_unloading;
@@ -352,75 +270,6 @@ struct i40e_aqc_set_pf_context {
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context);
-/* Request resource ownership (direct 0x0008)
- * Release resource ownership (direct 0x0009)
- */
-struct i40e_aqc_request_resource {
- __le16 resource_id;
- __le16 access_type;
- __le32 timeout;
- __le32 resource_number;
- u8 reserved[4];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource);
-
-/* Get function capabilities (indirect 0x000A)
- * Get device capabilities (indirect 0x000B)
- */
-struct i40e_aqc_list_capabilites {
- u8 command_flags;
- u8 pf_index;
- u8 reserved[2];
- __le32 count;
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites);
-
-struct i40e_aqc_list_capabilities_element_resp {
- __le16 id;
- u8 major_rev;
- u8 minor_rev;
- __le32 number;
- __le32 logical_id;
- __le32 phys_id;
- u8 reserved[16];
-};
-
-/* list of caps */
-
-#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001
-#define I40E_AQ_CAP_ID_MNG_MODE 0x0002
-#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003
-#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004
-#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005
-#define I40E_AQ_CAP_ID_SRIOV 0x0012
-#define I40E_AQ_CAP_ID_VF 0x0013
-#define I40E_AQ_CAP_ID_VMDQ 0x0014
-#define I40E_AQ_CAP_ID_8021QBG 0x0015
-#define I40E_AQ_CAP_ID_8021QBR 0x0016
-#define I40E_AQ_CAP_ID_VSI 0x0017
-#define I40E_AQ_CAP_ID_DCB 0x0018
-#define I40E_AQ_CAP_ID_FCOE 0x0021
-#define I40E_AQ_CAP_ID_ISCSI 0x0022
-#define I40E_AQ_CAP_ID_RSS 0x0040
-#define I40E_AQ_CAP_ID_RXQ 0x0041
-#define I40E_AQ_CAP_ID_TXQ 0x0042
-#define I40E_AQ_CAP_ID_MSIX 0x0043
-#define I40E_AQ_CAP_ID_VF_MSIX 0x0044
-#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045
-#define I40E_AQ_CAP_ID_1588 0x0046
-#define I40E_AQ_CAP_ID_IWARP 0x0051
-#define I40E_AQ_CAP_ID_LED 0x0061
-#define I40E_AQ_CAP_ID_SDP 0x0062
-#define I40E_AQ_CAP_ID_MDIO 0x0063
-#define I40E_AQ_CAP_ID_WSR_PROT 0x0064
-#define I40E_AQ_CAP_ID_NVM_MGMT 0x0080
-#define I40E_AQ_CAP_ID_FLEX10 0x00F1
-#define I40E_AQ_CAP_ID_CEM 0x00F2
-
/* Set CPPM Configuration (direct 0x0103) */
struct i40e_aqc_cppm_configuration {
__le16 command_flags;
@@ -1712,6 +1561,7 @@ I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
struct i40e_aq_set_mac_config {
__le16 max_frame_size;
u8 params;
+#define I40E_AQ_SET_MAC_CONFIG_CRC_EN BIT(2)
u8 tx_timer_priority; /* bitmap */
__le16 tx_timer_value;
__le16 fc_refresh_threshold;
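
The CRC_EN bit added to struct i40e_aq_set_mac_config above is consumed by the new i40e_aq_set_mac_config() helper introduced later in this patch (see i40e_common.c). A hypothetical caller, assuming the helper's prototype is visible through the driver's prototype header; the frame size value is an example only:

static int example_set_mac_config(struct i40e_hw *hw)
{
        /* The helper sets I40E_AQ_SET_MAC_CONFIG_CRC_EN and a default flow
         * control refresh threshold internally; 9728 is only an example
         * maximum frame size.
         */
        return i40e_aq_set_mac_config(hw, 9728, NULL);
}
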
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index b32071ee84af..518bc738ea3b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -101,25 +101,26 @@ i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id, u8 *msg, u16 len)
/**
* i40e_notify_client_of_l2_param_changes - call the client notify callback
- * @vsi: the VSI with l2 param changes
+ * @pf: PF device pointer
*
- * If there is a client to this VSI, call the client
+ * If there is a client, call its callback
**/
-void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi)
+void i40e_notify_client_of_l2_param_changes(struct i40e_pf *pf)
{
- struct i40e_pf *pf = vsi->back;
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
struct i40e_client_instance *cdev = pf->cinst;
struct i40e_params params;
if (!cdev || !cdev->client)
return;
if (!cdev->client->ops || !cdev->client->ops->l2_param_change) {
- dev_dbg(&vsi->back->pdev->dev,
+ dev_dbg(&pf->pdev->dev,
"Cannot locate client instance l2_param_change routine\n");
return;
}
if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
- dev_dbg(&vsi->back->pdev->dev, "Client is not open, abort l2 param change\n");
+ dev_dbg(&pf->pdev->dev,
+ "Client is not open, abort l2 param change\n");
return;
}
memset(&params, 0, sizeof(params));
@@ -157,20 +158,19 @@ static void i40e_client_release_qvlist(struct i40e_info *ldev)
/**
* i40e_notify_client_of_netdev_close - call the client close callback
- * @vsi: the VSI with netdev closed
+ * @pf: PF device pointer
* @reset: true when close called due to a reset pending
*
* If there is a client to this netdev, call the client with close
**/
-void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset)
+void i40e_notify_client_of_netdev_close(struct i40e_pf *pf, bool reset)
{
- struct i40e_pf *pf = vsi->back;
struct i40e_client_instance *cdev = pf->cinst;
if (!cdev || !cdev->client)
return;
if (!cdev->client->ops || !cdev->client->ops->close) {
- dev_dbg(&vsi->back->pdev->dev,
+ dev_dbg(&pf->pdev->dev,
"Cannot locate client instance close routine\n");
return;
}
@@ -333,9 +333,9 @@ static int i40e_register_auxiliary_dev(struct i40e_info *ldev, const char *name)
**/
static void i40e_client_add_instance(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
struct i40e_client_instance *cdev = NULL;
struct netdev_hw_addr *mac = NULL;
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
if (!cdev)
@@ -359,8 +359,8 @@ static void i40e_client_add_instance(struct i40e_pf *pf)
if (i40e_client_get_params(vsi, &cdev->lan_info.params))
goto free_cdev;
- mac = list_first_entry(&cdev->lan_info.netdev->dev_addrs.list,
- struct netdev_hw_addr, list);
+ mac = list_first_entry_or_null(&cdev->lan_info.netdev->dev_addrs.list,
+ struct netdev_hw_addr, list);
if (mac)
ether_addr_copy(cdev->lan_info.lanmac, mac->addr);
else
@@ -399,9 +399,9 @@ void i40e_client_del_instance(struct i40e_pf *pf)
**/
void i40e_client_subtask(struct i40e_pf *pf)
{
- struct i40e_client *client;
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
struct i40e_client_instance *cdev;
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_client *client;
int ret = 0;
if (!test_and_clear_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state))
@@ -665,8 +665,8 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
bool is_vf, u32 vf_id,
u32 flag, u32 valid_flag)
{
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(ldev->pf);
struct i40e_pf *pf = ldev->pf;
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_vsi_context ctxt;
bool update = true;
int err;
@@ -682,9 +682,7 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
if (err) {
dev_info(&pf->pdev->dev,
"couldn't get PF vsi config, err %pe aq_err %s\n",
- ERR_PTR(err),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ ERR_PTR(err), libie_aq_str(pf->hw.aq.asq_last_status));
return -ENOENT;
}
@@ -711,8 +709,7 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
dev_info(&pf->pdev->dev,
"update VSI ctxt for PE failed, err %pe aq_err %s\n",
ERR_PTR(err),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
}
}
return err;
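
The client-instance hunk above switches to list_first_entry_or_null(), which returns NULL for an empty dev_addrs list instead of a pointer into the list head. A small standalone sketch of the same guard with a toy list type; names are illustrative:

#include <linux/list.h>

struct item {
        struct list_head node;
        int val;
};

static int first_val_or_default(struct list_head *head, int dflt)
{
        struct item *it = list_first_entry_or_null(head, struct item, node);

        return it ? it->val : dflt;
}
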
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index de6ca6295742..59f5c1e810eb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -69,66 +69,6 @@ int i40e_set_mac_type(struct i40e_hw *hw)
}
/**
- * i40e_aq_str - convert AQ err code to a string
- * @hw: pointer to the HW structure
- * @aq_err: the AQ error code to convert
- **/
-const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
-{
- switch (aq_err) {
- case I40E_AQ_RC_OK:
- return "OK";
- case I40E_AQ_RC_EPERM:
- return "I40E_AQ_RC_EPERM";
- case I40E_AQ_RC_ENOENT:
- return "I40E_AQ_RC_ENOENT";
- case I40E_AQ_RC_ESRCH:
- return "I40E_AQ_RC_ESRCH";
- case I40E_AQ_RC_EINTR:
- return "I40E_AQ_RC_EINTR";
- case I40E_AQ_RC_EIO:
- return "I40E_AQ_RC_EIO";
- case I40E_AQ_RC_ENXIO:
- return "I40E_AQ_RC_ENXIO";
- case I40E_AQ_RC_E2BIG:
- return "I40E_AQ_RC_E2BIG";
- case I40E_AQ_RC_EAGAIN:
- return "I40E_AQ_RC_EAGAIN";
- case I40E_AQ_RC_ENOMEM:
- return "I40E_AQ_RC_ENOMEM";
- case I40E_AQ_RC_EACCES:
- return "I40E_AQ_RC_EACCES";
- case I40E_AQ_RC_EFAULT:
- return "I40E_AQ_RC_EFAULT";
- case I40E_AQ_RC_EBUSY:
- return "I40E_AQ_RC_EBUSY";
- case I40E_AQ_RC_EEXIST:
- return "I40E_AQ_RC_EEXIST";
- case I40E_AQ_RC_EINVAL:
- return "I40E_AQ_RC_EINVAL";
- case I40E_AQ_RC_ENOTTY:
- return "I40E_AQ_RC_ENOTTY";
- case I40E_AQ_RC_ENOSPC:
- return "I40E_AQ_RC_ENOSPC";
- case I40E_AQ_RC_ENOSYS:
- return "I40E_AQ_RC_ENOSYS";
- case I40E_AQ_RC_ERANGE:
- return "I40E_AQ_RC_ERANGE";
- case I40E_AQ_RC_EFLUSHED:
- return "I40E_AQ_RC_EFLUSHED";
- case I40E_AQ_RC_BAD_ADDR:
- return "I40E_AQ_RC_BAD_ADDR";
- case I40E_AQ_RC_EMODE:
- return "I40E_AQ_RC_EMODE";
- case I40E_AQ_RC_EFBIG:
- return "I40E_AQ_RC_EFBIG";
- }
-
- snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
- return hw->err_str;
-}
-
-/**
* i40e_debug_aq
* @hw: debug mask related to admin queue
* @mask: debug mask
@@ -141,7 +81,7 @@ const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
void *buffer, u16 buf_len)
{
- struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
+ struct libie_aq_desc *aq_desc = (struct libie_aq_desc *)desc;
u32 effective_mask = hw->debug_mask & mask;
char prefix[27];
u16 len;
@@ -164,12 +104,12 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
le32_to_cpu(aq_desc->cookie_low));
i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
"\tparam (0,1) 0x%08X 0x%08X\n",
- le32_to_cpu(aq_desc->params.internal.param0),
- le32_to_cpu(aq_desc->params.internal.param1));
+ le32_to_cpu(aq_desc->params.generic.param0),
+ le32_to_cpu(aq_desc->params.generic.param1));
i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
"\taddr (h,l) 0x%08X 0x%08X\n",
- le32_to_cpu(aq_desc->params.external.addr_high),
- le32_to_cpu(aq_desc->params.external.addr_low));
+ le32_to_cpu(aq_desc->params.generic.addr_high),
+ le32_to_cpu(aq_desc->params.generic.addr_low));
if (buffer && buf_len != 0 && len != 0 &&
(effective_mask & I40E_DEBUG_AQ_DESC_BUFFER)) {
@@ -214,14 +154,14 @@ bool i40e_check_asq_alive(struct i40e_hw *hw)
int i40e_aq_queue_shutdown(struct i40e_hw *hw,
bool unloading)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_queue_shutdown *cmd =
- (struct i40e_aqc_queue_shutdown *)&desc.params.raw;
+ struct i40e_aqc_queue_shutdown *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_queue_shutdown);
+ cmd = libie_aq_raw(&desc);
if (unloading)
cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
@@ -245,9 +185,8 @@ static int i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
u8 *lut, u16 lut_size,
bool set)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_get_set_rss_lut *cmd_resp =
- (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
+ struct i40e_aqc_get_set_rss_lut *cmd_resp;
+ struct libie_aq_desc desc;
int status;
u16 flags;
@@ -258,9 +197,10 @@ static int i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_rss_lut);
+ cmd_resp = libie_aq_raw(&desc);
/* Indirect command */
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_RD);
vsi_id = FIELD_PREP(I40E_AQC_SET_RSS_LUT_VSI_ID_MASK, vsi_id) |
FIELD_PREP(I40E_AQC_SET_RSS_LUT_VSI_VALID, 1);
@@ -326,10 +266,9 @@ static int i40e_aq_get_set_rss_key(struct i40e_hw *hw,
struct i40e_aqc_get_set_rss_key_data *key,
bool set)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_get_set_rss_key *cmd_resp =
- (struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
+ struct i40e_aqc_get_set_rss_key *cmd_resp;
+ struct libie_aq_desc desc;
int status;
if (set)
@@ -339,9 +278,10 @@ static int i40e_aq_get_set_rss_key(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_rss_key);
+ cmd_resp = libie_aq_raw(&desc);
/* Indirect command */
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_RD);
vsi_id = FIELD_PREP(I40E_AQC_SET_RSS_KEY_VSI_ID_MASK, vsi_id) |
FIELD_PREP(I40E_AQC_SET_RSS_KEY_VSI_VALID, 1);
@@ -381,259 +321,6 @@ int i40e_aq_set_rss_key(struct i40e_hw *hw,
return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
}
-/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
- * hardware to a bit-field that can be used by SW to more easily determine the
- * packet type.
- *
- * Macros are used to shorten the table lines and make this table human
- * readable.
- *
- * We store the PTYPE in the top byte of the bit field - this is just so that
- * we can check that the table doesn't have a row missing, as the index into
- * the table should be the PTYPE.
- *
- * Typical work flow:
- *
- * IF NOT i40e_ptype_lookup[ptype].known
- * THEN
- * Packet is unknown
- * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
- * Use the rest of the fields to look at the tunnels, inner protocols, etc
- * ELSE
- * Use the enum i40e_rx_l2_ptype to decode the packet type
- * ENDIF
- */
-
-/* macro to make the table lines short, use explicit indexing with [PTYPE] */
-#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
- [PTYPE] = { \
- 1, \
- I40E_RX_PTYPE_OUTER_##OUTER_IP, \
- I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
- I40E_RX_PTYPE_##OUTER_FRAG, \
- I40E_RX_PTYPE_TUNNEL_##T, \
- I40E_RX_PTYPE_TUNNEL_END_##TE, \
- I40E_RX_PTYPE_##TEF, \
- I40E_RX_PTYPE_INNER_PROT_##I, \
- I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }
-
-#define I40E_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
-
-/* shorter macros makes the table fit but are terse */
-#define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG
-#define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG
-#define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC
-
-/* Lookup table mapping in the 8-bit HW PTYPE to the bit field for decoding */
-struct i40e_rx_ptype_decoded i40e_ptype_lookup[BIT(8)] = {
- /* L2 Packet types */
- I40E_PTT_UNUSED_ENTRY(0),
- I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
- I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
- I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
- I40E_PTT_UNUSED_ENTRY(4),
- I40E_PTT_UNUSED_ENTRY(5),
- I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
- I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
- I40E_PTT_UNUSED_ENTRY(8),
- I40E_PTT_UNUSED_ENTRY(9),
- I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
- I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
- I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
-
- /* Non Tunneled IPv4 */
- I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(25),
- I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
- I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
- I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
-
- /* IPv4 --> IPv4 */
- I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
- I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
- I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(32),
- I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
- I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
- I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
-
- /* IPv4 --> IPv6 */
- I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
- I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
- I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(39),
- I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
- I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
- I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT */
- I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
-
- /* IPv4 --> GRE/NAT --> IPv4 */
- I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
- I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
- I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(47),
- I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
- I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
- I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT --> IPv6 */
- I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
- I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
- I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(54),
- I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
- I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
- I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT --> MAC */
- I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
-
- /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
- I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
- I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
- I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(62),
- I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
- I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
- I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
- I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
- I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
- I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(69),
- I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
- I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
- I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT --> MAC/VLAN */
- I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
-
- /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
- I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
- I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
- I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(77),
- I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
- I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
- I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
-
- /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
- I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
- I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
- I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(84),
- I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
- I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
- I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
-
- /* Non Tunneled IPv6 */
- I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(91),
- I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
- I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
- I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
-
- /* IPv6 --> IPv4 */
- I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
- I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
- I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(98),
- I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
- I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
- I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
-
- /* IPv6 --> IPv6 */
- I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
- I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
- I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(105),
- I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
- I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
- I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT */
- I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
-
- /* IPv6 --> GRE/NAT -> IPv4 */
- I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
- I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
- I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(113),
- I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
- I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
- I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> IPv6 */
- I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
- I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
- I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(120),
- I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
- I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
- I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> MAC */
- I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
-
- /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
- I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
- I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
- I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(128),
- I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
- I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
- I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
- I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
- I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
- I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(135),
- I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
- I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
- I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> MAC/VLAN */
- I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
-
- /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
- I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
- I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
- I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(143),
- I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
- I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
- I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
- I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
- I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
- I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(150),
- I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
- I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
- I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
-
- /* unused entries */
- [154 ... 255] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
-};
-
/**
* i40e_init_shared_code - Initialize the shared code
* @hw: pointer to hardware structure
@@ -692,13 +379,13 @@ i40e_aq_mac_address_read(struct i40e_hw *hw,
struct i40e_aqc_mac_address_read_data *addrs,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_mac_address_read *cmd_data =
- (struct i40e_aqc_mac_address_read *)&desc.params.raw;
+ struct i40e_aqc_mac_address_read *cmd_data;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
- desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF);
+ cmd_data = libie_aq_raw(&desc);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_BUF);
status = i40e_asq_send_command(hw, &desc, addrs,
sizeof(*addrs), cmd_details);
@@ -718,13 +405,13 @@ int i40e_aq_mac_address_write(struct i40e_hw *hw,
u16 flags, u8 *mac_addr,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_mac_address_write *cmd_data =
- (struct i40e_aqc_mac_address_write *)&desc.params.raw;
+ struct i40e_aqc_mac_address_write *cmd_data;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_mac_address_write);
+ cmd_data = libie_aq_raw(&desc);
cmd_data->command_flags = cpu_to_le16(flags);
cmd_data->mac_sah = cpu_to_le16((u16)mac_addr[0] << 8 | mac_addr[1]);
cmd_data->mac_sal = cpu_to_le32(((u32)mac_addr[2] << 24) |
@@ -1070,10 +757,11 @@ int i40e_pf_reset(struct i40e_hw *hw)
void i40e_clear_hw(struct i40e_hw *hw)
{
u32 num_queues, base_queue;
- u32 num_pf_int;
- u32 num_vf_int;
+ s32 num_pf_int;
+ s32 num_vf_int;
u32 num_vfs;
- u32 i, j;
+ s32 i;
+ u32 j;
u32 val;
u32 eol = 0x7ff;
@@ -1313,7 +1001,7 @@ i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
{
u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0;
- struct i40e_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
if (!abilities)
@@ -1323,36 +1011,36 @@ i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_phy_abilities);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
if (abilities_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
if (qualified_modules)
- desc.params.external.param0 |=
+ desc.params.generic.param0 |=
cpu_to_le32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES);
if (report_init)
- desc.params.external.param0 |=
+ desc.params.generic.param0 |=
cpu_to_le32(I40E_AQ_PHY_REPORT_INITIAL_VALUES);
status = i40e_asq_send_command(hw, &desc, abilities,
abilities_size, cmd_details);
switch (hw->aq.asq_last_status) {
- case I40E_AQ_RC_EIO:
+ case LIBIE_AQ_RC_EIO:
status = -EIO;
break;
- case I40E_AQ_RC_EAGAIN:
+ case LIBIE_AQ_RC_EAGAIN:
usleep_range(1000, 2000);
total_delay++;
status = -EIO;
break;
- /* also covers I40E_AQ_RC_OK */
+ /* also covers LIBIE_AQ_RC_OK */
default:
break;
}
- } while ((hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) &&
+ } while ((hw->aq.asq_last_status == LIBIE_AQ_RC_EAGAIN) &&
(total_delay < max_delay));
if (status)
@@ -1389,9 +1077,8 @@ int i40e_aq_set_phy_config(struct i40e_hw *hw,
struct i40e_aq_set_phy_config *config,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aq_set_phy_config *cmd =
- (struct i40e_aq_set_phy_config *)&desc.params.raw;
+ struct i40e_aq_set_phy_config *cmd;
+ struct libie_aq_desc desc;
int status;
if (!config)
@@ -1400,6 +1087,7 @@ int i40e_aq_set_phy_config(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_phy_config);
+ cmd = libie_aq_raw(&desc);
*cmd = *config;
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -1502,6 +1190,40 @@ int i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
}
/**
+ * i40e_aq_set_mac_config - Configure MAC settings
+ * @hw: pointer to the hw struct
+ * @max_frame_size: Maximum Frame Size to be supported by the port
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set MAC configuration (0x0603). Note that max_frame_size must be greater
+ * than zero.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+int i40e_aq_set_mac_config(struct i40e_hw *hw, u16 max_frame_size,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_set_mac_config *cmd;
+ struct libie_aq_desc desc;
+
+ cmd = libie_aq_raw(&desc);
+
+ if (max_frame_size == 0)
+ return -EINVAL;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_mac_config);
+
+ cmd->max_frame_size = cpu_to_le16(max_frame_size);
+ cmd->params = I40E_AQ_SET_MAC_CONFIG_CRC_EN;
+
+#define I40E_AQ_SET_MAC_CONFIG_FC_DEFAULT_THRESHOLD 0x7FFF
+ cmd->fc_refresh_threshold =
+ cpu_to_le16(I40E_AQ_SET_MAC_CONFIG_FC_DEFAULT_THRESHOLD);
+
+ return i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+}
+
+/**
* i40e_aq_clear_pxe_mode
* @hw: pointer to the hw struct
* @cmd_details: pointer to command details structure or NULL
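The i40e_aq_set_mac_config() helper added above rejects a zero max_frame_size, enables CRC and programs a default flow-control refresh threshold before sending the 0x0603 command. A standalone model of just that argument handling is sketched below; the DEMO_* names and the field layout are assumptions for illustration, and the cpu_to_le16() conversions and the actual admin-queue send are left out.

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-ins for the command layout and flag used above;
 * the real structures live in the i40e/libie headers. */
#define DEMO_CRC_EN                     0x04
#define DEMO_FC_DEFAULT_THRESHOLD       0x7FFF

struct demo_set_mac_config {
        unsigned short max_frame_size;
        unsigned char  params;
        unsigned short fc_refresh_threshold;
};

/* Mirrors the argument checking in the helper above: a zero
 * max_frame_size is rejected before any descriptor work happens. */
static int demo_set_mac_config(struct demo_set_mac_config *cmd,
                               unsigned short max_frame_size)
{
        if (max_frame_size == 0)
                return -EINVAL;

        cmd->max_frame_size = max_frame_size;
        cmd->params = DEMO_CRC_EN;
        cmd->fc_refresh_threshold = DEMO_FC_DEFAULT_THRESHOLD;
        return 0;
}

int main(void)
{
        struct demo_set_mac_config cmd = { 0 };

        printf("zero frame size -> %d\n", demo_set_mac_config(&cmd, 0));
        printf("9728 bytes      -> %d\n", demo_set_mac_config(&cmd, 9728));
        return 0;
}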
@@ -1511,14 +1233,14 @@ int i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
int i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_clear_pxe *cmd =
- (struct i40e_aqc_clear_pxe *)&desc.params.raw;
+ struct i40e_aqc_clear_pxe *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_clear_pxe_mode);
+ cmd = libie_aq_raw(&desc);
cmd->rx_cnt = 0x2;
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -1540,14 +1262,14 @@ int i40e_aq_set_link_restart_an(struct i40e_hw *hw,
bool enable_link,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_link_restart_an *cmd =
- (struct i40e_aqc_set_link_restart_an *)&desc.params.raw;
+ struct i40e_aqc_set_link_restart_an *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_link_restart_an);
+ cmd = libie_aq_raw(&desc);
cmd->command = I40E_AQ_PHY_RESTART_AN;
if (enable_link)
cmd->command |= I40E_AQ_PHY_LINK_ENABLE;
@@ -1572,16 +1294,16 @@ int i40e_aq_get_link_info(struct i40e_hw *hw,
bool enable_lse, struct i40e_link_status *link,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_get_link_status *resp =
- (struct i40e_aqc_get_link_status *)&desc.params.raw;
struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+ struct i40e_aqc_get_link_status *resp;
+ struct libie_aq_desc desc;
bool tx_pause, rx_pause;
u16 command_flags;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
+ resp = libie_aq_raw(&desc);
if (enable_lse)
command_flags = I40E_AQ_LSE_ENABLE;
else
@@ -1667,14 +1389,14 @@ int i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
u16 mask,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_phy_int_mask *cmd =
- (struct i40e_aqc_set_phy_int_mask *)&desc.params.raw;
+ struct i40e_aqc_set_phy_int_mask *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_phy_int_mask);
+ cmd = libie_aq_raw(&desc);
cmd->event_mask = cpu_to_le16(mask);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -1693,11 +1415,11 @@ int i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
int i40e_aq_set_mac_loopback(struct i40e_hw *hw, bool ena_lpbk,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_lb_mode *cmd =
- (struct i40e_aqc_set_lb_mode *)&desc.params.raw;
+ struct i40e_aqc_set_lb_mode *cmd;
+ struct libie_aq_desc desc;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_lb_modes);
+ cmd = libie_aq_raw(&desc);
if (ena_lpbk) {
if (hw->nvm.version <= I40E_LEGACY_LOOPBACK_NVM_VER)
cmd->lb_mode = cpu_to_le16(I40E_AQ_LB_MAC_LOCAL_LEGACY);
@@ -1719,14 +1441,14 @@ int i40e_aq_set_mac_loopback(struct i40e_hw *hw, bool ena_lpbk,
int i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_phy_debug *cmd =
- (struct i40e_aqc_set_phy_debug *)&desc.params.raw;
+ struct i40e_aqc_set_phy_debug *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_phy_debug);
+ cmd = libie_aq_raw(&desc);
cmd->command_flags = cmd_flags;
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -1746,23 +1468,22 @@ int i40e_aq_add_vsi(struct i40e_hw *hw,
struct i40e_vsi_context *vsi_ctx,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_get_update_vsi *cmd =
- (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
- struct i40e_aqc_add_get_update_vsi_completion *resp =
- (struct i40e_aqc_add_get_update_vsi_completion *)
- &desc.params.raw;
+ struct i40e_aqc_add_get_update_vsi_completion *resp;
+ struct i40e_aqc_add_get_update_vsi *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_add_vsi);
+ resp = libie_aq_raw(&desc);
+ cmd = libie_aq_raw(&desc);
cmd->uplink_seid = cpu_to_le16(vsi_ctx->uplink_seid);
cmd->connection_type = vsi_ctx->connection_type;
cmd->vf_id = vsi_ctx->vf_num;
cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ desc.flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD));
status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info,
sizeof(vsi_ctx->info),
@@ -1790,15 +1511,14 @@ int i40e_aq_set_default_vsi(struct i40e_hw *hw,
u16 seid,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
- (struct i40e_aqc_set_vsi_promiscuous_modes *)
- &desc.params.raw;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
+ cmd = libie_aq_raw(&desc);
cmd->promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
cmd->seid = cpu_to_le16(seid);
@@ -1818,15 +1538,14 @@ int i40e_aq_clear_default_vsi(struct i40e_hw *hw,
u16 seid,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
- (struct i40e_aqc_set_vsi_promiscuous_modes *)
- &desc.params.raw;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
+ cmd = libie_aq_raw(&desc);
cmd->promiscuous_flags = cpu_to_le16(0);
cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
cmd->seid = cpu_to_le16(seid);
@@ -1849,15 +1568,15 @@ int i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details,
bool rx_only_promisc)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
- (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd;
+ struct libie_aq_desc desc;
u16 flags = 0;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
+ cmd = libie_aq_raw(&desc);
if (set) {
flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
if (rx_only_promisc && i40e_is_aq_api_ver_ge(hw, 1, 5))
@@ -1888,15 +1607,15 @@ int i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
u16 seid, bool set,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
- (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd;
+ struct libie_aq_desc desc;
u16 flags = 0;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
+ cmd = libie_aq_raw(&desc);
if (set)
flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
@@ -1923,15 +1642,15 @@ int i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
u16 vid,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
- (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd;
+ struct libie_aq_desc desc;
u16 flags = 0;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
+ cmd = libie_aq_raw(&desc);
if (enable)
flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
@@ -1959,15 +1678,15 @@ int i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
u16 vid,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
- (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd;
+ struct libie_aq_desc desc;
u16 flags = 0;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
+ cmd = libie_aq_raw(&desc);
if (enable) {
flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
if (i40e_is_aq_api_ver_ge(hw, 1, 5))
@@ -2000,9 +1719,8 @@ int i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
u16 seid, bool enable, u16 vid,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
- (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd;
+ struct libie_aq_desc desc;
u16 flags = 0;
int status;
@@ -2012,6 +1730,7 @@ int i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
if (enable)
flags |= I40E_AQC_SET_VSI_PROMISC_BROADCAST;
+ cmd = libie_aq_raw(&desc);
cmd->promiscuous_flags = cpu_to_le16(flags);
cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
cmd->seid = cpu_to_le16(seid);
@@ -2035,14 +1754,14 @@ int i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
u16 seid, bool set_filter,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
- (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
+ cmd = libie_aq_raw(&desc);
if (set_filter)
cmd->promiscuous_flags
|= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
@@ -2058,37 +1777,6 @@ int i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
}
/**
- * i40e_aq_set_vsi_vlan_promisc - control the VLAN promiscuous setting
- * @hw: pointer to the hw struct
- * @seid: vsi number
- * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
- * @cmd_details: pointer to command details structure or NULL
- **/
-int i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
- u16 seid, bool enable,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
- (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
- u16 flags = 0;
- int status;
-
- i40e_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_set_vsi_promiscuous_modes);
- if (enable)
- flags |= I40E_AQC_SET_VSI_PROMISC_VLAN;
-
- cmd->promiscuous_flags = cpu_to_le16(flags);
- cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_VLAN);
- cmd->seid = cpu_to_le16(seid);
-
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
- return status;
-}
-
-/**
* i40e_aq_get_vsi_params - get VSI configuration info
* @hw: pointer to the hw struct
* @vsi_ctx: pointer to a vsi context struct
@@ -2098,20 +1786,19 @@ int i40e_aq_get_vsi_params(struct i40e_hw *hw,
struct i40e_vsi_context *vsi_ctx,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_get_update_vsi *cmd =
- (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
- struct i40e_aqc_add_get_update_vsi_completion *resp =
- (struct i40e_aqc_add_get_update_vsi_completion *)
- &desc.params.raw;
+ struct i40e_aqc_add_get_update_vsi_completion *resp;
+ struct i40e_aqc_add_get_update_vsi *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_vsi_parameters);
+ resp = libie_aq_raw(&desc);
+ cmd = libie_aq_raw(&desc);
cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
sizeof(vsi_ctx->info), NULL);
@@ -2140,19 +1827,18 @@ int i40e_aq_update_vsi_params(struct i40e_hw *hw,
struct i40e_vsi_context *vsi_ctx,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_get_update_vsi *cmd =
- (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
- struct i40e_aqc_add_get_update_vsi_completion *resp =
- (struct i40e_aqc_add_get_update_vsi_completion *)
- &desc.params.raw;
+ struct i40e_aqc_add_get_update_vsi_completion *resp;
+ struct i40e_aqc_add_get_update_vsi *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_update_vsi_parameters);
+ resp = libie_aq_raw(&desc);
+ cmd = libie_aq_raw(&desc);
cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ desc.flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD));
status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info,
sizeof(vsi_ctx->info),
@@ -2179,16 +1865,16 @@ int i40e_aq_get_switch_config(struct i40e_hw *hw,
u16 buf_size, u16 *start_seid,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_switch_seid *scfg =
- (struct i40e_aqc_switch_seid *)&desc.params.raw;
+ struct i40e_aqc_switch_seid *scfg;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_switch_config);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ scfg = libie_aq_raw(&desc);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
if (buf_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
scfg->seid = cpu_to_le16(*start_seid);
status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details);
@@ -2213,13 +1899,13 @@ int i40e_aq_set_switch_config(struct i40e_hw *hw,
u16 valid_flags, u8 mode,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_switch_config *scfg =
- (struct i40e_aqc_set_switch_config *)&desc.params.raw;
+ struct i40e_aqc_set_switch_config *scfg;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_switch_config);
+ scfg = libie_aq_raw(&desc);
scfg->flags = cpu_to_le16(flags);
scfg->valid_flags = cpu_to_le16(valid_flags);
scfg->mode = mode;
@@ -2251,11 +1937,11 @@ int i40e_aq_get_firmware_version(struct i40e_hw *hw,
u16 *api_major_version, u16 *api_minor_version,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_get_version *resp =
- (struct i40e_aqc_get_version *)&desc.params.raw;
+ struct i40e_aqc_get_version *resp;
+ struct libie_aq_desc desc;
int status;
+ resp = libie_aq_raw(&desc);
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -2288,22 +1974,22 @@ int i40e_aq_send_driver_version(struct i40e_hw *hw,
struct i40e_driver_version *dv,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_driver_version *cmd =
- (struct i40e_aqc_driver_version *)&desc.params.raw;
+ struct libie_aqc_driver_ver *cmd;
+ struct libie_aq_desc desc;
int status;
u16 len;
if (dv == NULL)
return -EINVAL;
+ cmd = libie_aq_raw(&desc);
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version);
- desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
- cmd->driver_major_ver = dv->major_version;
- cmd->driver_minor_ver = dv->minor_version;
- cmd->driver_build_ver = dv->build_version;
- cmd->driver_subbuild_ver = dv->subbuild_version;
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD);
+ cmd->major_ver = dv->major_version;
+ cmd->minor_ver = dv->minor_version;
+ cmd->build_ver = dv->build_version;
+ cmd->subbuild_ver = dv->subbuild_version;
len = 0;
while (len < sizeof(dv->driver_string) &&
@@ -2403,11 +2089,9 @@ int i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
bool enable_stats,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_veb *cmd =
- (struct i40e_aqc_add_veb *)&desc.params.raw;
- struct i40e_aqc_add_veb_completion *resp =
- (struct i40e_aqc_add_veb_completion *)&desc.params.raw;
+ struct i40e_aqc_add_veb_completion *resp;
+ struct i40e_aqc_add_veb *cmd;
+ struct libie_aq_desc desc;
u16 veb_flags = 0;
int status;
@@ -2415,6 +2099,8 @@ int i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
if (!!uplink_seid != !!downlink_seid)
return -EINVAL;
+ resp = libie_aq_raw(&desc);
+ cmd = libie_aq_raw(&desc);
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb);
cmd->uplink_seid = cpu_to_le16(uplink_seid);
@@ -2461,15 +2147,14 @@ int i40e_aq_get_veb_parameters(struct i40e_hw *hw,
u16 *vebs_used, u16 *vebs_free,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_get_veb_parameters_completion *cmd_resp =
- (struct i40e_aqc_get_veb_parameters_completion *)
- &desc.params.raw;
+ struct i40e_aqc_get_veb_parameters_completion *cmd_resp;
+ struct libie_aq_desc desc;
int status;
if (veb_seid == 0)
return -EINVAL;
+ cmd_resp = libie_aq_raw(&desc);
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_veb_parameters);
cmd_resp->seid = cpu_to_le16(veb_seid);
@@ -2511,10 +2196,9 @@ get_veb_exit:
**/
static u16
i40e_prepare_add_macvlan(struct i40e_aqc_add_macvlan_element_data *mv_list,
- struct i40e_aq_desc *desc, u16 count, u16 seid)
+ struct libie_aq_desc *desc, u16 count, u16 seid)
{
- struct i40e_aqc_macvlan *cmd =
- (struct i40e_aqc_macvlan *)&desc->params.raw;
+ struct i40e_aqc_macvlan *cmd = libie_aq_raw(desc);
u16 buf_size;
int i;
@@ -2532,9 +2216,9 @@ i40e_prepare_add_macvlan(struct i40e_aqc_add_macvlan_element_data *mv_list,
mv_list[i].flags |=
cpu_to_le16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC);
- desc->flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ desc->flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD));
if (buf_size > I40E_AQ_LARGE_BUF)
- desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc->flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
return buf_size;
}
@@ -2554,7 +2238,7 @@ i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_add_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
+ struct libie_aq_desc desc;
u16 buf_size;
if (count == 0 || !mv_list || !hw)
@@ -2585,9 +2269,9 @@ int
i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_add_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details,
- enum i40e_admin_queue_err *aq_status)
+ enum libie_aq_err *aq_status)
{
- struct i40e_aq_desc desc;
+ struct libie_aq_desc desc;
u16 buf_size;
if (count == 0 || !mv_list || !hw)
@@ -2614,9 +2298,8 @@ i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_remove_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_macvlan *cmd =
- (struct i40e_aqc_macvlan *)&desc.params.raw;
+ struct i40e_aqc_macvlan *cmd;
+ struct libie_aq_desc desc;
u16 buf_size;
int status;
@@ -2627,14 +2310,15 @@ i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
/* prep the rest of the request */
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
+ cmd = libie_aq_raw(&desc);
cmd->num_addresses = cpu_to_le16(count);
cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
cmd->seid[1] = 0;
cmd->seid[2] = 0;
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ desc.flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD));
if (buf_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
status = i40e_asq_send_command_atomic(hw, &desc, mv_list, buf_size,
cmd_details, true);
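The macvlan hunks above also show the indirect-command flag idiom that recurs throughout this patch: LIBIE_AQ_FLAG_BUF marks a command that carries an external buffer, LIBIE_AQ_FLAG_RD is added when the host supplies the buffer contents, and LIBIE_AQ_FLAG_LB when the buffer exceeds I40E_AQ_LARGE_BUF. A small standalone sketch of that decision, with made-up DEMO_* bit values rather than the real flag encodings:

#include <stdio.h>

#define DEMO_FLAG_BUF   0x1000  /* command carries an external buffer */
#define DEMO_FLAG_RD    0x0400  /* buffer is read by firmware */
#define DEMO_FLAG_LB    0x0200  /* buffer exceeds the "small" limit */
#define DEMO_LARGE_BUF  512

/* Mirrors the recurring pattern above: indirect commands always set BUF,
 * add RD when the host writes data, and LB for large buffers. */
static unsigned short demo_indirect_flags(unsigned short buf_size,
                                          int host_writes)
{
        unsigned short flags = DEMO_FLAG_BUF;

        if (host_writes)
                flags |= DEMO_FLAG_RD;
        if (buf_size > DEMO_LARGE_BUF)
                flags |= DEMO_FLAG_LB;
        return flags;
}

int main(void)
{
        printf("small read : 0x%04x\n", demo_indirect_flags(128, 0));
        printf("large write: 0x%04x\n", demo_indirect_flags(4096, 1));
        return 0;
}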
@@ -2661,10 +2345,10 @@ int
i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_remove_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details,
- enum i40e_admin_queue_err *aq_status)
+ enum libie_aq_err *aq_status)
{
struct i40e_aqc_macvlan *cmd;
- struct i40e_aq_desc desc;
+ struct libie_aq_desc desc;
u16 buf_size;
if (count == 0 || !mv_list || !hw)
@@ -2674,151 +2358,21 @@ i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
/* prep the rest of the request */
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
- cmd = (struct i40e_aqc_macvlan *)&desc.params.raw;
+ cmd = libie_aq_raw(&desc);
cmd->num_addresses = cpu_to_le16(count);
cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
cmd->seid[1] = 0;
cmd->seid[2] = 0;
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ desc.flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD));
if (buf_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
return i40e_asq_send_command_atomic_v2(hw, &desc, mv_list, buf_size,
cmd_details, true, aq_status);
}
/**
- * i40e_mirrorrule_op - Internal helper function to add/delete mirror rule
- * @hw: pointer to the hw struct
- * @opcode: AQ opcode for add or delete mirror rule
- * @sw_seid: Switch SEID (to which rule refers)
- * @rule_type: Rule Type (ingress/egress/VLAN)
- * @id: Destination VSI SEID or Rule ID
- * @count: length of the list
- * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
- * @cmd_details: pointer to command details structure or NULL
- * @rule_id: Rule ID returned from FW
- * @rules_used: Number of rules used in internal switch
- * @rules_free: Number of rules free in internal switch
- *
- * Add/Delete a mirror rule to a specific switch. Mirror rules are supported for
- * VEBs/VEPA elements only
- **/
-static int i40e_mirrorrule_op(struct i40e_hw *hw,
- u16 opcode, u16 sw_seid, u16 rule_type, u16 id,
- u16 count, __le16 *mr_list,
- struct i40e_asq_cmd_details *cmd_details,
- u16 *rule_id, u16 *rules_used, u16 *rules_free)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_delete_mirror_rule *cmd =
- (struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw;
- struct i40e_aqc_add_delete_mirror_rule_completion *resp =
- (struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw;
- u16 buf_size;
- int status;
-
- buf_size = count * sizeof(*mr_list);
-
- /* prep the rest of the request */
- i40e_fill_default_direct_cmd_desc(&desc, opcode);
- cmd->seid = cpu_to_le16(sw_seid);
- cmd->rule_type = cpu_to_le16(rule_type &
- I40E_AQC_MIRROR_RULE_TYPE_MASK);
- cmd->num_entries = cpu_to_le16(count);
- /* Dest VSI for add, rule_id for delete */
- cmd->destination = cpu_to_le16(id);
- if (mr_list) {
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
- I40E_AQ_FLAG_RD));
- if (buf_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
- }
-
- status = i40e_asq_send_command(hw, &desc, mr_list, buf_size,
- cmd_details);
- if (!status ||
- hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) {
- if (rule_id)
- *rule_id = le16_to_cpu(resp->rule_id);
- if (rules_used)
- *rules_used = le16_to_cpu(resp->mirror_rules_used);
- if (rules_free)
- *rules_free = le16_to_cpu(resp->mirror_rules_free);
- }
- return status;
-}
-
-/**
- * i40e_aq_add_mirrorrule - add a mirror rule
- * @hw: pointer to the hw struct
- * @sw_seid: Switch SEID (to which rule refers)
- * @rule_type: Rule Type (ingress/egress/VLAN)
- * @dest_vsi: SEID of VSI to which packets will be mirrored
- * @count: length of the list
- * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
- * @cmd_details: pointer to command details structure or NULL
- * @rule_id: Rule ID returned from FW
- * @rules_used: Number of rules used in internal switch
- * @rules_free: Number of rules free in internal switch
- *
- * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only
- **/
-int i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
- u16 rule_type, u16 dest_vsi, u16 count,
- __le16 *mr_list,
- struct i40e_asq_cmd_details *cmd_details,
- u16 *rule_id, u16 *rules_used, u16 *rules_free)
-{
- if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS ||
- rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) {
- if (count == 0 || !mr_list)
- return -EINVAL;
- }
-
- return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid,
- rule_type, dest_vsi, count, mr_list,
- cmd_details, rule_id, rules_used, rules_free);
-}
-
-/**
- * i40e_aq_delete_mirrorrule - delete a mirror rule
- * @hw: pointer to the hw struct
- * @sw_seid: Switch SEID (to which rule refers)
- * @rule_type: Rule Type (ingress/egress/VLAN)
- * @count: length of the list
- * @rule_id: Rule ID that is returned in the receive desc as part of
- * add_mirrorrule.
- * @mr_list: list of mirrored VLAN IDs to be removed
- * @cmd_details: pointer to command details structure or NULL
- * @rules_used: Number of rules used in internal switch
- * @rules_free: Number of rules free in internal switch
- *
- * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only
- **/
-int i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
- u16 rule_type, u16 rule_id, u16 count,
- __le16 *mr_list,
- struct i40e_asq_cmd_details *cmd_details,
- u16 *rules_used, u16 *rules_free)
-{
- /* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */
- if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
- /* count and mr_list shall be valid for rule_type INGRESS VLAN
- * mirroring. For other rule_type, count and rule_type should
- * not matter.
- */
- if (count == 0 || !mr_list)
- return -EINVAL;
- }
-
- return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid,
- rule_type, rule_id, count, mr_list,
- cmd_details, NULL, rules_used, rules_free);
-}
-
-/**
* i40e_aq_send_msg_to_vf
* @hw: pointer to the hardware structure
* @vfid: VF id to send msg
@@ -2834,21 +2388,21 @@ int i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_pf_vf_message *cmd =
- (struct i40e_aqc_pf_vf_message *)&desc.params.raw;
+ struct i40e_aqc_pf_vf_message *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf);
+ cmd = libie_aq_raw(&desc);
cmd->id = cpu_to_le32(vfid);
desc.cookie_high = cpu_to_le32(v_opcode);
desc.cookie_low = cpu_to_le32(v_retval);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_SI);
if (msglen) {
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
- I40E_AQ_FLAG_RD));
+ desc.flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF |
+ LIBIE_AQ_FLAG_RD));
if (msglen > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
desc.datalen = cpu_to_le16(msglen);
}
status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details);
@@ -2869,9 +2423,8 @@ int i40e_aq_debug_read_register(struct i40e_hw *hw,
u32 reg_addr, u64 *reg_val,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_debug_reg_read_write *cmd_resp =
- (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
+ struct i40e_aqc_debug_reg_read_write *cmd_resp;
+ struct libie_aq_desc desc;
int status;
if (reg_val == NULL)
@@ -2879,6 +2432,7 @@ int i40e_aq_debug_read_register(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg);
+ cmd_resp = libie_aq_raw(&desc);
cmd_resp->address = cpu_to_le32(reg_addr);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -2904,13 +2458,13 @@ int i40e_aq_debug_write_register(struct i40e_hw *hw,
u32 reg_addr, u64 reg_val,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_debug_reg_read_write *cmd =
- (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
+ struct i40e_aqc_debug_reg_read_write *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg);
+ cmd = libie_aq_raw(&desc);
cmd->address = cpu_to_le32(reg_addr);
cmd->value_high = cpu_to_le32((u32)(reg_val >> 32));
cmd->value_low = cpu_to_le32((u32)(reg_val & 0xFFFFFFFF));
@@ -2937,16 +2491,16 @@ int i40e_aq_request_resource(struct i40e_hw *hw,
u8 sdp_number, u64 *timeout,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_request_resource *cmd_resp =
- (struct i40e_aqc_request_resource *)&desc.params.raw;
+ struct libie_aqc_req_res *cmd_resp;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource);
- cmd_resp->resource_id = cpu_to_le16(resource);
+ cmd_resp = libie_aq_raw(&desc);
+ cmd_resp->res_id = cpu_to_le16(resource);
cmd_resp->access_type = cpu_to_le16(access);
- cmd_resp->resource_number = cpu_to_le32(sdp_number);
+ cmd_resp->res_number = cpu_to_le32(sdp_number);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
/* The completion specifies the maximum time in ms that the driver
@@ -2955,7 +2509,7 @@ int i40e_aq_request_resource(struct i40e_hw *hw,
* busy return value and the timeout field indicates the maximum time
* the current owner of the resource has to free it.
*/
- if (!status || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY)
+ if (!status || hw->aq.asq_last_status == LIBIE_AQ_RC_EBUSY)
*timeout = le32_to_cpu(cmd_resp->timeout);
return status;
@@ -2975,15 +2529,15 @@ int i40e_aq_release_resource(struct i40e_hw *hw,
u8 sdp_number,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_request_resource *cmd =
- (struct i40e_aqc_request_resource *)&desc.params.raw;
+ struct libie_aqc_req_res *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource);
- cmd->resource_id = cpu_to_le16(resource);
- cmd->resource_number = cpu_to_le32(sdp_number);
+ cmd = libie_aq_raw(&desc);
+ cmd->res_id = cpu_to_le16(resource);
+ cmd->res_number = cpu_to_le32(sdp_number);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -3007,9 +2561,8 @@ int i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
bool last_command,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_nvm_update *cmd =
- (struct i40e_aqc_nvm_update *)&desc.params.raw;
+ struct i40e_aqc_nvm_update *cmd;
+ struct libie_aq_desc desc;
int status;
/* In offset the highest byte must be zeroed. */
@@ -3020,6 +2573,7 @@ int i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read);
+ cmd = libie_aq_raw(&desc);
/* If this is the last command in a series, set the proper flag. */
if (last_command)
cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
@@ -3027,9 +2581,9 @@ int i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
cmd->offset = cpu_to_le32(offset);
cmd->length = cpu_to_le16(length);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
if (length > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
@@ -3052,9 +2606,8 @@ int i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
u32 offset, u16 length, bool last_command,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_nvm_update *cmd =
- (struct i40e_aqc_nvm_update *)&desc.params.raw;
+ struct i40e_aqc_nvm_update *cmd;
+ struct libie_aq_desc desc;
int status;
/* In offset the highest byte must be zeroed. */
@@ -3065,6 +2618,7 @@ int i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase);
+ cmd = libie_aq_raw(&desc);
/* If this is the last command in a series, set the proper flag. */
if (last_command)
cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
@@ -3091,7 +2645,7 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
u32 cap_count,
enum i40e_admin_queue_opc list_type_opc)
{
- struct i40e_aqc_list_capabilities_element_resp *cap;
+ struct libie_aqc_list_caps_elem *cap;
u32 valid_functions, num_functions;
u32 number, logical_id, phys_id;
struct i40e_hw_capabilities *p;
@@ -3100,7 +2654,7 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
int status;
u32 i = 0;
- cap = (struct i40e_aqc_list_capabilities_element_resp *) buff;
+ cap = (struct libie_aqc_list_caps_elem *)buff;
if (list_type_opc == i40e_aqc_opc_list_dev_capabilities)
p = &hw->dev_caps;
@@ -3110,17 +2664,17 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
return;
for (i = 0; i < cap_count; i++, cap++) {
- id = le16_to_cpu(cap->id);
+ id = le16_to_cpu(cap->cap);
number = le32_to_cpu(cap->number);
logical_id = le32_to_cpu(cap->logical_id);
phys_id = le32_to_cpu(cap->phys_id);
- major_rev = cap->major_rev;
+ major_rev = cap->major_ver;
switch (id) {
- case I40E_AQ_CAP_ID_SWITCH_MODE:
+ case LIBIE_AQC_CAPS_SWITCH_MODE:
p->switch_mode = number;
break;
- case I40E_AQ_CAP_ID_MNG_MODE:
+ case LIBIE_AQC_CAPS_MNG_MODE:
p->management_mode = number;
if (major_rev > 1) {
p->mng_protocols_over_mctp = logical_id;
@@ -3131,76 +2685,76 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
p->mng_protocols_over_mctp = 0;
}
break;
- case I40E_AQ_CAP_ID_NPAR_ACTIVE:
+ case LIBIE_AQC_CAPS_NPAR_ACTIVE:
p->npar_enable = number;
break;
- case I40E_AQ_CAP_ID_OS2BMC_CAP:
+ case LIBIE_AQC_CAPS_OS2BMC_CAP:
p->os2bmc = number;
break;
- case I40E_AQ_CAP_ID_FUNCTIONS_VALID:
+ case LIBIE_AQC_CAPS_VALID_FUNCTIONS:
p->valid_functions = number;
break;
- case I40E_AQ_CAP_ID_SRIOV:
+ case LIBIE_AQC_CAPS_SRIOV:
if (number == 1)
p->sr_iov_1_1 = true;
break;
- case I40E_AQ_CAP_ID_VF:
+ case LIBIE_AQC_CAPS_VF:
p->num_vfs = number;
p->vf_base_id = logical_id;
break;
- case I40E_AQ_CAP_ID_VMDQ:
+ case LIBIE_AQC_CAPS_VMDQ:
if (number == 1)
p->vmdq = true;
break;
- case I40E_AQ_CAP_ID_8021QBG:
+ case LIBIE_AQC_CAPS_8021QBG:
if (number == 1)
p->evb_802_1_qbg = true;
break;
- case I40E_AQ_CAP_ID_8021QBR:
+ case LIBIE_AQC_CAPS_8021QBR:
if (number == 1)
p->evb_802_1_qbh = true;
break;
- case I40E_AQ_CAP_ID_VSI:
+ case LIBIE_AQC_CAPS_VSI:
p->num_vsis = number;
break;
- case I40E_AQ_CAP_ID_DCB:
+ case LIBIE_AQC_CAPS_DCB:
if (number == 1) {
p->dcb = true;
p->enabled_tcmap = logical_id;
p->maxtc = phys_id;
}
break;
- case I40E_AQ_CAP_ID_FCOE:
+ case LIBIE_AQC_CAPS_FCOE:
if (number == 1)
p->fcoe = true;
break;
- case I40E_AQ_CAP_ID_ISCSI:
+ case LIBIE_AQC_CAPS_ISCSI:
if (number == 1)
p->iscsi = true;
break;
- case I40E_AQ_CAP_ID_RSS:
+ case LIBIE_AQC_CAPS_RSS:
p->rss = true;
p->rss_table_size = number;
p->rss_table_entry_width = logical_id;
break;
- case I40E_AQ_CAP_ID_RXQ:
+ case LIBIE_AQC_CAPS_RXQS:
p->num_rx_qp = number;
p->base_queue = phys_id;
break;
- case I40E_AQ_CAP_ID_TXQ:
+ case LIBIE_AQC_CAPS_TXQS:
p->num_tx_qp = number;
p->base_queue = phys_id;
break;
- case I40E_AQ_CAP_ID_MSIX:
+ case LIBIE_AQC_CAPS_MSIX:
p->num_msix_vectors = number;
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: MSIX vector count = %d\n",
p->num_msix_vectors);
break;
- case I40E_AQ_CAP_ID_VF_MSIX:
+ case LIBIE_AQC_CAPS_VF_MSIX:
p->num_msix_vectors_vf = number;
break;
- case I40E_AQ_CAP_ID_FLEX10:
+ case LIBIE_AQC_CAPS_FLEX10:
if (major_rev == 1) {
if (number == 1) {
p->flex10_enable = true;
@@ -3216,42 +2770,42 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
p->flex10_mode = logical_id;
p->flex10_status = phys_id;
break;
- case I40E_AQ_CAP_ID_CEM:
+ case LIBIE_AQC_CAPS_CEM:
if (number == 1)
p->mgmt_cem = true;
break;
- case I40E_AQ_CAP_ID_IWARP:
+ case LIBIE_AQC_CAPS_RDMA:
if (number == 1)
p->iwarp = true;
break;
- case I40E_AQ_CAP_ID_LED:
+ case LIBIE_AQC_CAPS_LED:
if (phys_id < I40E_HW_CAP_MAX_GPIO)
p->led[phys_id] = true;
break;
- case I40E_AQ_CAP_ID_SDP:
+ case LIBIE_AQC_CAPS_SDP:
if (phys_id < I40E_HW_CAP_MAX_GPIO)
p->sdp[phys_id] = true;
break;
- case I40E_AQ_CAP_ID_MDIO:
+ case LIBIE_AQC_CAPS_MDIO:
if (number == 1) {
p->mdio_port_num = phys_id;
p->mdio_port_mode = logical_id;
}
break;
- case I40E_AQ_CAP_ID_1588:
+ case LIBIE_AQC_CAPS_1588:
if (number == 1)
p->ieee_1588 = true;
break;
- case I40E_AQ_CAP_ID_FLOW_DIRECTOR:
+ case LIBIE_AQC_CAPS_FD:
p->fd = true;
p->fd_filters_guaranteed = number;
p->fd_filters_best_effort = logical_id;
break;
- case I40E_AQ_CAP_ID_WSR_PROT:
+ case LIBIE_AQC_CAPS_WSR_PROT:
p->wr_csr_prot = (u64)number;
p->wr_csr_prot |= (u64)logical_id << 32;
break;
- case I40E_AQ_CAP_ID_NVM_MGMT:
+ case LIBIE_AQC_CAPS_NVM_MGMT:
if (number & I40E_NVM_MGMT_SEC_REV_DISABLED)
p->sec_rev_disabled = true;
if (number & I40E_NVM_MGMT_UPDATE_DISABLED)
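i40e_parse_discover_capabilities() above simply walks the array of struct libie_aqc_list_caps_elem returned by the discover-capabilities command and switches on each capability ID. A cut-down standalone model of that loop is sketched below; the DEMO_* IDs, field subset and example values are invented for illustration and do not match the real LIBIE_AQC_CAPS_* encodings.

#include <stdio.h>

enum demo_cap_id {
        DEMO_CAPS_VF    = 0x13,
        DEMO_CAPS_MSIX  = 0x43,
};

struct demo_caps_elem {
        unsigned short cap;
        unsigned int number;
        unsigned int logical_id;
};

struct demo_hw_caps {
        unsigned int num_vfs;
        unsigned int vf_base_id;
        unsigned int num_msix_vectors;
};

/* Same shape as the parser above: walk the element array and switch
 * on each capability ID to fill in the capability summary. */
static void demo_parse_caps(struct demo_hw_caps *p,
                            const struct demo_caps_elem *cap, unsigned int n)
{
        for (unsigned int i = 0; i < n; i++, cap++) {
                switch (cap->cap) {
                case DEMO_CAPS_VF:
                        p->num_vfs = cap->number;
                        p->vf_base_id = cap->logical_id;
                        break;
                case DEMO_CAPS_MSIX:
                        p->num_msix_vectors = cap->number;
                        break;
                default:
                        break;
                }
        }
}

int main(void)
{
        struct demo_caps_elem list[] = {
                { .cap = DEMO_CAPS_VF,   .number = 64, .logical_id = 16 },
                { .cap = DEMO_CAPS_MSIX, .number = 129 },
        };
        struct demo_hw_caps caps = { 0 };

        demo_parse_caps(&caps, list, 2);
        printf("vfs=%u base=%u msix=%u\n",
               caps.num_vfs, caps.vf_base_id, caps.num_msix_vectors);
        return 0;
}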
@@ -3343,11 +2897,11 @@ int i40e_aq_discover_capabilities(struct i40e_hw *hw,
enum i40e_admin_queue_opc list_type_opc,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aqc_list_capabilites *cmd;
- struct i40e_aq_desc desc;
+ struct libie_aqc_list_caps *cmd;
+ struct libie_aq_desc desc;
int status = 0;
- cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw;
+ cmd = libie_aq_raw(&desc);
if (list_type_opc != i40e_aqc_opc_list_func_capabilities &&
list_type_opc != i40e_aqc_opc_list_dev_capabilities) {
@@ -3357,9 +2911,9 @@ int i40e_aq_discover_capabilities(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc, list_type_opc);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
if (buff_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
*data_size = le16_to_cpu(desc.datalen);
@@ -3392,9 +2946,8 @@ int i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
bool last_command, u8 preservation_flags,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_nvm_update *cmd =
- (struct i40e_aqc_nvm_update *)&desc.params.raw;
+ struct i40e_aqc_nvm_update *cmd;
+ struct libie_aq_desc desc;
int status;
/* In offset the highest byte must be zeroed. */
@@ -3405,6 +2958,7 @@ int i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
+ cmd = libie_aq_raw(&desc);
/* If this is the last command in a series, set the proper flag. */
if (last_command)
cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
@@ -3422,9 +2976,9 @@ int i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
cmd->offset = cpu_to_le32(offset);
cmd->length = cpu_to_le16(length);
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ desc.flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD));
if (length > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
@@ -3433,41 +2987,6 @@ i40e_aq_update_nvm_exit:
}
/**
- * i40e_aq_rearrange_nvm
- * @hw: pointer to the hw struct
- * @rearrange_nvm: defines direction of rearrangement
- * @cmd_details: pointer to command details structure or NULL
- *
- * Rearrange NVM structure, available only for transition FW
- **/
-int i40e_aq_rearrange_nvm(struct i40e_hw *hw,
- u8 rearrange_nvm,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aqc_nvm_update *cmd;
- struct i40e_aq_desc desc;
- int status;
-
- cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw;
-
- i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
-
- rearrange_nvm &= (I40E_AQ_NVM_REARRANGE_TO_FLAT |
- I40E_AQ_NVM_REARRANGE_TO_STRUCT);
-
- if (!rearrange_nvm) {
- status = -EINVAL;
- goto i40e_aq_rearrange_nvm_exit;
- }
-
- cmd->command_flags |= rearrange_nvm;
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
-i40e_aq_rearrange_nvm_exit:
- return status;
-}
-
-/**
* i40e_aq_get_lldp_mib
* @hw: pointer to the hw struct
* @bridge_type: type of bridge requested
@@ -3485,11 +3004,9 @@ int i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
u16 *local_len, u16 *remote_len,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_lldp_get_mib *cmd =
- (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
- struct i40e_aqc_lldp_get_mib *resp =
- (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
+ struct i40e_aqc_lldp_get_mib *resp;
+ struct i40e_aqc_lldp_get_mib *cmd;
+ struct libie_aq_desc desc;
int status;
if (buff_size == 0 || !buff)
@@ -3497,16 +3014,18 @@ int i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib);
/* Indirect Command */
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
+ resp = libie_aq_raw(&desc);
+ cmd = libie_aq_raw(&desc);
cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK;
cmd->type |= FIELD_PREP(I40E_AQ_LLDP_BRIDGE_TYPE_MASK, bridge_type);
desc.datalen = cpu_to_le16(buff_size);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
if (buff_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
if (!status) {
@@ -3535,19 +3054,19 @@ i40e_aq_set_lldp_mib(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aqc_lldp_set_local_mib *cmd;
- struct i40e_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
- cmd = (struct i40e_aqc_lldp_set_local_mib *)&desc.params.raw;
+ cmd = libie_aq_raw(&desc);
if (buff_size == 0 || !buff)
return -EINVAL;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_lldp_set_local_mib);
/* Indirect Command */
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ desc.flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD));
if (buff_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
desc.datalen = cpu_to_le16(buff_size);
cmd->type = mib_type;
@@ -3572,13 +3091,13 @@ int i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
bool enable_update,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_lldp_update_mib *cmd =
- (struct i40e_aqc_lldp_update_mib *)&desc.params.raw;
+ struct i40e_aqc_lldp_update_mib *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib);
+ cmd = libie_aq_raw(&desc);
if (!enable_update)
cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE;
@@ -3588,44 +3107,6 @@ int i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
}
/**
- * i40e_aq_restore_lldp
- * @hw: pointer to the hw struct
- * @setting: pointer to factory setting variable or NULL
- * @restore: True if factory settings should be restored
- * @cmd_details: pointer to command details structure or NULL
- *
- * Restore LLDP Agent factory settings if @restore set to True. In other case
- * only returns factory setting in AQ response.
- **/
-int
-i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_lldp_restore *cmd =
- (struct i40e_aqc_lldp_restore *)&desc.params.raw;
- int status;
-
- if (!test_bit(I40E_HW_CAP_FW_LLDP_PERSISTENT, hw->caps)) {
- i40e_debug(hw, I40E_DEBUG_ALL,
- "Restore LLDP not supported by current FW version.\n");
- return -ENODEV;
- }
-
- i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_restore);
-
- if (restore)
- cmd->command |= I40E_AQ_LLDP_AGENT_RESTORE;
-
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
- if (setting)
- *setting = cmd->command & 1;
-
- return status;
-}
-
-/**
* i40e_aq_stop_lldp
* @hw: pointer to the hw struct
* @shutdown_agent: True if LLDP Agent needs to be Shutdown
@@ -3638,13 +3119,13 @@ int i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
bool persist,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_lldp_stop *cmd =
- (struct i40e_aqc_lldp_stop *)&desc.params.raw;
+ struct i40e_aqc_lldp_stop *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop);
+ cmd = libie_aq_raw(&desc);
if (shutdown_agent)
cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN;
@@ -3672,13 +3153,13 @@ int i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
int i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_lldp_start *cmd =
- (struct i40e_aqc_lldp_start *)&desc.params.raw;
+ struct i40e_aqc_lldp_start *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
+ cmd = libie_aq_raw(&desc);
cmd->command = I40E_AQ_LLDP_AGENT_START;
if (persist) {
@@ -3705,9 +3186,8 @@ int
i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_dcb_parameters *cmd =
- (struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
+ struct i40e_aqc_set_dcb_parameters *cmd;
+ struct libie_aq_desc desc;
int status;
if (!test_bit(I40E_HW_CAP_FW_LLDP_STOPPABLE, hw->caps))
@@ -3716,6 +3196,7 @@ i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_dcb_parameters);
+ cmd = libie_aq_raw(&desc);
if (dcb_enable) {
cmd->valid_flags = I40E_DCB_VALID;
cmd->command = I40E_AQ_DCB_SET_AGENT;
@@ -3738,7 +3219,7 @@ int i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
void *buff, u16 buff_size,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
if (buff_size == 0 || !buff)
@@ -3746,7 +3227,7 @@ int i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size,
cmd_details);
@@ -3770,15 +3251,15 @@ int i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
u8 *filter_index,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_udp_tunnel *cmd =
- (struct i40e_aqc_add_udp_tunnel *)&desc.params.raw;
- struct i40e_aqc_del_udp_tunnel_completion *resp =
- (struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw;
+ struct i40e_aqc_del_udp_tunnel_completion *resp;
+ struct i40e_aqc_add_udp_tunnel *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel);
+ resp = libie_aq_raw(&desc);
+ cmd = libie_aq_raw(&desc);
cmd->udp_port = cpu_to_le16(udp_port);
cmd->protocol_type = protocol_index;
@@ -3799,13 +3280,13 @@ int i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
int i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_remove_udp_tunnel *cmd =
- (struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw;
+ struct i40e_aqc_remove_udp_tunnel *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel);
+ cmd = libie_aq_raw(&desc);
cmd->index = index;
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -3824,9 +3305,8 @@ int i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
int i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_switch_seid *cmd =
- (struct i40e_aqc_switch_seid *)&desc.params.raw;
+ struct i40e_aqc_switch_seid *cmd;
+ struct libie_aq_desc desc;
int status;
if (seid == 0)
@@ -3834,6 +3314,7 @@ int i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element);
+ cmd = libie_aq_raw(&desc);
cmd->seid = cpu_to_le16(seid);
status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
@@ -3854,7 +3335,7 @@ int i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
int i40e_aq_dcb_updated(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated);
@@ -3880,9 +3361,8 @@ static int i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
enum i40e_admin_queue_opc opcode,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_tx_sched_ind *cmd =
- (struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
+ struct i40e_aqc_tx_sched_ind *cmd;
+ struct libie_aq_desc desc;
int status;
bool cmd_param_flag = false;
@@ -3909,12 +3389,13 @@ static int i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
i40e_fill_default_direct_cmd_desc(&desc, opcode);
+ cmd = libie_aq_raw(&desc);
/* Indirect command */
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
if (cmd_param_flag)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_RD);
if (buff_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
desc.datalen = cpu_to_le16(buff_size);
@@ -3937,14 +3418,14 @@ int i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
u16 seid, u16 credit, u8 max_credit,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_configure_vsi_bw_limit *cmd =
- (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw;
+ struct i40e_aqc_configure_vsi_bw_limit *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_configure_vsi_bw_limit);
+ cmd = libie_aq_raw(&desc);
cmd->vsi_seid = cpu_to_le16(seid);
cmd->credit = cpu_to_le16(credit);
cmd->max_credit = max_credit;
@@ -4272,18 +3753,16 @@ int i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
struct i40e_control_filter_stats *stats,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_remove_control_packet_filter *cmd =
- (struct i40e_aqc_add_remove_control_packet_filter *)
- &desc.params.raw;
- struct i40e_aqc_add_remove_control_packet_filter_completion *resp =
- (struct i40e_aqc_add_remove_control_packet_filter_completion *)
- &desc.params.raw;
+ struct i40e_aqc_add_remove_control_packet_filter_completion *resp;
+ struct i40e_aqc_add_remove_control_packet_filter *cmd;
+ struct libie_aq_desc desc;
int status;
if (vsi_seid == 0)
return -EINVAL;
+ resp = libie_aq_raw(&desc);
+ cmd = libie_aq_raw(&desc);
if (is_add) {
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_add_control_packet_filter);
@@ -4351,15 +3830,15 @@ static int i40e_aq_alternate_read(struct i40e_hw *hw,
u32 reg_addr0, u32 *reg_val0,
u32 reg_addr1, u32 *reg_val1)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_alternate_write *cmd_resp =
- (struct i40e_aqc_alternate_write *)&desc.params.raw;
+ struct i40e_aqc_alternate_write *cmd_resp;
+ struct libie_aq_desc desc;
int status;
if (!reg_val0)
return -EINVAL;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read);
+ cmd_resp = libie_aq_raw(&desc);
cmd_resp->address0 = cpu_to_le32(reg_addr0);
cmd_resp->address1 = cpu_to_le32(reg_addr1);
@@ -4387,10 +3866,10 @@ int i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aqc_tx_sched_ind *cmd;
- struct i40e_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
- cmd = (struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
+ cmd = libie_aq_raw(&desc);
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_suspend_port_tx);
cmd->vsi_seid = cpu_to_le16(seid);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -4408,7 +3887,7 @@ int i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid,
int i40e_aq_resume_port_tx(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx);
@@ -4485,11 +3964,9 @@ int i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
u8 *ret_next_table, u32 *ret_next_index,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_debug_dump_internals *cmd =
- (struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
- struct i40e_aqc_debug_dump_internals *resp =
- (struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
+ struct i40e_aqc_debug_dump_internals *resp;
+ struct i40e_aqc_debug_dump_internals *cmd;
+ struct libie_aq_desc desc;
int status;
if (buff_size == 0 || !buff)
@@ -4497,10 +3974,12 @@ int i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_debug_dump_internals);
+ resp = libie_aq_raw(&desc);
+ cmd = libie_aq_raw(&desc);
/* Indirect Command */
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
if (buff_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
cmd->cluster_id = cluster_id;
cmd->table_id = table_id;
@@ -4577,18 +4056,18 @@ i40e_aq_configure_partition_bw(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details)
{
u16 bwd_size = sizeof(*bw_data);
- struct i40e_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_configure_partition_bw);
/* Indirect command */
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_RD);
if (bwd_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
desc.datalen = cpu_to_le16(bwd_size);
@@ -4823,84 +4302,6 @@ phy_write_end:
}
/**
- * i40e_write_phy_register
- * @hw: pointer to the HW structure
- * @page: registers page number
- * @reg: register address in the page
- * @phy_addr: PHY address on MDIO interface
- * @value: PHY register value
- *
- * Writes value to specified PHY register
- **/
-int i40e_write_phy_register(struct i40e_hw *hw,
- u8 page, u16 reg, u8 phy_addr, u16 value)
-{
- int status;
-
- switch (hw->device_id) {
- case I40E_DEV_ID_1G_BASE_T_X722:
- status = i40e_write_phy_register_clause22(hw, reg, phy_addr,
- value);
- break;
- case I40E_DEV_ID_1G_BASE_T_BC:
- case I40E_DEV_ID_5G_BASE_T_BC:
- case I40E_DEV_ID_10G_BASE_T:
- case I40E_DEV_ID_10G_BASE_T4:
- case I40E_DEV_ID_10G_BASE_T_BC:
- case I40E_DEV_ID_10G_BASE_T_X722:
- case I40E_DEV_ID_25G_B:
- case I40E_DEV_ID_25G_SFP28:
- status = i40e_write_phy_register_clause45(hw, page, reg,
- phy_addr, value);
- break;
- default:
- status = -EIO;
- break;
- }
-
- return status;
-}
-
-/**
- * i40e_read_phy_register
- * @hw: pointer to the HW structure
- * @page: registers page number
- * @reg: register address in the page
- * @phy_addr: PHY address on MDIO interface
- * @value: PHY register value
- *
- * Reads specified PHY register value
- **/
-int i40e_read_phy_register(struct i40e_hw *hw,
- u8 page, u16 reg, u8 phy_addr, u16 *value)
-{
- int status;
-
- switch (hw->device_id) {
- case I40E_DEV_ID_1G_BASE_T_X722:
- status = i40e_read_phy_register_clause22(hw, reg, phy_addr,
- value);
- break;
- case I40E_DEV_ID_1G_BASE_T_BC:
- case I40E_DEV_ID_5G_BASE_T_BC:
- case I40E_DEV_ID_10G_BASE_T:
- case I40E_DEV_ID_10G_BASE_T4:
- case I40E_DEV_ID_10G_BASE_T_BC:
- case I40E_DEV_ID_10G_BASE_T_X722:
- case I40E_DEV_ID_25G_B:
- case I40E_DEV_ID_25G_SFP28:
- status = i40e_read_phy_register_clause45(hw, page, reg,
- phy_addr, value);
- break;
- default:
- status = -EIO;
- break;
- }
-
- return status;
-}
-
-/**
* i40e_get_phy_address
* @hw: pointer to the HW structure
* @dev_num: PHY port num that address we want
@@ -4916,80 +4317,6 @@ u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num)
}
/**
- * i40e_blink_phy_link_led
- * @hw: pointer to the HW structure
- * @time: time how long led will blinks in secs
- * @interval: gap between LED on and off in msecs
- *
- * Blinks PHY link LED
- **/
-int i40e_blink_phy_link_led(struct i40e_hw *hw,
- u32 time, u32 interval)
-{
- u16 led_addr = I40E_PHY_LED_PROV_REG_1;
- u16 gpio_led_port;
- u8 phy_addr = 0;
- int status = 0;
- u16 led_ctl;
- u8 port_num;
- u16 led_reg;
- u32 i;
-
- i = rd32(hw, I40E_PFGEN_PORTNUM);
- port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
- phy_addr = i40e_get_phy_address(hw, port_num);
-
- for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
- led_addr++) {
- status = i40e_read_phy_register_clause45(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr,
- &led_reg);
- if (status)
- goto phy_blinking_end;
- led_ctl = led_reg;
- if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
- led_reg = 0;
- status = i40e_write_phy_register_clause45(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr,
- led_reg);
- if (status)
- goto phy_blinking_end;
- break;
- }
- }
-
- if (time > 0 && interval > 0) {
- for (i = 0; i < time * 1000; i += interval) {
- status = i40e_read_phy_register_clause45(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, &led_reg);
- if (status)
- goto restore_config;
- if (led_reg & I40E_PHY_LED_MANUAL_ON)
- led_reg = 0;
- else
- led_reg = I40E_PHY_LED_MANUAL_ON;
- status = i40e_write_phy_register_clause45(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, led_reg);
- if (status)
- goto restore_config;
- msleep(interval);
- }
- }
-
-restore_config:
- status = i40e_write_phy_register_clause45(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, led_ctl);
-
-phy_blinking_end:
- return status;
-}
-
-/**
* i40e_led_get_reg - read LED register
* @hw: pointer to the HW structure
* @led_addr: LED register address
@@ -5172,9 +4499,8 @@ int i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
u32 reg_addr, u32 *reg_val,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp =
- (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
+ struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp;
+ struct libie_aq_desc desc;
int status;
if (!reg_val)
@@ -5182,6 +4508,7 @@ int i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read);
+ cmd_resp = libie_aq_raw(&desc);
cmd_resp->address = cpu_to_le32(reg_addr);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -5210,7 +4537,7 @@ u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
if (!use_register) {
do_retry:
status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL);
- if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
+ if (hw->aq.asq_last_status == LIBIE_AQ_RC_EAGAIN && retry) {
usleep_range(1000, 2000);
retry--;
goto do_retry;
@@ -5238,13 +4565,13 @@ int i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
u32 reg_addr, u32 reg_val,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_rx_ctl_reg_read_write *cmd =
- (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
+ struct i40e_aqc_rx_ctl_reg_read_write *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write);
+ cmd = libie_aq_raw(&desc);
cmd->address = cpu_to_le32(reg_addr);
cmd->value = cpu_to_le32(reg_val);
@@ -5272,7 +4599,7 @@ void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
do_retry:
status = i40e_aq_rx_ctl_write_register(hw, reg_addr,
reg_val, NULL);
- if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
+ if (hw->aq.asq_last_status == LIBIE_AQ_RC_EAGAIN && retry) {
usleep_range(1000, 2000);
retry--;
goto do_retry;
@@ -5331,14 +4658,14 @@ int i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
u32 reg_addr, u32 reg_val,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_phy_register_access *cmd =
- (struct i40e_aqc_phy_register_access *)&desc.params.raw;
+ struct i40e_aqc_phy_register_access *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_phy_register);
+ cmd = libie_aq_raw(&desc);
cmd->phy_interface = phy_select;
cmd->dev_address = dev_addr;
cmd->reg_address = cpu_to_le32(reg_addr);
@@ -5376,14 +4703,14 @@ int i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
u32 reg_addr, u32 *reg_val,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_phy_register_access *cmd =
- (struct i40e_aqc_phy_register_access *)&desc.params.raw;
+ struct i40e_aqc_phy_register_access *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_phy_register);
+ cmd = libie_aq_raw(&desc);
cmd->phy_interface = phy_select;
cmd->dev_address = dev_addr;
cmd->reg_address = cpu_to_le32(reg_addr);
@@ -5415,19 +4742,18 @@ int i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
u32 *error_offset, u32 *error_info,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_write_personalization_profile *cmd =
- (struct i40e_aqc_write_personalization_profile *)
- &desc.params.raw;
+ struct i40e_aqc_write_personalization_profile *cmd;
struct i40e_aqc_write_ddp_resp *resp;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_write_personalization_profile);
- desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
+ cmd = libie_aq_raw(&desc);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD);
if (buff_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
desc.datalen = cpu_to_le16(buff_size);
@@ -5435,7 +4761,7 @@ int i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
if (!status) {
- resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw;
+ resp = libie_aq_raw(&desc);
if (error_offset)
*error_offset = le32_to_cpu(resp->error_offset);
if (error_info)
@@ -5457,17 +4783,17 @@ int i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
u16 buff_size, u8 flags,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_get_applied_profiles *cmd =
- (struct i40e_aqc_get_applied_profiles *)&desc.params.raw;
+ struct i40e_aqc_get_applied_profiles *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_personalization_profile_list);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ cmd = libie_aq_raw(&desc);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
if (buff_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
desc.datalen = cpu_to_le16(buff_size);
cmd->flags = flags;
@@ -5522,39 +4848,6 @@ i40e_find_segment_in_package(u32 segment_type,
(struct i40e_profile_section_header *)((u8 *)(profile) + (offset))
/**
- * i40e_find_section_in_profile
- * @section_type: the section type to search for (i.e., SECTION_TYPE_NOTE)
- * @profile: pointer to the i40e segment header to be searched
- *
- * This function searches i40e segment for a particular section type. On
- * success it returns a pointer to the section header, otherwise it will
- * return NULL.
- **/
-struct i40e_profile_section_header *
-i40e_find_section_in_profile(u32 section_type,
- struct i40e_profile_segment *profile)
-{
- struct i40e_profile_section_header *sec;
- struct i40e_section_table *sec_tbl;
- u32 sec_off;
- u32 i;
-
- if (profile->header.type != SEGMENT_TYPE_I40E)
- return NULL;
-
- I40E_SECTION_TABLE(profile, sec_tbl);
-
- for (i = 0; i < sec_tbl->section_count; i++) {
- sec_off = sec_tbl->section_offset[i];
- sec = I40E_SECTION_HEADER(profile, sec_off);
- if (sec->section.type == section_type)
- return sec;
- }
-
- return NULL;
-}
-
-/**
* i40e_ddp_exec_aq_section - Execute generic AQ for DDP
* @hw: pointer to the hw struct
* @aq: command buffer containing all data to execute AQ
@@ -5562,7 +4855,7 @@ i40e_find_section_in_profile(u32 section_type,
static int i40e_ddp_exec_aq_section(struct i40e_hw *hw,
struct i40e_profile_aq_section *aq)
{
- struct i40e_aq_desc desc;
+ struct libie_aq_desc desc;
u8 *msg = NULL;
u16 msglen;
int status;
@@ -5573,10 +4866,10 @@ static int i40e_ddp_exec_aq_section(struct i40e_hw *hw,
msglen = aq->datalen;
if (msglen) {
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
- I40E_AQ_FLAG_RD));
+ desc.flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF |
+ LIBIE_AQ_FLAG_RD));
if (msglen > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
desc.datalen = cpu_to_le16(msglen);
msg = &aq->data[0];
}
@@ -5777,45 +5070,6 @@ i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
}
/**
- * i40e_add_pinfo_to_list
- * @hw: pointer to the hardware structure
- * @profile: pointer to the profile segment of the package
- * @profile_info_sec: buffer for information section
- * @track_id: package tracking id
- *
- * Register a profile to the list of loaded profiles.
- */
-int
-i40e_add_pinfo_to_list(struct i40e_hw *hw,
- struct i40e_profile_segment *profile,
- u8 *profile_info_sec, u32 track_id)
-{
- struct i40e_profile_section_header *sec = NULL;
- struct i40e_profile_info *pinfo;
- u32 offset = 0, info = 0;
- int status = 0;
-
- sec = (struct i40e_profile_section_header *)profile_info_sec;
- sec->tbl_size = 1;
- sec->data_end = sizeof(struct i40e_profile_section_header) +
- sizeof(struct i40e_profile_info);
- sec->section.type = SECTION_TYPE_INFO;
- sec->section.offset = sizeof(struct i40e_profile_section_header);
- sec->section.size = sizeof(struct i40e_profile_info);
- pinfo = (struct i40e_profile_info *)(profile_info_sec +
- sec->section.offset);
- pinfo->track_id = track_id;
- pinfo->version = profile->version;
- pinfo->op = I40E_DDP_ADD_TRACKID;
- memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
-
- status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
- track_id, &offset, &info, NULL);
-
- return status;
-}
-
-/**
* i40e_aq_add_cloud_filters
* @hw: pointer to the hardware structure
* @seid: VSI seid to add cloud filters from
@@ -5832,18 +5086,18 @@ i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_cloud_filters_element_data *filters,
u8 filter_count)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_remove_cloud_filters *cmd =
- (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+ struct i40e_aqc_add_remove_cloud_filters *cmd;
+ struct libie_aq_desc desc;
u16 buff_len;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_add_cloud_filters);
+ cmd = libie_aq_raw(&desc);
buff_len = filter_count * sizeof(*filters);
desc.datalen = cpu_to_le16(buff_len);
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ desc.flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD));
cmd->num_filters = filter_count;
cmd->seid = cpu_to_le16(seid);
@@ -5869,9 +5123,8 @@ i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_cloud_filters_element_bb *filters,
u8 filter_count)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_remove_cloud_filters *cmd =
- (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+ struct i40e_aqc_add_remove_cloud_filters *cmd;
+ struct libie_aq_desc desc;
u16 buff_len;
int status;
int i;
@@ -5879,9 +5132,10 @@ i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_add_cloud_filters);
+ cmd = libie_aq_raw(&desc);
buff_len = filter_count * sizeof(*filters);
desc.datalen = cpu_to_le16(buff_len);
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ desc.flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD));
cmd->num_filters = filter_count;
cmd->seid = cpu_to_le16(seid);
cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
@@ -5925,18 +5179,18 @@ i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_cloud_filters_element_data *filters,
u8 filter_count)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_remove_cloud_filters *cmd =
- (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+ struct i40e_aqc_add_remove_cloud_filters *cmd;
+ struct libie_aq_desc desc;
u16 buff_len;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_remove_cloud_filters);
+ cmd = libie_aq_raw(&desc);
buff_len = filter_count * sizeof(*filters);
desc.datalen = cpu_to_le16(buff_len);
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ desc.flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD));
cmd->num_filters = filter_count;
cmd->seid = cpu_to_le16(seid);
@@ -5962,9 +5216,8 @@ i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_cloud_filters_element_bb *filters,
u8 filter_count)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_remove_cloud_filters *cmd =
- (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+ struct i40e_aqc_add_remove_cloud_filters *cmd;
+ struct libie_aq_desc desc;
u16 buff_len;
int status;
int i;
@@ -5972,9 +5225,10 @@ i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_remove_cloud_filters);
+ cmd = libie_aq_raw(&desc);
buff_len = filter_count * sizeof(*filters);
desc.datalen = cpu_to_le16(buff_len);
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ desc.flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD));
cmd->num_filters = filter_count;
cmd->seid = cpu_to_le16(seid);
cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
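
The i40e_common.c hunks above all follow one mechanical pattern: the open-coded cast of desc.params.raw to the opcode-specific command struct becomes a libie_aq_raw() accessor on a struct libie_aq_desc, and the I40E_AQ_FLAG_*/I40E_AQ_RC_* constants become their LIBIE_AQ_* equivalents. Below is a minimal, self-contained sketch of that accessor pattern; the struct layout and all demo_* names are illustrative stand-ins, not the real libie definitions.

	#include <stdint.h>
	#include <stdio.h>

	/* Stand-in for the admin-queue descriptor; only the raw parameter area matters here. */
	struct demo_aq_desc {
		uint16_t flags;
		uint16_t opcode;
		uint16_t datalen;
		uint16_t retval;
		uint32_t cookie_high;
		uint32_t cookie_low;
		uint8_t  raw[16];		/* opcode-specific parameters */
	};

	/* Stand-in for i40e_aqc_rx_ctl_reg_read_write. */
	struct demo_rx_ctl_cmd {
		uint32_t address;
		uint32_t value;
	};

	/* One accessor instead of a cast at every call site. */
	static inline void *demo_aq_raw(struct demo_aq_desc *desc)
	{
		return &desc->raw;
	}

	int main(void)
	{
		struct demo_aq_desc desc = { 0 };
		struct demo_rx_ctl_cmd *cmd;

		/* before: cmd = (struct demo_rx_ctl_cmd *)&desc.raw; */
		cmd = demo_aq_raw(&desc);	/* after */
		cmd->address = 0x1234;
		cmd->value   = 0xabcd;
		printf("addr=0x%x val=0x%x\n",
		       (unsigned int)cmd->address, (unsigned int)cmd->value);
		return 0;
	}
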
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 8db1eb0c1768..9e0c9597aeb9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -750,7 +750,7 @@ static int i40e_get_ieee_dcb_config(struct i40e_hw *hw)
I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
&hw->remote_dcbx_config);
/* Don't treat ENOENT as an error for Remote MIBs */
- if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
+ if (hw->aq.asq_last_status == LIBIE_AQ_RC_ENOENT)
ret = 0;
out:
@@ -799,7 +799,7 @@ int i40e_get_dcb_config(struct i40e_hw *hw)
}
/* CEE mode not enabled try querying IEEE data */
- if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
+ if (hw->aq.asq_last_status == LIBIE_AQ_RC_ENOENT)
return i40e_get_ieee_dcb_config(hw);
if (ret)
@@ -816,7 +816,7 @@ int i40e_get_dcb_config(struct i40e_hw *hw)
I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
&hw->remote_dcbx_config);
/* Don't treat ENOENT as an error for Remote MIBs */
- if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
+ if (hw->aq.asq_last_status == LIBIE_AQ_RC_ENOENT)
ret = 0;
out:
@@ -925,11 +925,11 @@ i40e_get_fw_lldp_status(struct i40e_hw *hw,
if (!ret) {
*lldp_status = I40E_GET_FW_LLDP_STATUS_ENABLED;
- } else if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT) {
+ } else if (hw->aq.asq_last_status == LIBIE_AQ_RC_ENOENT) {
/* MIB is not available yet but the agent is running */
*lldp_status = I40E_GET_FW_LLDP_STATUS_ENABLED;
ret = 0;
- } else if (hw->aq.asq_last_status == I40E_AQ_RC_EPERM) {
+ } else if (hw->aq.asq_last_status == LIBIE_AQ_RC_EPERM) {
*lldp_status = I40E_GET_FW_LLDP_STATUS_DISABLED;
ret = 0;
}
@@ -1491,19 +1491,6 @@ void i40e_dcb_hw_set_num_tc(struct i40e_hw *hw, u8 num_tc)
}
/**
- * i40e_dcb_hw_get_num_tc
- * @hw: pointer to the hw struct
- *
- * Returns number of traffic classes configured in HW
- **/
-u8 i40e_dcb_hw_get_num_tc(struct i40e_hw *hw)
-{
- u32 reg = rd32(hw, I40E_PRTDCB_GENC);
-
- return FIELD_GET(I40E_PRTDCB_GENC_NUMTC_MASK, reg);
-}
-
-/**
* i40e_dcb_hw_rx_ets_bw_config
* @hw: pointer to the hw struct
* @bw_share: Bandwidth share indexed per traffic class
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
index d76497566e40..d5662c639c41 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
@@ -253,7 +253,6 @@ void i40e_dcb_hw_rx_cmd_monitor_config(struct i40e_hw *hw,
void i40e_dcb_hw_pfc_config(struct i40e_hw *hw,
u8 pfc_en, u8 *prio_tc);
void i40e_dcb_hw_set_num_tc(struct i40e_hw *hw, u8 num_tc);
-u8 i40e_dcb_hw_get_num_tc(struct i40e_hw *hw);
void i40e_dcb_hw_rx_ets_bw_config(struct i40e_hw *hw, u8 *bw_share,
u8 *mode, u8 *prio_type);
void i40e_dcb_hw_rx_up2tc_config(struct i40e_hw *hw, u8 *prio_tc);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index 8aa43aefe84c..a2ccf4c5e30b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -136,7 +136,7 @@ static int i40e_dcbnl_ieee_setets(struct net_device *netdev,
dev_info(&pf->pdev->dev,
"Failed setting DCB ETS configuration err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
return -EINVAL;
}
@@ -175,7 +175,7 @@ static int i40e_dcbnl_ieee_setpfc(struct net_device *netdev,
dev_info(&pf->pdev->dev,
"Failed setting DCB PFC configuration err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
return -EINVAL;
}
@@ -226,7 +226,7 @@ static int i40e_dcbnl_ieee_setapp(struct net_device *netdev,
dev_info(&pf->pdev->dev,
"Failed setting DCB configuration err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
return -EINVAL;
}
@@ -291,7 +291,7 @@ static int i40e_dcbnl_ieee_delapp(struct net_device *netdev,
dev_info(&pf->pdev->dev,
"Failed setting DCB configuration err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
return -EINVAL;
}
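
The i40e_dcb.c and i40e_dcb_nl.c hunks swap i40e_aq_str(&pf->hw, status) for libie_aq_str(status): the string lookup now needs only the last admin-queue return code, not a hw handle. A rough standalone sketch of such a lookup follows; the error codes and strings are examples, not the real libie table.

	#include <stdio.h>

	enum demo_aq_err { DEMO_AQ_RC_OK = 0, DEMO_AQ_RC_EPERM, DEMO_AQ_RC_ENOENT, DEMO_AQ_RC_EAGAIN };

	static const char *demo_aq_str(enum demo_aq_err err)
	{
		switch (err) {
		case DEMO_AQ_RC_OK:	return "OK";
		case DEMO_AQ_RC_EPERM:	return "AQ_RC_EPERM";
		case DEMO_AQ_RC_ENOENT:	return "AQ_RC_ENOENT";
		case DEMO_AQ_RC_EAGAIN:	return "AQ_RC_EAGAIN";
		}
		return "AQ_RC_UNKNOWN";
	}

	int main(void)
	{
		/* The call site no longer needs a hw pointer, only the last status. */
		printf("aq_err %s\n", demo_aq_str(DEMO_AQ_RC_ENOENT));
		return 0;
	}
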
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ddp.c b/drivers/net/ethernet/intel/i40e/i40e_ddp.c
index 2f53f0f53bc3..daa9f2c42f70 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ddp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ddp.c
@@ -407,8 +407,9 @@ static int i40e_ddp_load(struct net_device *netdev, const u8 *data, size_t size,
**/
static int i40e_ddp_restore(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
+ struct net_device *netdev = vsi->netdev;
struct i40e_ddp_old_profile_list *entry;
- struct net_device *netdev = pf->vsi[pf->lan_vsi]->netdev;
int status = 0;
if (!list_empty(&pf->ddp_old_prof)) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index f9ba45f596c9..c17b5d290f0a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -40,47 +40,6 @@ static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)
* setup, adding or removing filters, or other things. Many of
* these will be useful for some forms of unit testing.
**************************************************************/
-static char i40e_dbg_command_buf[256] = "";
-
-/**
- * i40e_dbg_command_read - read for command datum
- * @filp: the opened file
- * @buffer: where to write the data for the user to read
- * @count: the size of the user's buffer
- * @ppos: file position offset
- **/
-static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer,
- size_t count, loff_t *ppos)
-{
- struct i40e_pf *pf = filp->private_data;
- int bytes_not_copied;
- int buf_size = 256;
- char *buf;
- int len;
-
- /* don't allow partial reads */
- if (*ppos != 0)
- return 0;
- if (count < buf_size)
- return -ENOSPC;
-
- buf = kzalloc(buf_size, GFP_KERNEL);
- if (!buf)
- return -ENOSPC;
-
- len = snprintf(buf, buf_size, "%s: %s\n",
- pf->vsi[pf->lan_vsi]->netdev->name,
- i40e_dbg_command_buf);
-
- bytes_not_copied = copy_to_user(buffer, buf, len);
- kfree(buf);
-
- if (bytes_not_copied)
- return -EFAULT;
-
- *ppos = len;
- return len;
-}
static char *i40e_filter_state_string[] = {
"INVALID",
@@ -88,6 +47,7 @@ static char *i40e_filter_state_string[] = {
"ACTIVE",
"FAILED",
"REMOVE",
+ "NEW_SYNC",
};
/**
@@ -128,7 +88,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
dev_info(&pf->pdev->dev,
" state[%d] = %08lx\n",
i, vsi->state[i]);
- if (vsi == pf->vsi[pf->lan_vsi])
+ if (vsi->type == I40E_VSI_MAIN)
dev_info(&pf->pdev->dev, " MAC address: %pM Port MAC: %pM\n",
pf->hw.mac.addr,
pf->hw.mac.port_addr);
@@ -487,7 +447,7 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
dev_info(&pf->pdev->dev, "AdminQ Tx Ring\n");
ring = &(hw->aq.asq);
for (i = 0; i < ring->count; i++) {
- struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
+ struct libie_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
dev_info(&pf->pdev->dev,
" at[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
@@ -500,7 +460,7 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
dev_info(&pf->pdev->dev, "AdminQ Rx Ring\n");
ring = &(hw->aq.arq);
for (i = 0; i < ring->count; i++) {
- struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
+ struct libie_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
dev_info(&pf->pdev->dev,
" ar[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
@@ -720,7 +680,7 @@ static void i40e_dbg_dump_vf(struct i40e_pf *pf, int vf_id)
dev_info(&pf->pdev->dev, "vf %2d: VSI id=%d, seid=%d, qps=%d\n",
vf_id, vf->lan_vsi_id, vsi->seid, vf->num_queue_pairs);
dev_info(&pf->pdev->dev, " num MDD=%lld\n",
- vf->num_mdd_events);
+ vf->mdd_tx_events.count + vf->mdd_rx_events.count);
} else {
dev_info(&pf->pdev->dev, "invalid VF id %d\n", vf_id);
}
@@ -786,7 +746,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid);
if (cnt == 0) {
/* default to PF VSI */
- vsi_seid = pf->vsi[pf->lan_vsi]->seid;
+ vsi = i40e_pf_get_main_vsi(pf);
+ vsi_seid = vsi->seid;
} else if (vsi_seid < 0) {
dev_info(&pf->pdev->dev, "add VSI %d: bad vsi seid\n",
vsi_seid);
@@ -867,7 +828,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
goto command_write_done;
}
- veb = i40e_veb_setup(pf, 0, uplink_seid, vsi_seid, enabled_tc);
+ veb = i40e_veb_setup(pf, uplink_seid, vsi_seid, enabled_tc);
if (veb)
dev_info(&pf->pdev->dev, "added relay %d\n", veb->seid);
else
@@ -1030,7 +991,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
goto command_write_done;
}
- vsi = pf->vsi[pf->lan_vsi];
+ vsi = i40e_pf_get_main_vsi(pf);
switch_id =
le16_to_cpu(vsi->info.switch_id) &
I40E_AQ_VSI_SW_ID_MASK;
@@ -1265,10 +1226,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev, "clear_stats vsi [seid] or clear_stats port\n");
}
} else if (strncmp(cmd_buf, "send aq_cmd", 11) == 0) {
- struct i40e_aq_desc *desc;
+ struct libie_aq_desc *desc;
int ret;
- desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
if (!desc)
goto command_write_done;
cnt = sscanf(&cmd_buf[11],
@@ -1276,10 +1237,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
&desc->flags,
&desc->opcode, &desc->datalen, &desc->retval,
&desc->cookie_high, &desc->cookie_low,
- &desc->params.internal.param0,
- &desc->params.internal.param1,
- &desc->params.internal.param2,
- &desc->params.internal.param3);
+ &desc->params.generic.param0,
+ &desc->params.generic.param1,
+ &desc->params.generic.addr_high,
+ &desc->params.generic.addr_low);
if (cnt != 10) {
dev_info(&pf->pdev->dev,
"send aq_cmd: bad command string, cnt=%d\n",
@@ -1304,19 +1265,19 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
"AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
desc->flags, desc->opcode, desc->datalen, desc->retval,
desc->cookie_high, desc->cookie_low,
- desc->params.internal.param0,
- desc->params.internal.param1,
- desc->params.internal.param2,
- desc->params.internal.param3);
+ desc->params.generic.param0,
+ desc->params.generic.param1,
+ desc->params.generic.addr_high,
+ desc->params.generic.addr_low);
kfree(desc);
desc = NULL;
} else if (strncmp(cmd_buf, "send indirect aq_cmd", 20) == 0) {
- struct i40e_aq_desc *desc;
+ struct libie_aq_desc *desc;
u16 buffer_len;
u8 *buff;
int ret;
- desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
if (!desc)
goto command_write_done;
cnt = sscanf(&cmd_buf[20],
@@ -1324,10 +1285,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
&desc->flags,
&desc->opcode, &desc->datalen, &desc->retval,
&desc->cookie_high, &desc->cookie_low,
- &desc->params.internal.param0,
- &desc->params.internal.param1,
- &desc->params.internal.param2,
- &desc->params.internal.param3,
+ &desc->params.generic.param0,
+ &desc->params.generic.param1,
+ &desc->params.generic.addr_high,
+ &desc->params.generic.addr_low,
&buffer_len);
if (cnt != 11) {
dev_info(&pf->pdev->dev,
@@ -1347,7 +1308,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
desc = NULL;
goto command_write_done;
}
- desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc->flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
ret = i40e_asq_send_command(&pf->hw, desc, buff,
buffer_len, NULL);
if (!ret) {
@@ -1365,10 +1326,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
"AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
desc->flags, desc->opcode, desc->datalen, desc->retval,
desc->cookie_high, desc->cookie_low,
- desc->params.internal.param0,
- desc->params.internal.param1,
- desc->params.internal.param2,
- desc->params.internal.param3);
+ desc->params.generic.param0,
+ desc->params.generic.param1,
+ desc->params.generic.addr_high,
+ desc->params.generic.addr_low);
print_hex_dump(KERN_INFO, "AQ buffer WB: ",
DUMP_PREFIX_OFFSET, 16, 1,
buff, buffer_len, true);
@@ -1380,6 +1341,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev, "FD current total filter count for this interface: %d\n",
i40e_get_current_fd_count(pf));
} else if (strncmp(cmd_buf, "lldp", 4) == 0) {
+ /* Get main VSI */
+ struct i40e_vsi *main_vsi = i40e_pf_get_main_vsi(pf);
+
if (strncmp(&cmd_buf[5], "stop", 4) == 0) {
int ret;
@@ -1391,10 +1355,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
goto command_write_done;
}
ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
- pf->hw.mac.addr,
- ETH_P_LLDP, 0,
- pf->vsi[pf->lan_vsi]->seid,
- 0, true, NULL, NULL);
+ pf->hw.mac.addr, ETH_P_LLDP, 0,
+ main_vsi->seid, 0, true, NULL,
+ NULL);
if (ret) {
dev_info(&pf->pdev->dev,
"%s: Add Control Packet Filter AQ command failed =0x%x\n",
@@ -1409,10 +1372,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
int ret;
ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
- pf->hw.mac.addr,
- ETH_P_LLDP, 0,
- pf->vsi[pf->lan_vsi]->seid,
- 0, false, NULL, NULL);
+ pf->hw.mac.addr, ETH_P_LLDP, 0,
+ main_vsi->seid, 0, false, NULL,
+ NULL);
if (ret) {
dev_info(&pf->pdev->dev,
"%s: Remove Control Packet Filter AQ command failed =0x%x\n",
@@ -1617,7 +1579,6 @@ command_write_done:
static const struct file_operations i40e_dbg_command_fops = {
.owner = THIS_MODULE,
.open = simple_open,
- .read = i40e_dbg_command_read,
.write = i40e_dbg_command_write,
};
@@ -1626,47 +1587,6 @@ static const struct file_operations i40e_dbg_command_fops = {
* The netdev_ops entry in debugfs is for giving the driver commands
* to be executed from the netdev operations.
**************************************************************/
-static char i40e_dbg_netdev_ops_buf[256] = "";
-
-/**
- * i40e_dbg_netdev_ops_read - read for netdev_ops datum
- * @filp: the opened file
- * @buffer: where to write the data for the user to read
- * @count: the size of the user's buffer
- * @ppos: file position offset
- **/
-static ssize_t i40e_dbg_netdev_ops_read(struct file *filp, char __user *buffer,
- size_t count, loff_t *ppos)
-{
- struct i40e_pf *pf = filp->private_data;
- int bytes_not_copied;
- int buf_size = 256;
- char *buf;
- int len;
-
- /* don't allow partal reads */
- if (*ppos != 0)
- return 0;
- if (count < buf_size)
- return -ENOSPC;
-
- buf = kzalloc(buf_size, GFP_KERNEL);
- if (!buf)
- return -ENOSPC;
-
- len = snprintf(buf, buf_size, "%s: %s\n",
- pf->vsi[pf->lan_vsi]->netdev->name,
- i40e_dbg_netdev_ops_buf);
-
- bytes_not_copied = copy_to_user(buffer, buf, len);
- kfree(buf);
-
- if (bytes_not_copied)
- return -EFAULT;
-
- *ppos = len;
- return len;
-}
/**
* i40e_dbg_netdev_ops_write - write into netdev_ops datum
@@ -1680,35 +1600,36 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
size_t count, loff_t *ppos)
{
struct i40e_pf *pf = filp->private_data;
+ char *cmd_buf, *buf_tmp;
int bytes_not_copied;
struct i40e_vsi *vsi;
- char *buf_tmp;
int vsi_seid;
int i, cnt;
/* don't allow partial writes */
if (*ppos != 0)
return 0;
- if (count >= sizeof(i40e_dbg_netdev_ops_buf))
- return -ENOSPC;
- memset(i40e_dbg_netdev_ops_buf, 0, sizeof(i40e_dbg_netdev_ops_buf));
- bytes_not_copied = copy_from_user(i40e_dbg_netdev_ops_buf,
- buffer, count);
- if (bytes_not_copied)
+ cmd_buf = kzalloc(count + 1, GFP_KERNEL);
+ if (!cmd_buf)
+ return count;
+ bytes_not_copied = copy_from_user(cmd_buf, buffer, count);
+ if (bytes_not_copied) {
+ kfree(cmd_buf);
return -EFAULT;
- i40e_dbg_netdev_ops_buf[count] = '\0';
+ }
+ cmd_buf[count] = '\0';
- buf_tmp = strchr(i40e_dbg_netdev_ops_buf, '\n');
+ buf_tmp = strchr(cmd_buf, '\n');
if (buf_tmp) {
*buf_tmp = '\0';
- count = buf_tmp - i40e_dbg_netdev_ops_buf + 1;
+ count = buf_tmp - cmd_buf + 1;
}
- if (strncmp(i40e_dbg_netdev_ops_buf, "change_mtu", 10) == 0) {
+ if (strncmp(cmd_buf, "change_mtu", 10) == 0) {
int mtu;
- cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i %i",
+ cnt = sscanf(&cmd_buf[11], "%i %i",
&vsi_seid, &mtu);
if (cnt != 2) {
dev_info(&pf->pdev->dev, "change_mtu <vsi_seid> <mtu>\n");
@@ -1730,8 +1651,8 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
}
- } else if (strncmp(i40e_dbg_netdev_ops_buf, "set_rx_mode", 11) == 0) {
- cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid);
+ } else if (strncmp(cmd_buf, "set_rx_mode", 11) == 0) {
+ cnt = sscanf(&cmd_buf[11], "%i", &vsi_seid);
if (cnt != 1) {
dev_info(&pf->pdev->dev, "set_rx_mode <vsi_seid>\n");
goto netdev_ops_write_done;
@@ -1751,8 +1672,8 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
}
- } else if (strncmp(i40e_dbg_netdev_ops_buf, "napi", 4) == 0) {
- cnt = sscanf(&i40e_dbg_netdev_ops_buf[4], "%i", &vsi_seid);
+ } else if (strncmp(cmd_buf, "napi", 4) == 0) {
+ cnt = sscanf(&cmd_buf[4], "%i", &vsi_seid);
if (cnt != 1) {
dev_info(&pf->pdev->dev, "napi <vsi_seid>\n");
goto netdev_ops_write_done;
@@ -1770,21 +1691,20 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
dev_info(&pf->pdev->dev, "napi called\n");
}
} else {
- dev_info(&pf->pdev->dev, "unknown command '%s'\n",
- i40e_dbg_netdev_ops_buf);
+ dev_info(&pf->pdev->dev, "unknown command '%s'\n", cmd_buf);
dev_info(&pf->pdev->dev, "available commands\n");
dev_info(&pf->pdev->dev, " change_mtu <vsi_seid> <mtu>\n");
dev_info(&pf->pdev->dev, " set_rx_mode <vsi_seid>\n");
dev_info(&pf->pdev->dev, " napi <vsi_seid>\n");
}
netdev_ops_write_done:
+ kfree(cmd_buf);
return count;
}
static const struct file_operations i40e_dbg_netdev_ops_fops = {
.owner = THIS_MODULE,
.open = simple_open,
- .read = i40e_dbg_netdev_ops_read,
.write = i40e_dbg_netdev_ops_write,
};
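
Both debugfs write handlers above move from a fixed 256-byte static buffer to a heap copy of the user write: allocate count + 1 bytes, copy from userspace, NUL-terminate, cut at the first newline, and free the buffer on every exit path. The standalone model below reproduces that buffering pattern; copy_from_user() is replaced by memcpy() so it runs in userspace, and the demo_* names are placeholders.

	#include <stdlib.h>
	#include <string.h>
	#include <stdio.h>

	static long demo_write(const char *user_buf, size_t count)
	{
		char *cmd_buf, *nl;

		cmd_buf = calloc(1, count + 1);
		if (!cmd_buf)
			return -12;			/* -ENOMEM */
		memcpy(cmd_buf, user_buf, count);	/* stands in for copy_from_user() */
		cmd_buf[count] = '\0';

		nl = strchr(cmd_buf, '\n');
		if (nl) {
			*nl = '\0';
			count = nl - cmd_buf + 1;
		}

		printf("command: '%s'\n", cmd_buf);
		free(cmd_buf);
		return count;				/* consume up to and including the newline */
	}

	int main(void)
	{
		const char msg[] = "napi 42\nextra";

		return demo_write(msg, sizeof(msg) - 1) < 0;
	}
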
diff --git a/drivers/net/ethernet/intel/i40e/i40e_devlink.c b/drivers/net/ethernet/intel/i40e/i40e_devlink.c
index cc4e9e2addb7..229179ccc131 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_devlink.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_devlink.c
@@ -5,6 +5,42 @@
#include "i40e.h"
#include "i40e_devlink.h"
+static int i40e_max_mac_per_vf_set(struct devlink *devlink,
+ u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct i40e_pf *pf = devlink_priv(devlink);
+
+ if (pf->num_alloc_vfs > 0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot change max_mac_per_vf while SR-IOV is enabled");
+ return -EBUSY;
+ }
+
+ pf->max_mac_per_vf = ctx->val.vu32;
+ return 0;
+}
+
+static int i40e_max_mac_per_vf_get(struct devlink *devlink,
+ u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct i40e_pf *pf = devlink_priv(devlink);
+
+ ctx->val.vu32 = pf->max_mac_per_vf;
+ return 0;
+}
+
+static const struct devlink_param i40e_dl_params[] = {
+ DEVLINK_PARAM_GENERIC(MAX_MAC_PER_VF,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ i40e_max_mac_per_vf_get,
+ i40e_max_mac_per_vf_set,
+ NULL),
+};
+
static void i40e_info_get_dsn(struct i40e_pf *pf, char *buf, size_t len)
{
u8 dsn[8];
@@ -165,7 +201,18 @@ void i40e_free_pf(struct i40e_pf *pf)
**/
void i40e_devlink_register(struct i40e_pf *pf)
{
- devlink_register(priv_to_devlink(pf));
+ struct devlink *dl = priv_to_devlink(pf);
+ struct device *dev = &pf->pdev->dev;
+ int err;
+
+ err = devlink_params_register(dl, i40e_dl_params,
+ ARRAY_SIZE(i40e_dl_params));
+ if (err)
+		dev_err(dev,
+			"devlink params register failed with error %d\n", err);
+
+ devlink_register(dl);
+
}
/**
@@ -176,7 +223,11 @@ void i40e_devlink_register(struct i40e_pf *pf)
**/
void i40e_devlink_unregister(struct i40e_pf *pf)
{
- devlink_unregister(priv_to_devlink(pf));
+ struct devlink *dl = priv_to_devlink(pf);
+
+ devlink_unregister(dl);
+ devlink_params_unregister(dl, i40e_dl_params,
+ ARRAY_SIZE(i40e_dl_params));
}
/**
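
The devlink hunk registers the driver's parameter table before devlink_register() and removes it only after devlink_unregister(), so the runtime knob never appears on a half-torn-down instance; the set callback additionally refuses changes while VFs are already allocated. Below is a standalone sketch of that guard only — the field names and -EBUSY mirror the hunk, but this models the check itself, not the devlink callback machinery.

	#include <stdio.h>

	struct demo_pf {
		int num_alloc_vfs;
		unsigned int max_mac_per_vf;
	};

	/* Refuse to change the knob once dependent resources (VFs) exist. */
	static int demo_set_max_mac_per_vf(struct demo_pf *pf, unsigned int val)
	{
		if (pf->num_alloc_vfs > 0)
			return -16;		/* -EBUSY */
		pf->max_mac_per_vf = val;
		return 0;
	}

	int main(void)
	{
		struct demo_pf pf = { .num_alloc_vfs = 0 };

		if (!demo_set_max_mac_per_vf(&pf, 8))
			printf("max_mac_per_vf=%u\n", pf.max_mac_per_vf);

		pf.num_alloc_vfs = 2;
		printf("set while VFs active: %d\n", demo_set_max_mac_per_vf(&pf, 16));
		return 0;
	}
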
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 42e7e6cdaa6d..f2c2646ea298 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -3,6 +3,7 @@
/* ethtool support for i40e */
+#include <linux/net/intel/libie/pctype.h>
#include "i40e_devids.h"
#include "i40e_diag.h"
#include "i40e_txrx_common.h"
@@ -459,6 +460,8 @@ static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = {
I40E_PRIV_FLAG("base-r-fec", I40E_FLAG_BASE_R_FEC, 0),
I40E_PRIV_FLAG("vf-vlan-pruning",
I40E_FLAG_VF_VLAN_PRUNING_ENA, 0),
+ I40E_PRIV_FLAG("mdd-auto-reset-vf",
+ I40E_FLAG_MDD_AUTO_RESET_VF, 0),
};
#define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gstrings_priv_flags)
@@ -1241,7 +1244,7 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
i40e_partition_setting_complaint(pf);
return -EOPNOTSUPP;
}
- if (vsi != pf->vsi[pf->lan_vsi])
+ if (vsi->type != I40E_VSI_MAIN)
return -EOPNOTSUPP;
if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET &&
hw->phy.media_type != I40E_MEDIA_TYPE_FIBER &&
@@ -1459,7 +1462,7 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
netdev_info(netdev,
"Set phy config failed, err %pe aq_err %s\n",
ERR_PTR(status),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
err = -EAGAIN;
goto done;
}
@@ -1469,7 +1472,7 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
netdev_dbg(netdev,
"Updating link info failed with err %pe aq_err %s\n",
ERR_PTR(status),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
} else {
netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
@@ -1517,7 +1520,7 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
netdev_info(netdev,
"Set phy config failed, err %pe aq_err %s\n",
ERR_PTR(status),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
err = -EAGAIN;
goto done;
}
@@ -1531,7 +1534,7 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
netdev_dbg(netdev,
"Updating link info failed with err %pe aq_err %s\n",
ERR_PTR(status),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
}
done:
@@ -1638,7 +1641,7 @@ static int i40e_nway_reset(struct net_device *netdev)
if (ret) {
netdev_info(netdev, "link restart failed, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
return -EIO;
}
@@ -1710,7 +1713,7 @@ static int i40e_set_pauseparam(struct net_device *netdev,
return -EOPNOTSUPP;
}
- if (vsi != pf->vsi[pf->lan_vsi])
+ if (vsi->type != I40E_VSI_MAIN)
return -EOPNOTSUPP;
is_an = hw_link_info->an_info & I40E_AQ_AN_COMPLETED;
@@ -1755,19 +1758,19 @@ static int i40e_set_pauseparam(struct net_device *netdev,
if (aq_failures & I40E_SET_FC_AQ_FAIL_GET) {
netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %pe aq_err %s\n",
ERR_PTR(status),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
err = -EAGAIN;
}
if (aq_failures & I40E_SET_FC_AQ_FAIL_SET) {
netdev_info(netdev, "Set fc failed on the set_phy_config call with err %pe aq_err %s\n",
ERR_PTR(status),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
err = -EAGAIN;
}
if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) {
netdev_info(netdev, "Set fc failed on the get_link_info call with err %pe aq_err %s\n",
ERR_PTR(status),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
err = -EAGAIN;
}
@@ -1915,13 +1918,13 @@ static int i40e_get_eeprom(struct net_device *netdev,
ret_val = i40e_aq_read_nvm(hw, 0x0, offset, len,
(u8 *)eeprom_buff + (I40E_NVM_SECTOR_SIZE * i),
last, NULL);
- if (ret_val && hw->aq.asq_last_status == I40E_AQ_RC_EPERM) {
+ if (ret_val && hw->aq.asq_last_status == LIBIE_AQ_RC_EPERM) {
dev_info(&pf->pdev->dev,
"read NVM failed, invalid offset 0x%x\n",
offset);
break;
} else if (ret_val &&
- hw->aq.asq_last_status == I40E_AQ_RC_EACCES) {
+ hw->aq.asq_last_status == LIBIE_AQ_RC_EACCES) {
dev_info(&pf->pdev->dev,
"read NVM failed, access, offset 0x%x\n",
offset);
@@ -2029,7 +2032,7 @@ static void i40e_get_ringparam(struct net_device *netdev,
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
ring->rx_max_pending = i40e_get_max_num_descriptors(pf);
ring->tx_max_pending = i40e_get_max_num_descriptors(pf);
@@ -2292,7 +2295,7 @@ static int i40e_get_stats_count(struct net_device *netdev)
struct i40e_pf *pf = vsi->back;
int stats_len;
- if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1)
+ if (vsi->type == I40E_VSI_MAIN && pf->hw.partition_id == 1)
stats_len = I40E_PF_STATS_LEN;
else
stats_len = I40E_VSI_STATS_LEN;
@@ -2422,17 +2425,14 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
}
rcu_read_unlock();
- if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
+ if (vsi->type != I40E_VSI_MAIN || pf->hw.partition_id != 1)
goto check_data_pointer;
- veb_stats = ((pf->lan_veb != I40E_NO_VEB) &&
- (pf->lan_veb < I40E_MAX_VEB) &&
- test_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags));
+ veb = i40e_pf_get_main_veb(pf);
+ veb_stats = veb && test_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags);
- if (veb_stats) {
- veb = pf->veb[pf->lan_veb];
+ if (veb_stats)
i40e_update_veb_stats(veb);
- }
/* If veb stats aren't enabled, pass NULL instead of the veb so that
* we initialize stats to zero and update the data pointer
@@ -2495,7 +2495,7 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data)
"rx", i);
}
- if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
+ if (vsi->type != I40E_VSI_MAIN || pf->hw.partition_id != 1)
goto check_data_pointer;
i40e_add_stat_strings(&data, i40e_gstrings_veb_stats);
@@ -2549,7 +2549,7 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
}
static int i40e_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct i40e_pf *pf = i40e_netdev_to_pf(dev);
@@ -2558,16 +2558,12 @@ static int i40e_get_ts_info(struct net_device *dev,
return ethtool_op_get_ts_info(dev, info);
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
if (pf->ptp_clock)
info->phc_index = ptp_clock_index(pf->ptp_clock);
- else
- info->phc_index = -1;
info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
@@ -2754,6 +2750,15 @@ skip_ol_tests:
netif_info(pf, drv, netdev, "testing failed\n");
}
+static void i40e_get_link_ext_stats(struct net_device *netdev,
+ struct ethtool_link_ext_stats *stats)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+
+ stats->link_down_events = pf->link_down_events;
+}
+
static void i40e_get_wol(struct net_device *netdev,
struct ethtool_wolinfo *wol)
{
@@ -2792,7 +2797,7 @@ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return -EOPNOTSUPP;
}
- if (vsi != pf->vsi[pf->lan_vsi])
+ if (vsi->type != I40E_VSI_MAIN)
return -EOPNOTSUPP;
/* NVM bit on means WoL disabled for the port */
@@ -3134,15 +3139,12 @@ static int i40e_set_per_queue_coalesce(struct net_device *netdev, u32 queue,
return __i40e_set_coalesce(netdev, ec, queue);
}
-/**
- * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type
- * @pf: pointer to the physical function struct
- * @cmd: ethtool rxnfc command
- *
- * Returns Success if the flow is supported, else Invalid Input.
- **/
-static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
+static int i40e_get_rxfh_fields(struct net_device *netdev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
u8 flow_pctype = 0;
u64 i_set = 0;
@@ -3151,16 +3153,16 @@ static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
switch (cmd->flow_type) {
case TCP_V4_FLOW:
- flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+ flow_pctype = LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP;
break;
case UDP_V4_FLOW:
- flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ flow_pctype = LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP;
break;
case TCP_V6_FLOW:
- flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
+ flow_pctype = LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP;
break;
case UDP_V6_FLOW:
- flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
+ flow_pctype = LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP;
break;
case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
@@ -3370,6 +3372,7 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
struct i40e_rx_flow_userdef userdef = {0};
struct i40e_fdir_filter *rule = NULL;
struct hlist_node *node2;
+ struct i40e_vsi *vsi;
u64 input_set;
u16 index;
@@ -3416,28 +3419,28 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
switch (rule->flow_type) {
case SCTP_V4_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP;
break;
case TCP_V4_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP;
break;
case UDP_V4_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP;
break;
case SCTP_V6_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP;
break;
case TCP_V6_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP;
break;
case UDP_V6_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP;
break;
case IP_USER_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER;
break;
case IPV6_USER_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER;
break;
default:
/* If we have stored a filter with a flow type not listed here
@@ -3493,9 +3496,8 @@ no_input_set:
fsp->flow_type |= FLOW_EXT;
}
- if (rule->dest_vsi != pf->vsi[pf->lan_vsi]->id) {
- struct i40e_vsi *vsi;
-
+ vsi = i40e_pf_get_main_vsi(pf);
+ if (rule->dest_vsi != vsi->id) {
vsi = i40e_find_vsi_from_id(pf, rule->dest_vsi);
if (vsi && vsi->type == I40E_VSI_SRIOV) {
/* VFs are zero-indexed by the driver, but ethtool
@@ -3520,6 +3522,20 @@ no_input_set:
}
/**
+ * i40e_get_rx_ring_count - get RX ring count
+ * @netdev: network interface device structure
+ *
+ * Return: number of RX rings.
+ **/
+static u32 i40e_get_rx_ring_count(struct net_device *netdev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+
+ return vsi->rss_size;
+}
+
+/**
* i40e_get_rxnfc - command to get RX flow classification rules
* @netdev: network interface device structure
* @cmd: ethtool rxnfc command
@@ -3536,13 +3552,6 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
int ret = -EOPNOTSUPP;
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = vsi->rss_size;
- ret = 0;
- break;
- case ETHTOOL_GRXFH:
- ret = i40e_get_rss_hash_opts(pf, cmd);
- break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = pf->fdir_pf_active_filters;
/* report total rule count */
@@ -3571,7 +3580,7 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
* Returns value of bits to be set per user request
**/
static u64 i40e_get_rss_hash_bits(struct i40e_hw *hw,
- struct ethtool_rxnfc *nfc,
+ const struct ethtool_rxfh_fields *nfc,
u64 i_setc)
{
u64 i_set = i_setc;
@@ -3616,15 +3625,13 @@ static u64 i40e_get_rss_hash_bits(struct i40e_hw *hw,
}
#define FLOW_PCTYPES_SIZE 64
-/**
- * i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash
- * @pf: pointer to the physical function struct
- * @nfc: ethtool rxnfc command
- *
- * Returns Success if the flow input set is supported.
- **/
-static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
+static int i40e_set_rxfh_fields(struct net_device *netdev,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
u64 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
@@ -3648,40 +3655,40 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
switch (nfc->flow_type) {
case TCP_V4_FLOW:
- set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP, flow_pctypes);
+ set_bit(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP, flow_pctypes);
if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE,
pf->hw.caps))
- set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK,
+ set_bit(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK,
flow_pctypes);
break;
case TCP_V6_FLOW:
- set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_TCP, flow_pctypes);
+ set_bit(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP, flow_pctypes);
if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE,
pf->hw.caps))
- set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK,
+ set_bit(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK,
flow_pctypes);
break;
case UDP_V4_FLOW:
- set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_UDP, flow_pctypes);
+ set_bit(LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP, flow_pctypes);
if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE,
pf->hw.caps)) {
- set_bit(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP,
+ set_bit(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP,
flow_pctypes);
- set_bit(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP,
+ set_bit(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP,
flow_pctypes);
}
- hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4);
+ hena |= BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV4);
break;
case UDP_V6_FLOW:
- set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_UDP, flow_pctypes);
+ set_bit(LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP, flow_pctypes);
if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE,
pf->hw.caps)) {
- set_bit(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP,
+ set_bit(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP,
flow_pctypes);
- set_bit(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP,
+ set_bit(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP,
flow_pctypes);
}
- hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6);
+ hena |= BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV6);
break;
case AH_ESP_V4_FLOW:
case AH_V4_FLOW:
@@ -3690,7 +3697,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
if ((nfc->data & RXH_L4_B_0_1) ||
(nfc->data & RXH_L4_B_2_3))
return -EINVAL;
- hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
+ hena |= BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER);
break;
case AH_ESP_V6_FLOW:
case AH_V6_FLOW:
@@ -3699,15 +3706,15 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
if ((nfc->data & RXH_L4_B_0_1) ||
(nfc->data & RXH_L4_B_2_3))
return -EINVAL;
- hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
+ hena |= BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER);
break;
case IPV4_FLOW:
- hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4);
+ hena |= BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER) |
+ BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV4);
break;
case IPV6_FLOW:
- hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6);
+ hena |= BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER) |
+ BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV6);
break;
default:
return -EINVAL;
@@ -4317,36 +4324,36 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
switch (fsp->flow_type & ~FLOW_EXT) {
case SCTP_V4_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP;
fdir_filter_count = &pf->fd_sctp4_filter_cnt;
break;
case TCP_V4_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP;
fdir_filter_count = &pf->fd_tcp4_filter_cnt;
break;
case UDP_V4_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP;
fdir_filter_count = &pf->fd_udp4_filter_cnt;
break;
case SCTP_V6_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP;
fdir_filter_count = &pf->fd_sctp6_filter_cnt;
break;
case TCP_V6_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP;
fdir_filter_count = &pf->fd_tcp6_filter_cnt;
break;
case UDP_V6_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP;
fdir_filter_count = &pf->fd_udp6_filter_cnt;
break;
case IP_USER_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER;
fdir_filter_count = &pf->fd_ip4_filter_cnt;
flex_l3 = true;
break;
case IPV6_USER_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER;
fdir_filter_count = &pf->fd_ip6_filter_cnt;
flex_l3 = true;
break;
@@ -4682,8 +4689,8 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
* separate support, we'll always assume and enforce that the two flow
* types must have matching input sets.
*/
- if (index == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER)
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
+ if (index == LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER)
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_FRAG_IPV4,
new_mask);
/* Add the new offset and update table, if necessary */
@@ -4959,13 +4966,9 @@ static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
- struct i40e_pf *pf = vsi->back;
int ret = -EOPNOTSUPP;
switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- ret = i40e_set_rss_hash_opt(pf, cmd);
- break;
case ETHTOOL_SRXCLSRLINS:
ret = i40e_add_fdir_ethtool(vsi, cmd);
break;
@@ -5256,9 +5259,9 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
DECLARE_BITMAP(orig_flags, I40E_PF_FLAGS_NBITS);
DECLARE_BITMAP(new_flags, I40E_PF_FLAGS_NBITS);
struct i40e_netdev_priv *np = netdev_priv(dev);
- enum i40e_admin_queue_err adq_err;
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
+ enum libie_aq_err adq_err;
u32 reset_needed = 0;
int status;
u32 i, j;
@@ -5306,7 +5309,7 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
}
flags_complete:
- bitmap_xor(changed_flags, pf->flags, orig_flags, I40E_PF_FLAGS_NBITS);
+ bitmap_xor(changed_flags, new_flags, orig_flags, I40E_PF_FLAGS_NBITS);
if (test_bit(I40E_FLAG_FW_LLDP_DIS, changed_flags))
reset_needed = I40E_PF_RESET_AND_REBUILD_FLAG;
@@ -5378,12 +5381,11 @@ flags_complete:
valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
ret = i40e_aq_set_switch_config(&pf->hw, sw_flags, valid_flags,
0, NULL);
- if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
+ if (ret && pf->hw.aq.asq_last_status != LIBIE_AQ_RC_ESRCH) {
dev_info(&pf->pdev->dev,
"couldn't set switch config bits, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
/* not a fatal problem, just keep going */
}
}
@@ -5445,16 +5447,16 @@ flags_complete:
if (status) {
adq_err = pf->hw.aq.asq_last_status;
switch (adq_err) {
- case I40E_AQ_RC_EEXIST:
+ case LIBIE_AQ_RC_EEXIST:
dev_warn(&pf->pdev->dev,
"FW LLDP agent is already running\n");
reset_needed = 0;
break;
- case I40E_AQ_RC_EPERM:
+ case LIBIE_AQ_RC_EPERM:
dev_warn(&pf->pdev->dev,
"Device configuration forbids SW from starting the LLDP agent.\n");
return -EINVAL;
- case I40E_AQ_RC_EAGAIN:
+ case LIBIE_AQ_RC_EAGAIN:
dev_warn(&pf->pdev->dev,
"Stop FW LLDP agent command is still being processed, please try again in a second.\n");
return -EBUSY;
@@ -5462,8 +5464,7 @@ flags_complete:
dev_warn(&pf->pdev->dev,
"Starting FW LLDP agent failed: error: %pe, %s\n",
ERR_PTR(status),
- i40e_aq_str(&pf->hw,
- adq_err));
+ libie_aq_str(adq_err));
return -EINVAL;
}
}
@@ -5644,6 +5645,26 @@ static int i40e_get_module_eeprom(struct net_device *netdev,
return 0;
}
+static void i40e_eee_capability_to_kedata_supported(__le16 eee_capability_,
+ unsigned long *supported)
+{
+ const int eee_capability = le16_to_cpu(eee_capability_);
+ static const int lut[] = {
+ ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ };
+
+ linkmode_zero(supported);
+ for (unsigned int i = ARRAY_SIZE(lut); i--; )
+ if (eee_capability & BIT(i + 1))
+ linkmode_set_bit(lut[i], supported);
+}
+
static int i40e_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
@@ -5651,7 +5672,7 @@ static int i40e_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
- int status = 0;
+ int status;
/* Get initial PHY capabilities */
status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_cfg, NULL);
@@ -5664,11 +5685,18 @@ static int i40e_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
if (phy_cfg.eee_capability == 0)
return -EOPNOTSUPP;
+ i40e_eee_capability_to_kedata_supported(phy_cfg.eee_capability,
+ edata->supported);
+ linkmode_copy(edata->lp_advertised, edata->supported);
+
/* Get current configuration */
status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_cfg, NULL);
if (status)
return -EAGAIN;
+ linkmode_zero(edata->advertised);
+ if (phy_cfg.eee_capability)
+ linkmode_copy(edata->advertised, edata->supported);
edata->eee_enabled = !!phy_cfg.eee_capability;
edata->tx_lpi_enabled = pf->stats.tx_lpi_status;
@@ -5684,10 +5712,11 @@ static int i40e_is_eee_param_supported(struct net_device *netdev,
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
struct i40e_ethtool_not_used {
- u32 value;
+ bool value;
const char *name;
} param[] = {
- {edata->tx_lpi_timer, "tx-timer"},
+ {!!(edata->advertised[0] & ~edata->supported[0]), "advertise"},
+ {!!edata->tx_lpi_timer, "tx-timer"},
{edata->tx_lpi_enabled != pf->stats.tx_lpi_status, "tx-lpi"}
};
int i;
@@ -5713,7 +5742,7 @@ static int i40e_set_eee(struct net_device *netdev, struct ethtool_keee *edata)
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
__le16 eee_capability;
- int status = 0;
+ int status;
/* Deny parameters we don't support */
if (i40e_is_eee_param_supported(netdev, edata))
@@ -5786,6 +5815,7 @@ static const struct ethtool_ops i40e_ethtool_ops = {
.get_regs = i40e_get_regs,
.nway_reset = i40e_nway_reset,
.get_link = ethtool_op_get_link,
+ .get_link_ext_stats = i40e_get_link_ext_stats,
.get_wol = i40e_get_wol,
.set_wol = i40e_set_wol,
.set_eeprom = i40e_set_eeprom,
@@ -5799,6 +5829,7 @@ static const struct ethtool_ops i40e_ethtool_ops = {
.set_msglevel = i40e_set_msglevel,
.get_rxnfc = i40e_get_rxnfc,
.set_rxnfc = i40e_set_rxnfc,
+ .get_rx_ring_count = i40e_get_rx_ring_count,
.self_test = i40e_diag_test,
.get_strings = i40e_get_strings,
.get_eee = i40e_get_eee,
@@ -5812,6 +5843,8 @@ static const struct ethtool_ops i40e_ethtool_ops = {
.get_rxfh_indir_size = i40e_get_rxfh_indir_size,
.get_rxfh = i40e_get_rxfh,
.set_rxfh = i40e_set_rxfh,
+ .get_rxfh_fields = i40e_get_rxfh_fields,
+ .set_rxfh_fields = i40e_set_rxfh_fields,
.get_channels = i40e_get_channels,
.set_channels = i40e_set_channels,
.get_module_info = i40e_get_module_info,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 48b9ddb2b1b3..d8192aa23254 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -3,6 +3,7 @@
#include <generated/utsrelease.h>
#include <linux/crash_dump.h>
+#include <linux/net/intel/libie/pctype.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <linux/module.h>
@@ -98,8 +99,9 @@ static int debug = -1;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");
-MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
+MODULE_IMPORT_NS("LIBIE");
+MODULE_IMPORT_NS("LIBIE_ADMINQ");
MODULE_LICENSE("GPL v2");
static struct workqueue_struct *i40e_wq;
@@ -989,7 +991,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
ns->tx_dropped = es->tx_discards;
/* pull in a couple PF stats if this is the main vsi */
- if (vsi == pf->vsi[pf->lan_vsi]) {
+ if (vsi->type == I40E_VSI_MAIN) {
ns->rx_crc_errors = pf->stats.crc_errors;
ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
ns->rx_length_errors = pf->stats.rx_length_errors;
@@ -1234,19 +1236,37 @@ void i40e_update_stats(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
- if (vsi == pf->vsi[pf->lan_vsi])
+ if (vsi->type == I40E_VSI_MAIN)
i40e_update_pf_stats(pf);
i40e_update_vsi_stats(vsi);
}
/**
- * i40e_count_filters - counts VSI mac filters
+ * i40e_count_all_filters - counts VSI MAC filters
* @vsi: the VSI to be searched
*
- * Returns count of mac filters
- **/
-int i40e_count_filters(struct i40e_vsi *vsi)
+ * Return: count of MAC filters in any state.
+ */
+int i40e_count_all_filters(struct i40e_vsi *vsi)
+{
+ struct i40e_mac_filter *f;
+ struct hlist_node *h;
+ int bkt, cnt = 0;
+
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
+ cnt++;
+
+ return cnt;
+}
+
+/**
+ * i40e_count_active_filters - counts active VSI MAC filters
+ * @vsi: the VSI to be searched
+ *
+ * Return: count of active MAC filters.
+ */
+int i40e_count_active_filters(struct i40e_vsi *vsi)
{
struct i40e_mac_filter *f;
struct hlist_node *h;
@@ -1255,6 +1275,7 @@ int i40e_count_filters(struct i40e_vsi *vsi)
hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
if (f->state == I40E_FILTER_NEW ||
+ f->state == I40E_FILTER_NEW_SYNC ||
f->state == I40E_FILTER_ACTIVE)
++cnt;
}
@@ -1441,6 +1462,8 @@ static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
new->f = add_head;
new->state = add_head->state;
+ if (add_head->state == I40E_FILTER_NEW)
+ add_head->state = I40E_FILTER_NEW_SYNC;
/* Add the new filter to the tmp list */
hlist_add_head(&new->hlist, tmp_add_list);
@@ -1550,6 +1573,8 @@ static int i40e_correct_vf_mac_vlan_filters(struct i40e_vsi *vsi,
return -ENOMEM;
new_mac->f = add_head;
new_mac->state = add_head->state;
+ if (add_head->state == I40E_FILTER_NEW)
+ add_head->state = I40E_FILTER_NEW_SYNC;
/* Add the new filter to the tmp list */
hlist_add_head(&new_mac->hlist, tmp_add_list);
@@ -1661,9 +1686,8 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
* @vsi: VSI to remove from
* @f: the filter to remove from the list
*
- * This function should be called instead of i40e_del_filter only if you know
- * the exact filter you will remove already, such as via i40e_find_filter or
- * i40e_find_mac.
+ * This function requires you've found the exact filter you will remove
+ * already, such as via i40e_find_filter or i40e_find_mac.
*
* NOTE: This function is expected to be called with mac_filter_hash_lock
* being held.
@@ -1693,29 +1717,6 @@ void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
}
/**
- * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
- * @vsi: the VSI to be searched
- * @macaddr: the MAC address
- * @vlan: the VLAN
- *
- * NOTE: This function is expected to be called with mac_filter_hash_lock
- * being held.
- * ANOTHER NOTE: This function MUST be called from within the context of
- * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
- * instead of list_for_each_entry().
- **/
-void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
-{
- struct i40e_mac_filter *f;
-
- if (!vsi || !macaddr)
- return;
-
- f = i40e_find_filter(vsi, macaddr, vlan);
- __i40e_del_filter(vsi, f);
-}
-
-/**
* i40e_add_mac_filter - Add a MAC filter for all active VLANs
* @vsi: the VSI to be searched
* @macaddr: the mac address to be filtered
@@ -1734,6 +1735,7 @@ struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
struct hlist_node *h;
int bkt;
+ lockdep_assert_held(&vsi->mac_filter_hash_lock);
if (vsi->info.pvid)
return i40e_add_filter(vsi, macaddr,
le16_to_cpu(vsi->info.pvid));
@@ -1831,7 +1833,7 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
if (ret)
netdev_info(netdev, "Ignoring error from firmware on LAA update, status %pe, AQ ret %s\n",
ERR_PTR(ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
}
/* schedule our worker thread which will take care of
@@ -1863,7 +1865,7 @@ static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
dev_info(&pf->pdev->dev,
"Cannot set RSS key, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
return ret;
}
}
@@ -1875,7 +1877,7 @@ static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
dev_info(&pf->pdev->dev,
"Cannot set RSS lut, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
return ret;
}
}
@@ -2357,19 +2359,18 @@ void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
int num_del, int *retval)
{
struct i40e_hw *hw = &vsi->back->hw;
- enum i40e_admin_queue_err aq_status;
+ enum libie_aq_err aq_status;
int aq_ret;
aq_ret = i40e_aq_remove_macvlan_v2(hw, vsi->seid, list, num_del, NULL,
&aq_status);
/* Explicitly ignore and do not report when firmware returns ENOENT */
- if (aq_ret && !(aq_status == I40E_AQ_RC_ENOENT)) {
+ if (aq_ret && !(aq_status == LIBIE_AQ_RC_ENOENT)) {
*retval = -EIO;
dev_info(&vsi->back->pdev->dev,
"ignoring delete macvlan error on %s, err %pe, aq_err %s\n",
- vsi_name, ERR_PTR(aq_ret),
- i40e_aq_str(hw, aq_status));
+ vsi_name, ERR_PTR(aq_ret), libie_aq_str(aq_status));
}
}
@@ -2392,7 +2393,7 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
int num_add)
{
struct i40e_hw *hw = &vsi->back->hw;
- enum i40e_admin_queue_err aq_status;
+ enum libie_aq_err aq_status;
int fcnt;
i40e_aq_add_macvlan_v2(hw, vsi->seid, list, num_add, NULL, &aq_status);
@@ -2403,19 +2404,17 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
dev_warn(&vsi->back->pdev->dev,
"Error %s adding RX filters on %s, promiscuous mode forced on\n",
- i40e_aq_str(hw, aq_status), vsi_name);
+ libie_aq_str(aq_status), vsi_name);
} else if (vsi->type == I40E_VSI_SRIOV ||
vsi->type == I40E_VSI_VMDQ1 ||
vsi->type == I40E_VSI_VMDQ2) {
dev_warn(&vsi->back->pdev->dev,
"Error %s adding RX filters on %s, please set promiscuous on manually for %s\n",
- i40e_aq_str(hw, aq_status), vsi_name,
- vsi_name);
+ libie_aq_str(aq_status), vsi_name, vsi_name);
} else {
dev_warn(&vsi->back->pdev->dev,
"Error %s adding RX filters on %s, incorrect VSI type: %i.\n",
- i40e_aq_str(hw, aq_status), vsi_name,
- vsi->type);
+ libie_aq_str(aq_status), vsi_name, vsi->type);
}
}
}
@@ -2436,7 +2435,8 @@ static int
i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
struct i40e_mac_filter *f)
{
- bool enable = f->state == I40E_FILTER_NEW;
+ bool enable = f->state == I40E_FILTER_NEW ||
+ f->state == I40E_FILTER_NEW_SYNC;
struct i40e_hw *hw = &vsi->back->hw;
int aq_ret;
@@ -2457,8 +2457,7 @@ i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
dev_warn(&vsi->back->pdev->dev,
"Error %s, forcing overflow promiscuous on %s\n",
- i40e_aq_str(hw, hw->aq.asq_last_status),
- vsi_name);
+ libie_aq_str(hw->aq.asq_last_status), vsi_name);
}
return aq_ret;
@@ -2475,12 +2474,12 @@ i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
**/
static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
{
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
struct i40e_hw *hw = &pf->hw;
int aq_ret;
if (vsi->type == I40E_VSI_MAIN &&
- pf->lan_veb != I40E_NO_VEB &&
+ i40e_pf_get_main_veb(pf) &&
!test_bit(I40E_FLAG_MFP_ENA, pf->flags)) {
/* set defport ON for Main VSI instead of true promisc
* this way we will get all unicast/multicast and VLAN
@@ -2499,7 +2498,7 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
dev_info(&pf->pdev->dev,
"Set default VSI failed, err %pe, aq_err %s\n",
ERR_PTR(aq_ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
}
} else {
aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
@@ -2511,7 +2510,7 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
dev_info(&pf->pdev->dev,
"set unicast promisc failed, err %pe, aq_err %s\n",
ERR_PTR(aq_ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
}
aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
hw,
@@ -2521,7 +2520,7 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
dev_info(&pf->pdev->dev,
"set multicast promisc failed, err %pe, aq_err %s\n",
ERR_PTR(aq_ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
}
}
@@ -2610,6 +2609,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
/* Add it to the hash list */
hlist_add_head(&new->hlist, &tmp_add_list);
+ f->state = I40E_FILTER_NEW_SYNC;
}
/* Count the number of active (current and new) VLAN
@@ -2761,7 +2761,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
spin_lock_bh(&vsi->mac_filter_hash_lock);
hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
/* Only update the state if we're still NEW */
- if (new->f->state == I40E_FILTER_NEW)
+ if (new->f->state == I40E_FILTER_NEW ||
+ new->f->state == I40E_FILTER_NEW_SYNC)
new->f->state = new->state;
hlist_del(&new->hlist);
netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
@@ -2827,7 +2828,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
"set multi promisc failed on %s, err %pe aq_err %s\n",
vsi_name,
ERR_PTR(aq_ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
} else {
dev_info(&pf->pdev->dev, "%s allmulti mode.\n",
cur_multipromisc ? "entering" : "leaving");
@@ -2848,7 +2849,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
cur_promisc ? "on" : "off",
vsi_name,
ERR_PTR(aq_ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
}
}
out:
@@ -2960,7 +2961,7 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
netdev_dbg(netdev, "changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (netif_running(netdev))
i40e_vsi_reinit_locked(vsi);
set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
@@ -2969,27 +2970,6 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
}
/**
- * i40e_ioctl - Access the hwtstamp interface
- * @netdev: network interface device structure
- * @ifr: interface request data
- * @cmd: ioctl command
- **/
-int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
- struct i40e_netdev_priv *np = netdev_priv(netdev);
- struct i40e_pf *pf = np->vsi->back;
-
- switch (cmd) {
- case SIOCGHWTSTAMP:
- return i40e_ptp_get_ts_config(pf, ifr);
- case SIOCSHWTSTAMP:
- return i40e_ptp_set_ts_config(pf, ifr);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-/**
* i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
* @vsi: the vsi being adjusted
**/
@@ -3018,8 +2998,7 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
dev_info(&vsi->back->pdev->dev,
"update vlan stripping failed, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&vsi->back->hw,
- vsi->back->hw.aq.asq_last_status));
+ libie_aq_str(vsi->back->hw.aq.asq_last_status));
}
}
@@ -3053,8 +3032,7 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
dev_info(&vsi->back->pdev->dev,
"update vlan stripping failed, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&vsi->back->hw,
- vsi->back->hw.aq.asq_last_status));
+ libie_aq_str(vsi->back->hw.aq.asq_last_status));
}
}
@@ -3298,8 +3276,7 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
dev_info(&vsi->back->pdev->dev,
"add pvid failed, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&vsi->back->hw,
- vsi->back->hw.aq.asq_last_status));
+ libie_aq_str(vsi->back->hw.aq.asq_last_status));
return -ENOENT;
}
@@ -4197,7 +4174,7 @@ free_queue_irqs:
irq_num = pf->msix_entries[base + vector].vector;
irq_set_affinity_notifier(irq_num, NULL);
irq_update_affinity_hint(irq_num, NULL);
- free_irq(irq_num, &vsi->q_vectors[vector]);
+ free_irq(irq_num, vsi->q_vectors[vector]);
}
return err;
}
@@ -4322,7 +4299,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
struct i40e_q_vector *q_vector = vsi->q_vectors[0];
/* We do not have a way to disarm Queue causes while leaving
@@ -5472,7 +5449,7 @@ static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
**/
static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf)
{
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
u8 num_tc = vsi->mqprio_qopt.qopt.num_tc;
u8 enabled_tc = 1, i;
@@ -5489,13 +5466,14 @@ static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf)
**/
static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
{
- struct i40e_hw *hw = &pf->hw;
u8 i, enabled_tc = 1;
u8 num_tc = 0;
- struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
- if (i40e_is_tc_mqprio_enabled(pf))
- return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc;
+ if (i40e_is_tc_mqprio_enabled(pf)) {
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
+
+ return vsi->mqprio_qopt.qopt.num_tc;
+ }
/* If neither MQPRIO nor DCB is enabled, then always use single TC */
if (!test_bit(I40E_FLAG_DCB_ENA, pf->flags))
@@ -5503,7 +5481,7 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
/* SFP mode will be enabled for all TCs on port */
if (!test_bit(I40E_FLAG_MFP_ENA, pf->flags))
- return i40e_dcb_get_num_tc(dcbcfg);
+ return i40e_dcb_get_num_tc(&pf->hw.local_dcbx_config);
/* MFP mode return count of enabled TCs for this PF */
if (pf->hw.func_caps.iscsi)
@@ -5568,7 +5546,7 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
dev_info(&pf->pdev->dev,
"couldn't get PF vsi bw config, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
return -EINVAL;
}
@@ -5579,7 +5557,7 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
dev_info(&pf->pdev->dev,
"couldn't get PF vsi ets bw config, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
return -EINVAL;
}
@@ -5769,7 +5747,7 @@ int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset)
if (ret) {
dev_info(&pf->pdev->dev, "Update vsi config failed, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
return ret;
}
/* update the local VSI info with updated queue map */
@@ -5825,7 +5803,7 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
dev_info(&pf->pdev->dev,
"Failed querying vsi bw info, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
goto out;
}
if ((bw_config.tc_valid_bits & enabled_tc) != enabled_tc) {
@@ -5892,7 +5870,7 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
dev_info(&pf->pdev->dev,
"Update vsi tc config failed, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
goto out;
}
/* update the local VSI info with updated queue map */
@@ -5905,7 +5883,7 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
dev_info(&pf->pdev->dev,
"Failed updating vsi bw info, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
goto out;
}
@@ -5916,6 +5894,28 @@ out:
}
/**
+ * i40e_vsi_reconfig_tc - Reconfigure VSI Tx Scheduler for stored TC map
+ * @vsi: VSI to be reconfigured
+ *
+ * This reconfigures a particular VSI for TCs that are mapped to the
+ * TC bitmap stored previously for the VSI.
+ *
+ * Context: It is expected that the VSI queues have been quiesced before
+ * calling this function.
+ *
+ * Return: 0 on success, negative value on failure
+ **/
+static int i40e_vsi_reconfig_tc(struct i40e_vsi *vsi)
+{
+ u8 enabled_tc;
+
+ enabled_tc = vsi->tc_config.enabled_tc;
+ vsi->tc_config.enabled_tc = 0;
+
+ return i40e_vsi_config_tc(vsi, enabled_tc);
+}
+
+/**
* i40e_get_link_speed - Returns link speed for the interface
* @vsi: VSI to be configured
*
@@ -5997,7 +5997,7 @@ int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
dev_err(&pf->pdev->dev,
"Failed set tx rate (%llu Mbps) for vsi->seid %u, err %pe aq_err %s\n",
max_tx_rate, seid, ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
return ret;
}
@@ -6009,8 +6009,8 @@ int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
**/
static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
{
- enum i40e_admin_queue_err last_aq_status;
struct i40e_cloud_filter *cfilter;
+ enum libie_aq_err last_aq_status;
struct i40e_channel *ch, *ch_tmp;
struct i40e_pf *pf = vsi->back;
struct hlist_node *node;
@@ -6073,7 +6073,7 @@ static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
dev_info(&pf->pdev->dev,
"Failed to delete cloud filter, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw, last_aq_status));
+ libie_aq_str(last_aq_status));
kfree(cfilter);
}
@@ -6208,7 +6208,7 @@ static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)
dev_info(&pf->pdev->dev,
"Cannot set RSS lut, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
kfree(lut);
return ret;
}
@@ -6307,8 +6307,7 @@ static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
dev_info(&pf->pdev->dev,
"add new vsi failed, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
return -ENOENT;
}
@@ -6477,6 +6476,7 @@ static inline int i40e_setup_hw_channel(struct i40e_pf *pf,
static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi,
struct i40e_channel *ch)
{
+ struct i40e_vsi *main_vsi;
u8 vsi_type;
u16 seid;
int ret;
@@ -6490,7 +6490,8 @@ static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi,
}
/* underlying switching element */
- seid = pf->vsi[pf->lan_vsi]->uplink_seid;
+ main_vsi = i40e_pf_get_main_vsi(pf);
+ seid = main_vsi->uplink_seid;
/* create channel (VSI), configure TX rings */
ret = i40e_setup_hw_channel(pf, vsi, ch, seid, vsi_type);
@@ -6549,12 +6550,10 @@ static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags,
pf->last_sw_conf_valid_flags,
mode, NULL);
- if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH)
+ if (ret && hw->aq.asq_last_status != LIBIE_AQ_RC_ESRCH)
dev_err(&pf->pdev->dev,
"couldn't set switch config bits, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(hw,
- hw->aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(hw->aq.asq_last_status));
return ret;
}
@@ -6753,8 +6752,7 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
if (ret) {
dev_info(&pf->pdev->dev,
"VEB bw config failed, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
goto out;
}
@@ -6763,8 +6761,7 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
if (ret) {
dev_info(&pf->pdev->dev,
"Failed getting veb bw config, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
}
out:
@@ -6808,7 +6805,7 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
/* - Enable all TCs for the LAN VSI
* - For all others keep them at TC0 for now
*/
- if (v == pf->lan_vsi)
+ if (vsi->type == I40E_VSI_MAIN)
tc_map = i40e_pf_get_tc_map(pf);
else
tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
@@ -6845,7 +6842,7 @@ static int i40e_resume_port_tx(struct i40e_pf *pf)
dev_info(&pf->pdev->dev,
"Resume Port Tx failed, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
/* Schedule PF reset to recover */
set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
i40e_service_event_schedule(pf);
@@ -6869,8 +6866,7 @@ static int i40e_suspend_port_tx(struct i40e_pf *pf)
if (ret) {
dev_info(&pf->pdev->dev,
"Suspend Port Tx failed, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
/* Schedule PF reset to recover */
set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
i40e_service_event_schedule(pf);
@@ -6909,8 +6905,7 @@ static int i40e_hw_set_dcb_config(struct i40e_pf *pf,
if (ret) {
dev_info(&pf->pdev->dev,
"Set DCB Config failed, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
goto out;
}
@@ -7026,8 +7021,7 @@ int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg)
if (ret) {
dev_info(&pf->pdev->dev,
"Modify Port ETS failed, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
goto out;
}
@@ -7047,7 +7041,9 @@ int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg)
/* Configure Rx Packet Buffers in HW */
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
- mfs_tc[i] = pf->vsi[pf->lan_vsi]->netdev->mtu;
+ struct i40e_vsi *main_vsi = i40e_pf_get_main_vsi(pf);
+
+ mfs_tc[i] = main_vsi->netdev->mtu;
mfs_tc[i] += I40E_PACKET_HDR_PAD;
}
@@ -7064,8 +7060,7 @@ int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg)
if (ret) {
dev_info(&pf->pdev->dev,
"DCB Updated failed, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
goto out;
}
@@ -7148,8 +7143,7 @@ int i40e_dcb_sw_default_config(struct i40e_pf *pf)
if (err) {
dev_info(&pf->pdev->dev,
"Enable Port ETS failed, err %pe aq_err %s\n",
- ERR_PTR(err),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(err), libie_aq_str(pf->hw.aq.asq_last_status));
err = -ENOENT;
goto out;
}
@@ -7222,14 +7216,13 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
dev_dbg(&pf->pdev->dev,
"DCBX offload is supported for this PF.\n");
}
- } else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) {
+ } else if (pf->hw.aq.asq_last_status == LIBIE_AQ_RC_EPERM) {
dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n");
set_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags);
} else {
dev_info(&pf->pdev->dev,
"Query for DCB configuration failed, err %pe aq_err %s\n",
- ERR_PTR(err),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(err), libie_aq_str(pf->hw.aq.asq_last_status));
}
out:
@@ -7237,6 +7230,26 @@ out:
}
#endif /* CONFIG_I40E_DCB */
+static void i40e_print_link_message_eee(struct i40e_vsi *vsi,
+ const char *speed, const char *fc)
+{
+ struct ethtool_keee kedata;
+
+ memzero_explicit(&kedata, sizeof(kedata));
+ if (vsi->netdev->ethtool_ops->get_eee)
+ vsi->netdev->ethtool_ops->get_eee(vsi->netdev, &kedata);
+
+ if (!linkmode_empty(kedata.supported))
+ netdev_info(vsi->netdev,
+ "NIC Link is Up, %sbps Full Duplex, Flow Control: %s, EEE: %s\n",
+ speed, fc,
+ kedata.eee_enabled ? "Enabled" : "Disabled");
+ else
+ netdev_info(vsi->netdev,
+ "NIC Link is Up, %sbps Full Duplex, Flow Control: %s\n",
+ speed, fc);
+}
+
/**
* i40e_print_link_message - print link up or down
* @vsi: the VSI for which link needs a message
@@ -7368,9 +7381,7 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
"NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
speed, req_fec, fec, an, fc);
} else {
- netdev_info(vsi->netdev,
- "NIC Link is Up, %sbps Full Duplex, Flow Control: %s\n",
- speed, fc);
+ i40e_print_link_message_eee(vsi, speed, fc);
}
}
@@ -7467,8 +7478,7 @@ static int i40e_force_link_state(struct i40e_pf *pf, bool is_up)
if (err) {
dev_err(&pf->pdev->dev,
"failed to get phy cap., ret = %pe last_status = %s\n",
- ERR_PTR(err),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ ERR_PTR(err), libie_aq_str(hw->aq.asq_last_status));
return err;
}
speed = abilities.link_speed;
@@ -7479,8 +7489,7 @@ static int i40e_force_link_state(struct i40e_pf *pf, bool is_up)
if (err) {
dev_err(&pf->pdev->dev,
"failed to get phy cap., ret = %pe last_status = %s\n",
- ERR_PTR(err),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ ERR_PTR(err), libie_aq_str(hw->aq.asq_last_status));
return err;
}
@@ -7524,8 +7533,7 @@ static int i40e_force_link_state(struct i40e_pf *pf, bool is_up)
if (err) {
dev_err(&pf->pdev->dev,
"set phy config ret = %pe last_status = %s\n",
- ERR_PTR(err),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(err), libie_aq_str(pf->hw.aq.asq_last_status));
return err;
}
@@ -7865,8 +7873,7 @@ static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev,
}
dev_info(&pf->pdev->dev,
"Error adding mac filter on macvlan err %pe, aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(hw, aq_err));
+ ERR_PTR(ret), libie_aq_str(aq_err));
netdev_err(vdev, "L2fwd offload disabled to L2 filter error\n");
}
@@ -7938,8 +7945,7 @@ static int i40e_setup_macvlans(struct i40e_vsi *vsi, u16 macvlan_cnt, u16 qcnt,
if (ret) {
dev_info(&pf->pdev->dev,
"Update vsi tc config failed, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(hw->aq.asq_last_status));
return ret;
}
/* update the local VSI info with updated queue map */
@@ -8154,8 +8160,7 @@ static void i40e_fwd_del(struct net_device *netdev, void *vdev)
} else {
dev_info(&pf->pdev->dev,
"Error deleting mac filter on macvlan err %pe, aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(hw, aq_err));
+ ERR_PTR(ret), libie_aq_str(aq_err));
}
break;
}
@@ -8643,6 +8648,10 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
flow_rule_match_control(rule, &match);
addr_type = match.key->addr_type;
+
+ if (flow_rule_has_control_flags(match.mask->flags,
+ f->common.extack))
+ return -EOPNOTSUPP;
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
@@ -9113,7 +9122,7 @@ err_setup_rx:
i40e_vsi_free_rx_resources(vsi);
err_setup_tx:
i40e_vsi_free_tx_resources(vsi);
- if (vsi == pf->vsi[pf->lan_vsi])
+ if (vsi->type == I40E_VSI_MAIN)
i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
return err;
@@ -9154,47 +9163,47 @@ static void i40e_fdir_filter_exit(struct i40e_pf *pf)
i40e_reset_fdir_filter_cnt(pf);
/* Reprogram the default input set for TCP/IPv4 */
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
/* Reprogram the default input set for TCP/IPv6 */
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_TCP,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP,
I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
/* Reprogram the default input set for UDP/IPv4 */
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
/* Reprogram the default input set for UDP/IPv6 */
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_UDP,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP,
I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
/* Reprogram the default input set for SCTP/IPv4 */
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
/* Reprogram the default input set for SCTP/IPv6 */
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_SCTP,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP,
I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
/* Reprogram the default input set for Other/IPv4 */
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_FRAG_IPV4,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
/* Reprogram the default input set for Other/IPv6 */
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_OTHER,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV6,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_FRAG_IPV6,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
}
@@ -9405,8 +9414,7 @@ bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
static int i40e_handle_lldp_event(struct i40e_pf *pf,
struct i40e_arq_event_info *e)
{
- struct i40e_aqc_lldp_get_mib *mib =
- (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
+ struct i40e_aqc_lldp_get_mib *mib = libie_aq_raw(&e->desc);
struct i40e_hw *hw = &pf->hw;
struct i40e_dcbx_config tmp_dcbx_cfg;
bool need_reconfig = false;
@@ -9463,8 +9471,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
dev_info(&pf->pdev->dev,
"Failed querying DCB configuration data from firmware, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
}
goto exit;
}
@@ -9545,8 +9552,7 @@ void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
struct i40e_arq_event_info *e)
{
- struct i40e_aqc_lan_overflow *data =
- (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
+ struct i40e_aqc_lan_overflow *data = libie_aq_raw(&e->desc);
u32 queue = le32_to_cpu(data->prtdcb_rupto);
u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
struct i40e_hw *hw = &pf->hw;
@@ -9571,19 +9577,6 @@ static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
}
/**
- * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
- * @pf: board private structure
- **/
-u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
-{
- u32 val, fcnt_prog;
-
- val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
- fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
- return fcnt_prog;
-}
-
-/**
* i40e_get_current_fd_count - Get total FD filters programmed for this PF
* @pf: board private structure
**/
@@ -9635,7 +9628,7 @@ static void i40e_reenable_fdir_atr(struct i40e_pf *pf)
* settings. It is safe to restore the default input set
* because there are no active TCPv4 filter rules.
*/
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
@@ -9804,7 +9797,7 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
} else {
/* replay sideband filters */
- i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
+ i40e_fdir_filter_restore(i40e_pf_get_main_vsi(pf));
if (!disable_atr && !pf->fd_tcp4_filter_cnt)
clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
@@ -9902,7 +9895,8 @@ static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
**/
static void i40e_link_event(struct i40e_pf *pf)
{
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
+ struct i40e_veb *veb = i40e_pf_get_main_veb(pf);
u8 new_link_speed, old_link_speed;
bool new_link, old_link;
int status;
@@ -9937,13 +9931,16 @@ static void i40e_link_event(struct i40e_pf *pf)
new_link == netif_carrier_ok(vsi->netdev)))
return;
+ if (!new_link && old_link)
+ pf->link_down_events++;
+
i40e_print_link_message(vsi, new_link);
/* Notify the base of the switch tree connected to
* the link. Floating VEBs are not notified.
*/
- if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
- i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
+ if (veb)
+ i40e_veb_link_event(veb, new_link);
else
i40e_vsi_link_event(vsi, new_link);
@@ -10075,8 +10072,7 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
static void i40e_handle_link_event(struct i40e_pf *pf,
struct i40e_arq_event_info *e)
{
- struct i40e_aqc_get_link_status *status =
- (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
+ struct i40e_aqc_get_link_status *status = libie_aq_raw(&e->desc);
/* Do a new status request to re-enable LSE reporting
* and load new status information into the hw struct
@@ -10273,7 +10269,7 @@ static void i40e_verify_eeprom(struct i40e_pf *pf)
**/
static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
{
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
struct i40e_vsi_context ctxt;
int ret;
@@ -10284,8 +10280,7 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
if (ret) {
dev_info(&pf->pdev->dev,
"couldn't get PF vsi config, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
return;
}
ctxt.flags = I40E_AQ_VSI_TYPE_PF;
@@ -10296,8 +10291,7 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
if (ret) {
dev_info(&pf->pdev->dev,
"update vsi switch failed, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
}
}
@@ -10309,7 +10303,7 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
**/
static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
{
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
struct i40e_vsi_context ctxt;
int ret;
@@ -10320,8 +10314,7 @@ static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
if (ret) {
dev_info(&pf->pdev->dev,
"couldn't get PF vsi config, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
return;
}
ctxt.flags = I40E_AQ_VSI_TYPE_PF;
@@ -10332,8 +10325,7 @@ static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
if (ret) {
dev_info(&pf->pdev->dev,
"update vsi switch failed, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
}
}
@@ -10385,7 +10377,7 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
if (veb->uplink_seid == pf->mac_seid) {
/* Check that the LAN VSI has VEB owning flag set */
- ctl_vsi = pf->vsi[pf->lan_vsi];
+ ctl_vsi = i40e_pf_get_main_vsi(pf);
if (WARN_ON(ctl_vsi->veb_idx != veb->idx ||
!(ctl_vsi->flags & I40E_VSI_FLAG_VEB_OWNER))) {
@@ -10448,12 +10440,12 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
static int i40e_get_capabilities(struct i40e_pf *pf,
enum i40e_admin_queue_opc list_type)
{
- struct i40e_aqc_list_capabilities_element_resp *cap_buf;
+ struct libie_aqc_list_caps_elem *cap_buf;
u16 data_size;
int buf_len;
int err;
- buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
+ buf_len = 40 * sizeof(struct libie_aqc_list_caps_elem);
do {
cap_buf = kzalloc(buf_len, GFP_KERNEL);
if (!cap_buf)
@@ -10466,15 +10458,14 @@ static int i40e_get_capabilities(struct i40e_pf *pf,
/* data loaded, buffer no longer needed */
kfree(cap_buf);
- if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
+ if (pf->hw.aq.asq_last_status == LIBIE_AQ_RC_ENOMEM) {
/* retry with a larger buffer */
buf_len = data_size;
- } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK || err) {
+ } else if (pf->hw.aq.asq_last_status != LIBIE_AQ_RC_OK || err) {
dev_info(&pf->pdev->dev,
"capability discovery failed, err %pe aq_err %s\n",
ERR_PTR(err),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
return -ENODEV;
}
} while (err);
@@ -10528,7 +10519,7 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi);
**/
static void i40e_fdir_sb_setup(struct i40e_pf *pf)
{
- struct i40e_vsi *vsi;
+ struct i40e_vsi *main_vsi, *vsi;
/* quick workaround for an NVM issue that leaves a critical register
* uninitialized
@@ -10553,8 +10544,8 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf)
/* create a new VSI if none exists */
if (!vsi) {
- vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
- pf->vsi[pf->lan_vsi]->seid, 0);
+ main_vsi = i40e_pf_get_main_vsi(pf);
+ vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, main_vsi->seid, 0);
if (!vsi) {
dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
@@ -10611,8 +10602,7 @@ static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
dev_dbg(&pf->pdev->dev,
"Failed to rebuild cloud filter, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
return ret;
}
}
@@ -10833,7 +10823,7 @@ static int i40e_reset(struct i40e_pf *pf)
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
{
const bool is_recovery_mode_reported = i40e_check_recovery_mode(pf);
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
struct i40e_hw *hw = &pf->hw;
struct i40e_veb *veb;
int ret;
@@ -10842,7 +10832,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
is_recovery_mode_reported)
- i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev);
+ i40e_set_ethtool_ops(vsi->netdev);
if (test_bit(__I40E_DOWN, pf->state) &&
!test_bit(__I40E_RECOVERY_MODE, pf->state))
@@ -10853,8 +10843,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
ret = i40e_init_adminq(&pf->hw);
if (ret) {
dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
goto clear_recovery;
}
i40e_get_oem_version(&pf->hw);
@@ -10965,8 +10954,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
if (ret)
dev_info(&pf->pdev->dev, "set phy mask fail, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
/* Rebuild the VSIs and VEBs that existed before reset.
* They are still in our local switch element arrays, so only
@@ -11064,8 +11052,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
if (ret)
dev_info(&pf->pdev->dev, "link restart failed, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
}
/* reinit the misc interrupt */
if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
@@ -11096,8 +11083,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
dev_warn(&pf->pdev->dev,
"Failed to restore promiscuous setting: %s, err %pe aq_err %s\n",
pf->cur_promisc ? "on" : "off",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
i40e_reset_all_vfs(pf, true);
@@ -11138,6 +11124,8 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
ret = i40e_reset(pf);
if (!ret)
i40e_rebuild(pf, reinit, lock_acquired);
+ else
+ dev_err(&pf->pdev->dev, "%s: i40e_reset() FAILED", __func__);
}
/**
@@ -11156,6 +11144,67 @@ static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
}
/**
+ * i40e_print_vf_mdd_event - print VF Tx/Rx malicious driver detect event
+ * @pf: board private structure
+ * @vf: pointer to the VF structure
+ * @is_tx: true - for Tx event, false - for Rx
+ */
+static void i40e_print_vf_mdd_event(struct i40e_pf *pf, struct i40e_vf *vf,
+ bool is_tx)
+{
+ dev_err(&pf->pdev->dev, is_tx ?
+ "%lld Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pm. mdd-auto-reset-vfs=%s\n" :
+ "%lld Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pm. mdd-auto-reset-vfs=%s\n",
+ is_tx ? vf->mdd_tx_events.count : vf->mdd_rx_events.count,
+ pf->hw.pf_id,
+ vf->vf_id,
+ vf->default_lan_addr.addr,
+ str_on_off(test_bit(I40E_FLAG_MDD_AUTO_RESET_VF, pf->flags)));
+}
+
+/**
+ * i40e_print_vfs_mdd_events - print VF malicious driver detect events
+ * @pf: pointer to the PF structure
+ *
+ * Called from i40e_handle_mdd_event to rate limit and print VF MDD events.
+ */
+static void i40e_print_vfs_mdd_events(struct i40e_pf *pf)
+{
+ unsigned int i;
+
+ /* check that there are pending MDD events to print */
+ if (!test_and_clear_bit(__I40E_MDD_VF_PRINT_PENDING, pf->state))
+ return;
+
+ if (!__ratelimit(&pf->mdd_message_rate_limit))
+ return;
+
+ for (i = 0; i < pf->num_alloc_vfs; i++) {
+ struct i40e_vf *vf = &pf->vf[i];
+ bool is_printed = false;
+
+ /* only print Rx MDD event message if there are new events */
+ if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
+ vf->mdd_rx_events.last_printed = vf->mdd_rx_events.count;
+ i40e_print_vf_mdd_event(pf, vf, false);
+ is_printed = true;
+ }
+
+ /* only print Tx MDD event message if there are new events */
+ if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
+ vf->mdd_tx_events.last_printed = vf->mdd_tx_events.count;
+ i40e_print_vf_mdd_event(pf, vf, true);
+ is_printed = true;
+ }
+
+ if (is_printed && !test_bit(I40E_FLAG_MDD_AUTO_RESET_VF, pf->flags))
+ dev_info(&pf->pdev->dev,
+ "Use PF Control I/F to re-enable the VF #%d\n",
+ i);
+ }
+}
+
+/**
* i40e_handle_mdd_event
* @pf: pointer to the PF structure
*
@@ -11169,8 +11218,13 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
u32 reg;
int i;
- if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state))
+ if (!test_and_clear_bit(__I40E_MDD_EVENT_PENDING, pf->state)) {
+ /* Since the VF MDD event logging is rate limited, check if
+ * there are pending MDD events.
+ */
+ i40e_print_vfs_mdd_events(pf);
return;
+ }
/* find what triggered the MDD event */
reg = rd32(hw, I40E_GL_MDET_TX);
@@ -11214,36 +11268,48 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
/* see if one of the VFs needs its hand slapped */
for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
+ bool is_mdd_on_tx = false;
+ bool is_mdd_on_rx = false;
+
vf = &(pf->vf[i]);
reg = rd32(hw, I40E_VP_MDET_TX(i));
if (reg & I40E_VP_MDET_TX_VALID_MASK) {
+ set_bit(__I40E_MDD_VF_PRINT_PENDING, pf->state);
wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
- vf->num_mdd_events++;
- dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
- i);
- dev_info(&pf->pdev->dev,
- "Use PF Control I/F to re-enable the VF\n");
+ vf->mdd_tx_events.count++;
set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
+ is_mdd_on_tx = true;
}
reg = rd32(hw, I40E_VP_MDET_RX(i));
if (reg & I40E_VP_MDET_RX_VALID_MASK) {
+ set_bit(__I40E_MDD_VF_PRINT_PENDING, pf->state);
wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
- vf->num_mdd_events++;
- dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
- i);
- dev_info(&pf->pdev->dev,
- "Use PF Control I/F to re-enable the VF\n");
+ vf->mdd_rx_events.count++;
set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
+ is_mdd_on_rx = true;
+ }
+
+ if ((is_mdd_on_tx || is_mdd_on_rx) &&
+ test_bit(I40E_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
+ /* VF MDD event counters will be cleared by
+ * reset, so print the event prior to reset.
+ */
+ if (is_mdd_on_rx)
+ i40e_print_vf_mdd_event(pf, vf, false);
+ if (is_mdd_on_tx)
+ i40e_print_vf_mdd_event(pf, vf, true);
+
+ i40e_vc_reset_vf(vf, true);
}
}
- /* re-enable mdd interrupt cause */
- clear_bit(__I40E_MDD_EVENT_PENDING, pf->state);
reg = rd32(hw, I40E_PFINT_ICR0_ENA);
reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
wr32(hw, I40E_PFINT_ICR0_ENA, reg);
i40e_flush(hw);
+
+ i40e_print_vfs_mdd_events(pf);
}
/**
@@ -11266,7 +11332,7 @@ static void i40e_service_task(struct work_struct *work)
return;
if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) {
- i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]);
+ i40e_detect_recover_hung(pf);
i40e_sync_filters_subtask(pf);
i40e_reset_subtask(pf);
i40e_handle_mdd_event(pf);
@@ -11275,14 +11341,12 @@ static void i40e_service_task(struct work_struct *work)
i40e_fdir_reinit_subtask(pf);
if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) {
/* Client subtask will reopen next time through. */
- i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi],
- true);
+ i40e_notify_client_of_netdev_close(pf, true);
} else {
i40e_client_subtask(pf);
if (test_and_clear_bit(__I40E_CLIENT_L2_CHANGE,
pf->state))
- i40e_notify_client_of_l2_param_changes(
- pf->vsi[pf->lan_vsi]);
+ i40e_notify_client_of_l2_param_changes(pf);
}
i40e_sync_filters_subtask(pf);
} else {
@@ -11312,7 +11376,7 @@ static void i40e_service_task(struct work_struct *work)
**/
static void i40e_service_timer(struct timer_list *t)
{
- struct i40e_pf *pf = from_timer(pf, t, service_timer);
+ struct i40e_pf *pf = timer_container_of(pf, t, service_timer);
mod_timer(&pf->service_timer,
round_jiffies(jiffies + pf->service_timer_period));
@@ -11990,7 +12054,7 @@ static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
/* if not MSIX, give the one vector only to the LAN VSI */
if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
num_q_vectors = vsi->num_q_vectors;
- else if (vsi == pf->vsi[pf->lan_vsi])
+ else if (vsi->type == I40E_VSI_MAIN)
num_q_vectors = 1;
else
return -EINVAL;
@@ -12225,8 +12289,7 @@ static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
dev_info(&pf->pdev->dev,
"Cannot get RSS key, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
return ret;
}
}
@@ -12239,8 +12302,7 @@ static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
dev_info(&pf->pdev->dev,
"Cannot get RSS lut, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
return ret;
}
}
@@ -12396,7 +12458,7 @@ void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
**/
static int i40e_pf_config_rss(struct i40e_pf *pf)
{
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
u8 seed[I40E_HKEY_ARRAY_SIZE];
u8 *lut;
struct i40e_hw *hw = &pf->hw;
@@ -12407,7 +12469,7 @@ static int i40e_pf_config_rss(struct i40e_pf *pf)
/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
- hena |= i40e_pf_get_default_rss_hena(pf);
+ hena |= i40e_pf_get_default_rss_hashcfg(pf);
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
@@ -12468,7 +12530,7 @@ static int i40e_pf_config_rss(struct i40e_pf *pf)
**/
int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
{
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
int new_rss_size;
if (!test_bit(I40E_FLAG_RSS_ENA, pf->flags))
@@ -12555,89 +12617,6 @@ int i40e_set_partition_bw_setting(struct i40e_pf *pf)
}
/**
- * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
- * @pf: board private structure
- **/
-int i40e_commit_partition_bw_setting(struct i40e_pf *pf)
-{
- /* Commit temporary BW setting to permanent NVM image */
- enum i40e_admin_queue_err last_aq_status;
- u16 nvm_word;
- int ret;
-
- if (pf->hw.partition_id != 1) {
- dev_info(&pf->pdev->dev,
- "Commit BW only works on partition 1! This is partition %d",
- pf->hw.partition_id);
- ret = -EOPNOTSUPP;
- goto bw_commit_out;
- }
-
- /* Acquire NVM for read access */
- ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
- last_aq_status = pf->hw.aq.asq_last_status;
- if (ret) {
- dev_info(&pf->pdev->dev,
- "Cannot acquire NVM for read access, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, last_aq_status));
- goto bw_commit_out;
- }
-
- /* Read word 0x10 of NVM - SW compatibility word 1 */
- ret = i40e_aq_read_nvm(&pf->hw,
- I40E_SR_NVM_CONTROL_WORD,
- 0x10, sizeof(nvm_word), &nvm_word,
- false, NULL);
- /* Save off last admin queue command status before releasing
- * the NVM
- */
- last_aq_status = pf->hw.aq.asq_last_status;
- i40e_release_nvm(&pf->hw);
- if (ret) {
- dev_info(&pf->pdev->dev, "NVM read error, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, last_aq_status));
- goto bw_commit_out;
- }
-
- /* Wait a bit for NVM release to complete */
- msleep(50);
-
- /* Acquire NVM for write access */
- ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
- last_aq_status = pf->hw.aq.asq_last_status;
- if (ret) {
- dev_info(&pf->pdev->dev,
- "Cannot acquire NVM for write access, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, last_aq_status));
- goto bw_commit_out;
- }
- /* Write it back out unchanged to initiate update NVM,
- * which will force a write of the shadow (alt) RAM to
- * the NVM - thus storing the bandwidth values permanently.
- */
- ret = i40e_aq_update_nvm(&pf->hw,
- I40E_SR_NVM_CONTROL_WORD,
- 0x10, sizeof(nvm_word),
- &nvm_word, true, 0, NULL);
- /* Save off last admin queue command status before releasing
- * the NVM
- */
- last_aq_status = pf->hw.aq.asq_last_status;
- i40e_release_nvm(&pf->hw);
- if (ret)
- dev_info(&pf->pdev->dev,
- "BW settings NOT SAVED, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, last_aq_status));
-bw_commit_out:
-
- return ret;
-}
-
-/**
* i40e_is_total_port_shutdown_enabled - read NVM and return value
* if total port shutdown feature is enabled for this PF
* @pf: board private structure
@@ -12984,8 +12963,7 @@ static int i40e_udp_tunnel_set_port(struct net_device *netdev,
NULL);
if (ret) {
netdev_info(netdev, "add UDP port failed, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(hw->aq.asq_last_status));
return -EIO;
}
@@ -13004,8 +12982,7 @@ static int i40e_udp_tunnel_unset_port(struct net_device *netdev,
ret = i40e_aq_del_udp_tunnel(hw, ti->hw_priv, NULL);
if (ret) {
netdev_info(netdev, "delete UDP port failed, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(hw->aq.asq_last_status));
return -EIO;
}
@@ -13036,12 +13013,13 @@ static int i40e_get_phys_port_id(struct net_device *netdev,
* @addr: the MAC address entry being added
* @vid: VLAN ID
* @flags: instructions from stack about fdb operation
+ * @notified: whether notification was emitted
* @extack: netlink extended ack, unused currently
*/
static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr, u16 vid,
- u16 flags,
+ u16 flags, bool *notified,
struct netlink_ext_ack *extack)
{
struct i40e_netdev_priv *np = netdev_priv(dev);
@@ -13107,7 +13085,7 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
int rem;
/* Only for PF VSI for now */
- if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
+ if (vsi->type != I40E_VSI_MAIN)
return -EOPNOTSUPP;
/* Find the HW bridge for PF VSI */
@@ -13117,20 +13095,16 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
if (!br_spec)
return -EINVAL;
- nla_for_each_nested(attr, br_spec, rem) {
- __u16 mode;
+ nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
+ __u16 mode = nla_get_u16(attr);
- if (nla_type(attr) != IFLA_BRIDGE_MODE)
- continue;
-
- mode = nla_get_u16(attr);
if ((mode != BRIDGE_MODE_VEPA) &&
(mode != BRIDGE_MODE_VEB))
return -EINVAL;
/* Insert a new HW bridge */
if (!veb) {
- veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
+ veb = i40e_veb_setup(pf, vsi->uplink_seid, vsi->seid,
vsi->tc_config.enabled_tc);
if (veb) {
veb->bridge_mode = mode;
@@ -13179,7 +13153,7 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct i40e_veb *veb;
/* Only for PF VSI for now */
- if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
+ if (vsi->type != I40E_VSI_MAIN)
return -EOPNOTSUPP;
/* Find the HW bridge for the PF VSI */
@@ -13264,6 +13238,10 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
bool need_reset;
int i;
+ /* VSI shall be deleted in a moment, block loading new programs */
+ if (prog && test_bit(__I40E_IN_REMOVE, pf->state))
+ return -EINVAL;
+
/* Don't allow frames that span over multiple buffers */
if (vsi->netdev->mtu > frame_size - I40E_PACKET_HDR_PAD) {
NL_SET_ERR_MSG_MOD(extack, "MTU too large for linear frames and XDP prog does not support frags");
@@ -13272,14 +13250,9 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
/* When turning XDP on->off/off->on we reset and rebuild the rings. */
need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);
-
if (need_reset)
i40e_prep_for_reset(pf);
- /* VSI shall be deleted in a moment, just return EINVAL */
- if (test_bit(__I40E_IN_REMOVE, pf->state))
- return -EINVAL;
-
old_prog = xchg(&vsi->xdp_prog, prog);
if (need_reset) {
@@ -13609,7 +13582,6 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = i40e_set_mac,
.ndo_change_mtu = i40e_change_mtu,
- .ndo_eth_ioctl = i40e_ioctl,
.ndo_tx_timeout = i40e_tx_timeout,
.ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
@@ -13637,6 +13609,8 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_xsk_wakeup = i40e_xsk_wakeup,
.ndo_dfwd_add_station = i40e_fwd_add,
.ndo_dfwd_del_station = i40e_fwd_del,
+ .ndo_hwtstamp_get = i40e_ptp_hwtstamp_get,
+ .ndo_hwtstamp_set = i40e_ptp_hwtstamp_set,
};
/**
@@ -13761,9 +13735,10 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
* the end, which is 4 bytes long, so force truncation of the
* original name by IFNAMSIZ - 4
*/
- snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d",
- IFNAMSIZ - 4,
- pf->vsi[pf->lan_vsi]->netdev->name);
+ struct i40e_vsi *main_vsi = i40e_pf_get_main_vsi(pf);
+
+ snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d", IFNAMSIZ - 4,
+ main_vsi->netdev->name);
eth_random_addr(mac_addr);
spin_lock_bh(&vsi->mac_filter_hash_lock);
@@ -13897,8 +13872,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
dev_info(&pf->pdev->dev,
"couldn't get PF vsi config, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
return -ENOENT;
}
vsi->info = ctxt.info;
@@ -13927,8 +13901,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
dev_info(&pf->pdev->dev,
"update vsi failed, err %d aq_err %s\n",
ret,
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
ret = -ENOENT;
goto err;
}
@@ -13947,8 +13920,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
dev_info(&pf->pdev->dev,
"update vsi failed, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
ret = -ENOENT;
goto err;
}
@@ -13971,8 +13943,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
"failed to configure TCs for main VSI tc_map 0x%08x, err %pe aq_err %s\n",
enabled_tc,
ERR_PTR(ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
}
}
break;
@@ -14066,8 +14037,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
dev_info(&vsi->back->pdev->dev,
"add vsi failed, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
ret = -ENOENT;
goto err;
}
@@ -14097,8 +14067,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
if (ret) {
dev_info(&pf->pdev->dev,
"couldn't get vsi bw info, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
/* VSI is already added so not tearing that up */
ret = 0;
}
@@ -14130,8 +14099,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
vsi->seid, vsi->uplink_seid);
return -ENODEV;
}
- if (vsi == pf->vsi[pf->lan_vsi] &&
- !test_bit(__I40E_DOWN, pf->state)) {
+ if (vsi->type == I40E_VSI_MAIN && !test_bit(__I40E_DOWN, pf->state)) {
dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
return -ENODEV;
}
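Throughout the file, open-coded pf->vsi[pf->lan_vsi] and pf->veb[pf->lan_veb] dereferences give way to i40e_pf_get_main_vsi()/i40e_pf_get_main_veb(). The helpers themselves are defined elsewhere in the driver; a plausible shape, assuming they keep the existing lan_vsi/lan_veb sentinels, would be:

	/* assumed helpers -- the real definitions are not part of this diff */
	static inline struct i40e_vsi *i40e_pf_get_main_vsi(struct i40e_pf *pf)
	{
		return pf->lan_vsi != I40E_NO_VSI ? pf->vsi[pf->lan_vsi] : NULL;
	}

	static inline struct i40e_veb *i40e_pf_get_main_veb(struct i40e_pf *pf)
	{
		return pf->lan_veb < I40E_MAX_VEB ? pf->veb[pf->lan_veb] : NULL;
	}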
@@ -14275,9 +14243,9 @@ vector_setup_out:
**/
static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
{
+ struct i40e_vsi *main_vsi;
u16 alloc_queue_pairs;
struct i40e_pf *pf;
- u8 enabled_tc;
int ret;
if (!vsi)
@@ -14309,10 +14277,10 @@ static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
/* Update the FW view of the VSI. Force a reset of TC and queue
* layout configurations.
*/
- enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
- pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
- pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
- i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
+ main_vsi = i40e_pf_get_main_vsi(pf);
+ main_vsi->seid = pf->main_vsi_seid;
+ i40e_vsi_reconfig_tc(main_vsi);
+
if (vsi->type == I40E_VSI_MAIN)
i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr);
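The removed lines open-coded a "clear enabled_tc, then reconfigure" sequence; the new i40e_vsi_reconfig_tc() call presumably wraps exactly that. A sketch mirroring the dropped code (the real helper is defined elsewhere in i40e_main.c):

	static int i40e_vsi_reconfig_tc(struct i40e_vsi *vsi)
	{
		u8 enabled_tc = vsi->tc_config.enabled_tc;

		vsi->tc_config.enabled_tc = 0;
		return i40e_vsi_config_tc(vsi, enabled_tc);
	}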
@@ -14386,13 +14354,13 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
}
if (vsi->uplink_seid == pf->mac_seid)
- veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
+ veb = i40e_veb_setup(pf, pf->mac_seid, vsi->seid,
vsi->tc_config.enabled_tc);
else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
- veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
+ veb = i40e_veb_setup(pf, vsi->uplink_seid, vsi->seid,
vsi->tc_config.enabled_tc);
if (veb) {
- if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
+ if (vsi->type != I40E_VSI_MAIN) {
dev_info(&vsi->back->pdev->dev,
"New VSI creation error, uplink seid of LAN VSI expected.\n");
return NULL;
@@ -14547,8 +14515,7 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb)
if (ret) {
dev_info(&pf->pdev->dev,
"query veb bw config failed, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(hw->aq.asq_last_status));
goto out;
}
@@ -14557,8 +14524,7 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb)
if (ret) {
dev_info(&pf->pdev->dev,
"query veb bw ets config failed, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(hw->aq.asq_last_status));
goto out;
}
@@ -14746,8 +14712,7 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
if (ret) {
dev_info(&pf->pdev->dev,
"couldn't add VEB, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
return -EPERM;
}
@@ -14757,16 +14722,14 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
if (ret) {
dev_info(&pf->pdev->dev,
"couldn't get VEB statistics idx, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
return -EPERM;
}
ret = i40e_veb_get_bw_info(veb);
if (ret) {
dev_info(&pf->pdev->dev,
"couldn't get VEB bw info, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
return -ENOENT;
}
@@ -14783,7 +14746,6 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
/**
* i40e_veb_setup - Set up a VEB
* @pf: board private structure
- * @flags: VEB setup flags
* @uplink_seid: the switch element to link to
* @vsi_seid: the initial VSI seid
* @enabled_tc: Enabled TC bit-map
@@ -14796,9 +14758,8 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
* Returns pointer to the successfully allocated VEB sw struct on
* success, otherwise returns NULL on failure.
**/
-struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
- u16 uplink_seid, u16 vsi_seid,
- u8 enabled_tc)
+struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 uplink_seid,
+ u16 vsi_seid, u8 enabled_tc)
{
struct i40e_vsi *vsi = NULL;
struct i40e_veb *veb;
@@ -14829,7 +14790,6 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
if (veb_idx < 0)
goto err_alloc;
veb = pf->veb[veb_idx];
- veb->flags = flags;
veb->uplink_seid = uplink_seid;
veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
@@ -14881,7 +14841,8 @@ static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
/* Main VEB? */
if (uplink_seid != pf->mac_seid)
break;
- if (pf->lan_veb >= I40E_MAX_VEB) {
+ veb = i40e_pf_get_main_veb(pf);
+ if (!veb) {
int v;
/* find existing or else empty VEB */
@@ -14895,12 +14856,15 @@ static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
pf->lan_veb = v;
}
}
- if (pf->lan_veb >= I40E_MAX_VEB)
+
+ /* Try again to get the main VEB as pf->lan_veb may have changed */
+ veb = i40e_pf_get_main_veb(pf);
+ if (!veb)
break;
- pf->veb[pf->lan_veb]->seid = seid;
- pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
- pf->veb[pf->lan_veb]->pf = pf;
+ veb->seid = seid;
+ veb->uplink_seid = pf->mac_seid;
+ veb->pf = pf;
break;
case I40E_SWITCH_ELEMENT_TYPE_VSI:
if (num_reported != 1)
@@ -14960,9 +14924,7 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
if (ret) {
dev_info(&pf->pdev->dev,
"get switch config failed err %d aq_err %s\n",
- ret,
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ ret, libie_aq_str(pf->hw.aq.asq_last_status));
kfree(aq_buf);
return -ENOENT;
}
@@ -14998,6 +14960,7 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
**/
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired)
{
+ struct i40e_vsi *main_vsi;
u16 flags = 0;
int ret;
@@ -15006,8 +14969,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui
if (ret) {
dev_info(&pf->pdev->dev,
"couldn't fetch switch config, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
return ret;
}
i40e_pf_reset_stats(pf);
@@ -15030,34 +14992,36 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui
valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 0,
NULL);
- if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
+ if (ret && pf->hw.aq.asq_last_status != LIBIE_AQ_RC_ESRCH) {
dev_info(&pf->pdev->dev,
"couldn't set switch config bits, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
/* not a fatal problem, just keep going */
}
pf->last_sw_conf_valid_flags = valid_flags;
}
/* first time setup */
- if (pf->lan_vsi == I40E_NO_VSI || reinit) {
- struct i40e_vsi *vsi = NULL;
+ main_vsi = i40e_pf_get_main_vsi(pf);
+ if (!main_vsi || reinit) {
+ struct i40e_veb *veb;
u16 uplink_seid;
/* Set up the PF VSI associated with the PF's main VSI
* that is already in the HW switch
*/
- if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
- uplink_seid = pf->veb[pf->lan_veb]->seid;
+ veb = i40e_pf_get_main_veb(pf);
+ if (veb)
+ uplink_seid = veb->seid;
else
uplink_seid = pf->mac_seid;
- if (pf->lan_vsi == I40E_NO_VSI)
- vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
+ if (!main_vsi)
+ main_vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN,
+ uplink_seid, 0);
else if (reinit)
- vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
- if (!vsi) {
+ main_vsi = i40e_vsi_reinit_setup(main_vsi);
+ if (!main_vsi) {
dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
i40e_cloud_filter_exit(pf);
i40e_fdir_teardown(pf);
@@ -15065,13 +15029,10 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui
}
} else {
/* force a reset of TC and queue layout configurations */
- u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
-
- pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
- pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
- i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
+ main_vsi->seid = pf->main_vsi_seid;
+ i40e_vsi_reconfig_tc(main_vsi);
}
- i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
+ i40e_vlan_stripping_disable(main_vsi);
i40e_fdir_sb_setup(pf);
@@ -15098,7 +15059,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui
rtnl_lock();
/* repopulate tunnel port filters */
- udp_tunnel_nic_reset_ntf(pf->vsi[pf->lan_vsi]->netdev);
+ udp_tunnel_nic_reset_ntf(main_vsi->netdev);
if (!lock_acquired)
rtnl_unlock();
@@ -15242,6 +15203,7 @@ static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
#define REMAIN(__x) (INFO_STRING_LEN - (__x))
static void i40e_print_features(struct i40e_pf *pf)
{
+ struct i40e_vsi *main_vsi = i40e_pf_get_main_vsi(pf);
struct i40e_hw *hw = &pf->hw;
char *buf;
int i;
@@ -15255,8 +15217,7 @@ static void i40e_print_features(struct i40e_pf *pf)
i += scnprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
#endif
i += scnprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
- pf->hw.func_caps.num_vsis,
- pf->vsi[pf->lan_vsi]->num_queue_pairs);
+ pf->hw.func_caps.num_vsis, main_vsi->num_queue_pairs);
if (test_bit(I40E_FLAG_RSS_ENA, pf->flags))
i += scnprintf(&buf[i], REMAIN(i), " RSS");
if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags))
@@ -15876,7 +15837,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pf->udp_tunnel_nic.set_port = i40e_udp_tunnel_set_port;
pf->udp_tunnel_nic.unset_port = i40e_udp_tunnel_unset_port;
- pf->udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
pf->udp_tunnel_nic.shared = &pf->udp_tunnel_shared;
pf->udp_tunnel_nic.tables[0].n_entries = I40E_MAX_PF_UDP_OFFLOAD_PORTS;
pf->udp_tunnel_nic.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN |
@@ -15920,7 +15880,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
goto err_vsis;
}
- INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list);
+
+ vsi = i40e_pf_get_main_vsi(pf);
+ INIT_LIST_HEAD(&vsi->ch_list);
/* if FDIR VSI was set up, start it now */
vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
@@ -15936,8 +15898,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
if (err)
dev_info(&pf->pdev->dev, "set phy mask fail, err %pe aq_err %s\n",
- ERR_PTR(err),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(err), libie_aq_str(pf->hw.aq.asq_last_status));
+
+ /* VF MDD event logs are rate limited to one second intervals */
+ ratelimit_state_init(&pf->mdd_message_rate_limit, 1 * HZ, 1);
/* Reconfigure hardware for allowing smaller MSS in the case
* of TSO, so that we avoid the MDD being fired and causing
@@ -15956,8 +15920,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
dev_info(&pf->pdev->dev, "link restart failed, err %pe aq_err %s\n",
ERR_PTR(err),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
}
/* The main driver is (mostly) up and happy. We need to set this state
* before setting up the misc vector or we get a race and the vector
@@ -16088,8 +16051,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
if (err)
dev_dbg(&pf->pdev->dev, "get requested speeds ret = %pe last_status = %s\n",
- ERR_PTR(err),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(err), libie_aq_str(pf->hw.aq.asq_last_status));
pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
/* set the FEC config due to the board capabilities */
@@ -16099,16 +16061,19 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
if (err)
dev_dbg(&pf->pdev->dev, "get supported phy types ret = %pe last_status = %s\n",
- ERR_PTR(err),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(err), libie_aq_str(pf->hw.aq.asq_last_status));
- /* make sure the MFS hasn't been set lower than the default */
#define MAX_FRAME_SIZE_DEFAULT 0x2600
- val = FIELD_GET(I40E_PRTGL_SAH_MFS_MASK,
- rd32(&pf->hw, I40E_PRTGL_SAH));
- if (val < MAX_FRAME_SIZE_DEFAULT)
- dev_warn(&pdev->dev, "MFS for port %x has been set below the default: %x\n",
- pf->hw.port, val);
+
+ err = i40e_aq_set_mac_config(hw, MAX_FRAME_SIZE_DEFAULT, NULL);
+ if (err)
+ dev_warn(&pdev->dev, "set mac config ret = %pe last_status = %s\n",
+ ERR_PTR(err), libie_aq_str(pf->hw.aq.asq_last_status));
+
+ /* Make sure the MFS is set to the expected value */
+ val = rd32(hw, I40E_PRTGL_SAH);
+ FIELD_MODIFY(I40E_PRTGL_SAH_MFS_MASK, &val, MAX_FRAME_SIZE_DEFAULT);
+ wr32(hw, I40E_PRTGL_SAH, val);
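The probe path now forces the MFS field to the 0x2600 default (after asking the firmware to do the same via i40e_aq_set_mac_config()) instead of merely warning when it is lower. Assuming the usual bitfield.h semantics, the FIELD_MODIFY() line is equivalent to this open-coded form:

	u32 val = rd32(hw, I40E_PRTGL_SAH);

	val &= ~I40E_PRTGL_SAH_MFS_MASK;
	val |= FIELD_PREP(I40E_PRTGL_SAH_MFS_MASK, MAX_FRAME_SIZE_DEFAULT);
	wr32(hw, I40E_PRTGL_SAH, val);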
/* Add a filter to drop all Flow control frames from any VSI from being
* transmitted. By doing so we stop a malicious VF from sending out
@@ -16223,7 +16188,7 @@ static void i40e_remove(struct pci_dev *pdev)
/* Client close must be called explicitly here because the timer
* has been stopped.
*/
- i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
+ i40e_notify_client_of_netdev_close(pf, false);
i40e_fdir_teardown(pf);
@@ -16304,6 +16269,139 @@ unmap:
}
/**
+ * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
+ * using the mac_address_write admin q function
+ * @pf: pointer to i40e_pf struct
+ **/
+static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
+{
+ struct i40e_vsi *main_vsi = i40e_pf_get_main_vsi(pf);
+ struct i40e_hw *hw = &pf->hw;
+ u8 mac_addr[6];
+ u16 flags = 0;
+ int ret;
+
+ /* Get current MAC address in case it's an LAA */
+ if (main_vsi && main_vsi->netdev) {
+ ether_addr_copy(mac_addr, main_vsi->netdev->dev_addr);
+ } else {
+ dev_err(&pf->pdev->dev,
+ "Failed to retrieve MAC address; using default\n");
+ ether_addr_copy(mac_addr, hw->mac.addr);
+ }
+
+ /* The FW expects the mac address write cmd to first be called with
+ * one of these flags before calling it again with the multicast
+ * enable flags.
+ */
+ flags = I40E_AQC_WRITE_TYPE_LAA_WOL;
+
+ if (hw->func_caps.flex10_enable && hw->partition_id != 1)
+ flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;
+
+ ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "Failed to update MAC address registers; cannot enable Multicast Magic packet wake up");
+ return;
+ }
+
+ flags = I40E_AQC_MC_MAG_EN
+ | I40E_AQC_WOL_PRESERVE_ON_PFR
+ | I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
+ ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
+ if (ret)
+ dev_err(&pf->pdev->dev,
+ "Failed to enable Multicast Magic Packet wake up\n");
+}
+
+/**
+ * i40e_io_suspend - suspend all IO operations
+ * @pf: pointer to i40e_pf struct
+ *
+ **/
+static int i40e_io_suspend(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+
+ set_bit(__I40E_DOWN, pf->state);
+
+ /* Ensure service task will not be running */
+ timer_delete_sync(&pf->service_timer);
+ cancel_work_sync(&pf->service_task);
+
+ /* Client close must be called explicitly here because the timer
+ * has been stopped.
+ */
+ i40e_notify_client_of_netdev_close(pf, false);
+
+ if (test_bit(I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE, pf->hw.caps) &&
+ pf->wol_en)
+ i40e_enable_mc_magic_wake(pf);
+
+ /* Since we're going to destroy queues during the
+ * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
+ * whole section
+ */
+ rtnl_lock();
+
+ i40e_prep_for_reset(pf);
+
+ wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
+ wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+
+ /* Clear the interrupt scheme and release our IRQs so that the system
+ * can safely hibernate even when there are a large number of CPUs.
+ * Otherwise hibernation might fail when mapping all the vectors back
+ * to CPU0.
+ */
+ i40e_clear_interrupt_scheme(pf);
+
+ rtnl_unlock();
+
+ return 0;
+}
+
+/**
+ * i40e_io_resume - resume IO operations
+ * @pf: pointer to i40e_pf struct
+ *
+ **/
+static int i40e_io_resume(struct i40e_pf *pf)
+{
+ struct device *dev = &pf->pdev->dev;
+ int err;
+
+ /* We need to hold the RTNL lock prior to restoring interrupt schemes,
+ * since we're going to be restoring queues
+ */
+ rtnl_lock();
+
+ /* We cleared the interrupt scheme when we suspended, so we need to
+ * restore it now to resume device functionality.
+ */
+ err = i40e_restore_interrupt_scheme(pf);
+ if (err) {
+ dev_err(dev, "Cannot restore interrupt scheme: %d\n",
+ err);
+ }
+
+ clear_bit(__I40E_DOWN, pf->state);
+ i40e_reset_and_rebuild(pf, false, true);
+
+ rtnl_unlock();
+
+ /* Clear suspended state last after everything is recovered */
+ clear_bit(__I40E_SUSPENDED, pf->state);
+
+ /* Restart the service task */
+ mod_timer(&pf->service_timer,
+ round_jiffies(jiffies + pf->service_timer_period));
+
+ return 0;
+}
+
+/**
* i40e_pci_error_detected - warning that something funky happened in PCI land
* @pdev: PCI device information struct
* @error: the type of PCI error
@@ -16327,7 +16425,7 @@ static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
/* shutdown all operations */
if (!test_bit(__I40E_SUSPENDED, pf->state))
- i40e_prep_for_reset(pf);
+ i40e_io_suspend(pf);
/* Request a slot reset */
return PCI_ERS_RESULT_NEED_RESET;
@@ -16349,14 +16447,14 @@ static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
u32 reg;
dev_dbg(&pdev->dev, "%s\n", __func__);
- if (pci_enable_device_mem(pdev)) {
+ /* enable I/O and memory of the device */
+ if (pci_enable_device(pdev)) {
dev_info(&pdev->dev,
"Cannot re-enable PCI device after reset.\n");
result = PCI_ERS_RESULT_DISCONNECT;
} else {
pci_set_master(pdev);
pci_restore_state(pdev);
- pci_save_state(pdev);
pci_wake_from_d3(pdev, false);
reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
@@ -16412,54 +16510,7 @@ static void i40e_pci_error_resume(struct pci_dev *pdev)
if (test_bit(__I40E_SUSPENDED, pf->state))
return;
- i40e_handle_reset_warning(pf, false);
-}
-
-/**
- * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
- * using the mac_address_write admin q function
- * @pf: pointer to i40e_pf struct
- **/
-static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
-{
- struct i40e_hw *hw = &pf->hw;
- u8 mac_addr[6];
- u16 flags = 0;
- int ret;
-
- /* Get current MAC address in case it's an LAA */
- if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) {
- ether_addr_copy(mac_addr,
- pf->vsi[pf->lan_vsi]->netdev->dev_addr);
- } else {
- dev_err(&pf->pdev->dev,
- "Failed to retrieve MAC address; using default\n");
- ether_addr_copy(mac_addr, hw->mac.addr);
- }
-
- /* The FW expects the mac address write cmd to first be called with
- * one of these flags before calling it again with the multicast
- * enable flags.
- */
- flags = I40E_AQC_WRITE_TYPE_LAA_WOL;
-
- if (hw->func_caps.flex10_enable && hw->partition_id != 1)
- flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;
-
- ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
- if (ret) {
- dev_err(&pf->pdev->dev,
- "Failed to update MAC address registers; cannot enable Multicast Magic packet wake up");
- return;
- }
-
- flags = I40E_AQC_MC_MAG_EN
- | I40E_AQC_WOL_PRESERVE_ON_PFR
- | I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
- ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
- if (ret)
- dev_err(&pf->pdev->dev,
- "Failed to enable Multicast Magic Packet wake up\n");
+ i40e_io_resume(pf);
}
/**
@@ -16474,7 +16525,7 @@ static void i40e_shutdown(struct pci_dev *pdev)
set_bit(__I40E_SUSPENDED, pf->state);
set_bit(__I40E_DOWN, pf->state);
- del_timer_sync(&pf->service_timer);
+ timer_delete_sync(&pf->service_timer);
cancel_work_sync(&pf->service_task);
i40e_cloud_filter_exit(pf);
i40e_fdir_teardown(pf);
@@ -16482,7 +16533,7 @@ static void i40e_shutdown(struct pci_dev *pdev)
/* Client close must be called explicitly here because the timer
* has been stopped.
*/
- i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
+ i40e_notify_client_of_netdev_close(pf, false);
if (test_bit(I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE, pf->hw.caps) &&
pf->wol_en)
@@ -16518,93 +16569,28 @@ static void i40e_shutdown(struct pci_dev *pdev)
* i40e_suspend - PM callback for moving to D3
* @dev: generic device information structure
**/
-static int __maybe_unused i40e_suspend(struct device *dev)
+static int i40e_suspend(struct device *dev)
{
struct i40e_pf *pf = dev_get_drvdata(dev);
- struct i40e_hw *hw = &pf->hw;
/* If we're already suspended, then there is nothing to do */
if (test_and_set_bit(__I40E_SUSPENDED, pf->state))
return 0;
-
- set_bit(__I40E_DOWN, pf->state);
-
- /* Ensure service task will not be running */
- del_timer_sync(&pf->service_timer);
- cancel_work_sync(&pf->service_task);
-
- /* Client close must be called explicitly here because the timer
- * has been stopped.
- */
- i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
-
- if (test_bit(I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE, pf->hw.caps) &&
- pf->wol_en)
- i40e_enable_mc_magic_wake(pf);
-
- /* Since we're going to destroy queues during the
- * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
- * whole section
- */
- rtnl_lock();
-
- i40e_prep_for_reset(pf);
-
- wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
- wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
-
- /* Clear the interrupt scheme and release our IRQs so that the system
- * can safely hibernate even when there are a large number of CPUs.
- * Otherwise hibernation might fail when mapping all the vectors back
- * to CPU0.
- */
- i40e_clear_interrupt_scheme(pf);
-
- rtnl_unlock();
-
- return 0;
+ return i40e_io_suspend(pf);
}
/**
* i40e_resume - PM callback for waking up from D3
* @dev: generic device information structure
**/
-static int __maybe_unused i40e_resume(struct device *dev)
+static int i40e_resume(struct device *dev)
{
struct i40e_pf *pf = dev_get_drvdata(dev);
- int err;
/* If we're not suspended, then there is nothing to do */
if (!test_bit(__I40E_SUSPENDED, pf->state))
return 0;
-
- /* We need to hold the RTNL lock prior to restoring interrupt schemes,
- * since we're going to be restoring queues
- */
- rtnl_lock();
-
- /* We cleared the interrupt scheme when we suspended, so we need to
- * restore it now to resume device functionality.
- */
- err = i40e_restore_interrupt_scheme(pf);
- if (err) {
- dev_err(dev, "Cannot restore interrupt scheme: %d\n",
- err);
- }
-
- clear_bit(__I40E_DOWN, pf->state);
- i40e_reset_and_rebuild(pf, false, true);
-
- rtnl_unlock();
-
- /* Clear suspended state last after everything is recovered */
- clear_bit(__I40E_SUSPENDED, pf->state);
-
- /* Restart the service task */
- mod_timer(&pf->service_timer,
- round_jiffies(jiffies + pf->service_timer_period));
-
- return 0;
+ return i40e_io_resume(pf);
}
static const struct pci_error_handlers i40e_err_handler = {
@@ -16615,16 +16601,14 @@ static const struct pci_error_handlers i40e_err_handler = {
.resume = i40e_pci_error_resume,
};
-static SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume);
static struct pci_driver i40e_driver = {
.name = i40e_driver_name,
.id_table = i40e_pci_tbl,
.probe = i40e_probe,
.remove = i40e_remove,
- .driver = {
- .pm = &i40e_pm_ops,
- },
+ .driver.pm = pm_sleep_ptr(&i40e_pm_ops),
.shutdown = i40e_shutdown,
.err_handler = &i40e_err_handler,
.sriov_configure = i40e_pci_sriov_configure,
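DEFINE_SIMPLE_DEV_PM_OPS() plus pm_sleep_ptr() let the suspend/resume callbacks be built unconditionally (hence the dropped __maybe_unused annotations) while the .driver.pm pointer becomes NULL when CONFIG_PM_SLEEP is disabled, so the unreferenced ops can be discarded. The general pattern, sketched for a hypothetical driver:

	static int foo_suspend(struct device *dev) { /* ... */ return 0; }
	static int foo_resume(struct device *dev) { /* ... */ return 0; }

	static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

	static struct pci_driver foo_driver = {
		/* .name, .probe, .remove etc. elided */
		/* compiles to NULL when CONFIG_PM_SLEEP=n */
		.driver.pm = pm_sleep_ptr(&foo_pm_ops),
	};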
@@ -16650,7 +16634,7 @@ static int __init i40e_init_module(void)
* since we need to be able to guarantee forward progress even under
* memory pressure.
*/
- i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
+ i40e_wq = alloc_workqueue("%s", WQ_PERCPU, 0, i40e_driver_name);
if (!i40e_wq) {
pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
return -ENOMEM;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 605fd82f5d20..ed3c54e36be3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -734,37 +734,7 @@ int i40e_validate_nvm_checksum(struct i40e_hw *hw,
return ret_code;
}
-static int i40e_nvmupd_state_init(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
-static int i40e_nvmupd_state_reading(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
-static int i40e_nvmupd_state_writing(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *errno);
-static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- int *perrno);
-static int i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- int *perrno);
-static int i40e_nvmupd_nvm_write(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
-static int i40e_nvmupd_nvm_read(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
-static int i40e_nvmupd_exec_aq(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
-static int i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
-static int i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
-static inline u8 i40e_nvmupd_get_module(u32 val)
+static u8 i40e_nvmupd_get_module(u32 val)
{
return (u8)(val & I40E_NVM_MOD_PNT_MASK);
}
@@ -799,121 +769,408 @@ static const char * const i40e_nvm_update_state_str[] = {
};
/**
- * i40e_nvmupd_command - Process an NVM update command
+ * i40e_nvmupd_validate_command - Validate given command
* @hw: pointer to hardware structure
- * @cmd: pointer to nvm update command
- * @bytes: pointer to the data buffer
+ * @cmd: pointer to nvm update command buffer
* @perrno: pointer to return error code
*
- * Dispatches command depending on what update state is current
+ * Return one of the valid command types or I40E_NVMUPD_INVALID
**/
-int i40e_nvmupd_command(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
+static enum i40e_nvmupd_cmd
+i40e_nvmupd_validate_command(struct i40e_hw *hw, struct i40e_nvm_access *cmd,
+ int *perrno)
{
enum i40e_nvmupd_cmd upd_cmd;
- int status;
-
- /* assume success */
- *perrno = 0;
+ u8 module, transaction;
- /* early check for status command and debug msgs */
- upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
+ /* anything that doesn't match a recognized case is an error */
+ upd_cmd = I40E_NVMUPD_INVALID;
- i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
- i40e_nvm_update_state_str[upd_cmd],
- hw->nvmupd_state,
- hw->nvm_release_on_done, hw->nvm_wait_opcode,
- cmd->command, cmd->config, cmd->offset, cmd->data_size);
+ transaction = i40e_nvmupd_get_transaction(cmd->config);
+ module = i40e_nvmupd_get_module(cmd->config);
- if (upd_cmd == I40E_NVMUPD_INVALID) {
- *perrno = -EFAULT;
+ /* limits on data size */
+ if (cmd->data_size < 1 || cmd->data_size > I40E_NVMUPD_MAX_DATA) {
i40e_debug(hw, I40E_DEBUG_NVM,
- "i40e_nvmupd_validate_command returns %d errno %d\n",
- upd_cmd, *perrno);
+ "%s data_size %d\n", __func__, cmd->data_size);
+ *perrno = -EFAULT;
+ return I40E_NVMUPD_INVALID;
}
- /* a status request returns immediately rather than
- * going into the state machine
- */
- if (upd_cmd == I40E_NVMUPD_STATUS) {
- if (!cmd->data_size) {
- *perrno = -EFAULT;
- return -EINVAL;
+ switch (cmd->command) {
+ case I40E_NVM_READ:
+ switch (transaction) {
+ case I40E_NVM_CON:
+ upd_cmd = I40E_NVMUPD_READ_CON;
+ break;
+ case I40E_NVM_SNT:
+ upd_cmd = I40E_NVMUPD_READ_SNT;
+ break;
+ case I40E_NVM_LCB:
+ upd_cmd = I40E_NVMUPD_READ_LCB;
+ break;
+ case I40E_NVM_SA:
+ upd_cmd = I40E_NVMUPD_READ_SA;
+ break;
+ case I40E_NVM_EXEC:
+ if (module == 0xf)
+ upd_cmd = I40E_NVMUPD_STATUS;
+ else if (module == 0)
+ upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
+ break;
+ case I40E_NVM_AQE:
+ upd_cmd = I40E_NVMUPD_GET_AQ_EVENT;
+ break;
}
+ break;
- bytes[0] = hw->nvmupd_state;
-
- if (cmd->data_size >= 4) {
- bytes[1] = 0;
- *((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
+ case I40E_NVM_WRITE:
+ switch (transaction) {
+ case I40E_NVM_CON:
+ upd_cmd = I40E_NVMUPD_WRITE_CON;
+ break;
+ case I40E_NVM_SNT:
+ upd_cmd = I40E_NVMUPD_WRITE_SNT;
+ break;
+ case I40E_NVM_LCB:
+ upd_cmd = I40E_NVMUPD_WRITE_LCB;
+ break;
+ case I40E_NVM_SA:
+ upd_cmd = I40E_NVMUPD_WRITE_SA;
+ break;
+ case I40E_NVM_ERA:
+ upd_cmd = I40E_NVMUPD_WRITE_ERA;
+ break;
+ case I40E_NVM_CSUM:
+ upd_cmd = I40E_NVMUPD_CSUM_CON;
+ break;
+ case (I40E_NVM_CSUM | I40E_NVM_SA):
+ upd_cmd = I40E_NVMUPD_CSUM_SA;
+ break;
+ case (I40E_NVM_CSUM | I40E_NVM_LCB):
+ upd_cmd = I40E_NVMUPD_CSUM_LCB;
+ break;
+ case I40E_NVM_EXEC:
+ if (module == 0)
+ upd_cmd = I40E_NVMUPD_EXEC_AQ;
+ break;
}
+ break;
+ }
- /* Clear error status on read */
- if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR)
- hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ return upd_cmd;
+}
- return 0;
+/**
+ * i40e_nvmupd_nvm_erase - Erase an NVM module
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @perrno: pointer to return error code
+ *
+ * module, offset, data_size and data are in cmd structure
+ **/
+static int i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ int *perrno)
+{
+ struct i40e_asq_cmd_details cmd_details;
+ u8 module, transaction;
+ int status = 0;
+ bool last;
+
+ transaction = i40e_nvmupd_get_transaction(cmd->config);
+ module = i40e_nvmupd_get_module(cmd->config);
+ last = (transaction & I40E_NVM_LCB);
+
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+ status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
+ last, &cmd_details);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "%s mod 0x%x off 0x%x len 0x%x\n",
+ __func__, module, cmd->offset, cmd->data_size);
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "%s status %d aq %d\n",
+ __func__, status, hw->aq.asq_last_status);
+ *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
}
- /* Clear status even it is not read and log */
- if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
+ return status;
+}
+
+/**
+ * i40e_nvmupd_nvm_write - Write NVM
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * module, offset, data_size and data are in cmd structure
+ **/
+static int i40e_nvmupd_nvm_write(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ struct i40e_asq_cmd_details cmd_details;
+ u8 module, transaction;
+ u8 preservation_flags;
+ int status = 0;
+ bool last;
+
+ transaction = i40e_nvmupd_get_transaction(cmd->config);
+ module = i40e_nvmupd_get_module(cmd->config);
+ last = (transaction & I40E_NVM_LCB);
+ preservation_flags = i40e_nvmupd_get_preservation_flags(cmd->config);
+
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+ status = i40e_aq_update_nvm(hw, module, cmd->offset,
+ (u16)cmd->data_size, bytes, last,
+ preservation_flags, &cmd_details);
+ if (status) {
i40e_debug(hw, I40E_DEBUG_NVM,
- "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n");
- hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ "%s mod 0x%x off 0x%x len 0x%x\n",
+ __func__, module, cmd->offset, cmd->data_size);
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "%s status %d aq %d\n",
+ __func__, status, hw->aq.asq_last_status);
+ *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
}
- /* Acquire lock to prevent race condition where adminq_task
- * can execute after i40e_nvmupd_nvm_read/write but before state
- * variables (nvm_wait_opcode, nvm_release_on_done) are updated.
- *
- * During NVMUpdate, it is observed that lock could be held for
- * ~5ms for most commands. However lock is held for ~60ms for
- * NVMUPD_CSUM_LCB command.
- */
- mutex_lock(&hw->aq.arq_mutex);
- switch (hw->nvmupd_state) {
- case I40E_NVMUPD_STATE_INIT:
- status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
- break;
+ return status;
+}
- case I40E_NVMUPD_STATE_READING:
- status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
- break;
+/**
+ * i40e_nvmupd_nvm_read - Read NVM
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * cmd structure contains identifiers and data buffer
+ **/
+static int i40e_nvmupd_nvm_read(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ struct i40e_asq_cmd_details cmd_details;
+ u8 module, transaction;
+ int status;
+ bool last;
- case I40E_NVMUPD_STATE_WRITING:
- status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
- break;
+ transaction = i40e_nvmupd_get_transaction(cmd->config);
+ module = i40e_nvmupd_get_module(cmd->config);
+ last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
- case I40E_NVMUPD_STATE_INIT_WAIT:
- case I40E_NVMUPD_STATE_WRITE_WAIT:
- /* if we need to stop waiting for an event, clear
- * the wait info and return before doing anything else
- */
- if (cmd->offset == 0xffff) {
- i40e_nvmupd_clear_wait_state(hw);
- status = 0;
- break;
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+ status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
+ bytes, last, &cmd_details);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "%s mod 0x%x off 0x%x len 0x%x\n",
+ __func__, module, cmd->offset, cmd->data_size);
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "%s status %d aq %d\n",
+ __func__, status, hw->aq.asq_last_status);
+ *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_nvmupd_exec_aq - Run an AQ command
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * cmd structure contains identifiers and data buffer
+ **/
+static int i40e_nvmupd_exec_aq(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ struct i40e_asq_cmd_details cmd_details;
+ struct libie_aq_desc *aq_desc;
+ u32 buff_size = 0;
+ u8 *buff = NULL;
+ u32 aq_desc_len;
+ u32 aq_data_len;
+ int status;
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+ if (cmd->offset == 0xffff)
+ return 0;
+
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+ aq_desc_len = sizeof(struct libie_aq_desc);
+ memset(&hw->nvm_wb_desc, 0, aq_desc_len);
+
+ /* get the aq descriptor */
+ if (cmd->data_size < aq_desc_len) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
+ cmd->data_size, aq_desc_len);
+ *perrno = -EINVAL;
+ return -EINVAL;
+ }
+ aq_desc = (struct libie_aq_desc *)bytes;
+
+ /* if data buffer needed, make sure it's ready */
+ aq_data_len = cmd->data_size - aq_desc_len;
+ buff_size = max_t(u32, aq_data_len, le16_to_cpu(aq_desc->datalen));
+ if (buff_size) {
+ if (!hw->nvm_buff.va) {
+ status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
+ hw->aq.asq_buf_size);
+ if (status)
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
+ status);
}
- status = -EBUSY;
- *perrno = -EBUSY;
- break;
+ if (hw->nvm_buff.va) {
+ buff = hw->nvm_buff.va;
+ memcpy(buff, &bytes[aq_desc_len], aq_data_len);
+ }
+ }
- default:
- /* invalid state, should never happen */
+ if (cmd->offset)
+ memset(&hw->nvm_aq_event_desc, 0, aq_desc_len);
+
+ /* and away we go! */
+ status = i40e_asq_send_command(hw, aq_desc, buff,
+ buff_size, &cmd_details);
+ if (status) {
i40e_debug(hw, I40E_DEBUG_NVM,
- "NVMUPD: no such state %d\n", hw->nvmupd_state);
- status = -EOPNOTSUPP;
- *perrno = -ESRCH;
- break;
+ "%s err %pe aq_err %s\n",
+ __func__, ERR_PTR(status),
+ libie_aq_str(hw->aq.asq_last_status));
+ *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ return status;
+ }
+
+ /* should we wait for a followup event? */
+ if (cmd->offset) {
+ hw->nvm_wait_opcode = cmd->offset;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
}
- mutex_unlock(&hw->aq.arq_mutex);
return status;
}
/**
+ * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * cmd structure contains identifiers and data buffer
+ **/
+static int i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ u32 aq_total_len;
+ u32 aq_desc_len;
+ int remainder;
+ u8 *buff;
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+
+ aq_desc_len = sizeof(struct libie_aq_desc);
+ aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_wb_desc.datalen);
+
+ /* check offset range */
+ if (cmd->offset > aq_total_len) {
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
+ __func__, cmd->offset, aq_total_len);
+ *perrno = -EINVAL;
+ return -EINVAL;
+ }
+
+ /* check copylength range */
+ if (cmd->data_size > (aq_total_len - cmd->offset)) {
+ int new_len = aq_total_len - cmd->offset;
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
+ __func__, cmd->data_size, new_len);
+ cmd->data_size = new_len;
+ }
+
+ remainder = cmd->data_size;
+ if (cmd->offset < aq_desc_len) {
+ u32 len = aq_desc_len - cmd->offset;
+
+ len = min(len, cmd->data_size);
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
+ __func__, cmd->offset, cmd->offset + len);
+
+ buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
+ memcpy(bytes, buff, len);
+
+ bytes += len;
+ remainder -= len;
+ buff = hw->nvm_buff.va;
+ } else {
+ buff = hw->nvm_buff.va + (cmd->offset - aq_desc_len);
+ }
+
+ if (remainder > 0) {
+ int start_byte = buff - (u8 *)hw->nvm_buff.va;
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
+ __func__, start_byte, start_byte + remainder);
+ memcpy(bytes, buff, remainder);
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_nvmupd_get_aq_event - Get the Admin Queue event from previous exec_aq
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * cmd structure contains identifiers and data buffer
+ **/
+static int i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ u32 aq_total_len;
+ u32 aq_desc_len;
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+
+ aq_desc_len = sizeof(struct libie_aq_desc);
+ aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_aq_event_desc.datalen);
+
+ /* check copylength range */
+ if (cmd->data_size > aq_total_len) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "%s: copy length %d too big, trimming to %d\n",
+ __func__, cmd->data_size, aq_total_len);
+ cmd->data_size = aq_total_len;
+ }
+
+ memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size);
+
+ return 0;
+}
+
+/**
* i40e_nvmupd_state_init - Handle NVM update state Init
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
@@ -937,7 +1194,7 @@ static int i40e_nvmupd_state_init(struct i40e_hw *hw,
status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (status) {
*perrno = i40e_aq_rc_to_posix(status,
- hw->aq.asq_last_status);
+ hw->aq.asq_last_status);
} else {
status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
i40e_release_nvm(hw);
@@ -948,7 +1205,7 @@ static int i40e_nvmupd_state_init(struct i40e_hw *hw,
status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (status) {
*perrno = i40e_aq_rc_to_posix(status,
- hw->aq.asq_last_status);
+ hw->aq.asq_last_status);
} else {
status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
if (status)
@@ -962,7 +1219,7 @@ static int i40e_nvmupd_state_init(struct i40e_hw *hw,
status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
if (status) {
*perrno = i40e_aq_rc_to_posix(status,
- hw->aq.asq_last_status);
+ hw->aq.asq_last_status);
} else {
status = i40e_nvmupd_nvm_erase(hw, cmd, perrno);
if (status) {
@@ -979,7 +1236,7 @@ static int i40e_nvmupd_state_init(struct i40e_hw *hw,
status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
if (status) {
*perrno = i40e_aq_rc_to_posix(status,
- hw->aq.asq_last_status);
+ hw->aq.asq_last_status);
} else {
status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
if (status) {
@@ -996,7 +1253,7 @@ static int i40e_nvmupd_state_init(struct i40e_hw *hw,
status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
if (status) {
*perrno = i40e_aq_rc_to_posix(status,
- hw->aq.asq_last_status);
+ hw->aq.asq_last_status);
} else {
status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
if (status) {
@@ -1012,7 +1269,7 @@ static int i40e_nvmupd_state_init(struct i40e_hw *hw,
status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
if (status) {
*perrno = i40e_aq_rc_to_posix(status,
- hw->aq.asq_last_status);
+ hw->aq.asq_last_status);
} else {
status = i40e_update_nvm_checksum(hw);
if (status) {
@@ -1185,7 +1442,7 @@ retry:
* so here we try to reacquire the semaphore then retry the write.
* We only do one retry, then give up.
*/
- if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
+ if (status && hw->aq.asq_last_status == LIBIE_AQ_RC_EBUSY &&
!retry_attempt) {
u32 old_asq_status = hw->aq.asq_last_status;
int old_status = status;
@@ -1215,457 +1472,168 @@ retry:
}
/**
- * i40e_nvmupd_clear_wait_state - clear wait state on hw
- * @hw: pointer to the hardware structure
- **/
-void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw)
-{
- i40e_debug(hw, I40E_DEBUG_NVM,
- "NVMUPD: clearing wait on opcode 0x%04x\n",
- hw->nvm_wait_opcode);
-
- if (hw->nvm_release_on_done) {
- i40e_release_nvm(hw);
- hw->nvm_release_on_done = false;
- }
- hw->nvm_wait_opcode = 0;
-
- if (hw->aq.arq_last_status) {
- hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
- return;
- }
-
- switch (hw->nvmupd_state) {
- case I40E_NVMUPD_STATE_INIT_WAIT:
- hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
- break;
-
- case I40E_NVMUPD_STATE_WRITE_WAIT:
- hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
- break;
-
- default:
- break;
- }
-}
-
-/**
- * i40e_nvmupd_check_wait_event - handle NVM update operation events
- * @hw: pointer to the hardware structure
- * @opcode: the event that just happened
- * @desc: AdminQ descriptor
- **/
-void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
- struct i40e_aq_desc *desc)
-{
- u32 aq_desc_len = sizeof(struct i40e_aq_desc);
-
- if (opcode == hw->nvm_wait_opcode) {
- memcpy(&hw->nvm_aq_event_desc, desc, aq_desc_len);
- i40e_nvmupd_clear_wait_state(hw);
- }
-}
-
-/**
- * i40e_nvmupd_validate_command - Validate given command
- * @hw: pointer to hardware structure
- * @cmd: pointer to nvm update command buffer
- * @perrno: pointer to return error code
- *
- * Return one of the valid command types or I40E_NVMUPD_INVALID
- **/
-static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- int *perrno)
-{
- enum i40e_nvmupd_cmd upd_cmd;
- u8 module, transaction;
-
- /* anything that doesn't match a recognized case is an error */
- upd_cmd = I40E_NVMUPD_INVALID;
-
- transaction = i40e_nvmupd_get_transaction(cmd->config);
- module = i40e_nvmupd_get_module(cmd->config);
-
- /* limits on data size */
- if ((cmd->data_size < 1) ||
- (cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
- i40e_debug(hw, I40E_DEBUG_NVM,
- "i40e_nvmupd_validate_command data_size %d\n",
- cmd->data_size);
- *perrno = -EFAULT;
- return I40E_NVMUPD_INVALID;
- }
-
- switch (cmd->command) {
- case I40E_NVM_READ:
- switch (transaction) {
- case I40E_NVM_CON:
- upd_cmd = I40E_NVMUPD_READ_CON;
- break;
- case I40E_NVM_SNT:
- upd_cmd = I40E_NVMUPD_READ_SNT;
- break;
- case I40E_NVM_LCB:
- upd_cmd = I40E_NVMUPD_READ_LCB;
- break;
- case I40E_NVM_SA:
- upd_cmd = I40E_NVMUPD_READ_SA;
- break;
- case I40E_NVM_EXEC:
- if (module == 0xf)
- upd_cmd = I40E_NVMUPD_STATUS;
- else if (module == 0)
- upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
- break;
- case I40E_NVM_AQE:
- upd_cmd = I40E_NVMUPD_GET_AQ_EVENT;
- break;
- }
- break;
-
- case I40E_NVM_WRITE:
- switch (transaction) {
- case I40E_NVM_CON:
- upd_cmd = I40E_NVMUPD_WRITE_CON;
- break;
- case I40E_NVM_SNT:
- upd_cmd = I40E_NVMUPD_WRITE_SNT;
- break;
- case I40E_NVM_LCB:
- upd_cmd = I40E_NVMUPD_WRITE_LCB;
- break;
- case I40E_NVM_SA:
- upd_cmd = I40E_NVMUPD_WRITE_SA;
- break;
- case I40E_NVM_ERA:
- upd_cmd = I40E_NVMUPD_WRITE_ERA;
- break;
- case I40E_NVM_CSUM:
- upd_cmd = I40E_NVMUPD_CSUM_CON;
- break;
- case (I40E_NVM_CSUM|I40E_NVM_SA):
- upd_cmd = I40E_NVMUPD_CSUM_SA;
- break;
- case (I40E_NVM_CSUM|I40E_NVM_LCB):
- upd_cmd = I40E_NVMUPD_CSUM_LCB;
- break;
- case I40E_NVM_EXEC:
- if (module == 0)
- upd_cmd = I40E_NVMUPD_EXEC_AQ;
- break;
- }
- break;
- }
-
- return upd_cmd;
-}
-
-/**
- * i40e_nvmupd_exec_aq - Run an AQ command
+ * i40e_nvmupd_command - Process an NVM update command
* @hw: pointer to hardware structure
- * @cmd: pointer to nvm update command buffer
+ * @cmd: pointer to nvm update command
* @bytes: pointer to the data buffer
* @perrno: pointer to return error code
*
- * cmd structure contains identifiers and data buffer
+ * Dispatches command depending on what update state is current
**/
-static int i40e_nvmupd_exec_aq(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
+int i40e_nvmupd_command(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
{
- struct i40e_asq_cmd_details cmd_details;
- struct i40e_aq_desc *aq_desc;
- u32 buff_size = 0;
- u8 *buff = NULL;
- u32 aq_desc_len;
- u32 aq_data_len;
+ enum i40e_nvmupd_cmd upd_cmd;
int status;
- i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
- if (cmd->offset == 0xffff)
- return 0;
+ /* assume success */
+ *perrno = 0;
- memset(&cmd_details, 0, sizeof(cmd_details));
- cmd_details.wb_desc = &hw->nvm_wb_desc;
+ /* early check for status command and debug msgs */
+ upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
- aq_desc_len = sizeof(struct i40e_aq_desc);
- memset(&hw->nvm_wb_desc, 0, aq_desc_len);
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
+ i40e_nvm_update_state_str[upd_cmd],
+ hw->nvmupd_state,
+ hw->nvm_release_on_done, hw->nvm_wait_opcode,
+ cmd->command, cmd->config, cmd->offset, cmd->data_size);
- /* get the aq descriptor */
- if (cmd->data_size < aq_desc_len) {
+ if (upd_cmd == I40E_NVMUPD_INVALID) {
+ *perrno = -EFAULT;
i40e_debug(hw, I40E_DEBUG_NVM,
- "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
- cmd->data_size, aq_desc_len);
- *perrno = -EINVAL;
- return -EINVAL;
+ "i40e_nvmupd_validate_command returns %d errno %d\n",
+ upd_cmd, *perrno);
}
- aq_desc = (struct i40e_aq_desc *)bytes;
- /* if data buffer needed, make sure it's ready */
- aq_data_len = cmd->data_size - aq_desc_len;
- buff_size = max_t(u32, aq_data_len, le16_to_cpu(aq_desc->datalen));
- if (buff_size) {
- if (!hw->nvm_buff.va) {
- status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
- hw->aq.asq_buf_size);
- if (status)
- i40e_debug(hw, I40E_DEBUG_NVM,
- "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
- status);
- }
-
- if (hw->nvm_buff.va) {
- buff = hw->nvm_buff.va;
- memcpy(buff, &bytes[aq_desc_len], aq_data_len);
+ /* a status request returns immediately rather than
+ * going into the state machine
+ */
+ if (upd_cmd == I40E_NVMUPD_STATUS) {
+ if (!cmd->data_size) {
+ *perrno = -EFAULT;
+ return -EINVAL;
}
- }
-
- if (cmd->offset)
- memset(&hw->nvm_aq_event_desc, 0, aq_desc_len);
-
- /* and away we go! */
- status = i40e_asq_send_command(hw, aq_desc, buff,
- buff_size, &cmd_details);
- if (status) {
- i40e_debug(hw, I40E_DEBUG_NVM,
- "%s err %pe aq_err %s\n",
- __func__, ERR_PTR(status),
- i40e_aq_str(hw, hw->aq.asq_last_status));
- *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
- return status;
- }
-
- /* should we wait for a followup event? */
- if (cmd->offset) {
- hw->nvm_wait_opcode = cmd->offset;
- hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
- }
-
- return status;
-}
-/**
- * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
- * @hw: pointer to hardware structure
- * @cmd: pointer to nvm update command buffer
- * @bytes: pointer to the data buffer
- * @perrno: pointer to return error code
- *
- * cmd structure contains identifiers and data buffer
- **/
-static int i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
-{
- u32 aq_total_len;
- u32 aq_desc_len;
- int remainder;
- u8 *buff;
-
- i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
-
- aq_desc_len = sizeof(struct i40e_aq_desc);
- aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_wb_desc.datalen);
+ bytes[0] = hw->nvmupd_state;
- /* check offset range */
- if (cmd->offset > aq_total_len) {
- i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
- __func__, cmd->offset, aq_total_len);
- *perrno = -EINVAL;
- return -EINVAL;
- }
+ if (cmd->data_size >= 4) {
+ bytes[1] = 0;
+ *((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
+ }
- /* check copylength range */
- if (cmd->data_size > (aq_total_len - cmd->offset)) {
- int new_len = aq_total_len - cmd->offset;
+ /* Clear error status on read */
+ if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR)
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
- i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
- __func__, cmd->data_size, new_len);
- cmd->data_size = new_len;
+ return 0;
}
- remainder = cmd->data_size;
- if (cmd->offset < aq_desc_len) {
- u32 len = aq_desc_len - cmd->offset;
-
- len = min(len, cmd->data_size);
- i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
- __func__, cmd->offset, cmd->offset + len);
-
- buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
- memcpy(bytes, buff, len);
-
- bytes += len;
- remainder -= len;
- buff = hw->nvm_buff.va;
- } else {
- buff = hw->nvm_buff.va + (cmd->offset - aq_desc_len);
+ /* Clear status even if it is not read, and log */
+ if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n");
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
}
- if (remainder > 0) {
- int start_byte = buff - (u8 *)hw->nvm_buff.va;
-
- i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
- __func__, start_byte, start_byte + remainder);
- memcpy(bytes, buff, remainder);
- }
+ /* Acquire lock to prevent race condition where adminq_task
+ * can execute after i40e_nvmupd_nvm_read/write but before state
+ * variables (nvm_wait_opcode, nvm_release_on_done) are updated.
+ *
+ * During NVMUpdate, it is observed that lock could be held for
+ * ~5ms for most commands. However lock is held for ~60ms for
+ * NVMUPD_CSUM_LCB command.
+ */
+ mutex_lock(&hw->aq.arq_mutex);
+ switch (hw->nvmupd_state) {
+ case I40E_NVMUPD_STATE_INIT:
+ status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
+ break;
- return 0;
-}
+ case I40E_NVMUPD_STATE_READING:
+ status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
+ break;
-/**
- * i40e_nvmupd_get_aq_event - Get the Admin Queue event from previous exec_aq
- * @hw: pointer to hardware structure
- * @cmd: pointer to nvm update command buffer
- * @bytes: pointer to the data buffer
- * @perrno: pointer to return error code
- *
- * cmd structure contains identifiers and data buffer
- **/
-static int i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
-{
- u32 aq_total_len;
- u32 aq_desc_len;
+ case I40E_NVMUPD_STATE_WRITING:
+ status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
+ break;
- i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+ case I40E_NVMUPD_STATE_INIT_WAIT:
+ case I40E_NVMUPD_STATE_WRITE_WAIT:
+ /* if we need to stop waiting for an event, clear
+ * the wait info and return before doing anything else
+ */
+ if (cmd->offset == 0xffff) {
+ i40e_nvmupd_clear_wait_state(hw);
+ status = 0;
+ break;
+ }
- aq_desc_len = sizeof(struct i40e_aq_desc);
- aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_aq_event_desc.datalen);
+ status = -EBUSY;
+ *perrno = -EBUSY;
+ break;
- /* check copylength range */
- if (cmd->data_size > aq_total_len) {
+ default:
+ /* invalid state, should never happen */
i40e_debug(hw, I40E_DEBUG_NVM,
- "%s: copy length %d too big, trimming to %d\n",
- __func__, cmd->data_size, aq_total_len);
- cmd->data_size = aq_total_len;
+ "NVMUPD: no such state %d\n", hw->nvmupd_state);
+ status = -EOPNOTSUPP;
+ *perrno = -ESRCH;
+ break;
}
- memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size);
-
- return 0;
+ mutex_unlock(&hw->aq.arq_mutex);
+ return status;
}
/**
- * i40e_nvmupd_nvm_read - Read NVM
- * @hw: pointer to hardware structure
- * @cmd: pointer to nvm update command buffer
- * @bytes: pointer to the data buffer
- * @perrno: pointer to return error code
- *
- * cmd structure contains identifiers and data buffer
+ * i40e_nvmupd_clear_wait_state - clear wait state on hw
+ * @hw: pointer to the hardware structure
**/
-static int i40e_nvmupd_nvm_read(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
+void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw)
{
- struct i40e_asq_cmd_details cmd_details;
- u8 module, transaction;
- int status;
- bool last;
-
- transaction = i40e_nvmupd_get_transaction(cmd->config);
- module = i40e_nvmupd_get_module(cmd->config);
- last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
-
- memset(&cmd_details, 0, sizeof(cmd_details));
- cmd_details.wb_desc = &hw->nvm_wb_desc;
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: clearing wait on opcode 0x%04x\n",
+ hw->nvm_wait_opcode);
- status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
- bytes, last, &cmd_details);
- if (status) {
- i40e_debug(hw, I40E_DEBUG_NVM,
- "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n",
- module, cmd->offset, cmd->data_size);
- i40e_debug(hw, I40E_DEBUG_NVM,
- "i40e_nvmupd_nvm_read status %d aq %d\n",
- status, hw->aq.asq_last_status);
- *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ if (hw->nvm_release_on_done) {
+ i40e_release_nvm(hw);
+ hw->nvm_release_on_done = false;
}
+ hw->nvm_wait_opcode = 0;
- return status;
-}
-
-/**
- * i40e_nvmupd_nvm_erase - Erase an NVM module
- * @hw: pointer to hardware structure
- * @cmd: pointer to nvm update command buffer
- * @perrno: pointer to return error code
- *
- * module, offset, data_size and data are in cmd structure
- **/
-static int i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- int *perrno)
-{
- struct i40e_asq_cmd_details cmd_details;
- u8 module, transaction;
- int status = 0;
- bool last;
+ if (hw->aq.arq_last_status) {
+ hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
+ return;
+ }
- transaction = i40e_nvmupd_get_transaction(cmd->config);
- module = i40e_nvmupd_get_module(cmd->config);
- last = (transaction & I40E_NVM_LCB);
+ switch (hw->nvmupd_state) {
+ case I40E_NVMUPD_STATE_INIT_WAIT:
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ break;
- memset(&cmd_details, 0, sizeof(cmd_details));
- cmd_details.wb_desc = &hw->nvm_wb_desc;
+ case I40E_NVMUPD_STATE_WRITE_WAIT:
+ hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
+ break;
- status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
- last, &cmd_details);
- if (status) {
- i40e_debug(hw, I40E_DEBUG_NVM,
- "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n",
- module, cmd->offset, cmd->data_size);
- i40e_debug(hw, I40E_DEBUG_NVM,
- "i40e_nvmupd_nvm_erase status %d aq %d\n",
- status, hw->aq.asq_last_status);
- *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ default:
+ break;
}
-
- return status;
}
/**
- * i40e_nvmupd_nvm_write - Write NVM
- * @hw: pointer to hardware structure
- * @cmd: pointer to nvm update command buffer
- * @bytes: pointer to the data buffer
- * @perrno: pointer to return error code
- *
- * module, offset, data_size and data are in cmd structure
+ * i40e_nvmupd_check_wait_event - handle NVM update operation events
+ * @hw: pointer to the hardware structure
+ * @opcode: the event that just happened
+ * @desc: AdminQ descriptor
**/
-static int i40e_nvmupd_nvm_write(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
+void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
+ struct libie_aq_desc *desc)
{
- struct i40e_asq_cmd_details cmd_details;
- u8 module, transaction;
- u8 preservation_flags;
- int status = 0;
- bool last;
-
- transaction = i40e_nvmupd_get_transaction(cmd->config);
- module = i40e_nvmupd_get_module(cmd->config);
- last = (transaction & I40E_NVM_LCB);
- preservation_flags = i40e_nvmupd_get_preservation_flags(cmd->config);
-
- memset(&cmd_details, 0, sizeof(cmd_details));
- cmd_details.wb_desc = &hw->nvm_wb_desc;
+ u32 aq_desc_len = sizeof(struct libie_aq_desc);
- status = i40e_aq_update_nvm(hw, module, cmd->offset,
- (u16)cmd->data_size, bytes, last,
- preservation_flags, &cmd_details);
- if (status) {
- i40e_debug(hw, I40E_DEBUG_NVM,
- "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
- module, cmd->offset, cmd->data_size);
- i40e_debug(hw, I40E_DEBUG_NVM,
- "i40e_nvmupd_nvm_write status %d aq %d\n",
- status, hw->aq.asq_last_status);
- *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ if (opcode == hw->nvm_wait_opcode) {
+ memcpy(&hw->nvm_aq_event_desc, desc, aq_desc_len);
+ i40e_nvmupd_clear_wait_state(hw);
}
-
- return status;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index ce1f11b8ad65..26bb7bffe361 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -23,29 +23,22 @@ int i40e_clean_arq_element(struct i40e_hw *hw,
struct i40e_arq_event_info *e,
u16 *events_pending);
int
-i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
+i40e_asq_send_command(struct i40e_hw *hw, struct libie_aq_desc *desc,
void *buff, /* can be NULL */ u16 buff_size,
struct i40e_asq_cmd_details *cmd_details);
int
-i40e_asq_send_command_v2(struct i40e_hw *hw,
- struct i40e_aq_desc *desc,
- void *buff, /* can be NULL */
- u16 buff_size,
- struct i40e_asq_cmd_details *cmd_details,
- enum i40e_admin_queue_err *aq_status);
-int
-i40e_asq_send_command_atomic(struct i40e_hw *hw, struct i40e_aq_desc *desc,
+i40e_asq_send_command_atomic(struct i40e_hw *hw, struct libie_aq_desc *desc,
void *buff, /* can be NULL */ u16 buff_size,
struct i40e_asq_cmd_details *cmd_details,
bool is_atomic_context);
int
i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
- struct i40e_aq_desc *desc,
+ struct libie_aq_desc *desc,
void *buff, /* can be NULL */
u16 buff_size,
struct i40e_asq_cmd_details *cmd_details,
bool is_atomic_context,
- enum i40e_admin_queue_err *aq_status);
+ enum libie_aq_err *aq_status);
/* debug function for adminq */
void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
@@ -53,7 +46,6 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
bool i40e_check_asq_alive(struct i40e_hw *hw);
int i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
-const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
int i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
bool pf_lut, u8 *lut, u16 lut_size);
@@ -72,8 +64,6 @@ int i40e_led_set_phy(struct i40e_hw *hw, bool on,
u16 led_addr, u32 mode);
int i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
u16 *val);
-int i40e_blink_phy_link_led(struct i40e_hw *hw,
- u32 time, u32 interval);
/* admin send queue commands */
@@ -108,6 +98,8 @@ int i40e_aq_set_mac_loopback(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
int i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask,
struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_mac_config(struct i40e_hw *hw, u16 max_frame_size,
+ struct i40e_asq_cmd_details *cmd_details);
int i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
int i40e_aq_set_link_restart_an(struct i40e_hw *hw,
@@ -141,9 +133,6 @@ int i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
int i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
u16 seid, bool enable, u16 vid,
struct i40e_asq_cmd_details *cmd_details);
-int i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
- u16 seid, bool enable,
- struct i40e_asq_cmd_details *cmd_details);
int i40e_aq_get_vsi_params(struct i40e_hw *hw,
struct i40e_vsi_context *vsi_ctx,
struct i40e_asq_cmd_details *cmd_details);
@@ -167,7 +156,7 @@ int
i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_add_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details,
- enum i40e_admin_queue_err *aq_status);
+ enum libie_aq_err *aq_status);
int i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id,
struct i40e_aqc_remove_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details);
@@ -175,15 +164,7 @@ int
i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_remove_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details,
- enum i40e_admin_queue_err *aq_status);
-int i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
- u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list,
- struct i40e_asq_cmd_details *cmd_details,
- u16 *rule_id, u16 *rules_used, u16 *rules_free);
-int i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
- u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list,
- struct i40e_asq_cmd_details *cmd_details,
- u16 *rules_used, u16 *rules_free);
+ enum libie_aq_err *aq_status);
int i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
@@ -220,9 +201,6 @@ int i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
u32 offset, u16 length, void *data,
bool last_command, u8 preservation_flags,
struct i40e_asq_cmd_details *cmd_details);
-int i40e_aq_rearrange_nvm(struct i40e_hw *hw,
- u8 rearrange_nvm,
- struct i40e_asq_cmd_details *cmd_details);
int i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
u8 mib_type, void *buff, u16 buff_size,
u16 *local_len, u16 *remote_len,
@@ -234,9 +212,6 @@ i40e_aq_set_lldp_mib(struct i40e_hw *hw,
int i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
bool enable_update,
struct i40e_asq_cmd_details *cmd_details);
-int
-i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
- struct i40e_asq_cmd_details *cmd_details);
int i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
bool persist,
struct i40e_asq_cmd_details *cmd_details);
@@ -365,19 +340,12 @@ int i40e_nvmupd_command(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
u8 *bytes, int *errno);
void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
- struct i40e_aq_desc *desc);
+ struct libie_aq_desc *desc);
void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw);
void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
int i40e_set_mac_type(struct i40e_hw *hw);
-extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];
-
-static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
-{
- return i40e_ptype_lookup[ptype];
-}
-
/**
* i40e_virtchnl_link_speed - Convert AdminQ link_speed to virtchnl definition
* @link_speed: the speed to convert
@@ -465,13 +433,7 @@ int i40e_read_phy_register_clause45(struct i40e_hw *hw,
u8 page, u16 reg, u8 phy_addr, u16 *value);
int i40e_write_phy_register_clause45(struct i40e_hw *hw,
u8 page, u16 reg, u8 phy_addr, u16 value);
-int i40e_read_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
- u8 phy_addr, u16 *value);
-int i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
- u8 phy_addr, u16 value);
u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
-int i40e_blink_phy_link_led(struct i40e_hw *hw,
- u32 time, u32 interval);
int i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
u16 buff_size, u32 track_id,
u32 *error_offset, u32 *error_info,
@@ -484,20 +446,12 @@ int i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
struct i40e_generic_seg_header *
i40e_find_segment_in_package(u32 segment_type,
struct i40e_package_header *pkg_header);
-struct i40e_profile_section_header *
-i40e_find_section_in_profile(u32 section_type,
- struct i40e_profile_segment *profile);
int
i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
u32 track_id);
int
i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
u32 track_id);
-int
-i40e_add_pinfo_to_list(struct i40e_hw *hw,
- struct i40e_profile_segment *profile,
- u8 *profile_info_sec, u32 track_id);
-
/* i40e_ddp */
int i40e_ddp_flash(struct net_device *netdev, struct ethtool_flash *flash);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index e7ebcb09f23c..33535418178b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -550,7 +550,7 @@ static int i40e_ptp_enable_pin(struct i40e_pf *pf, unsigned int chan,
pins.gpio_4 = pf->ptp_pins->gpio_4;
/* To turn on the pin - find the corresponding one based on
- * the given index. To to turn the function off - find
+ * the given index. To turn the function off - find
* which pin had it assigned. Don't use ptp_find_pin here
* because it tries to lock the pincfg_mux which is locked by
* ptp_pin_store() that calls here.
@@ -912,23 +912,26 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
}
/**
- * i40e_ptp_get_ts_config - ioctl interface to read the HW timestamping
- * @pf: Board private structure
- * @ifr: ioctl data
+ * i40e_ptp_hwtstamp_get - interface to read the HW timestamping
+ * @netdev: Network device structure
+ * @config: Timestamping configuration structure
*
 * Obtain the current hardware timestamping settings as requested. To do this,
* keep a shadow copy of the timestamp settings rather than attempting to
* deconstruct it from the registers.
**/
-int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
+int i40e_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
{
- struct hwtstamp_config *config = &pf->tstamp_config;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags))
return -EOPNOTSUPP;
- return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
- -EFAULT : 0;
+ *config = pf->tstamp_config;
+
+ return 0;
}
/**
@@ -1167,7 +1170,7 @@ int i40e_ptp_alloc_pins(struct i40e_pf *pf)
* more broad if the specific filter is not directly supported.
**/
static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
struct i40e_hw *hw = &pf->hw;
u32 tsyntype, regval;
@@ -1290,9 +1293,10 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
}
/**
- * i40e_ptp_set_ts_config - ioctl interface to control the HW timestamping
- * @pf: Board private structure
- * @ifr: ioctl data
+ * i40e_ptp_hwtstamp_set - interface to control the HW timestamping
+ * @netdev: Network device structure
+ * @config: Timestamping configuration structure
+ * @extack: Netlink extended ack structure for error reporting
*
* Respond to the user filter requests and make the appropriate hardware
* changes here. The XL710 cannot support splitting of the Tx/Rx timestamping
@@ -1303,26 +1307,25 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
* as the user receives the timestamps they care about and the user is notified
* the filter has been broadened.
**/
-int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
+int i40e_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config config;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
int err;
if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags))
return -EOPNOTSUPP;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- err = i40e_ptp_set_timestamp_mode(pf, &config);
+ err = i40e_ptp_set_timestamp_mode(pf, config);
if (err)
return err;
/* save these settings for future reference */
- pf->tstamp_config = config;
+ pf->tstamp_config = *config;
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
/**
@@ -1472,7 +1475,8 @@ void i40e_ptp_restore_hw_time(struct i40e_pf *pf)
**/
void i40e_ptp_init(struct i40e_pf *pf)
{
- struct net_device *netdev = pf->vsi[pf->lan_vsi]->netdev;
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
+ struct net_device *netdev = vsi->netdev;
struct i40e_hw *hw = &pf->hw;
u32 pf_id;
long err;
@@ -1536,6 +1540,7 @@ void i40e_ptp_init(struct i40e_pf *pf)
**/
void i40e_ptp_stop(struct i40e_pf *pf)
{
+ struct i40e_vsi *main_vsi = i40e_pf_get_main_vsi(pf);
struct i40e_hw *hw = &pf->hw;
u32 regval;
@@ -1555,7 +1560,7 @@ void i40e_ptp_stop(struct i40e_pf *pf)
ptp_clock_unregister(pf->ptp_clock);
pf->ptp_clock = NULL;
dev_info(&pf->pdev->dev, "%s: removed PHC on %s\n", __func__,
- pf->vsi[pf->lan_vsi]->netdev->name);
+ main_vsi->netdev->name);
}
if (i40e_is_ptp_pin_dev(&pf->hw)) {
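The hwtstamp_get/hwtstamp_set conversion above drops the SIOCGHWTSTAMP/SIOCSHWTSTAMP copy_to_user()/copy_from_user() plumbing: the core now hands the driver a kernel_hwtstamp_config directly. A sketch of how such callbacks are typically wired up; the actual hookup is not part of this hunk, so the ops-struct name below is an assumption:

    /* illustrative wiring only */
    static const struct net_device_ops i40e_netdev_ops_sketch = {
            .ndo_hwtstamp_get = i40e_ptp_hwtstamp_get,
            .ndo_hwtstamp_set = i40e_ptp_hwtstamp_set,
    };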
diff --git a/drivers/net/ethernet/intel/i40e/i40e_trace.h b/drivers/net/ethernet/intel/i40e/i40e_trace.h
index 33b4e30f5e00..759f3d1c4c8f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_trace.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_trace.h
@@ -89,8 +89,8 @@ TRACE_EVENT(i40e_napi_poll,
__entry->tx_clean_complete = tx_clean_complete;
__entry->irq_num = q->irq_num;
__entry->curr_cpu = get_cpu();
- __assign_str(qname, q->name);
- __assign_str(dev_name, napi->dev ? napi->dev->name : NO_DEV);
+ __assign_str(qname);
+ __assign_str(dev_name);
__assign_bitmask(irq_affinity, cpumask_bits(&q->affinity_mask),
nr_cpumask_bits);
),
@@ -132,7 +132,7 @@ DECLARE_EVENT_CLASS(
__entry->ring = ring;
__entry->desc = desc;
__entry->buf = buf;
- __assign_str(devname, ring->netdev->name);
+ __assign_str(devname);
),
TP_printk(
@@ -177,7 +177,7 @@ DECLARE_EVENT_CLASS(
__entry->ring = ring;
__entry->desc = desc;
__entry->xdp = xdp;
- __assign_str(devname, ring->netdev->name);
+ __assign_str(devname);
),
TP_printk(
@@ -219,7 +219,7 @@ DECLARE_EVENT_CLASS(
TP_fast_assign(
__entry->skb = skb;
__entry->ring = ring;
- __assign_str(devname, ring->netdev->name);
+ __assign_str(devname);
),
TP_printk(
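The trace changes rely on the newer one-argument __assign_str(): the source expression is taken from the matching __string() declaration, so repeating it in TP_fast_assign() is redundant. A minimal sketch with a hypothetical event name, not part of the patch:

    TRACE_EVENT(i40e_sketch_event,
            TP_PROTO(struct net_device *dev),
            TP_ARGS(dev),
            TP_STRUCT__entry(
                    __string(devname, dev->name)
            ),
            TP_fast_assign(
                    __assign_str(devname);  /* copies dev->name */
            ),
            TP_printk("dev=%s", __get_str(devname))
    );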
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 1a12b732818e..cc0b9efc2637 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2,6 +2,8 @@
/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include <linux/bpf_trace.h>
+#include <linux/net/intel/libie/pctype.h>
+#include <linux/net/intel/libie/rx.h>
#include <linux/prefetch.h>
#include <linux/sctp.h>
#include <net/mpls.h>
@@ -23,7 +25,7 @@ static void i40e_fdir(struct i40e_ring *tx_ring,
{
struct i40e_filter_program_desc *fdir_desc;
struct i40e_pf *pf = tx_ring->vsi->back;
- u32 flex_ptype, dtype_cmd;
+ u32 flex_ptype, dtype_cmd, vsi_id;
u16 i;
/* grab the next descriptor */
@@ -41,8 +43,8 @@ static void i40e_fdir(struct i40e_ring *tx_ring,
flex_ptype |= FIELD_PREP(I40E_TXD_FLTR_QW0_PCTYPE_MASK, fdata->pctype);
/* Use LAN VSI Id if not programmed by user */
- flex_ptype |= FIELD_PREP(I40E_TXD_FLTR_QW0_DEST_VSI_MASK,
- fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id);
+ vsi_id = fdata->dest_vsi ? : i40e_pf_get_main_vsi(pf)->id;
+ flex_ptype |= FIELD_PREP(I40E_TXD_FLTR_QW0_DEST_VSI_MASK, vsi_id);
dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
@@ -396,12 +398,12 @@ static int i40e_add_del_fdir_udp(struct i40e_vsi *vsi,
ret = i40e_prepare_fdir_filter
(pf, fd_data, add, raw_packet,
I40E_UDPIP_DUMMY_PACKET_LEN,
- I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
+ LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP);
else
ret = i40e_prepare_fdir_filter
(pf, fd_data, add, raw_packet,
I40E_UDPIP6_DUMMY_PACKET_LEN,
- I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
+ LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP);
if (ret) {
kfree(raw_packet);
@@ -443,12 +445,12 @@ static int i40e_add_del_fdir_tcp(struct i40e_vsi *vsi,
ret = i40e_prepare_fdir_filter
(pf, fd_data, add, raw_packet,
I40E_TCPIP_DUMMY_PACKET_LEN,
- I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+ LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP);
else
ret = i40e_prepare_fdir_filter
(pf, fd_data, add, raw_packet,
I40E_TCPIP6_DUMMY_PACKET_LEN,
- I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+ LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP);
if (ret) {
kfree(raw_packet);
@@ -498,12 +500,12 @@ static int i40e_add_del_fdir_sctp(struct i40e_vsi *vsi,
ret = i40e_prepare_fdir_filter
(pf, fd_data, add, raw_packet,
I40E_SCTPIP_DUMMY_PACKET_LEN,
- I40E_FILTER_PCTYPE_NONF_IPV4_SCTP);
+ LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP);
else
ret = i40e_prepare_fdir_filter
(pf, fd_data, add, raw_packet,
I40E_SCTPIP6_DUMMY_PACKET_LEN,
- I40E_FILTER_PCTYPE_NONF_IPV6_SCTP);
+ LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP);
if (ret) {
kfree(raw_packet);
@@ -542,11 +544,11 @@ static int i40e_add_del_fdir_ip(struct i40e_vsi *vsi,
int i;
if (ipv4) {
- iter_start = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
- iter_end = I40E_FILTER_PCTYPE_FRAG_IPV4;
+ iter_start = LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER;
+ iter_end = LIBIE_FILTER_PCTYPE_FRAG_IPV4;
} else {
- iter_start = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
- iter_end = I40E_FILTER_PCTYPE_FRAG_IPV6;
+ iter_start = LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER;
+ iter_end = LIBIE_FILTER_PCTYPE_FRAG_IPV6;
}
for (i = iter_start; i <= iter_end; i++) {
@@ -860,13 +862,15 @@ u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
/**
* i40e_detect_recover_hung - Function to detect and recover hung_queues
- * @vsi: pointer to vsi struct with tx queues
+ * @pf: pointer to PF struct
*
- * VSI has netdev and netdev has TX queues. This function is to check each of
- * those TX queues if they are hung, trigger recovery by issuing SW interrupt.
+ * The LAN VSI has a netdev and the netdev has TX queues. This function
+ * checks each of those TX queues for a hang and triggers recovery by
+ * issuing a SW interrupt.
**/
-void i40e_detect_recover_hung(struct i40e_vsi *vsi)
+void i40e_detect_recover_hung(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
struct i40e_ring *tx_ring = NULL;
struct net_device *netdev;
unsigned int i;
@@ -944,9 +948,6 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
if (!eop_desc)
break;
- /* prevent any other reads prior to eop_desc */
- smp_rmb();
-
i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
/* we have caught up to head, no work left to do */
if (tx_head == tx_desc)
@@ -1741,38 +1742,30 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
struct sk_buff *skb,
union i40e_rx_desc *rx_desc)
{
- struct i40e_rx_ptype_decoded decoded;
+ struct libeth_rx_pt decoded;
u32 rx_error, rx_status;
bool ipv4, ipv6;
u8 ptype;
u64 qword;
- qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- ptype = FIELD_GET(I40E_RXD_QW1_PTYPE_MASK, qword);
- rx_error = FIELD_GET(I40E_RXD_QW1_ERROR_MASK, qword);
- rx_status = FIELD_GET(I40E_RXD_QW1_STATUS_MASK, qword);
- decoded = decode_rx_desc_ptype(ptype);
-
skb->ip_summed = CHECKSUM_NONE;
- skb_checksum_none_assert(skb);
+ qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ ptype = FIELD_GET(I40E_RXD_QW1_PTYPE_MASK, qword);
- /* Rx csum enabled and ip headers found? */
- if (!(vsi->netdev->features & NETIF_F_RXCSUM))
+ decoded = libie_rx_pt_parse(ptype);
+ if (!libeth_rx_pt_has_checksum(vsi->netdev, decoded))
return;
+ rx_error = FIELD_GET(I40E_RXD_QW1_ERROR_MASK, qword);
+ rx_status = FIELD_GET(I40E_RXD_QW1_STATUS_MASK, qword);
+
/* did the hardware decode the packet and checksum? */
if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
return;
- /* both known and outer_ip must be set for the below code to work */
- if (!(decoded.known && decoded.outer_ip))
- return;
-
- ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
- (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
- ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
- (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);
+ ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4;
+ ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6;
if (ipv4 &&
(rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
@@ -1800,20 +1793,10 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
* we need to bump the checksum level by 1 to reflect the fact that
* we are indicating we validated the inner checksum.
*/
- if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
+ if (decoded.tunnel_type >= LIBETH_RX_PT_TUNNEL_IP_GRENAT)
skb->csum_level = 1;
- /* Only report checksum unnecessary for TCP, UDP, or SCTP */
- switch (decoded.inner_prot) {
- case I40E_RX_PTYPE_INNER_PROT_TCP:
- case I40E_RX_PTYPE_INNER_PROT_UDP:
- case I40E_RX_PTYPE_INNER_PROT_SCTP:
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- fallthrough;
- default:
- break;
- }
-
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
return;
checksum_fail:
@@ -1821,29 +1804,6 @@ checksum_fail:
}
/**
- * i40e_ptype_to_htype - get a hash type
- * @ptype: the ptype value from the descriptor
- *
- * Returns a hash type to be used by skb_set_hash
- **/
-static inline int i40e_ptype_to_htype(u8 ptype)
-{
- struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
-
- if (!decoded.known)
- return PKT_HASH_TYPE_NONE;
-
- if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
- decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
- return PKT_HASH_TYPE_L4;
- else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
- decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
- return PKT_HASH_TYPE_L3;
- else
- return PKT_HASH_TYPE_L2;
-}
-
-/**
* i40e_rx_hash - set the hash value in the skb
* @ring: descriptor ring
* @rx_desc: specific descriptor
@@ -1855,17 +1815,19 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
struct sk_buff *skb,
u8 rx_ptype)
{
+ struct libeth_rx_pt decoded;
u32 hash;
const __le64 rss_mask =
cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
- if (!(ring->netdev->features & NETIF_F_RXHASH))
+ decoded = libie_rx_pt_parse(rx_ptype);
+ if (!libeth_rx_pt_has_hash(ring->netdev, decoded))
return;
if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
- skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
+ libeth_rx_pt_set_hash(skb, hash, decoded);
}
}
@@ -2144,9 +2106,7 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
*/
/* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
- I40E_RX_HDR_SIZE,
- GFP_ATOMIC | __GFP_NOWARN);
+ skb = napi_alloc_skb(&rx_ring->q_vector->napi, I40E_RX_HDR_SIZE);
if (unlikely(!skb))
return NULL;
@@ -2188,10 +2148,10 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0],
sizeof(skb_frag_t) * nr_frags);
- xdp_update_skb_shared_info(skb, skinfo->nr_frags + nr_frags,
- sinfo->xdp_frags_size,
- nr_frags * xdp->frame_sz,
- xdp_buff_is_frag_pfmemalloc(xdp));
+ xdp_update_skb_frags_info(skb, skinfo->nr_frags + nr_frags,
+ sinfo->xdp_frags_size,
+ nr_frags * xdp->frame_sz,
+ xdp_buff_get_skb_flags(xdp));
/* First buffer has already been processed, so bump ntc */
if (++rx_ring->next_to_clean == rx_ring->count)
@@ -2243,10 +2203,9 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
skb_metadata_set(skb, metasize);
if (unlikely(xdp_buff_has_frags(xdp))) {
- xdp_update_skb_shared_info(skb, nr_frags,
- sinfo->xdp_frags_size,
- nr_frags * xdp->frame_sz,
- xdp_buff_is_frag_pfmemalloc(xdp));
+ xdp_update_skb_frags_info(skb, nr_frags, sinfo->xdp_frags_size,
+ nr_frags * xdp->frame_sz,
+ xdp_buff_get_skb_flags(xdp));
i40e_process_rx_buffs(rx_ring, I40E_XDP_PASS, xdp);
} else {
@@ -2986,9 +2945,9 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
flex_ptype = FIELD_PREP(I40E_TXD_FLTR_QW0_QINDEX_MASK,
tx_ring->queue_index);
flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ?
- (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
+ (LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP <<
I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
- (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
+ (LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP <<
I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
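The Rx-side rework above replaces the driver-local ptype decode tables with the shared libeth parser: libie_rx_pt_parse() produces a struct libeth_rx_pt, and the libeth_rx_pt_has_checksum()/has_hash() helpers already fold in the netdev feature checks. A condensed sketch of that pattern; the helper name and the hw_csum_ok flag are assumptions, and only calls introduced in this hunk are used:

    static void i40e_rx_meta_sketch(struct net_device *dev, struct sk_buff *skb,
                                    u8 ptype, u32 rss_hash, bool hw_csum_ok)
    {
            struct libeth_rx_pt pt = libie_rx_pt_parse(ptype);

            if (libeth_rx_pt_has_hash(dev, pt))
                    libeth_rx_pt_set_hash(skb, rss_hash, pt);

            if (libeth_rx_pt_has_checksum(dev, pt) && hw_csum_ok)
                    skb->ip_summed = CHECKSUM_UNNECESSARY;
    }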
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 2cdc7de6301c..1e5fd63d47f4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -4,6 +4,7 @@
#ifndef _I40E_TXRX_H_
#define _I40E_TXRX_H_
+#include <linux/net/intel/libie/pctype.h>
#include <net/xdp.h>
#include "i40e_type.h"
@@ -71,30 +72,30 @@ enum i40e_dyn_idx {
#define I40E_SW_ITR I40E_IDX_ITR2
/* Supported RSS offloads */
-#define I40E_DEFAULT_RSS_HENA ( \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
- BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
-
-#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
-
-#define i40e_pf_get_default_rss_hena(pf) \
+#define I40E_DEFAULT_RSS_HASHCFG ( \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV4) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV6) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_L2_PAYLOAD))
+
+#define I40E_DEFAULT_RSS_HASHCFG_EXPANDED (I40E_DEFAULT_RSS_HASHCFG | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
+
+#define i40e_pf_get_default_rss_hashcfg(pf) \
(test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE, (pf)->hw.caps) ? \
- I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)
+ I40E_DEFAULT_RSS_HASHCFG_EXPANDED : I40E_DEFAULT_RSS_HASHCFG)
/* Supported Rx Buffer Sizes (a multiple of 128) */
#define I40E_RXBUFFER_256 256
@@ -470,7 +471,7 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring);
int i40e_napi_poll(struct napi_struct *napi, int budget);
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw);
-void i40e_detect_recover_hung(struct i40e_vsi *vsi);
+void i40e_detect_recover_hung(struct i40e_pf *pf);
int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40e_chk_linearize(struct sk_buff *skb);
int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index d9031499697e..ed8bbdb586da 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -24,7 +24,7 @@
/* forward declaration */
struct i40e_hw;
-typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
+typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct libie_aq_desc *);
/* Data type manipulation macros. */
@@ -555,8 +555,8 @@ struct i40e_hw {
/* state of nvm update process */
enum i40e_nvmupd_state nvmupd_state;
- struct i40e_aq_desc nvm_wb_desc;
- struct i40e_aq_desc nvm_aq_event_desc;
+ struct libie_aq_desc nvm_wb_desc;
+ struct libie_aq_desc nvm_aq_event_desc;
struct i40e_virt_mem nvm_buff;
bool nvm_release_on_done;
u16 nvm_wait_opcode;
@@ -745,94 +745,6 @@ enum i40e_rx_desc_error_l3l4e_fcoe_masks {
#define I40E_RXD_QW1_PTYPE_SHIFT 30
#define I40E_RXD_QW1_PTYPE_MASK (0xFFULL << I40E_RXD_QW1_PTYPE_SHIFT)
-/* Packet type non-ip values */
-enum i40e_rx_l2_ptype {
- I40E_RX_PTYPE_L2_RESERVED = 0,
- I40E_RX_PTYPE_L2_MAC_PAY2 = 1,
- I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2,
- I40E_RX_PTYPE_L2_FIP_PAY2 = 3,
- I40E_RX_PTYPE_L2_OUI_PAY2 = 4,
- I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
- I40E_RX_PTYPE_L2_LLDP_PAY2 = 6,
- I40E_RX_PTYPE_L2_ECP_PAY2 = 7,
- I40E_RX_PTYPE_L2_EVB_PAY2 = 8,
- I40E_RX_PTYPE_L2_QCN_PAY2 = 9,
- I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10,
- I40E_RX_PTYPE_L2_ARP = 11,
- I40E_RX_PTYPE_L2_FCOE_PAY3 = 12,
- I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13,
- I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14,
- I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15,
- I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16,
- I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17,
- I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18,
- I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19,
- I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20,
- I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21,
- I40E_RX_PTYPE_GRENAT4_MAC_PAY3 = 58,
- I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87,
- I40E_RX_PTYPE_GRENAT6_MAC_PAY3 = 124,
- I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153
-};
-
-struct i40e_rx_ptype_decoded {
- u32 known:1;
- u32 outer_ip:1;
- u32 outer_ip_ver:1;
- u32 outer_frag:1;
- u32 tunnel_type:3;
- u32 tunnel_end_prot:2;
- u32 tunnel_end_frag:1;
- u32 inner_prot:4;
- u32 payload_layer:3;
-};
-
-enum i40e_rx_ptype_outer_ip {
- I40E_RX_PTYPE_OUTER_L2 = 0,
- I40E_RX_PTYPE_OUTER_IP = 1
-};
-
-enum i40e_rx_ptype_outer_ip_ver {
- I40E_RX_PTYPE_OUTER_NONE = 0,
- I40E_RX_PTYPE_OUTER_IPV4 = 0,
- I40E_RX_PTYPE_OUTER_IPV6 = 1
-};
-
-enum i40e_rx_ptype_outer_fragmented {
- I40E_RX_PTYPE_NOT_FRAG = 0,
- I40E_RX_PTYPE_FRAG = 1
-};
-
-enum i40e_rx_ptype_tunnel_type {
- I40E_RX_PTYPE_TUNNEL_NONE = 0,
- I40E_RX_PTYPE_TUNNEL_IP_IP = 1,
- I40E_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
- I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
- I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
-};
-
-enum i40e_rx_ptype_tunnel_end_prot {
- I40E_RX_PTYPE_TUNNEL_END_NONE = 0,
- I40E_RX_PTYPE_TUNNEL_END_IPV4 = 1,
- I40E_RX_PTYPE_TUNNEL_END_IPV6 = 2,
-};
-
-enum i40e_rx_ptype_inner_prot {
- I40E_RX_PTYPE_INNER_PROT_NONE = 0,
- I40E_RX_PTYPE_INNER_PROT_UDP = 1,
- I40E_RX_PTYPE_INNER_PROT_TCP = 2,
- I40E_RX_PTYPE_INNER_PROT_SCTP = 3,
- I40E_RX_PTYPE_INNER_PROT_ICMP = 4,
- I40E_RX_PTYPE_INNER_PROT_TIMESYNC = 5
-};
-
-enum i40e_rx_ptype_payload_layer {
- I40E_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
- I40E_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
- I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
- I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
-};
-
#define I40E_RXD_QW1_LENGTH_PBUF_SHIFT 38
#define I40E_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \
I40E_RXD_QW1_LENGTH_PBUF_SHIFT)
@@ -1017,38 +929,6 @@ struct i40e_filter_program_desc {
#define I40E_TXD_FLTR_QW0_PCTYPE_MASK (0x3FUL << \
I40E_TXD_FLTR_QW0_PCTYPE_SHIFT)
-/* Packet Classifier Types for filters */
-enum i40e_filter_pctype {
- /* Note: Values 0-28 are reserved for future use.
- * Value 29, 30, 32 are not supported on XL710 and X710.
- */
- I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
- I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
- I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
- I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32,
- I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
- I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
- I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
- I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
- /* Note: Values 37-38 are reserved for future use.
- * Value 39, 40, 42 are not supported on XL710 and X710.
- */
- I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
- I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
- I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
- I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42,
- I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
- I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
- I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
- I40E_FILTER_PCTYPE_FRAG_IPV6 = 46,
- /* Note: Value 47 is reserved for future use */
- I40E_FILTER_PCTYPE_FCOE_OX = 48,
- I40E_FILTER_PCTYPE_FCOE_RX = 49,
- I40E_FILTER_PCTYPE_FCOE_OTHER = 50,
- /* Note: Values 51-62 are reserved for future use */
- I40E_FILTER_PCTYPE_L2_PAYLOAD = 63,
-};
-
enum i40e_filter_program_desc_dest {
I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET = 0x0,
I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX = 0x1,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 232b65b9c8ea..8b30a3accd31 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -216,7 +216,7 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
* @notify_vf: notify vf about reset or not
* Reset VF handler.
**/
-static void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf)
+void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf)
{
struct i40e_pf *pf = vf->pf;
int i;
@@ -448,7 +448,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
(qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
(pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
- (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
+ FIELD_PREP(I40E_QINT_RQCTL_ITR_INDX_MASK, itr_idx);
wr32(hw, reg_idx, reg);
}
@@ -653,6 +653,13 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
/* only set the required fields */
tx_ctx.base = info->dma_ring_addr / 128;
+
+ /* ring_len has to be a multiple of 8 */
+ if (!IS_ALIGNED(info->ring_len, 8) ||
+ info->ring_len > I40E_MAX_NUM_DESCRIPTORS_XL710) {
+ ret = -EINVAL;
+ goto error_context;
+ }
tx_ctx.qlen = info->ring_len;
tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
tx_ctx.rdylist_act = 0;
@@ -716,6 +723,13 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
/* only set the required fields */
rx_ctx.base = info->dma_ring_addr / 128;
+
+ /* ring_len has to be a multiple of 32 */
+ if (!IS_ALIGNED(info->ring_len, 32) ||
+ info->ring_len > I40E_MAX_NUM_DESCRIPTORS_XL710) {
+ ret = -EINVAL;
+ goto error_param;
+ }
rx_ctx.qlen = info->ring_len;
if (info->splithdr_enabled) {
@@ -795,13 +809,13 @@ error_param:
static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
{
struct i40e_mac_filter *f = NULL;
+ struct i40e_vsi *main_vsi, *vsi;
struct i40e_pf *pf = vf->pf;
- struct i40e_vsi *vsi;
u64 max_tx_rate = 0;
int ret = 0;
- vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->vsi[pf->lan_vsi]->seid,
- vf->vf_id);
+ main_vsi = i40e_pf_get_main_vsi(pf);
+ vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, main_vsi->seid, vf->vf_id);
if (!vsi) {
dev_err(&pf->pdev->dev,
@@ -812,7 +826,7 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
}
if (!idx) {
- u64 hena = i40e_pf_get_default_rss_hena(pf);
+ u64 hashcfg = i40e_pf_get_default_rss_hashcfg(pf);
u8 broadcast[ETH_ALEN];
vf->lan_vsi_idx = vsi->idx;
@@ -841,8 +855,9 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
dev_info(&pf->pdev->dev,
"Could not allocate VF broadcast filter\n");
spin_unlock_bh(&vsi->mac_filter_hash_lock);
- wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
- wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
+ wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hashcfg);
+ wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id),
+ (u32)(hashcfg >> 32));
/* program mac filter only for VF VSI */
ret = i40e_sync_vsi_filters(vsi);
if (ret)
@@ -1289,9 +1304,8 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
dev_err(&pf->pdev->dev,
"VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n",
- vf->vf_id,
- ERR_PTR(aq_ret),
- i40e_aq_str(&pf->hw, aq_err));
+ vf->vf_id, ERR_PTR(aq_ret),
+ libie_aq_str(aq_err));
return aq_ret;
}
@@ -1305,9 +1319,8 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
dev_err(&pf->pdev->dev,
"VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n",
- vf->vf_id,
- ERR_PTR(aq_ret),
- i40e_aq_str(&pf->hw, aq_err));
+ vf->vf_id, ERR_PTR(aq_ret),
+ libie_aq_str(aq_err));
}
return aq_ret;
@@ -1322,9 +1335,8 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
dev_err(&pf->pdev->dev,
"VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n",
- vf->vf_id,
- ERR_PTR(aq_ret),
- i40e_aq_str(&pf->hw, aq_err));
+ vf->vf_id, ERR_PTR(aq_ret),
+ libie_aq_str(aq_err));
if (!aq_tmp)
aq_tmp = aq_ret;
@@ -1338,9 +1350,8 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
dev_err(&pf->pdev->dev,
"VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n",
- vf->vf_id,
- ERR_PTR(aq_ret),
- i40e_aq_str(&pf->hw, aq_err));
+ vf->vf_id, ERR_PTR(aq_ret),
+ libie_aq_str(aq_err));
if (!aq_tmp)
aq_tmp = aq_ret;
@@ -1453,6 +1464,7 @@ static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
* functions that may still be running at this point.
*/
clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
+ clear_bit(I40E_VF_STATE_RESOURCES_LOADED, &vf->vf_states);
/* In the case of a VFLR, the HW has already reset the VF and we
* just need to clean up, so don't hit the VFRTRIG register.
@@ -1546,8 +1558,8 @@ static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
* @vf: pointer to the VF structure
* @flr: VFLR was issued or not
*
- * Returns true if the VF is in reset, resets successfully, or resets
- * are disabled and false otherwise.
+ * Return: True if reset was performed successfully or if resets are disabled.
+ * False if reset is already in progress.
**/
bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
@@ -1566,7 +1578,7 @@ bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
/* If VF is being reset already we don't need to continue. */
if (test_and_set_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
- return true;
+ return false;
i40e_trigger_vf_reset(vf, flr);
@@ -2119,7 +2131,10 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
size_t len = 0;
int ret;
- if (!i40e_sync_vf_state(vf, I40E_VF_STATE_INIT)) {
+ i40e_sync_vf_state(vf, I40E_VF_STATE_INIT);
+
+ if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) ||
+ test_bit(I40E_VF_STATE_RESOURCES_LOADED, &vf->vf_states)) {
aq_ret = -EINVAL;
goto err;
}
@@ -2213,13 +2228,16 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
vfres->vsi_res[0].qset_handle
= le16_to_cpu(vsi->info.qs_handle[0]);
if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO) && !vf->pf_set_mac) {
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
eth_zero_addr(vf->default_lan_addr.addr);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
}
ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
vf->default_lan_addr.addr);
}
set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
+ set_bit(I40E_VF_STATE_RESOURCES_LOADED, &vf->vf_states);
err:
/* send the response back to the VF */
@@ -2382,7 +2400,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
}
if (vf->adq_enabled) {
- if (idx >= ARRAY_SIZE(vf->ch)) {
+ if (idx >= vf->num_tc) {
aq_ret = -ENODEV;
goto error_param;
}
@@ -2403,7 +2421,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
* to its appropriate VSIs based on TC mapping
*/
if (vf->adq_enabled) {
- if (idx >= ARRAY_SIZE(vf->ch)) {
+ if (idx >= vf->num_tc) {
aq_ret = -ENODEV;
goto error_param;
}
@@ -2453,8 +2471,10 @@ static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id,
u16 vsi_queue_id, queue_id;
for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) {
- if (vf->adq_enabled) {
- vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id;
+ u16 idx = vsi_queue_id / I40E_MAX_VF_VSI;
+
+ if (vf->adq_enabled && idx < vf->num_tc) {
+ vsi_id = vf->ch[idx].vsi_id;
queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF);
} else {
queue_id = vsi_queue_id;
@@ -2842,24 +2862,6 @@ error_param:
(u8 *)&stats, sizeof(stats));
}
-/**
- * i40e_can_vf_change_mac
- * @vf: pointer to the VF info
- *
- * Return true if the VF is allowed to change its MAC filters, false otherwise
- */
-static bool i40e_can_vf_change_mac(struct i40e_vf *vf)
-{
- /* If the VF MAC address has been set administratively (via the
- * ndo_set_vf_mac command), then deny permission to the VF to
- * add/delete unicast MAC addresses, unless the VF is trusted
- */
- if (vf->pf_set_mac && !vf->trusted)
- return false;
-
- return true;
-}
-
#define I40E_MAX_MACVLAN_PER_HW 3072
#define I40E_MAX_MACVLAN_PER_PF(num_ports) (I40E_MAX_MACVLAN_PER_HW / \
(num_ports))
@@ -2898,8 +2900,10 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
struct i40e_hw *hw = &pf->hw;
- int mac2add_cnt = 0;
- int i;
+ int i, mac_add_max, mac_add_cnt = 0;
+ bool vf_trusted;
+
+ vf_trusted = test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
for (i = 0; i < al->num_elements; i++) {
struct i40e_mac_filter *f;
@@ -2919,9 +2923,8 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
* The VF may request to set the MAC address filter already
* assigned to it so do not return an error in that case.
*/
- if (!i40e_can_vf_change_mac(vf) &&
- !is_multicast_ether_addr(addr) &&
- !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
+ if (!vf_trusted && !is_multicast_ether_addr(addr) &&
+ vf->pf_set_mac && !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
dev_err(&pf->pdev->dev,
"VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
return -EPERM;
@@ -2930,31 +2933,50 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
/*count filters that really will be added*/
f = i40e_find_mac(vsi, addr);
if (!f)
- ++mac2add_cnt;
+ ++mac_add_cnt;
}
+ /* Determine the maximum number of MAC addresses this VF may use.
+ *
+ * - For untrusted VFs: use a fixed small limit.
+ *
+ * - For trusted VFs: limit is calculated by dividing total MAC
+ * filter pool across all VFs/ports.
+ *
+ * - The user can override this via the devlink param "max_mac_per_vf".
+ * If set, its value is used as a strict cap for both trusted and
+ * untrusted VFs.
+ * Note: even when overridden, this is a theoretical maximum; hardware
+ * may reject additional MACs if the absolute HW limit is reached.
+ */
+ if (!vf_trusted)
+ mac_add_max = I40E_VC_MAX_MAC_ADDR_PER_VF;
+ else
+ mac_add_max = I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(pf->num_alloc_vfs, hw->num_ports);
+
+ if (pf->max_mac_per_vf > 0)
+ mac_add_max = pf->max_mac_per_vf;
- /* If this VF is not privileged, then we can't add more than a limited
- * number of addresses. Check to make sure that the additions do not
- * push us over the limit.
+ /* A VF can replace all its filters in one step; in that case mac_add_max
+ * filters will be added as active and another mac_add_max will be in
+ * a to-be-removed state. Account for that.
*/
- if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
- if ((i40e_count_filters(vsi) + mac2add_cnt) >
- I40E_VC_MAX_MAC_ADDR_PER_VF) {
+ if ((i40e_count_active_filters(vsi) + mac_add_cnt) > mac_add_max ||
+ (i40e_count_all_filters(vsi) + mac_add_cnt) > 2 * mac_add_max) {
+ if (pf->max_mac_per_vf == mac_add_max && mac_add_max > 0) {
dev_err(&pf->pdev->dev,
- "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
+ "Cannot add more MAC addresses: VF reached its maximum allowed limit (%d)\n",
+ mac_add_max);
return -EPERM;
}
- /* If this VF is trusted, it can use more resources than untrusted.
- * However to ensure that every trusted VF has appropriate number of
- * resources, divide whole pool of resources per port and then across
- * all VFs.
- */
- } else {
- if ((i40e_count_filters(vsi) + mac2add_cnt) >
- I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(pf->num_alloc_vfs,
- hw->num_ports)) {
+ if (!vf_trusted) {
dev_err(&pf->pdev->dev,
- "Cannot add more MAC addresses, trusted VF exhausted it's resources\n");
+ "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
+ return -EPERM;
+ } else {
+ dev_err(&pf->pdev->dev,
+ "Cannot add more MAC addresses: trusted VF reached its maximum allowed limit (%d)\n",
+ mac_add_max);
return -EPERM;
}
}
@@ -3135,10 +3157,10 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
const u8 *addr = al->list[i].addr;
/* Allow to delete VF primary MAC only if it was not set
- * administratively by PF or if VF is trusted.
+ * administratively by PF.
*/
if (ether_addr_equal(addr, vf->default_lan_addr.addr)) {
- if (i40e_can_vf_change_mac(vf))
+ if (!vf->pf_set_mac)
was_unimac_deleted = true;
else
continue;
@@ -3322,8 +3344,9 @@ error_param:
static int i40e_vc_rdma_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
struct i40e_pf *pf = vf->pf;
- int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
+ struct i40e_vsi *main_vsi;
int aq_ret = 0;
+ int abs_vf_id;
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
!test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) {
@@ -3331,8 +3354,9 @@ static int i40e_vc_rdma_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
goto error_param;
}
- i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id,
- msg, msglen);
+ main_vsi = i40e_pf_get_main_vsi(pf);
+ abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
+ i40e_notify_client_of_vf_msg(main_vsi, abs_vf_id, msg, msglen);
error_param:
/* send the response to the VF */
@@ -3443,15 +3467,15 @@ err:
}
/**
- * i40e_vc_get_rss_hena
+ * i40e_vc_get_rss_hashcfg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
*
- * Return the RSS HENA bits allowed by the hardware
+ * Return the RSS Hash configuration bits allowed by the hardware
**/
-static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
+static int i40e_vc_get_rss_hashcfg(struct i40e_vf *vf, u8 *msg)
{
- struct virtchnl_rss_hena *vrh = NULL;
+ struct virtchnl_rss_hashcfg *vrh = NULL;
struct i40e_pf *pf = vf->pf;
int aq_ret = 0;
int len = 0;
@@ -3460,7 +3484,7 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
aq_ret = -EINVAL;
goto err;
}
- len = sizeof(struct virtchnl_rss_hena);
+ len = sizeof(struct virtchnl_rss_hashcfg);
vrh = kzalloc(len, GFP_KERNEL);
if (!vrh) {
@@ -3468,26 +3492,26 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
len = 0;
goto err;
}
- vrh->hena = i40e_pf_get_default_rss_hena(pf);
+ vrh->hashcfg = i40e_pf_get_default_rss_hashcfg(pf);
err:
/* send the response back to the VF */
- aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
+ aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS,
aq_ret, (u8 *)vrh, len);
kfree(vrh);
return aq_ret;
}
/**
- * i40e_vc_set_rss_hena
+ * i40e_vc_set_rss_hashcfg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
*
- * Set the RSS HENA bits for the VF
+ * Set the RSS Hash configuration bits for the VF
**/
-static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
+static int i40e_vc_set_rss_hashcfg(struct i40e_vf *vf, u8 *msg)
{
- struct virtchnl_rss_hena *vrh =
- (struct virtchnl_rss_hena *)msg;
+ struct virtchnl_rss_hashcfg *vrh =
+ (struct virtchnl_rss_hashcfg *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
int aq_ret = 0;
@@ -3496,13 +3520,14 @@ static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
aq_ret = -EINVAL;
goto err;
}
- i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
+ i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id),
+ (u32)vrh->hashcfg);
i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
- (u32)(vrh->hena >> 32));
+ (u32)(vrh->hashcfg >> 32));
/* send the response to the VF */
err:
- return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret);
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HASHCFG, aq_ret);
}
/**
@@ -3585,7 +3610,7 @@ static int i40e_validate_cloud_filter(struct i40e_vf *vf,
/* action_meta is TC number here to which the filter is applied */
if (!tc_filter->action_meta ||
- tc_filter->action_meta > vf->num_tc) {
+ tc_filter->action_meta >= vf->num_tc) {
dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n",
vf->vf_id, tc_filter->action_meta);
goto err;
@@ -3742,8 +3767,7 @@ static void i40e_del_all_cloud_filters(struct i40e_vf *vf)
dev_err(&pf->pdev->dev,
"VF %d: Failed to delete cloud filter, err %pe aq_err %s\n",
vf->vf_id, ERR_PTR(ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
hlist_del(&cfilter->cloud_node);
kfree(cfilter);
@@ -3845,7 +3869,7 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
dev_err(&pf->pdev->dev,
"VF %d: Failed to delete cloud filter, err %pe aq_err %s\n",
vf->vf_id, ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
goto err;
}
@@ -3883,6 +3907,8 @@ err:
aq_ret);
}
+#define I40E_MAX_VF_CLOUD_FILTER 0xFF00
+
/**
* i40e_vc_add_cloud_filter
* @vf: pointer to the VF info
@@ -3922,6 +3948,14 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
goto err_out;
}
+ if (vf->num_cloud_filters >= I40E_MAX_VF_CLOUD_FILTER) {
+ dev_warn(&pf->pdev->dev,
+ "VF %d: Max number of filters reached, can't apply cloud filter\n",
+ vf->vf_id);
+ aq_ret = -ENOSPC;
+ goto err_out;
+ }
+
cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
if (!cfilter) {
aq_ret = -ENOMEM;
@@ -3981,7 +4015,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
dev_err(&pf->pdev->dev,
"VF %d: Failed to add cloud filter, err %pe aq_err %s\n",
vf->vf_id, ERR_PTR(aq_ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
goto err_free;
}
@@ -4249,11 +4283,11 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
case VIRTCHNL_OP_CONFIG_RSS_LUT:
ret = i40e_vc_config_rss_lut(vf, msg);
break;
- case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
- ret = i40e_vc_get_rss_hena(vf, msg);
+ case VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS:
+ ret = i40e_vc_get_rss_hashcfg(vf, msg);
break;
- case VIRTCHNL_OP_SET_RSS_HENA:
- ret = i40e_vc_set_rss_hena(vf, msg);
+ case VIRTCHNL_OP_SET_RSS_HASHCFG:
+ ret = i40e_vc_set_rss_hashcfg(vf, msg);
break;
case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
ret = i40e_vc_enable_vlan_stripping(vf, msg);
@@ -4324,7 +4358,10 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
if (reg & BIT(bit_idx))
/* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
- i40e_reset_vf(vf, true);
+ if (!i40e_reset_vf(vf, true)) {
+ /* At least one VF did not finish resetting, retry next time */
+ set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
+ }
}
return 0;
@@ -4766,6 +4803,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
unsigned long q_map;
struct i40e_vf *vf;
int abs_vf_id;
+ int old_link;
int ret = 0;
int tmp;
@@ -4784,6 +4822,17 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
vf = &pf->vf[vf_id];
abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+ /* skip VF link state change if requested state is already set */
+ if (!vf->link_forced)
+ old_link = IFLA_VF_LINK_STATE_AUTO;
+ else if (vf->link_up)
+ old_link = IFLA_VF_LINK_STATE_ENABLE;
+ else
+ old_link = IFLA_VF_LINK_STATE_DISABLE;
+
+ if (link == old_link)
+ goto error_out;
+
pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
pfe.severity = PF_EVENT_SEVERITY_INFO;
@@ -4999,7 +5048,7 @@ int i40e_get_vf_stats(struct net_device *netdev, int vf_id,
vf_stats->broadcast = stats->rx_broadcast;
vf_stats->multicast = stats->rx_multicast;
vf_stats->rx_dropped = stats->rx_discards + stats->rx_discards_other;
- vf_stats->tx_dropped = stats->tx_discards;
+ vf_stats->tx_dropped = stats->tx_errors;
return 0;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 66f95e2f3146..f558b45725c8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -41,7 +41,8 @@ enum i40e_vf_states {
I40E_VF_STATE_MC_PROMISC,
I40E_VF_STATE_UC_PROMISC,
I40E_VF_STATE_PRE_ENABLE,
- I40E_VF_STATE_RESETTING
+ I40E_VF_STATE_RESETTING,
+ I40E_VF_STATE_RESOURCES_LOADED,
};
/* VF capabilities */
@@ -64,6 +65,12 @@ struct i40evf_channel {
u64 max_tx_rate; /* bandwidth rate allocation for VSIs */
};
+struct i40e_mdd_vf_events {
+ u64 count; /* total count of Rx|Tx events */
+ /* count number of the last printed event */
+ u64 last_printed;
+};
+
/* VF information structure */
struct i40e_vf {
struct i40e_pf *pf;
@@ -92,7 +99,9 @@ struct i40e_vf {
u8 num_queue_pairs; /* num of qps assigned to VF vsis */
u8 num_req_queues; /* num of requested qps */
- u64 num_mdd_events; /* num of mdd events detected */
+ /* num of mdd tx and rx events detected */
+ struct i40e_mdd_vf_events mdd_rx_events;
+ struct i40e_mdd_vf_events mdd_tx_events;
unsigned long vf_caps; /* vf's adv. capabilities */
unsigned long vf_states; /* vf's runtime states */
@@ -120,6 +129,7 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs);
int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
u32 v_retval, u8 *msg, u16 msglen);
int i40e_vc_process_vflr_event(struct i40e_pf *pf);
+void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf);
bool i40e_reset_vf(struct i40e_vf *vf, bool flr);
bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr);
void i40e_vc_notify_vf_reset(struct i40e_vf *vf);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 11500003af0d..9f47388eaba5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -2,6 +2,7 @@
/* Copyright(c) 2018 Intel Corporation. */
#include <linux/bpf_trace.h>
+#include <linux/unroll.h>
#include <net/xdp_sock_drv.h>
#include "i40e_txrx_common.h"
#include "i40e_xsk.h"
@@ -301,8 +302,7 @@ static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
net_prefetch(xdp->data_meta);
/* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
- GFP_ATOMIC | __GFP_NOWARN);
+ skb = napi_alloc_skb(&rx_ring->q_vector->napi, totalsize);
if (unlikely(!skb))
goto out;
@@ -396,32 +396,6 @@ static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
WARN_ON_ONCE(1);
}
-static int
-i40e_add_xsk_frag(struct i40e_ring *rx_ring, struct xdp_buff *first,
- struct xdp_buff *xdp, const unsigned int size)
-{
- struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(first);
-
- if (!xdp_buff_has_frags(first)) {
- sinfo->nr_frags = 0;
- sinfo->xdp_frags_size = 0;
- xdp_buff_set_frags_flag(first);
- }
-
- if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
- xsk_buff_free(first);
- return -ENOMEM;
- }
-
- __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
- virt_to_page(xdp->data_hard_start),
- XDP_PACKET_HEADROOM, size);
- sinfo->xdp_frags_size += size;
- xsk_buff_add_frag(xdp);
-
- return 0;
-}
-
/**
* i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring
* @rx_ring: Rx ring
@@ -483,12 +457,14 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
bi = *i40e_rx_bi(rx_ring, next_to_process);
xsk_buff_set_size(bi, size);
- xsk_buff_dma_sync_for_cpu(bi, rx_ring->xsk_pool);
+ xsk_buff_dma_sync_for_cpu(bi);
if (!first)
first = bi;
- else if (i40e_add_xsk_frag(rx_ring, first, bi, size))
+ else if (!xsk_buff_add_frag(first, bi)) {
+ xsk_buff_free(first);
break;
+ }
if (++next_to_process == count)
next_to_process = 0;
@@ -554,7 +530,8 @@ static void i40e_xmit_pkt_batch(struct i40e_ring *xdp_ring, struct xdp_desc *des
dma_addr_t dma;
u32 i;
- loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) {
+ unrolled_count(PKTS_PER_BATCH)
+ for (i = 0; i < PKTS_PER_BATCH; i++) {
u32 cmd = I40E_TX_DESC_CMD_ICRC | xsk_is_eop_desc(&desc[i]);
dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc[i].addr);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.h b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
index ef156fad52f2..dd16351a7af8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
@@ -6,7 +6,7 @@
#include <linux/types.h>
-/* This value should match the pragma in the loop_unrolled_for
+/* This value should match the pragma in the unrolled_count()
* macro. Why 4? It is strictly empirical. It seems to be a good
* compromise between the advantage of having simultaneous outstanding
* reads to the DMA array that can hide each others latency and the
@@ -14,14 +14,6 @@
*/
#define PKTS_PER_BATCH 4
-#ifdef __clang__
-#define loop_unrolled_for _Pragma("clang loop unroll_count(4)") for
-#elif __GNUC__ >= 8
-#define loop_unrolled_for _Pragma("GCC unroll 4") for
-#else
-#define loop_unrolled_for for
-#endif
-
struct i40e_ring;
struct i40e_vsi;
struct net_device;
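
Alongside that, the open-coded loop_unrolled_for macro goes away in favour of the generic unrolled_count() annotation from <linux/unroll.h>, used in i40e_xmit_pkt_batch() above. A self-contained sketch of how a pragma-based unroll hint of this kind can be put together, assuming only the GCC/Clang loop pragmas (the real <linux/unroll.h> differs in detail and stringifies its count argument):

/* Illustrative only: like the removed loop_unrolled_for, the count is
 * hard-coded to 4 here because _Pragma() wants a string literal.
 */
#include <stdio.h>

#if defined(__clang__)
#define unroll_by_4	_Pragma("clang loop unroll_count(4)")
#elif defined(__GNUC__) && __GNUC__ >= 8
#define unroll_by_4	_Pragma("GCC unroll 4")
#else
#define unroll_by_4	/* no hint: let the compiler decide */
#endif

#define PKTS_PER_BATCH	4

int main(void)
{
	unsigned int sum = 0;

	unroll_by_4
	for (unsigned int i = 0; i < PKTS_PER_BATCH; i++)
		sum += i;	/* stands in for the per-descriptor Tx work */

	printf("sum=%u\n", sum);
	return 0;
}
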
diff --git a/drivers/net/ethernet/intel/iavf/Makefile b/drivers/net/ethernet/intel/iavf/Makefile
index 2d154a4e2fd7..e13720a728ff 100644
--- a/drivers/net/ethernet/intel/iavf/Makefile
+++ b/drivers/net/ethernet/intel/iavf/Makefile
@@ -11,6 +11,7 @@ subdir-ccflags-y += -I$(src)
obj-$(CONFIG_IAVF) += iavf.o
-iavf-objs := iavf_main.o iavf_ethtool.o iavf_virtchnl.o iavf_fdir.o \
- iavf_adv_rss.o \
- iavf_txrx.o iavf_common.o iavf_adminq.o
+iavf-y := iavf_main.o iavf_ethtool.o iavf_virtchnl.o iavf_fdir.o \
+ iavf_adv_rss.o iavf_txrx.o iavf_common.o iavf_adminq.o
+
+iavf-$(CONFIG_PTP_1588_CLOCK) += iavf_ptp.o
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index db8188c7ac4b..a87e0c6d4017 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -33,12 +33,15 @@
#include <net/udp.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
+#include <net/tc_act/tc_skbedit.h>
+#include <net/net_shaper.h>
#include "iavf_type.h"
#include <linux/avf/virtchnl.h>
#include "iavf_txrx.h"
#include "iavf_fdir.h"
#include "iavf_adv_rss.h"
+#include "iavf_types.h"
#include <linux/bitmap.h>
#define DEFAULT_DEBUG_LEVEL_SHIFT 3
@@ -80,7 +83,7 @@ struct iavf_vsi {
#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
-#define IAVF_RX_DESC(R, i) (&(((union iavf_32byte_rx_desc *)((R)->desc))[i]))
+#define IAVF_RX_DESC(R, i) (&(((struct iavf_rx_desc *)((R)->desc))[i]))
#define IAVF_TX_DESC(R, i) (&(((struct iavf_tx_desc *)((R)->desc))[i]))
#define IAVF_TX_CTXTDESC(R, i) \
(&(((struct iavf_tx_context_desc *)((R)->desc))[i]))
@@ -111,8 +114,6 @@ struct iavf_q_vector {
u16 reg_idx; /* register index of the interrupt */
char name[IFNAMSIZ + 15];
bool arm_wb_state;
- cpumask_t affinity_mask;
- struct irq_affinity_notify affinity_notify;
};
/* Helper macros to switch between ints/sec and what the register uses.
@@ -249,6 +250,9 @@ struct iavf_cloud_filter {
#define IAVF_RESET_WAIT_DETECTED_COUNT 500
#define IAVF_RESET_WAIT_COMPLETE_COUNT 2000
+#define IAVF_MAX_QOS_TC_NUM 8
+#define IAVF_DEFAULT_QUANTA_SIZE 1024
+
/* board specific private data structure */
struct iavf_adapter {
struct workqueue_struct *wq;
@@ -262,10 +266,10 @@ struct iavf_adapter {
struct list_head vlan_filter_list;
int num_vlan_filters;
struct list_head mac_filter_list;
- struct mutex crit_lock;
/* Lock to protect accesses to MAC and VLAN lists */
spinlock_t mac_vlan_list_lock;
char misc_vector_name[IFNAMSIZ + 9];
+ u8 rxdid;
int num_active_queues;
int num_req_queues;
@@ -287,7 +291,7 @@ struct iavf_adapter {
#define IAVF_FLAG_RESET_PENDING BIT(4)
#define IAVF_FLAG_RESET_NEEDED BIT(5)
#define IAVF_FLAG_WB_ON_ITR_CAPABLE BIT(6)
-#define IAVF_FLAG_LEGACY_RX BIT(15)
+/* BIT(15) is free, was IAVF_FLAG_LEGACY_RX */
#define IAVF_FLAG_REINIT_ITR_NEEDED BIT(16)
#define IAVF_FLAG_QUEUES_DISABLED BIT(17)
#define IAVF_FLAG_SETUP_NETDEV_FEATURES BIT(18)
@@ -309,8 +313,8 @@ struct iavf_adapter {
#define IAVF_FLAG_AQ_CONFIGURE_RSS BIT_ULL(9) /* direct AQ config */
#define IAVF_FLAG_AQ_GET_CONFIG BIT_ULL(10)
/* Newer style, RSS done by the PF so we can ignore hardware vagaries. */
-#define IAVF_FLAG_AQ_GET_HENA BIT_ULL(11)
-#define IAVF_FLAG_AQ_SET_HENA BIT_ULL(12)
+#define IAVF_FLAG_AQ_GET_RSS_HASHCFG BIT_ULL(11)
+#define IAVF_FLAG_AQ_SET_RSS_HASHCFG BIT_ULL(12)
#define IAVF_FLAG_AQ_SET_RSS_KEY BIT_ULL(13)
#define IAVF_FLAG_AQ_SET_RSS_LUT BIT_ULL(14)
#define IAVF_FLAG_AQ_SET_RSS_HFUNC BIT_ULL(15)
@@ -335,6 +339,20 @@ struct iavf_adapter {
#define IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION BIT_ULL(36)
#define IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION BIT_ULL(37)
#define IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION BIT_ULL(38)
+#define IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW BIT_ULL(39)
+#define IAVF_FLAG_AQ_CFG_QUEUES_QUANTA_SIZE BIT_ULL(40)
+#define IAVF_FLAG_AQ_GET_QOS_CAPS BIT_ULL(41)
+#define IAVF_FLAG_AQ_GET_SUPPORTED_RXDIDS BIT_ULL(42)
+#define IAVF_FLAG_AQ_GET_PTP_CAPS BIT_ULL(43)
+#define IAVF_FLAG_AQ_SEND_PTP_CMD BIT_ULL(44)
+
+ /* AQ messages that must be sent after IAVF_FLAG_AQ_GET_CONFIG, in
+ * order to negotiate extended capabilities.
+ */
+#define IAVF_FLAG_AQ_EXTENDED_CAPS \
+ (IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS | \
+ IAVF_FLAG_AQ_GET_SUPPORTED_RXDIDS | \
+ IAVF_FLAG_AQ_GET_PTP_CAPS)
/* flags for processing extended capability messages during
* __IAVF_INIT_EXTENDED_CAPS. Each capability exchange requires
@@ -346,10 +364,18 @@ struct iavf_adapter {
u64 extended_caps;
#define IAVF_EXTENDED_CAP_SEND_VLAN_V2 BIT_ULL(0)
#define IAVF_EXTENDED_CAP_RECV_VLAN_V2 BIT_ULL(1)
+#define IAVF_EXTENDED_CAP_SEND_RXDID BIT_ULL(2)
+#define IAVF_EXTENDED_CAP_RECV_RXDID BIT_ULL(3)
+#define IAVF_EXTENDED_CAP_SEND_PTP BIT_ULL(4)
+#define IAVF_EXTENDED_CAP_RECV_PTP BIT_ULL(5)
#define IAVF_EXTENDED_CAPS \
(IAVF_EXTENDED_CAP_SEND_VLAN_V2 | \
- IAVF_EXTENDED_CAP_RECV_VLAN_V2)
+ IAVF_EXTENDED_CAP_RECV_VLAN_V2 | \
+ IAVF_EXTENDED_CAP_SEND_RXDID | \
+ IAVF_EXTENDED_CAP_RECV_RXDID | \
+ IAVF_EXTENDED_CAP_SEND_PTP | \
+ IAVF_EXTENDED_CAP_RECV_PTP)
/* Lock to prevent possible clobbering of
* current_netdev_promisc_flags
@@ -393,6 +419,8 @@ struct iavf_adapter {
VIRTCHNL_VF_OFFLOAD_VLAN_V2)
#define CRC_OFFLOAD_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \
VIRTCHNL_VF_OFFLOAD_CRC)
+#define TC_U32_SUPPORT(_a) ((_a)->vf_res->vf_cap_flags & \
+ VIRTCHNL_VF_OFFLOAD_TC_U32)
#define VLAN_V2_FILTERING_ALLOWED(_a) \
(VLAN_V2_ALLOWED((_a)) && \
((_a)->vlan_v2_caps.filtering.filtering_support.outer || \
@@ -405,19 +433,28 @@ struct iavf_adapter {
VIRTCHNL_VF_OFFLOAD_FDIR_PF)
#define ADV_RSS_SUPPORT(_a) ((_a)->vf_res->vf_cap_flags & \
VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF)
+#define QOS_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \
+ VIRTCHNL_VF_OFFLOAD_QOS)
+#define IAVF_RXDID_ALLOWED(a) \
+ ((a)->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
+#define IAVF_PTP_ALLOWED(a) \
+ ((a)->vf_res->vf_cap_flags & VIRTCHNL_VF_CAP_PTP)
struct virtchnl_vf_resource *vf_res; /* incl. all VSIs */
struct virtchnl_vsi_resource *vsi_res; /* our LAN VSI */
struct virtchnl_version_info pf_version;
#define PF_IS_V11(_a) (((_a)->pf_version.major == 1) && \
((_a)->pf_version.minor == 1))
struct virtchnl_vlan_caps vlan_v2_caps;
+ u64 supp_rxdids;
+ struct iavf_ptp ptp;
u16 msg_enable;
struct iavf_eth_stats current_stats;
+ struct virtchnl_qos_cap_list *qos_caps;
struct iavf_vsi vsi;
u32 aq_wait_count;
/* RSS stuff */
enum virtchnl_rss_algorithm hfunc;
- u64 hena;
+ u64 rss_hashcfg;
u16 rss_key_size;
u16 rss_lut_size;
u8 *rss_key;
@@ -437,6 +474,7 @@ struct iavf_adapter {
#define IAVF_MAX_FDIR_FILTERS 128 /* max allowed Flow Director filters */
u16 fdir_active_fltr;
+ u16 raw_fdir_active_fltr;
struct list_head fdir_list_head;
spinlock_t fdir_fltr_lock; /* protect the Flow Director filter list */
@@ -444,6 +482,32 @@ struct iavf_adapter {
spinlock_t adv_rss_lock; /* protect the RSS management list */
};
+/* Must be called with fdir_fltr_lock lock held */
+static inline bool iavf_fdir_max_reached(struct iavf_adapter *adapter)
+{
+ return adapter->fdir_active_fltr + adapter->raw_fdir_active_fltr >=
+ IAVF_MAX_FDIR_FILTERS;
+}
+
+static inline void
+iavf_inc_fdir_active_fltr(struct iavf_adapter *adapter,
+ struct iavf_fdir_fltr *fltr)
+{
+ if (iavf_is_raw_fdir(fltr))
+ adapter->raw_fdir_active_fltr++;
+ else
+ adapter->fdir_active_fltr++;
+}
+
+static inline void
+iavf_dec_fdir_active_fltr(struct iavf_adapter *adapter,
+ struct iavf_fdir_fltr *fltr)
+{
+ if (iavf_is_raw_fdir(fltr))
+ adapter->raw_fdir_active_fltr--;
+ else
+ adapter->fdir_active_fltr--;
+}
/* Ethtool Private Flags */
@@ -499,37 +563,33 @@ static inline void iavf_change_state(struct iavf_adapter *adapter,
iavf_state_str(adapter->state));
}
-int iavf_up(struct iavf_adapter *adapter);
void iavf_down(struct iavf_adapter *adapter);
int iavf_process_config(struct iavf_adapter *adapter);
int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter);
void iavf_schedule_reset(struct iavf_adapter *adapter, u64 flags);
void iavf_schedule_aq_request(struct iavf_adapter *adapter, u64 flags);
void iavf_schedule_finish_config(struct iavf_adapter *adapter);
-void iavf_reset(struct iavf_adapter *adapter);
void iavf_set_ethtool_ops(struct net_device *netdev);
-void iavf_update_stats(struct iavf_adapter *adapter);
void iavf_free_all_tx_resources(struct iavf_adapter *adapter);
void iavf_free_all_rx_resources(struct iavf_adapter *adapter);
-void iavf_napi_add_all(struct iavf_adapter *adapter);
-void iavf_napi_del_all(struct iavf_adapter *adapter);
-
int iavf_send_api_ver(struct iavf_adapter *adapter);
int iavf_verify_api_ver(struct iavf_adapter *adapter);
int iavf_send_vf_config_msg(struct iavf_adapter *adapter);
int iavf_get_vf_config(struct iavf_adapter *adapter);
int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter);
int iavf_send_vf_offload_vlan_v2_msg(struct iavf_adapter *adapter);
+int iavf_send_vf_supported_rxdids_msg(struct iavf_adapter *adapter);
+int iavf_get_vf_supported_rxdids(struct iavf_adapter *adapter);
+int iavf_send_vf_ptp_caps_msg(struct iavf_adapter *adapter);
+int iavf_get_vf_ptp_caps(struct iavf_adapter *adapter);
void iavf_set_queue_vlan_tag_loc(struct iavf_adapter *adapter);
u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter);
void iavf_irq_enable(struct iavf_adapter *adapter, bool flush);
void iavf_configure_queues(struct iavf_adapter *adapter);
-void iavf_deconfigure_queues(struct iavf_adapter *adapter);
void iavf_enable_queues(struct iavf_adapter *adapter);
void iavf_disable_queues(struct iavf_adapter *adapter);
void iavf_map_queues(struct iavf_adapter *adapter);
-int iavf_request_queues(struct iavf_adapter *adapter, int num);
void iavf_add_ether_addrs(struct iavf_adapter *adapter);
void iavf_del_ether_addrs(struct iavf_adapter *adapter);
void iavf_add_vlans(struct iavf_adapter *adapter);
@@ -538,8 +598,8 @@ void iavf_set_promiscuous(struct iavf_adapter *adapter);
bool iavf_promiscuous_mode_changed(struct iavf_adapter *adapter);
void iavf_request_stats(struct iavf_adapter *adapter);
int iavf_request_reset(struct iavf_adapter *adapter);
-void iavf_get_hena(struct iavf_adapter *adapter);
-void iavf_set_hena(struct iavf_adapter *adapter);
+void iavf_get_rss_hashcfg(struct iavf_adapter *adapter);
+void iavf_set_rss_hashcfg(struct iavf_adapter *adapter);
void iavf_set_rss_key(struct iavf_adapter *adapter);
void iavf_set_rss_lut(struct iavf_adapter *adapter);
void iavf_set_rss_hfunc(struct iavf_adapter *adapter);
@@ -549,8 +609,9 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
enum virtchnl_ops v_opcode,
enum iavf_status v_retval, u8 *msg, u16 msglen);
int iavf_config_rss(struct iavf_adapter *adapter);
-int iavf_lan_add_device(struct iavf_adapter *adapter);
-int iavf_lan_del_device(struct iavf_adapter *adapter);
+void iavf_cfg_queues_bw(struct iavf_adapter *adapter);
+void iavf_cfg_queues_quanta_size(struct iavf_adapter *adapter);
+void iavf_get_qos_caps(struct iavf_adapter *adapter);
void iavf_enable_channels(struct iavf_adapter *adapter);
void iavf_disable_channels(struct iavf_adapter *adapter);
void iavf_add_cloud_filter(struct iavf_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adminq.c b/drivers/net/ethernet/intel/iavf/iavf_adminq.c
index 82fcd18ad660..6937b7dd44cb 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_adminq.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_adminq.c
@@ -18,7 +18,7 @@ static enum iavf_status iavf_alloc_adminq_asq_ring(struct iavf_hw *hw)
ret_code = iavf_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
iavf_mem_atq_ring,
(hw->aq.num_asq_entries *
- sizeof(struct iavf_aq_desc)),
+ sizeof(struct libie_aq_desc)),
IAVF_ADMINQ_DESC_ALIGNMENT);
if (ret_code)
return ret_code;
@@ -45,7 +45,7 @@ static enum iavf_status iavf_alloc_adminq_arq_ring(struct iavf_hw *hw)
ret_code = iavf_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
iavf_mem_arq_ring,
(hw->aq.num_arq_entries *
- sizeof(struct iavf_aq_desc)),
+ sizeof(struct libie_aq_desc)),
IAVF_ADMINQ_DESC_ALIGNMENT);
return ret_code;
@@ -81,7 +81,7 @@ static void iavf_free_adminq_arq(struct iavf_hw *hw)
**/
static enum iavf_status iavf_alloc_arq_bufs(struct iavf_hw *hw)
{
- struct iavf_aq_desc *desc;
+ struct libie_aq_desc *desc;
struct iavf_dma_mem *bi;
enum iavf_status ret_code;
int i;
@@ -111,9 +111,9 @@ static enum iavf_status iavf_alloc_arq_bufs(struct iavf_hw *hw)
/* now configure the descriptors for use */
desc = IAVF_ADMINQ_DESC(hw->aq.arq, i);
- desc->flags = cpu_to_le16(IAVF_AQ_FLAG_BUF);
+ desc->flags = cpu_to_le16(LIBIE_AQ_FLAG_BUF);
if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
- desc->flags |= cpu_to_le16(IAVF_AQ_FLAG_LB);
+ desc->flags |= cpu_to_le16(LIBIE_AQ_FLAG_LB);
desc->opcode = 0;
/* This is in accordance with Admin queue design, there is no
* register for buffer size configuration
@@ -122,12 +122,12 @@ static enum iavf_status iavf_alloc_arq_bufs(struct iavf_hw *hw)
desc->retval = 0;
desc->cookie_high = 0;
desc->cookie_low = 0;
- desc->params.external.addr_high =
+ desc->params.generic.addr_high =
cpu_to_le32(upper_32_bits(bi->pa));
- desc->params.external.addr_low =
+ desc->params.generic.addr_low =
cpu_to_le32(lower_32_bits(bi->pa));
- desc->params.external.param0 = 0;
- desc->params.external.param1 = 0;
+ desc->params.generic.param0 = 0;
+ desc->params.generic.param1 = 0;
}
alloc_arq_bufs:
@@ -558,8 +558,8 @@ static u16 iavf_clean_asq(struct iavf_hw *hw)
struct iavf_adminq_ring *asq = &hw->aq.asq;
struct iavf_asq_cmd_details *details;
u16 ntc = asq->next_to_clean;
- struct iavf_aq_desc desc_cb;
- struct iavf_aq_desc *desc;
+ struct libie_aq_desc desc_cb;
+ struct libie_aq_desc *desc;
desc = IAVF_ADMINQ_DESC(*asq, ntc);
details = IAVF_ADMINQ_DETAILS(*asq, ntc);
@@ -573,7 +573,7 @@ static u16 iavf_clean_asq(struct iavf_hw *hw)
desc_cb = *desc;
cb_func(hw, &desc_cb);
}
- memset((void *)desc, 0, sizeof(struct iavf_aq_desc));
+ memset((void *)desc, 0, sizeof(struct libie_aq_desc));
memset((void *)details, 0,
sizeof(struct iavf_asq_cmd_details));
ntc++;
@@ -615,14 +615,14 @@ bool iavf_asq_done(struct iavf_hw *hw)
* queue. It runs the queue, cleans the queue, etc
**/
enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
- struct iavf_aq_desc *desc,
+ struct libie_aq_desc *desc,
void *buff, /* can be NULL */
u16 buff_size,
struct iavf_asq_cmd_details *cmd_details)
{
struct iavf_dma_mem *dma_buff = NULL;
struct iavf_asq_cmd_details *details;
- struct iavf_aq_desc *desc_on_ring;
+ struct libie_aq_desc *desc_on_ring;
bool cmd_completed = false;
enum iavf_status status = 0;
u16 retval = 0;
@@ -637,7 +637,7 @@ enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
goto asq_send_command_error;
}
- hw->aq.asq_last_status = IAVF_AQ_RC_OK;
+ hw->aq.asq_last_status = LIBIE_AQ_RC_OK;
val = rd32(hw, IAVF_VF_ATQH1);
if (val >= hw->aq.num_asq_entries) {
@@ -717,9 +717,9 @@ enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
/* Update the address values in the desc with the pa value
* for respective buffer
*/
- desc_on_ring->params.external.addr_high =
+ desc_on_ring->params.generic.addr_high =
cpu_to_le32(upper_32_bits(dma_buff->pa));
- desc_on_ring->params.external.addr_low =
+ desc_on_ring->params.generic.addr_low =
cpu_to_le32(lower_32_bits(dma_buff->pa));
}
@@ -766,13 +766,13 @@ enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
retval &= 0xff;
}
cmd_completed = true;
- if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_OK)
+ if ((enum libie_aq_err)retval == LIBIE_AQ_RC_OK)
status = 0;
- else if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_EBUSY)
+ else if ((enum libie_aq_err)retval == LIBIE_AQ_RC_EBUSY)
status = IAVF_ERR_NOT_READY;
else
status = IAVF_ERR_ADMIN_QUEUE_ERROR;
- hw->aq.asq_last_status = (enum iavf_admin_queue_err)retval;
+ hw->aq.asq_last_status = (enum libie_aq_err)retval;
}
iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
@@ -809,12 +809,12 @@ asq_send_command_error:
*
* Fill the desc with default values
**/
-void iavf_fill_default_direct_cmd_desc(struct iavf_aq_desc *desc, u16 opcode)
+void iavf_fill_default_direct_cmd_desc(struct libie_aq_desc *desc, u16 opcode)
{
/* zero out the desc */
- memset((void *)desc, 0, sizeof(struct iavf_aq_desc));
+ memset((void *)desc, 0, sizeof(struct libie_aq_desc));
desc->opcode = cpu_to_le16(opcode);
- desc->flags = cpu_to_le16(IAVF_AQ_FLAG_SI);
+ desc->flags = cpu_to_le16(LIBIE_AQ_FLAG_SI);
}
/**
@@ -832,7 +832,7 @@ enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
u16 *pending)
{
u16 ntc = hw->aq.arq.next_to_clean;
- struct iavf_aq_desc *desc;
+ struct libie_aq_desc *desc;
enum iavf_status ret_code = 0;
struct iavf_dma_mem *bi;
u16 desc_idx;
@@ -866,9 +866,9 @@ enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
desc_idx = ntc;
hw->aq.arq_last_status =
- (enum iavf_admin_queue_err)le16_to_cpu(desc->retval);
+ (enum libie_aq_err)le16_to_cpu(desc->retval);
flags = le16_to_cpu(desc->flags);
- if (flags & IAVF_AQ_FLAG_ERR) {
+ if (flags & LIBIE_AQ_FLAG_ERR) {
ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
iavf_debug(hw,
IAVF_DEBUG_AQ_MESSAGE,
@@ -892,14 +892,14 @@ enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
* size
*/
bi = &hw->aq.arq.r.arq_bi[ntc];
- memset((void *)desc, 0, sizeof(struct iavf_aq_desc));
+ memset((void *)desc, 0, sizeof(struct libie_aq_desc));
- desc->flags = cpu_to_le16(IAVF_AQ_FLAG_BUF);
+ desc->flags = cpu_to_le16(LIBIE_AQ_FLAG_BUF);
if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
- desc->flags |= cpu_to_le16(IAVF_AQ_FLAG_LB);
+ desc->flags |= cpu_to_le16(LIBIE_AQ_FLAG_LB);
desc->datalen = cpu_to_le16((u16)bi->size);
- desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
- desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
+ desc->params.generic.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
+ desc->params.generic.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
/* set tail = the last cleaned desc index. */
wr32(hw, IAVF_VF_ARQT1, ntc);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adminq.h b/drivers/net/ethernet/intel/iavf/iavf_adminq.h
index 406506f64bdd..bbf5c4b3a2ae 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_adminq.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_adminq.h
@@ -9,7 +9,7 @@
#include "iavf_adminq_cmd.h"
#define IAVF_ADMINQ_DESC(R, i) \
- (&(((struct iavf_aq_desc *)((R).desc_buf.va))[i]))
+ (&(((struct libie_aq_desc *)((R).desc_buf.va))[i]))
#define IAVF_ADMINQ_DESC_ALIGNMENT 4096
@@ -39,7 +39,7 @@ struct iavf_asq_cmd_details {
u16 flags_dis;
bool async;
bool postpone;
- struct iavf_aq_desc *wb_desc;
+ struct libie_aq_desc *wb_desc;
};
#define IAVF_ADMINQ_DETAILS(R, i) \
@@ -47,7 +47,7 @@ struct iavf_asq_cmd_details {
/* ARQ event information */
struct iavf_arq_event_info {
- struct iavf_aq_desc desc;
+ struct libie_aq_desc desc;
u16 msg_len;
u16 buf_len;
u8 *msg_buf;
@@ -72,8 +72,8 @@ struct iavf_adminq_info {
struct mutex arq_mutex; /* Receive queue lock */
/* last status values on send and receive queues */
- enum iavf_admin_queue_err asq_last_status;
- enum iavf_admin_queue_err arq_last_status;
+ enum libie_aq_err asq_last_status;
+ enum libie_aq_err arq_last_status;
};
/**
@@ -123,6 +123,6 @@ static inline int iavf_aq_rc_to_posix(int aq_ret, int aq_rc)
#define IAVF_AQ_LARGE_BUF 512
#define IAVF_ASQ_CMD_TIMEOUT 250000 /* usecs */
-void iavf_fill_default_direct_cmd_desc(struct iavf_aq_desc *desc, u16 opcode);
+void iavf_fill_default_direct_cmd_desc(struct libie_aq_desc *desc, u16 opcode);
#endif /* _IAVF_ADMINQ_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adminq_cmd.h b/drivers/net/ethernet/intel/iavf/iavf_adminq_cmd.h
index bc512308557b..0482c9ce9b9c 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_adminq_cmd.h
@@ -4,6 +4,8 @@
#ifndef _IAVF_ADMINQ_CMD_H_
#define _IAVF_ADMINQ_CMD_H_
+#include <linux/net/intel/libie/adminq.h>
+
/* This header file defines the iavf Admin Queue commands and is shared between
* iavf Firmware and Software.
*
@@ -21,87 +23,6 @@
/* API version 1.7 implements additional link and PHY-specific APIs */
#define IAVF_MINOR_VER_GET_LINK_INFO_XL710 0x0007
-struct iavf_aq_desc {
- __le16 flags;
- __le16 opcode;
- __le16 datalen;
- __le16 retval;
- __le32 cookie_high;
- __le32 cookie_low;
- union {
- struct {
- __le32 param0;
- __le32 param1;
- __le32 param2;
- __le32 param3;
- } internal;
- struct {
- __le32 param0;
- __le32 param1;
- __le32 addr_high;
- __le32 addr_low;
- } external;
- u8 raw[16];
- } params;
-};
-
-/* Flags sub-structure
- * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
- * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
- */
-
-/* command flags and offsets*/
-#define IAVF_AQ_FLAG_DD_SHIFT 0
-#define IAVF_AQ_FLAG_CMP_SHIFT 1
-#define IAVF_AQ_FLAG_ERR_SHIFT 2
-#define IAVF_AQ_FLAG_VFE_SHIFT 3
-#define IAVF_AQ_FLAG_LB_SHIFT 9
-#define IAVF_AQ_FLAG_RD_SHIFT 10
-#define IAVF_AQ_FLAG_VFC_SHIFT 11
-#define IAVF_AQ_FLAG_BUF_SHIFT 12
-#define IAVF_AQ_FLAG_SI_SHIFT 13
-#define IAVF_AQ_FLAG_EI_SHIFT 14
-#define IAVF_AQ_FLAG_FE_SHIFT 15
-
-#define IAVF_AQ_FLAG_DD BIT(IAVF_AQ_FLAG_DD_SHIFT) /* 0x1 */
-#define IAVF_AQ_FLAG_CMP BIT(IAVF_AQ_FLAG_CMP_SHIFT) /* 0x2 */
-#define IAVF_AQ_FLAG_ERR BIT(IAVF_AQ_FLAG_ERR_SHIFT) /* 0x4 */
-#define IAVF_AQ_FLAG_VFE BIT(IAVF_AQ_FLAG_VFE_SHIFT) /* 0x8 */
-#define IAVF_AQ_FLAG_LB BIT(IAVF_AQ_FLAG_LB_SHIFT) /* 0x200 */
-#define IAVF_AQ_FLAG_RD BIT(IAVF_AQ_FLAG_RD_SHIFT) /* 0x400 */
-#define IAVF_AQ_FLAG_VFC BIT(IAVF_AQ_FLAG_VFC_SHIFT) /* 0x800 */
-#define IAVF_AQ_FLAG_BUF BIT(IAVF_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
-#define IAVF_AQ_FLAG_SI BIT(IAVF_AQ_FLAG_SI_SHIFT) /* 0x2000 */
-#define IAVF_AQ_FLAG_EI BIT(IAVF_AQ_FLAG_EI_SHIFT) /* 0x4000 */
-#define IAVF_AQ_FLAG_FE BIT(IAVF_AQ_FLAG_FE_SHIFT) /* 0x8000 */
-
-/* error codes */
-enum iavf_admin_queue_err {
- IAVF_AQ_RC_OK = 0, /* success */
- IAVF_AQ_RC_EPERM = 1, /* Operation not permitted */
- IAVF_AQ_RC_ENOENT = 2, /* No such element */
- IAVF_AQ_RC_ESRCH = 3, /* Bad opcode */
- IAVF_AQ_RC_EINTR = 4, /* operation interrupted */
- IAVF_AQ_RC_EIO = 5, /* I/O error */
- IAVF_AQ_RC_ENXIO = 6, /* No such resource */
- IAVF_AQ_RC_E2BIG = 7, /* Arg too long */
- IAVF_AQ_RC_EAGAIN = 8, /* Try again */
- IAVF_AQ_RC_ENOMEM = 9, /* Out of memory */
- IAVF_AQ_RC_EACCES = 10, /* Permission denied */
- IAVF_AQ_RC_EFAULT = 11, /* Bad address */
- IAVF_AQ_RC_EBUSY = 12, /* Device or resource busy */
- IAVF_AQ_RC_EEXIST = 13, /* object already exists */
- IAVF_AQ_RC_EINVAL = 14, /* Invalid argument */
- IAVF_AQ_RC_ENOTTY = 15, /* Not a typewriter */
- IAVF_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */
- IAVF_AQ_RC_ENOSYS = 17, /* Function not implemented */
- IAVF_AQ_RC_ERANGE = 18, /* Parameter out of range */
- IAVF_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */
- IAVF_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
- IAVF_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
- IAVF_AQ_RC_EFBIG = 22, /* File too large */
-};
-
/* Admin Queue command opcodes */
enum iavf_admin_queue_opc {
/* aq commands */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c
index a9e1da35e248..4d12dfe1b481 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c
@@ -91,6 +91,55 @@ iavf_fill_adv_rss_sctp_hdr(struct virtchnl_proto_hdr *hdr, u64 hash_flds)
}
/**
+ * iavf_fill_adv_rss_gtp_hdr - Fill GTP-related RSS protocol headers
+ * @proto_hdrs: pointer to the virtchnl protocol headers structure to populate
+ * @packet_hdrs: bitmask of packet header types to configure
+ * @hash_flds: RSS hash field configuration
+ *
+ * This function populates the virtchnl protocol header structure with
+ * appropriate GTP-related header types based on the specified packet_hdrs.
+ * It supports GTPC, GTPU with extension headers, and uplink/downlink PDU
+ * types. For certain GTPU types, it also appends an IPv4 header to enable
+ * hashing on the destination IP address.
+ *
+ * Return: 0 on success or -EOPNOTSUPP if the packet_hdrs value is unsupported.
+ */
+static int
+iavf_fill_adv_rss_gtp_hdr(struct virtchnl_proto_hdrs *proto_hdrs,
+ u32 packet_hdrs, u64 hash_flds)
+{
+ struct virtchnl_proto_hdr *hdr;
+
+ hdr = &proto_hdrs->proto_hdr[proto_hdrs->count - 1];
+
+ switch (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_GTP) {
+ case IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC_TEID:
+ case IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC:
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPC);
+ break;
+ case IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_EH:
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH);
+ break;
+ case IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_UP:
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH_PDU_UP);
+ hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ iavf_fill_adv_rss_ip4_hdr(hdr, IAVF_ADV_RSS_HASH_FLD_IPV4_DA);
+ break;
+ case IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_DWN:
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH_PDU_DWN);
+ fallthrough;
+ case IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_IP:
+ hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ iavf_fill_adv_rss_ip4_hdr(hdr, IAVF_ADV_RSS_HASH_FLD_IPV4_DA);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/**
* iavf_fill_adv_rss_cfg_msg - fill the RSS configuration into virtchnl message
* @rss_cfg: the virtchnl message to be filled with RSS configuration setting
* @packet_hdrs: the RSS configuration protocol header types
@@ -103,6 +152,8 @@ int
iavf_fill_adv_rss_cfg_msg(struct virtchnl_rss_cfg *rss_cfg,
u32 packet_hdrs, u64 hash_flds, bool symm)
{
+ const u32 packet_l3_hdrs = packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_L3;
+ const u32 packet_l4_hdrs = packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_L4;
struct virtchnl_proto_hdrs *proto_hdrs = &rss_cfg->proto_hdrs;
struct virtchnl_proto_hdr *hdr;
@@ -113,31 +164,41 @@ iavf_fill_adv_rss_cfg_msg(struct virtchnl_rss_cfg *rss_cfg,
proto_hdrs->tunnel_level = 0; /* always outer layer */
- hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
- switch (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_L3) {
- case IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4:
- iavf_fill_adv_rss_ip4_hdr(hdr, hash_flds);
- break;
- case IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6:
- iavf_fill_adv_rss_ip6_hdr(hdr, hash_flds);
- break;
- default:
- return -EINVAL;
+ if (packet_l3_hdrs) {
+ hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ switch (packet_l3_hdrs) {
+ case IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4:
+ iavf_fill_adv_rss_ip4_hdr(hdr, hash_flds);
+ break;
+ case IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6:
+ iavf_fill_adv_rss_ip6_hdr(hdr, hash_flds);
+ break;
+ default:
+ return -EINVAL;
+ }
}
- hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
- switch (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_L4) {
- case IAVF_ADV_RSS_FLOW_SEG_HDR_TCP:
- iavf_fill_adv_rss_tcp_hdr(hdr, hash_flds);
- break;
- case IAVF_ADV_RSS_FLOW_SEG_HDR_UDP:
- iavf_fill_adv_rss_udp_hdr(hdr, hash_flds);
- break;
- case IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP:
- iavf_fill_adv_rss_sctp_hdr(hdr, hash_flds);
- break;
- default:
- return -EINVAL;
+ if (packet_l4_hdrs) {
+ hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ switch (packet_l4_hdrs) {
+ case IAVF_ADV_RSS_FLOW_SEG_HDR_TCP:
+ iavf_fill_adv_rss_tcp_hdr(hdr, hash_flds);
+ break;
+ case IAVF_ADV_RSS_FLOW_SEG_HDR_UDP:
+ iavf_fill_adv_rss_udp_hdr(hdr, hash_flds);
+ break;
+ case IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP:
+ iavf_fill_adv_rss_sctp_hdr(hdr, hash_flds);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ if (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_GTP) {
+ hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ if (iavf_fill_adv_rss_gtp_hdr(proto_hdrs, packet_hdrs, hash_flds))
+ return -EINVAL;
}
return 0;
@@ -186,6 +247,8 @@ iavf_print_adv_rss_cfg(struct iavf_adapter *adapter, struct iavf_adv_rss *rss,
proto = "UDP";
else if (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP)
proto = "SCTP";
+ else if (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_GTP)
+ proto = "GTP";
else
return;
@@ -211,6 +274,16 @@ iavf_print_adv_rss_cfg(struct iavf_adapter *adapter, struct iavf_adv_rss *rss,
IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT |
IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT))
strcat(hash_opt, "dst port,");
+ if (hash_flds & IAVF_ADV_RSS_HASH_FLD_GTPC_TEID)
+ strcat(hash_opt, "gtp-c,");
+ if (hash_flds & IAVF_ADV_RSS_HASH_FLD_GTPU_IP_TEID)
+ strcat(hash_opt, "gtp-u ip,");
+ if (hash_flds & IAVF_ADV_RSS_HASH_FLD_GTPU_EH_TEID)
+ strcat(hash_opt, "gtp-u ext,");
+ if (hash_flds & IAVF_ADV_RSS_HASH_FLD_GTPU_UP_TEID)
+ strcat(hash_opt, "gtp-u ul,");
+ if (hash_flds & IAVF_ADV_RSS_HASH_FLD_GTPU_DWN_TEID)
+ strcat(hash_opt, "gtp-u dl,");
if (!action)
action = "";
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h
index e31eb2afebea..74cc9e0d528c 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h
@@ -22,6 +22,12 @@ enum iavf_adv_rss_flow_seg_hdr {
IAVF_ADV_RSS_FLOW_SEG_HDR_TCP = 0x00000004,
IAVF_ADV_RSS_FLOW_SEG_HDR_UDP = 0x00000008,
IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP = 0x00000010,
+ IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC = 0x00000400,
+ IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC_TEID = 0x00000800,
+ IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_IP = 0x00001000,
+ IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_EH = 0x00002000,
+ IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_DWN = 0x00004000,
+ IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_UP = 0x00008000,
};
#define IAVF_ADV_RSS_FLOW_SEG_HDR_L3 \
@@ -33,6 +39,14 @@ enum iavf_adv_rss_flow_seg_hdr {
IAVF_ADV_RSS_FLOW_SEG_HDR_UDP | \
IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP)
+#define IAVF_ADV_RSS_FLOW_SEG_HDR_GTP \
+ (IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC | \
+ IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC_TEID | \
+ IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_IP | \
+ IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_EH | \
+ IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_DWN | \
+ IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_UP)
+
enum iavf_adv_rss_flow_field {
/* L3 */
IAVF_ADV_RSS_FLOW_FIELD_IDX_IPV4_SA,
@@ -46,6 +60,17 @@ enum iavf_adv_rss_flow_field {
IAVF_ADV_RSS_FLOW_FIELD_IDX_UDP_DST_PORT,
IAVF_ADV_RSS_FLOW_FIELD_IDX_SCTP_SRC_PORT,
IAVF_ADV_RSS_FLOW_FIELD_IDX_SCTP_DST_PORT,
+ /* GTPC_TEID */
+ IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPC_TEID,
+ /* GTPU_IP */
+ IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPU_IP_TEID,
+ /* GTPU_EH */
+ IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPU_EH_TEID,
+ IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPU_EH_QFI,
+ /* GTPU_UP */
+ IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPU_UP_TEID,
+ /* GTPU_DWN */
+ IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPU_DWN_TEID,
/* The total number of enums must not exceed 64 */
IAVF_ADV_RSS_FLOW_FIELD_IDX_MAX
@@ -72,6 +97,12 @@ enum iavf_adv_rss_flow_field {
BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_SCTP_SRC_PORT)
#define IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT \
BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_SCTP_DST_PORT)
+#define IAVF_ADV_RSS_HASH_FLD_GTPC_TEID BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPC_TEID)
+#define IAVF_ADV_RSS_HASH_FLD_GTPU_IP_TEID BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPU_IP_TEID)
+#define IAVF_ADV_RSS_HASH_FLD_GTPU_EH_TEID BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPU_EH_TEID)
+#define IAVF_ADV_RSS_HASH_FLD_GTPU_UP_TEID BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPU_UP_TEID)
+#define IAVF_ADV_RSS_HASH_FLD_GTPU_DWN_TEID \
+ BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPU_DWN_TEID)
/* bookkeeping of advanced RSS configuration */
struct iavf_adv_rss {
diff --git a/drivers/net/ethernet/intel/iavf/iavf_common.c b/drivers/net/ethernet/intel/iavf/iavf_common.c
index 5a25233a89d5..614a886bca99 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_common.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_common.c
@@ -8,66 +8,6 @@
#include "iavf_prototype.h"
/**
- * iavf_aq_str - convert AQ err code to a string
- * @hw: pointer to the HW structure
- * @aq_err: the AQ error code to convert
- **/
-const char *iavf_aq_str(struct iavf_hw *hw, enum iavf_admin_queue_err aq_err)
-{
- switch (aq_err) {
- case IAVF_AQ_RC_OK:
- return "OK";
- case IAVF_AQ_RC_EPERM:
- return "IAVF_AQ_RC_EPERM";
- case IAVF_AQ_RC_ENOENT:
- return "IAVF_AQ_RC_ENOENT";
- case IAVF_AQ_RC_ESRCH:
- return "IAVF_AQ_RC_ESRCH";
- case IAVF_AQ_RC_EINTR:
- return "IAVF_AQ_RC_EINTR";
- case IAVF_AQ_RC_EIO:
- return "IAVF_AQ_RC_EIO";
- case IAVF_AQ_RC_ENXIO:
- return "IAVF_AQ_RC_ENXIO";
- case IAVF_AQ_RC_E2BIG:
- return "IAVF_AQ_RC_E2BIG";
- case IAVF_AQ_RC_EAGAIN:
- return "IAVF_AQ_RC_EAGAIN";
- case IAVF_AQ_RC_ENOMEM:
- return "IAVF_AQ_RC_ENOMEM";
- case IAVF_AQ_RC_EACCES:
- return "IAVF_AQ_RC_EACCES";
- case IAVF_AQ_RC_EFAULT:
- return "IAVF_AQ_RC_EFAULT";
- case IAVF_AQ_RC_EBUSY:
- return "IAVF_AQ_RC_EBUSY";
- case IAVF_AQ_RC_EEXIST:
- return "IAVF_AQ_RC_EEXIST";
- case IAVF_AQ_RC_EINVAL:
- return "IAVF_AQ_RC_EINVAL";
- case IAVF_AQ_RC_ENOTTY:
- return "IAVF_AQ_RC_ENOTTY";
- case IAVF_AQ_RC_ENOSPC:
- return "IAVF_AQ_RC_ENOSPC";
- case IAVF_AQ_RC_ENOSYS:
- return "IAVF_AQ_RC_ENOSYS";
- case IAVF_AQ_RC_ERANGE:
- return "IAVF_AQ_RC_ERANGE";
- case IAVF_AQ_RC_EFLUSHED:
- return "IAVF_AQ_RC_EFLUSHED";
- case IAVF_AQ_RC_BAD_ADDR:
- return "IAVF_AQ_RC_BAD_ADDR";
- case IAVF_AQ_RC_EMODE:
- return "IAVF_AQ_RC_EMODE";
- case IAVF_AQ_RC_EFBIG:
- return "IAVF_AQ_RC_EFBIG";
- }
-
- snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
- return hw->err_str;
-}
-
-/**
* iavf_stat_str - convert status err code to a string
* @hw: pointer to the HW structure
* @stat_err: the status error code to convert
@@ -228,7 +168,7 @@ const char *iavf_stat_str(struct iavf_hw *hw, enum iavf_status stat_err)
void iavf_debug_aq(struct iavf_hw *hw, enum iavf_debug_mask mask, void *desc,
void *buffer, u16 buf_len)
{
- struct iavf_aq_desc *aq_desc = (struct iavf_aq_desc *)desc;
+ struct libie_aq_desc *aq_desc = (struct libie_aq_desc *)desc;
u8 *buf = (u8 *)buffer;
if ((!(mask & hw->debug_mask)) || !desc)
@@ -244,11 +184,11 @@ void iavf_debug_aq(struct iavf_hw *hw, enum iavf_debug_mask mask, void *desc,
le32_to_cpu(aq_desc->cookie_high),
le32_to_cpu(aq_desc->cookie_low));
iavf_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
- le32_to_cpu(aq_desc->params.internal.param0),
- le32_to_cpu(aq_desc->params.internal.param1));
+ le32_to_cpu(aq_desc->params.generic.param0),
+ le32_to_cpu(aq_desc->params.generic.param1));
iavf_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
- le32_to_cpu(aq_desc->params.external.addr_high),
- le32_to_cpu(aq_desc->params.external.addr_low));
+ le32_to_cpu(aq_desc->params.generic.addr_high),
+ le32_to_cpu(aq_desc->params.generic.addr_low));
if (buffer && aq_desc->datalen) {
u16 len = le16_to_cpu(aq_desc->datalen);
@@ -297,11 +237,11 @@ bool iavf_check_asq_alive(struct iavf_hw *hw)
**/
enum iavf_status iavf_aq_queue_shutdown(struct iavf_hw *hw, bool unloading)
{
- struct iavf_aq_desc desc;
- struct iavf_aqc_queue_shutdown *cmd =
- (struct iavf_aqc_queue_shutdown *)&desc.params.raw;
+ struct iavf_aqc_queue_shutdown *cmd;
+ struct libie_aq_desc desc;
enum iavf_status status;
+ cmd = libie_aq_raw(&desc);
iavf_fill_default_direct_cmd_desc(&desc, iavf_aqc_opc_queue_shutdown);
if (unloading)
@@ -327,12 +267,13 @@ static enum iavf_status iavf_aq_get_set_rss_lut(struct iavf_hw *hw,
u8 *lut, u16 lut_size,
bool set)
{
+ struct iavf_aqc_get_set_rss_lut *cmd_resp;
+ struct libie_aq_desc desc;
enum iavf_status status;
- struct iavf_aq_desc desc;
- struct iavf_aqc_get_set_rss_lut *cmd_resp =
- (struct iavf_aqc_get_set_rss_lut *)&desc.params.raw;
u16 flags;
+ cmd_resp = libie_aq_raw(&desc);
+
if (set)
iavf_fill_default_direct_cmd_desc(&desc,
iavf_aqc_opc_set_rss_lut);
@@ -341,8 +282,8 @@ static enum iavf_status iavf_aq_get_set_rss_lut(struct iavf_hw *hw,
iavf_aqc_opc_get_rss_lut);
/* Indirect command */
- desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_BUF);
- desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_RD);
vsi_id = FIELD_PREP(IAVF_AQC_SET_RSS_LUT_VSI_ID_MASK, vsi_id) |
FIELD_PREP(IAVF_AQC_SET_RSS_LUT_VSI_VALID, 1);
@@ -392,11 +333,12 @@ iavf_status iavf_aq_get_set_rss_key(struct iavf_hw *hw, u16 vsi_id,
struct iavf_aqc_get_set_rss_key_data *key,
bool set)
{
- enum iavf_status status;
- struct iavf_aq_desc desc;
- struct iavf_aqc_get_set_rss_key *cmd_resp =
- (struct iavf_aqc_get_set_rss_key *)&desc.params.raw;
u16 key_size = sizeof(struct iavf_aqc_get_set_rss_key_data);
+ struct iavf_aqc_get_set_rss_key *cmd_resp;
+ struct libie_aq_desc desc;
+ enum iavf_status status;
+
+ cmd_resp = libie_aq_raw(&desc);
if (set)
iavf_fill_default_direct_cmd_desc(&desc,
@@ -406,8 +348,8 @@ iavf_status iavf_aq_get_set_rss_key(struct iavf_hw *hw, u16 vsi_id,
iavf_aqc_opc_get_rss_key);
/* Indirect command */
- desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_BUF);
- desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_RD);
vsi_id = FIELD_PREP(IAVF_AQC_SET_RSS_KEY_VSI_ID_MASK, vsi_id) |
FIELD_PREP(IAVF_AQC_SET_RSS_KEY_VSI_VALID, 1);
@@ -432,259 +374,6 @@ enum iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 vsi_id,
return iavf_aq_get_set_rss_key(hw, vsi_id, key, true);
}
-/* The iavf_ptype_lookup table is used to convert from the 8-bit ptype in the
- * hardware to a bit-field that can be used by SW to more easily determine the
- * packet type.
- *
- * Macros are used to shorten the table lines and make this table human
- * readable.
- *
- * We store the PTYPE in the top byte of the bit field - this is just so that
- * we can check that the table doesn't have a row missing, as the index into
- * the table should be the PTYPE.
- *
- * Typical work flow:
- *
- * IF NOT iavf_ptype_lookup[ptype].known
- * THEN
- * Packet is unknown
- * ELSE IF iavf_ptype_lookup[ptype].outer_ip == IAVF_RX_PTYPE_OUTER_IP
- * Use the rest of the fields to look at the tunnels, inner protocols, etc
- * ELSE
- * Use the enum iavf_rx_l2_ptype to decode the packet type
- * ENDIF
- */
-
-/* macro to make the table lines short, use explicit indexing with [PTYPE] */
-#define IAVF_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
- [PTYPE] = { \
- 1, \
- IAVF_RX_PTYPE_OUTER_##OUTER_IP, \
- IAVF_RX_PTYPE_OUTER_##OUTER_IP_VER, \
- IAVF_RX_PTYPE_##OUTER_FRAG, \
- IAVF_RX_PTYPE_TUNNEL_##T, \
- IAVF_RX_PTYPE_TUNNEL_END_##TE, \
- IAVF_RX_PTYPE_##TEF, \
- IAVF_RX_PTYPE_INNER_PROT_##I, \
- IAVF_RX_PTYPE_PAYLOAD_LAYER_##PL }
-
-#define IAVF_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
-
-/* shorter macros makes the table fit but are terse */
-#define IAVF_RX_PTYPE_NOF IAVF_RX_PTYPE_NOT_FRAG
-#define IAVF_RX_PTYPE_FRG IAVF_RX_PTYPE_FRAG
-#define IAVF_RX_PTYPE_INNER_PROT_TS IAVF_RX_PTYPE_INNER_PROT_TIMESYNC
-
-/* Lookup table mapping the 8-bit HW PTYPE to the bit field for decoding */
-struct iavf_rx_ptype_decoded iavf_ptype_lookup[BIT(8)] = {
- /* L2 Packet types */
- IAVF_PTT_UNUSED_ENTRY(0),
- IAVF_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
- IAVF_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
- IAVF_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
- IAVF_PTT_UNUSED_ENTRY(4),
- IAVF_PTT_UNUSED_ENTRY(5),
- IAVF_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
- IAVF_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
- IAVF_PTT_UNUSED_ENTRY(8),
- IAVF_PTT_UNUSED_ENTRY(9),
- IAVF_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
- IAVF_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
- IAVF_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
-
- /* Non Tunneled IPv4 */
- IAVF_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(25),
- IAVF_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
- IAVF_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
- IAVF_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
-
- /* IPv4 --> IPv4 */
- IAVF_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
- IAVF_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
- IAVF_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(32),
- IAVF_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
- IAVF_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
- IAVF_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
-
- /* IPv4 --> IPv6 */
- IAVF_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
- IAVF_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
- IAVF_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(39),
- IAVF_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
- IAVF_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
- IAVF_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT */
- IAVF_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
-
- /* IPv4 --> GRE/NAT --> IPv4 */
- IAVF_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
- IAVF_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
- IAVF_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(47),
- IAVF_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
- IAVF_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
- IAVF_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT --> IPv6 */
- IAVF_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
- IAVF_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
- IAVF_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(54),
- IAVF_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
- IAVF_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
- IAVF_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT --> MAC */
- IAVF_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
-
- /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
- IAVF_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
- IAVF_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
- IAVF_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(62),
- IAVF_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
- IAVF_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
- IAVF_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
- IAVF_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
- IAVF_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
- IAVF_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(69),
- IAVF_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
- IAVF_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
- IAVF_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT --> MAC/VLAN */
- IAVF_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
-
- /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
- IAVF_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
- IAVF_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
- IAVF_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(77),
- IAVF_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
- IAVF_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
- IAVF_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
-
- /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
- IAVF_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
- IAVF_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
- IAVF_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(84),
- IAVF_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
- IAVF_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
- IAVF_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
-
- /* Non Tunneled IPv6 */
- IAVF_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(91),
- IAVF_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
- IAVF_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
- IAVF_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
-
- /* IPv6 --> IPv4 */
- IAVF_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
- IAVF_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
- IAVF_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(98),
- IAVF_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
- IAVF_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
- IAVF_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
-
- /* IPv6 --> IPv6 */
- IAVF_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
- IAVF_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
- IAVF_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(105),
- IAVF_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
- IAVF_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
- IAVF_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT */
- IAVF_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
-
- /* IPv6 --> GRE/NAT -> IPv4 */
- IAVF_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
- IAVF_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
- IAVF_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(113),
- IAVF_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
- IAVF_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
- IAVF_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> IPv6 */
- IAVF_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
- IAVF_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
- IAVF_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(120),
- IAVF_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
- IAVF_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
- IAVF_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> MAC */
- IAVF_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
-
- /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
- IAVF_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
- IAVF_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
- IAVF_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(128),
- IAVF_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
- IAVF_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
- IAVF_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
- IAVF_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
- IAVF_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
- IAVF_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(135),
- IAVF_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
- IAVF_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
- IAVF_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> MAC/VLAN */
- IAVF_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
-
- /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
- IAVF_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
- IAVF_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
- IAVF_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(143),
- IAVF_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
- IAVF_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
- IAVF_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
- IAVF_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
- IAVF_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
- IAVF_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(150),
- IAVF_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
- IAVF_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
- IAVF_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
-
- /* unused entries */
- [154 ... 255] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
-};
-
/**
* iavf_aq_send_msg_to_pf
* @hw: pointer to the hardware structure
@@ -705,18 +394,18 @@ enum iavf_status iavf_aq_send_msg_to_pf(struct iavf_hw *hw,
struct iavf_asq_cmd_details *cmd_details)
{
struct iavf_asq_cmd_details details;
- struct iavf_aq_desc desc;
+ struct libie_aq_desc desc;
enum iavf_status status;
iavf_fill_default_direct_cmd_desc(&desc, iavf_aqc_opc_send_msg_to_pf);
- desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_SI);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_SI);
desc.cookie_high = cpu_to_le32(v_opcode);
desc.cookie_low = cpu_to_le32(v_retval);
if (msglen) {
- desc.flags |= cpu_to_le16((u16)(IAVF_AQ_FLAG_BUF
- | IAVF_AQ_FLAG_RD));
+ desc.flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF
+ | LIBIE_AQ_FLAG_RD));
if (msglen > IAVF_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
desc.datalen = cpu_to_le16(msglen);
}
if (!cmd_details) {
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index 378c3e9ddf9d..2cc21289a707 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -4,6 +4,8 @@
#include <linux/bitfield.h>
#include <linux/uaccess.h>
+#include <net/netdev_lock.h>
+
/* ethtool support for iavf */
#include "iavf.h"
@@ -240,29 +242,6 @@ static const struct iavf_stats iavf_gstrings_stats[] = {
#define IAVF_QUEUE_STATS_LEN ARRAY_SIZE(iavf_gstrings_queue_stats)
-/* For now we have one and only one private flag and it is only defined
- * when we have support for the SKIP_CPU_SYNC DMA attribute. Instead
- * of leaving all this code sitting around empty we will strip it unless
- * our one private flag is actually available.
- */
-struct iavf_priv_flags {
- char flag_string[ETH_GSTRING_LEN];
- u32 flag;
- bool read_only;
-};
-
-#define IAVF_PRIV_FLAG(_name, _flag, _read_only) { \
- .flag_string = _name, \
- .flag = _flag, \
- .read_only = _read_only, \
-}
-
-static const struct iavf_priv_flags iavf_gstrings_priv_flags[] = {
- IAVF_PRIV_FLAG("legacy-rx", IAVF_FLAG_LEGACY_RX, 0),
-};
-
-#define IAVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(iavf_gstrings_priv_flags)
-
/**
* iavf_get_link_ksettings - Get Link Speed and Duplex settings
* @netdev: network interface device structure
@@ -342,8 +321,6 @@ static int iavf_get_sset_count(struct net_device *netdev, int sset)
return IAVF_STATS_LEN +
(IAVF_QUEUE_STATS_LEN * 2 *
netdev->real_num_tx_queues);
- else if (sset == ETH_SS_PRIV_FLAGS)
- return IAVF_PRIV_FLAGS_STR_LEN;
else
return -EINVAL;
}
@@ -386,21 +363,6 @@ static void iavf_get_ethtool_stats(struct net_device *netdev,
}
/**
- * iavf_get_priv_flag_strings - Get private flag strings
- * @netdev: network interface device structure
- * @data: buffer for string data
- *
- * Builds the private flags string table
- **/
-static void iavf_get_priv_flag_strings(struct net_device *netdev, u8 *data)
-{
- unsigned int i;
-
- for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++)
- ethtool_puts(&data, iavf_gstrings_priv_flags[i].flag_string);
-}
-
-/**
* iavf_get_stat_strings - Get stat strings
* @netdev: network interface device structure
* @data: buffer for string data
@@ -438,109 +400,12 @@ static void iavf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
case ETH_SS_STATS:
iavf_get_stat_strings(netdev, data);
break;
- case ETH_SS_PRIV_FLAGS:
- iavf_get_priv_flag_strings(netdev, data);
- break;
default:
break;
}
}
/**
- * iavf_get_priv_flags - report device private flags
- * @netdev: network interface device structure
- *
- * The get string set count and the string set should be matched for each
- * flag returned. Add new strings for each flag to the iavf_gstrings_priv_flags
- * array.
- *
- * Returns a u32 bitmap of flags.
- **/
-static u32 iavf_get_priv_flags(struct net_device *netdev)
-{
- struct iavf_adapter *adapter = netdev_priv(netdev);
- u32 i, ret_flags = 0;
-
- for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++) {
- const struct iavf_priv_flags *priv_flags;
-
- priv_flags = &iavf_gstrings_priv_flags[i];
-
- if (priv_flags->flag & adapter->flags)
- ret_flags |= BIT(i);
- }
-
- return ret_flags;
-}
-
-/**
- * iavf_set_priv_flags - set private flags
- * @netdev: network interface device structure
- * @flags: bit flags to be set
- **/
-static int iavf_set_priv_flags(struct net_device *netdev, u32 flags)
-{
- struct iavf_adapter *adapter = netdev_priv(netdev);
- u32 orig_flags, new_flags, changed_flags;
- int ret = 0;
- u32 i;
-
- orig_flags = READ_ONCE(adapter->flags);
- new_flags = orig_flags;
-
- for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++) {
- const struct iavf_priv_flags *priv_flags;
-
- priv_flags = &iavf_gstrings_priv_flags[i];
-
- if (flags & BIT(i))
- new_flags |= priv_flags->flag;
- else
- new_flags &= ~(priv_flags->flag);
-
- if (priv_flags->read_only &&
- ((orig_flags ^ new_flags) & ~BIT(i)))
- return -EOPNOTSUPP;
- }
-
- /* Before we finalize any flag changes, any checks which we need to
- * perform to determine if the new flags will be supported should go
- * here...
- */
-
- /* Compare and exchange the new flags into place. If we failed, that
- * is if cmpxchg returns anything but the old value, this means
- * something else must have modified the flags variable since we
- * copied it. We'll just punt with an error and log something in the
- * message buffer.
- */
- if (cmpxchg(&adapter->flags, orig_flags, new_flags) != orig_flags) {
- dev_warn(&adapter->pdev->dev,
- "Unable to update adapter->flags as it was modified by another thread...\n");
- return -EAGAIN;
- }
-
- changed_flags = orig_flags ^ new_flags;
-
- /* Process any additional changes needed as a result of flag changes.
- * The changed_flags value reflects the list of bits that were changed
- * in the code above.
- */
-
- /* issue a reset to force legacy-rx change to take effect */
- if (changed_flags & IAVF_FLAG_LEGACY_RX) {
- if (netif_running(netdev)) {
- iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
- ret = iavf_wait_for_reset(adapter);
- if (ret)
- netdev_warn(netdev, "Changing private flags timeout or interrupted waiting for reset");
- }
- }
-
- return ret;
-}
-
-/**
* iavf_get_msglevel - Get debug message level
* @netdev: network interface device structure
*
@@ -585,7 +450,6 @@ static void iavf_get_drvinfo(struct net_device *netdev,
strscpy(drvinfo->driver, iavf_driver_name, 32);
strscpy(drvinfo->fw_version, "N/A", 4);
strscpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
- drvinfo->n_priv_flags = IAVF_PRIV_FLAGS_STR_LEN;
}
/**
@@ -1065,7 +929,7 @@ iavf_get_ethtool_fdir_entry(struct iavf_adapter *adapter,
spin_lock_bh(&adapter->fdir_fltr_lock);
- rule = iavf_find_fdir_fltr_by_loc(adapter, fsp->location);
+ rule = iavf_find_fdir_fltr(adapter, false, fsp->location);
if (!rule) {
ret = -EINVAL;
goto release_lock;
@@ -1210,6 +1074,9 @@ iavf_get_fdir_fltr_ids(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd,
spin_lock_bh(&adapter->fdir_fltr_lock);
list_for_each_entry(fltr, &adapter->fdir_list_head, list) {
+ if (iavf_is_raw_fdir(fltr))
+ continue;
+
if (cnt == cmd->rule_cnt) {
val = -EMSGSIZE;
goto release_lock;
@@ -1391,9 +1258,10 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
{
struct ethtool_rx_flow_spec *fsp = &cmd->fs;
struct iavf_fdir_fltr *fltr;
- int count = 50;
int err;
+ netdev_assert_locked(adapter->netdev);
+
if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
return -EOPNOTSUPP;
@@ -1401,15 +1269,7 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
return -EINVAL;
spin_lock_bh(&adapter->fdir_fltr_lock);
- if (adapter->fdir_active_fltr >= IAVF_MAX_FDIR_FILTERS) {
- spin_unlock_bh(&adapter->fdir_fltr_lock);
- dev_err(&adapter->pdev->dev,
- "Unable to add Flow Director filter because VF reached the limit of max allowed filters (%u)\n",
- IAVF_MAX_FDIR_FILTERS);
- return -ENOSPC;
- }
-
- if (iavf_find_fdir_fltr_by_loc(adapter, fsp->location)) {
+ if (iavf_find_fdir_fltr(adapter, false, fsp->location)) {
dev_err(&adapter->pdev->dev, "Failed to add Flow Director filter, it already exists\n");
spin_unlock_bh(&adapter->fdir_fltr_lock);
return -EEXIST;
@@ -1420,35 +1280,13 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
if (!fltr)
return -ENOMEM;
- while (!mutex_trylock(&adapter->crit_lock)) {
- if (--count == 0) {
- kfree(fltr);
- return -EINVAL;
- }
- udelay(1);
- }
-
err = iavf_add_fdir_fltr_info(adapter, fsp, fltr);
- if (err)
- goto ret;
-
- spin_lock_bh(&adapter->fdir_fltr_lock);
- iavf_fdir_list_add_fltr(adapter, fltr);
- adapter->fdir_active_fltr++;
-
- if (adapter->link_up)
- fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST;
- else
- fltr->state = IAVF_FDIR_FLTR_INACTIVE;
- spin_unlock_bh(&adapter->fdir_fltr_lock);
+ if (!err)
+ err = iavf_fdir_add_fltr(adapter, fltr);
- if (adapter->link_up)
- iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_FDIR_FILTER);
-ret:
- if (err && fltr)
+ if (err)
kfree(fltr);
- mutex_unlock(&adapter->crit_lock);
return err;
}
@@ -1462,44 +1300,14 @@ ret:
static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd)
{
struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
- struct iavf_fdir_fltr *fltr = NULL;
- int err = 0;
if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
return -EOPNOTSUPP;
- spin_lock_bh(&adapter->fdir_fltr_lock);
- fltr = iavf_find_fdir_fltr_by_loc(adapter, fsp->location);
- if (fltr) {
- if (fltr->state == IAVF_FDIR_FLTR_ACTIVE) {
- fltr->state = IAVF_FDIR_FLTR_DEL_REQUEST;
- } else if (fltr->state == IAVF_FDIR_FLTR_INACTIVE) {
- list_del(&fltr->list);
- kfree(fltr);
- adapter->fdir_active_fltr--;
- fltr = NULL;
- } else {
- err = -EBUSY;
- }
- } else if (adapter->fdir_active_fltr) {
- err = -EINVAL;
- }
- spin_unlock_bh(&adapter->fdir_fltr_lock);
-
- if (fltr && fltr->state == IAVF_FDIR_FLTR_DEL_REQUEST)
- iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DEL_FDIR_FILTER);
-
- return err;
+ return iavf_fdir_del_fltr(adapter, false, fsp->location);
}
-/**
- * iavf_adv_rss_parse_hdrs - parses headers from RSS hash input
- * @cmd: ethtool rxnfc command
- *
- * This function parses the rxnfc command and returns intended
- * header types for RSS configuration
- */
-static u32 iavf_adv_rss_parse_hdrs(struct ethtool_rxnfc *cmd)
+static u32 iavf_adv_rss_parse_hdrs(const struct ethtool_rxfh_fields *cmd)
{
u32 hdrs = IAVF_ADV_RSS_FLOW_SEG_HDR_NONE;
@@ -1528,6 +1336,56 @@ static u32 iavf_adv_rss_parse_hdrs(struct ethtool_rxnfc *cmd)
hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP |
IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
break;
+ case GTPU_V4_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_IP |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
+ break;
+ case GTPC_V4_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_UDP |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
+ break;
+ case GTPC_TEID_V4_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC_TEID |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_UDP |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
+ break;
+ case GTPU_EH_V4_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_EH |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
+ break;
+ case GTPU_UL_V4_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_UP |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
+ break;
+ case GTPU_DL_V4_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_DWN |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
+ break;
+ case GTPU_V6_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_IP |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
+ break;
+ case GTPC_V6_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
+ break;
+ case GTPC_TEID_V6_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC_TEID |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
+ break;
+ case GTPU_EH_V6_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_EH |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
+ break;
+ case GTPU_UL_V6_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_UP |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
+ break;
+ case GTPU_DL_V6_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_DWN |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
+ break;
default:
break;
}
@@ -1535,15 +1393,8 @@ static u32 iavf_adv_rss_parse_hdrs(struct ethtool_rxnfc *cmd)
return hdrs;
}
-/**
- * iavf_adv_rss_parse_hash_flds - parses hash fields from RSS hash input
- * @cmd: ethtool rxnfc command
- * @symm: true if Symmetric Topelitz is set
- *
- * This function parses the rxnfc command and returns intended hash fields for
- * RSS configuration
- */
-static u64 iavf_adv_rss_parse_hash_flds(struct ethtool_rxnfc *cmd, bool symm)
+static u64
+iavf_adv_rss_parse_hash_flds(const struct ethtool_rxfh_fields *cmd, bool symm)
{
u64 hfld = IAVF_ADV_RSS_HASH_INVALID;
@@ -1552,6 +1403,12 @@ static u64 iavf_adv_rss_parse_hash_flds(struct ethtool_rxnfc *cmd, bool symm)
case TCP_V4_FLOW:
case UDP_V4_FLOW:
case SCTP_V4_FLOW:
+ case GTPU_V4_FLOW:
+ case GTPC_V4_FLOW:
+ case GTPC_TEID_V4_FLOW:
+ case GTPU_EH_V4_FLOW:
+ case GTPU_UL_V4_FLOW:
+ case GTPU_DL_V4_FLOW:
if (cmd->data & RXH_IP_SRC)
hfld |= IAVF_ADV_RSS_HASH_FLD_IPV4_SA;
if (cmd->data & RXH_IP_DST)
@@ -1560,6 +1417,12 @@ static u64 iavf_adv_rss_parse_hash_flds(struct ethtool_rxnfc *cmd, bool symm)
case TCP_V6_FLOW:
case UDP_V6_FLOW:
case SCTP_V6_FLOW:
+ case GTPU_V6_FLOW:
+ case GTPC_V6_FLOW:
+ case GTPC_TEID_V6_FLOW:
+ case GTPU_EH_V6_FLOW:
+ case GTPU_UL_V6_FLOW:
+ case GTPU_DL_V6_FLOW:
if (cmd->data & RXH_IP_SRC)
hfld |= IAVF_ADV_RSS_HASH_FLD_IPV6_SA;
if (cmd->data & RXH_IP_DST)
@@ -1581,6 +1444,7 @@ static u64 iavf_adv_rss_parse_hash_flds(struct ethtool_rxnfc *cmd, bool symm)
break;
case UDP_V4_FLOW:
case UDP_V6_FLOW:
+ case GTPC_V4_FLOW:
if (cmd->data & RXH_L4_B_0_1)
hfld |= IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT;
if (cmd->data & RXH_L4_B_2_3)
@@ -1597,28 +1461,51 @@ static u64 iavf_adv_rss_parse_hash_flds(struct ethtool_rxnfc *cmd, bool symm)
break;
}
}
+ if (cmd->data & RXH_GTP_TEID) {
+ switch (cmd->flow_type) {
+ case GTPC_TEID_V4_FLOW:
+ case GTPC_TEID_V6_FLOW:
+ hfld |= IAVF_ADV_RSS_HASH_FLD_GTPC_TEID;
+ break;
+ case GTPU_V4_FLOW:
+ case GTPU_V6_FLOW:
+ hfld |= IAVF_ADV_RSS_HASH_FLD_GTPU_IP_TEID;
+ break;
+ case GTPU_EH_V4_FLOW:
+ case GTPU_EH_V6_FLOW:
+ hfld |= IAVF_ADV_RSS_HASH_FLD_GTPU_EH_TEID;
+ break;
+ case GTPU_UL_V4_FLOW:
+ case GTPU_UL_V6_FLOW:
+ hfld |= IAVF_ADV_RSS_HASH_FLD_GTPU_UP_TEID;
+ break;
+ case GTPU_DL_V4_FLOW:
+ case GTPU_DL_V6_FLOW:
+ hfld |= IAVF_ADV_RSS_HASH_FLD_GTPU_DWN_TEID;
+ break;
+ default:
+ break;
+ }
+ }
return hfld;
}
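For reference, the parsing above is a pure bit-mapping exercise: the ethtool flow type selects a family of hash fields and the RXH_* data bits pick individual fields within it, with RXH_GTP_TEID only meaningful for the GTP flow types. A minimal standalone sketch of that shape (invented constants, not the driver's IAVF_ADV_RSS_* values):

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the ethtool RXH_* data bits. */
#define RXH_IP_SRC	(1u << 0)
#define RXH_IP_DST	(1u << 1)
#define RXH_GTP_TEID	(1u << 2)

/* Simplified stand-ins for the driver's hash-field bits. */
#define HASH_FLD_IPV4_SA	(1ull << 0)
#define HASH_FLD_IPV4_DA	(1ull << 1)
#define HASH_FLD_GTPU_TEID	(1ull << 2)

enum flow { FLOW_TCP_V4, FLOW_GTPU_V4 };

static uint64_t parse_hash_fields(enum flow flow, uint32_t data)
{
	uint64_t hfld = 0;

	if (data & RXH_IP_SRC)
		hfld |= HASH_FLD_IPV4_SA;
	if (data & RXH_IP_DST)
		hfld |= HASH_FLD_IPV4_DA;
	/* The TEID bit only makes sense for GTP flow types. */
	if ((data & RXH_GTP_TEID) && flow == FLOW_GTPU_V4)
		hfld |= HASH_FLD_GTPU_TEID;

	return hfld;
}

int main(void)
{
	printf("0x%llx\n", (unsigned long long)
	       parse_hash_fields(FLOW_GTPU_V4, RXH_IP_SRC | RXH_GTP_TEID));
	return 0;
}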
-/**
- * iavf_set_adv_rss_hash_opt - Enable/Disable flow types for RSS hash
- * @adapter: pointer to the VF adapter structure
- * @cmd: ethtool rxnfc command
- *
- * Returns Success if the flow input set is supported.
- */
static int
-iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
- struct ethtool_rxnfc *cmd)
+iavf_set_rxfh_fields(struct net_device *netdev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
struct iavf_adv_rss *rss_old, *rss_new;
bool rss_new_add = false;
- int count = 50, err = 0;
bool symm = false;
u64 hash_flds;
+ int err = 0;
u32 hdrs;
+ netdev_assert_locked(adapter->netdev);
+
if (!ADV_RSS_SUPPORT(adapter))
return -EOPNOTSUPP;
@@ -1642,15 +1529,6 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
return -EINVAL;
}
- while (!mutex_trylock(&adapter->crit_lock)) {
- if (--count == 0) {
- kfree(rss_new);
- return -EINVAL;
- }
-
- udelay(1);
- }
-
spin_lock_bh(&adapter->adv_rss_lock);
rss_old = iavf_find_adv_rss_cfg_by_hdrs(adapter, hdrs);
if (rss_old) {
@@ -1679,25 +1557,16 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
if (!err)
iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_ADV_RSS_CFG);
- mutex_unlock(&adapter->crit_lock);
-
if (!rss_new_add)
kfree(rss_new);
return err;
}
-/**
- * iavf_get_adv_rss_hash_opt - Retrieve hash fields for a given flow-type
- * @adapter: pointer to the VF adapter structure
- * @cmd: ethtool rxnfc command
- *
- * Returns Success if the flow input set is supported.
- */
static int
-iavf_get_adv_rss_hash_opt(struct iavf_adapter *adapter,
- struct ethtool_rxnfc *cmd)
+iavf_get_rxfh_fields(struct net_device *netdev, struct ethtool_rxfh_fields *cmd)
{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
struct iavf_adv_rss *rss;
u64 hash_flds;
u32 hdrs;
@@ -1762,9 +1631,6 @@ static int iavf_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
case ETHTOOL_SRXCLSRLDEL:
ret = iavf_del_fdir_ethtool(adapter, cmd);
break;
- case ETHTOOL_SRXFH:
- ret = iavf_set_adv_rss_hash_opt(adapter, cmd);
- break;
default:
break;
}
@@ -1773,6 +1639,19 @@ static int iavf_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
}
/**
+ * iavf_get_rx_ring_count - get RX ring count
+ * @netdev: network interface device structure
+ *
+ * Return: number of RX rings.
+ **/
+static u32 iavf_get_rx_ring_count(struct net_device *netdev)
+{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->num_active_queues;
+}
+
+/**
* iavf_get_rxnfc - command to get RX flow classification rules
* @netdev: network interface device structure
* @cmd: ethtool rxnfc command
@@ -1787,10 +1666,6 @@ static int iavf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
int ret = -EOPNOTSUPP;
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = adapter->num_active_queues;
- ret = 0;
- break;
case ETHTOOL_GRXCLSRLCNT:
if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
break;
@@ -1806,9 +1681,6 @@ static int iavf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
case ETHTOOL_GRXCLSRLALL:
ret = iavf_get_fdir_fltr_ids(adapter, cmd, (u32 *)rule_locs);
break;
- case ETHTOOL_GRXFH:
- ret = iavf_get_adv_rss_hash_opt(adapter, cmd);
- break;
default:
break;
}
@@ -1987,7 +1859,7 @@ static int iavf_set_rxfh(struct net_device *netdev,
static const struct ethtool_ops iavf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE,
- .cap_rss_sym_xor_supported = true,
+ .supported_input_xfrm = RXH_XFRM_SYM_XOR,
.get_drvinfo = iavf_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = iavf_get_ringparam,
@@ -1995,8 +1867,6 @@ static const struct ethtool_ops iavf_ethtool_ops = {
.get_strings = iavf_get_strings,
.get_ethtool_stats = iavf_get_ethtool_stats,
.get_sset_count = iavf_get_sset_count,
- .get_priv_flags = iavf_get_priv_flags,
- .set_priv_flags = iavf_set_priv_flags,
.get_msglevel = iavf_get_msglevel,
.set_msglevel = iavf_set_msglevel,
.get_coalesce = iavf_get_coalesce,
@@ -2005,9 +1875,12 @@ static const struct ethtool_ops iavf_ethtool_ops = {
.set_per_queue_coalesce = iavf_set_per_queue_coalesce,
.set_rxnfc = iavf_set_rxnfc,
.get_rxnfc = iavf_get_rxnfc,
+ .get_rx_ring_count = iavf_get_rx_ring_count,
.get_rxfh_indir_size = iavf_get_rxfh_indir_size,
.get_rxfh = iavf_get_rxfh,
.set_rxfh = iavf_set_rxfh,
+ .get_rxfh_fields = iavf_get_rxfh_fields,
+ .set_rxfh_fields = iavf_set_rxfh_fields,
.get_channels = iavf_get_channels,
.set_channels = iavf_set_channels,
.get_rxfh_key_size = iavf_get_rxfh_key_size,
diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.c b/drivers/net/ethernet/intel/iavf/iavf_fdir.c
index 2d47b0b4640e..a1b3b44cc14a 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_fdir.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.c
@@ -796,6 +796,9 @@ bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *
spin_lock_bh(&adapter->fdir_fltr_lock);
list_for_each_entry(tmp, &adapter->fdir_list_head, list) {
+ if (iavf_is_raw_fdir(fltr))
+ continue;
+
if (tmp->flow_type != fltr->flow_type)
continue;
@@ -815,33 +818,52 @@ bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *
}
/**
- * iavf_find_fdir_fltr_by_loc - find filter with location
+ * iavf_find_fdir_fltr - find FDIR filter
* @adapter: pointer to the VF adapter structure
- * @loc: location to find.
+ * @is_raw: filter type, is raw (tc u32) or not (ethtool)
+ * @data: data to ID the filter, type dependent
*
- * Returns pointer to Flow Director filter if found or null
+ * Returns: pointer to Flow Director filter if found or NULL. Lock must be held.
*/
-struct iavf_fdir_fltr *iavf_find_fdir_fltr_by_loc(struct iavf_adapter *adapter, u32 loc)
+struct iavf_fdir_fltr *iavf_find_fdir_fltr(struct iavf_adapter *adapter,
+ bool is_raw, u32 data)
{
struct iavf_fdir_fltr *rule;
- list_for_each_entry(rule, &adapter->fdir_list_head, list)
- if (rule->loc == loc)
+ list_for_each_entry(rule, &adapter->fdir_list_head, list) {
+ if ((is_raw && rule->cls_u32_handle == data) ||
+ (!is_raw && rule->loc == data))
return rule;
+ }
return NULL;
}
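The lookup above keys the same list two ways: by the tc u32 knode handle for raw filters and by the ethtool rule location otherwise. A standalone sketch of that dual-key search over a plain list (illustrative types only, not the driver's structures):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct fltr {
	uint32_t cls_u32_handle;	/* key for raw (tc u32) filters */
	uint32_t loc;			/* key for ethtool filters */
	struct fltr *next;
};

static struct fltr *find_fltr(struct fltr *head, bool is_raw, uint32_t data)
{
	for (struct fltr *f = head; f; f = f->next) {
		if ((is_raw && f->cls_u32_handle == data) ||
		    (!is_raw && f->loc == data))
			return f;
	}
	return NULL;
}

int main(void)
{
	struct fltr b = { .cls_u32_handle = 0x800, .loc = 7, .next = NULL };
	struct fltr a = { .cls_u32_handle = 0x400, .loc = 3, .next = &b };

	printf("by loc 7:  %s\n", find_fltr(&a, false, 7) ? "found" : "missing");
	printf("by handle: %s\n", find_fltr(&a, true, 0x400) ? "found" : "missing");
	return 0;
}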
/**
- * iavf_fdir_list_add_fltr - add a new node to the flow director filter list
+ * iavf_fdir_add_fltr - add a new node to the flow director filter list
* @adapter: pointer to the VF adapter structure
* @fltr: filter node to add to structure
+ *
+ * Return: 0 on success or negative errno on failure.
*/
-void iavf_fdir_list_add_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr)
+int iavf_fdir_add_fltr(struct iavf_adapter *adapter,
+ struct iavf_fdir_fltr *fltr)
{
struct iavf_fdir_fltr *rule, *parent = NULL;
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ if (iavf_fdir_max_reached(adapter)) {
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+ dev_err(&adapter->pdev->dev,
+ "Unable to add Flow Director filter (limit (%u) reached)\n",
+ IAVF_MAX_FDIR_FILTERS);
+ return -ENOSPC;
+ }
+
list_for_each_entry(rule, &adapter->fdir_list_head, list) {
+ if (iavf_is_raw_fdir(fltr))
+ break;
+
if (rule->loc >= fltr->loc)
break;
parent = rule;
@@ -851,4 +873,55 @@ void iavf_fdir_list_add_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr
list_add(&fltr->list, &parent->list);
else
list_add(&fltr->list, &adapter->fdir_list_head);
+
+ iavf_inc_fdir_active_fltr(adapter, fltr);
+
+ if (adapter->link_up)
+ fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST;
+ else
+ fltr->state = IAVF_FDIR_FLTR_INACTIVE;
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+
+ if (adapter->link_up)
+ iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_FDIR_FILTER);
+
+ return 0;
+}
+
+/**
+ * iavf_fdir_del_fltr - delete a flow director filter from the list
+ * @adapter: pointer to the VF adapter structure
+ * @is_raw: filter type, is raw (tc u32) or not (ethtool)
+ * @data: data to ID the filter, type dependent
+ *
+ * Return: 0 on success or negative errno on failure.
+ */
+int iavf_fdir_del_fltr(struct iavf_adapter *adapter, bool is_raw, u32 data)
+{
+ struct iavf_fdir_fltr *fltr = NULL;
+ int err = 0;
+
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ fltr = iavf_find_fdir_fltr(adapter, is_raw, data);
+
+ if (fltr) {
+ if (fltr->state == IAVF_FDIR_FLTR_ACTIVE) {
+ fltr->state = IAVF_FDIR_FLTR_DEL_REQUEST;
+ } else if (fltr->state == IAVF_FDIR_FLTR_INACTIVE) {
+ list_del(&fltr->list);
+ iavf_dec_fdir_active_fltr(adapter, fltr);
+ kfree(fltr);
+ fltr = NULL;
+ } else {
+ err = -EBUSY;
+ }
+ } else if (adapter->fdir_active_fltr) {
+ err = -EINVAL;
+ }
+
+ if (fltr && fltr->state == IAVF_FDIR_FLTR_DEL_REQUEST)
+ iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DEL_FDIR_FILTER);
+
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+ return err;
}
diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.h b/drivers/net/ethernet/intel/iavf/iavf_fdir.h
index d31bd923ba8c..e84a5351162f 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_fdir.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.h
@@ -117,17 +117,26 @@ struct iavf_fdir_fltr {
u32 flow_id;
+ u32 cls_u32_handle; /* for FDIR added via tc u32 */
u32 loc; /* Rule location inside the flow table */
u32 q_index;
struct virtchnl_fdir_add vc_add_msg;
};
+static inline bool iavf_is_raw_fdir(struct iavf_fdir_fltr *fltr)
+{
+ return !fltr->vc_add_msg.rule_cfg.proto_hdrs.count;
+}
+
int iavf_validate_fdir_fltr_masks(struct iavf_adapter *adapter,
struct iavf_fdir_fltr *fltr);
int iavf_fill_fdir_add_msg(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
void iavf_print_fdir_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
-void iavf_fdir_list_add_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
-struct iavf_fdir_fltr *iavf_find_fdir_fltr_by_loc(struct iavf_adapter *adapter, u32 loc);
+int iavf_fdir_add_fltr(struct iavf_adapter *adapter,
+ struct iavf_fdir_fltr *fltr);
+int iavf_fdir_del_fltr(struct iavf_adapter *adapter, bool is_raw, u32 data);
+struct iavf_fdir_fltr *iavf_find_fdir_fltr(struct iavf_adapter *adapter,
+ bool is_raw, u32 data);
#endif /* _IAVF_FDIR_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index ef2440f3abf8..c2fbe443ef85 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -1,7 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */
+#include <linux/net/intel/libie/rx.h>
+#include <net/netdev_lock.h>
+
#include "iavf.h"
+#include "iavf_ptp.h"
#include "iavf_prototype.h"
/* All iavf tracepoints are defined by the include below, which must
* be included exactly once across the whole kernel with
@@ -43,8 +47,10 @@ static const struct pci_device_id iavf_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci, iavf_pci_tbl);
MODULE_ALIAS("i40evf");
-MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver");
+MODULE_IMPORT_NS("LIBETH");
+MODULE_IMPORT_NS("LIBIE");
+MODULE_IMPORT_NS("LIBIE_ADMINQ");
MODULE_LICENSE("GPL v2");
static const struct net_device_ops iavf_netdev_ops;
@@ -523,33 +529,6 @@ static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter)
}
/**
- * iavf_irq_affinity_notify - Callback for affinity changes
- * @notify: context as to what irq was changed
- * @mask: the new affinity mask
- *
- * This is a callback function used by the irq_set_affinity_notifier function
- * so that we may register to receive changes to the irq affinity masks.
- **/
-static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify,
- const cpumask_t *mask)
-{
- struct iavf_q_vector *q_vector =
- container_of(notify, struct iavf_q_vector, affinity_notify);
-
- cpumask_copy(&q_vector->affinity_mask, mask);
-}
-
-/**
- * iavf_irq_affinity_release - Callback for affinity notifier release
- * @ref: internal core kernel usage
- *
- * This is a callback function used by the irq_set_affinity_notifier function
- * to inform the current notification subscriber that they will no longer
- * receive notifications.
- **/
-static void iavf_irq_affinity_release(struct kref *ref) {}
-
-/**
* iavf_request_traffic_irqs - Initialize MSI-X interrupts
* @adapter: board private structure
* @basename: device basename
@@ -563,7 +542,6 @@ iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
unsigned int vector, q_vectors;
unsigned int rx_int_idx = 0, tx_int_idx = 0;
int irq_num, err;
- int cpu;
iavf_irq_disable(adapter);
/* Decrement for Other and TCP Timer vectors */
@@ -598,17 +576,6 @@ iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
"Request_irq failed, error: %d\n", err);
goto free_queue_irqs;
}
- /* register for affinity change notifications */
- q_vector->affinity_notify.notify = iavf_irq_affinity_notify;
- q_vector->affinity_notify.release =
- iavf_irq_affinity_release;
- irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
- /* Spread the IRQ affinity hints across online CPUs. Note that
- * get_cpu_mask returns a mask with a permanent lifetime so
- * it's safe to use as a hint for irq_update_affinity_hint.
- */
- cpu = cpumask_local_spread(q_vector->v_idx, -1);
- irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
}
return 0;
@@ -617,8 +584,6 @@ free_queue_irqs:
while (vector) {
vector--;
irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
- irq_set_affinity_notifier(irq_num, NULL);
- irq_update_affinity_hint(irq_num, NULL);
free_irq(irq_num, &adapter->q_vectors[vector]);
}
return err;
@@ -660,6 +625,7 @@ static int iavf_request_misc_irq(struct iavf_adapter *adapter)
**/
static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
{
+ struct iavf_q_vector *q_vector;
int vector, irq_num, q_vectors;
if (!adapter->msix_entries)
@@ -668,10 +634,10 @@ static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
q_vectors = adapter->num_msix_vectors - NONQ_VECS;
for (vector = 0; vector < q_vectors; vector++) {
+ q_vector = &adapter->q_vectors[vector];
+ netif_napi_set_irq_locked(&q_vector->napi, -1);
irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
- irq_set_affinity_notifier(irq_num, NULL);
- irq_update_affinity_hint(irq_num, NULL);
- free_irq(irq_num, &adapter->q_vectors[vector]);
+ free_irq(irq_num, q_vector);
}
}
@@ -707,6 +673,47 @@ static void iavf_configure_tx(struct iavf_adapter *adapter)
}
/**
+ * iavf_select_rx_desc_format - Select Rx descriptor format
+ * @adapter: adapter private structure
+ *
+ * Select which Rx descriptor format to use based on availability and enabled
+ * features.
+ *
+ * Return: the desired RXDID to select for a given Rx queue, as defined by
+ * enum virtchnl_rxdid_format.
+ */
+static u8 iavf_select_rx_desc_format(const struct iavf_adapter *adapter)
+{
+ u64 rxdids = adapter->supp_rxdids;
+
+ /* If we did not negotiate VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC, we must
+ * stick with the default value of the legacy 32 byte format.
+ */
+ if (!IAVF_RXDID_ALLOWED(adapter))
+ return VIRTCHNL_RXDID_1_32B_BASE;
+
+ /* Rx timestamping requires the use of flexible NIC descriptors */
+ if (iavf_ptp_cap_supported(adapter, VIRTCHNL_1588_PTP_CAP_RX_TSTAMP)) {
+ if (rxdids & BIT(VIRTCHNL_RXDID_2_FLEX_SQ_NIC))
+ return VIRTCHNL_RXDID_2_FLEX_SQ_NIC;
+
+ pci_warn(adapter->pdev,
+ "Unable to negotiate flexible descriptor format\n");
+ }
+
+ /* Warn if the PF does not list support for the default legacy
+ * descriptor format. This shouldn't happen, as this is the format
+ * used if VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC is not supported. It is
+ * likely caused by a bug in the PF implementation failing to indicate
+ * support for the format.
+ */
+ if (!(rxdids & VIRTCHNL_RXDID_1_32B_BASE_M))
+ netdev_warn(adapter->netdev, "PF does not list support for default Rx descriptor format\n");
+
+ return VIRTCHNL_RXDID_1_32B_BASE;
+}
+
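The selection logic boils down to: prefer the flexible NIC descriptor when the capability was negotiated and Rx timestamping needs it, otherwise fall back to the legacy 32-byte format. A standalone sketch of that decision (invented enum values, not the virtchnl ones):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum rxdid { RXDID_LEGACY_32B = 1, RXDID_FLEX_SQ_NIC = 2 };

static enum rxdid select_rx_desc_format(uint64_t supp_rxdids,
					bool flex_allowed, bool want_rx_tstamp)
{
	/* Without the negotiated capability, stick with the legacy format. */
	if (!flex_allowed)
		return RXDID_LEGACY_32B;

	/* Rx timestamping requires a flexible descriptor format. */
	if (want_rx_tstamp && (supp_rxdids & (1ull << RXDID_FLEX_SQ_NIC)))
		return RXDID_FLEX_SQ_NIC;

	return RXDID_LEGACY_32B;
}

int main(void)
{
	printf("%d\n", select_rx_desc_format(1ull << RXDID_FLEX_SQ_NIC,
					     true, true));
	return 0;
}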
+/**
* iavf_configure_rx - Configure Receive Unit after Reset
* @adapter: board private structure
*
@@ -714,39 +721,13 @@ static void iavf_configure_tx(struct iavf_adapter *adapter)
**/
static void iavf_configure_rx(struct iavf_adapter *adapter)
{
- unsigned int rx_buf_len = IAVF_RXBUFFER_2048;
struct iavf_hw *hw = &adapter->hw;
- int i;
-
- /* Legacy Rx will always default to a 2048 buffer size. */
-#if (PAGE_SIZE < 8192)
- if (!(adapter->flags & IAVF_FLAG_LEGACY_RX)) {
- struct net_device *netdev = adapter->netdev;
- /* For jumbo frames on systems with 4K pages we have to use
- * an order 1 page, so we might as well increase the size
- * of our Rx buffer to make better use of the available space
- */
- rx_buf_len = IAVF_RXBUFFER_3072;
+ adapter->rxdid = iavf_select_rx_desc_format(adapter);
- /* We use a 1536 buffer size for configurations with
- * standard Ethernet mtu. On x86 this gives us enough room
- * for shared info and 192 bytes of padding.
- */
- if (!IAVF_2K_TOO_SMALL_WITH_PADDING &&
- (netdev->mtu <= ETH_DATA_LEN))
- rx_buf_len = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;
- }
-#endif
-
- for (i = 0; i < adapter->num_active_queues; i++) {
+ for (u32 i = 0; i < adapter->num_active_queues; i++) {
adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i);
- adapter->rx_rings[i].rx_buf_len = rx_buf_len;
-
- if (adapter->flags & IAVF_FLAG_LEGACY_RX)
- clear_ring_build_skb_enabled(&adapter->rx_rings[i]);
- else
- set_ring_build_skb_enabled(&adapter->rx_rings[i]);
+ adapter->rx_rings[i].rxdid = adapter->rxdid;
}
}
@@ -800,6 +781,11 @@ iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter,
f->state = IAVF_VLAN_ADD;
adapter->num_vlan_filters++;
iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_VLAN_FILTER);
+ } else if (f->state == IAVF_VLAN_REMOVE) {
+ /* IAVF_VLAN_REMOVE means that VLAN wasn't yet removed.
+ * We can safely just change the state here.
+ */
+ f->state = IAVF_VLAN_ACTIVE;
}
clearout:
@@ -820,8 +806,18 @@ static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan)
f = iavf_find_vlan(adapter, vlan);
if (f) {
- f->state = IAVF_VLAN_REMOVE;
- iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DEL_VLAN_FILTER);
+ /* IAVF_VLAN_ADD means that VLAN wasn't even added yet.
+ * Remove it from the list.
+ */
+ if (f->state == IAVF_VLAN_ADD) {
+ list_del(&f->list);
+ kfree(f);
+ adapter->num_vlan_filters--;
+ } else {
+ f->state = IAVF_VLAN_REMOVE;
+ iavf_schedule_aq_request(adapter,
+ IAVF_FLAG_AQ_DEL_VLAN_FILTER);
+ }
}
spin_unlock_bh(&adapter->mac_vlan_list_lock);
@@ -1207,7 +1203,7 @@ static void iavf_napi_enable_all(struct iavf_adapter *adapter)
q_vector = &adapter->q_vectors[q_idx];
napi = &q_vector->napi;
- napi_enable(napi);
+ napi_enable_locked(napi);
}
}
@@ -1223,7 +1219,7 @@ static void iavf_napi_disable_all(struct iavf_adapter *adapter)
for (q_idx = 0; q_idx < q_vectors; q_idx++) {
q_vector = &adapter->q_vectors[q_idx];
- napi_disable(&q_vector->napi);
+ napi_disable_locked(&q_vector->napi);
}
}
@@ -1252,11 +1248,11 @@ static void iavf_configure(struct iavf_adapter *adapter)
/**
* iavf_up_complete - Finish the last steps of bringing up a connection
* @adapter: board private structure
- *
- * Expects to be called while holding crit_lock.
- **/
+ */
static void iavf_up_complete(struct iavf_adapter *adapter)
{
+ netdev_assert_locked(adapter->netdev);
+
iavf_change_state(adapter, __IAVF_RUNNING);
clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
@@ -1375,13 +1371,13 @@ static void iavf_clear_adv_rss_conf(struct iavf_adapter *adapter)
/**
* iavf_down - Shutdown the connection processing
* @adapter: board private structure
- *
- * Expects to be called while holding crit_lock.
- **/
+ */
void iavf_down(struct iavf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
+ netdev_assert_locked(netdev);
+
if (adapter->state <= __IAVF_DOWN_PENDING)
return;
@@ -1615,7 +1611,6 @@ static int iavf_alloc_queues(struct iavf_adapter *adapter)
rx_ring = &adapter->rx_rings[i];
rx_ring->queue_index = i;
rx_ring->netdev = adapter->netdev;
- rx_ring->dev = &adapter->pdev->dev;
rx_ring->count = adapter->rx_desc_count;
rx_ring->itr_setting = IAVF_ITR_RX_DEF;
}
@@ -1700,7 +1695,7 @@ static int iavf_config_rss_aq(struct iavf_adapter *adapter)
if (status) {
dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
iavf_stat_str(hw, status),
- iavf_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
return iavf_status_to_errno(status);
}
@@ -1710,7 +1705,7 @@ static int iavf_config_rss_aq(struct iavf_adapter *adapter)
if (status) {
dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
iavf_stat_str(hw, status),
- iavf_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
return iavf_status_to_errno(status);
}
@@ -1789,12 +1784,13 @@ static int iavf_init_rss(struct iavf_adapter *adapter)
/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
if (adapter->vf_res->vf_cap_flags &
VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
- adapter->hena = IAVF_DEFAULT_RSS_HENA_EXPANDED;
+ adapter->rss_hashcfg =
+ IAVF_DEFAULT_RSS_HASHCFG_EXPANDED;
else
- adapter->hena = IAVF_DEFAULT_RSS_HENA;
+ adapter->rss_hashcfg = IAVF_DEFAULT_RSS_HASHCFG;
- wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->hena);
- wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->hena >> 32));
+ wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->rss_hashcfg);
+ wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->rss_hashcfg >> 32));
}
iavf_fill_rss_lut(adapter);
@@ -1812,7 +1808,7 @@ static int iavf_init_rss(struct iavf_adapter *adapter)
**/
static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
{
- int q_idx = 0, num_q_vectors;
+ int q_idx = 0, num_q_vectors, irq_num;
struct iavf_q_vector *q_vector;
num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
@@ -1822,14 +1818,15 @@ static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
return -ENOMEM;
for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
+ irq_num = adapter->msix_entries[q_idx + NONQ_VECS].vector;
q_vector = &adapter->q_vectors[q_idx];
q_vector->adapter = adapter;
q_vector->vsi = &adapter->vsi;
q_vector->v_idx = q_idx;
q_vector->reg_idx = q_idx;
- cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
- netif_napi_add(adapter->netdev, &q_vector->napi,
- iavf_napi_poll);
+ netif_napi_add_config_locked(adapter->netdev, &q_vector->napi,
+ iavf_napi_poll, q_idx);
+ netif_napi_set_irq_locked(&q_vector->napi, irq_num);
}
return 0;
@@ -1855,7 +1852,7 @@ static void iavf_free_q_vectors(struct iavf_adapter *adapter)
for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx];
- netif_napi_del(&q_vector->napi);
+ netif_napi_del_locked(&q_vector->napi);
}
kfree(adapter->q_vectors);
adapter->q_vectors = NULL;
@@ -1991,18 +1988,21 @@ err:
* iavf_finish_config - do all netdev work that needs RTNL
* @work: our work_struct
*
- * Do work that needs both RTNL and crit_lock.
- **/
+ * Do work that needs RTNL.
+ */
static void iavf_finish_config(struct work_struct *work)
{
struct iavf_adapter *adapter;
+ bool netdev_released = false;
int pairs, err;
adapter = container_of(work, struct iavf_adapter, finish_config);
- /* Always take RTNL first to prevent circular lock dependency */
+ /* Always take RTNL first to prevent circular lock dependency;
+ * the dev->lock (== netdev lock) is needed to update the queue number.
+ */
rtnl_lock();
- mutex_lock(&adapter->crit_lock);
+ netdev_lock(adapter->netdev);
if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES) &&
adapter->netdev->reg_state == NETREG_REGISTERED &&
@@ -2013,26 +2013,33 @@ static void iavf_finish_config(struct work_struct *work)
switch (adapter->state) {
case __IAVF_DOWN:
+ /* Set the real number of queues when reset occurs while
+ * state == __IAVF_DOWN
+ */
+ pairs = adapter->num_active_queues;
+ netif_set_real_num_rx_queues(adapter->netdev, pairs);
+ netif_set_real_num_tx_queues(adapter->netdev, pairs);
+
if (adapter->netdev->reg_state != NETREG_REGISTERED) {
+ netdev_unlock(adapter->netdev);
+ netdev_released = true;
err = register_netdevice(adapter->netdev);
if (err) {
dev_err(&adapter->pdev->dev, "Unable to register netdev (%d)\n",
err);
/* go back and try again.*/
+ netdev_lock(adapter->netdev);
iavf_free_rss(adapter);
iavf_free_misc_irq(adapter);
iavf_reset_interrupt_capability(adapter);
iavf_change_state(adapter,
__IAVF_INIT_CONFIG_ADAPTER);
+ netdev_unlock(adapter->netdev);
goto out;
}
}
-
- /* Set the real number of queues when reset occurs while
- * state == __IAVF_DOWN
- */
- fallthrough;
+ break;
case __IAVF_RUNNING:
pairs = adapter->num_active_queues;
netif_set_real_num_rx_queues(adapter->netdev, pairs);
@@ -2044,7 +2051,8 @@ static void iavf_finish_config(struct work_struct *work)
}
out:
- mutex_unlock(&adapter->crit_lock);
+ if (!netdev_released)
+ netdev_unlock(adapter->netdev);
rtnl_unlock();
}
@@ -2073,6 +2081,10 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
return iavf_send_vf_config_msg(adapter);
if (adapter->aq_required & IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS)
return iavf_send_vf_offload_vlan_v2_msg(adapter);
+ if (adapter->aq_required & IAVF_FLAG_AQ_GET_SUPPORTED_RXDIDS)
+ return iavf_send_vf_supported_rxdids_msg(adapter);
+ if (adapter->aq_required & IAVF_FLAG_AQ_GET_PTP_CAPS)
+ return iavf_send_vf_ptp_caps_msg(adapter);
if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) {
iavf_disable_queues(adapter);
return 0;
@@ -2113,6 +2125,21 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
return 0;
}
+ if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW) {
+ iavf_cfg_queues_bw(adapter);
+ return 0;
+ }
+
+ if (adapter->aq_required & IAVF_FLAG_AQ_GET_QOS_CAPS) {
+ iavf_get_qos_caps(adapter);
+ return 0;
+ }
+
+ if (adapter->aq_required & IAVF_FLAG_AQ_CFG_QUEUES_QUANTA_SIZE) {
+ iavf_cfg_queues_quanta_size(adapter);
+ return 0;
+ }
+
if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) {
iavf_configure_queues(adapter);
return 0;
@@ -2131,12 +2158,12 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS;
return 0;
}
- if (adapter->aq_required & IAVF_FLAG_AQ_GET_HENA) {
- iavf_get_hena(adapter);
+ if (adapter->aq_required & IAVF_FLAG_AQ_GET_RSS_HASHCFG) {
+ iavf_get_rss_hashcfg(adapter);
return 0;
}
- if (adapter->aq_required & IAVF_FLAG_AQ_SET_HENA) {
- iavf_set_hena(adapter);
+ if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_HASHCFG) {
+ iavf_set_rss_hashcfg(adapter);
return 0;
}
if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) {
@@ -2222,7 +2249,10 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021AD);
return 0;
}
-
+ if (adapter->aq_required & IAVF_FLAG_AQ_SEND_PTP_CMD) {
+ iavf_virtchnl_send_ptp_cmd(adapter);
+ return IAVF_SUCCESS;
+ }
if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_STATS) {
iavf_request_stats(adapter);
return 0;
@@ -2587,6 +2617,112 @@ err:
}
/**
+ * iavf_init_send_supported_rxdids - part of querying for supported RXDID
+ * formats
+ * @adapter: board private structure
+ *
+ * Function processes send of the request for supported RXDIDs to the PF.
+ * Must clear IAVF_EXTENDED_CAP_RECV_RXDID if the message is not sent, e.g.
+ * due to the PF not negotiating VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC.
+ */
+static void iavf_init_send_supported_rxdids(struct iavf_adapter *adapter)
+{
+ int ret;
+
+ ret = iavf_send_vf_supported_rxdids_msg(adapter);
+ if (ret == -EOPNOTSUPP) {
+ /* PF does not support VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC. In this
+ * case, we did not send the capability exchange message and
+ * do not expect a response.
+ */
+ adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_RXDID;
+ }
+
+ /* We sent the message, so move on to the next step */
+ adapter->extended_caps &= ~IAVF_EXTENDED_CAP_SEND_RXDID;
+}
+
+/**
+ * iavf_init_recv_supported_rxdids - part of querying for supported RXDID
+ * formats
+ * @adapter: board private structure
+ *
+ * Function processes receipt of the supported RXDIDs message from the PF.
+ **/
+static void iavf_init_recv_supported_rxdids(struct iavf_adapter *adapter)
+{
+ int ret;
+
+ memset(&adapter->supp_rxdids, 0, sizeof(adapter->supp_rxdids));
+
+ ret = iavf_get_vf_supported_rxdids(adapter);
+ if (ret)
+ goto err;
+
+ /* We've processed the PF response to the
+ * VIRTCHNL_OP_GET_SUPPORTED_RXDIDS message we sent previously.
+ */
+ adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_RXDID;
+ return;
+
+err:
+ /* We didn't receive a reply. Make sure we try sending again when
+ * __IAVF_INIT_FAILED attempts to recover.
+ */
+ adapter->extended_caps |= IAVF_EXTENDED_CAP_SEND_RXDID;
+ iavf_change_state(adapter, __IAVF_INIT_FAILED);
+}
+
+/**
+ * iavf_init_send_ptp_caps - part of querying for extended PTP capabilities
+ * @adapter: board private structure
+ *
+ * Function processes send of the request for 1588 PTP capabilities to the PF.
+ * Must clear IAVF_EXTENDED_CAP_RECV_PTP if the message is not sent, e.g.
+ * due to the PF not negotiating VIRTCHNL_VF_PTP_CAP.
+ */
+static void iavf_init_send_ptp_caps(struct iavf_adapter *adapter)
+{
+ if (iavf_send_vf_ptp_caps_msg(adapter) == -EOPNOTSUPP) {
+ /* PF does not support VIRTCHNL_VF_PTP_CAP. In this case, we
+ * did not send the capability exchange message and do not
+ * expect a response.
+ */
+ adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_PTP;
+ }
+
+ /* We sent the message, so move on to the next step */
+ adapter->extended_caps &= ~IAVF_EXTENDED_CAP_SEND_PTP;
+}
+
+/**
+ * iavf_init_recv_ptp_caps - part of querying for supported PTP capabilities
+ * @adapter: board private structure
+ *
+ * Function processes receipt of the PTP capabilities supported on this VF.
+ **/
+static void iavf_init_recv_ptp_caps(struct iavf_adapter *adapter)
+{
+ memset(&adapter->ptp.hw_caps, 0, sizeof(adapter->ptp.hw_caps));
+
+ if (iavf_get_vf_ptp_caps(adapter))
+ goto err;
+
+ /* We've processed the PF response to the VIRTCHNL_OP_1588_PTP_GET_CAPS
+ * message we sent previously.
+ */
+ adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_PTP;
+ return;
+
+err:
+ /* We didn't receive a reply. Make sure we try sending again when
+ * __IAVF_INIT_FAILED attempts to recover.
+ */
+ adapter->extended_caps |= IAVF_EXTENDED_CAP_SEND_PTP;
+ iavf_change_state(adapter, __IAVF_INIT_FAILED);
+}
+
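Each extended capability is negotiated with a SEND/RECV flag pair: the SEND bit is cleared once the request goes out (or is skipped), the RECV bit once the reply is processed, and a failed receive re-arms the SEND bit so the init state machine retries. A compact standalone sketch of that bookkeeping (hypothetical bit names):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CAP_SEND_RXDID	(1u << 0)
#define CAP_RECV_RXDID	(1u << 1)

static uint32_t extended_caps = CAP_SEND_RXDID | CAP_RECV_RXDID;

static void step_send(bool supported)
{
	if (!supported)				/* nothing to wait for */
		extended_caps &= ~CAP_RECV_RXDID;
	extended_caps &= ~CAP_SEND_RXDID;	/* request sent (or skipped) */
}

static void step_recv(bool reply_ok)
{
	if (reply_ok)
		extended_caps &= ~CAP_RECV_RXDID;	/* done */
	else
		extended_caps |= CAP_SEND_RXDID;	/* retry from the send step */
}

int main(void)
{
	step_send(true);
	step_recv(true);
	printf("caps left: 0x%x\n", extended_caps);	/* 0x0 when finished */
	return 0;
}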
+/**
* iavf_init_process_extended_caps - Part of driver startup
* @adapter: board private structure
*
@@ -2610,6 +2746,24 @@ static void iavf_init_process_extended_caps(struct iavf_adapter *adapter)
return;
}
+ /* Process capability exchange for RXDID formats */
+ if (adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_RXDID) {
+ iavf_init_send_supported_rxdids(adapter);
+ return;
+ } else if (adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_RXDID) {
+ iavf_init_recv_supported_rxdids(adapter);
+ return;
+ }
+
+ /* Process capability exchange for PTP features */
+ if (adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_PTP) {
+ iavf_init_send_ptp_caps(adapter);
+ return;
+ } else if (adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_PTP) {
+ iavf_init_recv_ptp_caps(adapter);
+ return;
+ }
+
/* When we reach here, no further extended capabilities exchanges are
* necessary, so we finally transition into __IAVF_INIT_CONFIG_ADAPTER
*/
@@ -2642,9 +2796,8 @@ static void iavf_init_config_adapter(struct iavf_adapter *adapter)
iavf_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
- /* MTU range: 68 - 9710 */
netdev->min_mtu = ETH_MIN_MTU;
- netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD;
+ netdev->max_mtu = LIBIE_MAX_MTU;
if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
@@ -2699,6 +2852,12 @@ static void iavf_init_config_adapter(struct iavf_adapter *adapter)
/* request initial VLAN offload settings */
iavf_set_vlan_offload_features(adapter, 0, netdev->features);
+ if (QOS_ALLOWED(adapter))
+ adapter->aq_required |= IAVF_FLAG_AQ_GET_QOS_CAPS;
+
+ /* Setup initial PTP configuration */
+ iavf_ptp_init(adapter);
+
iavf_schedule_finish_config(adapter);
return;
@@ -2711,24 +2870,15 @@ err:
iavf_change_state(adapter, __IAVF_INIT_FAILED);
}
-/**
- * iavf_watchdog_task - Periodic call-back task
- * @work: pointer to work_struct
- **/
-static void iavf_watchdog_task(struct work_struct *work)
+static const int IAVF_NO_RESCHED = -1;
+
+/* return: msec delay for requeueing itself */
+static int iavf_watchdog_step(struct iavf_adapter *adapter)
{
- struct iavf_adapter *adapter = container_of(work,
- struct iavf_adapter,
- watchdog_task.work);
struct iavf_hw *hw = &adapter->hw;
u32 reg_val;
- if (!mutex_trylock(&adapter->crit_lock)) {
- if (adapter->state == __IAVF_REMOVE)
- return;
-
- goto restart_watchdog;
- }
+ netdev_assert_locked(adapter->netdev);
if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
iavf_change_state(adapter, __IAVF_COMM_FAILED);
@@ -2736,34 +2886,19 @@ static void iavf_watchdog_task(struct work_struct *work)
switch (adapter->state) {
case __IAVF_STARTUP:
iavf_startup(adapter);
- mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- msecs_to_jiffies(30));
- return;
+ return 30;
case __IAVF_INIT_VERSION_CHECK:
iavf_init_version_check(adapter);
- mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- msecs_to_jiffies(30));
- return;
+ return 30;
case __IAVF_INIT_GET_RESOURCES:
iavf_init_get_resources(adapter);
- mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- msecs_to_jiffies(1));
- return;
+ return 1;
case __IAVF_INIT_EXTENDED_CAPS:
iavf_init_process_extended_caps(adapter);
- mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- msecs_to_jiffies(1));
- return;
+ return 1;
case __IAVF_INIT_CONFIG_ADAPTER:
iavf_init_config_adapter(adapter);
- mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- msecs_to_jiffies(1));
- return;
+ return 1;
case __IAVF_INIT_FAILED:
if (test_bit(__IAVF_IN_REMOVE_TASK,
&adapter->crit_section)) {
@@ -2771,24 +2906,18 @@ static void iavf_watchdog_task(struct work_struct *work)
* watchdog task, iavf_remove should handle this state
* as it can loop forever
*/
- mutex_unlock(&adapter->crit_lock);
- return;
+ return IAVF_NO_RESCHED;
}
if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
dev_err(&adapter->pdev->dev,
"Failed to communicate with PF; waiting before retry\n");
adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
iavf_shutdown_adminq(hw);
- mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(adapter->wq,
- &adapter->watchdog_task, (5 * HZ));
- return;
+ return 5000;
}
/* Try again from failed step*/
iavf_change_state(adapter, adapter->last_state);
- mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(adapter->wq, &adapter->watchdog_task, HZ);
- return;
+ return 1000;
case __IAVF_COMM_FAILED:
if (test_bit(__IAVF_IN_REMOVE_TASK,
&adapter->crit_section)) {
@@ -2798,8 +2927,7 @@ static void iavf_watchdog_task(struct work_struct *work)
*/
iavf_change_state(adapter, __IAVF_INIT_FAILED);
adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
- mutex_unlock(&adapter->crit_lock);
- return;
+ return IAVF_NO_RESCHED;
}
reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
@@ -2817,16 +2945,9 @@ static void iavf_watchdog_task(struct work_struct *work)
}
adapter->aq_required = 0;
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
- mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(adapter->wq,
- &adapter->watchdog_task,
- msecs_to_jiffies(10));
- return;
+ return 10;
case __IAVF_RESETTING:
- mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- HZ * 2);
- return;
+ return 2000;
case __IAVF_DOWN:
case __IAVF_DOWN_PENDING:
case __IAVF_TESTING:
@@ -2853,8 +2974,7 @@ static void iavf_watchdog_task(struct work_struct *work)
break;
case __IAVF_REMOVE:
default:
- mutex_unlock(&adapter->crit_lock);
- return;
+ return IAVF_NO_RESCHED;
}
/* check for hw reset */
@@ -2864,22 +2984,29 @@ static void iavf_watchdog_task(struct work_struct *work)
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
iavf_schedule_reset(adapter, IAVF_FLAG_RESET_PENDING);
- mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(adapter->wq,
- &adapter->watchdog_task, HZ * 2);
- return;
}
- mutex_unlock(&adapter->crit_lock);
-restart_watchdog:
+ return adapter->aq_required ? 20 : 2000;
+}
+
+static void iavf_watchdog_task(struct work_struct *work)
+{
+ struct iavf_adapter *adapter = container_of(work,
+ struct iavf_adapter,
+ watchdog_task.work);
+ struct net_device *netdev = adapter->netdev;
+ int msec_delay;
+
+ netdev_lock(netdev);
+ msec_delay = iavf_watchdog_step(adapter);
+ /* note that we schedule a different task */
if (adapter->state >= __IAVF_DOWN)
queue_work(adapter->wq, &adapter->adminq_task);
- if (adapter->aq_required)
- queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- msecs_to_jiffies(20));
- else
+
+ if (msec_delay != IAVF_NO_RESCHED)
queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- HZ * 2);
+ msecs_to_jiffies(msec_delay));
+ netdev_unlock(netdev);
}
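The refactor splits the watchdog into a step function that only reports how soon it wants to run again (or IAVF_NO_RESCHED to stop), and a thin wrapper that takes the netdev lock and requeues. A tiny standalone sketch of that contract, with a trivial stand-in state machine:

#include <stdio.h>

static const int NO_RESCHED = -1;

enum state { ST_STARTUP, ST_RUNNING, ST_REMOVE };

/* Returns the delay in milliseconds before the next run, or NO_RESCHED. */
static int watchdog_step(enum state *st)
{
	switch (*st) {
	case ST_STARTUP:
		*st = ST_RUNNING;
		return 30;	/* poll again quickly during init */
	case ST_RUNNING:
		return 2000;	/* steady-state polling */
	case ST_REMOVE:
	default:
		return NO_RESCHED;
	}
}

int main(void)
{
	enum state st = ST_STARTUP;

	for (int i = 0; i < 3; i++) {
		int delay = watchdog_step(&st);

		if (delay == NO_RESCHED)
			break;
		printf("requeue in %d ms\n", delay);
	}
	return 0;
}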
/**
@@ -2887,14 +3014,15 @@ restart_watchdog:
* @adapter: board private structure
*
* Set communication failed flag and free all resources.
- * NOTE: This function is expected to be called with crit_lock being held.
- **/
+ */
static void iavf_disable_vf(struct iavf_adapter *adapter)
{
struct iavf_mac_filter *f, *ftmp;
struct iavf_vlan_filter *fv, *fvtmp;
struct iavf_cloud_filter *cf, *cftmp;
+ netdev_assert_locked(adapter->netdev);
+
adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
/* We don't use netif_running() because it may be true prior to
@@ -2948,6 +3076,30 @@ static void iavf_disable_vf(struct iavf_adapter *adapter)
}
/**
+ * iavf_reconfig_qs_bw - Reconfigure queue bandwidth after a hardware reset
+ * @adapter: board private structure
+ *
+ * After a reset, the shaper parameters of queues need to be replayed again.
+ * Since the net_shaper object inside TX rings persists across reset,
+ * set the update flag for all queues so that the virtchnl message is triggered
+ * for all queues.
+ **/
+static void iavf_reconfig_qs_bw(struct iavf_adapter *adapter)
+{
+ int i, num = 0;
+
+ for (i = 0; i < adapter->num_active_queues; i++)
+ if (adapter->tx_rings[i].q_shaper.bw_min ||
+ adapter->tx_rings[i].q_shaper.bw_max) {
+ adapter->tx_rings[i].q_shaper_update = true;
+ num++;
+ }
+
+ if (num)
+ adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW;
+}
+
+/**
* iavf_reset_task - Call-back task to handle hardware reset
* @work: pointer to work_struct
*
@@ -2970,15 +3122,7 @@ static void iavf_reset_task(struct work_struct *work)
int i = 0, err;
bool running;
- /* When device is being removed it doesn't make sense to run the reset
- * task, just return in such a case.
- */
- if (!mutex_trylock(&adapter->crit_lock)) {
- if (adapter->state != __IAVF_REMOVE)
- queue_work(adapter->wq, &adapter->reset_task);
-
- return;
- }
+ netdev_lock(netdev);
iavf_misc_irq_disable(adapter);
if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
@@ -3023,11 +3167,22 @@ static void iavf_reset_task(struct work_struct *work)
dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
reg_val);
iavf_disable_vf(adapter);
- mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
return; /* Do not attempt to reinit. It's dead, Jim. */
}
continue_reset:
+ /* If we are still early in the state machine, just restart. */
+ if (adapter->state <= __IAVF_INIT_FAILED) {
+ iavf_shutdown_adminq(hw);
+ iavf_change_state(adapter, __IAVF_STARTUP);
+ iavf_startup(adapter);
+ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
+ msecs_to_jiffies(30));
+ netdev_unlock(netdev);
+ return;
+ }
+
/* We don't use netif_running() because it may be true prior to
* ndo_open() returning, so we can't assume it means all our open
* tasks have finished, since we're not holding the rtnl_lock here.
@@ -3079,15 +3234,18 @@ continue_reset:
}
adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG;
- /* always set since VIRTCHNL_OP_GET_VF_RESOURCES has not been
- * sent/received yet, so VLAN_V2_ALLOWED() cannot is not reliable here,
- * however the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS won't be sent until
- * VIRTCHNL_OP_GET_VF_RESOURCES and VIRTCHNL_VF_OFFLOAD_VLAN_V2 have
- * been successfully sent and negotiated
- */
- adapter->aq_required |= IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS;
adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
+ /* Certain capabilities require an extended negotiation process using
+ * extra messages that must be processed after getting the VF
+ * configuration. The related checks such as VLAN_V2_ALLOWED() are not
+ * reliable here, since the configuration has not yet been negotiated.
+ *
+ * Always set these flags, since the related VIRTCHNL messages won't
+ * be sent until after VIRTCHNL_OP_GET_VF_RESOURCES.
+ */
+ adapter->aq_required |= IAVF_FLAG_AQ_EXTENDED_CAPS;
+
spin_lock_bh(&adapter->mac_vlan_list_lock);
/* Delete filter for the current MAC address, it could have
@@ -3153,6 +3311,8 @@ continue_reset:
iavf_up_complete(adapter);
iavf_irq_enable(adapter, true);
+
+ iavf_reconfig_qs_bw(adapter);
} else {
iavf_change_state(adapter, __IAVF_DOWN);
wake_up(&adapter->down_waitqueue);
@@ -3161,7 +3321,7 @@ continue_reset:
adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
wake_up(&adapter->reset_waitqueue);
- mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
return;
reset_err:
@@ -3171,7 +3331,7 @@ reset_err:
}
iavf_disable_vf(adapter);
- mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
}
@@ -3183,6 +3343,7 @@ static void iavf_adminq_task(struct work_struct *work)
{
struct iavf_adapter *adapter =
container_of(work, struct iavf_adapter, adminq_task);
+ struct net_device *netdev = adapter->netdev;
struct iavf_hw *hw = &adapter->hw;
struct iavf_arq_event_info event;
enum virtchnl_ops v_op;
@@ -3190,13 +3351,7 @@ static void iavf_adminq_task(struct work_struct *work)
u32 val, oldval;
u16 pending;
- if (!mutex_trylock(&adapter->crit_lock)) {
- if (adapter->state == __IAVF_REMOVE)
- return;
-
- queue_work(adapter->wq, &adapter->adminq_task);
- goto out;
- }
+ netdev_lock(netdev);
if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
goto unlock;
@@ -3263,8 +3418,7 @@ static void iavf_adminq_task(struct work_struct *work)
freedom:
kfree(event.msg_buf);
unlock:
- mutex_unlock(&adapter->crit_lock);
-out:
+ netdev_unlock(netdev);
/* re-enable Admin queue interrupt cause */
iavf_misc_irq_enable(adapter);
}
@@ -3503,6 +3657,34 @@ static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
}
/**
+ * iavf_is_tc_config_same - Compare the mqprio TC config with the
+ * TC config already configured on this adapter.
+ * @adapter: board private structure
+ * @mqprio_qopt: TC config received from kernel.
+ *
+ * This function compares the TC config received from the kernel
+ * with the config already configured on the adapter.
+ *
+ * Return: True if the configurations are the same, false otherwise.
+ **/
+static bool iavf_is_tc_config_same(struct iavf_adapter *adapter,
+ struct tc_mqprio_qopt *mqprio_qopt)
+{
+ struct virtchnl_channel_info *ch = &adapter->ch_config.ch_info[0];
+ int i;
+
+ if (adapter->num_tc != mqprio_qopt->num_tc)
+ return false;
+
+ for (i = 0; i < adapter->num_tc; i++) {
+ if (ch[i].count != mqprio_qopt->count[i] ||
+ ch[i].offset != mqprio_qopt->offset[i])
+ return false;
+ }
+ return true;
+}
+
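The comparison above is a straight element-wise check of the per-TC (count, offset) pairs against what is already programmed. A standalone sketch with plain arrays (illustrative types, not the virtchnl channel info):

#include <stdbool.h>
#include <stdio.h>

struct tc_cfg { unsigned int count, offset; };

static bool tc_config_same(const struct tc_cfg *cur, int cur_num_tc,
			   const struct tc_cfg *req, int req_num_tc)
{
	if (cur_num_tc != req_num_tc)
		return false;

	for (int i = 0; i < cur_num_tc; i++) {
		if (cur[i].count != req[i].count ||
		    cur[i].offset != req[i].offset)
			return false;
	}
	return true;
}

int main(void)
{
	struct tc_cfg cur[] = { { 4, 0 }, { 4, 4 } };
	struct tc_cfg req[] = { { 4, 0 }, { 4, 4 } };

	printf("same: %d\n", tc_config_same(cur, 2, req, 2));
	return 0;
}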
+/**
* __iavf_setup_tc - configure multiple traffic classes
* @netdev: network interface device structure
* @type_data: tc offload data
@@ -3559,7 +3741,7 @@ static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
if (ret)
return ret;
/* Return if same TC config is requested */
- if (adapter->num_tc == num_tc)
+ if (iavf_is_tc_config_same(adapter, &mqprio_qopt->qopt))
return 0;
adapter->num_tc = num_tc;
@@ -3757,6 +3939,10 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
flow_rule_match_control(rule, &match);
addr_type = match.key->addr_type;
+
+ if (flow_rule_has_control_flags(match.mask->flags,
+ f->common.extack))
+ return -EOPNOTSUPP;
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
@@ -3925,8 +4111,8 @@ static int iavf_configure_clsflower(struct iavf_adapter *adapter,
struct flow_cls_offload *cls_flower)
{
int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
- struct iavf_cloud_filter *filter = NULL;
- int err = -EINVAL, count = 50;
+ struct iavf_cloud_filter *filter;
+ int err;
if (tc < 0) {
dev_err(&adapter->pdev->dev, "Invalid traffic class\n");
@@ -3936,17 +4122,10 @@ static int iavf_configure_clsflower(struct iavf_adapter *adapter,
filter = kzalloc(sizeof(*filter), GFP_KERNEL);
if (!filter)
return -ENOMEM;
-
- while (!mutex_trylock(&adapter->crit_lock)) {
- if (--count == 0) {
- kfree(filter);
- return err;
- }
- udelay(1);
- }
-
filter->cookie = cls_flower->cookie;
+ netdev_lock(adapter->netdev);
+
/* bail out here if filter already exists */
spin_lock_bh(&adapter->cloud_filter_list_lock);
if (iavf_find_cf(adapter, &cls_flower->cookie)) {
@@ -3980,7 +4159,7 @@ err:
if (err)
kfree(filter);
- mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(adapter->netdev);
return err;
}
@@ -4010,7 +4189,7 @@ static int iavf_delete_clsflower(struct iavf_adapter *adapter,
/**
* iavf_setup_tc_cls_flower - flower classifier offloads
- * @adapter: board private structure
+ * @adapter: pointer to iavf adapter structure
* @cls_flower: pointer to flow_cls_offload struct with flow info
*/
static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
@@ -4029,6 +4208,154 @@ static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
}
/**
+ * iavf_add_cls_u32 - Add U32 classifier offloads
+ * @adapter: pointer to iavf adapter structure
+ * @cls_u32: pointer to tc_cls_u32_offload struct with flow info
+ *
+ * Return: 0 on success or negative errno on failure.
+ */
+static int iavf_add_cls_u32(struct iavf_adapter *adapter,
+ struct tc_cls_u32_offload *cls_u32)
+{
+ struct netlink_ext_ack *extack = cls_u32->common.extack;
+ struct virtchnl_fdir_rule *rule_cfg;
+ struct virtchnl_filter_action *vact;
+ struct virtchnl_proto_hdrs *hdrs;
+ struct ethhdr *spec_h, *mask_h;
+ const struct tc_action *act;
+ struct iavf_fdir_fltr *fltr;
+ struct tcf_exts *exts;
+ unsigned int q_index;
+ int i, status = 0;
+ int off_base = 0;
+
+ if (cls_u32->knode.link_handle) {
+ NL_SET_ERR_MSG_MOD(extack, "Linking not supported");
+ return -EOPNOTSUPP;
+ }
+
+ fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
+ if (!fltr)
+ return -ENOMEM;
+
+ rule_cfg = &fltr->vc_add_msg.rule_cfg;
+ hdrs = &rule_cfg->proto_hdrs;
+ hdrs->count = 0;
+
+ /* The parser lib at the PF expects the packet starting with MAC hdr */
+ switch (ntohs(cls_u32->common.protocol)) {
+ case ETH_P_802_3:
+ break;
+ case ETH_P_IP:
+ spec_h = (struct ethhdr *)hdrs->raw.spec;
+ mask_h = (struct ethhdr *)hdrs->raw.mask;
+ spec_h->h_proto = htons(ETH_P_IP);
+ mask_h->h_proto = htons(0xFFFF);
+ off_base += ETH_HLEN;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Only 802_3 and ip filter protocols are supported");
+ status = -EOPNOTSUPP;
+ goto free_alloc;
+ }
+
+ for (i = 0; i < cls_u32->knode.sel->nkeys; i++) {
+ __be32 val, mask;
+ int off;
+
+ off = off_base + cls_u32->knode.sel->keys[i].off;
+ val = cls_u32->knode.sel->keys[i].val;
+ mask = cls_u32->knode.sel->keys[i].mask;
+
+ if (off >= sizeof(hdrs->raw.spec)) {
+ NL_SET_ERR_MSG_MOD(extack, "Input exceeds maximum allowed.");
+ status = -EINVAL;
+ goto free_alloc;
+ }
+
+ memcpy(&hdrs->raw.spec[off], &val, sizeof(val));
+ memcpy(&hdrs->raw.mask[off], &mask, sizeof(mask));
+ hdrs->raw.pkt_len = off + sizeof(val);
+ }
+
+ /* Only one action is allowed */
+ rule_cfg->action_set.count = 1;
+ vact = &rule_cfg->action_set.actions[0];
+ exts = cls_u32->knode.exts;
+
+ tcf_exts_for_each_action(i, act, exts) {
+ /* FDIR queue */
+ if (is_tcf_skbedit_rx_queue_mapping(act)) {
+ q_index = tcf_skbedit_rx_queue_mapping(act);
+ if (q_index >= adapter->num_active_queues) {
+ status = -EINVAL;
+ goto free_alloc;
+ }
+
+ vact->type = VIRTCHNL_ACTION_QUEUE;
+ vact->act_conf.queue.index = q_index;
+ break;
+ }
+
+ /* Drop */
+ if (is_tcf_gact_shot(act)) {
+ vact->type = VIRTCHNL_ACTION_DROP;
+ break;
+ }
+
+ /* Unsupported action */
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported action.");
+ status = -EOPNOTSUPP;
+ goto free_alloc;
+ }
+
+ fltr->vc_add_msg.vsi_id = adapter->vsi.id;
+ fltr->cls_u32_handle = cls_u32->knode.handle;
+ return iavf_fdir_add_fltr(adapter, fltr);
+
+free_alloc:
+ kfree(fltr);
+ return status;
+}
+
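The core of the u32 translation is copying each (offset, value, mask) key into parallel spec/mask byte buffers, with the offset shifted past the Ethernet header when the filter starts at the IP layer. A standalone sketch of that copy with a bounds check (simplified buffers, not the virtchnl raw rule):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RAW_LEN 64	/* simplified raw buffer size */

struct u32_key { int off; uint32_t val, mask; };

static int apply_keys(uint8_t *spec, uint8_t *mask, int off_base,
		      const struct u32_key *keys, int nkeys)
{
	int pkt_len = 0;

	for (int i = 0; i < nkeys; i++) {
		int off = off_base + keys[i].off;

		if (off < 0 || off + (int)sizeof(uint32_t) > RAW_LEN)
			return -1;	/* key falls outside the raw buffer */

		memcpy(&spec[off], &keys[i].val, sizeof(uint32_t));
		memcpy(&mask[off], &keys[i].mask, sizeof(uint32_t));
		pkt_len = off + (int)sizeof(uint32_t);
	}
	return pkt_len;
}

int main(void)
{
	uint8_t spec[RAW_LEN] = { 0 }, mask[RAW_LEN] = { 0 };
	struct u32_key keys[] = { { 12, 0x0a000001, 0xffffffff } };

	/* 14 stands in for ETH_HLEN when the match starts at the IP header. */
	printf("pkt_len = %d\n", apply_keys(spec, mask, 14, keys, 1));
	return 0;
}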
+/**
+ * iavf_del_cls_u32 - Delete U32 classifier offloads
+ * @adapter: pointer to iavf adapter structure
+ * @cls_u32: pointer to tc_cls_u32_offload struct with flow info
+ *
+ * Return: 0 on success or negative errno on failure.
+ */
+static int iavf_del_cls_u32(struct iavf_adapter *adapter,
+ struct tc_cls_u32_offload *cls_u32)
+{
+ return iavf_fdir_del_fltr(adapter, true, cls_u32->knode.handle);
+}
+
+/**
+ * iavf_setup_tc_cls_u32 - U32 filter offloads
+ * @adapter: pointer to iavf adapter structure
+ * @cls_u32: pointer to tc_cls_u32_offload struct with flow info
+ *
+ * Return: 0 on success or negative errno on failure.
+ */
+static int iavf_setup_tc_cls_u32(struct iavf_adapter *adapter,
+ struct tc_cls_u32_offload *cls_u32)
+{
+ if (!TC_U32_SUPPORT(adapter) || !FDIR_FLTR_SUPPORT(adapter))
+ return -EOPNOTSUPP;
+
+ switch (cls_u32->command) {
+ case TC_CLSU32_NEW_KNODE:
+ case TC_CLSU32_REPLACE_KNODE:
+ return iavf_add_cls_u32(adapter, cls_u32);
+ case TC_CLSU32_DELETE_KNODE:
+ return iavf_del_cls_u32(adapter, cls_u32);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
* iavf_setup_tc_block_cb - block callback for tc
* @type: type of offload
* @type_data: offload data
@@ -4047,6 +4374,8 @@ static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
switch (type) {
case TC_SETUP_CLSFLOWER:
return iavf_setup_tc_cls_flower(cb_priv, type_data);
+ case TC_SETUP_CLSU32:
+ return iavf_setup_tc_cls_u32(cb_priv, type_data);
default:
return -EOPNOTSUPP;
}
@@ -4127,33 +4456,20 @@ static int iavf_open(struct net_device *netdev)
struct iavf_adapter *adapter = netdev_priv(netdev);
int err;
+ netdev_assert_locked(netdev);
+
if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) {
dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
return -EIO;
}
- while (!mutex_trylock(&adapter->crit_lock)) {
- /* If we are in __IAVF_INIT_CONFIG_ADAPTER state the crit_lock
- * is already taken and iavf_open is called from an upper
- * device's notifier reacting on NETDEV_REGISTER event.
- * We have to leave here to avoid dead lock.
- */
- if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER)
- return -EBUSY;
-
- usleep_range(500, 1000);
- }
-
- if (adapter->state != __IAVF_DOWN) {
- err = -EBUSY;
- goto err_unlock;
- }
+ if (adapter->state != __IAVF_DOWN)
+ return -EBUSY;
if (adapter->state == __IAVF_RUNNING &&
!test_bit(__IAVF_VSI_DOWN, adapter->vsi.state)) {
dev_dbg(&adapter->pdev->dev, "VF is already open.\n");
- err = 0;
- goto err_unlock;
+ return 0;
}
/* allocate transmit descriptors */
@@ -4172,9 +4488,7 @@ static int iavf_open(struct net_device *netdev)
goto err_req_irq;
spin_lock_bh(&adapter->mac_vlan_list_lock);
-
iavf_add_filter(adapter, adapter->hw.mac.addr);
-
spin_unlock_bh(&adapter->mac_vlan_list_lock);
/* Restore filters that were removed with IFF_DOWN */
@@ -4187,8 +4501,6 @@ static int iavf_open(struct net_device *netdev)
iavf_irq_enable(adapter, true);
- mutex_unlock(&adapter->crit_lock);
-
return 0;
err_req_irq:
@@ -4198,8 +4510,6 @@ err_setup_rx:
iavf_free_all_rx_resources(adapter);
err_setup_tx:
iavf_free_all_tx_resources(adapter);
-err_unlock:
- mutex_unlock(&adapter->crit_lock);
return err;
}
@@ -4221,12 +4531,10 @@ static int iavf_close(struct net_device *netdev)
u64 aq_to_restore;
int status;
- mutex_lock(&adapter->crit_lock);
+ netdev_assert_locked(netdev);
- if (adapter->state <= __IAVF_DOWN_PENDING) {
- mutex_unlock(&adapter->crit_lock);
+ if (adapter->state <= __IAVF_DOWN_PENDING)
return 0;
- }
set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
/* We cannot send IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS before
@@ -4257,7 +4565,7 @@ static int iavf_close(struct net_device *netdev)
iavf_change_state(adapter, __IAVF_DOWN_PENDING);
iavf_free_traffic_irqs(adapter);
- mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
/* We explicitly don't free resources here because the hardware is
* still active and can DMA into memory. Resources are cleared in
@@ -4275,10 +4583,10 @@ static int iavf_close(struct net_device *netdev)
msecs_to_jiffies(500));
if (!status)
netdev_warn(netdev, "Device resources not yet released\n");
+ netdev_lock(netdev);
- mutex_lock(&adapter->crit_lock);
adapter->aq_required |= aq_to_restore;
- mutex_unlock(&adapter->crit_lock);
+
return 0;
}
@@ -4296,7 +4604,7 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
netdev_dbg(netdev, "changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (netif_running(netdev)) {
iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
@@ -4329,8 +4637,8 @@ static void iavf_disable_fdir(struct iavf_adapter *adapter)
fdir->state == IAVF_FDIR_FLTR_INACTIVE) {
/* Delete filters not registered in PF */
list_del(&fdir->list);
+ iavf_dec_fdir_active_fltr(adapter, fdir);
kfree(fdir);
- adapter->fdir_active_fltr--;
} else if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING ||
fdir->state == IAVF_FDIR_FLTR_DIS_REQUEST ||
fdir->state == IAVF_FDIR_FLTR_ACTIVE) {
@@ -4740,6 +5048,115 @@ static netdev_features_t iavf_fix_features(struct net_device *netdev,
return iavf_fix_strip_features(adapter, features);
}
+static int iavf_hwstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+
+ *config = adapter->ptp.hwtstamp_config;
+
+ return 0;
+}
+
+static int iavf_hwstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+
+ return iavf_ptp_set_ts_config(adapter, config, extack);
+}
+
+static int
+iavf_verify_shaper(struct net_shaper_binding *binding,
+ const struct net_shaper *shaper,
+ struct netlink_ext_ack *extack)
+{
+ struct iavf_adapter *adapter = netdev_priv(binding->netdev);
+ u64 vf_max;
+
+ if (shaper->handle.scope == NET_SHAPER_SCOPE_QUEUE) {
+ vf_max = adapter->qos_caps->cap[0].shaper.peak;
+ if (vf_max && shaper->bw_max > vf_max) {
+ NL_SET_ERR_MSG_FMT(extack, "Max rate (%llu) of queue %d can't exceed max TX rate of VF (%llu kbps)",
+ shaper->bw_max, shaper->handle.id,
+ vf_max);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static int
+iavf_shaper_set(struct net_shaper_binding *binding,
+ const struct net_shaper *shaper,
+ struct netlink_ext_ack *extack)
+{
+ struct iavf_adapter *adapter = netdev_priv(binding->netdev);
+ const struct net_shaper_handle *handle = &shaper->handle;
+ struct iavf_ring *tx_ring;
+ int ret;
+
+ netdev_assert_locked(adapter->netdev);
+
+ if (handle->id >= adapter->num_active_queues)
+ return 0;
+
+ ret = iavf_verify_shaper(binding, shaper, extack);
+ if (ret)
+ return ret;
+
+ tx_ring = &adapter->tx_rings[handle->id];
+
+ tx_ring->q_shaper.bw_min = div_u64(shaper->bw_min, 1000);
+ tx_ring->q_shaper.bw_max = div_u64(shaper->bw_max, 1000);
+ tx_ring->q_shaper_update = true;
+
+ adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW;
+
+ return 0;
+}
+
+static int iavf_shaper_del(struct net_shaper_binding *binding,
+ const struct net_shaper_handle *handle,
+ struct netlink_ext_ack *extack)
+{
+ struct iavf_adapter *adapter = netdev_priv(binding->netdev);
+ struct iavf_ring *tx_ring;
+
+ netdev_assert_locked(adapter->netdev);
+
+ if (handle->id >= adapter->num_active_queues)
+ return 0;
+
+ tx_ring = &adapter->tx_rings[handle->id];
+ tx_ring->q_shaper.bw_min = 0;
+ tx_ring->q_shaper.bw_max = 0;
+ tx_ring->q_shaper_update = true;
+
+ adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW;
+
+ return 0;
+}
+
+static void iavf_shaper_cap(struct net_shaper_binding *binding,
+ enum net_shaper_scope scope,
+ unsigned long *flags)
+{
+ if (scope != NET_SHAPER_SCOPE_QUEUE)
+ return;
+
+ *flags = BIT(NET_SHAPER_A_CAPS_SUPPORT_BW_MIN) |
+ BIT(NET_SHAPER_A_CAPS_SUPPORT_BW_MAX) |
+ BIT(NET_SHAPER_A_CAPS_SUPPORT_METRIC_BPS);
+}
+
+static const struct net_shaper_ops iavf_shaper_ops = {
+ .set = iavf_shaper_set,
+ .delete = iavf_shaper_del,
+ .capabilities = iavf_shaper_cap,
+};
+
static const struct net_device_ops iavf_netdev_ops = {
.ndo_open = iavf_open,
.ndo_stop = iavf_close,
@@ -4755,6 +5172,9 @@ static const struct net_device_ops iavf_netdev_ops = {
.ndo_fix_features = iavf_fix_features,
.ndo_set_features = iavf_set_features,
.ndo_setup_tc = iavf_setup_tc,
+ .net_shaper_ops = &iavf_shaper_ops,
+ .ndo_hwtstamp_get = iavf_hwstamp_get,
+ .ndo_hwtstamp_set = iavf_hwstamp_set,
};
/**
@@ -4840,9 +5260,11 @@ int iavf_process_config(struct iavf_adapter *adapter)
/* get HW VLAN features that can be toggled */
hw_vlan_features = iavf_get_netdev_vlan_hw_features(adapter);
- /* Enable cloud filter if ADQ is supported */
- if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)
+ /* Enable HW TC offload if ADQ or tc U32 is supported */
+ if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ ||
+ TC_U32_SUPPORT(adapter))
hw_features |= NETIF_F_HW_TC;
+
if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_USO)
hw_features |= NETIF_F_GSO_UDP_L4;
@@ -4899,7 +5321,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct net_device *netdev;
struct iavf_adapter *adapter = NULL;
struct iavf_hw *hw = NULL;
- int err;
+ int err, len;
err = pci_enable_device(pdev);
if (err)
@@ -4928,6 +5350,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_alloc_etherdev;
}
+ netif_set_affinity_auto(netdev);
SET_NETDEV_DEV(netdev, &pdev->dev);
pci_set_drvdata(pdev, netdev);
@@ -4967,10 +5390,13 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->bus.func = PCI_FUNC(pdev->devfn);
hw->bus.bus_id = pdev->bus->number;
- /* set up the locks for the AQ, do this only once in probe
- * and destroy them only once in remove
- */
- mutex_init(&adapter->crit_lock);
+ len = struct_size(adapter->qos_caps, cap, IAVF_MAX_QOS_TC_NUM);
+ adapter->qos_caps = kzalloc(len, GFP_KERNEL);
+ if (!adapter->qos_caps) {
+ err = -ENOMEM;
+ goto err_alloc_qos_cap;
+ }
+
mutex_init(&hw->aq.asq_mutex);
mutex_init(&hw->aq.arq_mutex);
@@ -5000,11 +5426,17 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Setup the wait queue for indicating virtchannel events */
init_waitqueue_head(&adapter->vc_waitqueue);
+ INIT_LIST_HEAD(&adapter->ptp.aq_cmds);
+ init_waitqueue_head(&adapter->ptp.phc_time_waitqueue);
+ mutex_init(&adapter->ptp.aq_cmd_lock);
+
queue_delayed_work(adapter->wq, &adapter->watchdog_task,
msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
/* Initialization goes on in the work. Do not add more of it below. */
return 0;
+err_alloc_qos_cap:
+ iounmap(hw->hw_addr);
err_ioremap:
destroy_workqueue(adapter->wq);
err_alloc_wq:
@@ -5023,24 +5455,28 @@ err_dma:
*
* Called when the system (VM) is entering sleep/suspend.
**/
-static int __maybe_unused iavf_suspend(struct device *dev_d)
+static int iavf_suspend(struct device *dev_d)
{
struct net_device *netdev = dev_get_drvdata(dev_d);
struct iavf_adapter *adapter = netdev_priv(netdev);
+ bool running;
netif_device_detach(netdev);
- mutex_lock(&adapter->crit_lock);
-
- if (netif_running(netdev)) {
+ running = netif_running(netdev);
+ if (running)
rtnl_lock();
+ netdev_lock(netdev);
+
+ if (running)
iavf_down(adapter);
- rtnl_unlock();
- }
+
iavf_free_misc_irq(adapter);
iavf_reset_interrupt_capability(adapter);
- mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
+ if (running)
+ rtnl_unlock();
return 0;
}
@@ -5051,11 +5487,11 @@ static int __maybe_unused iavf_suspend(struct device *dev_d)
*
* Called when the system (VM) is resumed from sleep/suspend.
**/
-static int __maybe_unused iavf_resume(struct device *dev_d)
+static int iavf_resume(struct device *dev_d)
{
struct pci_dev *pdev = to_pci_dev(dev_d);
struct iavf_adapter *adapter;
- u32 err;
+ int err;
adapter = iavf_pdev_to_adapter(pdev);
@@ -5117,20 +5553,20 @@ static void iavf_remove(struct pci_dev *pdev)
* There are flows where register/unregister netdev may race.
*/
while (1) {
- mutex_lock(&adapter->crit_lock);
+ netdev_lock(netdev);
if (adapter->state == __IAVF_RUNNING ||
adapter->state == __IAVF_DOWN ||
adapter->state == __IAVF_INIT_FAILED) {
- mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
break;
}
/* Simply return if we already went through iavf_shutdown */
if (adapter->state == __IAVF_REMOVE) {
- mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
return;
}
- mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
usleep_range(500, 1000);
}
cancel_delayed_work_sync(&adapter->watchdog_task);
@@ -5139,7 +5575,7 @@ static void iavf_remove(struct pci_dev *pdev)
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
- mutex_lock(&adapter->crit_lock);
+ netdev_lock(netdev);
dev_info(&adapter->pdev->dev, "Removing device\n");
iavf_change_state(adapter, __IAVF_REMOVE);
@@ -5151,11 +5587,15 @@ static void iavf_remove(struct pci_dev *pdev)
msleep(50);
}
+ iavf_ptp_release(adapter);
+
iavf_misc_irq_disable(adapter);
/* Shut down all the garbage mashers on the detention level */
+ netdev_unlock(netdev);
cancel_work_sync(&adapter->reset_task);
cancel_delayed_work_sync(&adapter->watchdog_task);
cancel_work_sync(&adapter->adminq_task);
+ netdev_lock(netdev);
adapter->aq_required = 0;
adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
@@ -5173,8 +5613,7 @@ static void iavf_remove(struct pci_dev *pdev)
/* destroy the locks only once, here */
mutex_destroy(&hw->aq.arq_mutex);
mutex_destroy(&hw->aq.asq_mutex);
- mutex_unlock(&adapter->crit_lock);
- mutex_destroy(&adapter->crit_lock);
+ netdev_unlock(netdev);
iounmap(hw->hw_addr);
pci_release_regions(pdev);
@@ -5238,14 +5677,14 @@ static void iavf_shutdown(struct pci_dev *pdev)
pci_set_power_state(pdev, PCI_D3hot);
}
-static SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume);
static struct pci_driver iavf_driver = {
.name = iavf_driver_name,
.id_table = iavf_pci_tbl,
.probe = iavf_probe,
.remove = iavf_remove,
- .driver.pm = &iavf_pm_ops,
+ .driver.pm = pm_sleep_ptr(&iavf_pm_ops),
.shutdown = iavf_shutdown,
};
diff --git a/drivers/net/ethernet/intel/iavf/iavf_prototype.h b/drivers/net/ethernet/intel/iavf/iavf_prototype.h
index 4a48e6171405..7f9f9dbf959a 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_prototype.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_prototype.h
@@ -18,12 +18,11 @@
/* adminq functions */
enum iavf_status iavf_init_adminq(struct iavf_hw *hw);
enum iavf_status iavf_shutdown_adminq(struct iavf_hw *hw);
-void iavf_adminq_init_ring_data(struct iavf_hw *hw);
enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
struct iavf_arq_event_info *e,
u16 *events_pending);
enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
- struct iavf_aq_desc *desc,
+ struct libie_aq_desc *desc,
void *buff, /* can be NULL */
u16 buff_size,
struct iavf_asq_cmd_details *cmd_details);
@@ -33,11 +32,8 @@ bool iavf_asq_done(struct iavf_hw *hw);
void iavf_debug_aq(struct iavf_hw *hw, enum iavf_debug_mask mask,
void *desc, void *buffer, u16 buf_len);
-void iavf_idle_aq(struct iavf_hw *hw);
-void iavf_resume_aq(struct iavf_hw *hw);
bool iavf_check_asq_alive(struct iavf_hw *hw);
enum iavf_status iavf_aq_queue_shutdown(struct iavf_hw *hw, bool unloading);
-const char *iavf_aq_str(struct iavf_hw *hw, enum iavf_admin_queue_err aq_err);
const char *iavf_stat_str(struct iavf_hw *hw, enum iavf_status stat_err);
enum iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 seid,
@@ -45,13 +41,6 @@ enum iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 seid,
enum iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 seid,
struct iavf_aqc_get_set_rss_key_data *key);
-extern struct iavf_rx_ptype_decoded iavf_ptype_lookup[];
-
-static inline struct iavf_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
-{
- return iavf_ptype_lookup[ptype];
-}
-
void iavf_vf_parse_hw_config(struct iavf_hw *hw,
struct virtchnl_vf_resource *msg);
enum iavf_status iavf_aq_send_msg_to_pf(struct iavf_hw *hw,
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ptp.c b/drivers/net/ethernet/intel/iavf/iavf_ptp.c
new file mode 100644
index 000000000000..9cbd8c154031
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/iavf_ptp.c
@@ -0,0 +1,492 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2024 Intel Corporation. */
+
+#include "iavf.h"
+#include "iavf_ptp.h"
+
+#define iavf_clock_to_adapter(info) \
+ container_of_const(info, struct iavf_adapter, ptp.info)
+
+/**
+ * iavf_ptp_disable_rx_tstamp - Disable timestamping in Rx rings
+ * @adapter: private adapter structure
+ *
+ * Disable timestamp reporting for all Rx rings.
+ */
+static void iavf_ptp_disable_rx_tstamp(struct iavf_adapter *adapter)
+{
+ for (u32 i = 0; i < adapter->num_active_queues; i++)
+ adapter->rx_rings[i].flags &= ~IAVF_TXRX_FLAGS_HW_TSTAMP;
+}
+
+/**
+ * iavf_ptp_enable_rx_tstamp - Enable timestamping in Rx rings
+ * @adapter: private adapter structure
+ *
+ * Enable timestamp reporting for all Rx rings.
+ */
+static void iavf_ptp_enable_rx_tstamp(struct iavf_adapter *adapter)
+{
+ for (u32 i = 0; i < adapter->num_active_queues; i++)
+ adapter->rx_rings[i].flags |= IAVF_TXRX_FLAGS_HW_TSTAMP;
+}
+
+/**
+ * iavf_ptp_set_timestamp_mode - Set device timestamping mode
+ * @adapter: private adapter structure
+ * @config: pointer to kernel_hwtstamp_config
+ *
+ * Set the timestamping mode requested from the userspace.
+ *
+ * Note: this function always translates Rx timestamp requests for any packet
+ * category into HWTSTAMP_FILTER_ALL.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int iavf_ptp_set_timestamp_mode(struct iavf_adapter *adapter,
+ struct kernel_hwtstamp_config *config)
+{
+ /* Reserved for future extensions. */
+ if (config->flags)
+ return -EINVAL;
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ break;
+ case HWTSTAMP_TX_ON:
+ return -EOPNOTSUPP;
+ default:
+ return -ERANGE;
+ }
+
+ if (config->rx_filter == HWTSTAMP_FILTER_NONE) {
+ iavf_ptp_disable_rx_tstamp(adapter);
+ return 0;
+ } else if (config->rx_filter > HWTSTAMP_FILTER_NTP_ALL) {
+ return -ERANGE;
+ } else if (!(iavf_ptp_cap_supported(adapter,
+ VIRTCHNL_1588_PTP_CAP_RX_TSTAMP))) {
+ return -EOPNOTSUPP;
+ }
+
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+ iavf_ptp_enable_rx_tstamp(adapter);
+
+ return 0;
+}
+
+/**
+ * iavf_ptp_set_ts_config - Set timestamping configuration
+ * @adapter: private adapter structure
+ * @config: pointer to kernel_hwtstamp_config structure
+ * @extack: pointer to netlink_ext_ack structure
+ *
+ * Program the requested timestamping configuration to the device.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int iavf_ptp_set_ts_config(struct iavf_adapter *adapter,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ err = iavf_ptp_set_timestamp_mode(adapter, config);
+ if (err)
+ return err;
+
+ /* Save successful settings for future reference */
+ adapter->ptp.hwtstamp_config = *config;
+
+ return 0;
+}
+
+/**
+ * iavf_ptp_cap_supported - Check if a PTP capability is supported
+ * @adapter: private adapter structure
+ * @cap: the capability bitmask to check
+ *
+ * Return: true if every capability set in cap is also set in the enabled
+ * capabilities reported by the PF, false otherwise.
+ */
+bool iavf_ptp_cap_supported(const struct iavf_adapter *adapter, u32 cap)
+{
+ if (!IAVF_PTP_ALLOWED(adapter))
+ return false;
+
+ /* Only return true if every bit in cap is set in hw_caps.caps */
+ return (adapter->ptp.hw_caps.caps & cap) == cap;
+}
+
+/**
+ * iavf_allocate_ptp_cmd - Allocate a PTP command message structure
+ * @v_opcode: the virtchnl opcode
+ * @msglen: length in bytes of the associated virtchnl structure
+ *
+ * Allocates a PTP command message and pre-fills it with the provided message
+ * length and opcode.
+ *
+ * Return: pointer to the allocated PTP command, or NULL on allocation failure.
+ */
+static struct iavf_ptp_aq_cmd *iavf_allocate_ptp_cmd(enum virtchnl_ops v_opcode,
+ u16 msglen)
+{
+ struct iavf_ptp_aq_cmd *cmd;
+
+ cmd = kzalloc(struct_size(cmd, msg, msglen), GFP_KERNEL);
+ if (!cmd)
+ return NULL;
+
+ cmd->v_opcode = v_opcode;
+ cmd->msglen = msglen;
+
+ return cmd;
+}
+
+/**
+ * iavf_queue_ptp_cmd - Queue PTP command for sending over virtchnl
+ * @adapter: private adapter structure
+ * @cmd: the command structure to send
+ *
+ * Queue the given command structure into the PTP virtchnl command queue to
+ * send to the PF.
+ */
+static void iavf_queue_ptp_cmd(struct iavf_adapter *adapter,
+ struct iavf_ptp_aq_cmd *cmd)
+{
+ mutex_lock(&adapter->ptp.aq_cmd_lock);
+ list_add_tail(&cmd->list, &adapter->ptp.aq_cmds);
+ mutex_unlock(&adapter->ptp.aq_cmd_lock);
+
+ adapter->aq_required |= IAVF_FLAG_AQ_SEND_PTP_CMD;
+ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+}
+
+/**
+ * iavf_send_phc_read - Send request to read PHC time
+ * @adapter: private adapter structure
+ *
+ * Send a request to obtain the PTP hardware clock time. This allocates the
+ * VIRTCHNL_OP_1588_PTP_GET_TIME message and queues it up for sending to the
+ * PF in order to indirectly read the PHC time.
+ *
+ * This function does not wait for the reply from the PF.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+static int iavf_send_phc_read(struct iavf_adapter *adapter)
+{
+ struct iavf_ptp_aq_cmd *cmd;
+
+ if (!adapter->ptp.clock)
+ return -EOPNOTSUPP;
+
+ cmd = iavf_allocate_ptp_cmd(VIRTCHNL_OP_1588_PTP_GET_TIME,
+ sizeof(struct virtchnl_phc_time));
+ if (!cmd)
+ return -ENOMEM;
+
+ iavf_queue_ptp_cmd(adapter, cmd);
+
+ return 0;
+}
+
+/**
+ * iavf_read_phc_indirect - Indirectly read the PHC time via virtchnl
+ * @adapter: private adapter structure
+ * @ts: storage for the timestamp value
+ * @sts: system timestamp values before and after the read
+ *
+ * Used when the device does not have direct register access to the PHC time.
+ * Indirectly reads the time via the VIRTCHNL_OP_1588_PTP_GET_TIME message, and waits
+ * for the reply from the PF.
+ *
+ * Based on some simple measurements using ftrace and phc2sys, this clock
+ * access method has about a ~110 usec latency even when the system is not
+ * under load. In order to achieve acceptable results when using phc2sys with
+ * the indirect clock access method, it is recommended to use more
+ * conservative proportional and integration constants with the P/I servo.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+static int iavf_read_phc_indirect(struct iavf_adapter *adapter,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ long ret;
+ int err;
+
+ adapter->ptp.phc_time_ready = false;
+
+ ptp_read_system_prets(sts);
+
+ err = iavf_send_phc_read(adapter);
+ if (err)
+ return err;
+
+ ret = wait_event_interruptible_timeout(adapter->ptp.phc_time_waitqueue,
+ adapter->ptp.phc_time_ready,
+ HZ);
+
+ ptp_read_system_postts(sts);
+
+ if (ret < 0)
+ return ret;
+ else if (!ret)
+ return -EBUSY;
+
+ *ts = ns_to_timespec64(adapter->ptp.cached_phc_time);
+
+ return 0;
+}
+
+static int iavf_ptp_gettimex64(struct ptp_clock_info *info,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct iavf_adapter *adapter = iavf_clock_to_adapter(info);
+
+ if (!adapter->ptp.clock)
+ return -EOPNOTSUPP;
+
+ return iavf_read_phc_indirect(adapter, ts, sts);
+}
+
+static int iavf_ptp_settime64(struct ptp_clock_info *info,
+ const struct timespec64 *ts)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * iavf_ptp_cache_phc_time - Cache PHC time for performing timestamp extension
+ * @adapter: private adapter structure
+ *
+ * Periodically cache the PHC time in order to allow for timestamp extension.
+ * This is required because the Tx and Rx timestamps only contain 32bits of
+ * nanoseconds. Timestamp extension allows calculating the corrected 64bit
+ * timestamp. This algorithm relies on the cached time being within ~1 second
+ * of the timestamp.
+ */
+static void iavf_ptp_cache_phc_time(struct iavf_adapter *adapter)
+{
+ if (!time_is_before_jiffies(adapter->ptp.cached_phc_updated + HZ))
+ return;
+
+ /* The response from virtchnl will store the time into
+ * cached_phc_time.
+ */
+ iavf_send_phc_read(adapter);
+}
+
+/**
+ * iavf_ptp_do_aux_work - Perform periodic work required for PTP support
+ * @info: PTP clock info structure
+ *
+ * Handler to take care of periodic work required for PTP operation. This
+ * includes the following tasks:
+ *
+ * 1) updating cached_phc_time
+ *
+ * cached_phc_time is used by the Tx and Rx timestamp flows in order to
+ * perform timestamp extension, by carefully comparing the 32bit
+ * nanosecond timestamps against it and determining the corrected 64bit
+ * timestamp value to report to userspace. This algorithm only works if
+ * the cached_phc_time is within ~1 second of the Tx or Rx timestamp
+ * event. This task periodically reads the PHC time and stores it, to
+ * ensure that timestamp extension operates correctly.
+ *
+ * Return: time in jiffies until the periodic task should be re-scheduled.
+ */
+static long iavf_ptp_do_aux_work(struct ptp_clock_info *info)
+{
+ struct iavf_adapter *adapter = iavf_clock_to_adapter(info);
+
+ iavf_ptp_cache_phc_time(adapter);
+
+ /* Check work about twice a second */
+ return msecs_to_jiffies(500);
+}
+
+/**
+ * iavf_ptp_register_clock - Register a new PTP clock device for userspace
+ * @adapter: private adapter structure
+ *
+ * Allocate and register a new PTP clock device if necessary.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+static int iavf_ptp_register_clock(struct iavf_adapter *adapter)
+{
+ struct ptp_clock_info *ptp_info = &adapter->ptp.info;
+ struct device *dev = &adapter->pdev->dev;
+ struct ptp_clock *clock;
+
+ snprintf(ptp_info->name, sizeof(ptp_info->name), "%s-%s-clk",
+ KBUILD_MODNAME, dev_name(dev));
+ ptp_info->owner = THIS_MODULE;
+ ptp_info->gettimex64 = iavf_ptp_gettimex64;
+ ptp_info->settime64 = iavf_ptp_settime64;
+ ptp_info->do_aux_work = iavf_ptp_do_aux_work;
+
+ clock = ptp_clock_register(ptp_info, dev);
+ if (IS_ERR(clock))
+ return PTR_ERR(clock);
+
+ adapter->ptp.clock = clock;
+
+ dev_dbg(&adapter->pdev->dev, "PTP clock %s registered\n",
+ adapter->ptp.info.name);
+
+ return 0;
+}
+
+/**
+ * iavf_ptp_init - Initialize PTP support if capability was negotiated
+ * @adapter: private adapter structure
+ *
+ * Initialize PTP functionality, based on the capabilities that the PF has
+ * enabled for this VF.
+ */
+void iavf_ptp_init(struct iavf_adapter *adapter)
+{
+ int err;
+
+ if (!iavf_ptp_cap_supported(adapter, VIRTCHNL_1588_PTP_CAP_READ_PHC)) {
+ pci_notice(adapter->pdev,
+ "Device does not have PTP clock support\n");
+ return;
+ }
+
+ err = iavf_ptp_register_clock(adapter);
+ if (err) {
+ pci_err(adapter->pdev,
+ "Failed to register PTP clock device (%p)\n",
+ ERR_PTR(err));
+ return;
+ }
+
+ for (int i = 0; i < adapter->num_active_queues; i++) {
+ struct iavf_ring *rx_ring = &adapter->rx_rings[i];
+
+ rx_ring->ptp = &adapter->ptp;
+ }
+
+ ptp_schedule_worker(adapter->ptp.clock, 0);
+}
+
+/**
+ * iavf_ptp_release - Disable PTP support
+ * @adapter: private adapter structure
+ *
+ * Release all PTP resources that were previously initialized.
+ */
+void iavf_ptp_release(struct iavf_adapter *adapter)
+{
+ struct iavf_ptp_aq_cmd *cmd, *tmp;
+
+ if (!adapter->ptp.clock)
+ return;
+
+ pci_dbg(adapter->pdev, "removing PTP clock %s\n",
+ adapter->ptp.info.name);
+ ptp_clock_unregister(adapter->ptp.clock);
+ adapter->ptp.clock = NULL;
+
+ /* Cancel any remaining uncompleted PTP clock commands */
+ mutex_lock(&adapter->ptp.aq_cmd_lock);
+ list_for_each_entry_safe(cmd, tmp, &adapter->ptp.aq_cmds, list) {
+ list_del(&cmd->list);
+ kfree(cmd);
+ }
+ adapter->aq_required &= ~IAVF_FLAG_AQ_SEND_PTP_CMD;
+ mutex_unlock(&adapter->ptp.aq_cmd_lock);
+
+ adapter->ptp.hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ iavf_ptp_disable_rx_tstamp(adapter);
+}
+
+/**
+ * iavf_ptp_process_caps - Handle change in PTP capabilities
+ * @adapter: private adapter structure
+ *
+ * Handle any state changes necessary due to change in PTP capabilities, such
+ * as after a device reset or change in configuration from the PF.
+ */
+void iavf_ptp_process_caps(struct iavf_adapter *adapter)
+{
+ bool phc = iavf_ptp_cap_supported(adapter, VIRTCHNL_1588_PTP_CAP_READ_PHC);
+
+ /* Check if the device gained or lost necessary access to support the
+ * PTP hardware clock. If so, the driver must respond appropriately by
+ * creating or destroying the PTP clock device.
+ */
+ if (adapter->ptp.clock && !phc)
+ iavf_ptp_release(adapter);
+ else if (!adapter->ptp.clock && phc)
+ iavf_ptp_init(adapter);
+
+ /* Check if the device lost access to Rx timestamp incoming packets */
+ if (!iavf_ptp_cap_supported(adapter, VIRTCHNL_1588_PTP_CAP_RX_TSTAMP)) {
+ adapter->ptp.hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ iavf_ptp_disable_rx_tstamp(adapter);
+ }
+}
+
+/**
+ * iavf_ptp_extend_32b_timestamp - Convert a 32b nanoseconds timestamp to 64b
+ * nanoseconds
+ * @cached_phc_time: recently cached copy of PHC time
+ * @in_tstamp: Ingress/egress 32b nanoseconds timestamp value
+ *
+ * Hardware captures timestamps which contain only 32 bits of nominal
+ * nanoseconds, as opposed to the 64bit timestamps that the stack expects.
+ *
+ * Extend the 32bit nanosecond timestamp using the following algorithm and
+ * assumptions:
+ *
+ * 1) have a recently cached copy of the PHC time
+ * 2) assume that the in_tstamp was captured within 2^31 nanoseconds (~2.1
+ * seconds) of the moment the PHC time was captured.
+ * 3) calculate the delta between the cached time and the timestamp
+ * 4) if the delta is smaller than 2^31 nanoseconds, then the timestamp was
+ * captured after the PHC time. In this case, the full timestamp is just
+ * the cached PHC time plus the delta.
+ * 5) otherwise, if the delta is larger than 2^31 nanoseconds, then the
+ * timestamp was captured *before* the PHC time, i.e. because the PHC
+ * cache was updated after the timestamp was captured by hardware. In this
+ * case, the full timestamp is the cached time minus the inverse delta.
+ *
+ * This algorithm works even if the PHC time was updated after a Tx timestamp
+ * was requested, but before the Tx timestamp event was reported from
+ * hardware.
+ *
+ * This calculation primarily relies on keeping the cached PHC time up to
+ * date. If the timestamp was captured more than 2^31 nanoseconds after the
+ * PHC time, it is possible that the lower 32bits of PHC time have
+ * overflowed more than once, and we might generate an incorrect timestamp.
+ *
+ * This is prevented by (a) periodically updating the cached PHC time once
+ * a second, and (b) discarding any Tx timestamp packet if it has waited for
+ * a timestamp for more than one second.
+ *
+ * Return: extended timestamp (to 64b).
+ */
+u64 iavf_ptp_extend_32b_timestamp(u64 cached_phc_time, u32 in_tstamp)
+{
+ u32 low = lower_32_bits(cached_phc_time);
+ u32 delta = in_tstamp - low;
+ u64 ns;
+
+ /* Do not assume that the in_tstamp is always more recent than the
+ * cached PHC time. If the delta is large, it indicates that the
+ * in_tstamp was taken in the past, and the extension must step
+ * backwards from the cached time.
+ */
+ if (delta > S32_MAX)
+ ns = cached_phc_time - (low - in_tstamp);
+ else
+ ns = cached_phc_time + delta;
+
+ return ns;
+}
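
To make the timestamp-extension arithmetic concrete, here is a minimal standalone userspace sketch that mirrors the logic of iavf_ptp_extend_32b_timestamp() above; the helper name and the sample values are illustrative assumptions, not driver code.

/* Illustrative sketch mirroring iavf_ptp_extend_32b_timestamp(): one sample
 * captured shortly after the cached PHC time, one captured shortly before it.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t extend_32b_tstamp(uint64_t cached_phc_time, uint32_t in_tstamp)
{
	uint32_t low = (uint32_t)cached_phc_time;
	uint32_t delta = in_tstamp - low;	/* wraps modulo 2^32, like the driver */

	if (delta > (uint32_t)INT32_MAX)
		return cached_phc_time - (uint32_t)(low - in_tstamp);
	return cached_phc_time + delta;
}

int main(void)
{
	uint64_t cached = 0x0000000500000010ULL;	/* cached PHC time in ns */

	/* captured 0x20 ns after the cached time -> prints 0x500000030 */
	printf("0x%llx\n", (unsigned long long)extend_32b_tstamp(cached, 0x30));
	/* captured 0x20 ns before the cached time -> prints 0x4fffffff0 */
	printf("0x%llx\n", (unsigned long long)extend_32b_tstamp(cached, 0xfffffff0));
	return 0;
}
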
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ptp.h b/drivers/net/ethernet/intel/iavf/iavf_ptp.h
new file mode 100644
index 000000000000..783b8f287cd9
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/iavf_ptp.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2024 Intel Corporation. */
+
+#ifndef _IAVF_PTP_H_
+#define _IAVF_PTP_H_
+
+#include "iavf_types.h"
+
+/* bit indicating whether a 40bit timestamp is valid */
+#define IAVF_PTP_40B_TSTAMP_VALID BIT(24)
+
+#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
+void iavf_ptp_init(struct iavf_adapter *adapter);
+void iavf_ptp_release(struct iavf_adapter *adapter);
+void iavf_ptp_process_caps(struct iavf_adapter *adapter);
+bool iavf_ptp_cap_supported(const struct iavf_adapter *adapter, u32 cap);
+void iavf_virtchnl_send_ptp_cmd(struct iavf_adapter *adapter);
+int iavf_ptp_set_ts_config(struct iavf_adapter *adapter,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
+u64 iavf_ptp_extend_32b_timestamp(u64 cached_phc_time, u32 in_tstamp);
+#else /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
+static inline void iavf_ptp_init(struct iavf_adapter *adapter) { }
+static inline void iavf_ptp_release(struct iavf_adapter *adapter) { }
+static inline void iavf_ptp_process_caps(struct iavf_adapter *adapter) { }
+static inline bool iavf_ptp_cap_supported(const struct iavf_adapter *adapter,
+ u32 cap)
+{
+ return false;
+}
+
+static inline void iavf_virtchnl_send_ptp_cmd(struct iavf_adapter *adapter) { }
+static inline int iavf_ptp_set_ts_config(struct iavf_adapter *adapter,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ return -1;
+}
+
+static inline u64 iavf_ptp_extend_32b_timestamp(u64 cached_phc_time,
+ u32 in_tstamp)
+{
+ return 0;
+}
+
+#endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
+#endif /* _IAVF_PTP_H_ */
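
For reference, a minimal userspace sketch of how the new ndo_hwtstamp_set() path is exercised through the standard SIOCSHWTSTAMP ioctl; the interface name is a placeholder, and the expectation that an event filter comes back as HWTSTAMP_FILTER_ALL follows from iavf_ptp_set_timestamp_mode() above (assuming the PF negotiated the Rx timestamp capability).

/* Minimal sketch: request Rx timestamping on a VF netdev. Per
 * iavf_ptp_set_timestamp_mode(), tx_type must be HWTSTAMP_TX_OFF and any Rx
 * event filter is widened to HWTSTAMP_FILTER_ALL, which the kernel copies
 * back to userspace on success.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_OFF;		/* HWTSTAMP_TX_ON is rejected with -EOPNOTSUPP */
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "enp1s0f0v0", IFNAMSIZ - 1);	/* placeholder VF name */
	ifr.ifr_data = (char *)&cfg;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}
	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0) {
		perror("SIOCSHWTSTAMP");
		close(fd);
		return 1;
	}

	printf("rx_filter in effect: %d\n", cfg.rx_filter);	/* expect HWTSTAMP_FILTER_ALL */
	close(fd);
	return 0;
}
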
diff --git a/drivers/net/ethernet/intel/iavf/iavf_trace.h b/drivers/net/ethernet/intel/iavf/iavf_trace.h
index 82fda6f5abf0..c5e4d1823886 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_trace.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_trace.h
@@ -83,7 +83,7 @@ DECLARE_EVENT_CLASS(
__entry->ring = ring;
__entry->desc = desc;
__entry->buf = buf;
- __assign_str(devname, ring->netdev->name);
+ __assign_str(devname);
),
TP_printk(
@@ -112,7 +112,7 @@ DECLARE_EVENT_CLASS(
iavf_rx_template,
TP_PROTO(struct iavf_ring *ring,
- union iavf_32byte_rx_desc *desc,
+ struct iavf_rx_desc *desc,
struct sk_buff *skb),
TP_ARGS(ring, desc, skb),
@@ -128,7 +128,7 @@ DECLARE_EVENT_CLASS(
__entry->ring = ring;
__entry->desc = desc;
__entry->skb = skb;
- __assign_str(devname, ring->netdev->name);
+ __assign_str(devname);
),
TP_printk(
@@ -140,7 +140,7 @@ DECLARE_EVENT_CLASS(
DEFINE_EVENT(
iavf_rx_template, iavf_clean_rx_irq,
TP_PROTO(struct iavf_ring *ring,
- union iavf_32byte_rx_desc *desc,
+ struct iavf_rx_desc *desc,
struct sk_buff *skb),
TP_ARGS(ring, desc, skb));
@@ -148,7 +148,7 @@ DEFINE_EVENT(
DEFINE_EVENT(
iavf_rx_template, iavf_clean_rx_irq_rx,
TP_PROTO(struct iavf_ring *ring,
- union iavf_32byte_rx_desc *desc,
+ struct iavf_rx_desc *desc,
struct sk_buff *skb),
TP_ARGS(ring, desc, skb));
@@ -170,7 +170,7 @@ DECLARE_EVENT_CLASS(
TP_fast_assign(
__entry->skb = skb;
__entry->ring = ring;
- __assign_str(devname, ring->netdev->name);
+ __assign_str(devname);
),
TP_printk(
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index b71484c87a84..363c42bf3dcf 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -2,11 +2,32 @@
/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include <linux/bitfield.h>
+#include <linux/net/intel/libie/rx.h>
#include <linux/prefetch.h>
#include "iavf.h"
#include "iavf_trace.h"
#include "iavf_prototype.h"
+#include "iavf_ptp.h"
+
+/**
+ * iavf_is_descriptor_done - tests DD bit in Rx descriptor
+ * @qw1: quad word 1 from descriptor to get Descriptor Done field from
+ * @flex: is the descriptor flex or legacy
+ *
+ * This function tests the descriptor done bit in the specified descriptor.
+ * Because there are two types of descriptors (legacy and flex), the @flex
+ * parameter is used to distinguish between them.
+ *
+ * Return: true or false based on the state of DD bit in Rx descriptor.
+ */
+static bool iavf_is_descriptor_done(u64 qw1, bool flex)
+{
+ if (flex)
+ return FIELD_GET(IAVF_RXD_FLEX_DD_M, qw1);
+ else
+ return FIELD_GET(IAVF_RXD_LEGACY_DD_M, qw1);
+}
static __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
u32 td_tag)
@@ -184,7 +205,7 @@ void iavf_detect_recover_hung(struct iavf_vsi *vsi)
* pending work.
*/
packets = tx_ring->stats.packets & INT_MAX;
- if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
+ if (tx_ring->prev_pkt_ctr == packets) {
iavf_force_wb(vsi, tx_ring->q_vector);
continue;
}
@@ -193,7 +214,7 @@ void iavf_detect_recover_hung(struct iavf_vsi *vsi)
* to iavf_get_tx_pending()
*/
smp_rmb();
- tx_ring->tx_stats.prev_pkt_ctr =
+ tx_ring->prev_pkt_ctr =
iavf_get_tx_pending(tx_ring, true) ? packets : -1;
}
}
@@ -319,7 +340,7 @@ static bool iavf_clean_tx_irq(struct iavf_vsi *vsi,
((j / WB_STRIDE) == 0) && (j > 0) &&
!test_bit(__IAVF_VSI_DOWN, vsi->state) &&
(IAVF_DESC_UNUSED(tx_ring) != tx_ring->count))
- tx_ring->arm_wb = true;
+ tx_ring->flags |= IAVF_TXR_FLAGS_ARM_WB;
}
/* notify netdev of completed buffers */
@@ -674,7 +695,7 @@ int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring)
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
- tx_ring->tx_stats.prev_pkt_ctr = -1;
+ tx_ring->prev_pkt_ctr = -1;
return 0;
err:
@@ -689,11 +710,8 @@ err:
**/
static void iavf_clean_rx_ring(struct iavf_ring *rx_ring)
{
- unsigned long bi_size;
- u16 i;
-
/* ring already cleared, nothing to do */
- if (!rx_ring->rx_bi)
+ if (!rx_ring->rx_fqes)
return;
if (rx_ring->skb) {
@@ -701,41 +719,16 @@ static void iavf_clean_rx_ring(struct iavf_ring *rx_ring)
rx_ring->skb = NULL;
}
- /* Free all the Rx ring sk_buffs */
- for (i = 0; i < rx_ring->count; i++) {
- struct iavf_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
+ /* Free all the Rx ring buffers */
+ for (u32 i = rx_ring->next_to_clean; i != rx_ring->next_to_use; ) {
+ const struct libeth_fqe *rx_fqes = &rx_ring->rx_fqes[i];
- if (!rx_bi->page)
- continue;
+ libeth_rx_recycle_slow(rx_fqes->netmem);
- /* Invalidate cache lines that may have been written to by
- * device so that we avoid corrupting memory.
- */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_bi->dma,
- rx_bi->page_offset,
- rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
-
- /* free resources associated with mapping */
- dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
- iavf_rx_pg_size(rx_ring),
- DMA_FROM_DEVICE,
- IAVF_RX_DMA_ATTR);
-
- __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);
-
- rx_bi->page = NULL;
- rx_bi->page_offset = 0;
+ if (unlikely(++i == rx_ring->count))
+ i = 0;
}
- bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count;
- memset(rx_ring->rx_bi, 0, bi_size);
-
- /* Zero out the descriptor ring */
- memset(rx_ring->desc, 0, rx_ring->size);
-
- rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
}
@@ -748,15 +741,22 @@ static void iavf_clean_rx_ring(struct iavf_ring *rx_ring)
**/
void iavf_free_rx_resources(struct iavf_ring *rx_ring)
{
+ struct libeth_fq fq = {
+ .fqes = rx_ring->rx_fqes,
+ .pp = rx_ring->pp,
+ };
+
iavf_clean_rx_ring(rx_ring);
- kfree(rx_ring->rx_bi);
- rx_ring->rx_bi = NULL;
if (rx_ring->desc) {
- dma_free_coherent(rx_ring->dev, rx_ring->size,
+ dma_free_coherent(rx_ring->pp->p.dev, rx_ring->size,
rx_ring->desc, rx_ring->dma);
rx_ring->desc = NULL;
}
+
+ libeth_rx_fq_destroy(&fq);
+ rx_ring->rx_fqes = NULL;
+ rx_ring->pp = NULL;
}
/**
@@ -767,38 +767,46 @@ void iavf_free_rx_resources(struct iavf_ring *rx_ring)
**/
int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring)
{
- struct device *dev = rx_ring->dev;
- int bi_size;
-
- /* warn if we are about to overwrite the pointer */
- WARN_ON(rx_ring->rx_bi);
- bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count;
- rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
- if (!rx_ring->rx_bi)
- goto err;
+ struct libeth_fq fq = {
+ .count = rx_ring->count,
+ .buf_len = LIBIE_MAX_RX_BUF_LEN,
+ .nid = NUMA_NO_NODE,
+ };
+ int ret;
+
+ ret = libeth_rx_fq_create(&fq, &rx_ring->q_vector->napi);
+ if (ret)
+ return ret;
+
+ rx_ring->pp = fq.pp;
+ rx_ring->rx_fqes = fq.fqes;
+ rx_ring->truesize = fq.truesize;
+ rx_ring->rx_buf_len = fq.buf_len;
u64_stats_init(&rx_ring->syncp);
/* Round up to nearest 4K */
- rx_ring->size = rx_ring->count * sizeof(union iavf_32byte_rx_desc);
+ rx_ring->size = rx_ring->count * sizeof(struct iavf_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
- rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
+ rx_ring->desc = dma_alloc_coherent(fq.pp->p.dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL);
if (!rx_ring->desc) {
- dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
+ dev_info(fq.pp->p.dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
rx_ring->size);
goto err;
}
- rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
return 0;
+
err:
- kfree(rx_ring->rx_bi);
- rx_ring->rx_bi = NULL;
+ libeth_rx_fq_destroy(&fq);
+ rx_ring->rx_fqes = NULL;
+ rx_ring->pp = NULL;
+
return -ENOMEM;
}
@@ -811,9 +819,6 @@ static void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val)
{
rx_ring->next_to_use = val;
- /* update next to alloc since we have filled the ring */
- rx_ring->next_to_alloc = val;
-
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
@@ -824,69 +829,6 @@ static void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val)
}
/**
- * iavf_rx_offset - Return expected offset into page to access data
- * @rx_ring: Ring we are requesting offset of
- *
- * Returns the offset value for ring into the data buffer.
- */
-static unsigned int iavf_rx_offset(struct iavf_ring *rx_ring)
-{
- return ring_uses_build_skb(rx_ring) ? IAVF_SKB_PAD : 0;
-}
-
-/**
- * iavf_alloc_mapped_page - recycle or make a new page
- * @rx_ring: ring to use
- * @bi: rx_buffer struct to modify
- *
- * Returns true if the page was successfully allocated or
- * reused.
- **/
-static bool iavf_alloc_mapped_page(struct iavf_ring *rx_ring,
- struct iavf_rx_buffer *bi)
-{
- struct page *page = bi->page;
- dma_addr_t dma;
-
- /* since we are recycling buffers we should seldom need to alloc */
- if (likely(page)) {
- rx_ring->rx_stats.page_reuse_count++;
- return true;
- }
-
- /* alloc new page for storage */
- page = dev_alloc_pages(iavf_rx_pg_order(rx_ring));
- if (unlikely(!page)) {
- rx_ring->rx_stats.alloc_page_failed++;
- return false;
- }
-
- /* map page for use */
- dma = dma_map_page_attrs(rx_ring->dev, page, 0,
- iavf_rx_pg_size(rx_ring),
- DMA_FROM_DEVICE,
- IAVF_RX_DMA_ATTR);
-
- /* if mapping failed free memory back to system since
- * there isn't much point in holding memory we can't use
- */
- if (dma_mapping_error(rx_ring->dev, dma)) {
- __free_pages(page, iavf_rx_pg_order(rx_ring));
- rx_ring->rx_stats.alloc_page_failed++;
- return false;
- }
-
- bi->dma = dma;
- bi->page = page;
- bi->page_offset = iavf_rx_offset(rx_ring);
-
- /* initialize pagecnt_bias to 1 representing we fully own page */
- bi->pagecnt_bias = 1;
-
- return true;
-}
-
-/**
* iavf_receive_skb - Send a completed packet up the stack
* @rx_ring: rx ring in play
* @skb: packet to send up
@@ -916,43 +858,42 @@ static void iavf_receive_skb(struct iavf_ring *rx_ring,
**/
bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count)
{
+ const struct libeth_fq_fp fq = {
+ .pp = rx_ring->pp,
+ .fqes = rx_ring->rx_fqes,
+ .truesize = rx_ring->truesize,
+ .count = rx_ring->count,
+ };
u16 ntu = rx_ring->next_to_use;
- union iavf_rx_desc *rx_desc;
- struct iavf_rx_buffer *bi;
+ struct iavf_rx_desc *rx_desc;
/* do nothing if no valid netdev defined */
if (!rx_ring->netdev || !cleaned_count)
return false;
rx_desc = IAVF_RX_DESC(rx_ring, ntu);
- bi = &rx_ring->rx_bi[ntu];
do {
- if (!iavf_alloc_mapped_page(rx_ring, bi))
- goto no_buffers;
+ dma_addr_t addr;
- /* sync the buffer for use by the device */
- dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
- bi->page_offset,
- rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
+ addr = libeth_rx_alloc(&fq, ntu);
+ if (addr == DMA_MAPPING_ERROR)
+ goto no_buffers;
/* Refresh the desc even if buffer_addrs didn't change
* because each write-back erases this info.
*/
- rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
+ rx_desc->qw0 = cpu_to_le64(addr);
rx_desc++;
- bi++;
ntu++;
if (unlikely(ntu == rx_ring->count)) {
rx_desc = IAVF_RX_DESC(rx_ring, 0);
- bi = rx_ring->rx_bi;
ntu = 0;
}
/* clear the status bits for the next_to_use descriptor */
- rx_desc->wb.qword1.status_error_len = 0;
+ rx_desc->qw1 = 0;
cleaned_count--;
} while (cleaned_count);
@@ -966,6 +907,8 @@ no_buffers:
if (rx_ring->next_to_use != ntu)
iavf_release_rx_desc(rx_ring, ntu);
+ rx_ring->rx_stats.alloc_page_failed++;
+
/* make sure to come back via polling to try again after
* allocation failure
*/
@@ -973,81 +916,46 @@ no_buffers:
}
/**
- * iavf_rx_checksum - Indicate in skb if hw indicated a good cksum
+ * iavf_rx_csum - Indicate in skb if hw indicated a good checksum
* @vsi: the VSI we care about
* @skb: skb currently being received and modified
- * @rx_desc: the receive descriptor
+ * @decoded_pt: decoded ptype information
+ * @csum_bits: decoded Rx descriptor information
**/
-static void iavf_rx_checksum(struct iavf_vsi *vsi,
- struct sk_buff *skb,
- union iavf_rx_desc *rx_desc)
+static void iavf_rx_csum(const struct iavf_vsi *vsi, struct sk_buff *skb,
+ struct libeth_rx_pt decoded_pt,
+ struct libeth_rx_csum csum_bits)
{
- struct iavf_rx_ptype_decoded decoded;
- u32 rx_error, rx_status;
bool ipv4, ipv6;
- u8 ptype;
- u64 qword;
-
- qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- ptype = FIELD_GET(IAVF_RXD_QW1_PTYPE_MASK, qword);
- rx_error = FIELD_GET(IAVF_RXD_QW1_ERROR_MASK, qword);
- rx_status = FIELD_GET(IAVF_RXD_QW1_STATUS_MASK, qword);
- decoded = decode_rx_desc_ptype(ptype);
skb->ip_summed = CHECKSUM_NONE;
- skb_checksum_none_assert(skb);
-
- /* Rx csum enabled and ip headers found? */
- if (!(vsi->netdev->features & NETIF_F_RXCSUM))
- return;
-
/* did the hardware decode the packet and checksum? */
- if (!(rx_status & BIT(IAVF_RX_DESC_STATUS_L3L4P_SHIFT)))
- return;
-
- /* both known and outer_ip must be set for the below code to work */
- if (!(decoded.known && decoded.outer_ip))
+ if (unlikely(!csum_bits.l3l4p))
return;
- ipv4 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) &&
- (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV4);
- ipv6 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) &&
- (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV6);
+ ipv4 = libeth_rx_pt_get_ip_ver(decoded_pt) == LIBETH_RX_PT_OUTER_IPV4;
+ ipv6 = libeth_rx_pt_get_ip_ver(decoded_pt) == LIBETH_RX_PT_OUTER_IPV6;
- if (ipv4 &&
- (rx_error & (BIT(IAVF_RX_DESC_ERROR_IPE_SHIFT) |
- BIT(IAVF_RX_DESC_ERROR_EIPE_SHIFT))))
+ if (unlikely(ipv4 && (csum_bits.ipe || csum_bits.eipe)))
goto checksum_fail;
/* likely incorrect csum if alternate IP extension headers found */
- if (ipv6 &&
- rx_status & BIT(IAVF_RX_DESC_STATUS_IPV6EXADD_SHIFT))
- /* don't increment checksum err here, non-fatal err */
+ if (unlikely(ipv6 && csum_bits.ipv6exadd))
return;
/* there was some L4 error, count error and punt packet to the stack */
- if (rx_error & BIT(IAVF_RX_DESC_ERROR_L4E_SHIFT))
+ if (unlikely(csum_bits.l4e))
goto checksum_fail;
/* handle packets that were not able to be checksummed due
* to arrival speed, in this case the stack can compute
* the csum.
*/
- if (rx_error & BIT(IAVF_RX_DESC_ERROR_PPRS_SHIFT))
+ if (unlikely(csum_bits.pprs))
return;
- /* Only report checksum unnecessary for TCP, UDP, or SCTP */
- switch (decoded.inner_prot) {
- case IAVF_RX_PTYPE_INNER_PROT_TCP:
- case IAVF_RX_PTYPE_INNER_PROT_UDP:
- case IAVF_RX_PTYPE_INNER_PROT_SCTP:
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- fallthrough;
- default:
- break;
- }
-
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
return;
checksum_fail:
@@ -1055,73 +963,196 @@ checksum_fail:
}
/**
- * iavf_ptype_to_htype - get a hash type
- * @ptype: the ptype value from the descriptor
+ * iavf_legacy_rx_csum - Indicate in skb if hw indicated a good checksum
+ * @vsi: the VSI we care about
+ * @qw1: quad word 1
+ * @decoded_pt: decoded packet type
*
- * Returns a hash type to be used by skb_set_hash
+ * This function only operates on the VIRTCHNL_RXDID_1_32B_BASE legacy 32byte
+ * descriptor writeback format.
+ *
+ * Return: decoded checksum bits.
**/
-static int iavf_ptype_to_htype(u8 ptype)
+static struct libeth_rx_csum
+iavf_legacy_rx_csum(const struct iavf_vsi *vsi, u64 qw1,
+ const struct libeth_rx_pt decoded_pt)
{
- struct iavf_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
+ struct libeth_rx_csum csum_bits = {};
- if (!decoded.known)
- return PKT_HASH_TYPE_NONE;
+ if (!libeth_rx_pt_has_checksum(vsi->netdev, decoded_pt))
+ return csum_bits;
- if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP &&
- decoded.payload_layer == IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY4)
- return PKT_HASH_TYPE_L4;
- else if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP &&
- decoded.payload_layer == IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY3)
- return PKT_HASH_TYPE_L3;
- else
- return PKT_HASH_TYPE_L2;
+ csum_bits.ipe = FIELD_GET(IAVF_RXD_LEGACY_IPE_M, qw1);
+ csum_bits.eipe = FIELD_GET(IAVF_RXD_LEGACY_EIPE_M, qw1);
+ csum_bits.l4e = FIELD_GET(IAVF_RXD_LEGACY_L4E_M, qw1);
+ csum_bits.pprs = FIELD_GET(IAVF_RXD_LEGACY_PPRS_M, qw1);
+ csum_bits.l3l4p = FIELD_GET(IAVF_RXD_LEGACY_L3L4P_M, qw1);
+ csum_bits.ipv6exadd = FIELD_GET(IAVF_RXD_LEGACY_IPV6EXADD_M, qw1);
+
+ return csum_bits;
}
/**
- * iavf_rx_hash - set the hash value in the skb
+ * iavf_flex_rx_csum - Indicate in skb if hw indicated a good checksum
+ * @vsi: the VSI we care about
+ * @qw1: quad word 1
+ * @decoded_pt: decoded packet type
+ *
+ * This function only operates on the VIRTCHNL_RXDID_2_FLEX_SQ_NIC flexible
+ * descriptor writeback format.
+ *
+ * Return: decoded checksum bits.
+ **/
+static struct libeth_rx_csum
+iavf_flex_rx_csum(const struct iavf_vsi *vsi, u64 qw1,
+ const struct libeth_rx_pt decoded_pt)
+{
+ struct libeth_rx_csum csum_bits = {};
+
+ if (!libeth_rx_pt_has_checksum(vsi->netdev, decoded_pt))
+ return csum_bits;
+
+ csum_bits.ipe = FIELD_GET(IAVF_RXD_FLEX_XSUM_IPE_M, qw1);
+ csum_bits.eipe = FIELD_GET(IAVF_RXD_FLEX_XSUM_EIPE_M, qw1);
+ csum_bits.l4e = FIELD_GET(IAVF_RXD_FLEX_XSUM_L4E_M, qw1);
+ csum_bits.eudpe = FIELD_GET(IAVF_RXD_FLEX_XSUM_EUDPE_M, qw1);
+ csum_bits.l3l4p = FIELD_GET(IAVF_RXD_FLEX_L3L4P_M, qw1);
+ csum_bits.ipv6exadd = FIELD_GET(IAVF_RXD_FLEX_IPV6EXADD_M, qw1);
+ csum_bits.nat = FIELD_GET(IAVF_RXD_FLEX_NAT_M, qw1);
+
+ return csum_bits;
+}
+
+/**
+ * iavf_legacy_rx_hash - set the hash value in the skb
* @ring: descriptor ring
- * @rx_desc: specific descriptor
+ * @qw0: quad word 0
+ * @qw1: quad word 1
* @skb: skb currently being received and modified
- * @rx_ptype: Rx packet type
+ * @decoded_pt: decoded packet type
+ *
+ * This function only operates on the VIRTCHNL_RXDID_1_32B_BASE legacy 32byte
+ * descriptor writeback format.
**/
-static void iavf_rx_hash(struct iavf_ring *ring,
- union iavf_rx_desc *rx_desc,
- struct sk_buff *skb,
- u8 rx_ptype)
+static void iavf_legacy_rx_hash(const struct iavf_ring *ring, __le64 qw0,
+ __le64 qw1, struct sk_buff *skb,
+ const struct libeth_rx_pt decoded_pt)
{
+ const __le64 rss_mask = cpu_to_le64(IAVF_RXD_LEGACY_FLTSTAT_M);
u32 hash;
- const __le64 rss_mask =
- cpu_to_le64((u64)IAVF_RX_DESC_FLTSTAT_RSS_HASH <<
- IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT);
- if (!(ring->netdev->features & NETIF_F_RXHASH))
+ if (!libeth_rx_pt_has_hash(ring->netdev, decoded_pt))
return;
- if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
- hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
- skb_set_hash(skb, hash, iavf_ptype_to_htype(rx_ptype));
+ if ((qw1 & rss_mask) == rss_mask) {
+ hash = le64_get_bits(qw0, IAVF_RXD_LEGACY_RSS_M);
+ libeth_rx_pt_set_hash(skb, hash, decoded_pt);
}
}
/**
+ * iavf_flex_rx_hash - set the hash value in the skb
+ * @ring: descriptor ring
+ * @qw1: quad word 1
+ * @skb: skb currently being received and modified
+ * @decoded_pt: decoded packet type
+ *
+ * This function only operates on the VIRTCHNL_RXDID_2_FLEX_SQ_NIC flexible
+ * descriptor writeback format.
+ **/
+static void iavf_flex_rx_hash(const struct iavf_ring *ring, __le64 qw1,
+ struct sk_buff *skb,
+ const struct libeth_rx_pt decoded_pt)
+{
+ bool rss_valid;
+ u32 hash;
+
+ if (!libeth_rx_pt_has_hash(ring->netdev, decoded_pt))
+ return;
+
+ rss_valid = le64_get_bits(qw1, IAVF_RXD_FLEX_RSS_VALID_M);
+ if (rss_valid) {
+ hash = le64_get_bits(qw1, IAVF_RXD_FLEX_RSS_HASH_M);
+ libeth_rx_pt_set_hash(skb, hash, decoded_pt);
+ }
+}
+
+/**
+ * iavf_flex_rx_tstamp - Capture Rx timestamp from the descriptor
+ * @rx_ring: descriptor ring
+ * @qw2: quad word 2 of descriptor
+ * @qw3: quad word 3 of descriptor
+ * @skb: skb currently being received
+ *
+ * Read the Rx timestamp value from the descriptor and pass it to the stack.
+ *
+ * This function only operates on the VIRTCHNL_RXDID_2_FLEX_SQ_NIC flexible
+ * descriptor writeback format.
+ */
+static void iavf_flex_rx_tstamp(const struct iavf_ring *rx_ring, __le64 qw2,
+ __le64 qw3, struct sk_buff *skb)
+{
+ u32 tstamp;
+ u64 ns;
+
+ /* Skip processing if timestamps aren't enabled */
+ if (!(rx_ring->flags & IAVF_TXRX_FLAGS_HW_TSTAMP))
+ return;
+
+ /* Check if this Rx descriptor has a valid timestamp */
+ if (!le64_get_bits(qw2, IAVF_PTP_40B_TSTAMP_VALID))
+ return;
+
+ /* the ts_low field only contains the valid bit and sub-nanosecond
+ * precision, so we don't need to extract it.
+ */
+ tstamp = le64_get_bits(qw3, IAVF_RXD_FLEX_QW3_TSTAMP_HIGH_M);
+
+ ns = iavf_ptp_extend_32b_timestamp(rx_ring->ptp->cached_phc_time,
+ tstamp);
+
+ *skb_hwtstamps(skb) = (struct skb_shared_hwtstamps) {
+ .hwtstamp = ns_to_ktime(ns),
+ };
+}
+
+/**
* iavf_process_skb_fields - Populate skb header fields from Rx descriptor
* @rx_ring: rx descriptor ring packet is being transacted on
* @rx_desc: pointer to the EOP Rx descriptor
* @skb: pointer to current skb being populated
- * @rx_ptype: the packet type decoded by hardware
+ * @ptype: the packet type decoded by hardware
+ * @flex: is the descriptor flex or legacy
*
* This function checks the ring, descriptor, and packet information in
* order to populate the hash, checksum, VLAN, protocol, and
* other fields within the skb.
**/
-static void
-iavf_process_skb_fields(struct iavf_ring *rx_ring,
- union iavf_rx_desc *rx_desc, struct sk_buff *skb,
- u8 rx_ptype)
+static void iavf_process_skb_fields(const struct iavf_ring *rx_ring,
+ const struct iavf_rx_desc *rx_desc,
+ struct sk_buff *skb, u32 ptype,
+ bool flex)
{
- iavf_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
-
- iavf_rx_checksum(rx_ring->vsi, skb, rx_desc);
+ struct libeth_rx_csum csum_bits;
+ struct libeth_rx_pt decoded_pt;
+ __le64 qw0 = rx_desc->qw0;
+ __le64 qw1 = rx_desc->qw1;
+ __le64 qw2 = rx_desc->qw2;
+ __le64 qw3 = rx_desc->qw3;
+
+ decoded_pt = libie_rx_pt_parse(ptype);
+
+ if (flex) {
+ iavf_flex_rx_hash(rx_ring, qw1, skb, decoded_pt);
+ iavf_flex_rx_tstamp(rx_ring, qw2, qw3, skb);
+ csum_bits = iavf_flex_rx_csum(rx_ring->vsi, le64_to_cpu(qw1),
+ decoded_pt);
+ } else {
+ iavf_legacy_rx_hash(rx_ring, qw0, qw1, skb, decoded_pt);
+ csum_bits = iavf_legacy_rx_csum(rx_ring->vsi, le64_to_cpu(qw1),
+ decoded_pt);
+ }
+ iavf_rx_csum(rx_ring->vsi, skb, decoded_pt, csum_bits);
skb_record_rx_queue(skb, rx_ring->queue_index);
@@ -1152,95 +1183,9 @@ static bool iavf_cleanup_headers(struct iavf_ring *rx_ring, struct sk_buff *skb)
}
/**
- * iavf_reuse_rx_page - page flip buffer and store it back on the ring
- * @rx_ring: rx descriptor ring to store buffers on
- * @old_buff: donor buffer to have page reused
- *
- * Synchronizes page for reuse by the adapter
- **/
-static void iavf_reuse_rx_page(struct iavf_ring *rx_ring,
- struct iavf_rx_buffer *old_buff)
-{
- struct iavf_rx_buffer *new_buff;
- u16 nta = rx_ring->next_to_alloc;
-
- new_buff = &rx_ring->rx_bi[nta];
-
- /* update, and store next to alloc */
- nta++;
- rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
-
- /* transfer page from old buffer to new buffer */
- new_buff->dma = old_buff->dma;
- new_buff->page = old_buff->page;
- new_buff->page_offset = old_buff->page_offset;
- new_buff->pagecnt_bias = old_buff->pagecnt_bias;
-}
-
-/**
- * iavf_can_reuse_rx_page - Determine if this page can be reused by
- * the adapter for another receive
- *
- * @rx_buffer: buffer containing the page
- *
- * If page is reusable, rx_buffer->page_offset is adjusted to point to
- * an unused region in the page.
- *
- * For small pages, @truesize will be a constant value, half the size
- * of the memory at page. We'll attempt to alternate between high and
- * low halves of the page, with one half ready for use by the hardware
- * and the other half being consumed by the stack. We use the page
- * ref count to determine whether the stack has finished consuming the
- * portion of this page that was passed up with a previous packet. If
- * the page ref count is >1, we'll assume the "other" half page is
- * still busy, and this page cannot be reused.
- *
- * For larger pages, @truesize will be the actual space used by the
- * received packet (adjusted upward to an even multiple of the cache
- * line size). This will advance through the page by the amount
- * actually consumed by the received packets while there is still
- * space for a buffer. Each region of larger pages will be used at
- * most once, after which the page will not be reused.
- *
- * In either case, if the page is reusable its refcount is increased.
- **/
-static bool iavf_can_reuse_rx_page(struct iavf_rx_buffer *rx_buffer)
-{
- unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
- struct page *page = rx_buffer->page;
-
- /* Is any reuse possible? */
- if (!dev_page_is_reusable(page))
- return false;
-
-#if (PAGE_SIZE < 8192)
- /* if we are only owner of page we can reuse it */
- if (unlikely((page_count(page) - pagecnt_bias) > 1))
- return false;
-#else
-#define IAVF_LAST_OFFSET \
- (SKB_WITH_OVERHEAD(PAGE_SIZE) - IAVF_RXBUFFER_2048)
- if (rx_buffer->page_offset > IAVF_LAST_OFFSET)
- return false;
-#endif
-
- /* If we have drained the page fragment pool we need to update
- * the pagecnt_bias and page count so that we fully restock the
- * number of references the driver holds.
- */
- if (unlikely(!pagecnt_bias)) {
- page_ref_add(page, USHRT_MAX);
- rx_buffer->pagecnt_bias = USHRT_MAX;
- }
-
- return true;
-}
-
-/**
* iavf_add_rx_frag - Add contents of Rx buffer to sk_buff
- * @rx_ring: rx descriptor ring to transact packets on
- * @rx_buffer: buffer containing page to add
* @skb: sk_buff to place the data into
+ * @rx_buffer: buffer containing page to add
* @size: packet length from rx_desc
*
* This function will add the data contained in rx_buffer->page to the skb.
@@ -1248,210 +1193,55 @@ static bool iavf_can_reuse_rx_page(struct iavf_rx_buffer *rx_buffer)
*
* The function will then update the page offset.
**/
-static void iavf_add_rx_frag(struct iavf_ring *rx_ring,
- struct iavf_rx_buffer *rx_buffer,
- struct sk_buff *skb,
+static void iavf_add_rx_frag(struct sk_buff *skb,
+ const struct libeth_fqe *rx_buffer,
unsigned int size)
{
-#if (PAGE_SIZE < 8192)
- unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
-#else
- unsigned int truesize = SKB_DATA_ALIGN(size + iavf_rx_offset(rx_ring));
-#endif
-
- if (!size)
- return;
+ u32 hr = netmem_get_pp(rx_buffer->netmem)->p.offset;
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
- rx_buffer->page_offset, size, truesize);
-
- /* page is being used so we must update the page offset */
-#if (PAGE_SIZE < 8192)
- rx_buffer->page_offset ^= truesize;
-#else
- rx_buffer->page_offset += truesize;
-#endif
-}
-
-/**
- * iavf_get_rx_buffer - Fetch Rx buffer and synchronize data for use
- * @rx_ring: rx descriptor ring to transact packets on
- * @size: size of buffer to add to skb
- *
- * This function will pull an Rx buffer from the ring and synchronize it
- * for use by the CPU.
- */
-static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring,
- const unsigned int size)
-{
- struct iavf_rx_buffer *rx_buffer;
-
- rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
- prefetchw(rx_buffer->page);
- if (!size)
- return rx_buffer;
-
- /* we are reusing so sync this buffer for CPU use */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_buffer->dma,
- rx_buffer->page_offset,
- size,
- DMA_FROM_DEVICE);
-
- /* We have pulled a buffer for use, so decrement pagecnt_bias */
- rx_buffer->pagecnt_bias--;
-
- return rx_buffer;
-}
-
-/**
- * iavf_construct_skb - Allocate skb and populate it
- * @rx_ring: rx descriptor ring to transact packets on
- * @rx_buffer: rx buffer to pull data from
- * @size: size of buffer to add to skb
- *
- * This function allocates an skb. It then populates it with the page
- * data from the current receive descriptor, taking care to set up the
- * skb correctly.
- */
-static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring,
- struct iavf_rx_buffer *rx_buffer,
- unsigned int size)
-{
- void *va;
-#if (PAGE_SIZE < 8192)
- unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
-#else
- unsigned int truesize = SKB_DATA_ALIGN(size);
-#endif
- unsigned int headlen;
- struct sk_buff *skb;
-
- if (!rx_buffer)
- return NULL;
- /* prefetch first cache line of first page */
- va = page_address(rx_buffer->page) + rx_buffer->page_offset;
- net_prefetch(va);
-
- /* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
- IAVF_RX_HDR_SIZE,
- GFP_ATOMIC | __GFP_NOWARN);
- if (unlikely(!skb))
- return NULL;
-
- /* Determine available headroom for copy */
- headlen = size;
- if (headlen > IAVF_RX_HDR_SIZE)
- headlen = eth_get_headlen(skb->dev, va, IAVF_RX_HDR_SIZE);
-
- /* align pull length to size of long to optimize memcpy performance */
- memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
-
- /* update all of the pointers */
- size -= headlen;
- if (size) {
- skb_add_rx_frag(skb, 0, rx_buffer->page,
- rx_buffer->page_offset + headlen,
- size, truesize);
-
- /* buffer is used by skb, update page_offset */
-#if (PAGE_SIZE < 8192)
- rx_buffer->page_offset ^= truesize;
-#else
- rx_buffer->page_offset += truesize;
-#endif
- } else {
- /* buffer is unused, reset bias back to rx_buffer */
- rx_buffer->pagecnt_bias++;
- }
-
- return skb;
+ skb_add_rx_frag_netmem(skb, skb_shinfo(skb)->nr_frags,
+ rx_buffer->netmem, rx_buffer->offset + hr,
+ size, rx_buffer->truesize);
}
/**
* iavf_build_skb - Build skb around an existing buffer
- * @rx_ring: Rx descriptor ring to transact packets on
* @rx_buffer: Rx buffer to pull data from
* @size: size of buffer to add to skb
*
* This function builds an skb around an existing Rx buffer, taking care
* to set up the skb correctly and avoid any memcpy overhead.
*/
-static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
- struct iavf_rx_buffer *rx_buffer,
+static struct sk_buff *iavf_build_skb(const struct libeth_fqe *rx_buffer,
unsigned int size)
{
- void *va;
-#if (PAGE_SIZE < 8192)
- unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
-#else
- unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
- SKB_DATA_ALIGN(IAVF_SKB_PAD + size);
-#endif
+ struct page *buf_page = __netmem_to_page(rx_buffer->netmem);
+ u32 hr = pp_page_to_nmdesc(buf_page)->pp->p.offset;
struct sk_buff *skb;
+ void *va;
- if (!rx_buffer || !size)
- return NULL;
/* prefetch first cache line of first page */
- va = page_address(rx_buffer->page) + rx_buffer->page_offset;
- net_prefetch(va);
+ va = page_address(buf_page) + rx_buffer->offset;
+ net_prefetch(va + hr);
/* build an skb around the page buffer */
- skb = napi_build_skb(va - IAVF_SKB_PAD, truesize);
+ skb = napi_build_skb(va, rx_buffer->truesize);
if (unlikely(!skb))
return NULL;
+ skb_mark_for_recycle(skb);
+
/* update pointers within the skb to store the data */
- skb_reserve(skb, IAVF_SKB_PAD);
+ skb_reserve(skb, hr);
__skb_put(skb, size);
- /* buffer is used by skb, update page_offset */
-#if (PAGE_SIZE < 8192)
- rx_buffer->page_offset ^= truesize;
-#else
- rx_buffer->page_offset += truesize;
-#endif
-
return skb;
}
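To illustrate the headroom bookkeeping above, here is a minimal user-space sketch under invented values (the 192-byte headroom and 2K buffer are assumptions, not taken from the driver): the buffer's virtual address is the page address plus the queue's offset, the packet starts one page-pool headroom past it, and the skb_reserve()/__skb_put() pair simply moves the data and tail pointers by those amounts.

#include <stdio.h>

int main(void)
{
	unsigned char page[2048];		/* one Rx buffer               */
	unsigned int offset = 0;		/* buffer start within page    */
	unsigned int hr = 192;			/* assumed page-pool headroom  */
	unsigned int size = 1514;		/* length from the descriptor  */

	unsigned char *va   = page + offset;	/* start handed to skb build  */
	unsigned char *data = va + hr;		/* first byte of the frame    */

	printf("headroom %u, payload %u, tail at byte %lu\n",
	       hr, size, (unsigned long)(data - page) + size);
	return 0;
}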
/**
- * iavf_put_rx_buffer - Clean up used buffer and either recycle or free
- * @rx_ring: rx descriptor ring to transact packets on
- * @rx_buffer: rx buffer to pull data from
- *
- * This function will clean up the contents of the rx_buffer. It will
- * either recycle the buffer or unmap it and free the associated resources.
- */
-static void iavf_put_rx_buffer(struct iavf_ring *rx_ring,
- struct iavf_rx_buffer *rx_buffer)
-{
- if (!rx_buffer)
- return;
-
- if (iavf_can_reuse_rx_page(rx_buffer)) {
- /* hand second half of page back to the ring */
- iavf_reuse_rx_page(rx_ring, rx_buffer);
- rx_ring->rx_stats.page_reuse_count++;
- } else {
- /* we are not reusing the buffer so unmap it */
- dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
- iavf_rx_pg_size(rx_ring),
- DMA_FROM_DEVICE, IAVF_RX_DMA_ATTR);
- __page_frag_cache_drain(rx_buffer->page,
- rx_buffer->pagecnt_bias);
- }
-
- /* clear contents of buffer_info */
- rx_buffer->page = NULL;
-}
-
-/**
* iavf_is_non_eop - process handling of non-EOP buffers
* @rx_ring: Rx ring being processed
- * @rx_desc: Rx descriptor for current buffer
- * @skb: Current socket buffer containing buffer in progress
+ * @fields: Rx descriptor extracted fields
*
* This function updates next to clean. If the buffer is an EOP buffer
* this function exits returning false, otherwise it will place the
@@ -1459,8 +1249,7 @@ static void iavf_put_rx_buffer(struct iavf_ring *rx_ring,
* that this is in fact a non-EOP buffer.
**/
static bool iavf_is_non_eop(struct iavf_ring *rx_ring,
- union iavf_rx_desc *rx_desc,
- struct sk_buff *skb)
+ struct libeth_rqe_info fields)
{
u32 ntc = rx_ring->next_to_clean + 1;
@@ -1471,8 +1260,7 @@ static bool iavf_is_non_eop(struct iavf_ring *rx_ring,
prefetch(IAVF_RX_DESC(rx_ring, ntc));
/* if we are the last buffer then there is nothing else to do */
-#define IAVF_RXD_EOF BIT(IAVF_RX_DESC_STATUS_EOF_SHIFT)
- if (likely(iavf_test_staterr(rx_desc, IAVF_RXD_EOF)))
+ if (likely(fields.eop))
return false;
rx_ring->rx_stats.non_eop_descs++;
@@ -1481,6 +1269,109 @@ static bool iavf_is_non_eop(struct iavf_ring *rx_ring,
}
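Non-EOP handling strings several descriptors into one packet: fragments are appended to the in-progress skb until a descriptor with the EOP flag set closes it. A self-contained sketch of that accumulation loop, with invented types and sample lengths:

#include <stdbool.h>
#include <stdio.h>

struct demo_desc {
	unsigned int len;
	bool eop;
};

int main(void)
{
	/* One 3000-byte packet split over two buffers, then a short one. */
	struct demo_desc ring[] = {
		{ 2048, false }, { 952, true }, { 60, true },
	};
	unsigned int pkt_len = 0;

	for (unsigned int i = 0; i < sizeof(ring) / sizeof(ring[0]); i++) {
		pkt_len += ring[i].len;
		if (!ring[i].eop)
			continue;	/* more fragments to come */
		printf("packet complete: %u bytes\n", pkt_len);
		pkt_len = 0;
	}
	return 0;
}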
/**
+ * iavf_extract_legacy_rx_fields - Extract fields from the Rx descriptor
+ * @rx_ring: rx descriptor ring
+ * @rx_desc: the descriptor to process
+ *
+ * Decode the Rx descriptor and extract relevant information including the
+ * size, VLAN tag, Rx packet type, end of packet field and RXE field value.
+ *
+ * This function only operates on the VIRTCHNL_RXDID_1_32B_BASE legacy 32-byte
+ * descriptor writeback format.
+ *
+ * Return: fields extracted from the Rx descriptor.
+ */
+static struct libeth_rqe_info
+iavf_extract_legacy_rx_fields(const struct iavf_ring *rx_ring,
+ const struct iavf_rx_desc *rx_desc)
+{
+ u64 qw0 = le64_to_cpu(rx_desc->qw0);
+ u64 qw1 = le64_to_cpu(rx_desc->qw1);
+ u64 qw2 = le64_to_cpu(rx_desc->qw2);
+ struct libeth_rqe_info fields;
+ bool l2tag1p, l2tag2p;
+
+ fields.eop = FIELD_GET(IAVF_RXD_LEGACY_EOP_M, qw1);
+ fields.len = FIELD_GET(IAVF_RXD_LEGACY_LENGTH_M, qw1);
+
+ if (!fields.eop)
+ return fields;
+
+ fields.rxe = FIELD_GET(IAVF_RXD_LEGACY_RXE_M, qw1);
+ fields.ptype = FIELD_GET(IAVF_RXD_LEGACY_PTYPE_M, qw1);
+ fields.vlan = 0;
+
+ if (rx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1) {
+ l2tag1p = FIELD_GET(IAVF_RXD_LEGACY_L2TAG1P_M, qw1);
+ if (l2tag1p)
+ fields.vlan = FIELD_GET(IAVF_RXD_LEGACY_L2TAG1_M, qw0);
+ } else if (rx_ring->flags & IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
+ l2tag2p = FIELD_GET(IAVF_RXD_LEGACY_L2TAG2P_M, qw2);
+ if (l2tag2p)
+ fields.vlan = FIELD_GET(IAVF_RXD_LEGACY_L2TAG2_M, qw2);
+ }
+
+ return fields;
+}
+
+/**
+ * iavf_extract_flex_rx_fields - Extract fields from the Rx descriptor
+ * @rx_ring: rx descriptor ring
+ * @rx_desc: the descriptor to process
+ *
+ * Decode the Rx descriptor and extract relevant information including the
+ * size, VLAN tag, Rx packet type, end of packet field and RXE field value.
+ *
+ * This function only operates on the VIRTCHNL_RXDID_2_FLEX_SQ_NIC flexible
+ * descriptor writeback format.
+ *
+ * Return: fields extracted from the Rx descriptor.
+ */
+static struct libeth_rqe_info
+iavf_extract_flex_rx_fields(const struct iavf_ring *rx_ring,
+ const struct iavf_rx_desc *rx_desc)
+{
+ struct libeth_rqe_info fields = {};
+ u64 qw0 = le64_to_cpu(rx_desc->qw0);
+ u64 qw1 = le64_to_cpu(rx_desc->qw1);
+ u64 qw2 = le64_to_cpu(rx_desc->qw2);
+ bool l2tag1p, l2tag2p;
+
+ fields.eop = FIELD_GET(IAVF_RXD_FLEX_EOP_M, qw1);
+ fields.len = FIELD_GET(IAVF_RXD_FLEX_PKT_LEN_M, qw0);
+
+ if (!fields.eop)
+ return fields;
+
+ fields.rxe = FIELD_GET(IAVF_RXD_FLEX_RXE_M, qw1);
+ fields.ptype = FIELD_GET(IAVF_RXD_FLEX_PTYPE_M, qw0);
+ fields.vlan = 0;
+
+ if (rx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1) {
+ l2tag1p = FIELD_GET(IAVF_RXD_FLEX_L2TAG1P_M, qw1);
+ if (l2tag1p)
+ fields.vlan = FIELD_GET(IAVF_RXD_FLEX_L2TAG1_M, qw1);
+ } else if (rx_ring->flags & IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
+ l2tag2p = FIELD_GET(IAVF_RXD_FLEX_L2TAG2P_M, qw2);
+ if (l2tag2p)
+ fields.vlan = FIELD_GET(IAVF_RXD_FLEX_L2TAG2_2_M, qw2);
+ }
+
+ return fields;
+}
+
+static struct libeth_rqe_info
+iavf_extract_rx_fields(const struct iavf_ring *rx_ring,
+ const struct iavf_rx_desc *rx_desc,
+ bool flex)
+{
+ if (flex)
+ return iavf_extract_flex_rx_fields(rx_ring, rx_desc);
+ else
+ return iavf_extract_legacy_rx_fields(rx_ring, rx_desc);
+}
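The two extractor helpers above are just mask-and-shift reads of the 64-bit writeback words. A self-contained user-space sketch of the same decode for the legacy format, using the EOP bit (bit 1) and the 14-bit length field at bits 51:38 that match the masks defined later in the patch (the sample value is made up):

#include <stdint.h>
#include <stdio.h>

#define LEGACY_EOP_BIT		1
#define LEGACY_LEN_SHIFT	38
#define LEGACY_LEN_WIDTH	14

int main(void)
{
	/* A made-up writeback value: length 0x5EA (1514), EOP set. */
	uint64_t qw1 = ((uint64_t)0x5EA << LEGACY_LEN_SHIFT) |
		       (1ULL << LEGACY_EOP_BIT);
	unsigned int eop = (qw1 >> LEGACY_EOP_BIT) & 0x1;
	unsigned int len = (qw1 >> LEGACY_LEN_SHIFT) &
			   ((1ULL << LEGACY_LEN_WIDTH) - 1);

	printf("eop=%u len=%u\n", eop, len);	/* prints eop=1 len=1514 */
	return 0;
}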
+
+/**
* iavf_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
* @rx_ring: rx descriptor ring to transact packets on
* @budget: Total limit on number of packets to process
@@ -1494,18 +1385,17 @@ static bool iavf_is_non_eop(struct iavf_ring *rx_ring,
**/
static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
{
+ bool flex = rx_ring->rxdid == VIRTCHNL_RXDID_2_FLEX_SQ_NIC;
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
struct sk_buff *skb = rx_ring->skb;
u16 cleaned_count = IAVF_DESC_UNUSED(rx_ring);
bool failure = false;
while (likely(total_rx_packets < (unsigned int)budget)) {
- struct iavf_rx_buffer *rx_buffer;
- union iavf_rx_desc *rx_desc;
- unsigned int size;
- u16 vlan_tag = 0;
- u8 rx_ptype;
- u64 qword;
+ struct libeth_rqe_info fields;
+ struct libeth_fqe *rx_buffer;
+ struct iavf_rx_desc *rx_desc;
+ u64 qw1;
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= IAVF_RX_BUFFER_WRITE) {
@@ -1516,55 +1406,50 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
rx_desc = IAVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
- /* status_error_len will always be zero for unused descriptors
- * because it's cleared in cleanup, and overlaps with hdr_addr
- * which is always zero because packet split isn't used, if the
- * hardware wrote DD then the length will be non-zero
- */
- qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
-
/* This memory barrier is needed to keep us from reading
* any other fields out of the rx_desc until we have
* verified the descriptor has been written back.
*/
dma_rmb();
-#define IAVF_RXD_DD BIT(IAVF_RX_DESC_STATUS_DD_SHIFT)
- if (!iavf_test_staterr(rx_desc, IAVF_RXD_DD))
+
+ qw1 = le64_to_cpu(rx_desc->qw1);
+ /* If the DD (descriptor done) field is unset, the other fields
+ * are not valid
+ */
+ if (!iavf_is_descriptor_done(qw1, flex))
break;
- size = FIELD_GET(IAVF_RXD_QW1_LENGTH_PBUF_MASK, qword);
+ fields = iavf_extract_rx_fields(rx_ring, rx_desc, flex);
iavf_trace(clean_rx_irq, rx_ring, rx_desc, skb);
- rx_buffer = iavf_get_rx_buffer(rx_ring, size);
+
+ rx_buffer = &rx_ring->rx_fqes[rx_ring->next_to_clean];
+ if (!libeth_rx_sync_for_cpu(rx_buffer, fields.len))
+ goto skip_data;
/* retrieve a buffer from the ring */
if (skb)
- iavf_add_rx_frag(rx_ring, rx_buffer, skb, size);
- else if (ring_uses_build_skb(rx_ring))
- skb = iavf_build_skb(rx_ring, rx_buffer, size);
+ iavf_add_rx_frag(skb, rx_buffer, fields.len);
else
- skb = iavf_construct_skb(rx_ring, rx_buffer, size);
+ skb = iavf_build_skb(rx_buffer, fields.len);
/* exit if we failed to retrieve a buffer */
if (!skb) {
rx_ring->rx_stats.alloc_buff_failed++;
- if (rx_buffer && size)
- rx_buffer->pagecnt_bias++;
break;
}
- iavf_put_rx_buffer(rx_ring, rx_buffer);
+skip_data:
cleaned_count++;
- if (iavf_is_non_eop(rx_ring, rx_desc, skb))
+ if (iavf_is_non_eop(rx_ring, fields) || unlikely(!skb))
continue;
- /* ERR_MASK will only have valid bits if EOP set, and
- * what we are doing here is actually checking
- * IAVF_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
- * the error field
+ /* The RXE field in the descriptor indicates MAC errors (CRC,
+ * alignment, oversize, etc.). If it is set, the packet must be
+ * dropped.
*/
- if (unlikely(iavf_test_staterr(rx_desc, BIT(IAVF_RXD_QW1_ERROR_SHIFT)))) {
+ if (unlikely(fields.rxe)) {
dev_kfree_skb_any(skb);
skb = NULL;
continue;
@@ -1578,22 +1463,11 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
- qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- rx_ptype = FIELD_GET(IAVF_RXD_QW1_PTYPE_MASK, qword);
-
/* populate checksum, VLAN, and protocol */
- iavf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
-
- if (qword & BIT(IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT) &&
- rx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1)
- vlan_tag = le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1);
- if (rx_desc->wb.qword2.ext_status &
- cpu_to_le16(BIT(IAVF_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT)) &&
- rx_ring->flags & IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2)
- vlan_tag = le16_to_cpu(rx_desc->wb.qword2.l2tag2_2);
+ iavf_process_skb_fields(rx_ring, rx_desc, skb, fields.ptype, flex);
iavf_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
- iavf_receive_skb(rx_ring, skb, vlan_tag);
+ iavf_receive_skb(rx_ring, skb, fields.vlan);
skb = NULL;
/* update budget accounting */
@@ -1743,8 +1617,8 @@ int iavf_napi_poll(struct napi_struct *napi, int budget)
clean_complete = false;
continue;
}
- arm_wb |= ring->arm_wb;
- ring->arm_wb = false;
+ arm_wb |= !!(ring->flags & IAVF_TXR_FLAGS_ARM_WB);
+ ring->flags &= ~IAVF_TXR_FLAGS_ARM_WB;
}
/* Handle case where we are called by netpoll with a budget of 0 */
@@ -1776,7 +1650,8 @@ int iavf_napi_poll(struct napi_struct *napi, int budget)
* continue to poll, otherwise we must stop polling so the
* interrupt can move to the correct cpu.
*/
- if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
+ if (!cpumask_test_cpu(cpu_id,
+ &q_vector->napi.config->affinity_mask)) {
/* Tell napi that we are done polling */
napi_complete_done(napi, work_done);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.h b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
index 10ba36602c0c..df49b0b1d54a 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
@@ -4,6 +4,8 @@
#ifndef _IAVF_TXRX_H_
#define _IAVF_TXRX_H_
+#include <linux/net/intel/libie/pctype.h>
+
/* Interrupt Throttling and Rate Limiting Goodies */
#define IAVF_DEFAULT_IRQ_WORK 256
@@ -59,116 +61,26 @@ enum iavf_dyn_idx_t {
#define IAVF_PE_ITR IAVF_IDX_ITR2
/* Supported RSS offloads */
-#define IAVF_DEFAULT_RSS_HENA ( \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_UDP) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_TCP) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_FRAG_IPV4) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_UDP) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_TCP) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_FRAG_IPV6) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_L2_PAYLOAD))
-
-#define IAVF_DEFAULT_RSS_HENA_EXPANDED (IAVF_DEFAULT_RSS_HENA | \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
-
-/* Supported Rx Buffer Sizes (a multiple of 128) */
-#define IAVF_RXBUFFER_256 256
-#define IAVF_RXBUFFER_1536 1536 /* 128B aligned standard Ethernet frame */
-#define IAVF_RXBUFFER_2048 2048
-#define IAVF_RXBUFFER_3072 3072 /* Used for large frames w/ padding */
-#define IAVF_MAX_RXBUFFER 9728 /* largest size for single descriptor */
-
-/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
- * reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
- * this adds up to 512 bytes of extra data meaning the smallest allocation
- * we could have is 1K.
- * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab)
- * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)
- */
-#define IAVF_RX_HDR_SIZE IAVF_RXBUFFER_256
-#define IAVF_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
-#define iavf_rx_desc iavf_32byte_rx_desc
-
-#define IAVF_RX_DMA_ATTR \
- (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
-
-/* Attempt to maximize the headroom available for incoming frames. We
- * use a 2K buffer for receives and need 1536/1534 to store the data for
- * the frame. This leaves us with 512 bytes of room. From that we need
- * to deduct the space needed for the shared info and the padding needed
- * to IP align the frame.
- *
- * Note: For cache line sizes 256 or larger this value is going to end
- * up negative. In these cases we should fall back to the legacy
- * receive path.
- */
-#if (PAGE_SIZE < 8192)
-#define IAVF_2K_TOO_SMALL_WITH_PADDING \
-((NET_SKB_PAD + IAVF_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IAVF_RXBUFFER_2048))
-
-static inline int iavf_compute_pad(int rx_buf_len)
-{
- int page_size, pad_size;
-
- page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
- pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;
-
- return pad_size;
-}
-
-static inline int iavf_skb_pad(void)
-{
- int rx_buf_len;
-
- /* If a 2K buffer cannot handle a standard Ethernet frame then
- * optimize padding for a 3K buffer instead of a 1.5K buffer.
- *
- * For a 3K buffer we need to add enough padding to allow for
- * tailroom due to NET_IP_ALIGN possibly shifting us out of
- * cache-line alignment.
- */
- if (IAVF_2K_TOO_SMALL_WITH_PADDING)
- rx_buf_len = IAVF_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
- else
- rx_buf_len = IAVF_RXBUFFER_1536;
-
- /* if needed make room for NET_IP_ALIGN */
- rx_buf_len -= NET_IP_ALIGN;
-
- return iavf_compute_pad(rx_buf_len);
-}
-
-#define IAVF_SKB_PAD iavf_skb_pad()
-#else
-#define IAVF_2K_TOO_SMALL_WITH_PADDING false
-#define IAVF_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
-#endif
-
-/**
- * iavf_test_staterr - tests bits in Rx descriptor status and error fields
- * @rx_desc: pointer to receive descriptor (in le64 format)
- * @stat_err_bits: value to mask
- *
- * This function does some fast chicanery in order to return the
- * value of the mask which is really only used for boolean tests.
- * The status_error_len doesn't need to be shifted because it begins
- * at offset zero.
- */
-static inline bool iavf_test_staterr(union iavf_rx_desc *rx_desc,
- const u64 stat_err_bits)
-{
- return !!(rx_desc->wb.qword1.status_error_len &
- cpu_to_le64(stat_err_bits));
-}
+#define IAVF_DEFAULT_RSS_HASHCFG ( \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV4) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV6) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_L2_PAYLOAD))
+
+#define IAVF_DEFAULT_RSS_HASHCFG_EXPANDED (IAVF_DEFAULT_RSS_HASHCFG | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IAVF_RX_INCREMENT(r, i) \
@@ -271,17 +183,6 @@ struct iavf_tx_buffer {
u32 tx_flags;
};
-struct iavf_rx_buffer {
- dma_addr_t dma;
- struct page *page;
-#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
- __u32 page_offset;
-#else
- __u16 page_offset;
-#endif
- __u16 pagecnt_bias;
-};
-
struct iavf_queue_stats {
u64 packets;
u64 bytes;
@@ -293,7 +194,6 @@ struct iavf_tx_queue_stats {
u64 tx_done_old;
u64 tx_linearize;
u64 tx_force_wb;
- int prev_pkt_ctr;
u64 tx_lost_interrupt;
};
@@ -301,14 +201,6 @@ struct iavf_rx_queue_stats {
u64 non_eop_descs;
u64 alloc_page_failed;
u64 alloc_buff_failed;
- u64 page_reuse_count;
- u64 realloc_count;
-};
-
-enum iavf_ring_state_t {
- __IAVF_TX_FDIR_INIT_DONE,
- __IAVF_TX_XPS_INIT_DONE,
- __IAVF_RING_STATE_NBITS /* must be last */
};
/* some useful defines for virtchannel interface, which
@@ -326,16 +218,19 @@ enum iavf_ring_state_t {
struct iavf_ring {
struct iavf_ring *next; /* pointer to next ring in q_vector */
void *desc; /* Descriptor ring memory */
- struct device *dev; /* Used for DMA mapping */
+ union {
+ struct page_pool *pp; /* Used on Rx for buffer management */
+ struct device *dev; /* Used on Tx for DMA mapping */
+ };
struct net_device *netdev; /* netdev ring maps to */
union {
+ struct libeth_fqe *rx_fqes;
struct iavf_tx_buffer *tx_bi;
- struct iavf_rx_buffer *rx_bi;
};
- DECLARE_BITMAP(state, __IAVF_RING_STATE_NBITS);
- u16 queue_index; /* Queue number of ring */
- u8 dcb_tc; /* Traffic class of ring */
u8 __iomem *tail;
+ u32 truesize;
+
+ u16 queue_index; /* Queue number of ring */
/* high bit set means dynamic, use accessors routines to read/write.
* hardware only supports 2us resolution for the ITR registers.
@@ -345,26 +240,21 @@ struct iavf_ring {
u16 itr_setting;
u16 count; /* Number of descriptors */
- u16 reg_idx; /* HW register index of the ring */
- u16 rx_buf_len;
/* used in interrupt processing */
u16 next_to_use;
u16 next_to_clean;
- u8 atr_sample_rate;
- u8 atr_count;
-
- bool ring_active; /* is ring online or not */
- bool arm_wb; /* do something to arm write back */
- u8 packet_stride;
+ u16 rxdid; /* Rx descriptor format */
u16 flags;
#define IAVF_TXR_FLAGS_WB_ON_ITR BIT(0)
-#define IAVF_RXR_FLAGS_BUILD_SKB_ENABLED BIT(1)
+#define IAVF_TXR_FLAGS_ARM_WB BIT(1)
+/* BIT(2) is free */
#define IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 BIT(3)
#define IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2 BIT(4)
#define IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2 BIT(5)
+#define IAVF_TXRX_FLAGS_HW_TSTAMP BIT(6)
/* stats structs */
struct iavf_queue_stats stats;
@@ -374,6 +264,7 @@ struct iavf_ring {
struct iavf_rx_queue_stats rx_stats;
};
+ int prev_pkt_ctr; /* For Tx stall detection */
unsigned int size; /* length of descriptor ring in bytes */
dma_addr_t dma; /* physical address of ring */
@@ -381,7 +272,6 @@ struct iavf_ring {
struct iavf_q_vector *q_vector; /* Backreference to associated vector */
struct rcu_head rcu; /* to avoid race on free */
- u16 next_to_alloc;
struct sk_buff *skb; /* When iavf_clean_rx_ring_irq() must
* return before it sees the EOP for
* the current packet, we save that skb
@@ -390,22 +280,13 @@ struct iavf_ring {
* iavf_clean_rx_ring_irq() is called
* for this ring.
*/
-} ____cacheline_internodealigned_in_smp;
-
-static inline bool ring_uses_build_skb(struct iavf_ring *ring)
-{
- return !!(ring->flags & IAVF_RXR_FLAGS_BUILD_SKB_ENABLED);
-}
-static inline void set_ring_build_skb_enabled(struct iavf_ring *ring)
-{
- ring->flags |= IAVF_RXR_FLAGS_BUILD_SKB_ENABLED;
-}
+ struct iavf_ptp *ptp;
-static inline void clear_ring_build_skb_enabled(struct iavf_ring *ring)
-{
- ring->flags &= ~IAVF_RXR_FLAGS_BUILD_SKB_ENABLED;
-}
+ u32 rx_buf_len;
+ struct net_shaper q_shaper;
+ bool q_shaper_update;
+} ____cacheline_internodealigned_in_smp;
#define IAVF_ITR_ADAPTIVE_MIN_INC 0x0002
#define IAVF_ITR_ADAPTIVE_MIN_USECS 0x0002
@@ -428,17 +309,6 @@ struct iavf_ring_container {
#define iavf_for_each_ring(pos, head) \
for (pos = (head).ring; pos != NULL; pos = pos->next)
-static inline unsigned int iavf_rx_pg_order(struct iavf_ring *ring)
-{
-#if (PAGE_SIZE < 8192)
- if (ring->rx_buf_len > (PAGE_SIZE / 2))
- return 1;
-#endif
- return 0;
-}
-
-#define iavf_rx_pg_size(_ring) (PAGE_SIZE << iavf_rx_pg_order(_ring))
-
bool iavf_alloc_rx_buffers(struct iavf_ring *rxr, u16 cleaned_count);
netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_type.h b/drivers/net/ethernet/intel/iavf/iavf_type.h
index 2b6a207fa441..1d8cf29cb65a 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_type.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_type.h
@@ -10,8 +10,6 @@
#include "iavf_adminq.h"
#include "iavf_devids.h"
-#define IAVF_RXQ_CTX_DBUFF_SHIFT 7
-
/* IAVF_MASK is a macro used on 32 bit registers */
#define IAVF_MASK(mask, shift) ((u32)(mask) << (shift))
@@ -21,7 +19,7 @@
/* forward declaration */
struct iavf_hw;
-typedef void (*IAVF_ADMINQ_CALLBACK)(struct iavf_hw *, struct iavf_aq_desc *);
+typedef void (*IAVF_ADMINQ_CALLBACK)(struct iavf_hw *, struct libie_aq_desc *);
/* Data type manipulation macros. */
@@ -180,110 +178,116 @@ struct iavf_hw {
char err_str[16];
};
-/* RX Descriptors */
-union iavf_16byte_rx_desc {
- struct {
- __le64 pkt_addr; /* Packet buffer address */
- __le64 hdr_addr; /* Header buffer address */
- } read;
- struct {
- struct {
- struct {
- union {
- __le16 mirroring_status;
- __le16 fcoe_ctx_id;
- } mirr_fcoe;
- __le16 l2tag1;
- } lo_dword;
- union {
- __le32 rss; /* RSS Hash */
- __le32 fd_id; /* Flow director filter id */
- __le32 fcoe_param; /* FCoE DDP Context id */
- } hi_dword;
- } qword0;
- struct {
- /* ext status/error/pktype/length */
- __le64 status_error_len;
- } qword1;
- } wb; /* writeback */
-};
-
-union iavf_32byte_rx_desc {
- struct {
- __le64 pkt_addr; /* Packet buffer address */
- __le64 hdr_addr; /* Header buffer address */
- /* bit 0 of hdr_buffer_addr is DD bit */
- __le64 rsvd1;
- __le64 rsvd2;
- } read;
- struct {
- struct {
- struct {
- union {
- __le16 mirroring_status;
- __le16 fcoe_ctx_id;
- } mirr_fcoe;
- __le16 l2tag1;
- } lo_dword;
- union {
- __le32 rss; /* RSS Hash */
- __le32 fcoe_param; /* FCoE DDP Context id */
- /* Flow director filter id in case of
- * Programming status desc WB
- */
- __le32 fd_id;
- } hi_dword;
- } qword0;
- struct {
- /* status/error/pktype/length */
- __le64 status_error_len;
- } qword1;
- struct {
- __le16 ext_status; /* extended status */
- __le16 rsvd;
- __le16 l2tag2_1;
- __le16 l2tag2_2;
- } qword2;
- struct {
- union {
- __le32 flex_bytes_lo;
- __le32 pe_status;
- } lo_dword;
- union {
- __le32 flex_bytes_hi;
- __le32 fd_id;
- } hi_dword;
- } qword3;
- } wb; /* writeback */
-};
-
-enum iavf_rx_desc_status_bits {
- /* Note: These are predefined bit offsets */
- IAVF_RX_DESC_STATUS_DD_SHIFT = 0,
- IAVF_RX_DESC_STATUS_EOF_SHIFT = 1,
- IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT = 2,
- IAVF_RX_DESC_STATUS_L3L4P_SHIFT = 3,
- IAVF_RX_DESC_STATUS_CRCP_SHIFT = 4,
- IAVF_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */
- IAVF_RX_DESC_STATUS_TSYNVALID_SHIFT = 7,
- /* Note: Bit 8 is reserved in X710 and XL710 */
- IAVF_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8,
- IAVF_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */
- IAVF_RX_DESC_STATUS_FLM_SHIFT = 11,
- IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */
- IAVF_RX_DESC_STATUS_LPBK_SHIFT = 14,
- IAVF_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
- IAVF_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */
- /* Note: For non-tunnel packets INT_UDP_0 is the right status for
- * UDP header
- */
- IAVF_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18,
- IAVF_RX_DESC_STATUS_LAST /* this entry must be last!!! */
-};
-
-#define IAVF_RXD_QW1_STATUS_SHIFT 0
-#define IAVF_RXD_QW1_STATUS_MASK ((BIT(IAVF_RX_DESC_STATUS_LAST) - 1) \
- << IAVF_RXD_QW1_STATUS_SHIFT)
+/**
+ * struct iavf_rx_desc - Receive descriptor (both legacy and flexible)
+ * @qw0: quad word 0 fields:
+ * Legacy: Descriptor Type; Mirror ID; L2TAG1P (S-TAG); Filter Status
+ * Flex: Descriptor Type; Mirror ID; UMBCAST; Packet Type; Flexible Flags
+ * Section 0; Packet Length; Header Length; Split Header Flag;
+ * Flexible Flags section 1 / Extended Status
+ * @qw1: quad word 1 fields:
+ * Legacy: Status Field; Error Field; Packet Type; Packet Length (packet,
+ * header, Split Header Flag)
+ * Flex: Status / Error 0 Field; L2TAG1P (S-TAG); Flexible Metadata
+ * Container #0; Flexible Metadata Container #1
+ * @qw2: quad word 2 fields:
+ * Legacy: Extended Status; 1st L2TAG2P (C-TAG); 2nd L2TAG2P (C-TAG)
+ * Flex: Status / Error 1 Field; Flexible Flags section 2; Timestamp Low;
+ * 1st L2TAG2 (C-TAG); 2nd L2TAG2 (C-TAG)
+ * @qw3: quad word 3 fields:
+ * Legacy: FD Filter ID / Flexible Bytes
+ * Flex: Flexible Metadata Container #2; Flexible Metadata Container #3;
+ * Flexible Metadata Container #4 / Timestamp High 0; Flexible
+ * Metadata Container #5 / Timestamp High 1;
+ */
+struct iavf_rx_desc {
+ aligned_le64 qw0;
+/* The hash signature (RSS) */
+#define IAVF_RXD_LEGACY_RSS_M GENMASK_ULL(63, 32)
+/* Stripped C-TAG VLAN from the receive packet */
+#define IAVF_RXD_LEGACY_L2TAG1_M GENMASK_ULL(33, 16)
+/* Packet type */
+#define IAVF_RXD_FLEX_PTYPE_M GENMASK_ULL(25, 16)
+/* Packet length */
+#define IAVF_RXD_FLEX_PKT_LEN_M GENMASK_ULL(45, 32)
+
+ aligned_le64 qw1;
+/* Descriptor done indication flag. */
+#define IAVF_RXD_LEGACY_DD_M BIT(0)
+/* End of packet. Set to 1 if this descriptor is the last one of the packet */
+#define IAVF_RXD_LEGACY_EOP_M BIT(1)
+/* L2 TAG 1 presence indication */
+#define IAVF_RXD_LEGACY_L2TAG1P_M BIT(2)
+/* Detectable L3 and L4 integrity check is processed by the HW */
+#define IAVF_RXD_LEGACY_L3L4P_M BIT(3)
+/* Set when an IPv6 packet contains a Destination Options Header or a Routing
+ * Header.
+ */
+#define IAVF_RXD_LEGACY_IPV6EXADD_M BIT(15)
+/* Receive MAC Errors: CRC; Alignment; Oversize; Undersize; Length error */
+#define IAVF_RXD_LEGACY_RXE_M BIT(19)
+/* Checksum reports:
+ * - IPE: IP checksum error
+ * - L4E: L4 integrity error
+ * - EIPE: External IP header (tunneled packets)
+ */
+#define IAVF_RXD_LEGACY_IPE_M BIT(22)
+#define IAVF_RXD_LEGACY_L4E_M BIT(23)
+#define IAVF_RXD_LEGACY_EIPE_M BIT(24)
+/* Set for packets that skip checksum calculation in pre-parser */
+#define IAVF_RXD_LEGACY_PPRS_M BIT(26)
+/* Indicates the content in the Filter Status field */
+#define IAVF_RXD_LEGACY_FLTSTAT_M GENMASK_ULL(13, 12)
+/* Packet type */
+#define IAVF_RXD_LEGACY_PTYPE_M GENMASK_ULL(37, 30)
+/* Packet length */
+#define IAVF_RXD_LEGACY_LENGTH_M GENMASK_ULL(51, 38)
+/* Descriptor done indication flag */
+#define IAVF_RXD_FLEX_DD_M BIT(0)
+/* End of packet. Set to 1 if this descriptor is the last one of the packet */
+#define IAVF_RXD_FLEX_EOP_M BIT(1)
+/* Detectable L3 and L4 integrity check is processed by the HW */
+#define IAVF_RXD_FLEX_L3L4P_M BIT(3)
+/* Checksum reports:
+ * - IPE: IP checksum error
+ * - L4E: L4 integrity error
+ * - EIPE: External IP header (tunneled packets)
+ * - EUDPE: External UDP checksum error (tunneled packets)
+ */
+#define IAVF_RXD_FLEX_XSUM_IPE_M BIT(4)
+#define IAVF_RXD_FLEX_XSUM_L4E_M BIT(5)
+#define IAVF_RXD_FLEX_XSUM_EIPE_M BIT(6)
+#define IAVF_RXD_FLEX_XSUM_EUDPE_M BIT(7)
+/* Set when an IPv6 packet contains a Destination Options Header or a Routing
+ * Header.
+ */
+#define IAVF_RXD_FLEX_IPV6EXADD_M BIT(9)
+/* Receive MAC Errors: CRC; Alignment; Oversize; Undersize; Length error */
+#define IAVF_RXD_FLEX_RXE_M BIT(10)
+/* Indicates that the RSS/HASH result is valid */
+#define IAVF_RXD_FLEX_RSS_VALID_M BIT(12)
+/* L2 TAG 1 presence indication */
+#define IAVF_RXD_FLEX_L2TAG1P_M BIT(13)
+/* Stripped L2 Tag from the receive packet */
+#define IAVF_RXD_FLEX_L2TAG1_M GENMASK_ULL(31, 16)
+/* The hash signature (RSS) */
+#define IAVF_RXD_FLEX_RSS_HASH_M GENMASK_ULL(63, 32)
+
+ aligned_le64 qw2;
+/* L2 Tag 2 Presence */
+#define IAVF_RXD_LEGACY_L2TAG2P_M BIT(0)
+/* Stripped S-TAG VLAN from the receive packet */
+#define IAVF_RXD_LEGACY_L2TAG2_M GENMASK_ULL(63, 32)
+/* Stripped S-TAG VLAN from the receive packet */
+#define IAVF_RXD_FLEX_L2TAG2_2_M GENMASK_ULL(63, 48)
+/* The packet is a UDP tunneled packet */
+#define IAVF_RXD_FLEX_NAT_M BIT(4)
+/* L2 Tag 2 Presence */
+#define IAVF_RXD_FLEX_L2TAG2P_M BIT(11)
+ aligned_le64 qw3;
+#define IAVF_RXD_FLEX_QW3_TSTAMP_HIGH_M GENMASK_ULL(63, 32)
+} __aligned(4 * sizeof(__le64));
+static_assert(sizeof(struct iavf_rx_desc) == 32);
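The layout contract here is that the legacy and flexible writeback formats overlay the same four aligned quad words, so the structure must stay exactly 32 bytes; which set of field masks applies is decided at run time by the negotiated rxdid. A stand-alone C11 sketch of that size check, with an invented structure name:

#include <assert.h>
#include <stdint.h>

/* Hypothetical stand-in for the descriptor: four 64-bit writeback words. */
struct demo_rx_desc {
	uint64_t qw0;
	uint64_t qw1;
	uint64_t qw2;
	uint64_t qw3;
};

static_assert(sizeof(struct demo_rx_desc) == 32,
	      "writeback descriptor must stay exactly 32 bytes");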
#define IAVF_RXD_QW1_STATUS_TSYNINDX_SHIFT IAVF_RX_DESC_STATUS_TSYNINDX_SHIFT
#define IAVF_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \
@@ -300,22 +304,6 @@ enum iavf_rx_desc_fltstat_values {
IAVF_RX_DESC_FLTSTAT_RSS_HASH = 3,
};
-#define IAVF_RXD_QW1_ERROR_SHIFT 19
-#define IAVF_RXD_QW1_ERROR_MASK (0xFFUL << IAVF_RXD_QW1_ERROR_SHIFT)
-
-enum iavf_rx_desc_error_bits {
- /* Note: These are predefined bit offsets */
- IAVF_RX_DESC_ERROR_RXE_SHIFT = 0,
- IAVF_RX_DESC_ERROR_RECIPE_SHIFT = 1,
- IAVF_RX_DESC_ERROR_HBO_SHIFT = 2,
- IAVF_RX_DESC_ERROR_L3L4E_SHIFT = 3, /* 3 BITS */
- IAVF_RX_DESC_ERROR_IPE_SHIFT = 3,
- IAVF_RX_DESC_ERROR_L4E_SHIFT = 4,
- IAVF_RX_DESC_ERROR_EIPE_SHIFT = 5,
- IAVF_RX_DESC_ERROR_OVERSIZE_SHIFT = 6,
- IAVF_RX_DESC_ERROR_PPRS_SHIFT = 7
-};
-
enum iavf_rx_desc_error_l3l4e_fcoe_masks {
IAVF_RX_DESC_ERROR_L3L4E_NONE = 0,
IAVF_RX_DESC_ERROR_L3L4E_PROT = 1,
@@ -324,101 +312,6 @@ enum iavf_rx_desc_error_l3l4e_fcoe_masks {
IAVF_RX_DESC_ERROR_L3L4E_DMAC_WARN = 4
};
-#define IAVF_RXD_QW1_PTYPE_SHIFT 30
-#define IAVF_RXD_QW1_PTYPE_MASK (0xFFULL << IAVF_RXD_QW1_PTYPE_SHIFT)
-
-/* Packet type non-ip values */
-enum iavf_rx_l2_ptype {
- IAVF_RX_PTYPE_L2_RESERVED = 0,
- IAVF_RX_PTYPE_L2_MAC_PAY2 = 1,
- IAVF_RX_PTYPE_L2_TIMESYNC_PAY2 = 2,
- IAVF_RX_PTYPE_L2_FIP_PAY2 = 3,
- IAVF_RX_PTYPE_L2_OUI_PAY2 = 4,
- IAVF_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
- IAVF_RX_PTYPE_L2_LLDP_PAY2 = 6,
- IAVF_RX_PTYPE_L2_ECP_PAY2 = 7,
- IAVF_RX_PTYPE_L2_EVB_PAY2 = 8,
- IAVF_RX_PTYPE_L2_QCN_PAY2 = 9,
- IAVF_RX_PTYPE_L2_EAPOL_PAY2 = 10,
- IAVF_RX_PTYPE_L2_ARP = 11,
- IAVF_RX_PTYPE_L2_FCOE_PAY3 = 12,
- IAVF_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13,
- IAVF_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14,
- IAVF_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15,
- IAVF_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16,
- IAVF_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17,
- IAVF_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18,
- IAVF_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19,
- IAVF_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20,
- IAVF_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21,
- IAVF_RX_PTYPE_GRENAT4_MAC_PAY3 = 58,
- IAVF_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87,
- IAVF_RX_PTYPE_GRENAT6_MAC_PAY3 = 124,
- IAVF_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153
-};
-
-struct iavf_rx_ptype_decoded {
- u32 known:1;
- u32 outer_ip:1;
- u32 outer_ip_ver:1;
- u32 outer_frag:1;
- u32 tunnel_type:3;
- u32 tunnel_end_prot:2;
- u32 tunnel_end_frag:1;
- u32 inner_prot:4;
- u32 payload_layer:3;
-};
-
-enum iavf_rx_ptype_outer_ip {
- IAVF_RX_PTYPE_OUTER_L2 = 0,
- IAVF_RX_PTYPE_OUTER_IP = 1
-};
-
-enum iavf_rx_ptype_outer_ip_ver {
- IAVF_RX_PTYPE_OUTER_NONE = 0,
- IAVF_RX_PTYPE_OUTER_IPV4 = 0,
- IAVF_RX_PTYPE_OUTER_IPV6 = 1
-};
-
-enum iavf_rx_ptype_outer_fragmented {
- IAVF_RX_PTYPE_NOT_FRAG = 0,
- IAVF_RX_PTYPE_FRAG = 1
-};
-
-enum iavf_rx_ptype_tunnel_type {
- IAVF_RX_PTYPE_TUNNEL_NONE = 0,
- IAVF_RX_PTYPE_TUNNEL_IP_IP = 1,
- IAVF_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
- IAVF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
- IAVF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
-};
-
-enum iavf_rx_ptype_tunnel_end_prot {
- IAVF_RX_PTYPE_TUNNEL_END_NONE = 0,
- IAVF_RX_PTYPE_TUNNEL_END_IPV4 = 1,
- IAVF_RX_PTYPE_TUNNEL_END_IPV6 = 2,
-};
-
-enum iavf_rx_ptype_inner_prot {
- IAVF_RX_PTYPE_INNER_PROT_NONE = 0,
- IAVF_RX_PTYPE_INNER_PROT_UDP = 1,
- IAVF_RX_PTYPE_INNER_PROT_TCP = 2,
- IAVF_RX_PTYPE_INNER_PROT_SCTP = 3,
- IAVF_RX_PTYPE_INNER_PROT_ICMP = 4,
- IAVF_RX_PTYPE_INNER_PROT_TIMESYNC = 5
-};
-
-enum iavf_rx_ptype_payload_layer {
- IAVF_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
- IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
- IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
- IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
-};
-
-#define IAVF_RXD_QW1_LENGTH_PBUF_SHIFT 38
-#define IAVF_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \
- IAVF_RXD_QW1_LENGTH_PBUF_SHIFT)
-
#define IAVF_RXD_QW1_LENGTH_HBUF_SHIFT 52
#define IAVF_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \
IAVF_RXD_QW1_LENGTH_HBUF_SHIFT)
@@ -437,6 +330,8 @@ enum iavf_rx_desc_ext_status_bits {
IAVF_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11,
};
+#define IAVF_RX_DESC_EXT_STATUS_L2TAG2P_M BIT(IAVF_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT)
+
enum iavf_rx_desc_pe_status_bits {
/* Note: These are predefined bit offsets */
IAVF_RX_DESC_PE_STATUS_QPID_SHIFT = 0, /* 18 BITS */
@@ -568,38 +463,6 @@ enum iavf_tx_ctx_desc_cmd_bits {
IAVF_TX_CTX_DESC_SWPE = 0x40
};
-/* Packet Classifier Types for filters */
-enum iavf_filter_pctype {
- /* Note: Values 0-28 are reserved for future use.
- * Value 29, 30, 32 are not supported on XL710 and X710.
- */
- IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
- IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
- IAVF_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
- IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32,
- IAVF_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
- IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
- IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
- IAVF_FILTER_PCTYPE_FRAG_IPV4 = 36,
- /* Note: Values 37-38 are reserved for future use.
- * Value 39, 40, 42 are not supported on XL710 and X710.
- */
- IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
- IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
- IAVF_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
- IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42,
- IAVF_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
- IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
- IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
- IAVF_FILTER_PCTYPE_FRAG_IPV6 = 46,
- /* Note: Value 47 is reserved for future use */
- IAVF_FILTER_PCTYPE_FCOE_OX = 48,
- IAVF_FILTER_PCTYPE_FCOE_RX = 49,
- IAVF_FILTER_PCTYPE_FCOE_OTHER = 50,
- /* Note: Values 51-62 are reserved for future use */
- IAVF_FILTER_PCTYPE_L2_PAYLOAD = 63,
-};
-
#define IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT 30
#define IAVF_TXD_CTX_QW1_TSO_LEN_MASK (0x3FFFFULL << \
IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT)
diff --git a/drivers/net/ethernet/intel/iavf/iavf_types.h b/drivers/net/ethernet/intel/iavf/iavf_types.h
new file mode 100644
index 000000000000..a095855122bf
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/iavf_types.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2024 Intel Corporation. */
+
+#ifndef _IAVF_TYPES_H_
+#define _IAVF_TYPES_H_
+
+#include "iavf_types.h"
+
+#include <linux/avf/virtchnl.h>
+#include <linux/ptp_clock_kernel.h>
+
+/* structure used to queue PTP commands for processing */
+struct iavf_ptp_aq_cmd {
+ struct list_head list;
+ enum virtchnl_ops v_opcode:16;
+ u16 msglen;
+ u8 msg[] __counted_by(msglen);
+};
+
+struct iavf_ptp {
+ wait_queue_head_t phc_time_waitqueue;
+ struct virtchnl_ptp_caps hw_caps;
+ struct ptp_clock_info info;
+ struct ptp_clock *clock;
+ struct list_head aq_cmds;
+ u64 cached_phc_time;
+ unsigned long cached_phc_updated;
+ /* Lock protecting access to the AQ command list */
+ struct mutex aq_cmd_lock;
+ struct kernel_hwtstamp_config hwtstamp_config;
+ bool phc_time_ready:1;
+};
+
+#endif /* _IAVF_TYPES_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 22f2df7c460b..88156082a41d 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -1,7 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */
+#include <linux/net/intel/libie/rx.h>
+
#include "iavf.h"
+#include "iavf_ptp.h"
#include "iavf_prototype.h"
/**
@@ -26,7 +29,7 @@ static int iavf_send_pf_msg(struct iavf_adapter *adapter,
if (status)
dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, status %s, aq_err %s\n",
op, iavf_stat_str(hw, status),
- iavf_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
return iavf_status_to_errno(status);
}
@@ -76,6 +79,23 @@ iavf_poll_virtchnl_msg(struct iavf_hw *hw, struct iavf_arq_event_info *event,
return iavf_status_to_errno(status);
received_op =
(enum virtchnl_ops)le32_to_cpu(event->desc.cookie_high);
+
+ if (received_op == VIRTCHNL_OP_EVENT) {
+ struct iavf_adapter *adapter = hw->back;
+ struct virtchnl_pf_event *vpe =
+ (struct virtchnl_pf_event *)event->msg_buf;
+
+ if (vpe->event != VIRTCHNL_EVENT_RESET_IMPENDING)
+ continue;
+
+ dev_info(&adapter->pdev->dev, "Reset indication received from the PF\n");
+ if (!(adapter->flags & IAVF_FLAG_RESET_PENDING))
+ iavf_schedule_reset(adapter,
+ IAVF_FLAG_RESET_PENDING);
+
+ return -EIO;
+ }
+
if (op_to_poll == received_op)
break;
}
@@ -140,15 +160,19 @@ int iavf_send_vf_config_msg(struct iavf_adapter *adapter)
VIRTCHNL_VF_OFFLOAD_WB_ON_ITR |
VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 |
VIRTCHNL_VF_OFFLOAD_ENCAP |
+ VIRTCHNL_VF_OFFLOAD_TC_U32 |
VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
+ VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
VIRTCHNL_VF_OFFLOAD_CRC |
VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM |
VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
+ VIRTCHNL_VF_CAP_PTP |
VIRTCHNL_VF_OFFLOAD_ADQ |
VIRTCHNL_VF_OFFLOAD_USO |
VIRTCHNL_VF_OFFLOAD_FDIR_PF |
VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF |
- VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
+ VIRTCHNL_VF_CAP_ADV_LINK_SPEED |
+ VIRTCHNL_VF_OFFLOAD_QOS;
adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES;
adapter->aq_required &= ~IAVF_FLAG_AQ_GET_CONFIG;
@@ -173,6 +197,54 @@ int iavf_send_vf_offload_vlan_v2_msg(struct iavf_adapter *adapter)
NULL, 0);
}
+int iavf_send_vf_supported_rxdids_msg(struct iavf_adapter *adapter)
+{
+ adapter->aq_required &= ~IAVF_FLAG_AQ_GET_SUPPORTED_RXDIDS;
+
+ if (!IAVF_RXDID_ALLOWED(adapter))
+ return -EOPNOTSUPP;
+
+ adapter->current_op = VIRTCHNL_OP_GET_SUPPORTED_RXDIDS;
+
+ return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_SUPPORTED_RXDIDS,
+ NULL, 0);
+}
+
+/**
+ * iavf_send_vf_ptp_caps_msg - Send request for PTP capabilities
+ * @adapter: private adapter structure
+ *
+ * Send the VIRTCHNL_OP_1588_PTP_GET_CAPS command to the PF to request the PTP
+ * capabilities available to this device. This includes the following
+ * potential access:
+ *
+ * * READ_PHC - access to read the PTP hardware clock time
+ * * RX_TSTAMP - access to request Rx timestamps on all received packets
+ *
+ * The PF will reply with the same opcode and a filled-out copy of the
+ * virtchnl_ptp_caps structure, which defines which of these features are
+ * accessible to this device.
+ *
+ * Return: 0 if success, error code otherwise.
+ */
+int iavf_send_vf_ptp_caps_msg(struct iavf_adapter *adapter)
+{
+ struct virtchnl_ptp_caps hw_caps = {
+ .caps = VIRTCHNL_1588_PTP_CAP_READ_PHC |
+ VIRTCHNL_1588_PTP_CAP_RX_TSTAMP
+ };
+
+ adapter->aq_required &= ~IAVF_FLAG_AQ_GET_PTP_CAPS;
+
+ if (!IAVF_PTP_ALLOWED(adapter))
+ return -EOPNOTSUPP;
+
+ adapter->current_op = VIRTCHNL_OP_1588_PTP_GET_CAPS;
+
+ return iavf_send_pf_msg(adapter, VIRTCHNL_OP_1588_PTP_GET_CAPS,
+ (u8 *)&hw_caps, sizeof(hw_caps));
+}
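The capability handshake amounts to a bitmask intersection: the VF advertises everything it can use, the PF answers with the granted subset, and individual features then reduce to bit tests. A small sketch with invented bit values (the real ones live in the virtchnl definitions):

#include <stdint.h>
#include <stdio.h>

#define CAP_READ_PHC	(1u << 0)
#define CAP_RX_TSTAMP	(1u << 1)

int main(void)
{
	uint32_t requested = CAP_READ_PHC | CAP_RX_TSTAMP;
	uint32_t granted   = requested & CAP_READ_PHC;	/* PF grants PHC only */

	printf("rx timestamping %s\n",
	       (granted & CAP_RX_TSTAMP) ? "available" : "not granted");
	return 0;
}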
+
/**
* iavf_validate_num_queues
* @adapter: adapter structure
@@ -259,6 +331,40 @@ int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter)
return err;
}
+int iavf_get_vf_supported_rxdids(struct iavf_adapter *adapter)
+{
+ struct iavf_arq_event_info event;
+ u64 rxdids;
+ int err;
+
+ event.msg_buf = (u8 *)&rxdids;
+ event.buf_len = sizeof(rxdids);
+
+ err = iavf_poll_virtchnl_msg(&adapter->hw, &event,
+ VIRTCHNL_OP_GET_SUPPORTED_RXDIDS);
+ if (!err)
+ adapter->supp_rxdids = rxdids;
+
+ return err;
+}
+
+int iavf_get_vf_ptp_caps(struct iavf_adapter *adapter)
+{
+ struct virtchnl_ptp_caps caps = {};
+ struct iavf_arq_event_info event;
+ int err;
+
+ event.msg_buf = (u8 *)&caps;
+ event.buf_len = sizeof(caps);
+
+ err = iavf_poll_virtchnl_msg(&adapter->hw, &event,
+ VIRTCHNL_OP_1588_PTP_GET_CAPS);
+ if (!err)
+ adapter->ptp.hw_caps = caps;
+
+ return err;
+}
+
/**
* iavf_configure_queues
* @adapter: adapter structure
@@ -268,13 +374,14 @@ int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter)
void iavf_configure_queues(struct iavf_adapter *adapter)
{
struct virtchnl_vsi_queue_config_info *vqci;
- int i, max_frame = adapter->vf_res->max_mtu;
int pairs = adapter->num_active_queues;
struct virtchnl_queue_pair_info *vqpi;
+ u32 i, max_frame;
+ u8 rx_flags = 0;
size_t len;
- if (max_frame > IAVF_MAX_RXBUFFER || !max_frame)
- max_frame = IAVF_MAX_RXBUFFER;
+ max_frame = LIBIE_MAX_RX_FRM_LEN(adapter->rx_rings->pp->p.offset);
+ max_frame = min_not_zero(adapter->vf_res->max_mtu, max_frame);
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
@@ -288,10 +395,8 @@ void iavf_configure_queues(struct iavf_adapter *adapter)
if (!vqci)
return;
- /* Limit maximum frame size when jumbo frames is not enabled */
- if (!(adapter->flags & IAVF_FLAG_LEGACY_RX) &&
- (adapter->netdev->mtu <= ETH_DATA_LEN))
- max_frame = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;
+ if (iavf_ptp_cap_supported(adapter, VIRTCHNL_1588_PTP_CAP_RX_TSTAMP))
+ rx_flags |= VIRTCHNL_PTP_RX_TSTAMP;
vqci->vsi_id = adapter->vsi_res->vsi_id;
vqci->num_queue_pairs = pairs;
@@ -309,12 +414,13 @@ void iavf_configure_queues(struct iavf_adapter *adapter)
vqpi->rxq.ring_len = adapter->rx_rings[i].count;
vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma;
vqpi->rxq.max_pkt_size = max_frame;
- vqpi->rxq.databuffer_size =
- ALIGN(adapter->rx_rings[i].rx_buf_len,
- BIT_ULL(IAVF_RXQ_CTX_DBUFF_SHIFT));
+ vqpi->rxq.databuffer_size = adapter->rx_rings[i].rx_buf_len;
+ if (IAVF_RXDID_ALLOWED(adapter))
+ vqpi->rxq.rxdid = adapter->rxdid;
if (CRC_OFFLOAD_ALLOWED(adapter))
vqpi->rxq.crc_disable = !!(adapter->netdev->features &
NETIF_F_RXFCS);
+ vqpi->rxq.flags = rx_flags;
vqpi++;
}
@@ -687,7 +793,8 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
len = virtchnl_struct_size(vvfl, vlan_id, count);
if (len > IAVF_MAX_AQ_BUF_SIZE) {
- dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
+ dev_info(&adapter->pdev->dev,
+ "virtchnl: Too many VLAN add (v1) requests; splitting into multiple messages to PF\n");
while (len > IAVF_MAX_AQ_BUF_SIZE)
len = virtchnl_struct_size(vvfl, vlan_id,
--count);
@@ -732,7 +839,8 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
len = virtchnl_struct_size(vvfl_v2, filters, count);
if (len > IAVF_MAX_AQ_BUF_SIZE) {
- dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
+ dev_info(&adapter->pdev->dev,
+ "virtchnl: Too many VLAN add (v2) requests; splitting into multiple messages to PF\n");
while (len > IAVF_MAX_AQ_BUF_SIZE)
len = virtchnl_struct_size(vvfl_v2, filters,
--count);
@@ -835,7 +943,8 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
len = virtchnl_struct_size(vvfl, vlan_id, count);
if (len > IAVF_MAX_AQ_BUF_SIZE) {
- dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n");
+ dev_info(&adapter->pdev->dev,
+ "virtchnl: Too many VLAN delete (v1) requests; splitting into multiple messages to PF\n");
while (len > IAVF_MAX_AQ_BUF_SIZE)
len = virtchnl_struct_size(vvfl, vlan_id,
--count);
@@ -881,7 +990,8 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
len = virtchnl_struct_size(vvfl_v2, filters, count);
if (len > IAVF_MAX_AQ_BUF_SIZE) {
- dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
+ dev_info(&adapter->pdev->dev,
+ "virtchnl: Too many VLAN delete (v2) requests; splitting into multiple messages to PF\n");
while (len > IAVF_MAX_AQ_BUF_SIZE)
len = virtchnl_struct_size(vvfl_v2, filters,
--count);
@@ -1039,12 +1149,12 @@ void iavf_request_stats(struct iavf_adapter *adapter)
}
/**
- * iavf_get_hena
+ * iavf_get_rss_hashcfg
* @adapter: adapter structure
*
- * Request hash enable capabilities from PF
+ * Request RSS Hash enable bits from PF
**/
-void iavf_get_hena(struct iavf_adapter *adapter)
+void iavf_get_rss_hashcfg(struct iavf_adapter *adapter)
{
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
@@ -1052,20 +1162,20 @@ void iavf_get_hena(struct iavf_adapter *adapter)
adapter->current_op);
return;
}
- adapter->current_op = VIRTCHNL_OP_GET_RSS_HENA_CAPS;
- adapter->aq_required &= ~IAVF_FLAG_AQ_GET_HENA;
- iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_RSS_HENA_CAPS, NULL, 0);
+ adapter->current_op = VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS;
+ adapter->aq_required &= ~IAVF_FLAG_AQ_GET_RSS_HASHCFG;
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS, NULL, 0);
}
/**
- * iavf_set_hena
+ * iavf_set_rss_hashcfg
* @adapter: adapter structure
*
* Request the PF to set our RSS hash capabilities
**/
-void iavf_set_hena(struct iavf_adapter *adapter)
+void iavf_set_rss_hashcfg(struct iavf_adapter *adapter)
{
- struct virtchnl_rss_hena vrh;
+ struct virtchnl_rss_hashcfg vrh;
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
@@ -1073,10 +1183,10 @@ void iavf_set_hena(struct iavf_adapter *adapter)
adapter->current_op);
return;
}
- vrh.hena = adapter->hena;
- adapter->current_op = VIRTCHNL_OP_SET_RSS_HENA;
- adapter->aq_required &= ~IAVF_FLAG_AQ_SET_HENA;
- iavf_send_pf_msg(adapter, VIRTCHNL_OP_SET_RSS_HENA, (u8 *)&vrh,
+ vrh.hashcfg = adapter->rss_hashcfg;
+ adapter->current_op = VIRTCHNL_OP_SET_RSS_HASHCFG;
+ adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_HASHCFG;
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_SET_RSS_HASHCFG, (u8 *)&vrh,
sizeof(vrh));
}
@@ -1405,6 +1515,67 @@ void iavf_disable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid)
VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2);
}
+#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
+/**
+ * iavf_virtchnl_send_ptp_cmd - Send one queued PTP command
+ * @adapter: adapter private structure
+ *
+ * De-queue one PTP command request and send the command message to the PF.
+ * Clear IAVF_FLAG_AQ_SEND_PTP_CMD if no more messages are left to send.
+ */
+void iavf_virtchnl_send_ptp_cmd(struct iavf_adapter *adapter)
+{
+ struct iavf_ptp_aq_cmd *cmd;
+ int err;
+
+ if (!adapter->ptp.clock) {
+ /* This shouldn't be possible to hit, since no messages should
+ * be queued if PTP is not initialized.
+ */
+ pci_err(adapter->pdev, "PTP is not initialized\n");
+ adapter->aq_required &= ~IAVF_FLAG_AQ_SEND_PTP_CMD;
+ return;
+ }
+
+ mutex_lock(&adapter->ptp.aq_cmd_lock);
+ cmd = list_first_entry_or_null(&adapter->ptp.aq_cmds,
+ struct iavf_ptp_aq_cmd, list);
+ if (!cmd) {
+ /* no further PTP messages to send */
+ adapter->aq_required &= ~IAVF_FLAG_AQ_SEND_PTP_CMD;
+ goto out_unlock;
+ }
+
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ pci_err(adapter->pdev,
+ "Cannot send PTP command %d, command %d pending\n",
+ cmd->v_opcode, adapter->current_op);
+ goto out_unlock;
+ }
+
+ err = iavf_send_pf_msg(adapter, cmd->v_opcode, cmd->msg, cmd->msglen);
+ if (!err) {
+ /* Command was sent without errors, so we can remove it from
+ * the list and discard it.
+ */
+ list_del(&cmd->list);
+ kfree(cmd);
+ } else {
+ /* We failed to send the command, try again next cycle */
+ pci_err(adapter->pdev, "Failed to send PTP command %d\n",
+ cmd->v_opcode);
+ }
+
+ if (list_empty(&adapter->ptp.aq_cmds))
+ /* no further PTP messages to send */
+ adapter->aq_required &= ~IAVF_FLAG_AQ_SEND_PTP_CMD;
+
+out_unlock:
+ mutex_unlock(&adapter->ptp.aq_cmd_lock);
+}
+#endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
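The helper above drains at most one queued command per call and only unlinks it when the send succeeded, so a failed send is retried on the next cycle. A minimal user-space sketch of that pattern with a pthread mutex and an invented singly-linked queue (none of these names exist in the driver):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct cmd {
	struct cmd *next;
	int opcode;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct cmd *queue;

/* Stand-in for the "send to PF" step; pretend it always succeeds. */
static int send_cmd(const struct cmd *c)
{
	printf("sent opcode %d\n", c->opcode);
	return 0;
}

/* Pop at most one queued command; drop it only if the send worked. */
static void send_one_queued_cmd(void)
{
	struct cmd *c;

	pthread_mutex_lock(&lock);
	c = queue;
	if (c && send_cmd(c) == 0) {
		queue = c->next;
		free(c);
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	struct cmd *c = calloc(1, sizeof(*c));

	if (!c)
		return 1;
	c->opcode = 42;
	queue = c;

	send_one_queued_cmd();	/* queue is empty again afterwards */
	return 0;
}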
+
/**
* iavf_print_link_message - print link up or down
* @adapter: adapter structure
@@ -1512,6 +1683,130 @@ iavf_set_adapter_link_speed_from_vpe(struct iavf_adapter *adapter,
}
/**
+ * iavf_get_qos_caps - get qos caps support
+ * @adapter: iavf adapter struct instance
+ *
+ * This function requests the supported QoS capabilities from the PF.
+ */
+void iavf_get_qos_caps(struct iavf_adapter *adapter)
+{
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev,
+ "Cannot get qos caps, command %d pending\n",
+ adapter->current_op);
+ return;
+ }
+
+ adapter->current_op = VIRTCHNL_OP_GET_QOS_CAPS;
+ adapter->aq_required &= ~IAVF_FLAG_AQ_GET_QOS_CAPS;
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_QOS_CAPS, NULL, 0);
+}
+
+/**
+ * iavf_set_quanta_size - set quanta size of queue chunk
+ * @adapter: iavf adapter struct instance
+ * @quanta_size: quanta size in bytes
+ * @queue_index: starting index of queue chunk
+ * @num_queues: number of queues in the queue chunk
+ *
+ * This function requests that the PF set the quanta size of the queue chunk
+ * starting at queue_index.
+ */
+static void
+iavf_set_quanta_size(struct iavf_adapter *adapter, u16 quanta_size,
+ u16 queue_index, u16 num_queues)
+{
+ struct virtchnl_quanta_cfg quanta_cfg;
+
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev,
+ "Cannot set queue quanta size, command %d pending\n",
+ adapter->current_op);
+ return;
+ }
+
+ adapter->current_op = VIRTCHNL_OP_CONFIG_QUANTA;
+ quanta_cfg.quanta_size = quanta_size;
+ quanta_cfg.queue_select.type = VIRTCHNL_QUEUE_TYPE_TX;
+ quanta_cfg.queue_select.start_queue_id = queue_index;
+ quanta_cfg.queue_select.num_queues = num_queues;
+ adapter->aq_required &= ~IAVF_FLAG_AQ_CFG_QUEUES_QUANTA_SIZE;
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_QUANTA,
+ (u8 *)&quanta_cfg, sizeof(quanta_cfg));
+}
+
+/**
+ * iavf_cfg_queues_quanta_size - configure quanta size of queues
+ * @adapter: adapter structure
+ *
+ * Request that the PF configure quanta size of allocated queues.
+ **/
+void iavf_cfg_queues_quanta_size(struct iavf_adapter *adapter)
+{
+ int quanta_size = IAVF_DEFAULT_QUANTA_SIZE;
+
+ /* Set Queue Quanta Size to default */
+ iavf_set_quanta_size(adapter, quanta_size, 0,
+ adapter->num_active_queues);
+}
+
+/**
+ * iavf_cfg_queues_bw - configure bandwidth of allocated queues
+ * @adapter: iavf adapter structure instance
+ *
+ * This function requests that the PF configure the bandwidth of the
+ * allocated queues.
+ */
+void iavf_cfg_queues_bw(struct iavf_adapter *adapter)
+{
+ struct virtchnl_queues_bw_cfg *qs_bw_cfg;
+ struct net_shaper *q_shaper;
+ int qs_to_update = 0;
+ int i, inx = 0;
+ size_t len;
+
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev,
+ "Cannot set tc queue bw, command %d pending\n",
+ adapter->current_op);
+ return;
+ }
+
+ for (i = 0; i < adapter->num_active_queues; i++) {
+ if (adapter->tx_rings[i].q_shaper_update)
+ qs_to_update++;
+ }
+ len = struct_size(qs_bw_cfg, cfg, qs_to_update);
+ qs_bw_cfg = kzalloc(len, GFP_KERNEL);
+ if (!qs_bw_cfg)
+ return;
+
+ qs_bw_cfg->vsi_id = adapter->vsi.id;
+ qs_bw_cfg->num_queues = qs_to_update;
+
+ for (i = 0; i < adapter->num_active_queues; i++) {
+ struct iavf_ring *tx_ring = &adapter->tx_rings[i];
+
+ q_shaper = &tx_ring->q_shaper;
+ if (tx_ring->q_shaper_update) {
+ qs_bw_cfg->cfg[inx].queue_id = i;
+ qs_bw_cfg->cfg[inx].shaper.peak = q_shaper->bw_max;
+ qs_bw_cfg->cfg[inx].shaper.committed = q_shaper->bw_min;
+ qs_bw_cfg->cfg[inx].tc = 0;
+ inx++;
+ }
+ }
+
+ adapter->current_op = VIRTCHNL_OP_CONFIG_QUEUE_BW;
+ adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW;
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_QUEUE_BW,
+ (u8 *)qs_bw_cfg, len);
+ kfree(qs_bw_cfg);
+}
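The allocation pattern above is: count the queues that need an update, size one message as a header plus that many trailing entries (struct_size() in the kernel), fill the flexible array, send, free. A user-space sketch of the same shape with invented names:

#include <stdio.h>
#include <stdlib.h>

struct demo_cfg {
	unsigned int queue_id;
	unsigned int peak_bw;
};

struct demo_msg {
	unsigned int num_queues;
	struct demo_cfg cfg[];		/* flexible array member */
};

int main(void)
{
	unsigned int want[] = { 0, 2, 5 };	/* queues to update */
	size_t n = sizeof(want) / sizeof(want[0]);
	struct demo_msg *msg;

	msg = calloc(1, sizeof(*msg) + n * sizeof(msg->cfg[0]));
	if (!msg)
		return 1;

	msg->num_queues = n;
	for (size_t i = 0; i < n; i++) {
		msg->cfg[i].queue_id = want[i];
		msg->cfg[i].peak_bw = 1000;
	}

	printf("message covers %u queues\n", msg->num_queues);
	free(msg);
	return 0;
}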
+
+/**
* iavf_enable_channels
* @adapter: adapter structure
*
@@ -1966,8 +2261,8 @@ static void iavf_activate_fdir_filters(struct iavf_adapter *adapter)
* list on PF is already cleared after a reset
*/
list_del(&f->list);
+ iavf_dec_fdir_active_fltr(adapter, f);
kfree(f);
- adapter->fdir_active_fltr--;
}
}
spin_unlock_bh(&adapter->fdir_fltr_lock);
@@ -1977,6 +2272,37 @@ static void iavf_activate_fdir_filters(struct iavf_adapter *adapter)
}
/**
+ * iavf_virtchnl_ptp_get_time - Respond to VIRTCHNL_OP_1588_PTP_GET_TIME
+ * @adapter: private adapter structure
+ * @data: the message from the PF
+ * @len: length of the message from the PF
+ *
+ * Handle the VIRTCHNL_OP_1588_PTP_GET_TIME message from the PF. This message
+ * is sent by the PF in response to a VF request with the same opcode.
+ * Extract the 64-bit nanosecond time from the message and store it in
+ * cached_phc_time. Then, notify any thread that is waiting for the update via
+ * the wait queue.
+ */
+static void iavf_virtchnl_ptp_get_time(struct iavf_adapter *adapter,
+ void *data, u16 len)
+{
+ struct virtchnl_phc_time *msg = data;
+
+ if (len != sizeof(*msg)) {
+ dev_err_once(&adapter->pdev->dev,
+ "Invalid VIRTCHNL_OP_1588_PTP_GET_TIME from PF. Got size %u, expected %zu\n",
+ len, sizeof(*msg));
+ return;
+ }
+
+ adapter->ptp.cached_phc_time = msg->time;
+ adapter->ptp.cached_phc_updated = jiffies;
+ adapter->ptp.phc_time_ready = true;
+
+ wake_up(&adapter->ptp.phc_time_waitqueue);
+}
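(Editorial aside, not part of the patch: the wait-queue handshake described above has a consumer on the PTP side. The sketch below is illustrative only — the field names phc_time_ready, phc_time_waitqueue and cached_phc_time come from the hunk above, while iavf_send_phc_read() is a hypothetical stand-in for whatever helper queues the VIRTCHNL_OP_1588_PTP_GET_TIME request.)

static int example_read_phc_time(struct iavf_adapter *adapter, u64 *time_ns)
{
	long ret;

	adapter->ptp.phc_time_ready = false;

	/* hypothetical helper that sends VIRTCHNL_OP_1588_PTP_GET_TIME */
	iavf_send_phc_read(adapter);

	/* sleep until iavf_virtchnl_ptp_get_time() wakes us up */
	ret = wait_event_interruptible_timeout(adapter->ptp.phc_time_waitqueue,
					       adapter->ptp.phc_time_ready,
					       HZ);
	if (ret < 0)
		return ret;	/* interrupted */
	if (!ret)
		return -EBUSY;	/* PF did not answer in time */

	*time_ns = adapter->ptp.cached_phc_time;
	return 0;
}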
+
+/**
* iavf_virtchnl_completion
* @adapter: adapter structure
* @v_opcode: opcode sent by PF
@@ -2140,8 +2466,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
dev_err(&adapter->pdev->dev,
"%s\n", msg);
list_del(&fdir->list);
+ iavf_dec_fdir_active_fltr(adapter, fdir);
kfree(fdir);
- adapter->fdir_active_fltr--;
}
}
spin_unlock_bh(&adapter->fdir_fltr_lock);
@@ -2231,6 +2557,18 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC;
break;
+ case VIRTCHNL_OP_GET_QOS_CAPS:
+ dev_warn(&adapter->pdev->dev, "Failed to Get QoS CAPs, error %s\n",
+ iavf_stat_str(&adapter->hw, v_retval));
+ break;
+ case VIRTCHNL_OP_CONFIG_QUANTA:
+ dev_warn(&adapter->pdev->dev, "Failed to Config Quanta, error %s\n",
+ iavf_stat_str(&adapter->hw, v_retval));
+ break;
+ case VIRTCHNL_OP_CONFIG_QUEUE_BW:
+ dev_warn(&adapter->pdev->dev, "Failed to Config Queue BW, error %s\n",
+ iavf_stat_str(&adapter->hw, v_retval));
+ break;
default:
dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n",
v_retval, iavf_stat_str(&adapter->hw, v_retval),
@@ -2376,6 +2714,25 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
aq_required;
}
break;
+ case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
+ if (msglen != sizeof(u64))
+ return;
+
+ adapter->supp_rxdids = *(u64 *)msg;
+
+ break;
+ case VIRTCHNL_OP_1588_PTP_GET_CAPS:
+ if (msglen != sizeof(adapter->ptp.hw_caps))
+ return;
+
+ adapter->ptp.hw_caps = *(struct virtchnl_ptp_caps *)msg;
+
+ /* process any state change needed due to new capabilities */
+ iavf_ptp_process_caps(adapter);
+ break;
+ case VIRTCHNL_OP_1588_PTP_GET_TIME:
+ iavf_virtchnl_ptp_get_time(adapter, msg, msglen);
+ break;
case VIRTCHNL_OP_ENABLE_QUEUES:
/* enable transmits */
iavf_irq_enable(adapter, true);
@@ -2399,11 +2756,12 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
if (v_opcode != adapter->current_op)
return;
break;
- case VIRTCHNL_OP_GET_RSS_HENA_CAPS: {
- struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg;
+ case VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS: {
+ struct virtchnl_rss_hashcfg *vrh =
+ (struct virtchnl_rss_hashcfg *)msg;
if (msglen == sizeof(*vrh))
- adapter->hena = vrh->hena;
+ adapter->rss_hashcfg = vrh->hashcfg;
else
dev_warn(&adapter->pdev->dev,
"Invalid message %d from PF\n", v_opcode);
@@ -2456,8 +2814,12 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
list) {
if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) {
if (add_fltr->status == VIRTCHNL_FDIR_SUCCESS) {
- dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is added\n",
- fdir->loc);
+ if (!iavf_is_raw_fdir(fdir))
+ dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is added\n",
+ fdir->loc);
+ else
+ dev_info(&adapter->pdev->dev, "Flow Director filter (raw) for TC handle %x is added\n",
+ TC_U32_USERHTID(fdir->cls_u32_handle));
fdir->state = IAVF_FDIR_FLTR_ACTIVE;
fdir->flow_id = add_fltr->flow_id;
} else {
@@ -2465,8 +2827,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
add_fltr->status);
iavf_print_fdir_fltr(adapter, fdir);
list_del(&fdir->list);
+ iavf_dec_fdir_active_fltr(adapter, fdir);
kfree(fdir);
- adapter->fdir_active_fltr--;
}
}
}
@@ -2484,11 +2846,15 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS ||
del_fltr->status ==
VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) {
- dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is deleted\n",
- fdir->loc);
+ if (!iavf_is_raw_fdir(fdir))
+ dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is deleted\n",
+ fdir->loc);
+ else
+ dev_info(&adapter->pdev->dev, "Flow Director filter (raw) for TC handle %x is deleted\n",
+ TC_U32_USERHTID(fdir->cls_u32_handle));
list_del(&fdir->list);
+ iavf_dec_fdir_active_fltr(adapter, fdir);
kfree(fdir);
- adapter->fdir_active_fltr--;
} else {
fdir->state = IAVF_FDIR_FLTR_ACTIVE;
dev_info(&adapter->pdev->dev, "Failed to delete Flow Director filter with status: %d\n",
@@ -2565,6 +2931,24 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
if (!v_retval)
iavf_netdev_features_vlan_strip_set(netdev, false);
break;
+ case VIRTCHNL_OP_GET_QOS_CAPS: {
+ u16 len = struct_size(adapter->qos_caps, cap,
+ IAVF_MAX_QOS_TC_NUM);
+
+ memcpy(adapter->qos_caps, msg, min(msglen, len));
+
+ adapter->aq_required |= IAVF_FLAG_AQ_CFG_QUEUES_QUANTA_SIZE;
+ }
+ break;
+ case VIRTCHNL_OP_CONFIG_QUANTA:
+ break;
+ case VIRTCHNL_OP_CONFIG_QUEUE_BW: {
+ int i;
+ /* shaper configuration is successful for all queues */
+ for (i = 0; i < adapter->num_active_queues; i++)
+ adapter->tx_rings[i].q_shaper_update = false;
+ }
+ break;
default:
if (adapter->current_op && (v_opcode != adapter->current_op))
dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n",
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index cddd82d4ca0f..5b2c666496e7 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -5,6 +5,7 @@
# Makefile for the Intel(R) Ethernet Connection E800 Series Linux Driver
#
+subdir-ccflags-y += -I$(src)
obj-$(CONFIG_ICE) += ice.o
ice-y := ice_main.o \
@@ -27,25 +28,33 @@ ice-y := ice_main.o \
ice_vlan_mode.o \
ice_flex_pipe.o \
ice_flow.o \
+ ice_parser.o \
+ ice_parser_rt.o \
ice_idc.o \
- ice_devlink.o \
+ devlink/devlink.o \
+ devlink/health.o \
+ devlink/port.o \
+ ice_sf_eth.o \
+ ice_sf_vsi_vlan_ops.o \
ice_ddp.o \
ice_fw_update.o \
ice_lag.o \
ice_ethtool.o \
ice_repr.o \
ice_tc_lib.o \
- ice_fwlog.o \
- ice_debugfs.o
+ ice_debugfs.o \
+ ice_adapter.o
ice-$(CONFIG_PCI_IOV) += \
ice_sriov.o \
- ice_virtchnl.o \
- ice_virtchnl_allowlist.o \
- ice_virtchnl_fdir.o \
+ virt/allowlist.o \
+ virt/fdir.o \
+ virt/queues.o \
+ virt/virtchnl.o \
+ virt/rss.o \
ice_vf_mbx.o \
ice_vf_vsi_vlan_ops.o \
ice_vf_lib.o
-ice-$(CONFIG_PTP_1588_CLOCK) += ice_ptp.o ice_ptp_hw.o ice_dpll.o
+ice-$(CONFIG_PTP_1588_CLOCK) += ice_ptp.o ice_ptp_hw.o ice_dpll.o ice_tspll.o
ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o
ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o
ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/devlink/devlink.c
index b516e42b41f0..d88b7f3fd1f9 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink.c
@@ -5,12 +5,12 @@
#include "ice.h"
#include "ice_lib.h"
-#include "ice_devlink.h"
+#include "devlink.h"
+#include "port.h"
#include "ice_eswitch.h"
#include "ice_fw_update.h"
#include "ice_dcb_lib.h"
-
-static int ice_active_port_option = -1;
+#include "ice_sf_eth.h"
/* context for devlink info version reporting */
struct ice_info_ctx {
@@ -293,7 +293,7 @@ static int ice_devlink_info_get(struct devlink *devlink,
err = ice_discover_dev_caps(hw, &ctx->dev_caps);
if (err) {
dev_dbg(dev, "Failed to discover device capabilities, status %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Unable to discover device capabilities");
goto out_free_ctx;
}
@@ -302,7 +302,7 @@ static int ice_devlink_info_get(struct devlink *devlink,
err = ice_get_inactive_orom_ver(hw, &ctx->pending_orom);
if (err) {
dev_dbg(dev, "Unable to read inactive Option ROM version data, status %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
/* disable display of pending Option ROM */
ctx->dev_caps.common_cap.nvm_update_pending_orom = false;
@@ -313,7 +313,7 @@ static int ice_devlink_info_get(struct devlink *devlink,
err = ice_get_inactive_nvm_ver(hw, &ctx->pending_nvm);
if (err) {
dev_dbg(dev, "Unable to read inactive NVM version data, status %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
/* disable display of pending Option ROM */
ctx->dev_caps.common_cap.nvm_update_pending_nvm = false;
@@ -324,7 +324,7 @@ static int ice_devlink_info_get(struct devlink *devlink,
err = ice_get_inactive_netlist_ver(hw, &ctx->pending_netlist);
if (err) {
dev_dbg(dev, "Unable to read inactive Netlist version data, status %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
/* disable display of pending Option ROM */
ctx->dev_caps.common_cap.nvm_update_pending_netlist = false;
@@ -368,14 +368,18 @@ static int ice_devlink_info_get(struct devlink *devlink,
}
break;
case ICE_VERSION_RUNNING:
- err = devlink_info_version_running_put(req, key, ctx->buf);
+ err = devlink_info_version_running_put_ext(req, key,
+ ctx->buf,
+ DEVLINK_INFO_VERSION_TYPE_COMPONENT);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Unable to set running version");
goto out_free_ctx;
}
break;
case ICE_VERSION_STORED:
- err = devlink_info_version_stored_put(req, key, ctx->buf);
+ err = devlink_info_version_stored_put_ext(req, key,
+ ctx->buf,
+ DEVLINK_INFO_VERSION_TYPE_COMPONENT);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Unable to set stored version");
goto out_free_ctx;
@@ -436,7 +440,7 @@ ice_devlink_reload_empr_start(struct ice_pf *pf,
err = ice_aq_nvm_update_empr(hw);
if (err) {
dev_err(dev, "Failed to trigger EMP device reset to reload firmware, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to trigger EMP device reset to reload firmware");
return err;
}
@@ -455,6 +459,7 @@ static void ice_devlink_reinit_down(struct ice_pf *pf)
rtnl_lock();
ice_vsi_decfg(ice_get_main_vsi(pf));
rtnl_unlock();
+ ice_deinit_pf(pf);
ice_deinit_dev(pf);
}
@@ -478,17 +483,17 @@ ice_devlink_reload_down(struct devlink *devlink, bool netns_change,
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
if (ice_is_eswitch_mode_switchdev(pf)) {
NL_SET_ERR_MSG_MOD(extack,
- "Go to legacy mode before doing reinit\n");
+ "Go to legacy mode before doing reinit");
return -EOPNOTSUPP;
}
if (ice_is_adq_active(pf)) {
NL_SET_ERR_MSG_MOD(extack,
- "Turn off ADQ before doing reinit\n");
+ "Turn off ADQ before doing reinit");
return -EOPNOTSUPP;
}
if (ice_has_vfs(pf)) {
NL_SET_ERR_MSG_MOD(extack,
- "Remove all VFs before doing reinit\n");
+ "Remove all VFs before doing reinit");
return -EOPNOTSUPP;
}
ice_devlink_reinit_down(pf);
@@ -526,248 +531,155 @@ ice_devlink_reload_empr_finish(struct ice_pf *pf,
}
/**
- * ice_devlink_port_opt_speed_str - convert speed to a string
- * @speed: speed value
- */
-static const char *ice_devlink_port_opt_speed_str(u8 speed)
-{
- switch (speed & ICE_AQC_PORT_OPT_MAX_LANE_M) {
- case ICE_AQC_PORT_OPT_MAX_LANE_100M:
- return "0.1";
- case ICE_AQC_PORT_OPT_MAX_LANE_1G:
- return "1";
- case ICE_AQC_PORT_OPT_MAX_LANE_2500M:
- return "2.5";
- case ICE_AQC_PORT_OPT_MAX_LANE_5G:
- return "5";
- case ICE_AQC_PORT_OPT_MAX_LANE_10G:
- return "10";
- case ICE_AQC_PORT_OPT_MAX_LANE_25G:
- return "25";
- case ICE_AQC_PORT_OPT_MAX_LANE_50G:
- return "50";
- case ICE_AQC_PORT_OPT_MAX_LANE_100G:
- return "100";
- }
-
- return "-";
-}
-
-#define ICE_PORT_OPT_DESC_LEN 50
-/**
- * ice_devlink_port_options_print - Print available port split options
- * @pf: the PF to print split port options
+ * ice_get_tx_topo_user_sel - Read user's choice from flash
+ * @pf: pointer to pf structure
+ * @layers: value read from flash will be saved here
+ *
+ * Reads the user's preference for the Tx Scheduler Topology Tree from the PFA TLV.
*
- * Prints a table with available port split options and max port speeds
+ * Return: zero when read was successful, negative values otherwise.
*/
-static void ice_devlink_port_options_print(struct ice_pf *pf)
+static int ice_get_tx_topo_user_sel(struct ice_pf *pf, uint8_t *layers)
{
- u8 i, j, options_count, cnt, speed, pending_idx, active_idx;
- struct ice_aqc_get_port_options_elem *options, *opt;
- struct device *dev = ice_pf_to_dev(pf);
- bool active_valid, pending_valid;
- char desc[ICE_PORT_OPT_DESC_LEN];
- const char *str;
- int status;
+ struct ice_aqc_nvm_tx_topo_user_sel usr_sel = {};
+ struct ice_hw *hw = &pf->hw;
+ int err;
- options = kcalloc(ICE_AQC_PORT_OPT_MAX * ICE_MAX_PORT_PER_PCI_DEV,
- sizeof(*options), GFP_KERNEL);
- if (!options)
- return;
+ err = ice_acquire_nvm(hw, ICE_RES_READ);
+ if (err)
+ return err;
- for (i = 0; i < ICE_MAX_PORT_PER_PCI_DEV; i++) {
- opt = options + i * ICE_AQC_PORT_OPT_MAX;
- options_count = ICE_AQC_PORT_OPT_MAX;
- active_valid = 0;
+ err = ice_aq_read_nvm(hw, ICE_AQC_NVM_TX_TOPO_MOD_ID, 0,
+ sizeof(usr_sel), &usr_sel, true, true, NULL);
+ if (err)
+ goto exit_release_res;
- status = ice_aq_get_port_options(&pf->hw, opt, &options_count,
- i, true, &active_idx,
- &active_valid, &pending_idx,
- &pending_valid);
- if (status) {
- dev_dbg(dev, "Couldn't read port option for port %d, err %d\n",
- i, status);
- goto err;
- }
- }
+ if (usr_sel.data & ICE_AQC_NVM_TX_TOPO_USER_SEL)
+ *layers = ICE_SCHED_5_LAYERS;
+ else
+ *layers = ICE_SCHED_9_LAYERS;
- dev_dbg(dev, "Available port split options and max port speeds (Gbps):\n");
- dev_dbg(dev, "Status Split Quad 0 Quad 1\n");
- dev_dbg(dev, " count L0 L1 L2 L3 L4 L5 L6 L7\n");
+exit_release_res:
+ ice_release_nvm(hw);
- for (i = 0; i < options_count; i++) {
- cnt = 0;
+ return err;
+}
- if (i == ice_active_port_option)
- str = "Active";
- else if ((i == pending_idx) && pending_valid)
- str = "Pending";
- else
- str = "";
+/**
+ * ice_update_tx_topo_user_sel - Save user's preference in flash
+ * @pf: pointer to pf structure
+ * @layers: value to be saved in flash
+ *
+ * The "layers" argument defines the user's preference for the number of
+ * layers in the Tx Scheduler Topology Tree. The choice is stored in the PFA
+ * TLV field and picked up by the driver on the next initialization.
+ *
+ * Return: zero when save was successful, negative values otherwise.
+ */
+static int ice_update_tx_topo_user_sel(struct ice_pf *pf, int layers)
+{
+ struct ice_aqc_nvm_tx_topo_user_sel usr_sel = {};
+ struct ice_hw *hw = &pf->hw;
+ int err;
- cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt,
- "%-8s", str);
+ err = ice_acquire_nvm(hw, ICE_RES_WRITE);
+ if (err)
+ return err;
- cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt,
- "%-6u", options[i].pmd);
+ err = ice_aq_read_nvm(hw, ICE_AQC_NVM_TX_TOPO_MOD_ID, 0,
+ sizeof(usr_sel), &usr_sel, true, true, NULL);
+ if (err)
+ goto exit_release_res;
- for (j = 0; j < ICE_MAX_PORT_PER_PCI_DEV; ++j) {
- speed = options[i + j * ICE_AQC_PORT_OPT_MAX].max_lane_speed;
- str = ice_devlink_port_opt_speed_str(speed);
- cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt,
- "%3s ", str);
- }
+ if (layers == ICE_SCHED_5_LAYERS)
+ usr_sel.data |= ICE_AQC_NVM_TX_TOPO_USER_SEL;
+ else
+ usr_sel.data &= ~ICE_AQC_NVM_TX_TOPO_USER_SEL;
- dev_dbg(dev, "%s\n", desc);
- }
+ err = ice_write_one_nvm_block(pf, ICE_AQC_NVM_TX_TOPO_MOD_ID, 2,
+ sizeof(usr_sel.data), &usr_sel.data,
+ true, NULL, NULL);
+exit_release_res:
+ ice_release_nvm(hw);
-err:
- kfree(options);
+ return err;
}
/**
- * ice_devlink_aq_set_port_option - Send set port option admin queue command
- * @pf: the PF to print split port options
- * @option_idx: selected port option
- * @extack: extended netdev ack structure
+ * ice_devlink_tx_sched_layers_get - Get tx_scheduling_layers parameter
+ * @devlink: pointer to the devlink instance
+ * @id: the parameter ID to get
+ * @ctx: context to store the parameter value
+ * @extack: netlink extended ACK structure
*
- * Sends set port option admin queue command with selected port option and
- * calls NVM write activate.
+ * Return: zero on success and negative value on failure.
*/
-static int
-ice_devlink_aq_set_port_option(struct ice_pf *pf, u8 option_idx,
- struct netlink_ext_ack *extack)
+static int ice_devlink_tx_sched_layers_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
- struct device *dev = ice_pf_to_dev(pf);
- int status;
-
- status = ice_aq_set_port_option(&pf->hw, 0, true, option_idx);
- if (status) {
- dev_dbg(dev, "ice_aq_set_port_option, err %d aq_err %d\n",
- status, pf->hw.adminq.sq_last_status);
- NL_SET_ERR_MSG_MOD(extack, "Port split request failed");
- return -EIO;
- }
-
- status = ice_acquire_nvm(&pf->hw, ICE_RES_WRITE);
- if (status) {
- dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
- status, pf->hw.adminq.sq_last_status);
- NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
- return -EIO;
- }
-
- status = ice_nvm_write_activate(&pf->hw, ICE_AQC_NVM_ACTIV_REQ_EMPR, NULL);
- if (status) {
- dev_dbg(dev, "ice_nvm_write_activate failed, err %d aq_err %d\n",
- status, pf->hw.adminq.sq_last_status);
- NL_SET_ERR_MSG_MOD(extack, "Port split request failed to save data");
- ice_release_nvm(&pf->hw);
- return -EIO;
- }
+ struct ice_pf *pf = devlink_priv(devlink);
+ int err;
- ice_release_nvm(&pf->hw);
+ err = ice_get_tx_topo_user_sel(pf, &ctx->val.vu8);
+ if (err)
+ return err;
- NL_SET_ERR_MSG_MOD(extack, "Reboot required to finish port split");
return 0;
}
/**
- * ice_devlink_port_split - .port_split devlink handler
- * @devlink: devlink instance structure
- * @port: devlink port structure
- * @count: number of ports to split to
- * @extack: extended netdev ack structure
- *
- * Callback for the devlink .port_split operation.
- *
- * Unfortunately, the devlink expression of available options is limited
- * to just a number, so search for an FW port option which supports
- * the specified number. As there could be multiple FW port options with
- * the same port split count, allow switching between them. When the same
- * port split count request is issued again, switch to the next FW port
- * option with the same port split count.
+ * ice_devlink_tx_sched_layers_set - Set tx_scheduling_layers parameter
+ * @devlink: pointer to the devlink instance
+ * @id: the parameter ID to set
+ * @ctx: context to get the parameter value
+ * @extack: netlink extended ACK structure
*
- * Return: zero on success or an error code on failure.
+ * Return: zero on success and negative value on failure.
*/
-static int
-ice_devlink_port_split(struct devlink *devlink, struct devlink_port *port,
- unsigned int count, struct netlink_ext_ack *extack)
+static int ice_devlink_tx_sched_layers_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
- struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX];
- u8 i, j, active_idx, pending_idx, new_option;
struct ice_pf *pf = devlink_priv(devlink);
- u8 option_count = ICE_AQC_PORT_OPT_MAX;
- struct device *dev = ice_pf_to_dev(pf);
- bool active_valid, pending_valid;
- int status;
-
- status = ice_aq_get_port_options(&pf->hw, options, &option_count,
- 0, true, &active_idx, &active_valid,
- &pending_idx, &pending_valid);
- if (status) {
- dev_dbg(dev, "Couldn't read port split options, err = %d\n",
- status);
- NL_SET_ERR_MSG_MOD(extack, "Failed to get available port split options");
- return -EIO;
- }
-
- new_option = ICE_AQC_PORT_OPT_MAX;
- active_idx = pending_valid ? pending_idx : active_idx;
- for (i = 1; i <= option_count; i++) {
- /* In order to allow switching between FW port options with
- * the same port split count, search for a new option starting
- * from the active/pending option (with array wrap around).
- */
- j = (active_idx + i) % option_count;
-
- if (count == options[j].pmd) {
- new_option = j;
- break;
- }
- }
-
- if (new_option == active_idx) {
- dev_dbg(dev, "request to split: count: %u is already set and there are no other options\n",
- count);
- NL_SET_ERR_MSG_MOD(extack, "Requested split count is already set");
- ice_devlink_port_options_print(pf);
- return -EINVAL;
- }
-
- if (new_option == ICE_AQC_PORT_OPT_MAX) {
- dev_dbg(dev, "request to split: count: %u not found\n", count);
- NL_SET_ERR_MSG_MOD(extack, "Port split requested unsupported port config");
- ice_devlink_port_options_print(pf);
- return -EINVAL;
- }
+ int err;
- status = ice_devlink_aq_set_port_option(pf, new_option, extack);
- if (status)
- return status;
+ err = ice_update_tx_topo_user_sel(pf, ctx->val.vu8);
+ if (err)
+ return err;
- ice_devlink_port_options_print(pf);
+ NL_SET_ERR_MSG_MOD(extack,
+ "Tx scheduling layers have been changed on this device. You must power cycle the PCI slot for the change to take effect.");
return 0;
}
/**
- * ice_devlink_port_unsplit - .port_unsplit devlink handler
- * @devlink: devlink instance structure
- * @port: devlink port structure
- * @extack: extended netdev ack structure
+ * ice_devlink_tx_sched_layers_validate - Validate passed tx_scheduling_layers
+ * parameter value
+ * @devlink: unused pointer to devlink instance
+ * @id: the parameter ID to validate
+ * @val: value to validate
+ * @extack: netlink extended ACK structure
*
- * Callback for the devlink .port_unsplit operation.
- * Calls ice_devlink_port_split with split count set to 1.
- * There could be no FW option available with split count 1.
+ * Supported values are:
+ * - 5 - five layers Tx Scheduler Topology Tree
+ * - 9 - nine layers Tx Scheduler Topology Tree
*
- * Return: zero on success or an error code on failure.
+ * Return: zero when passed parameter value is supported. Negative value on
+ * error.
*/
-static int
-ice_devlink_port_unsplit(struct devlink *devlink, struct devlink_port *port,
- struct netlink_ext_ack *extack)
+static int ice_devlink_tx_sched_layers_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
{
- return ice_devlink_port_split(devlink, port, 1, extack);
+ if (val.vu8 != ICE_SCHED_5_LAYERS && val.vu8 != ICE_SCHED_9_LAYERS) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Wrong number of tx scheduler layers provided.");
+ return -EINVAL;
+ }
+
+ return 0;
}
/**
@@ -841,6 +753,7 @@ static void ice_traverse_tx_tree(struct devlink *devlink, struct ice_sched_node
struct ice_sched_node *tc_node, struct ice_pf *pf)
{
struct devlink_rate *rate_node = NULL;
+ struct ice_dynamic_port *sf;
struct ice_vf *vf;
int i;
@@ -852,6 +765,7 @@ static void ice_traverse_tx_tree(struct devlink *devlink, struct ice_sched_node
/* create root node */
rate_node = devl_rate_node_create(devlink, node, node->name, NULL);
} else if (node->vsi_handle &&
+ pf->vsi[node->vsi_handle]->type == ICE_VSI_VF &&
pf->vsi[node->vsi_handle]->vf) {
vf = pf->vsi[node->vsi_handle]->vf;
if (!vf->devlink_port.devlink_rate)
@@ -860,6 +774,16 @@ static void ice_traverse_tx_tree(struct devlink *devlink, struct ice_sched_node
*/
devl_rate_leaf_create(&vf->devlink_port, node,
node->parent->rate_node);
+ } else if (node->vsi_handle &&
+ pf->vsi[node->vsi_handle]->type == ICE_VSI_SF &&
+ pf->vsi[node->vsi_handle]->sf) {
+ sf = pf->vsi[node->vsi_handle]->sf;
+ if (!sf->devlink_port.devlink_rate)
+ /* leaf nodes don't have children
+ * so we don't set rate_node
+ */
+ devl_rate_leaf_create(&sf->devlink_port, node,
+ node->parent->rate_node);
} else if (node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF &&
node->parent->rate_node) {
rate_node = devl_rate_node_create(devlink, node, node->name,
@@ -891,10 +815,8 @@ int ice_devlink_rate_init_tx_topology(struct devlink *devlink, struct ice_vsi *v
tc_node = pi->root->children[0];
mutex_lock(&pi->sched_lock);
- devl_lock(devlink);
for (i = 0; i < tc_node->num_children; i++)
ice_traverse_tx_tree(devlink, tc_node->children[i], tc_node, pf);
- devl_unlock(devlink);
mutex_unlock(&pi->sched_lock);
return 0;
@@ -1062,6 +984,9 @@ static int ice_devlink_rate_node_new(struct devlink_rate *rate_node, void **priv
/* preallocate memory for ice_sched_node */
node = devm_kzalloc(ice_hw_to_dev(pi->hw), sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
*priv = node;
return 0;
@@ -1283,6 +1208,25 @@ static int ice_devlink_set_parent(struct devlink_rate *devlink_rate,
return status;
}
+static void ice_set_min_max_msix(struct ice_pf *pf)
+{
+ struct devlink *devlink = priv_to_devlink(pf);
+ union devlink_param_value val;
+ int err;
+
+ err = devl_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
+ &val);
+ if (!err)
+ pf->msix.min = val.vu32;
+
+ err = devl_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX,
+ &val);
+ if (!err)
+ pf->msix.max = val.vu32;
+}
+
/**
* ice_devlink_reinit_up - do reinit of the given PF
* @pf: pointer to the PF struct
@@ -1290,21 +1234,36 @@ static int ice_devlink_set_parent(struct devlink_rate *devlink_rate,
static int ice_devlink_reinit_up(struct ice_pf *pf)
{
struct ice_vsi *vsi = ice_get_main_vsi(pf);
- struct ice_vsi_cfg_params params;
+ struct device *dev = ice_pf_to_dev(pf);
+ bool need_dev_deinit = false;
int err;
+ err = ice_init_hw(&pf->hw);
+ if (err) {
+ dev_err(dev, "ice_init_hw failed: %d\n", err);
+ return err;
+ }
+
+ /* load MSI-X values */
+ ice_set_min_max_msix(pf);
+
err = ice_init_dev(pf);
if (err)
- return err;
+ goto unroll_hw_init;
- params = ice_vsi_to_params(vsi);
- params.flags = ICE_VSI_FLAG_INIT;
+ err = ice_init_pf(pf);
+ if (err) {
+ dev_err(dev, "ice_init_pf failed: %d\n", err);
+ goto unroll_dev_init;
+ }
+
+ vsi->flags = ICE_VSI_FLAG_INIT;
rtnl_lock();
- err = ice_vsi_cfg(vsi, &params);
+ err = ice_vsi_cfg(vsi);
rtnl_unlock();
if (err)
- goto err_vsi_cfg;
+ goto unroll_pf_init;
/* No need to take devl_lock, it's already taken by devlink API */
err = ice_load(pf);
@@ -1317,8 +1276,14 @@ err_load:
rtnl_lock();
ice_vsi_decfg(vsi);
rtnl_unlock();
-err_vsi_cfg:
- ice_deinit_dev(pf);
+unroll_pf_init:
+ ice_deinit_pf(pf);
+unroll_dev_init:
+ need_dev_deinit = true;
+unroll_hw_init:
+ ice_deinit_hw(&pf->hw);
+ if (need_dev_deinit)
+ ice_deinit_dev(pf);
return err;
}
@@ -1378,37 +1343,52 @@ static const struct devlink_ops ice_devlink_ops = {
.rate_leaf_parent_set = ice_devlink_set_parent,
.rate_node_parent_set = ice_devlink_set_parent,
+
+ .port_new = ice_devlink_port_new,
};
+static const struct devlink_ops ice_sf_devlink_ops;
+
static int
ice_devlink_enable_roce_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
+ struct iidc_rdma_core_dev_info *cdev;
+
+ cdev = pf->cdev_info;
+ if (!cdev)
+ return -ENODEV;
- ctx->val.vbool = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ? true : false;
+ ctx->val.vbool = !!(cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_ROCEV2);
return 0;
}
-static int
-ice_devlink_enable_roce_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+static int ice_devlink_enable_roce_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
+ struct iidc_rdma_core_dev_info *cdev;
bool roce_ena = ctx->val.vbool;
int ret;
+ cdev = pf->cdev_info;
+ if (!cdev)
+ return -ENODEV;
+
if (!roce_ena) {
ice_unplug_aux_dev(pf);
- pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_ROCEV2;
+ cdev->rdma_protocol &= ~IIDC_RDMA_PROTOCOL_ROCEV2;
return 0;
}
- pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2;
+ cdev->rdma_protocol |= IIDC_RDMA_PROTOCOL_ROCEV2;
ret = ice_plug_aux_dev(pf);
if (ret)
- pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_ROCEV2;
+ cdev->rdma_protocol &= ~IIDC_RDMA_PROTOCOL_ROCEV2;
return ret;
}
@@ -1419,11 +1399,16 @@ ice_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
+ struct iidc_rdma_core_dev_info *cdev;
+
+ cdev = pf->cdev_info;
+ if (!cdev)
+ return -ENODEV;
if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
return -EOPNOTSUPP;
- if (pf->rdma_mode & IIDC_RDMA_PROTOCOL_IWARP) {
+ if (cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_IWARP) {
NL_SET_ERR_MSG_MOD(extack, "iWARP is currently enabled. This device cannot enable iWARP and RoCEv2 simultaneously");
return -EOPNOTSUPP;
}
@@ -1433,33 +1418,44 @@ ice_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
static int
ice_devlink_enable_iw_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
+ struct iidc_rdma_core_dev_info *cdev;
+
+ cdev = pf->cdev_info;
+ if (!cdev)
+ return -ENODEV;
- ctx->val.vbool = pf->rdma_mode & IIDC_RDMA_PROTOCOL_IWARP;
+ ctx->val.vbool = !!(cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_IWARP);
return 0;
}
-static int
-ice_devlink_enable_iw_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+static int ice_devlink_enable_iw_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
+ struct iidc_rdma_core_dev_info *cdev;
bool iw_ena = ctx->val.vbool;
int ret;
+ cdev = pf->cdev_info;
+ if (!cdev)
+ return -ENODEV;
+
if (!iw_ena) {
ice_unplug_aux_dev(pf);
- pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_IWARP;
+ cdev->rdma_protocol &= ~IIDC_RDMA_PROTOCOL_IWARP;
return 0;
}
- pf->rdma_mode |= IIDC_RDMA_PROTOCOL_IWARP;
+ cdev->rdma_protocol |= IIDC_RDMA_PROTOCOL_IWARP;
ret = ice_plug_aux_dev(pf);
if (ret)
- pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_IWARP;
+ cdev->rdma_protocol &= ~IIDC_RDMA_PROTOCOL_IWARP;
return ret;
}
@@ -1474,7 +1470,7 @@ ice_devlink_enable_iw_validate(struct devlink *devlink, u32 id,
if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
return -EOPNOTSUPP;
- if (pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2) {
+ if (pf->cdev_info->rdma_protocol & IIDC_RDMA_PROTOCOL_ROCEV2) {
NL_SET_ERR_MSG_MOD(extack, "RoCEv2 is currently enabled. This device cannot enable iWARP and RoCEv2 simultaneously");
return -EOPNOTSUPP;
}
@@ -1482,260 +1478,351 @@ ice_devlink_enable_iw_validate(struct devlink *devlink, u32 id,
return 0;
}
-static const struct devlink_param ice_devlink_params[] = {
- DEVLINK_PARAM_GENERIC(ENABLE_ROCE, BIT(DEVLINK_PARAM_CMODE_RUNTIME),
- ice_devlink_enable_roce_get,
- ice_devlink_enable_roce_set,
- ice_devlink_enable_roce_validate),
- DEVLINK_PARAM_GENERIC(ENABLE_IWARP, BIT(DEVLINK_PARAM_CMODE_RUNTIME),
- ice_devlink_enable_iw_get,
- ice_devlink_enable_iw_set,
- ice_devlink_enable_iw_validate),
+#define DEVLINK_LOCAL_FWD_DISABLED_STR "disabled"
+#define DEVLINK_LOCAL_FWD_ENABLED_STR "enabled"
+#define DEVLINK_LOCAL_FWD_PRIORITIZED_STR "prioritized"
-};
+/**
+ * ice_devlink_local_fwd_mode_to_str - Get string for local_fwd mode.
+ * @mode: local forwarding mode used in the port_info struct.
+ *
+ * Return: String for the given mode, or "Invalid".
+ */
+static const char *
+ice_devlink_local_fwd_mode_to_str(enum ice_local_fwd_mode mode)
+{
+ switch (mode) {
+ case ICE_LOCAL_FWD_MODE_ENABLED:
+ return DEVLINK_LOCAL_FWD_ENABLED_STR;
+ case ICE_LOCAL_FWD_MODE_PRIORITIZED:
+ return DEVLINK_LOCAL_FWD_PRIORITIZED_STR;
+ case ICE_LOCAL_FWD_MODE_DISABLED:
+ return DEVLINK_LOCAL_FWD_DISABLED_STR;
+ }
-static void ice_devlink_free(void *devlink_ptr)
-{
- devlink_free((struct devlink *)devlink_ptr);
+ return "Invalid";
}
/**
- * ice_allocate_pf - Allocate devlink and return PF structure pointer
- * @dev: the device to allocate for
+ * ice_devlink_local_fwd_str_to_mode - Get local_fwd mode from string name.
+ * @mode_str: local forwarding mode string.
*
- * Allocate a devlink instance for this device and return the private area as
- * the PF structure. The devlink memory is kept track of through devres by
- * adding an action to remove it when unwinding.
+ * Return: Mode value or negative number if invalid.
*/
-struct ice_pf *ice_allocate_pf(struct device *dev)
+static int ice_devlink_local_fwd_str_to_mode(const char *mode_str)
{
- struct devlink *devlink;
-
- devlink = devlink_alloc(&ice_devlink_ops, sizeof(struct ice_pf), dev);
- if (!devlink)
- return NULL;
-
- /* Add an action to teardown the devlink when unwinding the driver */
- if (devm_add_action_or_reset(dev, ice_devlink_free, devlink))
- return NULL;
+ if (!strcmp(mode_str, DEVLINK_LOCAL_FWD_ENABLED_STR))
+ return ICE_LOCAL_FWD_MODE_ENABLED;
+ else if (!strcmp(mode_str, DEVLINK_LOCAL_FWD_PRIORITIZED_STR))
+ return ICE_LOCAL_FWD_MODE_PRIORITIZED;
+ else if (!strcmp(mode_str, DEVLINK_LOCAL_FWD_DISABLED_STR))
+ return ICE_LOCAL_FWD_MODE_DISABLED;
- return devlink_priv(devlink);
+ return -EINVAL;
}
/**
- * ice_devlink_register - Register devlink interface for this PF
- * @pf: the PF to register the devlink for.
- *
- * Register the devlink instance associated with this physical function.
+ * ice_devlink_local_fwd_get - Get local_fwd parameter.
+ * @devlink: Pointer to the devlink instance.
+ * @id: The parameter ID to get.
+ * @ctx: Context to store the parameter value.
+ * @extack: netlink extended ACK structure
*
- * Return: zero on success or an error code on failure.
+ * Return: Zero.
*/
-void ice_devlink_register(struct ice_pf *pf)
+static int ice_devlink_local_fwd_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
- struct devlink *devlink = priv_to_devlink(pf);
+ struct ice_pf *pf = devlink_priv(devlink);
+ struct ice_port_info *pi;
+ const char *mode_str;
+
+ pi = pf->hw.port_info;
+ mode_str = ice_devlink_local_fwd_mode_to_str(pi->local_fwd_mode);
+ snprintf(ctx->val.vstr, sizeof(ctx->val.vstr), "%s", mode_str);
- devlink_register(devlink);
+ return 0;
}
/**
- * ice_devlink_unregister - Unregister devlink resources for this PF.
- * @pf: the PF structure to cleanup
+ * ice_devlink_local_fwd_set - Set local_fwd parameter.
+ * @devlink: Pointer to the devlink instance.
+ * @id: The parameter ID to set.
+ * @ctx: Context to get the parameter value.
+ * @extack: Netlink extended ACK structure.
*
- * Releases resources used by devlink and cleans up associated memory.
+ * Return: Zero.
*/
-void ice_devlink_unregister(struct ice_pf *pf)
+static int ice_devlink_local_fwd_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
- devlink_unregister(priv_to_devlink(pf));
+ int new_local_fwd_mode = ice_devlink_local_fwd_str_to_mode(ctx->val.vstr);
+ struct ice_pf *pf = devlink_priv(devlink);
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_port_info *pi;
+
+ pi = pf->hw.port_info;
+ if (pi->local_fwd_mode != new_local_fwd_mode) {
+ pi->local_fwd_mode = new_local_fwd_mode;
+ dev_info(dev, "Setting local_fwd to %s\n", ctx->val.vstr);
+ ice_schedule_reset(pf, ICE_RESET_CORER);
+ }
+
+ return 0;
}
/**
- * ice_devlink_set_switch_id - Set unique switch id based on pci dsn
- * @pf: the PF to create a devlink port for
- * @ppid: struct with switch id information
+ * ice_devlink_local_fwd_validate - Validate passed local_fwd parameter value.
+ * @devlink: Unused pointer to devlink instance.
+ * @id: The parameter ID to validate.
+ * @val: Value to validate.
+ * @extack: Netlink extended ACK structure.
+ *
+ * Supported values are:
+ * "enabled" - local_fwd is enabled, "disabled" - local_fwd is disabled
+ * "prioritized" - local_fwd traffic is prioritized in scheduling.
+ *
+ * Return: Zero when passed parameter value is supported. Negative value on
+ * error.
*/
-static void
-ice_devlink_set_switch_id(struct ice_pf *pf, struct netdev_phys_item_id *ppid)
+static int ice_devlink_local_fwd_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ if (ice_devlink_local_fwd_str_to_mode(val.vstr) < 0) {
+ NL_SET_ERR_MSG_MOD(extack, "Error: Requested value is not supported.");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+ice_devlink_msix_max_pf_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
{
- struct pci_dev *pdev = pf->pdev;
- u64 id;
+ struct ice_pf *pf = devlink_priv(devlink);
- id = pci_get_dsn(pdev);
+ if (val.vu32 > pf->hw.func_caps.common_cap.num_msix_vectors)
+ return -EINVAL;
- ppid->id_len = sizeof(id);
- put_unaligned_be64(id, &ppid->id);
+ return 0;
}
-int ice_devlink_register_params(struct ice_pf *pf)
+static int
+ice_devlink_msix_min_pf_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
{
- struct devlink *devlink = priv_to_devlink(pf);
+ if (val.vu32 < ICE_MIN_MSIX)
+ return -EINVAL;
- return devlink_params_register(devlink, ice_devlink_params,
- ARRAY_SIZE(ice_devlink_params));
+ return 0;
}
-void ice_devlink_unregister_params(struct ice_pf *pf)
+static int ice_devlink_enable_rdma_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
{
- devlink_params_unregister(priv_to_devlink(pf), ice_devlink_params,
- ARRAY_SIZE(ice_devlink_params));
+ struct ice_pf *pf = devlink_priv(devlink);
+ bool new_state = val.vbool;
+
+ if (new_state && !test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+enum ice_param_id {
+ ICE_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ ICE_DEVLINK_PARAM_ID_TX_SCHED_LAYERS,
+ ICE_DEVLINK_PARAM_ID_LOCAL_FWD,
+};
+
+static const struct devlink_param ice_dvl_rdma_params[] = {
+ DEVLINK_PARAM_GENERIC(ENABLE_ROCE, BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ ice_devlink_enable_roce_get,
+ ice_devlink_enable_roce_set,
+ ice_devlink_enable_roce_validate),
+ DEVLINK_PARAM_GENERIC(ENABLE_IWARP, BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ ice_devlink_enable_iw_get,
+ ice_devlink_enable_iw_set,
+ ice_devlink_enable_iw_validate),
+ DEVLINK_PARAM_GENERIC(ENABLE_RDMA, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, ice_devlink_enable_rdma_validate),
+};
+
+static const struct devlink_param ice_dvl_msix_params[] = {
+ DEVLINK_PARAM_GENERIC(MSIX_VEC_PER_PF_MAX,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, ice_devlink_msix_max_pf_validate),
+ DEVLINK_PARAM_GENERIC(MSIX_VEC_PER_PF_MIN,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, ice_devlink_msix_min_pf_validate),
+};
+
+static const struct devlink_param ice_dvl_sched_params[] = {
+ DEVLINK_PARAM_DRIVER(ICE_DEVLINK_PARAM_ID_TX_SCHED_LAYERS,
+ "tx_scheduling_layers",
+ DEVLINK_PARAM_TYPE_U8,
+ BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+ ice_devlink_tx_sched_layers_get,
+ ice_devlink_tx_sched_layers_set,
+ ice_devlink_tx_sched_layers_validate),
+ DEVLINK_PARAM_DRIVER(ICE_DEVLINK_PARAM_ID_LOCAL_FWD,
+ "local_forwarding", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ ice_devlink_local_fwd_get,
+ ice_devlink_local_fwd_set,
+ ice_devlink_local_fwd_validate),
+};
+
+static void ice_devlink_free(void *devlink_ptr)
+{
+ devlink_free((struct devlink *)devlink_ptr);
}
/**
- * ice_devlink_set_port_split_options - Set port split options
- * @pf: the PF to set port split options
- * @attrs: devlink attributes
+ * ice_allocate_pf - Allocate devlink and return PF structure pointer
+ * @dev: the device to allocate for
*
- * Sets devlink port split options based on available FW port options
+ * Allocate a devlink instance for this device and return the private area as
+ * the PF structure. The devlink memory is kept track of through devres by
+ * adding an action to remove it when unwinding.
*/
-static void
-ice_devlink_set_port_split_options(struct ice_pf *pf,
- struct devlink_port_attrs *attrs)
+struct ice_pf *ice_allocate_pf(struct device *dev)
{
- struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX];
- u8 i, active_idx, pending_idx, option_count = ICE_AQC_PORT_OPT_MAX;
- bool active_valid, pending_valid;
- int status;
+ struct devlink *devlink;
- status = ice_aq_get_port_options(&pf->hw, options, &option_count,
- 0, true, &active_idx, &active_valid,
- &pending_idx, &pending_valid);
- if (status) {
- dev_dbg(ice_pf_to_dev(pf), "Couldn't read port split options, err = %d\n",
- status);
- return;
- }
+ devlink = devlink_alloc(&ice_devlink_ops, sizeof(struct ice_pf), dev);
+ if (!devlink)
+ return NULL;
- /* find the biggest available port split count */
- for (i = 0; i < option_count; i++)
- attrs->lanes = max_t(int, attrs->lanes, options[i].pmd);
+ /* Add an action to teardown the devlink when unwinding the driver */
+ if (devm_add_action_or_reset(dev, ice_devlink_free, devlink))
+ return NULL;
- attrs->splittable = attrs->lanes ? 1 : 0;
- ice_active_port_option = active_idx;
+ return devlink_priv(devlink);
}
-static const struct devlink_port_ops ice_devlink_port_ops = {
- .port_split = ice_devlink_port_split,
- .port_unsplit = ice_devlink_port_unsplit,
-};
-
/**
- * ice_devlink_create_pf_port - Create a devlink port for this PF
- * @pf: the PF to create a devlink port for
+ * ice_allocate_sf - Allocate devlink and return SF structure pointer
+ * @dev: the device to allocate for
+ * @pf: pointer to the PF structure
*
- * Create and register a devlink_port for this PF.
- * This function has to be called under devl_lock.
+ * Allocate a devlink instance for SF.
*
- * Return: zero on success or an error code on failure.
+ * Return: ice_sf_priv pointer to allocated memory or ERR_PTR in case of error
*/
-int ice_devlink_create_pf_port(struct ice_pf *pf)
+struct ice_sf_priv *ice_allocate_sf(struct device *dev, struct ice_pf *pf)
{
- struct devlink_port_attrs attrs = {};
- struct devlink_port *devlink_port;
struct devlink *devlink;
- struct ice_vsi *vsi;
- struct device *dev;
int err;
- devlink = priv_to_devlink(pf);
-
- dev = ice_pf_to_dev(pf);
-
- devlink_port = &pf->devlink_port;
-
- vsi = ice_get_main_vsi(pf);
- if (!vsi)
- return -EIO;
-
- attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
- attrs.phys.port_number = pf->hw.bus.func;
-
- /* As FW supports only port split options for whole device,
- * set port split options only for first PF.
- */
- if (pf->hw.pf_id == 0)
- ice_devlink_set_port_split_options(pf, &attrs);
-
- ice_devlink_set_switch_id(pf, &attrs.switch_id);
-
- devlink_port_attrs_set(devlink_port, &attrs);
+ devlink = devlink_alloc(&ice_sf_devlink_ops, sizeof(struct ice_sf_priv),
+ dev);
+ if (!devlink)
+ return ERR_PTR(-ENOMEM);
- err = devl_port_register_with_ops(devlink, devlink_port, vsi->idx,
- &ice_devlink_port_ops);
+ err = devl_nested_devlink_set(priv_to_devlink(pf), devlink);
if (err) {
- dev_err(dev, "Failed to create devlink port for PF %d, error %d\n",
- pf->hw.pf_id, err);
- return err;
+ devlink_free(devlink);
+ return ERR_PTR(err);
}
- return 0;
+ return devlink_priv(devlink);
}
/**
- * ice_devlink_destroy_pf_port - Destroy the devlink_port for this PF
- * @pf: the PF to cleanup
+ * ice_devlink_register - Register devlink interface for this PF
+ * @pf: the PF to register the devlink for.
*
- * Unregisters the devlink_port structure associated with this PF.
- * This function has to be called under devl_lock.
+ * Register the devlink instance associated with this physical function.
+ *
+ * Return: zero on success or an error code on failure.
*/
-void ice_devlink_destroy_pf_port(struct ice_pf *pf)
+void ice_devlink_register(struct ice_pf *pf)
{
- devl_port_unregister(&pf->devlink_port);
+ struct devlink *devlink = priv_to_devlink(pf);
+
+ devl_register(devlink);
}
/**
- * ice_devlink_create_vf_port - Create a devlink port for this VF
- * @vf: the VF to create a port for
- *
- * Create and register a devlink_port for this VF.
+ * ice_devlink_unregister - Unregister devlink resources for this PF.
+ * @pf: the PF structure to cleanup
*
- * Return: zero on success or an error code on failure.
+ * Releases resources used by devlink and cleans up associated memory.
*/
-int ice_devlink_create_vf_port(struct ice_vf *vf)
+void ice_devlink_unregister(struct ice_pf *pf)
{
- struct devlink_port_attrs attrs = {};
- struct devlink_port *devlink_port;
- struct devlink *devlink;
- struct ice_vsi *vsi;
- struct device *dev;
- struct ice_pf *pf;
- int err;
-
- pf = vf->pf;
- dev = ice_pf_to_dev(pf);
- devlink_port = &vf->devlink_port;
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi)
- return -EINVAL;
+ devl_unregister(priv_to_devlink(pf));
+}
- attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF;
- attrs.pci_vf.pf = pf->hw.bus.func;
- attrs.pci_vf.vf = vf->vf_id;
+int ice_devlink_register_params(struct ice_pf *pf)
+{
+ struct devlink *devlink = priv_to_devlink(pf);
+ union devlink_param_value value;
+ struct ice_hw *hw = &pf->hw;
+ int status;
- ice_devlink_set_switch_id(pf, &attrs.switch_id);
+ status = devl_params_register(devlink, ice_dvl_rdma_params,
+ ARRAY_SIZE(ice_dvl_rdma_params));
+ if (status)
+ return status;
- devlink_port_attrs_set(devlink_port, &attrs);
- devlink = priv_to_devlink(pf);
+ status = devl_params_register(devlink, ice_dvl_msix_params,
+ ARRAY_SIZE(ice_dvl_msix_params));
+ if (status)
+ goto unregister_rdma_params;
- err = devlink_port_register(devlink, devlink_port, vsi->idx);
- if (err) {
- dev_err(dev, "Failed to create devlink port for VF %d, error %d\n",
- vf->vf_id, err);
- return err;
- }
+ if (hw->func_caps.common_cap.tx_sched_topo_comp_mode_en)
+ status = devl_params_register(devlink, ice_dvl_sched_params,
+ ARRAY_SIZE(ice_dvl_sched_params));
+ if (status)
+ goto unregister_msix_params;
+
+ value.vu32 = pf->msix.max;
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX,
+ value);
+ value.vu32 = pf->msix.min;
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
+ value);
+
+ value.vbool = test_bit(ICE_FLAG_RDMA_ENA, pf->flags);
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
+ value);
return 0;
+
+unregister_msix_params:
+ devl_params_unregister(devlink, ice_dvl_msix_params,
+ ARRAY_SIZE(ice_dvl_msix_params));
+unregister_rdma_params:
+ devl_params_unregister(devlink, ice_dvl_rdma_params,
+ ARRAY_SIZE(ice_dvl_rdma_params));
+ return status;
}
-/**
- * ice_devlink_destroy_vf_port - Destroy the devlink_port for this VF
- * @vf: the VF to cleanup
- *
- * Unregisters the devlink_port structure associated with this VF.
- */
-void ice_devlink_destroy_vf_port(struct ice_vf *vf)
+void ice_devlink_unregister_params(struct ice_pf *pf)
{
- devl_rate_leaf_destroy(&vf->devlink_port);
- devlink_port_unregister(&vf->devlink_port);
+ struct devlink *devlink = priv_to_devlink(pf);
+ struct ice_hw *hw = &pf->hw;
+
+ devl_params_unregister(devlink, ice_dvl_rdma_params,
+ ARRAY_SIZE(ice_dvl_rdma_params));
+ devl_params_unregister(devlink, ice_dvl_msix_params,
+ ARRAY_SIZE(ice_dvl_msix_params));
+
+ if (hw->func_caps.common_cap.tx_sched_topo_comp_mode_en)
+ devl_params_unregister(devlink, ice_dvl_sched_params,
+ ARRAY_SIZE(ice_dvl_sched_params));
}
#define ICE_DEVLINK_READ_BLK_SIZE (1024 * 1024)
@@ -1976,8 +2063,8 @@ void ice_devlink_init_regions(struct ice_pf *pf)
u64 nvm_size, sram_size;
nvm_size = pf->hw.flash.flash_size;
- pf->nvm_region = devlink_region_create(devlink, &ice_nvm_region_ops, 1,
- nvm_size);
+ pf->nvm_region = devl_region_create(devlink, &ice_nvm_region_ops, 1,
+ nvm_size);
if (IS_ERR(pf->nvm_region)) {
dev_err(dev, "failed to create NVM devlink region, err %ld\n",
PTR_ERR(pf->nvm_region));
@@ -1985,17 +2072,17 @@ void ice_devlink_init_regions(struct ice_pf *pf)
}
sram_size = pf->hw.flash.sr_words * 2u;
- pf->sram_region = devlink_region_create(devlink, &ice_sram_region_ops,
- 1, sram_size);
+ pf->sram_region = devl_region_create(devlink, &ice_sram_region_ops,
+ 1, sram_size);
if (IS_ERR(pf->sram_region)) {
dev_err(dev, "failed to create shadow-ram devlink region, err %ld\n",
PTR_ERR(pf->sram_region));
pf->sram_region = NULL;
}
- pf->devcaps_region = devlink_region_create(devlink,
- &ice_devcaps_region_ops, 10,
- ICE_AQ_MAX_BUF_LEN);
+ pf->devcaps_region = devl_region_create(devlink,
+ &ice_devcaps_region_ops, 10,
+ ICE_AQ_MAX_BUF_LEN);
if (IS_ERR(pf->devcaps_region)) {
dev_err(dev, "failed to create device-caps devlink region, err %ld\n",
PTR_ERR(pf->devcaps_region));
@@ -2012,11 +2099,11 @@ void ice_devlink_init_regions(struct ice_pf *pf)
void ice_devlink_destroy_regions(struct ice_pf *pf)
{
if (pf->nvm_region)
- devlink_region_destroy(pf->nvm_region);
+ devl_region_destroy(pf->nvm_region);
if (pf->sram_region)
- devlink_region_destroy(pf->sram_region);
+ devl_region_destroy(pf->sram_region);
if (pf->devcaps_region)
- devlink_region_destroy(pf->devcaps_region);
+ devl_region_destroy(pf->devcaps_region);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.h b/drivers/net/ethernet/intel/ice/devlink/devlink.h
index d291c0e2e17b..1af3b0763fbb 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.h
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink.h
@@ -5,6 +5,7 @@
#define _ICE_DEVLINK_H_
struct ice_pf *ice_allocate_pf(struct device *dev);
+struct ice_sf_priv *ice_allocate_sf(struct device *dev, struct ice_pf *pf);
void ice_devlink_register(struct ice_pf *pf);
void ice_devlink_unregister(struct ice_pf *pf);
diff --git a/drivers/net/ethernet/intel/ice/devlink/health.c b/drivers/net/ethernet/intel/ice/devlink/health.c
new file mode 100644
index 000000000000..8e9a8a8178d4
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/devlink/health.c
@@ -0,0 +1,551 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_adminq_cmd.h" /* for enum ice_aqc_health_status_elem */
+#include "health.h"
+
+#define ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, obj, name) \
+ devlink_fmsg_put(fmsg, #name, (obj)->name)
+
+#define ICE_HEALTH_STATUS_DATA_SIZE 2
+
+struct ice_health_status {
+ enum ice_aqc_health_status code;
+ const char *description;
+ const char *solution;
+ const char *data_label[ICE_HEALTH_STATUS_DATA_SIZE];
+};
+
+/*
+ * In addition to the health status codes provided below, the firmware might
+ * generate Health Status Codes that are not pertinent to the end-user.
+ * For instance, Health Code 0x1002 is triggered when the command fails.
+ * Such codes should be disregarded by the end-user.
+ * The lookup table below must be kept sorted by code.
+ */
+
+static const char ice_common_port_solutions[] =
+ "Check your cable connection. Change or replace the module or cable. Manually set speed and duplex.";
+static const char ice_port_number_label[] = "Port Number";
+static const char ice_update_nvm_solution[] = "Update to the latest NVM image.";
+
+static const struct ice_health_status ice_health_status_lookup[] = {
+ {ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_STRICT, "An unsupported module was detected.",
+ ice_common_port_solutions, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_MOD_TYPE, "Module type is not supported.",
+ "Change or replace the module or cable.", {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_MOD_QUAL, "Module is not qualified.",
+ ice_common_port_solutions, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_MOD_COMM,
+ "Device cannot communicate with the module.",
+ ice_common_port_solutions,
+ {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_MOD_CONFLICT, "Unresolved module conflict.",
+ "Manually set speed/duplex or change the port option. If the problem persists, use a cable/module that is found in the supported modules and cables list for this device.",
+ {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_MOD_NOT_PRESENT, "Module is not present.",
+ "Check that the module is inserted correctly. If the problem persists, use a cable/module that is found in the supported modules and cables list for this device.",
+ {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_INFO_MOD_UNDERUTILIZED, "Underutilized module.",
+ "Change or replace the module or cable. Change the port option.",
+ {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_LENIENT, "An unsupported module was detected.",
+ ice_common_port_solutions, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_INVALID_LINK_CFG, "Invalid link configuration.",
+ NULL, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_PORT_ACCESS, "Port hardware access error.",
+ ice_update_nvm_solution, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_PORT_UNREACHABLE, "A port is unreachable.",
+ "Change the port option. Update to the latest NVM image."},
+ {ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_MOD_LIMITED, "Port speed is limited due to module.",
+ "Change the module or configure the port option to match the current module speed. Change the port option.",
+ {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_PARALLEL_FAULT,
+ "All configured link modes were attempted but failed to establish link. The device will restart the process to establish link.",
+ "Check link partner connection and configuration.",
+ {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_PHY_LIMITED,
+ "Port speed is limited by PHY capabilities.",
+ "Change the module to align to port option.", {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_NETLIST_TOPO, "LOM topology netlist is corrupted.",
+ ice_update_nvm_solution, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_NETLIST, "Unrecoverable netlist error.",
+ ice_update_nvm_solution, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_TOPO_CONFLICT, "Port topology conflict.",
+ "Change the port option. Update to the latest NVM image."},
+ {ICE_AQC_HEALTH_STATUS_ERR_LINK_HW_ACCESS, "Unrecoverable hardware access error.",
+ ice_update_nvm_solution, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_LINK_RUNTIME, "Unrecoverable runtime error.",
+ ice_update_nvm_solution, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_DNL_INIT, "Link management engine failed to initialize.",
+ ice_update_nvm_solution, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_PHY_FW_LOAD,
+ "Failed to load the firmware image in the external PHY.",
+ ice_update_nvm_solution, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_INFO_RECOVERY, "The device is in firmware recovery mode.",
+ ice_update_nvm_solution, {"Extended Error"}},
+ {ICE_AQC_HEALTH_STATUS_ERR_FLASH_ACCESS, "The flash chip cannot be accessed.",
+ "If issue persists, call customer support.", {"Access Type"}},
+ {ICE_AQC_HEALTH_STATUS_ERR_NVM_AUTH, "NVM authentication failed.",
+ ice_update_nvm_solution},
+ {ICE_AQC_HEALTH_STATUS_ERR_OROM_AUTH, "Option ROM authentication failed.",
+ ice_update_nvm_solution},
+ {ICE_AQC_HEALTH_STATUS_ERR_DDP_AUTH, "DDP package authentication failed.",
+ "Update to latest base driver and DDP package."},
+ {ICE_AQC_HEALTH_STATUS_ERR_NVM_COMPAT, "NVM image is incompatible.",
+ ice_update_nvm_solution},
+ {ICE_AQC_HEALTH_STATUS_ERR_OROM_COMPAT, "Option ROM is incompatible.",
+ ice_update_nvm_solution, {"Expected PCI Device ID", "Expected Module ID"}},
+ {ICE_AQC_HEALTH_STATUS_ERR_DCB_MIB,
+ "Supplied MIB file is invalid. DCB reverted to default configuration.",
+ "Disable FW-LLDP and check DCBx system configuration.",
+ {ice_port_number_label, "MIB ID"}},
+};
+
+static int ice_health_status_lookup_compare(const void *a, const void *b)
+{
+ return ((struct ice_health_status *)a)->code - ((struct ice_health_status *)b)->code;
+}
+
+static const struct ice_health_status *ice_get_health_status(u16 code)
+{
+ struct ice_health_status key = { .code = code };
+
+ return bsearch(&key, ice_health_status_lookup, ARRAY_SIZE(ice_health_status_lookup),
+ sizeof(struct ice_health_status), ice_health_status_lookup_compare);
+}
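(Editorial aside, not part of the patch: because ice_get_health_status() uses bsearch(), the lookup table must stay sorted by code, as the comment above the table notes. The sketch below shows one way a debug build could assert that invariant; the function name is invented for illustration.)

static void example_check_health_table_sorted(void)
{
	size_t i;

	for (i = 1; i < ARRAY_SIZE(ice_health_status_lookup); i++)
		/* codes must be strictly increasing for bsearch() to work */
		WARN_ON(ice_health_status_lookup[i - 1].code >=
			ice_health_status_lookup[i].code);
}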
+
+static void ice_describe_status_code(struct devlink_fmsg *fmsg,
+ struct ice_aqc_health_status_elem *hse)
+{
+ static const char *const aux_label[] = { "Aux Data 1", "Aux Data 2" };
+ const struct ice_health_status *health_code;
+ u32 internal_data[2];
+ u16 status_code;
+
+ status_code = le16_to_cpu(hse->health_status_code);
+
+ devlink_fmsg_put(fmsg, "Syndrome", status_code);
+ if (status_code) {
+ internal_data[0] = le32_to_cpu(hse->internal_data1);
+ internal_data[1] = le32_to_cpu(hse->internal_data2);
+
+ health_code = ice_get_health_status(status_code);
+ if (!health_code)
+ return;
+
+ devlink_fmsg_string_pair_put(fmsg, "Description", health_code->description);
+ if (health_code->solution)
+ devlink_fmsg_string_pair_put(fmsg, "Possible Solution",
+ health_code->solution);
+
+ for (size_t i = 0; i < ICE_HEALTH_STATUS_DATA_SIZE; i++) {
+ if (internal_data[i] != ICE_AQC_HEALTH_STATUS_UNDEFINED_DATA)
+ devlink_fmsg_u32_pair_put(fmsg,
+ health_code->data_label[i] ?
+ health_code->data_label[i] :
+ aux_label[i],
+ internal_data[i]);
+ }
+ }
+}
+
+static int
+ice_port_reporter_diagnose(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_health_reporter_priv(reporter);
+
+ ice_describe_status_code(fmsg, &pf->health_reporters.port_status);
+ return 0;
+}
+
+static int
+ice_port_reporter_dump(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg,
+ void *priv_ctx, struct netlink_ext_ack __always_unused *extack)
+{
+ struct ice_pf *pf = devlink_health_reporter_priv(reporter);
+
+ ice_describe_status_code(fmsg, &pf->health_reporters.port_status);
+ return 0;
+}
+
+static int
+ice_fw_reporter_diagnose(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_health_reporter_priv(reporter);
+
+ ice_describe_status_code(fmsg, &pf->health_reporters.fw_status);
+ return 0;
+}
+
+static int
+ice_fw_reporter_dump(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg,
+ void *priv_ctx, struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_health_reporter_priv(reporter);
+
+ ice_describe_status_code(fmsg, &pf->health_reporters.fw_status);
+ return 0;
+}
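+/*
+ * The diagnose/dump callbacks above back the "fw" and "port" devlink health
+ * reporters, so the decoded status can be read from userspace with an
+ * invocation along the lines of (the PCI address is only a placeholder):
+ *
+ *   devlink health diagnose pci/0000:3b:00.0 reporter fw
+ */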
+
+static void ice_config_health_events(struct ice_pf *pf, bool enable)
+{
+ u8 enable_bits = 0;
+ int ret;
+
+ if (enable)
+ enable_bits = ICE_AQC_HEALTH_STATUS_SET_PF_SPECIFIC_MASK |
+ ICE_AQC_HEALTH_STATUS_SET_GLOBAL_MASK;
+
+ ret = ice_aq_set_health_status_cfg(&pf->hw, enable_bits);
+ if (ret)
+ dev_err(ice_pf_to_dev(pf), "Failed to %s firmware health events, err %d aq_err %s\n",
+ str_enable_disable(enable), ret,
+ libie_aq_str(pf->hw.adminq.sq_last_status));
+}
+
+/**
+ * ice_process_health_status_event - Process the health status event from FW
+ * @pf: pointer to the PF structure
+ * @event: event structure containing the Health Status Event opcode
+ *
+ * Decode the Health Status Events and print the associated messages
+ */
+void ice_process_health_status_event(struct ice_pf *pf, struct ice_rq_event_info *event)
+{
+ const struct ice_aqc_health_status_elem *health_info;
+ const struct ice_aqc_get_health_status *cmd;
+ u16 count;
+
+ health_info = (struct ice_aqc_health_status_elem *)event->msg_buf;
+ cmd = libie_aq_raw(&event->desc);
+ count = le16_to_cpu(cmd->health_status_count);
+
+ if (count > (event->buf_len / sizeof(*health_info))) {
+ dev_err(ice_pf_to_dev(pf), "Received a health status event with invalid element count\n");
+ return;
+ }
+
+ for (size_t i = 0; i < count; i++) {
+ const struct ice_health_status *health_code;
+ u16 status_code;
+
+ status_code = le16_to_cpu(health_info->health_status_code);
+ health_code = ice_get_health_status(status_code);
+
+ if (health_code) {
+ switch (le16_to_cpu(health_info->event_source)) {
+ case ICE_AQC_HEALTH_STATUS_GLOBAL:
+ pf->health_reporters.fw_status = *health_info;
+ devlink_health_report(pf->health_reporters.fw,
+ "FW syndrome reported", NULL);
+ break;
+ case ICE_AQC_HEALTH_STATUS_PF:
+ case ICE_AQC_HEALTH_STATUS_PORT:
+ pf->health_reporters.port_status = *health_info;
+ devlink_health_report(pf->health_reporters.port,
+ "Port syndrome reported", NULL);
+ break;
+ default:
+ dev_err(ice_pf_to_dev(pf), "Health code with unknown source\n");
+ }
+ } else {
+ u32 data1, data2;
+ u16 source;
+
+ source = le16_to_cpu(health_info->event_source);
+ data1 = le32_to_cpu(health_info->internal_data1);
+ data2 = le32_to_cpu(health_info->internal_data2);
+ dev_dbg(ice_pf_to_dev(pf),
+ "Received internal health status code 0x%08x, source: 0x%08x, data1: 0x%08x, data2: 0x%08x",
+ status_code, source, data1, data2);
+ }
+ health_info++;
+ }
+}
+
+/**
+ * ice_devlink_health_report - boilerplate to call given @reporter
+ *
+ * @reporter: devlink health reporter to call, do nothing on NULL
+ * @msg: message to pass up, "event name" is fine
+ * @priv_ctx: typically some event struct
+ */
+static void ice_devlink_health_report(struct devlink_health_reporter *reporter,
+ const char *msg, void *priv_ctx)
+{
+ if (!reporter)
+ return;
+
+ /* We do not do auto recovery, so the return value of the function
+ * below is always 0, thus we ignore it.
+ */
+ devlink_health_report(reporter, msg, priv_ctx);
+}
+
+struct ice_mdd_event {
+ enum ice_mdd_src src;
+ u16 vf_num;
+ u16 queue;
+ u8 pf_num;
+ u8 event;
+};
+
+static const char *ice_mdd_src_to_str(enum ice_mdd_src src)
+{
+ switch (src) {
+ case ICE_MDD_SRC_TX_PQM:
+ return "tx_pqm";
+ case ICE_MDD_SRC_TX_TCLAN:
+ return "tx_tclan";
+ case ICE_MDD_SRC_TX_TDPU:
+ return "tx_tdpu";
+ case ICE_MDD_SRC_RX:
+ return "rx";
+ default:
+ return "invalid";
+ }
+}
+
+static int
+ice_mdd_reporter_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_mdd_event *mdd_event = priv_ctx;
+ const char *src;
+
+ if (!mdd_event)
+ return 0;
+
+ src = ice_mdd_src_to_str(mdd_event->src);
+
+ devlink_fmsg_obj_nest_start(fmsg);
+ devlink_fmsg_put(fmsg, "src", src);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, mdd_event, pf_num);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, mdd_event, vf_num);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, mdd_event, event);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, mdd_event, queue);
+ devlink_fmsg_obj_nest_end(fmsg);
+
+ return 0;
+}
+
+/**
+ * ice_report_mdd_event - Report an MDD event through devlink health
+ * @pf: the PF device structure
+ * @src: the HW block that was the source of this MDD event
+ * @pf_num: the pf_num on which the MDD event occurred
+ * @vf_num: the vf_num on which the MDD event occurred
+ * @event: the event type of the MDD event
+ * @queue: the queue on which the MDD event occurred
+ *
+ * Report an MDD event that has occurred on this PF.
+ */
+void ice_report_mdd_event(struct ice_pf *pf, enum ice_mdd_src src, u8 pf_num,
+ u16 vf_num, u8 event, u16 queue)
+{
+ struct ice_mdd_event ev = {
+ .src = src,
+ .pf_num = pf_num,
+ .vf_num = vf_num,
+ .event = event,
+ .queue = queue,
+ };
+
+ ice_devlink_health_report(pf->health_reporters.mdd, "MDD event", &ev);
+}
+
+/**
+ * ice_fmsg_put_ptr - put hex value of pointer into fmsg
+ *
+ * @fmsg: devlink fmsg under construction
+ * @name: name to pass
+ * @ptr: 64 bit value to print as hex and put into fmsg
+ */
+static void ice_fmsg_put_ptr(struct devlink_fmsg *fmsg, const char *name,
+ void *ptr)
+{
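+ /* sizeof(ptr) * 3 leaves room for two hex characters per pointer byte
+ * plus a terminating NUL with slack to spare, e.g. 24 bytes on 64-bit
+ * where "%p" emits at most 16 characters.
+ */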
+ char buf[sizeof(ptr) * 3];
+
+ sprintf(buf, "%p", ptr);
+ devlink_fmsg_put(fmsg, name, buf);
+}
+
+struct ice_tx_hang_event {
+ u32 head;
+ u32 intr;
+ u16 vsi_num;
+ u16 queue;
+ u16 next_to_clean;
+ u16 next_to_use;
+ struct ice_tx_ring *tx_ring;
+};
+
+static int ice_tx_hang_reporter_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_tx_hang_event *event = priv_ctx;
+ struct sk_buff *skb;
+
+ if (!event)
+ return 0;
+
+ skb = event->tx_ring->tx_buf->skb;
+ devlink_fmsg_obj_nest_start(fmsg);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, head);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, intr);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, vsi_num);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, queue);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, next_to_clean);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, next_to_use);
+ devlink_fmsg_put(fmsg, "irq-mapping", event->tx_ring->q_vector->name);
+ ice_fmsg_put_ptr(fmsg, "desc-ptr", event->tx_ring->desc);
+ ice_fmsg_put_ptr(fmsg, "dma-ptr", (void *)(long)event->tx_ring->dma);
+ ice_fmsg_put_ptr(fmsg, "skb-ptr", skb);
+ devlink_fmsg_binary_pair_put(fmsg, "desc", event->tx_ring->desc,
+ event->tx_ring->count * sizeof(struct ice_tx_desc));
+ devlink_fmsg_dump_skb(fmsg, skb);
+ devlink_fmsg_obj_nest_end(fmsg);
+
+ return 0;
+}
+
+void ice_prep_tx_hang_report(struct ice_pf *pf, struct ice_tx_ring *tx_ring,
+ u16 vsi_num, u32 head, u32 intr)
+{
+ struct ice_health_tx_hang_buf *buf = &pf->health_reporters.tx_hang_buf;
+
+ buf->tx_ring = tx_ring;
+ buf->vsi_num = vsi_num;
+ buf->head = head;
+ buf->intr = intr;
+}
+
+void ice_report_tx_hang(struct ice_pf *pf)
+{
+ struct ice_health_tx_hang_buf *buf = &pf->health_reporters.tx_hang_buf;
+ struct ice_tx_ring *tx_ring = buf->tx_ring;
+
+ struct ice_tx_hang_event ev = {
+ .head = buf->head,
+ .intr = buf->intr,
+ .vsi_num = buf->vsi_num,
+ .queue = tx_ring->q_index,
+ .next_to_clean = tx_ring->next_to_clean,
+ .next_to_use = tx_ring->next_to_use,
+ .tx_ring = tx_ring,
+ };
+
+ ice_devlink_health_report(pf->health_reporters.tx_hang, "Tx hang", &ev);
+}
+
+static struct devlink_health_reporter *
+ice_init_devlink_rep(struct ice_pf *pf,
+ const struct devlink_health_reporter_ops *ops)
+{
+ struct devlink *devlink = priv_to_devlink(pf);
+ struct devlink_health_reporter *rep;
+
+ rep = devl_health_reporter_create(devlink, ops, pf);
+ if (IS_ERR(rep)) {
+ struct device *dev = ice_pf_to_dev(pf);
+
+ dev_err(dev, "failed to create devlink %s health report er",
+ ops->name);
+ return NULL;
+ }
+ return rep;
+}
+
+#define ICE_HEALTH_REPORTER_OPS_FIELD(_name, _field) \
+ ._field = ice_##_name##_reporter_##_field,
+
+#define ICE_DEFINE_HEALTH_REPORTER_OPS_1(_name, _field1) \
+ static const struct devlink_health_reporter_ops ice_##_name##_reporter_ops = { \
+ .name = #_name, \
+ ICE_HEALTH_REPORTER_OPS_FIELD(_name, _field1) \
+ }
+
+#define ICE_DEFINE_HEALTH_REPORTER_OPS_2(_name, _field1, _field2) \
+ static const struct devlink_health_reporter_ops ice_##_name##_reporter_ops = { \
+ .name = #_name, \
+ ICE_HEALTH_REPORTER_OPS_FIELD(_name, _field1) \
+ ICE_HEALTH_REPORTER_OPS_FIELD(_name, _field2) \
+ }
+
+ICE_DEFINE_HEALTH_REPORTER_OPS_1(mdd, dump);
+ICE_DEFINE_HEALTH_REPORTER_OPS_1(tx_hang, dump);
+ICE_DEFINE_HEALTH_REPORTER_OPS_2(fw, dump, diagnose);
+ICE_DEFINE_HEALTH_REPORTER_OPS_2(port, dump, diagnose);
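+/*
+ * For reference, ICE_DEFINE_HEALTH_REPORTER_OPS_2(fw, dump, diagnose) above
+ * expands to roughly:
+ *
+ *   static const struct devlink_health_reporter_ops ice_fw_reporter_ops = {
+ *           .name = "fw",
+ *           .dump = ice_fw_reporter_dump,
+ *           .diagnose = ice_fw_reporter_diagnose,
+ *   };
+ */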
+
+/**
+ * ice_health_init - allocate and init all ice devlink health reporters and
+ * accompanying data
+ *
+ * @pf: PF struct
+ */
+void ice_health_init(struct ice_pf *pf)
+{
+ struct ice_health *reps = &pf->health_reporters;
+
+ reps->mdd = ice_init_devlink_rep(pf, &ice_mdd_reporter_ops);
+ reps->tx_hang = ice_init_devlink_rep(pf, &ice_tx_hang_reporter_ops);
+
+ if (ice_is_fw_health_report_supported(&pf->hw)) {
+ reps->fw = ice_init_devlink_rep(pf, &ice_fw_reporter_ops);
+ reps->port = ice_init_devlink_rep(pf, &ice_port_reporter_ops);
+ ice_config_health_events(pf, true);
+ }
+}
+
+/**
+ * ice_deinit_devl_reporter - destroy given devlink health reporter
+ * @reporter: reporter to destroy
+ */
+static void ice_deinit_devl_reporter(struct devlink_health_reporter *reporter)
+{
+ if (reporter)
+ devl_health_reporter_destroy(reporter);
+}
+
+/**
+ * ice_health_deinit - deallocate all ice devlink health reporters and
+ * accompanying data
+ *
+ * @pf: PF struct
+ */
+void ice_health_deinit(struct ice_pf *pf)
+{
+ ice_deinit_devl_reporter(pf->health_reporters.mdd);
+ ice_deinit_devl_reporter(pf->health_reporters.tx_hang);
+ if (ice_is_fw_health_report_supported(&pf->hw)) {
+ ice_deinit_devl_reporter(pf->health_reporters.fw);
+ ice_deinit_devl_reporter(pf->health_reporters.port);
+ ice_config_health_events(pf, false);
+ }
+}
+
+static
+void ice_health_assign_healthy_state(struct devlink_health_reporter *reporter)
+{
+ if (reporter)
+ devlink_health_reporter_state_update(reporter,
+ DEVLINK_HEALTH_REPORTER_STATE_HEALTHY);
+}
+
+/**
+ * ice_health_clear - clear devlink health issues after a reset
+ * @pf: the PF device structure
+ *
+ * Mark the PF in healthy state again after a reset has completed.
+ */
+void ice_health_clear(struct ice_pf *pf)
+{
+ ice_health_assign_healthy_state(pf->health_reporters.mdd);
+ ice_health_assign_healthy_state(pf->health_reporters.tx_hang);
+}
diff --git a/drivers/net/ethernet/intel/ice/devlink/health.h b/drivers/net/ethernet/intel/ice/devlink/health.h
new file mode 100644
index 000000000000..5edfc4d2adce
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/devlink/health.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024, Intel Corporation. */
+
+#ifndef _HEALTH_H_
+#define _HEALTH_H_
+
+#include <linux/types.h>
+
+/**
+ * DOC: health.h
+ *
+ * This header file contains everything needed for the devlink health
+ * mechanism of the ice driver.
+ */
+
+struct ice_aqc_health_status_elem;
+struct ice_pf;
+struct ice_tx_ring;
+struct ice_rq_event_info;
+
+enum ice_mdd_src {
+ ICE_MDD_SRC_TX_PQM,
+ ICE_MDD_SRC_TX_TCLAN,
+ ICE_MDD_SRC_TX_TDPU,
+ ICE_MDD_SRC_RX,
+};
+
+/**
+ * struct ice_health - stores ice devlink health reporters and accompanying data
+ * @fw: devlink health reporter for FW Health Status events
+ * @mdd: devlink health reporter for MDD detection event
+ * @port: devlink health reporter for Port Health Status events
+ * @tx_hang: devlink health reporter for tx_hang event
+ * @tx_hang_buf: pre-allocated place to put info for Tx hang reporter from
+ * non-sleeping context
+ * @tx_ring: ring that the hang occurred on
+ * @head: descriptor head
+ * @intr: interrupt register value
+ * @vsi_num: VSI owning the queue that the hang occurred on
+ * @fw_status: buffer for last received FW Status event
+ * @port_status: buffer for last received Port Status event
+ */
+struct ice_health {
+ struct devlink_health_reporter *fw;
+ struct devlink_health_reporter *mdd;
+ struct devlink_health_reporter *port;
+ struct devlink_health_reporter *tx_hang;
+ struct_group_tagged(ice_health_tx_hang_buf, tx_hang_buf,
+ struct ice_tx_ring *tx_ring;
+ u32 head;
+ u32 intr;
+ u16 vsi_num;
+ );
+ struct ice_aqc_health_status_elem fw_status;
+ struct ice_aqc_health_status_elem port_status;
+};
+
+void ice_process_health_status_event(struct ice_pf *pf,
+ struct ice_rq_event_info *event);
+
+void ice_health_init(struct ice_pf *pf);
+void ice_health_deinit(struct ice_pf *pf);
+void ice_health_clear(struct ice_pf *pf);
+
+void ice_prep_tx_hang_report(struct ice_pf *pf, struct ice_tx_ring *tx_ring,
+ u16 vsi_num, u32 head, u32 intr);
+void ice_report_mdd_event(struct ice_pf *pf, enum ice_mdd_src src, u8 pf_num,
+ u16 vf_num, u8 event, u16 queue);
+void ice_report_tx_hang(struct ice_pf *pf);
+
+#endif /* _HEALTH_H_ */
diff --git a/drivers/net/ethernet/intel/ice/devlink/port.c b/drivers/net/ethernet/intel/ice/devlink/port.c
new file mode 100644
index 000000000000..63fb36fc4b3d
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/devlink/port.c
@@ -0,0 +1,1001 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024, Intel Corporation. */
+
+#include <linux/vmalloc.h>
+
+#include "ice.h"
+#include "devlink.h"
+#include "port.h"
+#include "ice_lib.h"
+#include "ice_fltr.h"
+
+static int ice_active_port_option = -1;
+
+/**
+ * ice_devlink_port_opt_speed_str - convert speed to a string
+ * @speed: speed value
+ */
+static const char *ice_devlink_port_opt_speed_str(u8 speed)
+{
+ switch (speed & ICE_AQC_PORT_OPT_MAX_LANE_M) {
+ case ICE_AQC_PORT_OPT_MAX_LANE_100M:
+ return "0.1";
+ case ICE_AQC_PORT_OPT_MAX_LANE_1G:
+ return "1";
+ case ICE_AQC_PORT_OPT_MAX_LANE_2500M:
+ return "2.5";
+ case ICE_AQC_PORT_OPT_MAX_LANE_5G:
+ return "5";
+ case ICE_AQC_PORT_OPT_MAX_LANE_10G:
+ return "10";
+ case ICE_AQC_PORT_OPT_MAX_LANE_25G:
+ return "25";
+ case ICE_AQC_PORT_OPT_MAX_LANE_40G:
+ return "40";
+ case ICE_AQC_PORT_OPT_MAX_LANE_50G:
+ return "50";
+ case ICE_AQC_PORT_OPT_MAX_LANE_100G:
+ return "100";
+ }
+
+ return "-";
+}
+
+#define ICE_PORT_OPT_DESC_LEN 50
+/**
+ * ice_devlink_port_options_print - Print available port split options
+ * @pf: the PF to print split port options
+ *
+ * Prints a table with available port split options and max port speeds
+ */
+static void ice_devlink_port_options_print(struct ice_pf *pf)
+{
+ u8 i, j, options_count, cnt, speed, pending_idx, active_idx;
+ struct ice_aqc_get_port_options_elem *options, *opt;
+ struct device *dev = ice_pf_to_dev(pf);
+ bool active_valid, pending_valid;
+ char desc[ICE_PORT_OPT_DESC_LEN];
+ const char *str;
+ int status;
+
+ options = kcalloc(ICE_AQC_PORT_OPT_MAX * ICE_MAX_PORT_PER_PCI_DEV,
+ sizeof(*options), GFP_KERNEL);
+ if (!options)
+ return;
+
+ for (i = 0; i < ICE_MAX_PORT_PER_PCI_DEV; i++) {
+ opt = options + i * ICE_AQC_PORT_OPT_MAX;
+ options_count = ICE_AQC_PORT_OPT_MAX;
+ active_valid = 0;
+
+ status = ice_aq_get_port_options(&pf->hw, opt, &options_count,
+ i, true, &active_idx,
+ &active_valid, &pending_idx,
+ &pending_valid);
+ if (status) {
+ dev_dbg(dev, "Couldn't read port option for port %d, err %d\n",
+ i, status);
+ goto err;
+ }
+ }
+
+ dev_dbg(dev, "Available port split options and max port speeds (Gbps):\n");
+ dev_dbg(dev, "Status Split Quad 0 Quad 1\n");
+ dev_dbg(dev, " count L0 L1 L2 L3 L4 L5 L6 L7\n");
+
+ for (i = 0; i < options_count; i++) {
+ cnt = 0;
+
+ if (i == ice_active_port_option)
+ str = "Active";
+ else if ((i == pending_idx) && pending_valid)
+ str = "Pending";
+ else
+ str = "";
+
+ cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt,
+ "%-8s", str);
+
+ cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt,
+ "%-6u", options[i].pmd);
+
+ for (j = 0; j < ICE_MAX_PORT_PER_PCI_DEV; ++j) {
+ speed = options[i + j * ICE_AQC_PORT_OPT_MAX].max_lane_speed;
+ str = ice_devlink_port_opt_speed_str(speed);
+ cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt,
+ "%3s ", str);
+ }
+
+ dev_dbg(dev, "%s\n", desc);
+ }
+
+err:
+ kfree(options);
+}
+
+/**
+ * ice_devlink_aq_set_port_option - Send set port option admin queue command
+ * @pf: the PF to print split port options
+ * @option_idx: selected port option
+ * @extack: extended netdev ack structure
+ *
+ * Sends set port option admin queue command with selected port option and
+ * calls NVM write activate.
+ */
+static int
+ice_devlink_aq_set_port_option(struct ice_pf *pf, u8 option_idx,
+ struct netlink_ext_ack *extack)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ int status;
+
+ status = ice_aq_set_port_option(&pf->hw, 0, true, option_idx);
+ if (status) {
+ dev_dbg(dev, "ice_aq_set_port_option, err %d aq_err %d\n",
+ status, pf->hw.adminq.sq_last_status);
+ NL_SET_ERR_MSG_MOD(extack, "Port split request failed");
+ return -EIO;
+ }
+
+ status = ice_acquire_nvm(&pf->hw, ICE_RES_WRITE);
+ if (status) {
+ dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
+ status, pf->hw.adminq.sq_last_status);
+ NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
+ return -EIO;
+ }
+
+ status = ice_nvm_write_activate(&pf->hw, ICE_AQC_NVM_ACTIV_REQ_EMPR, NULL);
+ if (status) {
+ dev_dbg(dev, "ice_nvm_write_activate failed, err %d aq_err %d\n",
+ status, pf->hw.adminq.sq_last_status);
+ NL_SET_ERR_MSG_MOD(extack, "Port split request failed to save data");
+ ice_release_nvm(&pf->hw);
+ return -EIO;
+ }
+
+ ice_release_nvm(&pf->hw);
+
+ NL_SET_ERR_MSG_MOD(extack, "Reboot required to finish port split");
+ return 0;
+}
+
+/**
+ * ice_devlink_port_split - .port_split devlink handler
+ * @devlink: devlink instance structure
+ * @port: devlink port structure
+ * @count: number of ports to split to
+ * @extack: extended netdev ack structure
+ *
+ * Callback for the devlink .port_split operation.
+ *
+ * Unfortunately, the devlink expression of available options is limited
+ * to just a number, so search for an FW port option which supports
+ * the specified number. As there could be multiple FW port options with
+ * the same port split count, allow switching between them. When the same
+ * port split count request is issued again, switch to the next FW port
+ * option with the same port split count.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_split(struct devlink *devlink, struct devlink_port *port,
+ unsigned int count, struct netlink_ext_ack *extack)
+{
+ struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX];
+ u8 i, j, active_idx, pending_idx, new_option;
+ struct ice_pf *pf = devlink_priv(devlink);
+ u8 option_count = ICE_AQC_PORT_OPT_MAX;
+ struct device *dev = ice_pf_to_dev(pf);
+ bool active_valid, pending_valid;
+ int status;
+
+ status = ice_aq_get_port_options(&pf->hw, options, &option_count,
+ 0, true, &active_idx, &active_valid,
+ &pending_idx, &pending_valid);
+ if (status) {
+ dev_dbg(dev, "Couldn't read port split options, err = %d\n",
+ status);
+ NL_SET_ERR_MSG_MOD(extack, "Failed to get available port split options");
+ return -EIO;
+ }
+
+ new_option = ICE_AQC_PORT_OPT_MAX;
+ active_idx = pending_valid ? pending_idx : active_idx;
+ for (i = 1; i <= option_count; i++) {
+ /* In order to allow switching between FW port options with
+ * the same port split count, search for a new option starting
+ * from the active/pending option (with array wrap around).
+ */
+ j = (active_idx + i) % option_count;
+
+ if (count == options[j].pmd) {
+ new_option = j;
+ break;
+ }
+ }
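+ /* Illustrative walk-through: with option_count == 3 and active_idx == 1,
+ * the loop above probes j == 2, 0, 1 in that order. A repeated request
+ * for the already active split count therefore either switches to a
+ * different FW option with the same count or lands back on active_idx
+ * and is rejected below.
+ */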
+
+ if (new_option == active_idx) {
+ dev_dbg(dev, "request to split: count: %u is already set and there are no other options\n",
+ count);
+ NL_SET_ERR_MSG_MOD(extack, "Requested split count is already set");
+ ice_devlink_port_options_print(pf);
+ return -EINVAL;
+ }
+
+ if (new_option == ICE_AQC_PORT_OPT_MAX) {
+ dev_dbg(dev, "request to split: count: %u not found\n", count);
+ NL_SET_ERR_MSG_MOD(extack, "Port split requested unsupported port config");
+ ice_devlink_port_options_print(pf);
+ return -EINVAL;
+ }
+
+ status = ice_devlink_aq_set_port_option(pf, new_option, extack);
+ if (status)
+ return status;
+
+ ice_devlink_port_options_print(pf);
+
+ return 0;
+}
+
+/**
+ * ice_devlink_port_unsplit - .port_unsplit devlink handler
+ * @devlink: devlink instance structure
+ * @port: devlink port structure
+ * @extack: extended netdev ack structure
+ *
+ * Callback for the devlink .port_unsplit operation.
+ * Calls ice_devlink_port_split with split count set to 1.
+ * There could be no FW option available with split count 1.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_unsplit(struct devlink *devlink, struct devlink_port *port,
+ struct netlink_ext_ack *extack)
+{
+ return ice_devlink_port_split(devlink, port, 1, extack);
+}
+
+/**
+ * ice_devlink_set_port_split_options - Set port split options
+ * @pf: the PF to set port split options
+ * @attrs: devlink attributes
+ *
+ * Sets devlink port split options based on available FW port options
+ */
+static void
+ice_devlink_set_port_split_options(struct ice_pf *pf,
+ struct devlink_port_attrs *attrs)
+{
+ struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX];
+ u8 i, active_idx, pending_idx, option_count = ICE_AQC_PORT_OPT_MAX;
+ bool active_valid, pending_valid;
+ int status;
+
+ status = ice_aq_get_port_options(&pf->hw, options, &option_count,
+ 0, true, &active_idx, &active_valid,
+ &pending_idx, &pending_valid);
+ if (status) {
+ dev_dbg(ice_pf_to_dev(pf), "Couldn't read port split options, err = %d\n",
+ status);
+ return;
+ }
+
+ /* find the biggest available port split count */
+ for (i = 0; i < option_count; i++)
+ attrs->lanes = max_t(int, attrs->lanes, options[i].pmd);
+
+ attrs->splittable = attrs->lanes ? 1 : 0;
+ ice_active_port_option = active_idx;
+}
+
+static const struct devlink_port_ops ice_devlink_port_ops = {
+ .port_split = ice_devlink_port_split,
+ .port_unsplit = ice_devlink_port_unsplit,
+};
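+/*
+ * An illustrative userspace sequence driving the ops above (PCI address and
+ * port index are placeholders):
+ *
+ *   devlink port split pci/0000:3b:00.0/0 count 4
+ *   devlink port unsplit pci/0000:3b:00.0/0
+ */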
+
+/**
+ * ice_devlink_set_switch_id - Set unique switch id based on pci dsn
+ * @pf: the PF to create a devlink port for
+ * @ppid: struct with switch id information
+ */
+static void
+ice_devlink_set_switch_id(struct ice_pf *pf, struct netdev_phys_item_id *ppid)
+{
+ struct pci_dev *pdev = pf->pdev;
+ u64 id;
+
+ id = pci_get_dsn(pdev);
+
+ ppid->id_len = sizeof(id);
+ put_unaligned_be64(id, &ppid->id);
+}
+
+/**
+ * ice_devlink_create_pf_port - Create a devlink port for this PF
+ * @pf: the PF to create a devlink port for
+ *
+ * Create and register a devlink_port for this PF.
+ * This function has to be called under devl_lock.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int ice_devlink_create_pf_port(struct ice_pf *pf)
+{
+ struct devlink_port_attrs attrs = {};
+ struct devlink_port *devlink_port;
+ struct devlink *devlink;
+ struct ice_vsi *vsi;
+ struct device *dev;
+ int err;
+
+ devlink = priv_to_devlink(pf);
+
+ dev = ice_pf_to_dev(pf);
+
+ devlink_port = &pf->devlink_port;
+
+ vsi = ice_get_main_vsi(pf);
+ if (!vsi)
+ return -EIO;
+
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ attrs.phys.port_number = pf->hw.pf_id;
+
+ /* As FW supports only port split options for whole device,
+ * set port split options only for first PF.
+ */
+ if (pf->hw.pf_id == 0)
+ ice_devlink_set_port_split_options(pf, &attrs);
+
+ ice_devlink_set_switch_id(pf, &attrs.switch_id);
+
+ devlink_port_attrs_set(devlink_port, &attrs);
+
+ err = devl_port_register_with_ops(devlink, devlink_port, vsi->idx,
+ &ice_devlink_port_ops);
+ if (err) {
+ dev_err(dev, "Failed to create devlink port for PF %d, error %d\n",
+ pf->hw.pf_id, err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_devlink_destroy_pf_port - Destroy the devlink_port for this PF
+ * @pf: the PF to cleanup
+ *
+ * Unregisters the devlink_port structure associated with this PF.
+ * This function has to be called under devl_lock.
+ */
+void ice_devlink_destroy_pf_port(struct ice_pf *pf)
+{
+ devl_port_unregister(&pf->devlink_port);
+}
+
+/**
+ * ice_devlink_port_get_vf_fn_mac - .port_fn_hw_addr_get devlink handler
+ * @port: devlink port structure
+ * @hw_addr: MAC address of the port
+ * @hw_addr_len: length of MAC address
+ * @extack: extended netdev ack structure
+ *
+ * Callback for the devlink .port_fn_hw_addr_get operation
+ * Return: zero on success or an error code on failure.
+ */
+static int ice_devlink_port_get_vf_fn_mac(struct devlink_port *port,
+ u8 *hw_addr, int *hw_addr_len,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_vf *vf = container_of(port, struct ice_vf, devlink_port);
+
+ ether_addr_copy(hw_addr, vf->dev_lan_addr);
+ *hw_addr_len = ETH_ALEN;
+
+ return 0;
+}
+
+/**
+ * ice_devlink_port_set_vf_fn_mac - .port_fn_hw_addr_set devlink handler
+ * @port: devlink port structure
+ * @hw_addr: MAC address of the port
+ * @hw_addr_len: length of MAC address
+ * @extack: extended netdev ack structure
+ *
+ * Callback for the devlink .port_fn_hw_addr_set operation
+ * Return: zero on success or an error code on failure.
+ */
+static int ice_devlink_port_set_vf_fn_mac(struct devlink_port *port,
+ const u8 *hw_addr,
+ int hw_addr_len,
+ struct netlink_ext_ack *extack)
+{
+ struct devlink_port_attrs *attrs = &port->attrs;
+ struct devlink_port_pci_vf_attrs *pci_vf;
+ struct devlink *devlink = port->devlink;
+ struct ice_pf *pf;
+ u16 vf_id;
+
+ pf = devlink_priv(devlink);
+ pci_vf = &attrs->pci_vf;
+ vf_id = pci_vf->vf;
+
+ return __ice_set_vf_mac(pf, vf_id, hw_addr);
+}
+
+static const struct devlink_port_ops ice_devlink_vf_port_ops = {
+ .port_fn_hw_addr_get = ice_devlink_port_get_vf_fn_mac,
+ .port_fn_hw_addr_set = ice_devlink_port_set_vf_fn_mac,
+};
+
+/**
+ * ice_devlink_create_vf_port - Create a devlink port for this VF
+ * @vf: the VF to create a port for
+ *
+ * Create and register a devlink_port for this VF.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int ice_devlink_create_vf_port(struct ice_vf *vf)
+{
+ struct devlink_port_attrs attrs = {};
+ struct devlink_port *devlink_port;
+ struct devlink *devlink;
+ struct ice_vsi *vsi;
+ struct device *dev;
+ struct ice_pf *pf;
+ int err;
+
+ pf = vf->pf;
+ dev = ice_pf_to_dev(pf);
+ devlink_port = &vf->devlink_port;
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi)
+ return -EINVAL;
+
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF;
+ attrs.pci_vf.pf = pf->hw.pf_id;
+ attrs.pci_vf.vf = vf->vf_id;
+
+ ice_devlink_set_switch_id(pf, &attrs.switch_id);
+
+ devlink_port_attrs_set(devlink_port, &attrs);
+ devlink = priv_to_devlink(pf);
+
+ err = devl_port_register_with_ops(devlink, devlink_port, vsi->idx,
+ &ice_devlink_vf_port_ops);
+ if (err) {
+ dev_err(dev, "Failed to create devlink port for VF %d, error %d\n",
+ vf->vf_id, err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_devlink_destroy_vf_port - Destroy the devlink_port for this VF
+ * @vf: the VF to cleanup
+ *
+ * Unregisters the devlink_port structure associated with this VF.
+ */
+void ice_devlink_destroy_vf_port(struct ice_vf *vf)
+{
+ devl_rate_leaf_destroy(&vf->devlink_port);
+ devl_port_unregister(&vf->devlink_port);
+}
+
+/**
+ * ice_devlink_create_sf_dev_port - Register virtual port for a subfunction
+ * @sf_dev: the subfunction device to create a devlink port for
+ *
+ * Register virtual flavour devlink port for the subfunction auxiliary device
+ * created after activating a dynamically added devlink port.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int ice_devlink_create_sf_dev_port(struct ice_sf_dev *sf_dev)
+{
+ struct devlink_port_attrs attrs = {};
+ struct ice_dynamic_port *dyn_port;
+ struct devlink_port *devlink_port;
+ struct devlink *devlink;
+ struct ice_vsi *vsi;
+
+ dyn_port = sf_dev->dyn_port;
+ vsi = dyn_port->vsi;
+
+ devlink_port = &sf_dev->priv->devlink_port;
+
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_VIRTUAL;
+
+ devlink_port_attrs_set(devlink_port, &attrs);
+ devlink = priv_to_devlink(sf_dev->priv);
+
+ return devl_port_register(devlink, devlink_port, vsi->idx);
+}
+
+/**
+ * ice_devlink_destroy_sf_dev_port - Destroy virtual port for a subfunction
+ * @sf_dev: the subfunction device to create a devlink port for
+ *
+ * Unregisters the virtual port associated with this subfunction.
+ */
+void ice_devlink_destroy_sf_dev_port(struct ice_sf_dev *sf_dev)
+{
+ devl_port_unregister(&sf_dev->priv->devlink_port);
+}
+
+/**
+ * ice_activate_dynamic_port - Activate a dynamic port
+ * @dyn_port: dynamic port instance to activate
+ * @extack: extack for reporting error messages
+ *
+ * Activate the dynamic port based on its flavour.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_activate_dynamic_port(struct ice_dynamic_port *dyn_port,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ if (dyn_port->active)
+ return 0;
+
+ err = ice_sf_eth_activate(dyn_port, extack);
+ if (err)
+ return err;
+
+ dyn_port->active = true;
+
+ return 0;
+}
+
+/**
+ * ice_deactivate_dynamic_port - Deactivate a dynamic port
+ * @dyn_port: dynamic port instance to deactivate
+ *
+ * Undo activation of a dynamic port.
+ */
+static void ice_deactivate_dynamic_port(struct ice_dynamic_port *dyn_port)
+{
+ if (!dyn_port->active)
+ return;
+
+ ice_sf_eth_deactivate(dyn_port);
+ dyn_port->active = false;
+}
+
+/**
+ * ice_dealloc_dynamic_port - Deallocate and remove a dynamic port
+ * @dyn_port: dynamic port instance to deallocate
+ *
+ * Free resources associated with a dynamically added devlink port. Will
+ * deactivate the port if it's currently active.
+ */
+static void ice_dealloc_dynamic_port(struct ice_dynamic_port *dyn_port)
+{
+ struct devlink_port *devlink_port = &dyn_port->devlink_port;
+ struct ice_pf *pf = dyn_port->pf;
+
+ ice_deactivate_dynamic_port(dyn_port);
+
+ xa_erase(&pf->sf_nums, devlink_port->attrs.pci_sf.sf);
+ ice_eswitch_detach_sf(pf, dyn_port);
+ ice_vsi_free(dyn_port->vsi);
+ xa_erase(&pf->dyn_ports, dyn_port->vsi->idx);
+ kfree(dyn_port);
+}
+
+/**
+ * ice_dealloc_all_dynamic_ports - Deallocate all dynamic devlink ports
+ * @pf: pointer to the pf structure
+ */
+void ice_dealloc_all_dynamic_ports(struct ice_pf *pf)
+{
+ struct ice_dynamic_port *dyn_port;
+ unsigned long index;
+
+ xa_for_each(&pf->dyn_ports, index, dyn_port)
+ ice_dealloc_dynamic_port(dyn_port);
+}
+
+/**
+ * ice_devlink_port_new_check_attr - Check that new port attributes are valid
+ * @pf: pointer to the PF structure
+ * @new_attr: the attributes for the new port
+ * @extack: extack for reporting error messages
+ *
+ * Check that the attributes for the new port are valid before continuing to
+ * allocate the devlink port.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_new_check_attr(struct ice_pf *pf,
+ const struct devlink_port_new_attrs *new_attr,
+ struct netlink_ext_ack *extack)
+{
+ if (new_attr->flavour != DEVLINK_PORT_FLAVOUR_PCI_SF) {
+ NL_SET_ERR_MSG_MOD(extack, "Flavour other than pcisf is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (new_attr->controller_valid) {
+ NL_SET_ERR_MSG_MOD(extack, "Setting controller is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (new_attr->port_index_valid) {
+ NL_SET_ERR_MSG_MOD(extack, "Driver does not support user defined port index assignment");
+ return -EOPNOTSUPP;
+ }
+
+ if (new_attr->pfnum != pf->hw.pf_id) {
+ NL_SET_ERR_MSG_MOD(extack, "Incorrect pfnum supplied");
+ return -EINVAL;
+ }
+
+ if (!pci_msix_can_alloc_dyn(pf->pdev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Dynamic MSIX-X interrupt allocation is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_devlink_port_del - devlink handler for port delete
+ * @devlink: pointer to devlink
+ * @port: devlink port to be deleted
+ * @extack: pointer to extack
+ *
+ * Deletes devlink port and deallocates all resources associated with
+ * created subfunction.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_del(struct devlink *devlink, struct devlink_port *port,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dynamic_port *dyn_port;
+
+ dyn_port = ice_devlink_port_to_dyn(port);
+ ice_dealloc_dynamic_port(dyn_port);
+
+ return 0;
+}
+
+/**
+ * ice_devlink_port_fn_hw_addr_set - devlink handler for mac address set
+ * @port: pointer to devlink port
+ * @hw_addr: hw address to set
+ * @hw_addr_len: hw address length
+ * @extack: extack for reporting error messages
+ *
+ * Sets mac address for the port, verifies arguments and copies address
+ * to the subfunction structure.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_fn_hw_addr_set(struct devlink_port *port, const u8 *hw_addr,
+ int hw_addr_len,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dynamic_port *dyn_port;
+
+ dyn_port = ice_devlink_port_to_dyn(port);
+
+ if (dyn_port->attached) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Ethernet address can be change only in detached state");
+ return -EBUSY;
+ }
+
+ if (hw_addr_len != ETH_ALEN || !is_valid_ether_addr(hw_addr)) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid ethernet address");
+ return -EADDRNOTAVAIL;
+ }
+
+ ether_addr_copy(dyn_port->hw_addr, hw_addr);
+
+ return 0;
+}
+
+/**
+ * ice_devlink_port_fn_hw_addr_get - devlink handler for mac address get
+ * @port: pointer to devlink port
+ * @hw_addr: buffer to return the hw address
+ * @hw_addr_len: hw address length
+ * @extack: extack for reporting error messages
+ *
+ * Returns mac address for the port.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_fn_hw_addr_get(struct devlink_port *port, u8 *hw_addr,
+ int *hw_addr_len,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dynamic_port *dyn_port;
+
+ dyn_port = ice_devlink_port_to_dyn(port);
+
+ ether_addr_copy(hw_addr, dyn_port->hw_addr);
+ *hw_addr_len = ETH_ALEN;
+
+ return 0;
+}
+
+/**
+ * ice_devlink_port_fn_state_set - devlink handler for port state set
+ * @port: pointer to devlink port
+ * @state: state to set
+ * @extack: extack for reporting error messages
+ *
+ * Activates or deactivates the port.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_fn_state_set(struct devlink_port *port,
+ enum devlink_port_fn_state state,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dynamic_port *dyn_port;
+
+ dyn_port = ice_devlink_port_to_dyn(port);
+
+ switch (state) {
+ case DEVLINK_PORT_FN_STATE_ACTIVE:
+ return ice_activate_dynamic_port(dyn_port, extack);
+
+ case DEVLINK_PORT_FN_STATE_INACTIVE:
+ ice_deactivate_dynamic_port(dyn_port);
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_devlink_port_fn_state_get - devlink handler for port state get
+ * @port: pointer to devlink port
+ * @state: admin configured state of the port
+ * @opstate: current port operational state
+ * @extack: extack for reporting error messages
+ *
+ * Gets port state.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_fn_state_get(struct devlink_port *port,
+ enum devlink_port_fn_state *state,
+ enum devlink_port_fn_opstate *opstate,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dynamic_port *dyn_port;
+
+ dyn_port = ice_devlink_port_to_dyn(port);
+
+ if (dyn_port->active)
+ *state = DEVLINK_PORT_FN_STATE_ACTIVE;
+ else
+ *state = DEVLINK_PORT_FN_STATE_INACTIVE;
+
+ if (dyn_port->attached)
+ *opstate = DEVLINK_PORT_FN_OPSTATE_ATTACHED;
+ else
+ *opstate = DEVLINK_PORT_FN_OPSTATE_DETACHED;
+
+ return 0;
+}
+
+static const struct devlink_port_ops ice_devlink_port_sf_ops = {
+ .port_del = ice_devlink_port_del,
+ .port_fn_hw_addr_get = ice_devlink_port_fn_hw_addr_get,
+ .port_fn_hw_addr_set = ice_devlink_port_fn_hw_addr_set,
+ .port_fn_state_get = ice_devlink_port_fn_state_get,
+ .port_fn_state_set = ice_devlink_port_fn_state_set,
+};
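+/*
+ * Illustrative userspace counterparts of the ops above (PCI address, port
+ * index and MAC address are placeholders):
+ *
+ *   devlink port function set pci/0000:3b:00.0/1 hw_addr 02:11:22:33:44:55
+ *   devlink port function set pci/0000:3b:00.0/1 state active
+ *   devlink port del pci/0000:3b:00.0/1
+ */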
+
+/**
+ * ice_reserve_sf_num - Reserve a subfunction number for this port
+ * @pf: pointer to the pf structure
+ * @new_attr: devlink port attributes requested
+ * @extack: extack for reporting error messages
+ * @sfnum: on success, the sf number reserved
+ *
+ * Reserve a subfunction number for this port. Only called for
+ * DEVLINK_PORT_FLAVOUR_PCI_SF ports.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_reserve_sf_num(struct ice_pf *pf,
+ const struct devlink_port_new_attrs *new_attr,
+ struct netlink_ext_ack *extack, u32 *sfnum)
+{
+ int err;
+
+ /* If user didn't request an explicit number, pick one */
+ if (!new_attr->sfnum_valid)
+ return xa_alloc(&pf->sf_nums, sfnum, NULL, xa_limit_32b,
+ GFP_KERNEL);
+
+ /* Otherwise, check and use the number provided */
+ err = xa_insert(&pf->sf_nums, new_attr->sfnum, NULL, GFP_KERNEL);
+ if (err) {
+ if (err == -EBUSY)
+ NL_SET_ERR_MSG_MOD(extack, "Subfunction with given sfnum already exists");
+ return err;
+ }
+
+ *sfnum = new_attr->sfnum;
+
+ return 0;
+}
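+/*
+ * The sfnum handled above typically originates from an iproute2 request such
+ * as (illustrative, the PCI address is a placeholder):
+ *
+ *   devlink port add pci/0000:3b:00.0 flavour pcisf pfnum 0 sfnum 11
+ *
+ * When sfnum is omitted, xa_alloc() picks the next free number instead.
+ */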
+
+/**
+ * ice_devlink_create_sf_port - Register PCI subfunction devlink port
+ * @dyn_port: the dynamic port instance structure for this subfunction
+ *
+ * Register PCI subfunction flavour devlink port for a dynamically added
+ * subfunction port.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int ice_devlink_create_sf_port(struct ice_dynamic_port *dyn_port)
+{
+ struct devlink_port_attrs attrs = {};
+ struct devlink_port *devlink_port;
+ struct devlink *devlink;
+ struct ice_vsi *vsi;
+ struct ice_pf *pf;
+
+ vsi = dyn_port->vsi;
+ pf = dyn_port->pf;
+
+ devlink_port = &dyn_port->devlink_port;
+
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_SF;
+ attrs.pci_sf.pf = pf->hw.pf_id;
+ attrs.pci_sf.sf = dyn_port->sfnum;
+
+ devlink_port_attrs_set(devlink_port, &attrs);
+ devlink = priv_to_devlink(pf);
+
+ return devl_port_register_with_ops(devlink, devlink_port, vsi->idx,
+ &ice_devlink_port_sf_ops);
+}
+
+/**
+ * ice_devlink_destroy_sf_port - Destroy the devlink_port for this SF
+ * @dyn_port: the dynamic port instance structure for this subfunction
+ *
+ * Unregisters the devlink_port structure associated with this SF.
+ */
+void ice_devlink_destroy_sf_port(struct ice_dynamic_port *dyn_port)
+{
+ devl_rate_leaf_destroy(&dyn_port->devlink_port);
+ devl_port_unregister(&dyn_port->devlink_port);
+}
+
+/**
+ * ice_alloc_dynamic_port - Allocate new dynamic port
+ * @pf: pointer to the pf structure
+ * @new_attr: devlink port attributes requested
+ * @extack: extack for reporting error messages
+ * @devlink_port: index of newly created devlink port
+ *
+ * Allocate a new dynamic port instance and prepare it for configuration
+ * with devlink.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_alloc_dynamic_port(struct ice_pf *pf,
+ const struct devlink_port_new_attrs *new_attr,
+ struct netlink_ext_ack *extack,
+ struct devlink_port **devlink_port)
+{
+ struct ice_dynamic_port *dyn_port;
+ struct ice_vsi *vsi;
+ u32 sfnum;
+ int err;
+
+ err = ice_reserve_sf_num(pf, new_attr, extack, &sfnum);
+ if (err)
+ return err;
+
+ dyn_port = kzalloc(sizeof(*dyn_port), GFP_KERNEL);
+ if (!dyn_port) {
+ err = -ENOMEM;
+ goto unroll_reserve_sf_num;
+ }
+
+ vsi = ice_vsi_alloc(pf);
+ if (!vsi) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to allocate VSI");
+ err = -ENOMEM;
+ goto unroll_dyn_port_alloc;
+ }
+
+ dyn_port->vsi = vsi;
+ dyn_port->pf = pf;
+ dyn_port->sfnum = sfnum;
+ eth_random_addr(dyn_port->hw_addr);
+
+ err = xa_insert(&pf->dyn_ports, vsi->idx, dyn_port, GFP_KERNEL);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Port index reservation failed");
+ goto unroll_vsi_alloc;
+ }
+
+ err = ice_eswitch_attach_sf(pf, dyn_port);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to attach SF to eswitch");
+ goto unroll_xa_insert;
+ }
+
+ *devlink_port = &dyn_port->devlink_port;
+
+ return 0;
+
+unroll_xa_insert:
+ xa_erase(&pf->dyn_ports, vsi->idx);
+unroll_vsi_alloc:
+ ice_vsi_free(vsi);
+unroll_dyn_port_alloc:
+ kfree(dyn_port);
+unroll_reserve_sf_num:
+ xa_erase(&pf->sf_nums, sfnum);
+
+ return err;
+}
+
+/**
+ * ice_devlink_port_new - devlink handler for the new port
+ * @devlink: pointer to devlink
+ * @new_attr: pointer to the port new attributes
+ * @extack: extack for reporting error messages
+ * @devlink_port: pointer to a new port
+ *
+ * Creates a new devlink port, checks the new port attributes, rejects any
+ * unsupported parameters and allocates a new subfunction for that port.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int
+ice_devlink_port_new(struct devlink *devlink,
+ const struct devlink_port_new_attrs *new_attr,
+ struct netlink_ext_ack *extack,
+ struct devlink_port **devlink_port)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+ int err;
+
+ err = ice_devlink_port_new_check_attr(pf, new_attr, extack);
+ if (err)
+ return err;
+
+ if (!ice_is_eswitch_mode_switchdev(pf)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "SF ports are only supported in eswitch switchdev mode");
+ return -EOPNOTSUPP;
+ }
+
+ return ice_alloc_dynamic_port(pf, new_attr, extack, devlink_port);
+}
diff --git a/drivers/net/ethernet/intel/ice/devlink/port.h b/drivers/net/ethernet/intel/ice/devlink/port.h
new file mode 100644
index 000000000000..e89ddd60eeac
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/devlink/port.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024, Intel Corporation. */
+
+#ifndef _DEVLINK_PORT_H_
+#define _DEVLINK_PORT_H_
+
+#include "../ice.h"
+#include "../ice_sf_eth.h"
+
+/**
+ * struct ice_dynamic_port - Track dynamically added devlink port instance
+ * @hw_addr: the HW address for this port
+ * @active: true if the port has been activated
+ * @attached: true if the port is attached
+ * @devlink_port: the associated devlink port structure
+ * @pf: pointer to the PF private structure
+ * @vsi: the VSI associated with this port
+ * @repr_id: the representor ID
+ * @sfnum: the subfunction ID
+ * @sf_dev: pointer to the subfunction device
+ *
+ * An instance of a dynamically added devlink port. Each port flavour keeps
+ * its implementation-specific data in the union at the end of the structure.
+ */
+struct ice_dynamic_port {
+ u8 hw_addr[ETH_ALEN];
+ u8 active: 1;
+ u8 attached: 1;
+ struct devlink_port devlink_port;
+ struct ice_pf *pf;
+ struct ice_vsi *vsi;
+ unsigned long repr_id;
+ u32 sfnum;
+ /* Flavour-specific implementation data */
+ union {
+ struct ice_sf_dev *sf_dev;
+ };
+};
+
+void ice_dealloc_all_dynamic_ports(struct ice_pf *pf);
+
+int ice_devlink_create_pf_port(struct ice_pf *pf);
+void ice_devlink_destroy_pf_port(struct ice_pf *pf);
+int ice_devlink_create_vf_port(struct ice_vf *vf);
+void ice_devlink_destroy_vf_port(struct ice_vf *vf);
+int ice_devlink_create_sf_port(struct ice_dynamic_port *dyn_port);
+void ice_devlink_destroy_sf_port(struct ice_dynamic_port *dyn_port);
+int ice_devlink_create_sf_dev_port(struct ice_sf_dev *sf_dev);
+void ice_devlink_destroy_sf_dev_port(struct ice_sf_dev *sf_dev);
+
+#define ice_devlink_port_to_dyn(port) \
+ container_of(port, struct ice_dynamic_port, devlink_port)
+
+int
+ice_devlink_port_new(struct devlink *devlink,
+ const struct devlink_port_new_attrs *new_attr,
+ struct netlink_ext_ack *extack,
+ struct devlink_port **devlink_port);
+#endif /* _DEVLINK_PORT_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 365c03d1c462..147aaee192a7 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -67,6 +67,7 @@
#include "ice_sriov.h"
#include "ice_vf_mbx.h"
#include "ice_ptp.h"
+#include "ice_tspll.h"
#include "ice_fdir.h"
#include "ice_xsk.h"
#include "ice_arfs.h"
@@ -77,11 +78,17 @@
#include "ice_gnss.h"
#include "ice_irq.h"
#include "ice_dpll.h"
+#include "ice_adapter.h"
+#include "devlink/health.h"
#define ICE_BAR0 0
#define ICE_REQ_DESC_MULTIPLE 32
#define ICE_MIN_NUM_DESC 64
-#define ICE_MAX_NUM_DESC 8160
+#define ICE_MAX_NUM_DESC_E810 8160
+#define ICE_MAX_NUM_DESC_E830 8096
+#define ICE_MAX_NUM_DESC_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? \
+ ICE_MAX_NUM_DESC_E830 : \
+ ICE_MAX_NUM_DESC_E810)
#define ICE_DFLT_MIN_RX_DESC 512
#define ICE_DFLT_NUM_TX_DESC 256
#define ICE_DFLT_NUM_RX_DESC 2048
@@ -95,9 +102,6 @@
#define ICE_MIN_LAN_OICR_MSIX 1
#define ICE_MIN_MSIX (ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_LAN_OICR_MSIX)
#define ICE_FDIR_MSIX 2
-#define ICE_RDMA_NUM_AEQ_MSIX 4
-#define ICE_MIN_RDMA_MSIX 2
-#define ICE_ESWITCH_MSIX 1
#define ICE_NO_VSI 0xffff
#define ICE_VSI_MAP_CONTIG 0
#define ICE_VSI_MAP_SCATTER 1
@@ -180,11 +184,9 @@
#define ice_for_each_chnl_tc(i) \
for ((i) = ICE_CHNL_START_TC; (i) < ICE_CHNL_MAX_TC; (i)++)
-#define ICE_UCAST_PROMISC_BITS (ICE_PROMISC_UCAST_TX | ICE_PROMISC_UCAST_RX)
+#define ICE_UCAST_PROMISC_BITS ICE_PROMISC_UCAST_RX
-#define ICE_UCAST_VLAN_PROMISC_BITS (ICE_PROMISC_UCAST_TX | \
- ICE_PROMISC_UCAST_RX | \
- ICE_PROMISC_VLAN_TX | \
+#define ICE_UCAST_VLAN_PROMISC_BITS (ICE_PROMISC_UCAST_RX | \
ICE_PROMISC_VLAN_RX)
#define ICE_MCAST_PROMISC_BITS (ICE_PROMISC_MCAST_TX | ICE_PROMISC_MCAST_RX)
@@ -196,16 +198,18 @@
#define ice_pf_to_dev(pf) (&((pf)->pdev->dev))
-#define ice_pf_src_tmr_owned(pf) ((pf)->hw.func_caps.ts_func_info.src_tmr_owned)
-
enum ice_feature {
ICE_F_DSCP,
ICE_F_PHY_RCLK,
ICE_F_SMA_CTRL,
ICE_F_CGU,
ICE_F_GNSS,
+ ICE_F_TXTIME,
+ ICE_F_GCS,
ICE_F_ROCE_LAG,
ICE_F_SRIOV_LAG,
+ ICE_F_SRIOV_AA_LAG,
+ ICE_F_MBX_LIMIT,
ICE_F_MAX
};
@@ -317,6 +321,7 @@ enum ice_vsi_state {
ICE_VSI_UMAC_FLTR_CHANGED,
ICE_VSI_MMAC_FLTR_CHANGED,
ICE_VSI_PROMISC_CHANGED,
+ ICE_VSI_REBUILD_PENDING,
ICE_VSI_STATE_NBITS /* must be last */
};
@@ -330,7 +335,6 @@ struct ice_vsi {
struct net_device *netdev;
struct ice_sw *vsw; /* switch this VSI is on */
struct ice_pf *back; /* back pointer to PF */
- struct ice_port_info *port_info; /* back pointer to port_info */
struct ice_rx_ring **rx_rings; /* Rx ring array */
struct ice_tx_ring **tx_rings; /* Tx ring array */
struct ice_q_vector **q_vectors; /* q_vector array */
@@ -347,13 +351,11 @@ struct ice_vsi {
u16 num_q_vectors;
/* tell if only dynamic irq allocation is allowed */
bool irq_dyn_alloc;
+ bool hsplit:1;
- enum ice_vsi_type type;
u16 vsi_num; /* HW (absolute) index of this VSI */
u16 idx; /* software index in pf->vsi[] */
- struct ice_vf *vf; /* VF associated with this VSI */
-
u16 num_gfltr;
u16 num_bfltr;
@@ -374,7 +376,6 @@ struct ice_vsi {
atomic_t *arfs_last_fltr_id;
u16 max_frame;
- u16 rx_buf_len;
struct ice_aqc_vsi_props info; /* VSI properties */
struct ice_vsi_vlan_info vlan_info; /* vlan config to be restored */
@@ -408,13 +409,12 @@ struct ice_vsi {
u16 req_rxq; /* User requested Rx queues */
u16 num_rx_desc;
u16 num_tx_desc;
- u16 qset_handle[ICE_MAX_TRAFFIC_CLASS];
struct ice_tc_cfg tc_cfg;
struct bpf_prog *xdp_prog;
struct ice_tx_ring **xdp_rings; /* XDP ring array */
- unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled qps */
u16 num_xdp_txq; /* Used XDP queues */
u8 xdp_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
+ struct mutex xdp_state_lock;
struct net_device **target_netdevs;
@@ -445,12 +445,23 @@ struct ice_vsi {
u8 old_numtc;
u16 old_ena_tc;
- struct ice_channel *ch;
-
/* setup back reference, to which aggregator node this VSI
* corresponds to
*/
struct ice_agg_node *agg_node;
+
+ struct_group_tagged(ice_vsi_cfg_params, params,
+ struct ice_port_info *port_info; /* back pointer to port_info */
+ struct ice_channel *ch; /* VSI's channel structure, may be NULL */
+ union {
+ /* VF associated with this VSI, may be NULL */
+ struct ice_vf *vf;
+ /* SF associated with this VSI, may be NULL */
+ struct ice_dynamic_port *sf;
+ };
+ u32 flags; /* VSI flags used for rebuild and configuration */
+ enum ice_vsi_type type; /* the type of the VSI */
+ );
} ____cacheline_internodealigned_in_smp;
/* struct that defines an interrupt vector */
@@ -458,7 +469,7 @@ struct ice_q_vector {
struct ice_vsi *vsi;
u16 v_idx; /* index in the vsi->q_vector array. */
- u16 reg_idx;
+ u16 reg_idx; /* PF relative register index */
u8 num_ring_rx; /* total number of Rx rings in vector */
u8 num_ring_tx; /* total number of Tx rings in vector */
u8 wb_on_itr:1; /* if true, WB on ITR is enabled */
@@ -472,14 +483,12 @@ struct ice_q_vector {
struct ice_ring_container rx;
struct ice_ring_container tx;
- cpumask_t affinity_mask;
- struct irq_affinity_notify affinity_notify;
-
struct ice_channel *ch;
char name[ICE_INT_NAME_STR_LEN];
u16 total_events; /* net_dim(): number of interrupts processed */
+ u16 vf_reg_idx; /* VF relative register index */
struct msi_map irq;
} ____cacheline_internodealigned_in_smp;
@@ -503,16 +512,17 @@ enum ice_pf_flags {
ICE_FLAG_MOD_POWER_UNSUPPORTED,
ICE_FLAG_PHY_FW_LOAD_FAILED,
ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */
- ICE_FLAG_LEGACY_RX,
ICE_FLAG_VF_TRUE_PROMISC_ENA,
ICE_FLAG_MDD_AUTO_RESET_VF,
ICE_FLAG_VF_VLAN_PRUNING,
ICE_FLAG_LINK_LENIENT_MODE_ENA,
ICE_FLAG_PLUG_AUX_DEV,
ICE_FLAG_UNPLUG_AUX_DEV,
+ ICE_FLAG_AUX_DEV_CREATED,
ICE_FLAG_MTU_CHANGED,
ICE_FLAG_GNSS, /* GNSS successfully initialized */
ICE_FLAG_DPLL, /* SyncE/PTP dplls initialized */
+ ICE_FLAG_LLDP_AQ_FLTR,
ICE_PF_FLAGS_NBITS /* must be last */
};
@@ -522,17 +532,10 @@ enum ice_misc_thread_tasks {
};
struct ice_eswitch {
- struct ice_vsi *control_vsi;
struct ice_vsi *uplink_vsi;
struct ice_esw_br_offloads *br_offloads;
struct xarray reprs;
bool is_running;
- /* struct to allow cp queues management optimization */
- struct {
- int to_reach;
- int value;
- bool is_reaching;
- } qs;
};
struct ice_agg_node {
@@ -542,8 +545,17 @@ struct ice_agg_node {
u8 valid;
};
+struct ice_pf_msix {
+ u32 cur;
+ u32 min;
+ u32 max;
+ u32 total;
+ u32 rest;
+};
+
struct ice_pf {
struct pci_dev *pdev;
+ struct ice_adapter *adapter;
struct devlink_region *nvm_region;
struct devlink_region *sram_region;
@@ -553,15 +565,8 @@ struct ice_pf {
struct devlink_port devlink_port;
/* OS reserved IRQ details */
- struct msix_entry *msix_entries;
struct ice_irq_tracker irq_tracker;
- /* First MSIX vector used by SR-IOV VFs. Calculated by subtracting the
- * number of MSIX vectors needed for all SR-IOV VFs from the number of
- * MSIX vectors allowed on this PF.
- */
- u16 sriov_base_vector;
- unsigned long *sriov_irq_bm; /* bitmap to track irq usage */
- u16 sriov_irq_size; /* size of the irq_bm bitmap */
+ struct ice_virt_irq_tracker virt_irq_tracker;
u16 ctrl_vsi_idx; /* control VSI index in pf->vsi array */
@@ -570,9 +575,6 @@ struct ice_pf {
struct ice_sw *first_sw; /* first switch created by firmware */
u16 eswitch_mode; /* current mode of eswitch */
struct dentry *ice_debugfs_pf;
- struct dentry *ice_debugfs_pf_fwlog;
- /* keep track of all the dentrys for FW log modules */
- struct dentry **ice_debugfs_pf_fwlog_modules;
struct ice_vfs vfs;
DECLARE_BITMAP(features, ICE_F_MAX);
DECLARE_BITMAP(state, ICE_STATE_NBITS);
@@ -580,6 +582,7 @@ struct ice_pf {
DECLARE_BITMAP(misc_thread, ICE_MISC_THREAD_NBITS);
unsigned long *avail_txqs; /* bitmap to track PF Tx queue usage */
unsigned long *avail_rxqs; /* bitmap to track PF Rx queue usage */
+ unsigned long *txtime_txqs; /* bitmap to track PF Tx Time queue */
unsigned long serv_tmr_period;
unsigned long serv_tmr_prev;
struct timer_list serv_tmr;
@@ -594,7 +597,6 @@ struct ice_pf {
struct gnss_serial *gnss_serial;
struct gnss_device *gnss_dev;
u16 num_rdma_msix; /* Total MSIX vectors for RDMA driver */
- u16 rdma_base_vector;
/* spinlock to protect the AdminQ wait list */
spinlock_t aq_wait_lock;
@@ -611,7 +613,7 @@ struct ice_pf {
struct msi_map ll_ts_irq; /* LL_TS interrupt MSIX vector */
u16 max_pf_txqs; /* Total Tx queues PF wide */
u16 max_pf_rxqs; /* Total Rx queues PF wide */
- u16 num_lan_msix; /* Total MSIX vectors for base driver */
+ struct ice_pf_msix msix;
u16 num_lan_tx; /* num LAN Tx queues setup */
u16 num_lan_rx; /* num LAN Rx queues setup */
u16 next_vsi; /* Next free slot in pf->vsi[] - 0-based! */
@@ -620,6 +622,7 @@ struct ice_pf {
u16 globr_count; /* Global reset count */
u16 empr_count; /* EMP reset count */
u16 pfr_count; /* PF reset count */
+ u32 link_down_events;
u8 wol_ena : 1; /* software state of WoL */
u32 wakeup_reason; /* last wakeup reason */
@@ -627,14 +630,12 @@ struct ice_pf {
struct ice_hw_port_stats stats_prev;
struct ice_hw hw;
u8 stat_prev_loaded:1; /* has previous stats been loaded */
- u8 rdma_mode;
u16 dcbx_cap;
u32 tx_timeout_count;
unsigned long tx_timeout_last_recovery;
u32 tx_timeout_recovery_level;
char int_name[ICE_INT_NAME_STR_LEN];
char int_name_ll_ts[ICE_INT_NAME_STR_LEN];
- struct auxiliary_device *adev;
int aux_idx;
u32 sw_int_count;
/* count of tc_flower filters specific to channel (aka where filter
@@ -653,6 +654,9 @@ struct ice_pf {
struct ice_eswitch eswitch;
struct ice_esw_br_port *br_port;
+ struct xarray dyn_ports;
+ struct xarray sf_nums;
+
#define ICE_INVALID_AGG_NODE_ID 0
#define ICE_PF_AGG_NODE_ID_START 1
#define ICE_MAX_PF_AGG_NODES 32
@@ -662,6 +666,10 @@ struct ice_pf {
struct ice_agg_node vf_agg_node[ICE_MAX_VF_AGG_NODES];
struct ice_dplls dplls;
struct device *hwmon_dev;
+ struct ice_health health_reporters;
+ struct iidc_rdma_core_dev_info *cdev_info;
+
+ u8 num_quanta_prof_used;
};
extern struct workqueue_struct *ice_lag_wq;
@@ -749,21 +757,61 @@ static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
}
/**
- * ice_xsk_pool - get XSK buffer pool bound to a ring
- * @ring: Rx ring to use
+ * ice_is_txtime_ena - check if Tx Time is enabled on the Tx ring
+ * @ring: pointer to Tx ring
*
- * Returns a pointer to xsk_buff_pool structure if there is a buffer pool
- * present, NULL otherwise.
+ * Return: true if the Tx ring has Tx Time enabled, false otherwise.
*/
-static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
+static inline bool ice_is_txtime_ena(const struct ice_tx_ring *ring)
{
struct ice_vsi *vsi = ring->vsi;
- u16 qid = ring->q_index;
+ struct ice_pf *pf = vsi->back;
- if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
+ return test_bit(ring->q_index, pf->txtime_txqs);
+}
+
+/**
+ * ice_is_txtime_cfg - check if Tx Time is configured on the Tx ring
+ * @ring: pointer to Tx ring
+ *
+ * Return: true if the Tx ring is configured for Tx Time, false otherwise.
+ */
+static inline bool ice_is_txtime_cfg(const struct ice_tx_ring *ring)
+{
+ return !!(ring->flags & ICE_TX_FLAGS_TXTIME);
+}
+
+/**
+ * ice_get_xp_from_qid - get ZC XSK buffer pool bound to a queue ID
+ * @vsi: pointer to VSI
+ * @qid: index of a queue to look at XSK buff pool presence
+ *
+ * Return: A pointer to xsk_buff_pool structure if there is a buffer pool
+ * attached and configured as zero-copy, NULL otherwise.
+ */
+static inline struct xsk_buff_pool *ice_get_xp_from_qid(struct ice_vsi *vsi,
+ u16 qid)
+{
+ struct xsk_buff_pool *pool = xsk_get_pool_from_qid(vsi->netdev, qid);
+
+ if (!ice_is_xdp_ena_vsi(vsi))
return NULL;
- return xsk_get_pool_from_qid(vsi->netdev, qid);
+ return (pool && pool->dev) ? pool : NULL;
+}
+
+/**
+ * ice_rx_xsk_pool - assign XSK buff pool to Rx ring
+ * @ring: Rx ring to use
+ *
+ * Sets XSK buff pool pointer on Rx ring.
+ */
+static inline void ice_rx_xsk_pool(struct ice_rx_ring *ring)
+{
+ struct ice_vsi *vsi = ring->vsi;
+ u16 qid = ring->q_index;
+
+ WRITE_ONCE(ring->xsk_pool, ice_get_xp_from_qid(vsi, qid));
}
/**
@@ -788,12 +836,7 @@ static inline void ice_tx_xsk_pool(struct ice_vsi *vsi, u16 qid)
if (!ring)
return;
- if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps)) {
- ring->xsk_pool = NULL;
- return;
- }
-
- ring->xsk_pool = xsk_get_pool_from_qid(vsi->netdev, qid);
+ WRITE_ONCE(ring->xsk_pool, ice_get_xp_from_qid(vsi, qid));
}
/**
@@ -895,11 +938,10 @@ static inline bool ice_is_adq_active(struct ice_pf *pf)
return false;
}
-void ice_debugfs_fwlog_init(struct ice_pf *pf);
+int ice_debugfs_pf_init(struct ice_pf *pf);
void ice_debugfs_pf_deinit(struct ice_pf *pf);
void ice_debugfs_init(void);
void ice_debugfs_exit(void);
-void ice_pf_fwlog_update_module(struct ice_pf *pf, int log_level, int module);
bool netif_is_ice(const struct net_device *dev);
int ice_vsi_setup_tx_rings(struct ice_vsi *vsi);
@@ -909,6 +951,7 @@ int ice_vsi_open(struct ice_vsi *vsi);
void ice_set_ethtool_ops(struct net_device *netdev);
void ice_set_ethtool_repr_ops(struct net_device *netdev);
void ice_set_ethtool_safe_mode_ops(struct net_device *netdev);
+void ice_set_ethtool_sf_ops(struct net_device *netdev);
u16 ice_get_avail_txq_count(struct ice_pf *pf);
u16 ice_get_avail_rxq_count(struct ice_pf *pf);
int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked);
@@ -922,9 +965,17 @@ int ice_down(struct ice_vsi *vsi);
int ice_down_up(struct ice_vsi *vsi);
int ice_vsi_cfg_lan(struct ice_vsi *vsi);
struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi);
+
+enum ice_xdp_cfg {
+ ICE_XDP_CFG_FULL, /* Fully apply new config in .ndo_bpf() */
+ ICE_XDP_CFG_PART, /* Save/use part of config in VSI rebuild */
+};
+
int ice_vsi_determine_xdp_res(struct ice_vsi *vsi);
-int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog);
-int ice_destroy_xdp_rings(struct ice_vsi *vsi);
+int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
+ enum ice_xdp_cfg cfg_type);
+int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type);
+void ice_map_xdp_rings(struct ice_vsi *vsi);
int
ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
u32 flags);
@@ -940,7 +991,6 @@ int ice_plug_aux_dev(struct ice_pf *pf);
void ice_unplug_aux_dev(struct ice_pf *pf);
int ice_init_rdma(struct ice_pf *pf);
void ice_deinit_rdma(struct ice_pf *pf);
-const char *ice_aq_str(enum ice_aq_err aq_err);
bool ice_is_wol_supported(struct ice_hw *hw);
void ice_fdir_del_all_fltrs(struct ice_vsi *vsi);
int
@@ -981,11 +1031,23 @@ int ice_open(struct net_device *netdev);
int ice_open_internal(struct net_device *netdev);
int ice_stop(struct net_device *netdev);
void ice_service_task_schedule(struct ice_pf *pf);
+void ice_start_service_task(struct ice_pf *pf);
int ice_load(struct ice_pf *pf);
void ice_unload(struct ice_pf *pf);
void ice_adv_lnk_speed_maps_init(void);
+void ice_init_dev_hw(struct ice_pf *pf);
int ice_init_dev(struct ice_pf *pf);
void ice_deinit_dev(struct ice_pf *pf);
+int ice_init_pf(struct ice_pf *pf);
+void ice_deinit_pf(struct ice_pf *pf);
+int ice_change_mtu(struct net_device *netdev, int new_mtu);
+void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue);
+int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp);
+void ice_set_netdev_features(struct net_device *netdev);
+int ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid);
+int ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid);
+void ice_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats);
/**
* ice_set_rdma_cap - enable RDMA support
@@ -1014,4 +1076,62 @@ static inline void ice_clear_rdma_cap(struct ice_pf *pf)
}
extern const struct xdp_metadata_ops ice_xdp_md_ops;
+
+/**
+ * ice_is_dual - Check if given config is multi-NAC
+ * @hw: pointer to HW structure
+ *
+ * Return: true if the device is running in multi-NAC (Network
+ * Acceleration Complex) configuration variant, false otherwise
+ * (always false for non-E825 devices).
+ */
+static inline bool ice_is_dual(struct ice_hw *hw)
+{
+ return hw->mac_type == ICE_MAC_GENERIC_3K_E825 &&
+ (hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_DUAL_M);
+}
+
+/**
+ * ice_is_primary - Check if given device belongs to the primary complex
+ * @hw: pointer to HW structure
+ *
+ * Check if given PF/HW is running on primary complex in multi-NAC
+ * configuration.
+ *
+ * Return: true if the device belongs to the primary complex, false
+ * otherwise (always true for non-E825 devices).
+ */
+static inline bool ice_is_primary(struct ice_hw *hw)
+{
+ return hw->mac_type != ICE_MAC_GENERIC_3K_E825 ||
+ !ice_is_dual(hw) ||
+ (hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M);
+}
+
+/**
+ * ice_pf_src_tmr_owned - Check if a primary timer is owned by PF
+ * @pf: pointer to PF structure
+ *
+ * Return: true if PF owns primary timer, false otherwise.
+ */
+static inline bool ice_pf_src_tmr_owned(struct ice_pf *pf)
+{
+ return pf->hw.func_caps.ts_func_info.src_tmr_owned &&
+ ice_is_primary(&pf->hw);
+}
+
+/**
+ * ice_get_primary_hw - Get pointer to primary ice_hw structure
+ * @pf: pointer to PF structure
+ *
+ * Return: A pointer to ice_hw structure with access to timesync
+ * register space.
+ */
+static inline struct ice_hw *ice_get_primary_hw(struct ice_pf *pf)
+{
+ if (!pf->adapter->ctrl_pf)
+ return &pf->hw;
+ else
+ return &pf->adapter->ctrl_pf->hw;
+}
#endif /* _ICE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.c b/drivers/net/ethernet/intel/ice/ice_adapter.c
new file mode 100644
index 000000000000..0a8a48cd4bce
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_adapter.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-FileCopyrightText: Copyright Red Hat
+
+#include <linux/cleanup.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/xarray.h>
+#include "ice_adapter.h"
+#include "ice.h"
+
+static DEFINE_XARRAY(ice_adapters);
+static DEFINE_MUTEX(ice_adapters_mutex);
+
+#define ICE_ADAPTER_FIXED_INDEX BIT_ULL(63)
+
+#define ICE_ADAPTER_INDEX_E825C \
+ (ICE_DEV_ID_E825C_BACKPLANE | ICE_ADAPTER_FIXED_INDEX)
+
+static u64 ice_adapter_index(struct pci_dev *pdev)
+{
+ switch (pdev->device) {
+ case ICE_DEV_ID_E825C_BACKPLANE:
+ case ICE_DEV_ID_E825C_QSFP:
+ case ICE_DEV_ID_E825C_SFP:
+ case ICE_DEV_ID_E825C_SGMII:
+ /* E825C devices have multiple NACs which are connected to the
+ * same clock source, and which must share the same
+ * ice_adapter structure. We can't use the serial number since
+ * each NAC has its own NVM generated with its own unique
+ * Device Serial Number. Instead, rely on the embedded nature
+ * of the E825C devices, and use a fixed index. This relies on
+ * the fact that all E825C physical functions in a given
+ * system are part of the same overall device.
+ */
+ return ICE_ADAPTER_INDEX_E825C;
+ default:
+ return pci_get_dsn(pdev) & ~ICE_ADAPTER_FIXED_INDEX;
+ }
+}
+
+static unsigned long ice_adapter_xa_index(struct pci_dev *pdev)
+{
+ u64 index = ice_adapter_index(pdev);
+
+#if BITS_PER_LONG == 64
+ return index;
+#else
+ return (u32)index ^ (u32)(index >> 32);
+#endif
+}
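Worked example (values purely illustrative): on a 32-bit kernel the helper above folds the 64-bit index into an unsigned long by XOR-ing its two halves:

	/* index              = 0x0123456789ABCDEF (e.g. a PCI DSN)
	 * (u32)index         = 0x89ABCDEF
	 * (u32)(index >> 32) = 0x01234567
	 * xa index           = 0x89ABCDEF ^ 0x01234567 = 0x88888888
	 */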
+
+static struct ice_adapter *ice_adapter_new(struct pci_dev *pdev)
+{
+ struct ice_adapter *adapter;
+
+ adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
+ if (!adapter)
+ return NULL;
+
+ adapter->index = ice_adapter_index(pdev);
+ spin_lock_init(&adapter->ptp_gltsyn_time_lock);
+ spin_lock_init(&adapter->txq_ctx_lock);
+ refcount_set(&adapter->refcount, 1);
+
+ mutex_init(&adapter->ports.lock);
+ INIT_LIST_HEAD(&adapter->ports.ports);
+
+ return adapter;
+}
+
+static void ice_adapter_free(struct ice_adapter *adapter)
+{
+ WARN_ON(!list_empty(&adapter->ports.ports));
+ mutex_destroy(&adapter->ports.lock);
+
+ kfree(adapter);
+}
+
+/**
+ * ice_adapter_get - Get a shared ice_adapter structure.
+ * @pdev: Pointer to the pci_dev whose driver is getting the ice_adapter.
+ *
+ * Gets a pointer to a shared ice_adapter structure. Physical functions (PFs)
+ * of the same multi-function PCI device share one ice_adapter structure.
+ * The ice_adapter is reference-counted. The PF driver must use ice_adapter_put
+ * to release its reference.
+ *
+ * Context: Process, may sleep.
+ * Return: Pointer to ice_adapter on success.
+ * ERR_PTR() on error. -ENOMEM is the only possible error.
+ */
+struct ice_adapter *ice_adapter_get(struct pci_dev *pdev)
+{
+ struct ice_adapter *adapter;
+ unsigned long index;
+ int err;
+
+ index = ice_adapter_xa_index(pdev);
+ scoped_guard(mutex, &ice_adapters_mutex) {
+ adapter = xa_load(&ice_adapters, index);
+ if (adapter) {
+ refcount_inc(&adapter->refcount);
+ WARN_ON_ONCE(adapter->index != ice_adapter_index(pdev));
+ return adapter;
+ }
+ err = xa_reserve(&ice_adapters, index, GFP_KERNEL);
+ if (err)
+ return ERR_PTR(err);
+
+ adapter = ice_adapter_new(pdev);
+ if (!adapter) {
+ xa_release(&ice_adapters, index);
+ return ERR_PTR(-ENOMEM);
+ }
+ xa_store(&ice_adapters, index, adapter, GFP_KERNEL);
+ }
+ return adapter;
+}
+
+/**
+ * ice_adapter_put - Release a reference to the shared ice_adapter structure.
+ * @pdev: Pointer to the pci_dev whose driver is releasing the ice_adapter.
+ *
+ * Releases the reference to ice_adapter previously obtained with
+ * ice_adapter_get.
+ *
+ * Context: Process, may sleep.
+ */
+void ice_adapter_put(struct pci_dev *pdev)
+{
+ struct ice_adapter *adapter;
+ unsigned long index;
+
+ index = ice_adapter_xa_index(pdev);
+ scoped_guard(mutex, &ice_adapters_mutex) {
+ adapter = xa_load(&ice_adapters, index);
+ if (WARN_ON(!adapter))
+ return;
+ if (!refcount_dec_and_test(&adapter->refcount))
+ return;
+
+ WARN_ON(xa_erase(&ice_adapters, index) != adapter);
+ }
+ ice_adapter_free(adapter);
+}
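A minimal sketch of how a PF driver is expected to pair these calls over its lifetime (illustrative only; the function names are hypothetical and error handling is trimmed):

	static int example_pf_probe(struct pci_dev *pdev)
	{
		struct ice_adapter *adapter;

		adapter = ice_adapter_get(pdev);	/* may sleep */
		if (IS_ERR(adapter))
			return PTR_ERR(adapter);

		/* stash adapter in PF private data; shared locks like
		 * adapter->ptp_gltsyn_time_lock are now usable
		 */
		return 0;
	}

	static void example_pf_remove(struct pci_dev *pdev)
	{
		/* drops the reference; the adapter is freed when the last PF releases it */
		ice_adapter_put(pdev);
	}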
diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.h b/drivers/net/ethernet/intel/ice/ice_adapter.h
new file mode 100644
index 000000000000..e95266c7f20b
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_adapter.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* SPDX-FileCopyrightText: Copyright Red Hat */
+
+#ifndef _ICE_ADAPTER_H_
+#define _ICE_ADAPTER_H_
+
+#include <linux/types.h>
+#include <linux/spinlock_types.h>
+#include <linux/refcount_types.h>
+
+struct pci_dev;
+struct ice_pf;
+
+/**
+ * struct ice_port_list - data used to store the list of adapter ports
+ *
+ * This structure contains data used to maintain a list of adapter ports
+ *
+ * @ports: list of ports
+ * @lock: protect access to the ports list
+ */
+struct ice_port_list {
+ struct list_head ports;
+ /* To synchronize the ports list operations */
+ struct mutex lock;
+};
+
+/**
+ * struct ice_adapter - PCI adapter resources shared across PFs
+ * @refcount: Reference count. struct ice_pf objects hold the references.
+ * @ptp_gltsyn_time_lock: Spinlock protecting access to the GLTSYN_TIME
+ * register of the PTP clock.
+ * @txq_ctx_lock: Spinlock protecting access to the GLCOMM_QTX_CNTX_CTL register
+ * @ctrl_pf: Control PF of the adapter
+ * @ports: Ports list
+ * @index: 64-bit index cached for collision detection on 32bit systems
+ */
+struct ice_adapter {
+ refcount_t refcount;
+ /* For access to the GLTSYN_TIME register */
+ spinlock_t ptp_gltsyn_time_lock;
+ /* For access to GLCOMM_QTX_CNTX_CTL register */
+ spinlock_t txq_ctx_lock;
+
+ struct ice_pf *ctrl_pf;
+ struct ice_port_list ports;
+ u64 index;
+};
+
+struct ice_adapter *ice_adapter_get(struct pci_dev *pdev);
+void ice_adapter_put(struct pci_dev *pdev);
+
+#endif /* _ICE_ADAPTER_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 1f3e7a6903e5..859e9c66f3e7 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -4,6 +4,8 @@
#ifndef _ICE_ADMINQ_CMD_H_
#define _ICE_ADMINQ_CMD_H_
+#include <linux/net/intel/libie/adminq.h>
+
/* This header file defines the Admin Queue commands, error codes and
* descriptor format. It is shared between Firmware and Software.
*/
@@ -12,37 +14,28 @@
#define ICE_AQC_TOPO_MAX_LEVEL_NUM 0x9
#define ICE_AQ_SET_MAC_FRAME_SIZE_MAX 9728
-struct ice_aqc_generic {
- __le32 param0;
- __le32 param1;
- __le32 addr_high;
- __le32 addr_low;
-};
+#define ICE_RXQ_CTX_SIZE_DWORDS 8
+#define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32))
-/* Get version (direct 0x0001) */
-struct ice_aqc_get_ver {
- __le32 rom_ver;
- __le32 fw_build;
- u8 fw_branch;
- u8 fw_major;
- u8 fw_minor;
- u8 fw_patch;
- u8 api_branch;
- u8 api_major;
- u8 api_minor;
- u8 api_patch;
-};
-
-/* Send driver version (indirect 0x0002) */
-struct ice_aqc_driver_ver {
- u8 major_ver;
- u8 minor_ver;
- u8 build_ver;
- u8 subbuild_ver;
- u8 reserved[4];
- __le32 addr_high;
- __le32 addr_low;
-};
+typedef struct __packed { u8 buf[ICE_RXQ_CTX_SZ]; } ice_rxq_ctx_buf_t;
+
+/* The Tx queue context is 40 bytes, and includes some internal state. The
+ * Admin Queue buffers don't include the internal state, so only include the
+ * first 22 bytes of the context.
+ */
+#define ICE_TXQ_CTX_SZ 22
+
+typedef struct __packed { u8 buf[ICE_TXQ_CTX_SZ]; } ice_txq_ctx_buf_t;
+
+#define ICE_TXQ_CTX_FULL_SIZE_DWORDS 10
+#define ICE_TXQ_CTX_FULL_SZ \
+ (ICE_TXQ_CTX_FULL_SIZE_DWORDS * sizeof(u32))
+
+typedef struct __packed { u8 buf[ICE_TXQ_CTX_FULL_SZ]; } ice_txq_ctx_buf_full_t;
+
+#define ICE_TXTIME_CTX_SZ 25
+
+typedef struct __packed { u8 buf[ICE_TXTIME_CTX_SZ]; } ice_txtime_ctx_buf_t;
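Since these typedefs are plain byte arrays of the sizes documented above, the relationship can be checked at compile time; a sketch (not present in the patch, assuming <linux/build_bug.h> is available):

	static_assert(sizeof(ice_rxq_ctx_buf_t)    == ICE_RXQ_CTX_SZ);    /* 32 bytes */
	static_assert(sizeof(ice_txq_ctx_buf_t)    == ICE_TXQ_CTX_SZ);    /* 22 bytes */
	static_assert(sizeof(ice_txtime_ctx_buf_t) == ICE_TXTIME_CTX_SZ); /* 25 bytes */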
/* Queue Shutdown (direct 0x0003) */
struct ice_aqc_q_shutdown {
@@ -51,92 +44,6 @@ struct ice_aqc_q_shutdown {
u8 reserved[15];
};
-/* Request resource ownership (direct 0x0008)
- * Release resource ownership (direct 0x0009)
- */
-struct ice_aqc_req_res {
- __le16 res_id;
-#define ICE_AQC_RES_ID_NVM 1
-#define ICE_AQC_RES_ID_SDP 2
-#define ICE_AQC_RES_ID_CHNG_LOCK 3
-#define ICE_AQC_RES_ID_GLBL_LOCK 4
- __le16 access_type;
-#define ICE_AQC_RES_ACCESS_READ 1
-#define ICE_AQC_RES_ACCESS_WRITE 2
-
- /* Upon successful completion, FW writes this value and driver is
- * expected to release resource before timeout. This value is provided
- * in milliseconds.
- */
- __le32 timeout;
-#define ICE_AQ_RES_NVM_READ_DFLT_TIMEOUT_MS 3000
-#define ICE_AQ_RES_NVM_WRITE_DFLT_TIMEOUT_MS 180000
-#define ICE_AQ_RES_CHNG_LOCK_DFLT_TIMEOUT_MS 1000
-#define ICE_AQ_RES_GLBL_LOCK_DFLT_TIMEOUT_MS 3000
- /* For SDP: pin ID of the SDP */
- __le32 res_number;
- /* Status is only used for ICE_AQC_RES_ID_GLBL_LOCK */
- __le16 status;
-#define ICE_AQ_RES_GLBL_SUCCESS 0
-#define ICE_AQ_RES_GLBL_IN_PROG 1
-#define ICE_AQ_RES_GLBL_DONE 2
- u8 reserved[2];
-};
-
-/* Get function capabilities (indirect 0x000A)
- * Get device capabilities (indirect 0x000B)
- */
-struct ice_aqc_list_caps {
- u8 cmd_flags;
- u8 pf_index;
- u8 reserved[2];
- __le32 count;
- __le32 addr_high;
- __le32 addr_low;
-};
-
-/* Device/Function buffer entry, repeated per reported capability */
-struct ice_aqc_list_caps_elem {
- __le16 cap;
-#define ICE_AQC_CAPS_VALID_FUNCTIONS 0x0005
-#define ICE_AQC_CAPS_SRIOV 0x0012
-#define ICE_AQC_CAPS_VF 0x0013
-#define ICE_AQC_CAPS_VSI 0x0017
-#define ICE_AQC_CAPS_DCB 0x0018
-#define ICE_AQC_CAPS_RSS 0x0040
-#define ICE_AQC_CAPS_RXQS 0x0041
-#define ICE_AQC_CAPS_TXQS 0x0042
-#define ICE_AQC_CAPS_MSIX 0x0043
-#define ICE_AQC_CAPS_FD 0x0045
-#define ICE_AQC_CAPS_1588 0x0046
-#define ICE_AQC_CAPS_MAX_MTU 0x0047
-#define ICE_AQC_CAPS_NVM_VER 0x0048
-#define ICE_AQC_CAPS_PENDING_NVM_VER 0x0049
-#define ICE_AQC_CAPS_OROM_VER 0x004A
-#define ICE_AQC_CAPS_PENDING_OROM_VER 0x004B
-#define ICE_AQC_CAPS_NET_VER 0x004C
-#define ICE_AQC_CAPS_PENDING_NET_VER 0x004D
-#define ICE_AQC_CAPS_RDMA 0x0051
-#define ICE_AQC_CAPS_SENSOR_READING 0x0067
-#define ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE 0x0076
-#define ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT 0x0077
-#define ICE_AQC_CAPS_NVM_MGMT 0x0080
-#define ICE_AQC_CAPS_FW_LAG_SUPPORT 0x0092
-#define ICE_AQC_BIT_ROCEV2_LAG 0x01
-#define ICE_AQC_BIT_SRIOV_LAG 0x02
-
- u8 major_ver;
- u8 minor_ver;
- /* Number of resources described by this capability */
- __le32 number;
- /* Only meaningful for some types of resources */
- __le32 logical_id;
- /* Only meaningful for some types of resources */
- __le32 phys_id;
- __le64 rsvd1;
- __le64 rsvd2;
-};
-
/* Manage MAC address, read command - indirect (0x0107)
* This struct is also used for the response
*/
@@ -230,6 +137,13 @@ struct ice_aqc_get_sw_cfg_resp_elem {
#define ICE_AQC_GET_SW_CONF_RESP_IS_VF BIT(15)
};
+/* Loopback port parameter mode values. */
+enum ice_local_fwd_mode {
+ ICE_LOCAL_FWD_MODE_ENABLED = 0,
+ ICE_LOCAL_FWD_MODE_DISABLED = 1,
+ ICE_LOCAL_FWD_MODE_PRIORITIZED = 2,
+};
+
/* Set Port parameters, (direct, 0x0203) */
struct ice_aqc_set_port_params {
__le16 cmd_flags;
@@ -238,7 +152,9 @@ struct ice_aqc_set_port_params {
__le16 swid;
#define ICE_AQC_PORT_SWID_VALID BIT(15)
#define ICE_AQC_PORT_SWID_M 0xFF
- u8 reserved[10];
+ u8 local_fwd_mode;
+#define ICE_AQC_SET_P_PARAMS_LOCAL_FWD_MODE_VALID BIT(2)
+ u8 reserved[9];
};
/* These resource type defines are used for all switch resource
@@ -264,6 +180,8 @@ struct ice_aqc_set_port_params {
#define ICE_AQC_RES_TYPE_FLAG_SHARED BIT(7)
#define ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM BIT(12)
#define ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX BIT(13)
+#define ICE_AQC_RES_TYPE_FLAG_SUBSCRIBE_SHARED BIT(14)
+#define ICE_AQC_RES_TYPE_FLAG_SUBSCRIBE_CTL BIT(15)
#define ICE_AQC_RES_TYPE_FLAG_DEDICATED 0x00
@@ -808,6 +726,23 @@ struct ice_aqc_get_topo {
__le32 addr_low;
};
+/* Get/Set Tx Topology (indirect 0x0418/0x0417) */
+struct ice_aqc_get_set_tx_topo {
+ u8 set_flags;
+#define ICE_AQC_TX_TOPO_FLAGS_CORRER BIT(0)
+#define ICE_AQC_TX_TOPO_FLAGS_SRC_RAM BIT(1)
+#define ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW BIT(4)
+#define ICE_AQC_TX_TOPO_FLAGS_ISSUED BIT(5)
+
+ u8 get_flags;
+#define ICE_AQC_TX_TOPO_GET_RAM 2
+
+ __le16 reserved1;
+ __le32 reserved2;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
/* Update TSE (indirect 0x0403)
* Get TSE (indirect 0x0404)
* Add TSE (indirect 0x0401)
@@ -1440,6 +1375,71 @@ struct ice_aqc_get_sensor_reading_resp {
} data;
};
+/* DNL call command (indirect 0x0682)
+ * Struct is used for both command and response
+ */
+struct ice_aqc_dnl_call_command {
+ u8 ctx; /* Used in command, reserved in response */
+ u8 reserved;
+ __le16 activity_id;
+#define ICE_AQC_ACT_ID_DNL 0x1129
+ __le32 reserved1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_dnl_equa_param {
+ __le16 data_in;
+#define ICE_AQC_RX_EQU_SHIFT 8
+#define ICE_AQC_RX_EQU_PRE2 (0x10 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_PRE1 (0x11 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_POST1 (0x12 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_BFLF (0x13 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_BFHF (0x14 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_CTLE_GAINHF (0x20 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_CTLE_GAINLF (0x21 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_CTLE_GAINDC (0x22 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_CTLE_BW (0x23 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DFE_GAIN (0x30 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DFE_GAIN2 (0x31 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DFE_2 (0x32 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DFE_3 (0x33 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DFE_4 (0x34 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DFE_5 (0x35 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DFE_6 (0x36 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DFE_7 (0x37 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DFE_8 (0x38 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DFE_9 (0x39 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DFE_10 (0x3A << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DFE_11 (0x3B << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DFE_12 (0x3C << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_TX_EQU_PRE1 0x0
+#define ICE_AQC_TX_EQU_PRE3 0x3
+#define ICE_AQC_TX_EQU_ATTEN 0x4
+#define ICE_AQC_TX_EQU_POST1 0x8
+#define ICE_AQC_TX_EQU_PRE2 0xC
+ __le16 op_code_serdes_sel;
+#define ICE_AQC_OP_CODE_SHIFT 4
+#define ICE_AQC_OP_CODE_RX_EQU (0x9 << ICE_AQC_OP_CODE_SHIFT)
+#define ICE_AQC_OP_CODE_TX_EQU (0x10 << ICE_AQC_OP_CODE_SHIFT)
+ __le32 reserved[3];
+};
+
+struct ice_aqc_dnl_equa_respon {
+ /* Equalization value can be negative */
+ int val;
+ __le32 reserved[3];
+};
+
+/* DNL call command/response buffer (indirect 0x0682) */
+struct ice_aqc_dnl_call {
+ union {
+ struct ice_aqc_dnl_equa_param txrx_equa_reqs;
+ __le32 stores[4];
+ struct ice_aqc_dnl_equa_respon txrx_equa_resp;
+ } sto;
+};
+
struct ice_aqc_link_topo_params {
u8 lport_num;
u8 lport_num_valid;
@@ -1569,6 +1569,8 @@ struct ice_aqc_get_port_options_elem {
#define ICE_AQC_PORT_OPT_MAX_LANE_25G 5
#define ICE_AQC_PORT_OPT_MAX_LANE_50G 6
#define ICE_AQC_PORT_OPT_MAX_LANE_100G 7
+#define ICE_AQC_PORT_OPT_MAX_LANE_200G 8
+#define ICE_AQC_PORT_OPT_MAX_LANE_40G 9
u8 global_scid[2];
u8 phy_scid[2];
@@ -1663,6 +1665,24 @@ struct ice_aqc_nvm {
};
#define ICE_AQC_NVM_START_POINT 0
+#define ICE_AQC_NVM_SECTOR_UNIT 4096
+#define ICE_AQC_NVM_SDP_AC_PTR_OFFSET 0xD8
+#define ICE_AQC_NVM_SDP_AC_PTR_M GENMASK(14, 0)
+#define ICE_AQC_NVM_SDP_AC_PTR_INVAL 0x7FFF
+#define ICE_AQC_NVM_SDP_AC_PTR_TYPE_M BIT(15)
+#define ICE_AQC_NVM_SDP_AC_SDP_NUM_M GENMASK(2, 0)
+#define ICE_AQC_NVM_SDP_AC_DIR_M BIT(3)
+#define ICE_AQC_NVM_SDP_AC_PIN_M GENMASK(15, 6)
+#define ICE_AQC_NVM_SDP_AC_MAX_SIZE 7
+
+#define ICE_AQC_NVM_TX_TOPO_MOD_ID 0x14B
+
+struct ice_aqc_nvm_tx_topo_user_sel {
+ __le16 length;
+ u8 data;
+#define ICE_AQC_NVM_TX_TOPO_USER_SEL BIT(4)
+ u8 reserved;
+};
/* NVM Checksum Command (direct, 0x0706) */
struct ice_aqc_nvm_checksum {
@@ -1693,6 +1713,7 @@ struct ice_aqc_nvm_pass_comp_tbl {
#define ICE_AQ_NVM_PASS_COMP_CAN_BE_UPDATED 0x0
#define ICE_AQ_NVM_PASS_COMP_CAN_MAY_BE_UPDATEABLE 0x1
#define ICE_AQ_NVM_PASS_COMP_CAN_NOT_BE_UPDATED 0x2
+#define ICE_AQ_NVM_PASS_COMP_PARTIAL_CHECK 0x3
u8 component_response_code; /* Response only */
#define ICE_AQ_NVM_PASS_COMP_CAN_BE_UPDATED_CODE 0x0
#define ICE_AQ_NVM_PASS_COMP_STAMP_IDENTICAL_CODE 0x1
@@ -1970,10 +1991,10 @@ struct ice_aqc_add_txqs_perq {
__le16 txq_id;
u8 rsvd[2];
__le32 q_teid;
- u8 txq_ctx[22];
+ ice_txq_ctx_buf_t txq_ctx;
u8 rsvd2[2];
struct ice_aqc_txsched_elem info;
-};
+} __packed;
/* The format of the command buffer for Add Tx LAN Queues (0x0C30)
* is an array of the following structs. Please note that the length of
@@ -2043,6 +2064,10 @@ struct ice_aqc_cfg_txqs {
#define ICE_AQC_Q_CFG_SRC_PRT_M 0x7
#define ICE_AQC_Q_CFG_DST_PRT_S 3
#define ICE_AQC_Q_CFG_DST_PRT_M (0x7 << ICE_AQC_Q_CFG_DST_PRT_S)
+#define ICE_AQC_Q_CFG_MODE_M GENMASK(7, 6)
+#define ICE_AQC_Q_CFG_MODE_SAME_PF 0x0
+#define ICE_AQC_Q_CFG_MODE_GIVE_OWN 0x1
+#define ICE_AQC_Q_CFG_MODE_KEEP_OWN 0x2
u8 time_out;
#define ICE_AQC_Q_CFG_TIMEOUT_S 2
#define ICE_AQC_Q_CFG_TIMEOUT_M (0x1F << ICE_AQC_Q_CFG_TIMEOUT_S)
@@ -2096,6 +2121,34 @@ struct ice_aqc_add_rdma_qset_data {
struct ice_aqc_add_tx_rdma_qset_entry rdma_qsets[];
};
+/* Set Tx Time LAN Queue (indirect 0x0C35) */
+struct ice_aqc_set_txtimeqs {
+ __le16 q_id;
+ __le16 q_amount;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* This is the descriptor of each queue entry for the Set Tx Time Queue
+ * command (0x0C35). Only used within struct ice_aqc_set_txtime_qgrp.
+ */
+struct ice_aqc_set_txtimeqs_perq {
+ u8 reserved[4];
+ ice_txtime_ctx_buf_t txtime_ctx;
+ u8 reserved1[3];
+};
+
+/* The format of the command buffer for Set Tx Time Queue (0x0C35)
+ * is an array of the following structs. Please note that the length of
+ * each struct ice_aqc_set_txtime_qgrp is variable due to the variable
+ * number of queues in each group!
+ */
+struct ice_aqc_set_txtime_qgrp {
+ u8 reserved[8];
+ struct ice_aqc_set_txtimeqs_perq txtimeqs[];
+};
+
/* Download Package (indirect 0x0C40) */
/* Also used for Update Package (indirect 0x0C41 and 0x0C42) */
struct ice_aqc_download_pkg {
@@ -2150,6 +2203,24 @@ struct ice_aqc_get_pkg_info_resp {
struct ice_aqc_get_pkg_info pkg_info[];
};
+#define ICE_CGU_INPUT_PHASE_OFFSET_BYTES 6
+
+struct ice_cgu_input_measure {
+ u8 phase_offset[ICE_CGU_INPUT_PHASE_OFFSET_BYTES];
+ __le32 freq;
+} __packed __aligned(sizeof(__le16));
+
+#define ICE_AQC_GET_CGU_IN_MEAS_DPLL_IDX_M ICE_M(0xf, 0)
+
+/* Get CGU input measure command response data structure (indirect 0x0C59) */
+struct ice_aqc_get_cgu_input_measure {
+ u8 dpll_idx_opt;
+ u8 length;
+ u8 rsvd[6];
+};
+
+#define ICE_AQC_GET_CGU_MAX_PHASE_ADJ GENMASK(30, 0)
+
/* Get CGU abilities command response data structure (indirect 0x0C61) */
struct ice_aqc_get_cgu_abilities {
u8 num_inputs;
@@ -2164,6 +2235,8 @@ struct ice_aqc_get_cgu_abilities {
u8 rsvd[3];
};
+#define ICE_AQC_CGU_IN_CFG_FLG2_REFSYNC_EN BIT(7)
+
/* Set CGU input config (direct 0x0C62) */
struct ice_aqc_set_cgu_input_config {
u8 input_idx;
@@ -2358,219 +2431,85 @@ struct ice_aqc_event_lan_overflow {
u8 reserved[8];
};
-enum ice_aqc_fw_logging_mod {
- ICE_AQC_FW_LOG_ID_GENERAL = 0,
- ICE_AQC_FW_LOG_ID_CTRL,
- ICE_AQC_FW_LOG_ID_LINK,
- ICE_AQC_FW_LOG_ID_LINK_TOPO,
- ICE_AQC_FW_LOG_ID_DNL,
- ICE_AQC_FW_LOG_ID_I2C,
- ICE_AQC_FW_LOG_ID_SDP,
- ICE_AQC_FW_LOG_ID_MDIO,
- ICE_AQC_FW_LOG_ID_ADMINQ,
- ICE_AQC_FW_LOG_ID_HDMA,
- ICE_AQC_FW_LOG_ID_LLDP,
- ICE_AQC_FW_LOG_ID_DCBX,
- ICE_AQC_FW_LOG_ID_DCB,
- ICE_AQC_FW_LOG_ID_XLR,
- ICE_AQC_FW_LOG_ID_NVM,
- ICE_AQC_FW_LOG_ID_AUTH,
- ICE_AQC_FW_LOG_ID_VPD,
- ICE_AQC_FW_LOG_ID_IOSF,
- ICE_AQC_FW_LOG_ID_PARSER,
- ICE_AQC_FW_LOG_ID_SW,
- ICE_AQC_FW_LOG_ID_SCHEDULER,
- ICE_AQC_FW_LOG_ID_TXQ,
- ICE_AQC_FW_LOG_ID_RSVD,
- ICE_AQC_FW_LOG_ID_POST,
- ICE_AQC_FW_LOG_ID_WATCHDOG,
- ICE_AQC_FW_LOG_ID_TASK_DISPATCH,
- ICE_AQC_FW_LOG_ID_MNG,
- ICE_AQC_FW_LOG_ID_SYNCE,
- ICE_AQC_FW_LOG_ID_HEALTH,
- ICE_AQC_FW_LOG_ID_TSDRV,
- ICE_AQC_FW_LOG_ID_PFREG,
- ICE_AQC_FW_LOG_ID_MDLVER,
- ICE_AQC_FW_LOG_ID_MAX,
-};
-
-/* Set FW Logging configuration (indirect 0xFF30)
- * Register for FW Logging (indirect 0xFF31)
- * Query FW Logging (indirect 0xFF32)
- * FW Log Event (indirect 0xFF33)
- */
-struct ice_aqc_fw_log {
- u8 cmd_flags;
-#define ICE_AQC_FW_LOG_CONF_UART_EN BIT(0)
-#define ICE_AQC_FW_LOG_CONF_AQ_EN BIT(1)
-#define ICE_AQC_FW_LOG_QUERY_REGISTERED BIT(2)
-#define ICE_AQC_FW_LOG_CONF_SET_VALID BIT(3)
-#define ICE_AQC_FW_LOG_AQ_REGISTER BIT(0)
-#define ICE_AQC_FW_LOG_AQ_QUERY BIT(2)
-
- u8 rsp_flag;
- __le16 fw_rt_msb;
- union {
- struct {
- __le32 fw_rt_lsb;
- } sync;
- struct {
- __le16 log_resolution;
-#define ICE_AQC_FW_LOG_MIN_RESOLUTION (1)
-#define ICE_AQC_FW_LOG_MAX_RESOLUTION (128)
-
- __le16 mdl_cnt;
- } cfg;
- } ops;
+enum ice_aqc_health_status_mask {
+ ICE_AQC_HEALTH_STATUS_SET_PF_SPECIFIC_MASK = BIT(0),
+ ICE_AQC_HEALTH_STATUS_SET_ALL_PF_MASK = BIT(1),
+ ICE_AQC_HEALTH_STATUS_SET_GLOBAL_MASK = BIT(2),
+};
+
+/* Set Health Status (direct 0xFF20) */
+struct ice_aqc_set_health_status_cfg {
+ u8 event_source;
+ u8 reserved[15];
+};
+
+enum ice_aqc_health_status {
+ ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_STRICT = 0x101,
+ ICE_AQC_HEALTH_STATUS_ERR_MOD_TYPE = 0x102,
+ ICE_AQC_HEALTH_STATUS_ERR_MOD_QUAL = 0x103,
+ ICE_AQC_HEALTH_STATUS_ERR_MOD_COMM = 0x104,
+ ICE_AQC_HEALTH_STATUS_ERR_MOD_CONFLICT = 0x105,
+ ICE_AQC_HEALTH_STATUS_ERR_MOD_NOT_PRESENT = 0x106,
+ ICE_AQC_HEALTH_STATUS_INFO_MOD_UNDERUTILIZED = 0x107,
+ ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_LENIENT = 0x108,
+ ICE_AQC_HEALTH_STATUS_ERR_MOD_DIAGNOSTIC_FEATURE = 0x109,
+ ICE_AQC_HEALTH_STATUS_ERR_INVALID_LINK_CFG = 0x10B,
+ ICE_AQC_HEALTH_STATUS_ERR_PORT_ACCESS = 0x10C,
+ ICE_AQC_HEALTH_STATUS_ERR_PORT_UNREACHABLE = 0x10D,
+ ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_MOD_LIMITED = 0x10F,
+ ICE_AQC_HEALTH_STATUS_ERR_PARALLEL_FAULT = 0x110,
+ ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_PHY_LIMITED = 0x111,
+ ICE_AQC_HEALTH_STATUS_ERR_NETLIST_TOPO = 0x112,
+ ICE_AQC_HEALTH_STATUS_ERR_NETLIST = 0x113,
+ ICE_AQC_HEALTH_STATUS_ERR_TOPO_CONFLICT = 0x114,
+ ICE_AQC_HEALTH_STATUS_ERR_LINK_HW_ACCESS = 0x115,
+ ICE_AQC_HEALTH_STATUS_ERR_LINK_RUNTIME = 0x116,
+ ICE_AQC_HEALTH_STATUS_ERR_DNL_INIT = 0x117,
+ ICE_AQC_HEALTH_STATUS_ERR_PHY_NVM_PROG = 0x120,
+ ICE_AQC_HEALTH_STATUS_ERR_PHY_FW_LOAD = 0x121,
+ ICE_AQC_HEALTH_STATUS_INFO_RECOVERY = 0x500,
+ ICE_AQC_HEALTH_STATUS_ERR_FLASH_ACCESS = 0x501,
+ ICE_AQC_HEALTH_STATUS_ERR_NVM_AUTH = 0x502,
+ ICE_AQC_HEALTH_STATUS_ERR_OROM_AUTH = 0x503,
+ ICE_AQC_HEALTH_STATUS_ERR_DDP_AUTH = 0x504,
+ ICE_AQC_HEALTH_STATUS_ERR_NVM_COMPAT = 0x505,
+ ICE_AQC_HEALTH_STATUS_ERR_OROM_COMPAT = 0x506,
+ ICE_AQC_HEALTH_STATUS_ERR_NVM_SEC_VIOLATION = 0x507,
+ ICE_AQC_HEALTH_STATUS_ERR_OROM_SEC_VIOLATION = 0x508,
+ ICE_AQC_HEALTH_STATUS_ERR_DCB_MIB = 0x509,
+ ICE_AQC_HEALTH_STATUS_ERR_MNG_TIMEOUT = 0x50A,
+ ICE_AQC_HEALTH_STATUS_ERR_BMC_RESET = 0x50B,
+ ICE_AQC_HEALTH_STATUS_ERR_LAST_MNG_FAIL = 0x50C,
+ ICE_AQC_HEALTH_STATUS_ERR_RESOURCE_ALLOC_FAIL = 0x50D,
+ ICE_AQC_HEALTH_STATUS_ERR_FW_LOOP = 0x1000,
+ ICE_AQC_HEALTH_STATUS_ERR_FW_PFR_FAIL = 0x1001,
+ ICE_AQC_HEALTH_STATUS_ERR_LAST_FAIL_AQ = 0x1002,
+};
+
+/* Get Health Status (indirect 0xFF22) */
+struct ice_aqc_get_health_status {
+ __le16 health_status_count;
+ u8 reserved[6];
__le32 addr_high;
__le32 addr_low;
};
-/* Response Buffer for:
- * Set Firmware Logging Configuration (0xFF30)
- * Query FW Logging (0xFF32)
- */
-struct ice_aqc_fw_log_cfg_resp {
- __le16 module_identifier;
- u8 log_level;
- u8 rsvd0;
+enum ice_aqc_health_status_scope {
+ ICE_AQC_HEALTH_STATUS_PF = 0x1,
+ ICE_AQC_HEALTH_STATUS_PORT = 0x2,
+ ICE_AQC_HEALTH_STATUS_GLOBAL = 0x3,
};
-/**
- * struct ice_aq_desc - Admin Queue (AQ) descriptor
- * @flags: ICE_AQ_FLAG_* flags
- * @opcode: AQ command opcode
- * @datalen: length in bytes of indirect/external data buffer
- * @retval: return value from firmware
- * @cookie_high: opaque data high-half
- * @cookie_low: opaque data low-half
- * @params: command-specific parameters
- *
- * Descriptor format for commands the driver posts on the Admin Transmit Queue
- * (ATQ). The firmware writes back onto the command descriptor and returns
- * the result of the command. Asynchronous events that are not an immediate
- * result of the command are written to the Admin Receive Queue (ARQ) using
- * the same descriptor format. Descriptors are in little-endian notation with
- * 32-bit words.
+#define ICE_AQC_HEALTH_STATUS_UNDEFINED_DATA 0xDEADBEEF
+
+/* Get Health Status event buffer entry (0xFF22),
+ * repeated per reported health status.
*/
-struct ice_aq_desc {
- __le16 flags;
- __le16 opcode;
- __le16 datalen;
- __le16 retval;
- __le32 cookie_high;
- __le32 cookie_low;
- union {
- u8 raw[16];
- struct ice_aqc_generic generic;
- struct ice_aqc_get_ver get_ver;
- struct ice_aqc_driver_ver driver_ver;
- struct ice_aqc_q_shutdown q_shutdown;
- struct ice_aqc_req_res res_owner;
- struct ice_aqc_manage_mac_read mac_read;
- struct ice_aqc_manage_mac_write mac_write;
- struct ice_aqc_clear_pxe clear_pxe;
- struct ice_aqc_list_caps get_cap;
- struct ice_aqc_get_phy_caps get_phy;
- struct ice_aqc_set_phy_cfg set_phy;
- struct ice_aqc_restart_an restart_an;
- struct ice_aqc_set_phy_rec_clk_out set_phy_rec_clk_out;
- struct ice_aqc_get_phy_rec_clk_out get_phy_rec_clk_out;
- struct ice_aqc_get_sensor_reading get_sensor_reading;
- struct ice_aqc_get_sensor_reading_resp get_sensor_reading_resp;
- struct ice_aqc_gpio read_write_gpio;
- struct ice_aqc_sff_eeprom read_write_sff_param;
- struct ice_aqc_set_port_id_led set_port_id_led;
- struct ice_aqc_get_port_options get_port_options;
- struct ice_aqc_set_port_option set_port_option;
- struct ice_aqc_get_sw_cfg get_sw_conf;
- struct ice_aqc_set_port_params set_port_params;
- struct ice_aqc_sw_rules sw_rules;
- struct ice_aqc_add_get_recipe add_get_recipe;
- struct ice_aqc_recipe_to_profile recipe_to_profile;
- struct ice_aqc_get_topo get_topo;
- struct ice_aqc_sched_elem_cmd sched_elem_cmd;
- struct ice_aqc_query_txsched_res query_sched_res;
- struct ice_aqc_query_port_ets port_ets;
- struct ice_aqc_rl_profile rl_profile;
- struct ice_aqc_nvm nvm;
- struct ice_aqc_nvm_checksum nvm_checksum;
- struct ice_aqc_nvm_pkg_data pkg_data;
- struct ice_aqc_nvm_pass_comp_tbl pass_comp_tbl;
- struct ice_aqc_pf_vf_msg virt;
- struct ice_aqc_set_query_pfc_mode set_query_pfc_mode;
- struct ice_aqc_lldp_get_mib lldp_get_mib;
- struct ice_aqc_lldp_set_mib_change lldp_set_event;
- struct ice_aqc_lldp_stop lldp_stop;
- struct ice_aqc_lldp_start lldp_start;
- struct ice_aqc_lldp_set_local_mib lldp_set_mib;
- struct ice_aqc_lldp_stop_start_specific_agent lldp_agent_ctrl;
- struct ice_aqc_lldp_filter_ctrl lldp_filter_ctrl;
- struct ice_aqc_get_set_rss_lut get_set_rss_lut;
- struct ice_aqc_get_set_rss_key get_set_rss_key;
- struct ice_aqc_neigh_dev_req neigh_dev;
- struct ice_aqc_add_txqs add_txqs;
- struct ice_aqc_dis_txqs dis_txqs;
- struct ice_aqc_cfg_txqs cfg_txqs;
- struct ice_aqc_add_rdma_qset add_rdma_qset;
- struct ice_aqc_add_get_update_free_vsi vsi_cmd;
- struct ice_aqc_add_update_free_vsi_resp add_update_free_vsi_res;
- struct ice_aqc_download_pkg download_pkg;
- struct ice_aqc_set_cgu_input_config set_cgu_input_config;
- struct ice_aqc_get_cgu_input_config get_cgu_input_config;
- struct ice_aqc_set_cgu_output_config set_cgu_output_config;
- struct ice_aqc_get_cgu_output_config get_cgu_output_config;
- struct ice_aqc_get_cgu_dpll_status get_cgu_dpll_status;
- struct ice_aqc_set_cgu_dpll_config set_cgu_dpll_config;
- struct ice_aqc_set_cgu_ref_prio set_cgu_ref_prio;
- struct ice_aqc_get_cgu_ref_prio get_cgu_ref_prio;
- struct ice_aqc_get_cgu_info get_cgu_info;
- struct ice_aqc_driver_shared_params drv_shared_params;
- struct ice_aqc_fw_log fw_log;
- struct ice_aqc_set_mac_lb set_mac_lb;
- struct ice_aqc_alloc_free_res_cmd sw_res_ctrl;
- struct ice_aqc_set_mac_cfg set_mac_cfg;
- struct ice_aqc_set_event_mask set_event_mask;
- struct ice_aqc_get_link_status get_link_status;
- struct ice_aqc_event_lan_overflow lan_overflow;
- struct ice_aqc_get_link_topo get_link_topo;
- struct ice_aqc_i2c read_write_i2c;
- struct ice_aqc_read_i2c_resp read_i2c_resp;
- } params;
-};
-
-/* FW defined boundary for a large buffer, 4k >= Large buffer > 512 bytes */
-#define ICE_AQ_LG_BUF 512
-
-#define ICE_AQ_FLAG_ERR_S 2
-#define ICE_AQ_FLAG_LB_S 9
-#define ICE_AQ_FLAG_RD_S 10
-#define ICE_AQ_FLAG_BUF_S 12
-#define ICE_AQ_FLAG_SI_S 13
-
-#define ICE_AQ_FLAG_ERR BIT(ICE_AQ_FLAG_ERR_S) /* 0x4 */
-#define ICE_AQ_FLAG_LB BIT(ICE_AQ_FLAG_LB_S) /* 0x200 */
-#define ICE_AQ_FLAG_RD BIT(ICE_AQ_FLAG_RD_S) /* 0x400 */
-#define ICE_AQ_FLAG_BUF BIT(ICE_AQ_FLAG_BUF_S) /* 0x1000 */
-#define ICE_AQ_FLAG_SI BIT(ICE_AQ_FLAG_SI_S) /* 0x2000 */
-
-/* error codes */
-enum ice_aq_err {
- ICE_AQ_RC_OK = 0, /* Success */
- ICE_AQ_RC_EPERM = 1, /* Operation not permitted */
- ICE_AQ_RC_ENOENT = 2, /* No such element */
- ICE_AQ_RC_ENOMEM = 9, /* Out of memory */
- ICE_AQ_RC_EBUSY = 12, /* Device or resource busy */
- ICE_AQ_RC_EEXIST = 13, /* Object already exists */
- ICE_AQ_RC_EINVAL = 14, /* Invalid argument */
- ICE_AQ_RC_ENOSPC = 16, /* No space left or allocation failure */
- ICE_AQ_RC_ENOSYS = 17, /* Function not implemented */
- ICE_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
- ICE_AQ_RC_ENOSEC = 24, /* Missing security manifest */
- ICE_AQ_RC_EBADSIG = 25, /* Bad RSA signature */
- ICE_AQ_RC_ESVN = 26, /* SVN number prohibits this package */
- ICE_AQ_RC_EBADMAN = 27, /* Manifest hash mismatch */
- ICE_AQ_RC_EBADBUF = 28, /* Buffer hash mismatches manifest */
+struct ice_aqc_health_status_elem {
+ __le16 health_status_code;
+ __le16 event_source;
+ __le32 internal_data1;
+ __le32 internal_data2;
};
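A hedged sketch of consuming a Get Health Status (0xFF22) response: the indirect buffer is expected to carry health_status_count entries of the element struct above. The helper name and the debug print are illustrative, not part of this patch:

	static void example_parse_health_status(const struct ice_aqc_get_health_status *cmd,
						const void *buf)
	{
		const struct ice_aqc_health_status_elem *elem = buf;
		u16 i, count = le16_to_cpu(cmd->health_status_count);

		for (i = 0; i < count; i++, elem++)
			pr_debug("health status 0x%x from source 0x%x\n",
				 le16_to_cpu(elem->health_status_code),
				 le16_to_cpu(elem->event_source));
	}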
/* Admin Queue command opcodes */
@@ -2642,6 +2581,10 @@ enum ice_adminq_opc {
ice_aqc_opc_query_sched_res = 0x0412,
ice_aqc_opc_remove_rl_profiles = 0x0415,
+ /* tx topology commands */
+ ice_aqc_opc_set_tx_topo = 0x0417,
+ ice_aqc_opc_get_tx_topo = 0x0418,
+
/* PHY commands */
ice_aqc_opc_get_phy_caps = 0x0600,
ice_aqc_opc_set_phy_cfg = 0x0601,
@@ -2653,6 +2596,7 @@ enum ice_adminq_opc {
ice_aqc_opc_set_phy_rec_clk_out = 0x0630,
ice_aqc_opc_get_phy_rec_clk_out = 0x0631,
ice_aqc_opc_get_sensor_reading = 0x0632,
+ ice_aqc_opc_dnl_call = 0x0682,
ice_aqc_opc_get_link_topo = 0x06E0,
ice_aqc_opc_read_i2c = 0x06E2,
ice_aqc_opc_write_i2c = 0x06E3,
@@ -2702,6 +2646,9 @@ enum ice_adminq_opc {
ice_aqc_opc_cfg_txqs = 0x0C32,
ice_aqc_opc_add_rdma_qset = 0x0C33,
+ /* Tx Time queue commands */
+ ice_aqc_opc_set_txtimeqs = 0x0C35,
+
/* package commands */
ice_aqc_opc_download_pkg = 0x0C40,
ice_aqc_opc_upload_section = 0x0C41,
@@ -2709,6 +2656,7 @@ enum ice_adminq_opc {
ice_aqc_opc_get_pkg_info_list = 0x0C43,
/* 1588/SyncE commands/events */
+ ice_aqc_opc_get_cgu_input_measure = 0x0C59,
ice_aqc_opc_get_cgu_abilities = 0x0C61,
ice_aqc_opc_set_cgu_input_config = 0x0C62,
ice_aqc_opc_get_cgu_input_config = 0x0C63,
@@ -2725,6 +2673,10 @@ enum ice_adminq_opc {
/* Standalone Commands/Events */
ice_aqc_opc_event_lan_overflow = 0x1001,
+ /* System Diagnostic commands */
+ ice_aqc_opc_set_health_status_cfg = 0xFF20,
+ ice_aqc_opc_get_health_status = 0xFF22,
+
/* FW Logging Commands */
ice_aqc_opc_fw_logs_config = 0xFF30,
ice_aqc_opc_fw_logs_register = 0xFF31,
diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c
index 7cee365cc7d1..1f7834c03550 100644
--- a/drivers/net/ethernet/intel/ice/ice_arfs.c
+++ b/drivers/net/ethernet/intel/ice/ice_arfs.c
@@ -378,6 +378,50 @@ ice_arfs_is_perfect_flow_set(struct ice_hw *hw, __be16 l3_proto, u8 l4_proto)
}
/**
+ * ice_arfs_cmp - Check if aRFS filter matches this flow.
+ * @fltr_info: filter info of the saved ARFS entry.
+ * @fk: flow dissector keys.
+ * @n_proto: One of htons(ETH_P_IP) or htons(ETH_P_IPV6).
+ * @ip_proto: One of IPPROTO_TCP or IPPROTO_UDP.
+ *
+ * Since this function assumes limited values for n_proto and ip_proto, it
+ * is meant to be called only from ice_rx_flow_steer().
+ *
+ * Return:
+ * * true - fltr_info refers to the same flow as fk.
+ * * false - fltr_info and fk refer to different flows.
+ */
+static bool
+ice_arfs_cmp(const struct ice_fdir_fltr *fltr_info, const struct flow_keys *fk,
+ __be16 n_proto, u8 ip_proto)
+{
+ /* Determine if the filter is for IPv4 or IPv6 based on flow_type,
+ * which is one of ICE_FLTR_PTYPE_NONF_IPV{4,6}_{TCP,UDP}.
+ */
+ bool is_v4 = fltr_info->flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
+ fltr_info->flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP;
+
+ /* The checks below are ordered with the quickest and most discriminative
+ * fields first, so that mismatches fail early.
+ */
+ if (is_v4)
+ return n_proto == htons(ETH_P_IP) &&
+ fltr_info->ip.v4.src_port == fk->ports.src &&
+ fltr_info->ip.v4.dst_port == fk->ports.dst &&
+ fltr_info->ip.v4.src_ip == fk->addrs.v4addrs.src &&
+ fltr_info->ip.v4.dst_ip == fk->addrs.v4addrs.dst &&
+ fltr_info->ip.v4.proto == ip_proto;
+
+ return fltr_info->ip.v6.src_port == fk->ports.src &&
+ fltr_info->ip.v6.dst_port == fk->ports.dst &&
+ fltr_info->ip.v6.proto == ip_proto &&
+ !memcmp(&fltr_info->ip.v6.src_ip, &fk->addrs.v6addrs.src,
+ sizeof(struct in6_addr)) &&
+ !memcmp(&fltr_info->ip.v6.dst_ip, &fk->addrs.v6addrs.dst,
+ sizeof(struct in6_addr));
+}
+
+/**
* ice_rx_flow_steer - steer the Rx flow to where application is being run
* @netdev: ptr to the netdev being adjusted
* @skb: buffer with required header information
@@ -448,6 +492,10 @@ ice_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb,
continue;
fltr_info = &arfs_entry->fltr_info;
+
+ if (!ice_arfs_cmp(fltr_info, &fk, n_proto, ip_proto))
+ continue;
+
ret = fltr_info->fltr_id;
if (fltr_info->q_index == rxq_idx ||
@@ -511,7 +559,7 @@ void ice_init_arfs(struct ice_vsi *vsi)
struct hlist_head *arfs_fltr_list;
unsigned int i;
- if (!vsi || vsi->type != ICE_VSI_PF)
+ if (!vsi || vsi->type != ICE_VSI_PF || ice_is_arfs_active(vsi))
return;
arfs_fltr_list = kcalloc(ICE_MAX_ARFS_LIST, sizeof(*arfs_fltr_list),
@@ -571,25 +619,6 @@ void ice_clear_arfs(struct ice_vsi *vsi)
}
/**
- * ice_free_cpu_rx_rmap - free setup CPU reverse map
- * @vsi: the VSI to be forwarded to
- */
-void ice_free_cpu_rx_rmap(struct ice_vsi *vsi)
-{
- struct net_device *netdev;
-
- if (!vsi || vsi->type != ICE_VSI_PF)
- return;
-
- netdev = vsi->netdev;
- if (!netdev || !netdev->rx_cpu_rmap)
- return;
-
- free_irq_cpu_rmap(netdev->rx_cpu_rmap);
- netdev->rx_cpu_rmap = NULL;
-}
-
-/**
* ice_set_cpu_rx_rmap - setup CPU reverse map for each queue
* @vsi: the VSI to be forwarded to
*/
@@ -597,7 +626,6 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
{
struct net_device *netdev;
struct ice_pf *pf;
- int i;
if (!vsi || vsi->type != ICE_VSI_PF)
return 0;
@@ -610,18 +638,7 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
netdev_dbg(netdev, "Setup CPU RMAP: vsi type 0x%x, ifname %s, q_vectors %d\n",
vsi->type, netdev->name, vsi->num_q_vectors);
- netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(vsi->num_q_vectors);
- if (unlikely(!netdev->rx_cpu_rmap))
- return -EINVAL;
-
- ice_for_each_q_vector(vsi, i)
- if (irq_cpu_rmap_add(netdev->rx_cpu_rmap,
- vsi->q_vectors[i]->irq.virq)) {
- ice_free_cpu_rx_rmap(vsi);
- return -EINVAL;
- }
-
- return 0;
+ return netif_enable_cpu_rmap(netdev, vsi->num_q_vectors);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.h b/drivers/net/ethernet/intel/ice/ice_arfs.h
index 9669ad9bf7b5..9706293128c3 100644
--- a/drivers/net/ethernet/intel/ice/ice_arfs.h
+++ b/drivers/net/ethernet/intel/ice/ice_arfs.h
@@ -45,7 +45,6 @@ int
ice_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb,
u16 rxq_idx, u32 flow_id);
void ice_clear_arfs(struct ice_vsi *vsi);
-void ice_free_cpu_rx_rmap(struct ice_vsi *vsi);
void ice_init_arfs(struct ice_vsi *vsi);
void ice_sync_arfs_fltrs(struct ice_pf *pf);
int ice_set_cpu_rx_rmap(struct ice_vsi *vsi);
@@ -56,7 +55,6 @@ ice_is_arfs_using_perfect_flow(struct ice_hw *hw,
enum ice_fltr_ptype flow_type);
#else
static inline void ice_clear_arfs(struct ice_vsi *vsi) { }
-static inline void ice_free_cpu_rx_rmap(struct ice_vsi *vsi) { }
static inline void ice_init_arfs(struct ice_vsi *vsi) { }
static inline void ice_sync_arfs_fltrs(struct ice_pf *pf) { }
static inline void ice_remove_arfs(struct ice_pf *pf) { }
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index a545a7917e4f..eadb1e3d12b3 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2019, Intel Corporation. */
#include <net/xdp_sock_drv.h>
+#include <linux/net/intel/libie/rx.h>
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"
@@ -121,7 +122,7 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
q_vector->irq.index = -ENOENT;
if (vsi->type == ICE_VSI_VF) {
- q_vector->reg_idx = ice_calc_vf_reg_idx(vsi->vf, q_vector);
+ ice_calc_vf_reg_idx(vsi->vf, q_vector);
goto out;
} else if (vsi->type == ICE_VSI_CTRL && vsi->vf) {
struct ice_vsi *ctrl_vsi = ice_get_vf_ctrl_vsi(pf, vsi);
@@ -145,17 +146,15 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
skip_alloc:
q_vector->reg_idx = q_vector->irq.index;
-
- /* only set affinity_mask if the CPU is online */
- if (cpu_online(v_idx))
- cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
+ q_vector->vf_reg_idx = q_vector->irq.index;
/* This will not be called in the driver load path because the netdev
* will not be created yet. All other cases with register the NAPI
* handler here (i.e. resume, reset/rebuild, etc.)
*/
if (vsi->netdev)
- netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll);
+ netif_napi_add_config(vsi->netdev, &q_vector->napi,
+ ice_napi_poll, v_idx);
out:
/* tie q_vector and VSI together */
@@ -189,16 +188,11 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
}
q_vector = vsi->q_vectors[v_idx];
- ice_for_each_tx_ring(tx_ring, q_vector->tx) {
- ice_queue_set_napi(vsi, tx_ring->q_index, NETDEV_QUEUE_TYPE_TX,
- NULL);
+ ice_for_each_tx_ring(tx_ring, vsi->q_vectors[v_idx]->tx)
tx_ring->q_vector = NULL;
- }
- ice_for_each_rx_ring(rx_ring, q_vector->rx) {
- ice_queue_set_napi(vsi, rx_ring->q_index, NETDEV_QUEUE_TYPE_RX,
- NULL);
+
+ ice_for_each_rx_ring(rx_ring, vsi->q_vectors[v_idx]->rx)
rx_ring->q_vector = NULL;
- }
/* only VSI with an associated netdev is set up with NAPI */
if (vsi->netdev)
@@ -249,7 +243,8 @@ static void ice_cfg_itr_gran(struct ice_hw *hw)
* @ring: ring to get the absolute queue index
* @tc: traffic class number
*/
-static u16 ice_calc_txq_handle(struct ice_vsi *vsi, struct ice_tx_ring *ring, u8 tc)
+static u16
+ice_calc_txq_handle(const struct ice_vsi *vsi, struct ice_tx_ring *ring, u8 tc)
{
WARN_ONCE(ice_ring_is_xdp(ring) && tc, "XDP ring can't belong to TC other than 0\n");
@@ -257,37 +252,13 @@ static u16 ice_calc_txq_handle(struct ice_vsi *vsi, struct ice_tx_ring *ring, u8
return ring->q_index - ring->ch->base_q;
/* Idea here for calculation is that we subtract the number of queue
- * count from TC that ring belongs to from it's absolute queue index
+ * count from TC that ring belongs to from its absolute queue index
* and as a result we get the queue's index within TC.
*/
return ring->q_index - vsi->tc_cfg.tc_info[tc].qoffset;
}
/**
- * ice_eswitch_calc_txq_handle
- * @ring: pointer to ring which unique index is needed
- *
- * To correctly work with many netdevs ring->q_index of Tx rings on switchdev
- * VSI can repeat. Hardware ring setup requires unique q_index. Calculate it
- * here by finding index in vsi->tx_rings of this ring.
- *
- * Return ICE_INVAL_Q_INDEX when index wasn't found. Should never happen,
- * because VSI is get from ring->vsi, so it has to be present in this VSI.
- */
-static u16 ice_eswitch_calc_txq_handle(struct ice_tx_ring *ring)
-{
- const struct ice_vsi *vsi = ring->vsi;
- int i;
-
- ice_for_each_txq(vsi, i) {
- if (vsi->tx_rings[i] == ring)
- return i;
- }
-
- return ICE_INVAL_Q_INDEX;
-}
-
-/**
* ice_cfg_xps_tx_ring - Configure XPS for a Tx ring
* @ring: The Tx ring to configure
*
@@ -303,35 +274,26 @@ static void ice_cfg_xps_tx_ring(struct ice_tx_ring *ring)
if (test_and_set_bit(ICE_TX_XPS_INIT_DONE, ring->xps_state))
return;
- netif_set_xps_queue(ring->netdev, &ring->q_vector->affinity_mask,
+ netif_set_xps_queue(ring->netdev,
+ &ring->q_vector->napi.config->affinity_mask,
ring->q_index);
}
/**
- * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
- * @ring: The Tx ring to configure
- * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized
- * @pf_q: queue index in the PF space
+ * ice_set_txq_ctx_vmvf - set queue context VM/VF type and number by VSI type
+ * @ring: the Tx ring to configure
+ * @vmvf_type: VM/VF type
+ * @vmvf_num: VM/VF number
*
- * Configure the Tx descriptor ring in TLAN context.
+ * Return: 0 on success and a negative value on error.
*/
-static void
-ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
+static int
+ice_set_txq_ctx_vmvf(struct ice_tx_ring *ring, u8 *vmvf_type, u16 *vmvf_num)
{
struct ice_vsi *vsi = ring->vsi;
- struct ice_hw *hw = &vsi->back->hw;
-
- tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;
-
- tlan_ctx->port_num = vsi->port_info->lport;
-
- /* Transmit Queue Length */
- tlan_ctx->qlen = ring->count;
-
- ice_set_cgd_num(tlan_ctx, ring->dcb_tc);
+ struct ice_hw *hw;
- /* PF number */
- tlan_ctx->pf_num = hw->pf_id;
+ hw = &vsi->back->hw;
/* queue belongs to a specific VSI type
* VF / VM index should be programmed per vmvf_type setting:
@@ -344,21 +306,60 @@ ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf
case ICE_VSI_CTRL:
case ICE_VSI_PF:
if (ring->ch)
- tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
+ *vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
else
- tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
+ *vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
break;
case ICE_VSI_VF:
/* Firmware expects vmvf_num to be absolute VF ID */
- tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf->vf_id;
- tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
+ *vmvf_num = hw->func_caps.vf_base_id + vsi->vf->vf_id;
+ *vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
break;
- case ICE_VSI_SWITCHDEV_CTRL:
- tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
+ case ICE_VSI_SF:
+ *vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
break;
default:
- return;
+ dev_info(ice_pf_to_dev(vsi->back),
+ "Unable to set VMVF type for VSI type %d\n",
+ vsi->type);
+ return -EINVAL;
}
+ return 0;
+}
+
+/**
+ * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
+ * @ring: the Tx ring to configure
+ * @tlan_ctx: pointer to the Tx LAN queue context structure to be initialized
+ * @pf_q: queue index in the PF space
+ *
+ * Configure the Tx descriptor ring in TLAN context.
+ *
+ * Return: 0 on success and a negative value on error.
+ */
+static int
+ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
+{
+ struct ice_vsi *vsi = ring->vsi;
+ struct ice_hw *hw;
+ int err;
+
+ hw = &vsi->back->hw;
+ tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;
+ tlan_ctx->port_num = vsi->port_info->lport;
+
+ /* Transmit Queue Length */
+ tlan_ctx->qlen = ring->count;
+
+ ice_set_cgd_num(tlan_ctx, ring->dcb_tc);
+
+ /* PF number */
+ tlan_ctx->pf_num = hw->pf_id;
+
+ err = ice_set_txq_ctx_vmvf(ring, &tlan_ctx->vmvf_type,
+ &tlan_ctx->vmvf_num);
+ if (err)
+ return err;
/* make sure the context is associated with the right VSI */
if (ring->ch)
@@ -375,6 +376,8 @@ ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf
break;
}
+ tlan_ctx->quanta_prof_idx = ring->quanta_prof_id;
+
tlan_ctx->tso_ena = ICE_TX_LEGACY;
tlan_ctx->tso_qnum = pf_q;
@@ -383,22 +386,83 @@ ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf
* 1: Legacy Host Interface
*/
tlan_ctx->legacy_int = ICE_TX_LEGACY;
+
+ return 0;
}
/**
- * ice_rx_offset - Return expected offset into page to access data
- * @rx_ring: Ring we are requesting offset of
+ * ice_setup_txtime_ctx - setup a struct ice_txtime_ctx instance
+ * @ring: the tstamp ring to configure
+ * @txtime_ctx: pointer to the Tx time queue context structure to be initialized
*
- * Returns the offset value for ring into the data buffer.
+ * Return: 0 on success and a negative value on error.
*/
-static unsigned int ice_rx_offset(struct ice_rx_ring *rx_ring)
+static int
+ice_setup_txtime_ctx(const struct ice_tstamp_ring *ring,
+ struct ice_txtime_ctx *txtime_ctx)
{
- if (ice_ring_uses_build_skb(rx_ring))
- return ICE_SKB_PAD;
+ struct ice_tx_ring *tx_ring = ring->tx_ring;
+ struct ice_vsi *vsi = tx_ring->vsi;
+ struct ice_hw *hw = &vsi->back->hw;
+ int err;
+
+ txtime_ctx->base = ring->dma >> ICE_TXTIME_CTX_BASE_S;
+
+ /* Tx time Queue Length */
+ txtime_ctx->qlen = ring->count;
+ txtime_ctx->txtime_ena_q = 1;
+
+ /* PF number */
+ txtime_ctx->pf_num = hw->pf_id;
+
+ err = ice_set_txq_ctx_vmvf(tx_ring, &txtime_ctx->vmvf_type,
+ &txtime_ctx->vmvf_num);
+ if (err)
+ return err;
+
+ /* make sure the context is associated with the right VSI */
+ if (tx_ring->ch)
+ txtime_ctx->src_vsi = tx_ring->ch->vsi_num;
+ else
+ txtime_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);
+
+ txtime_ctx->ts_res = ICE_TXTIME_CTX_RESOLUTION_128NS;
+ txtime_ctx->drbell_mode_32 = ICE_TXTIME_CTX_DRBELL_MODE_32;
+ txtime_ctx->ts_fetch_prof_id = ICE_TXTIME_CTX_FETCH_PROF_ID_0;
+
return 0;
}
/**
+ * ice_calc_ts_ring_count - calculate the number of Tx time stamp descriptors
+ * @tx_ring: Tx ring to calculate the count for
+ *
+ * Return: the number of Tx time stamp descriptors.
+ */
+u16 ice_calc_ts_ring_count(struct ice_tx_ring *tx_ring)
+{
+ u16 prof = ICE_TXTIME_CTX_FETCH_PROF_ID_0;
+ struct ice_vsi *vsi = tx_ring->vsi;
+ struct ice_hw *hw = &vsi->back->hw;
+ u16 max_fetch_desc = 0, fetch, i;
+ u32 reg;
+
+ for (i = 0; i < ICE_TXTIME_FETCH_PROFILE_CNT; i++) {
+ reg = rd32(hw, E830_GLTXTIME_FETCH_PROFILE(prof, 0));
+ fetch = FIELD_GET(E830_GLTXTIME_FETCH_PROFILE_FETCH_TS_DESC_M,
+ reg);
+ max_fetch_desc = max(fetch, max_fetch_desc);
+ }
+
+ if (!max_fetch_desc)
+ max_fetch_desc = ICE_TXTIME_FETCH_TS_DESC_DFLT;
+
+ max_fetch_desc = ALIGN(max_fetch_desc, ICE_REQ_DESC_MULTIPLE);
+
+ return tx_ring->count + max_fetch_desc;
+}
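Worked example (assumed values, not from the patch): with tx_ring->count = 512, a largest fetch-profile value of 100 descriptors, and assuming ICE_REQ_DESC_MULTIPLE is 32, ALIGN(100, 32) = 128, so the time stamp ring is sized 512 + 128 = 640 descriptors.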
+
+/**
* ice_setup_rx_ctx - Configure a receive ring context
* @ring: The Rx ring to configure
*
@@ -460,8 +524,29 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
else
rlan_ctx.l2tsel = 1;
- rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
- rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
+ if (ring->hdr_pp) {
+ rlan_ctx.hbuf = ring->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
+ rlan_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT;
+
+ /*
+ * If the frame is TCP/UDP/SCTP, it is split at the start of the
+ * L4 payload.
+ * If not, but it is an IPv4/IPv6 frame, it is split after the
+ * IP header.
+ * If it is not IP, it is split after the Ethernet header.
+ *
+ * In any case, the header buffer will never be left empty.
+ */
+ rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_L2 |
+ ICE_RLAN_RX_HSPLIT_0_SPLIT_IP |
+ ICE_RLAN_RX_HSPLIT_0_SPLIT_TCP_UDP |
+ ICE_RLAN_RX_HSPLIT_0_SPLIT_SCTP;
+ } else {
+ rlan_ctx.hbuf = 0;
+ rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
+ rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
+ }
+
rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;
/* This controls whether VLAN is stripped from inner headers
@@ -479,6 +564,17 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
/* Rx queue threshold in units of 64 */
rlan_ctx.lrxqthresh = 1;
+ /* Enable descriptor prefetch */
+ rlan_ctx.prefena = 1;
+
+ /* PF acts as uplink for switchdev; set flex descriptor with src_vsi
+ * metadata and flags to allow redirecting to PR netdev
+ */
+ if (ice_is_eswitch_mode_switchdev(vsi->back)) {
+ ring->flags |= ICE_RX_FLAGS_MULTIDEV;
+ rxdid = ICE_RXDID_FLEX_NIC_2;
+ }
+
/* Enable Flexible Descriptors in the queue context which
* allows this driver to select a specific receive descriptor format
* increasing context priority to pick up profile ID; default is 0x01;
@@ -487,9 +583,6 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
*/
if (vsi->type != ICE_VSI_VF)
ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3, true);
- else
- ice_write_qrxflxp_cntxt(hw, pf_q, ICE_RXDID_LEGACY_1, 0x3,
- false);
/* Absolute queue number out of 2K needs to be passed */
err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
@@ -502,14 +595,6 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
if (vsi->type == ICE_VSI_VF)
return 0;
- /* configure Rx buffer alignment */
- if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
- ice_clear_ring_build_skb_ena(ring);
- else
- ice_set_ring_build_skb_ena(ring);
-
- ring->rx_offset = ice_rx_offset(ring);
-
/* init queue specific tail register */
ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
writel(0, ring->tail);
@@ -517,17 +602,51 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
return 0;
}
-static void ice_xsk_pool_fill_cb(struct ice_rx_ring *ring)
+static int ice_rxq_pp_create(struct ice_rx_ring *rq)
{
- void *ctx_ptr = &ring->pkt_ctx;
- struct xsk_cb_desc desc = {};
-
- XSK_CHECK_PRIV_TYPE(struct ice_xdp_buff);
- desc.src = &ctx_ptr;
- desc.off = offsetof(struct ice_xdp_buff, pkt_ctx) -
- sizeof(struct xdp_buff);
- desc.bytes = sizeof(ctx_ptr);
- xsk_pool_fill_cb(ring->xsk_pool, &desc);
+ struct libeth_fq fq = {
+ .count = rq->count,
+ .nid = NUMA_NO_NODE,
+ .hsplit = rq->vsi->hsplit,
+ .xdp = ice_is_xdp_ena_vsi(rq->vsi),
+ .buf_len = LIBIE_MAX_RX_BUF_LEN,
+ };
+ int err;
+
+ err = libeth_rx_fq_create(&fq, &rq->q_vector->napi);
+ if (err)
+ return err;
+
+ rq->pp = fq.pp;
+ rq->rx_fqes = fq.fqes;
+ rq->truesize = fq.truesize;
+ rq->rx_buf_len = fq.buf_len;
+
+ if (!fq.hsplit)
+ return 0;
+
+ fq = (struct libeth_fq){
+ .count = rq->count,
+ .type = LIBETH_FQE_HDR,
+ .nid = NUMA_NO_NODE,
+ .xdp = ice_is_xdp_ena_vsi(rq->vsi),
+ };
+
+ err = libeth_rx_fq_create(&fq, &rq->q_vector->napi);
+ if (err)
+ goto destroy;
+
+ rq->hdr_pp = fq.pp;
+ rq->hdr_fqes = fq.fqes;
+ rq->hdr_truesize = fq.truesize;
+ rq->rx_hdr_len = fq.buf_len;
+
+ return 0;
+
+destroy:
+ ice_rxq_pp_destroy(rq);
+
+ return err;
}
/**
@@ -539,12 +658,11 @@ static void ice_xsk_pool_fill_cb(struct ice_rx_ring *ring)
static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
{
struct device *dev = ice_pf_to_dev(ring->vsi->back);
- u32 num_bufs = ICE_RX_DESC_UNUSED(ring);
+ u32 num_bufs = ICE_DESC_UNUSED(ring);
+ u32 rx_buf_len;
int err;
- ring->rx_buf_len = ring->vsi->rx_buf_len;
-
- if (ring->vsi->type == ICE_VSI_PF) {
+ if (ring->vsi->type == ICE_VSI_PF || ring->vsi->type == ICE_VSI_SF) {
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
ring->q_index,
@@ -554,16 +672,20 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
return err;
}
- ring->xsk_pool = ice_xsk_pool(ring);
+ ice_rx_xsk_pool(ring);
+ err = ice_realloc_rx_xdp_bufs(ring, ring->xsk_pool);
+ if (err)
+ return err;
+
if (ring->xsk_pool) {
xdp_rxq_info_unreg(&ring->xdp_rxq);
- ring->rx_buf_len =
+ rx_buf_len =
xsk_pool_get_rx_frame_size(ring->xsk_pool);
err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
ring->q_index,
ring->q_vector->napi.napi_id,
- ring->rx_buf_len);
+ rx_buf_len);
if (err)
return err;
err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
@@ -572,36 +694,33 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
if (err)
return err;
xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
- ice_xsk_pool_fill_cb(ring);
dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
ring->q_index);
} else {
+ err = ice_rxq_pp_create(ring);
+ if (err)
+ return err;
+
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
ring->q_index,
ring->q_vector->napi.napi_id,
ring->rx_buf_len);
if (err)
- return err;
+ goto err_destroy_fq;
}
-
- err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
- MEM_TYPE_PAGE_SHARED,
- NULL);
- if (err)
- return err;
+ xdp_rxq_info_attach_page_pool(&ring->xdp_rxq,
+ ring->pp);
}
}
- xdp_init_buff(&ring->xdp, ice_rx_pg_size(ring) / 2, &ring->xdp_rxq);
ring->xdp.data = NULL;
- ring->xdp_ext.pkt_ctx = &ring->pkt_ctx;
err = ice_setup_rx_ctx(ring);
if (err) {
dev_err(dev, "ice_setup_rx_ctx failed for RxQ %d, err %d\n",
ring->q_index, err);
- return err;
+ goto err_destroy_fq;
}
if (ring->xsk_pool) {
@@ -615,7 +734,7 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
return 0;
}
- ok = ice_alloc_rx_bufs_zc(ring, num_bufs);
+ ok = ice_alloc_rx_bufs_zc(ring, ring->xsk_pool, num_bufs);
if (!ok) {
u16 pf_q = ring->vsi->rxq_map[ring->q_index];
@@ -626,9 +745,20 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
return 0;
}
- ice_alloc_rx_bufs(ring, num_bufs);
+ if (ring->vsi->type == ICE_VSI_CTRL)
+ ice_init_ctrl_rx_descs(ring, num_bufs);
+ else
+ err = ice_alloc_rx_bufs(ring, num_bufs);
+
+ if (err)
+ goto err_destroy_fq;
return 0;
+
+err_destroy_fq:
+ ice_rxq_pp_destroy(ring);
+
+ return err;
}
int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
@@ -642,21 +772,17 @@ int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
/**
* ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
* @vsi: VSI
+ * @ring: Rx ring to configure
+ *
+ * Determine the maximum frame size and Rx buffer length to use for a PF VSI.
+ * Set these in the associated Rx ring structure.
*/
-static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
+static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi, struct ice_rx_ring *ring)
{
- if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
+ if (!vsi->netdev) {
vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX;
- vsi->rx_buf_len = ICE_RXBUF_1664;
-#if (PAGE_SIZE < 8192)
- } else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
- (vsi->netdev->mtu <= ETH_DATA_LEN)) {
- vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
- vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
-#endif
} else {
vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
- vsi->rx_buf_len = ICE_RXBUF_3072;
}
}
@@ -671,15 +797,15 @@ int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
{
u16 i;
- if (vsi->type == ICE_VSI_VF)
- goto setup_rings;
-
- ice_vsi_cfg_frame_size(vsi);
-setup_rings:
/* set up individual rings */
ice_for_each_rxq(vsi, i) {
- int err = ice_vsi_cfg_rxq(vsi->rx_rings[i]);
+ struct ice_rx_ring *ring = vsi->rx_rings[i];
+ int err;
+
+ if (vsi->type != ICE_VSI_VF)
+ ice_vsi_cfg_frame_size(vsi, ring);
+ err = ice_vsi_cfg_rxq(ring);
if (err)
return err;
}
@@ -794,13 +920,11 @@ int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
return 0;
err_out:
- while (v_idx--)
- ice_free_q_vector(vsi, v_idx);
- dev_err(dev, "Failed to allocate %d q_vector for VSI %d, ret=%d\n",
- vsi->num_q_vectors, vsi->vsi_num, err);
- vsi->num_q_vectors = 0;
- return err;
+ dev_info(dev, "Failed to allocate %d q_vectors for VSI %d, new value %d",
+ vsi->num_q_vectors, vsi->vsi_num, v_idx);
+ vsi->num_q_vectors = v_idx;
+ return v_idx ? 0 : err;
}
/**
@@ -860,6 +984,9 @@ void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
}
rx_rings_rem -= rx_rings_per_v;
}
+
+ if (ice_is_xdp_ena_vsi(vsi))
+ ice_map_xdp_rings(vsi);
}
/**
@@ -877,13 +1004,49 @@ void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
}
/**
+ * ice_cfg_tstamp - Configure Tx time stamp queue
+ * @tx_ring: Tx ring to be configured with timestamping
+ *
+ * Return: 0 on success and a negative value on error.
+ */
+static int
+ice_cfg_tstamp(struct ice_tx_ring *tx_ring)
+{
+ DEFINE_RAW_FLEX(struct ice_aqc_set_txtime_qgrp, txtime_qg_buf,
+ txtimeqs, 1);
+ u8 txtime_buf_len = struct_size(txtime_qg_buf, txtimeqs, 1);
+ struct ice_tstamp_ring *tstamp_ring = tx_ring->tstamp_ring;
+ struct ice_txtime_ctx txtime_ctx = {};
+ struct ice_vsi *vsi = tx_ring->vsi;
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ u16 pf_q = tx_ring->reg_idx;
+ int err;
+
+ err = ice_setup_txtime_ctx(tstamp_ring, &txtime_ctx);
+ if (err) {
+ dev_err(ice_pf_to_dev(pf), "Failed to setup Tx time queue context for queue %d, error: %d\n",
+ pf_q, err);
+ return err;
+ }
+ ice_pack_txtime_ctx(&txtime_ctx,
+ &txtime_qg_buf->txtimeqs[0].txtime_ctx);
+
+ tstamp_ring->tail = hw->hw_addr + E830_GLQTX_TXTIME_DBELL_LSB(pf_q);
+ return ice_aq_set_txtimeq(hw, pf_q, 1, txtime_qg_buf,
+ txtime_buf_len, NULL);
+}
+
+/**
* ice_vsi_cfg_txq - Configure single Tx queue
* @vsi: the VSI that queue belongs to
* @ring: Tx ring to be configured
* @qg_buf: queue group buffer
+ *
+ * Return: 0 on success and a negative value on error.
*/
static int
-ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
+ice_vsi_cfg_txq(const struct ice_vsi *vsi, struct ice_tx_ring *ring,
struct ice_aqc_add_tx_qgrp *qg_buf)
{
u8 buf_len = struct_size(qg_buf, txqs, 1);
@@ -892,19 +1055,23 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
struct ice_channel *ch = ring->ch;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
+ u32 pf_q, vsi_idx;
int status;
- u16 pf_q;
u8 tc;
/* Configure XPS */
ice_cfg_xps_tx_ring(ring);
pf_q = ring->reg_idx;
- ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
+ status = ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
+ if (status) {
+ dev_err(ice_pf_to_dev(pf), "Failed to setup Tx context for queue %d, error: %d\n",
+ pf_q, status);
+ return status;
+ }
/* copy context contents into the qg_buf */
qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
- ice_set_ctx(hw, (u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
- ice_tlan_ctx_info);
+ ice_pack_txq_ctx(&tlan_ctx, &qg_buf->txqs[0].txq_ctx);
/* init queue specific tail reg. It is referred as
* transmit comm scheduler queue doorbell.
@@ -919,23 +1086,17 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
/* Add unique software queue handle of the Tx queue per
* TC into the VSI Tx ring
*/
- if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
- ring->q_handle = ice_eswitch_calc_txq_handle(ring);
+ ring->q_handle = ice_calc_txq_handle(vsi, ring, tc);
- if (ring->q_handle == ICE_INVAL_Q_INDEX)
- return -ENODEV;
+ if (ch) {
+ tc = 0;
+ vsi_idx = ch->ch_vsi->idx;
} else {
- ring->q_handle = ice_calc_txq_handle(vsi, ring, tc);
+ vsi_idx = vsi->idx;
}
- if (ch)
- status = ice_ena_vsi_txq(vsi->port_info, ch->ch_vsi->idx, 0,
- ring->q_handle, 1, qg_buf, buf_len,
- NULL);
- else
- status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
- ring->q_handle, 1, qg_buf, buf_len,
- NULL);
+ status = ice_ena_vsi_txq(vsi->port_info, vsi_idx, tc, ring->q_handle,
+ 1, qg_buf, buf_len, NULL);
if (status) {
dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %d\n",
status);
@@ -950,7 +1111,32 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
if (pf_q == le16_to_cpu(txq->txq_id))
ring->txq_teid = le32_to_cpu(txq->q_teid);
+ if (ice_is_txtime_ena(ring)) {
+ status = ice_alloc_setup_tstamp_ring(ring);
+ if (status) {
+ dev_err(ice_pf_to_dev(pf),
+ "Failed to allocate Tx timestamp ring, error: %d\n",
+ status);
+ goto err_setup_tstamp;
+ }
+
+ status = ice_cfg_tstamp(ring);
+ if (status) {
+ dev_err(ice_pf_to_dev(pf), "Failed to set Tx Time queue context, error: %d\n",
+ status);
+ goto err_cfg_tstamp;
+ }
+ }
return 0;
+
+err_cfg_tstamp:
+ ice_free_tx_tstamp_ring(ring);
+err_setup_tstamp:
+ ice_dis_vsi_txq(vsi->port_info, vsi_idx, tc, 1, &ring->q_handle,
+ &ring->reg_idx, &ring->txq_teid, ICE_NO_RESET,
+ tlan_ctx.vmvf_num, NULL);
+
+ return status;
}
int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings,
@@ -1209,3 +1395,148 @@ ice_fill_txq_meta(const struct ice_vsi *vsi, struct ice_tx_ring *ring,
txq_meta->tc = tc;
}
}
+
+/**
+ * ice_qp_reset_stats - Resets all stats for rings of given index
+ * @vsi: VSI that contains rings of interest
+ * @q_idx: ring index in array
+ */
+static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
+{
+ struct ice_vsi_stats *vsi_stat;
+ struct ice_pf *pf;
+
+ pf = vsi->back;
+ if (!pf->vsi_stats)
+ return;
+
+ vsi_stat = pf->vsi_stats[vsi->idx];
+ if (!vsi_stat)
+ return;
+
+ memset(&vsi_stat->rx_ring_stats[q_idx]->rx_stats, 0,
+ sizeof(vsi_stat->rx_ring_stats[q_idx]->rx_stats));
+ memset(&vsi_stat->tx_ring_stats[q_idx]->stats, 0,
+ sizeof(vsi_stat->tx_ring_stats[q_idx]->stats));
+ if (vsi->xdp_rings)
+ memset(&vsi->xdp_rings[q_idx]->ring_stats->stats, 0,
+ sizeof(vsi->xdp_rings[q_idx]->ring_stats->stats));
+}
+
+/**
+ * ice_qp_clean_rings - Cleans all the rings of a given index
+ * @vsi: VSI that contains rings of interest
+ * @q_idx: ring index in array
+ */
+static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
+{
+ ice_clean_tx_ring(vsi->tx_rings[q_idx]);
+ if (vsi->xdp_rings)
+ ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
+ ice_clean_rx_ring(vsi->rx_rings[q_idx]);
+}
+
+/**
+ * ice_qp_dis - Disables a queue pair
+ * @vsi: VSI of interest
+ * @q_idx: ring index in array
+ *
+ * Returns 0 on success, negative on failure.
+ */
+int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
+{
+ struct ice_txq_meta txq_meta = { };
+ struct ice_q_vector *q_vector;
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
+ int fail = 0;
+ int err;
+
+ if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
+ return -EINVAL;
+
+ tx_ring = vsi->tx_rings[q_idx];
+ rx_ring = vsi->rx_rings[q_idx];
+ q_vector = rx_ring->q_vector;
+
+ synchronize_net();
+ netif_carrier_off(vsi->netdev);
+ netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
+
+ ice_qvec_dis_irq(vsi, rx_ring, q_vector);
+ ice_qvec_toggle_napi(vsi, q_vector, false);
+
+ ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
+ err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
+ if (!fail)
+ fail = err;
+ if (vsi->xdp_rings) {
+ struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
+
+ memset(&txq_meta, 0, sizeof(txq_meta));
+ ice_fill_txq_meta(vsi, xdp_ring, &txq_meta);
+ err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, xdp_ring,
+ &txq_meta);
+ if (!fail)
+ fail = err;
+ }
+
+ ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, false);
+ ice_qp_clean_rings(vsi, q_idx);
+ ice_qp_reset_stats(vsi, q_idx);
+
+ return fail;
+}
+
+/**
+ * ice_qp_ena - Enables a queue pair
+ * @vsi: VSI of interest
+ * @q_idx: ring index in array
+ *
+ * Returns 0 on success, negative on failure.
+ */
+int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
+{
+ struct ice_q_vector *q_vector;
+ int fail = 0;
+ bool link_up;
+ int err;
+
+ err = ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx);
+ if (!fail)
+ fail = err;
+
+ if (ice_is_xdp_ena_vsi(vsi)) {
+ struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
+
+ err = ice_vsi_cfg_single_txq(vsi, vsi->xdp_rings, q_idx);
+ if (!fail)
+ fail = err;
+ ice_set_ring_xdp(xdp_ring);
+ ice_tx_xsk_pool(vsi, q_idx);
+ }
+
+ err = ice_vsi_cfg_single_rxq(vsi, q_idx);
+ if (!fail)
+ fail = err;
+
+ q_vector = vsi->rx_rings[q_idx]->q_vector;
+ ice_qvec_cfg_msix(vsi, q_vector, q_idx);
+
+ err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
+ if (!fail)
+ fail = err;
+
+ ice_qvec_toggle_napi(vsi, q_vector, true);
+ ice_qvec_ena_irq(vsi, q_vector);
+
+ /* make sure NAPI sees updated ice_{t,x}_ring::xsk_pool */
+ synchronize_net();
+ ice_get_link_status(vsi->port_info, &link_up);
+ if (link_up) {
+ netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
+ netif_carrier_on(vsi->netdev);
+ }
+
+ return fail;
+}
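+
+/* A minimal usage sketch (the exact caller and its locking are assumptions):
+ * the AF_XDP buffer pool setup path typically quiesces one queue pair,
+ * swaps the pool, then brings the pair back up:
+ *
+ *	err = ice_qp_dis(vsi, qid);
+ *	if (err)
+ *		return err;
+ *	... attach or detach the XSK buffer pool for queue qid ...
+ *	err = ice_qp_ena(vsi, qid);
+ */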
diff --git a/drivers/net/ethernet/intel/ice/ice_base.h b/drivers/net/ethernet/intel/ice/ice_base.h
index b711bc921928..d28294247599 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.h
+++ b/drivers/net/ethernet/intel/ice/ice_base.h
@@ -32,4 +32,7 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
void
ice_fill_txq_meta(const struct ice_vsi *vsi, struct ice_tx_ring *ring,
struct ice_txq_meta *txq_meta);
+int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx);
+int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx);
+u16 ice_calc_ts_ring_count(struct ice_tx_ring *tx_ring);
#endif /* _ICE_BASE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_cgu_regs.h b/drivers/net/ethernet/intel/ice/ice_cgu_regs.h
deleted file mode 100644
index 57abd52386d0..000000000000
--- a/drivers/net/ethernet/intel/ice/ice_cgu_regs.h
+++ /dev/null
@@ -1,116 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2018-2021, Intel Corporation. */
-
-#ifndef _ICE_CGU_REGS_H_
-#define _ICE_CGU_REGS_H_
-
-#define NAC_CGU_DWORD9 0x24
-union nac_cgu_dword9 {
- struct {
- u32 time_ref_freq_sel : 3;
- u32 clk_eref1_en : 1;
- u32 clk_eref0_en : 1;
- u32 time_ref_en : 1;
- u32 time_sync_en : 1;
- u32 one_pps_out_en : 1;
- u32 clk_ref_synce_en : 1;
- u32 clk_synce1_en : 1;
- u32 clk_synce0_en : 1;
- u32 net_clk_ref1_en : 1;
- u32 net_clk_ref0_en : 1;
- u32 clk_synce1_amp : 2;
- u32 misc6 : 1;
- u32 clk_synce0_amp : 2;
- u32 one_pps_out_amp : 2;
- u32 misc24 : 12;
- } field;
- u32 val;
-};
-
-#define NAC_CGU_DWORD19 0x4c
-union nac_cgu_dword19 {
- struct {
- u32 tspll_fbdiv_intgr : 8;
- u32 fdpll_ulck_thr : 5;
- u32 misc15 : 3;
- u32 tspll_ndivratio : 4;
- u32 tspll_iref_ndivratio : 3;
- u32 misc19 : 1;
- u32 japll_ndivratio : 4;
- u32 japll_iref_ndivratio : 3;
- u32 misc27 : 1;
- } field;
- u32 val;
-};
-
-#define NAC_CGU_DWORD22 0x58
-union nac_cgu_dword22 {
- struct {
- u32 fdpll_frac_div_out_nc : 2;
- u32 fdpll_lock_int_for : 1;
- u32 synce_hdov_int_for : 1;
- u32 synce_lock_int_for : 1;
- u32 fdpll_phlead_slip_nc : 1;
- u32 fdpll_acc1_ovfl_nc : 1;
- u32 fdpll_acc2_ovfl_nc : 1;
- u32 synce_status_nc : 6;
- u32 fdpll_acc1f_ovfl : 1;
- u32 misc18 : 1;
- u32 fdpllclk_div : 4;
- u32 time1588clk_div : 4;
- u32 synceclk_div : 4;
- u32 synceclk_sel_div2 : 1;
- u32 fdpllclk_sel_div2 : 1;
- u32 time1588clk_sel_div2 : 1;
- u32 misc3 : 1;
- } field;
- u32 val;
-};
-
-#define NAC_CGU_DWORD24 0x60
-union nac_cgu_dword24 {
- struct {
- u32 tspll_fbdiv_frac : 22;
- u32 misc20 : 2;
- u32 ts_pll_enable : 1;
- u32 time_sync_tspll_align_sel : 1;
- u32 ext_synce_sel : 1;
- u32 ref1588_ck_div : 4;
- u32 time_ref_sel : 1;
- } field;
- u32 val;
-};
-
-#define TSPLL_CNTR_BIST_SETTINGS 0x344
-union tspll_cntr_bist_settings {
- struct {
- u32 i_irefgen_settling_time_cntr_7_0 : 8;
- u32 i_irefgen_settling_time_ro_standby_1_0 : 2;
- u32 reserved195 : 5;
- u32 i_plllock_sel_0 : 1;
- u32 i_plllock_sel_1 : 1;
- u32 i_plllock_cnt_6_0 : 7;
- u32 i_plllock_cnt_10_7 : 4;
- u32 reserved200 : 4;
- } field;
- u32 val;
-};
-
-#define TSPLL_RO_BWM_LF 0x370
-union tspll_ro_bwm_lf {
- struct {
- u32 bw_freqov_high_cri_7_0 : 8;
- u32 bw_freqov_high_cri_9_8 : 2;
- u32 biascaldone_cri : 1;
- u32 plllock_gain_tran_cri : 1;
- u32 plllock_true_lock_cri : 1;
- u32 pllunlock_flag_cri : 1;
- u32 afcerr_cri : 1;
- u32 afcdone_cri : 1;
- u32 feedfwrdgain_cal_cri_7_0 : 8;
- u32 m2fbdivmod_cri_7_0 : 8;
- } field;
- u32 val;
-};
-
-#endif /* _ICE_CGU_REGS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index d9f6cc71d900..046bc9c65c51 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -6,6 +6,7 @@
#include "ice_adminq_cmd.h"
#include "ice_flow.h"
#include "ice_ptp_hw.h"
+#include <linux/packing.h>
#define ICE_PF_RESET_WAIT_COUNT 300
#define ICE_MAX_NETLIST_SIZE 10
@@ -160,10 +161,25 @@ static int ice_set_mac_type(struct ice_hw *hw)
case ICE_DEV_ID_E825C_SGMII:
hw->mac_type = ICE_MAC_GENERIC_3K_E825;
break;
- case ICE_DEV_ID_E830_BACKPLANE:
- case ICE_DEV_ID_E830_QSFP56:
- case ICE_DEV_ID_E830_SFP:
- case ICE_DEV_ID_E830_SFP_DD:
+ case ICE_DEV_ID_E830CC_BACKPLANE:
+ case ICE_DEV_ID_E830CC_QSFP56:
+ case ICE_DEV_ID_E830CC_SFP:
+ case ICE_DEV_ID_E830CC_SFP_DD:
+ case ICE_DEV_ID_E830C_BACKPLANE:
+ case ICE_DEV_ID_E830_XXV_BACKPLANE:
+ case ICE_DEV_ID_E830C_QSFP:
+ case ICE_DEV_ID_E830_XXV_QSFP:
+ case ICE_DEV_ID_E830C_SFP:
+ case ICE_DEV_ID_E830_XXV_SFP:
+ case ICE_DEV_ID_E835CC_BACKPLANE:
+ case ICE_DEV_ID_E835CC_QSFP56:
+ case ICE_DEV_ID_E835CC_SFP:
+ case ICE_DEV_ID_E835C_BACKPLANE:
+ case ICE_DEV_ID_E835C_QSFP:
+ case ICE_DEV_ID_E835C_SFP:
+ case ICE_DEV_ID_E835_L_BACKPLANE:
+ case ICE_DEV_ID_E835_L_QSFP:
+ case ICE_DEV_ID_E835_L_SFP:
hw->mac_type = ICE_MAC_E830;
break;
default:
@@ -179,7 +195,7 @@ static int ice_set_mac_type(struct ice_hw *hw)
* ice_is_generic_mac - check if device's mac_type is generic
* @hw: pointer to the hardware structure
*
- * Return: true if mac_type is generic (with SBQ support), false if not
+ * Return: true if mac_type is ICE_MAC_GENERIC*, false otherwise.
*/
bool ice_is_generic_mac(struct ice_hw *hw)
{
@@ -188,93 +204,39 @@ bool ice_is_generic_mac(struct ice_hw *hw)
}
/**
- * ice_is_e810
- * @hw: pointer to the hardware structure
- *
- * returns true if the device is E810 based, false if not.
- */
-bool ice_is_e810(struct ice_hw *hw)
-{
- return hw->mac_type == ICE_MAC_E810;
-}
-
-/**
- * ice_is_e810t
- * @hw: pointer to the hardware structure
+ * ice_is_pf_c827 - check if pf contains c827 phy
+ * @hw: pointer to the hw struct
*
- * returns true if the device is E810T based, false if not.
+ * Return: true if the device has a C827 PHY, false otherwise.
*/
-bool ice_is_e810t(struct ice_hw *hw)
+static bool ice_is_pf_c827(struct ice_hw *hw)
{
- switch (hw->device_id) {
- case ICE_DEV_ID_E810C_SFP:
- switch (hw->subsystem_device_id) {
- case ICE_SUBDEV_ID_E810T:
- case ICE_SUBDEV_ID_E810T2:
- case ICE_SUBDEV_ID_E810T3:
- case ICE_SUBDEV_ID_E810T4:
- case ICE_SUBDEV_ID_E810T6:
- case ICE_SUBDEV_ID_E810T7:
- return true;
- }
- break;
- case ICE_DEV_ID_E810C_QSFP:
- switch (hw->subsystem_device_id) {
- case ICE_SUBDEV_ID_E810T2:
- case ICE_SUBDEV_ID_E810T3:
- case ICE_SUBDEV_ID_E810T5:
- return true;
- }
- break;
- default:
- break;
- }
+ struct ice_aqc_get_link_topo cmd = {};
+ u8 node_part_number;
+ u16 node_handle;
+ int status;
- return false;
-}
+ if (hw->mac_type != ICE_MAC_E810)
+ return false;
-/**
- * ice_is_e823
- * @hw: pointer to the hardware structure
- *
- * returns true if the device is E823-L or E823-C based, false if not.
- */
-bool ice_is_e823(struct ice_hw *hw)
-{
- switch (hw->device_id) {
- case ICE_DEV_ID_E823L_BACKPLANE:
- case ICE_DEV_ID_E823L_SFP:
- case ICE_DEV_ID_E823L_10G_BASE_T:
- case ICE_DEV_ID_E823L_1GBE:
- case ICE_DEV_ID_E823L_QSFP:
- case ICE_DEV_ID_E823C_BACKPLANE:
- case ICE_DEV_ID_E823C_QSFP:
- case ICE_DEV_ID_E823C_SFP:
- case ICE_DEV_ID_E823C_10G_BASE_T:
- case ICE_DEV_ID_E823C_SGMII:
+ if (hw->device_id != ICE_DEV_ID_E810C_QSFP)
return true;
- default:
+
+ cmd.addr.topo_params.node_type_ctx =
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY) |
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ICE_AQC_LINK_TOPO_NODE_CTX_PORT);
+ cmd.addr.topo_params.index = 0;
+
+ status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number,
+ &node_handle);
+
+ if (status || node_part_number != ICE_AQC_GET_LINK_TOPO_NODE_NR_C827)
return false;
- }
-}
-/**
- * ice_is_e825c - Check if a device is E825C family device
- * @hw: pointer to the hardware structure
- *
- * Return: true if the device is E825-C based, false if not.
- */
-bool ice_is_e825c(struct ice_hw *hw)
-{
- switch (hw->device_id) {
- case ICE_DEV_ID_E825C_BACKPLANE:
- case ICE_DEV_ID_E825C_QSFP:
- case ICE_DEV_ID_E825C_SFP:
- case ICE_DEV_ID_E825C_SGMII:
+ if (node_handle == E810C_QSFP_C827_0_HANDLE || node_handle == E810C_QSFP_C827_1_HANDLE)
return true;
- default:
- return false;
- }
+
+ return false;
}
/**
@@ -286,7 +248,7 @@ bool ice_is_e825c(struct ice_hw *hw)
*/
int ice_clear_pf_cfg(struct ice_hw *hw)
{
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
@@ -314,12 +276,12 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
{
struct ice_aqc_manage_mac_read_resp *resp;
struct ice_aqc_manage_mac_read *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
u16 flags;
u8 i;
- cmd = &desc.params.mac_read;
+ cmd = libie_aq_raw(&desc);
if (buf_size < sizeof(*resp))
return -EINVAL;
@@ -368,12 +330,12 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
{
struct ice_aqc_get_phy_caps *cmd;
u16 pcaps_size = sizeof(*pcaps);
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
const char *prefix;
struct ice_hw *hw;
int status;
- cmd = &desc.params.get_phy;
+ cmd = libie_aq_raw(&desc);
if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
return -EINVAL;
@@ -462,9 +424,9 @@ ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
struct ice_sq_cd *cd)
{
struct ice_aqc_get_link_topo *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.get_link_topo;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
@@ -492,19 +454,20 @@ int
ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
u8 *node_part_number, u16 *node_handle)
{
- struct ice_aq_desc desc;
+ struct ice_aqc_get_link_topo *resp;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
- desc.params.get_link_topo = *cmd;
+ resp = libie_aq_raw(&desc);
+ *resp = *cmd;
if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL))
return -EINTR;
if (node_handle)
- *node_handle =
- le16_to_cpu(desc.params.get_link_topo.addr.handle);
+ *node_handle = le16_to_cpu(resp->addr.handle);
if (node_part_number)
- *node_part_number = desc.params.get_link_topo.node_part_num;
+ *node_part_number = resp->node_part_num;
return 0;
}
@@ -512,7 +475,8 @@ ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
/**
* ice_find_netlist_node
* @hw: pointer to the hw struct
- * @node_type_ctx: type of netlist node to look for
+ * @node_type: type of netlist node to look for
+ * @ctx: context of the search
* @node_part_number: node part number to look for
* @node_handle: output parameter if node found - optional
*
@@ -522,10 +486,12 @@ ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
* valid if the function returns zero, and should be ignored on any non-zero
* return value.
*
- * Returns: 0 if the node is found, -ENOENT if no handle was found, and
- * a negative error code on failure to access the AQ.
+ * Return:
+ * * 0 if the node is found,
+ * * -ENOENT if no handle was found,
+ * * negative error code on failure to access the AQ.
*/
-static int ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx,
+static int ice_find_netlist_node(struct ice_hw *hw, u8 node_type, u8 ctx,
u8 node_part_number, u16 *node_handle)
{
u8 idx;
@@ -536,8 +502,8 @@ static int ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx,
int status;
cmd.addr.topo_params.node_type_ctx =
- FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M,
- node_type_ctx);
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, node_type) |
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ctx);
cmd.addr.topo_params.index = idx;
status = ice_aq_get_netlist_node(hw, &cmd,
@@ -724,8 +690,8 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
struct ice_link_status *li_old, *li;
enum ice_media_type *hw_media_type;
struct ice_fc_info *hw_fc_info;
+ struct libie_aq_desc desc;
bool tx_pause, rx_pause;
- struct ice_aq_desc desc;
struct ice_hw *hw;
u16 cmd_flags;
int status;
@@ -740,7 +706,7 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
- resp = &desc.params.get_link_status;
+ resp = libie_aq_raw(&desc);
resp->cmd_flags = cpu_to_le16(cmd_flags);
resp->lport_num = pi->lport;
@@ -869,9 +835,9 @@ int
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
{
struct ice_aqc_set_mac_cfg *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.set_mac_cfg;
+ cmd = libie_aq_raw(&desc);
if (max_frame_size == 0)
return -EINVAL;
@@ -904,6 +870,9 @@ static int ice_init_fltr_mgmt_struct(struct ice_hw *hw)
INIT_LIST_HEAD(&sw->vsi_list_map_head);
sw->prof_res_bm_init = 0;
+ /* Initialize recipe count with default recipes read from NVM */
+ sw->recp_cnt = ICE_SW_LKUP_LAST;
+
status = ice_init_def_sw_recp(hw);
if (status) {
devm_kfree(ice_hw_to_dev(hw), hw->switch_info);
@@ -931,14 +900,7 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
}
recps = sw->recp_list;
for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
- struct ice_recp_grp_entry *rg_entry, *tmprg_entry;
-
recps[i].root_rid = i;
- list_for_each_entry_safe(rg_entry, tmprg_entry,
- &recps[i].rg_list, l_entry) {
- list_del(&rg_entry->l_entry);
- devm_kfree(ice_hw_to_dev(hw), rg_entry);
- }
if (recps[i].adv_rule) {
struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
@@ -963,7 +925,6 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
devm_kfree(ice_hw_to_dev(hw), lst_itr);
}
}
- devm_kfree(ice_hw_to_dev(hw), recps[i].root_buf);
}
ice_rm_all_sw_replay_rule_info(hw);
devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
@@ -997,6 +958,64 @@ static void ice_get_itr_intrl_gran(struct ice_hw *hw)
}
/**
+ * ice_wait_for_fw - wait for full FW readiness
+ * @hw: pointer to the hardware structure
+ * @timeout: milliseconds that can elapse before timing out
+ *
+ * Return: 0 on success, -ETIMEDOUT on timeout.
+ */
+static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout)
+{
+ int fw_loading;
+ u32 elapsed = 0;
+
+ while (elapsed <= timeout) {
+ fw_loading = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_LOADING_M;
+
+ /* firmware not loaded yet, keep waiting */
+ if (fw_loading) {
+ elapsed += 100;
+ msleep(100);
+ continue;
+ }
+ return 0;
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int __fwlog_send_cmd(void *priv, struct libie_aq_desc *desc, void *buf,
+ u16 size)
+{
+ struct ice_hw *hw = priv;
+
+ return ice_aq_send_cmd(hw, desc, buf, size, NULL);
+}
+
+static int __fwlog_init(struct ice_hw *hw)
+{
+ struct ice_pf *pf = hw->back;
+ struct libie_fwlog_api api = {
+ .pdev = pf->pdev,
+ .send_cmd = __fwlog_send_cmd,
+ .priv = hw,
+ };
+ int err;
+
+ /* only support fw log commands on PF 0 */
+ if (hw->bus.func)
+ return -EINVAL;
+
+ err = ice_debugfs_pf_init(pf);
+ if (err)
+ return err;
+
+ api.debugfs_root = pf->ice_debugfs_pf;
+
+ return libie_fwlog_init(&hw->fwlog, &api);
+}
+
+/**
* ice_init_hw - main hardware initialization routine
* @hw: pointer to the hardware structure
*/
@@ -1024,7 +1043,7 @@ int ice_init_hw(struct ice_hw *hw)
if (status)
goto err_unroll_cqinit;
- status = ice_fwlog_init(hw);
+ status = __fwlog_init(hw);
if (status)
ice_debug(hw, ICE_DBG_FW_LOG, "Error initializing FW logging: %d\n",
status);
@@ -1056,6 +1075,7 @@ int ice_init_hw(struct ice_hw *hw)
goto err_unroll_cqinit;
}
+ hw->port_info->local_fwd_mode = ICE_LOCAL_FWD_MODE_ENABLED;
/* set the back pointer to HW */
hw->port_info->hw = hw;
@@ -1141,9 +1161,27 @@ int ice_init_hw(struct ice_hw *hw)
status = ice_init_hw_tbls(hw);
if (status)
goto err_unroll_fltr_mgmt_struct;
+
+ ice_init_dev_hw(hw->back);
+
mutex_init(&hw->tnl_lock);
- return 0;
+ ice_init_chk_recipe_reuse_support(hw);
+ /* Some cards require longer initialization times because they must
+ * load FW from an external source. This can take as long as half a
+ * minute.
+ */
+ if (ice_is_pf_c827(hw)) {
+ status = ice_wait_for_fw(hw, 30000);
+ if (status) {
+ dev_err(ice_hw_to_dev(hw), "ice_wait_for_fw timed out\n");
+ goto err_unroll_fltr_mgmt_struct;
+ }
+ }
+
+ hw->lane_num = ice_get_phy_lane_number(hw);
+
+ return 0;
err_unroll_fltr_mgmt_struct:
ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
@@ -1155,6 +1193,16 @@ err_unroll_cqinit:
return status;
}
+static void __fwlog_deinit(struct ice_hw *hw)
+{
+ /* only support fw log commands on PF 0 */
+ if (hw->bus.func)
+ return;
+
+ ice_debugfs_pf_deinit(hw->back);
+ libie_fwlog_deinit(&hw->fwlog);
+}
+
/**
* ice_deinit_hw - unroll initialization operations done by ice_init_hw
* @hw: pointer to the hardware structure
@@ -1173,8 +1221,7 @@ void ice_deinit_hw(struct ice_hw *hw)
ice_free_seg(hw);
ice_free_hw_tbls(hw);
mutex_destroy(&hw->tnl_lock);
-
- ice_fwlog_deinit(hw);
+ __fwlog_deinit(hw);
ice_destroy_all_ctrlq(hw);
/* Clear VSI contexts if not already cleared */
@@ -1329,39 +1376,51 @@ int ice_reset(struct ice_hw *hw, enum ice_reset_req req)
}
/**
- * ice_copy_rxq_ctx_to_hw
+ * ice_copy_rxq_ctx_to_hw - Copy packed Rx queue context to HW registers
* @hw: pointer to the hardware structure
- * @ice_rxq_ctx: pointer to the rxq context
+ * @rxq_ctx: pointer to the packed Rx queue context
* @rxq_index: the index of the Rx queue
- *
- * Copies rxq context from dense structure to HW register space
*/
-static int
-ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
+static void ice_copy_rxq_ctx_to_hw(struct ice_hw *hw,
+ const ice_rxq_ctx_buf_t *rxq_ctx,
+ u32 rxq_index)
{
- u8 i;
+ /* Copy each dword separately to HW */
+ for (int i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
+ u32 ctx = ((const u32 *)rxq_ctx)[i];
- if (!ice_rxq_ctx)
- return -EINVAL;
+ wr32(hw, QRX_CONTEXT(i, rxq_index), ctx);
- if (rxq_index > QRX_CTRL_MAX_INDEX)
- return -EINVAL;
+ ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i, ctx);
+ }
+}
- /* Copy each dword separately to HW */
- for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
- wr32(hw, QRX_CONTEXT(i, rxq_index),
- *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
+/**
+ * ice_copy_rxq_ctx_from_hw - Copy packed Rx Queue context from HW registers
+ * @hw: pointer to the hardware structure
+ * @rxq_ctx: pointer to the packed Rx queue context
+ * @rxq_index: the index of the Rx queue
+ */
+static void ice_copy_rxq_ctx_from_hw(struct ice_hw *hw,
+ ice_rxq_ctx_buf_t *rxq_ctx,
+ u32 rxq_index)
+{
+ u32 *ctx = (u32 *)rxq_ctx;
- ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
- *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
- }
+ /* Copy each dword separately from HW */
+ for (int i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++, ctx++) {
+ *ctx = rd32(hw, QRX_CONTEXT(i, rxq_index));
- return 0;
+ ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i, *ctx);
+ }
}
+#define ICE_CTX_STORE(struct_name, struct_field, width, lsb) \
+ PACKED_FIELD((lsb) + (width) - 1, (lsb), struct struct_name, struct_field)
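+/* For example, ICE_CTX_STORE(ice_rlan_ctx, head, 13, 0) expands to
+ * PACKED_FIELD(12, 0, struct ice_rlan_ctx, head): the 13-bit head field
+ * occupies bits 12..0 of the packed queue context buffer.
+ */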
+
/* LAN Rx Queue Context */
-static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
- /* Field Width LSB */
+static const struct packed_field_u8 ice_rlan_ctx_fields[] = {
+ /* Field Width LSB */
ICE_CTX_STORE(ice_rlan_ctx, head, 13, 0),
ICE_CTX_STORE(ice_rlan_ctx, cpuid, 8, 13),
ICE_CTX_STORE(ice_rlan_ctx, base, 57, 32),
@@ -1382,35 +1441,90 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196),
ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198),
ICE_CTX_STORE(ice_rlan_ctx, prefena, 1, 201),
- { 0 }
};
/**
- * ice_write_rxq_ctx
+ * ice_pack_rxq_ctx - Pack Rx queue context into a HW buffer
+ * @ctx: the Rx queue context to pack
+ * @buf: the HW buffer to pack into
+ *
+ * Pack the Rx queue context from the CPU-friendly unpacked buffer into its
+ * bit-packed HW layout.
+ */
+static void ice_pack_rxq_ctx(const struct ice_rlan_ctx *ctx,
+ ice_rxq_ctx_buf_t *buf)
+{
+ pack_fields(buf, sizeof(*buf), ctx, ice_rlan_ctx_fields,
+ QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
+}
+
+/**
+ * ice_unpack_rxq_ctx - Unpack Rx queue context from a HW buffer
+ * @buf: the HW buffer to unpack from
+ * @ctx: the Rx queue context to unpack
+ *
+ * Unpack the Rx queue context from the HW buffer into the CPU-friendly
+ * structure.
+ */
+static void ice_unpack_rxq_ctx(const ice_rxq_ctx_buf_t *buf,
+ struct ice_rlan_ctx *ctx)
+{
+ unpack_fields(buf, sizeof(*buf), ctx, ice_rlan_ctx_fields,
+ QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
+}
+
+/**
+ * ice_write_rxq_ctx - Write Rx Queue context to hardware
* @hw: pointer to the hardware structure
- * @rlan_ctx: pointer to the rxq context
+ * @rlan_ctx: pointer to the unpacked Rx queue context
* @rxq_index: the index of the Rx queue
*
- * Converts rxq context from sparse to dense structure and then writes
- * it to HW register space and enables the hardware to prefetch descriptors
- * instead of only fetching them on demand
+ * Pack the sparse Rx Queue context into dense hardware format and write it
+ * into the HW register space.
+ *
+ * Return: 0 on success, or -EINVAL if the Rx queue index is invalid.
*/
int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
u32 rxq_index)
{
- u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
+ ice_rxq_ctx_buf_t buf = {};
- if (!rlan_ctx)
+ if (rxq_index > QRX_CTRL_MAX_INDEX)
return -EINVAL;
- rlan_ctx->prefena = 1;
+ ice_pack_rxq_ctx(rlan_ctx, &buf);
+ ice_copy_rxq_ctx_to_hw(hw, &buf, rxq_index);
- ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
- return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
+ return 0;
+}
+
+/**
+ * ice_read_rxq_ctx - Read Rx queue context from HW
+ * @hw: pointer to the hardware structure
+ * @rlan_ctx: pointer to the Rx queue context
+ * @rxq_index: the index of the Rx queue
+ *
+ * Read the Rx queue context from the hardware registers, and unpack it into
+ * the sparse Rx queue context structure.
+ *
+ * Returns: 0 on success, or -EINVAL if the Rx queue index is invalid.
+ */
+int ice_read_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
+ u32 rxq_index)
+{
+ ice_rxq_ctx_buf_t buf = {};
+
+ if (rxq_index > QRX_CTRL_MAX_INDEX)
+ return -EINVAL;
+
+ ice_copy_rxq_ctx_from_hw(hw, &buf, rxq_index);
+ ice_unpack_rxq_ctx(&buf, rlan_ctx);
+
+ return 0;
}
/* LAN Tx Queue Context */
-const struct ice_ctx_ele ice_tlan_ctx_info[] = {
+static const struct packed_field_u8 ice_tlan_ctx_fields[] = {
/* Field Width LSB */
ICE_CTX_STORE(ice_tlan_ctx, base, 57, 0),
ICE_CTX_STORE(ice_tlan_ctx, port_num, 3, 57),
@@ -1439,10 +1553,227 @@ const struct ice_ctx_ele ice_tlan_ctx_info[] = {
ICE_CTX_STORE(ice_tlan_ctx, drop_ena, 1, 165),
ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx, 2, 166),
ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx, 3, 168),
- ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 122, 171),
- { 0 }
};
+/**
+ * ice_pack_txq_ctx - Pack Tx queue context into Admin Queue buffer
+ * @ctx: the Tx queue context to pack
+ * @buf: the Admin Queue HW buffer to pack into
+ *
+ * Pack the Tx queue context from the CPU-friendly unpacked buffer into its
+ * bit-packed Admin Queue layout.
+ */
+void ice_pack_txq_ctx(const struct ice_tlan_ctx *ctx, ice_txq_ctx_buf_t *buf)
+{
+ pack_fields(buf, sizeof(*buf), ctx, ice_tlan_ctx_fields,
+ QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
+}
+
+/**
+ * ice_pack_txq_ctx_full - Pack Tx queue context into a HW buffer
+ * @ctx: the Tx queue context to pack
+ * @buf: the HW buffer to pack into
+ *
+ * Pack the Tx queue context from the CPU-friendly unpacked buffer into its
+ * bit-packed HW layout, including the internal data portion.
+ */
+static void ice_pack_txq_ctx_full(const struct ice_tlan_ctx *ctx,
+ ice_txq_ctx_buf_full_t *buf)
+{
+ pack_fields(buf, sizeof(*buf), ctx, ice_tlan_ctx_fields,
+ QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
+}
+
+/**
+ * ice_unpack_txq_ctx_full - Unpack Tx queue context from a HW buffer
+ * @buf: the HW buffer to unpack from
+ * @ctx: the Tx queue context to unpack
+ *
+ * Unpack the Tx queue context from the HW buffer (including the full internal
+ * state) into the CPU-friendly structure.
+ */
+static void ice_unpack_txq_ctx_full(const ice_txq_ctx_buf_full_t *buf,
+ struct ice_tlan_ctx *ctx)
+{
+ unpack_fields(buf, sizeof(*buf), ctx, ice_tlan_ctx_fields,
+ QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
+}
+
+/**
+ * ice_copy_txq_ctx_from_hw - Copy Tx Queue context from HW registers
+ * @hw: pointer to the hardware structure
+ * @txq_ctx: pointer to the packed Tx queue context, including internal state
+ * @txq_index: the index of the Tx queue
+ *
+ * Copy Tx Queue context from HW register space to dense structure
+ */
+static void ice_copy_txq_ctx_from_hw(struct ice_hw *hw,
+ ice_txq_ctx_buf_full_t *txq_ctx,
+ u32 txq_index)
+{
+ struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
+ u32 *ctx = (u32 *)txq_ctx;
+ u32 txq_base, reg;
+
+ /* Get Tx queue base within card space */
+ txq_base = rd32(hw, PFLAN_TX_QALLOC(hw->pf_id));
+ txq_base = FIELD_GET(PFLAN_TX_QALLOC_FIRSTQ_M, txq_base);
+
+ reg = FIELD_PREP(GLCOMM_QTX_CNTX_CTL_CMD_M,
+ GLCOMM_QTX_CNTX_CTL_CMD_READ) |
+ FIELD_PREP(GLCOMM_QTX_CNTX_CTL_QUEUE_ID_M,
+ txq_base + txq_index) |
+ GLCOMM_QTX_CNTX_CTL_CMD_EXEC_M;
+
+ /* Prevent other PFs on the same adapter from accessing the Tx queue
+ * context interface concurrently.
+ */
+ spin_lock(&pf->adapter->txq_ctx_lock);
+
+ wr32(hw, GLCOMM_QTX_CNTX_CTL, reg);
+ ice_flush(hw);
+
+ /* Copy each dword separately from HW */
+ for (int i = 0; i < ICE_TXQ_CTX_FULL_SIZE_DWORDS; i++, ctx++) {
+ *ctx = rd32(hw, GLCOMM_QTX_CNTX_DATA(i));
+
+ ice_debug(hw, ICE_DBG_QCTX, "qtxdata[%d]: %08X\n", i, *ctx);
+ }
+
+ spin_unlock(&pf->adapter->txq_ctx_lock);
+}
+
+/**
+ * ice_copy_txq_ctx_to_hw - Copy Tx Queue context into HW registers
+ * @hw: pointer to the hardware structure
+ * @txq_ctx: pointer to the packed Tx queue context, including internal state
+ * @txq_index: the index of the Tx queue
+ */
+static void ice_copy_txq_ctx_to_hw(struct ice_hw *hw,
+ const ice_txq_ctx_buf_full_t *txq_ctx,
+ u32 txq_index)
+{
+ struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
+ u32 txq_base, reg;
+
+ /* Get Tx queue base within card space */
+ txq_base = rd32(hw, PFLAN_TX_QALLOC(hw->pf_id));
+ txq_base = FIELD_GET(PFLAN_TX_QALLOC_FIRSTQ_M, txq_base);
+
+ reg = FIELD_PREP(GLCOMM_QTX_CNTX_CTL_CMD_M,
+ GLCOMM_QTX_CNTX_CTL_CMD_WRITE_NO_DYN) |
+ FIELD_PREP(GLCOMM_QTX_CNTX_CTL_QUEUE_ID_M,
+ txq_base + txq_index) |
+ GLCOMM_QTX_CNTX_CTL_CMD_EXEC_M;
+
+ /* Prevent other PFs on the same adapter from accessing the Tx queue
+ * context interface concurrently.
+ */
+ spin_lock(&pf->adapter->txq_ctx_lock);
+
+ /* Copy each dword separately to HW */
+ for (int i = 0; i < ICE_TXQ_CTX_FULL_SIZE_DWORDS; i++) {
+ u32 ctx = ((const u32 *)txq_ctx)[i];
+
+ wr32(hw, GLCOMM_QTX_CNTX_DATA(i), ctx);
+
+ ice_debug(hw, ICE_DBG_QCTX, "qtxdata[%d]: %08X\n", i, ctx);
+ }
+
+ wr32(hw, GLCOMM_QTX_CNTX_CTL, reg);
+ ice_flush(hw);
+
+ spin_unlock(&pf->adapter->txq_ctx_lock);
+}
+
+/**
+ * ice_read_txq_ctx - Read Tx queue context from HW
+ * @hw: pointer to the hardware structure
+ * @tlan_ctx: pointer to the Tx queue context
+ * @txq_index: the index of the Tx queue
+ *
+ * Read the Tx queue context from the HW registers, then unpack it into the
+ * ice_tlan_ctx structure for use.
+ *
+ * Returns: 0 on success, or -EINVAL on an invalid Tx queue index.
+ */
+int ice_read_txq_ctx(struct ice_hw *hw, struct ice_tlan_ctx *tlan_ctx,
+ u32 txq_index)
+{
+ ice_txq_ctx_buf_full_t buf = {};
+
+ if (txq_index > QTX_COMM_HEAD_MAX_INDEX)
+ return -EINVAL;
+
+ ice_copy_txq_ctx_from_hw(hw, &buf, txq_index);
+ ice_unpack_txq_ctx_full(&buf, tlan_ctx);
+
+ return 0;
+}
+
+/**
+ * ice_write_txq_ctx - Write Tx queue context to HW
+ * @hw: pointer to the hardware structure
+ * @tlan_ctx: pointer to the Tx queue context
+ * @txq_index: the index of the Tx queue
+ *
+ * Pack the Tx queue context into the dense HW layout, then write it into the
+ * HW registers.
+ *
+ * Returns: 0 on success, or -EINVAL on an invalid Tx queue index.
+ */
+int ice_write_txq_ctx(struct ice_hw *hw, struct ice_tlan_ctx *tlan_ctx,
+ u32 txq_index)
+{
+ ice_txq_ctx_buf_full_t buf = {};
+
+ if (txq_index > QTX_COMM_HEAD_MAX_INDEX)
+ return -EINVAL;
+
+ ice_pack_txq_ctx_full(tlan_ctx, &buf);
+ ice_copy_txq_ctx_to_hw(hw, &buf, txq_index);
+
+ return 0;
+}
+
+/* Tx time Queue Context */
+static const struct packed_field_u8 ice_txtime_ctx_fields[] = {
+ /* Field Width LSB */
+ ICE_CTX_STORE(ice_txtime_ctx, base, 57, 0),
+ ICE_CTX_STORE(ice_txtime_ctx, pf_num, 3, 57),
+ ICE_CTX_STORE(ice_txtime_ctx, vmvf_num, 10, 60),
+ ICE_CTX_STORE(ice_txtime_ctx, vmvf_type, 2, 70),
+ ICE_CTX_STORE(ice_txtime_ctx, src_vsi, 10, 72),
+ ICE_CTX_STORE(ice_txtime_ctx, cpuid, 8, 82),
+ ICE_CTX_STORE(ice_txtime_ctx, tphrd_desc, 1, 90),
+ ICE_CTX_STORE(ice_txtime_ctx, qlen, 13, 91),
+ ICE_CTX_STORE(ice_txtime_ctx, timer_num, 1, 104),
+ ICE_CTX_STORE(ice_txtime_ctx, txtime_ena_q, 1, 105),
+ ICE_CTX_STORE(ice_txtime_ctx, drbell_mode_32, 1, 106),
+ ICE_CTX_STORE(ice_txtime_ctx, ts_res, 4, 107),
+ ICE_CTX_STORE(ice_txtime_ctx, ts_round_type, 2, 111),
+ ICE_CTX_STORE(ice_txtime_ctx, ts_pacing_slot, 3, 113),
+ ICE_CTX_STORE(ice_txtime_ctx, merging_ena, 1, 116),
+ ICE_CTX_STORE(ice_txtime_ctx, ts_fetch_prof_id, 4, 117),
+ ICE_CTX_STORE(ice_txtime_ctx, ts_fetch_cache_line_aln_thld, 4, 121),
+ ICE_CTX_STORE(ice_txtime_ctx, tx_pipe_delay_mode, 1, 125),
+};
+
+/**
+ * ice_pack_txtime_ctx - pack Tx time queue context into a HW buffer
+ * @ctx: the Tx time queue context to pack
+ * @buf: the HW buffer to pack into
+ *
+ * Pack the Tx time queue context from the CPU-friendly unpacked buffer into
+ * its bit-packed HW layout.
+ */
+void ice_pack_txtime_ctx(const struct ice_txtime_ctx *ctx,
+ ice_txtime_ctx_buf_t *buf)
+{
+ pack_fields(buf, sizeof(*buf), ctx, ice_txtime_ctx_fields,
+ QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
+}
+
/* Sideband Queue command wrappers */
/**
@@ -1458,15 +1789,16 @@ ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
void *buf, u16 buf_size, struct ice_sq_cd *cd)
{
return ice_sq_send_cmd(hw, ice_get_sbq(hw),
- (struct ice_aq_desc *)desc, buf, buf_size, cd);
+ (struct libie_aq_desc *)desc, buf, buf_size, cd);
}
/**
* ice_sbq_rw_reg - Fill Sideband Queue command
* @hw: pointer to the HW struct
* @in: message info to be filled in descriptor
+ * @flags: control queue descriptor flags
*/
-int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in)
+int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in, u16 flags)
{
struct ice_sbq_cmd_desc desc = {0};
struct ice_sbq_msg_req msg = {0};
@@ -1490,7 +1822,7 @@ int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in)
*/
msg_len -= sizeof(msg.data);
- desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags = cpu_to_le16(flags);
desc.opcode = cpu_to_le16(ice_sbq_opc_neigh_dev_req);
desc.param0.cmd_len = cpu_to_le16(msg_len);
status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL);
@@ -1542,10 +1874,10 @@ static bool ice_should_retry_sq_send_cmd(u16 opcode)
*/
static int
ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
- struct ice_aq_desc *desc, void *buf, u16 buf_size,
+ struct libie_aq_desc *desc, void *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
- struct ice_aq_desc desc_cpy;
+ struct libie_aq_desc desc_cpy;
bool is_cmd_for_retry;
u8 idx = 0;
u16 opcode;
@@ -1566,7 +1898,7 @@ ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);
if (!is_cmd_for_retry || !status ||
- hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
+ hw->adminq.sq_last_status != LIBIE_AQ_RC_EBUSY)
break;
memcpy(desc, &desc_cpy, sizeof(desc_cpy));
@@ -1589,10 +1921,10 @@ ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
* Helper function to send FW Admin Queue commands to the FW Admin Queue.
*/
int
-ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
+ice_aq_send_cmd(struct ice_hw *hw, struct libie_aq_desc *desc, void *buf,
u16 buf_size, struct ice_sq_cd *cd)
{
- struct ice_aqc_req_res *cmd = &desc->params.res_owner;
+ struct libie_aqc_req_res *cmd = libie_aq_raw(desc);
bool lock_acquired = false;
int status;
@@ -1615,13 +1947,15 @@ ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
case ice_aqc_opc_set_port_params:
case ice_aqc_opc_get_vlan_mode_parameters:
case ice_aqc_opc_set_vlan_mode_parameters:
+ case ice_aqc_opc_set_tx_topo:
+ case ice_aqc_opc_get_tx_topo:
case ice_aqc_opc_add_recipe:
case ice_aqc_opc_recipe_to_profile:
case ice_aqc_opc_get_recipe:
case ice_aqc_opc_get_recipe_to_profile:
break;
case ice_aqc_opc_release_res:
- if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
+ if (le16_to_cpu(cmd->res_id) == LIBIE_AQC_RES_ID_GLBL_LOCK)
break;
fallthrough;
default:
@@ -1646,8 +1980,8 @@ ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
*/
int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
- struct ice_aqc_get_ver *resp;
- struct ice_aq_desc desc;
+ struct libie_aqc_get_ver *resp;
+ struct libie_aq_desc desc;
int status;
resp = &desc.params.get_ver;
@@ -1683,8 +2017,8 @@ int
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
struct ice_sq_cd *cd)
{
- struct ice_aqc_driver_ver *cmd;
- struct ice_aq_desc desc;
+ struct libie_aqc_driver_ver *cmd;
+ struct libie_aq_desc desc;
u16 len;
cmd = &desc.params.driver_ver;
@@ -1694,7 +2028,7 @@ ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
cmd->major_ver = dv->major_ver;
cmd->minor_ver = dv->minor_ver;
cmd->build_ver = dv->build_ver;
@@ -1719,9 +2053,9 @@ ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
{
struct ice_aqc_q_shutdown *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.q_shutdown;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
@@ -1762,8 +2096,8 @@ ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
struct ice_sq_cd *cd)
{
- struct ice_aqc_req_res *cmd_resp;
- struct ice_aq_desc desc;
+ struct libie_aqc_req_res *cmd_resp;
+ struct libie_aq_desc desc;
int status;
cmd_resp = &desc.params.res_owner;
@@ -1785,20 +2119,20 @@ ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
/* Global config lock response utilizes an additional status field.
*
* If the Global config lock resource is held by some other driver, the
- * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
+ * command completes with LIBIE_AQ_RES_GLBL_IN_PROG in the status field
* and the timeout field indicates the maximum time the current owner
* of the resource has to free it.
*/
if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
- if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
+ if (le16_to_cpu(cmd_resp->status) == LIBIE_AQ_RES_GLBL_SUCCESS) {
*timeout = le32_to_cpu(cmd_resp->timeout);
return 0;
} else if (le16_to_cpu(cmd_resp->status) ==
- ICE_AQ_RES_GLBL_IN_PROG) {
+ LIBIE_AQ_RES_GLBL_IN_PROG) {
*timeout = le32_to_cpu(cmd_resp->timeout);
return -EIO;
} else if (le16_to_cpu(cmd_resp->status) ==
- ICE_AQ_RES_GLBL_DONE) {
+ LIBIE_AQ_RES_GLBL_DONE) {
return -EALREADY;
}
@@ -1811,7 +2145,7 @@ ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
* with a busy return value and the timeout field indicates the maximum
* time the current owner of the resource has to free it.
*/
- if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
+ if (!status || hw->adminq.sq_last_status == LIBIE_AQ_RC_EBUSY)
*timeout = le32_to_cpu(cmd_resp->timeout);
return status;
@@ -1830,8 +2164,8 @@ static int
ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
struct ice_sq_cd *cd)
{
- struct ice_aqc_req_res *cmd;
- struct ice_aq_desc desc;
+ struct libie_aqc_req_res *cmd;
+ struct libie_aq_desc desc;
cmd = &desc.params.res_owner;
@@ -1940,16 +2274,16 @@ int ice_aq_alloc_free_res(struct ice_hw *hw,
enum ice_adminq_opc opc)
{
struct ice_aqc_alloc_free_res_cmd *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.sw_res_ctrl;
+ cmd = libie_aq_raw(&desc);
if (!buf || buf_size < flex_array_size(buf, elem, 1))
return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, opc);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
cmd->num_entries = cpu_to_le16(1);
@@ -2063,7 +2397,7 @@ static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
*/
static bool
ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
- struct ice_aqc_list_caps_elem *elem, const char *prefix)
+ struct libie_aqc_list_caps_elem *elem, const char *prefix)
{
u32 logical_id = le32_to_cpu(elem->logical_id);
u32 phys_id = le32_to_cpu(elem->phys_id);
@@ -2072,17 +2406,17 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
bool found = true;
switch (cap) {
- case ICE_AQC_CAPS_VALID_FUNCTIONS:
+ case LIBIE_AQC_CAPS_VALID_FUNCTIONS:
caps->valid_functions = number;
ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix,
caps->valid_functions);
break;
- case ICE_AQC_CAPS_SRIOV:
+ case LIBIE_AQC_CAPS_SRIOV:
caps->sr_iov_1_1 = (number == 1);
ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix,
caps->sr_iov_1_1);
break;
- case ICE_AQC_CAPS_DCB:
+ case LIBIE_AQC_CAPS_DCB:
caps->dcb = (number == 1);
caps->active_tc_bitmap = logical_id;
caps->maxtc = phys_id;
@@ -2091,7 +2425,7 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
caps->active_tc_bitmap);
ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc);
break;
- case ICE_AQC_CAPS_RSS:
+ case LIBIE_AQC_CAPS_RSS:
caps->rss_table_size = number;
caps->rss_table_entry_width = logical_id;
ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix,
@@ -2099,7 +2433,7 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix,
caps->rss_table_entry_width);
break;
- case ICE_AQC_CAPS_RXQS:
+ case LIBIE_AQC_CAPS_RXQS:
caps->num_rxq = number;
caps->rxq_first_id = phys_id;
ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix,
@@ -2107,7 +2441,7 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix,
caps->rxq_first_id);
break;
- case ICE_AQC_CAPS_TXQS:
+ case LIBIE_AQC_CAPS_TXQS:
caps->num_txq = number;
caps->txq_first_id = phys_id;
ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix,
@@ -2115,7 +2449,7 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix,
caps->txq_first_id);
break;
- case ICE_AQC_CAPS_MSIX:
+ case LIBIE_AQC_CAPS_MSIX:
caps->num_msix_vectors = number;
caps->msix_vector_first_id = phys_id;
ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix,
@@ -2123,53 +2457,60 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix,
caps->msix_vector_first_id);
break;
- case ICE_AQC_CAPS_PENDING_NVM_VER:
+ case LIBIE_AQC_CAPS_PENDING_NVM_VER:
caps->nvm_update_pending_nvm = true;
ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_nvm\n", prefix);
break;
- case ICE_AQC_CAPS_PENDING_OROM_VER:
+ case LIBIE_AQC_CAPS_PENDING_OROM_VER:
caps->nvm_update_pending_orom = true;
ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_orom\n", prefix);
break;
- case ICE_AQC_CAPS_PENDING_NET_VER:
+ case LIBIE_AQC_CAPS_PENDING_NET_VER:
caps->nvm_update_pending_netlist = true;
ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_netlist\n", prefix);
break;
- case ICE_AQC_CAPS_NVM_MGMT:
+ case LIBIE_AQC_CAPS_NVM_MGMT:
caps->nvm_unified_update =
(number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
true : false;
ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
caps->nvm_unified_update);
break;
- case ICE_AQC_CAPS_RDMA:
- caps->rdma = (number == 1);
+ case LIBIE_AQC_CAPS_RDMA:
+ if (IS_ENABLED(CONFIG_INFINIBAND_IRDMA))
+ caps->rdma = (number == 1);
ice_debug(hw, ICE_DBG_INIT, "%s: rdma = %d\n", prefix, caps->rdma);
break;
- case ICE_AQC_CAPS_MAX_MTU:
+ case LIBIE_AQC_CAPS_MAX_MTU:
caps->max_mtu = number;
ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
prefix, caps->max_mtu);
break;
- case ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE:
+ case LIBIE_AQC_CAPS_PCIE_RESET_AVOIDANCE:
caps->pcie_reset_avoidance = (number > 0);
ice_debug(hw, ICE_DBG_INIT,
"%s: pcie_reset_avoidance = %d\n", prefix,
caps->pcie_reset_avoidance);
break;
- case ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT:
+ case LIBIE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT:
caps->reset_restrict_support = (number == 1);
ice_debug(hw, ICE_DBG_INIT,
"%s: reset_restrict_support = %d\n", prefix,
caps->reset_restrict_support);
break;
- case ICE_AQC_CAPS_FW_LAG_SUPPORT:
- caps->roce_lag = !!(number & ICE_AQC_BIT_ROCEV2_LAG);
+ case LIBIE_AQC_CAPS_FW_LAG_SUPPORT:
+ caps->roce_lag = number & LIBIE_AQC_BIT_ROCEV2_LAG;
ice_debug(hw, ICE_DBG_INIT, "%s: roce_lag = %u\n",
prefix, caps->roce_lag);
- caps->sriov_lag = !!(number & ICE_AQC_BIT_SRIOV_LAG);
+ caps->sriov_lag = number & LIBIE_AQC_BIT_SRIOV_LAG;
ice_debug(hw, ICE_DBG_INIT, "%s: sriov_lag = %u\n",
prefix, caps->sriov_lag);
+ caps->sriov_aa_lag = number & LIBIE_AQC_BIT_SRIOV_AA_LAG;
+ ice_debug(hw, ICE_DBG_INIT, "%s: sriov_aa_lag = %u\n",
+ prefix, caps->sriov_aa_lag);
+ break;
+ case LIBIE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE:
+ caps->tx_sched_topo_comp_mode_en = (number == 1);
break;
default:
/* Not one of the recognized common capabilities */
@@ -2222,7 +2563,7 @@ ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
*/
static void
ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
- struct ice_aqc_list_caps_elem *cap)
+ struct libie_aqc_list_caps_elem *cap)
{
u32 logical_id = le32_to_cpu(cap->logical_id);
u32 number = le32_to_cpu(cap->number);
@@ -2245,7 +2586,7 @@ ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
*/
static void
ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
- struct ice_aqc_list_caps_elem *cap)
+ struct libie_aqc_list_caps_elem *cap)
{
func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n",
@@ -2264,7 +2605,7 @@ ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
*/
static void
ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
- struct ice_aqc_list_caps_elem *cap)
+ struct libie_aqc_list_caps_elem *cap)
{
struct ice_ts_func_info *info = &func_p->ts_func_info;
u32 number = le32_to_cpu(cap->number);
@@ -2277,11 +2618,16 @@ ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0);
info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0);
- info->clk_freq = FIELD_GET(ICE_TS_CLK_FREQ_M, number);
- info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0);
+ if (hw->mac_type != ICE_MAC_GENERIC_3K_E825) {
+ info->clk_freq = FIELD_GET(ICE_TS_CLK_FREQ_M, number);
+ info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0);
+ } else {
+ info->clk_freq = ICE_TSPLL_FREQ_156_250;
+ info->clk_src = ICE_CLK_SRC_TIME_REF;
+ }
- if (info->clk_freq < NUM_ICE_TIME_REF_FREQ) {
- info->time_ref = (enum ice_time_ref_freq)info->clk_freq;
+ if (info->clk_freq < NUM_ICE_TSPLL_FREQ) {
+ info->time_ref = (enum ice_tspll_freq)info->clk_freq;
} else {
/* Unknown clock frequency, so assume a (probably incorrect)
* default to avoid out-of-bounds look ups of frequency
@@ -2289,7 +2635,7 @@ ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
*/
ice_debug(hw, ICE_DBG_INIT, "1588 func caps: unknown clock frequency %u\n",
info->clk_freq);
- info->time_ref = ICE_TIME_REF_FREQ_25_000;
+ info->time_ref = ICE_TSPLL_FREQ_25_000;
}
ice_debug(hw, ICE_DBG_INIT, "func caps: ieee_1588 = %u\n",
@@ -2358,7 +2704,7 @@ static void
ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
void *buf, u32 cap_count)
{
- struct ice_aqc_list_caps_elem *cap_resp;
+ struct libie_aqc_list_caps_elem *cap_resp;
u32 i;
cap_resp = buf;
@@ -2373,16 +2719,16 @@ ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
&cap_resp[i], "func caps");
switch (cap) {
- case ICE_AQC_CAPS_VF:
+ case LIBIE_AQC_CAPS_VF:
ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
break;
- case ICE_AQC_CAPS_VSI:
+ case LIBIE_AQC_CAPS_VSI:
ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
break;
- case ICE_AQC_CAPS_1588:
+ case LIBIE_AQC_CAPS_1588:
ice_parse_1588_func_caps(hw, func_p, &cap_resp[i]);
break;
- case ICE_AQC_CAPS_FD:
+ case LIBIE_AQC_CAPS_FD:
ice_parse_fdir_func_caps(hw, func_p);
break;
default:
@@ -2398,6 +2744,25 @@ ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
}
/**
+ * ice_func_id_to_logical_id - map from function id to logical pf id
+ * @active_function_bitmap: active function bitmap
+ * @pf_id: function number of device
+ *
+ * Return: logical PF ID.
+ */
+static int ice_func_id_to_logical_id(u32 active_function_bitmap, u8 pf_id)
+{
+ u8 logical_id = 0;
+ u8 i;
+
+ for (i = 0; i < pf_id; i++)
+ if (active_function_bitmap & BIT(i))
+ logical_id++;
+
+ return logical_id;
+}
+
+/**
* ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps
* @hw: pointer to the HW struct
* @dev_p: pointer to device capabilities structure
@@ -2407,13 +2772,15 @@ ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
*/
static void
ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
- struct ice_aqc_list_caps_elem *cap)
+ struct libie_aqc_list_caps_elem *cap)
{
u32 number = le32_to_cpu(cap->number);
dev_p->num_funcs = hweight32(number);
ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n",
dev_p->num_funcs);
+
+ hw->logical_pf_id = ice_func_id_to_logical_id(number, hw->pf_id);
}
/**
@@ -2426,7 +2793,7 @@ ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
*/
static void
ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
- struct ice_aqc_list_caps_elem *cap)
+ struct libie_aqc_list_caps_elem *cap)
{
u32 number = le32_to_cpu(cap->number);
@@ -2445,7 +2812,7 @@ ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
*/
static void
ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
- struct ice_aqc_list_caps_elem *cap)
+ struct libie_aqc_list_caps_elem *cap)
{
u32 number = le32_to_cpu(cap->number);
@@ -2464,7 +2831,7 @@ ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
*/
static void
ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
- struct ice_aqc_list_caps_elem *cap)
+ struct libie_aqc_list_caps_elem *cap)
{
struct ice_ts_dev_info *info = &dev_p->ts_dev_info;
u32 logical_id = le32_to_cpu(cap->logical_id);
@@ -2484,6 +2851,7 @@ ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
info->ts_ll_read = ((number & ICE_TS_LL_TX_TS_READ_M) != 0);
info->ts_ll_int_read = ((number & ICE_TS_LL_TX_TS_INT_READ_M) != 0);
+ info->ll_phy_tmr_update = ((number & ICE_TS_LL_PHY_TMR_UPDATE_M) != 0);
info->ena_ports = logical_id;
info->tmr_own_map = phys_id;
@@ -2506,6 +2874,8 @@ ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
info->ts_ll_read);
ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_int_read = %u\n",
info->ts_ll_int_read);
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: ll_phy_tmr_update = %u\n",
+ info->ll_phy_tmr_update);
ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n",
info->ena_ports);
ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n",
@@ -2522,7 +2892,7 @@ ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
*/
static void
ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
- struct ice_aqc_list_caps_elem *cap)
+ struct libie_aqc_list_caps_elem *cap)
{
u32 number = le32_to_cpu(cap->number);
@@ -2542,7 +2912,7 @@ ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
*/
static void
ice_parse_sensor_reading_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
- struct ice_aqc_list_caps_elem *cap)
+ struct libie_aqc_list_caps_elem *cap)
{
dev_p->supported_sensors = le32_to_cpu(cap->number);
@@ -2552,6 +2922,34 @@ ice_parse_sensor_reading_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
}
/**
+ * ice_parse_nac_topo_dev_caps - Parse ICE_AQC_CAPS_NAC_TOPOLOGY cap
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse ICE_AQC_CAPS_NAC_TOPOLOGY for device capabilities.
+ */
+static void ice_parse_nac_topo_dev_caps(struct ice_hw *hw,
+ struct ice_hw_dev_caps *dev_p,
+ struct libie_aqc_list_caps_elem *cap)
+{
+ dev_p->nac_topo.mode = le32_to_cpu(cap->number);
+ dev_p->nac_topo.id = le32_to_cpu(cap->phys_id) & ICE_NAC_TOPO_ID_M;
+
+ dev_info(ice_hw_to_dev(hw),
+ "PF is configured in %s mode with IP instance ID %d\n",
+ (dev_p->nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M) ?
+ "primary" : "secondary", dev_p->nac_topo.id);
+
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_primary = %d\n",
+ !!(dev_p->nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M));
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_dual = %d\n",
+ !!(dev_p->nac_topo.mode & ICE_NAC_TOPO_DUAL_M));
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology id = %d\n",
+ dev_p->nac_topo.id);
+}
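Editor's note: the parsed mode and ID are cached in the device capabilities, so later code can branch on the NAC topology without re-querying firmware. A driver-context sketch of how that cached state might be consumed (the hw->dev_caps member path is an assumption; ICE_NAC_TOPO_*_M are the masks the parser above already uses):

/* Sketch: derive primary/dual status from the cached NAC topology. */
static bool example_is_primary_complex(struct ice_hw *hw)
{
	return !!(hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M);
}

static bool example_is_dual_complex(struct ice_hw *hw)
{
	return !!(hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_DUAL_M);
}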
+
+/**
* ice_parse_dev_caps - Parse device capabilities
* @hw: pointer to the HW struct
* @dev_p: pointer to device capabilities structure
@@ -2569,7 +2967,7 @@ static void
ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
void *buf, u32 cap_count)
{
- struct ice_aqc_list_caps_elem *cap_resp;
+ struct libie_aqc_list_caps_elem *cap_resp;
u32 i;
cap_resp = buf;
@@ -2584,24 +2982,27 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
&cap_resp[i], "dev caps");
switch (cap) {
- case ICE_AQC_CAPS_VALID_FUNCTIONS:
+ case LIBIE_AQC_CAPS_VALID_FUNCTIONS:
ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
break;
- case ICE_AQC_CAPS_VF:
+ case LIBIE_AQC_CAPS_VF:
ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
break;
- case ICE_AQC_CAPS_VSI:
+ case LIBIE_AQC_CAPS_VSI:
ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
break;
- case ICE_AQC_CAPS_1588:
+ case LIBIE_AQC_CAPS_1588:
ice_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]);
break;
- case ICE_AQC_CAPS_FD:
+ case LIBIE_AQC_CAPS_FD:
ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
break;
- case ICE_AQC_CAPS_SENSOR_READING:
+ case LIBIE_AQC_CAPS_SENSOR_READING:
ice_parse_sensor_reading_cap(hw, dev_p, &cap_resp[i]);
break;
+ case LIBIE_AQC_CAPS_NAC_TOPOLOGY:
+ ice_parse_nac_topo_dev_caps(hw, dev_p, &cap_resp[i]);
+ break;
default:
/* Don't list common capabilities as unknown */
if (!found)
@@ -2615,40 +3016,6 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
}
/**
- * ice_is_pf_c827 - check if pf contains c827 phy
- * @hw: pointer to the hw struct
- */
-bool ice_is_pf_c827(struct ice_hw *hw)
-{
- struct ice_aqc_get_link_topo cmd = {};
- u8 node_part_number;
- u16 node_handle;
- int status;
-
- if (hw->mac_type != ICE_MAC_E810)
- return false;
-
- if (hw->device_id != ICE_DEV_ID_E810C_QSFP)
- return true;
-
- cmd.addr.topo_params.node_type_ctx =
- FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY) |
- FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ICE_AQC_LINK_TOPO_NODE_CTX_PORT);
- cmd.addr.topo_params.index = 0;
-
- status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number,
- &node_handle);
-
- if (status || node_part_number != ICE_AQC_GET_LINK_TOPO_NODE_NR_C827)
- return false;
-
- if (node_handle == E810C_QSFP_C827_0_HANDLE || node_handle == E810C_QSFP_C827_1_HANDLE)
- return true;
-
- return false;
-}
-
-/**
* ice_is_phy_rclk_in_netlist
* @hw: pointer to the hw struct
*
@@ -2656,9 +3023,11 @@ bool ice_is_pf_c827(struct ice_hw *hw)
*/
bool ice_is_phy_rclk_in_netlist(struct ice_hw *hw)
{
- if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
+ if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY,
+ ICE_AQC_LINK_TOPO_NODE_CTX_PORT,
ICE_AQC_GET_LINK_TOPO_NODE_NR_C827, NULL) &&
- ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
+ ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY,
+ ICE_AQC_LINK_TOPO_NODE_CTX_PORT,
ICE_AQC_GET_LINK_TOPO_NODE_NR_E822_PHY, NULL))
return false;
@@ -2674,6 +3043,7 @@ bool ice_is_phy_rclk_in_netlist(struct ice_hw *hw)
bool ice_is_clock_mux_in_netlist(struct ice_hw *hw)
{
if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_MUX,
+ ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL,
ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_CLK_MUX,
NULL))
return false;
@@ -2694,12 +3064,14 @@ bool ice_is_clock_mux_in_netlist(struct ice_hw *hw)
bool ice_is_cgu_in_netlist(struct ice_hw *hw)
{
if (!ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
+ ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL,
ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032,
NULL)) {
hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032;
return true;
} else if (!ice_find_netlist_node(hw,
ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
+ ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL,
ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384,
NULL)) {
hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384;
@@ -2718,6 +3090,7 @@ bool ice_is_cgu_in_netlist(struct ice_hw *hw)
bool ice_is_gps_in_netlist(struct ice_hw *hw)
{
if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_GPS,
+ ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL,
ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_GPS, NULL))
return false;
@@ -2747,8 +3120,8 @@ int
ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
- struct ice_aqc_list_caps *cmd;
- struct ice_aq_desc desc;
+ struct libie_aqc_list_caps *cmd;
+ struct libie_aq_desc desc;
int status;
cmd = &desc.params.get_cap;
@@ -2789,7 +3162,7 @@ ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
* device will return, we can simply send a 4KB buffer, the maximum
* possible size that firmware can return.
*/
- cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
+ cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct libie_aqc_list_caps_elem);
status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
ice_aqc_opc_list_dev_caps, NULL);
@@ -2823,7 +3196,7 @@ ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
* device will return, we can simply send a 4KB buffer, the maximum
* possible size that firmware can return.
*/
- cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
+ cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct libie_aqc_list_caps_elem);
status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
ice_aqc_opc_list_func_caps, NULL);
@@ -2932,9 +3305,9 @@ ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
struct ice_sq_cd *cd)
{
struct ice_aqc_manage_mac_write *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.mac_write;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
cmd->flags = flags;
@@ -2951,10 +3324,12 @@ ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
*/
static int ice_aq_clear_pxe_mode(struct ice_hw *hw)
{
- struct ice_aq_desc desc;
+ struct ice_aqc_clear_pxe *cmd;
+ struct libie_aq_desc desc;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
- desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
+ cmd->rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
@@ -2987,16 +3362,19 @@ ice_aq_set_port_params(struct ice_port_info *pi, bool double_vlan,
{
struct ice_aqc_set_port_params *cmd;
struct ice_hw *hw = pi->hw;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
u16 cmd_flags = 0;
- cmd = &desc.params.set_port_params;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params);
if (double_vlan)
cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA;
cmd->cmd_flags = cpu_to_le16(cmd_flags);
+ cmd->local_fwd_mode = pi->local_fwd_mode |
+ ICE_AQC_SET_P_PARAMS_LOCAL_FWD_MODE_VALID;
+
return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
@@ -3014,6 +3392,7 @@ bool ice_is_100m_speed_supported(struct ice_hw *hw)
case ICE_DEV_ID_E822L_SGMII:
case ICE_DEV_ID_E823L_1GBE:
case ICE_DEV_ID_E823C_SGMII:
+ case ICE_DEV_ID_E825C_SGMII:
return true;
default:
return false;
@@ -3030,11 +3409,13 @@ bool ice_is_100m_speed_supported(struct ice_hw *hw)
* Note: In the structure of [phy_type_low, phy_type_high], there should
* be one bit set, as this function will convert one PHY type to its
* speed.
- * If no bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned
- * If more than one bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned
+ *
+ * Return:
+ * * PHY speed for recognized PHY type
+ * * If no bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned
+ * * If more than one bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned
*/
-static u16
-ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
+u16 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
{
u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
@@ -3135,6 +3516,16 @@ ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
case ICE_PHY_TYPE_HIGH_100G_AUI2:
speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
break;
+ case ICE_PHY_TYPE_HIGH_200G_CR4_PAM4:
+ case ICE_PHY_TYPE_HIGH_200G_SR4:
+ case ICE_PHY_TYPE_HIGH_200G_FR4:
+ case ICE_PHY_TYPE_HIGH_200G_LR4:
+ case ICE_PHY_TYPE_HIGH_200G_DR4:
+ case ICE_PHY_TYPE_HIGH_200G_KR4_PAM4:
+ case ICE_PHY_TYPE_HIGH_200G_AUI4_AOC_ACC:
+ case ICE_PHY_TYPE_HIGH_200G_AUI4:
+ speed_phy_type_high = ICE_AQ_LINK_SPEED_200GB;
+ break;
default:
speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
break;
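Editor's note: with the 200G PHY types wired into the high-word switch, a caller that has isolated a single PHY type bit gets the new speed back directly. A usage sketch (driver context; the ICE_PHY_TYPE_HIGH_* macros are assumed to be single-bit masks, as the kernel-doc above implies):

/* Sketch: exactly one bit set in phy_type_high, none in phy_type_low. */
static bool example_is_200g_link(void)
{
	u16 speed;

	speed = ice_get_link_speed_based_on_phy_type(0, ICE_PHY_TYPE_HIGH_200G_SR4);

	/* ICE_AQ_LINK_SPEED_200GB for the newly added 200G types */
	return speed == ICE_AQ_LINK_SPEED_200GB;
}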
@@ -3212,7 +3603,8 @@ int
ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
{
- struct ice_aq_desc desc;
+ struct ice_aqc_set_phy_cfg *cmd;
+ struct libie_aq_desc desc;
int status;
if (!cfg)
@@ -3227,8 +3619,9 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
}
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
- desc.params.set_phy.lport_num = pi->lport;
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ cmd = libie_aq_raw(&desc);
+ cmd->lport_num = pi->lport;
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
@@ -3244,7 +3637,7 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
cfg->link_fec_opt);
status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
- if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
+ if (hw->adminq.sq_last_status == LIBIE_AQ_RC_EMODE)
status = 0;
if (!status)
@@ -3286,6 +3679,100 @@ int ice_update_link_info(struct ice_port_info *pi)
}
/**
+ * ice_aq_get_phy_equalization - function to read serdes equaliser
+ * value from firmware using admin queue command.
+ * @hw: pointer to the HW struct
+ * @data_in: represents the serdes equalization parameter requested
+ * @op_code: represents the serdes number and flag to represent tx or rx
+ * @serdes_num: represents the serdes number
+ * @output: pointer to the caller-supplied buffer to return serdes equaliser
+ *
+ * Return: non-zero status on error and 0 on success.
+ */
+int ice_aq_get_phy_equalization(struct ice_hw *hw, u16 data_in, u16 op_code,
+ u8 serdes_num, int *output)
+{
+ struct ice_aqc_dnl_call_command *cmd;
+ struct ice_aqc_dnl_call buf = {};
+ struct libie_aq_desc desc;
+ int err;
+
+ buf.sto.txrx_equa_reqs.data_in = cpu_to_le16(data_in);
+ buf.sto.txrx_equa_reqs.op_code_serdes_sel =
+ cpu_to_le16(op_code | (serdes_num & 0xF));
+ cmd = libie_aq_raw(&desc);
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dnl_call);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_BUF |
+ LIBIE_AQ_FLAG_RD |
+ LIBIE_AQ_FLAG_SI);
+ desc.datalen = cpu_to_le16(sizeof(struct ice_aqc_dnl_call));
+ cmd->activity_id = cpu_to_le16(ICE_AQC_ACT_ID_DNL);
+
+ err = ice_aq_send_cmd(hw, &desc, &buf, sizeof(struct ice_aqc_dnl_call),
+ NULL);
+ *output = err ? 0 : buf.sto.txrx_equa_resp.val;
+
+ return err;
+}
+
+#define FEC_REG_PORT(port) { \
+ FEC_CORR_LOW_REG_PORT##port, \
+ FEC_CORR_HIGH_REG_PORT##port, \
+ FEC_UNCORR_LOW_REG_PORT##port, \
+ FEC_UNCORR_HIGH_REG_PORT##port, \
+}
+
+static const u32 fec_reg[][ICE_FEC_MAX] = {
+ FEC_REG_PORT(0),
+ FEC_REG_PORT(1),
+ FEC_REG_PORT(2),
+ FEC_REG_PORT(3)
+};
+
+/**
+ * ice_aq_get_fec_stats - reads fec stats from phy
+ * @hw: pointer to the HW struct
+ * @pcs_quad: represents pcsquad of user input serdes
+ * @pcs_port: represents the pcs port number part of above pcs quad
+ * @fec_type: represents FEC stats type
+ * @output: pointer to the caller-supplied buffer to return requested fec stats
+ *
+ * Return: non-zero status on error and 0 on success.
+ */
+int ice_aq_get_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port,
+ enum ice_fec_stats_types fec_type, u32 *output)
+{
+ u16 flag = (LIBIE_AQ_FLAG_RD | LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_SI);
+ struct ice_sbq_msg_input msg = {};
+ u32 receiver_id, reg_offset;
+ int err;
+
+ if (pcs_port > 3)
+ return -EINVAL;
+
+ reg_offset = fec_reg[pcs_port][fec_type];
+
+ if (pcs_quad == 0)
+ receiver_id = FEC_RECEIVER_ID_PCS0;
+ else if (pcs_quad == 1)
+ receiver_id = FEC_RECEIVER_ID_PCS1;
+ else
+ return -EINVAL;
+
+ msg.msg_addr_low = lower_16_bits(reg_offset);
+ msg.msg_addr_high = receiver_id;
+ msg.opcode = ice_sbq_msg_rd;
+ msg.dest_dev = ice_sbq_dev_phy_0;
+
+ err = ice_sbq_rw_reg(hw, &msg, flag);
+ if (err)
+ return err;
+
+ *output = msg.data;
+ return 0;
+}
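Editor's note: each call reads one 32-bit half of a counter; corrected and uncorrected codeword counts are exposed as low/high register pairs per PCS port (see the FEC_*_REG_PORTn defines added to ice_common.h below). A hedged sketch of combining the halves (driver context; the ICE_FEC_CORR_LOW/ICE_FEC_CORR_HIGH enumerator names of enum ice_fec_stats_types are assumptions, not taken from this diff):

/* Sketch: read the corrected-codeword counter for one PCS quad/port by
 * stitching together the low/high 32-bit halves.
 */
static int example_read_fec_corrected(struct ice_hw *hw, u16 pcs_quad,
				      u16 pcs_port, u64 *corrected)
{
	u32 lo, hi;
	int err;

	err = ice_aq_get_fec_stats(hw, pcs_quad, pcs_port, ICE_FEC_CORR_LOW, &lo);
	if (err)
		return err;

	err = ice_aq_get_fec_stats(hw, pcs_quad, pcs_port, ICE_FEC_CORR_HIGH, &hi);
	if (err)
		return err;

	*corrected = ((u64)hi << 32) | lo;
	return 0;
}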
+
+/**
* ice_cache_phy_user_req
* @pi: port information structure
* @cache_data: PHY logging data
@@ -3678,9 +4165,9 @@ ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
struct ice_sq_cd *cd)
{
struct ice_aqc_restart_an *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.restart_an;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
@@ -3708,9 +4195,9 @@ ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
struct ice_sq_cd *cd)
{
struct ice_aqc_set_event_mask *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.set_event_mask;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
@@ -3732,9 +4219,9 @@ int
ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
{
struct ice_aqc_set_mac_lb *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.set_mac_lb;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
if (ena_lpbk)
@@ -3757,9 +4244,9 @@ ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
{
struct ice_aqc_set_port_id_led *cmd;
struct ice_hw *hw = pi->hw;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.set_port_id_led;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
@@ -3795,7 +4282,7 @@ ice_aq_get_port_options(struct ice_hw *hw,
u8 *pending_option_idx, bool *pending_option_valid)
{
struct ice_aqc_get_port_options *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
u8 i;
@@ -3803,7 +4290,7 @@ ice_aq_get_port_options(struct ice_hw *hw,
if (*option_count < ICE_AQC_PORT_OPT_COUNT_M)
return -EINVAL;
- cmd = &desc.params.get_port_options;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_port_options);
if (lport_valid)
@@ -3869,12 +4356,12 @@ ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
u8 new_option)
{
struct ice_aqc_set_port_option *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
if (new_option > ICE_AQC_PORT_OPT_COUNT_M)
return -EINVAL;
- cmd = &desc.params.set_port_option;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_option);
if (lport_valid)
@@ -3887,6 +4374,68 @@ ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
}
/**
+ * ice_get_phy_lane_number - Get PHY lane number for current adapter
+ * @hw: pointer to the hw struct
+ *
+ * Return: PHY lane number on success, negative error code otherwise.
+ */
+int ice_get_phy_lane_number(struct ice_hw *hw)
+{
+ struct ice_aqc_get_port_options_elem *options;
+ unsigned int lport = 0;
+ unsigned int lane;
+ int err;
+
+ /* E82X does not have sequential IDs, lane number is PF ID.
+ * For E825 device, the exception is the variant with external
+ * PHY (0x579F), in which there is also 1:1 pf_id -> lane_number
+ * mapping.
+ */
+ if (hw->mac_type == ICE_MAC_GENERIC ||
+ hw->device_id == ICE_DEV_ID_E825C_SGMII)
+ return hw->pf_id;
+
+ options = kcalloc(ICE_AQC_PORT_OPT_MAX, sizeof(*options), GFP_KERNEL);
+ if (!options)
+ return -ENOMEM;
+
+ for (lane = 0; lane < ICE_MAX_PORT_PER_PCI_DEV; lane++) {
+ u8 options_count = ICE_AQC_PORT_OPT_MAX;
+ u8 speed, active_idx, pending_idx;
+ bool active_valid, pending_valid;
+
+ err = ice_aq_get_port_options(hw, options, &options_count, lane,
+ true, &active_idx, &active_valid,
+ &pending_idx, &pending_valid);
+ if (err)
+ goto err;
+
+ if (!active_valid)
+ continue;
+
+ speed = options[active_idx].max_lane_speed;
+ /* If we don't get speed for this lane, it's unoccupied */
+ if (speed > ICE_AQC_PORT_OPT_MAX_LANE_40G)
+ continue;
+
+ if (hw->pf_id == lport) {
+ if (hw->mac_type == ICE_MAC_GENERIC_3K_E825 &&
+ ice_is_dual(hw) && !ice_is_primary(hw))
+ lane += ICE_PORTS_PER_QUAD;
+ kfree(options);
+ return lane;
+ }
+ lport++;
+ }
+
+ /* PHY lane not found */
+ err = -ENXIO;
+err:
+ kfree(options);
+ return err;
+}
+
+/**
* ice_aq_sff_eeprom
* @hw: pointer to the HW struct
* @lport: bits [7:0] = logical port, bit [8] = logical port valid
@@ -3907,7 +4456,7 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
bool write, struct ice_sq_cd *cd)
{
struct ice_aqc_sff_eeprom *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
u16 i2c_bus_addr;
int status;
@@ -3915,8 +4464,8 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
- cmd = &desc.params.read_write_sff_param;
- desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
+ cmd = libie_aq_raw(&desc);
+ desc.flags = cpu_to_le16(LIBIE_AQ_FLAG_RD);
cmd->lport_num = (u8)(lport & 0xff);
cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
i2c_bus_addr = FIELD_PREP(ICE_AQC_SFF_I2CBUS_7BIT_M, bus_addr >> 1) |
@@ -3976,7 +4525,7 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw,
struct ice_aqc_get_set_rss_lut *desc_params;
enum ice_aqc_lut_flags flags;
enum ice_lut_size lut_size;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
u8 *lut = params->lut;
@@ -3992,9 +4541,9 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw,
opcode = set ? ice_aqc_opc_set_rss_lut : ice_aqc_opc_get_rss_lut;
ice_fill_dflt_direct_cmd_desc(&desc, opcode);
if (set)
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
- desc_params = &desc.params.get_set_rss_lut;
+ desc_params = libie_aq_raw(&desc);
vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
desc_params->vsi_id = cpu_to_le16(vsi_id | ICE_AQC_RSS_VSI_VALID);
@@ -4049,16 +4598,16 @@ __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
{
struct ice_aqc_get_set_rss_key *desc_params;
u16 key_size = sizeof(*key);
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
if (set) {
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
} else {
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
}
- desc_params = &desc.params.get_set_rss_key;
+ desc_params = libie_aq_raw(&desc);
desc_params->vsi_id = cpu_to_le16(vsi_id | ICE_AQC_RSS_VSI_VALID);
return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
@@ -4130,10 +4679,10 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
{
struct ice_aqc_add_tx_qgrp *list;
struct ice_aqc_add_txqs *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
u16 i, sum_size = 0;
- cmd = &desc.params.add_txqs;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
@@ -4152,7 +4701,7 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
if (buf_size != sum_size)
return -EINVAL;
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
cmd->num_qgrps = num_qgrps;
@@ -4179,12 +4728,12 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
{
struct ice_aqc_dis_txq_item *item;
struct ice_aqc_dis_txqs *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
u16 vmvf_and_timeout;
u16 i, sz = 0;
int status;
- cmd = &desc.params.dis_txqs;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
/* qg_list can be NULL only in VM/VF reset flow */
@@ -4225,7 +4774,7 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
/* set RD bit to indicate that command buffer is provided by the driver
* and it needs to be read by the firmware
*/
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
for (i = 0, item = qg_list; i < num_qgrps; i++) {
u16 item_size = struct_size(item, q_id, item->num_qs);
@@ -4257,40 +4806,42 @@ do_aq:
}
/**
- * ice_aq_cfg_lan_txq
+ * ice_aq_cfg_lan_txq - send AQ command 0x0C32 to FW
* @hw: pointer to the hardware structure
* @buf: buffer for command
* @buf_size: size of buffer in bytes
* @num_qs: number of queues being configured
* @oldport: origination lport
* @newport: destination lport
+ * @mode: command type (cmd_type) to use for the move
* @cd: pointer to command details structure or NULL
*
* Move/Configure LAN Tx queue (0x0C32)
*
- * There is a better AQ command to use for moving nodes, so only coding
- * this one for configuring the node.
+ * Return: Zero on success, associated error code on failure.
*/
int
ice_aq_cfg_lan_txq(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *buf,
u16 buf_size, u16 num_qs, u8 oldport, u8 newport,
- struct ice_sq_cd *cd)
+ u8 mode, struct ice_sq_cd *cd)
{
struct ice_aqc_cfg_txqs *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
- cmd = &desc.params.cfg_txqs;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_cfg_txqs);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
if (!buf)
return -EINVAL;
- cmd->cmd_type = ICE_AQC_Q_CFG_TC_CHNG;
+ cmd->cmd_type = mode;
cmd->num_qs = num_qs;
cmd->port_num_chng = (oldport & ICE_AQC_Q_CFG_SRC_PRT_M);
cmd->port_num_chng |= FIELD_PREP(ICE_AQC_Q_CFG_DST_PRT_M, newport);
+ cmd->port_num_chng |= FIELD_PREP(ICE_AQC_Q_CFG_MODE_M,
+ ICE_AQC_Q_CFG_MODE_KEEP_OWN);
cmd->time_out = FIELD_PREP(ICE_AQC_Q_CFG_TIMEOUT_M, 5);
cmd->blocked_cgds = 0;
@@ -4318,10 +4869,10 @@ ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
{
struct ice_aqc_add_rdma_qset_data *list;
struct ice_aqc_add_rdma_qset *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
u16 i, sum_size = 0;
- cmd = &desc.params.add_rdma_qset;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset);
@@ -4339,213 +4890,54 @@ ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
if (buf_size != sum_size)
return -EINVAL;
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
cmd->num_qset_grps = num_qset_grps;
return ice_aq_send_cmd(hw, &desc, qset_list, buf_size, cd);
}
-/* End of FW Admin Queue command wrappers */
-
-/**
- * ice_pack_ctx_byte - write a byte to a packed context structure
- * @src_ctx: unpacked source context structure
- * @dest_ctx: packed destination context data
- * @ce_info: context element description
- */
-static void ice_pack_ctx_byte(u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
-{
- u8 src_byte, dest_byte, mask;
- u8 *from, *dest;
- u16 shift_width;
-
- /* copy from the next struct field */
- from = src_ctx + ce_info->offset;
-
- /* prepare the bits and mask */
- shift_width = ce_info->lsb % 8;
- mask = GENMASK(ce_info->width - 1 + shift_width, shift_width);
-
- src_byte = *from;
- src_byte <<= shift_width;
- src_byte &= mask;
-
- /* get the current bits from the target bit string */
- dest = dest_ctx + (ce_info->lsb / 8);
-
- memcpy(&dest_byte, dest, sizeof(dest_byte));
-
- dest_byte &= ~mask; /* get the bits not changing */
- dest_byte |= src_byte; /* add in the new bits */
-
- /* put it all back */
- memcpy(dest, &dest_byte, sizeof(dest_byte));
-}
-
-/**
- * ice_pack_ctx_word - write a word to a packed context structure
- * @src_ctx: unpacked source context structure
- * @dest_ctx: packed destination context data
- * @ce_info: context element description
- */
-static void ice_pack_ctx_word(u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
-{
- u16 src_word, mask;
- __le16 dest_word;
- u8 *from, *dest;
- u16 shift_width;
-
- /* copy from the next struct field */
- from = src_ctx + ce_info->offset;
-
- /* prepare the bits and mask */
- shift_width = ce_info->lsb % 8;
- mask = GENMASK(ce_info->width - 1 + shift_width, shift_width);
-
- /* don't swizzle the bits until after the mask because the mask bits
- * will be in a different bit position on big endian machines
- */
- src_word = *(u16 *)from;
- src_word <<= shift_width;
- src_word &= mask;
-
- /* get the current bits from the target bit string */
- dest = dest_ctx + (ce_info->lsb / 8);
-
- memcpy(&dest_word, dest, sizeof(dest_word));
-
- dest_word &= ~(cpu_to_le16(mask)); /* get the bits not changing */
- dest_word |= cpu_to_le16(src_word); /* add in the new bits */
-
- /* put it all back */
- memcpy(dest, &dest_word, sizeof(dest_word));
-}
-
-/**
- * ice_pack_ctx_dword - write a dword to a packed context structure
- * @src_ctx: unpacked source context structure
- * @dest_ctx: packed destination context data
- * @ce_info: context element description
- */
-static void ice_pack_ctx_dword(u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
-{
- u32 src_dword, mask;
- __le32 dest_dword;
- u8 *from, *dest;
- u16 shift_width;
-
- /* copy from the next struct field */
- from = src_ctx + ce_info->offset;
-
- /* prepare the bits and mask */
- shift_width = ce_info->lsb % 8;
- mask = GENMASK(ce_info->width - 1 + shift_width, shift_width);
-
- /* don't swizzle the bits until after the mask because the mask bits
- * will be in a different bit position on big endian machines
- */
- src_dword = *(u32 *)from;
- src_dword <<= shift_width;
- src_dword &= mask;
-
- /* get the current bits from the target bit string */
- dest = dest_ctx + (ce_info->lsb / 8);
-
- memcpy(&dest_dword, dest, sizeof(dest_dword));
-
- dest_dword &= ~(cpu_to_le32(mask)); /* get the bits not changing */
- dest_dword |= cpu_to_le32(src_dword); /* add in the new bits */
-
- /* put it all back */
- memcpy(dest, &dest_dword, sizeof(dest_dword));
-}
-
/**
- * ice_pack_ctx_qword - write a qword to a packed context structure
- * @src_ctx: unpacked source context structure
- * @dest_ctx: packed destination context data
- * @ce_info: context element description
+ * ice_aq_set_txtimeq - set Tx time queues
+ * @hw: pointer to the hardware structure
+ * @txtimeq: first Tx time queue id to configure
+ * @q_count: number of queues to configure
+ * @txtime_qg: queue group to be set
+ * @buf_size: size of buffer for indirect command
+ * @cd: pointer to command details structure or NULL
+ *
+ * Set Tx Time queue (0x0C35)
+ * Return: 0 on success or negative value on failure.
*/
-static void ice_pack_ctx_qword(u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
+int
+ice_aq_set_txtimeq(struct ice_hw *hw, u16 txtimeq, u8 q_count,
+ struct ice_aqc_set_txtime_qgrp *txtime_qg, u16 buf_size,
+ struct ice_sq_cd *cd)
{
- u64 src_qword, mask;
- __le64 dest_qword;
- u8 *from, *dest;
- u16 shift_width;
+ struct ice_aqc_set_txtimeqs *cmd;
+ struct libie_aq_desc desc;
+ u16 size;
- /* copy from the next struct field */
- from = src_ctx + ce_info->offset;
-
- /* prepare the bits and mask */
- shift_width = ce_info->lsb % 8;
- mask = GENMASK_ULL(ce_info->width - 1 + shift_width, shift_width);
+ if (!txtime_qg || txtimeq > ICE_TXTIME_MAX_QUEUE ||
+ q_count < 1 || q_count > ICE_SET_TXTIME_MAX_Q_AMOUNT)
+ return -EINVAL;
- /* don't swizzle the bits until after the mask because the mask bits
- * will be in a different bit position on big endian machines
- */
- src_qword = *(u64 *)from;
- src_qword <<= shift_width;
- src_qword &= mask;
+ size = struct_size(txtime_qg, txtimeqs, q_count);
+ if (buf_size != size)
+ return -EINVAL;
- /* get the current bits from the target bit string */
- dest = dest_ctx + (ce_info->lsb / 8);
+ cmd = libie_aq_raw(&desc);
- memcpy(&dest_qword, dest, sizeof(dest_qword));
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_txtimeqs);
- dest_qword &= ~(cpu_to_le64(mask)); /* get the bits not changing */
- dest_qword |= cpu_to_le64(src_qword); /* add in the new bits */
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
- /* put it all back */
- memcpy(dest, &dest_qword, sizeof(dest_qword));
+ cmd->q_id = cpu_to_le16(txtimeq);
+ cmd->q_amount = cpu_to_le16(q_count);
+ return ice_aq_send_cmd(hw, &desc, txtime_qg, buf_size, cd);
}
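Editor's note: the indirect buffer length must match struct_size(txtime_qg, txtimeqs, q_count) exactly, or the call above is rejected with -EINVAL. A minimal allocation/usage sketch mirroring that check (driver context; the fill step is elided):

/* Sketch: size the buffer with struct_size() for exactly q_count entries. */
static int example_set_txtime(struct ice_hw *hw, u16 first_q, u8 q_count)
{
	struct ice_aqc_set_txtime_qgrp *qg;
	u16 buf_size = struct_size(qg, txtimeqs, q_count);
	int err;

	qg = kzalloc(buf_size, GFP_KERNEL);
	if (!qg)
		return -ENOMEM;

	/* ... fill qg->txtimeqs[0..q_count-1] with Tx time queue context ... */

	err = ice_aq_set_txtimeq(hw, first_q, q_count, qg, buf_size, NULL);
	kfree(qg);
	return err;
}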
-/**
- * ice_set_ctx - set context bits in packed structure
- * @hw: pointer to the hardware structure
- * @src_ctx: pointer to a generic non-packed context structure
- * @dest_ctx: pointer to memory for the packed structure
- * @ce_info: List of Rx context elements
- */
-int ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
-{
- int f;
-
- for (f = 0; ce_info[f].width; f++) {
- /* We have to deal with each element of the FW response
- * using the correct size so that we are correct regardless
- * of the endianness of the machine.
- */
- if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
- ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
- f, ce_info[f].width, ce_info[f].size_of);
- continue;
- }
- switch (ce_info[f].size_of) {
- case sizeof(u8):
- ice_pack_ctx_byte(src_ctx, dest_ctx, &ce_info[f]);
- break;
- case sizeof(u16):
- ice_pack_ctx_word(src_ctx, dest_ctx, &ce_info[f]);
- break;
- case sizeof(u32):
- ice_pack_ctx_dword(src_ctx, dest_ctx, &ce_info[f]);
- break;
- case sizeof(u64):
- ice_pack_ctx_qword(src_ctx, dest_ctx, &ce_info[f]);
- break;
- default:
- return -EINVAL;
- }
- }
-
- return 0;
-}
+/* End of FW Admin Queue command wrappers */
/**
* ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
@@ -4956,6 +5348,32 @@ ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
}
/**
+ * ice_aq_get_cgu_input_pin_measure - get input pin signal measurements
+ * @hw: pointer to the HW struct
+ * @dpll_idx: index of dpll to be measured
+ * @meas: array to be filled with results
+ * @meas_num: max number of results the array can hold
+ *
+ * Get CGU measurements (0x0C59) of phase and frequency offsets for input
+ * pins on given dpll.
+ *
+ * Return: 0 on success or negative value on failure.
+ */
+int ice_aq_get_cgu_input_pin_measure(struct ice_hw *hw, u8 dpll_idx,
+ struct ice_cgu_input_measure *meas,
+ u16 meas_num)
+{
+ struct ice_aqc_get_cgu_input_measure *cmd;
+ struct libie_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_input_measure);
+ cmd = libie_aq_raw(&desc);
+ cmd->dpll_idx_opt = dpll_idx & ICE_AQC_GET_CGU_IN_MEAS_DPLL_IDX_M;
+
+ return ice_aq_send_cmd(hw, &desc, meas, meas_num * sizeof(*meas), NULL);
+}
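Editor's note: the measurement results land directly in the caller's buffer, one element per input pin, and the buffer size passed to the admin queue is derived from meas_num. A minimal usage sketch (driver context; the pin count of 8 is illustrative only):

/* Sketch: fetch phase/frequency measurements for DPLL 0. */
static int example_measure_dpll0(struct ice_hw *hw)
{
	struct ice_cgu_input_measure meas[8] = {};

	return ice_aq_get_cgu_input_pin_measure(hw, 0, meas,
						ARRAY_SIZE(meas));
}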
+
+/**
* ice_aq_get_cgu_abilities - get cgu abilities
* @hw: pointer to the HW struct
* @abilities: CGU abilities
@@ -4967,7 +5385,7 @@ int
ice_aq_get_cgu_abilities(struct ice_hw *hw,
struct ice_aqc_get_cgu_abilities *abilities)
{
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_abilities);
return ice_aq_send_cmd(hw, &desc, abilities, sizeof(*abilities), NULL);
@@ -4990,10 +5408,10 @@ ice_aq_set_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 flags1, u8 flags2,
u32 freq, s32 phase_delay)
{
struct ice_aqc_set_cgu_input_config *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_input_config);
- cmd = &desc.params.set_cgu_input_config;
+ cmd = libie_aq_raw(&desc);
cmd->input_idx = input_idx;
cmd->flags1 = flags1;
cmd->flags2 = flags2;
@@ -5022,11 +5440,11 @@ ice_aq_get_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 *status, u8 *type,
u8 *flags1, u8 *flags2, u32 *freq, s32 *phase_delay)
{
struct ice_aqc_get_cgu_input_config *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int ret;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_input_config);
- cmd = &desc.params.get_cgu_input_config;
+ cmd = libie_aq_raw(&desc);
cmd->input_idx = input_idx;
ret = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
@@ -5065,10 +5483,10 @@ ice_aq_set_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 flags,
u8 src_sel, u32 freq, s32 phase_delay)
{
struct ice_aqc_set_cgu_output_config *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_output_config);
- cmd = &desc.params.set_cgu_output_config;
+ cmd = libie_aq_raw(&desc);
cmd->output_idx = output_idx;
cmd->flags = flags;
cmd->src_sel = src_sel;
@@ -5095,11 +5513,11 @@ ice_aq_get_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 *flags,
u8 *src_sel, u32 *freq, u32 *src_freq)
{
struct ice_aqc_get_cgu_output_config *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int ret;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_output_config);
- cmd = &desc.params.get_cgu_output_config;
+ cmd = libie_aq_raw(&desc);
cmd->output_idx = output_idx;
ret = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
@@ -5136,11 +5554,11 @@ ice_aq_get_cgu_dpll_status(struct ice_hw *hw, u8 dpll_num, u8 *ref_state,
u8 *eec_mode)
{
struct ice_aqc_get_cgu_dpll_status *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_dpll_status);
- cmd = &desc.params.get_cgu_dpll_status;
+ cmd = libie_aq_raw(&desc);
cmd->dpll_num = dpll_num;
status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
@@ -5174,10 +5592,10 @@ ice_aq_set_cgu_dpll_config(struct ice_hw *hw, u8 dpll_num, u8 ref_state,
u8 config, u8 eec_mode)
{
struct ice_aqc_set_cgu_dpll_config *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_dpll_config);
- cmd = &desc.params.set_cgu_dpll_config;
+ cmd = libie_aq_raw(&desc);
cmd->dpll_num = dpll_num;
cmd->ref_state = ref_state;
cmd->config = config;
@@ -5201,10 +5619,10 @@ ice_aq_set_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx,
u8 ref_priority)
{
struct ice_aqc_set_cgu_ref_prio *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_ref_prio);
- cmd = &desc.params.set_cgu_ref_prio;
+ cmd = libie_aq_raw(&desc);
cmd->dpll_num = dpll_num;
cmd->ref_idx = ref_idx;
cmd->ref_priority = ref_priority;
@@ -5227,11 +5645,11 @@ ice_aq_get_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx,
u8 *ref_prio)
{
struct ice_aqc_get_cgu_ref_prio *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_ref_prio);
- cmd = &desc.params.get_cgu_ref_prio;
+ cmd = libie_aq_raw(&desc);
cmd->dpll_num = dpll_num;
cmd->ref_idx = ref_idx;
@@ -5257,11 +5675,11 @@ ice_aq_get_cgu_info(struct ice_hw *hw, u32 *cgu_id, u32 *cgu_cfg_ver,
u32 *cgu_fw_ver)
{
struct ice_aqc_get_cgu_info *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_info);
- cmd = &desc.params.get_cgu_info;
+ cmd = libie_aq_raw(&desc);
status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
if (!status) {
@@ -5288,11 +5706,11 @@ ice_aq_set_phy_rec_clk_out(struct ice_hw *hw, u8 phy_output, bool enable,
u32 *freq)
{
struct ice_aqc_set_phy_rec_clk_out *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_rec_clk_out);
- cmd = &desc.params.set_phy_rec_clk_out;
+ cmd = libie_aq_raw(&desc);
cmd->phy_output = phy_output;
cmd->port_num = ICE_AQC_SET_PHY_REC_CLK_OUT_CURR_PORT;
cmd->flags = enable & ICE_AQC_SET_PHY_REC_CLK_OUT_OUT_EN;
@@ -5321,11 +5739,11 @@ ice_aq_get_phy_rec_clk_out(struct ice_hw *hw, u8 *phy_output, u8 *port_num,
u8 *flags, u16 *node_handle)
{
struct ice_aqc_get_phy_rec_clk_out *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_rec_clk_out);
- cmd = &desc.params.get_phy_rec_clk_out;
+ cmd = libie_aq_raw(&desc);
cmd->phy_output = *phy_output;
status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
@@ -5353,11 +5771,11 @@ int ice_aq_get_sensor_reading(struct ice_hw *hw,
struct ice_aqc_get_sensor_reading_resp *data)
{
struct ice_aqc_get_sensor_reading *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sensor_reading);
- cmd = &desc.params.get_sensor_reading;
+ cmd = libie_aq_raw(&desc);
#define ICE_INTERNAL_TEMP_SENSOR_FORMAT 0
#define ICE_INTERNAL_TEMP_SENSOR 0
cmd->sensor = ICE_INTERNAL_TEMP_SENSOR;
@@ -5365,7 +5783,7 @@ int ice_aq_get_sensor_reading(struct ice_hw *hw,
status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
if (!status)
- memcpy(data, &desc.params.get_sensor_reading_resp,
+ memcpy(data, &desc.params.raw,
sizeof(*data));
return status;
@@ -5562,13 +5980,13 @@ ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
u16 bus_addr, __le16 addr, u8 params, u8 *data,
struct ice_sq_cd *cd)
{
- struct ice_aq_desc desc = { 0 };
+ struct libie_aq_desc desc = { 0 };
struct ice_aqc_i2c *cmd;
u8 data_size;
int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c);
- cmd = &desc.params.read_write_i2c;
+ cmd = libie_aq_raw(&desc);
if (!data)
return -EINVAL;
@@ -5585,7 +6003,7 @@ ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
struct ice_aqc_read_i2c_resp *resp;
u8 i;
- resp = &desc.params.read_i2c_resp;
+ resp = libie_aq_raw(&desc);
for (i = 0; i < data_size; i++) {
*data = resp->i2c_data[i];
data++;
@@ -5617,12 +6035,12 @@ ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
u16 bus_addr, __le16 addr, u8 params, const u8 *data,
struct ice_sq_cd *cd)
{
- struct ice_aq_desc desc = { 0 };
+ struct libie_aq_desc desc = { 0 };
struct ice_aqc_i2c *cmd;
u8 data_size;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_i2c);
- cmd = &desc.params.read_write_i2c;
+ cmd = libie_aq_raw(&desc);
data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params);
@@ -5641,6 +6059,95 @@ ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
}
/**
+ * ice_get_pca9575_handle - find and return the PCA9575 controller
+ * @hw: pointer to the hw struct
+ * @pca9575_handle: GPIO controller's handle
+ *
+ * Find and return the GPIO controller's handle in the netlist.
+ * Once found, the value is cached in the hw structure and subsequent
+ * calls return the cached value.
+ *
+ * Return: 0 on success, -ENXIO when there's no PCA9575 present.
+ */
+int ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle)
+{
+ struct ice_aqc_get_link_topo *cmd;
+ struct libie_aq_desc desc;
+ int err;
+ u8 idx;
+
+ /* If handle was read previously return cached value */
+ if (hw->io_expander_handle) {
+ *pca9575_handle = hw->io_expander_handle;
+ return 0;
+ }
+
+#define SW_PCA9575_SFP_TOPO_IDX 2
+#define SW_PCA9575_QSFP_TOPO_IDX 1
+
+ /* Check if the SW IO expander controlling SMA exists in the netlist. */
+ if (hw->device_id == ICE_DEV_ID_E810C_SFP)
+ idx = SW_PCA9575_SFP_TOPO_IDX;
+ else if (hw->device_id == ICE_DEV_ID_E810C_QSFP)
+ idx = SW_PCA9575_QSFP_TOPO_IDX;
+ else
+ return -ENXIO;
+
+ /* If handle was not detected read it from the netlist */
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
+ cmd = libie_aq_raw(&desc);
+ cmd->addr.topo_params.node_type_ctx =
+ ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL;
+ cmd->addr.topo_params.index = idx;
+
+ err = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ if (err)
+ return -ENXIO;
+
+ /* Verify if we found the right IO expander type */
+ if (cmd->node_part_num != ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575)
+ return -ENXIO;
+
+ /* If present save the handle and return it */
+ hw->io_expander_handle =
+ le16_to_cpu(cmd->addr.handle);
+ *pca9575_handle = hw->io_expander_handle;
+
+ return 0;
+}
+
+/**
+ * ice_read_pca9575_reg - read the register from the PCA9575 controller
+ * @hw: pointer to the hw struct
+ * @offset: GPIO controller register offset
+ * @data: pointer to data to be read from the GPIO controller
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data)
+{
+ struct ice_aqc_link_topo_addr link_topo;
+ __le16 addr;
+ u16 handle;
+ int err;
+
+ memset(&link_topo, 0, sizeof(link_topo));
+
+ err = ice_get_pca9575_handle(hw, &handle);
+ if (err)
+ return err;
+
+ link_topo.handle = cpu_to_le16(handle);
+ link_topo.topo_params.node_type_ctx =
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M,
+ ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED);
+
+ addr = cpu_to_le16((u16)offset);
+
+ return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL);
+}
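Editor's note: reading a PCA9575 register thus reduces to an I2C read addressed through the cached netlist handle. A short sketch (driver context; the register offset 0x0 is illustrative, not a documented PCA9575 offset):

/* Sketch: read one byte from the GPIO expander and log it. */
static void example_dump_pca9575(struct ice_hw *hw)
{
	u8 val;

	if (!ice_read_pca9575_reg(hw, 0x0, &val))
		ice_debug(hw, ICE_DBG_PTP, "PCA9575 reg 0x0 = 0x%02x\n", val);
}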
+
+/**
* ice_aq_set_gpio
* @hw: pointer to the hw struct
* @gpio_ctrl_handle: GPIO controller node handle
@@ -5654,11 +6161,11 @@ int
ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
struct ice_sq_cd *cd)
{
+ struct libie_aq_desc desc;
struct ice_aqc_gpio *cmd;
- struct ice_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio);
- cmd = &desc.params.read_write_gpio;
+ cmd = libie_aq_raw(&desc);
cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
cmd->gpio_num = pin_idx;
cmd->gpio_val = value ? 1 : 0;
@@ -5681,12 +6188,12 @@ int
ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
bool *value, struct ice_sq_cd *cd)
{
+ struct libie_aq_desc desc;
struct ice_aqc_gpio *cmd;
- struct ice_aq_desc desc;
int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
- cmd = &desc.params.read_write_gpio;
+ cmd = libie_aq_raw(&desc);
cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
cmd->gpio_num = pin_idx;
@@ -5823,6 +6330,44 @@ bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
}
/**
+ * ice_is_fw_health_report_supported - checks if firmware supports health events
+ * @hw: pointer to the hardware structure
+ *
+ * Return: true if firmware supports health status reports,
+ * false otherwise
+ */
+bool ice_is_fw_health_report_supported(struct ice_hw *hw)
+{
+ return ice_is_fw_api_min_ver(hw, ICE_FW_API_HEALTH_REPORT_MAJ,
+ ICE_FW_API_HEALTH_REPORT_MIN,
+ ICE_FW_API_HEALTH_REPORT_PATCH);
+}
+
+/**
+ * ice_aq_set_health_status_cfg - Configure FW health events
+ * @hw: pointer to the HW struct
+ * @event_source: type of diagnostic events to enable
+ *
+ * Configure the health status event types that the firmware will send to this
+ * PF. The supported event types are: PF-specific, all PFs, and global.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int ice_aq_set_health_status_cfg(struct ice_hw *hw, u8 event_source)
+{
+ struct ice_aqc_set_health_status_cfg *cmd;
+ struct libie_aq_desc desc;
+
+ cmd = libie_aq_raw(&desc);
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_health_status_cfg);
+
+ cmd->event_source = event_source;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+}
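Editor's note: configuration is gated on the FW API version check above, so callers would typically probe support first and then pick an event scope. A hedged sketch (the ICE_AQC_HEALTH_STATUS_SET_PF_SPECIFIC_MASK flag name is an assumption based on the scopes named in the kernel-doc; it is not taken from this diff):

/* Sketch: enable PF-specific health status events when supported. */
static void example_enable_pf_health_events(struct ice_hw *hw)
{
	int err;

	if (!ice_is_fw_health_report_supported(hw))
		return;

	/* Flag name assumed, see note above. */
	err = ice_aq_set_health_status_cfg(hw,
					   ICE_AQC_HEALTH_STATUS_SET_PF_SPECIFIC_MASK);
	if (err)
		ice_debug(hw, ICE_DBG_INIT, "health status cfg failed, err %d\n",
			  err);
}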
+
+/**
* ice_aq_set_lldp_mib - Set the LLDP MIB
* @hw: pointer to the HW struct
* @mib_type: Local, Remote or both Local and Remote MIBs
@@ -5837,16 +6382,16 @@ ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_set_local_mib *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.lldp_set_mib;
+ cmd = libie_aq_raw(&desc);
if (buf_size == 0 || !buf)
return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
- desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_RD);
desc.datalen = cpu_to_le16(buf_size);
cmd->type = mib_type;
@@ -5872,16 +6417,22 @@ bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
/**
* ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter
* @hw: pointer to HW struct
- * @vsi_num: absolute HW index for VSI
+ * @vsi: VSI to add the filter to
* @add: boolean for if adding or removing a filter
+ *
+ * Return: 0 on success, -EOPNOTSUPP if the operation cannot be performed
+ * with this HW or VSI, otherwise an error corresponding to
+ * the AQ transaction result.
*/
-int
-ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
+int ice_lldp_fltr_add_remove(struct ice_hw *hw, struct ice_vsi *vsi, bool add)
{
struct ice_aqc_lldp_filter_ctrl *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
+
+ if (vsi->type != ICE_VSI_PF || !ice_fw_supports_lldp_fltr_ctrl(hw))
+ return -EOPNOTSUPP;
- cmd = &desc.params.lldp_filter_ctrl;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);
@@ -5890,7 +6441,7 @@ ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
else
cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;
- cmd->vsi_num = cpu_to_le16(vsi_num);
+ cmd->vsi_num = cpu_to_le16(vsi->vsi_num);
return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
@@ -5901,7 +6452,7 @@ ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
*/
int ice_lldp_execute_pending_mib(struct ice_hw *hw)
{
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_execute_pending_mib);
@@ -5957,3 +6508,86 @@ u32 ice_get_link_speed(u16 index)
return ice_aq_to_link_speed[index];
}
+
+/**
+ * ice_get_dest_cgu - get destination CGU dev for given HW
+ * @hw: pointer to the HW struct
+ *
+ * Get CGU client id for CGU register read/write operations.
+ *
+ * Return: CGU device id to use in SBQ transactions.
+ */
+static enum ice_sbq_dev_id ice_get_dest_cgu(struct ice_hw *hw)
+{
+ /* On dual complex E825 only complex 0 has functional CGU powering all
+ * the PHYs.
+ * SBQ destination device cgu points to CGU on a current complex and to
+ * access primary CGU from the secondary complex, the driver should use
+ * cgu_peer as a destination device.
+ */
+ if (hw->mac_type == ICE_MAC_GENERIC_3K_E825 && ice_is_dual(hw) &&
+ !ice_is_primary(hw))
+ return ice_sbq_dev_cgu_peer;
+ return ice_sbq_dev_cgu;
+}
+
+/**
+ * ice_read_cgu_reg - Read a CGU register
+ * @hw: Pointer to the HW struct
+ * @addr: Register address to read
+ * @val: Storage for register value read
+ *
+ * Read the contents of a register of the Clock Generation Unit. Only
+ * applicable to E82X devices.
+ *
+ * Return: 0 on success, other error codes when failed to read from CGU.
+ */
+int ice_read_cgu_reg(struct ice_hw *hw, u32 addr, u32 *val)
+{
+ struct ice_sbq_msg_input cgu_msg = {
+ .dest_dev = ice_get_dest_cgu(hw),
+ .opcode = ice_sbq_msg_rd,
+ .msg_addr_low = addr
+ };
+ int err;
+
+ err = ice_sbq_rw_reg(hw, &cgu_msg, LIBIE_AQ_FLAG_RD);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read CGU register 0x%04x, err %d\n",
+ addr, err);
+ return err;
+ }
+
+ *val = cgu_msg.data;
+
+ return 0;
+}
+
+/**
+ * ice_write_cgu_reg - Write a CGU register
+ * @hw: Pointer to the HW struct
+ * @addr: Register address to write
+ * @val: Value to write into the register
+ *
+ * Write the specified value to a register of the Clock Generation Unit. Only
+ * applicable to E82X devices.
+ *
+ * Return: 0 on success, other error codes when failed to write to CGU.
+ */
+int ice_write_cgu_reg(struct ice_hw *hw, u32 addr, u32 val)
+{
+ struct ice_sbq_msg_input cgu_msg = {
+ .dest_dev = ice_get_dest_cgu(hw),
+ .opcode = ice_sbq_msg_wr,
+ .msg_addr_low = addr,
+ .data = val
+ };
+ int err;
+
+ err = ice_sbq_rw_reg(hw, &cgu_msg, LIBIE_AQ_FLAG_RD);
+ if (err)
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write CGU register 0x%04x, err %d\n",
+ addr, err);
+
+ return err;
+}
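Editor's note: together the two helpers give a simple read-modify-write primitive for CGU fields; the ICE_CGU_* register offsets and bit masks they operate on are added to ice_common.h in the hunk below. A sketch under that assumption (the chosen bits are only an example, not a recommended configuration):

/* Sketch: select TIME_REF as the clock source in CGU register R9. */
static int example_select_time_ref(struct ice_hw *hw)
{
	u32 val;
	int err;

	err = ice_read_cgu_reg(hw, ICE_CGU_R9, &val);
	if (err)
		return err;

	val |= ICE_CGU_R9_TIME_REF_EN;
	val &= ~ICE_CGU_R9_CLK_EREF0_EN;

	return ice_write_cgu_reg(hw, ICE_CGU_R9, val);
}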
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index ffb22c7ce28b..e700ac0dc347 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -10,6 +10,7 @@
#include "ice_type.h"
#include "ice_nvm.h"
#include "ice_flex_pipe.h"
+#include "ice_parser.h"
#include <linux/avf/virtchnl.h>
#include "ice_switch.h"
#include "ice_fdir.h"
@@ -17,13 +18,75 @@
#define ICE_SQ_SEND_DELAY_TIME_MS 10
#define ICE_SQ_SEND_MAX_EXECUTE 3
+#define FEC_REG_SHIFT 2
+#define FEC_RECV_ID_SHIFT 4
+#define FEC_CORR_LOW_REG_PORT0 (0x02 << FEC_REG_SHIFT)
+#define FEC_CORR_HIGH_REG_PORT0 (0x03 << FEC_REG_SHIFT)
+#define FEC_UNCORR_LOW_REG_PORT0 (0x04 << FEC_REG_SHIFT)
+#define FEC_UNCORR_HIGH_REG_PORT0 (0x05 << FEC_REG_SHIFT)
+#define FEC_CORR_LOW_REG_PORT1 (0x42 << FEC_REG_SHIFT)
+#define FEC_CORR_HIGH_REG_PORT1 (0x43 << FEC_REG_SHIFT)
+#define FEC_UNCORR_LOW_REG_PORT1 (0x44 << FEC_REG_SHIFT)
+#define FEC_UNCORR_HIGH_REG_PORT1 (0x45 << FEC_REG_SHIFT)
+#define FEC_CORR_LOW_REG_PORT2 (0x4A << FEC_REG_SHIFT)
+#define FEC_CORR_HIGH_REG_PORT2 (0x4B << FEC_REG_SHIFT)
+#define FEC_UNCORR_LOW_REG_PORT2 (0x4C << FEC_REG_SHIFT)
+#define FEC_UNCORR_HIGH_REG_PORT2 (0x4D << FEC_REG_SHIFT)
+#define FEC_CORR_LOW_REG_PORT3 (0x52 << FEC_REG_SHIFT)
+#define FEC_CORR_HIGH_REG_PORT3 (0x53 << FEC_REG_SHIFT)
+#define FEC_UNCORR_LOW_REG_PORT3 (0x54 << FEC_REG_SHIFT)
+#define FEC_UNCORR_HIGH_REG_PORT3 (0x55 << FEC_REG_SHIFT)
+#define FEC_RECEIVER_ID_PCS0 (0x33 << FEC_RECV_ID_SHIFT)
+#define FEC_RECEIVER_ID_PCS1 (0x34 << FEC_RECV_ID_SHIFT)
+
+#define ICE_CGU_R9 0x24
+#define ICE_CGU_R9_TIME_REF_FREQ_SEL GENMASK(2, 0)
+#define ICE_CGU_R9_CLK_EREF0_EN BIT(4)
+#define ICE_CGU_R9_TIME_REF_EN BIT(5)
+#define ICE_CGU_R9_TIME_SYNC_EN BIT(6)
+#define ICE_CGU_R9_ONE_PPS_OUT_EN BIT(7)
+#define ICE_CGU_R9_ONE_PPS_OUT_AMP GENMASK(19, 18)
+
+#define ICE_CGU_R16 0x40
+#define ICE_CGU_R16_TSPLL_CK_REFCLKFREQ GENMASK(31, 24)
+
+#define ICE_CGU_R19 0x4C
+#define ICE_CGU_R19_TSPLL_FBDIV_INTGR_E82X GENMASK(7, 0)
+#define ICE_CGU_R19_TSPLL_FBDIV_INTGR_E825 GENMASK(9, 0)
+#define ICE_CGU_R19_TSPLL_NDIVRATIO GENMASK(19, 16)
+
+#define ICE_CGU_R22 0x58
+#define ICE_CGU_R22_TIME1588CLK_DIV GENMASK(23, 20)
+#define ICE_CGU_R22_TIME1588CLK_DIV2 BIT(30)
+
+#define ICE_CGU_R23 0x5C
+#define ICE_CGU_R24 0x60
+#define ICE_CGU_R24_FBDIV_FRAC GENMASK(21, 0)
+#define ICE_CGU_R23_R24_TSPLL_ENABLE BIT(24)
+#define ICE_CGU_R23_R24_REF1588_CK_DIV GENMASK(30, 27)
+#define ICE_CGU_R23_R24_TIME_REF_SEL BIT(31)
+
+#define ICE_CGU_BW_TDC 0x31C
+#define ICE_CGU_BW_TDC_PLLLOCK_SEL GENMASK(30, 29)
+
+#define ICE_CGU_RO_LOCK 0x3F0
+#define ICE_CGU_RO_LOCK_TRUE_LOCK BIT(12)
+#define ICE_CGU_RO_LOCK_UNLOCK BIT(13)
+
+#define ICE_CGU_CNTR_BIST 0x344
+#define ICE_CGU_CNTR_BIST_PLLLOCK_SEL_0 BIT(15)
+#define ICE_CGU_CNTR_BIST_PLLLOCK_SEL_1 BIT(16)
+
+#define ICE_CGU_RO_BWM_LF 0x370
+#define ICE_CGU_RO_BWM_LF_TRUE_LOCK BIT(12)
+
int ice_init_hw(struct ice_hw *hw);
void ice_deinit_hw(struct ice_hw *hw);
int ice_check_reset(struct ice_hw *hw);
int ice_reset(struct ice_hw *hw, enum ice_reset_req req);
int ice_create_all_ctrlq(struct ice_hw *hw);
int ice_init_all_ctrlq(struct ice_hw *hw);
-void ice_shutdown_all_ctrlq(struct ice_hw *hw);
+void ice_shutdown_all_ctrlq(struct ice_hw *hw, bool unloading);
void ice_destroy_all_ctrlq(struct ice_hw *hw);
int
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
@@ -46,7 +109,7 @@ bool ice_is_sbq_supported(struct ice_hw *hw);
struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw);
int
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
- struct ice_aq_desc *desc, void *buf, u16 buf_size,
+ struct libie_aq_desc *desc, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
void ice_clear_pxe_mode(struct ice_hw *hw);
int ice_get_caps(struct ice_hw *hw);
@@ -55,6 +118,12 @@ void ice_set_safe_mode_caps(struct ice_hw *hw);
int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
u32 rxq_index);
+int ice_read_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
+ u32 rxq_index);
+int ice_read_txq_ctx(struct ice_hw *hw, struct ice_tlan_ctx *tlan_ctx,
+ u32 txq_index);
+int ice_write_txq_ctx(struct ice_hw *hw, struct ice_tlan_ctx *tlan_ctx,
+ u32 txq_index);
int
ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params);
@@ -69,15 +138,14 @@ ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq);
int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading);
-void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode);
-extern const struct ice_ctx_ele ice_tlan_ctx_info[];
-int ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info);
+void ice_fill_dflt_direct_cmd_desc(struct libie_aq_desc *desc, u16 opcode);
+
+void ice_pack_txq_ctx(const struct ice_tlan_ctx *ctx, ice_txq_ctx_buf_t *buf);
extern struct mutex ice_global_cfg_lock_sw;
int
-ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc,
+ice_aq_send_cmd(struct ice_hw *hw, struct libie_aq_desc *desc,
void *buf, u16 buf_size, struct ice_sq_cd *cd);
int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd);
@@ -91,7 +159,6 @@ int
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps_data *caps,
struct ice_sq_cd *cd);
-bool ice_is_pf_c827(struct ice_hw *hw);
bool ice_is_phy_rclk_in_netlist(struct ice_hw *hw);
bool ice_is_clock_mux_in_netlist(struct ice_hw *hw);
bool ice_is_cgu_in_netlist(struct ice_hw *hw);
@@ -111,7 +178,6 @@ int
ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
struct ice_sq_cd *cd);
bool ice_is_generic_mac(struct ice_hw *hw);
-bool ice_is_e810(struct ice_hw *hw);
int ice_clear_pf_cfg(struct ice_hw *hw);
int
ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
@@ -121,6 +187,13 @@ int
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
struct ice_port_info *pi);
bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps);
+bool ice_is_fw_health_report_supported(struct ice_hw *hw);
+int ice_aq_set_health_status_cfg(struct ice_hw *hw, u8 event_source);
+int ice_aq_get_phy_equalization(struct ice_hw *hw, u16 data_in, u16 op_code,
+ u8 serdes_num, int *output);
+int
+ice_aq_get_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port,
+ enum ice_fec_stats_types fec_type, u32 *output);
enum ice_fc_mode ice_caps_to_fc_mode(u8 caps);
enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options);
@@ -166,6 +239,7 @@ ice_aq_get_port_options(struct ice_hw *hw,
int
ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
u8 new_option);
+int ice_get_phy_lane_number(struct ice_hw *hw);
int
ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
@@ -196,12 +270,21 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
int
ice_aq_cfg_lan_txq(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *buf,
u16 buf_size, u16 num_qs, u8 oldport, u8 newport,
- struct ice_sq_cd *cd);
+ u8 mode, struct ice_sq_cd *cd);
int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
void ice_replay_post(struct ice_hw *hw);
struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle);
-int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in);
+int
+ice_aq_set_txtimeq(struct ice_hw *hw, u16 txtimeq, u8 q_count,
+ struct ice_aqc_set_txtime_qgrp *txtime_qg,
+ u16 buf_size, struct ice_sq_cd *cd);
+void ice_pack_txtime_ctx(const struct ice_txtime_ctx *ctx,
+ ice_txtime_ctx_buf_t *buf);
+int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in, u16 flag);
+int ice_aq_get_cgu_input_pin_measure(struct ice_hw *hw, u8 dpll_idx,
+ struct ice_cgu_input_measure *meas,
+ u16 meas_num);
int
ice_aq_get_cgu_abilities(struct ice_hw *hw,
struct ice_aqc_get_cgu_abilities *abilities);
@@ -248,9 +331,6 @@ ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
-bool ice_is_e810t(struct ice_hw *hw);
-bool ice_is_e823(struct ice_hw *hw);
-bool ice_is_e825c(struct ice_hw *hw);
int
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_txsched_elem_data *buf);
@@ -261,12 +341,12 @@ int
ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
bool *value, struct ice_sq_cd *cd);
bool ice_is_100m_speed_supported(struct ice_hw *hw);
+u16 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high);
int
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw);
-int
-ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add);
+int ice_lldp_fltr_add_remove(struct ice_hw *hw, struct ice_vsi *vsi, bool add);
int ice_lldp_execute_pending_mib(struct ice_hw *hw);
int
ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
@@ -276,5 +356,9 @@ int
ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
u16 bus_addr, __le16 addr, u8 params, const u8 *data,
struct ice_sq_cd *cd);
+int ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle);
+int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data);
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw);
+int ice_read_cgu_reg(struct ice_hw *hw, u32 addr, u32 *val);
+int ice_write_cgu_reg(struct ice_hw *hw, u32 addr, u32 val);
#endif /* _ICE_COMMON_H_ */
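The new ICE_CGU_* register offsets and bit masks added near the top of this header, together with the ice_read_cgu_reg()/ice_write_cgu_reg() accessors declared just above, give callers register-level access to the clock generation unit. A minimal sketch of checking TSPLL lock status, using only the definitions and prototypes shown in this header (the wrapper function itself is hypothetical):

/* Hypothetical helper: report whether the TSPLL claims true lock. */
static int ice_tspll_locked_sketch(struct ice_hw *hw, bool *locked)
{
        u32 val;
        int err;

        err = ice_read_cgu_reg(hw, ICE_CGU_RO_LOCK, &val);
        if (err)
                return err;

        /* ICE_CGU_RO_LOCK_TRUE_LOCK (BIT(12)) and _UNLOCK (BIT(13)) are
         * status bits in the read-only lock register at offset 0x3F0.
         */
        *locked = !!(val & ICE_CGU_RO_LOCK_TRUE_LOCK);
        return 0;
}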
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index ffe660f34992..dcb837cadd18 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -90,7 +90,7 @@ bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
static int
ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
- size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);
+ size_t size = cq->num_sq_entries * sizeof(struct libie_aq_desc);
cq->sq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
&cq->sq.desc_buf.pa,
@@ -99,17 +99,6 @@ ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
return -ENOMEM;
cq->sq.desc_buf.size = size;
- cq->sq.cmd_buf = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
- sizeof(struct ice_sq_cd), GFP_KERNEL);
- if (!cq->sq.cmd_buf) {
- dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size,
- cq->sq.desc_buf.va, cq->sq.desc_buf.pa);
- cq->sq.desc_buf.va = NULL;
- cq->sq.desc_buf.pa = 0;
- cq->sq.desc_buf.size = 0;
- return -ENOMEM;
- }
-
return 0;
}
@@ -121,7 +110,7 @@ ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
static int
ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
- size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);
+ size_t size = cq->num_rq_entries * sizeof(struct libie_aq_desc);
cq->rq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
&cq->rq.desc_buf.pa,
@@ -170,7 +159,7 @@ ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
/* allocate the mapped buffers */
for (i = 0; i < cq->num_rq_entries; i++) {
- struct ice_aq_desc *desc;
+ struct libie_aq_desc *desc;
struct ice_dma_mem *bi;
bi = &cq->rq.r.rq_bi[i];
@@ -184,11 +173,11 @@ ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
/* now configure the descriptors for use */
desc = ICE_CTL_Q_DESC(cq->rq, i);
- desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
- if (cq->rq_buf_size > ICE_AQ_LG_BUF)
- desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
+ desc->flags = cpu_to_le16(LIBIE_AQ_FLAG_BUF);
+ if (cq->rq_buf_size > LIBIE_AQ_LG_BUF)
+ desc->flags |= cpu_to_le16(LIBIE_AQ_FLAG_LB);
desc->opcode = 0;
- /* This is in accordance with Admin queue design, there is no
+ /* This is in accordance with control queue design, there is no
* register for buffer size configuration
*/
desc->datalen = cpu_to_le16(bi->size);
@@ -338,8 +327,6 @@ do { \
(qi)->ring.r.ring##_bi[i].size = 0;\
} \
} \
- /* free the buffer info list */ \
- devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf); \
/* free DMA head */ \
devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head); \
} while (0)
@@ -405,11 +392,11 @@ init_ctrlq_exit:
}
/**
- * ice_init_rq - initialize ARQ
+ * ice_init_rq - initialize receive side of a control queue
* @hw: pointer to the hardware structure
* @cq: pointer to the specific Control queue
*
- * The main initialization routine for the Admin Receive (Event) Queue.
+ * The main initialization routine for Receive side of a control queue.
* Prior to calling this function, the driver *MUST* set the following fields
* in the cq->structure:
* - cq->num_rq_entries
@@ -465,7 +452,7 @@ init_ctrlq_exit:
}
/**
- * ice_shutdown_sq - shutdown the Control ATQ
+ * ice_shutdown_sq - shutdown the transmit side of a control queue
* @hw: pointer to the hardware structure
* @cq: pointer to the specific Control queue
*
@@ -482,7 +469,7 @@ static int ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
goto shutdown_sq_out;
}
- /* Stop firmware AdminQ processing */
+ /* Stop processing of the control queue */
wr32(hw, cq->sq.head, 0);
wr32(hw, cq->sq.tail, 0);
wr32(hw, cq->sq.len, 0);
@@ -501,7 +488,7 @@ shutdown_sq_out:
}
/**
- * ice_aq_ver_check - Check the reported AQ API version.
+ * ice_aq_ver_check - Check the reported AQ API version
* @hw: pointer to the hardware structure
*
* Checks if the driver should load on a given AQ API version.
@@ -510,22 +497,31 @@ shutdown_sq_out:
*/
static bool ice_aq_ver_check(struct ice_hw *hw)
{
- if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) {
+ u8 exp_fw_api_ver_major = EXP_FW_API_VER_MAJOR_BY_MAC(hw);
+ u8 exp_fw_api_ver_minor = EXP_FW_API_VER_MINOR_BY_MAC(hw);
+
+ if (hw->api_maj_ver > exp_fw_api_ver_major) {
/* Major API version is newer than expected, don't load */
dev_warn(ice_hw_to_dev(hw),
"The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
return false;
- } else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
- if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
+ } else if (hw->api_maj_ver == exp_fw_api_ver_major) {
+ if (hw->api_min_ver > (exp_fw_api_ver_minor + 2))
dev_info(ice_hw_to_dev(hw),
- "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
- else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
+ "The driver for the device detected a newer version (%u.%u) of the NVM image than expected (%u.%u). Please install the most recent version of the network driver.\n",
+ hw->api_maj_ver, hw->api_min_ver,
+ exp_fw_api_ver_major, exp_fw_api_ver_minor);
+ else if ((hw->api_min_ver + 2) < exp_fw_api_ver_minor)
dev_info(ice_hw_to_dev(hw),
- "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
+ "The driver for the device detected an older version (%u.%u) of the NVM image than expected (%u.%u). Please update the NVM image.\n",
+ hw->api_maj_ver, hw->api_min_ver,
+ exp_fw_api_ver_major, exp_fw_api_ver_minor);
} else {
/* Major API version is older than expected, log a warning */
dev_info(ice_hw_to_dev(hw),
- "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
+ "The driver for the device detected an older version (%u.%u) of the NVM image than expected (%u.%u). Please update the NVM image.\n",
+ hw->api_maj_ver, hw->api_min_ver,
+ exp_fw_api_ver_major, exp_fw_api_ver_minor);
}
return true;
}
@@ -684,10 +680,12 @@ struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw)
* ice_shutdown_ctrlq - shutdown routine for any control queue
* @hw: pointer to the hardware structure
* @q_type: specific Control queue type
+ * @unloading: is the driver unloading itself
*
* NOTE: this function does not destroy the control queue locks.
*/
-static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
+static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type,
+ bool unloading)
{
struct ice_ctl_q_info *cq;
@@ -695,7 +693,7 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
case ICE_CTL_Q_ADMIN:
cq = &hw->adminq;
if (ice_check_sq_alive(hw, cq))
- ice_aq_q_shutdown(hw, true);
+ ice_aq_q_shutdown(hw, unloading);
break;
case ICE_CTL_Q_SB:
cq = &hw->sbq;
@@ -714,20 +712,21 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
/**
* ice_shutdown_all_ctrlq - shutdown routine for all control queues
* @hw: pointer to the hardware structure
+ * @unloading: is the driver unloading itself
*
* NOTE: this function does not destroy the control queue locks. The driver
* may call this at runtime to shutdown and later restart control queues, such
* as in response to a reset event.
*/
-void ice_shutdown_all_ctrlq(struct ice_hw *hw)
+void ice_shutdown_all_ctrlq(struct ice_hw *hw, bool unloading)
{
/* Shutdown FW admin queue */
- ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
+ ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN, unloading);
/* Shutdown PHY Sideband */
if (ice_is_sbq_supported(hw))
- ice_shutdown_ctrlq(hw, ICE_CTL_Q_SB);
+ ice_shutdown_ctrlq(hw, ICE_CTL_Q_SB, unloading);
/* Shutdown PF-VF Mailbox */
- ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
+ ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX, unloading);
}
/**
@@ -759,7 +758,7 @@ int ice_init_all_ctrlq(struct ice_hw *hw)
break;
ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n");
- ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
+ ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN, true);
msleep(ICE_CTL_Q_ADMIN_INIT_MSEC);
} while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT);
@@ -840,7 +839,7 @@ static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
void ice_destroy_all_ctrlq(struct ice_hw *hw)
{
/* shut down all the control queues first */
- ice_shutdown_all_ctrlq(hw);
+ ice_shutdown_all_ctrlq(hw, true);
ice_destroy_ctrlq_locks(&hw->adminq);
if (ice_is_sbq_supported(hw))
@@ -849,7 +848,7 @@ void ice_destroy_all_ctrlq(struct ice_hw *hw)
}
/**
- * ice_clean_sq - cleans Admin send queue (ATQ)
+ * ice_clean_sq - cleans send side of a control queue
* @hw: pointer to the hardware structure
* @cq: pointer to the specific Control queue
*
@@ -859,21 +858,17 @@ static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
struct ice_ctl_q_ring *sq = &cq->sq;
u16 ntc = sq->next_to_clean;
- struct ice_sq_cd *details;
- struct ice_aq_desc *desc;
+ struct libie_aq_desc *desc;
desc = ICE_CTL_Q_DESC(*sq, ntc);
- details = ICE_CTL_Q_DETAILS(*sq, ntc);
while (rd32(hw, cq->sq.head) != ntc) {
ice_debug(hw, ICE_DBG_AQ_MSG, "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
memset(desc, 0, sizeof(*desc));
- memset(details, 0, sizeof(*details));
ntc++;
if (ntc == sq->count)
ntc = 0;
desc = ICE_CTL_Q_DESC(*sq, ntc);
- details = ICE_CTL_Q_DETAILS(*sq, ntc);
}
sq->next_to_clean = ntc;
@@ -882,18 +877,43 @@ static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
}
/**
+ * ice_ctl_q_str - Convert control queue type to string
+ * @qtype: the control queue type
+ *
+ * Return: A string name for the given control queue type.
+ */
+static const char *ice_ctl_q_str(enum ice_ctl_q qtype)
+{
+ switch (qtype) {
+ case ICE_CTL_Q_UNKNOWN:
+ return "Unknown CQ";
+ case ICE_CTL_Q_ADMIN:
+ return "AQ";
+ case ICE_CTL_Q_MAILBOX:
+ return "MBXQ";
+ case ICE_CTL_Q_SB:
+ return "SBQ";
+ default:
+ return "Unrecognized CQ";
+ }
+}
+
+/**
* ice_debug_cq
* @hw: pointer to the hardware structure
+ * @cq: pointer to the specific Control queue
* @desc: pointer to control queue descriptor
* @buf: pointer to command buffer
* @buf_len: max length of buf
+ * @response: true if this is the writeback response
*
* Dumps debug log about control command with descriptor contents.
*/
-static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
+static void ice_debug_cq(struct ice_hw *hw, struct ice_ctl_q_info *cq,
+ void *desc, void *buf, u16 buf_len, bool response)
{
- struct ice_aq_desc *cq_desc = desc;
- u16 len;
+ struct libie_aq_desc *cq_desc = desc;
+ u16 datalen, flags;
if (!IS_ENABLED(CONFIG_DYNAMIC_DEBUG) &&
!((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask))
@@ -902,48 +922,64 @@ static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
if (!desc)
return;
- len = le16_to_cpu(cq_desc->datalen);
+ datalen = le16_to_cpu(cq_desc->datalen);
+ flags = le16_to_cpu(cq_desc->flags);
- ice_debug(hw, ICE_DBG_AQ_DESC, "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
- le16_to_cpu(cq_desc->opcode),
- le16_to_cpu(cq_desc->flags),
- le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
- ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n",
+ ice_debug(hw, ICE_DBG_AQ_DESC, "%s %s: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n\tcookie (h,l) 0x%08X 0x%08X\n\tparam (0,1) 0x%08X 0x%08X\n\taddr (h,l) 0x%08X 0x%08X\n",
+ ice_ctl_q_str(cq->qtype), response ? "Response" : "Command",
+ le16_to_cpu(cq_desc->opcode), flags, datalen,
+ le16_to_cpu(cq_desc->retval),
le32_to_cpu(cq_desc->cookie_high),
- le32_to_cpu(cq_desc->cookie_low));
- ice_debug(hw, ICE_DBG_AQ_DESC, "\tparam (0,1) 0x%08X 0x%08X\n",
+ le32_to_cpu(cq_desc->cookie_low),
le32_to_cpu(cq_desc->params.generic.param0),
- le32_to_cpu(cq_desc->params.generic.param1));
- ice_debug(hw, ICE_DBG_AQ_DESC, "\taddr (h,l) 0x%08X 0x%08X\n",
+ le32_to_cpu(cq_desc->params.generic.param1),
le32_to_cpu(cq_desc->params.generic.addr_high),
le32_to_cpu(cq_desc->params.generic.addr_low));
- if (buf && cq_desc->datalen != 0) {
- ice_debug(hw, ICE_DBG_AQ_DESC_BUF, "Buffer:\n");
- if (buf_len < len)
- len = buf_len;
-
- ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, buf, len);
+ /* Dump buffer iff 1) one exists and 2) is either a response indicated
+ * by the DD and/or CMP flag set or a command with the RD flag set.
+ */
+ if (buf && cq_desc->datalen &&
+ (flags & (LIBIE_AQ_FLAG_DD | LIBIE_AQ_FLAG_CMP |
+ LIBIE_AQ_FLAG_RD))) {
+ char prefix[] = KBUILD_MODNAME " 0x12341234 0x12341234 ";
+
+ sprintf(prefix, KBUILD_MODNAME " 0x%08X 0x%08X ",
+ le32_to_cpu(cq_desc->params.generic.addr_high),
+ le32_to_cpu(cq_desc->params.generic.addr_low));
+ ice_debug_array_w_prefix(hw, ICE_DBG_AQ_DESC_BUF, prefix,
+ buf,
+ min_t(u16, buf_len, datalen));
}
}
/**
- * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
+ * ice_sq_done - poll until the last send on a control queue has completed
* @hw: pointer to the HW struct
* @cq: pointer to the specific Control queue
*
- * Returns true if the firmware has processed all descriptors on the
- * admin send queue. Returns false if there are still requests pending.
+ * Use read_poll_timeout to poll the control queue head, checking until it
+ * matches next_to_use. According to the control queue designers, this has
+ * better timing reliability than the DD bit.
+ *
+ * Return: true if all the descriptors on the send side of a control queue
+ * are finished processing, false otherwise.
*/
static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
- /* AQ designers suggest use of head for better
- * timing reliability than DD bit
+ u32 head;
+
+ /* Wait a short time before the initial check, to allow hardware time
+ * for completion.
*/
- return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
+ udelay(5);
+
+ return !rd32_poll_timeout(hw, cq->sq.head,
+ head, head == cq->sq.next_to_use,
+ 20, ICE_CTL_Q_SQ_CMD_TIMEOUT);
}
/**
- * ice_sq_send_cmd - send command to Control Queue (ATQ)
+ * ice_sq_send_cmd - send command to a control queue
* @hw: pointer to the HW struct
* @cq: pointer to the specific Control queue
* @desc: prefilled descriptor describing the command
@@ -951,19 +987,18 @@ static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
* @buf_size: size of buffer for indirect commands (or 0 for direct commands)
* @cd: pointer to command details structure
*
- * This is the main send command routine for the ATQ. It runs the queue,
- * cleans the queue, etc.
+ * Main command for the transmit side of a control queue. It puts the command
+ * on the queue, bumps the tail, waits for processing of the command, captures
+ * command status and results, etc.
*/
int
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
- struct ice_aq_desc *desc, void *buf, u16 buf_size,
+ struct libie_aq_desc *desc, void *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
struct ice_dma_mem *dma_buf = NULL;
- struct ice_aq_desc *desc_on_ring;
+ struct libie_aq_desc *desc_on_ring;
bool cmd_completed = false;
- struct ice_sq_cd *details;
- unsigned long timeout;
int status = 0;
u16 retval = 0;
u32 val = 0;
@@ -973,7 +1008,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
return -EBUSY;
mutex_lock(&cq->sq_lock);
- cq->sq_last_status = ICE_AQ_RC_OK;
+ cq->sq_last_status = LIBIE_AQ_RC_OK;
if (!cq->sq.count) {
ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send queue not initialized.\n");
@@ -994,9 +1029,9 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
goto sq_send_command_error;
}
- desc->flags |= cpu_to_le16(ICE_AQ_FLAG_BUF);
- if (buf_size > ICE_AQ_LG_BUF)
- desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
+ desc->flags |= cpu_to_le16(LIBIE_AQ_FLAG_BUF);
+ if (buf_size > LIBIE_AQ_LG_BUF)
+ desc->flags |= cpu_to_le16(LIBIE_AQ_FLAG_LB);
}
val = rd32(hw, cq->sq.head);
@@ -1007,12 +1042,6 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
goto sq_send_command_error;
}
- details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
- if (cd)
- *details = *cd;
- else
- memset(details, 0, sizeof(*details));
-
/* Call clean and check queue available function to reclaim the
* descriptors that were processed by FW/MBX; the function returns the
* number of desc available. The clean function called here could be
@@ -1049,7 +1078,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
/* Debug desc and buffer */
ice_debug(hw, ICE_DBG_AQ_DESC, "ATQ: Control Send queue desc and buffer:\n");
- ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size);
+ ice_debug_cq(hw, cq, (void *)desc_on_ring, buf, buf_size, false);
(cq->sq.next_to_use)++;
if (cq->sq.next_to_use == cq->sq.count)
@@ -1057,20 +1086,9 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
wr32(hw, cq->sq.tail, cq->sq.next_to_use);
ice_flush(hw);
- /* Wait a short time before initial ice_sq_done() check, to allow
- * hardware time for completion.
+ /* Wait for the command to complete. If it finishes within the
+ * timeout, copy the descriptor back to temp.
*/
- udelay(5);
-
- timeout = jiffies + ICE_CTL_Q_SQ_CMD_TIMEOUT;
- do {
- if (ice_sq_done(hw, cq))
- break;
-
- usleep_range(100, 150);
- } while (time_before(jiffies, timeout));
-
- /* if ready, copy the desc back to temp */
if (ice_sq_done(hw, cq)) {
memcpy(desc, desc_on_ring, sizeof(*desc));
if (buf) {
@@ -1095,19 +1113,18 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
retval &= 0xff;
}
cmd_completed = true;
- if (!status && retval != ICE_AQ_RC_OK)
+ if (!status && retval != LIBIE_AQ_RC_OK)
status = -EIO;
- cq->sq_last_status = (enum ice_aq_err)retval;
+ cq->sq_last_status = (enum libie_aq_err)retval;
}
ice_debug(hw, ICE_DBG_AQ_MSG, "ATQ: desc and buffer writeback:\n");
- ice_debug_cq(hw, (void *)desc, buf, buf_size);
+ ice_debug_cq(hw, cq, (void *)desc, buf, buf_size, true);
/* save writeback AQ if requested */
- if (details->wb_desc)
- memcpy(details->wb_desc, desc_on_ring,
- sizeof(*details->wb_desc));
+ if (cd && cd->wb_desc)
+ memcpy(cd->wb_desc, desc_on_ring, sizeof(*cd->wb_desc));
/* update the error if time out occurred */
if (!cmd_completed) {
@@ -1133,12 +1150,12 @@ sq_send_command_error:
*
* Fill the desc with default values
*/
-void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
+void ice_fill_dflt_direct_cmd_desc(struct libie_aq_desc *desc, u16 opcode)
{
/* zero out the desc */
memset(desc, 0, sizeof(*desc));
desc->opcode = cpu_to_le16(opcode);
- desc->flags = cpu_to_le16(ICE_AQ_FLAG_SI);
+ desc->flags = cpu_to_le16(LIBIE_AQ_FLAG_SI);
}
/**
@@ -1148,17 +1165,17 @@ void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
* @e: event info from the receive descriptor, includes any buffers
* @pending: number of events that could be left to process
*
- * This function cleans one Admin Receive Queue element and returns
- * the contents through e. It can also return how many events are
- * left to process through 'pending'.
+ * Clean one element from the receive side of a control queue. On return 'e'
+ * contains contents of the message, and 'pending' contains the number of
+ * events left to process.
*/
int
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_rq_event_info *e, u16 *pending)
{
+ enum libie_aq_err rq_last_status;
u16 ntc = cq->rq.next_to_clean;
- enum ice_aq_err rq_last_status;
- struct ice_aq_desc *desc;
+ struct libie_aq_desc *desc;
struct ice_dma_mem *bi;
int ret_code = 0;
u16 desc_idx;
@@ -1191,9 +1208,9 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
desc = ICE_CTL_Q_DESC(cq->rq, ntc);
desc_idx = ntc;
- rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval);
+ rq_last_status = (enum libie_aq_err)le16_to_cpu(desc->retval);
flags = le16_to_cpu(desc->flags);
- if (flags & ICE_AQ_FLAG_ERR) {
+ if (flags & LIBIE_AQ_FLAG_ERR) {
ret_code = -EIO;
ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n",
le16_to_cpu(desc->opcode), rq_last_status);
@@ -1206,7 +1223,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n");
- ice_debug_cq(hw, (void *)desc, e->msg_buf, cq->rq_buf_size);
+ ice_debug_cq(hw, cq, (void *)desc, e->msg_buf, cq->rq_buf_size, true);
/* Restore the original datalen and buffer address in the desc,
* FW updates datalen to indicate the event message size
@@ -1214,9 +1231,9 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
bi = &cq->rq.r.rq_bi[ntc];
memset(desc, 0, sizeof(*desc));
- desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
- if (cq->rq_buf_size > ICE_AQ_LG_BUF)
- desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
+ desc->flags = cpu_to_le16(LIBIE_AQ_FLAG_BUF);
+ if (cq->rq_buf_size > LIBIE_AQ_LG_BUF)
+ desc->flags |= cpu_to_le16(LIBIE_AQ_FLAG_LB);
desc->datalen = cpu_to_le16(bi->size);
desc->params.generic.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
desc->params.generic.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
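The rewritten ice_sq_done() above replaces the open-coded jiffies loop with rd32_poll_timeout(), with ICE_CTL_Q_SQ_CMD_TIMEOUT now expressed in microseconds. A rough sketch of the equivalent polling built directly on the generic read_poll_timeout() helper from <linux/iopoll.h>, assuming rd32_poll_timeout() is a thin wrapper around it (the exact wrapper definition lives elsewhere in the driver):

#include <linux/iopoll.h>

/* Illustrative only: poll the SQ head register until it matches
 * next_to_use, sleeping ~20us between reads, for at most one second.
 * read_poll_timeout() returns 0 on success and -ETIMEDOUT otherwise.
 */
static bool ice_sq_done_sketch(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        u32 head;

        return !read_poll_timeout(rd32, head, head == cq->sq.next_to_use,
                                  20, ICE_CTL_Q_SQ_CMD_TIMEOUT, false,
                                  hw, cq->sq.head);
}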
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h
index 8f2fd1613a95..788040dd662e 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.h
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.h
@@ -12,7 +12,7 @@
#define ICE_SBQ_MAX_BUF_LEN 512
#define ICE_CTL_Q_DESC(R, i) \
- (&(((struct ice_aq_desc *)((R).desc_buf.va))[i]))
+ (&(((struct libie_aq_desc *)((R).desc_buf.va))[i]))
#define ICE_CTL_Q_DESC_UNUSED(R) \
((u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
@@ -21,9 +21,18 @@
/* Defines that help manage the driver vs FW API checks.
* Take a look at ice_aq_ver_check in ice_controlq.c for actual usage.
*/
-#define EXP_FW_API_VER_BRANCH 0x00
-#define EXP_FW_API_VER_MAJOR 0x01
-#define EXP_FW_API_VER_MINOR 0x05
+#define EXP_FW_API_VER_MAJOR_E810 0x01
+#define EXP_FW_API_VER_MINOR_E810 0x05
+
+#define EXP_FW_API_VER_MAJOR_E830 0x01
+#define EXP_FW_API_VER_MINOR_E830 0x07
+
+#define EXP_FW_API_VER_MAJOR_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? \
+ EXP_FW_API_VER_MAJOR_E830 : \
+ EXP_FW_API_VER_MAJOR_E810)
+#define EXP_FW_API_VER_MINOR_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? \
+ EXP_FW_API_VER_MINOR_E830 : \
+ EXP_FW_API_VER_MINOR_E810)
/* Different control queue types: These are mainly for SW consumption. */
enum ice_ctl_q {
@@ -34,14 +43,13 @@ enum ice_ctl_q {
};
/* Control Queue timeout settings - max delay 1s */
-#define ICE_CTL_Q_SQ_CMD_TIMEOUT HZ /* Wait max 1s */
+#define ICE_CTL_Q_SQ_CMD_TIMEOUT USEC_PER_SEC
#define ICE_CTL_Q_ADMIN_INIT_TIMEOUT 10 /* Count 10 times */
#define ICE_CTL_Q_ADMIN_INIT_MSEC 100 /* Check every 100msec */
struct ice_ctl_q_ring {
void *dma_head; /* Virtual address to DMA head */
struct ice_dma_mem desc_buf; /* descriptor ring memory */
- void *cmd_buf; /* command buffer memory */
union {
struct ice_dma_mem *sq_bi;
@@ -68,14 +76,12 @@ struct ice_ctl_q_ring {
/* sq transaction details */
struct ice_sq_cd {
- struct ice_aq_desc *wb_desc;
+ struct libie_aq_desc *wb_desc;
};
-#define ICE_CTL_Q_DETAILS(R, i) (&(((struct ice_sq_cd *)((R).cmd_buf))[i]))
-
/* rq event information */
struct ice_rq_event_info {
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
u16 msg_len;
u16 buf_len;
u8 *msg_buf;
@@ -90,7 +96,7 @@ struct ice_ctl_q_info {
u16 num_sq_entries; /* send queue depth */
u16 rq_buf_size; /* receive queue buffer size */
u16 sq_buf_size; /* send queue buffer size */
- enum ice_aq_err sq_last_status; /* last status on send queue */
+ enum libie_aq_err sq_last_status; /* last status on send queue */
struct mutex sq_lock; /* Send queue lock */
struct mutex rq_lock; /* Receive queue lock */
};
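The EXP_FW_API_VER_*_BY_MAC() macros above make ice_aq_ver_check() compare against 1.05 on E810-class parts and 1.07 on E830, keeping the existing tolerance of two on the minor number. A small worked sketch of that tolerance, using the same comparisons as ice_aq_ver_check() (the helper below is illustrative, not part of the driver):

/* Returns 0 when the reported minor version is within the accepted
 * window, >0 when the NVM is newer than expected, <0 when it is older.
 * Example with E830 (expected 1.07): reported 1.10 -> newer (10 > 7 + 2),
 * reported 1.04 -> older (4 + 2 < 7), reported 1.06 or 1.09 -> OK.
 */
static int ice_minor_ver_delta_sketch(u8 reported, u8 expected)
{
        if (reported > expected + 2)
                return 1;
        if (reported + 2 < expected)
                return -1;
        return 0;
}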
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
index 74418c445cc4..abea84f14658 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
@@ -24,10 +24,10 @@ ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf,
struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_get_mib *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
- cmd = &desc.params.lldp_get_mib;
+ cmd = libie_aq_raw(&desc);
if (buf_size == 0 || !buf)
return -EINVAL;
@@ -64,9 +64,9 @@ ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update,
struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_set_mib_change *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.lldp_set_event;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_mib_change);
@@ -95,9 +95,9 @@ ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist,
struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_stop *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.lldp_stop;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_stop);
@@ -121,9 +121,9 @@ ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist,
int ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_start *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.lldp_start;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_start);
@@ -677,11 +677,11 @@ ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent,
bool *dcbx_agent_status, struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_stop_start_specific_agent *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
u16 opcode;
int status;
- cmd = &desc.params.lldp_agent_ctrl;
+ cmd = libie_aq_raw(&desc);
opcode = ice_aqc_opc_lldp_stop_start_specific_agent;
@@ -714,7 +714,7 @@ ice_aq_get_cee_dcb_cfg(struct ice_hw *hw,
struct ice_aqc_get_cee_dcb_cfg_resp *buff,
struct ice_sq_cd *cd)
{
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cee_dcb_cfg);
@@ -733,13 +733,13 @@ ice_aq_get_cee_dcb_cfg(struct ice_hw *hw,
int ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd)
{
struct ice_aqc_set_query_pfc_mode *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
if (pfc_mode > ICE_AQC_PFC_DSCP_BASED_PFC)
return -EINVAL;
- cmd = &desc.params.set_query_pfc_mode;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_pfc_mode);
@@ -914,7 +914,7 @@ static int ice_get_ieee_or_cee_dcb_cfg(struct ice_port_info *pi, u8 dcbx_mode)
ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE,
ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID, dcbx_cfg);
/* Don't treat ENOENT as an error for Remote MIBs */
- if (pi->hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
+ if (pi->hw->adminq.sq_last_status == LIBIE_AQ_RC_ENOENT)
ret = 0;
out:
@@ -941,7 +941,7 @@ int ice_get_dcb_cfg(struct ice_port_info *pi)
/* CEE mode */
ret = ice_get_ieee_or_cee_dcb_cfg(pi, ICE_DCBX_MODE_CEE);
ice_cee_to_dcb_cfg(&cee_cfg, pi);
- } else if (pi->hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT) {
+ } else if (pi->hw->adminq.sq_last_status == LIBIE_AQ_RC_ENOENT) {
/* CEE mode not enabled try querying IEEE data */
dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_IEEE;
@@ -965,7 +965,7 @@ void ice_get_dcb_cfg_from_mib_change(struct ice_port_info *pi,
struct ice_aqc_lldp_get_mib *mib;
u8 change_type, dcbx_mode;
- mib = (struct ice_aqc_lldp_get_mib *)&event->desc.params.raw;
+ mib = libie_aq_raw(&event->desc);
change_type = FIELD_GET(ICE_AQ_LLDP_MIB_TYPE_M, mib->type);
if (change_type == ICE_AQ_LLDP_MIB_REMOTE)
@@ -1288,7 +1288,7 @@ ice_add_dscp_up_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
tlv->ouisubtype = htonl(ouisubtype);
/* bytes 0 - 63 - IPv4 DSCP2UP LUT */
- for (i = 0; i < ICE_DSCP_NUM_VAL; i++) {
+ for (i = 0; i < DSCP_MAX; i++) {
/* IPv4 mapping */
buf[i] = dcbcfg->dscp_map[i];
/* IPv6 mapping */
@@ -1537,12 +1537,12 @@ ice_aq_query_port_ets(struct ice_port_info *pi,
struct ice_sq_cd *cd)
{
struct ice_aqc_query_port_ets *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
if (!pi)
return -EINVAL;
- cmd = &desc.params.port_ets;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_port_ets);
cmd->port_teid = pi->root->info.node_teid;
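Every conversion in this file follows the same shape: declare a struct libie_aq_desc on the stack, obtain the command-specific parameter area with libie_aq_raw() instead of the old desc.params.<name> unions, fill the default descriptor, then send. A condensed sketch of that pattern for a direct (bufferless) command, grounded only in the calls visible in the hunks above (the wrapper itself is hypothetical):

/* Hypothetical direct command that takes no data buffer. */
static int ice_aq_simple_cmd_sketch(struct ice_hw *hw, u16 opcode,
                                    struct ice_sq_cd *cd)
{
        struct libie_aq_desc desc;

        ice_fill_dflt_direct_cmd_desc(&desc, opcode);

        /* Commands with parameters fetch the raw area first, e.g.:
         *      struct ice_aqc_lldp_start *cmd = libie_aq_raw(&desc);
         * and fill cmd->... before sending.
         */
        return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}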
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index 6e20ee610022..9fc8681cc58e 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -3,7 +3,7 @@
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
-#include "ice_devlink.h"
+#include "devlink/devlink.h"
/**
* ice_dcb_get_ena_tc - return bitmap of enabled TCs
@@ -187,6 +187,7 @@ void ice_vsi_set_dcb_tc_cfg(struct ice_vsi *vsi)
vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg);
break;
case ICE_VSI_CHNL:
+ case ICE_VSI_SF:
vsi->tc_cfg.ena_tc = BIT(ice_get_first_droptc(vsi));
vsi->tc_cfg.numtc = 1;
break;
@@ -291,7 +292,6 @@ static void ice_dcb_ena_dis_vsi(struct ice_pf *pf, bool ena, bool locked)
switch (vsi->type) {
case ICE_VSI_CHNL:
- case ICE_VSI_SWITCHDEV_CTRL:
case ICE_VSI_PF:
if (ena)
ice_ena_vsi(vsi, locked);
@@ -352,8 +352,8 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
struct ice_aqc_port_ets_elem buf = { 0 };
struct ice_dcbx_cfg *old_cfg, *curr_cfg;
struct device *dev = ice_pf_to_dev(pf);
+ struct iidc_rdma_event *event;
int ret = ICE_DCB_NO_HW_CHG;
- struct iidc_event *event;
struct ice_vsi *pf_vsi;
curr_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
@@ -405,7 +405,7 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
goto free_cfg;
}
- set_bit(IIDC_EVENT_BEFORE_TC_CHANGE, event->type);
+ set_bit(IIDC_RDMA_EVENT_BEFORE_TC_CHANGE, event->type);
ice_send_event_to_aux(pf, event);
kfree(event);
@@ -740,7 +740,9 @@ static int ice_dcb_noncontig_cfg(struct ice_pf *pf)
void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked)
{
struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
- struct iidc_event *event;
+ struct iidc_rdma_priv_dev_info *privd;
+ struct iidc_rdma_core_dev_info *cdev;
+ struct iidc_rdma_event *event;
u8 tc_map = 0;
int v, ret;
@@ -776,21 +778,24 @@ void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked)
/* no need to proceed with remaining cfg if it is CHNL
* or switchdev VSI
*/
- if (vsi->type == ICE_VSI_CHNL ||
- vsi->type == ICE_VSI_SWITCHDEV_CTRL)
+ if (vsi->type == ICE_VSI_CHNL)
continue;
ice_vsi_map_rings_to_vectors(vsi);
if (vsi->type == ICE_VSI_PF)
ice_dcbnl_set_all(vsi);
}
- if (!locked) {
+
+ cdev = pf->cdev_info;
+ if (cdev && !locked) {
+ privd = cdev->iidc_priv;
+ ice_setup_dcb_qos_info(pf, &privd->qos_info);
/* Notify the AUX drivers that TC change is finished */
event = kzalloc(sizeof(*event), GFP_KERNEL);
if (!event)
return;
- set_bit(IIDC_EVENT_AFTER_TC_CHANGE, event->type);
+ set_bit(IIDC_RDMA_EVENT_AFTER_TC_CHANGE, event->type);
ice_send_event_to_aux(pf, event);
kfree(event);
}
@@ -847,7 +852,7 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
goto dcb_init_err;
}
- ice_cfg_sw_lldp(pf_vsi, false, true);
+ ice_cfg_sw_rx_lldp(pf, true);
pf->dcbx_cap = ice_dcb_get_mode(port_info, true);
return 0;
@@ -946,6 +951,37 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring,
}
/**
+ * ice_setup_dcb_qos_info - Setup DCB QoS information
+ * @pf: ptr to ice_pf
+ * @qos_info: QoS param instance
+ */
+void ice_setup_dcb_qos_info(struct ice_pf *pf, struct iidc_rdma_qos_params *qos_info)
+{
+ struct ice_dcbx_cfg *dcbx_cfg;
+ unsigned int i;
+ u32 up2tc;
+
+ if (!pf || !qos_info)
+ return;
+
+ dcbx_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
+ up2tc = rd32(&pf->hw, PRTDCB_TUP2TC);
+
+ qos_info->num_tc = ice_dcb_get_num_tc(dcbx_cfg);
+
+ for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++)
+ qos_info->up2tc[i] = (up2tc >> (i * 3)) & 0x7;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ qos_info->tc_info[i].rel_bw = dcbx_cfg->etscfg.tcbwtable[i];
+
+ qos_info->pfc_mode = dcbx_cfg->pfc_mode;
+ if (qos_info->pfc_mode == IIDC_DSCP_PFC_MODE)
+ for (i = 0; i < DSCP_MAX; i++)
+ qos_info->dscp_map[i] = dcbx_cfg->dscp_map[i];
+}
+
+/**
* ice_dcb_is_mib_change_pending - Check if MIB change is pending
* @state: MIB change state
*/
@@ -984,7 +1020,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
}
pi = pf->hw.port_info;
- mib = (struct ice_aqc_lldp_get_mib *)&event->desc.params.raw;
+ mib = libie_aq_raw(&event->desc);
/* Ignore if event is not for Nearest Bridge */
mib_type = FIELD_GET(ICE_AQ_LLDP_BRID_TYPE_M, mib->type);
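ice_setup_dcb_qos_info() above decodes PRTDCB_TUP2TC as eight packed 3-bit fields, one per user priority. A small worked sketch of that decode, using only the shift-and-mask arithmetic shown in the function (the sample register value is made up for illustration):

/* Each user priority i occupies bits [3*i + 2 : 3*i] of the register.
 * Example: up2tc = 0x688 decodes to UP0->TC0, UP1->TC1, UP2->TC2,
 * UP3->TC3 and UP4..UP7->TC0.
 */
static void ice_decode_up2tc_sketch(u32 up2tc,
                                    u8 map[IIDC_MAX_USER_PRIORITY])
{
        unsigned int i;

        for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++)
                map[i] = (up2tc >> (i * 3)) & 0x7;
}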
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
index 800879a88c5e..da9ba814b4e8 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
@@ -31,6 +31,9 @@ void
ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring,
struct ice_tx_buf *first);
void
+ice_setup_dcb_qos_info(struct ice_pf *pf,
+ struct iidc_rdma_qos_params *qos_info);
+void
ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
struct ice_rq_event_info *event);
/**
@@ -134,5 +137,11 @@ static inline void ice_update_dcb_stats(struct ice_pf *pf) { }
static inline void
ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, struct ice_rq_event_info *event) { }
static inline void ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, u8 dcb_tc) { }
+static inline void
+ice_setup_dcb_qos_info(struct ice_pf *pf, struct iidc_rdma_qos_params *qos_info)
+{
+ qos_info->num_tc = 1;
+ qos_info->tc_info[0].rel_bw = 100;
+}
#endif /* CONFIG_DCB */
#endif /* _ICE_DCB_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
index 6d50b90a7359..a10c1c8d8697 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
@@ -754,7 +754,7 @@ static int ice_dcbnl_setapp(struct net_device *netdev, struct dcb_app *app)
if (!ice_is_feature_supported(pf, ICE_F_DSCP))
return -EOPNOTSUPP;
- if (app->protocol >= ICE_DSCP_NUM_VAL) {
+ if (app->protocol >= DSCP_MAX) {
netdev_err(netdev, "DSCP value 0x%04X out of range\n",
app->protocol);
return -EINVAL;
@@ -931,7 +931,7 @@ static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app)
/* if the last DSCP mapping just got deleted, need to switch
* to L2 VLAN QoS mode
*/
- if (bitmap_empty(new_cfg->dscp_mapped, ICE_DSCP_NUM_VAL) &&
+ if (bitmap_empty(new_cfg->dscp_mapped, DSCP_MAX) &&
new_cfg->pfc_mode == ICE_QOS_MODE_DSCP) {
ret = ice_aq_set_pfc_mode(&pf->hw,
ICE_AQC_PFC_VLAN_BASED_PFC,
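The ICE_DSCP_NUM_VAL uses in this file are replaced by the generic DSCP_MAX constant; since DSCP is a 6-bit field, both evaluate to 64, so the bound checks above still reject anything outside 0..63. A one-line sketch of the same validation, assuming DSCP_MAX comes from the generic <linux/dscp.h> header (the helper is hypothetical):

#include <linux/dscp.h>

/* Hypothetical bound check mirroring ice_dcbnl_setapp() above. */
static bool ice_dscp_value_ok_sketch(u16 dscp)
{
        return dscp < DSCP_MAX;        /* i.e. dscp <= 63 */
}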
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c
index fc91c4d41186..3b2d9c436979 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.c
@@ -4,6 +4,7 @@
#include "ice_common.h"
#include "ice.h"
#include "ice_ddp.h"
+#include "ice_sched.h"
/* For supporting double VLAN mode, it is necessary to enable or disable certain
* boost tcam entries. The metadata labels names that match the following
@@ -30,7 +31,7 @@ static const struct ice_tunnel_type_scan tnls[] = {
* Verifies various attributes of the package file, including length, format
* version, and the requirement of at least one segment.
*/
-static enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
+static enum ice_ddp_state ice_verify_pkg(const struct ice_pkg_hdr *pkg, u32 len)
{
u32 seg_count;
u32 i;
@@ -56,13 +57,13 @@ static enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
/* all segments must fit within length */
for (i = 0; i < seg_count; i++) {
u32 off = le32_to_cpu(pkg->seg_offset[i]);
- struct ice_generic_seg_hdr *seg;
+ const struct ice_generic_seg_hdr *seg;
/* segment header must fit */
if (len < off + sizeof(*seg))
return ICE_DDP_PKG_INVALID_FILE;
- seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);
+ seg = (void *)pkg + off;
/* segment body must fit */
if (len < off + le32_to_cpu(seg->seg_size))
@@ -118,13 +119,13 @@ static enum ice_ddp_state ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
*
* This helper function validates a buffer's header.
*/
-static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
+static const struct ice_buf_hdr *ice_pkg_val_buf(const struct ice_buf *buf)
{
- struct ice_buf_hdr *hdr;
+ const struct ice_buf_hdr *hdr;
u16 section_count;
u16 data_end;
- hdr = (struct ice_buf_hdr *)buf->buf;
+ hdr = (const struct ice_buf_hdr *)buf->buf;
/* verify data */
section_count = le16_to_cpu(hdr->section_count);
if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT)
@@ -164,8 +165,8 @@ static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
* unexpected value has been detected (for example an invalid section count or
* an invalid buffer end value).
*/
-static struct ice_buf_hdr *ice_pkg_enum_buf(struct ice_seg *ice_seg,
- struct ice_pkg_enum *state)
+static const struct ice_buf_hdr *ice_pkg_enum_buf(struct ice_seg *ice_seg,
+ struct ice_pkg_enum *state)
{
if (ice_seg) {
state->buf_table = ice_find_buf_table(ice_seg);
@@ -288,11 +289,11 @@ void *ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
* indicates a base offset of 10, and the index for the entry is 2, then
* section handler function should set the offset to 10 + 2 = 12.
*/
-static void *ice_pkg_enum_entry(struct ice_seg *ice_seg,
- struct ice_pkg_enum *state, u32 sect_type,
- u32 *offset,
- void *(*handler)(u32 sect_type, void *section,
- u32 index, u32 *offset))
+void *ice_pkg_enum_entry(struct ice_seg *ice_seg,
+ struct ice_pkg_enum *state, u32 sect_type,
+ u32 *offset,
+ void *(*handler)(u32 sect_type, void *section,
+ u32 index, u32 *offset))
{
void *entry;
@@ -721,6 +722,12 @@ static bool ice_is_gtp_c_profile(u16 prof_idx)
}
}
+static bool ice_is_pfcp_profile(u16 prof_idx)
+{
+ return prof_idx >= ICE_PROFID_IPV4_PFCP_NODE &&
+ prof_idx <= ICE_PROFID_IPV6_PFCP_SESSION;
+}
+
/**
* ice_get_sw_prof_type - determine switch profile type
* @hw: pointer to the HW structure
@@ -738,6 +745,9 @@ static enum ice_prof_type ice_get_sw_prof_type(struct ice_hw *hw,
if (ice_is_gtp_u_profile(prof_idx))
return ICE_PROF_TUN_GTPU;
+ if (ice_is_pfcp_profile(prof_idx))
+ return ICE_PROF_TUN_PFCP;
+
for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) {
/* UDP tunnel will have UDP_OF protocol ID and VNI offset */
if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF &&
@@ -1091,16 +1101,16 @@ struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
return &bld->buf;
}
-static enum ice_ddp_state ice_map_aq_err_to_ddp_state(enum ice_aq_err aq_err)
+static enum ice_ddp_state ice_map_aq_err_to_ddp_state(enum libie_aq_err aq_err)
{
switch (aq_err) {
- case ICE_AQ_RC_ENOSEC:
- case ICE_AQ_RC_EBADSIG:
+ case LIBIE_AQ_RC_ENOSEC:
+ case LIBIE_AQ_RC_EBADSIG:
return ICE_DDP_PKG_FILE_SIGNATURE_INVALID;
- case ICE_AQ_RC_ESVN:
+ case LIBIE_AQ_RC_ESVN:
return ICE_DDP_PKG_FILE_REVISION_TOO_LOW;
- case ICE_AQ_RC_EBADMAN:
- case ICE_AQ_RC_EBADBUF:
+ case LIBIE_AQ_RC_EBADMAN:
+ case LIBIE_AQ_RC_EBADBUF:
return ICE_DDP_PKG_LOAD_ERROR;
default:
return ICE_DDP_PKG_ERR;
@@ -1170,7 +1180,7 @@ ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
u32 *error_info, struct ice_sq_cd *cd)
{
struct ice_aqc_download_pkg *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
if (error_offset)
@@ -1178,9 +1188,9 @@ ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
if (error_info)
*error_info = 0;
- cmd = &desc.params.download_pkg;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
if (last_buf)
cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
@@ -1201,6 +1211,132 @@ ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
}
/**
+ * ice_is_buffer_metadata - determine if package buffer is a metadata buffer
+ * @buf: pointer to buffer header
+ * Return: whether given @buf is a metadata one.
+ */
+static bool ice_is_buffer_metadata(struct ice_buf_hdr *buf)
+{
+ return le32_to_cpu(buf->section_entry[0].type) & ICE_METADATA_BUF;
+}
+
+/**
+ * struct ice_ddp_send_ctx - sending context of current DDP segment
+ * @hw: pointer to the hardware struct
+ *
+ * Keeps current sending state (header, error) for the purpose of proper "last"
+ * bit setting in ice_aq_download_pkg(). Use via calls to ice_ddp_send_hunk().
+ */
+struct ice_ddp_send_ctx {
+ struct ice_hw *hw;
+/* private: only for ice_ddp_send_hunk() */
+ struct ice_buf_hdr *hdr;
+ int err;
+};
+
+static void ice_ddp_send_ctx_set_err(struct ice_ddp_send_ctx *ctx, int err)
+{
+ ctx->err = err;
+}
+
+/**
+ * ice_ddp_send_hunk - send one hunk of data to FW
+ * @ctx: current segment sending context
+ * @hunk: next hunk to send, size is always ICE_PKG_BUF_SIZE
+ *
+ * Send the next hunk of data to FW, retrying if needed.
+ *
+ * Notice: must be called once more with a NULL @hunk to finish up; such call
+ * will set up the "last" bit of an AQ request. After such call @ctx.hdr is
+ * cleared, @hw is still valid.
+ *
+ * Return: %ICE_DDP_PKG_SUCCESS if there were no problems; a sticky @err
+ * otherwise.
+ */
+static enum ice_ddp_state ice_ddp_send_hunk(struct ice_ddp_send_ctx *ctx,
+ struct ice_buf_hdr *hunk)
+{
+ struct ice_buf_hdr *prev_hunk = ctx->hdr;
+ struct ice_hw *hw = ctx->hw;
+ bool prev_was_last = !hunk;
+ enum libie_aq_err aq_err;
+ u32 offset, info;
+ int attempt, err;
+
+ if (ctx->err)
+ return ctx->err;
+
+ ctx->hdr = hunk;
+ if (!prev_hunk)
+ return ICE_DDP_PKG_SUCCESS; /* no problem so far */
+
+ for (attempt = 0; attempt < 5; attempt++) {
+ if (attempt)
+ msleep(20);
+
+ err = ice_aq_download_pkg(hw, prev_hunk, ICE_PKG_BUF_SIZE,
+ prev_was_last, &offset, &info, NULL);
+
+ aq_err = hw->adminq.sq_last_status;
+ if (aq_err != LIBIE_AQ_RC_ENOSEC &&
+ aq_err != LIBIE_AQ_RC_EBADSIG)
+ break;
+ }
+
+ if (err) {
+ ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n",
+ err, offset, info);
+ ctx->err = ice_map_aq_err_to_ddp_state(aq_err);
+ } else if (attempt) {
+ dev_dbg(ice_hw_to_dev(hw),
+ "ice_aq_download_pkg number of retries: %d\n", attempt);
+ }
+
+ return ctx->err;
+}
+
+/**
+ * ice_dwnld_cfg_bufs_no_lock
+ * @ctx: context of the current buffers section to send
+ * @bufs: pointer to an array of buffers
+ * @start: buffer index of first buffer to download
+ * @count: the number of buffers to download
+ *
+ * Downloads package configuration buffers to the firmware. Metadata buffers
+ * are skipped, and the first metadata buffer found indicates that the rest
+ * of the buffers are all metadata buffers.
+ */
+static enum ice_ddp_state
+ice_dwnld_cfg_bufs_no_lock(struct ice_ddp_send_ctx *ctx, struct ice_buf *bufs,
+ u32 start, u32 count)
+{
+ struct ice_buf_hdr *bh;
+ enum ice_ddp_state err;
+
+ if (!bufs || !count) {
+ ice_ddp_send_ctx_set_err(ctx, ICE_DDP_PKG_ERR);
+ return ICE_DDP_PKG_ERR;
+ }
+
+ bufs += start;
+
+ for (int i = 0; i < count; i++, bufs++) {
+ bh = (struct ice_buf_hdr *)bufs;
+ /* Metadata buffers should not be sent to FW,
+ * their presence means "we are done here".
+ */
+ if (ice_is_buffer_metadata(bh))
+ break;
+
+ err = ice_ddp_send_hunk(ctx, bh);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
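ice_ddp_send_hunk() above deliberately transmits the previous hunk only once the next one is known, so the terminating call with a NULL hunk is what flushes the final buffer with the "last" AQ flag set, and errors stay sticky in the context. A minimal sketch of driving that protocol for a flat buffer array, mirroring how ice_dwnld_cfg_bufs() uses it later in this file (the wrapper name is illustrative):

/* Queue every buffer, then a terminating NULL so the last real buffer
 * goes out with the "last" flag set; the context keeps any error sticky.
 */
static enum ice_ddp_state
ice_send_all_bufs_sketch(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
        struct ice_ddp_send_ctx ctx = { .hw = hw };

        ice_dwnld_cfg_bufs_no_lock(&ctx, bufs, 0, count);

        /* Finish up: flushes the pending hunk with the "last" bit set. */
        return ice_ddp_send_hunk(&ctx, NULL);
}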
+/**
* ice_get_pkg_seg_by_idx
* @pkg_hdr: pointer to the package header to be searched
* @idx: index of segment
@@ -1260,117 +1396,20 @@ ice_is_signing_seg_type_at_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx,
}
/**
- * ice_is_buffer_metadata - determine if package buffer is a metadata buffer
- * @buf: pointer to buffer header
- */
-static bool ice_is_buffer_metadata(struct ice_buf_hdr *buf)
-{
- if (le32_to_cpu(buf->section_entry[0].type) & ICE_METADATA_BUF)
- return true;
-
- return false;
-}
-
-/**
- * ice_is_last_download_buffer
- * @buf: pointer to current buffer header
- * @idx: index of the buffer in the current sequence
- * @count: the buffer count in the current sequence
- *
- * Note: this routine should only be called if the buffer is not the last buffer
- */
-static bool
-ice_is_last_download_buffer(struct ice_buf_hdr *buf, u32 idx, u32 count)
-{
- struct ice_buf *next_buf;
-
- if ((idx + 1) == count)
- return true;
-
- /* A set metadata flag in the next buffer will signal that the current
- * buffer will be the last buffer downloaded
- */
- next_buf = ((struct ice_buf *)buf) + 1;
-
- return ice_is_buffer_metadata((struct ice_buf_hdr *)next_buf);
-}
-
-/**
- * ice_dwnld_cfg_bufs_no_lock
- * @hw: pointer to the hardware structure
- * @bufs: pointer to an array of buffers
- * @start: buffer index of first buffer to download
- * @count: the number of buffers to download
- * @indicate_last: if true, then set last buffer flag on last buffer download
- *
- * Downloads package configuration buffers to the firmware. Metadata buffers
- * are skipped, and the first metadata buffer found indicates that the rest
- * of the buffers are all metadata buffers.
- */
-static enum ice_ddp_state
-ice_dwnld_cfg_bufs_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 start,
- u32 count, bool indicate_last)
-{
- enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
- struct ice_buf_hdr *bh;
- enum ice_aq_err err;
- u32 offset, info, i;
-
- if (!bufs || !count)
- return ICE_DDP_PKG_ERR;
-
- /* If the first buffer's first section has its metadata bit set
- * then there are no buffers to be downloaded, and the operation is
- * considered a success.
- */
- bh = (struct ice_buf_hdr *)(bufs + start);
- if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF)
- return ICE_DDP_PKG_SUCCESS;
-
- for (i = 0; i < count; i++) {
- bool last = false;
- int status;
-
- bh = (struct ice_buf_hdr *)(bufs + start + i);
-
- if (indicate_last)
- last = ice_is_last_download_buffer(bh, i, count);
-
- status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
- &offset, &info, NULL);
-
- /* Save AQ status from download package */
- if (status) {
- ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n",
- status, offset, info);
- err = hw->adminq.sq_last_status;
- state = ice_map_aq_err_to_ddp_state(err);
- break;
- }
-
- if (last)
- break;
- }
-
- return state;
-}
-
-/**
* ice_download_pkg_sig_seg - download a signature segment
- * @hw: pointer to the hardware structure
+ * @ctx: context of the current buffers section to send
* @seg: pointer to signature segment
*/
static enum ice_ddp_state
-ice_download_pkg_sig_seg(struct ice_hw *hw, struct ice_sign_seg *seg)
+ice_download_pkg_sig_seg(struct ice_ddp_send_ctx *ctx, struct ice_sign_seg *seg)
{
- return ice_dwnld_cfg_bufs_no_lock(hw, seg->buf_tbl.buf_array, 0,
- le32_to_cpu(seg->buf_tbl.buf_count),
- false);
+ return ice_dwnld_cfg_bufs_no_lock(ctx, seg->buf_tbl.buf_array, 0,
+ le32_to_cpu(seg->buf_tbl.buf_count));
}
/**
* ice_download_pkg_config_seg - download a config segment
- * @hw: pointer to the hardware structure
+ * @ctx: context of the current buffers section to send
* @pkg_hdr: pointer to package header
* @idx: segment index
* @start: starting buffer
@@ -1379,8 +1418,9 @@ ice_download_pkg_sig_seg(struct ice_hw *hw, struct ice_sign_seg *seg)
* Note: idx must reference a ICE segment
*/
static enum ice_ddp_state
-ice_download_pkg_config_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
- u32 idx, u32 start, u32 count)
+ice_download_pkg_config_seg(struct ice_ddp_send_ctx *ctx,
+ struct ice_pkg_hdr *pkg_hdr, u32 idx, u32 start,
+ u32 count)
{
struct ice_buf_table *bufs;
struct ice_seg *seg;
@@ -1396,46 +1436,56 @@ ice_download_pkg_config_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
if (start >= buf_count || start + count > buf_count)
return ICE_DDP_PKG_ERR;
- return ice_dwnld_cfg_bufs_no_lock(hw, bufs->buf_array, start, count,
- true);
+ return ice_dwnld_cfg_bufs_no_lock(ctx, bufs->buf_array, start, count);
+}
+
+static bool ice_is_last_sign_seg(u32 flags)
+{
+ return !(flags & ICE_SIGN_SEG_FLAGS_VALID) || /* behavior prior to valid */
+ (flags & ICE_SIGN_SEG_FLAGS_LAST);
}
/**
* ice_dwnld_sign_and_cfg_segs - download a signing segment and config segment
- * @hw: pointer to the hardware structure
+ * @ctx: context of the current buffers section to send
* @pkg_hdr: pointer to package header
* @idx: segment index (must be a signature segment)
*
* Note: idx must reference a signature segment
*/
static enum ice_ddp_state
-ice_dwnld_sign_and_cfg_segs(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
- u32 idx)
+ice_dwnld_sign_and_cfg_segs(struct ice_ddp_send_ctx *ctx,
+ struct ice_pkg_hdr *pkg_hdr, u32 idx)
{
+ u32 conf_idx, start, count, flags;
enum ice_ddp_state state;
struct ice_sign_seg *seg;
- u32 conf_idx;
- u32 start;
- u32 count;
seg = (struct ice_sign_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, idx);
if (!seg) {
state = ICE_DDP_PKG_ERR;
- goto exit;
+ ice_ddp_send_ctx_set_err(ctx, state);
+ return state;
}
- conf_idx = le32_to_cpu(seg->signed_seg_idx);
- start = le32_to_cpu(seg->signed_buf_start);
count = le32_to_cpu(seg->signed_buf_count);
+ state = ice_download_pkg_sig_seg(ctx, seg);
+ if (state || !count)
+ return state;
- state = ice_download_pkg_sig_seg(hw, seg);
- if (state)
- goto exit;
+ conf_idx = le32_to_cpu(seg->signed_seg_idx);
+ start = le32_to_cpu(seg->signed_buf_start);
- state = ice_download_pkg_config_seg(hw, pkg_hdr, conf_idx, start,
+ state = ice_download_pkg_config_seg(ctx, pkg_hdr, conf_idx, start,
count);
-exit:
+ /* finish up by sending last hunk with "last" flag set if requested by
+ * DDP content
+ */
+ flags = le32_to_cpu(seg->flags);
+ if (ice_is_last_sign_seg(flags))
+ state = ice_ddp_send_hunk(ctx, NULL);
+
return state;
}
@@ -1488,8 +1538,9 @@ ice_post_dwnld_pkg_actions(struct ice_hw *hw)
static enum ice_ddp_state
ice_download_pkg_with_sig_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
{
- enum ice_aq_err aq_err = hw->adminq.sq_last_status;
+ enum libie_aq_err aq_err = hw->adminq.sq_last_status;
enum ice_ddp_state state = ICE_DDP_PKG_ERR;
+ struct ice_ddp_send_ctx ctx = { .hw = hw };
int status;
u32 i;
@@ -1510,7 +1561,7 @@ ice_download_pkg_with_sig_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
hw->pkg_sign_type))
continue;
- state = ice_dwnld_sign_and_cfg_segs(hw, pkg_hdr, i);
+ state = ice_dwnld_sign_and_cfg_segs(&ctx, pkg_hdr, i);
if (state)
break;
}
@@ -1535,6 +1586,7 @@ ice_download_pkg_with_sig_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
static enum ice_ddp_state
ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
+ struct ice_ddp_send_ctx ctx = { .hw = hw };
enum ice_ddp_state state;
struct ice_buf_hdr *bh;
int status;
@@ -1547,7 +1599,7 @@ ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
* considered a success.
*/
bh = (struct ice_buf_hdr *)bufs;
- if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF)
+ if (ice_is_buffer_metadata(bh))
return ICE_DDP_PKG_SUCCESS;
status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
@@ -1557,7 +1609,9 @@ ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status);
}
- state = ice_dwnld_cfg_bufs_no_lock(hw, bufs, 0, count, true);
+ ice_dwnld_cfg_bufs_no_lock(&ctx, bufs, 0, count);
+ /* finish up by sending last hunk with "last" flag set */
+ state = ice_ddp_send_hunk(&ctx, NULL);
if (!state)
state = ice_post_dwnld_pkg_actions(hw);
@@ -1634,7 +1688,7 @@ static int ice_aq_get_pkg_info_list(struct ice_hw *hw,
struct ice_aqc_get_pkg_info_resp *pkg_info,
u16 buf_size, struct ice_sq_cd *cd)
{
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);
@@ -1658,7 +1712,7 @@ static int ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
u32 *error_info, struct ice_sq_cd *cd)
{
struct ice_aqc_download_pkg *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
if (error_offset)
@@ -1666,9 +1720,9 @@ static int ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
if (error_info)
*error_info = 0;
- cmd = &desc.params.download_pkg;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
if (last_buf)
cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
@@ -1700,10 +1754,10 @@ static int ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
int ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
u16 buf_size, struct ice_sq_cd *cd)
{
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
}
@@ -1771,9 +1825,9 @@ int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
* success it returns a pointer to the segment header, otherwise it will
* return NULL.
*/
-static struct ice_generic_seg_hdr *
+static const struct ice_generic_seg_hdr *
ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
- struct ice_pkg_hdr *pkg_hdr)
+ const struct ice_pkg_hdr *pkg_hdr)
{
u32 i;
@@ -1784,11 +1838,9 @@ ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
/* Search all package segments for the requested segment type */
for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) {
- struct ice_generic_seg_hdr *seg;
+ const struct ice_generic_seg_hdr *seg;
- seg = (struct ice_generic_seg_hdr
- *)((u8 *)pkg_hdr +
- le32_to_cpu(pkg_hdr->seg_offset[i]));
+ seg = (void *)pkg_hdr + le32_to_cpu(pkg_hdr->seg_offset[i]);
if (le32_to_cpu(seg->seg_type) == seg_type)
return seg;
@@ -2250,6 +2302,8 @@ enum ice_ddp_state ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf,
return ICE_DDP_PKG_ERR;
buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL);
+ if (!buf_copy)
+ return ICE_DDP_PKG_ERR;
state = ice_init_pkg(hw, buf_copy, len);
if (!ice_is_init_pkg_successful(state)) {
@@ -2263,3 +2317,231 @@ enum ice_ddp_state ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf,
return state;
}
+
+/**
+ * ice_get_set_tx_topo - get or set Tx topology
+ * @hw: pointer to the HW struct
+ * @buf: pointer to Tx topology buffer
+ * @buf_size: buffer size
+ * @cd: pointer to command details structure or NULL
+ * @flags: pointer to descriptor flags
+ * @set: false to get, true to set the Tx topology
+ *
+ * The function will get or set the Tx topology.
+ *
+ * Return: zero on success, negative values otherwise.
+ */
+static int
+ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size,
+ struct ice_sq_cd *cd, u8 *flags, bool set)
+{
+ struct ice_aqc_get_set_tx_topo *cmd;
+ struct libie_aq_desc desc;
+ int status;
+
+ cmd = libie_aq_raw(&desc);
+ if (set) {
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_tx_topo);
+ cmd->set_flags = ICE_AQC_TX_TOPO_FLAGS_ISSUED;
+ /* requested to update a new topology, not a default topology */
+ if (buf)
+ cmd->set_flags |= ICE_AQC_TX_TOPO_FLAGS_SRC_RAM |
+ ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW;
+
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
+ } else {
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_tx_topo);
+ cmd->get_flags = ICE_AQC_TX_TOPO_GET_RAM;
+
+ if (hw->mac_type == ICE_MAC_E810 ||
+ hw->mac_type == ICE_MAC_GENERIC)
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
+ }
+
+ status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+ if (status)
+ return status;
+ /* read the return flag values (first byte) for get operation */
+ if (!set && flags)
+ *flags = cmd->set_flags;
+
+ return 0;
+}
+
+/**
+ * ice_cfg_tx_topo - Initialize new Tx topology if available
+ * @hw: pointer to the HW struct
+ * @buf: pointer to Tx topology buffer
+ * @len: buffer size
+ *
+ * The function will apply the new Tx topology from the package buffer
+ * if available.
+ *
+ * Return:
+ * * 0 - Successfully applied topology configuration.
+ * * -EBUSY - Failed to acquire global configuration lock.
+ * * -EEXIST - Topology configuration has already been applied.
+ * * -EIO - Unable to apply topology configuration.
+ * * -ENODEV - Failed to re-initialize device after applying configuration.
+ * * Other negative error codes indicate unexpected failures.
+ */
+int ice_cfg_tx_topo(struct ice_hw *hw, const void *buf, u32 len)
+{
+ u8 *new_topo = NULL, *topo __free(kfree) = NULL;
+ const struct ice_run_time_cfg_seg *seg;
+ const struct ice_buf_hdr *section;
+ const struct ice_pkg_hdr *pkg_hdr;
+ enum ice_ddp_state state;
+ u16 offset, size = 0;
+ u32 reg = 0;
+ int status;
+ u8 flags;
+
+ if (!buf || !len)
+ return -EINVAL;
+
+ /* Does FW support new Tx topology mode ? */
+ if (!hw->func_caps.common_cap.tx_sched_topo_comp_mode_en) {
+ ice_debug(hw, ICE_DBG_INIT, "FW doesn't support compatibility mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ topo = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!topo)
+ return -ENOMEM;
+
+ /* Get the current Tx topology flags */
+ status = ice_get_set_tx_topo(hw, topo, ICE_AQ_MAX_BUF_LEN, NULL, &flags,
+ false);
+
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Get current topology is failed\n");
+ return -EIO;
+ }
+
+ /* Is default topology already applied ? */
+ if (!(flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) &&
+ hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS) {
+ ice_debug(hw, ICE_DBG_INIT, "Default topology already applied\n");
+ return -EEXIST;
+ }
+
+ /* Is new topology already applied ? */
+ if ((flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) &&
+ hw->num_tx_sched_layers == ICE_SCHED_5_LAYERS) {
+ ice_debug(hw, ICE_DBG_INIT, "New topology already applied\n");
+ return -EEXIST;
+ }
+
+ /* Setting topology already issued? */
+ if (flags & ICE_AQC_TX_TOPO_FLAGS_ISSUED) {
+ ice_debug(hw, ICE_DBG_INIT, "Update Tx topology was done by another PF\n");
+ /* Add a small delay before exiting */
+ msleep(2000);
+ return -EEXIST;
+ }
+
+ /* Change the topology from new to default (5 to 9) */
+ if (!(flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) &&
+ hw->num_tx_sched_layers == ICE_SCHED_5_LAYERS) {
+ ice_debug(hw, ICE_DBG_INIT, "Change topology from 5 to 9 layers\n");
+ goto update_topo;
+ }
+
+ pkg_hdr = (const struct ice_pkg_hdr *)buf;
+ state = ice_verify_pkg(pkg_hdr, len);
+ if (state) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to verify pkg (err: %d)\n",
+ state);
+ return -EIO;
+ }
+
+ /* Find runtime configuration segment */
+ seg = (const struct ice_run_time_cfg_seg *)
+ ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE_RUN_TIME_CFG, pkg_hdr);
+ if (!seg) {
+ ice_debug(hw, ICE_DBG_INIT, "5 layer topology segment is missing\n");
+ return -EIO;
+ }
+
+ if (le32_to_cpu(seg->buf_table.buf_count) < ICE_MIN_S_COUNT) {
+ ice_debug(hw, ICE_DBG_INIT, "5 layer topology segment count(%d) is wrong\n",
+ seg->buf_table.buf_count);
+ return -EIO;
+ }
+
+ section = ice_pkg_val_buf(seg->buf_table.buf_array);
+ if (!section || le32_to_cpu(section->section_entry[0].type) !=
+ ICE_SID_TX_5_LAYER_TOPO) {
+ ice_debug(hw, ICE_DBG_INIT, "5 layer topology section type is wrong\n");
+ return -EIO;
+ }
+
+ size = le16_to_cpu(section->section_entry[0].size);
+ offset = le16_to_cpu(section->section_entry[0].offset);
+ if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ) {
+ ice_debug(hw, ICE_DBG_INIT, "5 layer topology section size is wrong\n");
+ return -EIO;
+ }
+
+ /* Make sure the section fits in the buffer */
+ if (offset + size > ICE_PKG_BUF_SIZE) {
+ ice_debug(hw, ICE_DBG_INIT, "5 layer topology buffer > 4K\n");
+ return -EIO;
+ }
+
+ /* Get the new topology buffer, reuse current topo copy mem */
+ static_assert(ICE_PKG_BUF_SIZE == ICE_AQ_MAX_BUF_LEN);
+ new_topo = topo;
+ memcpy(new_topo, (u8 *)section + offset, size);
+
+update_topo:
+ /* Acquire the global lock to make sure that set topology is issued
+ * by only one PF at a time.
+ */
+ status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, ICE_RES_WRITE,
+ ICE_GLOBAL_CFG_LOCK_TIMEOUT);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to acquire global lock\n");
+ return -EBUSY;
+ }
+
+ /* Check if reset was triggered already. */
+ reg = rd32(hw, GLGEN_RSTAT);
+ if (reg & GLGEN_RSTAT_DEVSTATE_M) {
+ ice_debug(hw, ICE_DBG_INIT, "Reset is in progress. Layer topology might be applied already\n");
+ ice_check_reset(hw);
+ /* Reset is in progress, re-init the HW again */
+ goto reinit_hw;
+ }
+
+ /* Set new topology */
+ status = ice_get_set_tx_topo(hw, new_topo, size, NULL, NULL, true);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to set Tx topology, status %pe\n",
+ ERR_PTR(status));
+ /* only report -EIO here as the caller checks the error value
+ * and reports an informational error message informing that
+ * the driver failed to program Tx topology.
+ */
+ status = -EIO;
+ }
+
+ /* Even if the Tx topology config failed, we need a CORE reset here to
+ * clear the global configuration lock. Delay 1 second to allow
+ * hardware to settle, then issue a CORER.
+ */
+ msleep(1000);
+ ice_reset(hw, ICE_RESET_CORER);
+ ice_check_reset(hw);
+
+reinit_hw:
+ /* Since we triggered a CORER, re-initialize hardware */
+ ice_deinit_hw(hw);
+ if (ice_init_hw(hw)) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to re-init hardware after setting Tx topology\n");
+ return -ENODEV;
+ }
+
+ return status;
+}
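A note on the return codes listed above: they let the caller distinguish a benign "nothing to do" case from a real failure, since -EEXIST means the requested topology is already in place while -ENODEV means the device could not be re-initialized after the CORER. Below is a minimal caller-side sketch under that reading; the function name and surrounding flow are illustrative, not the driver's actual probe path.

/* Hypothetical caller sketch: apply the 5-layer Tx topology from a DDP
 * package buffer and react to the documented return codes. Names here are
 * illustrative only.
 */
static void example_apply_tx_topo(struct ice_hw *hw, const void *pkg, u32 len)
{
	int err = ice_cfg_tx_topo(hw, pkg, len);

	switch (err) {
	case 0:
		/* new topology applied, device was re-initialized */
		break;
	case -EEXIST:
		/* requested topology already active - nothing to do */
		break;
	case -EOPNOTSUPP:
		/* FW lacks tx_sched_topo_comp_mode_en - keep defaults */
		break;
	default:
		/* -EBUSY, -EIO, -ENODEV or other unexpected failure */
		break;
	}
}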
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.h b/drivers/net/ethernet/intel/ice/ice_ddp.h
index ff66c2ffb1a2..8a2d57fc5dae 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.h
@@ -181,7 +181,10 @@ struct ice_sign_seg {
__le32 signed_seg_idx;
__le32 signed_buf_start;
__le32 signed_buf_count;
-#define ICE_SIGN_SEG_RESERVED_COUNT 44
+#define ICE_SIGN_SEG_FLAGS_VALID 0x80000000
+#define ICE_SIGN_SEG_FLAGS_LAST 0x00000001
+ __le32 flags;
+#define ICE_SIGN_SEG_RESERVED_COUNT 40
u8 reserved[ICE_SIGN_SEG_RESERVED_COUNT];
struct ice_buf_table buf_tbl;
};
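The new flags word repurposes four of the previously reserved bytes: bit 31 (ICE_SIGN_SEG_FLAGS_VALID) indicates the field is populated at all, and bit 0 (ICE_SIGN_SEG_FLAGS_LAST) appears to mark the final segment of a signed download. A hedged sketch of decoding those bits; the helper name is illustrative and the real consumers live in ice_ddp.c:

/* Illustrative helper: decode the flags word of a signature segment.
 * Only meaningful when ICE_SIGN_SEG_FLAGS_VALID is set; in older packages
 * these bytes are still reserved zeroes.
 */
static bool example_sign_seg_is_last(const struct ice_sign_seg *seg)
{
	u32 flags = le32_to_cpu(seg->flags);

	if (!(flags & ICE_SIGN_SEG_FLAGS_VALID))
		return false;	/* flags not populated by this package */

	return flags & ICE_SIGN_SEG_FLAGS_LAST;
}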
@@ -261,10 +264,17 @@ struct ice_meta_sect {
#define ICE_SID_CDID_KEY_BUILDER_RSS 47
#define ICE_SID_CDID_REDIR_RSS 48
+#define ICE_SID_RXPARSER_CAM 50
+#define ICE_SID_RXPARSER_NOMATCH_CAM 51
+#define ICE_SID_RXPARSER_IMEM 52
#define ICE_SID_RXPARSER_MARKER_PTYPE 55
#define ICE_SID_RXPARSER_BOOST_TCAM 56
+#define ICE_SID_RXPARSER_PROTO_GRP 57
#define ICE_SID_RXPARSER_METADATA_INIT 58
#define ICE_SID_TXPARSER_BOOST_TCAM 66
+#define ICE_SID_RXPARSER_MARKER_GRP 72
+#define ICE_SID_RXPARSER_PG_SPILL 76
+#define ICE_SID_RXPARSER_NOMATCH_SPILL 78
#define ICE_SID_XLT0_PE 80
#define ICE_SID_XLT_KEY_BUILDER_PE 81
@@ -276,6 +286,7 @@ struct ice_meta_sect {
#define ICE_SID_CDID_KEY_BUILDER_PE 87
#define ICE_SID_CDID_REDIR_PE 88
+#define ICE_SID_RXPARSER_FLAG_REDIR 97
/* Label Metadata section IDs */
#define ICE_SID_LBL_FIRST 0x80000010
#define ICE_SID_LBL_RXPARSER_TMEM 0x80000018
@@ -430,7 +441,7 @@ struct ice_pkg_enum {
u32 buf_idx;
u32 type;
- struct ice_buf_hdr *buf;
+ const struct ice_buf_hdr *buf;
u32 sect_idx;
void *sect;
u32 sect_type;
@@ -451,7 +462,14 @@ int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count);
int ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count);
u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld);
+void *
+ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
+ u32 sect_type, u32 *offset,
+ void *(*handler)(u32 sect_type, void *section,
+ u32 index, u32 *offset));
void *ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
u32 sect_type);
+int ice_cfg_tx_topo(struct ice_hw *hw, const void *buf, u32 len);
+
#endif
diff --git a/drivers/net/ethernet/intel/ice/ice_debugfs.c b/drivers/net/ethernet/intel/ice/ice_debugfs.c
index d252d98218d0..f450250fc827 100644
--- a/drivers/net/ethernet/intel/ice/ice_debugfs.c
+++ b/drivers/net/ethernet/intel/ice/ice_debugfs.c
@@ -1,647 +1,20 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022, Intel Corporation. */
-#include <linux/fs.h>
#include <linux/debugfs.h>
-#include <linux/random.h>
-#include <linux/vmalloc.h>
#include "ice.h"
static struct dentry *ice_debugfs_root;
-/* create a define that has an extra module that doesn't really exist. this
- * is so we can add a module 'all' to easily enable/disable all the modules
- */
-#define ICE_NR_FW_LOG_MODULES (ICE_AQC_FW_LOG_ID_MAX + 1)
-
-/* the ordering in this array is important. it matches the ordering of the
- * values in the FW so the index is the same value as in ice_aqc_fw_logging_mod
- */
-static const char * const ice_fwlog_module_string[] = {
- "general",
- "ctrl",
- "link",
- "link_topo",
- "dnl",
- "i2c",
- "sdp",
- "mdio",
- "adminq",
- "hdma",
- "lldp",
- "dcbx",
- "dcb",
- "xlr",
- "nvm",
- "auth",
- "vpd",
- "iosf",
- "parser",
- "sw",
- "scheduler",
- "txq",
- "rsvd",
- "post",
- "watchdog",
- "task_dispatch",
- "mng",
- "synce",
- "health",
- "tsdrv",
- "pfreg",
- "mdlver",
- "all",
-};
-
-/* the ordering in this array is important. it matches the ordering of the
- * values in the FW so the index is the same value as in ice_fwlog_level
- */
-static const char * const ice_fwlog_level_string[] = {
- "none",
- "error",
- "warning",
- "normal",
- "verbose",
-};
-
-static const char * const ice_fwlog_log_size[] = {
- "128K",
- "256K",
- "512K",
- "1M",
- "2M",
-};
-
-/**
- * ice_fwlog_print_module_cfg - print current FW logging module configuration
- * @hw: pointer to the HW structure
- * @module: module to print
- * @s: the seq file to put data into
- */
-static void
-ice_fwlog_print_module_cfg(struct ice_hw *hw, int module, struct seq_file *s)
-{
- struct ice_fwlog_cfg *cfg = &hw->fwlog_cfg;
- struct ice_fwlog_module_entry *entry;
-
- if (module != ICE_AQC_FW_LOG_ID_MAX) {
- entry = &cfg->module_entries[module];
-
- seq_printf(s, "\tModule: %s, Log Level: %s\n",
- ice_fwlog_module_string[entry->module_id],
- ice_fwlog_level_string[entry->log_level]);
- } else {
- int i;
-
- for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
- entry = &cfg->module_entries[i];
-
- seq_printf(s, "\tModule: %s, Log Level: %s\n",
- ice_fwlog_module_string[entry->module_id],
- ice_fwlog_level_string[entry->log_level]);
- }
- }
-}
-
-static int ice_find_module_by_dentry(struct ice_pf *pf, struct dentry *d)
-{
- int i, module;
-
- module = -1;
- /* find the module based on the dentry */
- for (i = 0; i < ICE_NR_FW_LOG_MODULES; i++) {
- if (d == pf->ice_debugfs_pf_fwlog_modules[i]) {
- module = i;
- break;
- }
- }
-
- return module;
-}
-
-/**
- * ice_debugfs_module_show - read from 'module' file
- * @s: the opened file
- * @v: pointer to the offset
- */
-static int ice_debugfs_module_show(struct seq_file *s, void *v)
-{
- const struct file *filp = s->file;
- struct dentry *dentry;
- struct ice_pf *pf;
- int module;
-
- dentry = file_dentry(filp);
- pf = s->private;
-
- module = ice_find_module_by_dentry(pf, dentry);
- if (module < 0) {
- dev_info(ice_pf_to_dev(pf), "unknown module\n");
- return -EINVAL;
- }
-
- ice_fwlog_print_module_cfg(&pf->hw, module, s);
-
- return 0;
-}
-
-static int ice_debugfs_module_open(struct inode *inode, struct file *filp)
-{
- return single_open(filp, ice_debugfs_module_show, inode->i_private);
-}
-
-/**
- * ice_debugfs_module_write - write into 'module' file
- * @filp: the opened file
- * @buf: where to find the user's data
- * @count: the length of the user's data
- * @ppos: file position offset
- */
-static ssize_t
-ice_debugfs_module_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct ice_pf *pf = file_inode(filp)->i_private;
- struct dentry *dentry = file_dentry(filp);
- struct device *dev = ice_pf_to_dev(pf);
- char user_val[16], *cmd_buf;
- int module, log_level, cnt;
-
- /* don't allow partial writes or invalid input */
- if (*ppos != 0 || count > 8)
- return -EINVAL;
-
- cmd_buf = memdup_user(buf, count);
- if (IS_ERR(cmd_buf))
- return PTR_ERR(cmd_buf);
-
- module = ice_find_module_by_dentry(pf, dentry);
- if (module < 0) {
- dev_info(dev, "unknown module\n");
- return -EINVAL;
- }
-
- cnt = sscanf(cmd_buf, "%s", user_val);
- if (cnt != 1)
- return -EINVAL;
-
- log_level = sysfs_match_string(ice_fwlog_level_string, user_val);
- if (log_level < 0) {
- dev_info(dev, "unknown log level '%s'\n", user_val);
- return -EINVAL;
- }
-
- if (module != ICE_AQC_FW_LOG_ID_MAX) {
- ice_pf_fwlog_update_module(pf, log_level, module);
- } else {
- /* the module 'all' is a shortcut so that we can set
- * all of the modules to the same level quickly
- */
- int i;
-
- for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++)
- ice_pf_fwlog_update_module(pf, log_level, i);
- }
-
- return count;
-}
-
-static const struct file_operations ice_debugfs_module_fops = {
- .owner = THIS_MODULE,
- .open = ice_debugfs_module_open,
- .read = seq_read,
- .release = single_release,
- .write = ice_debugfs_module_write,
-};
-
-/**
- * ice_debugfs_nr_messages_read - read from 'nr_messages' file
- * @filp: the opened file
- * @buffer: where to write the data for the user to read
- * @count: the size of the user's buffer
- * @ppos: file position offset
- */
-static ssize_t ice_debugfs_nr_messages_read(struct file *filp,
- char __user *buffer, size_t count,
- loff_t *ppos)
-{
- struct ice_pf *pf = filp->private_data;
- struct ice_hw *hw = &pf->hw;
- char buff[32] = {};
-
- snprintf(buff, sizeof(buff), "%d\n",
- hw->fwlog_cfg.log_resolution);
-
- return simple_read_from_buffer(buffer, count, ppos, buff, strlen(buff));
-}
-
-/**
- * ice_debugfs_nr_messages_write - write into 'nr_messages' file
- * @filp: the opened file
- * @buf: where to find the user's data
- * @count: the length of the user's data
- * @ppos: file position offset
- */
-static ssize_t
-ice_debugfs_nr_messages_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct ice_pf *pf = filp->private_data;
- struct device *dev = ice_pf_to_dev(pf);
- struct ice_hw *hw = &pf->hw;
- char user_val[8], *cmd_buf;
- s16 nr_messages;
- ssize_t ret;
-
- /* don't allow partial writes or invalid input */
- if (*ppos != 0 || count > 4)
- return -EINVAL;
-
- cmd_buf = memdup_user(buf, count);
- if (IS_ERR(cmd_buf))
- return PTR_ERR(cmd_buf);
-
- ret = sscanf(cmd_buf, "%s", user_val);
- if (ret != 1)
- return -EINVAL;
-
- ret = kstrtos16(user_val, 0, &nr_messages);
- if (ret)
- return ret;
-
- if (nr_messages < ICE_AQC_FW_LOG_MIN_RESOLUTION ||
- nr_messages > ICE_AQC_FW_LOG_MAX_RESOLUTION) {
- dev_err(dev, "Invalid FW log number of messages %d, value must be between %d - %d\n",
- nr_messages, ICE_AQC_FW_LOG_MIN_RESOLUTION,
- ICE_AQC_FW_LOG_MAX_RESOLUTION);
- return -EINVAL;
- }
-
- hw->fwlog_cfg.log_resolution = nr_messages;
-
- return count;
-}
-
-static const struct file_operations ice_debugfs_nr_messages_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = ice_debugfs_nr_messages_read,
- .write = ice_debugfs_nr_messages_write,
-};
-
-/**
- * ice_debugfs_enable_read - read from 'enable' file
- * @filp: the opened file
- * @buffer: where to write the data for the user to read
- * @count: the size of the user's buffer
- * @ppos: file position offset
- */
-static ssize_t ice_debugfs_enable_read(struct file *filp,
- char __user *buffer, size_t count,
- loff_t *ppos)
-{
- struct ice_pf *pf = filp->private_data;
- struct ice_hw *hw = &pf->hw;
- char buff[32] = {};
-
- snprintf(buff, sizeof(buff), "%u\n",
- (u16)(hw->fwlog_cfg.options &
- ICE_FWLOG_OPTION_IS_REGISTERED) >> 3);
-
- return simple_read_from_buffer(buffer, count, ppos, buff, strlen(buff));
-}
-
-/**
- * ice_debugfs_enable_write - write into 'enable' file
- * @filp: the opened file
- * @buf: where to find the user's data
- * @count: the length of the user's data
- * @ppos: file position offset
- */
-static ssize_t
-ice_debugfs_enable_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct ice_pf *pf = filp->private_data;
- struct ice_hw *hw = &pf->hw;
- char user_val[8], *cmd_buf;
- bool enable;
- ssize_t ret;
-
- /* don't allow partial writes or invalid input */
- if (*ppos != 0 || count > 2)
- return -EINVAL;
-
- cmd_buf = memdup_user(buf, count);
- if (IS_ERR(cmd_buf))
- return PTR_ERR(cmd_buf);
-
- ret = sscanf(cmd_buf, "%s", user_val);
- if (ret != 1)
- return -EINVAL;
-
- ret = kstrtobool(user_val, &enable);
- if (ret)
- goto enable_write_error;
-
- if (enable)
- hw->fwlog_cfg.options |= ICE_FWLOG_OPTION_ARQ_ENA;
- else
- hw->fwlog_cfg.options &= ~ICE_FWLOG_OPTION_ARQ_ENA;
-
- ret = ice_fwlog_set(hw, &hw->fwlog_cfg);
- if (ret)
- goto enable_write_error;
-
- if (enable)
- ret = ice_fwlog_register(hw);
- else
- ret = ice_fwlog_unregister(hw);
-
- if (ret)
- goto enable_write_error;
-
- /* if we get here, nothing went wrong; return count since we didn't
- * really write anything
- */
- ret = (ssize_t)count;
-
-enable_write_error:
- /* This function always consumes all of the written input, or produces
- * an error. Check and enforce this. Otherwise, the write operation
- * won't complete properly.
- */
- if (WARN_ON(ret != (ssize_t)count && ret >= 0))
- ret = -EIO;
-
- return ret;
-}
-
-static const struct file_operations ice_debugfs_enable_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = ice_debugfs_enable_read,
- .write = ice_debugfs_enable_write,
-};
-
-/**
- * ice_debugfs_log_size_read - read from 'log_size' file
- * @filp: the opened file
- * @buffer: where to write the data for the user to read
- * @count: the size of the user's buffer
- * @ppos: file position offset
- */
-static ssize_t ice_debugfs_log_size_read(struct file *filp,
- char __user *buffer, size_t count,
- loff_t *ppos)
-{
- struct ice_pf *pf = filp->private_data;
- struct ice_hw *hw = &pf->hw;
- char buff[32] = {};
- int index;
-
- index = hw->fwlog_ring.index;
- snprintf(buff, sizeof(buff), "%s\n", ice_fwlog_log_size[index]);
-
- return simple_read_from_buffer(buffer, count, ppos, buff, strlen(buff));
-}
-
-/**
- * ice_debugfs_log_size_write - write into 'log_size' file
- * @filp: the opened file
- * @buf: where to find the user's data
- * @count: the length of the user's data
- * @ppos: file position offset
- */
-static ssize_t
-ice_debugfs_log_size_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct ice_pf *pf = filp->private_data;
- struct device *dev = ice_pf_to_dev(pf);
- struct ice_hw *hw = &pf->hw;
- char user_val[8], *cmd_buf;
- ssize_t ret;
- int index;
-
- /* don't allow partial writes or invalid input */
- if (*ppos != 0 || count > 5)
- return -EINVAL;
-
- cmd_buf = memdup_user(buf, count);
- if (IS_ERR(cmd_buf))
- return PTR_ERR(cmd_buf);
-
- ret = sscanf(cmd_buf, "%s", user_val);
- if (ret != 1)
- return -EINVAL;
-
- index = sysfs_match_string(ice_fwlog_log_size, user_val);
- if (index < 0) {
- dev_info(dev, "Invalid log size '%s'. The value must be one of 128K, 256K, 512K, 1M, 2M\n",
- user_val);
- ret = -EINVAL;
- goto log_size_write_error;
- } else if (hw->fwlog_cfg.options & ICE_FWLOG_OPTION_IS_REGISTERED) {
- dev_info(dev, "FW logging is currently running. Please disable FW logging to change log_size\n");
- ret = -EINVAL;
- goto log_size_write_error;
- }
-
- /* free all the buffers and the tracking info and resize */
- ice_fwlog_realloc_rings(hw, index);
-
- /* if we get here, nothing went wrong; return count since we didn't
- * really write anything
- */
- ret = (ssize_t)count;
-
-log_size_write_error:
- /* This function always consumes all of the written input, or produces
- * an error. Check and enforce this. Otherwise, the write operation
- * won't complete properly.
- */
- if (WARN_ON(ret != (ssize_t)count && ret >= 0))
- ret = -EIO;
-
- return ret;
-}
-
-static const struct file_operations ice_debugfs_log_size_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = ice_debugfs_log_size_read,
- .write = ice_debugfs_log_size_write,
-};
-
-/**
- * ice_debugfs_data_read - read from 'data' file
- * @filp: the opened file
- * @buffer: where to write the data for the user to read
- * @count: the size of the user's buffer
- * @ppos: file position offset
- */
-static ssize_t ice_debugfs_data_read(struct file *filp, char __user *buffer,
- size_t count, loff_t *ppos)
-{
- struct ice_pf *pf = filp->private_data;
- struct ice_hw *hw = &pf->hw;
- int data_copied = 0;
- bool done = false;
-
- if (ice_fwlog_ring_empty(&hw->fwlog_ring))
- return 0;
-
- while (!ice_fwlog_ring_empty(&hw->fwlog_ring) && !done) {
- struct ice_fwlog_data *log;
- u16 cur_buf_len;
-
- log = &hw->fwlog_ring.rings[hw->fwlog_ring.head];
- cur_buf_len = log->data_size;
- if (cur_buf_len >= count) {
- done = true;
- continue;
- }
-
- if (copy_to_user(buffer, log->data, cur_buf_len)) {
- /* if there is an error then bail and return whatever
- * the driver has copied so far
- */
- done = true;
- continue;
- }
-
- data_copied += cur_buf_len;
- buffer += cur_buf_len;
- count -= cur_buf_len;
- *ppos += cur_buf_len;
- ice_fwlog_ring_increment(&hw->fwlog_ring.head,
- hw->fwlog_ring.size);
- }
-
- return data_copied;
-}
-
-/**
- * ice_debugfs_data_write - write into 'data' file
- * @filp: the opened file
- * @buf: where to find the user's data
- * @count: the length of the user's data
- * @ppos: file position offset
- */
-static ssize_t
-ice_debugfs_data_write(struct file *filp, const char __user *buf, size_t count,
- loff_t *ppos)
-{
- struct ice_pf *pf = filp->private_data;
- struct device *dev = ice_pf_to_dev(pf);
- struct ice_hw *hw = &pf->hw;
- ssize_t ret;
-
- /* don't allow partial writes */
- if (*ppos != 0)
- return 0;
-
- /* any value is allowed to clear the buffer so no need to even look at
- * what the value is
- */
- if (!(hw->fwlog_cfg.options & ICE_FWLOG_OPTION_IS_REGISTERED)) {
- hw->fwlog_ring.head = 0;
- hw->fwlog_ring.tail = 0;
- } else {
- dev_info(dev, "Can't clear FW log data while FW log running\n");
- ret = -EINVAL;
- goto nr_buffs_write_error;
- }
-
- /* if we get here, nothing went wrong; return count since we didn't
- * really write anything
- */
- ret = (ssize_t)count;
-
-nr_buffs_write_error:
- /* This function always consumes all of the written input, or produces
- * an error. Check and enforce this. Otherwise, the write operation
- * won't complete properly.
- */
- if (WARN_ON(ret != (ssize_t)count && ret >= 0))
- ret = -EIO;
-
- return ret;
-}
-
-static const struct file_operations ice_debugfs_data_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = ice_debugfs_data_read,
- .write = ice_debugfs_data_write,
-};
-
-/**
- * ice_debugfs_fwlog_init - setup the debugfs directory
- * @pf: the ice that is starting up
- */
-void ice_debugfs_fwlog_init(struct ice_pf *pf)
+int ice_debugfs_pf_init(struct ice_pf *pf)
{
const char *name = pci_name(pf->pdev);
- struct dentry *fw_modules_dir;
- struct dentry **fw_modules;
- int i;
-
- /* only support fw log commands on PF 0 */
- if (pf->hw.bus.func)
- return;
-
- /* allocate space for this first because if it fails then we don't
- * need to unwind
- */
- fw_modules = kcalloc(ICE_NR_FW_LOG_MODULES, sizeof(*fw_modules),
- GFP_KERNEL);
- if (!fw_modules)
- return;
pf->ice_debugfs_pf = debugfs_create_dir(name, ice_debugfs_root);
if (IS_ERR(pf->ice_debugfs_pf))
- goto err_create_module_files;
-
- pf->ice_debugfs_pf_fwlog = debugfs_create_dir("fwlog",
- pf->ice_debugfs_pf);
- if (IS_ERR(pf->ice_debugfs_pf))
- goto err_create_module_files;
+ return PTR_ERR(pf->ice_debugfs_pf);
- fw_modules_dir = debugfs_create_dir("modules",
- pf->ice_debugfs_pf_fwlog);
- if (IS_ERR(fw_modules_dir))
- goto err_create_module_files;
-
- for (i = 0; i < ICE_NR_FW_LOG_MODULES; i++) {
- fw_modules[i] = debugfs_create_file(ice_fwlog_module_string[i],
- 0600, fw_modules_dir, pf,
- &ice_debugfs_module_fops);
- if (IS_ERR(fw_modules[i]))
- goto err_create_module_files;
- }
-
- debugfs_create_file("nr_messages", 0600,
- pf->ice_debugfs_pf_fwlog, pf,
- &ice_debugfs_nr_messages_fops);
-
- pf->ice_debugfs_pf_fwlog_modules = fw_modules;
-
- debugfs_create_file("enable", 0600, pf->ice_debugfs_pf_fwlog,
- pf, &ice_debugfs_enable_fops);
-
- debugfs_create_file("log_size", 0600, pf->ice_debugfs_pf_fwlog,
- pf, &ice_debugfs_log_size_fops);
-
- debugfs_create_file("data", 0600, pf->ice_debugfs_pf_fwlog,
- pf, &ice_debugfs_data_fops);
-
- return;
-
-err_create_module_files:
- debugfs_remove_recursive(pf->ice_debugfs_pf_fwlog);
- kfree(fw_modules);
+ return 0;
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h
index 9dfae9bce758..bd4e66df0372 100644
--- a/drivers/net/ethernet/intel/ice/ice_devids.h
+++ b/drivers/net/ethernet/intel/ice/ice_devids.h
@@ -6,6 +6,24 @@
/* Device IDs */
#define ICE_DEV_ID_E822_SI_DFLT 0x1888
+/* Intel(R) Ethernet Controller E835-CC for backplane */
+#define ICE_DEV_ID_E835CC_BACKPLANE 0x1248
+/* Intel(R) Ethernet Controller E835-CC for QSFP */
+#define ICE_DEV_ID_E835CC_QSFP56 0x1249
+/* Intel(R) Ethernet Controller E835-CC for SFP */
+#define ICE_DEV_ID_E835CC_SFP 0x124A
+/* Intel(R) Ethernet Controller E835-C for backplane */
+#define ICE_DEV_ID_E835C_BACKPLANE 0x1261
+/* Intel(R) Ethernet Controller E835-C for QSFP */
+#define ICE_DEV_ID_E835C_QSFP 0x1262
+/* Intel(R) Ethernet Controller E835-C for SFP */
+#define ICE_DEV_ID_E835C_SFP 0x1263
+/* Intel(R) Ethernet Controller E835-L for backplane */
+#define ICE_DEV_ID_E835_L_BACKPLANE 0x1265
+/* Intel(R) Ethernet Controller E835-L for QSFP */
+#define ICE_DEV_ID_E835_L_QSFP 0x1266
+/* Intel(R) Ethernet Controller E835-L for SFP */
+#define ICE_DEV_ID_E835_L_SFP 0x1267
/* Intel(R) Ethernet Connection E823-L for backplane */
#define ICE_DEV_ID_E823L_BACKPLANE 0x124C
/* Intel(R) Ethernet Connection E823-L for SFP */
@@ -16,14 +34,26 @@
#define ICE_DEV_ID_E823L_1GBE 0x124F
/* Intel(R) Ethernet Connection E823-L for QSFP */
#define ICE_DEV_ID_E823L_QSFP 0x151D
+/* Intel(R) Ethernet Controller E830-CC for backplane */
+#define ICE_DEV_ID_E830CC_BACKPLANE 0x12D1
+/* Intel(R) Ethernet Controller E830-CC for QSFP */
+#define ICE_DEV_ID_E830CC_QSFP56 0x12D2
+/* Intel(R) Ethernet Controller E830-CC for SFP */
+#define ICE_DEV_ID_E830CC_SFP 0x12D3
+/* Intel(R) Ethernet Controller E830-CC for SFP-DD */
+#define ICE_DEV_ID_E830CC_SFP_DD 0x12D4
/* Intel(R) Ethernet Controller E830-C for backplane */
-#define ICE_DEV_ID_E830_BACKPLANE 0x12D1
+#define ICE_DEV_ID_E830C_BACKPLANE 0x12D5
/* Intel(R) Ethernet Controller E830-C for QSFP */
-#define ICE_DEV_ID_E830_QSFP56 0x12D2
+#define ICE_DEV_ID_E830C_QSFP 0x12D8
/* Intel(R) Ethernet Controller E830-C for SFP */
-#define ICE_DEV_ID_E830_SFP 0x12D3
-/* Intel(R) Ethernet Controller E830-C for SFP-DD */
-#define ICE_DEV_ID_E830_SFP_DD 0x12D4
+#define ICE_DEV_ID_E830C_SFP 0x12DA
+/* Intel(R) Ethernet Controller E830-XXV for backplane */
+#define ICE_DEV_ID_E830_XXV_BACKPLANE 0x12DC
+/* Intel(R) Ethernet Controller E830-XXV for QSFP */
+#define ICE_DEV_ID_E830_XXV_QSFP 0x12DD
+/* Intel(R) Ethernet Controller E830-XXV for SFP */
+#define ICE_DEV_ID_E830_XXV_SFP 0x12DE
/* Intel(R) Ethernet Controller E810-C for backplane */
#define ICE_DEV_ID_E810C_BACKPLANE 0x1591
/* Intel(R) Ethernet Controller E810-C for QSFP */
diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c
index e92be6f130a3..53b54e395a2e 100644
--- a/drivers/net/ethernet/intel/ice/ice_dpll.c
+++ b/drivers/net/ethernet/intel/ice/ice_dpll.c
@@ -9,6 +9,45 @@
#define ICE_CGU_STATE_ACQ_ERR_THRESHOLD 50
#define ICE_DPLL_PIN_IDX_INVALID 0xff
#define ICE_DPLL_RCLK_NUM_PER_PF 1
+#define ICE_DPLL_PIN_ESYNC_PULSE_HIGH_PERCENT 25
+#define ICE_DPLL_PIN_GEN_RCLK_FREQ 1953125
+#define ICE_DPLL_PIN_PRIO_OUTPUT 0xff
+#define ICE_DPLL_INPUT_REF_NUM 10
+#define ICE_DPLL_PHASE_OFFSET_PERIOD 2
+#define ICE_DPLL_SW_PIN_INPUT_BASE_SFP 4
+#define ICE_DPLL_SW_PIN_INPUT_BASE_QSFP 6
+#define ICE_DPLL_SW_PIN_OUTPUT_BASE 0
+
+#define ICE_DPLL_PIN_SW_INPUT_ABS(in_idx) \
+ (ICE_DPLL_SW_PIN_INPUT_BASE_SFP + (in_idx))
+
+#define ICE_DPLL_PIN_SW_1_INPUT_ABS_IDX \
+ (ICE_DPLL_PIN_SW_INPUT_ABS(ICE_DPLL_PIN_SW_1_IDX))
+
+#define ICE_DPLL_PIN_SW_2_INPUT_ABS_IDX \
+ (ICE_DPLL_PIN_SW_INPUT_ABS(ICE_DPLL_PIN_SW_2_IDX))
+
+#define ICE_DPLL_PIN_SW_OUTPUT_ABS(out_idx) \
+ (ICE_DPLL_SW_PIN_OUTPUT_BASE + (out_idx))
+
+#define ICE_DPLL_PIN_SW_1_OUTPUT_ABS_IDX \
+ (ICE_DPLL_PIN_SW_OUTPUT_ABS(ICE_DPLL_PIN_SW_1_IDX))
+
+#define ICE_DPLL_PIN_SW_2_OUTPUT_ABS_IDX \
+ (ICE_DPLL_PIN_SW_OUTPUT_ABS(ICE_DPLL_PIN_SW_2_IDX))
+
+#define ICE_SR_PFA_DPLL_DEFAULTS 0x152
+#define ICE_DPLL_PFA_REF_SYNC_TYPE 0x2420
+#define ICE_DPLL_PFA_REF_SYNC_TYPE2 0x2424
+#define ICE_DPLL_PFA_END 0xFFFF
+#define ICE_DPLL_PFA_HEADER_LEN 4
+#define ICE_DPLL_PFA_ENTRY_LEN 3
+#define ICE_DPLL_PFA_MAILBOX_REF_SYNC_PIN_S 4
+#define ICE_DPLL_PFA_MASK_OFFSET 1
+#define ICE_DPLL_PFA_VALUE_OFFSET 2
+
+#define ICE_DPLL_E810C_SFP_NC_PINS 2
+#define ICE_DPLL_E810C_SFP_NC_START 4
/**
* enum ice_dpll_pin_type - enumerate ice pin types:
@@ -16,20 +55,60 @@
* @ICE_DPLL_PIN_TYPE_INPUT: input pin
* @ICE_DPLL_PIN_TYPE_OUTPUT: output pin
* @ICE_DPLL_PIN_TYPE_RCLK_INPUT: recovery clock input pin
+ * @ICE_DPLL_PIN_TYPE_SOFTWARE: software controlled SMA/U.FL pins
*/
enum ice_dpll_pin_type {
ICE_DPLL_PIN_INVALID,
ICE_DPLL_PIN_TYPE_INPUT,
ICE_DPLL_PIN_TYPE_OUTPUT,
ICE_DPLL_PIN_TYPE_RCLK_INPUT,
+ ICE_DPLL_PIN_TYPE_SOFTWARE,
};
static const char * const pin_type_name[] = {
[ICE_DPLL_PIN_TYPE_INPUT] = "input",
[ICE_DPLL_PIN_TYPE_OUTPUT] = "output",
[ICE_DPLL_PIN_TYPE_RCLK_INPUT] = "rclk-input",
+ [ICE_DPLL_PIN_TYPE_SOFTWARE] = "software",
};
+static const char * const ice_dpll_sw_pin_sma[] = { "SMA1", "SMA2" };
+static const char * const ice_dpll_sw_pin_ufl[] = { "U.FL1", "U.FL2" };
+
+static const struct dpll_pin_frequency ice_esync_range[] = {
+ DPLL_PIN_FREQUENCY_RANGE(0, DPLL_PIN_FREQUENCY_1_HZ),
+};
+
+/**
+ * ice_dpll_is_sw_pin - check if given pin shall be controlled by SW
+ * @pf: private board structure
+ * @index: index of a pin as understood by FW
+ * @input: true for input, false for output
+ *
+ * Check if the pin shall be controlled by SW instead of providing raw access
+ * for pin control. On an E810 NIC with a dpll there is additional MUX-related
+ * logic between the SMA/U.FL pins/connectors and the dpll device, so it is
+ * best to give the user access through a series of wrapper functions, as from
+ * the user's perspective they convey a single functionality rather than
+ * separate pins.
+ *
+ * Return:
+ * * true - pin controlled by SW
+ * * false - pin not controlled by SW
+ */
+static bool ice_dpll_is_sw_pin(struct ice_pf *pf, u8 index, bool input)
+{
+ if (input && pf->hw.device_id == ICE_DEV_ID_E810C_QSFP)
+ index -= ICE_DPLL_SW_PIN_INPUT_BASE_QSFP -
+ ICE_DPLL_SW_PIN_INPUT_BASE_SFP;
+
+ if ((input && (index == ICE_DPLL_PIN_SW_1_INPUT_ABS_IDX ||
+ index == ICE_DPLL_PIN_SW_2_INPUT_ABS_IDX)) ||
+ (!input && (index == ICE_DPLL_PIN_SW_1_OUTPUT_ABS_IDX ||
+ index == ICE_DPLL_PIN_SW_2_OUTPUT_ABS_IDX)))
+ return true;
+ return false;
+}
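A worked example of the index math behind the macros above may help; it assumes ICE_DPLL_PIN_SW_1_IDX == 0 and ICE_DPLL_PIN_SW_2_IDX == 1, whose definitions are outside this hunk.

/* Worked example of the SW pin index mapping (assumed SW_1_IDX=0, SW_2_IDX=1):
 *
 *   outputs:    SW_1 -> 0 + 0 = 0,  SW_2 -> 0 + 1 = 1
 *   SFP inputs: SW_1 -> 4 + 0 = 4,  SW_2 -> 4 + 1 = 5
 *   QSFP inputs arrive from FW as 6 and 7; ice_dpll_is_sw_pin() subtracts
 *   (ICE_DPLL_SW_PIN_INPUT_BASE_QSFP - ICE_DPLL_SW_PIN_INPUT_BASE_SFP) = 2,
 *   so they compare against the same absolute indexes 4 and 5.
 */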
+
/**
* ice_dpll_is_reset - check if reset is in progress
* @pf: private board structure
@@ -89,9 +168,9 @@ ice_dpll_pin_freq_set(struct ice_pf *pf, struct ice_dpll_pin *pin,
}
if (ret) {
NL_SET_ERR_MSG_FMT(extack,
- "err:%d %s failed to set pin freq:%u on pin:%u\n",
+ "err:%d %s failed to set pin freq:%u on pin:%u",
ret,
- ice_aq_str(pf->hw.adminq.sq_last_status),
+ libie_aq_str(pf->hw.adminq.sq_last_status),
freq, pin->idx);
return ret;
}
@@ -274,6 +353,87 @@ ice_dpll_output_frequency_get(const struct dpll_pin *pin, void *pin_priv,
}
/**
+ * ice_dpll_sw_pin_frequency_set - callback to set frequency of SW pin
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: pointer to dpll
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @frequency: frequency to be set
+ * @extack: error reporting
+ *
+ * Calls the set frequency command for the corresponding active input/output
+ * pin.
+ *
+ * Context: Calls a function which acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error, pin not active or failed to set in hw
+ */
+static int
+ice_dpll_sw_pin_frequency_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u64 frequency, struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *sma = pin_priv;
+ int ret;
+
+ if (!sma->active) {
+ NL_SET_ERR_MSG(extack, "pin is not active");
+ return -EINVAL;
+ }
+ if (sma->direction == DPLL_PIN_DIRECTION_INPUT)
+ ret = ice_dpll_input_frequency_set(NULL, sma->input, dpll,
+ dpll_priv, frequency,
+ extack);
+ else
+ ret = ice_dpll_output_frequency_set(NULL, sma->output, dpll,
+ dpll_priv, frequency,
+ extack);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_sw_pin_frequency_get - callback for get frequency of SW pin
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: pointer to dpll
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @frequency: on success holds pin's frequency
+ * @extack: error reporting
+ *
+ * Calls the get frequency command for the corresponding active input/output
+ * pin.
+ *
+ * Context: Calls a function which acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error, couldn't get from hw
+ */
+static int
+ice_dpll_sw_pin_frequency_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u64 *frequency, struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *sma = pin_priv;
+ int ret;
+
+ if (!sma->active) {
+ *frequency = 0;
+ return 0;
+ }
+ if (sma->direction == DPLL_PIN_DIRECTION_INPUT) {
+ ret = ice_dpll_input_frequency_get(NULL, sma->input, dpll,
+ dpll_priv, frequency,
+ extack);
+ } else {
+ ret = ice_dpll_output_frequency_get(NULL, sma->output, dpll,
+ dpll_priv, frequency,
+ extack);
+ }
+
+ return ret;
+}
+
+/**
* ice_dpll_pin_enable - enable a pin on dplls
* @hw: board private hw structure
* @pin: pointer to a pin
@@ -316,8 +476,8 @@ ice_dpll_pin_enable(struct ice_hw *hw, struct ice_dpll_pin *pin,
}
if (ret)
NL_SET_ERR_MSG_FMT(extack,
- "err:%d %s failed to enable %s pin:%u\n",
- ret, ice_aq_str(hw->adminq.sq_last_status),
+ "err:%d %s failed to enable %s pin:%u",
+ ret, libie_aq_str(hw->adminq.sq_last_status),
pin_type_name[pin_type], pin->idx);
return ret;
@@ -361,14 +521,75 @@ ice_dpll_pin_disable(struct ice_hw *hw, struct ice_dpll_pin *pin,
}
if (ret)
NL_SET_ERR_MSG_FMT(extack,
- "err:%d %s failed to disable %s pin:%u\n",
- ret, ice_aq_str(hw->adminq.sq_last_status),
+ "err:%d %s failed to disable %s pin:%u",
+ ret, libie_aq_str(hw->adminq.sq_last_status),
pin_type_name[pin_type], pin->idx);
return ret;
}
/**
+ * ice_dpll_sw_pins_update - update status of all SW pins
+ * @pf: private board struct
+ *
+ * Determine and update the pin struct fields (direction/active) to their
+ * current values for all the SW controlled pins.
+ *
+ * Context: Call with pf->dplls.lock held
+ * Return:
+ * * 0 - OK
+ * * negative - error
+ */
+static int
+ice_dpll_sw_pins_update(struct ice_pf *pf)
+{
+ struct ice_dplls *d = &pf->dplls;
+ struct ice_dpll_pin *p;
+ u8 data = 0;
+ int ret;
+
+ ret = ice_read_sma_ctrl(&pf->hw, &data);
+ if (ret)
+ return ret;
+ /* no change since last check */
+ if (d->sma_data == data)
+ return 0;
+
+ /*
+ * SMA1/U.FL1 and SMA2/U.FL2 use different bit schemes to decide
+ * on their direction and whether they are active
+ */
+ p = &d->sma[ICE_DPLL_PIN_SW_1_IDX];
+ p->active = true;
+ p->direction = DPLL_PIN_DIRECTION_INPUT;
+ if (data & ICE_SMA1_DIR_EN) {
+ p->direction = DPLL_PIN_DIRECTION_OUTPUT;
+ if (data & ICE_SMA1_TX_EN)
+ p->active = false;
+ }
+
+ p = &d->sma[ICE_DPLL_PIN_SW_2_IDX];
+ p->active = true;
+ p->direction = DPLL_PIN_DIRECTION_INPUT;
+ if ((data & ICE_SMA2_INACTIVE_MASK) == ICE_SMA2_INACTIVE_MASK)
+ p->active = false;
+ else if (data & ICE_SMA2_DIR_EN)
+ p->direction = DPLL_PIN_DIRECTION_OUTPUT;
+
+ p = &d->ufl[ICE_DPLL_PIN_SW_1_IDX];
+ if (!(data & (ICE_SMA1_DIR_EN | ICE_SMA1_TX_EN)))
+ p->active = true;
+ else
+ p->active = false;
+
+ p = &d->ufl[ICE_DPLL_PIN_SW_2_IDX];
+ p->active = (data & ICE_SMA2_DIR_EN) && !(data & ICE_SMA2_UFL2_RX_DIS);
+ d->sma_data = data;
+
+ return 0;
+}
+
+/**
* ice_dpll_pin_state_update - update pin's state
* @pf: private board struct
* @pin: structure with pin attributes to be updated
@@ -394,8 +615,8 @@ ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin,
switch (pin_type) {
case ICE_DPLL_PIN_TYPE_INPUT:
- ret = ice_aq_get_input_pin_cfg(&pf->hw, pin->idx, NULL, NULL,
- NULL, &pin->flags[0],
+ ret = ice_aq_get_input_pin_cfg(&pf->hw, pin->idx, &pin->status,
+ NULL, NULL, &pin->flags[0],
&pin->freq, &pin->phase_adjust);
if (ret)
goto err;
@@ -430,7 +651,7 @@ ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin,
goto err;
parent &= ICE_AQC_GET_CGU_OUT_CFG_DPLL_SRC_SEL;
- if (ICE_AQC_SET_CGU_OUT_CFG_OUT_EN & pin->flags[0]) {
+ if (ICE_AQC_GET_CGU_OUT_CFG_OUT_EN & pin->flags[0]) {
pin->state[pf->dplls.eec.dpll_idx] =
parent == pf->dplls.eec.dpll_idx ?
DPLL_PIN_STATE_CONNECTED :
@@ -465,6 +686,11 @@ ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin,
DPLL_PIN_STATE_DISCONNECTED;
}
break;
+ case ICE_DPLL_PIN_TYPE_SOFTWARE:
+ ret = ice_dpll_sw_pins_update(pf);
+ if (ret)
+ goto err;
+ break;
default:
return -EINVAL;
}
@@ -473,15 +699,15 @@ ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin,
err:
if (extack)
NL_SET_ERR_MSG_FMT(extack,
- "err:%d %s failed to update %s pin:%u\n",
+ "err:%d %s failed to update %s pin:%u",
ret,
- ice_aq_str(pf->hw.adminq.sq_last_status),
+ libie_aq_str(pf->hw.adminq.sq_last_status),
pin_type_name[pin_type], pin->idx);
else
dev_err_ratelimited(ice_pf_to_dev(pf),
"err:%d %s failed to update %s pin:%u\n",
ret,
- ice_aq_str(pf->hw.adminq.sq_last_status),
+ libie_aq_str(pf->hw.adminq.sq_last_status),
pin_type_name[pin_type], pin->idx);
return ret;
}
@@ -512,9 +738,9 @@ ice_dpll_hw_input_prio_set(struct ice_pf *pf, struct ice_dpll *dpll,
(u8)prio);
if (ret)
NL_SET_ERR_MSG_FMT(extack,
- "err:%d %s failed to set pin prio:%u on pin:%u\n",
+ "err:%d %s failed to set pin prio:%u on pin:%u",
ret,
- ice_aq_str(pf->hw.adminq.sq_last_status),
+ libie_aq_str(pf->hw.adminq.sq_last_status),
prio, pin->idx);
else
dpll->input_prio[pin->idx] = prio;
@@ -582,6 +808,67 @@ static int ice_dpll_mode_get(const struct dpll_device *dpll, void *dpll_priv,
}
/**
+ * ice_dpll_phase_offset_monitor_set - set phase offset monitor state
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @state: feature state to be set
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Enable/disable phase offset monitor feature of dpll.
+ *
+ * Context: Acquires and releases pf->dplls.lock
+ * Return: 0 - success
+ */
+static int ice_dpll_phase_offset_monitor_set(const struct dpll_device *dpll,
+ void *dpll_priv,
+ enum dpll_feature_state state,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+
+ mutex_lock(&pf->dplls.lock);
+ if (state == DPLL_FEATURE_STATE_ENABLE)
+ d->phase_offset_monitor_period = ICE_DPLL_PHASE_OFFSET_PERIOD;
+ else
+ d->phase_offset_monitor_period = 0;
+ mutex_unlock(&pf->dplls.lock);
+
+ return 0;
+}
+
+/**
+ * ice_dpll_phase_offset_monitor_get - get phase offset monitor state
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @state: on success holds current state of phase offset monitor
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Provides the current state of the phase offset
+ * monitor feature on the dpll device.
+ *
+ * Context: Acquires and releases pf->dplls.lock
+ * Return: 0 - success
+ */
+static int ice_dpll_phase_offset_monitor_get(const struct dpll_device *dpll,
+ void *dpll_priv,
+ enum dpll_feature_state *state,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+
+ mutex_lock(&pf->dplls.lock);
+ if (d->phase_offset_monitor_period)
+ *state = DPLL_FEATURE_STATE_ENABLE;
+ else
+ *state = DPLL_FEATURE_STATE_DISABLE;
+ mutex_unlock(&pf->dplls.lock);
+
+ return 0;
+}
+
+/**
* ice_dpll_pin_state_set - set pin's state on dpll
* @pin: pointer to a pin
* @pin_priv: private data pointer passed on pin registration
@@ -651,6 +938,8 @@ ice_dpll_output_state_set(const struct dpll_pin *pin, void *pin_priv,
struct ice_dpll_pin *p = pin_priv;
struct ice_dpll *d = dpll_priv;
+ if (state == DPLL_PIN_STATE_SELECTABLE)
+ return -EINVAL;
if (!enable && p->state[d->dpll_idx] == DPLL_PIN_STATE_DISCONNECTED)
return 0;
@@ -785,6 +1074,270 @@ ice_dpll_input_state_get(const struct dpll_pin *pin, void *pin_priv,
}
/**
+ * ice_dpll_sma_direction_set - set direction of SMA pin
+ * @p: pointer to a pin
+ * @direction: requested direction of the pin
+ * @extack: error reporting
+ *
+ * Wrapper for dpll subsystem callback. Set direction of a SMA pin.
+ *
+ * Context: Call with pf->dplls.lock held
+ * Return:
+ * * 0 - success
+ * * negative - failed to set the direction
+ */
+static int ice_dpll_sma_direction_set(struct ice_dpll_pin *p,
+ enum dpll_pin_direction direction,
+ struct netlink_ext_ack *extack)
+{
+ u8 data;
+ int ret;
+
+ if (p->direction == direction && p->active)
+ return 0;
+ ret = ice_read_sma_ctrl(&p->pf->hw, &data);
+ if (ret)
+ return ret;
+
+ switch (p->idx) {
+ case ICE_DPLL_PIN_SW_1_IDX:
+ data &= ~ICE_SMA1_MASK;
+ if (direction == DPLL_PIN_DIRECTION_OUTPUT)
+ data |= ICE_SMA1_DIR_EN;
+ break;
+ case ICE_DPLL_PIN_SW_2_IDX:
+ if (direction == DPLL_PIN_DIRECTION_INPUT) {
+ data &= ~ICE_SMA2_DIR_EN;
+ } else {
+ data &= ~ICE_SMA2_TX_EN;
+ data |= ICE_SMA2_DIR_EN;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ ret = ice_write_sma_ctrl(&p->pf->hw, data);
+ if (!ret)
+ ret = ice_dpll_pin_state_update(p->pf, p,
+ ICE_DPLL_PIN_TYPE_SOFTWARE,
+ extack);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_ufl_pin_state_set - set U.FL pin state on dpll device
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @state: requested state of the pin
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Set the state of a pin.
+ *
+ * Context: Acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_ufl_pin_state_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_state state,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv, *target;
+ struct ice_dpll *d = dpll_priv;
+ enum ice_dpll_pin_type type;
+ struct ice_pf *pf = p->pf;
+ struct ice_hw *hw;
+ bool enable;
+ u8 data;
+ int ret;
+
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
+ mutex_lock(&pf->dplls.lock);
+ hw = &pf->hw;
+ ret = ice_read_sma_ctrl(hw, &data);
+ if (ret)
+ goto unlock;
+
+ ret = -EINVAL;
+ switch (p->idx) {
+ case ICE_DPLL_PIN_SW_1_IDX:
+ if (state == DPLL_PIN_STATE_CONNECTED) {
+ data &= ~ICE_SMA1_MASK;
+ enable = true;
+ } else if (state == DPLL_PIN_STATE_DISCONNECTED) {
+ data |= ICE_SMA1_TX_EN;
+ enable = false;
+ } else {
+ goto unlock;
+ }
+ target = p->output;
+ type = ICE_DPLL_PIN_TYPE_OUTPUT;
+ break;
+ case ICE_DPLL_PIN_SW_2_IDX:
+ if (state == DPLL_PIN_STATE_SELECTABLE) {
+ data |= ICE_SMA2_DIR_EN;
+ data &= ~ICE_SMA2_UFL2_RX_DIS;
+ enable = true;
+ } else if (state == DPLL_PIN_STATE_DISCONNECTED) {
+ data |= ICE_SMA2_UFL2_RX_DIS;
+ enable = false;
+ } else {
+ goto unlock;
+ }
+ target = p->input;
+ type = ICE_DPLL_PIN_TYPE_INPUT;
+ break;
+ default:
+ goto unlock;
+ }
+
+ ret = ice_write_sma_ctrl(hw, data);
+ if (ret)
+ goto unlock;
+ ret = ice_dpll_pin_state_update(pf, p, ICE_DPLL_PIN_TYPE_SOFTWARE,
+ extack);
+ if (ret)
+ goto unlock;
+
+ if (enable)
+ ret = ice_dpll_pin_enable(hw, target, d->dpll_idx, type, extack);
+ else
+ ret = ice_dpll_pin_disable(hw, target, type, extack);
+ if (!ret)
+ ret = ice_dpll_pin_state_update(pf, target, type, extack);
+
+unlock:
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_sw_pin_state_get - get SW pin state
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @state: on success holds state of the pin
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Check state of a SW pin.
+ *
+ * Context: Acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_sw_pin_state_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_state *state,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = p->pf;
+ int ret = 0;
+
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+ mutex_lock(&pf->dplls.lock);
+ if (!p->active) {
+ *state = DPLL_PIN_STATE_DISCONNECTED;
+ goto unlock;
+ }
+
+ if (p->direction == DPLL_PIN_DIRECTION_INPUT) {
+ ret = ice_dpll_pin_state_update(pf, p->input,
+ ICE_DPLL_PIN_TYPE_INPUT,
+ extack);
+ if (ret)
+ goto unlock;
+ *state = p->input->state[d->dpll_idx];
+ } else {
+ ret = ice_dpll_pin_state_update(pf, p->output,
+ ICE_DPLL_PIN_TYPE_OUTPUT,
+ extack);
+ if (ret)
+ goto unlock;
+ *state = p->output->state[d->dpll_idx];
+ }
+unlock:
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_sma_pin_state_set - set SMA pin state on dpll device
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @state: requested state of the pin
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Set state of a pin.
+ *
+ * Context: Acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - failed to set the state
+ */
+static int
+ice_dpll_sma_pin_state_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_state state,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *sma = pin_priv, *target;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = sma->pf;
+ enum ice_dpll_pin_type type;
+ bool enable;
+ int ret;
+
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
+ mutex_lock(&pf->dplls.lock);
+ if (!sma->active) {
+ ret = ice_dpll_sma_direction_set(sma, sma->direction, extack);
+ if (ret)
+ goto unlock;
+ }
+ if (sma->direction == DPLL_PIN_DIRECTION_INPUT) {
+ enable = state == DPLL_PIN_STATE_SELECTABLE;
+ target = sma->input;
+ type = ICE_DPLL_PIN_TYPE_INPUT;
+ } else {
+ enable = state == DPLL_PIN_STATE_CONNECTED;
+ target = sma->output;
+ type = ICE_DPLL_PIN_TYPE_OUTPUT;
+ }
+
+ if (enable)
+ ret = ice_dpll_pin_enable(&pf->hw, target, d->dpll_idx, type,
+ extack);
+ else
+ ret = ice_dpll_pin_disable(&pf->hw, target, type, extack);
+ if (!ret)
+ ret = ice_dpll_pin_state_update(pf, target, type, extack);
+
+unlock:
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
+/**
* ice_dpll_input_prio_get - get dpll's input prio
* @pin: pointer to a pin
* @pin_priv: private data pointer passed on pin registration
@@ -852,6 +1405,47 @@ ice_dpll_input_prio_set(const struct dpll_pin *pin, void *pin_priv,
return ret;
}
+static int
+ice_dpll_sw_input_prio_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u32 *prio, struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+
+ mutex_lock(&pf->dplls.lock);
+ if (p->input && p->direction == DPLL_PIN_DIRECTION_INPUT)
+ *prio = d->input_prio[p->input->idx];
+ else
+ *prio = ICE_DPLL_PIN_PRIO_OUTPUT;
+ mutex_unlock(&pf->dplls.lock);
+
+ return 0;
+}
+
+static int
+ice_dpll_sw_input_prio_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u32 prio, struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+ int ret;
+
+ if (!p->input || p->direction != DPLL_PIN_DIRECTION_INPUT)
+ return -EINVAL;
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
+ mutex_lock(&pf->dplls.lock);
+ ret = ice_dpll_hw_input_prio_set(pf, d, p->input, prio, extack);
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
/**
* ice_dpll_input_direction - callback for get input pin direction
* @pin: pointer to a pin
@@ -903,6 +1497,76 @@ ice_dpll_output_direction(const struct dpll_pin *pin, void *pin_priv,
}
/**
+ * ice_dpll_pin_sma_direction_set - callback for set SMA pin direction
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @direction: requested pin direction
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for setting direction of a SMA pin.
+ *
+ * Context: Acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_pin_sma_direction_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_direction direction,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_pf *pf = p->pf;
+ int ret;
+
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
+ mutex_lock(&pf->dplls.lock);
+ ret = ice_dpll_sma_direction_set(p, direction, extack);
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_pin_sw_direction_get - callback for get SW pin direction
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @direction: on success holds pin direction
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for getting direction of a SW pin.
+ *
+ * Context: Acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_pin_sw_direction_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_direction *direction,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_pf *pf = p->pf;
+
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+ mutex_lock(&pf->dplls.lock);
+ *direction = p->direction;
+ mutex_unlock(&pf->dplls.lock);
+
+ return 0;
+}
+
+/**
* ice_dpll_pin_phase_adjust_get - callback for get pin phase adjust value
* @pin: pointer to a pin
* @pin_priv: private data pointer passed on pin registration
@@ -996,9 +1660,9 @@ ice_dpll_pin_phase_adjust_set(const struct dpll_pin *pin, void *pin_priv,
mutex_unlock(&pf->dplls.lock);
if (ret)
NL_SET_ERR_MSG_FMT(extack,
- "err:%d %s failed to set pin phase_adjust:%d for pin:%u on dpll:%u\n",
+ "err:%d %s failed to set pin phase_adjust:%d for pin:%u on dpll:%u",
ret,
- ice_aq_str(pf->hw.adminq.sq_last_status),
+ libie_aq_str(pf->hw.adminq.sq_last_status),
phase_adjust, p->idx, d->dpll_idx);
return ret;
@@ -1016,7 +1680,7 @@ ice_dpll_pin_phase_adjust_set(const struct dpll_pin *pin, void *pin_priv,
* Dpll subsystem callback. Wraps a handler for setting phase adjust on input
* pin.
*
- * Context: Calls a function which acquires pf->dplls.lock
+ * Context: Calls a function which acquires and releases pf->dplls.lock
* Return:
* * 0 - success
* * negative - error
@@ -1060,6 +1724,82 @@ ice_dpll_output_phase_adjust_set(const struct dpll_pin *pin, void *pin_priv,
ICE_DPLL_PIN_TYPE_OUTPUT);
}
+/**
+ * ice_dpll_sw_phase_adjust_get - callback for get SW pin phase adjust
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @phase_adjust: on success holds phase adjust value
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Wraps a handler for getting phase adjust on sw
+ * pin.
+ *
+ * Context: Calls a function which acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_sw_phase_adjust_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ s32 *phase_adjust,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+
+ if (p->direction == DPLL_PIN_DIRECTION_INPUT)
+ return ice_dpll_pin_phase_adjust_get(p->input->pin, p->input,
+ dpll, dpll_priv,
+ phase_adjust, extack);
+ else
+ return ice_dpll_pin_phase_adjust_get(p->output->pin, p->output,
+ dpll, dpll_priv,
+ phase_adjust, extack);
+}
+
+/**
+ * ice_dpll_sw_phase_adjust_set - callback for set SW pin phase adjust value
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @phase_adjust: phase_adjust to be set
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Wraps a handler for setting phase adjust on a SW
+ * pin.
+ *
+ * Context: Calls a function which acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_sw_phase_adjust_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ s32 phase_adjust,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+
+ if (!p->active) {
+ NL_SET_ERR_MSG(extack, "pin is not active");
+ return -EINVAL;
+ }
+ if (p->direction == DPLL_PIN_DIRECTION_INPUT)
+ return ice_dpll_pin_phase_adjust_set(p->input->pin, p->input,
+ dpll, dpll_priv,
+ phase_adjust, extack,
+ ICE_DPLL_PIN_TYPE_INPUT);
+ else
+ return ice_dpll_pin_phase_adjust_set(p->output->pin, p->output,
+ dpll, dpll_priv,
+ phase_adjust, extack,
+ ICE_DPLL_PIN_TYPE_OUTPUT);
+}
+
#define ICE_DPLL_PHASE_OFFSET_DIVIDER 100
#define ICE_DPLL_PHASE_OFFSET_FACTOR \
(DPLL_PHASE_OFFSET_DIVIDER / ICE_DPLL_PHASE_OFFSET_DIVIDER)
@@ -1085,12 +1825,16 @@ ice_dpll_phase_offset_get(const struct dpll_pin *pin, void *pin_priv,
const struct dpll_device *dpll, void *dpll_priv,
s64 *phase_offset, struct netlink_ext_ack *extack)
{
+ struct ice_dpll_pin *p = pin_priv;
struct ice_dpll *d = dpll_priv;
struct ice_pf *pf = d->pf;
mutex_lock(&pf->dplls.lock);
- if (d->active_input == pin)
+ if (d->active_input == pin || (p->input &&
+ d->active_input == p->input->pin))
*phase_offset = d->phase_offset * ICE_DPLL_PHASE_OFFSET_FACTOR;
+ else if (d->phase_offset_monitor_period)
+ *phase_offset = p->phase_offset * ICE_DPLL_PHASE_OFFSET_FACTOR;
else
*phase_offset = 0;
mutex_unlock(&pf->dplls.lock);
@@ -1099,6 +1843,427 @@ ice_dpll_phase_offset_get(const struct dpll_pin *pin, void *pin_priv,
}
/**
+ * ice_dpll_output_esync_set - callback for setting embedded sync
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @freq: requested embedded sync frequency
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for setting embedded sync frequency value
+ * on output pin.
+ *
+ * Context: Acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_output_esync_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u64 freq, struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+ u8 flags = 0;
+ int ret;
+
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+ mutex_lock(&pf->dplls.lock);
+ if (p->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_OUT_EN)
+ flags = ICE_AQC_SET_CGU_OUT_CFG_OUT_EN;
+ if (freq == DPLL_PIN_FREQUENCY_1_HZ) {
+ if (p->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_ESYNC_EN) {
+ ret = 0;
+ } else {
+ flags |= ICE_AQC_SET_CGU_OUT_CFG_ESYNC_EN;
+ ret = ice_aq_set_output_pin_cfg(&pf->hw, p->idx, flags,
+ 0, 0, 0);
+ }
+ } else {
+ if (!(p->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_ESYNC_EN)) {
+ ret = 0;
+ } else {
+ flags &= ~ICE_AQC_SET_CGU_OUT_CFG_ESYNC_EN;
+ ret = ice_aq_set_output_pin_cfg(&pf->hw, p->idx, flags,
+ 0, 0, 0);
+ }
+ }
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_output_esync_get - callback for getting embedded sync config
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @esync: on success holds embedded sync pin properties
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for getting embedded sync frequency value
+ * and capabilities on output pin.
+ *
+ * Context: Acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_output_esync_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ struct dpll_pin_esync *esync,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+ mutex_lock(&pf->dplls.lock);
+ if (!(p->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_ESYNC_ABILITY) ||
+ p->freq != DPLL_PIN_FREQUENCY_10_MHZ) {
+ mutex_unlock(&pf->dplls.lock);
+ return -EOPNOTSUPP;
+ }
+ esync->range = ice_esync_range;
+ esync->range_num = ARRAY_SIZE(ice_esync_range);
+ if (p->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_ESYNC_EN) {
+ esync->freq = DPLL_PIN_FREQUENCY_1_HZ;
+ esync->pulse = ICE_DPLL_PIN_ESYNC_PULSE_HIGH_PERCENT;
+ } else {
+ esync->freq = 0;
+ esync->pulse = 0;
+ }
+ mutex_unlock(&pf->dplls.lock);
+
+ return 0;
+}
+
+/**
+ * ice_dpll_input_esync_set - callback for setting embedded sync
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @freq: requested embedded sync frequency
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for setting embedded sync frequency value
+ * on input pin.
+ *
+ * Context: Acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_input_esync_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u64 freq, struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+ u8 flags_en = 0;
+ int ret;
+
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+ mutex_lock(&pf->dplls.lock);
+ if (p->flags[0] & ICE_AQC_GET_CGU_IN_CFG_FLG2_INPUT_EN)
+ flags_en = ICE_AQC_SET_CGU_IN_CFG_FLG2_INPUT_EN;
+ if (freq == DPLL_PIN_FREQUENCY_1_HZ) {
+ if (p->flags[0] & ICE_AQC_GET_CGU_IN_CFG_FLG2_ESYNC_EN) {
+ ret = 0;
+ } else {
+ flags_en |= ICE_AQC_SET_CGU_IN_CFG_FLG2_ESYNC_EN;
+ ret = ice_aq_set_input_pin_cfg(&pf->hw, p->idx, 0,
+ flags_en, 0, 0);
+ }
+ } else {
+ if (!(p->flags[0] & ICE_AQC_GET_CGU_IN_CFG_FLG2_ESYNC_EN)) {
+ ret = 0;
+ } else {
+ flags_en &= ~ICE_AQC_SET_CGU_IN_CFG_FLG2_ESYNC_EN;
+ ret = ice_aq_set_input_pin_cfg(&pf->hw, p->idx, 0,
+ flags_en, 0, 0);
+ }
+ }
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_input_esync_get - callback for getting embedded sync config
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @esync: on success holds embedded sync pin properties
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for getting embedded sync frequency value
+ * and capabilities on input pin.
+ *
+ * Context: Acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_input_esync_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ struct dpll_pin_esync *esync,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+ mutex_lock(&pf->dplls.lock);
+ if (!(p->status & ICE_AQC_GET_CGU_IN_CFG_STATUS_ESYNC_CAP) ||
+ p->freq != DPLL_PIN_FREQUENCY_10_MHZ) {
+ mutex_unlock(&pf->dplls.lock);
+ return -EOPNOTSUPP;
+ }
+ esync->range = ice_esync_range;
+ esync->range_num = ARRAY_SIZE(ice_esync_range);
+ if (p->flags[0] & ICE_AQC_GET_CGU_IN_CFG_FLG2_ESYNC_EN) {
+ esync->freq = DPLL_PIN_FREQUENCY_1_HZ;
+ esync->pulse = ICE_DPLL_PIN_ESYNC_PULSE_HIGH_PERCENT;
+ } else {
+ esync->freq = 0;
+ esync->pulse = 0;
+ }
+ mutex_unlock(&pf->dplls.lock);
+
+ return 0;
+}
+
+/**
+ * ice_dpll_sw_esync_set - callback for setting embedded sync on SW pin
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @freq: requested embedded sync frequency
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for setting embedded sync frequency value
+ * on SW pin.
+ *
+ * Context: Calls a function which acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_sw_esync_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u64 freq, struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+
+ if (!p->active) {
+ NL_SET_ERR_MSG(extack, "pin is not active");
+ return -EINVAL;
+ }
+ if (p->direction == DPLL_PIN_DIRECTION_INPUT)
+ return ice_dpll_input_esync_set(p->input->pin, p->input, dpll,
+ dpll_priv, freq, extack);
+ else
+ return ice_dpll_output_esync_set(p->output->pin, p->output,
+ dpll, dpll_priv, freq, extack);
+}
+
+/**
+ * ice_dpll_sw_esync_get - callback for getting embedded sync on SW pin
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @esync: on success holds embedded sync frequency and properties
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for getting embedded sync frequency value
+ * of SW pin.
+ *
+ * Context: Calls a function which acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_sw_esync_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ struct dpll_pin_esync *esync,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+
+ if (p->direction == DPLL_PIN_DIRECTION_INPUT)
+ return ice_dpll_input_esync_get(p->input->pin, p->input, dpll,
+ dpll_priv, esync, extack);
+ else
+ return ice_dpll_output_esync_get(p->output->pin, p->output,
+ dpll, dpll_priv, esync,
+ extack);
+}
+
+/**
+ * ice_dpll_input_ref_sync_set - callback for setting reference sync feature
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @ref_pin: pin pointer for reference sync pair
+ * @ref_pin_priv: private data pointer of ref_pin
+ * @state: requested state for reference sync for pin pair
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for setting reference sync feature for
+ * input pin.
+ *
+ * Context: Acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_input_ref_sync_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_pin *ref_pin, void *ref_pin_priv,
+ const enum dpll_pin_state state,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_pf *pf = p->pf;
+ u8 flags_en = 0;
+ int ret;
+
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+ mutex_lock(&pf->dplls.lock);
+
+ if (p->flags[0] & ICE_AQC_GET_CGU_IN_CFG_FLG2_INPUT_EN)
+ flags_en = ICE_AQC_SET_CGU_IN_CFG_FLG2_INPUT_EN;
+ if (state == DPLL_PIN_STATE_CONNECTED)
+ flags_en |= ICE_AQC_CGU_IN_CFG_FLG2_REFSYNC_EN;
+ ret = ice_aq_set_input_pin_cfg(&pf->hw, p->idx, 0, flags_en, 0, 0);
+ if (!ret)
+ ret = ice_dpll_pin_state_update(pf, p, ICE_DPLL_PIN_TYPE_INPUT,
+ extack);
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_input_ref_sync_get - callback for getting reference sync config
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @ref_pin: pin pointer for reference sync pair
+ * @ref_pin_priv: private data pointer of ref_pin
+ * @state: on success holds reference sync state for pin pair
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for getting reference sync feature state
+ * for input pin.
+ *
+ * Context: Acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_input_ref_sync_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_pin *ref_pin, void *ref_pin_priv,
+ enum dpll_pin_state *state,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_pf *pf = p->pf;
+
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+ mutex_lock(&pf->dplls.lock);
+ if (p->flags[0] & ICE_AQC_CGU_IN_CFG_FLG2_REFSYNC_EN)
+ *state = DPLL_PIN_STATE_CONNECTED;
+ else
+ *state = DPLL_PIN_STATE_DISCONNECTED;
+ mutex_unlock(&pf->dplls.lock);
+
+ return 0;
+}
+
+/**
+ * ice_dpll_sw_input_ref_sync_set - callback for setting reference sync feature
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @ref_pin: pin pointer for reference sync pair
+ * @ref_pin_priv: private data pointer of ref_pin
+ * @state: requested state for reference sync for pin pair
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for setting reference sync
+ * feature for input pins.
+ *
+ * Context: Calls a function which acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_sw_input_ref_sync_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_pin *ref_pin,
+ void *ref_pin_priv,
+ const enum dpll_pin_state state,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+
+ return ice_dpll_input_ref_sync_set(pin, p->input, ref_pin, ref_pin_priv,
+ state, extack);
+}
+
+/**
+ * ice_dpll_sw_input_ref_sync_get - callback for getting reference sync config
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @ref_pin: pin pointer for reference sync pair
+ * @ref_pin_priv: private data pointer of ref_pin
+ * @state: on success holds reference sync state for pin pair
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for getting reference sync feature state
+ * for input pins.
+ *
+ * Context: Calls a function which acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_sw_input_ref_sync_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_pin *ref_pin,
+ void *ref_pin_priv,
+ enum dpll_pin_state *state,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+
+ return ice_dpll_input_ref_sync_get(pin, p->input, ref_pin, ref_pin_priv,
+ state, extack);
+}
+
+/**
* ice_dpll_rclk_state_on_pin_set - set a state on rclk pin
* @pin: pointer to a pin
* @pin_priv: private data pointer passed on pin registration
@@ -1146,9 +2311,9 @@ ice_dpll_rclk_state_on_pin_set(const struct dpll_pin *pin, void *pin_priv,
&p->freq);
if (ret)
NL_SET_ERR_MSG_FMT(extack,
- "err:%d %s failed to set pin state:%u for pin:%u on parent:%u\n",
+ "err:%d %s failed to set pin state:%u for pin:%u on parent:%u",
ret,
- ice_aq_str(pf->hw.adminq.sq_last_status),
+ libie_aq_str(pf->hw.adminq.sq_last_status),
state, p->idx, parent->idx);
unlock:
mutex_unlock(&pf->dplls.lock);
@@ -1211,6 +2376,37 @@ static const struct dpll_pin_ops ice_dpll_rclk_ops = {
.direction_get = ice_dpll_input_direction,
};
+static const struct dpll_pin_ops ice_dpll_pin_sma_ops = {
+ .state_on_dpll_set = ice_dpll_sma_pin_state_set,
+ .state_on_dpll_get = ice_dpll_sw_pin_state_get,
+ .direction_get = ice_dpll_pin_sw_direction_get,
+ .direction_set = ice_dpll_pin_sma_direction_set,
+ .prio_get = ice_dpll_sw_input_prio_get,
+ .prio_set = ice_dpll_sw_input_prio_set,
+ .frequency_get = ice_dpll_sw_pin_frequency_get,
+ .frequency_set = ice_dpll_sw_pin_frequency_set,
+ .phase_adjust_get = ice_dpll_sw_phase_adjust_get,
+ .phase_adjust_set = ice_dpll_sw_phase_adjust_set,
+ .phase_offset_get = ice_dpll_phase_offset_get,
+ .esync_set = ice_dpll_sw_esync_set,
+ .esync_get = ice_dpll_sw_esync_get,
+ .ref_sync_set = ice_dpll_sw_input_ref_sync_set,
+ .ref_sync_get = ice_dpll_sw_input_ref_sync_get,
+};
+
+static const struct dpll_pin_ops ice_dpll_pin_ufl_ops = {
+ .state_on_dpll_set = ice_dpll_ufl_pin_state_set,
+ .state_on_dpll_get = ice_dpll_sw_pin_state_get,
+ .direction_get = ice_dpll_pin_sw_direction_get,
+ .frequency_get = ice_dpll_sw_pin_frequency_get,
+ .frequency_set = ice_dpll_sw_pin_frequency_set,
+ .esync_set = ice_dpll_sw_esync_set,
+ .esync_get = ice_dpll_sw_esync_get,
+ .phase_adjust_get = ice_dpll_sw_phase_adjust_get,
+ .phase_adjust_set = ice_dpll_sw_phase_adjust_set,
+ .phase_offset_get = ice_dpll_phase_offset_get,
+};
+
static const struct dpll_pin_ops ice_dpll_input_ops = {
.frequency_get = ice_dpll_input_frequency_get,
.frequency_set = ice_dpll_input_frequency_set,
@@ -1222,6 +2418,10 @@ static const struct dpll_pin_ops ice_dpll_input_ops = {
.phase_adjust_get = ice_dpll_pin_phase_adjust_get,
.phase_adjust_set = ice_dpll_input_phase_adjust_set,
.phase_offset_get = ice_dpll_phase_offset_get,
+ .esync_set = ice_dpll_input_esync_set,
+ .esync_get = ice_dpll_input_esync_get,
+ .ref_sync_set = ice_dpll_input_ref_sync_set,
+ .ref_sync_get = ice_dpll_input_ref_sync_get,
};
static const struct dpll_pin_ops ice_dpll_output_ops = {
@@ -1232,6 +2432,8 @@ static const struct dpll_pin_ops ice_dpll_output_ops = {
.direction_get = ice_dpll_output_direction,
.phase_adjust_get = ice_dpll_pin_phase_adjust_get,
.phase_adjust_set = ice_dpll_output_phase_adjust_set,
+ .esync_set = ice_dpll_output_esync_set,
+ .esync_get = ice_dpll_output_esync_get,
};
static const struct dpll_device_ops ice_dpll_ops = {
@@ -1239,6 +2441,13 @@ static const struct dpll_device_ops ice_dpll_ops = {
.mode_get = ice_dpll_mode_get,
};
+static const struct dpll_device_ops ice_dpll_pom_ops = {
+ .lock_status_get = ice_dpll_lock_status_get,
+ .mode_get = ice_dpll_mode_get,
+ .phase_offset_monitor_set = ice_dpll_phase_offset_monitor_set,
+ .phase_offset_monitor_get = ice_dpll_phase_offset_monitor_get,
+};
+
/**
* ice_generate_clock_id - generates unique clock_id for registering dpll.
* @pf: board private structure
@@ -1284,6 +2493,110 @@ static void ice_dpll_notify_changes(struct ice_dpll *d)
}
/**
+ * ice_dpll_is_pps_phase_monitor - check if dpll is capable of phase offset monitoring
+ * @pf: pf private structure
+ *
+ * Check if firmware is capable of supporting admin command to provide
+ * phase offset monitoring on all the input pins on PPS dpll.
+ *
+ * Return:
+ * * true - PPS dpll phase offset monitoring is supported
+ * * false - PPS dpll phase offset monitoring is not supported
+ */
+static bool ice_dpll_is_pps_phase_monitor(struct ice_pf *pf)
+{
+ struct ice_cgu_input_measure meas[ICE_DPLL_INPUT_REF_NUM];
+ int ret = ice_aq_get_cgu_input_pin_measure(&pf->hw, DPLL_TYPE_PPS, meas,
+ ARRAY_SIZE(meas));
+
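+ /* firmware that does not support the measure command rejects it with
+  * ESRCH; treat any other outcome as the capability being present
+  */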
+ if (ret && pf->hw.adminq.sq_last_status == LIBIE_AQ_RC_ESRCH)
+ return false;
+
+ return true;
+}
+
+/**
+ * ice_dpll_pins_notify_mask - notify dpll subsystem about bulk pin changes
+ * @pins: array of ice_dpll_pin pointers registered within dpll subsystem
+ * @pin_num: number of pins
+ * @phase_offset_ntf_mask: bitmask of pin indexes to notify
+ *
+ * Iterate over array of pins and call dpll subsystem pin notify if
+ * corresponding pin index within bitmask is set.
+ *
+ * Context: Must be called with pf->dplls.lock not held.
+ */
+static void ice_dpll_pins_notify_mask(struct ice_dpll_pin *pins,
+ u8 pin_num,
+ u32 phase_offset_ntf_mask)
+{
+ int i = 0;
+
+ for (i = 0; i < pin_num; i++)
+ if (phase_offset_ntf_mask & (1 << i))
+ dpll_pin_change_ntf(pins[i].pin);
+}
+
+/**
+ * ice_dpll_pps_update_phase_offsets - update phase offset measurements
+ * @pf: pf private structure
+ * @phase_offset_pins_updated: returns mask of updated input pin indexes
+ *
+ * Read phase offset measurements for the PPS dpll device and store the values
+ * in the input pins array. On success, phase_offset_pins_updated holds a
+ * bitmask of updated input pin indexes; those pins shall be notified.
+ *
+ * Context: Shall be called with pf->dplls.lock being locked.
+ * Return:
+ * * 0 - success or no data available
+ * * negative - AQ failure
+ */
+static int ice_dpll_pps_update_phase_offsets(struct ice_pf *pf,
+ u32 *phase_offset_pins_updated)
+{
+ struct ice_cgu_input_measure meas[ICE_DPLL_INPUT_REF_NUM];
+ struct ice_dpll_pin *p;
+ s64 phase_offset, tmp;
+ int i, j, ret;
+
+ *phase_offset_pins_updated = 0;
+ ret = ice_aq_get_cgu_input_pin_measure(&pf->hw, DPLL_TYPE_PPS, meas,
+ ARRAY_SIZE(meas));
+ if (ret && pf->hw.adminq.sq_last_status == LIBIE_AQ_RC_EAGAIN) {
+ return 0;
+ } else if (ret) {
+ dev_err(ice_pf_to_dev(pf),
+ "failed to get input pin measurements dpll=%d, ret=%d %s\n",
+ DPLL_TYPE_PPS, ret,
+ libie_aq_str(pf->hw.adminq.sq_last_status));
+ return ret;
+ }
+ for (i = 0; i < pf->dplls.num_inputs; i++) {
+ p = &pf->dplls.inputs[i];
+ phase_offset = 0;
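+ /* combine the per-byte measurement into one value (byte order depends
+  * on host endianness), then sign-extend from bit 47 below to get a
+  * signed phase offset
+  */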
+ for (j = 0; j < ICE_CGU_INPUT_PHASE_OFFSET_BYTES; j++) {
+ tmp = meas[i].phase_offset[j];
+#ifdef __LITTLE_ENDIAN
+ phase_offset += tmp << 8 * j;
+#else
+ phase_offset += tmp << 8 *
+ (ICE_CGU_INPUT_PHASE_OFFSET_BYTES - 1 - j);
+#endif
+ }
+ phase_offset = sign_extend64(phase_offset, 47);
+ if (p->phase_offset != phase_offset) {
+ dev_dbg(ice_pf_to_dev(pf),
+ "phase offset changed for pin:%d old:%llx, new:%llx\n",
+ p->idx, p->phase_offset, phase_offset);
+ p->phase_offset = phase_offset;
+ *phase_offset_pins_updated |= (1 << i);
+ }
+ }
+
+ return 0;
+}
+
+/**
* ice_dpll_update_state - update dpll state
* @pf: pf private structure
* @d: pointer to queried dpll device
@@ -1314,7 +2627,7 @@ ice_dpll_update_state(struct ice_pf *pf, struct ice_dpll *d, bool init)
dev_err(ice_pf_to_dev(pf),
"update dpll=%d state failed, ret=%d %s\n",
d->dpll_idx, ret,
- ice_aq_str(pf->hw.adminq.sq_last_status));
+ libie_aq_str(pf->hw.adminq.sq_last_status));
return ret;
}
if (init) {
@@ -1369,14 +2682,19 @@ static void ice_dpll_periodic_work(struct kthread_work *work)
struct ice_pf *pf = container_of(d, struct ice_pf, dplls);
struct ice_dpll *de = &pf->dplls.eec;
struct ice_dpll *dp = &pf->dplls.pps;
+ u32 phase_offset_ntf = 0;
int ret = 0;
if (ice_is_reset_in_progress(pf->state))
goto resched;
mutex_lock(&pf->dplls.lock);
+ d->periodic_counter++;
ret = ice_dpll_update_state(pf, de, false);
if (!ret)
ret = ice_dpll_update_state(pf, dp, false);
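+ /* with phase offset monitoring enabled, refresh the measurements every
+  * phase_offset_monitor_period runs of this worker
+  */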
+ if (!ret && dp->phase_offset_monitor_period &&
+ d->periodic_counter % dp->phase_offset_monitor_period == 0)
+ ret = ice_dpll_pps_update_phase_offsets(pf, &phase_offset_ntf);
if (ret) {
d->cgu_state_acq_err_num++;
/* stop rescheduling this worker */
@@ -1391,6 +2709,9 @@ static void ice_dpll_periodic_work(struct kthread_work *work)
mutex_unlock(&pf->dplls.lock);
ice_dpll_notify_changes(de);
ice_dpll_notify_changes(dp);
+ if (phase_offset_ntf)
+ ice_dpll_pins_notify_mask(d->inputs, d->num_inputs,
+ phase_offset_ntf);
resched:
/* Run twice a second or reschedule if update failed */
@@ -1400,6 +2721,88 @@ resched:
}
/**
+ * ice_dpll_init_ref_sync_inputs - initialize reference sync pin pairs
+ * @pf: pf private structure
+ *
+ * Read DPLL TLV capabilities and initialize reference sync pin pairs in
+ * dpll subsystem.
+ *
+ * Return:
+ * * 0 - success or nothing to do (no ref-sync TLVs present)
+ * * negative - AQ failure
+ */
+static int ice_dpll_init_ref_sync_inputs(struct ice_pf *pf)
+{
+ struct ice_dpll_pin *inputs = pf->dplls.inputs;
+ struct ice_hw *hw = &pf->hw;
+ u16 addr, len, end, hdr;
+ int ret;
+
+ ret = ice_get_pfa_module_tlv(hw, &hdr, &len, ICE_SR_PFA_DPLL_DEFAULTS);
+ if (ret) {
+ dev_err(ice_pf_to_dev(pf),
+ "Failed to read PFA dpll defaults TLV ret=%d\n", ret);
+ return ret;
+ }
+ end = hdr + len;
+
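+ /* walk the DPLL defaults PFA section entry by entry; an
+  * ICE_DPLL_PFA_END entry stops the walk, unknown entry types are
+  * skipped
+  */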
+ for (addr = hdr + ICE_DPLL_PFA_HEADER_LEN; addr < end;
+ addr += ICE_DPLL_PFA_ENTRY_LEN) {
+ unsigned long bit, ul_mask, offset;
+ u16 pin, mask, buf;
+ bool valid = false;
+
+ ret = ice_read_sr_word(hw, addr, &buf);
+ if (ret)
+ return ret;
+
+ switch (buf) {
+ case ICE_DPLL_PFA_REF_SYNC_TYPE:
+ case ICE_DPLL_PFA_REF_SYNC_TYPE2:
+ {
+ u16 mask_addr = addr + ICE_DPLL_PFA_MASK_OFFSET;
+ u16 val_addr = addr + ICE_DPLL_PFA_VALUE_OFFSET;
+
+ ret = ice_read_sr_word(hw, mask_addr, &mask);
+ if (ret)
+ return ret;
+ ret = ice_read_sr_word(hw, val_addr, &pin);
+ if (ret)
+ return ret;
+ if (buf == ICE_DPLL_PFA_REF_SYNC_TYPE)
+ pin >>= ICE_DPLL_PFA_MAILBOX_REF_SYNC_PIN_S;
+ valid = true;
+ break;
+ }
+ case ICE_DPLL_PFA_END:
+ addr = end;
+ break;
+ default:
+ continue;
+ }
+ if (!valid)
+ continue;
+
+ ul_mask = mask;
+ offset = 0;
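+ /* each set bit in the mask identifies the reference sync partner pin
+  * for the pin read above; on E810-C SFP both indexes are shifted down
+  * to skip the NC pin slots
+  */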
+ for_each_set_bit(bit, &ul_mask, BITS_PER_TYPE(u16)) {
+ int i, j;
+
+ if (hw->device_id == ICE_DEV_ID_E810C_SFP &&
+ pin > ICE_DPLL_E810C_SFP_NC_START)
+ offset = -ICE_DPLL_E810C_SFP_NC_PINS;
+ i = pin + offset;
+ j = bit + offset;
+ if (i < 0 || j < 0)
+ return -ERANGE;
+ inputs[i].ref_sync = j;
+ }
+ }
+
+ return 0;
+}
+
+/**
* ice_dpll_release_pins - release pins resources from dpll subsystem
* @pins: pointer to pins array
* @count: number of pins
@@ -1469,7 +2872,38 @@ ice_dpll_unregister_pins(struct dpll_device *dpll, struct ice_dpll_pin *pins,
int i;
for (i = 0; i < count; i++)
- dpll_pin_unregister(dpll, pins[i].pin, ops, &pins[i]);
+ if (!pins[i].hidden)
+ dpll_pin_unregister(dpll, pins[i].pin, ops, &pins[i]);
+}
+
+/**
+ * ice_dpll_pin_ref_sync_register - register reference sync pins
+ * @pins: pointer to pins array
+ * @count: number of pins
+ *
+ * Register reference sync pins in dpll subsystem.
+ *
+ * Return:
+ * * 0 - success
+ * * negative - registration failure reason
+ */
+static int
+ice_dpll_pin_ref_sync_register(struct ice_dpll_pin *pins, int count)
+{
+ int ret, i;
+
+ for (i = 0; i < count; i++) {
+ if (!pins[i].hidden && pins[i].ref_sync) {
+ int j = pins[i].ref_sync;
+
+ ret = dpll_pin_ref_sync_pair_add(pins[i].pin,
+ pins[j].pin);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
}
/**
@@ -1492,16 +2926,19 @@ ice_dpll_register_pins(struct dpll_device *dpll, struct ice_dpll_pin *pins,
int ret, i;
for (i = 0; i < count; i++) {
- ret = dpll_pin_register(dpll, pins[i].pin, ops, &pins[i]);
- if (ret)
- goto unregister_pins;
+ if (!pins[i].hidden) {
+ ret = dpll_pin_register(dpll, pins[i].pin, ops, &pins[i]);
+ if (ret)
+ goto unregister_pins;
+ }
}
return 0;
unregister_pins:
while (--i >= 0)
- dpll_pin_unregister(dpll, pins[i].pin, ops, &pins[i]);
+ if (!pins[i].hidden)
+ dpll_pin_unregister(dpll, pins[i].pin, ops, &pins[i]);
return ret;
}
@@ -1626,6 +3063,8 @@ ice_dpll_init_rclk_pins(struct ice_pf *pf, struct ice_dpll_pin *pin,
struct dpll_pin *parent;
int ret, i;
+ if (WARN_ON((!vsi || !vsi->netdev)))
+ return -EINVAL;
ret = ice_dpll_get_pins(pf, pin, start_idx, ICE_DPLL_RCLK_NUM_PER_PF,
pf->dplls.clock_id);
if (ret)
@@ -1641,8 +3080,6 @@ ice_dpll_init_rclk_pins(struct ice_pf *pf, struct ice_dpll_pin *pin,
if (ret)
goto unregister_pins;
}
- if (WARN_ON((!vsi || !vsi->netdev)))
- return -EINVAL;
dpll_netdev_pin_set(vsi->netdev, pf->dplls.rclk.pin);
return 0;
@@ -1689,6 +3126,18 @@ static void ice_dpll_deinit_pins(struct ice_pf *pf, bool cgu)
ice_dpll_unregister_pins(de->dpll, outputs,
&ice_dpll_output_ops, num_outputs);
ice_dpll_release_pins(outputs, num_outputs);
+ if (!pf->dplls.generic) {
+ ice_dpll_deinit_direct_pins(cgu, pf->dplls.ufl,
+ ICE_DPLL_PIN_SW_NUM,
+ &ice_dpll_pin_ufl_ops,
+ pf->dplls.pps.dpll,
+ pf->dplls.eec.dpll);
+ ice_dpll_deinit_direct_pins(cgu, pf->dplls.sma,
+ ICE_DPLL_PIN_SW_NUM,
+ &ice_dpll_pin_sma_ops,
+ pf->dplls.pps.dpll,
+ pf->dplls.eec.dpll);
+ }
}
}
@@ -1706,8 +3155,7 @@ static void ice_dpll_deinit_pins(struct ice_pf *pf, bool cgu)
*/
static int ice_dpll_init_pins(struct ice_pf *pf, bool cgu)
{
- u32 rclk_idx;
- int ret;
+ int ret, count;
ret = ice_dpll_init_direct_pins(pf, cgu, pf->dplls.inputs, 0,
pf->dplls.num_inputs,
@@ -1715,23 +3163,64 @@ static int ice_dpll_init_pins(struct ice_pf *pf, bool cgu)
pf->dplls.eec.dpll, pf->dplls.pps.dpll);
if (ret)
return ret;
+ count = pf->dplls.num_inputs;
if (cgu) {
ret = ice_dpll_init_direct_pins(pf, cgu, pf->dplls.outputs,
- pf->dplls.num_inputs,
+ count,
pf->dplls.num_outputs,
&ice_dpll_output_ops,
pf->dplls.eec.dpll,
pf->dplls.pps.dpll);
if (ret)
goto deinit_inputs;
+ count += pf->dplls.num_outputs;
+ if (!pf->dplls.generic) {
+ ret = ice_dpll_init_direct_pins(pf, cgu, pf->dplls.sma,
+ count,
+ ICE_DPLL_PIN_SW_NUM,
+ &ice_dpll_pin_sma_ops,
+ pf->dplls.eec.dpll,
+ pf->dplls.pps.dpll);
+ if (ret)
+ goto deinit_outputs;
+ count += ICE_DPLL_PIN_SW_NUM;
+ ret = ice_dpll_init_direct_pins(pf, cgu, pf->dplls.ufl,
+ count,
+ ICE_DPLL_PIN_SW_NUM,
+ &ice_dpll_pin_ufl_ops,
+ pf->dplls.eec.dpll,
+ pf->dplls.pps.dpll);
+ if (ret)
+ goto deinit_sma;
+ count += ICE_DPLL_PIN_SW_NUM;
+ }
+ ret = ice_dpll_pin_ref_sync_register(pf->dplls.inputs,
+ pf->dplls.num_inputs);
+ if (ret)
+ goto deinit_ufl;
+ ret = ice_dpll_pin_ref_sync_register(pf->dplls.sma,
+ ICE_DPLL_PIN_SW_NUM);
+ if (ret)
+ goto deinit_ufl;
+ } else {
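+ /* this PF does not own the CGU, but still account for the output
+  * and SW pin slots so the rclk pin start index below matches the
+  * numbering used when the CGU is owned
+  */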
+ count += pf->dplls.num_outputs + 2 * ICE_DPLL_PIN_SW_NUM;
}
- rclk_idx = pf->dplls.num_inputs + pf->dplls.num_outputs + pf->hw.pf_id;
- ret = ice_dpll_init_rclk_pins(pf, &pf->dplls.rclk, rclk_idx,
+ ret = ice_dpll_init_rclk_pins(pf, &pf->dplls.rclk, count + pf->hw.pf_id,
&ice_dpll_rclk_ops);
if (ret)
- goto deinit_outputs;
+ goto deinit_ufl;
return 0;
+deinit_ufl:
+ ice_dpll_deinit_direct_pins(cgu, pf->dplls.ufl,
+ ICE_DPLL_PIN_SW_NUM,
+ &ice_dpll_pin_ufl_ops,
+ pf->dplls.pps.dpll, pf->dplls.eec.dpll);
+deinit_sma:
+ ice_dpll_deinit_direct_pins(cgu, pf->dplls.sma,
+ ICE_DPLL_PIN_SW_NUM,
+ &ice_dpll_pin_sma_ops,
+ pf->dplls.pps.dpll, pf->dplls.eec.dpll);
deinit_outputs:
ice_dpll_deinit_direct_pins(cgu, pf->dplls.outputs,
pf->dplls.num_outputs,
@@ -1757,7 +3246,7 @@ static void
ice_dpll_deinit_dpll(struct ice_pf *pf, struct ice_dpll *d, bool cgu)
{
if (cgu)
- dpll_device_unregister(d->dpll, &ice_dpll_ops, d);
+ dpll_device_unregister(d->dpll, d->ops, d);
dpll_device_put(d->dpll);
}
@@ -1791,12 +3280,17 @@ ice_dpll_init_dpll(struct ice_pf *pf, struct ice_dpll *d, bool cgu,
}
d->pf = pf;
if (cgu) {
+ const struct dpll_device_ops *ops = &ice_dpll_ops;
+
+ if (type == DPLL_TYPE_PPS && ice_dpll_is_pps_phase_monitor(pf))
+ ops = &ice_dpll_pom_ops;
ice_dpll_update_state(pf, d, true);
- ret = dpll_device_register(d->dpll, type, &ice_dpll_ops, d);
+ ret = dpll_device_register(d->dpll, type, ops, d);
if (ret) {
dpll_device_put(d->dpll);
return ret;
}
+ d->ops = ops;
}
return 0;
@@ -1833,7 +3327,7 @@ static int ice_dpll_init_worker(struct ice_pf *pf)
struct kthread_worker *kworker;
kthread_init_delayed_work(&d->work, ice_dpll_periodic_work);
- kworker = kthread_create_worker(0, "ice-dplls-%s",
+ kworker = kthread_run_worker(0, "ice-dplls-%s",
dev_name(ice_pf_to_dev(pf)));
if (IS_ERR(kworker))
return PTR_ERR(kworker);
@@ -1845,6 +3339,85 @@ static int ice_dpll_init_worker(struct ice_pf *pf)
}
/**
+ * ice_dpll_phase_range_set - initialize phase adjust range helper
+ * @range: pointer to phase adjust range struct to be initialized
+ * @phase_adj: a value to be used as min(-)/max(+) boundary
+ */
+static void ice_dpll_phase_range_set(struct dpll_pin_phase_adjust_range *range,
+ u32 phase_adj)
+{
+ range->min = -phase_adj;
+ range->max = phase_adj;
+}
+
+/**
+ * ice_dpll_init_info_pins_generic - initializes generic pins info
+ * @pf: board private structure
+ * @input: true if input pins are being initialized
+ *
+ * Init information for generic pins, cache them in PF's pins structures.
+ *
+ * Return:
+ * * 0 - success
+ * * negative - init failure reason
+ */
+static int ice_dpll_init_info_pins_generic(struct ice_pf *pf, bool input)
+{
+ struct ice_dpll *de = &pf->dplls.eec, *dp = &pf->dplls.pps;
+ static const char labels[][sizeof("99")] = {
+ "0", "1", "2", "3", "4", "5", "6", "7", "8",
+ "9", "10", "11", "12", "13", "14", "15" };
+ u32 cap = DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE;
+ enum ice_dpll_pin_type pin_type;
+ int i, pin_num, ret = -EINVAL;
+ struct ice_dpll_pin *pins;
+ u32 phase_adj_max;
+
+ if (input) {
+ pin_num = pf->dplls.num_inputs;
+ pins = pf->dplls.inputs;
+ phase_adj_max = pf->dplls.input_phase_adj_max;
+ pin_type = ICE_DPLL_PIN_TYPE_INPUT;
+ cap |= DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE;
+ } else {
+ pin_num = pf->dplls.num_outputs;
+ pins = pf->dplls.outputs;
+ phase_adj_max = pf->dplls.output_phase_adj_max;
+ pin_type = ICE_DPLL_PIN_TYPE_OUTPUT;
+ }
+ if (pin_num > ARRAY_SIZE(labels))
+ return ret;
+
+ for (i = 0; i < pin_num; i++) {
+ pins[i].idx = i;
+ pins[i].prop.board_label = labels[i];
+ ice_dpll_phase_range_set(&pins[i].prop.phase_range,
+ phase_adj_max);
+ pins[i].prop.capabilities = cap;
+ pins[i].pf = pf;
+ ret = ice_dpll_pin_state_update(pf, &pins[i], pin_type, NULL);
+ if (ret)
+ break;
+ if (input && pins[i].freq == ICE_DPLL_PIN_GEN_RCLK_FREQ)
+ pins[i].prop.type = DPLL_PIN_TYPE_MUX;
+ else
+ pins[i].prop.type = DPLL_PIN_TYPE_EXT;
+ if (!input)
+ continue;
+ ret = ice_aq_get_cgu_ref_prio(&pf->hw, de->dpll_idx, i,
+ &de->input_prio[i]);
+ if (ret)
+ break;
+ ret = ice_aq_get_cgu_ref_prio(&pf->hw, dp->dpll_idx, i,
+ &dp->input_prio[i]);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+/**
* ice_dpll_init_info_direct_pins - initializes direct pins info
* @pf: board private structure
* @pin_type: type of pins being initialized
@@ -1865,6 +3438,7 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
struct ice_hw *hw = &pf->hw;
struct ice_dpll_pin *pins;
unsigned long caps;
+ u32 phase_adj_max;
u8 freq_supp_num;
bool input;
@@ -1872,16 +3446,22 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
case ICE_DPLL_PIN_TYPE_INPUT:
pins = pf->dplls.inputs;
num_pins = pf->dplls.num_inputs;
+ phase_adj_max = pf->dplls.input_phase_adj_max;
input = true;
break;
case ICE_DPLL_PIN_TYPE_OUTPUT:
pins = pf->dplls.outputs;
num_pins = pf->dplls.num_outputs;
+ phase_adj_max = pf->dplls.output_phase_adj_max;
input = false;
break;
default:
return -EINVAL;
}
+ if (num_pins != ice_cgu_get_num_pins(hw, input)) {
+ pf->dplls.generic = true;
+ return ice_dpll_init_info_pins_generic(pf, input);
+ }
for (i = 0; i < num_pins; i++) {
caps = 0;
@@ -1899,19 +3479,17 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
return ret;
caps |= (DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE |
DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE);
- pins[i].prop.phase_range.min =
- pf->dplls.input_phase_adj_max;
- pins[i].prop.phase_range.max =
- -pf->dplls.input_phase_adj_max;
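+ /* pins wrapped by the SW (SMA/U.FL) pins are marked hidden and not
+  * registered with the dpll subsystem directly; they are exposed
+  * through the SW pin ops instead
+  */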
+ if (ice_dpll_is_sw_pin(pf, i, true))
+ pins[i].hidden = true;
} else {
- pins[i].prop.phase_range.min =
- pf->dplls.output_phase_adj_max;
- pins[i].prop.phase_range.max =
- -pf->dplls.output_phase_adj_max;
ret = ice_cgu_get_output_pin_state_caps(hw, i, &caps);
if (ret)
return ret;
+ if (ice_dpll_is_sw_pin(pf, i, false))
+ pins[i].hidden = true;
}
+ ice_dpll_phase_range_set(&pins[i].prop.phase_range,
+ phase_adj_max);
pins[i].prop.capabilities = caps;
ret = ice_dpll_pin_state_update(pf, &pins[i], pin_type, NULL);
if (ret)
@@ -1921,6 +3499,8 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
pins[i].prop.freq_supported_num = freq_supp_num;
pins[i].pf = pf;
}
+ if (input)
+ ret = ice_dpll_init_ref_sync_inputs(pf);
return ret;
}
@@ -1948,6 +3528,91 @@ static int ice_dpll_init_info_rclk_pin(struct ice_pf *pf)
}
/**
+ * ice_dpll_init_info_sw_pins - initializes software controlled pin information
+ * @pf: board private structure
+ *
+ * Init information for software controlled pins, cache them in
+ * pf->dplls.sma and pf->dplls.ufl.
+ *
+ * Return:
+ * * 0 - success
+ * * negative - init failure reason
+ */
+static int ice_dpll_init_info_sw_pins(struct ice_pf *pf)
+{
+ u8 freq_supp_num, pin_abs_idx, input_idx_offset = 0;
+ struct ice_dplls *d = &pf->dplls;
+ struct ice_dpll_pin *pin;
+ u32 phase_adj_max, caps;
+ int i, ret;
+
+ if (pf->hw.device_id == ICE_DEV_ID_E810C_QSFP)
+ input_idx_offset = ICE_E810_RCLK_PINS_NUM;
+ phase_adj_max = max(d->input_phase_adj_max, d->output_phase_adj_max);
+ caps = DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE;
+ for (i = 0; i < ICE_DPLL_PIN_SW_NUM; i++) {
+ pin = &d->sma[i];
+ pin->idx = i;
+ pin->prop.type = DPLL_PIN_TYPE_EXT;
+ pin_abs_idx = ICE_DPLL_PIN_SW_INPUT_ABS(i) + input_idx_offset;
+ pin->prop.freq_supported =
+ ice_cgu_get_pin_freq_supp(&pf->hw, pin_abs_idx,
+ true, &freq_supp_num);
+ pin->prop.freq_supported_num = freq_supp_num;
+ pin->prop.capabilities =
+ (DPLL_PIN_CAPABILITIES_DIRECTION_CAN_CHANGE |
+ DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE |
+ caps);
+ pin->pf = pf;
+ pin->prop.board_label = ice_dpll_sw_pin_sma[i];
+ pin->input = &d->inputs[pin_abs_idx];
+ if (pin->input->ref_sync)
+ pin->ref_sync = pin->input->ref_sync - pin_abs_idx;
+ pin->output = &d->outputs[ICE_DPLL_PIN_SW_OUTPUT_ABS(i)];
+ ice_dpll_phase_range_set(&pin->prop.phase_range, phase_adj_max);
+ }
+ for (i = 0; i < ICE_DPLL_PIN_SW_NUM; i++) {
+ pin = &d->ufl[i];
+ pin->idx = i;
+ pin->prop.type = DPLL_PIN_TYPE_EXT;
+ pin->prop.capabilities = caps;
+ pin->pf = pf;
+ pin->prop.board_label = ice_dpll_sw_pin_ufl[i];
+ if (i == ICE_DPLL_PIN_SW_1_IDX) {
+ pin->direction = DPLL_PIN_DIRECTION_OUTPUT;
+ pin_abs_idx = ICE_DPLL_PIN_SW_OUTPUT_ABS(i);
+ pin->prop.freq_supported =
+ ice_cgu_get_pin_freq_supp(&pf->hw, pin_abs_idx,
+ false,
+ &freq_supp_num);
+ pin->prop.freq_supported_num = freq_supp_num;
+ pin->input = NULL;
+ pin->output = &d->outputs[pin_abs_idx];
+ } else if (i == ICE_DPLL_PIN_SW_2_IDX) {
+ pin->direction = DPLL_PIN_DIRECTION_INPUT;
+ pin_abs_idx = ICE_DPLL_PIN_SW_INPUT_ABS(i) +
+ input_idx_offset;
+ pin->output = NULL;
+ pin->input = &d->inputs[pin_abs_idx];
+ pin->prop.freq_supported =
+ ice_cgu_get_pin_freq_supp(&pf->hw, pin_abs_idx,
+ true, &freq_supp_num);
+ pin->prop.freq_supported_num = freq_supp_num;
+ pin->prop.capabilities =
+ (DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE |
+ caps);
+ }
+ ice_dpll_phase_range_set(&pin->prop.phase_range, phase_adj_max);
+ }
+ ret = ice_dpll_pin_state_update(pf, pin, ICE_DPLL_PIN_TYPE_SOFTWARE,
+ NULL);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/**
* ice_dpll_init_pins_info - init pins info wrapper
* @pf: board private structure
* @pin_type: type of pins being initialized
@@ -1967,6 +3632,8 @@ ice_dpll_init_pins_info(struct ice_pf *pf, enum ice_dpll_pin_type pin_type)
return ice_dpll_init_info_direct_pins(pf, pin_type);
case ICE_DPLL_PIN_TYPE_RCLK_INPUT:
return ice_dpll_init_info_rclk_pin(pf);
+ case ICE_DPLL_PIN_TYPE_SOFTWARE:
+ return ice_dpll_init_info_sw_pins(pf);
default:
return -EINVAL;
}
@@ -2011,7 +3678,7 @@ static int ice_dpll_init_info(struct ice_pf *pf, bool cgu)
if (ret) {
dev_err(ice_pf_to_dev(pf),
"err:%d %s failed to read cgu abilities\n",
- ret, ice_aq_str(hw->adminq.sq_last_status));
+ ret, libie_aq_str(hw->adminq.sq_last_status));
return ret;
}
@@ -2019,8 +3686,10 @@ static int ice_dpll_init_info(struct ice_pf *pf, bool cgu)
dp->dpll_idx = abilities.pps_dpll_idx;
d->num_inputs = abilities.num_inputs;
d->num_outputs = abilities.num_outputs;
- d->input_phase_adj_max = le32_to_cpu(abilities.max_in_phase_adj);
- d->output_phase_adj_max = le32_to_cpu(abilities.max_out_phase_adj);
+ d->input_phase_adj_max = le32_to_cpu(abilities.max_in_phase_adj) &
+ ICE_AQC_GET_CGU_MAX_PHASE_ADJ;
+ d->output_phase_adj_max = le32_to_cpu(abilities.max_out_phase_adj) &
+ ICE_AQC_GET_CGU_MAX_PHASE_ADJ;
alloc_size = sizeof(*d->inputs) * d->num_inputs;
d->inputs = kzalloc(alloc_size, GFP_KERNEL);
@@ -2051,6 +3720,9 @@ static int ice_dpll_init_info(struct ice_pf *pf, bool cgu)
ret = ice_dpll_init_pins_info(pf, ICE_DPLL_PIN_TYPE_OUTPUT);
if (ret)
goto deinit_info;
+ ret = ice_dpll_init_pins_info(pf, ICE_DPLL_PIN_TYPE_SOFTWARE);
+ if (ret)
+ goto deinit_info;
}
ret = ice_get_cgu_rclk_pin_info(&pf->hw, &d->base_rclk_idx,
diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.h b/drivers/net/ethernet/intel/ice/ice_dpll.h
index 93172e93995b..c0da03384ce9 100644
--- a/drivers/net/ethernet/intel/ice/ice_dpll.h
+++ b/drivers/net/ethernet/intel/ice/ice_dpll.h
@@ -8,6 +8,18 @@
#define ICE_DPLL_RCLK_NUM_MAX 4
+/**
+ * enum ice_dpll_pin_sw - enumerate ice software pin indices:
+ * @ICE_DPLL_PIN_SW_1_IDX: index of first SW pin
+ * @ICE_DPLL_PIN_SW_2_IDX: index of second SW pin
+ * @ICE_DPLL_PIN_SW_NUM: number of SW pins in pair
+ */
+enum ice_dpll_pin_sw {
+ ICE_DPLL_PIN_SW_1_IDX,
+ ICE_DPLL_PIN_SW_2_IDX,
+ ICE_DPLL_PIN_SW_NUM
+};
+
/** ice_dpll_pin - store info about pins
* @pin: dpll pin structure
* @pf: pointer to pf, which has registered the dpll_pin
@@ -19,6 +31,8 @@
* @prop: pin properties
* @freq: current frequency of a pin
* @phase_adjust: current phase adjust value
+ * @phase_offset: monitored phase offset value
+ * @ref_sync: store id of reference sync pin
*/
struct ice_dpll_pin {
struct dpll_pin *pin;
@@ -31,6 +45,14 @@ struct ice_dpll_pin {
struct dpll_pin_properties prop;
u32 freq;
s32 phase_adjust;
+ struct ice_dpll_pin *input;
+ struct ice_dpll_pin *output;
+ enum dpll_pin_direction direction;
+ s64 phase_offset;
+ u8 status;
+ u8 ref_sync;
+ bool active;
+ bool hidden;
};
/** ice_dpll - store info required for DPLL control
@@ -46,8 +68,10 @@ struct ice_dpll_pin {
* @input_prio: priorities of each input
* @dpll_state: current dpll sync state
* @prev_dpll_state: last dpll sync state
+ * @phase_offset_monitor_period: period between phase offset monitor reads
* @active_input: pointer to active input pin
* @prev_input: pointer to previous active input pin
+ * @ops: holds the registered ops
*/
struct ice_dpll {
struct dpll_device *dpll;
@@ -63,8 +87,10 @@ struct ice_dpll {
enum dpll_lock_status dpll_state;
enum dpll_lock_status prev_dpll_state;
enum dpll_mode mode;
+ u32 phase_offset_monitor_period;
struct dpll_pin *active_input;
struct dpll_pin *prev_input;
+ const struct dpll_device_ops *ops;
};
/** ice_dplls - store info required for CCU (clock controlling unit)
@@ -83,6 +109,7 @@ struct ice_dpll {
* @clock_id: clock_id of dplls
* @input_phase_adj_max: max phase adjust value for an input pins
* @output_phase_adj_max: max phase adjust value for an output pins
+ * @periodic_counter: counter of periodic work executions
*/
struct ice_dplls {
struct kthread_worker *kworker;
@@ -92,14 +119,19 @@ struct ice_dplls {
struct ice_dpll pps;
struct ice_dpll_pin *inputs;
struct ice_dpll_pin *outputs;
+ struct ice_dpll_pin sma[ICE_DPLL_PIN_SW_NUM];
+ struct ice_dpll_pin ufl[ICE_DPLL_PIN_SW_NUM];
struct ice_dpll_pin rclk;
u8 num_inputs;
u8 num_outputs;
- int cgu_state_acq_err_num;
+ u8 sma_data;
u8 base_rclk_idx;
+ int cgu_state_acq_err_num;
u64 clock_id;
s32 input_phase_adj_max;
s32 output_phase_adj_max;
+ u32 periodic_counter;
+ bool generic;
};
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index 9069725c71b4..2e4f0969035f 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -7,89 +7,10 @@
#include "ice_eswitch_br.h"
#include "ice_fltr.h"
#include "ice_repr.h"
-#include "ice_devlink.h"
+#include "devlink/devlink.h"
#include "ice_tc_lib.h"
/**
- * ice_eswitch_del_sp_rules - delete adv rules added on PRs
- * @pf: pointer to the PF struct
- *
- * Delete all advanced rules that were used to forward packets with the
- * device's VSI index to the corresponding eswitch ctrl VSI queue.
- */
-static void ice_eswitch_del_sp_rules(struct ice_pf *pf)
-{
- struct ice_repr *repr;
- unsigned long id;
-
- xa_for_each(&pf->eswitch.reprs, id, repr) {
- if (repr->sp_rule.rid)
- ice_rem_adv_rule_by_id(&pf->hw, &repr->sp_rule);
- }
-}
-
-/**
- * ice_eswitch_add_sp_rule - add adv rule with device's VSI index
- * @pf: pointer to PF struct
- * @repr: pointer to the repr struct
- *
- * This function adds advanced rule that forwards packets with
- * device's VSI index to the corresponding eswitch ctrl VSI queue.
- */
-static int ice_eswitch_add_sp_rule(struct ice_pf *pf, struct ice_repr *repr)
-{
- struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi;
- struct ice_adv_rule_info rule_info = { 0 };
- struct ice_adv_lkup_elem *list;
- struct ice_hw *hw = &pf->hw;
- const u16 lkups_cnt = 1;
- int err;
-
- list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
- if (!list)
- return -ENOMEM;
-
- ice_rule_add_src_vsi_metadata(list);
-
- rule_info.sw_act.flag = ICE_FLTR_TX;
- rule_info.sw_act.vsi_handle = ctrl_vsi->idx;
- rule_info.sw_act.fltr_act = ICE_FWD_TO_Q;
- rule_info.sw_act.fwd_id.q_id = hw->func_caps.common_cap.rxq_first_id +
- ctrl_vsi->rxq_map[repr->q_id];
- rule_info.flags_info.act |= ICE_SINGLE_ACT_LB_ENABLE;
- rule_info.flags_info.act_valid = true;
- rule_info.tun_type = ICE_SW_TUN_AND_NON_TUN;
- rule_info.src_vsi = repr->src_vsi->idx;
-
- err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info,
- &repr->sp_rule);
- if (err)
- dev_err(ice_pf_to_dev(pf), "Unable to add slow-path rule for eswitch for PR %d",
- repr->id);
-
- kfree(list);
- return err;
-}
-
-static int
-ice_eswitch_add_sp_rules(struct ice_pf *pf)
-{
- struct ice_repr *repr;
- unsigned long id;
- int err;
-
- xa_for_each(&pf->eswitch.reprs, id, repr) {
- err = ice_eswitch_add_sp_rule(pf, repr);
- if (err) {
- ice_eswitch_del_sp_rules(pf);
- return err;
- }
- }
-
- return 0;
-}
-
-/**
* ice_eswitch_setup_env - configure eswitch HW filters
* @pf: pointer to PF struct
*
@@ -99,12 +20,16 @@ ice_eswitch_add_sp_rules(struct ice_pf *pf)
static int ice_eswitch_setup_env(struct ice_pf *pf)
{
struct ice_vsi *uplink_vsi = pf->eswitch.uplink_vsi;
- struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi;
struct net_device *netdev = uplink_vsi->netdev;
+ bool if_running = netif_running(netdev);
struct ice_vsi_vlan_ops *vlan_ops;
- bool rule_added = false;
+
+ if (if_running && !test_and_set_bit(ICE_VSI_DOWN, uplink_vsi->state))
+ if (ice_down(uplink_vsi))
+ return -ENODEV;
ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx);
+ ice_vsi_cfg_sw_lldp(uplink_vsi, true, false);
netif_addr_lock_bh(netdev);
__dev_uc_unsync(netdev, NULL);
@@ -112,98 +37,47 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
netif_addr_unlock_bh(netdev);
if (ice_vsi_add_vlan_zero(uplink_vsi))
+ goto err_vlan_zero;
+
+ if (ice_set_dflt_vsi(uplink_vsi))
goto err_def_rx;
- if (!ice_is_dflt_vsi_in_use(uplink_vsi->port_info)) {
- if (ice_set_dflt_vsi(uplink_vsi))
- goto err_def_rx;
- rule_added = true;
- }
+ if (ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, true,
+ ICE_FLTR_TX))
+ goto err_def_tx;
vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi);
if (vlan_ops->dis_rx_filtering(uplink_vsi))
- goto err_dis_rx;
-
- if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override))
- goto err_override_uplink;
-
- if (ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_set_allow_override))
- goto err_override_control;
+ goto err_vlan_filtering;
if (ice_vsi_update_local_lb(uplink_vsi, true))
goto err_override_local_lb;
+ if (if_running && ice_up(uplink_vsi))
+ goto err_up;
+
return 0;
+err_up:
+ ice_vsi_update_local_lb(uplink_vsi, false);
err_override_local_lb:
- ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
-err_override_control:
- ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
-err_override_uplink:
vlan_ops->ena_rx_filtering(uplink_vsi);
-err_dis_rx:
- if (rule_added)
- ice_clear_dflt_vsi(uplink_vsi);
+err_vlan_filtering:
+ ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false,
+ ICE_FLTR_TX);
+err_def_tx:
+ ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false,
+ ICE_FLTR_RX);
err_def_rx:
+ ice_vsi_del_vlan_zero(uplink_vsi);
+err_vlan_zero:
ice_fltr_add_mac_and_broadcast(uplink_vsi,
uplink_vsi->port_info->mac.perm_addr,
ICE_FWD_TO_VSI);
- return -ENODEV;
-}
+ if (if_running)
+ ice_up(uplink_vsi);
-/**
- * ice_eswitch_remap_rings_to_vectors - reconfigure rings of eswitch ctrl VSI
- * @eswitch: pointer to eswitch struct
- *
- * In eswitch number of allocated Tx/Rx rings is equal.
- *
- * This function fills q_vectors structures associated with representor and
- * move each ring pairs to port representor netdevs. Each port representor
- * will have dedicated 1 Tx/Rx ring pair, so number of rings pair is equal to
- * number of VFs.
- */
-static void ice_eswitch_remap_rings_to_vectors(struct ice_eswitch *eswitch)
-{
- struct ice_vsi *vsi = eswitch->control_vsi;
- unsigned long repr_id = 0;
- int q_id;
-
- ice_for_each_txq(vsi, q_id) {
- struct ice_q_vector *q_vector;
- struct ice_tx_ring *tx_ring;
- struct ice_rx_ring *rx_ring;
- struct ice_repr *repr;
-
- repr = xa_find(&eswitch->reprs, &repr_id, U32_MAX,
- XA_PRESENT);
- if (!repr)
- break;
-
- repr_id += 1;
- repr->q_id = q_id;
- q_vector = repr->q_vector;
- tx_ring = vsi->tx_rings[q_id];
- rx_ring = vsi->rx_rings[q_id];
-
- q_vector->vsi = vsi;
- q_vector->reg_idx = vsi->q_vectors[0]->reg_idx;
-
- q_vector->num_ring_tx = 1;
- q_vector->tx.tx_ring = tx_ring;
- tx_ring->q_vector = q_vector;
- tx_ring->next = NULL;
- tx_ring->netdev = repr->netdev;
- /* In switchdev mode, from OS stack perspective, there is only
- * one queue for given netdev, so it needs to be indexed as 0.
- */
- tx_ring->q_index = 0;
-
- q_vector->num_ring_rx = 1;
- q_vector->rx.rx_ring = rx_ring;
- rx_ring->q_vector = q_vector;
- rx_ring->next = NULL;
- rx_ring->netdev = repr->netdev;
- }
+ return -ENODEV;
}
/**
@@ -225,8 +99,6 @@ ice_eswitch_release_repr(struct ice_pf *pf, struct ice_repr *repr)
repr->dst = NULL;
ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac,
ICE_FWD_TO_VSI);
-
- netif_napi_del(&repr->q_vector->napi);
}
/**
@@ -236,43 +108,64 @@ ice_eswitch_release_repr(struct ice_pf *pf, struct ice_repr *repr)
*/
static int ice_eswitch_setup_repr(struct ice_pf *pf, struct ice_repr *repr)
{
- struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi;
+ struct ice_vsi *uplink_vsi = pf->eswitch.uplink_vsi;
struct ice_vsi *vsi = repr->src_vsi;
struct metadata_dst *dst;
- ice_remove_vsi_fltr(&pf->hw, vsi->idx);
repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
GFP_KERNEL);
if (!repr->dst)
- goto err_add_mac_fltr;
+ return -ENOMEM;
- if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof))
- goto err_dst_free;
+ netif_keep_dst(uplink_vsi->netdev);
- if (ice_vsi_add_vlan_zero(vsi))
- goto err_update_security;
+ dst = repr->dst;
+ dst->u.port_info.port_id = vsi->vsi_num;
+ dst->u.port_info.lower_dev = uplink_vsi->netdev;
- netif_napi_add(repr->netdev, &repr->q_vector->napi,
- ice_napi_poll);
+ return 0;
+}
+
+/**
+ * ice_eswitch_cfg_vsi - configure VSI to work in slow-path
+ * @vsi: VSI structure of representee
+ * @mac: representee MAC
+ *
+ * Return: 0 on success, non-zero on error.
+ */
+int ice_eswitch_cfg_vsi(struct ice_vsi *vsi, const u8 *mac)
+{
+ int err;
- netif_keep_dst(repr->netdev);
+ ice_remove_vsi_fltr(&vsi->back->hw, vsi->idx);
- dst = repr->dst;
- dst->u.port_info.port_id = vsi->vsi_num;
- dst->u.port_info.lower_dev = repr->netdev;
- ice_repr_set_traffic_vsi(repr, ctrl_vsi);
+ err = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
+ if (err)
+ goto err_update_security;
+
+ err = ice_vsi_add_vlan_zero(vsi);
+ if (err)
+ goto err_vlan_zero;
return 0;
-err_update_security:
+err_vlan_zero:
ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
-err_dst_free:
- metadata_dst_free(repr->dst);
- repr->dst = NULL;
-err_add_mac_fltr:
- ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac, ICE_FWD_TO_VSI);
+err_update_security:
+ ice_fltr_add_mac_and_broadcast(vsi, mac, ICE_FWD_TO_VSI);
- return -ENODEV;
+ return err;
+}
+
+/**
+ * ice_eswitch_decfg_vsi - unroll changes done to VSI for switchdev
+ * @vsi: VSI structure of representee
+ * @mac: representee MAC
+ */
+void ice_eswitch_decfg_vsi(struct ice_vsi *vsi, const u8 *mac)
+{
+ ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
+ ice_fltr_add_mac_and_broadcast(vsi, mac, ICE_FWD_TO_VSI);
}
/**
@@ -280,16 +173,16 @@ err_add_mac_fltr:
* @repr_id: representor ID
* @vsi: VSI for which port representor is configured
*/
-void ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi)
+void ice_eswitch_update_repr(unsigned long *repr_id, struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
struct ice_repr *repr;
- int ret;
+ int err;
if (!ice_is_switchdev_running(pf))
return;
- repr = xa_load(&pf->eswitch.reprs, repr_id);
+ repr = xa_load(&pf->eswitch.reprs, *repr_id);
if (!repr)
return;
@@ -299,12 +192,19 @@ void ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi)
if (repr->br_port)
repr->br_port->vsi = vsi;
- ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
- if (ret) {
- ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac,
- ICE_FWD_TO_VSI);
+ err = ice_eswitch_cfg_vsi(vsi, repr->parent_mac);
+ if (err)
dev_err(ice_pf_to_dev(pf), "Failed to update VSI of port representor %d",
repr->id);
+
+ /* The VSI number is different, reload the PR with new id */
+ if (repr->id != vsi->vsi_num) {
+ xa_erase(&pf->eswitch.reprs, repr->id);
+ repr->id = vsi->vsi_num;
+ if (xa_insert(&pf->eswitch.reprs, repr->id, repr, GFP_KERNEL))
+ dev_err(ice_pf_to_dev(pf), "Failed to reload port representor %d",
+ repr->id);
+ *repr_id = repr->id;
}
}
@@ -318,27 +218,19 @@ void ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi)
netdev_tx_t
ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
- struct ice_netdev_priv *np;
- struct ice_repr *repr;
- struct ice_vsi *vsi;
-
- np = netdev_priv(netdev);
- vsi = np->vsi;
-
- if (!vsi || !ice_is_switchdev_running(vsi->back))
- return NETDEV_TX_BUSY;
-
- if (ice_is_reset_in_progress(vsi->back->state) ||
- test_bit(ICE_VF_DIS, vsi->back->state))
- return NETDEV_TX_BUSY;
+ struct ice_repr *repr = ice_netdev_to_repr(netdev);
+ unsigned int len = skb->len;
+ int ret;
- repr = ice_netdev_to_repr(netdev);
skb_dst_drop(skb);
dst_hold((struct dst_entry *)repr->dst);
skb_dst_set(skb, (struct dst_entry *)repr->dst);
- skb->queue_mapping = repr->q_id;
+ skb->dev = repr->dst->u.port_info.lower_dev;
+
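+ /* transmit through the uplink netdev; the metadata dst set above
+  * lets the PF Tx path direct the frame to the representee VSI
+  */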
+ ret = dev_queue_xmit(skb);
+ ice_repr_inc_tx_stats(repr, len, ret);
- return ice_start_xmit(skb, netdev);
+ return ret;
}
/**
@@ -354,6 +246,10 @@ ice_eswitch_set_target_vsi(struct sk_buff *skb,
u64 cd_cmd, dst_vsi;
if (!dst) {
+ struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
+
+ if (unlikely(eth->h_proto == htons(ETH_P_LLDP)))
+ return;
cd_cmd = ICE_TX_CTX_DESC_SWTCH_UPLINK << ICE_TXD_CTX_QW1_CMD_S;
off->cd_qw1 |= (cd_cmd | ICE_TX_DESC_DTYPE_CTX);
} else {
@@ -374,62 +270,20 @@ ice_eswitch_set_target_vsi(struct sk_buff *skb,
static void ice_eswitch_release_env(struct ice_pf *pf)
{
struct ice_vsi *uplink_vsi = pf->eswitch.uplink_vsi;
- struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi;
struct ice_vsi_vlan_ops *vlan_ops;
vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi);
ice_vsi_update_local_lb(uplink_vsi, false);
- ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
- ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
vlan_ops->ena_rx_filtering(uplink_vsi);
- ice_clear_dflt_vsi(uplink_vsi);
+ ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false,
+ ICE_FLTR_TX);
+ ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false,
+ ICE_FLTR_RX);
ice_fltr_add_mac_and_broadcast(uplink_vsi,
uplink_vsi->port_info->mac.perm_addr,
ICE_FWD_TO_VSI);
-}
-
-/**
- * ice_eswitch_vsi_setup - configure eswitch control VSI
- * @pf: pointer to PF structure
- * @pi: pointer to port_info structure
- */
-static struct ice_vsi *
-ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
-{
- struct ice_vsi_cfg_params params = {};
-
- params.type = ICE_VSI_SWITCHDEV_CTRL;
- params.pi = pi;
- params.flags = ICE_VSI_FLAG_INIT;
-
- return ice_vsi_setup(pf, &params);
-}
-
-/**
- * ice_eswitch_napi_enable - enable NAPI for all port representors
- * @reprs: xarray of reprs
- */
-static void ice_eswitch_napi_enable(struct xarray *reprs)
-{
- struct ice_repr *repr;
- unsigned long id;
-
- xa_for_each(reprs, id, repr)
- napi_enable(&repr->q_vector->napi);
-}
-
-/**
- * ice_eswitch_napi_disable - disable NAPI for all port representors
- * @reprs: xarray of reprs
- */
-static void ice_eswitch_napi_disable(struct xarray *reprs)
-{
- struct ice_repr *repr;
- unsigned long id;
-
- xa_for_each(reprs, id, repr)
- napi_disable(&repr->q_vector->napi);
+ ice_vsi_cfg_sw_lldp(uplink_vsi, true, true);
}
/**
@@ -438,7 +292,7 @@ static void ice_eswitch_napi_disable(struct xarray *reprs)
*/
static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
{
- struct ice_vsi *ctrl_vsi, *uplink_vsi;
+ struct ice_vsi *uplink_vsi;
uplink_vsi = ice_get_main_vsi(pf);
if (!uplink_vsi)
@@ -450,17 +304,10 @@ static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
return -EINVAL;
}
- pf->eswitch.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info);
- if (!pf->eswitch.control_vsi)
- return -ENODEV;
-
- ctrl_vsi = pf->eswitch.control_vsi;
- /* cp VSI is createad with 1 queue as default */
- pf->eswitch.qs.value = 1;
pf->eswitch.uplink_vsi = uplink_vsi;
if (ice_eswitch_setup_env(pf))
- goto err_vsi;
+ return -ENODEV;
if (ice_eswitch_br_offloads_init(pf))
goto err_br_offloads;
@@ -471,8 +318,6 @@ static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
err_br_offloads:
ice_eswitch_release_env(pf);
-err_vsi:
- ice_vsi_release(ctrl_vsi);
return -ENODEV;
}
@@ -482,14 +327,10 @@ err_vsi:
*/
static void ice_eswitch_disable_switchdev(struct ice_pf *pf)
{
- struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi;
-
ice_eswitch_br_offloads_deinit(pf);
ice_eswitch_release_env(pf);
- ice_vsi_release(ctrl_vsi);
pf->eswitch.is_running = false;
- pf->eswitch.qs.is_reaching = false;
}
/**
@@ -530,7 +371,7 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to switchdev",
pf->hw.pf_id);
- xa_init_flags(&pf->eswitch.reprs, XA_FLAGS_ALLOC);
+ xa_init(&pf->eswitch.reprs);
NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to switchdev");
break;
}
@@ -602,56 +443,17 @@ void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf)
static void ice_eswitch_stop_reprs(struct ice_pf *pf)
{
- ice_eswitch_del_sp_rules(pf);
ice_eswitch_stop_all_tx_queues(pf);
- ice_eswitch_napi_disable(&pf->eswitch.reprs);
}
static void ice_eswitch_start_reprs(struct ice_pf *pf)
{
- ice_eswitch_napi_enable(&pf->eswitch.reprs);
ice_eswitch_start_all_tx_queues(pf);
- ice_eswitch_add_sp_rules(pf);
}
-static void
-ice_eswitch_cp_change_queues(struct ice_eswitch *eswitch, int change)
-{
- struct ice_vsi *cp = eswitch->control_vsi;
- int queues = 0;
-
- if (eswitch->qs.is_reaching) {
- if (eswitch->qs.to_reach >= eswitch->qs.value + change) {
- queues = eswitch->qs.to_reach;
- eswitch->qs.is_reaching = false;
- } else {
- queues = 0;
- }
- } else if ((change > 0 && cp->alloc_txq <= eswitch->qs.value) ||
- change < 0) {
- queues = cp->alloc_txq + change;
- }
-
- if (queues) {
- cp->req_txq = queues;
- cp->req_rxq = queues;
- ice_vsi_close(cp);
- ice_vsi_rebuild(cp, ICE_VSI_FLAG_NO_INIT);
- ice_vsi_open(cp);
- } else if (!change) {
- /* change == 0 means that VSI wasn't open, open it here */
- ice_vsi_open(cp);
- }
-
- eswitch->qs.value += change;
- ice_eswitch_remap_rings_to_vectors(eswitch);
-}
-
-int
-ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
+static int
+ice_eswitch_attach(struct ice_pf *pf, struct ice_repr *repr, unsigned long *id)
{
- struct ice_repr *repr;
- int change = 1;
int err;
if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY)
@@ -661,31 +463,24 @@ ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
err = ice_eswitch_enable_switchdev(pf);
if (err)
return err;
- /* Control plane VSI is created with 1 queue as default */
- pf->eswitch.qs.to_reach -= 1;
- change = 0;
}
ice_eswitch_stop_reprs(pf);
- repr = ice_repr_add_vf(vf);
- if (IS_ERR(repr)) {
- err = PTR_ERR(repr);
+ err = repr->ops.add(repr);
+ if (err)
goto err_create_repr;
- }
err = ice_eswitch_setup_repr(pf, repr);
if (err)
goto err_setup_repr;
- err = xa_alloc(&pf->eswitch.reprs, &repr->id, repr,
- XA_LIMIT(1, INT_MAX), GFP_KERNEL);
+ err = xa_insert(&pf->eswitch.reprs, repr->id, repr, GFP_KERNEL);
if (err)
goto err_xa_alloc;
- vf->repr_id = repr->id;
+ *id = repr->id;
- ice_eswitch_cp_change_queues(&pf->eswitch, change);
ice_eswitch_start_reprs(pf);
return 0;
@@ -693,7 +488,7 @@ ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
err_xa_alloc:
ice_eswitch_release_repr(pf, repr);
err_setup_repr:
- ice_repr_rem_vf(repr);
+ repr->ops.rem(repr);
err_create_repr:
if (xa_empty(&pf->eswitch.reprs))
ice_eswitch_disable_switchdev(pf);
@@ -702,73 +497,139 @@ err_create_repr:
return err;
}
-void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf)
+/**
+ * ice_eswitch_attach_vf - attach VF to the eswitch
+ * @pf: pointer to PF structure
+ * @vf: pointer to VF structure to be attached
+ *
+ * During attach, a port representor for the VF is created.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int ice_eswitch_attach_vf(struct ice_pf *pf, struct ice_vf *vf)
{
- struct ice_repr *repr = xa_load(&pf->eswitch.reprs, vf->repr_id);
struct devlink *devlink = priv_to_devlink(pf);
+ struct ice_repr *repr;
+ int err;
- if (!repr)
- return;
+ if (!ice_is_eswitch_mode_switchdev(pf))
+ return 0;
+ repr = ice_repr_create_vf(vf);
+ if (IS_ERR(repr))
+ return PTR_ERR(repr);
+
+ devl_lock(devlink);
+ err = ice_eswitch_attach(pf, repr, &vf->repr_id);
+ if (err)
+ ice_repr_destroy(repr);
+ devl_unlock(devlink);
+
+ return err;
+}
+
+/**
+ * ice_eswitch_attach_sf - attach SF to the eswitch
+ * @pf: pointer to PF structure
+ * @sf: pointer to SF structure to be attached
+ *
+ * During attach, a port representor for the SF is created.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int ice_eswitch_attach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf)
+{
+ struct ice_repr *repr = ice_repr_create_sf(sf);
+ int err;
+
+ if (IS_ERR(repr))
+ return PTR_ERR(repr);
+
+ err = ice_eswitch_attach(pf, repr, &sf->repr_id);
+ if (err)
+ ice_repr_destroy(repr);
+
+ return err;
+}
+
+static void ice_eswitch_detach(struct ice_pf *pf, struct ice_repr *repr)
+{
ice_eswitch_stop_reprs(pf);
+ repr->ops.rem(repr);
+
xa_erase(&pf->eswitch.reprs, repr->id);
if (xa_empty(&pf->eswitch.reprs))
ice_eswitch_disable_switchdev(pf);
- else
- ice_eswitch_cp_change_queues(&pf->eswitch, -1);
ice_eswitch_release_repr(pf, repr);
- ice_repr_rem_vf(repr);
+ ice_repr_destroy(repr);
if (xa_empty(&pf->eswitch.reprs)) {
+ struct devlink *devlink = priv_to_devlink(pf);
+
/* since all port representors are destroyed, there is
* no point in keeping the nodes
*/
ice_devlink_rate_clear_tx_topology(ice_get_main_vsi(pf));
- devl_lock(devlink);
devl_rate_nodes_destroy(devlink);
- devl_unlock(devlink);
} else {
ice_eswitch_start_reprs(pf);
}
}
/**
- * ice_eswitch_rebuild - rebuild eswitch
+ * ice_eswitch_detach_vf - detach VF from the eswitch
* @pf: pointer to PF structure
+ * @vf: pointer to VF structure to be detached
*/
-int ice_eswitch_rebuild(struct ice_pf *pf)
+void ice_eswitch_detach_vf(struct ice_pf *pf, struct ice_vf *vf)
{
- struct ice_repr *repr;
- unsigned long id;
- int err;
+ struct ice_repr *repr = xa_load(&pf->eswitch.reprs, vf->repr_id);
+ struct devlink *devlink = priv_to_devlink(pf);
- if (!ice_is_switchdev_running(pf))
- return 0;
+ if (!repr)
+ return;
- err = ice_vsi_rebuild(pf->eswitch.control_vsi, ICE_VSI_FLAG_INIT);
- if (err)
- return err;
+ devl_lock(devlink);
+ ice_eswitch_detach(pf, repr);
+ devl_unlock(devlink);
+}
- xa_for_each(&pf->eswitch.reprs, id, repr)
- ice_eswitch_detach(pf, repr->vf);
+/**
+ * ice_eswitch_detach_sf - detach SF from the eswitch
+ * @pf: pointer to PF structure
+ * @sf: pointer to SF structure to be detached
+ */
+void ice_eswitch_detach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf)
+{
+ struct ice_repr *repr = xa_load(&pf->eswitch.reprs, sf->repr_id);
- return 0;
+ if (!repr)
+ return;
+
+ ice_eswitch_detach(pf, repr);
}
/**
- * ice_eswitch_reserve_cp_queues - reserve control plane VSI queues
- * @pf: pointer to PF structure
- * @change: how many more (or less) queues is needed
+ * ice_eswitch_get_target - get netdev based on src_vsi from descriptor
+ * @rx_ring: ring used to receive the packet
+ * @rx_desc: descriptor used to get src_vsi value
*
- * Remember to call ice_eswitch_attach/detach() the "change" times.
+ * Get the src_vsi value from the descriptor and look up the matching
+ * representor. If none is found, return rx_ring->netdev.
*/
-void ice_eswitch_reserve_cp_queues(struct ice_pf *pf, int change)
+struct net_device *ice_eswitch_get_target(struct ice_rx_ring *rx_ring,
+ union ice_32b_rx_flex_desc *rx_desc)
{
- if (pf->eswitch.qs.value + change < 0)
- return;
+ struct ice_eswitch *eswitch = &rx_ring->vsi->back->eswitch;
+ struct ice_32b_rx_flex_desc_nic_2 *desc;
+ struct ice_repr *repr;
+
+ desc = (struct ice_32b_rx_flex_desc_nic_2 *)rx_desc;
+ repr = xa_load(&eswitch->reprs, le16_to_cpu(desc->src_vsi));
+ if (!repr)
+ return rx_ring->netdev;
- pf->eswitch.qs.to_reach = pf->eswitch.qs.value + change;
- pf->eswitch.qs.is_reaching = true;
+ return repr->netdev;
}
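
As an aside on the new Rx fast path: the representor lookup above is keyed directly by the descriptor's src_vsi, with the ring's own (uplink) netdev as the fallback. A minimal standalone sketch of that pattern, with hypothetical names and a plain array standing in for the driver's xarray:

#include <stdio.h>

/* Hypothetical stand-ins for the driver's representor table and netdevs. */
struct netdev { const char *name; };

#define MAX_VSI 16
static struct netdev *repr_table[MAX_VSI];	/* index: src_vsi from the Rx descriptor */

/* Mirror of the lookup in ice_eswitch_get_target(): use the representor's
 * netdev when one is registered for src_vsi, otherwise fall back to the
 * ring's own netdev (uplink traffic).
 */
static struct netdev *get_target(unsigned int src_vsi, struct netdev *ring_netdev)
{
	if (src_vsi < MAX_VSI && repr_table[src_vsi])
		return repr_table[src_vsi];
	return ring_netdev;
}

int main(void)
{
	struct netdev uplink = { "eth0" }, vf_repr = { "eth0_vf0" };

	repr_table[5] = &vf_repr;			/* VF's VSI number is 5 in this sketch */
	printf("%s\n", get_target(5, &uplink)->name);	/* eth0_vf0 */
	printf("%s\n", get_target(2, &uplink)->name);	/* eth0 */
	return 0;
}
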
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h
index 1a288a03a79a..5c7dcf21b222 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.h
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h
@@ -5,12 +5,13 @@
#define _ICE_ESWITCH_H_
#include <net/devlink.h>
+#include "devlink/port.h"
#ifdef CONFIG_ICE_SWITCHDEV
-void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf);
-int
-ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf);
-int ice_eswitch_rebuild(struct ice_pf *pf);
+void ice_eswitch_detach_vf(struct ice_pf *pf, struct ice_vf *vf);
+void ice_eswitch_detach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf);
+int ice_eswitch_attach_vf(struct ice_pf *pf, struct ice_vf *vf);
+int ice_eswitch_attach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf);
int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode);
int
@@ -18,7 +19,7 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
struct netlink_ext_ack *extack);
bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf);
-void ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi);
+void ice_eswitch_update_repr(unsigned long *repr_id, struct ice_vsi *vsi);
void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf);
@@ -26,12 +27,26 @@ void ice_eswitch_set_target_vsi(struct sk_buff *skb,
struct ice_tx_offload_params *off);
netdev_tx_t
ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev);
-void ice_eswitch_reserve_cp_queues(struct ice_pf *pf, int change);
+struct net_device *ice_eswitch_get_target(struct ice_rx_ring *rx_ring,
+ union ice_32b_rx_flex_desc *rx_desc);
+
+int ice_eswitch_cfg_vsi(struct ice_vsi *vsi, const u8 *mac);
+void ice_eswitch_decfg_vsi(struct ice_vsi *vsi, const u8 *mac);
#else /* CONFIG_ICE_SWITCHDEV */
-static inline void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf) { }
+static inline void
+ice_eswitch_detach_vf(struct ice_pf *pf, struct ice_vf *vf) { }
+
+static inline void
+ice_eswitch_detach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf) { }
static inline int
-ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
+ice_eswitch_attach_vf(struct ice_pf *pf, struct ice_vf *vf)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_eswitch_attach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf)
{
return -EOPNOTSUPP;
}
@@ -43,17 +58,7 @@ ice_eswitch_set_target_vsi(struct sk_buff *skb,
struct ice_tx_offload_params *off) { }
static inline void
-ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi) { }
-
-static inline int ice_eswitch_configure(struct ice_pf *pf)
-{
- return 0;
-}
-
-static inline int ice_eswitch_rebuild(struct ice_pf *pf)
-{
- return -EOPNOTSUPP;
-}
+ice_eswitch_update_repr(unsigned long *repr_id, struct ice_vsi *vsi) { }
static inline int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
@@ -78,7 +83,18 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_BUSY;
}
-static inline void
-ice_eswitch_reserve_cp_queues(struct ice_pf *pf, int change) { }
+static inline struct net_device *
+ice_eswitch_get_target(struct ice_rx_ring *rx_ring,
+ union ice_32b_rx_flex_desc *rx_desc)
+{
+ return rx_ring->netdev;
+}
+
+static inline int ice_eswitch_cfg_vsi(struct ice_vsi *vsi, const u8 *mac)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void ice_eswitch_decfg_vsi(struct ice_vsi *vsi, const u8 *mac) { }
#endif /* CONFIG_ICE_SWITCHDEV */
#endif /* _ICE_ESWITCH_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch_br.c b/drivers/net/ethernet/intel/ice/ice_eswitch_br.c
index ac5beecd028b..cccb7ddf61c9 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch_br.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch_br.c
@@ -582,10 +582,13 @@ ice_eswitch_br_switchdev_event(struct notifier_block *nb,
return NOTIFY_DONE;
}
-static void ice_eswitch_br_fdb_flush(struct ice_esw_br *bridge)
+void ice_eswitch_br_fdb_flush(struct ice_esw_br *bridge)
{
struct ice_esw_br_fdb_entry *entry, *tmp;
+ if (!bridge)
+ return;
+
list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list)
ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, entry);
}
@@ -896,7 +899,8 @@ ice_eswitch_br_port_deinit(struct ice_esw_br *bridge,
if (br_port->type == ICE_ESWITCH_BR_UPLINK_PORT && vsi->back) {
vsi->back->br_port = NULL;
} else {
- struct ice_repr *repr = ice_repr_get_by_vsi(vsi);
+ struct ice_repr *repr =
+ ice_repr_get(vsi->back, br_port->repr_id);
if (repr)
repr->br_port = NULL;
@@ -937,6 +941,7 @@ ice_eswitch_br_vf_repr_port_init(struct ice_esw_br *bridge,
br_port->vsi = repr->src_vsi;
br_port->vsi_idx = br_port->vsi->idx;
br_port->type = ICE_ESWITCH_BR_VF_REPR_PORT;
+ br_port->repr_id = repr->id;
repr->br_port = br_port;
err = xa_insert(&bridge->ports, br_port->vsi_idx, br_port, GFP_KERNEL);
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch_br.h b/drivers/net/ethernet/intel/ice/ice_eswitch_br.h
index 85a8fadb2928..66a2c804338f 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch_br.h
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch_br.h
@@ -46,6 +46,7 @@ struct ice_esw_br_port {
enum ice_esw_br_port_type type;
u16 vsi_idx;
u16 pvid;
+ u32 repr_id;
struct xarray vlans;
};
@@ -116,5 +117,6 @@ void
ice_eswitch_br_offloads_deinit(struct ice_pf *pf);
int
ice_eswitch_br_offloads_init(struct ice_pf *pf);
+void ice_eswitch_br_fdb_flush(struct ice_esw_br *bridge);
#endif /* _ICE_ESWITCH_BR_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 78b833b3e1d7..969d4f8f9c02 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -10,6 +10,7 @@
#include "ice_lib.h"
#include "ice_dcb_lib.h"
#include <net/dcbnl.h>
+#include <net/libeth/rx.h>
struct ice_stats {
char stat_string[ETH_GSTRING_LEN];
@@ -340,7 +341,6 @@ static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
ICE_FLAG_VF_TRUE_PROMISC_ENA),
ICE_PRIV_FLAG("mdd-auto-reset-vf", ICE_FLAG_MDD_AUTO_RESET_VF),
ICE_PRIV_FLAG("vf-vlan-pruning", ICE_FLAG_VF_VLAN_PRUNING),
- ICE_PRIV_FLAG("legacy-rx", ICE_FLAG_LEGACY_RX),
};
#define ICE_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ice_gstrings_priv_flags)
@@ -463,28 +463,353 @@ ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
static int ice_get_regs_len(struct net_device __always_unused *netdev)
{
- return sizeof(ice_regs_dump_list);
+ return (sizeof(ice_regs_dump_list) +
+ sizeof(struct ice_regdump_to_ethtool));
+}
+
+/**
+ * ice_ethtool_get_maxspeed - Get the max speed for given lport
+ * @hw: pointer to the HW struct
+ * @lport: logical port for which max speed is requested
+ * @max_speed: return max speed for input lport
+ *
+ * Return: 0 on success, negative on failure.
+ */
+static int ice_ethtool_get_maxspeed(struct ice_hw *hw, u8 lport, u8 *max_speed)
+{
+ struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX] = {};
+ bool active_valid = false, pending_valid = true;
+ u8 option_count = ICE_AQC_PORT_OPT_MAX;
+ u8 active_idx = 0, pending_idx = 0;
+ int status;
+
+ status = ice_aq_get_port_options(hw, options, &option_count, lport,
+ true, &active_idx, &active_valid,
+ &pending_idx, &pending_valid);
+ if (status)
+ return -EIO;
+ if (!active_valid)
+ return -EINVAL;
+
+ *max_speed = options[active_idx].max_lane_speed & ICE_AQC_PORT_OPT_MAX_LANE_M;
+ return 0;
+}
+
+/**
+ * ice_is_serdes_muxed - returns whether serdes is muxed in hardware
+ * @hw: pointer to the HW struct
+ *
+ * Return: true when serdes is muxed, false when serdes is not muxed.
+ */
+static bool ice_is_serdes_muxed(struct ice_hw *hw)
+{
+ u32 reg_value = rd32(hw, GLGEN_SWITCH_MODE_CONFIG);
+
+ return FIELD_GET(GLGEN_SWITCH_MODE_CONFIG_25X4_QUAD_M, reg_value);
+}
+
+static int ice_map_port_topology_for_sfp(struct ice_port_topology *port_topology,
+ u8 lport, bool is_muxed)
+{
+ switch (lport) {
+ case 0:
+ port_topology->pcs_quad_select = 0;
+ port_topology->pcs_port = 0;
+ port_topology->primary_serdes_lane = 0;
+ break;
+ case 1:
+ port_topology->pcs_quad_select = 1;
+ port_topology->pcs_port = 0;
+ if (is_muxed)
+ port_topology->primary_serdes_lane = 2;
+ else
+ port_topology->primary_serdes_lane = 4;
+ break;
+ case 2:
+ port_topology->pcs_quad_select = 0;
+ port_topology->pcs_port = 1;
+ port_topology->primary_serdes_lane = 1;
+ break;
+ case 3:
+ port_topology->pcs_quad_select = 1;
+ port_topology->pcs_port = 1;
+ if (is_muxed)
+ port_topology->primary_serdes_lane = 3;
+ else
+ port_topology->primary_serdes_lane = 5;
+ break;
+ case 4:
+ port_topology->pcs_quad_select = 0;
+ port_topology->pcs_port = 2;
+ port_topology->primary_serdes_lane = 2;
+ break;
+ case 5:
+ port_topology->pcs_quad_select = 1;
+ port_topology->pcs_port = 2;
+ port_topology->primary_serdes_lane = 6;
+ break;
+ case 6:
+ port_topology->pcs_quad_select = 0;
+ port_topology->pcs_port = 3;
+ port_topology->primary_serdes_lane = 3;
+ break;
+ case 7:
+ port_topology->pcs_quad_select = 1;
+ port_topology->pcs_port = 3;
+ port_topology->primary_serdes_lane = 7;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ice_map_port_topology_for_qsfp(struct ice_port_topology *port_topology,
+ u8 lport, bool is_muxed)
+{
+ switch (lport) {
+ case 0:
+ port_topology->pcs_quad_select = 0;
+ port_topology->pcs_port = 0;
+ port_topology->primary_serdes_lane = 0;
+ break;
+ case 1:
+ port_topology->pcs_quad_select = 1;
+ port_topology->pcs_port = 0;
+ if (is_muxed)
+ port_topology->primary_serdes_lane = 2;
+ else
+ port_topology->primary_serdes_lane = 4;
+ break;
+ case 2:
+ port_topology->pcs_quad_select = 0;
+ port_topology->pcs_port = 1;
+ port_topology->primary_serdes_lane = 1;
+ break;
+ case 3:
+ port_topology->pcs_quad_select = 1;
+ port_topology->pcs_port = 1;
+ if (is_muxed)
+ port_topology->primary_serdes_lane = 3;
+ else
+ port_topology->primary_serdes_lane = 5;
+ break;
+ case 4:
+ port_topology->pcs_quad_select = 0;
+ port_topology->pcs_port = 2;
+ port_topology->primary_serdes_lane = 2;
+ break;
+ case 5:
+ port_topology->pcs_quad_select = 1;
+ port_topology->pcs_port = 2;
+ port_topology->primary_serdes_lane = 6;
+ break;
+ case 6:
+ port_topology->pcs_quad_select = 0;
+ port_topology->pcs_port = 3;
+ port_topology->primary_serdes_lane = 3;
+ break;
+ case 7:
+ port_topology->pcs_quad_select = 1;
+ port_topology->pcs_port = 3;
+ port_topology->primary_serdes_lane = 7;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_get_port_topology - get the physical topology (PCS quad, PCS port,
+ * primary serdes lane) for a logical port
+ * @hw: pointer to the HW struct
+ * @lport: logical port for which physical info requested
+ * @port_topology: buffer to hold port topology
+ *
+ * Return: 0 on success, negative on failure.
+ */
+static int ice_get_port_topology(struct ice_hw *hw, u8 lport,
+ struct ice_port_topology *port_topology)
+{
+ struct ice_aqc_get_link_topo cmd = {};
+ u16 node_handle = 0;
+ u8 cage_type = 0;
+ bool is_muxed;
+ int err;
+ u8 ctx;
+
+ ctx = ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE << ICE_AQC_LINK_TOPO_NODE_TYPE_S;
+ ctx |= ICE_AQC_LINK_TOPO_NODE_CTX_PORT << ICE_AQC_LINK_TOPO_NODE_CTX_S;
+ cmd.addr.topo_params.node_type_ctx = ctx;
+
+ err = ice_aq_get_netlist_node(hw, &cmd, &cage_type, &node_handle);
+ if (err)
+ return -EINVAL;
+
+ is_muxed = ice_is_serdes_muxed(hw);
+
+ if (cage_type == 0x11 || /* SFP+ */
+ cage_type == 0x12) { /* SFP28 */
+ port_topology->serdes_lane_count = 1;
+ err = ice_map_port_topology_for_sfp(port_topology, lport, is_muxed);
+ if (err)
+ return err;
+ } else if (cage_type == 0x13 || /* QSFP */
+ cage_type == 0x14) { /* QSFP28 */
+ u8 max_speed = 0;
+
+ err = ice_ethtool_get_maxspeed(hw, lport, &max_speed);
+ if (err)
+ return err;
+
+ if (max_speed == ICE_AQC_PORT_OPT_MAX_LANE_100G)
+ port_topology->serdes_lane_count = 4;
+ else if (max_speed == ICE_AQC_PORT_OPT_MAX_LANE_50G ||
+ max_speed == ICE_AQC_PORT_OPT_MAX_LANE_40G)
+ port_topology->serdes_lane_count = 2;
+ else
+ port_topology->serdes_lane_count = 1;
+
+ err = ice_map_port_topology_for_qsfp(port_topology, lport, is_muxed);
+ if (err)
+ return err;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
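
The lane-count selection above reduces to: SFP+/SFP28 cages are single-lane, while QSFP/QSFP28 cages use 4 lanes at 100G, 2 at 40G/50G, and 1 otherwise. A standalone sketch of just that decision, with hypothetical enum values standing in for the adminq cage-type and max-lane-speed codes:

#include <stdio.h>

/* Hypothetical stand-ins for the cage types and max-lane-speed codes used
 * above; the real values come from the ice adminq definitions.
 */
enum cage { CAGE_SFP = 0x11, CAGE_SFP28 = 0x12, CAGE_QSFP = 0x13, CAGE_QSFP28 = 0x14 };
enum speed { SPEED_100G, SPEED_50G, SPEED_40G, SPEED_25G };

/* Mirror of the serdes_lane_count selection in ice_get_port_topology(). */
static int serdes_lane_count(enum cage cage, enum speed max_speed)
{
	if (cage == CAGE_SFP || cage == CAGE_SFP28)
		return 1;				/* single-lane cages */
	if (cage == CAGE_QSFP || cage == CAGE_QSFP28) {
		if (max_speed == SPEED_100G)
			return 4;
		if (max_speed == SPEED_50G || max_speed == SPEED_40G)
			return 2;
		return 1;
	}
	return -1;					/* unknown cage type */
}

int main(void)
{
	printf("QSFP28 @100G -> %d lanes\n", serdes_lane_count(CAGE_QSFP28, SPEED_100G));
	printf("SFP28        -> %d lanes\n", serdes_lane_count(CAGE_SFP28, SPEED_25G));
	return 0;
}
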
+/**
+ * ice_get_tx_rx_equa - read serdes Tx/Rx equalization parameters
+ * @hw: pointer to the HW struct
+ * @serdes_num: serdes number to query
+ * @ptr: structure receiving all equalization parameters for the given serdes
+ *
+ * Return: 0 on success, negative on failure.
+ */
+static int ice_get_tx_rx_equa(struct ice_hw *hw, u8 serdes_num,
+ struct ice_serdes_equalization_to_ethtool *ptr)
+{
+ static const int tx = ICE_AQC_OP_CODE_TX_EQU;
+ static const int rx = ICE_AQC_OP_CODE_RX_EQU;
+ struct {
+ int data_in;
+ int opcode;
+ int *out;
+ } aq_params[] = {
+ { ICE_AQC_TX_EQU_PRE1, tx, &ptr->tx_equ_pre1 },
+ { ICE_AQC_TX_EQU_PRE3, tx, &ptr->tx_equ_pre3 },
+ { ICE_AQC_TX_EQU_ATTEN, tx, &ptr->tx_equ_atten },
+ { ICE_AQC_TX_EQU_POST1, tx, &ptr->tx_equ_post1 },
+ { ICE_AQC_TX_EQU_PRE2, tx, &ptr->tx_equ_pre2 },
+ { ICE_AQC_RX_EQU_PRE2, rx, &ptr->rx_equ_pre2 },
+ { ICE_AQC_RX_EQU_PRE1, rx, &ptr->rx_equ_pre1 },
+ { ICE_AQC_RX_EQU_POST1, rx, &ptr->rx_equ_post1 },
+ { ICE_AQC_RX_EQU_BFLF, rx, &ptr->rx_equ_bflf },
+ { ICE_AQC_RX_EQU_BFHF, rx, &ptr->rx_equ_bfhf },
+ { ICE_AQC_RX_EQU_CTLE_GAINHF, rx, &ptr->rx_equ_ctle_gainhf },
+ { ICE_AQC_RX_EQU_CTLE_GAINLF, rx, &ptr->rx_equ_ctle_gainlf },
+ { ICE_AQC_RX_EQU_CTLE_GAINDC, rx, &ptr->rx_equ_ctle_gaindc },
+ { ICE_AQC_RX_EQU_CTLE_BW, rx, &ptr->rx_equ_ctle_bw },
+ { ICE_AQC_RX_EQU_DFE_GAIN, rx, &ptr->rx_equ_dfe_gain },
+ { ICE_AQC_RX_EQU_DFE_GAIN2, rx, &ptr->rx_equ_dfe_gain_2 },
+ { ICE_AQC_RX_EQU_DFE_2, rx, &ptr->rx_equ_dfe_2 },
+ { ICE_AQC_RX_EQU_DFE_3, rx, &ptr->rx_equ_dfe_3 },
+ { ICE_AQC_RX_EQU_DFE_4, rx, &ptr->rx_equ_dfe_4 },
+ { ICE_AQC_RX_EQU_DFE_5, rx, &ptr->rx_equ_dfe_5 },
+ { ICE_AQC_RX_EQU_DFE_6, rx, &ptr->rx_equ_dfe_6 },
+ { ICE_AQC_RX_EQU_DFE_7, rx, &ptr->rx_equ_dfe_7 },
+ { ICE_AQC_RX_EQU_DFE_8, rx, &ptr->rx_equ_dfe_8 },
+ { ICE_AQC_RX_EQU_DFE_9, rx, &ptr->rx_equ_dfe_9 },
+ { ICE_AQC_RX_EQU_DFE_10, rx, &ptr->rx_equ_dfe_10 },
+ { ICE_AQC_RX_EQU_DFE_11, rx, &ptr->rx_equ_dfe_11 },
+ { ICE_AQC_RX_EQU_DFE_12, rx, &ptr->rx_equ_dfe_12 },
+ };
+ int err;
+
+ for (int i = 0; i < ARRAY_SIZE(aq_params); i++) {
+ err = ice_aq_get_phy_equalization(hw, aq_params[i].data_in,
+ aq_params[i].opcode,
+ serdes_num, aq_params[i].out);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * ice_get_extended_regs - dump serdes Tx/Rx equalization parameters per
+ * serdes lane for the port
+ * @netdev: pointer to net device structure
+ * @p: output buffer to fill requested register dump
+ *
+ * Return: 0 on success, negative on failure.
+ */
+static int ice_get_extended_regs(struct net_device *netdev, void *p)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_regdump_to_ethtool *ice_prv_regs_buf;
+ struct ice_port_topology port_topology = {};
+ struct ice_port_info *pi;
+ struct ice_pf *pf;
+ struct ice_hw *hw;
+ unsigned int i;
+ int err;
+
+ pf = np->vsi->back;
+ hw = &pf->hw;
+ pi = np->vsi->port_info;
+
+ /* Serdes parameters are only supported on the PF VSI */
+ if (np->vsi->type != ICE_VSI_PF || !pi)
+ return -EINVAL;
+
+ err = ice_get_port_topology(hw, pi->lport, &port_topology);
+ if (err)
+ return -EINVAL;
+ if (port_topology.serdes_lane_count > 4)
+ return -EINVAL;
+
+ ice_prv_regs_buf = p;
+
+ /* Get serdes equalization parameter for available serdes */
+ for (i = 0; i < port_topology.serdes_lane_count; i++) {
+ u8 serdes_num = 0;
+
+ serdes_num = port_topology.primary_serdes_lane + i;
+ err = ice_get_tx_rx_equa(hw, serdes_num,
+ &ice_prv_regs_buf->equalization[i]);
+ if (err)
+ return -EINVAL;
+ }
+
+ return 0;
}
static void
ice_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_pf *pf = np->vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
struct ice_hw *hw = &pf->hw;
u32 *regs_buf = (u32 *)p;
unsigned int i;
- regs->version = 1;
+ regs->version = 2;
for (i = 0; i < ARRAY_SIZE(ice_regs_dump_list); ++i)
regs_buf[i] = rd32(hw, ice_regs_dump_list[i]);
+
+ ice_get_extended_regs(netdev, (void *)&regs_buf[i]);
}
static u32 ice_get_msglevel(struct net_device *netdev)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_pf *pf = np->vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
#ifndef CONFIG_DYNAMIC_DEBUG
if (pf->hw.debug_mask)
@@ -497,8 +822,7 @@ static u32 ice_get_msglevel(struct net_device *netdev)
static void ice_set_msglevel(struct net_device *netdev, u32 data)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_pf *pf = np->vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
#ifndef CONFIG_DYNAMIC_DEBUG
if (ICE_DBG_USER & data)
@@ -510,10 +834,17 @@ static void ice_set_msglevel(struct net_device *netdev, u32 data)
#endif /* !CONFIG_DYNAMIC_DEBUG */
}
+static void ice_get_link_ext_stats(struct net_device *netdev,
+ struct ethtool_link_ext_stats *stats)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+
+ stats->link_down_events = pf->link_down_events;
+}
+
static int ice_get_eeprom_len(struct net_device *netdev)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_pf *pf = np->vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
return (int)pf->hw.flash.flash_size;
}
@@ -522,9 +853,7 @@ static int
ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
u8 *bytes)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *vsi = np->vsi;
- struct ice_pf *pf = vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
struct ice_hw *hw = &pf->hw;
struct device *dev;
int ret;
@@ -543,7 +872,7 @@ ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
ret = ice_acquire_nvm(hw, ICE_RES_READ);
if (ret) {
dev_err(dev, "ice_acquire_nvm failed, err %d aq_err %s\n",
- ret, ice_aq_str(hw->adminq.sq_last_status));
+ ret, libie_aq_str(hw->adminq.sq_last_status));
goto out;
}
@@ -551,7 +880,7 @@ ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
false);
if (ret) {
dev_err(dev, "ice_read_flat_nvm failed, err %d aq_err %s\n",
- ret, ice_aq_str(hw->adminq.sq_last_status));
+ ret, libie_aq_str(hw->adminq.sq_last_status));
goto release;
}
@@ -623,8 +952,7 @@ static u64 ice_link_test(struct net_device *netdev)
*/
static u64 ice_eeprom_test(struct net_device *netdev)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_pf *pf = np->vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
netdev_info(netdev, "EEPROM test\n");
return !!(ice_nvm_validate_checksum(&pf->hw));
@@ -903,8 +1231,9 @@ static int ice_diag_send(struct ice_tx_ring *tx_ring, u8 *data, u16 size)
*/
static int ice_lbtest_receive_frames(struct ice_rx_ring *rx_ring)
{
- struct ice_rx_buf *rx_buf;
+ struct libeth_fqe *rx_buf;
int valid_frames, i;
+ struct page *page;
u8 *received_buf;
valid_frames = 0;
@@ -919,8 +1248,10 @@ static int ice_lbtest_receive_frames(struct ice_rx_ring *rx_ring)
cpu_to_le16(BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)))))
continue;
- rx_buf = &rx_ring->rx_buf[i];
- received_buf = page_address(rx_buf->page) + rx_buf->page_offset;
+ rx_buf = &rx_ring->rx_fqes[i];
+ page = __netmem_to_page(rx_buf->netmem);
+ received_buf = page_address(page) + rx_buf->offset +
+ page->pp->p.offset;
if (ice_lbtest_check_frame(received_buf))
valid_frames++;
@@ -938,9 +1269,8 @@ static int ice_lbtest_receive_frames(struct ice_rx_ring *rx_ring)
*/
static u64 ice_loopback_test(struct net_device *netdev)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *orig_vsi = np->vsi, *test_vsi;
- struct ice_pf *pf = orig_vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_vsi *test_vsi;
u8 *tx_frame __free(kfree) = NULL;
u8 broadcast[ETH_ALEN], ret = 0;
int num_frames, valid_frames;
@@ -1029,8 +1359,7 @@ lbtest_vsi_close:
*/
static u64 ice_intr_test(struct net_device *netdev)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_pf *pf = np->vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
u16 swic_old = pf->sw_int_count;
netdev_info(netdev, "interrupt test\n");
@@ -1058,9 +1387,8 @@ static void
ice_self_test(struct net_device *netdev, struct ethtool_test *eth_test,
u64 *data)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
bool if_running = netif_running(netdev);
- struct ice_pf *pf = np->vsi->back;
struct device *dev;
dev = ice_pf_to_dev(pf);
@@ -1384,9 +1712,7 @@ static int ice_nway_reset(struct net_device *netdev)
*/
static u32 ice_get_priv_flags(struct net_device *netdev)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *vsi = np->vsi;
- struct ice_pf *pf = vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
u32 i, ret_flags = 0;
for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) {
@@ -1492,7 +1818,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
/* Remove rule to direct LLDP packets to default VSI.
* The FW LLDP engine will now be consuming them.
*/
- ice_cfg_sw_lldp(vsi, false, false);
+ ice_cfg_sw_rx_lldp(vsi->back, false);
/* AQ command to start FW LLDP agent will return an
* error if the agent is already started
@@ -1533,10 +1859,6 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
ice_nway_reset(netdev);
}
}
- if (test_bit(ICE_FLAG_LEGACY_RX, change_flags)) {
- /* down and up VSI so that changes of Rx cfg are reflected. */
- ice_down_up(vsi);
- }
/* don't allow modification of this flag when a single VF is in
* promiscuous mode because it's not supported
*/
@@ -2462,14 +2784,7 @@ done:
return err;
}
-/**
- * ice_parse_hdrs - parses headers from RSS hash input
- * @nfc: ethtool rxnfc command
- *
- * This function parses the rxnfc command and returns intended
- * header types for RSS configuration
- */
-static u32 ice_parse_hdrs(struct ethtool_rxnfc *nfc)
+static u32 ice_parse_hdrs(const struct ethtool_rxfh_fields *nfc)
{
u32 hdrs = ICE_FLOW_SEG_HDR_NONE;
@@ -2534,15 +2849,7 @@ static u32 ice_parse_hdrs(struct ethtool_rxnfc *nfc)
return hdrs;
}
-/**
- * ice_parse_hash_flds - parses hash fields from RSS hash input
- * @nfc: ethtool rxnfc command
- * @symm: true if Symmetric Topelitz is set
- *
- * This function parses the rxnfc command and returns intended
- * hash fields for RSS configuration
- */
-static u64 ice_parse_hash_flds(struct ethtool_rxnfc *nfc, bool symm)
+static u64 ice_parse_hash_flds(const struct ethtool_rxfh_fields *nfc, bool symm)
{
u64 hfld = ICE_HASH_INVALID;
@@ -2639,16 +2946,13 @@ static u64 ice_parse_hash_flds(struct ethtool_rxnfc *nfc, bool symm)
return hfld;
}
-/**
- * ice_set_rss_hash_opt - Enable/Disable flow types for RSS hash
- * @vsi: the VSI being configured
- * @nfc: ethtool rxnfc command
- *
- * Returns Success if the flow input set is supported.
- */
static int
-ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
+ice_set_rxfh_fields(struct net_device *netdev,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct ice_rss_hash_cfg cfg;
struct device *dev;
@@ -2694,14 +2998,11 @@ ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
return 0;
}
-/**
- * ice_get_rss_hash_opt - Retrieve hash fields for a given flow-type
- * @vsi: the VSI being configured
- * @nfc: ethtool rxnfc command
- */
-static void
-ice_get_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
+static int
+ice_get_rxfh_fields(struct net_device *netdev, struct ethtool_rxfh_fields *nfc)
{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct device *dev;
u64 hash_flds;
@@ -2714,21 +3015,21 @@ ice_get_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
if (ice_is_safe_mode(pf)) {
dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
vsi->vsi_num);
- return;
+ return 0;
}
hdrs = ice_parse_hdrs(nfc);
if (hdrs == ICE_FLOW_SEG_HDR_NONE) {
dev_dbg(dev, "Header type is not valid, vsi num = %d\n",
vsi->vsi_num);
- return;
+ return 0;
}
hash_flds = ice_get_rss_cfg(&pf->hw, vsi->idx, hdrs, &symm);
if (hash_flds == ICE_HASH_INVALID) {
dev_dbg(dev, "No hash fields found for the given header type, vsi num = %d\n",
vsi->vsi_num);
- return;
+ return 0;
}
if (hash_flds & ICE_FLOW_HASH_FLD_IPV4_SA ||
@@ -2755,6 +3056,8 @@ ice_get_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
hash_flds & ICE_FLOW_HASH_FLD_GTPU_UP_TEID ||
hash_flds & ICE_FLOW_HASH_FLD_GTPU_DWN_TEID)
nfc->data |= (u64)RXH_GTP_TEID;
+
+ return 0;
}
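
The new get_rxfh_fields/set_rxfh_fields callbacks operate on the same RXH_* hash-field bitmask that the old ETHTOOL_GRXFH/SRXFH paths consumed. A small userspace sketch composing such a bitmask, assuming only the uapi <linux/ethtool.h> header:

#include <stdio.h>
#include <linux/ethtool.h>	/* RXH_* hash field flags (uapi) */

int main(void)
{
	/* Typical 4-tuple hashing request for TCP: IP src/dst plus L4 ports,
	 * i.e. the bits ice_parse_hash_flds() translates into ICE_FLOW_HASH_*.
	 */
	unsigned long long data = RXH_IP_SRC | RXH_IP_DST |
				  RXH_L4_B_0_1 | RXH_L4_B_2_3;

	printf("rx-flow-hash data: 0x%llx\n", data);
	return 0;
}
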
/**
@@ -2774,8 +3077,6 @@ static int ice_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
return ice_add_fdir_ethtool(vsi, cmd);
case ETHTOOL_SRXCLSRLDEL:
return ice_del_fdir_ethtool(vsi, cmd);
- case ETHTOOL_SRXFH:
- return ice_set_rss_hash_opt(vsi, cmd);
default:
break;
}
@@ -2783,6 +3084,20 @@ static int ice_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
}
/**
+ * ice_get_rx_ring_count - get RX ring count
+ * @netdev: network interface device structure
+ *
+ * Return: number of RX rings.
+ */
+static u32 ice_get_rx_ring_count(struct net_device *netdev)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+
+ return vsi->rss_size;
+}
+
+/**
* ice_get_rxnfc - command to get Rx flow classification rules
* @netdev: network interface device structure
* @cmd: ethtool rxnfc command
@@ -2802,10 +3117,6 @@ ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
hw = &vsi->back->hw;
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = vsi->rss_size;
- ret = 0;
- break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = hw->fdir_active_fltr;
/* report total rule count */
@@ -2818,10 +3129,6 @@ ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
case ETHTOOL_GRXCLSRLALL:
ret = ice_get_fdir_fltr_ids(hw, cmd, (u32 *)rule_locs);
break;
- case ETHTOOL_GRXFH:
- ice_get_rss_hash_opt(vsi, cmd);
- ret = 0;
- break;
default:
break;
}
@@ -2836,9 +3143,11 @@ ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
+ struct ice_hw *hw;
- ring->rx_max_pending = ICE_MAX_NUM_DESC;
- ring->tx_max_pending = ICE_MAX_NUM_DESC;
+ hw = &vsi->back->hw;
+ ring->rx_max_pending = ICE_MAX_NUM_DESC_BY_MAC(hw);
+ ring->tx_max_pending = ICE_MAX_NUM_DESC_BY_MAC(hw);
if (vsi->tx_rings && vsi->rx_rings) {
ring->rx_pending = vsi->rx_rings[0]->count;
ring->tx_pending = vsi->tx_rings[0]->count;
@@ -2852,6 +3161,10 @@ ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
ring->rx_jumbo_max_pending = 0;
ring->rx_mini_pending = 0;
ring->rx_jumbo_pending = 0;
+
+ kernel_ring->tcp_data_split = vsi->hsplit ?
+ ETHTOOL_TCP_DATA_SPLIT_ENABLED :
+ ETHTOOL_TCP_DATA_SPLIT_DISABLED;
}
static int
@@ -2866,15 +3179,17 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
int i, timeout = 50, err = 0;
+ struct ice_hw *hw = &pf->hw;
u16 new_rx_cnt, new_tx_cnt;
+ bool hsplit;
- if (ring->tx_pending > ICE_MAX_NUM_DESC ||
+ if (ring->tx_pending > ICE_MAX_NUM_DESC_BY_MAC(hw) ||
ring->tx_pending < ICE_MIN_NUM_DESC ||
- ring->rx_pending > ICE_MAX_NUM_DESC ||
+ ring->rx_pending > ICE_MAX_NUM_DESC_BY_MAC(hw) ||
ring->rx_pending < ICE_MIN_NUM_DESC) {
netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d] (increment %d)\n",
ring->tx_pending, ring->rx_pending,
- ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC,
+ ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC_BY_MAC(hw),
ICE_REQ_DESC_MULTIPLE);
return -EINVAL;
}
@@ -2892,9 +3207,12 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
netdev_info(netdev, "Requested Rx descriptor count rounded up to %d\n",
new_rx_cnt);
+ hsplit = kernel_ring->tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED;
+
/* if nothing to do return success */
if (new_tx_cnt == vsi->tx_rings[0]->count &&
- new_rx_cnt == vsi->rx_rings[0]->count) {
+ new_rx_cnt == vsi->rx_rings[0]->count &&
+ hsplit == vsi->hsplit) {
netdev_dbg(netdev, "Nothing to change, descriptor count is same as requested\n");
return 0;
}
@@ -2924,6 +3242,8 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
vsi->xdp_rings[i]->count = new_tx_cnt;
vsi->num_tx_desc = (u16)new_tx_cnt;
vsi->num_rx_desc = (u16)new_rx_cnt;
+ vsi->hsplit = hsplit;
+
netdev_dbg(netdev, "Link is down, descriptor count change happens when link is brought up\n");
goto done;
}
@@ -2947,6 +3267,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
tx_rings[i].count = new_tx_cnt;
tx_rings[i].desc = NULL;
tx_rings[i].tx_buf = NULL;
+ tx_rings[i].tstamp_ring = NULL;
tx_rings[i].tx_tstamps = &pf->ptp.port.tx;
err = ice_setup_tx_ring(&tx_rings[i]);
if (err) {
@@ -3006,7 +3327,8 @@ process_rx:
rx_rings[i].count = new_rx_cnt;
rx_rings[i].cached_phctime = pf->ptp.cached_phc_time;
rx_rings[i].desc = NULL;
- rx_rings[i].rx_buf = NULL;
+ rx_rings[i].xdp_buf = NULL;
+
/* this is to allow wr32 to have something to write to
* during early allocation of Rx buffers
*/
@@ -3015,10 +3337,6 @@ process_rx:
err = ice_setup_rx_ring(&rx_rings[i]);
if (err)
goto rx_unwind;
-
- /* allocate Rx buffers */
- err = ice_alloc_rx_bufs(&rx_rings[i],
- ICE_RX_DESC_UNUSED(&rx_rings[i]));
rx_unwind:
if (err) {
while (i) {
@@ -3032,6 +3350,8 @@ rx_unwind:
}
process_link:
+ vsi->hsplit = hsplit;
+
/* Bring interface down, copy in the new ring info, then restore the
* interface. if VSI is up, bring it down and then back up
*/
@@ -3231,15 +3551,15 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
if (aq_failures & ICE_SET_FC_AQ_FAIL_GET) {
netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
err = -EAGAIN;
} else if (aq_failures & ICE_SET_FC_AQ_FAIL_SET) {
netdev_info(netdev, "Set fc failed on the set_phy_config call with err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
err = -EAGAIN;
} else if (aq_failures & ICE_SET_FC_AQ_FAIL_UPDATE) {
netdev_info(netdev, "Set fc failed on the get_link_info call with err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
err = -EAGAIN;
}
@@ -3281,11 +3601,10 @@ static int
ice_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
- u32 rss_context = rxfh->rss_context;
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
u16 qcount, offset;
- int err, num_tc, i;
+ int err, i;
u8 *lut;
if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
@@ -3293,24 +3612,8 @@ ice_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
return -EOPNOTSUPP;
}
- if (rss_context && !ice_is_adq_active(pf)) {
- netdev_err(netdev, "RSS context cannot be non-zero when ADQ is not configured.\n");
- return -EINVAL;
- }
-
- qcount = vsi->mqprio_qopt.qopt.count[rss_context];
- offset = vsi->mqprio_qopt.qopt.offset[rss_context];
-
- if (rss_context && ice_is_adq_active(pf)) {
- num_tc = vsi->mqprio_qopt.qopt.num_tc;
- if (rss_context >= num_tc) {
- netdev_err(netdev, "RSS context:%d > num_tc:%d\n",
- rss_context, num_tc);
- return -EINVAL;
- }
- /* Use channel VSI of given TC */
- vsi = vsi->tc_map_vsi[rss_context];
- }
+ qcount = vsi->mqprio_qopt.qopt.count[0];
+ offset = vsi->mqprio_qopt.qopt.offset[0];
rxfh->hfunc = ETH_RSS_HASH_TOP;
if (vsi->rss_hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ)
@@ -3370,9 +3673,6 @@ ice_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (rxfh->rss_context)
- return -EOPNOTSUPP;
-
if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
/* RSS not supported return error here */
netdev_warn(netdev, "RSS is not configured on this VSI!\n");
@@ -3434,7 +3734,7 @@ ice_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
}
static int
-ice_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
+ice_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info)
{
struct ice_pf *pf = ice_netdev_to_pf(dev);
@@ -3443,8 +3743,6 @@ ice_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
return ethtool_op_get_ts_info(dev, info);
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
@@ -3464,8 +3762,7 @@ ice_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
*/
static int ice_get_max_txq(struct ice_pf *pf)
{
- return min3(pf->num_lan_msix, (u16)num_online_cpus(),
- (u16)pf->hw.func_caps.common_cap.num_txq);
+ return min(num_online_cpus(), pf->hw.func_caps.common_cap.num_txq);
}
/**
@@ -3474,8 +3771,7 @@ static int ice_get_max_txq(struct ice_pf *pf)
*/
static int ice_get_max_rxq(struct ice_pf *pf)
{
- return min3(pf->num_lan_msix, (u16)num_online_cpus(),
- (u16)pf->hw.func_caps.common_cap.num_rxq);
+ return min(num_online_cpus(), pf->hw.func_caps.common_cap.num_rxq);
}
/**
@@ -3493,8 +3789,7 @@ static u32 ice_get_combined_cnt(struct ice_vsi *vsi)
ice_for_each_q_vector(vsi, q_idx) {
struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
- if (q_vector->rx.rx_ring && q_vector->tx.tx_ring)
- combined++;
+ combined += min(q_vector->num_ring_tx, q_vector->num_ring_rx);
}
return combined;
@@ -3575,7 +3870,7 @@ static int ice_vsi_set_dflt_rss_lut(struct ice_vsi *vsi, int req_rss_size)
err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
if (err)
dev_err(dev, "Cannot set RSS lut, err %d aq_err %s\n", err,
- ice_aq_str(hw->adminq.sq_last_status));
+ libie_aq_str(hw->adminq.sq_last_status));
kfree(lut);
return err;
@@ -3593,7 +3888,6 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
struct ice_pf *pf = vsi->back;
int new_rx = 0, new_tx = 0;
bool locked = false;
- u32 curr_combined;
int ret = 0;
/* do not support changing channels in Safe Mode */
@@ -3615,22 +3909,8 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
return -EOPNOTSUPP;
}
- curr_combined = ice_get_combined_cnt(vsi);
-
- /* these checks are for cases where user didn't specify a particular
- * value on cmd line but we get non-zero value anyway via
- * get_channels(); look at ethtool.c in ethtool repository (the user
- * space part), particularly, do_schannels() routine
- */
- if (ch->rx_count == vsi->num_rxq - curr_combined)
- ch->rx_count = 0;
- if (ch->tx_count == vsi->num_txq - curr_combined)
- ch->tx_count = 0;
- if (ch->combined_count == curr_combined)
- ch->combined_count = 0;
-
- if (!(ch->combined_count || (ch->rx_count && ch->tx_count))) {
- netdev_err(dev, "Please specify at least 1 Rx and 1 Tx channel\n");
+ if (ch->rx_count && ch->tx_count) {
+ netdev_err(dev, "Dedicated RX or TX channels cannot be used simultaneously\n");
return -EINVAL;
}
@@ -3658,11 +3938,11 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
return -EINVAL;
}
- if (pf->adev) {
+ if (pf->cdev_info && pf->cdev_info->adev) {
mutex_lock(&pf->adev_mutex);
- device_lock(&pf->adev->dev);
+ device_lock(&pf->cdev_info->adev->dev);
locked = true;
- if (pf->adev->dev.driver) {
+ if (pf->cdev_info->adev->dev.driver) {
netdev_err(dev, "Cannot change channels when RDMA is active\n");
ret = -EBUSY;
goto adev_unlock;
@@ -3681,7 +3961,7 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
adev_unlock:
if (locked) {
- device_unlock(&pf->adev->dev);
+ device_unlock(&pf->cdev_info->adev->dev);
mutex_unlock(&pf->adev_mutex);
}
return ret;
@@ -4080,7 +4360,7 @@ ice_repr_get_drvinfo(struct net_device *netdev,
{
struct ice_repr *repr = ice_netdev_to_repr(netdev);
- if (ice_check_vf_ready_for_cfg(repr->vf))
+ if (repr->ops.ready(repr))
return;
__ice_get_drvinfo(netdev, drvinfo, repr->src_vsi);
@@ -4092,8 +4372,7 @@ ice_repr_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
struct ice_repr *repr = ice_netdev_to_repr(netdev);
/* for port representors only ETH_SS_STATS is supported */
- if (ice_check_vf_ready_for_cfg(repr->vf) ||
- stringset != ETH_SS_STATS)
+ if (repr->ops.ready(repr) || stringset != ETH_SS_STATS)
return;
__ice_get_strings(netdev, stringset, data, repr->src_vsi);
@@ -4106,7 +4385,7 @@ ice_repr_get_ethtool_stats(struct net_device *netdev,
{
struct ice_repr *repr = ice_netdev_to_repr(netdev);
- if (ice_check_vf_ready_for_cfg(repr->vf))
+ if (repr->ops.ready(repr))
return;
__ice_get_ethtool_stats(netdev, stats, data, repr->src_vsi);
@@ -4143,9 +4422,7 @@ static int
ice_get_module_info(struct net_device *netdev,
struct ethtool_modinfo *modinfo)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *vsi = np->vsi;
- struct ice_pf *pf = vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
struct ice_hw *hw = &pf->hw;
u8 sff8472_comp = 0;
u8 sff8472_swap = 0;
@@ -4217,12 +4494,10 @@ static int
ice_get_module_eeprom(struct net_device *netdev,
struct ethtool_eeprom *ee, u8 *data)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
#define SFF_READ_BLOCK_SIZE 8
u8 value[SFF_READ_BLOCK_SIZE] = { 0 };
u8 addr = ICE_I2C_EEPROM_DEV_ADDR;
- struct ice_vsi *vsi = np->vsi;
- struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
bool is_sfp = false;
unsigned int i, j;
@@ -4297,14 +4572,275 @@ ice_get_module_eeprom(struct net_device *netdev,
return 0;
}
+/**
+ * ice_get_port_fec_stats - returns FEC correctable, uncorrectable stats per
+ * pcsquad, pcsport
+ * @hw: pointer to the HW struct
+ * @pcs_quad: pcsquad for input port
+ * @pcs_port: pcsport for input port
+ * @fec_stats: buffer to hold FEC statistics for given port
+ *
+ * Return: 0 on success, negative on failure.
+ */
+static int ice_get_port_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port,
+ struct ethtool_fec_stats *fec_stats)
+{
+ u32 fec_uncorr_low_val = 0, fec_uncorr_high_val = 0;
+ u32 fec_corr_low_val = 0, fec_corr_high_val = 0;
+ int err;
+
+ if (pcs_quad > 1 || pcs_port > 3)
+ return -EINVAL;
+
+ err = ice_aq_get_fec_stats(hw, pcs_quad, pcs_port, ICE_FEC_CORR_LOW,
+ &fec_corr_low_val);
+ if (err)
+ return err;
+
+ err = ice_aq_get_fec_stats(hw, pcs_quad, pcs_port, ICE_FEC_CORR_HIGH,
+ &fec_corr_high_val);
+ if (err)
+ return err;
+
+ err = ice_aq_get_fec_stats(hw, pcs_quad, pcs_port,
+ ICE_FEC_UNCORR_LOW,
+ &fec_uncorr_low_val);
+ if (err)
+ return err;
+
+ err = ice_aq_get_fec_stats(hw, pcs_quad, pcs_port,
+ ICE_FEC_UNCORR_HIGH,
+ &fec_uncorr_high_val);
+ if (err)
+ return err;
+
+ fec_stats->corrected_blocks.total = (fec_corr_high_val << 16) +
+ fec_corr_low_val;
+ fec_stats->uncorrectable_blocks.total = (fec_uncorr_high_val << 16) +
+ fec_uncorr_low_val;
+ return 0;
+}
+
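
Each FEC counter is read back as separate low and high 16-bit halves and recombined as shown above. A standalone sketch of the arithmetic (widened to 64 bits here for clarity; names are illustrative):

#include <stdio.h>
#include <stdint.h>

/* The adminq reports each FEC counter as two 16-bit halves; recombine them
 * the same way ice_get_port_fec_stats() does.
 */
static uint64_t fec_total(uint32_t high, uint32_t low)
{
	return ((uint64_t)high << 16) + low;
}

int main(void)
{
	/* e.g. high = 0x0002, low = 0x1234 -> 0x21234 corrected blocks */
	printf("0x%llx\n", (unsigned long long)fec_total(0x0002, 0x1234));
	return 0;
}
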
+/**
+ * ice_get_fec_stats - returns FEC correctable, uncorrectable stats per netdev
+ * @netdev: network interface device structure
+ * @fec_stats: buffer to hold FEC statistics for given port
+ * @hist: buffer to put FEC histogram statistics for given port
+ *
+ */
+static void ice_get_fec_stats(struct net_device *netdev,
+ struct ethtool_fec_stats *fec_stats,
+ struct ethtool_fec_hist *hist)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_port_topology port_topology;
+ struct ice_port_info *pi;
+ struct ice_pf *pf;
+ struct ice_hw *hw;
+ int err;
+
+ pf = np->vsi->back;
+ hw = &pf->hw;
+ pi = np->vsi->port_info;
+
+ /* Serdes parameters are only supported on the PF VSI */
+ if (np->vsi->type != ICE_VSI_PF || !pi)
+ return;
+
+ err = ice_get_port_topology(hw, pi->lport, &port_topology);
+ if (err) {
+ netdev_info(netdev, "Extended register dump failed Lport %d\n",
+ pi->lport);
+ return;
+ }
+
+ /* Get FEC correctable, uncorrectable counter */
+ err = ice_get_port_fec_stats(hw, port_topology.pcs_quad_select,
+ port_topology.pcs_port, fec_stats);
+ if (err)
+ netdev_info(netdev, "FEC stats get failed Lport %d Err %d\n",
+ pi->lport, err);
+}
+
+static void ice_get_eth_mac_stats(struct net_device *netdev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_hw_port_stats *ps = &pf->stats;
+
+ mac_stats->FramesTransmittedOK = ps->eth.tx_unicast +
+ ps->eth.tx_multicast +
+ ps->eth.tx_broadcast;
+ mac_stats->FramesReceivedOK = ps->eth.rx_unicast +
+ ps->eth.rx_multicast +
+ ps->eth.rx_broadcast;
+ mac_stats->FrameCheckSequenceErrors = ps->crc_errors;
+ mac_stats->OctetsTransmittedOK = ps->eth.tx_bytes;
+ mac_stats->OctetsReceivedOK = ps->eth.rx_bytes;
+ mac_stats->MulticastFramesXmittedOK = ps->eth.tx_multicast;
+ mac_stats->BroadcastFramesXmittedOK = ps->eth.tx_broadcast;
+ mac_stats->MulticastFramesReceivedOK = ps->eth.rx_multicast;
+ mac_stats->BroadcastFramesReceivedOK = ps->eth.rx_broadcast;
+ mac_stats->InRangeLengthErrors = ps->rx_len_errors;
+ mac_stats->FrameTooLongErrors = ps->rx_oversize;
+}
+
+static void ice_get_pause_stats(struct net_device *netdev,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_hw_port_stats *ps = &pf->stats;
+
+ pause_stats->tx_pause_frames = ps->link_xon_tx + ps->link_xoff_tx;
+ pause_stats->rx_pause_frames = ps->link_xon_rx + ps->link_xoff_rx;
+}
+
+static const struct ethtool_rmon_hist_range ice_rmon_ranges[] = {
+ { 0, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1522 },
+ { 1523, 9522 },
+ {}
+};
+
+static void ice_get_rmon_stats(struct net_device *netdev,
+ struct ethtool_rmon_stats *rmon,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_hw_port_stats *ps = &pf->stats;
+
+ rmon->undersize_pkts = ps->rx_undersize;
+ rmon->oversize_pkts = ps->rx_oversize;
+ rmon->fragments = ps->rx_fragments;
+ rmon->jabbers = ps->rx_jabber;
+
+ rmon->hist[0] = ps->rx_size_64;
+ rmon->hist[1] = ps->rx_size_127;
+ rmon->hist[2] = ps->rx_size_255;
+ rmon->hist[3] = ps->rx_size_511;
+ rmon->hist[4] = ps->rx_size_1023;
+ rmon->hist[5] = ps->rx_size_1522;
+ rmon->hist[6] = ps->rx_size_big;
+
+ rmon->hist_tx[0] = ps->tx_size_64;
+ rmon->hist_tx[1] = ps->tx_size_127;
+ rmon->hist_tx[2] = ps->tx_size_255;
+ rmon->hist_tx[3] = ps->tx_size_511;
+ rmon->hist_tx[4] = ps->tx_size_1023;
+ rmon->hist_tx[5] = ps->tx_size_1522;
+ rmon->hist_tx[6] = ps->tx_size_big;
+
+ *ranges = ice_rmon_ranges;
+}
+
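
Each rmon->hist[i] / hist_tx[i] slot is reported against ice_rmon_ranges[i] positionally. A standalone sketch of that pairing, with hypothetical counter values standing in for rx_size_64..rx_size_big:

#include <stdio.h>

struct range { unsigned int lo, hi; };

/* Same buckets as ice_rmon_ranges above; hist[i] is reported for ranges[i]. */
static const struct range ranges[] = {
	{ 0, 64 }, { 65, 127 }, { 128, 255 }, { 256, 511 },
	{ 512, 1023 }, { 1024, 1522 }, { 1523, 9522 },
};

int main(void)
{
	/* Hypothetical per-bucket Rx frame counts */
	unsigned long long hist[7] = { 10, 20, 30, 40, 50, 60, 70 };

	for (unsigned int i = 0; i < sizeof(ranges) / sizeof(ranges[0]); i++)
		printf("%u-%u bytes: %llu frames\n",
		       ranges[i].lo, ranges[i].hi, hist[i]);
	return 0;
}
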
+/**
+ * ice_get_ts_stats - provide timestamping stats
+ * @netdev: the netdevice pointer from ethtool
+ * @ts_stats: the ethtool data structure to fill in
+ */
+static void ice_get_ts_stats(struct net_device *netdev,
+ struct ethtool_ts_stats *ts_stats)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_ptp *ptp = &pf->ptp;
+
+ ts_stats->pkts = ptp->tx_hwtstamp_good;
+ ts_stats->err = ptp->tx_hwtstamp_skipped +
+ ptp->tx_hwtstamp_flushed +
+ ptp->tx_hwtstamp_discarded;
+ ts_stats->lost = ptp->tx_hwtstamp_timeouts;
+}
+
+#define ICE_ETHTOOL_PFR (ETH_RESET_IRQ | ETH_RESET_DMA | \
+ ETH_RESET_FILTER | ETH_RESET_OFFLOAD)
+
+#define ICE_ETHTOOL_CORER ((ICE_ETHTOOL_PFR | ETH_RESET_RAM) << \
+ ETH_RESET_SHARED_SHIFT)
+
+#define ICE_ETHTOOL_GLOBR (ICE_ETHTOOL_CORER | \
+ (ETH_RESET_MAC << ETH_RESET_SHARED_SHIFT) | \
+ (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT))
+
+#define ICE_ETHTOOL_VFR ICE_ETHTOOL_PFR
+
+/**
+ * ice_ethtool_reset - triggers a given type of reset
+ * @dev: network interface device structure
+ * @flags: set of reset flags
+ *
+ * Return: 0 on success, -EOPNOTSUPP when using unsupported set of flags.
+ */
+static int ice_ethtool_reset(struct net_device *dev, u32 *flags)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(dev);
+ enum ice_reset_req reset;
+
+ switch (*flags) {
+ case ICE_ETHTOOL_CORER:
+ reset = ICE_RESET_CORER;
+ break;
+ case ICE_ETHTOOL_GLOBR:
+ reset = ICE_RESET_GLOBR;
+ break;
+ case ICE_ETHTOOL_PFR:
+ reset = ICE_RESET_PFR;
+ break;
+ default:
+ netdev_info(dev, "Unsupported set of ethtool flags");
+ return -EOPNOTSUPP;
+ }
+
+ ice_schedule_reset(pf, reset);
+
+ *flags = 0;
+
+ return 0;
+}
+
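
The ICE_ETHTOOL_* masks above are composed from the uapi ETH_RESET_* flags, with device-wide (shared) resets shifted into the upper half of the flag word by ETH_RESET_SHARED_SHIFT. A standalone sketch that rebuilds and prints the same masks, assuming only the uapi <linux/ethtool.h> header:

#include <stdio.h>
#include <linux/ethtool.h>	/* ETH_RESET_* flags and ETH_RESET_SHARED_SHIFT */

/* Same composition as the ICE_ETHTOOL_* macros above: a PF reset touches
 * per-function blocks; CORER/GLOBR additionally touch shared blocks, so
 * those bits live in the upper (shared) half of the flag word.
 */
#define PFR	(ETH_RESET_IRQ | ETH_RESET_DMA | ETH_RESET_FILTER | ETH_RESET_OFFLOAD)
#define CORER	((PFR | ETH_RESET_RAM) << ETH_RESET_SHARED_SHIFT)
#define GLOBR	(CORER | (ETH_RESET_MAC << ETH_RESET_SHARED_SHIFT) | \
		 (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT))

int main(void)
{
	printf("PFR   = 0x%08x\n", PFR);
	printf("CORER = 0x%08x\n", CORER);
	printf("GLOBR = 0x%08x\n", GLOBR);
	return 0;
}
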
+/**
+ * ice_repr_ethtool_reset - triggers a VF reset
+ * @dev: network interface device structure
+ * @flags: set of reset flags
+ *
+ * Return: 0 on success,
+ * -EOPNOTSUPP when using unsupported set of flags
+ * -EBUSY when VF is not ready for reset.
+ */
+static int ice_repr_ethtool_reset(struct net_device *dev, u32 *flags)
+{
+ struct ice_repr *repr = ice_netdev_to_repr(dev);
+ struct ice_vf *vf;
+
+ if (repr->type != ICE_REPR_TYPE_VF ||
+ *flags != ICE_ETHTOOL_VFR)
+ return -EOPNOTSUPP;
+
+ vf = repr->vf;
+
+ if (ice_check_vf_ready_for_cfg(vf))
+ return -EBUSY;
+
+ *flags = 0;
+
+ return ice_reset_vf(vf, ICE_VF_RESET_VFLR | ICE_VF_RESET_LOCK);
+}
+
static const struct ethtool_ops ice_ethtool_ops = {
- .cap_rss_ctx_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE |
ETHTOOL_COALESCE_RX_USECS_HIGH,
- .cap_rss_sym_xor_supported = true,
+ .supported_input_xfrm = RXH_XFRM_SYM_XOR,
+ .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT,
.get_link_ksettings = ice_get_link_ksettings,
.set_link_ksettings = ice_set_link_ksettings,
+ .get_fec_stats = ice_get_fec_stats,
+ .get_eth_mac_stats = ice_get_eth_mac_stats,
+ .get_pause_stats = ice_get_pause_stats,
+ .get_rmon_stats = ice_get_rmon_stats,
+ .get_ts_stats = ice_get_ts_stats,
.get_drvinfo = ice_get_drvinfo,
.get_regs_len = ice_get_regs_len,
.get_regs = ice_get_regs,
@@ -4314,6 +4850,7 @@ static const struct ethtool_ops ice_ethtool_ops = {
.set_msglevel = ice_set_msglevel,
.self_test = ice_self_test,
.get_link = ethtool_op_get_link,
+ .get_link_ext_stats = ice_get_link_ext_stats,
.get_eeprom_len = ice_get_eeprom_len,
.get_eeprom = ice_get_eeprom,
.get_coalesce = ice_get_coalesce,
@@ -4326,15 +4863,19 @@ static const struct ethtool_ops ice_ethtool_ops = {
.get_sset_count = ice_get_sset_count,
.get_rxnfc = ice_get_rxnfc,
.set_rxnfc = ice_set_rxnfc,
+ .get_rx_ring_count = ice_get_rx_ring_count,
.get_ringparam = ice_get_ringparam,
.set_ringparam = ice_set_ringparam,
.nway_reset = ice_nway_reset,
.get_pauseparam = ice_get_pauseparam,
.set_pauseparam = ice_set_pauseparam,
+ .reset = ice_ethtool_reset,
.get_rxfh_key_size = ice_get_rxfh_key_size,
.get_rxfh_indir_size = ice_get_rxfh_indir_size,
.get_rxfh = ice_get_rxfh,
.set_rxfh = ice_set_rxfh,
+ .get_rxfh_fields = ice_get_rxfh_fields,
+ .set_rxfh_fields = ice_set_rxfh_fields,
.get_channels = ice_get_channels,
.set_channels = ice_set_channels,
.get_ts_info = ice_get_ts_info,
@@ -4383,6 +4924,7 @@ static const struct ethtool_ops ice_ethtool_repr_ops = {
.get_strings = ice_repr_get_strings,
.get_ethtool_stats = ice_repr_get_ethtool_stats,
.get_sset_count = ice_repr_get_sset_count,
+ .reset = ice_repr_ethtool_reset,
};
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.h b/drivers/net/ethernet/intel/ice/ice_ethtool.h
index b88e3da06f13..23b2cfbc9684 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.h
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.h
@@ -9,6 +9,51 @@ struct ice_phy_type_to_ethtool {
u8 link_mode;
};
+struct ice_serdes_equalization_to_ethtool {
+ int rx_equ_pre2;
+ int rx_equ_pre1;
+ int rx_equ_post1;
+ int rx_equ_bflf;
+ int rx_equ_bfhf;
+ int rx_equ_ctle_gainhf;
+ int rx_equ_ctle_gainlf;
+ int rx_equ_ctle_gaindc;
+ int rx_equ_ctle_bw;
+ int rx_equ_dfe_gain;
+ int rx_equ_dfe_gain_2;
+ int rx_equ_dfe_2;
+ int rx_equ_dfe_3;
+ int rx_equ_dfe_4;
+ int rx_equ_dfe_5;
+ int rx_equ_dfe_6;
+ int rx_equ_dfe_7;
+ int rx_equ_dfe_8;
+ int rx_equ_dfe_9;
+ int rx_equ_dfe_10;
+ int rx_equ_dfe_11;
+ int rx_equ_dfe_12;
+ int tx_equ_pre1;
+ int tx_equ_pre3;
+ int tx_equ_atten;
+ int tx_equ_post1;
+ int tx_equ_pre2;
+};
+
+struct ice_regdump_to_ethtool {
+ /* A multilane port can have max 4 serdes */
+ struct ice_serdes_equalization_to_ethtool equalization[4];
+};
+
+/* Physical port topology derived from the lport: serdes mapping, PCS quad,
+ * MAC port, cage, etc.
+ */
+struct ice_port_topology {
+ u16 pcs_port;
+ u16 primary_serdes_lane;
+ u16 serdes_lane_count;
+ u16 pcs_quad_select;
+};
+
/* Macro to make PHY type to Ethtool link mode table entry.
* The index is the PHY type.
*/
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
index 9a1a04f5f146..aceec184e89b 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
@@ -41,6 +41,8 @@ static struct in6_addr zero_ipv6_addr_mask = {
static int ice_fltr_to_ethtool_flow(enum ice_fltr_ptype flow)
{
switch (flow) {
+ case ICE_FLTR_PTYPE_NONF_ETH:
+ return ETHER_FLOW;
case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
return TCP_V4_FLOW;
case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
@@ -72,6 +74,8 @@ static int ice_fltr_to_ethtool_flow(enum ice_fltr_ptype flow)
static enum ice_fltr_ptype ice_ethtool_flow_to_fltr(int eth)
{
switch (eth) {
+ case ETHER_FLOW:
+ return ICE_FLTR_PTYPE_NONF_ETH;
case TCP_V4_FLOW:
return ICE_FLTR_PTYPE_NONF_IPV4_TCP;
case UDP_V4_FLOW:
@@ -137,6 +141,10 @@ int ice_get_ethtool_fdir_entry(struct ice_hw *hw, struct ethtool_rxnfc *cmd)
memset(&fsp->m_ext, 0, sizeof(fsp->m_ext));
switch (fsp->flow_type) {
+ case ETHER_FLOW:
+ fsp->h_u.ether_spec = rule->eth;
+ fsp->m_u.ether_spec = rule->eth_mask;
+ break;
case IPV4_USER_FLOW:
fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
fsp->h_u.usr_ip4_spec.proto = 0;
@@ -526,7 +534,7 @@ ice_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
*
* Returns the number of available flow director filters to this VSI
*/
-static int ice_fdir_num_avail_fltr(struct ice_hw *hw, struct ice_vsi *vsi)
+int ice_fdir_num_avail_fltr(struct ice_hw *hw, struct ice_vsi *vsi)
{
u16 vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
u16 num_guar;
@@ -1194,6 +1202,122 @@ ice_set_fdir_ip6_usr_seg(struct ice_flow_seg_info *seg,
}
/**
+ * ice_fdir_vlan_valid - validate VLAN data for Flow Director rule
+ * @dev: network interface device structure
+ * @fsp: pointer to ethtool Rx flow specification
+ *
+ * Return: true if vlan data is valid, false otherwise
+ */
+static bool ice_fdir_vlan_valid(struct device *dev,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ if (fsp->m_ext.vlan_etype && !eth_type_vlan(fsp->h_ext.vlan_etype))
+ return false;
+
+ if (fsp->m_ext.vlan_tci && ntohs(fsp->h_ext.vlan_tci) >= VLAN_N_VID)
+ return false;
+
+ /* proto and vlan must have vlan-etype defined */
+ if (fsp->m_u.ether_spec.h_proto && fsp->m_ext.vlan_tci &&
+ !fsp->m_ext.vlan_etype) {
+		dev_warn(dev, "Filter with proto and vlan also requires vlan-etype");
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * ice_set_ether_flow_seg - set address and protocol segments for ether flow
+ * @dev: network interface device structure
+ * @seg: flow segment for programming
+ * @eth_spec: mask data from ethtool
+ *
+ * Return: 0 on success and errno in case of error.
+ */
+static int ice_set_ether_flow_seg(struct device *dev,
+ struct ice_flow_seg_info *seg,
+ struct ethhdr *eth_spec)
+{
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH);
+
+ /* empty rules are not valid */
+ if (is_zero_ether_addr(eth_spec->h_source) &&
+ is_zero_ether_addr(eth_spec->h_dest) &&
+ !eth_spec->h_proto)
+ return -EINVAL;
+
+ /* Ethertype */
+ if (eth_spec->h_proto == htons(0xFFFF)) {
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_ETH_TYPE,
+ ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ } else if (eth_spec->h_proto) {
+ dev_warn(dev, "Only 0x0000 or 0xffff proto mask is allowed for flow-type ether");
+ return -EOPNOTSUPP;
+ }
+
+ /* Source MAC address */
+ if (is_broadcast_ether_addr(eth_spec->h_source))
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_ETH_SA,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ else if (!is_zero_ether_addr(eth_spec->h_source))
+ goto err_mask;
+
+ /* Destination MAC address */
+ if (is_broadcast_ether_addr(eth_spec->h_dest))
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_ETH_DA,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ else if (!is_zero_ether_addr(eth_spec->h_dest))
+ goto err_mask;
+
+ return 0;
+
+err_mask:
+ dev_warn(dev, "Only 00:00:00:00:00:00 or ff:ff:ff:ff:ff:ff MAC address mask is allowed for flow-type ether");
+ return -EOPNOTSUPP;
+}
+
+/**
+ * ice_set_fdir_vlan_seg - set vlan segments for ether flow
+ * @seg: flow segment for programming
+ * @ext_masks: masks for additional RX flow fields
+ *
+ * Return: 0 on success and errno in case of error.
+ */
+static int
+ice_set_fdir_vlan_seg(struct ice_flow_seg_info *seg,
+ struct ethtool_flow_ext *ext_masks)
+{
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_VLAN);
+
+ if (ext_masks->vlan_etype) {
+ if (ext_masks->vlan_etype != htons(0xFFFF))
+ return -EOPNOTSUPP;
+
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_S_VLAN,
+ ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ }
+
+ if (ext_masks->vlan_tci) {
+ if (ext_masks->vlan_tci != htons(0xFFFF))
+ return -EOPNOTSUPP;
+
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_C_VLAN,
+ ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ }
+
+ return 0;
+}
+
+/**
* ice_cfg_fdir_xtrct_seq - Configure extraction sequence for the given filter
* @pf: PF structure
* @fsp: pointer to ethtool Rx flow specification
@@ -1209,7 +1333,7 @@ ice_cfg_fdir_xtrct_seq(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp,
struct device *dev = ice_pf_to_dev(pf);
enum ice_fltr_ptype fltr_idx;
struct ice_hw *hw = &pf->hw;
- bool perfect_filter;
+ bool perfect_filter = false;
int ret;
seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
@@ -1262,6 +1386,16 @@ ice_cfg_fdir_xtrct_seq(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp,
ret = ice_set_fdir_ip6_usr_seg(seg, &fsp->m_u.usr_ip6_spec,
&perfect_filter);
break;
+ case ETHER_FLOW:
+ ret = ice_set_ether_flow_seg(dev, seg, &fsp->m_u.ether_spec);
+ if (!ret && (fsp->m_ext.vlan_etype || fsp->m_ext.vlan_tci)) {
+ if (!ice_fdir_vlan_valid(dev, fsp)) {
+ ret = -EINVAL;
+ break;
+ }
+ ret = ice_set_fdir_vlan_seg(seg, &fsp->m_ext);
+ }
+ break;
default:
ret = -EINVAL;
}
@@ -1471,22 +1605,19 @@ void ice_fdir_replay_fltrs(struct ice_pf *pf)
*/
int ice_fdir_create_dflt_rules(struct ice_pf *pf)
{
+ static const enum ice_fltr_ptype dflt_rules[] = {
+ ICE_FLTR_PTYPE_NONF_IPV4_TCP, ICE_FLTR_PTYPE_NONF_IPV4_UDP,
+ ICE_FLTR_PTYPE_NONF_IPV6_TCP, ICE_FLTR_PTYPE_NONF_IPV6_UDP,
+ };
int err;
/* Create perfect TCP and UDP rules in hardware. */
- err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV4_TCP);
- if (err)
- return err;
+ for (int i = 0; i < ARRAY_SIZE(dflt_rules); i++) {
+ err = ice_create_init_fdir_rule(pf, dflt_rules[i]);
- err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV4_UDP);
- if (err)
- return err;
-
- err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV6_TCP);
- if (err)
- return err;
-
- err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV6_UDP);
+ if (err)
+ break;
+ }
return err;
}
@@ -1696,11 +1827,12 @@ static int
ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp,
struct ice_fdir_fltr *input)
{
- u16 dest_vsi, q_index = 0;
+ s16 q_index = ICE_FDIR_NO_QUEUE_IDX;
u16 orig_q_index = 0;
struct ice_pf *pf;
struct ice_hw *hw;
int flow_type;
+ u16 dest_vsi;
u8 dest_ctl;
if (!vsi || !fsp || !input)
@@ -1823,6 +1955,10 @@ ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp,
input->mask.v6.tc = fsp->m_u.usr_ip6_spec.tclass;
input->mask.v6.proto = fsp->m_u.usr_ip6_spec.l4_proto;
break;
+ case ETHER_FLOW:
+ input->eth = fsp->h_u.ether_spec;
+ input->eth_mask = fsp->m_u.ether_spec;
+ break;
default:
/* not doing un-parsed flow types */
return -EINVAL;
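
The ETHER_FLOW additions above only accept all-ones or all-zero masks: a ff:ff:ff:ff:ff:ff MAC mask (or 0xffff proto mask) selects the field for exact matching, an all-zero mask leaves it out of the filter, and anything in between is rejected with -EOPNOTSUPP. A minimal userspace-style sketch of that classification, with hypothetical helper names that are not part of the driver:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

enum mask_kind { MASK_IGNORE, MASK_MATCH, MASK_UNSUPPORTED };

/* Classify a 6-byte MAC mask the way ice_set_ether_flow_seg() does:
 * ff:..:ff selects the field, 00:..:00 skips it, anything else is rejected.
 */
static enum mask_kind classify_mac_mask(const uint8_t mask[6])
{
	static const uint8_t all_ones[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	static const uint8_t all_zero[6] = { 0 };

	if (!memcmp(mask, all_ones, 6))
		return MASK_MATCH;
	if (!memcmp(mask, all_zero, 6))
		return MASK_IGNORE;
	return MASK_UNSUPPORTED;
}

int main(void)
{
	const uint8_t partial[6] = { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 };

	printf("%d\n", classify_mac_mask(partial)); /* MASK_UNSUPPORTED */
	return 0;
}

ice_set_ether_flow_seg() applies the same test independently to the destination MAC, source MAC and EtherType masks, and an all-zero rule (nothing selected) is rejected outright.
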
diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.c b/drivers/net/ethernet/intel/ice/ice_fdir.c
index 5840c3e04a5b..b29fbdec9442 100644
--- a/drivers/net/ethernet/intel/ice/ice_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_fdir.c
@@ -4,6 +4,8 @@
#include "ice_common.h"
/* These are training packet headers used to program flow director filters. */
+static const u8 ice_fdir_eth_pkt[22];
+
static const u8 ice_fdir_tcpv4_pkt[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
@@ -417,6 +419,11 @@ static const u8 ice_fdir_ip6_tun_pkt[] = {
/* Flow Director no-op training packet table */
static const struct ice_fdir_base_pkt ice_fdir_pkt[] = {
{
+ ICE_FLTR_PTYPE_NONF_ETH,
+ sizeof(ice_fdir_eth_pkt), ice_fdir_eth_pkt,
+ sizeof(ice_fdir_eth_pkt), ice_fdir_eth_pkt,
+ },
+ {
ICE_FLTR_PTYPE_NONF_IPV4_TCP,
sizeof(ice_fdir_tcpv4_pkt), ice_fdir_tcpv4_pkt,
sizeof(ice_fdir_tcp4_tun_pkt), ice_fdir_tcp4_tun_pkt,
@@ -914,6 +921,21 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
* perspective. The input from user is from Rx filter perspective.
*/
switch (flow) {
+ case ICE_FLTR_PTYPE_NONF_ETH:
+ ice_pkt_insert_mac_addr(loc, input->eth.h_dest);
+ ice_pkt_insert_mac_addr(loc + ETH_ALEN, input->eth.h_source);
+ if (input->ext_data.vlan_tag || input->ext_data.vlan_type) {
+ ice_pkt_insert_u16(loc, ICE_ETH_TYPE_F_OFFSET,
+ input->ext_data.vlan_type);
+ ice_pkt_insert_u16(loc, ICE_ETH_VLAN_TCI_OFFSET,
+ input->ext_data.vlan_tag);
+ ice_pkt_insert_u16(loc, ICE_ETH_TYPE_VLAN_OFFSET,
+ input->eth.h_proto);
+ } else {
+ ice_pkt_insert_u16(loc, ICE_ETH_TYPE_F_OFFSET,
+ input->eth.h_proto);
+ }
+ break;
case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET,
input->ip.v4.src_ip);
@@ -1099,7 +1121,7 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
* ice_fdir_has_frag - does flow type have 2 ptypes
* @flow: flow ptype
*
- * returns true is there is a fragment packet for this ptype
+ * Return: true if there is a fragment packet for this ptype
*/
bool ice_fdir_has_frag(enum ice_fltr_ptype flow)
{
@@ -1189,52 +1211,58 @@ static int ice_cmp_ipv6_addr(__be32 *a, __be32 *b)
* ice_fdir_comp_rules - compare 2 filters
* @a: a Flow Director filter data structure
* @b: a Flow Director filter data structure
- * @v6: bool true if v6 filter
*
* Returns true if the filters match
*/
static bool
-ice_fdir_comp_rules(struct ice_fdir_fltr *a, struct ice_fdir_fltr *b, bool v6)
+ice_fdir_comp_rules(struct ice_fdir_fltr *a, struct ice_fdir_fltr *b)
{
enum ice_fltr_ptype flow_type = a->flow_type;
/* The calling function already checks that the two filters have the
* same flow_type.
*/
- if (!v6) {
- if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
- flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
- flow_type == ICE_FLTR_PTYPE_NONF_IPV4_SCTP) {
- if (a->ip.v4.dst_ip == b->ip.v4.dst_ip &&
- a->ip.v4.src_ip == b->ip.v4.src_ip &&
- a->ip.v4.dst_port == b->ip.v4.dst_port &&
- a->ip.v4.src_port == b->ip.v4.src_port)
- return true;
- } else if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER) {
- if (a->ip.v4.dst_ip == b->ip.v4.dst_ip &&
- a->ip.v4.src_ip == b->ip.v4.src_ip &&
- a->ip.v4.l4_header == b->ip.v4.l4_header &&
- a->ip.v4.proto == b->ip.v4.proto &&
- a->ip.v4.ip_ver == b->ip.v4.ip_ver &&
- a->ip.v4.tos == b->ip.v4.tos)
- return true;
- }
- } else {
- if (flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP ||
- flow_type == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
- flow_type == ICE_FLTR_PTYPE_NONF_IPV6_SCTP) {
- if (a->ip.v6.dst_port == b->ip.v6.dst_port &&
- a->ip.v6.src_port == b->ip.v6.src_port &&
- !ice_cmp_ipv6_addr(a->ip.v6.dst_ip,
- b->ip.v6.dst_ip) &&
- !ice_cmp_ipv6_addr(a->ip.v6.src_ip,
- b->ip.v6.src_ip))
- return true;
- } else if (flow_type == ICE_FLTR_PTYPE_NONF_IPV6_OTHER) {
- if (a->ip.v6.dst_port == b->ip.v6.dst_port &&
- a->ip.v6.src_port == b->ip.v6.src_port)
- return true;
- }
+ switch (flow_type) {
+ case ICE_FLTR_PTYPE_NONF_ETH:
+ if (!memcmp(&a->eth, &b->eth, sizeof(a->eth)))
+ return true;
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
+ case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
+ case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
+ if (a->ip.v4.dst_ip == b->ip.v4.dst_ip &&
+ a->ip.v4.src_ip == b->ip.v4.src_ip &&
+ a->ip.v4.dst_port == b->ip.v4.dst_port &&
+ a->ip.v4.src_port == b->ip.v4.src_port)
+ return true;
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
+ if (a->ip.v4.dst_ip == b->ip.v4.dst_ip &&
+ a->ip.v4.src_ip == b->ip.v4.src_ip &&
+ a->ip.v4.l4_header == b->ip.v4.l4_header &&
+ a->ip.v4.proto == b->ip.v4.proto &&
+ a->ip.v4.ip_ver == b->ip.v4.ip_ver &&
+ a->ip.v4.tos == b->ip.v4.tos)
+ return true;
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
+ case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
+ case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
+ if (a->ip.v6.dst_port == b->ip.v6.dst_port &&
+ a->ip.v6.src_port == b->ip.v6.src_port &&
+ !ice_cmp_ipv6_addr(a->ip.v6.dst_ip,
+ b->ip.v6.dst_ip) &&
+ !ice_cmp_ipv6_addr(a->ip.v6.src_ip,
+ b->ip.v6.src_ip))
+ return true;
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
+ if (a->ip.v6.dst_port == b->ip.v6.dst_port &&
+ a->ip.v6.src_port == b->ip.v6.src_port)
+ return true;
+ break;
+ default:
+ break;
}
return false;
@@ -1253,19 +1281,10 @@ bool ice_fdir_is_dup_fltr(struct ice_hw *hw, struct ice_fdir_fltr *input)
bool ret = false;
list_for_each_entry(rule, &hw->fdir_list_head, fltr_node) {
- enum ice_fltr_ptype flow_type;
-
if (rule->flow_type != input->flow_type)
continue;
- flow_type = input->flow_type;
- if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
- flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
- flow_type == ICE_FLTR_PTYPE_NONF_IPV4_SCTP ||
- flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER)
- ret = ice_fdir_comp_rules(rule, input, false);
- else
- ret = ice_fdir_comp_rules(rule, input, true);
+ ret = ice_fdir_comp_rules(rule, input);
if (ret) {
if (rule->fltr_id == input->fltr_id &&
rule->q_index != input->q_index)
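
The ICE_ETH_*_OFFSET values used when patching the ETH training packet above follow the plain Ethernet/802.1Q byte layout: two 6-byte MAC addresses put the outer EtherType at offset 12, a VLAN tag then places the TCI at 14 and the encapsulated EtherType at 16; untagged rules only write offset 12. A small sketch that re-derives those offsets (struct name and layout are illustrative only):

#include <stddef.h>
#include <stdint.h>
#include <assert.h>

/* Byte layout of an Ethernet frame with one 802.1Q tag; mirrors why the
 * driver uses offsets 12, 14 and 16 when building the training packet.
 */
struct vlan_frame_hdr {
	uint8_t  h_dest[6];
	uint8_t  h_source[6];
	uint16_t h_vlan_proto;		/* 0x8100 when a tag is present */
	uint16_t h_vlan_tci;
	uint16_t h_vlan_encap_proto;	/* the encapsulated EtherType */
} __attribute__((packed));

static_assert(offsetof(struct vlan_frame_hdr, h_vlan_proto) == 12,
	      "ICE_ETH_TYPE_F_OFFSET");
static_assert(offsetof(struct vlan_frame_hdr, h_vlan_tci) == 14,
	      "ICE_ETH_VLAN_TCI_OFFSET");
static_assert(offsetof(struct vlan_frame_hdr, h_vlan_encap_proto) == 16,
	      "ICE_ETH_TYPE_VLAN_OFFSET");

int main(void) { return 0; }
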
diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.h b/drivers/net/ethernet/intel/ice/ice_fdir.h
index 1b9b84490689..820023c0271f 100644
--- a/drivers/net/ethernet/intel/ice/ice_fdir.h
+++ b/drivers/net/ethernet/intel/ice/ice_fdir.h
@@ -8,6 +8,9 @@
#define ICE_FDIR_MAX_RAW_PKT_SIZE (512 + ICE_FDIR_TUN_PKT_OFF)
/* macros for offsets into packets for flow director programming */
+#define ICE_ETH_TYPE_F_OFFSET 12
+#define ICE_ETH_VLAN_TCI_OFFSET 14
+#define ICE_ETH_TYPE_VLAN_OFFSET 16
#define ICE_IPV4_SRC_ADDR_OFFSET 26
#define ICE_IPV4_DST_ADDR_OFFSET 30
#define ICE_IPV4_TCP_SRC_PORT_OFFSET 34
@@ -50,6 +53,8 @@
*/
#define ICE_FDIR_IPV4_PKT_FLAG_MF 0x20
+#define ICE_FDIR_NO_QUEUE_IDX -1
+
enum ice_fltr_prgm_desc_dest {
ICE_FLTR_PRGM_DESC_DEST_DROP_PKT,
ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX,
@@ -159,6 +164,8 @@ struct ice_fdir_fltr {
struct list_head fltr_node;
enum ice_fltr_ptype flow_type;
+ struct ethhdr eth, eth_mask;
+
union {
struct ice_fdir_v4 v4;
struct ice_fdir_v6 v6;
@@ -181,7 +188,7 @@ struct ice_fdir_fltr {
u16 flex_fltr;
/* filter control */
- u16 q_index;
+ s16 q_index;
u16 orig_q_index;
u16 dest_vsi;
u8 dest_ctl;
@@ -202,6 +209,8 @@ struct ice_fdir_base_pkt {
const u8 *tun_pkt;
};
+struct ice_vsi;
+
int ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id);
int ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id);
int ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr);
@@ -213,6 +222,7 @@ int
ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
u8 *pkt, bool frag, bool tun);
int ice_get_fdir_cnt_all(struct ice_hw *hw);
+int ice_fdir_num_avail_fltr(struct ice_hw *hw, struct ice_vsi *vsi);
bool ice_fdir_is_dup_fltr(struct ice_hw *hw, struct ice_fdir_fltr *input);
bool ice_fdir_has_frag(enum ice_fltr_ptype flow);
struct ice_fdir_fltr *
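
With struct ethhdr embedded directly in struct ice_fdir_fltr, duplicate detection for the ETH flow type reduces to a whole-struct memcmp(), which is safe here because the header is 6 + 6 + 2 bytes with no padding. A standalone sketch of that comparison; the struct is a userspace mirror for illustration, not the kernel definition:

#include <stdint.h>
#include <string.h>
#include <stdbool.h>
#include <stdio.h>

/* Userspace stand-in for struct ethhdr: no padding, so raw memcmp() is a
 * well-defined whole-rule equality test.
 */
struct eth_rule {
	uint8_t  h_dest[6];
	uint8_t  h_source[6];
	uint16_t h_proto;
};

static bool eth_rule_is_dup(const struct eth_rule *a, const struct eth_rule *b)
{
	return memcmp(a, b, sizeof(*a)) == 0;
}

int main(void)
{
	struct eth_rule a = { .h_dest = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
			      .h_proto = 0x88f7 };
	struct eth_rule b = a;

	printf("%s\n", eth_rule_is_dup(&a, &b) ? "duplicate" : "distinct");
	return 0;
}

Note that ice_fdir_comp_rules() compares only the filter values, matching how the other flow types are handled in the same switch.
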
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index 20d5db88c99f..c0dbec369366 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -574,9 +574,7 @@ ice_destroy_tunnel_end:
int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
unsigned int idx, struct udp_tunnel_info *ti)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *vsi = np->vsi;
- struct ice_pf *pf = vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
enum ice_tunnel_type tnl_type;
int status;
u16 index;
@@ -598,9 +596,7 @@ int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
unsigned int idx, struct udp_tunnel_info *ti)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *vsi = np->vsi;
- struct ice_pf *pf = vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
enum ice_tunnel_type tnl_type;
int status;
@@ -1479,7 +1475,7 @@ static void ice_init_prof_masks(struct ice_hw *hw, enum ice_block blk)
per_pf = ICE_PROF_MASK_COUNT / hw->dev_caps.num_funcs;
hw->blk[blk].masks.count = per_pf;
- hw->blk[blk].masks.first = hw->pf_id * per_pf;
+ hw->blk[blk].masks.first = hw->logical_pf_id * per_pf;
memset(hw->blk[blk].masks.masks, 0, sizeof(hw->blk[blk].masks.masks));
@@ -2981,6 +2977,50 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
}
/**
+ * ice_disable_fd_swap - set register appropriately to disable FD SWAP
+ * @hw: pointer to the HW struct
+ * @prof_id: profile ID
+ */
+static void
+ice_disable_fd_swap(struct ice_hw *hw, u8 prof_id)
+{
+ u16 swap_val, fvw_num;
+ unsigned int i;
+
+ swap_val = ICE_SWAP_VALID;
+ fvw_num = hw->blk[ICE_BLK_FD].es.fvw / ICE_FDIR_REG_SET_SIZE;
+
+ /* Since the SWAP Flag in the Programming Desc doesn't work,
+ * here add method to disable the SWAP Option via setting
+ * certain SWAP and INSET register sets.
+ */
+ for (i = 0; i < fvw_num ; i++) {
+ u32 raw_swap, raw_in;
+ unsigned int j;
+
+ raw_swap = 0;
+ raw_in = 0;
+
+ for (j = 0; j < ICE_FDIR_REG_SET_SIZE; j++) {
+ raw_swap |= (swap_val++) << (j * BITS_PER_BYTE);
+ raw_in |= ICE_INSET_DFLT << (j * BITS_PER_BYTE);
+ }
+
+ /* write the FDIR swap register set */
+ wr32(hw, GLQF_FDSWAP(prof_id, i), raw_swap);
+
+ ice_debug(hw, ICE_DBG_INIT, "swap wr(%d, %d): 0x%x = 0x%08x\n",
+ prof_id, i, GLQF_FDSWAP(prof_id, i), raw_swap);
+
+ /* write the FDIR inset register set */
+ wr32(hw, GLQF_FDINSET(prof_id, i), raw_in);
+
+ ice_debug(hw, ICE_DBG_INIT, "inset wr(%d, %d): 0x%x = 0x%08x\n",
+ prof_id, i, GLQF_FDINSET(prof_id, i), raw_in);
+ }
+}
+
+/**
* ice_add_prof - add profile
* @hw: pointer to the HW struct
* @blk: hardware block
@@ -2991,6 +3031,7 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
* @es: extraction sequence (length of array is determined by the block)
* @masks: mask for extraction sequence
* @symm: symmetric setting for RSS profiles
+ * @fd_swap: enable/disable FDIR paired src/dst fields swap option
*
* This function registers a profile, which matches a set of PTYPES with a
* particular extraction sequence. While the hardware profile is allocated
@@ -2998,16 +3039,16 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
* the ID value used here.
*/
int
-ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
- const struct ice_ptype_attributes *attr, u16 attr_cnt,
- struct ice_fv_word *es, u16 *masks, bool symm)
+ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id,
+ unsigned long *ptypes, const struct ice_ptype_attributes *attr,
+ u16 attr_cnt, struct ice_fv_word *es, u16 *masks, bool symm,
+ bool fd_swap)
{
- u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
struct ice_prof_map *prof;
- u8 byte = 0;
- u8 prof_id;
int status;
+ u8 prof_id;
+ u16 ptype;
bitmap_zero(ptgs_used, ICE_XLT1_CNT);
@@ -3020,7 +3061,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
status = ice_alloc_prof_id(hw, blk, &prof_id);
if (status)
goto err_ice_add_prof;
- if (blk == ICE_BLK_FD) {
+ if (blk == ICE_BLK_FD && fd_swap) {
/* For Flow Director block, the extraction sequence may
* need to be altered in the case where there are paired
* fields that have no match. This is necessary because
@@ -3031,6 +3072,8 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
status = ice_update_fd_swap(hw, prof_id, es);
if (status)
goto err_ice_add_prof;
+ } else if (blk == ICE_BLK_FD) {
+ ice_disable_fd_swap(hw, prof_id);
}
status = ice_update_prof_masking(hw, blk, prof_id, masks);
if (status)
@@ -3055,57 +3098,35 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
prof->context = 0;
/* build list of ptgs */
- while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) {
- u8 bit;
+ for_each_set_bit(ptype, ptypes, ICE_FLOW_PTYPE_MAX) {
+ u8 ptg;
- if (!ptypes[byte]) {
- bytes--;
- byte++;
+ /* The package should place all ptypes in a non-zero
+ * PTG, so the following call should never fail.
+ */
+ if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
continue;
- }
-
- /* Examine 8 bits per byte */
- for_each_set_bit(bit, (unsigned long *)&ptypes[byte],
- BITS_PER_BYTE) {
- u16 ptype;
- u8 ptg;
- ptype = byte * BITS_PER_BYTE + bit;
-
- /* The package should place all ptypes in a non-zero
- * PTG, so the following call should never fail.
- */
- if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
- continue;
+ /* If PTG is already added, skip and continue */
+ if (test_bit(ptg, ptgs_used))
+ continue;
- /* If PTG is already added, skip and continue */
- if (test_bit(ptg, ptgs_used))
- continue;
+ set_bit(ptg, ptgs_used);
+ /* Check to see there are any attributes for this ptype, and
+ * add them if found.
+ */
+ status = ice_add_prof_attrib(prof, ptg, ptype, attr, attr_cnt);
+ if (status == -ENOSPC)
+ break;
+ if (status) {
+			/* This is simply a ptype/PTG with no attribute */
+ prof->ptg[prof->ptg_cnt] = ptg;
+ prof->attr[prof->ptg_cnt].flags = 0;
+ prof->attr[prof->ptg_cnt].mask = 0;
- __set_bit(ptg, ptgs_used);
- /* Check to see there are any attributes for
- * this PTYPE, and add them if found.
- */
- status = ice_add_prof_attrib(prof, ptg, ptype,
- attr, attr_cnt);
- if (status == -ENOSPC)
+ if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
break;
- if (status) {
- /* This is simple a PTYPE/PTG with no
- * attribute
- */
- prof->ptg[prof->ptg_cnt] = ptg;
- prof->attr[prof->ptg_cnt].flags = 0;
- prof->attr[prof->ptg_cnt].mask = 0;
-
- if (++prof->ptg_cnt >=
- ICE_MAX_PTG_PER_PROFILE)
- break;
- }
}
-
- bytes--;
- byte++;
}
list_add(&prof->list, &hw->blk[blk].es.prof_map);
@@ -3557,6 +3578,19 @@ ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
}
/**
+ * ice_set_tcam_flags - set TCAM flag don't care mask
+ * @mask: mask for flags
+ * @dc_mask: pointer to the don't care mask
+ */
+static void ice_set_tcam_flags(u16 mask, u8 dc_mask[ICE_TCAM_KEY_VAL_SZ])
+{
+ u16 inverted_mask = ~mask;
+
+ /* flags are lowest u16 */
+ put_unaligned_le16(inverted_mask, dc_mask);
+}
+
+/**
* ice_rem_chg_tcam_ent - remove a specific TCAM entry from change list
* @hw: pointer to the HW struct
* @idx: the index of the TCAM entry to remove
@@ -3626,6 +3660,9 @@ ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
if (!p)
return -ENOMEM;
+ /* set don't care masks for TCAM flags */
+ ice_set_tcam_flags(tcam->attr.mask, dc_msk);
+
status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id,
tcam->ptg, vsig, 0, tcam->attr.flags,
vl_msk, dc_msk, nm_msk);
@@ -3652,6 +3689,34 @@ err_ice_prof_tcam_ena_dis:
}
/**
+ * ice_ptg_attr_in_use - determine if PTG and attribute pair is in use
+ * @ptg_attr: pointer to the PTG and attribute pair to check
+ * @ptgs_used: bitmap that denotes which PTGs are in use
+ * @attr_used: array of PTG and attributes pairs already used
+ * @attr_cnt: count of entries in the attr_used array
+ *
+ * Return: true if the PTG and attribute pair is in use, false otherwise.
+ */
+static bool
+ice_ptg_attr_in_use(struct ice_tcam_inf *ptg_attr, unsigned long *ptgs_used,
+ struct ice_tcam_inf *attr_used[], u16 attr_cnt)
+{
+ u16 i;
+
+ if (!test_bit(ptg_attr->ptg, ptgs_used))
+ return false;
+
+ /* the PTG is used, so now look for correct attributes */
+ for (i = 0; i < attr_cnt; i++)
+ if (attr_used[i]->ptg == ptg_attr->ptg &&
+ attr_used[i]->attr.flags == ptg_attr->attr.flags &&
+ attr_used[i]->attr.mask == ptg_attr->attr.mask)
+ return true;
+
+ return false;
+}
+
+/**
* ice_adj_prof_priorities - adjust profile based on priorities
* @hw: pointer to the HW struct
* @blk: hardware block
@@ -3663,10 +3728,16 @@ ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
struct list_head *chg)
{
DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
+ struct ice_tcam_inf **attr_used;
struct ice_vsig_prof *t;
- int status;
+ u16 attr_used_cnt = 0;
+ int status = 0;
u16 idx;
+ attr_used = kcalloc(ICE_MAX_PTG_ATTRS, sizeof(*attr_used), GFP_KERNEL);
+ if (!attr_used)
+ return -ENOMEM;
+
bitmap_zero(ptgs_used, ICE_XLT1_CNT);
idx = vsig & ICE_VSIG_IDX_M;
@@ -3684,11 +3755,15 @@ ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
u16 i;
for (i = 0; i < t->tcam_count; i++) {
+ bool used;
+
/* Scan the priorities from newest to oldest.
* Make sure that the newest profiles take priority.
*/
- if (test_bit(t->tcam[i].ptg, ptgs_used) &&
- t->tcam[i].in_use) {
+ used = ice_ptg_attr_in_use(&t->tcam[i], ptgs_used,
+ attr_used, attr_used_cnt);
+
+ if (used && t->tcam[i].in_use) {
/* need to mark this PTG as never match, as it
* was already in use and therefore duplicate
* (and lower priority)
@@ -3698,9 +3773,8 @@ ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
&t->tcam[i],
chg);
if (status)
- return status;
- } else if (!test_bit(t->tcam[i].ptg, ptgs_used) &&
- !t->tcam[i].in_use) {
+ goto free_attr_used;
+ } else if (!used && !t->tcam[i].in_use) {
/* need to enable this PTG, as it in not in use
* and not enabled (highest priority)
*/
@@ -3709,15 +3783,21 @@ ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
&t->tcam[i],
chg);
if (status)
- return status;
+ goto free_attr_used;
}
/* keep track of used ptgs */
- __set_bit(t->tcam[i].ptg, ptgs_used);
+ set_bit(t->tcam[i].ptg, ptgs_used);
+ if (attr_used_cnt < ICE_MAX_PTG_ATTRS)
+ attr_used[attr_used_cnt++] = &t->tcam[i];
+ else
+ ice_debug(hw, ICE_DBG_INIT, "Warn: ICE_MAX_PTG_ATTRS exceeded\n");
}
}
- return 0;
+free_attr_used:
+ kfree(attr_used);
+ return status;
}
/**
@@ -3800,11 +3880,15 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
p->vsig = vsig;
p->tcam_idx = t->tcam[i].tcam_idx;
+ /* set don't care masks for TCAM flags */
+ ice_set_tcam_flags(t->tcam[i].attr.mask, dc_msk);
+
/* write the TCAM entry */
status = ice_tcam_write_entry(hw, blk, t->tcam[i].tcam_idx,
t->tcam[i].prof_id,
- t->tcam[i].ptg, vsig, 0, 0,
- vl_msk, dc_msk, nm_msk);
+ t->tcam[i].ptg, vsig, 0,
+ t->tcam[i].attr.flags, vl_msk,
+ dc_msk, nm_msk);
if (status) {
devm_kfree(ice_hw_to_dev(hw), p);
goto err_ice_add_prof_id_vsig;
@@ -4099,6 +4183,54 @@ err_ice_add_prof_id_flow:
}
/**
+ * ice_flow_assoc_fdir_prof - add an FDIR profile for main/ctrl VSI
+ * @hw: pointer to the HW struct
+ * @blk: HW block
+ * @dest_vsi: dest VSI
+ * @fdir_vsi: fdir programming VSI
+ * @hdl: profile handle
+ *
+ * Update the hardware tables to enable the FDIR profile indicated by @hdl for
+ * the VSI specified by @dest_vsi. On success, the flow will be enabled.
+ *
+ * Return: 0 on success or negative errno on failure.
+ */
+int
+ice_flow_assoc_fdir_prof(struct ice_hw *hw, enum ice_block blk,
+ u16 dest_vsi, u16 fdir_vsi, u64 hdl)
+{
+ u16 vsi_num;
+ int status;
+
+ vsi_num = ice_get_hw_vsi_num(hw, dest_vsi);
+ status = ice_add_prof_id_flow(hw, blk, vsi_num, hdl);
+ if (status) {
+ ice_debug(hw, ICE_DBG_FLOW, "Adding HW profile failed for main VSI flow entry: %d\n",
+ status);
+ return status;
+ }
+
+ if (blk != ICE_BLK_FD)
+ return 0;
+
+ vsi_num = ice_get_hw_vsi_num(hw, fdir_vsi);
+ status = ice_add_prof_id_flow(hw, blk, vsi_num, hdl);
+ if (status) {
+ ice_debug(hw, ICE_DBG_FLOW, "Adding HW profile failed for ctrl VSI flow entry: %d\n",
+ status);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ vsi_num = ice_get_hw_vsi_num(hw, dest_vsi);
+ ice_rem_prof_id_flow(hw, blk, vsi_num, hdl);
+
+ return status;
+}
+
+/**
* ice_rem_prof_from_list - remove a profile from list
* @hw: pointer to the HW struct
* @lst: list to remove the profile from
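
The ice_add_prof() rework above replaces the byte-plus-bit walk over a u8 ptypes[] array with a single for_each_set_bit() pass over an unsigned long bitmap. A compact userspace approximation of that iteration, using an open-coded bit test in place of the kernel helper:

#include <stdio.h>
#include <limits.h>

#define PTYPE_MAX	1024UL
#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)
#define BITMAP_LONGS	((PTYPE_MAX + BITS_PER_LONG - 1) / BITS_PER_LONG)

static void set_ptype(unsigned long *map, unsigned long ptype)
{
	map[ptype / BITS_PER_LONG] |= 1UL << (ptype % BITS_PER_LONG);
}

int main(void)
{
	unsigned long ptypes[BITMAP_LONGS] = { 0 };

	set_ptype(ptypes, 34);
	set_ptype(ptypes, 91);
	set_ptype(ptypes, 640);

	/* Equivalent of for_each_set_bit(ptype, ptypes, PTYPE_MAX): visit
	 * every set bit once, in ascending order, regardless of which word
	 * it lives in.
	 */
	for (unsigned long bit = 0; bit < PTYPE_MAX; bit++)
		if (ptypes[bit / BITS_PER_LONG] & (1UL << (bit % BITS_PER_LONG)))
			printf("ptype %lu\n", bit);

	return 0;
}
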
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
index b39d7cdc381f..ee5d9f9c9d53 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
@@ -6,6 +6,8 @@
#include "ice_type.h"
+#define ICE_FDIR_REG_SET_SIZE 4
+
int
ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access);
void ice_release_change_lock(struct ice_hw *hw);
@@ -21,9 +23,6 @@ int
ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups,
unsigned long *bm, struct list_head *fv_list);
int
-ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count);
-u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld);
-int
ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
u16 buf_size, struct ice_sq_cd *cd);
bool
@@ -40,15 +39,19 @@ bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype);
/* XLT2/VSI group functions */
int
-ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
- const struct ice_ptype_attributes *attr, u16 attr_cnt,
- struct ice_fv_word *es, u16 *masks, bool symm);
+ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id,
+ unsigned long *ptypes, const struct ice_ptype_attributes *attr,
+ u16 attr_cnt, struct ice_fv_word *es, u16 *masks, bool symm,
+ bool fd_swap);
struct ice_prof_map *
ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id);
int
ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
int
ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
+int
+ice_flow_assoc_fdir_prof(struct ice_hw *hw, enum ice_block blk,
+ u16 dest_vsi, u16 fdir_vsi, u64 hdl);
enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len);
enum ice_ddp_state
ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len);
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h
index d427a79d001a..80c9e7c749c2 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h
@@ -93,6 +93,7 @@ enum ice_tunnel_type {
TNL_GRETAP,
TNL_GTPC,
TNL_GTPU,
+ TNL_PFCP,
__TNL_TYPE_CNT,
TNL_LAST = 0xFF,
TNL_ALL = 0xFF,
@@ -186,6 +187,7 @@ struct ice_prof_map {
};
#define ICE_INVALID_TCAM 0xFFFF
+#define ICE_MAX_PTG_ATTRS 1024
struct ice_tcam_inf {
u16 tcam_idx;
@@ -358,7 +360,8 @@ enum ice_prof_type {
ICE_PROF_TUN_GRE = 0x4,
ICE_PROF_TUN_GTPU = 0x8,
ICE_PROF_TUN_GTPC = 0x10,
- ICE_PROF_TUN_ALL = 0x1E,
+ ICE_PROF_TUN_PFCP = 0x20,
+ ICE_PROF_TUN_ALL = 0x3E,
ICE_PROF_ALL = 0xFF,
};
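
Adding ICE_PROF_TUN_PFCP as bit 0x20 is what moves ICE_PROF_TUN_ALL from 0x1E to 0x3E. A tiny sketch that checks the aggregate mask still covers every tunnel bit; the 0x02 UDP-tunnel bit is assumed from context since it falls outside the hunk above:

#include <assert.h>

/* Hypothetical mirror of the tunnel-profile bits for illustration only. */
enum prof_type {
	PROF_TUN_UDP  = 0x02,	/* assumed, not shown in the hunk */
	PROF_TUN_GRE  = 0x04,
	PROF_TUN_GTPU = 0x08,
	PROF_TUN_GTPC = 0x10,
	PROF_TUN_PFCP = 0x20,
	PROF_TUN_ALL  = 0x3E,
};

/* The PFCP bit is what extends the aggregate mask from 0x1E to 0x3E. */
static_assert((PROF_TUN_UDP | PROF_TUN_GRE | PROF_TUN_GTPU |
	       PROF_TUN_GTPC | PROF_TUN_PFCP) == PROF_TUN_ALL,
	      "tunnel mask must cover every tunnel bit");

int main(void) { return 0; }
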
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c
index fc2b58f56279..c9b6d0a84bd1 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.c
+++ b/drivers/net/ethernet/intel/ice/ice_flow.c
@@ -5,6 +5,38 @@
#include "ice_flow.h"
#include <net/gre.h>
+/* Size of known protocol header fields */
+#define ICE_FLOW_FLD_SZ_ETH_TYPE 2
+#define ICE_FLOW_FLD_SZ_VLAN 2
+#define ICE_FLOW_FLD_SZ_IPV4_ADDR 4
+#define ICE_FLOW_FLD_SZ_IPV6_ADDR 16
+#define ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR 4
+#define ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR 6
+#define ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR 8
+#define ICE_FLOW_FLD_SZ_IPV4_ID 2
+#define ICE_FLOW_FLD_SZ_IPV6_ID 4
+#define ICE_FLOW_FLD_SZ_IP_CHKSUM 2
+#define ICE_FLOW_FLD_SZ_TCP_CHKSUM 2
+#define ICE_FLOW_FLD_SZ_UDP_CHKSUM 2
+#define ICE_FLOW_FLD_SZ_SCTP_CHKSUM 4
+#define ICE_FLOW_FLD_SZ_IP_DSCP 1
+#define ICE_FLOW_FLD_SZ_IP_TTL 1
+#define ICE_FLOW_FLD_SZ_IP_PROT 1
+#define ICE_FLOW_FLD_SZ_PORT 2
+#define ICE_FLOW_FLD_SZ_TCP_FLAGS 1
+#define ICE_FLOW_FLD_SZ_ICMP_TYPE 1
+#define ICE_FLOW_FLD_SZ_ICMP_CODE 1
+#define ICE_FLOW_FLD_SZ_ARP_OPER 2
+#define ICE_FLOW_FLD_SZ_GRE_KEYID 4
+#define ICE_FLOW_FLD_SZ_GTP_TEID 4
+#define ICE_FLOW_FLD_SZ_GTP_QFI 2
+#define ICE_FLOW_FLD_SZ_PFCP_SEID 8
+#define ICE_FLOW_FLD_SZ_ESP_SPI 4
+#define ICE_FLOW_FLD_SZ_AH_SPI 4
+#define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI 4
+#define ICE_FLOW_FLD_SZ_L2TPV2_SESS_ID 2
+#define ICE_FLOW_FLD_SZ_L2TPV2_LEN_SESS_ID 2
+
/* Describe properties of a protocol header field */
struct ice_flow_field_info {
enum ice_flow_seg_hdr hdr;
@@ -20,6 +52,7 @@ struct ice_flow_field_info {
.mask = 0, \
}
+/* QFI: 6-bit field in GTP-U PDU Session Container (3GPP TS 38.415) */
#define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
.hdr = _hdr, \
.off = (_offset_bytes) * BITS_PER_BYTE, \
@@ -61,7 +94,33 @@ struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
/* ICE_FLOW_FIELD_IDX_IPV6_SA */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, sizeof(struct in6_addr)),
/* ICE_FLOW_FIELD_IDX_IPV6_DA */
- ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, sizeof(struct in6_addr)),
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
+ /* ICE_FLOW_FIELD_IDX_IPV4_CHKSUM */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 10, ICE_FLOW_FLD_SZ_IP_CHKSUM),
+ /* ICE_FLOW_FIELD_IDX_IPV4_FRAG */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV_FRAG, 4,
+ ICE_FLOW_FLD_SZ_IPV4_ID),
+ /* ICE_FLOW_FIELD_IDX_IPV6_FRAG */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV_FRAG, 4,
+ ICE_FLOW_FLD_SZ_IPV6_ID),
+ /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
+ ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
+ /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
+ ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
+ /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
+ ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
+ /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
+ ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
+ /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
+ ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
+ /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
+ ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
/* Transport */
/* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, sizeof(__be16)),
@@ -76,7 +135,14 @@ struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
/* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, sizeof(__be16)),
/* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
- ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, 1),
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
+ /* ICE_FLOW_FIELD_IDX_TCP_CHKSUM */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 16, ICE_FLOW_FLD_SZ_TCP_CHKSUM),
+ /* ICE_FLOW_FIELD_IDX_UDP_CHKSUM */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 6, ICE_FLOW_FLD_SZ_UDP_CHKSUM),
+ /* ICE_FLOW_FIELD_IDX_SCTP_CHKSUM */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 8,
+ ICE_FLOW_FLD_SZ_SCTP_CHKSUM),
/* ARP */
/* ICE_FLOW_FIELD_IDX_ARP_SIP */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, sizeof(struct in_addr)),
@@ -108,9 +174,17 @@ struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22, sizeof(__be16),
0x3f00),
/* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
- ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12, sizeof(__be32)),
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
+ ICE_FLOW_FLD_SZ_GTP_TEID),
+ /* ICE_FLOW_FIELD_IDX_GTPU_UP_QFI */
+ ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_UP, 22,
+ ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
/* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
- ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12, sizeof(__be32)),
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
+ ICE_FLOW_FLD_SZ_GTP_TEID),
+ /* ICE_FLOW_FIELD_IDX_GTPU_DWN_QFI */
+ ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_DWN, 22,
+ ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
/* PPPoE */
/* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2, sizeof(__be16)),
@@ -128,7 +202,16 @@ struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4, sizeof(__be32)),
/* NAT_T_ESP */
/* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
- ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8, sizeof(__be32)),
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
+ ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
+ /* L2TPV2 */
+ /* ICE_FLOW_FIELD_IDX_L2TPV2_SESS_ID */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV2, 12,
+ ICE_FLOW_FLD_SZ_L2TPV2_SESS_ID),
+ /* L2TPV2_LEN */
+ /* ICE_FLOW_FIELD_IDX_L2TPV2_LEN_SESS_ID */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV2, 14,
+ ICE_FLOW_FLD_SZ_L2TPV2_LEN_SESS_ID),
};
/* Bitmaps indicating relevant packet types for a particular protocol header
@@ -137,9 +220,9 @@ struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
*/
static const u32 ice_ptypes_mac_ofos[] = {
0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
- 0x0000077E, 0x00000000, 0x00000000, 0x00000000,
- 0x00400000, 0x03FFF000, 0x7FFFFFE0, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x0000077E, 0x000003FF, 0x00000000, 0x00000000,
+ 0x00400000, 0x03FFF000, 0xFFFFFFE0, 0x00000707,
+ 0xFFFFF000, 0x000003FF, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -162,10 +245,10 @@ static const u32 ice_ptypes_macvlan_il[] = {
* include IPv4 other PTYPEs
*/
static const u32 ice_ptypes_ipv4_ofos[] = {
- 0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
+ 0x1D800000, 0xBFBF7800, 0x000001DF, 0x00000000,
0x00000000, 0x00000155, 0x00000000, 0x00000000,
- 0x00000000, 0x000FC000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x000FC000, 0x000002A0, 0x00000000,
+ 0x00015000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -176,10 +259,10 @@ static const u32 ice_ptypes_ipv4_ofos[] = {
* IPv4 other PTYPEs
*/
static const u32 ice_ptypes_ipv4_ofos_all[] = {
- 0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
+ 0x1D800000, 0x27BF7800, 0x00000000, 0x00000000,
0x00000000, 0x00000155, 0x00000000, 0x00000000,
- 0x00000000, 0x000FC000, 0x83E0F800, 0x00000101,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x000FC000, 0x83E0FAA0, 0x00000101,
+ 0x3FFD5000, 0x00000000, 0x02FBEFBC, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -191,7 +274,7 @@ static const u32 ice_ptypes_ipv4_il[] = {
0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
0x0000000E, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x001FF800, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0xC0FC0000, 0x0000000F, 0xBC0BC0BC, 0x00000BC0,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -202,10 +285,10 @@ static const u32 ice_ptypes_ipv4_il[] = {
* include IPv6 other PTYPEs
*/
static const u32 ice_ptypes_ipv6_ofos[] = {
- 0x00000000, 0x00000000, 0x77000000, 0x10002000,
+ 0x00000000, 0x00000000, 0x76000000, 0x10002000,
0x00000000, 0x000002AA, 0x00000000, 0x00000000,
- 0x00000000, 0x03F00000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x03F00000, 0x00000540, 0x00000000,
+ 0x0002A000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -216,10 +299,10 @@ static const u32 ice_ptypes_ipv6_ofos[] = {
* IPv6 other PTYPEs
*/
static const u32 ice_ptypes_ipv6_ofos_all[] = {
- 0x00000000, 0x00000000, 0x77000000, 0x10002000,
- 0x00000000, 0x000002AA, 0x00000000, 0x00000000,
- 0x00080F00, 0x03F00000, 0x7C1F0000, 0x00000206,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x76000000, 0xFEFDE000,
+ 0x0000077E, 0x000002AA, 0x00000000, 0x00000000,
+ 0x00000000, 0x03F00000, 0x7C1F0540, 0x00000206,
+ 0xC002A000, 0x000003FF, 0xBC000000, 0x0002FBEF,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -231,7 +314,7 @@ static const u32 ice_ptypes_ipv6_il[] = {
0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
0x00000770, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x3F000000, 0x000003F0, 0x02F02F00, 0x0002F02F,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -304,8 +387,8 @@ static const u32 ice_ptypes_ipv6_il_no_l4[] = {
static const u32 ice_ptypes_udp_il[] = {
0x81000000, 0x20204040, 0x04000010, 0x80810102,
0x00000040, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00410000, 0x90842000, 0x00000007,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00410000, 0x908427E0, 0x00000007,
+ 0x0413F000, 0x00000041, 0x10410410, 0x00004104,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -317,7 +400,7 @@ static const u32 ice_ptypes_tcp_il[] = {
0x04000000, 0x80810102, 0x10000040, 0x02040408,
0x00000102, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00820000, 0x21084000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x08200000, 0x00000082, 0x20820820, 0x00008208,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -329,7 +412,7 @@ static const u32 ice_ptypes_sctp_il[] = {
0x08000000, 0x01020204, 0x20000081, 0x04080810,
0x00000204, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x01040000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x10400000, 0x00000104, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -353,7 +436,7 @@ static const u32 ice_ptypes_icmp_il[] = {
0x00000000, 0x02040408, 0x40000102, 0x08101020,
0x00000408, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x42108000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x20800000, 0x00000208, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -365,7 +448,7 @@ static const u32 ice_ptypes_gre_of[] = {
0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
0x0000017E, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0xBEFBEFBC, 0x0002FBEF,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -374,7 +457,7 @@ static const u32 ice_ptypes_gre_of[] = {
/* Packet types for packets with an Innermost/Last MAC header */
static const u32 ice_ptypes_mac_il[] = {
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x20000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -388,7 +471,7 @@ static const u32 ice_ptypes_mac_il[] = {
static const u32 ice_ptypes_gtpc[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000180, 0x00000000,
+ 0x00000000, 0x00000000, 0x000001E0, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -409,6 +492,29 @@ static const u32 ice_ptypes_gtpc_tid[] = {
};
/* Packet types for GTPU */
+static const struct ice_ptype_attributes ice_attr_gtpu_session[] = {
+ { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION },
+};
+
static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
{ ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
{ ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
@@ -1398,9 +1504,9 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
}
/* Add a HW profile for this flow profile */
- status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
+ status = ice_add_prof(hw, blk, prof_id, params->ptypes,
params->attr, params->attr_cnt, params->es,
- params->mask, symm);
+ params->mask, symm, true);
if (status) {
ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
goto out;
@@ -1523,6 +1629,90 @@ ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
return status;
}
+#define FLAG_GTP_EH_PDU_LINK BIT_ULL(13)
+#define FLAG_GTP_EH_PDU BIT_ULL(14)
+
+#define HI_BYTE_IN_WORD GENMASK(15, 8)
+#define LO_BYTE_IN_WORD GENMASK(7, 0)
+
+#define FLAG_GTPU_MSK \
+ (FLAG_GTP_EH_PDU | FLAG_GTP_EH_PDU_LINK)
+#define FLAG_GTPU_UP \
+ (FLAG_GTP_EH_PDU | FLAG_GTP_EH_PDU_LINK)
+#define FLAG_GTPU_DW FLAG_GTP_EH_PDU
+
+/**
+ * ice_flow_set_parser_prof - Set flow profile based on the parsed profile info
+ * @hw: pointer to the HW struct
+ * @dest_vsi: dest VSI
+ * @fdir_vsi: fdir programming VSI
+ * @prof: stores parsed profile info from raw flow
+ * @blk: classification blk
+ *
+ * Return: 0 on success or negative errno on failure.
+ */
+int
+ice_flow_set_parser_prof(struct ice_hw *hw, u16 dest_vsi, u16 fdir_vsi,
+ struct ice_parser_profile *prof, enum ice_block blk)
+{
+ u64 id = find_first_bit(prof->ptypes, ICE_FLOW_PTYPE_MAX);
+ struct ice_flow_prof_params *params __free(kfree);
+ u8 fv_words = hw->blk[blk].es.fvw;
+ int status;
+ int i, idx;
+
+ params = kzalloc(sizeof(*params), GFP_KERNEL);
+ if (!params)
+ return -ENOMEM;
+
+ for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
+ params->es[i].prot_id = ICE_PROT_INVALID;
+ params->es[i].off = ICE_FV_OFFSET_INVAL;
+ }
+
+ for (i = 0; i < prof->fv_num; i++) {
+ if (hw->blk[blk].es.reverse)
+ idx = fv_words - i - 1;
+ else
+ idx = i;
+ params->es[idx].prot_id = prof->fv[i].proto_id;
+ params->es[idx].off = prof->fv[i].offset;
+ params->mask[idx] = (((prof->fv[i].msk) << BITS_PER_BYTE) &
+ HI_BYTE_IN_WORD) |
+ (((prof->fv[i].msk) >> BITS_PER_BYTE) &
+ LO_BYTE_IN_WORD);
+ }
+
+ switch (prof->flags) {
+ case FLAG_GTPU_DW:
+ params->attr = ice_attr_gtpu_down;
+ params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
+ break;
+ case FLAG_GTPU_UP:
+ params->attr = ice_attr_gtpu_up;
+ params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
+ break;
+ default:
+ if (prof->flags_msk & FLAG_GTPU_MSK) {
+ params->attr = ice_attr_gtpu_session;
+ params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_session);
+ }
+ break;
+ }
+
+ status = ice_add_prof(hw, blk, id, prof->ptypes,
+ params->attr, params->attr_cnt,
+ params->es, params->mask, false, false);
+ if (status)
+ return status;
+
+ status = ice_flow_assoc_fdir_prof(hw, blk, dest_vsi, fdir_vsi, id);
+ if (status)
+ ice_rem_prof(hw, blk, id);
+
+ return status;
+}
+
/**
* ice_flow_add_prof - Add a flow profile for packet segments and matched fields
* @hw: pointer to the HW struct
@@ -2218,6 +2408,130 @@ static void ice_rss_set_symm(struct ice_hw *hw, struct ice_flow_prof *prof)
}
/**
+ * ice_rss_cfg_raw_symm - Configure symmetric RSS for a raw parser profile
+ * @hw: device HW
+ * @prof: parser profile describing extracted FV (field vector) entries
+ * @prof_id: RSS profile identifier used to program symmetry registers
+ *
+ * The routine scans the parser profile's FV entries and looks for
+ * direction-sensitive pairs (L3 src/dst, L4 src/dst). When a pair is found,
+ * it programs XOR-based symmetry so that flows hash identically regardless
+ * of packet direction. This preserves CPU affinity for the same 5-tuple.
+ *
+ * Notes:
+ * - The size of each logical field (IPv4/IPv6 address, L4 port) is expressed
+ * in units of ICE_FLOW_FV_EXTRACT_SZ so we can step across fv[] correctly.
+ * - We guard against out-of-bounds access before looking at fv[i + len].
+ */
+static void ice_rss_cfg_raw_symm(struct ice_hw *hw,
+ const struct ice_parser_profile *prof,
+ u64 prof_id)
+{
+ for (size_t i = 0; i < prof->fv_num; i++) {
+ u8 proto_id = prof->fv[i].proto_id;
+ u16 src_off = 0, dst_off = 0;
+ size_t src_idx, dst_idx;
+ bool is_matched = false;
+ unsigned int len = 0;
+
+ switch (proto_id) {
+ /* IPv4 address pairs (outer/inner variants) */
+ case ICE_PROT_IPV4_OF_OR_S:
+ case ICE_PROT_IPV4_IL:
+ case ICE_PROT_IPV4_IL_IL:
+ len = ICE_FLOW_FLD_SZ_IPV4_ADDR /
+ ICE_FLOW_FV_EXTRACT_SZ;
+ src_off = ICE_FLOW_FIELD_IPV4_SRC_OFFSET;
+ dst_off = ICE_FLOW_FIELD_IPV4_DST_OFFSET;
+ break;
+
+ /* IPv6 address pairs (outer/inner variants) */
+ case ICE_PROT_IPV6_OF_OR_S:
+ case ICE_PROT_IPV6_IL:
+ case ICE_PROT_IPV6_IL_IL:
+ len = ICE_FLOW_FLD_SZ_IPV6_ADDR /
+ ICE_FLOW_FV_EXTRACT_SZ;
+ src_off = ICE_FLOW_FIELD_IPV6_SRC_OFFSET;
+ dst_off = ICE_FLOW_FIELD_IPV6_DST_OFFSET;
+ break;
+
+ /* L4 port pairs (TCP/UDP/SCTP) */
+ case ICE_PROT_TCP_IL:
+ case ICE_PROT_UDP_IL_OR_S:
+ case ICE_PROT_SCTP_IL:
+ len = ICE_FLOW_FLD_SZ_PORT / ICE_FLOW_FV_EXTRACT_SZ;
+ src_off = ICE_FLOW_FIELD_SRC_PORT_OFFSET;
+ dst_off = ICE_FLOW_FIELD_DST_PORT_OFFSET;
+ break;
+
+ default:
+ continue;
+ }
+
+ /* Bounds check before accessing fv[i + len]. */
+ if (i + len >= prof->fv_num)
+ continue;
+
+ /* Verify src/dst pairing for this protocol id. */
+ is_matched = prof->fv[i].offset == src_off &&
+ prof->fv[i + len].proto_id == proto_id &&
+ prof->fv[i + len].offset == dst_off;
+ if (!is_matched)
+ continue;
+
+ /* Program XOR symmetry for this field pair. */
+ src_idx = i;
+ dst_idx = i + len;
+
+ ice_rss_config_xor(hw, prof_id, src_idx, dst_idx, len);
+
+		/* Skip over the pair just handled; the loop's ++i supplies
+		 * the final step to the next candidate entry.
+		 */
+		i += 2 * len - 1;
+ }
+}
+
+/* Max registers index per packet profile */
+#define ICE_SYMM_REG_INDEX_MAX 6
+
+/**
+ * ice_rss_update_raw_symm - update symmetric hash configuration
+ * for raw pattern
+ * @hw: pointer to the hardware structure
+ * @cfg: configure parameters for raw pattern
+ * @id: profile tracking ID
+ *
+ * Update symmetric hash configuration for raw pattern if required.
+ * Otherwise only clear to default.
+ */
+void
+ice_rss_update_raw_symm(struct ice_hw *hw,
+ struct ice_rss_raw_cfg *cfg, u64 id)
+{
+ struct ice_prof_map *map;
+ u8 prof_id, m;
+
+ mutex_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
+ map = ice_search_prof_id(hw, ICE_BLK_RSS, id);
+ if (map)
+ prof_id = map->prof_id;
+ mutex_unlock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
+ if (!map)
+ return;
+ /* clear to default */
+ for (m = 0; m < ICE_SYMM_REG_INDEX_MAX; m++)
+ wr32(hw, GLQF_HSYMM(prof_id, m), 0);
+
+ if (cfg->symm)
+ ice_rss_cfg_raw_symm(hw, &cfg->prof, prof_id);
+}
+
+/**
* ice_add_rss_cfg_sync - add an RSS configuration
* @hw: pointer to the hardware structure
* @vsi_handle: software VSI handle
@@ -2466,38 +2780,38 @@ ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
* convert its values to their appropriate flow L3, L4 values.
*/
#define ICE_FLOW_AVF_RSS_IPV4_MASKS \
- (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4))
+ (BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV4))
#define ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS \
- (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP))
+ (BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP))
#define ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS \
- (BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP))
+ (BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP))
#define ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS \
(ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS | \
- ICE_FLOW_AVF_RSS_IPV4_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP))
+ ICE_FLOW_AVF_RSS_IPV4_MASKS | BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP))
#define ICE_FLOW_AVF_RSS_IPV6_MASKS \
- (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6))
+ (BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV6))
#define ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS \
- (BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP))
+ (BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP))
#define ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS \
- (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP))
+ (BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP))
#define ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS \
(ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS | \
- ICE_FLOW_AVF_RSS_IPV6_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP))
+ ICE_FLOW_AVF_RSS_IPV6_MASKS | BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP))
/**
* ice_add_avf_rss_cfg - add an RSS configuration for AVF driver
* @hw: pointer to the hardware structure
* @vsi: VF's VSI
- * @avf_hash: hash bit fields (ICE_AVF_FLOW_FIELD_*) to configure
+ * @avf_hash: hash bit fields (LIBIE_FILTER_PCTYPE_*) to configure
*
* This function will take the hash bitmap provided by the AVF driver via a
* message, convert it to ICE-compatible values, and configure RSS flow
@@ -2514,8 +2828,7 @@ int ice_add_avf_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi, u64 avf_hash)
return -EINVAL;
vsi_handle = vsi->idx;
- if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID ||
- !ice_is_vsi_valid(hw, vsi_handle))
+ if (!avf_hash || !ice_is_vsi_valid(hw, vsi_handle))
return -EINVAL;
/* Make sure no unsupported bits are specified */
@@ -2551,11 +2864,11 @@ int ice_add_avf_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi, u64 avf_hash)
ICE_FLOW_HASH_UDP_PORT;
hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS;
} else if (hash_flds &
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP)) {
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP)) {
rss_hash = ICE_FLOW_HASH_IPV4 |
ICE_FLOW_HASH_SCTP_PORT;
hash_flds &=
- ~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP);
+ ~BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP);
}
} else if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS) {
if (hash_flds & ICE_FLOW_AVF_RSS_IPV6_MASKS) {
@@ -2572,11 +2885,11 @@ int ice_add_avf_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi, u64 avf_hash)
ICE_FLOW_HASH_UDP_PORT;
hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS;
} else if (hash_flds &
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP)) {
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP)) {
rss_hash = ICE_FLOW_HASH_IPV6 |
ICE_FLOW_HASH_SCTP_PORT;
hash_flds &=
- ~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP);
+ ~BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP);
}
}
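
The raw-pattern symmetric RSS code above programs XOR folding of paired source/destination fields so that both directions of a flow hash to the same value and land on the same queue. A standalone sketch of why XOR folding yields that property; the mixing function is a stand-in for illustration, not the Toeplitz hash the hardware actually uses:

#include <stdint.h>
#include <stdio.h>

/* Cheap deterministic mixer standing in for the real RSS hash. */
static uint32_t mix(uint32_t x)
{
	x ^= x >> 16;
	x *= 0x7feb352dU;
	x ^= x >> 15;
	return x;
}

/* XOR-fold the direction-sensitive fields before hashing; since XOR is
 * commutative, swapping src and dst cannot change the result.
 */
static uint32_t symm_hash(uint32_t saddr, uint32_t daddr,
			  uint16_t sport, uint16_t dport)
{
	return mix((saddr ^ daddr) ^ ((uint32_t)(sport ^ dport) << 16));
}

int main(void)
{
	uint32_t a = symm_hash(0x0a000001, 0x0a000002, 1234, 80);
	uint32_t b = symm_hash(0x0a000002, 0x0a000001, 80, 1234);

	printf("%08x %08x -> %s\n", a, b, a == b ? "same queue" : "bug");
	return 0;
}
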
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h
index 2fd2e0cb483d..6c6cdc8addb1 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.h
+++ b/drivers/net/ethernet/intel/ice/ice_flow.h
@@ -4,7 +4,10 @@
#ifndef _ICE_FLOW_H_
#define _ICE_FLOW_H_
+#include <linux/net/intel/libie/pctype.h>
+
#include "ice_flex_type.h"
+#include "ice_parser.h"
#define ICE_FLOW_ENTRY_HANDLE_INVAL 0
#define ICE_FLOW_FLD_OFF_INVAL 0xffff
@@ -19,6 +22,15 @@
#define ICE_FLOW_HASH_IPV6 \
(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) | \
BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA))
+#define ICE_FLOW_HASH_IPV6_PRE32 \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA) | \
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA))
+#define ICE_FLOW_HASH_IPV6_PRE48 \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA) | \
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA))
+#define ICE_FLOW_HASH_IPV6_PRE64 \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA) | \
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA))
#define ICE_FLOW_HASH_TCP_PORT \
(BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) | \
BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT))
@@ -37,6 +49,33 @@
#define ICE_HASH_SCTP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_SCTP_PORT)
#define ICE_HASH_SCTP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_SCTP_PORT)
+#define ICE_HASH_TCP_IPV6_PRE32 \
+ (ICE_FLOW_HASH_IPV6_PRE32 | ICE_FLOW_HASH_TCP_PORT)
+#define ICE_HASH_UDP_IPV6_PRE32 \
+ (ICE_FLOW_HASH_IPV6_PRE32 | ICE_FLOW_HASH_UDP_PORT)
+#define ICE_HASH_SCTP_IPV6_PRE32 \
+ (ICE_FLOW_HASH_IPV6_PRE32 | ICE_FLOW_HASH_SCTP_PORT)
+#define ICE_HASH_TCP_IPV6_PRE48 \
+ (ICE_FLOW_HASH_IPV6_PRE48 | ICE_FLOW_HASH_TCP_PORT)
+#define ICE_HASH_UDP_IPV6_PRE48 \
+ (ICE_FLOW_HASH_IPV6_PRE48 | ICE_FLOW_HASH_UDP_PORT)
+#define ICE_HASH_SCTP_IPV6_PRE48 \
+ (ICE_FLOW_HASH_IPV6_PRE48 | ICE_FLOW_HASH_SCTP_PORT)
+#define ICE_HASH_TCP_IPV6_PRE64 \
+ (ICE_FLOW_HASH_IPV6_PRE64 | ICE_FLOW_HASH_TCP_PORT)
+#define ICE_HASH_UDP_IPV6_PRE64 \
+ (ICE_FLOW_HASH_IPV6_PRE64 | ICE_FLOW_HASH_UDP_PORT)
+#define ICE_HASH_SCTP_IPV6_PRE64 \
+ (ICE_FLOW_HASH_IPV6_PRE64 | ICE_FLOW_HASH_SCTP_PORT)
+
+#define ICE_FLOW_HASH_GTP_TEID \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID))
+
+#define ICE_FLOW_HASH_GTP_IPV4_TEID \
+ (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_TEID)
+#define ICE_FLOW_HASH_GTP_IPV6_TEID \
+ (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_TEID)
+
#define ICE_FLOW_HASH_GTP_C_TEID \
(BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID))
@@ -125,6 +164,23 @@
#define ICE_FLOW_HASH_NAT_T_ESP_IPV6_SPI \
(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_NAT_T_ESP_SPI)
+#define ICE_FLOW_HASH_L2TPV2_SESS_ID \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV2_SESS_ID))
+#define ICE_FLOW_HASH_L2TPV2_SESS_ID_ETH \
+ (ICE_FLOW_HASH_ETH | ICE_FLOW_HASH_L2TPV2_SESS_ID)
+
+#define ICE_FLOW_HASH_L2TPV2_LEN_SESS_ID \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV2_LEN_SESS_ID))
+#define ICE_FLOW_HASH_L2TPV2_LEN_SESS_ID_ETH \
+ (ICE_FLOW_HASH_ETH | ICE_FLOW_HASH_L2TPV2_LEN_SESS_ID)
+
+#define ICE_FLOW_FIELD_IPV4_SRC_OFFSET 12
+#define ICE_FLOW_FIELD_IPV4_DST_OFFSET 16
+#define ICE_FLOW_FIELD_IPV6_SRC_OFFSET 8
+#define ICE_FLOW_FIELD_IPV6_DST_OFFSET 24
+#define ICE_FLOW_FIELD_SRC_PORT_OFFSET 0
+#define ICE_FLOW_FIELD_DST_PORT_OFFSET 2
+
/* Protocol header fields within a packet segment. A segment consists of one or
* more protocol headers that make up a logical group of protocol headers. Each
* logical group of protocol headers encapsulates or is encapsulated using/by
@@ -157,10 +213,13 @@ enum ice_flow_seg_hdr {
ICE_FLOW_SEG_HDR_AH = 0x00200000,
ICE_FLOW_SEG_HDR_NAT_T_ESP = 0x00400000,
ICE_FLOW_SEG_HDR_ETH_NON_IP = 0x00800000,
+ ICE_FLOW_SEG_HDR_GTPU_NON_IP = 0x01000000,
+ ICE_FLOW_SEG_HDR_L2TPV2 = 0x10000000,
/* The following is an additive bit for ICE_FLOW_SEG_HDR_IPV4 and
- * ICE_FLOW_SEG_HDR_IPV6 which include the IPV4 other PTYPEs
+ * ICE_FLOW_SEG_HDR_IPV6.
*/
- ICE_FLOW_SEG_HDR_IPV_OTHER = 0x20000000,
+ ICE_FLOW_SEG_HDR_IPV_FRAG = 0x40000000,
+ ICE_FLOW_SEG_HDR_IPV_OTHER = 0x80000000,
};
/* These segments all have the same PTYPES, but are otherwise distinguished by
@@ -197,6 +256,15 @@ enum ice_flow_field {
ICE_FLOW_FIELD_IDX_IPV4_DA,
ICE_FLOW_FIELD_IDX_IPV6_SA,
ICE_FLOW_FIELD_IDX_IPV6_DA,
+ ICE_FLOW_FIELD_IDX_IPV4_CHKSUM,
+ ICE_FLOW_FIELD_IDX_IPV4_ID,
+ ICE_FLOW_FIELD_IDX_IPV6_ID,
+ ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA,
+ ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA,
+ ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA,
+ ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA,
+ ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA,
+ ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA,
/* L4 */
ICE_FLOW_FIELD_IDX_TCP_SRC_PORT,
ICE_FLOW_FIELD_IDX_TCP_DST_PORT,
@@ -205,6 +273,9 @@ enum ice_flow_field {
ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT,
ICE_FLOW_FIELD_IDX_SCTP_DST_PORT,
ICE_FLOW_FIELD_IDX_TCP_FLAGS,
+ ICE_FLOW_FIELD_IDX_TCP_CHKSUM,
+ ICE_FLOW_FIELD_IDX_UDP_CHKSUM,
+ ICE_FLOW_FIELD_IDX_SCTP_CHKSUM,
/* ARP */
ICE_FLOW_FIELD_IDX_ARP_SIP,
ICE_FLOW_FIELD_IDX_ARP_DIP,
@@ -225,13 +296,13 @@ enum ice_flow_field {
ICE_FLOW_FIELD_IDX_GTPU_EH_QFI,
/* GTPU_UP */
ICE_FLOW_FIELD_IDX_GTPU_UP_TEID,
+ ICE_FLOW_FIELD_IDX_GTPU_UP_QFI,
/* GTPU_DWN */
ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID,
- /* PPPoE */
+ ICE_FLOW_FIELD_IDX_GTPU_DWN_QFI,
ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID,
/* PFCP */
ICE_FLOW_FIELD_IDX_PFCP_SEID,
- /* L2TPv3 */
ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID,
/* ESP */
ICE_FLOW_FIELD_IDX_ESP_SPI,
@@ -239,10 +310,16 @@ enum ice_flow_field {
ICE_FLOW_FIELD_IDX_AH_SPI,
/* NAT_T ESP */
ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI,
+ /* L2TPV2 SESSION ID*/
+ ICE_FLOW_FIELD_IDX_L2TPV2_SESS_ID,
+ /* L2TPV2_LEN SESSION ID */
+ ICE_FLOW_FIELD_IDX_L2TPV2_LEN_SESS_ID,
/* The total number of enums must not exceed 64 */
ICE_FLOW_FIELD_IDX_MAX
};
+static_assert(ICE_FLOW_FIELD_IDX_MAX <= 64, "The total number of enums must not exceed 64");
+
#define ICE_FLOW_HASH_FLD_IPV4_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)
#define ICE_FLOW_HASH_FLD_IPV6_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)
#define ICE_FLOW_HASH_FLD_IPV4_DA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)
@@ -263,57 +340,27 @@ enum ice_flow_field {
#define ICE_FLOW_HASH_FLD_GTPU_DWN_TEID \
BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID)
-/* Flow headers and fields for AVF support */
-enum ice_flow_avf_hdr_field {
- /* Values 0 - 28 are reserved for future use */
- ICE_AVF_FLOW_FIELD_INVALID = 0,
- ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP = 29,
- ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP,
- ICE_AVF_FLOW_FIELD_IPV4_UDP,
- ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK,
- ICE_AVF_FLOW_FIELD_IPV4_TCP,
- ICE_AVF_FLOW_FIELD_IPV4_SCTP,
- ICE_AVF_FLOW_FIELD_IPV4_OTHER,
- ICE_AVF_FLOW_FIELD_FRAG_IPV4,
- /* Values 37-38 are reserved */
- ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP = 39,
- ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP,
- ICE_AVF_FLOW_FIELD_IPV6_UDP,
- ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK,
- ICE_AVF_FLOW_FIELD_IPV6_TCP,
- ICE_AVF_FLOW_FIELD_IPV6_SCTP,
- ICE_AVF_FLOW_FIELD_IPV6_OTHER,
- ICE_AVF_FLOW_FIELD_FRAG_IPV6,
- ICE_AVF_FLOW_FIELD_RSVD47,
- ICE_AVF_FLOW_FIELD_FCOE_OX,
- ICE_AVF_FLOW_FIELD_FCOE_RX,
- ICE_AVF_FLOW_FIELD_FCOE_OTHER,
- /* Values 51-62 are reserved */
- ICE_AVF_FLOW_FIELD_L2_PAYLOAD = 63,
- ICE_AVF_FLOW_FIELD_MAX
-};
-
/* Supported RSS offloads This macro is defined to support
- * VIRTCHNL_OP_GET_RSS_HENA_CAPS ops. PF driver sends the RSS hardware
+ * VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS ops. PF driver sends the RSS hardware
* capabilities to the caller of this ops.
*/
-#define ICE_DEFAULT_RSS_HENA ( \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP))
+#define ICE_DEFAULT_RSS_HASHCFG ( \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV4) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV6) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
enum ice_rss_cfg_hdr_type {
ICE_RSS_OUTER_HEADERS, /* take outer headers as inputset. */
@@ -323,9 +370,14 @@ enum ice_rss_cfg_hdr_type {
/* take inner headers as inputset for packet with outer ipv6. */
ICE_RSS_INNER_HEADERS_W_OUTER_IPV6,
/* take outer headers first then inner headers as inputset */
+ /* take inner as inputset for GTPoGRE with outer IPv4 + GRE. */
+ ICE_RSS_INNER_HEADERS_W_OUTER_IPV4_GRE,
+ /* take inner as inputset for GTPoGRE with outer IPv6 + GRE. */
+ ICE_RSS_INNER_HEADERS_W_OUTER_IPV6_GRE,
ICE_RSS_ANY_HEADERS
};
+struct ice_vsi;
struct ice_rss_hash_cfg {
u32 addl_hdrs; /* protocol header fields */
u64 hash_flds; /* hash bit field (ICE_FLOW_HASH_*) to configure */
@@ -432,6 +484,12 @@ struct ice_flow_prof {
bool symm; /* Symmetric Hash for RSS */
};
+struct ice_rss_raw_cfg {
+ struct ice_parser_profile prof;
+ bool raw_ena;
+ bool symm;
+};
+
struct ice_rss_cfg {
struct list_head l_entry;
/* bitmap of VSIs added to the RSS entry */
@@ -445,6 +503,9 @@ ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
bool symm, struct ice_flow_prof **prof);
int ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id);
int
+ice_flow_set_parser_prof(struct ice_hw *hw, u16 dest_vsi, u16 fdir_vsi,
+ struct ice_parser_profile *prof, enum ice_block blk);
+int
ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
u64 entry_id, u16 vsi, enum ice_flow_priority prio,
void *data, u64 *entry_h);
@@ -467,4 +528,6 @@ int ice_add_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi,
int ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
const struct ice_rss_hash_cfg *cfg);
u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs, bool *symm);
+void ice_rss_update_raw_symm(struct ice_hw *hw,
+ struct ice_rss_raw_cfg *cfg, u64 id);
#endif /* _ICE_FLOW_H_ */
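
A small compile-checkable sketch of the invariant behind the static_assert added above: the ICE_FLOW_FIELD_IDX_* values double as bit positions in a 64-bit hash-field mask, which is also how the new ICE_FLOW_HASH_IPV6_PRE64 / ICE_HASH_TCP_IPV6_PRE64 macros are composed. The enum entries below are illustrative placeholders, not the driver's field list.

#include <assert.h>
#include <stdint.h>

enum demo_flow_field {
	DEMO_FLD_IPV6_PRE64_SA,
	DEMO_FLD_IPV6_PRE64_DA,
	DEMO_FLD_TCP_SRC_PORT,
	DEMO_FLD_TCP_DST_PORT,
	DEMO_FLD_MAX
};

/* same guard as the driver: bit positions must fit a 64-bit mask */
static_assert(DEMO_FLD_MAX <= 64, "The total number of enums must not exceed 64");

#define DEMO_BIT_ULL(n)          (1ULL << (n))
#define DEMO_HASH_IPV6_PRE64     (DEMO_BIT_ULL(DEMO_FLD_IPV6_PRE64_SA) | \
				  DEMO_BIT_ULL(DEMO_FLD_IPV6_PRE64_DA))
#define DEMO_HASH_TCP_PORT       (DEMO_BIT_ULL(DEMO_FLD_TCP_SRC_PORT) | \
				  DEMO_BIT_ULL(DEMO_FLD_TCP_DST_PORT))
#define DEMO_HASH_TCP_IPV6_PRE64 (DEMO_HASH_IPV6_PRE64 | DEMO_HASH_TCP_PORT)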
diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.c b/drivers/net/ethernet/intel/ice/ice_fw_update.c
index 319a2d6fe26c..973a13d3d92a 100644
--- a/drivers/net/ethernet/intel/ice/ice_fw_update.c
+++ b/drivers/net/ethernet/intel/ice/ice_fw_update.c
@@ -1,11 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018-2019, Intel Corporation. */
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/uuid.h>
#include <linux/crc32.h>
#include <linux/pldmfw.h>
#include "ice.h"
+#include "ice_lib.h"
#include "ice_fw_update.h"
struct ice_fwu_priv {
@@ -67,7 +68,7 @@ ice_send_package_data(struct pldmfw *context, const u8 *data, u16 length)
if (status) {
dev_err(dev, "Failed to send record package data to firmware, err %d aq_err %s\n",
- status, ice_aq_str(hw->adminq.sq_last_status));
+ status, libie_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to record package data to firmware");
return -EIO;
}
@@ -125,6 +126,10 @@ ice_check_component_response(struct ice_pf *pf, u16 id, u8 response, u8 code,
case ICE_AQ_NVM_PASS_COMP_CAN_NOT_BE_UPDATED:
dev_info(dev, "firmware has rejected updating %s\n", component);
break;
+ case ICE_AQ_NVM_PASS_COMP_PARTIAL_CHECK:
+ if (ice_is_recovery_mode(&pf->hw))
+ return 0;
+ break;
}
switch (code) {
@@ -252,7 +257,7 @@ ice_send_component_table(struct pldmfw *context, struct pldmfw_component *compon
if (status) {
dev_err(dev, "Failed to transfer component table to firmware, err %d aq_err %s\n",
- status, ice_aq_str(hw->adminq.sq_last_status));
+ status, libie_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to transfer component table to firmware");
return -EIO;
}
@@ -286,16 +291,16 @@ ice_send_component_table(struct pldmfw *context, struct pldmfw_component *compon
*
* Returns: zero on success, or a negative error code on failure.
*/
-static int
-ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
- u16 block_size, u8 *block, bool last_cmd,
- u8 *reset_level, struct netlink_ext_ack *extack)
+int ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
+ u16 block_size, u8 *block, bool last_cmd,
+ u8 *reset_level, struct netlink_ext_ack *extack)
{
u16 completion_module, completion_retval;
struct device *dev = ice_pf_to_dev(pf);
struct ice_aq_task task = {};
struct ice_hw *hw = &pf->hw;
- struct ice_aq_desc *desc;
+ struct libie_aq_desc *desc;
+ struct ice_aqc_nvm *cmd;
u32 completion_offset;
int err;
@@ -309,7 +314,7 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
if (err) {
dev_err(dev, "Failed to flash module 0x%02x with block of size %u at offset %u, err %d aq_err %s\n",
module, block_size, offset, err,
- ice_aq_str(hw->adminq.sq_last_status));
+ libie_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to program flash module");
return -EIO;
}
@@ -329,11 +334,12 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
}
desc = &task.event.desc;
- completion_module = le16_to_cpu(desc->params.nvm.module_typeid);
+ cmd = libie_aq_raw(desc);
+ completion_module = le16_to_cpu(cmd->module_typeid);
completion_retval = le16_to_cpu(desc->retval);
- completion_offset = le16_to_cpu(desc->params.nvm.offset_low);
- completion_offset |= desc->params.nvm.offset_high << 16;
+ completion_offset = le16_to_cpu(cmd->offset_low);
+ completion_offset |= cmd->offset_high << 16;
if (completion_module != module) {
dev_err(dev, "Unexpected module_typeid in write completion: got 0x%x, expected 0x%x\n",
@@ -352,7 +358,7 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
if (completion_retval) {
dev_err(dev, "Firmware failed to flash module 0x%02x with block of size %u at offset %u, err %s\n",
module, block_size, offset,
- ice_aq_str((enum ice_aq_err)completion_retval));
+ libie_aq_str((enum libie_aq_err)completion_retval));
NL_SET_ERR_MSG_MOD(extack, "Firmware failed to program flash module");
return -EIO;
}
@@ -365,7 +371,7 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
*/
if (reset_level && last_cmd && module == ICE_SR_1ST_NVM_BANK_PTR) {
if (hw->dev_caps.common_cap.pcie_reset_avoidance) {
- *reset_level = desc->params.nvm.cmd_flags &
+ *reset_level = cmd->cmd_flags &
ICE_AQC_NVM_RESET_LVL_M;
dev_dbg(dev, "Firmware reported required reset level as %u\n",
*reset_level);
@@ -483,7 +489,8 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
struct device *dev = ice_pf_to_dev(pf);
struct ice_aq_task task = {};
struct ice_hw *hw = &pf->hw;
- struct ice_aq_desc *desc;
+ struct libie_aq_desc *desc;
+ struct ice_aqc_nvm *cmd;
struct devlink *devlink;
int err;
@@ -499,7 +506,7 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
if (err) {
dev_err(dev, "Failed to erase %s (module 0x%02x), err %d aq_err %s\n",
component, module, err,
- ice_aq_str(hw->adminq.sq_last_status));
+ libie_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to erase flash module");
err = -EIO;
goto out_notify_devlink;
@@ -514,7 +521,8 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
}
desc = &task.event.desc;
- completion_module = le16_to_cpu(desc->params.nvm.module_typeid);
+ cmd = libie_aq_raw(desc);
+ completion_module = le16_to_cpu(cmd->module_typeid);
completion_retval = le16_to_cpu(desc->retval);
if (completion_module != module) {
@@ -526,9 +534,9 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
}
if (completion_retval) {
- dev_err(dev, "Firmware failed to erase %s (module 0x02%x), aq_err %s\n",
+ dev_err(dev, "Firmware failed to erase %s (module 0x%02x), aq_err %s\n",
component, module,
- ice_aq_str((enum ice_aq_err)completion_retval));
+ libie_aq_str((enum libie_aq_err)completion_retval));
NL_SET_ERR_MSG_MOD(extack, "Firmware failed to erase flash");
err = -EIO;
goto out_notify_devlink;
@@ -575,7 +583,7 @@ ice_switch_flash_banks(struct ice_pf *pf, u8 activate_flags,
err = ice_nvm_write_activate(hw, activate_flags, &response_flags);
if (err) {
dev_err(dev, "Failed to switch active flash banks, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to switch active flash banks");
return -EIO;
}
@@ -607,7 +615,7 @@ ice_switch_flash_banks(struct ice_pf *pf, u8 activate_flags,
completion_retval = le16_to_cpu(task.event.desc.retval);
if (completion_retval) {
dev_err(dev, "Firmware failed to switch active flash banks aq_err %s\n",
- ice_aq_str((enum ice_aq_err)completion_retval));
+ libie_aq_str((enum libie_aq_err)completion_retval));
NL_SET_ERR_MSG_MOD(extack, "Firmware failed to switch active flash banks");
return -EIO;
}
@@ -945,7 +953,7 @@ ice_cancel_pending_update(struct ice_pf *pf, const char *component,
err = ice_acquire_nvm(hw, ICE_RES_WRITE);
if (err) {
dev_err(dev, "Failed to acquire device flash lock, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to acquire device flash lock");
return err;
}
@@ -1005,13 +1013,20 @@ int ice_devlink_flash_update(struct devlink *devlink,
return -EOPNOTSUPP;
}
- if (!hw->dev_caps.common_cap.nvm_unified_update) {
+ if (!hw->dev_caps.common_cap.nvm_unified_update && !ice_is_recovery_mode(hw)) {
NL_SET_ERR_MSG_MOD(extack, "Current firmware does not support unified update");
return -EOPNOTSUPP;
}
memset(&priv, 0, sizeof(priv));
+ if (params->component && strcmp(params->component, "fw.mgmt") == 0) {
+ priv.context.mode = PLDMFW_UPDATE_MODE_SINGLE_COMPONENT;
+ priv.context.component_identifier = NVM_COMP_ID_NVM;
+ } else if (params->component) {
+ return -EOPNOTSUPP;
+ }
+
/* the E822 device needs a slightly different ops */
if (hw->mac_type == ICE_MAC_GENERIC)
priv.context.ops = &ice_fwu_ops_e822;
@@ -1031,7 +1046,7 @@ int ice_devlink_flash_update(struct devlink *devlink,
err = ice_acquire_nvm(hw, ICE_RES_WRITE);
if (err) {
dev_err(dev, "Failed to acquire device flash lock, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to acquire device flash lock");
return err;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.h b/drivers/net/ethernet/intel/ice/ice_fw_update.h
index 750574885716..04b200462757 100644
--- a/drivers/net/ethernet/intel/ice/ice_fw_update.h
+++ b/drivers/net/ethernet/intel/ice/ice_fw_update.h
@@ -9,5 +9,8 @@ int ice_devlink_flash_update(struct devlink *devlink,
struct netlink_ext_ack *extack);
int ice_get_pending_updates(struct ice_pf *pf, u8 *pending,
struct netlink_ext_ack *extack);
+int ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
+ u16 block_size, u8 *block, bool last_cmd,
+ u8 *reset_level, struct netlink_ext_ack *extack);
#endif
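
A standalone sketch of the completion parsing shown in ice_write_one_nvm_block() above, now that the NVM command struct is reached via libie_aq_raw(): the flash offset comes back split into a little-endian 16-bit low half and an 8-bit high byte. The struct layout and le16toh() below are userspace stand-ins for the real ice_aqc_nvm fields and le16_to_cpu().

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct demo_nvm_completion {    /* illustrative layout, not the real ice_aqc_nvm */
	uint16_t offset_low;    /* little-endian on the wire */
	uint8_t  offset_high;
};

static uint32_t demo_completion_offset(const struct demo_nvm_completion *c)
{
	uint32_t off = le16toh(c->offset_low);

	off |= (uint32_t)c->offset_high << 16;
	return off;
}

int main(void)
{
	struct demo_nvm_completion c = {
		.offset_low = htole16(0x3456),
		.offset_high = 0x12,
	};

	printf("offset=0x%06x\n", (unsigned int)demo_completion_offset(&c)); /* 0x123456 */
	return 0;
}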
diff --git a/drivers/net/ethernet/intel/ice/ice_fwlog.c b/drivers/net/ethernet/intel/ice/ice_fwlog.c
deleted file mode 100644
index 4fd15387a7e5..000000000000
--- a/drivers/net/ethernet/intel/ice/ice_fwlog.c
+++ /dev/null
@@ -1,472 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2022, Intel Corporation. */
-
-#include <linux/vmalloc.h>
-#include "ice.h"
-#include "ice_common.h"
-#include "ice_fwlog.h"
-
-bool ice_fwlog_ring_full(struct ice_fwlog_ring *rings)
-{
- u16 head, tail;
-
- head = rings->head;
- tail = rings->tail;
-
- if (head < tail && (tail - head == (rings->size - 1)))
- return true;
- else if (head > tail && (tail == (head - 1)))
- return true;
-
- return false;
-}
-
-bool ice_fwlog_ring_empty(struct ice_fwlog_ring *rings)
-{
- return rings->head == rings->tail;
-}
-
-void ice_fwlog_ring_increment(u16 *item, u16 size)
-{
- *item = (*item + 1) & (size - 1);
-}
-
-static int ice_fwlog_alloc_ring_buffs(struct ice_fwlog_ring *rings)
-{
- int i, nr_bytes;
- u8 *mem;
-
- nr_bytes = rings->size * ICE_AQ_MAX_BUF_LEN;
- mem = vzalloc(nr_bytes);
- if (!mem)
- return -ENOMEM;
-
- for (i = 0; i < rings->size; i++) {
- struct ice_fwlog_data *ring = &rings->rings[i];
-
- ring->data_size = ICE_AQ_MAX_BUF_LEN;
- ring->data = mem;
- mem += ICE_AQ_MAX_BUF_LEN;
- }
-
- return 0;
-}
-
-static void ice_fwlog_free_ring_buffs(struct ice_fwlog_ring *rings)
-{
- int i;
-
- for (i = 0; i < rings->size; i++) {
- struct ice_fwlog_data *ring = &rings->rings[i];
-
- /* the first ring is the base memory for the whole range so
- * free it
- */
- if (!i)
- vfree(ring->data);
-
- ring->data = NULL;
- ring->data_size = 0;
- }
-}
-
-#define ICE_FWLOG_INDEX_TO_BYTES(n) ((128 * 1024) << (n))
-/**
- * ice_fwlog_realloc_rings - reallocate the FW log rings
- * @hw: pointer to the HW structure
- * @index: the new index to use to allocate memory for the log data
- *
- */
-void ice_fwlog_realloc_rings(struct ice_hw *hw, int index)
-{
- struct ice_fwlog_ring ring;
- int status, ring_size;
-
- /* convert the number of bytes into a number of 4K buffers. externally
- * the driver presents the interface to the FW log data as a number of
- * bytes because that's easy for users to understand. internally the
- * driver uses a ring of buffers because the driver doesn't know where
- * the beginning and end of any line of log data is so the driver has
- * to overwrite data as complete blocks. when the data is returned to
- * the user the driver knows that the data is correct and the FW log
- * can be correctly parsed by the tools
- */
- ring_size = ICE_FWLOG_INDEX_TO_BYTES(index) / ICE_AQ_MAX_BUF_LEN;
- if (ring_size == hw->fwlog_ring.size)
- return;
-
- /* allocate space for the new rings and buffers then release the
- * old rings and buffers. that way if we don't have enough
- * memory then we at least have what we had before
- */
- ring.rings = kcalloc(ring_size, sizeof(*ring.rings), GFP_KERNEL);
- if (!ring.rings)
- return;
-
- ring.size = ring_size;
-
- status = ice_fwlog_alloc_ring_buffs(&ring);
- if (status) {
- dev_warn(ice_hw_to_dev(hw), "Unable to allocate memory for FW log ring data buffers\n");
- ice_fwlog_free_ring_buffs(&ring);
- kfree(ring.rings);
- return;
- }
-
- ice_fwlog_free_ring_buffs(&hw->fwlog_ring);
- kfree(hw->fwlog_ring.rings);
-
- hw->fwlog_ring.rings = ring.rings;
- hw->fwlog_ring.size = ring.size;
- hw->fwlog_ring.index = index;
- hw->fwlog_ring.head = 0;
- hw->fwlog_ring.tail = 0;
-}
-
-/**
- * ice_fwlog_init - Initialize FW logging configuration
- * @hw: pointer to the HW structure
- *
- * This function should be called on driver initialization during
- * ice_init_hw().
- */
-int ice_fwlog_init(struct ice_hw *hw)
-{
- /* only support fw log commands on PF 0 */
- if (hw->bus.func)
- return -EINVAL;
-
- ice_fwlog_set_supported(hw);
-
- if (ice_fwlog_supported(hw)) {
- int status;
-
- /* read the current config from the FW and store it */
- status = ice_fwlog_get(hw, &hw->fwlog_cfg);
- if (status)
- return status;
-
- hw->fwlog_ring.rings = kcalloc(ICE_FWLOG_RING_SIZE_DFLT,
- sizeof(*hw->fwlog_ring.rings),
- GFP_KERNEL);
- if (!hw->fwlog_ring.rings) {
- dev_warn(ice_hw_to_dev(hw), "Unable to allocate memory for FW log rings\n");
- return -ENOMEM;
- }
-
- hw->fwlog_ring.size = ICE_FWLOG_RING_SIZE_DFLT;
- hw->fwlog_ring.index = ICE_FWLOG_RING_SIZE_INDEX_DFLT;
-
- status = ice_fwlog_alloc_ring_buffs(&hw->fwlog_ring);
- if (status) {
- dev_warn(ice_hw_to_dev(hw), "Unable to allocate memory for FW log ring data buffers\n");
- ice_fwlog_free_ring_buffs(&hw->fwlog_ring);
- kfree(hw->fwlog_ring.rings);
- return status;
- }
-
- ice_debugfs_fwlog_init(hw->back);
- } else {
- dev_warn(ice_hw_to_dev(hw), "FW logging is not supported in this NVM image. Please update the NVM to get FW log support\n");
- }
-
- return 0;
-}
-
-/**
- * ice_fwlog_deinit - unroll FW logging configuration
- * @hw: pointer to the HW structure
- *
- * This function should be called in ice_deinit_hw().
- */
-void ice_fwlog_deinit(struct ice_hw *hw)
-{
- struct ice_pf *pf = hw->back;
- int status;
-
- /* only support fw log commands on PF 0 */
- if (hw->bus.func)
- return;
-
- ice_debugfs_pf_deinit(hw->back);
-
- /* make sure FW logging is disabled to not put the FW in a weird state
- * for the next driver load
- */
- hw->fwlog_cfg.options &= ~ICE_FWLOG_OPTION_ARQ_ENA;
- status = ice_fwlog_set(hw, &hw->fwlog_cfg);
- if (status)
- dev_warn(ice_hw_to_dev(hw), "Unable to turn off FW logging, status: %d\n",
- status);
-
- kfree(pf->ice_debugfs_pf_fwlog_modules);
-
- pf->ice_debugfs_pf_fwlog_modules = NULL;
-
- status = ice_fwlog_unregister(hw);
- if (status)
- dev_warn(ice_hw_to_dev(hw), "Unable to unregister FW logging, status: %d\n",
- status);
-
- if (hw->fwlog_ring.rings) {
- ice_fwlog_free_ring_buffs(&hw->fwlog_ring);
- kfree(hw->fwlog_ring.rings);
- }
-}
-
-/**
- * ice_fwlog_supported - Cached for whether FW supports FW logging or not
- * @hw: pointer to the HW structure
- *
- * This will always return false if called before ice_init_hw(), so it must be
- * called after ice_init_hw().
- */
-bool ice_fwlog_supported(struct ice_hw *hw)
-{
- return hw->fwlog_supported;
-}
-
-/**
- * ice_aq_fwlog_set - Set FW logging configuration AQ command (0xFF30)
- * @hw: pointer to the HW structure
- * @entries: entries to configure
- * @num_entries: number of @entries
- * @options: options from ice_fwlog_cfg->options structure
- * @log_resolution: logging resolution
- */
-static int
-ice_aq_fwlog_set(struct ice_hw *hw, struct ice_fwlog_module_entry *entries,
- u16 num_entries, u16 options, u16 log_resolution)
-{
- struct ice_aqc_fw_log_cfg_resp *fw_modules;
- struct ice_aqc_fw_log *cmd;
- struct ice_aq_desc desc;
- int status;
- int i;
-
- fw_modules = kcalloc(num_entries, sizeof(*fw_modules), GFP_KERNEL);
- if (!fw_modules)
- return -ENOMEM;
-
- for (i = 0; i < num_entries; i++) {
- fw_modules[i].module_identifier =
- cpu_to_le16(entries[i].module_id);
- fw_modules[i].log_level = entries[i].log_level;
- }
-
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logs_config);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
-
- cmd = &desc.params.fw_log;
-
- cmd->cmd_flags = ICE_AQC_FW_LOG_CONF_SET_VALID;
- cmd->ops.cfg.log_resolution = cpu_to_le16(log_resolution);
- cmd->ops.cfg.mdl_cnt = cpu_to_le16(num_entries);
-
- if (options & ICE_FWLOG_OPTION_ARQ_ENA)
- cmd->cmd_flags |= ICE_AQC_FW_LOG_CONF_AQ_EN;
- if (options & ICE_FWLOG_OPTION_UART_ENA)
- cmd->cmd_flags |= ICE_AQC_FW_LOG_CONF_UART_EN;
-
- status = ice_aq_send_cmd(hw, &desc, fw_modules,
- sizeof(*fw_modules) * num_entries,
- NULL);
-
- kfree(fw_modules);
-
- return status;
-}
-
-/**
- * ice_fwlog_set - Set the firmware logging settings
- * @hw: pointer to the HW structure
- * @cfg: config used to set firmware logging
- *
- * This function should be called whenever the driver needs to set the firmware
- * logging configuration. It can be called on initialization, reset, or during
- * runtime.
- *
- * If the PF wishes to receive FW logging then it must register via
- * ice_fwlog_register. Note, that ice_fwlog_register does not need to be called
- * for init.
- */
-int ice_fwlog_set(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
-{
- if (!ice_fwlog_supported(hw))
- return -EOPNOTSUPP;
-
- return ice_aq_fwlog_set(hw, cfg->module_entries,
- ICE_AQC_FW_LOG_ID_MAX, cfg->options,
- cfg->log_resolution);
-}
-
-/**
- * ice_aq_fwlog_get - Get the current firmware logging configuration (0xFF32)
- * @hw: pointer to the HW structure
- * @cfg: firmware logging configuration to populate
- */
-static int ice_aq_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
-{
- struct ice_aqc_fw_log_cfg_resp *fw_modules;
- struct ice_aqc_fw_log *cmd;
- struct ice_aq_desc desc;
- u16 module_id_cnt;
- int status;
- void *buf;
- int i;
-
- memset(cfg, 0, sizeof(*cfg));
-
- buf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logs_query);
- cmd = &desc.params.fw_log;
-
- cmd->cmd_flags = ICE_AQC_FW_LOG_AQ_QUERY;
-
- status = ice_aq_send_cmd(hw, &desc, buf, ICE_AQ_MAX_BUF_LEN, NULL);
- if (status) {
- ice_debug(hw, ICE_DBG_FW_LOG, "Failed to get FW log configuration\n");
- goto status_out;
- }
-
- module_id_cnt = le16_to_cpu(cmd->ops.cfg.mdl_cnt);
- if (module_id_cnt < ICE_AQC_FW_LOG_ID_MAX) {
- ice_debug(hw, ICE_DBG_FW_LOG, "FW returned less than the expected number of FW log module IDs\n");
- } else if (module_id_cnt > ICE_AQC_FW_LOG_ID_MAX) {
- ice_debug(hw, ICE_DBG_FW_LOG, "FW returned more than expected number of FW log module IDs, setting module_id_cnt to software expected max %u\n",
- ICE_AQC_FW_LOG_ID_MAX);
- module_id_cnt = ICE_AQC_FW_LOG_ID_MAX;
- }
-
- cfg->log_resolution = le16_to_cpu(cmd->ops.cfg.log_resolution);
- if (cmd->cmd_flags & ICE_AQC_FW_LOG_CONF_AQ_EN)
- cfg->options |= ICE_FWLOG_OPTION_ARQ_ENA;
- if (cmd->cmd_flags & ICE_AQC_FW_LOG_CONF_UART_EN)
- cfg->options |= ICE_FWLOG_OPTION_UART_ENA;
- if (cmd->cmd_flags & ICE_AQC_FW_LOG_QUERY_REGISTERED)
- cfg->options |= ICE_FWLOG_OPTION_IS_REGISTERED;
-
- fw_modules = (struct ice_aqc_fw_log_cfg_resp *)buf;
-
- for (i = 0; i < module_id_cnt; i++) {
- struct ice_aqc_fw_log_cfg_resp *fw_module = &fw_modules[i];
-
- cfg->module_entries[i].module_id =
- le16_to_cpu(fw_module->module_identifier);
- cfg->module_entries[i].log_level = fw_module->log_level;
- }
-
-status_out:
- kfree(buf);
- return status;
-}
-
-/**
- * ice_fwlog_get - Get the firmware logging settings
- * @hw: pointer to the HW structure
- * @cfg: config to populate based on current firmware logging settings
- */
-int ice_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
-{
- if (!ice_fwlog_supported(hw))
- return -EOPNOTSUPP;
-
- return ice_aq_fwlog_get(hw, cfg);
-}
-
-/**
- * ice_aq_fwlog_register - Register PF for firmware logging events (0xFF31)
- * @hw: pointer to the HW structure
- * @reg: true to register and false to unregister
- */
-static int ice_aq_fwlog_register(struct ice_hw *hw, bool reg)
-{
- struct ice_aq_desc desc;
-
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logs_register);
-
- if (reg)
- desc.params.fw_log.cmd_flags = ICE_AQC_FW_LOG_AQ_REGISTER;
-
- return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
-}
-
-/**
- * ice_fwlog_register - Register the PF for firmware logging
- * @hw: pointer to the HW structure
- *
- * After this call the PF will start to receive firmware logging based on the
- * configuration set in ice_fwlog_set.
- */
-int ice_fwlog_register(struct ice_hw *hw)
-{
- int status;
-
- if (!ice_fwlog_supported(hw))
- return -EOPNOTSUPP;
-
- status = ice_aq_fwlog_register(hw, true);
- if (status)
- ice_debug(hw, ICE_DBG_FW_LOG, "Failed to register for firmware logging events over ARQ\n");
- else
- hw->fwlog_cfg.options |= ICE_FWLOG_OPTION_IS_REGISTERED;
-
- return status;
-}
-
-/**
- * ice_fwlog_unregister - Unregister the PF from firmware logging
- * @hw: pointer to the HW structure
- */
-int ice_fwlog_unregister(struct ice_hw *hw)
-{
- int status;
-
- if (!ice_fwlog_supported(hw))
- return -EOPNOTSUPP;
-
- status = ice_aq_fwlog_register(hw, false);
- if (status)
- ice_debug(hw, ICE_DBG_FW_LOG, "Failed to unregister from firmware logging events over ARQ\n");
- else
- hw->fwlog_cfg.options &= ~ICE_FWLOG_OPTION_IS_REGISTERED;
-
- return status;
-}
-
-/**
- * ice_fwlog_set_supported - Set if FW logging is supported by FW
- * @hw: pointer to the HW struct
- *
- * If FW returns success to the ice_aq_fwlog_get call then it supports FW
- * logging, else it doesn't. Set the fwlog_supported flag accordingly.
- *
- * This function is only meant to be called during driver init to determine if
- * the FW support FW logging.
- */
-void ice_fwlog_set_supported(struct ice_hw *hw)
-{
- struct ice_fwlog_cfg *cfg;
- int status;
-
- hw->fwlog_supported = false;
-
- cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
- if (!cfg)
- return;
-
- /* don't call ice_fwlog_get() because that would check to see if FW
- * logging is supported which is what the driver is determining now
- */
- status = ice_aq_fwlog_get(hw, cfg);
- if (status)
- ice_debug(hw, ICE_DBG_FW_LOG, "ice_aq_fwlog_get failed, FW logging is not supported on this version of FW, status %d\n",
- status);
- else
- hw->fwlog_supported = true;
-
- kfree(cfg);
-}
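
For reference, a standalone sketch of the ring bookkeeping that the removed ice_fwlog_ring_full()/empty()/increment() helpers implemented: head and tail index a power-of-two ring, advancing wraps with a size-1 mask, and "full" is the state where tail sits one slot behind head (equivalent to the two-branch check in the deleted code).

#include <stdbool.h>
#include <stdint.h>

struct demo_ring {
	uint16_t head, tail, size;    /* size must be a power of two */
};

static void demo_ring_increment(uint16_t *item, uint16_t size)
{
	*item = (*item + 1) & (size - 1);
}

static bool demo_ring_empty(const struct demo_ring *r)
{
	return r->head == r->tail;
}

static bool demo_ring_full(const struct demo_ring *r)
{
	uint16_t next = (r->tail + 1) & (r->size - 1);

	return next == r->head;    /* one slot is always left unused */
}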
diff --git a/drivers/net/ethernet/intel/ice/ice_fwlog.h b/drivers/net/ethernet/intel/ice/ice_fwlog.h
deleted file mode 100644
index 287e71fa4b86..000000000000
--- a/drivers/net/ethernet/intel/ice/ice_fwlog.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2022, Intel Corporation. */
-
-#ifndef _ICE_FWLOG_H_
-#define _ICE_FWLOG_H_
-#include "ice_adminq_cmd.h"
-
-struct ice_hw;
-
-/* Only a single log level should be set and all log levels under the set value
- * are enabled, e.g. if log level is set to ICE_FW_LOG_LEVEL_VERBOSE, then all
- * other log levels are included (except ICE_FW_LOG_LEVEL_NONE)
- */
-enum ice_fwlog_level {
- ICE_FWLOG_LEVEL_NONE = 0,
- ICE_FWLOG_LEVEL_ERROR = 1,
- ICE_FWLOG_LEVEL_WARNING = 2,
- ICE_FWLOG_LEVEL_NORMAL = 3,
- ICE_FWLOG_LEVEL_VERBOSE = 4,
- ICE_FWLOG_LEVEL_INVALID, /* all values >= this entry are invalid */
-};
-
-struct ice_fwlog_module_entry {
- /* module ID for the corresponding firmware logging event */
- u16 module_id;
- /* verbosity level for the module_id */
- u8 log_level;
-};
-
-struct ice_fwlog_cfg {
- /* list of modules for configuring log level */
- struct ice_fwlog_module_entry module_entries[ICE_AQC_FW_LOG_ID_MAX];
- /* options used to configure firmware logging */
- u16 options;
-#define ICE_FWLOG_OPTION_ARQ_ENA BIT(0)
-#define ICE_FWLOG_OPTION_UART_ENA BIT(1)
- /* set before calling ice_fwlog_init() so the PF registers for firmware
- * logging on initialization
- */
-#define ICE_FWLOG_OPTION_REGISTER_ON_INIT BIT(2)
- /* set in the ice_fwlog_get() response if the PF is registered for FW
- * logging events over ARQ
- */
-#define ICE_FWLOG_OPTION_IS_REGISTERED BIT(3)
-
- /* minimum number of log events sent per Admin Receive Queue event */
- u16 log_resolution;
-};
-
-struct ice_fwlog_data {
- u16 data_size;
- u8 *data;
-};
-
-struct ice_fwlog_ring {
- struct ice_fwlog_data *rings;
- u16 index;
- u16 size;
- u16 head;
- u16 tail;
-};
-
-#define ICE_FWLOG_RING_SIZE_INDEX_DFLT 3
-#define ICE_FWLOG_RING_SIZE_DFLT 256
-#define ICE_FWLOG_RING_SIZE_MAX 512
-
-bool ice_fwlog_ring_full(struct ice_fwlog_ring *rings);
-bool ice_fwlog_ring_empty(struct ice_fwlog_ring *rings);
-void ice_fwlog_ring_increment(u16 *item, u16 size);
-void ice_fwlog_set_supported(struct ice_hw *hw);
-bool ice_fwlog_supported(struct ice_hw *hw);
-int ice_fwlog_init(struct ice_hw *hw);
-void ice_fwlog_deinit(struct ice_hw *hw);
-int ice_fwlog_set(struct ice_hw *hw, struct ice_fwlog_cfg *cfg);
-int ice_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg);
-int ice_fwlog_register(struct ice_hw *hw);
-int ice_fwlog_unregister(struct ice_hw *hw);
-void ice_fwlog_realloc_rings(struct ice_hw *hw, int index);
-#endif /* _ICE_FWLOG_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.c b/drivers/net/ethernet/intel/ice/ice_gnss.c
index c8ea1af51ad3..6b26290452d4 100644
--- a/drivers/net/ethernet/intel/ice/ice_gnss.c
+++ b/drivers/net/ethernet/intel/ice/ice_gnss.c
@@ -182,7 +182,7 @@ static struct gnss_serial *ice_gnss_struct_init(struct ice_pf *pf)
pf->gnss_serial = gnss;
kthread_init_delayed_work(&gnss->read_work, ice_gnss_read);
- kworker = kthread_create_worker(0, "ice-gnss-%s", dev_name(dev));
+ kworker = kthread_run_worker(0, "ice-gnss-%s", dev_name(dev));
if (IS_ERR(kworker)) {
kfree(gnss);
return NULL;
@@ -381,32 +381,23 @@ void ice_gnss_exit(struct ice_pf *pf)
}
/**
- * ice_gnss_is_gps_present - Check if GPS HW is present
+ * ice_gnss_is_module_present - Check if GNSS HW is present
* @hw: pointer to HW struct
+ *
+ * Return: true when GNSS is present, false otherwise.
*/
-bool ice_gnss_is_gps_present(struct ice_hw *hw)
+bool ice_gnss_is_module_present(struct ice_hw *hw)
{
- if (!hw->func_caps.ts_func_info.src_tmr_owned)
- return false;
+ int err;
+ u8 data;
- if (!ice_is_gps_in_netlist(hw))
+ if (!hw->func_caps.ts_func_info.src_tmr_owned ||
+ !ice_is_gps_in_netlist(hw))
return false;
-#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
- if (ice_is_e810t(hw)) {
- int err;
- u8 data;
-
- err = ice_read_pca9575_reg_e810t(hw, ICE_PCA9575_P0_IN, &data);
- if (err || !!(data & ICE_E810T_P0_GNSS_PRSNT_N))
- return false;
- } else {
- return false;
- }
-#else
- if (!ice_is_e810t(hw))
+ err = ice_read_pca9575_reg(hw, ICE_PCA9575_P0_IN, &data);
+ if (err || !!(data & ICE_P0_GNSS_PRSNT_N))
return false;
-#endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
return true;
}
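
A tiny sketch of the presence test consolidated above: ice_gnss_is_module_present() reads the PCA9575 port-0 input register and treats the GNSS "present" pin as active-low, so a set bit means no module. The bit position below is an assumption standing in for ICE_P0_GNSS_PRSNT_N.

#include <stdbool.h>
#include <stdint.h>

#define DEMO_P0_GNSS_PRSNT_N  (1u << 5)    /* assumed bit; stand-in for ICE_P0_GNSS_PRSNT_N */

static bool demo_gnss_module_present(uint8_t p0_in)
{
	return !(p0_in & DEMO_P0_GNSS_PRSNT_N);    /* present pin is active-low */
}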
diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.h b/drivers/net/ethernet/intel/ice/ice_gnss.h
index 75e567ad7059..15daf603ed7b 100644
--- a/drivers/net/ethernet/intel/ice/ice_gnss.h
+++ b/drivers/net/ethernet/intel/ice/ice_gnss.h
@@ -37,11 +37,11 @@ struct gnss_serial {
#if IS_ENABLED(CONFIG_GNSS)
void ice_gnss_init(struct ice_pf *pf);
void ice_gnss_exit(struct ice_pf *pf);
-bool ice_gnss_is_gps_present(struct ice_hw *hw);
+bool ice_gnss_is_module_present(struct ice_hw *hw);
#else
static inline void ice_gnss_init(struct ice_pf *pf) { }
static inline void ice_gnss_exit(struct ice_pf *pf) { }
-static inline bool ice_gnss_is_gps_present(struct ice_hw *hw)
+static inline bool ice_gnss_is_module_present(struct ice_hw *hw)
{
return false;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index cfac1d432c15..082ad33c53dc 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -6,10 +6,20 @@
#ifndef _ICE_HW_AUTOGEN_H_
#define _ICE_HW_AUTOGEN_H_
+#define GLCOMM_QUANTA_PROF(_i) (0x002D2D68 + ((_i) * 4))
+#define GLCOMM_QUANTA_PROF_MAX_INDEX 15
+#define GLCOMM_QUANTA_PROF_QUANTA_SIZE_S 0
+#define GLCOMM_QUANTA_PROF_QUANTA_SIZE_M ICE_M(0x3FFF, 0)
+#define GLCOMM_QUANTA_PROF_MAX_CMD_S 16
+#define GLCOMM_QUANTA_PROF_MAX_CMD_M ICE_M(0xFF, 16)
+#define GLCOMM_QUANTA_PROF_MAX_DESC_S 24
+#define GLCOMM_QUANTA_PROF_MAX_DESC_M ICE_M(0x3F, 24)
#define QTX_COMM_DBELL(_DBQM) (0x002C0000 + ((_DBQM) * 4))
#define QTX_COMM_HEAD(_DBQM) (0x000E0000 + ((_DBQM) * 4))
+#define QTX_COMM_HEAD_MAX_INDEX 16383
#define QTX_COMM_HEAD_HEAD_S 0
#define QTX_COMM_HEAD_HEAD_M ICE_M(0x1FFF, 0)
+#define E830_GLQTX_TXTIME_DBELL_LSB(_DBQM) (0x002E0000 + ((_DBQM) * 8))
#define PF_FW_ARQBAH 0x00080180
#define PF_FW_ARQBAL 0x00080080
#define PF_FW_ARQH 0x00080380
@@ -157,6 +167,8 @@
#define GLGEN_RTRIG_CORER_M BIT(0)
#define GLGEN_RTRIG_GLOBR_M BIT(1)
#define GLGEN_STAT 0x000B612C
+#define GLGEN_SWITCH_MODE_CONFIG 0x000B81E0
+#define GLGEN_SWITCH_MODE_CONFIG_25X4_QUAD_M BIT(2)
#define GLGEN_VFLRSTAT(_i) (0x00093A04 + ((_i) * 4))
#define PFGEN_CTRL 0x00091000
#define PFGEN_CTRL_PFSWR_M BIT(0)
@@ -177,6 +189,8 @@
#define GLINT_CTL_ITR_GRAN_50_M ICE_M(0xF, 24)
#define GLINT_CTL_ITR_GRAN_25_S 28
#define GLINT_CTL_ITR_GRAN_25_M ICE_M(0xF, 28)
+#define GLGEN_MAC_LINK_TOPO 0x000B81DC
+#define GLGEN_MAC_LINK_TOPO_LINK_TOPO_M GENMASK(1, 0)
#define GLINT_DYN_CTL(_INT) (0x00160000 + ((_INT) * 4))
#define GLINT_DYN_CTL_INTENA_M BIT(0)
#define GLINT_DYN_CTL_CLEARPBA_M BIT(1)
@@ -260,6 +274,8 @@
#define VPINT_ALLOC_PCI_VALID_M BIT(31)
#define VPINT_MBX_CTL(_VSI) (0x0016A000 + ((_VSI) * 4))
#define VPINT_MBX_CTL_CAUSE_ENA_M BIT(30)
+#define PFLAN_TX_QALLOC(_PF) (0x001D2580 + ((_PF) * 4))
+#define PFLAN_TX_QALLOC_FIRSTQ_M GENMASK(13, 0)
#define GLLAN_RCTL_0 0x002941F8
#define QRX_CONTEXT(_i, _QRX) (0x00280000 + ((_i) * 8192 + (_QRX) * 4))
#define QRX_CTRL(_QRX) (0x00120000 + ((_QRX) * 4))
@@ -364,6 +380,15 @@
#define GLNVM_ULD_POR_DONE_1_M BIT(8)
#define GLNVM_ULD_PCIER_DONE_2_M BIT(9)
#define GLNVM_ULD_PE_DONE_M BIT(10)
+#define GLCOMM_QTX_CNTX_CTL 0x002D2DC8
+#define GLCOMM_QTX_CNTX_CTL_QUEUE_ID_M GENMASK(13, 0)
+#define GLCOMM_QTX_CNTX_CTL_CMD_M GENMASK(18, 16)
+#define GLCOMM_QTX_CNTX_CTL_CMD_READ 0
+#define GLCOMM_QTX_CNTX_CTL_CMD_WRITE 1
+#define GLCOMM_QTX_CNTX_CTL_CMD_RESET 3
+#define GLCOMM_QTX_CNTX_CTL_CMD_WRITE_NO_DYN 4
+#define GLCOMM_QTX_CNTX_CTL_CMD_EXEC_M BIT(19)
+#define GLCOMM_QTX_CNTX_DATA(_i) (0x002D2D40 + ((_i) * 4))
#define GLPCI_CNF2 0x000BE004
#define GLPCI_CNF2_CACHELINE_SIZE_M BIT(1)
#define PF_FUNC_RID 0x0009E880
@@ -529,11 +554,28 @@
#define PFPM_WUS_MAG_M BIT(1)
#define PFPM_WUS_MNG_M BIT(3)
#define PFPM_WUS_FW_RST_WK_M BIT(31)
+#define E830_PRTMAC_TS_TX_MEM_VALID_H 0x001E2020
+#define E830_PRTMAC_TS_TX_MEM_VALID_L 0x001E2000
#define E830_PRTMAC_CL01_PS_QNT 0x001E32A0
#define E830_PRTMAC_CL01_PS_QNT_CL0_M GENMASK(15, 0)
#define E830_PRTMAC_CL01_QNT_THR 0x001E3320
#define E830_PRTMAC_CL01_QNT_THR_CL0_M GENMASK(15, 0)
+#define E830_PRTTSYN_TXTIME_H(_i) (0x001E5800 + ((_i) * 32))
+#define E830_PRTTSYN_TXTIME_L(_i) (0x001E5000 + ((_i) * 32))
+#define E830_GLPTM_ART_CTL 0x00088B50
+#define E830_GLPTM_ART_CTL_ACTIVE_M BIT(0)
+#define E830_GLPTM_ART_TIME_H 0x00088B54
+#define E830_GLPTM_ART_TIME_L 0x00088B58
+#define E830_GLTSYN_PTMTIME_H(_i) (0x00088B48 + ((_i) * 4))
+#define E830_GLTSYN_PTMTIME_L(_i) (0x00088B40 + ((_i) * 4))
+#define E830_PFPTM_SEM 0x00088B00
+#define E830_PFPTM_SEM_BUSY_M BIT(0)
#define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4))
#define VFINT_DYN_CTLN_CLEARPBA_M BIT(1)
+#define E830_GLTXTIME_FETCH_PROFILE(_i, _j) (0x002D3500 + ((_i) * 4 + (_j) * 64))
+#define E830_GLTXTIME_FETCH_PROFILE_FETCH_TS_DESC_M ICE_M(0x1FF, 0)
+#define E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH 0x00234000
+#define E830_MBX_VF_DEC_TRIG(_VF) (0x00233800 + (_VF) * 4)
+#define E830_MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT(_VF) (0x00233000 + (_VF) * 4)
#endif /* _ICE_HW_AUTOGEN_H_ */
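
A userspace sketch of how the new GLCOMM_QTX_CNTX_CTL layout defined above composes: queue id in bits 13:0, command in bits 18:16, and an execute strobe at bit 19. GENMASK32() and field_prep() below are stand-ins for the kernel's GENMASK()/FIELD_PREP(); the driver code that actually writes this register is not part of this hunk.

#include <stdint.h>
#include <stdio.h>

#define GENMASK32(h, l)          ((~0u << (l)) & (~0u >> (31 - (h))))

#define QTX_CNTX_CTL_QUEUE_ID_M  GENMASK32(13, 0)
#define QTX_CNTX_CTL_CMD_M       GENMASK32(18, 16)
#define QTX_CNTX_CTL_CMD_READ    0u
#define QTX_CNTX_CTL_CMD_EXEC_M  (1u << 19)

static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	return (val << __builtin_ctz(mask)) & mask;
}

int main(void)
{
	uint32_t ctl = field_prep(QTX_CNTX_CTL_QUEUE_ID_M, 42) |
		       field_prep(QTX_CNTX_CTL_CMD_M, QTX_CNTX_CTL_CMD_READ) |
		       QTX_CNTX_CTL_CMD_EXEC_M;

	printf("GLCOMM_QTX_CNTX_CTL = 0x%08x\n", (unsigned int)ctl);
	return 0;
}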
diff --git a/drivers/net/ethernet/intel/ice/ice_hwmon.c b/drivers/net/ethernet/intel/ice/ice_hwmon.c
index e4c2c1bff6c0..b7aa6812510a 100644
--- a/drivers/net/ethernet/intel/ice/ice_hwmon.c
+++ b/drivers/net/ethernet/intel/ice/ice_hwmon.c
@@ -96,7 +96,7 @@ static bool ice_is_internal_reading_supported(struct ice_pf *pf)
unsigned long sensors = pf->hw.dev_caps.supported_sensors;
- return _test_bit(ICE_SENSOR_SUPPORT_E810_INT_TEMP_BIT, &sensors);
+ return test_bit(ICE_SENSOR_SUPPORT_E810_INT_TEMP_BIT, &sensors);
};
void ice_hwmon_init(struct ice_pf *pf)
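
Standalone equivalent of the capability test fixed above: supported_sensors is an ordinary unsigned long bitmap, so the generic test_bit() accessor applies; in plain C the same check is a shift and mask. The bit index below is illustrative, not the driver's ICE_SENSOR_SUPPORT_E810_INT_TEMP_BIT value.

#include <stdbool.h>

#define DEMO_SENSOR_E810_INT_TEMP_BIT  0    /* illustrative bit index */

static bool demo_sensor_supported(unsigned long sensors, unsigned int bit)
{
	return (sensors >> bit) & 1UL;    /* what test_bit() does within one word */
}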
diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c
index 145b27f2a4ce..420d45c2558b 100644
--- a/drivers/net/ethernet/intel/ice/ice_idc.c
+++ b/drivers/net/ethernet/intel/ice/ice_idc.c
@@ -9,22 +9,25 @@
static DEFINE_XARRAY_ALLOC1(ice_aux_id);
/**
- * ice_get_auxiliary_drv - retrieve iidc_auxiliary_drv struct
- * @pf: pointer to PF struct
+ * ice_get_auxiliary_drv - retrieve iidc_rdma_core_auxiliary_drv struct
+ * @cdev: pointer to iidc_rdma_core_dev_info struct
*
* This function has to be called with a device_lock on the
- * pf->adev.dev to avoid race conditions.
+ * cdev->adev.dev to avoid race conditions.
+ *
+ * Return: pointer to the matched auxiliary driver struct
*/
-static struct iidc_auxiliary_drv *ice_get_auxiliary_drv(struct ice_pf *pf)
+static struct iidc_rdma_core_auxiliary_drv *
+ice_get_auxiliary_drv(struct iidc_rdma_core_dev_info *cdev)
{
struct auxiliary_device *adev;
- adev = pf->adev;
+ adev = cdev->adev;
if (!adev || !adev->dev.driver)
return NULL;
- return container_of(adev->dev.driver, struct iidc_auxiliary_drv,
- adrv.driver);
+ return container_of(adev->dev.driver,
+ struct iidc_rdma_core_auxiliary_drv, adrv.driver);
}
/**
@@ -32,44 +35,54 @@ static struct iidc_auxiliary_drv *ice_get_auxiliary_drv(struct ice_pf *pf)
* @pf: pointer to PF struct
* @event: event struct
*/
-void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event)
+void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_rdma_event *event)
{
- struct iidc_auxiliary_drv *iadrv;
+ struct iidc_rdma_core_auxiliary_drv *iadrv;
+ struct iidc_rdma_core_dev_info *cdev;
if (WARN_ON_ONCE(!in_task()))
return;
+ cdev = pf->cdev_info;
+ if (!cdev)
+ return;
+
mutex_lock(&pf->adev_mutex);
- if (!pf->adev)
+ if (!cdev->adev)
goto finish;
- device_lock(&pf->adev->dev);
- iadrv = ice_get_auxiliary_drv(pf);
+ device_lock(&cdev->adev->dev);
+ iadrv = ice_get_auxiliary_drv(cdev);
if (iadrv && iadrv->event_handler)
- iadrv->event_handler(pf, event);
- device_unlock(&pf->adev->dev);
+ iadrv->event_handler(cdev, event);
+ device_unlock(&cdev->adev->dev);
finish:
mutex_unlock(&pf->adev_mutex);
}
/**
* ice_add_rdma_qset - Add Leaf Node for RDMA Qset
- * @pf: PF struct
+ * @cdev: pointer to iidc_rdma_core_dev_info struct
* @qset: Resource to be allocated
+ *
+ * Return: Zero on success or error code encountered
*/
-int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
+int ice_add_rdma_qset(struct iidc_rdma_core_dev_info *cdev,
+ struct iidc_rdma_qset_params *qset)
{
u16 max_rdmaqs[ICE_MAX_TRAFFIC_CLASS];
struct ice_vsi *vsi;
struct device *dev;
+ struct ice_pf *pf;
u32 qset_teid;
u16 qs_handle;
int status;
int i;
- if (WARN_ON(!pf || !qset))
+ if (WARN_ON(!cdev || !qset))
return -EINVAL;
+ pf = pci_get_drvdata(cdev->pdev);
dev = ice_pf_to_dev(pf);
if (!ice_is_rdma_ena(pf))
@@ -100,7 +113,6 @@ int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
dev_err(dev, "Failed VSI RDMA Qset enable\n");
return status;
}
- vsi->qset_handle[qset->tc] = qset->qs_handle;
qset->teid = qset_teid;
return 0;
@@ -109,18 +121,23 @@ EXPORT_SYMBOL_GPL(ice_add_rdma_qset);
/**
* ice_del_rdma_qset - Delete leaf node for RDMA Qset
- * @pf: PF struct
+ * @cdev: pointer to iidc_rdma_core_dev_info struct
* @qset: Resource to be freed
+ *
+ * Return: Zero on success, error code on failure
*/
-int ice_del_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
+int ice_del_rdma_qset(struct iidc_rdma_core_dev_info *cdev,
+ struct iidc_rdma_qset_params *qset)
{
struct ice_vsi *vsi;
+ struct ice_pf *pf;
u32 teid;
u16 q_id;
- if (WARN_ON(!pf || !qset))
+ if (WARN_ON(!cdev || !qset))
return -EINVAL;
+ pf = pci_get_drvdata(cdev->pdev);
vsi = ice_find_vsi(pf, qset->vport_id);
if (!vsi) {
dev_err(ice_pf_to_dev(pf), "RDMA Invalid VSI\n");
@@ -130,36 +147,36 @@ int ice_del_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
q_id = qset->qs_handle;
teid = qset->teid;
- vsi->qset_handle[qset->tc] = 0;
-
return ice_dis_vsi_rdma_qset(vsi->port_info, 1, &teid, &q_id);
}
EXPORT_SYMBOL_GPL(ice_del_rdma_qset);
/**
* ice_rdma_request_reset - accept request from RDMA to perform a reset
- * @pf: struct for PF
+ * @cdev: pointer to iidc_rdma_core_dev_info struct
* @reset_type: type of reset
+ *
+ * Return: Zero on success, error code on failure
*/
-int ice_rdma_request_reset(struct ice_pf *pf, enum iidc_reset_type reset_type)
+int ice_rdma_request_reset(struct iidc_rdma_core_dev_info *cdev,
+ enum iidc_rdma_reset_type reset_type)
{
enum ice_reset_req reset;
+ struct ice_pf *pf;
- if (WARN_ON(!pf))
+ if (WARN_ON(!cdev))
return -EINVAL;
+ pf = pci_get_drvdata(cdev->pdev);
+
switch (reset_type) {
- case IIDC_PFR:
+ case IIDC_FUNC_RESET:
reset = ICE_RESET_PFR;
break;
- case IIDC_CORER:
+ case IIDC_DEV_RESET:
reset = ICE_RESET_CORER;
break;
- case IIDC_GLOBR:
- reset = ICE_RESET_GLOBR;
- break;
default:
- dev_err(ice_pf_to_dev(pf), "incorrect reset request\n");
return -EINVAL;
}
@@ -169,18 +186,23 @@ EXPORT_SYMBOL_GPL(ice_rdma_request_reset);
/**
* ice_rdma_update_vsi_filter - update main VSI filters for RDMA
- * @pf: pointer to struct for PF
+ * @cdev: pointer to iidc_rdma_core_dev_info struct
* @vsi_id: VSI HW idx to update filter on
* @enable: bool whether to enable or disable filters
+ *
+ * Return: Zero on success, error code on failure
*/
-int ice_rdma_update_vsi_filter(struct ice_pf *pf, u16 vsi_id, bool enable)
+int ice_rdma_update_vsi_filter(struct iidc_rdma_core_dev_info *cdev,
+ u16 vsi_id, bool enable)
{
struct ice_vsi *vsi;
+ struct ice_pf *pf;
int status;
- if (WARN_ON(!pf))
+ if (WARN_ON(!cdev))
return -EINVAL;
+ pf = pci_get_drvdata(cdev->pdev);
vsi = ice_find_vsi(pf, vsi_id);
if (!vsi)
return -EINVAL;
@@ -201,88 +223,54 @@ int ice_rdma_update_vsi_filter(struct ice_pf *pf, u16 vsi_id, bool enable)
EXPORT_SYMBOL_GPL(ice_rdma_update_vsi_filter);
/**
- * ice_get_qos_params - parse QoS params for RDMA consumption
- * @pf: pointer to PF struct
- * @qos: set of QoS values
- */
-void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos)
-{
- struct ice_dcbx_cfg *dcbx_cfg;
- unsigned int i;
- u32 up2tc;
-
- dcbx_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
- up2tc = rd32(&pf->hw, PRTDCB_TUP2TC);
-
- qos->num_tc = ice_dcb_get_num_tc(dcbx_cfg);
- for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++)
- qos->up2tc[i] = (up2tc >> (i * 3)) & 0x7;
-
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
- qos->tc_info[i].rel_bw = dcbx_cfg->etscfg.tcbwtable[i];
-
- qos->pfc_mode = dcbx_cfg->pfc_mode;
- if (qos->pfc_mode == IIDC_DSCP_PFC_MODE)
- for (i = 0; i < IIDC_MAX_DSCP_MAPPING; i++)
- qos->dscp_map[i] = dcbx_cfg->dscp_map[i];
-}
-EXPORT_SYMBOL_GPL(ice_get_qos_params);
-
-/**
- * ice_alloc_rdma_qvectors - Allocate vector resources for RDMA driver
- * @pf: board private structure to initialize
+ * ice_alloc_rdma_qvector - alloc vector resources reserved for RDMA driver
+ * @cdev: pointer to iidc_rdma_core_dev_info struct
+ * @entry: MSI-X entry to be removed
+ *
+ * Return: Zero on success, error code on failure
*/
-static int ice_alloc_rdma_qvectors(struct ice_pf *pf)
+int ice_alloc_rdma_qvector(struct iidc_rdma_core_dev_info *cdev,
+ struct msix_entry *entry)
{
- if (ice_is_rdma_ena(pf)) {
- int i;
-
- pf->msix_entries = kcalloc(pf->num_rdma_msix,
- sizeof(*pf->msix_entries),
- GFP_KERNEL);
- if (!pf->msix_entries)
- return -ENOMEM;
+ struct msi_map map;
+ struct ice_pf *pf;
- /* RDMA is the only user of pf->msix_entries array */
- pf->rdma_base_vector = 0;
+ if (WARN_ON(!cdev))
+ return -EINVAL;
- for (i = 0; i < pf->num_rdma_msix; i++) {
- struct msix_entry *entry = &pf->msix_entries[i];
- struct msi_map map;
+ pf = pci_get_drvdata(cdev->pdev);
+ map = ice_alloc_irq(pf, true);
+ if (map.index < 0)
+ return -ENOMEM;
- map = ice_alloc_irq(pf, false);
- if (map.index < 0)
- break;
+ entry->entry = map.index;
+ entry->vector = map.virq;
- entry->entry = map.index;
- entry->vector = map.virq;
- }
- }
return 0;
}
+EXPORT_SYMBOL_GPL(ice_alloc_rdma_qvector);
/**
* ice_free_rdma_qvector - free vector resources reserved for RDMA driver
- * @pf: board private structure to initialize
+ * @cdev: pointer to iidc_rdma_core_dev_info struct
+ * @entry: MSI-X entry to be removed
*/
-static void ice_free_rdma_qvector(struct ice_pf *pf)
+void ice_free_rdma_qvector(struct iidc_rdma_core_dev_info *cdev,
+ struct msix_entry *entry)
{
- int i;
+ struct msi_map map;
+ struct ice_pf *pf;
- if (!pf->msix_entries)
+ if (WARN_ON(!cdev || !entry))
return;
- for (i = 0; i < pf->num_rdma_msix; i++) {
- struct msi_map map;
-
- map.index = pf->msix_entries[i].entry;
- map.virq = pf->msix_entries[i].vector;
- ice_free_irq(pf, map);
- }
+ pf = pci_get_drvdata(cdev->pdev);
- kfree(pf->msix_entries);
- pf->msix_entries = NULL;
+ map.index = entry->entry;
+ map.virq = entry->vector;
+ ice_free_irq(pf, map);
}
+EXPORT_SYMBOL_GPL(ice_free_rdma_qvector);
/**
* ice_adev_release - function to be mapped to AUX dev's release op
@@ -290,19 +278,23 @@ static void ice_free_rdma_qvector(struct ice_pf *pf)
*/
static void ice_adev_release(struct device *dev)
{
- struct iidc_auxiliary_dev *iadev;
+ struct iidc_rdma_core_auxiliary_dev *iadev;
- iadev = container_of(dev, struct iidc_auxiliary_dev, adev.dev);
+ iadev = container_of(dev, struct iidc_rdma_core_auxiliary_dev,
+ adev.dev);
kfree(iadev);
}
/**
* ice_plug_aux_dev - allocate and register AUX device
* @pf: pointer to pf struct
+ *
+ * Return: Zero on success, error code on failure
*/
int ice_plug_aux_dev(struct ice_pf *pf)
{
- struct iidc_auxiliary_dev *iadev;
+ struct iidc_rdma_core_auxiliary_dev *iadev;
+ struct iidc_rdma_core_dev_info *cdev;
struct auxiliary_device *adev;
int ret;
@@ -312,17 +304,22 @@ int ice_plug_aux_dev(struct ice_pf *pf)
if (!ice_is_rdma_ena(pf))
return 0;
+ cdev = pf->cdev_info;
+ if (!cdev)
+ return -ENODEV;
+
iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
if (!iadev)
return -ENOMEM;
adev = &iadev->adev;
- iadev->pf = pf;
+ iadev->cdev_info = cdev;
adev->id = pf->aux_idx;
adev->dev.release = ice_adev_release;
adev->dev.parent = &pf->pdev->dev;
- adev->name = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ? "roce" : "iwarp";
+ adev->name = cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_ROCEV2 ?
+ "roce" : "iwarp";
ret = auxiliary_device_init(adev);
if (ret) {
@@ -337,8 +334,9 @@ int ice_plug_aux_dev(struct ice_pf *pf)
}
mutex_lock(&pf->adev_mutex);
- pf->adev = adev;
+ cdev->adev = adev;
mutex_unlock(&pf->adev_mutex);
+ set_bit(ICE_FLAG_AUX_DEV_CREATED, pf->flags);
return 0;
}
@@ -350,15 +348,16 @@ void ice_unplug_aux_dev(struct ice_pf *pf)
{
struct auxiliary_device *adev;
+ if (!test_and_clear_bit(ICE_FLAG_AUX_DEV_CREATED, pf->flags))
+ return;
+
mutex_lock(&pf->adev_mutex);
- adev = pf->adev;
- pf->adev = NULL;
+ adev = pf->cdev_info->adev;
+ pf->cdev_info->adev = NULL;
mutex_unlock(&pf->adev_mutex);
- if (adev) {
- auxiliary_device_delete(adev);
- auxiliary_device_uninit(adev);
- }
+ auxiliary_device_delete(adev);
+ auxiliary_device_uninit(adev);
}
/**
@@ -367,7 +366,9 @@ void ice_unplug_aux_dev(struct ice_pf *pf)
*/
int ice_init_rdma(struct ice_pf *pf)
{
+ struct iidc_rdma_priv_dev_info *privd;
struct device *dev = &pf->pdev->dev;
+ struct iidc_rdma_core_dev_info *cdev;
int ret;
if (!ice_is_rdma_ena(pf)) {
@@ -375,30 +376,50 @@ int ice_init_rdma(struct ice_pf *pf)
return 0;
}
+ cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+ if (!cdev)
+ return -ENOMEM;
+
+ pf->cdev_info = cdev;
+
+ privd = kzalloc(sizeof(*privd), GFP_KERNEL);
+ if (!privd) {
+ ret = -ENOMEM;
+ goto err_privd_alloc;
+ }
+
+ privd->pf_id = pf->hw.pf_id;
ret = xa_alloc(&ice_aux_id, &pf->aux_idx, NULL, XA_LIMIT(1, INT_MAX),
GFP_KERNEL);
if (ret) {
dev_err(dev, "Failed to allocate device ID for AUX driver\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_alloc_xa;
}
- /* Reserve vector resources */
- ret = ice_alloc_rdma_qvectors(pf);
- if (ret < 0) {
- dev_err(dev, "failed to reserve vectors for RDMA\n");
- goto err_reserve_rdma_qvector;
- }
- pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2;
+ cdev->iidc_priv = privd;
+ privd->netdev = pf->vsi[0]->netdev;
+
+ privd->hw_addr = (u8 __iomem *)pf->hw.hw_addr;
+ cdev->pdev = pf->pdev;
+ privd->vport_id = pf->vsi[0]->vsi_num;
+
+ pf->cdev_info->rdma_protocol |= IIDC_RDMA_PROTOCOL_ROCEV2;
+ ice_setup_dcb_qos_info(pf, &privd->qos_info);
ret = ice_plug_aux_dev(pf);
if (ret)
goto err_plug_aux_dev;
return 0;
err_plug_aux_dev:
- ice_free_rdma_qvector(pf);
-err_reserve_rdma_qvector:
- pf->adev = NULL;
+ pf->cdev_info->adev = NULL;
xa_erase(&ice_aux_id, pf->aux_idx);
+err_alloc_xa:
+ kfree(privd);
+err_privd_alloc:
+ kfree(cdev);
+ pf->cdev_info = NULL;
+
return ret;
}
@@ -412,6 +433,8 @@ void ice_deinit_rdma(struct ice_pf *pf)
return;
ice_unplug_aux_dev(pf);
- ice_free_rdma_qvector(pf);
xa_erase(&ice_aux_id, pf->aux_idx);
+ kfree(pf->cdev_info->iidc_priv);
+ kfree(pf->cdev_info);
+ pf->cdev_info = NULL;
}
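
A hypothetical consumer-side sketch (not part of this patch) of the reworked IDC interface above: the auxiliary RDMA driver holds only the iidc_rdma_core_dev_info handle, the ice side recovers its PF with pci_get_drvdata(cdev->pdev), and MSI-X vectors are now handed out one at a time through ice_alloc_rdma_qvector()/ice_free_rdma_qvector(). The header paths and wrapper functions are assumptions; only the exported calls and the struct msix_entry usage come from the diff.

#include <linux/pci.h>
#include <linux/net/intel/iidc_rdma.h>        /* assumed header for the cdev handle */
#include <linux/net/intel/iidc_rdma_ice.h>    /* assumed header for the ice exports */

/* illustrative wrapper an RDMA aux driver might use to grab one vector */
static int demo_reserve_rdma_vector(struct iidc_rdma_core_dev_info *cdev,
				    struct msix_entry *entry)
{
	int err;

	err = ice_alloc_rdma_qvector(cdev, entry);    /* fills entry->entry / entry->vector */
	if (err)
		return err;

	/* request_irq(entry->vector, ...) would follow here */
	return 0;
}

static void demo_release_rdma_vector(struct iidc_rdma_core_dev_info *cdev,
				     struct msix_entry *entry)
{
	ice_free_rdma_qvector(cdev, entry);
}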
diff --git a/drivers/net/ethernet/intel/ice/ice_idc_int.h b/drivers/net/ethernet/intel/ice/ice_idc_int.h
index 4b0c86757df9..17dbfcfb6a2a 100644
--- a/drivers/net/ethernet/intel/ice/ice_idc_int.h
+++ b/drivers/net/ethernet/intel/ice/ice_idc_int.h
@@ -4,10 +4,11 @@
#ifndef _ICE_IDC_INT_H_
#define _ICE_IDC_INT_H_
-#include <linux/net/intel/iidc.h>
+#include <linux/net/intel/iidc_rdma.h>
+#include <linux/net/intel/iidc_rdma_ice.h>
struct ice_pf;
-void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event);
+void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_rdma_event *event);
#endif /* !_ICE_IDC_INT_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_irq.c b/drivers/net/ethernet/intel/ice/ice_irq.c
index ad82ff7d1995..30801fd375f0 100644
--- a/drivers/net/ethernet/intel/ice/ice_irq.c
+++ b/drivers/net/ethernet/intel/ice/ice_irq.c
@@ -20,6 +20,19 @@ ice_init_irq_tracker(struct ice_pf *pf, unsigned int max_vectors,
xa_init_flags(&pf->irq_tracker.entries, XA_FLAGS_ALLOC);
}
+static int
+ice_init_virt_irq_tracker(struct ice_pf *pf, u32 base, u32 num_entries)
+{
+ pf->virt_irq_tracker.bm = bitmap_zalloc(num_entries, GFP_KERNEL);
+ if (!pf->virt_irq_tracker.bm)
+ return -ENOMEM;
+
+ pf->virt_irq_tracker.num_entries = num_entries;
+ pf->virt_irq_tracker.base = base;
+
+ return 0;
+}
+
/**
* ice_deinit_irq_tracker - free xarray tracker
* @pf: board private structure
@@ -29,6 +42,11 @@ static void ice_deinit_irq_tracker(struct ice_pf *pf)
xa_destroy(&pf->irq_tracker.entries);
}
+static void ice_deinit_virt_irq_tracker(struct ice_pf *pf)
+{
+ bitmap_free(pf->virt_irq_tracker.bm);
+}
+
/**
* ice_free_irq_res - free a block of resources
* @pf: board private structure
@@ -45,7 +63,7 @@ static void ice_free_irq_res(struct ice_pf *pf, u16 index)
/**
* ice_get_irq_res - get an interrupt resource
* @pf: board private structure
- * @dyn_only: force entry to be dynamically allocated
+ * @dyn_allowed: allow entry to be dynamically allocated
*
* Allocate new irq entry in the free slot of the tracker. Since xarray
* is used, always allocate new entry at the lowest possible index. Set
@@ -53,11 +71,12 @@ static void ice_free_irq_res(struct ice_pf *pf, u16 index)
*
* Returns allocated irq entry or NULL on failure.
*/
-static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf, bool dyn_only)
+static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf,
+ bool dyn_allowed)
{
- struct xa_limit limit = { .max = pf->irq_tracker.num_entries,
+ struct xa_limit limit = { .max = pf->irq_tracker.num_entries - 1,
.min = 0 };
- unsigned int num_static = pf->irq_tracker.num_static;
+ unsigned int num_static = pf->irq_tracker.num_static - 1;
struct ice_irq_entry *entry;
unsigned int index;
int ret;
@@ -66,9 +85,9 @@ static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf, bool dyn_only)
if (!entry)
return NULL;
- /* skip preallocated entries if the caller says so */
- if (dyn_only)
- limit.min = num_static;
+ /* only already allocated if the caller says so */
+ if (!dyn_allowed)
+ limit.max = num_static;
ret = xa_alloc(&pf->irq_tracker.entries, &index, entry, limit,
GFP_KERNEL);
@@ -78,161 +97,18 @@ static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf, bool dyn_only)
entry = NULL;
} else {
entry->index = index;
- entry->dynamic = index >= num_static;
+ entry->dynamic = index > num_static;
}
return entry;
}
-/**
- * ice_reduce_msix_usage - Reduce usage of MSI-X vectors
- * @pf: board private structure
- * @v_remain: number of remaining MSI-X vectors to be distributed
- *
- * Reduce the usage of MSI-X vectors when entire request cannot be fulfilled.
- * pf->num_lan_msix and pf->num_rdma_msix values are set based on number of
- * remaining vectors.
- */
-static void ice_reduce_msix_usage(struct ice_pf *pf, int v_remain)
-{
- int v_rdma;
-
- if (!ice_is_rdma_ena(pf)) {
- pf->num_lan_msix = v_remain;
- return;
- }
-
- /* RDMA needs at least 1 interrupt in addition to AEQ MSIX */
- v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1;
-
- if (v_remain < ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_RDMA_MSIX) {
- dev_warn(ice_pf_to_dev(pf), "Not enough MSI-X vectors to support RDMA.\n");
- clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
-
- pf->num_rdma_msix = 0;
- pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
- } else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) ||
- (v_remain - v_rdma < v_rdma)) {
- /* Support minimum RDMA and give remaining vectors to LAN MSIX
- */
- pf->num_rdma_msix = ICE_MIN_RDMA_MSIX;
- pf->num_lan_msix = v_remain - ICE_MIN_RDMA_MSIX;
- } else {
- /* Split remaining MSIX with RDMA after accounting for AEQ MSIX
- */
- pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
- ICE_RDMA_NUM_AEQ_MSIX;
- pf->num_lan_msix = v_remain - pf->num_rdma_msix;
- }
-}
-
-/**
- * ice_ena_msix_range - Request a range of MSIX vectors from the OS
- * @pf: board private structure
- *
- * Compute the number of MSIX vectors wanted and request from the OS. Adjust
- * device usage if there are not enough vectors. Return the number of vectors
- * reserved or negative on failure.
- */
-static int ice_ena_msix_range(struct ice_pf *pf)
+#define ICE_RDMA_AEQ_MSIX 1
+static int ice_get_default_msix_amount(struct ice_pf *pf)
{
- int num_cpus, hw_num_msix, v_other, v_wanted, v_actual;
- struct device *dev = ice_pf_to_dev(pf);
- int err;
-
- hw_num_msix = pf->hw.func_caps.common_cap.num_msix_vectors;
- num_cpus = num_online_cpus();
-
- /* LAN miscellaneous handler */
- v_other = ICE_MIN_LAN_OICR_MSIX;
-
- /* Flow Director */
- if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
- v_other += ICE_FDIR_MSIX;
-
- /* switchdev */
- v_other += ICE_ESWITCH_MSIX;
-
- v_wanted = v_other;
-
- /* LAN traffic */
- pf->num_lan_msix = num_cpus;
- v_wanted += pf->num_lan_msix;
-
- /* RDMA auxiliary driver */
- if (ice_is_rdma_ena(pf)) {
- pf->num_rdma_msix = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
- v_wanted += pf->num_rdma_msix;
- }
-
- if (v_wanted > hw_num_msix) {
- int v_remain;
-
- dev_warn(dev, "not enough device MSI-X vectors. wanted = %d, available = %d\n",
- v_wanted, hw_num_msix);
-
- if (hw_num_msix < ICE_MIN_MSIX) {
- err = -ERANGE;
- goto exit_err;
- }
-
- v_remain = hw_num_msix - v_other;
- if (v_remain < ICE_MIN_LAN_TXRX_MSIX) {
- v_other = ICE_MIN_MSIX - ICE_MIN_LAN_TXRX_MSIX;
- v_remain = ICE_MIN_LAN_TXRX_MSIX;
- }
-
- ice_reduce_msix_usage(pf, v_remain);
- v_wanted = pf->num_lan_msix + pf->num_rdma_msix + v_other;
-
- dev_notice(dev, "Reducing request to %d MSI-X vectors for LAN traffic.\n",
- pf->num_lan_msix);
- if (ice_is_rdma_ena(pf))
- dev_notice(dev, "Reducing request to %d MSI-X vectors for RDMA.\n",
- pf->num_rdma_msix);
- }
-
- /* actually reserve the vectors */
- v_actual = pci_alloc_irq_vectors(pf->pdev, ICE_MIN_MSIX, v_wanted,
- PCI_IRQ_MSIX);
- if (v_actual < 0) {
- dev_err(dev, "unable to reserve MSI-X vectors\n");
- err = v_actual;
- goto exit_err;
- }
-
- if (v_actual < v_wanted) {
- dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
- v_wanted, v_actual);
-
- if (v_actual < ICE_MIN_MSIX) {
- /* error if we can't get minimum vectors */
- pci_free_irq_vectors(pf->pdev);
- err = -ERANGE;
- goto exit_err;
- } else {
- int v_remain = v_actual - v_other;
-
- if (v_remain < ICE_MIN_LAN_TXRX_MSIX)
- v_remain = ICE_MIN_LAN_TXRX_MSIX;
-
- ice_reduce_msix_usage(pf, v_remain);
-
- dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n",
- pf->num_lan_msix);
-
- if (ice_is_rdma_ena(pf))
- dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n",
- pf->num_rdma_msix);
- }
- }
-
- return v_actual;
-
-exit_err:
- pf->num_rdma_msix = 0;
- pf->num_lan_msix = 0;
- return err;
+ return ICE_MIN_LAN_OICR_MSIX + num_online_cpus() +
+ (test_bit(ICE_FLAG_FD_ENA, pf->flags) ? ICE_FDIR_MSIX : 0) +
+ (ice_is_rdma_ena(pf) ? num_online_cpus() + ICE_RDMA_AEQ_MSIX : 0);
}
/**
@@ -243,6 +119,7 @@ void ice_clear_interrupt_scheme(struct ice_pf *pf)
{
pci_free_irq_vectors(pf->pdev);
ice_deinit_irq_tracker(pf);
+ ice_deinit_virt_irq_tracker(pf);
}
/**
@@ -252,27 +129,38 @@ void ice_clear_interrupt_scheme(struct ice_pf *pf)
int ice_init_interrupt_scheme(struct ice_pf *pf)
{
int total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
- int vectors, max_vectors;
+ int vectors;
- vectors = ice_ena_msix_range(pf);
+ /* load default PF MSI-X range */
+ if (!pf->msix.min)
+ pf->msix.min = ICE_MIN_MSIX;
- if (vectors < 0)
- return -ENOMEM;
+ if (!pf->msix.max)
+ pf->msix.max = min(total_vectors,
+ ice_get_default_msix_amount(pf));
+
+ pf->msix.total = total_vectors;
+ pf->msix.rest = total_vectors - pf->msix.max;
if (pci_msix_can_alloc_dyn(pf->pdev))
- max_vectors = total_vectors;
+ vectors = pf->msix.min;
else
- max_vectors = vectors;
+ vectors = pf->msix.max;
+
+ vectors = pci_alloc_irq_vectors(pf->pdev, pf->msix.min, vectors,
+ PCI_IRQ_MSIX);
+ if (vectors < 0)
+ return vectors;
- ice_init_irq_tracker(pf, max_vectors, vectors);
+ ice_init_irq_tracker(pf, pf->msix.max, vectors);
- return 0;
+ return ice_init_virt_irq_tracker(pf, pf->msix.max, pf->msix.rest);
}
/**
* ice_alloc_irq - Allocate new interrupt vector
* @pf: board private structure
- * @dyn_only: force dynamic allocation of the interrupt
+ * @dyn_allowed: allow dynamic allocation of the interrupt
*
* Allocate new interrupt vector for a given owner id.
* return struct msi_map with interrupt details and track
@@ -285,27 +173,22 @@ int ice_init_interrupt_scheme(struct ice_pf *pf)
* interrupt will be allocated with pci_msix_alloc_irq_at.
*
* Some callers may only support dynamically allocated interrupts.
- * This is indicated with dyn_only flag.
+ * This is indicated with dyn_allowed flag.
*
* On failure, return map with negative .index. The caller
* is expected to check returned map index.
*
*/
-struct msi_map ice_alloc_irq(struct ice_pf *pf, bool dyn_only)
+struct msi_map ice_alloc_irq(struct ice_pf *pf, bool dyn_allowed)
{
- int sriov_base_vector = pf->sriov_base_vector;
struct msi_map map = { .index = -ENOENT };
struct device *dev = ice_pf_to_dev(pf);
struct ice_irq_entry *entry;
- entry = ice_get_irq_res(pf, dyn_only);
+ entry = ice_get_irq_res(pf, dyn_allowed);
if (!entry)
return map;
- /* fail if we're about to violate SRIOV vectors space */
- if (sriov_base_vector && entry->index >= sriov_base_vector)
- goto exit_free_res;
-
if (pci_msix_can_alloc_dyn(pf->pdev) && entry->dynamic) {
map = pci_msix_alloc_irq_at(pf->pdev, entry->index, NULL);
if (map.index < 0)
@@ -353,26 +236,40 @@ void ice_free_irq(struct ice_pf *pf, struct msi_map map)
}
/**
- * ice_get_max_used_msix_vector - Get the max used interrupt vector
- * @pf: board private structure
+ * ice_virt_get_irqs - get irqs for SR-IOV usecase
+ * @pf: pointer to PF structure
+ * @needed: number of irqs to get
*
- * Return index of maximum used interrupt vectors with respect to the
- * beginning of the MSIX table. Take into account that some interrupts
- * may have been dynamically allocated after MSIX was initially enabled.
+ * This returns the first MSI-X vector index in PF space that is used by this
+ * VF. This index is used when accessing PF relative registers such as
+ * GLINT_VECT2FUNC and GLINT_DYN_CTL.
+ *
+ * Return: first allocated MSI-X vector index in PF space, or -ENOENT when
+ * no block of @needed contiguous entries is available.
*/
-int ice_get_max_used_msix_vector(struct ice_pf *pf)
+int ice_virt_get_irqs(struct ice_pf *pf, u32 needed)
{
- unsigned long start, index, max_idx;
- void *entry;
+ int res = bitmap_find_next_zero_area(pf->virt_irq_tracker.bm,
+ pf->virt_irq_tracker.num_entries,
+ 0, needed, 0);
- /* Treat all preallocated interrupts as used */
- start = pf->irq_tracker.num_static;
- max_idx = start - 1;
+ if (res >= pf->virt_irq_tracker.num_entries)
+ return -ENOENT;
- xa_for_each_start(&pf->irq_tracker.entries, index, entry, start) {
- if (index > max_idx)
- max_idx = index;
- }
+ bitmap_set(pf->virt_irq_tracker.bm, res, needed);
+
+ /* conversion from number in bitmap to global irq index */
+ return res + pf->virt_irq_tracker.base;
+}
- return max_idx;
+/**
+ * ice_virt_free_irqs - free irqs used by the VF
+ * @pf: pointer to PF structure
+ * @index: first index to be freed
+ * @irqs: number of irqs to free
+ */
+void ice_virt_free_irqs(struct ice_pf *pf, u32 index, u32 irqs)
+{
+ bitmap_clear(pf->virt_irq_tracker.bm, index - pf->virt_irq_tracker.base,
+ irqs);
}
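
The new ice_get_default_msix_amount() / ice_init_interrupt_scheme() path above sizes the PF's own MSI-X budget up front and leaves the remainder of the device's vectors for SR-IOV. Below is a minimal standalone C sketch of that budgeting; the constants and CPU count are example stand-ins, not the driver's actual ICE_MIN_LAN_OICR_MSIX / ICE_FDIR_MSIX / ICE_RDMA_AEQ_MSIX definitions.

/*
 * Standalone sketch of the MSI-X budgeting in ice_init_interrupt_scheme():
 * the PF keeps at most the "default amount" of vectors for itself and the
 * remainder ("rest") is handed to the SR-IOV tracker. Example values only.
 */
#include <stdbool.h>
#include <stdio.h>

#define MIN_LAN_OICR_MSIX	1	/* stand-in for ICE_MIN_LAN_OICR_MSIX */
#define FDIR_MSIX		2	/* stand-in for ICE_FDIR_MSIX */
#define RDMA_AEQ_MSIX		1	/* stand-in for ICE_RDMA_AEQ_MSIX */

static int default_msix_amount(int num_cpus, bool fdir_ena, bool rdma_ena)
{
	return MIN_LAN_OICR_MSIX + num_cpus +
	       (fdir_ena ? FDIR_MSIX : 0) +
	       (rdma_ena ? num_cpus + RDMA_AEQ_MSIX : 0);
}

int main(void)
{
	int total_vectors = 1024;	/* from device capabilities */
	int num_cpus = 32;
	int max = default_msix_amount(num_cpus, true, true);

	if (max > total_vectors)
		max = total_vectors;

	printf("PF keeps up to %d vectors, %d left for SR-IOV VFs\n",
	       max, total_vectors - max);
	return 0;
}

With 32 CPUs and both FDIR and RDMA enabled this example reserves 68 vectors for the PF and leaves 956 for the virtual IRQ tracker.
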
diff --git a/drivers/net/ethernet/intel/ice/ice_irq.h b/drivers/net/ethernet/intel/ice/ice_irq.h
index f35efc08575e..b2f9dbafd57e 100644
--- a/drivers/net/ethernet/intel/ice/ice_irq.h
+++ b/drivers/net/ethernet/intel/ice/ice_irq.h
@@ -15,11 +15,22 @@ struct ice_irq_tracker {
u16 num_static; /* preallocated entries */
};
+struct ice_virt_irq_tracker {
+ unsigned long *bm; /* bitmap to track irq usage */
+ u32 num_entries;
+ /* First MSIX vector used by SR-IOV VFs. Calculated by subtracting the
+ * number of MSIX vectors needed for all SR-IOV VFs from the number of
+ * MSIX vectors allowed on this PF.
+ */
+ u32 base;
+};
+
int ice_init_interrupt_scheme(struct ice_pf *pf);
void ice_clear_interrupt_scheme(struct ice_pf *pf);
struct msi_map ice_alloc_irq(struct ice_pf *pf, bool dyn_only);
void ice_free_irq(struct ice_pf *pf, struct msi_map map);
-int ice_get_max_used_msix_vector(struct ice_pf *pf);
+int ice_virt_get_irqs(struct ice_pf *pf, u32 needed);
+void ice_virt_free_irqs(struct ice_pf *pf, u32 index, u32 irqs);
#endif
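
struct ice_virt_irq_tracker above reduces SR-IOV vector bookkeeping to a bitmap whose bit 0 corresponds to index "base" in PF space; ice_virt_get_irqs() and ice_virt_free_irqs() then only search for and clear contiguous runs. A small userspace sketch of that allocation policy follows; the kernel's bitmap_zalloc()/bitmap_find_next_zero_area()/bitmap_set()/bitmap_clear() helpers are replaced with byte-per-bit stand-ins, so only the policy is faithful.

/* Userspace sketch of the contiguous-range allocator used for VF vectors. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct virt_irq_tracker {
	unsigned char *bm;	/* one byte per tracked vector: 0 = free */
	unsigned int num_entries;
	unsigned int base;	/* first PF-space index handed to VFs */
};

/* Find @needed consecutive free entries, mark them used and return the
 * corresponding PF-space index, or -1 when no hole is large enough.
 */
static int virt_get_irqs(struct virt_irq_tracker *t, unsigned int needed)
{
	unsigned int i, run = 0;

	for (i = 0; i < t->num_entries; i++) {
		run = t->bm[i] ? 0 : run + 1;
		if (run == needed) {
			unsigned int start = i + 1 - needed;

			memset(&t->bm[start], 1, needed);
			return start + t->base;
		}
	}
	return -1;
}

/* Release @irqs entries starting at PF-space index @index. */
static void virt_free_irqs(struct virt_irq_tracker *t, unsigned int index,
			   unsigned int irqs)
{
	memset(&t->bm[index - t->base], 0, irqs);
}

int main(void)
{
	struct virt_irq_tracker t = {
		.bm = calloc(16, 1), .num_entries = 16, .base = 64,
	};
	int a = virt_get_irqs(&t, 4);	/* expected 64 */
	int b = virt_get_irqs(&t, 4);	/* expected 68 */

	virt_free_irqs(&t, a, 4);
	printf("a=%d b=%d reuse=%d\n", a, b, virt_get_irqs(&t, 2));
	free(t.bm);
	return 0;
}

Running it prints a=64 b=68 reuse=64, mirroring how freed ranges are handed out again from the lowest free index.
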
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index f0e76f0a6d60..d2576d606e10 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -10,12 +10,17 @@
#define ICE_LAG_RES_SHARED BIT(14)
#define ICE_LAG_RES_VALID BIT(15)
-#define LACP_TRAIN_PKT_LEN 16
-static const u8 lacp_train_pkt[LACP_TRAIN_PKT_LEN] = { 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0,
- 0x88, 0x09, 0, 0 };
+#define ICE_TRAIN_PKT_LEN 16
+static const u8 lacp_train_pkt[ICE_TRAIN_PKT_LEN] = { 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0x88, 0x09, 0, 0 };
+static const u8 act_act_train_pkt[ICE_TRAIN_PKT_LEN] = { 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0 };
#define ICE_RECIPE_LEN 64
+#define ICE_LAG_SRIOV_CP_RECIPE 10
+
static const u8 ice_dflt_vsi_rcp[ICE_RECIPE_LEN] = {
0x05, 0, 0, 0, 0x20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0x85, 0, 0x01, 0, 0, 0, 0xff, 0xff, 0x08, 0, 0, 0, 0, 0, 0, 0,
@@ -46,10 +51,10 @@ static void ice_lag_set_primary(struct ice_lag *lag)
}
/**
- * ice_lag_set_backup - set PF LAG state to Backup
+ * ice_lag_set_bkup - set PF LAG state to Backup
* @lag: LAG info struct
*/
-static void ice_lag_set_backup(struct ice_lag *lag)
+static void ice_lag_set_bkup(struct ice_lag *lag)
{
struct ice_pf *pf = lag->pf;
@@ -99,6 +104,28 @@ static bool netif_is_same_ice(struct ice_pf *pf, struct net_device *netdev)
}
/**
+ * ice_lag_config_eswitch - configure eswitch to work with LAG
+ * @lag: lag info struct
+ * @netdev: active network interface device struct
+ *
+ * Updates all port representors in eswitch to use @netdev for Tx.
+ *
+ * Configures the netdev to keep dst metadata (also used in representor Tx).
+ * This is required for an uplink without switchdev mode configured.
+ */
+static void ice_lag_config_eswitch(struct ice_lag *lag,
+ struct net_device *netdev)
+{
+ struct ice_repr *repr;
+ unsigned long id;
+
+ xa_for_each(&lag->pf->eswitch.reprs, id, repr)
+ repr->dst->u.port_info.lower_dev = netdev;
+
+ netif_keep_dst(netdev);
+}
+
+/**
* ice_netdev_to_lag - return pointer to associated lag struct from netdev
* @netdev: pointer to net_device struct to query
*/
@@ -202,20 +229,20 @@ static struct ice_lag *ice_lag_find_primary(struct ice_lag *lag)
* @act: rule action
* @recipe_id: recipe id for the new rule
* @rule_idx: pointer to rule index
+ * @direction: ICE_FLTR_RX or ICE_FLTR_TX
* @add: boolean on whether we are adding filters
*/
static int
ice_lag_cfg_fltr(struct ice_lag *lag, u32 act, u16 recipe_id, u16 *rule_idx,
- bool add)
+ u8 direction, bool add)
{
struct ice_sw_rule_lkup_rx_tx *s_rule;
+ struct ice_hw *hw = &lag->pf->hw;
u16 s_rule_sz, vsi_num;
- struct ice_hw *hw;
u8 *eth_hdr;
u32 opc;
int err;
- hw = &lag->pf->hw;
vsi_num = ice_get_hw_vsi_num(hw, 0);
s_rule_sz = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule);
@@ -231,9 +258,16 @@ ice_lag_cfg_fltr(struct ice_lag *lag, u32 act, u16 recipe_id, u16 *rule_idx,
act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M, vsi_num);
- s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
s_rule->recipe_id = cpu_to_le16(recipe_id);
- s_rule->src = cpu_to_le16(hw->port_info->lport);
+ if (direction == ICE_FLTR_RX) {
+ s_rule->hdr.type =
+ cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
+ s_rule->src = cpu_to_le16(hw->port_info->lport);
+ } else {
+ s_rule->hdr.type =
+ cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
+ s_rule->src = cpu_to_le16(vsi_num);
+ }
s_rule->act = cpu_to_le32(act);
s_rule->hdr_len = cpu_to_le16(DUMMY_ETH_HDR_LEN);
opc = ice_aqc_opc_add_sw_rules;
@@ -266,9 +300,27 @@ ice_lag_cfg_dflt_fltr(struct ice_lag *lag, bool add)
{
u32 act = ICE_SINGLE_ACT_VSI_FORWARDING |
ICE_SINGLE_ACT_VALID_BIT | ICE_SINGLE_ACT_LAN_ENABLE;
+ int err;
+
+ err = ice_lag_cfg_fltr(lag, act, lag->pf_recipe, &lag->pf_rx_rule_id,
+ ICE_FLTR_RX, add);
+ if (err)
+ goto err_rx;
+
+ act = ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT |
+ ICE_SINGLE_ACT_LB_ENABLE;
+ err = ice_lag_cfg_fltr(lag, act, lag->pf_recipe, &lag->pf_tx_rule_id,
+ ICE_FLTR_TX, add);
+ if (err)
+ goto err_tx;
- return ice_lag_cfg_fltr(lag, act, lag->pf_recipe,
- &lag->pf_rule_id, add);
+ return 0;
+
+err_tx:
+ ice_lag_cfg_fltr(lag, act, lag->pf_recipe, &lag->pf_rx_rule_id,
+ ICE_FLTR_RX, !add);
+err_rx:
+ return err;
}
/**
@@ -284,33 +336,22 @@ ice_lag_cfg_drop_fltr(struct ice_lag *lag, bool add)
ICE_SINGLE_ACT_DROP;
return ice_lag_cfg_fltr(lag, act, lag->lport_recipe,
- &lag->lport_rule_idx, add);
+ &lag->lport_rule_idx, ICE_FLTR_RX, add);
}
/**
- * ice_lag_cfg_pf_fltrs - set filters up for new active port
+ * ice_lag_cfg_pf_fltrs_act_bkup - set filters up for new active port
* @lag: local interfaces lag struct
- * @ptr: opaque data containing notifier event
+ * @bonding_info: netdev event bonding info
*/
static void
-ice_lag_cfg_pf_fltrs(struct ice_lag *lag, void *ptr)
+ice_lag_cfg_pf_fltrs_act_bkup(struct ice_lag *lag,
+ struct netdev_bonding_info *bonding_info)
{
- struct netdev_notifier_bonding_info *info;
- struct netdev_bonding_info *bonding_info;
- struct net_device *event_netdev;
- struct device *dev;
-
- event_netdev = netdev_notifier_info_to_dev(ptr);
- /* not for this netdev */
- if (event_netdev != lag->netdev)
- return;
-
- info = (struct netdev_notifier_bonding_info *)ptr;
- bonding_info = &info->bonding_info;
- dev = ice_pf_to_dev(lag->pf);
+ struct device *dev = ice_pf_to_dev(lag->pf);
/* interface not active - remove old default VSI rule */
- if (bonding_info->slave.state && lag->pf_rule_id) {
+ if (bonding_info->slave.state && lag->pf_rx_rule_id) {
if (ice_lag_cfg_dflt_fltr(lag, false))
dev_err(dev, "Error removing old default VSI filter\n");
if (ice_lag_cfg_drop_fltr(lag, true))
@@ -319,7 +360,7 @@ ice_lag_cfg_pf_fltrs(struct ice_lag *lag, void *ptr)
}
/* interface becoming active - add new default VSI rule */
- if (!bonding_info->slave.state && !lag->pf_rule_id) {
+ if (!bonding_info->slave.state && !lag->pf_rx_rule_id) {
if (ice_lag_cfg_dflt_fltr(lag, true))
dev_err(dev, "Error adding new default VSI filter\n");
if (lag->lport_rule_idx && ice_lag_cfg_drop_fltr(lag, false))
@@ -328,6 +369,105 @@ ice_lag_cfg_pf_fltrs(struct ice_lag *lag, void *ptr)
}
/**
+ * ice_lag_cfg_lp_fltr - configure lport filters
+ * @lag: local interface's lag struct
+ * @add: add or remove rule
+ * @cp: control packet only or general PF lport rule
+ */
+static void
+ice_lag_cfg_lp_fltr(struct ice_lag *lag, bool add, bool cp)
+{
+ struct ice_sw_rule_lkup_rx_tx *s_rule;
+ struct ice_vsi *vsi = lag->pf->vsi[0];
+ u16 buf_len, opc;
+
+ buf_len = ICE_SW_RULE_RX_TX_HDR_SIZE(s_rule, ICE_TRAIN_PKT_LEN);
+ s_rule = kzalloc(buf_len, GFP_KERNEL);
+ if (!s_rule) {
+ netdev_warn(lag->netdev, "-ENOMEM error configuring CP filter\n");
+ return;
+ }
+
+ if (add) {
+ if (cp) {
+ s_rule->recipe_id =
+ cpu_to_le16(ICE_LAG_SRIOV_CP_RECIPE);
+ memcpy(s_rule->hdr_data, lacp_train_pkt,
+ ICE_TRAIN_PKT_LEN);
+ } else {
+ s_rule->recipe_id = cpu_to_le16(lag->act_act_recipe);
+ memcpy(s_rule->hdr_data, act_act_train_pkt,
+ ICE_TRAIN_PKT_LEN);
+ }
+
+ s_rule->src = cpu_to_le16(vsi->port_info->lport);
+ s_rule->act = cpu_to_le32(ICE_FWD_TO_VSI |
+ ICE_SINGLE_ACT_LAN_ENABLE |
+ ICE_SINGLE_ACT_VALID_BIT |
+ FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M,
+ vsi->vsi_num));
+ s_rule->hdr_len = cpu_to_le16(ICE_TRAIN_PKT_LEN);
+ s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
+ opc = ice_aqc_opc_add_sw_rules;
+ } else {
+ opc = ice_aqc_opc_remove_sw_rules;
+ if (cp)
+ s_rule->index = cpu_to_le16(lag->cp_rule_idx);
+ else
+ s_rule->index = cpu_to_le16(lag->act_act_rule_idx);
+ }
+ if (ice_aq_sw_rules(&lag->pf->hw, s_rule, buf_len, 1, opc, NULL)) {
+ netdev_warn(lag->netdev, "Error %s %s rule for aggregate\n",
+ add ? "ADDING" : "REMOVING",
+ cp ? "CONTROL PACKET" : "LPORT");
+ goto err_cp_free;
+ }
+
+ if (add) {
+ if (cp)
+ lag->cp_rule_idx = le16_to_cpu(s_rule->index);
+ else
+ lag->act_act_rule_idx = le16_to_cpu(s_rule->index);
+ } else {
+ if (cp)
+ lag->cp_rule_idx = 0;
+ else
+ lag->act_act_rule_idx = 0;
+ }
+
+err_cp_free:
+ kfree(s_rule);
+}
+
+/**
+ * ice_lag_cfg_pf_fltrs - set filters up for PF traffic
+ * @lag: local interfaces lag struct
+ * @ptr: opaque data containing notifier event
+ */
+static void
+ice_lag_cfg_pf_fltrs(struct ice_lag *lag, void *ptr)
+{
+ struct netdev_notifier_bonding_info *info = ptr;
+ struct netdev_bonding_info *bonding_info;
+ struct net_device *event_netdev;
+
+ event_netdev = netdev_notifier_info_to_dev(ptr);
+ if (event_netdev != lag->netdev)
+ return;
+
+ bonding_info = &info->bonding_info;
+
+ if (lag->bond_aa) {
+ if (lag->need_fltr_cfg) {
+ ice_lag_cfg_lp_fltr(lag, true, false);
+ lag->need_fltr_cfg = false;
+ }
+ } else {
+ ice_lag_cfg_pf_fltrs_act_bkup(lag, bonding_info);
+ }
+}
+
+/**
* ice_display_lag_info - print LAG info
* @lag: LAG info struct
*/
@@ -376,12 +516,11 @@ static u16
ice_lag_qbuf_recfg(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *qbuf,
u16 vsi_num, u16 numq, u8 tc)
{
+ struct ice_pf *pf = hw->back;
struct ice_q_ctx *q_ctx;
u16 qid, count = 0;
- struct ice_pf *pf;
int i;
- pf = hw->back;
for (i = 0; i < numq; i++) {
q_ctx = ice_get_lan_q_ctx(hw, vsi_num, tc, i);
if (!q_ctx) {
@@ -551,7 +690,7 @@ ice_lag_move_vf_node_tc(struct ice_lag *lag, u8 oldport, u8 newport,
}
if (ice_aq_cfg_lan_txq(&lag->pf->hw, qbuf, qbuf_size, valq, oldport,
- newport, NULL)) {
+ newport, ICE_AQC_Q_CFG_TC_CHNG, NULL)) {
dev_warn(dev, "Failure to configure queues for LAG failover\n");
goto qbuf_err;
}
@@ -651,54 +790,6 @@ ice_lag_move_single_vf_nodes(struct ice_lag *lag, u8 oldport, u8 newport,
}
/**
- * ice_lag_move_new_vf_nodes - Move Tx scheduling nodes for a VF if required
- * @vf: the VF to move Tx nodes for
- *
- * Called just after configuring new VF queues. Check whether the VF Tx
- * scheduling nodes need to be updated to fail over to the active port. If so,
- * move them now.
- */
-void ice_lag_move_new_vf_nodes(struct ice_vf *vf)
-{
- struct ice_lag_netdev_list ndlist;
- u8 pri_port, act_port;
- struct ice_lag *lag;
- struct ice_vsi *vsi;
- struct ice_pf *pf;
-
- vsi = ice_get_vf_vsi(vf);
-
- if (WARN_ON(!vsi))
- return;
-
- if (WARN_ON(vsi->type != ICE_VSI_VF))
- return;
-
- pf = vf->pf;
- lag = pf->lag;
-
- mutex_lock(&pf->lag_mutex);
- if (!lag->bonded)
- goto new_vf_unlock;
-
- pri_port = pf->hw.port_info->lport;
- act_port = lag->active_port;
-
- if (lag->upper_netdev)
- ice_lag_build_netdev_list(lag, &ndlist);
-
- if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG) &&
- lag->bonded && lag->primary && pri_port != act_port &&
- !list_empty(lag->netdev_head))
- ice_lag_move_single_vf_nodes(lag, pri_port, act_port, vsi->idx);
-
- ice_lag_destroy_netdev_list(lag, &ndlist);
-
-new_vf_unlock:
- mutex_unlock(&pf->lag_mutex);
-}
-
-/**
* ice_lag_move_vf_nodes - move Tx scheduling nodes for all VFs to new port
* @lag: lag info struct
* @oldport: lport of previous interface
@@ -714,8 +805,7 @@ static void ice_lag_move_vf_nodes(struct ice_lag *lag, u8 oldport, u8 newport)
pf = lag->pf;
ice_for_each_vsi(pf, i)
- if (pf->vsi[i] && (pf->vsi[i]->type == ICE_VSI_VF ||
- pf->vsi[i]->type == ICE_VSI_SWITCHDEV_CTRL))
+ if (pf->vsi[i] && pf->vsi[i]->type == ICE_VSI_VF)
ice_lag_move_single_vf_nodes(lag, oldport, newport, i);
}
@@ -742,59 +832,60 @@ void ice_lag_move_vf_nodes_cfg(struct ice_lag *lag, u8 src_prt, u8 dst_prt)
ice_lag_destroy_netdev_list(lag, &ndlist);
}
-#define ICE_LAG_SRIOV_CP_RECIPE 10
-#define ICE_LAG_SRIOV_TRAIN_PKT_LEN 16
-
/**
- * ice_lag_cfg_cp_fltr - configure filter for control packets
- * @lag: local interface's lag struct
- * @add: add or remove rule
+ * ice_lag_prepare_vf_reset - helper to adjust vf lag for reset
+ * @lag: lag struct for interface that owns VF
+ *
+ * Context: must be called with the lag_mutex lock held.
+ *
+ * Return: active lport value or ICE_LAG_INVALID_PORT if nothing moved.
*/
-static void
-ice_lag_cfg_cp_fltr(struct ice_lag *lag, bool add)
+u8 ice_lag_prepare_vf_reset(struct ice_lag *lag)
{
- struct ice_sw_rule_lkup_rx_tx *s_rule = NULL;
- struct ice_vsi *vsi;
- u16 buf_len, opc;
-
- vsi = lag->pf->vsi[0];
-
- buf_len = ICE_SW_RULE_RX_TX_HDR_SIZE(s_rule,
- ICE_LAG_SRIOV_TRAIN_PKT_LEN);
- s_rule = kzalloc(buf_len, GFP_KERNEL);
- if (!s_rule) {
- netdev_warn(lag->netdev, "-ENOMEM error configuring CP filter\n");
- return;
- }
-
- if (add) {
- s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
- s_rule->recipe_id = cpu_to_le16(ICE_LAG_SRIOV_CP_RECIPE);
- s_rule->src = cpu_to_le16(vsi->port_info->lport);
- s_rule->act = cpu_to_le32(ICE_FWD_TO_VSI |
- ICE_SINGLE_ACT_LAN_ENABLE |
- ICE_SINGLE_ACT_VALID_BIT |
- FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M, vsi->vsi_num));
- s_rule->hdr_len = cpu_to_le16(ICE_LAG_SRIOV_TRAIN_PKT_LEN);
- memcpy(s_rule->hdr_data, lacp_train_pkt, LACP_TRAIN_PKT_LEN);
- opc = ice_aqc_opc_add_sw_rules;
- } else {
- opc = ice_aqc_opc_remove_sw_rules;
- s_rule->index = cpu_to_le16(lag->cp_rule_idx);
- }
- if (ice_aq_sw_rules(&lag->pf->hw, s_rule, buf_len, 1, opc, NULL)) {
- netdev_warn(lag->netdev, "Error %s CP rule for fail-over\n",
- add ? "ADDING" : "REMOVING");
- goto cp_free;
+ u8 pri_prt, act_prt;
+
+ if (lag && lag->bonded && lag->primary && lag->upper_netdev) {
+ if (!lag->bond_aa) {
+ pri_prt = lag->pf->hw.port_info->lport;
+ act_prt = lag->active_port;
+ if (act_prt != pri_prt &&
+ act_prt != ICE_LAG_INVALID_PORT) {
+ ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
+ return act_prt;
+ }
+ } else {
+ if (lag->port_bitmap & ICE_LAGS_M) {
+ lag->port_bitmap &= ~ICE_LAGS_M;
+ ice_lag_aa_failover(lag, ICE_LAGP_IDX, NULL);
+ lag->port_bitmap |= ICE_LAGS_M;
+ }
+ }
}
- if (add)
- lag->cp_rule_idx = le16_to_cpu(s_rule->index);
- else
- lag->cp_rule_idx = 0;
+ return ICE_LAG_INVALID_PORT;
+}
-cp_free:
- kfree(s_rule);
+/**
+ * ice_lag_complete_vf_reset - helper for lag after reset
+ * @lag: lag struct for primary interface
+ * @act_prt: which port should be active for lag
+ *
+ * Context: must be called while holding the lag_mutex.
+ */
+void ice_lag_complete_vf_reset(struct ice_lag *lag, u8 act_prt)
+{
+ u8 pri_prt;
+
+ if (lag && lag->bonded && lag->primary) {
+ if (!lag->bond_aa) {
+ pri_prt = lag->pf->hw.port_info->lport;
+ if (act_prt != ICE_LAG_INVALID_PORT)
+ ice_lag_move_vf_nodes_cfg(lag, pri_prt,
+ act_prt);
+ } else {
+ ice_lag_aa_failover(lag, ICE_LAGS_IDX, NULL);
+ }
+ }
}
/**
@@ -806,13 +897,12 @@ cp_free:
*/
static void ice_lag_info_event(struct ice_lag *lag, void *ptr)
{
- struct netdev_notifier_bonding_info *info;
+ struct netdev_notifier_bonding_info *info = ptr;
struct netdev_bonding_info *bonding_info;
struct net_device *event_netdev;
const char *lag_netdev_name;
event_netdev = netdev_notifier_info_to_dev(ptr);
- info = ptr;
lag_netdev_name = netdev_name(lag->netdev);
bonding_info = &info->bonding_info;
@@ -830,7 +920,7 @@ static void ice_lag_info_event(struct ice_lag *lag, void *ptr)
}
if (bonding_info->slave.state)
- ice_lag_set_backup(lag);
+ ice_lag_set_bkup(lag);
else
ice_lag_set_primary(lag);
@@ -839,6 +929,295 @@ lag_out:
}
/**
+ * ice_lag_aa_qbuf_recfg - fill a single queue buffer for recfg cmd
+ * @hw: HW struct that contains the queue context
+ * @qbuf: pointer to single queue buffer
+ * @vsi_num: index of the VF VSI in PF space
+ * @qnum: queue index
+ *
+ * Return: Zero on success, error code on failure.
+ */
+static int
+ice_lag_aa_qbuf_recfg(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *qbuf,
+ u16 vsi_num, int qnum)
+{
+ struct ice_pf *pf = hw->back;
+ struct ice_q_ctx *q_ctx;
+ u16 q_id;
+
+ q_ctx = ice_get_lan_q_ctx(hw, vsi_num, 0, qnum);
+ if (!q_ctx) {
+ dev_dbg(ice_hw_to_dev(hw), "LAG queue %d no Q context\n", qnum);
+ return -ENOENT;
+ }
+
+ if (q_ctx->q_teid == ICE_INVAL_TEID) {
+ dev_dbg(ice_hw_to_dev(hw), "LAG queue %d INVAL TEID\n", qnum);
+ return -EINVAL;
+ }
+
+ if (q_ctx->q_handle == ICE_INVAL_Q_HANDLE) {
+ dev_dbg(ice_hw_to_dev(hw), "LAG queue %d INVAL Q HANDLE\n", qnum);
+ return -EINVAL;
+ }
+
+ q_id = pf->vsi[vsi_num]->txq_map[q_ctx->q_handle];
+ qbuf->queue_info[0].q_handle = cpu_to_le16(q_id);
+ qbuf->queue_info[0].tc = 0;
+ qbuf->queue_info[0].q_teid = cpu_to_le32(q_ctx->q_teid);
+
+ return 0;
+}
+
+/**
+ * ice_lag_aa_move_vf_qs - Move some/all VF queues to destination
+ * @lag: primary interface's lag struct
+ * @dest: index of destination port
+ * @vsi_num: index of VF VSI in PF space
+ * @all: if true move all queues to destination
+ * @odd: VF wide q indicator for odd/even
+ * @e_pf: PF struct for the event interface
+ *
+ * The "all" parameter controls whether the queues are split between the
+ * two interfaces or all moved to the destination interface.
+ */
+static void ice_lag_aa_move_vf_qs(struct ice_lag *lag, u8 dest, u16 vsi_num,
+ bool all, bool *odd, struct ice_pf *e_pf)
+{
+ DEFINE_RAW_FLEX(struct ice_aqc_cfg_txqs_buf, qbuf, queue_info, 1);
+ struct ice_hw *old_hw, *new_hw, *pri_hw, *sec_hw;
+ struct device *dev = ice_pf_to_dev(lag->pf);
+ struct ice_vsi_ctx *pv_ctx, *sv_ctx;
+ struct ice_lag_netdev_list ndlist;
+ u16 num_q, qbuf_size, sec_vsi_num;
+ u8 pri_lport, sec_lport;
+ u32 pvf_teid, svf_teid;
+ u16 vf_id;
+
+ vf_id = lag->pf->vsi[vsi_num]->vf->vf_id;
+ /* If sec_vf[] not defined, then no second interface to share with */
+ if (lag->sec_vf[vf_id])
+ sec_vsi_num = lag->sec_vf[vf_id]->idx;
+ else
+ return;
+
+ pri_lport = lag->bond_lport_pri;
+ sec_lport = lag->bond_lport_sec;
+
+ if (pri_lport == ICE_LAG_INVALID_PORT ||
+ sec_lport == ICE_LAG_INVALID_PORT)
+ return;
+
+ if (!e_pf)
+ ice_lag_build_netdev_list(lag, &ndlist);
+
+ pri_hw = &lag->pf->hw;
+ if (e_pf && lag->pf != e_pf)
+ sec_hw = &e_pf->hw;
+ else
+ sec_hw = ice_lag_find_hw_by_lport(lag, sec_lport);
+
+ if (!pri_hw || !sec_hw)
+ return;
+
+ if (dest == ICE_LAGP_IDX) {
+ struct ice_vsi *vsi;
+
+ vsi = ice_get_main_vsi(lag->pf);
+ if (!vsi)
+ return;
+
+ old_hw = sec_hw;
+ new_hw = pri_hw;
+ ice_lag_config_eswitch(lag, vsi->netdev);
+ } else {
+ struct ice_pf *sec_pf = sec_hw->back;
+ struct ice_vsi *vsi;
+
+ vsi = ice_get_main_vsi(sec_pf);
+ if (!vsi)
+ return;
+
+ old_hw = pri_hw;
+ new_hw = sec_hw;
+ ice_lag_config_eswitch(lag, vsi->netdev);
+ }
+
+ pv_ctx = ice_get_vsi_ctx(pri_hw, vsi_num);
+ if (!pv_ctx) {
+ dev_warn(dev, "Unable to locate primary VSI %d context for LAG failover\n",
+ vsi_num);
+ return;
+ }
+
+ sv_ctx = ice_get_vsi_ctx(sec_hw, sec_vsi_num);
+ if (!sv_ctx) {
+ dev_warn(dev, "Unable to locate secondary VSI %d context for LAG failover\n",
+ vsi_num);
+ return;
+ }
+
+ num_q = pv_ctx->num_lan_q_entries[0];
+ qbuf_size = __struct_size(qbuf);
+
+ /* Suspend traffic for primary VSI VF */
+ pvf_teid = le32_to_cpu(pv_ctx->sched.vsi_node[0]->info.node_teid);
+ ice_sched_suspend_resume_elems(pri_hw, 1, &pvf_teid, true);
+
+ /* Suspend traffic for secondary VSI VF */
+ svf_teid = le32_to_cpu(sv_ctx->sched.vsi_node[0]->info.node_teid);
+ ice_sched_suspend_resume_elems(sec_hw, 1, &svf_teid, true);
+
+ for (int i = 0; i < num_q; i++) {
+ struct ice_sched_node *n_prt, *q_node, *parent;
+ struct ice_port_info *pi, *new_pi;
+ struct ice_vsi_ctx *src_ctx;
+ struct ice_sched_node *p;
+ struct ice_q_ctx *q_ctx;
+ u16 dst_vsi_num;
+
+ pi = old_hw->port_info;
+ new_pi = new_hw->port_info;
+
+ *odd = !(*odd);
+ if ((dest == ICE_LAGP_IDX && *odd && !all) ||
+ (dest == ICE_LAGS_IDX && !(*odd) && !all) ||
+ lag->q_home[vf_id][i] == dest)
+ continue;
+
+ if (dest == ICE_LAGP_IDX)
+ dst_vsi_num = vsi_num;
+ else
+ dst_vsi_num = sec_vsi_num;
+
+ n_prt = ice_sched_get_free_qparent(new_hw->port_info,
+ dst_vsi_num, 0,
+ ICE_SCHED_NODE_OWNER_LAN);
+ if (!n_prt)
+ continue;
+
+ q_ctx = ice_get_lan_q_ctx(pri_hw, vsi_num, 0, i);
+ if (!q_ctx)
+ continue;
+
+ if (dest == ICE_LAGP_IDX)
+ src_ctx = sv_ctx;
+ else
+ src_ctx = pv_ctx;
+
+ q_node = ice_sched_find_node_by_teid(src_ctx->sched.vsi_node[0],
+ q_ctx->q_teid);
+ if (!q_node)
+ continue;
+
+ qbuf->src_parent_teid = q_node->info.parent_teid;
+ qbuf->dst_parent_teid = n_prt->info.node_teid;
+
+ /* Move the node in the HW/FW */
+ if (ice_lag_aa_qbuf_recfg(pri_hw, qbuf, vsi_num, i))
+ continue;
+
+ if (dest == ICE_LAGP_IDX)
+ ice_aq_cfg_lan_txq(pri_hw, qbuf, qbuf_size, 1,
+ sec_lport, pri_lport,
+ ICE_AQC_Q_CFG_MOVE_TC_CHNG,
+ NULL);
+ else
+ ice_aq_cfg_lan_txq(pri_hw, qbuf, qbuf_size, 1,
+ pri_lport, sec_lport,
+ ICE_AQC_Q_CFG_MOVE_TC_CHNG,
+ NULL);
+
+ /* Move the node in the SW */
+ parent = q_node->parent;
+ if (!parent)
+ continue;
+
+ for (int n = 0; n < parent->num_children; n++) {
+ int j;
+
+ if (parent->children[n] != q_node)
+ continue;
+
+ for (j = n + 1; j < parent->num_children;
+ j++) {
+ parent->children[j - 1] =
+ parent->children[j];
+ }
+ parent->children[j] = NULL;
+ parent->num_children--;
+ break;
+ }
+
+ p = pi->sib_head[0][q_node->tx_sched_layer];
+ while (p) {
+ if (p->sibling == q_node) {
+ p->sibling = q_node->sibling;
+ break;
+ }
+ p = p->sibling;
+ }
+
+ if (pi->sib_head[0][q_node->tx_sched_layer] == q_node)
+ pi->sib_head[0][q_node->tx_sched_layer] =
+ q_node->sibling;
+
+ q_node->parent = n_prt;
+ q_node->info.parent_teid = n_prt->info.node_teid;
+ q_node->sibling = NULL;
+ p = new_pi->sib_head[0][q_node->tx_sched_layer];
+ if (p) {
+ while (p) {
+ if (!p->sibling) {
+ p->sibling = q_node;
+ break;
+ }
+ p = p->sibling;
+ }
+ } else {
+ new_pi->sib_head[0][q_node->tx_sched_layer] =
+ q_node;
+ }
+
+ n_prt->children[n_prt->num_children++] = q_node;
+ lag->q_home[vf_id][i] = dest;
+ }
+
+ ice_sched_suspend_resume_elems(pri_hw, 1, &pvf_teid, false);
+ ice_sched_suspend_resume_elems(sec_hw, 1, &svf_teid, false);
+
+ if (!e_pf)
+ ice_lag_destroy_netdev_list(lag, &ndlist);
+}
+
+/**
+ * ice_lag_aa_failover - move VF queues in A/A mode
+ * @lag: primary lag struct
+ * @dest: index of destination port
+ * @e_pf: PF struct for event port
+ */
+void ice_lag_aa_failover(struct ice_lag *lag, u8 dest, struct ice_pf *e_pf)
+{
+ bool odd = true, all = false;
+ int i;
+
+ /* Primary can be a target if down (cleanup), but secondary can't */
+ if (dest == ICE_LAGS_IDX && !(lag->port_bitmap & ICE_LAGS_M))
+ return;
+
+ /* Move all queues to a destination if only one port is active,
+ * or no ports are active and dest is primary.
+ */
+ if ((lag->port_bitmap ^ (ICE_LAGP_M | ICE_LAGS_M)) ||
+ (!lag->port_bitmap && dest == ICE_LAGP_IDX))
+ all = true;
+
+ ice_for_each_vsi(lag->pf, i)
+ if (lag->pf->vsi[i] && lag->pf->vsi[i]->type == ICE_VSI_VF)
+ ice_lag_aa_move_vf_qs(lag, dest, i, all, &odd, e_pf);
+}
+
+/**
* ice_lag_reclaim_vf_tc - move scheduling nodes back to primary interface
* @lag: primary interface lag struct
* @src_hw: HW struct current node location
@@ -854,13 +1233,12 @@ ice_lag_reclaim_vf_tc(struct ice_lag *lag, struct ice_hw *src_hw, u16 vsi_num,
u16 numq, valq, num_moved, qbuf_size;
u16 buf_size = __struct_size(buf);
struct ice_aqc_cfg_txqs_buf *qbuf;
+ struct ice_hw *hw = &lag->pf->hw;
struct ice_sched_node *n_prt;
__le32 teid, parent_teid;
struct ice_vsi_ctx *ctx;
- struct ice_hw *hw;
u32 tmp_teid;
- hw = &lag->pf->hw;
ctx = ice_get_vsi_ctx(hw, vsi_num);
if (!ctx) {
dev_warn(dev, "Unable to locate VSI context for LAG reclaim\n");
@@ -901,7 +1279,7 @@ ice_lag_reclaim_vf_tc(struct ice_lag *lag, struct ice_hw *src_hw, u16 vsi_num,
if (ice_aq_cfg_lan_txq(hw, qbuf, qbuf_size, numq,
src_hw->port_info->lport, hw->port_info->lport,
- NULL)) {
+ ICE_AQC_Q_CFG_TC_CHNG, NULL)) {
dev_warn(dev, "Failure to configure queues for LAG failover\n");
goto reclaim_qerr;
}
@@ -953,8 +1331,7 @@ ice_lag_reclaim_vf_nodes(struct ice_lag *lag, struct ice_hw *src_hw)
pf = lag->pf;
ice_for_each_vsi(pf, i)
- if (pf->vsi[i] && (pf->vsi[i]->type == ICE_VSI_VF ||
- pf->vsi[i]->type == ICE_VSI_SWITCHDEV_CTRL))
+ if (pf->vsi[i] && pf->vsi[i]->type == ICE_VSI_VF)
ice_for_each_traffic_class(tc)
ice_lag_reclaim_vf_tc(lag, src_hw, i, tc);
}
@@ -973,14 +1350,15 @@ static void ice_lag_link(struct ice_lag *lag)
lag->bonded = true;
lag->role = ICE_LAG_UNSET;
+ lag->need_fltr_cfg = true;
netdev_info(lag->netdev, "Shared SR-IOV resources in bond are active\n");
}
/**
- * ice_lag_unlink - handle unlink event
+ * ice_lag_act_bkup_unlink - handle unlink event for A/B bond
* @lag: LAG info struct
*/
-static void ice_lag_unlink(struct ice_lag *lag)
+static void ice_lag_act_bkup_unlink(struct ice_lag *lag)
{
u8 pri_port, act_port, loc_port;
struct ice_pf *pf = lag->pf;
@@ -997,6 +1375,9 @@ static void ice_lag_unlink(struct ice_lag *lag)
ice_lag_move_vf_nodes(lag, act_port, pri_port);
lag->primary = false;
lag->active_port = ICE_LAG_INVALID_PORT;
+
+ /* Config primary's eswitch back to normal operation. */
+ ice_lag_config_eswitch(lag, lag->netdev);
} else {
struct ice_lag *primary_lag;
@@ -1013,10 +1394,32 @@ static void ice_lag_unlink(struct ice_lag *lag)
}
}
}
+}
- lag->bonded = false;
- lag->role = ICE_LAG_NONE;
- lag->upper_netdev = NULL;
+/**
+ * ice_lag_aa_unlink - handle unlink event for Active-Active bond
+ * @lag: LAG info struct
+ */
+static void ice_lag_aa_unlink(struct ice_lag *lag)
+{
+ struct ice_lag *pri_lag;
+
+ if (lag->primary) {
+ pri_lag = lag;
+ lag->port_bitmap &= ~ICE_LAGP_M;
+ } else {
+ pri_lag = ice_lag_find_primary(lag);
+ if (pri_lag)
+ pri_lag->port_bitmap &= ICE_LAGS_M;
+ }
+
+ if (pri_lag) {
+ ice_lag_aa_failover(pri_lag, ICE_LAGP_IDX, lag->pf);
+ if (lag->primary)
+ pri_lag->bond_lport_pri = ICE_LAG_INVALID_PORT;
+ else
+ pri_lag->bond_lport_sec = ICE_LAG_INVALID_PORT;
+ }
}
/**
@@ -1032,10 +1435,20 @@ static void ice_lag_link_unlink(struct ice_lag *lag, void *ptr)
if (netdev != lag->netdev)
return;
- if (info->linking)
+ if (info->linking) {
ice_lag_link(lag);
- else
- ice_lag_unlink(lag);
+ } else {
+ if (lag->bond_aa)
+ ice_lag_aa_unlink(lag);
+ else
+ ice_lag_act_bkup_unlink(lag);
+
+ lag->bonded = false;
+ lag->role = ICE_LAG_NONE;
+ lag->upper_netdev = NULL;
+ lag->bond_aa = false;
+ lag->need_fltr_cfg = false;
+ }
}
/**
@@ -1053,7 +1466,7 @@ ice_lag_set_swid(u16 primary_swid, struct ice_lag *local_lag,
{
struct ice_aqc_alloc_free_res_elem *buf;
struct ice_aqc_set_port_params *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
u16 buf_len, swid;
int status, i;
@@ -1101,7 +1514,7 @@ ice_lag_set_swid(u16 primary_swid, struct ice_lag *local_lag,
else
swid = local_lag->pf->hw.port_info->sw_id;
- cmd = &desc.params.set_port_params;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params);
cmd->swid = cpu_to_le16(ICE_AQC_PORT_SWID_VALID | swid);
@@ -1133,11 +1546,8 @@ ice_lag_set_swid(u16 primary_swid, struct ice_lag *local_lag,
*/
static void ice_lag_primary_swid(struct ice_lag *lag, bool link)
{
- struct ice_hw *hw;
- u16 swid;
-
- hw = &lag->pf->hw;
- swid = hw->port_info->sw_id;
+ struct ice_hw *hw = &lag->pf->hw;
+ u16 swid = hw->port_info->sw_id;
if (ice_share_res(hw, ICE_AQC_RES_TYPE_SWID, link, swid))
dev_warn(ice_pf_to_dev(lag->pf), "Failure to set primary interface shared status\n");
@@ -1150,12 +1560,10 @@ static void ice_lag_primary_swid(struct ice_lag *lag, bool link)
*/
static void ice_lag_add_prune_list(struct ice_lag *lag, struct ice_pf *event_pf)
{
- u16 num_vsi, rule_buf_sz, vsi_list_id, event_vsi_num, prim_vsi_idx;
- struct ice_sw_rule_vsi_list *s_rule = NULL;
+ u16 rule_buf_sz, vsi_list_id, event_vsi_num, prim_vsi_idx, num_vsi = 1;
+ struct ice_sw_rule_vsi_list *s_rule;
struct device *dev;
- num_vsi = 1;
-
dev = ice_pf_to_dev(lag->pf);
event_vsi_num = event_pf->vsi[0]->vsi_num;
prim_vsi_idx = lag->pf->vsi[0]->idx;
@@ -1191,12 +1599,10 @@ static void ice_lag_add_prune_list(struct ice_lag *lag, struct ice_pf *event_pf)
*/
static void ice_lag_del_prune_list(struct ice_lag *lag, struct ice_pf *event_pf)
{
- u16 num_vsi, vsi_num, vsi_idx, rule_buf_sz, vsi_list_id;
- struct ice_sw_rule_vsi_list *s_rule = NULL;
+ u16 vsi_num, vsi_idx, rule_buf_sz, vsi_list_id, num_vsi = 1;
+ struct ice_sw_rule_vsi_list *s_rule;
struct device *dev;
- num_vsi = 1;
-
dev = ice_pf_to_dev(lag->pf);
vsi_num = event_pf->vsi[0]->vsi_num;
vsi_idx = lag->pf->vsi[0]->idx;
@@ -1244,6 +1650,11 @@ static void ice_lag_init_feature_support_flag(struct ice_pf *pf)
ice_set_feature_support(pf, ICE_F_SRIOV_LAG);
else
ice_clear_feature_support(pf, ICE_F_SRIOV_LAG);
+
+ if (caps->sriov_aa_lag && ice_pkg_has_lport_extract(&pf->hw))
+ ice_set_feature_support(pf, ICE_F_SRIOV_AA_LAG);
+ else
+ ice_clear_feature_support(pf, ICE_F_SRIOV_AA_LAG);
}
/**
@@ -1253,11 +1664,10 @@ static void ice_lag_init_feature_support_flag(struct ice_pf *pf)
*/
static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr)
{
- struct netdev_notifier_changeupper_info *info;
+ struct netdev_notifier_changeupper_info *info = ptr;
struct ice_lag *primary_lag;
struct net_device *netdev;
- info = ptr;
netdev = netdev_notifier_info_to_dev(ptr);
/* not for this netdev */
@@ -1272,25 +1682,47 @@ static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr)
*/
if (!primary_lag) {
lag->primary = true;
+ if (!ice_is_switchdev_running(lag->pf))
+ return;
+
/* Configure primary's SWID to be shared */
ice_lag_primary_swid(lag, true);
primary_lag = lag;
+ lag->bond_lport_pri = lag->pf->hw.port_info->lport;
+ lag->bond_lport_sec = ICE_LAG_INVALID_PORT;
+ lag->port_bitmap = 0;
} else {
u16 swid;
+ if (!ice_is_switchdev_running(primary_lag->pf))
+ return;
+
swid = primary_lag->pf->hw.port_info->sw_id;
ice_lag_set_swid(swid, lag, true);
ice_lag_add_prune_list(primary_lag, lag->pf);
- ice_lag_cfg_drop_fltr(lag, true);
+ primary_lag->bond_lport_sec =
+ lag->pf->hw.port_info->lport;
}
/* add filter for primary control packets */
- ice_lag_cfg_cp_fltr(lag, true);
+ ice_lag_cfg_lp_fltr(lag, true, true);
} else {
if (!primary_lag && lag->primary)
primary_lag = lag;
+ if (primary_lag) {
+ for (int i = 0; i < ICE_MAX_SRIOV_VFS; i++) {
+ if (primary_lag->sec_vf[i]) {
+ ice_vsi_release(primary_lag->sec_vf[i]);
+ primary_lag->sec_vf[i] = NULL;
+ }
+ }
+ }
+
if (!lag->primary) {
ice_lag_set_swid(0, lag, false);
+ if (primary_lag)
+ primary_lag->bond_lport_sec =
+ ICE_LAG_INVALID_PORT;
} else {
if (primary_lag && lag->primary) {
ice_lag_primary_swid(lag, false);
@@ -1298,7 +1730,7 @@ static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr)
}
}
/* remove filter for control packets */
- ice_lag_cfg_cp_fltr(lag, false);
+ ice_lag_cfg_lp_fltr(lag, false, !lag->bond_aa);
}
}
@@ -1311,7 +1743,7 @@ static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr)
*/
static void ice_lag_monitor_link(struct ice_lag *lag, void *ptr)
{
- struct netdev_notifier_changeupper_info *info;
+ struct netdev_notifier_changeupper_info *info = ptr;
struct ice_hw *prim_hw, *active_hw;
struct net_device *event_netdev;
struct ice_pf *pf;
@@ -1324,19 +1756,34 @@ static void ice_lag_monitor_link(struct ice_lag *lag, void *ptr)
if (!netif_is_same_ice(lag->pf, event_netdev))
return;
+ if (info->upper_dev != lag->upper_netdev)
+ return;
+
+ if (info->linking)
+ return;
+
pf = lag->pf;
prim_hw = &pf->hw;
prim_port = prim_hw->port_info->lport;
- info = (struct netdev_notifier_changeupper_info *)ptr;
- if (info->upper_dev != lag->upper_netdev)
- return;
-
- if (!info->linking) {
- /* Since there are only two interfaces allowed in SRIOV+LAG, if
- * one port is leaving, then nodes need to be on primary
- * interface.
- */
+ /* Since there are only two interfaces allowed in SRIOV+LAG, if
+ * one port is leaving, then nodes need to be on primary
+ * interface.
+ */
+ if (lag->bond_aa) {
+ struct ice_netdev_priv *e_ndp;
+ struct ice_pf *e_pf;
+
+ e_ndp = netdev_priv(event_netdev);
+ e_pf = e_ndp->vsi->back;
+
+ if (lag->bond_lport_pri != ICE_LAG_INVALID_PORT &&
+ lag->port_bitmap & ICE_LAGS_M) {
+ lag->port_bitmap &= ~ICE_LAGS_M;
+ ice_lag_aa_failover(lag, ICE_LAGP_IDX, e_pf);
+ lag->bond_lport_sec = ICE_LAG_INVALID_PORT;
+ }
+ } else {
if (prim_port != lag->active_port &&
lag->active_port != ICE_LAG_INVALID_PORT) {
active_hw = ice_lag_find_hw_by_lport(lag,
@@ -1348,45 +1795,32 @@ static void ice_lag_monitor_link(struct ice_lag *lag, void *ptr)
}
/**
- * ice_lag_monitor_active - main PF keep track of which port is active
+ * ice_lag_monitor_act_bkup - keep track of which port is active in A/B LAG
* @lag: lag info struct
- * @ptr: opaque data containing notifier event
+ * @b_info: bonding info
+ * @event_netdev: net_device for target netdev
*
* This function is for the primary PF to monitor changes in which port is
* active and handle changes for SRIOV VF functionality
*/
-static void ice_lag_monitor_active(struct ice_lag *lag, void *ptr)
+static void ice_lag_monitor_act_bkup(struct ice_lag *lag,
+ struct netdev_bonding_info *b_info,
+ struct net_device *event_netdev)
{
- struct net_device *event_netdev, *event_upper;
- struct netdev_notifier_bonding_info *info;
- struct netdev_bonding_info *bonding_info;
struct ice_netdev_priv *event_np;
struct ice_pf *pf, *event_pf;
u8 prim_port, event_port;
- if (!lag->primary)
- return;
-
pf = lag->pf;
if (!pf)
return;
- event_netdev = netdev_notifier_info_to_dev(ptr);
- rcu_read_lock();
- event_upper = netdev_master_upper_dev_get_rcu(event_netdev);
- rcu_read_unlock();
- if (!netif_is_ice(event_netdev) || event_upper != lag->upper_netdev)
- return;
-
event_np = netdev_priv(event_netdev);
event_pf = event_np->vsi->back;
event_port = event_pf->hw.port_info->lport;
prim_port = pf->hw.port_info->lport;
- info = (struct netdev_notifier_bonding_info *)ptr;
- bonding_info = &info->bonding_info;
-
- if (!bonding_info->slave.state) {
+ if (!b_info->slave.state) {
/* if no port is currently active, then nodes and filters exist
* on primary port, check if we need to move them
*/
@@ -1395,6 +1829,7 @@ static void ice_lag_monitor_active(struct ice_lag *lag, void *ptr)
ice_lag_move_vf_nodes(lag, prim_port,
event_port);
lag->active_port = event_port;
+ ice_lag_config_eswitch(lag, event_netdev);
return;
}
@@ -1404,6 +1839,7 @@ static void ice_lag_monitor_active(struct ice_lag *lag, void *ptr)
/* new active port */
ice_lag_move_vf_nodes(lag, lag->active_port, event_port);
lag->active_port = event_port;
+ ice_lag_config_eswitch(lag, event_netdev);
} else {
/* port not set as currently active (e.g. new active port
* has already claimed the nodes and filters
@@ -1421,6 +1857,128 @@ static void ice_lag_monitor_active(struct ice_lag *lag, void *ptr)
}
/**
+ * ice_lag_aa_clear_spoof - adjust the placeholder VSI spoofing for A/A LAG
+ * @vsi: placeholder VSI to adjust
+ */
+static void ice_lag_aa_clear_spoof(struct ice_vsi *vsi)
+{
+ ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
+}
+
+/**
+ * ice_lag_monitor_act_act - Keep track of active ports in A/A LAG
+ * @lag: lag struct for primary interface
+ * @b_info: bonding_info for event
+ * @event_netdev: net_device for target netdev
+ */
+static void ice_lag_monitor_act_act(struct ice_lag *lag,
+ struct netdev_bonding_info *b_info,
+ struct net_device *event_netdev)
+{
+ struct ice_netdev_priv *event_np;
+ u8 prim_port, event_port;
+ struct ice_pf *event_pf;
+
+ event_np = netdev_priv(event_netdev);
+ event_pf = event_np->vsi->back;
+ event_port = event_pf->hw.port_info->lport;
+ prim_port = lag->pf->hw.port_info->lport;
+
+ if (b_info->slave.link == BOND_LINK_UP) {
+ /* Port is coming up */
+ if (prim_port == event_port) {
+ /* Processing event for primary interface */
+ if (lag->bond_lport_pri == ICE_LAG_INVALID_PORT)
+ return;
+
+ if (!(lag->port_bitmap & ICE_LAGP_M)) {
+ /* Primary port was not marked up before, move
+ * some|all VF queues to it and mark as up
+ */
+ lag->port_bitmap |= ICE_LAGP_M;
+ ice_lag_aa_failover(lag, ICE_LAGP_IDX, event_pf);
+ }
+ } else {
+ if (lag->bond_lport_sec == ICE_LAG_INVALID_PORT)
+ return;
+
+ /* Create placeholder VSIs on secondary PF.
+ * The placeholder is necessary so that we have
+ * an element that represents the VF on the secondary
+ * interface's scheduling tree. This will be a tree
+ * root for scheduling nodes when they are moved to
+ * the secondary interface.
+ */
+ if (!lag->sec_vf[0]) {
+ struct ice_vsi_cfg_params params = {};
+ struct ice_vsi *nvsi;
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ params.type = ICE_VSI_VF;
+ params.port_info = event_pf->hw.port_info;
+ params.flags = ICE_VSI_FLAG_INIT;
+
+ ice_for_each_vf(lag->pf, bkt, vf) {
+ params.vf = vf;
+ nvsi = ice_vsi_setup(event_pf,
+ &params);
+ ice_lag_aa_clear_spoof(nvsi);
+ lag->sec_vf[vf->vf_id] = nvsi;
+ }
+ }
+
+ if (!(lag->port_bitmap & ICE_LAGS_M)) {
+ /* Secondary port was not marked up before,
+ * move some|all VF queues to it and mark as up
+ */
+ lag->port_bitmap |= ICE_LAGS_M;
+ ice_lag_aa_failover(lag, ICE_LAGS_IDX, event_pf);
+ }
+ }
+ } else {
+ /* Port is going down */
+ if (prim_port == event_port) {
+ lag->port_bitmap &= ~ICE_LAGP_M;
+ ice_lag_aa_failover(lag, ICE_LAGS_IDX, event_pf);
+ } else {
+ lag->port_bitmap &= ~ICE_LAGS_M;
+ ice_lag_aa_failover(lag, ICE_LAGP_IDX, event_pf);
+ }
+ }
+}
+
+/**
+ * ice_lag_monitor_info - Calls relevant A/A or A/B monitoring function
+ * @lag: lag info struct
+ * @ptr: opaque data containing notifier event
+ *
+ * This function is for the primary PF to monitor changes in which port is
+ * active and handle changes for SRIOV VF functionality
+ */
+static void ice_lag_monitor_info(struct ice_lag *lag, void *ptr)
+{
+ struct netdev_notifier_bonding_info *info = ptr;
+ struct net_device *event_netdev, *event_upper;
+ struct netdev_bonding_info *bonding_info;
+
+ if (!lag->primary)
+ return;
+
+ event_netdev = netdev_notifier_info_to_dev(ptr);
+ bonding_info = &info->bonding_info;
+ rcu_read_lock();
+ event_upper = netdev_master_upper_dev_get_rcu(event_netdev);
+ rcu_read_unlock();
+ if (!netif_is_ice(event_netdev) || event_upper != lag->upper_netdev)
+ return;
+
+ if (lag->bond_aa)
+ ice_lag_monitor_act_act(lag, bonding_info, event_netdev);
+ else
+ ice_lag_monitor_act_bkup(lag, bonding_info, event_netdev);
+}
+
+/**
* ice_lag_chk_comp - evaluate bonded interface for feature support
* @lag: lag info struct
* @ptr: opaque data for netdev event info
@@ -1428,13 +1986,21 @@ static void ice_lag_monitor_active(struct ice_lag *lag, void *ptr)
static bool
ice_lag_chk_comp(struct ice_lag *lag, void *ptr)
{
+ struct netdev_notifier_bonding_info *info = ptr;
struct net_device *event_netdev, *event_upper;
- struct netdev_notifier_bonding_info *info;
struct netdev_bonding_info *bonding_info;
struct list_head *tmp;
struct device *dev;
int count = 0;
+ /* All members need to know if bond is A/A or A/B */
+ bonding_info = &info->bonding_info;
+ lag->bond_mode = bonding_info->master.bond_mode;
+ if (lag->bond_mode != BOND_MODE_ACTIVEBACKUP)
+ lag->bond_aa = true;
+ else
+ lag->bond_aa = false;
+
if (!lag->primary)
return true;
@@ -1455,13 +2021,9 @@ ice_lag_chk_comp(struct ice_lag *lag, void *ptr)
return false;
}
- info = (struct netdev_notifier_bonding_info *)ptr;
- bonding_info = &info->bonding_info;
- lag->bond_mode = bonding_info->master.bond_mode;
- if (lag->bond_mode != BOND_MODE_ACTIVEBACKUP) {
- dev_info(dev, "Bond Mode not ACTIVE-BACKUP - VF LAG disabled\n");
+ if (lag->bond_aa && !ice_is_feature_supported(lag->pf,
+ ICE_F_SRIOV_AA_LAG))
return false;
- }
list_for_each(tmp, lag->netdev_head) {
struct ice_dcbx_cfg *dcb_cfg, *peer_dcb_cfg;
@@ -1565,10 +2127,9 @@ ice_lag_unregister(struct ice_lag *lag, struct net_device *event_netdev)
static void
ice_lag_monitor_rdma(struct ice_lag *lag, void *ptr)
{
- struct netdev_notifier_changeupper_info *info;
+ struct netdev_notifier_changeupper_info *info = ptr;
struct net_device *netdev;
- info = ptr;
netdev = netdev_notifier_info_to_dev(ptr);
if (netdev != lag->netdev)
@@ -1616,12 +2177,29 @@ static void ice_lag_chk_disabled_bond(struct ice_lag *lag, void *ptr)
*/
static void ice_lag_disable_sriov_bond(struct ice_lag *lag)
{
- struct ice_netdev_priv *np;
- struct ice_pf *pf;
+ struct ice_pf *pf = ice_netdev_to_pf(lag->netdev);
- np = netdev_priv(lag->netdev);
- pf = np->vsi->back;
ice_clear_feature_support(pf, ICE_F_SRIOV_LAG);
+ ice_clear_feature_support(pf, ICE_F_SRIOV_AA_LAG);
+}
+
+/**
+ * ice_lag_preset_drop_fltr - preset drop filter for A/B bonds
+ * @lag: local lag struct
+ * @ptr: opaque data containing event
+ *
+ * Sets the initial drop filter for secondary interface in an
+ * active-backup bond
+ */
+static void ice_lag_preset_drop_fltr(struct ice_lag *lag, void *ptr)
+{
+ struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+
+ if (netdev != lag->netdev || lag->primary || !lag->need_fltr_cfg)
+ return;
+
+ ice_lag_cfg_drop_fltr(lag, true);
+ lag->need_fltr_cfg = false;
}
/**
@@ -1662,10 +2240,12 @@ static void ice_lag_process_event(struct work_struct *work)
ice_lag_unregister(lag_work->lag, netdev);
goto lag_cleanup;
}
- ice_lag_monitor_active(lag_work->lag,
- &lag_work->info.bonding_info);
ice_lag_cfg_pf_fltrs(lag_work->lag,
&lag_work->info.bonding_info);
+ ice_lag_preset_drop_fltr(lag_work->lag,
+ &lag_work->info.bonding_info);
+ ice_lag_monitor_info(lag_work->lag,
+ &lag_work->info.bonding_info);
}
ice_lag_info_event(lag_work->lag, &lag_work->info.bonding_info);
break;
@@ -1738,9 +2318,8 @@ ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event,
lag_work->lag = lag;
lag_work->event = event;
if (event == NETDEV_CHANGEUPPER) {
- struct netdev_notifier_changeupper_info *info;
+ struct netdev_notifier_changeupper_info *info = ptr;
- info = ptr;
upper_netdev = info->upper_dev;
} else {
upper_netdev = netdev_master_upper_dev_get(netdev);
@@ -1790,10 +2369,8 @@ ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event,
*/
static int ice_register_lag_handler(struct ice_lag *lag)
{
+ struct notifier_block *notif_blk = &lag->notif_block;
struct device *dev = ice_pf_to_dev(lag->pf);
- struct notifier_block *notif_blk;
-
- notif_blk = &lag->notif_block;
if (!notif_blk->notifier_call) {
notif_blk->notifier_call = ice_lag_event_handler;
@@ -1813,10 +2390,9 @@ static int ice_register_lag_handler(struct ice_lag *lag)
*/
static void ice_unregister_lag_handler(struct ice_lag *lag)
{
+ struct notifier_block *notif_blk = &lag->notif_block;
struct device *dev = ice_pf_to_dev(lag->pf);
- struct notifier_block *notif_blk;
- notif_blk = &lag->notif_block;
if (notif_blk->notifier_call) {
unregister_netdevice_notifier(notif_blk);
dev_dbg(dev, "LAG event handler unregistered\n");
@@ -1878,13 +2454,12 @@ ice_lag_move_vf_nodes_tc_sync(struct ice_lag *lag, struct ice_hw *dest_hw,
u16 numq, valq, num_moved, qbuf_size;
u16 buf_size = __struct_size(buf);
struct ice_aqc_cfg_txqs_buf *qbuf;
+ struct ice_hw *hw = &lag->pf->hw;
struct ice_sched_node *n_prt;
__le32 teid, parent_teid;
struct ice_vsi_ctx *ctx;
- struct ice_hw *hw;
u32 tmp_teid;
- hw = &lag->pf->hw;
ctx = ice_get_vsi_ctx(hw, vsi_num);
if (!ctx) {
dev_warn(dev, "LAG rebuild failed after reset due to VSI Context failure\n");
@@ -1921,7 +2496,8 @@ ice_lag_move_vf_nodes_tc_sync(struct ice_lag *lag, struct ice_hw *dest_hw,
}
if (ice_aq_cfg_lan_txq(hw, qbuf, qbuf_size, numq, hw->port_info->lport,
- dest_hw->port_info->lport, NULL)) {
+ dest_hw->port_info->lport,
+ ICE_AQC_Q_CFG_TC_CHNG, NULL)) {
dev_warn(dev, "Failure to configure queues for LAG reset rebuild\n");
goto sync_qerr;
}
@@ -1976,8 +2552,7 @@ ice_lag_move_vf_nodes_sync(struct ice_lag *lag, struct ice_hw *dest_hw)
pf = lag->pf;
ice_for_each_vsi(pf, i)
- if (pf->vsi[i] && (pf->vsi[i]->type == ICE_VSI_VF ||
- pf->vsi[i]->type == ICE_VSI_SWITCHDEV_CTRL))
+ if (pf->vsi[i] && pf->vsi[i]->type == ICE_VSI_VF)
ice_for_each_traffic_class(tc)
ice_lag_move_vf_nodes_tc_sync(lag, dest_hw, i,
tc);
@@ -2018,9 +2593,13 @@ int ice_init_lag(struct ice_pf *pf)
lag->netdev = vsi->netdev;
lag->role = ICE_LAG_NONE;
lag->active_port = ICE_LAG_INVALID_PORT;
+ lag->port_bitmap = 0x0;
lag->bonded = false;
+ lag->bond_aa = false;
+ lag->need_fltr_cfg = false;
lag->upper_netdev = NULL;
lag->notif_block.notifier_call = NULL;
+ memset(lag->sec_vf, 0, sizeof(lag->sec_vf));
err = ice_register_lag_handler(lag);
if (err) {
@@ -2038,6 +2617,11 @@ int ice_init_lag(struct ice_pf *pf)
if (err)
goto free_rcp_res;
+ err = ice_create_lag_recipe(&pf->hw, &lag->act_act_recipe,
+ ice_lport_rcp, 1);
+ if (err)
+ goto free_lport_res;
+
/* associate recipes to profiles */
for (n = 0; n < ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER; n++) {
err = ice_aq_get_recipe_to_profile(&pf->hw, n,
@@ -2047,7 +2631,8 @@ int ice_init_lag(struct ice_pf *pf)
if (recipe_bits & BIT(ICE_SW_LKUP_DFLT)) {
recipe_bits |= BIT(lag->pf_recipe) |
- BIT(lag->lport_recipe);
+ BIT(lag->lport_recipe) |
+ BIT(lag->act_act_recipe);
ice_aq_map_recipe_to_profile(&pf->hw, n,
recipe_bits, NULL);
}
@@ -2058,9 +2643,13 @@ int ice_init_lag(struct ice_pf *pf)
dev_dbg(dev, "INIT LAG complete\n");
return 0;
+free_lport_res:
+ ice_free_hw_res(&pf->hw, ICE_AQC_RES_TYPE_RECIPE, 1,
+ &lag->lport_recipe);
+
free_rcp_res:
ice_free_hw_res(&pf->hw, ICE_AQC_RES_TYPE_RECIPE, 1,
- &pf->lag->pf_recipe);
+ &lag->pf_recipe);
lag_error:
kfree(lag);
pf->lag = NULL;
@@ -2076,9 +2665,7 @@ lag_error:
*/
void ice_deinit_lag(struct ice_pf *pf)
{
- struct ice_lag *lag;
-
- lag = pf->lag;
+ struct ice_lag *lag = pf->lag;
if (!lag)
return;
@@ -2147,11 +2734,15 @@ void ice_lag_rebuild(struct ice_pf *pf)
ice_lag_move_vf_nodes_sync(prim_lag, &pf->hw);
}
- ice_lag_cfg_cp_fltr(lag, true);
+ if (!lag->bond_aa) {
+ ice_lag_cfg_lp_fltr(lag, true, true);
+ if (lag->pf_rx_rule_id)
+ if (ice_lag_cfg_dflt_fltr(lag, true))
+ dev_err(ice_pf_to_dev(pf), "Error adding default VSI rule in rebuild\n");
+ } else {
+ ice_lag_cfg_lp_fltr(lag, true, false);
+ }
- if (lag->pf_rule_id)
- if (ice_lag_cfg_dflt_fltr(lag, true))
- dev_err(ice_pf_to_dev(pf), "Error adding default VSI rule in rebuild\n");
ice_clear_rdma_cap(pf);
lag_rebuild_out:
@@ -2170,7 +2761,8 @@ bool ice_lag_is_switchdev_running(struct ice_pf *pf)
struct ice_lag *lag = pf->lag;
struct net_device *tmp_nd;
- if (!ice_is_feature_supported(pf, ICE_F_SRIOV_LAG) || !lag)
+ if (!ice_is_feature_supported(pf, ICE_F_SRIOV_LAG) ||
+ !lag || !lag->upper_netdev)
return false;
rcu_read_lock();
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.h b/drivers/net/ethernet/intel/ice/ice_lag.h
index 183b38792ef2..f77ebcd61042 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.h
+++ b/drivers/net/ethernet/intel/ice/ice_lag.h
@@ -14,7 +14,11 @@ enum ice_lag_role {
ICE_LAG_UNSET
};
-#define ICE_LAG_INVALID_PORT 0xFF
+#define ICE_LAG_INVALID_PORT 0xFF
+#define ICE_LAGP_IDX 0
+#define ICE_LAGS_IDX 1
+#define ICE_LAGP_M 0x1
+#define ICE_LAGS_M 0x2
#define ICE_LAG_RESET_RETRIES 5
#define ICE_SW_DEFAULT_PROFILE 0
@@ -41,11 +45,26 @@ struct ice_lag {
u8 active_port; /* lport value for the current active port */
u8 bonded:1; /* currently bonded */
u8 primary:1; /* this is primary */
+ u8 bond_aa:1; /* is this bond active-active */
+ u8 need_fltr_cfg:1; /* fltrs for A/A bond still need to be made */
+ u8 port_bitmap:2; /* bitmap of active ports */
+ u8 bond_lport_pri; /* lport value for primary PF */
+ u8 bond_lport_sec; /* lport value for secondary PF */
+
+ /* q_home keeps track of which interface the q is currently on */
+ u8 q_home[ICE_MAX_SRIOV_VFS][ICE_MAX_RSS_QS_PER_VF];
+
+ /* placeholder VSI for hanging VF queues from on secondary interface */
+ struct ice_vsi *sec_vf[ICE_MAX_SRIOV_VFS];
+
u16 pf_recipe;
u16 lport_recipe;
- u16 pf_rule_id;
+ u16 act_act_recipe;
+ u16 pf_rx_rule_id;
+ u16 pf_tx_rule_id;
u16 cp_rule_idx;
u16 lport_rule_idx;
+ u16 act_act_rule_idx;
u8 role;
};
@@ -63,10 +82,12 @@ struct ice_lag_work {
} info;
};
-void ice_lag_move_new_vf_nodes(struct ice_vf *vf);
+void ice_lag_aa_failover(struct ice_lag *lag, u8 dest, struct ice_pf *e_pf);
int ice_init_lag(struct ice_pf *pf);
void ice_deinit_lag(struct ice_pf *pf);
void ice_lag_rebuild(struct ice_pf *pf);
bool ice_lag_is_switchdev_running(struct ice_pf *pf);
void ice_lag_move_vf_nodes_cfg(struct ice_lag *lag, u8 src_prt, u8 dst_prt);
+u8 ice_lag_prepare_vf_reset(struct ice_lag *lag);
+void ice_lag_complete_vf_reset(struct ice_lag *lag, u8 act_prt);
#endif /* _ICE_LAG_H_ */
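The header above pairs ice_lag_prepare_vf_reset() with ice_lag_complete_vf_reset(): the former returns a port value (act_prt) that the latter takes back once the reset is finished. The snippet below is only an illustrative sketch of that pairing; the wrapper function and its name are hypothetical and not part of the driver.

/* Hypothetical wrapper, illustration only: bracket a VF reset with the
 * prepare/complete helpers declared above.
 */
static void example_reset_vf_under_lag(struct ice_lag *lag)
{
	u8 act_prt;

	act_prt = ice_lag_prepare_vf_reset(lag);	/* remember active lport */

	/* ... perform the actual VF reset here ... */

	ice_lag_complete_vf_reset(lag, act_prt);	/* restore active lport */
}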
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index d384ddfcb83e..185672c7e17d 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -160,64 +160,6 @@ struct ice_fltr_desc {
(0x1ULL << ICE_FXD_FLTR_WB_QW1_FAIL_PROF_S)
#define ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES 0x1ULL
-struct ice_rx_ptype_decoded {
- u32 known:1;
- u32 outer_ip:1;
- u32 outer_ip_ver:2;
- u32 outer_frag:1;
- u32 tunnel_type:3;
- u32 tunnel_end_prot:2;
- u32 tunnel_end_frag:1;
- u32 inner_prot:4;
- u32 payload_layer:3;
-};
-
-enum ice_rx_ptype_outer_ip {
- ICE_RX_PTYPE_OUTER_L2 = 0,
- ICE_RX_PTYPE_OUTER_IP = 1,
-};
-
-enum ice_rx_ptype_outer_ip_ver {
- ICE_RX_PTYPE_OUTER_NONE = 0,
- ICE_RX_PTYPE_OUTER_IPV4 = 1,
- ICE_RX_PTYPE_OUTER_IPV6 = 2,
-};
-
-enum ice_rx_ptype_outer_fragmented {
- ICE_RX_PTYPE_NOT_FRAG = 0,
- ICE_RX_PTYPE_FRAG = 1,
-};
-
-enum ice_rx_ptype_tunnel_type {
- ICE_RX_PTYPE_TUNNEL_NONE = 0,
- ICE_RX_PTYPE_TUNNEL_IP_IP = 1,
- ICE_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
- ICE_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
- ICE_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
-};
-
-enum ice_rx_ptype_tunnel_end_prot {
- ICE_RX_PTYPE_TUNNEL_END_NONE = 0,
- ICE_RX_PTYPE_TUNNEL_END_IPV4 = 1,
- ICE_RX_PTYPE_TUNNEL_END_IPV6 = 2,
-};
-
-enum ice_rx_ptype_inner_prot {
- ICE_RX_PTYPE_INNER_PROT_NONE = 0,
- ICE_RX_PTYPE_INNER_PROT_UDP = 1,
- ICE_RX_PTYPE_INNER_PROT_TCP = 2,
- ICE_RX_PTYPE_INNER_PROT_SCTP = 3,
- ICE_RX_PTYPE_INNER_PROT_ICMP = 4,
- ICE_RX_PTYPE_INNER_PROT_TIMESYNC = 5,
-};
-
-enum ice_rx_ptype_payload_layer {
- ICE_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
- ICE_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
- ICE_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
- ICE_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
-};
-
/* Rx Flex Descriptor
* This descriptor is used instead of the legacy version descriptor when
* ice_rlan_ctx.adv_desc is set
@@ -287,7 +229,7 @@ struct ice_32b_rx_flex_desc_nic {
__le16 status_error1;
u8 flexi_flags2;
u8 ts_low;
- __le16 l2tag2_1st;
+ __le16 raw_csum;
__le16 l2tag2_2nd;
/* Qword 3 */
@@ -400,6 +342,9 @@ enum ice_flg64_bits {
/* for ice_32byte_rx_flex_desc.pkt_length member */
#define ICE_RX_FLX_DESC_PKT_LEN_M (0x3FFF) /* 14-bits */
+/* ice_32byte_rx_flex_desc::hdr_len_sph_flex_flags1 */
+#define ICE_RX_FLEX_DESC_HDR_LEN_M GENMASK(10, 0)
+
enum ice_rx_flex_desc_status_error_0_bits {
/* Note: These are predefined bit offsets */
ICE_RX_FLEX_DESC_STATUS0_DD_S = 0,
@@ -429,29 +374,21 @@ enum ice_rx_flex_desc_status_error_1_bits {
ICE_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
};
-#define ICE_RXQ_CTX_SIZE_DWORDS 8
-#define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32))
#define ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS 22
#define ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS 5
#define GLTCLAN_CQ_CNTX(i, CQ) (GLTCLAN_CQ_CNTX0(CQ) + ((i) * 0x0800))
-/* RLAN Rx queue context data
- *
- * The sizes of the variables may be larger than needed due to crossing byte
- * boundaries. If we do not have the width of the variable set to the correct
- * size then we could end up shifting bits off the top of the variable when the
- * variable is at the top of a byte and crosses over into the next byte.
- */
+/* RLAN Rx queue context data */
struct ice_rlan_ctx {
u16 head;
- u16 cpuid; /* bigger than needed, see above for reason */
+ u8 cpuid;
#define ICE_RLAN_BASE_S 7
u64 base;
u16 qlen;
#define ICE_RLAN_CTX_DBUF_S 7
- u16 dbuf; /* bigger than needed, see above for reason */
+ u8 dbuf;
#define ICE_RLAN_CTX_HBUF_S 6
- u16 hbuf; /* bigger than needed, see above for reason */
+ u8 hbuf;
u8 dtype;
u8 dsize;
u8 crcstrip;
@@ -459,29 +396,15 @@ struct ice_rlan_ctx {
u8 hsplit_0;
u8 hsplit_1;
u8 showiv;
- u32 rxmax; /* bigger than needed, see above for reason */
+ u16 rxmax;
u8 tphrdesc_ena;
u8 tphwdesc_ena;
u8 tphdata_ena;
u8 tphhead_ena;
- u16 lrxqthresh; /* bigger than needed, see above for reason */
+ u8 lrxqthresh;
u8 prefena; /* NOTE: normally must be set to 1 at init */
};
-struct ice_ctx_ele {
- u16 offset;
- u16 size_of;
- u16 width;
- u16 lsb;
-};
-
-#define ICE_CTX_STORE(_struct, _ele, _width, _lsb) { \
- .offset = offsetof(struct _struct, _ele), \
- .size_of = sizeof_field(struct _struct, _ele), \
- .width = _width, \
- .lsb = _lsb, \
-}
-
/* for hsplit_0 field of Rx RLAN context */
enum ice_rlan_ctx_rx_hsplit_0 {
ICE_RLAN_RX_HSPLIT_0_NO_SPLIT = 0,
@@ -558,10 +481,15 @@ enum ice_tx_desc_len_fields {
struct ice_tx_ctx_desc {
__le32 tunneling_params;
__le16 l2tag2;
- __le16 rsvd;
+ __le16 gcs;
__le64 qw1;
};
+#define ICE_TX_GCS_DESC_START_M GENMASK(7, 0)
+#define ICE_TX_GCS_DESC_OFFSET_M GENMASK(11, 8)
+#define ICE_TX_GCS_DESC_TYPE_M GENMASK(14, 12)
+#define ICE_TX_GCS_DESC_CSUM_PSH 1
+
#define ICE_TXD_CTX_QW1_CMD_S 4
#define ICE_TXD_CTX_QW1_CMD_M (0x7FUL << ICE_TXD_CTX_QW1_CMD_S)
@@ -609,18 +537,12 @@ enum ice_tx_ctx_desc_eipt_offload {
#define ICE_LAN_TXQ_MAX_QGRPS 127
#define ICE_LAN_TXQ_MAX_QDIS 1023
-/* Tx queue context data
- *
- * The sizes of the variables may be larger than needed due to crossing byte
- * boundaries. If we do not have the width of the variable set to the correct
- * size then we could end up shifting bits off the top of the variable when the
- * variable is at the top of a byte and crosses over into the next byte.
- */
+/* Tx queue context data */
struct ice_tlan_ctx {
#define ICE_TLAN_CTX_BASE_S 7
u64 base; /* base is defined in 128-byte units */
u8 port_num;
- u16 cgd_num; /* bigger than needed, see above for reason */
+ u8 cgd_num;
u8 pf_num;
u16 vmvf_num;
u8 vmvf_type;
@@ -631,7 +553,7 @@ struct ice_tlan_ctx {
u8 tsyn_ena;
u8 internal_usage_flag;
u8 alt_vlan;
- u16 cpuid; /* bigger than needed, see above for reason */
+ u8 cpuid;
u8 wb_mode;
u8 tphrd_desc;
u8 tphrd;
@@ -640,7 +562,7 @@ struct ice_tlan_ctx {
u16 qnum_in_func;
u8 itr_notification_mode;
u8 adjust_prof_id;
- u32 qlen; /* bigger than needed, see above for reason */
+ u16 qlen;
u8 quanta_prof_idx;
u8 tso_ena;
u16 tso_qnum;
@@ -648,269 +570,47 @@ struct ice_tlan_ctx {
u8 drop_ena;
u8 cache_prof_idx;
u8 pkt_shaper_prof_idx;
- u8 int_q_state; /* width not needed - internal - DO NOT WRITE!!! */
};
-/* The ice_ptype_lkup table is used to convert from the 10-bit ptype in the
- * hardware to a bit-field that can be used by SW to more easily determine the
- * packet type.
- *
- * Macros are used to shorten the table lines and make this table human
- * readable.
- *
- * We store the PTYPE in the top byte of the bit field - this is just so that
- * we can check that the table doesn't have a row missing, as the index into
- * the table should be the PTYPE.
- *
- * Typical work flow:
- *
- * IF NOT ice_ptype_lkup[ptype].known
- * THEN
- * Packet is unknown
- * ELSE IF ice_ptype_lkup[ptype].outer_ip == ICE_RX_PTYPE_OUTER_IP
- * Use the rest of the fields to look at the tunnels, inner protocols, etc
- * ELSE
- * Use the enum ice_rx_l2_ptype to decode the packet type
- * ENDIF
- */
-#define ICE_PTYPES \
- /* L2 Packet types */ \
- ICE_PTT_UNUSED_ENTRY(0), \
- ICE_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), \
- ICE_PTT_UNUSED_ENTRY(2), \
- ICE_PTT_UNUSED_ENTRY(3), \
- ICE_PTT_UNUSED_ENTRY(4), \
- ICE_PTT_UNUSED_ENTRY(5), \
- ICE_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \
- ICE_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \
- ICE_PTT_UNUSED_ENTRY(8), \
- ICE_PTT_UNUSED_ENTRY(9), \
- ICE_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \
- ICE_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \
- ICE_PTT_UNUSED_ENTRY(12), \
- ICE_PTT_UNUSED_ENTRY(13), \
- ICE_PTT_UNUSED_ENTRY(14), \
- ICE_PTT_UNUSED_ENTRY(15), \
- ICE_PTT_UNUSED_ENTRY(16), \
- ICE_PTT_UNUSED_ENTRY(17), \
- ICE_PTT_UNUSED_ENTRY(18), \
- ICE_PTT_UNUSED_ENTRY(19), \
- ICE_PTT_UNUSED_ENTRY(20), \
- ICE_PTT_UNUSED_ENTRY(21), \
- \
- /* Non Tunneled IPv4 */ \
- ICE_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), \
- ICE_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), \
- ICE_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(25), \
- ICE_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4), \
- ICE_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), \
- ICE_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), \
- \
- /* IPv4 --> IPv4 */ \
- ICE_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), \
- ICE_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), \
- ICE_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(32), \
- ICE_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4), \
- ICE_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), \
- ICE_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), \
- \
- /* IPv4 --> IPv6 */ \
- ICE_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), \
- ICE_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), \
- ICE_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(39), \
- ICE_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4), \
- ICE_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), \
- ICE_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), \
- \
- /* IPv4 --> GRE/NAT */ \
- ICE_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), \
- \
- /* IPv4 --> GRE/NAT --> IPv4 */ \
- ICE_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), \
- ICE_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), \
- ICE_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(47), \
- ICE_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), \
- ICE_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), \
- ICE_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), \
- \
- /* IPv4 --> GRE/NAT --> IPv6 */ \
- ICE_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), \
- ICE_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), \
- ICE_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(54), \
- ICE_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), \
- ICE_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), \
- ICE_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), \
- \
- /* IPv4 --> GRE/NAT --> MAC */ \
- ICE_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), \
- \
- /* IPv4 --> GRE/NAT --> MAC --> IPv4 */ \
- ICE_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), \
- ICE_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), \
- ICE_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(62), \
- ICE_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), \
- ICE_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), \
- ICE_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), \
- \
- /* IPv4 --> GRE/NAT -> MAC --> IPv6 */ \
- ICE_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), \
- ICE_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), \
- ICE_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(69), \
- ICE_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), \
- ICE_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), \
- ICE_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), \
- \
- /* IPv4 --> GRE/NAT --> MAC/VLAN */ \
- ICE_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), \
- \
- /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ \
- ICE_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), \
- ICE_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), \
- ICE_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(77), \
- ICE_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), \
- ICE_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), \
- ICE_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), \
- \
- /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ \
- ICE_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), \
- ICE_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), \
- ICE_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(84), \
- ICE_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), \
- ICE_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), \
- ICE_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), \
- \
- /* Non Tunneled IPv6 */ \
- ICE_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), \
- ICE_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), \
- ICE_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(91), \
- ICE_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4), \
- ICE_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), \
- ICE_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), \
- \
- /* IPv6 --> IPv4 */ \
- ICE_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), \
- ICE_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3), \
- ICE_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(98), \
- ICE_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4), \
- ICE_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), \
- ICE_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), \
- \
- /* IPv6 --> IPv6 */ \
- ICE_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3), \
- ICE_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), \
- ICE_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(105), \
- ICE_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4), \
- ICE_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), \
- ICE_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), \
- \
- /* IPv6 --> GRE/NAT */ \
- ICE_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), \
- \
- /* IPv6 --> GRE/NAT -> IPv4 */ \
- ICE_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), \
- ICE_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), \
- ICE_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(113), \
- ICE_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), \
- ICE_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), \
- ICE_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), \
- \
- /* IPv6 --> GRE/NAT -> IPv6 */ \
- ICE_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), \
- ICE_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), \
- ICE_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(120), \
- ICE_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), \
- ICE_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), \
- ICE_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), \
- \
- /* IPv6 --> GRE/NAT -> MAC */ \
- ICE_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), \
- \
- /* IPv6 --> GRE/NAT -> MAC -> IPv4 */ \
- ICE_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), \
- ICE_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), \
- ICE_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(128), \
- ICE_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), \
- ICE_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), \
- ICE_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), \
- \
- /* IPv6 --> GRE/NAT -> MAC -> IPv6 */ \
- ICE_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), \
- ICE_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), \
- ICE_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(135), \
- ICE_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), \
- ICE_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), \
- ICE_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), \
- \
- /* IPv6 --> GRE/NAT -> MAC/VLAN */ \
- ICE_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), \
- \
- /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ \
- ICE_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), \
- ICE_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), \
- ICE_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(143), \
- ICE_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), \
- ICE_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), \
- ICE_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), \
- \
- /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ \
- ICE_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), \
- ICE_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), \
- ICE_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(150), \
- ICE_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), \
- ICE_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), \
- ICE_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
-
-#define ICE_NUM_DEFINED_PTYPES 154
-
-/* macro to make the table lines short, use explicit indexing with [PTYPE] */
-#define ICE_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
- [PTYPE] = { \
- 1, \
- ICE_RX_PTYPE_OUTER_##OUTER_IP, \
- ICE_RX_PTYPE_OUTER_##OUTER_IP_VER, \
- ICE_RX_PTYPE_##OUTER_FRAG, \
- ICE_RX_PTYPE_TUNNEL_##T, \
- ICE_RX_PTYPE_TUNNEL_END_##TE, \
- ICE_RX_PTYPE_##TEF, \
- ICE_RX_PTYPE_INNER_PROT_##I, \
- ICE_RX_PTYPE_PAYLOAD_LAYER_##PL }
-
-#define ICE_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
-
-/* shorter macros makes the table fit but are terse */
-#define ICE_RX_PTYPE_NOF ICE_RX_PTYPE_NOT_FRAG
-#define ICE_RX_PTYPE_FRG ICE_RX_PTYPE_FRAG
-
-/* Lookup table mapping in the 10-bit HW PTYPE to the bit field for decoding */
-static const struct ice_rx_ptype_decoded ice_ptype_lkup[BIT(10)] = {
- ICE_PTYPES
-
- /* unused entries */
- [ICE_NUM_DEFINED_PTYPES ... 1023] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+#define ICE_TXTIME_TX_DESC_IDX_M GENMASK(12, 0)
+#define ICE_TXTIME_STAMP_M GENMASK(31, 13)
+
+/* Tx time stamp descriptor */
+struct ice_ts_desc {
+ __le32 tx_desc_idx_tstamp;
};
-static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype)
-{
- return ice_ptype_lkup[ptype];
-}
+#define ICE_TS_DESC(R, i) (&(((struct ice_ts_desc *)((R)->desc))[i]))
+#define ICE_TXTIME_MAX_QUEUE 2047
+#define ICE_SET_TXTIME_MAX_Q_AMOUNT 127
+#define ICE_TXTIME_FETCH_TS_DESC_DFLT 8
+#define ICE_TXTIME_FETCH_PROFILE_CNT 16
+
+/* Tx Time queue context data */
+struct ice_txtime_ctx {
+#define ICE_TXTIME_CTX_BASE_S 7
+ u64 base; /* base is defined in 128-byte units */
+ u8 pf_num;
+ u16 vmvf_num;
+ u8 vmvf_type;
+ u16 src_vsi;
+ u8 cpuid;
+ u8 tphrd_desc;
+ u16 qlen;
+ u8 timer_num;
+ u8 txtime_ena_q;
+ u8 drbell_mode_32;
+#define ICE_TXTIME_CTX_DRBELL_MODE_32 1
+ u8 ts_res;
+#define ICE_TXTIME_CTX_RESOLUTION_128NS 7
+ u8 ts_round_type;
+ u8 ts_pacing_slot;
+#define ICE_TXTIME_CTX_FETCH_PROF_ID_0 0
+ u8 merging_ena;
+ u8 ts_fetch_prof_id;
+ u8 ts_fetch_cache_line_aln_thld;
+ u8 tx_pipe_delay_mode;
+};
#endif /* _ICE_LAN_TX_RX_H_ */
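The new struct ice_ts_desc above is a single little-endian 32-bit word: ICE_TXTIME_TX_DESC_IDX_M (GENMASK(12, 0)) carries the Tx descriptor index and ICE_TXTIME_STAMP_M (GENMASK(31, 13)) carries the transmit timestamp. A minimal sketch of packing and unpacking that word with the generic FIELD_PREP()/FIELD_GET() helpers follows; the two example_* functions are hypothetical and exist only to illustrate the bit layout.

#include <linux/bitfield.h>

/* Illustration only: compose the tx_desc_idx_tstamp word defined above. */
static __le32 example_ts_desc_pack(u16 tx_desc_idx, u32 tstamp)
{
	u32 val = FIELD_PREP(ICE_TXTIME_TX_DESC_IDX_M, tx_desc_idx) |
		  FIELD_PREP(ICE_TXTIME_STAMP_M, tstamp);

	return cpu_to_le32(val);
}

/* Illustration only: pull the timestamp back out of a Tx time descriptor. */
static u32 example_ts_desc_stamp(const struct ice_ts_desc *desc)
{
	return FIELD_GET(ICE_TXTIME_STAMP_M,
			 le32_to_cpu(desc->tx_desc_idx_tstamp));
}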
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 558422120312..15621707fbf8 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -7,7 +7,7 @@
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
-#include "ice_devlink.h"
+#include "ice_type.h"
#include "ice_vsi_vlan_ops.h"
/**
@@ -21,14 +21,14 @@ const char *ice_vsi_type_str(enum ice_vsi_type vsi_type)
return "ICE_VSI_PF";
case ICE_VSI_VF:
return "ICE_VSI_VF";
+ case ICE_VSI_SF:
+ return "ICE_VSI_SF";
case ICE_VSI_CTRL:
return "ICE_VSI_CTRL";
case ICE_VSI_CHNL:
return "ICE_VSI_CHNL";
case ICE_VSI_LB:
return "ICE_VSI_LB";
- case ICE_VSI_SWITCHDEV_CTRL:
- return "ICE_VSI_SWITCHDEV_CTRL";
default:
return "unknown";
}
@@ -117,14 +117,8 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
if (!vsi->q_vectors)
goto err_vectors;
- vsi->af_xdp_zc_qps = bitmap_zalloc(max_t(int, vsi->alloc_txq, vsi->alloc_rxq), GFP_KERNEL);
- if (!vsi->af_xdp_zc_qps)
- goto err_zc_qps;
-
return 0;
-err_zc_qps:
- devm_kfree(dev, vsi->q_vectors);
err_vectors:
devm_kfree(dev, vsi->rxq_map);
err_rxq_map:
@@ -144,7 +138,7 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
{
switch (vsi->type) {
case ICE_VSI_PF:
- case ICE_VSI_SWITCHDEV_CTRL:
+ case ICE_VSI_SF:
case ICE_VSI_CTRL:
case ICE_VSI_LB:
/* a user could change the values of num_[tr]x_desc using
@@ -163,6 +157,16 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
}
}
+static u16 ice_get_rxq_count(struct ice_pf *pf)
+{
+ return min(ice_get_avail_rxq_count(pf), num_online_cpus());
+}
+
+static u16 ice_get_txq_count(struct ice_pf *pf)
+{
+ return min(ice_get_avail_txq_count(pf), num_online_cpus());
+}
+
/**
* ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
* @vsi: the VSI being configured
@@ -184,9 +188,7 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
vsi->alloc_txq = vsi->req_txq;
vsi->num_txq = vsi->req_txq;
} else {
- vsi->alloc_txq = min3(pf->num_lan_msix,
- ice_get_avail_txq_count(pf),
- (u16)num_online_cpus());
+ vsi->alloc_txq = ice_get_txq_count(pf);
}
pf->num_lan_tx = vsi->alloc_txq;
@@ -199,32 +201,19 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
vsi->alloc_rxq = vsi->req_rxq;
vsi->num_rxq = vsi->req_rxq;
} else {
- vsi->alloc_rxq = min3(pf->num_lan_msix,
- ice_get_avail_rxq_count(pf),
- (u16)num_online_cpus());
+ vsi->alloc_rxq = ice_get_rxq_count(pf);
}
}
pf->num_lan_rx = vsi->alloc_rxq;
- vsi->num_q_vectors = min_t(int, pf->num_lan_msix,
- max_t(int, vsi->alloc_rxq,
- vsi->alloc_txq));
+ vsi->num_q_vectors = max(vsi->alloc_rxq, vsi->alloc_txq);
break;
- case ICE_VSI_SWITCHDEV_CTRL:
- /* The number of queues for ctrl VSI is equal to number of PRs
- * Each ring is associated to the corresponding VF_PR netdev.
- * Tx and Rx rings are always equal
- */
- if (vsi->req_txq && vsi->req_rxq) {
- vsi->alloc_txq = vsi->req_txq;
- vsi->alloc_rxq = vsi->req_rxq;
- } else {
- vsi->alloc_txq = 1;
- vsi->alloc_rxq = 1;
- }
-
+ case ICE_VSI_SF:
+ vsi->alloc_txq = 1;
+ vsi->alloc_rxq = 1;
vsi->num_q_vectors = 1;
+ vsi->irq_dyn_alloc = true;
break;
case ICE_VSI_VF:
if (vf->num_req_qs)
@@ -328,8 +317,6 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi)
dev = ice_pf_to_dev(pf);
- bitmap_free(vsi->af_xdp_zc_qps);
- vsi->af_xdp_zc_qps = NULL;
/* free the ring and vector containers */
devm_kfree(dev, vsi->q_vectors);
vsi->q_vectors = NULL;
@@ -450,7 +437,7 @@ err_out:
* This deallocates the VSI's queue resources, removes it from the PF's
* VSI array if necessary, and deallocates the VSI
*/
-static void ice_vsi_free(struct ice_vsi *vsi)
+void ice_vsi_free(struct ice_vsi *vsi)
{
struct ice_pf *pf = NULL;
struct device *dev;
@@ -474,6 +461,7 @@ static void ice_vsi_free(struct ice_vsi *vsi)
ice_vsi_free_stats(vsi);
ice_vsi_free_arrays(vsi);
+ mutex_destroy(&vsi->xdp_state_lock);
mutex_unlock(&pf->sw_mutex);
devm_kfree(dev, vsi);
}
@@ -496,8 +484,7 @@ static irqreturn_t ice_msix_clean_ctrl_vsi(int __always_unused irq, void *data)
if (!q_vector->tx.tx_ring)
return IRQ_HANDLED;
-#define FDIR_RX_DESC_CLEAN_BUDGET 64
- ice_clean_rx_irq(q_vector->rx.rx_ring, FDIR_RX_DESC_CLEAN_BUDGET);
+ ice_clean_ctrl_rx_irq(q_vector->rx.rx_ring);
ice_clean_ctrl_tx_irq(q_vector->tx.tx_ring);
return IRQ_HANDLED;
@@ -522,22 +509,6 @@ static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
return IRQ_HANDLED;
}
-static irqreturn_t ice_eswitch_msix_clean_rings(int __always_unused irq, void *data)
-{
- struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
- struct ice_pf *pf = q_vector->vsi->back;
- struct ice_repr *repr;
- unsigned long id;
-
- if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring)
- return IRQ_HANDLED;
-
- xa_for_each(&pf->eswitch.reprs, id, repr)
- napi_schedule(&repr->q_vector->napi);
-
- return IRQ_HANDLED;
-}
-
/**
* ice_vsi_alloc_stat_arrays - Allocate statistics arrays
* @vsi: VSI pointer
@@ -599,12 +570,11 @@ ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch)
return -ENOMEM;
}
+ vsi->irq_dyn_alloc = pci_msix_can_alloc_dyn(vsi->back->pdev);
+
switch (vsi->type) {
- case ICE_VSI_SWITCHDEV_CTRL:
- /* Setup eswitch MSIX irq handler for VSI */
- vsi->irq_handler = ice_eswitch_msix_clean_rings;
- break;
case ICE_VSI_PF:
+ case ICE_VSI_SF:
/* Setup default MSIX irq handler for VSI */
vsi->irq_handler = ice_msix_clean_rings;
break;
@@ -641,7 +611,7 @@ ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch)
*
* returns a pointer to a VSI on success, NULL on failure.
*/
-static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf)
+struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_vsi *vsi = NULL;
@@ -673,6 +643,8 @@ static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf)
pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
pf->next_vsi);
+ mutex_init(&vsi->xdp_state_lock);
+
unlock_pf:
mutex_unlock(&pf->sw_mutex);
return vsi;
@@ -860,7 +832,13 @@ bool ice_is_safe_mode(struct ice_pf *pf)
*/
bool ice_is_rdma_ena(struct ice_pf *pf)
{
- return test_bit(ICE_FLAG_RDMA_ENA, pf->flags);
+ union devlink_param_value value;
+ int err;
+
+ err = devl_param_driverinit_value_get(priv_to_devlink(pf),
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
+ &value);
+ return err ? test_bit(ICE_FLAG_RDMA_ENA, pf->flags) : value.vbool;
}
/**
@@ -933,7 +911,7 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
max_rss_size);
vsi->rss_lut_type = ICE_LUT_PF;
break;
- case ICE_VSI_SWITCHDEV_CTRL:
+ case ICE_VSI_SF:
vsi->rss_table_size = ICE_LUT_VSI_SIZE;
vsi->rss_size = min_t(u16, num_online_cpus(), max_rss_size);
vsi->rss_lut_type = ICE_LUT_VSI;
@@ -1185,6 +1163,7 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
break;
case ICE_VSI_VF:
+ case ICE_VSI_SF:
/* VF VSI will get a small RSS table which is a VSI LUT type */
lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
break;
@@ -1205,12 +1184,11 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
static void
ice_chnl_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
{
- struct ice_pf *pf = vsi->back;
u16 qcount, qmap;
u8 offset = 0;
int pow;
- qcount = min_t(int, vsi->num_rxq, pf->num_lan_msix);
+ qcount = vsi->num_rxq;
pow = order_base_2(qcount);
qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, offset);
@@ -1263,7 +1241,7 @@ static int ice_vsi_init(struct ice_vsi *vsi, u32 vsi_flags)
case ICE_VSI_PF:
ctxt->flags = ICE_AQ_VSI_TYPE_PF;
break;
- case ICE_VSI_SWITCHDEV_CTRL:
+ case ICE_VSI_SF:
case ICE_VSI_CHNL:
ctxt->flags = ICE_AQ_VSI_TYPE_VMDQ2;
break;
@@ -1449,9 +1427,12 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->reg_idx = vsi->rxq_map[i];
ring->vsi = vsi;
ring->netdev = vsi->netdev;
- ring->dev = dev;
ring->count = vsi->num_rx_desc;
ring->cached_phctime = pf->ptp.cached_phc_time;
+
+ if (ice_is_feature_supported(pf, ICE_F_GCS))
+ ring->flags |= ICE_RX_FLAGS_RING_GCS;
+
WRITE_ONCE(vsi->rx_rings[i], ring);
}
@@ -1596,7 +1577,7 @@ static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi)
return;
}
- status = ice_add_avf_rss_cfg(&pf->hw, vsi, ICE_DEFAULT_RSS_HENA);
+ status = ice_add_avf_rss_cfg(&pf->hw, vsi, ICE_DEFAULT_RSS_HASHCFG);
if (status)
dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %d\n",
vsi->vsi_num, status);
@@ -1732,6 +1713,12 @@ bool ice_pf_state_is_nominal(struct ice_pf *pf)
return true;
}
+#define ICE_FW_MODE_REC_M BIT(1)
+bool ice_is_recovery_mode(struct ice_hw *hw)
+{
+ return rd32(hw, GL_MNG_FWSM) & ICE_FW_MODE_REC_M;
+}
+
/**
* ice_update_eth_stats - Update VSI-specific ethernet statistics counters
* @vsi: the VSI to be updated
@@ -1790,9 +1777,8 @@ void ice_update_eth_stats(struct ice_vsi *vsi)
* @prio: priority for the RXDID for this queue
* @ena_ts: true to enable timestamp and false to disable timestamp
*/
-void
-ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio,
- bool ena_ts)
+void ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio,
+ bool ena_ts)
{
int regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
@@ -2077,12 +2063,15 @@ static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
}
/**
- * ice_cfg_sw_lldp - Config switch rules for LLDP packet handling
+ * ice_vsi_cfg_sw_lldp - Config switch rules for LLDP packet handling
* @vsi: the VSI being configured
* @tx: bool to determine Tx or Rx rule
* @create: bool to determine create or remove Rule
+ *
+ * Adding an ethtype Tx rule to the uplink VSI results in it being applied
+ * to the whole port, so LLDP transmission for VFs will be blocked too.
*/
-void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
+void ice_vsi_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
{
int (*eth_fltr)(struct ice_vsi *v, u16 type, u16 flag,
enum ice_sw_fwd_act_type act);
@@ -2097,19 +2086,59 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_TX,
ICE_DROP_PACKET);
} else {
- if (ice_fw_supports_lldp_fltr_ctrl(&pf->hw)) {
- status = ice_lldp_fltr_add_remove(&pf->hw, vsi->vsi_num,
- create);
- } else {
+ if (!test_bit(ICE_FLAG_LLDP_AQ_FLTR, pf->flags)) {
status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_RX,
ICE_FWD_TO_VSI);
+ if (!status || !create)
+ goto report;
+
+ dev_info(dev,
+ "Failed to add generic LLDP Rx filter on VSI %i error: %d, falling back to specialized AQ control\n",
+ vsi->vsi_num, status);
}
+
+ status = ice_lldp_fltr_add_remove(&pf->hw, vsi, create);
+ if (!status)
+ set_bit(ICE_FLAG_LLDP_AQ_FLTR, pf->flags);
+
}
+report:
if (status)
- dev_dbg(dev, "Fail %s %s LLDP rule on VSI %i error: %d\n",
- create ? "adding" : "removing", tx ? "TX" : "RX",
- vsi->vsi_num, status);
+ dev_warn(dev, "Failed to %s %s LLDP rule on VSI %i error: %d\n",
+ create ? "add" : "remove", tx ? "Tx" : "Rx",
+ vsi->vsi_num, status);
+}
+
+/**
+ * ice_cfg_sw_rx_lldp - Enable/disable software handling of LLDP
+ * @pf: the PF being configured
+ * @enable: enable or disable
+ *
+ * Configure switch rules to enable/disable LLDP handling by software
+ * across PF.
+ */
+void ice_cfg_sw_rx_lldp(struct ice_pf *pf, bool enable)
+{
+ struct ice_vsi *vsi;
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ vsi = ice_get_main_vsi(pf);
+ ice_vsi_cfg_sw_lldp(vsi, false, enable);
+
+ if (!test_bit(ICE_FLAG_SRIOV_ENA, pf->flags))
+ return;
+
+ ice_for_each_vf(pf, bkt, vf) {
+ vsi = ice_get_vf_vsi(vf);
+
+ if (WARN_ON(!vsi))
+ continue;
+
+ if (ice_vf_is_lldp_ena(vf))
+ ice_vsi_cfg_sw_lldp(vsi, false, enable);
+ }
}
/**
@@ -2145,7 +2174,7 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi)
case ICE_VSI_CHNL:
case ICE_VSI_LB:
case ICE_VSI_PF:
- case ICE_VSI_SWITCHDEV_CTRL:
+ case ICE_VSI_SF:
max_agg_nodes = ICE_MAX_PF_AGG_NODES;
agg_node_id_start = ICE_PF_AGG_NODE_ID_START;
agg_node_iter = &pf->pf_agg_node[0];
@@ -2273,10 +2302,8 @@ static int ice_vsi_cfg_tc_lan(struct ice_pf *pf, struct ice_vsi *vsi)
/**
* ice_vsi_cfg_def - configure default VSI based on the type
* @vsi: pointer to VSI
- * @params: the parameters to configure this VSI with
*/
-static int
-ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
+static int ice_vsi_cfg_def(struct ice_vsi *vsi)
{
struct device *dev = ice_pf_to_dev(vsi->back);
struct ice_pf *pf = vsi->back;
@@ -2284,7 +2311,7 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
vsi->vsw = pf->first_sw;
- ret = ice_vsi_alloc_def(vsi, params->ch);
+ ret = ice_vsi_alloc_def(vsi, vsi->ch);
if (ret)
return ret;
@@ -2309,7 +2336,7 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
ice_vsi_set_tc_cfg(vsi);
/* create the VSI */
- ret = ice_vsi_init(vsi, params->flags);
+ ret = ice_vsi_init(vsi, vsi->flags);
if (ret)
goto unroll_get_qs;
@@ -2317,7 +2344,7 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
switch (vsi->type) {
case ICE_VSI_CTRL:
- case ICE_VSI_SWITCHDEV_CTRL:
+ case ICE_VSI_SF:
case ICE_VSI_PF:
ret = ice_vsi_alloc_q_vectors(vsi);
if (ret)
@@ -2331,22 +2358,20 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
if (ret)
goto unroll_vector_base;
- ice_vsi_map_rings_to_vectors(vsi);
-
- /* Associate q_vector rings to napi */
- ice_vsi_set_napi_queues(vsi);
-
- vsi->stat_offsets_loaded = false;
-
if (ice_is_xdp_ena_vsi(vsi)) {
ret = ice_vsi_determine_xdp_res(vsi);
if (ret)
goto unroll_vector_base;
- ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog);
+ ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog,
+ ICE_XDP_CFG_PART);
if (ret)
goto unroll_vector_base;
}
+ ice_vsi_map_rings_to_vectors(vsi);
+
+ vsi->stat_offsets_loaded = false;
+
/* ICE_VSI_CTRL does not need RSS so skip RSS processing */
if (vsi->type != ICE_VSI_CTRL)
/* Do not exit if configuring RSS had an issue, at
@@ -2430,23 +2455,16 @@ unroll_vsi_alloc:
/**
* ice_vsi_cfg - configure a previously allocated VSI
* @vsi: pointer to VSI
- * @params: parameters used to configure this VSI
*/
-int ice_vsi_cfg(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
+int ice_vsi_cfg(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
int ret;
- if (WARN_ON(params->type == ICE_VSI_VF && !params->vf))
+ if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf))
return -EINVAL;
- vsi->type = params->type;
- vsi->port_info = params->pi;
-
- /* For VSIs which don't have a connected VF, this will be NULL */
- vsi->vf = params->vf;
-
- ret = ice_vsi_cfg_def(vsi, params);
+ ret = ice_vsi_cfg_def(vsi);
if (ret)
return ret;
@@ -2476,24 +2494,17 @@ void ice_vsi_decfg(struct ice_vsi *vsi)
struct ice_pf *pf = vsi->back;
int err;
- /* The Rx rule will only exist to remove if the LLDP FW
- * engine is currently stopped
- */
- if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF &&
- !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
- ice_cfg_sw_lldp(vsi, false, false);
-
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
if (err)
dev_err(ice_pf_to_dev(pf), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
vsi->vsi_num, err);
- if (ice_is_xdp_ena_vsi(vsi))
+ if (vsi->xdp_rings)
/* return value check can be skipped here, it always returns
* 0 if reset is in progress
*/
- ice_destroy_xdp_rings(vsi);
+ ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_PART);
ice_vsi_clear_rings(vsi);
ice_vsi_free_q_vectors(vsi);
@@ -2532,7 +2543,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params)
* a port_info structure for it.
*/
if (WARN_ON(!(params->flags & ICE_VSI_FLAG_INIT)) ||
- WARN_ON(!params->pi))
+ WARN_ON(!params->port_info))
return NULL;
vsi = ice_vsi_alloc(pf);
@@ -2541,7 +2552,8 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params)
return NULL;
}
- ret = ice_vsi_cfg(vsi, params);
+ vsi->params = *params;
+ ret = ice_vsi_cfg(vsi);
if (ret)
goto err_vsi_cfg;
@@ -2557,7 +2569,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params)
if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF) {
ice_fltr_add_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX,
ICE_DROP_PACKET);
- ice_cfg_sw_lldp(vsi, true, true);
+ ice_vsi_cfg_sw_lldp(vsi, true, true);
}
if (!vsi->agg_node)
@@ -2590,7 +2602,7 @@ static void ice_vsi_release_msix(struct ice_vsi *vsi)
for (q = 0; q < q_vector->num_ring_tx; q++) {
ice_write_itr(&q_vector->tx, 0);
wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
- if (ice_is_xdp_ena_vsi(vsi)) {
+ if (vsi->xdp_rings) {
u32 xdp_txq = txq + vsi->num_xdp_txq;
wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]), 0);
@@ -2625,7 +2637,6 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
return;
vsi->irqs_ready = false;
- ice_free_cpu_rx_rmap(vsi);
ice_for_each_q_vector(vsi, i) {
int irq_num;
@@ -2638,12 +2649,6 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
vsi->q_vectors[i]->num_ring_rx))
continue;
- /* clear the affinity notifier in the IRQ descriptor */
- if (!IS_ENABLED(CONFIG_RFS_ACCEL))
- irq_set_affinity_notifier(irq_num, NULL);
-
- /* clear the affinity_mask in the IRQ descriptor */
- irq_set_affinity_hint(irq_num, NULL);
synchronize_irq(irq_num);
devm_free_irq(ice_pf_to_dev(pf), irq_num, vsi->q_vectors[i]);
}
@@ -2690,6 +2695,7 @@ void ice_vsi_close(struct ice_vsi *vsi)
if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state))
ice_down(vsi);
+ ice_vsi_clear_napi_queues(vsi);
ice_vsi_free_irq(vsi);
ice_vsi_free_tx_rings(vsi);
ice_vsi_free_rx_rings(vsi);
@@ -2709,7 +2715,8 @@ int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
clear_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
- if (vsi->netdev && vsi->type == ICE_VSI_PF) {
+ if (vsi->netdev && (vsi->type == ICE_VSI_PF ||
+ vsi->type == ICE_VSI_SF)) {
if (netif_running(vsi->netdev)) {
if (!locked)
rtnl_lock();
@@ -2733,144 +2740,108 @@ int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
*/
void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
{
- if (test_bit(ICE_VSI_DOWN, vsi->state))
- return;
+ bool already_down = test_bit(ICE_VSI_DOWN, vsi->state);
set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
- if (vsi->type == ICE_VSI_PF && vsi->netdev) {
+ if (vsi->netdev && (vsi->type == ICE_VSI_PF ||
+ vsi->type == ICE_VSI_SF)) {
if (netif_running(vsi->netdev)) {
if (!locked)
rtnl_lock();
-
- ice_vsi_close(vsi);
+ already_down = test_bit(ICE_VSI_DOWN, vsi->state);
+ if (!already_down)
+ ice_vsi_close(vsi);
if (!locked)
rtnl_unlock();
- } else {
+ } else if (!already_down) {
ice_vsi_close(vsi);
}
- } else if (vsi->type == ICE_VSI_CTRL ||
- vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
+ } else if (vsi->type == ICE_VSI_CTRL && !already_down) {
ice_vsi_close(vsi);
}
}
/**
- * __ice_queue_set_napi - Set the napi instance for the queue
- * @dev: device to which NAPI and queue belong
- * @queue_index: Index of queue
- * @type: queue type as RX or TX
- * @napi: NAPI context
- * @locked: is the rtnl_lock already held
- *
- * Set the napi instance for the queue. Caller indicates the lock status.
- */
-static void
-__ice_queue_set_napi(struct net_device *dev, unsigned int queue_index,
- enum netdev_queue_type type, struct napi_struct *napi,
- bool locked)
-{
- if (!locked)
- rtnl_lock();
- netif_queue_set_napi(dev, queue_index, type, napi);
- if (!locked)
- rtnl_unlock();
-}
-
-/**
- * ice_queue_set_napi - Set the napi instance for the queue
- * @vsi: VSI being configured
- * @queue_index: Index of queue
- * @type: queue type as RX or TX
- * @napi: NAPI context
+ * ice_vsi_set_napi_queues - associate netdev queues with napi
+ * @vsi: VSI pointer
*
- * Set the napi instance for the queue. The rtnl lock state is derived from the
- * execution path.
+ * Associate queue[s] with napi for all vectors.
*/
-void
-ice_queue_set_napi(struct ice_vsi *vsi, unsigned int queue_index,
- enum netdev_queue_type type, struct napi_struct *napi)
+void ice_vsi_set_napi_queues(struct ice_vsi *vsi)
{
- struct ice_pf *pf = vsi->back;
+ struct net_device *netdev = vsi->netdev;
+ int q_idx, v_idx;
- if (!vsi->netdev)
+ if (!netdev)
return;
- if (current_work() == &pf->serv_task ||
- test_bit(ICE_PREPARED_FOR_RESET, pf->state) ||
- test_bit(ICE_DOWN, pf->state) ||
- test_bit(ICE_SUSPENDED, pf->state))
- __ice_queue_set_napi(vsi->netdev, queue_index, type, napi,
- false);
- else
- __ice_queue_set_napi(vsi->netdev, queue_index, type, napi,
- true);
+ ASSERT_RTNL();
+ ice_for_each_rxq(vsi, q_idx)
+ netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_RX,
+ &vsi->rx_rings[q_idx]->q_vector->napi);
+
+ ice_for_each_txq(vsi, q_idx)
+ netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_TX,
+ &vsi->tx_rings[q_idx]->q_vector->napi);
+ /* Also set the interrupt number for the NAPI */
+ ice_for_each_q_vector(vsi, v_idx) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
+
+ netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq);
+ }
}
/**
- * __ice_q_vector_set_napi_queues - Map queue[s] associated with the napi
- * @q_vector: q_vector pointer
- * @locked: is the rtnl_lock already held
+ * ice_vsi_clear_napi_queues - dissociate netdev queues from napi
+ * @vsi: VSI pointer
*
- * Associate the q_vector napi with all the queue[s] on the vector.
- * Caller indicates the lock status.
+ * Clear the association between all VSI queues and napi.
*/
-void __ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked)
+void ice_vsi_clear_napi_queues(struct ice_vsi *vsi)
{
- struct ice_rx_ring *rx_ring;
- struct ice_tx_ring *tx_ring;
+ struct net_device *netdev = vsi->netdev;
+ int q_idx, v_idx;
- ice_for_each_rx_ring(rx_ring, q_vector->rx)
- __ice_queue_set_napi(q_vector->vsi->netdev, rx_ring->q_index,
- NETDEV_QUEUE_TYPE_RX, &q_vector->napi,
- locked);
+ if (!netdev)
+ return;
- ice_for_each_tx_ring(tx_ring, q_vector->tx)
- __ice_queue_set_napi(q_vector->vsi->netdev, tx_ring->q_index,
- NETDEV_QUEUE_TYPE_TX, &q_vector->napi,
- locked);
- /* Also set the interrupt number for the NAPI */
- netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq);
-}
+ ASSERT_RTNL();
+ /* Clear the NAPI's interrupt number */
+ ice_for_each_q_vector(vsi, v_idx) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
-/**
- * ice_q_vector_set_napi_queues - Map queue[s] associated with the napi
- * @q_vector: q_vector pointer
- *
- * Associate the q_vector napi with all the queue[s] on the vector
- */
-void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector)
-{
- struct ice_rx_ring *rx_ring;
- struct ice_tx_ring *tx_ring;
+ netif_napi_set_irq(&q_vector->napi, -1);
+ }
- ice_for_each_rx_ring(rx_ring, q_vector->rx)
- ice_queue_set_napi(q_vector->vsi, rx_ring->q_index,
- NETDEV_QUEUE_TYPE_RX, &q_vector->napi);
+ ice_for_each_txq(vsi, q_idx)
+ netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_TX, NULL);
- ice_for_each_tx_ring(tx_ring, q_vector->tx)
- ice_queue_set_napi(q_vector->vsi, tx_ring->q_index,
- NETDEV_QUEUE_TYPE_TX, &q_vector->napi);
- /* Also set the interrupt number for the NAPI */
- netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq);
+ ice_for_each_rxq(vsi, q_idx)
+ netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_RX, NULL);
}
/**
- * ice_vsi_set_napi_queues
- * @vsi: VSI pointer
+ * ice_napi_add - register NAPI handler for the VSI
+ * @vsi: VSI for which NAPI handler is to be registered
*
- * Associate queue[s] with napi for all vectors
+ * This function is only called in the driver's load path. Registering the NAPI
+ * handler is done in ice_vsi_alloc_q_vector() for all other cases (e.g. resume,
+ * reset/rebuild, etc.)
*/
-void ice_vsi_set_napi_queues(struct ice_vsi *vsi)
+void ice_napi_add(struct ice_vsi *vsi)
{
- int i;
+ int v_idx;
if (!vsi->netdev)
return;
- ice_for_each_q_vector(vsi, i)
- ice_q_vector_set_napi_queues(vsi->q_vectors[i]);
+ ice_for_each_q_vector(vsi, v_idx)
+ netif_napi_add_config(vsi->netdev,
+ &vsi->q_vectors[v_idx]->napi,
+ ice_napi_poll,
+ v_idx);
}
/**
@@ -2891,6 +2862,16 @@ int ice_vsi_release(struct ice_vsi *vsi)
ice_rss_clean(vsi);
ice_vsi_close(vsi);
+
+ /* The Rx rule will only exist to remove if the LLDP FW
+ * engine is currently stopped
+ */
+ if (!ice_is_safe_mode(pf) &&
+ !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags) &&
+ (vsi->type == ICE_VSI_PF || (vsi->type == ICE_VSI_VF &&
+ ice_vf_is_lldp_ena(vsi->vf))))
+ ice_vsi_cfg_sw_lldp(vsi, false, false);
+
ice_vsi_decfg(vsi);
/* retain SW VSI data structure since it is needed to unregister and
@@ -3089,7 +3070,6 @@ ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi)
*/
int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
{
- struct ice_vsi_cfg_params params = {};
struct ice_coalesce_stored *coalesce;
int prev_num_q_vectors;
struct ice_pf *pf;
@@ -3098,26 +3078,28 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
if (!vsi)
return -EINVAL;
- params = ice_vsi_to_params(vsi);
- params.flags = vsi_flags;
-
+ vsi->flags = vsi_flags;
pf = vsi->back;
if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf))
return -EINVAL;
+ mutex_lock(&vsi->xdp_state_lock);
+
ret = ice_vsi_realloc_stat_arrays(vsi);
if (ret)
- goto err_vsi_cfg;
+ goto unlock;
ice_vsi_decfg(vsi);
- ret = ice_vsi_cfg_def(vsi, &params);
+ ret = ice_vsi_cfg_def(vsi);
if (ret)
- goto err_vsi_cfg;
+ goto unlock;
coalesce = kcalloc(vsi->num_q_vectors,
sizeof(struct ice_coalesce_stored), GFP_KERNEL);
- if (!coalesce)
- return -ENOMEM;
+ if (!coalesce) {
+ ret = -ENOMEM;
+ goto decfg;
+ }
prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);
@@ -3125,22 +3107,23 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
if (ret) {
if (vsi_flags & ICE_VSI_FLAG_INIT) {
ret = -EIO;
- goto err_vsi_cfg_tc_lan;
+ goto free_coalesce;
}
- kfree(coalesce);
- return ice_schedule_reset(pf, ICE_RESET_PFR);
+ ret = ice_schedule_reset(pf, ICE_RESET_PFR);
+ goto free_coalesce;
}
ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors);
- kfree(coalesce);
-
- return 0;
+ clear_bit(ICE_VSI_REBUILD_PENDING, vsi->state);
-err_vsi_cfg_tc_lan:
- ice_vsi_decfg(vsi);
+free_coalesce:
kfree(coalesce);
-err_vsi_cfg:
+decfg:
+ if (ret)
+ ice_vsi_decfg(vsi);
+unlock:
+ mutex_unlock(&vsi->xdp_state_lock);
return ret;
}
@@ -3215,7 +3198,7 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc)
if (!netdev)
return;
- /* CHNL VSI doesn't have it's own netdev, hence, no netdev_tc */
+ /* CHNL VSI doesn't have its own netdev, hence, no netdev_tc */
if (vsi->type == ICE_VSI_CHNL)
return;
@@ -3752,20 +3735,20 @@ int ice_set_link(struct ice_vsi *vsi, bool ena)
status = ice_aq_set_link_restart_an(pi, ena, NULL);
- /* if link is owned by manageability, FW will return ICE_AQ_RC_EMODE.
+ /* if link is owned by manageability, FW will return LIBIE_AQ_RC_EMODE.
* this is not a fatal error, so print a warning message and return
* a success code. Return an error if FW returns an error code other
- * than ICE_AQ_RC_EMODE
+ * than LIBIE_AQ_RC_EMODE
*/
if (status == -EIO) {
- if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
+ if (hw->adminq.sq_last_status == LIBIE_AQ_RC_EMODE)
dev_dbg(dev, "can't set link to %s, err %d aq_err %s. not fatal, continuing\n",
(ena ? "ON" : "OFF"), status,
- ice_aq_str(hw->adminq.sq_last_status));
+ libie_aq_str(hw->adminq.sq_last_status));
} else if (status) {
dev_err(dev, "can't set link to %s, err %d aq_err %s\n",
(ena ? "ON" : "OFF"), status,
- ice_aq_str(hw->adminq.sq_last_status));
+ libie_aq_str(hw->adminq.sq_last_status));
return status;
}
@@ -3956,12 +3939,18 @@ void ice_init_feature_support(struct ice_pf *pf)
ice_set_feature_support(pf, ICE_F_CGU);
if (ice_is_clock_mux_in_netlist(&pf->hw))
ice_set_feature_support(pf, ICE_F_SMA_CTRL);
- if (ice_gnss_is_gps_present(&pf->hw))
+ if (ice_gnss_is_module_present(&pf->hw))
ice_set_feature_support(pf, ICE_F_GNSS);
break;
default:
break;
}
+
+ if (pf->hw.mac_type == ICE_MAC_E830) {
+ ice_set_feature_support(pf, ICE_F_MBX_LIMIT);
+ ice_set_feature_support(pf, ICE_F_GCS);
+ ice_set_feature_support(pf, ICE_F_TXTIME);
+ }
}
/**
@@ -4008,24 +3997,6 @@ void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx)
}
/**
- * ice_vsi_ctx_set_allow_override - allow destination override on VSI
- * @ctx: pointer to VSI ctx structure
- */
-void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx)
-{
- ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
-}
-
-/**
- * ice_vsi_ctx_clear_allow_override - turn off destination override on VSI
- * @ctx: pointer to VSI ctx structure
- */
-void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx)
-{
- ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
-}
-
-/**
* ice_vsi_update_local_lb - update sw block in VSI with local loopback bit
* @vsi: pointer to VSI structure
* @set: set or unset the bit
@@ -4049,3 +4020,38 @@ ice_vsi_update_local_lb(struct ice_vsi *vsi, bool set)
vsi->info = ctx.info;
return 0;
}
+
+/**
+ * ice_vsi_update_l2tsel - update l2tsel field for all Rx rings on this VSI
+ * @vsi: VSI used to update l2tsel on
+ * @l2tsel: l2tsel setting requested
+ *
+ * Use the l2tsel setting to update all of the Rx queue context bits for l2tsel.
+ * This will modify which descriptor field the first offloaded VLAN will be
+ * stripped into.
+ */
+void ice_vsi_update_l2tsel(struct ice_vsi *vsi, enum ice_l2tsel l2tsel)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ u32 l2tsel_bit;
+ int i;
+
+ if (l2tsel == ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND)
+ l2tsel_bit = 0;
+ else
+ l2tsel_bit = BIT(ICE_L2TSEL_BIT_OFFSET);
+
+ for (i = 0; i < vsi->alloc_rxq; i++) {
+ u16 pfq = vsi->rxq_map[i];
+ u32 qrx_context_offset;
+ u32 regval;
+
+ qrx_context_offset =
+ QRX_CONTEXT(ICE_L2TSEL_QRX_CONTEXT_REG_IDX, pfq);
+
+ regval = rd32(hw, qrx_context_offset);
+ regval &= ~BIT(ICE_L2TSEL_BIT_OFFSET);
+ regval |= l2tsel_bit;
+ wr32(hw, qrx_context_offset, regval);
+ }
+}
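ice_vsi_update_l2tsel() above rewrites one bit in every Rx queue context of the VSI to select which descriptor field the first offloaded VLAN tag is stripped into. A hedged usage sketch follows; the caller name and its boolean parameter are hypothetical, only the two enum values and the function itself come from the patch.

/* Hypothetical caller, illustration only: choose where the first stripped
 * VLAN tag is reported for all Rx queues of a VSI.
 */
static void example_set_tag_location(struct ice_vsi *vsi, bool use_l2tag2_2nd)
{
	enum ice_l2tsel l2tsel = use_l2tag2_2nd ?
		ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND :
		ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1;

	ice_vsi_update_l2tsel(vsi, l2tsel);
}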
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 9cd23afe5f15..2cb1eb98b9da 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -11,43 +11,13 @@
#define ICE_VSI_FLAG_INIT BIT(0)
#define ICE_VSI_FLAG_NO_INIT 0
-/**
- * struct ice_vsi_cfg_params - VSI configuration parameters
- * @pi: pointer to the port_info instance for the VSI
- * @ch: pointer to the channel structure for the VSI, may be NULL
- * @vf: pointer to the VF associated with this VSI, may be NULL
- * @type: the type of VSI to configure
- * @flags: VSI flags used for rebuild and configuration
- *
- * Parameter structure used when configuring a new VSI.
- */
-struct ice_vsi_cfg_params {
- struct ice_port_info *pi;
- struct ice_channel *ch;
- struct ice_vf *vf;
- enum ice_vsi_type type;
- u32 flags;
+#define ICE_L2TSEL_QRX_CONTEXT_REG_IDX 3
+#define ICE_L2TSEL_BIT_OFFSET 23
+enum ice_l2tsel {
+ ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND,
+ ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1,
};
-/**
- * ice_vsi_to_params - Get parameters for an existing VSI
- * @vsi: the VSI to get parameters for
- *
- * Fill a parameter structure for reconfiguring a VSI with its current
- * parameters, such as during a rebuild operation.
- */
-static inline struct ice_vsi_cfg_params ice_vsi_to_params(struct ice_vsi *vsi)
-{
- struct ice_vsi_cfg_params params = {};
-
- params.pi = vsi->port_info;
- params.ch = vsi->ch;
- params.vf = vsi->vf;
- params.type = vsi->type;
-
- return params;
-}
-
const char *ice_vsi_type_str(enum ice_vsi_type vsi_type);
bool ice_pf_state_is_nominal(struct ice_pf *pf);
@@ -66,7 +36,8 @@ ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi);
-void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);
+void ice_vsi_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);
+void ice_cfg_sw_rx_lldp(struct ice_pf *pf, bool enable);
int ice_set_link(struct ice_vsi *vsi, bool ena);
@@ -81,15 +52,10 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc);
struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params);
-void
-ice_queue_set_napi(struct ice_vsi *vsi, unsigned int queue_index,
- enum netdev_queue_type type, struct napi_struct *napi);
-
-void __ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked);
-
-void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector);
-
void ice_vsi_set_napi_queues(struct ice_vsi *vsi);
+void ice_napi_add(struct ice_vsi *vsi);
+
+void ice_vsi_clear_napi_queues(struct ice_vsi *vsi);
int ice_vsi_release(struct ice_vsi *vsi);
@@ -101,7 +67,9 @@ void ice_vsi_decfg(struct ice_vsi *vsi);
void ice_dis_vsi(struct ice_vsi *vsi, bool locked);
int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags);
-int ice_vsi_cfg(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params);
+int ice_vsi_cfg(struct ice_vsi *vsi);
+struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf);
+void ice_vsi_free(struct ice_vsi *vsi);
bool ice_is_reset_in_progress(unsigned long *state);
int ice_wait_for_reset(struct ice_pf *pf, unsigned long timeout);
@@ -128,10 +96,9 @@ void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl);
void ice_write_itr(struct ice_ring_container *rc, u16 itr);
void ice_set_q_vector_intrl(struct ice_q_vector *q_vector);
-int ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set);
-
bool ice_is_safe_mode(struct ice_pf *pf);
bool ice_is_rdma_ena(struct ice_pf *pf);
+bool ice_is_recovery_mode(struct ice_hw *hw);
bool ice_is_dflt_vsi_in_use(struct ice_port_info *pi);
bool ice_is_vsi_dflt_vsi(struct ice_vsi *vsi);
int ice_set_dflt_vsi(struct ice_vsi *vsi);
@@ -146,10 +113,6 @@ ice_vsi_update_security(struct ice_vsi *vsi, void (*fill)(struct ice_vsi_ctx *))
void ice_vsi_ctx_set_antispoof(struct ice_vsi_ctx *ctx);
void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx);
-
-void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx);
-
-void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx);
int ice_vsi_update_local_lb(struct ice_vsi *vsi, bool set);
int ice_vsi_add_vlan_zero(struct ice_vsi *vsi);
int ice_vsi_del_vlan_zero(struct ice_vsi *vsi);
@@ -160,4 +123,5 @@ void ice_set_feature_support(struct ice_pf *pf, enum ice_feature f);
void ice_clear_feature_support(struct ice_pf *pf, enum ice_feature f);
void ice_init_feature_support(struct ice_pf *pf);
bool ice_vsi_is_rx_queue_active(struct ice_vsi *vsi);
+void ice_vsi_update_l2tsel(struct ice_vsi *vsi, enum ice_l2tsel l2tsel);
#endif /* !_ICE_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 33a164fa325a..4bb68e7a00f5 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -13,7 +13,9 @@
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
-#include "ice_devlink.h"
+#include "devlink/devlink.h"
+#include "devlink/port.h"
+#include "ice_sf_eth.h"
#include "ice_hwmon.h"
/* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
* ice tracepoint functions. This must be done exactly once across the
@@ -34,8 +36,12 @@ static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
#define ICE_DDP_PKG_PATH "intel/ice/ddp/"
#define ICE_DDP_PKG_FILE ICE_DDP_PKG_PATH "ice.pkg"
-MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
+MODULE_IMPORT_NS("LIBETH");
+MODULE_IMPORT_NS("LIBETH_XDP");
+MODULE_IMPORT_NS("LIBIE");
+MODULE_IMPORT_NS("LIBIE_ADMINQ");
+MODULE_IMPORT_NS("LIBIE_FWLOG");
MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(ICE_DDP_PKG_FILE);
@@ -85,7 +91,8 @@ ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
bool netif_is_ice(const struct net_device *dev)
{
- return dev && (dev->netdev_ops == &ice_netdev_ops);
+ return dev && (dev->netdev_ops == &ice_netdev_ops ||
+ dev->netdev_ops == &ice_netdev_safe_mode_ops);
}
/**
@@ -376,7 +383,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
* should go into promiscuous mode. There should be some
* space reserved for promiscuous filters.
*/
- if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
+ if (hw->adminq.sq_last_status == LIBIE_AQ_RC_ENOSPC &&
!test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC,
vsi->state)) {
promisc_forced_on = true;
@@ -519,25 +526,6 @@ static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
}
/**
- * ice_clear_sw_switch_recipes - clear switch recipes
- * @pf: board private structure
- *
- * Mark switch recipes as not created in sw structures. There are cases where
- * rules (especially advanced rules) need to be restored, either re-read from
- * hardware or added again. For example after the reset. 'recp_created' flag
- * prevents from doing that and need to be cleared upfront.
- */
-static void ice_clear_sw_switch_recipes(struct ice_pf *pf)
-{
- struct ice_sw_recipe *recp;
- u8 i;
-
- recp = pf->hw.switch_info->recp_list;
- for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
- recp[i].recp_created = false;
-}
-
-/**
* ice_prepare_for_reset - prep for reset
* @pf: board private structure
* @reset_type: reset type requested
@@ -558,6 +546,8 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
return;
+ synchronize_irq(pf->oicr_irq.virq);
+
ice_unplug_aux_dev(pf);
/* Notify VFs of impending reset */
@@ -571,8 +561,9 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
mutex_unlock(&pf->vfs.table_lock);
if (ice_is_eswitch_mode_switchdev(pf)) {
- if (reset_type != ICE_RESET_PFR)
- ice_clear_sw_switch_recipes(pf);
+ rtnl_lock();
+ ice_eswitch_br_fdb_flush(pf->eswitch.br_offloads->bridge);
+ rtnl_unlock();
}
/* release ADQ specific HW and SW resources */
@@ -605,11 +596,15 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt));
}
}
+
+ if (vsi->netdev)
+ netif_device_detach(vsi->netdev);
skip:
/* clear SW filtering DB */
ice_clear_hw_tbls(hw);
/* disable the VSIs and their queues that are not already DOWN */
+ set_bit(ICE_VSI_REBUILD_PENDING, ice_get_main_vsi(pf)->state);
ice_pf_dis_all_vsi(pf, false);
if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
@@ -621,7 +616,7 @@ skip:
if (hw->port_info)
ice_sched_clear_port(hw->port_info);
- ice_shutdown_all_ctrlq(hw);
+ ice_shutdown_all_ctrlq(hw, false);
set_bit(ICE_PREPARED_FOR_RESET, pf->state);
}
@@ -803,6 +798,9 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
}
switch (vsi->port_info->phy.link_info.link_speed) {
+ case ICE_AQ_LINK_SPEED_200GB:
+ speed = "200 G";
+ break;
case ICE_AQ_LINK_SPEED_100GB:
speed = "100 G";
break;
@@ -1125,7 +1123,7 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
if (status)
dev_dbg(dev, "Failed to update link status on port %d, err %d aq_err %s\n",
pi->lport, status,
- ice_aq_str(pi->hw->adminq.sq_last_status));
+ libie_aq_str(pi->hw->adminq.sq_last_status));
ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
@@ -1150,7 +1148,10 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
if (link_up == old_link && link_speed == old_link_speed)
return 0;
- ice_ptp_link_change(pf, pf->hw.pf_id, link_up);
+ if (!link_up && old_link)
+ pf->link_down_events++;
+
+ ice_ptp_link_change(pf, link_up);
if (ice_is_dcb_active(pf)) {
if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
@@ -1253,32 +1254,6 @@ ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
}
/**
- * ice_get_fwlog_data - copy the FW log data from ARQ event
- * @pf: PF that the FW log event is associated with
- * @event: event structure containing FW log data
- */
-static void
-ice_get_fwlog_data(struct ice_pf *pf, struct ice_rq_event_info *event)
-{
- struct ice_fwlog_data *fwlog;
- struct ice_hw *hw = &pf->hw;
-
- fwlog = &hw->fwlog_ring.rings[hw->fwlog_ring.tail];
-
- memset(fwlog->data, 0, PAGE_SIZE);
- fwlog->data_size = le16_to_cpu(event->desc.datalen);
-
- memcpy(fwlog->data, event->msg_buf, fwlog->data_size);
- ice_fwlog_ring_increment(&hw->fwlog_ring.tail, hw->fwlog_ring.size);
-
- if (ice_fwlog_ring_full(&hw->fwlog_ring)) {
- /* the rings are full so bump the head to create room */
- ice_fwlog_ring_increment(&hw->fwlog_ring.head,
- hw->fwlog_ring.size);
- }
-}
-
-/**
* ice_aq_prep_for_event - Prepare to wait for an AdminQ event from firmware
* @pf: pointer to the PF private structure
* @task: intermediate helper storage and identifier for waiting
@@ -1552,19 +1527,31 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
ice_vf_lan_overflow_event(pf, &event);
break;
case ice_mbx_opc_send_msg_to_pf:
- data.num_msg_proc = i;
- data.num_pending_arq = pending;
- data.max_num_msgs_mbx = hw->mailboxq.num_rq_entries;
- data.async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK;
+ if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT)) {
+ ice_vc_process_vf_msg(pf, &event, NULL);
+ ice_mbx_vf_dec_trig_e830(hw, &event);
+ } else {
+ u16 val = hw->mailboxq.num_rq_entries;
+
+ data.max_num_msgs_mbx = val;
+ val = ICE_MBX_OVERFLOW_WATERMARK;
+ data.async_watermark_val = val;
+ data.num_msg_proc = i;
+ data.num_pending_arq = pending;
- ice_vc_process_vf_msg(pf, &event, &data);
+ ice_vc_process_vf_msg(pf, &event, &data);
+ }
break;
case ice_aqc_opc_fw_logs_event:
- ice_get_fwlog_data(pf, &event);
+ libie_get_fwlog_data(&hw->fwlog, event.msg_buf,
+ le16_to_cpu(event.desc.datalen));
break;
case ice_aqc_opc_lldp_set_mib_change:
ice_dcb_process_lldp_set_mib_change(pf, &event);
break;
+ case ice_aqc_opc_get_health_status:
+ ice_process_health_status_event(pf, &event);
+ break;
default:
dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
qtype, opcode);
@@ -1712,7 +1699,7 @@ static int ice_service_task_stop(struct ice_pf *pf)
ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state);
if (pf->serv_tmr.function)
- del_timer_sync(&pf->serv_tmr);
+ timer_delete_sync(&pf->serv_tmr);
if (pf->serv_task.func)
cancel_work_sync(&pf->serv_task);
@@ -1738,13 +1725,46 @@ static void ice_service_task_restart(struct ice_pf *pf)
*/
static void ice_service_timer(struct timer_list *t)
{
- struct ice_pf *pf = from_timer(pf, t, serv_tmr);
+ struct ice_pf *pf = timer_container_of(pf, t, serv_tmr);
mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
ice_service_task_schedule(pf);
}
/**
+ * ice_mdd_maybe_reset_vf - reset VF after MDD event
+ * @pf: pointer to the PF structure
+ * @vf: pointer to the VF structure
+ * @reset_vf_tx: whether Tx MDD has occurred
+ * @reset_vf_rx: whether Rx MDD has occurred
+ *
+ * Since the queue can get stuck on VF MDD events, the PF can be configured to
+ * automatically reset the VF by enabling the private ethtool flag
+ * mdd-auto-reset-vf.
+ */
+static void ice_mdd_maybe_reset_vf(struct ice_pf *pf, struct ice_vf *vf,
+ bool reset_vf_tx, bool reset_vf_rx)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+
+ if (!test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags))
+ return;
+
+ /* VF MDD event counters will be cleared by reset, so print the event
+ * prior to reset.
+ */
+ if (reset_vf_tx)
+ ice_print_vf_tx_mdd_event(vf);
+
+ if (reset_vf_rx)
+ ice_print_vf_rx_mdd_event(vf);
+
+ dev_info(dev, "PF-to-VF reset on PF %d VF %d due to MDD event\n",
+ pf->hw.pf_id, vf->vf_id);
+ ice_reset_vf(vf, ICE_VF_RESET_NOTIFY | ICE_VF_RESET_LOCK);
+}
+
+/**
* ice_handle_mdd_event - handle malicious driver detect event
* @pf: pointer to the PF structure
*
@@ -1781,6 +1801,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
event, queue, pf_num, vf_num);
+ ice_report_mdd_event(pf, ICE_MDD_SRC_TX_PQM, pf_num, vf_num,
+ event, queue);
wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
}
@@ -1794,6 +1816,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
event, queue, pf_num, vf_num);
+ ice_report_mdd_event(pf, ICE_MDD_SRC_TX_TCLAN, pf_num, vf_num,
+ event, queue);
wr32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw), U32_MAX);
}
@@ -1807,6 +1831,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
if (netif_msg_rx_err(pf))
dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
event, queue, pf_num, vf_num);
+ ice_report_mdd_event(pf, ICE_MDD_SRC_RX, pf_num, vf_num, event,
+ queue);
wr32(hw, GL_MDET_RX, 0xffffffff);
}
@@ -1837,6 +1863,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
*/
mutex_lock(&pf->vfs.table_lock);
ice_for_each_vf(pf, bkt, vf) {
+ bool reset_vf_tx = false, reset_vf_rx = false;
+
reg = rd32(hw, VP_MDET_TX_PQM(vf->vf_id));
if (reg & VP_MDET_TX_PQM_VALID_M) {
wr32(hw, VP_MDET_TX_PQM(vf->vf_id), 0xFFFF);
@@ -1845,6 +1873,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
vf->vf_id);
+
+ reset_vf_tx = true;
}
reg = rd32(hw, VP_MDET_TX_TCLAN(vf->vf_id));
@@ -1855,6 +1885,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
vf->vf_id);
+
+ reset_vf_tx = true;
}
reg = rd32(hw, VP_MDET_TX_TDPU(vf->vf_id));
@@ -1865,6 +1897,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
vf->vf_id);
+
+ reset_vf_tx = true;
}
reg = rd32(hw, VP_MDET_RX(vf->vf_id));
@@ -1876,18 +1910,12 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
vf->vf_id);
- /* Since the queue is disabled on VF Rx MDD events, the
- * PF can be configured to reset the VF through ethtool
- * private flag mdd-auto-reset-vf.
- */
- if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
- /* VF MDD event counters will be cleared by
- * reset, so print the event prior to reset.
- */
- ice_print_vf_rx_mdd_event(vf);
- ice_reset_vf(vf, ICE_VF_RESET_LOCK);
- }
+ reset_vf_rx = true;
}
+
+ if (reset_vf_tx || reset_vf_rx)
+ ice_mdd_maybe_reset_vf(pf, vf, reset_vf_tx,
+ reset_vf_rx);
}
mutex_unlock(&pf->vfs.table_lock);
@@ -2318,6 +2346,18 @@ static void ice_check_media_subtask(struct ice_pf *pf)
}
}
+static void ice_service_task_recovery_mode(struct work_struct *work)
+{
+ struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
+
+ set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
+ ice_clean_adminq_subtask(pf);
+
+ ice_service_task_complete(pf);
+
+ mod_timer(&pf->serv_tmr, jiffies + msecs_to_jiffies(100));
+}
+
/**
* ice_service_task - manage and run subtasks
* @work: pointer to work_struct contained by the PF struct
@@ -2327,9 +2367,11 @@ static void ice_service_task(struct work_struct *work)
struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
unsigned long start_time = jiffies;
- /* subtasks */
+ if (pf->health_reporters.tx_hang_buf.tx_ring) {
+ ice_report_tx_hang(pf);
+ pf->health_reporters.tx_hang_buf.tx_ring = NULL;
+ }
- /* process reset requests first */
ice_reset_subtask(pf);
/* bail if a reset/recovery cycle is pending or rebuild failed */
@@ -2341,11 +2383,11 @@ static void ice_service_task(struct work_struct *work)
}
if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) {
- struct iidc_event *event;
+ struct iidc_rdma_event *event;
event = kzalloc(sizeof(*event), GFP_KERNEL);
if (event) {
- set_bit(IIDC_EVENT_CRIT_ERR, event->type);
+ set_bit(IIDC_RDMA_EVENT_CRIT_ERR, event->type);
/* report the entire OICR value to AUX driver */
swap(event->reg, pf->oicr_err_reg);
ice_send_event_to_aux(pf, event);
@@ -2364,11 +2406,11 @@ static void ice_service_task(struct work_struct *work)
ice_plug_aux_dev(pf);
if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) {
- struct iidc_event *event;
+ struct iidc_rdma_event *event;
event = kzalloc(sizeof(*event), GFP_KERNEL);
if (event) {
- set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
+ set_bit(IIDC_RDMA_EVENT_AFTER_MTU_CHANGE, event->type);
ice_send_event_to_aux(pf, event);
kfree(event);
}
@@ -2468,34 +2510,6 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
}
/**
- * ice_irq_affinity_notify - Callback for affinity changes
- * @notify: context as to what irq was changed
- * @mask: the new affinity mask
- *
- * This is a callback function used by the irq_set_affinity_notifier function
- * so that we may register to receive changes to the irq affinity masks.
- */
-static void
-ice_irq_affinity_notify(struct irq_affinity_notify *notify,
- const cpumask_t *mask)
-{
- struct ice_q_vector *q_vector =
- container_of(notify, struct ice_q_vector, affinity_notify);
-
- cpumask_copy(&q_vector->affinity_mask, mask);
-}
-
-/**
- * ice_irq_affinity_release - Callback for affinity notifier release
- * @ref: internal core kernel usage
- *
- * This is a callback function used by the irq_set_affinity_notifier function
- * to inform the current notification subscriber that they will no longer
- * receive notifications.
- */
-static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
-
-/**
* ice_vsi_ena_irq - Enable IRQ for the given VSI
* @vsi: the VSI being configured
*/
@@ -2558,19 +2572,6 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
err);
goto free_q_irqs;
}
-
- /* register for affinity change notifications */
- if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
- struct irq_affinity_notify *affinity_notify;
-
- affinity_notify = &q_vector->affinity_notify;
- affinity_notify->notify = ice_irq_affinity_notify;
- affinity_notify->release = ice_irq_affinity_release;
- irq_set_affinity_notifier(irq_num, affinity_notify);
- }
-
- /* assign the mask for this irq */
- irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
}
err = ice_set_cpu_rx_rmap(vsi);
@@ -2586,9 +2587,6 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
free_q_irqs:
while (vector--) {
irq_num = vsi->q_vectors[vector]->irq.virq;
- if (!IS_ENABLED(CONFIG_RFS_ACCEL))
- irq_set_affinity_notifier(irq_num, NULL);
- irq_set_affinity_hint(irq_num, NULL);
devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]);
}
return err;
@@ -2670,17 +2668,93 @@ static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
bpf_prog_put(old_prog);
}
+static struct ice_tx_ring *ice_xdp_ring_from_qid(struct ice_vsi *vsi, int qid)
+{
+ struct ice_q_vector *q_vector;
+ struct ice_tx_ring *ring;
+
+ if (static_key_enabled(&ice_xdp_locking_key))
+ return vsi->xdp_rings[qid % vsi->num_xdp_txq];
+
+ q_vector = vsi->rx_rings[qid]->q_vector;
+ ice_for_each_tx_ring(ring, q_vector->tx)
+ if (ice_ring_is_xdp(ring))
+ return ring;
+
+ return NULL;
+}
+
+/**
+ * ice_map_xdp_rings - Map XDP rings to interrupt vectors
+ * @vsi: the VSI with XDP rings being configured
+ *
+ * Map XDP rings to interrupt vectors and perform the configuration steps
+ * dependent on the mapping.
+ */
+void ice_map_xdp_rings(struct ice_vsi *vsi)
+{
+ int xdp_rings_rem = vsi->num_xdp_txq;
+ int v_idx, q_idx;
+
+ /* follow the logic from ice_vsi_map_rings_to_vectors */
+ ice_for_each_q_vector(vsi, v_idx) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
+ int xdp_rings_per_v, q_id, q_base;
+
+ xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
+ vsi->num_q_vectors - v_idx);
+ q_base = vsi->num_xdp_txq - xdp_rings_rem;
+
+ for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
+ struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
+
+ xdp_ring->q_vector = q_vector;
+ xdp_ring->next = q_vector->tx.tx_ring;
+ q_vector->tx.tx_ring = xdp_ring;
+ }
+ xdp_rings_rem -= xdp_rings_per_v;
+ }
+
+ ice_for_each_rxq(vsi, q_idx) {
+ vsi->rx_rings[q_idx]->xdp_ring = ice_xdp_ring_from_qid(vsi,
+ q_idx);
+ ice_tx_xsk_pool(vsi, q_idx);
+ }
+}
+
+/**
+ * ice_unmap_xdp_rings - Unmap XDP rings from interrupt vectors
+ * @vsi: the VSI with XDP rings being unmapped
+ */
+static void ice_unmap_xdp_rings(struct ice_vsi *vsi)
+{
+ int v_idx;
+
+ ice_for_each_q_vector(vsi, v_idx) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
+ struct ice_tx_ring *ring;
+
+ ice_for_each_tx_ring(ring, q_vector->tx)
+ if (!ring->tx_buf || !ice_ring_is_xdp(ring))
+ break;
+
+ /* restore the value of last node prior to XDP setup */
+ q_vector->tx.tx_ring = ring;
+ }
+}
+
/**
* ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
* @vsi: VSI to bring up Tx rings used by XDP
* @prog: bpf program that will be assigned to VSI
+ * @cfg_type: create from scratch or restore the existing configuration
*
* Return 0 on success and negative value on error
*/
-int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
+int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
+ enum ice_xdp_cfg cfg_type)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
- int xdp_rings_rem = vsi->num_xdp_txq;
struct ice_pf *pf = vsi->back;
struct ice_qs_cfg xdp_qs_cfg = {
.qs_mutex = &pf->avail_q_mutex,
@@ -2693,8 +2767,7 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
.mapping_mode = ICE_VSI_MAP_CONTIG
};
struct device *dev;
- int i, v_idx;
- int status;
+ int status, i;
dev = ice_pf_to_dev(pf);
vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
@@ -2713,49 +2786,15 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
if (ice_xdp_alloc_setup_rings(vsi))
goto clear_xdp_rings;
- /* follow the logic from ice_vsi_map_rings_to_vectors */
- ice_for_each_q_vector(vsi, v_idx) {
- struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
- int xdp_rings_per_v, q_id, q_base;
-
- xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
- vsi->num_q_vectors - v_idx);
- q_base = vsi->num_xdp_txq - xdp_rings_rem;
-
- for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
- struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
-
- xdp_ring->q_vector = q_vector;
- xdp_ring->next = q_vector->tx.tx_ring;
- q_vector->tx.tx_ring = xdp_ring;
- }
- xdp_rings_rem -= xdp_rings_per_v;
- }
-
- ice_for_each_rxq(vsi, i) {
- if (static_key_enabled(&ice_xdp_locking_key)) {
- vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
- } else {
- struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector;
- struct ice_tx_ring *ring;
-
- ice_for_each_tx_ring(ring, q_vector->tx) {
- if (ice_ring_is_xdp(ring)) {
- vsi->rx_rings[i]->xdp_ring = ring;
- break;
- }
- }
- }
- ice_tx_xsk_pool(vsi, i);
- }
-
/* omit the scheduler update if in reset path; XDP queues will be
* taken into account at the end of ice_vsi_rebuild, where
* ice_cfg_vsi_lan is being called
*/
- if (ice_is_reset_in_progress(pf->state))
+ if (cfg_type == ICE_XDP_CFG_PART)
return 0;
+ ice_map_xdp_rings(vsi);
+
/* tell the Tx scheduler that right now we have
* additional queues
*/
@@ -2767,7 +2806,7 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
if (status) {
dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n",
status);
- goto clear_xdp_rings;
+ goto unmap_xdp_rings;
}
/* assign the prog only when it's not already present on VSI;
@@ -2783,6 +2822,8 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
ice_vsi_assign_bpf_prog(vsi, prog);
return 0;
+unmap_xdp_rings:
+ ice_unmap_xdp_rings(vsi);
clear_xdp_rings:
ice_for_each_xdp_txq(vsi, i)
if (vsi->xdp_rings[i]) {
@@ -2799,41 +2840,32 @@ err_map_xdp:
mutex_unlock(&pf->avail_q_mutex);
devm_kfree(dev, vsi->xdp_rings);
+ vsi->xdp_rings = NULL;
+
return -ENOMEM;
}
/**
* ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
* @vsi: VSI to remove XDP rings
+ * @cfg_type: disable XDP permanently or allow it to be restored later
*
* Detach XDP rings from irq vectors, clean up the PF bitmap and free
* resources
*/
-int ice_destroy_xdp_rings(struct ice_vsi *vsi)
+int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct ice_pf *pf = vsi->back;
- int i, v_idx;
+ int i;
/* q_vectors are freed in reset path so there's no point in detaching
- * rings; in case of rebuild being triggered not from reset bits
- * in pf->state won't be set, so additionally check first q_vector
- * against NULL
+ * rings
*/
- if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
+ if (cfg_type == ICE_XDP_CFG_PART)
goto free_qmap;
- ice_for_each_q_vector(vsi, v_idx) {
- struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
- struct ice_tx_ring *ring;
-
- ice_for_each_tx_ring(ring, q_vector->tx)
- if (!ring->tx_buf || !ice_ring_is_xdp(ring))
- break;
-
- /* restore the value of last node prior to XDP setup */
- q_vector->tx.tx_ring = ring;
- }
+ ice_unmap_xdp_rings(vsi);
free_qmap:
mutex_lock(&pf->avail_q_mutex);
@@ -2861,7 +2893,7 @@ free_qmap:
if (static_key_enabled(&ice_xdp_locking_key))
static_branch_dec(&ice_xdp_locking_key);
- if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
+ if (cfg_type == ICE_XDP_CFG_PART)
return 0;
ice_vsi_assign_bpf_prog(vsi, NULL);
@@ -2890,7 +2922,7 @@ static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
ice_for_each_rxq(vsi, i) {
struct ice_rx_ring *rx_ring = vsi->rx_rings[i];
- if (rx_ring->xsk_pool)
+ if (READ_ONCE(rx_ring->xsk_pool))
napi_schedule(&rx_ring->q_vector->napi);
}
}
@@ -2910,6 +2942,9 @@ int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
if (avail < cpus / 2)
return -ENOMEM;
+ if (vsi->type == ICE_VSI_SF)
+ avail = vsi->alloc_txq;
+
vsi->num_xdp_txq = min_t(u16, avail, cpus);
if (vsi->num_xdp_txq < cpus)
@@ -2924,10 +2959,7 @@ int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
*/
static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
{
- if (test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
- return ICE_RXBUF_1664;
- else
- return ICE_RXBUF_3072;
+ return ICE_RXBUF_3072;
}
/**
@@ -2941,8 +2973,8 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
struct netlink_ext_ack *extack)
{
unsigned int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
- bool if_running = netif_running(vsi->netdev);
int ret = 0, xdp_ring_err = 0;
+ bool if_running;
if (prog && !prog->aux->xdp_has_frags) {
if (frame_size > ice_max_xdp_frame_size(vsi)) {
@@ -2953,13 +2985,17 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
}
/* hot swap progs and avoid toggling link */
- if (ice_is_xdp_ena_vsi(vsi) == !!prog) {
+ if (ice_is_xdp_ena_vsi(vsi) == !!prog ||
+ test_bit(ICE_VSI_REBUILD_PENDING, vsi->state)) {
ice_vsi_assign_bpf_prog(vsi, prog);
return 0;
}
+ if_running = netif_running(vsi->netdev) &&
+ !test_and_set_bit(ICE_VSI_DOWN, vsi->state);
+
/* need to stop netdev while setting up the program for Rx rings */
- if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
+ if (if_running) {
ret = ice_down(vsi);
if (ret) {
NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
@@ -2971,27 +3007,24 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
xdp_ring_err = ice_vsi_determine_xdp_res(vsi);
if (xdp_ring_err) {
NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
+ goto resume_if;
} else {
- xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
- if (xdp_ring_err)
+ xdp_ring_err = ice_prepare_xdp_rings(vsi, prog,
+ ICE_XDP_CFG_FULL);
+ if (xdp_ring_err) {
NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
+ goto resume_if;
+ }
}
xdp_features_set_redirect_target(vsi->netdev, true);
- /* reallocate Rx queues that are used for zero-copy */
- xdp_ring_err = ice_realloc_zc_buf(vsi, true);
- if (xdp_ring_err)
- NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
xdp_features_clear_redirect_target(vsi->netdev);
- xdp_ring_err = ice_destroy_xdp_rings(vsi);
+ xdp_ring_err = ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_FULL);
if (xdp_ring_err)
NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
- /* reallocate Rx queues that were used for zero-copy */
- xdp_ring_err = ice_realloc_zc_buf(vsi, false);
- if (xdp_ring_err)
- NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed");
}
+resume_if:
if (if_running)
ret = ice_up(vsi);
@@ -3020,25 +3053,32 @@ static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
* @dev: netdevice
* @xdp: XDP command
*/
-static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
struct ice_netdev_priv *np = netdev_priv(dev);
struct ice_vsi *vsi = np->vsi;
+ int ret;
- if (vsi->type != ICE_VSI_PF) {
- NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
+ if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_SF) {
+ NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF or SF VSI");
return -EINVAL;
}
+ mutex_lock(&vsi->xdp_state_lock);
+
switch (xdp->command) {
case XDP_SETUP_PROG:
- return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
+ ret = ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
+ break;
case XDP_SETUP_XSK_POOL:
- return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
- xdp->xsk.queue_id);
+ ret = ice_xsk_pool_setup(vsi, xdp->xsk.pool, xdp->xsk.queue_id);
+ break;
default:
- return -EINVAL;
+ ret = -EINVAL;
}
+
+ mutex_unlock(&vsi->xdp_state_lock);
+ return ret;
}
/**
@@ -3103,12 +3143,14 @@ static irqreturn_t ice_ll_ts_intr(int __always_unused irq, void *data)
hw = &pf->hw;
tx = &pf->ptp.port.tx;
spin_lock_irqsave(&tx->lock, flags);
- ice_ptp_complete_tx_single_tstamp(tx);
+ if (tx->init) {
+ ice_ptp_complete_tx_single_tstamp(tx);
- idx = find_next_bit_wrap(tx->in_use, tx->len,
- tx->last_ll_ts_idx_read + 1);
- if (idx != tx->len)
- ice_ptp_req_tx_single_tstamp(tx, idx);
+ idx = find_next_bit_wrap(tx->in_use, tx->len,
+ tx->last_ll_ts_idx_read + 1);
+ if (idx != tx->len)
+ ice_ptp_req_tx_single_tstamp(tx, idx);
+ }
spin_unlock_irqrestore(&tx->lock, flags);
val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
@@ -3210,22 +3252,8 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
if (oicr & PFINT_OICR_TSYN_TX_M) {
ena_mask &= ~PFINT_OICR_TSYN_TX_M;
- if (ice_pf_state_is_nominal(pf) &&
- pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) {
- struct ice_ptp_tx *tx = &pf->ptp.port.tx;
- unsigned long flags;
- u8 idx;
-
- spin_lock_irqsave(&tx->lock, flags);
- idx = find_next_bit_wrap(tx->in_use, tx->len,
- tx->last_ll_ts_idx_read + 1);
- if (idx != tx->len)
- ice_ptp_req_tx_single_tstamp(tx, idx);
- spin_unlock_irqrestore(&tx->lock, flags);
- } else if (ice_ptp_pf_handles_tx_interrupt(pf)) {
- set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
- ret = IRQ_WAKE_THREAD;
- }
+
+ ret = ice_ptp_ts_irq(pf);
}
if (oicr & PFINT_OICR_TSYN_EVNT_M) {
@@ -3480,28 +3508,6 @@ skip_req_irq:
}
/**
- * ice_napi_add - register NAPI handler for the VSI
- * @vsi: VSI for which NAPI handler is to be registered
- *
- * This function is only called in the driver's load path. Registering the NAPI
- * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume,
- * reset/rebuild, etc.)
- */
-static void ice_napi_add(struct ice_vsi *vsi)
-{
- int v_idx;
-
- if (!vsi->netdev)
- return;
-
- ice_for_each_q_vector(vsi, v_idx) {
- netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
- ice_napi_poll);
- __ice_q_vector_set_napi_queues(vsi->q_vectors[v_idx], false);
- }
-}
-
-/**
* ice_set_ops - set netdev and ethtools ops for the given netdev
* @vsi: the VSI associated with the new netdev
*/
@@ -3534,7 +3540,7 @@ static void ice_set_ops(struct ice_vsi *vsi)
* ice_set_netdev_features - set features for the given netdev
* @netdev: netdev instance
*/
-static void ice_set_netdev_features(struct net_device *netdev)
+void ice_set_netdev_features(struct net_device *netdev)
{
struct ice_pf *pf = ice_netdev_to_pf(netdev);
bool is_dvm_ena = ice_is_dvm_ena(&pf->hw);
@@ -3617,6 +3623,15 @@ static void ice_set_netdev_features(struct net_device *netdev)
*/
netdev->hw_features |= NETIF_F_RXFCS;
+ /* Allow core to manage IRQs affinity */
+ netif_set_affinity_auto(netdev);
+
+ /* Mutual exclusivity for TSO and GCS is enforced by the set features
+ * ndo callback.
+ */
+ if (ice_is_feature_supported(pf, ICE_F_GCS))
+ netdev->hw_features |= NETIF_F_HW_CSUM;
+
netif_set_tso_max_size(netdev, ICE_MAX_TSO_SIZE);
}
@@ -3648,7 +3663,7 @@ ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
struct ice_vsi_cfg_params params = {};
params.type = ICE_VSI_PF;
- params.pi = pi;
+ params.port_info = pi;
params.flags = ICE_VSI_FLAG_INIT;
return ice_vsi_setup(pf, &params);
@@ -3661,7 +3676,7 @@ ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
struct ice_vsi_cfg_params params = {};
params.type = ICE_VSI_CHNL;
- params.pi = pi;
+ params.port_info = pi;
params.ch = ch;
params.flags = ICE_VSI_FLAG_INIT;
@@ -3682,7 +3697,7 @@ ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
struct ice_vsi_cfg_params params = {};
params.type = ICE_VSI_CTRL;
- params.pi = pi;
+ params.port_info = pi;
params.flags = ICE_VSI_FLAG_INIT;
return ice_vsi_setup(pf, &params);
@@ -3702,7 +3717,7 @@ ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
struct ice_vsi_cfg_params params = {};
params.type = ICE_VSI_LB;
- params.pi = pi;
+ params.port_info = pi;
params.flags = ICE_VSI_FLAG_INIT;
return ice_vsi_setup(pf, &params);
@@ -3716,8 +3731,7 @@ ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
*
* net_device_ops implementation for adding VLAN IDs
*/
-static int
-ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
+int ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi_vlan_ops *vlan_ops;
@@ -3779,8 +3793,7 @@ finish:
*
* net_device_ops implementation for removing VLAN IDs
*/
-static int
-ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
+int ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi_vlan_ops *vlan_ops;
@@ -3927,9 +3940,10 @@ u16 ice_get_avail_rxq_count(struct ice_pf *pf)
 * ice_deinit_pf - Unrolls initializations done by ice_init_pf
* @pf: board private structure to initialize
*/
-static void ice_deinit_pf(struct ice_pf *pf)
+void ice_deinit_pf(struct ice_pf *pf)
{
- ice_service_task_stop(pf);
+	/* note that we also unroll on ice_init_pf() failure here */
+
mutex_destroy(&pf->lag_mutex);
mutex_destroy(&pf->adev_mutex);
mutex_destroy(&pf->sw_mutex);
@@ -3947,8 +3961,19 @@ static void ice_deinit_pf(struct ice_pf *pf)
pf->avail_rxqs = NULL;
}
+ if (pf->txtime_txqs) {
+ bitmap_free(pf->txtime_txqs);
+ pf->txtime_txqs = NULL;
+ }
+
if (pf->ptp.clock)
ptp_clock_unregister(pf->ptp.clock);
+
+ if (!xa_empty(&pf->irq_tracker.entries))
+ ice_free_irq_msix_misc(pf);
+
+ xa_destroy(&pf->dyn_ports);
+ xa_destroy(&pf->sf_nums);
}
/**
@@ -3993,21 +4018,32 @@ static void ice_set_pf_caps(struct ice_pf *pf)
}
clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
- if (func_caps->common_cap.ieee_1588 &&
- !(pf->hw.mac_type == ICE_MAC_E830))
+ if (func_caps->common_cap.ieee_1588)
set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
pf->max_pf_txqs = func_caps->common_cap.num_txq;
pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
}
+void ice_start_service_task(struct ice_pf *pf)
+{
+ timer_setup(&pf->serv_tmr, ice_service_timer, 0);
+ pf->serv_tmr_period = HZ;
+ INIT_WORK(&pf->serv_task, ice_service_task);
+ clear_bit(ICE_SERVICE_SCHED, pf->state);
+}
+
/**
* ice_init_pf - Initialize general software structures (struct ice_pf)
* @pf: board private structure to initialize
+ * Return: 0 on success, negative errno otherwise.
*/
-static int ice_init_pf(struct ice_pf *pf)
+int ice_init_pf(struct ice_pf *pf)
{
- ice_set_pf_caps(pf);
+ struct udp_tunnel_nic_info *udp_tunnel_nic = &pf->hw.udp_tunnel_nic;
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ int err = -ENOMEM;
mutex_init(&pf->sw_mutex);
mutex_init(&pf->tc_mutex);
@@ -4020,29 +4056,49 @@ static int ice_init_pf(struct ice_pf *pf)
init_waitqueue_head(&pf->reset_wait_queue);
- /* setup service timer and periodic service task */
- timer_setup(&pf->serv_tmr, ice_service_timer, 0);
- pf->serv_tmr_period = HZ;
- INIT_WORK(&pf->serv_task, ice_service_task);
- clear_bit(ICE_SERVICE_SCHED, pf->state);
-
mutex_init(&pf->avail_q_mutex);
- pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
- if (!pf->avail_txqs)
- return -ENOMEM;
-
- pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
- if (!pf->avail_rxqs) {
- bitmap_free(pf->avail_txqs);
- pf->avail_txqs = NULL;
- return -ENOMEM;
- }
mutex_init(&pf->vfs.table_lock);
hash_init(pf->vfs.table);
- ice_mbx_init_snapshot(&pf->hw);
+ if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
+ wr32(&pf->hw, E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH,
+ ICE_MBX_OVERFLOW_WATERMARK);
+ else
+ ice_mbx_init_snapshot(&pf->hw);
+
+ xa_init(&pf->dyn_ports);
+ xa_init(&pf->sf_nums);
+
+ pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
+ pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
+ pf->txtime_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
+ if (!pf->avail_txqs || !pf->avail_rxqs || !pf->txtime_txqs)
+ goto undo_init;
+
+ udp_tunnel_nic->set_port = ice_udp_tunnel_set_port;
+ udp_tunnel_nic->unset_port = ice_udp_tunnel_unset_port;
+ udp_tunnel_nic->shared = &hw->udp_tunnel_shared;
+ udp_tunnel_nic->tables[0].n_entries = hw->tnl.valid_count[TNL_VXLAN];
+ udp_tunnel_nic->tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN;
+ udp_tunnel_nic->tables[1].n_entries = hw->tnl.valid_count[TNL_GENEVE];
+ udp_tunnel_nic->tables[1].tunnel_types = UDP_TUNNEL_TYPE_GENEVE;
+
+ /* In case of MSIX we are going to setup the misc vector right here
+ * to handle admin queue events etc. In case of legacy and MSI
+ * the misc functionality and queue processing is combined in
+ * the same vector and that gets setup at open.
+ */
+ err = ice_req_irq_msix_misc(pf);
+ if (err) {
+ dev_err(dev, "setup of misc vector failed: %d\n", err);
+ goto undo_init;
+ }
return 0;
+undo_init:
+ /* deinit handles half-initialized pf just fine */
+ ice_deinit_pf(pf);
+ return err;
}
/**
@@ -4079,7 +4135,7 @@ bool ice_is_wol_supported(struct ice_hw *hw)
int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
{
struct ice_pf *pf = vsi->back;
- int err = 0, timeout = 50;
+ int i, err = 0, timeout = 50;
if (!new_rx && !new_tx)
return -EINVAL;
@@ -4098,15 +4154,32 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
/* set for the next time the netdev is started */
if (!netif_running(vsi->netdev)) {
- ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
+ err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
+ if (err)
+ goto rebuild_err;
dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
goto done;
}
ice_vsi_close(vsi);
- ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
+ err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
+ if (err)
+ goto rebuild_err;
+
+ ice_for_each_traffic_class(i) {
+ if (vsi->tc_cfg.ena_tc & BIT(i))
+ netdev_set_tc_queue(vsi->netdev,
+ vsi->tc_cfg.tc_info[i].netdev_tc,
+ vsi->tc_cfg.tc_info[i].qcount_tx,
+ vsi->tc_cfg.tc_info[i].qoffset);
+ }
ice_pf_dcb_recfg(pf, locked);
ice_vsi_open(vsi);
+ goto done;
+
+rebuild_err:
+ dev_err(ice_pf_to_dev(pf), "Error during VSI rebuild: %d. Unload and reload the driver.\n",
+ err);
done:
clear_bit(ICE_CFG_BUSY, pf->state);
return err;
@@ -4155,7 +4228,7 @@ static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n",
- status, ice_aq_str(hw->adminq.sq_last_status));
+ status, libie_aq_str(hw->adminq.sq_last_status));
} else {
vsi->info.sec_flags = ctxt->info.sec_flags;
vsi->info.sw_flags2 = ctxt->info.sw_flags2;
@@ -4417,11 +4490,13 @@ static char *ice_get_opt_fw_name(struct ice_pf *pf)
/**
* ice_request_fw - Device initialization routine
* @pf: pointer to the PF instance
+ * @firmware: double pointer to firmware struct
+ *
+ * Return: zero when successful, negative values otherwise.
*/
-static void ice_request_fw(struct ice_pf *pf)
+static int ice_request_fw(struct ice_pf *pf, const struct firmware **firmware)
{
char *opt_fw_filename = ice_get_opt_fw_name(pf);
- const struct firmware *firmware = NULL;
struct device *dev = ice_pf_to_dev(pf);
int err = 0;
@@ -4430,29 +4505,126 @@ static void ice_request_fw(struct ice_pf *pf)
* and warning messages for other errors.
*/
if (opt_fw_filename) {
- err = firmware_request_nowarn(&firmware, opt_fw_filename, dev);
- if (err) {
- kfree(opt_fw_filename);
- goto dflt_pkg_load;
- }
-
- /* request for firmware was successful. Download to device */
- ice_load_pkg(firmware, pf);
+ err = firmware_request_nowarn(firmware, opt_fw_filename, dev);
kfree(opt_fw_filename);
- release_firmware(firmware);
- return;
+ if (!err)
+ return err;
+ }
+ err = request_firmware(firmware, ICE_DDP_PKG_FILE, dev);
+ if (err)
+ dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
+
+ return err;
+}
+
+/**
+ * ice_init_tx_topology - performs Tx topology initialization
+ * @hw: pointer to the hardware structure
+ * @firmware: pointer to firmware structure
+ *
+ * Return: zero when init was successful, negative values otherwise.
+ */
+static int
+ice_init_tx_topology(struct ice_hw *hw, const struct firmware *firmware)
+{
+ u8 num_tx_sched_layers = hw->num_tx_sched_layers;
+ struct ice_pf *pf = hw->back;
+ struct device *dev;
+ int err;
+
+ dev = ice_pf_to_dev(pf);
+ err = ice_cfg_tx_topo(hw, firmware->data, firmware->size);
+ if (!err) {
+ if (hw->num_tx_sched_layers > num_tx_sched_layers)
+ dev_info(dev, "Tx scheduling layers switching feature disabled\n");
+ else
+ dev_info(dev, "Tx scheduling layers switching feature enabled\n");
+ return 0;
+ } else if (err == -ENODEV) {
+ /* If we failed to re-initialize the device, we can no longer
+ * continue loading.
+ */
+ dev_warn(dev, "Failed to initialize hardware after applying Tx scheduling configuration.\n");
+ return err;
+ } else if (err == -EIO) {
+ dev_info(dev, "DDP package does not support Tx scheduling layers switching feature - please update to the latest DDP package and try again\n");
+ return 0;
+ } else if (err == -EEXIST) {
+ return 0;
+ }
+
+ /* Do not treat this as a fatal error. */
+ dev_info(dev, "Failed to apply Tx scheduling configuration, err %pe\n",
+ ERR_PTR(err));
+ return 0;
+}
+
+/**
+ * ice_init_supported_rxdids - Initialize supported Rx descriptor IDs
+ * @hw: pointer to the hardware structure
+ * @pf: pointer to pf structure
+ *
+ * The pf->supported_rxdids bitmap is used to indicate to VFs which descriptor
+ * formats the PF hardware supports. The exact list of supported RXDIDs
+ * depends on the loaded DDP package. The IDs can be determined by reading the
+ * GLFLXP_RXDID_FLAGS register after the DDP package is loaded.
+ *
+ * Note that the legacy 32-byte RXDID 0 is always supported but is not listed
+ * in the DDP package. The 16-byte legacy descriptor is never supported by
+ * VFs.
+ */
+static void ice_init_supported_rxdids(struct ice_hw *hw, struct ice_pf *pf)
+{
+ pf->supported_rxdids = BIT(ICE_RXDID_LEGACY_1);
+
+ for (int i = ICE_RXDID_FLEX_NIC; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) {
+ u32 regval;
+
+ regval = rd32(hw, GLFLXP_RXDID_FLAGS(i, 0));
+ if ((regval >> GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S)
+ & GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M)
+ pf->supported_rxdids |= BIT(i);
}
+}
+
+/**
+ * ice_init_ddp_config - DDP related configuration
+ * @hw: pointer to the hardware structure
+ * @pf: pointer to pf structure
+ *
+ * This function loads DDP file from the disk, then initializes Tx
+ * topology. At the end DDP package is loaded on the card.
+ *
+ * Return: zero when init was successful, negative values otherwise.
+ */
+static int ice_init_ddp_config(struct ice_hw *hw, struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ const struct firmware *firmware = NULL;
+ int err;
-dflt_pkg_load:
- err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev);
+ err = ice_request_fw(pf, &firmware);
if (err) {
- dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
- return;
+ dev_err(dev, "Fail during requesting FW: %d\n", err);
+ return err;
}
- /* request for firmware was successful. Download to device */
+ err = ice_init_tx_topology(hw, firmware);
+ if (err) {
+ dev_err(dev, "Fail during initialization of Tx topology: %d\n",
+ err);
+ release_firmware(firmware);
+ return err;
+ }
+
+ /* Download firmware to device */
ice_load_pkg(firmware, pf);
release_firmware(firmware);
+
+ /* Initialize the supported Rx descriptor IDs after loading DDP */
+ ice_init_supported_rxdids(hw, pf);
+
+ return 0;
}
/**
@@ -4483,19 +4655,6 @@ static void ice_print_wake_reason(struct ice_pf *pf)
}
/**
- * ice_pf_fwlog_update_module - update 1 module
- * @pf: pointer to the PF struct
- * @log_level: log_level to use for the @module
- * @module: module to update
- */
-void ice_pf_fwlog_update_module(struct ice_pf *pf, int log_level, int module)
-{
- struct ice_hw *hw = &pf->hw;
-
- hw->fwlog_cfg.module_entries[module].log_level = log_level;
-}
-
-/**
* ice_register_netdev - register netdev
* @vsi: pointer to the VSI struct
*/
@@ -4574,64 +4733,20 @@ static void ice_decfg_netdev(struct ice_vsi *vsi)
vsi->netdev = NULL;
}
-/**
- * ice_wait_for_fw - wait for full FW readiness
- * @hw: pointer to the hardware structure
- * @timeout: milliseconds that can elapse before timing out
- */
-static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout)
-{
- int fw_loading;
- u32 elapsed = 0;
-
- while (elapsed <= timeout) {
- fw_loading = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_LOADING_M;
-
- /* firmware was not yet loaded, we have to wait more */
- if (fw_loading) {
- elapsed += 100;
- msleep(100);
- continue;
- }
- return 0;
- }
-
- return -ETIMEDOUT;
-}
-
-int ice_init_dev(struct ice_pf *pf)
+void ice_init_dev_hw(struct ice_pf *pf)
{
- struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
int err;
- err = ice_init_hw(hw);
- if (err) {
- dev_err(dev, "ice_init_hw failed: %d\n", err);
- return err;
- }
-
- /* Some cards require longer initialization times
- * due to necessity of loading FW from an external source.
- * This can take even half a minute.
- */
- if (ice_is_pf_c827(hw)) {
- err = ice_wait_for_fw(hw, 30000);
- if (err) {
- dev_err(dev, "ice_wait_for_fw timed out");
- return err;
- }
- }
-
ice_init_feature_support(pf);
- ice_request_fw(pf);
+ err = ice_init_ddp_config(hw, pf);
- /* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be
+ /* if ice_init_ddp_config fails, ICE_FLAG_ADV_FEATURES bit won't be
* set in pf->state, which will cause ice_is_safe_mode to return
* true
*/
- if (ice_is_safe_mode(pf)) {
+ if (err || ice_is_safe_mode(pf)) {
/* we already got function/device capabilities but these don't
* reflect what the driver needs to do in safe mode. Instead of
* adding conditional logic everywhere to ignore these
@@ -4639,64 +4754,28 @@ int ice_init_dev(struct ice_pf *pf)
*/
ice_set_safe_mode_caps(hw);
}
+}
- err = ice_init_pf(pf);
- if (err) {
- dev_err(dev, "ice_init_pf failed: %d\n", err);
- goto err_init_pf;
- }
-
- pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
- pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
- pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
- pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
- if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
- pf->hw.udp_tunnel_nic.tables[0].n_entries =
- pf->hw.tnl.valid_count[TNL_VXLAN];
- pf->hw.udp_tunnel_nic.tables[0].tunnel_types =
- UDP_TUNNEL_TYPE_VXLAN;
- }
- if (pf->hw.tnl.valid_count[TNL_GENEVE]) {
- pf->hw.udp_tunnel_nic.tables[1].n_entries =
- pf->hw.tnl.valid_count[TNL_GENEVE];
- pf->hw.udp_tunnel_nic.tables[1].tunnel_types =
- UDP_TUNNEL_TYPE_GENEVE;
- }
+int ice_init_dev(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ int err;
+ ice_set_pf_caps(pf);
err = ice_init_interrupt_scheme(pf);
if (err) {
dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
- err = -EIO;
- goto err_init_interrupt_scheme;
+ return -EIO;
}
- /* In case of MSIX we are going to setup the misc vector right here
- * to handle admin queue events etc. In case of legacy and MSI
- * the misc functionality and queue processing is combined in
- * the same vector and that gets setup at open.
- */
- err = ice_req_irq_msix_misc(pf);
- if (err) {
- dev_err(dev, "setup of misc vector failed: %d\n", err);
- goto err_req_irq_msix_misc;
- }
+ ice_start_service_task(pf);
return 0;
-
-err_req_irq_msix_misc:
- ice_clear_interrupt_scheme(pf);
-err_init_interrupt_scheme:
- ice_deinit_pf(pf);
-err_init_pf:
- ice_deinit_hw(hw);
- return err;
}
void ice_deinit_dev(struct ice_pf *pf)
{
- ice_free_irq_msix_misc(pf);
- ice_deinit_pf(pf);
- ice_deinit_hw(&pf->hw);
+ ice_service_task_stop(pf);
/* Service task is already stopped, so call reset directly. */
ice_reset(&pf->hw, ICE_RESET_PFR);
@@ -4921,12 +5000,14 @@ static int ice_init_devlink(struct ice_pf *pf)
ice_devlink_init_regions(pf);
ice_devlink_register(pf);
+ ice_health_init(pf);
return 0;
}
static void ice_deinit_devlink(struct ice_pf *pf)
{
+ ice_health_deinit(pf);
ice_devlink_unregister(pf);
ice_devlink_destroy_regions(pf);
ice_devlink_unregister_params(pf);
@@ -4934,15 +5015,24 @@ static void ice_deinit_devlink(struct ice_pf *pf)
static int ice_init(struct ice_pf *pf)
{
+ struct device *dev = ice_pf_to_dev(pf);
int err;
- err = ice_init_dev(pf);
- if (err)
+ err = ice_init_pf(pf);
+ if (err) {
+ dev_err(dev, "ice_init_pf failed: %d\n", err);
return err;
+ }
+
+ if (pf->hw.mac_type == ICE_MAC_E830) {
+ err = pci_enable_ptm(pf->pdev, NULL);
+ if (err)
+ dev_dbg(dev, "PCIe PTM not supported by PCIe bus/controller\n");
+ }
err = ice_alloc_vsis(pf);
if (err)
- goto err_alloc_vsis;
+ goto unroll_pf_init;
err = ice_init_pf_sw(pf);
if (err)
@@ -4979,8 +5069,8 @@ err_init_link:
ice_deinit_pf_sw(pf);
err_init_pf_sw:
ice_dealloc_vsis(pf);
-err_alloc_vsis:
- ice_deinit_dev(pf);
+unroll_pf_init:
+ ice_deinit_pf(pf);
return err;
}
@@ -4991,7 +5081,7 @@ static void ice_deinit(struct ice_pf *pf)
ice_deinit_pf_sw(pf);
ice_dealloc_vsis(pf);
- ice_deinit_dev(pf);
+ ice_deinit_pf(pf);
}
/**
@@ -5039,11 +5129,12 @@ int ice_load(struct ice_pf *pf)
ice_napi_add(vsi);
+ ice_init_features(pf);
+
err = ice_init_rdma(pf);
if (err)
goto err_init_rdma;
- ice_init_features(pf);
ice_service_task_restart(pf);
clear_bit(ICE_DOWN, pf->state);
@@ -5051,6 +5142,7 @@ int ice_load(struct ice_pf *pf)
return 0;
err_init_rdma:
+ ice_deinit_features(pf);
ice_tc_indir_block_unregister(vsi);
err_tc_indir_block_register:
ice_unregister_netdev(vsi);
@@ -5074,14 +5166,44 @@ void ice_unload(struct ice_pf *pf)
devl_assert_locked(priv_to_devlink(pf));
- ice_deinit_features(pf);
ice_deinit_rdma(pf);
+ ice_deinit_features(pf);
ice_tc_indir_block_unregister(vsi);
ice_unregister_netdev(vsi);
ice_devlink_destroy_pf_port(pf);
ice_decfg_netdev(vsi);
}
+static int ice_probe_recovery_mode(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ int err;
+
+ dev_err(dev, "Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode\n");
+
+ INIT_HLIST_HEAD(&pf->aq_wait_list);
+ spin_lock_init(&pf->aq_wait_lock);
+ init_waitqueue_head(&pf->aq_wait_queue);
+
+ timer_setup(&pf->serv_tmr, ice_service_timer, 0);
+ pf->serv_tmr_period = HZ;
+ INIT_WORK(&pf->serv_task, ice_service_task_recovery_mode);
+ clear_bit(ICE_SERVICE_SCHED, pf->state);
+ err = ice_create_all_ctrlq(&pf->hw);
+ if (err)
+ return err;
+
+ scoped_guard(devl, priv_to_devlink(pf)) {
+ err = ice_init_devlink(pf);
+ if (err)
+ return err;
+ }
+
+ ice_service_task_restart(pf);
+
+ return 0;
+}
+
/**
* ice_probe - Device initialization routine
* @pdev: PCI device information struct
@@ -5093,6 +5215,8 @@ static int
ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
{
struct device *dev = &pdev->dev;
+ bool need_dev_deinit = false;
+ struct ice_adapter *adapter;
struct ice_pf *pf;
struct ice_hw *hw;
int err;
@@ -5144,7 +5268,6 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
}
pci_set_master(pdev);
-
pf->pdev = pdev;
pci_set_drvdata(pdev, pf);
set_bit(ICE_DOWN, pf->state);
@@ -5173,30 +5296,55 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
hw->debug_mask = debug;
#endif
+ if (ice_is_recovery_mode(hw))
+ return ice_probe_recovery_mode(pf);
+
+ err = ice_init_hw(hw);
+ if (err) {
+ dev_err(dev, "ice_init_hw failed: %d\n", err);
+ return err;
+ }
+
+ adapter = ice_adapter_get(pdev);
+ if (IS_ERR(adapter)) {
+ err = PTR_ERR(adapter);
+ goto unroll_hw_init;
+ }
+ pf->adapter = adapter;
+
+ err = ice_init_dev(pf);
+ if (err)
+ goto unroll_adapter;
+
err = ice_init(pf);
if (err)
- goto err_init;
+ goto unroll_dev_init;
devl_lock(priv_to_devlink(pf));
err = ice_load(pf);
- devl_unlock(priv_to_devlink(pf));
if (err)
- goto err_load;
+ goto unroll_init;
err = ice_init_devlink(pf);
if (err)
- goto err_init_devlink;
+ goto unroll_load;
+ devl_unlock(priv_to_devlink(pf));
return 0;
-err_init_devlink:
- devl_lock(priv_to_devlink(pf));
+unroll_load:
ice_unload(pf);
+unroll_init:
devl_unlock(priv_to_devlink(pf));
-err_load:
ice_deinit(pf);
-err_init:
- pci_disable_device(pdev);
+unroll_dev_init:
+ need_dev_deinit = true;
+unroll_adapter:
+ ice_adapter_put(pdev);
+unroll_hw_init:
+ ice_deinit_hw(hw);
+ if (need_dev_deinit)
+ ice_deinit_dev(pf);
return err;
}
@@ -5258,7 +5406,7 @@ static void ice_setup_mc_magic_wake(struct ice_pf *pf)
status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
if (status)
dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %d aq_err %s\n",
- status, ice_aq_str(hw->adminq.sq_last_status));
+ status, libie_aq_str(hw->adminq.sq_last_status));
}
/**
@@ -5276,6 +5424,14 @@ static void ice_remove(struct pci_dev *pdev)
msleep(100);
}
+ if (ice_is_recovery_mode(&pf->hw)) {
+ ice_service_task_stop(pf);
+ scoped_guard(devl, priv_to_devlink(pf)) {
+ ice_deinit_devlink(pf);
+ }
+ return;
+ }
+
if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
set_bit(ICE_VF_RESETS_DISABLED, pf->state);
ice_free_vfs(pf);
@@ -5283,16 +5439,13 @@ static void ice_remove(struct pci_dev *pdev)
ice_hwmon_exit(pf);
- ice_service_task_stop(pf);
- ice_aq_cancel_waiting_tasks(pf);
- set_bit(ICE_DOWN, pf->state);
-
if (!ice_is_safe_mode(pf))
ice_remove_arfs(pf);
+ devl_lock(priv_to_devlink(pf));
+ ice_dealloc_all_dynamic_ports(pf);
ice_deinit_devlink(pf);
- devl_lock(priv_to_devlink(pf));
ice_unload(pf);
devl_unlock(priv_to_devlink(pf));
@@ -5302,7 +5455,12 @@ static void ice_remove(struct pci_dev *pdev)
ice_setup_mc_magic_wake(pf);
ice_set_wake(pf);
- pci_disable_device(pdev);
+ ice_adapter_put(pdev);
+ ice_deinit_hw(&pf->hw);
+
+ ice_deinit_dev(pf);
+ ice_aq_cancel_waiting_tasks(pf);
+ set_bit(ICE_DOWN, pf->state);
}
/**
@@ -5321,7 +5479,6 @@ static void ice_shutdown(struct pci_dev *pdev)
}
}
-#ifdef CONFIG_PM
/**
* ice_prepare_for_shutdown - prep for PCI shutdown
* @pf: board private structure
@@ -5346,7 +5503,7 @@ static void ice_prepare_for_shutdown(struct ice_pf *pf)
if (pf->vsi[v])
pf->vsi[v]->vsi_num = 0;
- ice_shutdown_all_ctrlq(hw);
+ ice_shutdown_all_ctrlq(hw, true);
}
/**
@@ -5383,7 +5540,9 @@ static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
if (ret)
goto err_reinit;
ice_vsi_map_rings_to_vectors(pf->vsi[v]);
+ rtnl_lock();
ice_vsi_set_napi_queues(pf->vsi[v]);
+ rtnl_unlock();
}
ret = ice_req_irq_msix_misc(pf);
@@ -5397,8 +5556,12 @@ static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
err_reinit:
while (v--)
- if (pf->vsi[v])
+ if (pf->vsi[v]) {
+ rtnl_lock();
+ ice_vsi_clear_napi_queues(pf->vsi[v]);
+ rtnl_unlock();
ice_vsi_free_q_vectors(pf->vsi[v]);
+ }
return ret;
}
@@ -5410,7 +5573,7 @@ err_reinit:
* Power Management callback to quiesce the device and prepare
* for D3 transition.
*/
-static int __maybe_unused ice_suspend(struct device *dev)
+static int ice_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct ice_pf *pf;
@@ -5431,7 +5594,7 @@ static int __maybe_unused ice_suspend(struct device *dev)
*/
disabled = ice_service_task_stop(pf);
- ice_unplug_aux_dev(pf);
+ ice_deinit_rdma(pf);
/* Already suspended?, then there is nothing to do */
if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
@@ -5463,6 +5626,9 @@ static int __maybe_unused ice_suspend(struct device *dev)
ice_for_each_vsi(pf, v) {
if (!pf->vsi[v])
continue;
+ rtnl_lock();
+ ice_vsi_clear_napi_queues(pf->vsi[v]);
+ rtnl_unlock();
ice_vsi_free_q_vectors(pf->vsi[v]);
}
ice_clear_interrupt_scheme(pf);
@@ -5477,7 +5643,7 @@ static int __maybe_unused ice_suspend(struct device *dev)
* ice_resume - PM callback for waking up from D3
* @dev: generic device information structure
*/
-static int __maybe_unused ice_resume(struct device *dev)
+static int ice_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
enum ice_reset_req reset_type;
@@ -5487,7 +5653,6 @@ static int __maybe_unused ice_resume(struct device *dev)
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
- pci_save_state(pdev);
if (!pci_device_is_present(pdev))
return -ENODEV;
@@ -5511,6 +5676,11 @@ static int __maybe_unused ice_resume(struct device *dev)
if (ret)
dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
+ ret = ice_init_rdma(pf);
+ if (ret)
+ dev_err(dev, "Reinitialize RDMA during resume failed: %d\n",
+ ret);
+
clear_bit(ICE_DOWN, pf->state);
/* Now perform PF reset and rebuild */
reset_type = ICE_RESET_PFR;
@@ -5528,7 +5698,6 @@ static int __maybe_unused ice_resume(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM */
/**
* ice_pci_err_detected - warning that PCI error has been detected
@@ -5583,7 +5752,6 @@ static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
} else {
pci_set_master(pdev);
pci_restore_state(pdev);
- pci_save_state(pdev);
pci_wake_from_d3(pdev, false);
/* Check for life */
@@ -5693,16 +5861,31 @@ static const struct pci_device_id ice_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_QSFP), },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SFP), },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SGMII), },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_BACKPLANE) },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_QSFP56) },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_SFP) },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_SFP_DD) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_BACKPLANE) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_QSFP56) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_SFP) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_SFP_DD) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_BACKPLANE), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_BACKPLANE), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_QSFP), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_QSFP), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_SFP), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_SFP), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835CC_BACKPLANE), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835CC_QSFP56), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835CC_SFP), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835C_BACKPLANE), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835C_QSFP), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835C_SFP), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835_L_BACKPLANE), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835_L_QSFP), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835_L_SFP), },
/* required last entry */
{}
};
MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
-static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);
static const struct pci_error_handlers ice_pci_err_handler = {
.error_detected = ice_pci_err_detected,
@@ -5717,9 +5900,7 @@ static struct pci_driver ice_driver = {
.id_table = ice_pci_tbl,
.probe = ice_probe,
.remove = ice_remove,
-#ifdef CONFIG_PM
- .driver.pm = &ice_pm_ops,
-#endif /* CONFIG_PM */
+ .driver.pm = pm_sleep_ptr(&ice_pm_ops),
.shutdown = ice_shutdown,
.sriov_configure = ice_sriov_configure,
.sriov_get_vf_total_msix = ice_sriov_get_vf_total_msix,
@@ -5742,7 +5923,7 @@ static int __init ice_module_init(void)
ice_adv_lnk_speed_maps_init();
- ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME);
+ ice_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, KBUILD_MODNAME);
if (!ice_wq) {
pr_err("Failed to create workqueue\n");
return status;
@@ -5762,8 +5943,16 @@ static int __init ice_module_init(void)
goto err_dest_lag_wq;
}
+ status = ice_sf_driver_register();
+ if (status) {
+ pr_err("Failed to register SF driver, err %d\n", status);
+ goto err_sf_driver;
+ }
+
return 0;
+err_sf_driver:
+ pci_unregister_driver(&ice_driver);
err_dest_lag_wq:
destroy_workqueue(ice_lag_wq);
ice_debugfs_exit();
@@ -5781,6 +5970,7 @@ module_init(ice_module_init);
*/
static void __exit ice_module_exit(void)
{
+ ice_sf_driver_unregister();
pci_unregister_driver(&ice_driver);
ice_debugfs_exit();
destroy_workqueue(ice_wq);
@@ -5958,12 +6148,14 @@ ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
* @addr: the MAC address entry being added
* @vid: VLAN ID
* @flags: instructions from stack about fdb operation
+ * @notified: whether notification was emitted
* @extack: netlink extended ack
*/
static int
ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
struct net_device *dev, const unsigned char *addr, u16 vid,
- u16 flags, struct netlink_ext_ack __always_unused *extack)
+ u16 flags, bool *notified,
+ struct netlink_ext_ack __always_unused *extack)
{
int err;
@@ -5997,12 +6189,14 @@ ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
* @dev: the net device pointer
* @addr: the MAC address entry being added
* @vid: VLAN ID
+ * @notified: whether notification was emitted
* @extack: netlink extended ack
*/
static int
ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
struct net_device *dev, const unsigned char *addr,
- __always_unused u16 vid, struct netlink_ext_ack *extack)
+ __always_unused u16 vid, bool *notified,
+ struct netlink_ext_ack *extack)
{
int err;
@@ -6206,10 +6400,12 @@ ice_set_vlan_filtering_features(struct ice_vsi *vsi, netdev_features_t features)
int err = 0;
/* support Single VLAN Mode (SVM) and Double VLAN Mode (DVM) by checking
- * if either bit is set
+ * if either bit is set. In switchdev mode Rx filtering should never be
+ * enabled.
*/
- if (features &
- (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER))
+ if ((features &
+ (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)) &&
+ !ice_is_eswitch_mode_switchdev(vsi->back))
err = vlan_ops->ena_rx_filtering(vsi);
else
err = vlan_ops->dis_rx_filtering(vsi);
@@ -6357,13 +6553,24 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
if (changed & NETIF_F_HW_TC) {
bool ena = !!(features & NETIF_F_HW_TC);
- ena ? set_bit(ICE_FLAG_CLS_FLOWER, pf->flags) :
- clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
+ assign_bit(ICE_FLAG_CLS_FLOWER, pf->flags, ena);
}
if (changed & NETIF_F_LOOPBACK)
ret = ice_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK));
+ /* Due to E830 hardware limitations, TSO (NETIF_F_ALL_TSO) with GCS
+ * (NETIF_F_HW_CSUM) is not supported.
+ */
+ if (ice_is_feature_supported(pf, ICE_F_GCS) &&
+ ((features & NETIF_F_HW_CSUM) && (features & NETIF_F_ALL_TSO))) {
+ if (netdev->features & NETIF_F_HW_CSUM)
+ dev_err(ice_pf_to_dev(pf), "To enable TSO, you must first disable HW checksum.\n");
+ else
+ dev_err(ice_pf_to_dev(pf), "To enable HW checksum, you must first disable TSO.\n");
+ return -EIO;
+ }
+
return ret;
}
@@ -6582,11 +6789,12 @@ static int ice_up_complete(struct ice_vsi *vsi)
if (vsi->port_info &&
(vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
- vsi->netdev && vsi->type == ICE_VSI_PF) {
+ ((vsi->netdev && (vsi->type == ICE_VSI_PF ||
+ vsi->type == ICE_VSI_SF)))) {
ice_print_link_msg(vsi, true);
netif_tx_start_all_queues(vsi->netdev);
netif_carrier_on(vsi->netdev);
- ice_ptp_link_change(pf, pf->hw.pf_id, true);
+ ice_ptp_link_change(pf, true);
}
/* Perform an initial read of the statistics registers now to
@@ -6918,6 +7126,9 @@ void ice_update_pf_stats(struct ice_pf *pf)
&prev_ps->mac_remote_faults,
&cur_ps->mac_remote_faults);
+ ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded,
+ &prev_ps->rx_len_errors, &cur_ps->rx_len_errors);
+
ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
&prev_ps->rx_undersize, &cur_ps->rx_undersize);
@@ -6940,7 +7151,6 @@ void ice_update_pf_stats(struct ice_pf *pf)
* @netdev: network interface device structure
* @stats: main device statistics structure
*/
-static
void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
@@ -7055,13 +7265,11 @@ int ice_down(struct ice_vsi *vsi)
WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state));
- if (vsi->netdev && vsi->type == ICE_VSI_PF) {
+ if (vsi->netdev) {
vlan_err = ice_vsi_del_vlan_zero(vsi);
- ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false);
+ ice_ptp_link_change(vsi->back, false);
netif_carrier_off(vsi->netdev);
netif_tx_disable(vsi->netdev);
- } else if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
- ice_eswitch_stop_all_tx_queues(vsi->back);
}
ice_vsi_dis_irq(vsi);
@@ -7070,7 +7278,7 @@ int ice_down(struct ice_vsi *vsi)
if (tx_err)
netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n",
vsi->vsi_num, tx_err);
- if (!tx_err && ice_is_xdp_ena_vsi(vsi)) {
+ if (!tx_err && vsi->xdp_rings) {
tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
if (tx_err)
netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n",
@@ -7087,7 +7295,7 @@ int ice_down(struct ice_vsi *vsi)
ice_for_each_txq(vsi, i)
ice_clean_tx_ring(vsi->tx_rings[i]);
- if (ice_is_xdp_ena_vsi(vsi))
+ if (vsi->xdp_rings)
ice_for_each_xdp_txq(vsi, i)
ice_clean_tx_ring(vsi->xdp_rings[i]);
@@ -7281,9 +7489,10 @@ int ice_vsi_open(struct ice_vsi *vsi)
if (err)
goto err_setup_rx;
- ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
+ if (bitmap_empty(pf->txtime_txqs, pf->max_pf_txqs))
+ ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
- if (vsi->type == ICE_VSI_PF) {
+ if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_SF) {
/* Notify the stack of the actual queue counts. */
err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
if (err)
@@ -7292,6 +7501,8 @@ int ice_vsi_open(struct ice_vsi *vsi)
err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
if (err)
goto err_set_qs;
+
+ ice_vsi_set_napi_queues(vsi);
}
err = ice_up_complete(vsi);
@@ -7429,6 +7640,7 @@ static void ice_update_pf_netdev_link(struct ice_pf *pf)
*/
static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
{
+ struct ice_vsi *vsi = ice_get_main_vsi(pf);
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
bool dvm;
@@ -7544,12 +7756,6 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
goto err_vsi_rebuild;
}
- err = ice_eswitch_rebuild(pf);
- if (err) {
- dev_err(dev, "Switchdev rebuild failed: %d\n", err);
- goto err_vsi_rebuild;
- }
-
if (reset_type == ICE_RESET_PFR) {
err = ice_rebuild_channels(pf);
if (err) {
@@ -7577,6 +7783,9 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
ice_rebuild_arfs(pf);
}
+ if (vsi && vsi->netdev)
+ netif_device_attach(vsi->netdev);
+
ice_update_pf_netdev_link(pf);
/* tell the firmware we are up */
@@ -7592,6 +7801,8 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
/* if we get here, reset flow is successful */
clear_bit(ICE_RESET_FAILED, pf->state);
+ ice_health_clear(pf);
+
ice_plug_aux_dev(pf);
if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG))
ice_lag_rebuild(pf);
@@ -7604,7 +7815,7 @@ err_vsi_rebuild:
err_sched_init_port:
ice_sched_cleanup_all(hw);
err_init_ctrlq:
- ice_shutdown_all_ctrlq(hw);
+ ice_shutdown_all_ctrlq(hw, false);
set_bit(ICE_RESET_FAILED, pf->state);
clear_recovery:
/* set this bit in PF state to control service task scheduling */
@@ -7619,7 +7830,7 @@ clear_recovery:
*
* Returns 0 on success, negative on failure
*/
-static int ice_change_mtu(struct net_device *netdev, int new_mtu)
+int ice_change_mtu(struct net_device *netdev, int new_mtu)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
@@ -7642,12 +7853,6 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
frame_size - ICE_ETH_PKT_HDR_PAD);
return -EINVAL;
}
- } else if (test_bit(ICE_FLAG_LEGACY_RX, pf->flags)) {
- if (new_mtu + ICE_ETH_PKT_HDR_PAD > ICE_MAX_FRAME_LEGACY_RX) {
- netdev_err(netdev, "Too big MTU for legacy-rx; Max is %d\n",
- ICE_MAX_FRAME_LEGACY_RX - ICE_ETH_PKT_HDR_PAD);
- return -EINVAL;
- }
}
/* if a reset is in progress, wait for some time for it to complete */
@@ -7666,7 +7871,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
return -EBUSY;
}
- netdev->mtu = (unsigned int)new_mtu;
+ WRITE_ONCE(netdev->mtu, (unsigned int)new_mtu);
err = ice_down_up(vsi);
if (err)
return err;
@@ -7678,69 +7883,6 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
}
/**
- * ice_eth_ioctl - Access the hwtstamp interface
- * @netdev: network interface device structure
- * @ifr: interface request data
- * @cmd: ioctl command
- */
-static int ice_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_pf *pf = np->vsi->back;
-
- switch (cmd) {
- case SIOCGHWTSTAMP:
- return ice_ptp_get_ts_config(pf, ifr);
- case SIOCSHWTSTAMP:
- return ice_ptp_set_ts_config(pf, ifr);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-/**
- * ice_aq_str - convert AQ err code to a string
- * @aq_err: the AQ error code to convert
- */
-const char *ice_aq_str(enum ice_aq_err aq_err)
-{
- switch (aq_err) {
- case ICE_AQ_RC_OK:
- return "OK";
- case ICE_AQ_RC_EPERM:
- return "ICE_AQ_RC_EPERM";
- case ICE_AQ_RC_ENOENT:
- return "ICE_AQ_RC_ENOENT";
- case ICE_AQ_RC_ENOMEM:
- return "ICE_AQ_RC_ENOMEM";
- case ICE_AQ_RC_EBUSY:
- return "ICE_AQ_RC_EBUSY";
- case ICE_AQ_RC_EEXIST:
- return "ICE_AQ_RC_EEXIST";
- case ICE_AQ_RC_EINVAL:
- return "ICE_AQ_RC_EINVAL";
- case ICE_AQ_RC_ENOSPC:
- return "ICE_AQ_RC_ENOSPC";
- case ICE_AQ_RC_ENOSYS:
- return "ICE_AQ_RC_ENOSYS";
- case ICE_AQ_RC_EMODE:
- return "ICE_AQ_RC_EMODE";
- case ICE_AQ_RC_ENOSEC:
- return "ICE_AQ_RC_ENOSEC";
- case ICE_AQ_RC_EBADSIG:
- return "ICE_AQ_RC_EBADSIG";
- case ICE_AQ_RC_ESVN:
- return "ICE_AQ_RC_ESVN";
- case ICE_AQ_RC_EBADMAN:
- return "ICE_AQ_RC_EBADMAN";
- case ICE_AQ_RC_EBADBUF:
- return "ICE_AQ_RC_EBADBUF";
- }
-
- return "ICE_AQ_RC_UNKNOWN";
-}
-
-/**
* ice_set_rss_lut - Set RSS LUT
* @vsi: Pointer to VSI structure
* @lut: Lookup table
@@ -7765,7 +7907,7 @@ int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
status = ice_aq_set_rss_lut(hw, &params);
if (status)
dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n",
- status, ice_aq_str(hw->adminq.sq_last_status));
+ status, libie_aq_str(hw->adminq.sq_last_status));
return status;
}
@@ -7788,7 +7930,7 @@ int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed)
status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
if (status)
dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %d aq_err %s\n",
- status, ice_aq_str(hw->adminq.sq_last_status));
+ status, libie_aq_str(hw->adminq.sq_last_status));
return status;
}
@@ -7818,7 +7960,7 @@ int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
status = ice_aq_get_rss_lut(hw, &params);
if (status)
dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n",
- status, ice_aq_str(hw->adminq.sq_last_status));
+ status, libie_aq_str(hw->adminq.sq_last_status));
return status;
}
@@ -7841,7 +7983,7 @@ int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
if (status)
dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n",
- status, ice_aq_str(hw->adminq.sq_last_status));
+ status, libie_aq_str(hw->adminq.sq_last_status));
return status;
}
@@ -7914,9 +8056,7 @@ static int
ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct net_device *dev, u32 filter_mask, int nlflags)
{
- struct ice_netdev_priv *np = netdev_priv(dev);
- struct ice_vsi *vsi = np->vsi;
- struct ice_pf *pf = vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(dev);
u16 bmode;
bmode = pf->first_sw->bridge_mode;
@@ -7958,7 +8098,7 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (ret) {
dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\n",
- bmode, ret, ice_aq_str(hw->adminq.sq_last_status));
+ bmode, ret, libie_aq_str(hw->adminq.sq_last_status));
goto out;
}
/* Update sw flags for book keeping */
@@ -7986,8 +8126,7 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
u16 __always_unused flags,
struct netlink_ext_ack __always_unused *extack)
{
- struct ice_netdev_priv *np = netdev_priv(dev);
- struct ice_pf *pf = np->vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(dev);
struct nlattr *attr, *br_spec;
struct ice_hw *hw = &pf->hw;
struct ice_sw *pf_sw;
@@ -7999,12 +8138,9 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
if (!br_spec)
return -EINVAL;
- nla_for_each_nested(attr, br_spec, rem) {
- __u16 mode;
+ nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
+ __u16 mode = nla_get_u16(attr);
- if (nla_type(attr) != IFLA_BRIDGE_MODE)
- continue;
- mode = nla_get_u16(attr);
if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
return -EINVAL;
/* Continue if bridge mode is not being flipped */
@@ -8029,7 +8165,7 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
if (err) {
netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %s\n",
mode, err,
- ice_aq_str(hw->adminq.sq_last_status));
+ libie_aq_str(hw->adminq.sq_last_status));
/* revert hw->evb_veb */
hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
return err;
@@ -8046,7 +8182,7 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
* @netdev: network interface device structure
* @txqueue: Tx queue
*/
-static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_tx_ring *tx_ring = NULL;
@@ -8085,16 +8221,18 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
if (tx_ring) {
struct ice_hw *hw = &pf->hw;
- u32 head, val = 0;
+ u32 head, intr = 0;
head = FIELD_GET(QTX_COMM_HEAD_HEAD_M,
rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])));
/* Read interrupt register */
- val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
+ intr = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
vsi->vsi_num, txqueue, tx_ring->next_to_clean,
- head, tx_ring->next_to_use, val);
+ head, tx_ring->next_to_use, intr);
+
+ ice_prep_tx_hang_report(pf, tx_ring, vsi->vsi_num, head, intr);
}
pf->tx_timeout_last_recovery = jiffies;
@@ -8128,11 +8266,16 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
* @np: net device to configure
* @filter_dev: device on which filter is added
* @cls_flower: offload data
+ * @ingress: if the rule is added to an ingress block
+ *
+ * Return: 0 if the flower was successfully added or deleted,
+ * negative error code otherwise.
*/
static int
ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
struct net_device *filter_dev,
- struct flow_cls_offload *cls_flower)
+ struct flow_cls_offload *cls_flower,
+ bool ingress)
{
struct ice_vsi *vsi = np->vsi;
@@ -8141,7 +8284,7 @@ ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
switch (cls_flower->command) {
case FLOW_CLS_REPLACE:
- return ice_add_cls_flower(filter_dev, vsi, cls_flower);
+ return ice_add_cls_flower(filter_dev, vsi, cls_flower, ingress);
case FLOW_CLS_DESTROY:
return ice_del_cls_flower(vsi, cls_flower);
default:
@@ -8150,20 +8293,46 @@ ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
}
/**
- * ice_setup_tc_block_cb - callback handler registered for TC block
+ * ice_setup_tc_block_cb_ingress - callback handler for ingress TC block
* @type: TC SETUP type
* @type_data: TC flower offload data that contains user input
* @cb_priv: netdev private data
+ *
+ * Return: 0 if the setup was successful, negative error code otherwise.
*/
static int
-ice_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+ice_setup_tc_block_cb_ingress(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
{
struct ice_netdev_priv *np = cb_priv;
switch (type) {
case TC_SETUP_CLSFLOWER:
return ice_setup_tc_cls_flower(np, np->vsi->netdev,
- type_data);
+ type_data, true);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
+ * ice_setup_tc_block_cb_egress - callback handler for egress TC block
+ * @type: TC SETUP type
+ * @type_data: TC flower offload data that contains user input
+ * @cb_priv: netdev private data
+ *
+ * Return: 0 if the setup was successful, negative error code otherwise.
+ */
+static int
+ice_setup_tc_block_cb_egress(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct ice_netdev_priv *np = cb_priv;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return ice_setup_tc_cls_flower(np, np->vsi->netdev,
+ type_data, false);
default:
return -EOPNOTSUPP;
}
@@ -8916,7 +9085,7 @@ static int ice_create_q_channels(struct ice_vsi *vsi)
list_add_tail(&ch->list, &vsi->ch_list);
vsi->tc_map_vsi[i] = ch->ch_vsi;
dev_dbg(ice_pf_to_dev(pf),
- "successfully created channel: VSI %pK\n", ch->ch_vsi);
+ "successfully created channel: VSI %p\n", ch->ch_vsi);
}
return 0;
@@ -9101,6 +9270,96 @@ exit:
return ret;
}
+/**
+ * ice_cfg_txtime - configure Tx Time for the Tx ring
+ * @tx_ring: pointer to the Tx ring structure
+ *
+ * Return: 0 on success, negative value on failure.
+ */
+static int ice_cfg_txtime(struct ice_tx_ring *tx_ring)
+{
+ int err, timeout = 50;
+ struct ice_vsi *vsi;
+ struct device *dev;
+ struct ice_pf *pf;
+ u32 queue;
+
+ if (!tx_ring)
+ return -EINVAL;
+
+ vsi = tx_ring->vsi;
+ pf = vsi->back;
+ while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
+ timeout--;
+ if (!timeout)
+ return -EBUSY;
+ usleep_range(1000, 2000);
+ }
+
+ queue = tx_ring->q_index;
+ dev = ice_pf_to_dev(pf);
+
+ /* Ignore return value, and always attempt to enable queue. */
+ ice_qp_dis(vsi, queue);
+
+ err = ice_qp_ena(vsi, queue);
+ if (err)
+ dev_err(dev, "Failed to enable Tx queue %d for TxTime configuration\n",
+ queue);
+
+ clear_bit(ICE_CFG_BUSY, pf->state);
+ return err;
+}
+
+/**
+ * ice_offload_txtime - set earliest TxTime first
+ * @netdev: network interface device structure
+ * @qopt_off: etf queue option offload from the skb to set
+ *
+ * Return: 0 on success, negative value on failure.
+ */
+static int ice_offload_txtime(struct net_device *netdev,
+ void *qopt_off)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_pf *pf = np->vsi->back;
+ struct tc_etf_qopt_offload *qopt;
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_tx_ring *tx_ring;
+ int ret = 0;
+
+ if (!ice_is_feature_supported(pf, ICE_F_TXTIME))
+ return -EOPNOTSUPP;
+
+ qopt = qopt_off;
+ if (!qopt_off || qopt->queue < 0 || qopt->queue >= vsi->num_txq)
+ return -EINVAL;
+
+ if (qopt->enable)
+ set_bit(qopt->queue, pf->txtime_txqs);
+ else
+ clear_bit(qopt->queue, pf->txtime_txqs);
+
+ if (netif_running(vsi->netdev)) {
+ tx_ring = vsi->tx_rings[qopt->queue];
+ ret = ice_cfg_txtime(tx_ring);
+ if (ret)
+ goto err;
+ }
+
+ netdev_info(netdev, "%s TxTime on queue: %i\n",
+ str_enable_disable(qopt->enable), qopt->queue);
+ return 0;
+
+err:
+ netdev_err(netdev, "Failed to %s TxTime on queue: %i\n",
+ str_enable_disable(qopt->enable), qopt->queue);
+
+ if (qopt->enable)
+ clear_bit(qopt->queue, pf->txtime_txqs);
+ return ret;
+}
+
static LIST_HEAD(ice_block_cb_list);
static int
@@ -9108,27 +9367,45 @@ ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
void *type_data)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
+ enum flow_block_binder_type binder_type;
+ struct iidc_rdma_core_dev_info *cdev;
struct ice_pf *pf = np->vsi->back;
+ flow_setup_cb_t *flower_handler;
bool locked = false;
int err;
switch (type) {
case TC_SETUP_BLOCK:
+ binder_type =
+ ((struct flow_block_offload *)type_data)->binder_type;
+
+ switch (binder_type) {
+ case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
+ flower_handler = ice_setup_tc_block_cb_ingress;
+ break;
+ case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
+ flower_handler = ice_setup_tc_block_cb_egress;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
return flow_block_cb_setup_simple(type_data,
&ice_block_cb_list,
- ice_setup_tc_block_cb,
- np, np, true);
+ flower_handler,
+ np, np, false);
case TC_SETUP_QDISC_MQPRIO:
if (ice_is_eswitch_mode_switchdev(pf)) {
netdev_err(netdev, "TC MQPRIO offload not supported, switchdev is enabled\n");
return -EOPNOTSUPP;
}
- if (pf->adev) {
+ cdev = pf->cdev_info;
+ if (cdev && cdev->adev) {
mutex_lock(&pf->adev_mutex);
- device_lock(&pf->adev->dev);
+ device_lock(&cdev->adev->dev);
locked = true;
- if (pf->adev->dev.driver) {
+ if (cdev->adev->dev.driver) {
netdev_err(netdev, "Cannot change qdisc when RDMA is active\n");
err = -EBUSY;
goto adev_unlock;
@@ -9142,10 +9419,12 @@ ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
adev_unlock:
if (locked) {
- device_unlock(&pf->adev->dev);
+ device_unlock(&cdev->adev->dev);
mutex_unlock(&pf->adev_mutex);
}
return err;
+ case TC_SETUP_QDISC_ETF:
+ return ice_offload_txtime(netdev, type_data);
default:
return -EOPNOTSUPP;
}
@@ -9178,7 +9457,7 @@ ice_indr_setup_block_cb(enum tc_setup_type type, void *type_data,
case TC_SETUP_CLSFLOWER:
return ice_setup_tc_cls_flower(np, priv->netdev,
(struct flow_cls_offload *)
- type_data);
+ type_data, false);
default:
return -EOPNOTSUPP;
}
@@ -9281,8 +9560,7 @@ ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
*/
int ice_open(struct net_device *netdev)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_pf *pf = np->vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
if (ice_is_reset_in_progress(pf->state)) {
netdev_err(netdev, "can't open net device while reset is in progress");
@@ -9485,7 +9763,6 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_change_mtu = ice_change_mtu,
.ndo_get_stats64 = ice_get_stats64,
.ndo_set_tx_maxrate = ice_set_tx_maxrate,
- .ndo_eth_ioctl = ice_eth_ioctl,
.ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
.ndo_set_vf_mac = ice_set_vf_mac,
.ndo_get_vf_config = ice_get_vf_cfg,
@@ -9509,4 +9786,6 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_bpf = ice_xdp,
.ndo_xdp_xmit = ice_xdp_xmit,
.ndo_xsk_wakeup = ice_xsk_wakeup,
+ .ndo_hwtstamp_get = ice_ptp_hwtstamp_get,
+ .ndo_hwtstamp_set = ice_ptp_hwtstamp_set,
};
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index d4e05d2cb30c..7e187a804dfa 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -18,15 +18,14 @@
*
* Read the NVM using the admin queue commands (0x0701)
*/
-static int
-ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
- void *data, bool last_command, bool read_shadow_ram,
- struct ice_sq_cd *cd)
+int ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
+ u16 length, void *data, bool last_command,
+ bool read_shadow_ram, struct ice_sq_cd *cd)
{
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
struct ice_aqc_nvm *cmd;
- cmd = &desc.params.nvm;
+ cmd = libie_aq_raw(&desc);
if (offset > ICE_AQC_NVM_MAX_OFFSET)
return -EINVAL;
@@ -126,10 +125,10 @@ ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
u16 length, void *data, bool last_command, u8 command_flags,
struct ice_sq_cd *cd)
{
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
struct ice_aqc_nvm *cmd;
- cmd = &desc.params.nvm;
+ cmd = libie_aq_raw(&desc);
/* In offset the highest byte must be zeroed. */
if (offset & 0xFF000000)
@@ -147,7 +146,7 @@ ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
cmd->offset_high = (offset >> 16) & 0xFF;
cmd->length = cpu_to_le16(length);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
return ice_aq_send_cmd(hw, &desc, data, length, cd);
}
@@ -162,10 +161,10 @@ ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
*/
int ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd)
{
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
struct ice_aqc_nvm *cmd;
- cmd = &desc.params.nvm;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_erase);
@@ -375,11 +374,25 @@ ice_read_nvm_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u1
*
* Read the specified word from the copy of the Shadow RAM found in the
* specified NVM module.
+ *
+ * Note that the Shadow RAM copy is always located after the CSS header, and
+ * is aligned to 64-byte (32-word) offsets.
*/
static int
ice_read_nvm_sr_copy(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data)
{
- return ice_read_nvm_module(hw, bank, ICE_NVM_SR_COPY_WORD_OFFSET + offset, data);
+ u32 sr_copy;
+
+ switch (bank) {
+ case ICE_ACTIVE_FLASH_BANK:
+ sr_copy = roundup(hw->flash.banks.active_css_hdr_len, 32);
+ break;
+ case ICE_INACTIVE_FLASH_BANK:
+ sr_copy = roundup(hw->flash.banks.inactive_css_hdr_len, 32);
+ break;
+ }
+
+ return ice_read_nvm_module(hw, bank, sr_copy + offset, data);
}
/**
@@ -441,8 +454,7 @@ int
ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
u16 module_type)
{
- u16 pfa_len, pfa_ptr;
- u16 next_tlv;
+ u16 pfa_len, pfa_ptr, next_tlv, max_tlv;
int status;
status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr);
@@ -455,11 +467,23 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n");
return status;
}
+
+ /* The Preserved Fields Area contains a sequence of Type-Length-Value
+ * structures which define its contents. The PFA length includes all
+ * of the TLVs, plus the initial length word itself, *and* one final
+ * word at the end after all of the TLVs.
+ */
+ if (check_add_overflow(pfa_ptr, pfa_len - 1, &max_tlv)) {
+ dev_warn(ice_hw_to_dev(hw), "PFA starts at offset %u. PFA length of %u caused 16-bit arithmetic overflow.\n",
+ pfa_ptr, pfa_len);
+ return -EINVAL;
+ }
+
/* Starting with first TLV after PFA length, iterate through the list
* of TLVs to find the requested one.
*/
next_tlv = pfa_ptr + 1;
- while (next_tlv < pfa_ptr + pfa_len) {
+ while (next_tlv < max_tlv) {
u16 tlv_sub_module_type;
u16 tlv_len;
@@ -483,10 +507,13 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
}
return -EINVAL;
}
- /* Check next TLV, i.e. current TLV pointer + length + 2 words
- * (for current TLV's type and length)
- */
- next_tlv = next_tlv + tlv_len + 2;
+
+ if (check_add_overflow(next_tlv, 2, &next_tlv) ||
+ check_add_overflow(next_tlv, tlv_len, &next_tlv)) {
+ dev_warn(ice_hw_to_dev(hw), "TLV of type %u and length 0x%04x caused 16-bit arithmetic overflow. The PFA starts at 0x%04x and has length of 0x%04x\n",
+ tlv_sub_module_type, tlv_len, pfa_ptr, pfa_len);
+ return -EINVAL;
+ }
}
/* Module does not exist */
return -ENOENT;
@@ -842,7 +869,7 @@ static int ice_discover_flash_size(struct ice_hw *hw)
status = ice_read_flat_nvm(hw, offset, &len, &data, false);
if (status == -EIO &&
- hw->adminq.sq_last_status == ICE_AQ_RC_EINVAL) {
+ hw->adminq.sq_last_status == LIBIE_AQ_RC_EINVAL) {
ice_debug(hw, ICE_DBG_NVM, "%s: New upper bound of %u bytes\n",
__func__, offset);
status = 0;
@@ -1011,6 +1038,72 @@ static int ice_determine_active_flash_banks(struct ice_hw *hw)
}
/**
+ * ice_get_nvm_css_hdr_len - Read the CSS header length from the NVM CSS header
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @hdr_len: storage for header length in words
+ *
+ * Read the CSS header length from the NVM CSS header and add the Authentication
+ * header size, and then convert to words.
+ *
+ * Return: zero on success, or a negative error code on failure.
+ */
+static int
+ice_get_nvm_css_hdr_len(struct ice_hw *hw, enum ice_bank_select bank,
+ u32 *hdr_len)
+{
+ u16 hdr_len_l, hdr_len_h;
+ u32 hdr_len_dword;
+ int status;
+
+ status = ice_read_nvm_module(hw, bank, ICE_NVM_CSS_HDR_LEN_L,
+ &hdr_len_l);
+ if (status)
+ return status;
+
+ status = ice_read_nvm_module(hw, bank, ICE_NVM_CSS_HDR_LEN_H,
+ &hdr_len_h);
+ if (status)
+ return status;
+
+ /* CSS header length is in DWORD, so convert to words and add
+ * authentication header size
+ */
+ hdr_len_dword = hdr_len_h << 16 | hdr_len_l;
+ *hdr_len = (hdr_len_dword * 2) + ICE_NVM_AUTH_HEADER_LEN;
+
+ return 0;
+}
+
+/**
+ * ice_determine_css_hdr_len - Discover CSS header length for the device
+ * @hw: pointer to the HW struct
+ *
+ * Determine the size of the CSS header at the start of the NVM module. This
+ * is useful for locating the Shadow RAM copy in the NVM, as the Shadow RAM is
+ * always located just after the CSS header.
+ *
+ * Return: zero on success, or a negative error code on failure.
+ */
+static int ice_determine_css_hdr_len(struct ice_hw *hw)
+{
+ struct ice_bank_info *banks = &hw->flash.banks;
+ int status;
+
+ status = ice_get_nvm_css_hdr_len(hw, ICE_ACTIVE_FLASH_BANK,
+ &banks->active_css_hdr_len);
+ if (status)
+ return status;
+
+ status = ice_get_nvm_css_hdr_len(hw, ICE_INACTIVE_FLASH_BANK,
+ &banks->inactive_css_hdr_len);
+ if (status)
+ return status;
+
+ return 0;
+}
+
+/**
* ice_init_nvm - initializes NVM setting
* @hw: pointer to the HW struct
*
@@ -1056,6 +1149,12 @@ int ice_init_nvm(struct ice_hw *hw)
return status;
}
+ status = ice_determine_css_hdr_len(hw);
+ if (status) {
+ ice_debug(hw, ICE_DBG_NVM, "Failed to determine Shadow RAM copy offsets.\n");
+ return status;
+ }
+
status = ice_get_nvm_ver_info(hw, ICE_ACTIVE_FLASH_BANK, &flash->nvm);
if (status) {
ice_debug(hw, ICE_DBG_INIT, "Failed to read NVM info.\n");
@@ -1083,14 +1182,14 @@ int ice_init_nvm(struct ice_hw *hw)
int ice_nvm_validate_checksum(struct ice_hw *hw)
{
struct ice_aqc_nvm_checksum *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
status = ice_acquire_nvm(hw, ICE_RES_READ);
if (status)
return status;
- cmd = &desc.params.nvm_checksum;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_checksum);
cmd->flags = ICE_AQC_NVM_CHECKSUM_VERIFY;
@@ -1127,11 +1226,11 @@ int ice_nvm_validate_checksum(struct ice_hw *hw)
*/
int ice_nvm_write_activate(struct ice_hw *hw, u16 cmd_flags, u8 *response_flags)
{
+ struct libie_aq_desc desc;
struct ice_aqc_nvm *cmd;
- struct ice_aq_desc desc;
int err;
- cmd = &desc.params.nvm;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write_activate);
cmd->cmd_flags = (u8)(cmd_flags & 0xFF);
@@ -1153,7 +1252,7 @@ int ice_nvm_write_activate(struct ice_hw *hw, u16 cmd_flags, u8 *response_flags)
*/
int ice_aq_nvm_update_empr(struct ice_hw *hw)
{
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_update_empr);
@@ -1179,15 +1278,15 @@ ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data,
u16 length, struct ice_sq_cd *cd)
{
struct ice_aqc_nvm_pkg_data *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
if (length != 0 && !data)
return -EINVAL;
- cmd = &desc.params.pkg_data;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_pkg_data);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
if (del_pkg_data_flag)
cmd->cmd_flags |= ICE_AQC_NVM_PKG_DELETE;
@@ -1217,17 +1316,17 @@ ice_nvm_pass_component_tbl(struct ice_hw *hw, u8 *data, u16 length,
u8 *comp_response_code, struct ice_sq_cd *cd)
{
struct ice_aqc_nvm_pass_comp_tbl *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
if (!data || !comp_response || !comp_response_code)
return -EINVAL;
- cmd = &desc.params.pass_comp_tbl;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc,
ice_aqc_opc_nvm_pass_component_tbl);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
cmd->transfer_flag = transfer_flag;
status = ice_aq_send_cmd(hw, &desc, data, length, cd);
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.h b/drivers/net/ethernet/intel/ice/ice_nvm.h
index 774c2317967d..63cdc6bdac58 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.h
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.h
@@ -14,6 +14,9 @@ struct ice_orom_civd_info {
int ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access);
void ice_release_nvm(struct ice_hw *hw);
+int ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
+ u16 length, void *data, bool last_command,
+ bool read_shadow_ram, struct ice_sq_cd *cd);
int
ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
bool read_shadow_ram);
diff --git a/drivers/net/ethernet/intel/ice/ice_osdep.h b/drivers/net/ethernet/intel/ice/ice_osdep.h
index a2562f04267f..b9f383494b3f 100644
--- a/drivers/net/ethernet/intel/ice/ice_osdep.h
+++ b/drivers/net/ethernet/intel/ice/ice_osdep.h
@@ -12,6 +12,7 @@
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
+#include <linux/iopoll.h>
#include <linux/pci_ids.h>
#ifndef CONFIG_64BIT
#include <linux/io-64-nonatomic-lo-hi.h>
@@ -23,6 +24,9 @@
#define wr64(a, reg, value) writeq((value), ((a)->hw_addr + (reg)))
#define rd64(a, reg) readq((a)->hw_addr + (reg))
+#define rd32_poll_timeout(a, addr, val, cond, delay_us, timeout_us) \
+ read_poll_timeout(rd32, val, cond, delay_us, timeout_us, false, a, addr)
+
#define ice_flush(a) rd32((a), GLGEN_STAT)
#define ICE_M(m, s) ((m ## U) << (s))
@@ -39,11 +43,10 @@ struct device *ice_hw_to_dev(struct ice_hw *hw);
#define ice_debug(hw, type, fmt, args...) \
dev_dbg(ice_hw_to_dev(hw), fmt, ##args)
-#define ice_debug_array(hw, type, rowsize, groupsize, buf, len) \
- print_hex_dump_debug(KBUILD_MODNAME " ", \
- DUMP_PREFIX_OFFSET, rowsize, \
- groupsize, buf, len, false)
-#else
+#define _ice_debug_array(hw, type, prefix, rowsize, groupsize, buf, len) \
+ print_hex_dump_debug(prefix, DUMP_PREFIX_OFFSET, \
+ rowsize, groupsize, buf, len, false)
+#else /* CONFIG_DYNAMIC_DEBUG */
#define ice_debug(hw, type, fmt, args...) \
do { \
if ((type) & (hw)->debug_mask) \
@@ -51,16 +54,15 @@ do { \
} while (0)
#ifdef DEBUG
-#define ice_debug_array(hw, type, rowsize, groupsize, buf, len) \
+#define _ice_debug_array(hw, type, prefix, rowsize, groupsize, buf, len) \
do { \
if ((type) & (hw)->debug_mask) \
- print_hex_dump_debug(KBUILD_MODNAME, \
- DUMP_PREFIX_OFFSET, \
+ print_hex_dump_debug(prefix, DUMP_PREFIX_OFFSET,\
rowsize, groupsize, buf, \
len, false); \
} while (0)
-#else
-#define ice_debug_array(hw, type, rowsize, groupsize, buf, len) \
+#else /* DEBUG */
+#define _ice_debug_array(hw, type, prefix, rowsize, groupsize, buf, len) \
do { \
struct ice_hw *hw_l = hw; \
if ((type) & (hw_l)->debug_mask) { \
@@ -78,4 +80,10 @@ do { \
#endif /* DEBUG */
#endif /* CONFIG_DYNAMIC_DEBUG */
+#define ice_debug_array(hw, type, rowsize, groupsize, buf, len) \
+ _ice_debug_array(hw, type, KBUILD_MODNAME, rowsize, groupsize, buf, len)
+
+#define ice_debug_array_w_prefix(hw, type, prefix, buf, len) \
+ _ice_debug_array(hw, type, prefix, 16, 1, buf, len)
+
#endif /* _ICE_OSDEP_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_parser.c b/drivers/net/ethernet/intel/ice/ice_parser.c
new file mode 100644
index 000000000000..664beb64f557
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_parser.c
@@ -0,0 +1,2430 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2024 Intel Corporation */
+
+#include "ice_common.h"
+
+struct ice_pkg_sect_hdr {
+ __le16 count;
+ __le16 offset;
+};
+
+/**
+ * ice_parser_sect_item_get - parse an item from a section
+ * @sect_type: section type
+ * @section: section object
+ * @index: index of the item to get
+ * @offset: dummy as prototype of ice_pkg_enum_entry's last parameter
+ *
+ * Return: a pointer to the item or NULL.
+ */
+static void *ice_parser_sect_item_get(u32 sect_type, void *section,
+ u32 index, u32 __maybe_unused *offset)
+{
+ size_t data_off = ICE_SEC_DATA_OFFSET;
+ struct ice_pkg_sect_hdr *hdr;
+ size_t size;
+
+ if (!section)
+ return NULL;
+
+ switch (sect_type) {
+ case ICE_SID_RXPARSER_IMEM:
+ size = ICE_SID_RXPARSER_IMEM_ENTRY_SIZE;
+ break;
+ case ICE_SID_RXPARSER_METADATA_INIT:
+ size = ICE_SID_RXPARSER_METADATA_INIT_ENTRY_SIZE;
+ break;
+ case ICE_SID_RXPARSER_CAM:
+ size = ICE_SID_RXPARSER_CAM_ENTRY_SIZE;
+ break;
+ case ICE_SID_RXPARSER_PG_SPILL:
+ size = ICE_SID_RXPARSER_PG_SPILL_ENTRY_SIZE;
+ break;
+ case ICE_SID_RXPARSER_NOMATCH_CAM:
+ size = ICE_SID_RXPARSER_NOMATCH_CAM_ENTRY_SIZE;
+ break;
+ case ICE_SID_RXPARSER_NOMATCH_SPILL:
+ size = ICE_SID_RXPARSER_NOMATCH_SPILL_ENTRY_SIZE;
+ break;
+ case ICE_SID_RXPARSER_BOOST_TCAM:
+ size = ICE_SID_RXPARSER_BOOST_TCAM_ENTRY_SIZE;
+ break;
+ case ICE_SID_LBL_RXPARSER_TMEM:
+ data_off = ICE_SEC_LBL_DATA_OFFSET;
+ size = ICE_SID_LBL_ENTRY_SIZE;
+ break;
+ case ICE_SID_RXPARSER_MARKER_PTYPE:
+ size = ICE_SID_RXPARSER_MARKER_TYPE_ENTRY_SIZE;
+ break;
+ case ICE_SID_RXPARSER_MARKER_GRP:
+ size = ICE_SID_RXPARSER_MARKER_GRP_ENTRY_SIZE;
+ break;
+ case ICE_SID_RXPARSER_PROTO_GRP:
+ size = ICE_SID_RXPARSER_PROTO_GRP_ENTRY_SIZE;
+ break;
+ case ICE_SID_RXPARSER_FLAG_REDIR:
+ size = ICE_SID_RXPARSER_FLAG_REDIR_ENTRY_SIZE;
+ break;
+ default:
+ return NULL;
+ }
+
+ hdr = section;
+ if (index >= le16_to_cpu(hdr->count))
+ return NULL;
+
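+ /* fixed-size entries are laid out back to back after the header */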
+ return section + data_off + index * size;
+}
+
+/**
+ * ice_parser_create_table - create an item table from a section
+ * @hw: pointer to the hardware structure
+ * @sect_type: section type
+ * @item_size: item size in bytes
+ * @length: number of items in the table to create
+ * @parse_item: the function to parse the item
+ * @no_offset: ignore header offset, calculate index from 0
+ *
+ * Return: a pointer to the allocated table or ERR_PTR.
+ */
+static void *
+ice_parser_create_table(struct ice_hw *hw, u32 sect_type,
+ u32 item_size, u32 length,
+ void (*parse_item)(struct ice_hw *hw, u16 idx,
+ void *item, void *data,
+ int size), bool no_offset)
+{
+ struct ice_pkg_enum state = {};
+ struct ice_seg *seg = hw->seg;
+ void *table, *data, *item;
+ u16 idx = 0;
+
+ if (!seg)
+ return ERR_PTR(-EINVAL);
+
+ table = kzalloc(item_size * length, GFP_KERNEL);
+ if (!table)
+ return ERR_PTR(-ENOMEM);
+
+ do {
+ data = ice_pkg_enum_entry(seg, &state, sect_type, NULL,
+ ice_parser_sect_item_get);
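+ /* the segment is passed only on the first call; NULL on later
+ * iterations continues the same enumeration
+ */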
+ seg = NULL;
+ if (data) {
+ struct ice_pkg_sect_hdr *hdr = state.sect;
+
+ if (!no_offset)
+ idx = le16_to_cpu(hdr->offset) +
+ state.entry_idx;
+
+ item = (void *)((uintptr_t)table + idx * item_size);
+ parse_item(hw, idx, item, data, item_size);
+
+ if (no_offset)
+ idx++;
+ }
+ } while (data);
+
+ return table;
+}
+
+/*** ICE_SID_RXPARSER_IMEM section ***/
+static void ice_imem_bst_bm_dump(struct ice_hw *hw, struct ice_bst_main *bm)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "boost main:\n");
+ dev_info(dev, "\talu0 = %d\n", bm->alu0);
+ dev_info(dev, "\talu1 = %d\n", bm->alu1);
+ dev_info(dev, "\talu2 = %d\n", bm->alu2);
+ dev_info(dev, "\tpg = %d\n", bm->pg);
+}
+
+static void ice_imem_bst_kb_dump(struct ice_hw *hw,
+ struct ice_bst_keybuilder *kb)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "boost key builder:\n");
+ dev_info(dev, "\tpriority = %d\n", kb->prio);
+ dev_info(dev, "\ttsr_ctrl = %d\n", kb->tsr_ctrl);
+}
+
+static void ice_imem_np_kb_dump(struct ice_hw *hw,
+ struct ice_np_keybuilder *kb)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "next proto key builder:\n");
+ dev_info(dev, "\topc = %d\n", kb->opc);
+ dev_info(dev, "\tstart_or_reg0 = %d\n", kb->start_reg0);
+ dev_info(dev, "\tlen_or_reg1 = %d\n", kb->len_reg1);
+}
+
+static void ice_imem_pg_kb_dump(struct ice_hw *hw,
+ struct ice_pg_keybuilder *kb)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "parse graph key builder:\n");
+ dev_info(dev, "\tflag0_ena = %d\n", kb->flag0_ena);
+ dev_info(dev, "\tflag1_ena = %d\n", kb->flag1_ena);
+ dev_info(dev, "\tflag2_ena = %d\n", kb->flag2_ena);
+ dev_info(dev, "\tflag3_ena = %d\n", kb->flag3_ena);
+ dev_info(dev, "\tflag0_idx = %d\n", kb->flag0_idx);
+ dev_info(dev, "\tflag1_idx = %d\n", kb->flag1_idx);
+ dev_info(dev, "\tflag2_idx = %d\n", kb->flag2_idx);
+ dev_info(dev, "\tflag3_idx = %d\n", kb->flag3_idx);
+ dev_info(dev, "\talu_reg_idx = %d\n", kb->alu_reg_idx);
+}
+
+static void ice_imem_alu_dump(struct ice_hw *hw,
+ struct ice_alu *alu, int index)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "alu%d:\n", index);
+ dev_info(dev, "\topc = %d\n", alu->opc);
+ dev_info(dev, "\tsrc_start = %d\n", alu->src_start);
+ dev_info(dev, "\tsrc_len = %d\n", alu->src_len);
+ dev_info(dev, "\tshift_xlate_sel = %d\n", alu->shift_xlate_sel);
+ dev_info(dev, "\tshift_xlate_key = %d\n", alu->shift_xlate_key);
+ dev_info(dev, "\tsrc_reg_id = %d\n", alu->src_reg_id);
+ dev_info(dev, "\tdst_reg_id = %d\n", alu->dst_reg_id);
+ dev_info(dev, "\tinc0 = %d\n", alu->inc0);
+ dev_info(dev, "\tinc1 = %d\n", alu->inc1);
+ dev_info(dev, "\tproto_offset_opc = %d\n", alu->proto_offset_opc);
+ dev_info(dev, "\tproto_offset = %d\n", alu->proto_offset);
+ dev_info(dev, "\tbranch_addr = %d\n", alu->branch_addr);
+ dev_info(dev, "\timm = %d\n", alu->imm);
+ dev_info(dev, "\tdst_start = %d\n", alu->dst_start);
+ dev_info(dev, "\tdst_len = %d\n", alu->dst_len);
+ dev_info(dev, "\tflags_extr_imm = %d\n", alu->flags_extr_imm);
+ dev_info(dev, "\tflags_start_imm= %d\n", alu->flags_start_imm);
+}
+
+/**
+ * ice_imem_dump - dump an imem item info
+ * @hw: pointer to the hardware structure
+ * @item: imem item to dump
+ */
+static void ice_imem_dump(struct ice_hw *hw, struct ice_imem_item *item)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "index = %d\n", item->idx);
+ ice_imem_bst_bm_dump(hw, &item->b_m);
+ ice_imem_bst_kb_dump(hw, &item->b_kb);
+ dev_info(dev, "pg priority = %d\n", item->pg_prio);
+ ice_imem_np_kb_dump(hw, &item->np_kb);
+ ice_imem_pg_kb_dump(hw, &item->pg_kb);
+ ice_imem_alu_dump(hw, &item->alu0, 0);
+ ice_imem_alu_dump(hw, &item->alu1, 1);
+ ice_imem_alu_dump(hw, &item->alu2, 2);
+}
+
+#define ICE_IM_BM_ALU0 BIT(0)
+#define ICE_IM_BM_ALU1 BIT(1)
+#define ICE_IM_BM_ALU2 BIT(2)
+#define ICE_IM_BM_PG BIT(3)
+
+/**
+ * ice_imem_bm_init - parse 4 bits of Boost Main
+ * @bm: pointer to the Boost Main structure
+ * @data: Boost Main data to be parsed
+ */
+static void ice_imem_bm_init(struct ice_bst_main *bm, u8 data)
+{
+ bm->alu0 = FIELD_GET(ICE_IM_BM_ALU0, data);
+ bm->alu1 = FIELD_GET(ICE_IM_BM_ALU1, data);
+ bm->alu2 = FIELD_GET(ICE_IM_BM_ALU2, data);
+ bm->pg = FIELD_GET(ICE_IM_BM_PG, data);
+}
+
+#define ICE_IM_BKB_PRIO GENMASK(7, 0)
+#define ICE_IM_BKB_TSR_CTRL BIT(8)
+
+/**
+ * ice_imem_bkb_init - parse 10 bits of Boost Main Build
+ * @bkb: pointer to the Boost Main Build structure
+ * @data: Boost Main Build data to be parsed
+ */
+static void ice_imem_bkb_init(struct ice_bst_keybuilder *bkb, u16 data)
+{
+ bkb->prio = FIELD_GET(ICE_IM_BKB_PRIO, data);
+ bkb->tsr_ctrl = FIELD_GET(ICE_IM_BKB_TSR_CTRL, data);
+}
+
+#define ICE_IM_NPKB_OPC GENMASK(1, 0)
+#define ICE_IM_NPKB_S_R0 GENMASK(9, 2)
+#define ICE_IM_NPKB_L_R1 GENMASK(17, 10)
+
+/**
+ * ice_imem_npkb_init - parse 18 bits of Next Protocol Key Build
+ * @kb: pointer to the Next Protocol Key Build structure
+ * @data: Next Protocol Key Build data to be parsed
+ */
+static void ice_imem_npkb_init(struct ice_np_keybuilder *kb, u32 data)
+{
+ kb->opc = FIELD_GET(ICE_IM_NPKB_OPC, data);
+ kb->start_reg0 = FIELD_GET(ICE_IM_NPKB_S_R0, data);
+ kb->len_reg1 = FIELD_GET(ICE_IM_NPKB_L_R1, data);
+}
+
+#define ICE_IM_PGKB_F0_ENA BIT_ULL(0)
+#define ICE_IM_PGKB_F0_IDX GENMASK_ULL(6, 1)
+#define ICE_IM_PGKB_F1_ENA BIT_ULL(7)
+#define ICE_IM_PGKB_F1_IDX GENMASK_ULL(13, 8)
+#define ICE_IM_PGKB_F2_ENA BIT_ULL(14)
+#define ICE_IM_PGKB_F2_IDX GENMASK_ULL(20, 15)
+#define ICE_IM_PGKB_F3_ENA BIT_ULL(21)
+#define ICE_IM_PGKB_F3_IDX GENMASK_ULL(27, 22)
+#define ICE_IM_PGKB_AR_IDX GENMASK_ULL(34, 28)
+
+/**
+ * ice_imem_pgkb_init - parse 35 bits of Parse Graph Key Build
+ * @kb: pointer to the Parse Graph Key Build structure
+ * @data: Parse Graph Key Build data to be parsed
+ */
+static void ice_imem_pgkb_init(struct ice_pg_keybuilder *kb, u64 data)
+{
+ kb->flag0_ena = FIELD_GET(ICE_IM_PGKB_F0_ENA, data);
+ kb->flag0_idx = FIELD_GET(ICE_IM_PGKB_F0_IDX, data);
+ kb->flag1_ena = FIELD_GET(ICE_IM_PGKB_F1_ENA, data);
+ kb->flag1_idx = FIELD_GET(ICE_IM_PGKB_F1_IDX, data);
+ kb->flag2_ena = FIELD_GET(ICE_IM_PGKB_F2_ENA, data);
+ kb->flag2_idx = FIELD_GET(ICE_IM_PGKB_F2_IDX, data);
+ kb->flag3_ena = FIELD_GET(ICE_IM_PGKB_F3_ENA, data);
+ kb->flag3_idx = FIELD_GET(ICE_IM_PGKB_F3_IDX, data);
+ kb->alu_reg_idx = FIELD_GET(ICE_IM_PGKB_AR_IDX, data);
+}
+
+#define ICE_IM_ALU_OPC GENMASK_ULL(5, 0)
+#define ICE_IM_ALU_SS GENMASK_ULL(13, 6)
+#define ICE_IM_ALU_SL GENMASK_ULL(18, 14)
+#define ICE_IM_ALU_SXS BIT_ULL(19)
+#define ICE_IM_ALU_SXK GENMASK_ULL(23, 20)
+#define ICE_IM_ALU_SRID GENMASK_ULL(30, 24)
+#define ICE_IM_ALU_DRID GENMASK_ULL(37, 31)
+#define ICE_IM_ALU_INC0 BIT_ULL(38)
+#define ICE_IM_ALU_INC1 BIT_ULL(39)
+#define ICE_IM_ALU_POO GENMASK_ULL(41, 40)
+#define ICE_IM_ALU_PO GENMASK_ULL(49, 42)
+#define ICE_IM_ALU_BA_S 50 /* offset for the 2nd 64-bits field */
+#define ICE_IM_ALU_BA GENMASK_ULL(57 - ICE_IM_ALU_BA_S, \
+ 50 - ICE_IM_ALU_BA_S)
+#define ICE_IM_ALU_IMM GENMASK_ULL(73 - ICE_IM_ALU_BA_S, \
+ 58 - ICE_IM_ALU_BA_S)
+#define ICE_IM_ALU_DFE BIT_ULL(74 - ICE_IM_ALU_BA_S)
+#define ICE_IM_ALU_DS GENMASK_ULL(80 - ICE_IM_ALU_BA_S, \
+ 75 - ICE_IM_ALU_BA_S)
+#define ICE_IM_ALU_DL GENMASK_ULL(86 - ICE_IM_ALU_BA_S, \
+ 81 - ICE_IM_ALU_BA_S)
+#define ICE_IM_ALU_FEI BIT_ULL(87 - ICE_IM_ALU_BA_S)
+#define ICE_IM_ALU_FSI GENMASK_ULL(95 - ICE_IM_ALU_BA_S, \
+ 88 - ICE_IM_ALU_BA_S)
+
+/**
+ * ice_imem_alu_init - parse 96 bits of ALU entry
+ * @alu: pointer to the ALU entry structure
+ * @data: ALU entry data to be parsed
+ * @off: offset of the ALU entry data
+ */
+static void ice_imem_alu_init(struct ice_alu *alu, u8 *data, u8 off)
+{
+ u64 d64;
+ u8 idd;
+
+ d64 = *((u64 *)data) >> off;
+
+ alu->opc = FIELD_GET(ICE_IM_ALU_OPC, d64);
+ alu->src_start = FIELD_GET(ICE_IM_ALU_SS, d64);
+ alu->src_len = FIELD_GET(ICE_IM_ALU_SL, d64);
+ alu->shift_xlate_sel = FIELD_GET(ICE_IM_ALU_SXS, d64);
+ alu->shift_xlate_key = FIELD_GET(ICE_IM_ALU_SXK, d64);
+ alu->src_reg_id = FIELD_GET(ICE_IM_ALU_SRID, d64);
+ alu->dst_reg_id = FIELD_GET(ICE_IM_ALU_DRID, d64);
+ alu->inc0 = FIELD_GET(ICE_IM_ALU_INC0, d64);
+ alu->inc1 = FIELD_GET(ICE_IM_ALU_INC1, d64);
+ alu->proto_offset_opc = FIELD_GET(ICE_IM_ALU_POO, d64);
+ alu->proto_offset = FIELD_GET(ICE_IM_ALU_PO, d64);
+
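+ /* branch_addr and the following fields start at bit ICE_IM_ALU_BA_S
+ * of the entry, so reload a 64-bit window from the byte that holds
+ * that bit before extracting them
+ */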
+ idd = (ICE_IM_ALU_BA_S + off) / BITS_PER_BYTE;
+ off = (ICE_IM_ALU_BA_S + off) % BITS_PER_BYTE;
+ d64 = *((u64 *)(&data[idd])) >> off;
+
+ alu->branch_addr = FIELD_GET(ICE_IM_ALU_BA, d64);
+ alu->imm = FIELD_GET(ICE_IM_ALU_IMM, d64);
+ alu->dedicate_flags_ena = FIELD_GET(ICE_IM_ALU_DFE, d64);
+ alu->dst_start = FIELD_GET(ICE_IM_ALU_DS, d64);
+ alu->dst_len = FIELD_GET(ICE_IM_ALU_DL, d64);
+ alu->flags_extr_imm = FIELD_GET(ICE_IM_ALU_FEI, d64);
+ alu->flags_start_imm = FIELD_GET(ICE_IM_ALU_FSI, d64);
+}
+
+#define ICE_IMEM_BM_S 0
+#define ICE_IMEM_BKB_S 4
+#define ICE_IMEM_BKB_IDD (ICE_IMEM_BKB_S / BITS_PER_BYTE)
+#define ICE_IMEM_BKB_OFF (ICE_IMEM_BKB_S % BITS_PER_BYTE)
+#define ICE_IMEM_PGP GENMASK(15, 14)
+#define ICE_IMEM_NPKB_S 16
+#define ICE_IMEM_NPKB_IDD (ICE_IMEM_NPKB_S / BITS_PER_BYTE)
+#define ICE_IMEM_NPKB_OFF (ICE_IMEM_NPKB_S % BITS_PER_BYTE)
+#define ICE_IMEM_PGKB_S 34
+#define ICE_IMEM_PGKB_IDD (ICE_IMEM_PGKB_S / BITS_PER_BYTE)
+#define ICE_IMEM_PGKB_OFF (ICE_IMEM_PGKB_S % BITS_PER_BYTE)
+#define ICE_IMEM_ALU0_S 69
+#define ICE_IMEM_ALU0_IDD (ICE_IMEM_ALU0_S / BITS_PER_BYTE)
+#define ICE_IMEM_ALU0_OFF (ICE_IMEM_ALU0_S % BITS_PER_BYTE)
+#define ICE_IMEM_ALU1_S 165
+#define ICE_IMEM_ALU1_IDD (ICE_IMEM_ALU1_S / BITS_PER_BYTE)
+#define ICE_IMEM_ALU1_OFF (ICE_IMEM_ALU1_S % BITS_PER_BYTE)
+#define ICE_IMEM_ALU2_S 357
+#define ICE_IMEM_ALU2_IDD (ICE_IMEM_ALU2_S / BITS_PER_BYTE)
+#define ICE_IMEM_ALU2_OFF (ICE_IMEM_ALU2_S % BITS_PER_BYTE)
+
+/**
+ * ice_imem_parse_item - parse 384 bits of IMEM entry
+ * @hw: pointer to the hardware structure
+ * @idx: index of IMEM entry
+ * @item: item of IMEM entry
+ * @data: IMEM entry data to be parsed
+ * @size: size of IMEM entry
+ */
+static void ice_imem_parse_item(struct ice_hw *hw, u16 idx, void *item,
+ void *data, int __maybe_unused size)
+{
+ struct ice_imem_item *ii = item;
+ u8 *buf = data;
+
+ ii->idx = idx;
+
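+ /* each *_IDD/*_OFF pair below is the byte index and bit offset of a
+ * sub-field within the 384-bit IMEM entry
+ */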
+ ice_imem_bm_init(&ii->b_m, *(u8 *)buf);
+ ice_imem_bkb_init(&ii->b_kb,
+ *((u16 *)(&buf[ICE_IMEM_BKB_IDD])) >>
+ ICE_IMEM_BKB_OFF);
+
+ ii->pg_prio = FIELD_GET(ICE_IMEM_PGP, *(u16 *)buf);
+
+ ice_imem_npkb_init(&ii->np_kb,
+ *((u32 *)(&buf[ICE_IMEM_NPKB_IDD])) >>
+ ICE_IMEM_NPKB_OFF);
+ ice_imem_pgkb_init(&ii->pg_kb,
+ *((u64 *)(&buf[ICE_IMEM_PGKB_IDD])) >>
+ ICE_IMEM_PGKB_OFF);
+
+ ice_imem_alu_init(&ii->alu0,
+ &buf[ICE_IMEM_ALU0_IDD],
+ ICE_IMEM_ALU0_OFF);
+ ice_imem_alu_init(&ii->alu1,
+ &buf[ICE_IMEM_ALU1_IDD],
+ ICE_IMEM_ALU1_OFF);
+ ice_imem_alu_init(&ii->alu2,
+ &buf[ICE_IMEM_ALU2_IDD],
+ ICE_IMEM_ALU2_OFF);
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_imem_dump(hw, ii);
+}
+
+/**
+ * ice_imem_table_get - create an imem table
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated IMEM table.
+ */
+static struct ice_imem_item *ice_imem_table_get(struct ice_hw *hw)
+{
+ return ice_parser_create_table(hw, ICE_SID_RXPARSER_IMEM,
+ sizeof(struct ice_imem_item),
+ ICE_IMEM_TABLE_SIZE,
+ ice_imem_parse_item, false);
+}
+
+/*** ICE_SID_RXPARSER_METADATA_INIT section ***/
+/**
+ * ice_metainit_dump - dump a metainit item info
+ * @hw: pointer to the hardware structure
+ * @item: metainit item to dump
+ */
+static void ice_metainit_dump(struct ice_hw *hw, struct ice_metainit_item *item)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "index = %d\n", item->idx);
+
+ dev_info(dev, "tsr = %d\n", item->tsr);
+ dev_info(dev, "ho = %d\n", item->ho);
+ dev_info(dev, "pc = %d\n", item->pc);
+ dev_info(dev, "pg_rn = %d\n", item->pg_rn);
+ dev_info(dev, "cd = %d\n", item->cd);
+
+ dev_info(dev, "gpr_a_ctrl = %d\n", item->gpr_a_ctrl);
+ dev_info(dev, "gpr_a_data_mdid = %d\n", item->gpr_a_data_mdid);
+ dev_info(dev, "gpr_a_data_start = %d\n", item->gpr_a_data_start);
+ dev_info(dev, "gpr_a_data_len = %d\n", item->gpr_a_data_len);
+ dev_info(dev, "gpr_a_id = %d\n", item->gpr_a_id);
+
+ dev_info(dev, "gpr_b_ctrl = %d\n", item->gpr_b_ctrl);
+ dev_info(dev, "gpr_b_data_mdid = %d\n", item->gpr_b_data_mdid);
+ dev_info(dev, "gpr_b_data_start = %d\n", item->gpr_b_data_start);
+ dev_info(dev, "gpr_b_data_len = %d\n", item->gpr_b_data_len);
+ dev_info(dev, "gpr_b_id = %d\n", item->gpr_b_id);
+
+ dev_info(dev, "gpr_c_ctrl = %d\n", item->gpr_c_ctrl);
+ dev_info(dev, "gpr_c_data_mdid = %d\n", item->gpr_c_data_mdid);
+ dev_info(dev, "gpr_c_data_start = %d\n", item->gpr_c_data_start);
+ dev_info(dev, "gpr_c_data_len = %d\n", item->gpr_c_data_len);
+ dev_info(dev, "gpr_c_id = %d\n", item->gpr_c_id);
+
+ dev_info(dev, "gpr_d_ctrl = %d\n", item->gpr_d_ctrl);
+ dev_info(dev, "gpr_d_data_mdid = %d\n", item->gpr_d_data_mdid);
+ dev_info(dev, "gpr_d_data_start = %d\n", item->gpr_d_data_start);
+ dev_info(dev, "gpr_d_data_len = %d\n", item->gpr_d_data_len);
+ dev_info(dev, "gpr_d_id = %d\n", item->gpr_d_id);
+
+ dev_info(dev, "flags = 0x%llx\n", (unsigned long long)(item->flags));
+}
+
+#define ICE_MI_TSR GENMASK_ULL(7, 0)
+#define ICE_MI_HO GENMASK_ULL(16, 8)
+#define ICE_MI_PC GENMASK_ULL(24, 17)
+#define ICE_MI_PGRN GENMASK_ULL(35, 25)
+#define ICE_MI_CD GENMASK_ULL(38, 36)
+#define ICE_MI_GAC BIT_ULL(39)
+#define ICE_MI_GADM GENMASK_ULL(44, 40)
+#define ICE_MI_GADS GENMASK_ULL(48, 45)
+#define ICE_MI_GADL GENMASK_ULL(53, 49)
+#define ICE_MI_GAI GENMASK_ULL(59, 56)
+#define ICE_MI_GBC BIT_ULL(60)
+#define ICE_MI_GBDM_S 61 /* offset for the 2nd 64-bits field */
+#define ICE_MI_GBDM_IDD (ICE_MI_GBDM_S / BITS_PER_BYTE)
+#define ICE_MI_GBDM_OFF (ICE_MI_GBDM_S % BITS_PER_BYTE)
+
+#define ICE_MI_GBDM_GENMASK_ULL(high, low) \
+ GENMASK_ULL((high) - ICE_MI_GBDM_S, (low) - ICE_MI_GBDM_S)
+#define ICE_MI_GBDM ICE_MI_GBDM_GENMASK_ULL(65, 61)
+#define ICE_MI_GBDS ICE_MI_GBDM_GENMASK_ULL(69, 66)
+#define ICE_MI_GBDL ICE_MI_GBDM_GENMASK_ULL(74, 70)
+#define ICE_MI_GBI ICE_MI_GBDM_GENMASK_ULL(80, 77)
+#define ICE_MI_GCC BIT_ULL(81 - ICE_MI_GBDM_S)
+#define ICE_MI_GCDM ICE_MI_GBDM_GENMASK_ULL(86, 82)
+#define ICE_MI_GCDS ICE_MI_GBDM_GENMASK_ULL(90, 87)
+#define ICE_MI_GCDL ICE_MI_GBDM_GENMASK_ULL(95, 91)
+#define ICE_MI_GCI ICE_MI_GBDM_GENMASK_ULL(101, 98)
+#define ICE_MI_GDC BIT_ULL(102 - ICE_MI_GBDM_S)
+#define ICE_MI_GDDM ICE_MI_GBDM_GENMASK_ULL(107, 103)
+#define ICE_MI_GDDS ICE_MI_GBDM_GENMASK_ULL(111, 108)
+#define ICE_MI_GDDL ICE_MI_GBDM_GENMASK_ULL(116, 112)
+#define ICE_MI_GDI ICE_MI_GBDM_GENMASK_ULL(122, 119)
+#define ICE_MI_FLAG_S 123 /* offset for the 3rd 64-bits field */
+#define ICE_MI_FLAG_IDD (ICE_MI_FLAG_S / BITS_PER_BYTE)
+#define ICE_MI_FLAG_OFF (ICE_MI_FLAG_S % BITS_PER_BYTE)
+#define ICE_MI_FLAG GENMASK_ULL(186 - ICE_MI_FLAG_S, \
+ 123 - ICE_MI_FLAG_S)
+
+/**
+ * ice_metainit_parse_item - parse 192 bits of Metadata Init entry
+ * @hw: pointer to the hardware structure
+ * @idx: index of Metadata Init entry
+ * @item: item of Metadata Init entry
+ * @data: Metadata Init entry data to be parsed
+ * @size: size of Metadata Init entry
+ */
+static void ice_metainit_parse_item(struct ice_hw *hw, u16 idx, void *item,
+ void *data, int __maybe_unused size)
+{
+ struct ice_metainit_item *mi = item;
+ u8 *buf = data;
+ u64 d64;
+
+ mi->idx = idx;
+
+ d64 = *(u64 *)buf;
+
+ mi->tsr = FIELD_GET(ICE_MI_TSR, d64);
+ mi->ho = FIELD_GET(ICE_MI_HO, d64);
+ mi->pc = FIELD_GET(ICE_MI_PC, d64);
+ mi->pg_rn = FIELD_GET(ICE_MI_PGRN, d64);
+ mi->cd = FIELD_GET(ICE_MI_CD, d64);
+
+ mi->gpr_a_ctrl = FIELD_GET(ICE_MI_GAC, d64);
+ mi->gpr_a_data_mdid = FIELD_GET(ICE_MI_GADM, d64);
+ mi->gpr_a_data_start = FIELD_GET(ICE_MI_GADS, d64);
+ mi->gpr_a_data_len = FIELD_GET(ICE_MI_GADL, d64);
+ mi->gpr_a_id = FIELD_GET(ICE_MI_GAI, d64);
+
+ mi->gpr_b_ctrl = FIELD_GET(ICE_MI_GBC, d64);
+
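+ /* the remaining GPR fields sit in the second 64-bit window, which
+ * starts at bit ICE_MI_GBDM_S of the entry
+ */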
+ d64 = *((u64 *)&buf[ICE_MI_GBDM_IDD]) >> ICE_MI_GBDM_OFF;
+
+ mi->gpr_b_data_mdid = FIELD_GET(ICE_MI_GBDM, d64);
+ mi->gpr_b_data_start = FIELD_GET(ICE_MI_GBDS, d64);
+ mi->gpr_b_data_len = FIELD_GET(ICE_MI_GBDL, d64);
+ mi->gpr_b_id = FIELD_GET(ICE_MI_GBI, d64);
+
+ mi->gpr_c_ctrl = FIELD_GET(ICE_MI_GCC, d64);
+ mi->gpr_c_data_mdid = FIELD_GET(ICE_MI_GCDM, d64);
+ mi->gpr_c_data_start = FIELD_GET(ICE_MI_GCDS, d64);
+ mi->gpr_c_data_len = FIELD_GET(ICE_MI_GCDL, d64);
+ mi->gpr_c_id = FIELD_GET(ICE_MI_GCI, d64);
+
+ mi->gpr_d_ctrl = FIELD_GET(ICE_MI_GDC, d64);
+ mi->gpr_d_data_mdid = FIELD_GET(ICE_MI_GDDM, d64);
+ mi->gpr_d_data_start = FIELD_GET(ICE_MI_GDDS, d64);
+ mi->gpr_d_data_len = FIELD_GET(ICE_MI_GDDL, d64);
+ mi->gpr_d_id = FIELD_GET(ICE_MI_GDI, d64);
+
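+ /* flags occupy the third 64-bit window, starting at bit ICE_MI_FLAG_S */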
+ d64 = *((u64 *)&buf[ICE_MI_FLAG_IDD]) >> ICE_MI_FLAG_OFF;
+
+ mi->flags = FIELD_GET(ICE_MI_FLAG, d64);
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_metainit_dump(hw, mi);
+}
+
+/**
+ * ice_metainit_table_get - create a metainit table
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Metadata initialization table.
+ */
+static struct ice_metainit_item *ice_metainit_table_get(struct ice_hw *hw)
+{
+ return ice_parser_create_table(hw, ICE_SID_RXPARSER_METADATA_INIT,
+ sizeof(struct ice_metainit_item),
+ ICE_METAINIT_TABLE_SIZE,
+ ice_metainit_parse_item, false);
+}
+
+/**
+ * ice_bst_tcam_search - find a TCAM item with a specific type
+ * @tcam_table: the TCAM table
+ * @lbl_table: the lbl table to search
+ * @type: the type we need to match against
+ * @start: start searching from this index
+ *
+ * Return: a pointer to the matching BOOST TCAM item or NULL.
+ */
+struct ice_bst_tcam_item *
+ice_bst_tcam_search(struct ice_bst_tcam_item *tcam_table,
+ struct ice_lbl_item *lbl_table,
+ enum ice_lbl_type type, u16 *start)
+{
+ u16 i = *start;
+
+ for (; i < ICE_BST_TCAM_TABLE_SIZE; i++) {
+ if (lbl_table[i].type == type) {
+ *start = i;
+ return &tcam_table[lbl_table[i].idx];
+ }
+ }
+
+ return NULL;
+}
+
+/*** ICE_SID_RXPARSER_CAM, ICE_SID_RXPARSER_PG_SPILL,
+ * ICE_SID_RXPARSER_NOMATCH_CAM and ICE_SID_RXPARSER_NOMATCH_SPILL
+ * sections ***/
+static void ice_pg_cam_key_dump(struct ice_hw *hw, struct ice_pg_cam_key *key)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "key:\n");
+ dev_info(dev, "\tvalid = %d\n", key->valid);
+ dev_info(dev, "\tnode_id = %d\n", key->node_id);
+ dev_info(dev, "\tflag0 = %d\n", key->flag0);
+ dev_info(dev, "\tflag1 = %d\n", key->flag1);
+ dev_info(dev, "\tflag2 = %d\n", key->flag2);
+ dev_info(dev, "\tflag3 = %d\n", key->flag3);
+ dev_info(dev, "\tboost_idx = %d\n", key->boost_idx);
+ dev_info(dev, "\talu_reg = 0x%04x\n", key->alu_reg);
+ dev_info(dev, "\tnext_proto = 0x%08x\n", key->next_proto);
+}
+
+static void ice_pg_nm_cam_key_dump(struct ice_hw *hw,
+ struct ice_pg_nm_cam_key *key)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "key:\n");
+ dev_info(dev, "\tvalid = %d\n", key->valid);
+ dev_info(dev, "\tnode_id = %d\n", key->node_id);
+ dev_info(dev, "\tflag0 = %d\n", key->flag0);
+ dev_info(dev, "\tflag1 = %d\n", key->flag1);
+ dev_info(dev, "\tflag2 = %d\n", key->flag2);
+ dev_info(dev, "\tflag3 = %d\n", key->flag3);
+ dev_info(dev, "\tboost_idx = %d\n", key->boost_idx);
+ dev_info(dev, "\talu_reg = 0x%04x\n", key->alu_reg);
+}
+
+static void ice_pg_cam_action_dump(struct ice_hw *hw,
+ struct ice_pg_cam_action *action)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "action:\n");
+ dev_info(dev, "\tnext_node = %d\n", action->next_node);
+ dev_info(dev, "\tnext_pc = %d\n", action->next_pc);
+ dev_info(dev, "\tis_pg = %d\n", action->is_pg);
+ dev_info(dev, "\tproto_id = %d\n", action->proto_id);
+ dev_info(dev, "\tis_mg = %d\n", action->is_mg);
+ dev_info(dev, "\tmarker_id = %d\n", action->marker_id);
+ dev_info(dev, "\tis_last_round = %d\n", action->is_last_round);
+ dev_info(dev, "\tho_polarity = %d\n", action->ho_polarity);
+ dev_info(dev, "\tho_inc = %d\n", action->ho_inc);
+}
+
+/**
+ * ice_pg_cam_dump - dump a parse graph cam info
+ * @hw: pointer to the hardware structure
+ * @item: parse graph cam to dump
+ */
+static void ice_pg_cam_dump(struct ice_hw *hw, struct ice_pg_cam_item *item)
+{
+ dev_info(ice_hw_to_dev(hw), "index = %d\n", item->idx);
+ ice_pg_cam_key_dump(hw, &item->key);
+ ice_pg_cam_action_dump(hw, &item->action);
+}
+
+/**
+ * ice_pg_nm_cam_dump - dump a parse graph no match cam info
+ * @hw: pointer to the hardware structure
+ * @item: parse graph no match cam to dump
+ */
+static void ice_pg_nm_cam_dump(struct ice_hw *hw,
+ struct ice_pg_nm_cam_item *item)
+{
+ dev_info(ice_hw_to_dev(hw), "index = %d\n", item->idx);
+ ice_pg_nm_cam_key_dump(hw, &item->key);
+ ice_pg_cam_action_dump(hw, &item->action);
+}
+
+#define ICE_PGCA_NN GENMASK_ULL(10, 0)
+#define ICE_PGCA_NPC GENMASK_ULL(18, 11)
+#define ICE_PGCA_IPG BIT_ULL(19)
+#define ICE_PGCA_PID GENMASK_ULL(30, 23)
+#define ICE_PGCA_IMG BIT_ULL(31)
+#define ICE_PGCA_MID GENMASK_ULL(39, 32)
+#define ICE_PGCA_ILR BIT_ULL(40)
+#define ICE_PGCA_HOP BIT_ULL(41)
+#define ICE_PGCA_HOI GENMASK_ULL(50, 42)
+
+/**
+ * ice_pg_cam_action_init - parse 55 bits of Parse Graph CAM Action
+ * @action: pointer to the Parse Graph CAM Action structure
+ * @data: Parse Graph CAM Action data to be parsed
+ */
+static void ice_pg_cam_action_init(struct ice_pg_cam_action *action, u64 data)
+{
+ action->next_node = FIELD_GET(ICE_PGCA_NN, data);
+ action->next_pc = FIELD_GET(ICE_PGCA_NPC, data);
+ action->is_pg = FIELD_GET(ICE_PGCA_IPG, data);
+ action->proto_id = FIELD_GET(ICE_PGCA_PID, data);
+ action->is_mg = FIELD_GET(ICE_PGCA_IMG, data);
+ action->marker_id = FIELD_GET(ICE_PGCA_MID, data);
+ action->is_last_round = FIELD_GET(ICE_PGCA_ILR, data);
+ action->ho_polarity = FIELD_GET(ICE_PGCA_HOP, data);
+ action->ho_inc = FIELD_GET(ICE_PGCA_HOI, data);
+}
+
+#define ICE_PGNCK_VLD BIT_ULL(0)
+#define ICE_PGNCK_NID GENMASK_ULL(11, 1)
+#define ICE_PGNCK_F0 BIT_ULL(12)
+#define ICE_PGNCK_F1 BIT_ULL(13)
+#define ICE_PGNCK_F2 BIT_ULL(14)
+#define ICE_PGNCK_F3 BIT_ULL(15)
+#define ICE_PGNCK_BH BIT_ULL(16)
+#define ICE_PGNCK_BI GENMASK_ULL(24, 17)
+#define ICE_PGNCK_AR GENMASK_ULL(40, 25)
+
+/**
+ * ice_pg_nm_cam_key_init - parse 41 bits of Parse Graph NoMatch CAM Key
+ * @key: pointer to the Parse Graph NoMatch CAM Key structure
+ * @data: Parse Graph NoMatch CAM Key data to be parsed
+ */
+static void ice_pg_nm_cam_key_init(struct ice_pg_nm_cam_key *key, u64 data)
+{
+ key->valid = FIELD_GET(ICE_PGNCK_VLD, data);
+ key->node_id = FIELD_GET(ICE_PGNCK_NID, data);
+ key->flag0 = FIELD_GET(ICE_PGNCK_F0, data);
+ key->flag1 = FIELD_GET(ICE_PGNCK_F1, data);
+ key->flag2 = FIELD_GET(ICE_PGNCK_F2, data);
+ key->flag3 = FIELD_GET(ICE_PGNCK_F3, data);
+
+ if (FIELD_GET(ICE_PGNCK_BH, data))
+ key->boost_idx = FIELD_GET(ICE_PGNCK_BI, data);
+ else
+ key->boost_idx = 0;
+
+ key->alu_reg = FIELD_GET(ICE_PGNCK_AR, data);
+}
+
+#define ICE_PGCK_VLD BIT_ULL(0)
+#define ICE_PGCK_NID GENMASK_ULL(11, 1)
+#define ICE_PGCK_F0 BIT_ULL(12)
+#define ICE_PGCK_F1 BIT_ULL(13)
+#define ICE_PGCK_F2 BIT_ULL(14)
+#define ICE_PGCK_F3 BIT_ULL(15)
+#define ICE_PGCK_BH BIT_ULL(16)
+#define ICE_PGCK_BI GENMASK_ULL(24, 17)
+#define ICE_PGCK_AR GENMASK_ULL(40, 25)
+#define ICE_PGCK_NPK_S 41 /* offset for the 2nd 64-bit field */
+#define ICE_PGCK_NPK_IDD (ICE_PGCK_NPK_S / BITS_PER_BYTE)
+#define ICE_PGCK_NPK_OFF (ICE_PGCK_NPK_S % BITS_PER_BYTE)
+#define ICE_PGCK_NPK GENMASK_ULL(72 - ICE_PGCK_NPK_S, \
+ 41 - ICE_PGCK_NPK_S)
+
+/**
+ * ice_pg_cam_key_init - parse 73 bits of Parse Graph CAM Key
+ * @key: pointer to the Parse Graph CAM Key structure
+ * @data: Parse Graph CAM Key data to be parsed
+ */
+static void ice_pg_cam_key_init(struct ice_pg_cam_key *key, u8 *data)
+{
+ u64 d64 = *(u64 *)data;
+
+ key->valid = FIELD_GET(ICE_PGCK_VLD, d64);
+ key->node_id = FIELD_GET(ICE_PGCK_NID, d64);
+ key->flag0 = FIELD_GET(ICE_PGCK_F0, d64);
+ key->flag1 = FIELD_GET(ICE_PGCK_F1, d64);
+ key->flag2 = FIELD_GET(ICE_PGCK_F2, d64);
+ key->flag3 = FIELD_GET(ICE_PGCK_F3, d64);
+
+ if (FIELD_GET(ICE_PGCK_BH, d64))
+ key->boost_idx = FIELD_GET(ICE_PGCK_BI, d64);
+ else
+ key->boost_idx = 0;
+
+ key->alu_reg = FIELD_GET(ICE_PGCK_AR, d64);
+
+ d64 = *((u64 *)&data[ICE_PGCK_NPK_IDD]) >> ICE_PGCK_NPK_OFF;
+
+ key->next_proto = FIELD_GET(ICE_PGCK_NPK, d64);
+}
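+
+/* A worked example of the byte-index/bit-offset split used above (editorial
+ * note, not part of the DDP layout definition): next_proto starts at bit 41
+ * of the entry, so ICE_PGCK_NPK_IDD = 41 / 8 = 5 selects the byte to read a
+ * u64 from, ICE_PGCK_NPK_OFF = 41 % 8 = 1 is the remaining shift, and
+ * ICE_PGCK_NPK then extracts 32 bits relative to that shifted value.
+ */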
+
+#define ICE_PG_CAM_ACT_S 73
+#define ICE_PG_CAM_ACT_IDD (ICE_PG_CAM_ACT_S / BITS_PER_BYTE)
+#define ICE_PG_CAM_ACT_OFF (ICE_PG_CAM_ACT_S % BITS_PER_BYTE)
+
+/**
+ * ice_pg_cam_parse_item - parse 128 bits of Parse Graph CAM Entry
+ * @hw: pointer to the hardware structure
+ * @idx: index of Parse Graph CAM Entry
+ * @item: item of Parse Graph CAM Entry
+ * @data: Parse Graph CAM Entry data to be parsed
+ * @size: size of Parse Graph CAM Entry
+ */
+static void ice_pg_cam_parse_item(struct ice_hw *hw, u16 idx, void *item,
+ void *data, int __maybe_unused size)
+{
+ struct ice_pg_cam_item *ci = item;
+ u8 *buf = data;
+ u64 d64;
+
+ ci->idx = idx;
+
+ ice_pg_cam_key_init(&ci->key, buf);
+
+ d64 = *((u64 *)&buf[ICE_PG_CAM_ACT_IDD]) >> ICE_PG_CAM_ACT_OFF;
+ ice_pg_cam_action_init(&ci->action, d64);
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_pg_cam_dump(hw, ci);
+}
+
+#define ICE_PG_SP_CAM_KEY_S 56
+#define ICE_PG_SP_CAM_KEY_IDD (ICE_PG_SP_CAM_KEY_S / BITS_PER_BYTE)
+
+/**
+ * ice_pg_sp_cam_parse_item - parse 136 bits of Parse Graph Spill CAM Entry
+ * @hw: pointer to the hardware structure
+ * @idx: index of Parse Graph Spill CAM Entry
+ * @item: item of Parse Graph Spill CAM Entry
+ * @data: Parse Graph Spill CAM Entry data to be parsed
+ * @size: size of Parse Graph Spill CAM Entry
+ */
+static void ice_pg_sp_cam_parse_item(struct ice_hw *hw, u16 idx, void *item,
+ void *data, int __maybe_unused size)
+{
+ struct ice_pg_cam_item *ci = item;
+ u8 *buf = data;
+ u64 d64;
+
+ ci->idx = idx;
+
+ d64 = *(u64 *)buf;
+ ice_pg_cam_action_init(&ci->action, d64);
+
+ ice_pg_cam_key_init(&ci->key, &buf[ICE_PG_SP_CAM_KEY_IDD]);
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_pg_cam_dump(hw, ci);
+}
+
+#define ICE_PG_NM_CAM_ACT_S 41
+#define ICE_PG_NM_CAM_ACT_IDD (ICE_PG_NM_CAM_ACT_S / BITS_PER_BYTE)
+#define ICE_PG_NM_CAM_ACT_OFF (ICE_PG_NM_CAM_ACT_S % BITS_PER_BYTE)
+
+/**
+ * ice_pg_nm_cam_parse_item - parse 96 bits of Parse Graph NoMatch CAM Entry
+ * @hw: pointer to the hardware structure
+ * @idx: index of Parse Graph NoMatch CAM Entry
+ * @item: item of Parse Graph NoMatch CAM Entry
+ * @data: Parse Graph NoMatch CAM Entry data to be parsed
+ * @size: size of Parse Graph NoMatch CAM Entry
+ */
+static void ice_pg_nm_cam_parse_item(struct ice_hw *hw, u16 idx, void *item,
+ void *data, int __maybe_unused size)
+{
+ struct ice_pg_nm_cam_item *ci = item;
+ u8 *buf = data;
+ u64 d64;
+
+ ci->idx = idx;
+
+ d64 = *(u64 *)buf;
+ ice_pg_nm_cam_key_init(&ci->key, d64);
+
+ d64 = *((u64 *)&buf[ICE_PG_NM_CAM_ACT_IDD]) >> ICE_PG_NM_CAM_ACT_OFF;
+ ice_pg_cam_action_init(&ci->action, d64);
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_pg_nm_cam_dump(hw, ci);
+}
+
+#define ICE_PG_NM_SP_CAM_ACT_S 56
+#define ICE_PG_NM_SP_CAM_ACT_IDD (ICE_PG_NM_SP_CAM_ACT_S / BITS_PER_BYTE)
+#define ICE_PG_NM_SP_CAM_ACT_OFF (ICE_PG_NM_SP_CAM_ACT_S % BITS_PER_BYTE)
+
+/**
+ * ice_pg_nm_sp_cam_parse_item - parse 104 bits of Parse Graph NoMatch Spill
+ * CAM Entry
+ * @hw: pointer to the hardware structure
+ * @idx: index of Parse Graph NoMatch Spill CAM Entry
+ * @item: item of Parse Graph NoMatch Spill CAM Entry
+ * @data: Parse Graph NoMatch Spill CAM Entry data to be parsed
+ * @size: size of Parse Graph NoMatch Spill CAM Entry
+ */
+static void ice_pg_nm_sp_cam_parse_item(struct ice_hw *hw, u16 idx,
+ void *item, void *data,
+ int __maybe_unused size)
+{
+ struct ice_pg_nm_cam_item *ci = item;
+ u8 *buf = data;
+ u64 d64;
+
+ ci->idx = idx;
+
+ d64 = *(u64 *)buf;
+ ice_pg_cam_action_init(&ci->action, d64);
+
+ d64 = *((u64 *)&buf[ICE_PG_NM_SP_CAM_ACT_IDD]) >>
+ ICE_PG_NM_SP_CAM_ACT_OFF;
+ ice_pg_nm_cam_key_init(&ci->key, d64);
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_pg_nm_cam_dump(hw, ci);
+}
+
+/**
+ * ice_pg_cam_table_get - create a parse graph cam table
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Parse Graph CAM table.
+ */
+static struct ice_pg_cam_item *ice_pg_cam_table_get(struct ice_hw *hw)
+{
+ return ice_parser_create_table(hw, ICE_SID_RXPARSER_CAM,
+ sizeof(struct ice_pg_cam_item),
+ ICE_PG_CAM_TABLE_SIZE,
+ ice_pg_cam_parse_item, false);
+}
+
+/**
+ * ice_pg_sp_cam_table_get - create a parse graph spill cam table
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Parse Graph Spill CAM table.
+ */
+static struct ice_pg_cam_item *ice_pg_sp_cam_table_get(struct ice_hw *hw)
+{
+ return ice_parser_create_table(hw, ICE_SID_RXPARSER_PG_SPILL,
+ sizeof(struct ice_pg_cam_item),
+ ICE_PG_SP_CAM_TABLE_SIZE,
+ ice_pg_sp_cam_parse_item, false);
+}
+
+/**
+ * ice_pg_nm_cam_table_get - create a parse graph no match cam table
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Parse Graph No Match CAM table.
+ */
+static struct ice_pg_nm_cam_item *ice_pg_nm_cam_table_get(struct ice_hw *hw)
+{
+ return ice_parser_create_table(hw, ICE_SID_RXPARSER_NOMATCH_CAM,
+ sizeof(struct ice_pg_nm_cam_item),
+ ICE_PG_NM_CAM_TABLE_SIZE,
+ ice_pg_nm_cam_parse_item, false);
+}
+
+/**
+ * ice_pg_nm_sp_cam_table_get - create a parse graph no match spill cam table
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Parse Graph No Match Spill CAM table.
+ */
+static struct ice_pg_nm_cam_item *ice_pg_nm_sp_cam_table_get(struct ice_hw *hw)
+{
+ return ice_parser_create_table(hw, ICE_SID_RXPARSER_NOMATCH_SPILL,
+ sizeof(struct ice_pg_nm_cam_item),
+ ICE_PG_NM_SP_CAM_TABLE_SIZE,
+ ice_pg_nm_sp_cam_parse_item, false);
+}
+
+static bool __ice_pg_cam_match(struct ice_pg_cam_item *item,
+ struct ice_pg_cam_key *key)
+{
+ return (item->key.valid &&
+ !memcmp(&item->key.val, &key->val, sizeof(key->val)));
+}
+
+static bool __ice_pg_nm_cam_match(struct ice_pg_nm_cam_item *item,
+ struct ice_pg_cam_key *key)
+{
+ return (item->key.valid &&
+ !memcmp(&item->key.val, &key->val, sizeof(item->key.val)));
+}
+
+/**
+ * ice_pg_cam_match - search parse graph cam table by key
+ * @table: parse graph cam table to search
+ * @size: cam table size
+ * @key: search key
+ *
+ * Return: a pointer to the matching PG CAM item or NULL.
+ */
+struct ice_pg_cam_item *ice_pg_cam_match(struct ice_pg_cam_item *table,
+ int size, struct ice_pg_cam_key *key)
+{
+ int i;
+
+ for (i = 0; i < size; i++) {
+ struct ice_pg_cam_item *item = &table[i];
+
+ if (__ice_pg_cam_match(item, key))
+ return item;
+ }
+
+ return NULL;
+}
+
+/**
+ * ice_pg_nm_cam_match - search parse graph no match cam table by key
+ * @table: parse graph no match cam table to search
+ * @size: cam table size
+ * @key: search key
+ *
+ * Return: a pointer to the matching PG No Match CAM item or NULL.
+ */
+struct ice_pg_nm_cam_item *
+ice_pg_nm_cam_match(struct ice_pg_nm_cam_item *table, int size,
+ struct ice_pg_cam_key *key)
+{
+ int i;
+
+ for (i = 0; i < size; i++) {
+ struct ice_pg_nm_cam_item *item = &table[i];
+
+ if (__ice_pg_nm_cam_match(item, key))
+ return item;
+ }
+
+ return NULL;
+}
+
+/*** Ternary match ***/
+/* Perform a ternary match on a 1-byte pattern (@pat) given @key and @key_inv
+ * Rules (per bit):
+ * Key == 0 and Key_inv == 0 : Never match (Don't care)
+ * Key == 0 and Key_inv == 1 : Match on bit == 1
+ * Key == 1 and Key_inv == 0 : Match on bit == 0
+ * Key == 1 and Key_inv == 1 : Always match (Don't care)
+ *
+ * Return: true if all bits match, false otherwise.
+ */
+static bool ice_ternary_match_byte(u8 key, u8 key_inv, u8 pat)
+{
+ u8 bit_key, bit_key_inv, bit_pat;
+ int i;
+
+ for (i = 0; i < BITS_PER_BYTE; i++) {
+ bit_key = key & BIT(i);
+ bit_key_inv = key_inv & BIT(i);
+ bit_pat = pat & BIT(i);
+
+ if (bit_key != 0 && bit_key_inv != 0)
+ continue;
+
+ if ((bit_key == 0 && bit_key_inv == 0) || bit_key == bit_pat)
+ return false;
+ }
+
+ return true;
+}
+
+static bool ice_ternary_match(const u8 *key, const u8 *key_inv,
+ const u8 *pat, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ if (!ice_ternary_match_byte(key[i], key_inv[i], pat[i]))
+ return false;
+
+ return true;
+}
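+
+/* Worked example of the per-bit rules above (illustrative only): with
+ * key = 0xF0 and key_inv = 0x0F, the low nibble bits are "match on 1" and
+ * the high nibble bits are "match on 0", so only the pattern byte 0x0F
+ * matches; a byte with key = key_inv = 0xFF is a full don't-care and matches
+ * any pattern byte.
+ */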
+
+/*** ICE_SID_RXPARSER_BOOST_TCAM and ICE_SID_LBL_RXPARSER_TMEM sections ***/
+static void ice_bst_np_kb_dump(struct ice_hw *hw, struct ice_np_keybuilder *kb)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "next proto key builder:\n");
+ dev_info(dev, "\topc = %d\n", kb->opc);
+ dev_info(dev, "\tstart_reg0 = %d\n", kb->start_reg0);
+ dev_info(dev, "\tlen_reg1 = %d\n", kb->len_reg1);
+}
+
+static void ice_bst_pg_kb_dump(struct ice_hw *hw, struct ice_pg_keybuilder *kb)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "parse graph key builder:\n");
+ dev_info(dev, "\tflag0_ena = %d\n", kb->flag0_ena);
+ dev_info(dev, "\tflag1_ena = %d\n", kb->flag1_ena);
+ dev_info(dev, "\tflag2_ena = %d\n", kb->flag2_ena);
+ dev_info(dev, "\tflag3_ena = %d\n", kb->flag3_ena);
+ dev_info(dev, "\tflag0_idx = %d\n", kb->flag0_idx);
+ dev_info(dev, "\tflag1_idx = %d\n", kb->flag1_idx);
+ dev_info(dev, "\tflag2_idx = %d\n", kb->flag2_idx);
+ dev_info(dev, "\tflag3_idx = %d\n", kb->flag3_idx);
+ dev_info(dev, "\talu_reg_idx = %d\n", kb->alu_reg_idx);
+}
+
+static void ice_bst_alu_dump(struct ice_hw *hw, struct ice_alu *alu, int idx)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "alu%d:\n", idx);
+ dev_info(dev, "\topc = %d\n", alu->opc);
+ dev_info(dev, "\tsrc_start = %d\n", alu->src_start);
+ dev_info(dev, "\tsrc_len = %d\n", alu->src_len);
+ dev_info(dev, "\tshift_xlate_sel = %d\n", alu->shift_xlate_sel);
+ dev_info(dev, "\tshift_xlate_key = %d\n", alu->shift_xlate_key);
+ dev_info(dev, "\tsrc_reg_id = %d\n", alu->src_reg_id);
+ dev_info(dev, "\tdst_reg_id = %d\n", alu->dst_reg_id);
+ dev_info(dev, "\tinc0 = %d\n", alu->inc0);
+ dev_info(dev, "\tinc1 = %d\n", alu->inc1);
+ dev_info(dev, "\tproto_offset_opc = %d\n", alu->proto_offset_opc);
+ dev_info(dev, "\tproto_offset = %d\n", alu->proto_offset);
+ dev_info(dev, "\tbranch_addr = %d\n", alu->branch_addr);
+ dev_info(dev, "\timm = %d\n", alu->imm);
+ dev_info(dev, "\tdst_start = %d\n", alu->dst_start);
+ dev_info(dev, "\tdst_len = %d\n", alu->dst_len);
+ dev_info(dev, "\tflags_extr_imm = %d\n", alu->flags_extr_imm);
+	dev_info(dev, "\tflags_start_imm = %d\n", alu->flags_start_imm);
+}
+
+/**
+ * ice_bst_tcam_dump - dump a boost tcam info
+ * @hw: pointer to the hardware structure
+ * @item: boost tcam to dump
+ */
+static void ice_bst_tcam_dump(struct ice_hw *hw, struct ice_bst_tcam_item *item)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+ int i;
+
+ dev_info(dev, "addr = %d\n", item->addr);
+
+ dev_info(dev, "key : ");
+ for (i = 0; i < ICE_BST_TCAM_KEY_SIZE; i++)
+ dev_info(dev, "%02x ", item->key[i]);
+
+ dev_info(dev, "\n");
+
+ dev_info(dev, "key_inv: ");
+ for (i = 0; i < ICE_BST_TCAM_KEY_SIZE; i++)
+ dev_info(dev, "%02x ", item->key_inv[i]);
+
+ dev_info(dev, "\n");
+
+ dev_info(dev, "hit_idx_grp = %d\n", item->hit_idx_grp);
+ dev_info(dev, "pg_prio = %d\n", item->pg_prio);
+
+ ice_bst_np_kb_dump(hw, &item->np_kb);
+ ice_bst_pg_kb_dump(hw, &item->pg_kb);
+
+ ice_bst_alu_dump(hw, &item->alu0, ICE_ALU0_IDX);
+ ice_bst_alu_dump(hw, &item->alu1, ICE_ALU1_IDX);
+ ice_bst_alu_dump(hw, &item->alu2, ICE_ALU2_IDX);
+}
+
+static void ice_lbl_dump(struct ice_hw *hw, struct ice_lbl_item *item)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "index = %u\n", item->idx);
+ dev_info(dev, "type = %u\n", item->type);
+ dev_info(dev, "label = %s\n", item->label);
+}
+
+#define ICE_BST_ALU_OPC GENMASK_ULL(5, 0)
+#define ICE_BST_ALU_SS GENMASK_ULL(13, 6)
+#define ICE_BST_ALU_SL GENMASK_ULL(18, 14)
+#define ICE_BST_ALU_SXS BIT_ULL(19)
+#define ICE_BST_ALU_SXK GENMASK_ULL(23, 20)
+#define ICE_BST_ALU_SRID GENMASK_ULL(30, 24)
+#define ICE_BST_ALU_DRID GENMASK_ULL(37, 31)
+#define ICE_BST_ALU_INC0 BIT_ULL(38)
+#define ICE_BST_ALU_INC1 BIT_ULL(39)
+#define ICE_BST_ALU_POO GENMASK_ULL(41, 40)
+#define ICE_BST_ALU_PO GENMASK_ULL(49, 42)
+#define ICE_BST_ALU_BA_S 50 /* offset for the 2nd 64-bit field */
+#define ICE_BST_ALU_BA GENMASK_ULL(57 - ICE_BST_ALU_BA_S, \
+ 50 - ICE_BST_ALU_BA_S)
+#define ICE_BST_ALU_IMM GENMASK_ULL(73 - ICE_BST_ALU_BA_S, \
+ 58 - ICE_BST_ALU_BA_S)
+#define ICE_BST_ALU_DFE BIT_ULL(74 - ICE_BST_ALU_BA_S)
+#define ICE_BST_ALU_DS GENMASK_ULL(80 - ICE_BST_ALU_BA_S, \
+ 75 - ICE_BST_ALU_BA_S)
+#define ICE_BST_ALU_DL GENMASK_ULL(86 - ICE_BST_ALU_BA_S, \
+ 81 - ICE_BST_ALU_BA_S)
+#define ICE_BST_ALU_FEI BIT_ULL(87 - ICE_BST_ALU_BA_S)
+#define ICE_BST_ALU_FSI GENMASK_ULL(95 - ICE_BST_ALU_BA_S, \
+ 88 - ICE_BST_ALU_BA_S)
+
+/**
+ * ice_bst_alu_init - parse 96 bits of ALU entry
+ * @alu: pointer to the ALU entry structure
+ * @data: ALU entry data to be parsed
+ * @off: bit offset of the ALU entry within @data
+ */
+static void ice_bst_alu_init(struct ice_alu *alu, u8 *data, u8 off)
+{
+ u64 d64;
+ u8 idd;
+
+ d64 = *((u64 *)data) >> off;
+
+ alu->opc = FIELD_GET(ICE_BST_ALU_OPC, d64);
+ alu->src_start = FIELD_GET(ICE_BST_ALU_SS, d64);
+ alu->src_len = FIELD_GET(ICE_BST_ALU_SL, d64);
+ alu->shift_xlate_sel = FIELD_GET(ICE_BST_ALU_SXS, d64);
+ alu->shift_xlate_key = FIELD_GET(ICE_BST_ALU_SXK, d64);
+ alu->src_reg_id = FIELD_GET(ICE_BST_ALU_SRID, d64);
+ alu->dst_reg_id = FIELD_GET(ICE_BST_ALU_DRID, d64);
+ alu->inc0 = FIELD_GET(ICE_BST_ALU_INC0, d64);
+ alu->inc1 = FIELD_GET(ICE_BST_ALU_INC1, d64);
+ alu->proto_offset_opc = FIELD_GET(ICE_BST_ALU_POO, d64);
+ alu->proto_offset = FIELD_GET(ICE_BST_ALU_PO, d64);
+
+ idd = (ICE_BST_ALU_BA_S + off) / BITS_PER_BYTE;
+ off = (ICE_BST_ALU_BA_S + off) % BITS_PER_BYTE;
+ d64 = *((u64 *)(&data[idd])) >> off;
+
+ alu->branch_addr = FIELD_GET(ICE_BST_ALU_BA, d64);
+ alu->imm = FIELD_GET(ICE_BST_ALU_IMM, d64);
+ alu->dedicate_flags_ena = FIELD_GET(ICE_BST_ALU_DFE, d64);
+ alu->dst_start = FIELD_GET(ICE_BST_ALU_DS, d64);
+ alu->dst_len = FIELD_GET(ICE_BST_ALU_DL, d64);
+ alu->flags_extr_imm = FIELD_GET(ICE_BST_ALU_FEI, d64);
+ alu->flags_start_imm = FIELD_GET(ICE_BST_ALU_FSI, d64);
+}
+
+#define ICE_BST_PGKB_F0_ENA BIT_ULL(0)
+#define ICE_BST_PGKB_F0_IDX GENMASK_ULL(6, 1)
+#define ICE_BST_PGKB_F1_ENA BIT_ULL(7)
+#define ICE_BST_PGKB_F1_IDX GENMASK_ULL(13, 8)
+#define ICE_BST_PGKB_F2_ENA BIT_ULL(14)
+#define ICE_BST_PGKB_F2_IDX GENMASK_ULL(20, 15)
+#define ICE_BST_PGKB_F3_ENA BIT_ULL(21)
+#define ICE_BST_PGKB_F3_IDX GENMASK_ULL(27, 22)
+#define ICE_BST_PGKB_AR_IDX GENMASK_ULL(34, 28)
+
+/**
+ * ice_bst_pgkb_init - parse 35 bits of Parse Graph Key Build
+ * @kb: pointer to the Parse Graph Key Build structure
+ * @data: Parse Graph Key Build data to be parsed
+ */
+static void ice_bst_pgkb_init(struct ice_pg_keybuilder *kb, u64 data)
+{
+ kb->flag0_ena = FIELD_GET(ICE_BST_PGKB_F0_ENA, data);
+ kb->flag0_idx = FIELD_GET(ICE_BST_PGKB_F0_IDX, data);
+ kb->flag1_ena = FIELD_GET(ICE_BST_PGKB_F1_ENA, data);
+ kb->flag1_idx = FIELD_GET(ICE_BST_PGKB_F1_IDX, data);
+ kb->flag2_ena = FIELD_GET(ICE_BST_PGKB_F2_ENA, data);
+ kb->flag2_idx = FIELD_GET(ICE_BST_PGKB_F2_IDX, data);
+ kb->flag3_ena = FIELD_GET(ICE_BST_PGKB_F3_ENA, data);
+ kb->flag3_idx = FIELD_GET(ICE_BST_PGKB_F3_IDX, data);
+ kb->alu_reg_idx = FIELD_GET(ICE_BST_PGKB_AR_IDX, data);
+}
+
+#define ICE_BST_NPKB_OPC GENMASK(1, 0)
+#define ICE_BST_NPKB_S_R0 GENMASK(9, 2)
+#define ICE_BST_NPKB_L_R1 GENMASK(17, 10)
+
+/**
+ * ice_bst_npkb_init - parse 18 bits of Next Protocol Key Build
+ * @kb: pointer to the Next Protocol Key Build structure
+ * @data: Next Protocol Key Build data to be parsed
+ */
+static void ice_bst_npkb_init(struct ice_np_keybuilder *kb, u32 data)
+{
+ kb->opc = FIELD_GET(ICE_BST_NPKB_OPC, data);
+ kb->start_reg0 = FIELD_GET(ICE_BST_NPKB_S_R0, data);
+ kb->len_reg1 = FIELD_GET(ICE_BST_NPKB_L_R1, data);
+}
+
+#define ICE_BT_KEY_S 32
+#define ICE_BT_KEY_IDD (ICE_BT_KEY_S / BITS_PER_BYTE)
+#define ICE_BT_KIV_S 192
+#define ICE_BT_KIV_IDD (ICE_BT_KIV_S / BITS_PER_BYTE)
+#define ICE_BT_HIG_S 352
+#define ICE_BT_HIG_IDD (ICE_BT_HIG_S / BITS_PER_BYTE)
+#define ICE_BT_PGP_S 360
+#define ICE_BT_PGP_IDD (ICE_BT_PGP_S / BITS_PER_BYTE)
+#define ICE_BT_PGP_M GENMASK(361 - ICE_BT_PGP_S, 360 - ICE_BT_PGP_S)
+#define ICE_BT_NPKB_S 362
+#define ICE_BT_NPKB_IDD (ICE_BT_NPKB_S / BITS_PER_BYTE)
+#define ICE_BT_NPKB_OFF (ICE_BT_NPKB_S % BITS_PER_BYTE)
+#define ICE_BT_PGKB_S 380
+#define ICE_BT_PGKB_IDD (ICE_BT_PGKB_S / BITS_PER_BYTE)
+#define ICE_BT_PGKB_OFF (ICE_BT_PGKB_S % BITS_PER_BYTE)
+#define ICE_BT_ALU0_S 415
+#define ICE_BT_ALU0_IDD (ICE_BT_ALU0_S / BITS_PER_BYTE)
+#define ICE_BT_ALU0_OFF (ICE_BT_ALU0_S % BITS_PER_BYTE)
+#define ICE_BT_ALU1_S 511
+#define ICE_BT_ALU1_IDD (ICE_BT_ALU1_S / BITS_PER_BYTE)
+#define ICE_BT_ALU1_OFF (ICE_BT_ALU1_S % BITS_PER_BYTE)
+#define ICE_BT_ALU2_S 607
+#define ICE_BT_ALU2_IDD (ICE_BT_ALU2_S / BITS_PER_BYTE)
+#define ICE_BT_ALU2_OFF (ICE_BT_ALU2_S % BITS_PER_BYTE)
+
+/**
+ * ice_bst_parse_item - parse 704 bits of Boost TCAM entry
+ * @hw: pointer to the hardware structure
+ * @idx: index of Boost TCAM entry
+ * @item: item of Boost TCAM entry
+ * @data: Boost TCAM entry data to be parsed
+ * @size: size of Boost TCAM entry
+ */
+static void ice_bst_parse_item(struct ice_hw *hw, u16 idx, void *item,
+ void *data, int __maybe_unused size)
+{
+ struct ice_bst_tcam_item *ti = item;
+ u8 *buf = (u8 *)data;
+ int i;
+
+ ti->addr = *(u16 *)buf;
+
+ for (i = 0; i < ICE_BST_TCAM_KEY_SIZE; i++) {
+ ti->key[i] = buf[ICE_BT_KEY_IDD + i];
+ ti->key_inv[i] = buf[ICE_BT_KIV_IDD + i];
+ }
+ ti->hit_idx_grp = buf[ICE_BT_HIG_IDD];
+ ti->pg_prio = buf[ICE_BT_PGP_IDD] & ICE_BT_PGP_M;
+
+ ice_bst_npkb_init(&ti->np_kb,
+ *((u32 *)(&buf[ICE_BT_NPKB_IDD])) >>
+ ICE_BT_NPKB_OFF);
+ ice_bst_pgkb_init(&ti->pg_kb,
+ *((u64 *)(&buf[ICE_BT_PGKB_IDD])) >>
+ ICE_BT_PGKB_OFF);
+
+ ice_bst_alu_init(&ti->alu0, &buf[ICE_BT_ALU0_IDD], ICE_BT_ALU0_OFF);
+ ice_bst_alu_init(&ti->alu1, &buf[ICE_BT_ALU1_IDD], ICE_BT_ALU1_OFF);
+ ice_bst_alu_init(&ti->alu2, &buf[ICE_BT_ALU2_IDD], ICE_BT_ALU2_OFF);
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_bst_tcam_dump(hw, ti);
+}
+
+/**
+ * ice_bst_tcam_table_get - create a boost tcam table
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Boost TCAM table.
+ */
+static struct ice_bst_tcam_item *ice_bst_tcam_table_get(struct ice_hw *hw)
+{
+ return ice_parser_create_table(hw, ICE_SID_RXPARSER_BOOST_TCAM,
+ sizeof(struct ice_bst_tcam_item),
+ ICE_BST_TCAM_TABLE_SIZE,
+ ice_bst_parse_item, true);
+}
+
+static void ice_parse_lbl_item(struct ice_hw *hw, u16 idx, void *item,
+ void *data, int __maybe_unused size)
+{
+ struct ice_lbl_item *lbl_item = item;
+ struct ice_lbl_item *lbl_data = data;
+
+ lbl_item->idx = lbl_data->idx;
+ memcpy(lbl_item->label, lbl_data->label, sizeof(lbl_item->label));
+
+ if (strstarts(lbl_item->label, ICE_LBL_BST_DVM))
+ lbl_item->type = ICE_LBL_BST_TYPE_DVM;
+ else if (strstarts(lbl_item->label, ICE_LBL_BST_SVM))
+ lbl_item->type = ICE_LBL_BST_TYPE_SVM;
+ else if (strstarts(lbl_item->label, ICE_LBL_TNL_VXLAN))
+ lbl_item->type = ICE_LBL_BST_TYPE_VXLAN;
+ else if (strstarts(lbl_item->label, ICE_LBL_TNL_GENEVE))
+ lbl_item->type = ICE_LBL_BST_TYPE_GENEVE;
+ else if (strstarts(lbl_item->label, ICE_LBL_TNL_UDP_ECPRI))
+ lbl_item->type = ICE_LBL_BST_TYPE_UDP_ECPRI;
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_lbl_dump(hw, lbl_item);
+}
+
+/**
+ * ice_bst_lbl_table_get - create a boost label table
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Boost label table.
+ */
+static struct ice_lbl_item *ice_bst_lbl_table_get(struct ice_hw *hw)
+{
+ return ice_parser_create_table(hw, ICE_SID_LBL_RXPARSER_TMEM,
+ sizeof(struct ice_lbl_item),
+ ICE_BST_TCAM_TABLE_SIZE,
+ ice_parse_lbl_item, true);
+}
+
+/**
+ * ice_bst_tcam_match - match a pattern on the boost tcam table
+ * @tcam_table: boost tcam table to search
+ * @pat: pattern to match
+ *
+ * Return: a pointer to the matching Boost TCAM item or NULL.
+ */
+struct ice_bst_tcam_item *
+ice_bst_tcam_match(struct ice_bst_tcam_item *tcam_table, u8 *pat)
+{
+ int i;
+
+ for (i = 0; i < ICE_BST_TCAM_TABLE_SIZE; i++) {
+ struct ice_bst_tcam_item *item = &tcam_table[i];
+
+ if (item->hit_idx_grp == 0)
+ continue;
+ if (ice_ternary_match(item->key, item->key_inv, pat,
+ ICE_BST_TCAM_KEY_SIZE))
+ return item;
+ }
+
+ return NULL;
+}
+
+/*** ICE_SID_RXPARSER_MARKER_PTYPE section ***/
+/**
+ * ice_ptype_mk_tcam_dump - dump a ptype marker tcam info
+ * @hw: pointer to the hardware structure
+ * @item: ptype marker tcam to dump
+ */
+static void ice_ptype_mk_tcam_dump(struct ice_hw *hw,
+ struct ice_ptype_mk_tcam_item *item)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+ int i;
+
+ dev_info(dev, "address = %d\n", item->address);
+ dev_info(dev, "ptype = %d\n", item->ptype);
+
+ dev_info(dev, "key :");
+ for (i = 0; i < ICE_PTYPE_MK_TCAM_KEY_SIZE; i++)
+ dev_info(dev, "%02x ", item->key[i]);
+
+ dev_info(dev, "\n");
+
+ dev_info(dev, "key_inv:");
+ for (i = 0; i < ICE_PTYPE_MK_TCAM_KEY_SIZE; i++)
+ dev_info(dev, "%02x ", item->key_inv[i]);
+
+ dev_info(dev, "\n");
+}
+
+static void ice_parse_ptype_mk_tcam_item(struct ice_hw *hw, u16 idx,
+ void *item, void *data, int size)
+{
+ memcpy(item, data, size);
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_ptype_mk_tcam_dump(hw,
+ (struct ice_ptype_mk_tcam_item *)item);
+}
+
+/**
+ * ice_ptype_mk_tcam_table_get - create a ptype marker tcam table
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Marker PType TCAM table.
+ */
+static
+struct ice_ptype_mk_tcam_item *ice_ptype_mk_tcam_table_get(struct ice_hw *hw)
+{
+ return ice_parser_create_table(hw, ICE_SID_RXPARSER_MARKER_PTYPE,
+ sizeof(struct ice_ptype_mk_tcam_item),
+ ICE_PTYPE_MK_TCAM_TABLE_SIZE,
+ ice_parse_ptype_mk_tcam_item, true);
+}
+
+/**
+ * ice_ptype_mk_tcam_match - match a pattern on a ptype marker tcam table
+ * @table: ptype marker tcam table to search
+ * @pat: pattern to match
+ * @len: length of the pattern
+ *
+ * Return: a pointer to the matching Marker PType item or NULL.
+ */
+struct ice_ptype_mk_tcam_item *
+ice_ptype_mk_tcam_match(struct ice_ptype_mk_tcam_item *table,
+ u8 *pat, int len)
+{
+ int i;
+
+ for (i = 0; i < ICE_PTYPE_MK_TCAM_TABLE_SIZE; i++) {
+ struct ice_ptype_mk_tcam_item *item = &table[i];
+
+ if (ice_ternary_match(item->key, item->key_inv, pat, len))
+ return item;
+ }
+
+ return NULL;
+}
+
+/*** ICE_SID_RXPARSER_MARKER_GRP section ***/
+/**
+ * ice_mk_grp_dump - dump a marker group item info
+ * @hw: pointer to the hardware structure
+ * @item: marker group item to dump
+ */
+static void ice_mk_grp_dump(struct ice_hw *hw, struct ice_mk_grp_item *item)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+ int i;
+
+ dev_info(dev, "index = %d\n", item->idx);
+
+ dev_info(dev, "markers: ");
+ for (i = 0; i < ICE_MK_COUNT_PER_GRP; i++)
+ dev_info(dev, "%d ", item->markers[i]);
+
+ dev_info(dev, "\n");
+}
+
+static void ice_mk_grp_parse_item(struct ice_hw *hw, u16 idx, void *item,
+ void *data, int __maybe_unused size)
+{
+ struct ice_mk_grp_item *grp = item;
+ u8 *buf = data;
+ int i;
+
+ grp->idx = idx;
+
+ for (i = 0; i < ICE_MK_COUNT_PER_GRP; i++)
+ grp->markers[i] = buf[i];
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_mk_grp_dump(hw, grp);
+}
+
+/**
+ * ice_mk_grp_table_get - create a marker group table
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Marker Group ID table.
+ */
+static struct ice_mk_grp_item *ice_mk_grp_table_get(struct ice_hw *hw)
+{
+ return ice_parser_create_table(hw, ICE_SID_RXPARSER_MARKER_GRP,
+ sizeof(struct ice_mk_grp_item),
+ ICE_MK_GRP_TABLE_SIZE,
+ ice_mk_grp_parse_item, false);
+}
+
+/*** ICE_SID_RXPARSER_PROTO_GRP section ***/
+static void ice_proto_off_dump(struct ice_hw *hw,
+ struct ice_proto_off *po, int idx)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "proto %d\n", idx);
+ dev_info(dev, "\tpolarity = %d\n", po->polarity);
+ dev_info(dev, "\tproto_id = %d\n", po->proto_id);
+ dev_info(dev, "\toffset = %d\n", po->offset);
+}
+
+/**
+ * ice_proto_grp_dump - dump a proto group item info
+ * @hw: pointer to the hardware structure
+ * @item: proto group item to dump
+ */
+static void ice_proto_grp_dump(struct ice_hw *hw,
+ struct ice_proto_grp_item *item)
+{
+ int i;
+
+ dev_info(ice_hw_to_dev(hw), "index = %d\n", item->idx);
+
+ for (i = 0; i < ICE_PROTO_COUNT_PER_GRP; i++)
+ ice_proto_off_dump(hw, &item->po[i], i);
+}
+
+#define ICE_PO_POL BIT(0)
+#define ICE_PO_PID GENMASK(8, 1)
+#define ICE_PO_OFF GENMASK(21, 12)
+
+/**
+ * ice_proto_off_parse - parse 22 bits of Protocol entry
+ * @po: pointer to the Protocol entry structure
+ * @data: Protocol entry data to be parsed
+ */
+static void ice_proto_off_parse(struct ice_proto_off *po, u32 data)
+{
+ po->polarity = FIELD_GET(ICE_PO_POL, data);
+ po->proto_id = FIELD_GET(ICE_PO_PID, data);
+ po->offset = FIELD_GET(ICE_PO_OFF, data);
+}
+
+/**
+ * ice_proto_grp_parse_item - parse 192 bits of Protocol Group Table entry
+ * @hw: pointer to the hardware structure
+ * @idx: index of Protocol Group Table entry
+ * @item: item of Protocol Group Table entry
+ * @data: Protocol Group Table entry data to be parsed
+ * @size: size of Protocol Group Table entry
+ */
+static void ice_proto_grp_parse_item(struct ice_hw *hw, u16 idx, void *item,
+ void *data, int __maybe_unused size)
+{
+ struct ice_proto_grp_item *grp = item;
+ u8 *buf = (u8 *)data;
+ u8 idd, off;
+ u32 d32;
+ int i;
+
+ grp->idx = idx;
+
+ for (i = 0; i < ICE_PROTO_COUNT_PER_GRP; i++) {
+ idd = (ICE_PROTO_GRP_ITEM_SIZE * i) / BITS_PER_BYTE;
+ off = (ICE_PROTO_GRP_ITEM_SIZE * i) % BITS_PER_BYTE;
+ d32 = *((u32 *)&buf[idd]) >> off;
+ ice_proto_off_parse(&grp->po[i], d32);
+ }
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_proto_grp_dump(hw, grp);
+}
+
+/**
+ * ice_proto_grp_table_get - create a proto group table
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Protocol Group table.
+ */
+static struct ice_proto_grp_item *ice_proto_grp_table_get(struct ice_hw *hw)
+{
+ return ice_parser_create_table(hw, ICE_SID_RXPARSER_PROTO_GRP,
+ sizeof(struct ice_proto_grp_item),
+ ICE_PROTO_GRP_TABLE_SIZE,
+ ice_proto_grp_parse_item, false);
+}
+
+/*** ICE_SID_RXPARSER_FLAG_REDIR section ***/
+/**
+ * ice_flg_rd_dump - dump a flag redirect item info
+ * @hw: pointer to the hardware structure
+ * @item: flag redirect item to dump
+ */
+static void ice_flg_rd_dump(struct ice_hw *hw, struct ice_flg_rd_item *item)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "index = %d\n", item->idx);
+ dev_info(dev, "expose = %d\n", item->expose);
+ dev_info(dev, "intr_flg_id = %d\n", item->intr_flg_id);
+}
+
+#define ICE_FRT_EXPO BIT(0)
+#define ICE_FRT_IFID GENMASK(6, 1)
+
+/**
+ * ice_flg_rd_parse_item - parse 8 bits of Flag Redirect Table entry
+ * @hw: pointer to the hardware structure
+ * @idx: index of Flag Redirect Table entry
+ * @item: item of Flag Redirect Table entry
+ * @data: Flag Redirect Table entry data to be parsed
+ * @size: size of Flag Redirect Table entry
+ */
+static void ice_flg_rd_parse_item(struct ice_hw *hw, u16 idx, void *item,
+ void *data, int __maybe_unused size)
+{
+ struct ice_flg_rd_item *rdi = item;
+ u8 d8 = *(u8 *)data;
+
+ rdi->idx = idx;
+ rdi->expose = FIELD_GET(ICE_FRT_EXPO, d8);
+ rdi->intr_flg_id = FIELD_GET(ICE_FRT_IFID, d8);
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_flg_rd_dump(hw, rdi);
+}
+
+/**
+ * ice_flg_rd_table_get - create a flag redirect table
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Flags Redirection table.
+ */
+static struct ice_flg_rd_item *ice_flg_rd_table_get(struct ice_hw *hw)
+{
+ return ice_parser_create_table(hw, ICE_SID_RXPARSER_FLAG_REDIR,
+ sizeof(struct ice_flg_rd_item),
+ ICE_FLG_RD_TABLE_SIZE,
+ ice_flg_rd_parse_item, false);
+}
+
+/**
+ * ice_flg_redirect - redirect a parser flag to a packet flag
+ * @table: flag redirect table
+ * @psr_flg: parser flag to redirect
+ *
+ * Return: the redirected packet flags, or 0 if @psr_flg is 0.
+ */
+u64 ice_flg_redirect(struct ice_flg_rd_item *table, u64 psr_flg)
+{
+ u64 flg = 0;
+ int i;
+
+ for (i = 0; i < ICE_FLG_RDT_SIZE; i++) {
+ struct ice_flg_rd_item *item = &table[i];
+
+ if (!item->expose)
+ continue;
+
+ if (psr_flg & BIT(item->intr_flg_id))
+ flg |= BIT(i);
+ }
+
+ return flg;
+}
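+
+/* Example of the mapping above (illustrative only): if table entry 3 is
+ * exposed with intr_flg_id = 10, a @psr_flg with BIT(10) set contributes
+ * BIT(3) to the returned packet flags; entries with expose == 0 never
+ * contribute.
+ */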
+
+/*** ICE_SID_XLT_KEY_BUILDER_SW, ICE_SID_XLT_KEY_BUILDER_ACL,
+ * ICE_SID_XLT_KEY_BUILDER_FD and ICE_SID_XLT_KEY_BUILDER_RSS
+ * sections ***/
+static void ice_xlt_kb_entry_dump(struct ice_hw *hw,
+ struct ice_xlt_kb_entry *entry, int idx)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+ int i;
+
+ dev_info(dev, "key builder entry %d\n", idx);
+ dev_info(dev, "\txlt1_ad_sel = %d\n", entry->xlt1_ad_sel);
+ dev_info(dev, "\txlt2_ad_sel = %d\n", entry->xlt2_ad_sel);
+
+ for (i = 0; i < ICE_XLT_KB_FLAG0_14_CNT; i++)
+ dev_info(dev, "\tflg%d_sel = %d\n", i, entry->flg0_14_sel[i]);
+
+ dev_info(dev, "\txlt1_md_sel = %d\n", entry->xlt1_md_sel);
+ dev_info(dev, "\txlt2_md_sel = %d\n", entry->xlt2_md_sel);
+}
+
+/**
+ * ice_xlt_kb_dump - dump an xlt key build info
+ * @hw: pointer to the hardware structure
+ * @kb: key build to dump
+ */
+static void ice_xlt_kb_dump(struct ice_hw *hw, struct ice_xlt_kb *kb)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+ int i;
+
+ dev_info(dev, "xlt1_pm = %d\n", kb->xlt1_pm);
+ dev_info(dev, "xlt2_pm = %d\n", kb->xlt2_pm);
+ dev_info(dev, "prof_id_pm = %d\n", kb->prof_id_pm);
+ dev_info(dev, "flag15 lo = 0x%08x\n", (u32)kb->flag15);
+ dev_info(dev, "flag15 hi = 0x%08x\n",
+ (u32)(kb->flag15 >> (sizeof(u32) * BITS_PER_BYTE)));
+
+ for (i = 0; i < ICE_XLT_KB_TBL_CNT; i++)
+ ice_xlt_kb_entry_dump(hw, &kb->entries[i], i);
+}
+
+#define ICE_XLT_KB_X1AS_S 32 /* offset for the 1st 64-bit field */
+#define ICE_XLT_KB_X1AS_IDD (ICE_XLT_KB_X1AS_S / BITS_PER_BYTE)
+#define ICE_XLT_KB_X1AS_OFF (ICE_XLT_KB_X1AS_S % BITS_PER_BYTE)
+#define ICE_XLT_KB_X1AS GENMASK_ULL(34 - ICE_XLT_KB_X1AS_S, \
+ 32 - ICE_XLT_KB_X1AS_S)
+#define ICE_XLT_KB_X2AS GENMASK_ULL(37 - ICE_XLT_KB_X1AS_S, \
+ 35 - ICE_XLT_KB_X1AS_S)
+#define ICE_XLT_KB_FL00 GENMASK_ULL(46 - ICE_XLT_KB_X1AS_S, \
+ 38 - ICE_XLT_KB_X1AS_S)
+#define ICE_XLT_KB_FL01 GENMASK_ULL(55 - ICE_XLT_KB_X1AS_S, \
+ 47 - ICE_XLT_KB_X1AS_S)
+#define ICE_XLT_KB_FL02 GENMASK_ULL(64 - ICE_XLT_KB_X1AS_S, \
+ 56 - ICE_XLT_KB_X1AS_S)
+#define ICE_XLT_KB_FL03 GENMASK_ULL(73 - ICE_XLT_KB_X1AS_S, \
+ 65 - ICE_XLT_KB_X1AS_S)
+#define ICE_XLT_KB_FL04 GENMASK_ULL(82 - ICE_XLT_KB_X1AS_S, \
+ 74 - ICE_XLT_KB_X1AS_S)
+#define ICE_XLT_KB_FL05 GENMASK_ULL(91 - ICE_XLT_KB_X1AS_S, \
+ 83 - ICE_XLT_KB_X1AS_S)
+#define ICE_XLT_KB_FL06_S 92 /* offset for the 2nd 64-bit field */
+#define ICE_XLT_KB_FL06_IDD (ICE_XLT_KB_FL06_S / BITS_PER_BYTE)
+#define ICE_XLT_KB_FL06_OFF (ICE_XLT_KB_FL06_S % BITS_PER_BYTE)
+#define ICE_XLT_KB_FL06 GENMASK_ULL(100 - ICE_XLT_KB_FL06_S, \
+ 92 - ICE_XLT_KB_FL06_S)
+#define ICE_XLT_KB_FL07 GENMASK_ULL(109 - ICE_XLT_KB_FL06_S, \
+ 101 - ICE_XLT_KB_FL06_S)
+#define ICE_XLT_KB_FL08 GENMASK_ULL(118 - ICE_XLT_KB_FL06_S, \
+ 110 - ICE_XLT_KB_FL06_S)
+#define ICE_XLT_KB_FL09 GENMASK_ULL(127 - ICE_XLT_KB_FL06_S, \
+ 119 - ICE_XLT_KB_FL06_S)
+#define ICE_XLT_KB_FL10 GENMASK_ULL(136 - ICE_XLT_KB_FL06_S, \
+ 128 - ICE_XLT_KB_FL06_S)
+#define ICE_XLT_KB_FL11 GENMASK_ULL(145 - ICE_XLT_KB_FL06_S, \
+ 137 - ICE_XLT_KB_FL06_S)
+#define ICE_XLT_KB_FL12_S 146 /* offset for the 3rd 64-bit field */
+#define ICE_XLT_KB_FL12_IDD (ICE_XLT_KB_FL12_S / BITS_PER_BYTE)
+#define ICE_XLT_KB_FL12_OFF (ICE_XLT_KB_FL12_S % BITS_PER_BYTE)
+#define ICE_XLT_KB_FL12 GENMASK_ULL(154 - ICE_XLT_KB_FL12_S, \
+ 146 - ICE_XLT_KB_FL12_S)
+#define ICE_XLT_KB_FL13 GENMASK_ULL(163 - ICE_XLT_KB_FL12_S, \
+ 155 - ICE_XLT_KB_FL12_S)
+#define ICE_XLT_KB_FL14 GENMASK_ULL(181 - ICE_XLT_KB_FL12_S, \
+ 164 - ICE_XLT_KB_FL12_S)
+#define ICE_XLT_KB_X1MS GENMASK_ULL(186 - ICE_XLT_KB_FL12_S, \
+ 182 - ICE_XLT_KB_FL12_S)
+#define ICE_XLT_KB_X2MS GENMASK_ULL(191 - ICE_XLT_KB_FL12_S, \
+ 187 - ICE_XLT_KB_FL12_S)
+
+/**
+ * ice_kb_entry_init - parse 192 bits of XLT Key Builder entry
+ * @entry: pointer to the XLT Key Builder entry structure
+ * @data: XLT Key Builder entry data to be parsed
+ */
+static void ice_kb_entry_init(struct ice_xlt_kb_entry *entry, u8 *data)
+{
+ u8 i = 0;
+ u64 d64;
+
+ d64 = *((u64 *)&data[ICE_XLT_KB_X1AS_IDD]) >> ICE_XLT_KB_X1AS_OFF;
+
+ entry->xlt1_ad_sel = FIELD_GET(ICE_XLT_KB_X1AS, d64);
+ entry->xlt2_ad_sel = FIELD_GET(ICE_XLT_KB_X2AS, d64);
+
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL00, d64);
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL01, d64);
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL02, d64);
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL03, d64);
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL04, d64);
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL05, d64);
+
+ d64 = *((u64 *)&data[ICE_XLT_KB_FL06_IDD]) >> ICE_XLT_KB_FL06_OFF;
+
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL06, d64);
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL07, d64);
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL08, d64);
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL09, d64);
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL10, d64);
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL11, d64);
+
+ d64 = *((u64 *)&data[ICE_XLT_KB_FL12_IDD]) >> ICE_XLT_KB_FL12_OFF;
+
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL12, d64);
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL13, d64);
+ entry->flg0_14_sel[i] = FIELD_GET(ICE_XLT_KB_FL14, d64);
+
+ entry->xlt1_md_sel = FIELD_GET(ICE_XLT_KB_X1MS, d64);
+ entry->xlt2_md_sel = FIELD_GET(ICE_XLT_KB_X2MS, d64);
+}
+
+#define ICE_XLT_KB_X1PM_OFF 0
+#define ICE_XLT_KB_X2PM_OFF 1
+#define ICE_XLT_KB_PIPM_OFF 2
+#define ICE_XLT_KB_FL15_OFF 4
+#define ICE_XLT_KB_TBL_OFF 12
+
+/**
+ * ice_parse_kb_data - parse 204 bits of XLT Key Build Table
+ * @hw: pointer to the hardware structure
+ * @kb: pointer to the XLT Key Build Table structure
+ * @data: XLT Key Build Table data to be parsed
+ */
+static void ice_parse_kb_data(struct ice_hw *hw, struct ice_xlt_kb *kb,
+ void *data)
+{
+ u8 *buf = data;
+ int i;
+
+ kb->xlt1_pm = buf[ICE_XLT_KB_X1PM_OFF];
+ kb->xlt2_pm = buf[ICE_XLT_KB_X2PM_OFF];
+ kb->prof_id_pm = buf[ICE_XLT_KB_PIPM_OFF];
+
+ kb->flag15 = *(u64 *)&buf[ICE_XLT_KB_FL15_OFF];
+ for (i = 0; i < ICE_XLT_KB_TBL_CNT; i++)
+ ice_kb_entry_init(&kb->entries[i],
+ &buf[ICE_XLT_KB_TBL_OFF +
+ i * ICE_XLT_KB_TBL_ENTRY_SIZE]);
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_xlt_kb_dump(hw, kb);
+}
+
+static struct ice_xlt_kb *ice_xlt_kb_get(struct ice_hw *hw, u32 sect_type)
+{
+ struct ice_pkg_enum state = {};
+ struct ice_seg *seg = hw->seg;
+ struct ice_xlt_kb *kb;
+ void *data;
+
+ if (!seg)
+ return ERR_PTR(-EINVAL);
+
+ kb = kzalloc(sizeof(*kb), GFP_KERNEL);
+ if (!kb)
+ return ERR_PTR(-ENOMEM);
+
+ data = ice_pkg_enum_section(seg, &state, sect_type);
+ if (!data) {
+ ice_debug(hw, ICE_DBG_PARSER, "failed to find section type %d.\n",
+ sect_type);
+ kfree(kb);
+ return ERR_PTR(-EINVAL);
+ }
+
+ ice_parse_kb_data(hw, kb, data);
+
+ return kb;
+}
+
+/**
+ * ice_xlt_kb_get_sw - create switch xlt key build
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Key Builder table for Switch.
+ */
+static struct ice_xlt_kb *ice_xlt_kb_get_sw(struct ice_hw *hw)
+{
+ return ice_xlt_kb_get(hw, ICE_SID_XLT_KEY_BUILDER_SW);
+}
+
+/**
+ * ice_xlt_kb_get_acl - create acl xlt key build
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Key Builder table for ACL.
+ */
+static struct ice_xlt_kb *ice_xlt_kb_get_acl(struct ice_hw *hw)
+{
+ return ice_xlt_kb_get(hw, ICE_SID_XLT_KEY_BUILDER_ACL);
+}
+
+/**
+ * ice_xlt_kb_get_fd - create fdir xlt key build
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Key Builder table for Flow Director.
+ */
+static struct ice_xlt_kb *ice_xlt_kb_get_fd(struct ice_hw *hw)
+{
+ return ice_xlt_kb_get(hw, ICE_SID_XLT_KEY_BUILDER_FD);
+}
+
+/**
+ * ice_xlt_kb_get_rss - create rss xlt key build
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Key Builder table for RSS.
+ */
+static struct ice_xlt_kb *ice_xlt_kb_get_rss(struct ice_hw *hw)
+{
+ return ice_xlt_kb_get(hw, ICE_SID_XLT_KEY_BUILDER_RSS);
+}
+
+#define ICE_XLT_KB_MASK GENMASK_ULL(5, 0)
+
+/**
+ * ice_xlt_kb_flag_get - aggregate a 64-bit packet flag into a 16-bit xlt flag
+ * @kb: xlt key build
+ * @pkt_flag: 64-bit packet flag
+ *
+ * Return: the 16-bit XLT flag, or 0 if @pkt_flag is 0.
+ */
+u16 ice_xlt_kb_flag_get(struct ice_xlt_kb *kb, u64 pkt_flag)
+{
+ struct ice_xlt_kb_entry *entry = &kb->entries[0];
+ u16 flag = 0;
+ int i;
+
+ /* check flag 15 */
+ if (kb->flag15 & pkt_flag)
+ flag = BIT(ICE_XLT_KB_FLAG0_14_CNT);
+
+ /* check flag 0 - 14 */
+ for (i = 0; i < ICE_XLT_KB_FLAG0_14_CNT; i++) {
+ /* only check first entry */
+ u16 idx = entry->flg0_14_sel[i] & ICE_XLT_KB_MASK;
+
+ if (pkt_flag & BIT(idx))
+ flag |= (u16)BIT(i);
+ }
+
+ return flag;
+}
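+
+/* Worked example (illustrative only): if entries[0].flg0_14_sel[2] selects
+ * packet flag 7, a @pkt_flag with BIT(7) set contributes BIT(2) to the
+ * returned value; any @pkt_flag bit covered by kb->flag15 additionally sets
+ * BIT(ICE_XLT_KB_FLAG0_14_CNT).
+ */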
+
+/*** Parser API ***/
+/**
+ * ice_parser_create - create a parser instance
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated parser instance or ERR_PTR
+ * in case of error.
+ */
+struct ice_parser *ice_parser_create(struct ice_hw *hw)
+{
+ struct ice_parser *p;
+ void *err;
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return ERR_PTR(-ENOMEM);
+
+ p->hw = hw;
+ p->rt.psr = p;
+
+ p->imem_table = ice_imem_table_get(hw);
+ if (IS_ERR(p->imem_table)) {
+ err = p->imem_table;
+ goto err;
+ }
+
+ p->mi_table = ice_metainit_table_get(hw);
+ if (IS_ERR(p->mi_table)) {
+ err = p->mi_table;
+ goto err;
+ }
+
+ p->pg_cam_table = ice_pg_cam_table_get(hw);
+ if (IS_ERR(p->pg_cam_table)) {
+ err = p->pg_cam_table;
+ goto err;
+ }
+
+ p->pg_sp_cam_table = ice_pg_sp_cam_table_get(hw);
+ if (IS_ERR(p->pg_sp_cam_table)) {
+ err = p->pg_sp_cam_table;
+ goto err;
+ }
+
+ p->pg_nm_cam_table = ice_pg_nm_cam_table_get(hw);
+ if (IS_ERR(p->pg_nm_cam_table)) {
+ err = p->pg_nm_cam_table;
+ goto err;
+ }
+
+ p->pg_nm_sp_cam_table = ice_pg_nm_sp_cam_table_get(hw);
+ if (IS_ERR(p->pg_nm_sp_cam_table)) {
+ err = p->pg_nm_sp_cam_table;
+ goto err;
+ }
+
+ p->bst_tcam_table = ice_bst_tcam_table_get(hw);
+ if (IS_ERR(p->bst_tcam_table)) {
+ err = p->bst_tcam_table;
+ goto err;
+ }
+
+ p->bst_lbl_table = ice_bst_lbl_table_get(hw);
+ if (IS_ERR(p->bst_lbl_table)) {
+ err = p->bst_lbl_table;
+ goto err;
+ }
+
+ p->ptype_mk_tcam_table = ice_ptype_mk_tcam_table_get(hw);
+ if (IS_ERR(p->ptype_mk_tcam_table)) {
+ err = p->ptype_mk_tcam_table;
+ goto err;
+ }
+
+ p->mk_grp_table = ice_mk_grp_table_get(hw);
+ if (IS_ERR(p->mk_grp_table)) {
+ err = p->mk_grp_table;
+ goto err;
+ }
+
+ p->proto_grp_table = ice_proto_grp_table_get(hw);
+ if (IS_ERR(p->proto_grp_table)) {
+ err = p->proto_grp_table;
+ goto err;
+ }
+
+ p->flg_rd_table = ice_flg_rd_table_get(hw);
+ if (IS_ERR(p->flg_rd_table)) {
+ err = p->flg_rd_table;
+ goto err;
+ }
+
+ p->xlt_kb_sw = ice_xlt_kb_get_sw(hw);
+ if (IS_ERR(p->xlt_kb_sw)) {
+ err = p->xlt_kb_sw;
+ goto err;
+ }
+
+ p->xlt_kb_acl = ice_xlt_kb_get_acl(hw);
+ if (IS_ERR(p->xlt_kb_acl)) {
+ err = p->xlt_kb_acl;
+ goto err;
+ }
+
+ p->xlt_kb_fd = ice_xlt_kb_get_fd(hw);
+ if (IS_ERR(p->xlt_kb_fd)) {
+ err = p->xlt_kb_fd;
+ goto err;
+ }
+
+ p->xlt_kb_rss = ice_xlt_kb_get_rss(hw);
+ if (IS_ERR(p->xlt_kb_rss)) {
+ err = p->xlt_kb_rss;
+ goto err;
+ }
+
+ return p;
+err:
+ ice_parser_destroy(p);
+ return err;
+}
+
+/**
+ * ice_parser_destroy - destroy a parser instance
+ * @psr: pointer to a parser instance
+ */
+void ice_parser_destroy(struct ice_parser *psr)
+{
+ kfree(psr->imem_table);
+ kfree(psr->mi_table);
+ kfree(psr->pg_cam_table);
+ kfree(psr->pg_sp_cam_table);
+ kfree(psr->pg_nm_cam_table);
+ kfree(psr->pg_nm_sp_cam_table);
+ kfree(psr->bst_tcam_table);
+ kfree(psr->bst_lbl_table);
+ kfree(psr->ptype_mk_tcam_table);
+ kfree(psr->mk_grp_table);
+ kfree(psr->proto_grp_table);
+ kfree(psr->flg_rd_table);
+ kfree(psr->xlt_kb_sw);
+ kfree(psr->xlt_kb_acl);
+ kfree(psr->xlt_kb_fd);
+ kfree(psr->xlt_kb_rss);
+
+ kfree(psr);
+}
+
+/**
+ * ice_parser_run - parse a packet in binary and return the result
+ * @psr: pointer to a parser instance
+ * @pkt_buf: packet data
+ * @pkt_len: packet length
+ * @rslt: input/output parameter to save parser result.
+ *
+ * Return: 0 on success or errno.
+ */
+int ice_parser_run(struct ice_parser *psr, const u8 *pkt_buf,
+ int pkt_len, struct ice_parser_result *rslt)
+{
+ ice_parser_rt_reset(&psr->rt);
+ ice_parser_rt_pktbuf_set(&psr->rt, pkt_buf, pkt_len);
+
+ return ice_parser_rt_execute(&psr->rt, rslt);
+}
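+
+/* Sketch of the intended call sequence (editorial example, not driver code;
+ * pkt_buf and pkt_len are provided by the caller):
+ *
+ *	struct ice_parser_result rslt;
+ *	struct ice_parser *psr;
+ *
+ *	psr = ice_parser_create(hw);
+ *	if (!IS_ERR(psr)) {
+ *		if (!ice_parser_run(psr, pkt_buf, pkt_len, &rslt))
+ *			ice_parser_result_dump(hw, &rslt);
+ *		ice_parser_destroy(psr);
+ *	}
+ */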
+
+/**
+ * ice_parser_result_dump - dump a parser result info
+ * @hw: pointer to the hardware structure
+ * @rslt: parser result info to dump
+ */
+void ice_parser_result_dump(struct ice_hw *hw, struct ice_parser_result *rslt)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+ int i;
+
+ dev_info(dev, "ptype = %d\n", rslt->ptype);
+ for (i = 0; i < rslt->po_num; i++)
+ dev_info(dev, "proto = %d, offset = %d\n",
+ rslt->po[i].proto_id, rslt->po[i].offset);
+
+ dev_info(dev, "flags_psr = 0x%016llx\n", rslt->flags_psr);
+ dev_info(dev, "flags_pkt = 0x%016llx\n", rslt->flags_pkt);
+ dev_info(dev, "flags_sw = 0x%04x\n", rslt->flags_sw);
+ dev_info(dev, "flags_fd = 0x%04x\n", rslt->flags_fd);
+ dev_info(dev, "flags_rss = 0x%04x\n", rslt->flags_rss);
+}
+
+#define ICE_BT_VLD_KEY 0xFF
+#define ICE_BT_INV_KEY 0xFE
+
+static void ice_bst_dvm_set(struct ice_parser *psr, enum ice_lbl_type type,
+ bool on)
+{
+ u16 i = 0;
+
+ while (true) {
+ struct ice_bst_tcam_item *item;
+ u8 key;
+
+ item = ice_bst_tcam_search(psr->bst_tcam_table,
+ psr->bst_lbl_table,
+ type, &i);
+ if (!item)
+ break;
+
+ key = on ? ICE_BT_VLD_KEY : ICE_BT_INV_KEY;
+ item->key[ICE_BT_VM_OFF] = key;
+ item->key_inv[ICE_BT_VM_OFF] = key;
+ i++;
+ }
+}
+
+/**
+ * ice_parser_dvm_set - configure double vlan mode for parser
+ * @psr: pointer to a parser instance
+ * @on: true to turn on; false to turn off
+ */
+void ice_parser_dvm_set(struct ice_parser *psr, bool on)
+{
+ ice_bst_dvm_set(psr, ICE_LBL_BST_TYPE_DVM, on);
+ ice_bst_dvm_set(psr, ICE_LBL_BST_TYPE_SVM, !on);
+}
+
+static int ice_tunnel_port_set(struct ice_parser *psr, enum ice_lbl_type type,
+ u16 udp_port, bool on)
+{
+ u8 *buf = (u8 *)&udp_port;
+ u16 i = 0;
+
+ while (true) {
+ struct ice_bst_tcam_item *item;
+
+ item = ice_bst_tcam_search(psr->bst_tcam_table,
+ psr->bst_lbl_table,
+ type, &i);
+ if (!item)
+ break;
+
+ /* found empty slot to add */
+ if (on && item->key[ICE_BT_TUN_PORT_OFF_H] == ICE_BT_INV_KEY &&
+ item->key_inv[ICE_BT_TUN_PORT_OFF_H] == ICE_BT_INV_KEY) {
+ item->key_inv[ICE_BT_TUN_PORT_OFF_L] =
+ buf[ICE_UDP_PORT_OFF_L];
+ item->key_inv[ICE_BT_TUN_PORT_OFF_H] =
+ buf[ICE_UDP_PORT_OFF_H];
+
+ item->key[ICE_BT_TUN_PORT_OFF_L] =
+ ICE_BT_VLD_KEY - buf[ICE_UDP_PORT_OFF_L];
+ item->key[ICE_BT_TUN_PORT_OFF_H] =
+ ICE_BT_VLD_KEY - buf[ICE_UDP_PORT_OFF_H];
+
+ return 0;
+ /* found a matched slot to delete */
+ } else if (!on &&
+ (item->key_inv[ICE_BT_TUN_PORT_OFF_L] ==
+ buf[ICE_UDP_PORT_OFF_L] ||
+ item->key_inv[ICE_BT_TUN_PORT_OFF_H] ==
+ buf[ICE_UDP_PORT_OFF_H])) {
+ item->key_inv[ICE_BT_TUN_PORT_OFF_L] = ICE_BT_VLD_KEY;
+ item->key_inv[ICE_BT_TUN_PORT_OFF_H] = ICE_BT_INV_KEY;
+
+ item->key[ICE_BT_TUN_PORT_OFF_L] = ICE_BT_VLD_KEY;
+ item->key[ICE_BT_TUN_PORT_OFF_H] = ICE_BT_INV_KEY;
+
+ return 0;
+ }
+ i++;
+ }
+
+ return -EINVAL;
+}
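+
+/* Editorial note on the key encoding above: for a u8 port byte,
+ * ICE_BT_VLD_KEY - byte equals ~byte, so the add path programs
+ * key = ~byte / key_inv = byte, which per the ternary-match rules earlier in
+ * this file makes every bit either "match on 1" or "match on 0", i.e. the
+ * Boost TCAM entry matches exactly that UDP port.
+ */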
+
+/**
+ * ice_parser_vxlan_tunnel_set - configure vxlan tunnel for parser
+ * @psr: pointer to a parser instance
+ * @udp_port: vxlan tunnel port in UDP header
+ * @on: true to turn on; false to turn off
+ *
+ * Return: 0 on success or errno on failure.
+ */
+int ice_parser_vxlan_tunnel_set(struct ice_parser *psr,
+ u16 udp_port, bool on)
+{
+ return ice_tunnel_port_set(psr, ICE_LBL_BST_TYPE_VXLAN, udp_port, on);
+}
+
+/**
+ * ice_parser_geneve_tunnel_set - configure geneve tunnel for parser
+ * @psr: pointer to a parser instance
+ * @udp_port: geneve tunnel port in UDP header
+ * @on: true to turn on; false to turn off
+ *
+ * Return: 0 on success or errno on failure.
+ */
+int ice_parser_geneve_tunnel_set(struct ice_parser *psr,
+ u16 udp_port, bool on)
+{
+ return ice_tunnel_port_set(psr, ICE_LBL_BST_TYPE_GENEVE, udp_port, on);
+}
+
+/**
+ * ice_parser_ecpri_tunnel_set - configure ecpri tunnel for parser
+ * @psr: pointer to a parser instance
+ * @udp_port: ecpri tunnel port in UDP header
+ * @on: true to turn on; false to turn off
+ *
+ * Return: 0 on success or errno on failure.
+ */
+int ice_parser_ecpri_tunnel_set(struct ice_parser *psr,
+ u16 udp_port, bool on)
+{
+ return ice_tunnel_port_set(psr, ICE_LBL_BST_TYPE_UDP_ECPRI,
+ udp_port, on);
+}
+
+/**
+ * ice_nearest_proto_id - find nearest protocol ID
+ * @rslt: pointer to a parser result instance
+ * @offset: packet offset to resolve (an upper bound for the protocol offset)
+ * @proto_id: the protocol ID (output)
+ * @proto_off: the offset relative to that protocol header (output)
+ *
+ * From the protocols in @rslt, find the protocol whose header offset is
+ * closest to, but not larger than, @offset.
+ *
+ * Return: true if such a protocol is found and the distance to it is even;
+ * the protocol ID and relative offset are stored in @proto_id and @proto_off.
+ */
+static bool ice_nearest_proto_id(struct ice_parser_result *rslt, u16 offset,
+ u8 *proto_id, u16 *proto_off)
+{
+ u16 dist = U16_MAX;
+ u8 proto = 0;
+ int i;
+
+ for (i = 0; i < rslt->po_num; i++) {
+ if (offset < rslt->po[i].offset)
+ continue;
+ if (offset - rslt->po[i].offset < dist) {
+ proto = rslt->po[i].proto_id;
+ dist = offset - rslt->po[i].offset;
+ }
+ }
+
+ if (dist % 2)
+ return false;
+
+ *proto_id = proto;
+ *proto_off = dist;
+
+ return true;
+}
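+
+/* Worked example (illustrative only): if @rslt reports protocol A at offset
+ * 14 and protocol B at offset 34, then offset 40 resolves to protocol B with
+ * a relative offset of 6, while offset 41 (distance 7) is rejected by the
+ * dist % 2 check.
+ */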
+
+/* Default flag masks to cover GTP_EH_PDU, GTP_EH_PDU_LINK and TUN2.
+ * In the future, the flag masks should be learned from the DDP package.
+ */
+#define ICE_KEYBUILD_FLAG_MASK_DEFAULT_SW 0x4002
+#define ICE_KEYBUILD_FLAG_MASK_DEFAULT_ACL 0x0000
+#define ICE_KEYBUILD_FLAG_MASK_DEFAULT_FD 0x6080
+#define ICE_KEYBUILD_FLAG_MASK_DEFAULT_RSS 0x6010
+
+/**
+ * ice_parser_profile_init - initialize an FXP profile based on a parser result
+ * @rslt: an instance of a parser result
+ * @pkt_buf: packet data buffer
+ * @msk_buf: packet mask buffer
+ * @buf_len: packet length
+ * @blk: FXP pipeline stage
+ * @prof: input/output parameter to save the profile
+ *
+ * Return: 0 on success or errno on failure.
+ */
+int ice_parser_profile_init(struct ice_parser_result *rslt,
+ const u8 *pkt_buf, const u8 *msk_buf,
+ int buf_len, enum ice_block blk,
+ struct ice_parser_profile *prof)
+{
+ u8 proto_id = U8_MAX;
+ u16 proto_off = 0;
+ u16 off;
+
+ memset(prof, 0, sizeof(*prof));
+ set_bit(rslt->ptype, prof->ptypes);
+ if (blk == ICE_BLK_SW) {
+ prof->flags = rslt->flags_sw;
+ prof->flags_msk = ICE_KEYBUILD_FLAG_MASK_DEFAULT_SW;
+ } else if (blk == ICE_BLK_ACL) {
+ prof->flags = rslt->flags_acl;
+ prof->flags_msk = ICE_KEYBUILD_FLAG_MASK_DEFAULT_ACL;
+ } else if (blk == ICE_BLK_FD) {
+ prof->flags = rslt->flags_fd;
+ prof->flags_msk = ICE_KEYBUILD_FLAG_MASK_DEFAULT_FD;
+ } else if (blk == ICE_BLK_RSS) {
+ prof->flags = rslt->flags_rss;
+ prof->flags_msk = ICE_KEYBUILD_FLAG_MASK_DEFAULT_RSS;
+ } else {
+ return -EINVAL;
+ }
+
+ for (off = 0; off < buf_len - 1; off++) {
+ if (msk_buf[off] == 0 && msk_buf[off + 1] == 0)
+ continue;
+ if (!ice_nearest_proto_id(rslt, off, &proto_id, &proto_off))
+ continue;
+ if (prof->fv_num >= ICE_PARSER_FV_MAX)
+ return -EINVAL;
+
+ prof->fv[prof->fv_num].proto_id = proto_id;
+ prof->fv[prof->fv_num].offset = proto_off;
+ prof->fv[prof->fv_num].spec = *(const u16 *)&pkt_buf[off];
+ prof->fv[prof->fv_num].msk = *(const u16 *)&msk_buf[off];
+ prof->fv_num++;
+ }
+
+ return 0;
+}
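+
+/* Usage sketch (editorial example; pkt_buf, msk_buf and buf_len come from the
+ * caller): after ice_parser_run() fills @rslt, a profile for e.g. the RSS
+ * stage can be built from the same packet plus an equally sized mask buffer:
+ *
+ *	struct ice_parser_profile prof;
+ *	int err;
+ *
+ *	err = ice_parser_profile_init(&rslt, pkt_buf, msk_buf, buf_len,
+ *				      ICE_BLK_RSS, &prof);
+ */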
+
+/**
+ * ice_parser_profile_dump - dump an FXP profile info
+ * @hw: pointer to the hardware structure
+ * @prof: profile info to dump
+ */
+void ice_parser_profile_dump(struct ice_hw *hw,
+ struct ice_parser_profile *prof)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+ u16 i;
+
+ dev_info(dev, "ptypes:\n");
+ for (i = 0; i < ICE_FLOW_PTYPE_MAX; i++)
+ if (test_bit(i, prof->ptypes))
+ dev_info(dev, "\t%u\n", i);
+
+ for (i = 0; i < prof->fv_num; i++)
+ dev_info(dev, "proto = %u, offset = %2u, spec = 0x%04x, mask = 0x%04x\n",
+ prof->fv[i].proto_id, prof->fv[i].offset,
+ prof->fv[i].spec, prof->fv[i].msk);
+
+ dev_info(dev, "flags = 0x%04x\n", prof->flags);
+ dev_info(dev, "flags_msk = 0x%04x\n", prof->flags_msk);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_parser.h b/drivers/net/ethernet/intel/ice/ice_parser.h
new file mode 100644
index 000000000000..4f56d53d56b9
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_parser.h
@@ -0,0 +1,538 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2024 Intel Corporation */
+
+#ifndef _ICE_PARSER_H_
+#define _ICE_PARSER_H_
+
+#define ICE_SEC_DATA_OFFSET 4
+#define ICE_SID_RXPARSER_IMEM_ENTRY_SIZE 48
+#define ICE_SID_RXPARSER_METADATA_INIT_ENTRY_SIZE 24
+#define ICE_SID_RXPARSER_CAM_ENTRY_SIZE 16
+#define ICE_SID_RXPARSER_PG_SPILL_ENTRY_SIZE 17
+#define ICE_SID_RXPARSER_NOMATCH_CAM_ENTRY_SIZE 12
+#define ICE_SID_RXPARSER_NOMATCH_SPILL_ENTRY_SIZE 13
+#define ICE_SID_RXPARSER_BOOST_TCAM_ENTRY_SIZE 88
+#define ICE_SID_RXPARSER_MARKER_TYPE_ENTRY_SIZE 24
+#define ICE_SID_RXPARSER_MARKER_GRP_ENTRY_SIZE 8
+#define ICE_SID_RXPARSER_PROTO_GRP_ENTRY_SIZE 24
+#define ICE_SID_RXPARSER_FLAG_REDIR_ENTRY_SIZE 1
+
+#define ICE_SEC_LBL_DATA_OFFSET 2
+#define ICE_SID_LBL_ENTRY_SIZE 66
+
+/*** ICE_SID_RXPARSER_IMEM section ***/
+#define ICE_IMEM_TABLE_SIZE 192
+
+/* TCAM boost master; if a bit is set and the TCAM hits, the TCAM output
+ * overrides the iMEM output.
+ */
+struct ice_bst_main {
+ bool alu0;
+ bool alu1;
+ bool alu2;
+ bool pg;
+};
+
+struct ice_bst_keybuilder {
+ u8 prio; /* 0-3: PG precedence within ALUs (3 highest) */
+ bool tsr_ctrl; /* TCAM Search Register control */
+};
+
+/* Next protocol Key builder */
+struct ice_np_keybuilder {
+ u8 opc;
+ u8 start_reg0;
+ u8 len_reg1;
+};
+
+enum ice_np_keybuilder_opcode {
+ ICE_NPKB_OPC_EXTRACT = 0,
+ ICE_NPKB_OPC_BUILD = 1,
+ ICE_NPKB_OPC_BYPASS = 2,
+};
+
+/* Parse Graph Key builder */
+struct ice_pg_keybuilder {
+ bool flag0_ena;
+ bool flag1_ena;
+ bool flag2_ena;
+ bool flag3_ena;
+ u8 flag0_idx;
+ u8 flag1_idx;
+ u8 flag2_idx;
+ u8 flag3_idx;
+ u8 alu_reg_idx;
+};
+
+enum ice_alu_idx {
+ ICE_ALU0_IDX = 0,
+ ICE_ALU1_IDX = 1,
+ ICE_ALU2_IDX = 2,
+};
+
+enum ice_alu_opcode {
+ ICE_ALU_PARK = 0,
+ ICE_ALU_MOV_ADD = 1,
+ ICE_ALU_ADD = 2,
+ ICE_ALU_MOV_AND = 4,
+ ICE_ALU_AND = 5,
+ ICE_ALU_AND_IMM = 6,
+ ICE_ALU_MOV_OR = 7,
+ ICE_ALU_OR = 8,
+ ICE_ALU_MOV_XOR = 9,
+ ICE_ALU_XOR = 10,
+ ICE_ALU_NOP = 11,
+ ICE_ALU_BR = 12,
+ ICE_ALU_BREQ = 13,
+ ICE_ALU_BRNEQ = 14,
+ ICE_ALU_BRGT = 15,
+ ICE_ALU_BRLT = 16,
+ ICE_ALU_BRGEQ = 17,
+ ICE_ALU_BRLEG = 18,
+ ICE_ALU_SETEQ = 19,
+ ICE_ALU_ANDEQ = 20,
+ ICE_ALU_OREQ = 21,
+ ICE_ALU_SETNEQ = 22,
+ ICE_ALU_ANDNEQ = 23,
+ ICE_ALU_ORNEQ = 24,
+ ICE_ALU_SETGT = 25,
+ ICE_ALU_ANDGT = 26,
+ ICE_ALU_ORGT = 27,
+ ICE_ALU_SETLT = 28,
+ ICE_ALU_ANDLT = 29,
+ ICE_ALU_ORLT = 30,
+ ICE_ALU_MOV_SUB = 31,
+ ICE_ALU_SUB = 32,
+ ICE_ALU_INVALID = 64,
+};
+
+enum ice_proto_off_opcode {
+ ICE_PO_OFF_REMAIN = 0,
+ ICE_PO_OFF_HDR_ADD = 1,
+ ICE_PO_OFF_HDR_SUB = 2,
+};
+
+struct ice_alu {
+ enum ice_alu_opcode opc;
+ u8 src_start;
+ u8 src_len;
+ bool shift_xlate_sel;
+ u8 shift_xlate_key;
+ u8 src_reg_id;
+ u8 dst_reg_id;
+ bool inc0;
+ bool inc1;
+ u8 proto_offset_opc;
+ u8 proto_offset;
+ u8 branch_addr;
+ u16 imm;
+ bool dedicate_flags_ena;
+ u8 dst_start;
+ u8 dst_len;
+ bool flags_extr_imm;
+ u8 flags_start_imm;
+};
+
+/* Parser program code (iMEM) */
+struct ice_imem_item {
+ u16 idx;
+ struct ice_bst_main b_m;
+ struct ice_bst_keybuilder b_kb;
+ u8 pg_prio;
+ struct ice_np_keybuilder np_kb;
+ struct ice_pg_keybuilder pg_kb;
+ struct ice_alu alu0;
+ struct ice_alu alu1;
+ struct ice_alu alu2;
+};
+
+/*** ICE_SID_RXPARSER_METADATA_INIT section ***/
+#define ICE_METAINIT_TABLE_SIZE 16
+
+/* Metadata Initialization item */
+struct ice_metainit_item {
+ u16 idx;
+
+ u8 tsr; /* TCAM Search key Register */
+ u16 ho; /* Header Offset register */
+ u16 pc; /* Program Counter register */
+ u16 pg_rn; /* Parse Graph Root Node */
+ u8 cd; /* Control Domain ID */
+
+ /* General Purpose Registers */
+ bool gpr_a_ctrl;
+ u8 gpr_a_data_mdid;
+ u8 gpr_a_data_start;
+ u8 gpr_a_data_len;
+ u8 gpr_a_id;
+
+ bool gpr_b_ctrl;
+ u8 gpr_b_data_mdid;
+ u8 gpr_b_data_start;
+ u8 gpr_b_data_len;
+ u8 gpr_b_id;
+
+ bool gpr_c_ctrl;
+ u8 gpr_c_data_mdid;
+ u8 gpr_c_data_start;
+ u8 gpr_c_data_len;
+ u8 gpr_c_id;
+
+ bool gpr_d_ctrl;
+ u8 gpr_d_data_mdid;
+ u8 gpr_d_data_start;
+ u8 gpr_d_data_len;
+ u8 gpr_d_id;
+
+ u64 flags; /* Initial value for all flags */
+};
+
+/*** ICE_SID_RXPARSER_CAM, ICE_SID_RXPARSER_PG_SPILL,
+ * ICE_SID_RXPARSER_NOMATCH_CAM and ICE_SID_RXPARSER_NOMATCH_SPILL
+ * sections ***/
+#define ICE_PG_CAM_TABLE_SIZE 2048
+#define ICE_PG_SP_CAM_TABLE_SIZE 128
+#define ICE_PG_NM_CAM_TABLE_SIZE 1024
+#define ICE_PG_NM_SP_CAM_TABLE_SIZE 64
+
+struct ice_pg_cam_key {
+ bool valid;
+ struct_group_attr(val, __packed,
+ u16 node_id; /* Node ID of protocol in parse graph */
+ bool flag0;
+ bool flag1;
+ bool flag2;
+ bool flag3;
+ u8 boost_idx; /* Boost TCAM match index */
+ u16 alu_reg;
+ u32 next_proto; /* next Protocol value (must be last) */
+ );
+};
+
+struct ice_pg_nm_cam_key {
+ bool valid;
+ struct_group_attr(val, __packed,
+ u16 node_id;
+ bool flag0;
+ bool flag1;
+ bool flag2;
+ bool flag3;
+ u8 boost_idx;
+ u16 alu_reg;
+ );
+};
+
+struct ice_pg_cam_action {
+ u16 next_node; /* Parser Node ID for the next round */
+ u8 next_pc; /* next Program Counter */
+ bool is_pg; /* is protocol group */
+ u8 proto_id; /* protocol ID or proto group ID */
+ bool is_mg; /* is marker group */
+ u8 marker_id; /* marker ID or marker group ID */
+ bool is_last_round;
+ bool ho_polarity; /* header offset polarity */
+ u16 ho_inc;
+};
+
+/* Parse Graph item */
+struct ice_pg_cam_item {
+ u16 idx;
+ struct ice_pg_cam_key key;
+ struct ice_pg_cam_action action;
+};
+
+/* Parse Graph No Match item */
+struct ice_pg_nm_cam_item {
+ u16 idx;
+ struct ice_pg_nm_cam_key key;
+ struct ice_pg_cam_action action;
+};
+
+struct ice_pg_cam_item *ice_pg_cam_match(struct ice_pg_cam_item *table,
+ int size, struct ice_pg_cam_key *key);
+struct ice_pg_nm_cam_item *
+ice_pg_nm_cam_match(struct ice_pg_nm_cam_item *table, int size,
+ struct ice_pg_cam_key *key);
+
+/*** ICE_SID_RXPARSER_BOOST_TCAM and ICE_SID_LBL_RXPARSER_TMEM sections ***/
+#define ICE_BST_TCAM_TABLE_SIZE 256
+#define ICE_BST_TCAM_KEY_SIZE 20
+
+/* Boost TCAM item */
+struct ice_bst_tcam_item {
+ u16 addr;
+ u8 key[ICE_BST_TCAM_KEY_SIZE];
+ u8 key_inv[ICE_BST_TCAM_KEY_SIZE];
+ u8 hit_idx_grp;
+ u8 pg_prio;
+ struct ice_np_keybuilder np_kb;
+ struct ice_pg_keybuilder pg_kb;
+ struct ice_alu alu0;
+ struct ice_alu alu1;
+ struct ice_alu alu2;
+};
+
+#define ICE_LBL_LEN 64
+#define ICE_LBL_BST_DVM "BOOST_MAC_VLAN_DVM"
+#define ICE_LBL_BST_SVM "BOOST_MAC_VLAN_SVM"
+#define ICE_LBL_TNL_VXLAN "TNL_VXLAN"
+#define ICE_LBL_TNL_GENEVE "TNL_GENEVE"
+#define ICE_LBL_TNL_UDP_ECPRI "TNL_UDP_ECPRI"
+
+enum ice_lbl_type {
+ ICE_LBL_BST_TYPE_UNKNOWN,
+ ICE_LBL_BST_TYPE_DVM,
+ ICE_LBL_BST_TYPE_SVM,
+ ICE_LBL_BST_TYPE_VXLAN,
+ ICE_LBL_BST_TYPE_GENEVE,
+ ICE_LBL_BST_TYPE_UDP_ECPRI,
+};
+
+struct ice_lbl_item {
+ u16 idx;
+ char label[ICE_LBL_LEN];
+
+ /* must be at the end, not part of the DDP section */
+ enum ice_lbl_type type;
+};
+
+struct ice_bst_tcam_item *
+ice_bst_tcam_match(struct ice_bst_tcam_item *tcam_table, u8 *pat);
+struct ice_bst_tcam_item *
+ice_bst_tcam_search(struct ice_bst_tcam_item *tcam_table,
+ struct ice_lbl_item *lbl_table,
+ enum ice_lbl_type type, u16 *start);
+
+/*** ICE_SID_RXPARSER_MARKER_PTYPE section ***/
+#define ICE_PTYPE_MK_TCAM_TABLE_SIZE 1024
+#define ICE_PTYPE_MK_TCAM_KEY_SIZE 10
+
+struct ice_ptype_mk_tcam_item {
+ u16 address;
+ u16 ptype;
+ u8 key[ICE_PTYPE_MK_TCAM_KEY_SIZE];
+ u8 key_inv[ICE_PTYPE_MK_TCAM_KEY_SIZE];
+} __packed;
+
+struct ice_ptype_mk_tcam_item *
+ice_ptype_mk_tcam_match(struct ice_ptype_mk_tcam_item *table,
+ u8 *pat, int len);
+
+/*** ICE_SID_RXPARSER_MARKER_GRP section ***/
+#define ICE_MK_GRP_TABLE_SIZE 128
+#define ICE_MK_COUNT_PER_GRP 8
+
+/* Marker Group item */
+struct ice_mk_grp_item {
+ int idx;
+ u8 markers[ICE_MK_COUNT_PER_GRP];
+};
+
+/*** ICE_SID_RXPARSER_PROTO_GRP section ***/
+#define ICE_PROTO_COUNT_PER_GRP 8
+#define ICE_PROTO_GRP_TABLE_SIZE 192
+#define ICE_PROTO_GRP_ITEM_SIZE 22
+
+struct ice_proto_off {
+ bool polarity; /* true: positive, false: negative */
+ u8 proto_id;
+ u16 offset; /* 10 bit protocol offset */
+};
+
+/* Protocol Group item */
+struct ice_proto_grp_item {
+ u16 idx;
+ struct ice_proto_off po[ICE_PROTO_COUNT_PER_GRP];
+};
+
+/*** ICE_SID_RXPARSER_FLAG_REDIR section ***/
+#define ICE_FLG_RD_TABLE_SIZE 64
+#define ICE_FLG_RDT_SIZE 64
+
+/* Flags Redirection item */
+struct ice_flg_rd_item {
+ u16 idx;
+ bool expose;
+ u8 intr_flg_id; /* Internal Flag ID */
+};
+
+u64 ice_flg_redirect(struct ice_flg_rd_item *table, u64 psr_flg);
+
+/*** ICE_SID_XLT_KEY_BUILDER_SW, ICE_SID_XLT_KEY_BUILDER_ACL,
+ * ICE_SID_XLT_KEY_BUILDER_FD and ICE_SID_XLT_KEY_BUILDER_RSS
+ * sections ***/
+#define ICE_XLT_KB_FLAG0_14_CNT 15
+#define ICE_XLT_KB_TBL_CNT 8
+#define ICE_XLT_KB_TBL_ENTRY_SIZE 24
+
+struct ice_xlt_kb_entry {
+ u8 xlt1_ad_sel;
+ u8 xlt2_ad_sel;
+ u16 flg0_14_sel[ICE_XLT_KB_FLAG0_14_CNT];
+ u8 xlt1_md_sel;
+ u8 xlt2_md_sel;
+};
+
+/* XLT Key Builder */
+struct ice_xlt_kb {
+ u8 xlt1_pm; /* XLT1 Partition Mode */
+ u8 xlt2_pm; /* XLT2 Partition Mode */
+ u8 prof_id_pm; /* Profile ID Partition Mode */
+ u64 flag15;
+
+ struct ice_xlt_kb_entry entries[ICE_XLT_KB_TBL_CNT];
+};
+
+u16 ice_xlt_kb_flag_get(struct ice_xlt_kb *kb, u64 pkt_flag);
+
+/*** Parser API ***/
+#define ICE_GPR_HV_IDX 64
+#define ICE_GPR_HV_SIZE 32
+#define ICE_GPR_ERR_IDX 84
+#define ICE_GPR_FLG_IDX 104
+#define ICE_GPR_FLG_SIZE 16
+
+#define ICE_GPR_TSR_IDX 108 /* TSR: TCAM Search Register */
+#define ICE_GPR_NN_IDX 109 /* NN: Next Parsing Cycle Node ID */
+#define ICE_GPR_HO_IDX 110 /* HO: Next Parsing Cycle hdr Offset */
+#define ICE_GPR_NP_IDX 111 /* NP: Next Parsing Cycle */
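+/* Runtime GPR map implied by the indexes above: gpr[64..79] mirror a 32-byte
+ * window of the packet at the current header offset (HV), gpr[84] holds the
+ * 16 parser error bits, gpr[104..107] hold the 64 parser flags (16 per
+ * register), and gpr[108..111] are the TSR/NN/HO/NP working registers.
+ */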
+
+#define ICE_PARSER_MAX_PKT_LEN 504
+#define ICE_PARSER_PKT_REV 32
+#define ICE_PARSER_GPR_NUM 128
+#define ICE_PARSER_FLG_NUM 64
+#define ICE_PARSER_ERR_NUM 16
+#define ICE_MARKER_ID_SIZE 9
+#define ICE_MARKER_MAX_SIZE \
+ (ICE_MARKER_ID_SIZE * BITS_PER_BYTE - 1)
+#define ICE_MARKER_ID_NUM 8
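+/* Markers are accumulated in a bitmap of ICE_MARKER_ID_SIZE bytes; the
+ * largest representable index, ICE_MARKER_MAX_SIZE (9 * 8 - 1 = 71), doubles
+ * as the "no marker" sentinel in ice_parser_rt.c.
+ */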
+#define ICE_PO_PAIR_SIZE 256
+
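+/* Pending-update accumulator: ALU and Parse Graph execution in
+ * ice_parser_rt.c record GPR, flag and error changes here first;
+ * ice_pu_exe() commits them to the live registers once the round completes.
+ */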
+struct ice_gpr_pu {
+ /* array of flags to indicate if GPR needs to be updated */
+ bool gpr_val_upd[ICE_PARSER_GPR_NUM];
+ u16 gpr_val[ICE_PARSER_GPR_NUM];
+ u64 flg_msk;
+ u64 flg_val;
+ u16 err_msk;
+ u16 err_val;
+};
+
+enum ice_pg_prio {
+ ICE_PG_P0 = 0,
+ ICE_PG_P1 = 1,
+ ICE_PG_P2 = 2,
+ ICE_PG_P3 = 3,
+};
+
+struct ice_parser_rt {
+ struct ice_parser *psr;
+ u16 gpr[ICE_PARSER_GPR_NUM];
+ u8 pkt_buf[ICE_PARSER_MAX_PKT_LEN + ICE_PARSER_PKT_REV];
+ u16 pkt_len;
+ u16 po;
+ u8 bst_key[ICE_BST_TCAM_KEY_SIZE];
+ struct ice_pg_cam_key pg_key;
+ u8 pg_prio;
+ struct ice_alu *alu0;
+ struct ice_alu *alu1;
+ struct ice_alu *alu2;
+ struct ice_pg_cam_action *action;
+ struct ice_gpr_pu pu;
+ u8 markers[ICE_MARKER_ID_SIZE];
+ bool protocols[ICE_PO_PAIR_SIZE];
+ u16 offsets[ICE_PO_PAIR_SIZE];
+};
+
+struct ice_parser_proto_off {
+ u8 proto_id; /* hardware protocol ID */
+ u16 offset; /* offset from the start of the protocol header */
+};
+
+#define ICE_PARSER_PROTO_OFF_PAIR_SIZE 16
+#define ICE_PARSER_FLAG_PSR_SIZE 8
+#define ICE_PARSER_FV_SIZE 48
+#define ICE_PARSER_FV_MAX 24
+#define ICE_BT_TUN_PORT_OFF_H 16
+#define ICE_BT_TUN_PORT_OFF_L 15
+#define ICE_BT_VM_OFF 0
+#define ICE_UDP_PORT_OFF_H 1
+#define ICE_UDP_PORT_OFF_L 0
+
+struct ice_parser_result {
+ u16 ptype; /* 16 bits hardware PTYPE */
+ /* array of protocol and header offset pairs */
+ struct ice_parser_proto_off po[ICE_PARSER_PROTO_OFF_PAIR_SIZE];
+ int po_num; /* number of protocol-offset pairs, must be <= 16 */
+ u64 flags_psr; /* parser flags */
+ u64 flags_pkt; /* packet flags */
+ u16 flags_sw; /* key builder flags for SW */
+ u16 flags_acl; /* key builder flags for ACL */
+ u16 flags_fd; /* key builder flags for FD */
+ u16 flags_rss; /* key builder flags for RSS */
+};
+
+void ice_parser_rt_reset(struct ice_parser_rt *rt);
+void ice_parser_rt_pktbuf_set(struct ice_parser_rt *rt, const u8 *pkt_buf,
+ int pkt_len);
+int ice_parser_rt_execute(struct ice_parser_rt *rt,
+ struct ice_parser_result *rslt);
+
+struct ice_parser {
+ struct ice_hw *hw; /* pointer to the hardware structure */
+
+ struct ice_imem_item *imem_table;
+ struct ice_metainit_item *mi_table;
+
+ struct ice_pg_cam_item *pg_cam_table;
+ struct ice_pg_cam_item *pg_sp_cam_table;
+ struct ice_pg_nm_cam_item *pg_nm_cam_table;
+ struct ice_pg_nm_cam_item *pg_nm_sp_cam_table;
+
+ struct ice_bst_tcam_item *bst_tcam_table;
+ struct ice_lbl_item *bst_lbl_table;
+ struct ice_ptype_mk_tcam_item *ptype_mk_tcam_table;
+ struct ice_mk_grp_item *mk_grp_table;
+ struct ice_proto_grp_item *proto_grp_table;
+ struct ice_flg_rd_item *flg_rd_table;
+
+ struct ice_xlt_kb *xlt_kb_sw;
+ struct ice_xlt_kb *xlt_kb_acl;
+ struct ice_xlt_kb *xlt_kb_fd;
+ struct ice_xlt_kb *xlt_kb_rss;
+
+ struct ice_parser_rt rt;
+};
+
+struct ice_parser *ice_parser_create(struct ice_hw *hw);
+void ice_parser_destroy(struct ice_parser *psr);
+void ice_parser_dvm_set(struct ice_parser *psr, bool on);
+int ice_parser_vxlan_tunnel_set(struct ice_parser *psr, u16 udp_port, bool on);
+int ice_parser_geneve_tunnel_set(struct ice_parser *psr, u16 udp_port, bool on);
+int ice_parser_ecpri_tunnel_set(struct ice_parser *psr, u16 udp_port, bool on);
+int ice_parser_run(struct ice_parser *psr, const u8 *pkt_buf,
+ int pkt_len, struct ice_parser_result *rslt);
+void ice_parser_result_dump(struct ice_hw *hw, struct ice_parser_result *rslt);
+
+struct ice_parser_fv {
+ u8 proto_id; /* hardware protocol ID */
+ u16 offset; /* offset from the start of the protocol header */
+ u16 spec; /* pattern to match */
+ u16 msk; /* pattern mask */
+};
+
+struct ice_parser_profile {
+ /* array of field vectors */
+ struct ice_parser_fv fv[ICE_PARSER_FV_SIZE];
+ int fv_num; /* number of field vectors, must be <= 48 */
+ u16 flags; /* key builder flags */
+ u16 flags_msk; /* key builder flag mask */
+
+ DECLARE_BITMAP(ptypes, ICE_FLOW_PTYPE_MAX); /* PTYPE bitmap */
+};
+
+int ice_parser_profile_init(struct ice_parser_result *rslt,
+ const u8 *pkt_buf, const u8 *msk_buf,
+ int buf_len, enum ice_block blk,
+ struct ice_parser_profile *prof);
+void ice_parser_profile_dump(struct ice_hw *hw,
+ struct ice_parser_profile *prof);
+#endif /* _ICE_PARSER_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_parser_rt.c b/drivers/net/ethernet/intel/ice/ice_parser_rt.c
new file mode 100644
index 000000000000..3995d662e050
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_parser_rt.c
@@ -0,0 +1,859 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2024 Intel Corporation */
+
+#include "ice_common.h"
+
+static void ice_rt_tsr_set(struct ice_parser_rt *rt, u16 tsr)
+{
+ rt->gpr[ICE_GPR_TSR_IDX] = tsr;
+}
+
+static void ice_rt_ho_set(struct ice_parser_rt *rt, u16 ho)
+{
+ rt->gpr[ICE_GPR_HO_IDX] = ho;
+ memcpy(&rt->gpr[ICE_GPR_HV_IDX], &rt->pkt_buf[ho], ICE_GPR_HV_SIZE);
+}
+
+static void ice_rt_np_set(struct ice_parser_rt *rt, u16 pc)
+{
+ rt->gpr[ICE_GPR_NP_IDX] = pc;
+}
+
+static void ice_rt_nn_set(struct ice_parser_rt *rt, u16 node)
+{
+ rt->gpr[ICE_GPR_NN_IDX] = node;
+}
+
+static void
+ice_rt_flag_set(struct ice_parser_rt *rt, unsigned int idx, bool set)
+{
+ struct ice_hw *hw = rt->psr->hw;
+ unsigned int word, id;
+
+ word = idx / ICE_GPR_FLG_SIZE;
+ id = idx % ICE_GPR_FLG_SIZE;
+
+ if (set) {
+ rt->gpr[ICE_GPR_FLG_IDX + word] |= (u16)BIT(id);
+ ice_debug(hw, ICE_DBG_PARSER, "Set parser flag %u\n", idx);
+ } else {
+ rt->gpr[ICE_GPR_FLG_IDX + word] &= ~(u16)BIT(id);
+ ice_debug(hw, ICE_DBG_PARSER, "Clear parser flag %u\n", idx);
+ }
+}
+
+static void ice_rt_gpr_set(struct ice_parser_rt *rt, int idx, u16 val)
+{
+ struct ice_hw *hw = rt->psr->hw;
+
+ if (idx == ICE_GPR_HO_IDX)
+ ice_rt_ho_set(rt, val);
+ else
+ rt->gpr[idx] = val;
+
+ ice_debug(hw, ICE_DBG_PARSER, "Set GPR %d value %d\n", idx, val);
+}
+
+static void ice_rt_err_set(struct ice_parser_rt *rt, unsigned int idx, bool set)
+{
+ struct ice_hw *hw = rt->psr->hw;
+
+ if (set) {
+ rt->gpr[ICE_GPR_ERR_IDX] |= (u16)BIT(idx);
+ ice_debug(hw, ICE_DBG_PARSER, "Set parser error %u\n", idx);
+ } else {
+ rt->gpr[ICE_GPR_ERR_IDX] &= ~(u16)BIT(idx);
+ ice_debug(hw, ICE_DBG_PARSER, "Reset parser error %u\n", idx);
+ }
+}
+
+/**
+ * ice_parser_rt_reset - reset the parser runtime
+ * @rt: pointer to the parser runtime
+ */
+void ice_parser_rt_reset(struct ice_parser_rt *rt)
+{
+ struct ice_parser *psr = rt->psr;
+ struct ice_metainit_item *mi;
+ unsigned int i;
+
+ mi = &psr->mi_table[0];
+
+ memset(rt, 0, sizeof(*rt));
+ rt->psr = psr;
+
+ ice_rt_tsr_set(rt, mi->tsr);
+ ice_rt_ho_set(rt, mi->ho);
+ ice_rt_np_set(rt, mi->pc);
+ ice_rt_nn_set(rt, mi->pg_rn);
+
+ for (i = 0; i < ICE_PARSER_FLG_NUM; i++) {
+ if (mi->flags & BIT(i))
+ ice_rt_flag_set(rt, i, true);
+ }
+}
+
+/**
+ * ice_parser_rt_pktbuf_set - set a packet into parser runtime
+ * @rt: pointer to the parser runtime
+ * @pkt_buf: buffer with packet data
+ * @pkt_len: packet buffer length
+ */
+void ice_parser_rt_pktbuf_set(struct ice_parser_rt *rt, const u8 *pkt_buf,
+ int pkt_len)
+{
+ int len = min(ICE_PARSER_MAX_PKT_LEN, pkt_len);
+ u16 ho = rt->gpr[ICE_GPR_HO_IDX];
+
+ memcpy(rt->pkt_buf, pkt_buf, len);
+ rt->pkt_len = pkt_len;
+
+ memcpy(&rt->gpr[ICE_GPR_HV_IDX], &rt->pkt_buf[ho], ICE_GPR_HV_SIZE);
+}
+
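+/* Build the 20-byte Boost TCAM key: key[19] carries either the TCAM Search
+ * Register value or the PG priority (selected by tsr_ctrl), while key[18..0]
+ * carry the 19 packet bytes starting at the current header offset in reverse
+ * order (bytes past the packet buffer are zeroed).
+ */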
+static void ice_bst_key_init(struct ice_parser_rt *rt,
+ struct ice_imem_item *imem)
+{
+ u8 tsr = (u8)rt->gpr[ICE_GPR_TSR_IDX];
+ u16 ho = rt->gpr[ICE_GPR_HO_IDX];
+ u8 *key = rt->bst_key;
+ int idd, i;
+
+ idd = ICE_BST_TCAM_KEY_SIZE - 1;
+ if (imem->b_kb.tsr_ctrl)
+ key[idd] = tsr;
+ else
+ key[idd] = imem->b_kb.prio;
+
+ idd = ICE_BST_TCAM_KEY_SIZE - 2;
+ for (i = idd; i >= 0; i--) {
+ int j;
+
+ j = ho + idd - i;
+ if (j < ICE_PARSER_MAX_PKT_LEN)
+ key[i] = rt->pkt_buf[j];
+ else
+ key[i] = 0;
+ }
+
+ ice_debug_array_w_prefix(rt->psr->hw, ICE_DBG_PARSER,
+ KBUILD_MODNAME ": Generated Boost TCAM Key",
+ key, ICE_BST_TCAM_KEY_SIZE);
+}
+
+static u16 ice_bit_rev_u16(u16 v, int len)
+{
+ return bitrev16(v) >> (BITS_PER_TYPE(v) - len);
+}
+
+static u32 ice_bit_rev_u32(u32 v, int len)
+{
+ return bitrev32(v) >> (BITS_PER_TYPE(v) - len);
+}
+
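+/* Select @len bits at bit offset @start from the header-value GPRs; the
+ * per-byte bit reversal (bitrev8x4) plus the final ice_bit_rev_u32() extract
+ * the field using what appears to be MSB-first bit numbering within the raw
+ * packet bytes held in the HV registers.
+ */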
+static u32 ice_hv_bit_sel(struct ice_parser_rt *rt, int start, int len)
+{
+ int offset;
+ u32 buf[2];
+ u64 val;
+
+ offset = ICE_GPR_HV_IDX + (start / BITS_PER_TYPE(u16));
+
+ memcpy(buf, &rt->gpr[offset], sizeof(buf));
+
+ buf[0] = bitrev8x4(buf[0]);
+ buf[1] = bitrev8x4(buf[1]);
+
+ val = *(u64 *)buf;
+ val >>= start % BITS_PER_TYPE(u16);
+
+ return ice_bit_rev_u32(val, len);
+}
+
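+/* Build the 'next protocol' part of the Parse Graph key according to the key
+ * builder opcode: EXTRACT pulls len_reg1 bits from the header-value window
+ * at bit offset start_reg0, BUILD concatenates two GPRs (start_reg0 low,
+ * len_reg1 high), BYPASS yields 0, and any other opcode returns U32_MAX so
+ * the callers can fail the round.
+ */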
+static u32 ice_pk_build(struct ice_parser_rt *rt,
+ struct ice_np_keybuilder *kb)
+{
+ if (kb->opc == ICE_NPKB_OPC_EXTRACT)
+ return ice_hv_bit_sel(rt, kb->start_reg0, kb->len_reg1);
+ else if (kb->opc == ICE_NPKB_OPC_BUILD)
+ return rt->gpr[kb->start_reg0] |
+ ((u32)rt->gpr[kb->len_reg1] << BITS_PER_TYPE(u16));
+ else if (kb->opc == ICE_NPKB_OPC_BYPASS)
+ return 0;
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Unsupported OP Code %u\n",
+ kb->opc);
+ return U32_MAX;
+}
+
+static bool ice_flag_get(struct ice_parser_rt *rt, unsigned int index)
+{
+ int word = index / ICE_GPR_FLG_SIZE;
+ int id = index % ICE_GPR_FLG_SIZE;
+
+ return !!(rt->gpr[ICE_GPR_FLG_IDX + word] & (u16)BIT(id));
+}
+
+static int ice_imem_pgk_init(struct ice_parser_rt *rt,
+ struct ice_imem_item *imem)
+{
+ memset(&rt->pg_key, 0, sizeof(rt->pg_key));
+ rt->pg_key.next_proto = ice_pk_build(rt, &imem->np_kb);
+ if (rt->pg_key.next_proto == U32_MAX)
+ return -EINVAL;
+
+ if (imem->pg_kb.flag0_ena)
+ rt->pg_key.flag0 = ice_flag_get(rt, imem->pg_kb.flag0_idx);
+ if (imem->pg_kb.flag1_ena)
+ rt->pg_key.flag1 = ice_flag_get(rt, imem->pg_kb.flag1_idx);
+ if (imem->pg_kb.flag2_ena)
+ rt->pg_key.flag2 = ice_flag_get(rt, imem->pg_kb.flag2_idx);
+ if (imem->pg_kb.flag3_ena)
+ rt->pg_key.flag3 = ice_flag_get(rt, imem->pg_kb.flag3_idx);
+
+ rt->pg_key.alu_reg = rt->gpr[imem->pg_kb.alu_reg_idx];
+ rt->pg_key.node_id = rt->gpr[ICE_GPR_NN_IDX];
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Generate Parse Graph Key: node_id(%d), flag0-3(%d,%d,%d,%d), boost_idx(%d), alu_reg(0x%04x), next_proto(0x%08x)\n",
+ rt->pg_key.node_id,
+ rt->pg_key.flag0,
+ rt->pg_key.flag1,
+ rt->pg_key.flag2,
+ rt->pg_key.flag3,
+ rt->pg_key.boost_idx,
+ rt->pg_key.alu_reg,
+ rt->pg_key.next_proto);
+
+ return 0;
+}
+
+static void ice_imem_alu0_set(struct ice_parser_rt *rt,
+ struct ice_imem_item *imem)
+{
+ rt->alu0 = &imem->alu0;
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU0 from imem pc %d\n",
+ imem->idx);
+}
+
+static void ice_imem_alu1_set(struct ice_parser_rt *rt,
+ struct ice_imem_item *imem)
+{
+ rt->alu1 = &imem->alu1;
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU1 from imem pc %d\n",
+ imem->idx);
+}
+
+static void ice_imem_alu2_set(struct ice_parser_rt *rt,
+ struct ice_imem_item *imem)
+{
+ rt->alu2 = &imem->alu2;
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU2 from imem pc %d\n",
+ imem->idx);
+}
+
+static void ice_imem_pgp_set(struct ice_parser_rt *rt,
+ struct ice_imem_item *imem)
+{
+ rt->pg_prio = imem->pg_prio;
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load PG priority %d from imem pc %d\n",
+ rt->pg_prio, imem->idx);
+}
+
+static int ice_bst_pgk_init(struct ice_parser_rt *rt,
+ struct ice_bst_tcam_item *bst)
+{
+ memset(&rt->pg_key, 0, sizeof(rt->pg_key));
+ rt->pg_key.boost_idx = bst->hit_idx_grp;
+ rt->pg_key.next_proto = ice_pk_build(rt, &bst->np_kb);
+ if (rt->pg_key.next_proto == U32_MAX)
+ return -EINVAL;
+
+ if (bst->pg_kb.flag0_ena)
+ rt->pg_key.flag0 = ice_flag_get(rt, bst->pg_kb.flag0_idx);
+ if (bst->pg_kb.flag1_ena)
+ rt->pg_key.flag1 = ice_flag_get(rt, bst->pg_kb.flag1_idx);
+ if (bst->pg_kb.flag2_ena)
+ rt->pg_key.flag2 = ice_flag_get(rt, bst->pg_kb.flag2_idx);
+ if (bst->pg_kb.flag3_ena)
+ rt->pg_key.flag3 = ice_flag_get(rt, bst->pg_kb.flag3_idx);
+
+ rt->pg_key.alu_reg = rt->gpr[bst->pg_kb.alu_reg_idx];
+ rt->pg_key.node_id = rt->gpr[ICE_GPR_NN_IDX];
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Generate Parse Graph Key: node_id(%d), flag0-3(%d,%d,%d,%d), boost_idx(%d), alu_reg(0x%04x), next_proto(0x%08x)\n",
+ rt->pg_key.node_id,
+ rt->pg_key.flag0,
+ rt->pg_key.flag1,
+ rt->pg_key.flag2,
+ rt->pg_key.flag3,
+ rt->pg_key.boost_idx,
+ rt->pg_key.alu_reg,
+ rt->pg_key.next_proto);
+
+ return 0;
+}
+
+static void ice_bst_alu0_set(struct ice_parser_rt *rt,
+ struct ice_bst_tcam_item *bst)
+{
+ rt->alu0 = &bst->alu0;
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU0 from boost address %d\n",
+ bst->addr);
+}
+
+static void ice_bst_alu1_set(struct ice_parser_rt *rt,
+ struct ice_bst_tcam_item *bst)
+{
+ rt->alu1 = &bst->alu1;
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU1 from boost address %d\n",
+ bst->addr);
+}
+
+static void ice_bst_alu2_set(struct ice_parser_rt *rt,
+ struct ice_bst_tcam_item *bst)
+{
+ rt->alu2 = &bst->alu2;
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU2 from boost address %d\n",
+ bst->addr);
+}
+
+static void ice_bst_pgp_set(struct ice_parser_rt *rt,
+ struct ice_bst_tcam_item *bst)
+{
+ rt->pg_prio = bst->pg_prio;
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load PG priority %d from boost address %d\n",
+ rt->pg_prio, bst->addr);
+}
+
+static struct ice_pg_cam_item *ice_rt_pg_cam_match(struct ice_parser_rt *rt)
+{
+ struct ice_parser *psr = rt->psr;
+ struct ice_pg_cam_item *item;
+
+ item = ice_pg_cam_match(psr->pg_cam_table, ICE_PG_CAM_TABLE_SIZE,
+ &rt->pg_key);
+ if (!item)
+ item = ice_pg_cam_match(psr->pg_sp_cam_table,
+ ICE_PG_SP_CAM_TABLE_SIZE, &rt->pg_key);
+ return item;
+}
+
+static
+struct ice_pg_nm_cam_item *ice_rt_pg_nm_cam_match(struct ice_parser_rt *rt)
+{
+ struct ice_parser *psr = rt->psr;
+ struct ice_pg_nm_cam_item *item;
+
+ item = ice_pg_nm_cam_match(psr->pg_nm_cam_table,
+ ICE_PG_NM_CAM_TABLE_SIZE, &rt->pg_key);
+
+ if (!item)
+ item = ice_pg_nm_cam_match(psr->pg_nm_sp_cam_table,
+ ICE_PG_NM_SP_CAM_TABLE_SIZE,
+ &rt->pg_key);
+ return item;
+}
+
+static void ice_gpr_add(struct ice_parser_rt *rt, int idx, u16 val)
+{
+ rt->pu.gpr_val_upd[idx] = true;
+ rt->pu.gpr_val[idx] = val;
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Pending update for register %d value %d\n",
+ idx, val);
+}
+
+static void ice_pg_exe(struct ice_parser_rt *rt)
+{
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ParseGraph action ...\n");
+
+ ice_gpr_add(rt, ICE_GPR_NP_IDX, rt->action->next_pc);
+ ice_gpr_add(rt, ICE_GPR_NN_IDX, rt->action->next_node);
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ParseGraph action done.\n");
+}
+
+static void ice_flg_add(struct ice_parser_rt *rt, int idx, bool val)
+{
+ rt->pu.flg_msk |= BIT_ULL(idx);
+ if (val)
+ rt->pu.flg_val |= BIT_ULL(idx);
+ else
+ rt->pu.flg_val &= ~BIT_ULL(idx);
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Pending update for flag %d value %d\n",
+ idx, val);
+}
+
+static void ice_flg_update(struct ice_parser_rt *rt, struct ice_alu *alu)
+{
+ u32 hv_bit_sel;
+ int i;
+
+ if (!alu->dedicate_flags_ena)
+ return;
+
+ if (alu->flags_extr_imm) {
+ for (i = 0; i < alu->dst_len; i++)
+ ice_flg_add(rt, alu->dst_start + i,
+ !!(alu->flags_start_imm & BIT(i)));
+ } else {
+ for (i = 0; i < alu->dst_len; i++) {
+ hv_bit_sel = ice_hv_bit_sel(rt,
+ alu->flags_start_imm + i,
+ 1);
+ ice_flg_add(rt, alu->dst_start + i, !!hv_bit_sel);
+ }
+ }
+}
+
+static void ice_po_update(struct ice_parser_rt *rt, struct ice_alu *alu)
+{
+ if (alu->proto_offset_opc == ICE_PO_OFF_HDR_ADD)
+ rt->po = (u16)(rt->gpr[ICE_GPR_HO_IDX] + alu->proto_offset);
+ else if (alu->proto_offset_opc == ICE_PO_OFF_HDR_SUB)
+ rt->po = (u16)(rt->gpr[ICE_GPR_HO_IDX] - alu->proto_offset);
+ else if (alu->proto_offset_opc == ICE_PO_OFF_REMAIN)
+ rt->po = rt->gpr[ICE_GPR_HO_IDX];
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Update Protocol Offset = %d\n",
+ rt->po);
+}
+
+static u16 ice_reg_bit_sel(struct ice_parser_rt *rt, int reg_idx,
+ int start, int len)
+{
+ int offset;
+ u32 val;
+
+ offset = reg_idx + (start / BITS_PER_TYPE(u16));
+
+ memcpy(&val, &rt->gpr[offset], sizeof(val));
+
+ val = bitrev8x4(val);
+ val >>= start % BITS_PER_TYPE(u16);
+
+ return ice_bit_rev_u16(val, len);
+}
+
+static void ice_err_add(struct ice_parser_rt *rt, int idx, bool val)
+{
+ rt->pu.err_msk |= (u16)BIT(idx);
+ if (val)
+ rt->pu.flg_val |= (u64)BIT_ULL(idx);
+ else
+ rt->pu.flg_val &= ~(u64)BIT_ULL(idx);
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Pending update for error %d value %d\n",
+ idx, val);
+}
+
+static void ice_dst_reg_bit_set(struct ice_parser_rt *rt, struct ice_alu *alu,
+ bool val)
+{
+ u16 flg_idx;
+
+ if (alu->dedicate_flags_ena) {
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "DedicatedFlagsEnable should not be enabled in opcode %d\n",
+ alu->opc);
+ return;
+ }
+
+ if (alu->dst_reg_id == ICE_GPR_ERR_IDX) {
+ if (alu->dst_start >= ICE_PARSER_ERR_NUM) {
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Invalid error %d\n",
+ alu->dst_start);
+ return;
+ }
+ ice_err_add(rt, alu->dst_start, val);
+ } else if (alu->dst_reg_id >= ICE_GPR_FLG_IDX) {
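+ /* each flag GPR holds 16 flags, so flatten the (register, bit)
+ * pair into a flag index
+ */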
+ flg_idx = (u16)(((alu->dst_reg_id - ICE_GPR_FLG_IDX) << 4) +
+ alu->dst_start);
+
+ if (flg_idx >= ICE_PARSER_FLG_NUM) {
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Invalid flag %d\n",
+ flg_idx);
+ return;
+ }
+ ice_flg_add(rt, flg_idx, val);
+ } else {
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Unexpected Dest Register Bit set, RegisterID %d Start %d\n",
+ alu->dst_reg_id, alu->dst_start);
+ }
+}
+
+static void ice_alu_exe(struct ice_parser_rt *rt, struct ice_alu *alu)
+{
+ u16 dst, src, shift, imm;
+
+ if (alu->shift_xlate_sel) {
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "shift_xlate_sel != 0 is not expected\n");
+ return;
+ }
+
+ ice_po_update(rt, alu);
+ ice_flg_update(rt, alu);
+
+ dst = rt->gpr[alu->dst_reg_id];
+ src = ice_reg_bit_sel(rt, alu->src_reg_id,
+ alu->src_start, alu->src_len);
+ shift = alu->shift_xlate_key;
+ imm = alu->imm;
+
+ switch (alu->opc) {
+ case ICE_ALU_PARK:
+ break;
+ case ICE_ALU_MOV_ADD:
+ dst = (src << shift) + imm;
+ ice_gpr_add(rt, alu->dst_reg_id, dst);
+ break;
+ case ICE_ALU_ADD:
+ dst += (src << shift) + imm;
+ ice_gpr_add(rt, alu->dst_reg_id, dst);
+ break;
+ case ICE_ALU_ORLT:
+ if (src < imm)
+ ice_dst_reg_bit_set(rt, alu, true);
+ ice_gpr_add(rt, ICE_GPR_NP_IDX, alu->branch_addr);
+ break;
+ case ICE_ALU_OREQ:
+ if (src == imm)
+ ice_dst_reg_bit_set(rt, alu, true);
+ ice_gpr_add(rt, ICE_GPR_NP_IDX, alu->branch_addr);
+ break;
+ case ICE_ALU_SETEQ:
+ ice_dst_reg_bit_set(rt, alu, src == imm);
+ ice_gpr_add(rt, ICE_GPR_NP_IDX, alu->branch_addr);
+ break;
+ case ICE_ALU_MOV_XOR:
+ dst = (src << shift) ^ imm;
+ ice_gpr_add(rt, alu->dst_reg_id, dst);
+ break;
+ default:
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Unsupported ALU instruction %d\n",
+ alu->opc);
+ break;
+ }
+}
+
+static void ice_alu0_exe(struct ice_parser_rt *rt)
+{
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU0 ...\n");
+ ice_alu_exe(rt, rt->alu0);
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU0 done.\n");
+}
+
+static void ice_alu1_exe(struct ice_parser_rt *rt)
+{
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU1 ...\n");
+ ice_alu_exe(rt, rt->alu1);
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU1 done.\n");
+}
+
+static void ice_alu2_exe(struct ice_parser_rt *rt)
+{
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU2 ...\n");
+ ice_alu_exe(rt, rt->alu2);
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU2 done.\n");
+}
+
+static void ice_pu_exe(struct ice_parser_rt *rt)
+{
+ struct ice_gpr_pu *pu = &rt->pu;
+ unsigned int i;
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Updating Registers ...\n");
+
+ for (i = 0; i < ICE_PARSER_GPR_NUM; i++) {
+ if (pu->gpr_val_upd[i])
+ ice_rt_gpr_set(rt, i, pu->gpr_val[i]);
+ }
+
+ for (i = 0; i < ICE_PARSER_FLG_NUM; i++) {
+ if (pu->flg_msk & BIT(i))
+ ice_rt_flag_set(rt, i, pu->flg_val & BIT(i));
+ }
+
+ for (i = 0; i < ICE_PARSER_ERR_NUM; i++) {
+ if (pu->err_msk & BIT(i))
+ ice_rt_err_set(rt, i, pu->err_val & BIT(i));
+ }
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Updating Registers done.\n");
+}
+
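+/* Run one round of ALU and Parse Graph processing: pg_prio selects where the
+ * PG action executes relative to ALU0-2 (P0 first, P3 last). Pending GPR,
+ * flag and error updates are then committed, and the header offset is moved
+ * by ho_inc in the direction given by ho_polarity.
+ */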
+static void ice_alu_pg_exe(struct ice_parser_rt *rt)
+{
+ memset(&rt->pu, 0, sizeof(rt->pu));
+
+ switch (rt->pg_prio) {
+ case (ICE_PG_P0):
+ ice_pg_exe(rt);
+ ice_alu0_exe(rt);
+ ice_alu1_exe(rt);
+ ice_alu2_exe(rt);
+ break;
+ case (ICE_PG_P1):
+ ice_alu0_exe(rt);
+ ice_pg_exe(rt);
+ ice_alu1_exe(rt);
+ ice_alu2_exe(rt);
+ break;
+ case (ICE_PG_P2):
+ ice_alu0_exe(rt);
+ ice_alu1_exe(rt);
+ ice_pg_exe(rt);
+ ice_alu2_exe(rt);
+ break;
+ case (ICE_PG_P3):
+ ice_alu0_exe(rt);
+ ice_alu1_exe(rt);
+ ice_alu2_exe(rt);
+ ice_pg_exe(rt);
+ break;
+ }
+
+ ice_pu_exe(rt);
+
+ if (rt->action->ho_inc == 0)
+ return;
+
+ if (rt->action->ho_polarity)
+ ice_rt_ho_set(rt, rt->gpr[ICE_GPR_HO_IDX] + rt->action->ho_inc);
+ else
+ ice_rt_ho_set(rt, rt->gpr[ICE_GPR_HO_IDX] - rt->action->ho_inc);
+}
+
+static void ice_proto_off_update(struct ice_parser_rt *rt)
+{
+ struct ice_parser *psr = rt->psr;
+
+ if (rt->action->is_pg) {
+ struct ice_proto_grp_item *proto_grp =
+ &psr->proto_grp_table[rt->action->proto_id];
+ u16 po;
+ int i;
+
+ for (i = 0; i < ICE_PROTO_COUNT_PER_GRP; i++) {
+ struct ice_proto_off *entry = &proto_grp->po[i];
+
+ if (entry->proto_id == U8_MAX)
+ break;
+
+ if (!entry->polarity)
+ po = rt->po + entry->offset;
+ else
+ po = rt->po - entry->offset;
+
+ rt->protocols[entry->proto_id] = true;
+ rt->offsets[entry->proto_id] = po;
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Set Protocol %d at offset %d\n",
+ entry->proto_id, po);
+ }
+ } else {
+ rt->protocols[rt->action->proto_id] = true;
+ rt->offsets[rt->action->proto_id] = rt->po;
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Set Protocol %d at offset %d\n",
+ rt->action->proto_id, rt->po);
+ }
+}
+
+static void ice_marker_set(struct ice_parser_rt *rt, int idx)
+{
+ unsigned int byte = idx / BITS_PER_BYTE;
+ unsigned int bit = idx % BITS_PER_BYTE;
+
+ rt->markers[byte] |= (u8)BIT(bit);
+}
+
+static void ice_marker_update(struct ice_parser_rt *rt)
+{
+ struct ice_parser *psr = rt->psr;
+
+ if (rt->action->is_mg) {
+ struct ice_mk_grp_item *mk_grp =
+ &psr->mk_grp_table[rt->action->marker_id];
+ int i;
+
+ for (i = 0; i < ICE_MARKER_ID_NUM; i++) {
+ u8 marker = mk_grp->markers[i];
+
+ if (marker == ICE_MARKER_MAX_SIZE)
+ break;
+
+ ice_marker_set(rt, marker);
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Set Marker %d\n",
+ marker);
+ }
+ } else {
+ if (rt->action->marker_id != ICE_MARKER_MAX_SIZE)
+ ice_marker_set(rt, rt->action->marker_id);
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Set Marker %d\n",
+ rt->action->marker_id);
+ }
+}
+
+static u16 ice_ptype_resolve(struct ice_parser_rt *rt)
+{
+ struct ice_ptype_mk_tcam_item *item;
+ struct ice_parser *psr = rt->psr;
+
+ item = ice_ptype_mk_tcam_match(psr->ptype_mk_tcam_table,
+ rt->markers, ICE_MARKER_ID_SIZE);
+ if (item)
+ return item->ptype;
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Could not resolve PTYPE\n");
+ return U16_MAX;
+}
+
+static void ice_proto_off_resolve(struct ice_parser_rt *rt,
+ struct ice_parser_result *rslt)
+{
+ int i;
+
+ for (i = 0; i < ICE_PO_PAIR_SIZE - 1; i++) {
+ if (rt->protocols[i]) {
+ rslt->po[rslt->po_num].proto_id = (u8)i;
+ rslt->po[rslt->po_num].offset = rt->offsets[i];
+ rslt->po_num++;
+ }
+ }
+}
+
+static void ice_result_resolve(struct ice_parser_rt *rt,
+ struct ice_parser_result *rslt)
+{
+ struct ice_parser *psr = rt->psr;
+
+ memset(rslt, 0, sizeof(*rslt));
+
+ memcpy(&rslt->flags_psr, &rt->gpr[ICE_GPR_FLG_IDX],
+ ICE_PARSER_FLAG_PSR_SIZE);
+ rslt->flags_pkt = ice_flg_redirect(psr->flg_rd_table, rslt->flags_psr);
+ rslt->flags_sw = ice_xlt_kb_flag_get(psr->xlt_kb_sw, rslt->flags_pkt);
+ rslt->flags_fd = ice_xlt_kb_flag_get(psr->xlt_kb_fd, rslt->flags_pkt);
+ rslt->flags_rss = ice_xlt_kb_flag_get(psr->xlt_kb_rss, rslt->flags_pkt);
+
+ ice_proto_off_resolve(rt, rslt);
+ rslt->ptype = ice_ptype_resolve(rt);
+}
+
+/**
+ * ice_parser_rt_execute - parser execution routine
+ * @rt: pointer to the parser runtime
+ * @rslt: input/output parameter to save parser result
+ *
+ * Return: 0 on success or errno.
+ */
+int ice_parser_rt_execute(struct ice_parser_rt *rt,
+ struct ice_parser_result *rslt)
+{
+ struct ice_pg_nm_cam_item *pg_nm_cam;
+ struct ice_parser *psr = rt->psr;
+ struct ice_pg_cam_item *pg_cam;
+ int status = 0;
+ u16 node;
+ u16 pc;
+
+ node = rt->gpr[ICE_GPR_NN_IDX];
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Start with Node: %u\n", node);
+
+ while (true) {
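+ /* Each round: build the Boost TCAM key, pick the PG key and ALUs
+ * from either the matching boost entry or imem, match the Parse
+ * Graph (or no-match) CAM, execute the ALUs and PG action, then
+ * record markers and protocol offsets.
+ */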
+ struct ice_bst_tcam_item *bst;
+ struct ice_imem_item *imem;
+
+ pc = rt->gpr[ICE_GPR_NP_IDX];
+ imem = &psr->imem_table[pc];
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load imem at pc: %u\n",
+ pc);
+
+ ice_bst_key_init(rt, imem);
+ bst = ice_bst_tcam_match(psr->bst_tcam_table, rt->bst_key);
+ if (!bst) {
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "No Boost TCAM Match\n");
+ status = ice_imem_pgk_init(rt, imem);
+ if (status)
+ break;
+ ice_imem_alu0_set(rt, imem);
+ ice_imem_alu1_set(rt, imem);
+ ice_imem_alu2_set(rt, imem);
+ ice_imem_pgp_set(rt, imem);
+ } else {
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Boost TCAM Match address: %u\n",
+ bst->addr);
+ if (imem->b_m.pg) {
+ status = ice_bst_pgk_init(rt, bst);
+ if (status)
+ break;
+ ice_bst_pgp_set(rt, bst);
+ } else {
+ status = ice_imem_pgk_init(rt, imem);
+ if (status)
+ break;
+ ice_imem_pgp_set(rt, imem);
+ }
+
+ if (imem->b_m.alu0)
+ ice_bst_alu0_set(rt, bst);
+ else
+ ice_imem_alu0_set(rt, imem);
+
+ if (imem->b_m.alu1)
+ ice_bst_alu1_set(rt, bst);
+ else
+ ice_imem_alu1_set(rt, imem);
+
+ if (imem->b_m.alu2)
+ ice_bst_alu2_set(rt, bst);
+ else
+ ice_imem_alu2_set(rt, imem);
+ }
+
+ rt->action = NULL;
+ pg_cam = ice_rt_pg_cam_match(rt);
+ if (!pg_cam) {
+ pg_nm_cam = ice_rt_pg_nm_cam_match(rt);
+ if (pg_nm_cam) {
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Match ParseGraph Nomatch CAM Address %u\n",
+ pg_nm_cam->idx);
+ rt->action = &pg_nm_cam->action;
+ }
+ } else {
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Match ParseGraph CAM Address %u\n",
+ pg_cam->idx);
+ rt->action = &pg_cam->action;
+ }
+
+ if (!rt->action) {
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Failed to match ParseGraph CAM, stop parsing.\n");
+ status = -EINVAL;
+ break;
+ }
+
+ ice_alu_pg_exe(rt);
+ ice_marker_update(rt);
+ ice_proto_off_update(rt);
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Go to node %u\n",
+ rt->action->next_node);
+
+ if (rt->action->is_last_round) {
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Last Round in ParseGraph Action, stop parsing.\n");
+ break;
+ }
+
+ if (rt->gpr[ICE_GPR_HO_IDX] >= rt->pkt_len) {
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Header Offset (%u) is larger than packet len (%u), stop parsing\n",
+ rt->gpr[ICE_GPR_HO_IDX], rt->pkt_len);
+ break;
+ }
+ }
+
+ ice_result_resolve(rt, rslt);
+
+ return status;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
index f6f27361c3cf..725167d557a8 100644
--- a/drivers/net/ethernet/intel/ice/ice_protocol_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
@@ -7,18 +7,24 @@
/* Each recipe can match up to 5 different fields. Fields to match can be meta-
* data, values extracted from packet headers, or results from other recipes.
- * One of the 5 fields is reserved for matching the switch ID. So, up to 4
- * recipes can provide intermediate results to another one through chaining,
- * e.g. recipes 0, 1, 2, and 3 can provide intermediate results to recipe 4.
+ * Therefore, up to 5 recipes can provide intermediate results to another one
+ * through chaining, e.g. recipes 0, 1, 2, 3 and 4 can provide intermediate
+ * results to recipe 5. Note that one of the fields in one of the recipes must
+ * always be reserved for matching the switch ID.
*/
-#define ICE_NUM_WORDS_RECIPE 4
+#define ICE_NUM_WORDS_RECIPE 5
-/* Max recipes that can be chained */
+/* Max recipes that can be chained, not including the last one, which combines
+ * intermediate results.
+ */
#define ICE_MAX_CHAIN_RECIPE 5
-/* 1 word reserved for switch ID from allowed 5 words.
- * So a recipe can have max 4 words. And you can chain 5 such recipes
- * together. So maximum words that can be programmed for look up is 5 * 4.
+/* Total max recipes in chain recipe (including intermediate results) */
+#define ICE_MAX_CHAIN_RECIPE_RES (ICE_MAX_CHAIN_RECIPE + 1)
+
+/* A recipe can have max 5 words, and 5 recipes can be chained together (using
+ * the 6th one, which would contain only result indexes). So maximum words that
+ * can be programmed for lookup is 5 * 5 (not including intermediate results).
*/
#define ICE_MAX_CHAIN_WORDS (ICE_NUM_WORDS_RECIPE * ICE_MAX_CHAIN_RECIPE)
@@ -43,6 +49,7 @@ enum ice_protocol_type {
ICE_NVGRE,
ICE_GTP,
ICE_GTP_NO_PAY,
+ ICE_PFCP,
ICE_PPPOE,
ICE_L2TPV3,
ICE_VLAN_EX,
@@ -61,6 +68,7 @@ enum ice_sw_tunnel_type {
ICE_SW_TUN_NVGRE,
ICE_SW_TUN_GTPU,
ICE_SW_TUN_GTPC,
+ ICE_SW_TUN_PFCP,
ICE_ALL_TUNNELS /* All tunnel types including NVGRE */
};
@@ -74,26 +82,46 @@ enum ice_sw_tunnel_type {
enum ice_prot_id {
ICE_PROT_ID_INVAL = 0,
ICE_PROT_MAC_OF_OR_S = 1,
+ ICE_PROT_MAC_O2 = 2,
ICE_PROT_MAC_IL = 4,
+ ICE_PROT_MAC_IN_MAC = 7,
ICE_PROT_ETYPE_OL = 9,
ICE_PROT_ETYPE_IL = 10,
+ ICE_PROT_PAY = 15,
+ ICE_PROT_EVLAN_O = 16,
+ ICE_PROT_VLAN_O = 17,
+ ICE_PROT_VLAN_IF = 18,
+ ICE_PROT_MPLS_OL_MINUS_1 = 27,
+ ICE_PROT_MPLS_OL_OR_OS = 28,
+ ICE_PROT_MPLS_IL = 29,
ICE_PROT_IPV4_OF_OR_S = 32,
ICE_PROT_IPV4_IL = 33,
+ ICE_PROT_IPV4_IL_IL = 34,
ICE_PROT_IPV6_OF_OR_S = 40,
ICE_PROT_IPV6_IL = 41,
+ ICE_PROT_IPV6_IL_IL = 42,
+ ICE_PROT_IPV6_NEXT_PROTO = 43,
+ ICE_PROT_IPV6_FRAG = 47,
ICE_PROT_TCP_IL = 49,
ICE_PROT_UDP_OF = 52,
ICE_PROT_UDP_IL_OR_S = 53,
ICE_PROT_GRE_OF = 64,
+ ICE_PROT_NSH_F = 84,
ICE_PROT_ESP_F = 88,
ICE_PROT_ESP_2 = 89,
ICE_PROT_SCTP_IL = 96,
ICE_PROT_ICMP_IL = 98,
ICE_PROT_ICMPV6_IL = 100,
+ ICE_PROT_VRRP_F = 101,
+ ICE_PROT_OSPF = 102,
ICE_PROT_PPPOE = 103,
ICE_PROT_L2TPV3 = 104,
+ ICE_PROT_ATAOE_OF = 114,
+ ICE_PROT_CTRL_OF = 116,
+ ICE_PROT_LLDP_OF = 117,
ICE_PROT_ARP_OF = 118,
ICE_PROT_META_ID = 255, /* when offset == metadata */
+ ICE_PROT_EAPOL_OF = 120,
ICE_PROT_INVALID = 255 /* when offset == ICE_FV_OFFSET_INVAL */
};
@@ -202,6 +230,15 @@ struct ice_udp_gtp_hdr {
u8 rsvrd;
};
+struct ice_pfcp_hdr {
+ u8 flags;
+ u8 msg_type;
+ __be16 length;
+ __be64 seid;
+ __be32 seq;
+ u8 spare;
+} __packed __aligned(__alignof__(u16));
+
struct ice_pppoe_hdr {
u8 rsrvd_ver_type;
u8 rsrvd_code;
@@ -418,6 +455,7 @@ union ice_prot_hdr {
struct ice_udp_tnl_hdr tnl_hdr;
struct ice_nvgre_hdr nvgre_hdr;
struct ice_udp_gtp_hdr gtp_hdr;
+ struct ice_pfcp_hdr pfcp_hdr;
struct ice_pppoe_hdr pppoe_hdr;
struct ice_l2tpv3_sess_hdr l2tpv3_sess_hdr;
struct ice_hw_metadata metadata;
@@ -437,32 +475,11 @@ struct ice_prot_ext_tbl_entry {
/* Extractions to be looked up for a given recipe */
struct ice_prot_lkup_ext {
- u16 prot_type;
u8 n_val_words;
/* create a buffer to hold max words per recipe */
- u16 field_off[ICE_MAX_CHAIN_WORDS];
u16 field_mask[ICE_MAX_CHAIN_WORDS];
struct ice_fv_word fv_words[ICE_MAX_CHAIN_WORDS];
-
- /* Indicate field offsets that have field vector indices assigned */
- DECLARE_BITMAP(done, ICE_MAX_CHAIN_WORDS);
-};
-
-struct ice_pref_recipe_group {
- u8 n_val_pairs; /* Number of valid pairs */
- struct ice_fv_word pairs[ICE_NUM_WORDS_RECIPE];
- u16 mask[ICE_NUM_WORDS_RECIPE];
};
-struct ice_recp_grp_entry {
- struct list_head l_entry;
-
-#define ICE_INVAL_CHAIN_IND 0xFF
- u16 rid;
- u8 chain_idx;
- u16 fv_idx[ICE_NUM_WORDS_RECIPE];
- u16 fv_mask[ICE_NUM_WORDS_RECIPE];
- struct ice_pref_recipe_group r_group;
-};
#endif /* _ICE_PROTOCOL_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index c11eba07283c..4c8d20f2d2c0 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -5,254 +5,88 @@
#include "ice_lib.h"
#include "ice_trace.h"
-#define E810_OUT_PROP_DELAY_NS 1
-
-#define UNKNOWN_INCVAL_E82X 0x100000000ULL
-
-static const struct ptp_pin_desc ice_pin_desc_e810t[] = {
- /* name idx func chan */
- { "GNSS", GNSS, PTP_PF_EXTTS, 0, { 0, } },
- { "SMA1", SMA1, PTP_PF_NONE, 1, { 0, } },
- { "U.FL1", UFL1, PTP_PF_NONE, 1, { 0, } },
- { "SMA2", SMA2, PTP_PF_NONE, 2, { 0, } },
- { "U.FL2", UFL2, PTP_PF_NONE, 2, { 0, } },
+static const char ice_pin_names[][64] = {
+ "SDP0",
+ "SDP1",
+ "SDP2",
+ "SDP3",
+ "TIME_SYNC",
+ "1PPS"
};
-/**
- * ice_get_sma_config_e810t
- * @hw: pointer to the hw struct
- * @ptp_pins: pointer to the ptp_pin_desc struture
- *
- * Read the configuration of the SMA control logic and put it into the
- * ptp_pin_desc structure
- */
-static int
-ice_get_sma_config_e810t(struct ice_hw *hw, struct ptp_pin_desc *ptp_pins)
-{
- u8 data, i;
- int status;
-
- /* Read initial pin state */
- status = ice_read_sma_ctrl_e810t(hw, &data);
- if (status)
- return status;
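+/* Each descriptor below pairs a pin name with two GPIO numbers and two
+ * delays; judging by the TIME_SYNC ({ 4, -1 }) and 1PPS ({ -1, 5 }) rows,
+ * the GPIO pair looks like { input, output } with -1 marking an unsupported
+ * direction. The authoritative layout is struct ice_ptp_pin_desc in
+ * ice_ptp.h, which is not part of this hunk.
+ */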
+static const struct ice_ptp_pin_desc ice_pin_desc_e82x[] = {
+ /* name, gpio, delay */
+ { TIME_SYNC, { 4, -1 }, { 0, 0 }},
+ { ONE_PPS, { -1, 5 }, { 0, 11 }},
+};
- /* initialize with defaults */
- for (i = 0; i < NUM_PTP_PINS_E810T; i++) {
- strscpy(ptp_pins[i].name, ice_pin_desc_e810t[i].name,
- sizeof(ptp_pins[i].name));
- ptp_pins[i].index = ice_pin_desc_e810t[i].index;
- ptp_pins[i].func = ice_pin_desc_e810t[i].func;
- ptp_pins[i].chan = ice_pin_desc_e810t[i].chan;
- }
+static const struct ice_ptp_pin_desc ice_pin_desc_e825c[] = {
+ /* name, gpio, delay */
+ { SDP0, { 0, 0 }, { 15, 14 }},
+ { SDP1, { 1, 1 }, { 15, 14 }},
+ { SDP2, { 2, 2 }, { 15, 14 }},
+ { SDP3, { 3, 3 }, { 15, 14 }},
+ { TIME_SYNC, { 4, -1 }, { 11, 0 }},
+ { ONE_PPS, { -1, 5 }, { 0, 9 }},
+};
- /* Parse SMA1/UFL1 */
- switch (data & ICE_SMA1_MASK_E810T) {
- case ICE_SMA1_MASK_E810T:
- default:
- ptp_pins[SMA1].func = PTP_PF_NONE;
- ptp_pins[UFL1].func = PTP_PF_NONE;
- break;
- case ICE_SMA1_DIR_EN_E810T:
- ptp_pins[SMA1].func = PTP_PF_PEROUT;
- ptp_pins[UFL1].func = PTP_PF_NONE;
- break;
- case ICE_SMA1_TX_EN_E810T:
- ptp_pins[SMA1].func = PTP_PF_EXTTS;
- ptp_pins[UFL1].func = PTP_PF_NONE;
- break;
- case 0:
- ptp_pins[SMA1].func = PTP_PF_EXTTS;
- ptp_pins[UFL1].func = PTP_PF_PEROUT;
- break;
- }
+static const struct ice_ptp_pin_desc ice_pin_desc_e810[] = {
+ /* name, gpio, delay */
+ { SDP0, { 0, 0 }, { 0, 1 }},
+ { SDP1, { 1, 1 }, { 0, 1 }},
+ { SDP2, { 2, 2 }, { 0, 1 }},
+ { SDP3, { 3, 3 }, { 0, 1 }},
+ { ONE_PPS, { -1, 5 }, { 0, 1 }},
+};
- /* Parse SMA2/UFL2 */
- switch (data & ICE_SMA2_MASK_E810T) {
- case ICE_SMA2_MASK_E810T:
- default:
- ptp_pins[SMA2].func = PTP_PF_NONE;
- ptp_pins[UFL2].func = PTP_PF_NONE;
- break;
- case (ICE_SMA2_TX_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T):
- ptp_pins[SMA2].func = PTP_PF_EXTTS;
- ptp_pins[UFL2].func = PTP_PF_NONE;
- break;
- case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T):
- ptp_pins[SMA2].func = PTP_PF_PEROUT;
- ptp_pins[UFL2].func = PTP_PF_NONE;
- break;
- case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T):
- ptp_pins[SMA2].func = PTP_PF_NONE;
- ptp_pins[UFL2].func = PTP_PF_EXTTS;
- break;
- case ICE_SMA2_DIR_EN_E810T:
- ptp_pins[SMA2].func = PTP_PF_PEROUT;
- ptp_pins[UFL2].func = PTP_PF_EXTTS;
- break;
- }
+static const char ice_pin_names_dpll[][64] = {
+ "SDP20",
+ "SDP21",
+ "SDP22",
+ "SDP23",
+};
- return 0;
-}
+static const struct ice_ptp_pin_desc ice_pin_desc_dpll[] = {
+ /* name, gpio, delay */
+ { SDP0, { -1, 0 }, { 0, 1 }},
+ { SDP1, { 1, -1 }, { 0, 0 }},
+ { SDP2, { -1, 2 }, { 0, 1 }},
+ { SDP3, { 3, -1 }, { 0, 0 }},
+};
-/**
- * ice_ptp_set_sma_config_e810t
- * @hw: pointer to the hw struct
- * @ptp_pins: pointer to the ptp_pin_desc struture
- *
- * Set the configuration of the SMA control logic based on the configuration in
- * num_pins parameter
- */
-static int
-ice_ptp_set_sma_config_e810t(struct ice_hw *hw,
- const struct ptp_pin_desc *ptp_pins)
+static struct ice_pf *ice_get_ctrl_pf(struct ice_pf *pf)
{
- int status;
- u8 data;
-
- /* SMA1 and UFL1 cannot be set to TX at the same time */
- if (ptp_pins[SMA1].func == PTP_PF_PEROUT &&
- ptp_pins[UFL1].func == PTP_PF_PEROUT)
- return -EINVAL;
-
- /* SMA2 and UFL2 cannot be set to RX at the same time */
- if (ptp_pins[SMA2].func == PTP_PF_EXTTS &&
- ptp_pins[UFL2].func == PTP_PF_EXTTS)
- return -EINVAL;
+ return !pf->adapter ? NULL : pf->adapter->ctrl_pf;
+}
- /* Read initial pin state value */
- status = ice_read_sma_ctrl_e810t(hw, &data);
- if (status)
- return status;
-
- /* Set the right sate based on the desired configuration */
- data &= ~ICE_SMA1_MASK_E810T;
- if (ptp_pins[SMA1].func == PTP_PF_NONE &&
- ptp_pins[UFL1].func == PTP_PF_NONE) {
- dev_info(ice_hw_to_dev(hw), "SMA1 + U.FL1 disabled");
- data |= ICE_SMA1_MASK_E810T;
- } else if (ptp_pins[SMA1].func == PTP_PF_EXTTS &&
- ptp_pins[UFL1].func == PTP_PF_NONE) {
- dev_info(ice_hw_to_dev(hw), "SMA1 RX");
- data |= ICE_SMA1_TX_EN_E810T;
- } else if (ptp_pins[SMA1].func == PTP_PF_NONE &&
- ptp_pins[UFL1].func == PTP_PF_PEROUT) {
- /* U.FL 1 TX will always enable SMA 1 RX */
- dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX");
- } else if (ptp_pins[SMA1].func == PTP_PF_EXTTS &&
- ptp_pins[UFL1].func == PTP_PF_PEROUT) {
- dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX");
- } else if (ptp_pins[SMA1].func == PTP_PF_PEROUT &&
- ptp_pins[UFL1].func == PTP_PF_NONE) {
- dev_info(ice_hw_to_dev(hw), "SMA1 TX");
- data |= ICE_SMA1_DIR_EN_E810T;
- }
-
- data &= ~ICE_SMA2_MASK_E810T;
- if (ptp_pins[SMA2].func == PTP_PF_NONE &&
- ptp_pins[UFL2].func == PTP_PF_NONE) {
- dev_info(ice_hw_to_dev(hw), "SMA2 + U.FL2 disabled");
- data |= ICE_SMA2_MASK_E810T;
- } else if (ptp_pins[SMA2].func == PTP_PF_EXTTS &&
- ptp_pins[UFL2].func == PTP_PF_NONE) {
- dev_info(ice_hw_to_dev(hw), "SMA2 RX");
- data |= (ICE_SMA2_TX_EN_E810T |
- ICE_SMA2_UFL2_RX_DIS_E810T);
- } else if (ptp_pins[SMA2].func == PTP_PF_NONE &&
- ptp_pins[UFL2].func == PTP_PF_EXTTS) {
- dev_info(ice_hw_to_dev(hw), "UFL2 RX");
- data |= (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T);
- } else if (ptp_pins[SMA2].func == PTP_PF_PEROUT &&
- ptp_pins[UFL2].func == PTP_PF_NONE) {
- dev_info(ice_hw_to_dev(hw), "SMA2 TX");
- data |= (ICE_SMA2_DIR_EN_E810T |
- ICE_SMA2_UFL2_RX_DIS_E810T);
- } else if (ptp_pins[SMA2].func == PTP_PF_PEROUT &&
- ptp_pins[UFL2].func == PTP_PF_EXTTS) {
- dev_info(ice_hw_to_dev(hw), "SMA2 TX + U.FL2 RX");
- data |= ICE_SMA2_DIR_EN_E810T;
- }
-
- return ice_write_sma_ctrl_e810t(hw, data);
-}
-
-/**
- * ice_ptp_set_sma_e810t
- * @info: the driver's PTP info structure
- * @pin: pin index in kernel structure
- * @func: Pin function to be set (PTP_PF_NONE, PTP_PF_EXTTS or PTP_PF_PEROUT)
- *
- * Set the configuration of a single SMA pin
- */
-static int
-ice_ptp_set_sma_e810t(struct ptp_clock_info *info, unsigned int pin,
- enum ptp_pin_function func)
+static struct ice_ptp *ice_get_ctrl_ptp(struct ice_pf *pf)
{
- struct ptp_pin_desc ptp_pins[NUM_PTP_PINS_E810T];
- struct ice_pf *pf = ptp_info_to_pf(info);
- struct ice_hw *hw = &pf->hw;
- int err;
-
- if (pin < SMA1 || func > PTP_PF_PEROUT)
- return -EOPNOTSUPP;
-
- err = ice_get_sma_config_e810t(hw, ptp_pins);
- if (err)
- return err;
-
- /* Disable the same function on the other pin sharing the channel */
- if (pin == SMA1 && ptp_pins[UFL1].func == func)
- ptp_pins[UFL1].func = PTP_PF_NONE;
- if (pin == UFL1 && ptp_pins[SMA1].func == func)
- ptp_pins[SMA1].func = PTP_PF_NONE;
-
- if (pin == SMA2 && ptp_pins[UFL2].func == func)
- ptp_pins[UFL2].func = PTP_PF_NONE;
- if (pin == UFL2 && ptp_pins[SMA2].func == func)
- ptp_pins[SMA2].func = PTP_PF_NONE;
-
- /* Set up new pin function in the temp table */
- ptp_pins[pin].func = func;
+ struct ice_pf *ctrl_pf = ice_get_ctrl_pf(pf);
- return ice_ptp_set_sma_config_e810t(hw, ptp_pins);
+ return !ctrl_pf ? NULL : &ctrl_pf->ptp;
}
/**
- * ice_verify_pin_e810t
- * @info: the driver's PTP info structure
- * @pin: Pin index
- * @func: Assigned function
- * @chan: Assigned channel
+ * ice_ptp_find_pin_idx - Find pin index in ptp_pin_desc
+ * @pf: Board private structure
+ * @func: Pin function
+ * @chan: GPIO channel
*
- * Verify if pin supports requested pin function. If the Check pins consistency.
- * Reconfigure the SMA logic attached to the given pin to enable its
- * desired functionality
+ * Return: pin index when the pin is present, -1 otherwise
*/
-static int
-ice_verify_pin_e810t(struct ptp_clock_info *info, unsigned int pin,
- enum ptp_pin_function func, unsigned int chan)
+static int ice_ptp_find_pin_idx(struct ice_pf *pf, enum ptp_pin_function func,
+ unsigned int chan)
{
- /* Don't allow channel reassignment */
- if (chan != ice_pin_desc_e810t[pin].chan)
- return -EOPNOTSUPP;
+ const struct ptp_clock_info *info = &pf->ptp.info;
+ int i;
- /* Check if functions are properly assigned */
- switch (func) {
- case PTP_PF_NONE:
- break;
- case PTP_PF_EXTTS:
- if (pin == UFL1)
- return -EOPNOTSUPP;
- break;
- case PTP_PF_PEROUT:
- if (pin == UFL2 || pin == GNSS)
- return -EOPNOTSUPP;
- break;
- case PTP_PF_PHYSYNC:
- return -EOPNOTSUPP;
+ for (i = 0; i < info->n_pins; i++) {
+ if (info->pin_config[i].func == func &&
+ info->pin_config[i].chan == chan)
+ return i;
}
- return ice_ptp_set_sma_e810t(info, pin, func);
+ return -1;
}
/**
@@ -366,17 +200,30 @@ void ice_ptp_restore_timestamp_mode(struct ice_pf *pf)
* @sts: Optional parameter for holding a pair of system timestamps from
* the system clock. Will be ignored if NULL is given.
*/
-static u64
-ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
+u64 ice_ptp_read_src_clk_reg(struct ice_pf *pf,
+ struct ptp_system_timestamp *sts)
{
struct ice_hw *hw = &pf->hw;
u32 hi, lo, lo2;
u8 tmr_idx;
+ if (!ice_is_primary(hw))
+ hw = ice_get_primary_hw(pf);
+
tmr_idx = ice_get_ptp_src_clock_index(hw);
+ guard(spinlock)(&pf->adapter->ptp_gltsyn_time_lock);
/* Read the system timestamp pre PHC read */
ptp_read_system_prets(sts);
+ if (hw->mac_type == ICE_MAC_E830) {
+ u64 clk_time = rd64(hw, E830_GLTSYN_TIME_L(tmr_idx));
+
+ /* Read the system timestamp post PHC read */
+ ptp_read_system_postts(sts);
+
+ return clk_time;
+ }
+
lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
/* Read the system timestamp post PHC read */
@@ -531,7 +378,9 @@ ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx)
*/
void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx)
{
+ struct ice_e810_params *params;
struct ice_ptp_port *ptp_port;
+ unsigned long flags;
struct sk_buff *skb;
struct ice_pf *pf;
@@ -540,6 +389,7 @@ void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx)
ptp_port = container_of(tx, struct ice_ptp_port, tx);
pf = ptp_port_to_pf(ptp_port);
+ params = &pf->hw.ptp.phy.e810;
/* Drop packets which have waited for more than 2 seconds */
if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) {
@@ -556,11 +406,17 @@ void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx)
ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);
+ spin_lock_irqsave(&params->atqbal_wq.lock, flags);
+
+ params->atqbal_flags |= ATQBAL_FLAGS_INTR_IN_PROGRESS;
+
/* Write TS index to read to the PF register so the FW can read it */
- wr32(&pf->hw, PF_SB_ATQBAL,
- TS_LL_READ_TS_INTR | FIELD_PREP(TS_LL_READ_TS_IDX, idx) |
- TS_LL_READ_TS);
+ wr32(&pf->hw, REG_LL_PROXY_H,
+ REG_LL_PROXY_H_TS_INTR_ENA | FIELD_PREP(REG_LL_PROXY_H_TS_IDX, idx) |
+ REG_LL_PROXY_H_EXEC);
tx->last_ll_ts_idx_read = idx;
+
+ spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);
}
/**
@@ -571,35 +427,52 @@ void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx)
{
struct skb_shared_hwtstamps shhwtstamps = {};
u8 idx = tx->last_ll_ts_idx_read;
+ struct ice_e810_params *params;
struct ice_ptp_port *ptp_port;
u64 raw_tstamp, tstamp;
bool drop_ts = false;
struct sk_buff *skb;
+ unsigned long flags;
+ struct device *dev;
struct ice_pf *pf;
- u32 val;
+ u32 reg_ll_high;
if (!tx->init || tx->last_ll_ts_idx_read < 0)
return;
ptp_port = container_of(tx, struct ice_ptp_port, tx);
pf = ptp_port_to_pf(ptp_port);
+ dev = ice_pf_to_dev(pf);
+ params = &pf->hw.ptp.phy.e810;
ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);
- val = rd32(&pf->hw, PF_SB_ATQBAL);
+ spin_lock_irqsave(&params->atqbal_wq.lock, flags);
+
+ if (!(params->atqbal_flags & ATQBAL_FLAGS_INTR_IN_PROGRESS))
+ dev_dbg(dev, "%s: low latency interrupt request not in progress?\n",
+ __func__);
+
+ /* Read the low 32 bit value */
+ raw_tstamp = rd32(&pf->hw, REG_LL_PROXY_L);
+ /* Read the status together with high TS part */
+ reg_ll_high = rd32(&pf->hw, REG_LL_PROXY_H);
+
+ /* Wake up threads waiting on low latency interface */
+ params->atqbal_flags &= ~ATQBAL_FLAGS_INTR_IN_PROGRESS;
+
+ wake_up_locked(&params->atqbal_wq);
+
+ spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);
/* When the bit is cleared, the TS is ready in the register */
- if (val & TS_LL_READ_TS) {
+ if (reg_ll_high & REG_LL_PROXY_H_EXEC) {
dev_err(ice_pf_to_dev(pf), "Failed to get the Tx tstamp - FW not ready");
return;
}
/* High 8 bit value of the TS is on the bits 16:23 */
- raw_tstamp = FIELD_GET(TS_LL_READ_TS_HIGH, val);
- raw_tstamp <<= 32;
-
- /* Read the low 32 bit value */
- raw_tstamp |= (u64)rd32(&pf->hw, PF_SB_ATQBAH);
+ raw_tstamp |= ((u64)FIELD_GET(REG_LL_PROXY_H_TS_HIGH, reg_ll_high)) << 32;
/* Devices using this interface always verify the timestamp differs
* relative to the last cached timestamp value.
@@ -627,6 +500,9 @@ void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx)
if (tstamp) {
shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
ice_trace(tx_tstamp_complete, skb, idx);
+
+ /* Count the number of Tx timestamps that succeeded */
+ pf->ptp.tx_hwtstamp_good++;
}
skb_tstamp_tx(skb, &shhwtstamps);
@@ -685,6 +561,7 @@ static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
{
struct ice_ptp_port *ptp_port;
unsigned long flags;
+ u32 tstamp_good = 0;
struct ice_pf *pf;
struct ice_hw *hw;
u64 tstamp_ready;
@@ -785,11 +662,16 @@ skip_ts_read:
if (tstamp) {
shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
ice_trace(tx_tstamp_complete, skb, idx);
+
+ /* Count the number of Tx timestamps that succeeded */
+ tstamp_good++;
}
skb_tstamp_tx(skb, &shhwtstamps);
dev_kfree_skb_any(skb);
}
+
+ pf->ptp.tx_hwtstamp_good += tstamp_good;
}
/**
@@ -801,8 +683,8 @@ static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf)
struct ice_ptp_port *port;
unsigned int i;
- mutex_lock(&pf->ptp.ports_owner.lock);
- list_for_each_entry(port, &pf->ptp.ports_owner.ports, list_member) {
+ mutex_lock(&pf->adapter->ports.lock);
+ list_for_each_entry(port, &pf->adapter->ports.ports, list_node) {
struct ice_ptp_tx *tx = &port->tx;
if (!tx || !tx->init)
@@ -810,9 +692,9 @@ static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf)
ice_ptp_process_tx_tstamp(tx);
}
- mutex_unlock(&pf->ptp.ports_owner.lock);
+ mutex_unlock(&pf->adapter->ports.lock);
- for (i = 0; i < ICE_MAX_QUAD; i++) {
+ for (i = 0; i < ICE_GET_QUAD_NUM(pf->hw.ptp.num_lports); i++) {
u64 tstamp_ready;
int err;
@@ -975,7 +857,7 @@ ice_ptp_flush_all_tx_tracker(struct ice_pf *pf)
{
struct ice_ptp_port *port;
- list_for_each_entry(port, &pf->ptp.ports_owner.ports, list_member)
+ list_for_each_entry(port, &pf->adapter->ports.ports, list_node)
ice_ptp_flush_tx_tracker(ptp_port_to_pf(port), &port->tx);
}
@@ -1022,11 +904,13 @@ ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
* the timestamp block is shared for all ports in the same quad. To avoid
* ports using the same timestamp index, logically break the block of
* registers into chunks based on the port number.
+ *
+ * Return: 0 on success, -ENOMEM when out of memory
*/
-static int
-ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
+static int ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx,
+ u8 port)
{
- tx->block = port / ICE_PORTS_PER_QUAD;
+ tx->block = ICE_GET_QUAD_NUM(port);
tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X;
tx->len = INDEX_PER_PORT_E82X;
tx->has_ready_bitmap = 1;
@@ -1035,24 +919,27 @@ ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
}
/**
- * ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps
+ * ice_ptp_init_tx - Initialize tracking for Tx timestamps
* @pf: Board private structure
* @tx: the Tx tracking structure to initialize
+ * @port: the port this structure tracks
*
- * Initialize the Tx timestamp tracker for this PF. For E810 devices, each
- * port has its own block of timestamps, independent of the other ports.
+ * Initialize the Tx timestamp tracker for this PF. For all PHYs except E82X,
+ * each port has its own block of timestamps, independent of the other ports.
+ *
+ * Return: 0 on success, -ENOMEM when out of memory
*/
-static int
-ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
+static int ice_ptp_init_tx(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
{
- tx->block = pf->hw.port_info->lport;
+ tx->block = port;
tx->offset = 0;
- tx->len = INDEX_PER_PORT_E810;
+ tx->len = INDEX_PER_PORT;
+
/* The E810 PHY does not provide a timestamp ready bitmap. Instead,
* verify new timestamps against cached copy of the last read
* timestamp.
*/
- tx->has_ready_bitmap = 0;
+ tx->has_ready_bitmap = pf->hw.mac_type != ICE_MAC_E810;
return ice_ptp_alloc_tx_tracker(tx);
}
@@ -1166,26 +1053,6 @@ static void ice_ptp_reset_cached_phctime(struct ice_pf *pf)
}
/**
- * ice_ptp_read_time - Read the time from the device
- * @pf: Board private structure
- * @ts: timespec structure to hold the current time value
- * @sts: Optional parameter for holding a pair of system timestamps from
- * the system clock. Will be ignored if NULL is given.
- *
- * This function reads the source clock registers and stores them in a timespec.
- * However, since the registers are 64 bits of nanoseconds, we must convert the
- * result to a timespec before we can return.
- */
-static void
-ice_ptp_read_time(struct ice_pf *pf, struct timespec64 *ts,
- struct ptp_system_timestamp *sts)
-{
- u64 time_ns = ice_ptp_read_src_clk_reg(pf, sts);
-
- *ts = ns_to_timespec64(time_ns);
-}
-
-/**
* ice_ptp_write_init - Set PHC time to provided value
* @pf: Board private structure
* @ts: timespec structure that holds the new time value
@@ -1229,12 +1096,7 @@ static u64 ice_base_incval(struct ice_pf *pf)
struct ice_hw *hw = &pf->hw;
u64 incval;
- if (ice_is_e810(hw))
- incval = ICE_PTP_NOMINAL_INCVAL_E810;
- else if (ice_e82x_time_ref(hw) < NUM_ICE_TIME_REF_FREQ)
- incval = ice_e82x_nominal_incval(ice_e82x_time_ref(hw));
- else
- incval = UNKNOWN_INCVAL_E82X;
+ incval = ice_get_base_incval(hw);
dev_dbg(ice_pf_to_dev(pf), "PTP: using base increment value of 0x%016llx\n",
incval);
@@ -1248,8 +1110,8 @@ static u64 ice_base_incval(struct ice_pf *pf)
*/
static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port)
{
- int quad = port->port_num / ICE_PORTS_PER_QUAD;
int offs = port->port_num % ICE_PORTS_PER_QUAD;
+ int quad = ICE_GET_QUAD_NUM(port->port_num);
struct ice_pf *pf;
struct ice_hw *hw;
u32 val, phy_sts;
@@ -1362,15 +1224,25 @@ ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port)
struct ice_hw *hw = &pf->hw;
int err;
- if (ice_is_e810(hw))
- return 0;
-
mutex_lock(&ptp_port->ps_lock);
- kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ case ICE_MAC_E830:
+ err = 0;
+ break;
+ case ICE_MAC_GENERIC:
+ kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
- err = ice_stop_phy_timer_e82x(hw, port, true);
- if (err)
+ err = ice_stop_phy_timer_e82x(hw, port, true);
+ break;
+ case ICE_MAC_GENERIC_3K_E825:
+ err = ice_stop_phy_timer_eth56g(hw, port, true);
+ break;
+ default:
+ err = -ENODEV;
+ }
+ if (err && err != -EBUSY)
dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d down, err %d\n",
port, err);
@@ -1396,35 +1268,48 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
unsigned long flags;
int err;
- if (ice_is_e810(hw))
- return 0;
-
if (!ptp_port->link_up)
return ice_ptp_port_phy_stop(ptp_port);
mutex_lock(&ptp_port->ps_lock);
- kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ case ICE_MAC_E830:
+ err = 0;
+ break;
+ case ICE_MAC_GENERIC:
+ /* Start the PHY timer in Vernier mode */
+ kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
- /* temporarily disable Tx timestamps while calibrating PHY offset */
- spin_lock_irqsave(&ptp_port->tx.lock, flags);
- ptp_port->tx.calibrating = true;
- spin_unlock_irqrestore(&ptp_port->tx.lock, flags);
- ptp_port->tx_fifo_busy_cnt = 0;
+ /* temporarily disable Tx timestamps while calibrating
+ * PHY offset
+ */
+ spin_lock_irqsave(&ptp_port->tx.lock, flags);
+ ptp_port->tx.calibrating = true;
+ spin_unlock_irqrestore(&ptp_port->tx.lock, flags);
+ ptp_port->tx_fifo_busy_cnt = 0;
- /* Start the PHY timer in Vernier mode */
- err = ice_start_phy_timer_e82x(hw, port);
- if (err)
- goto out_unlock;
+ /* Start the PHY timer in Vernier mode */
+ err = ice_start_phy_timer_e82x(hw, port);
+ if (err)
+ break;
- /* Enable Tx timestamps right away */
- spin_lock_irqsave(&ptp_port->tx.lock, flags);
- ptp_port->tx.calibrating = false;
- spin_unlock_irqrestore(&ptp_port->tx.lock, flags);
+ /* Enable Tx timestamps right away */
+ spin_lock_irqsave(&ptp_port->tx.lock, flags);
+ ptp_port->tx.calibrating = false;
+ spin_unlock_irqrestore(&ptp_port->tx.lock, flags);
- kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work, 0);
+ kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work,
+ 0);
+ break;
+ case ICE_MAC_GENERIC_3K_E825:
+ err = ice_start_phy_timer_eth56g(hw, port);
+ break;
+ default:
+ err = -ENODEV;
+ }
-out_unlock:
if (err)
dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d up, err %d\n",
port, err);
@@ -1437,10 +1322,9 @@ out_unlock:
/**
* ice_ptp_link_change - Reconfigure PTP after link status change
* @pf: Board private structure
- * @port: Port for which the PHY start is set
* @linkup: Link is up or down
*/
-void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
+void ice_ptp_link_change(struct ice_pf *pf, bool linkup)
{
struct ice_ptp_port *ptp_port;
struct ice_hw *hw = &pf->hw;
@@ -1448,21 +1332,22 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
if (pf->ptp.state != ICE_PTP_READY)
return;
- if (WARN_ON_ONCE(port >= ICE_NUM_EXTERNAL_PORTS))
- return;
-
ptp_port = &pf->ptp.port;
- if (WARN_ON_ONCE(ptp_port->port_num != port))
- return;
/* Update cached link status for this port immediately */
ptp_port->link_up = linkup;
- switch (hw->phy_model) {
- case ICE_PHY_E810:
- /* Do not reconfigure E810 PHY */
+ /* Skip HW writes if reset is in progress */
+ if (pf->hw.reset_ongoing)
+ return;
+
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ case ICE_MAC_E830:
+ /* Do not reconfigure E810 or E830 PHY */
return;
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
+ case ICE_MAC_GENERIC_3K_E825:
ice_ptp_port_phy_restart(ptp_port);
return;
default:
@@ -1476,42 +1361,61 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
* @ena: bool value to enable or disable interrupt
* @threshold: Minimum number of packets at which intr is triggered
*
- * Utility function to enable or disable Tx timestamp interrupt and threshold
+ * Utility function to configure all the PHY interrupt settings, including
+ * whether the PHY interrupt is enabled, and what threshold to use. Also
+ * configures the E82X timestamp owner to react to interrupts from all PHYs.
+ *
+ * Return: 0 on success, -EOPNOTSUPP when the PHY model is incorrect, or
+ * another error code when configuring the PHY interrupt fails
*/
static int ice_ptp_cfg_phy_interrupt(struct ice_pf *pf, bool ena, u32 threshold)
{
+ struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
- int err = 0;
- int quad;
- u32 val;
ice_ptp_reset_ts_memory(hw);
- for (quad = 0; quad < ICE_MAX_QUAD; quad++) {
- err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG,
- &val);
- if (err)
- break;
-
- if (ena) {
- val |= Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M;
- val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_THR_M;
- val |= FIELD_PREP(Q_REG_TX_MEM_GBL_CFG_INTR_THR_M,
- threshold);
- } else {
- val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M;
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ case ICE_MAC_E830:
+ return 0;
+ case ICE_MAC_GENERIC: {
+ int quad;
+
+ for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports);
+ quad++) {
+ int err;
+
+ err = ice_phy_cfg_intr_e82x(hw, quad, ena, threshold);
+ if (err) {
+ dev_err(dev, "Failed to configure PHY interrupt for quad %d, err %d\n",
+ quad, err);
+ return err;
+ }
}
- err = ice_write_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG,
- val);
- if (err)
- break;
+ return 0;
}
+ case ICE_MAC_GENERIC_3K_E825: {
+ int port;
- if (err)
- dev_err(ice_pf_to_dev(pf), "PTP failed in intr ena, err %d\n",
- err);
- return err;
+ for (port = 0; port < hw->ptp.num_lports; port++) {
+ int err;
+
+ err = ice_phy_cfg_intr_eth56g(hw, port, ena, threshold);
+ if (err) {
+ dev_err(dev, "Failed to configure PHY interrupt for port %d, err %d\n",
+ port, err);
+ return err;
+ }
+ }
+
+ return 0;
+ }
+ case ICE_MAC_UNKNOWN:
+ default:
+ return -EOPNOTSUPP;
+ }
}
/**
@@ -1531,10 +1435,10 @@ static void ice_ptp_restart_all_phy(struct ice_pf *pf)
{
struct list_head *entry;
- list_for_each(entry, &pf->ptp.ports_owner.ports) {
+ list_for_each(entry, &pf->adapter->ports.ports) {
struct ice_ptp_port *port = list_entry(entry,
struct ice_ptp_port,
- list_member);
+ list_node);
if (port->link_up)
ice_ptp_port_phy_restart(port);
@@ -1578,6 +1482,10 @@ void ice_ptp_extts_event(struct ice_pf *pf)
u8 chan, tmr_idx;
u32 hi, lo;
+ /* Don't process timestamp events if PTP is not ready */
+ if (pf->ptp.state != ICE_PTP_READY)
+ return;
+
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
/* Event time is captured by one of the two matched registers
* GLTSYN_EVNT_L: 32 LSB of sampled time event
@@ -1585,45 +1493,62 @@ void ice_ptp_extts_event(struct ice_pf *pf)
* Event is defined in GLTSYN_EVNT_0 register
*/
for (chan = 0; chan < GLTSYN_EVNT_H_IDX_MAX; chan++) {
+ int pin_desc_idx;
+
/* Check if channel is enabled */
- if (pf->ptp.ext_ts_irq & (1 << chan)) {
- lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx));
- hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx));
- event.timestamp = (((u64)hi) << 32) | lo;
- event.type = PTP_CLOCK_EXTTS;
- event.index = chan;
-
- /* Fire event */
- ptp_clock_event(pf->ptp.clock, &event);
- pf->ptp.ext_ts_irq &= ~(1 << chan);
+ if (!(pf->ptp.ext_ts_irq & (1 << chan)))
+ continue;
+
+ lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx));
+ hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx));
+ event.timestamp = (u64)hi << 32 | lo;
+
+ /* Add delay compensation */
+ pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_EXTTS, chan);
+ if (pin_desc_idx >= 0) {
+ const struct ice_ptp_pin_desc *desc;
+
+ desc = &pf->ptp.ice_pin_desc[pin_desc_idx];
+ event.timestamp -= desc->delay[0];
}
+
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = chan;
+ pf->ptp.ext_ts_irq &= ~(1 << chan);
+ ptp_clock_event(pf->ptp.clock, &event);
}
}
/**
* ice_ptp_cfg_extts - Configure EXTTS pin and channel
* @pf: Board private structure
- * @ena: true to enable; false to disable
- * @chan: GPIO channel (0-3)
- * @gpio_pin: GPIO pin
- * @extts_flags: request flags from the ptp_extts_request.flags
+ * @rq: External timestamp request
+ * @on: Enable/disable flag
+ *
+ * Configure an external timestamp event on the requested channel.
+ *
+ * Return: 0 on success, negative error code otherwise
*/
-static int
-ice_ptp_cfg_extts(struct ice_pf *pf, bool ena, unsigned int chan, u32 gpio_pin,
- unsigned int extts_flags)
+static int ice_ptp_cfg_extts(struct ice_pf *pf, struct ptp_extts_request *rq,
+ int on)
{
- u32 func, aux_reg, gpio_reg, irq_reg;
+ u32 aux_reg, gpio_reg, irq_reg;
struct ice_hw *hw = &pf->hw;
+ unsigned int chan, gpio_pin;
+ int pin_desc_idx;
u8 tmr_idx;
- if (chan > (unsigned int)pf->ptp.info.n_ext_ts)
- return -EINVAL;
-
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+ chan = rq->index;
+ pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_EXTTS, chan);
+ if (pin_desc_idx < 0)
+ return -EIO;
+
+ gpio_pin = pf->ptp.ice_pin_desc[pin_desc_idx].gpio[0];
irq_reg = rd32(hw, PFINT_OICR_ENA);
- if (ena) {
+ if (on) {
/* Enable the interrupt */
irq_reg |= PFINT_OICR_TSYN_EVNT_M;
aux_reg = GLTSYN_AUX_IN_0_INT_ENA_M;
@@ -1632,24 +1557,32 @@ ice_ptp_cfg_extts(struct ice_pf *pf, bool ena, unsigned int chan, u32 gpio_pin,
#define GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE BIT(1)
/* set event level to requested edge */
- if (extts_flags & PTP_FALLING_EDGE)
+ if (rq->flags & PTP_FALLING_EDGE)
aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE;
- if (extts_flags & PTP_RISING_EDGE)
+ if (rq->flags & PTP_RISING_EDGE)
aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE;
/* Write GPIO CTL reg.
* 0x1 is input sampled by EVENT register(channel)
* + num_in_channels * tmr_idx
*/
- func = 1 + chan + (tmr_idx * 3);
- gpio_reg = FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M, func);
- pf->ptp.ext_ts_chan |= (1 << chan);
+ gpio_reg = FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M,
+ 1 + chan + (tmr_idx * 3));
} else {
+ bool last_enabled = true;
+
/* clear the values we set to reset defaults */
aux_reg = 0;
gpio_reg = 0;
- pf->ptp.ext_ts_chan &= ~(1 << chan);
- if (!pf->ptp.ext_ts_chan)
+
+ for (unsigned int i = 0; i < pf->ptp.info.n_ext_ts; i++)
+ if ((pf->ptp.extts_rqs[i].flags &
+ PTP_ENABLE_FEATURE) &&
+ i != chan) {
+ last_enabled = false;
+ }
+
+ if (last_enabled)
irq_reg &= ~PFINT_OICR_TSYN_EVNT_M;
}
@@ -1661,253 +1594,282 @@ ice_ptp_cfg_extts(struct ice_pf *pf, bool ena, unsigned int chan, u32 gpio_pin,
}
/**
- * ice_ptp_cfg_clkout - Configure clock to generate periodic wave
+ * ice_ptp_disable_all_extts - Disable all EXTTS channels
* @pf: Board private structure
- * @chan: GPIO channel (0-3)
- * @config: desired periodic clk configuration. NULL will disable channel
- * @store: If set to true the values will be stored
- *
- * Configure the internal clock generator modules to generate the clock wave of
- * specified period.
*/
-static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan,
- struct ice_perout_channel *config, bool store)
+static void ice_ptp_disable_all_extts(struct ice_pf *pf)
{
- u64 current_time, period, start_time, phase;
- struct ice_hw *hw = &pf->hw;
- u32 func, val, gpio_pin;
- u8 tmr_idx;
+ for (unsigned int i = 0; i < pf->ptp.info.n_ext_ts ; i++)
+ if (pf->ptp.extts_rqs[i].flags & PTP_ENABLE_FEATURE)
+ ice_ptp_cfg_extts(pf, &pf->ptp.extts_rqs[i],
+ false);
- tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+ synchronize_irq(pf->oicr_irq.virq);
+}
- /* 0. Reset mode & out_en in AUX_OUT */
- wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), 0);
+/**
+ * ice_ptp_enable_all_extts - Enable all EXTTS channels
+ * @pf: Board private structure
+ *
+ * Called during reset to restore user configuration.
+ */
+static void ice_ptp_enable_all_extts(struct ice_pf *pf)
+{
+ for (unsigned int i = 0; i < pf->ptp.info.n_ext_ts ; i++)
+ if (pf->ptp.extts_rqs[i].flags & PTP_ENABLE_FEATURE)
+ ice_ptp_cfg_extts(pf, &pf->ptp.extts_rqs[i],
+ true);
+}
- /* If we're disabling the output, clear out CLKO and TGT and keep
- * output level low
- */
- if (!config || !config->ena) {
- wr32(hw, GLTSYN_CLKO(chan, tmr_idx), 0);
- wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), 0);
- wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), 0);
+/**
+ * ice_ptp_write_perout - Write periodic wave parameters to HW
+ * @hw: pointer to the HW struct
+ * @chan: target channel
+ * @gpio_pin: target GPIO pin
+ * @start: target time to start periodic output
+ * @period: target period
+ *
+ * Return: 0 on success, negative error code otherwise
+ */
+static int ice_ptp_write_perout(struct ice_hw *hw, unsigned int chan,
+ unsigned int gpio_pin, u64 start, u64 period)
+{
- val = GLGEN_GPIO_CTL_PIN_DIR_M;
- gpio_pin = pf->ptp.perout_channels[chan].gpio_pin;
- wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);
+ u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+ u32 val = 0;
- /* Store the value if requested */
- if (store)
- memset(&pf->ptp.perout_channels[chan], 0,
- sizeof(struct ice_perout_channel));
+ /* 0. Reset mode & out_en in AUX_OUT */
+ wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), 0);
- return 0;
- }
- period = config->period;
- start_time = config->start_time;
- div64_u64_rem(start_time, period, &phase);
- gpio_pin = config->gpio_pin;
+ if (hw->mac_type == ICE_MAC_GENERIC_3K_E825) {
+ int err;
- /* 1. Write clkout with half of required period value */
- if (period & 0x1) {
- dev_err(ice_pf_to_dev(pf), "CLK Period must be an even value\n");
- goto err;
+ /* Enable/disable CGU 1PPS output for E825C */
+ err = ice_tspll_cfg_pps_out_e825c(hw, !!period);
+ if (err)
+ return err;
}
+ /* 1. Write perout with half of required period value.
+ * HW toggles output when source clock hits the TGT and then adds
+ * GLTSYN_CLKO value to the target, so it ends up with 50% duty cycle.
+ */
period >>= 1;
- /* For proper operation, the GLTSYN_CLKO must be larger than clock tick
+ /* For proper operation, GLTSYN_CLKO must be larger than clock tick and
+ * period has to fit in 32 bit register.
*/
#define MIN_PULSE 3
- if (period <= MIN_PULSE || period > U32_MAX) {
- dev_err(ice_pf_to_dev(pf), "CLK Period must be > %d && < 2^33",
- MIN_PULSE * 2);
- goto err;
+ if (!!period && (period <= MIN_PULSE || period > U32_MAX)) {
+ dev_err(ice_hw_to_dev(hw), "CLK period ticks must be >= %d && <= 2^32",
+ MIN_PULSE);
+ return -EIO;
}
wr32(hw, GLTSYN_CLKO(chan, tmr_idx), lower_32_bits(period));
- /* Allow time for programming before start_time is hit */
- current_time = ice_ptp_read_src_clk_reg(pf, NULL);
-
- /* if start time is in the past start the timer at the nearest second
- * maintaining phase
- */
- if (start_time < current_time)
- start_time = div64_u64(current_time + NSEC_PER_SEC - 1,
- NSEC_PER_SEC) * NSEC_PER_SEC + phase;
-
- if (ice_is_e810(hw))
- start_time -= E810_OUT_PROP_DELAY_NS;
- else
- start_time -= ice_e82x_pps_delay(ice_e82x_time_ref(hw));
-
/* 2. Write TARGET time */
- wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start_time));
- wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), upper_32_bits(start_time));
+ wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start));
+ wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), upper_32_bits(start));
/* 3. Write AUX_OUT register */
- val = GLTSYN_AUX_OUT_0_OUT_ENA_M | GLTSYN_AUX_OUT_0_OUTMOD_M;
+ if (!!period)
+ val = GLTSYN_AUX_OUT_0_OUT_ENA_M | GLTSYN_AUX_OUT_0_OUTMOD_M;
wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), val);
/* 4. write GPIO CTL reg */
- func = 8 + chan + (tmr_idx * 4);
- val = GLGEN_GPIO_CTL_PIN_DIR_M |
- FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M, func);
+ val = GLGEN_GPIO_CTL_PIN_DIR_M;
+ if (!!period)
+ val |= FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M,
+ 8 + chan + (tmr_idx * 4));
+
wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);
+ ice_flush(hw);
+
+ return 0;
+}
+
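A minimal stand-alone sketch (not part of the patch, names and values are illustrative only) of the duty-cycle arithmetic described in the comment above: the hardware toggles the pin when the source clock reaches TGT and then adds GLTSYN_CLKO to the target, so programming CLKO = period / 2 yields one full cycle per requested period.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t period_ns = 1000000000ULL;	/* requested 1 s period */
	uint64_t clko = period_ns >> 1;		/* half period, as written to GLTSYN_CLKO */
	uint64_t t = 0;				/* first target time */
	int level = 0;

	/* Simulate the first few toggles: output changes every clko ns,
	 * giving a 50% duty cycle over the full period.
	 */
	for (int i = 0; i < 4; i++) {
		level ^= 1;
		printf("toggle at %llu ns -> level %d\n",
		       (unsigned long long)t, level);
		t += clko;			/* HW adds CLKO after each toggle */
	}
	return 0;
}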
+/**
+ * ice_ptp_cfg_perout - Configure clock to generate periodic wave
+ * @pf: Board private structure
+ * @rq: Periodic output request
+ * @on: Enable/disable flag
+ *
+ * Configure the internal clock generator modules to generate the clock wave of
+ * specified period.
+ *
+ * Return: 0 on success, negative error code otherwise
+ */
+static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq,
+ int on)
+{
+ unsigned int gpio_pin, prop_delay_ns;
+ u64 clk, period, start, phase;
+ struct ice_hw *hw = &pf->hw;
+ int pin_desc_idx;
+
+ pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_PEROUT, rq->index);
+ if (pin_desc_idx < 0)
+ return -EIO;
+
+ gpio_pin = pf->ptp.ice_pin_desc[pin_desc_idx].gpio[1];
+ prop_delay_ns = pf->ptp.ice_pin_desc[pin_desc_idx].delay[1];
+ period = rq->period.sec * NSEC_PER_SEC + rq->period.nsec;
- /* Store the value if requested */
- if (store) {
- memcpy(&pf->ptp.perout_channels[chan], config,
- sizeof(struct ice_perout_channel));
- pf->ptp.perout_channels[chan].start_time = phase;
+ /* If we're disabling the output or period is 0, clear out CLKO and TGT
+ * and keep output level low.
+ */
+ if (!on || !period)
+ return ice_ptp_write_perout(hw, rq->index, gpio_pin, 0, 0);
+
+ if (strncmp(pf->ptp.pin_desc[pin_desc_idx].name, "1PPS", 64) == 0 &&
+ period != NSEC_PER_SEC && hw->mac_type == ICE_MAC_GENERIC) {
+ dev_err(ice_pf_to_dev(pf), "1PPS pin supports only 1 s period\n");
+ return -EOPNOTSUPP;
}
- return 0;
-err:
- dev_err(ice_pf_to_dev(pf), "PTP failed to cfg per_clk\n");
- return -EFAULT;
+ if (period & 0x1) {
+ dev_err(ice_pf_to_dev(pf), "CLK Period must be an even value\n");
+ return -EIO;
+ }
+
+ start = rq->start.sec * NSEC_PER_SEC + rq->start.nsec;
+
+ /* If PTP_PEROUT_PHASE is set, rq has phase instead of start time */
+ if (rq->flags & PTP_PEROUT_PHASE)
+ phase = start;
+ else
+ div64_u64_rem(start, period, &phase);
+
+ /* If only a phase was given, or the start time is in the past, start the
+ * timer at the next multiple of the period (keeping the requested phase)
+ * at least 0.5 seconds from now, so we have time to write it to HW.
+ */
+ clk = ice_ptp_read_src_clk_reg(pf, NULL) + NSEC_PER_MSEC * 500;
+ if (rq->flags & PTP_PEROUT_PHASE || start <= clk - prop_delay_ns)
+ start = div64_u64(clk + period - 1, period) * period + phase;
+
+ /* Compensate for propagation delay from the generator to the pin. */
+ start -= prop_delay_ns;
+
+ return ice_ptp_write_perout(hw, rq->index, gpio_pin, start, period);
}
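A minimal stand-alone sketch (not part of the patch; the helper name and example values are illustrative only) of the start-time rounding performed above: when only a phase is given or the requested start is already in the past, the output starts at the next period boundary at least ~0.5 s away, keeping the phase and compensating for the pin propagation delay.

#include <stdio.h>
#include <stdint.h>

static uint64_t next_start(uint64_t now_ns, uint64_t period_ns,
			   uint64_t phase_ns, uint64_t prop_delay_ns)
{
	/* Leave ~0.5 s of margin so the registers can be written in time */
	uint64_t clk = now_ns + 500000000ULL;
	/* Round up to the next period boundary and re-apply the phase */
	uint64_t start = (clk + period_ns - 1) / period_ns * period_ns + phase_ns;

	/* Compensate for propagation delay from the generator to the pin */
	return start - prop_delay_ns;
}

int main(void)
{
	/* e.g. now = 10.3 s, 1 s period, 250 ms phase, 20 ns pin delay */
	printf("%llu\n", (unsigned long long)
	       next_start(10300000000ULL, 1000000000ULL, 250000000ULL, 20));
	return 0;
}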
/**
- * ice_ptp_disable_all_clkout - Disable all currently configured outputs
- * @pf: pointer to the PF structure
+ * ice_ptp_disable_all_perout - Disable all currently configured outputs
+ * @pf: Board private structure
*
* Disable all currently configured clock outputs. This is necessary before
- * certain changes to the PTP hardware clock. Use ice_ptp_enable_all_clkout to
+ * certain changes to the PTP hardware clock. Use ice_ptp_enable_all_perout to
* re-enable the clocks again.
*/
-static void ice_ptp_disable_all_clkout(struct ice_pf *pf)
+static void ice_ptp_disable_all_perout(struct ice_pf *pf)
{
- uint i;
-
- for (i = 0; i < pf->ptp.info.n_per_out; i++)
- if (pf->ptp.perout_channels[i].ena)
- ice_ptp_cfg_clkout(pf, i, NULL, false);
+ for (unsigned int i = 0; i < pf->ptp.info.n_per_out; i++)
+ if (pf->ptp.perout_rqs[i].period.sec ||
+ pf->ptp.perout_rqs[i].period.nsec)
+ ice_ptp_cfg_perout(pf, &pf->ptp.perout_rqs[i],
+ false);
}
/**
- * ice_ptp_enable_all_clkout - Enable all configured periodic clock outputs
- * @pf: pointer to the PF structure
+ * ice_ptp_enable_all_perout - Enable all configured periodic clock outputs
+ * @pf: Board private structure
*
* Enable all currently configured clock outputs. Use this after
- * ice_ptp_disable_all_clkout to reconfigure the output signals according to
+ * ice_ptp_disable_all_perout to reconfigure the output signals according to
* their configuration.
*/
-static void ice_ptp_enable_all_clkout(struct ice_pf *pf)
+static void ice_ptp_enable_all_perout(struct ice_pf *pf)
{
- uint i;
-
- for (i = 0; i < pf->ptp.info.n_per_out; i++)
- if (pf->ptp.perout_channels[i].ena)
- ice_ptp_cfg_clkout(pf, i, &pf->ptp.perout_channels[i],
- false);
+ for (unsigned int i = 0; i < pf->ptp.info.n_per_out; i++)
+ if (pf->ptp.perout_rqs[i].period.sec ||
+ pf->ptp.perout_rqs[i].period.nsec)
+ ice_ptp_cfg_perout(pf, &pf->ptp.perout_rqs[i],
+ true);
}
/**
- * ice_ptp_gpio_enable_e810 - Enable/disable ancillary features of PHC
+ * ice_verify_pin - verify if pin supports requested pin function
* @info: the driver's PTP info structure
- * @rq: The requested feature to change
- * @on: Enable/disable flag
+ * @pin: Pin index
+ * @func: Assigned function
+ * @chan: Assigned channel
+ *
+ * Return: 0 on success, -EOPNOTSUPP when the function is not supported.
*/
-static int
-ice_ptp_gpio_enable_e810(struct ptp_clock_info *info,
- struct ptp_clock_request *rq, int on)
+static int ice_verify_pin(struct ptp_clock_info *info, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
{
struct ice_pf *pf = ptp_info_to_pf(info);
- struct ice_perout_channel clk_cfg = {0};
- bool sma_pres = false;
- unsigned int chan;
- u32 gpio_pin;
- int err;
+ const struct ice_ptp_pin_desc *pin_desc;
- if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL))
- sma_pres = true;
+ pin_desc = &pf->ptp.ice_pin_desc[pin];
- switch (rq->type) {
- case PTP_CLK_REQ_PEROUT:
- chan = rq->perout.index;
- if (sma_pres) {
- if (chan == ice_pin_desc_e810t[SMA1].chan)
- clk_cfg.gpio_pin = GPIO_20;
- else if (chan == ice_pin_desc_e810t[SMA2].chan)
- clk_cfg.gpio_pin = GPIO_22;
- else
- return -1;
- } else if (ice_is_e810t(&pf->hw)) {
- if (chan == 0)
- clk_cfg.gpio_pin = GPIO_20;
- else
- clk_cfg.gpio_pin = GPIO_22;
- } else if (chan == PPS_CLK_GEN_CHAN) {
- clk_cfg.gpio_pin = PPS_PIN_INDEX;
- } else {
- clk_cfg.gpio_pin = chan;
- }
-
- clk_cfg.period = ((rq->perout.period.sec * NSEC_PER_SEC) +
- rq->perout.period.nsec);
- clk_cfg.start_time = ((rq->perout.start.sec * NSEC_PER_SEC) +
- rq->perout.start.nsec);
- clk_cfg.ena = !!on;
-
- err = ice_ptp_cfg_clkout(pf, chan, &clk_cfg, true);
+ /* Is assigned function allowed? */
+ switch (func) {
+ case PTP_PF_EXTTS:
+ if (pin_desc->gpio[0] < 0)
+ return -EOPNOTSUPP;
break;
- case PTP_CLK_REQ_EXTTS:
- chan = rq->extts.index;
- if (sma_pres) {
- if (chan < ice_pin_desc_e810t[SMA2].chan)
- gpio_pin = GPIO_21;
- else
- gpio_pin = GPIO_23;
- } else if (ice_is_e810t(&pf->hw)) {
- if (chan == 0)
- gpio_pin = GPIO_21;
- else
- gpio_pin = GPIO_23;
- } else {
- gpio_pin = chan;
- }
-
- err = ice_ptp_cfg_extts(pf, !!on, chan, gpio_pin,
- rq->extts.flags);
+ case PTP_PF_PEROUT:
+ if (pin_desc->gpio[1] < 0)
+ return -EOPNOTSUPP;
break;
+ case PTP_PF_NONE:
+ break;
+ case PTP_PF_PHYSYNC:
default:
return -EOPNOTSUPP;
}
- return err;
+ return 0;
}
/**
- * ice_ptp_gpio_enable_e823 - Enable/disable ancillary features of PHC
- * @info: the driver's PTP info structure
+ * ice_ptp_gpio_enable - Enable/disable ancillary features of PHC
+ * @info: The driver's PTP info structure
* @rq: The requested feature to change
* @on: Enable/disable flag
+ *
+ * Return: 0 on success, negative error code otherwise
*/
-static int ice_ptp_gpio_enable_e823(struct ptp_clock_info *info,
- struct ptp_clock_request *rq, int on)
+static int ice_ptp_gpio_enable(struct ptp_clock_info *info,
+ struct ptp_clock_request *rq, int on)
{
struct ice_pf *pf = ptp_info_to_pf(info);
- struct ice_perout_channel clk_cfg = {0};
int err;
switch (rq->type) {
- case PTP_CLK_REQ_PPS:
- clk_cfg.gpio_pin = PPS_PIN_INDEX;
- clk_cfg.period = NSEC_PER_SEC;
- clk_cfg.ena = !!on;
+ case PTP_CLK_REQ_PEROUT:
+ {
+ struct ptp_perout_request *cached =
+ &pf->ptp.perout_rqs[rq->perout.index];
- err = ice_ptp_cfg_clkout(pf, PPS_CLK_GEN_CHAN, &clk_cfg, true);
- break;
+ err = ice_ptp_cfg_perout(pf, &rq->perout, on);
+ if (!err) {
+ *cached = rq->perout;
+ } else {
+ cached->period.sec = 0;
+ cached->period.nsec = 0;
+ }
+ return err;
+ }
case PTP_CLK_REQ_EXTTS:
- err = ice_ptp_cfg_extts(pf, !!on, rq->extts.index,
- TIME_SYNC_PIN_INDEX, rq->extts.flags);
- break;
+ {
+ struct ptp_extts_request *cached =
+ &pf->ptp.extts_rqs[rq->extts.index];
+
+ err = ice_ptp_cfg_extts(pf, &rq->extts, on);
+ if (!err)
+ *cached = rq->extts;
+ else
+ cached->flags &= ~PTP_ENABLE_FEATURE;
+ return err;
+ }
default:
return -EOPNOTSUPP;
}
-
- return err;
}
/**
@@ -1925,16 +1887,10 @@ ice_ptp_gettimex64(struct ptp_clock_info *info, struct timespec64 *ts,
struct ptp_system_timestamp *sts)
{
struct ice_pf *pf = ptp_info_to_pf(info);
- struct ice_hw *hw = &pf->hw;
-
- if (!ice_ptp_lock(hw)) {
- dev_err(ice_pf_to_dev(pf), "PTP failed to get time\n");
- return -EBUSY;
- }
-
- ice_ptp_read_time(pf, ts, sts);
- ice_ptp_unlock(hw);
+ u64 time_ns;
+ time_ns = ice_ptp_read_src_clk_reg(pf, sts);
+ *ts = ns_to_timespec64(time_ns);
return 0;
}
@@ -1954,11 +1910,14 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
struct ice_hw *hw = &pf->hw;
int err;
- /* For Vernier mode, we need to recalibrate after new settime
- * Start with disabling timestamp block
+ /* For Vernier mode on E82X, we need to recalibrate after new settime.
+ * Start with marking timestamps as invalid.
*/
- if (pf->ptp.port.link_up)
- ice_ptp_port_phy_stop(&pf->ptp.port);
+ if (hw->mac_type == ICE_MAC_GENERIC) {
+ err = ice_ptp_clear_phy_offset_ready_e82x(hw);
+ if (err)
+ dev_warn(ice_pf_to_dev(pf), "Failed to mark timestamps as invalid before settime\n");
+ }
if (!ice_ptp_lock(hw)) {
err = -EBUSY;
@@ -1966,7 +1925,7 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
}
/* Disable periodic outputs */
- ice_ptp_disable_all_clkout(pf);
+ ice_ptp_disable_all_perout(pf);
err = ice_ptp_write_init(pf, &ts64);
ice_ptp_unlock(hw);
@@ -1975,10 +1934,10 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
ice_ptp_reset_cached_phctime(pf);
/* Reenable periodic outputs */
- ice_ptp_enable_all_clkout(pf);
+ ice_ptp_enable_all_perout(pf);
/* Recalibrate and re-enable timestamp blocks for E822/E823 */
- if (hw->phy_model == ICE_PHY_E82X)
+ if (hw->mac_type == ICE_MAC_GENERIC)
ice_ptp_restart_all_phy(pf);
exit:
if (err) {
@@ -2037,12 +1996,12 @@ static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
}
/* Disable periodic outputs */
- ice_ptp_disable_all_clkout(pf);
+ ice_ptp_disable_all_perout(pf);
err = ice_ptp_write_adj(pf, delta);
/* Reenable periodic outputs */
- ice_ptp_enable_all_clkout(pf);
+ ice_ptp_enable_all_perout(pf);
ice_ptp_unlock(hw);
@@ -2056,92 +2015,158 @@ static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
return 0;
}
+/**
+ * struct ice_crosststamp_cfg - Device cross timestamp configuration
+ * @lock_reg: The hardware semaphore lock to use
+ * @lock_busy: Bit in the semaphore lock indicating the lock is busy
+ * @ctl_reg: The hardware register to request cross timestamp
+ * @ctl_active: Bit in the control register to request cross timestamp
+ * @art_time_l: Lower 32-bits of ART system time
+ * @art_time_h: Upper 32-bits of ART system time
+ * @dev_time_l: Lower 32-bits of device time (per timer index)
+ * @dev_time_h: Upper 32-bits of device time (per timer index)
+ */
+struct ice_crosststamp_cfg {
+ /* HW semaphore lock register */
+ u32 lock_reg;
+ u32 lock_busy;
+
+ /* Capture control register */
+ u32 ctl_reg;
+ u32 ctl_active;
+
+ /* Time storage */
+ u32 art_time_l;
+ u32 art_time_h;
+ u32 dev_time_l[2];
+ u32 dev_time_h[2];
+};
+
+static const struct ice_crosststamp_cfg ice_crosststamp_cfg_e82x = {
+ .lock_reg = PFHH_SEM,
+ .lock_busy = PFHH_SEM_BUSY_M,
+ .ctl_reg = GLHH_ART_CTL,
+ .ctl_active = GLHH_ART_CTL_ACTIVE_M,
+ .art_time_l = GLHH_ART_TIME_L,
+ .art_time_h = GLHH_ART_TIME_H,
+ .dev_time_l[0] = GLTSYN_HHTIME_L(0),
+ .dev_time_h[0] = GLTSYN_HHTIME_H(0),
+ .dev_time_l[1] = GLTSYN_HHTIME_L(1),
+ .dev_time_h[1] = GLTSYN_HHTIME_H(1),
+};
+
#ifdef CONFIG_ICE_HWTS
+static const struct ice_crosststamp_cfg ice_crosststamp_cfg_e830 = {
+ .lock_reg = E830_PFPTM_SEM,
+ .lock_busy = E830_PFPTM_SEM_BUSY_M,
+ .ctl_reg = E830_GLPTM_ART_CTL,
+ .ctl_active = E830_GLPTM_ART_CTL_ACTIVE_M,
+ .art_time_l = E830_GLPTM_ART_TIME_L,
+ .art_time_h = E830_GLPTM_ART_TIME_H,
+ .dev_time_l[0] = E830_GLTSYN_PTMTIME_L(0),
+ .dev_time_h[0] = E830_GLTSYN_PTMTIME_H(0),
+ .dev_time_l[1] = E830_GLTSYN_PTMTIME_L(1),
+ .dev_time_h[1] = E830_GLTSYN_PTMTIME_H(1),
+};
+
+#endif /* CONFIG_ICE_HWTS */
+/**
+ * struct ice_crosststamp_ctx - Device cross timestamp context
+ * @snapshot: snapshot of system clocks for historic interpolation
+ * @pf: pointer to the PF private structure
+ * @cfg: pointer to hardware configuration for cross timestamp
+ */
+struct ice_crosststamp_ctx {
+ struct system_time_snapshot snapshot;
+ struct ice_pf *pf;
+ const struct ice_crosststamp_cfg *cfg;
+};
+
/**
- * ice_ptp_get_syncdevicetime - Get the cross time stamp info
+ * ice_capture_crosststamp - Capture a device/system cross timestamp
* @device: Current device time
* @system: System counter value read synchronously with device time
- * @ctx: Context provided by timekeeping code
+ * @__ctx: Context passed from ice_ptp_getcrosststamp
*
* Read device and system (ART) clock simultaneously and return the corrected
* clock values in ns.
+ *
+ * Return: zero on success, or a negative error code on failure.
*/
-static int
-ice_ptp_get_syncdevicetime(ktime_t *device,
- struct system_counterval_t *system,
- void *ctx)
+static int ice_capture_crosststamp(ktime_t *device,
+ struct system_counterval_t *system,
+ void *__ctx)
{
- struct ice_pf *pf = (struct ice_pf *)ctx;
- struct ice_hw *hw = &pf->hw;
- u32 hh_lock, hh_art_ctl;
- int i;
+ struct ice_crosststamp_ctx *ctx = __ctx;
+ const struct ice_crosststamp_cfg *cfg;
+ u32 lock, ctl, ts_lo, ts_hi, tmr_idx;
+ struct ice_pf *pf;
+ struct ice_hw *hw;
+ int err;
+ u64 ts;
-#define MAX_HH_HW_LOCK_TRIES 5
-#define MAX_HH_CTL_LOCK_TRIES 100
+ cfg = ctx->cfg;
+ pf = ctx->pf;
+ hw = &pf->hw;
- for (i = 0; i < MAX_HH_HW_LOCK_TRIES; i++) {
- /* Get the HW lock */
- hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
- if (hh_lock & PFHH_SEM_BUSY_M) {
- usleep_range(10000, 15000);
- continue;
- }
- break;
- }
- if (hh_lock & PFHH_SEM_BUSY_M) {
- dev_err(ice_pf_to_dev(pf), "PTP failed to get hh lock\n");
+ tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
+ if (tmr_idx > 1)
+ return -EINVAL;
+
+ /* Poll until we obtain the cross-timestamp hardware semaphore */
+ err = rd32_poll_timeout(hw, cfg->lock_reg, lock,
+ !(lock & cfg->lock_busy),
+ 10 * USEC_PER_MSEC, 50 * USEC_PER_MSEC);
+ if (err) {
+ dev_err(ice_pf_to_dev(pf), "PTP failed to get cross timestamp lock\n");
return -EBUSY;
}
+ /* Snapshot system time for historic interpolation */
+ ktime_get_snapshot(&ctx->snapshot);
+
/* Program cmd to master timer */
ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME);
/* Start the ART and device clock sync sequence */
- hh_art_ctl = rd32(hw, GLHH_ART_CTL);
- hh_art_ctl = hh_art_ctl | GLHH_ART_CTL_ACTIVE_M;
- wr32(hw, GLHH_ART_CTL, hh_art_ctl);
-
- for (i = 0; i < MAX_HH_CTL_LOCK_TRIES; i++) {
- /* Wait for sync to complete */
- hh_art_ctl = rd32(hw, GLHH_ART_CTL);
- if (hh_art_ctl & GLHH_ART_CTL_ACTIVE_M) {
- udelay(1);
- continue;
- } else {
- u32 hh_ts_lo, hh_ts_hi, tmr_idx;
- u64 hh_ts;
-
- tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
- /* Read ART time */
- hh_ts_lo = rd32(hw, GLHH_ART_TIME_L);
- hh_ts_hi = rd32(hw, GLHH_ART_TIME_H);
- hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
- *system = convert_art_ns_to_tsc(hh_ts);
- /* Read Device source clock time */
- hh_ts_lo = rd32(hw, GLTSYN_HHTIME_L(tmr_idx));
- hh_ts_hi = rd32(hw, GLTSYN_HHTIME_H(tmr_idx));
- hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
- *device = ns_to_ktime(hh_ts);
- break;
- }
- }
+ ctl = rd32(hw, cfg->ctl_reg);
+ ctl |= cfg->ctl_active;
+ wr32(hw, cfg->ctl_reg, ctl);
+ /* Poll until hardware completes the capture */
+ err = rd32_poll_timeout(hw, cfg->ctl_reg, ctl, !(ctl & cfg->ctl_active),
+ 5, 20 * USEC_PER_MSEC);
+ if (err)
+ goto err_timeout;
+
+ /* Read ART system time */
+ ts_lo = rd32(hw, cfg->art_time_l);
+ ts_hi = rd32(hw, cfg->art_time_h);
+ ts = ((u64)ts_hi << 32) | ts_lo;
+ system->cycles = ts;
+ system->cs_id = CSID_X86_ART;
+ system->use_nsecs = true;
+
+ /* Read Device source clock time */
+ ts_lo = rd32(hw, cfg->dev_time_l[tmr_idx]);
+ ts_hi = rd32(hw, cfg->dev_time_h[tmr_idx]);
+ ts = ((u64)ts_hi << 32) | ts_lo;
+ *device = ns_to_ktime(ts);
+
+err_timeout:
/* Clear the master timer */
ice_ptp_src_cmd(hw, ICE_PTP_NOP);
/* Release HW lock */
- hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
- hh_lock = hh_lock & ~PFHH_SEM_BUSY_M;
- wr32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), hh_lock);
-
- if (i == MAX_HH_CTL_LOCK_TRIES)
- return -ETIMEDOUT;
+ lock = rd32(hw, cfg->lock_reg);
+ lock &= ~cfg->lock_busy;
+ wr32(hw, cfg->lock_reg, lock);
- return 0;
+ return err;
}
/**
- * ice_ptp_getcrosststamp_e82x - Capture a device cross timestamp
+ * ice_ptp_getcrosststamp - Capture a device cross timestamp
* @info: the driver's PTP info structure
* @cts: The memory to fill the cross timestamp info
*
@@ -2149,41 +2174,55 @@ ice_ptp_get_syncdevicetime(ktime_t *device,
* clock. Fill the cross timestamp information and report it back to the
* caller.
*
- * This is only valid for E822 and E823 devices which have support for
- * generating the cross timestamp via PCIe PTM.
- *
* In order to correctly correlate the ART timestamp back to the TSC time, the
* CPU must have X86_FEATURE_TSC_KNOWN_FREQ.
+ *
+ * Return: zero on success, or a negative error code on failure.
*/
-static int
-ice_ptp_getcrosststamp_e82x(struct ptp_clock_info *info,
- struct system_device_crosststamp *cts)
+static int ice_ptp_getcrosststamp(struct ptp_clock_info *info,
+ struct system_device_crosststamp *cts)
{
struct ice_pf *pf = ptp_info_to_pf(info);
+ struct ice_crosststamp_ctx ctx = {
+ .pf = pf,
+ };
+
+ switch (pf->hw.mac_type) {
+ case ICE_MAC_GENERIC:
+ case ICE_MAC_GENERIC_3K_E825:
+ ctx.cfg = &ice_crosststamp_cfg_e82x;
+ break;
+#ifdef CONFIG_ICE_HWTS
+ case ICE_MAC_E830:
+ ctx.cfg = &ice_crosststamp_cfg_e830;
+ break;
+#endif /* CONFIG_ICE_HWTS */
+ default:
+ return -EOPNOTSUPP;
+ }
- return get_device_system_crosststamp(ice_ptp_get_syncdevicetime,
- pf, NULL, cts);
+ return get_device_system_crosststamp(ice_capture_crosststamp, &ctx,
+ &ctx.snapshot, cts);
}
-#endif /* CONFIG_ICE_HWTS */
/**
- * ice_ptp_get_ts_config - ioctl interface to read the timestamping config
- * @pf: Board private structure
- * @ifr: ioctl data
+ * ice_ptp_hwtstamp_get - interface to read the timestamping config
+ * @netdev: Pointer to network interface device structure
+ * @config: Timestamping configuration structure
*
* Copy the timestamping config to user buffer
*/
-int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
+int ice_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
{
- struct hwtstamp_config *config;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
if (pf->ptp.state != ICE_PTP_READY)
return -EIO;
- config = &pf->ptp.tstamp_config;
+ *config = pf->ptp.tstamp_config;
- return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
- -EFAULT : 0;
+ return 0;
}
/**
@@ -2191,8 +2230,8 @@ int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
* @pf: Board private structure
* @config: hwtstamp settings requested or saved
*/
-static int
-ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
+static int ice_ptp_set_timestamp_mode(struct ice_pf *pf,
+ struct kernel_hwtstamp_config *config)
{
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
@@ -2236,32 +2275,31 @@ ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
}
/**
- * ice_ptp_set_ts_config - ioctl interface to control the timestamping
- * @pf: Board private structure
- * @ifr: ioctl data
+ * ice_ptp_hwtstamp_set - interface to control the timestamping
+ * @netdev: Pointer to network interface device structure
+ * @config: Timestamping configuration structure
+ * @extack: Netlink extended ack structure for error reporting
*
* Get the user config and store it
*/
-int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
+int ice_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config config;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
int err;
if (pf->ptp.state != ICE_PTP_READY)
return -EAGAIN;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- err = ice_ptp_set_timestamp_mode(pf, &config);
+ err = ice_ptp_set_timestamp_mode(pf, config);
if (err)
return err;
/* Return the actual configuration set */
- config = pf->ptp.tstamp_config;
+ *config = pf->ptp.tstamp_config;
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
/**
@@ -2299,20 +2337,41 @@ u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
}
/**
- * ice_ptp_disable_sma_pins_e810t - Disable E810-T SMA pins
+ * ice_ptp_setup_pin_cfg - setup PTP pin_config structure
+ * @pf: Board private structure
+ */
+static void ice_ptp_setup_pin_cfg(struct ice_pf *pf)
+{
+ for (unsigned int i = 0; i < pf->ptp.info.n_pins; i++) {
+ const struct ice_ptp_pin_desc *desc = &pf->ptp.ice_pin_desc[i];
+ struct ptp_pin_desc *pin = &pf->ptp.pin_desc[i];
+ const char *name;
+
+ if (!ice_is_feature_supported(pf, ICE_F_SMA_CTRL))
+ name = ice_pin_names[desc->name_idx];
+ else
+ name = ice_pin_names_dpll[desc->name_idx];
+
+ strscpy(pin->name, name, sizeof(pin->name));
+
+ pin->index = i;
+ }
+
+ pf->ptp.info.pin_config = pf->ptp.pin_desc;
+}
+
+/**
+ * ice_ptp_disable_pins - Disable PTP pins
* @pf: pointer to the PF structure
- * @info: PTP clock info structure
*
- * Disable the OS access to the SMA pins. Called to clear out the OS
- * indications of pin support when we fail to setup the E810-T SMA control
- * register.
+ * Disable the OS access to the pins. Called to clear out the OS
+ * indications of pin support when we fail to set up the pin array.
*/
-static void
-ice_ptp_disable_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
+static void ice_ptp_disable_pins(struct ice_pf *pf)
{
- struct device *dev = ice_pf_to_dev(pf);
+ struct ptp_clock_info *info = &pf->ptp.info;
- dev_warn(dev, "Failed to configure E810-T SMA pin control\n");
+ dev_warn(ice_pf_to_dev(pf), "Failed to configure PTP pin control\n");
info->enable = NULL;
info->verify = NULL;
@@ -2322,126 +2381,162 @@ ice_ptp_disable_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
}
/**
- * ice_ptp_setup_sma_pins_e810t - Setup the SMA pins
+ * ice_ptp_parse_sdp_entries - update ice_ptp_pin_desc structure from NVM
* @pf: pointer to the PF structure
- * @info: PTP clock info structure
+ * @entries: SDP connection section from NVM
+ * @num_entries: number of valid entries in sdp_entries
+ * @pins: PTP pins array to update
*
- * Finish setting up the SMA pins by allocating pin_config, and setting it up
- * according to the current status of the SMA. On failure, disable all of the
- * extended SMA pin support.
+ * Return: 0 on success, negative error code otherwise.
*/
-static void
-ice_ptp_setup_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
+static int ice_ptp_parse_sdp_entries(struct ice_pf *pf, __le16 *entries,
+ unsigned int num_entries,
+ struct ice_ptp_pin_desc *pins)
{
- struct device *dev = ice_pf_to_dev(pf);
- int err;
+ unsigned int n_pins = 0;
+ unsigned int i;
- /* Allocate memory for kernel pins interface */
- info->pin_config = devm_kcalloc(dev, info->n_pins,
- sizeof(*info->pin_config), GFP_KERNEL);
- if (!info->pin_config) {
- ice_ptp_disable_sma_pins_e810t(pf, info);
- return;
+ /* Setup ice_pin_desc array */
+ for (i = 0; i < ICE_N_PINS_MAX; i++) {
+ pins[i].name_idx = -1;
+ pins[i].gpio[0] = -1;
+ pins[i].gpio[1] = -1;
}
- /* Read current SMA status */
- err = ice_get_sma_config_e810t(&pf->hw, info->pin_config);
- if (err)
- ice_ptp_disable_sma_pins_e810t(pf, info);
-}
+ for (i = 0; i < num_entries; i++) {
+ u16 entry = le16_to_cpu(entries[i]);
+ DECLARE_BITMAP(bitmap, GPIO_NA);
+ unsigned int idx;
+ bool dir;
+ u16 gpio;
-/**
- * ice_ptp_setup_pins_e810 - Setup PTP pins in sysfs
- * @pf: pointer to the PF instance
- * @info: PTP clock capabilities
- */
-static void
-ice_ptp_setup_pins_e810(struct ice_pf *pf, struct ptp_clock_info *info)
-{
- if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
- info->n_ext_ts = N_EXT_TS_E810;
- info->n_per_out = N_PER_OUT_E810T;
- info->n_pins = NUM_PTP_PINS_E810T;
- info->verify = ice_verify_pin_e810t;
-
- /* Complete setup of the SMA pins */
- ice_ptp_setup_sma_pins_e810t(pf, info);
- } else if (ice_is_e810t(&pf->hw)) {
- info->n_ext_ts = N_EXT_TS_NO_SMA_E810T;
- info->n_per_out = N_PER_OUT_NO_SMA_E810T;
- } else {
- info->n_per_out = N_PER_OUT_E810;
- info->n_ext_ts = N_EXT_TS_E810;
+ *bitmap = FIELD_GET(ICE_AQC_NVM_SDP_AC_PIN_M, entry);
+
+ /* Check if entry's pin bitmap is valid. */
+ if (bitmap_empty(bitmap, GPIO_NA))
+ continue;
+
+ dir = !!FIELD_GET(ICE_AQC_NVM_SDP_AC_DIR_M, entry);
+ gpio = FIELD_GET(ICE_AQC_NVM_SDP_AC_SDP_NUM_M, entry);
+
+ for (idx = 0; idx < ICE_N_PINS_MAX; idx++) {
+ if (pins[idx].name_idx == gpio)
+ break;
+ }
+
+ if (idx == ICE_N_PINS_MAX) {
+ /* Pin not found, setup its entry and name */
+ idx = n_pins++;
+ pins[idx].name_idx = gpio;
+ }
+ pins[idx].gpio[dir] = gpio;
}
-}
-/**
- * ice_ptp_setup_pins_e823 - Setup PTP pins in sysfs
- * @pf: pointer to the PF instance
- * @info: PTP clock capabilities
- */
-static void
-ice_ptp_setup_pins_e823(struct ice_pf *pf, struct ptp_clock_info *info)
-{
- info->pps = 1;
- info->n_per_out = 0;
- info->n_ext_ts = 1;
+ for (i = 0; i < n_pins; i++) {
+ dev_dbg(ice_pf_to_dev(pf),
+ "NVM pin entry[%d] : name_idx %d gpio_out %d gpio_in %d\n",
+ i, pins[i].name_idx, pins[i].gpio[1], pins[i].gpio[0]);
+ }
+
+ pf->ptp.info.n_pins = n_pins;
+ return 0;
}
/**
- * ice_ptp_set_funcs_e82x - Set specialized functions for E82x support
+ * ice_ptp_set_funcs_e82x - Set specialized functions for E82X support
* @pf: Board private structure
- * @info: PTP info to fill
*
- * Assign functions to the PTP capabiltiies structure for E82x devices.
+ * Assign functions to the PTP capabilities structure for E82X devices.
* Functions which operate across all device families should be set directly
- * in ice_ptp_set_caps. Only add functions here which are distinct for E82x
+ * in ice_ptp_set_caps. Only add functions here which are distinct for E82X
* devices.
*/
-static void
-ice_ptp_set_funcs_e82x(struct ice_pf *pf, struct ptp_clock_info *info)
+static void ice_ptp_set_funcs_e82x(struct ice_pf *pf)
{
-#ifdef CONFIG_ICE_HWTS
- if (boot_cpu_has(X86_FEATURE_ART) &&
- boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ))
- info->getcrosststamp = ice_ptp_getcrosststamp_e82x;
-#endif /* CONFIG_ICE_HWTS */
+ pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp;
+
+ if (pf->hw.mac_type == ICE_MAC_GENERIC_3K_E825) {
+ pf->ptp.ice_pin_desc = ice_pin_desc_e825c;
+ pf->ptp.info.n_pins = ARRAY_SIZE(ice_pin_desc_e825c);
+ } else {
+ pf->ptp.ice_pin_desc = ice_pin_desc_e82x;
+ pf->ptp.info.n_pins = ARRAY_SIZE(ice_pin_desc_e82x);
+ }
+ ice_ptp_setup_pin_cfg(pf);
}
/**
* ice_ptp_set_funcs_e810 - Set specialized functions for E810 support
* @pf: Board private structure
- * @info: PTP info to fill
*
* Assign functions to the PTP capabiltiies structure for E810 devices.
* Functions which operate across all device families should be set directly
- * in ice_ptp_set_caps. Only add functions here which are distinct for e810
+ * in ice_ptp_set_caps. Only add functions here which are distinct for E810
* devices.
*/
-static void
-ice_ptp_set_funcs_e810(struct ice_pf *pf, struct ptp_clock_info *info)
+static void ice_ptp_set_funcs_e810(struct ice_pf *pf)
{
- info->enable = ice_ptp_gpio_enable_e810;
- ice_ptp_setup_pins_e810(pf, info);
+ __le16 entries[ICE_AQC_NVM_SDP_AC_MAX_SIZE];
+ struct ice_ptp_pin_desc *desc = NULL;
+ struct ice_ptp *ptp = &pf->ptp;
+ unsigned int num_entries;
+ int err;
+
+ err = ice_ptp_read_sdp_ac(&pf->hw, entries, &num_entries);
+ if (err) {
+ /* SDP section does not exist in NVM or is corrupted */
+ if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
+ ptp->ice_pin_desc = ice_pin_desc_dpll;
+ ptp->info.n_pins = ARRAY_SIZE(ice_pin_desc_dpll);
+ } else {
+ pf->ptp.ice_pin_desc = ice_pin_desc_e810;
+ pf->ptp.info.n_pins = ARRAY_SIZE(ice_pin_desc_e810);
+ }
+ err = 0;
+ } else {
+ desc = devm_kcalloc(ice_pf_to_dev(pf), ICE_N_PINS_MAX,
+ sizeof(struct ice_ptp_pin_desc),
+ GFP_KERNEL);
+ if (!desc)
+ goto err;
+
+ err = ice_ptp_parse_sdp_entries(pf, entries, num_entries, desc);
+ if (err)
+ goto err;
+
+ ptp->ice_pin_desc = (const struct ice_ptp_pin_desc *)desc;
+ }
+
+ ptp->info.pin_config = ptp->pin_desc;
+ ice_ptp_setup_pin_cfg(pf);
+
+err:
+ if (err) {
+ devm_kfree(ice_pf_to_dev(pf), desc);
+ ice_ptp_disable_pins(pf);
+ }
}
/**
- * ice_ptp_set_funcs_e823 - Set specialized functions for E823 support
+ * ice_ptp_set_funcs_e830 - Set specialized functions for E830 support
* @pf: Board private structure
- * @info: PTP info to fill
*
- * Assign functions to the PTP capabiltiies structure for E823 devices.
+ * Assign functions to the PTP capabilities structure for E830 devices.
* Functions which operate across all device families should be set directly
- * in ice_ptp_set_caps. Only add functions here which are distinct for e823
+ * in ice_ptp_set_caps. Only add functions here which are distinct for E830
* devices.
*/
-static void
-ice_ptp_set_funcs_e823(struct ice_pf *pf, struct ptp_clock_info *info)
+static void ice_ptp_set_funcs_e830(struct ice_pf *pf)
{
- ice_ptp_set_funcs_e82x(pf, info);
+#ifdef CONFIG_ICE_HWTS
+ if (pcie_ptm_enabled(pf->pdev) && boot_cpu_has(X86_FEATURE_ART))
+ pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp;
- info->enable = ice_ptp_gpio_enable_e823;
- ice_ptp_setup_pins_e823(pf, info);
+#endif /* CONFIG_ICE_HWTS */
+ /* Rest of the config is the same as base E810 */
+ pf->ptp.ice_pin_desc = ice_pin_desc_e810;
+ pf->ptp.info.n_pins = ARRAY_SIZE(ice_pin_desc_e810);
+ ice_ptp_setup_pin_cfg(pf);
}
/**
@@ -2461,13 +2556,30 @@ static void ice_ptp_set_caps(struct ice_pf *pf)
info->adjfine = ice_ptp_adjfine;
info->gettimex64 = ice_ptp_gettimex64;
info->settime64 = ice_ptp_settime64;
-
- if (ice_is_e810(&pf->hw))
- ice_ptp_set_funcs_e810(pf, info);
- else if (ice_is_e823(&pf->hw))
- ice_ptp_set_funcs_e823(pf, info);
- else
- ice_ptp_set_funcs_e82x(pf, info);
+ info->n_per_out = GLTSYN_TGT_H_IDX_MAX;
+ info->n_ext_ts = GLTSYN_EVNT_H_IDX_MAX;
+ info->enable = ice_ptp_gpio_enable;
+ info->verify = ice_verify_pin;
+
+ info->supported_extts_flags = PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS;
+ info->supported_perout_flags = PTP_PEROUT_PHASE;
+
+ switch (pf->hw.mac_type) {
+ case ICE_MAC_E810:
+ ice_ptp_set_funcs_e810(pf);
+ return;
+ case ICE_MAC_E830:
+ ice_ptp_set_funcs_e830(pf);
+ return;
+ case ICE_MAC_GENERIC:
+ case ICE_MAC_GENERIC_3K_E825:
+ ice_ptp_set_funcs_e82x(pf);
+ return;
+ default:
+ return;
+ }
}
/**
@@ -2578,6 +2690,68 @@ enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf)
}
/**
+ * ice_ptp_ts_irq - Process the PTP Tx timestamps in IRQ context
+ * @pf: Board private structure
+ *
+ * Return: IRQ_WAKE_THREAD if Tx timestamp read has to be handled in the bottom
+ * half of the interrupt and IRQ_HANDLED otherwise.
+ */
+irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ /* E810 capable of low latency timestamping with interrupt can
+ * request a single timestamp in the top half and wait for
+ * a second LL TS interrupt from the FW when it's ready.
+ */
+ if (hw->dev_caps.ts_dev_info.ts_ll_int_read) {
+ struct ice_ptp_tx *tx = &pf->ptp.port.tx;
+ u8 idx, last;
+
+ if (!ice_pf_state_is_nominal(pf))
+ return IRQ_HANDLED;
+
+ spin_lock(&tx->lock);
+ if (tx->init) {
+ last = tx->last_ll_ts_idx_read + 1;
+ idx = find_next_bit_wrap(tx->in_use, tx->len,
+ last);
+ if (idx != tx->len)
+ ice_ptp_req_tx_single_tstamp(tx, idx);
+ }
+ spin_unlock(&tx->lock);
+
+ return IRQ_HANDLED;
+ }
+ fallthrough; /* non-LL_TS E810 */
+ case ICE_MAC_GENERIC:
+ case ICE_MAC_GENERIC_3K_E825:
+ /* All other devices process timestamps in the bottom half due
+ * to sleeping or polling.
+ */
+ if (!ice_ptp_pf_handles_tx_interrupt(pf))
+ return IRQ_HANDLED;
+
+ set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
+ return IRQ_WAKE_THREAD;
+ case ICE_MAC_E830:
+ /* E830 can read timestamps in the top half using rd32() */
+ if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) {
+ /* Process outstanding Tx timestamps. If there
+ * is more work, re-arm the interrupt to trigger again.
+ */
+ wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
+ ice_flush(hw);
+ }
+ return IRQ_HANDLED;
+ default:
+ return IRQ_HANDLED;
+ }
+}
+
+/**
* ice_ptp_maybe_trigger_tx_interrupt - Trigger Tx timestamp interrupt
* @pf: Board private structure
*
@@ -2597,13 +2771,13 @@ static void ice_ptp_maybe_trigger_tx_interrupt(struct ice_pf *pf)
bool trigger_oicr = false;
unsigned int i;
- if (ice_is_e810(hw))
+ if (!pf->ptp.port.tx.has_ready_bitmap)
return;
if (!ice_pf_src_tmr_owned(pf))
return;
- for (i = 0; i < ICE_MAX_QUAD; i++) {
+ for (i = 0; i < ICE_GET_QUAD_NUM(hw->ptp.num_lports); i++) {
u64 tstamp_ready;
int err;
@@ -2644,6 +2818,32 @@ static void ice_ptp_periodic_work(struct kthread_work *work)
}
/**
+ * ice_ptp_prepare_rebuild_sec - Prepare second NAC for PTP reset or rebuild
+ * @pf: Board private structure
+ * @rebuild: rebuild if true, prepare if false
+ * @reset_type: the reset type being performed
+ */
+static void ice_ptp_prepare_rebuild_sec(struct ice_pf *pf, bool rebuild,
+ enum ice_reset_req reset_type)
+{
+ struct list_head *entry;
+
+ list_for_each(entry, &pf->adapter->ports.ports) {
+ struct ice_ptp_port *port = list_entry(entry,
+ struct ice_ptp_port,
+ list_node);
+ struct ice_pf *peer_pf = ptp_port_to_pf(port);
+
+ if (!ice_is_primary(&peer_pf->hw)) {
+ if (rebuild)
+ ice_ptp_rebuild(peer_pf, reset_type);
+ else
+ ice_ptp_prepare_for_reset(peer_pf, reset_type);
+ }
+ }
+}
+
+/**
* ice_ptp_prepare_for_reset - Prepare PTP for reset
* @pf: Board private structure
* @reset_type: the reset type being performed
@@ -2651,6 +2851,7 @@ static void ice_ptp_periodic_work(struct kthread_work *work)
void ice_ptp_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
struct ice_ptp *ptp = &pf->ptp;
+ struct ice_hw *hw = &pf->hw;
u8 src_tmr;
if (ptp->state != ICE_PTP_READY)
@@ -2666,10 +2867,13 @@ void ice_ptp_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
if (reset_type == ICE_RESET_PFR)
return;
+ if (ice_pf_src_tmr_owned(pf) && hw->mac_type == ICE_MAC_GENERIC_3K_E825)
+ ice_ptp_prepare_rebuild_sec(pf, false, reset_type);
+
ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
/* Disable periodic outputs */
- ice_ptp_disable_all_clkout(pf);
+ ice_ptp_disable_all_perout(pf);
src_tmr = ice_get_ptp_src_clock_index(&pf->hw);
@@ -2699,6 +2903,10 @@ static int ice_ptp_rebuild_owner(struct ice_pf *pf)
if (err)
return err;
+ err = ice_tspll_init(hw);
+ if (err)
+ return err;
+
/* Acquire the global hardware lock */
if (!ice_ptp_lock(hw)) {
err = -EBUSY;
@@ -2707,10 +2915,8 @@ static int ice_ptp_rebuild_owner(struct ice_pf *pf)
/* Write the increment time value to PHY and LAN */
err = ice_ptp_write_incval(hw, ice_base_incval(pf));
- if (err) {
- ice_ptp_unlock(hw);
- return err;
- }
+ if (err)
+ goto err_unlock;
/* Write the initial Time value to PHY and LAN using the cached PHC
* time before the reset and time difference between stopping and
@@ -2723,10 +2929,8 @@ static int ice_ptp_rebuild_owner(struct ice_pf *pf)
ts = ktime_to_timespec64(ktime_get_real());
}
err = ice_ptp_write_init(pf, &ts);
- if (err) {
- ice_ptp_unlock(hw);
- return err;
- }
+ if (err)
+ goto err_unlock;
/* Release the global hardware lock */
ice_ptp_unlock(hw);
@@ -2736,16 +2940,22 @@ static int ice_ptp_rebuild_owner(struct ice_pf *pf)
*/
ice_ptp_flush_all_tx_tracker(pf);
- if (!ice_is_e810(hw)) {
- /* Enable quad interrupts */
- err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
- if (err)
- return err;
+ /* Enable quad interrupts */
+ err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
+ if (err)
+ return err;
- ice_ptp_restart_all_phy(pf);
- }
+ ice_ptp_restart_all_phy(pf);
+
+ /* Re-enable all periodic outputs and external timestamp events */
+ ice_ptp_enable_all_perout(pf);
+ ice_ptp_enable_all_extts(pf);
return 0;
+
+err_unlock:
+ ice_ptp_unlock(hw);
+ return err;
}
/**
@@ -2785,187 +2995,43 @@ err:
dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err);
}
-/**
- * ice_ptp_aux_dev_to_aux_pf - Get auxiliary PF handle for the auxiliary device
- * @aux_dev: auxiliary device to get the auxiliary PF for
- */
-static struct ice_pf *
-ice_ptp_aux_dev_to_aux_pf(struct auxiliary_device *aux_dev)
-{
- struct ice_ptp_port *aux_port;
- struct ice_ptp *aux_ptp;
-
- aux_port = container_of(aux_dev, struct ice_ptp_port, aux_dev);
- aux_ptp = container_of(aux_port, struct ice_ptp, port);
-
- return container_of(aux_ptp, struct ice_pf, ptp);
-}
-
-/**
- * ice_ptp_aux_dev_to_owner_pf - Get PF handle for the auxiliary device
- * @aux_dev: auxiliary device to get the PF for
- */
-static struct ice_pf *
-ice_ptp_aux_dev_to_owner_pf(struct auxiliary_device *aux_dev)
-{
- struct ice_ptp_port_owner *ports_owner;
- struct auxiliary_driver *aux_drv;
- struct ice_ptp *owner_ptp;
-
- if (!aux_dev->dev.driver)
- return NULL;
-
- aux_drv = to_auxiliary_drv(aux_dev->dev.driver);
- ports_owner = container_of(aux_drv, struct ice_ptp_port_owner,
- aux_driver);
- owner_ptp = container_of(ports_owner, struct ice_ptp, ports_owner);
- return container_of(owner_ptp, struct ice_pf, ptp);
-}
-
-/**
- * ice_ptp_auxbus_probe - Probe auxiliary devices
- * @aux_dev: PF's auxiliary device
- * @id: Auxiliary device ID
- */
-static int ice_ptp_auxbus_probe(struct auxiliary_device *aux_dev,
- const struct auxiliary_device_id *id)
+static int ice_ptp_setup_adapter(struct ice_pf *pf)
{
- struct ice_pf *owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev);
- struct ice_pf *aux_pf = ice_ptp_aux_dev_to_aux_pf(aux_dev);
+ if (!ice_pf_src_tmr_owned(pf) || !ice_is_primary(&pf->hw))
+ return -EPERM;
- if (WARN_ON(!owner_pf))
- return -ENODEV;
-
- INIT_LIST_HEAD(&aux_pf->ptp.port.list_member);
- mutex_lock(&owner_pf->ptp.ports_owner.lock);
- list_add(&aux_pf->ptp.port.list_member,
- &owner_pf->ptp.ports_owner.ports);
- mutex_unlock(&owner_pf->ptp.ports_owner.lock);
+ pf->adapter->ctrl_pf = pf;
return 0;
}
-/**
- * ice_ptp_auxbus_remove - Remove auxiliary devices from the bus
- * @aux_dev: PF's auxiliary device
- */
-static void ice_ptp_auxbus_remove(struct auxiliary_device *aux_dev)
+static int ice_ptp_setup_pf(struct ice_pf *pf)
{
- struct ice_pf *owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev);
- struct ice_pf *aux_pf = ice_ptp_aux_dev_to_aux_pf(aux_dev);
+ struct ice_ptp *ctrl_ptp = ice_get_ctrl_ptp(pf);
+ struct ice_ptp *ptp = &pf->ptp;
- mutex_lock(&owner_pf->ptp.ports_owner.lock);
- list_del(&aux_pf->ptp.port.list_member);
- mutex_unlock(&owner_pf->ptp.ports_owner.lock);
-}
+ if (WARN_ON(!ctrl_ptp) || pf->hw.mac_type == ICE_MAC_UNKNOWN)
+ return -ENODEV;
-/**
- * ice_ptp_auxbus_shutdown
- * @aux_dev: PF's auxiliary device
- */
-static void ice_ptp_auxbus_shutdown(struct auxiliary_device *aux_dev)
-{
- /* Doing nothing here, but handle to auxbus driver must be satisfied */
-}
+ INIT_LIST_HEAD(&ptp->port.list_node);
+ mutex_lock(&pf->adapter->ports.lock);
-/**
- * ice_ptp_auxbus_suspend
- * @aux_dev: PF's auxiliary device
- * @state: power management state indicator
- */
-static int
-ice_ptp_auxbus_suspend(struct auxiliary_device *aux_dev, pm_message_t state)
-{
- /* Doing nothing here, but handle to auxbus driver must be satisfied */
- return 0;
-}
+ list_add(&ptp->port.list_node,
+ &pf->adapter->ports.ports);
+ mutex_unlock(&pf->adapter->ports.lock);
-/**
- * ice_ptp_auxbus_resume
- * @aux_dev: PF's auxiliary device
- */
-static int ice_ptp_auxbus_resume(struct auxiliary_device *aux_dev)
-{
- /* Doing nothing here, but handle to auxbus driver must be satisfied */
return 0;
}
-/**
- * ice_ptp_auxbus_create_id_table - Create auxiliary device ID table
- * @pf: Board private structure
- * @name: auxiliary bus driver name
- */
-static struct auxiliary_device_id *
-ice_ptp_auxbus_create_id_table(struct ice_pf *pf, const char *name)
+static void ice_ptp_cleanup_pf(struct ice_pf *pf)
{
- struct auxiliary_device_id *ids;
-
- /* Second id left empty to terminate the array */
- ids = devm_kcalloc(ice_pf_to_dev(pf), 2,
- sizeof(struct auxiliary_device_id), GFP_KERNEL);
- if (!ids)
- return NULL;
-
- snprintf(ids[0].name, sizeof(ids[0].name), "ice.%s", name);
-
- return ids;
-}
-
-/**
- * ice_ptp_register_auxbus_driver - Register PTP auxiliary bus driver
- * @pf: Board private structure
- */
-static int ice_ptp_register_auxbus_driver(struct ice_pf *pf)
-{
- struct auxiliary_driver *aux_driver;
- struct ice_ptp *ptp;
- struct device *dev;
- char *name;
- int err;
-
- ptp = &pf->ptp;
- dev = ice_pf_to_dev(pf);
- aux_driver = &ptp->ports_owner.aux_driver;
- INIT_LIST_HEAD(&ptp->ports_owner.ports);
- mutex_init(&ptp->ports_owner.lock);
- name = devm_kasprintf(dev, GFP_KERNEL, "ptp_aux_dev_%u_%u_clk%u",
- pf->pdev->bus->number, PCI_SLOT(pf->pdev->devfn),
- ice_get_ptp_src_clock_index(&pf->hw));
- if (!name)
- return -ENOMEM;
-
- aux_driver->name = name;
- aux_driver->shutdown = ice_ptp_auxbus_shutdown;
- aux_driver->suspend = ice_ptp_auxbus_suspend;
- aux_driver->remove = ice_ptp_auxbus_remove;
- aux_driver->resume = ice_ptp_auxbus_resume;
- aux_driver->probe = ice_ptp_auxbus_probe;
- aux_driver->id_table = ice_ptp_auxbus_create_id_table(pf, name);
- if (!aux_driver->id_table)
- return -ENOMEM;
+ struct ice_ptp *ptp = &pf->ptp;
- err = auxiliary_driver_register(aux_driver);
- if (err) {
- devm_kfree(dev, aux_driver->id_table);
- dev_err(dev, "Failed registering aux_driver, name <%s>\n",
- name);
+ if (pf->hw.mac_type != ICE_MAC_UNKNOWN) {
+ mutex_lock(&pf->adapter->ports.lock);
+ list_del(&ptp->port.list_node);
+ mutex_unlock(&pf->adapter->ports.lock);
}
-
- return err;
-}
-
-/**
- * ice_ptp_unregister_auxbus_driver - Unregister PTP auxiliary bus driver
- * @pf: Board private structure
- */
-static void ice_ptp_unregister_auxbus_driver(struct ice_pf *pf)
-{
- struct auxiliary_driver *aux_driver = &pf->ptp.ports_owner.aux_driver;
-
- auxiliary_driver_unregister(aux_driver);
- devm_kfree(ice_pf_to_dev(pf), aux_driver->id_table);
-
- mutex_destroy(&pf->ptp.ports_owner.lock);
}
/**
@@ -2977,15 +3043,12 @@ static void ice_ptp_unregister_auxbus_driver(struct ice_pf *pf)
*/
int ice_ptp_clock_index(struct ice_pf *pf)
{
- struct auxiliary_device *aux_dev;
- struct ice_pf *owner_pf;
+ struct ice_ptp *ctrl_ptp = ice_get_ctrl_ptp(pf);
struct ptp_clock *clock;
- aux_dev = &pf->ptp.port.aux_dev;
- owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev);
- if (!owner_pf)
+ if (!ctrl_ptp)
return -1;
- clock = owner_pf->ptp.clock;
+ clock = ctrl_ptp->clock;
return clock ? ptp_clock_index(clock) : -1;
}
@@ -3011,6 +3074,13 @@ static int ice_ptp_init_owner(struct ice_pf *pf)
return err;
}
+ err = ice_tspll_init(hw);
+ if (err) {
+ dev_err(ice_pf_to_dev(pf), "Failed to initialize CGU, status %d\n",
+ err);
+ return err;
+ }
+
/* Acquire the global hardware lock */
if (!ice_ptp_lock(hw)) {
err = -EBUSY;
@@ -3019,47 +3089,37 @@ static int ice_ptp_init_owner(struct ice_pf *pf)
/* Write the increment time value to PHY and LAN */
err = ice_ptp_write_incval(hw, ice_base_incval(pf));
- if (err) {
- ice_ptp_unlock(hw);
- goto err_exit;
- }
+ if (err)
+ goto err_unlock;
ts = ktime_to_timespec64(ktime_get_real());
/* Write the initial Time value to PHY and LAN */
err = ice_ptp_write_init(pf, &ts);
- if (err) {
- ice_ptp_unlock(hw);
- goto err_exit;
- }
+ if (err)
+ goto err_unlock;
/* Release the global hardware lock */
ice_ptp_unlock(hw);
- if (!ice_is_e810(hw)) {
- /* Enable quad interrupts */
- err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
- if (err)
- goto err_exit;
- }
+ /* Configure PHY interrupt settings */
+ err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
+ if (err)
+ goto err_exit;
/* Ensure we have a clock device */
err = ice_ptp_create_clock(pf);
if (err)
goto err_clk;
- err = ice_ptp_register_auxbus_driver(pf);
- if (err) {
- dev_err(ice_pf_to_dev(pf), "Failed to register PTP auxbus driver");
- goto err_aux;
- }
-
return 0;
-err_aux:
- ptp_clock_unregister(pf->ptp.clock);
err_clk:
pf->ptp.clock = NULL;
err_exit:
return err;
+
+err_unlock:
+ ice_ptp_unlock(hw);
+ return err;
}
/**
@@ -3077,7 +3137,7 @@ static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp)
/* Allocate a kworker for handling work required for the ports
* connected to the PTP hardware clock.
*/
- kworker = kthread_create_worker(0, "ice-ptp-%s",
+ kworker = kthread_run_worker(0, "ice-ptp-%s",
dev_name(ice_pf_to_dev(pf)));
if (IS_ERR(kworker))
return PTR_ERR(kworker);
@@ -3094,6 +3154,8 @@ static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp)
* ice_ptp_init_port - Initialize PTP port structure
* @pf: Board private structure
* @ptp_port: PTP port structure
+ *
+ * Return: 0 on success, -ENODEV on invalid MAC type, -ENOMEM on failed alloc.
*/
static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
{
@@ -3101,13 +3163,14 @@ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
mutex_init(&ptp_port->ps_lock);
- switch (hw->phy_model) {
- case ICE_PHY_E810:
- return ice_ptp_init_tx_e810(pf, &ptp_port->tx);
- case ICE_PHY_E82X:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ case ICE_MAC_E830:
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_ptp_init_tx(pf, &ptp_port->tx, ptp_port->port_num);
+ case ICE_MAC_GENERIC:
kthread_init_delayed_work(&ptp_port->ov_work,
ice_ptp_wait_for_offsets);
-
return ice_ptp_init_tx_e82x(pf, &ptp_port->tx,
ptp_port->port_num);
default:
@@ -3116,76 +3179,6 @@ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
}
/**
- * ice_ptp_release_auxbus_device
- * @dev: device that utilizes the auxbus
- */
-static void ice_ptp_release_auxbus_device(struct device *dev)
-{
- /* Doing nothing here, but handle to auxbux device must be satisfied */
-}
-
-/**
- * ice_ptp_create_auxbus_device - Create PTP auxiliary bus device
- * @pf: Board private structure
- */
-static int ice_ptp_create_auxbus_device(struct ice_pf *pf)
-{
- struct auxiliary_device *aux_dev;
- struct ice_ptp *ptp;
- struct device *dev;
- char *name;
- int err;
- u32 id;
-
- ptp = &pf->ptp;
- id = ptp->port.port_num;
- dev = ice_pf_to_dev(pf);
-
- aux_dev = &ptp->port.aux_dev;
-
- name = devm_kasprintf(dev, GFP_KERNEL, "ptp_aux_dev_%u_%u_clk%u",
- pf->pdev->bus->number, PCI_SLOT(pf->pdev->devfn),
- ice_get_ptp_src_clock_index(&pf->hw));
- if (!name)
- return -ENOMEM;
-
- aux_dev->name = name;
- aux_dev->id = id;
- aux_dev->dev.release = ice_ptp_release_auxbus_device;
- aux_dev->dev.parent = dev;
-
- err = auxiliary_device_init(aux_dev);
- if (err)
- goto aux_err;
-
- err = auxiliary_device_add(aux_dev);
- if (err) {
- auxiliary_device_uninit(aux_dev);
- goto aux_err;
- }
-
- return 0;
-aux_err:
- dev_err(dev, "Failed to create PTP auxiliary bus device <%s>\n", name);
- devm_kfree(dev, name);
- return err;
-}
-
-/**
- * ice_ptp_remove_auxbus_device - Remove PTP auxiliary bus device
- * @pf: Board private structure
- */
-static void ice_ptp_remove_auxbus_device(struct ice_pf *pf)
-{
- struct auxiliary_device *aux_dev = &pf->ptp.port.aux_dev;
-
- auxiliary_device_delete(aux_dev);
- auxiliary_device_uninit(aux_dev);
-
- memset(aux_dev, 0, sizeof(*aux_dev));
-}
-
-/**
* ice_ptp_init_tx_interrupt_mode - Initialize device Tx interrupt mode
* @pf: Board private structure
*
@@ -3196,8 +3189,8 @@ static void ice_ptp_remove_auxbus_device(struct ice_pf *pf)
*/
static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf)
{
- switch (pf->hw.phy_model) {
- case ICE_PHY_E82X:
+ switch (pf->hw.mac_type) {
+ case ICE_MAC_GENERIC:
/* E822 based PHY has the clock owner process the interrupt
* for all ports.
*/
@@ -3232,23 +3225,35 @@ void ice_ptp_init(struct ice_pf *pf)
ptp->state = ICE_PTP_INITIALIZING;
- ice_ptp_init_phy_model(hw);
+ if (hw->lane_num < 0) {
+ err = hw->lane_num;
+ goto err_exit;
+ }
+ ptp->port.port_num = hw->lane_num;
+
+ ice_ptp_init_hw(hw);
ice_ptp_init_tx_interrupt_mode(pf);
/* If this function owns the clock hardware, it must allocate and
* configure the PTP clock device to represent it.
*/
- if (ice_pf_src_tmr_owned(pf)) {
+ if (ice_pf_src_tmr_owned(pf) && ice_is_primary(hw)) {
+ err = ice_ptp_setup_adapter(pf);
+ if (err)
+ goto err_exit;
err = ice_ptp_init_owner(pf);
if (err)
- goto err;
+ goto err_exit;
}
- ptp->port.port_num = hw->pf_id;
+ err = ice_ptp_setup_pf(pf);
+ if (err)
+ goto err_exit;
+
err = ice_ptp_init_port(pf, &ptp->port);
if (err)
- goto err;
+ goto err_clean_pf;
/* Start the PHY timestamping block */
ice_ptp_reset_phy_timestamping(pf);
@@ -3256,26 +3261,28 @@ void ice_ptp_init(struct ice_pf *pf)
/* Configure initial Tx interrupt settings */
ice_ptp_cfg_tx_interrupt(pf);
- err = ice_ptp_create_auxbus_device(pf);
- if (err)
- goto err;
-
ptp->state = ICE_PTP_READY;
err = ice_ptp_init_work(pf, ptp);
if (err)
- goto err;
+ goto err_exit;
dev_info(ice_pf_to_dev(pf), "PTP init successful\n");
return;
-err:
+err_clean_pf:
+ mutex_destroy(&ptp->port.ps_lock);
+ ice_ptp_cleanup_pf(pf);
+err_exit:
/* If we registered a PTP clock, release it */
if (pf->ptp.clock) {
ptp_clock_unregister(ptp->clock);
pf->ptp.clock = NULL;
}
- ptp->state = ICE_PTP_ERROR;
+ /* Keep ICE_PTP_UNINIT state to avoid ambiguity at driver unload
+	 * and to avoid releasing resources twice.
+ */
+ ptp->state = ICE_PTP_UNINIT;
dev_err(ice_pf_to_dev(pf), "PTP failed %d\n", err);
}
@@ -3288,18 +3295,30 @@ err:
*/
void ice_ptp_release(struct ice_pf *pf)
{
- if (pf->ptp.state != ICE_PTP_READY)
+ if (pf->ptp.state == ICE_PTP_UNINIT)
+ return;
+
+ if (pf->ptp.state != ICE_PTP_READY) {
+ mutex_destroy(&pf->ptp.port.ps_lock);
+ ice_ptp_cleanup_pf(pf);
+ if (pf->ptp.clock) {
+ ptp_clock_unregister(pf->ptp.clock);
+ pf->ptp.clock = NULL;
+ }
return;
+ }
pf->ptp.state = ICE_PTP_UNINIT;
/* Disable timestamping for both Tx and Rx */
ice_ptp_disable_timestamp_mode(pf);
- ice_ptp_remove_auxbus_device(pf);
+ ice_ptp_cleanup_pf(pf);
ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
+ ice_ptp_disable_all_extts(pf);
+
kthread_cancel_delayed_work_sync(&pf->ptp.work);
ice_ptp_port_phy_stop(&pf->ptp.port);
@@ -3309,14 +3328,11 @@ void ice_ptp_release(struct ice_pf *pf)
pf->ptp.kworker = NULL;
}
- if (ice_pf_src_tmr_owned(pf))
- ice_ptp_unregister_auxbus_driver(pf);
-
if (!pf->ptp.clock)
return;
/* Disable periodic outputs */
- ice_ptp_disable_all_clkout(pf);
+ ice_ptp_disable_all_perout(pf);
ptp_clock_unregister(pf->ptp.clock);
pf->ptp.clock = NULL;
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
index 3af20025043a..27016aac4f1e 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -9,30 +9,6 @@
#include "ice_ptp_hw.h"
-enum ice_ptp_pin_e810 {
- GPIO_20 = 0,
- GPIO_21,
- GPIO_22,
- GPIO_23,
- NUM_PTP_PIN_E810
-};
-
-enum ice_ptp_pin_e810t {
- GNSS = 0,
- SMA1,
- UFL1,
- SMA2,
- UFL2,
- NUM_PTP_PINS_E810T
-};
-
-struct ice_perout_channel {
- bool ena;
- u32 gpio_pin;
- u64 period;
- u64 start_time;
-};
-
/* The ice hardware captures Tx hardware timestamps in the PHY. The timestamp
* is stored in a buffer of registers. Depending on the specific hardware,
* this buffer might be shared across multiple PHY ports.
@@ -152,7 +128,7 @@ struct ice_ptp_tx {
/* Quad and port information for initializing timestamp blocks */
#define INDEX_PER_QUAD 64
#define INDEX_PER_PORT_E82X 16
-#define INDEX_PER_PORT_E810 64
+#define INDEX_PER_PORT 64
/**
* struct ice_ptp_port - data used to initialize an external port for PTP
@@ -161,9 +137,8 @@ struct ice_ptp_tx {
* ready for PTP functionality. It is used to track the port initialization
* and determine when the port's PHY offset is valid.
*
- * @list_member: list member structure of auxiliary device
+ * @list_node: list member structure
* @tx: Tx timestamp tracking for this port
- * @aux_dev: auxiliary device associated with this port
* @ov_work: delayed work task for tracking when PHY offset is valid
* @ps_lock: mutex used to protect the overall PTP PHY start procedure
* @link_up: indicates whether the link is up
@@ -171,9 +146,8 @@ struct ice_ptp_tx {
* @port_num: the port number this structure represents
*/
struct ice_ptp_port {
- struct list_head list_member;
+ struct list_head list_node;
struct ice_ptp_tx tx;
- struct auxiliary_device aux_dev;
struct kthread_delayed_work ov_work;
struct mutex ps_lock; /* protects overall PTP PHY start procedure */
bool link_up;
@@ -187,22 +161,6 @@ enum ice_ptp_tx_interrupt {
ICE_PTP_TX_INTERRUPT_ALL,
};
-/**
- * struct ice_ptp_port_owner - data used to handle the PTP clock owner info
- *
- * This structure contains data necessary for the PTP clock owner to correctly
- * handle the timestamping feature for all attached ports.
- *
- * @aux_driver: the structure carring the auxiliary driver information
- * @ports: list of porst handled by this port owner
- * @lock: protect access to ports list
- */
-struct ice_ptp_port_owner {
- struct auxiliary_driver aux_driver;
- struct list_head ports;
- struct mutex lock;
-};
-
#define GLTSYN_TGT_H_IDX_MAX 4
enum ice_ptp_state {
@@ -213,23 +171,73 @@ enum ice_ptp_state {
ICE_PTP_ERROR,
};
+enum ice_ptp_pin {
+ SDP0 = 0,
+ SDP1,
+ SDP2,
+ SDP3,
+ TIME_SYNC,
+ ONE_PPS
+};
+
+enum ice_ptp_pin_nvm {
+ GNSS = 0,
+ SMA1,
+ UFL1,
+ SMA2,
+ UFL2,
+ NUM_PTP_PINS_NVM,
+ GPIO_NA = 9
+};
+
+/* Per-channel register definitions */
+#define GLTSYN_AUX_OUT(_chan, _idx) (GLTSYN_AUX_OUT_0(_idx) + ((_chan) * 8))
+#define GLTSYN_AUX_IN(_chan, _idx) (GLTSYN_AUX_IN_0(_idx) + ((_chan) * 8))
+#define GLTSYN_CLKO(_chan, _idx) (GLTSYN_CLKO_0(_idx) + ((_chan) * 8))
+#define GLTSYN_TGT_L(_chan, _idx) (GLTSYN_TGT_L_0(_idx) + ((_chan) * 16))
+#define GLTSYN_TGT_H(_chan, _idx) (GLTSYN_TGT_H_0(_idx) + ((_chan) * 16))
+#define GLTSYN_EVNT_L(_chan, _idx) (GLTSYN_EVNT_L_0(_idx) + ((_chan) * 16))
+#define GLTSYN_EVNT_H(_chan, _idx) (GLTSYN_EVNT_H_0(_idx) + ((_chan) * 16))
+#define GLTSYN_EVNT_H_IDX_MAX 3
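+
+/* Example: the per-channel stride is 8 for the AUX_OUT/AUX_IN/CLKO
+ * registers and 16 for the TGT/EVNT low/high pairs, so e.g.
+ * GLTSYN_TGT_L(2, _idx) expands to GLTSYN_TGT_L_0(_idx) + 32.
+ */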
+
+/* Pin definitions for PTP */
+#define ICE_N_PINS_MAX 6
+
+/**
+ * struct ice_ptp_pin_desc - hardware pin description data
+ * @name_idx: index of the name of pin in ice_pin_names
+ * @gpio: the associated GPIO input and output pins
+ * @delay: input and output signal delays in nanoseconds
+ *
+ * Structure describing a PTP-capable GPIO pin that extends ptp_pin_desc array
+ * for the device. Device families have separate sets of available pins with
+ * varying restrictions.
+ */
+struct ice_ptp_pin_desc {
+ int name_idx;
+ int gpio[2];
+ unsigned int delay[2];
+};
+
/**
* struct ice_ptp - data used for integrating with CONFIG_PTP_1588_CLOCK
* @state: current state of PTP state machine
* @tx_interrupt_mode: the TX interrupt mode for the PTP clock
* @port: data for the PHY port initialization procedure
- * @ports_owner: data for the auxiliary driver owner
* @work: delayed work function for periodic tasks
* @cached_phc_time: a cached copy of the PHC time for timestamp extension
* @cached_phc_jiffies: jiffies when cached_phc_time was last updated
- * @ext_ts_chan: the external timestamp channel in use
- * @ext_ts_irq: the external timestamp IRQ in use
* @kworker: kwork thread for handling periodic work
- * @perout_channels: periodic output data
+ * @ext_ts_irq: the external timestamp IRQ in use
+ * @pin_desc: structure defining pins
+ * @ice_pin_desc: internal structure describing pin relations
+ * @perout_rqs: cached periodic output requests
+ * @extts_rqs: cached external timestamp requests
* @info: structure defining PTP hardware capabilities
* @clock: pointer to registered PTP clock device
* @tstamp_config: hardware timestamping configuration
* @reset_time: kernel time after clock stop on reset
+ * @tx_hwtstamp_good: number of completed Tx timestamp requests
* @tx_hwtstamp_skipped: number of Tx time stamp requests skipped
* @tx_hwtstamp_timeouts: number of Tx skbs discarded with no time stamp
* @tx_hwtstamp_flushed: number of Tx skbs flushed due to interface closed
@@ -241,18 +249,20 @@ struct ice_ptp {
enum ice_ptp_state state;
enum ice_ptp_tx_interrupt tx_interrupt_mode;
struct ice_ptp_port port;
- struct ice_ptp_port_owner ports_owner;
struct kthread_delayed_work work;
u64 cached_phc_time;
unsigned long cached_phc_jiffies;
- u8 ext_ts_chan;
- u8 ext_ts_irq;
struct kthread_worker *kworker;
- struct ice_perout_channel perout_channels[GLTSYN_TGT_H_IDX_MAX];
+ u8 ext_ts_irq;
+ struct ptp_pin_desc pin_desc[ICE_N_PINS_MAX];
+ const struct ice_ptp_pin_desc *ice_pin_desc;
+ struct ptp_perout_request perout_rqs[GLTSYN_TGT_H_IDX_MAX];
+ struct ptp_extts_request extts_rqs[GLTSYN_EVNT_H_IDX_MAX];
struct ptp_clock_info info;
struct ptp_clock *clock;
- struct hwtstamp_config tstamp_config;
+ struct kernel_hwtstamp_config tstamp_config;
u64 reset_time;
+ u64 tx_hwtstamp_good;
u32 tx_hwtstamp_skipped;
u32 tx_hwtstamp_timeouts;
u32 tx_hwtstamp_flushed;
@@ -279,33 +289,15 @@ struct ice_ptp {
#define FIFO_EMPTY BIT(2)
#define FIFO_OK 0xFF
#define ICE_PTP_FIFO_NUM_CHECKS 5
-/* Per-channel register definitions */
-#define GLTSYN_AUX_OUT(_chan, _idx) (GLTSYN_AUX_OUT_0(_idx) + ((_chan) * 8))
-#define GLTSYN_AUX_IN(_chan, _idx) (GLTSYN_AUX_IN_0(_idx) + ((_chan) * 8))
-#define GLTSYN_CLKO(_chan, _idx) (GLTSYN_CLKO_0(_idx) + ((_chan) * 8))
-#define GLTSYN_TGT_L(_chan, _idx) (GLTSYN_TGT_L_0(_idx) + ((_chan) * 16))
-#define GLTSYN_TGT_H(_chan, _idx) (GLTSYN_TGT_H_0(_idx) + ((_chan) * 16))
-#define GLTSYN_EVNT_L(_chan, _idx) (GLTSYN_EVNT_L_0(_idx) + ((_chan) * 16))
-#define GLTSYN_EVNT_H(_chan, _idx) (GLTSYN_EVNT_H_0(_idx) + ((_chan) * 16))
-#define GLTSYN_EVNT_H_IDX_MAX 3
-
-/* Pin definitions for PTP PPS out */
-#define PPS_CLK_GEN_CHAN 3
-#define PPS_CLK_SRC_CHAN 2
-#define PPS_PIN_INDEX 5
-#define TIME_SYNC_PIN_INDEX 4
-#define N_EXT_TS_E810 3
-#define N_PER_OUT_E810 4
-#define N_PER_OUT_E810T 3
-#define N_PER_OUT_NO_SMA_E810T 2
-#define N_EXT_TS_NO_SMA_E810T 2
-#define ETH_GLTSYN_ENA(_i) (0x03000348 + ((_i) * 4))
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
int ice_ptp_clock_index(struct ice_pf *pf);
struct ice_pf;
-int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr);
-int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr);
+int ice_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config);
+int ice_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
void ice_ptp_restore_timestamp_mode(struct ice_pf *pf);
void ice_ptp_extts_event(struct ice_pf *pf);
@@ -313,6 +305,9 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb);
void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx);
void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx);
enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf);
+irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf);
+u64 ice_ptp_read_src_clk_reg(struct ice_pf *pf,
+ struct ptp_system_timestamp *sts);
u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
const struct ice_pkt_ctx *pkt_ctx);
@@ -321,14 +316,18 @@ void ice_ptp_prepare_for_reset(struct ice_pf *pf,
enum ice_reset_req reset_type);
void ice_ptp_init(struct ice_pf *pf);
void ice_ptp_release(struct ice_pf *pf);
-void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup);
+void ice_ptp_link_change(struct ice_pf *pf, bool linkup);
#else /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
-static inline int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
+
+static inline int ice_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
{
return -EOPNOTSUPP;
}
-static inline int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
+static inline int ice_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
}
@@ -351,6 +350,17 @@ static inline bool ice_ptp_process_ts(struct ice_pf *pf)
return true;
}
+static inline irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf)
+{
+ return IRQ_HANDLED;
+}
+
+static inline u64 ice_ptp_read_src_clk_reg(struct ice_pf *pf,
+ struct ptp_system_timestamp *sts)
+{
+ return 0;
+}
+
static inline u64
ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
const struct ice_pkt_ctx *pkt_ctx)
@@ -369,7 +379,7 @@ static inline void ice_ptp_prepare_for_reset(struct ice_pf *pf,
}
static inline void ice_ptp_init(struct ice_pf *pf) { }
static inline void ice_ptp_release(struct ice_pf *pf) { }
-static inline void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
+static inline void ice_ptp_link_change(struct ice_pf *pf, bool linkup)
{
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
index 2c4dab0c48ab..19dddd9b53dd 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
@@ -9,9 +9,279 @@
*/
/* Constants defined for the PTP 1588 clock hardware. */
+const struct ice_phy_reg_info_eth56g eth56g_phy_res[NUM_ETH56G_PHY_RES] = {
+ [ETH56G_PHY_REG_PTP] = {
+ .base_addr = 0x092000,
+ .step = 0x98,
+ },
+ [ETH56G_PHY_MEM_PTP] = {
+ .base_addr = 0x093000,
+ .step = 0x200,
+ },
+ [ETH56G_PHY_REG_XPCS] = {
+ .base_addr = 0x000000,
+ .step = 0x21000,
+ },
+ [ETH56G_PHY_REG_MAC] = {
+ .base_addr = 0x085000,
+ .step = 0x1000,
+ },
+ [ETH56G_PHY_REG_GPCS] = {
+ .base_addr = 0x084000,
+ .step = 0x400,
+ },
+};
+
+const
+struct ice_eth56g_mac_reg_cfg eth56g_mac_cfg[NUM_ICE_ETH56G_LNK_SPD] = {
+ [ICE_ETH56G_LNK_SPD_1G] = {
+ .tx_mode = { .def = 6, },
+ .rx_mode = { .def = 6, },
+ .blks_per_clk = 1,
+ .blktime = 0x4000, /* 32 */
+ .tx_offset = {
+ .serdes = 0x6666, /* 51.2 */
+ .no_fec = 0xd066, /* 104.2 */
+ .sfd = 0x3000, /* 24 */
+ .onestep = 0x30000 /* 384 */
+ },
+ .rx_offset = {
+ .serdes = 0xffffc59a, /* -29.2 */
+ .no_fec = 0xffff0a80, /* -122.75 */
+ .sfd = 0x2c00, /* 22 */
+ .bs_ds = 0x19a /* 0.8 */
+ /* Dynamic bitslip 0 equals to 10 */
+ }
+ },
+ [ICE_ETH56G_LNK_SPD_2_5G] = {
+ .tx_mode = { .def = 6, },
+ .rx_mode = { .def = 6, },
+ .blks_per_clk = 1,
+ .blktime = 0x199a, /* 12.8 */
+ .tx_offset = {
+ .serdes = 0x28f6, /* 20.48 */
+ .no_fec = 0x53b8, /* 41.86 */
+ .sfd = 0x1333, /* 9.6 */
+ .onestep = 0x13333 /* 153.6 */
+ },
+ .rx_offset = {
+ .serdes = 0xffffe8a4, /* -11.68 */
+ .no_fec = 0xffff9a76, /* -50.77 */
+ .sfd = 0xf33, /* 7.6 */
+ .bs_ds = 0xa4 /* 0.32 */
+ }
+ },
+ [ICE_ETH56G_LNK_SPD_10G] = {
+ .tx_mode = { .def = 1, },
+ .rx_mode = { .def = 1, },
+ .blks_per_clk = 1,
+ .blktime = 0x666, /* 3.2 */
+ .tx_offset = {
+ .serdes = 0x234c, /* 17.6484848 */
+ .no_fec = 0x8e80, /* 71.25 */
+ .fc = 0xb4a4, /* 90.32 */
+ .sfd = 0x4a4, /* 2.32 */
+ .onestep = 0x4ccd /* 38.4 */
+ },
+ .rx_offset = {
+ .serdes = 0xffffeb27, /* -10.42424 */
+ .no_fec = 0xffffcccd, /* -25.6 */
+ .fc = 0xfffc557b, /* -469.26 */
+ .sfd = 0x4a4, /* 2.32 */
+ .bs_ds = 0x32 /* 0.0969697 */
+ }
+ },
+ [ICE_ETH56G_LNK_SPD_25G] = {
+ .tx_mode = {
+ .def = 1,
+ .rs = 4
+ },
+ .tx_mk_dly = 4,
+ .tx_cw_dly = {
+ .def = 1,
+ .onestep = 6
+ },
+ .rx_mode = {
+ .def = 1,
+ .rs = 4
+ },
+ .rx_mk_dly = {
+ .def = 1,
+ .rs = 1
+ },
+ .rx_cw_dly = {
+ .def = 1,
+ .rs = 1
+ },
+ .blks_per_clk = 1,
+ .blktime = 0x28f, /* 1.28 */
+ .mktime = 0x147b, /* 10.24, only if RS-FEC enabled */
+ .tx_offset = {
+ .serdes = 0xe1e, /* 7.0593939 */
+ .no_fec = 0x3857, /* 28.17 */
+ .fc = 0x48c3, /* 36.38 */
+ .rs = 0x8100, /* 64.5 */
+ .sfd = 0x1dc, /* 0.93 */
+ .onestep = 0x1eb8 /* 15.36 */
+ },
+ .rx_offset = {
+ .serdes = 0xfffff7a9, /* -4.1697 */
+ .no_fec = 0xffffe71a, /* -12.45 */
+ .fc = 0xfffe894d, /* -187.35 */
+ .rs = 0xfffff8cd, /* -3.6 */
+ .sfd = 0x1dc, /* 0.93 */
+ .bs_ds = 0x14 /* 0.0387879, RS-FEC 0 */
+ }
+ },
+ [ICE_ETH56G_LNK_SPD_40G] = {
+ .tx_mode = { .def = 3 },
+ .tx_mk_dly = 4,
+ .tx_cw_dly = {
+ .def = 1,
+ .onestep = 6
+ },
+ .rx_mode = { .def = 4 },
+ .rx_mk_dly = { .def = 1 },
+ .rx_cw_dly = { .def = 1 },
+ .blktime = 0x333, /* 1.6 */
+ .mktime = 0xccd, /* 6.4 */
+ .tx_offset = {
+ .serdes = 0x234c, /* 17.6484848 */
+ .no_fec = 0x5a8a, /* 45.27 */
+ .fc = 0x81b8, /* 64.86 */
+ .sfd = 0x4a4, /* 2.32 */
+ .onestep = 0x1333 /* 9.6 */
+ },
+ .rx_offset = {
+ .serdes = 0xffffeb27, /* -10.42424 */
+ .no_fec = 0xfffff594, /* -5.21 */
+ .fc = 0xfffe3080, /* -231.75 */
+ .sfd = 0x4a4, /* 2.32 */
+ .bs_ds = 0xccd /* 6.4 */
+ }
+ },
+ [ICE_ETH56G_LNK_SPD_50G] = {
+ .tx_mode = { .def = 5 },
+ .tx_mk_dly = 4,
+ .tx_cw_dly = {
+ .def = 1,
+ .onestep = 6
+ },
+ .rx_mode = { .def = 5 },
+ .rx_mk_dly = { .def = 1 },
+ .rx_cw_dly = { .def = 1 },
+ .blktime = 0x28f, /* 1.28 */
+ .mktime = 0xa3d, /* 5.12 */
+ .tx_offset = {
+ .serdes = 0x13ba, /* 9.86353 */
+ .rs = 0x5400, /* 42 */
+ .sfd = 0xe6, /* 0.45 */
+ .onestep = 0xf5c /* 7.68 */
+ },
+ .rx_offset = {
+ .serdes = 0xfffff7e8, /* -4.04706 */
+ .rs = 0xfffff994, /* -3.21 */
+ .sfd = 0xe6 /* 0.45 */
+ }
+ },
+ [ICE_ETH56G_LNK_SPD_50G2] = {
+ .tx_mode = {
+ .def = 3,
+ .rs = 2
+ },
+ .tx_mk_dly = 4,
+ .tx_cw_dly = {
+ .def = 1,
+ .onestep = 6
+ },
+ .rx_mode = {
+ .def = 4,
+ .rs = 1
+ },
+ .rx_mk_dly = { .def = 1 },
+ .rx_cw_dly = { .def = 1 },
+ .blktime = 0x28f, /* 1.28 */
+ .mktime = 0xa3d, /* 5.12 */
+ .tx_offset = {
+ .serdes = 0xe1e, /* 7.0593939 */
+ .no_fec = 0x3d33, /* 30.6 */
+ .rs = 0x5057, /* 40.17 */
+ .sfd = 0x1dc, /* 0.93 */
+ .onestep = 0xf5c /* 7.68 */
+ },
+ .rx_offset = {
+ .serdes = 0xfffff7a9, /* -4.1697 */
+ .no_fec = 0xfffff8cd, /* -3.6 */
+ .rs = 0xfffff21a, /* -6.95 */
+ .sfd = 0x1dc, /* 0.93 */
+ .bs_ds = 0xa3d /* 5.12, RS-FEC 0x633 (3.1) */
+ }
+ },
+ [ICE_ETH56G_LNK_SPD_100G] = {
+ .tx_mode = {
+ .def = 3,
+ .rs = 2
+ },
+ .tx_mk_dly = 10,
+ .tx_cw_dly = {
+ .def = 3,
+ .onestep = 6
+ },
+ .rx_mode = {
+ .def = 4,
+ .rs = 1
+ },
+ .rx_mk_dly = { .def = 5 },
+ .rx_cw_dly = { .def = 5 },
+ .blks_per_clk = 1,
+ .blktime = 0x148, /* 0.64 */
+ .mktime = 0x199a, /* 12.8 */
+ .tx_offset = {
+ .serdes = 0xe1e, /* 7.0593939 */
+ .no_fec = 0x67ec, /* 51.96 */
+ .rs = 0x44fb, /* 34.49 */
+ .sfd = 0x1dc, /* 0.93 */
+ .onestep = 0xf5c /* 7.68 */
+ },
+ .rx_offset = {
+ .serdes = 0xfffff7a9, /* -4.1697 */
+ .no_fec = 0xfffff5a9, /* -5.17 */
+ .rs = 0xfffff6e6, /* -4.55 */
+ .sfd = 0x1dc, /* 0.93 */
+ .bs_ds = 0x199a /* 12.8, RS-FEC 0x31b (1.552) */
+ }
+ },
+ [ICE_ETH56G_LNK_SPD_100G2] = {
+ .tx_mode = { .def = 5 },
+ .tx_mk_dly = 10,
+ .tx_cw_dly = {
+ .def = 3,
+ .onestep = 6
+ },
+ .rx_mode = { .def = 5 },
+ .rx_mk_dly = { .def = 5 },
+ .rx_cw_dly = { .def = 5 },
+ .blks_per_clk = 1,
+ .blktime = 0x148, /* 0.64 */
+ .mktime = 0x199a, /* 12.8 */
+ .tx_offset = {
+ .serdes = 0x13ba, /* 9.86353 */
+ .rs = 0x460a, /* 35.02 */
+ .sfd = 0xe6, /* 0.45 */
+ .onestep = 0xf5c /* 7.68 */
+ },
+ .rx_offset = {
+ .serdes = 0xfffff7e8, /* -4.04706 */
+ .rs = 0xfffff548, /* -5.36 */
+ .sfd = 0xe6, /* 0.45 */
+ .bs_ds = 0x303 /* 1.506 */
+ }
+ }
+};
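+
+/* Note on the fixed-point comments above: each raw value appears to be
+ * the nanosecond figure from the trailing comment scaled by 512, i.e. a
+ * 2^-9 ns fixed-point format. For example 0x4000 / 512 = 32,
+ * 0x199a / 512 ~= 12.8 and, taken as signed, 0xffffc59a / 512 ~= -29.2.
+ */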
+
/* struct ice_time_ref_info_e82x
*
- * E822 hardware can use different sources as the reference for the PTP
+ * E82X hardware can use different sources as the reference for the PTP
* hardware clock. Each clock has different characteristics such as a slightly
* different frequency, etc.
*
@@ -19,139 +289,53 @@
* reference. See the struct ice_time_ref_info_e82x for information about the
* meaning of each constant.
*/
-const struct ice_time_ref_info_e82x e822_time_ref[NUM_ICE_TIME_REF_FREQ] = {
- /* ICE_TIME_REF_FREQ_25_000 -> 25 MHz */
+const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TSPLL_FREQ] = {
+ /* ICE_TSPLL_FREQ_25_000 -> 25 MHz */
{
/* pll_freq */
823437500, /* 823.4375 MHz PLL */
/* nominal_incval */
0x136e44fabULL,
- /* pps_delay */
- 11,
},
- /* ICE_TIME_REF_FREQ_122_880 -> 122.88 MHz */
+ /* ICE_TSPLL_FREQ_122_880 -> 122.88 MHz */
{
/* pll_freq */
783360000, /* 783.36 MHz */
/* nominal_incval */
0x146cc2177ULL,
- /* pps_delay */
- 12,
},
- /* ICE_TIME_REF_FREQ_125_000 -> 125 MHz */
+ /* ICE_TSPLL_FREQ_125_000 -> 125 MHz */
{
/* pll_freq */
796875000, /* 796.875 MHz */
/* nominal_incval */
0x141414141ULL,
- /* pps_delay */
- 12,
},
- /* ICE_TIME_REF_FREQ_153_600 -> 153.6 MHz */
+ /* ICE_TSPLL_FREQ_153_600 -> 153.6 MHz */
{
/* pll_freq */
816000000, /* 816 MHz */
/* nominal_incval */
0x139b9b9baULL,
- /* pps_delay */
- 12,
},
- /* ICE_TIME_REF_FREQ_156_250 -> 156.25 MHz */
+ /* ICE_TSPLL_FREQ_156_250 -> 156.25 MHz */
{
/* pll_freq */
830078125, /* 830.78125 MHz */
/* nominal_incval */
0x134679aceULL,
- /* pps_delay */
- 11,
},
- /* ICE_TIME_REF_FREQ_245_760 -> 245.76 MHz */
+ /* ICE_TSPLL_FREQ_245_760 -> 245.76 MHz */
{
/* pll_freq */
783360000, /* 783.36 MHz */
/* nominal_incval */
0x146cc2177ULL,
- /* pps_delay */
- 12,
- },
-};
-
-const struct ice_cgu_pll_params_e82x e822_cgu_params[NUM_ICE_TIME_REF_FREQ] = {
- /* ICE_TIME_REF_FREQ_25_000 -> 25 MHz */
- {
- /* refclk_pre_div */
- 1,
- /* feedback_div */
- 197,
- /* frac_n_div */
- 2621440,
- /* post_pll_div */
- 6,
- },
-
- /* ICE_TIME_REF_FREQ_122_880 -> 122.88 MHz */
- {
- /* refclk_pre_div */
- 5,
- /* feedback_div */
- 223,
- /* frac_n_div */
- 524288,
- /* post_pll_div */
- 7,
- },
-
- /* ICE_TIME_REF_FREQ_125_000 -> 125 MHz */
- {
- /* refclk_pre_div */
- 5,
- /* feedback_div */
- 223,
- /* frac_n_div */
- 524288,
- /* post_pll_div */
- 7,
- },
-
- /* ICE_TIME_REF_FREQ_153_600 -> 153.6 MHz */
- {
- /* refclk_pre_div */
- 5,
- /* feedback_div */
- 159,
- /* frac_n_div */
- 1572864,
- /* post_pll_div */
- 6,
- },
-
- /* ICE_TIME_REF_FREQ_156_250 -> 156.25 MHz */
- {
- /* refclk_pre_div */
- 5,
- /* feedback_div */
- 159,
- /* frac_n_div */
- 1572864,
- /* post_pll_div */
- 6,
- },
-
- /* ICE_TIME_REF_FREQ_245_760 -> 245.76 MHz */
- {
- /* refclk_pre_div */
- 10,
- /* feedback_div */
- 223,
- /* frac_n_div */
- 524288,
- /* post_pll_div */
- 7,
},
};
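+
+/* Worked example: the ICE_TSPLL_FREQ_125_000 entry runs the PLL at
+ * 796.875 MHz, so each tick must advance the timer by
+ * 1 / 796.875 MHz ~= 1.2549 ns. Assuming nominal_incval is a 32.32
+ * fixed-point nanosecond value, 0x141414141 = 0x1.41414141 ns
+ * ~= 1.2549 ns, which matches.
+ */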
@@ -359,9 +543,9 @@ const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD] = {
/* rx_desk_rsgb_par */
644531250, /* 644.53125 MHz Reed Solomon gearbox */
/* tx_desk_rsgb_pcs */
- 644531250, /* 644.53125 MHz Reed Solomon gearbox */
+ 390625000, /* 390.625 MHz Reed Solomon gearbox */
/* rx_desk_rsgb_pcs */
- 644531250, /* 644.53125 MHz Reed Solomon gearbox */
+ 390625000, /* 390.625 MHz Reed Solomon gearbox */
/* tx_fixed_delay */
1620,
/* pmd_adj_divisor */
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index 187ce9b54e1a..35680dbe4a7f 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -2,10 +2,10 @@
/* Copyright (C) 2021, Intel Corporation. */
#include <linux/delay.h>
+#include <linux/iopoll.h>
#include "ice_common.h"
#include "ice_ptp_hw.h"
#include "ice_ptp_consts.h"
-#include "ice_cgu_regs.h"
static struct dpll_pin_frequency ice_cgu_pin_freq_common[] = {
DPLL_PIN_FREQUENCY_1PPS,
@@ -33,7 +33,6 @@ static const struct ice_cgu_pin_desc ice_e810t_sfp_cgu_inputs[] = {
ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
{ "GNSS-1PPS", ZL_REF4P, DPLL_PIN_TYPE_GNSS,
ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
- { "OCXO", ZL_REF4N, DPLL_PIN_TYPE_INT_OSCILLATOR, 0, },
};
static const struct ice_cgu_pin_desc ice_e810t_qsfp_cgu_inputs[] = {
@@ -51,7 +50,6 @@ static const struct ice_cgu_pin_desc ice_e810t_qsfp_cgu_inputs[] = {
ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
{ "GNSS-1PPS", ZL_REF4P, DPLL_PIN_TYPE_GNSS,
ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
- { "OCXO", ZL_REF4N, DPLL_PIN_TYPE_INT_OSCILLATOR, },
};
static const struct ice_cgu_pin_desc ice_e810t_sfp_cgu_outputs[] = {
@@ -151,7 +149,7 @@ static const struct ice_cgu_pin_desc ice_e823_zl_cgu_outputs[] = {
* | 8 bit s | | 32 bits |
* +---------------+ +---------------+
*
- * The increment value is added to the GLSTYN_TIME_R and GLSTYN_TIME_L
+ * The increment value is added to the GLTSYN_TIME_R and GLTSYN_TIME_L
* registers every clock source tick. Depending on the specific device
* configuration, the clock source frequency could be one of a number of
* values.
@@ -227,40 +225,119 @@ static u64 ice_ptp_read_src_incval(struct ice_hw *hw)
}
/**
- * ice_ptp_src_cmd - Prepare source timer for a timer command
- * @hw: pointer to HW structure
+ * ice_ptp_tmr_cmd_to_src_reg - Convert to source timer command value
+ * @hw: pointer to HW struct
* @cmd: Timer command
*
- * Prepare the source timer for an upcoming timer sync command.
+ * Return: the source timer command register value for the given PTP timer
+ * command.
*/
-void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
+static u32 ice_ptp_tmr_cmd_to_src_reg(struct ice_hw *hw,
+ enum ice_ptp_tmr_cmd cmd)
{
- u32 cmd_val;
- u8 tmr_idx;
+ u32 cmd_val, tmr_idx;
+
+ switch (cmd) {
+ case ICE_PTP_INIT_TIME:
+ cmd_val = GLTSYN_CMD_INIT_TIME;
+ break;
+ case ICE_PTP_INIT_INCVAL:
+ cmd_val = GLTSYN_CMD_INIT_INCVAL;
+ break;
+ case ICE_PTP_ADJ_TIME:
+ cmd_val = GLTSYN_CMD_ADJ_TIME;
+ break;
+ case ICE_PTP_ADJ_TIME_AT_TIME:
+ cmd_val = GLTSYN_CMD_ADJ_INIT_TIME;
+ break;
+ case ICE_PTP_NOP:
+ case ICE_PTP_READ_TIME:
+ cmd_val = GLTSYN_CMD_READ_TIME;
+ break;
+ default:
+ dev_warn(ice_hw_to_dev(hw),
+ "Ignoring unrecognized timer command %u\n", cmd);
+ cmd_val = 0;
+ }
tmr_idx = ice_get_ptp_src_clock_index(hw);
- cmd_val = tmr_idx << SEL_CPK_SRC;
+
+ return tmr_idx << SEL_CPK_SRC | cmd_val;
+}
+
+/**
+ * ice_ptp_tmr_cmd_to_port_reg - Convert to port timer command value
+ * @hw: pointer to HW struct
+ * @cmd: Timer command
+ *
+ * Note that some hardware families use a different command register value for
+ * the PHY ports, while other hardware families use the same register values
+ * as the source timer.
+ *
+ * Return: the PHY port timer command register value for the given PTP timer
+ * command.
+ */
+static u32 ice_ptp_tmr_cmd_to_port_reg(struct ice_hw *hw,
+ enum ice_ptp_tmr_cmd cmd)
+{
+ u32 cmd_val, tmr_idx;
+
+ /* Certain hardware families share the same register values for the
+ * port register and source timer register.
+ */
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ case ICE_MAC_E830:
+ return ice_ptp_tmr_cmd_to_src_reg(hw, cmd) & TS_CMD_MASK_E810;
+ default:
+ break;
+ }
switch (cmd) {
case ICE_PTP_INIT_TIME:
- cmd_val |= GLTSYN_CMD_INIT_TIME;
+ cmd_val = PHY_CMD_INIT_TIME;
break;
case ICE_PTP_INIT_INCVAL:
- cmd_val |= GLTSYN_CMD_INIT_INCVAL;
+ cmd_val = PHY_CMD_INIT_INCVAL;
break;
case ICE_PTP_ADJ_TIME:
- cmd_val |= GLTSYN_CMD_ADJ_TIME;
+ cmd_val = PHY_CMD_ADJ_TIME;
break;
case ICE_PTP_ADJ_TIME_AT_TIME:
- cmd_val |= GLTSYN_CMD_ADJ_INIT_TIME;
+ cmd_val = PHY_CMD_ADJ_TIME_AT_TIME;
break;
case ICE_PTP_READ_TIME:
- cmd_val |= GLTSYN_CMD_READ_TIME;
+ cmd_val = PHY_CMD_READ_TIME;
break;
case ICE_PTP_NOP:
+ cmd_val = 0;
break;
+ default:
+ dev_warn(ice_hw_to_dev(hw),
+ "Ignoring unrecognized timer command %u\n", cmd);
+ cmd_val = 0;
}
+ tmr_idx = ice_get_ptp_src_clock_index(hw);
+
+ return tmr_idx << SEL_PHY_SRC | cmd_val;
+}
+
+/**
+ * ice_ptp_src_cmd - Prepare source timer for a timer command
+ * @hw: pointer to HW structure
+ * @cmd: Timer command
+ *
+ * Prepare the source timer for an upcoming timer sync command.
+ */
+void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
+{
+ struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
+ u32 cmd_val = ice_ptp_tmr_cmd_to_src_reg(hw, cmd);
+
+ if (!ice_is_primary(hw))
+ hw = ice_get_primary_hw(pf);
+
wr32(hw, GLTSYN_CMD, cmd_val);
}
@@ -274,10 +351,1854 @@ void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
*/
static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw)
{
+ struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
+
+ if (!ice_is_primary(hw))
+ hw = ice_get_primary_hw(pf);
+
+ guard(spinlock)(&pf->adapter->ptp_gltsyn_time_lock);
wr32(hw, GLTSYN_CMD_SYNC, SYNC_EXEC_CMD);
ice_flush(hw);
}
+/**
+ * ice_ptp_cfg_sync_delay - Configure PHC to PHY synchronization delay
+ * @hw: pointer to HW struct
+ * @delay: delay between PHC and PHY SYNC command execution in nanoseconds
+ */
+static void ice_ptp_cfg_sync_delay(const struct ice_hw *hw, u32 delay)
+{
+ wr32(hw, GLTSYN_SYNC_DLAY, delay);
+ ice_flush(hw);
+}
+
+/* 56G PHY device functions
+ *
+ * The following functions operate on devices with the ETH 56G PHY.
+ */
+
+/**
+ * ice_ptp_get_dest_dev_e825 - get destination PHY for given port number
+ * @hw: pointer to the HW struct
+ * @port: destination port
+ *
+ * Return: destination sideband queue PHY device.
+ */
+static enum ice_sbq_dev_id ice_ptp_get_dest_dev_e825(struct ice_hw *hw,
+ u8 port)
+{
+ u8 curr_phy, tgt_phy;
+
+ tgt_phy = port >= hw->ptp.ports_per_phy;
+ curr_phy = hw->lane_num >= hw->ptp.ports_per_phy;
+ /* In the driver, lanes 4..7 are in fact 0..3 on a second PHY.
+ * On a single complex E825C, PHY 0 is always destination device phy_0
+ * and PHY 1 is phy_0_peer.
+	 * On dual complex E825C, device phy_0 points to the PHY on the current
+	 * complex and phy_0_peer to the PHY on the other complex.
+ */
+ if ((!ice_is_dual(hw) && tgt_phy == 1) ||
+ (ice_is_dual(hw) && tgt_phy != curr_phy))
+ return ice_sbq_dev_phy_0_peer;
+ else
+ return ice_sbq_dev_phy_0;
+}
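+
+/* Worked example (assuming 4 ports per PHY on E825C): a caller on
+ * lane 1 (curr_phy = 0) targeting port 5 (tgt_phy = 1) is directed to
+ * ice_sbq_dev_phy_0_peer on a single-complex part, and also on a
+ * dual-complex part because tgt_phy differs from curr_phy; targeting
+ * port 2 (tgt_phy = 0) stays on ice_sbq_dev_phy_0 in both cases.
+ */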
+
+/**
+ * ice_write_phy_eth56g - Write a PHY port register
+ * @hw: pointer to the HW struct
+ * @port: destination port
+ * @addr: PHY register address
+ * @val: Value to write
+ *
+ * Return: 0 on success, other error codes when failed to write to PHY
+ */
+static int ice_write_phy_eth56g(struct ice_hw *hw, u8 port, u32 addr, u32 val)
+{
+ struct ice_sbq_msg_input msg = {
+ .dest_dev = ice_ptp_get_dest_dev_e825(hw, port),
+ .opcode = ice_sbq_msg_wr,
+ .msg_addr_low = lower_16_bits(addr),
+ .msg_addr_high = upper_16_bits(addr),
+ .data = val
+ };
+ int err;
+
+ err = ice_sbq_rw_reg(hw, &msg, LIBIE_AQ_FLAG_RD);
+ if (err)
+ ice_debug(hw, ICE_DBG_PTP, "PTP failed to send msg to phy %d\n",
+ err);
+
+ return err;
+}
+
+/**
+ * ice_read_phy_eth56g - Read a PHY port register
+ * @hw: pointer to the HW struct
+ * @port: destination port
+ * @addr: PHY register address
+ * @val: Pointer to the value to read (out param)
+ *
+ * Return: 0 on success, other error codes when failed to read from PHY
+ */
+static int ice_read_phy_eth56g(struct ice_hw *hw, u8 port, u32 addr, u32 *val)
+{
+ struct ice_sbq_msg_input msg = {
+ .dest_dev = ice_ptp_get_dest_dev_e825(hw, port),
+ .opcode = ice_sbq_msg_rd,
+ .msg_addr_low = lower_16_bits(addr),
+ .msg_addr_high = upper_16_bits(addr)
+ };
+ int err;
+
+ err = ice_sbq_rw_reg(hw, &msg, LIBIE_AQ_FLAG_RD);
+ if (err)
+ ice_debug(hw, ICE_DBG_PTP, "PTP failed to send msg to phy %d\n",
+ err);
+ else
+ *val = msg.data;
+
+ return err;
+}
+
+/**
+ * ice_phy_res_address_eth56g - Calculate a PHY port register address
+ * @hw: pointer to the HW struct
+ * @lane: Lane number to be written
+ * @res_type: resource type (register/memory)
+ * @offset: Offset from PHY port register base
+ * @addr: The result address
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - invalid port number or resource type
+ */
+static int ice_phy_res_address_eth56g(struct ice_hw *hw, u8 lane,
+ enum eth56g_res_type res_type,
+ u32 offset,
+ u32 *addr)
+{
+ if (res_type >= NUM_ETH56G_PHY_RES)
+ return -EINVAL;
+
+ /* Lanes 4..7 are in fact 0..3 on a second PHY */
+ lane %= hw->ptp.ports_per_phy;
+ *addr = eth56g_phy_res[res_type].base_addr +
+ lane * eth56g_phy_res[res_type].step + offset;
+
+ return 0;
+}
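+
+/* Worked example: for ETH56G_PHY_REG_PTP (base 0x092000, step 0x98, see
+ * eth56g_phy_res), lane 6 folds to lane 2 assuming 4 ports per PHY, so
+ * an offset of 0x10 resolves to 0x092000 + 2 * 0x98 + 0x10 = 0x092140.
+ */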
+
+/**
+ * ice_write_port_eth56g - Write a PHY port register
+ * @hw: pointer to the HW struct
+ * @offset: PHY register offset
+ * @port: Port number
+ * @val: Value to write
+ * @res_type: resource type (register/memory)
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - invalid port number or resource type
+ * * %other - failed to write to PHY
+ */
+static int ice_write_port_eth56g(struct ice_hw *hw, u8 port, u32 offset,
+ u32 val, enum eth56g_res_type res_type)
+{
+ u32 addr;
+ int err;
+
+ if (port >= hw->ptp.num_lports)
+ return -EINVAL;
+
+ err = ice_phy_res_address_eth56g(hw, port, res_type, offset, &addr);
+ if (err)
+ return err;
+
+ return ice_write_phy_eth56g(hw, port, addr, val);
+}
+
+/**
+ * ice_read_port_eth56g - Read a PHY port register
+ * @hw: pointer to the HW struct
+ * @offset: PHY register offset
+ * @port: Port number
+ * @val: Pointer to the value to read (out param)
+ * @res_type: resource type (register/memory)
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - invalid port number or resource type
+ * * %other - failed to read from PHY
+ */
+static int ice_read_port_eth56g(struct ice_hw *hw, u8 port, u32 offset,
+ u32 *val, enum eth56g_res_type res_type)
+{
+ u32 addr;
+ int err;
+
+ if (port >= hw->ptp.num_lports)
+ return -EINVAL;
+
+ err = ice_phy_res_address_eth56g(hw, port, res_type, offset, &addr);
+ if (err)
+ return err;
+
+ return ice_read_phy_eth56g(hw, port, addr, val);
+}
+
+/**
+ * ice_write_ptp_reg_eth56g - Write a PHY port register
+ * @hw: pointer to the HW struct
+ * @port: Port number to be written
+ * @offset: Offset from PHY port register base
+ * @val: Value to write
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - invalid port number or resource type
+ * * %other - failed to write to PHY
+ */
+static int ice_write_ptp_reg_eth56g(struct ice_hw *hw, u8 port, u16 offset,
+ u32 val)
+{
+ return ice_write_port_eth56g(hw, port, offset, val, ETH56G_PHY_REG_PTP);
+}
+
+/**
+ * ice_write_mac_reg_eth56g - Write a MAC PHY port register
+ * @hw: pointer to the HW struct
+ * @port: Port number to be written
+ * @offset: Offset from PHY port register base
+ * @val: Value to write
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - invalid port number or resource type
+ * * %other - failed to write to PHY
+ */
+static int ice_write_mac_reg_eth56g(struct ice_hw *hw, u8 port, u32 offset,
+ u32 val)
+{
+ return ice_write_port_eth56g(hw, port, offset, val, ETH56G_PHY_REG_MAC);
+}
+
+/**
+ * ice_write_xpcs_reg_eth56g - Write a PHY port register
+ * @hw: pointer to the HW struct
+ * @port: Port number to be written
+ * @offset: Offset from PHY port register base
+ * @val: Value to write
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - invalid port number or resource type
+ * * %other - failed to write to PHY
+ */
+static int ice_write_xpcs_reg_eth56g(struct ice_hw *hw, u8 port, u32 offset,
+ u32 val)
+{
+ return ice_write_port_eth56g(hw, port, offset, val,
+ ETH56G_PHY_REG_XPCS);
+}
+
+/**
+ * ice_read_ptp_reg_eth56g - Read a PHY port register
+ * @hw: pointer to the HW struct
+ * @port: Port number to be read
+ * @offset: Offset from PHY port register base
+ * @val: Pointer to the value to read (out param)
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - invalid port number or resource type
+ * * %other - failed to read from PHY
+ */
+static int ice_read_ptp_reg_eth56g(struct ice_hw *hw, u8 port, u16 offset,
+ u32 *val)
+{
+ return ice_read_port_eth56g(hw, port, offset, val, ETH56G_PHY_REG_PTP);
+}
+
+/**
+ * ice_read_mac_reg_eth56g - Read a PHY port register
+ * @hw: pointer to the HW struct
+ * @port: Port number to be read
+ * @offset: Offset from PHY port register base
+ * @val: Pointer to the value to read (out param)
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - invalid port number or resource type
+ * * %other - failed to read from PHY
+ */
+static int ice_read_mac_reg_eth56g(struct ice_hw *hw, u8 port, u16 offset,
+ u32 *val)
+{
+ return ice_read_port_eth56g(hw, port, offset, val, ETH56G_PHY_REG_MAC);
+}
+
+/**
+ * ice_read_gpcs_reg_eth56g - Read a PHY port register
+ * @hw: pointer to the HW struct
+ * @port: Port number to be read
+ * @offset: Offset from PHY port register base
+ * @val: Pointer to the value to read (out param)
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - invalid port number or resource type
+ * * %other - failed to read from PHY
+ */
+static int ice_read_gpcs_reg_eth56g(struct ice_hw *hw, u8 port, u16 offset,
+ u32 *val)
+{
+ return ice_read_port_eth56g(hw, port, offset, val, ETH56G_PHY_REG_GPCS);
+}
+
+/**
+ * ice_read_port_mem_eth56g - Read a PHY port memory location
+ * @hw: pointer to the HW struct
+ * @port: Port number to be read
+ * @offset: Offset from PHY port register base
+ * @val: Pointer to the value to read (out param)
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - invalid port number or resource type
+ * * %other - failed to read from PHY
+ */
+static int ice_read_port_mem_eth56g(struct ice_hw *hw, u8 port, u16 offset,
+ u32 *val)
+{
+ return ice_read_port_eth56g(hw, port, offset, val, ETH56G_PHY_MEM_PTP);
+}
+
+/**
+ * ice_write_port_mem_eth56g - Write a PHY port memory location
+ * @hw: pointer to the HW struct
+ * @port: Port number to be written
+ * @offset: Offset from PHY port register base
+ * @val: Value to write
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - invalid port number or resource type
+ * * %other - failed to write to PHY
+ */
+static int ice_write_port_mem_eth56g(struct ice_hw *hw, u8 port, u16 offset,
+ u32 val)
+{
+ return ice_write_port_eth56g(hw, port, offset, val, ETH56G_PHY_MEM_PTP);
+}
+
+/**
+ * ice_write_quad_ptp_reg_eth56g - Write a PHY quad register
+ * @hw: pointer to the HW struct
+ * @offset: PHY register offset
+ * @port: Port number
+ * @val: Value to write
+ *
+ * Return:
+ * * %0 - success
+ * * %EIO - invalid port number or resource type
+ * * %other - failed to write to PHY
+ */
+static int ice_write_quad_ptp_reg_eth56g(struct ice_hw *hw, u8 port,
+ u32 offset, u32 val)
+{
+ u32 addr;
+
+ if (port >= hw->ptp.num_lports)
+ return -EIO;
+
+ addr = eth56g_phy_res[ETH56G_PHY_REG_PTP].base_addr + offset;
+
+ return ice_write_phy_eth56g(hw, port, addr, val);
+}
+
+/**
+ * ice_read_quad_ptp_reg_eth56g - Read a PHY quad register
+ * @hw: pointer to the HW struct
+ * @offset: PHY register offset
+ * @port: Port number
+ * @val: Value to read
+ *
+ * Return:
+ * * %0 - success
+ * * %EIO - invalid port number or resource type
+ * * %other - failed to read from PHY
+ */
+static int ice_read_quad_ptp_reg_eth56g(struct ice_hw *hw, u8 port,
+ u32 offset, u32 *val)
+{
+ u32 addr;
+
+ if (port >= hw->ptp.num_lports)
+ return -EIO;
+
+ addr = eth56g_phy_res[ETH56G_PHY_REG_PTP].base_addr + offset;
+
+ return ice_read_phy_eth56g(hw, port, addr, val);
+}
+
+/**
+ * ice_is_64b_phy_reg_eth56g - Check if this is a 64bit PHY register
+ * @low_addr: the low address to check
+ * @high_addr: on return, contains the high address of the 64bit register
+ *
+ * Write the appropriate high register offset to use.
+ *
+ * Return: true if the provided low address is one of the known 64bit PHY values
+ * represented as two 32bit registers, false otherwise.
+ */
+static bool ice_is_64b_phy_reg_eth56g(u16 low_addr, u16 *high_addr)
+{
+ switch (low_addr) {
+ case PHY_REG_TX_TIMER_INC_PRE_L:
+ *high_addr = PHY_REG_TX_TIMER_INC_PRE_U;
+ return true;
+ case PHY_REG_RX_TIMER_INC_PRE_L:
+ *high_addr = PHY_REG_RX_TIMER_INC_PRE_U;
+ return true;
+ case PHY_REG_TX_CAPTURE_L:
+ *high_addr = PHY_REG_TX_CAPTURE_U;
+ return true;
+ case PHY_REG_RX_CAPTURE_L:
+ *high_addr = PHY_REG_RX_CAPTURE_U;
+ return true;
+ case PHY_REG_TOTAL_TX_OFFSET_L:
+ *high_addr = PHY_REG_TOTAL_TX_OFFSET_U;
+ return true;
+ case PHY_REG_TOTAL_RX_OFFSET_L:
+ *high_addr = PHY_REG_TOTAL_RX_OFFSET_U;
+ return true;
+ case PHY_REG_TX_MEMORY_STATUS_L:
+ *high_addr = PHY_REG_TX_MEMORY_STATUS_U;
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * ice_is_40b_phy_reg_eth56g - Check if this is a 40bit PHY register
+ * @low_addr: the low address to check
+ * @high_addr: on return, contains the high address of the 40bit value
+ *
+ * Write the appropriate high register offset to use.
+ *
+ * Return: true if the provided low address is one of the known 40bit PHY
+ * values split into two registers with the lower 8 bits in the low register and
+ * the upper 32 bits in the high register, false otherwise.
+ */
+static bool ice_is_40b_phy_reg_eth56g(u16 low_addr, u16 *high_addr)
+{
+ switch (low_addr) {
+ case PHY_REG_TIMETUS_L:
+ *high_addr = PHY_REG_TIMETUS_U;
+ return true;
+ case PHY_PCS_REF_TUS_L:
+ *high_addr = PHY_PCS_REF_TUS_U;
+ return true;
+ case PHY_PCS_REF_INC_L:
+ *high_addr = PHY_PCS_REF_INC_U;
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * ice_read_64b_phy_reg_eth56g - Read a 64bit value from PHY registers
+ * @hw: pointer to the HW struct
+ * @port: PHY port to read from
+ * @low_addr: offset of the lower register to read from
+ * @val: on return, the contents of the 64bit value from the PHY registers
+ * @res_type: resource type
+ *
+ * Check if the caller has specified a known 64 bit register offset and read
+ * the two registers associated with a 64bit value and return it in the val
+ * pointer.
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - not a 64 bit register
+ * * %other - failed to read from PHY
+ */
+static int ice_read_64b_phy_reg_eth56g(struct ice_hw *hw, u8 port, u16 low_addr,
+ u64 *val, enum eth56g_res_type res_type)
+{
+ u16 high_addr;
+ u32 lo, hi;
+ int err;
+
+ if (!ice_is_64b_phy_reg_eth56g(low_addr, &high_addr))
+ return -EINVAL;
+
+ err = ice_read_port_eth56g(hw, port, low_addr, &lo, res_type);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read from low register %#08x\n, err %d",
+ low_addr, err);
+ return err;
+ }
+
+ err = ice_read_port_eth56g(hw, port, high_addr, &hi, res_type);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read from high register %#08x\n, err %d",
+ high_addr, err);
+ return err;
+ }
+
+ *val = ((u64)hi << 32) | lo;
+
+ return 0;
+}
+
+/**
+ * ice_read_64b_ptp_reg_eth56g - Read a 64bit value from PHY registers
+ * @hw: pointer to the HW struct
+ * @port: PHY port to read from
+ * @low_addr: offset of the lower register to read from
+ * @val: on return, the contents of the 64bit value from the PHY registers
+ *
+ * Check if the caller has specified a known 64 bit register offset and read
+ * the two registers associated with a 64bit value and return it in the val
+ * pointer.
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - not a 64 bit register
+ * * %other - failed to read from PHY
+ */
+static int ice_read_64b_ptp_reg_eth56g(struct ice_hw *hw, u8 port, u16 low_addr,
+ u64 *val)
+{
+ return ice_read_64b_phy_reg_eth56g(hw, port, low_addr, val,
+ ETH56G_PHY_REG_PTP);
+}
+
+/**
+ * ice_write_40b_phy_reg_eth56g - Write a 40b value to the PHY
+ * @hw: pointer to the HW struct
+ * @port: port to write to
+ * @low_addr: offset of the low register
+ * @val: 40b value to write
+ * @res_type: resource type
+ *
+ * Check if the caller has specified a known 40 bit register offset and write
+ * provided 40b value to the two associated registers by splitting it up into
+ * two chunks, the lower 8 bits and the upper 32 bits.
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - not a 40 bit register
+ * * %other - failed to write to PHY
+ */
+static int ice_write_40b_phy_reg_eth56g(struct ice_hw *hw, u8 port,
+ u16 low_addr, u64 val,
+ enum eth56g_res_type res_type)
+{
+ u16 high_addr;
+ u32 lo, hi;
+ int err;
+
+ if (!ice_is_40b_phy_reg_eth56g(low_addr, &high_addr))
+ return -EINVAL;
+
+ lo = FIELD_GET(P_REG_40B_LOW_M, val);
+ hi = (u32)(val >> P_REG_40B_HIGH_S);
+
+ err = ice_write_port_eth56g(hw, port, low_addr, lo, res_type);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x\n, err %d",
+ low_addr, err);
+ return err;
+ }
+
+ err = ice_write_port_eth56g(hw, port, high_addr, hi, res_type);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x\n, err %d",
+ high_addr, err);
+ return err;
+ }
+
+ return 0;
+}
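+
+/* Worked example: writing the 40b value 0x123456789A with this helper
+ * (assuming P_REG_40B_LOW_M covers bits 7:0 and P_REG_40B_HIGH_S is 8,
+ * per the 8/32 bit split described above) stores lo = 0x9A in the low
+ * register and hi = 0x12345678 in the high register.
+ */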
+
+/**
+ * ice_write_40b_ptp_reg_eth56g - Write a 40b value to the PHY
+ * @hw: pointer to the HW struct
+ * @port: port to write to
+ * @low_addr: offset of the low register
+ * @val: 40b value to write
+ *
+ * Check if the caller has specified a known 40 bit register offset and write
+ * provided 40b value to the two associated registers by splitting it up into
+ * two chunks, the lower 8 bits and the upper 32 bits.
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - not a 40 bit register
+ * * %other - failed to write to PHY
+ */
+static int ice_write_40b_ptp_reg_eth56g(struct ice_hw *hw, u8 port,
+ u16 low_addr, u64 val)
+{
+ return ice_write_40b_phy_reg_eth56g(hw, port, low_addr, val,
+ ETH56G_PHY_REG_PTP);
+}
+
+/**
+ * ice_write_64b_phy_reg_eth56g - Write a 64bit value to PHY registers
+ * @hw: pointer to the HW struct
+ * @port: PHY port to read from
+ * @low_addr: offset of the lower register to read from
+ * @val: the contents of the 64bit value to write to PHY
+ * @res_type: resource type
+ *
+ * Check if the caller has specified a known 64 bit register offset and write
+ * the 64bit value to the two associated 32bit PHY registers.
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - not a 64 bit register
+ * * %other - failed to write to PHY
+ */
+static int ice_write_64b_phy_reg_eth56g(struct ice_hw *hw, u8 port,
+ u16 low_addr, u64 val,
+ enum eth56g_res_type res_type)
+{
+ u16 high_addr;
+ u32 lo, hi;
+ int err;
+
+ if (!ice_is_64b_phy_reg_eth56g(low_addr, &high_addr))
+ return -EINVAL;
+
+ lo = lower_32_bits(val);
+ hi = upper_32_bits(val);
+
+ err = ice_write_port_eth56g(hw, port, low_addr, lo, res_type);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x\n, err %d",
+ low_addr, err);
+ return err;
+ }
+
+ err = ice_write_port_eth56g(hw, port, high_addr, hi, res_type);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x\n, err %d",
+ high_addr, err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_write_64b_ptp_reg_eth56g - Write a 64bit value to PHY registers
+ * @hw: pointer to the HW struct
+ * @port: PHY port to read from
+ * @low_addr: offset of the lower register to read from
+ * @val: the contents of the 64bit value to write to PHY
+ *
+ * Check if the caller has specified a known 64 bit register offset and write
+ * the 64bit value to the two associated 32bit PHY registers.
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - not a 64 bit register
+ * * %other - failed to write to PHY
+ */
+static int ice_write_64b_ptp_reg_eth56g(struct ice_hw *hw, u8 port,
+ u16 low_addr, u64 val)
+{
+ return ice_write_64b_phy_reg_eth56g(hw, port, low_addr, val,
+ ETH56G_PHY_REG_PTP);
+}
+
+/**
+ * ice_read_ptp_tstamp_eth56g - Read a PHY timestamp out of the port memory
+ * @hw: pointer to the HW struct
+ * @port: the port to read from
+ * @idx: the timestamp index to read
+ * @tstamp: on return, the 40bit timestamp value
+ *
+ * Read a 40bit timestamp value out of the two associated entries in the
+ * port memory block of the internal PHYs of the 56G devices.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to read from PHY
+ */
+static int ice_read_ptp_tstamp_eth56g(struct ice_hw *hw, u8 port, u8 idx,
+ u64 *tstamp)
+{
+ u16 lo_addr, hi_addr;
+ u32 lo, hi;
+ int err;
+
+ lo_addr = (u16)PHY_TSTAMP_L(idx);
+ hi_addr = (u16)PHY_TSTAMP_U(idx);
+
+ err = ice_read_port_mem_eth56g(hw, port, lo_addr, &lo);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n",
+ err);
+ return err;
+ }
+
+ err = ice_read_port_mem_eth56g(hw, port, hi_addr, &hi);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n",
+ err);
+ return err;
+ }
+
+ /* For 56G based internal PHYs, the timestamp is reported with the
+ * lower 8 bits in the low register, and the upper 32 bits in the high
+ * register.
+ */
+ *tstamp = FIELD_PREP(PHY_40B_HIGH_M, hi) |
+ FIELD_PREP(PHY_40B_LOW_M, lo);
+ return 0;
+}
+
+/**
+ * ice_clear_ptp_tstamp_eth56g - Clear a timestamp from the quad block
+ * @hw: pointer to the HW struct
+ * @port: the quad to read from
+ * @idx: the timestamp index to reset
+ *
+ * Read and then forcibly clear the timestamp index to ensure the valid bit is
+ * cleared and the timestamp status bit is reset in the PHY port memory of
+ * internal PHYs of the 56G devices.
+ *
+ * To directly clear the contents of the timestamp block entirely, discarding
+ * all timestamp data at once, software should instead use
+ * ice_ptp_reset_ts_memory_quad_eth56g().
+ *
+ * This function should only be called on an idx whose bit is set according to
+ * ice_get_phy_tx_tstamp_ready().
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to write to PHY
+ */
+static int ice_clear_ptp_tstamp_eth56g(struct ice_hw *hw, u8 port, u8 idx)
+{
+ u64 unused_tstamp;
+ u16 lo_addr;
+ int err;
+
+ /* Read the timestamp register to ensure the timestamp status bit is
+ * cleared.
+ */
+ err = ice_read_ptp_tstamp_eth56g(hw, port, idx, &unused_tstamp);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read the PHY timestamp register for port %u, idx %u, err %d\n",
+ port, idx, err);
+ }
+
+ lo_addr = (u16)PHY_TSTAMP_L(idx);
+
+ err = ice_write_port_mem_eth56g(hw, port, lo_addr, 0);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register for port %u, idx %u, err %d\n",
+ port, idx, err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_ptp_reset_ts_memory_eth56g - Clear all timestamps from the port block
+ * @hw: pointer to the HW struct
+ */
+static void ice_ptp_reset_ts_memory_eth56g(struct ice_hw *hw)
+{
+ unsigned int port;
+
+ for (port = 0; port < hw->ptp.num_lports; port++) {
+ ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_MEMORY_STATUS_L,
+ 0);
+ ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_MEMORY_STATUS_U,
+ 0);
+ }
+}
+
+/**
+ * ice_ptp_prep_port_time_eth56g - Prepare one PHY port with initial time
+ * @hw: pointer to the HW struct
+ * @port: port number
+ * @time: time to initialize the PHY port clocks to
+ *
+ * Write a new initial time value into registers of a specific PHY port.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to write to PHY
+ */
+static int ice_ptp_prep_port_time_eth56g(struct ice_hw *hw, u8 port,
+ u64 time)
+{
+ int err;
+
+ /* Tx case */
+ err = ice_write_64b_ptp_reg_eth56g(hw, port, PHY_REG_TX_TIMER_INC_PRE_L,
+ time);
+ if (err)
+ return err;
+
+ /* Rx case */
+ return ice_write_64b_ptp_reg_eth56g(hw, port,
+ PHY_REG_RX_TIMER_INC_PRE_L, time);
+}
+
+/**
+ * ice_ptp_prep_phy_time_eth56g - Prepare PHY port with initial time
+ * @hw: pointer to the HW struct
+ * @time: Time to initialize the PHY port clocks to
+ *
+ * Program the PHY port registers with a new initial time value. The port
+ * clock will be initialized once the driver issues an ICE_PTP_INIT_TIME sync
+ * command. The time value is the upper 32 bits of the PHY timer, usually in
+ * units of nominal nanoseconds.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to write to PHY
+ */
+static int ice_ptp_prep_phy_time_eth56g(struct ice_hw *hw, u32 time)
+{
+ u64 phy_time;
+ u8 port;
+
+ /* The time represents the upper 32 bits of the PHY timer, so we need
+ * to shift to account for this when programming.
+ */
+ phy_time = (u64)time << 32;
+
+ for (port = 0; port < hw->ptp.num_lports; port++) {
+ int err;
+
+ err = ice_ptp_prep_port_time_eth56g(hw, port, phy_time);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write init time for port %u, err %d\n",
+ port, err);
+ return err;
+ }
+ }
+
+ return 0;
+}
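
As a worked illustration (values assumed, not from the patch): since @time holds only the upper 32 bits of the port timer, programming 0x12345678 produces a 64-bit shadow value with the lower, sub-second bits zeroed.

	u32 time = 0x12345678;
	u64 phy_time = (u64)time << 32;	/* 0x1234567800000000ULL */
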
+
+/**
+ * ice_ptp_prep_port_adj_eth56g - Prepare a single port for time adjust
+ * @hw: pointer to HW struct
+ * @port: Port number to be programmed
+ * @time: time in cycles to adjust the port clocks
+ *
+ * Program the port for an atomic adjustment by writing the Tx and Rx timer
+ * registers. The atomic adjustment won't be completed until the driver issues
+ * an ICE_PTP_ADJ_TIME command.
+ *
+ * Note that time is not in units of nanoseconds. It is in clock time
+ * including the lower sub-nanosecond portion of the port timer.
+ *
+ * Negative adjustments are supported using 2s complement arithmetic.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to write to PHY
+ */
+static int ice_ptp_prep_port_adj_eth56g(struct ice_hw *hw, u8 port, s64 time)
+{
+ u32 l_time, u_time;
+ int err;
+
+ l_time = lower_32_bits(time);
+ u_time = upper_32_bits(time);
+
+ /* Tx case */
+ err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_TIMER_INC_PRE_L,
+ l_time);
+ if (err)
+ goto exit_err;
+
+ err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_TIMER_INC_PRE_U,
+ u_time);
+ if (err)
+ goto exit_err;
+
+ /* Rx case */
+ err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_RX_TIMER_INC_PRE_L,
+ l_time);
+ if (err)
+ goto exit_err;
+
+ err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_RX_TIMER_INC_PRE_U,
+ u_time);
+ if (err)
+ goto exit_err;
+
+ return 0;
+
+exit_err:
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write time adjust for port %u, err %d\n",
+ port, err);
+ return err;
+}
+
+/**
+ * ice_ptp_prep_phy_adj_eth56g - Prep PHY ports for a time adjustment
+ * @hw: pointer to HW struct
+ * @adj: adjustment in nanoseconds
+ *
+ * Prepare the PHY ports for an atomic time adjustment by programming the PHY
+ * Tx and Rx port registers. The actual adjustment is completed by issuing an
+ * ICE_PTP_ADJ_TIME or ICE_PTP_ADJ_TIME_AT_TIME sync command.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to write to PHY
+ */
+static int ice_ptp_prep_phy_adj_eth56g(struct ice_hw *hw, s32 adj)
+{
+ s64 cycles;
+ u8 port;
+
+ /* The port clock supports adjustment of the sub-nanosecond portion of
+ * the clock (lowest 32 bits). We shift the provided adjustment in
+ * nanoseconds by 32 to calculate the appropriate adjustment to program
+ * into the PHY ports.
+ */
+ cycles = (s64)adj << 32;
+
+ for (port = 0; port < hw->ptp.num_lports; port++) {
+ int err;
+
+ err = ice_ptp_prep_port_adj_eth56g(hw, port, cycles);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
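
A hedged example of the 2s complement handling mentioned above, using the same shift expression as the function: a negative nanosecond adjustment becomes a negative 64-bit cycle count whose upper and lower 32-bit halves are written to the INC_PRE registers as-is.

	s64 cycles = (s64)-5 << 32;		/* -5 ns in cycle units */
	u32 lo = lower_32_bits(cycles);		/* 0x00000000 */
	u32 hi = upper_32_bits(cycles);		/* 0xFFFFFFFB */
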
+
+/**
+ * ice_ptp_prep_phy_incval_eth56g - Prepare PHY ports for a new increment value
+ * @hw: pointer to HW struct
+ * @incval: new increment value to prepare
+ *
+ * Prepare each of the PHY ports for a new increment value by programming the
+ * port's TIMETUS registers. The new increment value will be updated after
+ * issuing an ICE_PTP_INIT_INCVAL command.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to write to PHY
+ */
+static int ice_ptp_prep_phy_incval_eth56g(struct ice_hw *hw, u64 incval)
+{
+ u8 port;
+
+ for (port = 0; port < hw->ptp.num_lports; port++) {
+ int err;
+
+ err = ice_write_40b_ptp_reg_eth56g(hw, port, PHY_REG_TIMETUS_L,
+ incval);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write incval for port %u, err %d\n",
+ port, err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_ptp_read_port_capture_eth56g - Read a port's local time capture
+ * @hw: pointer to HW struct
+ * @port: Port number to read
+ * @tx_ts: on return, the Tx port time capture
+ * @rx_ts: on return, the Rx port time capture
+ *
+ * Read the port's Tx and Rx local time capture values.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to read from PHY
+ */
+static int ice_ptp_read_port_capture_eth56g(struct ice_hw *hw, u8 port,
+ u64 *tx_ts, u64 *rx_ts)
+{
+ int err;
+
+ /* Tx case */
+ err = ice_read_64b_ptp_reg_eth56g(hw, port, PHY_REG_TX_CAPTURE_L,
+ tx_ts);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read REG_TX_CAPTURE, err %d\n",
+ err);
+ return err;
+ }
+
+ ice_debug(hw, ICE_DBG_PTP, "tx_init = %#016llx\n", *tx_ts);
+
+ /* Rx case */
+ err = ice_read_64b_ptp_reg_eth56g(hw, port, PHY_REG_RX_CAPTURE_L,
+ rx_ts);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_CAPTURE, err %d\n",
+ err);
+ return err;
+ }
+
+ ice_debug(hw, ICE_DBG_PTP, "rx_init = %#016llx\n", *rx_ts);
+
+ return 0;
+}
+
+/**
+ * ice_ptp_write_port_cmd_eth56g - Prepare a single PHY port for a timer command
+ * @hw: pointer to HW struct
+ * @port: Port to which cmd has to be sent
+ * @cmd: Command to be sent to the port
+ *
+ * Prepare the requested port for an upcoming timer sync command.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to write to PHY
+ */
+static int ice_ptp_write_port_cmd_eth56g(struct ice_hw *hw, u8 port,
+ enum ice_ptp_tmr_cmd cmd)
+{
+ u32 val = ice_ptp_tmr_cmd_to_port_reg(hw, cmd);
+ int err;
+
+ /* Tx case */
+ err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_TMR_CMD, val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_TMR_CMD, err %d\n",
+ err);
+ return err;
+ }
+
+ /* Rx case */
+ err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_RX_TMR_CMD, val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write back RX_TMR_CMD, err %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_phy_get_speed_eth56g - Get link speed based on PHY link type
+ * @li: pointer to link information struct
+ *
+ * Return: simplified ETH56G PHY speed
+ */
+static enum ice_eth56g_link_spd
+ice_phy_get_speed_eth56g(struct ice_link_status *li)
+{
+ u16 speed = ice_get_link_speed_based_on_phy_type(li->phy_type_low,
+ li->phy_type_high);
+
+ switch (speed) {
+ case ICE_AQ_LINK_SPEED_1000MB:
+ return ICE_ETH56G_LNK_SPD_1G;
+ case ICE_AQ_LINK_SPEED_2500MB:
+ return ICE_ETH56G_LNK_SPD_2_5G;
+ case ICE_AQ_LINK_SPEED_10GB:
+ return ICE_ETH56G_LNK_SPD_10G;
+ case ICE_AQ_LINK_SPEED_25GB:
+ return ICE_ETH56G_LNK_SPD_25G;
+ case ICE_AQ_LINK_SPEED_40GB:
+ return ICE_ETH56G_LNK_SPD_40G;
+ case ICE_AQ_LINK_SPEED_50GB:
+ switch (li->phy_type_low) {
+ case ICE_PHY_TYPE_LOW_50GBASE_SR:
+ case ICE_PHY_TYPE_LOW_50GBASE_FR:
+ case ICE_PHY_TYPE_LOW_50GBASE_LR:
+ case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
+ case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_50G_AUI1:
+ return ICE_ETH56G_LNK_SPD_50G;
+ default:
+ return ICE_ETH56G_LNK_SPD_50G2;
+ }
+ case ICE_AQ_LINK_SPEED_100GB:
+ if (li->phy_type_high ||
+ li->phy_type_low == ICE_PHY_TYPE_LOW_100GBASE_SR2)
+ return ICE_ETH56G_LNK_SPD_100G2;
+ else
+ return ICE_ETH56G_LNK_SPD_100G;
+ default:
+ return ICE_ETH56G_LNK_SPD_1G;
+ }
+}
+
+/**
+ * ice_phy_cfg_parpcs_eth56g - Configure TUs per PAR/PCS clock cycle
+ * @hw: pointer to the HW struct
+ * @port: port to configure
+ *
+ * Configure the number of TUs for the PAR and PCS clocks used as part of the
+ * timestamp calibration process.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - PHY read/write failed
+ */
+static int ice_phy_cfg_parpcs_eth56g(struct ice_hw *hw, u8 port)
+{
+ u32 val;
+ int err;
+
+ err = ice_write_xpcs_reg_eth56g(hw, port, PHY_VENDOR_TXLANE_THRESH,
+ ICE_ETH56G_NOMINAL_THRESH4);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read VENDOR_TXLANE_THRESH, status: %d",
+ err);
+ return err;
+ }
+
+ switch (ice_phy_get_speed_eth56g(&hw->port_info->phy.link_info)) {
+ case ICE_ETH56G_LNK_SPD_1G:
+ case ICE_ETH56G_LNK_SPD_2_5G:
+ err = ice_read_quad_ptp_reg_eth56g(hw, port,
+ PHY_GPCS_CONFIG_REG0, &val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read PHY_GPCS_CONFIG_REG0, status: %d",
+ err);
+ return err;
+ }
+
+ val &= ~PHY_GPCS_CONFIG_REG0_TX_THR_M;
+ val |= FIELD_PREP(PHY_GPCS_CONFIG_REG0_TX_THR_M,
+ ICE_ETH56G_NOMINAL_TX_THRESH);
+
+ err = ice_write_quad_ptp_reg_eth56g(hw, port,
+ PHY_GPCS_CONFIG_REG0, val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write PHY_GPCS_CONFIG_REG0, status: %d",
+ err);
+ return err;
+ }
+ break;
+ default:
+ break;
+ }
+
+ err = ice_write_40b_ptp_reg_eth56g(hw, port, PHY_PCS_REF_TUS_L,
+ ICE_ETH56G_NOMINAL_PCS_REF_TUS);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write PHY_PCS_REF_TUS, status: %d",
+ err);
+ return err;
+ }
+
+ err = ice_write_40b_ptp_reg_eth56g(hw, port, PHY_PCS_REF_INC_L,
+ ICE_ETH56G_NOMINAL_PCS_REF_INC);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write PHY_PCS_REF_INC, status: %d",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_phy_cfg_ptp_1step_eth56g - Configure 1-step PTP settings
+ * @hw: Pointer to the HW struct
+ * @port: Port to configure
+ *
+ * Return:
+ * * %0 - success
+ * * %other - PHY read/write failed
+ */
+int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port)
+{
+ u8 quad_lane = port % ICE_PORTS_PER_QUAD;
+ u32 addr, val, peer_delay;
+ bool enable, sfd_ena;
+ int err;
+
+ enable = hw->ptp.phy.eth56g.onestep_ena;
+ peer_delay = hw->ptp.phy.eth56g.peer_delay;
+ sfd_ena = hw->ptp.phy.eth56g.sfd_ena;
+
+ addr = PHY_PTP_1STEP_CONFIG;
+ err = ice_read_quad_ptp_reg_eth56g(hw, port, addr, &val);
+ if (err)
+ return err;
+
+ if (enable)
+ val |= BIT(quad_lane);
+ else
+ val &= ~BIT(quad_lane);
+
+ val &= ~(PHY_PTP_1STEP_T1S_UP64_M | PHY_PTP_1STEP_T1S_DELTA_M);
+
+ err = ice_write_quad_ptp_reg_eth56g(hw, port, addr, val);
+ if (err)
+ return err;
+
+ addr = PHY_PTP_1STEP_PEER_DELAY(quad_lane);
+ val = FIELD_PREP(PHY_PTP_1STEP_PD_DELAY_M, peer_delay);
+ if (peer_delay)
+ val |= PHY_PTP_1STEP_PD_ADD_PD_M;
+ val |= PHY_PTP_1STEP_PD_DLY_V_M;
+ err = ice_write_quad_ptp_reg_eth56g(hw, port, addr, val);
+ if (err)
+ return err;
+
+ val &= ~PHY_PTP_1STEP_PD_DLY_V_M;
+ err = ice_write_quad_ptp_reg_eth56g(hw, port, addr, val);
+ if (err)
+ return err;
+
+ addr = PHY_MAC_XIF_MODE;
+ err = ice_read_mac_reg_eth56g(hw, port, addr, &val);
+ if (err)
+ return err;
+
+ val &= ~(PHY_MAC_XIF_1STEP_ENA_M | PHY_MAC_XIF_TS_BIN_MODE_M |
+ PHY_MAC_XIF_TS_SFD_ENA_M | PHY_MAC_XIF_GMII_TS_SEL_M);
+
+ switch (ice_phy_get_speed_eth56g(&hw->port_info->phy.link_info)) {
+ case ICE_ETH56G_LNK_SPD_1G:
+ case ICE_ETH56G_LNK_SPD_2_5G:
+ val |= PHY_MAC_XIF_GMII_TS_SEL_M;
+ break;
+ default:
+ break;
+ }
+
+ val |= FIELD_PREP(PHY_MAC_XIF_1STEP_ENA_M, enable) |
+ FIELD_PREP(PHY_MAC_XIF_TS_BIN_MODE_M, enable) |
+ FIELD_PREP(PHY_MAC_XIF_TS_SFD_ENA_M, sfd_ena);
+
+ return ice_write_mac_reg_eth56g(hw, port, addr, val);
+}
+
+/**
+ * mul_u32_u32_fx_q9 - Multiply two u32 fixed point Q9 values
+ * @a: multiplier value
+ * @b: multiplicand value
+ *
+ * Return: result of multiplication
+ */
+static u32 mul_u32_u32_fx_q9(u32 a, u32 b)
+{
+ return (u32)(((u64)a * b) >> ICE_ETH56G_MAC_CFG_FRAC_W);
+}
+
+/**
+ * add_u32_u32_fx - Add two u32 fixed point values and discard overflow
+ * @a: first value
+ * @b: second value
+ *
+ * Return: result of addition
+ */
+static u32 add_u32_u32_fx(u32 a, u32 b)
+{
+ return lower_32_bits(((u64)a + b));
+}
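
A short worked example of the fixed-point helpers above, assuming ICE_ETH56G_MAC_CFG_FRAC_W is 9 (Q9, 512 steps per unit); the constants are illustrative only.

	u32 a = 0x700;				/* 3.5 in Q9 */
	u32 b = 0x400;				/* 2.0 in Q9 */
	u32 prod = mul_u32_u32_fx_q9(a, b);	/* 0xE00 == 7.0 in Q9 */
	u32 sum = add_u32_u32_fx(a, b);		/* 0xB00 == 5.5 in Q9 */
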
+
+/**
+ * ice_ptp_calc_bitslip_eth56g - Calculate bitslip value
+ * @hw: pointer to the HW struct
+ * @port: port to configure
+ * @bs: bitslip multiplier
+ * @fc: FC-FEC enabled
+ * @rs: RS-FEC enabled
+ * @spd: link speed
+ *
+ * Return: calculated bitslip value
+ */
+static u32 ice_ptp_calc_bitslip_eth56g(struct ice_hw *hw, u8 port, u32 bs,
+ bool fc, bool rs,
+ enum ice_eth56g_link_spd spd)
+{
+ u32 bitslip;
+ int err;
+
+ if (!bs || rs)
+ return 0;
+
+ if (spd == ICE_ETH56G_LNK_SPD_1G || spd == ICE_ETH56G_LNK_SPD_2_5G) {
+ err = ice_read_gpcs_reg_eth56g(hw, port, PHY_GPCS_BITSLIP,
+ &bitslip);
+ } else {
+ u8 quad_lane = port % ICE_PORTS_PER_QUAD;
+ u32 addr;
+
+ addr = PHY_REG_SD_BIT_SLIP(quad_lane);
+ err = ice_read_quad_ptp_reg_eth56g(hw, port, addr, &bitslip);
+ }
+ if (err)
+ return 0;
+
+ if (spd == ICE_ETH56G_LNK_SPD_1G && !bitslip) {
+ /* Bitslip register value of 0 corresponds to 10 so substitute
+ * it for calculations
+ */
+ bitslip = 10;
+ } else if (spd == ICE_ETH56G_LNK_SPD_10G ||
+ spd == ICE_ETH56G_LNK_SPD_25G) {
+ if (fc)
+ bitslip = bitslip * 2 + 32;
+ else
+ bitslip = (u32)((s32)bitslip * -1 + 20);
+ }
+
+ bitslip <<= ICE_ETH56G_MAC_CFG_FRAC_W;
+ return mul_u32_u32_fx_q9(bitslip, bs);
+}
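
A hedged numerical walk-through of the adjustment above (values assumed): at 10G with FC-FEC and a raw serdes bitslip of 4, the effective slip is 4 * 2 + 32 = 40 bit times, which is then converted to Q9 and scaled by the @bs multiplier.

	u32 raw = 4;
	u32 bs = 0x100;						/* 0.5 in Q9 */
	u32 eff = (raw * 2 + 32) << ICE_ETH56G_MAC_CFG_FRAC_W;	/* 40.0 in Q9 */
	u32 corr = mul_u32_u32_fx_q9(eff, bs);			/* 0x2800 == 20.0 in Q9 */
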
+
+/**
+ * ice_ptp_calc_deskew_eth56g - Calculate deskew value
+ * @hw: pointer to the HW struct
+ * @port: port to configure
+ * @ds: deskew multiplier
+ * @rs: RS-FEC enabled
+ * @spd: link speed
+ *
+ * Return: calculated deskew value
+ */
+static u32 ice_ptp_calc_deskew_eth56g(struct ice_hw *hw, u8 port, u32 ds,
+ bool rs, enum ice_eth56g_link_spd spd)
+{
+ u32 deskew_i, deskew_f;
+ int err;
+
+ if (!ds)
+ return 0;
+
+ read_poll_timeout(ice_read_ptp_reg_eth56g, err,
+ FIELD_GET(PHY_REG_DESKEW_0_VALID, deskew_i), 500,
+ 50 * USEC_PER_MSEC, false, hw, port, PHY_REG_DESKEW_0,
+ &deskew_i);
+ if (err)
+ return err;
+
+ deskew_f = FIELD_GET(PHY_REG_DESKEW_0_RLEVEL_FRAC, deskew_i);
+ deskew_i = FIELD_GET(PHY_REG_DESKEW_0_RLEVEL, deskew_i);
+
+ if (rs && spd == ICE_ETH56G_LNK_SPD_50G2)
+ ds = 0x633; /* 3.1 */
+ else if (rs && spd == ICE_ETH56G_LNK_SPD_100G)
+ ds = 0x31b; /* 1.552 */
+
+ deskew_i = FIELD_PREP(ICE_ETH56G_MAC_CFG_RX_OFFSET_INT, deskew_i);
+ /* Shift 3 fractional bits to the end of the integer part */
+ deskew_f <<= ICE_ETH56G_MAC_CFG_FRAC_W - PHY_REG_DESKEW_0_RLEVEL_FRAC_W;
+ return mul_u32_u32_fx_q9(deskew_i | deskew_f, ds);
+}
+
+/**
+ * ice_phy_set_offsets_eth56g - Set Tx/Rx offset values
+ * @hw: pointer to the HW struct
+ * @port: port to configure
+ * @spd: link speed
+ * @cfg: structure to store output values
+ * @fc: FC-FEC enabled
+ * @rs: RS-FEC enabled
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to write to PHY
+ */
+static int ice_phy_set_offsets_eth56g(struct ice_hw *hw, u8 port,
+ enum ice_eth56g_link_spd spd,
+ const struct ice_eth56g_mac_reg_cfg *cfg,
+ bool fc, bool rs)
+{
+ u32 rx_offset, tx_offset, bs_ds;
+ bool onestep, sfd;
+
+ onestep = hw->ptp.phy.eth56g.onestep_ena;
+ sfd = hw->ptp.phy.eth56g.sfd_ena;
+ bs_ds = cfg->rx_offset.bs_ds;
+
+ if (fc)
+ rx_offset = cfg->rx_offset.fc;
+ else if (rs)
+ rx_offset = cfg->rx_offset.rs;
+ else
+ rx_offset = cfg->rx_offset.no_fec;
+
+ rx_offset = add_u32_u32_fx(rx_offset, cfg->rx_offset.serdes);
+ if (sfd)
+ rx_offset = add_u32_u32_fx(rx_offset, cfg->rx_offset.sfd);
+
+ if (spd < ICE_ETH56G_LNK_SPD_40G)
+ bs_ds = ice_ptp_calc_bitslip_eth56g(hw, port, bs_ds, fc, rs,
+ spd);
+ else
+ bs_ds = ice_ptp_calc_deskew_eth56g(hw, port, bs_ds, rs, spd);
+ rx_offset = add_u32_u32_fx(rx_offset, bs_ds);
+ rx_offset &= ICE_ETH56G_MAC_CFG_RX_OFFSET_INT |
+ ICE_ETH56G_MAC_CFG_RX_OFFSET_FRAC;
+
+ if (fc)
+ tx_offset = cfg->tx_offset.fc;
+ else if (rs)
+ tx_offset = cfg->tx_offset.rs;
+ else
+ tx_offset = cfg->tx_offset.no_fec;
+ tx_offset += cfg->tx_offset.serdes + cfg->tx_offset.sfd * sfd +
+ cfg->tx_offset.onestep * onestep;
+
+ ice_write_mac_reg_eth56g(hw, port, PHY_MAC_RX_OFFSET, rx_offset);
+ return ice_write_mac_reg_eth56g(hw, port, PHY_MAC_TX_OFFSET, tx_offset);
+}
+
+/**
+ * ice_phy_cfg_mac_eth56g - Configure MAC for PTP
+ * @hw: Pointer to the HW struct
+ * @port: Port to configure
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to write to PHY
+ */
+static int ice_phy_cfg_mac_eth56g(struct ice_hw *hw, u8 port)
+{
+ const struct ice_eth56g_mac_reg_cfg *cfg;
+ enum ice_eth56g_link_spd spd;
+ struct ice_link_status *li;
+ bool fc = false;
+ bool rs = false;
+ bool onestep;
+ u32 val;
+ int err;
+
+ onestep = hw->ptp.phy.eth56g.onestep_ena;
+ li = &hw->port_info->phy.link_info;
+ spd = ice_phy_get_speed_eth56g(li);
+ if (!!(li->an_info & ICE_AQ_FEC_EN)) {
+ if (spd == ICE_ETH56G_LNK_SPD_10G) {
+ fc = true;
+ } else {
+ fc = !!(li->fec_info & ICE_AQ_LINK_25G_KR_FEC_EN);
+ rs = !!(li->fec_info & ~ICE_AQ_LINK_25G_KR_FEC_EN);
+ }
+ }
+ cfg = &eth56g_mac_cfg[spd];
+
+ err = ice_write_mac_reg_eth56g(hw, port, PHY_MAC_RX_MODULO, 0);
+ if (err)
+ return err;
+
+ err = ice_write_mac_reg_eth56g(hw, port, PHY_MAC_TX_MODULO, 0);
+ if (err)
+ return err;
+
+ val = FIELD_PREP(PHY_MAC_TSU_CFG_TX_MODE_M,
+ cfg->tx_mode.def + rs * cfg->tx_mode.rs) |
+ FIELD_PREP(PHY_MAC_TSU_CFG_TX_MII_MK_DLY_M, cfg->tx_mk_dly) |
+ FIELD_PREP(PHY_MAC_TSU_CFG_TX_MII_CW_DLY_M,
+ cfg->tx_cw_dly.def +
+ onestep * cfg->tx_cw_dly.onestep) |
+ FIELD_PREP(PHY_MAC_TSU_CFG_RX_MODE_M,
+ cfg->rx_mode.def + rs * cfg->rx_mode.rs) |
+ FIELD_PREP(PHY_MAC_TSU_CFG_RX_MII_MK_DLY_M,
+ cfg->rx_mk_dly.def + rs * cfg->rx_mk_dly.rs) |
+ FIELD_PREP(PHY_MAC_TSU_CFG_RX_MII_CW_DLY_M,
+ cfg->rx_cw_dly.def + rs * cfg->rx_cw_dly.rs) |
+ FIELD_PREP(PHY_MAC_TSU_CFG_BLKS_PER_CLK_M, cfg->blks_per_clk);
+ err = ice_write_mac_reg_eth56g(hw, port, PHY_MAC_TSU_CONFIG, val);
+ if (err)
+ return err;
+
+ err = ice_write_mac_reg_eth56g(hw, port, PHY_MAC_BLOCKTIME,
+ cfg->blktime);
+ if (err)
+ return err;
+
+ err = ice_phy_set_offsets_eth56g(hw, port, spd, cfg, fc, rs);
+ if (err)
+ return err;
+
+ if (spd == ICE_ETH56G_LNK_SPD_25G && !rs)
+ val = 0;
+ else
+ val = cfg->mktime;
+
+ return ice_write_mac_reg_eth56g(hw, port, PHY_MAC_MARKERTIME, val);
+}
+
+/**
+ * ice_phy_cfg_intr_eth56g - Configure TX timestamp interrupt
+ * @hw: pointer to the HW struct
+ * @port: the timestamp port
+ * @ena: enable or disable interrupt
+ * @threshold: interrupt threshold
+ *
+ * Configure TX timestamp interrupt for the specified port
+ *
+ * Return:
+ * * %0 - success
+ * * %other - PHY read/write failed
+ */
+int ice_phy_cfg_intr_eth56g(struct ice_hw *hw, u8 port, bool ena, u8 threshold)
+{
+ int err;
+ u32 val;
+
+ err = ice_read_ptp_reg_eth56g(hw, port, PHY_REG_TS_INT_CONFIG, &val);
+ if (err)
+ return err;
+
+ if (ena) {
+ val |= PHY_TS_INT_CONFIG_ENA_M;
+ val &= ~PHY_TS_INT_CONFIG_THRESHOLD_M;
+ val |= FIELD_PREP(PHY_TS_INT_CONFIG_THRESHOLD_M, threshold);
+ } else {
+ val &= ~PHY_TS_INT_CONFIG_ENA_M;
+ }
+
+ return ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TS_INT_CONFIG, val);
+}
+
+/**
+ * ice_read_phy_and_phc_time_eth56g - Simultaneously capture PHC and PHY time
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to read
+ * @phy_time: on return, the 64-bit PHY timer value
+ * @phc_time: on return, the lower 64 bits of the PHC time
+ *
+ * Issue an ICE_PTP_READ_TIME timer command to simultaneously capture the PHY
+ * and PHC timer values.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - PHY read/write failed
+ */
+static int ice_read_phy_and_phc_time_eth56g(struct ice_hw *hw, u8 port,
+ u64 *phy_time, u64 *phc_time)
+{
+ struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
+ u64 tx_time, rx_time;
+ u32 zo, lo;
+ u8 tmr_idx;
+ int err;
+
+ tmr_idx = ice_get_ptp_src_clock_index(hw);
+
+ /* Prepare the PHC timer for a ICE_PTP_READ_TIME capture command */
+ ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME);
+
+ /* Prepare the PHY timer for a ICE_PTP_READ_TIME capture command */
+ err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_READ_TIME);
+ if (err)
+ return err;
+
+ /* Issue the sync to start the ICE_PTP_READ_TIME capture */
+ ice_ptp_exec_tmr_cmd(hw);
+
+ /* Read the captured PHC time from the shadow time registers */
+ if (ice_is_primary(hw)) {
+ zo = rd32(hw, GLTSYN_SHTIME_0(tmr_idx));
+ lo = rd32(hw, GLTSYN_SHTIME_L(tmr_idx));
+ } else {
+ zo = rd32(ice_get_primary_hw(pf), GLTSYN_SHTIME_0(tmr_idx));
+ lo = rd32(ice_get_primary_hw(pf), GLTSYN_SHTIME_L(tmr_idx));
+ }
+ *phc_time = (u64)lo << 32 | zo;
+
+ /* Read the captured PHY time from the PHY shadow registers */
+ err = ice_ptp_read_port_capture_eth56g(hw, port, &tx_time, &rx_time);
+ if (err)
+ return err;
+
+ /* If the PHY Tx and Rx timers don't match, log a warning message.
+ * Note that this should not happen in normal circumstances since the
+ * driver always programs them together.
+ */
+ if (tx_time != rx_time)
+ dev_warn(ice_hw_to_dev(hw), "PHY port %u Tx and Rx timers do not match, tx_time 0x%016llX, rx_time 0x%016llX\n",
+ port, tx_time, rx_time);
+
+ *phy_time = tx_time;
+
+ return 0;
+}
+
+/**
+ * ice_sync_phy_timer_eth56g - Synchronize the PHY timer with PHC timer
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to synchronize
+ *
+ * Perform an adjustment to ensure that the PHY and PHC timers are in sync.
+ * This is done by issuing an ICE_PTP_READ_TIME command which triggers a
+ * simultaneous read of the PHY timer and PHC timer. The difference is then
+ * used to calculate an appropriate 2s complement adjustment to apply to the
+ * PHY timer so that it reads the same value as the primary PHC timer.
+ *
+ * Return:
+ * * %0 - success
+ * * %-EBUSY - failed to acquire PTP semaphore
+ * * %other - PHY read/write failed
+ */
+static int ice_sync_phy_timer_eth56g(struct ice_hw *hw, u8 port)
+{
+ u64 phc_time, phy_time, difference;
+ int err;
+
+ if (!ice_ptp_lock(hw)) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to acquire PTP semaphore\n");
+ return -EBUSY;
+ }
+
+ err = ice_read_phy_and_phc_time_eth56g(hw, port, &phy_time, &phc_time);
+ if (err)
+ goto err_unlock;
+
+ /* Calculate the amount required to add to the port time in order for
+ * it to match the PHC time.
+ *
+ * Note that the port adjustment is done using 2s complement
+ * arithmetic. This is convenient since it means that we can simply
+ * calculate the difference between the PHC time and the port time,
+ * and it will be interpreted correctly.
+ */
+
+ ice_ptp_src_cmd(hw, ICE_PTP_NOP);
+ difference = phc_time - phy_time;
+
+ err = ice_ptp_prep_port_adj_eth56g(hw, port, (s64)difference);
+ if (err)
+ goto err_unlock;
+
+ err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_ADJ_TIME);
+ if (err)
+ goto err_unlock;
+
+ /* Issue the sync to activate the time adjustment */
+ ice_ptp_exec_tmr_cmd(hw);
+
+ /* Re-capture the timer values to flush the command registers and
+ * verify that the time was properly adjusted.
+ */
+ err = ice_read_phy_and_phc_time_eth56g(hw, port, &phy_time, &phc_time);
+ if (err)
+ goto err_unlock;
+
+ dev_info(ice_hw_to_dev(hw),
+ "Port %u PHY time synced to PHC: 0x%016llX, 0x%016llX\n",
+ port, phy_time, phc_time);
+
+err_unlock:
+ ice_ptp_unlock(hw);
+ return err;
+}
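
To make the 2s complement note above concrete (a hedged illustration with made-up values): if the PHY timer is ahead of the PHC, the unsigned subtraction naturally produces the negative adjustment the port expects.

	u64 phc_time = 100, phy_time = 130;
	u64 difference = phc_time - phy_time;	/* 0xFFFFFFFFFFFFFFE2, i.e. -30 */
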
+
+/**
+ * ice_stop_phy_timer_eth56g - Stop the PHY clock timer
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to stop
+ * @soft_reset: if true, hold the SOFT_RESET bit of PHY_REG_PS
+ *
+ * Stop the clock of a PHY port. This must be done as part of the flow to
+ * re-calibrate Tx and Rx timestamping offsets whenever the clock time is
+ * initialized or when link speed changes.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to write to PHY
+ */
+int ice_stop_phy_timer_eth56g(struct ice_hw *hw, u8 port, bool soft_reset)
+{
+ int err;
+
+ err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_OFFSET_READY, 0);
+ if (err)
+ return err;
+
+ err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_RX_OFFSET_READY, 0);
+ if (err)
+ return err;
+
+ ice_debug(hw, ICE_DBG_PTP, "Disabled clock on PHY port %u\n", port);
+
+ return 0;
+}
+
+/**
+ * ice_start_phy_timer_eth56g - Start the PHY clock timer
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to start
+ *
+ * Start the clock of a PHY port. This must be done as part of the flow to
+ * re-calibrate Tx and Rx timestamping offsets whenever the clock time is
+ * initialized or when link speed changes.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - PHY read/write failed
+ */
+int ice_start_phy_timer_eth56g(struct ice_hw *hw, u8 port)
+{
+ struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
+ u32 lo, hi;
+ u64 incval;
+ u8 tmr_idx;
+ int err;
+
+ tmr_idx = ice_get_ptp_src_clock_index(hw);
+
+ err = ice_stop_phy_timer_eth56g(hw, port, false);
+ if (err)
+ return err;
+
+ ice_ptp_src_cmd(hw, ICE_PTP_NOP);
+
+ err = ice_phy_cfg_parpcs_eth56g(hw, port);
+ if (err)
+ return err;
+
+ err = ice_phy_cfg_ptp_1step_eth56g(hw, port);
+ if (err)
+ return err;
+
+ err = ice_phy_cfg_mac_eth56g(hw, port);
+ if (err)
+ return err;
+
+ if (ice_is_primary(hw)) {
+ lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx));
+ hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx));
+ } else {
+ lo = rd32(ice_get_primary_hw(pf), GLTSYN_INCVAL_L(tmr_idx));
+ hi = rd32(ice_get_primary_hw(pf), GLTSYN_INCVAL_H(tmr_idx));
+ }
+ incval = (u64)hi << 32 | lo;
+
+ err = ice_write_40b_ptp_reg_eth56g(hw, port, PHY_REG_TIMETUS_L, incval);
+ if (err)
+ return err;
+
+ err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_INIT_INCVAL);
+ if (err)
+ return err;
+
+ ice_ptp_exec_tmr_cmd(hw);
+
+ err = ice_sync_phy_timer_eth56g(hw, port);
+ if (err)
+ return err;
+
+ err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_OFFSET_READY, 1);
+ if (err)
+ return err;
+
+ err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_RX_OFFSET_READY, 1);
+ if (err)
+ return err;
+
+ ice_debug(hw, ICE_DBG_PTP, "Enabled clock on PHY port %u\n", port);
+
+ return 0;
+}
+
+/**
+ * ice_ptp_read_tx_hwtstamp_status_eth56g - Get TX timestamp status
+ * @hw: pointer to the HW struct
+ * @ts_status: on return, the Tx timestamp ready status mask
+ *
+ * Read the PHY Tx timestamp status mask indicating which ports have Tx
+ * timestamps available.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to read from PHY
+ */
+int ice_ptp_read_tx_hwtstamp_status_eth56g(struct ice_hw *hw, u32 *ts_status)
+{
+ const struct ice_eth56g_params *params = &hw->ptp.phy.eth56g;
+ u8 phy, mask;
+ u32 status;
+
+ mask = (1 << hw->ptp.ports_per_phy) - 1;
+ *ts_status = 0;
+
+ for (phy = 0; phy < params->num_phys; phy++) {
+ int err;
+
+ err = ice_read_phy_eth56g(hw, phy, PHY_PTP_INT_STATUS, &status);
+ if (err)
+ return err;
+
+ *ts_status |= (status & mask) << (phy * hw->ptp.ports_per_phy);
+ }
+
+ ice_debug(hw, ICE_DBG_PTP, "PHY interrupt err: %x\n", *ts_status);
+
+ return 0;
+}
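
A hedged example of how the per-PHY words are folded into one mask, assuming the E825 layout set up later in ice_ptp_init_phy_e825() (2 PHYs with 4 ports each); the status values are invented for illustration.

	u32 ts_status = 0;
	u8 mask = (1 << 4) - 1;			/* ports_per_phy == 4 */

	ts_status |= (0x3 & mask) << (0 * 4);	/* PHY 0: ports 0 and 1 */
	ts_status |= (0x8 & mask) << (1 * 4);	/* PHY 1: port 7 */
	/* ts_status == 0x83 */
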
+
+/**
+ * ice_get_phy_tx_tstamp_ready_eth56g - Read the Tx memory status register
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to read from
+ * @tstamp_ready: contents of the Tx memory status register
+ *
+ * Read the PHY_REG_TX_MEMORY_STATUS register indicating which timestamps in
+ * the PHY are ready. A set bit means the corresponding timestamp is valid and
+ * ready to be captured from the PHY timestamp block.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to read from PHY
+ */
+static int ice_get_phy_tx_tstamp_ready_eth56g(struct ice_hw *hw, u8 port,
+ u64 *tstamp_ready)
+{
+ int err;
+
+ err = ice_read_64b_ptp_reg_eth56g(hw, port, PHY_REG_TX_MEMORY_STATUS_L,
+ tstamp_ready);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEMORY_STATUS for port %u, err %d\n",
+ port, err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_ptp_init_phy_e825 - initialize PHY parameters
+ * @hw: pointer to the HW struct
+ */
+static void ice_ptp_init_phy_e825(struct ice_hw *hw)
+{
+ struct ice_ptp_hw *ptp = &hw->ptp;
+ struct ice_eth56g_params *params;
+
+ params = &ptp->phy.eth56g;
+ params->onestep_ena = false;
+ params->peer_delay = 0;
+ params->sfd_ena = false;
+ params->num_phys = 2;
+ ptp->ports_per_phy = 4;
+ ptp->num_lports = params->num_phys * ptp->ports_per_phy;
+}
+
/* E822 family functions
*
* The following functions operate on the E822 family of devices.
@@ -285,18 +2206,20 @@ static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw)
/**
* ice_fill_phy_msg_e82x - Fill message data for a PHY register access
+ * @hw: pointer to the HW struct
* @msg: the PHY message buffer to fill in
* @port: the port to access
* @offset: the register offset
*/
-static void
-ice_fill_phy_msg_e82x(struct ice_sbq_msg_input *msg, u8 port, u16 offset)
+static void ice_fill_phy_msg_e82x(struct ice_hw *hw,
+ struct ice_sbq_msg_input *msg, u8 port,
+ u16 offset)
{
- int phy_port, phy, quadtype;
+ int phy_port, quadtype;
- phy_port = port % ICE_PORTS_PER_PHY_E82X;
- phy = port / ICE_PORTS_PER_PHY_E82X;
- quadtype = (port / ICE_PORTS_PER_QUAD) % ICE_QUADS_PER_PHY_E82X;
+ phy_port = port % hw->ptp.ports_per_phy;
+ quadtype = ICE_GET_QUAD_NUM(port) %
+ ICE_GET_QUAD_NUM(hw->ptp.ports_per_phy);
if (quadtype == 0) {
msg->msg_addr_low = P_Q0_L(P_0_BASE + offset, phy_port);
@@ -306,12 +2229,7 @@ ice_fill_phy_msg_e82x(struct ice_sbq_msg_input *msg, u8 port, u16 offset)
msg->msg_addr_high = P_Q1_H(P_4_BASE + offset, phy_port);
}
- if (phy == 0)
- msg->dest_dev = rmn_0;
- else if (phy == 1)
- msg->dest_dev = rmn_1;
- else
- msg->dest_dev = rmn_2;
+ msg->dest_dev = ice_sbq_dev_phy_0;
}
/**
@@ -427,10 +2345,10 @@ ice_read_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 offset, u32 *val)
struct ice_sbq_msg_input msg = {0};
int err;
- ice_fill_phy_msg_e82x(&msg, port, offset);
+ ice_fill_phy_msg_e82x(hw, &msg, port, offset);
msg.opcode = ice_sbq_msg_rd;
- err = ice_sbq_rw_reg(hw, &msg);
+ err = ice_sbq_rw_reg(hw, &msg, LIBIE_AQ_FLAG_RD);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
err);
@@ -504,11 +2422,11 @@ ice_write_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 offset, u32 val)
struct ice_sbq_msg_input msg = {0};
int err;
- ice_fill_phy_msg_e82x(&msg, port, offset);
+ ice_fill_phy_msg_e82x(hw, &msg, port, offset);
msg.opcode = ice_sbq_msg_wr;
msg.data = val;
- err = ice_sbq_rw_reg(hw, &msg);
+ err = ice_sbq_rw_reg(hw, &msg, LIBIE_AQ_FLAG_RD);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
err);
@@ -543,8 +2461,7 @@ ice_write_40b_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
low_addr);
return -EINVAL;
}
-
- low = (u32)(val & P_REG_40B_LOW_M);
+ low = FIELD_GET(P_REG_40B_LOW_M, val);
high = (u32)(val >> P_REG_40B_HIGH_S);
err = ice_write_phy_reg_e82x(hw, port, low_addr, low);
@@ -614,24 +2531,30 @@ ice_write_64b_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
/**
* ice_fill_quad_msg_e82x - Fill message data for quad register access
+ * @hw: pointer to the HW struct
* @msg: the PHY message buffer to fill in
* @quad: the quad to access
* @offset: the register offset
*
* Fill a message buffer for accessing a register in a quad shared between
* multiple PHYs.
+ *
+ * Return:
+ * * %0 - OK
+ * * %-EINVAL - invalid quad number
*/
-static int
-ice_fill_quad_msg_e82x(struct ice_sbq_msg_input *msg, u8 quad, u16 offset)
+static int ice_fill_quad_msg_e82x(struct ice_hw *hw,
+ struct ice_sbq_msg_input *msg, u8 quad,
+ u16 offset)
{
u32 addr;
- if (quad >= ICE_MAX_QUAD)
+ if (quad >= ICE_GET_QUAD_NUM(hw->ptp.num_lports))
return -EINVAL;
- msg->dest_dev = rmn_0;
+ msg->dest_dev = ice_sbq_dev_phy_0;
- if ((quad % ICE_QUADS_PER_PHY_E82X) == 0)
+ if (!(quad % ICE_GET_QUAD_NUM(hw->ptp.ports_per_phy)))
addr = Q_0_BASE + offset;
else
addr = Q_1_BASE + offset;
@@ -658,13 +2581,13 @@ ice_read_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 *val)
struct ice_sbq_msg_input msg = {0};
int err;
- err = ice_fill_quad_msg_e82x(&msg, quad, offset);
+ err = ice_fill_quad_msg_e82x(hw, &msg, quad, offset);
if (err)
return err;
msg.opcode = ice_sbq_msg_rd;
- err = ice_sbq_rw_reg(hw, &msg);
+ err = ice_sbq_rw_reg(hw, &msg, LIBIE_AQ_FLAG_RD);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
err);
@@ -692,14 +2615,14 @@ ice_write_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 val)
struct ice_sbq_msg_input msg = {0};
int err;
- err = ice_fill_quad_msg_e82x(&msg, quad, offset);
+ err = ice_fill_quad_msg_e82x(hw, &msg, quad, offset);
if (err)
return err;
msg.opcode = ice_sbq_msg_wr;
msg.data = val;
- err = ice_sbq_rw_reg(hw, &msg);
+ err = ice_sbq_rw_reg(hw, &msg, LIBIE_AQ_FLAG_RD);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
err);
@@ -748,7 +2671,8 @@ ice_read_phy_tstamp_e82x(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp)
* lower 8 bits in the low register, and the upper 32 bits in the high
* register.
*/
- *tstamp = ((u64)hi) << TS_PHY_HIGH_S | ((u64)lo & TS_PHY_LOW_M);
+ *tstamp = FIELD_PREP(PHY_40B_HIGH_M, hi) |
+ FIELD_PREP(PHY_40B_LOW_M, lo);
return 0;
}
@@ -813,294 +2737,11 @@ static void ice_ptp_reset_ts_memory_e82x(struct ice_hw *hw)
{
unsigned int quad;
- for (quad = 0; quad < ICE_MAX_QUAD; quad++)
+ for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports); quad++)
ice_ptp_reset_ts_memory_quad_e82x(hw, quad);
}
/**
- * ice_read_cgu_reg_e82x - Read a CGU register
- * @hw: pointer to the HW struct
- * @addr: Register address to read
- * @val: storage for register value read
- *
- * Read the contents of a register of the Clock Generation Unit. Only
- * applicable to E822 devices.
- */
-static int
-ice_read_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 *val)
-{
- struct ice_sbq_msg_input cgu_msg;
- int err;
-
- cgu_msg.opcode = ice_sbq_msg_rd;
- cgu_msg.dest_dev = cgu;
- cgu_msg.msg_addr_low = addr;
- cgu_msg.msg_addr_high = 0x0;
-
- err = ice_sbq_rw_reg(hw, &cgu_msg);
- if (err) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to read CGU register 0x%04x, err %d\n",
- addr, err);
- return err;
- }
-
- *val = cgu_msg.data;
-
- return err;
-}
-
-/**
- * ice_write_cgu_reg_e82x - Write a CGU register
- * @hw: pointer to the HW struct
- * @addr: Register address to write
- * @val: value to write into the register
- *
- * Write the specified value to a register of the Clock Generation Unit. Only
- * applicable to E822 devices.
- */
-static int
-ice_write_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 val)
-{
- struct ice_sbq_msg_input cgu_msg;
- int err;
-
- cgu_msg.opcode = ice_sbq_msg_wr;
- cgu_msg.dest_dev = cgu;
- cgu_msg.msg_addr_low = addr;
- cgu_msg.msg_addr_high = 0x0;
- cgu_msg.data = val;
-
- err = ice_sbq_rw_reg(hw, &cgu_msg);
- if (err) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to write CGU register 0x%04x, err %d\n",
- addr, err);
- return err;
- }
-
- return err;
-}
-
-/**
- * ice_clk_freq_str - Convert time_ref_freq to string
- * @clk_freq: Clock frequency
- *
- * Convert the specified TIME_REF clock frequency to a string.
- */
-static const char *ice_clk_freq_str(u8 clk_freq)
-{
- switch ((enum ice_time_ref_freq)clk_freq) {
- case ICE_TIME_REF_FREQ_25_000:
- return "25 MHz";
- case ICE_TIME_REF_FREQ_122_880:
- return "122.88 MHz";
- case ICE_TIME_REF_FREQ_125_000:
- return "125 MHz";
- case ICE_TIME_REF_FREQ_153_600:
- return "153.6 MHz";
- case ICE_TIME_REF_FREQ_156_250:
- return "156.25 MHz";
- case ICE_TIME_REF_FREQ_245_760:
- return "245.76 MHz";
- default:
- return "Unknown";
- }
-}
-
-/**
- * ice_clk_src_str - Convert time_ref_src to string
- * @clk_src: Clock source
- *
- * Convert the specified clock source to its string name.
- */
-static const char *ice_clk_src_str(u8 clk_src)
-{
- switch ((enum ice_clk_src)clk_src) {
- case ICE_CLK_SRC_TCX0:
- return "TCX0";
- case ICE_CLK_SRC_TIME_REF:
- return "TIME_REF";
- default:
- return "Unknown";
- }
-}
-
-/**
- * ice_cfg_cgu_pll_e82x - Configure the Clock Generation Unit
- * @hw: pointer to the HW struct
- * @clk_freq: Clock frequency to program
- * @clk_src: Clock source to select (TIME_REF, or TCX0)
- *
- * Configure the Clock Generation Unit with the desired clock frequency and
- * time reference, enabling the PLL which drives the PTP hardware clock.
- */
-static int
-ice_cfg_cgu_pll_e82x(struct ice_hw *hw, enum ice_time_ref_freq clk_freq,
- enum ice_clk_src clk_src)
-{
- union tspll_ro_bwm_lf bwm_lf;
- union nac_cgu_dword19 dw19;
- union nac_cgu_dword22 dw22;
- union nac_cgu_dword24 dw24;
- union nac_cgu_dword9 dw9;
- int err;
-
- if (clk_freq >= NUM_ICE_TIME_REF_FREQ) {
- dev_warn(ice_hw_to_dev(hw), "Invalid TIME_REF frequency %u\n",
- clk_freq);
- return -EINVAL;
- }
-
- if (clk_src >= NUM_ICE_CLK_SRC) {
- dev_warn(ice_hw_to_dev(hw), "Invalid clock source %u\n",
- clk_src);
- return -EINVAL;
- }
-
- if (clk_src == ICE_CLK_SRC_TCX0 &&
- clk_freq != ICE_TIME_REF_FREQ_25_000) {
- dev_warn(ice_hw_to_dev(hw),
- "TCX0 only supports 25 MHz frequency\n");
- return -EINVAL;
- }
-
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD9, &dw9.val);
- if (err)
- return err;
-
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val);
- if (err)
- return err;
-
- err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
- if (err)
- return err;
-
- /* Log the current clock configuration */
- ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
- dw24.field.ts_pll_enable ? "enabled" : "disabled",
- ice_clk_src_str(dw24.field.time_ref_sel),
- ice_clk_freq_str(dw9.field.time_ref_freq_sel),
- bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked");
-
- /* Disable the PLL before changing the clock source or frequency */
- if (dw24.field.ts_pll_enable) {
- dw24.field.ts_pll_enable = 0;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
- if (err)
- return err;
- }
-
- /* Set the frequency */
- dw9.field.time_ref_freq_sel = clk_freq;
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD9, dw9.val);
- if (err)
- return err;
-
- /* Configure the TS PLL feedback divisor */
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD19, &dw19.val);
- if (err)
- return err;
-
- dw19.field.tspll_fbdiv_intgr = e822_cgu_params[clk_freq].feedback_div;
- dw19.field.tspll_ndivratio = 1;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD19, dw19.val);
- if (err)
- return err;
-
- /* Configure the TS PLL post divisor */
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD22, &dw22.val);
- if (err)
- return err;
-
- dw22.field.time1588clk_div = e822_cgu_params[clk_freq].post_pll_div;
- dw22.field.time1588clk_sel_div2 = 0;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD22, dw22.val);
- if (err)
- return err;
-
- /* Configure the TS PLL pre divisor and clock source */
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val);
- if (err)
- return err;
-
- dw24.field.ref1588_ck_div = e822_cgu_params[clk_freq].refclk_pre_div;
- dw24.field.tspll_fbdiv_frac = e822_cgu_params[clk_freq].frac_n_div;
- dw24.field.time_ref_sel = clk_src;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
- if (err)
- return err;
-
- /* Finally, enable the PLL */
- dw24.field.ts_pll_enable = 1;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
- if (err)
- return err;
-
- /* Wait to verify if the PLL locks */
- usleep_range(1000, 5000);
-
- err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
- if (err)
- return err;
-
- if (!bwm_lf.field.plllock_true_lock_cri) {
- dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n");
- return -EBUSY;
- }
-
- /* Log the current clock configuration */
- ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
- dw24.field.ts_pll_enable ? "enabled" : "disabled",
- ice_clk_src_str(dw24.field.time_ref_sel),
- ice_clk_freq_str(dw9.field.time_ref_freq_sel),
- bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked");
-
- return 0;
-}
-
-/**
- * ice_init_cgu_e82x - Initialize CGU with settings from firmware
- * @hw: pointer to the HW structure
- *
- * Initialize the Clock Generation Unit of the E822 device.
- */
-static int ice_init_cgu_e82x(struct ice_hw *hw)
-{
- struct ice_ts_func_info *ts_info = &hw->func_caps.ts_func_info;
- union tspll_cntr_bist_settings cntr_bist;
- int err;
-
- err = ice_read_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS,
- &cntr_bist.val);
- if (err)
- return err;
-
- /* Disable sticky lock detection so lock err reported is accurate */
- cntr_bist.field.i_plllock_sel_0 = 0;
- cntr_bist.field.i_plllock_sel_1 = 0;
-
- err = ice_write_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS,
- cntr_bist.val);
- if (err)
- return err;
-
- /* Configure the CGU PLL using the parameters from the function
- * capabilities.
- */
- err = ice_cfg_cgu_pll_e82x(hw, ts_info->time_ref,
- (enum ice_clk_src)ts_info->clk_src);
- if (err)
- return err;
-
- return 0;
-}
-
-/**
* ice_ptp_set_vernier_wl - Set the window length for vernier calibration
* @hw: pointer to the HW struct
*
@@ -1110,7 +2751,7 @@ static int ice_ptp_set_vernier_wl(struct ice_hw *hw)
{
u8 port;
- for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+ for (port = 0; port < hw->ptp.num_lports; port++) {
int err;
err = ice_write_phy_reg_e82x(hw, port, P_REG_WL,
@@ -1133,21 +2774,14 @@ static int ice_ptp_set_vernier_wl(struct ice_hw *hw)
*/
static int ice_ptp_init_phc_e82x(struct ice_hw *hw)
{
- int err;
- u32 regval;
+ u32 val;
/* Enable reading switch and PHY registers over the sideband queue */
#define PF_SB_REM_DEV_CTL_SWITCH_READ BIT(1)
#define PF_SB_REM_DEV_CTL_PHY0 BIT(2)
- regval = rd32(hw, PF_SB_REM_DEV_CTL);
- regval |= (PF_SB_REM_DEV_CTL_SWITCH_READ |
- PF_SB_REM_DEV_CTL_PHY0);
- wr32(hw, PF_SB_REM_DEV_CTL, regval);
-
- /* Initialize the Clock Generation Unit */
- err = ice_init_cgu_e82x(hw);
- if (err)
- return err;
+ val = rd32(hw, PF_SB_REM_DEV_CTL);
+ val |= (PF_SB_REM_DEV_CTL_SWITCH_READ | PF_SB_REM_DEV_CTL_PHY0);
+ wr32(hw, PF_SB_REM_DEV_CTL, val);
/* Set window length for all the ports */
return ice_ptp_set_vernier_wl(hw);
@@ -1175,7 +2809,7 @@ ice_ptp_prep_phy_time_e82x(struct ice_hw *hw, u32 time)
*/
phy_time = (u64)time << 32;
- for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+ for (port = 0; port < hw->ptp.num_lports; port++) {
/* Tx case */
err = ice_write_64b_phy_reg_e82x(hw, port,
P_REG_TX_TIMER_INC_PRE_L,
@@ -1278,7 +2912,7 @@ ice_ptp_prep_phy_adj_e82x(struct ice_hw *hw, s32 adj)
else
cycles = -(((s64)-adj) << 32);
- for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+ for (port = 0; port < hw->ptp.num_lports; port++) {
int err;
err = ice_ptp_prep_port_adj_e82x(hw, port, cycles);
@@ -1304,7 +2938,7 @@ ice_ptp_prep_phy_incval_e82x(struct ice_hw *hw, u64 incval)
int err;
u8 port;
- for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+ for (port = 0; port < hw->ptp.num_lports; port++) {
err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_TIMETUS_L,
incval);
if (err)
@@ -1369,51 +3003,20 @@ ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts)
*
* Prepare the requested port for an upcoming timer sync command.
*
- * Do not use this function directly. If you want to configure exactly one
- * port, use ice_ptp_one_port_cmd() instead.
+ * Note there is no equivalent of this operation on E810, as that device
+ * always handles all external PHYs internally.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to write to PHY
*/
static int ice_ptp_write_port_cmd_e82x(struct ice_hw *hw, u8 port,
enum ice_ptp_tmr_cmd cmd)
{
- u32 cmd_val, val;
- u8 tmr_idx;
+ u32 val = ice_ptp_tmr_cmd_to_port_reg(hw, cmd);
int err;
- tmr_idx = ice_get_ptp_src_clock_index(hw);
- cmd_val = tmr_idx << SEL_PHY_SRC;
- switch (cmd) {
- case ICE_PTP_INIT_TIME:
- cmd_val |= PHY_CMD_INIT_TIME;
- break;
- case ICE_PTP_INIT_INCVAL:
- cmd_val |= PHY_CMD_INIT_INCVAL;
- break;
- case ICE_PTP_ADJ_TIME:
- cmd_val |= PHY_CMD_ADJ_TIME;
- break;
- case ICE_PTP_READ_TIME:
- cmd_val |= PHY_CMD_READ_TIME;
- break;
- case ICE_PTP_ADJ_TIME_AT_TIME:
- cmd_val |= PHY_CMD_ADJ_TIME_AT_TIME;
- break;
- case ICE_PTP_NOP:
- break;
- }
-
/* Tx case */
- /* Read, modify, write */
- err = ice_read_phy_reg_e82x(hw, port, P_REG_TX_TMR_CMD, &val);
- if (err) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_TMR_CMD, err %d\n",
- err);
- return err;
- }
-
- /* Modify necessary bits only and perform write */
- val &= ~TS_CMD_MASK;
- val |= cmd_val;
-
err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_TMR_CMD, val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_TMR_CMD, err %d\n",
@@ -1422,19 +3025,8 @@ static int ice_ptp_write_port_cmd_e82x(struct ice_hw *hw, u8 port,
}
/* Rx case */
- /* Read, modify, write */
- err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_TMR_CMD, &val);
- if (err) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_TMR_CMD, err %d\n",
- err);
- return err;
- }
-
- /* Modify necessary bits only and perform write */
- val &= ~TS_CMD_MASK;
- val |= cmd_val;
-
- err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_TMR_CMD, val);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_TMR_CMD,
+ val | TS_CMD_RX_TYPE);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to write back RX_TMR_CMD, err %d\n",
err);
@@ -1444,63 +3036,6 @@ static int ice_ptp_write_port_cmd_e82x(struct ice_hw *hw, u8 port,
return 0;
}
-/**
- * ice_ptp_one_port_cmd - Prepare one port for a timer command
- * @hw: pointer to the HW struct
- * @configured_port: the port to configure with configured_cmd
- * @configured_cmd: timer command to prepare on the configured_port
- *
- * Prepare the configured_port for the configured_cmd, and prepare all other
- * ports for ICE_PTP_NOP. This causes the configured_port to execute the
- * desired command while all other ports perform no operation.
- */
-static int
-ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port,
- enum ice_ptp_tmr_cmd configured_cmd)
-{
- u8 port;
-
- for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
- enum ice_ptp_tmr_cmd cmd;
- int err;
-
- if (port == configured_port)
- cmd = configured_cmd;
- else
- cmd = ICE_PTP_NOP;
-
- err = ice_ptp_write_port_cmd_e82x(hw, port, cmd);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-/**
- * ice_ptp_port_cmd_e82x - Prepare all ports for a timer command
- * @hw: pointer to the HW struct
- * @cmd: timer command to prepare
- *
- * Prepare all ports connected to this device for an upcoming timer sync
- * command.
- */
-static int
-ice_ptp_port_cmd_e82x(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
-{
- u8 port;
-
- for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
- int err;
-
- err = ice_ptp_write_port_cmd_e82x(hw, port, cmd);
- if (err)
- return err;
- }
-
- return 0;
-}
-
/* E822 Vernier calibration functions
*
* The following functions are used as part of the vernier calibration of
@@ -1603,7 +3138,7 @@ static void ice_phy_cfg_lane_e82x(struct ice_hw *hw, u8 port)
return;
}
- quad = port / ICE_PORTS_PER_QUAD;
+ quad = ICE_GET_QUAD_NUM(port);
err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, &val);
if (err) {
@@ -2324,6 +3859,40 @@ int ice_phy_cfg_rx_offset_e82x(struct ice_hw *hw, u8 port)
}
/**
+ * ice_ptp_clear_phy_offset_ready_e82x - Clear PHY TX_/RX_OFFSET_READY registers
+ * @hw: pointer to the HW struct
+ *
+ * Clear PHY TX_/RX_OFFSET_READY registers, effectively marking all transmitted
+ * and received timestamps as invalid.
+ *
+ * Return: 0 on success, other error codes when failed to write to PHY
+ */
+int ice_ptp_clear_phy_offset_ready_e82x(struct ice_hw *hw)
+{
+ u8 port;
+
+ for (port = 0; port < hw->ptp.num_lports; port++) {
+ int err;
+
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_OR, 0);
+ if (err) {
+ dev_warn(ice_hw_to_dev(hw),
+ "Failed to clear PHY TX_OFFSET_READY register\n");
+ return err;
+ }
+
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_OR, 0);
+ if (err) {
+ dev_warn(ice_hw_to_dev(hw),
+ "Failed to clear PHY RX_OFFSET_READY register\n");
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+/**
* ice_read_phy_and_phc_time_e82x - Simultaneously capture PHC and PHY time
* @hw: pointer to the HW struct
* @port: the PHY port to read
@@ -2633,6 +4202,47 @@ ice_get_phy_tx_tstamp_ready_e82x(struct ice_hw *hw, u8 quad, u64 *tstamp_ready)
return 0;
}
+/**
+ * ice_phy_cfg_intr_e82x - Configure TX timestamp interrupt
+ * @hw: pointer to the HW struct
+ * @quad: the timestamp quad
+ * @ena: enable or disable interrupt
+ * @threshold: interrupt threshold
+ *
+ * Configure TX timestamp interrupt for the specified quad
+ *
+ * Return: 0 on success, other error codes when failed to read/write quad
+ */
+int ice_phy_cfg_intr_e82x(struct ice_hw *hw, u8 quad, bool ena, u8 threshold)
+{
+ int err;
+ u32 val;
+
+ err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, &val);
+ if (err)
+ return err;
+
+ val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M;
+ if (ena) {
+ val |= Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M;
+ val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_THR_M;
+ val |= FIELD_PREP(Q_REG_TX_MEM_GBL_CFG_INTR_THR_M, threshold);
+ }
+
+ return ice_write_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, val);
+}
+
+/**
+ * ice_ptp_init_phy_e82x - initialize PHY parameters
+ * @ptp: pointer to the PTP HW struct
+ */
+static void ice_ptp_init_phy_e82x(struct ice_ptp_hw *ptp)
+{
+ ptp->num_lports = 8;
+ ptp->ports_per_phy = 8;
+}
+
/* E810 functions
*
* The following functions operate on the E810 series devices which use
@@ -2655,9 +4265,9 @@ static int ice_read_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 *val)
msg.msg_addr_low = lower_16_bits(addr);
msg.msg_addr_high = upper_16_bits(addr);
msg.opcode = ice_sbq_msg_rd;
- msg.dest_dev = rmn_0;
+ msg.dest_dev = ice_sbq_dev_phy_0;
- err = ice_sbq_rw_reg(hw, &msg);
+ err = ice_sbq_rw_reg(hw, &msg, LIBIE_AQ_FLAG_RD);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
err);
@@ -2685,10 +4295,10 @@ static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val)
msg.msg_addr_low = lower_16_bits(addr);
msg.msg_addr_high = upper_16_bits(addr);
msg.opcode = ice_sbq_msg_wr;
- msg.dest_dev = rmn_0;
+ msg.dest_dev = ice_sbq_dev_phy_0;
msg.data = val;
- err = ice_sbq_rw_reg(hw, &msg);
+ err = ice_sbq_rw_reg(hw, &msg, LIBIE_AQ_FLAG_RD);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
err);
@@ -2712,33 +4322,46 @@ static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val)
static int
ice_read_phy_tstamp_ll_e810(struct ice_hw *hw, u8 idx, u8 *hi, u32 *lo)
{
+ struct ice_e810_params *params = &hw->ptp.phy.e810;
+ unsigned long flags;
u32 val;
- u8 i;
+ int err;
+
+ spin_lock_irqsave(&params->atqbal_wq.lock, flags);
+
+ /* Wait for any pending in-progress low latency interrupt */
+ err = wait_event_interruptible_locked_irq(params->atqbal_wq,
+ !(params->atqbal_flags &
+ ATQBAL_FLAGS_INTR_IN_PROGRESS));
+ if (err) {
+ spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);
+ return err;
+ }
/* Write TS index to read to the PF register so the FW can read it */
- val = FIELD_PREP(TS_LL_READ_TS_IDX, idx) | TS_LL_READ_TS;
- wr32(hw, PF_SB_ATQBAL, val);
+ val = FIELD_PREP(REG_LL_PROXY_H_TS_IDX, idx) | REG_LL_PROXY_H_EXEC;
+ wr32(hw, REG_LL_PROXY_H, val);
/* Read the register repeatedly until the FW provides us the TS */
- for (i = TS_LL_READ_RETRIES; i > 0; i--) {
- val = rd32(hw, PF_SB_ATQBAL);
+ err = read_poll_timeout_atomic(rd32, val,
+ !FIELD_GET(REG_LL_PROXY_H_EXEC, val), 10,
+ REG_LL_PROXY_H_TIMEOUT_US, false, hw,
+ REG_LL_PROXY_H);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read PTP timestamp using low latency read\n");
+ spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);
+ return err;
+ }
- /* When the bit is cleared, the TS is ready in the register */
- if (!(FIELD_GET(TS_LL_READ_TS, val))) {
- /* High 8 bit value of the TS is on the bits 16:23 */
- *hi = FIELD_GET(TS_LL_READ_TS_HIGH, val);
+ /* High 8 bit value of the TS is on the bits 16:23 */
+ *hi = FIELD_GET(REG_LL_PROXY_H_TS_HIGH, val);
- /* Read the low 32 bit value and set the TS valid bit */
- *lo = rd32(hw, PF_SB_ATQBAH) | TS_VALID;
- return 0;
- }
+ /* Read the low 32 bit value and set the TS valid bit */
+ *lo = rd32(hw, REG_LL_PROXY_L) | TS_VALID;
- udelay(10);
- }
+ spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);
- /* FW failed to provide the TS in time */
- ice_debug(hw, ICE_DBG_PTP, "Failed to read PTP timestamp using low latency read\n");
- return -EINVAL;
+ return 0;
}
/**
@@ -2809,7 +4432,8 @@ ice_read_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx, u64 *tstamp)
/* For E810 devices, the timestamp is reported with the lower 32 bits
* in the low register, and the upper 8 bits in the high register.
*/
- *tstamp = ((u64)hi) << TS_HIGH_S | ((u64)lo & TS_LOW_M);
+ *tstamp = FIELD_PREP(PHY_EXT_40B_HIGH_M, hi) |
+ FIELD_PREP(PHY_EXT_40B_LOW_M, lo);
return 0;
}
@@ -2860,17 +4484,20 @@ static int ice_clear_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx)
}
/**
- * ice_ptp_init_phy_e810 - Enable PTP function on the external PHY
+ * ice_ptp_init_phc_e810 - Perform E810 specific PHC initialization
* @hw: pointer to HW struct
*
- * Enable the timesync PTP functionality for the external PHY connected to
- * this function.
+ * Perform E810-specific PTP hardware clock initialization steps.
+ *
+ * Return: 0 on success, other error codes when failed to initialize TimeSync
*/
-int ice_ptp_init_phy_e810(struct ice_hw *hw)
+static int ice_ptp_init_phc_e810(struct ice_hw *hw)
{
u8 tmr_idx;
int err;
+ ice_ptp_cfg_sync_delay(hw, ICE_E810_E830_SYNC_DELAY);
+
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_ENA(tmr_idx),
GLTSYN_ENA_TSYN_ENA_M);
@@ -2882,21 +4509,6 @@ int ice_ptp_init_phy_e810(struct ice_hw *hw)
}
/**
- * ice_ptp_init_phc_e810 - Perform E810 specific PHC initialization
- * @hw: pointer to HW struct
- *
- * Perform E810-specific PTP hardware clock initialization steps.
- */
-static int ice_ptp_init_phc_e810(struct ice_hw *hw)
-{
- /* Ensure synchronization delay is zero */
- wr32(hw, GLTSYN_SYNC_DLAY, 0);
-
- /* Initialize the PHY */
- return ice_ptp_init_phy_e810(hw);
-}
-
-/**
* ice_ptp_prep_phy_time_e810 - Prepare PHY port with initial time
* @hw: Board private structure
* @time: Time to initialize the PHY port clock to
@@ -2932,6 +4544,55 @@ static int ice_ptp_prep_phy_time_e810(struct ice_hw *hw, u32 time)
}
/**
+ * ice_ptp_prep_phy_adj_ll_e810 - Prep PHY ports for a time adjustment
+ * @hw: pointer to HW struct
+ * @adj: adjustment value to program
+ *
+ * Use the low latency firmware interface to program PHY time adjustment to
+ * all PHY ports.
+ *
+ * Return: 0 on success, negative error code on timeout or interruption
+ */
+static int ice_ptp_prep_phy_adj_ll_e810(struct ice_hw *hw, s32 adj)
+{
+ const u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+ struct ice_e810_params *params = &hw->ptp.phy.e810;
+ u32 val;
+ int err;
+
+ spin_lock_irq(&params->atqbal_wq.lock);
+
+ /* Wait for any pending in-progress low latency interrupt */
+ err = wait_event_interruptible_locked_irq(params->atqbal_wq,
+ !(params->atqbal_flags &
+ ATQBAL_FLAGS_INTR_IN_PROGRESS));
+ if (err) {
+ spin_unlock_irq(&params->atqbal_wq.lock);
+ return err;
+ }
+
+ wr32(hw, REG_LL_PROXY_L, adj);
+ val = FIELD_PREP(REG_LL_PROXY_H_PHY_TMR_CMD_M, REG_LL_PROXY_H_PHY_TMR_CMD_ADJ) |
+ FIELD_PREP(REG_LL_PROXY_H_PHY_TMR_IDX_M, tmr_idx) | REG_LL_PROXY_H_EXEC;
+ wr32(hw, REG_LL_PROXY_H, val);
+
+ /* Read the register repeatedly until the FW indicates completion */
+ err = read_poll_timeout_atomic(rd32, val,
+ !FIELD_GET(REG_LL_PROXY_H_EXEC, val),
+ 10, REG_LL_PROXY_H_TIMEOUT_US, false, hw,
+ REG_LL_PROXY_H);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY timer adjustment using low latency interface\n");
+ spin_unlock_irq(&params->atqbal_wq.lock);
+ return err;
+ }
+
+ spin_unlock_irq(&params->atqbal_wq.lock);
+
+ return 0;
+}
+
+/**
* ice_ptp_prep_phy_adj_e810 - Prep PHY port for a time adjustment
* @hw: pointer to HW struct
* @adj: adjustment value to program
@@ -2949,6 +4610,9 @@ static int ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj)
u8 tmr_idx;
int err;
+ if (hw->dev_caps.ts_dev_info.ll_phy_tmr_update)
+ return ice_ptp_prep_phy_adj_ll_e810(hw, adj);
+
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
/* Adjustments are represented as signed 2's complement values in
@@ -2972,6 +4636,56 @@ static int ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj)
}
/**
+ * ice_ptp_prep_phy_incval_ll_e810 - Prep PHY ports increment value change
+ * @hw: pointer to HW struct
+ * @incval: The new 40bit increment value to prepare
+ *
+ * Use the low latency firmware interface to program PHY time increment value
+ * for all PHY ports.
+ *
+ * Return: 0 on success, negative error code on timeout or interruption
+ */
+static int ice_ptp_prep_phy_incval_ll_e810(struct ice_hw *hw, u64 incval)
+{
+ const u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+ struct ice_e810_params *params = &hw->ptp.phy.e810;
+ u32 val;
+ int err;
+
+ spin_lock_irq(&params->atqbal_wq.lock);
+
+ /* Wait for any pending in-progress low latency interrupt */
+ err = wait_event_interruptible_locked_irq(params->atqbal_wq,
+ !(params->atqbal_flags &
+ ATQBAL_FLAGS_INTR_IN_PROGRESS));
+ if (err) {
+ spin_unlock_irq(&params->atqbal_wq.lock);
+ return err;
+ }
+
+ wr32(hw, REG_LL_PROXY_L, lower_32_bits(incval));
+ val = FIELD_PREP(REG_LL_PROXY_H_PHY_TMR_CMD_M, REG_LL_PROXY_H_PHY_TMR_CMD_FREQ) |
+ FIELD_PREP(REG_LL_PROXY_H_TS_HIGH, (u8)upper_32_bits(incval)) |
+ FIELD_PREP(REG_LL_PROXY_H_PHY_TMR_IDX_M, tmr_idx) | REG_LL_PROXY_H_EXEC;
+ wr32(hw, REG_LL_PROXY_H, val);
+
+ /* Read the register repeatedly until the FW indicates completion */
+ err = read_poll_timeout_atomic(rd32, val,
+ !FIELD_GET(REG_LL_PROXY_H_EXEC, val),
+ 10, REG_LL_PROXY_H_TIMEOUT_US, false, hw,
+ REG_LL_PROXY_H);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY timer increment using low latency interface\n");
+ spin_unlock_irq(&params->atqbal_wq.lock);
+ return err;
+ }
+
+ spin_unlock_irq(&params->atqbal_wq.lock);
+
+ return 0;
+}
+
+/**
* ice_ptp_prep_phy_incval_e810 - Prep PHY port increment value change
* @hw: pointer to HW struct
* @incval: The new 40bit increment value to prepare
@@ -2986,6 +4700,9 @@ static int ice_ptp_prep_phy_incval_e810(struct ice_hw *hw, u64 incval)
u8 tmr_idx;
int err;
+ if (hw->dev_caps.ts_dev_info.ll_phy_tmr_update)
+ return ice_ptp_prep_phy_incval_ll_e810(hw, incval);
+
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
low = lower_32_bits(incval);
high = upper_32_bits(incval);
@@ -3017,47 +4734,9 @@ static int ice_ptp_prep_phy_incval_e810(struct ice_hw *hw, u64 incval)
*/
static int ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
{
- u32 cmd_val, val;
- int err;
-
- switch (cmd) {
- case ICE_PTP_INIT_TIME:
- cmd_val = GLTSYN_CMD_INIT_TIME;
- break;
- case ICE_PTP_INIT_INCVAL:
- cmd_val = GLTSYN_CMD_INIT_INCVAL;
- break;
- case ICE_PTP_ADJ_TIME:
- cmd_val = GLTSYN_CMD_ADJ_TIME;
- break;
- case ICE_PTP_READ_TIME:
- cmd_val = GLTSYN_CMD_READ_TIME;
- break;
- case ICE_PTP_ADJ_TIME_AT_TIME:
- cmd_val = GLTSYN_CMD_ADJ_INIT_TIME;
- break;
- case ICE_PTP_NOP:
- return 0;
- }
+ u32 val = ice_ptp_tmr_cmd_to_port_reg(hw, cmd);
- /* Read, modify, write */
- err = ice_read_phy_reg_e810(hw, ETH_GLTSYN_CMD, &val);
- if (err) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to read GLTSYN_CMD, err %d\n", err);
- return err;
- }
-
- /* Modify necessary bits only and perform write */
- val &= ~TS_CMD_MASK_E810;
- val |= cmd_val;
-
- err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_CMD, val);
- if (err) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to write back GLTSYN_CMD, err %d\n", err);
- return err;
- }
-
- return 0;
+ return ice_write_phy_reg_e810(hw, E810_ETH_GLTSYN_CMD, val);
}
/**
@@ -3076,83 +4755,21 @@ ice_get_phy_tx_tstamp_ready_e810(struct ice_hw *hw, u8 port, u64 *tstamp_ready)
return 0;
}
-/* E810T SMA functions
+/* E810 SMA functions
*
- * The following functions operate specifically on E810T hardware and are used
+ * The following functions operate specifically on E810 hardware and are used
* to access the extended GPIOs available.
*/
/**
- * ice_get_pca9575_handle
- * @hw: pointer to the hw struct
- * @pca9575_handle: GPIO controller's handle
- *
- * Find and return the GPIO controller's handle in the netlist.
- * When found - the value will be cached in the hw structure and following calls
- * will return cached value
- */
-static int
-ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle)
-{
- struct ice_aqc_get_link_topo *cmd;
- struct ice_aq_desc desc;
- int status;
- u8 idx;
-
- /* If handle was read previously return cached value */
- if (hw->io_expander_handle) {
- *pca9575_handle = hw->io_expander_handle;
- return 0;
- }
-
- /* If handle was not detected read it from the netlist */
- cmd = &desc.params.get_link_topo;
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
-
- /* Set node type to GPIO controller */
- cmd->addr.topo_params.node_type_ctx =
- (ICE_AQC_LINK_TOPO_NODE_TYPE_M &
- ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL);
-
-#define SW_PCA9575_SFP_TOPO_IDX 2
-#define SW_PCA9575_QSFP_TOPO_IDX 1
-
- /* Check if the SW IO expander controlling SMA exists in the netlist. */
- if (hw->device_id == ICE_DEV_ID_E810C_SFP)
- idx = SW_PCA9575_SFP_TOPO_IDX;
- else if (hw->device_id == ICE_DEV_ID_E810C_QSFP)
- idx = SW_PCA9575_QSFP_TOPO_IDX;
- else
- return -EOPNOTSUPP;
-
- cmd->addr.topo_params.index = idx;
-
- status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
- if (status)
- return -EOPNOTSUPP;
-
- /* Verify if we found the right IO expander type */
- if (desc.params.get_link_topo.node_part_num !=
- ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575)
- return -EOPNOTSUPP;
-
- /* If present save the handle and return it */
- hw->io_expander_handle =
- le16_to_cpu(desc.params.get_link_topo.addr.handle);
- *pca9575_handle = hw->io_expander_handle;
-
- return 0;
-}
-
-/**
- * ice_read_sma_ctrl_e810t
+ * ice_read_sma_ctrl - read the SMA control register state
* @hw: pointer to the hw struct
* @data: pointer to data to be read from the GPIO controller
*
* Read the SMA controller state. It is connected to pins 3-7 of Port 1 of the
* PCA9575 expander, so only bits 3-7 in data are valid.
*/
-int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data)
+int ice_read_sma_ctrl(struct ice_hw *hw, u8 *data)
{
int status;
u16 handle;
@@ -3164,7 +4781,7 @@ int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data)
*data = 0;
- for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) {
+ for (i = ICE_SMA_MIN_BIT; i <= ICE_SMA_MAX_BIT; i++) {
bool pin;
status = ice_aq_get_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET,
@@ -3178,14 +4795,14 @@ int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data)
}
/**
- * ice_write_sma_ctrl_e810t
+ * ice_write_sma_ctrl - write to the SMA control register
* @hw: pointer to the hw struct
* @data: data to be written to the GPIO controller
*
* Write the data to the SMA controller. It is connected to pins 3-7 of Port 1
* of the PCA9575 expander, so only bits 3-7 in data are valid.
*/
-int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data)
+int ice_write_sma_ctrl(struct ice_hw *hw, u8 data)
{
int status;
u16 handle;
@@ -3195,7 +4812,7 @@ int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data)
if (status)
return status;
- for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) {
+ for (i = ICE_SMA_MIN_BIT; i <= ICE_SMA_MAX_BIT; i++) {
bool pin;
pin = !(data & (1 << i));
@@ -3209,41 +4826,203 @@ int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data)
}
/**
- * ice_read_pca9575_reg_e810t
- * @hw: pointer to the hw struct
- * @offset: GPIO controller register offset
- * @data: pointer to data to be read from the GPIO controller
+ * ice_ptp_read_sdp_ac - read SDP available connections section from NVM
+ * @hw: pointer to the HW struct
+ * @entries: returns the SDP available connections section from NVM
+ * @num_entries: returns the number of valid entries
*
- * Read the register from the GPIO controller
+ * Return: 0 on success, or a negative error code if the NVM read failed or the
+ * section does not exist or is corrupted
*/
-int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data)
+int ice_ptp_read_sdp_ac(struct ice_hw *hw, __le16 *entries, uint *num_entries)
{
- struct ice_aqc_link_topo_addr link_topo;
- __le16 addr;
- u16 handle;
+ __le16 data;
+ u32 offset;
int err;
- memset(&link_topo, 0, sizeof(link_topo));
+ err = ice_acquire_nvm(hw, ICE_RES_READ);
+ if (err)
+ goto exit;
- err = ice_get_pca9575_handle(hw, &handle);
+ /* Read the offset of SDP_AC */
+ offset = ICE_AQC_NVM_SDP_AC_PTR_OFFSET;
+ err = ice_aq_read_nvm(hw, 0, offset, sizeof(data), &data, false, true,
+ NULL);
if (err)
- return err;
+ goto exit;
+
+ /* Check if the section exists */
+ offset = FIELD_GET(ICE_AQC_NVM_SDP_AC_PTR_M, le16_to_cpu(data));
+ if (offset == ICE_AQC_NVM_SDP_AC_PTR_INVAL) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ if (offset & ICE_AQC_NVM_SDP_AC_PTR_TYPE_M) {
+ offset &= ICE_AQC_NVM_SDP_AC_PTR_M;
+ offset *= ICE_AQC_NVM_SECTOR_UNIT;
+ } else {
+ offset *= sizeof(data);
+ }
+
+ /* Skip reading section length and read the number of valid entries */
+ offset += sizeof(data);
+ err = ice_aq_read_nvm(hw, 0, offset, sizeof(data), &data, false, true,
+ NULL);
+ if (err)
+ goto exit;
+ *num_entries = le16_to_cpu(data);
+
+ /* Read SDP configuration section */
+ offset += sizeof(data);
+ err = ice_aq_read_nvm(hw, 0, offset, *num_entries * sizeof(data),
+ entries, false, true, NULL);
+
+exit:
+ if (err)
+ dev_dbg(ice_hw_to_dev(hw), "Failed to configure SDP connection section\n");
+ ice_release_nvm(hw);
+ return err;
+}
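As an illustration of the pointer arithmetic above, a small standalone sketch of the pointer-to-byte-offset translation: the SDP_AC pointer word is interpreted either as a sector index or a word index depending on its type bit. The masks and the 4096-byte sector size below are assumed placeholder values, not the driver's ICE_AQC_NVM_* constants.

#include <stdint.h>
#include <stdio.h>

#define SDP_AC_PTR_TYPE_BIT	0x8000u		/* assumed type bit */
#define SDP_AC_PTR_MASK		0x7FFFu		/* assumed pointer mask */
#define NVM_SECTOR_UNIT		4096u		/* assumed sector size in bytes */
#define NVM_WORD_SIZE		2u		/* NVM words are 16 bits */

/* Translate the SDP_AC pointer word into a byte offset into the NVM image */
static uint32_t sdp_ac_byte_offset(uint16_t ptr_word)
{
	uint32_t offset = ptr_word & SDP_AC_PTR_MASK;

	if (ptr_word & SDP_AC_PTR_TYPE_BIT)
		return offset * NVM_SECTOR_UNIT;	/* pointer counts sectors */
	return offset * NVM_WORD_SIZE;			/* pointer counts words */
}

int main(void)
{
	printf("0x%x\n", (unsigned int)sdp_ac_byte_offset(0x8003)); /* 3 sectors -> 0x3000 */
	printf("0x%x\n", (unsigned int)sdp_ac_byte_offset(0x0100)); /* 256 words -> 0x200 */
	return 0;
}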
+
+/**
+ * ice_ptp_init_phy_e810 - initialize PHY parameters
+ * @ptp: pointer to the PTP HW struct
+ */
+static void ice_ptp_init_phy_e810(struct ice_ptp_hw *ptp)
+{
+ ptp->num_lports = 8;
+ ptp->ports_per_phy = 4;
+
+ init_waitqueue_head(&ptp->phy.e810.atqbal_wq);
+}
+
+/* E830 functions
+ *
+ * The following functions operate on the E830 series devices.
+ *
+ */
+
+/**
+ * ice_ptp_init_phc_e830 - Perform E830 specific PHC initialization
+ * @hw: pointer to HW struct
+ *
+ * Perform E830-specific PTP hardware clock initialization steps.
+ */
+static void ice_ptp_init_phc_e830(const struct ice_hw *hw)
+{
+ ice_ptp_cfg_sync_delay(hw, ICE_E810_E830_SYNC_DELAY);
+}
+
+/**
+ * ice_ptp_write_direct_incval_e830 - Prep PHY port increment value change
+ * @hw: pointer to HW struct
+ * @incval: The new 40bit increment value to prepare
+ *
+ * Prepare the PHY port for a new increment value by programming the PHC
+ * GLTSYN_INCVAL_L and GLTSYN_INCVAL_H registers. The actual change is
+ * completed by FW automatically.
+ */
+static void ice_ptp_write_direct_incval_e830(const struct ice_hw *hw,
+ u64 incval)
+{
+ u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+
+ wr32(hw, GLTSYN_INCVAL_L(tmr_idx), lower_32_bits(incval));
+ wr32(hw, GLTSYN_INCVAL_H(tmr_idx), upper_32_bits(incval));
+}
+
+/**
+ * ice_ptp_write_direct_phc_time_e830 - Set the PHC time directly
+ * @hw: Board private structure
+ * @time: Time to initialize the PHC to
+ *
+ * Program the PHC GLTSYN_TIME registers directly with the initial clock time.
+ * On E830 the hardware applies the written time immediately; no
+ * ICE_PTP_INIT_TIME command is required.
+ *
+ * The time value is the full 64 bit PHY timer value, usually in units of
+ * nominal nanoseconds.
+ */
+static void ice_ptp_write_direct_phc_time_e830(const struct ice_hw *hw,
+ u64 time)
+{
+ u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+
+ wr32(hw, GLTSYN_TIME_0(tmr_idx), 0);
+ wr32(hw, GLTSYN_TIME_L(tmr_idx), lower_32_bits(time));
+ wr32(hw, GLTSYN_TIME_H(tmr_idx), upper_32_bits(time));
+}
- link_topo.handle = cpu_to_le16(handle);
- link_topo.topo_params.node_type_ctx =
- FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M,
- ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED);
+/**
+ * ice_ptp_port_cmd_e830 - Prepare all external PHYs for a timer command
+ * @hw: pointer to HW struct
+ * @cmd: Command to be sent to the port
+ *
+ * Prepare the external PHYs connected to this device for a timer sync
+ * command.
+ *
+ * Return: 0 on success, negative error code when PHY write failed
+ */
+static int ice_ptp_port_cmd_e830(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
+{
+ u32 val = ice_ptp_tmr_cmd_to_port_reg(hw, cmd);
- addr = cpu_to_le16((u16)offset);
+ return ice_write_phy_reg_e810(hw, E830_ETH_GLTSYN_CMD, val);
+}
- return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL);
+/**
+ * ice_read_phy_tstamp_e830 - Read a PHY timestamp out of the external PHY
+ * @hw: pointer to the HW struct
+ * @idx: the timestamp index to read
+ * @tstamp: on return, the 40bit timestamp value
+ *
+ * Read a 40bit timestamp value out of the timestamp block of the external PHY
+ * on the E830 device.
+ */
+static void ice_read_phy_tstamp_e830(const struct ice_hw *hw, u8 idx,
+ u64 *tstamp)
+{
+ u32 hi, lo;
+
+ hi = rd32(hw, E830_PRTTSYN_TXTIME_H(idx));
+ lo = rd32(hw, E830_PRTTSYN_TXTIME_L(idx));
+
+ /* For E830 devices, the timestamp is reported with the lower 32 bits
+ * in the low register, and the upper 8 bits in the high register.
+ */
+ *tstamp = FIELD_PREP(PHY_EXT_40B_HIGH_M, hi) |
+ FIELD_PREP(PHY_EXT_40B_LOW_M, lo);
+}
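A short standalone sketch of the 40-bit composition performed above: the high register supplies bits 39:32 and the low register bits 31:0, matching the PHY_EXT_40B_HIGH_M/PHY_EXT_40B_LOW_M masks added in ice_ptp_hw.h.

#include <stdint.h>
#include <stdio.h>

/* Compose a 40-bit extended timestamp from the two register reads */
static uint64_t phy_ext_40b_tstamp(uint32_t hi, uint32_t lo)
{
	return (((uint64_t)hi & 0xFF) << 32) | lo;	/* bits 39:32 | bits 31:0 */
}

int main(void)
{
	/* hypothetical register contents */
	uint32_t hi = 0xA5, lo = 0x12345678;

	printf("tstamp = 0x%010llx\n",
	       (unsigned long long)phy_ext_40b_tstamp(hi, lo));
	return 0;
}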
+
+/**
+ * ice_get_phy_tx_tstamp_ready_e830 - Read Tx memory status register
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to read
+ * @tstamp_ready: contents of the Tx memory status register
+ */
+static void ice_get_phy_tx_tstamp_ready_e830(const struct ice_hw *hw, u8 port,
+ u64 *tstamp_ready)
+{
+ *tstamp_ready = rd32(hw, E830_PRTMAC_TS_TX_MEM_VALID_H);
+ *tstamp_ready <<= 32;
+ *tstamp_ready |= rd32(hw, E830_PRTMAC_TS_TX_MEM_VALID_L);
+}
+
+/**
+ * ice_ptp_init_phy_e830 - initialize PHY parameters
+ * @ptp: pointer to the PTP HW struct
+ */
+static void ice_ptp_init_phy_e830(struct ice_ptp_hw *ptp)
+{
+ ptp->num_lports = 8;
+ ptp->ports_per_phy = 4;
}
/* Device agnostic functions
*
- * The following functions implement shared behavior common to both E822 and
- * E810 devices, possibly calling a device specific implementation where
- * necessary.
+ * The following functions implement shared behavior common to all devices,
+ * possibly calling a device specific implementation where necessary.
*/
/**
@@ -3296,18 +5075,136 @@ void ice_ptp_unlock(struct ice_hw *hw)
}
/**
- * ice_ptp_init_phy_model - Initialize hw->phy_model based on device type
+ * ice_ptp_init_hw - Initialize hw based on device type
* @hw: pointer to the HW structure
*
- * Determine the PHY model for the device, and initialize hw->phy_model
+ * Determine the PHY model for the device, and initialize hw
* for use by other functions.
*/
-void ice_ptp_init_phy_model(struct ice_hw *hw)
+void ice_ptp_init_hw(struct ice_hw *hw)
{
- if (ice_is_e810(hw))
- hw->phy_model = ICE_PHY_E810;
- else
- hw->phy_model = ICE_PHY_E82X;
+ struct ice_ptp_hw *ptp = &hw->ptp;
+
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ ice_ptp_init_phy_e810(ptp);
+ break;
+ case ICE_MAC_E830:
+ ice_ptp_init_phy_e830(ptp);
+ break;
+ case ICE_MAC_GENERIC:
+ ice_ptp_init_phy_e82x(ptp);
+ break;
+ case ICE_MAC_GENERIC_3K_E825:
+ ice_ptp_init_phy_e825(hw);
+ break;
+ default:
+ return;
+ }
+}
+
+/**
+ * ice_ptp_write_port_cmd - Prepare a single PHY port for a timer command
+ * @hw: pointer to HW struct
+ * @port: Port to which cmd has to be sent
+ * @cmd: Command to be sent to the port
+ *
+ * Prepare one port for the upcoming timer sync command. Do not use this for
+ * programming only a single port; instead, use ice_ptp_one_port_cmd() to
+ * ensure the remaining ports are properly initialized to ICE_PTP_NOP.
+ *
+ * Return:
+ * * %0 - success
+ * * %-EOPNOTSUPP - PHY type not supported
+ * * %other - failed to write port command
+ */
+static int ice_ptp_write_port_cmd(struct ice_hw *hw, u8 port,
+ enum ice_ptp_tmr_cmd cmd)
+{
+ switch (hw->mac_type) {
+ case ICE_MAC_GENERIC:
+ return ice_ptp_write_port_cmd_e82x(hw, port, cmd);
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_ptp_write_port_cmd_eth56g(hw, port, cmd);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
+ * ice_ptp_one_port_cmd - Program one PHY port for a timer command
+ * @hw: pointer to HW struct
+ * @configured_port: the port that should execute the command
+ * @configured_cmd: the command to be executed on the configured port
+ *
+ * Prepare one port for executing a timer command, while programming all other
+ * ports with ICE_PTP_NOP. This allows executing a command on a single port
+ * while ensuring all other ports do not execute stale commands.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to write port command
+ */
+int ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port,
+ enum ice_ptp_tmr_cmd configured_cmd)
+{
+ u32 port;
+
+ for (port = 0; port < hw->ptp.num_lports; port++) {
+ int err;
+
+ /* Program the configured port with the configured command,
+ * program all other ports with ICE_PTP_NOP.
+ */
+ if (port == configured_port)
+ err = ice_ptp_write_port_cmd(hw, port, configured_cmd);
+ else
+ err = ice_ptp_write_port_cmd(hw, port, ICE_PTP_NOP);
+
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_ptp_port_cmd - Prepare PHY ports for a timer sync command
+ * @hw: pointer to HW struct
+ * @cmd: the timer command to setup
+ *
+ * Prepare all PHY ports on this device for the requested timer command. For
+ * some families this can be done in one shot, but for other families each
+ * port must be configured individually.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to write port command
+ */
+static int ice_ptp_port_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
+{
+ u32 port;
+
+ /* PHY models which can program all ports simultaneously */
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ return ice_ptp_port_cmd_e810(hw, cmd);
+ case ICE_MAC_E830:
+ return ice_ptp_port_cmd_e830(hw, cmd);
+ default:
+ break;
+ }
+
+ /* PHY models which require programming each port separately */
+ for (port = 0; port < hw->ptp.num_lports; port++) {
+ int err;
+
+ err = ice_ptp_write_port_cmd(hw, port, cmd);
+ if (err)
+ return err;
+ }
+
+ return 0;
}
/**
@@ -3328,17 +5225,7 @@ static int ice_ptp_tmr_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
ice_ptp_src_cmd(hw, cmd);
/* Next, prepare the ports */
- switch (hw->phy_model) {
- case ICE_PHY_E810:
- err = ice_ptp_port_cmd_e810(hw, cmd);
- break;
- case ICE_PHY_E82X:
- err = ice_ptp_port_cmd_e82x(hw, cmd);
- break;
- default:
- err = -EOPNOTSUPP;
- }
-
+ err = ice_ptp_port_cmd(hw, cmd);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY ports for timer command %u, err %d\n",
cmd, err);
@@ -3374,19 +5261,29 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time)
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
/* Source timers */
+ /* For E830 we don't need to use shadow registers, it's automatic */
+ if (hw->mac_type == ICE_MAC_E830) {
+ ice_ptp_write_direct_phc_time_e830(hw, time);
+ return 0;
+ }
+
wr32(hw, GLTSYN_SHTIME_L(tmr_idx), lower_32_bits(time));
wr32(hw, GLTSYN_SHTIME_H(tmr_idx), upper_32_bits(time));
wr32(hw, GLTSYN_SHTIME_0(tmr_idx), 0);
/* PHY timers */
/* Fill Rx and Tx ports and send msg to PHY */
- switch (hw->phy_model) {
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
err = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF);
break;
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
err = ice_ptp_prep_phy_time_e82x(hw, time & 0xFFFFFFFF);
break;
+ case ICE_MAC_GENERIC_3K_E825:
+ err = ice_ptp_prep_phy_time_eth56g(hw,
+ (u32)(time & 0xFFFFFFFF));
+ break;
default:
err = -EOPNOTSUPP;
}
@@ -3418,17 +5315,26 @@ int ice_ptp_write_incval(struct ice_hw *hw, u64 incval)
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+ /* For E830 we don't need to use shadow registers, it's automatic */
+ if (hw->mac_type == ICE_MAC_E830) {
+ ice_ptp_write_direct_incval_e830(hw, incval);
+ return 0;
+ }
+
/* Shadow Adjust */
wr32(hw, GLTSYN_SHADJ_L(tmr_idx), lower_32_bits(incval));
wr32(hw, GLTSYN_SHADJ_H(tmr_idx), upper_32_bits(incval));
- switch (hw->phy_model) {
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
err = ice_ptp_prep_phy_incval_e810(hw, incval);
break;
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
err = ice_ptp_prep_phy_incval_e82x(hw, incval);
break;
+ case ICE_MAC_GENERIC_3K_E825:
+ err = ice_ptp_prep_phy_incval_eth56g(hw, incval);
+ break;
default:
err = -EOPNOTSUPP;
}
@@ -3488,13 +5394,19 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
wr32(hw, GLTSYN_SHADJ_L(tmr_idx), 0);
wr32(hw, GLTSYN_SHADJ_H(tmr_idx), adj);
- switch (hw->phy_model) {
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
err = ice_ptp_prep_phy_adj_e810(hw, adj);
break;
- case ICE_PHY_E82X:
+ case ICE_MAC_E830:
+ /* E830 syncs PHYs automatically after setting GLTSYN_SHADJ */
+ return 0;
+ case ICE_MAC_GENERIC:
err = ice_ptp_prep_phy_adj_e82x(hw, adj);
break;
+ case ICE_MAC_GENERIC_3K_E825:
+ err = ice_ptp_prep_phy_adj_eth56g(hw, adj);
+ break;
default:
err = -EOPNOTSUPP;
}
@@ -3518,11 +5430,16 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
*/
int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
{
- switch (hw->phy_model) {
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
return ice_read_phy_tstamp_e810(hw, block, idx, tstamp);
- case ICE_PHY_E82X:
+ case ICE_MAC_E830:
+ ice_read_phy_tstamp_e830(hw, idx, tstamp);
+ return 0;
+ case ICE_MAC_GENERIC:
return ice_read_phy_tstamp_e82x(hw, block, idx, tstamp);
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_read_ptp_tstamp_eth56g(hw, block, idx, tstamp);
default:
return -EOPNOTSUPP;
}
@@ -3546,11 +5463,13 @@ int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
*/
int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx)
{
- switch (hw->phy_model) {
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
return ice_clear_phy_tstamp_e810(hw, block, idx);
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
return ice_clear_phy_tstamp_e82x(hw, block, idx);
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_clear_ptp_tstamp_eth56g(hw, block, idx);
default:
return -EOPNOTSUPP;
}
@@ -3607,11 +5526,14 @@ static int ice_get_pf_c827_idx(struct ice_hw *hw, u8 *idx)
*/
void ice_ptp_reset_ts_memory(struct ice_hw *hw)
{
- switch (hw->phy_model) {
- case ICE_PHY_E82X:
+ switch (hw->mac_type) {
+ case ICE_MAC_GENERIC:
ice_ptp_reset_ts_memory_e82x(hw);
break;
- case ICE_PHY_E810:
+ case ICE_MAC_GENERIC_3K_E825:
+ ice_ptp_reset_ts_memory_eth56g(hw);
+ break;
+ case ICE_MAC_E810:
default:
return;
}
@@ -3633,11 +5555,16 @@ int ice_ptp_init_phc(struct ice_hw *hw)
/* Clear event err indications for auxiliary pins */
(void)rd32(hw, GLTSYN_STAT(src_idx));
- switch (hw->phy_model) {
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
return ice_ptp_init_phc_e810(hw);
- case ICE_PHY_E82X:
+ case ICE_MAC_E830:
+ ice_ptp_init_phc_e830(hw);
+ return 0;
+ case ICE_MAC_GENERIC:
return ice_ptp_init_phc_e82x(hw);
+ case ICE_MAC_GENERIC_3K_E825:
+ return 0;
default:
return -EOPNOTSUPP;
}
@@ -3656,14 +5583,19 @@ int ice_ptp_init_phc(struct ice_hw *hw)
*/
int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready)
{
- switch (hw->phy_model) {
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
return ice_get_phy_tx_tstamp_ready_e810(hw, block,
tstamp_ready);
- case ICE_PHY_E82X:
+ case ICE_MAC_E830:
+ ice_get_phy_tx_tstamp_ready_e830(hw, block, tstamp_ready);
+ return 0;
+ case ICE_MAC_GENERIC:
return ice_get_phy_tx_tstamp_ready_e82x(hw, block,
tstamp_ready);
- break;
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_get_phy_tx_tstamp_ready_eth56g(hw, block,
+ tstamp_ready);
default:
return -EOPNOTSUPP;
}
@@ -3760,6 +5692,25 @@ ice_cgu_get_pin_desc(struct ice_hw *hw, bool input, int *size)
}
/**
+ * ice_cgu_get_num_pins - get pin description array size
+ * @hw: pointer to the hw struct
+ * @input: true to query input pins, false to query output pins
+ *
+ * Return: size of the pin description array for the given hw.
+ */
+int ice_cgu_get_num_pins(struct ice_hw *hw, bool input)
+{
+ const struct ice_cgu_pin_desc *t;
+ int size;
+
+ t = ice_cgu_get_pin_desc(hw, input, &size);
+ if (t)
+ return size;
+
+ return 0;
+}
+
+/**
* ice_cgu_get_pin_type - get pin's type
* @hw: pointer to the hw struct
* @pin: pin index
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
index 1f3e03124430..5896b346e579 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
@@ -41,11 +41,45 @@ enum ice_ptp_fec_mode {
ICE_PTP_FEC_MODE_RS_FEC
};
+enum eth56g_res_type {
+ ETH56G_PHY_REG_PTP,
+ ETH56G_PHY_MEM_PTP,
+ ETH56G_PHY_REG_XPCS,
+ ETH56G_PHY_REG_MAC,
+ ETH56G_PHY_REG_GPCS,
+ NUM_ETH56G_PHY_RES
+};
+
+enum ice_eth56g_link_spd {
+ ICE_ETH56G_LNK_SPD_1G,
+ ICE_ETH56G_LNK_SPD_2_5G,
+ ICE_ETH56G_LNK_SPD_10G,
+ ICE_ETH56G_LNK_SPD_25G,
+ ICE_ETH56G_LNK_SPD_40G,
+ ICE_ETH56G_LNK_SPD_50G,
+ ICE_ETH56G_LNK_SPD_50G2,
+ ICE_ETH56G_LNK_SPD_100G,
+ ICE_ETH56G_LNK_SPD_100G2,
+ NUM_ICE_ETH56G_LNK_SPD /* Must be last */
+};
+
+/**
+ * struct ice_phy_reg_info_eth56g - ETH56G PHY register parameters
+ * @base_addr: base address for each PHY block
+ * @step: step between PHY lanes
+ *
+ * Characteristic information for the various PHY register parameters in the
+ * ETH56G devices
+ */
+struct ice_phy_reg_info_eth56g {
+ u32 base_addr;
+ u32 step;
+};
+
/**
* struct ice_time_ref_info_e82x
* @pll_freq: Frequency of PLL that drives timer ticks in Hz
* @nominal_incval: increment to generate nanoseconds in GLTSYN_TIME_L
- * @pps_delay: propagation delay of the PPS output signal
*
* Characteristic information for the various TIME_REF sources possible in the
* E822 devices
@@ -53,7 +87,6 @@ enum ice_ptp_fec_mode {
struct ice_time_ref_info_e82x {
u64 pll_freq;
u64 nominal_incval;
- u8 pps_delay;
};
/**
@@ -94,23 +127,73 @@ struct ice_vernier_info_e82x {
u32 rx_fixed_delay;
};
+#define ICE_ETH56G_MAC_CFG_RX_OFFSET_INT GENMASK(19, 9)
+#define ICE_ETH56G_MAC_CFG_RX_OFFSET_FRAC GENMASK(8, 0)
+#define ICE_ETH56G_MAC_CFG_FRAC_W 9
/**
- * struct ice_cgu_pll_params_e82x
- * @refclk_pre_div: Reference clock pre-divisor
- * @feedback_div: Feedback divisor
- * @frac_n_div: Fractional divisor
- * @post_pll_div: Post PLL divisor
+ * struct ice_eth56g_mac_reg_cfg - MAC config values for specific PTP registers
+ * @tx_mode: Tx timestamp compensation mode
+ * @tx_mk_dly: Tx timestamp marker start strobe delay
+ * @tx_cw_dly: Tx timestamp codeword start strobe delay
+ * @rx_mode: Rx timestamp compensation mode
+ * @rx_mk_dly: Rx timestamp marker start strobe delay
+ * @rx_cw_dly: Rx timestamp codeword start strobe delay
+ * @blks_per_clk: number of blocks transferred per clock cycle
+ * @blktime: block time, fixed point
+ * @mktime: marker time, fixed point
+ * @tx_offset: total Tx offset, fixed point
+ * @rx_offset: total Rx offset, contains value for bitslip/deskew, fixed point
*
- * Clock Generation Unit parameters used to program the PLL based on the
- * selected TIME_REF frequency.
+ * All fixed-point registers except Rx offset are 23-bit unsigned integers
+ * with a 9-bit fractional part.
+ * Rx offset is an 11-bit unsigned integer with a 9-bit fractional part.
*/
-struct ice_cgu_pll_params_e82x {
- u32 refclk_pre_div;
- u32 feedback_div;
- u32 frac_n_div;
- u32 post_pll_div;
+struct ice_eth56g_mac_reg_cfg {
+ struct {
+ u8 def;
+ u8 rs;
+ } tx_mode;
+ u8 tx_mk_dly;
+ struct {
+ u8 def;
+ u8 onestep;
+ } tx_cw_dly;
+ struct {
+ u8 def;
+ u8 rs;
+ } rx_mode;
+ struct {
+ u8 def;
+ u8 rs;
+ } rx_mk_dly;
+ struct {
+ u8 def;
+ u8 rs;
+ } rx_cw_dly;
+ u8 blks_per_clk;
+ u16 blktime;
+ u16 mktime;
+ struct {
+ u32 serdes;
+ u32 no_fec;
+ u32 fc;
+ u32 rs;
+ u32 sfd;
+ u32 onestep;
+ } tx_offset;
+ struct {
+ u32 serdes;
+ u32 no_fec;
+ u32 fc;
+ u32 rs;
+ u32 sfd;
+ u32 bs_ds;
+ } rx_offset;
};
+extern
+const struct ice_eth56g_mac_reg_cfg eth56g_mac_cfg[NUM_ICE_ETH56G_LNK_SPD];
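To make the 9-bit-fraction fixed-point format described above concrete, a small standalone sketch encoding a hypothetical 25.5 ns value; ICE_ETH56G_MAC_CFG_FRAC_W (9) is the fraction width, the input value is invented for illustration.

#include <stdint.h>
#include <stdio.h>

#define FRAC_W	9	/* ICE_ETH56G_MAC_CFG_FRAC_W */

int main(void)
{
	double offset_ns = 25.5;				/* hypothetical */
	uint32_t fixed = (uint32_t)(offset_ns * (1u << FRAC_W));

	/* 25.5 * 512 = 13056 = 0x3300: fraction in bits 8:0, integer above */
	printf("fixed point = 0x%x\n", (unsigned int)fixed);
	return 0;
}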
+
#define E810C_QSFP_C827_0_HANDLE 2
#define E810C_QSFP_C827_1_HANDLE 3
enum ice_e810_c827_idx {
@@ -182,14 +265,14 @@ struct ice_cgu_pin_desc {
struct dpll_pin_frequency *freq_supp;
};
-extern const struct
-ice_cgu_pll_params_e82x e822_cgu_params[NUM_ICE_TIME_REF_FREQ];
-
#define E810C_QSFP_C827_0_HANDLE 2
#define E810C_QSFP_C827_1_HANDLE 3
+/* Table of constants related to possible ETH56G PHY resources */
+extern const struct ice_phy_reg_info_eth56g eth56g_phy_res[NUM_ETH56G_PHY_RES];
+
/* Table of constants related to possible TIME_REF sources */
-extern const struct ice_time_ref_info_e82x e822_time_ref[NUM_ICE_TIME_REF_FREQ];
+extern const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TSPLL_FREQ];
/* Table of constants for Vernier calibration on E822 */
extern const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD];
@@ -197,7 +280,9 @@ extern const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD];
/* Increment value to generate nanoseconds in the GLTSYN_TIME_L register for
* the E810 devices. Based off of a PLL with an 812.5 MHz frequency.
*/
-#define ICE_PTP_NOMINAL_INCVAL_E810 0x13b13b13bULL
+#define ICE_E810_PLL_FREQ 812500000
+#define ICE_PTP_NOMINAL_INCVAL_E810 0x13b13b13bULL
+#define ICE_E810_E830_SYNC_DELAY 0
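A quick standalone sanity check (not driver code) of the relationship between the two values above: the nominal increment value is the 812.5 MHz clock period in nanoseconds expressed as a 32.32 fixed-point number, which rounds to 0x13B13B13B.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	double period_ns = 1e9 / 812500000.0;		/* ICE_E810_PLL_FREQ */
	uint64_t incval = (uint64_t)(period_ns * 4294967296.0 + 0.5);

	printf("incval = 0x%llx\n", (unsigned long long)incval); /* 0x13b13b13b */
	return 0;
}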
/* Device agnostic functions */
u8 ice_get_ptp_src_clock_index(struct ice_hw *hw);
@@ -208,11 +293,15 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time);
int ice_ptp_write_incval(struct ice_hw *hw, u64 incval);
int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval);
int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj);
+int ice_ptp_clear_phy_offset_ready_e82x(struct ice_hw *hw);
int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp);
int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx);
void ice_ptp_reset_ts_memory(struct ice_hw *hw);
int ice_ptp_init_phc(struct ice_hw *hw);
+void ice_ptp_init_hw(struct ice_hw *hw);
int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready);
+int ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port,
+ enum ice_ptp_tmr_cmd configured_cmd);
/* E822 family functions */
int ice_read_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 *val);
@@ -225,7 +314,8 @@ void ice_ptp_reset_ts_memory_quad_e82x(struct ice_hw *hw, u8 quad);
*
* Returns the current TIME_REF from the capabilities structure.
*/
-static inline enum ice_time_ref_freq ice_e82x_time_ref(struct ice_hw *hw)
+
+static inline enum ice_tspll_freq ice_e82x_time_ref(const struct ice_hw *hw)
{
return hw->func_caps.ts_func_info.time_ref;
}
@@ -239,24 +329,19 @@ static inline enum ice_time_ref_freq ice_e82x_time_ref(struct ice_hw *hw)
* change, such as an update to the CGU registers.
*/
static inline void
-ice_set_e82x_time_ref(struct ice_hw *hw, enum ice_time_ref_freq time_ref)
+ice_set_e82x_time_ref(struct ice_hw *hw, enum ice_tspll_freq time_ref)
{
hw->func_caps.ts_func_info.time_ref = time_ref;
}
-static inline u64 ice_e82x_pll_freq(enum ice_time_ref_freq time_ref)
-{
- return e822_time_ref[time_ref].pll_freq;
-}
-
-static inline u64 ice_e82x_nominal_incval(enum ice_time_ref_freq time_ref)
+static inline u64 ice_e82x_pll_freq(enum ice_tspll_freq time_ref)
{
- return e822_time_ref[time_ref].nominal_incval;
+ return e82x_time_ref[time_ref].pll_freq;
}
-static inline u64 ice_e82x_pps_delay(enum ice_time_ref_freq time_ref)
+static inline u64 ice_e82x_nominal_incval(enum ice_tspll_freq time_ref)
{
- return e822_time_ref[time_ref].pps_delay;
+ return e82x_time_ref[time_ref].nominal_incval;
}
/* E822 Vernier calibration functions */
@@ -264,13 +349,13 @@ int ice_stop_phy_timer_e82x(struct ice_hw *hw, u8 port, bool soft_reset);
int ice_start_phy_timer_e82x(struct ice_hw *hw, u8 port);
int ice_phy_cfg_tx_offset_e82x(struct ice_hw *hw, u8 port);
int ice_phy_cfg_rx_offset_e82x(struct ice_hw *hw, u8 port);
+int ice_phy_cfg_intr_e82x(struct ice_hw *hw, u8 quad, bool ena, u8 threshold);
/* E810 family functions */
-int ice_ptp_init_phy_e810(struct ice_hw *hw);
-int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data);
-int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data);
-int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data);
-bool ice_is_pca9575_present(struct ice_hw *hw);
+int ice_read_sma_ctrl(struct ice_hw *hw, u8 *data);
+int ice_write_sma_ctrl(struct ice_hw *hw, u8 data);
+int ice_ptp_read_sdp_ac(struct ice_hw *hw, __le16 *entries, uint *num_entries);
+int ice_cgu_get_num_pins(struct ice_hw *hw, bool input);
enum dpll_pin_type ice_cgu_get_pin_type(struct ice_hw *hw, u8 pin, bool input);
struct dpll_pin_frequency *
ice_cgu_get_pin_freq_supp(struct ice_hw *hw, u8 pin, bool input, u8 *num);
@@ -280,11 +365,43 @@ int ice_get_cgu_state(struct ice_hw *hw, u8 dpll_idx,
u8 *ref_state, u8 *eec_mode, s64 *phase_offset,
enum dpll_lock_status *dpll_state);
int ice_get_cgu_rclk_pin_info(struct ice_hw *hw, u8 *base_idx, u8 *pin_num);
-
-void ice_ptp_init_phy_model(struct ice_hw *hw);
int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id,
unsigned long *caps);
+/* ETH56G family functions */
+int ice_ptp_read_tx_hwtstamp_status_eth56g(struct ice_hw *hw, u32 *ts_status);
+int ice_stop_phy_timer_eth56g(struct ice_hw *hw, u8 port, bool soft_reset);
+int ice_start_phy_timer_eth56g(struct ice_hw *hw, u8 port);
+int ice_phy_cfg_intr_eth56g(struct ice_hw *hw, u8 port, bool ena, u8 threshold);
+int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port);
+
+#define ICE_ETH56G_NOMINAL_INCVAL 0x140000000ULL
+#define ICE_ETH56G_NOMINAL_PCS_REF_TUS 0x100000000ULL
+#define ICE_ETH56G_NOMINAL_PCS_REF_INC 0x300000000ULL
+#define ICE_ETH56G_NOMINAL_THRESH4 0x7777
+#define ICE_ETH56G_NOMINAL_TX_THRESH 0x6
+
+/**
+ * ice_get_base_incval - Get base clock increment value
+ * @hw: pointer to the HW struct
+ *
+ * Return: base clock increment value for supported PHYs, 0 otherwise
+ */
+static inline u64 ice_get_base_incval(struct ice_hw *hw)
+{
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ case ICE_MAC_E830:
+ return ICE_PTP_NOMINAL_INCVAL_E810;
+ case ICE_MAC_GENERIC:
+ return ice_e82x_nominal_incval(ice_e82x_time_ref(hw));
+ case ICE_MAC_GENERIC_3K_E825:
+ return ICE_ETH56G_NOMINAL_INCVAL;
+ default:
+ return 0;
+ }
+}
+
#define PFTSYN_SEM_BYTES 4
#define ICE_PTP_CLOCK_INDEX_0 0x00
@@ -312,6 +429,7 @@ int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id,
#define TS_CMD_MASK_E810 0xFF
#define TS_CMD_MASK 0xF
#define SYNC_EXEC_CMD 0x3
+#define TS_CMD_RX_TYPE ICE_M(0x18, 0x4)
/* Macros to derive port low and high addresses on both quads */
#define P_Q0_L(a, p) ((((a) + (0x2000 * (p)))) & 0xFFFF)
@@ -344,11 +462,8 @@ int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id,
#define Q_REG_TX_MEM_GBL_CFG 0xC08
#define Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_S 0
#define Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M BIT(0)
-#define Q_REG_TX_MEM_GBL_CFG_TX_TYPE_S 1
#define Q_REG_TX_MEM_GBL_CFG_TX_TYPE_M ICE_M(0xFF, 1)
-#define Q_REG_TX_MEM_GBL_CFG_INTR_THR_S 9
#define Q_REG_TX_MEM_GBL_CFG_INTR_THR_M ICE_M(0x3F, 9)
-#define Q_REG_TX_MEM_GBL_CFG_INTR_ENA_S 15
#define Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M BIT(15)
/* Tx Timestamp data registers */
@@ -380,7 +495,7 @@ int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id,
#define P_REG_TIMETUS_L 0x410
#define P_REG_TIMETUS_U 0x414
-#define P_REG_40B_LOW_M 0xFF
+#define P_REG_40B_LOW_M GENMASK(7, 0)
#define P_REG_40B_HIGH_S 8
/* PHY window length registers */
@@ -487,30 +602,43 @@ int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id,
#define ETH_GLTSYN_SHADJ_H(_i) (0x0300037C + ((_i) * 32))
/* E810 timer command register */
-#define ETH_GLTSYN_CMD 0x03000344
+#define E810_ETH_GLTSYN_CMD 0x03000344
+
+/* E830 timer command register */
+#define E830_ETH_GLTSYN_CMD 0x00088814
+
+/* E810 PHC time register */
+#define E830_GLTSYN_TIME_L(_tmr_idx) (0x0008A000 + 0x1000 * (_tmr_idx))
/* Source timer incval macros */
#define INCVAL_HIGH_M 0xFF
-/* Timestamp block macros */
+/* PHY 40b registers macros */
+#define PHY_EXT_40B_LOW_M GENMASK(31, 0)
+#define PHY_EXT_40B_HIGH_M GENMASK_ULL(39, 32)
+#define PHY_40B_LOW_M GENMASK(7, 0)
+#define PHY_40B_HIGH_M GENMASK_ULL(39, 8)
#define TS_VALID BIT(0)
#define TS_LOW_M 0xFFFFFFFF
#define TS_HIGH_M 0xFF
#define TS_HIGH_S 32
-#define TS_PHY_LOW_M 0xFF
-#define TS_PHY_HIGH_M 0xFFFFFFFF
-#define TS_PHY_HIGH_S 8
-
#define BYTES_PER_IDX_ADDR_L_U 8
#define BYTES_PER_IDX_ADDR_L 4
/* Tx timestamp low latency read definitions */
-#define TS_LL_READ_RETRIES 200
-#define TS_LL_READ_TS_HIGH GENMASK(23, 16)
-#define TS_LL_READ_TS_IDX GENMASK(29, 24)
-#define TS_LL_READ_TS_INTR BIT(30)
-#define TS_LL_READ_TS BIT(31)
+#define REG_LL_PROXY_H_TIMEOUT_US 2000
+#define REG_LL_PROXY_H_PHY_TMR_CMD_M GENMASK(7, 6)
+#define REG_LL_PROXY_H_PHY_TMR_CMD_ADJ 0x1
+#define REG_LL_PROXY_H_PHY_TMR_CMD_FREQ 0x2
+#define REG_LL_PROXY_H_TS_HIGH GENMASK(23, 16)
+#define REG_LL_PROXY_H_PHY_TMR_IDX_M BIT(24)
+#define REG_LL_PROXY_H_TS_IDX GENMASK(29, 24)
+#define REG_LL_PROXY_H_TS_INTR_ENA BIT(30)
+#define REG_LL_PROXY_H_EXEC BIT(31)
+
+#define REG_LL_PROXY_L PF_SB_ATQBAH
+#define REG_LL_PROXY_H PF_SB_ATQBAL
/* Internal PHY timestamp address */
#define TS_L(a, idx) ((a) + ((idx) * BYTES_PER_IDX_ADDR_L_U))
@@ -524,29 +652,135 @@ int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id,
#define LOW_TX_MEMORY_BANK_START 0x03090000
#define HIGH_TX_MEMORY_BANK_START 0x03090004
-/* E810T SMA controller pin control */
-#define ICE_SMA1_DIR_EN_E810T BIT(4)
-#define ICE_SMA1_TX_EN_E810T BIT(5)
-#define ICE_SMA2_UFL2_RX_DIS_E810T BIT(3)
-#define ICE_SMA2_DIR_EN_E810T BIT(6)
-#define ICE_SMA2_TX_EN_E810T BIT(7)
-
-#define ICE_SMA1_MASK_E810T (ICE_SMA1_DIR_EN_E810T | \
- ICE_SMA1_TX_EN_E810T)
-#define ICE_SMA2_MASK_E810T (ICE_SMA2_UFL2_RX_DIS_E810T | \
- ICE_SMA2_DIR_EN_E810T | \
- ICE_SMA2_TX_EN_E810T)
-#define ICE_ALL_SMA_MASK_E810T (ICE_SMA1_MASK_E810T | \
- ICE_SMA2_MASK_E810T)
-
-#define ICE_SMA_MIN_BIT_E810T 3
-#define ICE_SMA_MAX_BIT_E810T 7
+/* SMA controller pin control */
+#define ICE_SMA1_DIR_EN BIT(4)
+#define ICE_SMA1_TX_EN BIT(5)
+#define ICE_SMA2_UFL2_RX_DIS BIT(3)
+#define ICE_SMA2_DIR_EN BIT(6)
+#define ICE_SMA2_TX_EN BIT(7)
+
+#define ICE_SMA1_MASK (ICE_SMA1_DIR_EN | ICE_SMA1_TX_EN)
+#define ICE_SMA2_MASK (ICE_SMA2_UFL2_RX_DIS | ICE_SMA2_DIR_EN | \
+ ICE_SMA2_TX_EN)
+#define ICE_SMA2_INACTIVE_MASK (ICE_SMA2_DIR_EN | ICE_SMA2_TX_EN)
+#define ICE_ALL_SMA_MASK (ICE_SMA1_MASK | ICE_SMA2_MASK)
+
+#define ICE_SMA_MIN_BIT 3
+#define ICE_SMA_MAX_BIT 7
#define ICE_PCA9575_P1_OFFSET 8
-/* E810T PCA9575 IO controller registers */
+/* PCA9575 IO controller registers */
#define ICE_PCA9575_P0_IN 0x0
-/* E810T PCA9575 IO controller pin control */
-#define ICE_E810T_P0_GNSS_PRSNT_N BIT(4)
+/* PCA9575 IO controller pin control */
+#define ICE_P0_GNSS_PRSNT_N BIT(4)
+
+/* ETH56G PHY register addresses */
+/* Timestamp PHY incval registers */
+#define PHY_REG_TIMETUS_L 0x8
+#define PHY_REG_TIMETUS_U 0xC
+
+/* Timestamp PCS registers */
+#define PHY_PCS_REF_TUS_L 0x18
+#define PHY_PCS_REF_TUS_U 0x1C
+
+/* Timestamp PCS ref incval registers */
+#define PHY_PCS_REF_INC_L 0x20
+#define PHY_PCS_REF_INC_U 0x24
+
+/* Timestamp init registers */
+#define PHY_REG_RX_TIMER_INC_PRE_L 0x64
+#define PHY_REG_RX_TIMER_INC_PRE_U 0x68
+#define PHY_REG_TX_TIMER_INC_PRE_L 0x44
+#define PHY_REG_TX_TIMER_INC_PRE_U 0x48
+
+/* Timestamp match and adjust target registers */
+#define PHY_REG_RX_TIMER_CNT_ADJ_L 0x6C
+#define PHY_REG_RX_TIMER_CNT_ADJ_U 0x70
+#define PHY_REG_TX_TIMER_CNT_ADJ_L 0x4C
+#define PHY_REG_TX_TIMER_CNT_ADJ_U 0x50
+
+/* Timestamp command registers */
+#define PHY_REG_TX_TMR_CMD 0x40
+#define PHY_REG_RX_TMR_CMD 0x60
+
+/* Phy offset ready registers */
+#define PHY_REG_TX_OFFSET_READY 0x54
+#define PHY_REG_RX_OFFSET_READY 0x74
+
+/* Phy total offset registers */
+#define PHY_REG_TOTAL_TX_OFFSET_L 0x38
+#define PHY_REG_TOTAL_TX_OFFSET_U 0x3C
+#define PHY_REG_TOTAL_RX_OFFSET_L 0x58
+#define PHY_REG_TOTAL_RX_OFFSET_U 0x5C
+
+/* Timestamp capture registers */
+#define PHY_REG_TX_CAPTURE_L 0x78
+#define PHY_REG_TX_CAPTURE_U 0x7C
+#define PHY_REG_RX_CAPTURE_L 0x8C
+#define PHY_REG_RX_CAPTURE_U 0x90
+
+/* Memory status registers */
+#define PHY_REG_TX_MEMORY_STATUS_L 0x80
+#define PHY_REG_TX_MEMORY_STATUS_U 0x84
+
+/* Interrupt config register */
+#define PHY_REG_TS_INT_CONFIG 0x88
+
+/* XIF mode config register */
+#define PHY_MAC_XIF_MODE 0x24
+#define PHY_MAC_XIF_1STEP_ENA_M ICE_M(0x1, 5)
+#define PHY_MAC_XIF_TS_BIN_MODE_M ICE_M(0x1, 11)
+#define PHY_MAC_XIF_TS_SFD_ENA_M ICE_M(0x1, 20)
+#define PHY_MAC_XIF_GMII_TS_SEL_M ICE_M(0x1, 21)
+
+#define PHY_TS_INT_CONFIG_THRESHOLD_M ICE_M(0x3F, 0)
+#define PHY_TS_INT_CONFIG_ENA_M BIT(6)
+
+/* Macros to derive offsets for TimeStampLow and TimeStampHigh */
+#define PHY_TSTAMP_L(x) (((x) * 8) + 0)
+#define PHY_TSTAMP_U(x) (((x) * 8) + 4)
+
+#define PHY_REG_DESKEW_0 0x94
+#define PHY_REG_DESKEW_0_RLEVEL GENMASK(6, 0)
+#define PHY_REG_DESKEW_0_RLEVEL_FRAC GENMASK(9, 7)
+#define PHY_REG_DESKEW_0_RLEVEL_FRAC_W 3
+#define PHY_REG_DESKEW_0_VALID GENMASK(10, 10)
+
+#define PHY_REG_SD_BIT_SLIP(_port_offset) (0x29C + 4 * (_port_offset))
+#define PHY_REVISION_ETH56G 0x10200
+#define PHY_VENDOR_TXLANE_THRESH 0x2000C
+
+#define PHY_MAC_TSU_CONFIG 0x40
+#define PHY_MAC_TSU_CFG_RX_MODE_M ICE_M(0x7, 0)
+#define PHY_MAC_TSU_CFG_RX_MII_CW_DLY_M ICE_M(0x7, 4)
+#define PHY_MAC_TSU_CFG_RX_MII_MK_DLY_M ICE_M(0x7, 8)
+#define PHY_MAC_TSU_CFG_TX_MODE_M ICE_M(0x7, 12)
+#define PHY_MAC_TSU_CFG_TX_MII_CW_DLY_M ICE_M(0x1F, 16)
+#define PHY_MAC_TSU_CFG_TX_MII_MK_DLY_M ICE_M(0x1F, 21)
+#define PHY_MAC_TSU_CFG_BLKS_PER_CLK_M ICE_M(0x1, 28)
+#define PHY_MAC_RX_MODULO 0x44
+#define PHY_MAC_RX_OFFSET 0x48
+#define PHY_MAC_RX_OFFSET_M ICE_M(0xFFFFFF, 0)
+#define PHY_MAC_TX_MODULO 0x4C
+#define PHY_MAC_BLOCKTIME 0x50
+#define PHY_MAC_MARKERTIME 0x54
+#define PHY_MAC_TX_OFFSET 0x58
+#define PHY_GPCS_BITSLIP 0x5C
+
+#define PHY_PTP_INT_STATUS 0x7FD140
+
+/* ETH56G registers shared per quad */
+/* GPCS config register */
+#define PHY_GPCS_CONFIG_REG0 0x268
+#define PHY_GPCS_CONFIG_REG0_TX_THR_M GENMASK(27, 24)
+/* 1-step PTP config */
+#define PHY_PTP_1STEP_CONFIG 0x270
+#define PHY_PTP_1STEP_T1S_UP64_M GENMASK(7, 4)
+#define PHY_PTP_1STEP_T1S_DELTA_M GENMASK(11, 8)
+#define PHY_PTP_1STEP_PEER_DELAY(_quad_lane) (0x274 + 4 * (_quad_lane))
+#define PHY_PTP_1STEP_PD_ADD_PD_M BIT(0)
+#define PHY_PTP_1STEP_PD_DELAY_M GENMASK(30, 1)
+#define PHY_PTP_1STEP_PD_DLY_V_M BIT(31)
#endif /* _ICE_PTP_HW_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c
index 5f30fb131f74..cb08746556a6 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.c
+++ b/drivers/net/ethernet/intel/ice/ice_repr.c
@@ -3,42 +3,51 @@
#include "ice.h"
#include "ice_eswitch.h"
-#include "ice_devlink.h"
+#include "devlink/devlink.h"
+#include "devlink/port.h"
#include "ice_sriov.h"
#include "ice_tc_lib.h"
#include "ice_dcb_lib.h"
/**
- * ice_repr_get_sw_port_id - get port ID associated with representor
- * @repr: pointer to port representor
+ * ice_repr_inc_tx_stats - increment Tx statistic by one packet
+ * @repr: repr to increment stats on
+ * @len: length of the packet
+ * @xmit_status: value returned by xmit function
*/
-static int ice_repr_get_sw_port_id(struct ice_repr *repr)
+void ice_repr_inc_tx_stats(struct ice_repr *repr, unsigned int len,
+ int xmit_status)
{
- return repr->src_vsi->back->hw.port_info->lport;
+ struct ice_repr_pcpu_stats *stats;
+
+ if (unlikely(xmit_status != NET_XMIT_SUCCESS &&
+ xmit_status != NET_XMIT_CN)) {
+ this_cpu_inc(repr->stats->tx_drops);
+ return;
+ }
+
+ stats = this_cpu_ptr(repr->stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->tx_packets++;
+ stats->tx_bytes += len;
+ u64_stats_update_end(&stats->syncp);
}
/**
- * ice_repr_get_phys_port_name - get phys port name
- * @netdev: pointer to port representor netdev
- * @buf: write here port name
- * @len: max length of buf
+ * ice_repr_inc_rx_stats - increment Rx statistic by one packet
+ * @netdev: repr netdev to increment stats on
+ * @len: length of the packet
*/
-static int
-ice_repr_get_phys_port_name(struct net_device *netdev, char *buf, size_t len)
+void ice_repr_inc_rx_stats(struct net_device *netdev, unsigned int len)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_repr *repr = np->repr;
- int res;
-
- /* Devlink port is registered and devlink core is taking care of name formatting. */
- if (repr->vf->devlink_port.devlink)
- return -EOPNOTSUPP;
+ struct ice_repr *repr = ice_netdev_to_repr(netdev);
+ struct ice_repr_pcpu_stats *stats;
- res = snprintf(buf, len, "pf%dvfr%d", ice_repr_get_sw_port_id(repr),
- repr->id);
- if (res <= 0)
- return -EOPNOTSUPP;
- return 0;
+ stats = this_cpu_ptr(repr->stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_packets++;
+ stats->rx_bytes += len;
+ u64_stats_update_end(&stats->syncp);
}
/**
@@ -50,12 +59,13 @@ static void
ice_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_repr *repr = np->repr;
struct ice_eth_stats *eth_stats;
struct ice_vsi *vsi;
- if (ice_is_vf_disabled(np->repr->vf))
+ if (repr->ops.ready(repr))
return;
- vsi = np->repr->src_vsi;
+ vsi = repr->src_vsi;
ice_update_vsi_stats(vsi);
eth_stats = &vsi->eth_stats;
@@ -76,7 +86,7 @@ ice_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
* ice_netdev_to_repr - Get port representor for given netdevice
* @netdev: pointer to port representor netdev
*/
-struct ice_repr *ice_netdev_to_repr(struct net_device *netdev)
+struct ice_repr *ice_netdev_to_repr(const struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
@@ -84,7 +94,7 @@ struct ice_repr *ice_netdev_to_repr(struct net_device *netdev)
}
/**
- * ice_repr_open - Enable port representor's network interface
+ * ice_repr_vf_open - Enable port representor's network interface
* @netdev: network interface device structure
*
* The open entry point is called when a port representor's network
@@ -93,7 +103,7 @@ struct ice_repr *ice_netdev_to_repr(struct net_device *netdev)
*
* Returns 0 on success
*/
-static int ice_repr_open(struct net_device *netdev)
+static int ice_repr_vf_open(struct net_device *netdev)
{
struct ice_repr *repr = ice_netdev_to_repr(netdev);
struct ice_vf *vf;
@@ -109,8 +119,16 @@ static int ice_repr_open(struct net_device *netdev)
return 0;
}
+static int ice_repr_sf_open(struct net_device *netdev)
+{
+ netif_carrier_on(netdev);
+ netif_tx_start_all_queues(netdev);
+
+ return 0;
+}
+
/**
- * ice_repr_stop - Disable port representor's network interface
+ * ice_repr_vf_stop - Disable port representor's network interface
* @netdev: network interface device structure
*
* The stop entry point is called when a port representor's network
@@ -119,7 +137,7 @@ static int ice_repr_open(struct net_device *netdev)
*
* Returns 0 on success
*/
-static int ice_repr_stop(struct net_device *netdev)
+static int ice_repr_vf_stop(struct net_device *netdev)
{
struct ice_repr *repr = ice_netdev_to_repr(netdev);
struct ice_vf *vf;
@@ -135,42 +153,47 @@ static int ice_repr_stop(struct net_device *netdev)
return 0;
}
+static int ice_repr_sf_stop(struct net_device *netdev)
+{
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+
+ return 0;
+}
+
/**
* ice_repr_sp_stats64 - get slow path stats for port representor
* @dev: network interface device structure
* @stats: netlink stats structure
- *
- * RX/TX stats are being swapped here to be consistent with VF stats. In slow
- * path, port representor receives data when the corresponding VF is sending it
- * (and vice versa), TX and RX bytes/packets are effectively swapped on port
- * representor.
*/
static int
ice_repr_sp_stats64(const struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
- struct ice_netdev_priv *np = netdev_priv(dev);
- int vf_id = np->repr->vf->vf_id;
- struct ice_tx_ring *tx_ring;
- struct ice_rx_ring *rx_ring;
- u64 pkts, bytes;
-
- tx_ring = np->vsi->tx_rings[vf_id];
- ice_fetch_u64_stats_per_ring(&tx_ring->ring_stats->syncp,
- tx_ring->ring_stats->stats,
- &pkts, &bytes);
- stats->rx_packets = pkts;
- stats->rx_bytes = bytes;
-
- rx_ring = np->vsi->rx_rings[vf_id];
- ice_fetch_u64_stats_per_ring(&rx_ring->ring_stats->syncp,
- rx_ring->ring_stats->stats,
- &pkts, &bytes);
- stats->tx_packets = pkts;
- stats->tx_bytes = bytes;
- stats->tx_dropped = rx_ring->ring_stats->rx_stats.alloc_page_failed +
- rx_ring->ring_stats->rx_stats.alloc_buf_failed;
-
+ struct ice_repr *repr = ice_netdev_to_repr(dev);
+ int i;
+
+ for_each_possible_cpu(i) {
+ u64 tbytes, tpkts, tdrops, rbytes, rpkts;
+ struct ice_repr_pcpu_stats *repr_stats;
+ unsigned int start;
+
+ repr_stats = per_cpu_ptr(repr->stats, i);
+ do {
+ start = u64_stats_fetch_begin(&repr_stats->syncp);
+ tbytes = repr_stats->tx_bytes;
+ tpkts = repr_stats->tx_packets;
+ tdrops = repr_stats->tx_drops;
+ rbytes = repr_stats->rx_bytes;
+ rpkts = repr_stats->rx_packets;
+ } while (u64_stats_fetch_retry(&repr_stats->syncp, start));
+
+ stats->tx_bytes += tbytes;
+ stats->tx_packets += tpkts;
+ stats->tx_dropped += tdrops;
+ stats->rx_bytes += rbytes;
+ stats->rx_packets += rpkts;
+ }
return 0;
}
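The per-CPU aggregation above relies on the u64_stats sequence-counter pattern: the reader snapshots the counters and retries if a writer updated them concurrently. A rough userspace analogue of the reader side, using a plain even/odd sequence counter rather than the kernel API:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct pcpu_stats {
	atomic_uint seq;	/* even: stable, odd: writer in progress */
	uint64_t tx_packets;
	uint64_t tx_bytes;
};

/* Reader: retry the snapshot if a writer ran concurrently */
static void stats_read(struct pcpu_stats *s, uint64_t *pkts, uint64_t *bytes)
{
	unsigned int start;

	do {
		start = atomic_load_explicit(&s->seq, memory_order_acquire);
		*pkts = s->tx_packets;
		*bytes = s->tx_bytes;
	} while ((start & 1) ||
		 atomic_load_explicit(&s->seq, memory_order_acquire) != start);
}

int main(void)
{
	struct pcpu_stats cpu0 = { .tx_packets = 3, .tx_bytes = 4500 };
	uint64_t pkts, bytes;

	stats_read(&cpu0, &pkts, &bytes);
	printf("%llu packets, %llu bytes\n",
	       (unsigned long long)pkts, (unsigned long long)bytes);
	return 0;
}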
@@ -196,7 +219,8 @@ ice_repr_setup_tc_cls_flower(struct ice_repr *repr,
{
switch (flower->command) {
case FLOW_CLS_REPLACE:
- return ice_add_cls_flower(repr->netdev, repr->src_vsi, flower);
+ return ice_add_cls_flower(repr->netdev, repr->src_vsi, flower,
+ true);
case FLOW_CLS_DESTROY:
return ice_del_cls_flower(repr->src_vsi, flower);
default:
@@ -239,11 +263,20 @@ ice_repr_setup_tc(struct net_device *netdev, enum tc_setup_type type,
}
}
-static const struct net_device_ops ice_repr_netdev_ops = {
- .ndo_get_phys_port_name = ice_repr_get_phys_port_name,
+static const struct net_device_ops ice_repr_vf_netdev_ops = {
.ndo_get_stats64 = ice_repr_get_stats64,
- .ndo_open = ice_repr_open,
- .ndo_stop = ice_repr_stop,
+ .ndo_open = ice_repr_vf_open,
+ .ndo_stop = ice_repr_vf_stop,
+ .ndo_start_xmit = ice_eswitch_port_start_xmit,
+ .ndo_setup_tc = ice_repr_setup_tc,
+ .ndo_has_offload_stats = ice_repr_ndo_has_offload_stats,
+ .ndo_get_offload_stats = ice_repr_ndo_get_offload_stats,
+};
+
+static const struct net_device_ops ice_repr_sf_netdev_ops = {
+ .ndo_get_stats64 = ice_repr_get_stats64,
+ .ndo_open = ice_repr_sf_open,
+ .ndo_stop = ice_repr_sf_stop,
.ndo_start_xmit = ice_eswitch_port_start_xmit,
.ndo_setup_tc = ice_repr_setup_tc,
.ndo_has_offload_stats = ice_repr_ndo_has_offload_stats,
@@ -256,18 +289,20 @@ static const struct net_device_ops ice_repr_netdev_ops = {
*/
bool ice_is_port_repr_netdev(const struct net_device *netdev)
{
- return netdev && (netdev->netdev_ops == &ice_repr_netdev_ops);
+ return netdev && (netdev->netdev_ops == &ice_repr_vf_netdev_ops ||
+ netdev->netdev_ops == &ice_repr_sf_netdev_ops);
}
/**
* ice_repr_reg_netdev - register port representor netdev
* @netdev: pointer to port representor netdev
+ * @ops: new ops for netdev
*/
static int
-ice_repr_reg_netdev(struct net_device *netdev)
+ice_repr_reg_netdev(struct net_device *netdev, const struct net_device_ops *ops)
{
eth_hw_addr_random(netdev);
- netdev->netdev_ops = &ice_repr_netdev_ops;
+ netdev->netdev_ops = ops;
ice_set_ethtool_repr_ops(netdev);
netdev->hw_features |= NETIF_F_HW_TC;
@@ -278,60 +313,58 @@ ice_repr_reg_netdev(struct net_device *netdev)
return register_netdev(netdev);
}
-static void ice_repr_remove_node(struct devlink_port *devlink_port)
+static int ice_repr_ready_vf(struct ice_repr *repr)
{
- devl_lock(devlink_port->devlink);
- devl_rate_leaf_destroy(devlink_port);
- devl_unlock(devlink_port->devlink);
+ return !ice_check_vf_ready_for_cfg(repr->vf);
+}
+
+static int ice_repr_ready_sf(struct ice_repr *repr)
+{
+ return !repr->sf->active;
}
/**
- * ice_repr_rem - remove representor from VF
+ * ice_repr_destroy - free port representor resources
* @repr: pointer to representor structure
*/
-static void ice_repr_rem(struct ice_repr *repr)
+void ice_repr_destroy(struct ice_repr *repr)
{
- kfree(repr->q_vector);
+ free_percpu(repr->stats);
free_netdev(repr->netdev);
kfree(repr);
}
-/**
- * ice_repr_rem_vf - remove representor from VF
- * @repr: pointer to representor structure
- */
-void ice_repr_rem_vf(struct ice_repr *repr)
+static void ice_repr_rem_vf(struct ice_repr *repr)
{
- ice_repr_remove_node(&repr->vf->devlink_port);
+ ice_eswitch_decfg_vsi(repr->src_vsi, repr->parent_mac);
+ ice_pass_vf_tx_lldp(repr->src_vsi, true);
unregister_netdev(repr->netdev);
ice_devlink_destroy_vf_port(repr->vf);
ice_virtchnl_set_dflt_ops(repr->vf);
- ice_repr_rem(repr);
}
-static void ice_repr_set_tx_topology(struct ice_pf *pf)
+static void ice_repr_rem_sf(struct ice_repr *repr)
{
- struct devlink *devlink;
+ unregister_netdev(repr->netdev);
+ ice_devlink_destroy_sf_port(repr->sf);
+}
+static void ice_repr_set_tx_topology(struct ice_pf *pf, struct devlink *devlink)
+{
/* only export if ADQ and DCB disabled and eswitch enabled*/
if (ice_is_adq_active(pf) || ice_is_dcb_active(pf) ||
!ice_is_switchdev_running(pf))
return;
- devlink = priv_to_devlink(pf);
ice_devlink_rate_init_tx_topology(devlink, ice_get_main_vsi(pf));
}
/**
- * ice_repr_add - add representor for generic VSI
- * @pf: pointer to PF structure
+ * ice_repr_create - add representor for generic VSI
* @src_vsi: pointer to VSI structure of device to represent
- * @parent_mac: device MAC address
*/
-static struct ice_repr *
-ice_repr_add(struct ice_pf *pf, struct ice_vsi *src_vsi, const u8 *parent_mac)
+static struct ice_repr *ice_repr_create(struct ice_vsi *src_vsi)
{
- struct ice_q_vector *q_vector;
struct ice_netdev_priv *np;
struct ice_repr *repr;
int err;
@@ -346,78 +379,153 @@ ice_repr_add(struct ice_pf *pf, struct ice_vsi *src_vsi, const u8 *parent_mac)
goto err_alloc;
}
+ repr->stats = netdev_alloc_pcpu_stats(struct ice_repr_pcpu_stats);
+ if (!repr->stats) {
+ err = -ENOMEM;
+ goto err_stats;
+ }
+
repr->src_vsi = src_vsi;
+ repr->id = src_vsi->vsi_num;
np = netdev_priv(repr->netdev);
np->repr = repr;
- q_vector = kzalloc(sizeof(*q_vector), GFP_KERNEL);
- if (!q_vector) {
- err = -ENOMEM;
- goto err_alloc_q_vector;
- }
- repr->q_vector = q_vector;
- repr->q_id = repr->id;
+ repr->netdev->min_mtu = ETH_MIN_MTU;
+ repr->netdev->max_mtu = ICE_MAX_MTU;
- ether_addr_copy(repr->parent_mac, parent_mac);
+ SET_NETDEV_DEV(repr->netdev, ice_pf_to_dev(src_vsi->back));
return repr;
-err_alloc_q_vector:
+err_stats:
free_netdev(repr->netdev);
err_alloc:
kfree(repr);
return ERR_PTR(err);
}
-struct ice_repr *ice_repr_add_vf(struct ice_vf *vf)
+static int ice_repr_add_vf(struct ice_repr *repr)
{
- struct ice_repr *repr;
- struct ice_vsi *vsi;
+ struct ice_vf *vf = repr->vf;
+ struct devlink *devlink;
int err;
- vsi = ice_get_vf_vsi(vf);
- if (!vsi)
- return ERR_PTR(-ENOENT);
-
err = ice_devlink_create_vf_port(vf);
if (err)
- return ERR_PTR(err);
+ return err;
- repr = ice_repr_add(vf->pf, vsi, vf->hw_lan_addr);
- if (IS_ERR(repr)) {
- err = PTR_ERR(repr);
- goto err_repr_add;
- }
+ SET_NETDEV_DEVLINK_PORT(repr->netdev, &vf->devlink_port);
+ err = ice_repr_reg_netdev(repr->netdev, &ice_repr_vf_netdev_ops);
+ if (err)
+ goto err_netdev;
+
+ err = ice_drop_vf_tx_lldp(repr->src_vsi, true);
+ if (err)
+ goto err_drop_lldp;
+
+ err = ice_eswitch_cfg_vsi(repr->src_vsi, repr->parent_mac);
+ if (err)
+ goto err_cfg_vsi;
+
+ ice_virtchnl_set_repr_ops(vf);
+
+ devlink = priv_to_devlink(vf->pf);
+ ice_repr_set_tx_topology(vf->pf, devlink);
+
+ return 0;
+
+err_cfg_vsi:
+ ice_pass_vf_tx_lldp(repr->src_vsi, true);
+err_drop_lldp:
+ unregister_netdev(repr->netdev);
+err_netdev:
+ ice_devlink_destroy_vf_port(vf);
+ return err;
+}
+/**
+ * ice_repr_create_vf - add representor for VF VSI
+ * @vf: VF to create port representor on
+ *
+ * Set the correct representor type for the VF and its function pointers.
+ *
+ * Return: created port representor on success, error otherwise
+ */
+struct ice_repr *ice_repr_create_vf(struct ice_vf *vf)
+{
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ struct ice_repr *repr;
+
+ if (!vsi)
+ return ERR_PTR(-EINVAL);
+
+ repr = ice_repr_create(vsi);
+ if (IS_ERR(repr))
+ return repr;
+
+ repr->type = ICE_REPR_TYPE_VF;
repr->vf = vf;
+ repr->ops.add = ice_repr_add_vf;
+ repr->ops.rem = ice_repr_rem_vf;
+ repr->ops.ready = ice_repr_ready_vf;
- repr->netdev->min_mtu = ETH_MIN_MTU;
- repr->netdev->max_mtu = ICE_MAX_MTU;
+ ether_addr_copy(repr->parent_mac, vf->hw_lan_addr);
- SET_NETDEV_DEV(repr->netdev, ice_pf_to_dev(vf->pf));
- SET_NETDEV_DEVLINK_PORT(repr->netdev, &vf->devlink_port);
- err = ice_repr_reg_netdev(repr->netdev);
+ return repr;
+}
+
+static int ice_repr_add_sf(struct ice_repr *repr)
+{
+ struct ice_dynamic_port *sf = repr->sf;
+ int err;
+
+ err = ice_devlink_create_sf_port(sf);
+ if (err)
+ return err;
+
+ SET_NETDEV_DEVLINK_PORT(repr->netdev, &sf->devlink_port);
+ err = ice_repr_reg_netdev(repr->netdev, &ice_repr_sf_netdev_ops);
if (err)
goto err_netdev;
- ice_virtchnl_set_repr_ops(vf);
- ice_repr_set_tx_topology(vf->pf);
+ ice_repr_set_tx_topology(sf->vsi->back, priv_to_devlink(sf->vsi->back));
- return repr;
+ return 0;
err_netdev:
- ice_repr_rem(repr);
-err_repr_add:
- ice_devlink_destroy_vf_port(vf);
- return ERR_PTR(err);
+ ice_devlink_destroy_sf_port(sf);
+ return err;
}
-struct ice_repr *ice_repr_get_by_vsi(struct ice_vsi *vsi)
+/**
+ * ice_repr_create_sf - add representor for SF VSI
+ * @sf: SF to create port representor on
+ *
+ * Set the correct representor type for the SF and its function pointers.
+ *
+ * Return: created port representor on success, error otherwise
+ */
+struct ice_repr *ice_repr_create_sf(struct ice_dynamic_port *sf)
{
- if (!vsi->vf)
- return NULL;
+ struct ice_repr *repr = ice_repr_create(sf->vsi);
+
+ if (IS_ERR(repr))
+ return repr;
+
+ repr->type = ICE_REPR_TYPE_SF;
+ repr->sf = sf;
+ repr->ops.add = ice_repr_add_sf;
+ repr->ops.rem = ice_repr_rem_sf;
+ repr->ops.ready = ice_repr_ready_sf;
- return xa_load(&vsi->back->eswitch.reprs, vsi->vf->repr_id);
+ ether_addr_copy(repr->parent_mac, sf->hw_addr);
+
+ return repr;
+}
+
+struct ice_repr *ice_repr_get(struct ice_pf *pf, u32 id)
+{
+ return xa_load(&pf->eswitch.reprs, id);
}
/**
@@ -439,15 +547,3 @@ void ice_repr_stop_tx_queues(struct ice_repr *repr)
netif_carrier_off(repr->netdev);
netif_tx_stop_all_queues(repr->netdev);
}
-
-/**
- * ice_repr_set_traffic_vsi - set traffic VSI for port representor
- * @repr: repr on with VSI will be set
- * @vsi: pointer to VSI that will be used by port representor to pass traffic
- */
-void ice_repr_set_traffic_vsi(struct ice_repr *repr, struct ice_vsi *vsi)
-{
- struct ice_netdev_priv *np = netdev_priv(repr->netdev);
-
- np->vsi = vsi;
-}
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.h b/drivers/net/ethernet/intel/ice/ice_repr.h
index f9aede315716..35bd93165e1e 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.h
+++ b/drivers/net/ethernet/intel/ice/ice_repr.h
@@ -6,32 +6,53 @@
#include <net/dst_metadata.h>
+struct ice_repr_pcpu_stats {
+ struct u64_stats_sync syncp;
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 tx_drops;
+};
+
+enum ice_repr_type {
+ ICE_REPR_TYPE_VF,
+ ICE_REPR_TYPE_SF,
+};
+
struct ice_repr {
struct ice_vsi *src_vsi;
- struct ice_vf *vf;
- struct ice_q_vector *q_vector;
struct net_device *netdev;
struct metadata_dst *dst;
struct ice_esw_br_port *br_port;
- int q_id;
+ struct ice_repr_pcpu_stats __percpu *stats;
u32 id;
u8 parent_mac[ETH_ALEN];
-#ifdef CONFIG_ICE_SWITCHDEV
- /* info about slow path rule */
- struct ice_rule_query_data sp_rule;
-#endif
+ enum ice_repr_type type;
+ union {
+ struct ice_vf *vf;
+ struct ice_dynamic_port *sf;
+ };
+ struct {
+ int (*add)(struct ice_repr *repr);
+ void (*rem)(struct ice_repr *repr);
+ int (*ready)(struct ice_repr *repr);
+ } ops;
};
-struct ice_repr *ice_repr_add_vf(struct ice_vf *vf);
-void ice_repr_rem_vf(struct ice_repr *repr);
+struct ice_repr *ice_repr_create_vf(struct ice_vf *vf);
+struct ice_repr *ice_repr_create_sf(struct ice_dynamic_port *sf);
+
+void ice_repr_destroy(struct ice_repr *repr);
void ice_repr_start_tx_queues(struct ice_repr *repr);
void ice_repr_stop_tx_queues(struct ice_repr *repr);
-void ice_repr_set_traffic_vsi(struct ice_repr *repr, struct ice_vsi *vsi);
-
-struct ice_repr *ice_netdev_to_repr(struct net_device *netdev);
+struct ice_repr *ice_netdev_to_repr(const struct net_device *netdev);
bool ice_is_port_repr_netdev(const struct net_device *netdev);
-struct ice_repr *ice_repr_get_by_vsi(struct ice_vsi *vsi);
+void ice_repr_inc_tx_stats(struct ice_repr *repr, unsigned int len,
+ int xmit_status);
+void ice_repr_inc_rx_stats(struct net_device *netdev, unsigned int len);
+struct ice_repr *ice_repr_get(struct ice_pf *pf, u32 id);
#endif
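
The ops table added to struct ice_repr above lets the eswitch code treat VF and SF representors uniformly. A purely illustrative sketch of such a dispatch on the caller side (the helper names and error handling here are assumptions for this note, not part of the patch, and it presumes the driver headers are included):

	/* Illustrative only: dispatch through the type-specific callbacks of
	 * struct ice_repr; ice_repr_create_vf()/ice_repr_create_sf() fill
	 * ops.add/ops.rem/ops.ready when the representor is created.
	 */
	static int ice_eswitch_setup_repr_sketch(struct ice_repr *repr)
	{
		int err;

		/* ICE_REPR_TYPE_VF -> ice_repr_add_vf(), ICE_REPR_TYPE_SF -> ice_repr_add_sf() */
		err = repr->ops.add(repr);
		if (err)
			return err;

		/* ... further eswitch setup would go here ... */

		return 0;
	}

	static void ice_eswitch_teardown_repr_sketch(struct ice_repr *repr)
	{
		repr->ops.rem(repr);	/* ice_repr_rem_vf() or ice_repr_rem_sf() */
	}
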
diff --git a/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h b/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h
index ead75fe2bcda..21bb861febbf 100644
--- a/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h
@@ -46,11 +46,11 @@ struct ice_sbq_evt_desc {
u8 data[24];
};
-enum ice_sbq_msg_dev {
- rmn_0 = 0x02,
- rmn_1 = 0x03,
- rmn_2 = 0x04,
- cgu = 0x06
+enum ice_sbq_dev_id {
+ ice_sbq_dev_phy_0 = 0x02,
+ ice_sbq_dev_cgu = 0x06,
+ ice_sbq_dev_phy_0_peer = 0x0D,
+ ice_sbq_dev_cgu_peer = 0x0F,
};
enum ice_sbq_msg_opcode {
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index a1525992d14b..fff0c1afdb41 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -28,9 +28,8 @@ ice_sched_add_root_node(struct ice_port_info *pi,
if (!root)
return -ENOMEM;
- /* coverity[suspicious_sizeof] */
root->children = devm_kcalloc(ice_hw_to_dev(hw), hw->max_children[0],
- sizeof(*root), GFP_KERNEL);
+ sizeof(*root->children), GFP_KERNEL);
if (!root->children) {
devm_kfree(ice_hw_to_dev(hw), root);
return -ENOMEM;
@@ -86,6 +85,27 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
}
/**
+ * ice_sched_find_next_vsi_node - find the next node for a given VSI
+ * @vsi_node: VSI support node to start search with
+ *
+ * Return: Next VSI support node, or NULL.
+ *
+ * The function returns a pointer to the next node from the VSI layer
+ * assigned to the given VSI, or NULL if there is no such node.
+ */
+static struct ice_sched_node *
+ice_sched_find_next_vsi_node(struct ice_sched_node *vsi_node)
+{
+ unsigned int vsi_handle = vsi_node->vsi_handle;
+
+ while ((vsi_node = vsi_node->sibling) != NULL)
+ if (vsi_node->vsi_handle == vsi_handle)
+ break;
+
+ return vsi_node;
+}
+
+/**
* ice_aqc_send_sched_elem_cmd - send scheduling elements cmd
* @hw: pointer to the HW struct
* @cmd_opc: cmd opcode
@@ -103,13 +123,13 @@ ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
u16 *elems_resp, struct ice_sq_cd *cd)
{
struct ice_aqc_sched_elem_cmd *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
- cmd = &desc.params.sched_elem_cmd;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, cmd_opc);
cmd->num_elem_req = cpu_to_le16(elems_req);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
if (!status && elems_resp)
*elems_resp = le16_to_cpu(cmd->num_elem_resp);
@@ -186,10 +206,9 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
if (!node)
return -ENOMEM;
if (hw->max_children[layer]) {
- /* coverity[suspicious_sizeof] */
node->children = devm_kcalloc(ice_hw_to_dev(hw),
hw->max_children[layer],
- sizeof(*node), GFP_KERNEL);
+ sizeof(*node->children), GFP_KERNEL);
if (!node->children) {
devm_kfree(ice_hw_to_dev(hw), node);
return -ENOMEM;
@@ -373,10 +392,10 @@ ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport,
u8 *num_branches, struct ice_sq_cd *cd)
{
struct ice_aqc_get_topo *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
- cmd = &desc.params.get_topo;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_dflt_topo);
cmd->port_num = lport;
status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
@@ -499,7 +518,7 @@ ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
struct ice_aqc_query_txsched_res_resp *buf,
struct ice_sq_cd *cd)
{
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_sched_res);
return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
@@ -664,13 +683,13 @@ ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode,
u16 buf_size, u16 *num_processed, struct ice_sq_cd *cd)
{
struct ice_aqc_rl_profile *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
- cmd = &desc.params.rl_profile;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, opcode);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
cmd->num_profiles = cpu_to_le16(num_profiles);
status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
if (!status && num_processed)
@@ -1086,8 +1105,10 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
if (parent->num_children < max_child_nodes) {
new_num_nodes = max_child_nodes - parent->num_children;
} else {
- /* This parent is full, try the next sibling */
- parent = parent->sibling;
+ /* This parent is full,
+ * try the next available sibling.
+ */
+ parent = ice_sched_find_next_vsi_node(parent);
/* Don't modify the first node TEID memory if the
* first node was added already in the above call.
* Instead send some temp memory for all other
@@ -1128,12 +1149,11 @@ u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
* 5 or less sw_entry_point_layer
*/
/* calculate the VSI layer based on number of layers. */
- if (hw->num_tx_sched_layers > ICE_VSI_LAYER_OFFSET + 1) {
- u8 layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;
-
- if (layer > hw->sw_entry_point_layer)
- return layer;
- }
+ if (hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS)
+ return hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;
+ else if (hw->num_tx_sched_layers == ICE_SCHED_5_LAYERS)
+ /* qgroup and VSI layers are the same */
+ return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET;
return hw->sw_entry_point_layer;
}
@@ -1150,13 +1170,10 @@ u8 ice_sched_get_agg_layer(struct ice_hw *hw)
* 7 or less sw_entry_point_layer
*/
/* calculate the aggregator layer based on number of layers. */
- if (hw->num_tx_sched_layers > ICE_AGG_LAYER_OFFSET + 1) {
- u8 layer = hw->num_tx_sched_layers - ICE_AGG_LAYER_OFFSET;
-
- if (layer > hw->sw_entry_point_layer)
- return layer;
- }
- return hw->sw_entry_point_layer;
+ if (hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS)
+ return hw->num_tx_sched_layers - ICE_AGG_LAYER_OFFSET;
+ else
+ return hw->sw_entry_point_layer;
}
/**
@@ -1510,10 +1527,11 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
{
struct ice_sched_node *vsi_node, *qgrp_node;
struct ice_vsi_ctx *vsi_ctx;
+ u8 qgrp_layer, vsi_layer;
u16 max_children;
- u8 qgrp_layer;
qgrp_layer = ice_sched_get_qgrp_layer(pi->hw);
+ vsi_layer = ice_sched_get_vsi_layer(pi->hw);
max_children = pi->hw->max_children[qgrp_layer];
vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
@@ -1524,15 +1542,32 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
if (!vsi_node)
return NULL;
+ /* If the queue group and VSI layer are the same, then the queues
+ * are all attached directly to the VSI
+ */
+ if (qgrp_layer == vsi_layer)
+ return vsi_node;
+
/* get the first queue group node from VSI sub-tree */
qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
while (qgrp_node) {
+ struct ice_sched_node *next_vsi_node;
+
/* make sure the qgroup node is part of the VSI subtree */
if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
if (qgrp_node->num_children < max_children &&
qgrp_node->owner == owner)
break;
qgrp_node = qgrp_node->sibling;
+ if (qgrp_node)
+ continue;
+
+ next_vsi_node = ice_sched_find_next_vsi_node(vsi_node);
+ if (!next_vsi_node)
+ break;
+
+ vsi_node = next_vsi_node;
+ qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
}
/* Select the best queue group */
@@ -1603,16 +1638,16 @@ ice_sched_get_agg_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
/**
* ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes
* @hw: pointer to the HW struct
- * @num_qs: number of queues
+ * @num_new_qs: number of new queues that will be added to the tree
* @num_nodes: num nodes array
*
* This function calculates the number of VSI child nodes based on the
* number of queues.
*/
static void
-ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
+ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_new_qs, u16 *num_nodes)
{
- u16 num = num_qs;
+ u16 num = num_new_qs;
u8 i, qgl, vsil;
qgl = ice_sched_get_qgrp_layer(hw);
@@ -1778,7 +1813,11 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
if (!parent)
return -EIO;
- if (i == vsil)
+ /* Do not modify the VSI handle of already existing VSI nodes
+ * (i.e. when no new VSI node was added to the tree).
+ * Assign the VSI handle only to newly added VSI nodes.
+ */
+ if (i == vsil && num_added)
parent->vsi_handle = vsi_handle;
}
@@ -1812,6 +1851,41 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
}
/**
+ * ice_sched_recalc_vsi_support_nodes - recalculate VSI support nodes count
+ * @hw: pointer to the HW struct
+ * @vsi_node: pointer to the leftmost VSI node that needs to be extended
+ * @new_numqs: new number of queues that have to be handled by the VSI
+ * @new_num_nodes: pointer to nodes count table to modify the VSI layer entry
+ *
+ * This function recalculates the number of VSI support nodes that need to
+ * be added after adding more Tx queues for a given VSI.
+ * The number of new VSI support nodes that shall be added will be saved
+ * to the @new_num_nodes table for the VSI layer.
+ */
+static void
+ice_sched_recalc_vsi_support_nodes(struct ice_hw *hw,
+ struct ice_sched_node *vsi_node,
+ unsigned int new_numqs, u16 *new_num_nodes)
+{
+ u32 vsi_nodes_cnt = 1;
+ u32 max_queue_cnt = 1;
+ u32 qgl, vsil;
+
+ qgl = ice_sched_get_qgrp_layer(hw);
+ vsil = ice_sched_get_vsi_layer(hw);
+
+ for (u32 i = vsil; i <= qgl; i++)
+ max_queue_cnt *= hw->max_children[i];
+
+ while ((vsi_node = ice_sched_find_next_vsi_node(vsi_node)) != NULL)
+ vsi_nodes_cnt++;
+
+ if (new_numqs > (max_queue_cnt * vsi_nodes_cnt))
+ new_num_nodes[vsil] = DIV_ROUND_UP(new_numqs, max_queue_cnt) -
+ vsi_nodes_cnt;
+}
+
+/**
* ice_sched_update_vsi_child_nodes - update VSI child nodes
* @pi: port information structure
* @vsi_handle: software VSI handle
@@ -1862,15 +1936,25 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
return status;
}
- if (new_numqs)
- ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes);
- /* Keep the max number of queue configuration all the time. Update the
- * tree only if number of queues > previous number of queues. This may
+ ice_sched_recalc_vsi_support_nodes(hw, vsi_node,
+ new_numqs, new_num_nodes);
+ ice_sched_calc_vsi_child_nodes(hw, new_numqs - prev_numqs,
+ new_num_nodes);
+
+ /* Never decrease the number of queues in the tree. Update the tree
+ * only if number of queues > previous number of queues. This may
* leave some extra nodes in the tree if number of queues < previous
* number but that wouldn't harm anything. Removing those extra nodes
* may complicate the code if those nodes are part of SRL or
* individually rate limited.
+ * Also, add the required VSI support nodes if the existing ones cannot
+ * handle the requested new number of queues.
*/
+ status = ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node,
+ new_num_nodes);
+ if (status)
+ return status;
+
status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node,
new_num_nodes, owner);
if (status)
@@ -2012,6 +2096,58 @@ static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node)
}
/**
+ * ice_sched_rm_vsi_subtree - remove all nodes assigned to a given VSI
+ * @pi: port information structure
+ * @vsi_node: pointer to the leftmost node of the VSI to be removed
+ * @owner: LAN or RDMA
+ * @tc: TC number
+ *
+ * Return: Zero in case of success, or -EBUSY if the VSI has leaf nodes in the TC.
+ *
+ * This function removes all the VSI support nodes associated with a given VSI
+ * and its LAN or RDMA children nodes from the scheduler tree.
+ */
+static int
+ice_sched_rm_vsi_subtree(struct ice_port_info *pi,
+ struct ice_sched_node *vsi_node, u8 owner, u8 tc)
+{
+ u16 vsi_handle = vsi_node->vsi_handle;
+ bool all_vsi_nodes_removed = true;
+ int j = 0;
+
+ while (vsi_node) {
+ struct ice_sched_node *next_vsi_node;
+
+ if (ice_sched_is_leaf_node_present(vsi_node)) {
+ ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", tc);
+ return -EBUSY;
+ }
+ while (j < vsi_node->num_children) {
+ if (vsi_node->children[j]->owner == owner)
+ ice_free_sched_node(pi, vsi_node->children[j]);
+ else
+ j++;
+ }
+
+ next_vsi_node = ice_sched_find_next_vsi_node(vsi_node);
+
+ /* remove the VSI if it has no children */
+ if (!vsi_node->num_children)
+ ice_free_sched_node(pi, vsi_node);
+ else
+ all_vsi_nodes_removed = false;
+
+ vsi_node = next_vsi_node;
+ }
+
+ /* clean up aggregator related VSI info if any */
+ if (all_vsi_nodes_removed)
+ ice_sched_rm_agg_vsi_info(pi, vsi_handle);
+
+ return 0;
+}
+
+/**
* ice_sched_rm_vsi_cfg - remove the VSI and its children nodes
* @pi: port information structure
* @vsi_handle: software VSI handle
@@ -2037,7 +2173,6 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
ice_for_each_traffic_class(i) {
struct ice_sched_node *vsi_node, *tc_node;
- u8 j = 0;
tc_node = ice_sched_get_tc_node(pi, i);
if (!tc_node)
@@ -2047,31 +2182,12 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
if (!vsi_node)
continue;
- if (ice_sched_is_leaf_node_present(vsi_node)) {
- ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", i);
- status = -EBUSY;
+ status = ice_sched_rm_vsi_subtree(pi, vsi_node, owner, i);
+ if (status)
goto exit_sched_rm_vsi_cfg;
- }
- while (j < vsi_node->num_children) {
- if (vsi_node->children[j]->owner == owner) {
- ice_free_sched_node(pi, vsi_node->children[j]);
- /* reset the counter again since the num
- * children will be updated after node removal
- */
- j = 0;
- } else {
- j++;
- }
- }
- /* remove the VSI if it has no children */
- if (!vsi_node->num_children) {
- ice_free_sched_node(pi, vsi_node);
- vsi_ctx->sched.vsi_node[i] = NULL;
+ vsi_ctx->sched.vsi_node[i] = NULL;
- /* clean up aggregator related VSI info if any */
- ice_sched_rm_agg_vsi_info(pi, vsi_handle);
- }
if (owner == ICE_SCHED_NODE_OWNER_LAN)
vsi_ctx->sched.max_lanq[i] = 0;
else
@@ -3199,7 +3315,7 @@ ice_sched_add_rl_profile(struct ice_port_info *pi,
u8 profile_type;
int status;
- if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
+ if (!pi || layer_num >= pi->hw->num_tx_sched_layers)
return NULL;
switch (rl_type) {
case ICE_MIN_BW:
@@ -3215,8 +3331,6 @@ ice_sched_add_rl_profile(struct ice_port_info *pi,
return NULL;
}
- if (!pi)
- return NULL;
hw = pi->hw;
list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],
list_entry)
@@ -3446,7 +3560,7 @@ ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type,
struct ice_aqc_rl_profile_info *rl_prof_elem;
int status = 0;
- if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
+ if (layer_num >= pi->hw->num_tx_sched_layers)
return -EINVAL;
/* Check the existing list for RL profile */
list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],
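
The support-node recalculation introduced in ice_sched_recalc_vsi_support_nodes() above is simple arithmetic: one VSI support node can feed the product of max_children[] over the layers from the VSI layer down to the queue-group layer, and additional support nodes are requested only when the new queue count exceeds what the existing nodes can carry. A small standalone sketch of that math, with made-up numbers not taken from the patch:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		/* Hypothetical tree: a VSI support node parents 8 queue groups,
		 * each queue group parents 8 queues -> 64 queues per support node.
		 */
		unsigned int max_queue_cnt = 8 * 8;
		unsigned int vsi_nodes_cnt = 1;		/* support nodes already present */
		unsigned int new_numqs = 200;		/* requested queue count */
		unsigned int new_nodes = 0;

		if (new_numqs > max_queue_cnt * vsi_nodes_cnt)
			new_nodes = DIV_ROUND_UP(new_numqs, max_queue_cnt) - vsi_nodes_cnt;

		printf("additional VSI support nodes: %u\n", new_nodes);	/* prints 3 */
		return 0;
	}
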
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h
index 1aef05ea5a57..7b668083be07 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.h
+++ b/drivers/net/ethernet/intel/ice/ice_sched.h
@@ -6,6 +6,17 @@
#include "ice_common.h"
+/**
+ * DOC: ice_sched.h
+ *
+ * This header file contains everything needed by the scheduler code. It
+ * consists of defines related to layers, structures related to aggregators,
+ * function declarations and more.
+ */
+
+#define ICE_SCHED_5_LAYERS 5
+#define ICE_SCHED_9_LAYERS 9
+
#define SCHED_NODE_NAME_MAX_LEN 32
#define ICE_QGRP_LAYER_OFFSET 2
diff --git a/drivers/net/ethernet/intel/ice/ice_sf_eth.c b/drivers/net/ethernet/intel/ice/ice_sf_eth.c
new file mode 100644
index 000000000000..1a2c94375ca7
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_sf_eth.c
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024, Intel Corporation. */
+#include "ice.h"
+#include "ice_lib.h"
+#include "ice_txrx.h"
+#include "ice_fltr.h"
+#include "ice_sf_eth.h"
+#include "devlink/devlink.h"
+#include "devlink/port.h"
+
+static const struct net_device_ops ice_sf_netdev_ops = {
+ .ndo_open = ice_open,
+ .ndo_stop = ice_stop,
+ .ndo_start_xmit = ice_start_xmit,
+ .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
+ .ndo_change_mtu = ice_change_mtu,
+ .ndo_get_stats64 = ice_get_stats64,
+ .ndo_tx_timeout = ice_tx_timeout,
+ .ndo_bpf = ice_xdp,
+ .ndo_xdp_xmit = ice_xdp_xmit,
+ .ndo_xsk_wakeup = ice_xsk_wakeup,
+};
+
+/**
+ * ice_sf_cfg_netdev - Allocate, configure and register a netdev
+ * @dyn_port: subfunction associated with configured netdev
+ * @devlink_port: subfunction devlink port to be linked with netdev
+ *
+ * Return: 0 on success, negative value on failure
+ */
+static int ice_sf_cfg_netdev(struct ice_dynamic_port *dyn_port,
+ struct devlink_port *devlink_port)
+{
+ struct ice_vsi *vsi = dyn_port->vsi;
+ struct ice_netdev_priv *np;
+ struct net_device *netdev;
+ int err;
+
+ netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
+ vsi->alloc_rxq);
+ if (!netdev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(netdev, &vsi->back->pdev->dev);
+ set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
+ vsi->netdev = netdev;
+ np = netdev_priv(netdev);
+ np->vsi = vsi;
+
+ ice_set_netdev_features(netdev);
+
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_XSK_ZEROCOPY |
+ NETDEV_XDP_ACT_RX_SG;
+ netdev->xdp_zc_max_segs = ICE_MAX_BUF_TXD;
+
+ eth_hw_addr_set(netdev, dyn_port->hw_addr);
+ ether_addr_copy(netdev->perm_addr, dyn_port->hw_addr);
+ netdev->netdev_ops = &ice_sf_netdev_ops;
+ SET_NETDEV_DEVLINK_PORT(netdev, devlink_port);
+
+ err = register_netdev(netdev);
+ if (err) {
+ free_netdev(netdev);
+ vsi->netdev = NULL;
+ return -ENOMEM;
+ }
+ set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+
+ return 0;
+}
+
+static void ice_sf_decfg_netdev(struct ice_vsi *vsi)
+{
+ unregister_netdev(vsi->netdev);
+ clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
+ free_netdev(vsi->netdev);
+ vsi->netdev = NULL;
+ clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
+}
+
+/**
+ * ice_sf_dev_probe - subfunction driver probe function
+ * @adev: pointer to the auxiliary device
+ * @id: pointer to the auxiliary_device id
+ *
+ * Configure VSI and netdev resources for the subfunction device.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int ice_sf_dev_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct ice_sf_dev *sf_dev = ice_adev_to_sf_dev(adev);
+ struct ice_dynamic_port *dyn_port = sf_dev->dyn_port;
+ struct ice_vsi *vsi = dyn_port->vsi;
+ struct ice_pf *pf = dyn_port->pf;
+ struct device *dev = &adev->dev;
+ struct ice_sf_priv *priv;
+ struct devlink *devlink;
+ int err;
+
+ vsi->type = ICE_VSI_SF;
+ vsi->port_info = pf->hw.port_info;
+ vsi->flags = ICE_VSI_FLAG_INIT;
+
+ priv = ice_allocate_sf(&adev->dev, pf);
+ if (IS_ERR(priv)) {
+ dev_err(dev, "Subfunction devlink alloc failed");
+ return PTR_ERR(priv);
+ }
+
+ priv->dev = sf_dev;
+ sf_dev->priv = priv;
+ devlink = priv_to_devlink(priv);
+
+ devl_lock(devlink);
+
+ err = ice_vsi_cfg(vsi);
+ if (err) {
+ dev_err(dev, "Subfunction vsi config failed");
+ goto err_free_devlink;
+ }
+ vsi->sf = dyn_port;
+
+ ice_eswitch_update_repr(&dyn_port->repr_id, vsi);
+
+ err = ice_devlink_create_sf_dev_port(sf_dev);
+ if (err) {
+ dev_err(dev, "Cannot add ice virtual devlink port for subfunction");
+ goto err_vsi_decfg;
+ }
+
+ err = ice_sf_cfg_netdev(dyn_port, &sf_dev->priv->devlink_port);
+ if (err) {
+ dev_err(dev, "Subfunction netdev config failed");
+ goto err_devlink_destroy;
+ }
+
+ err = devl_port_fn_devlink_set(&dyn_port->devlink_port, devlink);
+ if (err) {
+ dev_err(dev, "Can't link devlink instance to SF devlink port");
+ goto err_netdev_decfg;
+ }
+
+ ice_napi_add(vsi);
+
+ devl_register(devlink);
+ devl_unlock(devlink);
+
+ dyn_port->attached = true;
+
+ return 0;
+
+err_netdev_decfg:
+ ice_sf_decfg_netdev(vsi);
+err_devlink_destroy:
+ ice_devlink_destroy_sf_dev_port(sf_dev);
+err_vsi_decfg:
+ ice_vsi_decfg(vsi);
+err_free_devlink:
+ devl_unlock(devlink);
+ devlink_free(devlink);
+ return err;
+}
+
+/**
+ * ice_sf_dev_remove - subfunction driver remove function
+ * @adev: pointer to the auxiliary device
+ *
+ * Deinitialize VSI and netdev resources for the subfunction device.
+ */
+static void ice_sf_dev_remove(struct auxiliary_device *adev)
+{
+ struct ice_sf_dev *sf_dev = ice_adev_to_sf_dev(adev);
+ struct ice_dynamic_port *dyn_port = sf_dev->dyn_port;
+ struct ice_vsi *vsi = dyn_port->vsi;
+ struct devlink *devlink;
+
+ devlink = priv_to_devlink(sf_dev->priv);
+ devl_lock(devlink);
+
+ ice_vsi_close(vsi);
+
+ ice_sf_decfg_netdev(vsi);
+ ice_devlink_destroy_sf_dev_port(sf_dev);
+ devl_unregister(devlink);
+ devl_unlock(devlink);
+ devlink_free(devlink);
+ ice_vsi_decfg(vsi);
+
+ dyn_port->attached = false;
+}
+
+static const struct auxiliary_device_id ice_sf_dev_id_table[] = {
+ { .name = "ice.sf", },
+ { },
+};
+
+MODULE_DEVICE_TABLE(auxiliary, ice_sf_dev_id_table);
+
+static struct auxiliary_driver ice_sf_driver = {
+ .name = "sf",
+ .probe = ice_sf_dev_probe,
+ .remove = ice_sf_dev_remove,
+ .id_table = ice_sf_dev_id_table
+};
+
+static DEFINE_XARRAY_ALLOC1(ice_sf_aux_id);
+
+/**
+ * ice_sf_driver_register - Register new auxiliary subfunction driver
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int ice_sf_driver_register(void)
+{
+ return auxiliary_driver_register(&ice_sf_driver);
+}
+
+/**
+ * ice_sf_driver_unregister - Unregister the auxiliary subfunction driver
+ */
+void ice_sf_driver_unregister(void)
+{
+ auxiliary_driver_unregister(&ice_sf_driver);
+}
+
+/**
+ * ice_sf_dev_release - Release device associated with auxiliary device
+ * @device: pointer to the device
+ *
+ * Since most of the code for subfunction deactivation is handled in
+ * the remove handler, this callback only needs to free the tracking
+ * resources.
+ */
+static void ice_sf_dev_release(struct device *device)
+{
+ struct auxiliary_device *adev = to_auxiliary_dev(device);
+ struct ice_sf_dev *sf_dev = ice_adev_to_sf_dev(adev);
+
+ xa_erase(&ice_sf_aux_id, adev->id);
+ kfree(sf_dev);
+}
+
+/**
+ * ice_sf_eth_activate - Activate Ethernet subfunction port
+ * @dyn_port: the dynamic port instance for this subfunction
+ * @extack: extack for reporting error messages
+ *
+ * Activate the dynamic port as an Ethernet subfunction. Set up the
+ * associated netdev resources and initialize the auxiliary device.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int
+ice_sf_eth_activate(struct ice_dynamic_port *dyn_port,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = dyn_port->pf;
+ struct ice_sf_dev *sf_dev;
+ struct pci_dev *pdev;
+ int err;
+ u32 id;
+
+ err = xa_alloc(&ice_sf_aux_id, &id, NULL, xa_limit_32b,
+ GFP_KERNEL);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Could not allocate SF ID");
+ return err;
+ }
+
+ sf_dev = kzalloc(sizeof(*sf_dev), GFP_KERNEL);
+ if (!sf_dev) {
+ err = -ENOMEM;
+ NL_SET_ERR_MSG_MOD(extack, "Could not allocate SF memory");
+ goto xa_erase;
+ }
+ pdev = pf->pdev;
+
+ sf_dev->dyn_port = dyn_port;
+ sf_dev->adev.id = id;
+ sf_dev->adev.name = "sf";
+ sf_dev->adev.dev.release = ice_sf_dev_release;
+ sf_dev->adev.dev.parent = &pdev->dev;
+
+ err = auxiliary_device_init(&sf_dev->adev);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to initialize SF device");
+ goto sf_dev_free;
+ }
+
+ err = auxiliary_device_add(&sf_dev->adev);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to add SF device");
+ goto aux_dev_uninit;
+ }
+
+ dyn_port->sf_dev = sf_dev;
+
+ return 0;
+
+aux_dev_uninit:
+ auxiliary_device_uninit(&sf_dev->adev);
+sf_dev_free:
+ kfree(sf_dev);
+xa_erase:
+ xa_erase(&ice_sf_aux_id, id);
+
+ return err;
+}
+
+/**
+ * ice_sf_eth_deactivate - Deactivate Ethernet subfunction port
+ * @dyn_port: the dynamic port instance for this subfunction
+ *
+ * Deactivate the Ethernet subfunction, removing its auxiliary device and the
+ * associated resources.
+ */
+void ice_sf_eth_deactivate(struct ice_dynamic_port *dyn_port)
+{
+ struct ice_sf_dev *sf_dev = dyn_port->sf_dev;
+
+ auxiliary_device_delete(&sf_dev->adev);
+ auxiliary_device_uninit(&sf_dev->adev);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_sf_eth.h b/drivers/net/ethernet/intel/ice/ice_sf_eth.h
new file mode 100644
index 000000000000..c558cad0a183
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_sf_eth.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024, Intel Corporation. */
+
+#ifndef _ICE_SF_ETH_H_
+#define _ICE_SF_ETH_H_
+
+#include <linux/auxiliary_bus.h>
+#include "ice.h"
+
+struct ice_sf_dev {
+ struct auxiliary_device adev;
+ struct ice_dynamic_port *dyn_port;
+ struct ice_sf_priv *priv;
+};
+
+struct ice_sf_priv {
+ struct ice_sf_dev *dev;
+ struct devlink_port devlink_port;
+};
+
+static inline struct
+ice_sf_dev *ice_adev_to_sf_dev(struct auxiliary_device *adev)
+{
+ return container_of(adev, struct ice_sf_dev, adev);
+}
+
+int ice_sf_driver_register(void);
+void ice_sf_driver_unregister(void);
+
+int ice_sf_eth_activate(struct ice_dynamic_port *dyn_port,
+ struct netlink_ext_ack *extack);
+void ice_sf_eth_deactivate(struct ice_dynamic_port *dyn_port);
+#endif /* _ICE_SF_ETH_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.c
new file mode 100644
index 000000000000..3d7e96721cf9
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023, Intel Corporation. */
+
+#include "ice_vsi_vlan_ops.h"
+#include "ice_vsi_vlan_lib.h"
+#include "ice_vlan_mode.h"
+#include "ice.h"
+#include "ice_sf_vsi_vlan_ops.h"
+
+void ice_sf_vsi_init_vlan_ops(struct ice_vsi *vsi)
+{
+ struct ice_vsi_vlan_ops *vlan_ops;
+
+ if (ice_is_dvm_ena(&vsi->back->hw))
+ vlan_ops = &vsi->outer_vlan_ops;
+ else
+ vlan_ops = &vsi->inner_vlan_ops;
+
+ vlan_ops->add_vlan = ice_vsi_add_vlan;
+ vlan_ops->del_vlan = ice_vsi_del_vlan;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.h b/drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.h
new file mode 100644
index 000000000000..8c44eafceea0
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2023, Intel Corporation. */
+
+#ifndef _ICE_SF_VSI_VLAN_OPS_H_
+#define _ICE_SF_VSI_VLAN_OPS_H_
+
+#include "ice_vsi_vlan_ops.h"
+
+struct ice_vsi;
+
+void ice_sf_vsi_init_vlan_ops(struct ice_vsi *vsi);
+
+#endif /* _ICE_SF_VSI_VLAN_OPS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index a958fcf3e6be..6b1126ddb561 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -9,7 +9,7 @@
#include "ice_dcb_lib.h"
#include "ice_flow.h"
#include "ice_eswitch.h"
-#include "ice_virtchnl_allowlist.h"
+#include "virt/allowlist.h"
#include "ice_flex_pipe.h"
#include "ice_vf_vsi_vlan_ops.h"
#include "ice_vlan.h"
@@ -36,6 +36,7 @@ static void ice_free_vf_entries(struct ice_pf *pf)
hash_for_each_safe(vfs->table, bkt, tmp, vf, entry) {
hash_del_rcu(&vf->entry);
+ ice_deinitialize_vf_entry(vf);
ice_put_vf(vf);
}
}
@@ -62,6 +63,7 @@ static void ice_free_vf_res(struct ice_vf *vf)
if (vf->lan_vsi_idx != ICE_NO_VSI) {
ice_vf_vsi_release(vf);
vf->num_mac = 0;
+ vf->num_mac_lldp = 0;
}
last_vector_idx = vf->first_vector_idx + vf->num_msix - 1;
@@ -123,27 +125,6 @@ static void ice_dis_vf_mappings(struct ice_vf *vf)
}
/**
- * ice_sriov_free_msix_res - Reset/free any used MSIX resources
- * @pf: pointer to the PF structure
- *
- * Since no MSIX entries are taken from the pf->irq_tracker then just clear
- * the pf->sriov_base_vector.
- *
- * Returns 0 on success, and -EINVAL on error.
- */
-static int ice_sriov_free_msix_res(struct ice_pf *pf)
-{
- if (!pf)
- return -EINVAL;
-
- bitmap_free(pf->sriov_irq_bm);
- pf->sriov_irq_size = 0;
- pf->sriov_base_vector = 0;
-
- return 0;
-}
-
-/**
* ice_free_vfs - Free all VFs
* @pf: pointer to the PF structure
*/
@@ -170,15 +151,14 @@ void ice_free_vfs(struct ice_pf *pf)
else
dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");
- ice_eswitch_reserve_cp_queues(pf, -ice_get_num_vfs(pf));
-
mutex_lock(&vfs->table_lock);
ice_for_each_vf(pf, bkt, vf) {
mutex_lock(&vf->cfg_lock);
- ice_eswitch_detach(pf, vf);
+ ice_eswitch_detach_vf(pf, vf);
ice_dis_vf_qs(vf);
+ ice_virt_free_irqs(pf, vf->first_vector_idx, vf->num_msix);
if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
/* disable VF qp mappings and set VF disable state */
@@ -195,15 +175,9 @@ void ice_free_vfs(struct ice_pf *pf)
wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
}
- /* clear malicious info since the VF is getting released */
- list_del(&vf->mbx_info.list_entry);
-
mutex_unlock(&vf->cfg_lock);
}
- if (ice_sriov_free_msix_res(pf))
- dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");
-
vfs->num_qps_per = 0;
ice_free_vf_entries(pf);
@@ -227,7 +201,7 @@ static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
struct ice_vsi *vsi;
params.type = ICE_VSI_VF;
- params.pi = ice_vf_get_port_info(vf);
+ params.port_info = ice_vf_get_port_info(vf);
params.vf = vf;
params.flags = ICE_VSI_FLAG_INIT;
@@ -362,47 +336,14 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
* @vf: VF to calculate the register index for
* @q_vector: a q_vector associated to the VF
*/
-int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
+void ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
{
if (!vf || !q_vector)
- return -EINVAL;
+ return;
/* always add one to account for the OICR being the first MSIX */
- return vf->first_vector_idx + q_vector->v_idx + 1;
-}
-
-/**
- * ice_sriov_set_msix_res - Set any used MSIX resources
- * @pf: pointer to PF structure
- * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
- *
- * This function allows SR-IOV resources to be taken from the end of the PF's
- * allowed HW MSIX vectors so that the irq_tracker will not be affected. We
- * just set the pf->sriov_base_vector and return success.
- *
- * If there are not enough resources available, return an error. This should
- * always be caught by ice_set_per_vf_res().
- *
- * Return 0 on success, and -EINVAL when there are not enough MSIX vectors
- * in the PF's space available for SR-IOV.
- */
-static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
-{
- u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
- int vectors_used = ice_get_max_used_msix_vector(pf);
- int sriov_base_vector;
-
- sriov_base_vector = total_vectors - num_msix_needed;
-
- /* make sure we only grab irq_tracker entries from the list end and
- * that we have enough available MSIX vectors
- */
- if (sriov_base_vector < vectors_used)
- return -EINVAL;
-
- pf->sriov_base_vector = sriov_base_vector;
-
- return 0;
+ q_vector->vf_reg_idx = q_vector->v_idx + ICE_NONQ_VECS_VF;
+ q_vector->reg_idx = vf->first_vector_idx + q_vector->vf_reg_idx;
}
/**
@@ -429,11 +370,9 @@ static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
*/
static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
{
- int vectors_used = ice_get_max_used_msix_vector(pf);
u16 num_msix_per_vf, num_txq, num_rxq, avail_qs;
int msix_avail_per_vf, msix_avail_for_sriov;
struct device *dev = ice_pf_to_dev(pf);
- int err;
lockdep_assert_held(&pf->vfs.table_lock);
@@ -441,8 +380,7 @@ static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
return -EINVAL;
/* determine MSI-X resources per VF */
- msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
- vectors_used;
+ msix_avail_for_sriov = pf->virt_irq_tracker.num_entries;
msix_avail_per_vf = msix_avail_for_sriov / num_vfs;
if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
@@ -481,13 +419,6 @@ static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
return -ENOSPC;
}
- err = ice_sriov_set_msix_res(pf, num_msix_per_vf * num_vfs);
- if (err) {
- dev_err(dev, "Unable to set MSI-X resources for %d VFs, err %d\n",
- num_vfs, err);
- return err;
- }
-
/* only allow equal Tx/Rx queue count (i.e. queue pairs) */
pf->vfs.num_qps_per = min_t(int, num_txq, num_rxq);
pf->vfs.num_msix_per = num_msix_per_vf;
@@ -498,52 +429,6 @@ static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
}
/**
- * ice_sriov_get_irqs - get irqs for SR-IOV usacase
- * @pf: pointer to PF structure
- * @needed: number of irqs to get
- *
- * This returns the first MSI-X vector index in PF space that is used by this
- * VF. This index is used when accessing PF relative registers such as
- * GLINT_VECT2FUNC and GLINT_DYN_CTL.
- * This will always be the OICR index in the AVF driver so any functionality
- * using vf->first_vector_idx for queue configuration_id: id of VF which will
- * use this irqs
- *
- * Only SRIOV specific vectors are tracked in sriov_irq_bm. SRIOV vectors are
- * allocated from the end of global irq index. First bit in sriov_irq_bm means
- * last irq index etc. It simplifies extension of SRIOV vectors.
- * They will be always located from sriov_base_vector to the last irq
- * index. While increasing/decreasing sriov_base_vector can be moved.
- */
-static int ice_sriov_get_irqs(struct ice_pf *pf, u16 needed)
-{
- int res = bitmap_find_next_zero_area(pf->sriov_irq_bm,
- pf->sriov_irq_size, 0, needed, 0);
- /* conversion from number in bitmap to global irq index */
- int index = pf->sriov_irq_size - res - needed;
-
- if (res >= pf->sriov_irq_size || index < pf->sriov_base_vector)
- return -ENOENT;
-
- bitmap_set(pf->sriov_irq_bm, res, needed);
- return index;
-}
-
-/**
- * ice_sriov_free_irqs - free irqs used by the VF
- * @pf: pointer to PF structure
- * @vf: pointer to VF structure
- */
-static void ice_sriov_free_irqs(struct ice_pf *pf, struct ice_vf *vf)
-{
- /* Move back from first vector index to first index in bitmap */
- int bm_i = pf->sriov_irq_size - vf->first_vector_idx - vf->num_msix;
-
- bitmap_clear(pf->sriov_irq_bm, bm_i, vf->num_msix);
- vf->first_vector_idx = 0;
-}
-
-/**
* ice_init_vf_vsi_res - initialize/setup VF VSI resources
* @vf: VF to initialize/setup the VSI for
*
@@ -556,7 +441,7 @@ static int ice_init_vf_vsi_res(struct ice_vf *vf)
struct ice_vsi *vsi;
int err;
- vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix);
+ vf->first_vector_idx = ice_virt_get_irqs(pf, vf->num_msix);
if (vf->first_vector_idx < 0)
return -ENOMEM;
@@ -599,7 +484,7 @@ static int ice_start_vfs(struct ice_pf *pf)
goto teardown;
}
- retval = ice_eswitch_attach(pf, vf);
+ retval = ice_eswitch_attach_vf(pf, vf);
if (retval) {
dev_err(ice_pf_to_dev(pf), "Failed to attach VF %d to eswitch, error %d",
vf->vf_id, retval);
@@ -833,11 +718,6 @@ static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs)
pci_dev_get(vfdev);
- /* set default number of MSI-X */
- vf->num_msix = pf->vfs.num_msix_per;
- vf->num_vf_qs = pf->vfs.num_qps_per;
- ice_vc_set_default_allowlist(vf);
-
hash_add_rcu(vfs->table, &vf->entry, vf_id);
}
@@ -861,16 +741,10 @@ err_free_entries:
*/
static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
{
- int total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
int ret;
- pf->sriov_irq_bm = bitmap_zalloc(total_vectors, GFP_KERNEL);
- if (!pf->sriov_irq_bm)
- return -ENOMEM;
- pf->sriov_irq_size = total_vectors;
-
/* Disable global interrupt 0 so we don't try to handle the VFLR. */
wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index),
ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
@@ -897,7 +771,6 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
goto err_unroll_sriov;
}
- ice_eswitch_reserve_cp_queues(pf, num_vfs);
ret = ice_start_vfs(pf);
if (ret) {
dev_err(dev, "Failed to start %d VFs, err %d\n", num_vfs, ret);
@@ -924,7 +797,6 @@ err_unroll_intr:
/* rearm interrupts here */
ice_irq_dynamic_ena(hw, NULL, NULL);
clear_bit(ICE_OICR_INTR_DIS, pf->state);
- bitmap_free(pf->sriov_irq_bm);
return ret;
}
@@ -998,16 +870,7 @@ u32 ice_sriov_get_vf_total_msix(struct pci_dev *pdev)
{
struct ice_pf *pf = pci_get_drvdata(pdev);
- return pf->sriov_irq_size - ice_get_max_used_msix_vector(pf);
-}
-
-static int ice_sriov_move_base_vector(struct ice_pf *pf, int move)
-{
- if (pf->sriov_base_vector - move < ice_get_max_used_msix_vector(pf))
- return -ENOMEM;
-
- pf->sriov_base_vector -= move;
- return 0;
+ return pf->virt_irq_tracker.num_entries;
}
static void ice_sriov_remap_vectors(struct ice_pf *pf, u16 restricted_id)
@@ -1026,7 +889,8 @@ static void ice_sriov_remap_vectors(struct ice_pf *pf, u16 restricted_id)
continue;
ice_dis_vf_mappings(tmp_vf);
- ice_sriov_free_irqs(pf, tmp_vf);
+ ice_virt_free_irqs(pf, tmp_vf->first_vector_idx,
+ tmp_vf->num_msix);
vf_ids[to_remap] = tmp_vf->vf_id;
to_remap += 1;
@@ -1038,7 +902,7 @@ static void ice_sriov_remap_vectors(struct ice_pf *pf, u16 restricted_id)
continue;
tmp_vf->first_vector_idx =
- ice_sriov_get_irqs(pf, tmp_vf->num_msix);
+ ice_virt_get_irqs(pf, tmp_vf->num_msix);
/* there is no need to rebuild VSI as we are only changing the
* vector indexes not amount of MSI-X or queues
*/
@@ -1069,7 +933,6 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
bool needs_rebuild = false;
struct ice_vsi *vsi;
struct ice_vf *vf;
- int id;
if (!ice_get_num_vfs(pf))
return -ENOENT;
@@ -1088,45 +951,41 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
if (msix_vec_count < ICE_MIN_INTR_PER_VF)
return -EINVAL;
- /* Transition of PCI VF function number to function_id */
- for (id = 0; id < pci_num_vf(pdev); id++) {
- if (vf_dev->devfn == pci_iov_virtfn_devfn(pdev, id))
- break;
- }
-
- if (id == pci_num_vf(pdev))
- return -ENOENT;
-
- vf = ice_get_vf_by_id(pf, id);
-
+ vf = ice_get_vf_by_dev(pf, vf_dev);
if (!vf)
return -ENOENT;
vsi = ice_get_vf_vsi(vf);
- if (!vsi)
+ if (!vsi) {
+ ice_put_vf(vf);
return -ENOENT;
+ }
- prev_msix = vf->num_msix;
- prev_queues = vf->num_vf_qs;
-
- if (ice_sriov_move_base_vector(pf, msix_vec_count - prev_msix)) {
+ /* No need to rebuild if we're setting to the same value */
+ if (msix_vec_count == vf->num_msix) {
ice_put_vf(vf);
- return -ENOSPC;
+ return 0;
}
+ prev_msix = vf->num_msix;
+ prev_queues = vf->num_vf_qs;
+
ice_dis_vf_mappings(vf);
- ice_sriov_free_irqs(pf, vf);
+ ice_virt_free_irqs(pf, vf->first_vector_idx, vf->num_msix);
/* Remap all VFs beside the one is now configured */
ice_sriov_remap_vectors(pf, vf->vf_id);
vf->num_msix = msix_vec_count;
vf->num_vf_qs = queues;
- vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix);
+ vf->first_vector_idx = ice_virt_get_irqs(pf, vf->num_msix);
if (vf->first_vector_idx < 0)
goto unroll;
- if (ice_vf_reconfig_vsi(vf) || ice_vf_init_host_cfg(vf, vsi)) {
+ vsi->req_txq = queues;
+ vsi->req_rxq = queues;
+
+ if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT)) {
/* Try to rebuild with previous values */
needs_rebuild = true;
goto unroll;
@@ -1148,13 +1007,18 @@ unroll:
vf->num_msix = prev_msix;
vf->num_vf_qs = prev_queues;
- vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix);
- if (vf->first_vector_idx < 0)
+
+ vf->first_vector_idx = ice_virt_get_irqs(pf, vf->num_msix);
+ if (vf->first_vector_idx < 0) {
+ ice_put_vf(vf);
return -EINVAL;
+ }
if (needs_rebuild) {
- ice_vf_reconfig_vsi(vf);
- ice_vf_init_host_cfg(vf, vsi);
+ vsi->req_txq = prev_queues;
+ vsi->req_rxq = prev_queues;
+
+ ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
}
ice_ena_vf_mappings(vf);
@@ -1297,10 +1161,12 @@ static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq)
void
ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
{
+ struct ice_aqc_event_lan_overflow *cmd;
u32 gldcb_rtctq, queue;
struct ice_vf *vf;
- gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq);
+ cmd = libie_aq_raw(&event->desc);
+ gldcb_rtctq = le32_to_cpu(cmd->prtdcb_ruptq);
dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq);
/* event returns device global Rx queue number */
@@ -1324,8 +1190,7 @@ ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
*/
int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_pf *pf = np->vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
struct ice_vsi *vf_vsi;
struct device *dev;
struct ice_vf *vf;
@@ -1423,21 +1288,23 @@ out_put_vf:
}
/**
- * ice_set_vf_mac
- * @netdev: network interface device structure
+ * __ice_set_vf_mac - program VF MAC address
+ * @pf: PF to be configured
* @vf_id: VF identifier
* @mac: MAC address
*
* program VF MAC address
+ * Return: zero on success or an error code on failure
*/
-int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
+int __ice_set_vf_mac(struct ice_pf *pf, u16 vf_id, const u8 *mac)
{
- struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct device *dev;
struct ice_vf *vf;
int ret;
+ dev = ice_pf_to_dev(pf);
if (is_multicast_ether_addr(mac)) {
- netdev_err(netdev, "%pM not a valid unicast address\n", mac);
+ dev_err(dev, "%pM not a valid unicast address\n", mac);
return -EINVAL;
}
@@ -1466,13 +1333,13 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
if (is_zero_ether_addr(mac)) {
/* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */
vf->pf_set_mac = false;
- netdev_info(netdev, "Removing MAC on VF %d. VF driver will be reinitialized\n",
- vf->vf_id);
+ dev_info(dev, "Removing MAC on VF %d. VF driver will be reinitialized\n",
+ vf->vf_id);
} else {
/* PF will add MAC rule for the VF */
vf->pf_set_mac = true;
- netdev_info(netdev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n",
- mac, vf_id);
+ dev_info(dev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n",
+ mac, vf_id);
}
ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
@@ -1484,6 +1351,20 @@ out_put_vf:
}
/**
+ * ice_set_vf_mac - .ndo_set_vf_mac handler
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @mac: MAC address
+ *
+ * program VF MAC address
+ * Return: zero on success or an error code on failure
+ */
+int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
+{
+ return __ice_set_vf_mac(ice_netdev_to_pf(netdev), vf_id, mac);
+}
+
+/**
* ice_set_vf_trust
* @netdev: network interface device structure
* @vf_id: VF identifier
@@ -1518,6 +1399,9 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
mutex_lock(&vf->cfg_lock);
+ while (!trusted && vf->num_mac_lldp)
+ ice_vf_update_mac_lldp_num(vf, ice_get_vf_vsi(vf), false);
+
vf->trusted = trusted;
ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
@@ -1869,6 +1753,24 @@ void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
}
/**
+ * ice_print_vf_tx_mdd_event - print VF Tx malicious driver detect event
+ * @vf: pointer to the VF structure
+ */
+void ice_print_vf_tx_mdd_event(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ struct device *dev;
+
+ dev = ice_pf_to_dev(pf);
+
+ dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
+ vf->mdd_tx_events.count, pf->hw.pf_id, vf->vf_id,
+ vf->dev_lan_addr,
+ test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
+ ? "on" : "off");
+}
+
+/**
* ice_print_vfs_mdd_events - print VFs malicious driver detect event
* @pf: pointer to the PF structure
*
@@ -1876,8 +1778,6 @@ void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
*/
void ice_print_vfs_mdd_events(struct ice_pf *pf)
{
- struct device *dev = ice_pf_to_dev(pf);
- struct ice_hw *hw = &pf->hw;
struct ice_vf *vf;
unsigned int bkt;
@@ -1904,10 +1804,7 @@ void ice_print_vfs_mdd_events(struct ice_pf *pf)
if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
vf->mdd_tx_events.last_printed =
vf->mdd_tx_events.count;
-
- dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",
- vf->mdd_tx_events.count, hw->pf_id, vf->vf_id,
- vf->dev_lan_addr);
+ ice_print_vf_tx_mdd_event(vf);
}
}
mutex_unlock(&pf->vfs.table_lock);
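
With the bitmap-based SR-IOV vector tracking removed above, ice_calc_vf_reg_idx() now just derives two indexes: the vector's slot within the VF (queue vector index plus the non-queue OICR slot) and its absolute slot in PF register space (that value offset by the VF's first vector). A standalone sketch of the arithmetic, assuming ICE_NONQ_VECS_VF is 1 and using a made-up base index:

	#include <stdio.h>

	#define ICE_NONQ_VECS_VF	1	/* assumed: one OICR vector ahead of the queue vectors */

	int main(void)
	{
		unsigned int first_vector_idx = 96;	/* hypothetical base of this VF's vectors in PF space */

		for (unsigned int v_idx = 0; v_idx < 4; v_idx++) {
			unsigned int vf_reg_idx = v_idx + ICE_NONQ_VECS_VF;
			unsigned int reg_idx = first_vector_idx + vf_reg_idx;

			printf("q_vector %u: vf_reg_idx=%u reg_idx=%u\n",
			       v_idx, vf_reg_idx, reg_idx);
		}
		return 0;
	}
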
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.h b/drivers/net/ethernet/intel/ice/ice_sriov.h
index 8488df38b586..6c4fad09a527 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.h
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.h
@@ -3,9 +3,9 @@
#ifndef _ICE_SRIOV_H_
#define _ICE_SRIOV_H_
-#include "ice_virtchnl_fdir.h"
+#include "virt/fdir.h"
#include "ice_vf_lib.h"
-#include "ice_virtchnl.h"
+#include "virt/virtchnl.h"
/* Static VF transaction/status register def */
#define VF_DEVICE_STATUS 0xAA
@@ -28,6 +28,7 @@
#ifdef CONFIG_PCI_IOV
void ice_process_vflr_event(struct ice_pf *pf);
int ice_sriov_configure(struct pci_dev *pdev, int num_vfs);
+int __ice_set_vf_mac(struct ice_pf *pf, u16 vf_id, const u8 *mac);
int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
int
ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi);
@@ -49,7 +50,7 @@ int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state);
int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena);
-int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector);
+void ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector);
int
ice_get_vf_stats(struct net_device *netdev, int vf_id,
@@ -58,10 +59,12 @@ void
ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event);
void ice_print_vfs_mdd_events(struct ice_pf *pf);
void ice_print_vf_rx_mdd_event(struct ice_vf *vf);
+void ice_print_vf_tx_mdd_event(struct ice_vf *vf);
bool
ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto);
u32 ice_sriov_get_vf_total_msix(struct pci_dev *pdev);
int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count);
+int ice_vf_vsi_dis_single_txq(struct ice_vf *vf, struct ice_vsi *vsi, u16 q_id);
#else /* CONFIG_PCI_IOV */
static inline void ice_process_vflr_event(struct ice_pf *pf) { }
static inline void ice_free_vfs(struct ice_pf *pf) { }
@@ -69,6 +72,7 @@ static inline
void ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) { }
static inline void ice_print_vfs_mdd_events(struct ice_pf *pf) { }
static inline void ice_print_vf_rx_mdd_event(struct ice_vf *vf) { }
+static inline void ice_print_vf_tx_mdd_event(struct ice_vf *vf) { }
static inline void ice_restore_all_vfs_msi_state(struct ice_pf *pf) { }
static inline int
@@ -79,6 +83,13 @@ ice_sriov_configure(struct pci_dev __always_unused *pdev,
}
static inline int
+__ice_set_vf_mac(struct ice_pf __always_unused *pf,
+ u16 __always_unused vf_id, const u8 __always_unused *mac)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
ice_set_vf_mac(struct net_device __always_unused *netdev,
int __always_unused vf_id, u8 __always_unused *mac)
{
@@ -130,11 +141,10 @@ ice_set_vf_bw(struct net_device __always_unused *netdev,
return -EOPNOTSUPP;
}
-static inline int
+static inline void
ice_calc_vf_reg_idx(struct ice_vf __always_unused *vf,
struct ice_q_vector __always_unused *q_vector)
{
- return 0;
}
static inline int
@@ -155,5 +165,11 @@ ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
{
return -EOPNOTSUPP;
}
+
+static inline int ice_vf_vsi_dis_single_txq(struct ice_vf *vf,
+ struct ice_vsi *vsi, u16 q_id)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_PCI_IOV */
#endif /* _ICE_SRIOV_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index b4ea935e8300..84848f0123e7 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -3,6 +3,7 @@
#include "ice_lib.h"
#include "ice_switch.h"
+#include "ice_trace.h"
#define ICE_ETH_DA_OFFSET 0
#define ICE_ETH_ETHTYPE_OFFSET 12
@@ -42,6 +43,7 @@ enum {
ICE_PKT_KMALLOC = BIT(9),
ICE_PKT_PPPOE = BIT(10),
ICE_PKT_L2TPV3 = BIT(11),
+ ICE_PKT_PFCP = BIT(12),
};
struct ice_dummy_pkt_offsets {
@@ -1110,6 +1112,77 @@ ICE_DECLARE_PKT_TEMPLATE(ipv6_gtp) = {
0x00, 0x00,
};
+ICE_DECLARE_PKT_OFFSETS(pfcp_session_ipv4) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_UDP_ILOS, 34 },
+ { ICE_PFCP, 42 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(pfcp_session_ipv4) = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_OL 12 */
+
+ 0x45, 0x00, 0x00, 0x2c, /* ICE_IPV4_OFOS 14 */
+ 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x22, 0x65, /* ICE_UDP_ILOS 34 */
+ 0x00, 0x18, 0x00, 0x00,
+
+ 0x21, 0x01, 0x00, 0x0c, /* ICE_PFCP 42 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+ICE_DECLARE_PKT_OFFSETS(pfcp_session_ipv6) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV6_OFOS, 14 },
+ { ICE_UDP_ILOS, 54 },
+ { ICE_PFCP, 62 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(pfcp_session_ipv6) = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x86, 0xdd, /* ICE_ETYPE_OL 12 */
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
+ 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x22, 0x65, /* ICE_UDP_ILOS 54 */
+ 0x00, 0x18, 0x00, 0x00,
+
+ 0x21, 0x01, 0x00, 0x0c, /* ICE_PFCP 62 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
ICE_DECLARE_PKT_OFFSETS(pppoe_ipv4_tcp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
@@ -1343,6 +1416,8 @@ static const struct ice_dummy_pkt_profile ice_dummy_pkt_profiles[] = {
ICE_PKT_PROFILE(ipv4_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU),
ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPC | ICE_PKT_OUTER_IPV6),
ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPC),
+ ICE_PKT_PROFILE(pfcp_session_ipv6, ICE_PKT_PFCP | ICE_PKT_OUTER_IPV6),
+ ICE_PKT_PROFILE(pfcp_session_ipv4, ICE_PKT_PFCP),
ICE_PKT_PROFILE(pppoe_ipv6_udp, ICE_PKT_PPPOE | ICE_PKT_OUTER_IPV6 |
ICE_PKT_INNER_UDP),
ICE_PKT_PROFILE(pppoe_ipv6_tcp, ICE_PKT_PPPOE | ICE_PKT_OUTER_IPV6),
@@ -1397,7 +1472,6 @@ int ice_init_def_sw_recp(struct ice_hw *hw)
recps[i].root_rid = i;
INIT_LIST_HEAD(&recps[i].filt_rules);
INIT_LIST_HEAD(&recps[i].filt_replay_rules);
- INIT_LIST_HEAD(&recps[i].rg_list);
mutex_init(&recps[i].filt_rule_lock);
}
@@ -1437,11 +1511,11 @@ ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
struct ice_sq_cd *cd)
{
struct ice_aqc_get_sw_cfg *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
- cmd = &desc.params.get_sw_conf;
+ cmd = libie_aq_raw(&desc);
cmd->element = cpu_to_le16(*req_desc);
status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
@@ -1467,11 +1541,11 @@ ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
{
struct ice_aqc_add_update_free_vsi_resp *res;
struct ice_aqc_add_get_update_free_vsi *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
- cmd = &desc.params.vsi_cmd;
- res = &desc.params.add_update_free_vsi_res;
+ cmd = libie_aq_raw(&desc);
+ res = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
@@ -1482,7 +1556,7 @@ ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
sizeof(vsi_ctx->info), cd);
@@ -1511,11 +1585,11 @@ ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
{
struct ice_aqc_add_update_free_vsi_resp *resp;
struct ice_aqc_add_get_update_free_vsi *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
- cmd = &desc.params.vsi_cmd;
- resp = &desc.params.add_update_free_vsi_res;
+ cmd = libie_aq_raw(&desc);
+ resp = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
@@ -1546,17 +1620,17 @@ ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
{
struct ice_aqc_add_update_free_vsi_resp *resp;
struct ice_aqc_add_get_update_free_vsi *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
- cmd = &desc.params.vsi_cmd;
- resp = &desc.params.add_update_free_vsi_res;
+ cmd = libie_aq_raw(&desc);
+ resp = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
sizeof(vsi_ctx->info), cd);
@@ -1825,7 +1899,8 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
lkup_type == ICE_SW_LKUP_PROMISC ||
lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
- lkup_type == ICE_SW_LKUP_DFLT) {
+ lkup_type == ICE_SW_LKUP_DFLT ||
+ lkup_type == ICE_SW_LKUP_LAST) {
sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
} else if (lkup_type == ICE_SW_LKUP_VLAN) {
if (opc == ice_aqc_opc_alloc_res)
@@ -1869,7 +1944,8 @@ int
ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
- struct ice_aq_desc desc;
+ struct ice_aqc_sw_rules *cmd;
+ struct libie_aq_desc desc;
int status;
if (opc != ice_aqc_opc_add_sw_rules &&
@@ -1878,15 +1954,24 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, opc);
+ cmd = libie_aq_raw(&desc);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
- desc.params.sw_rules.num_rules_fltr_entry_index =
- cpu_to_le16(num_rules);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
+ cmd->num_rules_fltr_entry_index = cpu_to_le16(num_rules);
status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
if (opc != ice_aqc_opc_add_sw_rules &&
- hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
+ hw->adminq.sq_last_status == LIBIE_AQ_RC_ENOENT)
status = -ENOENT;
+ if (!status) {
+ if (opc == ice_aqc_opc_add_sw_rules)
+ hw->switch_info->rule_cnt += num_rules;
+ else if (opc == ice_aqc_opc_remove_sw_rules)
+ hw->switch_info->rule_cnt -= num_rules;
+ }
+
+ trace_ice_aq_sw_rules(hw->switch_info);
+
return status;
}
@@ -1905,14 +1990,14 @@ ice_aq_add_recipe(struct ice_hw *hw,
u16 num_recipes, struct ice_sq_cd *cd)
{
struct ice_aqc_add_get_recipe *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
u16 buf_size;
- cmd = &desc.params.add_get_recipe;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
cmd->num_sub_recipes = cpu_to_le16(num_recipes);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
buf_size = num_recipes * sizeof(*s_recipe_list);
@@ -1942,14 +2027,14 @@ ice_aq_get_recipe(struct ice_hw *hw,
u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
{
struct ice_aqc_add_get_recipe *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
u16 buf_size;
int status;
if (*num_recipes != ICE_MAX_NUM_RECIPES)
return -EINVAL;
- cmd = &desc.params.add_get_recipe;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
cmd->return_index = cpu_to_le16(recipe_root);
@@ -2034,9 +2119,9 @@ ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 r_assoc,
struct ice_sq_cd *cd)
{
struct ice_aqc_recipe_to_profile *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.recipe_to_profile;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
cmd->profile_id = cpu_to_le16(profile_id);
/* Set the recipe ID bit in the bitmask to let the device know which
@@ -2060,10 +2145,10 @@ ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 *r_assoc,
struct ice_sq_cd *cd)
{
struct ice_aqc_recipe_to_profile *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
- cmd = &desc.params.recipe_to_profile;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
cmd->profile_id = cpu_to_le16(profile_id);
@@ -2075,6 +2160,18 @@ ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 *r_assoc,
}
/**
+ * ice_init_chk_recipe_reuse_support - check if recipe reuse is supported
+ * @hw: pointer to the hardware structure
+ */
+void ice_init_chk_recipe_reuse_support(struct ice_hw *hw)
+{
+ struct ice_nvm_info *nvm = &hw->flash.nvm;
+
+ hw->recp_reuse = (nvm->major == 0x4 && nvm->minor >= 0x30) ||
+ nvm->major > 0x4;
+}
+
+/**
* ice_alloc_recipe - add recipe resource
* @hw: pointer to the hardware structure
* @rid: recipe ID returned as response to AQ call
@@ -2083,21 +2180,97 @@ int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
{
DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1);
u16 buf_len = __struct_size(sw_buf);
+ u16 res_type;
int status;
sw_buf->num_elems = cpu_to_le16(1);
- sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE <<
- ICE_AQC_RES_TYPE_S) |
- ICE_AQC_RES_TYPE_FLAG_SHARED);
+ res_type = FIELD_PREP(ICE_AQC_RES_TYPE_M, ICE_AQC_RES_TYPE_RECIPE);
+ if (hw->recp_reuse)
+ res_type |= ICE_AQC_RES_TYPE_FLAG_SUBSCRIBE_SHARED;
+ else
+ res_type |= ICE_AQC_RES_TYPE_FLAG_SHARED;
+ sw_buf->res_type = cpu_to_le16(res_type);
status = ice_aq_alloc_free_res(hw, sw_buf, buf_len,
ice_aqc_opc_alloc_res);
- if (!status)
+ if (!status) {
*rid = le16_to_cpu(sw_buf->elem[0].e.sw_resp);
+ hw->switch_info->recp_cnt++;
+ }
+
+ return status;
+}
+
+/**
+ * ice_free_recipe_res - free recipe resource
+ * @hw: pointer to the hardware structure
+ * @rid: recipe ID to free
+ *
+ * Return: 0 on success, negative error code on failure
+ */
+static int ice_free_recipe_res(struct ice_hw *hw, u16 rid)
+{
+ int status;
+
+ status = ice_free_hw_res(hw, ICE_AQC_RES_TYPE_RECIPE, 1, &rid);
+ if (!status)
+ hw->switch_info->recp_cnt--;
return status;
}
/**
+ * ice_release_recipe_res - disassociate and free recipe resource
+ * @hw: pointer to the hardware structure
+ * @recp: the recipe resource to disassociate and free
+ *
+ * Return: 0 on success, negative error code on failure
+ */
+static int ice_release_recipe_res(struct ice_hw *hw,
+ struct ice_sw_recipe *recp)
+{
+ DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
+ struct ice_switch_info *sw = hw->switch_info;
+ u64 recp_assoc;
+ u32 rid, prof;
+ int status;
+
+ for_each_set_bit(rid, recp->r_bitmap, ICE_MAX_NUM_RECIPES) {
+ for_each_set_bit(prof, recipe_to_profile[rid],
+ ICE_MAX_NUM_PROFILES) {
+ status = ice_aq_get_recipe_to_profile(hw, prof,
+ &recp_assoc,
+ NULL);
+ if (status)
+ return status;
+
+ bitmap_from_arr64(r_bitmap, &recp_assoc,
+ ICE_MAX_NUM_RECIPES);
+ bitmap_andnot(r_bitmap, r_bitmap, recp->r_bitmap,
+ ICE_MAX_NUM_RECIPES);
+ bitmap_to_arr64(&recp_assoc, r_bitmap,
+ ICE_MAX_NUM_RECIPES);
+ ice_aq_map_recipe_to_profile(hw, prof,
+ recp_assoc, NULL);
+
+ clear_bit(rid, profile_to_recipe[prof]);
+ clear_bit(prof, recipe_to_profile[rid]);
+ }
+
+ status = ice_free_recipe_res(hw, rid);
+ if (status)
+ return status;
+
+ sw->recp_list[rid].recp_created = false;
+ sw->recp_list[rid].adv_rule = false;
+ memset(&sw->recp_list[rid].lkup_exts, 0,
+ sizeof(sw->recp_list[rid].lkup_exts));
+ clear_bit(rid, recp->r_bitmap);
+ }
+
+ return 0;
+}
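
A minimal userspace sketch of the association update that ice_release_recipe_res() performs: the 64-bit recipe-to-profile association is widened to a bitmap, the recipe's own bits are cleared, and the result is written back. Plain uint64_t masks and illustrative values stand in for the kernel bitmap helpers; this is an illustration only, not part of the patch.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t recp_assoc = 0x34;	/* recipes 2, 4 and 5 associated with the profile */
	uint64_t recp_bits  = 0x30;	/* recipes 4 and 5 are being released */

	/* stands in for bitmap_from_arr64() + bitmap_andnot() + bitmap_to_arr64() */
	recp_assoc &= ~recp_bits;

	printf("remaining association: %#llx\n",
	       (unsigned long long)recp_assoc);	/* prints 0x4: recipe 2 only */
	return 0;
}
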
+
+/**
* ice_get_recp_to_prof_map - updates recipe to profile mapping
* @hw: pointer to hardware structure
*
@@ -2127,25 +2300,12 @@ static void ice_get_recp_to_prof_map(struct ice_hw *hw)
}
/**
- * ice_collect_result_idx - copy result index values
- * @buf: buffer that contains the result index
- * @recp: the recipe struct to copy data into
- */
-static void
-ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
- struct ice_sw_recipe *recp)
-{
- if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
- set_bit(buf->content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
- recp->res_idxs);
-}
-
-/**
* ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
* @hw: pointer to hardware structure
* @recps: struct that we need to populate
* @rid: recipe ID that we are populating
* @refresh_required: true if we should get recipe to profile mapping from FW
+ * @is_add: true if the recipe is being added
*
* This function is used to populate all the necessary entries into our
* bookkeeping so that we have a current list of all the recipes that are
@@ -2153,7 +2313,7 @@ ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
*/
static int
ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
- bool *refresh_required)
+ bool *refresh_required, bool is_add)
{
DECLARE_BITMAP(result_bm, ICE_MAX_FV_WORDS);
struct ice_aqc_recipe_data_elem *tmp;
@@ -2197,18 +2357,10 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
- struct ice_recp_grp_entry *rg_entry;
u8 i, prof, idx, prot = 0;
bool is_root;
u16 off = 0;
- rg_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rg_entry),
- GFP_KERNEL);
- if (!rg_entry) {
- status = -ENOMEM;
- goto err_unroll;
- }
-
idx = root_bufs.recipe_indx;
is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
@@ -2221,11 +2373,8 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
prof = find_first_bit(recipe_to_profile[idx],
ICE_MAX_NUM_PROFILES);
for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
- u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
-
- rg_entry->fv_idx[i] = lkup_indx;
- rg_entry->fv_mask[i] =
- le16_to_cpu(root_bufs.content.mask[i + 1]);
+ u8 lkup_indx = root_bufs.content.lkup_indx[i];
+ u16 lkup_mask = le16_to_cpu(root_bufs.content.mask[i]);
/* If the recipe is a chained recipe then all its
* child recipe's result will have a result index.
@@ -2236,42 +2385,38 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
* has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
* valid offset value.
*/
- if (test_bit(rg_entry->fv_idx[i], hw->switch_info->prof_res_bm[prof]) ||
- rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
- rg_entry->fv_idx[i] == 0)
+ if (!lkup_indx ||
+ (lkup_indx & ICE_AQ_RECIPE_LKUP_IGNORE) ||
+ test_bit(lkup_indx,
+ hw->switch_info->prof_res_bm[prof]))
continue;
- ice_find_prot_off(hw, ICE_BLK_SW, prof,
- rg_entry->fv_idx[i], &prot, &off);
+ ice_find_prot_off(hw, ICE_BLK_SW, prof, lkup_indx,
+ &prot, &off);
lkup_exts->fv_words[fv_word_idx].prot_id = prot;
lkup_exts->fv_words[fv_word_idx].off = off;
- lkup_exts->field_mask[fv_word_idx] =
- rg_entry->fv_mask[i];
+ lkup_exts->field_mask[fv_word_idx] = lkup_mask;
fv_word_idx++;
}
- /* populate rg_list with the data from the child entry of this
- * recipe
- */
- list_add(&rg_entry->l_entry, &recps[rid].rg_list);
/* Propagate some data to the recipe database */
- recps[idx].is_root = !!is_root;
recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
- recps[idx].need_pass_l2 = root_bufs.content.act_ctrl &
- ICE_AQ_RECIPE_ACT_NEED_PASS_L2;
- recps[idx].allow_pass_l2 = root_bufs.content.act_ctrl &
- ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2;
+ recps[idx].need_pass_l2 = !!(root_bufs.content.act_ctrl &
+ ICE_AQ_RECIPE_ACT_NEED_PASS_L2);
+ recps[idx].allow_pass_l2 = !!(root_bufs.content.act_ctrl &
+ ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2);
bitmap_zero(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
- recps[idx].chain_idx = root_bufs.content.result_indx &
- ~ICE_AQ_RECIPE_RESULT_EN;
- set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
- } else {
- recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
+ set_bit(root_bufs.content.result_indx &
+ ~ICE_AQ_RECIPE_RESULT_EN, recps[idx].res_idxs);
}
- if (!is_root)
+ if (!is_root) {
+ if (hw->recp_reuse && is_add)
+ recps[idx].recp_created = true;
+
continue;
+ }
/* Only do the following for root recipes entries */
memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
@@ -2283,19 +2428,11 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
/* Complete initialization of the root recipe entry */
lkup_exts->n_val_words = fv_word_idx;
- recps[rid].big_recp = (num_recps > 1);
- recps[rid].n_grp_count = (u8)num_recps;
- recps[rid].root_buf = devm_kmemdup(ice_hw_to_dev(hw), tmp,
- recps[rid].n_grp_count * sizeof(*recps[rid].root_buf),
- GFP_KERNEL);
- if (!recps[rid].root_buf) {
- status = -ENOMEM;
- goto err_unroll;
- }
/* Copy result indexes */
bitmap_copy(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
- recps[rid].recp_created = true;
+ if (is_add)
+ recps[rid].recp_created = true;
err_unroll:
kfree(tmp);
@@ -2446,6 +2583,9 @@ static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
fi->lan_en = true;
}
}
+
+ if (fi->flag & ICE_FLTR_TX_ONLY)
+ fi->lan_en = false;
}
/**
@@ -2759,7 +2899,8 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
lkup_type == ICE_SW_LKUP_PROMISC ||
lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
- lkup_type == ICE_SW_LKUP_DFLT)
+ lkup_type == ICE_SW_LKUP_DFLT ||
+ lkup_type == ICE_SW_LKUP_LAST)
rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
ICE_AQC_SW_RULES_T_VSI_LIST_SET;
else if (lkup_type == ICE_SW_LKUP_VLAN)
@@ -3006,7 +3147,7 @@ ice_add_update_vsi_list(struct ice_hw *hw,
u16 vsi_handle_arr[2];
/* A rule already exists with the new VSI being added */
- if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
+ if (cur_fltr->vsi_handle == new_fltr->vsi_handle)
return -EEXIST;
vsi_handle_arr[0] = cur_fltr->vsi_handle;
@@ -3054,7 +3195,7 @@ ice_add_update_vsi_list(struct ice_hw *hw,
/* A rule already exists with the new VSI being added */
if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
- return 0;
+ return -EEXIST;
/* Update the previously created VSI list set with
* the new VSI ID passed in
@@ -3124,7 +3265,7 @@ ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
list_head = &sw->recp_list[recp_id].filt_rules;
list_for_each_entry(list_itr, list_head, list_entry) {
- if (list_itr->vsi_list_info) {
+ if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) {
map_info = list_itr->vsi_list_info;
if (test_bit(vsi_handle, map_info->vsi_map)) {
*vsi_list_id = map_info->vsi_list_id;
@@ -3821,6 +3962,7 @@ ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
} else if (f_info.flag & ICE_FLTR_TX) {
f_info.src_id = ICE_SRC_ID_VSI;
f_info.src = hw_vsi_id;
+ f_info.flag |= ICE_FLTR_TX_ONLY;
}
f_list_entry.fltr_info = f_info;
@@ -4528,6 +4670,7 @@ static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
ICE_PROTOCOL_ENTRY(ICE_NVGRE, 0, 2, 4, 6),
ICE_PROTOCOL_ENTRY(ICE_GTP, 8, 10, 12, 14, 16, 18, 20, 22),
ICE_PROTOCOL_ENTRY(ICE_GTP_NO_PAY, 8, 10, 12, 14),
+ ICE_PROTOCOL_ENTRY(ICE_PFCP, 8, 10, 12, 14, 16, 18, 20, 22),
ICE_PROTOCOL_ENTRY(ICE_PPPOE, 0, 2, 4, 6),
ICE_PROTOCOL_ENTRY(ICE_L2TPV3, 0, 2, 4, 6, 8, 10),
ICE_PROTOCOL_ENTRY(ICE_VLAN_EX, 2, 0),
@@ -4561,6 +4704,7 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
{ ICE_NVGRE, ICE_GRE_OF_HW },
{ ICE_GTP, ICE_UDP_OF_HW },
{ ICE_GTP_NO_PAY, ICE_UDP_ILOS_HW },
+ { ICE_PFCP, ICE_UDP_ILOS_HW },
{ ICE_PPPOE, ICE_PPPOE_HW },
{ ICE_L2TPV3, ICE_L2TPV3_HW },
{ ICE_VLAN_EX, ICE_VLAN_OF_HW },
@@ -4573,12 +4717,13 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
* @hw: pointer to the hardware structure
* @lkup_exts: extension sequence to match
* @rinfo: information regarding the rule e.g. priority and action info
+ * @is_add: true if the recipe is being added
*
* Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
*/
static u16
ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
- const struct ice_adv_rule_info *rinfo)
+ const struct ice_adv_rule_info *rinfo, bool is_add)
{
bool refresh_required = true;
struct ice_sw_recipe *recp;
@@ -4592,16 +4737,12 @@ ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
* entry update it in our SW bookkeeping and continue with the
* matching.
*/
- if (!recp[i].recp_created)
+ if (hw->recp_reuse) {
if (ice_get_recp_frm_fw(hw,
hw->switch_info->recp_list, i,
- &refresh_required))
+ &refresh_required, is_add))
continue;
-
- /* Skip inverse action recipes */
- if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
- ICE_AQ_RECIPE_ACT_INV_ACT)
- continue;
+ }
/* if number of words we are looking for match */
if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
@@ -4644,7 +4785,8 @@ ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
*/
if (found && recp[i].tun_type == rinfo->tun_type &&
recp[i].need_pass_l2 == rinfo->need_pass_l2 &&
- recp[i].allow_pass_l2 == rinfo->allow_pass_l2)
+ recp[i].allow_pass_l2 == rinfo->allow_pass_l2 &&
+ recp[i].priority == rinfo->priority)
return i; /* Return the recipe ID */
}
}
@@ -4727,110 +4869,55 @@ ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
}
/**
- * ice_create_first_fit_recp_def - Create a recipe grouping
- * @hw: pointer to the hardware structure
- * @lkup_exts: an array of protocol header extractions
- * @rg_list: pointer to a list that stores new recipe groups
- * @recp_cnt: pointer to a variable that stores returned number of recipe groups
- *
- * Using first fit algorithm, take all the words that are still not done
- * and start grouping them in 4-word groups. Each group makes up one
- * recipe.
- */
-static int
-ice_create_first_fit_recp_def(struct ice_hw *hw,
- struct ice_prot_lkup_ext *lkup_exts,
- struct list_head *rg_list,
- u8 *recp_cnt)
-{
- struct ice_pref_recipe_group *grp = NULL;
- u8 j;
-
- *recp_cnt = 0;
-
- /* Walk through every word in the rule to check if it is not done. If so
- * then this word needs to be part of a new recipe.
- */
- for (j = 0; j < lkup_exts->n_val_words; j++)
- if (!test_bit(j, lkup_exts->done)) {
- if (!grp ||
- grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
- struct ice_recp_grp_entry *entry;
-
- entry = devm_kzalloc(ice_hw_to_dev(hw),
- sizeof(*entry),
- GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
- list_add(&entry->l_entry, rg_list);
- grp = &entry->r_group;
- (*recp_cnt)++;
- }
-
- grp->pairs[grp->n_val_pairs].prot_id =
- lkup_exts->fv_words[j].prot_id;
- grp->pairs[grp->n_val_pairs].off =
- lkup_exts->fv_words[j].off;
- grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
- grp->n_val_pairs++;
- }
-
- return 0;
-}
-
-/**
* ice_fill_fv_word_index - fill in the field vector indices for a recipe group
* @hw: pointer to the hardware structure
- * @fv_list: field vector with the extraction sequence information
- * @rg_list: recipe groupings with protocol-offset pairs
+ * @rm: recipe management list entry
*
* Helper function to fill in the field vector indices for protocol-offset
* pairs. These indexes are then ultimately programmed into a recipe.
*/
static int
-ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list,
- struct list_head *rg_list)
+ice_fill_fv_word_index(struct ice_hw *hw, struct ice_sw_recipe *rm)
{
struct ice_sw_fv_list_entry *fv;
- struct ice_recp_grp_entry *rg;
struct ice_fv_word *fv_ext;
+ u8 i;
- if (list_empty(fv_list))
- return 0;
+ if (list_empty(&rm->fv_list))
+ return -EINVAL;
- fv = list_first_entry(fv_list, struct ice_sw_fv_list_entry,
+ fv = list_first_entry(&rm->fv_list, struct ice_sw_fv_list_entry,
list_entry);
fv_ext = fv->fv_ptr->ew;
- list_for_each_entry(rg, rg_list, l_entry) {
- u8 i;
-
- for (i = 0; i < rg->r_group.n_val_pairs; i++) {
- struct ice_fv_word *pr;
- bool found = false;
- u16 mask;
- u8 j;
-
- pr = &rg->r_group.pairs[i];
- mask = rg->r_group.mask[i];
+ /* Add switch id as the first word. */
+ rm->fv_idx[0] = ICE_AQ_SW_ID_LKUP_IDX;
+ rm->fv_mask[0] = ICE_AQ_SW_ID_LKUP_MASK;
+ rm->n_ext_words++;
- for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
- if (fv_ext[j].prot_id == pr->prot_id &&
- fv_ext[j].off == pr->off) {
- found = true;
+ for (i = 1; i < rm->n_ext_words; i++) {
+ struct ice_fv_word *fv_word = &rm->ext_words[i - 1];
+ u16 fv_mask = rm->word_masks[i - 1];
+ bool found = false;
+ u8 j;
- /* Store index of field vector */
- rg->fv_idx[i] = j;
- rg->fv_mask[i] = mask;
- break;
- }
+ for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) {
+ if (fv_ext[j].prot_id == fv_word->prot_id &&
+ fv_ext[j].off == fv_word->off) {
+ found = true;
- /* Protocol/offset could not be found, caller gave an
- * invalid pair
- */
- if (!found)
- return -EINVAL;
+ /* Store index of field vector */
+ rm->fv_idx[i] = j;
+ rm->fv_mask[i] = fv_mask;
+ break;
+ }
}
+
+ /* Protocol/offset could not be found, caller gave an invalid
+ * pair.
+ */
+ if (!found)
+ return -EINVAL;
}
return 0;
@@ -4904,335 +4991,223 @@ ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles,
}
/**
- * ice_add_sw_recipe - function to call AQ calls to create switch recipe
- * @hw: pointer to hardware structure
- * @rm: recipe management list entry
- * @profiles: bitmap of profiles that will be associated.
+ * ice_calc_recp_cnt - calculate number of recipes based on word count
+ * @word_cnt: number of lookup words
+ *
+ * Word count should include the switch ID word and the regular lookup words.
+ *
+ * Return: number of recipes required to fit @word_cnt words, including any
+ * extra recipes needed for chaining.
*/
-static int
-ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
- unsigned long *profiles)
+static int ice_calc_recp_cnt(u8 word_cnt)
{
- DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS);
- struct ice_aqc_recipe_content *content;
- struct ice_aqc_recipe_data_elem *tmp;
- struct ice_aqc_recipe_data_elem *buf;
- struct ice_recp_grp_entry *entry;
- u16 free_res_idx;
- u16 recipe_count;
- u8 chain_idx;
- u8 recps = 0;
- int status;
+ /* All words fit in a single recipe, no need for chaining. */
+ if (word_cnt <= ICE_NUM_WORDS_RECIPE)
+ return 1;
- /* When more than one recipe are required, another recipe is needed to
- * chain them together. Matching a tunnel metadata ID takes up one of
- * the match fields in the chaining recipe reducing the number of
- * chained recipes by one.
+ /* Recipe chaining required. Result indexes are fitted right after
+ * regular lookup words. In some cases a new recipe must be added in
+ * order to fit result indexes.
+ *
+ * As the word count grows, an extra recipe is needed for every 5 words.
+ * However, adding a recipe also consumes one word for its result index,
+ * so in practice the recipe count increases by 1 for every 4 words. This
+ * calculation does not apply to word count == 1, which is handled above.
*/
- /* check number of free result indices */
- bitmap_zero(result_idx_bm, ICE_MAX_FV_WORDS);
- free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
+ return (word_cnt + 2) / (ICE_NUM_WORDS_RECIPE - 1);
+}
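
A standalone sketch of the arithmetic above, handy for sanity-checking how lookup-word counts map to recipe counts. It assumes ICE_NUM_WORDS_RECIPE is 5 (the switch ID word plus four lookup slots per recipe), as used elsewhere in this series; it is an illustration only, not part of the patch.

#include <stdio.h>

#define ICE_NUM_WORDS_RECIPE 5

static int calc_recp_cnt(int word_cnt)
{
	/* Mirrors ice_calc_recp_cnt(): one recipe if everything fits,
	 * otherwise roughly one extra recipe per 4 additional words.
	 */
	if (word_cnt <= ICE_NUM_WORDS_RECIPE)
		return 1;
	return (word_cnt + 2) / (ICE_NUM_WORDS_RECIPE - 1);
}

int main(void)
{
	/* 1-5 words -> 1 recipe, 6-9 -> 2, 10-13 -> 3, 14 -> 4, ... */
	for (int words = 1; words <= 14; words++)
		printf("%2d lookup words -> %d recipe(s)\n",
		       words, calc_recp_cnt(words));
	return 0;
}
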
- ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
- free_res_idx, rm->n_grp_count);
+static void fill_recipe_template(struct ice_aqc_recipe_data_elem *recp, u16 rid,
+ const struct ice_sw_recipe *rm)
+{
+ int i;
- if (rm->n_grp_count > 1) {
- if (rm->n_grp_count > free_res_idx)
- return -ENOSPC;
+ recp->recipe_indx = rid;
+ recp->content.act_ctrl |= ICE_AQ_RECIPE_ACT_PRUNE_INDX_M;
- rm->n_grp_count++;
+ for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
+ recp->content.lkup_indx[i] = ICE_AQ_RECIPE_LKUP_IGNORE;
+ recp->content.mask[i] = cpu_to_le16(0);
}
- if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
- return -ENOSPC;
+ set_bit(rid, (unsigned long *)recp->recipe_bitmap);
+ recp->content.act_ctrl_fwd_priority = rm->priority;
- tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
- if (!tmp)
- return -ENOMEM;
+ if (rm->need_pass_l2)
+ recp->content.act_ctrl |= ICE_AQ_RECIPE_ACT_NEED_PASS_L2;
- buf = devm_kcalloc(ice_hw_to_dev(hw), rm->n_grp_count, sizeof(*buf),
- GFP_KERNEL);
- if (!buf) {
- status = -ENOMEM;
- goto err_mem;
- }
-
- bitmap_zero(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
- recipe_count = ICE_MAX_NUM_RECIPES;
- status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
- NULL);
- if (status || recipe_count == 0)
- goto err_unroll;
+ if (rm->allow_pass_l2)
+ recp->content.act_ctrl |= ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2;
+}
- /* Allocate the recipe resources, and configure them according to the
- * match fields from protocol headers and extracted field vectors.
- */
- chain_idx = find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
- list_for_each_entry(entry, &rm->rg_list, l_entry) {
- u8 i;
+static void bookkeep_recipe(struct ice_sw_recipe *recipe,
+ struct ice_aqc_recipe_data_elem *r,
+ const struct ice_sw_recipe *rm)
+{
+ memcpy(recipe->r_bitmap, r->recipe_bitmap, sizeof(recipe->r_bitmap));
- status = ice_alloc_recipe(hw, &entry->rid);
- if (status)
- goto err_unroll;
+ recipe->priority = r->content.act_ctrl_fwd_priority;
+ recipe->tun_type = rm->tun_type;
+ recipe->need_pass_l2 = rm->need_pass_l2;
+ recipe->allow_pass_l2 = rm->allow_pass_l2;
+ recipe->recp_created = true;
+}
- content = &buf[recps].content;
+/* For memcpy in ice_add_sw_recipe. */
+static_assert(sizeof_field(struct ice_aqc_recipe_data_elem, recipe_bitmap) ==
+ sizeof_field(struct ice_sw_recipe, r_bitmap));
- /* Clear the result index of the located recipe, as this will be
- * updated, if needed, later in the recipe creation process.
- */
- tmp[0].content.result_indx = 0;
+/**
+ * ice_add_sw_recipe - function to call AQ calls to create switch recipe
+ * @hw: pointer to hardware structure
+ * @rm: recipe management list entry
+ * @profiles: bitmap of profiles that will be associated.
+ */
+static int
+ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
+ unsigned long *profiles)
+{
+ struct ice_aqc_recipe_data_elem *buf __free(kfree) = NULL;
+ DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS);
+ struct ice_aqc_recipe_data_elem *root;
+ struct ice_sw_recipe *recipe;
+ u16 free_res_idx, rid;
+ int lookup = 0;
+ int recp_cnt;
+ int status;
+ int word;
+ int i;
- buf[recps] = tmp[0];
- buf[recps].recipe_indx = (u8)entry->rid;
- /* if the recipe is a non-root recipe RID should be programmed
- * as 0 for the rules to be applied correctly.
- */
- content->rid = 0;
- memset(&content->lkup_indx, 0,
- sizeof(content->lkup_indx));
-
- /* All recipes use look-up index 0 to match switch ID. */
- content->lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
- content->mask[0] = cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
- /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
- * to be 0
- */
- for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
- content->lkup_indx[i] = 0x80;
- content->mask[i] = 0;
- }
+ recp_cnt = ice_calc_recp_cnt(rm->n_ext_words);
- for (i = 0; i < entry->r_group.n_val_pairs; i++) {
- content->lkup_indx[i + 1] = entry->fv_idx[i];
- content->mask[i + 1] = cpu_to_le16(entry->fv_mask[i]);
- }
+ bitmap_zero(result_idx_bm, ICE_MAX_FV_WORDS);
+ bitmap_zero(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
- if (rm->n_grp_count > 1) {
- /* Checks to see if there really is a valid result index
- * that can be used.
- */
- if (chain_idx >= ICE_MAX_FV_WORDS) {
- ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
- status = -ENOSPC;
- goto err_unroll;
- }
+ /* Check number of free result indices */
+ free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
- entry->chain_idx = chain_idx;
- content->result_indx =
- ICE_AQ_RECIPE_RESULT_EN |
- FIELD_PREP(ICE_AQ_RECIPE_RESULT_DATA_M,
- chain_idx);
- clear_bit(chain_idx, result_idx_bm);
- chain_idx = find_first_bit(result_idx_bm,
- ICE_MAX_FV_WORDS);
- }
+ ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
+ free_res_idx, recp_cnt);
- /* fill recipe dependencies */
- bitmap_zero((unsigned long *)buf[recps].recipe_bitmap,
- ICE_MAX_NUM_RECIPES);
- set_bit(buf[recps].recipe_indx,
- (unsigned long *)buf[recps].recipe_bitmap);
- content->act_ctrl_fwd_priority = rm->priority;
+ /* Last recipe doesn't need result index */
+ if (recp_cnt - 1 > free_res_idx)
+ return -ENOSPC;
- if (rm->need_pass_l2)
- content->act_ctrl |= ICE_AQ_RECIPE_ACT_NEED_PASS_L2;
+ if (recp_cnt > ICE_MAX_CHAIN_RECIPE_RES)
+ return -E2BIG;
- if (rm->allow_pass_l2)
- content->act_ctrl |= ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2;
- recps++;
- }
+ buf = kcalloc(recp_cnt, sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
- if (rm->n_grp_count == 1) {
- rm->root_rid = buf[0].recipe_indx;
- set_bit(buf[0].recipe_indx, rm->r_bitmap);
- buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
- if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
- memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
- sizeof(buf[0].recipe_bitmap));
- } else {
- status = -EINVAL;
- goto err_unroll;
- }
- /* Applicable only for ROOT_RECIPE, set the fwd_priority for
- * the recipe which is getting created if specified
- * by user. Usually any advanced switch filter, which results
- * into new extraction sequence, ended up creating a new recipe
- * of type ROOT and usually recipes are associated with profiles
- * Switch rule referreing newly created recipe, needs to have
- * either/or 'fwd' or 'join' priority, otherwise switch rule
- * evaluation will not happen correctly. In other words, if
- * switch rule to be evaluated on priority basis, then recipe
- * needs to have priority, otherwise it will be evaluated last.
- */
- buf[0].content.act_ctrl_fwd_priority = rm->priority;
- } else {
- struct ice_recp_grp_entry *last_chain_entry;
- u16 rid, i;
+ /* Set up the non-root subrecipes. These do not contain lookups for
+ * other subrecipes' results; their recipe association bitmap holds
+ * only their own recipe index. Each non-root subrecipe needs a free
+ * result index from the FV.
+ *
+ * Note: only done if there is more than one recipe.
+ */
+ for (i = 0; i < recp_cnt - 1; i++) {
+ struct ice_aqc_recipe_content *content;
+ u8 result_idx;
- /* Allocate the last recipe that will chain the outcomes of the
- * other recipes together
- */
status = ice_alloc_recipe(hw, &rid);
if (status)
- goto err_unroll;
+ return status;
- content = &buf[recps].content;
+ fill_recipe_template(&buf[i], rid, rm);
- buf[recps].recipe_indx = (u8)rid;
- content->rid = (u8)rid;
- content->rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
- /* the new entry created should also be part of rg_list to
- * make sure we have complete recipe
+ result_idx = find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
+ /* Check if there really is a valid result index that can be
+ * used.
*/
- last_chain_entry = devm_kzalloc(ice_hw_to_dev(hw),
- sizeof(*last_chain_entry),
- GFP_KERNEL);
- if (!last_chain_entry) {
- status = -ENOMEM;
- goto err_unroll;
- }
- last_chain_entry->rid = rid;
- memset(&content->lkup_indx, 0, sizeof(content->lkup_indx));
- /* All recipes use look-up index 0 to match switch ID. */
- content->lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
- content->mask[0] = cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
- for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
- content->lkup_indx[i] = ICE_AQ_RECIPE_LKUP_IGNORE;
- content->mask[i] = 0;
+ if (result_idx >= ICE_MAX_FV_WORDS) {
+ ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
+ return -ENOSPC;
}
+ clear_bit(result_idx, result_idx_bm);
+
+ content = &buf[i].content;
+ content->result_indx = ICE_AQ_RECIPE_RESULT_EN |
+ FIELD_PREP(ICE_AQ_RECIPE_RESULT_DATA_M,
+ result_idx);
- i = 1;
- /* update r_bitmap with the recp that is used for chaining */
+ /* Set recipe association to be used for root recipe */
set_bit(rid, rm->r_bitmap);
- /* this is the recipe that chains all the other recipes so it
- * should not have a chaining ID to indicate the same
- */
- last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
- list_for_each_entry(entry, &rm->rg_list, l_entry) {
- last_chain_entry->fv_idx[i] = entry->chain_idx;
- content->lkup_indx[i] = entry->chain_idx;
- content->mask[i++] = cpu_to_le16(0xFFFF);
- set_bit(entry->rid, rm->r_bitmap);
- }
- list_add(&last_chain_entry->l_entry, &rm->rg_list);
- if (sizeof(buf[recps].recipe_bitmap) >=
- sizeof(rm->r_bitmap)) {
- memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
- sizeof(buf[recps].recipe_bitmap));
- } else {
- status = -EINVAL;
- goto err_unroll;
+
+ word = 0;
+ while (lookup < rm->n_ext_words &&
+ word < ICE_NUM_WORDS_RECIPE) {
+ content->lkup_indx[word] = rm->fv_idx[lookup];
+ content->mask[word] = cpu_to_le16(rm->fv_mask[lookup]);
+
+ lookup++;
+ word++;
}
- content->act_ctrl_fwd_priority = rm->priority;
- recps++;
- rm->root_rid = (u8)rid;
+ recipe = &hw->switch_info->recp_list[rid];
+ set_bit(result_idx, recipe->res_idxs);
+ bookkeep_recipe(recipe, &buf[i], rm);
}
- status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
- if (status)
- goto err_unroll;
- status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
- ice_release_change_lock(hw);
+ /* Set up the root recipe */
+ status = ice_alloc_recipe(hw, &rid);
if (status)
- goto err_unroll;
-
- /* Every recipe that just got created add it to the recipe
- * book keeping list
- */
- list_for_each_entry(entry, &rm->rg_list, l_entry) {
- struct ice_switch_info *sw = hw->switch_info;
- bool is_root, idx_found = false;
- struct ice_sw_recipe *recp;
- u16 idx, buf_idx = 0;
-
- /* find buffer index for copying some data */
- for (idx = 0; idx < rm->n_grp_count; idx++)
- if (buf[idx].recipe_indx == entry->rid) {
- buf_idx = idx;
- idx_found = true;
- }
+ return status;
- if (!idx_found) {
- status = -EIO;
- goto err_unroll;
- }
+ recipe = &hw->switch_info->recp_list[rid];
+ root = &buf[recp_cnt - 1];
+ fill_recipe_template(root, rid, rm);
- recp = &sw->recp_list[entry->rid];
- is_root = (rm->root_rid == entry->rid);
- recp->is_root = is_root;
+ /* Set recipe association, use previously set bitmap and own rid */
+ set_bit(rid, rm->r_bitmap);
+ memcpy(root->recipe_bitmap, rm->r_bitmap, sizeof(root->recipe_bitmap));
- recp->root_rid = entry->rid;
- recp->big_recp = (is_root && rm->n_grp_count > 1);
+ /* For non-root recipes the RID field stays 0; for the root recipe it
+ * must be the actual recipe ID ORed with 0x80 (the is-root bit).
+ */
+ root->content.rid = rid | ICE_AQ_RECIPE_ID_IS_ROOT;
- memcpy(&recp->ext_words, entry->r_group.pairs,
- entry->r_group.n_val_pairs * sizeof(struct ice_fv_word));
+ /* Fill remaining lookups in root recipe */
+ word = 0;
+ while (lookup < rm->n_ext_words &&
+ word < ICE_NUM_WORDS_RECIPE /* should always be true */) {
+ root->content.lkup_indx[word] = rm->fv_idx[lookup];
+ root->content.mask[word] = cpu_to_le16(rm->fv_mask[lookup]);
- memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
- sizeof(recp->r_bitmap));
+ lookup++;
+ word++;
+ }
- /* Copy non-result fv index values and masks to recipe. This
- * call will also update the result recipe bitmask.
+ /* Fill result indexes as lookups */
+ i = 0;
+ while (i < recp_cnt - 1 &&
+ word < ICE_NUM_WORDS_RECIPE /* should always be true */) {
+ root->content.lkup_indx[word] = buf[i].content.result_indx &
+ ~ICE_AQ_RECIPE_RESULT_EN;
+ root->content.mask[word] = cpu_to_le16(0xffff);
+ /* For bookkeeping, the FV index must be marked as used for the
+ * intermediate result.
*/
- ice_collect_result_idx(&buf[buf_idx], recp);
+ set_bit(root->content.lkup_indx[word], recipe->res_idxs);
- /* for non-root recipes, also copy to the root, this allows
- * easier matching of a complete chained recipe
- */
- if (!is_root)
- ice_collect_result_idx(&buf[buf_idx],
- &sw->recp_list[rm->root_rid]);
-
- recp->n_ext_words = entry->r_group.n_val_pairs;
- recp->chain_idx = entry->chain_idx;
- recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
- recp->n_grp_count = rm->n_grp_count;
- recp->tun_type = rm->tun_type;
- recp->need_pass_l2 = rm->need_pass_l2;
- recp->allow_pass_l2 = rm->allow_pass_l2;
- recp->recp_created = true;
+ i++;
+ word++;
}
- rm->root_buf = buf;
- kfree(tmp);
- return status;
-err_unroll:
-err_mem:
- kfree(tmp);
- devm_kfree(ice_hw_to_dev(hw), buf);
- return status;
-}
+ rm->root_rid = rid;
+ bookkeep_recipe(&hw->switch_info->recp_list[rid], root, rm);
-/**
- * ice_create_recipe_group - creates recipe group
- * @hw: pointer to hardware structure
- * @rm: recipe management list entry
- * @lkup_exts: lookup elements
- */
-static int
-ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
- struct ice_prot_lkup_ext *lkup_exts)
-{
- u8 recp_count = 0;
- int status;
-
- rm->n_grp_count = 0;
+ /* Program the recipe */
+ status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
+ if (status)
+ return status;
- /* Create recipes for words that are marked not done by packing them
- * as best fit.
- */
- status = ice_create_first_fit_recp_def(hw, lkup_exts,
- &rm->rg_list, &recp_count);
- if (!status) {
- rm->n_grp_count += recp_count;
- rm->n_ext_words = lkup_exts->n_val_words;
- memcpy(&rm->ext_words, lkup_exts->fv_words,
- sizeof(rm->ext_words));
- memcpy(rm->word_masks, lkup_exts->field_mask,
- sizeof(rm->word_masks));
- }
+ status = ice_aq_add_recipe(hw, buf, recp_cnt, NULL);
+ ice_release_change_lock(hw);
+ if (status)
+ return status;
- return status;
+ return 0;
}
/* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
@@ -5268,6 +5243,9 @@ ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
case ICE_SW_TUN_GTPC:
prof_type = ICE_PROF_TUN_GTPC;
break;
+ case ICE_SW_TUN_PFCP:
+ prof_type = ICE_PROF_TUN_PFCP;
+ break;
case ICE_SW_TUN_AND_NON_TUN:
default:
prof_type = ICE_PROF_ALL;
@@ -5278,6 +5256,49 @@ ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
}
/**
+ * ice_subscribe_recipe - subscribe to an existing recipe
+ * @hw: pointer to the hardware structure
+ * @rid: recipe ID to subscribe to
+ *
+ * Return: 0 on success, negative error code on failure
+ */
+static int ice_subscribe_recipe(struct ice_hw *hw, u16 rid)
+{
+ DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1);
+ u16 buf_len = __struct_size(sw_buf);
+ u16 res_type;
+ int status;
+
+ /* Prepare buffer to allocate resource */
+ sw_buf->num_elems = cpu_to_le16(1);
+ res_type = FIELD_PREP(ICE_AQC_RES_TYPE_M, ICE_AQC_RES_TYPE_RECIPE) |
+ ICE_AQC_RES_TYPE_FLAG_SUBSCRIBE_SHARED |
+ ICE_AQC_RES_TYPE_FLAG_SUBSCRIBE_CTL;
+ sw_buf->res_type = cpu_to_le16(res_type);
+
+ sw_buf->elem[0].e.sw_resp = cpu_to_le16(rid);
+
+ status = ice_aq_alloc_free_res(hw, sw_buf, buf_len,
+ ice_aqc_opc_alloc_res);
+
+ return status;
+}
+
+/**
+ * ice_subscribable_recp_shared - share an existing subscribable recipe
+ * @hw: pointer to the hardware structure
+ * @rid: recipe ID to subscribe to
+ */
+static void ice_subscribable_recp_shared(struct ice_hw *hw, u16 rid)
+{
+ struct ice_sw_recipe *recps = hw->switch_info->recp_list;
+ u16 sub_rid;
+
+ for_each_set_bit(sub_rid, recps[rid].r_bitmap, ICE_MAX_NUM_RECIPES)
+ ice_subscribe_recipe(hw, sub_rid);
+}
+
+/**
* ice_add_adv_recipe - Add an advanced recipe that is not part of the default
* @hw: pointer to hardware structure
* @lkups: lookup elements or match criteria for the advanced recipe, one
@@ -5293,12 +5314,11 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
DECLARE_BITMAP(fv_bitmap, ICE_MAX_NUM_PROFILES);
DECLARE_BITMAP(profiles, ICE_MAX_NUM_PROFILES);
struct ice_prot_lkup_ext *lkup_exts;
- struct ice_recp_grp_entry *r_entry;
struct ice_sw_fv_list_entry *fvit;
- struct ice_recp_grp_entry *r_tmp;
struct ice_sw_fv_list_entry *tmp;
struct ice_sw_recipe *rm;
int status = 0;
+ u16 rid_tmp;
u8 i;
if (!lkups_cnt)
@@ -5336,7 +5356,6 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
* headers being programmed.
*/
INIT_LIST_HEAD(&rm->fv_list);
- INIT_LIST_HEAD(&rm->rg_list);
/* Get bitmap of field vectors (profiles) that are compatible with the
* rule request; only these will be searched in the subsequent call to
@@ -5348,12 +5367,10 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
if (status)
goto err_unroll;
- /* Group match words into recipes using preferred recipe grouping
- * criteria.
- */
- status = ice_create_recipe_group(hw, rm, lkup_exts);
- if (status)
- goto err_unroll;
+ /* Copy FV words and masks from lkup_exts to recipe struct. */
+ rm->n_ext_words = lkup_exts->n_val_words;
+ memcpy(rm->ext_words, lkup_exts->fv_words, sizeof(rm->ext_words));
+ memcpy(rm->word_masks, lkup_exts->field_mask, sizeof(rm->word_masks));
/* set the recipe priority if specified */
rm->priority = (u8)rinfo->priority;
@@ -5364,7 +5381,7 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
/* Find offsets from the field vector. Pick the first one for all the
* recipes.
*/
- status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
+ status = ice_fill_fv_word_index(hw, rm);
if (status)
goto err_unroll;
@@ -5376,10 +5393,14 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
}
/* Look for a recipe which matches our requested fv / mask list */
- *rid = ice_find_recp(hw, lkup_exts, rinfo);
- if (*rid < ICE_MAX_NUM_RECIPES)
+ *rid = ice_find_recp(hw, lkup_exts, rinfo, true);
+ if (*rid < ICE_MAX_NUM_RECIPES) {
/* Success if found a recipe that match the existing criteria */
+ if (hw->recp_reuse)
+ ice_subscribable_recp_shared(hw, *rid);
+
goto err_unroll;
+ }
rm->tun_type = rinfo->tun_type;
/* Recipe we need does not exist, add a recipe */
@@ -5398,14 +5419,14 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
&recp_assoc, NULL);
if (status)
- goto err_unroll;
+ goto err_free_recipe;
bitmap_from_arr64(r_bitmap, &recp_assoc, ICE_MAX_NUM_RECIPES);
bitmap_or(r_bitmap, r_bitmap, rm->r_bitmap,
ICE_MAX_NUM_RECIPES);
status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
if (status)
- goto err_unroll;
+ goto err_free_recipe;
bitmap_to_arr64(&recp_assoc, r_bitmap, ICE_MAX_NUM_RECIPES);
status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
@@ -5413,7 +5434,7 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
ice_release_change_lock(hw);
if (status)
- goto err_unroll;
+ goto err_free_recipe;
/* Update profile to recipe bitmap array */
bitmap_copy(profile_to_recipe[fvit->profile_id], r_bitmap,
@@ -5427,18 +5448,22 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
*rid = rm->root_rid;
memcpy(&hw->switch_info->recp_list[*rid].lkup_exts, lkup_exts,
sizeof(*lkup_exts));
-err_unroll:
- list_for_each_entry_safe(r_entry, r_tmp, &rm->rg_list, l_entry) {
- list_del(&r_entry->l_entry);
- devm_kfree(ice_hw_to_dev(hw), r_entry);
+ goto err_unroll;
+
+err_free_recipe:
+ if (hw->recp_reuse) {
+ for_each_set_bit(rid_tmp, rm->r_bitmap, ICE_MAX_NUM_RECIPES) {
+ if (!ice_free_recipe_res(hw, rid_tmp))
+ clear_bit(rid_tmp, rm->r_bitmap);
+ }
}
+err_unroll:
list_for_each_entry_safe(fvit, tmp, &rm->fv_list, list_entry) {
list_del(&fvit->list_entry);
devm_kfree(ice_hw_to_dev(hw), fvit);
}
- devm_kfree(ice_hw_to_dev(hw), rm->root_buf);
kfree(rm);
err_free_lkup_exts:
@@ -5552,6 +5577,9 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
case ICE_SW_TUN_VXLAN:
match |= ICE_PKT_TUN_UDP;
break;
+ case ICE_SW_TUN_PFCP:
+ match |= ICE_PKT_PFCP;
+ break;
default:
break;
}
@@ -5692,6 +5720,9 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
case ICE_GTP:
len = sizeof(struct ice_udp_gtp_hdr);
break;
+ case ICE_PFCP:
+ len = sizeof(struct ice_pfcp_hdr);
+ break;
case ICE_PPPOE:
len = sizeof(struct ice_pppoe_hdr);
break;
@@ -5948,7 +5979,7 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw,
/* A rule already exists with the new VSI being added */
if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
- return 0;
+ return -EEXIST;
/* Update the previously created VSI list set with
* the new VSI ID passed in
@@ -6293,8 +6324,6 @@ ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
if (!itr->vsi_list_info ||
!test_bit(vsi_handle, itr->vsi_list_info->vsi_map))
continue;
- /* Clearing it so that the logic can add it back */
- clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
f_entry.fltr_info.vsi_handle = vsi_handle;
f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
/* update the src in case it is VSI num */
@@ -6440,7 +6469,7 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
return -EIO;
}
- rid = ice_find_recp(hw, &lkup_exts, rinfo);
+ rid = ice_find_recp(hw, &lkup_exts, rinfo, false);
/* If did not find a recipe that match the existing criteria */
if (rid == ICE_MAX_NUM_RECIPES)
return -EINVAL;
@@ -6484,14 +6513,21 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
ice_aqc_opc_remove_sw_rules, NULL);
if (!status || status == -ENOENT) {
struct ice_switch_info *sw = hw->switch_info;
+ struct ice_sw_recipe *r_list = sw->recp_list;
mutex_lock(rule_lock);
list_del(&list_elem->list_entry);
devm_kfree(ice_hw_to_dev(hw), list_elem->lkups);
devm_kfree(ice_hw_to_dev(hw), list_elem);
mutex_unlock(rule_lock);
- if (list_empty(&sw->recp_list[rid].filt_rules))
- sw->recp_list[rid].adv_rule = false;
+ if (list_empty(&r_list[rid].filt_rules)) {
+ r_list[rid].adv_rule = false;
+
+ /* All rules for this recipe are now removed */
+ if (hw->recp_reuse)
+ ice_release_recipe_res(hw,
+ &r_list[rid]);
+ }
}
kfree(s_rule);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index 89ffa1b51b5a..671d7a5f359f 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -8,8 +8,9 @@
#define ICE_SW_CFG_MAX_BUF_LEN 2048
#define ICE_DFLT_VSI_INVAL 0xff
-#define ICE_FLTR_RX BIT(0)
-#define ICE_FLTR_TX BIT(1)
+#define ICE_FLTR_RX BIT(0)
+#define ICE_FLTR_TX BIT(1)
+#define ICE_FLTR_TX_ONLY BIT(2)
#define ICE_VSI_INVAL_ID 0xffff
#define ICE_INVAL_Q_HANDLE 0xFFFF
@@ -21,6 +22,8 @@
#define ICE_PROFID_IPV6_GTPC_NO_TEID 45
#define ICE_PROFID_IPV6_GTPU_TEID 46
#define ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER 70
+#define ICE_PROFID_IPV4_PFCP_NODE 79
+#define ICE_PROFID_IPV6_PFCP_SESSION 82
#define ICE_SW_RULE_VSI_LIST_SIZE(s, n) struct_size((s), vsi, (n))
#define ICE_SW_RULE_RX_TX_HDR_SIZE(s, l) struct_size((s), hdr_data, (l))
@@ -213,7 +216,6 @@ struct ice_sw_recipe {
/* For a chained recipe the root recipe is what should be used for
* programming rules
*/
- u8 is_root;
u8 root_rid;
u8 recp_created;
@@ -224,19 +226,8 @@ struct ice_sw_recipe {
*/
struct ice_fv_word ext_words[ICE_MAX_CHAIN_WORDS];
u16 word_masks[ICE_MAX_CHAIN_WORDS];
-
- /* if this recipe is a collection of other recipe */
- u8 big_recp;
-
- /* if this recipe is part of another bigger recipe then chain index
- * corresponding to this recipe
- */
- u8 chain_idx;
-
- /* if this recipe is a collection of other recipe then count of other
- * recipes and recipe IDs of those recipes
- */
- u8 n_grp_count;
+ u8 fv_idx[ICE_MAX_CHAIN_WORDS];
+ u16 fv_mask[ICE_MAX_CHAIN_WORDS];
/* Bit map specifying the IDs associated with this group of recipe */
DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
@@ -269,10 +260,6 @@ struct ice_sw_recipe {
u8 need_pass_l2:1;
u8 allow_pass_l2:1;
- struct list_head rg_list;
-
- /* AQ buffer associated with this recipe */
- struct ice_aqc_recipe_data_elem *root_buf;
/* This struct saves the fv_words for a given lookup */
struct ice_prot_lkup_ext lkup_exts;
};
@@ -429,5 +416,6 @@ ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 *r_assoc,
int
ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 r_assoc,
struct ice_sq_cd *cd);
+void ice_init_chk_recipe_reuse_support(struct ice_hw *hw);
#endif /* _ICE_SWITCH_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index 688ccb0615ab..fb9ea7f8ef44 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -12,14 +12,11 @@
/**
* ice_tc_count_lkups - determine lookup count for switch filter
* @flags: TC-flower flags
- * @headers: Pointer to TC flower filter header structure
* @fltr: Pointer to outer TC filter structure
*
- * Determine lookup count based on TC flower input for switch filter.
+ * Return: lookup count based on TC flower input for a switch filter.
*/
-static int
-ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
- struct ice_tc_flower_fltr *fltr)
+static int ice_tc_count_lkups(u32 flags, struct ice_tc_flower_fltr *fltr)
{
int lkups_cnt = 1; /* 0th lookup is metadata */
@@ -37,7 +34,10 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
if (flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC)
lkups_cnt++;
- if (flags & ICE_TC_FLWR_FIELD_ENC_OPTS)
+ if (flags & ICE_TC_FLWR_FIELD_GTP_OPTS)
+ lkups_cnt++;
+
+ if (flags & ICE_TC_FLWR_FIELD_PFCP_OPTS)
lkups_cnt++;
if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
@@ -140,6 +140,8 @@ ice_proto_type_from_tunnel(enum ice_tunnel_type type)
return ICE_GTP;
case TNL_GTPC:
return ICE_GTP_NO_PAY;
+ case TNL_PFCP:
+ return ICE_PFCP;
default:
return 0;
}
@@ -159,6 +161,8 @@ ice_sw_type_from_tunnel(enum ice_tunnel_type type)
return ICE_SW_TUN_GTPU;
case TNL_GTPC:
return ICE_SW_TUN_GTPC;
+ case TNL_PFCP:
+ return ICE_SW_TUN_PFCP;
default:
return ICE_NON_TUN;
}
@@ -221,8 +225,7 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
i++;
}
- if (flags & ICE_TC_FLWR_FIELD_ENC_OPTS &&
- (fltr->tunnel_type == TNL_GTPU || fltr->tunnel_type == TNL_GTPC)) {
+ if (flags & ICE_TC_FLWR_FIELD_GTP_OPTS) {
list[i].type = ice_proto_type_from_tunnel(fltr->tunnel_type);
if (fltr->gtp_pdu_info_masks.pdu_type) {
@@ -239,6 +242,22 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
i++;
}
+ if (flags & ICE_TC_FLWR_FIELD_PFCP_OPTS) {
+ struct ice_pfcp_hdr *hdr_h, *hdr_m;
+
+ hdr_h = &list[i].h_u.pfcp_hdr;
+ hdr_m = &list[i].m_u.pfcp_hdr;
+ list[i].type = ICE_PFCP;
+
+ hdr_h->flags = fltr->pfcp_meta_keys.type;
+ hdr_m->flags = fltr->pfcp_meta_masks.type & 0x01;
+
+ hdr_h->seid = fltr->pfcp_meta_keys.seid;
+ hdr_m->seid = fltr->pfcp_meta_masks.seid;
+
+ i++;
+ }
+
if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
ICE_TC_FLWR_FIELD_ENC_DEST_IPV4)) {
list[i].type = ice_proto_type_from_ipv4(false);
@@ -374,8 +393,11 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
if (tc_fltr->tunnel_type != TNL_LAST) {
i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list, i);
- headers = &tc_fltr->inner_headers;
- inner = true;
+ /* PFCP is considered non-tunneled - don't swap headers. */
+ if (tc_fltr->tunnel_type != TNL_PFCP) {
+ headers = &tc_fltr->inner_headers;
+ inner = true;
+ }
}
if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) {
@@ -629,6 +651,8 @@ static int ice_tc_tun_get_type(struct net_device *tunnel_dev)
*/
if (netif_is_gtp(tunnel_dev))
return TNL_GTPU;
+ if (netif_is_pfcp(tunnel_dev))
+ return TNL_PFCP;
return TNL_LAST;
}
@@ -642,35 +666,41 @@ static bool ice_tc_is_dev_uplink(struct net_device *dev)
return netif_is_ice(dev) || ice_is_tunnel_supported(dev);
}
-static int ice_tc_setup_redirect_action(struct net_device *filter_dev,
- struct ice_tc_flower_fltr *fltr,
- struct net_device *target_dev)
+static int ice_tc_setup_action(struct net_device *filter_dev,
+ struct ice_tc_flower_fltr *fltr,
+ struct net_device *target_dev,
+ enum ice_sw_fwd_act_type action)
{
struct ice_repr *repr;
- fltr->action.fltr_act = ICE_FWD_TO_VSI;
+ if (action != ICE_FWD_TO_VSI && action != ICE_MIRROR_PACKET) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action to setup provided");
+ return -EINVAL;
+ }
+
+ fltr->action.fltr_act = action;
if (ice_is_port_repr_netdev(filter_dev) &&
- ice_is_port_repr_netdev(target_dev)) {
+ ice_is_port_repr_netdev(target_dev) &&
+ fltr->direction == ICE_ESWITCH_FLTR_EGRESS) {
repr = ice_netdev_to_repr(target_dev);
fltr->dest_vsi = repr->src_vsi;
- fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
} else if (ice_is_port_repr_netdev(filter_dev) &&
- ice_tc_is_dev_uplink(target_dev)) {
+ ice_tc_is_dev_uplink(target_dev) &&
+ fltr->direction == ICE_ESWITCH_FLTR_EGRESS) {
repr = ice_netdev_to_repr(filter_dev);
fltr->dest_vsi = repr->src_vsi->back->eswitch.uplink_vsi;
- fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
} else if (ice_tc_is_dev_uplink(filter_dev) &&
- ice_is_port_repr_netdev(target_dev)) {
+ ice_is_port_repr_netdev(target_dev) &&
+ fltr->direction == ICE_ESWITCH_FLTR_INGRESS) {
repr = ice_netdev_to_repr(target_dev);
fltr->dest_vsi = repr->src_vsi;
- fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
} else {
NL_SET_ERR_MSG_MOD(fltr->extack,
- "Unsupported netdevice in switchdev mode");
+ "The action is not supported for this netdevice");
return -EINVAL;
}
@@ -683,48 +713,11 @@ ice_tc_setup_drop_action(struct net_device *filter_dev,
{
fltr->action.fltr_act = ICE_DROP_PACKET;
- if (ice_is_port_repr_netdev(filter_dev)) {
- fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
- } else if (ice_tc_is_dev_uplink(filter_dev)) {
- fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
- } else {
+ if (!ice_tc_is_dev_uplink(filter_dev) &&
+ !(ice_is_port_repr_netdev(filter_dev) &&
+ fltr->direction == ICE_ESWITCH_FLTR_INGRESS)) {
NL_SET_ERR_MSG_MOD(fltr->extack,
- "Unsupported netdevice in switchdev mode");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int ice_tc_setup_mirror_action(struct net_device *filter_dev,
- struct ice_tc_flower_fltr *fltr,
- struct net_device *target_dev)
-{
- struct ice_repr *repr;
-
- fltr->action.fltr_act = ICE_MIRROR_PACKET;
-
- if (ice_is_port_repr_netdev(filter_dev) &&
- ice_is_port_repr_netdev(target_dev)) {
- repr = ice_netdev_to_repr(target_dev);
-
- fltr->dest_vsi = repr->src_vsi;
- fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
- } else if (ice_is_port_repr_netdev(filter_dev) &&
- ice_tc_is_dev_uplink(target_dev)) {
- repr = ice_netdev_to_repr(filter_dev);
-
- fltr->dest_vsi = repr->src_vsi->back->eswitch.uplink_vsi;
- fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
- } else if (ice_tc_is_dev_uplink(filter_dev) &&
- ice_is_port_repr_netdev(target_dev)) {
- repr = ice_netdev_to_repr(target_dev);
-
- fltr->dest_vsi = repr->src_vsi;
- fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
- } else {
- NL_SET_ERR_MSG_MOD(fltr->extack,
- "Unsupported netdevice in switchdev mode");
+ "The action is not supported for this netdevice");
return -EINVAL;
}
@@ -746,16 +739,19 @@ static int ice_eswitch_tc_parse_action(struct net_device *filter_dev,
break;
case FLOW_ACTION_REDIRECT:
- err = ice_tc_setup_redirect_action(filter_dev, fltr, act->dev);
+ err = ice_tc_setup_action(filter_dev, fltr,
+ act->dev, ICE_FWD_TO_VSI);
if (err)
return err;
break;
case FLOW_ACTION_MIRRED:
- err = ice_tc_setup_mirror_action(filter_dev, fltr, act->dev);
+ err = ice_tc_setup_action(filter_dev, fltr,
+ act->dev, ICE_MIRROR_PACKET);
if (err)
return err;
+
break;
default:
@@ -766,10 +762,157 @@ static int ice_eswitch_tc_parse_action(struct net_device *filter_dev,
return 0;
}
+static bool ice_is_fltr_lldp(struct ice_tc_flower_fltr *fltr)
+{
+ return fltr->outer_headers.l2_key.n_proto == htons(ETH_P_LLDP);
+}
+
+static bool ice_is_fltr_pf_tx_lldp(struct ice_tc_flower_fltr *fltr)
+{
+ struct ice_vsi *vsi = fltr->src_vsi, *uplink;
+
+ if (!ice_is_switchdev_running(vsi->back))
+ return false;
+
+ uplink = vsi->back->eswitch.uplink_vsi;
+ return vsi == uplink && fltr->action.fltr_act == ICE_DROP_PACKET &&
+ ice_is_fltr_lldp(fltr) &&
+ fltr->direction == ICE_ESWITCH_FLTR_EGRESS &&
+ fltr->flags == ICE_TC_FLWR_FIELD_ETH_TYPE_ID;
+}
+
+static bool ice_is_fltr_vf_tx_lldp(struct ice_tc_flower_fltr *fltr)
+{
+ struct ice_vsi *vsi = fltr->src_vsi, *uplink;
+
+ uplink = vsi->back->eswitch.uplink_vsi;
+ return fltr->src_vsi->type == ICE_VSI_VF && ice_is_fltr_lldp(fltr) &&
+ fltr->direction == ICE_ESWITCH_FLTR_EGRESS &&
+ fltr->dest_vsi == uplink;
+}
+
+static struct ice_tc_flower_fltr *
+ice_find_pf_tx_lldp_fltr(struct ice_pf *pf)
+{
+ struct ice_tc_flower_fltr *fltr;
+
+ hlist_for_each_entry(fltr, &pf->tc_flower_fltr_list, tc_flower_node)
+ if (ice_is_fltr_pf_tx_lldp(fltr))
+ return fltr;
+
+ return NULL;
+}
+
+static bool ice_any_vf_lldp_tx_ena(struct ice_pf *pf)
+{
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ ice_for_each_vf(pf, bkt, vf)
+ if (vf->lldp_tx_ena)
+ return true;
+
+ return false;
+}
+
+int ice_pass_vf_tx_lldp(struct ice_vsi *vsi, bool deinit)
+{
+ struct ice_rule_query_data remove_entry = {
+ .rid = vsi->vf->lldp_recipe_id,
+ .rule_id = vsi->vf->lldp_rule_id,
+ .vsi_handle = vsi->idx,
+ };
+ struct ice_pf *pf = vsi->back;
+ int err;
+
+ if (vsi->vf->lldp_tx_ena)
+ return 0;
+
+ if (!deinit && !ice_find_pf_tx_lldp_fltr(vsi->back))
+ return -EINVAL;
+
+ if (!deinit && ice_any_vf_lldp_tx_ena(pf))
+ return -EINVAL;
+
+ err = ice_rem_adv_rule_by_id(&pf->hw, &remove_entry);
+ if (!err)
+ vsi->vf->lldp_tx_ena = true;
+
+ return err;
+}
+
+int ice_drop_vf_tx_lldp(struct ice_vsi *vsi, bool init)
+{
+ struct ice_rule_query_data rule_added;
+ struct ice_adv_rule_info rinfo = {
+ .priority = 7,
+ .src_vsi = vsi->idx,
+ .sw_act = {
+ .src = vsi->idx,
+ .flag = ICE_FLTR_TX,
+ .fltr_act = ICE_DROP_PACKET,
+ .vsi_handle = vsi->idx,
+ },
+ .flags_info.act_valid = true,
+ };
+ struct ice_adv_lkup_elem list[3];
+ struct ice_pf *pf = vsi->back;
+ int err;
+
+ if (!init && !vsi->vf->lldp_tx_ena)
+ return 0;
+
+ memset(list, 0, sizeof(list));
+ ice_rule_add_direction_metadata(&list[0]);
+ ice_rule_add_src_vsi_metadata(&list[1]);
+ list[2].type = ICE_ETYPE_OL;
+ list[2].h_u.ethertype.ethtype_id = htons(ETH_P_LLDP);
+ list[2].m_u.ethertype.ethtype_id = htons(0xFFFF);
+
+ err = ice_add_adv_rule(&pf->hw, list, ARRAY_SIZE(list), &rinfo,
+ &rule_added);
+ if (err) {
+ dev_err(&pf->pdev->dev,
+ "Failed to add an LLDP rule to VSI 0x%X: %d\n",
+ vsi->idx, err);
+ } else {
+ vsi->vf->lldp_recipe_id = rule_added.rid;
+ vsi->vf->lldp_rule_id = rule_added.rule_id;
+ vsi->vf->lldp_tx_ena = false;
+ }
+
+ return err;
+}
+
+static void ice_handle_add_pf_lldp_drop_rule(struct ice_vsi *vsi)
+{
+ struct ice_tc_flower_fltr *fltr;
+ struct ice_pf *pf = vsi->back;
+
+ hlist_for_each_entry(fltr, &pf->tc_flower_fltr_list, tc_flower_node) {
+ if (!ice_is_fltr_vf_tx_lldp(fltr))
+ continue;
+ ice_pass_vf_tx_lldp(fltr->src_vsi, true);
+ break;
+ }
+}
+
+static void ice_handle_del_pf_lldp_drop_rule(struct ice_pf *pf)
+{
+ int i;
+
+ /* Make the VF LLDP fwd to uplink rule dormant */
+ ice_for_each_vsi(pf, i) {
+ struct ice_vsi *vf_vsi = pf->vsi[i];
+
+ if (vf_vsi && vf_vsi->type == ICE_VSI_VF)
+ ice_drop_vf_tx_lldp(vf_vsi, false);
+ }
+}
+
static int
ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
{
- struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
struct ice_adv_rule_info rule_info = { 0 };
struct ice_rule_query_data rule_added;
struct ice_hw *hw = &vsi->back->hw;
@@ -784,7 +927,10 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
return -EOPNOTSUPP;
}
- lkups_cnt = ice_tc_count_lkups(flags, headers, fltr);
+ if (ice_is_fltr_vf_tx_lldp(fltr))
+ return ice_pass_vf_tx_lldp(vsi, false);
+
+ lkups_cnt = ice_tc_count_lkups(flags, fltr);
list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
if (!list)
return -ENOMEM;
@@ -813,11 +959,27 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
rule_info.sw_act.src = hw->pf_id;
rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE;
} else if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS &&
+ !fltr->dest_vsi && vsi == vsi->back->eswitch.uplink_vsi) {
+ /* PF to Uplink */
+ rule_info.sw_act.flag |= ICE_FLTR_TX;
+ rule_info.sw_act.src = vsi->idx;
+ } else if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS &&
fltr->dest_vsi == vsi->back->eswitch.uplink_vsi) {
/* VF to Uplink */
rule_info.sw_act.flag |= ICE_FLTR_TX;
rule_info.sw_act.src = vsi->idx;
rule_info.flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE;
+ /* This is a specific case. The destination VSI index is
+ * overwritten by the source VSI index. This type of filter
+ * should let the packet go to the LAN, not to the VSI passed
+ * here, so only the LAN_EN bit should be set. However, the
+ * VSI must be a valid one, and using the source VSI index
+ * here is safe. Even if the switch result sets both LAN_EN
+ * and LB_EN (which would normally pass the packet to this
+ * VSI), the packet is not seen on the VSI because local
+ * loopback is turned off.
+ */
+ rule_info.sw_act.vsi_handle = vsi->idx;
} else {
/* VF to VF */
rule_info.sw_act.flag |= ICE_FLTR_TX;
@@ -834,11 +996,17 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because it already exist");
ret = -EINVAL;
goto exit;
+ } else if (ret == -ENOSPC) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter: insufficient space available.");
+ goto exit;
} else if (ret) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter due to error");
goto exit;
}
+ if (ice_is_fltr_pf_tx_lldp(fltr))
+ ice_handle_add_pf_lldp_drop_rule(vsi);
+
/* store the output params, which are needed later for removing
* advanced switch filter
*/
@@ -973,7 +1141,6 @@ static int
ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
struct ice_tc_flower_fltr *tc_fltr)
{
- struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers;
struct ice_adv_rule_info rule_info = {0};
struct ice_rule_query_data rule_added;
struct ice_adv_lkup_elem *list;
@@ -1009,7 +1176,7 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
return PTR_ERR(dest_vsi);
}
- lkups_cnt = ice_tc_count_lkups(flags, headers, tc_fltr);
+ lkups_cnt = ice_tc_count_lkups(flags, tc_fltr);
list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
if (!list)
return -ENOMEM;
@@ -1044,8 +1211,13 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
tc_fltr->action.fwd.q.hw_queue, lkups_cnt);
break;
case ICE_DROP_PACKET:
- rule_info.sw_act.flag |= ICE_FLTR_RX;
- rule_info.sw_act.src = hw->pf_id;
+ if (tc_fltr->direction == ICE_ESWITCH_FLTR_EGRESS) {
+ rule_info.sw_act.flag |= ICE_FLTR_TX;
+ rule_info.sw_act.src = vsi->idx;
+ } else {
+ rule_info.sw_act.flag |= ICE_FLTR_RX;
+ rule_info.sw_act.src = hw->pf_id;
+ }
rule_info.priority = ICE_SWITCH_FLTR_PRIO_VSI;
break;
default:
@@ -1059,6 +1231,10 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
"Unable to add filter because it already exist");
ret = -EINVAL;
goto exit;
+ } else if (ret == -ENOSPC) {
+ NL_SET_ERR_MSG_MOD(tc_fltr->extack,
+ "Unable to add filter: insufficient space available.");
+ goto exit;
} else if (ret) {
NL_SET_ERR_MSG_MOD(tc_fltr->extack,
"Unable to add filter due to error");
@@ -1352,6 +1528,7 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
struct ice_tc_flower_fltr *fltr)
{
struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
+ struct netlink_ext_ack *extack = fltr->extack;
struct flow_match_control enc_control;
fltr->tunnel_type = ice_tc_tun_get_type(dev);
@@ -1372,6 +1549,9 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
flow_rule_match_enc_control(rule, &enc_control);
+ if (flow_rule_has_enc_control_flags(enc_control.mask->flags, extack))
+ return -EOPNOTSUPP;
+
if (enc_control.key->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
struct flow_match_ipv4_addrs match;
@@ -1409,7 +1589,8 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
}
}
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS)) {
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS) &&
+ (fltr->tunnel_type == TNL_GTPU || fltr->tunnel_type == TNL_GTPC)) {
struct flow_match_enc_opts match;
flow_rule_match_enc_opts(rule, &match);
@@ -1420,7 +1601,21 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
memcpy(&fltr->gtp_pdu_info_masks, &match.mask->data[0],
sizeof(struct gtp_pdu_session_info));
- fltr->flags |= ICE_TC_FLWR_FIELD_ENC_OPTS;
+ fltr->flags |= ICE_TC_FLWR_FIELD_GTP_OPTS;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS) &&
+ fltr->tunnel_type == TNL_PFCP) {
+ struct flow_match_enc_opts match;
+
+ flow_rule_match_enc_opts(rule, &match);
+
+ memcpy(&fltr->pfcp_meta_keys, match.key->data,
+ sizeof(struct pfcp_metadata));
+ memcpy(&fltr->pfcp_meta_masks, match.mask->data,
+ sizeof(struct pfcp_metadata));
+
+ fltr->flags |= ICE_TC_FLWR_FIELD_PFCP_OPTS;
}
return 0;
@@ -1432,11 +1627,16 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
* @filter_dev: Pointer to device on which filter is being added
* @f: Pointer to struct flow_cls_offload
* @fltr: Pointer to filter structure
+ * @ingress: if the rule is added to an ingress block
+ *
+ * Return: 0 if the flower was parsed successfully, -EINVAL if the flower
+ * cannot be parsed, -EOPNOTSUPP if such a filter cannot be configured
+ * for the given VSI.
*/
static int
ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
struct flow_cls_offload *f,
- struct ice_tc_flower_fltr *fltr)
+ struct ice_tc_flower_fltr *fltr, bool ingress)
{
struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
@@ -1481,10 +1681,14 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
return err;
}
- /* header pointers should point to the inner headers, outer
- * header were already set by ice_parse_tunnel_attr
- */
- headers = &fltr->inner_headers;
+ /* PFCP is considered non-tunneled - don't swap headers. */
+ if (fltr->tunnel_type != TNL_PFCP) {
+ /* Header pointers should point to the inner headers,
+		 * outer headers were already set by
+ * ice_parse_tunnel_attr().
+ */
+ headers = &fltr->inner_headers;
+ }
} else if (dissector->used_keys &
(BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
@@ -1516,6 +1720,20 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
fltr->flags |= ICE_TC_FLWR_FIELD_ETH_TYPE_ID;
}
+ if (!ingress) {
+ bool switchdev =
+ ice_is_eswitch_mode_switchdev(vsi->back);
+
+ if (switchdev != (n_proto_key == ETH_P_LLDP)) {
+ NL_SET_ERR_MSG_FMT_MOD(fltr->extack,
+ "%sLLDP filtering is not supported on egress in %s mode",
+ switchdev ? "Non-" : "",
+ switchdev ? "switchdev" :
+ "legacy");
+ return -EOPNOTSUPP;
+ }
+ }
+
headers->l2_key.n_proto = cpu_to_be16(n_proto_key);
headers->l2_mask.n_proto = cpu_to_be16(n_proto_mask);
headers->l3_key.ip_proto = match.key->ip_proto;
@@ -1638,6 +1856,10 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
flow_rule_match_control(rule, &match);
addr_type = match.key->addr_type;
+
+ if (flow_rule_has_control_flags(match.mask->flags,
+ fltr->extack))
+ return -EOPNOTSUPP;
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
@@ -1687,6 +1909,14 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
return -EINVAL;
}
}
+
+ /* Ingress filter on representor results in an egress filter in HW
+ * and vice versa
+ */
+ ingress = ice_is_port_repr_netdev(filter_dev) ? !ingress : ingress;
+ fltr->direction = ingress ? ICE_ESWITCH_FLTR_INGRESS :
+ ICE_ESWITCH_FLTR_EGRESS;
+
return 0;
}
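
The direction handling just above is the subtle part: a filter attached to the ingress block of a port representor is programmed as an egress (Tx) rule in hardware, while an egress filter on the representor becomes an ingress (Rx) rule. A standalone sketch of that mapping, with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

enum fltr_dir { FLTR_INGRESS, FLTR_EGRESS };

/* Hypothetical helper mirroring the assignment above: flip the direction
 * when the filter device is a port representor. */
static enum fltr_dir hw_direction(bool block_ingress, bool is_port_repr)
{
	bool hw_ingress = is_port_repr ? !block_ingress : block_ingress;

	return hw_ingress ? FLTR_INGRESS : FLTR_EGRESS;
}

int main(void)
{
	/* ingress block on a representor ends up as an egress HW rule */
	printf("%s\n", hw_direction(true, true) == FLTR_EGRESS ? "egress" : "ingress");
	/* ingress block on a non-representor netdev stays ingress */
	printf("%s\n", hw_direction(true, false) == FLTR_INGRESS ? "ingress" : "egress");
	return 0;
}
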
@@ -1900,6 +2130,12 @@ static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
struct ice_pf *pf = vsi->back;
int err;
+ if (ice_is_fltr_pf_tx_lldp(fltr))
+ ice_handle_del_pf_lldp_drop_rule(pf);
+
+ if (ice_is_fltr_vf_tx_lldp(fltr))
+ return ice_drop_vf_tx_lldp(vsi, false);
+
rule_rem.rid = fltr->rid;
rule_rem.rule_id = fltr->rule_id;
rule_rem.vsi_handle = fltr->dest_vsi_handle;
@@ -1936,14 +2172,18 @@ static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
* @vsi: Pointer to VSI
* @f: Pointer to flower offload structure
* @__fltr: Pointer to struct ice_tc_flower_fltr
+ * @ingress: if the rule is added to an ingress block
*
* This function parses TC-flower input fields, parses action,
* and adds a filter.
+ *
+ * Return: 0 if the filter was successfully added,
+ * negative error code otherwise.
*/
static int
ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi,
struct flow_cls_offload *f,
- struct ice_tc_flower_fltr **__fltr)
+ struct ice_tc_flower_fltr **__fltr, bool ingress)
{
struct ice_tc_flower_fltr *fltr;
int err;
@@ -1960,7 +2200,7 @@ ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi,
fltr->src_vsi = vsi;
INIT_HLIST_NODE(&fltr->tc_flower_node);
- err = ice_parse_cls_flower(netdev, vsi, f, fltr);
+ err = ice_parse_cls_flower(netdev, vsi, f, fltr, ingress);
if (err < 0)
goto err;
@@ -2003,10 +2243,13 @@ ice_find_tc_flower_fltr(struct ice_pf *pf, unsigned long cookie)
* @netdev: Pointer to filter device
* @vsi: Pointer to VSI
* @cls_flower: Pointer to flower offload structure
+ * @ingress: if the rule is added to an ingress block
+ *
+ * Return: 0 if the flower was successfully added,
+ * negative error code otherwise.
*/
-int
-ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
- struct flow_cls_offload *cls_flower)
+int ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
+ struct flow_cls_offload *cls_flower, bool ingress)
{
struct netlink_ext_ack *extack = cls_flower->common.extack;
struct net_device *vsi_netdev = vsi->netdev;
@@ -2041,7 +2284,7 @@ ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
}
/* prep and add TC-flower filter in HW */
- err = ice_add_tc_fltr(netdev, vsi, cls_flower, &fltr);
+ err = ice_add_tc_fltr(netdev, vsi, cls_flower, &fltr, ingress);
if (err)
return err;
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.h b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
index 65d387163a46..8a3ab2f22af9 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
@@ -4,6 +4,9 @@
#ifndef _ICE_TC_LIB_H_
#define _ICE_TC_LIB_H_
+#include <linux/bits.h>
+#include <net/pfcp.h>
+
#define ICE_TC_FLWR_FIELD_DST_MAC BIT(0)
#define ICE_TC_FLWR_FIELD_SRC_MAC BIT(1)
#define ICE_TC_FLWR_FIELD_VLAN BIT(2)
@@ -22,7 +25,7 @@
#define ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT BIT(15)
#define ICE_TC_FLWR_FIELD_ENC_DST_MAC BIT(16)
#define ICE_TC_FLWR_FIELD_ETH_TYPE_ID BIT(17)
-#define ICE_TC_FLWR_FIELD_ENC_OPTS BIT(18)
+#define ICE_TC_FLWR_FIELD_GTP_OPTS BIT(18)
#define ICE_TC_FLWR_FIELD_CVLAN BIT(19)
#define ICE_TC_FLWR_FIELD_PPPOE_SESSID BIT(20)
#define ICE_TC_FLWR_FIELD_PPP_PROTO BIT(21)
@@ -34,6 +37,7 @@
#define ICE_TC_FLWR_FIELD_VLAN_PRIO BIT(27)
#define ICE_TC_FLWR_FIELD_CVLAN_PRIO BIT(28)
#define ICE_TC_FLWR_FIELD_VLAN_TPID BIT(29)
+#define ICE_TC_FLWR_FIELD_PFCP_OPTS BIT(30)
#define ICE_TC_FLOWER_MASK_32 0xFFFFFFFF
@@ -161,6 +165,8 @@ struct ice_tc_flower_fltr {
__be32 tenant_id;
struct gtp_pdu_session_info gtp_pdu_info_keys;
struct gtp_pdu_session_info gtp_pdu_info_masks;
+ struct pfcp_metadata pfcp_meta_keys;
+ struct pfcp_metadata pfcp_meta_masks;
u32 flags;
u8 tunnel_type;
struct ice_tc_flower_action action;
@@ -205,13 +211,14 @@ static inline int ice_chnl_dmac_fltr_cnt(struct ice_pf *pf)
}
struct ice_vsi *ice_locate_vsi_using_queue(struct ice_vsi *vsi, int queue);
-int
-ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
- struct flow_cls_offload *cls_flower);
-int
-ice_del_cls_flower(struct ice_vsi *vsi, struct flow_cls_offload *cls_flower);
+int ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
+ struct flow_cls_offload *cls_flower, bool ingress);
+int ice_del_cls_flower(struct ice_vsi *vsi,
+ struct flow_cls_offload *cls_flower);
void ice_replay_tc_fltrs(struct ice_pf *pf);
bool ice_is_tunnel_supported(struct net_device *dev);
+int ice_drop_vf_tx_lldp(struct ice_vsi *vsi, bool init);
+int ice_pass_vf_tx_lldp(struct ice_vsi *vsi, bool deinit);
static inline bool ice_is_forward_action(enum ice_sw_fwd_act_type fltr_act)
{
diff --git a/drivers/net/ethernet/intel/ice/ice_trace.h b/drivers/net/ethernet/intel/ice/ice_trace.h
index b2f5c9fe0149..4f35ef8d6b29 100644
--- a/drivers/net/ethernet/intel/ice/ice_trace.h
+++ b/drivers/net/ethernet/intel/ice/ice_trace.h
@@ -69,7 +69,7 @@ DECLARE_EVENT_CLASS(ice_rx_dim_template,
TP_fast_assign(__entry->q_vector = q_vector;
__entry->dim = dim;
- __assign_str(devname, q_vector->rx.rx_ring->netdev->name);),
+ __assign_str(devname);),
TP_printk("netdev: %s Rx-Q: %d dim-state: %d dim-profile: %d dim-tune: %d dim-st-right: %d dim-st-left: %d dim-tired: %d",
__get_str(devname),
@@ -96,7 +96,7 @@ DECLARE_EVENT_CLASS(ice_tx_dim_template,
TP_fast_assign(__entry->q_vector = q_vector;
__entry->dim = dim;
- __assign_str(devname, q_vector->tx.tx_ring->netdev->name);),
+ __assign_str(devname);),
TP_printk("netdev: %s Tx-Q: %d dim-state: %d dim-profile: %d dim-tune: %d dim-st-right: %d dim-st-left: %d dim-tired: %d",
__get_str(devname),
@@ -128,9 +128,9 @@ DECLARE_EVENT_CLASS(ice_tx_template,
TP_fast_assign(__entry->ring = ring;
__entry->desc = desc;
__entry->buf = buf;
- __assign_str(devname, ring->netdev->name);),
+ __assign_str(devname);),
- TP_printk("netdev: %s ring: %pK desc: %pK buf %pK", __get_str(devname),
+ TP_printk("netdev: %s ring: %p desc: %p buf %p", __get_str(devname),
__entry->ring, __entry->desc, __entry->buf)
);
@@ -156,9 +156,9 @@ DECLARE_EVENT_CLASS(ice_rx_template,
TP_fast_assign(__entry->ring = ring;
__entry->desc = desc;
- __assign_str(devname, ring->netdev->name);),
+ __assign_str(devname);),
- TP_printk("netdev: %s ring: %pK desc: %pK", __get_str(devname),
+ TP_printk("netdev: %s ring: %p desc: %p", __get_str(devname),
__entry->ring, __entry->desc)
);
DEFINE_EVENT(ice_rx_template, ice_clean_rx_irq,
@@ -180,9 +180,9 @@ DECLARE_EVENT_CLASS(ice_rx_indicate_template,
TP_fast_assign(__entry->ring = ring;
__entry->desc = desc;
__entry->skb = skb;
- __assign_str(devname, ring->netdev->name);),
+ __assign_str(devname);),
- TP_printk("netdev: %s ring: %pK desc: %pK skb %pK", __get_str(devname),
+ TP_printk("netdev: %s ring: %p desc: %p skb %p", __get_str(devname),
__entry->ring, __entry->desc, __entry->skb)
);
@@ -203,9 +203,9 @@ DECLARE_EVENT_CLASS(ice_xmit_template,
TP_fast_assign(__entry->ring = ring;
__entry->skb = skb;
- __assign_str(devname, ring->netdev->name);),
+ __assign_str(devname);),
- TP_printk("netdev: %s skb: %pK ring: %pK", __get_str(devname),
+ TP_printk("netdev: %s skb: %p ring: %p", __get_str(devname),
__entry->skb, __entry->ring)
);
@@ -228,7 +228,7 @@ DECLARE_EVENT_CLASS(ice_tx_tstamp_template,
TP_fast_assign(__entry->skb = skb;
__entry->idx = idx;),
- TP_printk("skb %pK idx %d",
+ TP_printk("skb %p idx %d",
__entry->skb, __entry->idx)
);
#define DEFINE_TX_TSTAMP_OP_EVENT(name) \
@@ -330,6 +330,24 @@ DEFINE_EVENT(ice_esw_br_port_template,
TP_ARGS(port)
);
+DECLARE_EVENT_CLASS(ice_switch_stats_template,
+ TP_PROTO(struct ice_switch_info *sw_info),
+ TP_ARGS(sw_info),
+ TP_STRUCT__entry(__field(u16, rule_cnt)
+ __field(u8, recp_cnt)),
+ TP_fast_assign(__entry->rule_cnt = sw_info->rule_cnt;
+ __entry->recp_cnt = sw_info->recp_cnt;),
+ TP_printk("rules=%u recipes=%u",
+ __entry->rule_cnt,
+ __entry->recp_cnt)
+);
+
+DEFINE_EVENT(ice_switch_stats_template,
+ ice_aq_sw_rules,
+ TP_PROTO(struct ice_switch_info *sw_info),
+ TP_ARGS(sw_info)
+);
+
/* End tracepoints */
#endif /* _ICE_TRACE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_tspll.c b/drivers/net/ethernet/intel/ice/ice_tspll.c
new file mode 100644
index 000000000000..66320a4ab86f
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_tspll.c
@@ -0,0 +1,626 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_lib.h"
+#include "ice_ptp_hw.h"
+
+static const struct
+ice_tspll_params_e82x e82x_tspll_params[NUM_ICE_TSPLL_FREQ] = {
+ [ICE_TSPLL_FREQ_25_000] = {
+ .refclk_pre_div = 1,
+ .post_pll_div = 6,
+ .feedback_div = 197,
+ .frac_n_div = 2621440,
+ },
+ [ICE_TSPLL_FREQ_122_880] = {
+ .refclk_pre_div = 5,
+ .post_pll_div = 7,
+ .feedback_div = 223,
+ .frac_n_div = 524288
+ },
+ [ICE_TSPLL_FREQ_125_000] = {
+ .refclk_pre_div = 5,
+ .post_pll_div = 7,
+ .feedback_div = 223,
+ .frac_n_div = 524288
+ },
+ [ICE_TSPLL_FREQ_153_600] = {
+ .refclk_pre_div = 5,
+ .post_pll_div = 6,
+ .feedback_div = 159,
+ .frac_n_div = 1572864
+ },
+ [ICE_TSPLL_FREQ_156_250] = {
+ .refclk_pre_div = 5,
+ .post_pll_div = 6,
+ .feedback_div = 159,
+ .frac_n_div = 1572864
+ },
+ [ICE_TSPLL_FREQ_245_760] = {
+ .refclk_pre_div = 10,
+ .post_pll_div = 7,
+ .feedback_div = 223,
+ .frac_n_div = 524288
+ },
+};
+
+/**
+ * ice_tspll_clk_freq_str - Convert time_ref_freq to string
+ * @clk_freq: Clock frequency
+ *
+ * Return: specified TIME_REF clock frequency converted to a string.
+ */
+static const char *ice_tspll_clk_freq_str(enum ice_tspll_freq clk_freq)
+{
+ switch (clk_freq) {
+ case ICE_TSPLL_FREQ_25_000:
+ return "25 MHz";
+ case ICE_TSPLL_FREQ_122_880:
+ return "122.88 MHz";
+ case ICE_TSPLL_FREQ_125_000:
+ return "125 MHz";
+ case ICE_TSPLL_FREQ_153_600:
+ return "153.6 MHz";
+ case ICE_TSPLL_FREQ_156_250:
+ return "156.25 MHz";
+ case ICE_TSPLL_FREQ_245_760:
+ return "245.76 MHz";
+ default:
+ return "Unknown";
+ }
+}
+
+/**
+ * ice_tspll_default_freq - Return default frequency for a MAC type
+ * @mac_type: MAC type
+ *
+ * Return: default TSPLL frequency for a supported MAC type, -ERANGE otherwise.
+ */
+static enum ice_tspll_freq ice_tspll_default_freq(enum ice_mac_type mac_type)
+{
+ switch (mac_type) {
+ case ICE_MAC_GENERIC:
+ return ICE_TSPLL_FREQ_25_000;
+ case ICE_MAC_GENERIC_3K_E825:
+ return ICE_TSPLL_FREQ_156_250;
+ default:
+ return -ERANGE;
+ }
+}
+
+/**
+ * ice_tspll_check_params - Check if TSPLL params are correct
+ * @hw: Pointer to the HW struct
+ * @clk_freq: Clock frequency to program
+ * @clk_src: Clock source to select (TIME_REF or TCXO)
+ *
+ * Return: true if TSPLL params are correct, false otherwise.
+ */
+static bool ice_tspll_check_params(struct ice_hw *hw,
+ enum ice_tspll_freq clk_freq,
+ enum ice_clk_src clk_src)
+{
+ if (clk_freq >= NUM_ICE_TSPLL_FREQ) {
+ dev_warn(ice_hw_to_dev(hw), "Invalid TSPLL frequency %u\n",
+ clk_freq);
+ return false;
+ }
+
+ if (clk_src >= NUM_ICE_CLK_SRC) {
+ dev_warn(ice_hw_to_dev(hw), "Invalid clock source %u\n",
+ clk_src);
+ return false;
+ }
+
+ if ((hw->mac_type == ICE_MAC_GENERIC_3K_E825 ||
+ clk_src == ICE_CLK_SRC_TCXO) &&
+ clk_freq != ice_tspll_default_freq(hw->mac_type)) {
+ dev_warn(ice_hw_to_dev(hw), "Unsupported frequency for this clock source\n");
+ return false;
+ }
+
+ return true;
+}
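
The constraint enforced here is that E82X parts accept any supported TIME_REF frequency, while E825 parts and the internal TCXO source are pinned to the default frequency for that MAC type. A self-contained restatement of the rule (all names hypothetical, frequency indices arbitrary):

#include <stdbool.h>
#include <stdio.h>

enum mac_kind { MAC_E82X, MAC_E825 };
enum clk_kind { CLK_TCXO, CLK_TIME_REF };

static int default_freq(enum mac_kind mac)
{
	return mac == MAC_E825 ? 4 /* 156.25 MHz */ : 0 /* 25 MHz */;
}

/* Mirror of the check above: reject out-of-range indices, and force the
 * default frequency for E825 parts or the TCXO source. */
static bool params_ok(enum mac_kind mac, int freq, enum clk_kind src, int num_freq)
{
	if (freq < 0 || freq >= num_freq)
		return false;
	if ((mac == MAC_E825 || src == CLK_TCXO) && freq != default_freq(mac))
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", params_ok(MAC_E825, 4, CLK_TIME_REF, 6));	/* 1 */
	printf("%d\n", params_ok(MAC_E82X, 2, CLK_TCXO, 6));		/* 0 */
	return 0;
}
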
+
+/**
+ * ice_tspll_clk_src_str - Convert time_ref_src to string
+ * @clk_src: Clock source
+ *
+ * Return: specified clock source converted to its string name
+ */
+static const char *ice_tspll_clk_src_str(enum ice_clk_src clk_src)
+{
+ switch (clk_src) {
+ case ICE_CLK_SRC_TCXO:
+ return "TCXO";
+ case ICE_CLK_SRC_TIME_REF:
+ return "TIME_REF";
+ default:
+ return "Unknown";
+ }
+}
+
+/**
+ * ice_tspll_log_cfg - Log current/new TSPLL configuration
+ * @hw: Pointer to the HW struct
+ * @enable: CGU enabled/disabled
+ * @clk_src: Current clock source
+ * @tspll_freq: Current clock frequency
+ * @lock: CGU lock status
+ * @new_cfg: true if this is a new config
+ */
+static void ice_tspll_log_cfg(struct ice_hw *hw, bool enable, u8 clk_src,
+ u8 tspll_freq, bool lock, bool new_cfg)
+{
+ dev_dbg(ice_hw_to_dev(hw),
+ "%s TSPLL configuration -- %s, src %s, freq %s, PLL %s\n",
+ new_cfg ? "New" : "Current", str_enabled_disabled(enable),
+ ice_tspll_clk_src_str((enum ice_clk_src)clk_src),
+ ice_tspll_clk_freq_str((enum ice_tspll_freq)tspll_freq),
+ lock ? "locked" : "unlocked");
+}
+
+/**
+ * ice_tspll_cfg_e82x - Configure the Clock Generation Unit TSPLL
+ * @hw: Pointer to the HW struct
+ * @clk_freq: Clock frequency to program
+ * @clk_src: Clock source to select (TIME_REF, or TCXO)
+ *
+ * Configure the Clock Generation Unit with the desired clock frequency and
+ * time reference, enabling the PLL which drives the PTP hardware clock.
+ *
+ * Return:
+ * * %0 - success
+ * * %-EINVAL - input parameters are incorrect
+ * * %-EBUSY - failed to lock TSPLL
+ * * %other - CGU read/write failure
+ */
+static int ice_tspll_cfg_e82x(struct ice_hw *hw, enum ice_tspll_freq clk_freq,
+ enum ice_clk_src clk_src)
+{
+ u32 val, r9, r24;
+ int err;
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_R9, &r9);
+ if (err)
+ return err;
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_R24, &r24);
+ if (err)
+ return err;
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_RO_BWM_LF, &val);
+ if (err)
+ return err;
+
+ ice_tspll_log_cfg(hw, !!FIELD_GET(ICE_CGU_R23_R24_TSPLL_ENABLE, r24),
+ FIELD_GET(ICE_CGU_R23_R24_TIME_REF_SEL, r24),
+ FIELD_GET(ICE_CGU_R9_TIME_REF_FREQ_SEL, r9),
+ !!FIELD_GET(ICE_CGU_RO_BWM_LF_TRUE_LOCK, val),
+ false);
+
+ /* Disable the PLL before changing the clock source or frequency */
+ if (FIELD_GET(ICE_CGU_R23_R24_TSPLL_ENABLE, r24)) {
+ r24 &= ~ICE_CGU_R23_R24_TSPLL_ENABLE;
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R24, r24);
+ if (err)
+ return err;
+ }
+
+ /* Set the frequency */
+ r9 &= ~ICE_CGU_R9_TIME_REF_FREQ_SEL;
+ r9 |= FIELD_PREP(ICE_CGU_R9_TIME_REF_FREQ_SEL, clk_freq);
+ err = ice_write_cgu_reg(hw, ICE_CGU_R9, r9);
+ if (err)
+ return err;
+
+ /* Configure the TSPLL feedback divisor */
+ err = ice_read_cgu_reg(hw, ICE_CGU_R19, &val);
+ if (err)
+ return err;
+
+ val &= ~(ICE_CGU_R19_TSPLL_FBDIV_INTGR_E82X | ICE_CGU_R19_TSPLL_NDIVRATIO);
+ val |= FIELD_PREP(ICE_CGU_R19_TSPLL_FBDIV_INTGR_E82X,
+ e82x_tspll_params[clk_freq].feedback_div);
+ val |= FIELD_PREP(ICE_CGU_R19_TSPLL_NDIVRATIO, 1);
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R19, val);
+ if (err)
+ return err;
+
+ /* Configure the TSPLL post divisor */
+ err = ice_read_cgu_reg(hw, ICE_CGU_R22, &val);
+ if (err)
+ return err;
+
+ val &= ~(ICE_CGU_R22_TIME1588CLK_DIV |
+ ICE_CGU_R22_TIME1588CLK_DIV2);
+ val |= FIELD_PREP(ICE_CGU_R22_TIME1588CLK_DIV,
+ e82x_tspll_params[clk_freq].post_pll_div);
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R22, val);
+ if (err)
+ return err;
+
+ /* Configure the TSPLL pre divisor and clock source */
+ err = ice_read_cgu_reg(hw, ICE_CGU_R24, &r24);
+ if (err)
+ return err;
+
+ r24 &= ~(ICE_CGU_R23_R24_REF1588_CK_DIV | ICE_CGU_R24_FBDIV_FRAC |
+ ICE_CGU_R23_R24_TIME_REF_SEL);
+ r24 |= FIELD_PREP(ICE_CGU_R23_R24_REF1588_CK_DIV,
+ e82x_tspll_params[clk_freq].refclk_pre_div);
+ r24 |= FIELD_PREP(ICE_CGU_R24_FBDIV_FRAC,
+ e82x_tspll_params[clk_freq].frac_n_div);
+ r24 |= FIELD_PREP(ICE_CGU_R23_R24_TIME_REF_SEL, clk_src);
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R24, r24);
+ if (err)
+ return err;
+
+ /* Wait to ensure everything is stable */
+ usleep_range(10, 20);
+
+ /* Finally, enable the PLL */
+ r24 |= ICE_CGU_R23_R24_TSPLL_ENABLE;
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R24, r24);
+ if (err)
+ return err;
+
+ /* Wait at least 1 ms to verify if the PLL locks */
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_RO_BWM_LF, &val);
+ if (err)
+ return err;
+
+ if (!(val & ICE_CGU_RO_BWM_LF_TRUE_LOCK)) {
+ dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n");
+ return -EBUSY;
+ }
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_R9, &r9);
+ if (err)
+ return err;
+ err = ice_read_cgu_reg(hw, ICE_CGU_R24, &r24);
+ if (err)
+ return err;
+
+ ice_tspll_log_cfg(hw, !!FIELD_GET(ICE_CGU_R23_R24_TSPLL_ENABLE, r24),
+ FIELD_GET(ICE_CGU_R23_R24_TIME_REF_SEL, r24),
+ FIELD_GET(ICE_CGU_R9_TIME_REF_FREQ_SEL, r9),
+ true, true);
+
+ return 0;
+}
+
+/**
+ * ice_tspll_dis_sticky_bits_e82x - disable TSPLL sticky bits
+ * @hw: Pointer to the HW struct
+ *
+ * Configure the Clock Generation Unit TSPLL sticky bits so they don't latch on
+ * losing TSPLL lock, but always show current state.
+ *
+ * Return: 0 on success, other error codes if the CGU read/write fails.
+ */
+static int ice_tspll_dis_sticky_bits_e82x(struct ice_hw *hw)
+{
+ u32 val;
+ int err;
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_CNTR_BIST, &val);
+ if (err)
+ return err;
+
+ val &= ~(ICE_CGU_CNTR_BIST_PLLLOCK_SEL_0 |
+ ICE_CGU_CNTR_BIST_PLLLOCK_SEL_1);
+
+ return ice_write_cgu_reg(hw, ICE_CGU_CNTR_BIST, val);
+}
+
+/**
+ * ice_tspll_cfg_e825c - Configure the TSPLL for E825-C
+ * @hw: Pointer to the HW struct
+ * @clk_freq: Clock frequency to program
+ * @clk_src: Clock source to select (TIME_REF, or TCXO)
+ *
+ * Configure the Clock Generation Unit with the desired clock frequency and
+ * time reference, enabling the PLL which drives the PTP hardware clock.
+ *
+ * Return:
+ * * %0 - success
+ * * %-EINVAL - input parameters are incorrect
+ * * %-EBUSY - failed to lock TSPLL
+ * * %other - CGU read/write failure
+ */
+static int ice_tspll_cfg_e825c(struct ice_hw *hw, enum ice_tspll_freq clk_freq,
+ enum ice_clk_src clk_src)
+{
+ u32 val, r9, r23;
+ int err;
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_R9, &r9);
+ if (err)
+ return err;
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_R23, &r23);
+ if (err)
+ return err;
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_RO_LOCK, &val);
+ if (err)
+ return err;
+
+ ice_tspll_log_cfg(hw, !!FIELD_GET(ICE_CGU_R23_R24_TSPLL_ENABLE, r23),
+ FIELD_GET(ICE_CGU_R23_R24_TIME_REF_SEL, r23),
+ FIELD_GET(ICE_CGU_R9_TIME_REF_FREQ_SEL, r9),
+ !!FIELD_GET(ICE_CGU_RO_LOCK_TRUE_LOCK, val),
+ false);
+
+ /* Disable the PLL before changing the clock source or frequency */
+ if (FIELD_GET(ICE_CGU_R23_R24_TSPLL_ENABLE, r23)) {
+ r23 &= ~ICE_CGU_R23_R24_TSPLL_ENABLE;
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R23, r23);
+ if (err)
+ return err;
+ }
+
+ if (FIELD_GET(ICE_CGU_R9_TIME_SYNC_EN, r9)) {
+ r9 &= ~ICE_CGU_R9_TIME_SYNC_EN;
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R9, r9);
+ if (err)
+ return err;
+ }
+
+ /* Set the frequency and enable the correct receiver */
+ r9 &= ~(ICE_CGU_R9_TIME_REF_FREQ_SEL | ICE_CGU_R9_CLK_EREF0_EN |
+ ICE_CGU_R9_TIME_REF_EN);
+ r9 |= FIELD_PREP(ICE_CGU_R9_TIME_REF_FREQ_SEL, clk_freq);
+ if (clk_src == ICE_CLK_SRC_TCXO)
+ r9 |= ICE_CGU_R9_CLK_EREF0_EN;
+ else
+ r9 |= ICE_CGU_R9_TIME_REF_EN;
+ r9 |= ICE_CGU_R9_TIME_SYNC_EN;
+ err = ice_write_cgu_reg(hw, ICE_CGU_R9, r9);
+ if (err)
+ return err;
+
+	/* Choose the reference frequency */
+ err = ice_read_cgu_reg(hw, ICE_CGU_R16, &val);
+ if (err)
+ return err;
+ val &= ~ICE_CGU_R16_TSPLL_CK_REFCLKFREQ;
+ val |= FIELD_PREP(ICE_CGU_R16_TSPLL_CK_REFCLKFREQ,
+ ICE_TSPLL_CK_REFCLKFREQ_E825);
+ err = ice_write_cgu_reg(hw, ICE_CGU_R16, val);
+ if (err)
+ return err;
+
+ /* Configure the TSPLL feedback divisor */
+ err = ice_read_cgu_reg(hw, ICE_CGU_R19, &val);
+ if (err)
+ return err;
+
+ val &= ~(ICE_CGU_R19_TSPLL_FBDIV_INTGR_E825 |
+ ICE_CGU_R19_TSPLL_NDIVRATIO);
+ val |= FIELD_PREP(ICE_CGU_R19_TSPLL_FBDIV_INTGR_E825,
+ ICE_TSPLL_FBDIV_INTGR_E825);
+ val |= FIELD_PREP(ICE_CGU_R19_TSPLL_NDIVRATIO,
+ ICE_TSPLL_NDIVRATIO_E825);
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R19, val);
+ if (err)
+ return err;
+
+	/* Configure the TSPLL post divisor; these two fields are constant */
+ err = ice_read_cgu_reg(hw, ICE_CGU_R22, &val);
+ if (err)
+ return err;
+
+ val &= ~(ICE_CGU_R22_TIME1588CLK_DIV |
+ ICE_CGU_R22_TIME1588CLK_DIV2);
+ val |= FIELD_PREP(ICE_CGU_R22_TIME1588CLK_DIV, 5);
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R22, val);
+ if (err)
+ return err;
+
+ /* Configure the TSPLL pre divisor (constant) and clock source */
+ err = ice_read_cgu_reg(hw, ICE_CGU_R23, &r23);
+ if (err)
+ return err;
+
+ r23 &= ~(ICE_CGU_R23_R24_REF1588_CK_DIV | ICE_CGU_R23_R24_TIME_REF_SEL);
+ r23 |= FIELD_PREP(ICE_CGU_R23_R24_TIME_REF_SEL, clk_src);
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R23, r23);
+ if (err)
+ return err;
+
+ /* Clear the R24 register. */
+ err = ice_write_cgu_reg(hw, ICE_CGU_R24, 0);
+ if (err)
+ return err;
+
+ /* Wait to ensure everything is stable */
+ usleep_range(10, 20);
+
+ /* Finally, enable the PLL */
+ r23 |= ICE_CGU_R23_R24_TSPLL_ENABLE;
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R23, r23);
+ if (err)
+ return err;
+
+ /* Wait at least 1 ms to verify if the PLL locks */
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_RO_LOCK, &val);
+ if (err)
+ return err;
+
+ if (!(val & ICE_CGU_RO_LOCK_TRUE_LOCK)) {
+ dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n");
+ return -EBUSY;
+ }
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_R9, &r9);
+ if (err)
+ return err;
+ err = ice_read_cgu_reg(hw, ICE_CGU_R23, &r23);
+ if (err)
+ return err;
+
+ ice_tspll_log_cfg(hw, !!FIELD_GET(ICE_CGU_R23_R24_TSPLL_ENABLE, r23),
+ FIELD_GET(ICE_CGU_R23_R24_TIME_REF_SEL, r23),
+ FIELD_GET(ICE_CGU_R9_TIME_REF_FREQ_SEL, r9),
+ true, true);
+
+ return 0;
+}
+
+/**
+ * ice_tspll_dis_sticky_bits_e825c - disable TSPLL sticky bits for E825-C
+ * @hw: Pointer to the HW struct
+ *
+ * Configure the Clock Generation Unit TSPLL sticky bits so they don't latch on
+ * losing TSPLL lock, but always show current state.
+ *
+ * Return: 0 on success, other error codes if the CGU read/write fails.
+ */
+static int ice_tspll_dis_sticky_bits_e825c(struct ice_hw *hw)
+{
+ u32 val;
+ int err;
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_BW_TDC, &val);
+ if (err)
+ return err;
+
+ val &= ~ICE_CGU_BW_TDC_PLLLOCK_SEL;
+
+ return ice_write_cgu_reg(hw, ICE_CGU_BW_TDC, val);
+}
+
+/**
+ * ice_tspll_cfg_pps_out_e825c - Enable/disable 1PPS output and set amplitude
+ * @hw: pointer to the HW struct
+ * @enable: true to enable 1PPS output, false to disable it
+ *
+ * Return: 0 on success, a negative error code if the CGU read/write fails.
+ */
+int ice_tspll_cfg_pps_out_e825c(struct ice_hw *hw, bool enable)
+{
+ u32 val;
+ int err;
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_R9, &val);
+ if (err)
+ return err;
+
+ val &= ~(ICE_CGU_R9_ONE_PPS_OUT_EN | ICE_CGU_R9_ONE_PPS_OUT_AMP);
+ val |= FIELD_PREP(ICE_CGU_R9_ONE_PPS_OUT_EN, enable) |
+ ICE_CGU_R9_ONE_PPS_OUT_AMP;
+
+ return ice_write_cgu_reg(hw, ICE_CGU_R9, val);
+}
+
+/**
+ * ice_tspll_cfg - Configure the Clock Generation Unit TSPLL
+ * @hw: Pointer to the HW struct
+ * @clk_freq: Clock frequency to program
+ * @clk_src: Clock source to select (TIME_REF, or TCXO)
+ *
+ * Configure the Clock Generation Unit with the desired clock frequency and
+ * time reference, enabling the TSPLL which drives the PTP hardware clock.
+ *
+ * Return: 0 on success, -ERANGE on unsupported MAC type, other negative error
+ * codes when failed to configure CGU.
+ */
+static int ice_tspll_cfg(struct ice_hw *hw, enum ice_tspll_freq clk_freq,
+ enum ice_clk_src clk_src)
+{
+ switch (hw->mac_type) {
+ case ICE_MAC_GENERIC:
+ return ice_tspll_cfg_e82x(hw, clk_freq, clk_src);
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_tspll_cfg_e825c(hw, clk_freq, clk_src);
+ default:
+ return -ERANGE;
+ }
+}
+
+/**
+ * ice_tspll_dis_sticky_bits - disable TSPLL sticky bits
+ * @hw: Pointer to the HW struct
+ *
+ * Configure the Clock Generation Unit TSPLL sticky bits so they don't latch on
+ * losing TSPLL lock, but always show current state.
+ *
+ * Return: 0 on success, -ERANGE on unsupported MAC type.
+ */
+static int ice_tspll_dis_sticky_bits(struct ice_hw *hw)
+{
+ switch (hw->mac_type) {
+ case ICE_MAC_GENERIC:
+ return ice_tspll_dis_sticky_bits_e82x(hw);
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_tspll_dis_sticky_bits_e825c(hw);
+ default:
+ return -ERANGE;
+ }
+}
+
+/**
+ * ice_tspll_init - Initialize TSPLL with settings from firmware
+ * @hw: Pointer to the HW structure
+ *
+ * Initialize the Clock Generation Unit of the E82X/E825 device.
+ *
+ * Return: 0 on success, other error codes if reading, writing or configuring the CGU fails.
+ */
+int ice_tspll_init(struct ice_hw *hw)
+{
+ struct ice_ts_func_info *ts_info = &hw->func_caps.ts_func_info;
+ enum ice_tspll_freq tspll_freq;
+ enum ice_clk_src clk_src;
+ int err;
+
+ /* Only E822, E823 and E825 products support TSPLL */
+ if (hw->mac_type != ICE_MAC_GENERIC &&
+ hw->mac_type != ICE_MAC_GENERIC_3K_E825)
+ return 0;
+
+ tspll_freq = (enum ice_tspll_freq)ts_info->time_ref;
+ clk_src = (enum ice_clk_src)ts_info->clk_src;
+ if (!ice_tspll_check_params(hw, tspll_freq, clk_src))
+ return -EINVAL;
+
+ /* Disable sticky lock detection so lock status reported is accurate */
+ err = ice_tspll_dis_sticky_bits(hw);
+ if (err)
+ return err;
+
+ /* Configure the TSPLL using the parameters from the function
+ * capabilities.
+ */
+ err = ice_tspll_cfg(hw, tspll_freq, clk_src);
+ if (err) {
+ dev_warn(ice_hw_to_dev(hw), "Failed to lock TSPLL to predefined frequency. Retrying with fallback frequency.\n");
+
+ /* Try to lock to internal TCXO as a fallback. */
+ tspll_freq = ice_tspll_default_freq(hw->mac_type);
+ clk_src = ICE_CLK_SRC_TCXO;
+ err = ice_tspll_cfg(hw, tspll_freq, clk_src);
+ if (err)
+ dev_warn(ice_hw_to_dev(hw), "Failed to lock TSPLL to fallback frequency.\n");
+ }
+
+ return err;
+}
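
If the firmware-provided TIME_REF settings fail to lock, the code above retries once with the internal TCXO at the default frequency rather than giving up. The same try-then-fallback shape, reduced to a self-contained sketch (configure() is a stand-in, not a driver function):

#include <errno.h>
#include <stdio.h>

/* Stand-in for the real configuration call: pretend only the fallback
 * clock source manages to lock. */
static int configure(int freq, int use_tcxo)
{
	return use_tcxo ? 0 : -EBUSY;
}

int main(void)
{
	int err = configure(3 /* firmware-provided frequency */, 0 /* TIME_REF */);

	if (err) {
		fprintf(stderr, "preferred config failed (%d), trying TCXO fallback\n", err);
		err = configure(4 /* default frequency */, 1 /* TCXO */);
	}
	return err ? 1 : 0;
}
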
diff --git a/drivers/net/ethernet/intel/ice/ice_tspll.h b/drivers/net/ethernet/intel/ice/ice_tspll.h
new file mode 100644
index 000000000000..c0b1232cc07c
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_tspll.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025, Intel Corporation. */
+
+#ifndef _ICE_TSPLL_H_
+#define _ICE_TSPLL_H_
+
+/**
+ * struct ice_tspll_params_e82x - E82X TSPLL parameters
+ * @refclk_pre_div: Reference clock pre-divisor
+ * @post_pll_div: Post PLL divisor
+ * @feedback_div: Feedback divisor
+ * @frac_n_div: Fractional divisor
+ *
+ * Clock Generation Unit parameters used to program the PLL based on the
+ * selected TIME_REF/TCXO frequency.
+ */
+struct ice_tspll_params_e82x {
+ u8 refclk_pre_div;
+ u8 post_pll_div;
+ u8 feedback_div;
+ u32 frac_n_div;
+};
+
+#define ICE_TSPLL_CK_REFCLKFREQ_E825 0x1F
+#define ICE_TSPLL_NDIVRATIO_E825 5
+#define ICE_TSPLL_FBDIV_INTGR_E825 256
+
+int ice_tspll_cfg_pps_out_e825c(struct ice_hw *hw, bool enable);
+int ice_tspll_init(struct ice_hw *hw);
+
+#endif /* _ICE_TSPLL_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 97d41d6ebf1f..ad76768a4232 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -7,6 +7,8 @@
#include <linux/netdevice.h>
#include <linux/prefetch.h>
#include <linux/bpf_trace.h>
+#include <linux/net/intel/libie/rx.h>
+#include <net/libeth/xdp.h>
#include <net/dsfield.h>
#include <net/mpls.h>
#include <net/xdp.h>
@@ -20,7 +22,6 @@
#define ICE_RX_HDR_SIZE 256
-#define FDIR_DESC_RXDID 0x40
#define ICE_FDIR_CLEAN_DELAY 10
/**
@@ -112,7 +113,7 @@ ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
static void
ice_unmap_and_free_tx_buf(struct ice_tx_ring *ring, struct ice_tx_buf *tx_buf)
{
- if (dma_unmap_len(tx_buf, len))
+ if (tx_buf->type != ICE_TX_BUF_XDP_TX && dma_unmap_len(tx_buf, len))
dma_unmap_page(ring->dev,
dma_unmap_addr(tx_buf, dma),
dma_unmap_len(tx_buf, len),
@@ -126,7 +127,7 @@ ice_unmap_and_free_tx_buf(struct ice_tx_ring *ring, struct ice_tx_buf *tx_buf)
dev_kfree_skb_any(tx_buf->skb);
break;
case ICE_TX_BUF_XDP_TX:
- page_frag_free(tx_buf->raw_buf);
+ libeth_xdp_return_va(tx_buf->raw_buf, false);
break;
case ICE_TX_BUF_XDP_XMIT:
xdp_return_frame(tx_buf->xdpf);
@@ -145,6 +146,56 @@ static struct netdev_queue *txring_txq(const struct ice_tx_ring *ring)
}
/**
+ * ice_clean_tstamp_ring - clean time stamp ring
+ * @tx_ring: Tx ring to clean the Time Stamp ring for
+ */
+static void ice_clean_tstamp_ring(struct ice_tx_ring *tx_ring)
+{
+ struct ice_tstamp_ring *tstamp_ring = tx_ring->tstamp_ring;
+ u32 size;
+
+ if (!tstamp_ring->desc)
+ return;
+
+ size = ALIGN(tstamp_ring->count * sizeof(struct ice_ts_desc),
+ PAGE_SIZE);
+ memset(tstamp_ring->desc, 0, size);
+ tstamp_ring->next_to_use = 0;
+}
+
+/**
+ * ice_free_tstamp_ring - free time stamp resources per queue
+ * @tx_ring: Tx ring to free the Time Stamp ring for
+ */
+void ice_free_tstamp_ring(struct ice_tx_ring *tx_ring)
+{
+ struct ice_tstamp_ring *tstamp_ring = tx_ring->tstamp_ring;
+ u32 size;
+
+ if (!tstamp_ring->desc)
+ return;
+
+ ice_clean_tstamp_ring(tx_ring);
+ size = ALIGN(tstamp_ring->count * sizeof(struct ice_ts_desc),
+ PAGE_SIZE);
+ dmam_free_coherent(tx_ring->dev, size, tstamp_ring->desc,
+ tstamp_ring->dma);
+ tstamp_ring->desc = NULL;
+}
+
+/**
+ * ice_free_tx_tstamp_ring - free time stamp resources per Tx ring
+ * @tx_ring: Tx ring to free the Time Stamp ring for
+ */
+void ice_free_tx_tstamp_ring(struct ice_tx_ring *tx_ring)
+{
+ ice_free_tstamp_ring(tx_ring);
+ kfree_rcu(tx_ring->tstamp_ring, rcu);
+ tx_ring->tstamp_ring = NULL;
+ tx_ring->flags &= ~ICE_TX_FLAGS_TXTIME;
+}
+
+/**
* ice_clean_tx_ring - Free any empty Tx buffers
* @tx_ring: ring to be cleaned
*/
@@ -182,6 +233,9 @@ tx_skip_free:
/* cleanup Tx queue statistics */
netdev_tx_reset_queue(txring_txq(tx_ring));
+
+ if (ice_is_txtime_cfg(tx_ring))
+ ice_free_tx_tstamp_ring(tx_ring);
}
/**
@@ -333,6 +387,84 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
}
/**
+ * ice_alloc_tstamp_ring - allocate the Time Stamp ring
+ * @tx_ring: Tx ring to allocate the Time Stamp ring for
+ *
+ * Return: 0 on success, negative on error
+ */
+static int ice_alloc_tstamp_ring(struct ice_tx_ring *tx_ring)
+{
+ struct ice_tstamp_ring *tstamp_ring;
+
+ /* allocate with kzalloc(), free with kfree_rcu() */
+ tstamp_ring = kzalloc(sizeof(*tstamp_ring), GFP_KERNEL);
+ if (!tstamp_ring)
+ return -ENOMEM;
+
+ tstamp_ring->tx_ring = tx_ring;
+ tx_ring->tstamp_ring = tstamp_ring;
+ tstamp_ring->desc = NULL;
+ tstamp_ring->count = ice_calc_ts_ring_count(tx_ring);
+ tx_ring->flags |= ICE_TX_FLAGS_TXTIME;
+ return 0;
+}
+
+/**
+ * ice_setup_tstamp_ring - set up descriptor memory for the Time Stamp ring
+ * @tx_ring: Tx ring to set up the Time Stamp ring for
+ *
+ * Return: 0 on success, negative on error
+ */
+static int ice_setup_tstamp_ring(struct ice_tx_ring *tx_ring)
+{
+ struct ice_tstamp_ring *tstamp_ring = tx_ring->tstamp_ring;
+ struct device *dev = tx_ring->dev;
+ u32 size;
+
+ /* round up to nearest page */
+ size = ALIGN(tstamp_ring->count * sizeof(struct ice_ts_desc),
+ PAGE_SIZE);
+ tstamp_ring->desc = dmam_alloc_coherent(dev, size, &tstamp_ring->dma,
+ GFP_KERNEL);
+ if (!tstamp_ring->desc) {
+ dev_err(dev, "Unable to allocate memory for Time stamp Ring, size=%d\n",
+ size);
+ return -ENOMEM;
+ }
+
+ tstamp_ring->next_to_use = 0;
+ return 0;
+}
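
The descriptor array is sized by rounding the raw byte count up to a whole number of pages, the same ALIGN(count * sizeof(desc), PAGE_SIZE) pattern used for the Tx and Rx descriptor rings elsewhere in this file. A standalone illustration of that arithmetic (4 KiB pages and a 16-byte descriptor size assumed purely for the example):

#include <stdio.h>

/* Round x up to the next multiple of the power-of-two a; equivalent to
 * the kernel's ALIGN() for these inputs. */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((unsigned long long)(a) - 1))

int main(void)
{
	unsigned long long count = 1000;	/* descriptors; example value */
	unsigned long long desc_size = 16;	/* bytes per descriptor; assumed */
	unsigned long long size = ALIGN_UP(count * desc_size, 4096ULL);

	printf("%llu\n", size);	/* 16384: 16000 bytes rounded up to 4 pages */
	return 0;
}
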
+
+/**
+ * ice_alloc_setup_tstamp_ring - Allocate and setup the Time Stamp ring
+ * @tx_ring: Tx ring to allocate and setup the Time Stamp ring for
+ *
+ * Return: 0 on success, negative on error
+ */
+int ice_alloc_setup_tstamp_ring(struct ice_tx_ring *tx_ring)
+{
+ struct device *dev = tx_ring->dev;
+ int err;
+
+ err = ice_alloc_tstamp_ring(tx_ring);
+ if (err) {
+ dev_err(dev, "Unable to allocate Time stamp ring for Tx ring %d\n",
+ tx_ring->q_index);
+ return err;
+ }
+
+ err = ice_setup_tstamp_ring(tx_ring);
+ if (err) {
+ dev_err(dev, "Unable to setup Time stamp ring for Tx ring %d\n",
+ tx_ring->q_index);
+ ice_free_tx_tstamp_ring(tx_ring);
+ return err;
+ }
+ return 0;
+}
+
+/**
* ice_setup_tx_ring - Allocate the Tx descriptors
* @tx_ring: the Tx ring to set up
*
@@ -376,61 +508,67 @@ err:
return -ENOMEM;
}
+void ice_rxq_pp_destroy(struct ice_rx_ring *rq)
+{
+ struct libeth_fq fq = {
+ .fqes = rq->rx_fqes,
+ .pp = rq->pp,
+ };
+
+ libeth_rx_fq_destroy(&fq);
+ rq->rx_fqes = NULL;
+ rq->pp = NULL;
+
+ if (!rq->hdr_pp)
+ return;
+
+ fq.fqes = rq->hdr_fqes;
+ fq.pp = rq->hdr_pp;
+
+ libeth_rx_fq_destroy(&fq);
+ rq->hdr_fqes = NULL;
+ rq->hdr_pp = NULL;
+}
+
/**
* ice_clean_rx_ring - Free Rx buffers
* @rx_ring: ring to be cleaned
*/
void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)
{
- struct xdp_buff *xdp = &rx_ring->xdp;
- struct device *dev = rx_ring->dev;
u32 size;
- u16 i;
-
- /* ring already cleared, nothing to do */
- if (!rx_ring->rx_buf)
- return;
if (rx_ring->xsk_pool) {
ice_xsk_clean_rx_ring(rx_ring);
goto rx_skip_free;
}
- if (xdp->data) {
- xdp_return_buff(xdp);
- xdp->data = NULL;
- }
+ /* ring already cleared, nothing to do */
+ if (!rx_ring->rx_fqes)
+ return;
+
+ libeth_xdp_return_stash(&rx_ring->xdp);
/* Free all the Rx ring sk_buffs */
- for (i = 0; i < rx_ring->count; i++) {
- struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
+ for (u32 i = rx_ring->next_to_clean; i != rx_ring->next_to_use; ) {
+ libeth_rx_recycle_slow(rx_ring->rx_fqes[i].netmem);
- if (!rx_buf->page)
- continue;
+ if (rx_ring->hdr_pp)
+ libeth_rx_recycle_slow(rx_ring->hdr_fqes[i].netmem);
- /* Invalidate cache lines that may have been written to by
- * device so that we avoid corrupting memory.
- */
- dma_sync_single_range_for_cpu(dev, rx_buf->dma,
- rx_buf->page_offset,
- rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
-
- /* free resources associated with mapping */
- dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),
- DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
- __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
-
- rx_buf->page = NULL;
- rx_buf->page_offset = 0;
+ if (unlikely(++i == rx_ring->count))
+ i = 0;
}
-rx_skip_free:
- if (rx_ring->xsk_pool)
- memset(rx_ring->xdp_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->xdp_buf)));
- else
- memset(rx_ring->rx_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->rx_buf)));
+ if (rx_ring->vsi->type == ICE_VSI_PF &&
+ xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) {
+ xdp_rxq_info_detach_mem_model(&rx_ring->xdp_rxq);
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
+ }
+
+ ice_rxq_pp_destroy(rx_ring);
+rx_skip_free:
/* Zero out the descriptor ring */
size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
PAGE_SIZE);
@@ -438,7 +576,6 @@ rx_skip_free:
rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
- rx_ring->first_desc = 0;
rx_ring->next_to_use = 0;
}
@@ -450,26 +587,20 @@ rx_skip_free:
*/
void ice_free_rx_ring(struct ice_rx_ring *rx_ring)
{
+ struct device *dev = ice_pf_to_dev(rx_ring->vsi->back);
u32 size;
ice_clean_rx_ring(rx_ring);
- if (rx_ring->vsi->type == ICE_VSI_PF)
- if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
- xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
- rx_ring->xdp_prog = NULL;
+ WRITE_ONCE(rx_ring->xdp_prog, NULL);
if (rx_ring->xsk_pool) {
kfree(rx_ring->xdp_buf);
rx_ring->xdp_buf = NULL;
- } else {
- kfree(rx_ring->rx_buf);
- rx_ring->rx_buf = NULL;
}
if (rx_ring->desc) {
size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
PAGE_SIZE);
- dmam_free_coherent(rx_ring->dev, size,
- rx_ring->desc, rx_ring->dma);
+ dmam_free_coherent(dev, size, rx_ring->desc, rx_ring->dma);
rx_ring->desc = NULL;
}
}
@@ -482,19 +613,9 @@ void ice_free_rx_ring(struct ice_rx_ring *rx_ring)
*/
int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
{
- struct device *dev = rx_ring->dev;
+ struct device *dev = ice_pf_to_dev(rx_ring->vsi->back);
u32 size;
- if (!dev)
- return -ENOMEM;
-
- /* warn if we are about to overwrite the pointer */
- WARN_ON(rx_ring->rx_buf);
- rx_ring->rx_buf =
- kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);
- if (!rx_ring->rx_buf)
- return -ENOMEM;
-
/* round up to nearest page */
size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
PAGE_SIZE);
@@ -503,46 +624,16 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
if (!rx_ring->desc) {
dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
size);
- goto err;
+ return -ENOMEM;
}
rx_ring->next_to_use = 0;
rx_ring->next_to_clean = 0;
- rx_ring->first_desc = 0;
if (ice_is_xdp_ena_vsi(rx_ring->vsi))
WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);
return 0;
-
-err:
- kfree(rx_ring->rx_buf);
- rx_ring->rx_buf = NULL;
- return -ENOMEM;
-}
-
-/**
- * ice_rx_frame_truesize
- * @rx_ring: ptr to Rx ring
- * @size: size
- *
- * calculate the truesize with taking into the account PAGE_SIZE of
- * underlying arch
- */
-static unsigned int
-ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, const unsigned int size)
-{
- unsigned int truesize;
-
-#if (PAGE_SIZE < 8192)
- truesize = ice_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
-#else
- truesize = rx_ring->rx_offset ?
- SKB_DATA_ALIGN(rx_ring->rx_offset + size) +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
- SKB_DATA_ALIGN(size);
-#endif
- return truesize;
}
/**
@@ -551,15 +642,14 @@ ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, const unsigned int size)
* @xdp: xdp_buff used as input to the XDP program
* @xdp_prog: XDP program to run
* @xdp_ring: ring to be used for XDP_TX action
- * @rx_buf: Rx buffer to store the XDP action
* @eop_desc: Last descriptor in packet to read metadata from
*
* Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
*/
-static void
-ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
+static u32
+ice_run_xdp(struct ice_rx_ring *rx_ring, struct libeth_xdp_buff *xdp,
struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring,
- struct ice_rx_buf *rx_buf, union ice_32b_rx_flex_desc *eop_desc)
+ union ice_32b_rx_flex_desc *eop_desc)
{
unsigned int ret = ICE_XDP_PASS;
u32 act;
@@ -567,23 +657,23 @@ ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
if (!xdp_prog)
goto exit;
- ice_xdp_meta_set_desc(xdp, eop_desc);
+ xdp->desc = eop_desc;
- act = bpf_prog_run_xdp(xdp_prog, xdp);
+ act = bpf_prog_run_xdp(xdp_prog, &xdp->base);
switch (act) {
case XDP_PASS:
break;
case XDP_TX:
if (static_branch_unlikely(&ice_xdp_locking_key))
spin_lock(&xdp_ring->tx_lock);
- ret = __ice_xmit_xdp_ring(xdp, xdp_ring, false);
+ ret = __ice_xmit_xdp_ring(&xdp->base, xdp_ring, false);
if (static_branch_unlikely(&ice_xdp_locking_key))
spin_unlock(&xdp_ring->tx_lock);
if (ret == ICE_XDP_CONSUMED)
goto out_failure;
break;
case XDP_REDIRECT:
- if (xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))
+ if (xdp_do_redirect(rx_ring->netdev, &xdp->base, xdp_prog))
goto out_failure;
ret = ICE_XDP_REDIR;
break;
@@ -595,10 +685,12 @@ out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_DROP:
+ libeth_xdp_return_buff(xdp);
ret = ICE_XDP_CONSUMED;
}
+
exit:
- ice_set_rx_bufs_act(xdp, rx_ring, ret);
+ return ret;
}
/**
@@ -685,50 +777,34 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
}
/**
- * ice_alloc_mapped_page - recycle or make a new page
- * @rx_ring: ring to use
- * @bi: rx_buf struct to modify
- *
- * Returns true if the page was successfully allocated or
- * reused.
+ * ice_init_ctrl_rx_descs - Initialize Rx descriptors for the control VSI.
+ * @rx_ring: ring to init descriptors on
+ * @count: number of descriptors to initialize
*/
-static bool
-ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi)
+void ice_init_ctrl_rx_descs(struct ice_rx_ring *rx_ring, u32 count)
{
- struct page *page = bi->page;
- dma_addr_t dma;
-
- /* since we are recycling buffers we should seldom need to alloc */
- if (likely(page))
- return true;
+ union ice_32b_rx_flex_desc *rx_desc;
+ u32 ntu = rx_ring->next_to_use;
- /* alloc new page for storage */
- page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
- if (unlikely(!page)) {
- rx_ring->ring_stats->rx_stats.alloc_page_failed++;
- return false;
- }
+ if (!count)
+ return;
- /* map page for use */
- dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring),
- DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
+ rx_desc = ICE_RX_DESC(rx_ring, ntu);
- /* if mapping failed free memory back to system since
- * there isn't much point in holding memory we can't use
- */
- if (dma_mapping_error(rx_ring->dev, dma)) {
- __free_pages(page, ice_rx_pg_order(rx_ring));
- rx_ring->ring_stats->rx_stats.alloc_page_failed++;
- return false;
- }
+ do {
+ rx_desc++;
+ ntu++;
+ if (unlikely(ntu == rx_ring->count)) {
+ rx_desc = ICE_RX_DESC(rx_ring, 0);
+ ntu = 0;
+ }
- bi->dma = dma;
- bi->page = page;
- bi->page_offset = rx_ring->rx_offset;
- page_ref_add(page, USHRT_MAX - 1);
- bi->pagecnt_bias = USHRT_MAX;
+ rx_desc->wb.status_error0 = 0;
+ count--;
+ } while (count);
- return true;
+ if (rx_ring->next_to_use != ntu)
+ ice_release_rx_desc(rx_ring, ntu);
}
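
Both this initializer and the buffer-allocation loop below advance a ring index that wraps back to zero once it reaches the ring size. A minimal self-contained sketch of that wrap-around pattern (values chosen only for illustration):

#include <stdio.h>

int main(void)
{
	unsigned int count = 4;	/* ring size, example value */
	unsigned int ntu = 2;	/* next-to-use index */

	/* Touch 5 entries, wrapping from the end of the ring back to
	 * index 0, the same way the descriptor loops wrap ntu. */
	for (int i = 0; i < 5; i++) {
		printf("%u ", ntu);
		if (++ntu == count)
			ntu = 0;
	}
	printf("\n");	/* prints: 2 3 0 1 2 */
	return 0;
}
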
/**
@@ -746,41 +822,60 @@ ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi)
*/
bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, unsigned int cleaned_count)
{
+ const struct libeth_fq_fp hdr_fq = {
+ .pp = rx_ring->hdr_pp,
+ .fqes = rx_ring->hdr_fqes,
+ .truesize = rx_ring->hdr_truesize,
+ .count = rx_ring->count,
+ };
+ const struct libeth_fq_fp fq = {
+ .pp = rx_ring->pp,
+ .fqes = rx_ring->rx_fqes,
+ .truesize = rx_ring->truesize,
+ .count = rx_ring->count,
+ };
union ice_32b_rx_flex_desc *rx_desc;
u16 ntu = rx_ring->next_to_use;
- struct ice_rx_buf *bi;
/* do nothing if no valid netdev defined */
- if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) ||
- !cleaned_count)
+ if (!rx_ring->netdev || !cleaned_count)
return false;
/* get the Rx descriptor and buffer based on next_to_use */
rx_desc = ICE_RX_DESC(rx_ring, ntu);
- bi = &rx_ring->rx_buf[ntu];
do {
- /* if we fail here, we have work remaining */
- if (!ice_alloc_mapped_page(rx_ring, bi))
- break;
+ dma_addr_t addr;
- /* sync the buffer for use by the device */
- dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
- bi->page_offset,
- rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
+ addr = libeth_rx_alloc(&fq, ntu);
+ if (addr == DMA_MAPPING_ERROR) {
+ rx_ring->ring_stats->rx_stats.alloc_page_failed++;
+ break;
+ }
/* Refresh the desc even if buffer_addrs didn't change
* because each write-back erases this info.
*/
- rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
+ rx_desc->read.pkt_addr = cpu_to_le64(addr);
+
+ if (!hdr_fq.pp)
+ goto next;
+ addr = libeth_rx_alloc(&hdr_fq, ntu);
+ if (addr == DMA_MAPPING_ERROR) {
+ rx_ring->ring_stats->rx_stats.alloc_page_failed++;
+
+ libeth_rx_recycle_slow(fq.fqes[ntu].netmem);
+ break;
+ }
+
+ rx_desc->read.hdr_addr = cpu_to_le64(addr);
+
+next:
rx_desc++;
- bi++;
ntu++;
if (unlikely(ntu == rx_ring->count)) {
rx_desc = ICE_RX_DESC(rx_ring, 0);
- bi = rx_ring->rx_buf;
ntu = 0;
}
@@ -797,341 +892,41 @@ bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, unsigned int cleaned_count)
}
/**
- * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
- * @rx_buf: Rx buffer to adjust
- * @size: Size of adjustment
+ * ice_clean_ctrl_rx_irq - Clean descriptors from flow director Rx ring
+ * @rx_ring: Rx descriptor ring for ctrl_vsi to transact packets on
*
- * Update the offset within page so that Rx buf will be ready to be reused.
- * For systems with PAGE_SIZE < 8192 this function will flip the page offset
- * so the second half of page assigned to Rx buffer will be used, otherwise
- * the offset is moved by "size" bytes
+ * This function cleans Rx descriptors from the ctrl_vsi Rx ring used
+ * to set flow director rules on VFs.
*/
-static void
-ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
+void ice_clean_ctrl_rx_irq(struct ice_rx_ring *rx_ring)
{
-#if (PAGE_SIZE < 8192)
- /* flip page offset to other buffer */
- rx_buf->page_offset ^= size;
-#else
- /* move offset up to the next cache line */
- rx_buf->page_offset += size;
-#endif
-}
-
-/**
- * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
- * @rx_buf: buffer containing the page
- *
- * If page is reusable, we have a green light for calling ice_reuse_rx_page,
- * which will assign the current buffer to the buffer that next_to_alloc is
- * pointing to; otherwise, the DMA mapping needs to be destroyed and
- * page freed
- */
-static bool
-ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
-{
- unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
- struct page *page = rx_buf->page;
-
- /* avoid re-using remote and pfmemalloc pages */
- if (!dev_page_is_reusable(page))
- return false;
-
-#if (PAGE_SIZE < 8192)
- /* if we are only owner of page we can reuse it */
- if (unlikely(rx_buf->pgcnt - pagecnt_bias > 1))
- return false;
-#else
-#define ICE_LAST_OFFSET \
- (SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048)
- if (rx_buf->page_offset > ICE_LAST_OFFSET)
- return false;
-#endif /* PAGE_SIZE < 8192) */
-
- /* If we have drained the page fragment pool we need to update
- * the pagecnt_bias and page count so that we fully restock the
- * number of references the driver holds.
- */
- if (unlikely(pagecnt_bias == 1)) {
- page_ref_add(page, USHRT_MAX - 1);
- rx_buf->pagecnt_bias = USHRT_MAX;
- }
-
- return true;
-}
-
-/**
- * ice_add_xdp_frag - Add contents of Rx buffer to xdp buf as a frag
- * @rx_ring: Rx descriptor ring to transact packets on
- * @xdp: xdp buff to place the data into
- * @rx_buf: buffer containing page to add
- * @size: packet length from rx_desc
- *
- * This function will add the data contained in rx_buf->page to the xdp buf.
- * It will just attach the page as a frag.
- */
-static int
-ice_add_xdp_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
- struct ice_rx_buf *rx_buf, const unsigned int size)
-{
- struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
-
- if (!size)
- return 0;
-
- if (!xdp_buff_has_frags(xdp)) {
- sinfo->nr_frags = 0;
- sinfo->xdp_frags_size = 0;
- xdp_buff_set_frags_flag(xdp);
- }
-
- if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
- ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED);
- return -ENOMEM;
- }
-
- __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page,
- rx_buf->page_offset, size);
- sinfo->xdp_frags_size += size;
- /* remember frag count before XDP prog execution; bpf_xdp_adjust_tail()
- * can pop off frags but driver has to handle it on its own
- */
- rx_ring->nr_frags = sinfo->nr_frags;
-
- if (page_is_pfmemalloc(rx_buf->page))
- xdp_buff_set_frag_pfmemalloc(xdp);
-
- return 0;
-}
-
-/**
- * ice_reuse_rx_page - page flip buffer and store it back on the ring
- * @rx_ring: Rx descriptor ring to store buffers on
- * @old_buf: donor buffer to have page reused
- *
- * Synchronizes page for reuse by the adapter
- */
-static void
-ice_reuse_rx_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *old_buf)
-{
- u16 nta = rx_ring->next_to_alloc;
- struct ice_rx_buf *new_buf;
-
- new_buf = &rx_ring->rx_buf[nta];
-
- /* update, and store next to alloc */
- nta++;
- rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
-
- /* Transfer page from old buffer to new buffer.
- * Move each member individually to avoid possible store
- * forwarding stalls and unnecessary copy of skb.
- */
- new_buf->dma = old_buf->dma;
- new_buf->page = old_buf->page;
- new_buf->page_offset = old_buf->page_offset;
- new_buf->pagecnt_bias = old_buf->pagecnt_bias;
-}
-
-/**
- * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
- * @rx_ring: Rx descriptor ring to transact packets on
- * @size: size of buffer to add to skb
- * @ntc: index of next to clean element
- *
- * This function will pull an Rx buffer from the ring and synchronize it
- * for use by the CPU.
- */
-static struct ice_rx_buf *
-ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
- const unsigned int ntc)
-{
- struct ice_rx_buf *rx_buf;
-
- rx_buf = &rx_ring->rx_buf[ntc];
- rx_buf->pgcnt =
-#if (PAGE_SIZE < 8192)
- page_count(rx_buf->page);
-#else
- 0;
-#endif
- prefetchw(rx_buf->page);
-
- if (!size)
- return rx_buf;
- /* we are reusing so sync this buffer for CPU use */
- dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
- rx_buf->page_offset, size,
- DMA_FROM_DEVICE);
-
- /* We have pulled a buffer for use, so decrement pagecnt_bias */
- rx_buf->pagecnt_bias--;
-
- return rx_buf;
-}
-
-/**
- * ice_build_skb - Build skb around an existing buffer
- * @rx_ring: Rx descriptor ring to transact packets on
- * @xdp: xdp_buff pointing to the data
- *
- * This function builds an skb around an existing XDP buffer, taking care
- * to set up the skb correctly and avoid any memcpy overhead. Driver has
- * already combined frags (if any) to skb_shared_info.
- */
-static struct sk_buff *
-ice_build_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
-{
- u8 metasize = xdp->data - xdp->data_meta;
- struct skb_shared_info *sinfo = NULL;
- unsigned int nr_frags;
- struct sk_buff *skb;
-
- if (unlikely(xdp_buff_has_frags(xdp))) {
- sinfo = xdp_get_shared_info_from_buff(xdp);
- nr_frags = sinfo->nr_frags;
- }
-
- /* Prefetch first cache line of first page. If xdp->data_meta
- * is unused, this points exactly as xdp->data, otherwise we
- * likely have a consumer accessing first few bytes of meta
- * data, and then actual data.
- */
- net_prefetch(xdp->data_meta);
- /* build an skb around the page buffer */
- skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz);
- if (unlikely(!skb))
- return NULL;
-
- /* must to record Rx queue, otherwise OS features such as
- * symmetric queue won't work
- */
- skb_record_rx_queue(skb, rx_ring->q_index);
-
- /* update pointers within the skb to store the data */
- skb_reserve(skb, xdp->data - xdp->data_hard_start);
- __skb_put(skb, xdp->data_end - xdp->data);
- if (metasize)
- skb_metadata_set(skb, metasize);
-
- if (unlikely(xdp_buff_has_frags(xdp)))
- xdp_update_skb_shared_info(skb, nr_frags,
- sinfo->xdp_frags_size,
- nr_frags * xdp->frame_sz,
- xdp_buff_is_frag_pfmemalloc(xdp));
-
- return skb;
-}
-
-/**
- * ice_construct_skb - Allocate skb and populate it
- * @rx_ring: Rx descriptor ring to transact packets on
- * @xdp: xdp_buff pointing to the data
- *
- * This function allocates an skb. It then populates it with the page
- * data from the current receive descriptor, taking care to set up the
- * skb correctly.
- */
-static struct sk_buff *
-ice_construct_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
-{
- unsigned int size = xdp->data_end - xdp->data;
- struct skb_shared_info *sinfo = NULL;
- struct ice_rx_buf *rx_buf;
- unsigned int nr_frags = 0;
- unsigned int headlen;
- struct sk_buff *skb;
-
- /* prefetch first cache line of first page */
- net_prefetch(xdp->data);
-
- if (unlikely(xdp_buff_has_frags(xdp))) {
- sinfo = xdp_get_shared_info_from_buff(xdp);
- nr_frags = sinfo->nr_frags;
- }
-
- /* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
- GFP_ATOMIC | __GFP_NOWARN);
- if (unlikely(!skb))
- return NULL;
-
- rx_buf = &rx_ring->rx_buf[rx_ring->first_desc];
- skb_record_rx_queue(skb, rx_ring->q_index);
- /* Determine available headroom for copy */
- headlen = size;
- if (headlen > ICE_RX_HDR_SIZE)
- headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);
-
- /* align pull length to size of long to optimize memcpy performance */
- memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen,
- sizeof(long)));
-
- /* if we exhaust the linear part then add what is left as a frag */
- size -= headlen;
- if (size) {
- /* besides adding here a partial frag, we are going to add
- * frags from xdp_buff, make sure there is enough space for
- * them
- */
- if (unlikely(nr_frags >= MAX_SKB_FRAGS - 1)) {
- dev_kfree_skb(skb);
- return NULL;
- }
- skb_add_rx_frag(skb, 0, rx_buf->page,
- rx_buf->page_offset + headlen, size,
- xdp->frame_sz);
- } else {
- /* buffer is unused, change the act that should be taken later
- * on; data was copied onto skb's linear part so there's no
- * need for adjusting page offset and we can reuse this buffer
- * as-is
- */
- rx_buf->act = ICE_SKB_CONSUMED;
- }
+ u32 ntc = rx_ring->next_to_clean;
+ unsigned int total_rx_pkts = 0;
+ u32 cnt = rx_ring->count;
- if (unlikely(xdp_buff_has_frags(xdp))) {
- struct skb_shared_info *skinfo = skb_shinfo(skb);
+ while (likely(total_rx_pkts < ICE_DFLT_IRQ_WORK)) {
+ struct ice_vsi *ctrl_vsi = rx_ring->vsi;
+ union ice_32b_rx_flex_desc *rx_desc;
+ u16 stat_err_bits;
- memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0],
- sizeof(skb_frag_t) * nr_frags);
+ rx_desc = ICE_RX_DESC(rx_ring, ntc);
- xdp_update_skb_shared_info(skb, skinfo->nr_frags + nr_frags,
- sinfo->xdp_frags_size,
- nr_frags * xdp->frame_sz,
- xdp_buff_is_frag_pfmemalloc(xdp));
- }
+ stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
+ if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
+ break;
- return skb;
-}
+ dma_rmb();
-/**
- * ice_put_rx_buf - Clean up used buffer and either recycle or free
- * @rx_ring: Rx descriptor ring to transact packets on
- * @rx_buf: Rx buffer to pull data from
- *
- * This function will clean up the contents of the rx_buf. It will either
- * recycle the buffer or unmap it and free the associated resources.
- */
-static void
-ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf)
-{
- if (!rx_buf)
- return;
+ if (ctrl_vsi->vf)
+ ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
- if (ice_can_reuse_rx_page(rx_buf)) {
- /* hand second half of page back to the ring */
- ice_reuse_rx_page(rx_ring, rx_buf);
- } else {
- /* we are not reusing the buffer so unmap it */
- dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
- ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
- ICE_RX_DMA_ATTR);
- __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
+ if (++ntc == cnt)
+ ntc = 0;
+ total_rx_pkts++;
}
- /* clear contents of buffer_info */
- rx_buf->page = NULL;
+ rx_ring->next_to_clean = ntc;
+ ice_init_ctrl_rx_descs(rx_ring, ICE_DESC_UNUSED(rx_ring));
}
/**
@@ -1146,25 +941,19 @@ ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf)
*
* Returns amount of work completed
*/
-int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
+static int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
- unsigned int offset = rx_ring->rx_offset;
- struct xdp_buff *xdp = &rx_ring->xdp;
- u32 cached_ntc = rx_ring->first_desc;
struct ice_tx_ring *xdp_ring = NULL;
struct bpf_prog *xdp_prog = NULL;
u32 ntc = rx_ring->next_to_clean;
+ LIBETH_XDP_ONSTACK_BUFF(xdp);
+ u32 cached_ntu, xdp_verdict;
u32 cnt = rx_ring->count;
u32 xdp_xmit = 0;
- u32 cached_ntu;
bool failure;
- u32 first;
- /* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
-#if (PAGE_SIZE < 8192)
- xdp->frame_sz = ice_rx_frame_truesize(rx_ring, 0);
-#endif
+ libeth_xdp_init_buff(xdp, &rx_ring->xdp, &rx_ring->xdp_rxq);
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
if (xdp_prog) {
@@ -1175,19 +964,21 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
/* start the loop to process Rx packets bounded by 'budget' */
while (likely(total_rx_pkts < (unsigned int)budget)) {
union ice_32b_rx_flex_desc *rx_desc;
- struct ice_rx_buf *rx_buf;
+ struct libeth_fqe *rx_buf;
struct sk_buff *skb;
unsigned int size;
u16 stat_err_bits;
u16 vlan_tci;
+ bool rxe;
/* get the Rx desc from Rx ring based on 'next_to_clean' */
rx_desc = ICE_RX_DESC(rx_ring, ntc);
- /* status_error_len will always be zero for unused descriptors
- * because it's cleared in cleanup, and overlaps with hdr_addr
- * which is always zero because packet split isn't used, if the
- * hardware wrote DD then it will be non-zero
+ /*
+ * The DD bit will always be zero for unused descriptors
+ * because it's cleared in cleanup or when setting the DMA
+ * address of the header buffer, which never uses the DD bit.
+ * If the hardware wrote the descriptor, it will be non-zero.
*/
stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
@@ -1200,89 +991,66 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
dma_rmb();
ice_trace(clean_rx_irq, rx_ring, rx_desc);
- if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
- struct ice_vsi *ctrl_vsi = rx_ring->vsi;
-
- if (rx_desc->wb.rxdid == FDIR_DESC_RXDID &&
- ctrl_vsi->vf)
- ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
- if (++ntc == cnt)
- ntc = 0;
- rx_ring->first_desc = ntc;
- continue;
- }
+ stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_HBO_S) |
+ BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
+ rxe = ice_test_staterr(rx_desc->wb.status_error0,
+ stat_err_bits);
+
+ if (!rx_ring->hdr_pp)
+ goto payload;
+
+ size = le16_get_bits(rx_desc->wb.hdr_len_sph_flex_flags1,
+ ICE_RX_FLEX_DESC_HDR_LEN_M);
+ if (unlikely(rxe))
+ size = 0;
+
+ rx_buf = &rx_ring->hdr_fqes[ntc];
+ libeth_xdp_process_buff(xdp, rx_buf, size);
+ rx_buf->netmem = 0;
+
+payload:
size = le16_to_cpu(rx_desc->wb.pkt_len) &
ICE_RX_FLX_DESC_PKT_LEN_M;
+ if (unlikely(rxe))
+ size = 0;
/* retrieve a buffer from the ring */
- rx_buf = ice_get_rx_buf(rx_ring, size, ntc);
-
- if (!xdp->data) {
- void *hard_start;
-
- hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
- offset;
- xdp_prepare_buff(xdp, hard_start, offset, size, !!offset);
-#if (PAGE_SIZE > 4096)
- /* At larger PAGE_SIZE, frame_sz depend on len size */
- xdp->frame_sz = ice_rx_frame_truesize(rx_ring, size);
-#endif
- xdp_buff_clear_frags_flag(xdp);
- } else if (ice_add_xdp_frag(rx_ring, xdp, rx_buf, size)) {
- break;
- }
+ rx_buf = &rx_ring->rx_fqes[ntc];
+ libeth_xdp_process_buff(xdp, rx_buf, size);
+
if (++ntc == cnt)
ntc = 0;
/* skip if it is NOP desc */
- if (ice_is_non_eop(rx_ring, rx_desc))
+ if (ice_is_non_eop(rx_ring, rx_desc) || unlikely(!xdp->data))
continue;
- ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_buf, rx_desc);
- if (rx_buf->act == ICE_XDP_PASS)
+ xdp_verdict = ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_desc);
+ if (xdp_verdict == ICE_XDP_PASS)
goto construct_skb;
- total_rx_bytes += xdp_get_buff_len(xdp);
+
+ if (xdp_verdict & (ICE_XDP_TX | ICE_XDP_REDIR))
+ xdp_xmit |= xdp_verdict;
+ total_rx_bytes += xdp_get_buff_len(&xdp->base);
total_rx_pkts++;
xdp->data = NULL;
- rx_ring->first_desc = ntc;
- rx_ring->nr_frags = 0;
continue;
+
construct_skb:
- if (likely(ice_ring_uses_build_skb(rx_ring)))
- skb = ice_build_skb(rx_ring, xdp);
- else
- skb = ice_construct_skb(rx_ring, xdp);
- /* exit if we failed to retrieve a buffer */
- if (!skb) {
- rx_ring->ring_stats->rx_stats.alloc_page_failed++;
- rx_buf->act = ICE_XDP_CONSUMED;
- if (unlikely(xdp_buff_has_frags(xdp)))
- ice_set_rx_bufs_act(xdp, rx_ring,
- ICE_XDP_CONSUMED);
- xdp->data = NULL;
- rx_ring->first_desc = ntc;
- rx_ring->nr_frags = 0;
- break;
- }
+ skb = xdp_build_skb_from_buff(&xdp->base);
xdp->data = NULL;
- rx_ring->first_desc = ntc;
- rx_ring->nr_frags = 0;
- stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
- if (unlikely(ice_test_staterr(rx_desc->wb.status_error0,
- stat_err_bits))) {
- dev_kfree_skb_any(skb);
+ /* exit if we failed to retrieve a buffer */
+ if (!skb) {
+ libeth_xdp_return_buff_slow(xdp);
+ rx_ring->ring_stats->rx_stats.alloc_buf_failed++;
continue;
}
vlan_tci = ice_get_vlan_tci(rx_desc);
- /* pad the skb if needed, to make a valid ethernet frame */
- if (eth_skb_pad(skb))
- continue;
-
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
@@ -1297,30 +1065,15 @@ construct_skb:
total_rx_pkts++;
}
- first = rx_ring->first_desc;
- while (cached_ntc != first) {
- struct ice_rx_buf *buf = &rx_ring->rx_buf[cached_ntc];
-
- if (buf->act & (ICE_XDP_TX | ICE_XDP_REDIR)) {
- ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
- xdp_xmit |= buf->act;
- } else if (buf->act & ICE_XDP_CONSUMED) {
- buf->pagecnt_bias++;
- } else if (buf->act == ICE_XDP_PASS) {
- ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
- }
-
- ice_put_rx_buf(rx_ring, buf);
- if (++cached_ntc >= cnt)
- cached_ntc = 0;
- }
rx_ring->next_to_clean = ntc;
/* return up to cleaned_count buffers to hardware */
- failure = ice_alloc_rx_bufs(rx_ring, ICE_RX_DESC_UNUSED(rx_ring));
+ failure = ice_alloc_rx_bufs(rx_ring, ICE_DESC_UNUSED(rx_ring));
if (xdp_xmit)
ice_finalize_xdp_rx(xdp_ring, xdp_xmit, cached_ntu);
+ libeth_xdp_save_buff(&rx_ring->xdp, xdp);
+
if (rx_ring->ring_stats)
ice_update_rx_ring_stats(rx_ring, total_rx_pkts,
total_rx_bytes);
@@ -1392,14 +1145,14 @@ static void ice_net_dim(struct ice_q_vector *q_vector)
struct dim_sample dim_sample;
__ice_update_sample(q_vector, tx, &dim_sample, true);
- net_dim(&tx->dim, dim_sample);
+ net_dim(&tx->dim, &dim_sample);
}
if (ITR_IS_DYNAMIC(rx)) {
struct dim_sample dim_sample;
__ice_update_sample(q_vector, rx, &dim_sample, false);
- net_dim(&rx->dim, dim_sample);
+ net_dim(&rx->dim, &dim_sample);
}
}
@@ -1522,10 +1275,11 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
ice_for_each_tx_ring(tx_ring, q_vector->tx) {
+ struct xsk_buff_pool *xsk_pool = READ_ONCE(tx_ring->xsk_pool);
bool wd;
- if (tx_ring->xsk_pool)
- wd = ice_xmit_zc(tx_ring);
+ if (xsk_pool)
+ wd = ice_xmit_zc(tx_ring, xsk_pool);
else if (ice_ring_is_xdp(tx_ring))
wd = true;
else
@@ -1551,6 +1305,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
budget_per_ring = budget;
ice_for_each_rx_ring(rx_ring, q_vector->rx) {
+ struct xsk_buff_pool *xsk_pool = READ_ONCE(rx_ring->xsk_pool);
int cleaned;
/* A dedicated path for zero-copy allows making a single
@@ -1558,7 +1313,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
* ice_clean_rx_irq function and makes the codebase cleaner.
*/
cleaned = rx_ring->xsk_pool ?
- ice_clean_rx_irq_zc(rx_ring, budget_per_ring) :
+ ice_clean_rx_irq_zc(rx_ring, xsk_pool, budget_per_ring) :
ice_clean_rx_irq(rx_ring, budget_per_ring);
work_done += cleaned;
/* if we clean as many as budgeted, we must not be done */
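For context, the poll loop above now takes one READ_ONCE() snapshot of the ring's xsk_pool pointer per ring. A minimal sketch (not part of this patch) of the configuration side it pairs with, assuming the attach/detach path publishes the pool with WRITE_ONCE(); the ex_set_xsk_pool() name is hypothetical:

#include <linux/compiler.h>
#include <net/xdp_sock_drv.h>
#include "ice_txrx.h"

static void ex_set_xsk_pool(struct ice_rx_ring *rx_ring,
			    struct xsk_buff_pool *pool)
{
	/* Pairs with READ_ONCE(rx_ring->xsk_pool) in ice_napi_poll();
	 * the real driver also quiesces the queue around the swap,
	 * which this sketch omits.
	 */
	WRITE_ONCE(rx_ring->xsk_pool, pool);	/* pool == NULL on detach */
}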
@@ -1760,10 +1515,46 @@ ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
/* notify HW of packet */
kick = __netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount,
netdev_xmit_more());
- if (kick)
- /* notify HW of packet */
- writel(i, tx_ring->tail);
+ if (!kick)
+ return;
+ if (ice_is_txtime_cfg(tx_ring)) {
+ struct ice_tstamp_ring *tstamp_ring = tx_ring->tstamp_ring;
+ u32 tstamp_count = tstamp_ring->count;
+ u32 j = tstamp_ring->next_to_use;
+ struct ice_ts_desc *ts_desc;
+ struct timespec64 ts;
+ u32 tstamp;
+
+ ts = ktime_to_timespec64(first->skb->tstamp);
+ tstamp = ts.tv_nsec >> ICE_TXTIME_CTX_RESOLUTION_128NS;
+
+ ts_desc = ICE_TS_DESC(tstamp_ring, j);
+ ts_desc->tx_desc_idx_tstamp = ice_build_tstamp_desc(i, tstamp);
+
+ j++;
+ if (j == tstamp_count) {
+ u32 fetch = tstamp_count - tx_ring->count;
+
+ j = 0;
+
+ /* To prevent an MDD when wrapping the tstamp ring,
+ * create additional TS descriptors equal to the fetch
+ * value (tstamp ring length minus Tx ring length). HW
+ * merges TS descriptors carrying the same timestamp
+ * into a single descriptor.
+ */
+ for (; j < fetch; j++) {
+ ts_desc = ICE_TS_DESC(tstamp_ring, j);
+ ts_desc->tx_desc_idx_tstamp =
+ ice_build_tstamp_desc(i, tstamp);
+ }
+ }
+ tstamp_ring->next_to_use = j;
+ writel_relaxed(j, tstamp_ring->tail);
+ } else {
+ writel_relaxed(i, tx_ring->tail);
+ }
return;
dma_error:
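For reference, a minimal sketch (not part of the patch) of the launch-time quantization used in the TxTime branch above. The shift of 7 is an assumption inferred from the _128NS suffix of ICE_TXTIME_CTX_RESOLUTION_128NS, and only the nanosecond part of skb->tstamp is consumed, as in ice_tx_map(); the ex_txtime_units() helper is hypothetical:

#include <linux/ktime.h>
#include <linux/time64.h>

static u32 ex_txtime_units(ktime_t launch_time)
{
	struct timespec64 ts = ktime_to_timespec64(launch_time);

	/* one descriptor unit == 128 ns, i.e. tv_nsec >> 7 */
	return (u32)(ts.tv_nsec >> 7);
}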
@@ -1791,6 +1582,7 @@ dma_error:
static
int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
+ const struct ice_tx_ring *tx_ring = off->tx_ring;
u32 l4_len = 0, l3_len = 0, l2_len = 0;
struct sk_buff *skb = first->skb;
union {
@@ -1940,6 +1732,30 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
l3_len = l4.hdr - ip.hdr;
offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;
+ if ((tx_ring->netdev->features & NETIF_F_HW_CSUM) &&
+ !(first->tx_flags & ICE_TX_FLAGS_TSO) &&
+ !skb_csum_is_sctp(skb)) {
+ /* Set GCS */
+ u16 csum_start = (skb->csum_start - skb->mac_header) / 2;
+ u16 csum_offset = skb->csum_offset / 2;
+ u16 gcs_params;
+
+ gcs_params = FIELD_PREP(ICE_TX_GCS_DESC_START_M, csum_start) |
+ FIELD_PREP(ICE_TX_GCS_DESC_OFFSET_M, csum_offset) |
+ FIELD_PREP(ICE_TX_GCS_DESC_TYPE_M,
+ ICE_TX_GCS_DESC_CSUM_PSH);
+
+ /* Unlike legacy HW checksums, GCS requires a context
+ * descriptor.
+ */
+ off->cd_qw1 |= ICE_TX_DESC_DTYPE_CTX;
+ off->cd_gcs_params = gcs_params;
+ /* Fill out CSO info in data descriptors */
+ off->td_offset |= offset;
+ off->td_cmd |= cmd;
+ return 1;
+ }
+
/* Enable L4 checksum offloads */
switch (l4_proto) {
case IPPROTO_TCP:
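For reference, a short sketch (not part of the patch) of how the GCS context-descriptor fields above are derived: the hardware expects both the checksum start and the offset of the checksum field in 16-bit words. The ex_gcs_fields() helper is hypothetical and the frame-layout numbers in the comments are illustrative only:

#include <linux/skbuff.h>

static void ex_gcs_fields(const struct sk_buff *skb,
			  u16 *start_words, u16 *offset_words)
{
	/* e.g. 14 B Ethernet + 20 B IPv4 header: csum_start - mac_header
	 * is 34 bytes, i.e. 17 words
	 */
	*start_words = (skb->csum_start - skb->mac_header) / 2;

	/* e.g. the TCP checksum sits at byte 16 of the L4 header,
	 * i.e. word 8
	 */
	*offset_words = skb->csum_offset / 2;
}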
@@ -2397,17 +2213,20 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
/* allow CONTROL frames egress from main VSI if FW LLDP disabled */
eth = (struct ethhdr *)skb_mac_header(skb);
- if (unlikely((skb->priority == TC_PRIO_CONTROL ||
- eth->h_proto == htons(ETH_P_LLDP)) &&
- vsi->type == ICE_VSI_PF &&
- vsi->port_info->qos_cfg.is_sw_lldp))
+
+ if ((ice_is_switchdev_running(vsi->back) ||
+ ice_lag_is_switchdev_running(vsi->back)) &&
+ vsi->type != ICE_VSI_SF)
+ ice_eswitch_set_target_vsi(skb, &offload);
+ else if (unlikely((skb->priority == TC_PRIO_CONTROL ||
+ eth->h_proto == htons(ETH_P_LLDP)) &&
+ vsi->type == ICE_VSI_PF &&
+ vsi->port_info->qos_cfg.is_sw_lldp))
offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
ICE_TX_CTX_DESC_SWTCH_UPLINK <<
ICE_TXD_CTX_QW1_CMD_S);
ice_tstamp(tx_ring, skb, first, &offload);
- if (ice_is_switchdev_running(vsi->back))
- ice_eswitch_set_target_vsi(skb, &offload);
if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
struct ice_tx_ctx_desc *cdesc;
@@ -2421,7 +2240,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
/* setup context descriptor */
cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
- cdesc->rsvd = cpu_to_le16(0);
+ cdesc->gcs = cpu_to_le16(offload.cd_gcs_params);
cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index af955b0e5dc5..e440c55d9e9f 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -4,6 +4,8 @@
#ifndef _ICE_TXRX_H_
#define _ICE_TXRX_H_
+#include <net/libeth/types.h>
+
#include "ice_type.h"
#define ICE_DFLT_IRQ_WORK 256
@@ -27,72 +29,6 @@
#define ICE_MAX_TXQ_PER_TXQG 128
-/* Attempt to maximize the headroom available for incoming frames. We use a 2K
- * buffer for MTUs <= 1500 and need 1536/1534 to store the data for the frame.
- * This leaves us with 512 bytes of room. From that we need to deduct the
- * space needed for the shared info and the padding needed to IP align the
- * frame.
- *
- * Note: For cache line sizes 256 or larger this value is going to end
- * up negative. In these cases we should fall back to the legacy
- * receive path.
- */
-#if (PAGE_SIZE < 8192)
-#define ICE_2K_TOO_SMALL_WITH_PADDING \
- ((unsigned int)(NET_SKB_PAD + ICE_RXBUF_1536) > \
- SKB_WITH_OVERHEAD(ICE_RXBUF_2048))
-
-/**
- * ice_compute_pad - compute the padding
- * @rx_buf_len: buffer length
- *
- * Figure out the size of half page based on given buffer length and
- * then subtract the skb_shared_info followed by subtraction of the
- * actual buffer length; this in turn results in the actual space that
- * is left for padding usage
- */
-static inline int ice_compute_pad(int rx_buf_len)
-{
- int half_page_size;
-
- half_page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
- return SKB_WITH_OVERHEAD(half_page_size) - rx_buf_len;
-}
-
-/**
- * ice_skb_pad - determine the padding that we can supply
- *
- * Figure out the right Rx buffer size and based on that calculate the
- * padding
- */
-static inline int ice_skb_pad(void)
-{
- int rx_buf_len;
-
- /* If a 2K buffer cannot handle a standard Ethernet frame then
- * optimize padding for a 3K buffer instead of a 1.5K buffer.
- *
- * For a 3K buffer we need to add enough padding to allow for
- * tailroom due to NET_IP_ALIGN possibly shifting us out of
- * cache-line alignment.
- */
- if (ICE_2K_TOO_SMALL_WITH_PADDING)
- rx_buf_len = ICE_RXBUF_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
- else
- rx_buf_len = ICE_RXBUF_1536;
-
- /* if needed make room for NET_IP_ALIGN */
- rx_buf_len -= NET_IP_ALIGN;
-
- return ice_compute_pad(rx_buf_len);
-}
-
-#define ICE_SKB_PAD ice_skb_pad()
-#else
-#define ICE_2K_TOO_SMALL_WITH_PADDING false
-#define ICE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
-#endif
-
/* We are assuming that the cache line is always 64 Bytes here for ice.
* In order to make sure that is a correct assumption there is a check in probe
* to print a warning if the read from GLPCI_CNF2 tells us that the cache line
@@ -112,10 +48,6 @@ static inline int ice_skb_pad(void)
(u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
(R)->next_to_clean - (R)->next_to_use - 1)
-#define ICE_RX_DESC_UNUSED(R) \
- ((((R)->first_desc > (R)->next_to_use) ? 0 : (R)->count) + \
- (R)->first_desc - (R)->next_to_use - 1)
-
#define ICE_RING_QUARTER(R) ((R)->count >> 2)
#define ICE_TX_FLAGS_TSO BIT(0)
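For reference, a small sketch (not part of the patch) of the ICE_DESC_UNUSED() ring arithmetic kept above, with concrete numbers; one slot is always left empty so a full ring and an empty ring can be told apart. The ex_desc_unused() function is hypothetical:

#include <linux/types.h>

static u16 ex_desc_unused(u16 count, u16 next_to_use, u16 next_to_clean)
{
	/* e.g. count = 256, next_to_use = 250, next_to_clean = 10:
	 * 256 + 10 - 250 - 1 = 15 descriptors available to refill
	 */
	return ((next_to_clean > next_to_use) ? 0 : count) +
	       next_to_clean - next_to_use - 1;
}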
@@ -193,18 +125,10 @@ struct ice_tx_offload_params {
u32 td_l2tag1;
u32 cd_tunnel_params;
u16 cd_l2tag2;
+ u16 cd_gcs_params;
u8 header_len;
};
-struct ice_rx_buf {
- dma_addr_t dma;
- struct page *page;
- unsigned int page_offset;
- unsigned int pgcnt;
- unsigned int act;
- unsigned int pagecnt_bias;
-};
-
struct ice_q_stats {
u64 pkts;
u64 bytes;
@@ -262,15 +186,6 @@ struct ice_pkt_ctx {
__be16 vlan_proto;
};
-struct ice_xdp_buff {
- struct xdp_buff xdp_buff;
- const union ice_32b_rx_flex_desc *eop_desc;
- const struct ice_pkt_ctx *pkt_ctx;
-};
-
-/* Required for compatibility with xdp_buffs from xsk_pool */
-static_assert(offsetof(struct ice_xdp_buff, xdp_buff) == 0);
-
/* indices into GLINT_ITR registers */
#define ICE_RX_ITR ICE_IDX_ITR0
#define ICE_TX_ITR ICE_IDX_ITR1
@@ -310,10 +225,20 @@ enum ice_dynamic_itr {
#define ICE_TX_LEGACY 1
/* descriptor ring, associated with a VSI */
+struct ice_tstamp_ring {
+ struct ice_tx_ring *tx_ring; /* Backreference to associated Tx ring */
+ dma_addr_t dma; /* physical address of ring */
+ struct rcu_head rcu; /* to avoid race on free */
+ u8 __iomem *tail;
+ void *desc;
+ u16 next_to_use;
+ u16 count;
+} ____cacheline_internodealigned_in_smp;
+
struct ice_rx_ring {
/* CL1 - 1st cacheline starts here */
void *desc; /* Descriptor ring memory */
- struct device *dev; /* Used for DMA mapping */
+ struct page_pool *pp;
struct net_device *netdev; /* netdev ring maps to */
struct ice_vsi *vsi; /* Backreference to associated VSI */
struct ice_q_vector *q_vector; /* Backreference to associated vector */
@@ -325,14 +250,19 @@ struct ice_rx_ring {
u16 next_to_alloc;
union {
- struct ice_rx_buf *rx_buf;
+ struct libeth_fqe *rx_fqes;
struct xdp_buff **xdp_buf;
};
+
/* CL2 - 2nd cacheline starts here */
+ struct libeth_fqe *hdr_fqes;
+ struct page_pool *hdr_pp;
+
union {
- struct ice_xdp_buff xdp_ext;
- struct xdp_buff xdp;
+ struct libeth_xdp_buff_stash xdp;
+ struct libeth_xdp_buff *xsk;
};
+
/* CL3 - 3rd cacheline starts here */
union {
struct ice_pkt_ctx pkt_ctx;
@@ -342,12 +272,13 @@ struct ice_rx_ring {
};
};
struct bpf_prog *xdp_prog;
- u16 rx_offset;
/* used in interrupt processing */
u16 next_to_use;
u16 next_to_clean;
- u16 first_desc;
+
+ u32 hdr_truesize;
+ u32 truesize;
/* stats structs */
struct ice_ring_stats *ring_stats;
@@ -358,13 +289,14 @@ struct ice_rx_ring {
struct ice_tx_ring *xdp_ring;
struct ice_rx_ring *next; /* pointer to next ring in q_vector */
struct xsk_buff_pool *xsk_pool;
- u32 nr_frags;
- dma_addr_t dma; /* physical address of ring */
+ u16 rx_hdr_len;
u16 rx_buf_len;
+ dma_addr_t dma; /* physical address of ring */
u8 dcb_tc; /* Traffic class of ring */
u8 ptp_rx;
-#define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1)
#define ICE_RX_FLAGS_CRC_STRIP_DIS BIT(2)
+#define ICE_RX_FLAGS_MULTIDEV BIT(3)
+#define ICE_RX_FLAGS_RING_GCS BIT(4)
u8 flags;
/* CL5 - 5th cacheline starts here */
struct xdp_rxq_info xdp_rxq;
@@ -400,28 +332,16 @@ struct ice_tx_ring {
spinlock_t tx_lock;
u32 txq_teid; /* Added Tx queue TEID */
/* CL4 - 4th cacheline starts here */
+ struct ice_tstamp_ring *tstamp_ring;
#define ICE_TX_FLAGS_RING_XDP BIT(0)
#define ICE_TX_FLAGS_RING_VLAN_L2TAG1 BIT(1)
#define ICE_TX_FLAGS_RING_VLAN_L2TAG2 BIT(2)
+#define ICE_TX_FLAGS_TXTIME BIT(3)
u8 flags;
u8 dcb_tc; /* Traffic class of ring */
+ u16 quanta_prof_id;
} ____cacheline_internodealigned_in_smp;
-static inline bool ice_ring_uses_build_skb(struct ice_rx_ring *ring)
-{
- return !!(ring->flags & ICE_RX_FLAGS_RING_BUILD_SKB);
-}
-
-static inline void ice_set_ring_build_skb_ena(struct ice_rx_ring *ring)
-{
- ring->flags |= ICE_RX_FLAGS_RING_BUILD_SKB;
-}
-
-static inline void ice_clear_ring_build_skb_ena(struct ice_rx_ring *ring)
-{
- ring->flags &= ~ICE_RX_FLAGS_RING_BUILD_SKB;
-}
-
static inline bool ice_ring_ch_enabled(struct ice_tx_ring *ring)
{
return !!ring->ch;
@@ -476,17 +396,13 @@ struct ice_coalesce_stored {
static inline unsigned int ice_rx_pg_order(struct ice_rx_ring *ring)
{
-#if (PAGE_SIZE < 8192)
- if (ring->rx_buf_len > (PAGE_SIZE / 2))
- return 1;
-#endif
return 0;
}
-#define ice_rx_pg_size(_ring) (PAGE_SIZE << ice_rx_pg_order(_ring))
-
union ice_32b_rx_flex_desc;
+void ice_init_ctrl_rx_descs(struct ice_rx_ring *rx_ring, u32 num_descs);
+void ice_rxq_pp_destroy(struct ice_rx_ring *rq);
bool ice_alloc_rx_bufs(struct ice_rx_ring *rxr, unsigned int cleaned_count);
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev);
u16
@@ -496,12 +412,15 @@ void ice_clean_tx_ring(struct ice_tx_ring *tx_ring);
void ice_clean_rx_ring(struct ice_rx_ring *rx_ring);
int ice_setup_tx_ring(struct ice_tx_ring *tx_ring);
int ice_setup_rx_ring(struct ice_rx_ring *rx_ring);
+int ice_alloc_setup_tstamp_ring(struct ice_tx_ring *tx_ring);
void ice_free_tx_ring(struct ice_tx_ring *tx_ring);
void ice_free_rx_ring(struct ice_rx_ring *rx_ring);
int ice_napi_poll(struct napi_struct *napi, int budget);
int
ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
u8 *raw_packet);
-int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget);
void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring);
+void ice_clean_ctrl_rx_irq(struct ice_rx_ring *rx_ring);
+void ice_free_tx_tstamp_ring(struct ice_tx_ring *tx_ring);
+void ice_free_tstamp_ring(struct ice_tx_ring *tx_ring);
#endif /* _ICE_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index f8f1d2bdc1be..956da38d63b0 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -2,6 +2,8 @@
/* Copyright (c) 2019, Intel Corporation. */
#include <linux/filter.h>
+#include <linux/net/intel/libie/rx.h>
+#include <net/libeth/xdp.h>
#include "ice_txrx_lib.h"
#include "ice_eswitch.h"
@@ -39,30 +41,6 @@ void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val)
}
/**
- * ice_ptype_to_htype - get a hash type
- * @ptype: the ptype value from the descriptor
- *
- * Returns appropriate hash type (such as PKT_HASH_TYPE_L2/L3/L4) to be used by
- * skb_set_hash based on PTYPE as parsed by HW Rx pipeline and is part of
- * Rx desc.
- */
-static enum pkt_hash_types ice_ptype_to_htype(u16 ptype)
-{
- struct ice_rx_ptype_decoded decoded = ice_decode_rx_desc_ptype(ptype);
-
- if (!decoded.known)
- return PKT_HASH_TYPE_NONE;
- if (decoded.payload_layer == ICE_RX_PTYPE_PAYLOAD_LAYER_PAY4)
- return PKT_HASH_TYPE_L4;
- if (decoded.payload_layer == ICE_RX_PTYPE_PAYLOAD_LAYER_PAY3)
- return PKT_HASH_TYPE_L3;
- if (decoded.outer_ip == ICE_RX_PTYPE_OUTER_L2)
- return PKT_HASH_TYPE_L2;
-
- return PKT_HASH_TYPE_NONE;
-}
-
-/**
* ice_get_rx_hash - get RX hash value from descriptor
* @rx_desc: specific descriptor
*
@@ -91,14 +69,33 @@ ice_rx_hash_to_skb(const struct ice_rx_ring *rx_ring,
const union ice_32b_rx_flex_desc *rx_desc,
struct sk_buff *skb, u16 rx_ptype)
{
+ struct libeth_rx_pt decoded;
u32 hash;
- if (!(rx_ring->netdev->features & NETIF_F_RXHASH))
+ decoded = libie_rx_pt_parse(rx_ptype);
+ if (!libeth_rx_pt_has_hash(rx_ring->netdev, decoded))
return;
hash = ice_get_rx_hash(rx_desc);
if (likely(hash))
- skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
+ libeth_rx_pt_set_hash(skb, hash, decoded);
+}
+
+/**
+ * ice_rx_gcs - Set generic checksum in skb
+ * @skb: skb currently being received and modified
+ * @rx_desc: receive descriptor
+ */
+static void ice_rx_gcs(struct sk_buff *skb,
+ const union ice_32b_rx_flex_desc *rx_desc)
+{
+ const struct ice_32b_rx_flex_desc_nic *desc;
+ u16 csum;
+
+ desc = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ csum = (__force u16)desc->raw_csum;
+ skb->csum = csum_unfold((__force __sum16)swab16(csum));
}
/**
@@ -114,34 +111,35 @@ static void
ice_rx_csum(struct ice_rx_ring *ring, struct sk_buff *skb,
union ice_32b_rx_flex_desc *rx_desc, u16 ptype)
{
- struct ice_rx_ptype_decoded decoded;
+ struct libeth_rx_pt decoded;
u16 rx_status0, rx_status1;
bool ipv4, ipv6;
- rx_status0 = le16_to_cpu(rx_desc->wb.status_error0);
- rx_status1 = le16_to_cpu(rx_desc->wb.status_error1);
-
- decoded = ice_decode_rx_desc_ptype(ptype);
-
/* Start with CHECKSUM_NONE and by default csum_level = 0 */
skb->ip_summed = CHECKSUM_NONE;
- skb_checksum_none_assert(skb);
- /* check if Rx checksum is enabled */
- if (!(ring->netdev->features & NETIF_F_RXCSUM))
+ decoded = libie_rx_pt_parse(ptype);
+ if (!libeth_rx_pt_has_checksum(ring->netdev, decoded))
return;
- /* check if HW has decoded the packet and checksum */
- if (!(rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
+ rx_status0 = le16_to_cpu(rx_desc->wb.status_error0);
+ rx_status1 = le16_to_cpu(rx_desc->wb.status_error1);
+
+ if ((ring->flags & ICE_RX_FLAGS_RING_GCS) &&
+ rx_desc->wb.rxdid == ICE_RXDID_FLEX_NIC &&
+ (decoded.inner_prot == LIBETH_RX_PT_INNER_TCP ||
+ decoded.inner_prot == LIBETH_RX_PT_INNER_UDP ||
+ decoded.inner_prot == LIBETH_RX_PT_INNER_ICMP)) {
+ ice_rx_gcs(skb, rx_desc);
return;
+ }
- if (!(decoded.known && decoded.outer_ip))
+ /* check if HW has decoded the packet and checksum */
+ if (!(rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
return;
- ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
- (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
- ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
- (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);
+ ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4;
+ ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6;
if (ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))) {
ring->vsi->back->hw_rx_eipe_error++;
@@ -169,19 +167,10 @@ ice_rx_csum(struct ice_rx_ring *ring, struct sk_buff *skb,
* we need to bump the checksum level by 1 to reflect the fact that
* we are indicating we validated the inner checksum.
*/
- if (decoded.tunnel_type >= ICE_RX_PTYPE_TUNNEL_IP_GRENAT)
+ if (decoded.tunnel_type >= LIBETH_RX_PT_TUNNEL_IP_GRENAT)
skb->csum_level = 1;
- /* Only report checksum unnecessary for TCP, UDP, or SCTP */
- switch (decoded.inner_prot) {
- case ICE_RX_PTYPE_INNER_PROT_TCP:
- case ICE_RX_PTYPE_INNER_PROT_UDP:
- case ICE_RX_PTYPE_INNER_PROT_SCTP:
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- break;
- default:
- break;
- }
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
return;
checksum_fail:
@@ -236,7 +225,19 @@ ice_process_skb_fields(struct ice_rx_ring *rx_ring,
ice_rx_hash_to_skb(rx_ring, rx_desc, skb, ptype);
/* modifies the skb - consumes the enet header */
- skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+ if (unlikely(rx_ring->flags & ICE_RX_FLAGS_MULTIDEV)) {
+ struct net_device *netdev = ice_eswitch_get_target(rx_ring,
+ rx_desc);
+
+ if (ice_is_port_repr_netdev(netdev))
+ ice_repr_inc_rx_stats(netdev, skb->len);
+
+ /* __skb_push() is needed because xdp_build_skb_from_buff()
+ * calls eth_type_trans()
+ */
+ __skb_push(skb, ETH_HLEN);
+ skb->protocol = eth_type_trans(skb, netdev);
+ }
ice_rx_csum(rx_ring, skb, rx_desc, ptype);
@@ -273,19 +274,18 @@ static void
ice_clean_xdp_tx_buf(struct device *dev, struct ice_tx_buf *tx_buf,
struct xdp_frame_bulk *bq)
{
- dma_unmap_single(dev, dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
- dma_unmap_len_set(tx_buf, len, 0);
-
switch (tx_buf->type) {
case ICE_TX_BUF_XDP_TX:
- page_frag_free(tx_buf->raw_buf);
+ libeth_xdp_return_va(tx_buf->raw_buf, true);
break;
case ICE_TX_BUF_XDP_XMIT:
+ dma_unmap_single(dev, dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
xdp_return_frame_bulk(tx_buf->xdpf, bq);
break;
}
+ dma_unmap_len_set(tx_buf, len, 0);
tx_buf->type = ICE_TX_BUF_EMPTY;
}
@@ -380,9 +380,11 @@ int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring,
struct ice_tx_buf *tx_buf;
u32 cnt = xdp_ring->count;
void *data = xdp->data;
+ struct page *page;
u32 nr_frags = 0;
u32 free_space;
u32 frag = 0;
+ u32 offset;
free_space = ICE_DESC_UNUSED(xdp_ring);
if (free_space < ICE_RING_QUARTER(xdp_ring))
@@ -402,24 +404,28 @@ int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring,
tx_head = &xdp_ring->tx_buf[ntu];
tx_buf = tx_head;
+ page = virt_to_page(data);
+ offset = offset_in_page(xdp->data);
+
for (;;) {
dma_addr_t dma;
- dma = dma_map_single(dev, data, size, DMA_TO_DEVICE);
- if (dma_mapping_error(dev, dma))
- goto dma_unmap;
-
- /* record length, and DMA address */
- dma_unmap_len_set(tx_buf, len, size);
- dma_unmap_addr_set(tx_buf, dma, dma);
-
if (frame) {
+ dma = dma_map_single(dev, data, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma))
+ goto dma_unmap;
tx_buf->type = ICE_TX_BUF_FRAG;
} else {
+ dma = page_pool_get_dma_addr(page) + offset;
+ dma_sync_single_for_device(dev, dma, size, DMA_BIDIRECTIONAL);
tx_buf->type = ICE_TX_BUF_XDP_TX;
tx_buf->raw_buf = data;
}
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_buf, len, size);
+ dma_unmap_addr_set(tx_buf, dma, dma);
+
tx_desc->buf_addr = cpu_to_le64(dma);
tx_desc->cmd_type_offset_bsz = ice_build_ctob(0, 0, size, 0);
@@ -433,6 +439,8 @@ int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring,
tx_desc = ICE_TX_DESC(xdp_ring, ntu);
tx_buf = &xdp_ring->tx_buf[ntu];
+ page = skb_frag_page(&sinfo->frags[frag]);
+ offset = skb_frag_off(&sinfo->frags[frag]);
data = skb_frag_address(&sinfo->frags[frag]);
size = skb_frag_size(&sinfo->frags[frag]);
frag++;
@@ -517,52 +525,19 @@ void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res,
*/
static int ice_xdp_rx_hw_ts(const struct xdp_md *ctx, u64 *ts_ns)
{
- const struct ice_xdp_buff *xdp_ext = (void *)ctx;
+ const struct libeth_xdp_buff *xdp_ext = (void *)ctx;
+ struct ice_rx_ring *rx_ring;
- *ts_ns = ice_ptp_get_rx_hwts(xdp_ext->eop_desc,
- xdp_ext->pkt_ctx);
+ rx_ring = libeth_xdp_buff_to_rq(xdp_ext, typeof(*rx_ring), xdp_rxq);
+
+ *ts_ns = ice_ptp_get_rx_hwts(xdp_ext->desc,
+ &rx_ring->pkt_ctx);
if (!*ts_ns)
return -ENODATA;
return 0;
}
-/* Define a ptype index -> XDP hash type lookup table.
- * It uses the same ptype definitions as ice_decode_rx_desc_ptype[],
- * avoiding possible copy-paste errors.
- */
-#undef ICE_PTT
-#undef ICE_PTT_UNUSED_ENTRY
-
-#define ICE_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
- [PTYPE] = XDP_RSS_L3_##OUTER_IP_VER | XDP_RSS_L4_##I | XDP_RSS_TYPE_##PL
-
-#define ICE_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = 0
-
-/* A few supplementary definitions for when XDP hash types do not coincide
- * with what can be generated from ptype definitions
- * by means of preprocessor concatenation.
- */
-#define XDP_RSS_L3_NONE XDP_RSS_TYPE_NONE
-#define XDP_RSS_L4_NONE XDP_RSS_TYPE_NONE
-#define XDP_RSS_TYPE_PAY2 XDP_RSS_TYPE_L2
-#define XDP_RSS_TYPE_PAY3 XDP_RSS_TYPE_NONE
-#define XDP_RSS_TYPE_PAY4 XDP_RSS_L4
-
-static const enum xdp_rss_hash_type
-ice_ptype_to_xdp_hash[ICE_NUM_DEFINED_PTYPES] = {
- ICE_PTYPES
-};
-
-#undef XDP_RSS_L3_NONE
-#undef XDP_RSS_L4_NONE
-#undef XDP_RSS_TYPE_PAY2
-#undef XDP_RSS_TYPE_PAY3
-#undef XDP_RSS_TYPE_PAY4
-
-#undef ICE_PTT
-#undef ICE_PTT_UNUSED_ENTRY
-
/**
* ice_xdp_rx_hash_type - Get XDP-specific hash type from the RX descriptor
* @eop_desc: End of Packet descriptor
@@ -570,12 +545,7 @@ ice_ptype_to_xdp_hash[ICE_NUM_DEFINED_PTYPES] = {
static enum xdp_rss_hash_type
ice_xdp_rx_hash_type(const union ice_32b_rx_flex_desc *eop_desc)
{
- u16 ptype = ice_get_ptype(eop_desc);
-
- if (unlikely(ptype >= ICE_NUM_DEFINED_PTYPES))
- return 0;
-
- return ice_ptype_to_xdp_hash[ptype];
+ return libie_rx_pt_parse(ice_get_ptype(eop_desc)).hash_type;
}
/**
@@ -589,10 +559,10 @@ ice_xdp_rx_hash_type(const union ice_32b_rx_flex_desc *eop_desc)
static int ice_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash,
enum xdp_rss_hash_type *rss_type)
{
- const struct ice_xdp_buff *xdp_ext = (void *)ctx;
+ const struct libeth_xdp_buff *xdp_ext = (void *)ctx;
- *hash = ice_get_rx_hash(xdp_ext->eop_desc);
- *rss_type = ice_xdp_rx_hash_type(xdp_ext->eop_desc);
+ *hash = ice_get_rx_hash(xdp_ext->desc);
+ *rss_type = ice_xdp_rx_hash_type(xdp_ext->desc);
if (!likely(*hash))
return -ENODATA;
@@ -611,13 +581,16 @@ static int ice_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash,
static int ice_xdp_rx_vlan_tag(const struct xdp_md *ctx, __be16 *vlan_proto,
u16 *vlan_tci)
{
- const struct ice_xdp_buff *xdp_ext = (void *)ctx;
+ const struct libeth_xdp_buff *xdp_ext = (void *)ctx;
+ struct ice_rx_ring *rx_ring;
+
+ rx_ring = libeth_xdp_buff_to_rq(xdp_ext, typeof(*rx_ring), xdp_rxq);
- *vlan_proto = xdp_ext->pkt_ctx->vlan_proto;
+ *vlan_proto = rx_ring->pkt_ctx.vlan_proto;
if (!*vlan_proto)
return -ENODATA;
- *vlan_tci = ice_get_vlan_tci(xdp_ext->eop_desc);
+ *vlan_tci = ice_get_vlan_tci(xdp_ext->desc);
if (!*vlan_tci)
return -ENODATA;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
index afcead4baef4..6a3f10f7a53f 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
@@ -6,49 +6,6 @@
#include "ice.h"
/**
- * ice_set_rx_bufs_act - propagate Rx buffer action to frags
- * @xdp: XDP buffer representing frame (linear and frags part)
- * @rx_ring: Rx ring struct
- * act: action to store onto Rx buffers related to XDP buffer parts
- *
- * Set action that should be taken before putting Rx buffer from first frag
- * to the last.
- */
-static inline void
-ice_set_rx_bufs_act(struct xdp_buff *xdp, const struct ice_rx_ring *rx_ring,
- const unsigned int act)
-{
- u32 sinfo_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags;
- u32 nr_frags = rx_ring->nr_frags + 1;
- u32 idx = rx_ring->first_desc;
- u32 cnt = rx_ring->count;
- struct ice_rx_buf *buf;
-
- for (int i = 0; i < nr_frags; i++) {
- buf = &rx_ring->rx_buf[idx];
- buf->act = act;
-
- if (++idx == cnt)
- idx = 0;
- }
-
- /* adjust pagecnt_bias on frags freed by XDP prog */
- if (sinfo_frags < rx_ring->nr_frags && act == ICE_XDP_CONSUMED) {
- u32 delta = rx_ring->nr_frags - sinfo_frags;
-
- while (delta) {
- if (idx == 0)
- idx = cnt - 1;
- else
- idx--;
- buf = &rx_ring->rx_buf[idx];
- buf->pagecnt_bias--;
- delta--;
- }
- }
-}
-
-/**
* ice_test_staterr - tests bits in Rx descriptor status and error fields
* @status_err_n: Rx descriptor status_error0 or status_error1 bits
* @stat_err_bits: value to mask
@@ -97,6 +54,20 @@ ice_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
}
/**
+ * ice_build_tstamp_desc - build Tx time stamp descriptor
+ * @tx_desc: Tx LAN descriptor index
+ * @tstamp: time stamp
+ *
+ * Return: Tx time stamp descriptor
+ */
+static inline __le32
+ice_build_tstamp_desc(u16 tx_desc, u32 tstamp)
+{
+ return cpu_to_le32(FIELD_PREP(ICE_TXTIME_TX_DESC_IDX_M, tx_desc) |
+ FIELD_PREP(ICE_TXTIME_STAMP_M, tstamp));
+}
+
+/**
* ice_get_vlan_tci - get VLAN TCI from Rx flex descriptor
* @rx_desc: Rx 32b flex descriptor with RXDID=2
*
@@ -154,7 +125,6 @@ static inline u32 ice_set_rs_bit(const struct ice_tx_ring *xdp_ring)
}
void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res, u32 first_idx);
-int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring);
int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring,
bool frame);
void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val);
@@ -165,13 +135,4 @@ ice_process_skb_fields(struct ice_rx_ring *rx_ring,
void
ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tci);
-static inline void
-ice_xdp_meta_set_desc(struct xdp_buff *xdp,
- union ice_32b_rx_flex_desc *eop_desc)
-{
- struct ice_xdp_buff *xdp_ext = container_of(xdp, struct ice_xdp_buff,
- xdp_buff);
-
- xdp_ext->eop_desc = eop_desc;
-}
#endif /* !_ICE_TXRX_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 9ff92dba5823..6a2ec8389a8f 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -17,7 +17,9 @@
#include "ice_protocol_type.h"
#include "ice_sbq_cmd.h"
#include "ice_vlan_mode.h"
-#include "ice_fwlog.h"
+#include <linux/net/intel/libie/fwlog.h>
+#include <linux/wait.h>
+#include <net/dscp.h>
static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc)
{
@@ -61,6 +63,7 @@ static inline u32 ice_round_to_num(u32 N, u32 R)
ICE_DBG_AQ_DESC | \
ICE_DBG_AQ_DESC_BUF | \
ICE_DBG_AQ_CMD)
+#define ICE_DBG_PARSER BIT_ULL(28)
#define ICE_DBG_USER BIT_ULL(31)
@@ -71,6 +74,14 @@ enum ice_aq_res_ids {
ICE_GLOBAL_CFG_LOCK_RES_ID
};
+enum ice_fec_stats_types {
+ ICE_FEC_CORR_LOW,
+ ICE_FEC_CORR_HIGH,
+ ICE_FEC_UNCORR_LOW,
+ ICE_FEC_UNCORR_HIGH,
+ ICE_FEC_MAX
+};
+
/* FW update timeout definitions are in milliseconds */
#define ICE_NVM_TIMEOUT 180000
#define ICE_CHANGE_LOCK_TIMEOUT 1000
@@ -150,7 +161,7 @@ enum ice_vsi_type {
ICE_VSI_CTRL = 3, /* equates to ICE_VSI_PF with 1 queue pair */
ICE_VSI_CHNL = 4,
ICE_VSI_LB = 6,
- ICE_VSI_SWITCHDEV_CTRL = 7,
+ ICE_VSI_SF = 9,
};
struct ice_link_status {
@@ -204,6 +215,7 @@ struct ice_phy_info {
enum ice_fltr_ptype {
/* NONE - used for undef/error */
ICE_FLTR_PTYPE_NONF_NONE = 0,
+ ICE_FLTR_PTYPE_NONF_ETH,
ICE_FLTR_PTYPE_NONF_IPV4_UDP,
ICE_FLTR_PTYPE_NONF_IPV4_TCP,
ICE_FLTR_PTYPE_NONF_IPV4_SCTP,
@@ -281,8 +293,10 @@ struct ice_hw_common_caps {
u8 dcb;
u8 ieee_1588;
u8 rdma;
- u8 roce_lag;
- u8 sriov_lag;
+
+ bool roce_lag;
+ bool sriov_lag;
+ bool sriov_aa_lag;
bool nvm_update_pending_nvm;
bool nvm_update_pending_orom;
@@ -296,6 +310,7 @@ struct ice_hw_common_caps {
bool pcie_reset_avoidance;
/* Post update reset restriction */
bool reset_restrict_support;
+ bool tx_sched_topo_comp_mode_en;
};
/* IEEE 1588 TIME_SYNC specific info */
@@ -313,20 +328,22 @@ struct ice_hw_common_caps {
#define ICE_TS_TMR_IDX_ASSOC_M BIT(24)
/* TIME_REF clock rate specification */
-enum ice_time_ref_freq {
- ICE_TIME_REF_FREQ_25_000 = 0,
- ICE_TIME_REF_FREQ_122_880 = 1,
- ICE_TIME_REF_FREQ_125_000 = 2,
- ICE_TIME_REF_FREQ_153_600 = 3,
- ICE_TIME_REF_FREQ_156_250 = 4,
- ICE_TIME_REF_FREQ_245_760 = 5,
+enum ice_tspll_freq {
+ ICE_TSPLL_FREQ_25_000 = 0,
+ ICE_TSPLL_FREQ_122_880 = 1,
+ ICE_TSPLL_FREQ_125_000 = 2,
+ ICE_TSPLL_FREQ_153_600 = 3,
+ ICE_TSPLL_FREQ_156_250 = 4,
+ ICE_TSPLL_FREQ_245_760 = 5,
- NUM_ICE_TIME_REF_FREQ
+ NUM_ICE_TSPLL_FREQ,
+
+ ICE_TSPLL_FREQ_INVALID = -1,
};
/* Clock source specification */
enum ice_clk_src {
- ICE_CLK_SRC_TCX0 = 0, /* Temperature compensated oscillator */
+ ICE_CLK_SRC_TCXO = 0, /* Temperature compensated oscillator */
ICE_CLK_SRC_TIME_REF = 1, /* Use TIME_REF reference clock */
NUM_ICE_CLK_SRC
@@ -334,7 +351,7 @@ enum ice_clk_src {
struct ice_ts_func_info {
/* Function specific info */
- enum ice_time_ref_freq time_ref;
+ enum ice_tspll_freq time_ref;
u8 clk_freq;
u8 clk_src;
u8 tmr_index_assoc;
@@ -355,6 +372,7 @@ struct ice_ts_func_info {
#define ICE_TS_TMR1_ENA_M BIT(26)
#define ICE_TS_LL_TX_TS_READ_M BIT(28)
#define ICE_TS_LL_TX_TS_INT_READ_M BIT(29)
+#define ICE_TS_LL_PHY_TMR_UPDATE_M BIT(30)
struct ice_ts_dev_info {
/* Device specific info */
@@ -369,6 +387,16 @@ struct ice_ts_dev_info {
u8 tmr1_ena;
u8 ts_ll_read;
u8 ts_ll_int_read;
+ u8 ll_phy_tmr_update;
+};
+
+#define ICE_NAC_TOPO_PRIMARY_M BIT(0)
+#define ICE_NAC_TOPO_DUAL_M BIT(1)
+#define ICE_NAC_TOPO_ID_M GENMASK(0xF, 0)
+
+struct ice_nac_topology {
+ u32 mode;
+ u8 id;
};
/* Function specific capabilities */
@@ -392,6 +420,7 @@ struct ice_hw_dev_caps {
u32 num_flow_director_fltr; /* Number of FD filters available */
struct ice_ts_dev_info ts_dev_info;
u32 num_funcs;
+ struct ice_nac_topology nac_topo;
/* bitmap of supported sensors
* bit 0 - internal temperature sensor
* bit 31:1 - Reserved
@@ -481,6 +510,8 @@ struct ice_bank_info {
u32 orom_size; /* Size of OROM bank */
u32 netlist_ptr; /* Pointer to 1st Netlist bank */
u32 netlist_size; /* Size of Netlist bank */
+ u32 active_css_hdr_len; /* Active CSS header length */
+ u32 inactive_css_hdr_len; /* Inactive CSS header length */
enum ice_flash_bank nvm_bank; /* Active NVM bank */
enum ice_flash_bank orom_bank; /* Active OROM bank */
enum ice_flash_bank netlist_bank; /* Active Netlist bank */
@@ -667,7 +698,6 @@ struct ice_dcb_app_priority_table {
#define ICE_MAX_USER_PRIORITY 8
#define ICE_DCBX_MAX_APPS 64
-#define ICE_DSCP_NUM_VAL 64
#define ICE_LLDPDU_SIZE 1500
#define ICE_TLV_STATUS_OPER 0x1
#define ICE_TLV_STATUS_SYNC 0x2
@@ -690,9 +720,9 @@ struct ice_dcbx_cfg {
u8 pfc_mode;
struct ice_dcb_app_priority_table app[ICE_DCBX_MAX_APPS];
/* when DSCP mapping defined by user set its bit to 1 */
- DECLARE_BITMAP(dscp_mapped, ICE_DSCP_NUM_VAL);
+ DECLARE_BITMAP(dscp_mapped, DSCP_MAX);
/* array holding DSCP -> UP/TC values for DSCP L3 QoS mode */
- u8 dscp_map[ICE_DSCP_NUM_VAL];
+ u8 dscp_map[DSCP_MAX];
u8 dcbx_mode;
#define ICE_DCBX_MODE_CEE 0x1
#define ICE_DCBX_MODE_IEEE 0x2
@@ -715,6 +745,7 @@ struct ice_port_info {
u16 sw_id; /* Initial switch ID belongs to port */
u16 pf_vf_num;
u8 port_state;
+ u8 local_fwd_mode;
#define ICE_SCHED_PORT_STATE_INIT 0x0
#define ICE_SCHED_PORT_STATE_READY 0x1
u8 lport;
@@ -738,6 +769,8 @@ struct ice_switch_info {
struct ice_sw_recipe *recp_list;
u16 prof_res_bm_init;
u16 max_used_prof_index;
+ u16 rule_cnt;
+ u8 recp_cnt;
DECLARE_BITMAP(prof_res_bm[ICE_MAX_NUM_PROFILES], ICE_MAX_FV_WORDS);
};
@@ -817,11 +850,41 @@ struct ice_mbx_data {
u16 async_watermark_val;
};
-/* PHY model */
-enum ice_phy_model {
- ICE_PHY_UNSUP = -1,
- ICE_PHY_E810 = 1,
- ICE_PHY_E82X,
+#define ICE_PORTS_PER_QUAD 4
+#define ICE_GET_QUAD_NUM(port) ((port) / ICE_PORTS_PER_QUAD)
+
+#define ATQBAL_FLAGS_INTR_IN_PROGRESS BIT(0)
+
+struct ice_e810_params {
+ /* The wait queue lock also protects the low latency interface */
+ wait_queue_head_t atqbal_wq;
+ unsigned int atqbal_flags;
+};
+
+struct ice_eth56g_params {
+ u8 num_phys;
+ bool onestep_ena;
+ bool sfd_ena;
+ u32 peer_delay;
+};
+
+union ice_phy_params {
+ struct ice_e810_params e810;
+ struct ice_eth56g_params eth56g;
+};
+
+/* Global Link Topology */
+enum ice_global_link_topo {
+ ICE_LINK_TOPO_UP_TO_2_LINKS,
+ ICE_LINK_TOPO_UP_TO_4_LINKS,
+ ICE_LINK_TOPO_UP_TO_8_LINKS,
+ ICE_LINK_TOPO_RESERVED,
+};
+
+struct ice_ptp_hw {
+ union ice_phy_params phy;
+ u8 num_lports;
+ u8 ports_per_phy;
};
/* Port hardware description */
@@ -845,10 +908,12 @@ struct ice_hw {
u8 revision_id;
u8 pf_id; /* device profile info */
- enum ice_phy_model phy_model;
+ u8 logical_pf_id;
u16 max_burst_size; /* driver sets this value */
+ u8 recp_reuse:1; /* indicates whether FW supports recipe reuse */
+
/* Tx Scheduler values */
u8 num_tx_sched_layers;
u8 num_tx_sched_phys_layers;
@@ -883,9 +948,7 @@ struct ice_hw {
u8 fw_patch; /* firmware patch version */
u32 fw_build; /* firmware build number */
- struct ice_fwlog_cfg fwlog_cfg;
- bool fwlog_supported; /* does hardware support FW logging? */
- struct ice_fwlog_ring fwlog_ring;
+ struct libie_fwlog fwlog;
/* Device max aggregate bandwidths corresponding to the GL_PWR_MODE_CTL
* register. Used for determining the ITR/INTRL granularity during
@@ -906,12 +969,8 @@ struct ice_hw {
/* INTRL granularity in 1 us */
u8 intrl_gran;
-#define ICE_MAX_QUAD 2
-#define ICE_QUADS_PER_PHY_E82X 2
-#define ICE_PORTS_PER_PHY_E82X 8
-#define ICE_PORTS_PER_QUAD 4
-#define ICE_PORTS_PER_PHY_E810 4
-#define ICE_NUM_EXTERNAL_PORTS (ICE_MAX_QUAD * ICE_PORTS_PER_QUAD)
+ struct ice_ptp_hw ptp;
+ s8 lane_num;
/* Active package version (currently active) */
struct ice_pkg_ver active_pkg_ver;
@@ -1004,6 +1063,7 @@ struct ice_hw_port_stats {
u64 error_bytes; /* errbc */
u64 mac_local_faults; /* mlfc */
u64 mac_remote_faults; /* mrfc */
+ u64 rx_len_errors; /* rlec */
u64 link_xon_rx; /* lxonrxc */
u64 link_xoff_rx; /* lxoffrxc */
u64 link_xon_tx; /* lxontxc */
@@ -1084,17 +1144,13 @@ struct ice_aq_get_set_rss_lut_params {
#define ICE_SR_SECTOR_SIZE_IN_WORDS 0x800
/* CSS Header words */
+#define ICE_NVM_CSS_HDR_LEN_L 0x02
+#define ICE_NVM_CSS_HDR_LEN_H 0x03
#define ICE_NVM_CSS_SREV_L 0x14
#define ICE_NVM_CSS_SREV_H 0x15
-/* Length of CSS header section in words */
-#define ICE_CSS_HEADER_LENGTH 330
-
-/* Offset of Shadow RAM copy in the NVM bank area. */
-#define ICE_NVM_SR_COPY_WORD_OFFSET roundup(ICE_CSS_HEADER_LENGTH, 32)
-
-/* Size in bytes of Option ROM trailer */
-#define ICE_NVM_OROM_TRAILER_LENGTH (2 * ICE_CSS_HEADER_LENGTH)
+/* Length of Authentication header section in words */
+#define ICE_NVM_AUTH_HEADER_LEN 0x08
/* The Link Topology Netlist section is stored as a series of words. It is
* stored in the NVM as a TLV, with the first two words containing the type
@@ -1163,4 +1219,9 @@ struct ice_aq_get_set_rss_lut_params {
#define ICE_FW_API_REPORT_DFLT_CFG_MIN 7
#define ICE_FW_API_REPORT_DFLT_CFG_PATCH 3
+/* AQ API version for Health Status support */
+#define ICE_FW_API_HEALTH_REPORT_MAJ 1
+#define ICE_FW_API_HEALTH_REPORT_MIN 7
+#define ICE_FW_API_HEALTH_REPORT_PATCH 6
+
#endif /* _ICE_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index 21d26e19338a..de9e81ccee66 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -5,7 +5,7 @@
#include "ice.h"
#include "ice_lib.h"
#include "ice_fltr.h"
-#include "ice_virtchnl_allowlist.h"
+#include "virt/allowlist.h"
/* Public functions which may be accessed by all driver files */
@@ -226,6 +226,7 @@ static void ice_vf_clear_counters(struct ice_vf *vf)
vsi->num_vlan = 0;
vf->num_mac = 0;
+ vf->num_mac_lldp = 0;
memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
}
@@ -256,23 +257,21 @@ static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
*
* It brings the VSI down and then reconfigures it with the hardware.
*/
-int ice_vf_reconfig_vsi(struct ice_vf *vf)
+static int ice_vf_reconfig_vsi(struct ice_vf *vf)
{
struct ice_vsi *vsi = ice_get_vf_vsi(vf);
- struct ice_vsi_cfg_params params = {};
struct ice_pf *pf = vf->pf;
int err;
if (WARN_ON(!vsi))
return -EINVAL;
- params = ice_vsi_to_params(vsi);
- params.flags = ICE_VSI_FLAG_NO_INIT;
+ vsi->flags = ICE_VSI_FLAG_NO_INIT;
ice_vsi_decfg(vsi);
ice_fltr_remove_all(vsi);
- err = ice_vsi_cfg(vsi, &params);
+ err = ice_vsi_cfg(vsi);
if (err) {
dev_err(ice_pf_to_dev(pf),
"Failed to reconfigure the VF%u's VSI, error %d\n",
@@ -337,6 +336,13 @@ static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
err = vlan_ops->add_vlan(vsi, &vf->port_vlan_info);
} else {
+ /* clear possible previous port vlan config */
+ err = ice_vsi_clear_port_vlan(vsi);
+ if (err) {
+ dev_err(dev, "failed to clear port VLAN via VSI parameters for VF %u, error %d\n",
+ vf->vf_id, err);
+ return err;
+ }
err = ice_vsi_add_vlan_zero(vsi);
}
@@ -712,6 +718,23 @@ ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
}
/**
+ * ice_reset_vf_mbx_cnt - reset VF mailbox message count
+ * @vf: pointer to the VF structure
+ *
+ * This function clears the VF mailbox message count, and should be called on
+ * VF reset.
+ */
+static void ice_reset_vf_mbx_cnt(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+
+ if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
+ ice_mbx_vf_clear_cnt_e830(&pf->hw, vf->vf_id);
+ else
+ ice_mbx_clear_malvf(&vf->mbx_info);
+}
+
+/**
* ice_reset_all_vfs - reset all allocated VFs in one go
* @pf: pointer to the PF structure
*
@@ -737,7 +760,7 @@ void ice_reset_all_vfs(struct ice_pf *pf)
/* clear all malicious info if the VFs are getting reset */
ice_for_each_vf(pf, bkt, vf)
- ice_mbx_clear_malvf(&vf->mbx_info);
+ ice_reset_vf_mbx_cnt(vf);
/* If VFs have been disabled, there is no need to reset */
if (test_and_set_bit(ICE_VF_DIS, pf->state)) {
@@ -768,7 +791,7 @@ void ice_reset_all_vfs(struct ice_pf *pf)
ice_for_each_vf(pf, bkt, vf) {
mutex_lock(&vf->cfg_lock);
- ice_eswitch_detach(pf, vf);
+ ice_eswitch_detach_vf(pf, vf);
vf->driver_caps = 0;
ice_vc_set_default_allowlist(vf);
@@ -784,7 +807,7 @@ void ice_reset_all_vfs(struct ice_pf *pf)
ice_vf_rebuild_vsi(vf);
ice_vf_post_vsi_rebuild(vf);
- ice_eswitch_attach(pf, vf);
+ ice_eswitch_attach_vf(pf, vf);
mutex_unlock(&vf->cfg_lock);
}
@@ -836,16 +859,13 @@ static void ice_notify_vf_reset(struct ice_vf *vf)
int ice_reset_vf(struct ice_vf *vf, u32 flags)
{
struct ice_pf *pf = vf->pf;
- struct ice_lag *lag;
struct ice_vsi *vsi;
- u8 act_prt, pri_prt;
struct device *dev;
int err = 0;
+ u8 act_prt;
bool rsd;
dev = ice_pf_to_dev(pf);
- act_prt = ICE_LAG_INVALID_PORT;
- pri_prt = pf->hw.port_info->lport;
if (flags & ICE_VF_RESET_NOTIFY)
ice_notify_vf_reset(vf);
@@ -856,22 +876,14 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
return 0;
}
- lag = pf->lag;
- mutex_lock(&pf->lag_mutex);
- if (lag && lag->bonded && lag->primary) {
- act_prt = lag->active_port;
- if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT &&
- lag->upper_netdev)
- ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
- else
- act_prt = ICE_LAG_INVALID_PORT;
- }
-
if (flags & ICE_VF_RESET_LOCK)
mutex_lock(&vf->cfg_lock);
else
lockdep_assert_held(&vf->cfg_lock);
+ mutex_lock(&pf->lag_mutex);
+ act_prt = ice_lag_prepare_vf_reset(pf->lag);
+
if (ice_is_vf_disabled(vf)) {
vsi = ice_get_vf_vsi(vf);
if (!vsi) {
@@ -950,20 +962,18 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
goto out_unlock;
}
- ice_eswitch_update_repr(vf->repr_id, vsi);
+ ice_eswitch_update_repr(&vf->repr_id, vsi);
/* if the VF has been reset allow it to come up again */
- ice_mbx_clear_malvf(&vf->mbx_info);
+ ice_reset_vf_mbx_cnt(vf);
out_unlock:
+ ice_lag_complete_vf_reset(pf->lag, act_prt);
+ mutex_unlock(&pf->lag_mutex);
+
if (flags & ICE_VF_RESET_LOCK)
mutex_unlock(&vf->cfg_lock);
- if (lag && lag->bonded && lag->primary &&
- act_prt != ICE_LAG_INVALID_PORT)
- ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
- mutex_unlock(&pf->lag_mutex);
-
return err;
}
@@ -992,10 +1002,16 @@ void ice_initialize_vf_entry(struct ice_vf *vf)
/* assign default capabilities */
vf->spoofchk = true;
- vf->num_vf_qs = vfs->num_qps_per;
ice_vc_set_default_allowlist(vf);
ice_virtchnl_set_dflt_ops(vf);
+ /* set default number of MSI-X */
+ vf->num_msix = vfs->num_msix_per;
+ vf->num_vf_qs = vfs->num_qps_per;
+
+ /* set default RSS hash configuration */
+ vf->rss_hashcfg = ICE_DEFAULT_RSS_HASHCFG;
+
/* ctrl_vsi_idx will be set to a valid value only when iAVF
* creates its first fdir rule.
*/
@@ -1003,11 +1019,22 @@ void ice_initialize_vf_entry(struct ice_vf *vf)
ice_vf_fdir_init(vf);
/* Initialize mailbox info for this VF */
- ice_mbx_init_vf_info(&pf->hw, &vf->mbx_info);
+ if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
+ ice_mbx_vf_clear_cnt_e830(&pf->hw, vf->vf_id);
+ else
+ ice_mbx_init_vf_info(&pf->hw, &vf->mbx_info);
mutex_init(&vf->cfg_lock);
}
+void ice_deinitialize_vf_entry(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+
+ if (!ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
+ list_del(&vf->mbx_info.list_entry);
+}
+
/**
* ice_dis_vf_qs - Disable the VF queues
* @vf: pointer to the VF structure
@@ -1240,7 +1267,7 @@ struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
struct ice_vsi *vsi;
params.type = ICE_VSI_CTRL;
- params.pi = ice_vf_get_port_info(vf);
+ params.port_info = ice_vf_get_port_info(vf);
params.vf = vf;
params.flags = ICE_VSI_FLAG_INIT;
@@ -1365,3 +1392,28 @@ struct ice_vsi *ice_get_vf_ctrl_vsi(struct ice_pf *pf, struct ice_vsi *vsi)
rcu_read_unlock();
return ctrl_vsi;
}
+
+/**
+ * ice_vf_update_mac_lldp_num - update the VF's number of LLDP addresses
+ * @vf: a VF to add the address to
+ * @vsi: the corresponding VSI
+ * @incr: is the rule added or removed
+ */
+void ice_vf_update_mac_lldp_num(struct ice_vf *vf, struct ice_vsi *vsi,
+ bool incr)
+{
+ bool lldp_by_fw = test_bit(ICE_FLAG_FW_LLDP_AGENT, vsi->back->flags);
+ bool was_ena = ice_vf_is_lldp_ena(vf) && !lldp_by_fw;
+ bool is_ena;
+
+ if (WARN_ON(!vsi)) {
+ vf->num_mac_lldp = 0;
+ return;
+ }
+
+ vf->num_mac_lldp += incr ? 1 : -1;
+ is_ena = ice_vf_is_lldp_ena(vf) && !lldp_by_fw;
+
+ if (was_ena != is_ena)
+ ice_vsi_cfg_sw_lldp(vsi, false, is_ena);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
index fec16919ec19..7a9c75d1d07c 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
@@ -12,7 +12,8 @@
#include <net/devlink.h>
#include <linux/avf/virtchnl.h>
#include "ice_type.h"
-#include "ice_virtchnl_fdir.h"
+#include "ice_flow.h"
+#include "virt/fdir.h"
#include "ice_vsi_vlan_ops.h"
#define ICE_MAX_SRIOV_VFS 256
@@ -52,6 +53,65 @@ struct ice_mdd_vf_events {
u16 last_printed;
};
+enum ice_hash_ip_ctx_type {
+ ICE_HASH_IP_CTX_IP = 0,
+ ICE_HASH_IP_CTX_IP_ESP,
+ ICE_HASH_IP_CTX_IP_UDP_ESP,
+ ICE_HASH_IP_CTX_IP_AH,
+ ICE_HASH_IP_CTX_IP_PFCP,
+ ICE_HASH_IP_CTX_IP_UDP,
+ ICE_HASH_IP_CTX_IP_TCP,
+ ICE_HASH_IP_CTX_IP_SCTP,
+ ICE_HASH_IP_CTX_MAX,
+};
+
+struct ice_vf_hash_ip_ctx {
+ struct ice_rss_hash_cfg ctx[ICE_HASH_IP_CTX_MAX];
+};
+
+enum ice_hash_gtpu_ctx_type {
+ ICE_HASH_GTPU_CTX_EH_IP = 0,
+ ICE_HASH_GTPU_CTX_EH_IP_UDP,
+ ICE_HASH_GTPU_CTX_EH_IP_TCP,
+ ICE_HASH_GTPU_CTX_UP_IP,
+ ICE_HASH_GTPU_CTX_UP_IP_UDP,
+ ICE_HASH_GTPU_CTX_UP_IP_TCP,
+ ICE_HASH_GTPU_CTX_DW_IP,
+ ICE_HASH_GTPU_CTX_DW_IP_UDP,
+ ICE_HASH_GTPU_CTX_DW_IP_TCP,
+ ICE_HASH_GTPU_CTX_MAX,
+};
+
+struct ice_vf_hash_gtpu_ctx {
+ struct ice_rss_hash_cfg ctx[ICE_HASH_GTPU_CTX_MAX];
+};
+
+struct ice_vf_hash_ctx {
+ struct ice_vf_hash_ip_ctx v4;
+ struct ice_vf_hash_ip_ctx v6;
+ struct ice_vf_hash_gtpu_ctx ipv4;
+ struct ice_vf_hash_gtpu_ctx ipv6;
+};
+
+/* Structure to store fdir fv entry */
+struct ice_fdir_prof_info {
+ struct ice_parser_profile prof;
+ u64 fdir_active_cnt;
+};
+
+struct ice_vf_qs_bw {
+ u32 committed;
+ u32 peak;
+ u16 queue_id;
+ u8 tc;
+};
+
+/* Structure to store RSS field vector entry */
+struct ice_rss_prof_info {
+ struct ice_parser_profile prof;
+ bool symm;
+};
+
/* VF operations */
struct ice_vf_ops {
enum ice_disq_rst_src reset_type;
@@ -91,8 +151,10 @@ struct ice_vf {
u16 lan_vsi_idx; /* index into PF struct */
u16 ctrl_vsi_idx;
struct ice_vf_fdir fdir;
- /* first vector index of this VF in the PF space */
- int first_vector_idx;
+ struct ice_fdir_prof_info fdir_prof_info[ICE_MAX_PTGS];
+ struct ice_rss_prof_info rss_prof_info[ICE_MAX_PTGS];
+ struct ice_vf_hash_ctx hash_ctx;
+ u64 rss_hashcfg; /* RSS hash configuration */
struct ice_sw *vf_sw_id; /* switch ID the VF VSIs connect to */
struct virtchnl_version_info vf_ver;
u32 driver_caps; /* reported by VF driver */
@@ -109,13 +171,22 @@ struct ice_vf {
u8 spoofchk:1;
u8 link_forced:1;
u8 link_up:1; /* only valid if VF link is forced */
+ u8 lldp_tx_ena:1;
+
+ u16 num_msix; /* num of MSI-X configured on this VF */
+
+ u32 ptp_caps;
+
unsigned int min_tx_rate; /* Minimum Tx bandwidth limit in Mbps */
unsigned int max_tx_rate; /* Maximum Tx bandwidth limit in Mbps */
+ /* first vector index of this VF in the PF space */
+ int first_vector_idx;
DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */
unsigned long vf_caps; /* VF's adv. capabilities */
u8 num_req_qs; /* num of queue pairs requested by VF */
u16 num_mac;
+ u16 num_mac_lldp;
u16 num_vf_qs; /* num of queue configured per VF */
u8 vlan_strip_ena; /* Outer and Inner VLAN strip enable */
#define ICE_INNER_VLAN_STRIP_ENA BIT(0)
@@ -131,7 +202,10 @@ struct ice_vf {
/* devlink port data */
struct devlink_port devlink_port;
- u16 num_msix; /* num of MSI-X configured on this VF */
+ u16 lldp_recipe_id;
+ u16 lldp_rule_id;
+
+ struct ice_vf_qs_bw qs_bw[ICE_MAX_RSS_QS_PER_VF];
};
/* Flags for controlling behavior of ice_reset_vf */
@@ -161,6 +235,11 @@ static inline u16 ice_vf_get_port_vlan_tpid(struct ice_vf *vf)
return vf->port_vlan_info.tpid;
}
+static inline bool ice_vf_is_lldp_ena(struct ice_vf *vf)
+{
+ return vf->num_mac_lldp && vf->trusted;
+}
+
/* VF Hash Table access functions
*
* These functions provide abstraction for interacting with the VF hash table.
@@ -208,6 +287,18 @@ static inline u16 ice_vf_get_port_vlan_tpid(struct ice_vf *vf)
#ifdef CONFIG_PCI_IOV
struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id);
+
+static inline struct ice_vf *ice_get_vf_by_dev(struct ice_pf *pf,
+ struct pci_dev *vf_dev)
+{
+ int vf_id = pci_iov_vf_id(vf_dev);
+
+ if (vf_id < 0)
+ return NULL;
+
+	return ice_get_vf_by_id(pf, vf_id);
+}
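/*
 * Illustrative sketch, not part of this patch: resolving a VF from its
 * pci_dev and releasing it afterwards, assuming (as with
 * ice_get_vf_by_id()) that the returned VF holds a reference that must be
 * dropped with ice_put_vf(). The wrapper name is hypothetical.
 */
static void example_use_vf_dev(struct ice_pf *pf, struct pci_dev *vf_dev)
{
	struct ice_vf *vf = ice_get_vf_by_dev(pf, vf_dev);

	if (!vf)
		return;

	/* ... operate on the VF ... */

	ice_put_vf(vf);
}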
+
void ice_put_vf(struct ice_vf *vf);
bool ice_has_vfs(struct ice_pf *pf);
u16 ice_get_num_vfs(struct ice_pf *pf);
@@ -226,12 +317,20 @@ ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m);
int ice_reset_vf(struct ice_vf *vf, u32 flags);
void ice_reset_all_vfs(struct ice_pf *pf);
struct ice_vsi *ice_get_vf_ctrl_vsi(struct ice_pf *pf, struct ice_vsi *vsi);
+void ice_vf_update_mac_lldp_num(struct ice_vf *vf, struct ice_vsi *vsi,
+ bool incr);
#else /* CONFIG_PCI_IOV */
static inline struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id)
{
return NULL;
}
+static inline struct ice_vf *ice_get_vf_by_dev(struct ice_pf *pf,
+ struct pci_dev *vf_dev)
+{
+ return NULL;
+}
+
static inline void ice_put_vf(struct ice_vf *vf)
{
}
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
index 91ba7fe0eaee..5392b0404986 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
@@ -23,8 +23,8 @@
#warning "Only include ice_vf_lib_private.h in CONFIG_PCI_IOV virtualization files"
#endif
-int ice_vf_reconfig_vsi(struct ice_vf *vf);
void ice_initialize_vf_entry(struct ice_vf *vf);
+void ice_deinitialize_vf_entry(struct ice_vf *vf);
void ice_dis_vf_qs(struct ice_vf *vf);
int ice_check_vf_init(struct ice_vf *vf);
enum virtchnl_status_code ice_err_to_virt_err(int err);
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_mbx.c b/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
index 40cb4ba0789c..7798a5d4bc9d 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
@@ -23,18 +23,18 @@ ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
u8 *msg, u16 msglen, struct ice_sq_cd *cd)
{
struct ice_aqc_pf_vf_msg *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_mbx_opc_send_msg_to_vf);
- cmd = &desc.params.virt;
+ cmd = libie_aq_raw(&desc);
cmd->id = cpu_to_le32(vfid);
desc.cookie_high = cpu_to_le32(v_opcode);
desc.cookie_low = cpu_to_le32(v_retval);
if (msglen)
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd);
}
@@ -211,6 +211,38 @@ ice_mbx_detect_malvf(struct ice_hw *hw, struct ice_mbx_vf_info *vf_info,
}
/**
+ * ice_mbx_vf_dec_trig_e830 - Decrements the VF mailbox queue counter
+ * @hw: pointer to the HW struct
+ * @event: pointer to the control queue receive event
+ *
+ * This function triggers the hardware to decrement the counter
+ * MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT when the driver replenishes
+ * the buffers at the PF mailbox queue.
+ */
+void ice_mbx_vf_dec_trig_e830(const struct ice_hw *hw,
+ const struct ice_rq_event_info *event)
+{
+ u16 vfid = le16_to_cpu(event->desc.retval);
+
+ wr32(hw, E830_MBX_VF_DEC_TRIG(vfid), 1);
+}
+
+/**
+ * ice_mbx_vf_clear_cnt_e830 - Clear the VF mailbox queue count
+ * @hw: pointer to the HW struct
+ * @vf_id: VF ID in the PF space
+ *
+ * This function clears the counter MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT, and should
+ * be called when a VF is created and on VF reset.
+ */
+void ice_mbx_vf_clear_cnt_e830(const struct ice_hw *hw, u16 vf_id)
+{
+ u32 reg = rd32(hw, E830_MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT(vf_id));
+
+ wr32(hw, E830_MBX_VF_DEC_TRIG(vf_id), reg);
+}
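/*
 * Illustrative sketch, not part of this patch: the expected call sites for
 * the two E830 mailbox helpers above, based only on their kernel-doc. The
 * wrapping function is hypothetical.
 */
static void example_e830_mbx_flow(struct ice_hw *hw, u16 vf_id,
				  const struct ice_rq_event_info *event)
{
	/* on VF creation and on VF reset: start with an empty in-flight count */
	ice_mbx_vf_clear_cnt_e830(hw, vf_id);

	/* after replenishing the PF mailbox Rx buffers for a received message */
	ice_mbx_vf_dec_trig_e830(hw, event);
}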
+
+/**
* ice_mbx_vf_state_handler - Handle states of the overflow algorithm
* @hw: pointer to the HW struct
* @mbx_data: pointer to structure containing mailbox data
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_mbx.h b/drivers/net/ethernet/intel/ice/ice_vf_mbx.h
index 44bc030d17e0..684de89e5c5e 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_mbx.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_mbx.h
@@ -19,6 +19,9 @@ ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
u8 *msg, u16 msglen, struct ice_sq_cd *cd);
u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed);
+void ice_mbx_vf_dec_trig_e830(const struct ice_hw *hw,
+ const struct ice_rq_event_info *event);
+void ice_mbx_vf_clear_cnt_e830(const struct ice_hw *hw, u16 vf_id);
int
ice_mbx_vf_state_handler(struct ice_hw *hw, struct ice_mbx_data *mbx_data,
struct ice_mbx_vf_info *vf_info, bool *report_malvf);
@@ -47,5 +50,11 @@ static inline void ice_mbx_init_snapshot(struct ice_hw *hw)
{
}
+static inline void
+ice_mbx_vf_dec_trig_e830(const struct ice_hw *hw,
+ const struct ice_rq_event_info *event)
+{
+}
+
#endif /* CONFIG_PCI_IOV */
#endif /* _ICE_VF_MBX_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_vlan_mode.c b/drivers/net/ethernet/intel/ice/ice_vlan_mode.c
index 1279c1ffe31c..fb526cb84776 100644
--- a/drivers/net/ethernet/intel/ice/ice_vlan_mode.c
+++ b/drivers/net/ethernet/intel/ice/ice_vlan_mode.c
@@ -63,7 +63,7 @@ static int
ice_aq_get_vlan_mode(struct ice_hw *hw,
struct ice_aqc_get_vlan_mode *get_params)
{
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
if (!get_params)
return -EINVAL;
@@ -275,7 +275,7 @@ ice_aq_set_vlan_mode(struct ice_hw *hw,
struct ice_aqc_set_vlan_mode *set_params)
{
u8 rdma_packet, mng_vlan_prot_id;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
if (!set_params)
return -EINVAL;
@@ -295,7 +295,7 @@ ice_aq_set_vlan_mode(struct ice_hw *hw,
ice_fill_dflt_direct_cmd_desc(&desc,
ice_aqc_opc_set_vlan_mode_parameters);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
return ice_aq_send_cmd(hw, &desc, set_params, sizeof(*set_params),
NULL);
diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
index 2e9ad27cb9d1..ada78f83b3ac 100644
--- a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
@@ -45,14 +45,15 @@ int ice_vsi_add_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
return -EINVAL;
err = ice_fltr_add_vlan(vsi, vlan);
- if (err && err != -EEXIST) {
+ if (!err)
+ vsi->num_vlan++;
+ else if (err == -EEXIST)
+ err = 0;
+ else
dev_err(ice_pf_to_dev(vsi->back), "Failure Adding VLAN %d on VSI %i, status %d\n",
vlan->vid, vsi->vsi_num, err);
- return err;
- }
- vsi->num_vlan++;
- return 0;
+ return err;
}
/**
@@ -112,7 +113,7 @@ static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (err) {
dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN insert failed, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
goto out;
}
@@ -168,7 +169,7 @@ static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (err) {
dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN strip failed, ena = %d err %d aq_err %s\n",
- ena, err, ice_aq_str(hw->adminq.sq_last_status));
+ ena, err, libie_aq_str(hw->adminq.sq_last_status));
goto out;
}
@@ -257,7 +258,7 @@ static int __ice_vsi_set_inner_port_vlan(struct ice_vsi *vsi, u16 pvid_info)
ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (ret) {
dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %d aq_err %s\n",
- ret, ice_aq_str(hw->adminq.sq_last_status));
+ ret, libie_aq_str(hw->adminq.sq_last_status));
goto out;
}
@@ -305,7 +306,7 @@ int ice_vsi_clear_inner_port_vlan(struct ice_vsi *vsi)
ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (ret)
dev_err(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %d aq_err %s\n",
- ret, ice_aq_str(hw->adminq.sq_last_status));
+ ret, libie_aq_str(hw->adminq.sq_last_status));
kfree(ctxt);
return ret;
@@ -352,7 +353,7 @@ static int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena)
if (status) {
netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %s\n",
ena ? "En" : "Dis", vsi->idx, vsi->vsi_num, status,
- ice_aq_str(pf->hw.adminq.sq_last_status));
+ libie_aq_str(pf->hw.adminq.sq_last_status));
goto err_out;
}
@@ -496,7 +497,7 @@ int ice_vsi_ena_outer_stripping(struct ice_vsi *vsi, u16 tpid)
err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (err)
dev_err(ice_pf_to_dev(vsi->back), "update VSI for enabling outer VLAN stripping failed, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
else
vsi->info.outer_vlan_flags = ctxt->info.outer_vlan_flags;
@@ -543,7 +544,7 @@ int ice_vsi_dis_outer_stripping(struct ice_vsi *vsi)
err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (err)
dev_err(ice_pf_to_dev(vsi->back), "update VSI for disabling outer VLAN stripping failed, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
else
vsi->info.outer_vlan_flags = ctxt->info.outer_vlan_flags;
@@ -603,7 +604,7 @@ int ice_vsi_ena_outer_insertion(struct ice_vsi *vsi, u16 tpid)
err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (err)
dev_err(ice_pf_to_dev(vsi->back), "update VSI for enabling outer VLAN insertion failed, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
else
vsi->info.outer_vlan_flags = ctxt->info.outer_vlan_flags;
@@ -653,7 +654,7 @@ int ice_vsi_dis_outer_insertion(struct ice_vsi *vsi)
err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (err)
dev_err(ice_pf_to_dev(vsi->back), "update VSI for disabling outer VLAN insertion failed, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
else
vsi->info.outer_vlan_flags = ctxt->info.outer_vlan_flags;
@@ -719,7 +720,7 @@ __ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, u16 vlan_info, u16 tpid)
err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (err) {
dev_err(ice_pf_to_dev(vsi->back), "update VSI for setting outer port based VLAN failed, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
} else {
vsi->info.port_based_outer_vlan = ctxt->info.port_based_outer_vlan;
vsi->info.outer_vlan_flags = ctxt->info.outer_vlan_flags;
@@ -781,7 +782,64 @@ int ice_vsi_clear_outer_port_vlan(struct ice_vsi *vsi)
err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (err)
dev_err(ice_pf_to_dev(vsi->back), "update VSI for clearing outer port based VLAN failed, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
+
+ kfree(ctxt);
+ return err;
+}
+
+int ice_vsi_clear_port_vlan(struct ice_vsi *vsi)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx *ctxt;
+ int err;
+
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return -ENOMEM;
+
+ ctxt->info = vsi->info;
+
+ ctxt->info.port_based_outer_vlan = 0;
+ ctxt->info.port_based_inner_vlan = 0;
+
+ ctxt->info.inner_vlan_flags =
+ FIELD_PREP(ICE_AQ_VSI_INNER_VLAN_TX_MODE_M,
+ ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL);
+ if (ice_is_dvm_ena(hw)) {
+ ctxt->info.inner_vlan_flags |=
+ FIELD_PREP(ICE_AQ_VSI_INNER_VLAN_EMODE_M,
+ ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING);
+ ctxt->info.outer_vlan_flags =
+ FIELD_PREP(ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M,
+ ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL);
+ ctxt->info.outer_vlan_flags |=
+ FIELD_PREP(ICE_AQ_VSI_OUTER_TAG_TYPE_M,
+ ICE_AQ_VSI_OUTER_TAG_VLAN_8100);
+ ctxt->info.outer_vlan_flags |=
+ ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING <<
+ ICE_AQ_VSI_OUTER_VLAN_EMODE_S;
+ }
+
+ ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+ ctxt->info.valid_sections =
+ cpu_to_le16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID |
+ ICE_AQ_VSI_PROP_VLAN_VALID |
+ ICE_AQ_VSI_PROP_SW_VALID);
+
+ err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (err) {
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for clearing port based VLAN failed, err %d aq_err %s\n",
+ err, libie_aq_str(hw->adminq.sq_last_status));
+ } else {
+ vsi->info.port_based_outer_vlan =
+ ctxt->info.port_based_outer_vlan;
+ vsi->info.port_based_inner_vlan =
+ ctxt->info.port_based_inner_vlan;
+ vsi->info.outer_vlan_flags = ctxt->info.outer_vlan_flags;
+ vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags;
+ vsi->info.sw_flags2 = ctxt->info.sw_flags2;
+ }
kfree(ctxt);
return err;
diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h
index f0d84d11bd5b..12b227621a7d 100644
--- a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h
@@ -36,5 +36,6 @@ int ice_vsi_ena_outer_insertion(struct ice_vsi *vsi, u16 tpid);
int ice_vsi_dis_outer_insertion(struct ice_vsi *vsi);
int ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan);
int ice_vsi_clear_outer_port_vlan(struct ice_vsi *vsi);
+int ice_vsi_clear_port_vlan(struct ice_vsi *vsi);
#endif /* _ICE_VSI_VLAN_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c
index 4a6c850d83ac..8c7a9b41fb63 100644
--- a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c
+++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c
@@ -3,6 +3,7 @@
#include "ice_pf_vsi_vlan_ops.h"
#include "ice_vf_vsi_vlan_ops.h"
+#include "ice_sf_vsi_vlan_ops.h"
#include "ice_lib.h"
#include "ice.h"
@@ -72,12 +73,14 @@ void ice_vsi_init_vlan_ops(struct ice_vsi *vsi)
switch (vsi->type) {
case ICE_VSI_PF:
- case ICE_VSI_SWITCHDEV_CTRL:
ice_pf_vsi_init_vlan_ops(vsi);
break;
case ICE_VSI_VF:
ice_vf_vsi_init_vlan_ops(vsi);
break;
+ case ICE_VSI_SF:
+ ice_sf_vsi_init_vlan_ops(vsi);
+ break;
default:
dev_dbg(ice_pf_to_dev(vsi->back), "%s does not support VLAN operations\n",
ice_vsi_type_str(vsi->type));
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 1857220d27fe..989ff1fd9110 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -2,6 +2,8 @@
/* Copyright (c) 2019, Intel Corporation. */
#include <linux/bpf_trace.h>
+#include <linux/unroll.h>
+#include <net/libeth/xdp.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>
#include "ice.h"
@@ -18,54 +20,12 @@ static struct xdp_buff **ice_xdp_buf(struct ice_rx_ring *rx_ring, u32 idx)
}
/**
- * ice_qp_reset_stats - Resets all stats for rings of given index
- * @vsi: VSI that contains rings of interest
- * @q_idx: ring index in array
- */
-static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
-{
- struct ice_vsi_stats *vsi_stat;
- struct ice_pf *pf;
-
- pf = vsi->back;
- if (!pf->vsi_stats)
- return;
-
- vsi_stat = pf->vsi_stats[vsi->idx];
- if (!vsi_stat)
- return;
-
- memset(&vsi_stat->rx_ring_stats[q_idx]->rx_stats, 0,
- sizeof(vsi_stat->rx_ring_stats[q_idx]->rx_stats));
- memset(&vsi_stat->tx_ring_stats[q_idx]->stats, 0,
- sizeof(vsi_stat->tx_ring_stats[q_idx]->stats));
- if (ice_is_xdp_ena_vsi(vsi))
- memset(&vsi->xdp_rings[q_idx]->ring_stats->stats, 0,
- sizeof(vsi->xdp_rings[q_idx]->ring_stats->stats));
-}
-
-/**
- * ice_qp_clean_rings - Cleans all the rings of a given index
- * @vsi: VSI that contains rings of interest
- * @q_idx: ring index in array
- */
-static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
-{
- ice_clean_tx_ring(vsi->tx_rings[q_idx]);
- if (ice_is_xdp_ena_vsi(vsi)) {
- synchronize_rcu();
- ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
- }
- ice_clean_rx_ring(vsi->rx_rings[q_idx]);
-}
-
-/**
* ice_qvec_toggle_napi - Enables/disables NAPI for a given q_vector
* @vsi: VSI that has netdev
* @q_vector: q_vector that has NAPI context
* @enable: true for enable, false for disable
*/
-static void
+void
ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
bool enable)
{
@@ -84,7 +44,7 @@ ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
* @rx_ring: Rx ring that will have its IRQ disabled
* @q_vector: queue vector
*/
-static void
+void
ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring,
struct ice_q_vector *q_vector)
{
@@ -112,25 +72,29 @@ ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring,
* ice_qvec_cfg_msix - Enable IRQ for given queue vector
* @vsi: the VSI that contains queue vector
* @q_vector: queue vector
+ * @qid: queue index
*/
-static void
-ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
+void
+ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector, u16 qid)
{
u16 reg_idx = q_vector->reg_idx;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
- struct ice_tx_ring *tx_ring;
- struct ice_rx_ring *rx_ring;
+ int q, _qid = qid;
ice_cfg_itr(hw, q_vector);
- ice_for_each_tx_ring(tx_ring, q_vector->tx)
- ice_cfg_txq_interrupt(vsi, tx_ring->reg_idx, reg_idx,
- q_vector->tx.itr_idx);
+ for (q = 0; q < q_vector->num_ring_tx; q++) {
+ ice_cfg_txq_interrupt(vsi, _qid, reg_idx, q_vector->tx.itr_idx);
+ _qid++;
+ }
+
+ _qid = qid;
- ice_for_each_rx_ring(rx_ring, q_vector->rx)
- ice_cfg_rxq_interrupt(vsi, rx_ring->reg_idx, reg_idx,
- q_vector->rx.itr_idx);
+ for (q = 0; q < q_vector->num_ring_rx; q++) {
+ ice_cfg_rxq_interrupt(vsi, _qid, reg_idx, q_vector->rx.itr_idx);
+ _qid++;
+ }
ice_flush(hw);
}
@@ -140,7 +104,7 @@ ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
* @vsi: the VSI that contains queue vector
* @q_vector: queue vector
*/
-static void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
+void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
{
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
@@ -151,111 +115,6 @@ static void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
}
/**
- * ice_qp_dis - Disables a queue pair
- * @vsi: VSI of interest
- * @q_idx: ring index in array
- *
- * Returns 0 on success, negative on failure.
- */
-static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
-{
- struct ice_txq_meta txq_meta = { };
- struct ice_q_vector *q_vector;
- struct ice_tx_ring *tx_ring;
- struct ice_rx_ring *rx_ring;
- int timeout = 50;
- int err;
-
- if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
- return -EINVAL;
-
- tx_ring = vsi->tx_rings[q_idx];
- rx_ring = vsi->rx_rings[q_idx];
- q_vector = rx_ring->q_vector;
-
- while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) {
- timeout--;
- if (!timeout)
- return -EBUSY;
- usleep_range(1000, 2000);
- }
-
- ice_qvec_dis_irq(vsi, rx_ring, q_vector);
- ice_qvec_toggle_napi(vsi, q_vector, false);
-
- netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
-
- ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
- err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
- if (err)
- return err;
- if (ice_is_xdp_ena_vsi(vsi)) {
- struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
-
- memset(&txq_meta, 0, sizeof(txq_meta));
- ice_fill_txq_meta(vsi, xdp_ring, &txq_meta);
- err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, xdp_ring,
- &txq_meta);
- if (err)
- return err;
- }
- err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
- if (err)
- return err;
-
- ice_qp_clean_rings(vsi, q_idx);
- ice_qp_reset_stats(vsi, q_idx);
-
- return 0;
-}
-
-/**
- * ice_qp_ena - Enables a queue pair
- * @vsi: VSI of interest
- * @q_idx: ring index in array
- *
- * Returns 0 on success, negative on failure.
- */
-static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
-{
- struct ice_q_vector *q_vector;
- int err;
-
- err = ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx);
- if (err)
- return err;
-
- if (ice_is_xdp_ena_vsi(vsi)) {
- struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
-
- err = ice_vsi_cfg_single_txq(vsi, vsi->xdp_rings, q_idx);
- if (err)
- return err;
- ice_set_ring_xdp(xdp_ring);
- ice_tx_xsk_pool(vsi, q_idx);
- }
-
- err = ice_vsi_cfg_single_rxq(vsi, q_idx);
- if (err)
- return err;
-
- q_vector = vsi->rx_rings[q_idx]->q_vector;
- ice_qvec_cfg_msix(vsi, q_vector);
-
- err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
- if (err)
- return err;
-
- ice_qvec_toggle_napi(vsi, q_vector, true);
- ice_qvec_ena_irq(vsi, q_vector);
-
- netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
- clear_bit(ICE_CFG_BUSY, vsi->state);
-
- return 0;
-}
-
-/**
* ice_xsk_pool_disable - disable a buffer pool region
* @vsi: Current VSI
* @qid: queue ID
@@ -269,7 +128,6 @@ static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
if (!pool)
return -EINVAL;
- clear_bit(qid, vsi->af_xdp_zc_qps);
xsk_pool_dma_unmap(pool, ICE_RX_DMA_ATTR);
return 0;
@@ -288,7 +146,7 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
int err;
- if (vsi->type != ICE_VSI_PF)
+ if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_SF)
return -EINVAL;
if (qid >= vsi->netdev->real_num_rx_queues ||
@@ -300,8 +158,6 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
if (err)
return err;
- set_bit(qid, vsi->af_xdp_zc_qps);
-
return 0;
}
@@ -314,48 +170,18 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
* If allocation was successful, substitute buffer with allocated one.
* Returns 0 on success, negative on failure
*/
-static int
+int
ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, bool pool_present)
{
- size_t elem_size = pool_present ? sizeof(*rx_ring->xdp_buf) :
- sizeof(*rx_ring->rx_buf);
- void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL);
-
- if (!sw_ring)
- return -ENOMEM;
-
if (pool_present) {
- kfree(rx_ring->rx_buf);
- rx_ring->rx_buf = NULL;
- rx_ring->xdp_buf = sw_ring;
+ rx_ring->xdp_buf = kcalloc(rx_ring->count,
+ sizeof(*rx_ring->xdp_buf),
+ GFP_KERNEL);
+ if (!rx_ring->xdp_buf)
+ return -ENOMEM;
} else {
kfree(rx_ring->xdp_buf);
rx_ring->xdp_buf = NULL;
- rx_ring->rx_buf = sw_ring;
- }
-
- return 0;
-}
-
-/**
- * ice_realloc_zc_buf - reallocate XDP ZC queue pairs
- * @vsi: Current VSI
- * @zc: is zero copy set
- *
- * Reallocate buffer for rx_rings that might be used by XSK.
- * XDP requires more memory, than rx_buf provides.
- * Returns 0 on success, negative on failure
- */
-int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc)
-{
- struct ice_rx_ring *rx_ring;
- unsigned long q;
-
- for_each_set_bit(q, vsi->af_xdp_zc_qps,
- max_t(int, vsi->alloc_txq, vsi->alloc_rxq)) {
- rx_ring = vsi->rx_rings[q];
- if (ice_realloc_rx_xdp_bufs(rx_ring, zc))
- return -ENOMEM;
}
return 0;
@@ -371,6 +197,7 @@ int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc)
*/
int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
+ struct ice_rx_ring *rx_ring = vsi->rx_rings[qid];
bool if_running, pool_present = !!pool;
int ret = 0, pool_failure = 0;
@@ -380,11 +207,10 @@ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
goto failure;
}
- if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);
+ if_running = !test_bit(ICE_VSI_DOWN, vsi->state) &&
+ ice_is_xdp_ena_vsi(vsi);
if (if_running) {
- struct ice_rx_ring *rx_ring = vsi->rx_rings[qid];
-
ret = ice_qp_dis(vsi, qid);
if (ret) {
netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret);
@@ -445,11 +271,6 @@ static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
rx_desc->read.pkt_addr = cpu_to_le64(dma);
rx_desc->wb.status_error0 = 0;
- /* Put private info that changes on a per-packet basis
- * into xdp_buff_xsk->cb.
- */
- ice_xdp_meta_set_desc(*xdp, rx_desc);
-
rx_desc++;
xdp++;
}
@@ -460,6 +281,7 @@ static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
/**
* __ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
* @rx_ring: Rx ring
+ * @xsk_pool: XSK buffer pool to pick buffers to be filled by HW
* @count: The number of buffers to allocate
*
* Place the @count of descriptors onto Rx ring. Handle the ring wrap
@@ -468,7 +290,8 @@ static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
*
* Returns true if all allocations were successful, false if any fail.
*/
-static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
+static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring,
+ struct xsk_buff_pool *xsk_pool, u16 count)
{
u32 nb_buffs_extra = 0, nb_buffs = 0;
union ice_32b_rx_flex_desc *rx_desc;
@@ -480,8 +303,7 @@ static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
xdp = ice_xdp_buf(rx_ring, ntu);
if (ntu + count >= rx_ring->count) {
- nb_buffs_extra = ice_fill_rx_descs(rx_ring->xsk_pool, xdp,
- rx_desc,
+ nb_buffs_extra = ice_fill_rx_descs(xsk_pool, xdp, rx_desc,
rx_ring->count - ntu);
if (nb_buffs_extra != rx_ring->count - ntu) {
ntu += nb_buffs_extra;
@@ -494,7 +316,7 @@ static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
ice_release_rx_desc(rx_ring, 0);
}
- nb_buffs = ice_fill_rx_descs(rx_ring->xsk_pool, xdp, rx_desc, count);
+ nb_buffs = ice_fill_rx_descs(xsk_pool, xdp, rx_desc, count);
ntu += nb_buffs;
if (ntu == rx_ring->count)
@@ -510,6 +332,7 @@ exit:
/**
* ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
* @rx_ring: Rx ring
+ * @xsk_pool: XSK buffer pool to pick buffers to be filled by HW
* @count: The number of buffers to allocate
*
* Wrapper for internal allocation routine; figure out how many tail
@@ -517,7 +340,8 @@ exit:
*
* Returns true if all calls to internal alloc routine succeeded
*/
-bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
+bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring,
+ struct xsk_buff_pool *xsk_pool, u16 count)
{
u16 rx_thresh = ICE_RING_QUARTER(rx_ring);
u16 leftover, i, tail_bumps;
@@ -526,80 +350,18 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
leftover = count - (tail_bumps * rx_thresh);
for (i = 0; i < tail_bumps; i++)
- if (!__ice_alloc_rx_bufs_zc(rx_ring, rx_thresh))
+ if (!__ice_alloc_rx_bufs_zc(rx_ring, xsk_pool, rx_thresh))
return false;
- return __ice_alloc_rx_bufs_zc(rx_ring, leftover);
-}
-
-/**
- * ice_construct_skb_zc - Create an sk_buff from zero-copy buffer
- * @rx_ring: Rx ring
- * @xdp: Pointer to XDP buffer
- *
- * This function allocates a new skb from a zero-copy Rx buffer.
- *
- * Returns the skb on success, NULL on failure.
- */
-static struct sk_buff *
-ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
-{
- unsigned int totalsize = xdp->data_end - xdp->data_meta;
- unsigned int metasize = xdp->data - xdp->data_meta;
- struct skb_shared_info *sinfo = NULL;
- struct sk_buff *skb;
- u32 nr_frags = 0;
-
- if (unlikely(xdp_buff_has_frags(xdp))) {
- sinfo = xdp_get_shared_info_from_buff(xdp);
- nr_frags = sinfo->nr_frags;
- }
- net_prefetch(xdp->data_meta);
-
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
- GFP_ATOMIC | __GFP_NOWARN);
- if (unlikely(!skb))
- return NULL;
-
- memcpy(__skb_put(skb, totalsize), xdp->data_meta,
- ALIGN(totalsize, sizeof(long)));
-
- if (metasize) {
- skb_metadata_set(skb, metasize);
- __skb_pull(skb, metasize);
- }
-
- if (likely(!xdp_buff_has_frags(xdp)))
- goto out;
-
- for (int i = 0; i < nr_frags; i++) {
- struct skb_shared_info *skinfo = skb_shinfo(skb);
- skb_frag_t *frag = &sinfo->frags[i];
- struct page *page;
- void *addr;
-
- page = dev_alloc_page();
- if (!page) {
- dev_kfree_skb(skb);
- return NULL;
- }
- addr = page_to_virt(page);
-
- memcpy(addr, skb_frag_page(frag), skb_frag_size(frag));
-
- __skb_fill_page_desc_noacc(skinfo, skinfo->nr_frags++,
- addr, 0, skb_frag_size(frag));
- }
-
-out:
- xsk_buff_free(xdp);
- return skb;
+ return __ice_alloc_rx_bufs_zc(rx_ring, xsk_pool, leftover);
}
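/*
 * Worked example for the batching above (illustrative, not part of this
 * patch): with a 512-descriptor ring the quarter threshold is 128, so a
 * request for count = 300 performs two full calls of 128 buffers each and
 * one final call for the leftover 44, bumping the tail after each step.
 */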
/**
* ice_clean_xdp_irq_zc - produce AF_XDP descriptors to CQ
* @xdp_ring: XDP Tx ring
+ * @xsk_pool: AF_XDP buffer pool pointer
*/
-static u32 ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
+static u32 ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring,
+ struct xsk_buff_pool *xsk_pool)
{
u16 ntc = xdp_ring->next_to_clean;
struct ice_tx_desc *tx_desc;
@@ -650,7 +412,7 @@ skip:
if (xdp_ring->next_to_clean >= cnt)
xdp_ring->next_to_clean -= cnt;
if (xsk_frames)
- xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
+ xsk_tx_completed(xsk_pool, xsk_frames);
return completed_frames;
}
@@ -659,6 +421,7 @@ skip:
* ice_xmit_xdp_tx_zc - AF_XDP ZC handler for XDP_TX
* @xdp: XDP buffer to xmit
* @xdp_ring: XDP ring to produce descriptor onto
+ * @xsk_pool: AF_XDP buffer pool pointer
*
* note that this function works directly on xdp_buff, no need to convert
* it to xdp_frame. xdp_buff pointer is stored to ice_tx_buf so that cleaning
@@ -668,7 +431,8 @@ skip:
* was not enough space on XDP ring
*/
static int ice_xmit_xdp_tx_zc(struct xdp_buff *xdp,
- struct ice_tx_ring *xdp_ring)
+ struct ice_tx_ring *xdp_ring,
+ struct xsk_buff_pool *xsk_pool)
{
struct skb_shared_info *sinfo = NULL;
u32 size = xdp->data_end - xdp->data;
@@ -682,7 +446,7 @@ static int ice_xmit_xdp_tx_zc(struct xdp_buff *xdp,
free_space = ICE_DESC_UNUSED(xdp_ring);
if (free_space < ICE_RING_QUARTER(xdp_ring))
- free_space += ice_clean_xdp_irq_zc(xdp_ring);
+ free_space += ice_clean_xdp_irq_zc(xdp_ring, xsk_pool);
if (unlikely(!free_space))
goto busy;
@@ -702,7 +466,7 @@ static int ice_xmit_xdp_tx_zc(struct xdp_buff *xdp,
dma_addr_t dma;
dma = xsk_buff_xdp_get_dma(xdp);
- xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, size);
+ xsk_buff_raw_dma_sync_for_device(xsk_pool, dma, size);
tx_buf->xdp = xdp;
tx_buf->type = ICE_TX_BUF_XSK_TX;
@@ -744,12 +508,14 @@ busy:
* @xdp: xdp_buff used as input to the XDP program
* @xdp_prog: XDP program to run
* @xdp_ring: ring to be used for XDP_TX action
+ * @xsk_pool: AF_XDP buffer pool pointer
*
* Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
*/
static int
ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
- struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring)
+ struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring,
+ struct xsk_buff_pool *xsk_pool)
{
int err, result = ICE_XDP_PASS;
u32 act;
@@ -760,7 +526,7 @@ ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
if (!err)
return ICE_XDP_REDIR;
- if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
+ if (xsk_uses_need_wakeup(xsk_pool) && err == -ENOBUFS)
result = ICE_XDP_EXIT;
else
result = ICE_XDP_CONSUMED;
@@ -771,7 +537,7 @@ ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
case XDP_PASS:
break;
case XDP_TX:
- result = ice_xmit_xdp_tx_zc(xdp, xdp_ring);
+ result = ice_xmit_xdp_tx_zc(xdp, xdp_ring, xsk_pool);
if (result == ICE_XDP_CONSUMED)
goto out_failure;
break;
@@ -791,49 +557,22 @@ out_failure:
return result;
}
-static int
-ice_add_xsk_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *first,
- struct xdp_buff *xdp, const unsigned int size)
-{
- struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(first);
-
- if (!size)
- return 0;
-
- if (!xdp_buff_has_frags(first)) {
- sinfo->nr_frags = 0;
- sinfo->xdp_frags_size = 0;
- xdp_buff_set_frags_flag(first);
- }
-
- if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
- xsk_buff_free(first);
- return -ENOMEM;
- }
-
- __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
- virt_to_page(xdp->data_hard_start),
- XDP_PACKET_HEADROOM, size);
- sinfo->xdp_frags_size += size;
- xsk_buff_add_frag(xdp);
-
- return 0;
-}
-
/**
* ice_clean_rx_irq_zc - consumes packets from the hardware ring
* @rx_ring: AF_XDP Rx ring
+ * @xsk_pool: AF_XDP buffer pool pointer
* @budget: NAPI budget
*
* Returns number of processed packets on success, remaining budget on failure.
*/
-int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
+int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring,
+ struct xsk_buff_pool *xsk_pool,
+ int budget)
{
+ struct xdp_buff *first = (struct xdp_buff *)rx_ring->xsk;
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
- struct xsk_buff_pool *xsk_pool = rx_ring->xsk_pool;
u32 ntc = rx_ring->next_to_clean;
u32 ntu = rx_ring->next_to_use;
- struct xdp_buff *first = NULL;
struct ice_tx_ring *xdp_ring;
unsigned int xdp_xmit = 0;
struct bpf_prog *xdp_prog;
@@ -847,9 +586,6 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
xdp_ring = rx_ring->xdp_ring;
- if (ntc != rx_ring->first_desc)
- first = *ice_xdp_buf(rx_ring, rx_ring->first_desc);
-
while (likely(total_rx_packets < (unsigned int)budget)) {
union ice_32b_rx_flex_desc *rx_desc;
unsigned int size, xdp_res = 0;
@@ -879,27 +615,30 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
ICE_RX_FLX_DESC_PKT_LEN_M;
xsk_buff_set_size(xdp, size);
- xsk_buff_dma_sync_for_cpu(xdp, xsk_pool);
+ xsk_buff_dma_sync_for_cpu(xdp);
if (!first) {
first = xdp;
- } else if (ice_add_xsk_frag(rx_ring, first, xdp, size)) {
- break;
+ } else if (likely(size) && !xsk_buff_add_frag(first, xdp)) {
+ xsk_buff_free(first);
+ first = NULL;
}
if (++ntc == cnt)
ntc = 0;
- if (ice_is_non_eop(rx_ring, rx_desc))
+ if (ice_is_non_eop(rx_ring, rx_desc) || unlikely(!first))
continue;
- xdp_res = ice_run_xdp_zc(rx_ring, first, xdp_prog, xdp_ring);
+ ((struct libeth_xdp_buff *)first)->desc = rx_desc;
+
+ xdp_res = ice_run_xdp_zc(rx_ring, first, xdp_prog, xdp_ring,
+ xsk_pool);
if (likely(xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))) {
xdp_xmit |= xdp_res;
} else if (xdp_res == ICE_XDP_EXIT) {
failure = true;
first = NULL;
- rx_ring->first_desc = ntc;
break;
} else if (xdp_res == ICE_XDP_CONSUMED) {
xsk_buff_free(first);
@@ -911,24 +650,20 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
total_rx_packets++;
first = NULL;
- rx_ring->first_desc = ntc;
continue;
construct_skb:
/* XDP_PASS path */
- skb = ice_construct_skb_zc(rx_ring, first);
+ skb = xdp_build_skb_from_zc(first);
if (!skb) {
+ xsk_buff_free(first);
+ first = NULL;
+
rx_ring->ring_stats->rx_stats.alloc_buf_failed++;
- break;
+ continue;
}
first = NULL;
- rx_ring->first_desc = ntc;
-
- if (eth_skb_pad(skb)) {
- skb = NULL;
- continue;
- }
total_rx_bytes += skb->len;
total_rx_packets++;
@@ -940,9 +675,12 @@ construct_skb:
}
rx_ring->next_to_clean = ntc;
- entries_to_alloc = ICE_RX_DESC_UNUSED(rx_ring);
+ rx_ring->xsk = (struct libeth_xdp_buff *)first;
+
+ entries_to_alloc = ICE_DESC_UNUSED(rx_ring);
if (entries_to_alloc > ICE_RING_QUARTER(rx_ring))
- failure |= !ice_alloc_rx_bufs_zc(rx_ring, entries_to_alloc);
+ failure |= !ice_alloc_rx_bufs_zc(rx_ring, xsk_pool,
+ entries_to_alloc);
ice_finalize_xdp_rx(xdp_ring, xdp_xmit, 0);
ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
@@ -965,17 +703,19 @@ construct_skb:
/**
* ice_xmit_pkt - produce a single HW Tx descriptor out of AF_XDP descriptor
* @xdp_ring: XDP ring to produce the HW Tx descriptor on
+ * @xsk_pool: XSK buffer pool to pick buffers to be consumed by HW
* @desc: AF_XDP descriptor to pull the DMA address and length from
* @total_bytes: bytes accumulator that will be used for stats update
*/
-static void ice_xmit_pkt(struct ice_tx_ring *xdp_ring, struct xdp_desc *desc,
+static void ice_xmit_pkt(struct ice_tx_ring *xdp_ring,
+ struct xsk_buff_pool *xsk_pool, struct xdp_desc *desc,
unsigned int *total_bytes)
{
struct ice_tx_desc *tx_desc;
dma_addr_t dma;
- dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr);
- xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc->len);
+ dma = xsk_buff_raw_get_dma(xsk_pool, desc->addr);
+ xsk_buff_raw_dma_sync_for_device(xsk_pool, dma, desc->len);
tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use++);
tx_desc->buf_addr = cpu_to_le64(dma);
@@ -988,21 +728,25 @@ static void ice_xmit_pkt(struct ice_tx_ring *xdp_ring, struct xdp_desc *desc,
/**
* ice_xmit_pkt_batch - produce a batch of HW Tx descriptors out of AF_XDP descriptors
* @xdp_ring: XDP ring to produce the HW Tx descriptors on
+ * @xsk_pool: XSK buffer pool to pick buffers to be consumed by HW
* @descs: AF_XDP descriptors to pull the DMA addresses and lengths from
* @total_bytes: bytes accumulator that will be used for stats update
*/
-static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
+static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring,
+ struct xsk_buff_pool *xsk_pool,
+ struct xdp_desc *descs,
unsigned int *total_bytes)
{
u16 ntu = xdp_ring->next_to_use;
struct ice_tx_desc *tx_desc;
u32 i;
- loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) {
+ unrolled_count(PKTS_PER_BATCH)
+ for (i = 0; i < PKTS_PER_BATCH; i++) {
dma_addr_t dma;
- dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, descs[i].addr);
- xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, descs[i].len);
+ dma = xsk_buff_raw_get_dma(xsk_pool, descs[i].addr);
+ xsk_buff_raw_dma_sync_for_device(xsk_pool, dma, descs[i].len);
tx_desc = ICE_TX_DESC(xdp_ring, ntu++);
tx_desc->buf_addr = cpu_to_le64(dma);
@@ -1018,60 +762,69 @@ static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *de
/**
* ice_fill_tx_hw_ring - produce the number of Tx descriptors onto ring
* @xdp_ring: XDP ring to produce the HW Tx descriptors on
+ * @xsk_pool: XSK buffer pool to pick buffers to be consumed by HW
* @descs: AF_XDP descriptors to pull the DMA addresses and lengths from
 * @nb_pkts: count of packets to be sent
* @total_bytes: bytes accumulator that will be used for stats update
*/
-static void ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
- u32 nb_pkts, unsigned int *total_bytes)
+static void ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring,
+ struct xsk_buff_pool *xsk_pool,
+ struct xdp_desc *descs, u32 nb_pkts,
+ unsigned int *total_bytes)
{
u32 batched, leftover, i;
batched = ALIGN_DOWN(nb_pkts, PKTS_PER_BATCH);
leftover = nb_pkts & (PKTS_PER_BATCH - 1);
for (i = 0; i < batched; i += PKTS_PER_BATCH)
- ice_xmit_pkt_batch(xdp_ring, &descs[i], total_bytes);
+ ice_xmit_pkt_batch(xdp_ring, xsk_pool, &descs[i], total_bytes);
for (; i < batched + leftover; i++)
- ice_xmit_pkt(xdp_ring, &descs[i], total_bytes);
+ ice_xmit_pkt(xdp_ring, xsk_pool, &descs[i], total_bytes);
}
/**
* ice_xmit_zc - take entries from XSK Tx ring and place them onto HW Tx ring
* @xdp_ring: XDP ring to produce the HW Tx descriptors on
+ * @xsk_pool: AF_XDP buffer pool pointer
*
* Returns true if there is no more work that needs to be done, false otherwise
*/
-bool ice_xmit_zc(struct ice_tx_ring *xdp_ring)
+bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, struct xsk_buff_pool *xsk_pool)
{
- struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs;
+ struct xdp_desc *descs = xsk_pool->tx_descs;
u32 nb_pkts, nb_processed = 0;
unsigned int total_bytes = 0;
int budget;
- ice_clean_xdp_irq_zc(xdp_ring);
+ ice_clean_xdp_irq_zc(xdp_ring, xsk_pool);
+
+ if (!netif_carrier_ok(xdp_ring->vsi->netdev) ||
+ !netif_running(xdp_ring->vsi->netdev))
+ return true;
budget = ICE_DESC_UNUSED(xdp_ring);
budget = min_t(u16, budget, ICE_RING_QUARTER(xdp_ring));
- nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget);
+ nb_pkts = xsk_tx_peek_release_desc_batch(xsk_pool, budget);
if (!nb_pkts)
return true;
if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
nb_processed = xdp_ring->count - xdp_ring->next_to_use;
- ice_fill_tx_hw_ring(xdp_ring, descs, nb_processed, &total_bytes);
+ ice_fill_tx_hw_ring(xdp_ring, xsk_pool, descs, nb_processed,
+ &total_bytes);
xdp_ring->next_to_use = 0;
}
- ice_fill_tx_hw_ring(xdp_ring, &descs[nb_processed], nb_pkts - nb_processed,
- &total_bytes);
+ ice_fill_tx_hw_ring(xdp_ring, xsk_pool, &descs[nb_processed],
+ nb_pkts - nb_processed, &total_bytes);
ice_set_rs_bit(xdp_ring);
ice_xdp_ring_update_tail(xdp_ring);
ice_update_tx_ring_stats(xdp_ring, nb_pkts, total_bytes);
- if (xsk_uses_need_wakeup(xdp_ring->xsk_pool))
- xsk_set_tx_need_wakeup(xdp_ring->xsk_pool);
+ if (xsk_uses_need_wakeup(xsk_pool))
+ xsk_set_tx_need_wakeup(xsk_pool);
return nb_pkts < budget;
}
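/*
 * Illustrative sketch, not part of this patch: with the buffer pool now
 * passed in explicitly, a Tx-side poll path would read the pool pointer
 * once and hand it to ice_xmit_zc(); a false return means the budget was
 * exhausted and more work remains. The wrapping function is hypothetical.
 */
static bool example_poll_xsk_tx(struct ice_tx_ring *xdp_ring)
{
	struct xsk_buff_pool *pool = READ_ONCE(xdp_ring->xsk_pool);

	if (!pool)
		return true;	/* nothing to do */

	return ice_xmit_zc(xdp_ring, pool);
}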
@@ -1093,7 +846,7 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
struct ice_vsi *vsi = np->vsi;
struct ice_tx_ring *ring;
- if (test_bit(ICE_VSI_DOWN, vsi->state))
+ if (test_bit(ICE_VSI_DOWN, vsi->state) || !netif_carrier_ok(netdev))
return -ENETDOWN;
if (!ice_is_xdp_ena_vsi(vsi))
@@ -1104,7 +857,7 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
ring = vsi->rx_rings[queue_id]->xdp_ring;
- if (!ring->xsk_pool)
+ if (!READ_ONCE(ring->xsk_pool))
return -EINVAL;
/* The idea here is that if NAPI is running, mark a miss, so
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h
index 6fa181f080ef..5275fcedc9e1 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.h
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.h
@@ -7,29 +7,32 @@
#define PKTS_PER_BATCH 8
-#ifdef __clang__
-#define loop_unrolled_for _Pragma("clang loop unroll_count(8)") for
-#elif __GNUC__ >= 8
-#define loop_unrolled_for _Pragma("GCC unroll 8") for
-#else
-#define loop_unrolled_for for
-#endif
-
struct ice_vsi;
#ifdef CONFIG_XDP_SOCKETS
int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool,
u16 qid);
-int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget);
+int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring,
+ struct xsk_buff_pool *xsk_pool,
+ int budget);
int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags);
-bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count);
+bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring,
+ struct xsk_buff_pool *xsk_pool, u16 count);
bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi);
void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring);
void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring);
-bool ice_xmit_zc(struct ice_tx_ring *xdp_ring);
-int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc);
+bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, struct xsk_buff_pool *xsk_pool);
+int ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, bool pool_present);
+void ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
+ u16 qid);
+void ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
+ bool enable);
+void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector);
+void ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring,
+ struct ice_q_vector *q_vector);
#else
-static inline bool ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring)
+static inline bool ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring,
+ struct xsk_buff_pool __always_unused *xsk_pool)
{
return false;
}
@@ -44,6 +47,7 @@ ice_xsk_pool_setup(struct ice_vsi __always_unused *vsi,
static inline int
ice_clean_rx_irq_zc(struct ice_rx_ring __always_unused *rx_ring,
+ struct xsk_buff_pool __always_unused *xsk_pool,
int __always_unused budget)
{
return 0;
@@ -51,6 +55,7 @@ ice_clean_rx_irq_zc(struct ice_rx_ring __always_unused *rx_ring,
static inline bool
ice_alloc_rx_bufs_zc(struct ice_rx_ring __always_unused *rx_ring,
+ struct xsk_buff_pool __always_unused *xsk_pool,
u16 __always_unused count)
{
return false;
@@ -72,10 +77,25 @@ static inline void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring) { }
static inline void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring) { }
static inline int
-ice_realloc_zc_buf(struct ice_vsi __always_unused *vsi,
- bool __always_unused zc)
+ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring,
+ bool __always_unused pool_present)
{
return 0;
}
+
+static inline void
+ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
+ u16 qid) { }
+
+static inline void
+ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
+ bool enable) { }
+
+static inline void
+ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector) { }
+
+static inline void
+ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring,
+ struct ice_q_vector *q_vector) { }
#endif /* CONFIG_XDP_SOCKETS */
#endif /* !_ICE_XSK_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c b/drivers/net/ethernet/intel/ice/virt/allowlist.c
index d796dbd2a440..a07efec19c45 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
+++ b/drivers/net/ethernet/intel/ice/virt/allowlist.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021, Intel Corporation. */
-#include "ice_virtchnl_allowlist.h"
+#include "allowlist.h"
/* Purpose of this file is to share functionality to allowlist or denylist
* opcodes used in PF <-> VF communication. Group of opcodes:
@@ -65,7 +65,7 @@ static const u32 vlan_v2_allowlist_opcodes[] = {
/* VIRTCHNL_VF_OFFLOAD_RSS_PF */
static const u32 rss_pf_allowlist_opcodes[] = {
VIRTCHNL_OP_CONFIG_RSS_KEY, VIRTCHNL_OP_CONFIG_RSS_LUT,
- VIRTCHNL_OP_GET_RSS_HENA_CAPS, VIRTCHNL_OP_SET_RSS_HENA,
+ VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS, VIRTCHNL_OP_SET_RSS_HASHCFG,
VIRTCHNL_OP_CONFIG_RSS_HFUNC,
};
@@ -84,6 +84,17 @@ static const u32 fdir_pf_allowlist_opcodes[] = {
VIRTCHNL_OP_ADD_FDIR_FILTER, VIRTCHNL_OP_DEL_FDIR_FILTER,
};
+/* VIRTCHNL_VF_CAP_PTP */
+static const u32 ptp_allowlist_opcodes[] = {
+ VIRTCHNL_OP_1588_PTP_GET_CAPS,
+ VIRTCHNL_OP_1588_PTP_GET_TIME,
+};
+
+static const u32 tc_allowlist_opcodes[] = {
+ VIRTCHNL_OP_GET_QOS_CAPS, VIRTCHNL_OP_CONFIG_QUEUE_BW,
+ VIRTCHNL_OP_CONFIG_QUANTA,
+};
+
struct allowlist_opcode_info {
const u32 *opcodes;
size_t size;
@@ -104,6 +115,8 @@ static const struct allowlist_opcode_info allowlist_opcodes[] = {
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF, adv_rss_pf_allowlist_opcodes),
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_FDIR_PF, fdir_pf_allowlist_opcodes),
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_VLAN_V2, vlan_v2_allowlist_opcodes),
+ ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_QOS, tc_allowlist_opcodes),
+ ALLOW_ITEM(VIRTCHNL_VF_CAP_PTP, ptp_allowlist_opcodes),
};
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.h b/drivers/net/ethernet/intel/ice/virt/allowlist.h
index d3ae86ded219..d3ae86ded219 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.h
+++ b/drivers/net/ethernet/intel/ice/virt/allowlist.h
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/virt/fdir.c
index 8e4ff3af86c6..ae83c3914e29 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+++ b/drivers/net/ethernet/intel/ice/virt/fdir.c
@@ -26,6 +26,15 @@ enum ice_fdir_tunnel_type {
ICE_FDIR_TUNNEL_TYPE_NONE = 0,
ICE_FDIR_TUNNEL_TYPE_GTPU,
ICE_FDIR_TUNNEL_TYPE_GTPU_EH,
+ ICE_FDIR_TUNNEL_TYPE_ECPRI,
+ ICE_FDIR_TUNNEL_TYPE_GTPU_INNER,
+ ICE_FDIR_TUNNEL_TYPE_GTPU_EH_INNER,
+ ICE_FDIR_TUNNEL_TYPE_GRE,
+ ICE_FDIR_TUNNEL_TYPE_GTPOGRE,
+ ICE_FDIR_TUNNEL_TYPE_GTPOGRE_INNER,
+ ICE_FDIR_TUNNEL_TYPE_GRE_INNER,
+ ICE_FDIR_TUNNEL_TYPE_L2TPV2,
+ ICE_FDIR_TUNNEL_TYPE_L2TPV2_INNER,
};
struct virtchnl_fdir_fltr_conf {
@@ -33,6 +42,11 @@ struct virtchnl_fdir_fltr_conf {
enum ice_fdir_tunnel_type ttype;
u64 inset_flag;
u32 flow_id;
+
+ struct ice_parser_profile *prof;
+ bool parser_ena;
+ u8 *pkt_buf;
+ u8 pkt_len;
};
struct virtchnl_fdir_inset_map {
@@ -536,6 +550,8 @@ static void ice_vc_fdir_reset_cnt_all(struct ice_vf_fdir *fdir)
fdir->fdir_fltr_cnt[flow][0] = 0;
fdir->fdir_fltr_cnt[flow][1] = 0;
}
+
+ fdir->fdir_fltr_cnt_total = 0;
}
/**
@@ -785,6 +801,113 @@ err_exit:
}
/**
+ * ice_vc_fdir_is_raw_flow - check if FDIR flow is raw (binary)
+ * @proto: virtchnl protocol headers
+ *
+ * Check if the FDIR rule is a raw (protocol agnostic) flow or not. Note
+ * that a common FDIR rule must have a non-zero proto->count, so the
+ * tunnel_level and count of proto are used as the indicators: if both are
+ * zero, the rule is treated as a raw flow.
+ *
+ * Returns: true if headers describe raw flow, false otherwise.
+ */
+static bool
+ice_vc_fdir_is_raw_flow(struct virtchnl_proto_hdrs *proto)
+{
+ return (proto->tunnel_level == 0 && proto->count == 0);
+}
+
+/**
+ * ice_vc_fdir_parse_raw - parse a virtchnl raw FDIR rule
+ * @vf: pointer to the VF info
+ * @proto: virtchnl protocol headers
+ * @conf: FDIR configuration for each filter
+ *
+ * Parse the virtual channel filter's raw flow and store it in @conf
+ *
+ * Return: 0 on success or negative errno on failure.
+ */
+static int
+ice_vc_fdir_parse_raw(struct ice_vf *vf,
+ struct virtchnl_proto_hdrs *proto,
+ struct virtchnl_fdir_fltr_conf *conf)
+{
+ u8 *pkt_buf, *msk_buf __free(kfree) = NULL;
+ struct ice_parser_result rslt;
+ struct ice_pf *pf = vf->pf;
+ u16 pkt_len, udp_port = 0;
+ struct ice_parser *psr;
+ int status = -ENOMEM;
+ struct ice_hw *hw;
+
+ pkt_len = proto->raw.pkt_len;
+
+ if (!pkt_len || pkt_len > VIRTCHNL_MAX_SIZE_RAW_PACKET)
+ return -EINVAL;
+
+ pkt_buf = kzalloc(pkt_len, GFP_KERNEL);
+ msk_buf = kzalloc(pkt_len, GFP_KERNEL);
+
+ if (!pkt_buf || !msk_buf)
+ goto err_mem_alloc;
+
+ memcpy(pkt_buf, proto->raw.spec, pkt_len);
+ memcpy(msk_buf, proto->raw.mask, pkt_len);
+
+ hw = &pf->hw;
+
+ /* Get raw profile info via Parser Lib */
+ psr = ice_parser_create(hw);
+ if (IS_ERR(psr)) {
+ status = PTR_ERR(psr);
+ goto err_mem_alloc;
+ }
+
+ ice_parser_dvm_set(psr, ice_is_dvm_ena(hw));
+
+ if (ice_get_open_tunnel_port(hw, &udp_port, TNL_VXLAN))
+ ice_parser_vxlan_tunnel_set(psr, udp_port, true);
+
+ status = ice_parser_run(psr, pkt_buf, pkt_len, &rslt);
+ if (status)
+ goto err_parser_destroy;
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_parser_result_dump(hw, &rslt);
+
+ conf->prof = kzalloc(sizeof(*conf->prof), GFP_KERNEL);
+ if (!conf->prof) {
+ status = -ENOMEM;
+ goto err_parser_destroy;
+ }
+
+ status = ice_parser_profile_init(&rslt, pkt_buf, msk_buf,
+ pkt_len, ICE_BLK_FD,
+ conf->prof);
+ if (status)
+ goto err_parser_profile_init;
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_parser_profile_dump(hw, conf->prof);
+
+ /* Store raw flow info into @conf */
+ conf->pkt_len = pkt_len;
+ conf->pkt_buf = pkt_buf;
+ conf->parser_ena = true;
+
+ ice_parser_destroy(psr);
+ return 0;
+
+err_parser_profile_init:
+ kfree(conf->prof);
+err_parser_destroy:
+ ice_parser_destroy(psr);
+err_mem_alloc:
+ kfree(pkt_buf);
+ return status;
+}
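/*
 * Illustrative sketch, not part of this patch: the shape of a raw
 * (protocol agnostic) filter as seen from the VF side. With tunnel_level
 * and count both zero, ice_vc_fdir_is_raw_flow() routes the request
 * through ice_vc_fdir_parse_raw() above. The pkt/msk arrays stand in for
 * a real training packet and its mask; the helper name is hypothetical.
 */
static void example_fill_raw_fdir(struct virtchnl_fdir_add *fltr,
				  const u8 *pkt, const u8 *msk, u16 len)
{
	struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;

	proto->tunnel_level = 0;
	proto->count = 0;		/* zero count marks a raw flow */
	proto->raw.pkt_len = len;	/* <= VIRTCHNL_MAX_SIZE_RAW_PACKET */
	memcpy(proto->raw.spec, pkt, len);
	memcpy(proto->raw.mask, msk, len);
}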
+
+/**
* ice_vc_fdir_parse_pattern
* @vf: pointer to the VF info
* @fltr: virtual channel add cmd buffer
@@ -811,6 +934,10 @@ ice_vc_fdir_parse_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
return -EINVAL;
}
+ /* For raw FDIR filters created by the parser */
+ if (ice_vc_fdir_is_raw_flow(proto))
+ return ice_vc_fdir_parse_raw(vf, proto, conf);
+
for (i = 0; i < proto->count; i++) {
struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
struct ip_esp_hdr *esph;
@@ -1099,8 +1226,10 @@ ice_vc_validate_fdir_fltr(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
int ret;
- if (!ice_vc_validate_pattern(vf, proto))
- return -EINVAL;
+ /* For raw FDIR filters created by the parser */
+ if (!ice_vc_fdir_is_raw_flow(proto))
+ if (!ice_vc_validate_pattern(vf, proto))
+ return -EINVAL;
ret = ice_vc_fdir_parse_pattern(vf, fltr, conf);
if (ret)
@@ -1293,11 +1422,15 @@ static int ice_vc_fdir_write_fltr(struct ice_vf *vf,
return -ENOMEM;
ice_fdir_get_prgm_desc(hw, input, &desc, add);
- ret = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
- if (ret) {
- dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n",
- vf->vf_id, input->flow_type);
- goto err_free_pkt;
+ if (conf->parser_ena) {
+ memcpy(pkt, conf->pkt_buf, conf->pkt_len);
+ } else {
+ ret = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
+ if (ret) {
+ dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n",
+ vf->vf_id, input->flow_type);
+ goto err_free_pkt;
+ }
}
ret = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
@@ -1317,7 +1450,8 @@ err_free_pkt:
*/
static void ice_vf_fdir_timer(struct timer_list *t)
{
- struct ice_vf_fdir_ctx *ctx_irq = from_timer(ctx_irq, t, rx_tmr);
+ struct ice_vf_fdir_ctx *ctx_irq = timer_container_of(ctx_irq, t,
+ rx_tmr);
struct ice_vf_fdir_ctx *ctx_done;
struct ice_vf_fdir *fdir;
unsigned long flags;
@@ -1388,7 +1522,7 @@ ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi,
memcpy(&ctx_done->rx_desc, rx_desc, sizeof(*rx_desc));
spin_unlock_irqrestore(&fdir->ctx_lock, flags);
- ret = del_timer(&ctx_irq->rx_tmr);
+ ret = timer_delete(&ctx_irq->rx_tmr);
if (!ret)
dev_err(dev, "VF %d: Unexpected inactive timer!\n", vf->vf_id);
@@ -1519,6 +1653,16 @@ err_exit:
return ret;
}
+static int ice_fdir_is_tunnel(enum ice_fdir_tunnel_type ttype)
+{
+ return (ttype == ICE_FDIR_TUNNEL_TYPE_GRE_INNER ||
+ ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_INNER ||
+ ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH_INNER ||
+ ttype == ICE_FDIR_TUNNEL_TYPE_GTPOGRE_INNER ||
+ ttype == ICE_FDIR_TUNNEL_TYPE_ECPRI ||
+ ttype == ICE_FDIR_TUNNEL_TYPE_L2TPV2_INNER);
+}
+
/**
* ice_vc_add_fdir_fltr_post
* @vf: pointer to the VF structure
@@ -1560,6 +1704,7 @@ ice_vc_add_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
resp->status = status;
resp->flow_id = conf->flow_id;
vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]++;
+ vf->fdir.fdir_fltr_cnt_total++;
ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
(u8 *)resp, len);
@@ -1624,6 +1769,7 @@ ice_vc_del_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
resp->status = status;
ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]--;
+ vf->fdir.fdir_fltr_cnt_total--;
ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
(u8 *)resp, len);
@@ -1771,13 +1917,165 @@ static void ice_vc_fdir_clear_irq_ctx(struct ice_vf *vf)
struct ice_vf_fdir_ctx *ctx = &vf->fdir.ctx_irq;
unsigned long flags;
- del_timer(&ctx->rx_tmr);
+ timer_delete(&ctx->rx_tmr);
spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
}
/**
+ * ice_vc_parser_fv_check_diff - check two parsed FDIR profile fv context
+ * @fv_a: struct of parsed FDIR profile field vector
+ * @fv_b: struct of parsed FDIR profile field vector
+ *
+ * Check if the two parsed FDIR profile field vector context are different,
+ * including proto_id, offset and mask.
+ *
+ * Return: true if the two field vectors differ, false otherwise.
+ */
+static bool ice_vc_parser_fv_check_diff(struct ice_parser_fv *fv_a,
+ struct ice_parser_fv *fv_b)
+{
+ return (fv_a->proto_id != fv_b->proto_id ||
+ fv_a->offset != fv_b->offset ||
+ fv_a->msk != fv_b->msk);
+}
+
+/**
+ * ice_vc_parser_fv_save - save parsed FDIR profile fv context
+ * @fv: struct of parsed FDIR profile field vector
+ * @fv_src: parsed FDIR profile field vector context to save
+ *
+ * Save the parsed FDIR profile field vector context, including proto_id,
+ * offset and mask.
+ *
+ * Return: Void.
+ */
+static void ice_vc_parser_fv_save(struct ice_parser_fv *fv,
+ struct ice_parser_fv *fv_src)
+{
+ fv->proto_id = fv_src->proto_id;
+ fv->offset = fv_src->offset;
+ fv->msk = fv_src->msk;
+ fv->spec = 0;
+}
+
+/**
+ * ice_vc_add_fdir_raw - add a raw FDIR filter for VF
+ * @vf: pointer to the VF info
+ * @conf: FDIR configuration for each filter
+ * @v_ret: the final VIRTCHNL code
+ * @stat: pointer to the VIRTCHNL_OP_ADD_FDIR_FILTER reply buffer
+ * @len: length of @stat
+ *
+ * Return: 0 on success or negative errno on failure.
+ */
+static int
+ice_vc_add_fdir_raw(struct ice_vf *vf,
+ struct virtchnl_fdir_fltr_conf *conf,
+ enum virtchnl_status_code *v_ret,
+ struct virtchnl_fdir_add *stat, int len)
+{
+ struct ice_vsi *vf_vsi, *ctrl_vsi;
+ struct ice_fdir_prof_info *pi;
+ struct ice_pf *pf = vf->pf;
+ int ret, ptg, id, i;
+ struct device *dev;
+ struct ice_hw *hw;
+ bool fv_found;
+
+ dev = ice_pf_to_dev(pf);
+ hw = &pf->hw;
+ *v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
+
+ id = find_first_bit(conf->prof->ptypes, ICE_FLOW_PTYPE_MAX);
+ ptg = hw->blk[ICE_BLK_FD].xlt1.t[id];
+
+ vf_vsi = ice_get_vf_vsi(vf);
+ if (!vf_vsi) {
+ dev_err(dev, "Can not get FDIR vf_vsi for VF %d\n", vf->vf_id);
+ return -ENODEV;
+ }
+
+ ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
+ if (!ctrl_vsi) {
+ dev_err(dev, "Can not get FDIR ctrl_vsi for VF %d\n",
+ vf->vf_id);
+ return -ENODEV;
+ }
+
+ fv_found = false;
+
+ /* Check if profile info already exists, then update the counter */
+ pi = &vf->fdir_prof_info[ptg];
+ if (pi->fdir_active_cnt != 0) {
+ for (i = 0; i < ICE_MAX_FV_WORDS; i++)
+ if (ice_vc_parser_fv_check_diff(&pi->prof.fv[i],
+ &conf->prof->fv[i]))
+ break;
+ if (i == ICE_MAX_FV_WORDS) {
+ fv_found = true;
+ pi->fdir_active_cnt++;
+ }
+ }
+
+ /* HW profile setting is only required for the first time */
+ if (!fv_found) {
+ ret = ice_flow_set_parser_prof(hw, vf_vsi->idx,
+ ctrl_vsi->idx, conf->prof,
+ ICE_BLK_FD);
+
+ if (ret) {
+ *v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ dev_dbg(dev, "VF %d: insert hw prof failed\n",
+ vf->vf_id);
+ return ret;
+ }
+ }
+
+ ret = ice_vc_fdir_insert_entry(vf, conf, &conf->flow_id);
+ if (ret) {
+ *v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ dev_dbg(dev, "VF %d: insert FDIR list failed\n",
+ vf->vf_id);
+ return ret;
+ }
+
+ ret = ice_vc_fdir_set_irq_ctx(vf, conf,
+ VIRTCHNL_OP_ADD_FDIR_FILTER);
+ if (ret) {
+ dev_dbg(dev, "VF %d: set FDIR context failed\n",
+ vf->vf_id);
+ goto err_rem_entry;
+ }
+
+ ret = ice_vc_fdir_write_fltr(vf, conf, true, false);
+ if (ret) {
+ dev_err(dev, "VF %d: adding FDIR raw flow rule failed, ret:%d\n",
+ vf->vf_id, ret);
+ goto err_clr_irq;
+ }
+
+ /* Save parsed profile fv info of the FDIR rule for the first time */
+ if (!fv_found) {
+ for (i = 0; i < conf->prof->fv_num; i++)
+ ice_vc_parser_fv_save(&pi->prof.fv[i],
+ &conf->prof->fv[i]);
+ pi->prof.fv_num = conf->prof->fv_num;
+ pi->fdir_active_cnt = 1;
+ }
+
+ return 0;
+
+err_clr_irq:
+ ice_vc_fdir_clear_irq_ctx(vf);
+err_rem_entry:
+ ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
+ return ret;
+}
+
+/**
* ice_vc_add_fdir_fltr - add a FDIR filter for VF by the msg buffer
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -1790,6 +2088,7 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
struct virtchnl_fdir_add *stat = NULL;
struct virtchnl_fdir_fltr_conf *conf;
enum virtchnl_status_code v_ret;
+ struct ice_vsi *vf_vsi;
struct device *dev;
struct ice_pf *pf;
int is_tun = 0;
@@ -1798,6 +2097,22 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
pf = vf->pf;
dev = ice_pf_to_dev(pf);
+ vf_vsi = ice_get_vf_vsi(vf);
+ if (!vf_vsi) {
+ dev_err(dev, "Can not get FDIR vf_vsi for VF %u\n", vf->vf_id);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err_exit;
+ }
+
+#define ICE_VF_MAX_FDIR_FILTERS 128
+ if (!ice_fdir_num_avail_fltr(&pf->hw, vf_vsi) ||
+ vf->fdir.fdir_fltr_cnt_total >= ICE_VF_MAX_FDIR_FILTERS) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ dev_err(dev, "Max number of FDIR filters for VF %d is reached\n",
+ vf->vf_id);
+ goto err_exit;
+ }
+
ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
if (ret) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -1830,7 +2145,7 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
len = sizeof(*stat);
ret = ice_vc_validate_fdir_fltr(vf, fltr, conf);
if (ret) {
- v_ret = VIRTCHNL_STATUS_SUCCESS;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
stat->status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
dev_dbg(dev, "Invalid FDIR filter from VF %d\n", vf->vf_id);
goto err_free_conf;
@@ -1845,6 +2160,15 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
goto exit;
}
+ /* For raw FDIR filters created by the parser */
+ if (conf->parser_ena) {
+ ret = ice_vc_add_fdir_raw(vf, conf, &v_ret, stat, len);
+ if (ret)
+ goto err_free_conf;
+ goto exit;
+ }
+
+ is_tun = ice_fdir_is_tunnel(conf->ttype);
ret = ice_vc_fdir_config_input_set(vf, fltr, conf, is_tun);
if (ret) {
v_ret = VIRTCHNL_STATUS_SUCCESS;
@@ -1906,6 +2230,78 @@ err_exit:
}
/**
+ * ice_vc_del_fdir_raw - delete a raw FDIR filter for VF
+ * @vf: pointer to the VF info
+ * @conf: FDIR configuration for each filter
+ * @v_ret: the final VIRTCHNL code
+ * @stat: pointer to the VIRTCHNL_OP_DEL_FDIR_FILTER reply buffer
+ * @len: length of the stat
+ *
+ * Return: 0 on success or negative errno on failure.
+ */
+static int
+ice_vc_del_fdir_raw(struct ice_vf *vf,
+ struct virtchnl_fdir_fltr_conf *conf,
+ enum virtchnl_status_code *v_ret,
+ struct virtchnl_fdir_del *stat, int len)
+{
+ struct ice_vsi *vf_vsi, *ctrl_vsi;
+ enum ice_block blk = ICE_BLK_FD;
+ struct ice_fdir_prof_info *pi;
+ struct ice_pf *pf = vf->pf;
+ struct device *dev;
+ struct ice_hw *hw;
+ unsigned long id;
+ u16 vsi_num;
+ int ptg;
+ int ret;
+
+ dev = ice_pf_to_dev(pf);
+ hw = &pf->hw;
+ *v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
+
+ id = find_first_bit(conf->prof->ptypes, ICE_FLOW_PTYPE_MAX);
+ ptg = hw->blk[ICE_BLK_FD].xlt1.t[id];
+
+ ret = ice_vc_fdir_write_fltr(vf, conf, false, false);
+ if (ret) {
+ dev_err(dev, "VF %u: deleting FDIR raw flow rule failed: %d\n",
+ vf->vf_id, ret);
+ return ret;
+ }
+
+ vf_vsi = ice_get_vf_vsi(vf);
+ if (!vf_vsi) {
+ dev_err(dev, "Can not get FDIR vf_vsi for VF %u\n", vf->vf_id);
+ return -ENODEV;
+ }
+
+ ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
+ if (!ctrl_vsi) {
+ dev_err(dev, "Can not get FDIR ctrl_vsi for VF %u\n",
+ vf->vf_id);
+ return -ENODEV;
+ }
+
+ pi = &vf->fdir_prof_info[ptg];
+ if (pi->fdir_active_cnt != 0) {
+ pi->fdir_active_cnt--;
+ /* Remove the profile ID flow if no active FDIR rules are left */
+ if (!pi->fdir_active_cnt) {
+ vsi_num = ice_get_hw_vsi_num(hw, ctrl_vsi->idx);
+ ice_rem_prof_id_flow(hw, blk, vsi_num, id);
+
+ vsi_num = ice_get_hw_vsi_num(hw, vf_vsi->idx);
+ ice_rem_prof_id_flow(hw, blk, vsi_num, id);
+ }
+ }
+
+ conf->parser_ena = false;
+ return 0;
+}
+
+/**
* ice_vc_del_fdir_fltr - delete a FDIR filter for VF by the msg buffer
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -1917,7 +2313,10 @@ int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg)
struct virtchnl_fdir_del *fltr = (struct virtchnl_fdir_del *)msg;
struct virtchnl_fdir_del *stat = NULL;
struct virtchnl_fdir_fltr_conf *conf;
+ struct ice_vf_fdir *fdir = &vf->fdir;
enum virtchnl_status_code v_ret;
+ struct ice_fdir_fltr *input;
+ enum ice_fltr_ptype flow;
struct device *dev;
struct ice_pf *pf;
int is_tun = 0;
@@ -1967,6 +2366,15 @@ int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg)
goto err_exit;
}
+ /* For raw FDIR filters created by the parser */
+ if (conf->parser_ena) {
+ ret = ice_vc_del_fdir_raw(vf, conf, &v_ret, stat, len);
+ if (ret)
+ goto err_del_tmr;
+ goto exit;
+ }
+
+ is_tun = ice_fdir_is_tunnel(conf->ttype);
ret = ice_vc_fdir_write_fltr(vf, conf, false, is_tun);
if (ret) {
v_ret = VIRTCHNL_STATUS_SUCCESS;
@@ -1976,6 +2384,13 @@ int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg)
goto err_del_tmr;
}
+ /* Remove unused profiles to avoid unexpected behaviors */
+ input = &conf->input;
+ flow = input->flow_type;
+ if (fdir->fdir_fltr_cnt[flow][is_tun] == 1)
+ ice_vc_fdir_rem_prof(vf, flow, is_tun);
+
+exit:
kfree(stat);
return ret;
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h b/drivers/net/ethernet/intel/ice/virt/fdir.h
index c5bcc8d7481c..ac6dcab454b4 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h
+++ b/drivers/net/ethernet/intel/ice/virt/fdir.h
@@ -29,6 +29,7 @@ struct ice_vf_fdir_ctx {
struct ice_vf_fdir {
u16 fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
int prof_entry_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
+ u16 fdir_fltr_cnt_total;
struct ice_fd_hw_prof **fdir_prof;
struct idr fdir_rule_idr;
diff --git a/drivers/net/ethernet/intel/ice/virt/queues.c b/drivers/net/ethernet/intel/ice/virt/queues.c
new file mode 100644
index 000000000000..f73d5a3e83d4
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/virt/queues.c
@@ -0,0 +1,975 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022, Intel Corporation. */
+
+#include "virtchnl.h"
+#include "queues.h"
+#include "ice_vf_lib_private.h"
+#include "ice.h"
+#include "ice_base.h"
+#include "ice_lib.h"
+
+/**
+ * ice_vc_get_max_frame_size - get max frame size allowed for VF
+ * @vf: VF used to determine max frame size
+ *
+ * Max frame size is determined based on the current port's max frame size and
+ * whether a port VLAN is configured on this VF. The VF is not aware whether
+ * it's in a port VLAN so the PF needs to account for this in max frame size
+ * checks and sending the max frame size to the VF.
+ */
+u16 ice_vc_get_max_frame_size(struct ice_vf *vf)
+{
+ struct ice_port_info *pi = ice_vf_get_port_info(vf);
+ u16 max_frame_size;
+
+ max_frame_size = pi->phy.link_info.max_frame_size;
+
+ if (ice_vf_is_port_vlan_ena(vf))
+ max_frame_size -= VLAN_HLEN;
+
+ return max_frame_size;
+}
+
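A small worked example of the port-VLAN adjustment described above, as a standalone sketch; the 1522-byte port maximum is an assumed example value (VLAN_HLEN is the single 802.1Q tag length).

    #include <stdbool.h>
    #include <stdio.h>

    #define VLAN_HLEN 4 /* one 802.1Q tag */

    static unsigned short vf_max_frame(unsigned short port_max, bool port_vlan)
    {
            /* The PF hides the port VLAN tag from the VF, so reserve room for it. */
            return port_vlan ? port_max - VLAN_HLEN : port_max;
    }

    int main(void)
    {
            printf("%u\n", vf_max_frame(1522, true));  /* 1518 */
            printf("%u\n", vf_max_frame(1522, false)); /* 1522 */
            return 0;
    }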
+/**
+ * ice_vc_isvalid_q_id
+ * @vsi: VSI to check queue ID against
+ * @qid: VSI relative queue ID
+ *
+ * Check that the queue ID is valid.
+ */
+static bool ice_vc_isvalid_q_id(struct ice_vsi *vsi, u16 qid)
+{
+ /* allocated Tx and Rx queues should always be equal for a VF VSI */
+ return qid < vsi->alloc_txq;
+}
+
+/**
+ * ice_vc_isvalid_ring_len
+ * @ring_len: length of ring
+ *
+ * Check that the ring length is zero or an in-range multiple of
+ * ICE_REQ_DESC_MULTIPLE.
+ */
+static bool ice_vc_isvalid_ring_len(u16 ring_len)
+{
+ return ring_len == 0 ||
+ (ring_len >= ICE_MIN_NUM_DESC &&
+ ring_len <= ICE_MAX_NUM_DESC_E810 &&
+ !(ring_len % ICE_REQ_DESC_MULTIPLE));
+}
+
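The same validity rule as a standalone sketch with assumed example constants; the driver's ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC_E810 and ICE_REQ_DESC_MULTIPLE values may differ from the numbers used here.

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative values; the driver defines its own constants. */
    #define REQ_DESC_MULTIPLE 32
    #define MIN_NUM_DESC      64
    #define MAX_NUM_DESC    8160

    static bool ring_len_ok(unsigned short len)
    {
            return len == 0 ||
                   (len >= MIN_NUM_DESC && len <= MAX_NUM_DESC &&
                    !(len % REQ_DESC_MULTIPLE));
    }

    int main(void)
    {
            printf("%d %d %d\n", ring_len_ok(0), ring_len_ok(512), ring_len_ok(100));
            return 0; /* prints: 1 1 0 */
    }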
+/**
+ * ice_vf_cfg_qs_bw - Configure per queue bandwidth
+ * @vf: pointer to the VF info
+ * @num_queues: number of queues to be configured
+ *
+ * Configure per queue bandwidth.
+ *
+ * Return: 0 on success or negative error value.
+ */
+static int ice_vf_cfg_qs_bw(struct ice_vf *vf, u16 num_queues)
+{
+ struct ice_hw *hw = &vf->pf->hw;
+ struct ice_vsi *vsi;
+ int ret;
+ u16 i;
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi)
+ return -EINVAL;
+
+ for (i = 0; i < num_queues; i++) {
+ u32 p_rate, min_rate;
+ u8 tc;
+
+ p_rate = vf->qs_bw[i].peak;
+ min_rate = vf->qs_bw[i].committed;
+ tc = vf->qs_bw[i].tc;
+ if (p_rate)
+ ret = ice_cfg_q_bw_lmt(hw->port_info, vsi->idx, tc,
+ vf->qs_bw[i].queue_id,
+ ICE_MAX_BW, p_rate);
+ else
+ ret = ice_cfg_q_bw_dflt_lmt(hw->port_info, vsi->idx, tc,
+ vf->qs_bw[i].queue_id,
+ ICE_MAX_BW);
+ if (ret)
+ return ret;
+
+ if (min_rate)
+ ret = ice_cfg_q_bw_lmt(hw->port_info, vsi->idx, tc,
+ vf->qs_bw[i].queue_id,
+ ICE_MIN_BW, min_rate);
+ else
+ ret = ice_cfg_q_bw_dflt_lmt(hw->port_info, vsi->idx, tc,
+ vf->qs_bw[i].queue_id,
+ ICE_MIN_BW);
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vf_cfg_q_quanta_profile - Configure quanta profile
+ * @vf: pointer to the VF info
+ * @quanta_prof_idx: pointer to the quanta profile index
+ * @quanta_size: quanta size to be set
+ *
+ * This function chooses available quanta profile and configures the register.
+ * The quanta profile is evenly divided by the number of device ports, and then
+ * available to the specific PF and VFs. The first profile for each PF is a
+ * reserved default profile. Only the quanta size of the remaining unused
+ * profiles can be modified.
+ *
+ * Return: 0 on success or negative error value.
+ */
+static int ice_vf_cfg_q_quanta_profile(struct ice_vf *vf, u16 quanta_size,
+ u16 *quanta_prof_idx)
+{
+ const u16 n_desc = calc_quanta_desc(quanta_size);
+ struct ice_hw *hw = &vf->pf->hw;
+ const u16 n_cmd = 2 * n_desc;
+ struct ice_pf *pf = vf->pf;
+ u16 per_pf, begin_id;
+ u8 n_used;
+ u32 reg;
+
+ begin_id = (GLCOMM_QUANTA_PROF_MAX_INDEX + 1) / hw->dev_caps.num_funcs *
+ hw->logical_pf_id;
+
+ if (quanta_size == ICE_DFLT_QUANTA) {
+ *quanta_prof_idx = begin_id;
+ } else {
+ per_pf = (GLCOMM_QUANTA_PROF_MAX_INDEX + 1) /
+ hw->dev_caps.num_funcs;
+ n_used = pf->num_quanta_prof_used;
+ if (n_used < per_pf) {
+ *quanta_prof_idx = begin_id + 1 + n_used;
+ pf->num_quanta_prof_used++;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ reg = FIELD_PREP(GLCOMM_QUANTA_PROF_QUANTA_SIZE_M, quanta_size) |
+ FIELD_PREP(GLCOMM_QUANTA_PROF_MAX_CMD_M, n_cmd) |
+ FIELD_PREP(GLCOMM_QUANTA_PROF_MAX_DESC_M, n_desc);
+ wr32(hw, GLCOMM_QUANTA_PROF(*quanta_prof_idx), reg);
+
+ return 0;
+}
+
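A worked example of the even split of quanta profiles across PFs described in the kernel-doc above; the 16-profile/2-PF numbers are assumptions chosen only to make the arithmetic concrete.

    #include <stdio.h>

    int main(void)
    {
            /* Assumed example: 16 quanta profiles shared by 2 PFs. */
            const unsigned int max_index = 15, num_funcs = 2, pf_id = 1;
            unsigned int per_pf = (max_index + 1) / num_funcs;   /* 8 */
            unsigned int begin_id = per_pf * pf_id;              /* 8 */

            /* begin_id is the reserved default profile for this PF;
             * begin_id + 1 .. begin_id + per_pf - 1 may be reprogrammed.
             */
            printf("default=%u, usable=%u..%u\n",
                   begin_id, begin_id + 1, begin_id + per_pf - 1);
            return 0;
    }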
+/**
+ * ice_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
+ * @vqs: virtchnl_queue_select structure containing bitmaps to validate
+ *
+ * Return true on successful validation, else false
+ */
+static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
+{
+ if ((!vqs->rx_queues && !vqs->tx_queues) ||
+ vqs->rx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF) ||
+ vqs->tx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF))
+ return false;
+
+ return true;
+}
+
+/**
+ * ice_vf_ena_txq_interrupt - enable Tx queue interrupt via QINT_TQCTL
+ * @vsi: VSI of the VF to configure
+ * @q_idx: VF queue index used to determine the queue in the PF's space
+ */
+void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ u32 pfq = vsi->txq_map[q_idx];
+ u32 reg;
+
+ reg = rd32(hw, QINT_TQCTL(pfq));
+
+ /* MSI-X index 0 in the VF's space is always for the OICR, which means
+ * this is most likely a poll mode VF driver, so don't enable an
+ * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
+ */
+ if (!(reg & QINT_TQCTL_MSIX_INDX_M))
+ return;
+
+ wr32(hw, QINT_TQCTL(pfq), reg | QINT_TQCTL_CAUSE_ENA_M);
+}
+
+/**
+ * ice_vf_ena_rxq_interrupt - enable Rx queue interrupt via QINT_RQCTL
+ * @vsi: VSI of the VF to configure
+ * @q_idx: VF queue index used to determine the queue in the PF's space
+ */
+void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ u32 pfq = vsi->rxq_map[q_idx];
+ u32 reg;
+
+ reg = rd32(hw, QINT_RQCTL(pfq));
+
+ /* MSI-X index 0 in the VF's space is always for the OICR, which means
+ * this is most likely a poll mode VF driver, so don't enable an
+ * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
+ */
+ if (!(reg & QINT_RQCTL_MSIX_INDX_M))
+ return;
+
+ wr32(hw, QINT_RQCTL(pfq), reg | QINT_RQCTL_CAUSE_ENA_M);
+}
+
+/**
+ * ice_vc_ena_qs_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to enable all or specific queue(s)
+ */
+int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_queue_select *vqs =
+ (struct virtchnl_queue_select *)msg;
+ struct ice_vsi *vsi;
+ unsigned long q_map;
+ u16 vf_q_id;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_validate_vqs_bitmaps(vqs)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Enable only Rx rings, Tx rings were enabled by the FW when the
+ * Tx queue group list was configured and the context bits were
+ * programmed using ice_vsi_cfg_txqs
+ */
+ q_map = vqs->rx_queues;
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
+ if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Skip queue if already enabled */
+ if (test_bit(vf_q_id, vf->rxq_ena))
+ continue;
+
+ if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) {
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
+ vf_q_id, vsi->vsi_num);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ ice_vf_ena_rxq_interrupt(vsi, vf_q_id);
+ set_bit(vf_q_id, vf->rxq_ena);
+ }
+
+ q_map = vqs->tx_queues;
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
+ if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Skip queue if already enabled */
+ if (test_bit(vf_q_id, vf->txq_ena))
+ continue;
+
+ ice_vf_ena_txq_interrupt(vsi, vf_q_id);
+ set_bit(vf_q_id, vf->txq_ena);
+ }
+
+ /* Set flag to indicate that queues are enabled */
+ if (v_ret == VIRTCHNL_STATUS_SUCCESS)
+ set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
+
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vf_vsi_dis_single_txq - disable a single Tx queue
+ * @vf: VF to disable queue for
+ * @vsi: VSI for the VF
+ * @q_id: VF relative (0-based) queue ID
+ *
+ * Attempt to disable the Tx queue passed in. If the Tx queue was successfully
+ * disabled then clear q_id bit in the enabled queues bitmap and return
+ * success. Otherwise return error.
+ */
+int ice_vf_vsi_dis_single_txq(struct ice_vf *vf, struct ice_vsi *vsi, u16 q_id)
+{
+ struct ice_txq_meta txq_meta = { 0 };
+ struct ice_tx_ring *ring;
+ int err;
+
+ if (!test_bit(q_id, vf->txq_ena))
+ dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n",
+ q_id, vsi->vsi_num);
+
+ ring = vsi->tx_rings[q_id];
+ if (!ring)
+ return -EINVAL;
+
+ ice_fill_txq_meta(vsi, ring, &txq_meta);
+
+ err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id, ring, &txq_meta);
+ if (err) {
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
+ q_id, vsi->vsi_num);
+ return err;
+ }
+
+ /* Clear enabled queues flag */
+ clear_bit(q_id, vf->txq_ena);
+
+ return 0;
+}
+
+/**
+ * ice_vc_dis_qs_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to disable all or specific queue(s)
+ */
+int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_queue_select *vqs =
+ (struct virtchnl_queue_select *)msg;
+ struct ice_vsi *vsi;
+ unsigned long q_map;
+ u16 vf_q_id;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
+ !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_validate_vqs_bitmaps(vqs)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (vqs->tx_queues) {
+ q_map = vqs->tx_queues;
+
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
+ if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (ice_vf_vsi_dis_single_txq(vf, vsi, vf_q_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+ }
+ }
+
+ q_map = vqs->rx_queues;
+ /* speed up Rx queue disable by batching them if possible */
+ if (q_map &&
+ bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) {
+ if (ice_vsi_stop_all_rx_rings(vsi)) {
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n",
+ vsi->vsi_num);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
+ } else if (q_map) {
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
+ if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Skip queue if not enabled */
+ if (!test_bit(vf_q_id, vf->rxq_ena))
+ continue;
+
+ if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id,
+ true)) {
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
+ vf_q_id, vsi->vsi_num);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Clear enabled queues flag */
+ clear_bit(vf_q_id, vf->rxq_ena);
+ }
+ }
+
+ /* Clear the queues-enabled VF state flag if no queues remain enabled */
+ if (v_ret == VIRTCHNL_STATUS_SUCCESS && ice_vf_has_no_qs_ena(vf))
+ clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
+
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_cfg_interrupt
+ * @vf: pointer to the VF info
+ * @vsi: the VSI being configured
+ * @map: vector map for mapping vectors to queues
+ * @q_vector: structure for interrupt vector
+ *
+ * Configure the IRQ to queue map.
+ */
+static enum virtchnl_status_code
+ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi,
+ struct virtchnl_vector_map *map,
+ struct ice_q_vector *q_vector)
+{
+ u16 vsi_q_id, vsi_q_id_idx;
+ unsigned long qmap;
+
+ q_vector->num_ring_rx = 0;
+ q_vector->num_ring_tx = 0;
+
+ qmap = map->rxq_map;
+ for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
+ vsi_q_id = vsi_q_id_idx;
+
+ if (!ice_vc_isvalid_q_id(vsi, vsi_q_id))
+ return VIRTCHNL_STATUS_ERR_PARAM;
+
+ q_vector->num_ring_rx++;
+ q_vector->rx.itr_idx = map->rxitr_idx;
+ vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
+ ice_cfg_rxq_interrupt(vsi, vsi_q_id,
+ q_vector->vf_reg_idx,
+ q_vector->rx.itr_idx);
+ }
+
+ qmap = map->txq_map;
+ for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
+ vsi_q_id = vsi_q_id_idx;
+
+ if (!ice_vc_isvalid_q_id(vsi, vsi_q_id))
+ return VIRTCHNL_STATUS_ERR_PARAM;
+
+ q_vector->num_ring_tx++;
+ q_vector->tx.itr_idx = map->txitr_idx;
+ vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
+ ice_cfg_txq_interrupt(vsi, vsi_q_id,
+ q_vector->vf_reg_idx,
+ q_vector->tx.itr_idx);
+ }
+
+ return VIRTCHNL_STATUS_SUCCESS;
+}
+
+/**
+ * ice_vc_cfg_irq_map_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to configure the IRQ to queue map
+ */
+int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ u16 num_q_vectors_mapped, vsi_id, vector_id;
+ struct virtchnl_irq_map_info *irqmap_info;
+ struct virtchnl_vector_map *map;
+ struct ice_vsi *vsi;
+ int i;
+
+ irqmap_info = (struct virtchnl_irq_map_info *)msg;
+ num_q_vectors_mapped = irqmap_info->num_vectors;
+
+ /* Check to make sure number of VF vectors mapped is not greater than
+ * number of VF vectors originally allocated, and check that
+ * there is actually at least a single VF queue vector mapped
+ */
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
+ vf->num_msix < num_q_vectors_mapped ||
+ !num_q_vectors_mapped) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ for (i = 0; i < num_q_vectors_mapped; i++) {
+ struct ice_q_vector *q_vector;
+
+ map = &irqmap_info->vecmap[i];
+
+ vector_id = map->vector_id;
+ vsi_id = map->vsi_id;
+ /* vector_id is always 0-based for each VF, and can never be
+ * larger than or equal to the max allowed interrupts per VF
+ */
+ if (!(vector_id < vf->num_msix) ||
+ !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
+ (!vector_id && (map->rxq_map || map->txq_map))) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* No need to map VF miscellaneous or rogue vector */
+ if (!vector_id)
+ continue;
+
+ /* Subtract the non-queue vector count from the vector_id passed by the
+ * VF to get the actual VSI queue vector array index
+ */
+ q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
+ if (!q_vector) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* look out for an invalid queue index */
+ v_ret = ice_cfg_interrupt(vf, vsi, map, q_vector);
+ if (v_ret)
+ goto error_param;
+ }
+
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vc_cfg_q_bw - Configure per queue bandwidth
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer which holds the command descriptor
+ *
+ * Configure VF queues bandwidth.
+ *
+ * Return: 0 on success or negative error value.
+ */
+int ice_vc_cfg_q_bw(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_queues_bw_cfg *qbw =
+ (struct virtchnl_queues_bw_cfg *)msg;
+ struct ice_vsi *vsi;
+ u16 i;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
+ !ice_vc_isvalid_vsi_id(vf, qbw->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ if (qbw->num_queues > ICE_MAX_RSS_QS_PER_VF ||
+ qbw->num_queues > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
+ dev_err(ice_pf_to_dev(vf->pf), "VF-%d trying to configure more than allocated number of queues: %d\n",
+ vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ for (i = 0; i < qbw->num_queues; i++) {
+ if (qbw->cfg[i].shaper.peak != 0 && vf->max_tx_rate != 0 &&
+ qbw->cfg[i].shaper.peak > vf->max_tx_rate) {
+ dev_warn(ice_pf_to_dev(vf->pf), "The maximum queue %d rate limit configuration may not take effect because the maximum TX rate for VF-%d is %d\n",
+ qbw->cfg[i].queue_id, vf->vf_id,
+ vf->max_tx_rate);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+ if (qbw->cfg[i].shaper.committed != 0 && vf->min_tx_rate != 0 &&
+ qbw->cfg[i].shaper.committed < vf->min_tx_rate) {
+ dev_warn(ice_pf_to_dev(vf->pf), "The minimum queue %d rate limit configuration may not take effect because the minimum TX rate for VF-%d is %d\n",
+ qbw->cfg[i].queue_id, vf->vf_id,
+ vf->min_tx_rate);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+ if (qbw->cfg[i].queue_id > vf->num_vf_qs) {
+ dev_warn(ice_pf_to_dev(vf->pf), "VF-%d trying to configure invalid queue_id\n",
+ vf->vf_id);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+ if (qbw->cfg[i].tc >= ICE_MAX_TRAFFIC_CLASS) {
+ dev_warn(ice_pf_to_dev(vf->pf), "VF-%d trying to configure a traffic class higher than allowed\n",
+ vf->vf_id);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+ }
+
+ for (i = 0; i < qbw->num_queues; i++) {
+ vf->qs_bw[i].queue_id = qbw->cfg[i].queue_id;
+ vf->qs_bw[i].peak = qbw->cfg[i].shaper.peak;
+ vf->qs_bw[i].committed = qbw->cfg[i].shaper.committed;
+ vf->qs_bw[i].tc = qbw->cfg[i].tc;
+ }
+
+ if (ice_vf_cfg_qs_bw(vf, qbw->num_queues))
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+
+err:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_QUEUE_BW,
+ v_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_cfg_q_quanta - Configure per queue quanta
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer which holds the command descriptor
+ *
+ * Configure VF queues quanta.
+ *
+ * Return: 0 on success or negative error value.
+ */
+int ice_vc_cfg_q_quanta(struct ice_vf *vf, u8 *msg)
+{
+ u16 quanta_prof_id, quanta_size, start_qid, num_queues, end_qid, i;
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_quanta_cfg *qquanta =
+ (struct virtchnl_quanta_cfg *)msg;
+ struct ice_vsi *vsi;
+ int ret;
+
+ start_qid = qquanta->queue_select.start_queue_id;
+ num_queues = qquanta->queue_select.num_queues;
+
+ if (check_add_overflow(start_qid, num_queues, &end_qid)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ if (end_qid > ICE_MAX_RSS_QS_PER_VF ||
+ end_qid > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
+ dev_err(ice_pf_to_dev(vf->pf), "VF-%d trying to configure more than allocated number of queues: %d\n",
+ vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ quanta_size = qquanta->quanta_size;
+ if (quanta_size > ICE_MAX_QUANTA_SIZE ||
+ quanta_size < ICE_MIN_QUANTA_SIZE) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ if (quanta_size % 64) {
+ dev_err(ice_pf_to_dev(vf->pf), "quanta size should be the product of 64\n");
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ ret = ice_vf_cfg_q_quanta_profile(vf, quanta_size,
+ &quanta_prof_id);
+ if (ret) {
+ v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
+ goto err;
+ }
+
+ for (i = start_qid; i < end_qid; i++)
+ vsi->tx_rings[i]->quanta_prof_id = quanta_prof_id;
+
+err:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_QUANTA,
+ v_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_cfg_qs_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to configure the Rx/Tx queues
+ */
+int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_vsi_queue_config_info *qci =
+ (struct virtchnl_vsi_queue_config_info *)msg;
+ struct virtchnl_queue_pair_info *qpi;
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+ int i = -1, q_idx;
+ bool ena_ts;
+ u8 act_prt;
+
+ mutex_lock(&pf->lag_mutex);
+ act_prt = ice_lag_prepare_vf_reset(pf->lag);
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ goto error_param;
+
+ if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id))
+ goto error_param;
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi)
+ goto error_param;
+
+ if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
+ qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
+ dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
+ vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
+ goto error_param;
+ }
+
+ for (i = 0; i < qci->num_queue_pairs; i++) {
+ if (!qci->qpair[i].rxq.crc_disable)
+ continue;
+
+ if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_CRC) ||
+ vf->vlan_strip_ena)
+ goto error_param;
+ }
+
+ for (i = 0; i < qci->num_queue_pairs; i++) {
+ qpi = &qci->qpair[i];
+ if (qpi->txq.vsi_id != qci->vsi_id ||
+ qpi->rxq.vsi_id != qci->vsi_id ||
+ qpi->rxq.queue_id != qpi->txq.queue_id ||
+ qpi->txq.headwb_enabled ||
+ !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
+ !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
+ !ice_vc_isvalid_q_id(vsi, qpi->txq.queue_id)) {
+ goto error_param;
+ }
+
+ q_idx = qpi->rxq.queue_id;
+
+ /* make sure selected "q_idx" is in valid range of queues
+ * for selected "vsi"
+ */
+ if (q_idx >= vsi->alloc_txq || q_idx >= vsi->alloc_rxq) {
+ goto error_param;
+ }
+
+ /* copy Tx queue info from VF into VSI */
+ if (qpi->txq.ring_len > 0) {
+ vsi->tx_rings[q_idx]->dma = qpi->txq.dma_ring_addr;
+ vsi->tx_rings[q_idx]->count = qpi->txq.ring_len;
+
+ /* Disable any existing queue first */
+ if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx))
+ goto error_param;
+
+ /* Configure a queue with the requested settings */
+ if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) {
+ dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure TX queue %d\n",
+ vf->vf_id, q_idx);
+ goto error_param;
+ }
+ }
+
+ /* copy Rx queue info from VF into VSI */
+ if (qpi->rxq.ring_len > 0) {
+ u16 max_frame_size = ice_vc_get_max_frame_size(vf);
+ struct ice_rx_ring *ring = vsi->rx_rings[q_idx];
+ u32 rxdid;
+
+ ring->dma = qpi->rxq.dma_ring_addr;
+ ring->count = qpi->rxq.ring_len;
+
+ if (qpi->rxq.crc_disable)
+ ring->flags |= ICE_RX_FLAGS_CRC_STRIP_DIS;
+ else
+ ring->flags &= ~ICE_RX_FLAGS_CRC_STRIP_DIS;
+
+ if (qpi->rxq.databuffer_size != 0 &&
+ (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
+ qpi->rxq.databuffer_size < 1024))
+ goto error_param;
+
+ ring->rx_buf_len = qpi->rxq.databuffer_size;
+
+ if (qpi->rxq.max_pkt_size > max_frame_size ||
+ qpi->rxq.max_pkt_size < 64)
+ goto error_param;
+
+ vsi->max_frame = qpi->rxq.max_pkt_size;
+ /* add space for the port VLAN since the VF driver is
+ * not expected to account for it in the MTU
+ * calculation
+ */
+ if (ice_vf_is_port_vlan_ena(vf))
+ vsi->max_frame += VLAN_HLEN;
+
+ if (ice_vsi_cfg_single_rxq(vsi, q_idx)) {
+ dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure RX queue %d\n",
+ vf->vf_id, q_idx);
+ goto error_param;
+ }
+
+ /* If Rx flex desc is supported, select RXDID for Rx
+ * queues. Otherwise, use the legacy 32-byte descriptor
+ * format. The legacy 16-byte descriptor is not supported;
+ * if that RXDID is selected, return an error.
+ */
+ if (vf->driver_caps &
+ VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
+ rxdid = qpi->rxq.rxdid;
+ if (!(BIT(rxdid) & pf->supported_rxdids))
+ goto error_param;
+ } else {
+ rxdid = ICE_RXDID_LEGACY_1;
+ }
+
+ ena_ts = ((vf->driver_caps &
+ VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) &&
+ (vf->driver_caps & VIRTCHNL_VF_CAP_PTP) &&
+ (qpi->rxq.flags & VIRTCHNL_PTP_RX_TSTAMP));
+
+ ice_write_qrxflxp_cntxt(&vsi->back->hw,
+ vsi->rxq_map[q_idx], rxdid,
+ ICE_RXDID_PRIO, ena_ts);
+ }
+ }
+
+ ice_lag_complete_vf_reset(pf->lag, act_prt);
+ mutex_unlock(&pf->lag_mutex);
+
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ VIRTCHNL_STATUS_SUCCESS, NULL, 0);
+error_param:
+ /* disable whatever we can */
+ for (; i >= 0; i--) {
+ if (ice_vsi_ctrl_one_rx_ring(vsi, false, i, true))
+ dev_err(ice_pf_to_dev(pf), "VF-%d could not disable RX queue %d\n",
+ vf->vf_id, i);
+ if (ice_vf_vsi_dis_single_txq(vf, vsi, i))
+ dev_err(ice_pf_to_dev(pf), "VF-%d could not disable TX queue %d\n",
+ vf->vf_id, i);
+ }
+
+ ice_lag_complete_vf_reset(pf->lag, act_prt);
+ mutex_unlock(&pf->lag_mutex);
+
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ VIRTCHNL_STATUS_ERR_PARAM, NULL, 0);
+}
+
+/**
+ * ice_vc_request_qs_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * VFs get a default number of queues but can use this message to request a
+ * different number. If the request is successful, the PF will reset the VF
+ * and return 0. If unsuccessful, the PF will inform the VF of the number of
+ * available queue pairs in the virtchnl response.
+ */
+int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_vf_res_request *vfres =
+ (struct virtchnl_vf_res_request *)msg;
+ u16 req_queues = vfres->num_queue_pairs;
+ struct ice_pf *pf = vf->pf;
+ u16 max_allowed_vf_queues;
+ u16 tx_rx_queue_left;
+ struct device *dev;
+ u16 cur_queues;
+
+ dev = ice_pf_to_dev(pf);
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ cur_queues = vf->num_vf_qs;
+ tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf),
+ ice_get_avail_rxq_count(pf));
+ max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
+ if (!req_queues) {
+ dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n",
+ vf->vf_id);
+ } else if (req_queues > ICE_MAX_RSS_QS_PER_VF) {
+ dev_err(dev, "VF %d tried to request more than %d queues.\n",
+ vf->vf_id, ICE_MAX_RSS_QS_PER_VF);
+ vfres->num_queue_pairs = ICE_MAX_RSS_QS_PER_VF;
+ } else if (req_queues > cur_queues &&
+ req_queues - cur_queues > tx_rx_queue_left) {
+ dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
+ vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
+ vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
+ ICE_MAX_RSS_QS_PER_VF);
+ } else {
+ /* request is successful, then reset VF */
+ vf->num_req_qs = req_queues;
+ ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
+ dev_info(dev, "VF %d granted request of %u queues.\n",
+ vf->vf_id, req_queues);
+ return 0;
+ }
+
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
+ v_ret, (u8 *)vfres, sizeof(*vfres));
+}
+
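The clamping policy of this request path, condensed into a standalone sketch (the zero-request case and the VF reset are omitted); MAX_QS_PER_VF here is an assumed stand-in for ICE_MAX_RSS_QS_PER_VF.

    #include <stdio.h>

    #define MAX_QS_PER_VF 16 /* assumed cap for illustration */

    static unsigned short min_u16(unsigned short a, unsigned short b)
    {
            return a < b ? a : b;
    }

    /* Clamping arithmetic only; req = requested, cur = current, left = spare pairs. */
    static unsigned short offer_queues(unsigned short req, unsigned short cur,
                                       unsigned short left)
    {
            if (req > MAX_QS_PER_VF)
                    return MAX_QS_PER_VF;
            if (req > cur && req - cur > left)
                    return min_u16(cur + left, MAX_QS_PER_VF);
            return req; /* request can be granted as-is */
    }

    int main(void)
    {
            printf("%u\n", offer_queues(12, 4, 2)); /* prints 6 */
            return 0;
    }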
diff --git a/drivers/net/ethernet/intel/ice/virt/queues.h b/drivers/net/ethernet/intel/ice/virt/queues.h
new file mode 100644
index 000000000000..c4a792cecea1
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/virt/queues.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2022, Intel Corporation. */
+
+#ifndef _ICE_VIRT_QUEUES_H_
+#define _ICE_VIRT_QUEUES_H_
+
+#include <linux/types.h>
+
+struct ice_vf;
+
+u16 ice_vc_get_max_frame_size(struct ice_vf *vf);
+int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg);
+int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg);
+int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg);
+int ice_vc_cfg_q_bw(struct ice_vf *vf, u8 *msg);
+int ice_vc_cfg_q_quanta(struct ice_vf *vf, u8 *msg);
+int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg);
+int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg);
+
+#endif /* _ICE_VIRT_QUEUES_H_ */
diff --git a/drivers/net/ethernet/intel/ice/virt/rss.c b/drivers/net/ethernet/intel/ice/virt/rss.c
new file mode 100644
index 000000000000..085e69ec0cfc
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/virt/rss.c
@@ -0,0 +1,1922 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022, Intel Corporation. */
+
+#include "rss.h"
+#include "ice_vf_lib_private.h"
+#include "ice.h"
+
+#define FIELD_SELECTOR(proto_hdr_field) \
+ BIT((proto_hdr_field) & PROTO_HDR_FIELD_MASK)
+
+struct ice_vc_hdr_match_type {
+ u32 vc_hdr; /* virtchnl headers (VIRTCHNL_PROTO_HDR_XXX) */
+ u32 ice_hdr; /* ice headers (ICE_FLOW_SEG_HDR_XXX) */
+};
+
+static const struct ice_vc_hdr_match_type ice_vc_hdr_list[] = {
+ {VIRTCHNL_PROTO_HDR_NONE, ICE_FLOW_SEG_HDR_NONE},
+ {VIRTCHNL_PROTO_HDR_ETH, ICE_FLOW_SEG_HDR_ETH},
+ {VIRTCHNL_PROTO_HDR_S_VLAN, ICE_FLOW_SEG_HDR_VLAN},
+ {VIRTCHNL_PROTO_HDR_C_VLAN, ICE_FLOW_SEG_HDR_VLAN},
+ {VIRTCHNL_PROTO_HDR_IPV4, ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER},
+ {VIRTCHNL_PROTO_HDR_IPV6, ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER},
+ {VIRTCHNL_PROTO_HDR_TCP, ICE_FLOW_SEG_HDR_TCP},
+ {VIRTCHNL_PROTO_HDR_UDP, ICE_FLOW_SEG_HDR_UDP},
+ {VIRTCHNL_PROTO_HDR_SCTP, ICE_FLOW_SEG_HDR_SCTP},
+ {VIRTCHNL_PROTO_HDR_PPPOE, ICE_FLOW_SEG_HDR_PPPOE},
+ {VIRTCHNL_PROTO_HDR_GTPU_IP, ICE_FLOW_SEG_HDR_GTPU_IP},
+ {VIRTCHNL_PROTO_HDR_GTPU_EH, ICE_FLOW_SEG_HDR_GTPU_EH},
+ {VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
+ ICE_FLOW_SEG_HDR_GTPU_DWN},
+ {VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
+ ICE_FLOW_SEG_HDR_GTPU_UP},
+ {VIRTCHNL_PROTO_HDR_L2TPV3, ICE_FLOW_SEG_HDR_L2TPV3},
+ {VIRTCHNL_PROTO_HDR_ESP, ICE_FLOW_SEG_HDR_ESP},
+ {VIRTCHNL_PROTO_HDR_AH, ICE_FLOW_SEG_HDR_AH},
+ {VIRTCHNL_PROTO_HDR_PFCP, ICE_FLOW_SEG_HDR_PFCP_SESSION},
+ {VIRTCHNL_PROTO_HDR_GTPC, ICE_FLOW_SEG_HDR_GTPC},
+ {VIRTCHNL_PROTO_HDR_L2TPV2, ICE_FLOW_SEG_HDR_L2TPV2},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG, ICE_FLOW_SEG_HDR_IPV_FRAG},
+ {VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG, ICE_FLOW_SEG_HDR_IPV_FRAG},
+ {VIRTCHNL_PROTO_HDR_GRE, ICE_FLOW_SEG_HDR_GRE},
+};
+
+struct ice_vc_hash_field_match_type {
+ u32 vc_hdr; /* virtchnl headers
+ * (VIRTCHNL_PROTO_HDR_XXX)
+ */
+ u32 vc_hash_field; /* virtchnl hash fields selector
+ * FIELD_SELECTOR((VIRTCHNL_PROTO_HDR_ETH_XXX))
+ */
+ u64 ice_hash_field; /* ice hash fields
+ * (BIT_ULL(ICE_FLOW_FIELD_IDX_XXX))
+ */
+};
+
+static const struct
+ice_vc_hash_field_match_type ice_vc_hash_field_list[] = {
+ {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA)},
+ {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA)},
+ {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
+ ICE_FLOW_HASH_ETH},
+ {VIRTCHNL_PROTO_HDR_ETH,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_TYPE)},
+ {VIRTCHNL_PROTO_HDR_S_VLAN,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_S_VLAN_ID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_S_VLAN)},
+ {VIRTCHNL_PROTO_HDR_C_VLAN,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_C_VLAN_ID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_C_VLAN)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
+ ICE_FLOW_HASH_IPV4},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
+ ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV4,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_FRAG_PKID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_ID)},
+ {VIRTCHNL_PROTO_HDR_IPV4,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
+ ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
+ ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_IPV4,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
+ ICE_FLOW_HASH_IPV4},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
+ ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_FRAG_PKID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_ID)},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
+ ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
+ ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
+ ICE_FLOW_HASH_IPV6},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
+ ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG_PKID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_ID)},
+ {VIRTCHNL_PROTO_HDR_IPV6,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_DST),
+ ICE_FLOW_HASH_IPV6_PRE64},
+ {VIRTCHNL_PROTO_HDR_IPV6,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_SRC),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA)},
+ {VIRTCHNL_PROTO_HDR_IPV6,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_DST),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA)},
+ {VIRTCHNL_PROTO_HDR_IPV6,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
+ ICE_FLOW_HASH_IPV6_PRE64 |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV6,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV6,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
+ {VIRTCHNL_PROTO_HDR_TCP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)},
+ {VIRTCHNL_PROTO_HDR_TCP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)},
+ {VIRTCHNL_PROTO_HDR_TCP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
+ ICE_FLOW_HASH_TCP_PORT},
+ {VIRTCHNL_PROTO_HDR_TCP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_TCP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_TCP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_TCP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_CHKSUM),
+ ICE_FLOW_HASH_TCP_PORT |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_UDP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)},
+ {VIRTCHNL_PROTO_HDR_UDP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)},
+ {VIRTCHNL_PROTO_HDR_UDP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
+ ICE_FLOW_HASH_UDP_PORT},
+ {VIRTCHNL_PROTO_HDR_UDP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_UDP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_UDP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_UDP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_CHKSUM),
+ ICE_FLOW_HASH_UDP_PORT |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_SCTP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)},
+ {VIRTCHNL_PROTO_HDR_SCTP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)},
+ {VIRTCHNL_PROTO_HDR_SCTP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
+ ICE_FLOW_HASH_SCTP_PORT},
+ {VIRTCHNL_PROTO_HDR_SCTP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_SCTP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_SCTP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_SCTP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_CHKSUM),
+ ICE_FLOW_HASH_SCTP_PORT |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_PPPOE,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID)},
+ {VIRTCHNL_PROTO_HDR_GTPU_IP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_GTPU_IP_TEID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID)},
+ {VIRTCHNL_PROTO_HDR_L2TPV3,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID)},
+ {VIRTCHNL_PROTO_HDR_ESP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ESP_SPI),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI)},
+ {VIRTCHNL_PROTO_HDR_AH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_AH_SPI),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_AH_SPI)},
+ {VIRTCHNL_PROTO_HDR_PFCP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PFCP_SEID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_PFCP_SEID)},
+ {VIRTCHNL_PROTO_HDR_GTPC,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_GTPC_TEID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID)},
+ {VIRTCHNL_PROTO_HDR_L2TPV2,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV2_SESS_ID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV2_SESS_ID)},
+ {VIRTCHNL_PROTO_HDR_L2TPV2,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV2_LEN_SESS_ID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV2_LEN_SESS_ID)},
+};
+
+static int
+ice_vc_rss_hash_update(struct ice_hw *hw, struct ice_vsi *vsi, u8 hash_type)
+{
+ struct ice_vsi_ctx *ctx;
+ int ret;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ /* clear previous hash_type */
+ ctx->info.q_opt_rss = vsi->info.q_opt_rss &
+ ~ICE_AQ_VSI_Q_OPT_RSS_HASH_M;
+ /* hash_type is passed in as ICE_AQ_VSI_Q_OPT_RSS_<XOR|TPLZ|SYM_TPLZ> */
+ ctx->info.q_opt_rss |= FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M,
+ hash_type);
+
+ /* Preserve existing queueing option setting */
+ ctx->info.q_opt_tc = vsi->info.q_opt_tc;
+ ctx->info.q_opt_flags = vsi->info.q_opt_flags;
+
+ ctx->info.valid_sections =
+ cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
+
+ ret = ice_update_vsi(hw, vsi->idx, ctx, NULL);
+ if (ret) {
+ dev_err(ice_hw_to_dev(hw), "update VSI for RSS failed, err %d aq_err %s\n",
+ ret, libie_aq_str(hw->adminq.sq_last_status));
+ } else {
+ vsi->info.q_opt_rss = ctx->info.q_opt_rss;
+ }
+
+ kfree(ctx);
+
+ return ret;
+}
+
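The clear-then-set bitfield update performed on q_opt_rss, shown as a plain C sketch with shifts and masks instead of FIELD_PREP; the field position and width below are assumptions for illustration only.

    #include <stdio.h>

    /* Assumed 3-bit hash-type field at bits 2..4 of q_opt_rss (illustration). */
    #define HASH_SHIFT 2
    #define HASH_MASK  (0x7u << HASH_SHIFT)

    static unsigned char set_hash_type(unsigned char q_opt_rss, unsigned char type)
    {
            q_opt_rss &= ~HASH_MASK;                                /* clear old value */
            return q_opt_rss | ((type << HASH_SHIFT) & HASH_MASK);  /* set new value   */
    }

    int main(void)
    {
            printf("0x%02x\n", set_hash_type(0xff, 2)); /* prints 0xeb */
            return 0;
    }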
+/**
+ * ice_vc_validate_pattern
+ * @vf: pointer to the VF info
+ * @proto: virtchnl protocol headers
+ *
+ * Validate whether the pattern is supported.
+ *
+ * Return: true on success, false on error.
+ */
+bool
+ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto)
+{
+ bool is_ipv4 = false;
+ bool is_ipv6 = false;
+ bool is_udp = false;
+ u16 ptype = -1;
+ int i = 0;
+
+ while (i < proto->count &&
+ proto->proto_hdr[i].type != VIRTCHNL_PROTO_HDR_NONE) {
+ switch (proto->proto_hdr[i].type) {
+ case VIRTCHNL_PROTO_HDR_ETH:
+ ptype = ICE_PTYPE_MAC_PAY;
+ break;
+ case VIRTCHNL_PROTO_HDR_IPV4:
+ ptype = ICE_PTYPE_IPV4_PAY;
+ is_ipv4 = true;
+ break;
+ case VIRTCHNL_PROTO_HDR_IPV6:
+ ptype = ICE_PTYPE_IPV6_PAY;
+ is_ipv6 = true;
+ break;
+ case VIRTCHNL_PROTO_HDR_UDP:
+ if (is_ipv4)
+ ptype = ICE_PTYPE_IPV4_UDP_PAY;
+ else if (is_ipv6)
+ ptype = ICE_PTYPE_IPV6_UDP_PAY;
+ is_udp = true;
+ break;
+ case VIRTCHNL_PROTO_HDR_TCP:
+ if (is_ipv4)
+ ptype = ICE_PTYPE_IPV4_TCP_PAY;
+ else if (is_ipv6)
+ ptype = ICE_PTYPE_IPV6_TCP_PAY;
+ break;
+ case VIRTCHNL_PROTO_HDR_SCTP:
+ if (is_ipv4)
+ ptype = ICE_PTYPE_IPV4_SCTP_PAY;
+ else if (is_ipv6)
+ ptype = ICE_PTYPE_IPV6_SCTP_PAY;
+ break;
+ case VIRTCHNL_PROTO_HDR_GTPU_IP:
+ case VIRTCHNL_PROTO_HDR_GTPU_EH:
+ if (is_ipv4)
+ ptype = ICE_MAC_IPV4_GTPU;
+ else if (is_ipv6)
+ ptype = ICE_MAC_IPV6_GTPU;
+ goto out;
+ case VIRTCHNL_PROTO_HDR_L2TPV3:
+ if (is_ipv4)
+ ptype = ICE_MAC_IPV4_L2TPV3;
+ else if (is_ipv6)
+ ptype = ICE_MAC_IPV6_L2TPV3;
+ goto out;
+ case VIRTCHNL_PROTO_HDR_ESP:
+ if (is_ipv4)
+ ptype = is_udp ? ICE_MAC_IPV4_NAT_T_ESP :
+ ICE_MAC_IPV4_ESP;
+ else if (is_ipv6)
+ ptype = is_udp ? ICE_MAC_IPV6_NAT_T_ESP :
+ ICE_MAC_IPV6_ESP;
+ goto out;
+ case VIRTCHNL_PROTO_HDR_AH:
+ if (is_ipv4)
+ ptype = ICE_MAC_IPV4_AH;
+ else if (is_ipv6)
+ ptype = ICE_MAC_IPV6_AH;
+ goto out;
+ case VIRTCHNL_PROTO_HDR_PFCP:
+ if (is_ipv4)
+ ptype = ICE_MAC_IPV4_PFCP_SESSION;
+ else if (is_ipv6)
+ ptype = ICE_MAC_IPV6_PFCP_SESSION;
+ goto out;
+ default:
+ break;
+ }
+ i++;
+ }
+
+out:
+ return ice_hw_ptype_ena(&vf->pf->hw, ptype);
+}
+
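A condensed sketch of how an ordered protocol-header list collapses to a single packet type before the hardware capability check; the enum names below are invented for the example and cover only a few header types.

    #include <stdio.h>

    enum hdr { HDR_NONE, HDR_ETH, HDR_IPV4, HDR_IPV6, HDR_UDP, HDR_TCP };
    enum ptype { PT_UNKNOWN, PT_MAC, PT_IPV4, PT_IPV6, PT_IPV4_UDP, PT_IPV6_UDP };

    static enum ptype classify(const enum hdr *h, int n)
    {
            enum ptype pt = PT_UNKNOWN;
            int v4 = 0, v6 = 0, i;

            for (i = 0; i < n && h[i] != HDR_NONE; i++) {
                    switch (h[i]) {
                    case HDR_ETH:  pt = PT_MAC; break;
                    case HDR_IPV4: pt = PT_IPV4; v4 = 1; break;
                    case HDR_IPV6: pt = PT_IPV6; v6 = 1; break;
                    case HDR_UDP:  pt = v4 ? PT_IPV4_UDP : (v6 ? PT_IPV6_UDP : pt); break;
                    default: break;
                    }
            }
            return pt; /* the driver then checks this ptype against HW support */
    }

    int main(void)
    {
            enum hdr pattern[] = { HDR_ETH, HDR_IPV4, HDR_UDP };
            printf("%d\n", classify(pattern, 3)); /* PT_IPV4_UDP */
            return 0;
    }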
+/**
+ * ice_vc_parse_rss_cfg - parses hash fields and headers from
+ * a specific virtchnl RSS cfg
+ * @hw: pointer to the hardware
+ * @rss_cfg: pointer to the virtchnl RSS cfg
+ * @hash_cfg: pointer to the HW hash configuration
+ *
+ * Return true if all the protocol headers and hash fields in the RSS cfg
+ * could be parsed, else return false.
+ *
+ * This function parses the virtchnl RSS cfg into the intended hash fields
+ * and headers for the RSS configuration.
+ */
+static bool ice_vc_parse_rss_cfg(struct ice_hw *hw,
+ struct virtchnl_rss_cfg *rss_cfg,
+ struct ice_rss_hash_cfg *hash_cfg)
+{
+ const struct ice_vc_hash_field_match_type *hf_list;
+ const struct ice_vc_hdr_match_type *hdr_list;
+ int i, hf_list_len, hdr_list_len;
+ bool outer_ipv4 = false;
+ bool outer_ipv6 = false;
+ bool inner_hdr = false;
+ bool has_gre = false;
+
+ u32 *addl_hdrs = &hash_cfg->addl_hdrs;
+ u64 *hash_flds = &hash_cfg->hash_flds;
+
+ /* set outer layer RSS as default */
+ hash_cfg->hdr_type = ICE_RSS_OUTER_HEADERS;
+
+ if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC)
+ hash_cfg->symm = true;
+ else
+ hash_cfg->symm = false;
+
+ hf_list = ice_vc_hash_field_list;
+ hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list);
+ hdr_list = ice_vc_hdr_list;
+ hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list);
+
+ for (i = 0; i < rss_cfg->proto_hdrs.count; i++) {
+ struct virtchnl_proto_hdr *proto_hdr =
+ &rss_cfg->proto_hdrs.proto_hdr[i];
+ u32 hdr_found = 0;
+ int j;
+
+ /* Find matched ice headers according to virtchnl headers.
+ * Also figure out the outer type of GTPU headers.
+ */
+ for (j = 0; j < hdr_list_len; j++) {
+ struct ice_vc_hdr_match_type hdr_map = hdr_list[j];
+
+ if (proto_hdr->type == hdr_map.vc_hdr)
+ hdr_found = hdr_map.ice_hdr;
+ }
+
+ if (!hdr_found)
+ return false;
+
+ /* Find matched ice hash fields according to
+ * virtchnl hash fields.
+ */
+ for (j = 0; j < hf_list_len; j++) {
+ struct ice_vc_hash_field_match_type hf_map = hf_list[j];
+
+ if (proto_hdr->type == hf_map.vc_hdr &&
+ proto_hdr->field_selector == hf_map.vc_hash_field) {
+ *hash_flds |= hf_map.ice_hash_field;
+ break;
+ }
+ }
+
+ if (proto_hdr->type == VIRTCHNL_PROTO_HDR_IPV4 && !inner_hdr)
+ outer_ipv4 = true;
+ else if (proto_hdr->type == VIRTCHNL_PROTO_HDR_IPV6 &&
+ !inner_hdr)
+ outer_ipv6 = true;
+		/* for GRE and L2TPv2, take the inner header as the input set
+		 * if no fields are selected from the outer headers.
+		 * for GTPU, take the inner header and the GTPU TEID as the
+		 * input set.
+		 */
+ else if ((proto_hdr->type == VIRTCHNL_PROTO_HDR_GTPU_IP ||
+ proto_hdr->type == VIRTCHNL_PROTO_HDR_GTPU_EH ||
+ proto_hdr->type == VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN ||
+ proto_hdr->type ==
+ VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP) ||
+ ((proto_hdr->type == VIRTCHNL_PROTO_HDR_L2TPV2 ||
+ proto_hdr->type == VIRTCHNL_PROTO_HDR_GRE) &&
+ *hash_flds == 0)) {
+ /* set inner_hdr flag, and clean up outer header */
+ inner_hdr = true;
+
+ /* clear outer headers */
+ *addl_hdrs = 0;
+
+ if (outer_ipv4 && outer_ipv6)
+ return false;
+
+ if (outer_ipv4)
+ hash_cfg->hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV4;
+ else if (outer_ipv6)
+ hash_cfg->hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV6;
+ else
+ hash_cfg->hdr_type = ICE_RSS_INNER_HEADERS;
+
+ if (has_gre && outer_ipv4)
+ hash_cfg->hdr_type =
+ ICE_RSS_INNER_HEADERS_W_OUTER_IPV4_GRE;
+ if (has_gre && outer_ipv6)
+ hash_cfg->hdr_type =
+ ICE_RSS_INNER_HEADERS_W_OUTER_IPV6_GRE;
+
+ if (proto_hdr->type == VIRTCHNL_PROTO_HDR_GRE)
+ has_gre = true;
+ }
+
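+		/* accumulate the matched ice header flags for this proto
+		 * header into the flow segment header bitmap
+		 */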
+ *addl_hdrs |= hdr_found;
+
+ /* refine hash hdrs and fields for IP fragment */
+ if (VIRTCHNL_TEST_PROTO_HDR_FIELD(proto_hdr,
+ VIRTCHNL_PROTO_HDR_IPV4_FRAG_PKID) &&
+ proto_hdr->type == VIRTCHNL_PROTO_HDR_IPV4_FRAG) {
+ *addl_hdrs |= ICE_FLOW_SEG_HDR_IPV_FRAG;
+ *addl_hdrs &= ~(ICE_FLOW_SEG_HDR_IPV_OTHER);
+ *hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_ID);
+ VIRTCHNL_DEL_PROTO_HDR_FIELD(proto_hdr,
+ VIRTCHNL_PROTO_HDR_IPV4_FRAG_PKID);
+ }
+ if (VIRTCHNL_TEST_PROTO_HDR_FIELD(proto_hdr,
+ VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG_PKID) &&
+ proto_hdr->type == VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG) {
+ *addl_hdrs |= ICE_FLOW_SEG_HDR_IPV_FRAG;
+ *addl_hdrs &= ~(ICE_FLOW_SEG_HDR_IPV_OTHER);
+ *hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_ID);
+ VIRTCHNL_DEL_PROTO_HDR_FIELD(proto_hdr,
+ VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG_PKID);
+ }
+ }
+
+	/* refine the GTPU header if the outer headers are used as the input
+	 * set for a GTPU flow with no inner IP header.
+	 */
+ if (hash_cfg->hdr_type == ICE_RSS_OUTER_HEADERS &&
+ *addl_hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
+ *addl_hdrs &= ~(ICE_FLOW_SEG_HDR_GTPU_IP);
+ *addl_hdrs |= ICE_FLOW_SEG_HDR_GTPU_NON_IP;
+ }
+
+ /* refine hash field for esp and nat-t-esp. */
+ if ((*addl_hdrs & ICE_FLOW_SEG_HDR_UDP) &&
+ (*addl_hdrs & ICE_FLOW_SEG_HDR_ESP)) {
+ *addl_hdrs &= ~(ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_UDP);
+ *addl_hdrs |= ICE_FLOW_SEG_HDR_NAT_T_ESP;
+ *hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI));
+ *hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI);
+ }
+
+ /* refine hash hdrs for L4 udp/tcp/sctp. */
+ if (*addl_hdrs & (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP |
+ ICE_FLOW_SEG_HDR_SCTP) &&
+ *addl_hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)
+ *addl_hdrs &= ~ICE_FLOW_SEG_HDR_IPV_OTHER;
+
+ return true;
+}
+
+/**
+ * ice_vf_adv_rss_offload_ena - determine if capabilities support advanced
+ * RSS offloads
+ * @caps: VF driver negotiated capabilities
+ *
+ * Return: true if the VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF capability is set,
+ * false otherwise.
+ */
+static bool ice_vf_adv_rss_offload_ena(u32 caps)
+{
+ return !!(caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF);
+}
+
+/**
+ * ice_is_hash_cfg_valid - Check whether an RSS hash context is valid
+ * @cfg: RSS hash configuration to test
+ *
+ * Return: true if both @cfg->hash_flds and @cfg->addl_hdrs are non-zero; false otherwise.
+ */
+static bool ice_is_hash_cfg_valid(struct ice_rss_hash_cfg *cfg)
+{
+ return cfg->hash_flds && cfg->addl_hdrs;
+}
+
+/**
+ * ice_hash_cfg_reset - Reset an RSS hash context
+ * @cfg: RSS hash configuration to reset
+ *
+ * Reset fields of @cfg that store the active rule information.
+ */
+static void ice_hash_cfg_reset(struct ice_rss_hash_cfg *cfg)
+{
+ cfg->hash_flds = 0;
+ cfg->addl_hdrs = 0;
+ cfg->hdr_type = ICE_RSS_OUTER_HEADERS;
+ cfg->symm = 0;
+}
+
+/**
+ * ice_hash_cfg_record - Record an RSS hash context
+ * @ctx: destination (global) RSS hash configuration
+ * @cfg: source RSS hash configuration to record
+ *
+ * Copy the active rule information from @cfg into @ctx.
+ */
+static void ice_hash_cfg_record(struct ice_rss_hash_cfg *ctx,
+ struct ice_rss_hash_cfg *cfg)
+{
+ ctx->hash_flds = cfg->hash_flds;
+ ctx->addl_hdrs = cfg->addl_hdrs;
+ ctx->hdr_type = cfg->hdr_type;
+ ctx->symm = cfg->symm;
+}
+
+/**
+ * ice_hash_moveout - Delete an RSS configuration (keep context)
+ * @vf: VF pointer
+ * @cfg: RSS hash configuration
+ *
+ * Return: 0 on success (including when already absent); -ENOENT if @cfg is
+ * invalid or VSI is missing; -EBUSY on hardware removal failure.
+ */
+static int
+ice_hash_moveout(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ struct ice_hw *hw = &vf->pf->hw;
+ int ret;
+
+ if (!ice_is_hash_cfg_valid(cfg) || !vsi)
+ return -ENOENT;
+
+ ret = ice_rem_rss_cfg(hw, vsi->idx, cfg);
+ if (ret && ret != -ENOENT) {
+ dev_err(dev, "ice_rem_rss_cfg failed for VF %d, VSI %d, error:%d\n",
+ vf->vf_id, vf->lan_vsi_idx, ret);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_hash_moveback - Add an RSS hash configuration for a VF
+ * @vf: VF pointer
+ * @cfg: RSS hash configuration to apply
+ *
+ * Add @cfg to @vf if the context is valid and VSI exists; programs HW.
+ *
+ * Return:
+ * * 0 on success
+ * * -ENOENT if @cfg is invalid or VSI is missing
+ * * -EBUSY if hardware programming fails
+ */
+static int
+ice_hash_moveback(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ struct ice_hw *hw = &vf->pf->hw;
+ int ret;
+
+ if (!ice_is_hash_cfg_valid(cfg) || !vsi)
+ return -ENOENT;
+
+ ret = ice_add_rss_cfg(hw, vsi, cfg);
+ if (ret) {
+ dev_err(dev, "ice_add_rss_cfg failed for VF %d, VSI %d, error:%d\n",
+ vf->vf_id, vf->lan_vsi_idx, ret);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_hash_remove - remove an RSS configuration
+ * @vf: pointer to the VF info
+ * @cfg: pointer to the RSS hash configuration
+ *
+ * This function will delete an RSS hash configuration and also delete the
+ * hash context which stores the rule info.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int
+ice_hash_remove(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg)
+{
+ int ret;
+
+ ret = ice_hash_moveout(vf, cfg);
+ if (ret && ret != -ENOENT)
+ return ret;
+
+ ice_hash_cfg_reset(cfg);
+
+ return 0;
+}
+
+struct ice_gtpu_ctx_action {
+ u32 ctx_idx;
+ const u32 *remove_list;
+ int remove_count;
+ const u32 *moveout_list;
+ int moveout_count;
+};
+
+/**
+ * ice_add_rss_cfg_pre_gtpu - Pre-process the GTPU RSS configuration
+ * @vf: pointer to the VF info
+ * @ctx: pointer to the context of the GTPU hash
+ * @ctx_idx: index of the hash context
+ *
+ * Pre-processes the GTPU hash configuration before adding a new
+ * hash context. It removes or reorders existing hash configurations that may
+ * conflict with the new one. For example, if a GTPU_UP or GTPU_DWN rule is
+ * configured after a GTPU_EH rule, the GTPU_EH hash will be matched first due
+ * to TCAM write and match order (top-down). In such cases, the GTPU_EH rule
+ * must be moved after the GTPU_UP/DWN rule. Conversely, if a GTPU_EH rule is
+ * configured after a GTPU_UP/DWN rule, the UP/DWN rules should be removed to
+ * avoid conflict.
+ *
+ * Return: 0 on success or a negative error code on failure
+ */
+static int ice_add_rss_cfg_pre_gtpu(struct ice_vf *vf,
+ struct ice_vf_hash_gtpu_ctx *ctx,
+ u32 ctx_idx)
+{
+ int ret, i;
+
+ static const u32 remove_eh_ip[] = {
+ ICE_HASH_GTPU_CTX_EH_IP_UDP, ICE_HASH_GTPU_CTX_EH_IP_TCP,
+ ICE_HASH_GTPU_CTX_UP_IP, ICE_HASH_GTPU_CTX_UP_IP_UDP,
+ ICE_HASH_GTPU_CTX_UP_IP_TCP, ICE_HASH_GTPU_CTX_DW_IP,
+ ICE_HASH_GTPU_CTX_DW_IP_UDP, ICE_HASH_GTPU_CTX_DW_IP_TCP,
+ };
+
+ static const u32 remove_eh_ip_udp[] = {
+ ICE_HASH_GTPU_CTX_UP_IP_UDP,
+ ICE_HASH_GTPU_CTX_DW_IP_UDP,
+ };
+ static const u32 moveout_eh_ip_udp[] = {
+ ICE_HASH_GTPU_CTX_UP_IP,
+ ICE_HASH_GTPU_CTX_UP_IP_TCP,
+ ICE_HASH_GTPU_CTX_DW_IP,
+ ICE_HASH_GTPU_CTX_DW_IP_TCP,
+ };
+
+ static const u32 remove_eh_ip_tcp[] = {
+ ICE_HASH_GTPU_CTX_UP_IP_TCP,
+ ICE_HASH_GTPU_CTX_DW_IP_TCP,
+ };
+ static const u32 moveout_eh_ip_tcp[] = {
+ ICE_HASH_GTPU_CTX_UP_IP,
+ ICE_HASH_GTPU_CTX_UP_IP_UDP,
+ ICE_HASH_GTPU_CTX_DW_IP,
+ ICE_HASH_GTPU_CTX_DW_IP_UDP,
+ };
+
+ static const u32 remove_up_ip[] = {
+ ICE_HASH_GTPU_CTX_UP_IP_UDP,
+ ICE_HASH_GTPU_CTX_UP_IP_TCP,
+ };
+ static const u32 moveout_up_ip[] = {
+ ICE_HASH_GTPU_CTX_EH_IP,
+ ICE_HASH_GTPU_CTX_EH_IP_UDP,
+ ICE_HASH_GTPU_CTX_EH_IP_TCP,
+ };
+
+ static const u32 moveout_up_ip_udp_tcp[] = {
+ ICE_HASH_GTPU_CTX_EH_IP,
+ ICE_HASH_GTPU_CTX_EH_IP_UDP,
+ ICE_HASH_GTPU_CTX_EH_IP_TCP,
+ };
+
+ static const u32 remove_dw_ip[] = {
+ ICE_HASH_GTPU_CTX_DW_IP_UDP,
+ ICE_HASH_GTPU_CTX_DW_IP_TCP,
+ };
+ static const u32 moveout_dw_ip[] = {
+ ICE_HASH_GTPU_CTX_EH_IP,
+ ICE_HASH_GTPU_CTX_EH_IP_UDP,
+ ICE_HASH_GTPU_CTX_EH_IP_TCP,
+ };
+
+ static const struct ice_gtpu_ctx_action actions[] = {
+ { ICE_HASH_GTPU_CTX_EH_IP, remove_eh_ip,
+ ARRAY_SIZE(remove_eh_ip), NULL, 0 },
+ { ICE_HASH_GTPU_CTX_EH_IP_UDP, remove_eh_ip_udp,
+ ARRAY_SIZE(remove_eh_ip_udp), moveout_eh_ip_udp,
+ ARRAY_SIZE(moveout_eh_ip_udp) },
+ { ICE_HASH_GTPU_CTX_EH_IP_TCP, remove_eh_ip_tcp,
+ ARRAY_SIZE(remove_eh_ip_tcp), moveout_eh_ip_tcp,
+ ARRAY_SIZE(moveout_eh_ip_tcp) },
+ { ICE_HASH_GTPU_CTX_UP_IP, remove_up_ip,
+ ARRAY_SIZE(remove_up_ip), moveout_up_ip,
+ ARRAY_SIZE(moveout_up_ip) },
+ { ICE_HASH_GTPU_CTX_UP_IP_UDP, NULL, 0, moveout_up_ip_udp_tcp,
+ ARRAY_SIZE(moveout_up_ip_udp_tcp) },
+ { ICE_HASH_GTPU_CTX_UP_IP_TCP, NULL, 0, moveout_up_ip_udp_tcp,
+ ARRAY_SIZE(moveout_up_ip_udp_tcp) },
+ { ICE_HASH_GTPU_CTX_DW_IP, remove_dw_ip,
+ ARRAY_SIZE(remove_dw_ip), moveout_dw_ip,
+ ARRAY_SIZE(moveout_dw_ip) },
+ { ICE_HASH_GTPU_CTX_DW_IP_UDP, NULL, 0, moveout_dw_ip,
+ ARRAY_SIZE(moveout_dw_ip) },
+ { ICE_HASH_GTPU_CTX_DW_IP_TCP, NULL, 0, moveout_dw_ip,
+ ARRAY_SIZE(moveout_dw_ip) },
+ };
+
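+	/* Each action entry lists the contexts whose rules must be removed
+	 * outright and the contexts whose rules are temporarily moved out
+	 * (their recorded state is kept) so they can be re-added after the
+	 * new rule is programmed.
+	 */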
+ for (i = 0; i < ARRAY_SIZE(actions); i++) {
+ if (actions[i].ctx_idx != ctx_idx)
+ continue;
+
+ if (actions[i].remove_list) {
+ for (int j = 0; j < actions[i].remove_count; j++) {
+ u16 rm = actions[i].remove_list[j];
+
+ ret = ice_hash_remove(vf, &ctx->ctx[rm]);
+ if (ret && ret != -ENOENT)
+ return ret;
+ }
+ }
+
+ if (actions[i].moveout_list) {
+ for (int j = 0; j < actions[i].moveout_count; j++) {
+ u16 mv = actions[i].moveout_list[j];
+
+ ret = ice_hash_moveout(vf, &ctx->ctx[mv]);
+ if (ret && ret != -ENOENT)
+ return ret;
+ }
+ }
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_add_rss_cfg_pre_ip - Pre-process IP-layer RSS configuration
+ * @vf: VF pointer
+ * @ctx: IP L4 hash context (ESP/UDP-ESP/AH/PFCP and UDP/TCP/SCTP)
+ *
+ * Remove covered/recorded IP RSS configurations prior to adding a new one.
+ *
+ * Return: 0 on success; negative error code on failure.
+ */
+static int
+ice_add_rss_cfg_pre_ip(struct ice_vf *vf, struct ice_vf_hash_ip_ctx *ctx)
+{
+ int i, ret;
+
+	for (i = 1; i < ICE_HASH_IP_CTX_MAX; i++) {
+		if (ice_is_hash_cfg_valid(&ctx->ctx[i])) {
+			ret = ice_hash_remove(vf, &ctx->ctx[i]);
+			if (ret)
+				return ret;
+		}
+	}
+
+ return 0;
+}
+
+/**
+ * ice_calc_gtpu_ctx_idx - Calculate GTPU hash context index
+ * @hdrs: Bitmask of protocol headers prefixed with ICE_FLOW_SEG_HDR_*
+ *
+ * Determine the GTPU hash context index based on the combination of
+ * encapsulation headers (GTPU_EH, GTPU_UP, GTPU_DWN) and transport
+ * protocols (UDP, TCP) within IPv4 or IPv6 flows.
+ *
+ * Return: A valid context index (0-8) if the header combination is supported,
+ * or ICE_HASH_GTPU_CTX_MAX if the combination is invalid.
+ */
+static enum ice_hash_gtpu_ctx_type ice_calc_gtpu_ctx_idx(u32 hdrs)
+{
+ u32 eh_idx, ip_idx;
+
+ if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH)
+ eh_idx = 0;
+ else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP)
+ eh_idx = 1;
+ else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN)
+ eh_idx = 2;
+ else
+ return ICE_HASH_GTPU_CTX_MAX;
+
+ ip_idx = 0;
+ if (hdrs & ICE_FLOW_SEG_HDR_UDP)
+ ip_idx = 1;
+ else if (hdrs & ICE_FLOW_SEG_HDR_TCP)
+ ip_idx = 2;
+
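+	/* index is eh_idx * 3 + ip_idx: (EH, UP, DWN) rows by
+	 * (other, UDP, TCP) columns, giving contexts 0-8
+	 */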
+ if (hdrs & (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6))
+ return eh_idx * 3 + ip_idx;
+ else
+ return ICE_HASH_GTPU_CTX_MAX;
+}
+
+/**
+ * ice_map_ip_ctx_idx - map the index of the IP L4 hash context
+ * @hdrs: protocol headers prefixed with ICE_FLOW_SEG_HDR_XXX
+ *
+ * The IP L4 hash context uses the index to classify IPv4/IPv6 flows with
+ * ESP/UDP_ESP/AH/PFCP and non-tunnel UDP/TCP/SCTP headers.
+ * This function maps the index based on the protocol headers.
+ *
+ * Return: The mapped IP context index on success, or ICE_HASH_IP_CTX_MAX
+ * if no matching context is found.
+ */
+static u8 ice_map_ip_ctx_idx(u32 hdrs)
+{
+ u8 i;
+
+ static struct {
+ u32 hdrs;
+ u8 ctx_idx;
+ } ip_ctx_idx_map[] = {
+ { ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER |
+ ICE_FLOW_SEG_HDR_ESP,
+ ICE_HASH_IP_CTX_IP_ESP },
+ { ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER |
+ ICE_FLOW_SEG_HDR_NAT_T_ESP,
+ ICE_HASH_IP_CTX_IP_UDP_ESP },
+ { ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER |
+ ICE_FLOW_SEG_HDR_AH,
+ ICE_HASH_IP_CTX_IP_AH },
+ { ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER |
+ ICE_FLOW_SEG_HDR_PFCP_SESSION,
+ ICE_HASH_IP_CTX_IP_PFCP },
+ { ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN |
+ ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_UDP,
+ ICE_HASH_IP_CTX_IP_UDP },
+ { ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN |
+ ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP,
+ ICE_HASH_IP_CTX_IP_TCP },
+ { ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN |
+ ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_SCTP,
+ ICE_HASH_IP_CTX_IP_SCTP },
+ { ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN |
+ ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER,
+ ICE_HASH_IP_CTX_IP },
+ { ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER |
+ ICE_FLOW_SEG_HDR_ESP,
+ ICE_HASH_IP_CTX_IP_ESP },
+ { ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER |
+ ICE_FLOW_SEG_HDR_NAT_T_ESP,
+ ICE_HASH_IP_CTX_IP_UDP_ESP },
+ { ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER |
+ ICE_FLOW_SEG_HDR_AH,
+ ICE_HASH_IP_CTX_IP_AH },
+ { ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER |
+ ICE_FLOW_SEG_HDR_PFCP_SESSION,
+ ICE_HASH_IP_CTX_IP_PFCP },
+ { ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN |
+ ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_UDP,
+ ICE_HASH_IP_CTX_IP_UDP },
+ { ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN |
+ ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_TCP,
+ ICE_HASH_IP_CTX_IP_TCP },
+ { ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN |
+ ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_SCTP,
+ ICE_HASH_IP_CTX_IP_SCTP },
+ { ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN |
+ ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER,
+ ICE_HASH_IP_CTX_IP },
+ /* the remaining mappings are used for default RSS */
+ { ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_UDP,
+ ICE_HASH_IP_CTX_IP_UDP },
+ { ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP,
+ ICE_HASH_IP_CTX_IP_TCP },
+ { ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_SCTP,
+ ICE_HASH_IP_CTX_IP_SCTP },
+ { ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER,
+ ICE_HASH_IP_CTX_IP },
+ { ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_UDP,
+ ICE_HASH_IP_CTX_IP_UDP },
+ { ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_TCP,
+ ICE_HASH_IP_CTX_IP_TCP },
+ { ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_SCTP,
+ ICE_HASH_IP_CTX_IP_SCTP },
+ { ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER,
+ ICE_HASH_IP_CTX_IP },
+ };
+
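+	/* exact-match lookup: the header bitmap must match a map entry
+	 * exactly, otherwise no IP context applies
+	 */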
+ for (i = 0; i < ARRAY_SIZE(ip_ctx_idx_map); i++) {
+ if (hdrs == ip_ctx_idx_map[i].hdrs)
+ return ip_ctx_idx_map[i].ctx_idx;
+ }
+
+ return ICE_HASH_IP_CTX_MAX;
+}
+
+/**
+ * ice_add_rss_cfg_pre - Prepare RSS configuration context for a VF
+ * @vf: pointer to the VF structure
+ * @cfg: pointer to the RSS hash configuration
+ *
+ * Prepare the RSS hash context for a given VF based on the additional
+ * protocol headers specified in @cfg. This includes pre-configuration
+ * for IP and GTPU-based flows.
+ *
+ * If the configuration matches a known IP context, the function sets up
+ * the appropriate IP hash context. If the configuration includes GTPU
+ * headers, it prepares the GTPU-specific context accordingly.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int
+ice_add_rss_cfg_pre(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg)
+{
+ u32 ice_gtpu_ctx_idx = ice_calc_gtpu_ctx_idx(cfg->addl_hdrs);
+ u8 ip_ctx_idx = ice_map_ip_ctx_idx(cfg->addl_hdrs);
+
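+	/* a plain IP rule invalidates the recorded protocol-specific contexts
+	 * (ESP/UDP-ESP/AH/PFCP and UDP/TCP/SCTP) for that IP version
+	 */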
+ if (ip_ctx_idx == ICE_HASH_IP_CTX_IP) {
+ int ret = 0;
+
+ if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4)
+ ret = ice_add_rss_cfg_pre_ip(vf, &vf->hash_ctx.v4);
+ else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6)
+ ret = ice_add_rss_cfg_pre_ip(vf, &vf->hash_ctx.v6);
+
+ if (ret)
+ return ret;
+ }
+
+ if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4) {
+ return ice_add_rss_cfg_pre_gtpu(vf, &vf->hash_ctx.ipv4,
+ ice_gtpu_ctx_idx);
+ } else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6) {
+ return ice_add_rss_cfg_pre_gtpu(vf, &vf->hash_ctx.ipv6,
+ ice_gtpu_ctx_idx);
+ }
+
+ return 0;
+}
+
+/**
+ * ice_add_rss_cfg_post_gtpu - Post-process GTPU RSS configuration
+ * @vf: pointer to the VF info
+ * @ctx: pointer to the context of the GTPU hash
+ * @cfg: pointer to the RSS hash configuration
+ * @ctx_idx: index of the hash context
+ *
+ * Post-processes the GTPU hash configuration after a new hash
+ * context has been successfully added. It updates the context with the new
+ * configuration and restores any previously removed hash contexts that need
+ * to be re-applied. This ensures proper TCAM rule ordering and avoids
+ * conflicts between overlapping GTPU rules.
+ *
+ * Return: 0 on success or a negative error code on failure
+ */
+static int ice_add_rss_cfg_post_gtpu(struct ice_vf *vf,
+ struct ice_vf_hash_gtpu_ctx *ctx,
+ struct ice_rss_hash_cfg *cfg, u32 ctx_idx)
+{
+ /* GTPU hash moveback lookup table indexed by context ID.
+ * Each entry is a bitmap indicating which contexts need moveback
+ * operations when the corresponding context index is processed.
+ */
+ static const unsigned long
+ ice_gtpu_moveback_tbl[ICE_HASH_GTPU_CTX_MAX] = {
+ [ICE_HASH_GTPU_CTX_EH_IP] = 0,
+ [ICE_HASH_GTPU_CTX_EH_IP_UDP] =
+ BIT(ICE_HASH_GTPU_CTX_UP_IP) |
+ BIT(ICE_HASH_GTPU_CTX_UP_IP_TCP) |
+ BIT(ICE_HASH_GTPU_CTX_DW_IP) |
+ BIT(ICE_HASH_GTPU_CTX_DW_IP_TCP),
+ [ICE_HASH_GTPU_CTX_EH_IP_TCP] =
+ BIT(ICE_HASH_GTPU_CTX_UP_IP) |
+ BIT(ICE_HASH_GTPU_CTX_UP_IP_UDP) |
+ BIT(ICE_HASH_GTPU_CTX_DW_IP) |
+ BIT(ICE_HASH_GTPU_CTX_DW_IP_UDP),
+ [ICE_HASH_GTPU_CTX_UP_IP] =
+ BIT(ICE_HASH_GTPU_CTX_EH_IP) |
+ BIT(ICE_HASH_GTPU_CTX_EH_IP_UDP) |
+ BIT(ICE_HASH_GTPU_CTX_EH_IP_TCP),
+ [ICE_HASH_GTPU_CTX_UP_IP_UDP] =
+ BIT(ICE_HASH_GTPU_CTX_EH_IP) |
+ BIT(ICE_HASH_GTPU_CTX_EH_IP_UDP) |
+ BIT(ICE_HASH_GTPU_CTX_EH_IP_TCP),
+ [ICE_HASH_GTPU_CTX_UP_IP_TCP] =
+ BIT(ICE_HASH_GTPU_CTX_EH_IP) |
+ BIT(ICE_HASH_GTPU_CTX_EH_IP_UDP) |
+ BIT(ICE_HASH_GTPU_CTX_EH_IP_TCP),
+ [ICE_HASH_GTPU_CTX_DW_IP] =
+ BIT(ICE_HASH_GTPU_CTX_EH_IP) |
+ BIT(ICE_HASH_GTPU_CTX_EH_IP_UDP) |
+ BIT(ICE_HASH_GTPU_CTX_EH_IP_TCP),
+ [ICE_HASH_GTPU_CTX_DW_IP_UDP] =
+ BIT(ICE_HASH_GTPU_CTX_EH_IP) |
+ BIT(ICE_HASH_GTPU_CTX_EH_IP_UDP) |
+ BIT(ICE_HASH_GTPU_CTX_EH_IP_TCP),
+ [ICE_HASH_GTPU_CTX_DW_IP_TCP] =
+ BIT(ICE_HASH_GTPU_CTX_EH_IP) |
+ BIT(ICE_HASH_GTPU_CTX_EH_IP_UDP) |
+ BIT(ICE_HASH_GTPU_CTX_EH_IP_TCP),
+ };
+ unsigned long moveback_mask;
+ int ret;
+ int i;
+
+ if (unlikely(ctx_idx >= ICE_HASH_GTPU_CTX_MAX))
+ return 0;
+
+ ctx->ctx[ctx_idx].addl_hdrs = cfg->addl_hdrs;
+ ctx->ctx[ctx_idx].hash_flds = cfg->hash_flds;
+ ctx->ctx[ctx_idx].hdr_type = cfg->hdr_type;
+ ctx->ctx[ctx_idx].symm = cfg->symm;
+
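+	/* re-add any contexts that were moved out while pre-processing
+	 * this context index
+	 */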
+ moveback_mask = ice_gtpu_moveback_tbl[ctx_idx];
+ for_each_set_bit(i, &moveback_mask, ICE_HASH_GTPU_CTX_MAX) {
+ ret = ice_hash_moveback(vf, &ctx->ctx[i]);
+ if (ret && ret != -ENOENT)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+ice_add_rss_cfg_post(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg)
+{
+ u32 ice_gtpu_ctx_idx = ice_calc_gtpu_ctx_idx(cfg->addl_hdrs);
+ u8 ip_ctx_idx = ice_map_ip_ctx_idx(cfg->addl_hdrs);
+
+ if (ip_ctx_idx && ip_ctx_idx < ICE_HASH_IP_CTX_MAX) {
+ if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4)
+ ice_hash_cfg_record(&vf->hash_ctx.v4.ctx[ip_ctx_idx], cfg);
+ else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6)
+ ice_hash_cfg_record(&vf->hash_ctx.v6.ctx[ip_ctx_idx], cfg);
+ }
+
+ if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4) {
+ return ice_add_rss_cfg_post_gtpu(vf, &vf->hash_ctx.ipv4,
+ cfg, ice_gtpu_ctx_idx);
+ } else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6) {
+ return ice_add_rss_cfg_post_gtpu(vf, &vf->hash_ctx.ipv6,
+ cfg, ice_gtpu_ctx_idx);
+ }
+
+ return 0;
+}
+
+/**
+ * ice_rem_rss_cfg_post - post-process the RSS configuration
+ * @vf: pointer to the VF info
+ * @cfg: pointer to the RSS hash configuration
+ *
+ * Post-process the RSS hash configuration after deleting a hash config.
+ * For example, it resets the hash context for the GTPU hash.
+ */
+static void
+ice_rem_rss_cfg_post(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg)
+{
+ u32 ice_gtpu_ctx_idx = ice_calc_gtpu_ctx_idx(cfg->addl_hdrs);
+ u8 ip_ctx_idx = ice_map_ip_ctx_idx(cfg->addl_hdrs);
+
+ if (ip_ctx_idx && ip_ctx_idx < ICE_HASH_IP_CTX_MAX) {
+ if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4)
+ ice_hash_cfg_reset(&vf->hash_ctx.v4.ctx[ip_ctx_idx]);
+ else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6)
+ ice_hash_cfg_reset(&vf->hash_ctx.v6.ctx[ip_ctx_idx]);
+ }
+
+ if (ice_gtpu_ctx_idx >= ICE_HASH_GTPU_CTX_MAX)
+ return;
+
+ if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4)
+ ice_hash_cfg_reset(&vf->hash_ctx.ipv4.ctx[ice_gtpu_ctx_idx]);
+ else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6)
+ ice_hash_cfg_reset(&vf->hash_ctx.ipv6.ctx[ice_gtpu_ctx_idx]);
+}
+
+/**
+ * ice_rem_rss_cfg_wrap - Wrapper for deleting an RSS configuration
+ * @vf: pointer to the VF info
+ * @cfg: pointer to the RSS hash configuration
+ *
+ * Wrapper function to delete a flow profile based on an RSS configuration,
+ * and also post-process the hash context based on the rollback mechanism
+ * which handles rule conflicts introduced by ice_add_rss_cfg_wrap.
+ *
+ * Return: 0 on success; negative error code on failure.
+ */
+static int
+ice_rem_rss_cfg_wrap(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ struct ice_hw *hw = &vf->pf->hw;
+ int ret;
+
+ ret = ice_rem_rss_cfg(hw, vsi->idx, cfg);
+ /* We just ignore -ENOENT, because if two configurations share the same
+	 * profile, removing one of them actually removes both, since the
+ * profile is deleted.
+ */
+ if (ret && ret != -ENOENT) {
+ dev_err(dev, "ice_rem_rss_cfg failed for VF %d, VSI %d, error:%d\n",
+ vf->vf_id, vf->lan_vsi_idx, ret);
+ return ret;
+ }
+
+ ice_rem_rss_cfg_post(vf, cfg);
+
+ return 0;
+}
+
+/**
+ * ice_add_rss_cfg_wrap - Wrapper for adding an RSS configuration
+ * @vf: pointer to the VF info
+ * @cfg: pointer to the RSS hash configuration
+ *
+ * Add a flow profile based on an RSS configuration. Use a rollback
+ * mechanism to handle rule conflicts caused by the top-down TCAM write
+ * and match order.
+ *
+ * Return: 0 on success; negative error code on failure.
+ */
+static int
+ice_add_rss_cfg_wrap(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ struct ice_hw *hw = &vf->pf->hw;
+ int ret;
+
+ if (ice_add_rss_cfg_pre(vf, cfg))
+ return -EINVAL;
+
+ ret = ice_add_rss_cfg(hw, vsi, cfg);
+ if (ret) {
+ dev_err(dev, "ice_add_rss_cfg failed for VF %d, VSI %d, error:%d\n",
+ vf->vf_id, vf->lan_vsi_idx, ret);
+ return ret;
+ }
+
+ if (ice_add_rss_cfg_post(vf, cfg))
+ ret = -EINVAL;
+
+ return ret;
+}
+
+/**
+ * ice_parse_raw_rss_pattern - Parse raw pattern spec and mask for RSS
+ * @vf: pointer to the VF info
+ * @proto: pointer to the virtchnl protocol header
+ * @raw_cfg: pointer to the RSS raw pattern configuration
+ *
+ * Parser function to get the spec and mask from the virtchnl message, and
+ * parse them into the corresponding profile and offsets. The profile is
+ * used to add the RSS configuration.
+ *
+ * Return: 0 on success; negative error code on failure.
+ */
+static int
+ice_parse_raw_rss_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto,
+ struct ice_rss_raw_cfg *raw_cfg)
+{
+ struct ice_parser_result pkt_parsed;
+ struct ice_hw *hw = &vf->pf->hw;
+ struct ice_parser_profile prof;
+ struct ice_parser *psr;
+ u8 *pkt_buf, *msk_buf;
+ u16 pkt_len;
+ int ret = 0;
+
+ pkt_len = proto->raw.pkt_len;
+ if (!pkt_len)
+ return -EINVAL;
+ if (pkt_len > VIRTCHNL_MAX_SIZE_RAW_PACKET)
+ pkt_len = VIRTCHNL_MAX_SIZE_RAW_PACKET;
+
+ pkt_buf = kzalloc(pkt_len, GFP_KERNEL);
+ msk_buf = kzalloc(pkt_len, GFP_KERNEL);
+ if (!pkt_buf || !msk_buf) {
+ ret = -ENOMEM;
+ goto free_alloc;
+ }
+
+ memcpy(pkt_buf, proto->raw.spec, pkt_len);
+ memcpy(msk_buf, proto->raw.mask, pkt_len);
+
+ psr = ice_parser_create(hw);
+ if (IS_ERR(psr)) {
+ ret = PTR_ERR(psr);
+ goto free_alloc;
+ }
+
+ ret = ice_parser_run(psr, pkt_buf, pkt_len, &pkt_parsed);
+ if (ret)
+ goto parser_destroy;
+
+ ret = ice_parser_profile_init(&pkt_parsed, pkt_buf, msk_buf,
+ pkt_len, ICE_BLK_RSS, &prof);
+ if (ret)
+ goto parser_destroy;
+
+ memcpy(&raw_cfg->prof, &prof, sizeof(prof));
+
+parser_destroy:
+ ice_parser_destroy(psr);
+free_alloc:
+ kfree(pkt_buf);
+ kfree(msk_buf);
+ return ret;
+}
+
+/**
+ * ice_add_raw_rss_cfg - add RSS configuration for raw pattern
+ * @vf: pointer to the VF info
+ * @cfg: pointer to the RSS raw pattern configuration
+ *
+ * This function adds the RSS configuration for a raw pattern.
+ * It checks whether the current profile matches; if not, it removes the
+ * old one and adds the new profile to HW directly. It also updates the
+ * symmetric hash configuration.
+ *
+ * Return: 0 on success; negative error code on failure.
+ */
+static int
+ice_add_raw_rss_cfg(struct ice_vf *vf, struct ice_rss_raw_cfg *cfg)
+{
+ struct ice_parser_profile *prof = &cfg->prof;
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_rss_prof_info *rss_prof;
+ struct ice_hw *hw = &vf->pf->hw;
+ int i, ptg, ret = 0;
+ u16 vsi_handle;
+ u64 id;
+
+ vsi_handle = vf->lan_vsi_idx;
+ id = find_first_bit(prof->ptypes, ICE_FLOW_PTYPE_MAX);
+
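+	/* use the first enabled ptype as the profile ID; its packet type
+	 * group (from XLT1) indexes the per-VF cached profile info
+	 */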
+ ptg = hw->blk[ICE_BLK_RSS].xlt1.t[id];
+ rss_prof = &vf->rss_prof_info[ptg];
+
+ /* check if ptg already has a profile */
+ if (rss_prof->prof.fv_num) {
+ for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
+ if (rss_prof->prof.fv[i].proto_id !=
+ prof->fv[i].proto_id ||
+ rss_prof->prof.fv[i].offset !=
+ prof->fv[i].offset)
+ break;
+ }
+
+ /* current profile is matched, check symmetric hash */
+ if (i == ICE_MAX_FV_WORDS) {
+ if (rss_prof->symm != cfg->symm)
+ goto update_symm;
+ return ret;
+ }
+
+ /* current profile is not matched, remove it */
+		ret = ice_rem_prof_id_flow(hw, ICE_BLK_RSS,
+					   ice_get_hw_vsi_num(hw, vsi_handle),
+					   id);
+ if (ret) {
+ dev_err(dev, "remove RSS flow failed\n");
+ return ret;
+ }
+
+ ret = ice_rem_prof(hw, ICE_BLK_RSS, id);
+ if (ret) {
+ dev_err(dev, "remove RSS profile failed\n");
+ return ret;
+ }
+ }
+
+ /* add new profile */
+ ret = ice_flow_set_parser_prof(hw, vsi_handle, 0, prof, ICE_BLK_RSS);
+ if (ret) {
+ dev_err(dev, "HW profile add failed\n");
+ return ret;
+ }
+
+ memcpy(&rss_prof->prof, prof, sizeof(struct ice_parser_profile));
+
+update_symm:
+ rss_prof->symm = cfg->symm;
+ ice_rss_update_raw_symm(hw, cfg, id);
+ return ret;
+}
+
+/**
+ * ice_rem_raw_rss_cfg - remove RSS configuration for raw pattern
+ * @vf: pointer to the VF info
+ * @cfg: pointer to the RSS raw pattern configuration
+ *
+ * This function removes the RSS configuration for a raw pattern.
+ * It first checks whether the VSI group has already been removed; if not,
+ * it removes the profile.
+ *
+ * Return: 0 on success; negative error code on failure.
+ */
+static int
+ice_rem_raw_rss_cfg(struct ice_vf *vf, struct ice_rss_raw_cfg *cfg)
+{
+ struct ice_parser_profile *prof = &cfg->prof;
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_hw *hw = &vf->pf->hw;
+ int ptg, ret = 0;
+ u16 vsig, vsi;
+ u64 id;
+
+ id = find_first_bit(prof->ptypes, ICE_FLOW_PTYPE_MAX);
+
+ ptg = hw->blk[ICE_BLK_RSS].xlt1.t[id];
+
+ memset(&vf->rss_prof_info[ptg], 0,
+ sizeof(struct ice_rss_prof_info));
+
+ /* check if vsig is already removed */
+ vsi = ice_get_hw_vsi_num(hw, vf->lan_vsi_idx);
+ if (vsi >= ICE_MAX_VSI) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ vsig = hw->blk[ICE_BLK_RSS].xlt2.vsis[vsi].vsig;
+ if (vsig) {
+ ret = ice_rem_prof_id_flow(hw, ICE_BLK_RSS, vsi, id);
+ if (ret)
+ goto err;
+
+ ret = ice_rem_prof(hw, ICE_BLK_RSS, id);
+ if (ret)
+ goto err;
+ }
+
+ return ret;
+
+err:
+ dev_err(dev, "HW profile remove failed\n");
+ return ret;
+}
+
+/**
+ * ice_vc_handle_rss_cfg - add or delete an RSS configuration
+ * @vf: pointer to the VF info
+ * @msg: pointer to the message buffer
+ * @add: add an RSS config if true, otherwise delete an RSS config
+ *
+ * This function adds/deletes an RSS config
+ */
+int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
+{
+ u32 v_opcode = add ? VIRTCHNL_OP_ADD_RSS_CFG : VIRTCHNL_OP_DEL_RSS_CFG;
+ struct virtchnl_rss_cfg *rss_cfg = (struct virtchnl_rss_cfg *)msg;
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_hw *hw = &vf->pf->hw;
+ struct ice_vsi *vsi;
+ u8 hash_type;
+ bool symm;
+ int ret;
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+ dev_dbg(dev, "VF %d attempting to configure RSS, but RSS is not supported by the PF\n",
+ vf->vf_id);
+ v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
+ goto error_param;
+ }
+
+ if (!ice_vf_adv_rss_offload_ena(vf->driver_caps)) {
+ dev_dbg(dev, "VF %d attempting to configure RSS, but Advanced RSS offload is not supported\n",
+ vf->vf_id);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (rss_cfg->proto_hdrs.count > VIRTCHNL_MAX_NUM_PROTO_HDRS ||
+ rss_cfg->rss_algorithm < VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC ||
+ rss_cfg->rss_algorithm > VIRTCHNL_RSS_ALG_XOR_SYMMETRIC) {
+ dev_dbg(dev, "VF %d attempting to configure RSS, but RSS configuration is not valid\n",
+ vf->vf_id);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_R_ASYMMETRIC) {
+ hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_HASH_XOR :
+ ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
+
+ ret = ice_vc_rss_hash_update(hw, vsi, hash_type);
+ if (ret)
+ v_ret = ice_err_to_virt_err(ret);
+ goto error_param;
+ }
+
+ hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ :
+ ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
+ ret = ice_vc_rss_hash_update(hw, vsi, hash_type);
+ if (ret) {
+ v_ret = ice_err_to_virt_err(ret);
+ goto error_param;
+ }
+
+ symm = rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC;
+ /* Configure RSS hash for raw pattern */
+ if (rss_cfg->proto_hdrs.tunnel_level == 0 &&
+ rss_cfg->proto_hdrs.count == 0) {
+ struct ice_rss_raw_cfg raw_cfg;
+
+ if (ice_parse_raw_rss_pattern(vf, &rss_cfg->proto_hdrs,
+ &raw_cfg)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (add) {
+ raw_cfg.symm = symm;
+ if (ice_add_raw_rss_cfg(vf, &raw_cfg))
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ } else {
+ if (ice_rem_raw_rss_cfg(vf, &raw_cfg))
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ }
+ } else {
+ struct ice_rss_hash_cfg cfg;
+
+		/* Only check the pattern for the non-raw case */
+ if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+ cfg.addl_hdrs = ICE_FLOW_SEG_HDR_NONE;
+ cfg.hash_flds = ICE_HASH_INVALID;
+ cfg.hdr_type = ICE_RSS_ANY_HEADERS;
+
+ if (!ice_vc_parse_rss_cfg(hw, rss_cfg, &cfg)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (add) {
+ cfg.symm = symm;
+ if (ice_add_rss_cfg_wrap(vf, &cfg))
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ } else {
+ if (ice_rem_rss_cfg_wrap(vf, &cfg))
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ }
+ }
+
+error_param:
+ return ice_vc_send_msg_to_vf(vf, v_opcode, v_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_config_rss_key
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Configure the VF's RSS key
+ */
+int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_rss_key *vrk =
+ (struct virtchnl_rss_key *)msg;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (ice_set_rss_key(vsi, vrk->key))
+ v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
+error_param:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vc_config_rss_lut
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Configure the VF's RSS LUT
+ */
+int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (vrl->lut_entries != ICE_LUT_VSI_SIZE) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (ice_set_rss_lut(vsi, vrl->lut, ICE_LUT_VSI_SIZE))
+ v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
+error_param:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vc_config_rss_hfunc
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Configure the VF's RSS Hash function
+ */
+int ice_vc_config_rss_hfunc(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_rss_hfunc *vrh = (struct virtchnl_rss_hfunc *)msg;
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ u8 hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vrh->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (vrh->rss_algorithm == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC)
+ hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ;
+
+ if (ice_set_rss_hfunc(vsi, hfunc))
+ v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
+error_param:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_HFUNC, v_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vc_get_rss_hashcfg - return the RSS Hash configuration
+ * @vf: pointer to the VF info
+ */
+int ice_vc_get_rss_hashcfg(struct ice_vf *vf)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_rss_hashcfg *vrh = NULL;
+ int len = 0, ret;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+ dev_err(ice_pf_to_dev(vf->pf), "RSS not supported by PF\n");
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ len = sizeof(struct virtchnl_rss_hashcfg);
+ vrh = kzalloc(len, GFP_KERNEL);
+ if (!vrh) {
+ v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ len = 0;
+ goto err;
+ }
+
+ vrh->hashcfg = ICE_DEFAULT_RSS_HASHCFG;
+err:
+ /* send the response back to the VF */
+ ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS, v_ret,
+ (u8 *)vrh, len);
+ kfree(vrh);
+ return ret;
+}
+
+/**
+ * ice_vc_set_rss_hashcfg - set RSS Hash configuration bits for the VF
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ */
+int ice_vc_set_rss_hashcfg(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_rss_hashcfg *vrh = (struct virtchnl_rss_hashcfg *)msg;
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+ struct device *dev;
+ int status;
+
+ dev = ice_pf_to_dev(pf);
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
+ dev_err(dev, "RSS not supported by PF\n");
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ /* clear all previously programmed RSS configuration to allow VF drivers
+ * the ability to customize the RSS configuration and/or completely
+ * disable RSS
+ */
+ status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx);
+ if (status && !vrh->hashcfg) {
+ /* only report failure to clear the current RSS configuration if
+ * that was clearly the VF's intention (i.e. vrh->hashcfg = 0)
+ */
+ v_ret = ice_err_to_virt_err(status);
+ goto err;
+ } else if (status) {
+ /* allow the VF to update the RSS configuration even on failure
+		 * to clear the current RSS configuration in an attempt to keep
+ * RSS in a working state
+ */
+ dev_warn(dev, "Failed to clear the RSS configuration for VF %u\n",
+ vf->vf_id);
+ }
+
+ if (vrh->hashcfg) {
+ status = ice_add_avf_rss_cfg(&pf->hw, vsi, vrh->hashcfg);
+ v_ret = ice_err_to_virt_err(status);
+ }
+
+ /* save the requested VF configuration */
+ if (!v_ret)
+ vf->rss_hashcfg = vrh->hashcfg;
+
+ /* send the response to the VF */
+err:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_SET_RSS_HASHCFG, v_ret,
+ NULL, 0);
+}
+
diff --git a/drivers/net/ethernet/intel/ice/virt/rss.h b/drivers/net/ethernet/intel/ice/virt/rss.h
new file mode 100644
index 000000000000..784d4c43ce8b
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/virt/rss.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2022, Intel Corporation. */
+
+#ifndef _ICE_VIRT_RSS_H_
+#define _ICE_VIRT_RSS_H_
+
+#include <linux/types.h>
+
+struct ice_vf;
+
+int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add);
+int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg);
+int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg);
+int ice_vc_config_rss_hfunc(struct ice_vf *vf, u8 *msg);
+int ice_vc_get_rss_hashcfg(struct ice_vf *vf);
+int ice_vc_set_rss_hashcfg(struct ice_vf *vf, u8 *msg);
+
+#endif /* _ICE_VIRT_RSS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/virt/virtchnl.c
index 1ff9818b4c84..f3f921134379 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/virt/virtchnl.c
@@ -1,170 +1,20 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2022, Intel Corporation. */
-#include "ice_virtchnl.h"
+#include "virtchnl.h"
+#include "queues.h"
+#include "rss.h"
#include "ice_vf_lib_private.h"
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
-#include "ice_virtchnl_allowlist.h"
+#include "allowlist.h"
#include "ice_vf_vsi_vlan_ops.h"
#include "ice_vlan.h"
#include "ice_flex_pipe.h"
#include "ice_dcb_lib.h"
-#define FIELD_SELECTOR(proto_hdr_field) \
- BIT((proto_hdr_field) & PROTO_HDR_FIELD_MASK)
-
-struct ice_vc_hdr_match_type {
- u32 vc_hdr; /* virtchnl headers (VIRTCHNL_PROTO_HDR_XXX) */
- u32 ice_hdr; /* ice headers (ICE_FLOW_SEG_HDR_XXX) */
-};
-
-static const struct ice_vc_hdr_match_type ice_vc_hdr_list[] = {
- {VIRTCHNL_PROTO_HDR_NONE, ICE_FLOW_SEG_HDR_NONE},
- {VIRTCHNL_PROTO_HDR_ETH, ICE_FLOW_SEG_HDR_ETH},
- {VIRTCHNL_PROTO_HDR_S_VLAN, ICE_FLOW_SEG_HDR_VLAN},
- {VIRTCHNL_PROTO_HDR_C_VLAN, ICE_FLOW_SEG_HDR_VLAN},
- {VIRTCHNL_PROTO_HDR_IPV4, ICE_FLOW_SEG_HDR_IPV4 |
- ICE_FLOW_SEG_HDR_IPV_OTHER},
- {VIRTCHNL_PROTO_HDR_IPV6, ICE_FLOW_SEG_HDR_IPV6 |
- ICE_FLOW_SEG_HDR_IPV_OTHER},
- {VIRTCHNL_PROTO_HDR_TCP, ICE_FLOW_SEG_HDR_TCP},
- {VIRTCHNL_PROTO_HDR_UDP, ICE_FLOW_SEG_HDR_UDP},
- {VIRTCHNL_PROTO_HDR_SCTP, ICE_FLOW_SEG_HDR_SCTP},
- {VIRTCHNL_PROTO_HDR_PPPOE, ICE_FLOW_SEG_HDR_PPPOE},
- {VIRTCHNL_PROTO_HDR_GTPU_IP, ICE_FLOW_SEG_HDR_GTPU_IP},
- {VIRTCHNL_PROTO_HDR_GTPU_EH, ICE_FLOW_SEG_HDR_GTPU_EH},
- {VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
- ICE_FLOW_SEG_HDR_GTPU_DWN},
- {VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
- ICE_FLOW_SEG_HDR_GTPU_UP},
- {VIRTCHNL_PROTO_HDR_L2TPV3, ICE_FLOW_SEG_HDR_L2TPV3},
- {VIRTCHNL_PROTO_HDR_ESP, ICE_FLOW_SEG_HDR_ESP},
- {VIRTCHNL_PROTO_HDR_AH, ICE_FLOW_SEG_HDR_AH},
- {VIRTCHNL_PROTO_HDR_PFCP, ICE_FLOW_SEG_HDR_PFCP_SESSION},
-};
-
-struct ice_vc_hash_field_match_type {
- u32 vc_hdr; /* virtchnl headers
- * (VIRTCHNL_PROTO_HDR_XXX)
- */
- u32 vc_hash_field; /* virtchnl hash fields selector
- * FIELD_SELECTOR((VIRTCHNL_PROTO_HDR_ETH_XXX))
- */
- u64 ice_hash_field; /* ice hash fields
- * (BIT_ULL(ICE_FLOW_FIELD_IDX_XXX))
- */
-};
-
-static const struct
-ice_vc_hash_field_match_type ice_vc_hash_field_list[] = {
- {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC),
- BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA)},
- {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
- BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA)},
- {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
- ICE_FLOW_HASH_ETH},
- {VIRTCHNL_PROTO_HDR_ETH,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE),
- BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_TYPE)},
- {VIRTCHNL_PROTO_HDR_S_VLAN,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_S_VLAN_ID),
- BIT_ULL(ICE_FLOW_FIELD_IDX_S_VLAN)},
- {VIRTCHNL_PROTO_HDR_C_VLAN,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_C_VLAN_ID),
- BIT_ULL(ICE_FLOW_FIELD_IDX_C_VLAN)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
- ICE_FLOW_HASH_IPV4},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
- ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
- ICE_FLOW_HASH_IPV6},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) |
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) |
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
- ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
- {VIRTCHNL_PROTO_HDR_TCP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)},
- {VIRTCHNL_PROTO_HDR_TCP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)},
- {VIRTCHNL_PROTO_HDR_TCP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
- ICE_FLOW_HASH_TCP_PORT},
- {VIRTCHNL_PROTO_HDR_UDP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)},
- {VIRTCHNL_PROTO_HDR_UDP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)},
- {VIRTCHNL_PROTO_HDR_UDP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
- ICE_FLOW_HASH_UDP_PORT},
- {VIRTCHNL_PROTO_HDR_SCTP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)},
- {VIRTCHNL_PROTO_HDR_SCTP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)},
- {VIRTCHNL_PROTO_HDR_SCTP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
- ICE_FLOW_HASH_SCTP_PORT},
- {VIRTCHNL_PROTO_HDR_PPPOE,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID),
- BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID)},
- {VIRTCHNL_PROTO_HDR_GTPU_IP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_GTPU_IP_TEID),
- BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID)},
- {VIRTCHNL_PROTO_HDR_L2TPV3,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID),
- BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID)},
- {VIRTCHNL_PROTO_HDR_ESP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ESP_SPI),
- BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI)},
- {VIRTCHNL_PROTO_HDR_AH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_AH_SPI),
- BIT_ULL(ICE_FLOW_FIELD_IDX_AH_SPI)},
- {VIRTCHNL_PROTO_HDR_PFCP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PFCP_SEID),
- BIT_ULL(ICE_FLOW_FIELD_IDX_PFCP_SEID)},
-};
-
/**
* ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
* @pf: pointer to the PF structure
@@ -304,10 +154,10 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
msg, msglen, NULL);
- if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
+ if (aq_ret && pf->hw.mailboxq.sq_last_status != LIBIE_AQ_RC_ENOSYS) {
dev_info(dev, "Unable to send the message to VF %d ret %d aq_err %s\n",
vf->vf_id, aq_ret,
- ice_aq_str(pf->hw.mailboxq.sq_last_status));
+ libie_aq_str(pf->hw.mailboxq.sq_last_status));
return -EIO;
}
@@ -338,28 +188,6 @@ static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
}
/**
- * ice_vc_get_max_frame_size - get max frame size allowed for VF
- * @vf: VF used to determine max frame size
- *
- * Max frame size is determined based on the current port's max frame size and
- * whether a port VLAN is configured on this VF. The VF is not aware whether
- * it's in a port VLAN so the PF needs to account for this in max frame size
- * checks and sending the max frame size to the VF.
- */
-static u16 ice_vc_get_max_frame_size(struct ice_vf *vf)
-{
- struct ice_port_info *pi = ice_vf_get_port_info(vf);
- u16 max_frame_size;
-
- max_frame_size = pi->phy.link_info.max_frame_size;
-
- if (ice_vf_is_port_vlan_ena(vf))
- max_frame_size -= VLAN_HLEN;
-
- return max_frame_size;
-}
-
-/**
* ice_vc_get_vlan_caps
* @hw: pointer to the hw
* @vf: pointer to the VF info
@@ -461,6 +289,10 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_FDIR_PF;
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_TC_U32 &&
+ vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_TC_U32;
+
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
@@ -491,6 +323,12 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_USO;
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_QOS)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_QOS;
+
+ if (vf->driver_caps & VIRTCHNL_VF_CAP_PTP)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_PTP;
+
vfres->num_vsis = 1;
/* Tx and Rx queue are equal for VF */
vfres->num_queue_pairs = vsi->num_txq;
@@ -549,485 +387,84 @@ bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
}
/**
- * ice_vc_isvalid_q_id
- * @vsi: VSI to check queue ID against
- * @qid: VSI relative queue ID
- *
- * check for the valid queue ID
- */
-static bool ice_vc_isvalid_q_id(struct ice_vsi *vsi, u8 qid)
-{
- /* allocated Tx and Rx queues should be always equal for VF VSI */
- return qid < vsi->alloc_txq;
-}
-
-/**
- * ice_vc_isvalid_ring_len
- * @ring_len: length of ring
- *
- * check for the valid ring count, should be multiple of ICE_REQ_DESC_MULTIPLE
- * or zero
- */
-static bool ice_vc_isvalid_ring_len(u16 ring_len)
-{
- return ring_len == 0 ||
- (ring_len >= ICE_MIN_NUM_DESC &&
- ring_len <= ICE_MAX_NUM_DESC &&
- !(ring_len % ICE_REQ_DESC_MULTIPLE));
-}
-
-/**
- * ice_vc_validate_pattern
+ * ice_vc_get_qos_caps - Get current QoS caps from PF
* @vf: pointer to the VF info
- * @proto: virtchnl protocol headers
- *
- * validate the pattern is supported or not.
- *
- * Return: true on success, false on error.
- */
-bool
-ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto)
-{
- bool is_ipv4 = false;
- bool is_ipv6 = false;
- bool is_udp = false;
- u16 ptype = -1;
- int i = 0;
-
- while (i < proto->count &&
- proto->proto_hdr[i].type != VIRTCHNL_PROTO_HDR_NONE) {
- switch (proto->proto_hdr[i].type) {
- case VIRTCHNL_PROTO_HDR_ETH:
- ptype = ICE_PTYPE_MAC_PAY;
- break;
- case VIRTCHNL_PROTO_HDR_IPV4:
- ptype = ICE_PTYPE_IPV4_PAY;
- is_ipv4 = true;
- break;
- case VIRTCHNL_PROTO_HDR_IPV6:
- ptype = ICE_PTYPE_IPV6_PAY;
- is_ipv6 = true;
- break;
- case VIRTCHNL_PROTO_HDR_UDP:
- if (is_ipv4)
- ptype = ICE_PTYPE_IPV4_UDP_PAY;
- else if (is_ipv6)
- ptype = ICE_PTYPE_IPV6_UDP_PAY;
- is_udp = true;
- break;
- case VIRTCHNL_PROTO_HDR_TCP:
- if (is_ipv4)
- ptype = ICE_PTYPE_IPV4_TCP_PAY;
- else if (is_ipv6)
- ptype = ICE_PTYPE_IPV6_TCP_PAY;
- break;
- case VIRTCHNL_PROTO_HDR_SCTP:
- if (is_ipv4)
- ptype = ICE_PTYPE_IPV4_SCTP_PAY;
- else if (is_ipv6)
- ptype = ICE_PTYPE_IPV6_SCTP_PAY;
- break;
- case VIRTCHNL_PROTO_HDR_GTPU_IP:
- case VIRTCHNL_PROTO_HDR_GTPU_EH:
- if (is_ipv4)
- ptype = ICE_MAC_IPV4_GTPU;
- else if (is_ipv6)
- ptype = ICE_MAC_IPV6_GTPU;
- goto out;
- case VIRTCHNL_PROTO_HDR_L2TPV3:
- if (is_ipv4)
- ptype = ICE_MAC_IPV4_L2TPV3;
- else if (is_ipv6)
- ptype = ICE_MAC_IPV6_L2TPV3;
- goto out;
- case VIRTCHNL_PROTO_HDR_ESP:
- if (is_ipv4)
- ptype = is_udp ? ICE_MAC_IPV4_NAT_T_ESP :
- ICE_MAC_IPV4_ESP;
- else if (is_ipv6)
- ptype = is_udp ? ICE_MAC_IPV6_NAT_T_ESP :
- ICE_MAC_IPV6_ESP;
- goto out;
- case VIRTCHNL_PROTO_HDR_AH:
- if (is_ipv4)
- ptype = ICE_MAC_IPV4_AH;
- else if (is_ipv6)
- ptype = ICE_MAC_IPV6_AH;
- goto out;
- case VIRTCHNL_PROTO_HDR_PFCP:
- if (is_ipv4)
- ptype = ICE_MAC_IPV4_PFCP_SESSION;
- else if (is_ipv6)
- ptype = ICE_MAC_IPV6_PFCP_SESSION;
- goto out;
- default:
- break;
- }
- i++;
- }
-
-out:
- return ice_hw_ptype_ena(&vf->pf->hw, ptype);
-}
-
-/**
- * ice_vc_parse_rss_cfg - parses hash fields and headers from
- * a specific virtchnl RSS cfg
- * @hw: pointer to the hardware
- * @rss_cfg: pointer to the virtchnl RSS cfg
- * @hash_cfg: pointer to the HW hash configuration
- *
- * Return true if all the protocol header and hash fields in the RSS cfg could
- * be parsed, else return false
- *
- * This function parses the virtchnl RSS cfg to be the intended
- * hash fields and the intended header for RSS configuration
- */
-static bool ice_vc_parse_rss_cfg(struct ice_hw *hw,
- struct virtchnl_rss_cfg *rss_cfg,
- struct ice_rss_hash_cfg *hash_cfg)
-{
- const struct ice_vc_hash_field_match_type *hf_list;
- const struct ice_vc_hdr_match_type *hdr_list;
- int i, hf_list_len, hdr_list_len;
- u32 *addl_hdrs = &hash_cfg->addl_hdrs;
- u64 *hash_flds = &hash_cfg->hash_flds;
-
- /* set outer layer RSS as default */
- hash_cfg->hdr_type = ICE_RSS_OUTER_HEADERS;
-
- if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC)
- hash_cfg->symm = true;
- else
- hash_cfg->symm = false;
-
- hf_list = ice_vc_hash_field_list;
- hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list);
- hdr_list = ice_vc_hdr_list;
- hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list);
-
- for (i = 0; i < rss_cfg->proto_hdrs.count; i++) {
- struct virtchnl_proto_hdr *proto_hdr =
- &rss_cfg->proto_hdrs.proto_hdr[i];
- bool hdr_found = false;
- int j;
-
- /* Find matched ice headers according to virtchnl headers. */
- for (j = 0; j < hdr_list_len; j++) {
- struct ice_vc_hdr_match_type hdr_map = hdr_list[j];
-
- if (proto_hdr->type == hdr_map.vc_hdr) {
- *addl_hdrs |= hdr_map.ice_hdr;
- hdr_found = true;
- }
- }
-
- if (!hdr_found)
- return false;
-
- /* Find matched ice hash fields according to
- * virtchnl hash fields.
- */
- for (j = 0; j < hf_list_len; j++) {
- struct ice_vc_hash_field_match_type hf_map = hf_list[j];
-
- if (proto_hdr->type == hf_map.vc_hdr &&
- proto_hdr->field_selector == hf_map.vc_hash_field) {
- *hash_flds |= hf_map.ice_hash_field;
- break;
- }
- }
- }
-
- return true;
-}
-
-/**
- * ice_vf_adv_rss_offload_ena - determine if capabilities support advanced
- * RSS offloads
- * @caps: VF driver negotiated capabilities
*
- * Return true if VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF capability is set,
- * else return false
- */
-static bool ice_vf_adv_rss_offload_ena(u32 caps)
-{
- return !!(caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF);
-}
-
-/**
- * ice_vc_handle_rss_cfg
- * @vf: pointer to the VF info
- * @msg: pointer to the message buffer
- * @add: add a RSS config if true, otherwise delete a RSS config
+ * Get VF's QoS capabilities, such as TC number, arbiter and
+ * bandwidth from PF.
*
- * This function adds/deletes a RSS config
+ * Return: 0 on success or negative error value.
*/
-static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
+static int ice_vc_get_qos_caps(struct ice_vf *vf)
{
- u32 v_opcode = add ? VIRTCHNL_OP_ADD_RSS_CFG : VIRTCHNL_OP_DEL_RSS_CFG;
- struct virtchnl_rss_cfg *rss_cfg = (struct virtchnl_rss_cfg *)msg;
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct device *dev = ice_pf_to_dev(vf->pf);
- struct ice_hw *hw = &vf->pf->hw;
- struct ice_vsi *vsi;
-
- if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
- dev_dbg(dev, "VF %d attempting to configure RSS, but RSS is not supported by the PF\n",
- vf->vf_id);
- v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
- goto error_param;
- }
-
- if (!ice_vf_adv_rss_offload_ena(vf->driver_caps)) {
- dev_dbg(dev, "VF %d attempting to configure RSS, but Advanced RSS offload is not supported\n",
- vf->vf_id);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (rss_cfg->proto_hdrs.count > VIRTCHNL_MAX_NUM_PROTO_HDRS ||
- rss_cfg->rss_algorithm < VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC ||
- rss_cfg->rss_algorithm > VIRTCHNL_RSS_ALG_XOR_SYMMETRIC) {
- dev_dbg(dev, "VF %d attempting to configure RSS, but RSS configuration is not valid\n",
- vf->vf_id);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_R_ASYMMETRIC) {
- struct ice_vsi_ctx *ctx;
- u8 lut_type, hash_type;
- int status;
-
- lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
- hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_HASH_XOR :
- ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
-
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (!ctx) {
- v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
- goto error_param;
- }
-
- ctx->info.q_opt_rss =
- FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_LUT_M, lut_type) |
- FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hash_type);
-
- /* Preserve existing queueing option setting */
- ctx->info.q_opt_rss |= (vsi->info.q_opt_rss &
- ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M);
- ctx->info.q_opt_tc = vsi->info.q_opt_tc;
- ctx->info.q_opt_flags = vsi->info.q_opt_rss;
-
- ctx->info.valid_sections =
- cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
-
- status = ice_update_vsi(hw, vsi->idx, ctx, NULL);
- if (status) {
- dev_err(dev, "update VSI for RSS failed, err %d aq_err %s\n",
- status, ice_aq_str(hw->adminq.sq_last_status));
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- } else {
- vsi->info.q_opt_rss = ctx->info.q_opt_rss;
- }
-
- kfree(ctx);
- } else {
- struct ice_rss_hash_cfg cfg;
-
- /* Only check for none raw pattern case */
- if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
- cfg.addl_hdrs = ICE_FLOW_SEG_HDR_NONE;
- cfg.hash_flds = ICE_HASH_INVALID;
- cfg.hdr_type = ICE_RSS_ANY_HEADERS;
-
- if (!ice_vc_parse_rss_cfg(hw, rss_cfg, &cfg)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (add) {
- if (ice_add_rss_cfg(hw, vsi, &cfg)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- dev_err(dev, "ice_add_rss_cfg failed for vsi = %d, v_ret = %d\n",
- vsi->vsi_num, v_ret);
- }
- } else {
- int status;
-
- status = ice_rem_rss_cfg(hw, vsi->idx, &cfg);
- /* We just ignore -ENOENT, because if two configurations
- * share the same profile remove one of them actually
- * removes both, since the profile is deleted.
- */
- if (status && status != -ENOENT) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- dev_err(dev, "ice_rem_rss_cfg failed for VF ID:%d, error:%d\n",
- vf->vf_id, status);
- }
- }
- }
-
-error_param:
- return ice_vc_send_msg_to_vf(vf, v_opcode, v_ret, NULL, 0);
-}
-
-/**
- * ice_vc_config_rss_key
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * Configure the VF's RSS key
- */
-static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_rss_key *vrk =
- (struct virtchnl_rss_key *)msg;
+ struct virtchnl_qos_cap_list *cap_list = NULL;
+ u8 tc_prio[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+ struct virtchnl_qos_cap_elem *cfg = NULL;
+ struct ice_vsi_ctx *vsi_ctx;
+ struct ice_pf *pf = vf->pf;
+ struct ice_port_info *pi;
struct ice_vsi *vsi;
+ u8 numtc, tc;
+ u16 len = 0;
+ int ret, i;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
+ goto err;
}
vsi = ice_get_vf_vsi(vf);
if (!vsi) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (ice_set_rss_key(vsi, vrk->key))
- v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
-error_param:
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
- NULL, 0);
-}
-
-/**
- * ice_vc_config_rss_lut
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * Configure the VF's RSS LUT
- */
-static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
-{
- struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct ice_vsi *vsi;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (vrl->lut_entries != ICE_LUT_VSI_SIZE) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
+ goto err;
}
- if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
+ pi = pf->hw.port_info;
+ numtc = vsi->tc_cfg.numtc;
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
+ vsi_ctx = ice_get_vsi_ctx(pi->hw, vf->lan_vsi_idx);
+ if (!vsi_ctx) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
+ goto err;
}
- if (ice_set_rss_lut(vsi, vrl->lut, ICE_LUT_VSI_SIZE))
- v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
-error_param:
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
- NULL, 0);
-}
-
-/**
- * ice_vc_config_rss_hfunc
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * Configure the VF's RSS Hash function
- */
-static int ice_vc_config_rss_hfunc(struct ice_vf *vf, u8 *msg)
-{
- struct virtchnl_rss_hfunc *vrh = (struct virtchnl_rss_hfunc *)msg;
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- u8 hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
- struct ice_vsi *vsi;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
+ len = struct_size(cap_list, cap, numtc);
+ cap_list = kzalloc(len, GFP_KERNEL);
+ if (!cap_list) {
+ v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ len = 0;
+ goto err;
}
- if (!ice_vc_isvalid_vsi_id(vf, vrh->vsi_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
+ cap_list->vsi_id = vsi->vsi_num;
+ cap_list->num_elem = numtc;
- if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
+ /* Store the UP2TC configuration from DCB into a per-TC user priority
+ * bitmap. Each element of tc_prio represents one TC; each bitmap
+ * indicates which user priorities belong to that TC.
+ */
+ for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
+ tc = pi->qos_cfg.local_dcbx_cfg.etscfg.prio_table[i];
+ tc_prio[tc] |= BIT(i);
}
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
+ for (i = 0; i < numtc; i++) {
+ cfg = &cap_list->cap[i];
+ cfg->tc_num = i;
+ cfg->tc_prio = tc_prio[i];
+ cfg->arbiter = pi->qos_cfg.local_dcbx_cfg.etscfg.tsatable[i];
+ cfg->weight = VIRTCHNL_STRICT_WEIGHT;
+ cfg->type = VIRTCHNL_BW_SHAPER;
+ cfg->shaper.committed = vsi_ctx->sched.bw_t_info[i].cir_bw.bw;
+ cfg->shaper.peak = vsi_ctx->sched.bw_t_info[i].eir_bw.bw;
}
- if (vrh->rss_algorithm == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC)
- hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ;
-
- if (ice_set_rss_hfunc(vsi, hfunc))
- v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
-error_param:
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_HFUNC, v_ret,
- NULL, 0);
+err:
+ ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_QOS_CAPS, v_ret,
+ (u8 *)cap_list, len);
+ kfree(cap_list);
+ return ret;
}
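For illustration only (a standalone sketch, not part of the patch): the UP2TC loop in ice_vc_get_qos_caps() above collapses the DCB priority table into one user-priority bitmap per TC. With a made-up priority table the same logic behaves as follows:

/* Sketch: build per-TC user priority bitmaps from a hypothetical UP2TC table. */
#include <stdio.h>

int main(void)
{
	unsigned char prio_table[8] = { 0, 0, 1, 1, 2, 2, 0, 0 };	/* UP -> TC */
	unsigned char tc_prio[8] = { 0 };				/* TC -> UP bitmap */
	int i;

	for (i = 0; i < 8; i++)
		tc_prio[prio_table[i]] |= 1u << i;

	for (i = 0; i < 3; i++)
		printf("TC%d 0x%02x\n", i, tc_prio[i]);	/* TC0 0xc3, TC1 0x0c, TC2 0x30 */

	return 0;
}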
/**
@@ -1212,611 +649,6 @@ error_param:
}
/**
- * ice_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
- * @vqs: virtchnl_queue_select structure containing bitmaps to validate
- *
- * Return true on successful validation, else false
- */
-static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
-{
- if ((!vqs->rx_queues && !vqs->tx_queues) ||
- vqs->rx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF) ||
- vqs->tx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF))
- return false;
-
- return true;
-}
-
-/**
- * ice_vf_ena_txq_interrupt - enable Tx queue interrupt via QINT_TQCTL
- * @vsi: VSI of the VF to configure
- * @q_idx: VF queue index used to determine the queue in the PF's space
- */
-static void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
-{
- struct ice_hw *hw = &vsi->back->hw;
- u32 pfq = vsi->txq_map[q_idx];
- u32 reg;
-
- reg = rd32(hw, QINT_TQCTL(pfq));
-
- /* MSI-X index 0 in the VF's space is always for the OICR, which means
- * this is most likely a poll mode VF driver, so don't enable an
- * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
- */
- if (!(reg & QINT_TQCTL_MSIX_INDX_M))
- return;
-
- wr32(hw, QINT_TQCTL(pfq), reg | QINT_TQCTL_CAUSE_ENA_M);
-}
-
-/**
- * ice_vf_ena_rxq_interrupt - enable Tx queue interrupt via QINT_RQCTL
- * @vsi: VSI of the VF to configure
- * @q_idx: VF queue index used to determine the queue in the PF's space
- */
-static void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx)
-{
- struct ice_hw *hw = &vsi->back->hw;
- u32 pfq = vsi->rxq_map[q_idx];
- u32 reg;
-
- reg = rd32(hw, QINT_RQCTL(pfq));
-
- /* MSI-X index 0 in the VF's space is always for the OICR, which means
- * this is most likely a poll mode VF driver, so don't enable an
- * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
- */
- if (!(reg & QINT_RQCTL_MSIX_INDX_M))
- return;
-
- wr32(hw, QINT_RQCTL(pfq), reg | QINT_RQCTL_CAUSE_ENA_M);
-}
-
-/**
- * ice_vc_ena_qs_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * called from the VF to enable all or specific queue(s)
- */
-static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_queue_select *vqs =
- (struct virtchnl_queue_select *)msg;
- struct ice_vsi *vsi;
- unsigned long q_map;
- u16 vf_q_id;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_validate_vqs_bitmaps(vqs)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* Enable only Rx rings, Tx rings were enabled by the FW when the
- * Tx queue group list was configured and the context bits were
- * programmed using ice_vsi_cfg_txqs
- */
- q_map = vqs->rx_queues;
- for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* Skip queue if enabled */
- if (test_bit(vf_q_id, vf->rxq_ena))
- continue;
-
- if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) {
- dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
- vf_q_id, vsi->vsi_num);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- ice_vf_ena_rxq_interrupt(vsi, vf_q_id);
- set_bit(vf_q_id, vf->rxq_ena);
- }
-
- q_map = vqs->tx_queues;
- for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* Skip queue if enabled */
- if (test_bit(vf_q_id, vf->txq_ena))
- continue;
-
- ice_vf_ena_txq_interrupt(vsi, vf_q_id);
- set_bit(vf_q_id, vf->txq_ena);
- }
-
- /* Set flag to indicate that queues are enabled */
- if (v_ret == VIRTCHNL_STATUS_SUCCESS)
- set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
-
-error_param:
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
- NULL, 0);
-}
-
-/**
- * ice_vf_vsi_dis_single_txq - disable a single Tx queue
- * @vf: VF to disable queue for
- * @vsi: VSI for the VF
- * @q_id: VF relative (0-based) queue ID
- *
- * Attempt to disable the Tx queue passed in. If the Tx queue was successfully
- * disabled then clear q_id bit in the enabled queues bitmap and return
- * success. Otherwise return error.
- */
-static int
-ice_vf_vsi_dis_single_txq(struct ice_vf *vf, struct ice_vsi *vsi, u16 q_id)
-{
- struct ice_txq_meta txq_meta = { 0 };
- struct ice_tx_ring *ring;
- int err;
-
- if (!test_bit(q_id, vf->txq_ena))
- dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n",
- q_id, vsi->vsi_num);
-
- ring = vsi->tx_rings[q_id];
- if (!ring)
- return -EINVAL;
-
- ice_fill_txq_meta(vsi, ring, &txq_meta);
-
- err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id, ring, &txq_meta);
- if (err) {
- dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
- q_id, vsi->vsi_num);
- return err;
- }
-
- /* Clear enabled queues flag */
- clear_bit(q_id, vf->txq_ena);
-
- return 0;
-}
-
-/**
- * ice_vc_dis_qs_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * called from the VF to disable all or specific queue(s)
- */
-static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_queue_select *vqs =
- (struct virtchnl_queue_select *)msg;
- struct ice_vsi *vsi;
- unsigned long q_map;
- u16 vf_q_id;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
- !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_validate_vqs_bitmaps(vqs)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (vqs->tx_queues) {
- q_map = vqs->tx_queues;
-
- for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (ice_vf_vsi_dis_single_txq(vf, vsi, vf_q_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
- }
- }
-
- q_map = vqs->rx_queues;
- /* speed up Rx queue disable by batching them if possible */
- if (q_map &&
- bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) {
- if (ice_vsi_stop_all_rx_rings(vsi)) {
- dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n",
- vsi->vsi_num);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
- } else if (q_map) {
- for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* Skip queue if not enabled */
- if (!test_bit(vf_q_id, vf->rxq_ena))
- continue;
-
- if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id,
- true)) {
- dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
- vf_q_id, vsi->vsi_num);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* Clear enabled queues flag */
- clear_bit(vf_q_id, vf->rxq_ena);
- }
- }
-
- /* Clear enabled queues flag */
- if (v_ret == VIRTCHNL_STATUS_SUCCESS && ice_vf_has_no_qs_ena(vf))
- clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
-
-error_param:
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
- NULL, 0);
-}
-
-/**
- * ice_cfg_interrupt
- * @vf: pointer to the VF info
- * @vsi: the VSI being configured
- * @vector_id: vector ID
- * @map: vector map for mapping vectors to queues
- * @q_vector: structure for interrupt vector
- * configure the IRQ to queue map
- */
-static int
-ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
- struct virtchnl_vector_map *map,
- struct ice_q_vector *q_vector)
-{
- u16 vsi_q_id, vsi_q_id_idx;
- unsigned long qmap;
-
- q_vector->num_ring_rx = 0;
- q_vector->num_ring_tx = 0;
-
- qmap = map->rxq_map;
- for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
- vsi_q_id = vsi_q_id_idx;
-
- if (!ice_vc_isvalid_q_id(vsi, vsi_q_id))
- return VIRTCHNL_STATUS_ERR_PARAM;
-
- q_vector->num_ring_rx++;
- q_vector->rx.itr_idx = map->rxitr_idx;
- vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
- ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
- q_vector->rx.itr_idx);
- }
-
- qmap = map->txq_map;
- for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
- vsi_q_id = vsi_q_id_idx;
-
- if (!ice_vc_isvalid_q_id(vsi, vsi_q_id))
- return VIRTCHNL_STATUS_ERR_PARAM;
-
- q_vector->num_ring_tx++;
- q_vector->tx.itr_idx = map->txitr_idx;
- vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
- ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
- q_vector->tx.itr_idx);
- }
-
- return VIRTCHNL_STATUS_SUCCESS;
-}
-
-/**
- * ice_vc_cfg_irq_map_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * called from the VF to configure the IRQ to queue map
- */
-static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- u16 num_q_vectors_mapped, vsi_id, vector_id;
- struct virtchnl_irq_map_info *irqmap_info;
- struct virtchnl_vector_map *map;
- struct ice_vsi *vsi;
- int i;
-
- irqmap_info = (struct virtchnl_irq_map_info *)msg;
- num_q_vectors_mapped = irqmap_info->num_vectors;
-
- /* Check to make sure number of VF vectors mapped is not greater than
- * number of VF vectors originally allocated, and check that
- * there is actually at least a single VF queue vector mapped
- */
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
- vf->num_msix < num_q_vectors_mapped ||
- !num_q_vectors_mapped) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- for (i = 0; i < num_q_vectors_mapped; i++) {
- struct ice_q_vector *q_vector;
-
- map = &irqmap_info->vecmap[i];
-
- vector_id = map->vector_id;
- vsi_id = map->vsi_id;
- /* vector_id is always 0-based for each VF, and can never be
- * larger than or equal to the max allowed interrupts per VF
- */
- if (!(vector_id < vf->num_msix) ||
- !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
- (!vector_id && (map->rxq_map || map->txq_map))) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* No need to map VF miscellaneous or rogue vector */
- if (!vector_id)
- continue;
-
- /* Subtract non queue vector from vector_id passed by VF
- * to get actual number of VSI queue vector array index
- */
- q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
- if (!q_vector) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* lookout for the invalid queue index */
- v_ret = (enum virtchnl_status_code)
- ice_cfg_interrupt(vf, vsi, vector_id, map, q_vector);
- if (v_ret)
- goto error_param;
- }
-
-error_param:
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
- NULL, 0);
-}
-
-/**
- * ice_vc_cfg_qs_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * called from the VF to configure the Rx/Tx queues
- */
-static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
-{
- struct virtchnl_vsi_queue_config_info *qci =
- (struct virtchnl_vsi_queue_config_info *)msg;
- struct virtchnl_queue_pair_info *qpi;
- struct ice_pf *pf = vf->pf;
- struct ice_lag *lag;
- struct ice_vsi *vsi;
- u8 act_prt, pri_prt;
- int i = -1, q_idx;
-
- lag = pf->lag;
- mutex_lock(&pf->lag_mutex);
- act_prt = ICE_LAG_INVALID_PORT;
- pri_prt = pf->hw.port_info->lport;
- if (lag && lag->bonded && lag->primary) {
- act_prt = lag->active_port;
- if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT &&
- lag->upper_netdev)
- ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
- else
- act_prt = ICE_LAG_INVALID_PORT;
- }
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
- goto error_param;
-
- if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id))
- goto error_param;
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi)
- goto error_param;
-
- if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
- qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
- dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
- vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
- goto error_param;
- }
-
- for (i = 0; i < qci->num_queue_pairs; i++) {
- if (!qci->qpair[i].rxq.crc_disable)
- continue;
-
- if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_CRC) ||
- vf->vlan_strip_ena)
- goto error_param;
- }
-
- for (i = 0; i < qci->num_queue_pairs; i++) {
- qpi = &qci->qpair[i];
- if (qpi->txq.vsi_id != qci->vsi_id ||
- qpi->rxq.vsi_id != qci->vsi_id ||
- qpi->rxq.queue_id != qpi->txq.queue_id ||
- qpi->txq.headwb_enabled ||
- !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
- !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
- !ice_vc_isvalid_q_id(vsi, qpi->txq.queue_id)) {
- goto error_param;
- }
-
- q_idx = qpi->rxq.queue_id;
-
- /* make sure selected "q_idx" is in valid range of queues
- * for selected "vsi"
- */
- if (q_idx >= vsi->alloc_txq || q_idx >= vsi->alloc_rxq) {
- goto error_param;
- }
-
- /* copy Tx queue info from VF into VSI */
- if (qpi->txq.ring_len > 0) {
- vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
- vsi->tx_rings[i]->count = qpi->txq.ring_len;
-
- /* Disable any existing queue first */
- if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx))
- goto error_param;
-
- /* Configure a queue with the requested settings */
- if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) {
- dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure TX queue %d\n",
- vf->vf_id, i);
- goto error_param;
- }
- }
-
- /* copy Rx queue info from VF into VSI */
- if (qpi->rxq.ring_len > 0) {
- u16 max_frame_size = ice_vc_get_max_frame_size(vf);
- u32 rxdid;
-
- vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
- vsi->rx_rings[i]->count = qpi->rxq.ring_len;
-
- if (qpi->rxq.crc_disable)
- vsi->rx_rings[q_idx]->flags |=
- ICE_RX_FLAGS_CRC_STRIP_DIS;
- else
- vsi->rx_rings[q_idx]->flags &=
- ~ICE_RX_FLAGS_CRC_STRIP_DIS;
-
- if (qpi->rxq.databuffer_size != 0 &&
- (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
- qpi->rxq.databuffer_size < 1024))
- goto error_param;
- vsi->rx_buf_len = qpi->rxq.databuffer_size;
- vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
- if (qpi->rxq.max_pkt_size > max_frame_size ||
- qpi->rxq.max_pkt_size < 64)
- goto error_param;
-
- vsi->max_frame = qpi->rxq.max_pkt_size;
- /* add space for the port VLAN since the VF driver is
- * not expected to account for it in the MTU
- * calculation
- */
- if (ice_vf_is_port_vlan_ena(vf))
- vsi->max_frame += VLAN_HLEN;
-
- if (ice_vsi_cfg_single_rxq(vsi, q_idx)) {
- dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure RX queue %d\n",
- vf->vf_id, i);
- goto error_param;
- }
-
- /* If Rx flex desc is supported, select RXDID for Rx
- * queues. Otherwise, use legacy 32byte descriptor
- * format. Legacy 16byte descriptor is not supported.
- * If this RXDID is selected, return error.
- */
- if (vf->driver_caps &
- VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
- rxdid = qpi->rxq.rxdid;
- if (!(BIT(rxdid) & pf->supported_rxdids))
- goto error_param;
- } else {
- rxdid = ICE_RXDID_LEGACY_1;
- }
-
- ice_write_qrxflxp_cntxt(&vsi->back->hw,
- vsi->rxq_map[q_idx],
- rxdid, 0x03, false);
- }
- }
-
- if (lag && lag->bonded && lag->primary &&
- act_prt != ICE_LAG_INVALID_PORT)
- ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
- mutex_unlock(&pf->lag_mutex);
-
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
- VIRTCHNL_STATUS_SUCCESS, NULL, 0);
-error_param:
- /* disable whatever we can */
- for (; i >= 0; i--) {
- if (ice_vsi_ctrl_one_rx_ring(vsi, false, i, true))
- dev_err(ice_pf_to_dev(pf), "VF-%d could not disable RX queue %d\n",
- vf->vf_id, i);
- if (ice_vf_vsi_dis_single_txq(vf, vsi, i))
- dev_err(ice_pf_to_dev(pf), "VF-%d could not disable TX queue %d\n",
- vf->vf_id, i);
- }
-
- if (lag && lag->bonded && lag->primary &&
- act_prt != ICE_LAG_INVALID_PORT)
- ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
- mutex_unlock(&pf->lag_mutex);
-
- ice_lag_move_new_vf_nodes(vf);
-
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
- VIRTCHNL_STATUS_ERR_PARAM, NULL, 0);
-}
-
-/**
* ice_can_vf_change_mac
* @vf: pointer to the VF info
*
@@ -1909,6 +741,51 @@ ice_vfhw_mac_add(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
}
/**
+ * ice_is_mc_lldp_eth_addr - check if the given MAC is a multicast LLDP address
+ * @mac: address to check
+ *
+ * Return: true if the address is one of the three possible LLDP multicast
+ * addresses, false otherwise.
+ */
+static bool ice_is_mc_lldp_eth_addr(const u8 *mac)
+{
+ const u8 lldp_mac_base[] = {0x01, 0x80, 0xc2, 0x00, 0x00};
+
+ if (memcmp(mac, lldp_mac_base, sizeof(lldp_mac_base)))
+ return false;
+
+ return (mac[5] == 0x0e || mac[5] == 0x03 || mac[5] == 0x00);
+}
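As a quick sanity check (a standalone sketch, not part of the patch), the match logic above accepts exactly the three IEEE 802.1AB LLDP destination addresses 01:80:c2:00:00:{00,03,0e}:

/* Mirror of the LLDP multicast check, for testing outside the kernel. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool is_mc_lldp(const unsigned char *mac)
{
	const unsigned char base[] = { 0x01, 0x80, 0xc2, 0x00, 0x00 };

	if (memcmp(mac, base, sizeof(base)))
		return false;

	return mac[5] == 0x0e || mac[5] == 0x03 || mac[5] == 0x00;
}

int main(void)
{
	const unsigned char nearest_bridge[6] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e };
	const unsigned char plain_unicast[6]  = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

	printf("%d %d\n", is_mc_lldp(nearest_bridge), is_mc_lldp(plain_unicast));	/* 1 0 */
	return 0;
}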
+
+/**
+ * ice_vc_can_add_mac - check if the VF is allowed to add a given MAC
+ * @vf: a VF to add the address to
+ * @mac: address to check
+ *
+ * Return: true if the VF is allowed to add such MAC address, false otherwise.
+ */
+static bool ice_vc_can_add_mac(const struct ice_vf *vf, const u8 *mac)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+
+ if (is_unicast_ether_addr(mac) &&
+ !ice_can_vf_change_mac((struct ice_vf *)vf)) {
+ dev_err(dev,
+ "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
+ return false;
+ }
+
+ if (!vf->trusted && ice_is_mc_lldp_eth_addr(mac)) {
+ dev_warn(dev,
+ "An untrusted VF %u is attempting to configure an LLDP multicast address\n",
+ vf->vf_id);
+ return false;
+ }
+
+ return true;
+}
+
+/**
* ice_vc_add_mac_addr - attempt to add the MAC address passed in
* @vf: pointer to the VF info
* @vsi: pointer to the VF's VSI
@@ -1926,10 +803,8 @@ ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
if (ether_addr_equal(mac_addr, vf->dev_lan_addr))
return 0;
- if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) {
- dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
+ if (!ice_vc_can_add_mac(vf, mac_addr))
return -EPERM;
- }
ret = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
if (ret == -EEXIST) {
@@ -1944,6 +819,8 @@ ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
return ret;
} else {
vf->num_mac++;
+ if (ice_is_mc_lldp_eth_addr(mac_addr))
+ ice_vf_update_mac_lldp_num(vf, vsi, true);
}
ice_vfhw_mac_add(vf, vc_ether_addr);
@@ -2038,6 +915,8 @@ ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
ice_vfhw_mac_del(vf, vc_ether_addr);
vf->num_mac--;
+ if (ice_is_mc_lldp_eth_addr(mac_addr))
+ ice_vf_update_mac_lldp_num(vf, vsi, false);
return 0;
}
@@ -2143,66 +1022,6 @@ static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
}
/**
- * ice_vc_request_qs_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * VFs get a default number of queues but can use this message to request a
- * different number. If the request is successful, PF will reset the VF and
- * return 0. If unsuccessful, PF will send message informing VF of number of
- * available queue pairs via virtchnl message response to VF.
- */
-static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_vf_res_request *vfres =
- (struct virtchnl_vf_res_request *)msg;
- u16 req_queues = vfres->num_queue_pairs;
- struct ice_pf *pf = vf->pf;
- u16 max_allowed_vf_queues;
- u16 tx_rx_queue_left;
- struct device *dev;
- u16 cur_queues;
-
- dev = ice_pf_to_dev(pf);
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- cur_queues = vf->num_vf_qs;
- tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf),
- ice_get_avail_rxq_count(pf));
- max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
- if (!req_queues) {
- dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n",
- vf->vf_id);
- } else if (req_queues > ICE_MAX_RSS_QS_PER_VF) {
- dev_err(dev, "VF %d tried to request more than %d queues.\n",
- vf->vf_id, ICE_MAX_RSS_QS_PER_VF);
- vfres->num_queue_pairs = ICE_MAX_RSS_QS_PER_VF;
- } else if (req_queues > cur_queues &&
- req_queues - cur_queues > tx_rx_queue_left) {
- dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
- vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
- vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
- ICE_MAX_RSS_QS_PER_VF);
- } else {
- /* request is successful, then reset VF */
- vf->num_req_qs = req_queues;
- ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
- dev_info(dev, "VF %d granted request of %u queues.\n",
- vf->vf_id, req_queues);
- return 0;
- }
-
-error_param:
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
- v_ret, (u8 *)vfres, sizeof(*vfres));
-}
-
-/**
* ice_vf_vlan_offload_ena - determine if capabilities support VLAN offloads
* @caps: VF driver negotiated capabilities
*
@@ -2217,7 +1036,7 @@ static bool ice_vf_vlan_offload_ena(u32 caps)
* ice_is_vlan_promisc_allowed - check if VLAN promiscuous config is allowed
* @vf: VF used to determine if VLAN promiscuous config is allowed
*/
-static bool ice_is_vlan_promisc_allowed(struct ice_vf *vf)
+bool ice_is_vlan_promisc_allowed(struct ice_vf *vf)
{
if ((test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) &&
@@ -2229,17 +1048,27 @@ static bool ice_is_vlan_promisc_allowed(struct ice_vf *vf)
/**
* ice_vf_ena_vlan_promisc - Enable Tx/Rx VLAN promiscuous for the VLAN
+ * @vf: VF to enable VLAN promisc on
* @vsi: VF's VSI used to enable VLAN promiscuous mode
* @vlan: VLAN used to enable VLAN promiscuous
*
* This function should only be called if VLAN promiscuous mode is allowed,
* which can be determined via ice_is_vlan_promisc_allowed().
*/
-static int ice_vf_ena_vlan_promisc(struct ice_vsi *vsi, struct ice_vlan *vlan)
+int ice_vf_ena_vlan_promisc(struct ice_vf *vf, struct ice_vsi *vsi,
+ struct ice_vlan *vlan)
{
- u8 promisc_m = ICE_PROMISC_VLAN_TX | ICE_PROMISC_VLAN_RX;
+ u8 promisc_m = 0;
int status;
+ if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
+ promisc_m |= ICE_UCAST_VLAN_PROMISC_BITS;
+ if (test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
+ promisc_m |= ICE_MCAST_VLAN_PROMISC_BITS;
+
+ if (!promisc_m)
+ return 0;
+
status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m,
vlan->vid);
if (status && status != -EEXIST)
@@ -2258,7 +1087,7 @@ static int ice_vf_ena_vlan_promisc(struct ice_vsi *vsi, struct ice_vlan *vlan)
*/
static int ice_vf_dis_vlan_promisc(struct ice_vsi *vsi, struct ice_vlan *vlan)
{
- u8 promisc_m = ICE_PROMISC_VLAN_TX | ICE_PROMISC_VLAN_RX;
+ u8 promisc_m = ICE_UCAST_VLAN_PROMISC_BITS | ICE_MCAST_VLAN_PROMISC_BITS;
int status;
status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m,
@@ -2413,7 +1242,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
goto error_param;
}
} else if (vlan_promisc) {
- status = ice_vf_ena_vlan_promisc(vsi, &vlan);
+ status = ice_vf_ena_vlan_promisc(vf, vsi, &vlan);
if (status) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
dev_err(dev, "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
@@ -2585,108 +1414,6 @@ error_param:
}
/**
- * ice_vc_get_rss_hena - return the RSS HENA bits allowed by the hardware
- * @vf: pointer to the VF info
- */
-static int ice_vc_get_rss_hena(struct ice_vf *vf)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_rss_hena *vrh = NULL;
- int len = 0, ret;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
- dev_err(ice_pf_to_dev(vf->pf), "RSS not supported by PF\n");
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- len = sizeof(struct virtchnl_rss_hena);
- vrh = kzalloc(len, GFP_KERNEL);
- if (!vrh) {
- v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
- len = 0;
- goto err;
- }
-
- vrh->hena = ICE_DEFAULT_RSS_HENA;
-err:
- /* send the response back to the VF */
- ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS, v_ret,
- (u8 *)vrh, len);
- kfree(vrh);
- return ret;
-}
-
-/**
- * ice_vc_set_rss_hena - set RSS HENA bits for the VF
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- */
-static int ice_vc_set_rss_hena(struct ice_vf *vf, u8 *msg)
-{
- struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg;
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct ice_pf *pf = vf->pf;
- struct ice_vsi *vsi;
- struct device *dev;
- int status;
-
- dev = ice_pf_to_dev(pf);
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
- dev_err(dev, "RSS not supported by PF\n");
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- /* clear all previously programmed RSS configuration to allow VF drivers
- * the ability to customize the RSS configuration and/or completely
- * disable RSS
- */
- status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx);
- if (status && !vrh->hena) {
- /* only report failure to clear the current RSS configuration if
- * that was clearly the VF's intention (i.e. vrh->hena = 0)
- */
- v_ret = ice_err_to_virt_err(status);
- goto err;
- } else if (status) {
- /* allow the VF to update the RSS configuration even on failure
- * to clear the current RSS confguration in an attempt to keep
- * RSS in a working state
- */
- dev_warn(dev, "Failed to clear the RSS configuration for VF %u\n",
- vf->vf_id);
- }
-
- if (vrh->hena) {
- status = ice_add_avf_rss_cfg(&pf->hw, vsi, vrh->hena);
- v_ret = ice_err_to_virt_err(status);
- }
-
- /* send the response to the VF */
-err:
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, v_ret,
- NULL, 0);
-}
-
-/**
* ice_vc_query_rxdid - query RXDID supported by DDP package
* @vf: pointer to VF info
*
@@ -2696,12 +1423,8 @@ err:
static int ice_vc_query_rxdid(struct ice_vf *vf)
{
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_supported_rxdids *rxdid = NULL;
- struct ice_hw *hw = &vf->pf->hw;
struct ice_pf *pf = vf->pf;
- int len = 0;
- int ret, i;
- u32 regval;
+ u64 rxdid;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -2713,35 +1436,11 @@ static int ice_vc_query_rxdid(struct ice_vf *vf)
goto err;
}
- len = sizeof(struct virtchnl_supported_rxdids);
- rxdid = kzalloc(len, GFP_KERNEL);
- if (!rxdid) {
- v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
- len = 0;
- goto err;
- }
-
- /* RXDIDs supported by DDP package can be read from the register
- * to get the supported RXDID bitmap. But the legacy 32byte RXDID
- * is not listed in DDP package, add it in the bitmap manually.
- * Legacy 16byte descriptor is not supported.
- */
- rxdid->supported_rxdids |= BIT(ICE_RXDID_LEGACY_1);
-
- for (i = ICE_RXDID_FLEX_NIC; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) {
- regval = rd32(hw, GLFLXP_RXDID_FLAGS(i, 0));
- if ((regval >> GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S)
- & GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M)
- rxdid->supported_rxdids |= BIT(i);
- }
-
- pf->supported_rxdids = rxdid->supported_rxdids;
+ rxdid = pf->supported_rxdids;
err:
- ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_SUPPORTED_RXDIDS,
- v_ret, (u8 *)rxdid, len);
- kfree(rxdid);
- return ret;
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_SUPPORTED_RXDIDS,
+ v_ret, (u8 *)&rxdid, sizeof(rxdid));
}
/**
@@ -3250,7 +1949,7 @@ ice_vc_add_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
return err;
if (vlan_promisc) {
- err = ice_vf_ena_vlan_promisc(vsi, &vlan);
+ err = ice_vf_ena_vlan_promisc(vf, vsi, &vlan);
if (err)
return err;
}
@@ -3278,7 +1977,8 @@ ice_vc_add_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
*/
if (!ice_is_dvm_ena(&vsi->back->hw)) {
if (vlan_promisc) {
- err = ice_vf_ena_vlan_promisc(vsi, &vlan);
+ err = ice_vf_ena_vlan_promisc(vf, vsi,
+ &vlan);
if (err)
return err;
}
@@ -3469,48 +2169,6 @@ ice_vc_ena_vlan_offload(struct ice_vsi *vsi,
return 0;
}
-#define ICE_L2TSEL_QRX_CONTEXT_REG_IDX 3
-#define ICE_L2TSEL_BIT_OFFSET 23
-enum ice_l2tsel {
- ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND,
- ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1,
-};
-
-/**
- * ice_vsi_update_l2tsel - update l2tsel field for all Rx rings on this VSI
- * @vsi: VSI used to update l2tsel on
- * @l2tsel: l2tsel setting requested
- *
- * Use the l2tsel setting to update all of the Rx queue context bits for l2tsel.
- * This will modify which descriptor field the first offloaded VLAN will be
- * stripped into.
- */
-static void ice_vsi_update_l2tsel(struct ice_vsi *vsi, enum ice_l2tsel l2tsel)
-{
- struct ice_hw *hw = &vsi->back->hw;
- u32 l2tsel_bit;
- int i;
-
- if (l2tsel == ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND)
- l2tsel_bit = 0;
- else
- l2tsel_bit = BIT(ICE_L2TSEL_BIT_OFFSET);
-
- for (i = 0; i < vsi->alloc_rxq; i++) {
- u16 pfq = vsi->rxq_map[i];
- u32 qrx_context_offset;
- u32 regval;
-
- qrx_context_offset =
- QRX_CONTEXT(ICE_L2TSEL_QRX_CONTEXT_REG_IDX, pfq);
-
- regval = rd32(hw, qrx_context_offset);
- regval &= ~BIT(ICE_L2TSEL_BIT_OFFSET);
- regval |= l2tsel_bit;
- wr32(hw, qrx_context_offset, regval);
- }
-}
-
/**
* ice_vc_ena_vlan_stripping_v2_msg
* @vf: VF the message was received from
@@ -3784,6 +2442,59 @@ out:
v_ret, NULL, 0);
}
+static int ice_vc_get_ptp_cap(struct ice_vf *vf,
+ const struct virtchnl_ptp_caps *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ u32 caps = VIRTCHNL_1588_PTP_CAP_RX_TSTAMP |
+ VIRTCHNL_1588_PTP_CAP_READ_PHC;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ goto err;
+
+ v_ret = VIRTCHNL_STATUS_SUCCESS;
+
+ if (msg->caps & caps)
+ vf->ptp_caps = caps;
+
+err:
+ /* send the response back to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_1588_PTP_GET_CAPS, v_ret,
+ (u8 *)&vf->ptp_caps,
+ sizeof(struct virtchnl_ptp_caps));
+}
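A hand-worked example of the capability negotiation above (the VF request is hypothetical, not taken from the driver):

/* The PF advertises caps = RX_TSTAMP | READ_PHC. If the VF requests, say,
 * Rx timestamping plus Tx timestamping, msg->caps & caps is non-zero, so the
 * VF is granted the PF's full set: vf->ptp_caps = RX_TSTAMP | READ_PHC, which
 * is echoed back in the VIRTCHNL_OP_1588_PTP_GET_CAPS response. A request
 * with no overlap leaves vf->ptp_caps unchanged and still reports it as-is.
 */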
+
+static int ice_vc_get_phc_time(struct ice_vf *vf)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ struct virtchnl_phc_time *phc_time = NULL;
+ struct ice_pf *pf = vf->pf;
+ u32 len = 0;
+ int ret;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ goto err;
+
+ v_ret = VIRTCHNL_STATUS_SUCCESS;
+
+ phc_time = kzalloc(sizeof(*phc_time), GFP_KERNEL);
+ if (!phc_time) {
+ v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ goto err;
+ }
+
+ len = sizeof(*phc_time);
+
+ phc_time->time = ice_ptp_read_src_clk_reg(pf, NULL);
+
+err:
+ /* send the response back to the VF */
+ ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_1588_PTP_GET_TIME, v_ret,
+ (u8 *)phc_time, len);
+ kfree(phc_time);
+ return ret;
+}
+
static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = {
.get_ver_msg = ice_vc_get_ver_msg,
.get_vf_res_msg = ice_vc_get_vf_res_msg,
@@ -3803,8 +2514,8 @@ static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = {
.add_vlan_msg = ice_vc_add_vlan_msg,
.remove_vlan_msg = ice_vc_remove_vlan_msg,
.query_rxdid = ice_vc_query_rxdid,
- .get_rss_hena = ice_vc_get_rss_hena,
- .set_rss_hena_msg = ice_vc_set_rss_hena,
+ .get_rss_hashcfg = ice_vc_get_rss_hashcfg,
+ .set_rss_hashcfg = ice_vc_set_rss_hashcfg,
.ena_vlan_stripping = ice_vc_ena_vlan_stripping,
.dis_vlan_stripping = ice_vc_dis_vlan_stripping,
.handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
@@ -3817,6 +2528,14 @@ static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = {
.dis_vlan_stripping_v2_msg = ice_vc_dis_vlan_stripping_v2_msg,
.ena_vlan_insertion_v2_msg = ice_vc_ena_vlan_insertion_v2_msg,
.dis_vlan_insertion_v2_msg = ice_vc_dis_vlan_insertion_v2_msg,
+ .get_qos_caps = ice_vc_get_qos_caps,
+ .cfg_q_bw = ice_vc_cfg_q_bw,
+ .cfg_q_quanta = ice_vc_cfg_q_quanta,
+ .get_ptp_cap = ice_vc_get_ptp_cap,
+ .get_phc_time = ice_vc_get_phc_time,
+ /* If you add a new op here please make sure to add it to
+ * ice_virtchnl_repr_ops as well.
+ */
};
/**
@@ -3874,7 +2593,6 @@ static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg)
}
ice_vfhw_mac_add(vf, &al->list[i]);
- vf->num_mac++;
break;
}
@@ -3933,8 +2651,8 @@ static const struct ice_virtchnl_ops ice_virtchnl_repr_ops = {
.add_vlan_msg = ice_vc_add_vlan_msg,
.remove_vlan_msg = ice_vc_remove_vlan_msg,
.query_rxdid = ice_vc_query_rxdid,
- .get_rss_hena = ice_vc_get_rss_hena,
- .set_rss_hena_msg = ice_vc_set_rss_hena,
+ .get_rss_hashcfg = ice_vc_get_rss_hashcfg,
+ .set_rss_hashcfg = ice_vc_set_rss_hashcfg,
.ena_vlan_stripping = ice_vc_ena_vlan_stripping,
.dis_vlan_stripping = ice_vc_dis_vlan_stripping,
.handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
@@ -3947,6 +2665,11 @@ static const struct ice_virtchnl_ops ice_virtchnl_repr_ops = {
.dis_vlan_stripping_v2_msg = ice_vc_dis_vlan_stripping_v2_msg,
.ena_vlan_insertion_v2_msg = ice_vc_ena_vlan_insertion_v2_msg,
.dis_vlan_insertion_v2_msg = ice_vc_dis_vlan_insertion_v2_msg,
+ .get_qos_caps = ice_vc_get_qos_caps,
+ .cfg_q_bw = ice_vc_cfg_q_bw,
+ .cfg_q_quanta = ice_vc_cfg_q_quanta,
+ .get_ptp_cap = ice_vc_get_ptp_cap,
+ .get_phc_time = ice_vc_get_phc_time,
};
/**
@@ -4005,8 +2728,10 @@ ice_is_malicious_vf(struct ice_vf *vf, struct ice_mbx_data *mbxdata)
* @event: pointer to the AQ event
* @mbxdata: information used to detect VF attempting mailbox overflow
*
- * called from the common asq/arq handler to
- * process request from VF
+ * Called from the common asq/arq handler to process a request from a VF. When
+ * this flow is used for devices with hardware VF-to-PF message queue overflow
+ * support (ICE_F_MBX_LIMIT), mbxdata is set to NULL and the
+ * ice_is_malicious_vf() check is skipped.
*/
void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event,
struct ice_mbx_data *mbxdata)
@@ -4032,7 +2757,7 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event,
mutex_lock(&vf->cfg_lock);
/* Check if the VF is trying to overflow the mailbox */
- if (ice_is_malicious_vf(vf, mbxdata))
+ if (mbxdata && ice_is_malicious_vf(vf, mbxdata))
goto finish;
/* Check if VF is disabled. */
@@ -4128,11 +2853,11 @@ error_handler:
case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
err = ops->query_rxdid(vf);
break;
- case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
- err = ops->get_rss_hena(vf);
+ case VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS:
+ err = ops->get_rss_hashcfg(vf);
break;
- case VIRTCHNL_OP_SET_RSS_HENA:
- err = ops->set_rss_hena_msg(vf, msg);
+ case VIRTCHNL_OP_SET_RSS_HASHCFG:
+ err = ops->set_rss_hashcfg(vf, msg);
break;
case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
err = ops->ena_vlan_stripping(vf);
@@ -4173,6 +2898,21 @@ error_handler:
case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
err = ops->dis_vlan_insertion_v2_msg(vf, msg);
break;
+ case VIRTCHNL_OP_GET_QOS_CAPS:
+ err = ops->get_qos_caps(vf);
+ break;
+ case VIRTCHNL_OP_CONFIG_QUEUE_BW:
+ err = ops->cfg_q_bw(vf, msg);
+ break;
+ case VIRTCHNL_OP_CONFIG_QUANTA:
+ err = ops->cfg_q_quanta(vf, msg);
+ break;
+ case VIRTCHNL_OP_1588_PTP_GET_CAPS:
+ err = ops->get_ptp_cap(vf, (const void *)msg);
+ break;
+ case VIRTCHNL_OP_1588_PTP_GET_TIME:
+ err = ops->get_phc_time(vf);
+ break;
case VIRTCHNL_OP_UNKNOWN:
default:
dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.h b/drivers/net/ethernet/intel/ice/virt/virtchnl.h
index 3a4115869153..71bb456e2d71 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.h
+++ b/drivers/net/ethernet/intel/ice/virt/virtchnl.h
@@ -13,12 +13,22 @@
/* Restrict the number of MAC addresses and VLANs that a non-trusted VF can program */
#define ICE_MAX_VLAN_PER_VF 8
+#define ICE_DFLT_QUANTA 1024
+#define ICE_MAX_QUANTA_SIZE 4096
+#define ICE_MIN_QUANTA_SIZE 256
+
+#define calc_quanta_desc(x) \
+ max_t(u16, 12, min_t(u16, 63, (((x) + 66) / 132) * 2 + 4))
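Worked examples for the macro above (computed by hand, assuming C integer division):

/* calc_quanta_desc(1024): ((1024 + 66) / 132) * 2 + 4 = 8 * 2 + 4  = 20 -> 20
 * calc_quanta_desc(256):  ((256 + 66) / 132) * 2 + 4  = 2 * 2 + 4  = 8  -> clamped up to 12
 * calc_quanta_desc(4096): ((4096 + 66) / 132) * 2 + 4 = 31 * 2 + 4 = 66 -> clamped down to 63
 */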
+
/* MAC filters: 1 is reserved for the VF's default/perm_addr/LAA MAC, 1 for
* broadcast, and 16 for additional unicast/multicast filters
*/
#define ICE_MAX_MACADDR_PER_VF 18
#define ICE_FLEX_DESC_RXDID_MAX_NUM 64
+/* Priority to be compared against previous priority from the pipe */
+#define ICE_RXDID_PRIO 0x03
+
/* VFs only get a single VSI. For ice hardware, the VF does not need to know
* its VSI index. However, the virtchnl interface requires a VSI number,
* mainly due to legacy hardware.
@@ -47,8 +57,8 @@ struct ice_virtchnl_ops {
int (*add_vlan_msg)(struct ice_vf *vf, u8 *msg);
int (*remove_vlan_msg)(struct ice_vf *vf, u8 *msg);
int (*query_rxdid)(struct ice_vf *vf);
- int (*get_rss_hena)(struct ice_vf *vf);
- int (*set_rss_hena_msg)(struct ice_vf *vf, u8 *msg);
+ int (*get_rss_hashcfg)(struct ice_vf *vf);
+ int (*set_rss_hashcfg)(struct ice_vf *vf, u8 *msg);
int (*ena_vlan_stripping)(struct ice_vf *vf);
int (*dis_vlan_stripping)(struct ice_vf *vf);
int (*handle_rss_cfg_msg)(struct ice_vf *vf, u8 *msg, bool add);
@@ -61,6 +71,13 @@ struct ice_virtchnl_ops {
int (*dis_vlan_stripping_v2_msg)(struct ice_vf *vf, u8 *msg);
int (*ena_vlan_insertion_v2_msg)(struct ice_vf *vf, u8 *msg);
int (*dis_vlan_insertion_v2_msg)(struct ice_vf *vf, u8 *msg);
+ int (*get_qos_caps)(struct ice_vf *vf);
+ int (*cfg_q_tc_map)(struct ice_vf *vf, u8 *msg);
+ int (*cfg_q_bw)(struct ice_vf *vf, u8 *msg);
+ int (*cfg_q_quanta)(struct ice_vf *vf, u8 *msg);
+ int (*get_ptp_cap)(struct ice_vf *vf,
+ const struct virtchnl_ptp_caps *msg);
+ int (*get_phc_time)(struct ice_vf *vf);
};
#ifdef CONFIG_PCI_IOV
@@ -75,12 +92,31 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id);
void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event,
struct ice_mbx_data *mbxdata);
+void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx);
+void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx);
+int ice_vf_ena_vlan_promisc(struct ice_vf *vf, struct ice_vsi *vsi,
+ struct ice_vlan *vlan);
+bool ice_is_vlan_promisc_allowed(struct ice_vf *vf);
#else /* CONFIG_PCI_IOV */
static inline void ice_virtchnl_set_dflt_ops(struct ice_vf *vf) { }
static inline void ice_virtchnl_set_repr_ops(struct ice_vf *vf) { }
static inline void ice_vc_notify_vf_link_state(struct ice_vf *vf) { }
static inline void ice_vc_notify_link_state(struct ice_pf *pf) { }
static inline void ice_vc_notify_reset(struct ice_pf *pf) { }
+static inline void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx) { }
+static inline void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx) { }
+
+static inline int ice_vf_ena_vlan_promisc(struct ice_vf *vf,
+ struct ice_vsi *vsi,
+ struct ice_vlan *vlan)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline bool ice_is_vlan_promisc_allowed(struct ice_vf *vf)
+{
+ return false;
+}
static inline int
ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
diff --git a/drivers/net/ethernet/intel/idpf/Kconfig b/drivers/net/ethernet/intel/idpf/Kconfig
new file mode 100644
index 000000000000..adab2154125b
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/Kconfig
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (C) 2024 Intel Corporation
+
+config IDPF
+ tristate "Intel(R) Infrastructure Data Path Function Support"
+ depends on PCI_MSI
+ depends on PTP_1588_CLOCK_OPTIONAL
+ select DIMLIB
+ select LIBETH_XDP
+ help
+ This driver supports Intel(R) Infrastructure Data Path Function
+ devices.
+
+ To compile this driver as a module, choose M here. The module
+ will be called idpf.
+
+if IDPF
+
+config IDPF_SINGLEQ
+ bool "idpf singleq support"
+ help
+ This option enables support for legacy single Rx/Tx queues with no
+ completion and fill queues. Only enable it if you have hardware that
+ needs to work in this mode, as it increases the driver size and adds
+ runtime checks on the hotpath.
+
+endif # IDPF
diff --git a/drivers/net/ethernet/intel/idpf/Makefile b/drivers/net/ethernet/intel/idpf/Makefile
index 6844ead2f3ac..651ddee942bd 100644
--- a/drivers/net/ethernet/intel/idpf/Makefile
+++ b/drivers/net/ethernet/intel/idpf/Makefile
@@ -10,9 +10,17 @@ idpf-y := \
idpf_controlq_setup.o \
idpf_dev.o \
idpf_ethtool.o \
+ idpf_idc.o \
idpf_lib.o \
idpf_main.o \
- idpf_singleq_txrx.o \
idpf_txrx.o \
idpf_virtchnl.o \
idpf_vf_dev.o
+
+idpf-$(CONFIG_IDPF_SINGLEQ) += idpf_singleq_txrx.o
+
+idpf-$(CONFIG_PTP_1588_CLOCK) += idpf_ptp.o
+idpf-$(CONFIG_PTP_1588_CLOCK) += idpf_virtchnl_ptp.o
+
+idpf-y += xdp.o
+idpf-y += xsk.o
diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h
index e7a036538246..8cfc68cbfa06 100644
--- a/drivers/net/ethernet/intel/idpf/idpf.h
+++ b/drivers/net/ethernet/intel/idpf/idpf.h
@@ -12,15 +12,17 @@ struct idpf_vport_max_q;
#include <net/pkt_sched.h>
#include <linux/aer.h>
#include <linux/etherdevice.h>
+#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/bitfield.h>
#include <linux/sctp.h>
#include <linux/ethtool_netlink.h>
#include <net/gro.h>
-#include <linux/dim.h>
+
+#include <linux/net/intel/iidc_rdma.h>
+#include <linux/net/intel/iidc_rdma_idpf.h>
#include "virtchnl2.h"
-#include "idpf_lan_txrx.h"
#include "idpf_txrx.h"
#include "idpf_controlq.h"
@@ -38,6 +40,7 @@ struct idpf_vport_max_q;
#define IDPF_NUM_CHUNKS_PER_MSG(struct_sz, chunk_sz) \
((IDPF_CTLQ_MAX_BUF_LEN - (struct_sz)) / (chunk_sz))
+#define IDPF_WAIT_FOR_MARKER_TIMEO 500
#define IDPF_MAX_WAIT 500
/* available message levels */
@@ -128,14 +131,12 @@ enum idpf_cap_field {
/**
* enum idpf_vport_state - Current vport state
- * @__IDPF_VPORT_DOWN: Vport is down
- * @__IDPF_VPORT_UP: Vport is up
- * @__IDPF_VPORT_STATE_LAST: Must be last, number of states
+ * @IDPF_VPORT_UP: Vport is up
+ * @IDPF_VPORT_STATE_NBITS: Must be last, number of states
*/
enum idpf_vport_state {
- __IDPF_VPORT_DOWN,
- __IDPF_VPORT_UP,
- __IDPF_VPORT_STATE_LAST,
+ IDPF_VPORT_UP,
+ IDPF_VPORT_STATE_NBITS
};
/**
@@ -143,7 +144,10 @@ enum idpf_vport_state {
* @adapter: Adapter back pointer
* @vport: Vport back pointer
* @vport_id: Vport identifier
+ * @link_speed_mbps: Link speed in mbps
* @vport_idx: Relative vport index
+ * @max_tx_hdr_size: Max header length hardware can support
+ * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
* @state: See enum idpf_vport_state
* @netstats: Packet and byte stats
* @stats_lock: Lock to protect stats update
@@ -152,8 +156,11 @@ struct idpf_netdev_priv {
struct idpf_adapter *adapter;
struct idpf_vport *vport;
u32 vport_id;
+ u32 link_speed_mbps;
u16 vport_idx;
- enum idpf_vport_state state;
+ u16 max_tx_hdr_size;
+ u16 tx_max_bufs;
+ DECLARE_BITMAP(state, IDPF_VPORT_STATE_NBITS);
struct rtnl_link_stats64 netstats;
spinlock_t stats_lock;
};
@@ -189,22 +196,38 @@ struct idpf_vport_max_q {
* @mb_intr_reg_init: Mailbox interrupt register initialization
* @reset_reg_init: Reset register initialization
* @trigger_reset: Trigger a reset to occur
+ * @ptp_reg_init: PTP register initialization
*/
struct idpf_reg_ops {
- void (*ctlq_reg_init)(struct idpf_ctlq_create_info *cq);
+ void (*ctlq_reg_init)(struct idpf_adapter *adapter,
+ struct idpf_ctlq_create_info *cq);
int (*intr_reg_init)(struct idpf_vport *vport);
void (*mb_intr_reg_init)(struct idpf_adapter *adapter);
void (*reset_reg_init)(struct idpf_adapter *adapter);
void (*trigger_reset)(struct idpf_adapter *adapter,
enum idpf_flags trig_cause);
+ void (*ptp_reg_init)(const struct idpf_adapter *adapter);
};
+#define IDPF_MMIO_REG_NUM_STATIC 2
+#define IDPF_PF_MBX_REGION_SZ 4096
+#define IDPF_PF_RSTAT_REGION_SZ 2048
+#define IDPF_VF_MBX_REGION_SZ 10240
+#define IDPF_VF_RSTAT_REGION_SZ 2048
+
/**
* struct idpf_dev_ops - Device specific operations
* @reg_ops: Register operations
+ * @idc_init: IDC initialization
+ * @static_reg_info: array of mailbox and rstat register info
*/
struct idpf_dev_ops {
struct idpf_reg_ops reg_ops;
+
+ int (*idc_init)(struct idpf_adapter *adapter);
+
+ /* static_reg_info[0] is mailbox region, static_reg_info[1] is rstat */
+ struct resource static_reg_info[IDPF_MMIO_REG_NUM_STATIC];
};
/**
@@ -224,16 +247,28 @@ enum idpf_vport_reset_cause {
/**
* enum idpf_vport_flags - Vport flags
* @IDPF_VPORT_DEL_QUEUES: To send delete queues message
- * @IDPF_VPORT_SW_MARKER: Indicate TX pipe drain software marker packets
- * processing is done
* @IDPF_VPORT_FLAGS_NBITS: Must be last
*/
enum idpf_vport_flags {
IDPF_VPORT_DEL_QUEUES,
- IDPF_VPORT_SW_MARKER,
IDPF_VPORT_FLAGS_NBITS,
};
+/**
+ * struct idpf_tstamp_stats - Tx timestamp statistics
+ * @stats_sync: See struct u64_stats_sync
+ * @packets: Number of packets successfully timestamped by the hardware
+ * @discarded: Number of Tx skbs discarded due to cached PHC
+ * being too old to correctly extend timestamp
+ * @flushed: Number of Tx skbs flushed due to interface closed
+ */
+struct idpf_tstamp_stats {
+ struct u64_stats_sync stats_sync;
+ u64_stats_t packets;
+ u64_stats_t discarded;
+ u64_stats_t flushed;
+};
+
struct idpf_port_stats {
struct u64_stats_sync stats_sync;
u64_stats_t rx_hw_csum_err;
@@ -247,6 +282,12 @@ struct idpf_port_stats {
struct virtchnl2_vport_stats vport_stats;
};
+struct idpf_fsteer_fltr {
+ struct list_head list;
+ u32 loc;
+ u32 q_index;
+};
+
/**
* struct idpf_vport - Handle for netdevices and queue resources
* @num_txq: Number of allocated TX queues
@@ -259,6 +300,10 @@ struct idpf_port_stats {
* @txq_model: Split queue or single queue queuing model
* @txqs: Used only in hotpath to get to the right queue very fast
* @crc_enable: Enable CRC insertion offload
+ * @xdpsq_share: whether XDPSQ sharing is enabled
+ * @num_xdp_txq: number of XDPSQs
+ * @xdp_txq_offset: index of the first XDPSQ (== number of regular SQs)
+ * @xdp_prog: installed XDP program
* @num_rxq: Number of allocated RX queues
* @num_bufq: Number of allocated buffer queues
* @rxq_desc_count: RX queue descriptor count. *MUST* have enough descriptors
@@ -266,12 +311,12 @@ struct idpf_port_stats {
* the worst case.
* @num_bufqs_per_qgrp: Buffer queues per RX queue in a given grouping
* @bufq_desc_count: Buffer queue descriptor count
- * @bufq_size: Size of buffers in ring (e.g. 2K, 4K, etc)
* @num_rxq_grp: Number of RX queues in a group
* @rxq_grps: Total number of RX groups. Number of groups * number of RX per
* group will yield total number of RX queues.
* @rxq_model: Splitq queue or single queue queuing model
* @rx_ptype_lkup: Lookup table for ptypes on RX
+ * @vdev_info: IDC vport device info pointer
* @adapter: back pointer to associated adapter
* @netdev: Associated net_device. Each vport should have one and only one
* associated netdev.
@@ -284,14 +329,19 @@ struct idpf_port_stats {
* @num_q_vectors: Number of IRQ vectors allocated
* @q_vectors: Array of queue vectors
* @q_vector_idxs: Starting index of queue vectors
+ * @noirq_dyn_ctl: register to enable/disable the vector for NOIRQ queues
+ * @noirq_dyn_ctl_ena: value to write to the above to enable it
+ * @noirq_v_idx: ID of the NOIRQ vector
* @max_mtu: device given max possible MTU
* @default_mac_addr: device will give a default MAC to use
* @rx_itr_profile: RX profiles for Dynamic Interrupt Moderation
* @tx_itr_profile: TX profiles for Dynamic Interrupt Moderation
* @port_stats: per port csum, header split, and other offload stats
* @link_up: True if link is up
- * @link_speed_mbps: Link speed in mbps
- * @sw_marker_wq: workqueue for marker packets
+ * @tx_tstamp_caps: Capabilities negotiated for Tx timestamping
+ * @tstamp_config: The Tx tstamp config
+ * @tstamp_task: Tx timestamping task
+ * @tstamp_stats: Tx timestamping statistics
*/
struct idpf_vport {
u16 num_txq;
@@ -302,19 +352,25 @@ struct idpf_vport {
u16 num_txq_grp;
struct idpf_txq_group *txq_grps;
u32 txq_model;
- struct idpf_queue **txqs;
+ struct idpf_tx_queue **txqs;
bool crc_enable;
+ bool xdpsq_share;
+ u16 num_xdp_txq;
+ u16 xdp_txq_offset;
+ struct bpf_prog *xdp_prog;
+
u16 num_rxq;
u16 num_bufq;
u32 rxq_desc_count;
u8 num_bufqs_per_qgrp;
u32 bufq_desc_count[IDPF_MAX_BUFQS_PER_RXQ_GRP];
- u32 bufq_size[IDPF_MAX_BUFQS_PER_RXQ_GRP];
u16 num_rxq_grp;
struct idpf_rxq_group *rxq_grps;
u32 rxq_model;
- struct idpf_rx_ptype_decoded rx_ptype_lkup[IDPF_RX_MAX_PTYPE];
+ struct libeth_rx_pt *rx_ptype_lkup;
+
+ struct iidc_rdma_vport_dev_info *vdev_info;
struct idpf_adapter *adapter;
struct net_device *netdev;
@@ -328,6 +384,11 @@ struct idpf_vport {
u16 num_q_vectors;
struct idpf_q_vector *q_vectors;
u16 *q_vector_idxs;
+
+ void __iomem *noirq_dyn_ctl;
+ u32 noirq_dyn_ctl_ena;
+ u16 noirq_v_idx;
+
u16 max_mtu;
u8 default_mac_addr[ETH_ALEN];
u16 rx_itr_profile[IDPF_DIM_PROFILE_SLOTS];
@@ -335,9 +396,11 @@ struct idpf_vport {
struct idpf_port_stats port_stats;
bool link_up;
- u32 link_speed_mbps;
- wait_queue_head_t sw_marker_wq;
+ struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps;
+ struct kernel_hwtstamp_config tstamp_config;
+ struct work_struct tstamp_task;
+ struct idpf_tstamp_stats tstamp_stats;
};
/**
@@ -372,28 +435,53 @@ struct idpf_rss_data {
};
/**
+ * struct idpf_q_coalesce - User defined coalescing configuration values for
+ * a single queue.
+ * @tx_intr_mode: Dynamic TX ITR or not
+ * @rx_intr_mode: Dynamic RX ITR or not
+ * @tx_coalesce_usecs: TX interrupt throttling rate
+ * @rx_coalesce_usecs: RX interrupt throttling rate
+ *
+ * Used to restore user coalescing configuration after a reset.
+ */
+struct idpf_q_coalesce {
+ u32 tx_intr_mode;
+ u32 rx_intr_mode;
+ u32 tx_coalesce_usecs;
+ u32 rx_coalesce_usecs;
+};
+
+/**
* struct idpf_vport_user_config_data - User defined configuration values for
* each vport.
* @rss_data: See struct idpf_rss_data
+ * @q_coalesce: Array of per queue coalescing data
* @num_req_tx_qs: Number of user requested TX queues through ethtool
* @num_req_rx_qs: Number of user requested RX queues through ethtool
* @num_req_txq_desc: Number of user requested TX queue descriptors through
* ethtool
* @num_req_rxq_desc: Number of user requested RX queue descriptors through
* ethtool
+ * @xdp_prog: requested XDP program to install
* @user_flags: User toggled config flags
* @mac_filter_list: List of MAC filters
+ * @num_fsteer_fltrs: number of flow steering filters
+ * @flow_steer_list: list of flow steering filters
*
* Used to restore configuration after a reset as the vport will get wiped.
*/
struct idpf_vport_user_config_data {
struct idpf_rss_data rss_data;
+ struct idpf_q_coalesce *q_coalesce;
u16 num_req_tx_qs;
u16 num_req_rx_qs;
u32 num_req_txq_desc;
u32 num_req_rxq_desc;
+ struct bpf_prog *xdp_prog;
DECLARE_BITMAP(user_flags, __IDPF_USER_FLAGS_NBITS);
struct list_head mac_filter_list;
+ u32 num_fsteer_fltrs;
+ struct list_head flow_steer_list;
};
/**
@@ -482,6 +570,13 @@ struct idpf_vport_config {
struct idpf_vc_xn_manager;
+#define idpf_for_each_vport(adapter, iter) \
+ for (struct idpf_vport **__##iter = &(adapter)->vports[0], \
+ *iter = (adapter)->max_vports ? *__##iter : NULL; \
+ iter; \
+ iter = (++__##iter) < &(adapter)->vports[(adapter)->max_vports] ? \
+ *__##iter : NULL)
+
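A minimal usage sketch of the idpf_for_each_vport() iterator added above; the helper function, the netdev_dbg() message and the assumption that every yielded vport has a usable netdev are the editor's, not part of the patch.

static void example_walk_vports(struct idpf_adapter *adapter)
{
	/* The macro declares 'vport' itself and walks adapter->vports[]
	 * from index 0, stopping at the first NULL slot or after
	 * max_vports entries, so the body only sees non-NULL pointers.
	 */
	idpf_for_each_vport(adapter, vport)
		netdev_dbg(vport->netdev, "vport %u is allocated\n",
			   vport->vport_id);
}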
/**
* struct idpf_adapter - Device data struct generated on probe
* @pdev: PCI device struct given on probe
@@ -493,10 +588,11 @@ struct idpf_vc_xn_manager;
* @flags: See enum idpf_flags
* @reset_reg: See struct idpf_reset_reg
* @hw: Device access data
- * @num_req_msix: Requested number of MSIX vectors
* @num_avail_msix: Available number of MSIX vectors
* @num_msix_entries: Number of entries in MSIX table
* @msix_entries: MSIX table
+ * @num_rdma_msix_entries: Available number of MSIX vectors for RDMA
+ * @rdma_msix_entries: RDMA MSIX table
* @req_vec_chunks: Requested vector chunk data
* @mb_vector: Mailbox vector data
* @vector_stack: Stack to store the msix vector indexes
@@ -525,6 +621,7 @@ struct idpf_vc_xn_manager;
* @caps: Negotiated capabilities with device
* @vcxn_mngr: Virtchnl transaction manager
* @dev_ops: See idpf_dev_ops
+ * @cdev_info: IDC core device info pointer
* @num_vfs: Number of allocated VFs through sysfs. PF does not directly talk
* to VFs but is used to initialize them
* @crc_enable: Enable CRC insertion offload
@@ -534,6 +631,7 @@ struct idpf_vc_xn_manager;
* @vector_lock: Lock to protect vector distribution
* @queue_lock: Lock to protect queue distribution
* @vc_buf_lock: Lock to protect virtchnl buffer
+ * @ptp: Storage for PTP-related data
*/
struct idpf_adapter {
struct pci_dev *pdev;
@@ -546,10 +644,11 @@ struct idpf_adapter {
DECLARE_BITMAP(flags, IDPF_FLAGS_NBITS);
struct idpf_reset_reg reset_reg;
struct idpf_hw hw;
- u16 num_req_msix;
u16 num_avail_msix;
u16 num_msix_entries;
struct msix_entry *msix_entries;
+ u16 num_rdma_msix_entries;
+ struct msix_entry *rdma_msix_entries;
struct virtchnl2_alloc_vectors *req_vec_chunks;
struct idpf_q_vector mb_vector;
struct idpf_vector_lifo vector_stack;
@@ -582,6 +681,7 @@ struct idpf_adapter {
struct idpf_vc_xn_manager *vcxn_mngr;
struct idpf_dev_ops dev_ops;
+ struct iidc_rdma_core_dev_info *cdev_info;
int num_vfs;
bool crc_enable;
bool req_tx_splitq;
@@ -591,6 +691,8 @@ struct idpf_adapter {
struct mutex vector_lock;
struct mutex queue_lock;
struct mutex vc_buf_lock;
+
+ struct idpf_ptp *ptp;
};
/**
@@ -601,7 +703,13 @@ struct idpf_adapter {
*/
static inline int idpf_is_queue_model_split(u16 q_model)
{
- return q_model == VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ return !IS_ENABLED(CONFIG_IDPF_SINGLEQ) ||
+ q_model == VIRTCHNL2_QUEUE_MODEL_SPLIT;
+}
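A short, hedged note on the change above (the editor's reading of the code, not text from the patch):

/* With CONFIG_IDPF_SINGLEQ disabled, IS_ENABLED() evaluates to 0 at compile
 * time, so idpf_is_queue_model_split() collapses to "return true" and the
 * compiler can drop the single-queue paths this helper guards.
 */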
+
+static inline bool idpf_xdp_enabled(const struct idpf_vport *vport)
+{
+ return vport->adapter && vport->xdp_prog;
}
#define idpf_is_cap_ena(adapter, field, flag) \
@@ -612,17 +720,26 @@ static inline int idpf_is_queue_model_split(u16 q_model)
bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all,
enum idpf_cap_field field, u64 flag);
+/**
+ * idpf_is_rdma_cap_ena - Determine if RDMA is supported
+ * @adapter: private data struct
+ *
+ * Return: true if RDMA capability is enabled, false otherwise
+ */
+static inline bool idpf_is_rdma_cap_ena(struct idpf_adapter *adapter)
+{
+ return idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_RDMA);
+}
+
#define IDPF_CAP_RSS (\
- VIRTCHNL2_CAP_RSS_IPV4_TCP |\
- VIRTCHNL2_CAP_RSS_IPV4_TCP |\
- VIRTCHNL2_CAP_RSS_IPV4_UDP |\
- VIRTCHNL2_CAP_RSS_IPV4_SCTP |\
- VIRTCHNL2_CAP_RSS_IPV4_OTHER |\
- VIRTCHNL2_CAP_RSS_IPV6_TCP |\
- VIRTCHNL2_CAP_RSS_IPV6_TCP |\
- VIRTCHNL2_CAP_RSS_IPV6_UDP |\
- VIRTCHNL2_CAP_RSS_IPV6_SCTP |\
- VIRTCHNL2_CAP_RSS_IPV6_OTHER)
+ VIRTCHNL2_FLOW_IPV4_TCP |\
+ VIRTCHNL2_FLOW_IPV4_UDP |\
+ VIRTCHNL2_FLOW_IPV4_SCTP |\
+ VIRTCHNL2_FLOW_IPV4_OTHER |\
+ VIRTCHNL2_FLOW_IPV6_TCP |\
+ VIRTCHNL2_FLOW_IPV6_UDP |\
+ VIRTCHNL2_FLOW_IPV6_SCTP |\
+ VIRTCHNL2_FLOW_IPV6_OTHER)
#define IDPF_CAP_RSC (\
VIRTCHNL2_CAP_RSC_IPV4_TCP |\
@@ -632,13 +749,13 @@ bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all,
VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4 |\
VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6)
-#define IDPF_CAP_RX_CSUM_L4V4 (\
- VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP |\
- VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP)
+#define IDPF_CAP_TX_CSUM_L4V4 (\
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP |\
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP)
-#define IDPF_CAP_RX_CSUM_L4V6 (\
- VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP |\
- VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP)
+#define IDPF_CAP_TX_CSUM_L4V6 (\
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP |\
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP)
#define IDPF_CAP_RX_CSUM (\
VIRTCHNL2_CAP_RX_CSUM_L3_IPV4 |\
@@ -647,11 +764,9 @@ bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all,
VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP |\
VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP)
-#define IDPF_CAP_SCTP_CSUM (\
+#define IDPF_CAP_TX_SCTP_CSUM (\
VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP |\
- VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP |\
- VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP |\
- VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP)
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP)
#define IDPF_CAP_TUNNEL_TX_CSUM (\
VIRTCHNL2_CAP_TX_CSUM_L3_SINGLE_TUNNEL |\
@@ -667,6 +782,17 @@ static inline u16 idpf_get_reserved_vecs(struct idpf_adapter *adapter)
}
/**
+ * idpf_get_reserved_rdma_vecs - Get reserved RDMA vectors
+ * @adapter: private data struct
+ *
+ * Return: number of vectors reserved for RDMA
+ */
+static inline u16 idpf_get_reserved_rdma_vecs(struct idpf_adapter *adapter)
+{
+ return le16_to_cpu(adapter->caps.num_rdma_allocated_vectors);
+}
+
+/**
* idpf_get_default_vports - Get default number of vports
* @adapter: private data struct
*/
@@ -705,6 +831,34 @@ static inline u8 idpf_get_min_tx_pkt_len(struct idpf_adapter *adapter)
}
/**
+ * idpf_get_mbx_reg_addr - Get BAR0 mailbox register address
+ * @adapter: private data struct
+ * @reg_offset: register offset value
+ *
+ * Return: BAR0 mailbox register address based on register offset.
+ */
+static inline void __iomem *idpf_get_mbx_reg_addr(struct idpf_adapter *adapter,
+ resource_size_t reg_offset)
+{
+ return adapter->hw.mbx.vaddr + reg_offset;
+}
+
+/**
+ * idpf_get_rstat_reg_addr - Get BAR0 rstat register address
+ * @adapter: private data struct
+ * @reg_offset: register offset value
+ *
+ * Return: BAR0 rstat register address based on register offset.
+ */
+static inline void __iomem *idpf_get_rstat_reg_addr(struct idpf_adapter *adapter,
+ resource_size_t reg_offset)
+{
+ reg_offset -= adapter->dev_ops.static_reg_info[1].start;
+
+ return adapter->hw.rstat.vaddr + reg_offset;
+}
+
+/**
* idpf_get_reg_addr - Get BAR0 register address
* @adapter: private data struct
* @reg_offset: register offset value
@@ -714,7 +868,30 @@ static inline u8 idpf_get_min_tx_pkt_len(struct idpf_adapter *adapter)
static inline void __iomem *idpf_get_reg_addr(struct idpf_adapter *adapter,
resource_size_t reg_offset)
{
- return (void __iomem *)(adapter->hw.hw_addr + reg_offset);
+ struct idpf_hw *hw = &adapter->hw;
+
+ for (int i = 0; i < hw->num_lan_regs; i++) {
+ struct idpf_mmio_reg *region = &hw->lan_regs[i];
+
+ if (reg_offset >= region->addr_start &&
+ reg_offset < (region->addr_start + region->addr_len)) {
+ /* Convert the offset so that it is relative to the
+ * start of the region. Then add the base address of
+ * the region to get the final address.
+ */
+ reg_offset -= region->addr_start;
+
+ return region->vaddr + reg_offset;
+ }
+ }
+
+ /* It's impossible to hit this case with offsets from the CP. But if we
+ * do for any other reason, the kernel will panic on that register
+ * access. Might as well do it here to make it clear what's happening.
+ */
+ BUG();
+
+ return NULL;
}
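A concrete illustration of the region lookup above; the numbers are invented for the example and do not come from any device specification.

/* Example: with a region { .addr_start = 0x08000000, .addr_len = 0x1000,
 * .vaddr = B } in hw->lan_regs[], a Control Plane offset of 0x08000200
 * resolves to B + 0x200.  An offset that falls outside every region hits
 * the BUG() above instead of silently mis-mapping.
 */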
/**
@@ -728,7 +905,7 @@ static inline bool idpf_is_reset_detected(struct idpf_adapter *adapter)
if (!adapter->hw.arq)
return true;
- return !(readl(idpf_get_reg_addr(adapter, adapter->hw.arq->reg.len)) &
+ return !(readl(idpf_get_mbx_reg_addr(adapter, adapter->hw.arq->reg.len)) &
adapter->hw.arq->reg.len_mask);
}
@@ -814,6 +991,13 @@ static inline void idpf_vport_ctrl_unlock(struct net_device *netdev)
mutex_unlock(&np->adapter->vport_ctrl_lock);
}
+static inline bool idpf_vport_ctrl_is_locked(struct net_device *netdev)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+
+ return mutex_is_locked(&np->adapter->vport_ctrl_lock);
+}
+
void idpf_statistics_task(struct work_struct *work);
void idpf_init_task(struct work_struct *work);
void idpf_service_task(struct work_struct *work);
@@ -837,5 +1021,16 @@ int idpf_sriov_configure(struct pci_dev *pdev, int num_vfs);
u8 idpf_vport_get_hsplit(const struct idpf_vport *vport);
bool idpf_vport_set_hsplit(const struct idpf_vport *vport, u8 val);
-
+int idpf_idc_init(struct idpf_adapter *adapter);
+int idpf_idc_init_aux_core_dev(struct idpf_adapter *adapter,
+ enum iidc_function_type ftype);
+void idpf_idc_deinit_core_aux_device(struct iidc_rdma_core_dev_info *cdev_info);
+void idpf_idc_deinit_vport_aux_device(struct iidc_rdma_vport_dev_info *vdev_info);
+void idpf_idc_issue_reset_event(struct iidc_rdma_core_dev_info *cdev_info);
+void idpf_idc_vdev_mtu_event(struct iidc_rdma_vport_dev_info *vdev_info,
+ enum iidc_rdma_event_type event_type);
+
+int idpf_add_del_fsteer_filters(struct idpf_adapter *adapter,
+ struct virtchnl2_flow_rule_add_del *rule,
+ enum virtchnl2_op opcode);
#endif /* !_IDPF_H_ */
diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq.c b/drivers/net/ethernet/intel/idpf/idpf_controlq.c
index 4849590a5591..67894eda2d29 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_controlq.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_controlq.c
@@ -36,19 +36,19 @@ static void idpf_ctlq_init_regs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
{
/* Update tail to post pre-allocated buffers for rx queues */
if (is_rxq)
- wr32(hw, cq->reg.tail, (u32)(cq->ring_size - 1));
+ idpf_mbx_wr32(hw, cq->reg.tail, (u32)(cq->ring_size - 1));
/* For non-Mailbox control queues only TAIL need to be set */
if (cq->q_id != -1)
return;
/* Clear Head for both send or receive */
- wr32(hw, cq->reg.head, 0);
+ idpf_mbx_wr32(hw, cq->reg.head, 0);
/* set starting point */
- wr32(hw, cq->reg.bal, lower_32_bits(cq->desc_ring.pa));
- wr32(hw, cq->reg.bah, upper_32_bits(cq->desc_ring.pa));
- wr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask));
+ idpf_mbx_wr32(hw, cq->reg.bal, lower_32_bits(cq->desc_ring.pa));
+ idpf_mbx_wr32(hw, cq->reg.bah, upper_32_bits(cq->desc_ring.pa));
+ idpf_mbx_wr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask));
}
/**
@@ -96,7 +96,7 @@ static void idpf_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
*/
static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
- mutex_lock(&cq->cq_lock);
+ spin_lock(&cq->cq_lock);
/* free ring buffers and the ring itself */
idpf_ctlq_dealloc_ring_res(hw, cq);
@@ -104,8 +104,7 @@ static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
/* Set ring_size to 0 to indicate uninitialized queue */
cq->ring_size = 0;
- mutex_unlock(&cq->cq_lock);
- mutex_destroy(&cq->cq_lock);
+ spin_unlock(&cq->cq_lock);
}
/**
@@ -173,7 +172,7 @@ int idpf_ctlq_add(struct idpf_hw *hw,
idpf_ctlq_init_regs(hw, cq, is_rxq);
- mutex_init(&cq->cq_lock);
+ spin_lock_init(&cq->cq_lock);
list_add(&cq->cq_list, &hw->cq_list_head);
@@ -272,7 +271,7 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
int err = 0;
int i;
- mutex_lock(&cq->cq_lock);
+ spin_lock(&cq->cq_lock);
/* Ensure there are enough descriptors to send all messages */
num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);
@@ -329,10 +328,10 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
*/
dma_wmb();
- wr32(hw, cq->reg.tail, cq->next_to_use);
+ idpf_mbx_wr32(hw, cq->reg.tail, cq->next_to_use);
err_unlock:
- mutex_unlock(&cq->cq_lock);
+ spin_unlock(&cq->cq_lock);
return err;
}
@@ -364,7 +363,7 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
if (*clean_count > cq->ring_size)
return -EBADR;
- mutex_lock(&cq->cq_lock);
+ spin_lock(&cq->cq_lock);
ntc = cq->next_to_clean;
@@ -376,6 +375,9 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
if (!(le16_to_cpu(desc->flags) & IDPF_CTLQ_FLAG_DD))
break;
+ /* Ensure no other fields are read until DD flag is checked */
+ dma_rmb();
+
/* strip off FW internal code */
desc_err = le16_to_cpu(desc->ret_val) & 0xff;
@@ -394,7 +396,7 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
cq->next_to_clean = ntc;
- mutex_unlock(&cq->cq_lock);
+ spin_unlock(&cq->cq_lock);
/* Return number of descriptors actually cleaned */
*clean_count = i;
@@ -432,7 +434,7 @@ int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
if (*buff_count > 0)
buffs_avail = true;
- mutex_lock(&cq->cq_lock);
+ spin_lock(&cq->cq_lock);
if (tbp >= cq->ring_size)
tbp = 0;
@@ -518,10 +520,10 @@ post_buffs_out:
dma_wmb();
- wr32(hw, cq->reg.tail, cq->next_to_post);
+ idpf_mbx_wr32(hw, cq->reg.tail, cq->next_to_post);
}
- mutex_unlock(&cq->cq_lock);
+ spin_unlock(&cq->cq_lock);
/* return the number of buffers that were not posted */
*buff_count = *buff_count - i;
@@ -549,7 +551,7 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
u16 i;
/* take the lock before we start messing with the ring */
- mutex_lock(&cq->cq_lock);
+ spin_lock(&cq->cq_lock);
ntc = cq->next_to_clean;
@@ -563,6 +565,9 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
if (!(flags & IDPF_CTLQ_FLAG_DD))
break;
+ /* Ensure no other fields are read until DD flag is checked */
+ dma_rmb();
+
q_msg[i].vmvf_type = (flags &
(IDPF_CTLQ_FLAG_FTYPE_VM |
IDPF_CTLQ_FLAG_FTYPE_PF)) >>
@@ -608,7 +613,7 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
cq->next_to_clean = ntc;
- mutex_unlock(&cq->cq_lock);
+ spin_unlock(&cq->cq_lock);
*num_q_msg = i;
if (*num_q_msg == 0)
diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq.h b/drivers/net/ethernet/intel/idpf/idpf_controlq.h
index c1aba09e9856..de4ece40c2ff 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_controlq.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_controlq.h
@@ -94,12 +94,26 @@ struct idpf_mbxq_desc {
u32 pf_vf_id; /* used by CP when sending to PF */
};
+/* Max number of MMIO regions not including the mailbox and rstat regions in
+ * the fallback case when the whole bar is mapped.
+ */
+#define IDPF_MMIO_MAP_FALLBACK_MAX_REMAINING 3
+
+struct idpf_mmio_reg {
+ void __iomem *vaddr;
+ resource_size_t addr_start;
+ resource_size_t addr_len;
+};
+
/* Define the driver hardware struct to replace other control structs as needed
* Align to ctlq_hw_info
*/
struct idpf_hw {
- void __iomem *hw_addr;
- resource_size_t hw_addr_len;
+ struct idpf_mmio_reg mbx;
+ struct idpf_mmio_reg rstat;
+ /* Array of remaining LAN BAR regions */
+ int num_lan_regs;
+ struct idpf_mmio_reg *lan_regs;
struct idpf_adapter *back;
diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h b/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h
index e8e046ef2f0d..3414c5f9a831 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h
@@ -99,7 +99,7 @@ struct idpf_ctlq_info {
enum idpf_ctlq_type cq_type;
int q_id;
- struct mutex cq_lock; /* control queue lock */
+ spinlock_t cq_lock; /* control queue lock */
/* used for interrupt processing */
u16 next_to_use;
u16 next_to_clean;
@@ -123,9 +123,12 @@ struct idpf_ctlq_info {
/**
* enum idpf_mbx_opc - PF/VF mailbox commands
* @idpf_mbq_opc_send_msg_to_cp: used by PF or VF to send a message to its CP
+ * @idpf_mbq_opc_send_msg_to_peer_drv: used by PF or VF to send a message to
+ * any peer driver
*/
enum idpf_mbx_opc {
idpf_mbq_opc_send_msg_to_cp = 0x0801,
+ idpf_mbq_opc_send_msg_to_peer_drv = 0x0804,
};
/* API supported for control queue management */
diff --git a/drivers/net/ethernet/intel/idpf/idpf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_dev.c
index 3df9935685e9..3a04a6bd0d7c 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_dev.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_dev.c
@@ -4,15 +4,19 @@
#include "idpf.h"
#include "idpf_lan_pf_regs.h"
#include "idpf_virtchnl.h"
+#include "idpf_ptp.h"
#define IDPF_PF_ITR_IDX_SPACING 0x4
/**
* idpf_ctlq_reg_init - initialize default mailbox registers
+ * @adapter: adapter structure
* @cq: pointer to the array of create control queues
*/
-static void idpf_ctlq_reg_init(struct idpf_ctlq_create_info *cq)
+static void idpf_ctlq_reg_init(struct idpf_adapter *adapter,
+ struct idpf_ctlq_create_info *cq)
{
+ resource_size_t mbx_start = adapter->dev_ops.static_reg_info[0].start;
int i;
for (i = 0; i < IDPF_NUM_DFLT_MBX_Q; i++) {
@@ -21,22 +25,22 @@ static void idpf_ctlq_reg_init(struct idpf_ctlq_create_info *cq)
switch (ccq->type) {
case IDPF_CTLQ_TYPE_MAILBOX_TX:
/* set head and tail registers in our local struct */
- ccq->reg.head = PF_FW_ATQH;
- ccq->reg.tail = PF_FW_ATQT;
- ccq->reg.len = PF_FW_ATQLEN;
- ccq->reg.bah = PF_FW_ATQBAH;
- ccq->reg.bal = PF_FW_ATQBAL;
+ ccq->reg.head = PF_FW_ATQH - mbx_start;
+ ccq->reg.tail = PF_FW_ATQT - mbx_start;
+ ccq->reg.len = PF_FW_ATQLEN - mbx_start;
+ ccq->reg.bah = PF_FW_ATQBAH - mbx_start;
+ ccq->reg.bal = PF_FW_ATQBAL - mbx_start;
ccq->reg.len_mask = PF_FW_ATQLEN_ATQLEN_M;
ccq->reg.len_ena_mask = PF_FW_ATQLEN_ATQENABLE_M;
ccq->reg.head_mask = PF_FW_ATQH_ATQH_M;
break;
case IDPF_CTLQ_TYPE_MAILBOX_RX:
/* set head and tail registers in our local struct */
- ccq->reg.head = PF_FW_ARQH;
- ccq->reg.tail = PF_FW_ARQT;
- ccq->reg.len = PF_FW_ARQLEN;
- ccq->reg.bah = PF_FW_ARQBAH;
- ccq->reg.bal = PF_FW_ARQBAL;
+ ccq->reg.head = PF_FW_ARQH - mbx_start;
+ ccq->reg.tail = PF_FW_ARQT - mbx_start;
+ ccq->reg.len = PF_FW_ARQLEN - mbx_start;
+ ccq->reg.bah = PF_FW_ARQBAH - mbx_start;
+ ccq->reg.bal = PF_FW_ARQBAL - mbx_start;
ccq->reg.len_mask = PF_FW_ARQLEN_ARQLEN_M;
ccq->reg.len_ena_mask = PF_FW_ARQLEN_ARQENABLE_M;
ccq->reg.head_mask = PF_FW_ARQH_ARQH_M;
@@ -73,7 +77,7 @@ static int idpf_intr_reg_init(struct idpf_vport *vport)
int num_vecs = vport->num_q_vectors;
struct idpf_vec_regs *reg_vals;
int num_regs, i, err = 0;
- u32 rx_itr, tx_itr;
+ u32 rx_itr, tx_itr, val;
u16 total_vecs;
total_vecs = idpf_get_reserved_vecs(vport->adapter);
@@ -97,8 +101,13 @@ static int idpf_intr_reg_init(struct idpf_vport *vport)
intr->dyn_ctl = idpf_get_reg_addr(adapter,
reg_vals[vec_id].dyn_ctl_reg);
intr->dyn_ctl_intena_m = PF_GLINT_DYN_CTL_INTENA_M;
+ intr->dyn_ctl_intena_msk_m = PF_GLINT_DYN_CTL_INTENA_MSK_M;
intr->dyn_ctl_itridx_s = PF_GLINT_DYN_CTL_ITR_INDX_S;
intr->dyn_ctl_intrvl_s = PF_GLINT_DYN_CTL_INTERVAL_S;
+ intr->dyn_ctl_wb_on_itr_m = PF_GLINT_DYN_CTL_WB_ON_ITR_M;
+ intr->dyn_ctl_swint_trig_m = PF_GLINT_DYN_CTL_SWINT_TRIG_M;
+ intr->dyn_ctl_sw_itridx_ena_m =
+ PF_GLINT_DYN_CTL_SW_ITR_INDX_ENA_M;
spacing = IDPF_ITR_IDX_SPACING(reg_vals[vec_id].itrn_index_spacing,
IDPF_PF_ITR_IDX_SPACING);
@@ -112,6 +121,15 @@ static int idpf_intr_reg_init(struct idpf_vport *vport)
intr->tx_itr = idpf_get_reg_addr(adapter, tx_itr);
}
+ /* Data vector for NOIRQ queues */
+
+ val = reg_vals[vport->q_vector_idxs[i] - IDPF_MBX_Q_VEC].dyn_ctl_reg;
+ vport->noirq_dyn_ctl = idpf_get_reg_addr(adapter, val);
+
+ val = PF_GLINT_DYN_CTL_WB_ON_ITR_M | PF_GLINT_DYN_CTL_INTENA_MSK_M |
+ FIELD_PREP(PF_GLINT_DYN_CTL_ITR_INDX_M, IDPF_NO_ITR_UPDATE_IDX);
+ vport->noirq_dyn_ctl_ena = val;
+
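A hedged reading of the register value composed above, based on how related Intel drivers use these DYN_CTL bits; the patch itself does not spell this out.

/* WB_ON_ITR asks for descriptor write-back when the ITR expires,
 * INTENA_MSK keeps the vector from actually asserting an interrupt, and
 * selecting IDPF_NO_ITR_UPDATE_IDX avoids disturbing a live ITR, so queues
 * assigned to the NOIRQ vector get completions written back without ever
 * raising an interrupt.
 */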
free_reg_vals:
kfree(reg_vals);
@@ -124,7 +142,7 @@ free_reg_vals:
*/
static void idpf_reset_reg_init(struct idpf_adapter *adapter)
{
- adapter->reset_reg.rstat = idpf_get_reg_addr(adapter, PFGEN_RSTAT);
+ adapter->reset_reg.rstat = idpf_get_rstat_reg_addr(adapter, PFGEN_RSTAT);
adapter->reset_reg.rstat_m = PFGEN_RSTAT_PFR_STATE_M;
}
@@ -138,9 +156,32 @@ static void idpf_trigger_reset(struct idpf_adapter *adapter,
{
u32 reset_reg;
- reset_reg = readl(idpf_get_reg_addr(adapter, PFGEN_CTRL));
+ reset_reg = readl(idpf_get_rstat_reg_addr(adapter, PFGEN_CTRL));
writel(reset_reg | PFGEN_CTRL_PFSWR,
- idpf_get_reg_addr(adapter, PFGEN_CTRL));
+ idpf_get_rstat_reg_addr(adapter, PFGEN_CTRL));
+}
+
+/**
+ * idpf_ptp_reg_init - Initialize required registers
+ * @adapter: Driver specific private structure
+ *
+ * Set the bits required for enabling shtime and cmd execution
+ */
+static void idpf_ptp_reg_init(const struct idpf_adapter *adapter)
+{
+ adapter->ptp->cmd.shtime_enable_mask = PF_GLTSYN_CMD_SYNC_SHTIME_EN_M;
+ adapter->ptp->cmd.exec_cmd_mask = PF_GLTSYN_CMD_SYNC_EXEC_CMD_M;
+}
+
+/**
+ * idpf_idc_register - register for IDC callbacks
+ * @adapter: Driver specific private structure
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int idpf_idc_register(struct idpf_adapter *adapter)
+{
+ return idpf_idc_init_aux_core_dev(adapter, IIDC_FUNCTION_TYPE_PF);
}
/**
@@ -154,6 +195,7 @@ static void idpf_reg_ops_init(struct idpf_adapter *adapter)
adapter->dev_ops.reg_ops.mb_intr_reg_init = idpf_mb_intr_reg_init;
adapter->dev_ops.reg_ops.reset_reg_init = idpf_reset_reg_init;
adapter->dev_ops.reg_ops.trigger_reset = idpf_trigger_reset;
+ adapter->dev_ops.reg_ops.ptp_reg_init = idpf_ptp_reg_init;
}
/**
@@ -163,4 +205,11 @@ static void idpf_reg_ops_init(struct idpf_adapter *adapter)
void idpf_dev_ops_init(struct idpf_adapter *adapter)
{
idpf_reg_ops_init(adapter);
+
+ adapter->dev_ops.idc_init = idpf_idc_register;
+
+ resource_set_range(&adapter->dev_ops.static_reg_info[0],
+ PF_FW_BASE, IDPF_PF_MBX_REGION_SZ);
+ resource_set_range(&adapter->dev_ops.static_reg_info[1],
+ PFGEN_RTRIG, IDPF_PF_RSTAT_REGION_SZ);
}
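A brief note on the two static_reg_info entries populated above; the resource_set_range() semantics are taken from <linux/ioport.h>, and the usage summary is the editor's reading of this patch.

/* resource_set_range(res, start, size) makes 'res' span
 * [start, start + size - 1].  Entry 0 records the PF mailbox window
 * (PF_FW_BASE), which idpf_ctlq_reg_init() uses to rebase the ATQ/ARQ
 * register offsets; entry 1 records the reset/rstat window (PFGEN_RTRIG)
 * consumed by idpf_get_rstat_reg_addr().
 */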
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
index 986d429d1175..2589e124e41c 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
@@ -2,6 +2,27 @@
/* Copyright (C) 2023 Intel Corporation */
#include "idpf.h"
+#include "idpf_ptp.h"
+#include "idpf_virtchnl.h"
+
+/**
+ * idpf_get_rx_ring_count - get RX ring count
+ * @netdev: network interface device structure
+ *
+ * Return: number of RX rings.
+ */
+static u32 idpf_get_rx_ring_count(struct net_device *netdev)
+{
+ struct idpf_vport *vport;
+ u32 num_rxq;
+
+ idpf_vport_ctrl_lock(netdev);
+ vport = idpf_netdev_to_vport(netdev);
+ num_rxq = vport->num_rxq;
+ idpf_vport_ctrl_unlock(netdev);
+
+ return num_rxq;
+}
/**
* idpf_get_rxnfc - command to get RX flow classification rules
@@ -12,26 +33,309 @@
* Returns Success if the command is supported.
*/
static int idpf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
- u32 __always_unused *rule_locs)
+ u32 *rule_locs)
{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_vport_user_config_data *user_config;
+ struct idpf_fsteer_fltr *f;
struct idpf_vport *vport;
+ unsigned int cnt = 0;
+ int err = 0;
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
+ user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = vport->num_rxq;
- idpf_vport_ctrl_unlock(netdev);
-
- return 0;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = user_config->num_fsteer_fltrs;
+ cmd->data = idpf_fsteer_max_rules(vport);
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ err = -EINVAL;
+ list_for_each_entry(f, &user_config->flow_steer_list, list)
+ if (f->loc == cmd->fs.location) {
+ cmd->fs.ring_cookie = f->q_index;
+ err = 0;
+ break;
+ }
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ cmd->data = idpf_fsteer_max_rules(vport);
+ list_for_each_entry(f, &user_config->flow_steer_list, list) {
+ if (cnt == cmd->rule_cnt) {
+ err = -EMSGSIZE;
+ break;
+ }
+ rule_locs[cnt] = f->loc;
+ cnt++;
+ }
+ if (!err)
+ cmd->rule_cnt = user_config->num_fsteer_fltrs;
+ break;
default:
break;
}
idpf_vport_ctrl_unlock(netdev);
- return -EOPNOTSUPP;
+ return err;
+}
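A short editorial note tying two parts of this patch together (an inference from the diff, not a statement in the changelog):

/* The old ETHTOOL_GRXRINGS case is gone because the RX ring count is now
 * reported through the dedicated idpf_get_rx_ring_count() callback added
 * earlier in this file and wired up as .get_rx_ring_count below.
 */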
+
+static void idpf_fsteer_fill_ipv4(struct virtchnl2_proto_hdrs *hdrs,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ struct iphdr *iph;
+
+ hdrs->proto_hdr[0].hdr_type = cpu_to_le32(VIRTCHNL2_PROTO_HDR_IPV4);
+
+ iph = (struct iphdr *)hdrs->proto_hdr[0].buffer_spec;
+ iph->saddr = fsp->h_u.tcp_ip4_spec.ip4src;
+ iph->daddr = fsp->h_u.tcp_ip4_spec.ip4dst;
+
+ iph = (struct iphdr *)hdrs->proto_hdr[0].buffer_mask;
+ iph->saddr = fsp->m_u.tcp_ip4_spec.ip4src;
+ iph->daddr = fsp->m_u.tcp_ip4_spec.ip4dst;
+}
+
+static void idpf_fsteer_fill_udp(struct virtchnl2_proto_hdrs *hdrs,
+ struct ethtool_rx_flow_spec *fsp,
+ bool v4)
+{
+ struct udphdr *udph, *udpm;
+
+ hdrs->proto_hdr[1].hdr_type = cpu_to_le32(VIRTCHNL2_PROTO_HDR_UDP);
+
+ udph = (struct udphdr *)hdrs->proto_hdr[1].buffer_spec;
+ udpm = (struct udphdr *)hdrs->proto_hdr[1].buffer_mask;
+
+ if (v4) {
+ udph->source = fsp->h_u.udp_ip4_spec.psrc;
+ udph->dest = fsp->h_u.udp_ip4_spec.pdst;
+ udpm->source = fsp->m_u.udp_ip4_spec.psrc;
+ udpm->dest = fsp->m_u.udp_ip4_spec.pdst;
+ } else {
+ udph->source = fsp->h_u.udp_ip6_spec.psrc;
+ udph->dest = fsp->h_u.udp_ip6_spec.pdst;
+ udpm->source = fsp->m_u.udp_ip6_spec.psrc;
+ udpm->dest = fsp->m_u.udp_ip6_spec.pdst;
+ }
+}
+
+static void idpf_fsteer_fill_tcp(struct virtchnl2_proto_hdrs *hdrs,
+ struct ethtool_rx_flow_spec *fsp,
+ bool v4)
+{
+ struct tcphdr *tcph, *tcpm;
+
+ hdrs->proto_hdr[1].hdr_type = cpu_to_le32(VIRTCHNL2_PROTO_HDR_TCP);
+
+ tcph = (struct tcphdr *)hdrs->proto_hdr[1].buffer_spec;
+ tcpm = (struct tcphdr *)hdrs->proto_hdr[1].buffer_mask;
+
+ if (v4) {
+ tcph->source = fsp->h_u.tcp_ip4_spec.psrc;
+ tcph->dest = fsp->h_u.tcp_ip4_spec.pdst;
+ tcpm->source = fsp->m_u.tcp_ip4_spec.psrc;
+ tcpm->dest = fsp->m_u.tcp_ip4_spec.pdst;
+ } else {
+ tcph->source = fsp->h_u.tcp_ip6_spec.psrc;
+ tcph->dest = fsp->h_u.tcp_ip6_spec.pdst;
+ tcpm->source = fsp->m_u.tcp_ip6_spec.psrc;
+ tcpm->dest = fsp->m_u.tcp_ip6_spec.pdst;
+ }
+}
+
+/**
+ * idpf_add_flow_steer - add a Flow Steering filter
+ * @netdev: network interface device structure
+ * @cmd: command to add Flow Steering filter
+ *
+ * Return: 0 on success and negative values for failure
+ */
+static int idpf_add_flow_steer(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct idpf_fsteer_fltr *fltr, *parent = NULL, *f;
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_vport_user_config_data *user_config;
+ struct ethtool_rx_flow_spec *fsp = &cmd->fs;
+ struct virtchnl2_flow_rule_add_del *rule;
+ struct idpf_vport_config *vport_config;
+ struct virtchnl2_rule_action_set *acts;
+ struct virtchnl2_flow_rule_info *info;
+ struct virtchnl2_proto_hdrs *hdrs;
+ struct idpf_vport *vport;
+ u32 flow_type, q_index;
+ u16 num_rxq;
+ int err;
+
+ vport = idpf_netdev_to_vport(netdev);
+ vport_config = vport->adapter->vport_config[np->vport_idx];
+ user_config = &vport_config->user_config;
+ num_rxq = user_config->num_req_rx_qs;
+
+ flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
+ if (flow_type != fsp->flow_type)
+ return -EINVAL;
+
+ if (!idpf_sideband_action_ena(vport, fsp) ||
+ !idpf_sideband_flow_type_ena(vport, flow_type))
+ return -EOPNOTSUPP;
+
+ if (user_config->num_fsteer_fltrs > idpf_fsteer_max_rules(vport))
+ return -ENOSPC;
+
+ q_index = fsp->ring_cookie;
+ if (q_index >= num_rxq)
+ return -EINVAL;
+
+ rule = kzalloc(struct_size(rule, rule_info, 1), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ rule->vport_id = cpu_to_le32(vport->vport_id);
+ rule->count = cpu_to_le32(1);
+ info = &rule->rule_info[0];
+ info->rule_id = cpu_to_le32(fsp->location);
+
+ hdrs = &info->rule_cfg.proto_hdrs;
+ hdrs->tunnel_level = 0;
+ hdrs->count = cpu_to_le32(2);
+
+ acts = &info->rule_cfg.action_set;
+ acts->count = cpu_to_le32(1);
+ acts->actions[0].action_type = cpu_to_le32(VIRTCHNL2_ACTION_QUEUE);
+ acts->actions[0].act_conf.q_id = cpu_to_le32(q_index);
+
+ switch (flow_type) {
+ case UDP_V4_FLOW:
+ idpf_fsteer_fill_ipv4(hdrs, fsp);
+ idpf_fsteer_fill_udp(hdrs, fsp, true);
+ break;
+ case TCP_V4_FLOW:
+ idpf_fsteer_fill_ipv4(hdrs, fsp);
+ idpf_fsteer_fill_tcp(hdrs, fsp, true);
+ break;
+ default:
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = idpf_add_del_fsteer_filters(vport->adapter, rule,
+ VIRTCHNL2_OP_ADD_FLOW_RULE);
+ if (err)
+ goto out;
+
+ if (info->status != cpu_to_le32(VIRTCHNL2_FLOW_RULE_SUCCESS)) {
+ err = -EIO;
+ goto out;
+ }
+
+ fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
+ if (!fltr) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ fltr->loc = fsp->location;
+ fltr->q_index = q_index;
+ list_for_each_entry(f, &user_config->flow_steer_list, list) {
+ if (f->loc >= fltr->loc)
+ break;
+ parent = f;
+ }
+
+ parent ? list_add(&fltr->list, &parent->list) :
+ list_add(&fltr->list, &user_config->flow_steer_list);
+
+ user_config->num_fsteer_fltrs++;
+
+out:
+ kfree(rule);
+ return err;
+}
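A brief note on the ordered insertion above (editor's commentary, not patch text):

/* list_add(new, head) links 'new' immediately after 'head', so adding the
 * filter after the last entry whose ->loc is smaller keeps flow_steer_list
 * sorted by rule location, and ETHTOOL_GRXCLSRLALL therefore reports
 * rule_locs[] in ascending order.
 */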
+
+/**
+ * idpf_del_flow_steer - delete a Flow Steering filter
+ * @netdev: network interface device structure
+ * @cmd: command to add Flow Steering filter
+ *
+ * Return: 0 on success and negative values for failure
+ */
+static int idpf_del_flow_steer(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_vport_user_config_data *user_config;
+ struct ethtool_rx_flow_spec *fsp = &cmd->fs;
+ struct virtchnl2_flow_rule_add_del *rule;
+ struct idpf_vport_config *vport_config;
+ struct virtchnl2_flow_rule_info *info;
+ struct idpf_fsteer_fltr *f, *iter;
+ struct idpf_vport *vport;
+ int err;
+
+ vport = idpf_netdev_to_vport(netdev);
+ vport_config = vport->adapter->vport_config[np->vport_idx];
+ user_config = &vport_config->user_config;
+
+ if (!idpf_sideband_action_ena(vport, fsp))
+ return -EOPNOTSUPP;
+
+ rule = kzalloc(struct_size(rule, rule_info, 1), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ rule->vport_id = cpu_to_le32(vport->vport_id);
+ rule->count = cpu_to_le32(1);
+ info = &rule->rule_info[0];
+ info->rule_id = cpu_to_le32(fsp->location);
+
+ err = idpf_add_del_fsteer_filters(vport->adapter, rule,
+ VIRTCHNL2_OP_DEL_FLOW_RULE);
+ if (err)
+ goto out;
+
+ if (info->status != cpu_to_le32(VIRTCHNL2_FLOW_RULE_SUCCESS)) {
+ err = -EIO;
+ goto out;
+ }
+
+ list_for_each_entry_safe(f, iter,
+ &user_config->flow_steer_list, list) {
+ if (f->loc == fsp->location) {
+ list_del(&f->list);
+ kfree(f);
+ user_config->num_fsteer_fltrs--;
+ goto out;
+ }
+ }
+ err = -EINVAL;
+
+out:
+ kfree(rule);
+ return err;
+}
+
+static int idpf_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
+{
+ int ret = -EOPNOTSUPP;
+
+ idpf_vport_ctrl_lock(netdev);
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ ret = idpf_add_flow_steer(netdev, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = idpf_del_flow_steer(netdev, cmd);
+ break;
+ default:
+ break;
+ }
+
+ idpf_vport_ctrl_unlock(netdev);
+ return ret;
}
/**
@@ -46,7 +350,7 @@ static u32 idpf_get_rxfh_key_size(struct net_device *netdev)
struct idpf_vport_user_config_data *user_config;
if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
- return -EOPNOTSUPP;
+ return 0;
user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
@@ -65,7 +369,7 @@ static u32 idpf_get_rxfh_indir_size(struct net_device *netdev)
struct idpf_vport_user_config_data *user_config;
if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
- return -EOPNOTSUPP;
+ return 0;
user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
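A hedged note on why these two hunks return 0 rather than -EOPNOTSUPP (editor's reasoning from the u32 return types shown in the hunk headers):

/* Both callbacks return u32, so a negative errno would be read by the
 * ethtool core as an enormous key/indirection table size; returning 0 is
 * the usual way to signal that RSS is not supported here.
 */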
@@ -98,7 +402,7 @@ static int idpf_get_rxfh(struct net_device *netdev,
}
rss_data = &adapter->vport_config[np->vport_idx]->user_config.rss_data;
- if (np->state != __IDPF_VPORT_UP)
+ if (!test_bit(IDPF_VPORT_UP, np->state))
goto unlock_mutex;
rxfh->hfunc = ETH_RSS_HASH_TOP;
@@ -148,7 +452,7 @@ static int idpf_set_rxfh(struct net_device *netdev,
}
rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
- if (np->state != __IDPF_VPORT_UP)
+ if (!test_bit(IDPF_VPORT_UP, np->state))
goto unlock_mutex;
if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
@@ -222,14 +526,19 @@ static int idpf_set_channels(struct net_device *netdev,
struct ethtool_channels *ch)
{
struct idpf_vport_config *vport_config;
- u16 combined, num_txq, num_rxq;
unsigned int num_req_tx_q;
unsigned int num_req_rx_q;
struct idpf_vport *vport;
+ u16 num_txq, num_rxq;
struct device *dev;
int err = 0;
u16 idx;
+ if (ch->rx_count && ch->tx_count) {
+ netdev_err(netdev, "Dedicated RX or TX channels cannot be used simultaneously\n");
+ return -EINVAL;
+ }
+
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
@@ -239,20 +548,6 @@ static int idpf_set_channels(struct net_device *netdev,
num_txq = vport_config->user_config.num_req_tx_qs;
num_rxq = vport_config->user_config.num_req_rx_qs;
- combined = min(num_txq, num_rxq);
-
- /* these checks are for cases where user didn't specify a particular
- * value on cmd line but we get non-zero value anyway via
- * get_channels(); look at ethtool.c in ethtool repository (the user
- * space part), particularly, do_schannels() routine
- */
- if (ch->combined_count == combined)
- ch->combined_count = 0;
- if (ch->combined_count && ch->rx_count == num_rxq - combined)
- ch->rx_count = 0;
- if (ch->combined_count && ch->tx_count == num_txq - combined)
- ch->tx_count = 0;
-
num_req_tx_q = ch->combined_count + ch->tx_count;
num_req_rx_q = ch->combined_count + ch->rx_count;
@@ -376,7 +671,8 @@ static int idpf_set_ringparam(struct net_device *netdev,
new_tx_count);
if (new_tx_count == vport->txq_desc_count &&
- new_rx_count == vport->rxq_desc_count)
+ new_rx_count == vport->rxq_desc_count &&
+ kring->tcp_data_split == idpf_vport_get_hsplit(vport))
goto unlock_mutex;
if (!idpf_vport_set_hsplit(vport, kring->tcp_data_split)) {
@@ -445,22 +741,24 @@ struct idpf_stats {
.stat_offset = offsetof(_type, _stat) \
}
-/* Helper macro for defining some statistics related to queues */
-#define IDPF_QUEUE_STAT(_name, _stat) \
- IDPF_STAT(struct idpf_queue, _name, _stat)
+/* Helper macros for defining some statistics related to queues */
+#define IDPF_RX_QUEUE_STAT(_name, _stat) \
+ IDPF_STAT(struct idpf_rx_queue, _name, _stat)
+#define IDPF_TX_QUEUE_STAT(_name, _stat) \
+ IDPF_STAT(struct idpf_tx_queue, _name, _stat)
/* Stats associated with a Tx queue */
static const struct idpf_stats idpf_gstrings_tx_queue_stats[] = {
- IDPF_QUEUE_STAT("pkts", q_stats.tx.packets),
- IDPF_QUEUE_STAT("bytes", q_stats.tx.bytes),
- IDPF_QUEUE_STAT("lso_pkts", q_stats.tx.lso_pkts),
+ IDPF_TX_QUEUE_STAT("pkts", q_stats.packets),
+ IDPF_TX_QUEUE_STAT("bytes", q_stats.bytes),
+ IDPF_TX_QUEUE_STAT("lso_pkts", q_stats.lso_pkts),
};
/* Stats associated with an Rx queue */
static const struct idpf_stats idpf_gstrings_rx_queue_stats[] = {
- IDPF_QUEUE_STAT("pkts", q_stats.rx.packets),
- IDPF_QUEUE_STAT("bytes", q_stats.rx.bytes),
- IDPF_QUEUE_STAT("rx_gro_hw_pkts", q_stats.rx.rsc_pkts),
+ IDPF_RX_QUEUE_STAT("pkts", q_stats.packets),
+ IDPF_RX_QUEUE_STAT("bytes", q_stats.bytes),
+ IDPF_RX_QUEUE_STAT("rx_gro_hw_pkts", q_stats.rsc_pkts),
};
#define IDPF_TX_QUEUE_STATS_LEN ARRAY_SIZE(idpf_gstrings_tx_queue_stats)
@@ -571,8 +869,6 @@ static void idpf_get_stat_strings(struct net_device *netdev, u8 *data)
for (i = 0; i < vport_config->max_q.max_rxq; i++)
idpf_add_qstat_strings(&data, idpf_gstrings_rx_queue_stats,
"rx", i);
-
- page_pool_ethtool_stats_get_strings(data);
}
/**
@@ -606,7 +902,6 @@ static int idpf_get_sset_count(struct net_device *netdev, int sset)
struct idpf_netdev_priv *np = netdev_priv(netdev);
struct idpf_vport_config *vport_config;
u16 max_txq, max_rxq;
- unsigned int size;
if (sset != ETH_SS_STATS)
return -EINVAL;
@@ -625,11 +920,8 @@ static int idpf_get_sset_count(struct net_device *netdev, int sset)
max_txq = vport_config->max_q.max_txq;
max_rxq = vport_config->max_q.max_rxq;
- size = IDPF_PORT_STATS_LEN + (IDPF_TX_QUEUE_STATS_LEN * max_txq) +
+ return IDPF_PORT_STATS_LEN + (IDPF_TX_QUEUE_STATS_LEN * max_txq) +
(IDPF_RX_QUEUE_STATS_LEN * max_rxq);
- size += page_pool_ethtool_stats_get_count();
-
- return size;
}
/**
@@ -641,7 +933,7 @@ static int idpf_get_sset_count(struct net_device *netdev, int sset)
* Copies the stat data defined by the pointer and stat structure pair into
* the memory supplied as data. If the pointer is null, data will be zero'd.
*/
-static void idpf_add_one_ethtool_stat(u64 *data, void *pstat,
+static void idpf_add_one_ethtool_stat(u64 *data, const void *pstat,
const struct idpf_stats *stat)
{
char *p;
@@ -679,6 +971,7 @@ static void idpf_add_one_ethtool_stat(u64 *data, void *pstat,
* idpf_add_queue_stats - copy queue statistics into supplied buffer
* @data: ethtool stats buffer
* @q: the queue to copy
+ * @type: type of the queue
*
* Queue statistics must be copied while protected by u64_stats_fetch_begin,
* so we can't directly use idpf_add_ethtool_stats. Assumes that queue stats
@@ -689,19 +982,23 @@ static void idpf_add_one_ethtool_stat(u64 *data, void *pstat,
*
* This function expects to be called while under rcu_read_lock().
*/
-static void idpf_add_queue_stats(u64 **data, struct idpf_queue *q)
+static void idpf_add_queue_stats(u64 **data, const void *q,
+ enum virtchnl2_queue_type type)
{
+ const struct u64_stats_sync *stats_sync;
const struct idpf_stats *stats;
unsigned int start;
unsigned int size;
unsigned int i;
- if (q->q_type == VIRTCHNL2_QUEUE_TYPE_RX) {
+ if (type == VIRTCHNL2_QUEUE_TYPE_RX) {
size = IDPF_RX_QUEUE_STATS_LEN;
stats = idpf_gstrings_rx_queue_stats;
+ stats_sync = &((const struct idpf_rx_queue *)q)->stats_sync;
} else {
size = IDPF_TX_QUEUE_STATS_LEN;
stats = idpf_gstrings_tx_queue_stats;
+ stats_sync = &((const struct idpf_tx_queue *)q)->stats_sync;
}
/* To avoid invalid statistics values, ensure that we keep retrying
@@ -709,10 +1006,10 @@ static void idpf_add_queue_stats(u64 **data, struct idpf_queue *q)
* u64_stats_fetch_retry.
*/
do {
- start = u64_stats_fetch_begin(&q->stats_sync);
+ start = u64_stats_fetch_begin(stats_sync);
for (i = 0; i < size; i++)
idpf_add_one_ethtool_stat(&(*data)[i], q, &stats[i]);
- } while (u64_stats_fetch_retry(&q->stats_sync, start));
+ } while (u64_stats_fetch_retry(stats_sync, start));
/* Once we successfully copy the stats in, update the data pointer */
*data += size;
@@ -801,7 +1098,7 @@ static void idpf_collect_queue_stats(struct idpf_vport *vport)
for (j = 0; j < num_rxq; j++) {
u64 hw_csum_err, hsplit, hsplit_hbo, bad_descs;
struct idpf_rx_queue_stats *stats;
- struct idpf_queue *rxq;
+ struct idpf_rx_queue *rxq;
unsigned int start;
if (idpf_is_queue_model_split(vport->rxq_model))
@@ -815,7 +1112,7 @@ static void idpf_collect_queue_stats(struct idpf_vport *vport)
do {
start = u64_stats_fetch_begin(&rxq->stats_sync);
- stats = &rxq->q_stats.rx;
+ stats = &rxq->q_stats;
hw_csum_err = u64_stats_read(&stats->hw_csum_err);
hsplit = u64_stats_read(&stats->hsplit_pkts);
hsplit_hbo = u64_stats_read(&stats->hsplit_buf_ovf);
@@ -836,7 +1133,7 @@ static void idpf_collect_queue_stats(struct idpf_vport *vport)
for (j = 0; j < txq_grp->num_txq; j++) {
u64 linearize, qbusy, skb_drops, dma_map_errs;
- struct idpf_queue *txq = txq_grp->txqs[j];
+ struct idpf_tx_queue *txq = txq_grp->txqs[j];
struct idpf_tx_queue_stats *stats;
unsigned int start;
@@ -846,7 +1143,7 @@ static void idpf_collect_queue_stats(struct idpf_vport *vport)
do {
start = u64_stats_fetch_begin(&txq->stats_sync);
- stats = &txq->q_stats.tx;
+ stats = &txq->q_stats;
linearize = u64_stats_read(&stats->linearize);
qbusy = u64_stats_read(&stats->q_busy);
skb_drops = u64_stats_read(&stats->skb_drops);
@@ -877,7 +1174,6 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
{
struct idpf_netdev_priv *np = netdev_priv(netdev);
struct idpf_vport_config *vport_config;
- struct page_pool_stats pp_stats = { };
struct idpf_vport *vport;
unsigned int total = 0;
unsigned int i, j;
@@ -887,7 +1183,7 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
- if (np->state != __IDPF_VPORT_UP) {
+ if (!test_bit(IDPF_VPORT_UP, np->state)) {
idpf_vport_ctrl_unlock(netdev);
return;
@@ -904,12 +1200,12 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
qtype = VIRTCHNL2_QUEUE_TYPE_TX;
for (j = 0; j < txq_grp->num_txq; j++, total++) {
- struct idpf_queue *txq = txq_grp->txqs[j];
+ struct idpf_tx_queue *txq = txq_grp->txqs[j];
if (!txq)
idpf_add_empty_queue_stats(&data, qtype);
else
- idpf_add_queue_stats(&data, txq);
+ idpf_add_queue_stats(&data, txq, qtype);
}
}
@@ -937,7 +1233,7 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
num_rxq = rxq_grp->singleq.num_rxq;
for (j = 0; j < num_rxq; j++, total++) {
- struct idpf_queue *rxq;
+ struct idpf_rx_queue *rxq;
if (is_splitq)
rxq = &rxq_grp->splitq.rxq_sets[j]->rxq;
@@ -946,93 +1242,77 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
if (!rxq)
idpf_add_empty_queue_stats(&data, qtype);
else
- idpf_add_queue_stats(&data, rxq);
-
- /* In splitq mode, don't get page pool stats here since
- * the pools are attached to the buffer queues
- */
- if (is_splitq)
- continue;
-
- if (rxq)
- page_pool_get_stats(rxq->pp, &pp_stats);
- }
- }
-
- for (i = 0; i < vport->num_rxq_grp; i++) {
- for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
- struct idpf_queue *rxbufq =
- &vport->rxq_grps[i].splitq.bufq_sets[j].bufq;
-
- page_pool_get_stats(rxbufq->pp, &pp_stats);
+ idpf_add_queue_stats(&data, rxq, qtype);
}
}
for (; total < vport_config->max_q.max_rxq; total++)
idpf_add_empty_queue_stats(&data, VIRTCHNL2_QUEUE_TYPE_RX);
- page_pool_ethtool_stats_get(data, &pp_stats);
-
rcu_read_unlock();
idpf_vport_ctrl_unlock(netdev);
}
/**
- * idpf_find_rxq - find rxq from q index
+ * idpf_find_rxq_vec - find rxq vector from q index
* @vport: virtual port associated to queue
* @q_num: q index used to find queue
*
- * returns pointer to rx queue
+ * returns pointer to rx vector
*/
-static struct idpf_queue *idpf_find_rxq(struct idpf_vport *vport, int q_num)
+struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport,
+ u32 q_num)
{
int q_grp, q_idx;
if (!idpf_is_queue_model_split(vport->rxq_model))
- return vport->rxq_grps->singleq.rxqs[q_num];
+ return vport->rxq_grps->singleq.rxqs[q_num]->q_vector;
q_grp = q_num / IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
q_idx = q_num % IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
- return &vport->rxq_grps[q_grp].splitq.rxq_sets[q_idx]->rxq;
+ return vport->rxq_grps[q_grp].splitq.rxq_sets[q_idx]->rxq.q_vector;
}
/**
- * idpf_find_txq - find txq from q index
+ * idpf_find_txq_vec - find txq vector from q index
* @vport: virtual port associated to queue
* @q_num: q index used to find queue
*
- * returns pointer to tx queue
+ * returns pointer to tx vector
*/
-static struct idpf_queue *idpf_find_txq(struct idpf_vport *vport, int q_num)
+struct idpf_q_vector *idpf_find_txq_vec(const struct idpf_vport *vport,
+ u32 q_num)
{
int q_grp;
if (!idpf_is_queue_model_split(vport->txq_model))
- return vport->txqs[q_num];
+ return vport->txqs[q_num]->q_vector;
q_grp = q_num / IDPF_DFLT_SPLITQ_TXQ_PER_GROUP;
- return vport->txq_grps[q_grp].complq;
+ return vport->txq_grps[q_grp].complq->q_vector;
}
/**
* __idpf_get_q_coalesce - get ITR values for specific queue
* @ec: ethtool structure to fill with driver's coalesce settings
- * @q: quuee of Rx or Tx
+ * @q_vector: queue vector corresponding to this queue
+ * @type: queue type
*/
static void __idpf_get_q_coalesce(struct ethtool_coalesce *ec,
- struct idpf_queue *q)
+ const struct idpf_q_vector *q_vector,
+ enum virtchnl2_queue_type type)
{
- if (q->q_type == VIRTCHNL2_QUEUE_TYPE_RX) {
+ if (type == VIRTCHNL2_QUEUE_TYPE_RX) {
ec->use_adaptive_rx_coalesce =
- IDPF_ITR_IS_DYNAMIC(q->q_vector->rx_intr_mode);
- ec->rx_coalesce_usecs = q->q_vector->rx_itr_value;
+ IDPF_ITR_IS_DYNAMIC(q_vector->rx_intr_mode);
+ ec->rx_coalesce_usecs = q_vector->rx_itr_value;
} else {
ec->use_adaptive_tx_coalesce =
- IDPF_ITR_IS_DYNAMIC(q->q_vector->tx_intr_mode);
- ec->tx_coalesce_usecs = q->q_vector->tx_itr_value;
+ IDPF_ITR_IS_DYNAMIC(q_vector->tx_intr_mode);
+ ec->tx_coalesce_usecs = q_vector->tx_itr_value;
}
}
@@ -1048,14 +1328,14 @@ static int idpf_get_q_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec,
u32 q_num)
{
- struct idpf_netdev_priv *np = netdev_priv(netdev);
- struct idpf_vport *vport;
+ const struct idpf_netdev_priv *np = netdev_priv(netdev);
+ const struct idpf_vport *vport;
int err = 0;
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
- if (np->state != __IDPF_VPORT_UP)
+ if (!test_bit(IDPF_VPORT_UP, np->state))
goto unlock_mutex;
if (q_num >= vport->num_rxq && q_num >= vport->num_txq) {
@@ -1064,10 +1344,12 @@ static int idpf_get_q_coalesce(struct net_device *netdev,
}
if (q_num < vport->num_rxq)
- __idpf_get_q_coalesce(ec, idpf_find_rxq(vport, q_num));
+ __idpf_get_q_coalesce(ec, idpf_find_rxq_vec(vport, q_num),
+ VIRTCHNL2_QUEUE_TYPE_RX);
if (q_num < vport->num_txq)
- __idpf_get_q_coalesce(ec, idpf_find_txq(vport, q_num));
+ __idpf_get_q_coalesce(ec, idpf_find_txq_vec(vport, q_num),
+ VIRTCHNL2_QUEUE_TYPE_TX);
unlock_mutex:
idpf_vport_ctrl_unlock(netdev);
@@ -1111,16 +1393,17 @@ static int idpf_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
/**
* __idpf_set_q_coalesce - set ITR values for specific queue
* @ec: ethtool structure from user to update ITR settings
- * @q: queue for which itr values has to be set
+ * @q_coal: per queue coalesce settings
+ * @qv: queue vector for which ITR values have to be set
* @is_rxq: is queue type rx
*
* Returns 0 on success, negative otherwise.
*/
-static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec,
- struct idpf_queue *q, bool is_rxq)
+static int __idpf_set_q_coalesce(const struct ethtool_coalesce *ec,
+ struct idpf_q_coalesce *q_coal,
+ struct idpf_q_vector *qv, bool is_rxq)
{
u32 use_adaptive_coalesce, coalesce_usecs;
- struct idpf_q_vector *qv = q->q_vector;
bool is_dim_ena = false;
u16 itr_val;
@@ -1136,7 +1419,7 @@ static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec,
itr_val = qv->tx_itr_value;
}
if (coalesce_usecs != itr_val && use_adaptive_coalesce) {
- netdev_err(q->vport->netdev, "Cannot set coalesce usecs if adaptive enabled\n");
+ netdev_err(qv->vport->netdev, "Cannot set coalesce usecs if adaptive enabled\n");
return -EINVAL;
}
@@ -1145,7 +1428,7 @@ static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec,
return 0;
if (coalesce_usecs > IDPF_ITR_MAX) {
- netdev_err(q->vport->netdev,
+ netdev_err(qv->vport->netdev,
"Invalid value, %d-usecs range is 0-%d\n",
coalesce_usecs, IDPF_ITR_MAX);
@@ -1154,27 +1437,32 @@ static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec,
if (coalesce_usecs % 2) {
coalesce_usecs--;
- netdev_info(q->vport->netdev,
+ netdev_info(qv->vport->netdev,
"HW only supports even ITR values, ITR rounded to %d\n",
coalesce_usecs);
}
if (is_rxq) {
qv->rx_itr_value = coalesce_usecs;
+ q_coal->rx_coalesce_usecs = coalesce_usecs;
if (use_adaptive_coalesce) {
qv->rx_intr_mode = IDPF_ITR_DYNAMIC;
+ q_coal->rx_intr_mode = IDPF_ITR_DYNAMIC;
} else {
qv->rx_intr_mode = !IDPF_ITR_DYNAMIC;
- idpf_vport_intr_write_itr(qv, qv->rx_itr_value,
- false);
+ q_coal->rx_intr_mode = !IDPF_ITR_DYNAMIC;
+ idpf_vport_intr_write_itr(qv, coalesce_usecs, false);
}
} else {
qv->tx_itr_value = coalesce_usecs;
+ q_coal->tx_coalesce_usecs = coalesce_usecs;
if (use_adaptive_coalesce) {
qv->tx_intr_mode = IDPF_ITR_DYNAMIC;
+ q_coal->tx_intr_mode = IDPF_ITR_DYNAMIC;
} else {
qv->tx_intr_mode = !IDPF_ITR_DYNAMIC;
- idpf_vport_intr_write_itr(qv, qv->tx_itr_value, true);
+ q_coal->tx_intr_mode = !IDPF_ITR_DYNAMIC;
+ idpf_vport_intr_write_itr(qv, coalesce_usecs, true);
}
}
@@ -1187,21 +1475,24 @@ static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec,
/**
* idpf_set_q_coalesce - set ITR values for specific queue
* @vport: vport associated to the queue that need updating
+ * @q_coal: per queue coalesce settings
* @ec: coalesce settings to program the device with
* @q_num: update ITR/INTRL (coalesce) settings for this queue number/index
* @is_rxq: is queue type rx
*
* Return 0 on success, and negative on failure
*/
-static int idpf_set_q_coalesce(struct idpf_vport *vport,
- struct ethtool_coalesce *ec,
+static int idpf_set_q_coalesce(const struct idpf_vport *vport,
+ struct idpf_q_coalesce *q_coal,
+ const struct ethtool_coalesce *ec,
int q_num, bool is_rxq)
{
- struct idpf_queue *q;
+ struct idpf_q_vector *qv;
- q = is_rxq ? idpf_find_rxq(vport, q_num) : idpf_find_txq(vport, q_num);
+ qv = is_rxq ? idpf_find_rxq_vec(vport, q_num) :
+ idpf_find_txq_vec(vport, q_num);
- if (q && __idpf_set_q_coalesce(ec, q, is_rxq))
+ if (qv && __idpf_set_q_coalesce(ec, q_coal, qv, is_rxq))
return -EINVAL;
return 0;
@@ -1222,23 +1513,29 @@ static int idpf_set_coalesce(struct net_device *netdev,
struct netlink_ext_ack *extack)
{
struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_vport_user_config_data *user_config;
+ struct idpf_q_coalesce *q_coal;
struct idpf_vport *vport;
int i, err = 0;
+ user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
+
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
- if (np->state != __IDPF_VPORT_UP)
+ if (!test_bit(IDPF_VPORT_UP, np->state))
goto unlock_mutex;
for (i = 0; i < vport->num_txq; i++) {
- err = idpf_set_q_coalesce(vport, ec, i, false);
+ q_coal = &user_config->q_coalesce[i];
+ err = idpf_set_q_coalesce(vport, q_coal, ec, i, false);
if (err)
goto unlock_mutex;
}
for (i = 0; i < vport->num_rxq; i++) {
- err = idpf_set_q_coalesce(vport, ec, i, true);
+ q_coal = &user_config->q_coalesce[i];
+ err = idpf_set_q_coalesce(vport, q_coal, ec, i, true);
if (err)
goto unlock_mutex;
}
@@ -1260,20 +1557,25 @@ unlock_mutex:
static int idpf_set_per_q_coalesce(struct net_device *netdev, u32 q_num,
struct ethtool_coalesce *ec)
{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_vport_user_config_data *user_config;
+ struct idpf_q_coalesce *q_coal;
struct idpf_vport *vport;
int err;
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
+ user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
+ q_coal = &user_config->q_coalesce[q_num];
- err = idpf_set_q_coalesce(vport, ec, q_num, false);
+ err = idpf_set_q_coalesce(vport, q_coal, ec, q_num, false);
if (err) {
idpf_vport_ctrl_unlock(netdev);
return err;
}
- err = idpf_set_q_coalesce(vport, ec, q_num, true);
+ err = idpf_set_q_coalesce(vport, q_coal, ec, q_num, true);
idpf_vport_ctrl_unlock(netdev);
@@ -1318,27 +1620,142 @@ static void idpf_set_msglevel(struct net_device *netdev, u32 data)
static int idpf_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
- struct idpf_vport *vport;
-
- idpf_vport_ctrl_lock(netdev);
- vport = idpf_netdev_to_vport(netdev);
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
ethtool_link_ksettings_zero_link_mode(cmd, supported);
cmd->base.autoneg = AUTONEG_DISABLE;
cmd->base.port = PORT_NONE;
- if (vport->link_up) {
+ if (netif_carrier_ok(netdev)) {
cmd->base.duplex = DUPLEX_FULL;
- cmd->base.speed = vport->link_speed_mbps;
+ cmd->base.speed = np->link_speed_mbps;
} else {
cmd->base.duplex = DUPLEX_UNKNOWN;
cmd->base.speed = SPEED_UNKNOWN;
}
- idpf_vport_ctrl_unlock(netdev);
-
return 0;
}
+/**
+ * idpf_get_timestamp_filters - Get the supported timestamping mode
+ * @vport: Virtual port structure
+ * @info: ethtool timestamping info structure
+ *
+ * Get the Tx/Rx timestamp filters.
+ */
+static void idpf_get_timestamp_filters(const struct idpf_vport *vport,
+ struct kernel_ethtool_ts_info *info)
+{
+ info->so_timestamping = SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->tx_types = BIT(HWTSTAMP_TX_OFF);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
+
+ if (!vport->tx_tstamp_caps ||
+ vport->adapter->ptp->tx_tstamp_access == IDPF_PTP_NONE)
+ return;
+
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE;
+
+ info->tx_types |= BIT(HWTSTAMP_TX_ON);
+}
+
+/**
+ * idpf_get_ts_info - Get device PHC association
+ * @netdev: network interface device structure
+ * @info: ethtool timestamping info structure
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_get_ts_info(struct net_device *netdev,
+ struct kernel_ethtool_ts_info *info)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_vport *vport;
+ int err = 0;
+
+ if (!mutex_trylock(&np->adapter->vport_ctrl_lock))
+ return -EBUSY;
+
+ vport = idpf_netdev_to_vport(netdev);
+
+ if (!vport->adapter->ptp) {
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+
+ if (idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_PTP) &&
+ vport->adapter->ptp->clock) {
+ info->phc_index = ptp_clock_index(vport->adapter->ptp->clock);
+ idpf_get_timestamp_filters(vport, info);
+ } else {
+ pci_dbg(vport->adapter->pdev, "PTP clock not detected\n");
+ err = ethtool_op_get_ts_info(netdev, info);
+ }
+
+unlock:
+ mutex_unlock(&np->adapter->vport_ctrl_lock);
+
+ return err;
+}
+
+/**
+ * idpf_get_ts_stats - Collect HW tstamping statistics
+ * @netdev: network interface device structure
+ * @ts_stats: HW timestamping stats structure
+ *
+ * Collect HW timestamping statistics: packets successfully timestamped,
+ * packets discarded due to illegal timestamp values, packets flushed while
+ * releasing PTP, and packets skipped because no free timestamp index was
+ * available.
+ */
+static void idpf_get_ts_stats(struct net_device *netdev,
+ struct ethtool_ts_stats *ts_stats)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_vport *vport;
+ unsigned int start;
+
+ idpf_vport_ctrl_lock(netdev);
+ vport = idpf_netdev_to_vport(netdev);
+ do {
+ start = u64_stats_fetch_begin(&vport->tstamp_stats.stats_sync);
+ ts_stats->pkts = u64_stats_read(&vport->tstamp_stats.packets);
+ ts_stats->lost = u64_stats_read(&vport->tstamp_stats.flushed);
+ ts_stats->err = u64_stats_read(&vport->tstamp_stats.discarded);
+ } while (u64_stats_fetch_retry(&vport->tstamp_stats.stats_sync, start));
+
+ if (!test_bit(IDPF_VPORT_UP, np->state))
+ goto exit;
+
+ for (u16 i = 0; i < vport->num_txq_grp; i++) {
+ struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
+
+ for (u16 j = 0; j < txq_grp->num_txq; j++) {
+ struct idpf_tx_queue *txq = txq_grp->txqs[j];
+ struct idpf_tx_queue_stats *stats;
+ u64 ts;
+
+ if (!txq)
+ continue;
+
+ stats = &txq->q_stats;
+ do {
+ start = u64_stats_fetch_begin(&txq->stats_sync);
+
+ ts = u64_stats_read(&stats->tstamp_skipped);
+ } while (u64_stats_fetch_retry(&txq->stats_sync,
+ start));
+
+ ts_stats->lost += ts;
+ }
+ }
+
+exit:
+ idpf_vport_ctrl_unlock(netdev);
+}
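A short summary of the accounting above (editor's reading of the code):

/* ->pkts and ->err come straight from the vport-level tstamp_stats
 * counters, while ->lost starts from the vport-level 'flushed' count and,
 * when the vport is up, additionally accumulates each Tx queue's
 * tstamp_skipped counter, covering both flush-on-release and
 * "no free timestamp index" cases.
 */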
+
static const struct ethtool_ops idpf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE,
@@ -1355,6 +1772,8 @@ static const struct ethtool_ops idpf_ethtool_ops = {
.get_sset_count = idpf_get_sset_count,
.get_channels = idpf_get_channels,
.get_rxnfc = idpf_get_rxnfc,
+ .set_rxnfc = idpf_set_rxnfc,
+ .get_rx_ring_count = idpf_get_rx_ring_count,
.get_rxfh_key_size = idpf_get_rxfh_key_size,
.get_rxfh_indir_size = idpf_get_rxfh_indir_size,
.get_rxfh = idpf_get_rxfh,
@@ -1363,6 +1782,8 @@ static const struct ethtool_ops idpf_ethtool_ops = {
.get_ringparam = idpf_get_ringparam,
.set_ringparam = idpf_set_ringparam,
.get_link_ksettings = idpf_get_link_ksettings,
+ .get_ts_info = idpf_get_ts_info,
+ .get_ts_stats = idpf_get_ts_stats,
};
/**
diff --git a/drivers/net/ethernet/intel/idpf/idpf_idc.c b/drivers/net/ethernet/intel/idpf/idpf_idc.c
new file mode 100644
index 000000000000..7e20a07e98e5
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_idc.c
@@ -0,0 +1,503 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2025 Intel Corporation */
+
+#include <linux/export.h>
+
+#include "idpf.h"
+#include "idpf_virtchnl.h"
+
+static DEFINE_IDA(idpf_idc_ida);
+
+#define IDPF_IDC_MAX_ADEV_NAME_LEN 15
+
+/**
+ * idpf_idc_init - Called to initialize IDC
+ * @adapter: driver private data structure
+ *
+ * Return: 0 on success or when the capability is not enabled, error code on failure.
+ */
+int idpf_idc_init(struct idpf_adapter *adapter)
+{
+ int err;
+
+ if (!idpf_is_rdma_cap_ena(adapter) ||
+ !adapter->dev_ops.idc_init)
+ return 0;
+
+ err = adapter->dev_ops.idc_init(adapter);
+ if (err)
+ dev_err(&adapter->pdev->dev, "failed to initialize idc: %d\n",
+ err);
+
+ return err;
+}
+
+/**
+ * idpf_vport_adev_release - function to be mapped to aux dev's release op
+ * @dev: pointer to device to free
+ */
+static void idpf_vport_adev_release(struct device *dev)
+{
+ struct iidc_rdma_vport_auxiliary_dev *iadev;
+
+ iadev = container_of(dev, struct iidc_rdma_vport_auxiliary_dev, adev.dev);
+ kfree(iadev);
+ iadev = NULL;
+}
+
+/**
+ * idpf_plug_vport_aux_dev - allocate and register a vport Auxiliary device
+ * @cdev_info: IDC core device info pointer
+ * @vdev_info: IDC vport device info pointer
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int idpf_plug_vport_aux_dev(struct iidc_rdma_core_dev_info *cdev_info,
+ struct iidc_rdma_vport_dev_info *vdev_info)
+{
+ struct iidc_rdma_vport_auxiliary_dev *iadev;
+ char name[IDPF_IDC_MAX_ADEV_NAME_LEN];
+ struct auxiliary_device *adev;
+ int ret;
+
+ iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
+ if (!iadev)
+ return -ENOMEM;
+
+ adev = &iadev->adev;
+ vdev_info->adev = &iadev->adev;
+ iadev->vdev_info = vdev_info;
+
+ ret = ida_alloc(&idpf_idc_ida, GFP_KERNEL);
+ if (ret < 0) {
+ pr_err("failed to allocate unique device ID for Auxiliary driver\n");
+ goto err_ida_alloc;
+ }
+ adev->id = ret;
+ adev->dev.release = idpf_vport_adev_release;
+ adev->dev.parent = &cdev_info->pdev->dev;
+ sprintf(name, "%04x.rdma.vdev", cdev_info->pdev->vendor);
+ adev->name = name;
+
+ ret = auxiliary_device_init(adev);
+ if (ret)
+ goto err_aux_dev_init;
+
+ ret = auxiliary_device_add(adev);
+ if (ret)
+ goto err_aux_dev_add;
+
+ return 0;
+
+err_aux_dev_add:
+ auxiliary_device_uninit(adev);
+err_aux_dev_init:
+ ida_free(&idpf_idc_ida, adev->id);
+err_ida_alloc:
+ vdev_info->adev = NULL;
+ kfree(iadev);
+
+ return ret;
+}
+
+/**
+ * idpf_idc_init_aux_vport_dev - initialize vport Auxiliary Device(s)
+ * @vport: virtual port data struct
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int idpf_idc_init_aux_vport_dev(struct idpf_vport *vport)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct iidc_rdma_vport_dev_info *vdev_info;
+ struct iidc_rdma_core_dev_info *cdev_info;
+ struct virtchnl2_create_vport *vport_msg;
+ int err;
+
+ vport_msg = (struct virtchnl2_create_vport *)
+ adapter->vport_params_recvd[vport->idx];
+
+ if (!(le16_to_cpu(vport_msg->vport_flags) & VIRTCHNL2_VPORT_ENABLE_RDMA))
+ return 0;
+
+ vport->vdev_info = kzalloc(sizeof(*vdev_info), GFP_KERNEL);
+ if (!vport->vdev_info)
+ return -ENOMEM;
+
+ cdev_info = vport->adapter->cdev_info;
+
+ vdev_info = vport->vdev_info;
+ vdev_info->vport_id = vport->vport_id;
+ vdev_info->netdev = vport->netdev;
+ vdev_info->core_adev = cdev_info->adev;
+
+ err = idpf_plug_vport_aux_dev(cdev_info, vdev_info);
+ if (err) {
+ vport->vdev_info = NULL;
+ kfree(vdev_info);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * idpf_idc_vdev_mtu_event - Function to handle IDC vport MTU change events
+ * @vdev_info: IDC vport device info pointer
+ * @event_type: type of event to pass to handler
+ */
+void idpf_idc_vdev_mtu_event(struct iidc_rdma_vport_dev_info *vdev_info,
+ enum iidc_rdma_event_type event_type)
+{
+ struct iidc_rdma_vport_auxiliary_drv *iadrv;
+ struct iidc_rdma_event event = { };
+ struct auxiliary_device *adev;
+
+ if (!vdev_info)
+ /* RDMA is not enabled */
+ return;
+
+ set_bit(event_type, event.type);
+
+ device_lock(&vdev_info->adev->dev);
+ adev = vdev_info->adev;
+ if (!adev || !adev->dev.driver)
+ goto unlock;
+ iadrv = container_of(adev->dev.driver,
+ struct iidc_rdma_vport_auxiliary_drv,
+ adrv.driver);
+ if (iadrv->event_handler)
+ iadrv->event_handler(vdev_info, &event);
+unlock:
+ device_unlock(&vdev_info->adev->dev);
+}
+
+/**
+ * idpf_core_adev_release - function to be mapped to aux dev's release op
+ * @dev: pointer to device to free
+ */
+static void idpf_core_adev_release(struct device *dev)
+{
+ struct iidc_rdma_core_auxiliary_dev *iadev;
+
+ iadev = container_of(dev, struct iidc_rdma_core_auxiliary_dev, adev.dev);
+ kfree(iadev);
+ iadev = NULL;
+}
+
+/**
+ * idpf_plug_core_aux_dev - allocate and register an Auxiliary device
+ * @cdev_info: IDC core device info pointer
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int idpf_plug_core_aux_dev(struct iidc_rdma_core_dev_info *cdev_info)
+{
+ struct iidc_rdma_core_auxiliary_dev *iadev;
+ char name[IDPF_IDC_MAX_ADEV_NAME_LEN];
+ struct auxiliary_device *adev;
+ int ret;
+
+ iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
+ if (!iadev)
+ return -ENOMEM;
+
+ adev = &iadev->adev;
+ cdev_info->adev = adev;
+ iadev->cdev_info = cdev_info;
+
+ ret = ida_alloc(&idpf_idc_ida, GFP_KERNEL);
+ if (ret < 0) {
+ pr_err("failed to allocate unique device ID for Auxiliary driver\n");
+ goto err_ida_alloc;
+ }
+ adev->id = ret;
+ adev->dev.release = idpf_core_adev_release;
+ adev->dev.parent = &cdev_info->pdev->dev;
+ sprintf(name, "%04x.rdma.core", cdev_info->pdev->vendor);
+ adev->name = name;
+
+ ret = auxiliary_device_init(adev);
+ if (ret)
+ goto err_aux_dev_init;
+
+ ret = auxiliary_device_add(adev);
+ if (ret)
+ goto err_aux_dev_add;
+
+ return 0;
+
+err_aux_dev_add:
+ auxiliary_device_uninit(adev);
+err_aux_dev_init:
+ ida_free(&idpf_idc_ida, adev->id);
+err_ida_alloc:
+ cdev_info->adev = NULL;
+ kfree(iadev);
+
+ return ret;
+}
+
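/*
 * Illustrative sketch, not part of this patch: a consumer RDMA driver binds
 * to the "%04x.rdma.core" device plugged above by matching "<module>.<name>",
 * e.g. "idpf.8086.rdma.core" for an Intel (0x8086) function. All example_*
 * names below are hypothetical.
 */
static int example_rdma_probe(struct auxiliary_device *adev,
			      const struct auxiliary_device_id *id)
{
	/* container_of(adev, struct iidc_rdma_core_auxiliary_dev, adev)
	 * gives access to cdev_info (pdev, MSI-X data, mapped regions).
	 */
	return 0;
}

static void example_rdma_remove(struct auxiliary_device *adev)
{
}

static const struct auxiliary_device_id example_rdma_id_table[] = {
	{ .name = "idpf.8086.rdma.core" },
	{ }
};

static struct auxiliary_driver example_rdma_adrv = {
	.probe = example_rdma_probe,
	.remove = example_rdma_remove,
	.id_table = example_rdma_id_table,
};
/* registered via auxiliary_driver_register(&example_rdma_adrv) */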
+/**
+ * idpf_unplug_aux_dev - unregister and free an Auxiliary device
+ * @adev: auxiliary device struct
+ */
+static void idpf_unplug_aux_dev(struct auxiliary_device *adev)
+{
+ if (!adev)
+ return;
+
+ ida_free(&idpf_idc_ida, adev->id);
+
+ auxiliary_device_delete(adev);
+ auxiliary_device_uninit(adev);
+}
+
+/**
+ * idpf_idc_issue_reset_event - Function to handle reset IDC event
+ * @cdev_info: IDC core device info pointer
+ */
+void idpf_idc_issue_reset_event(struct iidc_rdma_core_dev_info *cdev_info)
+{
+ enum iidc_rdma_event_type event_type = IIDC_RDMA_EVENT_WARN_RESET;
+ struct iidc_rdma_core_auxiliary_drv *iadrv;
+ struct iidc_rdma_event event = { };
+ struct auxiliary_device *adev;
+
+ if (!cdev_info)
+ /* RDMA is not enabled */
+ return;
+
+ set_bit(event_type, event.type);
+
+ device_lock(&cdev_info->adev->dev);
+
+ adev = cdev_info->adev;
+ if (!adev || !adev->dev.driver)
+ goto unlock;
+
+ iadrv = container_of(adev->dev.driver,
+ struct iidc_rdma_core_auxiliary_drv,
+ adrv.driver);
+ if (iadrv->event_handler)
+ iadrv->event_handler(cdev_info, &event);
+unlock:
+ device_unlock(&cdev_info->adev->dev);
+}
+
+/**
+ * idpf_idc_vport_dev_up - called when CORE is ready for vport aux devs
+ * @adapter: private data struct
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int idpf_idc_vport_dev_up(struct idpf_adapter *adapter)
+{
+ int i, err = 0;
+
+ for (i = 0; i < adapter->num_alloc_vports; i++) {
+ struct idpf_vport *vport = adapter->vports[i];
+
+ if (!vport)
+ continue;
+
+ if (!vport->vdev_info)
+ err = idpf_idc_init_aux_vport_dev(vport);
+ else
+ err = idpf_plug_vport_aux_dev(vport->adapter->cdev_info,
+ vport->vdev_info);
+ }
+
+ return err;
+}
+
+/**
+ * idpf_idc_vport_dev_down - called when CORE is leaving vport aux dev support state
+ * @adapter: private data struct
+ */
+static void idpf_idc_vport_dev_down(struct idpf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_alloc_vports; i++) {
+ struct idpf_vport *vport = adapter->vports[i];
+
+ if (!vport)
+ continue;
+
+ idpf_unplug_aux_dev(vport->vdev_info->adev);
+ vport->vdev_info->adev = NULL;
+ }
+}
+
+/**
+ * idpf_idc_vport_dev_ctrl - Called by an Auxiliary Driver
+ * @cdev_info: IDC core device info pointer
+ * @up: RDMA core driver status
+ *
+ * This callback function is accessed by an Auxiliary Driver to indicate
+ * whether the core driver is ready to support vport driver load or whether
+ * vport drivers need to be taken down.
+ *
+ * Return: 0 on success or error code on failure.
+ */
+int idpf_idc_vport_dev_ctrl(struct iidc_rdma_core_dev_info *cdev_info, bool up)
+{
+ struct idpf_adapter *adapter = pci_get_drvdata(cdev_info->pdev);
+
+ if (up)
+ return idpf_idc_vport_dev_up(adapter);
+
+ idpf_idc_vport_dev_down(adapter);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(idpf_idc_vport_dev_ctrl);
+
+/**
+ * idpf_idc_request_reset - Called by an Auxiliary Driver
+ * @cdev_info: IDC core device info pointer
+ * @reset_type: function, core or other
+ *
+ * This callback function is accessed by an Auxiliary Driver to request a reset
+ * on the Auxiliary Device.
+ *
+ * Return: 0 on success or error code on failure.
+ */
+int idpf_idc_request_reset(struct iidc_rdma_core_dev_info *cdev_info,
+ enum iidc_rdma_reset_type __always_unused reset_type)
+{
+ struct idpf_adapter *adapter = pci_get_drvdata(cdev_info->pdev);
+
+ if (!idpf_is_reset_in_prog(adapter)) {
+ set_bit(IDPF_HR_FUNC_RESET, adapter->flags);
+ queue_delayed_work(adapter->vc_event_wq,
+ &adapter->vc_event_task,
+ msecs_to_jiffies(10));
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(idpf_idc_request_reset);
+
+/**
+ * idpf_idc_init_msix_data - initialize MSIX data for the cdev_info structure
+ * @adapter: driver private data structure
+ */
+static void
+idpf_idc_init_msix_data(struct idpf_adapter *adapter)
+{
+ struct iidc_rdma_core_dev_info *cdev_info;
+ struct iidc_rdma_priv_dev_info *privd;
+
+ if (!adapter->rdma_msix_entries)
+ return;
+
+ cdev_info = adapter->cdev_info;
+ privd = cdev_info->iidc_priv;
+
+ privd->msix_entries = adapter->rdma_msix_entries;
+ privd->msix_count = adapter->num_rdma_msix_entries;
+}
+
+/**
+ * idpf_idc_init_aux_core_dev - initialize Auxiliary Device(s)
+ * @adapter: driver private data structure
+ * @ftype: PF or VF
+ *
+ * Return: 0 on success or error code on failure.
+ */
+int idpf_idc_init_aux_core_dev(struct idpf_adapter *adapter,
+ enum iidc_function_type ftype)
+{
+ struct iidc_rdma_core_dev_info *cdev_info;
+ struct iidc_rdma_priv_dev_info *privd;
+ int err, i;
+
+ adapter->cdev_info = kzalloc(sizeof(*cdev_info), GFP_KERNEL);
+ if (!adapter->cdev_info)
+ return -ENOMEM;
+ cdev_info = adapter->cdev_info;
+
+ privd = kzalloc(sizeof(*privd), GFP_KERNEL);
+ if (!privd) {
+ err = -ENOMEM;
+ goto err_privd_alloc;
+ }
+
+ cdev_info->iidc_priv = privd;
+ cdev_info->pdev = adapter->pdev;
+ cdev_info->rdma_protocol = IIDC_RDMA_PROTOCOL_ROCEV2;
+ privd->ftype = ftype;
+
+ privd->mapped_mem_regions =
+ kcalloc(adapter->hw.num_lan_regs,
+ sizeof(struct iidc_rdma_lan_mapped_mem_region),
+ GFP_KERNEL);
+ if (!privd->mapped_mem_regions) {
+ err = -ENOMEM;
+ goto err_plug_aux_dev;
+ }
+
+ privd->num_memory_regions = cpu_to_le16(adapter->hw.num_lan_regs);
+ for (i = 0; i < adapter->hw.num_lan_regs; i++) {
+ privd->mapped_mem_regions[i].region_addr =
+ adapter->hw.lan_regs[i].vaddr;
+ privd->mapped_mem_regions[i].size =
+ cpu_to_le64(adapter->hw.lan_regs[i].addr_len);
+ privd->mapped_mem_regions[i].start_offset =
+ cpu_to_le64(adapter->hw.lan_regs[i].addr_start);
+ }
+
+ idpf_idc_init_msix_data(adapter);
+
+ err = idpf_plug_core_aux_dev(cdev_info);
+ if (err)
+ goto err_free_mem_regions;
+
+ return 0;
+
+err_free_mem_regions:
+ kfree(privd->mapped_mem_regions);
+ privd->mapped_mem_regions = NULL;
+err_plug_aux_dev:
+ kfree(privd);
+err_privd_alloc:
+ kfree(cdev_info);
+ adapter->cdev_info = NULL;
+
+ return err;
+}
+
+/**
+ * idpf_idc_deinit_core_aux_device - de-initialize Auxiliary Device(s)
+ * @cdev_info: IDC core device info pointer
+ */
+void idpf_idc_deinit_core_aux_device(struct iidc_rdma_core_dev_info *cdev_info)
+{
+ struct iidc_rdma_priv_dev_info *privd;
+
+ if (!cdev_info)
+ return;
+
+ idpf_unplug_aux_dev(cdev_info->adev);
+
+ privd = cdev_info->iidc_priv;
+ kfree(privd->mapped_mem_regions);
+ kfree(privd);
+ kfree(cdev_info);
+}
+
+/**
+ * idpf_idc_deinit_vport_aux_device - de-initialize Auxiliary Device(s)
+ * @vdev_info: IDC vport device info pointer
+ */
+void idpf_idc_deinit_vport_aux_device(struct iidc_rdma_vport_dev_info *vdev_info)
+{
+ if (!vdev_info)
+ return;
+
+ idpf_unplug_aux_dev(vdev_info->adev);
+
+ kfree(vdev_info);
+}
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h b/drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h
index 24edb8a6ec2e..cc9aa2b6a14a 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h
@@ -53,6 +53,10 @@
#define PF_FW_ATQH_ATQH_M GENMASK(9, 0)
#define PF_FW_ATQT (PF_FW_BASE + 0x24)
+/* Timesync registers */
+#define PF_GLTSYN_CMD_SYNC_EXEC_CMD_M GENMASK(1, 0)
+#define PF_GLTSYN_CMD_SYNC_SHTIME_EN_M BIT(2)
+
/* Interrupts */
#define PF_GLINT_BASE 0x08900000
#define PF_GLINT_DYN_CTL(_INT) (PF_GLINT_BASE + ((_INT) * 0x1000))
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h
index a5752dcab888..20d5af64e750 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h
@@ -4,6 +4,8 @@
#ifndef _IDPF_LAN_TXRX_H_
#define _IDPF_LAN_TXRX_H_
+#include <linux/bits.h>
+
enum idpf_rss_hash {
IDPF_HASH_INVALID = 0,
/* Values 1 - 28 are reserved for future use */
@@ -184,13 +186,17 @@ struct idpf_base_tx_desc {
__le64 qw1; /* type_cmd_offset_bsz_l2tag1 */
}; /* read used with buffer queues */
-struct idpf_splitq_tx_compl_desc {
+struct idpf_splitq_4b_tx_compl_desc {
/* qid=[10:0] comptype=[13:11] rsvd=[14] gen=[15] */
__le16 qid_comptype_gen;
union {
__le16 q_head; /* Queue head */
__le16 compl_tag; /* Completion tag */
} q_head_compl_tag;
+}; /* writeback used with completion queues */
+
+struct idpf_splitq_tx_compl_desc {
+ struct idpf_splitq_4b_tx_compl_desc common;
u8 ts[3];
u8 rsvd; /* Reserved */
}; /* writeback used with completion queues */
@@ -280,7 +286,18 @@ struct idpf_flex_tx_tso_ctx_qw {
u8 flex;
};
-struct idpf_flex_tx_ctx_desc {
+union idpf_flex_tx_ctx_desc {
+ /* DTYPE = IDPF_TX_DESC_DTYPE_CTX (0x01) */
+ struct {
+ __le64 qw0;
+#define IDPF_TX_CTX_L2TAG2_M GENMASK_ULL(47, 32)
+ __le64 qw1;
+#define IDPF_TX_CTX_DTYPE_M GENMASK_ULL(3, 0)
+#define IDPF_TX_CTX_CMD_M GENMASK_ULL(15, 4)
+#define IDPF_TX_CTX_TSYN_REG_M GENMASK_ULL(47, 30)
+#define IDPF_TX_CTX_MSS_M GENMASK_ULL(63, 50)
+ } tsyn;
+
/* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX (0x05) */
struct {
struct idpf_flex_tx_tso_ctx_qw qw0;
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index 5d3532c27d57..7a7e101afeb6 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -3,9 +3,11 @@
#include "idpf.h"
#include "idpf_virtchnl.h"
+#include "idpf_ptp.h"
+#include "xdp.h"
+#include "xsk.h"
-static const struct net_device_ops idpf_netdev_ops_splitq;
-static const struct net_device_ops idpf_netdev_ops_singleq;
+static const struct net_device_ops idpf_netdev_ops;
/**
* idpf_init_vector_stack - Fill the MSIX vector stack with vector index
@@ -69,7 +71,7 @@ static void idpf_deinit_vector_stack(struct idpf_adapter *adapter)
static void idpf_mb_intr_rel_irq(struct idpf_adapter *adapter)
{
clear_bit(IDPF_MB_INTR_MODE, adapter->flags);
- free_irq(adapter->msix_entries[0].vector, adapter);
+ kfree(free_irq(adapter->msix_entries[0].vector, adapter));
queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0);
}
@@ -88,6 +90,8 @@ void idpf_intr_rel(struct idpf_adapter *adapter)
idpf_deinit_vector_stack(adapter);
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
+ kfree(adapter->rdma_msix_entries);
+ adapter->rdma_msix_entries = NULL;
}
/**
@@ -124,15 +128,14 @@ static void idpf_mb_irq_enable(struct idpf_adapter *adapter)
*/
static int idpf_mb_intr_req_irq(struct idpf_adapter *adapter)
{
- struct idpf_q_vector *mb_vector = &adapter->mb_vector;
int irq_num, mb_vidx = 0, err;
+ char *name;
irq_num = adapter->msix_entries[mb_vidx].vector;
- mb_vector->name = kasprintf(GFP_KERNEL, "%s-%s-%d",
- dev_driver_string(&adapter->pdev->dev),
- "Mailbox", mb_vidx);
- err = request_irq(irq_num, adapter->irq_mb_handler, 0,
- mb_vector->name, adapter);
+ name = kasprintf(GFP_KERNEL, "%s-%s-%d",
+ dev_driver_string(&adapter->pdev->dev),
+ "Mailbox", mb_vidx);
+ err = request_irq(irq_num, adapter->irq_mb_handler, 0, name, adapter);
if (err) {
dev_err(&adapter->pdev->dev,
"IRQ request for mailbox failed, error: %d\n", err);
@@ -146,22 +149,6 @@ static int idpf_mb_intr_req_irq(struct idpf_adapter *adapter)
}
/**
- * idpf_set_mb_vec_id - Set vector index for mailbox
- * @adapter: adapter structure to access the vector chunks
- *
- * The first vector id in the requested vector chunks from the CP is for
- * the mailbox
- */
-static void idpf_set_mb_vec_id(struct idpf_adapter *adapter)
-{
- if (adapter->req_vec_chunks)
- adapter->mb_vector.v_idx =
- le16_to_cpu(adapter->caps.mailbox_vector_id);
- else
- adapter->mb_vector.v_idx = 0;
-}
-
-/**
* idpf_mb_intr_init - Initialize the mailbox interrupt
* @adapter: adapter structure to store the mailbox vector
*/
@@ -316,13 +303,33 @@ rel_lock:
*/
int idpf_intr_req(struct idpf_adapter *adapter)
{
+ u16 num_lan_vecs, min_lan_vecs, num_rdma_vecs = 0, min_rdma_vecs = 0;
u16 default_vports = idpf_get_default_vports(adapter);
int num_q_vecs, total_vecs, num_vec_ids;
- int min_vectors, v_actual, err;
+ int min_vectors, actual_vecs, err;
unsigned int vector;
u16 *vecids;
+ int i;
total_vecs = idpf_get_reserved_vecs(adapter);
+ num_lan_vecs = total_vecs;
+ if (idpf_is_rdma_cap_ena(adapter)) {
+ num_rdma_vecs = idpf_get_reserved_rdma_vecs(adapter);
+ min_rdma_vecs = IDPF_MIN_RDMA_VEC;
+
+ if (!num_rdma_vecs) {
+ /* If idpf_get_reserved_rdma_vecs is 0, vectors are
+ * pulled from the LAN pool.
+ */
+ num_rdma_vecs = min_rdma_vecs;
+ } else if (num_rdma_vecs < min_rdma_vecs) {
+ dev_err(&adapter->pdev->dev,
+ "Not enough vectors reserved for RDMA (min: %u, current: %u)\n",
+ min_rdma_vecs, num_rdma_vecs);
+ return -EINVAL;
+ }
+ }
+
num_q_vecs = total_vecs - IDPF_MBX_Q_VEC;
err = idpf_send_alloc_vectors_msg(adapter, num_q_vecs);
@@ -333,65 +340,76 @@ int idpf_intr_req(struct idpf_adapter *adapter)
return -EAGAIN;
}
- min_vectors = IDPF_MBX_Q_VEC + IDPF_MIN_Q_VEC * default_vports;
- v_actual = pci_alloc_irq_vectors(adapter->pdev, min_vectors,
- total_vecs, PCI_IRQ_MSIX);
- if (v_actual < min_vectors) {
- dev_err(&adapter->pdev->dev, "Failed to allocate MSIX vectors: %d\n",
- v_actual);
- err = -EAGAIN;
+ min_lan_vecs = IDPF_MBX_Q_VEC + IDPF_MIN_Q_VEC * default_vports;
+ min_vectors = min_lan_vecs + min_rdma_vecs;
+ actual_vecs = pci_alloc_irq_vectors(adapter->pdev, min_vectors,
+ total_vecs, PCI_IRQ_MSIX);
+ if (actual_vecs < 0) {
+ dev_err(&adapter->pdev->dev, "Failed to allocate minimum MSIX vectors required: %d\n",
+ min_vectors);
+ err = actual_vecs;
goto send_dealloc_vecs;
}
- adapter->msix_entries = kcalloc(v_actual, sizeof(struct msix_entry),
- GFP_KERNEL);
+ if (idpf_is_rdma_cap_ena(adapter)) {
+ if (actual_vecs < total_vecs) {
+ dev_warn(&adapter->pdev->dev,
+ "Warning: %d vectors requested, only %d available. Defaulting to minimum (%d) for RDMA and remaining for LAN.\n",
+ total_vecs, actual_vecs, IDPF_MIN_RDMA_VEC);
+ num_rdma_vecs = IDPF_MIN_RDMA_VEC;
+ }
+ adapter->rdma_msix_entries = kcalloc(num_rdma_vecs,
+ sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!adapter->rdma_msix_entries) {
+ err = -ENOMEM;
+ goto free_irq;
+ }
+ }
+
+ num_lan_vecs = actual_vecs - num_rdma_vecs;
+ adapter->msix_entries = kcalloc(num_lan_vecs, sizeof(struct msix_entry),
+ GFP_KERNEL);
if (!adapter->msix_entries) {
err = -ENOMEM;
- goto free_irq;
+ goto free_rdma_msix;
}
- idpf_set_mb_vec_id(adapter);
+ adapter->mb_vector.v_idx = le16_to_cpu(adapter->caps.mailbox_vector_id);
- vecids = kcalloc(total_vecs, sizeof(u16), GFP_KERNEL);
+ vecids = kcalloc(actual_vecs, sizeof(u16), GFP_KERNEL);
if (!vecids) {
err = -ENOMEM;
goto free_msix;
}
- if (adapter->req_vec_chunks) {
- struct virtchnl2_vector_chunks *vchunks;
- struct virtchnl2_alloc_vectors *ac;
-
- ac = adapter->req_vec_chunks;
- vchunks = &ac->vchunks;
-
- num_vec_ids = idpf_get_vec_ids(adapter, vecids, total_vecs,
- vchunks);
- if (num_vec_ids < v_actual) {
- err = -EINVAL;
- goto free_vecids;
- }
- } else {
- int i;
-
- for (i = 0; i < v_actual; i++)
- vecids[i] = i;
+ num_vec_ids = idpf_get_vec_ids(adapter, vecids, actual_vecs,
+ &adapter->req_vec_chunks->vchunks);
+ if (num_vec_ids < actual_vecs) {
+ err = -EINVAL;
+ goto free_vecids;
}
- for (vector = 0; vector < v_actual; vector++) {
+ for (vector = 0; vector < num_lan_vecs; vector++) {
adapter->msix_entries[vector].entry = vecids[vector];
adapter->msix_entries[vector].vector =
pci_irq_vector(adapter->pdev, vector);
}
+ for (i = 0; i < num_rdma_vecs; vector++, i++) {
+ adapter->rdma_msix_entries[i].entry = vecids[vector];
+ adapter->rdma_msix_entries[i].vector =
+ pci_irq_vector(adapter->pdev, vector);
+ }
- adapter->num_req_msix = total_vecs;
- adapter->num_msix_entries = v_actual;
/* 'num_avail_msix' is used to distribute excess vectors to the vports
* after considering the minimum vectors required per each default
* vport
*/
- adapter->num_avail_msix = v_actual - min_vectors;
+ adapter->num_avail_msix = num_lan_vecs - min_lan_vecs;
+ adapter->num_msix_entries = num_lan_vecs;
+ if (idpf_is_rdma_cap_ena(adapter))
+ adapter->num_rdma_msix_entries = num_rdma_vecs;
/* Fill MSIX vector lifo stack with vector indexes */
err = idpf_init_vector_stack(adapter);
@@ -413,6 +431,9 @@ free_vecids:
free_msix:
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
+free_rdma_msix:
+ kfree(adapter->rdma_msix_entries);
+ adapter->rdma_msix_entries = NULL;
free_irq:
pci_free_irq_vectors(adapter->pdev);
send_dealloc_vecs:
@@ -498,7 +519,7 @@ static int idpf_del_mac_filter(struct idpf_vport *vport,
}
spin_unlock_bh(&vport_config->mac_filter_list_lock);
- if (np->state == __IDPF_VPORT_UP) {
+ if (test_bit(IDPF_VPORT_UP, np->state)) {
int err;
err = idpf_add_del_mac_filters(vport, np, false, async);
@@ -569,7 +590,7 @@ static int idpf_add_mac_filter(struct idpf_vport *vport,
if (err)
return err;
- if (np->state == __IDPF_VPORT_UP)
+ if (test_bit(IDPF_VPORT_UP, np->state))
err = idpf_add_del_mac_filters(vport, np, true, async);
return err;
@@ -718,8 +739,10 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
{
struct idpf_adapter *adapter = vport->adapter;
struct idpf_vport_config *vport_config;
+ netdev_features_t other_offloads = 0;
+ netdev_features_t csum_offloads = 0;
+ netdev_features_t tso_offloads = 0;
netdev_features_t dflt_features;
- netdev_features_t offloads = 0;
struct idpf_netdev_priv *np;
struct net_device *netdev;
u16 idx = vport->idx;
@@ -736,6 +759,7 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
np->vport = vport;
np->vport_idx = vport->idx;
np->vport_id = vport->vport_id;
+ np->max_tx_hdr_size = idpf_get_max_tx_hdr_size(adapter);
vport->netdev = netdev;
return idpf_init_mac_addr(vport, netdev);
@@ -753,6 +777,8 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
np->adapter = adapter;
np->vport_idx = vport->idx;
np->vport_id = vport->vport_id;
+ np->max_tx_hdr_size = idpf_get_max_tx_hdr_size(adapter);
+ np->tx_max_bufs = idpf_get_max_tx_bufs(adapter);
spin_lock_init(&np->stats_lock);
@@ -765,10 +791,7 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
}
/* assign netdev_ops */
- if (idpf_is_queue_model_split(vport->txq_model))
- netdev->netdev_ops = &idpf_netdev_ops_splitq;
- else
- netdev->netdev_ops = &idpf_netdev_ops_singleq;
+ netdev->netdev_ops = &idpf_netdev_ops;
/* setup watchdog timeout value to be 5 second */
netdev->watchdog_timeo = 5 * HZ;
@@ -784,54 +807,40 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
if (idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
dflt_features |= NETIF_F_RXHASH;
- if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM_L4V4))
- dflt_features |= NETIF_F_IP_CSUM;
- if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM_L4V6))
- dflt_features |= NETIF_F_IPV6_CSUM;
+ if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS,
+ VIRTCHNL2_CAP_FLOW_STEER) &&
+ idpf_vport_is_cap_ena(vport, VIRTCHNL2_VPORT_SIDEBAND_FLOW_STEER))
+ dflt_features |= NETIF_F_NTUPLE;
+ if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_CSUM_L4V4))
+ csum_offloads |= NETIF_F_IP_CSUM;
+ if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_CSUM_L4V6))
+ csum_offloads |= NETIF_F_IPV6_CSUM;
if (idpf_is_cap_ena(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM))
- dflt_features |= NETIF_F_RXCSUM;
- if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_SCTP_CSUM))
- dflt_features |= NETIF_F_SCTP_CRC;
+ csum_offloads |= NETIF_F_RXCSUM;
+ if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_SCTP_CSUM))
+ csum_offloads |= NETIF_F_SCTP_CRC;
if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV4_TCP))
- dflt_features |= NETIF_F_TSO;
+ tso_offloads |= NETIF_F_TSO;
if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV6_TCP))
- dflt_features |= NETIF_F_TSO6;
+ tso_offloads |= NETIF_F_TSO6;
if (idpf_is_cap_ena_all(adapter, IDPF_SEG_CAPS,
VIRTCHNL2_CAP_SEG_IPV4_UDP |
VIRTCHNL2_CAP_SEG_IPV6_UDP))
- dflt_features |= NETIF_F_GSO_UDP_L4;
+ tso_offloads |= NETIF_F_GSO_UDP_L4;
if (idpf_is_cap_ena_all(adapter, IDPF_RSC_CAPS, IDPF_CAP_RSC))
- offloads |= NETIF_F_GRO_HW;
- /* advertise to stack only if offloads for encapsulated packets is
- * supported
- */
- if (idpf_is_cap_ena(vport->adapter, IDPF_SEG_CAPS,
- VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL)) {
- offloads |= NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_GRE |
- NETIF_F_GSO_GRE_CSUM |
- NETIF_F_GSO_PARTIAL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM |
- NETIF_F_GSO_IPXIP4 |
- NETIF_F_GSO_IPXIP6 |
- 0;
-
- if (!idpf_is_cap_ena_all(vport->adapter, IDPF_CSUM_CAPS,
- IDPF_CAP_TUNNEL_TX_CSUM))
- netdev->gso_partial_features |=
- NETIF_F_GSO_UDP_TUNNEL_CSUM;
-
- netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
- offloads |= NETIF_F_TSO_MANGLEID;
- }
+ other_offloads |= NETIF_F_GRO_HW;
if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_LOOPBACK))
- offloads |= NETIF_F_LOOPBACK;
+ other_offloads |= NETIF_F_LOOPBACK;
+
+ netdev->features |= dflt_features | csum_offloads | tso_offloads;
+ netdev->hw_features |= netdev->features | other_offloads;
+ netdev->vlan_features |= netdev->features | other_offloads;
+ netdev->hw_enc_features |= dflt_features | other_offloads;
+ idpf_xdp_set_features(vport);
- netdev->features |= dflt_features;
- netdev->hw_features |= dflt_features | offloads;
- netdev->hw_enc_features |= dflt_features | offloads;
idpf_set_ethtool_ops(netdev);
+ netif_set_affinity_auto(netdev);
SET_NETDEV_DEV(netdev, &adapter->pdev->dev);
/* carrier off on init to avoid Tx hangs */
@@ -879,14 +888,18 @@ static void idpf_remove_features(struct idpf_vport *vport)
/**
* idpf_vport_stop - Disable a vport
* @vport: vport to disable
+ * @rtnl: whether to take RTNL lock
*/
-static void idpf_vport_stop(struct idpf_vport *vport)
+static void idpf_vport_stop(struct idpf_vport *vport, bool rtnl)
{
struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
- if (np->state <= __IDPF_VPORT_DOWN)
+ if (!test_bit(IDPF_VPORT_UP, np->state))
return;
+ if (rtnl)
+ rtnl_lock();
+
netif_carrier_off(vport->netdev);
netif_tx_disable(vport->netdev);
@@ -905,9 +918,13 @@ static void idpf_vport_stop(struct idpf_vport *vport)
vport->link_up = false;
idpf_vport_intr_deinit(vport);
- idpf_vport_intr_rel(vport);
+ idpf_xdp_rxq_info_deinit_all(vport);
idpf_vport_queues_rel(vport);
- np->state = __IDPF_VPORT_DOWN;
+ idpf_vport_intr_rel(vport);
+ clear_bit(IDPF_VPORT_UP, np->state);
+
+ if (rtnl)
+ rtnl_unlock();
}
/**
@@ -931,7 +948,7 @@ static int idpf_stop(struct net_device *netdev)
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
- idpf_vport_stop(vport);
+ idpf_vport_stop(vport, false);
idpf_vport_ctrl_unlock(netdev);
@@ -945,12 +962,19 @@ static int idpf_stop(struct net_device *netdev)
static void idpf_decfg_netdev(struct idpf_vport *vport)
{
struct idpf_adapter *adapter = vport->adapter;
+ u16 idx = vport->idx;
+
+ kfree(vport->rx_ptype_lkup);
+ vport->rx_ptype_lkup = NULL;
- unregister_netdev(vport->netdev);
- free_netdev(vport->netdev);
+ if (test_and_clear_bit(IDPF_VPORT_REG_NETDEV,
+ adapter->vport_config[idx]->flags)) {
+ unregister_netdev(vport->netdev);
+ free_netdev(vport->netdev);
+ }
vport->netdev = NULL;
- adapter->netdevs[vport->idx] = NULL;
+ adapter->netdevs[idx] = NULL;
}
/**
@@ -1014,8 +1038,10 @@ static void idpf_vport_dealloc(struct idpf_vport *vport)
struct idpf_adapter *adapter = vport->adapter;
unsigned int i = vport->idx;
+ idpf_idc_deinit_vport_aux_device(vport->vdev_info);
+
idpf_deinit_mac_addr(vport);
- idpf_vport_stop(vport);
+ idpf_vport_stop(vport, true);
if (!test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags))
idpf_decfg_netdev(vport);
@@ -1121,8 +1147,10 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
if (!vport)
return vport;
+ num_max_q = max(max_q->max_txq, max_q->max_rxq) + IDPF_RESERVED_VECS;
if (!adapter->vport_config[idx]) {
struct idpf_vport_config *vport_config;
+ struct idpf_q_coalesce *q_coal;
vport_config = kzalloc(sizeof(*vport_config), GFP_KERNEL);
if (!vport_config) {
@@ -1131,6 +1159,21 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
return NULL;
}
+ q_coal = kcalloc(num_max_q, sizeof(*q_coal), GFP_KERNEL);
+ if (!q_coal) {
+ kfree(vport_config);
+ kfree(vport);
+
+ return NULL;
+ }
+ for (int i = 0; i < num_max_q; i++) {
+ q_coal[i].tx_intr_mode = IDPF_ITR_DYNAMIC;
+ q_coal[i].tx_coalesce_usecs = IDPF_ITR_TX_DEF;
+ q_coal[i].rx_intr_mode = IDPF_ITR_DYNAMIC;
+ q_coal[i].rx_coalesce_usecs = IDPF_ITR_RX_DEF;
+ }
+ vport_config->user_config.q_coalesce = q_coal;
+
adapter->vport_config[idx] = vport_config;
}
@@ -1140,13 +1183,10 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
vport->default_vport = adapter->num_alloc_vports <
idpf_get_default_vports(adapter);
- num_max_q = max(max_q->max_txq, max_q->max_rxq);
vport->q_vector_idxs = kcalloc(num_max_q, sizeof(u16), GFP_KERNEL);
- if (!vport->q_vector_idxs) {
- kfree(vport);
+ if (!vport->q_vector_idxs)
+ goto free_vport;
- return NULL;
- }
idpf_vport_init(vport, max_q);
/* This alloc is done separate from the LUT because it's not strictly
@@ -1156,11 +1196,9 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
*/
rss_data = &adapter->vport_config[idx]->user_config.rss_data;
rss_data->rss_key = kzalloc(rss_data->rss_key_size, GFP_KERNEL);
- if (!rss_data->rss_key) {
- kfree(vport);
+ if (!rss_data->rss_key)
+ goto free_vector_idxs;
- return NULL;
- }
/* Initialize default rss key */
netdev_rss_key_fill((void *)rss_data->rss_key, rss_data->rss_key_size);
@@ -1173,6 +1211,13 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
adapter->next_vport = idpf_get_free_slot(adapter);
return vport;
+
+free_vector_idxs:
+ kfree(vport->q_vector_idxs);
+free_vport:
+ kfree(vport);
+
+ return NULL;
}
/**
@@ -1276,13 +1321,13 @@ static void idpf_restore_features(struct idpf_vport *vport)
*/
static int idpf_set_real_num_queues(struct idpf_vport *vport)
{
- int err;
+ int err, txq = vport->num_txq - vport->num_xdp_txq;
err = netif_set_real_num_rx_queues(vport->netdev, vport->num_rxq);
if (err)
return err;
- return netif_set_real_num_tx_queues(vport->netdev, vport->num_txq);
+ return netif_set_real_num_tx_queues(vport->netdev, txq);
}
/**
@@ -1300,7 +1345,7 @@ static int idpf_up_complete(struct idpf_vport *vport)
netif_tx_start_all_queues(vport->netdev);
}
- np->state = __IDPF_VPORT_UP;
+ set_bit(IDPF_VPORT_UP, np->state);
return 0;
}
@@ -1318,14 +1363,14 @@ static void idpf_rx_init_buf_tail(struct idpf_vport *vport)
if (idpf_is_queue_model_split(vport->rxq_model)) {
for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
- struct idpf_queue *q =
+ const struct idpf_buf_queue *q =
&grp->splitq.bufq_sets[j].bufq;
writel(q->next_to_alloc, q->tail);
}
} else {
for (j = 0; j < grp->singleq.num_rxq; j++) {
- struct idpf_queue *q =
+ const struct idpf_rx_queue *q =
grp->singleq.rxqs[j];
writel(q->next_to_alloc, q->tail);
@@ -1337,76 +1382,87 @@ static void idpf_rx_init_buf_tail(struct idpf_vport *vport)
/**
* idpf_vport_open - Bring up a vport
* @vport: vport to bring up
- * @alloc_res: allocate queue resources
+ * @rtnl: whether to take RTNL lock
*/
-static int idpf_vport_open(struct idpf_vport *vport, bool alloc_res)
+static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
{
struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
struct idpf_adapter *adapter = vport->adapter;
struct idpf_vport_config *vport_config;
int err;
- if (np->state != __IDPF_VPORT_DOWN)
+ if (test_bit(IDPF_VPORT_UP, np->state))
return -EBUSY;
+ if (rtnl)
+ rtnl_lock();
+
/* we do not allow interface up just yet */
netif_carrier_off(vport->netdev);
- if (alloc_res) {
- err = idpf_vport_queues_alloc(vport);
- if (err)
- return err;
- }
-
err = idpf_vport_intr_alloc(vport);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to allocate interrupts for vport %u: %d\n",
vport->vport_id, err);
- goto queues_rel;
+ goto err_rtnl_unlock;
}
+ err = idpf_vport_queues_alloc(vport);
+ if (err)
+ goto intr_rel;
+
err = idpf_vport_queue_ids_init(vport);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to initialize queue ids for vport %u: %d\n",
vport->vport_id, err);
- goto intr_rel;
+ goto queues_rel;
}
err = idpf_vport_intr_init(vport);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to initialize interrupts for vport %u: %d\n",
vport->vport_id, err);
- goto intr_rel;
+ goto queues_rel;
}
- err = idpf_rx_bufs_init_all(vport);
+ err = idpf_queue_reg_init(vport);
if (err) {
- dev_err(&adapter->pdev->dev, "Failed to initialize RX buffers for vport %u: %d\n",
+ dev_err(&adapter->pdev->dev, "Failed to initialize queue registers for vport %u: %d\n",
vport->vport_id, err);
- goto intr_rel;
+ goto queues_rel;
}
- err = idpf_queue_reg_init(vport);
+ err = idpf_rx_bufs_init_all(vport);
if (err) {
- dev_err(&adapter->pdev->dev, "Failed to initialize queue registers for vport %u: %d\n",
+ dev_err(&adapter->pdev->dev, "Failed to initialize RX buffers for vport %u: %d\n",
vport->vport_id, err);
- goto intr_rel;
+ goto queues_rel;
}
idpf_rx_init_buf_tail(vport);
+ err = idpf_xdp_rxq_info_init_all(vport);
+ if (err) {
+ netdev_err(vport->netdev,
+ "Failed to initialize XDP RxQ info for vport %u: %pe\n",
+ vport->vport_id, ERR_PTR(err));
+ goto intr_deinit;
+ }
+
+ idpf_vport_intr_ena(vport);
+
err = idpf_send_config_queues_msg(vport);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to configure queues for vport %u, %d\n",
vport->vport_id, err);
- goto intr_deinit;
+ goto rxq_deinit;
}
err = idpf_send_map_unmap_queue_vector_msg(vport, true);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to map queue vectors for vport %u: %d\n",
vport->vport_id, err);
- goto intr_deinit;
+ goto rxq_deinit;
}
err = idpf_send_enable_queues_msg(vport);
@@ -1444,6 +1500,9 @@ static int idpf_vport_open(struct idpf_vport *vport, bool alloc_res)
goto deinit_rss;
}
+ if (rtnl)
+ rtnl_unlock();
+
return 0;
deinit_rss:
@@ -1454,12 +1513,18 @@ disable_queues:
idpf_send_disable_queues_msg(vport);
unmap_queue_vectors:
idpf_send_map_unmap_queue_vector_msg(vport, false);
+rxq_deinit:
+ idpf_xdp_rxq_info_deinit_all(vport);
intr_deinit:
idpf_vport_intr_deinit(vport);
-intr_rel:
- idpf_vport_intr_rel(vport);
queues_rel:
idpf_vport_queues_rel(vport);
+intr_rel:
+ idpf_vport_intr_rel(vport);
+
+err_rtnl_unlock:
+ if (rtnl)
+ rtnl_unlock();
return err;
}
@@ -1517,11 +1582,10 @@ void idpf_init_task(struct work_struct *work)
index = vport->idx;
vport_config = adapter->vport_config[index];
- init_waitqueue_head(&vport->sw_marker_wq);
-
spin_lock_init(&vport_config->mac_filter_list_lock);
INIT_LIST_HEAD(&vport_config->user_config.mac_filter_list);
+ INIT_LIST_HEAD(&vport_config->user_config.flow_steer_list);
err = idpf_check_supported_desc_ids(vport);
if (err) {
@@ -1538,7 +1602,7 @@ void idpf_init_task(struct work_struct *work)
/* Once state is put into DOWN, driver is ready for dev_open */
np = netdev_priv(vport->netdev);
- np->state = __IDPF_VPORT_DOWN;
+ clear_bit(IDPF_VPORT_UP, np->state);
if (test_and_clear_bit(IDPF_VPORT_UP_REQUESTED, vport_config->flags))
idpf_vport_open(vport, true);
@@ -1553,13 +1617,22 @@ void idpf_init_task(struct work_struct *work)
}
for (index = 0; index < adapter->max_vports; index++) {
- if (adapter->netdevs[index] &&
- !test_bit(IDPF_VPORT_REG_NETDEV,
- adapter->vport_config[index]->flags)) {
- register_netdev(adapter->netdevs[index]);
- set_bit(IDPF_VPORT_REG_NETDEV,
- adapter->vport_config[index]->flags);
+ struct net_device *netdev = adapter->netdevs[index];
+ struct idpf_vport_config *vport_config;
+
+ vport_config = adapter->vport_config[index];
+
+ if (!netdev ||
+ test_bit(IDPF_VPORT_REG_NETDEV, vport_config->flags))
+ continue;
+
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to register netdev for vport %d: %pe\n",
+ index, ERR_PTR(err));
+ continue;
}
+ set_bit(IDPF_VPORT_REG_NETDEV, vport_config->flags);
}
/* As all the required vports are created, clear the reset flag
@@ -1728,7 +1801,7 @@ static void idpf_set_vport_state(struct idpf_adapter *adapter)
continue;
np = netdev_priv(adapter->netdevs[i]);
- if (np->state == __IDPF_VPORT_UP)
+ if (test_bit(IDPF_VPORT_UP, np->state))
set_bit(IDPF_VPORT_UP_REQUESTED,
adapter->vport_config[i]->flags);
}
@@ -1770,6 +1843,8 @@ static int idpf_init_hard_reset(struct idpf_adapter *adapter)
} else if (test_and_clear_bit(IDPF_HR_FUNC_RESET, adapter->flags)) {
bool is_reset = idpf_is_reset_detected(adapter);
+ idpf_idc_issue_reset_event(adapter->cdev_info);
+
idpf_set_vport_state(adapter);
idpf_vc_core_deinit(adapter);
if (!is_reset)
@@ -1803,6 +1878,7 @@ static int idpf_init_hard_reset(struct idpf_adapter *adapter)
*/
err = idpf_vc_core_init(adapter);
if (err) {
+ cancel_delayed_work_sync(&adapter->mbx_task);
idpf_deinit_dflt_mbx(adapter);
goto unlock_mutex;
}
@@ -1816,6 +1892,10 @@ static int idpf_init_hard_reset(struct idpf_adapter *adapter)
unlock_mutex:
mutex_unlock(&adapter->vport_ctrl_lock);
+ /* Wait until all vports are created to init RDMA CORE AUX */
+ if (!err)
+ err = idpf_idc_init(adapter);
+
return err;
}
@@ -1832,11 +1912,19 @@ void idpf_vc_event_task(struct work_struct *work)
if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
return;
- if (test_bit(IDPF_HR_FUNC_RESET, adapter->flags) ||
- test_bit(IDPF_HR_DRV_LOAD, adapter->flags)) {
- set_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
- idpf_init_hard_reset(adapter);
- }
+ if (test_bit(IDPF_HR_FUNC_RESET, adapter->flags))
+ goto func_reset;
+
+ if (test_bit(IDPF_HR_DRV_LOAD, adapter->flags))
+ goto drv_load;
+
+ return;
+
+func_reset:
+ idpf_vc_xn_shutdown(adapter->vcxn_mngr);
+drv_load:
+ set_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
+ idpf_init_hard_reset(adapter);
}
/**
@@ -1851,10 +1939,10 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
enum idpf_vport_reset_cause reset_cause)
{
struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
- enum idpf_vport_state current_state = np->state;
+ bool vport_is_up = test_bit(IDPF_VPORT_UP, np->state);
struct idpf_adapter *adapter = vport->adapter;
struct idpf_vport *new_vport;
- int err, i;
+ int err;
/* If the system is low on memory, we can end up in bad state if we
* free all the memory for queue resources and try to allocate them
@@ -1877,7 +1965,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
* mess with. Nothing below should use those variables from new_vport
* and should instead always refer to them in vport if they need to.
*/
- memcpy(new_vport, vport, offsetof(struct idpf_vport, link_speed_mbps));
+ memcpy(new_vport, vport, offsetof(struct idpf_vport, link_up));
/* Adjust resource parameters prior to reallocating resources */
switch (reset_cause) {
@@ -1891,6 +1979,9 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
idpf_vport_calc_num_q_desc(new_vport);
break;
case IDPF_SR_MTU_CHANGE:
+ idpf_idc_vdev_mtu_event(vport->vdev_info,
+ IIDC_RDMA_EVENT_BEFORE_MTU_CHANGE);
+ break;
case IDPF_SR_RSC_CHANGE:
break;
default:
@@ -1899,14 +1990,11 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
goto free_vport;
}
- err = idpf_vport_queues_alloc(new_vport);
- if (err)
- goto free_vport;
- if (current_state <= __IDPF_VPORT_DOWN) {
+ if (!vport_is_up) {
idpf_send_delete_queues_msg(vport);
} else {
set_bit(IDPF_VPORT_DEL_QUEUES, vport->flags);
- idpf_vport_stop(vport);
+ idpf_vport_stop(vport, false);
}
idpf_deinit_rss(vport);
@@ -1926,67 +2014,35 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
/* Same comment as above regarding avoiding copying the wait_queues and
* mutexes applies here. We do not want to mess with those if possible.
*/
- memcpy(vport, new_vport, offsetof(struct idpf_vport, link_speed_mbps));
-
- /* Since idpf_vport_queues_alloc was called with new_port, the queue
- * back pointers are currently pointing to the local new_vport. Reset
- * the backpointers to the original vport here
- */
- for (i = 0; i < vport->num_txq_grp; i++) {
- struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
- int j;
-
- tx_qgrp->vport = vport;
- for (j = 0; j < tx_qgrp->num_txq; j++)
- tx_qgrp->txqs[j]->vport = vport;
-
- if (idpf_is_queue_model_split(vport->txq_model))
- tx_qgrp->complq->vport = vport;
- }
-
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
- struct idpf_queue *q;
- u16 num_rxq;
- int j;
-
- rx_qgrp->vport = vport;
- for (j = 0; j < vport->num_bufqs_per_qgrp; j++)
- rx_qgrp->splitq.bufq_sets[j].bufq.vport = vport;
-
- if (idpf_is_queue_model_split(vport->rxq_model))
- num_rxq = rx_qgrp->splitq.num_rxq_sets;
- else
- num_rxq = rx_qgrp->singleq.num_rxq;
-
- for (j = 0; j < num_rxq; j++) {
- if (idpf_is_queue_model_split(vport->rxq_model))
- q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
- else
- q = rx_qgrp->singleq.rxqs[j];
- q->vport = vport;
- }
- }
+ memcpy(vport, new_vport, offsetof(struct idpf_vport, link_up));
if (reset_cause == IDPF_SR_Q_CHANGE)
idpf_vport_alloc_vec_indexes(vport);
err = idpf_set_real_num_queues(vport);
if (err)
- goto err_reset;
+ goto err_open;
- if (current_state == __IDPF_VPORT_UP)
+ if (vport_is_up)
err = idpf_vport_open(vport, false);
- kfree(new_vport);
-
- return err;
+ goto free_vport;
err_reset:
- idpf_vport_queues_rel(new_vport);
+ idpf_send_add_queues_msg(vport, vport->num_txq, vport->num_complq,
+ vport->num_rxq, vport->num_bufq);
+
+err_open:
+ if (vport_is_up)
+ idpf_vport_open(vport, false);
+
free_vport:
kfree(new_vport);
+ if (reset_cause == IDPF_SR_MTU_CHANGE)
+ idpf_idc_vdev_mtu_event(vport->vdev_info,
+ IIDC_RDMA_EVENT_AFTER_MTU_CHANGE);
+
return err;
}
@@ -2212,8 +2268,13 @@ static int idpf_open(struct net_device *netdev)
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
- err = idpf_vport_open(vport, true);
+ err = idpf_set_real_num_queues(vport);
+ if (err)
+ goto unlock;
+
+ err = idpf_vport_open(vport, false);
+unlock:
idpf_vport_ctrl_unlock(netdev);
return err;
@@ -2234,7 +2295,7 @@ static int idpf_change_mtu(struct net_device *netdev, int new_mtu)
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
err = idpf_initiate_soft_reset(vport, IDPF_SR_MTU_CHANGE);
@@ -2244,6 +2305,92 @@ static int idpf_change_mtu(struct net_device *netdev, int new_mtu)
}
/**
+ * idpf_chk_tso_segment - Check skb is not using too many buffers
+ * @skb: send buffer
+ * @max_bufs: maximum number of buffers
+ *
+ * For TSO we need to count the TSO header and segment payload separately. As
+ * such we need to check cases where we have max_bufs-1 fragments or more as we
+ * can potentially require max_bufs+1 DMA transactions, 1 for the TSO header, 1
+ * for the segment payload in the first descriptor, and another max_buf-1 for
+ * the fragments.
+ *
+ * Return: true if the packet needs to be software segmented by the core stack.
+ */
+static bool idpf_chk_tso_segment(const struct sk_buff *skb,
+ unsigned int max_bufs)
+{
+ const struct skb_shared_info *shinfo = skb_shinfo(skb);
+ const skb_frag_t *frag, *stale;
+ int nr_frags, sum;
+
+ /* no need to check if number of frags is less than max_bufs - 1 */
+ nr_frags = shinfo->nr_frags;
+ if (nr_frags < (max_bufs - 1))
+ return false;
+
+ /* We need to walk through the list and validate that each group
+ * of max_bufs-2 fragments totals at least gso_size.
+ */
+ nr_frags -= max_bufs - 2;
+ frag = &shinfo->frags[0];
+
+ /* Initialize sum to the negative value of gso_size minus 1. We use
+ * this as the worst-case scenario in which the frag ahead of us only
+ * provides one byte, which is why we are limited to max_bufs-2
+ * descriptors for a single transmit, as the header and previous
+ * fragment are already consuming 2 descriptors.
+ */
+ sum = 1 - shinfo->gso_size;
+
+ /* Add size of frags 0 through 4 to create our initial sum */
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+
+ /* Walk through fragments adding latest fragment, testing it, and
+ * then removing stale fragments from the sum.
+ */
+ for (stale = &shinfo->frags[0];; stale++) {
+ int stale_size = skb_frag_size(stale);
+
+ sum += skb_frag_size(frag++);
+
+ /* The stale fragment may present us with a smaller
+ * descriptor than the actual fragment size. To account
+ * for that we need to remove all the data on the front and
+ * figure out what the remainder would be in the last
+ * descriptor associated with the fragment.
+ */
+ if (stale_size > IDPF_TX_MAX_DESC_DATA) {
+ int align_pad = -(skb_frag_off(stale)) &
+ (IDPF_TX_MAX_READ_REQ_SIZE - 1);
+
+ sum -= align_pad;
+ stale_size -= align_pad;
+
+ do {
+ sum -= IDPF_TX_MAX_DESC_DATA_ALIGNED;
+ stale_size -= IDPF_TX_MAX_DESC_DATA_ALIGNED;
+ } while (stale_size > IDPF_TX_MAX_DESC_DATA);
+ }
+
+ /* if sum is negative we failed to make sufficient progress */
+ if (sum < 0)
+ return true;
+
+ if (!nr_frags--)
+ break;
+
+ sum -= stale_size;
+ }
+
+ return false;
+}
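/*
 * Worked example, illustrative and not part of this patch: with max_bufs = 8
 * and gso_size = 1500, every window of max_bufs - 2 = 6 consecutive frags
 * must cover roughly one gso_size worth of payload. Seven 100-byte frags
 * fail the check (6 * 100 < 1500) and the skb is handed back to the stack
 * for software GSO; seven 512-byte frags pass (6 * 512 >= 1500).
 */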
+
+/**
* idpf_features_check - Validate packet conforms to limits
* @skb: skb buffer
* @netdev: This port's netdev
@@ -2253,8 +2400,8 @@ static netdev_features_t idpf_features_check(struct sk_buff *skb,
struct net_device *netdev,
netdev_features_t features)
{
- struct idpf_vport *vport = idpf_netdev_to_vport(netdev);
- struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ u16 max_tx_hdr_size = np->max_tx_hdr_size;
size_t len;
/* No point in doing any of this if neither checksum nor GSO are
@@ -2264,12 +2411,15 @@ static netdev_features_t idpf_features_check(struct sk_buff *skb,
if (skb->ip_summed != CHECKSUM_PARTIAL)
return features;
- /* We cannot support GSO if the MSS is going to be less than
- * 88 bytes. If it is then we need to drop support for GSO.
- */
- if (skb_is_gso(skb) &&
- (skb_shinfo(skb)->gso_size < IDPF_TX_TSO_MIN_MSS))
- features &= ~NETIF_F_GSO_MASK;
+ if (skb_is_gso(skb)) {
+ /* We cannot support GSO if the MSS is going to be less than
+ * 88 bytes. If it is then we need to drop support for GSO.
+ */
+ if (skb_shinfo(skb)->gso_size < IDPF_TX_TSO_MIN_MSS)
+ features &= ~NETIF_F_GSO_MASK;
+ else if (idpf_chk_tso_segment(skb, np->tx_max_bufs))
+ features &= ~NETIF_F_GSO_MASK;
+ }
/* Ensure MACLEN is <= 126 bytes (63 words) and not an odd size */
len = skb_network_offset(skb);
@@ -2277,7 +2427,7 @@ static netdev_features_t idpf_features_check(struct sk_buff *skb,
goto unsupported;
len = skb_network_header_len(skb);
- if (unlikely(len > idpf_get_max_tx_hdr_size(adapter)))
+ if (unlikely(len > max_tx_hdr_size))
goto unsupported;
if (!skb->encapsulation)
@@ -2290,7 +2440,7 @@ static netdev_features_t idpf_features_check(struct sk_buff *skb,
/* IPLEN can support at most 127 dwords */
len = skb_inner_network_header_len(skb);
- if (unlikely(len > idpf_get_max_tx_hdr_size(adapter)))
+ if (unlikely(len > max_tx_hdr_size))
goto unsupported;
/* No need to validate L4LEN as TCP is the only protocol with a
@@ -2316,6 +2466,7 @@ static int idpf_set_mac(struct net_device *netdev, void *p)
struct idpf_netdev_priv *np = netdev_priv(netdev);
struct idpf_vport_config *vport_config;
struct sockaddr *addr = p;
+ u8 old_mac_addr[ETH_ALEN];
struct idpf_vport *vport;
int err = 0;
@@ -2339,17 +2490,19 @@ static int idpf_set_mac(struct net_device *netdev, void *p)
if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
goto unlock_mutex;
+ ether_addr_copy(old_mac_addr, vport->default_mac_addr);
+ ether_addr_copy(vport->default_mac_addr, addr->sa_data);
vport_config = vport->adapter->vport_config[vport->idx];
err = idpf_add_mac_filter(vport, np, addr->sa_data, false);
if (err) {
__idpf_del_mac_filter(vport_config, addr->sa_data);
+ ether_addr_copy(vport->default_mac_addr, netdev->dev_addr);
goto unlock_mutex;
}
- if (is_valid_ether_addr(vport->default_mac_addr))
- idpf_del_mac_filter(vport, np, vport->default_mac_addr, false);
+ if (is_valid_ether_addr(old_mac_addr))
+ __idpf_del_mac_filter(vport_config, old_mac_addr);
- ether_addr_copy(vport->default_mac_addr, addr->sa_data);
eth_hw_addr_set(netdev, addr->sa_data);
unlock_mutex:
@@ -2369,8 +2522,12 @@ void *idpf_alloc_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem, u64 size)
struct idpf_adapter *adapter = hw->back;
size_t sz = ALIGN(size, 4096);
- mem->va = dma_alloc_coherent(&adapter->pdev->dev, sz,
- &mem->pa, GFP_KERNEL);
+ /* The control queue resources are freed under a spinlock; contiguous
+ * pages avoid IOMMU remapping and the use of vmap (and vunmap in the
+ * dma_free_*() path).
+ */
+ mem->va = dma_alloc_attrs(&adapter->pdev->dev, sz, &mem->pa,
+ GFP_KERNEL, DMA_ATTR_FORCE_CONTIGUOUS);
mem->size = sz;
return mem->va;
@@ -2385,31 +2542,71 @@ void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem)
{
struct idpf_adapter *adapter = hw->back;
- dma_free_coherent(&adapter->pdev->dev, mem->size,
- mem->va, mem->pa);
+ dma_free_attrs(&adapter->pdev->dev, mem->size,
+ mem->va, mem->pa, DMA_ATTR_FORCE_CONTIGUOUS);
mem->size = 0;
mem->va = NULL;
mem->pa = 0;
}
-static const struct net_device_ops idpf_netdev_ops_splitq = {
- .ndo_open = idpf_open,
- .ndo_stop = idpf_stop,
- .ndo_start_xmit = idpf_tx_splitq_start,
- .ndo_features_check = idpf_features_check,
- .ndo_set_rx_mode = idpf_set_rx_mode,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = idpf_set_mac,
- .ndo_change_mtu = idpf_change_mtu,
- .ndo_get_stats64 = idpf_get_stats64,
- .ndo_set_features = idpf_set_features,
- .ndo_tx_timeout = idpf_tx_timeout,
-};
+static int idpf_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct idpf_vport *vport;
+ int err;
+
+ idpf_vport_ctrl_lock(netdev);
+ vport = idpf_netdev_to_vport(netdev);
+
+ if (!vport->link_up) {
+ idpf_vport_ctrl_unlock(netdev);
+ return -EPERM;
+ }
+
+ if (!idpf_ptp_is_vport_tx_tstamp_ena(vport) &&
+ !idpf_ptp_is_vport_rx_tstamp_ena(vport)) {
+ idpf_vport_ctrl_unlock(netdev);
+ return -EOPNOTSUPP;
+ }
+
+ err = idpf_ptp_set_timestamp_mode(vport, config);
+
+ idpf_vport_ctrl_unlock(netdev);
+
+ return err;
+}
+
+static int idpf_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct idpf_vport *vport;
+
+ idpf_vport_ctrl_lock(netdev);
+ vport = idpf_netdev_to_vport(netdev);
+
+ if (!vport->link_up) {
+ idpf_vport_ctrl_unlock(netdev);
+ return -EPERM;
+ }
+
+ if (!idpf_ptp_is_vport_tx_tstamp_ena(vport) &&
+ !idpf_ptp_is_vport_rx_tstamp_ena(vport)) {
+ idpf_vport_ctrl_unlock(netdev);
+ return 0;
+ }
+
+ *config = vport->tstamp_config;
+
+ idpf_vport_ctrl_unlock(netdev);
+
+ return 0;
+}
-static const struct net_device_ops idpf_netdev_ops_singleq = {
+static const struct net_device_ops idpf_netdev_ops = {
.ndo_open = idpf_open,
.ndo_stop = idpf_stop,
- .ndo_start_xmit = idpf_tx_singleq_start,
+ .ndo_start_xmit = idpf_tx_start,
.ndo_features_check = idpf_features_check,
.ndo_set_rx_mode = idpf_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
@@ -2418,4 +2615,9 @@ static const struct net_device_ops idpf_netdev_ops_singleq = {
.ndo_get_stats64 = idpf_get_stats64,
.ndo_set_features = idpf_set_features,
.ndo_tx_timeout = idpf_tx_timeout,
+ .ndo_hwtstamp_get = idpf_hwtstamp_get,
+ .ndo_hwtstamp_set = idpf_hwtstamp_set,
+ .ndo_bpf = idpf_xdp,
+ .ndo_xdp_xmit = idpf_xdp_xmit,
+ .ndo_xsk_wakeup = idpf_xsk_wakeup,
};
diff --git a/drivers/net/ethernet/intel/idpf/idpf_main.c b/drivers/net/ethernet/intel/idpf/idpf_main.c
index f784eea044bd..de5d722cc21d 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_main.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_main.c
@@ -3,14 +3,94 @@
#include "idpf.h"
#include "idpf_devids.h"
+#include "idpf_lan_vf_regs.h"
#include "idpf_virtchnl.h"
#define DRV_SUMMARY "Intel(R) Infrastructure Data Path Function Linux Driver"
+#define IDPF_NETWORK_ETHERNET_PROGIF 0x01
+#define IDPF_CLASS_NETWORK_ETHERNET_PROGIF \
+ (PCI_CLASS_NETWORK_ETHERNET << 8 | IDPF_NETWORK_ETHERNET_PROGIF)
+#define IDPF_VF_TEST_VAL 0xfeed0000u
+
MODULE_DESCRIPTION(DRV_SUMMARY);
+MODULE_IMPORT_NS("LIBETH");
+MODULE_IMPORT_NS("LIBETH_XDP");
MODULE_LICENSE("GPL");
/**
+ * idpf_get_device_type - Helper to find if it is a VF or PF device
+ * @pdev: PCI device information struct
+ *
+ * Return: PF/VF device ID or -%errno on failure.
+ */
+static int idpf_get_device_type(struct pci_dev *pdev)
+{
+ void __iomem *addr;
+ int ret;
+
+ addr = ioremap(pci_resource_start(pdev, 0) + VF_ARQBAL, 4);
+ if (!addr) {
+ pci_err(pdev, "Failed to allocate BAR0 mbx region\n");
+ return -EIO;
+ }
+
+ writel(IDPF_VF_TEST_VAL, addr);
+ if (readl(addr) == IDPF_VF_TEST_VAL)
+ ret = IDPF_DEV_ID_VF;
+ else
+ ret = IDPF_DEV_ID_PF;
+
+ iounmap(addr);
+
+ return ret;
+}
+
+/**
+ * idpf_dev_init - Initialize device specific parameters
+ * @adapter: adapter to initialize
+ * @ent: entry in idpf_pci_tbl
+ *
+ * Return: %0 on success, -%errno on failure.
+ */
+static int idpf_dev_init(struct idpf_adapter *adapter,
+ const struct pci_device_id *ent)
+{
+ int ret;
+
+ if (ent->class == IDPF_CLASS_NETWORK_ETHERNET_PROGIF) {
+ ret = idpf_get_device_type(adapter->pdev);
+ switch (ret) {
+ case IDPF_DEV_ID_VF:
+ idpf_vf_dev_ops_init(adapter);
+ adapter->crc_enable = true;
+ break;
+ case IDPF_DEV_ID_PF:
+ idpf_dev_ops_init(adapter);
+ break;
+ default:
+ return ret;
+ }
+
+ return 0;
+ }
+
+ switch (ent->device) {
+ case IDPF_DEV_ID_PF:
+ idpf_dev_ops_init(adapter);
+ break;
+ case IDPF_DEV_ID_VF:
+ idpf_vf_dev_ops_init(adapter);
+ adapter->crc_enable = true;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/**
* idpf_remove - Device removal routine
* @pdev: PCI device information struct
*/
@@ -61,6 +141,9 @@ destroy_wqs:
destroy_workqueue(adapter->vc_event_wq);
for (i = 0; i < adapter->max_vports; i++) {
+ if (!adapter->vport_config[i])
+ continue;
+ kfree(adapter->vport_config[i]->user_config.q_coalesce);
kfree(adapter->vport_config[i]);
adapter->vport_config[i] = NULL;
}
@@ -86,7 +169,12 @@ destroy_wqs:
*/
static void idpf_shutdown(struct pci_dev *pdev)
{
- idpf_remove(pdev);
+ struct idpf_adapter *adapter = pci_get_drvdata(pdev);
+
+ cancel_delayed_work_sync(&adapter->serv_task);
+ cancel_delayed_work_sync(&adapter->vc_event_task);
+ idpf_vc_core_deinit(adapter);
+ idpf_deinit_dflt_mbx(adapter);
if (system_state == SYSTEM_POWER_OFF)
pci_set_power_state(pdev, PCI_D3hot);
@@ -100,15 +188,37 @@ static void idpf_shutdown(struct pci_dev *pdev)
*/
static int idpf_cfg_hw(struct idpf_adapter *adapter)
{
+ resource_size_t res_start, mbx_start, rstat_start;
struct pci_dev *pdev = adapter->pdev;
struct idpf_hw *hw = &adapter->hw;
+ struct device *dev = &pdev->dev;
+ long len;
- hw->hw_addr = pcim_iomap_table(pdev)[0];
- if (!hw->hw_addr) {
- pci_err(pdev, "failed to allocate PCI iomap table\n");
+ res_start = pci_resource_start(pdev, 0);
+
+ /* Map mailbox space for virtchnl communication */
+ mbx_start = res_start + adapter->dev_ops.static_reg_info[0].start;
+ len = resource_size(&adapter->dev_ops.static_reg_info[0]);
+ hw->mbx.vaddr = devm_ioremap(dev, mbx_start, len);
+ if (!hw->mbx.vaddr) {
+ pci_err(pdev, "failed to allocate BAR0 mbx region\n");
return -ENOMEM;
}
+ hw->mbx.addr_start = adapter->dev_ops.static_reg_info[0].start;
+ hw->mbx.addr_len = len;
+
+ /* Map rstat space for resets */
+ rstat_start = res_start + adapter->dev_ops.static_reg_info[1].start;
+ len = resource_size(&adapter->dev_ops.static_reg_info[1]);
+ hw->rstat.vaddr = devm_ioremap(dev, rstat_start, len);
+ if (!hw->rstat.vaddr) {
+ pci_err(pdev, "failed to allocate BAR0 rstat region\n");
+
+ return -ENOMEM;
+ }
+ hw->rstat.addr_start = adapter->dev_ops.static_reg_info[1].start;
+ hw->rstat.addr_len = len;
hw->back = adapter;
@@ -135,33 +245,22 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->req_tx_splitq = true;
adapter->req_rx_splitq = true;
- switch (ent->device) {
- case IDPF_DEV_ID_PF:
- idpf_dev_ops_init(adapter);
- break;
- case IDPF_DEV_ID_VF:
- idpf_vf_dev_ops_init(adapter);
- adapter->crc_enable = true;
- break;
- default:
- err = -ENODEV;
- dev_err(&pdev->dev, "Unexpected dev ID 0x%x in idpf probe\n",
- ent->device);
- goto err_free;
- }
-
adapter->pdev = pdev;
err = pcim_enable_device(pdev);
if (err)
goto err_free;
- err = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
+ err = pcim_request_region(pdev, 0, pci_name(pdev));
if (err) {
- pci_err(pdev, "pcim_iomap_regions failed %pe\n", ERR_PTR(err));
+ pci_err(pdev, "pcim_request_region failed %pe\n", ERR_PTR(err));
goto err_free;
}
+ err = pci_enable_ptm(pdev, NULL);
+ if (err)
+ pci_dbg(pdev, "PCIe PTM is not supported by PCIe bus/controller\n");
+
/* set up for high or low dma */
err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (err) {
@@ -173,7 +272,8 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
pci_set_drvdata(pdev, adapter);
- adapter->init_wq = alloc_workqueue("%s-%s-init", 0, 0,
+ adapter->init_wq = alloc_workqueue("%s-%s-init",
+ WQ_UNBOUND | WQ_MEM_RECLAIM, 0,
dev_driver_string(dev),
dev_name(dev));
if (!adapter->init_wq) {
@@ -182,7 +282,8 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_free;
}
- adapter->serv_wq = alloc_workqueue("%s-%s-service", 0, 0,
+ adapter->serv_wq = alloc_workqueue("%s-%s-service",
+ WQ_UNBOUND | WQ_MEM_RECLAIM, 0,
dev_driver_string(dev),
dev_name(dev));
if (!adapter->serv_wq) {
@@ -191,8 +292,8 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_serv_wq_alloc;
}
- adapter->mbx_wq = alloc_workqueue("%s-%s-mbx", 0, 0,
- dev_driver_string(dev),
+ adapter->mbx_wq = alloc_workqueue("%s-%s-mbx", WQ_UNBOUND | WQ_HIGHPRI,
+ 0, dev_driver_string(dev),
dev_name(dev));
if (!adapter->mbx_wq) {
dev_err(dev, "Failed to allocate mailbox workqueue\n");
@@ -200,7 +301,8 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_mbx_wq_alloc;
}
- adapter->stats_wq = alloc_workqueue("%s-%s-stats", 0, 0,
+ adapter->stats_wq = alloc_workqueue("%s-%s-stats",
+ WQ_UNBOUND | WQ_MEM_RECLAIM, 0,
dev_driver_string(dev),
dev_name(dev));
if (!adapter->stats_wq) {
@@ -209,7 +311,8 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_stats_wq_alloc;
}
- adapter->vc_event_wq = alloc_workqueue("%s-%s-vc_event", 0, 0,
+ adapter->vc_event_wq = alloc_workqueue("%s-%s-vc_event",
+ WQ_UNBOUND | WQ_MEM_RECLAIM, 0,
dev_driver_string(dev),
dev_name(dev));
if (!adapter->vc_event_wq) {
@@ -221,11 +324,18 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* setup msglvl */
adapter->msg_enable = netif_msg_init(-1, IDPF_AVAIL_NETIF_M);
+ err = idpf_dev_init(adapter, ent);
+ if (err) {
+ dev_err(&pdev->dev, "Unexpected dev ID 0x%x in idpf probe\n",
+ ent->device);
+ goto destroy_vc_event_wq;
+ }
+
err = idpf_cfg_hw(adapter);
if (err) {
dev_err(dev, "Failed to configure HW structure for adapter: %d\n",
err);
- goto err_cfg_hw;
+ goto destroy_vc_event_wq;
}
mutex_init(&adapter->vport_ctrl_lock);
@@ -246,7 +356,7 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
-err_cfg_hw:
+destroy_vc_event_wq:
destroy_workqueue(adapter->vc_event_wq);
err_vc_event_wq_alloc:
destroy_workqueue(adapter->stats_wq);
@@ -266,6 +376,7 @@ err_free:
static const struct pci_device_id idpf_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, IDPF_DEV_ID_PF)},
{ PCI_VDEVICE(INTEL, IDPF_DEV_ID_VF)},
+ { PCI_DEVICE_CLASS(IDPF_CLASS_NETWORK_ETHERNET_PROGIF, ~0)},
{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(pci, idpf_pci_tbl);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_mem.h b/drivers/net/ethernet/intel/idpf/idpf_mem.h
index b21a04fccf0f..2aaabdc02dd2 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_mem.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_mem.h
@@ -12,9 +12,9 @@ struct idpf_dma_mem {
size_t size;
};
-#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
-#define rd32(a, reg) readl((a)->hw_addr + (reg))
-#define wr64(a, reg, value) writeq((value), ((a)->hw_addr + (reg)))
-#define rd64(a, reg) readq((a)->hw_addr + (reg))
+#define idpf_mbx_wr32(a, reg, value) writel((value), ((a)->mbx.vaddr + (reg)))
+#define idpf_mbx_rd32(a, reg) readl((a)->mbx.vaddr + (reg))
+#define idpf_mbx_wr64(a, reg, value) writeq((value), ((a)->mbx.vaddr + (reg)))
+#define idpf_mbx_rd64(a, reg) readq((a)->mbx.vaddr + (reg))
#endif /* _IDPF_MEM_H_ */
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ptp.c b/drivers/net/ethernet/intel/idpf/idpf_ptp.c
new file mode 100644
index 000000000000..3e1052d070cf
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_ptp.c
@@ -0,0 +1,1021 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2024 Intel Corporation */
+
+#include "idpf.h"
+#include "idpf_ptp.h"
+
+/**
+ * idpf_ptp_get_access - Determine the access type of the PTP features
+ * @adapter: Driver specific private structure
+ * @direct: Capability that indicates the direct access
+ * @mailbox: Capability that indicates the mailbox access
+ *
+ * Return: the type of supported access for the PTP feature.
+ */
+static enum idpf_ptp_access
+idpf_ptp_get_access(const struct idpf_adapter *adapter, u32 direct, u32 mailbox)
+{
+ if (adapter->ptp->caps & direct)
+ return IDPF_PTP_DIRECT;
+ else if (adapter->ptp->caps & mailbox)
+ return IDPF_PTP_MAILBOX;
+ else
+ return IDPF_PTP_NONE;
+}
+
+/**
+ * idpf_ptp_get_features_access - Determine the access type of PTP features
+ * @adapter: Driver specific private structure
+ *
+ * Populate the adapter structure with the supported access type for each
+ * PTP feature.
+ */
+void idpf_ptp_get_features_access(const struct idpf_adapter *adapter)
+{
+ struct idpf_ptp *ptp = adapter->ptp;
+ u32 direct, mailbox;
+
+ /* Get the device clock time */
+ direct = VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME;
+ mailbox = VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME_MB;
+ ptp->get_dev_clk_time_access = idpf_ptp_get_access(adapter,
+ direct,
+ mailbox);
+
+ /* Get the cross timestamp */
+ direct = VIRTCHNL2_CAP_PTP_GET_CROSS_TIME;
+ mailbox = VIRTCHNL2_CAP_PTP_GET_CROSS_TIME_MB;
+ ptp->get_cross_tstamp_access = idpf_ptp_get_access(adapter,
+ direct,
+ mailbox);
+
+ /* Set the device clock time */
+ direct = VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME;
+ mailbox = VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME_MB;
+ ptp->set_dev_clk_time_access = idpf_ptp_get_access(adapter,
+ direct,
+ mailbox);
+
+ /* Adjust the device clock time */
+ direct = VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK;
+ mailbox = VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK_MB;
+ ptp->adj_dev_clk_time_access = idpf_ptp_get_access(adapter,
+ direct,
+ mailbox);
+
+ /* Tx timestamping */
+ direct = VIRTCHNL2_CAP_PTP_TX_TSTAMPS;
+ mailbox = VIRTCHNL2_CAP_PTP_TX_TSTAMPS_MB;
+ ptp->tx_tstamp_access = idpf_ptp_get_access(adapter,
+ direct,
+ mailbox);
+}
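
For illustration only (not part of the patch), a minimal user-space sketch of the same direct-over-mailbox precedence used above, with hypothetical capability bit names standing in for the VIRTCHNL2 definitions:

#include <stdio.h>
#include <stdint.h>

enum access { ACCESS_NONE, ACCESS_DIRECT, ACCESS_MAILBOX };

/* Hypothetical capability bits, for illustration only. */
#define CAP_GET_CLK_DIRECT  (1u << 0)
#define CAP_GET_CLK_MAILBOX (1u << 1)

/* Direct access wins over mailbox; otherwise the feature is unavailable. */
static enum access resolve_access(uint32_t caps, uint32_t direct, uint32_t mailbox)
{
	if (caps & direct)
		return ACCESS_DIRECT;
	if (caps & mailbox)
		return ACCESS_MAILBOX;
	return ACCESS_NONE;
}

int main(void)
{
	uint32_t caps = CAP_GET_CLK_MAILBOX;	/* only the mailbox cap negotiated */

	printf("access=%d\n",
	       (int)resolve_access(caps, CAP_GET_CLK_DIRECT, CAP_GET_CLK_MAILBOX));
	return 0;	/* prints access=2 (mailbox) */
}
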
+
+/**
+ * idpf_ptp_enable_shtime - Enable shadow time and execute a command
+ * @adapter: Driver specific private structure
+ */
+static void idpf_ptp_enable_shtime(struct idpf_adapter *adapter)
+{
+ u32 shtime_enable, exec_cmd;
+
+ /* Get offsets */
+ shtime_enable = adapter->ptp->cmd.shtime_enable_mask;
+ exec_cmd = adapter->ptp->cmd.exec_cmd_mask;
+
+ /* Set the shtime en and the sync field */
+ writel(shtime_enable, adapter->ptp->dev_clk_regs.cmd_sync);
+ writel(exec_cmd | shtime_enable, adapter->ptp->dev_clk_regs.cmd_sync);
+}
+
+/**
+ * idpf_ptp_read_src_clk_reg_direct - Read directly the main timer value
+ * @adapter: Driver specific private structure
+ * @sts: Optional parameter for holding a pair of system timestamps from
+ * the system clock. Will be ignored when NULL is given.
+ *
+ * Return: the device clock time.
+ */
+static u64 idpf_ptp_read_src_clk_reg_direct(struct idpf_adapter *adapter,
+ struct ptp_system_timestamp *sts)
+{
+ struct idpf_ptp *ptp = adapter->ptp;
+ u32 hi, lo;
+
+ spin_lock(&ptp->read_dev_clk_lock);
+
+ /* Read the system timestamp pre PHC read */
+ ptp_read_system_prets(sts);
+
+ idpf_ptp_enable_shtime(adapter);
+
+ /* Read the system timestamp post PHC read */
+ ptp_read_system_postts(sts);
+
+ lo = readl(ptp->dev_clk_regs.dev_clk_ns_l);
+ hi = readl(ptp->dev_clk_regs.dev_clk_ns_h);
+
+ spin_unlock(&ptp->read_dev_clk_lock);
+
+ return ((u64)hi << 32) | lo;
+}
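
As a side note, the 64-bit device time above is assembled from two 32-bit reads that the shadow-time command latches together so both halves describe the same instant. A tiny stand-alone sketch of the hi/lo combine, using made-up register values (not part of the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Pretend these were read from the latched low/high registers. */
	uint32_t lo = 0x89abcdefu;
	uint32_t hi = 0x01234567u;
	uint64_t ns = ((uint64_t)hi << 32) | lo;

	printf("device time = %llu ns\n", (unsigned long long)ns);
	return 0;
}
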
+
+/**
+ * idpf_ptp_read_src_clk_reg_mailbox - Read the main timer value through mailbox
+ * @adapter: Driver specific private structure
+ * @sts: Optional parameter for holding a pair of system timestamps from
+ * the system clock. Will be ignored when NULL is given.
+ * @src_clk: Returned main timer value in nanoseconds unit
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_ptp_read_src_clk_reg_mailbox(struct idpf_adapter *adapter,
+ struct ptp_system_timestamp *sts,
+ u64 *src_clk)
+{
+ struct idpf_ptp_dev_timers clk_time;
+ int err;
+
+ /* Read the system timestamp pre PHC read */
+ ptp_read_system_prets(sts);
+
+ err = idpf_ptp_get_dev_clk_time(adapter, &clk_time);
+ if (err)
+ return err;
+
+ /* Read the system timestamp post PHC read */
+ ptp_read_system_postts(sts);
+
+ *src_clk = clk_time.dev_clk_time_ns;
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_read_src_clk_reg - Read the main timer value
+ * @adapter: Driver specific private structure
+ * @src_clk: Returned main timer value in nanoseconds unit
+ * @sts: Optional parameter for holding a pair of system timestamps from
+ * the system clock. Will be ignored if NULL is given.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_ptp_read_src_clk_reg(struct idpf_adapter *adapter, u64 *src_clk,
+ struct ptp_system_timestamp *sts)
+{
+ switch (adapter->ptp->get_dev_clk_time_access) {
+ case IDPF_PTP_NONE:
+ return -EOPNOTSUPP;
+ case IDPF_PTP_MAILBOX:
+ return idpf_ptp_read_src_clk_reg_mailbox(adapter, sts, src_clk);
+ case IDPF_PTP_DIRECT:
+ *src_clk = idpf_ptp_read_src_clk_reg_direct(adapter, sts);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_ARM_ARCH_TIMER) || IS_ENABLED(CONFIG_X86)
+/**
+ * idpf_ptp_get_sync_device_time_direct - Get the cross time stamp values
+ * directly
+ * @adapter: Driver specific private structure
+ * @dev_time: 64bit main timer value
+ * @sys_time: 64bit system time value
+ */
+static void idpf_ptp_get_sync_device_time_direct(struct idpf_adapter *adapter,
+ u64 *dev_time, u64 *sys_time)
+{
+ u32 dev_time_lo, dev_time_hi, sys_time_lo, sys_time_hi;
+ struct idpf_ptp *ptp = adapter->ptp;
+
+ spin_lock(&ptp->read_dev_clk_lock);
+
+ idpf_ptp_enable_shtime(adapter);
+
+ dev_time_lo = readl(ptp->dev_clk_regs.dev_clk_ns_l);
+ dev_time_hi = readl(ptp->dev_clk_regs.dev_clk_ns_h);
+
+ sys_time_lo = readl(ptp->dev_clk_regs.sys_time_ns_l);
+ sys_time_hi = readl(ptp->dev_clk_regs.sys_time_ns_h);
+
+ spin_unlock(&ptp->read_dev_clk_lock);
+
+ *dev_time = (u64)dev_time_hi << 32 | dev_time_lo;
+ *sys_time = (u64)sys_time_hi << 32 | sys_time_lo;
+}
+
+/**
+ * idpf_ptp_get_sync_device_time_mailbox - Get the cross time stamp values
+ * through mailbox
+ * @adapter: Driver specific private structure
+ * @dev_time: 64bit main timer value expressed in nanoseconds
+ * @sys_time: 64bit system time value expressed in nanoseconds
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_ptp_get_sync_device_time_mailbox(struct idpf_adapter *adapter,
+ u64 *dev_time, u64 *sys_time)
+{
+ struct idpf_ptp_dev_timers cross_time;
+ int err;
+
+ err = idpf_ptp_get_cross_time(adapter, &cross_time);
+ if (err)
+ return err;
+
+ *dev_time = cross_time.dev_clk_time_ns;
+ *sys_time = cross_time.sys_time_ns;
+
+ return err;
+}
+
+/**
+ * idpf_ptp_get_sync_device_time - Get the cross time stamp info
+ * @device: Current device time
+ * @system: System counter value read synchronously with device time
+ * @ctx: Context provided by timekeeping code
+ *
+ * The device time and the system counter are read simultaneously.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_ptp_get_sync_device_time(ktime_t *device,
+ struct system_counterval_t *system,
+ void *ctx)
+{
+ struct idpf_adapter *adapter = ctx;
+ u64 ns_time_dev, ns_time_sys;
+ int err;
+
+ switch (adapter->ptp->get_cross_tstamp_access) {
+ case IDPF_PTP_NONE:
+ return -EOPNOTSUPP;
+ case IDPF_PTP_DIRECT:
+ idpf_ptp_get_sync_device_time_direct(adapter, &ns_time_dev,
+ &ns_time_sys);
+ break;
+ case IDPF_PTP_MAILBOX:
+ err = idpf_ptp_get_sync_device_time_mailbox(adapter,
+ &ns_time_dev,
+ &ns_time_sys);
+ if (err)
+ return err;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ *device = ns_to_ktime(ns_time_dev);
+
+ system->cs_id = IS_ENABLED(CONFIG_X86) ? CSID_X86_ART
+ : CSID_ARM_ARCH_COUNTER;
+ system->cycles = ns_time_sys;
+ system->use_nsecs = true;
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_get_crosststamp - Capture a device cross timestamp
+ * @info: the driver's PTP info structure
+ * @cts: The memory to fill the cross timestamp info
+ *
+ * Capture a cross timestamp between the system time and the device PTP hardware
+ * clock.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+static int idpf_ptp_get_crosststamp(struct ptp_clock_info *info,
+ struct system_device_crosststamp *cts)
+{
+ struct idpf_adapter *adapter = idpf_ptp_info_to_adapter(info);
+
+ return get_device_system_crosststamp(idpf_ptp_get_sync_device_time,
+ adapter, NULL, cts);
+}
+#endif /* CONFIG_ARM_ARCH_TIMER || CONFIG_X86 */
+
+/**
+ * idpf_ptp_gettimex64 - Get the time of the clock
+ * @info: the driver's PTP info structure
+ * @ts: timespec64 structure to hold the current time value
+ * @sts: Optional parameter for holding a pair of system timestamps from
+ * the system clock. Will be ignored if NULL is given.
+ *
+ * Return: 0 on success, with the device clock value converted into the
+ * timespec struct, -errno otherwise.
+ */
+static int idpf_ptp_gettimex64(struct ptp_clock_info *info,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct idpf_adapter *adapter = idpf_ptp_info_to_adapter(info);
+ u64 time_ns;
+ int err;
+
+ err = idpf_ptp_read_src_clk_reg(adapter, &time_ns, sts);
+ if (err)
+ return -EACCES;
+
+ *ts = ns_to_timespec64(time_ns);
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_update_phctime_rxq_grp - Update the cached PHC time for a given Rx
+ * queue group.
+ * @grp: receive queue group in which Rx timestamp is enabled
+ * @split: Indicates whether the queue model is split or single queue
+ * @systime: Cached system time
+ */
+static void
+idpf_ptp_update_phctime_rxq_grp(const struct idpf_rxq_group *grp, bool split,
+ u64 systime)
+{
+ struct idpf_rx_queue *rxq;
+ u16 i;
+
+ if (!split) {
+ for (i = 0; i < grp->singleq.num_rxq; i++) {
+ rxq = grp->singleq.rxqs[i];
+ if (rxq)
+ WRITE_ONCE(rxq->cached_phc_time, systime);
+ }
+ } else {
+ for (i = 0; i < grp->splitq.num_rxq_sets; i++) {
+ rxq = &grp->splitq.rxq_sets[i]->rxq;
+ if (rxq)
+ WRITE_ONCE(rxq->cached_phc_time, systime);
+ }
+ }
+}
+
+/**
+ * idpf_ptp_update_cached_phctime - Update the cached PHC time values
+ * @adapter: Driver specific private structure
+ *
+ * This function updates the system time values which are cached in the adapter
+ * structure and the Rx queues.
+ *
+ * This function must be called periodically to ensure that the cached value
+ * is never more than 2 seconds old.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_ptp_update_cached_phctime(struct idpf_adapter *adapter)
+{
+ u64 systime;
+ int err;
+
+ err = idpf_ptp_read_src_clk_reg(adapter, &systime, NULL);
+ if (err)
+ return -EACCES;
+
+ /* Update the cached PHC time stored in the adapter structure.
+ * These values are used to extend Tx timestamp values to 64 bit
+ * expected by the stack.
+ */
+ WRITE_ONCE(adapter->ptp->cached_phc_time, systime);
+ WRITE_ONCE(adapter->ptp->cached_phc_jiffies, jiffies);
+
+ idpf_for_each_vport(adapter, vport) {
+ bool split;
+
+ if (!vport || !vport->rxq_grps)
+ continue;
+
+ split = idpf_is_queue_model_split(vport->rxq_model);
+
+ for (u16 i = 0; i < vport->num_rxq_grp; i++) {
+ struct idpf_rxq_group *grp = &vport->rxq_grps[i];
+
+ idpf_ptp_update_phctime_rxq_grp(grp, split, systime);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_settime64 - Set the time of the clock
+ * @info: the driver's PTP info structure
+ * @ts: timespec64 structure that holds the new time value
+ *
+ * Set the device clock to the user input value. The conversion from timespec
+ * to ns happens in the write function.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_ptp_settime64(struct ptp_clock_info *info,
+ const struct timespec64 *ts)
+{
+ struct idpf_adapter *adapter = idpf_ptp_info_to_adapter(info);
+ enum idpf_ptp_access access;
+ int err;
+ u64 ns;
+
+ access = adapter->ptp->set_dev_clk_time_access;
+ if (access != IDPF_PTP_MAILBOX)
+ return -EOPNOTSUPP;
+
+ ns = timespec64_to_ns(ts);
+
+ err = idpf_ptp_set_dev_clk_time(adapter, ns);
+ if (err) {
+ pci_err(adapter->pdev, "Failed to set the time, err: %pe\n",
+ ERR_PTR(err));
+ return err;
+ }
+
+ err = idpf_ptp_update_cached_phctime(adapter);
+ if (err)
+ pci_warn(adapter->pdev,
+ "Unable to immediately update cached PHC time\n");
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_adjtime_nonatomic - Do a non-atomic clock adjustment
+ * @info: the driver's PTP info structure
+ * @delta: Offset in nanoseconds to adjust the time by
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta)
+{
+ struct timespec64 now, then;
+ int err;
+
+ err = idpf_ptp_gettimex64(info, &now, NULL);
+ if (err)
+ return err;
+
+ then = ns_to_timespec64(delta);
+ now = timespec64_add(now, then);
+
+ return idpf_ptp_settime64(info, &now);
+}
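
The non-atomic path reads the current time, adds the delta as a timespec, and writes the result back, so a large adjustment is applied in a read-modify-write sequence rather than in one hardware operation. A small stand-alone sketch of the timespec addition with nanosecond carry, assuming a non-negative result (not part of the patch):

#include <stdio.h>
#include <stdint.h>
#include <time.h>

#define NSEC_PER_SEC 1000000000L

/* Add a signed nanosecond delta to a timespec, normalising tv_nsec. */
static struct timespec timespec_add_ns(struct timespec ts, int64_t delta_ns)
{
	int64_t ns = (int64_t)ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec + delta_ns;

	ts.tv_sec = ns / NSEC_PER_SEC;
	ts.tv_nsec = ns % NSEC_PER_SEC;
	return ts;
}

int main(void)
{
	struct timespec now = { .tv_sec = 100, .tv_nsec = 900000000L };
	struct timespec adj = timespec_add_ns(now, 3LL * NSEC_PER_SEC + 200000000LL);

	printf("%ld.%09ld\n", (long)adj.tv_sec, adj.tv_nsec);	/* 104.100000000 */
	return 0;
}
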
+
+/**
+ * idpf_ptp_adjtime - Adjust the time of the clock by the indicated delta
+ * @info: the driver's PTP info structure
+ * @delta: Offset in nanoseconds to adjust the time by
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
+{
+ struct idpf_adapter *adapter = idpf_ptp_info_to_adapter(info);
+ enum idpf_ptp_access access;
+ int err;
+
+ access = adapter->ptp->adj_dev_clk_time_access;
+ if (access != IDPF_PTP_MAILBOX)
+ return -EOPNOTSUPP;
+
+ /* Hardware only supports atomic adjustments using signed 32-bit
+ * integers. For any adjustment outside this range, perform
+ * a non-atomic get->adjust->set flow.
+ */
+ if (delta > S32_MAX || delta < S32_MIN)
+ return idpf_ptp_adjtime_nonatomic(info, delta);
+
+ err = idpf_ptp_adj_dev_clk_time(adapter, delta);
+ if (err) {
+ pci_err(adapter->pdev, "Failed to adjust the clock with delta %lld err: %pe\n",
+ delta, ERR_PTR(err));
+ return err;
+ }
+
+ err = idpf_ptp_update_cached_phctime(adapter);
+ if (err)
+ pci_warn(adapter->pdev,
+ "Unable to immediately update cached PHC time\n");
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_adjfine - Adjust clock increment rate
+ * @info: the driver's PTP info structure
+ * @scaled_ppm: Parts per million with 16-bit fractional field
+ *
+ * Adjust the frequency of the clock by the indicated scaled ppm from the
+ * base frequency.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
+{
+ struct idpf_adapter *adapter = idpf_ptp_info_to_adapter(info);
+ enum idpf_ptp_access access;
+ u64 incval, diff;
+ int err;
+
+ access = adapter->ptp->adj_dev_clk_time_access;
+ if (access != IDPF_PTP_MAILBOX)
+ return -EOPNOTSUPP;
+
+ incval = adapter->ptp->base_incval;
+
+ diff = adjust_by_scaled_ppm(incval, scaled_ppm);
+ err = idpf_ptp_adj_dev_clk_fine(adapter, diff);
+ if (err)
+ pci_err(adapter->pdev, "Failed to adjust clock increment rate for scaled ppm %ld %pe\n",
+ scaled_ppm, ERR_PTR(err));
+
+ return 0;
+}
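
adjust_by_scaled_ppm() takes a scaled_ppm value whose low 16 bits are a fractional part, so the adjusted increment works out to roughly base +/- base * |scaled_ppm| / (1,000,000 * 65,536). A rough user-space approximation of that arithmetic, using floating point purely for readability where the kernel helper sticks to integer math (not part of the patch):

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* Approximate the scaled-ppm adjustment: one unit of scaled_ppm is
 * 1/65536 ppm of the base value.
 */
static uint64_t approx_adjust_by_scaled_ppm(uint64_t base, long scaled_ppm)
{
	long double frac = (long double)labs(scaled_ppm) / (1000000.0L * 65536.0L);
	uint64_t delta = (uint64_t)(base * frac);

	return scaled_ppm < 0 ? base - delta : base + delta;
}

int main(void)
{
	uint64_t base_incval = 0x100000000ull;	/* made-up base increment value */
	long scaled_ppm = 65536 * 10;		/* +10 ppm */

	printf("new incval = %llu\n",
	       (unsigned long long)approx_adjust_by_scaled_ppm(base_incval,
							       scaled_ppm));
	return 0;
}
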
+
+/**
+ * idpf_ptp_verify_pin - Verify if pin supports requested pin function
+ * @info: the driver's PTP info structure
+ * @pin: Pin index
+ * @func: Assigned function
+ * @chan: Assigned channel
+ *
+ * Return: -EOPNOTSUPP as not supported yet.
+ */
+static int idpf_ptp_verify_pin(struct ptp_clock_info *info, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * idpf_ptp_gpio_enable - Enable/disable ancillary features of PHC
+ * @info: the driver's PTP info structure
+ * @rq: The requested feature to change
+ * @on: Enable/disable flag
+ *
+ * Return: -EOPNOTSUPP as not supported yet.
+ */
+static int idpf_ptp_gpio_enable(struct ptp_clock_info *info,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * idpf_ptp_tstamp_extend_32b_to_64b - Convert a 32b nanoseconds Tx or Rx
+ * timestamp value to 64b.
+ * @cached_phc_time: recently cached copy of PHC time
+ * @in_timestamp: Ingress/egress 32b nanoseconds timestamp value
+ *
+ * Hardware captures timestamps which contain only 32 bits of nominal
+ * nanoseconds, as opposed to the 64bit timestamps that the stack expects.
+ *
+ * Return: Tx timestamp value extended to 64 bits based on cached PHC time.
+ */
+u64 idpf_ptp_tstamp_extend_32b_to_64b(u64 cached_phc_time, u32 in_timestamp)
+{
+ u32 delta, phc_time_lo;
+ u64 ns;
+
+ /* Extract the lower 32 bits of the PHC time */
+ phc_time_lo = (u32)cached_phc_time;
+
+ /* Calculate the delta between the lower 32bits of the cached PHC
+ * time and the in_timestamp value.
+ */
+ delta = in_timestamp - phc_time_lo;
+
+ if (delta > U32_MAX / 2) {
+ /* Reverse the delta calculation here */
+ delta = phc_time_lo - in_timestamp;
+ ns = cached_phc_time - delta;
+ } else {
+ ns = cached_phc_time + delta;
+ }
+
+ return ns;
+}
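
Because idpf_ptp_update_cached_phctime() keeps the cached PHC time within about 2 seconds of the hardware clock, the captured 32-bit value always lies within half of the ~4.29 s rollover window of the cached time, so the unsigned distance between the two low words decides whether to add or subtract. A worked stand-alone example of the same arithmetic (not part of the patch):

#include <stdio.h>
#include <stdint.h>

/* Pick the 64-bit time whose low 32 bits equal the captured value and
 * that lies closest to the cached PHC time.
 */
static uint64_t extend_32b_to_64b(uint64_t cached_phc_time, uint32_t in_ts)
{
	uint32_t phc_lo = (uint32_t)cached_phc_time;
	uint32_t delta = in_ts - phc_lo;

	if (delta > UINT32_MAX / 2) {
		uint32_t rev = phc_lo - in_ts;

		return cached_phc_time - rev;
	}

	return cached_phc_time + delta;
}

int main(void)
{
	/* Cached PHC time just after a 32-bit rollover, capture just before it. */
	uint64_t cached = 0x500000123ULL;	/* low word 0x00000123 */
	uint32_t captured = 0xfffffff0u;

	/* Prints 0x4fffffff0: the capture happened shortly before the cache. */
	printf("extended = 0x%llx\n",
	       (unsigned long long)extend_32b_to_64b(cached, captured));
	return 0;
}
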
+
+/**
+ * idpf_ptp_extend_ts - Convert a 40b timestamp to 64b nanoseconds
+ * @vport: Virtual port structure
+ * @in_tstamp: Ingress/egress timestamp value
+ *
+ * It is assumed that the caller verifies the timestamp is valid prior to
+ * calling this function.
+ *
+ * Extract the 32bit nominal nanoseconds and extend them. Use the cached PHC
+ * time stored in the device private PTP structure as the basis for timestamp
+ * extension.
+ *
+ * Return: Tx timestamp value extended to 64 bits.
+ */
+u64 idpf_ptp_extend_ts(struct idpf_vport *vport, u64 in_tstamp)
+{
+ struct idpf_ptp *ptp = vport->adapter->ptp;
+ unsigned long discard_time;
+
+ discard_time = ptp->cached_phc_jiffies + 2 * HZ;
+
+ if (time_is_before_jiffies(discard_time)) {
+ u64_stats_update_begin(&vport->tstamp_stats.stats_sync);
+ u64_stats_inc(&vport->tstamp_stats.discarded);
+ u64_stats_update_end(&vport->tstamp_stats.stats_sync);
+
+ return 0;
+ }
+
+ return idpf_ptp_tstamp_extend_32b_to_64b(ptp->cached_phc_time,
+ lower_32_bits(in_tstamp));
+}
+
+/**
+ * idpf_ptp_request_ts - Request an available Tx timestamp index
+ * @tx_q: Transmit queue on which the Tx timestamp is requested
+ * @skb: The SKB to associate with this timestamp request
+ * @idx: Index of the Tx timestamp latch
+ *
+ * Request a Tx timestamp index, negotiated during PTP init, that will be set
+ * into the Tx descriptor.
+ *
+ * Return: 0 and the index that can be provided to Tx descriptor on success,
+ * -errno otherwise.
+ */
+int idpf_ptp_request_ts(struct idpf_tx_queue *tx_q, struct sk_buff *skb,
+ u32 *idx)
+{
+ struct idpf_ptp_tx_tstamp *ptp_tx_tstamp;
+ struct list_head *head;
+
+ /* Get the index from the free latches list */
+ spin_lock(&tx_q->cached_tstamp_caps->latches_lock);
+
+ head = &tx_q->cached_tstamp_caps->latches_free;
+ if (list_empty(head)) {
+ spin_unlock(&tx_q->cached_tstamp_caps->latches_lock);
+ return -ENOBUFS;
+ }
+
+ ptp_tx_tstamp = list_first_entry(head, struct idpf_ptp_tx_tstamp,
+ list_member);
+ list_del(&ptp_tx_tstamp->list_member);
+
+ ptp_tx_tstamp->skb = skb_get(skb);
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ /* Move the element to the used latches list */
+ list_add(&ptp_tx_tstamp->list_member,
+ &tx_q->cached_tstamp_caps->latches_in_use);
+ spin_unlock(&tx_q->cached_tstamp_caps->latches_lock);
+
+ *idx = ptp_tx_tstamp->idx;
+
+ return 0;
+}
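
The latch bookkeeping above is a handover of a pre-allocated entry from the free list to the in-use list under the latches_lock spinlock; the index of the claimed latch is what ends up in the Tx descriptor. A stripped-down, single-threaded sketch of the same idea using an array of slots instead of kernel lists (not part of the patch):

#include <stdio.h>
#include <stdbool.h>

#define NUM_LATCHES 4

struct latch {
	bool in_use;
	unsigned int idx;
};

static struct latch latches[NUM_LATCHES];

/* Claim a free latch; return 0 and its index, or -1 when all are busy. */
static int request_latch(unsigned int *idx)
{
	for (unsigned int i = 0; i < NUM_LATCHES; i++) {
		if (!latches[i].in_use) {
			latches[i].in_use = true;
			latches[i].idx = i;
			*idx = i;
			return 0;
		}
	}
	return -1;
}

int main(void)
{
	unsigned int idx;

	if (!request_latch(&idx))
		printf("got latch %u for the Tx descriptor\n", idx);
	return 0;
}
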
+
+/**
+ * idpf_ptp_set_rx_tstamp - Enable or disable Rx timestamping
+ * @vport: Virtual port structure
+ * @rx_filter: Receive timestamp filter
+ */
+static void idpf_ptp_set_rx_tstamp(struct idpf_vport *vport, int rx_filter)
+{
+ bool enable = true, splitq;
+
+ splitq = idpf_is_queue_model_split(vport->rxq_model);
+
+ if (rx_filter == HWTSTAMP_FILTER_NONE) {
+ enable = false;
+ vport->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ } else {
+ vport->tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL;
+ }
+
+ for (u16 i = 0; i < vport->num_rxq_grp; i++) {
+ struct idpf_rxq_group *grp = &vport->rxq_grps[i];
+ struct idpf_rx_queue *rx_queue;
+ u16 j, num_rxq;
+
+ if (splitq)
+ num_rxq = grp->splitq.num_rxq_sets;
+ else
+ num_rxq = grp->singleq.num_rxq;
+
+ for (j = 0; j < num_rxq; j++) {
+ if (splitq)
+ rx_queue = &grp->splitq.rxq_sets[j]->rxq;
+ else
+ rx_queue = grp->singleq.rxqs[j];
+
+ if (enable)
+ idpf_queue_set(PTP, rx_queue);
+ else
+ idpf_queue_clear(PTP, rx_queue);
+ }
+ }
+}
+
+/**
+ * idpf_ptp_set_timestamp_mode - Setup driver for requested timestamp mode
+ * @vport: Virtual port structure
+ * @config: Hwtstamp settings requested or saved
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int idpf_ptp_set_timestamp_mode(struct idpf_vport *vport,
+ struct kernel_hwtstamp_config *config)
+{
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ break;
+ case HWTSTAMP_TX_ON:
+ if (!idpf_ptp_is_vport_tx_tstamp_ena(vport))
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ vport->tstamp_config.tx_type = config->tx_type;
+ idpf_ptp_set_rx_tstamp(vport, config->rx_filter);
+ *config = vport->tstamp_config;
+
+ return 0;
+}
+
+/**
+ * idpf_tstamp_task - Delayed task to handle Tx tstamps
+ * @work: work_struct handle
+ */
+void idpf_tstamp_task(struct work_struct *work)
+{
+ struct idpf_vport *vport;
+
+ vport = container_of(work, struct idpf_vport, tstamp_task);
+
+ idpf_ptp_get_tx_tstamp(vport);
+}
+
+/**
+ * idpf_ptp_do_aux_work - Do PTP periodic work
+ * @info: Driver's PTP info structure
+ *
+ * Return: Number of jiffies until the next run of the periodic work.
+ */
+static long idpf_ptp_do_aux_work(struct ptp_clock_info *info)
+{
+ struct idpf_adapter *adapter = idpf_ptp_info_to_adapter(info);
+
+ idpf_ptp_update_cached_phctime(adapter);
+
+ return msecs_to_jiffies(500);
+}
+
+/**
+ * idpf_ptp_set_caps - Set PTP capabilities
+ * @adapter: Driver specific private structure
+ *
+ * This function sets the PTP functions.
+ */
+static void idpf_ptp_set_caps(const struct idpf_adapter *adapter)
+{
+ struct ptp_clock_info *info = &adapter->ptp->info;
+
+ snprintf(info->name, sizeof(info->name), "%s-%s-clk",
+ KBUILD_MODNAME, pci_name(adapter->pdev));
+
+ info->owner = THIS_MODULE;
+ info->max_adj = adapter->ptp->max_adj;
+ info->gettimex64 = idpf_ptp_gettimex64;
+ info->settime64 = idpf_ptp_settime64;
+ info->adjfine = idpf_ptp_adjfine;
+ info->adjtime = idpf_ptp_adjtime;
+ info->verify = idpf_ptp_verify_pin;
+ info->enable = idpf_ptp_gpio_enable;
+ info->do_aux_work = idpf_ptp_do_aux_work;
+#if IS_ENABLED(CONFIG_ARM_ARCH_TIMER)
+ info->getcrosststamp = idpf_ptp_get_crosststamp;
+#elif IS_ENABLED(CONFIG_X86)
+ if (pcie_ptm_enabled(adapter->pdev) &&
+ boot_cpu_has(X86_FEATURE_ART) &&
+ boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ))
+ info->getcrosststamp = idpf_ptp_get_crosststamp;
+#endif /* CONFIG_ARM_ARCH_TIMER */
+}
+
+/**
+ * idpf_ptp_create_clock - Create PTP clock device for userspace
+ * @adapter: Driver specific private structure
+ *
+ * This function creates a new PTP clock device.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_ptp_create_clock(const struct idpf_adapter *adapter)
+{
+ struct ptp_clock *clock;
+
+ idpf_ptp_set_caps(adapter);
+
+ /* Attempt to register the clock before enabling the hardware. */
+ clock = ptp_clock_register(&adapter->ptp->info,
+ &adapter->pdev->dev);
+ if (IS_ERR(clock)) {
+ pci_err(adapter->pdev, "PTP clock creation failed: %pe\n",
+ clock);
+ return PTR_ERR(clock);
+ }
+
+ adapter->ptp->clock = clock;
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_release_vport_tstamp - Release the Tx timestamp trackers for a
+ * given vport.
+ * @vport: Virtual port structure
+ *
+ * Remove the queues and delete lists that track Tx timestamp entries for a
+ * given vport.
+ */
+static void idpf_ptp_release_vport_tstamp(struct idpf_vport *vport)
+{
+ struct idpf_ptp_tx_tstamp *ptp_tx_tstamp, *tmp;
+ struct list_head *head;
+
+ cancel_work_sync(&vport->tstamp_task);
+
+ /* Remove list with free latches */
+ spin_lock_bh(&vport->tx_tstamp_caps->latches_lock);
+
+ head = &vport->tx_tstamp_caps->latches_free;
+ list_for_each_entry_safe(ptp_tx_tstamp, tmp, head, list_member) {
+ list_del(&ptp_tx_tstamp->list_member);
+ kfree(ptp_tx_tstamp);
+ }
+
+ /* Remove list with latches in use */
+ head = &vport->tx_tstamp_caps->latches_in_use;
+ u64_stats_update_begin(&vport->tstamp_stats.stats_sync);
+ list_for_each_entry_safe(ptp_tx_tstamp, tmp, head, list_member) {
+ u64_stats_inc(&vport->tstamp_stats.flushed);
+
+ list_del(&ptp_tx_tstamp->list_member);
+ if (ptp_tx_tstamp->skb)
+ consume_skb(ptp_tx_tstamp->skb);
+
+ kfree(ptp_tx_tstamp);
+ }
+ u64_stats_update_end(&vport->tstamp_stats.stats_sync);
+
+ spin_unlock_bh(&vport->tx_tstamp_caps->latches_lock);
+
+ kfree(vport->tx_tstamp_caps);
+ vport->tx_tstamp_caps = NULL;
+}
+
+/**
+ * idpf_ptp_release_tstamp - Release the Tx timestamp trackers
+ * @adapter: Driver specific private structure
+ *
+ * Remove the queues and delete lists that track Tx timestamp entries.
+ */
+static void idpf_ptp_release_tstamp(struct idpf_adapter *adapter)
+{
+ idpf_for_each_vport(adapter, vport) {
+ if (!idpf_ptp_is_vport_tx_tstamp_ena(vport))
+ continue;
+
+ idpf_ptp_release_vport_tstamp(vport);
+ }
+}
+
+/**
+ * idpf_ptp_get_txq_tstamp_capability - Verify the timestamping capability
+ * for a given tx queue.
+ * @txq: Transmit queue
+ *
+ * Since the timestamp flows require both reading the device clock value and
+ * support in the Control Plane, the function checks both factors and
+ * reports the combined timestamping capability.
+ *
+ * Return: true if the timestamping is supported, false otherwise.
+ */
+bool idpf_ptp_get_txq_tstamp_capability(struct idpf_tx_queue *txq)
+{
+ if (!txq || !txq->cached_tstamp_caps)
+ return false;
+ else if (txq->cached_tstamp_caps->access)
+ return true;
+ else
+ return false;
+}
+
+/**
+ * idpf_ptp_init - Initialize PTP hardware clock support
+ * @adapter: Driver specific private structure
+ *
+ * Set up the device for interacting with the PTP hardware clock for all
+ * functions. Function will allocate and register a ptp_clock with the
+ * PTP_1588_CLOCK infrastructure.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int idpf_ptp_init(struct idpf_adapter *adapter)
+{
+ struct timespec64 ts;
+ int err;
+
+ if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_PTP)) {
+ pci_dbg(adapter->pdev, "PTP capability is not detected\n");
+ return -EOPNOTSUPP;
+ }
+
+ adapter->ptp = kzalloc(sizeof(*adapter->ptp), GFP_KERNEL);
+ if (!adapter->ptp)
+ return -ENOMEM;
+
+ /* add a back pointer to adapter */
+ adapter->ptp->adapter = adapter;
+
+ if (adapter->dev_ops.reg_ops.ptp_reg_init)
+ adapter->dev_ops.reg_ops.ptp_reg_init(adapter);
+
+ err = idpf_ptp_get_caps(adapter);
+ if (err) {
+ pci_err(adapter->pdev, "Failed to get PTP caps err %d\n", err);
+ goto free_ptp;
+ }
+
+ err = idpf_ptp_create_clock(adapter);
+ if (err)
+ goto free_ptp;
+
+ if (adapter->ptp->get_dev_clk_time_access != IDPF_PTP_NONE)
+ ptp_schedule_worker(adapter->ptp->clock, 0);
+
+ /* Write the default increment time value if the clock adjustments
+ * are enabled.
+ */
+ if (adapter->ptp->adj_dev_clk_time_access != IDPF_PTP_NONE) {
+ err = idpf_ptp_adj_dev_clk_fine(adapter,
+ adapter->ptp->base_incval);
+ if (err)
+ goto remove_clock;
+ }
+
+ /* Write the initial time value if the set time operation is enabled */
+ if (adapter->ptp->set_dev_clk_time_access != IDPF_PTP_NONE) {
+ ts = ktime_to_timespec64(ktime_get_real());
+ err = idpf_ptp_settime64(&adapter->ptp->info, &ts);
+ if (err)
+ goto remove_clock;
+ }
+
+ spin_lock_init(&adapter->ptp->read_dev_clk_lock);
+
+ pci_dbg(adapter->pdev, "PTP init successful\n");
+
+ return 0;
+
+remove_clock:
+ if (adapter->ptp->get_dev_clk_time_access != IDPF_PTP_NONE)
+ ptp_cancel_worker_sync(adapter->ptp->clock);
+
+ ptp_clock_unregister(adapter->ptp->clock);
+ adapter->ptp->clock = NULL;
+
+free_ptp:
+ kfree(adapter->ptp);
+ adapter->ptp = NULL;
+
+ return err;
+}
+
+/**
+ * idpf_ptp_release - Clear PTP hardware clock support
+ * @adapter: Driver specific private structure
+ */
+void idpf_ptp_release(struct idpf_adapter *adapter)
+{
+ struct idpf_ptp *ptp = adapter->ptp;
+
+ if (!ptp)
+ return;
+
+ if (ptp->tx_tstamp_access != IDPF_PTP_NONE &&
+ ptp->get_dev_clk_time_access != IDPF_PTP_NONE)
+ idpf_ptp_release_tstamp(adapter);
+
+ if (ptp->clock) {
+ if (adapter->ptp->get_dev_clk_time_access != IDPF_PTP_NONE)
+ ptp_cancel_worker_sync(adapter->ptp->clock);
+
+ ptp_clock_unregister(ptp->clock);
+ }
+
+ kfree(ptp);
+ adapter->ptp = NULL;
+}
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ptp.h b/drivers/net/ethernet/intel/idpf/idpf_ptp.h
new file mode 100644
index 000000000000..785da03e4cf5
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_ptp.h
@@ -0,0 +1,379 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2024 Intel Corporation */
+
+#ifndef _IDPF_PTP_H
+#define _IDPF_PTP_H
+
+#include <linux/ptp_clock_kernel.h>
+
+/**
+ * struct idpf_ptp_cmd - PTP command masks
+ * @exec_cmd_mask: mask to trigger command execution
+ * @shtime_enable_mask: mask to enable shadow time
+ */
+struct idpf_ptp_cmd {
+ u32 exec_cmd_mask;
+ u32 shtime_enable_mask;
+};
+
+/**
+ * struct idpf_ptp_dev_clk_regs - PTP device registers
+ * @dev_clk_ns_l: low part of the device clock register
+ * @dev_clk_ns_h: high part of the device clock register
+ * @phy_clk_ns_l: low part of the PHY clock register
+ * @phy_clk_ns_h: high part of the PHY clock register
+ * @sys_time_ns_l: low part of the system time register
+ * @sys_time_ns_h: high part of the system time register
+ * @incval_l: low part of the increment value register
+ * @incval_h: high part of the increment value register
+ * @shadj_l: low part of the shadow adjust register
+ * @shadj_h: high part of the shadow adjust register
+ * @phy_incval_l: low part of the PHY increment value register
+ * @phy_incval_h: high part of the PHY increment value register
+ * @phy_shadj_l: low part of the PHY shadow adjust register
+ * @phy_shadj_h: high part of the PHY shadow adjust register
+ * @cmd: PTP command register
+ * @phy_cmd: PHY command register
+ * @cmd_sync: PTP command synchronization register
+ */
+struct idpf_ptp_dev_clk_regs {
+ /* Main clock */
+ void __iomem *dev_clk_ns_l;
+ void __iomem *dev_clk_ns_h;
+
+ /* PHY timer */
+ void __iomem *phy_clk_ns_l;
+ void __iomem *phy_clk_ns_h;
+
+ /* System time */
+ void __iomem *sys_time_ns_l;
+ void __iomem *sys_time_ns_h;
+
+ /* Main timer adjustments */
+ void __iomem *incval_l;
+ void __iomem *incval_h;
+ void __iomem *shadj_l;
+ void __iomem *shadj_h;
+
+ /* PHY timer adjustments */
+ void __iomem *phy_incval_l;
+ void __iomem *phy_incval_h;
+ void __iomem *phy_shadj_l;
+ void __iomem *phy_shadj_h;
+
+ /* Command */
+ void __iomem *cmd;
+ void __iomem *phy_cmd;
+ void __iomem *cmd_sync;
+};
+
+/**
+ * enum idpf_ptp_access - the type of access to PTP operations
+ * @IDPF_PTP_NONE: no access
+ * @IDPF_PTP_DIRECT: direct access through BAR registers
+ * @IDPF_PTP_MAILBOX: access through mailbox messages
+ */
+enum idpf_ptp_access {
+ IDPF_PTP_NONE = 0,
+ IDPF_PTP_DIRECT,
+ IDPF_PTP_MAILBOX,
+};
+
+/**
+ * struct idpf_ptp_secondary_mbx - PTP secondary mailbox
+ * @peer_mbx_q_id: PTP mailbox queue ID
+ * @peer_id: Peer ID for PTP Device Control daemon
+ * @valid: indicates whether the secondary mailbox is supported by the Control
+ * Plane
+ */
+struct idpf_ptp_secondary_mbx {
+ u16 peer_mbx_q_id;
+ u16 peer_id;
+ bool valid:1;
+};
+
+/**
+ * enum idpf_ptp_tx_tstamp_state - Tx timestamp states
+ * @IDPF_PTP_FREE: Tx timestamp index free to use
+ * @IDPF_PTP_REQUEST: Tx timestamp index set to the Tx descriptor
+ * @IDPF_PTP_READ_VALUE: Tx timestamp value ready to be read
+ */
+enum idpf_ptp_tx_tstamp_state {
+ IDPF_PTP_FREE,
+ IDPF_PTP_REQUEST,
+ IDPF_PTP_READ_VALUE,
+};
+
+/**
+ * struct idpf_ptp_tx_tstamp_status - Parameters to track Tx timestamp
+ * @skb: the pointer to the SKB that received the completion tag
+ * @state: the state of the Tx timestamp
+ */
+struct idpf_ptp_tx_tstamp_status {
+ struct sk_buff *skb;
+ enum idpf_ptp_tx_tstamp_state state;
+};
+
+/**
+ * struct idpf_ptp_tx_tstamp - Parameters for Tx timestamping
+ * @list_member: the list member structure
+ * @tx_latch_reg_offset_l: Tx tstamp latch low register offset
+ * @tx_latch_reg_offset_h: Tx tstamp latch high register offset
+ * @skb: the pointer to the SKB for this timestamp request
+ * @tstamp: the Tx tstamp value
+ * @idx: the index of the Tx tstamp
+ */
+struct idpf_ptp_tx_tstamp {
+ struct list_head list_member;
+ u32 tx_latch_reg_offset_l;
+ u32 tx_latch_reg_offset_h;
+ struct sk_buff *skb;
+ u64 tstamp;
+ u32 idx;
+};
+
+/**
+ * struct idpf_ptp_vport_tx_tstamp_caps - Tx timestamp capabilities
+ * @vport_id: the vport id
+ * @num_entries: the number of negotiated Tx timestamp entries
+ * @tstamp_ns_lo_bit: first bit for nanosecond part of the timestamp
+ * @latches_lock: the lock to the lists of free/used timestamp indexes
+ * @status_lock: the lock to the status tracker
+ * @access: indicates an access to Tx timestamp
+ * @latches_free: the list of the free Tx timestamps latches
+ * @latches_in_use: the list of the used Tx timestamps latches
+ * @tx_tstamp_status: Tx tstamp status tracker
+ */
+struct idpf_ptp_vport_tx_tstamp_caps {
+ u32 vport_id;
+ u16 num_entries;
+ u16 tstamp_ns_lo_bit;
+ spinlock_t latches_lock;
+ spinlock_t status_lock;
+ bool access:1;
+ struct list_head latches_free;
+ struct list_head latches_in_use;
+ struct idpf_ptp_tx_tstamp_status tx_tstamp_status[];
+};
+
+/**
+ * struct idpf_ptp - PTP parameters
+ * @info: structure defining PTP hardware capabilities
+ * @clock: pointer to registered PTP clock device
+ * @adapter: back pointer to the adapter
+ * @base_incval: base increment value of the PTP clock
+ * @max_adj: maximum adjustment of the PTP clock
+ * @cmd: HW specific command masks
+ * @cached_phc_time: a cached copy of the PHC time for timestamp extension
+ * @cached_phc_jiffies: jiffies when cached_phc_time was last updated
+ * @dev_clk_regs: the set of registers to access the device clock
+ * @caps: PTP capabilities negotiated with the Control Plane
+ * @get_dev_clk_time_access: access type for getting the device clock time
+ * @get_cross_tstamp_access: access type for the cross timestamping
+ * @set_dev_clk_time_access: access type for setting the device clock time
+ * @adj_dev_clk_time_access: access type for adjusting the device clock
+ * @tx_tstamp_access: access type for the Tx timestamp value read
+ * @rsv: reserved bits
+ * @secondary_mbx: parameters for using dedicated PTP mailbox
+ * @read_dev_clk_lock: spinlock protecting access to the device clock read
+ * operation executed by the HW latch
+ */
+struct idpf_ptp {
+ struct ptp_clock_info info;
+ struct ptp_clock *clock;
+ struct idpf_adapter *adapter;
+ u64 base_incval;
+ u64 max_adj;
+ struct idpf_ptp_cmd cmd;
+ u64 cached_phc_time;
+ unsigned long cached_phc_jiffies;
+ struct idpf_ptp_dev_clk_regs dev_clk_regs;
+ u32 caps;
+ enum idpf_ptp_access get_dev_clk_time_access:2;
+ enum idpf_ptp_access get_cross_tstamp_access:2;
+ enum idpf_ptp_access set_dev_clk_time_access:2;
+ enum idpf_ptp_access adj_dev_clk_time_access:2;
+ enum idpf_ptp_access tx_tstamp_access:2;
+ u8 rsv;
+ struct idpf_ptp_secondary_mbx secondary_mbx;
+ spinlock_t read_dev_clk_lock;
+};
+
+/**
+ * idpf_ptp_info_to_adapter - get driver adapter struct from ptp_clock_info
+ * @info: pointer to ptp_clock_info struct
+ *
+ * Return: pointer to the corresponding adapter struct
+ */
+static inline struct idpf_adapter *
+idpf_ptp_info_to_adapter(const struct ptp_clock_info *info)
+{
+ const struct idpf_ptp *ptp = container_of_const(info, struct idpf_ptp,
+ info);
+ return ptp->adapter;
+}
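
The helper above works because ptp_clock_info is embedded inside struct idpf_ptp, so container_of() can step from the member pointer handed to the PTP callbacks back to the enclosing object and then follow the adapter back pointer. A minimal user-space demonstration of the same pattern (not part of the patch):

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct clock_info { int max_adj; };

struct ptp_ctx {
	struct clock_info info;	/* embedded member handed out to callbacks */
	const char *name;	/* stands in for the adapter back pointer */
};

int main(void)
{
	struct ptp_ctx ctx = { .info = { .max_adj = 100 }, .name = "idpf" };
	struct clock_info *info = &ctx.info;
	struct ptp_ctx *back = container_of(info, struct ptp_ctx, info);

	printf("recovered parent: %s\n", back->name);
	return 0;
}
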
+
+/**
+ * struct idpf_ptp_dev_timers - System time and device time values
+ * @sys_time_ns: system time value expressed in nanoseconds
+ * @dev_clk_time_ns: device clock time value expressed in nanoseconds
+ */
+struct idpf_ptp_dev_timers {
+ u64 sys_time_ns;
+ u64 dev_clk_time_ns;
+};
+
+/**
+ * idpf_ptp_is_vport_tx_tstamp_ena - Verify the Tx timestamping enablement for
+ * a given vport.
+ * @vport: Virtual port structure
+ *
+ * Tx timestamp capabilities are negotiated with the Control Plane only if the
+ * device clock value can be read, Tx timestamp access type is different than
+ * NONE, and the PTP clock for the adapter is created. When all those conditions
+ * are satisfied, the Tx timestamp feature is enabled and tx_tstamp_caps is
+ * allocated and populated.
+ *
+ * Return: true if the Tx timestamping is enabled, false otherwise.
+ */
+static inline bool idpf_ptp_is_vport_tx_tstamp_ena(struct idpf_vport *vport)
+{
+ if (!vport->tx_tstamp_caps)
+ return false;
+ else
+ return true;
+}
+
+/**
+ * idpf_ptp_is_vport_rx_tstamp_ena - Verify the Rx timestamping enablement for
+ * a given vport.
+ * @vport: Virtual port structure
+ *
+ * Rx timestamp feature is enabled if the PTP clock for the adapter is created
+ * and it is possible to read the value of the device clock. The second
+ * assumption comes from the need to extend the Rx timestamp value to 64 bit
+ * based on the current device clock time.
+ *
+ * Return: true if the Rx timestamping is enabled, false otherwise.
+ */
+static inline bool idpf_ptp_is_vport_rx_tstamp_ena(struct idpf_vport *vport)
+{
+ if (!vport->adapter->ptp ||
+ vport->adapter->ptp->get_dev_clk_time_access == IDPF_PTP_NONE)
+ return false;
+ else
+ return true;
+}
+
+#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
+int idpf_ptp_init(struct idpf_adapter *adapter);
+void idpf_ptp_release(struct idpf_adapter *adapter);
+int idpf_ptp_get_caps(struct idpf_adapter *adapter);
+void idpf_ptp_get_features_access(const struct idpf_adapter *adapter);
+bool idpf_ptp_get_txq_tstamp_capability(struct idpf_tx_queue *txq);
+int idpf_ptp_get_dev_clk_time(struct idpf_adapter *adapter,
+ struct idpf_ptp_dev_timers *dev_clk_time);
+int idpf_ptp_get_cross_time(struct idpf_adapter *adapter,
+ struct idpf_ptp_dev_timers *cross_time);
+int idpf_ptp_set_dev_clk_time(struct idpf_adapter *adapter, u64 time);
+int idpf_ptp_adj_dev_clk_fine(struct idpf_adapter *adapter, u64 incval);
+int idpf_ptp_adj_dev_clk_time(struct idpf_adapter *adapter, s64 delta);
+int idpf_ptp_get_vport_tstamps_caps(struct idpf_vport *vport);
+int idpf_ptp_get_tx_tstamp(struct idpf_vport *vport);
+int idpf_ptp_set_timestamp_mode(struct idpf_vport *vport,
+ struct kernel_hwtstamp_config *config);
+u64 idpf_ptp_extend_ts(struct idpf_vport *vport, u64 in_tstamp);
+u64 idpf_ptp_tstamp_extend_32b_to_64b(u64 cached_phc_time, u32 in_timestamp);
+int idpf_ptp_request_ts(struct idpf_tx_queue *tx_q, struct sk_buff *skb,
+ u32 *idx);
+void idpf_tstamp_task(struct work_struct *work);
+#else /* CONFIG_PTP_1588_CLOCK */
+static inline int idpf_ptp_init(struct idpf_adapter *adapter)
+{
+ return 0;
+}
+
+static inline void idpf_ptp_release(struct idpf_adapter *adapter) { }
+
+static inline int idpf_ptp_get_caps(struct idpf_adapter *adapter)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void
+idpf_ptp_get_features_access(const struct idpf_adapter *adapter) { }
+
+static inline bool
+idpf_ptp_get_txq_tstamp_capability(struct idpf_tx_queue *txq)
+{
+ return false;
+}
+
+static inline int
+idpf_ptp_get_dev_clk_time(struct idpf_adapter *adapter,
+ struct idpf_ptp_dev_timers *dev_clk_time)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+idpf_ptp_get_cross_time(struct idpf_adapter *adapter,
+ struct idpf_ptp_dev_timers *cross_time)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int idpf_ptp_set_dev_clk_time(struct idpf_adapter *adapter,
+ u64 time)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int idpf_ptp_adj_dev_clk_fine(struct idpf_adapter *adapter,
+ u64 incval)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int idpf_ptp_adj_dev_clk_time(struct idpf_adapter *adapter,
+ s64 delta)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int idpf_ptp_get_vport_tstamps_caps(struct idpf_vport *vport)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int idpf_ptp_get_tx_tstamp(struct idpf_vport *vport)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+idpf_ptp_set_timestamp_mode(struct idpf_vport *vport,
+ struct kernel_hwtstamp_config *config)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline u64 idpf_ptp_extend_ts(struct idpf_vport *vport, u64 in_tstamp)
+{
+ return 0;
+}
+
+static inline u64 idpf_ptp_tstamp_extend_32b_to_64b(u64 cached_phc_time,
+ u32 in_timestamp)
+{
+ return 0;
+}
+
+static inline int idpf_ptp_request_ts(struct idpf_tx_queue *tx_q,
+ struct sk_buff *skb, u32 *idx)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void idpf_tstamp_task(struct work_struct *work) { }
+#endif /* CONFIG_PTP_1588_CLOCK */
+#endif /* _IDPF_PTP_H */
diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
index 27b93592c4ba..e3ddf18dcbf5 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */
+#include <net/libeth/xdp.h>
+
#include "idpf.h"
/**
@@ -177,6 +179,58 @@ static int idpf_tx_singleq_csum(struct sk_buff *skb,
}
/**
+ * idpf_tx_singleq_dma_map_error - handle TX DMA map errors
+ * @txq: queue to send buffer on
+ * @skb: send buffer
+ * @first: original first buffer info buffer for packet
+ * @idx: starting point on ring to unwind
+ */
+static void idpf_tx_singleq_dma_map_error(struct idpf_tx_queue *txq,
+ struct sk_buff *skb,
+ struct idpf_tx_buf *first, u16 idx)
+{
+ struct libeth_sq_napi_stats ss = { };
+ struct libeth_cq_pp cp = {
+ .dev = txq->dev,
+ .ss = &ss,
+ };
+
+ u64_stats_update_begin(&txq->stats_sync);
+ u64_stats_inc(&txq->q_stats.dma_map_errs);
+ u64_stats_update_end(&txq->stats_sync);
+
+ /* clear dma mappings for failed tx_buf map */
+ for (;;) {
+ struct idpf_tx_buf *tx_buf;
+
+ tx_buf = &txq->tx_buf[idx];
+ libeth_tx_complete(tx_buf, &cp);
+ if (tx_buf == first)
+ break;
+ if (idx == 0)
+ idx = txq->desc_count;
+ idx--;
+ }
+
+ if (skb_is_gso(skb)) {
+ union idpf_tx_flex_desc *tx_desc;
+
+ /* If we failed a DMA mapping for a TSO packet, we will have
+ * used one additional descriptor for a context
+ * descriptor. Reset that here.
+ */
+ tx_desc = &txq->flex_tx[idx];
+ memset(tx_desc, 0, sizeof(*tx_desc));
+ if (idx == 0)
+ idx = txq->desc_count;
+ idx--;
+ }
+
+ /* Update tail in case netdev_xmit_more was previously true */
+ idpf_tx_buf_hw_update(txq, idx, false);
+}
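
The unwind loop above walks the ring backwards from the failing index to the packet's first buffer, wrapping from slot 0 back to desc_count - 1. A tiny stand-alone sketch of that backward walk with wraparound (not part of the patch):

#include <stdio.h>

#define DESC_COUNT 8

int main(void)
{
	unsigned int first = 6;	/* first descriptor used by the packet */
	unsigned int idx = 1;	/* index where the DMA mapping failed */

	/* Visit idx, idx-1, ... down to first, wrapping below zero. */
	for (;;) {
		printf("unwind slot %u\n", idx);
		if (idx == first)
			break;
		if (idx == 0)
			idx = DESC_COUNT;
		idx--;
	}
	return 0;	/* prints 1, 0, 7, 6 */
}
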
+
+/**
* idpf_tx_singleq_map - Build the Tx base descriptor
* @tx_q: queue to send buffer on
* @first: first buffer info buffer to use
@@ -186,7 +240,7 @@ static int idpf_tx_singleq_csum(struct sk_buff *skb,
* and gets a physical address for each memory location and programs
* it and the length into the transmit base mode descriptor.
*/
-static void idpf_tx_singleq_map(struct idpf_queue *tx_q,
+static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q,
struct idpf_tx_buf *first,
struct idpf_tx_offload_params *offloads)
{
@@ -205,23 +259,25 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q,
data_len = skb->data_len;
size = skb_headlen(skb);
- tx_desc = IDPF_BASE_TX_DESC(tx_q, i);
+ tx_desc = &tx_q->base_tx[i];
dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE);
/* write each descriptor with CRC bit */
- if (tx_q->vport->crc_enable)
+ if (idpf_queue_has(CRC_EN, tx_q))
td_cmd |= IDPF_TX_DESC_CMD_ICRC;
for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
unsigned int max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
- if (dma_mapping_error(tx_q->dev, dma))
- return idpf_tx_dma_map_error(tx_q, skb, first, i);
+ if (unlikely(dma_mapping_error(tx_q->dev, dma)))
+ return idpf_tx_singleq_dma_map_error(tx_q, skb,
+ first, i);
/* record length, and DMA address */
dma_unmap_len_set(tx_buf, len, size);
dma_unmap_addr_set(tx_buf, dma, dma);
+ tx_buf->type = LIBETH_SQE_FRAG;
/* align size to end of page */
max_data += -dma & (IDPF_TX_MAX_READ_REQ_SIZE - 1);
@@ -235,14 +291,17 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q,
offsets,
max_data,
td_tag);
- tx_desc++;
- i++;
-
- if (i == tx_q->desc_count) {
- tx_desc = IDPF_BASE_TX_DESC(tx_q, 0);
+ if (unlikely(++i == tx_q->desc_count)) {
+ tx_buf = &tx_q->tx_buf[0];
+ tx_desc = &tx_q->base_tx[0];
i = 0;
+ } else {
+ tx_buf++;
+ tx_desc++;
}
+ tx_buf->type = LIBETH_SQE_EMPTY;
+
dma += max_data;
size -= max_data;
@@ -255,12 +314,14 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q,
tx_desc->qw1 = idpf_tx_singleq_build_ctob(td_cmd, offsets,
size, td_tag);
- tx_desc++;
- i++;
- if (i == tx_q->desc_count) {
- tx_desc = IDPF_BASE_TX_DESC(tx_q, 0);
+ if (unlikely(++i == tx_q->desc_count)) {
+ tx_buf = &tx_q->tx_buf[0];
+ tx_desc = &tx_q->base_tx[0];
i = 0;
+ } else {
+ tx_buf++;
+ tx_desc++;
}
size = skb_frag_size(frag);
@@ -268,8 +329,6 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q,
dma = skb_frag_dma_map(tx_q->dev, frag, 0, size,
DMA_TO_DEVICE);
-
- tx_buf = &tx_q->tx_buf[i];
}
skb_tx_timestamp(first->skb);
@@ -280,13 +339,13 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q,
tx_desc->qw1 = idpf_tx_singleq_build_ctob(td_cmd, offsets,
size, td_tag);
- IDPF_SINGLEQ_BUMP_RING_IDX(tx_q, i);
+ first->type = LIBETH_SQE_SKB;
+ first->rs_idx = i;
- /* set next_to_watch value indicating a packet is present */
- first->next_to_watch = tx_desc;
+ IDPF_SINGLEQ_BUMP_RING_IDX(tx_q, i);
- nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx);
- netdev_tx_sent_queue(nq, first->bytecount);
+ nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
+ netdev_tx_sent_queue(nq, first->bytes);
idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more());
}
@@ -299,15 +358,14 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q,
* ring entry to reflect that this index is a context descriptor
*/
static struct idpf_base_tx_ctx_desc *
-idpf_tx_singleq_get_ctx_desc(struct idpf_queue *txq)
+idpf_tx_singleq_get_ctx_desc(struct idpf_tx_queue *txq)
{
struct idpf_base_tx_ctx_desc *ctx_desc;
int ntu = txq->next_to_use;
- memset(&txq->tx_buf[ntu], 0, sizeof(struct idpf_tx_buf));
- txq->tx_buf[ntu].ctx_entry = true;
+ txq->tx_buf[ntu].type = LIBETH_SQE_CTX;
- ctx_desc = IDPF_BASE_TX_CTX_DESC(txq, ntu);
+ ctx_desc = &txq->base_ctx[ntu];
IDPF_SINGLEQ_BUMP_RING_IDX(txq, ntu);
txq->next_to_use = ntu;
@@ -320,7 +378,7 @@ idpf_tx_singleq_get_ctx_desc(struct idpf_queue *txq)
* @txq: queue to send buffer on
* @offload: offload parameter structure
**/
-static void idpf_tx_singleq_build_ctx_desc(struct idpf_queue *txq,
+static void idpf_tx_singleq_build_ctx_desc(struct idpf_tx_queue *txq,
struct idpf_tx_offload_params *offload)
{
struct idpf_base_tx_ctx_desc *desc = idpf_tx_singleq_get_ctx_desc(txq);
@@ -333,7 +391,7 @@ static void idpf_tx_singleq_build_ctx_desc(struct idpf_queue *txq,
qw1 |= FIELD_PREP(IDPF_TXD_CTX_QW1_MSS_M, offload->mss);
u64_stats_update_begin(&txq->stats_sync);
- u64_stats_inc(&txq->q_stats.tx.lso_pkts);
+ u64_stats_inc(&txq->q_stats.lso_pkts);
u64_stats_update_end(&txq->stats_sync);
}
@@ -351,24 +409,29 @@ static void idpf_tx_singleq_build_ctx_desc(struct idpf_queue *txq,
*
* Returns NETDEV_TX_OK if sent, else an error code
*/
-static netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
- struct idpf_queue *tx_q)
+netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
+ struct idpf_tx_queue *tx_q)
{
struct idpf_tx_offload_params offload = { };
struct idpf_tx_buf *first;
- unsigned int count;
+ u32 count, buf_count = 1;
+ int csum, tso, needed;
__be16 protocol;
- int csum, tso;
- count = idpf_tx_desc_count_required(tx_q, skb);
+ count = idpf_tx_res_count_required(tx_q, skb, &buf_count);
if (unlikely(!count))
return idpf_tx_drop_skb(tx_q, skb);
- if (idpf_tx_maybe_stop_common(tx_q,
- count + IDPF_TX_DESCS_PER_CACHE_LINE +
- IDPF_TX_DESCS_FOR_CTX)) {
+ needed = count + IDPF_TX_DESCS_PER_CACHE_LINE + IDPF_TX_DESCS_FOR_CTX;
+ if (!netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx,
+ IDPF_DESC_UNUSED(tx_q),
+ needed, needed)) {
idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
+ u64_stats_update_begin(&tx_q->stats_sync);
+ u64_stats_inc(&tx_q->q_stats.q_busy);
+ u64_stats_update_end(&tx_q->stats_sync);
+
return NETDEV_TX_BUSY;
}
@@ -394,11 +457,11 @@ static netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
first->skb = skb;
if (tso) {
- first->gso_segs = offload.tso_segs;
- first->bytecount = skb->len + ((first->gso_segs - 1) * offload.tso_hdr_len);
+ first->packets = offload.tso_segs;
+ first->bytes = skb->len + ((first->packets - 1) * offload.tso_hdr_len);
} else {
- first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
- first->gso_segs = 1;
+ first->bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
+ first->packets = 1;
}
idpf_tx_singleq_map(tx_q, first, &offload);
@@ -409,53 +472,30 @@ out_drop:
}
/**
- * idpf_tx_singleq_start - Selects the right Tx queue to send buffer
- * @skb: send buffer
- * @netdev: network interface device structure
- *
- * Returns NETDEV_TX_OK if sent, else an error code
- */
-netdev_tx_t idpf_tx_singleq_start(struct sk_buff *skb,
- struct net_device *netdev)
-{
- struct idpf_vport *vport = idpf_netdev_to_vport(netdev);
- struct idpf_queue *tx_q;
-
- tx_q = vport->txqs[skb_get_queue_mapping(skb)];
-
- /* hardware can't handle really short frames, hardware padding works
- * beyond this point
- */
- if (skb_put_padto(skb, IDPF_TX_MIN_PKT_LEN)) {
- idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
-
- return NETDEV_TX_OK;
- }
-
- return idpf_tx_singleq_frame(skb, tx_q);
-}
-
-/**
* idpf_tx_singleq_clean - Reclaim resources from queue
* @tx_q: Tx queue to clean
* @napi_budget: Used to determine if we are in netpoll
* @cleaned: returns number of packets cleaned
*
*/
-static bool idpf_tx_singleq_clean(struct idpf_queue *tx_q, int napi_budget,
+static bool idpf_tx_singleq_clean(struct idpf_tx_queue *tx_q, int napi_budget,
int *cleaned)
{
- unsigned int budget = tx_q->vport->compln_clean_budget;
- unsigned int total_bytes = 0, total_pkts = 0;
+ struct libeth_sq_napi_stats ss = { };
struct idpf_base_tx_desc *tx_desc;
+ u32 budget = tx_q->clean_budget;
s16 ntc = tx_q->next_to_clean;
+ struct libeth_cq_pp cp = {
+ .dev = tx_q->dev,
+ .ss = &ss,
+ .napi = napi_budget,
+ };
struct idpf_netdev_priv *np;
struct idpf_tx_buf *tx_buf;
- struct idpf_vport *vport;
struct netdev_queue *nq;
bool dont_wake;
- tx_desc = IDPF_BASE_TX_DESC(tx_q, ntc);
+ tx_desc = &tx_q->base_tx[ntc];
tx_buf = &tx_q->tx_buf[ntc];
ntc -= tx_q->desc_count;
@@ -467,47 +507,26 @@ static bool idpf_tx_singleq_clean(struct idpf_queue *tx_q, int napi_budget,
* such. We can skip this descriptor since there is no buffer
* to clean.
*/
- if (tx_buf->ctx_entry) {
- /* Clear this flag here to avoid stale flag values when
- * this buffer is used for actual data in the future.
- * There are cases where the tx_buf struct / the flags
- * field will not be cleared before being reused.
- */
- tx_buf->ctx_entry = false;
+ if (unlikely(tx_buf->type <= LIBETH_SQE_CTX)) {
+ tx_buf->type = LIBETH_SQE_EMPTY;
goto fetch_next_txq_desc;
}
- /* if next_to_watch is not set then no work pending */
- eop_desc = (struct idpf_base_tx_desc *)tx_buf->next_to_watch;
- if (!eop_desc)
+ if (unlikely(tx_buf->type != LIBETH_SQE_SKB))
break;
- /* prevent any other reads prior to eop_desc */
+ /* prevent any other reads prior to type */
smp_rmb();
+ eop_desc = &tx_q->base_tx[tx_buf->rs_idx];
+
/* if the descriptor isn't done, no work yet to do */
if (!(eop_desc->qw1 &
cpu_to_le64(IDPF_TX_DESC_DTYPE_DESC_DONE)))
break;
- /* clear next_to_watch to prevent false hangs */
- tx_buf->next_to_watch = NULL;
-
/* update the statistics for this packet */
- total_bytes += tx_buf->bytecount;
- total_pkts += tx_buf->gso_segs;
-
- napi_consume_skb(tx_buf->skb, napi_budget);
-
- /* unmap skb header data */
- dma_unmap_single(tx_q->dev,
- dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
-
- /* clear tx_buf data */
- tx_buf->skb = NULL;
- dma_unmap_len_set(tx_buf, len, 0);
+ libeth_tx_complete(tx_buf, &cp);
/* unmap remaining buffers */
while (tx_desc != eop_desc) {
@@ -517,17 +536,11 @@ static bool idpf_tx_singleq_clean(struct idpf_queue *tx_q, int napi_budget,
if (unlikely(!ntc)) {
ntc -= tx_q->desc_count;
tx_buf = tx_q->tx_buf;
- tx_desc = IDPF_BASE_TX_DESC(tx_q, 0);
+ tx_desc = &tx_q->base_tx[0];
}
/* unmap any remaining paged data */
- if (dma_unmap_len(tx_buf, len)) {
- dma_unmap_page(tx_q->dev,
- dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
- dma_unmap_len_set(tx_buf, len, 0);
- }
+ libeth_tx_complete(tx_buf, &cp);
}
/* update budget only if we did something */
@@ -540,27 +553,26 @@ fetch_next_txq_desc:
if (unlikely(!ntc)) {
ntc -= tx_q->desc_count;
tx_buf = tx_q->tx_buf;
- tx_desc = IDPF_BASE_TX_DESC(tx_q, 0);
+ tx_desc = &tx_q->base_tx[0];
}
} while (likely(budget));
ntc += tx_q->desc_count;
tx_q->next_to_clean = ntc;
- *cleaned += total_pkts;
+ *cleaned += ss.packets;
u64_stats_update_begin(&tx_q->stats_sync);
- u64_stats_add(&tx_q->q_stats.tx.packets, total_pkts);
- u64_stats_add(&tx_q->q_stats.tx.bytes, total_bytes);
+ u64_stats_add(&tx_q->q_stats.packets, ss.packets);
+ u64_stats_add(&tx_q->q_stats.bytes, ss.bytes);
u64_stats_update_end(&tx_q->stats_sync);
- vport = tx_q->vport;
- np = netdev_priv(vport->netdev);
- nq = netdev_get_tx_queue(vport->netdev, tx_q->idx);
+ np = netdev_priv(tx_q->netdev);
+ nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
- dont_wake = np->state != __IDPF_VPORT_UP ||
- !netif_carrier_ok(vport->netdev);
- __netif_txq_completed_wake(nq, total_pkts, total_bytes,
+ dont_wake = !test_bit(IDPF_VPORT_UP, np->state) ||
+ !netif_carrier_ok(tx_q->netdev);
+ __netif_txq_completed_wake(nq, ss.packets, ss.bytes,
IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH,
dont_wake);
@@ -584,7 +596,7 @@ static bool idpf_tx_singleq_clean_all(struct idpf_q_vector *q_vec, int budget,
budget_per_q = num_txq ? max(budget / num_txq, 1) : 0;
for (i = 0; i < num_txq; i++) {
- struct idpf_queue *q;
+ struct idpf_tx_queue *q;
q = q_vec->tx[i];
clean_complete &= idpf_tx_singleq_clean(q, budget_per_q,
@@ -614,14 +626,9 @@ static bool idpf_rx_singleq_test_staterr(const union virtchnl2_rx_desc *rx_desc,
/**
* idpf_rx_singleq_is_non_eop - process handling of non-EOP buffers
- * @rxq: Rx ring being processed
* @rx_desc: Rx descriptor for current buffer
- * @skb: Current socket buffer containing buffer in progress
- * @ntc: next to clean
*/
-static bool idpf_rx_singleq_is_non_eop(struct idpf_queue *rxq,
- union virtchnl2_rx_desc *rx_desc,
- struct sk_buff *skb, u16 ntc)
+static bool idpf_rx_singleq_is_non_eop(const union virtchnl2_rx_desc *rx_desc)
{
/* if we are the last buffer then there is nothing else to do */
if (likely(idpf_rx_singleq_test_staterr(rx_desc, IDPF_RXD_EOF_SINGLEQ)))
@@ -635,98 +642,82 @@ static bool idpf_rx_singleq_is_non_eop(struct idpf_queue *rxq,
* @rxq: Rx ring being processed
* @skb: skb currently being received and modified
* @csum_bits: checksum bits from descriptor
- * @ptype: the packet type decoded by hardware
+ * @decoded: the packet type decoded by hardware
*
* skb->protocol must be set before this function is called
*/
-static void idpf_rx_singleq_csum(struct idpf_queue *rxq, struct sk_buff *skb,
- struct idpf_rx_csum_decoded *csum_bits,
- u16 ptype)
+static void idpf_rx_singleq_csum(struct idpf_rx_queue *rxq,
+ struct sk_buff *skb,
+ struct libeth_rx_csum csum_bits,
+ struct libeth_rx_pt decoded)
{
- struct idpf_rx_ptype_decoded decoded;
bool ipv4, ipv6;
/* check if Rx checksum is enabled */
- if (unlikely(!(rxq->vport->netdev->features & NETIF_F_RXCSUM)))
+ if (!libeth_rx_pt_has_checksum(rxq->xdp_rxq.dev, decoded))
return;
/* check if HW has decoded the packet and checksum */
- if (unlikely(!(csum_bits->l3l4p)))
+ if (unlikely(!csum_bits.l3l4p))
return;
- decoded = rxq->vport->rx_ptype_lkup[ptype];
- if (unlikely(!(decoded.known && decoded.outer_ip)))
- return;
-
- ipv4 = IDPF_RX_PTYPE_TO_IPV(&decoded, IDPF_RX_PTYPE_OUTER_IPV4);
- ipv6 = IDPF_RX_PTYPE_TO_IPV(&decoded, IDPF_RX_PTYPE_OUTER_IPV6);
+ ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4;
+ ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6;
/* Check if there were any checksum errors */
- if (unlikely(ipv4 && (csum_bits->ipe || csum_bits->eipe)))
+ if (unlikely(ipv4 && (csum_bits.ipe || csum_bits.eipe)))
goto checksum_fail;
/* Device could not do any checksum offload for certain extension
* headers as indicated by setting IPV6EXADD bit
*/
- if (unlikely(ipv6 && csum_bits->ipv6exadd))
+ if (unlikely(ipv6 && csum_bits.ipv6exadd))
return;
/* check for L4 errors and handle packets that were not able to be
* checksummed due to arrival speed
*/
- if (unlikely(csum_bits->l4e))
+ if (unlikely(csum_bits.l4e))
goto checksum_fail;
- if (unlikely(csum_bits->nat && csum_bits->eudpe))
+ if (unlikely(csum_bits.nat && csum_bits.eudpe))
goto checksum_fail;
/* Handle packets that were not able to be checksummed due to arrival
* speed, in this case the stack can compute the csum.
*/
- if (unlikely(csum_bits->pprs))
+ if (unlikely(csum_bits.pprs))
return;
/* If there is an outer header present that might contain a checksum
* we need to bump the checksum level by 1 to reflect the fact that
* we are indicating we validated the inner checksum.
*/
- if (decoded.tunnel_type >= IDPF_RX_PTYPE_TUNNEL_IP_GRENAT)
+ if (decoded.tunnel_type >= LIBETH_RX_PT_TUNNEL_IP_GRENAT)
skb->csum_level = 1;
- /* Only report checksum unnecessary for ICMP, TCP, UDP, or SCTP */
- switch (decoded.inner_prot) {
- case IDPF_RX_PTYPE_INNER_PROT_ICMP:
- case IDPF_RX_PTYPE_INNER_PROT_TCP:
- case IDPF_RX_PTYPE_INNER_PROT_UDP:
- case IDPF_RX_PTYPE_INNER_PROT_SCTP:
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- return;
- default:
- return;
- }
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ return;
checksum_fail:
u64_stats_update_begin(&rxq->stats_sync);
- u64_stats_inc(&rxq->q_stats.rx.hw_csum_err);
+ u64_stats_inc(&rxq->q_stats.hw_csum_err);
u64_stats_update_end(&rxq->stats_sync);
}
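
/*
 * Editorial sketch (not part of the patch): the order of the hardware
 * checksum checks above, as a standalone predicate.  The bit names mirror
 * struct libeth_rx_csum; the layout and example values are assumptions made
 * only for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

struct csum_bits {
	bool l3l4p;	/* HW parsed the L3/L4 headers */
	bool ipe, eipe;	/* inner/outer IPv4 header checksum error */
	bool ipv6exadd;	/* IPv6 extension headers present, offload skipped */
	bool l4e;	/* L4 checksum error */
	bool nat, eudpe;/* tunnelled packet with outer UDP checksum error */
	bool pprs;	/* not checksummed due to arrival speed */
};

enum verdict { CSUM_NONE, CSUM_FAIL, CSUM_GOOD };

static enum verdict csum_verdict(const struct csum_bits *b, bool ipv4, bool ipv6)
{
	if (!b->l3l4p)
		return CSUM_NONE;	/* HW did not look at the packet */
	if (ipv4 && (b->ipe || b->eipe))
		return CSUM_FAIL;
	if (ipv6 && b->ipv6exadd)
		return CSUM_NONE;	/* offload skipped, let the stack verify it */
	if (b->l4e || (b->nat && b->eudpe))
		return CSUM_FAIL;
	if (b->pprs)
		return CSUM_NONE;
	return CSUM_GOOD;		/* CHECKSUM_UNNECESSARY in the driver */
}

int main(void)
{
	struct csum_bits ok = { .l3l4p = true };

	printf("%d\n", csum_verdict(&ok, true, false) == CSUM_GOOD);	/* prints 1 */
	return 0;
}
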
/**
* idpf_rx_singleq_base_csum - Indicate in skb if hw indicated a good cksum
- * @rx_q: Rx completion queue
- * @skb: skb currently being received and modified
* @rx_desc: the receive descriptor
- * @ptype: Rx packet type
*
* This function only operates on the VIRTCHNL2_RXDID_1_32B_BASE_M base 32byte
* descriptor writeback format.
+ *
+ * Return: parsed checksum status.
**/
-static void idpf_rx_singleq_base_csum(struct idpf_queue *rx_q,
- struct sk_buff *skb,
- union virtchnl2_rx_desc *rx_desc,
- u16 ptype)
+static struct libeth_rx_csum
+idpf_rx_singleq_base_csum(const union virtchnl2_rx_desc *rx_desc)
{
- struct idpf_rx_csum_decoded csum_bits;
+ struct libeth_rx_csum csum_bits = { };
u32 rx_error, rx_status;
u64 qword;
@@ -745,28 +736,23 @@ static void idpf_rx_singleq_base_csum(struct idpf_queue *rx_q,
rx_status);
csum_bits.ipv6exadd = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_STATUS_IPV6EXADD_M,
rx_status);
- csum_bits.nat = 0;
- csum_bits.eudpe = 0;
- idpf_rx_singleq_csum(rx_q, skb, &csum_bits, ptype);
+ return csum_bits;
}
/**
* idpf_rx_singleq_flex_csum - Indicate in skb if hw indicated a good cksum
- * @rx_q: Rx completion queue
- * @skb: skb currently being received and modified
* @rx_desc: the receive descriptor
- * @ptype: Rx packet type
*
* This function only operates on the VIRTCHNL2_RXDID_2_FLEX_SQ_NIC flexible
* descriptor writeback format.
+ *
+ * Return: parsed checksum status.
**/
-static void idpf_rx_singleq_flex_csum(struct idpf_queue *rx_q,
- struct sk_buff *skb,
- union virtchnl2_rx_desc *rx_desc,
- u16 ptype)
+static struct libeth_rx_csum
+idpf_rx_singleq_flex_csum(const union virtchnl2_rx_desc *rx_desc)
{
- struct idpf_rx_csum_decoded csum_bits;
+ struct libeth_rx_csum csum_bits = { };
u16 rx_status0, rx_status1;
rx_status0 = le16_to_cpu(rx_desc->flex_nic_wb.status_error0);
@@ -786,9 +772,8 @@ static void idpf_rx_singleq_flex_csum(struct idpf_queue *rx_q,
rx_status0);
csum_bits.nat = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS1_NAT_M,
rx_status1);
- csum_bits.pprs = 0;
- idpf_rx_singleq_csum(rx_q, skb, &csum_bits, ptype);
+ return csum_bits;
}
/**
@@ -801,14 +786,14 @@ static void idpf_rx_singleq_flex_csum(struct idpf_queue *rx_q,
* This function only operates on the VIRTCHNL2_RXDID_1_32B_BASE_M base 32byte
* descriptor writeback format.
**/
-static void idpf_rx_singleq_base_hash(struct idpf_queue *rx_q,
+static void idpf_rx_singleq_base_hash(struct idpf_rx_queue *rx_q,
struct sk_buff *skb,
- union virtchnl2_rx_desc *rx_desc,
- struct idpf_rx_ptype_decoded *decoded)
+ const union virtchnl2_rx_desc *rx_desc,
+ struct libeth_rx_pt decoded)
{
u64 mask, qw1;
- if (unlikely(!(rx_q->vport->netdev->features & NETIF_F_RXHASH)))
+ if (!libeth_rx_pt_has_hash(rx_q->xdp_rxq.dev, decoded))
return;
mask = VIRTCHNL2_RX_BASE_DESC_FLTSTAT_RSS_HASH_M;
@@ -817,7 +802,7 @@ static void idpf_rx_singleq_base_hash(struct idpf_queue *rx_q,
if (FIELD_GET(mask, qw1) == mask) {
u32 hash = le32_to_cpu(rx_desc->base_wb.qword0.hi_dword.rss);
- skb_set_hash(skb, hash, idpf_ptype_to_htype(decoded));
+ libeth_rx_pt_set_hash(skb, hash, decoded);
}
}
@@ -831,22 +816,24 @@ static void idpf_rx_singleq_base_hash(struct idpf_queue *rx_q,
* This function only operates on the VIRTCHNL2_RXDID_2_FLEX_SQ_NIC flexible
* descriptor writeback format.
**/
-static void idpf_rx_singleq_flex_hash(struct idpf_queue *rx_q,
+static void idpf_rx_singleq_flex_hash(struct idpf_rx_queue *rx_q,
struct sk_buff *skb,
- union virtchnl2_rx_desc *rx_desc,
- struct idpf_rx_ptype_decoded *decoded)
+ const union virtchnl2_rx_desc *rx_desc,
+ struct libeth_rx_pt decoded)
{
- if (unlikely(!(rx_q->vport->netdev->features & NETIF_F_RXHASH)))
+ if (!libeth_rx_pt_has_hash(rx_q->xdp_rxq.dev, decoded))
return;
if (FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS0_RSS_VALID_M,
- le16_to_cpu(rx_desc->flex_nic_wb.status_error0)))
- skb_set_hash(skb, le32_to_cpu(rx_desc->flex_nic_wb.rss_hash),
- idpf_ptype_to_htype(decoded));
+ le16_to_cpu(rx_desc->flex_nic_wb.status_error0))) {
+ u32 hash = le32_to_cpu(rx_desc->flex_nic_wb.rss_hash);
+
+ libeth_rx_pt_set_hash(skb, hash, decoded);
+ }
}
/**
- * idpf_rx_singleq_process_skb_fields - Populate skb header fields from Rx
+ * __idpf_rx_singleq_process_skb_fields - Populate skb header fields from Rx
* descriptor
* @rx_q: Rx ring being processed
* @skb: pointer to current skb being populated
@@ -857,25 +844,41 @@ static void idpf_rx_singleq_flex_hash(struct idpf_queue *rx_q,
* order to populate the hash, checksum, VLAN, protocol, and
* other fields within the skb.
*/
-static void idpf_rx_singleq_process_skb_fields(struct idpf_queue *rx_q,
- struct sk_buff *skb,
- union virtchnl2_rx_desc *rx_desc,
- u16 ptype)
+static void
+__idpf_rx_singleq_process_skb_fields(struct idpf_rx_queue *rx_q,
+ struct sk_buff *skb,
+ const union virtchnl2_rx_desc *rx_desc,
+ u16 ptype)
{
- struct idpf_rx_ptype_decoded decoded =
- rx_q->vport->rx_ptype_lkup[ptype];
-
- /* modifies the skb - consumes the enet header */
- skb->protocol = eth_type_trans(skb, rx_q->vport->netdev);
+ struct libeth_rx_pt decoded = rx_q->rx_ptype_lkup[ptype];
+ struct libeth_rx_csum csum_bits;
/* Check if we're using base mode descriptor IDs */
if (rx_q->rxdids == VIRTCHNL2_RXDID_1_32B_BASE_M) {
- idpf_rx_singleq_base_hash(rx_q, skb, rx_desc, &decoded);
- idpf_rx_singleq_base_csum(rx_q, skb, rx_desc, ptype);
+ idpf_rx_singleq_base_hash(rx_q, skb, rx_desc, decoded);
+ csum_bits = idpf_rx_singleq_base_csum(rx_desc);
} else {
- idpf_rx_singleq_flex_hash(rx_q, skb, rx_desc, &decoded);
- idpf_rx_singleq_flex_csum(rx_q, skb, rx_desc, ptype);
+ idpf_rx_singleq_flex_hash(rx_q, skb, rx_desc, decoded);
+ csum_bits = idpf_rx_singleq_flex_csum(rx_desc);
}
+
+ idpf_rx_singleq_csum(rx_q, skb, csum_bits, decoded);
+}
+
+/**
+ * idpf_rx_buf_hw_update - Store the new tail and head values
+ * @rxq: queue to bump
+ * @val: new head index
+ */
+static void idpf_rx_buf_hw_update(struct idpf_rx_queue *rxq, u32 val)
+{
+ rxq->next_to_use = val;
+
+ if (unlikely(!rxq->tail))
+ return;
+
+ /* writel has an implicit memory barrier */
+ writel(val, rxq->tail);
}
/**
@@ -885,24 +888,28 @@ static void idpf_rx_singleq_process_skb_fields(struct idpf_queue *rx_q,
*
* Returns false if all allocations were successful, true if any fail
*/
-bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rx_q,
+bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rx_q,
u16 cleaned_count)
{
struct virtchnl2_singleq_rx_buf_desc *desc;
+ const struct libeth_fq_fp fq = {
+ .pp = rx_q->pp,
+ .fqes = rx_q->rx_buf,
+ .truesize = rx_q->truesize,
+ .count = rx_q->desc_count,
+ };
u16 nta = rx_q->next_to_alloc;
- struct idpf_rx_buf *buf;
if (!cleaned_count)
return false;
- desc = IDPF_SINGLEQ_RX_BUF_DESC(rx_q, nta);
- buf = &rx_q->rx_buf.buf[nta];
+ desc = &rx_q->single_buf[nta];
do {
dma_addr_t addr;
- addr = idpf_alloc_page(rx_q->pp, buf, rx_q->rx_buf_size);
- if (unlikely(addr == DMA_MAPPING_ERROR))
+ addr = libeth_rx_alloc(&fq, nta);
+ if (addr == DMA_MAPPING_ERROR)
break;
/* Refresh the desc even if buffer_addrs didn't change
@@ -912,11 +919,9 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rx_q,
desc->hdr_addr = 0;
desc++;
- buf++;
nta++;
if (unlikely(nta == rx_q->desc_count)) {
- desc = IDPF_SINGLEQ_RX_BUF_DESC(rx_q, 0);
- buf = rx_q->rx_buf.buf;
+ desc = &rx_q->single_buf[0];
nta = 0;
}
@@ -933,7 +938,6 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rx_q,
/**
* idpf_rx_singleq_extract_base_fields - Extract fields from the Rx descriptor
- * @rx_q: Rx descriptor queue
* @rx_desc: the descriptor to process
* @fields: storage for extracted values
*
@@ -943,21 +947,20 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rx_q,
* This function only operates on the VIRTCHNL2_RXDID_1_32B_BASE_M base 32byte
* descriptor writeback format.
*/
-static void idpf_rx_singleq_extract_base_fields(struct idpf_queue *rx_q,
- union virtchnl2_rx_desc *rx_desc,
- struct idpf_rx_extracted *fields)
+static void
+idpf_rx_singleq_extract_base_fields(const union virtchnl2_rx_desc *rx_desc,
+ struct libeth_rqe_info *fields)
{
u64 qword;
qword = le64_to_cpu(rx_desc->base_wb.qword1.status_error_ptype_len);
- fields->size = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_LEN_PBUF_M, qword);
- fields->rx_ptype = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_PTYPE_M, qword);
+ fields->len = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_LEN_PBUF_M, qword);
+ fields->ptype = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_PTYPE_M, qword);
}
/**
* idpf_rx_singleq_extract_flex_fields - Extract fields from the Rx descriptor
- * @rx_q: Rx descriptor queue
* @rx_desc: the descriptor to process
* @fields: storage for extracted values
*
@@ -967,14 +970,14 @@ static void idpf_rx_singleq_extract_base_fields(struct idpf_queue *rx_q,
* This function only operates on the VIRTCHNL2_RXDID_2_FLEX_SQ_NIC flexible
* descriptor writeback format.
*/
-static void idpf_rx_singleq_extract_flex_fields(struct idpf_queue *rx_q,
- union virtchnl2_rx_desc *rx_desc,
- struct idpf_rx_extracted *fields)
+static void
+idpf_rx_singleq_extract_flex_fields(const union virtchnl2_rx_desc *rx_desc,
+ struct libeth_rqe_info *fields)
{
- fields->size = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_M,
- le16_to_cpu(rx_desc->flex_nic_wb.pkt_len));
- fields->rx_ptype = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PTYPE_M,
- le16_to_cpu(rx_desc->flex_nic_wb.ptype_flex_flags0));
+ fields->len = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_M,
+ le16_to_cpu(rx_desc->flex_nic_wb.pkt_len));
+ fields->ptype = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PTYPE_M,
+ le16_to_cpu(rx_desc->flex_nic_wb.ptype_flex_flags0));
}
/**
@@ -984,14 +987,41 @@ static void idpf_rx_singleq_extract_flex_fields(struct idpf_queue *rx_q,
* @fields: storage for extracted values
*
*/
-static void idpf_rx_singleq_extract_fields(struct idpf_queue *rx_q,
- union virtchnl2_rx_desc *rx_desc,
- struct idpf_rx_extracted *fields)
+static void
+idpf_rx_singleq_extract_fields(const struct idpf_rx_queue *rx_q,
+ const union virtchnl2_rx_desc *rx_desc,
+ struct libeth_rqe_info *fields)
{
if (rx_q->rxdids == VIRTCHNL2_RXDID_1_32B_BASE_M)
- idpf_rx_singleq_extract_base_fields(rx_q, rx_desc, fields);
+ idpf_rx_singleq_extract_base_fields(rx_desc, fields);
else
- idpf_rx_singleq_extract_flex_fields(rx_q, rx_desc, fields);
+ idpf_rx_singleq_extract_flex_fields(rx_desc, fields);
+}
+
+static bool
+idpf_rx_singleq_process_skb_fields(struct sk_buff *skb,
+ const struct libeth_xdp_buff *xdp,
+ struct libeth_rq_napi_stats *rs)
+{
+ struct libeth_rqe_info fields;
+ struct idpf_rx_queue *rxq;
+
+ rxq = libeth_xdp_buff_to_rq(xdp, typeof(*rxq), xdp_rxq);
+
+ idpf_rx_singleq_extract_fields(rxq, xdp->desc, &fields);
+ __idpf_rx_singleq_process_skb_fields(rxq, skb, xdp->desc,
+ fields.ptype);
+
+ return true;
+}
+
+static void idpf_xdp_run_pass(struct libeth_xdp_buff *xdp,
+ struct napi_struct *napi,
+ struct libeth_rq_napi_stats *rs,
+ const union virtchnl2_rx_desc *desc)
+{
+ libeth_xdp_run_pass(xdp, NULL, napi, rs, desc, NULL,
+ idpf_rx_singleq_process_skb_fields);
}
/**
@@ -1001,22 +1031,23 @@ static void idpf_rx_singleq_extract_fields(struct idpf_queue *rx_q,
*
* Returns the number of packets cleaned; fewer than @budget means the clean is finished
*/
-static int idpf_rx_singleq_clean(struct idpf_queue *rx_q, int budget)
+static int idpf_rx_singleq_clean(struct idpf_rx_queue *rx_q, int budget)
{
- unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
- struct sk_buff *skb = rx_q->skb;
+ struct libeth_rq_napi_stats rs = { };
u16 ntc = rx_q->next_to_clean;
+ LIBETH_XDP_ONSTACK_BUFF(xdp);
u16 cleaned_count = 0;
- bool failure = false;
+
+ libeth_xdp_init_buff(xdp, &rx_q->xdp, &rx_q->xdp_rxq);
/* Process Rx packets bounded by budget */
- while (likely(total_rx_pkts < (unsigned int)budget)) {
- struct idpf_rx_extracted fields = { };
+ while (likely(rs.packets < budget)) {
+ struct libeth_rqe_info fields = { };
union virtchnl2_rx_desc *rx_desc;
struct idpf_rx_buf *rx_buf;
/* get the Rx desc from Rx queue based on 'next_to_clean' */
- rx_desc = IDPF_RX_DESC(rx_q, ntc);
+ rx_desc = &rx_q->rx[ntc];
/* status_error_ptype_len will always be zero for unused
* descriptors because it's cleared in cleanup, and overlaps
@@ -1036,75 +1067,42 @@ static int idpf_rx_singleq_clean(struct idpf_queue *rx_q, int budget)
idpf_rx_singleq_extract_fields(rx_q, rx_desc, &fields);
- rx_buf = &rx_q->rx_buf.buf[ntc];
- if (!fields.size) {
- idpf_rx_put_page(rx_buf);
- goto skip_data;
- }
-
- idpf_rx_sync_for_cpu(rx_buf, fields.size);
- if (skb)
- idpf_rx_add_frag(rx_buf, skb, fields.size);
- else
- skb = idpf_rx_construct_skb(rx_q, rx_buf, fields.size);
+ rx_buf = &rx_q->rx_buf[ntc];
+ libeth_xdp_process_buff(xdp, rx_buf, fields.len);
+ rx_buf->netmem = 0;
- /* exit if we failed to retrieve a buffer */
- if (!skb)
- break;
-
-skip_data:
IDPF_SINGLEQ_BUMP_RING_IDX(rx_q, ntc);
-
cleaned_count++;
/* skip if it is non EOP desc */
- if (idpf_rx_singleq_is_non_eop(rx_q, rx_desc, skb, ntc))
+ if (idpf_rx_singleq_is_non_eop(rx_desc) ||
+ unlikely(!xdp->data))
continue;
#define IDPF_RXD_ERR_S FIELD_PREP(VIRTCHNL2_RX_BASE_DESC_QW1_ERROR_M, \
VIRTCHNL2_RX_BASE_DESC_ERROR_RXE_M)
if (unlikely(idpf_rx_singleq_test_staterr(rx_desc,
IDPF_RXD_ERR_S))) {
- dev_kfree_skb_any(skb);
- skb = NULL;
- continue;
- }
-
- /* pad skb if needed (to make valid ethernet frame) */
- if (eth_skb_pad(skb)) {
- skb = NULL;
+ libeth_xdp_return_buff_slow(xdp);
continue;
}
- /* probably a little skewed due to removing CRC */
- total_rx_bytes += skb->len;
-
- /* protocol */
- idpf_rx_singleq_process_skb_fields(rx_q, skb,
- rx_desc, fields.rx_ptype);
-
- /* send completed skb up the stack */
- napi_gro_receive(&rx_q->q_vector->napi, skb);
- skb = NULL;
-
- /* update budget accounting */
- total_rx_pkts++;
+ idpf_xdp_run_pass(xdp, rx_q->pp->p.napi, &rs, rx_desc);
}
- rx_q->skb = skb;
-
rx_q->next_to_clean = ntc;
+ libeth_xdp_save_buff(&rx_q->xdp, xdp);
+ page_pool_nid_changed(rx_q->pp, numa_mem_id());
if (cleaned_count)
- failure = idpf_rx_singleq_buf_hw_alloc_all(rx_q, cleaned_count);
+ idpf_rx_singleq_buf_hw_alloc_all(rx_q, cleaned_count);
u64_stats_update_begin(&rx_q->stats_sync);
- u64_stats_add(&rx_q->q_stats.rx.packets, total_rx_pkts);
- u64_stats_add(&rx_q->q_stats.rx.bytes, total_rx_bytes);
+ u64_stats_add(&rx_q->q_stats.packets, rs.packets);
+ u64_stats_add(&rx_q->q_stats.bytes, rs.bytes);
u64_stats_update_end(&rx_q->stats_sync);
- /* guarantee a trip back through this routine if there was a failure */
- return failure ? budget : (int)total_rx_pkts;
+ return rs.packets;
}
/**
@@ -1127,7 +1125,7 @@ static bool idpf_rx_singleq_clean_all(struct idpf_q_vector *q_vec, int budget,
*/
budget_per_q = num_rxq ? max(budget / num_rxq, 1) : 0;
for (i = 0; i < num_rxq; i++) {
- struct idpf_queue *rxq = q_vec->rx[i];
+ struct idpf_rx_queue *rxq = q_vec->rx[i];
int pkts_cleaned_per_q;
pkts_cleaned_per_q = idpf_rx_singleq_clean(rxq, budget_per_q);
@@ -1166,8 +1164,10 @@ int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget)
&work_done);
/* If work not completed, return budget and polling will return */
- if (!clean_complete)
+ if (!clean_complete) {
+ idpf_vport_intr_set_wb_on_itr(q_vector);
return budget;
+ }
work_done = min_t(int, work_done, budget - 1);
@@ -1176,6 +1176,8 @@ int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget)
*/
if (likely(napi_complete_done(napi, work_done)))
idpf_vport_intr_update_itr_ena_irq(q_vector);
+ else
+ idpf_vport_intr_set_wb_on_itr(q_vector);
return work_done;
}
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index f5bc4a278074..1d91c56f7469 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -2,36 +2,35 @@
/* Copyright (C) 2023 Intel Corporation */
#include "idpf.h"
+#include "idpf_ptp.h"
#include "idpf_virtchnl.h"
+#include "xdp.h"
+#include "xsk.h"
+
+#define idpf_tx_buf_next(buf) (*(u32 *)&(buf)->priv)
+LIBETH_SQE_CHECK_PRIV(u32);
/**
- * idpf_buf_lifo_push - push a buffer pointer onto stack
- * @stack: pointer to stack struct
- * @buf: pointer to buf to push
+ * idpf_chk_linearize - Check if skb exceeds max descriptors per packet
+ * @skb: send buffer
+ * @max_bufs: maximum scatter gather buffers for single packet
+ * @count: number of buffers this packet needs
*
- * Returns 0 on success, negative on failure
- **/
-static int idpf_buf_lifo_push(struct idpf_buf_lifo *stack,
- struct idpf_tx_stash *buf)
+ * Make sure we don't exceed maximum scatter gather buffers for a single
+ * packet.
+ * The TSO case has already been handled in idpf_features_check().
+ */
+static bool idpf_chk_linearize(const struct sk_buff *skb,
+ unsigned int max_bufs,
+ unsigned int count)
{
- if (unlikely(stack->top == stack->size))
- return -ENOSPC;
-
- stack->bufs[stack->top++] = buf;
-
- return 0;
-}
+ if (likely(count <= max_bufs))
+ return false;
-/**
- * idpf_buf_lifo_pop - pop a buffer pointer from stack
- * @stack: pointer to stack struct
- **/
-static struct idpf_tx_stash *idpf_buf_lifo_pop(struct idpf_buf_lifo *stack)
-{
- if (unlikely(!stack->top))
- return NULL;
+ if (skb_is_gso(skb))
+ return false;
- return stack->bufs[--stack->top];
+ return true;
}
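
/*
 * Editorial sketch (not part of the patch): the linearization decision above
 * in isolation, with example values.  max_bufs and count here are made up,
 * not hardware limits from the idpf spec; the GSO case is assumed to have
 * been policed earlier in idpf_features_check().
 */
#include <stdbool.h>
#include <stdio.h>

static bool needs_linearize(bool is_gso, unsigned int max_bufs,
			    unsigned int count)
{
	if (count <= max_bufs)	/* fits as-is */
		return false;
	if (is_gso)		/* GSO limits were already enforced earlier */
		return false;
	return true;
}

int main(void)
{
	/* a 10-fragment non-GSO skb against an 8-buffer limit must be linearized */
	printf("%d\n", needs_linearize(false, 8, 10));	/* prints 1 */
	return 0;
}
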
/**
@@ -55,85 +54,97 @@ void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
}
}
-/**
- * idpf_tx_buf_rel - Release a Tx buffer
- * @tx_q: the queue that owns the buffer
- * @tx_buf: the buffer to free
- */
-static void idpf_tx_buf_rel(struct idpf_queue *tx_q, struct idpf_tx_buf *tx_buf)
+static void idpf_tx_buf_clean(struct idpf_tx_queue *txq)
{
- if (tx_buf->skb) {
- if (dma_unmap_len(tx_buf, len))
- dma_unmap_single(tx_q->dev,
- dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
- dev_kfree_skb_any(tx_buf->skb);
- } else if (dma_unmap_len(tx_buf, len)) {
- dma_unmap_page(tx_q->dev,
- dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
- }
+ struct libeth_sq_napi_stats ss = { };
+ struct xdp_frame_bulk bq;
+ struct libeth_cq_pp cp = {
+ .dev = txq->dev,
+ .bq = &bq,
+ .ss = &ss,
+ };
+
+ xdp_frame_bulk_init(&bq);
+
+ /* Free all the Tx buffer sk_buffs */
+ for (u32 i = 0; i < txq->buf_pool_size; i++)
+ libeth_tx_complete_any(&txq->tx_buf[i], &cp);
- tx_buf->next_to_watch = NULL;
- tx_buf->skb = NULL;
- tx_buf->compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
- dma_unmap_len_set(tx_buf, len, 0);
+ xdp_flush_frame_bulk(&bq);
}
/**
* idpf_tx_buf_rel_all - Free any empty Tx buffers
* @txq: queue to be cleaned
*/
-static void idpf_tx_buf_rel_all(struct idpf_queue *txq)
+static void idpf_tx_buf_rel_all(struct idpf_tx_queue *txq)
{
- u16 i;
-
/* Buffers already cleared, nothing to do */
if (!txq->tx_buf)
return;
- /* Free all the Tx buffer sk_buffs */
- for (i = 0; i < txq->desc_count; i++)
- idpf_tx_buf_rel(txq, &txq->tx_buf[i]);
+ if (idpf_queue_has(XSK, txq))
+ idpf_xsksq_clean(txq);
+ else
+ idpf_tx_buf_clean(txq);
kfree(txq->tx_buf);
txq->tx_buf = NULL;
-
- if (!txq->buf_stack.bufs)
- return;
-
- for (i = 0; i < txq->buf_stack.size; i++)
- kfree(txq->buf_stack.bufs[i]);
-
- kfree(txq->buf_stack.bufs);
- txq->buf_stack.bufs = NULL;
}
/**
* idpf_tx_desc_rel - Free Tx resources per queue
* @txq: Tx descriptor ring for a specific queue
- * @bufq: buffer q or completion q
*
* Free all transmit software resources
*/
-static void idpf_tx_desc_rel(struct idpf_queue *txq, bool bufq)
+static void idpf_tx_desc_rel(struct idpf_tx_queue *txq)
{
- if (bufq)
- idpf_tx_buf_rel_all(txq);
+ bool xdp = idpf_queue_has(XDP, txq);
+
+ if (xdp)
+ libeth_xdpsq_deinit_timer(txq->timer);
+
+ idpf_tx_buf_rel_all(txq);
+
+ if (!xdp)
+ netdev_tx_reset_subqueue(txq->netdev, txq->idx);
+
+ idpf_xsk_clear_queue(txq, VIRTCHNL2_QUEUE_TYPE_TX);
if (!txq->desc_ring)
return;
+ if (!xdp && txq->refillq)
+ kfree(txq->refillq->ring);
+
dmam_free_coherent(txq->dev, txq->size, txq->desc_ring, txq->dma);
txq->desc_ring = NULL;
- txq->next_to_alloc = 0;
txq->next_to_use = 0;
txq->next_to_clean = 0;
}
/**
+ * idpf_compl_desc_rel - Free completion resources per queue
+ * @complq: completion queue
+ *
+ * Free all completion software resources.
+ */
+static void idpf_compl_desc_rel(struct idpf_compl_queue *complq)
+{
+ idpf_xsk_clear_queue(complq, VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);
+
+ if (!complq->desc_ring)
+ return;
+
+ dma_free_coherent(complq->netdev->dev.parent, complq->size,
+ complq->desc_ring, complq->dma);
+ complq->desc_ring = NULL;
+ complq->next_to_use = 0;
+ complq->next_to_clean = 0;
+}
+
+/**
* idpf_tx_desc_rel_all - Free Tx Resources for All Queues
* @vport: virtual port structure
*
@@ -150,10 +161,10 @@ static void idpf_tx_desc_rel_all(struct idpf_vport *vport)
struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
for (j = 0; j < txq_grp->num_txq; j++)
- idpf_tx_desc_rel(txq_grp->txqs[j], true);
+ idpf_tx_desc_rel(txq_grp->txqs[j]);
if (idpf_is_queue_model_split(vport->txq_model))
- idpf_tx_desc_rel(txq_grp->complq, false);
+ idpf_compl_desc_rel(txq_grp->complq);
}
}
@@ -163,69 +174,42 @@ static void idpf_tx_desc_rel_all(struct idpf_vport *vport)
*
* Returns 0 on success, negative on failure
*/
-static int idpf_tx_buf_alloc_all(struct idpf_queue *tx_q)
+static int idpf_tx_buf_alloc_all(struct idpf_tx_queue *tx_q)
{
- int buf_size;
- int i;
-
/* Allocate bookkeeping buffers only. Buffers to be supplied to HW
* are allocated by kernel network stack and received as part of skb
*/
- buf_size = sizeof(struct idpf_tx_buf) * tx_q->desc_count;
- tx_q->tx_buf = kzalloc(buf_size, GFP_KERNEL);
+ if (idpf_queue_has(FLOW_SCH_EN, tx_q))
+ tx_q->buf_pool_size = U16_MAX;
+ else
+ tx_q->buf_pool_size = tx_q->desc_count;
+ tx_q->tx_buf = kcalloc(tx_q->buf_pool_size, sizeof(*tx_q->tx_buf),
+ GFP_KERNEL);
if (!tx_q->tx_buf)
return -ENOMEM;
- /* Initialize tx_bufs with invalid completion tags */
- for (i = 0; i < tx_q->desc_count; i++)
- tx_q->tx_buf[i].compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
-
- /* Initialize tx buf stack for out-of-order completions if
- * flow scheduling offload is enabled
- */
- tx_q->buf_stack.bufs =
- kcalloc(tx_q->desc_count, sizeof(struct idpf_tx_stash *),
- GFP_KERNEL);
- if (!tx_q->buf_stack.bufs)
- return -ENOMEM;
-
- tx_q->buf_stack.size = tx_q->desc_count;
- tx_q->buf_stack.top = tx_q->desc_count;
-
- for (i = 0; i < tx_q->desc_count; i++) {
- tx_q->buf_stack.bufs[i] = kzalloc(sizeof(*tx_q->buf_stack.bufs[i]),
- GFP_KERNEL);
- if (!tx_q->buf_stack.bufs[i])
- return -ENOMEM;
- }
-
return 0;
}
/**
* idpf_tx_desc_alloc - Allocate the Tx descriptors
+ * @vport: vport to allocate resources for
* @tx_q: the tx ring to set up
- * @bufq: buffer or completion queue
*
* Returns 0 on success, negative on failure
*/
-static int idpf_tx_desc_alloc(struct idpf_queue *tx_q, bool bufq)
+static int idpf_tx_desc_alloc(const struct idpf_vport *vport,
+ struct idpf_tx_queue *tx_q)
{
struct device *dev = tx_q->dev;
- u32 desc_sz;
+ struct idpf_sw_queue *refillq;
int err;
- if (bufq) {
- err = idpf_tx_buf_alloc_all(tx_q);
- if (err)
- goto err_alloc;
-
- desc_sz = sizeof(struct idpf_base_tx_desc);
- } else {
- desc_sz = sizeof(struct idpf_splitq_tx_compl_desc);
- }
+ err = idpf_tx_buf_alloc_all(tx_q);
+ if (err)
+ goto err_alloc;
- tx_q->size = tx_q->desc_count * desc_sz;
+ tx_q->size = tx_q->desc_count * sizeof(*tx_q->base_tx);
/* Allocate descriptors and also round up to nearest 4K */
tx_q->size = ALIGN(tx_q->size, 4096);
@@ -238,20 +222,78 @@ static int idpf_tx_desc_alloc(struct idpf_queue *tx_q, bool bufq)
goto err_alloc;
}
- tx_q->next_to_alloc = 0;
tx_q->next_to_use = 0;
tx_q->next_to_clean = 0;
- set_bit(__IDPF_Q_GEN_CHK, tx_q->flags);
+ idpf_queue_set(GEN_CHK, tx_q);
+
+ idpf_xsk_setup_queue(vport, tx_q, VIRTCHNL2_QUEUE_TYPE_TX);
+
+ if (!idpf_queue_has(FLOW_SCH_EN, tx_q))
+ return 0;
+
+ refillq = tx_q->refillq;
+ refillq->desc_count = tx_q->buf_pool_size;
+ refillq->ring = kcalloc(refillq->desc_count, sizeof(u32),
+ GFP_KERNEL);
+ if (!refillq->ring) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+
+ for (unsigned int i = 0; i < refillq->desc_count; i++)
+ refillq->ring[i] =
+ FIELD_PREP(IDPF_RFL_BI_BUFID_M, i) |
+ FIELD_PREP(IDPF_RFL_BI_GEN_M,
+ idpf_queue_has(GEN_CHK, refillq));
+
+ /* Go ahead and flip the GEN bit since this counts as filling
+ * up the ring, i.e. we already ring wrapped.
+ */
+ idpf_queue_change(GEN_CHK, refillq);
+
+ tx_q->last_re = tx_q->desc_count - IDPF_TX_SPLITQ_RE_MIN_GAP;
return 0;
err_alloc:
- idpf_tx_desc_rel(tx_q, bufq);
+ idpf_tx_desc_rel(tx_q);
return err;
}
/**
+ * idpf_compl_desc_alloc - allocate completion descriptors
+ * @vport: vport to allocate resources for
+ * @complq: completion queue to set up
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+static int idpf_compl_desc_alloc(const struct idpf_vport *vport,
+ struct idpf_compl_queue *complq)
+{
+ u32 desc_size;
+
+ desc_size = idpf_queue_has(FLOW_SCH_EN, complq) ?
+ sizeof(*complq->comp) : sizeof(*complq->comp_4b);
+ complq->size = array_size(complq->desc_count, desc_size);
+
+ complq->desc_ring = dma_alloc_coherent(complq->netdev->dev.parent,
+ complq->size, &complq->dma,
+ GFP_KERNEL);
+ if (!complq->desc_ring)
+ return -ENOMEM;
+
+ complq->next_to_use = 0;
+ complq->next_to_clean = 0;
+ idpf_queue_set(GEN_CHK, complq);
+
+ idpf_xsk_setup_queue(vport, complq,
+ VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);
+
+ return 0;
+}
+
+/**
* idpf_tx_desc_alloc_all - allocate all queues Tx resources
* @vport: virtual port private structure
*
@@ -259,7 +301,6 @@ err_alloc:
*/
static int idpf_tx_desc_alloc_all(struct idpf_vport *vport)
{
- struct device *dev = &vport->adapter->pdev->dev;
int err = 0;
int i, j;
@@ -268,53 +309,25 @@ static int idpf_tx_desc_alloc_all(struct idpf_vport *vport)
*/
for (i = 0; i < vport->num_txq_grp; i++) {
for (j = 0; j < vport->txq_grps[i].num_txq; j++) {
- struct idpf_queue *txq = vport->txq_grps[i].txqs[j];
- u8 gen_bits = 0;
- u16 bufidx_mask;
+ struct idpf_tx_queue *txq = vport->txq_grps[i].txqs[j];
- err = idpf_tx_desc_alloc(txq, true);
+ err = idpf_tx_desc_alloc(vport, txq);
if (err) {
- dev_err(dev, "Allocation for Tx Queue %u failed\n",
+ pci_err(vport->adapter->pdev,
+ "Allocation for Tx Queue %u failed\n",
i);
goto err_out;
}
-
- if (!idpf_is_queue_model_split(vport->txq_model))
- continue;
-
- txq->compl_tag_cur_gen = 0;
-
- /* Determine the number of bits in the bufid
- * mask and add one to get the start of the
- * generation bits
- */
- bufidx_mask = txq->desc_count - 1;
- while (bufidx_mask >> 1) {
- txq->compl_tag_gen_s++;
- bufidx_mask = bufidx_mask >> 1;
- }
- txq->compl_tag_gen_s++;
-
- gen_bits = IDPF_TX_SPLITQ_COMPL_TAG_WIDTH -
- txq->compl_tag_gen_s;
- txq->compl_tag_gen_max = GETMAXVAL(gen_bits);
-
- /* Set bufid mask based on location of first
- * gen bit; it cannot simply be the descriptor
- * ring size-1 since we can have size values
- * where not all of those bits are set.
- */
- txq->compl_tag_bufid_m =
- GETMAXVAL(txq->compl_tag_gen_s);
}
if (!idpf_is_queue_model_split(vport->txq_model))
continue;
/* Setup completion queues */
- err = idpf_tx_desc_alloc(vport->txq_grps[i].complq, false);
+ err = idpf_compl_desc_alloc(vport, vport->txq_grps[i].complq);
if (err) {
- dev_err(dev, "Allocation for Tx Completion Queue %u failed\n",
+ pci_err(vport->adapter->pdev,
+ "Allocation for Tx Completion Queue %u failed\n",
i);
goto err_out;
}
@@ -329,93 +342,150 @@ err_out:
/**
* idpf_rx_page_rel - Release an rx buffer page
- * @rxq: the queue that owns the buffer
* @rx_buf: the buffer to free
*/
-static void idpf_rx_page_rel(struct idpf_queue *rxq, struct idpf_rx_buf *rx_buf)
+static void idpf_rx_page_rel(struct libeth_fqe *rx_buf)
{
- if (unlikely(!rx_buf->page))
+ if (unlikely(!rx_buf->netmem))
return;
- page_pool_put_full_page(rxq->pp, rx_buf->page, false);
+ libeth_rx_recycle_slow(rx_buf->netmem);
- rx_buf->page = NULL;
- rx_buf->page_offset = 0;
+ rx_buf->netmem = 0;
+ rx_buf->offset = 0;
}
/**
* idpf_rx_hdr_buf_rel_all - Release header buffer memory
- * @rxq: queue to use
+ * @bufq: queue to use
*/
-static void idpf_rx_hdr_buf_rel_all(struct idpf_queue *rxq)
+static void idpf_rx_hdr_buf_rel_all(struct idpf_buf_queue *bufq)
{
- struct idpf_adapter *adapter = rxq->vport->adapter;
+ struct libeth_fq fq = {
+ .fqes = bufq->hdr_buf,
+ .pp = bufq->hdr_pp,
+ };
+
+ for (u32 i = 0; i < bufq->desc_count; i++)
+ idpf_rx_page_rel(&bufq->hdr_buf[i]);
- dma_free_coherent(&adapter->pdev->dev,
- rxq->desc_count * IDPF_HDR_BUF_SIZE,
- rxq->rx_buf.hdr_buf_va,
- rxq->rx_buf.hdr_buf_pa);
- rxq->rx_buf.hdr_buf_va = NULL;
+ libeth_rx_fq_destroy(&fq);
+ bufq->hdr_buf = NULL;
+ bufq->hdr_pp = NULL;
}
/**
- * idpf_rx_buf_rel_all - Free all Rx buffer resources for a queue
- * @rxq: queue to be cleaned
+ * idpf_rx_buf_rel_bufq - Free all Rx buffer resources for a buffer queue
+ * @bufq: queue to be cleaned
*/
-static void idpf_rx_buf_rel_all(struct idpf_queue *rxq)
+static void idpf_rx_buf_rel_bufq(struct idpf_buf_queue *bufq)
{
- u16 i;
+ struct libeth_fq fq = {
+ .fqes = bufq->buf,
+ .pp = bufq->pp,
+ };
/* queue already cleared, nothing to do */
- if (!rxq->rx_buf.buf)
+ if (!bufq->buf)
+ return;
+
+ if (idpf_queue_has(XSK, bufq)) {
+ idpf_xskfq_rel(bufq);
return;
+ }
/* Free all the bufs allocated and given to hw on Rx queue */
- for (i = 0; i < rxq->desc_count; i++)
- idpf_rx_page_rel(rxq, &rxq->rx_buf.buf[i]);
+ for (u32 i = 0; i < bufq->desc_count; i++)
+ idpf_rx_page_rel(&bufq->buf[i]);
- if (rxq->rx_hsplit_en)
- idpf_rx_hdr_buf_rel_all(rxq);
+ if (idpf_queue_has(HSPLIT_EN, bufq))
+ idpf_rx_hdr_buf_rel_all(bufq);
- page_pool_destroy(rxq->pp);
- rxq->pp = NULL;
+ libeth_rx_fq_destroy(&fq);
+ bufq->buf = NULL;
+ bufq->pp = NULL;
+}
- kfree(rxq->rx_buf.buf);
- rxq->rx_buf.buf = NULL;
+/**
+ * idpf_rx_buf_rel_all - Free all Rx buffer resources for a receive queue
+ * @rxq: queue to be cleaned
+ */
+static void idpf_rx_buf_rel_all(struct idpf_rx_queue *rxq)
+{
+ struct libeth_fq fq = {
+ .fqes = rxq->rx_buf,
+ .pp = rxq->pp,
+ };
+
+ if (!rxq->rx_buf)
+ return;
+
+ for (u32 i = 0; i < rxq->desc_count; i++)
+ idpf_rx_page_rel(&rxq->rx_buf[i]);
+
+ libeth_rx_fq_destroy(&fq);
+ rxq->rx_buf = NULL;
+ rxq->pp = NULL;
}
/**
* idpf_rx_desc_rel - Free a specific Rx q resources
* @rxq: queue to clean the resources from
- * @bufq: buffer q or completion q
- * @q_model: single or split q model
+ * @dev: device to free DMA memory
+ * @model: single or split queue model
*
* Free a specific rx queue resources
*/
-static void idpf_rx_desc_rel(struct idpf_queue *rxq, bool bufq, s32 q_model)
+static void idpf_rx_desc_rel(struct idpf_rx_queue *rxq, struct device *dev,
+ u32 model)
{
if (!rxq)
return;
- if (rxq->skb) {
- dev_kfree_skb_any(rxq->skb);
- rxq->skb = NULL;
- }
+ if (!idpf_queue_has(XSK, rxq))
+ libeth_xdp_return_stash(&rxq->xdp);
- if (bufq || !idpf_is_queue_model_split(q_model))
+ if (!idpf_is_queue_model_split(model))
idpf_rx_buf_rel_all(rxq);
+ idpf_xsk_clear_queue(rxq, VIRTCHNL2_QUEUE_TYPE_RX);
+
rxq->next_to_alloc = 0;
rxq->next_to_clean = 0;
rxq->next_to_use = 0;
if (!rxq->desc_ring)
return;
- dmam_free_coherent(rxq->dev, rxq->size, rxq->desc_ring, rxq->dma);
+ dmam_free_coherent(dev, rxq->size, rxq->desc_ring, rxq->dma);
rxq->desc_ring = NULL;
}
/**
+ * idpf_rx_desc_rel_bufq - free buffer queue resources
+ * @bufq: buffer queue to clean the resources from
+ * @dev: device to free DMA memory
+ */
+static void idpf_rx_desc_rel_bufq(struct idpf_buf_queue *bufq,
+ struct device *dev)
+{
+ if (!bufq)
+ return;
+
+ idpf_rx_buf_rel_bufq(bufq);
+ idpf_xsk_clear_queue(bufq, VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
+
+ bufq->next_to_alloc = 0;
+ bufq->next_to_clean = 0;
+ bufq->next_to_use = 0;
+
+ if (!bufq->split_buf)
+ return;
+
+ dma_free_coherent(dev, bufq->size, bufq->split_buf, bufq->dma);
+ bufq->split_buf = NULL;
+}
+
+/**
* idpf_rx_desc_rel_all - Free Rx Resources for All Queues
* @vport: virtual port structure
*
@@ -423,6 +493,7 @@ static void idpf_rx_desc_rel(struct idpf_queue *rxq, bool bufq, s32 q_model)
*/
static void idpf_rx_desc_rel_all(struct idpf_vport *vport)
{
+ struct device *dev = &vport->adapter->pdev->dev;
struct idpf_rxq_group *rx_qgrp;
u16 num_rxq;
int i, j;
@@ -435,15 +506,15 @@ static void idpf_rx_desc_rel_all(struct idpf_vport *vport)
if (!idpf_is_queue_model_split(vport->rxq_model)) {
for (j = 0; j < rx_qgrp->singleq.num_rxq; j++)
- idpf_rx_desc_rel(rx_qgrp->singleq.rxqs[j],
- false, vport->rxq_model);
+ idpf_rx_desc_rel(rx_qgrp->singleq.rxqs[j], dev,
+ VIRTCHNL2_QUEUE_MODEL_SINGLE);
continue;
}
num_rxq = rx_qgrp->splitq.num_rxq_sets;
for (j = 0; j < num_rxq; j++)
idpf_rx_desc_rel(&rx_qgrp->splitq.rxq_sets[j]->rxq,
- false, vport->rxq_model);
+ dev, VIRTCHNL2_QUEUE_MODEL_SPLIT);
if (!rx_qgrp->splitq.bufq_sets)
continue;
@@ -452,69 +523,76 @@ static void idpf_rx_desc_rel_all(struct idpf_vport *vport)
struct idpf_bufq_set *bufq_set =
&rx_qgrp->splitq.bufq_sets[j];
- idpf_rx_desc_rel(&bufq_set->bufq, true,
- vport->rxq_model);
+ idpf_rx_desc_rel_bufq(&bufq_set->bufq, dev);
}
}
}
/**
* idpf_rx_buf_hw_update - Store the new tail and head values
- * @rxq: queue to bump
+ * @bufq: queue to bump
* @val: new head index
*/
-void idpf_rx_buf_hw_update(struct idpf_queue *rxq, u32 val)
+static void idpf_rx_buf_hw_update(struct idpf_buf_queue *bufq, u32 val)
{
- rxq->next_to_use = val;
+ bufq->next_to_use = val;
- if (unlikely(!rxq->tail))
+ if (unlikely(!bufq->tail))
return;
/* writel has an implicit memory barrier */
- writel(val, rxq->tail);
+ writel(val, bufq->tail);
}
/**
* idpf_rx_hdr_buf_alloc_all - Allocate memory for header buffers
- * @rxq: ring to use
+ * @bufq: ring to use
*
* Returns 0 on success, negative on failure.
*/
-static int idpf_rx_hdr_buf_alloc_all(struct idpf_queue *rxq)
+static int idpf_rx_hdr_buf_alloc_all(struct idpf_buf_queue *bufq)
{
- struct idpf_adapter *adapter = rxq->vport->adapter;
-
- rxq->rx_buf.hdr_buf_va =
- dma_alloc_coherent(&adapter->pdev->dev,
- IDPF_HDR_BUF_SIZE * rxq->desc_count,
- &rxq->rx_buf.hdr_buf_pa,
- GFP_KERNEL);
- if (!rxq->rx_buf.hdr_buf_va)
- return -ENOMEM;
+ struct libeth_fq fq = {
+ .count = bufq->desc_count,
+ .type = LIBETH_FQE_HDR,
+ .xdp = idpf_xdp_enabled(bufq->q_vector->vport),
+ .nid = idpf_q_vector_to_mem(bufq->q_vector),
+ };
+ int ret;
+
+ ret = libeth_rx_fq_create(&fq, &bufq->q_vector->napi);
+ if (ret)
+ return ret;
+
+ bufq->hdr_pp = fq.pp;
+ bufq->hdr_buf = fq.fqes;
+ bufq->hdr_truesize = fq.truesize;
+ bufq->rx_hbuf_size = fq.buf_len;
return 0;
}
/**
- * idpf_rx_post_buf_refill - Post buffer id to refill queue
+ * idpf_post_buf_refill - Post buffer id to refill queue
* @refillq: refill queue to post to
* @buf_id: buffer id to post
*/
-static void idpf_rx_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id)
+static void idpf_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id)
{
- u16 nta = refillq->next_to_alloc;
+ u32 nta = refillq->next_to_use;
/* store the buffer ID and the SW maintained GEN bit to the refillq */
refillq->ring[nta] =
- FIELD_PREP(IDPF_RX_BI_BUFID_M, buf_id) |
- FIELD_PREP(IDPF_RX_BI_GEN_M,
- test_bit(__IDPF_Q_GEN_CHK, refillq->flags));
+ FIELD_PREP(IDPF_RFL_BI_BUFID_M, buf_id) |
+ FIELD_PREP(IDPF_RFL_BI_GEN_M,
+ idpf_queue_has(GEN_CHK, refillq));
if (unlikely(++nta == refillq->desc_count)) {
nta = 0;
- change_bit(__IDPF_Q_GEN_CHK, refillq->flags);
+ idpf_queue_change(GEN_CHK, refillq);
}
- refillq->next_to_alloc = nta;
+
+ refillq->next_to_use = nta;
}
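
/*
 * Editorial sketch (not part of the patch): packing a buffer ID and a
 * software generation bit into one 32-bit refill-queue entry, and flipping
 * the GEN bit on every wrap so the consumer can tell fresh entries from
 * stale ones.  The field layout below (bit 31 = GEN, low 16 bits = buffer
 * ID) is an assumption for illustration; the driver's real masks are
 * IDPF_RFL_BI_GEN_M and IDPF_RFL_BI_BUFID_M.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RFL_GEN_BIT	(1u << 31)

struct refillq {
	uint32_t *ring;
	uint32_t count;
	uint32_t next_to_use;
	bool gen;		/* flips every time the producer wraps */
};

static void refillq_post(struct refillq *q, uint16_t buf_id)
{
	q->ring[q->next_to_use] = buf_id | (q->gen ? RFL_GEN_BIT : 0);

	if (++q->next_to_use == q->count) {	/* wrapped: flip GEN */
		q->next_to_use = 0;
		q->gen = !q->gen;
	}
}

/* The consumer only trusts entries whose GEN bit matches its own copy. */
static bool refillq_entry_valid(uint32_t entry, bool consumer_gen)
{
	return !!(entry & RFL_GEN_BIT) == consumer_gen;
}

int main(void)
{
	uint32_t ring[4] = { 0 };
	struct refillq q = { .ring = ring, .count = 4, .gen = true };

	for (uint16_t id = 0; id < 5; id++)	/* fifth post wraps and flips GEN */
		refillq_post(&q, id);

	printf("entry0=%#x valid=%d\n", ring[0],
	       refillq_entry_valid(ring[0], q.gen));	/* entry0=0x4 valid=1 */
	return 0;
}
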
/**
@@ -524,24 +602,35 @@ static void idpf_rx_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id)
*
* Returns false if buffer could not be allocated, true otherwise.
*/
-static bool idpf_rx_post_buf_desc(struct idpf_queue *bufq, u16 buf_id)
+static bool idpf_rx_post_buf_desc(struct idpf_buf_queue *bufq, u16 buf_id)
{
struct virtchnl2_splitq_rx_buf_desc *splitq_rx_desc = NULL;
+ struct libeth_fq_fp fq = {
+ .count = bufq->desc_count,
+ };
u16 nta = bufq->next_to_alloc;
- struct idpf_rx_buf *buf;
dma_addr_t addr;
- splitq_rx_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, nta);
- buf = &bufq->rx_buf.buf[buf_id];
+ splitq_rx_desc = &bufq->split_buf[nta];
- if (bufq->rx_hsplit_en) {
- splitq_rx_desc->hdr_addr =
- cpu_to_le64(bufq->rx_buf.hdr_buf_pa +
- (u32)buf_id * IDPF_HDR_BUF_SIZE);
+ if (idpf_queue_has(HSPLIT_EN, bufq)) {
+ fq.pp = bufq->hdr_pp;
+ fq.fqes = bufq->hdr_buf;
+ fq.truesize = bufq->hdr_truesize;
+
+ addr = libeth_rx_alloc(&fq, buf_id);
+ if (addr == DMA_MAPPING_ERROR)
+ return false;
+
+ splitq_rx_desc->hdr_addr = cpu_to_le64(addr);
}
- addr = idpf_alloc_page(bufq->pp, buf, bufq->rx_buf_size);
- if (unlikely(addr == DMA_MAPPING_ERROR))
+ fq.pp = bufq->pp;
+ fq.fqes = bufq->buf;
+ fq.truesize = bufq->truesize;
+
+ addr = libeth_rx_alloc(&fq, buf_id);
+ if (addr == DMA_MAPPING_ERROR)
return false;
splitq_rx_desc->pkt_addr = cpu_to_le64(addr);
@@ -562,7 +651,8 @@ static bool idpf_rx_post_buf_desc(struct idpf_queue *bufq, u16 buf_id)
*
* Returns true if @working_set bufs were posted successfully, false otherwise.
*/
-static bool idpf_rx_post_init_bufs(struct idpf_queue *bufq, u16 working_set)
+static bool idpf_rx_post_init_bufs(struct idpf_buf_queue *bufq,
+ u16 working_set)
{
int i;
@@ -571,95 +661,118 @@ static bool idpf_rx_post_init_bufs(struct idpf_queue *bufq, u16 working_set)
return false;
}
- idpf_rx_buf_hw_update(bufq,
- bufq->next_to_alloc & ~(bufq->rx_buf_stride - 1));
+ idpf_rx_buf_hw_update(bufq, ALIGN_DOWN(bufq->next_to_alloc,
+ IDPF_RX_BUF_STRIDE));
return true;
}
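
/*
 * Editorial sketch (not part of the patch): the tail written to hardware is
 * rounded down to the buffer stride, so the device only ever sees whole
 * groups of posted buffers.  A stride of 32 is assumed here purely for the
 * example; the driver uses IDPF_RX_BUF_STRIDE.
 */
#include <stdio.h>

#define ALIGN_DOWN_TO(x, a)	((x) & ~((a) - 1))	/* a must be a power of two */

int main(void)
{
	unsigned int next_to_alloc = 57, stride = 32;

	printf("tail=%u\n", ALIGN_DOWN_TO(next_to_alloc, stride));	/* tail=32 */
	return 0;
}
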
/**
- * idpf_rx_create_page_pool - Create a page pool
- * @rxbufq: RX queue to create page pool for
+ * idpf_rx_buf_alloc_singleq - Allocate memory for all buffer resources
+ * @rxq: queue for which the buffers are allocated
*
- * Returns &page_pool on success, casted -errno on failure
+ * Return: 0 on success, -ENOMEM on failure.
*/
-static struct page_pool *idpf_rx_create_page_pool(struct idpf_queue *rxbufq)
+static int idpf_rx_buf_alloc_singleq(struct idpf_rx_queue *rxq)
{
- struct page_pool_params pp = {
- .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
- .order = 0,
- .pool_size = rxbufq->desc_count,
- .nid = NUMA_NO_NODE,
- .dev = rxbufq->vport->netdev->dev.parent,
- .max_len = PAGE_SIZE,
- .dma_dir = DMA_FROM_DEVICE,
- .offset = 0,
+ if (idpf_rx_singleq_buf_hw_alloc_all(rxq, rxq->desc_count - 1))
+ goto err;
+
+ return 0;
+
+err:
+ idpf_rx_buf_rel_all(rxq);
+
+ return -ENOMEM;
+}
+
+/**
+ * idpf_rx_bufs_init_singleq - Initialize page pool and allocate Rx bufs
+ * @rxq: buffer queue to create page pool for
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+static int idpf_rx_bufs_init_singleq(struct idpf_rx_queue *rxq)
+{
+ struct libeth_fq fq = {
+ .count = rxq->desc_count,
+ .type = LIBETH_FQE_MTU,
+ .nid = idpf_q_vector_to_mem(rxq->q_vector),
};
+ int ret;
+
+ ret = libeth_rx_fq_create(&fq, &rxq->q_vector->napi);
+ if (ret)
+ return ret;
+
+ rxq->pp = fq.pp;
+ rxq->rx_buf = fq.fqes;
+ rxq->truesize = fq.truesize;
+ rxq->rx_buf_size = fq.buf_len;
- return page_pool_create(&pp);
+ return idpf_rx_buf_alloc_singleq(rxq);
}
/**
* idpf_rx_buf_alloc_all - Allocate memory for all buffer resources
- * @rxbufq: queue for which the buffers are allocated; equivalent to
- * rxq when operating in singleq mode
+ * @rxbufq: queue for which the buffers are allocated
*
* Returns 0 on success, negative on failure
*/
-static int idpf_rx_buf_alloc_all(struct idpf_queue *rxbufq)
+static int idpf_rx_buf_alloc_all(struct idpf_buf_queue *rxbufq)
{
int err = 0;
- /* Allocate book keeping buffers */
- rxbufq->rx_buf.buf = kcalloc(rxbufq->desc_count,
- sizeof(struct idpf_rx_buf), GFP_KERNEL);
- if (!rxbufq->rx_buf.buf) {
- err = -ENOMEM;
- goto rx_buf_alloc_all_out;
- }
-
- if (rxbufq->rx_hsplit_en) {
+ if (idpf_queue_has(HSPLIT_EN, rxbufq)) {
err = idpf_rx_hdr_buf_alloc_all(rxbufq);
if (err)
goto rx_buf_alloc_all_out;
}
/* Allocate buffers to be given to HW. */
- if (idpf_is_queue_model_split(rxbufq->vport->rxq_model)) {
- int working_set = IDPF_RX_BUFQ_WORKING_SET(rxbufq);
-
- if (!idpf_rx_post_init_bufs(rxbufq, working_set))
- err = -ENOMEM;
- } else {
- if (idpf_rx_singleq_buf_hw_alloc_all(rxbufq,
- rxbufq->desc_count - 1))
- err = -ENOMEM;
- }
+ if (!idpf_rx_post_init_bufs(rxbufq, IDPF_RX_BUFQ_WORKING_SET(rxbufq)))
+ err = -ENOMEM;
rx_buf_alloc_all_out:
if (err)
- idpf_rx_buf_rel_all(rxbufq);
+ idpf_rx_buf_rel_bufq(rxbufq);
return err;
}
/**
* idpf_rx_bufs_init - Initialize page pool, allocate rx bufs, and post to HW
- * @rxbufq: RX queue to create page pool for
+ * @bufq: buffer queue to create page pool for
+ * @type: type of Rx buffers to allocate
*
* Returns 0 on success, negative on failure
*/
-static int idpf_rx_bufs_init(struct idpf_queue *rxbufq)
+static int idpf_rx_bufs_init(struct idpf_buf_queue *bufq,
+ enum libeth_fqe_type type)
{
- struct page_pool *pool;
+ struct libeth_fq fq = {
+ .truesize = bufq->truesize,
+ .count = bufq->desc_count,
+ .type = type,
+ .hsplit = idpf_queue_has(HSPLIT_EN, bufq),
+ .xdp = idpf_xdp_enabled(bufq->q_vector->vport),
+ .nid = idpf_q_vector_to_mem(bufq->q_vector),
+ };
+ int ret;
+
+ if (idpf_queue_has(XSK, bufq))
+ return idpf_xskfq_init(bufq);
- pool = idpf_rx_create_page_pool(rxbufq);
- if (IS_ERR(pool))
- return PTR_ERR(pool);
+ ret = libeth_rx_fq_create(&fq, &bufq->q_vector->napi);
+ if (ret)
+ return ret;
- rxbufq->pp = pool;
+ bufq->pp = fq.pp;
+ bufq->buf = fq.fqes;
+ bufq->truesize = fq.truesize;
+ bufq->rx_buf_size = fq.buf_len;
- return idpf_rx_buf_alloc_all(rxbufq);
+ return idpf_rx_buf_alloc_all(bufq);
}
/**
@@ -670,20 +783,24 @@ static int idpf_rx_bufs_init(struct idpf_queue *rxbufq)
*/
int idpf_rx_bufs_init_all(struct idpf_vport *vport)
{
- struct idpf_rxq_group *rx_qgrp;
- struct idpf_queue *q;
+ bool split = idpf_is_queue_model_split(vport->rxq_model);
int i, j, err;
+ idpf_xdp_copy_prog_to_rqs(vport, vport->xdp_prog);
+
for (i = 0; i < vport->num_rxq_grp; i++) {
- rx_qgrp = &vport->rxq_grps[i];
+ struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ u32 truesize = 0;
/* Allocate bufs for the rxq itself in singleq */
- if (!idpf_is_queue_model_split(vport->rxq_model)) {
+ if (!split) {
int num_rxq = rx_qgrp->singleq.num_rxq;
for (j = 0; j < num_rxq; j++) {
+ struct idpf_rx_queue *q;
+
q = rx_qgrp->singleq.rxqs[j];
- err = idpf_rx_bufs_init(q);
+ err = idpf_rx_bufs_init_singleq(q);
if (err)
return err;
}
@@ -693,10 +810,19 @@ int idpf_rx_bufs_init_all(struct idpf_vport *vport)
/* Otherwise, allocate bufs for the buffer queues */
for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ enum libeth_fqe_type type;
+ struct idpf_buf_queue *q;
+
q = &rx_qgrp->splitq.bufq_sets[j].bufq;
- err = idpf_rx_bufs_init(q);
+ q->truesize = truesize;
+
+ type = truesize ? LIBETH_FQE_SHORT : LIBETH_FQE_MTU;
+
+ err = idpf_rx_bufs_init(q, type);
if (err)
return err;
+
+ truesize = q->truesize >> 1;
}
}
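
/*
 * Editorial sketch (not part of the patch): how buffer sizes cascade across
 * the buffer queues of one Rx queue group.  Buffer queue 0 gets full
 * MTU-sized buffers (LIBETH_FQE_MTU), each following queue gets "short"
 * buffers at half the previous truesize (LIBETH_FQE_SHORT).  The 4096-byte
 * starting value is an assumption; the real truesize comes back from
 * libeth_rx_fq_create().
 */
#include <stdio.h>

int main(void)
{
	unsigned int truesize = 0;	/* 0 means "size me for the MTU" */
	unsigned int nbufq = 2;

	for (unsigned int j = 0; j < nbufq; j++) {
		unsigned int this_ts = truesize ? truesize : 4096;	/* assumed MTU pool */

		printf("bufq %u truesize %u\n", j, this_ts);
		truesize = this_ts >> 1;	/* the next queue gets half */
	}
	return 0;
}
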
@@ -705,22 +831,17 @@ int idpf_rx_bufs_init_all(struct idpf_vport *vport)
/**
* idpf_rx_desc_alloc - Allocate queue Rx resources
+ * @vport: vport to allocate resources for
* @rxq: Rx queue for which the resources are setup
- * @bufq: buffer or completion queue
- * @q_model: single or split queue model
*
* Returns 0 on success, negative on failure
*/
-static int idpf_rx_desc_alloc(struct idpf_queue *rxq, bool bufq, s32 q_model)
+static int idpf_rx_desc_alloc(const struct idpf_vport *vport,
+ struct idpf_rx_queue *rxq)
{
- struct device *dev = rxq->dev;
+ struct device *dev = &vport->adapter->pdev->dev;
- if (bufq)
- rxq->size = rxq->desc_count *
- sizeof(struct virtchnl2_splitq_rx_buf_desc);
- else
- rxq->size = rxq->desc_count *
- sizeof(union virtchnl2_rx_desc);
+ rxq->size = rxq->desc_count * sizeof(union virtchnl2_rx_desc);
/* Allocate descriptors and also round up to nearest 4K */
rxq->size = ALIGN(rxq->size, 4096);
@@ -735,7 +856,38 @@ static int idpf_rx_desc_alloc(struct idpf_queue *rxq, bool bufq, s32 q_model)
rxq->next_to_alloc = 0;
rxq->next_to_clean = 0;
rxq->next_to_use = 0;
- set_bit(__IDPF_Q_GEN_CHK, rxq->flags);
+ idpf_queue_set(GEN_CHK, rxq);
+
+ idpf_xsk_setup_queue(vport, rxq, VIRTCHNL2_QUEUE_TYPE_RX);
+
+ return 0;
+}
+
+/**
+ * idpf_bufq_desc_alloc - Allocate buffer queue descriptor ring
+ * @vport: vport to allocate resources for
+ * @bufq: buffer queue for which the resources are set up
+ *
+ * Return: 0 on success, -ENOMEM on failure.
+ */
+static int idpf_bufq_desc_alloc(const struct idpf_vport *vport,
+ struct idpf_buf_queue *bufq)
+{
+ struct device *dev = &vport->adapter->pdev->dev;
+
+ bufq->size = array_size(bufq->desc_count, sizeof(*bufq->split_buf));
+
+ bufq->split_buf = dma_alloc_coherent(dev, bufq->size, &bufq->dma,
+ GFP_KERNEL);
+ if (!bufq->split_buf)
+ return -ENOMEM;
+
+ bufq->next_to_alloc = 0;
+ bufq->next_to_clean = 0;
+ bufq->next_to_use = 0;
+ idpf_queue_set(GEN_CHK, bufq);
+
+ idpf_xsk_setup_queue(vport, bufq, VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
return 0;
}
@@ -748,9 +900,7 @@ static int idpf_rx_desc_alloc(struct idpf_queue *rxq, bool bufq, s32 q_model)
*/
static int idpf_rx_desc_alloc_all(struct idpf_vport *vport)
{
- struct device *dev = &vport->adapter->pdev->dev;
struct idpf_rxq_group *rx_qgrp;
- struct idpf_queue *q;
int i, j, err;
u16 num_rxq;
@@ -762,14 +912,18 @@ static int idpf_rx_desc_alloc_all(struct idpf_vport *vport)
num_rxq = rx_qgrp->singleq.num_rxq;
for (j = 0; j < num_rxq; j++) {
+ struct idpf_rx_queue *q;
+
if (idpf_is_queue_model_split(vport->rxq_model))
q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
else
q = rx_qgrp->singleq.rxqs[j];
- err = idpf_rx_desc_alloc(q, false, vport->rxq_model);
+
+ err = idpf_rx_desc_alloc(vport, q);
if (err) {
- dev_err(dev, "Memory allocation for Rx Queue %u failed\n",
- i);
+ pci_err(vport->adapter->pdev,
+ "Memory allocation for Rx queue %u from queue group %u failed\n",
+ j, i);
goto err_out;
}
}
@@ -778,11 +932,15 @@ static int idpf_rx_desc_alloc_all(struct idpf_vport *vport)
continue;
for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ struct idpf_buf_queue *q;
+
q = &rx_qgrp->splitq.bufq_sets[j].bufq;
- err = idpf_rx_desc_alloc(q, true, vport->rxq_model);
+
+ err = idpf_bufq_desc_alloc(vport, q);
if (err) {
- dev_err(dev, "Memory allocation for Rx Buffer Queue %u failed\n",
- i);
+ pci_err(vport->adapter->pdev,
+ "Memory allocation for Rx Buffer Queue %u from queue group %u failed\n",
+ j, i);
goto err_out;
}
}
@@ -796,24 +954,373 @@ err_out:
return err;
}
+static int idpf_init_queue_set(const struct idpf_queue_set *qs)
+{
+ const struct idpf_vport *vport = qs->vport;
+ bool splitq;
+ int err;
+
+ splitq = idpf_is_queue_model_split(vport->rxq_model);
+
+ for (u32 i = 0; i < qs->num; i++) {
+ const struct idpf_queue_ptr *q = &qs->qs[i];
+ struct idpf_buf_queue *bufq;
+
+ switch (q->type) {
+ case VIRTCHNL2_QUEUE_TYPE_RX:
+ err = idpf_rx_desc_alloc(vport, q->rxq);
+ if (err)
+ break;
+
+ err = idpf_xdp_rxq_info_init(q->rxq);
+ if (err)
+ break;
+
+ if (!splitq)
+ err = idpf_rx_bufs_init_singleq(q->rxq);
+
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
+ bufq = q->bufq;
+
+ err = idpf_bufq_desc_alloc(vport, bufq);
+ if (err)
+ break;
+
+ for (u32 j = 0; j < bufq->q_vector->num_bufq; j++) {
+ struct idpf_buf_queue * const *bufqs;
+ enum libeth_fqe_type type;
+ u32 ts;
+
+ bufqs = bufq->q_vector->bufq;
+ if (bufqs[j] != bufq)
+ continue;
+
+ if (j) {
+ type = LIBETH_FQE_SHORT;
+ ts = bufqs[j - 1]->truesize >> 1;
+ } else {
+ type = LIBETH_FQE_MTU;
+ ts = 0;
+ }
+
+ bufq->truesize = ts;
+
+ err = idpf_rx_bufs_init(bufq, type);
+ break;
+ }
+
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX:
+ err = idpf_tx_desc_alloc(vport, q->txq);
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
+ err = idpf_compl_desc_alloc(vport, q->complq);
+ break;
+ default:
+ continue;
+ }
+
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void idpf_clean_queue_set(const struct idpf_queue_set *qs)
+{
+ const struct idpf_vport *vport = qs->vport;
+ struct device *dev = vport->netdev->dev.parent;
+
+ for (u32 i = 0; i < qs->num; i++) {
+ const struct idpf_queue_ptr *q = &qs->qs[i];
+
+ switch (q->type) {
+ case VIRTCHNL2_QUEUE_TYPE_RX:
+ idpf_xdp_rxq_info_deinit(q->rxq, vport->rxq_model);
+ idpf_rx_desc_rel(q->rxq, dev, vport->rxq_model);
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
+ idpf_rx_desc_rel_bufq(q->bufq, dev);
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX:
+ idpf_tx_desc_rel(q->txq);
+
+ if (idpf_queue_has(XDP, q->txq)) {
+ q->txq->pending = 0;
+ q->txq->xdp_tx = 0;
+ } else {
+ q->txq->txq_grp->num_completions_pending = 0;
+ }
+
+ writel(q->txq->next_to_use, q->txq->tail);
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
+ idpf_compl_desc_rel(q->complq);
+ q->complq->num_completions = 0;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static void idpf_qvec_ena_irq(struct idpf_q_vector *qv)
+{
+ if (qv->num_txq) {
+ u32 itr;
+
+ if (IDPF_ITR_IS_DYNAMIC(qv->tx_intr_mode))
+ itr = qv->vport->tx_itr_profile[qv->tx_dim.profile_ix];
+ else
+ itr = qv->tx_itr_value;
+
+ idpf_vport_intr_write_itr(qv, itr, true);
+ }
+
+ if (qv->num_rxq) {
+ u32 itr;
+
+ if (IDPF_ITR_IS_DYNAMIC(qv->rx_intr_mode))
+ itr = qv->vport->rx_itr_profile[qv->rx_dim.profile_ix];
+ else
+ itr = qv->rx_itr_value;
+
+ idpf_vport_intr_write_itr(qv, itr, false);
+ }
+
+ if (qv->num_txq || qv->num_rxq)
+ idpf_vport_intr_update_itr_ena_irq(qv);
+}
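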
+
+/**
+ * idpf_vector_to_queue_set - create a queue set associated with the given
+ * queue vector
+ * @qv: queue vector corresponding to the queue pair
+ *
+ * Returns a pointer to a dynamically allocated array of pointers to all
+ * queues associated with a given queue vector (@qv).
+ * Note that the caller is responsible for freeing the memory allocated
+ * by this function, using kfree().
+ *
+ * Return: &idpf_queue_set on success, %NULL in case of error.
+ */
+static struct idpf_queue_set *
+idpf_vector_to_queue_set(struct idpf_q_vector *qv)
+{
+ bool xdp = qv->vport->xdp_txq_offset && !qv->num_xsksq;
+ struct idpf_vport *vport = qv->vport;
+ struct idpf_queue_set *qs;
+ u32 num;
+
+ num = qv->num_rxq + qv->num_bufq + qv->num_txq + qv->num_complq;
+ num += xdp ? qv->num_rxq * 2 : qv->num_xsksq * 2;
+ if (!num)
+ return NULL;
+
+ qs = idpf_alloc_queue_set(vport, num);
+ if (!qs)
+ return NULL;
+
+ num = 0;
+
+ for (u32 i = 0; i < qv->num_bufq; i++) {
+ qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+ qs->qs[num++].bufq = qv->bufq[i];
+ }
+
+ for (u32 i = 0; i < qv->num_rxq; i++) {
+ qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_RX;
+ qs->qs[num++].rxq = qv->rx[i];
+ }
+
+ for (u32 i = 0; i < qv->num_txq; i++) {
+ qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_TX;
+ qs->qs[num++].txq = qv->tx[i];
+ }
+
+ for (u32 i = 0; i < qv->num_complq; i++) {
+ qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ qs->qs[num++].complq = qv->complq[i];
+ }
+
+ if (!vport->xdp_txq_offset)
+ goto finalize;
+
+ if (xdp) {
+ for (u32 i = 0; i < qv->num_rxq; i++) {
+ u32 idx = vport->xdp_txq_offset + qv->rx[i]->idx;
+
+ qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_TX;
+ qs->qs[num++].txq = vport->txqs[idx];
+
+ qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ qs->qs[num++].complq = vport->txqs[idx]->complq;
+ }
+ } else {
+ for (u32 i = 0; i < qv->num_xsksq; i++) {
+ qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_TX;
+ qs->qs[num++].txq = qv->xsksq[i];
+
+ qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ qs->qs[num++].complq = qv->xsksq[i]->complq;
+ }
+ }
+
+finalize:
+ if (num != qs->num) {
+ kfree(qs);
+ return NULL;
+ }
+
+ return qs;
+}
+
+static int idpf_qp_enable(const struct idpf_queue_set *qs, u32 qid)
+{
+ struct idpf_vport *vport = qs->vport;
+ struct idpf_q_vector *q_vector;
+ int err;
+
+ q_vector = idpf_find_rxq_vec(vport, qid);
+
+ err = idpf_init_queue_set(qs);
+ if (err) {
+ netdev_err(vport->netdev, "Could not initialize queues in pair %u: %pe\n",
+ qid, ERR_PTR(err));
+ return err;
+ }
+
+ if (!vport->xdp_txq_offset)
+ goto config;
+
+ q_vector->xsksq = kcalloc(DIV_ROUND_UP(vport->num_rxq_grp,
+ vport->num_q_vectors),
+ sizeof(*q_vector->xsksq), GFP_KERNEL);
+ if (!q_vector->xsksq)
+ return -ENOMEM;
+
+ for (u32 i = 0; i < qs->num; i++) {
+ const struct idpf_queue_ptr *q = &qs->qs[i];
+
+ if (q->type != VIRTCHNL2_QUEUE_TYPE_TX)
+ continue;
+
+ if (!idpf_queue_has(XSK, q->txq))
+ continue;
+
+ idpf_xsk_init_wakeup(q_vector);
+
+ q->txq->q_vector = q_vector;
+ q_vector->xsksq[q_vector->num_xsksq++] = q->txq;
+ }
+
+config:
+ err = idpf_send_config_queue_set_msg(qs);
+ if (err) {
+ netdev_err(vport->netdev, "Could not configure queues in pair %u: %pe\n",
+ qid, ERR_PTR(err));
+ return err;
+ }
+
+ err = idpf_send_enable_queue_set_msg(qs);
+ if (err) {
+ netdev_err(vport->netdev, "Could not enable queues in pair %u: %pe\n",
+ qid, ERR_PTR(err));
+ return err;
+ }
+
+ napi_enable(&q_vector->napi);
+ idpf_qvec_ena_irq(q_vector);
+
+ netif_start_subqueue(vport->netdev, qid);
+
+ return 0;
+}
+
+static int idpf_qp_disable(const struct idpf_queue_set *qs, u32 qid)
+{
+ struct idpf_vport *vport = qs->vport;
+ struct idpf_q_vector *q_vector;
+ int err;
+
+ q_vector = idpf_find_rxq_vec(vport, qid);
+ netif_stop_subqueue(vport->netdev, qid);
+
+ writel(0, q_vector->intr_reg.dyn_ctl);
+ napi_disable(&q_vector->napi);
+
+ err = idpf_send_disable_queue_set_msg(qs);
+ if (err) {
+ netdev_err(vport->netdev, "Could not disable queues in pair %u: %pe\n",
+ qid, ERR_PTR(err));
+ return err;
+ }
+
+ idpf_clean_queue_set(qs);
+
+ kfree(q_vector->xsksq);
+ q_vector->num_xsksq = 0;
+
+ return 0;
+}
+
+/**
+ * idpf_qp_switch - enable or disable queues associated with a queue pair
+ * @vport: vport to switch the pair for
+ * @qid: index of the queue pair to switch
+ * @en: whether to enable or disable the pair
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+int idpf_qp_switch(struct idpf_vport *vport, u32 qid, bool en)
+{
+ struct idpf_q_vector *q_vector = idpf_find_rxq_vec(vport, qid);
+ struct idpf_queue_set *qs __free(kfree) = NULL;
+
+ if (idpf_find_txq_vec(vport, qid) != q_vector)
+ return -EINVAL;
+
+ qs = idpf_vector_to_queue_set(q_vector);
+ if (!qs)
+ return -ENOMEM;
+
+ return en ? idpf_qp_enable(qs, qid) : idpf_qp_disable(qs, qid);
+}
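A minimal caller-side sketch (not part of the patch; the reconfiguration step in the middle is hypothetical) showing how a per-queue path, e.g. AF_XDP pool setup, could use the new helper:

	int err;

	err = idpf_qp_switch(vport, qid, false);	/* quiesce the pair */
	if (err)
		return err;

	/* ...reconfigure the resources backing queue pair @qid... */

	return idpf_qp_switch(vport, qid, true);	/* bring it back up */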
+
/**
* idpf_txq_group_rel - Release all resources for txq groups
* @vport: vport to release txq groups on
*/
static void idpf_txq_group_rel(struct idpf_vport *vport)
{
+ bool split, flow_sch_en;
int i, j;
if (!vport->txq_grps)
return;
+ split = idpf_is_queue_model_split(vport->txq_model);
+ flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS,
+ VIRTCHNL2_CAP_SPLITQ_QSCHED);
+
for (i = 0; i < vport->num_txq_grp; i++) {
struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
for (j = 0; j < txq_grp->num_txq; j++) {
+ if (flow_sch_en) {
+ kfree(txq_grp->txqs[j]->refillq);
+ txq_grp->txqs[j]->refillq = NULL;
+ }
+
kfree(txq_grp->txqs[j]);
txq_grp->txqs[j] = NULL;
}
+
+ if (!split)
+ continue;
+
kfree(txq_grp->complq);
txq_grp->complq = NULL;
}
@@ -897,8 +1404,12 @@ static void idpf_vport_queue_grp_rel_all(struct idpf_vport *vport)
*/
void idpf_vport_queues_rel(struct idpf_vport *vport)
{
+ idpf_xdp_copy_prog_to_rqs(vport, NULL);
+
idpf_tx_desc_rel_all(vport);
idpf_rx_desc_rel_all(vport);
+
+ idpf_xdpsqs_put(vport);
idpf_vport_queue_grp_rel_all(vport);
kfree(vport->txqs);
@@ -917,9 +1428,11 @@ void idpf_vport_queues_rel(struct idpf_vport *vport)
*/
static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport)
{
+ struct idpf_ptp_vport_tx_tstamp_caps *caps = vport->tx_tstamp_caps;
+ struct work_struct *tstamp_task = &vport->tstamp_task;
int i, j, k = 0;
- vport->txqs = kcalloc(vport->num_txq, sizeof(struct idpf_queue *),
+ vport->txqs = kcalloc(vport->num_txq, sizeof(*vport->txqs),
GFP_KERNEL);
if (!vport->txqs)
@@ -931,6 +1444,12 @@ static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport)
for (j = 0; j < tx_grp->num_txq; j++, k++) {
vport->txqs[k] = tx_grp->txqs[j];
vport->txqs[k]->idx = k;
+
+ if (!caps)
+ continue;
+
+ vport->txqs[k]->cached_tstamp_caps = caps;
+ vport->txqs[k]->tstamp_task = tstamp_task;
}
}
@@ -964,20 +1483,26 @@ void idpf_vport_init_num_qs(struct idpf_vport *vport,
if (idpf_is_queue_model_split(vport->rxq_model))
vport->num_bufq = le16_to_cpu(vport_msg->num_rx_bufq);
+ vport->xdp_prog = config_data->xdp_prog;
+ if (idpf_xdp_enabled(vport)) {
+ vport->xdp_txq_offset = config_data->num_req_tx_qs;
+ vport->num_xdp_txq = le16_to_cpu(vport_msg->num_tx_q) -
+ vport->xdp_txq_offset;
+ vport->xdpsq_share = libeth_xdpsq_shared(vport->num_xdp_txq);
+ } else {
+ vport->xdp_txq_offset = 0;
+ vport->num_xdp_txq = 0;
+ vport->xdpsq_share = false;
+ }
+
/* Adjust number of buffer queues per Rx queue group. */
if (!idpf_is_queue_model_split(vport->rxq_model)) {
vport->num_bufqs_per_qgrp = 0;
- vport->bufq_size[0] = IDPF_RX_BUF_2048;
return;
}
vport->num_bufqs_per_qgrp = IDPF_MAX_BUFQS_PER_RXQ_GRP;
- /* Bufq[0] default buffer size is 4K
- * Bufq[1] default buffer size is 2K
- */
- vport->bufq_size[0] = IDPF_RX_BUF_4096;
- vport->bufq_size[1] = IDPF_RX_BUF_2048;
}
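Worked example for the XDP queue accounting above (the numbers are hypothetical and the meaning of xdpsq_share is only paraphrased from libeth): with num_req_tx_qs = 16 and the control plane reporting num_tx_q = 24, idpf_vport_init_num_qs() ends up with:

	vport->xdp_txq_offset = 16;			/* regular Tx queues come first */
	vport->num_xdp_txq = 24 - 16;			/* 8 XDP Tx queues follow them */
	vport->xdpsq_share = libeth_xdpsq_shared(8);	/* whether XDP SQs must be shared/locked */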
/**
@@ -1041,22 +1566,17 @@ int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_idx,
int dflt_splitq_txq_grps = 0, dflt_singleq_txqs = 0;
int dflt_splitq_rxq_grps = 0, dflt_singleq_rxqs = 0;
u16 num_req_tx_qs = 0, num_req_rx_qs = 0;
+ struct idpf_vport_user_config_data *user;
struct idpf_vport_config *vport_config;
u16 num_txq_grps, num_rxq_grps;
- u32 num_qs;
+ u32 num_qs, num_xdpsq;
vport_config = adapter->vport_config[vport_idx];
if (vport_config) {
num_req_tx_qs = vport_config->user_config.num_req_tx_qs;
num_req_rx_qs = vport_config->user_config.num_req_rx_qs;
} else {
- int num_cpus;
-
- /* Restrict num of queues to cpus online as a default
- * configuration to give best performance. User can always
- * override to a max number of queues via ethtool.
- */
- num_cpus = num_online_cpus();
+ u32 num_cpus = netif_get_num_default_rss_queues();
dflt_splitq_txq_grps = min_t(int, max_q->max_txq, num_cpus);
dflt_singleq_txqs = min_t(int, max_q->max_txq, num_cpus);
@@ -1091,6 +1611,24 @@ int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_idx,
vport_msg->num_rx_bufq = 0;
}
+ if (!vport_config)
+ return 0;
+
+ user = &vport_config->user_config;
+ user->num_req_rx_qs = le16_to_cpu(vport_msg->num_rx_q);
+ user->num_req_tx_qs = le16_to_cpu(vport_msg->num_tx_q);
+
+ if (vport_config->user_config.xdp_prog)
+ num_xdpsq = libeth_xdpsq_num(user->num_req_rx_qs,
+ user->num_req_tx_qs,
+ vport_config->max_q.max_txq);
+ else
+ num_xdpsq = 0;
+
+ vport_msg->num_tx_q = cpu_to_le16(user->num_req_tx_qs + num_xdpsq);
+ if (idpf_is_queue_model_split(le16_to_cpu(vport_msg->txq_model)))
+ vport_msg->num_tx_complq = vport_msg->num_tx_q;
+
return 0;
}
@@ -1137,16 +1675,16 @@ static void idpf_vport_calc_numq_per_grp(struct idpf_vport *vport,
* @q: rx queue for which descids are set
*
*/
-static void idpf_rxq_set_descids(struct idpf_vport *vport, struct idpf_queue *q)
+static void idpf_rxq_set_descids(const struct idpf_vport *vport,
+ struct idpf_rx_queue *q)
{
- if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
- q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
- } else {
- if (vport->base_rxd)
- q->rxdids = VIRTCHNL2_RXDID_1_32B_BASE_M;
- else
- q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
- }
+ if (idpf_is_queue_model_split(vport->rxq_model))
+ return;
+
+ if (vport->base_rxd)
+ q->rxdids = VIRTCHNL2_RXDID_1_32B_BASE_M;
+ else
+ q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
}
/**
@@ -1158,14 +1696,15 @@ static void idpf_rxq_set_descids(struct idpf_vport *vport, struct idpf_queue *q)
*/
static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
{
- bool flow_sch_en;
- int err, i;
+ bool split, flow_sch_en;
+ int i;
vport->txq_grps = kcalloc(vport->num_txq_grp,
sizeof(*vport->txq_grps), GFP_KERNEL);
if (!vport->txq_grps)
return -ENOMEM;
+ split = idpf_is_queue_model_split(vport->txq_model);
flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS,
VIRTCHNL2_CAP_SPLITQ_QSCHED);
@@ -1180,45 +1719,56 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
for (j = 0; j < tx_qgrp->num_txq; j++) {
tx_qgrp->txqs[j] = kzalloc(sizeof(*tx_qgrp->txqs[j]),
GFP_KERNEL);
- if (!tx_qgrp->txqs[j]) {
- err = -ENOMEM;
+ if (!tx_qgrp->txqs[j])
goto err_alloc;
- }
}
for (j = 0; j < tx_qgrp->num_txq; j++) {
- struct idpf_queue *q = tx_qgrp->txqs[j];
+ struct idpf_tx_queue *q = tx_qgrp->txqs[j];
q->dev = &adapter->pdev->dev;
q->desc_count = vport->txq_desc_count;
q->tx_max_bufs = idpf_get_max_tx_bufs(adapter);
q->tx_min_pkt_len = idpf_get_min_tx_pkt_len(adapter);
- q->vport = vport;
+ q->netdev = vport->netdev;
q->txq_grp = tx_qgrp;
- hash_init(q->sched_buf_hash);
+ q->rel_q_id = j;
- if (flow_sch_en)
- set_bit(__IDPF_Q_FLOW_SCH_EN, q->flags);
+ if (!split) {
+ q->clean_budget = vport->compln_clean_budget;
+ idpf_queue_assign(CRC_EN, q,
+ vport->crc_enable);
+ }
+
+ if (!flow_sch_en)
+ continue;
+
+ idpf_queue_set(FLOW_SCH_EN, q);
+
+ q->refillq = kzalloc(sizeof(*q->refillq), GFP_KERNEL);
+ if (!q->refillq)
+ goto err_alloc;
+
+ idpf_queue_set(GEN_CHK, q->refillq);
+ idpf_queue_set(RFL_GEN_CHK, q->refillq);
}
- if (!idpf_is_queue_model_split(vport->txq_model))
+ if (!split)
continue;
tx_qgrp->complq = kcalloc(IDPF_COMPLQ_PER_GROUP,
sizeof(*tx_qgrp->complq),
GFP_KERNEL);
- if (!tx_qgrp->complq) {
- err = -ENOMEM;
+ if (!tx_qgrp->complq)
goto err_alloc;
- }
- tx_qgrp->complq->dev = &adapter->pdev->dev;
tx_qgrp->complq->desc_count = vport->complq_desc_count;
- tx_qgrp->complq->vport = vport;
tx_qgrp->complq->txq_grp = tx_qgrp;
+ tx_qgrp->complq->netdev = vport->netdev;
+ tx_qgrp->complq->clean_budget = vport->compln_clean_budget;
if (flow_sch_en)
- __set_bit(__IDPF_Q_FLOW_SCH_EN, tx_qgrp->complq->flags);
+ idpf_queue_set(FLOW_SCH_EN, tx_qgrp->complq);
}
return 0;
@@ -1226,7 +1776,7 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
err_alloc:
idpf_txq_group_rel(vport);
- return err;
+ return -ENOMEM;
}
/**
@@ -1238,8 +1788,6 @@ err_alloc:
*/
static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
{
- struct idpf_adapter *adapter = vport->adapter;
- struct idpf_queue *q;
int i, k, err = 0;
bool hs;
@@ -1292,21 +1840,13 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
struct idpf_bufq_set *bufq_set =
&rx_qgrp->splitq.bufq_sets[j];
int swq_size = sizeof(struct idpf_sw_queue);
+ struct idpf_buf_queue *q;
q = &rx_qgrp->splitq.bufq_sets[j].bufq;
- q->dev = &adapter->pdev->dev;
q->desc_count = vport->bufq_desc_count[j];
- q->vport = vport;
- q->rxq_grp = rx_qgrp;
- q->idx = j;
- q->rx_buf_size = vport->bufq_size[j];
q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK;
- q->rx_buf_stride = IDPF_RX_BUF_STRIDE;
- if (hs) {
- q->rx_hsplit_en = true;
- q->rx_hbuf_size = IDPF_HDR_BUF_SIZE;
- }
+ idpf_queue_assign(HSPLIT_EN, q, hs);
bufq_set->num_refillqs = num_rxq;
bufq_set->refillqs = kcalloc(num_rxq, swq_size,
@@ -1319,13 +1859,12 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
struct idpf_sw_queue *refillq =
&bufq_set->refillqs[k];
- refillq->dev = &vport->adapter->pdev->dev;
refillq->desc_count =
vport->bufq_desc_count[j];
- set_bit(__IDPF_Q_GEN_CHK, refillq->flags);
- set_bit(__IDPF_RFLQ_GEN_CHK, refillq->flags);
+ idpf_queue_set(GEN_CHK, refillq);
+ idpf_queue_set(RFL_GEN_CHK, refillq);
refillq->ring = kcalloc(refillq->desc_count,
- sizeof(u16),
+ sizeof(*refillq->ring),
GFP_KERNEL);
if (!refillq->ring) {
err = -ENOMEM;
@@ -1336,36 +1875,29 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
skip_splitq_rx_init:
for (j = 0; j < num_rxq; j++) {
+ struct idpf_rx_queue *q;
+
if (!idpf_is_queue_model_split(vport->rxq_model)) {
q = rx_qgrp->singleq.rxqs[j];
goto setup_rxq;
}
q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
- rx_qgrp->splitq.rxq_sets[j]->refillq0 =
+ rx_qgrp->splitq.rxq_sets[j]->refillq[0] =
&rx_qgrp->splitq.bufq_sets[0].refillqs[j];
if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP)
- rx_qgrp->splitq.rxq_sets[j]->refillq1 =
+ rx_qgrp->splitq.rxq_sets[j]->refillq[1] =
&rx_qgrp->splitq.bufq_sets[1].refillqs[j];
- if (hs) {
- q->rx_hsplit_en = true;
- q->rx_hbuf_size = IDPF_HDR_BUF_SIZE;
- }
+ idpf_queue_assign(HSPLIT_EN, q, hs);
setup_rxq:
- q->dev = &adapter->pdev->dev;
q->desc_count = vport->rxq_desc_count;
- q->vport = vport;
- q->rxq_grp = rx_qgrp;
+ q->rx_ptype_lkup = vport->rx_ptype_lkup;
+ q->bufq_sets = rx_qgrp->splitq.bufq_sets;
q->idx = (i * num_rxq) + j;
- /* In splitq mode, RXQ buffer size should be
- * set to that of the first buffer queue
- * associated with this RXQ
- */
- q->rx_buf_size = vport->bufq_size[0];
q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK;
q->rx_max_pkt_size = vport->netdev->mtu +
- IDPF_PACKET_HDR_PAD;
+ LIBETH_RX_LL_LEN;
idpf_rxq_set_descids(vport, q);
}
}
@@ -1421,15 +1953,19 @@ int idpf_vport_queues_alloc(struct idpf_vport *vport)
if (err)
goto err_out;
- err = idpf_tx_desc_alloc_all(vport);
+ err = idpf_vport_init_fast_path_txqs(vport);
if (err)
goto err_out;
- err = idpf_rx_desc_alloc_all(vport);
+ err = idpf_xdpsqs_get(vport);
if (err)
goto err_out;
- err = idpf_vport_init_fast_path_txqs(vport);
+ err = idpf_tx_desc_alloc_all(vport);
+ if (err)
+ goto err_out;
+
+ err = idpf_rx_desc_alloc_all(vport);
if (err)
goto err_out;
@@ -1442,149 +1978,45 @@ err_out:
}
/**
- * idpf_tx_handle_sw_marker - Handle queue marker packet
- * @tx_q: tx queue to handle software marker
- */
-static void idpf_tx_handle_sw_marker(struct idpf_queue *tx_q)
-{
- struct idpf_vport *vport = tx_q->vport;
- int i;
-
- clear_bit(__IDPF_Q_SW_MARKER, tx_q->flags);
- /* Hardware must write marker packets to all queues associated with
- * completion queues. So check if all queues received marker packets
- */
- for (i = 0; i < vport->num_txq; i++)
- /* If we're still waiting on any other TXQ marker completions,
- * just return now since we cannot wake up the marker_wq yet.
- */
- if (test_bit(__IDPF_Q_SW_MARKER, vport->txqs[i]->flags))
- return;
-
- /* Drain complete */
- set_bit(IDPF_VPORT_SW_MARKER, vport->flags);
- wake_up(&vport->sw_marker_wq);
-}
-
-/**
- * idpf_tx_splitq_clean_hdr - Clean TX buffer resources for header portion of
- * packet
- * @tx_q: tx queue to clean buffer from
- * @tx_buf: buffer to be cleaned
- * @cleaned: pointer to stats struct to track cleaned packets/bytes
- * @napi_budget: Used to determine if we are in netpoll
+ * idpf_tx_read_tstamp - schedule work to read the Tx timestamp value
+ * @txq: queue to read the timestamp from
+ * @skb: socket buffer to provide the Tx timestamp value for
+ *
+ * Schedule work to read the Tx timestamp value generated once the packet is
+ * transmitted.
*/
-static void idpf_tx_splitq_clean_hdr(struct idpf_queue *tx_q,
- struct idpf_tx_buf *tx_buf,
- struct idpf_cleaned_stats *cleaned,
- int napi_budget)
+static void idpf_tx_read_tstamp(struct idpf_tx_queue *txq, struct sk_buff *skb)
{
- napi_consume_skb(tx_buf->skb, napi_budget);
-
- if (dma_unmap_len(tx_buf, len)) {
- dma_unmap_single(tx_q->dev,
- dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
+ struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps;
+ struct idpf_ptp_tx_tstamp_status *tx_tstamp_status;
- dma_unmap_len_set(tx_buf, len, 0);
- }
+ tx_tstamp_caps = txq->cached_tstamp_caps;
+ spin_lock_bh(&tx_tstamp_caps->status_lock);
- /* clear tx_buf data */
- tx_buf->skb = NULL;
-
- cleaned->bytes += tx_buf->bytecount;
- cleaned->packets += tx_buf->gso_segs;
-}
-
-/**
- * idpf_tx_clean_stashed_bufs - clean bufs that were stored for
- * out of order completions
- * @txq: queue to clean
- * @compl_tag: completion tag of packet to clean (from completion descriptor)
- * @cleaned: pointer to stats struct to track cleaned packets/bytes
- * @budget: Used to determine if we are in netpoll
- */
-static void idpf_tx_clean_stashed_bufs(struct idpf_queue *txq, u16 compl_tag,
- struct idpf_cleaned_stats *cleaned,
- int budget)
-{
- struct idpf_tx_stash *stash;
- struct hlist_node *tmp_buf;
-
- /* Buffer completion */
- hash_for_each_possible_safe(txq->sched_buf_hash, stash, tmp_buf,
- hlist, compl_tag) {
- if (unlikely(stash->buf.compl_tag != (int)compl_tag))
+ for (u32 i = 0; i < tx_tstamp_caps->num_entries; i++) {
+ tx_tstamp_status = &tx_tstamp_caps->tx_tstamp_status[i];
+ if (tx_tstamp_status->state != IDPF_PTP_FREE)
continue;
- if (stash->buf.skb) {
- idpf_tx_splitq_clean_hdr(txq, &stash->buf, cleaned,
- budget);
- } else if (dma_unmap_len(&stash->buf, len)) {
- dma_unmap_page(txq->dev,
- dma_unmap_addr(&stash->buf, dma),
- dma_unmap_len(&stash->buf, len),
- DMA_TO_DEVICE);
- dma_unmap_len_set(&stash->buf, len, 0);
- }
-
- /* Push shadow buf back onto stack */
- idpf_buf_lifo_push(&txq->buf_stack, stash);
+ tx_tstamp_status->skb = skb;
+ tx_tstamp_status->state = IDPF_PTP_REQUEST;
- hash_del(&stash->hlist);
- }
-}
-
-/**
- * idpf_stash_flow_sch_buffers - store buffer parameters info to be freed at a
- * later time (only relevant for flow scheduling mode)
- * @txq: Tx queue to clean
- * @tx_buf: buffer to store
- */
-static int idpf_stash_flow_sch_buffers(struct idpf_queue *txq,
- struct idpf_tx_buf *tx_buf)
-{
- struct idpf_tx_stash *stash;
-
- if (unlikely(!dma_unmap_addr(tx_buf, dma) &&
- !dma_unmap_len(tx_buf, len)))
- return 0;
-
- stash = idpf_buf_lifo_pop(&txq->buf_stack);
- if (unlikely(!stash)) {
- net_err_ratelimited("%s: No out-of-order TX buffers left!\n",
- txq->vport->netdev->name);
-
- return -ENOMEM;
+ /* Fetch timestamp from completion descriptor through
+ * virtchnl msg to report to stack.
+ */
+ queue_work(system_unbound_wq, txq->tstamp_task);
+ break;
}
- /* Store buffer params in shadow buffer */
- stash->buf.skb = tx_buf->skb;
- stash->buf.bytecount = tx_buf->bytecount;
- stash->buf.gso_segs = tx_buf->gso_segs;
- dma_unmap_addr_set(&stash->buf, dma, dma_unmap_addr(tx_buf, dma));
- dma_unmap_len_set(&stash->buf, len, dma_unmap_len(tx_buf, len));
- stash->buf.compl_tag = tx_buf->compl_tag;
-
- /* Add buffer to buf_hash table to be freed later */
- hash_add(txq->sched_buf_hash, &stash->hlist, stash->buf.compl_tag);
-
- memset(tx_buf, 0, sizeof(struct idpf_tx_buf));
-
- /* Reinitialize buf_id portion of tag */
- tx_buf->compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
-
- return 0;
+ spin_unlock_bh(&tx_tstamp_caps->status_lock);
}
#define idpf_tx_splitq_clean_bump_ntc(txq, ntc, desc, buf) \
do { \
- (ntc)++; \
- if (unlikely(!(ntc))) { \
- ntc -= (txq)->desc_count; \
+ if (unlikely(++(ntc) == (txq)->desc_count)) { \
+ ntc = 0; \
buf = (txq)->tx_buf; \
- desc = IDPF_FLEX_TX_DESC(txq, 0); \
+ desc = &(txq)->flex_tx[0]; \
} else { \
(buf)++; \
(desc)++; \
@@ -1607,158 +2039,100 @@ do { \
* and the buffers will be cleaned separately. The stats are not updated from
* this function when using flow-based scheduling.
*/
-static void idpf_tx_splitq_clean(struct idpf_queue *tx_q, u16 end,
+static void idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end,
int napi_budget,
- struct idpf_cleaned_stats *cleaned,
+ struct libeth_sq_napi_stats *cleaned,
bool descs_only)
{
union idpf_tx_flex_desc *next_pending_desc = NULL;
union idpf_tx_flex_desc *tx_desc;
- s16 ntc = tx_q->next_to_clean;
+ u32 ntc = tx_q->next_to_clean;
+ struct libeth_cq_pp cp = {
+ .dev = tx_q->dev,
+ .ss = cleaned,
+ .napi = napi_budget,
+ };
struct idpf_tx_buf *tx_buf;
- tx_desc = IDPF_FLEX_TX_DESC(tx_q, ntc);
- next_pending_desc = IDPF_FLEX_TX_DESC(tx_q, end);
+ if (descs_only) {
+ /* Bump ring index to mark as cleaned. */
+ tx_q->next_to_clean = end;
+ return;
+ }
+
+ tx_desc = &tx_q->flex_tx[ntc];
+ next_pending_desc = &tx_q->flex_tx[end];
tx_buf = &tx_q->tx_buf[ntc];
- ntc -= tx_q->desc_count;
while (tx_desc != next_pending_desc) {
- union idpf_tx_flex_desc *eop_desc;
+ u32 eop_idx;
/* If this entry in the ring was used as a context descriptor,
- * it's corresponding entry in the buffer ring will have an
- * invalid completion tag since no buffer was used. We can
- * skip this descriptor since there is no buffer to clean.
+ * its corresponding entry in the buffer ring is reserved. We
+ * can skip this descriptor since there is no buffer to clean.
*/
- if (unlikely(tx_buf->compl_tag == IDPF_SPLITQ_TX_INVAL_COMPL_TAG))
+ if (tx_buf->type <= LIBETH_SQE_CTX)
goto fetch_next_txq_desc;
- eop_desc = (union idpf_tx_flex_desc *)tx_buf->next_to_watch;
-
- /* clear next_to_watch to prevent false hangs */
- tx_buf->next_to_watch = NULL;
+ if (unlikely(tx_buf->type != LIBETH_SQE_SKB))
+ break;
- if (descs_only) {
- if (idpf_stash_flow_sch_buffers(tx_q, tx_buf))
- goto tx_splitq_clean_out;
+ eop_idx = tx_buf->rs_idx;
+ libeth_tx_complete(tx_buf, &cp);
- while (tx_desc != eop_desc) {
- idpf_tx_splitq_clean_bump_ntc(tx_q, ntc,
- tx_desc, tx_buf);
+ /* unmap remaining buffers */
+ while (ntc != eop_idx) {
+ idpf_tx_splitq_clean_bump_ntc(tx_q, ntc,
+ tx_desc, tx_buf);
- if (dma_unmap_len(tx_buf, len)) {
- if (idpf_stash_flow_sch_buffers(tx_q,
- tx_buf))
- goto tx_splitq_clean_out;
- }
- }
- } else {
- idpf_tx_splitq_clean_hdr(tx_q, tx_buf, cleaned,
- napi_budget);
-
- /* unmap remaining buffers */
- while (tx_desc != eop_desc) {
- idpf_tx_splitq_clean_bump_ntc(tx_q, ntc,
- tx_desc, tx_buf);
-
- /* unmap any remaining paged data */
- if (dma_unmap_len(tx_buf, len)) {
- dma_unmap_page(tx_q->dev,
- dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
- dma_unmap_len_set(tx_buf, len, 0);
- }
- }
+ /* unmap any remaining paged data */
+ libeth_tx_complete(tx_buf, &cp);
}
fetch_next_txq_desc:
idpf_tx_splitq_clean_bump_ntc(tx_q, ntc, tx_desc, tx_buf);
}
-tx_splitq_clean_out:
- ntc += tx_q->desc_count;
tx_q->next_to_clean = ntc;
}
-#define idpf_tx_clean_buf_ring_bump_ntc(txq, ntc, buf) \
-do { \
- (buf)++; \
- (ntc)++; \
- if (unlikely((ntc) == (txq)->desc_count)) { \
- buf = (txq)->tx_buf; \
- ntc = 0; \
- } \
-} while (0)
-
/**
- * idpf_tx_clean_buf_ring - clean flow scheduling TX queue buffers
+ * idpf_tx_clean_bufs - clean flow scheduling TX queue buffers
* @txq: queue to clean
- * @compl_tag: completion tag of packet to clean (from completion descriptor)
+ * @buf_id: packet's starting buffer ID, from completion descriptor
* @cleaned: pointer to stats struct to track cleaned packets/bytes
* @budget: Used to determine if we are in netpoll
*
- * Cleans all buffers associated with the input completion tag either from the
- * TX buffer ring or from the hash table if the buffers were previously
- * stashed. Returns the byte/segment count for the cleaned packet associated
- * this completion tag.
+ * Clean all buffers associated with the packet starting at buf_id. The
+ * byte/segment counts for the cleaned packet are added to @cleaned.
*/
-static bool idpf_tx_clean_buf_ring(struct idpf_queue *txq, u16 compl_tag,
- struct idpf_cleaned_stats *cleaned,
- int budget)
+static void idpf_tx_clean_bufs(struct idpf_tx_queue *txq, u32 buf_id,
+ struct libeth_sq_napi_stats *cleaned,
+ int budget)
{
- u16 idx = compl_tag & txq->compl_tag_bufid_m;
struct idpf_tx_buf *tx_buf = NULL;
- u16 ntc = txq->next_to_clean;
- u16 num_descs_cleaned = 0;
- u16 orig_idx = idx;
-
- tx_buf = &txq->tx_buf[idx];
-
- while (tx_buf->compl_tag == (int)compl_tag) {
- if (tx_buf->skb) {
- idpf_tx_splitq_clean_hdr(txq, tx_buf, cleaned, budget);
- } else if (dma_unmap_len(tx_buf, len)) {
- dma_unmap_page(txq->dev,
- dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
- dma_unmap_len_set(tx_buf, len, 0);
- }
+ struct libeth_cq_pp cp = {
+ .dev = txq->dev,
+ .ss = cleaned,
+ .napi = budget,
+ };
- memset(tx_buf, 0, sizeof(struct idpf_tx_buf));
- tx_buf->compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
+ tx_buf = &txq->tx_buf[buf_id];
+ if (tx_buf->type == LIBETH_SQE_SKB) {
+ if (skb_shinfo(tx_buf->skb)->tx_flags & SKBTX_IN_PROGRESS)
+ idpf_tx_read_tstamp(txq, tx_buf->skb);
- num_descs_cleaned++;
- idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf);
+ libeth_tx_complete(tx_buf, &cp);
+ idpf_post_buf_refill(txq->refillq, buf_id);
}
- /* If we didn't clean anything on the ring for this completion, there's
- * nothing more to do.
- */
- if (unlikely(!num_descs_cleaned))
- return false;
+ while (idpf_tx_buf_next(tx_buf) != IDPF_TXBUF_NULL) {
+ buf_id = idpf_tx_buf_next(tx_buf);
- /* Otherwise, if we did clean a packet on the ring directly, it's safe
- * to assume that the descriptors starting from the original
- * next_to_clean up until the previously cleaned packet can be reused.
- * Therefore, we will go back in the ring and stash any buffers still
- * in the ring into the hash table to be cleaned later.
- */
- tx_buf = &txq->tx_buf[ntc];
- while (tx_buf != &txq->tx_buf[orig_idx]) {
- idpf_stash_flow_sch_buffers(txq, tx_buf);
- idpf_tx_clean_buf_ring_bump_ntc(txq, ntc, tx_buf);
+ tx_buf = &txq->tx_buf[buf_id];
+ libeth_tx_complete(tx_buf, &cp);
+ idpf_post_buf_refill(txq->refillq, buf_id);
}
-
- /* Finally, update next_to_clean to reflect the work that was just done
- * on the ring, if any. If the packet was only cleaned from the hash
- * table, the ring will not be impacted, therefore we should not touch
- * next_to_clean. The updated idx is used here
- */
- txq->next_to_clean = idx;
-
- return true;
}
/**
@@ -1772,26 +2146,22 @@ static bool idpf_tx_clean_buf_ring(struct idpf_queue *txq, u16 compl_tag,
*
* Returns bytes/packets cleaned
*/
-static void idpf_tx_handle_rs_completion(struct idpf_queue *txq,
+static void idpf_tx_handle_rs_completion(struct idpf_tx_queue *txq,
struct idpf_splitq_tx_compl_desc *desc,
- struct idpf_cleaned_stats *cleaned,
+ struct libeth_sq_napi_stats *cleaned,
int budget)
{
- u16 compl_tag;
-
- if (!test_bit(__IDPF_Q_FLOW_SCH_EN, txq->flags)) {
- u16 head = le16_to_cpu(desc->q_head_compl_tag.q_head);
+ /* RS completion contains queue head for queue based scheduling or
+ * completion tag for flow based scheduling.
+ */
+ u16 rs_compl_val = le16_to_cpu(desc->common.q_head_compl_tag.q_head);
- return idpf_tx_splitq_clean(txq, head, budget, cleaned, false);
+ if (!idpf_queue_has(FLOW_SCH_EN, txq)) {
+ idpf_tx_splitq_clean(txq, rs_compl_val, budget, cleaned, false);
+ return;
}
- compl_tag = le16_to_cpu(desc->q_head_compl_tag.compl_tag);
-
- /* If we didn't clean anything on the ring, this packet must be
- * in the hash table. Go clean it there.
- */
- if (!idpf_tx_clean_buf_ring(txq, compl_tag, cleaned, budget))
- idpf_tx_clean_stashed_bufs(txq, compl_tag, cleaned, budget);
+ idpf_tx_clean_bufs(txq, rs_compl_val, cleaned, budget);
}
/**
@@ -1802,73 +2172,67 @@ static void idpf_tx_handle_rs_completion(struct idpf_queue *txq,
*
* Returns true if there's any budget left (e.g. the clean is finished)
*/
-static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget,
+static bool idpf_tx_clean_complq(struct idpf_compl_queue *complq, int budget,
int *cleaned)
{
struct idpf_splitq_tx_compl_desc *tx_desc;
- struct idpf_vport *vport = complq->vport;
s16 ntc = complq->next_to_clean;
struct idpf_netdev_priv *np;
unsigned int complq_budget;
bool complq_ok = true;
int i;
- complq_budget = vport->compln_clean_budget;
- tx_desc = IDPF_SPLITQ_TX_COMPLQ_DESC(complq, ntc);
+ complq_budget = complq->clean_budget;
+ tx_desc = &complq->comp[ntc];
ntc -= complq->desc_count;
do {
- struct idpf_cleaned_stats cleaned_stats = { };
- struct idpf_queue *tx_q;
+ struct libeth_sq_napi_stats cleaned_stats = { };
+ struct idpf_tx_queue *tx_q;
+ __le16 hw_head;
int rel_tx_qid;
- u16 hw_head;
u8 ctype; /* completion type */
u16 gen;
/* if the descriptor isn't done, no work yet to do */
- gen = le16_get_bits(tx_desc->qid_comptype_gen,
+ gen = le16_get_bits(tx_desc->common.qid_comptype_gen,
IDPF_TXD_COMPLQ_GEN_M);
- if (test_bit(__IDPF_Q_GEN_CHK, complq->flags) != gen)
+ if (idpf_queue_has(GEN_CHK, complq) != gen)
break;
/* Find necessary info of TX queue to clean buffers */
- rel_tx_qid = le16_get_bits(tx_desc->qid_comptype_gen,
+ rel_tx_qid = le16_get_bits(tx_desc->common.qid_comptype_gen,
IDPF_TXD_COMPLQ_QID_M);
if (rel_tx_qid >= complq->txq_grp->num_txq ||
!complq->txq_grp->txqs[rel_tx_qid]) {
- dev_err(&complq->vport->adapter->pdev->dev,
- "TxQ not found\n");
+ netdev_err(complq->netdev, "TxQ not found\n");
goto fetch_next_desc;
}
tx_q = complq->txq_grp->txqs[rel_tx_qid];
/* Determine completion type */
- ctype = le16_get_bits(tx_desc->qid_comptype_gen,
+ ctype = le16_get_bits(tx_desc->common.qid_comptype_gen,
IDPF_TXD_COMPLQ_COMPL_TYPE_M);
switch (ctype) {
case IDPF_TXD_COMPLT_RE:
- hw_head = le16_to_cpu(tx_desc->q_head_compl_tag.q_head);
+ hw_head = tx_desc->common.q_head_compl_tag.q_head;
- idpf_tx_splitq_clean(tx_q, hw_head, budget,
- &cleaned_stats, true);
+ idpf_tx_splitq_clean(tx_q, le16_to_cpu(hw_head),
+ budget, &cleaned_stats, true);
break;
case IDPF_TXD_COMPLT_RS:
idpf_tx_handle_rs_completion(tx_q, tx_desc,
&cleaned_stats, budget);
break;
- case IDPF_TXD_COMPLT_SW_MARKER:
- idpf_tx_handle_sw_marker(tx_q);
- break;
default:
- dev_err(&tx_q->vport->adapter->pdev->dev,
- "Unknown TX completion type: %d\n",
- ctype);
+ netdev_err(tx_q->netdev,
+ "Unknown TX completion type: %d\n", ctype);
goto fetch_next_desc;
}
u64_stats_update_begin(&tx_q->stats_sync);
- u64_stats_add(&tx_q->q_stats.tx.packets, cleaned_stats.packets);
- u64_stats_add(&tx_q->q_stats.tx.bytes, cleaned_stats.bytes);
+ u64_stats_add(&tx_q->q_stats.packets, cleaned_stats.packets);
+ u64_stats_add(&tx_q->q_stats.bytes, cleaned_stats.bytes);
tx_q->cleaned_pkts += cleaned_stats.packets;
tx_q->cleaned_bytes += cleaned_stats.bytes;
complq->num_completions++;
@@ -1879,8 +2243,8 @@ fetch_next_desc:
ntc++;
if (unlikely(!ntc)) {
ntc -= complq->desc_count;
- tx_desc = IDPF_SPLITQ_TX_COMPLQ_DESC(complq, 0);
- change_bit(__IDPF_Q_GEN_CHK, complq->flags);
+ tx_desc = &complq->comp[0];
+ idpf_queue_change(GEN_CHK, complq);
}
prefetch(tx_desc);
@@ -1896,9 +2260,9 @@ fetch_next_desc:
IDPF_TX_COMPLQ_OVERFLOW_THRESH(complq)))
complq_ok = false;
- np = netdev_priv(complq->vport->netdev);
+ np = netdev_priv(complq->netdev);
for (i = 0; i < complq->txq_grp->num_txq; ++i) {
- struct idpf_queue *tx_q = complq->txq_grp->txqs[i];
+ struct idpf_tx_queue *tx_q = complq->txq_grp->txqs[i];
struct netdev_queue *nq;
bool dont_wake;
@@ -1909,11 +2273,10 @@ fetch_next_desc:
*cleaned += tx_q->cleaned_pkts;
/* Update BQL */
- nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx);
+ nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
- dont_wake = !complq_ok || IDPF_TX_BUF_RSV_LOW(tx_q) ||
- np->state != __IDPF_VPORT_UP ||
- !netif_carrier_ok(tx_q->vport->netdev);
+ dont_wake = !complq_ok || !test_bit(IDPF_VPORT_UP, np->state) ||
+ !netif_carrier_ok(tx_q->netdev);
/* Check if the TXQ needs to and can be restarted */
__netif_txq_completed_wake(nq, tx_q->cleaned_pkts, tx_q->cleaned_bytes,
IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH,
@@ -1933,6 +2296,69 @@ fetch_next_desc:
}
/**
+ * idpf_wait_for_sw_marker_completion - wait for SW marker of disabled Tx queue
+ * @txq: disabled Tx queue
+ *
+ * When a Tx queue is requested to be disabled, the CP sends a special
+ * completion descriptor called the "SW marker" to indicate the queue is ready
+ * to be destroyed. If, for some reason, the marker is not received within
+ * 500 ms, stop polling so that the driver does not hang.
+ */
+void idpf_wait_for_sw_marker_completion(const struct idpf_tx_queue *txq)
+{
+ struct idpf_compl_queue *complq;
+ unsigned long timeout;
+ bool flow, gen_flag;
+ u32 ntc;
+
+ if (!idpf_queue_has(SW_MARKER, txq))
+ return;
+
+ complq = idpf_queue_has(XDP, txq) ? txq->complq : txq->txq_grp->complq;
+ ntc = complq->next_to_clean;
+
+ flow = idpf_queue_has(FLOW_SCH_EN, complq);
+ gen_flag = idpf_queue_has(GEN_CHK, complq);
+
+ timeout = jiffies + msecs_to_jiffies(IDPF_WAIT_FOR_MARKER_TIMEO);
+
+ do {
+ struct idpf_splitq_4b_tx_compl_desc *tx_desc;
+ struct idpf_tx_queue *target;
+ u32 ctype_gen, id;
+
+ tx_desc = flow ? &complq->comp[ntc].common :
+ &complq->comp_4b[ntc];
+ ctype_gen = le16_to_cpu(tx_desc->qid_comptype_gen);
+
+ if (!!(ctype_gen & IDPF_TXD_COMPLQ_GEN_M) != gen_flag) {
+ usleep_range(500, 1000);
+ continue;
+ }
+
+ if (FIELD_GET(IDPF_TXD_COMPLQ_COMPL_TYPE_M, ctype_gen) !=
+ IDPF_TXD_COMPLT_SW_MARKER)
+ goto next;
+
+ id = FIELD_GET(IDPF_TXD_COMPLQ_QID_M, ctype_gen);
+ target = complq->txq_grp->txqs[id];
+
+ idpf_queue_clear(SW_MARKER, target);
+ if (target == txq)
+ break;
+
+next:
+ if (unlikely(++ntc == complq->desc_count)) {
+ ntc = 0;
+ gen_flag = !gen_flag;
+ }
+ } while (time_before(jiffies, timeout));
+
+ idpf_queue_assign(GEN_CHK, complq, gen_flag);
+ complq->next_to_clean = ntc;
+}
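A hedged sketch of the expected call site (not part of this patch; the surrounding disable path is an assumption for illustration only):

	/* after the disable message for the vport has been sent ... */
	for (u32 i = 0; i < vport->num_txq; i++)
		idpf_wait_for_sw_marker_completion(vport->txqs[i]);

The helper returns immediately for queues that never had SW_MARKER set, so calling it for every Tx queue of the vport is safe.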
+
+/**
* idpf_tx_splitq_build_ctb - populate command tag and size for queue
* based scheduling descriptors
* @desc: descriptor to populate
@@ -1964,68 +2390,55 @@ void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
struct idpf_tx_splitq_params *params,
u16 td_cmd, u16 size)
{
- desc->flow.qw1.cmd_dtype = (u16)params->dtype | td_cmd;
+ *(u32 *)&desc->flow.qw1.cmd_dtype = (u8)(params->dtype | td_cmd);
desc->flow.qw1.rxr_bufsize = cpu_to_le16((u16)size);
desc->flow.qw1.compl_tag = cpu_to_le16(params->compl_tag);
}
/**
- * idpf_tx_maybe_stop_common - 1st level check for common Tx stop conditions
+ * idpf_txq_has_room - check if enough Tx splitq resources are available
* @tx_q: the queue to be checked
- * @size: number of descriptors we want to assure is available
+ * @descs_needed: number of descriptors required for this packet
+ * @bufs_needed: number of Tx buffers required for this packet
*
- * Returns 0 if stop is not needed
+ * Return: 0 if no room available, 1 otherwise
*/
-int idpf_tx_maybe_stop_common(struct idpf_queue *tx_q, unsigned int size)
+static int idpf_txq_has_room(struct idpf_tx_queue *tx_q, u32 descs_needed,
+ u32 bufs_needed)
{
- struct netdev_queue *nq;
-
- if (likely(IDPF_DESC_UNUSED(tx_q) >= size))
+ if (IDPF_DESC_UNUSED(tx_q) < descs_needed ||
+ IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) >
+ IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq) ||
+ idpf_tx_splitq_get_free_bufs(tx_q->refillq) < bufs_needed)
return 0;
-
- u64_stats_update_begin(&tx_q->stats_sync);
- u64_stats_inc(&tx_q->q_stats.tx.q_busy);
- u64_stats_update_end(&tx_q->stats_sync);
-
- nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx);
-
- return netif_txq_maybe_stop(nq, IDPF_DESC_UNUSED(tx_q), size, size);
+ return 1;
}
/**
* idpf_tx_maybe_stop_splitq - 1st level check for Tx splitq stop conditions
* @tx_q: the queue to be checked
* @descs_needed: number of descriptors required for this packet
+ * @bufs_needed: number of buffers needed for this packet
*
- * Returns 0 if stop is not needed
+ * Return: 0 if stop is not needed
*/
-static int idpf_tx_maybe_stop_splitq(struct idpf_queue *tx_q,
- unsigned int descs_needed)
+static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q,
+ u32 descs_needed,
+ u32 bufs_needed)
{
- if (idpf_tx_maybe_stop_common(tx_q, descs_needed))
- goto splitq_stop;
-
- /* If there are too many outstanding completions expected on the
- * completion queue, stop the TX queue to give the device some time to
- * catch up
- */
- if (unlikely(IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) >
- IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq)))
- goto splitq_stop;
-
- /* Also check for available book keeping buffers; if we are low, stop
- * the queue to wait for more completions
+ /* Since we have multiple resources to check for splitq, our
+ * start,stop_thrs becomes a boolean check instead of a count
+ * threshold.
*/
- if (unlikely(IDPF_TX_BUF_RSV_LOW(tx_q)))
- goto splitq_stop;
-
- return 0;
+ if (netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx,
+ idpf_txq_has_room(tx_q, descs_needed,
+ bufs_needed),
+ 1, 1))
+ return 0;
-splitq_stop:
u64_stats_update_begin(&tx_q->stats_sync);
- u64_stats_inc(&tx_q->q_stats.tx.q_busy);
+ u64_stats_inc(&tx_q->q_stats.q_busy);
u64_stats_update_end(&tx_q->stats_sync);
- netif_stop_subqueue(tx_q->vport->netdev, tx_q->idx);
return -EBUSY;
}
@@ -2040,16 +2453,14 @@ splitq_stop:
* to do a register write to update our queue status. We know this can only
* mean tail here as HW should be owning head for TX.
*/
-void idpf_tx_buf_hw_update(struct idpf_queue *tx_q, u32 val,
+void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
bool xmit_more)
{
struct netdev_queue *nq;
- nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx);
+ nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
tx_q->next_to_use = val;
- idpf_tx_maybe_stop_common(tx_q, IDPF_TX_DESC_NEEDED);
-
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
@@ -2063,14 +2474,16 @@ void idpf_tx_buf_hw_update(struct idpf_queue *tx_q, u32 val,
}
/**
- * idpf_tx_desc_count_required - calculate number of Tx descriptors needed
+ * idpf_tx_res_count_required - get number of Tx resources needed for this pkt
* @txq: queue to send buffer on
* @skb: send buffer
+ * @bufs_needed: (output) number of buffers needed for this skb.
*
- * Returns number of data descriptors needed for this skb.
+ * Return: number of data descriptors and buffers needed for this skb.
*/
-unsigned int idpf_tx_desc_count_required(struct idpf_queue *txq,
- struct sk_buff *skb)
+unsigned int idpf_tx_res_count_required(struct idpf_tx_queue *txq,
+ struct sk_buff *skb,
+ u32 *bufs_needed)
{
const struct skb_shared_info *shinfo;
unsigned int count = 0, i;
@@ -2081,6 +2494,7 @@ unsigned int idpf_tx_desc_count_required(struct idpf_queue *txq,
return count;
shinfo = skb_shinfo(skb);
+ *bufs_needed += shinfo->nr_frags;
for (i = 0; i < shinfo->nr_frags; i++) {
unsigned int size;
@@ -2102,7 +2516,7 @@ unsigned int idpf_tx_desc_count_required(struct idpf_queue *txq,
count = idpf_size_to_txd_count(skb->len);
u64_stats_update_begin(&txq->stats_sync);
- u64_stats_inc(&txq->q_stats.tx.linearize);
+ u64_stats_inc(&txq->q_stats.linearize);
u64_stats_update_end(&txq->stats_sync);
}
@@ -2110,65 +2524,89 @@ unsigned int idpf_tx_desc_count_required(struct idpf_queue *txq,
}
/**
- * idpf_tx_dma_map_error - handle TX DMA map errors
- * @txq: queue to send buffer on
- * @skb: send buffer
- * @first: original first buffer info buffer for packet
- * @idx: starting point on ring to unwind
+ * idpf_tx_splitq_bump_ntu - adjust NTU and generation
+ * @txq: the tx ring to wrap
+ * @ntu: ring index to bump
*/
-void idpf_tx_dma_map_error(struct idpf_queue *txq, struct sk_buff *skb,
- struct idpf_tx_buf *first, u16 idx)
+static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_tx_queue *txq, u16 ntu)
{
- u64_stats_update_begin(&txq->stats_sync);
- u64_stats_inc(&txq->q_stats.tx.dma_map_errs);
- u64_stats_update_end(&txq->stats_sync);
+ ntu++;
- /* clear dma mappings for failed tx_buf map */
- for (;;) {
- struct idpf_tx_buf *tx_buf;
+ if (ntu == txq->desc_count)
+ ntu = 0;
- tx_buf = &txq->tx_buf[idx];
- idpf_tx_buf_rel(txq, tx_buf);
- if (tx_buf == first)
- break;
- if (idx == 0)
- idx = txq->desc_count;
- idx--;
- }
+ return ntu;
+}
- if (skb_is_gso(skb)) {
- union idpf_tx_flex_desc *tx_desc;
+/**
+ * idpf_tx_get_free_buf_id - get a free buffer ID from the refill queue
+ * @refillq: refill queue to get buffer ID from
+ * @buf_id: return buffer ID
+ *
+ * Return: true if a buffer ID was found, false if not
+ */
+static bool idpf_tx_get_free_buf_id(struct idpf_sw_queue *refillq,
+ u32 *buf_id)
+{
+ u32 ntc = refillq->next_to_clean;
+ u32 refill_desc;
- /* If we failed a DMA mapping for a TSO packet, we will have
- * used one additional descriptor for a context
- * descriptor. Reset that here.
- */
- tx_desc = IDPF_FLEX_TX_DESC(txq, idx);
- memset(tx_desc, 0, sizeof(struct idpf_flex_tx_ctx_desc));
- if (idx == 0)
- idx = txq->desc_count;
- idx--;
+ refill_desc = refillq->ring[ntc];
+
+ if (unlikely(idpf_queue_has(RFL_GEN_CHK, refillq) !=
+ !!(refill_desc & IDPF_RFL_BI_GEN_M)))
+ return false;
+
+ *buf_id = FIELD_GET(IDPF_RFL_BI_BUFID_M, refill_desc);
+
+ if (unlikely(++ntc == refillq->desc_count)) {
+ idpf_queue_change(RFL_GEN_CHK, refillq);
+ ntc = 0;
}
- /* Update tail in case netdev_xmit_more was previously true */
- idpf_tx_buf_hw_update(txq, idx, false);
+ refillq->next_to_clean = ntc;
+
+ return true;
}
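	/* Reviewer note, not part of the patch: the refill ring uses a
	 * generation bit instead of a producer/consumer count.  Each refill
	 * descriptor carries IDPF_RFL_BI_GEN_M next to the buffer ID; the
	 * producer (presumably idpf_post_buf_refill()) toggles the bit it
	 * writes every time it wraps the ring, and the consumer mirrors that
	 * by flipping RFL_GEN_CHK on its own wrap, as done above.  A mismatch
	 * therefore means the slot has not been reposted since the consumer's
	 * last lap, so no free buffer ID is available and the function
	 * returns false.
	 */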
/**
- * idpf_tx_splitq_bump_ntu - adjust NTU and generation
- * @txq: the tx ring to wrap
- * @ntu: ring index to bump
+ * idpf_tx_splitq_pkt_err_unmap - Unmap buffers and bump tail in case of error
+ * @txq: Tx queue to unwind
+ * @params: pointer to splitq params struct
+ * @first: starting buffer for packet to unmap
*/
-static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_queue *txq, u16 ntu)
+static void idpf_tx_splitq_pkt_err_unmap(struct idpf_tx_queue *txq,
+ struct idpf_tx_splitq_params *params,
+ struct idpf_tx_buf *first)
{
- ntu++;
+ struct idpf_sw_queue *refillq = txq->refillq;
+ struct libeth_sq_napi_stats ss = { };
+ struct idpf_tx_buf *tx_buf = first;
+ struct libeth_cq_pp cp = {
+ .dev = txq->dev,
+ .ss = &ss,
+ };
- if (ntu == txq->desc_count) {
- ntu = 0;
- txq->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(txq);
+ u64_stats_update_begin(&txq->stats_sync);
+ u64_stats_inc(&txq->q_stats.dma_map_errs);
+ u64_stats_update_end(&txq->stats_sync);
+
+ libeth_tx_complete(tx_buf, &cp);
+ while (idpf_tx_buf_next(tx_buf) != IDPF_TXBUF_NULL) {
+ tx_buf = &txq->tx_buf[idpf_tx_buf_next(tx_buf)];
+ libeth_tx_complete(tx_buf, &cp);
}
- return ntu;
+ /* Update tail in case netdev_xmit_more was previously true. */
+ idpf_tx_buf_hw_update(txq, params->prev_ntu, false);
+
+ if (!refillq)
+ return;
+
+ /* Restore refillq state to avoid leaking tags. */
+ if (params->prev_refill_gen != idpf_queue_has(RFL_GEN_CHK, refillq))
+ idpf_queue_change(RFL_GEN_CHK, refillq);
+ refillq->next_to_clean = params->prev_refill_ntc;
}
/**
@@ -2181,7 +2619,7 @@ static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_queue *txq, u16 ntu)
* and gets a physical address for each memory location and programs
* it and the length into the transmit flex descriptor.
*/
-static void idpf_tx_splitq_map(struct idpf_queue *tx_q,
+static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
struct idpf_tx_splitq_params *params,
struct idpf_tx_buf *first)
{
@@ -2192,6 +2630,7 @@ static void idpf_tx_splitq_map(struct idpf_queue *tx_q,
struct netdev_queue *nq;
struct sk_buff *skb;
skb_frag_t *frag;
+ u32 next_buf_id;
u16 td_cmd = 0;
dma_addr_t dma;
@@ -2202,22 +2641,24 @@ static void idpf_tx_splitq_map(struct idpf_queue *tx_q,
data_len = skb->data_len;
size = skb_headlen(skb);
- tx_desc = IDPF_FLEX_TX_DESC(tx_q, i);
+ tx_desc = &tx_q->flex_tx[i];
dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE);
tx_buf = first;
-
- params->compl_tag =
- (tx_q->compl_tag_cur_gen << tx_q->compl_tag_gen_s) | i;
+ first->nr_frags = 0;
for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
unsigned int max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
- if (dma_mapping_error(tx_q->dev, dma))
- return idpf_tx_dma_map_error(tx_q, skb, first, i);
+ if (unlikely(dma_mapping_error(tx_q->dev, dma))) {
+ idpf_tx_buf_next(tx_buf) = IDPF_TXBUF_NULL;
+ return idpf_tx_splitq_pkt_err_unmap(tx_q, params,
+ first);
+ }
- tx_buf->compl_tag = params->compl_tag;
+ first->nr_frags++;
+ tx_buf->type = LIBETH_SQE_FRAG;
/* record length, and DMA address */
dma_unmap_len_set(tx_buf, len, size);
@@ -2271,29 +2712,13 @@ static void idpf_tx_splitq_map(struct idpf_queue *tx_q,
idpf_tx_splitq_build_desc(tx_desc, params, td_cmd,
max_data);
- tx_desc++;
- i++;
-
- if (i == tx_q->desc_count) {
- tx_desc = IDPF_FLEX_TX_DESC(tx_q, 0);
+ if (unlikely(++i == tx_q->desc_count)) {
+ tx_desc = &tx_q->flex_tx[0];
i = 0;
- tx_q->compl_tag_cur_gen =
- IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
+ } else {
+ tx_desc++;
}
- /* Since this packet has a buffer that is going to span
- * multiple descriptors, it's going to leave holes in
- * to the TX buffer ring. To ensure these holes do not
- * cause issues in the cleaning routines, we will clear
- * them of any stale data and assign them the same
- * completion tag as the current packet. Then when the
- * packet is being cleaned, the cleaning routines will
- * simply pass over these holes and finish cleaning the
- * rest of the packet.
- */
- memset(&tx_q->tx_buf[i], 0, sizeof(struct idpf_tx_buf));
- tx_q->tx_buf[i].compl_tag = params->compl_tag;
-
/* Adjust the DMA offset and the remaining size of the
* fragment. On the first iteration of this loop,
* max_data will be >= 12K and <= 16K-1. On any
@@ -2316,40 +2741,51 @@ static void idpf_tx_splitq_map(struct idpf_queue *tx_q,
break;
idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size);
- tx_desc++;
- i++;
- if (i == tx_q->desc_count) {
- tx_desc = IDPF_FLEX_TX_DESC(tx_q, 0);
+ if (unlikely(++i == tx_q->desc_count)) {
+ tx_desc = &tx_q->flex_tx[0];
i = 0;
- tx_q->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
+ } else {
+ tx_desc++;
}
+ if (idpf_queue_has(FLOW_SCH_EN, tx_q)) {
+ if (unlikely(!idpf_tx_get_free_buf_id(tx_q->refillq,
+ &next_buf_id))) {
+ idpf_tx_buf_next(tx_buf) = IDPF_TXBUF_NULL;
+ return idpf_tx_splitq_pkt_err_unmap(tx_q, params,
+ first);
+ }
+ } else {
+ next_buf_id = i;
+ }
+ idpf_tx_buf_next(tx_buf) = next_buf_id;
+ tx_buf = &tx_q->tx_buf[next_buf_id];
+
size = skb_frag_size(frag);
data_len -= size;
dma = skb_frag_dma_map(tx_q->dev, frag, 0, size,
DMA_TO_DEVICE);
-
- tx_buf = &tx_q->tx_buf[i];
}
/* record SW timestamp if HW timestamp is not available */
skb_tx_timestamp(skb);
+ first->type = LIBETH_SQE_SKB;
+
/* write last descriptor with RS and EOP bits */
+ first->rs_idx = i;
+ idpf_tx_buf_next(tx_buf) = IDPF_TXBUF_NULL;
td_cmd |= params->eop_cmd;
idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size);
i = idpf_tx_splitq_bump_ntu(tx_q, i);
- /* set next_to_watch value indicating a packet is present */
- first->next_to_watch = tx_desc;
-
tx_q->txq_grp->num_completions_pending++;
/* record bytecount for BQL */
- nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx);
- netdev_tx_sent_queue(nq, first->bytecount);
+ nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
+ netdev_tx_sent_queue(nq, first->bytes);
idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more());
}
@@ -2430,111 +2866,6 @@ int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off)
return 1;
}
-/**
- * __idpf_chk_linearize - Check skb is not using too many buffers
- * @skb: send buffer
- * @max_bufs: maximum number of buffers
- *
- * For TSO we need to count the TSO header and segment payload separately. As
- * such we need to check cases where we have max_bufs-1 fragments or more as we
- * can potentially require max_bufs+1 DMA transactions, 1 for the TSO header, 1
- * for the segment payload in the first descriptor, and another max_buf-1 for
- * the fragments.
- */
-static bool __idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs)
-{
- const struct skb_shared_info *shinfo = skb_shinfo(skb);
- const skb_frag_t *frag, *stale;
- int nr_frags, sum;
-
- /* no need to check if number of frags is less than max_bufs - 1 */
- nr_frags = shinfo->nr_frags;
- if (nr_frags < (max_bufs - 1))
- return false;
-
- /* We need to walk through the list and validate that each group
- * of max_bufs-2 fragments totals at least gso_size.
- */
- nr_frags -= max_bufs - 2;
- frag = &shinfo->frags[0];
-
- /* Initialize size to the negative value of gso_size minus 1. We use
- * this as the worst case scenario in which the frag ahead of us only
- * provides one byte which is why we are limited to max_bufs-2
- * descriptors for a single transmit as the header and previous
- * fragment are already consuming 2 descriptors.
- */
- sum = 1 - shinfo->gso_size;
-
- /* Add size of frags 0 through 4 to create our initial sum */
- sum += skb_frag_size(frag++);
- sum += skb_frag_size(frag++);
- sum += skb_frag_size(frag++);
- sum += skb_frag_size(frag++);
- sum += skb_frag_size(frag++);
-
- /* Walk through fragments adding latest fragment, testing it, and
- * then removing stale fragments from the sum.
- */
- for (stale = &shinfo->frags[0];; stale++) {
- int stale_size = skb_frag_size(stale);
-
- sum += skb_frag_size(frag++);
-
- /* The stale fragment may present us with a smaller
- * descriptor than the actual fragment size. To account
- * for that we need to remove all the data on the front and
- * figure out what the remainder would be in the last
- * descriptor associated with the fragment.
- */
- if (stale_size > IDPF_TX_MAX_DESC_DATA) {
- int align_pad = -(skb_frag_off(stale)) &
- (IDPF_TX_MAX_READ_REQ_SIZE - 1);
-
- sum -= align_pad;
- stale_size -= align_pad;
-
- do {
- sum -= IDPF_TX_MAX_DESC_DATA_ALIGNED;
- stale_size -= IDPF_TX_MAX_DESC_DATA_ALIGNED;
- } while (stale_size > IDPF_TX_MAX_DESC_DATA);
- }
-
- /* if sum is negative we failed to make sufficient progress */
- if (sum < 0)
- return true;
-
- if (!nr_frags--)
- break;
-
- sum -= stale_size;
- }
-
- return false;
-}
-
-/**
- * idpf_chk_linearize - Check if skb exceeds max descriptors per packet
- * @skb: send buffer
- * @max_bufs: maximum scatter gather buffers for single packet
- * @count: number of buffers this packet needs
- *
- * Make sure we don't exceed maximum scatter gather buffers for a single
- * packet. We have to do some special checking around the boundary (max_bufs-1)
- * if TSO is on since we need count the TSO header and payload separately.
- * E.g.: a packet with 7 fragments can require 9 DMA transactions; 1 for TSO
- * header, 1 for segment payload, and then 7 for the fragments.
- */
-bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
- unsigned int count)
-{
- if (likely(count < max_bufs))
- return false;
- if (skb_is_gso(skb))
- return __idpf_chk_linearize(skb, max_bufs);
-
- return count > max_bufs;
-}
/**
* idpf_tx_splitq_get_ctx_desc - grab next desc and update buffer ring
@@ -2543,17 +2874,14 @@ bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
* Since the TX buffer rings mimics the descriptor ring, update the tx buffer
* ring entry to reflect that this index is a context descriptor
*/
-static struct idpf_flex_tx_ctx_desc *
-idpf_tx_splitq_get_ctx_desc(struct idpf_queue *txq)
+static union idpf_flex_tx_ctx_desc *
+idpf_tx_splitq_get_ctx_desc(struct idpf_tx_queue *txq)
{
- struct idpf_flex_tx_ctx_desc *desc;
+ union idpf_flex_tx_ctx_desc *desc;
int i = txq->next_to_use;
- memset(&txq->tx_buf[i], 0, sizeof(struct idpf_tx_buf));
- txq->tx_buf[i].compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
-
/* grab the next descriptor */
- desc = IDPF_FLEX_TX_CTX_DESC(txq, i);
+ desc = &txq->flex_ctx[i];
txq->next_to_use = idpf_tx_splitq_bump_ntu(txq, i);
return desc;
@@ -2564,10 +2892,10 @@ idpf_tx_splitq_get_ctx_desc(struct idpf_queue *txq)
* @tx_q: queue to send buffer on
* @skb: pointer to skb
*/
-netdev_tx_t idpf_tx_drop_skb(struct idpf_queue *tx_q, struct sk_buff *skb)
+netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb)
{
u64_stats_update_begin(&tx_q->stats_sync);
- u64_stats_inc(&tx_q->q_stats.tx.skb_drops);
+ u64_stats_inc(&tx_q->q_stats.skb_drops);
u64_stats_update_end(&tx_q->stats_sync);
idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
@@ -2577,6 +2905,88 @@ netdev_tx_t idpf_tx_drop_skb(struct idpf_queue *tx_q, struct sk_buff *skb)
return NETDEV_TX_OK;
}
+#if (IS_ENABLED(CONFIG_PTP_1588_CLOCK))
+/**
+ * idpf_tx_tstamp - set up context descriptor for hardware timestamp
+ * @tx_q: queue to send buffer on
+ * @skb: pointer to the SKB we're sending
+ * @off: pointer to the offload struct
+ *
+ * Return: Positive index number on success, negative otherwise.
+ */
+static int idpf_tx_tstamp(struct idpf_tx_queue *tx_q, struct sk_buff *skb,
+ struct idpf_tx_offload_params *off)
+{
+ int err, idx;
+
+ /* only timestamp the outbound packet if the user has requested it */
+ if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
+ return -1;
+
+ if (!idpf_ptp_get_txq_tstamp_capability(tx_q))
+ return -1;
+
+ /* Tx timestamps cannot be sampled when doing TSO */
+ if (off->tx_flags & IDPF_TX_FLAGS_TSO)
+ return -1;
+
+ /* Grab an open timestamp slot */
+ err = idpf_ptp_request_ts(tx_q, skb, &idx);
+ if (err) {
+ u64_stats_update_begin(&tx_q->stats_sync);
+ u64_stats_inc(&tx_q->q_stats.tstamp_skipped);
+ u64_stats_update_end(&tx_q->stats_sync);
+
+ return -1;
+ }
+
+ off->tx_flags |= IDPF_TX_FLAGS_TSYN;
+
+ return idx;
+}
+
+/**
+ * idpf_tx_set_tstamp_desc - Set the Tx descriptor fields needed to generate
+ * PHY Tx timestamp
+ * @ctx_desc: Context descriptor
+ * @idx: Index of the Tx timestamp latch
+ */
+static void idpf_tx_set_tstamp_desc(union idpf_flex_tx_ctx_desc *ctx_desc,
+ u32 idx)
+{
+ ctx_desc->tsyn.qw1 = le64_encode_bits(IDPF_TX_DESC_DTYPE_CTX,
+ IDPF_TX_CTX_DTYPE_M) |
+ le64_encode_bits(IDPF_TX_CTX_DESC_TSYN,
+ IDPF_TX_CTX_CMD_M) |
+ le64_encode_bits(idx, IDPF_TX_CTX_TSYN_REG_M);
+}
+#else /* CONFIG_PTP_1588_CLOCK */
+static int idpf_tx_tstamp(struct idpf_tx_queue *tx_q, struct sk_buff *skb,
+ struct idpf_tx_offload_params *off)
+{
+ return -1;
+}
+
+static void idpf_tx_set_tstamp_desc(union idpf_flex_tx_ctx_desc *ctx_desc,
+ u32 idx)
+{ }
+#endif /* CONFIG_PTP_1588_CLOCK */
+
+/**
+ * idpf_tx_splitq_need_re - check whether RE bit needs to be set
+ * @tx_q: pointer to Tx queue
+ *
+ * Return: true if RE bit needs to be set, false otherwise
+ */
+static bool idpf_tx_splitq_need_re(struct idpf_tx_queue *tx_q)
+{
+ int gap = tx_q->next_to_use - tx_q->last_re;
+
+ gap += (gap < 0) ? tx_q->desc_count : 0;
+
+ return gap >= IDPF_TX_SPLITQ_RE_MIN_GAP;
+}
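	/* Worked example (editor's illustration, not part of the patch): with
	 * desc_count = 512, last_re = 500 and next_to_use = 10, the raw gap is
	 * 10 - 500 = -490, and adding desc_count yields 22 descriptors issued
	 * since the last RE.  The RE bit is requested again only once this
	 * distance reaches IDPF_TX_SPLITQ_RE_MIN_GAP.
	 */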
+
/**
* idpf_tx_splitq_frame - Sends buffer on Tx ring using flex descriptors
* @skb: send buffer
@@ -2585,14 +2995,18 @@ netdev_tx_t idpf_tx_drop_skb(struct idpf_queue *tx_q, struct sk_buff *skb)
* Returns NETDEV_TX_OK if sent, else an error code
*/
static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
- struct idpf_queue *tx_q)
+ struct idpf_tx_queue *tx_q)
{
- struct idpf_tx_splitq_params tx_params = { };
+ struct idpf_tx_splitq_params tx_params = {
+ .prev_ntu = tx_q->next_to_use,
+ };
+ union idpf_flex_tx_ctx_desc *ctx_desc;
struct idpf_tx_buf *first;
- unsigned int count;
- int tso;
+ u32 count, buf_count = 1;
+ int tso, idx;
+ u32 buf_id;
- count = idpf_tx_desc_count_required(tx_q, skb);
+ count = idpf_tx_res_count_required(tx_q, skb, &buf_count);
if (unlikely(!count))
return idpf_tx_drop_skb(tx_q, skb);
@@ -2602,7 +3016,7 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
/* Check for splitq specific TX resources */
count += (IDPF_TX_DESCS_PER_CACHE_LINE + tso);
- if (idpf_tx_maybe_stop_splitq(tx_q, count)) {
+ if (idpf_tx_maybe_stop_splitq(tx_q, count, buf_count)) {
idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
return NETDEV_TX_BUSY;
@@ -2610,8 +3024,7 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
if (tso) {
/* If tso is needed, set up context desc */
- struct idpf_flex_tx_ctx_desc *ctx_desc =
- idpf_tx_splitq_get_ctx_desc(tx_q);
+ ctx_desc = idpf_tx_splitq_get_ctx_desc(tx_q);
ctx_desc->tso.qw1.cmd_dtype =
cpu_to_le16(IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX |
@@ -2625,40 +3038,57 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
ctx_desc->tso.qw0.hdr_len = tx_params.offload.tso_hdr_len;
u64_stats_update_begin(&tx_q->stats_sync);
- u64_stats_inc(&tx_q->q_stats.tx.lso_pkts);
+ u64_stats_inc(&tx_q->q_stats.lso_pkts);
u64_stats_update_end(&tx_q->stats_sync);
}
- /* record the location of the first descriptor for this packet */
- first = &tx_q->tx_buf[tx_q->next_to_use];
- first->skb = skb;
-
- if (tso) {
- first->gso_segs = tx_params.offload.tso_segs;
- first->bytecount = skb->len +
- ((first->gso_segs - 1) * tx_params.offload.tso_hdr_len);
- } else {
- first->gso_segs = 1;
- first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
+ idx = idpf_tx_tstamp(tx_q, skb, &tx_params.offload);
+ if (idx != -1) {
+ ctx_desc = idpf_tx_splitq_get_ctx_desc(tx_q);
+ idpf_tx_set_tstamp_desc(ctx_desc, idx);
}
- if (test_bit(__IDPF_Q_FLOW_SCH_EN, tx_q->flags)) {
+ if (idpf_queue_has(FLOW_SCH_EN, tx_q)) {
+ struct idpf_sw_queue *refillq = tx_q->refillq;
+
+ /* Save refillq state in case of a packet rollback. Otherwise,
+ * the tags will be leaked since they will be popped from the
+ * refillq but never reposted during cleaning.
+ */
+ tx_params.prev_refill_gen =
+ idpf_queue_has(RFL_GEN_CHK, refillq);
+ tx_params.prev_refill_ntc = refillq->next_to_clean;
+
+ if (unlikely(!idpf_tx_get_free_buf_id(tx_q->refillq,
+ &buf_id))) {
+ if (tx_params.prev_refill_gen !=
+ idpf_queue_has(RFL_GEN_CHK, refillq))
+ idpf_queue_change(RFL_GEN_CHK, refillq);
+ refillq->next_to_clean = tx_params.prev_refill_ntc;
+
+ tx_q->next_to_use = tx_params.prev_ntu;
+ return idpf_tx_drop_skb(tx_q, skb);
+ }
+ tx_params.compl_tag = buf_id;
+
tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE;
tx_params.eop_cmd = IDPF_TXD_FLEX_FLOW_CMD_EOP;
- /* Set the RE bit to catch any packets that may have not been
- * stashed during RS completion cleaning. MIN_GAP is set to
- * MIN_RING size to ensure it will be set at least once each
- * time around the ring.
+ /* Set the RE bit to periodically "clean" the descriptor ring.
+ * MIN_GAP is set to MIN_RING size to ensure it will be set at
+ * least once each time around the ring.
*/
- if (!(tx_q->next_to_use % IDPF_TX_SPLITQ_RE_MIN_GAP)) {
+ if (idpf_tx_splitq_need_re(tx_q)) {
tx_params.eop_cmd |= IDPF_TXD_FLEX_FLOW_CMD_RE;
tx_q->txq_grp->num_completions_pending++;
+ tx_q->last_re = tx_q->next_to_use;
}
if (skb->ip_summed == CHECKSUM_PARTIAL)
tx_params.offload.td_cmd |= IDPF_TXD_FLEX_FLOW_CMD_CS_EN;
} else {
+ buf_id = tx_q->next_to_use;
+
tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2;
tx_params.eop_cmd = IDPF_TXD_LAST_DESC_CMD;
@@ -2666,25 +3096,37 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
tx_params.offload.td_cmd |= IDPF_TX_FLEX_DESC_CMD_CS_EN;
}
+ first = &tx_q->tx_buf[buf_id];
+ first->skb = skb;
+
+ if (tso) {
+ first->packets = tx_params.offload.tso_segs;
+ first->bytes = skb->len +
+ ((first->packets - 1) * tx_params.offload.tso_hdr_len);
+ } else {
+ first->packets = 1;
+ first->bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
+ }
+
idpf_tx_splitq_map(tx_q, &tx_params, first);
return NETDEV_TX_OK;
}
/**
- * idpf_tx_splitq_start - Selects the right Tx queue to send buffer
+ * idpf_tx_start - Selects the right Tx queue to send buffer
* @skb: send buffer
* @netdev: network interface device structure
*
* Returns NETDEV_TX_OK if sent, else an error code
*/
-netdev_tx_t idpf_tx_splitq_start(struct sk_buff *skb,
- struct net_device *netdev)
+netdev_tx_t idpf_tx_start(struct sk_buff *skb, struct net_device *netdev)
{
- struct idpf_vport *vport = idpf_netdev_to_vport(netdev);
- struct idpf_queue *tx_q;
+ const struct idpf_vport *vport = idpf_netdev_to_vport(netdev);
+ struct idpf_tx_queue *tx_q;
- if (unlikely(skb_get_queue_mapping(skb) >= vport->num_txq)) {
+ if (unlikely(skb_get_queue_mapping(skb) >=
+ vport->num_txq - vport->num_xdp_txq)) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
@@ -2701,31 +3143,10 @@ netdev_tx_t idpf_tx_splitq_start(struct sk_buff *skb,
return NETDEV_TX_OK;
}
- return idpf_tx_splitq_frame(skb, tx_q);
-}
-
-/**
- * idpf_ptype_to_htype - get a hash type
- * @decoded: Decoded Rx packet type related fields
- *
- * Returns appropriate hash type (such as PKT_HASH_TYPE_L2/L3/L4) to be used by
- * skb_set_hash based on PTYPE as parsed by HW Rx pipeline and is part of
- * Rx desc.
- */
-enum pkt_hash_types idpf_ptype_to_htype(const struct idpf_rx_ptype_decoded *decoded)
-{
- if (!decoded->known)
- return PKT_HASH_TYPE_NONE;
- if (decoded->payload_layer == IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2 &&
- decoded->inner_prot)
- return PKT_HASH_TYPE_L4;
- if (decoded->payload_layer == IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2 &&
- decoded->outer_ip)
- return PKT_HASH_TYPE_L3;
- if (decoded->outer_ip == IDPF_RX_PTYPE_OUTER_L2)
- return PKT_HASH_TYPE_L2;
-
- return PKT_HASH_TYPE_NONE;
+ if (idpf_is_queue_model_split(vport->txq_model))
+ return idpf_tx_splitq_frame(skb, tx_q);
+ else
+ return idpf_tx_singleq_frame(skb, tx_q);
}
/**
@@ -2735,20 +3156,21 @@ enum pkt_hash_types idpf_ptype_to_htype(const struct idpf_rx_ptype_decoded *deco
* @rx_desc: Receive descriptor
* @decoded: Decoded Rx packet type related fields
*/
-static void idpf_rx_hash(struct idpf_queue *rxq, struct sk_buff *skb,
- struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
- struct idpf_rx_ptype_decoded *decoded)
+static void
+idpf_rx_hash(const struct idpf_rx_queue *rxq, struct sk_buff *skb,
+ const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
+ struct libeth_rx_pt decoded)
{
u32 hash;
- if (unlikely(!idpf_is_feature_ena(rxq->vport, NETIF_F_RXHASH)))
+ if (!libeth_rx_pt_has_hash(rxq->xdp_rxq.dev, decoded))
return;
hash = le16_to_cpu(rx_desc->hash1) |
(rx_desc->ff2_mirrid_hash2.hash2 << 16) |
(rx_desc->hash3 << 24);
- skb_set_hash(skb, hash, idpf_ptype_to_htype(decoded));
+ libeth_rx_pt_set_hash(skb, hash, decoded);
}
/**
@@ -2760,92 +3182,83 @@ static void idpf_rx_hash(struct idpf_queue *rxq, struct sk_buff *skb,
*
* skb->protocol must be set before this function is called
*/
-static void idpf_rx_csum(struct idpf_queue *rxq, struct sk_buff *skb,
- struct idpf_rx_csum_decoded *csum_bits,
- struct idpf_rx_ptype_decoded *decoded)
+static void idpf_rx_csum(struct idpf_rx_queue *rxq, struct sk_buff *skb,
+ struct libeth_rx_csum csum_bits,
+ struct libeth_rx_pt decoded)
{
bool ipv4, ipv6;
/* check if Rx checksum is enabled */
- if (unlikely(!idpf_is_feature_ena(rxq->vport, NETIF_F_RXCSUM)))
+ if (!libeth_rx_pt_has_checksum(rxq->xdp_rxq.dev, decoded))
return;
/* check if HW has decoded the packet and checksum */
- if (!(csum_bits->l3l4p))
+ if (unlikely(!csum_bits.l3l4p))
return;
- ipv4 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV4);
- ipv6 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV6);
+ ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4;
+ ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6;
- if (ipv4 && (csum_bits->ipe || csum_bits->eipe))
+ if (unlikely(ipv4 && (csum_bits.ipe || csum_bits.eipe)))
goto checksum_fail;
- if (ipv6 && csum_bits->ipv6exadd)
+ if (unlikely(ipv6 && csum_bits.ipv6exadd))
return;
/* check for L4 errors and handle packets that were not able to be
* checksummed
*/
- if (csum_bits->l4e)
+ if (unlikely(csum_bits.l4e))
goto checksum_fail;
- /* Only report checksum unnecessary for ICMP, TCP, UDP, or SCTP */
- switch (decoded->inner_prot) {
- case IDPF_RX_PTYPE_INNER_PROT_ICMP:
- case IDPF_RX_PTYPE_INNER_PROT_TCP:
- case IDPF_RX_PTYPE_INNER_PROT_UDP:
- if (!csum_bits->raw_csum_inv) {
- u16 csum = csum_bits->raw_csum;
-
- skb->csum = csum_unfold((__force __sum16)~swab16(csum));
- skb->ip_summed = CHECKSUM_COMPLETE;
- } else {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- }
- break;
- case IDPF_RX_PTYPE_INNER_PROT_SCTP:
+ if (!csum_bits.raw_csum_valid ||
+ decoded.inner_prot == LIBETH_RX_PT_INNER_SCTP) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
- break;
- default:
- break;
+ return;
}
+ skb->csum = csum_unfold((__force __sum16)~swab16(csum_bits.raw_csum));
+ skb->ip_summed = CHECKSUM_COMPLETE;
+
return;
checksum_fail:
u64_stats_update_begin(&rxq->stats_sync);
- u64_stats_inc(&rxq->q_stats.rx.hw_csum_err);
+ u64_stats_inc(&rxq->q_stats.hw_csum_err);
u64_stats_update_end(&rxq->stats_sync);
}
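
The CHECKSUM_COMPLETE branch above converts the hardware's raw 16-bit checksum into the form the stack expects (byte-swapped and re-inverted); from there on it is ordinary Internet-checksum arithmetic. For reference, a user-space sketch of that arithmetic, purely illustrative; in the kernel this is done by csum_partial()/csum_fold():

#include <stddef.h>
#include <stdint.h>

/* Fold a 32-bit accumulator down to the final 16-bit complement. */
static uint16_t demo_csum_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);

	return (uint16_t)~sum;
}

/* RFC 1071-style sum over a buffer, taken as big-endian 16-bit words. */
static uint16_t demo_csum(const uint8_t *data, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)data[i] << 8 | data[i + 1];
	if (len & 1)
		sum += (uint32_t)data[len - 1] << 8;

	return demo_csum_fold(sum);
}
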
/**
* idpf_rx_splitq_extract_csum_bits - Extract checksum bits from descriptor
* @rx_desc: receive descriptor
- * @csum: structure to extract checksum fields
*
+ * Return: parsed checksum status.
**/
-static void idpf_rx_splitq_extract_csum_bits(struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
- struct idpf_rx_csum_decoded *csum)
+static struct libeth_rx_csum
+idpf_rx_splitq_extract_csum_bits(const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
{
+ struct libeth_rx_csum csum = { };
u8 qword0, qword1;
qword0 = rx_desc->status_err0_qw0;
qword1 = rx_desc->status_err0_qw1;
- csum->ipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_M,
+ csum.ipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_M,
+ qword1);
+ csum.eipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_M,
qword1);
- csum->eipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_M,
+ csum.l4e = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_M,
+ qword1);
+ csum.l3l4p = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_M,
qword1);
- csum->l4e = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_M,
- qword1);
- csum->l3l4p = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_M,
- qword1);
- csum->ipv6exadd = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_M,
- qword0);
- csum->raw_csum_inv =
- le16_get_bits(rx_desc->ptype_err_fflags0,
- VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_M);
- csum->raw_csum = le16_to_cpu(rx_desc->misc.raw_cs);
+ csum.ipv6exadd = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_M,
+ qword0);
+ csum.raw_csum_valid =
+ !le16_get_bits(rx_desc->ptype_err_fflags0,
+ VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_M);
+ csum.raw_csum = le16_to_cpu(rx_desc->misc.raw_cs);
+
+ return csum;
}
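
All of the per-flag extraction above goes through FIELD_GET() from <linux/bitfield.h>, which masks a constant bitfield out of a register word and shifts it down to bit 0, so a single-bit mask collapses to 0 or 1. A generic illustration; the mask positions below are invented and are not the virtchnl2 descriptor layout:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

static void demo_field_get(void)
{
	u8 qword = 0xAB;	/* 1010 1011b */
	u32 nibble, flag;

	nibble = FIELD_GET(GENMASK(7, 4), qword);	/* 0xA */
	flag = FIELD_GET(BIT(3), qword);		/* 1, as stored in csum.ipe etc. */
	(void)nibble;
	(void)flag;
}
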
/**
@@ -2860,36 +3273,34 @@ static void idpf_rx_splitq_extract_csum_bits(struct virtchnl2_rx_flex_desc_adv_n
* Populate the skb fields with the total number of RSC segments, RSC payload
* length and packet type.
*/
-static int idpf_rx_rsc(struct idpf_queue *rxq, struct sk_buff *skb,
- struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
- struct idpf_rx_ptype_decoded *decoded)
+static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb,
+ const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
+ struct libeth_rx_pt decoded)
{
u16 rsc_segments, rsc_seg_len;
bool ipv4, ipv6;
int len;
- if (unlikely(!decoded->outer_ip))
+ if (unlikely(libeth_rx_pt_get_ip_ver(decoded) ==
+ LIBETH_RX_PT_OUTER_L2))
return -EINVAL;
rsc_seg_len = le16_to_cpu(rx_desc->misc.rscseglen);
if (unlikely(!rsc_seg_len))
return -EINVAL;
- ipv4 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV4);
- ipv6 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV6);
+ ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4;
+ ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6;
if (unlikely(!(ipv4 ^ ipv6)))
return -EINVAL;
rsc_segments = DIV_ROUND_UP(skb->data_len, rsc_seg_len);
- if (unlikely(rsc_segments == 1))
- return 0;
NAPI_GRO_CB(skb)->count = rsc_segments;
skb_shinfo(skb)->gso_size = rsc_seg_len;
skb_reset_network_header(skb);
- len = skb->len - skb_transport_offset(skb);
if (ipv4) {
struct iphdr *ipv4h = ip_hdr(skb);
@@ -2898,6 +3309,7 @@ static int idpf_rx_rsc(struct idpf_queue *rxq, struct sk_buff *skb,
/* Reset and set transport header offset in skb */
skb_set_transport_header(skb, sizeof(struct iphdr));
+ len = skb->len - skb_transport_offset(skb);
/* Compute the TCP pseudo header checksum*/
tcp_hdr(skb)->check =
@@ -2907,6 +3319,7 @@ static int idpf_rx_rsc(struct idpf_queue *rxq, struct sk_buff *skb,
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
skb_set_transport_header(skb, sizeof(struct ipv6hdr));
+ len = skb->len - skb_transport_offset(skb);
tcp_hdr(skb)->check =
~tcp_v6_check(len, &ipv6h->saddr, &ipv6h->daddr, 0);
}
@@ -2914,14 +3327,41 @@ static int idpf_rx_rsc(struct idpf_queue *rxq, struct sk_buff *skb,
tcp_gro_complete(skb);
u64_stats_update_begin(&rxq->stats_sync);
- u64_stats_inc(&rxq->q_stats.rx.rsc_pkts);
+ u64_stats_inc(&rxq->q_stats.rsc_pkts);
u64_stats_update_end(&rxq->stats_sync);
return 0;
}
/**
- * idpf_rx_process_skb_fields - Populate skb header fields from Rx descriptor
+ * idpf_rx_hwtstamp - check for an RX timestamp and pass up the stack
+ * @rxq: pointer to the rx queue that receives the timestamp
+ * @rx_desc: pointer to rx descriptor containing timestamp
+ * @skb: skb to put timestamp in
+ */
+static void
+idpf_rx_hwtstamp(const struct idpf_rx_queue *rxq,
+ const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
+ struct sk_buff *skb)
+{
+ u64 cached_time, ts_ns;
+ u32 ts_high;
+
+ if (!(rx_desc->ts_low & VIRTCHNL2_RX_FLEX_TSTAMP_VALID))
+ return;
+
+ cached_time = READ_ONCE(rxq->cached_phc_time);
+
+ ts_high = le32_to_cpu(rx_desc->ts_high);
+ ts_ns = idpf_ptp_tstamp_extend_32b_to_64b(cached_time, ts_high);
+
+ *skb_hwtstamps(skb) = (struct skb_shared_hwtstamps) {
+ .hwtstamp = ns_to_ktime(ts_ns),
+ };
+}
+
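
idpf_ptp_tstamp_extend_32b_to_64b() itself is not part of this hunk. The usual way such helpers work is to anchor the 32-bit hardware stamp against the low word of a recently cached 64-bit PHC reading and extend it in whichever direction keeps the delta small; the sketch below assumes that approach and is not necessarily the exact implementation in idpf_ptp.c:

#include <stdint.h>

/*
 * Assumes the cached PHC time is within half the 32-bit range (roughly
 * two seconds of nanoseconds) of the packet timestamp.
 */
static uint64_t demo_extend_32b_to_64b(uint64_t cached_phc_ns, uint32_t hw_ts)
{
	uint32_t phc_lo = (uint32_t)cached_phc_ns;
	uint32_t delta = hw_ts - phc_lo;

	if (delta > UINT32_MAX / 2)
		/* the stamp is actually behind the cached time */
		return cached_phc_ns - (uint32_t)(phc_lo - hw_ts);

	return cached_phc_ns + delta;
}
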
+/**
+ * __idpf_rx_process_skb_fields - Populate skb header fields from Rx descriptor
* @rxq: Rx descriptor ring packet is being transacted on
* @skb: pointer to current skb being populated
* @rx_desc: Receive descriptor
@@ -2930,151 +3370,91 @@ static int idpf_rx_rsc(struct idpf_queue *rxq, struct sk_buff *skb,
* order to populate the hash, checksum, protocol, and
* other fields within the skb.
*/
-static int idpf_rx_process_skb_fields(struct idpf_queue *rxq,
- struct sk_buff *skb,
- struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
+static int
+__idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb,
+ const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
{
- struct idpf_rx_csum_decoded csum_bits = { };
- struct idpf_rx_ptype_decoded decoded;
+ struct libeth_rx_csum csum_bits;
+ struct libeth_rx_pt decoded;
u16 rx_ptype;
rx_ptype = le16_get_bits(rx_desc->ptype_err_fflags0,
VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M);
-
- skb->protocol = eth_type_trans(skb, rxq->vport->netdev);
-
- decoded = rxq->vport->rx_ptype_lkup[rx_ptype];
- /* If we don't know the ptype we can't do anything else with it. Just
- * pass it up the stack as-is.
- */
- if (!decoded.known)
- return 0;
+ decoded = rxq->rx_ptype_lkup[rx_ptype];
/* process RSS/hash */
- idpf_rx_hash(rxq, skb, rx_desc, &decoded);
+ idpf_rx_hash(rxq, skb, rx_desc, decoded);
+
+ if (idpf_queue_has(PTP, rxq))
+ idpf_rx_hwtstamp(rxq, rx_desc, skb);
if (le16_get_bits(rx_desc->hdrlen_flags,
VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M))
- return idpf_rx_rsc(rxq, skb, rx_desc, &decoded);
+ return idpf_rx_rsc(rxq, skb, rx_desc, decoded);
- idpf_rx_splitq_extract_csum_bits(rx_desc, &csum_bits);
- idpf_rx_csum(rxq, skb, &csum_bits, &decoded);
+ csum_bits = idpf_rx_splitq_extract_csum_bits(rx_desc);
+ idpf_rx_csum(rxq, skb, csum_bits, decoded);
return 0;
}
-/**
- * idpf_rx_add_frag - Add contents of Rx buffer to sk_buff as a frag
- * @rx_buf: buffer containing page to add
- * @skb: sk_buff to place the data into
- * @size: packet length from rx_desc
- *
- * This function will add the data contained in rx_buf->page to the skb.
- * It will just attach the page as a frag to the skb.
- * The function will then update the page offset.
- */
-void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb,
- unsigned int size)
+bool idpf_rx_process_skb_fields(struct sk_buff *skb,
+ const struct libeth_xdp_buff *xdp,
+ struct libeth_rq_napi_stats *rs)
{
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
- rx_buf->page_offset, size, rx_buf->truesize);
+ struct idpf_rx_queue *rxq;
- rx_buf->page = NULL;
-}
-
-/**
- * idpf_rx_construct_skb - Allocate skb and populate it
- * @rxq: Rx descriptor queue
- * @rx_buf: Rx buffer to pull data from
- * @size: the length of the packet
- *
- * This function allocates an skb. It then populates it with the page
- * data from the current receive descriptor, taking care to set up the
- * skb correctly.
- */
-struct sk_buff *idpf_rx_construct_skb(struct idpf_queue *rxq,
- struct idpf_rx_buf *rx_buf,
- unsigned int size)
-{
- unsigned int headlen;
- struct sk_buff *skb;
- void *va;
-
- va = page_address(rx_buf->page) + rx_buf->page_offset;
-
- /* prefetch first cache line of first page */
- net_prefetch(va);
- /* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rxq->q_vector->napi, IDPF_RX_HDR_SIZE,
- GFP_ATOMIC);
- if (unlikely(!skb)) {
- idpf_rx_put_page(rx_buf);
-
- return NULL;
- }
-
- skb_record_rx_queue(skb, rxq->idx);
- skb_mark_for_recycle(skb);
-
- /* Determine available headroom for copy */
- headlen = size;
- if (headlen > IDPF_RX_HDR_SIZE)
- headlen = eth_get_headlen(skb->dev, va, IDPF_RX_HDR_SIZE);
+ rxq = libeth_xdp_buff_to_rq(xdp, typeof(*rxq), xdp_rxq);
- /* align pull length to size of long to optimize memcpy performance */
- memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
-
- /* if we exhaust the linear part then add what is left as a frag */
- size -= headlen;
- if (!size) {
- idpf_rx_put_page(rx_buf);
-
- return skb;
- }
-
- skb_add_rx_frag(skb, 0, rx_buf->page, rx_buf->page_offset + headlen,
- size, rx_buf->truesize);
-
- /* Since we're giving the page to the stack, clear our reference to it.
- * We'll get a new one during buffer posting.
- */
- rx_buf->page = NULL;
-
- return skb;
+ return !__idpf_rx_process_skb_fields(rxq, skb, xdp->desc);
}
+LIBETH_XDP_DEFINE_START();
+LIBETH_XDP_DEFINE_RUN(static idpf_xdp_run_pass, idpf_xdp_run_prog,
+ idpf_xdp_tx_flush_bulk, idpf_rx_process_skb_fields);
+LIBETH_XDP_DEFINE_FINALIZE(static idpf_xdp_finalize_rx, idpf_xdp_tx_flush_bulk,
+ idpf_xdp_tx_finalize);
+LIBETH_XDP_DEFINE_END();
+
/**
- * idpf_rx_hdr_construct_skb - Allocate skb and populate it from header buffer
- * @rxq: Rx descriptor queue
- * @va: Rx buffer to pull data from
- * @size: the length of the packet
+ * idpf_rx_hsplit_wa - handle header buffer overflows and split errors
+ * @hdr: Rx buffer for the headers
+ * @buf: Rx buffer for the payload
+ * @data_len: number of bytes received to the payload buffer
+ *
+ * When a header buffer overflow occurs or the HW was unable to parse the
+ * packet type to perform header split, the whole frame gets placed in the
+ * payload buffer. We can't build a valid skb around a payload buffer when
+ * the header split is active since it doesn't reserve any head- or tailroom.
+ * In that case, copy either the whole frame when it's short or just the
+ * Ethernet header to the header buffer to be able to build an skb and adjust
+ * the data offset in the payload buffer, IOW emulate the header split.
*
- * This function allocates an skb. It then populates it with the page data from
- * the current receive descriptor, taking care to set up the skb correctly.
- * This specifically uses a header buffer to start building the skb.
+ * Return: number of bytes copied to the header buffer.
*/
-static struct sk_buff *idpf_rx_hdr_construct_skb(struct idpf_queue *rxq,
- const void *va,
- unsigned int size)
+static u32 idpf_rx_hsplit_wa(const struct libeth_fqe *hdr,
+ struct libeth_fqe *buf, u32 data_len)
{
- struct sk_buff *skb;
+ u32 copy = data_len <= L1_CACHE_BYTES ? data_len : ETH_HLEN;
+ struct page *hdr_page, *buf_page;
+ const void *src;
+ void *dst;
- /* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rxq->q_vector->napi, size, GFP_ATOMIC);
- if (unlikely(!skb))
- return NULL;
-
- skb_record_rx_queue(skb, rxq->idx);
+ if (unlikely(netmem_is_net_iov(buf->netmem)) ||
+ !libeth_rx_sync_for_cpu(buf, copy))
+ return 0;
- memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
+ hdr_page = __netmem_to_page(hdr->netmem);
+ buf_page = __netmem_to_page(buf->netmem);
+ dst = page_address(hdr_page) + hdr->offset +
+ pp_page_to_nmdesc(hdr_page)->pp->p.offset;
+ src = page_address(buf_page) + buf->offset +
+ pp_page_to_nmdesc(buf_page)->pp->p.offset;
- /* More than likely, a payload fragment, which will use a page from
- * page_pool will be added to the SKB so mark it for recycle
- * preemptively. And if not, it's inconsequential.
- */
- skb_mark_for_recycle(skb);
+ memcpy(dst, src, LARGEST_ALIGN(copy));
+ buf->offset += copy;
- return skb;
+ return copy;
}
/**
@@ -3116,50 +3496,47 @@ static bool idpf_rx_splitq_is_eop(struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_de
*
* Returns amount of work completed
*/
-static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget)
+static int idpf_rx_splitq_clean(struct idpf_rx_queue *rxq, int budget)
{
- int total_rx_bytes = 0, total_rx_pkts = 0;
- struct idpf_queue *rx_bufq = NULL;
- struct sk_buff *skb = rxq->skb;
+ struct idpf_buf_queue *rx_bufq = NULL;
+ struct libeth_rq_napi_stats rs = { };
u16 ntc = rxq->next_to_clean;
+ LIBETH_XDP_ONSTACK_BUFF(xdp);
+ LIBETH_XDP_ONSTACK_BULK(bq);
+
+ libeth_xdp_tx_init_bulk(&bq, rxq->xdp_prog, rxq->xdp_rxq.dev,
+ rxq->xdpsqs, rxq->num_xdp_txq);
+ libeth_xdp_init_buff(xdp, &rxq->xdp, &rxq->xdp_rxq);
/* Process Rx packets bounded by budget */
- while (likely(total_rx_pkts < budget)) {
+ while (likely(rs.packets < budget)) {
struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc;
+ struct libeth_fqe *hdr, *rx_buf = NULL;
struct idpf_sw_queue *refillq = NULL;
struct idpf_rxq_set *rxq_set = NULL;
- struct idpf_rx_buf *rx_buf = NULL;
- union virtchnl2_rx_desc *desc;
unsigned int pkt_len = 0;
unsigned int hdr_len = 0;
u16 gen_id, buf_id = 0;
- /* Header buffer overflow only valid for header split */
- bool hbo = false;
int bufq_id;
u8 rxdid;
/* get the Rx desc from Rx queue based on 'next_to_clean' */
- desc = IDPF_RX_DESC(rxq, ntc);
- rx_desc = (struct virtchnl2_rx_flex_desc_adv_nic_3 *)desc;
-
- /* This memory barrier is needed to keep us from reading
- * any other fields out of the rx_desc
- */
- dma_rmb();
+ rx_desc = &rxq->rx[ntc].flex_adv_nic_3_wb;
/* if the descriptor isn't done, no work yet to do */
gen_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M);
-
- if (test_bit(__IDPF_Q_GEN_CHK, rxq->flags) != gen_id)
+ if (idpf_queue_has(GEN_CHK, rxq) != gen_id)
break;
+ dma_rmb();
+
rxdid = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_RXDID_M,
rx_desc->rxdid_ucast);
if (rxdid != VIRTCHNL2_RXDID_2_FLEX_SPLITQ) {
IDPF_RX_BUMP_NTC(rxq, ntc);
u64_stats_update_begin(&rxq->stats_sync);
- u64_stats_inc(&rxq->q_stats.rx.bad_descs);
+ u64_stats_inc(&rxq->q_stats.bad_descs);
u64_stats_update_end(&rxq->stats_sync);
continue;
}
@@ -3167,140 +3544,118 @@ static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget)
pkt_len = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M);
- hbo = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_HBO_M,
- rx_desc->status_err0_qw1);
-
- if (unlikely(hbo)) {
- /* If a header buffer overflow, occurs, i.e. header is
- * too large to fit in the header split buffer, HW will
- * put the entire packet, including headers, in the
- * data/payload buffer.
- */
- u64_stats_update_begin(&rxq->stats_sync);
- u64_stats_inc(&rxq->q_stats.rx.hsplit_buf_ovf);
- u64_stats_update_end(&rxq->stats_sync);
- goto bypass_hsplit;
- }
-
- hdr_len = le16_get_bits(rx_desc->hdrlen_flags,
- VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_M);
-
-bypass_hsplit:
bufq_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M);
rxq_set = container_of(rxq, struct idpf_rxq_set, rxq);
- if (!bufq_id)
- refillq = rxq_set->refillq0;
- else
- refillq = rxq_set->refillq1;
+ refillq = rxq_set->refillq[bufq_id];
/* retrieve buffer from the rxq */
- rx_bufq = &rxq->rxq_grp->splitq.bufq_sets[bufq_id].bufq;
+ rx_bufq = &rxq->bufq_sets[bufq_id].bufq;
buf_id = le16_to_cpu(rx_desc->buf_id);
- rx_buf = &rx_bufq->rx_buf.buf[buf_id];
+ rx_buf = &rx_bufq->buf[buf_id];
+
+ if (!rx_bufq->hdr_pp)
+ goto payload;
+
+#define __HBO_BIT VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_HBO_M
+#define __HDR_LEN_MASK VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_M
+ if (likely(!(rx_desc->status_err0_qw1 & __HBO_BIT)))
+			/* If a header buffer overflow occurs, i.e. the header is
+ * too large to fit in the header split buffer, HW will
+ * put the entire packet, including headers, in the
+ * data/payload buffer.
+ */
+ hdr_len = le16_get_bits(rx_desc->hdrlen_flags,
+ __HDR_LEN_MASK);
+#undef __HDR_LEN_MASK
+#undef __HBO_BIT
- if (hdr_len) {
- const void *va = (u8 *)rx_bufq->rx_buf.hdr_buf_va +
- (u32)buf_id * IDPF_HDR_BUF_SIZE;
+ hdr = &rx_bufq->hdr_buf[buf_id];
+
+ if (unlikely(!hdr_len && !xdp->data)) {
+ hdr_len = idpf_rx_hsplit_wa(hdr, rx_buf, pkt_len);
+ /* If failed, drop both buffers by setting len to 0 */
+ pkt_len -= hdr_len ? : pkt_len;
- skb = idpf_rx_hdr_construct_skb(rxq, va, hdr_len);
u64_stats_update_begin(&rxq->stats_sync);
- u64_stats_inc(&rxq->q_stats.rx.hsplit_pkts);
+ u64_stats_inc(&rxq->q_stats.hsplit_buf_ovf);
u64_stats_update_end(&rxq->stats_sync);
}
- if (pkt_len) {
- idpf_rx_sync_for_cpu(rx_buf, pkt_len);
- if (skb)
- idpf_rx_add_frag(rx_buf, skb, pkt_len);
- else
- skb = idpf_rx_construct_skb(rxq, rx_buf,
- pkt_len);
- } else {
- idpf_rx_put_page(rx_buf);
- }
+ if (libeth_xdp_process_buff(xdp, hdr, hdr_len))
+ rs.hsplit++;
- /* exit if we failed to retrieve a buffer */
- if (!skb)
- break;
+ hdr->netmem = 0;
- idpf_rx_post_buf_refill(refillq, buf_id);
+payload:
+ libeth_xdp_process_buff(xdp, rx_buf, pkt_len);
+ rx_buf->netmem = 0;
+ idpf_post_buf_refill(refillq, buf_id);
IDPF_RX_BUMP_NTC(rxq, ntc);
- /* skip if it is non EOP desc */
- if (!idpf_rx_splitq_is_eop(rx_desc))
- continue;
- /* pad skb if needed (to make valid ethernet frame) */
- if (eth_skb_pad(skb)) {
- skb = NULL;
- continue;
- }
-
- /* probably a little skewed due to removing CRC */
- total_rx_bytes += skb->len;
-
- /* protocol */
- if (unlikely(idpf_rx_process_skb_fields(rxq, skb, rx_desc))) {
- dev_kfree_skb_any(skb);
- skb = NULL;
+ /* skip if it is non EOP desc */
+ if (!idpf_rx_splitq_is_eop(rx_desc) || unlikely(!xdp->data))
continue;
- }
-
- /* send completed skb up the stack */
- napi_gro_receive(&rxq->q_vector->napi, skb);
- skb = NULL;
- /* update budget accounting */
- total_rx_pkts++;
+ idpf_xdp_run_pass(xdp, &bq, rxq->napi, &rs, rx_desc);
}
+ idpf_xdp_finalize_rx(&bq);
+
rxq->next_to_clean = ntc;
+ libeth_xdp_save_buff(&rxq->xdp, xdp);
- rxq->skb = skb;
u64_stats_update_begin(&rxq->stats_sync);
- u64_stats_add(&rxq->q_stats.rx.packets, total_rx_pkts);
- u64_stats_add(&rxq->q_stats.rx.bytes, total_rx_bytes);
+ u64_stats_add(&rxq->q_stats.packets, rs.packets);
+ u64_stats_add(&rxq->q_stats.bytes, rs.bytes);
+ u64_stats_add(&rxq->q_stats.hsplit_pkts, rs.hsplit);
u64_stats_update_end(&rxq->stats_sync);
- /* guarantee a trip back through this routine if there was a failure */
- return total_rx_pkts;
+ return rs.packets;
}
/**
* idpf_rx_update_bufq_desc - Update buffer queue descriptor
* @bufq: Pointer to the buffer queue
- * @refill_desc: SW Refill queue descriptor containing buffer ID
+ * @buf_id: buffer ID
* @buf_desc: Buffer queue descriptor
*
* Return 0 on success and negative on failure.
*/
-static int idpf_rx_update_bufq_desc(struct idpf_queue *bufq, u16 refill_desc,
+static int idpf_rx_update_bufq_desc(struct idpf_buf_queue *bufq, u32 buf_id,
struct virtchnl2_splitq_rx_buf_desc *buf_desc)
{
- struct idpf_rx_buf *buf;
+ struct libeth_fq_fp fq = {
+ .pp = bufq->pp,
+ .fqes = bufq->buf,
+ .truesize = bufq->truesize,
+ .count = bufq->desc_count,
+ };
dma_addr_t addr;
- u16 buf_id;
-
- buf_id = FIELD_GET(IDPF_RX_BI_BUFID_M, refill_desc);
- buf = &bufq->rx_buf.buf[buf_id];
-
- addr = idpf_alloc_page(bufq->pp, buf, bufq->rx_buf_size);
- if (unlikely(addr == DMA_MAPPING_ERROR))
+ addr = libeth_rx_alloc(&fq, buf_id);
+ if (addr == DMA_MAPPING_ERROR)
return -ENOMEM;
buf_desc->pkt_addr = cpu_to_le64(addr);
buf_desc->qword0.buf_id = cpu_to_le16(buf_id);
- if (!bufq->rx_hsplit_en)
+ if (!idpf_queue_has(HSPLIT_EN, bufq))
return 0;
- buf_desc->hdr_addr = cpu_to_le64(bufq->rx_buf.hdr_buf_pa +
- (u32)buf_id * IDPF_HDR_BUF_SIZE);
+ fq.pp = bufq->hdr_pp;
+ fq.fqes = bufq->hdr_buf;
+ fq.truesize = bufq->hdr_truesize;
+
+ addr = libeth_rx_alloc(&fq, buf_id);
+ if (addr == DMA_MAPPING_ERROR)
+ return -ENOMEM;
+
+ buf_desc->hdr_addr = cpu_to_le64(addr);
return 0;
}
@@ -3312,38 +3667,37 @@ static int idpf_rx_update_bufq_desc(struct idpf_queue *bufq, u16 refill_desc,
*
* This function takes care of the buffer refill management
*/
-static void idpf_rx_clean_refillq(struct idpf_queue *bufq,
+static void idpf_rx_clean_refillq(struct idpf_buf_queue *bufq,
struct idpf_sw_queue *refillq)
{
struct virtchnl2_splitq_rx_buf_desc *buf_desc;
u16 bufq_nta = bufq->next_to_alloc;
u16 ntc = refillq->next_to_clean;
int cleaned = 0;
- u16 gen;
- buf_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, bufq_nta);
+ buf_desc = &bufq->split_buf[bufq_nta];
/* make sure we stop at ring wrap in the unlikely case ring is full */
while (likely(cleaned < refillq->desc_count)) {
- u16 refill_desc = IDPF_SPLITQ_RX_BI_DESC(refillq, ntc);
+ u32 buf_id, refill_desc = refillq->ring[ntc];
bool failure;
- gen = FIELD_GET(IDPF_RX_BI_GEN_M, refill_desc);
- if (test_bit(__IDPF_RFLQ_GEN_CHK, refillq->flags) != gen)
+ if (idpf_queue_has(RFL_GEN_CHK, refillq) !=
+ !!(refill_desc & IDPF_RFL_BI_GEN_M))
break;
- failure = idpf_rx_update_bufq_desc(bufq, refill_desc,
- buf_desc);
+ buf_id = FIELD_GET(IDPF_RFL_BI_BUFID_M, refill_desc);
+ failure = idpf_rx_update_bufq_desc(bufq, buf_id, buf_desc);
if (failure)
break;
if (unlikely(++ntc == refillq->desc_count)) {
- change_bit(__IDPF_RFLQ_GEN_CHK, refillq->flags);
+ idpf_queue_change(RFL_GEN_CHK, refillq);
ntc = 0;
}
if (unlikely(++bufq_nta == bufq->desc_count)) {
- buf_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, 0);
+ buf_desc = &bufq->split_buf[0];
bufq_nta = 0;
} else {
buf_desc++;
@@ -3372,16 +3726,21 @@ static void idpf_rx_clean_refillq(struct idpf_queue *bufq,
/**
* idpf_rx_clean_refillq_all - Clean all refill queues
* @bufq: buffer queue with refill queues
+ * @nid: ID of the closest NUMA node with memory
*
* Iterates through all refill queues assigned to the buffer queue assigned to
* this vector. Returns true if clean is complete within budget, false
* otherwise.
*/
-static void idpf_rx_clean_refillq_all(struct idpf_queue *bufq)
+static void idpf_rx_clean_refillq_all(struct idpf_buf_queue *bufq, int nid)
{
struct idpf_bufq_set *bufq_set;
int i;
+ page_pool_nid_changed(bufq->pp, nid);
+ if (bufq->hdr_pp)
+ page_pool_nid_changed(bufq->hdr_pp, nid);
+
bufq_set = container_of(bufq, struct idpf_bufq_set, bufq);
for (i = 0; i < bufq_set->num_refillqs; i++)
idpf_rx_clean_refillq(bufq, &bufq_set->refillqs[i]);
@@ -3399,7 +3758,7 @@ static irqreturn_t idpf_vport_intr_clean_queues(int __always_unused irq,
struct idpf_q_vector *q_vector = (struct idpf_q_vector *)data;
q_vector->total_events++;
- napi_schedule(&q_vector->napi);
+ napi_schedule_irqoff(&q_vector->napi);
return IRQ_HANDLED;
}
@@ -3437,11 +3796,13 @@ static void idpf_vport_intr_napi_dis_all(struct idpf_vport *vport)
*/
void idpf_vport_intr_rel(struct idpf_vport *vport)
{
- int i, j, v_idx;
-
- for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
+ for (u32 v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx];
+ kfree(q_vector->xsksq);
+ q_vector->xsksq = NULL;
+ kfree(q_vector->complq);
+ q_vector->complq = NULL;
kfree(q_vector->bufq);
q_vector->bufq = NULL;
kfree(q_vector->tx);
@@ -3450,28 +3811,22 @@ void idpf_vport_intr_rel(struct idpf_vport *vport)
q_vector->rx = NULL;
}
- /* Clean up the mapping of queues to vectors */
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ kfree(vport->q_vectors);
+ vport->q_vectors = NULL;
+}
- if (idpf_is_queue_model_split(vport->rxq_model))
- for (j = 0; j < rx_qgrp->splitq.num_rxq_sets; j++)
- rx_qgrp->splitq.rxq_sets[j]->rxq.q_vector = NULL;
- else
- for (j = 0; j < rx_qgrp->singleq.num_rxq; j++)
- rx_qgrp->singleq.rxqs[j]->q_vector = NULL;
- }
+static void idpf_q_vector_set_napi(struct idpf_q_vector *q_vector, bool link)
+{
+ struct napi_struct *napi = link ? &q_vector->napi : NULL;
+ struct net_device *dev = q_vector->vport->netdev;
- if (idpf_is_queue_model_split(vport->txq_model))
- for (i = 0; i < vport->num_txq_grp; i++)
- vport->txq_grps[i].complq->q_vector = NULL;
- else
- for (i = 0; i < vport->num_txq_grp; i++)
- for (j = 0; j < vport->txq_grps[i].num_txq; j++)
- vport->txq_grps[i].txqs[j]->q_vector = NULL;
+ for (u32 i = 0; i < q_vector->num_rxq; i++)
+ netif_queue_set_napi(dev, q_vector->rx[i]->idx,
+ NETDEV_QUEUE_TYPE_RX, napi);
- kfree(vport->q_vectors);
- vport->q_vectors = NULL;
+ for (u32 i = 0; i < q_vector->num_txq; i++)
+ netif_queue_set_napi(dev, q_vector->tx[i]->idx,
+ NETDEV_QUEUE_TYPE_TX, napi);
}
/**
@@ -3494,9 +3849,8 @@ static void idpf_vport_intr_rel_irq(struct idpf_vport *vport)
vidx = vport->q_vector_idxs[vector];
irq_num = adapter->msix_entries[vidx].vector;
- /* clear the affinity_mask in the IRQ descriptor */
- irq_set_affinity_hint(irq_num, NULL);
- free_irq(irq_num, q_vector);
+ idpf_q_vector_set_napi(q_vector, false);
+ kfree(free_irq(irq_num, q_vector));
}
}
@@ -3509,6 +3863,8 @@ static void idpf_vport_intr_dis_irq_all(struct idpf_vport *vport)
struct idpf_q_vector *q_vector = vport->q_vectors;
int q_idx;
+ writel(0, vport->noirq_dyn_ctl);
+
for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++)
writel(0, q_vector[q_idx].intr_reg.dyn_ctl);
}
@@ -3516,21 +3872,31 @@ static void idpf_vport_intr_dis_irq_all(struct idpf_vport *vport)
/**
* idpf_vport_intr_buildreg_itr - Enable default interrupt generation settings
* @q_vector: pointer to q_vector
- * @type: itr index
- * @itr: itr value
*/
-static u32 idpf_vport_intr_buildreg_itr(struct idpf_q_vector *q_vector,
- const int type, u16 itr)
+static u32 idpf_vport_intr_buildreg_itr(struct idpf_q_vector *q_vector)
{
- u32 itr_val;
+ u32 itr_val = q_vector->intr_reg.dyn_ctl_intena_m;
+ int type = IDPF_NO_ITR_UPDATE_IDX;
+ u16 itr = 0;
+
+ if (q_vector->wb_on_itr) {
+ /*
+ * Trigger a software interrupt when exiting wb_on_itr, to make
+ * sure we catch any pending write backs that might have been
+ * missed due to interrupt state transition.
+ */
+ itr_val |= q_vector->intr_reg.dyn_ctl_swint_trig_m |
+ q_vector->intr_reg.dyn_ctl_sw_itridx_ena_m;
+ type = IDPF_SW_ITR_UPDATE_IDX;
+ itr = IDPF_ITR_20K;
+ }
itr &= IDPF_ITR_MASK;
/* Don't clear PBA because that can cause lost interrupts that
* came in while we were cleaning/polling
*/
- itr_val = q_vector->intr_reg.dyn_ctl_intena_m |
- (type << q_vector->intr_reg.dyn_ctl_itridx_s) |
- (itr << (q_vector->intr_reg.dyn_ctl_intrvl_s - 1));
+ itr_val |= (type << q_vector->intr_reg.dyn_ctl_itridx_s) |
+ (itr << (q_vector->intr_reg.dyn_ctl_intrvl_s - 1));
return itr_val;
}
@@ -3580,38 +3946,38 @@ static void idpf_net_dim(struct idpf_q_vector *q_vector)
goto check_rx_itr;
for (i = 0, packets = 0, bytes = 0; i < q_vector->num_txq; i++) {
- struct idpf_queue *txq = q_vector->tx[i];
+ struct idpf_tx_queue *txq = q_vector->tx[i];
unsigned int start;
do {
start = u64_stats_fetch_begin(&txq->stats_sync);
- packets += u64_stats_read(&txq->q_stats.tx.packets);
- bytes += u64_stats_read(&txq->q_stats.tx.bytes);
+ packets += u64_stats_read(&txq->q_stats.packets);
+ bytes += u64_stats_read(&txq->q_stats.bytes);
} while (u64_stats_fetch_retry(&txq->stats_sync, start));
}
idpf_update_dim_sample(q_vector, &dim_sample, &q_vector->tx_dim,
packets, bytes);
- net_dim(&q_vector->tx_dim, dim_sample);
+ net_dim(&q_vector->tx_dim, &dim_sample);
check_rx_itr:
if (!IDPF_ITR_IS_DYNAMIC(q_vector->rx_intr_mode))
return;
for (i = 0, packets = 0, bytes = 0; i < q_vector->num_rxq; i++) {
- struct idpf_queue *rxq = q_vector->rx[i];
+ struct idpf_rx_queue *rxq = q_vector->rx[i];
unsigned int start;
do {
start = u64_stats_fetch_begin(&rxq->stats_sync);
- packets += u64_stats_read(&rxq->q_stats.rx.packets);
- bytes += u64_stats_read(&rxq->q_stats.rx.bytes);
+ packets += u64_stats_read(&rxq->q_stats.packets);
+ bytes += u64_stats_read(&rxq->q_stats.bytes);
} while (u64_stats_fetch_retry(&rxq->stats_sync, start));
}
idpf_update_dim_sample(q_vector, &dim_sample, &q_vector->rx_dim,
packets, bytes);
- net_dim(&q_vector->rx_dim, dim_sample);
+ net_dim(&q_vector->rx_dim, &dim_sample);
}
/**
@@ -3628,8 +3994,8 @@ void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector)
/* net_dim() updates ITR out-of-band using a work item */
idpf_net_dim(q_vector);
- intval = idpf_vport_intr_buildreg_itr(q_vector,
- IDPF_NO_ITR_UPDATE_IDX, 0);
+ intval = idpf_vport_intr_buildreg_itr(q_vector);
+ q_vector->wb_on_itr = false;
writel(intval, q_vector->intr_reg.dyn_ctl);
}
@@ -3637,16 +4003,19 @@ void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector)
/**
* idpf_vport_intr_req_irq - get MSI-X vectors from the OS for the vport
* @vport: main vport structure
- * @basename: name for the vector
*/
-static int idpf_vport_intr_req_irq(struct idpf_vport *vport, char *basename)
+static int idpf_vport_intr_req_irq(struct idpf_vport *vport)
{
struct idpf_adapter *adapter = vport->adapter;
+ const char *drv_name, *if_name, *vec_name;
int vector, err, irq_num, vidx;
- const char *vec_name;
+
+ drv_name = dev_driver_string(&adapter->pdev->dev);
+ if_name = netdev_name(vport->netdev);
for (vector = 0; vector < vport->num_q_vectors; vector++) {
struct idpf_q_vector *q_vector = &vport->q_vectors[vector];
+ char *name;
vidx = vport->q_vector_idxs[vector];
irq_num = adapter->msix_entries[vidx].vector;
@@ -3660,18 +4029,18 @@ static int idpf_vport_intr_req_irq(struct idpf_vport *vport, char *basename)
else
continue;
- q_vector->name = kasprintf(GFP_KERNEL, "%s-%s-%d",
- basename, vec_name, vidx);
+ name = kasprintf(GFP_KERNEL, "%s-%s-%s-%d", drv_name, if_name,
+ vec_name, vidx);
err = request_irq(irq_num, idpf_vport_intr_clean_queues, 0,
- q_vector->name, q_vector);
+ name, q_vector);
if (err) {
netdev_err(vport->netdev,
"Request_irq failed, error: %d\n", err);
goto free_q_irqs;
}
- /* assign the mask for this irq */
- irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
+
+ idpf_q_vector_set_napi(q_vector, true);
}
return 0;
@@ -3680,7 +4049,7 @@ free_q_irqs:
while (--vector >= 0) {
vidx = vport->q_vector_idxs[vector];
irq_num = adapter->msix_entries[vidx].vector;
- free_irq(irq_num, &vport->q_vectors[vector]);
+ kfree(free_irq(irq_num, &vport->q_vectors[vector]));
}
return err;
@@ -3739,6 +4108,8 @@ static void idpf_vport_intr_ena_irq_all(struct idpf_vport *vport)
if (qv->num_txq || qv->num_rxq)
idpf_vport_intr_update_itr_ena_irq(qv);
}
+
+ writel(vport->noirq_dyn_ctl_ena, vport->noirq_dyn_ctl);
}
/**
@@ -3747,9 +4118,9 @@ static void idpf_vport_intr_ena_irq_all(struct idpf_vport *vport)
*/
void idpf_vport_intr_deinit(struct idpf_vport *vport)
{
+ idpf_vport_intr_dis_irq_all(vport);
idpf_vport_intr_napi_dis_all(vport);
idpf_vport_intr_napi_del_all(vport);
- idpf_vport_intr_dis_irq_all(vport);
idpf_vport_intr_rel_irq(vport);
}
@@ -3847,16 +4218,17 @@ static void idpf_vport_intr_napi_ena_all(struct idpf_vport *vport)
static bool idpf_tx_splitq_clean_all(struct idpf_q_vector *q_vec,
int budget, int *cleaned)
{
- u16 num_txq = q_vec->num_txq;
+ u16 num_complq = q_vec->num_complq;
bool clean_complete = true;
int i, budget_per_q;
- if (unlikely(!num_txq))
+ if (unlikely(!num_complq))
return true;
- budget_per_q = DIV_ROUND_UP(budget, num_txq);
- for (i = 0; i < num_txq; i++)
- clean_complete &= idpf_tx_clean_complq(q_vec->tx[i],
+ budget_per_q = DIV_ROUND_UP(budget, num_complq);
+
+ for (i = 0; i < num_complq; i++)
+ clean_complete &= idpf_tx_clean_complq(q_vec->complq[i],
budget_per_q, cleaned);
return clean_complete;
@@ -3877,16 +4249,19 @@ static bool idpf_rx_splitq_clean_all(struct idpf_q_vector *q_vec, int budget,
bool clean_complete = true;
int pkts_cleaned = 0;
int i, budget_per_q;
+ int nid;
/* We attempt to distribute budget to each Rx queue fairly, but don't
* allow the budget to go below 1 because that would exit polling early.
*/
budget_per_q = num_rxq ? max(budget / num_rxq, 1) : 0;
for (i = 0; i < num_rxq; i++) {
- struct idpf_queue *rxq = q_vec->rx[i];
+ struct idpf_rx_queue *rxq = q_vec->rx[i];
int pkts_cleaned_per_q;
- pkts_cleaned_per_q = idpf_rx_splitq_clean(rxq, budget_per_q);
+ pkts_cleaned_per_q = idpf_queue_has(XSK, rxq) ?
+ idpf_xskrq_poll(rxq, budget_per_q) :
+ idpf_rx_splitq_clean(rxq, budget_per_q);
/* if we clean as many as budgeted, we must not be done */
if (pkts_cleaned_per_q >= budget_per_q)
clean_complete = false;
@@ -3894,8 +4269,12 @@ static bool idpf_rx_splitq_clean_all(struct idpf_q_vector *q_vec, int budget,
}
*cleaned = pkts_cleaned;
- for (i = 0; i < q_vec->num_bufq; i++)
- idpf_rx_clean_refillq_all(q_vec->bufq[i]);
+ nid = numa_mem_id();
+
+ for (i = 0; i < q_vec->num_bufq; i++) {
+ if (!idpf_queue_has(XSK, q_vec->bufq[i]))
+ idpf_rx_clean_refillq_all(q_vec->bufq[i], nid);
+ }
return clean_complete;
}
@@ -3909,7 +4288,7 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
{
struct idpf_q_vector *q_vector =
container_of(napi, struct idpf_q_vector, napi);
- bool clean_complete;
+ bool clean_complete = true;
int work_done = 0;
/* Handle case where we are called by netpoll with a budget of 0 */
@@ -3919,30 +4298,31 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
return 0;
}
- clean_complete = idpf_rx_splitq_clean_all(q_vector, budget, &work_done);
- clean_complete &= idpf_tx_splitq_clean_all(q_vector, budget, &work_done);
+ for (u32 i = 0; i < q_vector->num_xsksq; i++)
+ clean_complete &= idpf_xsk_xmit(q_vector->xsksq[i]);
+
+ clean_complete &= idpf_tx_splitq_clean_all(q_vector, budget,
+ &work_done);
+ clean_complete &= idpf_rx_splitq_clean_all(q_vector, budget,
+ &work_done);
/* If work not completed, return budget and polling will return */
- if (!clean_complete)
+ if (!clean_complete) {
+ idpf_vport_intr_set_wb_on_itr(q_vector);
return budget;
+ }
work_done = min_t(int, work_done, budget - 1);
/* Exit the polling mode, but don't re-enable interrupts if stack might
* poll us due to busy-polling
*/
- if (likely(napi_complete_done(napi, work_done)))
+ if (napi_complete_done(napi, work_done))
idpf_vport_intr_update_itr_ena_irq(q_vector);
-
- /* Switch to poll mode in the tear-down path after sending disable
- * queues virtchnl message, as the interrupts will be disabled after
- * that
- */
- if (unlikely(q_vector->num_txq && test_bit(__IDPF_Q_POLL_MODE,
- q_vector->tx[0]->flags)))
- return budget;
else
- return work_done;
+ idpf_vport_intr_set_wb_on_itr(q_vector);
+
+ return work_done;
}
/**
@@ -3953,27 +4333,28 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
*/
static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport)
{
- u16 num_txq_grp = vport->num_txq_grp;
- int i, j, qv_idx, bufq_vidx = 0;
+ u16 num_txq_grp = vport->num_txq_grp - vport->num_xdp_txq;
+ bool split = idpf_is_queue_model_split(vport->rxq_model);
struct idpf_rxq_group *rx_qgrp;
struct idpf_txq_group *tx_qgrp;
- struct idpf_queue *q, *bufq;
- u16 q_index;
+ u32 i, qv_idx, q_index;
for (i = 0, qv_idx = 0; i < vport->num_rxq_grp; i++) {
u16 num_rxq;
+ if (qv_idx >= vport->num_q_vectors)
+ qv_idx = 0;
+
rx_qgrp = &vport->rxq_grps[i];
- if (idpf_is_queue_model_split(vport->rxq_model))
+ if (split)
num_rxq = rx_qgrp->splitq.num_rxq_sets;
else
num_rxq = rx_qgrp->singleq.num_rxq;
- for (j = 0; j < num_rxq; j++) {
- if (qv_idx >= vport->num_q_vectors)
- qv_idx = 0;
+ for (u32 j = 0; j < num_rxq; j++) {
+ struct idpf_rx_queue *q;
- if (idpf_is_queue_model_split(vport->rxq_model))
+ if (split)
q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
else
q = rx_qgrp->singleq.rxqs[j];
@@ -3981,52 +4362,68 @@ static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport)
q_index = q->q_vector->num_rxq;
q->q_vector->rx[q_index] = q;
q->q_vector->num_rxq++;
- qv_idx++;
+
+ if (split)
+ q->napi = &q->q_vector->napi;
}
- if (idpf_is_queue_model_split(vport->rxq_model)) {
- for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ if (split) {
+ for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ struct idpf_buf_queue *bufq;
+
bufq = &rx_qgrp->splitq.bufq_sets[j].bufq;
- bufq->q_vector = &vport->q_vectors[bufq_vidx];
+ bufq->q_vector = &vport->q_vectors[qv_idx];
q_index = bufq->q_vector->num_bufq;
bufq->q_vector->bufq[q_index] = bufq;
bufq->q_vector->num_bufq++;
}
- if (++bufq_vidx >= vport->num_q_vectors)
- bufq_vidx = 0;
}
+
+ qv_idx++;
}
+ split = idpf_is_queue_model_split(vport->txq_model);
+
for (i = 0, qv_idx = 0; i < num_txq_grp; i++) {
u16 num_txq;
+ if (qv_idx >= vport->num_q_vectors)
+ qv_idx = 0;
+
tx_qgrp = &vport->txq_grps[i];
num_txq = tx_qgrp->num_txq;
- if (idpf_is_queue_model_split(vport->txq_model)) {
- if (qv_idx >= vport->num_q_vectors)
- qv_idx = 0;
+ for (u32 j = 0; j < num_txq; j++) {
+ struct idpf_tx_queue *q;
- q = tx_qgrp->complq;
+ q = tx_qgrp->txqs[j];
q->q_vector = &vport->q_vectors[qv_idx];
- q_index = q->q_vector->num_txq;
- q->q_vector->tx[q_index] = q;
- q->q_vector->num_txq++;
- qv_idx++;
- } else {
- for (j = 0; j < num_txq; j++) {
- if (qv_idx >= vport->num_q_vectors)
- qv_idx = 0;
+ q->q_vector->tx[q->q_vector->num_txq++] = q;
+ }
- q = tx_qgrp->txqs[j];
- q->q_vector = &vport->q_vectors[qv_idx];
- q_index = q->q_vector->num_txq;
- q->q_vector->tx[q_index] = q;
- q->q_vector->num_txq++;
+ if (split) {
+ struct idpf_compl_queue *q = tx_qgrp->complq;
- qv_idx++;
- }
+ q->q_vector = &vport->q_vectors[qv_idx];
+ q->q_vector->complq[q->q_vector->num_complq++] = q;
}
+
+ qv_idx++;
+ }
+
+ for (i = 0; i < vport->num_xdp_txq; i++) {
+ struct idpf_tx_queue *xdpsq;
+ struct idpf_q_vector *qv;
+
+ xdpsq = vport->txqs[vport->xdp_txq_offset + i];
+ if (!idpf_queue_has(XSK, xdpsq))
+ continue;
+
+ qv = idpf_find_rxq_vec(vport, i);
+ idpf_xsk_init_wakeup(qv);
+
+ xdpsq->q_vector = qv;
+ qv->xsksq[qv->num_xsksq++] = xdpsq;
}
}
@@ -4048,6 +4445,8 @@ static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport)
for (i = 0; i < vport->num_q_vectors; i++)
vport->q_vectors[i].v_idx = vport->q_vector_idxs[i];
+ vport->noirq_v_idx = vport->q_vector_idxs[i];
+
return 0;
}
@@ -4061,6 +4460,8 @@ static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport)
for (i = 0; i < vport->num_q_vectors; i++)
vport->q_vectors[i].v_idx = vecids[vport->q_vector_idxs[i]];
+ vport->noirq_v_idx = vecids[vport->q_vector_idxs[i]];
+
kfree(vecids);
return 0;
@@ -4073,7 +4474,8 @@ static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport)
static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport)
{
int (*napi_poll)(struct napi_struct *napi, int budget);
- u16 v_idx;
+ u16 v_idx, qv_idx;
+ int irq_num;
if (idpf_is_queue_model_split(vport->txq_model))
napi_poll = idpf_vport_splitq_napi_poll;
@@ -4082,12 +4484,12 @@ static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport)
for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx];
+ qv_idx = vport->q_vector_idxs[v_idx];
+ irq_num = vport->adapter->msix_entries[qv_idx].vector;
- netif_napi_add(vport->netdev, &q_vector->napi, napi_poll);
-
- /* only set affinity_mask if the CPU is online */
- if (cpu_online(v_idx))
- cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
+ netif_napi_add_config(vport->netdev, &q_vector->napi,
+ napi_poll, v_idx);
+ netif_napi_set_irq(&q_vector->napi, irq_num);
}
}
@@ -4101,58 +4503,74 @@ static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport)
int idpf_vport_intr_alloc(struct idpf_vport *vport)
{
u16 txqs_per_vector, rxqs_per_vector, bufqs_per_vector;
+ struct idpf_vport_user_config_data *user_config;
struct idpf_q_vector *q_vector;
- int v_idx, err;
+ struct idpf_q_coalesce *q_coal;
+ u32 complqs_per_vector, v_idx;
+ u16 idx = vport->idx;
+ user_config = &vport->adapter->vport_config[idx]->user_config;
vport->q_vectors = kcalloc(vport->num_q_vectors,
sizeof(struct idpf_q_vector), GFP_KERNEL);
if (!vport->q_vectors)
return -ENOMEM;
- txqs_per_vector = DIV_ROUND_UP(vport->num_txq, vport->num_q_vectors);
- rxqs_per_vector = DIV_ROUND_UP(vport->num_rxq, vport->num_q_vectors);
+ txqs_per_vector = DIV_ROUND_UP(vport->num_txq_grp,
+ vport->num_q_vectors);
+ rxqs_per_vector = DIV_ROUND_UP(vport->num_rxq_grp,
+ vport->num_q_vectors);
bufqs_per_vector = vport->num_bufqs_per_qgrp *
DIV_ROUND_UP(vport->num_rxq_grp,
vport->num_q_vectors);
+ complqs_per_vector = DIV_ROUND_UP(vport->num_txq_grp,
+ vport->num_q_vectors);
for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
q_vector = &vport->q_vectors[v_idx];
+ q_coal = &user_config->q_coalesce[v_idx];
q_vector->vport = vport;
- q_vector->tx_itr_value = IDPF_ITR_TX_DEF;
- q_vector->tx_intr_mode = IDPF_ITR_DYNAMIC;
+ q_vector->tx_itr_value = q_coal->tx_coalesce_usecs;
+ q_vector->tx_intr_mode = q_coal->tx_intr_mode;
q_vector->tx_itr_idx = VIRTCHNL2_ITR_IDX_1;
- q_vector->rx_itr_value = IDPF_ITR_RX_DEF;
- q_vector->rx_intr_mode = IDPF_ITR_DYNAMIC;
+ q_vector->rx_itr_value = q_coal->rx_coalesce_usecs;
+ q_vector->rx_intr_mode = q_coal->rx_intr_mode;
q_vector->rx_itr_idx = VIRTCHNL2_ITR_IDX_0;
- q_vector->tx = kcalloc(txqs_per_vector,
- sizeof(struct idpf_queue *),
+ q_vector->tx = kcalloc(txqs_per_vector, sizeof(*q_vector->tx),
GFP_KERNEL);
- if (!q_vector->tx) {
- err = -ENOMEM;
+ if (!q_vector->tx)
goto error;
- }
- q_vector->rx = kcalloc(rxqs_per_vector,
- sizeof(struct idpf_queue *),
+ q_vector->rx = kcalloc(rxqs_per_vector, sizeof(*q_vector->rx),
GFP_KERNEL);
- if (!q_vector->rx) {
- err = -ENOMEM;
+ if (!q_vector->rx)
goto error;
- }
if (!idpf_is_queue_model_split(vport->rxq_model))
continue;
q_vector->bufq = kcalloc(bufqs_per_vector,
- sizeof(struct idpf_queue *),
+ sizeof(*q_vector->bufq),
GFP_KERNEL);
- if (!q_vector->bufq) {
- err = -ENOMEM;
+ if (!q_vector->bufq)
+ goto error;
+
+ q_vector->complq = kcalloc(complqs_per_vector,
+ sizeof(*q_vector->complq),
+ GFP_KERNEL);
+ if (!q_vector->complq)
+ goto error;
+
+ if (!vport->xdp_txq_offset)
+ continue;
+
+ q_vector->xsksq = kcalloc(rxqs_per_vector,
+ sizeof(*q_vector->xsksq),
+ GFP_KERNEL);
+ if (!q_vector->xsksq)
goto error;
- }
}
return 0;
@@ -4160,7 +4578,7 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport)
error:
idpf_vport_intr_rel(vport);
- return err;
+ return -ENOMEM;
}
/**
@@ -4171,7 +4589,6 @@ error:
*/
int idpf_vport_intr_init(struct idpf_vport *vport)
{
- char *int_name;
int err;
err = idpf_vport_intr_init_vec_idx(vport);
@@ -4180,31 +4597,29 @@ int idpf_vport_intr_init(struct idpf_vport *vport)
idpf_vport_intr_map_vector_to_qs(vport);
idpf_vport_intr_napi_add_all(vport);
- idpf_vport_intr_napi_ena_all(vport);
err = vport->adapter->dev_ops.reg_ops.intr_reg_init(vport);
if (err)
goto unroll_vectors_alloc;
- int_name = kasprintf(GFP_KERNEL, "%s-%s",
- dev_driver_string(&vport->adapter->pdev->dev),
- vport->netdev->name);
-
- err = idpf_vport_intr_req_irq(vport, int_name);
+ err = idpf_vport_intr_req_irq(vport);
if (err)
goto unroll_vectors_alloc;
- idpf_vport_intr_ena_irq_all(vport);
-
return 0;
unroll_vectors_alloc:
- idpf_vport_intr_napi_dis_all(vport);
idpf_vport_intr_napi_del_all(vport);
return err;
}
+void idpf_vport_intr_ena(struct idpf_vport *vport)
+{
+ idpf_vport_intr_napi_ena_all(vport);
+ idpf_vport_intr_ena_irq_all(vport);
+}
+
/**
* idpf_config_rss - Send virtchnl messages to configure RSS
* @vport: virtual port
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index df76493faa75..75b977094741 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -4,9 +4,16 @@
#ifndef _IDPF_TXRX_H_
#define _IDPF_TXRX_H_
-#include <net/page_pool/helpers.h>
-#include <net/tcp.h>
+#include <linux/dim.h>
+
+#include <net/libeth/cache.h>
+#include <net/libeth/types.h>
#include <net/netdev_queues.h>
+#include <net/tcp.h>
+#include <net/xdp.h>
+
+#include "idpf_lan_txrx.h"
+#include "virtchnl2_lan_desc.h"
#define IDPF_LARGE_MAX_Q 256
#define IDPF_MAX_Q 16
@@ -52,6 +59,9 @@
/* Default vector sharing */
#define IDPF_MBX_Q_VEC 1
#define IDPF_MIN_Q_VEC 1
+#define IDPF_MIN_RDMA_VEC 2
+/* Data vector for NOIRQ queues */
+#define IDPF_RESERVED_VECS 1
#define IDPF_DFLT_TX_Q_DESC_COUNT 512
#define IDPF_DFLT_TX_COMPLQ_DESC_COUNT 512
@@ -81,7 +91,7 @@
do { \
if (unlikely(++(ntc) == (rxq)->desc_count)) { \
ntc = 0; \
- change_bit(__IDPF_Q_GEN_CHK, (rxq)->flags); \
+ idpf_queue_change(GEN_CHK, rxq); \
} \
} while (0)
@@ -91,16 +101,10 @@ do { \
idx = 0; \
} while (0)
-#define IDPF_RX_HDR_SIZE 256
-#define IDPF_RX_BUF_2048 2048
-#define IDPF_RX_BUF_4096 4096
#define IDPF_RX_BUF_STRIDE 32
#define IDPF_RX_BUF_POST_STRIDE 16
#define IDPF_LOW_WATERMARK 64
-/* Size of header buffer specifically for header split */
-#define IDPF_HDR_BUF_SIZE 256
-#define IDPF_PACKET_HDR_PAD \
- (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN * 2)
+
#define IDPF_TX_TSO_MIN_MSS 88
/* Minimum number of descriptors between 2 descriptors with the RE bit set;
@@ -108,54 +112,26 @@ do { \
*/
#define IDPF_TX_SPLITQ_RE_MIN_GAP 64
-#define IDPF_RX_BI_BUFID_S 0
-#define IDPF_RX_BI_BUFID_M GENMASK(14, 0)
-#define IDPF_RX_BI_GEN_S 15
-#define IDPF_RX_BI_GEN_M BIT(IDPF_RX_BI_GEN_S)
+#define IDPF_RFL_BI_GEN_M BIT(16)
+#define IDPF_RFL_BI_BUFID_M GENMASK(15, 0)
+
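
With these masks a refill-queue entry is a single u32: the buffer ID in bits 15:0 and a generation bit at bit 16, which idpf_rx_clean_refillq() compares against the queue's RFL_GEN_CHK flag to know when it has caught up with the producer. A small sketch of the packing side, illustrative only; the producer path (idpf_post_buf_refill()) is called in this patch but its body is not shown here:

#include <linux/bitfield.h>
#include <linux/types.h>

static inline u32 demo_refill_desc_pack(u32 buf_id, bool gen)
{
	return FIELD_PREP(IDPF_RFL_BI_BUFID_M, buf_id) |
	       FIELD_PREP(IDPF_RFL_BI_GEN_M, gen);
}

static inline u32 demo_refill_desc_buf_id(u32 refill_desc)
{
	return FIELD_GET(IDPF_RFL_BI_BUFID_M, refill_desc);
}
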
#define IDPF_RXD_EOF_SPLITQ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_EOF_M
#define IDPF_RXD_EOF_SINGLEQ VIRTCHNL2_RX_BASE_DESC_STATUS_EOF_M
-#define IDPF_SINGLEQ_RX_BUF_DESC(rxq, i) \
- (&(((struct virtchnl2_singleq_rx_buf_desc *)((rxq)->desc_ring))[i]))
-#define IDPF_SPLITQ_RX_BUF_DESC(rxq, i) \
- (&(((struct virtchnl2_splitq_rx_buf_desc *)((rxq)->desc_ring))[i]))
-#define IDPF_SPLITQ_RX_BI_DESC(rxq, i) ((((rxq)->ring))[i])
-
-#define IDPF_BASE_TX_DESC(txq, i) \
- (&(((struct idpf_base_tx_desc *)((txq)->desc_ring))[i]))
-#define IDPF_BASE_TX_CTX_DESC(txq, i) \
- (&(((struct idpf_base_tx_ctx_desc *)((txq)->desc_ring))[i]))
-#define IDPF_SPLITQ_TX_COMPLQ_DESC(txcq, i) \
- (&(((struct idpf_splitq_tx_compl_desc *)((txcq)->desc_ring))[i]))
-
-#define IDPF_FLEX_TX_DESC(txq, i) \
- (&(((union idpf_tx_flex_desc *)((txq)->desc_ring))[i]))
-#define IDPF_FLEX_TX_CTX_DESC(txq, i) \
- (&(((struct idpf_flex_tx_ctx_desc *)((txq)->desc_ring))[i]))
-
#define IDPF_DESC_UNUSED(txq) \
((((txq)->next_to_clean > (txq)->next_to_use) ? 0 : (txq)->desc_count) + \
(txq)->next_to_clean - (txq)->next_to_use - 1)
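
A worked example of the unused-descriptor arithmetic, with invented values; the trailing "- 1" keeps one slot permanently unused so a completely full ring is never mistaken for an empty one:

	desc_count = 512, next_to_clean = 10,  next_to_use = 500 (wrapped):
		512 + 10 - 500 - 1 = 21 free descriptors
	desc_count = 512, next_to_clean = 500, next_to_use = 10:
		  0 + 500 - 10 - 1 = 489 free descriptors
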
-#define IDPF_TX_BUF_RSV_UNUSED(txq) ((txq)->buf_stack.top)
-#define IDPF_TX_BUF_RSV_LOW(txq) (IDPF_TX_BUF_RSV_UNUSED(txq) < \
- (txq)->desc_count >> 2)
-
#define IDPF_TX_COMPLQ_OVERFLOW_THRESH(txcq) ((txcq)->desc_count >> 1)
/* Determine the absolute number of completions pending, i.e. the number of
* completions that are expected to arrive on the TX completion queue.
*/
#define IDPF_TX_COMPLQ_PENDING(txq) \
(((txq)->num_completions_pending >= (txq)->complq->num_completions ? \
- 0 : U64_MAX) + \
+ 0 : U32_MAX) + \
(txq)->num_completions_pending - (txq)->complq->num_completions)
-#define IDPF_TX_SPLITQ_COMPL_TAG_WIDTH 16
-#define IDPF_SPLITQ_TX_INVAL_COMPL_TAG -1
-/* Adjust the generation for the completion tag and wrap if necessary */
-#define IDPF_TX_ADJ_COMPL_TAG_GEN(txq) \
- ((++(txq)->compl_tag_cur_gen) >= (txq)->compl_tag_gen_max ? \
- 0 : (txq)->compl_tag_cur_gen)
+#define IDPF_TXBUF_NULL U32_MAX
#define IDPF_TXD_LAST_DESC_CMD (IDPF_TX_DESC_CMD_EOP | IDPF_TX_DESC_CMD_RS)
@@ -163,65 +139,16 @@ do { \
#define IDPF_TX_FLAGS_IPV4 BIT(1)
#define IDPF_TX_FLAGS_IPV6 BIT(2)
#define IDPF_TX_FLAGS_TUNNEL BIT(3)
+#define IDPF_TX_FLAGS_TSYN BIT(4)
+
+struct libeth_rq_napi_stats;
union idpf_tx_flex_desc {
struct idpf_flex_tx_desc q; /* queue based scheduling */
struct idpf_flex_tx_sched_desc flow; /* flow based scheduling */
};
-/**
- * struct idpf_tx_buf
- * @next_to_watch: Next descriptor to clean
- * @skb: Pointer to the skb
- * @dma: DMA address
- * @len: DMA length
- * @bytecount: Number of bytes
- * @gso_segs: Number of GSO segments
- * @compl_tag: Splitq only, unique identifier for a buffer. Used to compare
- * with completion tag returned in buffer completion event.
- * Because the completion tag is expected to be the same in all
- * data descriptors for a given packet, and a single packet can
- * span multiple buffers, we need this field to track all
- * buffers associated with this completion tag independently of
- * the buf_id. The tag consists of a N bit buf_id and M upper
- * order "generation bits". See compl_tag_bufid_m and
- * compl_tag_gen_s in struct idpf_queue. We'll use a value of -1
- * to indicate the tag is not valid.
- * @ctx_entry: Singleq only. Used to indicate the corresponding entry
- * in the descriptor ring was used for a context descriptor and
- * this buffer entry should be skipped.
- */
-struct idpf_tx_buf {
- void *next_to_watch;
- struct sk_buff *skb;
- DEFINE_DMA_UNMAP_ADDR(dma);
- DEFINE_DMA_UNMAP_LEN(len);
- unsigned int bytecount;
- unsigned short gso_segs;
-
- union {
- int compl_tag;
-
- bool ctx_entry;
- };
-};
-
-struct idpf_tx_stash {
- struct hlist_node hlist;
- struct idpf_tx_buf buf;
-};
-
-/**
- * struct idpf_buf_lifo - LIFO for managing OOO completions
- * @top: Used to know how many buffers are left
- * @size: Total size of LIFO
- * @bufs: Backing array
- */
-struct idpf_buf_lifo {
- u16 top;
- u16 size;
- struct idpf_tx_stash **bufs;
-};
+#define idpf_tx_buf libeth_sqe
/**
* struct idpf_tx_offload_params - Offload parameters for a given packet
@@ -255,6 +182,9 @@ struct idpf_tx_offload_params {
* @compl_tag: Associated tag for completion
* @td_tag: Descriptor tunneling tag
* @offload: Offload parameters
+ * @prev_ntu: stored TxQ next_to_use in case of rollback
+ * @prev_refill_ntc: stored refillq next_to_clean in case of packet rollback
+ * @prev_refill_gen: stored refillq generation bit in case of packet rollback
*/
struct idpf_tx_splitq_params {
enum idpf_tx_desc_dtype_value dtype;
@@ -265,6 +195,10 @@ struct idpf_tx_splitq_params {
};
struct idpf_tx_offload_params offload;
+
+ u16 prev_ntu;
+ u16 prev_refill_ntc;
+ bool prev_refill_gen;
};
enum idpf_tx_ctx_desc_eipt_offload {
@@ -274,25 +208,6 @@ enum idpf_tx_ctx_desc_eipt_offload {
IDPF_TX_CTX_EXT_IP_IPV4 = 0x3
};
-/* Checksum offload bits decoded from the receive descriptor. */
-struct idpf_rx_csum_decoded {
- u32 l3l4p : 1;
- u32 ipe : 1;
- u32 eipe : 1;
- u32 eudpe : 1;
- u32 ipv6exadd : 1;
- u32 l4e : 1;
- u32 pprs : 1;
- u32 nat : 1;
- u32 raw_csum_inv : 1;
- u32 raw_csum : 16;
-};
-
-struct idpf_rx_extracted {
- unsigned int size;
- u16 rx_ptype;
-};
-
#define IDPF_TX_COMPLQ_CLEAN_BUDGET 256
#define IDPF_TX_MIN_PKT_LEN 17
#define IDPF_TX_DESCS_FOR_SKB_DATA_PTR 1
@@ -313,16 +228,7 @@ struct idpf_rx_extracted {
#define IDPF_TX_MAX_DESC_DATA_ALIGNED \
ALIGN_DOWN(IDPF_TX_MAX_DESC_DATA, IDPF_TX_MAX_READ_REQ_SIZE)
-#define IDPF_RX_DMA_ATTR \
- (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
-#define IDPF_RX_DESC(rxq, i) \
- (&(((union virtchnl2_rx_desc *)((rxq)->desc_ring))[i]))
-
-struct idpf_rx_buf {
- struct page *page;
- unsigned int page_offset;
- u16 truesize;
-};
+#define idpf_rx_buf libeth_fqe
#define IDPF_RX_MAX_PTYPE_PROTO_IDS 32
#define IDPF_RX_MAX_PTYPE_SZ (sizeof(struct virtchnl2_ptype) + \
@@ -346,72 +252,6 @@ struct idpf_rx_buf {
#define IDPF_RX_MAX_BASE_PTYPE 256
#define IDPF_INVALID_PTYPE_ID 0xFFFF
-/* Packet type non-ip values */
-enum idpf_rx_ptype_l2 {
- IDPF_RX_PTYPE_L2_RESERVED = 0,
- IDPF_RX_PTYPE_L2_MAC_PAY2 = 1,
- IDPF_RX_PTYPE_L2_TIMESYNC_PAY2 = 2,
- IDPF_RX_PTYPE_L2_FIP_PAY2 = 3,
- IDPF_RX_PTYPE_L2_OUI_PAY2 = 4,
- IDPF_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
- IDPF_RX_PTYPE_L2_LLDP_PAY2 = 6,
- IDPF_RX_PTYPE_L2_ECP_PAY2 = 7,
- IDPF_RX_PTYPE_L2_EVB_PAY2 = 8,
- IDPF_RX_PTYPE_L2_QCN_PAY2 = 9,
- IDPF_RX_PTYPE_L2_EAPOL_PAY2 = 10,
- IDPF_RX_PTYPE_L2_ARP = 11,
-};
-
-enum idpf_rx_ptype_outer_ip {
- IDPF_RX_PTYPE_OUTER_L2 = 0,
- IDPF_RX_PTYPE_OUTER_IP = 1,
-};
-
-#define IDPF_RX_PTYPE_TO_IPV(ptype, ipv) \
- (((ptype)->outer_ip == IDPF_RX_PTYPE_OUTER_IP) && \
- ((ptype)->outer_ip_ver == (ipv)))
-
-enum idpf_rx_ptype_outer_ip_ver {
- IDPF_RX_PTYPE_OUTER_NONE = 0,
- IDPF_RX_PTYPE_OUTER_IPV4 = 1,
- IDPF_RX_PTYPE_OUTER_IPV6 = 2,
-};
-
-enum idpf_rx_ptype_outer_fragmented {
- IDPF_RX_PTYPE_NOT_FRAG = 0,
- IDPF_RX_PTYPE_FRAG = 1,
-};
-
-enum idpf_rx_ptype_tunnel_type {
- IDPF_RX_PTYPE_TUNNEL_NONE = 0,
- IDPF_RX_PTYPE_TUNNEL_IP_IP = 1,
- IDPF_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
- IDPF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
- IDPF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
-};
-
-enum idpf_rx_ptype_tunnel_end_prot {
- IDPF_RX_PTYPE_TUNNEL_END_NONE = 0,
- IDPF_RX_PTYPE_TUNNEL_END_IPV4 = 1,
- IDPF_RX_PTYPE_TUNNEL_END_IPV6 = 2,
-};
-
-enum idpf_rx_ptype_inner_prot {
- IDPF_RX_PTYPE_INNER_PROT_NONE = 0,
- IDPF_RX_PTYPE_INNER_PROT_UDP = 1,
- IDPF_RX_PTYPE_INNER_PROT_TCP = 2,
- IDPF_RX_PTYPE_INNER_PROT_SCTP = 3,
- IDPF_RX_PTYPE_INNER_PROT_ICMP = 4,
- IDPF_RX_PTYPE_INNER_PROT_TIMESYNC = 5,
-};
-
-enum idpf_rx_ptype_payload_layer {
- IDPF_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
- IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
- IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
- IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
-};
-
enum idpf_tunnel_state {
IDPF_PTYPE_TUNNEL_IP = BIT(0),
IDPF_PTYPE_TUNNEL_IP_GRENAT = BIT(1),
@@ -419,22 +259,9 @@ enum idpf_tunnel_state {
};
struct idpf_ptype_state {
- bool outer_ip;
- bool outer_frag;
- u8 tunnel_state;
-};
-
-struct idpf_rx_ptype_decoded {
- u32 ptype:10;
- u32 known:1;
- u32 outer_ip:1;
- u32 outer_ip_ver:2;
- u32 outer_frag:1;
- u32 tunnel_type:3;
- u32 tunnel_end_prot:2;
- u32 tunnel_end_frag:1;
- u32 inner_prot:4;
- u32 payload_layer:3;
+ bool outer_ip:1;
+ bool outer_frag:1;
+ u8 tunnel_state:6;
};
/**
@@ -450,23 +277,44 @@ struct idpf_rx_ptype_decoded {
* to 1 and knows that reading a gen bit of 1 in any
* descriptor on the initial pass of the ring indicates a
* writeback. It also flips on every ring wrap.
- * @__IDPF_RFLQ_GEN_CHK: Refill queues are SW only, so Q_GEN acts as the HW bit
- * and RFLGQ_GEN is the SW bit.
+ * @__IDPF_Q_RFL_GEN_CHK: Refill queues are SW only, so Q_GEN acts as the HW
+ * bit and Q_RFL_GEN is the SW bit.
* @__IDPF_Q_FLOW_SCH_EN: Enable flow scheduling
* @__IDPF_Q_SW_MARKER: Used to indicate TX queue marker completions
- * @__IDPF_Q_POLL_MODE: Enable poll mode
+ * @__IDPF_Q_CRC_EN: enable CRC offload in singleq mode
+ * @__IDPF_Q_HSPLIT_EN: enable header split on Rx (splitq)
+ * @__IDPF_Q_PTP: indicates whether the Rx timestamping is enabled for the
+ * queue
+ * @__IDPF_Q_NOIRQ: queue is polling-driven and has no interrupt
+ * @__IDPF_Q_XDP: this is an XDP queue
+ * @__IDPF_Q_XSK: the queue has an XSk pool installed
* @__IDPF_Q_FLAGS_NBITS: Must be last
*/
enum idpf_queue_flags_t {
__IDPF_Q_GEN_CHK,
- __IDPF_RFLQ_GEN_CHK,
+ __IDPF_Q_RFL_GEN_CHK,
__IDPF_Q_FLOW_SCH_EN,
__IDPF_Q_SW_MARKER,
- __IDPF_Q_POLL_MODE,
+ __IDPF_Q_CRC_EN,
+ __IDPF_Q_HSPLIT_EN,
+ __IDPF_Q_PTP,
+ __IDPF_Q_NOIRQ,
+ __IDPF_Q_XDP,
+ __IDPF_Q_XSK,
__IDPF_Q_FLAGS_NBITS,
};
+#define idpf_queue_set(f, q) __set_bit(__IDPF_Q_##f, (q)->flags)
+#define idpf_queue_clear(f, q) __clear_bit(__IDPF_Q_##f, (q)->flags)
+#define idpf_queue_change(f, q) __change_bit(__IDPF_Q_##f, (q)->flags)
+#define idpf_queue_has(f, q) test_bit(__IDPF_Q_##f, (q)->flags)
+
+#define idpf_queue_has_clear(f, q) \
+ __test_and_clear_bit(__IDPF_Q_##f, (q)->flags)
+#define idpf_queue_assign(f, q, v) \
+ __assign_bit(__IDPF_Q_##f, (q)->flags, v)
+
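The token-pasting helpers above replace open-coded bitops on the flags bitmap. A hedged before/after sketch, assuming rxq points at one of the new per-type queue structs:

	/* old, open-coded style */
	if (test_bit(__IDPF_Q_GEN_CHK, rxq->flags))
		__change_bit(__IDPF_Q_GEN_CHK, rxq->flags);

	/* new helper style: the flag name is pasted onto the __IDPF_Q_ prefix */
	if (idpf_queue_has(GEN_CHK, rxq))
		idpf_queue_change(GEN_CHK, rxq);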
/**
* struct idpf_vec_regs
* @dyn_ctl_reg: Dynamic control interrupt register offset
@@ -484,9 +332,13 @@ struct idpf_vec_regs {
* struct idpf_intr_reg
* @dyn_ctl: Dynamic control interrupt register
* @dyn_ctl_intena_m: Mask for dyn_ctl interrupt enable
+ * @dyn_ctl_intena_msk_m: Mask for dyn_ctl interrupt enable mask
* @dyn_ctl_itridx_s: Register bit offset for ITR index
* @dyn_ctl_itridx_m: Mask for ITR index
* @dyn_ctl_intrvl_s: Register bit offset for ITR interval
+ * @dyn_ctl_wb_on_itr_m: Mask for WB on ITR feature
+ * @dyn_ctl_sw_itridx_ena_m: Mask for SW ITR index
+ * @dyn_ctl_swint_trig_m: Mask for dyn_ctl SW triggered interrupt enable
* @rx_itr: RX ITR register
* @tx_itr: TX ITR register
* @icr_ena: Interrupt cause register offset
@@ -495,9 +347,13 @@ struct idpf_vec_regs {
struct idpf_intr_reg {
void __iomem *dyn_ctl;
u32 dyn_ctl_intena_m;
+ u32 dyn_ctl_intena_msk_m;
u32 dyn_ctl_itridx_s;
u32 dyn_ctl_itridx_m;
u32 dyn_ctl_intrvl_s;
+ u32 dyn_ctl_wb_on_itr_m;
+ u32 dyn_ctl_sw_itridx_ena_m;
+ u32 dyn_ctl_swint_trig_m;
void __iomem *rx_itr;
void __iomem *tx_itr;
void __iomem *icr_ena;
@@ -507,54 +363,77 @@ struct idpf_intr_reg {
/**
* struct idpf_q_vector
* @vport: Vport back pointer
- * @affinity_mask: CPU affinity mask
- * @napi: napi handler
- * @v_idx: Vector index
- * @intr_reg: See struct idpf_intr_reg
+ * @num_rxq: Number of RX queues
* @num_txq: Number of TX queues
+ * @num_bufq: Number of buffer queues
+ * @num_complq: number of completion queues
+ * @num_xsksq: number of XSk send queues
+ * @rx: Array of RX queues to service
* @tx: Array of TX queues to service
+ * @bufq: Array of buffer queues to service
+ * @complq: array of completion queues
+ * @xsksq: array of XSk send queues
+ * @intr_reg: See struct idpf_intr_reg
+ * @csd: XSk wakeup CSD
+ * @total_events: Number of interrupts processed
+ * @wb_on_itr: whether WB on ITR is enabled
+ * @napi: napi handler
* @tx_dim: Data for TX net_dim algorithm
* @tx_itr_value: TX interrupt throttling rate
* @tx_intr_mode: Dynamic ITR or not
* @tx_itr_idx: TX ITR index
- * @num_rxq: Number of RX queues
- * @rx: Array of RX queues to service
* @rx_dim: Data for RX net_dim algorithm
* @rx_itr_value: RX interrupt throttling rate
* @rx_intr_mode: Dynamic ITR or not
* @rx_itr_idx: RX ITR index
- * @num_bufq: Number of buffer queues
- * @bufq: Array of buffer queues to service
- * @total_events: Number of interrupts processed
- * @name: Queue vector name
+ * @v_idx: Vector index
*/
struct idpf_q_vector {
+ __cacheline_group_begin_aligned(read_mostly);
struct idpf_vport *vport;
- cpumask_t affinity_mask;
- struct napi_struct napi;
- u16 v_idx;
- struct idpf_intr_reg intr_reg;
+ u16 num_rxq;
u16 num_txq;
- struct idpf_queue **tx;
+ u16 num_bufq;
+ u16 num_complq;
+ u16 num_xsksq;
+ struct idpf_rx_queue **rx;
+ struct idpf_tx_queue **tx;
+ struct idpf_buf_queue **bufq;
+ struct idpf_compl_queue **complq;
+ struct idpf_tx_queue **xsksq;
+
+ struct idpf_intr_reg intr_reg;
+ __cacheline_group_end_aligned(read_mostly);
+
+ __cacheline_group_begin_aligned(read_write);
+ call_single_data_t csd;
+
+ u16 total_events;
+ bool wb_on_itr;
+
+ struct napi_struct napi;
+
struct dim tx_dim;
u16 tx_itr_value;
bool tx_intr_mode;
u32 tx_itr_idx;
- u16 num_rxq;
- struct idpf_queue **rx;
struct dim rx_dim;
u16 rx_itr_value;
bool rx_intr_mode;
u32 rx_itr_idx;
+ __cacheline_group_end_aligned(read_write);
- u16 num_bufq;
- struct idpf_queue **bufq;
+ __cacheline_group_begin_aligned(cold);
+ u16 v_idx;
- u16 total_events;
- char *name;
+ __cacheline_group_end_aligned(cold);
};
+libeth_cacheline_set_assert(struct idpf_q_vector, 136,
+ 56 + sizeof(struct napi_struct) +
+ 2 * sizeof(struct dim),
+ 8);
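libeth_cacheline_set_assert() takes the expected sizes of the read_mostly, read_write and cold groups (here 136 bytes, 56 bytes plus the embedded napi/dim structs, and 8 bytes) and turns them into build-time checks. A hedged sketch of the underlying idea; the marker member names follow the __cacheline_group_begin()/__cacheline_group_end() convention and are an assumption here, not the exact libeth macro:

	/* size of one group = distance between its begin and end markers */
	#define GROUP_SIZE(type, grp)					\
		(offsetof(type, __cacheline_group_end__##grp) -		\
		 offsetof(type, __cacheline_group_begin__##grp))

	static_assert(GROUP_SIZE(struct idpf_q_vector, cold) <= 8);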
struct idpf_rx_queue_stats {
u64_stats_t packets;
@@ -574,16 +453,7 @@ struct idpf_tx_queue_stats {
u64_stats_t q_busy;
u64_stats_t skb_drops;
u64_stats_t dma_map_errs;
-};
-
-struct idpf_cleaned_stats {
- u32 packets;
- u32 bytes;
-};
-
-union idpf_queue_stats {
- struct idpf_rx_queue_stats rx;
- struct idpf_tx_queue_stats tx;
+ u64_stats_t tstamp_skipped;
};
#define IDPF_ITR_DYNAMIC 1
@@ -595,45 +465,152 @@ union idpf_queue_stats {
#define IDPF_ITR_IS_DYNAMIC(itr_mode) (itr_mode)
#define IDPF_ITR_TX_DEF IDPF_ITR_20K
#define IDPF_ITR_RX_DEF IDPF_ITR_20K
+/* Index used for 'SW ITR' update in DYN_CTL register */
+#define IDPF_SW_ITR_UPDATE_IDX 2
/* Index used for 'No ITR' update in DYN_CTL register */
#define IDPF_NO_ITR_UPDATE_IDX 3
#define IDPF_ITR_IDX_SPACING(spacing, dflt) (spacing ? spacing : dflt)
#define IDPF_DIM_DEFAULT_PROFILE_IX 1
/**
- * struct idpf_queue
- * @dev: Device back pointer for DMA mapping
- * @vport: Back pointer to associated vport
- * @txq_grp: See struct idpf_txq_group
- * @rxq_grp: See struct idpf_rxq_group
- * @idx: For buffer queue, it is used as group id, either 0 or 1. On clean,
- * buffer queue uses this index to determine which group of refill queues
- * to clean.
- * For TX queue, it is used as index to map between TX queue group and
- * hot path TX pointers stored in vport. Used in both singleq/splitq.
- * For RX queue, it is used to index to total RX queue across groups and
+ * struct idpf_rx_queue - software structure representing a receive queue
+ * @rx: universal receive descriptor array
+ * @single_buf: buffer descriptor array in singleq
+ * @desc_ring: virtual descriptor ring address
+ * @bufq_sets: Pointer to the array of buffer queues in splitq mode
+ * @napi: NAPI instance corresponding to this queue (splitq)
+ * @xdp_prog: attached XDP program
+ * @rx_buf: See struct &libeth_fqe
+ * @pp: Page pool pointer in singleq mode
+ * @tail: Tail offset. Used for both queue models single and split.
+ * @flags: See enum idpf_queue_flags_t
+ * @idx: For RX queue, it is used to index to total RX queue across groups and
* used for skb reporting.
- * @tail: Tail offset. Used for both queue models single and split. In splitq
- * model relevant only for TX queue and RX queue.
- * @tx_buf: See struct idpf_tx_buf
- * @rx_buf: Struct with RX buffer related members
- * @rx_buf.buf: See struct idpf_rx_buf
- * @rx_buf.hdr_buf_pa: DMA handle
- * @rx_buf.hdr_buf_va: Virtual address
- * @pp: Page pool pointer
- * @skb: Pointer to the skb
- * @q_type: Queue type (TX, RX, TX completion, RX buffer)
- * @q_id: Queue id
* @desc_count: Number of descriptors
- * @next_to_use: Next descriptor to use. Relevant in both split & single txq
- * and bufq.
- * @next_to_clean: Next descriptor to clean. In split queue model, only
- * relevant to TX completion queue and RX queue.
- * @next_to_alloc: RX buffer to allocate at. Used only for RX. In splitq model
- * only relevant to RX queue.
- * @flags: See enum idpf_queue_flags_t
- * @q_stats: See union idpf_queue_stats
+ * @num_xdp_txq: total number of XDP Tx queues
+ * @xdpsqs: shortcut for XDP Tx queues array
+ * @rxdids: Supported RX descriptor ids
+ * @truesize: data buffer truesize in singleq
+ * @rx_ptype_lkup: LUT of Rx ptypes
+ * @xdp_rxq: XDP queue info
+ * @next_to_use: Next descriptor to use
+ * @next_to_clean: Next descriptor to clean
+ * @next_to_alloc: RX buffer to allocate at
+ * @xdp: XDP buffer with the current frame
+ * @xsk: current XDP buffer in XSk mode
+ * @pool: XSk pool if installed
+ * @cached_phc_time: Cached PHC time for the Rx queue
* @stats_sync: See struct u64_stats_sync
+ * @q_stats: See union idpf_rx_queue_stats
+ * @q_id: Queue id
+ * @size: Length of descriptor ring in bytes
+ * @dma: Physical address of ring
+ * @q_vector: Backreference to associated vector
+ * @rx_buffer_low_watermark: RX buffer low watermark
+ * @rx_hbuf_size: Header buffer size
+ * @rx_buf_size: Buffer size
+ * @rx_max_pkt_size: RX max packet size
+ */
+struct idpf_rx_queue {
+ __cacheline_group_begin_aligned(read_mostly);
+ union {
+ union virtchnl2_rx_desc *rx;
+ struct virtchnl2_singleq_rx_buf_desc *single_buf;
+
+ void *desc_ring;
+ };
+ union {
+ struct {
+ struct idpf_bufq_set *bufq_sets;
+ struct napi_struct *napi;
+ struct bpf_prog __rcu *xdp_prog;
+ };
+ struct {
+ struct libeth_fqe *rx_buf;
+ struct page_pool *pp;
+ void __iomem *tail;
+ };
+ };
+
+ DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
+ u16 idx;
+ u16 desc_count;
+
+ u32 num_xdp_txq;
+ union {
+ struct idpf_tx_queue **xdpsqs;
+ struct {
+ u32 rxdids;
+ u32 truesize;
+ };
+ };
+ const struct libeth_rx_pt *rx_ptype_lkup;
+
+ struct xdp_rxq_info xdp_rxq;
+ __cacheline_group_end_aligned(read_mostly);
+
+ __cacheline_group_begin_aligned(read_write);
+ u32 next_to_use;
+ u32 next_to_clean;
+ u32 next_to_alloc;
+
+ union {
+ struct libeth_xdp_buff_stash xdp;
+ struct {
+ struct libeth_xdp_buff *xsk;
+ struct xsk_buff_pool *pool;
+ };
+ };
+ u64 cached_phc_time;
+
+ struct u64_stats_sync stats_sync;
+ struct idpf_rx_queue_stats q_stats;
+ __cacheline_group_end_aligned(read_write);
+
+ __cacheline_group_begin_aligned(cold);
+ u32 q_id;
+ u32 size;
+ dma_addr_t dma;
+
+ struct idpf_q_vector *q_vector;
+
+ u16 rx_buffer_low_watermark;
+ u16 rx_hbuf_size;
+ u16 rx_buf_size;
+ u16 rx_max_pkt_size;
+ __cacheline_group_end_aligned(cold);
+};
+libeth_cacheline_set_assert(struct idpf_rx_queue,
+ ALIGN(64, __alignof(struct xdp_rxq_info)) +
+ sizeof(struct xdp_rxq_info),
+ 96 + offsetof(struct idpf_rx_queue, q_stats) -
+ offsetofend(struct idpf_rx_queue, cached_phc_time),
+ 32);
+
+/**
+ * struct idpf_tx_queue - software structure representing a transmit queue
+ * @base_tx: base Tx descriptor array
+ * @base_ctx: base Tx context descriptor array
+ * @flex_tx: flex Tx descriptor array
+ * @flex_ctx: flex Tx context descriptor array
+ * @desc_ring: virtual descriptor ring address
+ * @tx_buf: See struct idpf_tx_buf
+ * @txq_grp: See struct idpf_txq_group
+ * @complq: corresponding completion queue in XDP mode
+ * @dev: Device back pointer for DMA mapping
+ * @pool: corresponding XSk pool if installed
+ * @tail: Tail offset. Used for both queue models single and split
+ * @flags: See enum idpf_queue_flags_t
+ * @idx: For TX queue, it is used as index to map between TX queue group and
+ * hot path TX pointers stored in vport. Used in both singleq/splitq.
+ * @desc_count: Number of descriptors
+ * @tx_min_pkt_len: Min supported packet length
+ * @thresh: XDP queue cleaning threshold
+ * @netdev: &net_device corresponding to this queue
+ * @next_to_use: Next descriptor to use
+ * @next_to_clean: Next descriptor to clean
+ * @last_re: last descriptor index that RE bit was set
+ * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
* @cleaned_bytes: Splitq only, TXQ only: When a TX completion is received on
* the TX completion queue, it can be for any TXQ associated
* with that completion queue. This means we can clean up to
@@ -642,149 +619,280 @@ union idpf_queue_stats {
* that single call to clean the completion queue. By doing so,
* we can update BQL with aggregate cleaned stats for each TXQ
* only once at the end of the cleaning routine.
+ * @clean_budget: singleq only, queue cleaning budget
* @cleaned_pkts: Number of packets cleaned for the above said case
- * @rx_hsplit_en: RX headsplit enable
- * @rx_hbuf_size: Header buffer size
- * @rx_buf_size: Buffer size
- * @rx_max_pkt_size: RX max packet size
- * @rx_buf_stride: RX buffer stride
- * @rx_buffer_low_watermark: RX buffer low watermark
- * @rxdids: Supported RX descriptor ids
- * @q_vector: Backreference to associated vector
+ * @refillq: Pointer to refill queue
+ * @pending: number of pending descriptors to send in QB
+ * @xdp_tx: number of pending &xdp_buff or &xdp_frame buffers
+ * @timer: timer for XDP Tx queue cleanup
+ * @xdp_lock: lock for XDP Tx queues sharing
+ * @cached_tstamp_caps: Tx timestamp capabilities negotiated with the CP
+ * @tstamp_task: Work that handles Tx timestamp read
+ * @stats_sync: See struct u64_stats_sync
+ * @q_stats: See union idpf_tx_queue_stats
+ * @q_id: Queue id
* @size: Length of descriptor ring in bytes
* @dma: Physical address of ring
- * @desc_ring: Descriptor ring memory
- * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
- * @tx_min_pkt_len: Min supported packet length
- * @num_completions: Only relevant for TX completion queue. It tracks the
- * number of completions received to compare against the
- * number of completions pending, as accumulated by the
- * TX queues.
- * @buf_stack: Stack of empty buffers to store buffer info for out of order
- * buffer completions. See struct idpf_buf_lifo.
- * @compl_tag_bufid_m: Completion tag buffer id mask
- * @compl_tag_gen_s: Completion tag generation bit
- * The format of the completion tag will change based on the TXQ
- * descriptor ring size so that we can maintain roughly the same level
- * of "uniqueness" across all descriptor sizes. For example, if the
- * TXQ descriptor ring size is 64 (the minimum size supported), the
- * completion tag will be formatted as below:
- * 15 6 5 0
- * --------------------------------
- * | GEN=0-1023 |IDX = 0-63|
- * --------------------------------
- *
- * This gives us 64*1024 = 65536 possible unique values. Similarly, if
- * the TXQ descriptor ring size is 8160 (the maximum size supported),
- * the completion tag will be formatted as below:
- * 15 13 12 0
- * --------------------------------
- * |GEN | IDX = 0-8159 |
- * --------------------------------
- *
- * This gives us 8*8160 = 65280 possible unique values.
- * @compl_tag_cur_gen: Used to keep track of current completion tag generation
- * @compl_tag_gen_max: To determine when compl_tag_cur_gen should be reset
- * @sched_buf_hash: Hash table to stores buffers
+ * @q_vector: Backreference to associated vector
+ * @buf_pool_size: Total number of idpf_tx_buf
+ * @rel_q_id: relative virtchnl queue index
*/
-struct idpf_queue {
- struct device *dev;
- struct idpf_vport *vport;
+struct idpf_tx_queue {
+ __cacheline_group_begin_aligned(read_mostly);
+ union {
+ struct idpf_base_tx_desc *base_tx;
+ struct idpf_base_tx_ctx_desc *base_ctx;
+ union idpf_tx_flex_desc *flex_tx;
+ union idpf_flex_tx_ctx_desc *flex_ctx;
+
+ void *desc_ring;
+ };
+ struct libeth_sqe *tx_buf;
union {
struct idpf_txq_group *txq_grp;
- struct idpf_rxq_group *rxq_grp;
+ struct idpf_compl_queue *complq;
+ };
+ union {
+ struct device *dev;
+ struct xsk_buff_pool *pool;
};
- u16 idx;
void __iomem *tail;
+
+ DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
+ u16 idx;
+ u16 desc_count;
+
+ union {
+ u16 tx_min_pkt_len;
+ u32 thresh;
+ };
+
+ struct net_device *netdev;
+ __cacheline_group_end_aligned(read_mostly);
+
+ __cacheline_group_begin_aligned(read_write);
+ u32 next_to_use;
+ u32 next_to_clean;
+
union {
- struct idpf_tx_buf *tx_buf;
struct {
- struct idpf_rx_buf *buf;
- dma_addr_t hdr_buf_pa;
- void *hdr_buf_va;
- } rx_buf;
+ u16 last_re;
+ u16 tx_max_bufs;
+
+ union {
+ u32 cleaned_bytes;
+ u32 clean_budget;
+ };
+ u16 cleaned_pkts;
+
+ struct idpf_sw_queue *refillq;
+ };
+ struct {
+ u32 pending;
+ u32 xdp_tx;
+
+ struct libeth_xdpsq_timer *timer;
+ struct libeth_xdpsq_lock xdp_lock;
+ };
};
- struct page_pool *pp;
- struct sk_buff *skb;
- u16 q_type;
+
+ struct idpf_ptp_vport_tx_tstamp_caps *cached_tstamp_caps;
+ struct work_struct *tstamp_task;
+
+ struct u64_stats_sync stats_sync;
+ struct idpf_tx_queue_stats q_stats;
+ __cacheline_group_end_aligned(read_write);
+
+ __cacheline_group_begin_aligned(cold);
u32 q_id;
- u16 desc_count;
+ u32 size;
+ dma_addr_t dma;
+
+ struct idpf_q_vector *q_vector;
+
+ u32 buf_pool_size;
+ u32 rel_q_id;
+ __cacheline_group_end_aligned(cold);
+};
+libeth_cacheline_set_assert(struct idpf_tx_queue, 64,
+ 104 +
+ offsetof(struct idpf_tx_queue, cached_tstamp_caps) -
+ offsetofend(struct idpf_tx_queue, timer) +
+ offsetof(struct idpf_tx_queue, q_stats) -
+ offsetofend(struct idpf_tx_queue, tstamp_task),
+ 32);
+
+/**
+ * struct idpf_buf_queue - software structure representing a buffer queue
+ * @split_buf: buffer descriptor array
+ * @buf: &libeth_fqe for data buffers
+ * @pp: &page_pool for data buffers
+ * @xsk_buf: &xdp_buff for XSk Rx buffers
+ * @pool: &xsk_buff_pool on XSk queues
+ * @hdr_buf: &libeth_fqe for header buffers
+ * @hdr_pp: &page_pool for header buffers
+ * @tail: Tail offset
+ * @flags: See enum idpf_queue_flags_t
+ * @desc_count: Number of descriptors
+ * @thresh: refill threshold in XSk
+ * @next_to_use: Next descriptor to use
+ * @next_to_clean: Next descriptor to clean
+ * @next_to_alloc: RX buffer to allocate at
+ * @pending: number of buffers to refill (XSk)
+ * @hdr_truesize: truesize for buffer headers
+ * @truesize: truesize for data buffers
+ * @q_id: Queue id
+ * @size: Length of descriptor ring in bytes
+ * @dma: Physical address of ring
+ * @q_vector: Backreference to associated vector
+ * @rx_buffer_low_watermark: RX buffer low watermark
+ * @rx_hbuf_size: Header buffer size
+ * @rx_buf_size: Buffer size
+ */
+struct idpf_buf_queue {
+ __cacheline_group_begin_aligned(read_mostly);
+ struct virtchnl2_splitq_rx_buf_desc *split_buf;
+ union {
+ struct {
+ struct libeth_fqe *buf;
+ struct page_pool *pp;
+ };
+ struct {
+ struct libeth_xdp_buff **xsk_buf;
+ struct xsk_buff_pool *pool;
+ };
+ };
+ struct libeth_fqe *hdr_buf;
+ struct page_pool *hdr_pp;
+ void __iomem *tail;
- u16 next_to_use;
- u16 next_to_clean;
- u16 next_to_alloc;
DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
+ u32 desc_count;
- union idpf_queue_stats q_stats;
- struct u64_stats_sync stats_sync;
+ u32 thresh;
+ __cacheline_group_end_aligned(read_mostly);
- u32 cleaned_bytes;
- u16 cleaned_pkts;
+ __cacheline_group_begin_aligned(read_write);
+ u32 next_to_use;
+ u32 next_to_clean;
+ u32 next_to_alloc;
- bool rx_hsplit_en;
+ u32 pending;
+ u32 hdr_truesize;
+ u32 truesize;
+ __cacheline_group_end_aligned(read_write);
+
+ __cacheline_group_begin_aligned(cold);
+ u32 q_id;
+ u32 size;
+ dma_addr_t dma;
+
+ struct idpf_q_vector *q_vector;
+
+ u16 rx_buffer_low_watermark;
u16 rx_hbuf_size;
u16 rx_buf_size;
- u16 rx_max_pkt_size;
- u16 rx_buf_stride;
- u8 rx_buffer_low_watermark;
- u64 rxdids;
- struct idpf_q_vector *q_vector;
- unsigned int size;
- dma_addr_t dma;
- void *desc_ring;
+ __cacheline_group_end_aligned(cold);
+};
+libeth_cacheline_set_assert(struct idpf_buf_queue, 64, 24, 32);
+
+/**
+ * struct idpf_compl_queue - software structure representing a completion queue
+ * @comp: 8-byte completion descriptor array
+ * @comp_4b: 4-byte completion descriptor array
+ * @desc_ring: virtual descriptor ring address
+ * @txq_grp: See struct idpf_txq_group
+ * @flags: See enum idpf_queue_flags_t
+ * @desc_count: Number of descriptors
+ * @clean_budget: queue cleaning budget
+ * @netdev: &net_device corresponding to this queue
+ * @next_to_use: Next descriptor to use
+ * @next_to_clean: Next descriptor to clean
+ * @num_completions: Only relevant for TX completion queue. It tracks the
+ * number of completions received to compare against the
+ * number of completions pending, as accumulated by the
+ * TX queues.
+ * @q_id: Queue id
+ * @size: Length of descriptor ring in bytes
+ * @dma: Physical address of ring
+ * @q_vector: Backreference to associated vector
+ */
+struct idpf_compl_queue {
+ __cacheline_group_begin_aligned(read_mostly);
+ union {
+ struct idpf_splitq_tx_compl_desc *comp;
+ struct idpf_splitq_4b_tx_compl_desc *comp_4b;
- u16 tx_max_bufs;
- u8 tx_min_pkt_len;
+ void *desc_ring;
+ };
+ struct idpf_txq_group *txq_grp;
- u32 num_completions;
+ DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
+ u32 desc_count;
- struct idpf_buf_lifo buf_stack;
+ u32 clean_budget;
+ struct net_device *netdev;
+ __cacheline_group_end_aligned(read_mostly);
- u16 compl_tag_bufid_m;
- u16 compl_tag_gen_s;
+ __cacheline_group_begin_aligned(read_write);
+ u32 next_to_use;
+ u32 next_to_clean;
- u16 compl_tag_cur_gen;
- u16 compl_tag_gen_max;
+ aligned_u64 num_completions;
+ __cacheline_group_end_aligned(read_write);
- DECLARE_HASHTABLE(sched_buf_hash, 12);
-} ____cacheline_internodealigned_in_smp;
+ __cacheline_group_begin_aligned(cold);
+ u32 q_id;
+ u32 size;
+ dma_addr_t dma;
+
+ struct idpf_q_vector *q_vector;
+ __cacheline_group_end_aligned(cold);
+};
+libeth_cacheline_set_assert(struct idpf_compl_queue, 40, 16, 24);
/**
* struct idpf_sw_queue
- * @next_to_clean: Next descriptor to clean
- * @next_to_alloc: Buffer to allocate at
- * @flags: See enum idpf_queue_flags_t
* @ring: Pointer to the ring
+ * @flags: See enum idpf_queue_flags_t
* @desc_count: Descriptor count
- * @dev: Device back pointer for DMA mapping
+ * @next_to_use: Buffer to allocate at
+ * @next_to_clean: Next descriptor to clean
*
* Software queues are used in splitq mode to manage buffers between the rxq
* producer and the bufq consumer. These are required in order to maintain a
* lockless buffer management system and are strictly software only constructs.
*/
struct idpf_sw_queue {
- u16 next_to_clean;
- u16 next_to_alloc;
+ __cacheline_group_begin_aligned(read_mostly);
+ u32 *ring;
+
DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
- u16 *ring;
- u16 desc_count;
- struct device *dev;
-} ____cacheline_internodealigned_in_smp;
+ u32 desc_count;
+ __cacheline_group_end_aligned(read_mostly);
+
+ __cacheline_group_begin_aligned(read_write);
+ u32 next_to_use;
+ u32 next_to_clean;
+ __cacheline_group_end_aligned(read_write);
+};
+libeth_cacheline_group_assert(struct idpf_sw_queue, read_mostly, 24);
+libeth_cacheline_group_assert(struct idpf_sw_queue, read_write, 8);
+libeth_cacheline_struct_assert(struct idpf_sw_queue, 24, 8);
/**
* struct idpf_rxq_set
* @rxq: RX queue
- * @refillq0: Pointer to refill queue 0
- * @refillq1: Pointer to refill queue 1
+ * @refillq: pointers to refill queues
*
* Splitq only. idpf_rxq_set associates an rxq with an array of refillqs.
* Each rxq needs a refillq to return used buffers back to the respective bufq.
* Bufqs then clean these refillqs for buffers to give to hardware.
*/
struct idpf_rxq_set {
- struct idpf_queue rxq;
- struct idpf_sw_queue *refillq0;
- struct idpf_sw_queue *refillq1;
+ struct idpf_rx_queue rxq;
+ struct idpf_sw_queue *refillq[IDPF_MAX_BUFQS_PER_RXQ_GRP];
};
/**
@@ -803,7 +911,7 @@ struct idpf_rxq_set {
* managed by at most two bufqs (depending on performance configuration).
*/
struct idpf_bufq_set {
- struct idpf_queue bufq;
+ struct idpf_buf_queue bufq;
int num_refillqs;
struct idpf_sw_queue *refillqs;
};
@@ -829,7 +937,7 @@ struct idpf_rxq_group {
union {
struct {
u16 num_rxq;
- struct idpf_queue *rxqs[IDPF_LARGE_MAX_Q];
+ struct idpf_rx_queue *rxqs[IDPF_LARGE_MAX_Q];
} singleq;
struct {
u16 num_rxq_sets;
@@ -857,13 +965,25 @@ struct idpf_txq_group {
struct idpf_vport *vport;
u16 num_txq;
- struct idpf_queue *txqs[IDPF_LARGE_MAX_Q];
+ struct idpf_tx_queue *txqs[IDPF_LARGE_MAX_Q];
- struct idpf_queue *complq;
+ struct idpf_compl_queue *complq;
- u32 num_completions_pending;
+ aligned_u64 num_completions_pending;
};
+static inline int idpf_q_vector_to_mem(const struct idpf_q_vector *q_vector)
+{
+ u32 cpu;
+
+ if (!q_vector)
+ return NUMA_NO_NODE;
+
+ cpu = cpumask_first(&q_vector->napi.config->affinity_mask);
+
+ return cpu < nr_cpu_ids ? cpu_to_mem(cpu) : NUMA_NO_NODE;
+}
+
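idpf_q_vector_to_mem() maps a vector to the memory node of the first CPU in its NAPI affinity mask, so per-queue allocations can be made NUMA-local. A hedged caller sketch (the allocation itself is illustrative, not lifted from the driver):

	/* allocate the buffer array on the node that services this queue */
	buf = kcalloc_node(bufq->desc_count, sizeof(*buf), GFP_KERNEL,
			   idpf_q_vector_to_mem(bufq->q_vector));
	if (!buf)
		return -ENOMEM;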
/**
* idpf_size_to_txd_count - Get number of descriptors needed for large Tx frag
* @size: transmit request size in bytes
@@ -920,57 +1040,33 @@ static inline void idpf_tx_splitq_build_desc(union idpf_tx_flex_desc *desc,
}
/**
- * idpf_alloc_page - Allocate a new RX buffer from the page pool
- * @pool: page_pool to allocate from
- * @buf: metadata struct to populate with page info
- * @buf_size: 2K or 4K
- *
- * Returns &dma_addr_t to be passed to HW for Rx, %DMA_MAPPING_ERROR otherwise.
+ * idpf_vport_intr_set_wb_on_itr - enable descriptor writeback on disabled interrupts
+ * @q_vector: pointer to queue vector struct
*/
-static inline dma_addr_t idpf_alloc_page(struct page_pool *pool,
- struct idpf_rx_buf *buf,
- unsigned int buf_size)
+static inline void idpf_vport_intr_set_wb_on_itr(struct idpf_q_vector *q_vector)
{
- if (buf_size == IDPF_RX_BUF_2048)
- buf->page = page_pool_dev_alloc_frag(pool, &buf->page_offset,
- buf_size);
- else
- buf->page = page_pool_dev_alloc_pages(pool);
+ struct idpf_intr_reg *reg;
- if (!buf->page)
- return DMA_MAPPING_ERROR;
+ if (q_vector->wb_on_itr)
+ return;
- buf->truesize = buf_size;
+ q_vector->wb_on_itr = true;
+ reg = &q_vector->intr_reg;
- return page_pool_get_dma_addr(buf->page) + buf->page_offset +
- pool->p.offset;
+ writel(reg->dyn_ctl_wb_on_itr_m | reg->dyn_ctl_intena_msk_m |
+ (IDPF_NO_ITR_UPDATE_IDX << reg->dyn_ctl_itridx_s),
+ reg->dyn_ctl);
}
/**
- * idpf_rx_put_page - Return RX buffer page to pool
- * @rx_buf: RX buffer metadata struct
+ * idpf_tx_splitq_get_free_bufs - get number of free buf_ids in refillq
+ * @refillq: pointer to refillq containing buf_ids
*/
-static inline void idpf_rx_put_page(struct idpf_rx_buf *rx_buf)
+static inline u32 idpf_tx_splitq_get_free_bufs(struct idpf_sw_queue *refillq)
{
- page_pool_put_page(rx_buf->page->pp, rx_buf->page,
- rx_buf->truesize, true);
- rx_buf->page = NULL;
-}
-
-/**
- * idpf_rx_sync_for_cpu - Synchronize DMA buffer
- * @rx_buf: RX buffer metadata struct
- * @len: frame length from descriptor
- */
-static inline void idpf_rx_sync_for_cpu(struct idpf_rx_buf *rx_buf, u32 len)
-{
- struct page *page = rx_buf->page;
- struct page_pool *pp = page->pp;
-
- dma_sync_single_range_for_cpu(pp->p.dev,
- page_pool_get_dma_addr(page),
- rx_buf->page_offset + pp->p.offset, len,
- page_pool_get_dma_dir(pp));
+ return (refillq->next_to_use > refillq->next_to_clean ?
+ 0 : refillq->desc_count) +
+ refillq->next_to_use - refillq->next_to_clean - 1;
}
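idpf_tx_splitq_get_free_bufs() applies the same ring arithmetic to the refill queue that hands out buf_ids. A hypothetical use on the splitq xmit path; the drop fallback shown is illustrative only, not the driver's actual flow:

	/* make sure enough free buf_ids remain before mapping the skb's frags */
	if (idpf_tx_splitq_get_free_bufs(tx_q->refillq) < buf_count)
		return idpf_tx_drop_skb(tx_q, skb);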
int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget);
@@ -988,36 +1084,35 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport);
void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector);
void idpf_vport_intr_deinit(struct idpf_vport *vport);
int idpf_vport_intr_init(struct idpf_vport *vport);
-enum pkt_hash_types idpf_ptype_to_htype(const struct idpf_rx_ptype_decoded *decoded);
+void idpf_vport_intr_ena(struct idpf_vport *vport);
int idpf_config_rss(struct idpf_vport *vport);
int idpf_init_rss(struct idpf_vport *vport);
void idpf_deinit_rss(struct idpf_vport *vport);
int idpf_rx_bufs_init_all(struct idpf_vport *vport);
-void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb,
- unsigned int size);
-struct sk_buff *idpf_rx_construct_skb(struct idpf_queue *rxq,
- struct idpf_rx_buf *rx_buf,
- unsigned int size);
-bool idpf_init_rx_buf_hw_alloc(struct idpf_queue *rxq, struct idpf_rx_buf *buf);
-void idpf_rx_buf_hw_update(struct idpf_queue *rxq, u32 val);
-void idpf_tx_buf_hw_update(struct idpf_queue *tx_q, u32 val,
+
+struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport,
+ u32 q_num);
+struct idpf_q_vector *idpf_find_txq_vec(const struct idpf_vport *vport,
+ u32 q_num);
+int idpf_qp_switch(struct idpf_vport *vport, u32 qid, bool en);
+
+void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
bool xmit_more);
unsigned int idpf_size_to_txd_count(unsigned int size);
-netdev_tx_t idpf_tx_drop_skb(struct idpf_queue *tx_q, struct sk_buff *skb);
-void idpf_tx_dma_map_error(struct idpf_queue *txq, struct sk_buff *skb,
- struct idpf_tx_buf *first, u16 ring_idx);
-unsigned int idpf_tx_desc_count_required(struct idpf_queue *txq,
- struct sk_buff *skb);
-bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
- unsigned int count);
-int idpf_tx_maybe_stop_common(struct idpf_queue *tx_q, unsigned int size);
+netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb);
+unsigned int idpf_tx_res_count_required(struct idpf_tx_queue *txq,
+ struct sk_buff *skb, u32 *buf_count);
void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue);
-netdev_tx_t idpf_tx_splitq_start(struct sk_buff *skb,
- struct net_device *netdev);
-netdev_tx_t idpf_tx_singleq_start(struct sk_buff *skb,
- struct net_device *netdev);
-bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rxq,
+netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
+ struct idpf_tx_queue *tx_q);
+netdev_tx_t idpf_tx_start(struct sk_buff *skb, struct net_device *netdev);
+bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rxq,
u16 cleaned_count);
+bool idpf_rx_process_skb_fields(struct sk_buff *skb,
+ const struct libeth_xdp_buff *xdp,
+ struct libeth_rq_napi_stats *rs);
int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off);
+void idpf_wait_for_sw_marker_completion(const struct idpf_tx_queue *txq);
+
#endif /* !_IDPF_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
index 629cb5cb7c9f..4cc58c83688c 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
@@ -9,10 +9,13 @@
/**
* idpf_vf_ctlq_reg_init - initialize default mailbox registers
+ * @adapter: adapter structure
* @cq: pointer to the array of create control queues
*/
-static void idpf_vf_ctlq_reg_init(struct idpf_ctlq_create_info *cq)
+static void idpf_vf_ctlq_reg_init(struct idpf_adapter *adapter,
+ struct idpf_ctlq_create_info *cq)
{
+ resource_size_t mbx_start = adapter->dev_ops.static_reg_info[0].start;
int i;
for (i = 0; i < IDPF_NUM_DFLT_MBX_Q; i++) {
@@ -21,22 +24,22 @@ static void idpf_vf_ctlq_reg_init(struct idpf_ctlq_create_info *cq)
switch (ccq->type) {
case IDPF_CTLQ_TYPE_MAILBOX_TX:
/* set head and tail registers in our local struct */
- ccq->reg.head = VF_ATQH;
- ccq->reg.tail = VF_ATQT;
- ccq->reg.len = VF_ATQLEN;
- ccq->reg.bah = VF_ATQBAH;
- ccq->reg.bal = VF_ATQBAL;
+ ccq->reg.head = VF_ATQH - mbx_start;
+ ccq->reg.tail = VF_ATQT - mbx_start;
+ ccq->reg.len = VF_ATQLEN - mbx_start;
+ ccq->reg.bah = VF_ATQBAH - mbx_start;
+ ccq->reg.bal = VF_ATQBAL - mbx_start;
ccq->reg.len_mask = VF_ATQLEN_ATQLEN_M;
ccq->reg.len_ena_mask = VF_ATQLEN_ATQENABLE_M;
ccq->reg.head_mask = VF_ATQH_ATQH_M;
break;
case IDPF_CTLQ_TYPE_MAILBOX_RX:
/* set head and tail registers in our local struct */
- ccq->reg.head = VF_ARQH;
- ccq->reg.tail = VF_ARQT;
- ccq->reg.len = VF_ARQLEN;
- ccq->reg.bah = VF_ARQBAH;
- ccq->reg.bal = VF_ARQBAL;
+ ccq->reg.head = VF_ARQH - mbx_start;
+ ccq->reg.tail = VF_ARQT - mbx_start;
+ ccq->reg.len = VF_ARQLEN - mbx_start;
+ ccq->reg.bah = VF_ARQBAH - mbx_start;
+ ccq->reg.bal = VF_ARQBAL - mbx_start;
ccq->reg.len_mask = VF_ARQLEN_ARQLEN_M;
ccq->reg.len_ena_mask = VF_ARQLEN_ARQENABLE_M;
ccq->reg.head_mask = VF_ARQH_ARQH_M;
@@ -73,7 +76,7 @@ static int idpf_vf_intr_reg_init(struct idpf_vport *vport)
int num_vecs = vport->num_q_vectors;
struct idpf_vec_regs *reg_vals;
int num_regs, i, err = 0;
- u32 rx_itr, tx_itr;
+ u32 rx_itr, tx_itr, val;
u16 total_vecs;
total_vecs = idpf_get_reserved_vecs(vport->adapter);
@@ -97,7 +100,13 @@ static int idpf_vf_intr_reg_init(struct idpf_vport *vport)
intr->dyn_ctl = idpf_get_reg_addr(adapter,
reg_vals[vec_id].dyn_ctl_reg);
intr->dyn_ctl_intena_m = VF_INT_DYN_CTLN_INTENA_M;
+ intr->dyn_ctl_intena_msk_m = VF_INT_DYN_CTLN_INTENA_MSK_M;
intr->dyn_ctl_itridx_s = VF_INT_DYN_CTLN_ITR_INDX_S;
+ intr->dyn_ctl_intrvl_s = VF_INT_DYN_CTLN_INTERVAL_S;
+ intr->dyn_ctl_wb_on_itr_m = VF_INT_DYN_CTLN_WB_ON_ITR_M;
+ intr->dyn_ctl_swint_trig_m = VF_INT_DYN_CTLN_SWINT_TRIG_M;
+ intr->dyn_ctl_sw_itridx_ena_m =
+ VF_INT_DYN_CTLN_SW_ITR_INDX_ENA_M;
spacing = IDPF_ITR_IDX_SPACING(reg_vals[vec_id].itrn_index_spacing,
IDPF_VF_ITR_IDX_SPACING);
@@ -111,6 +120,15 @@ static int idpf_vf_intr_reg_init(struct idpf_vport *vport)
intr->tx_itr = idpf_get_reg_addr(adapter, tx_itr);
}
+ /* Data vector for NOIRQ queues */
+
+ val = reg_vals[vport->q_vector_idxs[i] - IDPF_MBX_Q_VEC].dyn_ctl_reg;
+ vport->noirq_dyn_ctl = idpf_get_reg_addr(adapter, val);
+
+ val = VF_INT_DYN_CTLN_WB_ON_ITR_M | VF_INT_DYN_CTLN_INTENA_MSK_M |
+ FIELD_PREP(VF_INT_DYN_CTLN_ITR_INDX_M, IDPF_NO_ITR_UPDATE_IDX);
+ vport->noirq_dyn_ctl_ena = val;
+
free_reg_vals:
kfree(reg_vals);
@@ -123,7 +141,7 @@ free_reg_vals:
*/
static void idpf_vf_reset_reg_init(struct idpf_adapter *adapter)
{
- adapter->reset_reg.rstat = idpf_get_reg_addr(adapter, VFGEN_RSTAT);
+ adapter->reset_reg.rstat = idpf_get_rstat_reg_addr(adapter, VFGEN_RSTAT);
adapter->reset_reg.rstat_m = VFGEN_RSTAT_VFR_STATE_M;
}
@@ -142,6 +160,17 @@ static void idpf_vf_trigger_reset(struct idpf_adapter *adapter,
}
/**
+ * idpf_idc_vf_register - register for IDC callbacks
+ * @adapter: Driver specific private structure
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int idpf_idc_vf_register(struct idpf_adapter *adapter)
+{
+ return idpf_idc_init_aux_core_dev(adapter, IIDC_FUNCTION_TYPE_VF);
+}
+
+/**
* idpf_vf_reg_ops_init - Initialize register API function pointers
* @adapter: Driver specific private structure
*/
@@ -161,4 +190,11 @@ static void idpf_vf_reg_ops_init(struct idpf_adapter *adapter)
void idpf_vf_dev_ops_init(struct idpf_adapter *adapter)
{
idpf_vf_reg_ops_init(adapter);
+
+ adapter->dev_ops.idc_init = idpf_idc_vf_register;
+
+ resource_set_range(&adapter->dev_ops.static_reg_info[0],
+ VF_BASE, IDPF_VF_MBX_REGION_SZ);
+ resource_set_range(&adapter->dev_ops.static_reg_info[1],
+ VFGEN_RSTAT, IDPF_VF_RSTAT_REGION_SZ);
}
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
index a5f9b7a5effe..44cd4b466c48 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
@@ -1,90 +1,12 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */
+#include <linux/export.h>
+#include <net/libeth/rx.h>
+
#include "idpf.h"
#include "idpf_virtchnl.h"
-
-#define IDPF_VC_XN_MIN_TIMEOUT_MSEC 2000
-#define IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC (60 * 1000)
-#define IDPF_VC_XN_IDX_M GENMASK(7, 0)
-#define IDPF_VC_XN_SALT_M GENMASK(15, 8)
-#define IDPF_VC_XN_RING_LEN U8_MAX
-
-/**
- * enum idpf_vc_xn_state - Virtchnl transaction status
- * @IDPF_VC_XN_IDLE: not expecting a reply, ready to be used
- * @IDPF_VC_XN_WAITING: expecting a reply, not yet received
- * @IDPF_VC_XN_COMPLETED_SUCCESS: a reply was expected and received,
- * buffer updated
- * @IDPF_VC_XN_COMPLETED_FAILED: a reply was expected and received, but there
- * was an error, buffer not updated
- * @IDPF_VC_XN_SHUTDOWN: transaction object cannot be used, VC torn down
- * @IDPF_VC_XN_ASYNC: transaction sent asynchronously and doesn't have the
- * return context; a callback may be provided to handle
- * return
- */
-enum idpf_vc_xn_state {
- IDPF_VC_XN_IDLE = 1,
- IDPF_VC_XN_WAITING,
- IDPF_VC_XN_COMPLETED_SUCCESS,
- IDPF_VC_XN_COMPLETED_FAILED,
- IDPF_VC_XN_SHUTDOWN,
- IDPF_VC_XN_ASYNC,
-};
-
-struct idpf_vc_xn;
-/* Callback for asynchronous messages */
-typedef int (*async_vc_cb) (struct idpf_adapter *, struct idpf_vc_xn *,
- const struct idpf_ctlq_msg *);
-
-/**
- * struct idpf_vc_xn - Data structure representing virtchnl transactions
- * @completed: virtchnl event loop uses that to signal when a reply is
- * available, uses kernel completion API
- * @state: virtchnl event loop stores the data below, protected by the
- * completion's lock.
- * @reply_sz: Original size of reply, may be > reply_buf.iov_len; it will be
- * truncated on its way to the receiver thread according to
- * reply_buf.iov_len.
- * @reply: Reference to the buffer(s) where the reply data should be written
- * to. May be 0-length (then NULL address permitted) if the reply data
- * should be ignored.
- * @async_handler: if sent asynchronously, a callback can be provided to handle
- * the reply when it's received
- * @vc_op: corresponding opcode sent with this transaction
- * @idx: index used as retrieval on reply receive, used for cookie
- * @salt: changed every message to make unique, used for cookie
- */
-struct idpf_vc_xn {
- struct completion completed;
- enum idpf_vc_xn_state state;
- size_t reply_sz;
- struct kvec reply;
- async_vc_cb async_handler;
- u32 vc_op;
- u8 idx;
- u8 salt;
-};
-
-/**
- * struct idpf_vc_xn_params - Parameters for executing transaction
- * @send_buf: kvec for send buffer
- * @recv_buf: kvec for recv buffer, may be NULL, must then have zero length
- * @timeout_ms: timeout to wait for reply
- * @async: send message asynchronously, will not wait on completion
- * @async_handler: If sent asynchronously, optional callback handler. The user
- * must be careful when using async handlers as the memory for
- * the recv_buf _cannot_ be on stack if this is async.
- * @vc_op: virtchnl op to send
- */
-struct idpf_vc_xn_params {
- struct kvec send_buf;
- struct kvec recv_buf;
- int timeout_ms;
- bool async;
- async_vc_cb async_handler;
- u32 vc_op;
-};
+#include "idpf_ptp.h"
/**
* struct idpf_vc_xn_manager - Manager for tracking transactions
@@ -139,14 +61,14 @@ static void idpf_handle_event_link(struct idpf_adapter *adapter,
}
np = netdev_priv(vport->netdev);
- vport->link_speed_mbps = le32_to_cpu(v2e->link_speed);
+ np->link_speed_mbps = le32_to_cpu(v2e->link_speed);
if (vport->link_up == v2e->link_status)
return;
vport->link_up = v2e->link_status;
- if (np->state != __IDPF_VPORT_UP)
+ if (!test_bit(IDPF_VPORT_UP, np->state))
return;
if (vport->link_up) {
@@ -233,6 +155,55 @@ err_kfree:
return err;
}
+#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
+/**
+ * idpf_ptp_is_mb_msg - Check if the message is PTP-related
+ * @op: virtchnl opcode
+ *
+ * Return: true if msg is PTP-related, false otherwise.
+ */
+static bool idpf_ptp_is_mb_msg(u32 op)
+{
+ switch (op) {
+ case VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME:
+ case VIRTCHNL2_OP_PTP_GET_CROSS_TIME:
+ case VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME:
+ case VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE:
+ case VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME:
+ case VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS:
+ case VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * idpf_prepare_ptp_mb_msg - Prepare PTP related message
+ *
+ * @adapter: Driver specific private structure
+ * @op: virtchnl opcode
+ * @ctlq_msg: Corresponding control queue message
+ */
+static void idpf_prepare_ptp_mb_msg(struct idpf_adapter *adapter, u32 op,
+ struct idpf_ctlq_msg *ctlq_msg)
+{
+ /* If the message is PTP-related and the secondary mailbox is available,
+ * send the message through the secondary mailbox.
+ */
+ if (!idpf_ptp_is_mb_msg(op) || !adapter->ptp->secondary_mbx.valid)
+ return;
+
+ ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_peer_drv;
+ ctlq_msg->func_id = adapter->ptp->secondary_mbx.peer_mbx_q_id;
+ ctlq_msg->host_id = adapter->ptp->secondary_mbx.peer_id;
+}
+#else /* !CONFIG_PTP_1588_CLOCK */
+static void idpf_prepare_ptp_mb_msg(struct idpf_adapter *adapter, u32 op,
+ struct idpf_ctlq_msg *ctlq_msg)
+{ }
+#endif /* CONFIG_PTP_1588_CLOCK */
+
/**
* idpf_send_mb_msg - Send message over mailbox
* @adapter: Driver specific private structure
@@ -276,6 +247,9 @@ int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_cp;
ctlq_msg->func_id = 0;
+
+ idpf_prepare_ptp_mb_msg(adapter, op, ctlq_msg);
+
ctlq_msg->data_len = msg_size;
ctlq_msg->cookie.mbx.chnl_opcode = op;
ctlq_msg->cookie.mbx.chnl_retval = 0;
@@ -374,7 +348,7 @@ static void idpf_vc_xn_init(struct idpf_vc_xn_manager *vcxn_mngr)
* All waiting threads will be woken-up and their transaction aborted. Further
* operations on that object will fail.
*/
-static void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr)
+void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr)
{
int i;
@@ -447,8 +421,8 @@ static void idpf_vc_xn_push_free(struct idpf_vc_xn_manager *vcxn_mngr,
* >= @recv_buf.iov_len, but we never overflow @recv_buf.iov_base). < 0 for
* error.
*/
-static ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter,
- const struct idpf_vc_xn_params *params)
+ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter,
+ const struct idpf_vc_xn_params *params)
{
const struct kvec *send_buf = &params->send_buf;
struct idpf_vc_xn *xn;
@@ -515,8 +489,10 @@ static ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter,
retval = -ENXIO;
goto only_unlock;
case IDPF_VC_XN_WAITING:
- dev_notice_ratelimited(&adapter->pdev->dev, "Transaction timed-out (op %d, %dms)\n",
- params->vc_op, params->timeout_ms);
+ dev_notice_ratelimited(&adapter->pdev->dev,
+ "Transaction timed-out (op:%d cookie:%04x vc_op:%d salt:%02x timeout:%dms)\n",
+ params->vc_op, cookie, xn->vc_op,
+ xn->salt, params->timeout_ms);
retval = -ETIME;
break;
case IDPF_VC_XN_COMPLETED_SUCCESS:
@@ -610,14 +586,16 @@ idpf_vc_xn_forward_reply(struct idpf_adapter *adapter,
return -EINVAL;
}
xn = &adapter->vcxn_mngr->ring[xn_idx];
+ idpf_vc_xn_lock(xn);
salt = FIELD_GET(IDPF_VC_XN_SALT_M, msg_info);
if (xn->salt != salt) {
- dev_err_ratelimited(&adapter->pdev->dev, "Transaction salt does not match (%02x != %02x)\n",
- xn->salt, salt);
+ dev_err_ratelimited(&adapter->pdev->dev, "Transaction salt does not match (exp:%d@%02x(%d) != got:%d@%02x)\n",
+ xn->vc_op, xn->salt, xn->state,
+ ctlq_msg->cookie.mbx.chnl_opcode, salt);
+ idpf_vc_xn_unlock(xn);
return -EINVAL;
}
- idpf_vc_xn_lock(xn);
switch (xn->state) {
case IDPF_VC_XN_WAITING:
/* success */
@@ -664,7 +642,7 @@ idpf_vc_xn_forward_reply(struct idpf_adapter *adapter,
if (ctlq_msg->data_len) {
payload = ctlq_msg->ctx.indirect.payload->va;
- payload_size = ctlq_msg->ctx.indirect.payload->size;
+ payload_size = ctlq_msg->data_len;
}
xn->reply_sz = payload_size;
@@ -724,9 +702,9 @@ int idpf_recv_mb_msg(struct idpf_adapter *adapter)
/* If post failed clear the only buffer we supplied */
if (post_err) {
if (dma_mem)
- dmam_free_coherent(&adapter->pdev->dev,
- dma_mem->size, dma_mem->va,
- dma_mem->pa);
+ dma_free_coherent(&adapter->pdev->dev,
+ dma_mem->size, dma_mem->va,
+ dma_mem->pa);
break;
}
@@ -738,34 +716,145 @@ int idpf_recv_mb_msg(struct idpf_adapter *adapter)
return err;
}
+struct idpf_chunked_msg_params {
+ u32 (*prepare_msg)(const struct idpf_vport *vport,
+ void *buf, const void *pos,
+ u32 num);
+
+ const void *chunks;
+ u32 num_chunks;
+
+ u32 chunk_sz;
+ u32 config_sz;
+
+ u32 vc_op;
+};
+
+struct idpf_queue_set *idpf_alloc_queue_set(struct idpf_vport *vport, u32 num)
+{
+ struct idpf_queue_set *qp;
+
+ qp = kzalloc(struct_size(qp, qs, num), GFP_KERNEL);
+ if (!qp)
+ return NULL;
+
+ qp->vport = vport;
+ qp->num = num;
+
+ return qp;
+}
+
/**
- * idpf_wait_for_marker_event - wait for software marker response
+ * idpf_send_chunked_msg - send VC message consisting of chunks
* @vport: virtual port data structure
+ * @params: message params
*
- * Returns 0 success, negative on failure.
- **/
-static int idpf_wait_for_marker_event(struct idpf_vport *vport)
+ * Helper function for preparing and sending a message describing queues to
+ * be enabled or disabled, split into as many mailbox-sized chunks as needed.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int idpf_send_chunked_msg(struct idpf_vport *vport,
+ const struct idpf_chunked_msg_params *params)
{
- int event;
- int i;
+ struct idpf_vc_xn_params xn_params = {
+ .vc_op = params->vc_op,
+ .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
+ };
+ const void *pos = params->chunks;
+ u32 num_chunks, num_msgs, buf_sz;
+ void *buf __free(kfree) = NULL;
+ u32 totqs = params->num_chunks;
- for (i = 0; i < vport->num_txq; i++)
- set_bit(__IDPF_Q_SW_MARKER, vport->txqs[i]->flags);
+ num_chunks = min(IDPF_NUM_CHUNKS_PER_MSG(params->config_sz,
+ params->chunk_sz), totqs);
+ num_msgs = DIV_ROUND_UP(totqs, num_chunks);
+
+ buf_sz = params->config_sz + num_chunks * params->chunk_sz;
+ buf = kzalloc(buf_sz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
- event = wait_event_timeout(vport->sw_marker_wq,
- test_and_clear_bit(IDPF_VPORT_SW_MARKER,
- vport->flags),
- msecs_to_jiffies(500));
+ xn_params.send_buf.iov_base = buf;
- for (i = 0; i < vport->num_txq; i++)
- clear_bit(__IDPF_Q_POLL_MODE, vport->txqs[i]->flags);
+ for (u32 i = 0; i < num_msgs; i++) {
+ ssize_t reply_sz;
- if (event)
- return 0;
+ memset(buf, 0, buf_sz);
+ xn_params.send_buf.iov_len = buf_sz;
+
+ if (params->prepare_msg(vport, buf, pos, num_chunks) != buf_sz)
+ return -EINVAL;
+
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+
+ pos += num_chunks * params->chunk_sz;
+ totqs -= num_chunks;
+
+ num_chunks = min(num_chunks, totqs);
+ buf_sz = params->config_sz + num_chunks * params->chunk_sz;
+ }
+
+ return 0;
+}
+
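idpf_send_chunked_msg() splits a queue-list message into as many mailbox buffers as fit, given the fixed header size, the per-chunk size and a prepare callback. A hypothetical caller sketch: the callback name is illustrative, and the virtchnl2 struct names are assumed from the existing enable/disable path:

	struct idpf_chunked_msg_params params = {
		.vc_op		= VIRTCHNL2_OP_ENABLE_QUEUES,
		.prepare_msg	= idpf_prep_ena_dis_qs_msg,	/* illustrative */
		.config_sz	= sizeof(struct virtchnl2_del_ena_dis_queues),
		.chunk_sz	= sizeof(struct virtchnl2_queue_chunk),
		.chunks		= qcs,
		.num_chunks	= num_qs,
	};

	return idpf_send_chunked_msg(vport, &params);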
+/**
+ * idpf_wait_for_marker_event_set - wait for software marker response for
+ * selected Tx queues
+ * @qs: set of the Tx queues
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+static int idpf_wait_for_marker_event_set(const struct idpf_queue_set *qs)
+{
+ struct idpf_tx_queue *txq;
+ bool markers_rcvd = true;
+
+ for (u32 i = 0; i < qs->num; i++) {
+ switch (qs->qs[i].type) {
+ case VIRTCHNL2_QUEUE_TYPE_TX:
+ txq = qs->qs[i].txq;
+
+ idpf_queue_set(SW_MARKER, txq);
+ idpf_wait_for_sw_marker_completion(txq);
+ markers_rcvd &= !idpf_queue_has(SW_MARKER, txq);
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (!markers_rcvd) {
+ netdev_warn(qs->vport->netdev,
+ "Failed to receive marker packets\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/**
+ * idpf_wait_for_marker_event - wait for software marker response
+ * @vport: virtual port data structure
+ *
+ * Return: 0 on success, negative on failure.
+ **/
+static int idpf_wait_for_marker_event(struct idpf_vport *vport)
+{
+ struct idpf_queue_set *qs __free(kfree) = NULL;
+
+ qs = idpf_alloc_queue_set(vport, vport->num_txq);
+ if (!qs)
+ return -ENOMEM;
- dev_warn(&vport->adapter->pdev->dev, "Failed to receive marker packets\n");
+ for (u32 i = 0; i < qs->num; i++) {
+ qs->qs[i].type = VIRTCHNL2_QUEUE_TYPE_TX;
+ qs->qs[i].txq = vport->txqs[i];
+ }
- return -ETIMEDOUT;
+ return idpf_wait_for_marker_event_set(qs);
}
/**
@@ -872,14 +961,14 @@ static int idpf_send_get_caps_msg(struct idpf_adapter *adapter)
VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL);
caps.rss_caps =
- cpu_to_le64(VIRTCHNL2_CAP_RSS_IPV4_TCP |
- VIRTCHNL2_CAP_RSS_IPV4_UDP |
- VIRTCHNL2_CAP_RSS_IPV4_SCTP |
- VIRTCHNL2_CAP_RSS_IPV4_OTHER |
- VIRTCHNL2_CAP_RSS_IPV6_TCP |
- VIRTCHNL2_CAP_RSS_IPV6_UDP |
- VIRTCHNL2_CAP_RSS_IPV6_SCTP |
- VIRTCHNL2_CAP_RSS_IPV6_OTHER);
+ cpu_to_le64(VIRTCHNL2_FLOW_IPV4_TCP |
+ VIRTCHNL2_FLOW_IPV4_UDP |
+ VIRTCHNL2_FLOW_IPV4_SCTP |
+ VIRTCHNL2_FLOW_IPV4_OTHER |
+ VIRTCHNL2_FLOW_IPV6_TCP |
+ VIRTCHNL2_FLOW_IPV6_UDP |
+ VIRTCHNL2_FLOW_IPV6_SCTP |
+ VIRTCHNL2_FLOW_IPV6_OTHER);
caps.hsplit_caps =
cpu_to_le32(VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4 |
@@ -891,10 +980,13 @@ static int idpf_send_get_caps_msg(struct idpf_adapter *adapter)
caps.other_caps =
cpu_to_le64(VIRTCHNL2_CAP_SRIOV |
+ VIRTCHNL2_CAP_RDMA |
+ VIRTCHNL2_CAP_LAN_MEMORY_REGIONS |
VIRTCHNL2_CAP_MACFILTER |
VIRTCHNL2_CAP_SPLITQ_QSCHED |
VIRTCHNL2_CAP_PROMISC |
- VIRTCHNL2_CAP_LOOPBACK);
+ VIRTCHNL2_CAP_LOOPBACK |
+ VIRTCHNL2_CAP_PTP);
xn_params.vc_op = VIRTCHNL2_OP_GET_CAPS;
xn_params.send_buf.iov_base = &caps;
@@ -913,6 +1005,163 @@ static int idpf_send_get_caps_msg(struct idpf_adapter *adapter)
}
/**
+ * idpf_send_get_lan_memory_regions - Send virtchnl get LAN memory regions msg
+ * @adapter: Driver specific private struct
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int idpf_send_get_lan_memory_regions(struct idpf_adapter *adapter)
+{
+ struct virtchnl2_get_lan_memory_regions *rcvd_regions __free(kfree);
+ struct idpf_vc_xn_params xn_params = {
+ .vc_op = VIRTCHNL2_OP_GET_LAN_MEMORY_REGIONS,
+ .recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN,
+ .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
+ };
+ int num_regions, size;
+ struct idpf_hw *hw;
+ ssize_t reply_sz;
+ int err = 0;
+
+ rcvd_regions = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!rcvd_regions)
+ return -ENOMEM;
+
+ xn_params.recv_buf.iov_base = rcvd_regions;
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+
+ num_regions = le16_to_cpu(rcvd_regions->num_memory_regions);
+ size = struct_size(rcvd_regions, mem_reg, num_regions);
+ if (reply_sz < size)
+ return -EIO;
+
+ if (size > IDPF_CTLQ_MAX_BUF_LEN)
+ return -EINVAL;
+
+ hw = &adapter->hw;
+ hw->lan_regs = kcalloc(num_regions, sizeof(*hw->lan_regs), GFP_KERNEL);
+ if (!hw->lan_regs)
+ return -ENOMEM;
+
+ for (int i = 0; i < num_regions; i++) {
+ hw->lan_regs[i].addr_len =
+ le64_to_cpu(rcvd_regions->mem_reg[i].size);
+ hw->lan_regs[i].addr_start =
+ le64_to_cpu(rcvd_regions->mem_reg[i].start_offset);
+ }
+ hw->num_lan_regs = num_regions;
+
+ return err;
+}
+
+/**
+ * idpf_calc_remaining_mmio_regs - calculate MMIO regions outside mbx and rstat
+ * @adapter: Driver specific private structure
+ *
+ * Called when idpf_send_get_lan_memory_regions is not supported. This will
+ * calculate the offsets and sizes for the regions before, in between, and
+ * after the mailbox and rstat MMIO mappings.
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int idpf_calc_remaining_mmio_regs(struct idpf_adapter *adapter)
+{
+ struct resource *rstat_reg = &adapter->dev_ops.static_reg_info[1];
+ struct resource *mbx_reg = &adapter->dev_ops.static_reg_info[0];
+ struct idpf_hw *hw = &adapter->hw;
+
+ hw->num_lan_regs = IDPF_MMIO_MAP_FALLBACK_MAX_REMAINING;
+ hw->lan_regs = kcalloc(hw->num_lan_regs, sizeof(*hw->lan_regs),
+ GFP_KERNEL);
+ if (!hw->lan_regs)
+ return -ENOMEM;
+
+ /* Region preceding mailbox */
+ hw->lan_regs[0].addr_start = 0;
+ hw->lan_regs[0].addr_len = mbx_reg->start;
+ /* Region between mailbox and rstat */
+ hw->lan_regs[1].addr_start = mbx_reg->end + 1;
+ hw->lan_regs[1].addr_len = rstat_reg->start -
+ hw->lan_regs[1].addr_start;
+ /* Region after rstat */
+ hw->lan_regs[2].addr_start = rstat_reg->end + 1;
+ hw->lan_regs[2].addr_len = pci_resource_len(adapter->pdev, 0) -
+ hw->lan_regs[2].addr_start;
+
+ return 0;
+}
+
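With the fallback, BAR0 is split into three windows around the two statically mapped regions. A hypothetical layout sketch; the bounds are expressed symbolically, not taken from the hardware spec:

	/*
	 *   lan_regs[0]: [0, mbx_reg->start)                  - before the mailbox window
	 *   lan_regs[1]: [mbx_reg->end + 1, rstat_reg->start) - between mailbox and rstat
	 *   lan_regs[2]: [rstat_reg->end + 1, BAR0 length)    - after rstat
	 */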
+/**
+ * idpf_map_lan_mmio_regs - map remaining LAN BAR regions
+ * @adapter: Driver specific private structure
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int idpf_map_lan_mmio_regs(struct idpf_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct idpf_hw *hw = &adapter->hw;
+ resource_size_t res_start;
+
+ res_start = pci_resource_start(pdev, 0);
+
+ for (int i = 0; i < hw->num_lan_regs; i++) {
+ resource_size_t start;
+ long len;
+
+ len = hw->lan_regs[i].addr_len;
+ if (!len)
+ continue;
+ start = hw->lan_regs[i].addr_start + res_start;
+
+ hw->lan_regs[i].vaddr = devm_ioremap(&pdev->dev, start, len);
+ if (!hw->lan_regs[i].vaddr) {
+ pci_err(pdev, "failed to allocate BAR0 region\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * idpf_add_del_fsteer_filters - Send virtchnl add/del Flow Steering message
+ * @adapter: adapter info struct
+ * @rule: Flow steering rule to add/delete
+ * @opcode: VIRTCHNL2_OP_ADD_FLOW_RULE to add filter, or
+ * VIRTCHNL2_OP_DEL_FLOW_RULE to delete. All other values are invalid.
+ *
+ * Send ADD/DELETE flow steering virtchnl message and receive the result.
+ *
+ * Return: 0 on success, negative on failure.
+ */
+int idpf_add_del_fsteer_filters(struct idpf_adapter *adapter,
+ struct virtchnl2_flow_rule_add_del *rule,
+ enum virtchnl2_op opcode)
+{
+ int rule_count = le32_to_cpu(rule->count);
+ struct idpf_vc_xn_params xn_params = {};
+ ssize_t reply_sz;
+
+ if (opcode != VIRTCHNL2_OP_ADD_FLOW_RULE &&
+ opcode != VIRTCHNL2_OP_DEL_FLOW_RULE)
+ return -EINVAL;
+
+ xn_params.vc_op = opcode;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ xn_params.async = false;
+ xn_params.send_buf.iov_base = rule;
+ xn_params.send_buf.iov_len = struct_size(rule, rule_info, rule_count);
+ xn_params.recv_buf.iov_base = rule;
+ xn_params.recv_buf.iov_len = struct_size(rule, rule_info, rule_count);
+
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ return reply_sz < 0 ? reply_sz : 0;
+}
+
+/**
* idpf_vport_alloc_max_qs - Allocate max queues for a vport
* @adapter: Driver specific private structure
* @max_q: vport max queue structure
@@ -923,21 +1172,35 @@ int idpf_vport_alloc_max_qs(struct idpf_adapter *adapter,
struct idpf_avail_queue_info *avail_queues = &adapter->avail_queues;
struct virtchnl2_get_capabilities *caps = &adapter->caps;
u16 default_vports = idpf_get_default_vports(adapter);
- int max_rx_q, max_tx_q;
+ u32 max_rx_q, max_tx_q, max_buf_q, max_compl_q;
mutex_lock(&adapter->queue_lock);
+ /* Caps are device-wide. Give each vport an equal piece */
max_rx_q = le16_to_cpu(caps->max_rx_q) / default_vports;
max_tx_q = le16_to_cpu(caps->max_tx_q) / default_vports;
- if (adapter->num_alloc_vports < default_vports) {
- max_q->max_rxq = min_t(u16, max_rx_q, IDPF_MAX_Q);
- max_q->max_txq = min_t(u16, max_tx_q, IDPF_MAX_Q);
- } else {
- max_q->max_rxq = IDPF_MIN_Q;
- max_q->max_txq = IDPF_MIN_Q;
+ max_buf_q = le16_to_cpu(caps->max_rx_bufq) / default_vports;
+ max_compl_q = le16_to_cpu(caps->max_tx_complq) / default_vports;
+
+ if (adapter->num_alloc_vports >= default_vports) {
+ max_rx_q = IDPF_MIN_Q;
+ max_tx_q = IDPF_MIN_Q;
}
- max_q->max_bufq = max_q->max_rxq * IDPF_MAX_BUFQS_PER_RXQ_GRP;
- max_q->max_complq = max_q->max_txq;
+
+ /*
+ * Harmonize the numbers. The current implementation always creates
+ * `IDPF_MAX_BUFQS_PER_RXQ_GRP` buffer queues for each Rx queue and
+ * one completion queue for each Tx queue for best performance.
+	 * If fewer buffer or completion queues are available, cap the number
+ * of the corresponding Rx/Tx queues.
+ */
+ max_rx_q = min(max_rx_q, max_buf_q / IDPF_MAX_BUFQS_PER_RXQ_GRP);
+ max_tx_q = min(max_tx_q, max_compl_q);
+
+ max_q->max_rxq = max_rx_q;
+ max_q->max_txq = max_tx_q;
+ max_q->max_bufq = max_rx_q * IDPF_MAX_BUFQS_PER_RXQ_GRP;
+ max_q->max_complq = max_tx_q;
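+
+	/*
+	 * Worked example (illustrative numbers only): if the device caps give
+	 * this vport max_rx_q = 16, max_buf_q = 24, max_tx_q = 16 and
+	 * max_compl_q = 8, and assuming IDPF_MAX_BUFQS_PER_RXQ_GRP == 2, the
+	 * capping above yields max_rxq = min(16, 24 / 2) = 12, max_bufq = 24,
+	 * max_txq = min(16, 8) = 8 and max_complq = 8.
+	 */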
if (avail_queues->avail_rxq < max_q->max_rxq ||
avail_queues->avail_txq < max_q->max_txq ||
@@ -1092,7 +1355,6 @@ static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals,
int num_regs, u32 q_type)
{
struct idpf_adapter *adapter = vport->adapter;
- struct idpf_queue *q;
int i, j, k = 0;
switch (q_type) {
@@ -1111,6 +1373,8 @@ static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals,
u16 num_rxq = rx_qgrp->singleq.num_rxq;
for (j = 0; j < num_rxq && k < num_regs; j++, k++) {
+ struct idpf_rx_queue *q;
+
q = rx_qgrp->singleq.rxqs[j];
q->tail = idpf_get_reg_addr(adapter,
reg_vals[k]);
@@ -1123,6 +1387,8 @@ static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals,
u8 num_bufqs = vport->num_bufqs_per_qgrp;
for (j = 0; j < num_bufqs && k < num_regs; j++, k++) {
+ struct idpf_buf_queue *q;
+
q = &rx_qgrp->splitq.bufq_sets[j].bufq;
q->tail = idpf_get_reg_addr(adapter,
reg_vals[k]);
@@ -1253,12 +1519,12 @@ int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
vport_msg->vport_type = cpu_to_le16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
vport_msg->vport_index = cpu_to_le16(idx);
- if (adapter->req_tx_splitq)
+ if (adapter->req_tx_splitq || !IS_ENABLED(CONFIG_IDPF_SINGLEQ))
vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
else
vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
- if (adapter->req_rx_splitq)
+ if (adapter->req_rx_splitq || !IS_ENABLED(CONFIG_IDPF_SINGLEQ))
vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
else
vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
@@ -1290,10 +1556,6 @@ int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
err = reply_sz;
goto free_vport_params;
}
- if (reply_sz < IDPF_CTLQ_MAX_BUF_LEN) {
- err = -EIO;
- goto free_vport_params;
- }
return 0;
@@ -1320,10 +1582,17 @@ int idpf_check_supported_desc_ids(struct idpf_vport *vport)
vport_msg = adapter->vport_params_recvd[vport->idx];
+ if (!IS_ENABLED(CONFIG_IDPF_SINGLEQ) &&
+ (vport_msg->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE ||
+ vport_msg->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)) {
+ pci_err(adapter->pdev, "singleq mode requested, but not compiled-in\n");
+ return -EOPNOTSUPP;
+ }
+
rx_desc_ids = le64_to_cpu(vport_msg->rx_desc_ids);
tx_desc_ids = le64_to_cpu(vport_msg->tx_desc_ids);
- if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
+ if (idpf_is_queue_model_split(vport->rxq_model)) {
if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M)) {
dev_info(&adapter->pdev->dev, "Minimum RX descriptor support not provided, using the default\n");
vport_msg->rx_desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
@@ -1333,7 +1602,7 @@ int idpf_check_supported_desc_ids(struct idpf_vport *vport)
vport->base_rxd = true;
}
- if (vport->txq_model != VIRTCHNL2_QUEUE_MODEL_SPLIT)
+ if (!idpf_is_queue_model_split(vport->txq_model))
return 0;
if ((tx_desc_ids & MIN_SUPPORT_TXDID) != MIN_SUPPORT_TXDID) {
@@ -1362,7 +1631,7 @@ int idpf_send_destroy_vport_msg(struct idpf_vport *vport)
xn_params.vc_op = VIRTCHNL2_OP_DESTROY_VPORT;
xn_params.send_buf.iov_base = &v_id;
xn_params.send_buf.iov_len = sizeof(v_id);
- xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
return reply_sz < 0 ? reply_sz : 0;
@@ -1410,223 +1679,368 @@ int idpf_send_disable_vport_msg(struct idpf_vport *vport)
xn_params.vc_op = VIRTCHNL2_OP_DISABLE_VPORT;
xn_params.send_buf.iov_base = &v_id;
xn_params.send_buf.iov_len = sizeof(v_id);
- xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
return reply_sz < 0 ? reply_sz : 0;
}
/**
- * idpf_send_config_tx_queues_msg - Send virtchnl config tx queues message
+ * idpf_fill_txq_config_chunk - fill chunk describing the Tx queue
+ * @vport: virtual port data structure
+ * @q: Tx queue to be inserted into VC chunk
+ * @qi: pointer to the buffer containing the VC chunk
+ */
+static void idpf_fill_txq_config_chunk(const struct idpf_vport *vport,
+ const struct idpf_tx_queue *q,
+ struct virtchnl2_txq_info *qi)
+{
+ u32 val;
+
+ qi->queue_id = cpu_to_le32(q->q_id);
+ qi->model = cpu_to_le16(vport->txq_model);
+ qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
+ qi->ring_len = cpu_to_le16(q->desc_count);
+ qi->dma_ring_addr = cpu_to_le64(q->dma);
+ qi->relative_queue_id = cpu_to_le16(q->rel_q_id);
+
+ if (!idpf_is_queue_model_split(vport->txq_model)) {
+ qi->sched_mode = cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_QUEUE);
+ return;
+ }
+
+ if (idpf_queue_has(XDP, q))
+ val = q->complq->q_id;
+ else
+ val = q->txq_grp->complq->q_id;
+
+ qi->tx_compl_queue_id = cpu_to_le16(val);
+
+ if (idpf_queue_has(FLOW_SCH_EN, q))
+ val = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
+ else
+ val = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
+
+ qi->sched_mode = cpu_to_le16(val);
+}
+
+/**
+ * idpf_fill_complq_config_chunk - fill chunk describing the completion queue
* @vport: virtual port data structure
+ * @q: completion queue to be inserted into VC chunk
+ * @qi: pointer to the buffer containing the VC chunk
+ */
+static void idpf_fill_complq_config_chunk(const struct idpf_vport *vport,
+ const struct idpf_compl_queue *q,
+ struct virtchnl2_txq_info *qi)
+{
+ u32 val;
+
+ qi->queue_id = cpu_to_le32(q->q_id);
+ qi->model = cpu_to_le16(vport->txq_model);
+ qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);
+ qi->ring_len = cpu_to_le16(q->desc_count);
+ qi->dma_ring_addr = cpu_to_le64(q->dma);
+
+ if (idpf_queue_has(FLOW_SCH_EN, q))
+ val = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
+ else
+ val = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
+
+ qi->sched_mode = cpu_to_le16(val);
+}
+
+/**
+ * idpf_prepare_cfg_txqs_msg - prepare message to configure selected Tx queues
+ * @vport: virtual port data structure
+ * @buf: buffer containing the message
+ * @pos: pointer to the first chunk describing the tx queue
+ * @num_chunks: number of chunks in the message
*
- * Send config tx queues virtchnl message. Returns 0 on success, negative on
- * failure.
+ * Helper function for preparing the message describing configuration of
+ * Tx queues.
+ *
+ * Return: the total size of the prepared message.
*/
-static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
+static u32 idpf_prepare_cfg_txqs_msg(const struct idpf_vport *vport,
+ void *buf, const void *pos,
+ u32 num_chunks)
+{
+ struct virtchnl2_config_tx_queues *ctq = buf;
+
+ ctq->vport_id = cpu_to_le32(vport->vport_id);
+ ctq->num_qinfo = cpu_to_le16(num_chunks);
+ memcpy(ctq->qinfo, pos, num_chunks * sizeof(*ctq->qinfo));
+
+ return struct_size(ctq, qinfo, num_chunks);
+}
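+
+/*
+ * Sizing note (illustrative, not from this patch): idpf_send_chunked_msg()
+ * splits the chunk array into as many messages as needed so that each one
+ * fits in a single mailbox buffer. For example, assuming a 4096-byte
+ * IDPF_CTLQ_MAX_BUF_LEN, a 32-byte struct virtchnl2_config_tx_queues header
+ * and 56-byte struct virtchnl2_txq_info chunks, each message could carry
+ * (4096 - 32) / 56 = 72 queue chunks, so 100 Tx/completion queues would be
+ * configured with two messages. The real struct sizes come from virtchnl2.h.
+ */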
+
+/**
+ * idpf_send_config_tx_queue_set_msg - send virtchnl config Tx queues
+ * message for selected queues
+ * @qs: set of the Tx queues to configure
+ *
+ * Send config queues virtchnl message for queues contained in the @qs array.
+ * The @qs array can contain Tx queues (or completion queues) only.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+static int idpf_send_config_tx_queue_set_msg(const struct idpf_queue_set *qs)
{
- struct virtchnl2_config_tx_queues *ctq __free(kfree) = NULL;
struct virtchnl2_txq_info *qi __free(kfree) = NULL;
- struct idpf_vc_xn_params xn_params = {};
- u32 config_sz, chunk_sz, buf_sz;
- int totqs, num_msgs, num_chunks;
- ssize_t reply_sz;
- int i, k = 0;
+ struct idpf_chunked_msg_params params = {
+ .vc_op = VIRTCHNL2_OP_CONFIG_TX_QUEUES,
+ .prepare_msg = idpf_prepare_cfg_txqs_msg,
+ .config_sz = sizeof(struct virtchnl2_config_tx_queues),
+ .chunk_sz = sizeof(*qi),
+ };
- totqs = vport->num_txq + vport->num_complq;
- qi = kcalloc(totqs, sizeof(struct virtchnl2_txq_info), GFP_KERNEL);
+ qi = kcalloc(qs->num, sizeof(*qi), GFP_KERNEL);
if (!qi)
return -ENOMEM;
- /* Populate the queue info buffer with all queue context info */
- for (i = 0; i < vport->num_txq_grp; i++) {
- struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
- int j, sched_mode;
-
- for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
- qi[k].queue_id =
- cpu_to_le32(tx_qgrp->txqs[j]->q_id);
- qi[k].model =
- cpu_to_le16(vport->txq_model);
- qi[k].type =
- cpu_to_le32(tx_qgrp->txqs[j]->q_type);
- qi[k].ring_len =
- cpu_to_le16(tx_qgrp->txqs[j]->desc_count);
- qi[k].dma_ring_addr =
- cpu_to_le64(tx_qgrp->txqs[j]->dma);
- if (idpf_is_queue_model_split(vport->txq_model)) {
- struct idpf_queue *q = tx_qgrp->txqs[j];
-
- qi[k].tx_compl_queue_id =
- cpu_to_le16(tx_qgrp->complq->q_id);
- qi[k].relative_queue_id = cpu_to_le16(j);
-
- if (test_bit(__IDPF_Q_FLOW_SCH_EN, q->flags))
- qi[k].sched_mode =
- cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_FLOW);
- else
- qi[k].sched_mode =
- cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_QUEUE);
- } else {
- qi[k].sched_mode =
- cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_QUEUE);
- }
- }
+ params.chunks = qi;
- if (!idpf_is_queue_model_split(vport->txq_model))
- continue;
+ for (u32 i = 0; i < qs->num; i++) {
+ if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_TX)
+ idpf_fill_txq_config_chunk(qs->vport, qs->qs[i].txq,
+ &qi[params.num_chunks++]);
+ else if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION)
+ idpf_fill_complq_config_chunk(qs->vport,
+ qs->qs[i].complq,
+ &qi[params.num_chunks++]);
+ }
- qi[k].queue_id = cpu_to_le32(tx_qgrp->complq->q_id);
- qi[k].model = cpu_to_le16(vport->txq_model);
- qi[k].type = cpu_to_le32(tx_qgrp->complq->q_type);
- qi[k].ring_len = cpu_to_le16(tx_qgrp->complq->desc_count);
- qi[k].dma_ring_addr = cpu_to_le64(tx_qgrp->complq->dma);
+ return idpf_send_chunked_msg(qs->vport, &params);
+}
- if (test_bit(__IDPF_Q_FLOW_SCH_EN, tx_qgrp->complq->flags))
- sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
- else
- sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
- qi[k].sched_mode = cpu_to_le16(sched_mode);
+/**
+ * idpf_send_config_tx_queues_msg - send virtchnl config Tx queues message
+ * @vport: virtual port data structure
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
+{
+ struct idpf_queue_set *qs __free(kfree) = NULL;
+ u32 totqs = vport->num_txq + vport->num_complq;
+ u32 k = 0;
+
+ qs = idpf_alloc_queue_set(vport, totqs);
+ if (!qs)
+ return -ENOMEM;
+
+ /* Populate the queue info buffer with all queue context info */
+ for (u32 i = 0; i < vport->num_txq_grp; i++) {
+ const struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+
+ for (u32 j = 0; j < tx_qgrp->num_txq; j++) {
+ qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX;
+ qs->qs[k++].txq = tx_qgrp->txqs[j];
+ }
- k++;
+ if (idpf_is_queue_model_split(vport->txq_model)) {
+ qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ qs->qs[k++].complq = tx_qgrp->complq;
+ }
}
/* Make sure accounting agrees */
if (k != totqs)
return -EINVAL;
- /* Chunk up the queue contexts into multiple messages to avoid
- * sending a control queue message buffer that is too large
- */
- config_sz = sizeof(struct virtchnl2_config_tx_queues);
- chunk_sz = sizeof(struct virtchnl2_txq_info);
+ return idpf_send_config_tx_queue_set_msg(qs);
+}
- num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz),
- totqs);
- num_msgs = DIV_ROUND_UP(totqs, num_chunks);
+/**
+ * idpf_fill_rxq_config_chunk - fill chunk describing the Rx queue
+ * @vport: virtual port data structure
+ * @q: Rx queue to be inserted into VC chunk
+ * @qi: pointer to the buffer containing the VC chunk
+ */
+static void idpf_fill_rxq_config_chunk(const struct idpf_vport *vport,
+ struct idpf_rx_queue *q,
+ struct virtchnl2_rxq_info *qi)
+{
+ const struct idpf_bufq_set *sets;
+
+ qi->queue_id = cpu_to_le32(q->q_id);
+ qi->model = cpu_to_le16(vport->rxq_model);
+ qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
+ qi->ring_len = cpu_to_le16(q->desc_count);
+ qi->dma_ring_addr = cpu_to_le64(q->dma);
+ qi->max_pkt_size = cpu_to_le32(q->rx_max_pkt_size);
+ qi->rx_buffer_low_watermark = cpu_to_le16(q->rx_buffer_low_watermark);
+ qi->qflags = cpu_to_le16(VIRTCHNL2_RX_DESC_SIZE_32BYTE);
+ if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW))
+ qi->qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC);
+
+ if (!idpf_is_queue_model_split(vport->rxq_model)) {
+ qi->data_buffer_size = cpu_to_le32(q->rx_buf_size);
+ qi->desc_ids = cpu_to_le64(q->rxdids);
- buf_sz = struct_size(ctq, qinfo, num_chunks);
- ctq = kzalloc(buf_sz, GFP_KERNEL);
- if (!ctq)
- return -ENOMEM;
+ return;
+ }
- xn_params.vc_op = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
- xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ sets = q->bufq_sets;
- for (i = 0, k = 0; i < num_msgs; i++) {
- memset(ctq, 0, buf_sz);
- ctq->vport_id = cpu_to_le32(vport->vport_id);
- ctq->num_qinfo = cpu_to_le16(num_chunks);
- memcpy(ctq->qinfo, &qi[k], chunk_sz * num_chunks);
+ /*
+ * In splitq mode, RxQ buffer size should be set to that of the first
+ * buffer queue associated with this RxQ.
+ */
+ q->rx_buf_size = sets[0].bufq.rx_buf_size;
+ qi->data_buffer_size = cpu_to_le32(q->rx_buf_size);
- xn_params.send_buf.iov_base = ctq;
- xn_params.send_buf.iov_len = buf_sz;
- reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
- if (reply_sz < 0)
- return reply_sz;
+ qi->rx_bufq1_id = cpu_to_le16(sets[0].bufq.q_id);
+ if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) {
+ qi->bufq2_ena = IDPF_BUFQ2_ENA;
+ qi->rx_bufq2_id = cpu_to_le16(sets[1].bufq.q_id);
+ }
- k += num_chunks;
- totqs -= num_chunks;
- num_chunks = min(num_chunks, totqs);
- /* Recalculate buffer size */
- buf_sz = struct_size(ctq, qinfo, num_chunks);
+ q->rx_hbuf_size = sets[0].bufq.rx_hbuf_size;
+
+ if (idpf_queue_has(HSPLIT_EN, q)) {
+ qi->qflags |= cpu_to_le16(VIRTCHNL2_RXQ_HDR_SPLIT);
+ qi->hdr_buffer_size = cpu_to_le16(q->rx_hbuf_size);
}
- return 0;
+ qi->desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
}
/**
- * idpf_send_config_rx_queues_msg - Send virtchnl config rx queues message
+ * idpf_fill_bufq_config_chunk - fill chunk describing the buffer queue
* @vport: virtual port data structure
+ * @q: buffer queue to be inserted into VC chunk
+ * @qi: pointer to the buffer containing the VC chunk
+ */
+static void idpf_fill_bufq_config_chunk(const struct idpf_vport *vport,
+ const struct idpf_buf_queue *q,
+ struct virtchnl2_rxq_info *qi)
+{
+ qi->queue_id = cpu_to_le32(q->q_id);
+ qi->model = cpu_to_le16(vport->rxq_model);
+ qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
+ qi->ring_len = cpu_to_le16(q->desc_count);
+ qi->dma_ring_addr = cpu_to_le64(q->dma);
+ qi->data_buffer_size = cpu_to_le32(q->rx_buf_size);
+ qi->rx_buffer_low_watermark = cpu_to_le16(q->rx_buffer_low_watermark);
+ qi->desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
+ qi->buffer_notif_stride = IDPF_RX_BUF_STRIDE;
+ if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW))
+ qi->qflags = cpu_to_le16(VIRTCHNL2_RXQ_RSC);
+
+ if (idpf_queue_has(HSPLIT_EN, q)) {
+ qi->qflags |= cpu_to_le16(VIRTCHNL2_RXQ_HDR_SPLIT);
+ qi->hdr_buffer_size = cpu_to_le16(q->rx_hbuf_size);
+ }
+}
+
+/**
+ * idpf_prepare_cfg_rxqs_msg - prepare message to configure selected Rx queues
+ * @vport: virtual port data structure
+ * @buf: buffer containing the message
+ * @pos: pointer to the first chunk describing the rx queue
+ * @num_chunks: number of chunks in the message
*
- * Send config rx queues virtchnl message. Returns 0 on success, negative on
- * failure.
+ * Helper function for preparing the message describing configuration of
+ * Rx queues.
+ *
+ * Return: the total size of the prepared message.
*/
-static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport)
+static u32 idpf_prepare_cfg_rxqs_msg(const struct idpf_vport *vport,
+ void *buf, const void *pos,
+ u32 num_chunks)
+{
+ struct virtchnl2_config_rx_queues *crq = buf;
+
+ crq->vport_id = cpu_to_le32(vport->vport_id);
+ crq->num_qinfo = cpu_to_le16(num_chunks);
+ memcpy(crq->qinfo, pos, num_chunks * sizeof(*crq->qinfo));
+
+ return struct_size(crq, qinfo, num_chunks);
+}
+
+/**
+ * idpf_send_config_rx_queue_set_msg - send virtchnl config Rx queues message
+ * for selected queues
+ * @qs: set of the Rx queues to configure
+ *
+ * Send config queues virtchnl message for queues contained in the @qs array.
+ * The @qs array can contain Rx queues (or buffer queues) only.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+static int idpf_send_config_rx_queue_set_msg(const struct idpf_queue_set *qs)
{
- struct virtchnl2_config_rx_queues *crq __free(kfree) = NULL;
struct virtchnl2_rxq_info *qi __free(kfree) = NULL;
- struct idpf_vc_xn_params xn_params = {};
- u32 config_sz, chunk_sz, buf_sz;
- int totqs, num_msgs, num_chunks;
- ssize_t reply_sz;
- int i, k = 0;
+ struct idpf_chunked_msg_params params = {
+ .vc_op = VIRTCHNL2_OP_CONFIG_RX_QUEUES,
+ .prepare_msg = idpf_prepare_cfg_rxqs_msg,
+ .config_sz = sizeof(struct virtchnl2_config_rx_queues),
+ .chunk_sz = sizeof(*qi),
+ };
- totqs = vport->num_rxq + vport->num_bufq;
- qi = kcalloc(totqs, sizeof(struct virtchnl2_rxq_info), GFP_KERNEL);
+ qi = kcalloc(qs->num, sizeof(*qi), GFP_KERNEL);
if (!qi)
return -ENOMEM;
+ params.chunks = qi;
+
+ for (u32 i = 0; i < qs->num; i++) {
+ if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_RX)
+ idpf_fill_rxq_config_chunk(qs->vport, qs->qs[i].rxq,
+ &qi[params.num_chunks++]);
+ else if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_RX_BUFFER)
+ idpf_fill_bufq_config_chunk(qs->vport, qs->qs[i].bufq,
+ &qi[params.num_chunks++]);
+ }
+
+ return idpf_send_chunked_msg(qs->vport, &params);
+}
+
+/**
+ * idpf_send_config_rx_queues_msg - send virtchnl config Rx queues message
+ * @vport: virtual port data structure
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport)
+{
+ bool splitq = idpf_is_queue_model_split(vport->rxq_model);
+ struct idpf_queue_set *qs __free(kfree) = NULL;
+ u32 totqs = vport->num_rxq + vport->num_bufq;
+ u32 k = 0;
+
+ qs = idpf_alloc_queue_set(vport, totqs);
+ if (!qs)
+ return -ENOMEM;
+
/* Populate the queue info buffer with all queue context info */
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
- u16 num_rxq;
- int j;
-
- if (!idpf_is_queue_model_split(vport->rxq_model))
- goto setup_rxqs;
-
- for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) {
- struct idpf_queue *bufq =
- &rx_qgrp->splitq.bufq_sets[j].bufq;
-
- qi[k].queue_id = cpu_to_le32(bufq->q_id);
- qi[k].model = cpu_to_le16(vport->rxq_model);
- qi[k].type = cpu_to_le32(bufq->q_type);
- qi[k].desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
- qi[k].ring_len = cpu_to_le16(bufq->desc_count);
- qi[k].dma_ring_addr = cpu_to_le64(bufq->dma);
- qi[k].data_buffer_size = cpu_to_le32(bufq->rx_buf_size);
- qi[k].buffer_notif_stride = bufq->rx_buf_stride;
- qi[k].rx_buffer_low_watermark =
- cpu_to_le16(bufq->rx_buffer_low_watermark);
- if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW))
- qi[k].qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC);
- }
+ for (u32 i = 0; i < vport->num_rxq_grp; i++) {
+ const struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ u32 num_rxq;
-setup_rxqs:
- if (idpf_is_queue_model_split(vport->rxq_model))
- num_rxq = rx_qgrp->splitq.num_rxq_sets;
- else
+ if (!splitq) {
num_rxq = rx_qgrp->singleq.num_rxq;
+ goto rxq;
+ }
- for (j = 0; j < num_rxq; j++, k++) {
- struct idpf_queue *rxq;
+ for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+ qs->qs[k++].bufq = &rx_qgrp->splitq.bufq_sets[j].bufq;
+ }
- if (!idpf_is_queue_model_split(vport->rxq_model)) {
- rxq = rx_qgrp->singleq.rxqs[j];
- goto common_qi_fields;
- }
- rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq;
- qi[k].rx_bufq1_id =
- cpu_to_le16(rxq->rxq_grp->splitq.bufq_sets[0].bufq.q_id);
- if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) {
- qi[k].bufq2_ena = IDPF_BUFQ2_ENA;
- qi[k].rx_bufq2_id =
- cpu_to_le16(rxq->rxq_grp->splitq.bufq_sets[1].bufq.q_id);
- }
- qi[k].rx_buffer_low_watermark =
- cpu_to_le16(rxq->rx_buffer_low_watermark);
- if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW))
- qi[k].qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC);
-
-common_qi_fields:
- if (rxq->rx_hsplit_en) {
- qi[k].qflags |=
- cpu_to_le16(VIRTCHNL2_RXQ_HDR_SPLIT);
- qi[k].hdr_buffer_size =
- cpu_to_le16(rxq->rx_hbuf_size);
- }
- qi[k].queue_id = cpu_to_le32(rxq->q_id);
- qi[k].model = cpu_to_le16(vport->rxq_model);
- qi[k].type = cpu_to_le32(rxq->q_type);
- qi[k].ring_len = cpu_to_le16(rxq->desc_count);
- qi[k].dma_ring_addr = cpu_to_le64(rxq->dma);
- qi[k].max_pkt_size = cpu_to_le32(rxq->rx_max_pkt_size);
- qi[k].data_buffer_size = cpu_to_le32(rxq->rx_buf_size);
- qi[k].qflags |=
- cpu_to_le16(VIRTCHNL2_RX_DESC_SIZE_32BYTE);
- qi[k].desc_ids = cpu_to_le64(rxq->rxdids);
+ num_rxq = rx_qgrp->splitq.num_rxq_sets;
+
+rxq:
+ for (u32 j = 0; j < num_rxq; j++) {
+ qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX;
+
+ if (splitq)
+ qs->qs[k++].rxq =
+ &rx_qgrp->splitq.rxq_sets[j]->rxq;
+ else
+ qs->qs[k++].rxq = rx_qgrp->singleq.rxqs[j];
}
}
@@ -1634,314 +2048,395 @@ common_qi_fields:
if (k != totqs)
return -EINVAL;
- /* Chunk up the queue contexts into multiple messages to avoid
- * sending a control queue message buffer that is too large
- */
- config_sz = sizeof(struct virtchnl2_config_rx_queues);
- chunk_sz = sizeof(struct virtchnl2_rxq_info);
+ return idpf_send_config_rx_queue_set_msg(qs);
+}
- num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz),
- totqs);
- num_msgs = DIV_ROUND_UP(totqs, num_chunks);
+/**
+ * idpf_prepare_ena_dis_qs_msg - prepare message to enable/disable selected
+ * queues
+ * @vport: virtual port data structure
+ * @buf: buffer containing the message
+ * @pos: pointer to the first chunk describing the queue
+ * @num_chunks: number of chunks in the message
+ *
+ * Helper function for preparing the message describing queues to be enabled
+ * or disabled.
+ *
+ * Return: the total size of the prepared message.
+ */
+static u32 idpf_prepare_ena_dis_qs_msg(const struct idpf_vport *vport,
+ void *buf, const void *pos,
+ u32 num_chunks)
+{
+ struct virtchnl2_del_ena_dis_queues *eq = buf;
- buf_sz = struct_size(crq, qinfo, num_chunks);
- crq = kzalloc(buf_sz, GFP_KERNEL);
- if (!crq)
+ eq->vport_id = cpu_to_le32(vport->vport_id);
+ eq->chunks.num_chunks = cpu_to_le16(num_chunks);
+ memcpy(eq->chunks.chunks, pos,
+ num_chunks * sizeof(*eq->chunks.chunks));
+
+ return struct_size(eq, chunks.chunks, num_chunks);
+}
+
+/**
+ * idpf_send_ena_dis_queue_set_msg - send virtchnl enable or disable queues
+ * message for selected queues
+ * @qs: set of the queues to enable or disable
+ * @en: whether to enable or disable queues
+ *
+ * Send enable or disable queues virtchnl message for queues contained
+ * in the @qs array.
+ * The @qs array can contain pointers to both Rx and Tx queues.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+static int idpf_send_ena_dis_queue_set_msg(const struct idpf_queue_set *qs,
+ bool en)
+{
+ struct virtchnl2_queue_chunk *qc __free(kfree) = NULL;
+ struct idpf_chunked_msg_params params = {
+ .vc_op = en ? VIRTCHNL2_OP_ENABLE_QUEUES :
+ VIRTCHNL2_OP_DISABLE_QUEUES,
+ .prepare_msg = idpf_prepare_ena_dis_qs_msg,
+ .config_sz = sizeof(struct virtchnl2_del_ena_dis_queues),
+ .chunk_sz = sizeof(*qc),
+ .num_chunks = qs->num,
+ };
+
+ qc = kcalloc(qs->num, sizeof(*qc), GFP_KERNEL);
+ if (!qc)
return -ENOMEM;
- xn_params.vc_op = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
- xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ params.chunks = qc;
- for (i = 0, k = 0; i < num_msgs; i++) {
- memset(crq, 0, buf_sz);
- crq->vport_id = cpu_to_le32(vport->vport_id);
- crq->num_qinfo = cpu_to_le16(num_chunks);
- memcpy(crq->qinfo, &qi[k], chunk_sz * num_chunks);
+ for (u32 i = 0; i < qs->num; i++) {
+ const struct idpf_queue_ptr *q = &qs->qs[i];
+ u32 qid;
- xn_params.send_buf.iov_base = crq;
- xn_params.send_buf.iov_len = buf_sz;
- reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
- if (reply_sz < 0)
- return reply_sz;
+ qc[i].type = cpu_to_le32(q->type);
+ qc[i].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
- k += num_chunks;
- totqs -= num_chunks;
- num_chunks = min(num_chunks, totqs);
- /* Recalculate buffer size */
- buf_sz = struct_size(crq, qinfo, num_chunks);
+ switch (q->type) {
+ case VIRTCHNL2_QUEUE_TYPE_RX:
+ qid = q->rxq->q_id;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX:
+ qid = q->txq->q_id;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
+ qid = q->bufq->q_id;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
+ qid = q->complq->q_id;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ qc[i].start_queue_id = cpu_to_le32(qid);
}
- return 0;
+ return idpf_send_chunked_msg(qs->vport, &params);
}
/**
- * idpf_send_ena_dis_queues_msg - Send virtchnl enable or disable
- * queues message
+ * idpf_send_ena_dis_queues_msg - send virtchnl enable or disable queues
+ * message
* @vport: virtual port data structure
- * @ena: if true enable, false disable
+ * @en: whether to enable or disable queues
*
- * Send enable or disable queues virtchnl message. Returns 0 on success,
- * negative on failure.
+ * Return: 0 on success, -errno on failure.
*/
-static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool ena)
+static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool en)
{
- struct virtchnl2_del_ena_dis_queues *eq __free(kfree) = NULL;
- struct virtchnl2_queue_chunk *qc __free(kfree) = NULL;
- u32 num_msgs, num_chunks, num_txq, num_rxq, num_q;
- struct idpf_vc_xn_params xn_params = {};
- struct virtchnl2_queue_chunks *qcs;
- u32 config_sz, chunk_sz, buf_sz;
- ssize_t reply_sz;
- int i, j, k = 0;
+ struct idpf_queue_set *qs __free(kfree) = NULL;
+ u32 num_txq, num_q, k = 0;
+ bool split;
num_txq = vport->num_txq + vport->num_complq;
- num_rxq = vport->num_rxq + vport->num_bufq;
- num_q = num_txq + num_rxq;
- buf_sz = sizeof(struct virtchnl2_queue_chunk) * num_q;
- qc = kzalloc(buf_sz, GFP_KERNEL);
- if (!qc)
+ num_q = num_txq + vport->num_rxq + vport->num_bufq;
+
+ qs = idpf_alloc_queue_set(vport, num_q);
+ if (!qs)
return -ENOMEM;
- for (i = 0; i < vport->num_txq_grp; i++) {
- struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+ split = idpf_is_queue_model_split(vport->txq_model);
- for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
- qc[k].type = cpu_to_le32(tx_qgrp->txqs[j]->q_type);
- qc[k].start_queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id);
- qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
- }
- }
- if (vport->num_txq != k)
- return -EINVAL;
+ for (u32 i = 0; i < vport->num_txq_grp; i++) {
+ const struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
- if (!idpf_is_queue_model_split(vport->txq_model))
- goto setup_rx;
+ for (u32 j = 0; j < tx_qgrp->num_txq; j++) {
+ qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX;
+ qs->qs[k++].txq = tx_qgrp->txqs[j];
+ }
- for (i = 0; i < vport->num_txq_grp; i++, k++) {
- struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+ if (!split)
+ continue;
- qc[k].type = cpu_to_le32(tx_qgrp->complq->q_type);
- qc[k].start_queue_id = cpu_to_le32(tx_qgrp->complq->q_id);
- qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
+ qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ qs->qs[k++].complq = tx_qgrp->complq;
}
- if (vport->num_complq != (k - vport->num_txq))
+
+ if (k != num_txq)
return -EINVAL;
-setup_rx:
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ split = idpf_is_queue_model_split(vport->rxq_model);
- if (idpf_is_queue_model_split(vport->rxq_model))
+ for (u32 i = 0; i < vport->num_rxq_grp; i++) {
+ const struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ u32 num_rxq;
+
+ if (split)
num_rxq = rx_qgrp->splitq.num_rxq_sets;
else
num_rxq = rx_qgrp->singleq.num_rxq;
- for (j = 0; j < num_rxq; j++, k++) {
- if (idpf_is_queue_model_split(vport->rxq_model)) {
- qc[k].start_queue_id =
- cpu_to_le32(rx_qgrp->splitq.rxq_sets[j]->rxq.q_id);
- qc[k].type =
- cpu_to_le32(rx_qgrp->splitq.rxq_sets[j]->rxq.q_type);
- } else {
- qc[k].start_queue_id =
- cpu_to_le32(rx_qgrp->singleq.rxqs[j]->q_id);
- qc[k].type =
- cpu_to_le32(rx_qgrp->singleq.rxqs[j]->q_type);
- }
- qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
- }
- }
- if (vport->num_rxq != k - (vport->num_txq + vport->num_complq))
- return -EINVAL;
-
- if (!idpf_is_queue_model_split(vport->rxq_model))
- goto send_msg;
+ for (u32 j = 0; j < num_rxq; j++) {
+ qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX;
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ if (split)
+ qs->qs[k++].rxq =
+ &rx_qgrp->splitq.rxq_sets[j]->rxq;
+ else
+ qs->qs[k++].rxq = rx_qgrp->singleq.rxqs[j];
+ }
- for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) {
- struct idpf_queue *q;
+ if (!split)
+ continue;
- q = &rx_qgrp->splitq.bufq_sets[j].bufq;
- qc[k].type = cpu_to_le32(q->q_type);
- qc[k].start_queue_id = cpu_to_le32(q->q_id);
- qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
+ for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+ qs->qs[k++].bufq = &rx_qgrp->splitq.bufq_sets[j].bufq;
}
}
- if (vport->num_bufq != k - (vport->num_txq +
- vport->num_complq +
- vport->num_rxq))
+
+ if (k != num_q)
return -EINVAL;
-send_msg:
- /* Chunk up the queue info into multiple messages */
- config_sz = sizeof(struct virtchnl2_del_ena_dis_queues);
- chunk_sz = sizeof(struct virtchnl2_queue_chunk);
+ return idpf_send_ena_dis_queue_set_msg(qs, en);
+}
- num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz),
- num_q);
- num_msgs = DIV_ROUND_UP(num_q, num_chunks);
+/**
+ * idpf_prep_map_unmap_queue_set_vector_msg - prepare message to map or unmap
+ * queue set to the interrupt vector
+ * @vport: virtual port data structure
+ * @buf: buffer containing the message
+ * @pos: pointer to the first chunk describing the vector mapping
+ * @num_chunks: number of chunks in the message
+ *
+ * Helper function for preparing the message describing mapping queues to
+ * q_vectors.
+ *
+ * Return: the total size of the prepared message.
+ */
+static u32
+idpf_prep_map_unmap_queue_set_vector_msg(const struct idpf_vport *vport,
+ void *buf, const void *pos,
+ u32 num_chunks)
+{
+ struct virtchnl2_queue_vector_maps *vqvm = buf;
- buf_sz = struct_size(eq, chunks.chunks, num_chunks);
- eq = kzalloc(buf_sz, GFP_KERNEL);
- if (!eq)
+ vqvm->vport_id = cpu_to_le32(vport->vport_id);
+ vqvm->num_qv_maps = cpu_to_le16(num_chunks);
+ memcpy(vqvm->qv_maps, pos, num_chunks * sizeof(*vqvm->qv_maps));
+
+ return struct_size(vqvm, qv_maps, num_chunks);
+}
+
+/**
+ * idpf_send_map_unmap_queue_set_vector_msg - send virtchnl map or unmap
+ * queue set vector message
+ * @qs: set of the queues to map or unmap
+ * @map: true for map and false for unmap
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+static int
+idpf_send_map_unmap_queue_set_vector_msg(const struct idpf_queue_set *qs,
+ bool map)
+{
+ struct virtchnl2_queue_vector *vqv __free(kfree) = NULL;
+ struct idpf_chunked_msg_params params = {
+ .vc_op = map ? VIRTCHNL2_OP_MAP_QUEUE_VECTOR :
+ VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR,
+ .prepare_msg = idpf_prep_map_unmap_queue_set_vector_msg,
+ .config_sz = sizeof(struct virtchnl2_queue_vector_maps),
+ .chunk_sz = sizeof(*vqv),
+ .num_chunks = qs->num,
+ };
+ bool split;
+
+ vqv = kcalloc(qs->num, sizeof(*vqv), GFP_KERNEL);
+ if (!vqv)
return -ENOMEM;
- if (ena) {
- xn_params.vc_op = VIRTCHNL2_OP_ENABLE_QUEUES;
- xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
- } else {
- xn_params.vc_op = VIRTCHNL2_OP_DISABLE_QUEUES;
- xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
- }
+ params.chunks = vqv;
- for (i = 0, k = 0; i < num_msgs; i++) {
- memset(eq, 0, buf_sz);
- eq->vport_id = cpu_to_le32(vport->vport_id);
- eq->chunks.num_chunks = cpu_to_le16(num_chunks);
- qcs = &eq->chunks;
- memcpy(qcs->chunks, &qc[k], chunk_sz * num_chunks);
+ split = idpf_is_queue_model_split(qs->vport->txq_model);
- xn_params.send_buf.iov_base = eq;
- xn_params.send_buf.iov_len = buf_sz;
- reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
- if (reply_sz < 0)
- return reply_sz;
+ for (u32 i = 0; i < qs->num; i++) {
+ const struct idpf_queue_ptr *q = &qs->qs[i];
+ const struct idpf_q_vector *vec;
+ u32 qid, v_idx, itr_idx;
+
+ vqv[i].queue_type = cpu_to_le32(q->type);
+
+ switch (q->type) {
+ case VIRTCHNL2_QUEUE_TYPE_RX:
+ qid = q->rxq->q_id;
+
+ if (idpf_queue_has(NOIRQ, q->rxq))
+ vec = NULL;
+ else
+ vec = q->rxq->q_vector;
+
+ if (vec) {
+ v_idx = vec->v_idx;
+ itr_idx = vec->rx_itr_idx;
+ } else {
+ v_idx = qs->vport->noirq_v_idx;
+ itr_idx = VIRTCHNL2_ITR_IDX_0;
+ }
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX:
+ qid = q->txq->q_id;
+
+ if (idpf_queue_has(NOIRQ, q->txq))
+ vec = NULL;
+ else if (idpf_queue_has(XDP, q->txq))
+ vec = q->txq->complq->q_vector;
+ else if (split)
+ vec = q->txq->txq_grp->complq->q_vector;
+ else
+ vec = q->txq->q_vector;
- k += num_chunks;
- num_q -= num_chunks;
- num_chunks = min(num_chunks, num_q);
- /* Recalculate buffer size */
- buf_sz = struct_size(eq, chunks.chunks, num_chunks);
+ if (vec) {
+ v_idx = vec->v_idx;
+ itr_idx = vec->tx_itr_idx;
+ } else {
+ v_idx = qs->vport->noirq_v_idx;
+ itr_idx = VIRTCHNL2_ITR_IDX_1;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ vqv[i].queue_id = cpu_to_le32(qid);
+ vqv[i].vector_id = cpu_to_le16(v_idx);
+ vqv[i].itr_idx = cpu_to_le32(itr_idx);
}
- return 0;
+ return idpf_send_chunked_msg(qs->vport, &params);
}
/**
- * idpf_send_map_unmap_queue_vector_msg - Send virtchnl map or unmap queue
- * vector message
+ * idpf_send_map_unmap_queue_vector_msg - send virtchnl map or unmap queue
+ * vector message
* @vport: virtual port data structure
* @map: true for map and false for unmap
*
- * Send map or unmap queue vector virtchnl message. Returns 0 on success,
- * negative on failure.
+ * Return: 0 on success, -errno on failure.
*/
int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
{
- struct virtchnl2_queue_vector_maps *vqvm __free(kfree) = NULL;
- struct virtchnl2_queue_vector *vqv __free(kfree) = NULL;
- struct idpf_vc_xn_params xn_params = {};
- u32 config_sz, chunk_sz, buf_sz;
- u32 num_msgs, num_chunks, num_q;
- ssize_t reply_sz;
- int i, j, k = 0;
+ struct idpf_queue_set *qs __free(kfree) = NULL;
+ u32 num_q = vport->num_txq + vport->num_rxq;
+ u32 k = 0;
- num_q = vport->num_txq + vport->num_rxq;
-
- buf_sz = sizeof(struct virtchnl2_queue_vector) * num_q;
- vqv = kzalloc(buf_sz, GFP_KERNEL);
- if (!vqv)
+ qs = idpf_alloc_queue_set(vport, num_q);
+ if (!qs)
return -ENOMEM;
- for (i = 0; i < vport->num_txq_grp; i++) {
- struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+ for (u32 i = 0; i < vport->num_txq_grp; i++) {
+ const struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
- for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
- vqv[k].queue_type = cpu_to_le32(tx_qgrp->txqs[j]->q_type);
- vqv[k].queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id);
-
- if (idpf_is_queue_model_split(vport->txq_model)) {
- vqv[k].vector_id =
- cpu_to_le16(tx_qgrp->complq->q_vector->v_idx);
- vqv[k].itr_idx =
- cpu_to_le32(tx_qgrp->complq->q_vector->tx_itr_idx);
- } else {
- vqv[k].vector_id =
- cpu_to_le16(tx_qgrp->txqs[j]->q_vector->v_idx);
- vqv[k].itr_idx =
- cpu_to_le32(tx_qgrp->txqs[j]->q_vector->tx_itr_idx);
- }
+ for (u32 j = 0; j < tx_qgrp->num_txq; j++) {
+ qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX;
+ qs->qs[k++].txq = tx_qgrp->txqs[j];
}
}
- if (vport->num_txq != k)
+ if (k != vport->num_txq)
return -EINVAL;
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
- u16 num_rxq;
+ for (u32 i = 0; i < vport->num_rxq_grp; i++) {
+ const struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ u32 num_rxq;
if (idpf_is_queue_model_split(vport->rxq_model))
num_rxq = rx_qgrp->splitq.num_rxq_sets;
else
num_rxq = rx_qgrp->singleq.num_rxq;
- for (j = 0; j < num_rxq; j++, k++) {
- struct idpf_queue *rxq;
+ for (u32 j = 0; j < num_rxq; j++) {
+ qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX;
if (idpf_is_queue_model_split(vport->rxq_model))
- rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq;
+ qs->qs[k++].rxq =
+ &rx_qgrp->splitq.rxq_sets[j]->rxq;
else
- rxq = rx_qgrp->singleq.rxqs[j];
-
- vqv[k].queue_type = cpu_to_le32(rxq->q_type);
- vqv[k].queue_id = cpu_to_le32(rxq->q_id);
- vqv[k].vector_id = cpu_to_le16(rxq->q_vector->v_idx);
- vqv[k].itr_idx = cpu_to_le32(rxq->q_vector->rx_itr_idx);
+ qs->qs[k++].rxq = rx_qgrp->singleq.rxqs[j];
}
}
- if (idpf_is_queue_model_split(vport->txq_model)) {
- if (vport->num_rxq != k - vport->num_complq)
- return -EINVAL;
- } else {
- if (vport->num_rxq != k - vport->num_txq)
- return -EINVAL;
- }
+ if (k != num_q)
+ return -EINVAL;
- /* Chunk up the vector info into multiple messages */
- config_sz = sizeof(struct virtchnl2_queue_vector_maps);
- chunk_sz = sizeof(struct virtchnl2_queue_vector);
+ return idpf_send_map_unmap_queue_set_vector_msg(qs, map);
+}
- num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz),
- num_q);
- num_msgs = DIV_ROUND_UP(num_q, num_chunks);
+/**
+ * idpf_send_enable_queue_set_msg - send enable queues virtchnl message for
+ * selected queues
+ * @qs: set of the queues
+ *
+ * Send enable queues virtchnl message for queues contained in the @qs array.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+int idpf_send_enable_queue_set_msg(const struct idpf_queue_set *qs)
+{
+ return idpf_send_ena_dis_queue_set_msg(qs, true);
+}
- buf_sz = struct_size(vqvm, qv_maps, num_chunks);
- vqvm = kzalloc(buf_sz, GFP_KERNEL);
- if (!vqvm)
- return -ENOMEM;
+/**
+ * idpf_send_disable_queue_set_msg - send disable queues virtchnl message for
+ * selected queues
+ * @qs: set of the queues
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+int idpf_send_disable_queue_set_msg(const struct idpf_queue_set *qs)
+{
+ int err;
- if (map) {
- xn_params.vc_op = VIRTCHNL2_OP_MAP_QUEUE_VECTOR;
- xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
- } else {
- xn_params.vc_op = VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR;
- xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
- }
+ err = idpf_send_ena_dis_queue_set_msg(qs, false);
+ if (err)
+ return err;
- for (i = 0, k = 0; i < num_msgs; i++) {
- memset(vqvm, 0, buf_sz);
- xn_params.send_buf.iov_base = vqvm;
- xn_params.send_buf.iov_len = buf_sz;
- vqvm->vport_id = cpu_to_le32(vport->vport_id);
- vqvm->num_qv_maps = cpu_to_le16(num_chunks);
- memcpy(vqvm->qv_maps, &vqv[k], chunk_sz * num_chunks);
+ return idpf_wait_for_marker_event_set(qs);
+}
- reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
- if (reply_sz < 0)
- return reply_sz;
+/**
+ * idpf_send_config_queue_set_msg - send virtchnl config queues message for
+ * selected queues
+ * @qs: set of the queues
+ *
+ * Send config queues virtchnl message for queues contained in the @qs array.
+ * The @qs array can contain both Rx and Tx queues.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+int idpf_send_config_queue_set_msg(const struct idpf_queue_set *qs)
+{
+ int err;
- k += num_chunks;
- num_q -= num_chunks;
- num_chunks = min(num_chunks, num_q);
- /* Recalculate buffer size */
- buf_sz = struct_size(vqvm, qv_maps, num_chunks);
- }
+ err = idpf_send_config_tx_queue_set_msg(qs);
+ if (err)
+ return err;
- return 0;
+ return idpf_send_config_rx_queue_set_msg(qs);
}
/**
@@ -1965,24 +2460,12 @@ int idpf_send_enable_queues_msg(struct idpf_vport *vport)
*/
int idpf_send_disable_queues_msg(struct idpf_vport *vport)
{
- int err, i;
+ int err;
err = idpf_send_ena_dis_queues_msg(vport, false);
if (err)
return err;
- /* switch to poll mode as interrupts will be disabled after disable
- * queues virtchnl message is sent
- */
- for (i = 0; i < vport->num_txq; i++)
- set_bit(__IDPF_Q_POLL_MODE, vport->txqs[i]->flags);
-
- /* schedule the napi to receive all the marker packets */
- local_bh_disable();
- for (i = 0; i < vport->num_q_vectors; i++)
- napi_schedule(&vport->q_vectors[i].napi);
- local_bh_enable();
-
return idpf_wait_for_marker_event(vport);
}
@@ -2047,7 +2530,7 @@ int idpf_send_delete_queues_msg(struct idpf_vport *vport)
num_chunks);
xn_params.vc_op = VIRTCHNL2_OP_DEL_QUEUES;
- xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
xn_params.send_buf.iov_base = eq;
xn_params.send_buf.iov_len = buf_size;
reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
@@ -2211,7 +2694,7 @@ int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter)
xn_params.vc_op = VIRTCHNL2_OP_DEALLOC_VECTORS;
xn_params.send_buf.iov_base = vcs;
xn_params.send_buf.iov_len = buf_size;
- xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
if (reply_sz < 0)
return reply_sz;
@@ -2272,7 +2755,7 @@ int idpf_send_get_stats_msg(struct idpf_vport *vport)
/* Don't send get_stats message if the link is down */
- if (np->state <= __IDPF_VPORT_DOWN)
+ if (!test_bit(IDPF_VPORT_UP, np->state))
return 0;
stats_msg.vport_id = cpu_to_le32(vport->vport_id);
@@ -2469,39 +2952,52 @@ do_memcpy:
* @frag: fragmentation allowed
*
*/
-static void idpf_fill_ptype_lookup(struct idpf_rx_ptype_decoded *ptype,
+static void idpf_fill_ptype_lookup(struct libeth_rx_pt *ptype,
struct idpf_ptype_state *pstate,
bool ipv4, bool frag)
{
if (!pstate->outer_ip || !pstate->outer_frag) {
- ptype->outer_ip = IDPF_RX_PTYPE_OUTER_IP;
pstate->outer_ip = true;
if (ipv4)
- ptype->outer_ip_ver = IDPF_RX_PTYPE_OUTER_IPV4;
+ ptype->outer_ip = LIBETH_RX_PT_OUTER_IPV4;
else
- ptype->outer_ip_ver = IDPF_RX_PTYPE_OUTER_IPV6;
+ ptype->outer_ip = LIBETH_RX_PT_OUTER_IPV6;
if (frag) {
- ptype->outer_frag = IDPF_RX_PTYPE_FRAG;
+ ptype->outer_frag = LIBETH_RX_PT_FRAG;
pstate->outer_frag = true;
}
} else {
- ptype->tunnel_type = IDPF_RX_PTYPE_TUNNEL_IP_IP;
+ ptype->tunnel_type = LIBETH_RX_PT_TUNNEL_IP_IP;
pstate->tunnel_state = IDPF_PTYPE_TUNNEL_IP;
if (ipv4)
- ptype->tunnel_end_prot =
- IDPF_RX_PTYPE_TUNNEL_END_IPV4;
+ ptype->tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_IPV4;
else
- ptype->tunnel_end_prot =
- IDPF_RX_PTYPE_TUNNEL_END_IPV6;
+ ptype->tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_IPV6;
if (frag)
- ptype->tunnel_end_frag = IDPF_RX_PTYPE_FRAG;
+ ptype->tunnel_end_frag = LIBETH_RX_PT_FRAG;
}
}
+static void idpf_finalize_ptype_lookup(struct libeth_rx_pt *ptype)
+{
+ if (ptype->payload_layer == LIBETH_RX_PT_PAYLOAD_L2 &&
+ ptype->inner_prot)
+ ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L4;
+ else if (ptype->payload_layer == LIBETH_RX_PT_PAYLOAD_L2 &&
+ ptype->outer_ip)
+ ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L3;
+ else if (ptype->outer_ip == LIBETH_RX_PT_OUTER_L2)
+ ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L2;
+ else
+ ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_NONE;
+
+ libeth_rx_pt_gen_hash_type(ptype);
+}
+
/**
* idpf_send_get_rx_ptype_msg - Send virtchnl for ptype info
* @vport: virtual port data structure
@@ -2512,7 +3008,7 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
{
struct virtchnl2_get_ptype_info *get_ptype_info __free(kfree) = NULL;
struct virtchnl2_get_ptype_info *ptype_info __free(kfree) = NULL;
- struct idpf_rx_ptype_decoded *ptype_lkup = vport->rx_ptype_lkup;
+ struct libeth_rx_pt *ptype_lkup __free(kfree) = NULL;
int max_ptype, ptypes_recvd = 0, ptype_offset;
struct idpf_adapter *adapter = vport->adapter;
struct idpf_vc_xn_params xn_params = {};
@@ -2520,12 +3016,17 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
ssize_t reply_sz;
int i, j, k;
+ if (vport->rx_ptype_lkup)
+ return 0;
+
if (idpf_is_queue_model_split(vport->rxq_model))
max_ptype = IDPF_RX_MAX_PTYPE;
else
max_ptype = IDPF_RX_MAX_BASE_PTYPE;
- memset(vport->rx_ptype_lkup, 0, sizeof(vport->rx_ptype_lkup));
+ ptype_lkup = kcalloc(max_ptype, sizeof(*ptype_lkup), GFP_KERNEL);
+ if (!ptype_lkup)
+ return -ENOMEM;
get_ptype_info = kzalloc(sizeof(*get_ptype_info), GFP_KERNEL);
if (!get_ptype_info)
@@ -2556,9 +3057,6 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
if (reply_sz < 0)
return reply_sz;
- if (reply_sz < IDPF_CTLQ_MAX_BUF_LEN)
- return -EIO;
-
ptypes_recvd += le16_to_cpu(ptype_info->num_ptypes);
if (ptypes_recvd > max_ptype)
return -EINVAL;
@@ -2583,16 +3081,13 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
/* 0xFFFF indicates end of ptypes */
if (le16_to_cpu(ptype->ptype_id_10) ==
IDPF_INVALID_PTYPE_ID)
- return 0;
+ goto out;
if (idpf_is_queue_model_split(vport->rxq_model))
k = le16_to_cpu(ptype->ptype_id_10);
else
k = ptype->ptype_id_8;
- if (ptype->proto_id_count)
- ptype_lkup[k].known = 1;
-
for (j = 0; j < ptype->proto_id_count; j++) {
id = le16_to_cpu(ptype->proto_id[j]);
switch (id) {
@@ -2600,18 +3095,18 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
if (pstate.tunnel_state ==
IDPF_PTYPE_TUNNEL_IP) {
ptype_lkup[k].tunnel_type =
- IDPF_RX_PTYPE_TUNNEL_IP_GRENAT;
+ LIBETH_RX_PT_TUNNEL_IP_GRENAT;
pstate.tunnel_state |=
IDPF_PTYPE_TUNNEL_IP_GRENAT;
}
break;
case VIRTCHNL2_PROTO_HDR_MAC:
ptype_lkup[k].outer_ip =
- IDPF_RX_PTYPE_OUTER_L2;
+ LIBETH_RX_PT_OUTER_L2;
if (pstate.tunnel_state ==
IDPF_TUN_IP_GRE) {
ptype_lkup[k].tunnel_type =
- IDPF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC;
+ LIBETH_RX_PT_TUNNEL_IP_GRENAT_MAC;
pstate.tunnel_state |=
IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC;
}
@@ -2638,23 +3133,23 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
break;
case VIRTCHNL2_PROTO_HDR_UDP:
ptype_lkup[k].inner_prot =
- IDPF_RX_PTYPE_INNER_PROT_UDP;
+ LIBETH_RX_PT_INNER_UDP;
break;
case VIRTCHNL2_PROTO_HDR_TCP:
ptype_lkup[k].inner_prot =
- IDPF_RX_PTYPE_INNER_PROT_TCP;
+ LIBETH_RX_PT_INNER_TCP;
break;
case VIRTCHNL2_PROTO_HDR_SCTP:
ptype_lkup[k].inner_prot =
- IDPF_RX_PTYPE_INNER_PROT_SCTP;
+ LIBETH_RX_PT_INNER_SCTP;
break;
case VIRTCHNL2_PROTO_HDR_ICMP:
ptype_lkup[k].inner_prot =
- IDPF_RX_PTYPE_INNER_PROT_ICMP;
+ LIBETH_RX_PT_INNER_ICMP;
break;
case VIRTCHNL2_PROTO_HDR_PAY:
ptype_lkup[k].payload_layer =
- IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2;
+ LIBETH_RX_PT_PAYLOAD_L2;
break;
case VIRTCHNL2_PROTO_HDR_ICMPV6:
case VIRTCHNL2_PROTO_HDR_IPV6_EH:
@@ -2708,9 +3203,14 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
break;
}
}
+
+ idpf_finalize_ptype_lookup(&ptype_lkup[k]);
}
}
+out:
+ vport->rx_ptype_lkup = no_free_ptr(ptype_lkup);
+
return 0;
}
@@ -2784,7 +3284,7 @@ int idpf_init_dflt_mbx(struct idpf_adapter *adapter)
struct idpf_hw *hw = &adapter->hw;
int err;
- adapter->dev_ops.reg_ops.ctlq_reg_init(ctlq_info);
+ adapter->dev_ops.reg_ops.ctlq_reg_init(adapter, ctlq_info);
err = idpf_ctlq_init(hw, IDPF_NUM_DFLT_MBX_Q, ctlq_info);
if (err)
@@ -2944,6 +3444,30 @@ restart:
msleep(task_delay);
}
+ if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_LAN_MEMORY_REGIONS)) {
+ err = idpf_send_get_lan_memory_regions(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to get LAN memory regions: %d\n",
+ err);
+ return -EINVAL;
+ }
+ } else {
+ /* Fallback to mapping the remaining regions of the entire BAR */
+ err = idpf_calc_remaining_mmio_regs(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to allocate BAR0 region(s): %d\n",
+ err);
+ return -ENOMEM;
+ }
+ }
+
+ err = idpf_map_lan_mmio_regs(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to map BAR0 region(s): %d\n",
+ err);
+ return -ENOMEM;
+ }
+
pci_sriov_set_totalvfs(adapter->pdev, idpf_get_max_vfs(adapter));
num_max_vports = idpf_get_max_vports(adapter);
adapter->max_vports = num_max_vports;
@@ -2984,6 +3508,11 @@ restart:
goto err_intr_req;
}
+ err = idpf_ptp_init(adapter);
+ if (err)
+ pci_err(adapter->pdev, "PTP init failed, err=%pe\n",
+ ERR_PTR(err));
+
idpf_init_avail_queues(adapter);
/* Skew the delay for init tasks for each function based on fn number
@@ -3022,7 +3551,6 @@ init_failed:
adapter->state = __IDPF_VER_CHECK;
if (adapter->vcxn_mngr)
idpf_vc_xn_shutdown(adapter->vcxn_mngr);
- idpf_deinit_dflt_mbx(adapter);
set_bit(IDPF_HR_DRV_LOAD, adapter->flags);
queue_delayed_work(adapter->vc_event_wq, &adapter->vc_event_task,
msecs_to_jiffies(task_delay));
@@ -3037,13 +3565,24 @@ init_failed:
*/
void idpf_vc_core_deinit(struct idpf_adapter *adapter)
{
+ bool remove_in_prog;
+
if (!test_bit(IDPF_VC_CORE_INIT, adapter->flags))
return;
- idpf_vc_xn_shutdown(adapter->vcxn_mngr);
+ /* Avoid transaction timeouts when called during reset */
+ remove_in_prog = test_bit(IDPF_REMOVE_IN_PROG, adapter->flags);
+ if (!remove_in_prog)
+ idpf_vc_xn_shutdown(adapter->vcxn_mngr);
+
+ idpf_ptp_release(adapter);
idpf_deinit_task(adapter);
+ idpf_idc_deinit_core_aux_device(adapter->cdev_info);
idpf_intr_rel(adapter);
+ if (remove_in_prog)
+ idpf_vc_xn_shutdown(adapter->vcxn_mngr);
+
cancel_delayed_work_sync(&adapter->serv_task);
cancel_delayed_work_sync(&adapter->mbx_task);
@@ -3069,9 +3608,17 @@ int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport)
{
struct idpf_vector_info vec_info;
int num_alloc_vecs;
+ u32 req;
vec_info.num_curr_vecs = vport->num_q_vectors;
- vec_info.num_req_vecs = max(vport->num_txq, vport->num_rxq);
+ if (vec_info.num_curr_vecs)
+ vec_info.num_curr_vecs += IDPF_RESERVED_VECS;
+
+ /* XDPSQs are all bound to the NOIRQ vector from IDPF_RESERVED_VECS */
+ req = max(vport->num_txq - vport->num_xdp_txq, vport->num_rxq) +
+ IDPF_RESERVED_VECS;
+ vec_info.num_req_vecs = req;
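+
+	/*
+	 * Illustrative example (assumed values): with num_txq = 20, of which
+	 * num_xdp_txq = 4 are XDPSQs, num_rxq = 16 and IDPF_RESERVED_VECS = 1,
+	 * the request above becomes max(20 - 4, 16) + 1 = 17 vectors, one of
+	 * them reserved for the NOIRQ queues.
+	 */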
+
vec_info.default_vport = vport->default_vport;
vec_info.index = vport->idx;
@@ -3084,7 +3631,7 @@ int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport)
return -EINVAL;
}
- vport->num_q_vectors = num_alloc_vecs;
+ vport->num_q_vectors = num_alloc_vecs - IDPF_RESERVED_VECS;
return 0;
}
@@ -3105,6 +3652,7 @@ void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q)
u16 rx_itr[] = {2, 8, 32, 96, 128};
struct idpf_rss_data *rss_data;
u16 idx = vport->idx;
+ int err;
vport_config = adapter->vport_config[idx];
rss_data = &vport_config->user_config.rss_data;
@@ -3125,7 +3673,7 @@ void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q)
rss_data->rss_lut_size = le16_to_cpu(vport_msg->rss_lut_size);
ether_addr_copy(vport->default_mac_addr, vport_msg->default_mac_addr);
- vport->max_mtu = le16_to_cpu(vport_msg->max_mtu) - IDPF_PACKET_HDR_PAD;
+ vport->max_mtu = le16_to_cpu(vport_msg->max_mtu) - LIBETH_RX_LL_LEN;
/* Initialize Tx and Rx profiles for Dynamic Interrupt Moderation */
memcpy(vport->rx_itr_profile, rx_itr, IDPF_DIM_PROFILE_SLOTS);
@@ -3139,6 +3687,18 @@ void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q)
idpf_vport_alloc_vec_indexes(vport);
vport->crc_enable = adapter->crc_enable;
+
+ if (!(vport_msg->vport_flags &
+ cpu_to_le16(VIRTCHNL2_VPORT_UPLINK_PORT)))
+ return;
+
+ err = idpf_ptp_get_vport_tstamps_caps(vport);
+ if (err) {
+ pci_dbg(vport->adapter->pdev, "Tx timestamping not supported\n");
+ return;
+ }
+
+ INIT_WORK(&vport->tstamp_task, idpf_tstamp_task);
}
/**
@@ -3242,7 +3802,6 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
int num_qids,
u32 q_type)
{
- struct idpf_queue *q;
int i, j, k = 0;
switch (q_type) {
@@ -3250,11 +3809,8 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
for (i = 0; i < vport->num_txq_grp; i++) {
struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
- for (j = 0; j < tx_qgrp->num_txq && k < num_qids; j++, k++) {
+ for (j = 0; j < tx_qgrp->num_txq && k < num_qids; j++, k++)
tx_qgrp->txqs[j]->q_id = qids[k];
- tx_qgrp->txqs[j]->q_type =
- VIRTCHNL2_QUEUE_TYPE_TX;
- }
}
break;
case VIRTCHNL2_QUEUE_TYPE_RX:
@@ -3268,12 +3824,13 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
num_rxq = rx_qgrp->singleq.num_rxq;
for (j = 0; j < num_rxq && k < num_qids; j++, k++) {
+ struct idpf_rx_queue *q;
+
if (idpf_is_queue_model_split(vport->rxq_model))
q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
else
q = rx_qgrp->singleq.rxqs[j];
q->q_id = qids[k];
- q->q_type = VIRTCHNL2_QUEUE_TYPE_RX;
}
}
break;
@@ -3282,8 +3839,6 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
tx_qgrp->complq->q_id = qids[k];
- tx_qgrp->complq->q_type =
- VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
}
break;
case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
@@ -3292,9 +3847,10 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
u8 num_bufqs = vport->num_bufqs_per_qgrp;
for (j = 0; j < num_bufqs && k < num_qids; j++, k++) {
+ struct idpf_buf_queue *q;
+
q = &rx_qgrp->splitq.bufq_sets[j].bufq;
q->q_id = qids[k];
- q->q_type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
}
}
break;
@@ -3453,6 +4009,79 @@ bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all,
}
/**
+ * idpf_vport_is_cap_ena - Check if vport capability is enabled
+ * @vport: Private data struct
+ * @flag: flag(s) to check
+ *
+ * Return: true if the capability is enabled, false otherwise
+ */
+bool idpf_vport_is_cap_ena(struct idpf_vport *vport, u16 flag)
+{
+ struct virtchnl2_create_vport *vport_msg;
+
+ vport_msg = vport->adapter->vport_params_recvd[vport->idx];
+
+ return !!(le16_to_cpu(vport_msg->vport_flags) & flag);
+}
+
+/**
+ * idpf_sideband_flow_type_ena - Check if steering is enabled for flow type
+ * @vport: Private data struct
+ * @flow_type: flow type to check (from ethtool.h)
+ *
+ * Return: true if sideband filters are allowed for @flow_type, false otherwise
+ */
+bool idpf_sideband_flow_type_ena(struct idpf_vport *vport, u32 flow_type)
+{
+ struct virtchnl2_create_vport *vport_msg;
+ __le64 caps;
+
+ vport_msg = vport->adapter->vport_params_recvd[vport->idx];
+ caps = vport_msg->sideband_flow_caps;
+
+ switch (flow_type) {
+ case TCP_V4_FLOW:
+ return !!(caps & cpu_to_le64(VIRTCHNL2_FLOW_IPV4_TCP));
+ case UDP_V4_FLOW:
+ return !!(caps & cpu_to_le64(VIRTCHNL2_FLOW_IPV4_UDP));
+ default:
+ return false;
+ }
+}
+
+/**
+ * idpf_sideband_action_ena - Check if steering is enabled for action
+ * @vport: Private data struct
+ * @fsp: flow spec
+ *
+ * Return: true if sideband filters are allowed for @fsp, false otherwise
+ */
+bool idpf_sideband_action_ena(struct idpf_vport *vport,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ struct virtchnl2_create_vport *vport_msg;
+ unsigned int supp_actions;
+
+ vport_msg = vport->adapter->vport_params_recvd[vport->idx];
+ supp_actions = le32_to_cpu(vport_msg->sideband_flow_actions);
+
+ /* Actions Drop/Wake are not supported */
+ if (fsp->ring_cookie == RX_CLS_FLOW_DISC ||
+ fsp->ring_cookie == RX_CLS_FLOW_WAKE)
+ return false;
+
+ return !!(supp_actions & VIRTCHNL2_ACTION_QUEUE);
+}
+
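+/**
+ * idpf_fsteer_max_rules - Get the max number of flow steering rules
+ * @vport: Private data struct
+ *
+ * Return: maximum number of inline and sideband flow steering rules combined,
+ * as reported by the control plane in the create_vport response.
+ */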
+unsigned int idpf_fsteer_max_rules(struct idpf_vport *vport)
+{
+ struct virtchnl2_create_vport *vport_msg;
+
+ vport_msg = vport->adapter->vport_params_recvd[vport->idx];
+ return le32_to_cpu(vport_msg->flow_steer_max_rules);
+}
+
+/**
* idpf_get_vport_id: Get vport id
* @vport: virtual port structure
*
@@ -3467,6 +4096,16 @@ u32 idpf_get_vport_id(struct idpf_vport *vport)
return le32_to_cpu(vport_msg->vport_id);
}
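+/**
+ * idpf_set_mac_type - Mark a MAC filter entry as primary or extra
+ * @vport: virtual port structure
+ * @mac_addr: virtchnl MAC filter entry to tag
+ *
+ * Tag the entry as VIRTCHNL2_MAC_ADDR_PRIMARY when it matches the vport's
+ * default MAC address, otherwise as VIRTCHNL2_MAC_ADDR_EXTRA.
+ */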
+static void idpf_set_mac_type(struct idpf_vport *vport,
+ struct virtchnl2_mac_addr *mac_addr)
+{
+ bool is_primary;
+
+ is_primary = ether_addr_equal(vport->default_mac_addr, mac_addr->addr);
+ mac_addr->type = is_primary ? VIRTCHNL2_MAC_ADDR_PRIMARY :
+ VIRTCHNL2_MAC_ADDR_EXTRA;
+}
+
/**
* idpf_mac_filter_async_handler - Async callback for mac filters
* @adapter: private data struct
@@ -3596,6 +4235,7 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
list) {
if (add && f->add) {
ether_addr_copy(mac_addr[i].addr, f->macaddr);
+ idpf_set_mac_type(vport, &mac_addr[i]);
i++;
f->add = false;
if (i == total_filters)
@@ -3603,6 +4243,7 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
}
if (!add && f->remove) {
ether_addr_copy(mac_addr[i].addr, f->macaddr);
+ idpf_set_mac_type(vport, &mac_addr[i]);
i++;
f->remove = false;
if (i == total_filters)
@@ -3688,3 +4329,42 @@ int idpf_set_promiscuous(struct idpf_adapter *adapter,
return reply_sz < 0 ? reply_sz : 0;
}
+
+/**
+ * idpf_idc_rdma_vc_send_sync - virtchnl send callback for IDC registered drivers
+ * @cdev_info: IDC core device info pointer
+ * @send_msg: message to send
+ * @msg_size: size of message to send
+ * @recv_msg: message to populate on reception of response
+ * @recv_len: length of message copied into recv_msg or 0 on error
+ *
+ * Return: 0 on success or error code on failure.
+ */
+int idpf_idc_rdma_vc_send_sync(struct iidc_rdma_core_dev_info *cdev_info,
+ u8 *send_msg, u16 msg_size,
+ u8 *recv_msg, u16 *recv_len)
+{
+ struct idpf_adapter *adapter = pci_get_drvdata(cdev_info->pdev);
+ struct idpf_vc_xn_params xn_params = { };
+ ssize_t reply_sz;
+ u16 recv_size;
+
+ if (!recv_msg || !recv_len || msg_size > IDPF_CTLQ_MAX_BUF_LEN)
+ return -EINVAL;
+
+ recv_size = min_t(u16, *recv_len, IDPF_CTLQ_MAX_BUF_LEN);
+ *recv_len = 0;
+ xn_params.vc_op = VIRTCHNL2_OP_RDMA;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ xn_params.send_buf.iov_base = send_msg;
+ xn_params.send_buf.iov_len = msg_size;
+ xn_params.recv_buf.iov_base = recv_msg;
+ xn_params.recv_buf.iov_len = recv_size;
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ *recv_len = reply_sz;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(idpf_idc_rdma_vc_send_sync);
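For orientation only, a minimal sketch of how an IDC-registered RDMA auxiliary driver might call the exported helper above; the function name, buffers and sizes are hypothetical and only illustrate the contract (recv_len is the buffer size on input and the reply size on output):

/* Illustrative sketch, not part of this patch. Assumes a valid cdev_info
 * obtained through the IDC registration path.
 */
static int example_rdma_query(struct iidc_rdma_core_dev_info *cdev_info)
{
        u8 req[64] = {};                /* opaque RDMA request, format owned by the peer */
        u8 resp[256];
        u16 resp_len = sizeof(resp);    /* in: buffer size, out: bytes received */
        int err;

        err = idpf_idc_rdma_vc_send_sync(cdev_info, req, sizeof(req),
                                         resp, &resp_len);
        if (err)
                return err;             /* mailbox failure or timeout */

        /* resp_len now holds the number of valid bytes written to resp */
        return 0;
}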
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
index 83da5d8da56b..eac3d15daa42 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
@@ -4,6 +4,89 @@
#ifndef _IDPF_VIRTCHNL_H_
#define _IDPF_VIRTCHNL_H_
+#include "virtchnl2.h"
+
+#define IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC (60 * 1000)
+#define IDPF_VC_XN_IDX_M GENMASK(7, 0)
+#define IDPF_VC_XN_SALT_M GENMASK(15, 8)
+#define IDPF_VC_XN_RING_LEN U8_MAX
+
+/**
+ * enum idpf_vc_xn_state - Virtchnl transaction status
+ * @IDPF_VC_XN_IDLE: not expecting a reply, ready to be used
+ * @IDPF_VC_XN_WAITING: expecting a reply, not yet received
+ * @IDPF_VC_XN_COMPLETED_SUCCESS: a reply was expected and received, buffer
+ * updated
+ * @IDPF_VC_XN_COMPLETED_FAILED: a reply was expected and received, but there
+ * was an error, buffer not updated
+ * @IDPF_VC_XN_SHUTDOWN: transaction object cannot be used, VC torn down
+ * @IDPF_VC_XN_ASYNC: transaction sent asynchronously and doesn't have the
+ * return context; a callback may be provided to handle
+ * return
+ */
+enum idpf_vc_xn_state {
+ IDPF_VC_XN_IDLE = 1,
+ IDPF_VC_XN_WAITING,
+ IDPF_VC_XN_COMPLETED_SUCCESS,
+ IDPF_VC_XN_COMPLETED_FAILED,
+ IDPF_VC_XN_SHUTDOWN,
+ IDPF_VC_XN_ASYNC,
+};
+
+struct idpf_vc_xn;
+/* Callback for asynchronous messages */
+typedef int (*async_vc_cb) (struct idpf_adapter *, struct idpf_vc_xn *,
+ const struct idpf_ctlq_msg *);
+
+/**
+ * struct idpf_vc_xn - Data structure representing virtchnl transactions
+ * @completed: virtchnl event loop uses that to signal when a reply is
+ * available, uses kernel completion API
+ * @state: virtchnl event loop stores the data below, protected by the
+ * completion's lock.
+ * @reply_sz: Original size of reply, may be > reply.iov_len; it will be
+ * truncated on its way to the receiver thread according to
+ * reply.iov_len.
+ * @reply: Reference to the buffer(s) where the reply data should be written
+ * to. May be 0-length (then NULL address permitted) if the reply data
+ * should be ignored.
+ * @async_handler: if sent asynchronously, a callback can be provided to handle
+ * the reply when it's received
+ * @vc_op: corresponding opcode sent with this transaction
+ * @idx: index used as retrieval on reply receive, used for cookie
+ * @salt: changed every message to make unique, used for cookie
+ */
+struct idpf_vc_xn {
+ struct completion completed;
+ enum idpf_vc_xn_state state;
+ size_t reply_sz;
+ struct kvec reply;
+ async_vc_cb async_handler;
+ u32 vc_op;
+ u8 idx;
+ u8 salt;
+};
+
+/**
+ * struct idpf_vc_xn_params - Parameters for executing transaction
+ * @send_buf: kvec for send buffer
+ * @recv_buf: kvec for recv buffer, may be NULL, must then have zero length
+ * @timeout_ms: timeout to wait for reply
+ * @async: send message asynchronously, will not wait on completion
+ * @async_handler: If sent asynchronously, optional callback handler. The user
+ * must be careful when using async handlers as the memory for
+ * the recv_buf _cannot_ be on stack if this is async.
+ * @vc_op: virtchnl op to send
+ */
+struct idpf_vc_xn_params {
+ struct kvec send_buf;
+ struct kvec recv_buf;
+ int timeout_ms;
+ bool async;
+ async_vc_cb async_handler;
+ u32 vc_op;
+};
+
struct idpf_adapter;
struct idpf_netdev_priv;
struct idpf_vec_regs;
@@ -11,6 +94,8 @@ struct idpf_vport;
struct idpf_vport_max_q;
struct idpf_vport_user_config_data;
+ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter,
+ const struct idpf_vc_xn_params *params);
int idpf_init_dflt_mbx(struct idpf_adapter *adapter);
void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter);
int idpf_vc_core_init(struct idpf_adapter *adapter);
@@ -21,10 +106,43 @@ int idpf_get_reg_intr_vecs(struct idpf_vport *vport,
int idpf_queue_reg_init(struct idpf_vport *vport);
int idpf_vport_queue_ids_init(struct idpf_vport *vport);
+bool idpf_vport_is_cap_ena(struct idpf_vport *vport, u16 flag);
+bool idpf_sideband_flow_type_ena(struct idpf_vport *vport, u32 flow_type);
+bool idpf_sideband_action_ena(struct idpf_vport *vport,
+ struct ethtool_rx_flow_spec *fsp);
+unsigned int idpf_fsteer_max_rules(struct idpf_vport *vport);
+
int idpf_recv_mb_msg(struct idpf_adapter *adapter);
int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
u16 msg_size, u8 *msg, u16 cookie);
+struct idpf_queue_ptr {
+ enum virtchnl2_queue_type type;
+ union {
+ struct idpf_rx_queue *rxq;
+ struct idpf_tx_queue *txq;
+ struct idpf_buf_queue *bufq;
+ struct idpf_compl_queue *complq;
+ };
+};
+
+struct idpf_queue_set {
+ struct idpf_vport *vport;
+
+ u32 num;
+ struct idpf_queue_ptr qs[] __counted_by(num);
+};
+
+struct idpf_queue_set *idpf_alloc_queue_set(struct idpf_vport *vport, u32 num);
+
+int idpf_send_enable_queue_set_msg(const struct idpf_queue_set *qs);
+int idpf_send_disable_queue_set_msg(const struct idpf_queue_set *qs);
+int idpf_send_config_queue_set_msg(const struct idpf_queue_set *qs);
+
+int idpf_send_disable_queues_msg(struct idpf_vport *vport);
+int idpf_send_config_queues_msg(struct idpf_vport *vport);
+int idpf_send_enable_queues_msg(struct idpf_vport *vport);
+
void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q);
u32 idpf_get_vport_id(struct idpf_vport *vport);
int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
@@ -41,9 +159,6 @@ void idpf_vport_dealloc_max_qs(struct idpf_adapter *adapter,
int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
u16 num_complq, u16 num_rx_q, u16 num_rx_bufq);
int idpf_send_delete_queues_msg(struct idpf_vport *vport);
-int idpf_send_enable_queues_msg(struct idpf_vport *vport);
-int idpf_send_disable_queues_msg(struct idpf_vport *vport);
-int idpf_send_config_queues_msg(struct idpf_vport *vport);
int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport);
int idpf_get_vec_ids(struct idpf_adapter *adapter,
@@ -66,5 +181,9 @@ int idpf_send_get_stats_msg(struct idpf_vport *vport);
int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs);
int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get);
int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get);
+void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr);
+int idpf_idc_rdma_vc_send_sync(struct iidc_rdma_core_dev_info *cdev_info,
+ u8 *send_msg, u16 msg_size,
+ u8 *recv_msg, u16 *recv_len);
#endif /* _IDPF_VIRTCHNL_H_ */
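For orientation only, a minimal sketch of the synchronous request/response pattern behind idpf_vc_xn_exec(); the helper name here is hypothetical and it essentially mirrors idpf_ptp_get_dev_clk_time() added in idpf_virtchnl_ptp.c below:

/* Illustrative sketch: send a virtchnl2 message and wait for the reply. */
static int example_get_dev_clk_time(struct idpf_adapter *adapter, u64 *time_ns)
{
        struct virtchnl2_ptp_get_dev_clk_time msg = { };
        struct idpf_vc_xn_params xn_params = {
                .vc_op      = VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME,
                .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
                .send_buf   = { .iov_base = &msg, .iov_len = sizeof(msg) },
                .recv_buf   = { .iov_base = &msg, .iov_len = sizeof(msg) },
        };
        ssize_t reply_sz;

        reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
        if (reply_sz < 0)
                return reply_sz;        /* transaction failed or timed out */
        if (reply_sz != sizeof(msg))
                return -EIO;            /* unexpected reply length */

        *time_ns = le64_to_cpu(msg.dev_time_ns);
        return 0;
}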
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c
new file mode 100644
index 000000000000..61cedb6f2854
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c
@@ -0,0 +1,673 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2024 Intel Corporation */
+
+#include "idpf.h"
+#include "idpf_ptp.h"
+#include "idpf_virtchnl.h"
+
+/**
+ * idpf_ptp_get_caps - Send virtchnl get ptp capabilities message
+ * @adapter: Driver specific private structure
+ *
+ * Send virtchnl get PTP capabilities message.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+int idpf_ptp_get_caps(struct idpf_adapter *adapter)
+{
+ struct virtchnl2_ptp_get_caps *recv_ptp_caps_msg __free(kfree) = NULL;
+ struct virtchnl2_ptp_get_caps send_ptp_caps_msg = {
+ .caps = cpu_to_le32(VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME |
+ VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME_MB |
+ VIRTCHNL2_CAP_PTP_GET_CROSS_TIME |
+ VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME_MB |
+ VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK_MB |
+ VIRTCHNL2_CAP_PTP_TX_TSTAMPS_MB)
+ };
+ struct idpf_vc_xn_params xn_params = {
+ .vc_op = VIRTCHNL2_OP_PTP_GET_CAPS,
+ .send_buf.iov_base = &send_ptp_caps_msg,
+ .send_buf.iov_len = sizeof(send_ptp_caps_msg),
+ .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
+ };
+ struct virtchnl2_ptp_cross_time_reg_offsets cross_tstamp_offsets;
+ struct virtchnl2_ptp_clk_adj_reg_offsets clk_adj_offsets;
+ struct virtchnl2_ptp_clk_reg_offsets clock_offsets;
+ struct idpf_ptp_secondary_mbx *scnd_mbx;
+ struct idpf_ptp *ptp = adapter->ptp;
+ enum idpf_ptp_access access_type;
+ u32 temp_offset;
+ int reply_sz;
+
+ recv_ptp_caps_msg = kzalloc(sizeof(struct virtchnl2_ptp_get_caps),
+ GFP_KERNEL);
+ if (!recv_ptp_caps_msg)
+ return -ENOMEM;
+
+ xn_params.recv_buf.iov_base = recv_ptp_caps_msg;
+ xn_params.recv_buf.iov_len = sizeof(*recv_ptp_caps_msg);
+
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ else if (reply_sz != sizeof(*recv_ptp_caps_msg))
+ return -EIO;
+
+ ptp->caps = le32_to_cpu(recv_ptp_caps_msg->caps);
+ ptp->base_incval = le64_to_cpu(recv_ptp_caps_msg->base_incval);
+ ptp->max_adj = le32_to_cpu(recv_ptp_caps_msg->max_adj);
+
+ scnd_mbx = &ptp->secondary_mbx;
+ scnd_mbx->peer_mbx_q_id = le16_to_cpu(recv_ptp_caps_msg->peer_mbx_q_id);
+
+ /* If the peer_mbx_q_id holds an invalid value (0xffff), the secondary
+ * mailbox is not supported.
+ */
+ scnd_mbx->valid = scnd_mbx->peer_mbx_q_id != 0xffff;
+ if (scnd_mbx->valid)
+ scnd_mbx->peer_id = recv_ptp_caps_msg->peer_id;
+
+ /* Determine the access type for the PTP features */
+ idpf_ptp_get_features_access(adapter);
+
+ access_type = ptp->get_dev_clk_time_access;
+ if (access_type != IDPF_PTP_DIRECT)
+ goto cross_tstamp;
+
+ clock_offsets = recv_ptp_caps_msg->clk_offsets;
+
+ temp_offset = le32_to_cpu(clock_offsets.dev_clk_ns_l);
+ ptp->dev_clk_regs.dev_clk_ns_l = idpf_get_reg_addr(adapter,
+ temp_offset);
+ temp_offset = le32_to_cpu(clock_offsets.dev_clk_ns_h);
+ ptp->dev_clk_regs.dev_clk_ns_h = idpf_get_reg_addr(adapter,
+ temp_offset);
+ temp_offset = le32_to_cpu(clock_offsets.phy_clk_ns_l);
+ ptp->dev_clk_regs.phy_clk_ns_l = idpf_get_reg_addr(adapter,
+ temp_offset);
+ temp_offset = le32_to_cpu(clock_offsets.phy_clk_ns_h);
+ ptp->dev_clk_regs.phy_clk_ns_h = idpf_get_reg_addr(adapter,
+ temp_offset);
+ temp_offset = le32_to_cpu(clock_offsets.cmd_sync_trigger);
+ ptp->dev_clk_regs.cmd_sync = idpf_get_reg_addr(adapter, temp_offset);
+
+cross_tstamp:
+ access_type = ptp->get_cross_tstamp_access;
+ if (access_type != IDPF_PTP_DIRECT)
+ goto discipline_clock;
+
+ cross_tstamp_offsets = recv_ptp_caps_msg->cross_time_offsets;
+
+ temp_offset = le32_to_cpu(cross_tstamp_offsets.sys_time_ns_l);
+ ptp->dev_clk_regs.sys_time_ns_l = idpf_get_reg_addr(adapter,
+ temp_offset);
+ temp_offset = le32_to_cpu(cross_tstamp_offsets.sys_time_ns_h);
+ ptp->dev_clk_regs.sys_time_ns_h = idpf_get_reg_addr(adapter,
+ temp_offset);
+ temp_offset = le32_to_cpu(cross_tstamp_offsets.cmd_sync_trigger);
+ ptp->dev_clk_regs.cmd_sync = idpf_get_reg_addr(adapter, temp_offset);
+
+discipline_clock:
+ access_type = ptp->adj_dev_clk_time_access;
+ if (access_type != IDPF_PTP_DIRECT)
+ return 0;
+
+ clk_adj_offsets = recv_ptp_caps_msg->clk_adj_offsets;
+
+ /* Device clock offsets */
+ temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_cmd_type);
+ ptp->dev_clk_regs.cmd = idpf_get_reg_addr(adapter, temp_offset);
+ temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_incval_l);
+ ptp->dev_clk_regs.incval_l = idpf_get_reg_addr(adapter, temp_offset);
+ temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_incval_h);
+ ptp->dev_clk_regs.incval_h = idpf_get_reg_addr(adapter, temp_offset);
+ temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_shadj_l);
+ ptp->dev_clk_regs.shadj_l = idpf_get_reg_addr(adapter, temp_offset);
+ temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_shadj_h);
+ ptp->dev_clk_regs.shadj_h = idpf_get_reg_addr(adapter, temp_offset);
+
+ /* PHY clock offsets */
+ temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_cmd_type);
+ ptp->dev_clk_regs.phy_cmd = idpf_get_reg_addr(adapter, temp_offset);
+ temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_incval_l);
+ ptp->dev_clk_regs.phy_incval_l = idpf_get_reg_addr(adapter,
+ temp_offset);
+ temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_incval_h);
+ ptp->dev_clk_regs.phy_incval_h = idpf_get_reg_addr(adapter,
+ temp_offset);
+ temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_shadj_l);
+ ptp->dev_clk_regs.phy_shadj_l = idpf_get_reg_addr(adapter, temp_offset);
+ temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_shadj_h);
+ ptp->dev_clk_regs.phy_shadj_h = idpf_get_reg_addr(adapter, temp_offset);
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_get_dev_clk_time - Send virtchnl get device clk time message
+ * @adapter: Driver specific private structure
+ * @dev_clk_time: Pointer to the device clock structure where the value is set
+ *
+ * Send virtchnl get time message to get the time of the clock.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int idpf_ptp_get_dev_clk_time(struct idpf_adapter *adapter,
+ struct idpf_ptp_dev_timers *dev_clk_time)
+{
+ struct virtchnl2_ptp_get_dev_clk_time get_dev_clk_time_msg;
+ struct idpf_vc_xn_params xn_params = {
+ .vc_op = VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME,
+ .send_buf.iov_base = &get_dev_clk_time_msg,
+ .send_buf.iov_len = sizeof(get_dev_clk_time_msg),
+ .recv_buf.iov_base = &get_dev_clk_time_msg,
+ .recv_buf.iov_len = sizeof(get_dev_clk_time_msg),
+ .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
+ };
+ int reply_sz;
+ u64 dev_time;
+
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (reply_sz != sizeof(get_dev_clk_time_msg))
+ return -EIO;
+
+ dev_time = le64_to_cpu(get_dev_clk_time_msg.dev_time_ns);
+ dev_clk_time->dev_clk_time_ns = dev_time;
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_get_cross_time - Send virtchnl get cross time message
+ * @adapter: Driver specific private structure
+ * @cross_time: Pointer to the device clock structure where the value is set
+ *
+ * Send virtchnl get cross time message to get the time of the clock and the
+ * system time.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int idpf_ptp_get_cross_time(struct idpf_adapter *adapter,
+ struct idpf_ptp_dev_timers *cross_time)
+{
+ struct virtchnl2_ptp_get_cross_time cross_time_msg;
+ struct idpf_vc_xn_params xn_params = {
+ .vc_op = VIRTCHNL2_OP_PTP_GET_CROSS_TIME,
+ .send_buf.iov_base = &cross_time_msg,
+ .send_buf.iov_len = sizeof(cross_time_msg),
+ .recv_buf.iov_base = &cross_time_msg,
+ .recv_buf.iov_len = sizeof(cross_time_msg),
+ .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
+ };
+ int reply_sz;
+
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (reply_sz != sizeof(cross_time_msg))
+ return -EIO;
+
+ cross_time->dev_clk_time_ns = le64_to_cpu(cross_time_msg.dev_time_ns);
+ cross_time->sys_time_ns = le64_to_cpu(cross_time_msg.sys_time_ns);
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_set_dev_clk_time - Send virtchnl set device time message
+ * @adapter: Driver specific private structure
+ * @time: New time value
+ *
+ * Send virtchnl set time message to set the time of the clock.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int idpf_ptp_set_dev_clk_time(struct idpf_adapter *adapter, u64 time)
+{
+ struct virtchnl2_ptp_set_dev_clk_time set_dev_clk_time_msg = {
+ .dev_time_ns = cpu_to_le64(time),
+ };
+ struct idpf_vc_xn_params xn_params = {
+ .vc_op = VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME,
+ .send_buf.iov_base = &set_dev_clk_time_msg,
+ .send_buf.iov_len = sizeof(set_dev_clk_time_msg),
+ .recv_buf.iov_base = &set_dev_clk_time_msg,
+ .recv_buf.iov_len = sizeof(set_dev_clk_time_msg),
+ .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
+ };
+ int reply_sz;
+
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (reply_sz != sizeof(set_dev_clk_time_msg))
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_adj_dev_clk_time - Send virtchnl adj device clock time message
+ * @adapter: Driver specific private structure
+ * @delta: Offset in nanoseconds to adjust the time by
+ *
+ * Send virtchnl adj time message to adjust the clock by the indicated delta.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int idpf_ptp_adj_dev_clk_time(struct idpf_adapter *adapter, s64 delta)
+{
+ struct virtchnl2_ptp_adj_dev_clk_time adj_dev_clk_time_msg = {
+ .delta = cpu_to_le64(delta),
+ };
+ struct idpf_vc_xn_params xn_params = {
+ .vc_op = VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME,
+ .send_buf.iov_base = &adj_dev_clk_time_msg,
+ .send_buf.iov_len = sizeof(adj_dev_clk_time_msg),
+ .recv_buf.iov_base = &adj_dev_clk_time_msg,
+ .recv_buf.iov_len = sizeof(adj_dev_clk_time_msg),
+ .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
+ };
+ int reply_sz;
+
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (reply_sz != sizeof(adj_dev_clk_time_msg))
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_adj_dev_clk_fine - Send virtchnl adj time message
+ * @adapter: Driver specific private structure
+ * @incval: Source timer increment value per clock cycle
+ *
+ * Send virtchnl adj fine message to adjust the frequency of the clock by
+ * incval.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int idpf_ptp_adj_dev_clk_fine(struct idpf_adapter *adapter, u64 incval)
+{
+ struct virtchnl2_ptp_adj_dev_clk_fine adj_dev_clk_fine_msg = {
+ .incval = cpu_to_le64(incval),
+ };
+ struct idpf_vc_xn_params xn_params = {
+ .vc_op = VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE,
+ .send_buf.iov_base = &adj_dev_clk_fine_msg,
+ .send_buf.iov_len = sizeof(adj_dev_clk_fine_msg),
+ .recv_buf.iov_base = &adj_dev_clk_fine_msg,
+ .recv_buf.iov_len = sizeof(adj_dev_clk_fine_msg),
+ .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
+ };
+ int reply_sz;
+
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (reply_sz != sizeof(adj_dev_clk_fine_msg))
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_get_vport_tstamps_caps - Send virtchnl to get tstamps caps for vport
+ * @vport: Virtual port structure
+ *
+ * Send virtchnl get vport tstamps caps message to receive the set of tstamp
+ * capabilities per vport.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int idpf_ptp_get_vport_tstamps_caps(struct idpf_vport *vport)
+{
+ struct virtchnl2_ptp_get_vport_tx_tstamp_caps send_tx_tstamp_caps;
+ struct virtchnl2_ptp_get_vport_tx_tstamp_caps *rcv_tx_tstamp_caps;
+ struct virtchnl2_ptp_tx_tstamp_latch_caps tx_tstamp_latch_caps;
+ struct idpf_ptp_vport_tx_tstamp_caps *tstamp_caps;
+ struct idpf_ptp_tx_tstamp *ptp_tx_tstamp, *tmp;
+ struct idpf_vc_xn_params xn_params = {
+ .vc_op = VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS,
+ .send_buf.iov_base = &send_tx_tstamp_caps,
+ .send_buf.iov_len = sizeof(send_tx_tstamp_caps),
+ .recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN,
+ .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
+ };
+ enum idpf_ptp_access tstamp_access, get_dev_clk_access;
+ struct idpf_ptp *ptp = vport->adapter->ptp;
+ struct list_head *head;
+ int err = 0, reply_sz;
+ u16 num_latches;
+ u32 size;
+
+ if (!ptp)
+ return -EOPNOTSUPP;
+
+ tstamp_access = ptp->tx_tstamp_access;
+ get_dev_clk_access = ptp->get_dev_clk_time_access;
+ if (tstamp_access == IDPF_PTP_NONE ||
+ get_dev_clk_access == IDPF_PTP_NONE)
+ return -EOPNOTSUPP;
+
+ rcv_tx_tstamp_caps = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!rcv_tx_tstamp_caps)
+ return -ENOMEM;
+
+ send_tx_tstamp_caps.vport_id = cpu_to_le32(vport->vport_id);
+ xn_params.recv_buf.iov_base = rcv_tx_tstamp_caps;
+
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0) {
+ err = reply_sz;
+ goto get_tstamp_caps_out;
+ }
+
+ num_latches = le16_to_cpu(rcv_tx_tstamp_caps->num_latches);
+ size = struct_size(rcv_tx_tstamp_caps, tstamp_latches, num_latches);
+ if (reply_sz != size) {
+ err = -EIO;
+ goto get_tstamp_caps_out;
+ }
+
+ size = struct_size(tstamp_caps, tx_tstamp_status, num_latches);
+ tstamp_caps = kzalloc(size, GFP_KERNEL);
+ if (!tstamp_caps) {
+ err = -ENOMEM;
+ goto get_tstamp_caps_out;
+ }
+
+ tstamp_caps->access = true;
+ tstamp_caps->num_entries = num_latches;
+
+ INIT_LIST_HEAD(&tstamp_caps->latches_in_use);
+ INIT_LIST_HEAD(&tstamp_caps->latches_free);
+
+ spin_lock_init(&tstamp_caps->latches_lock);
+ spin_lock_init(&tstamp_caps->status_lock);
+
+ tstamp_caps->tstamp_ns_lo_bit = rcv_tx_tstamp_caps->tstamp_ns_lo_bit;
+
+ for (u16 i = 0; i < tstamp_caps->num_entries; i++) {
+ __le32 offset_l, offset_h;
+
+ ptp_tx_tstamp = kzalloc(sizeof(*ptp_tx_tstamp), GFP_KERNEL);
+ if (!ptp_tx_tstamp) {
+ err = -ENOMEM;
+ goto err_free_ptp_tx_stamp_list;
+ }
+
+ tx_tstamp_latch_caps = rcv_tx_tstamp_caps->tstamp_latches[i];
+
+ if (tstamp_access != IDPF_PTP_DIRECT)
+ goto skip_offsets;
+
+ offset_l = tx_tstamp_latch_caps.tx_latch_reg_offset_l;
+ offset_h = tx_tstamp_latch_caps.tx_latch_reg_offset_h;
+ ptp_tx_tstamp->tx_latch_reg_offset_l = le32_to_cpu(offset_l);
+ ptp_tx_tstamp->tx_latch_reg_offset_h = le32_to_cpu(offset_h);
+
+skip_offsets:
+ ptp_tx_tstamp->idx = tx_tstamp_latch_caps.index;
+
+ list_add(&ptp_tx_tstamp->list_member,
+ &tstamp_caps->latches_free);
+
+ tstamp_caps->tx_tstamp_status[i].state = IDPF_PTP_FREE;
+ }
+
+ vport->tx_tstamp_caps = tstamp_caps;
+ kfree(rcv_tx_tstamp_caps);
+
+ return 0;
+
+err_free_ptp_tx_stamp_list:
+ head = &tstamp_caps->latches_free;
+ list_for_each_entry_safe(ptp_tx_tstamp, tmp, head, list_member) {
+ list_del(&ptp_tx_tstamp->list_member);
+ kfree(ptp_tx_tstamp);
+ }
+
+ kfree(tstamp_caps);
+get_tstamp_caps_out:
+ kfree(rcv_tx_tstamp_caps);
+
+ return err;
+}
+
+/**
+ * idpf_ptp_update_tstamp_tracker - Update the Tx timestamp tracker based on
+ * the skb compatibility.
+ * @caps: Tx timestamp capabilities that monitor the latch status
+ * @skb: skb for which the tstamp value is returned through virtchnl message
+ * @current_state: Current state of the Tx timestamp latch
+ * @expected_state: Expected state of the Tx timestamp latch
+ *
+ * Find a proper skb tracker for which the Tx timestamp is received and change
+ * the state to expected value.
+ *
+ * Return: true if the tracker has been found and updated, false otherwise.
+ */
+static bool
+idpf_ptp_update_tstamp_tracker(struct idpf_ptp_vport_tx_tstamp_caps *caps,
+ struct sk_buff *skb,
+ enum idpf_ptp_tx_tstamp_state current_state,
+ enum idpf_ptp_tx_tstamp_state expected_state)
+{
+ bool updated = false;
+
+ spin_lock(&caps->status_lock);
+ for (u16 i = 0; i < caps->num_entries; i++) {
+ struct idpf_ptp_tx_tstamp_status *status;
+
+ status = &caps->tx_tstamp_status[i];
+
+ if (skb == status->skb && status->state == current_state) {
+ status->state = expected_state;
+ updated = true;
+ break;
+ }
+ }
+ spin_unlock(&caps->status_lock);
+
+ return updated;
+}
+
+/**
+ * idpf_ptp_get_tstamp_value - Get the Tx timestamp value and provide it
+ * back to the skb.
+ * @vport: Virtual port structure
+ * @tstamp_latch: Tx timestamp latch structure fulfilled by the Control Plane
+ * @ptp_tx_tstamp: Tx timestamp latch to add to the free list
+ *
+ * Read the value of the Tx timestamp for a given latch received from the
+ * Control Plane, extend it to 64 bit and provide back to the skb.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int
+idpf_ptp_get_tstamp_value(struct idpf_vport *vport,
+ struct virtchnl2_ptp_tx_tstamp_latch *tstamp_latch,
+ struct idpf_ptp_tx_tstamp *ptp_tx_tstamp)
+{
+ struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps;
+ struct skb_shared_hwtstamps shhwtstamps;
+ bool state_upd = false;
+ u8 tstamp_ns_lo_bit;
+ u64 tstamp;
+
+ tx_tstamp_caps = vport->tx_tstamp_caps;
+ tstamp_ns_lo_bit = tx_tstamp_caps->tstamp_ns_lo_bit;
+
+ ptp_tx_tstamp->tstamp = le64_to_cpu(tstamp_latch->tstamp);
+ ptp_tx_tstamp->tstamp >>= tstamp_ns_lo_bit;
+
+ state_upd = idpf_ptp_update_tstamp_tracker(tx_tstamp_caps,
+ ptp_tx_tstamp->skb,
+ IDPF_PTP_READ_VALUE,
+ IDPF_PTP_FREE);
+ if (!state_upd)
+ return -EINVAL;
+
+ tstamp = idpf_ptp_extend_ts(vport, ptp_tx_tstamp->tstamp);
+ shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
+ skb_tstamp_tx(ptp_tx_tstamp->skb, &shhwtstamps);
+ consume_skb(ptp_tx_tstamp->skb);
+ ptp_tx_tstamp->skb = NULL;
+
+ list_add(&ptp_tx_tstamp->list_member,
+ &tx_tstamp_caps->latches_free);
+
+ u64_stats_update_begin(&vport->tstamp_stats.stats_sync);
+ u64_stats_inc(&vport->tstamp_stats.packets);
+ u64_stats_update_end(&vport->tstamp_stats.stats_sync);
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_get_tx_tstamp_async_handler - Async callback for getting Tx tstamps
+ * @adapter: Driver specific private structure
+ * @xn: transaction for message
+ * @ctlq_msg: received message
+ *
+ * Read the tstamps Tx tstamp values from a received message and put them
+ * directly to the skb. The number of timestamps to read is specified by
+ * the virtchnl message.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int
+idpf_ptp_get_tx_tstamp_async_handler(struct idpf_adapter *adapter,
+ struct idpf_vc_xn *xn,
+ const struct idpf_ctlq_msg *ctlq_msg)
+{
+ struct virtchnl2_ptp_get_vport_tx_tstamp_latches *recv_tx_tstamp_msg;
+ struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps;
+ struct virtchnl2_ptp_tx_tstamp_latch tstamp_latch;
+ struct idpf_ptp_tx_tstamp *tx_tstamp, *tmp;
+ struct idpf_vport *tstamp_vport = NULL;
+ struct list_head *head;
+ u16 num_latches;
+ u32 vport_id;
+ int err = 0;
+
+ recv_tx_tstamp_msg = ctlq_msg->ctx.indirect.payload->va;
+ vport_id = le32_to_cpu(recv_tx_tstamp_msg->vport_id);
+
+ idpf_for_each_vport(adapter, vport) {
+ if (!vport)
+ continue;
+
+ if (vport->vport_id == vport_id) {
+ tstamp_vport = vport;
+ break;
+ }
+ }
+
+ if (!tstamp_vport || !tstamp_vport->tx_tstamp_caps)
+ return -EINVAL;
+
+ tx_tstamp_caps = tstamp_vport->tx_tstamp_caps;
+ num_latches = le16_to_cpu(recv_tx_tstamp_msg->num_latches);
+
+ spin_lock_bh(&tx_tstamp_caps->latches_lock);
+ head = &tx_tstamp_caps->latches_in_use;
+
+ for (u16 i = 0; i < num_latches; i++) {
+ tstamp_latch = recv_tx_tstamp_msg->tstamp_latches[i];
+
+ if (!tstamp_latch.valid)
+ continue;
+
+ if (list_empty(head)) {
+ err = -ENOBUFS;
+ goto unlock;
+ }
+
+ list_for_each_entry_safe(tx_tstamp, tmp, head, list_member) {
+ if (tstamp_latch.index == tx_tstamp->idx) {
+ list_del(&tx_tstamp->list_member);
+ err = idpf_ptp_get_tstamp_value(tstamp_vport,
+ &tstamp_latch,
+ tx_tstamp);
+ if (err)
+ goto unlock;
+
+ break;
+ }
+ }
+ }
+
+unlock:
+ spin_unlock_bh(&tx_tstamp_caps->latches_lock);
+
+ return err;
+}
+
+/**
+ * idpf_ptp_get_tx_tstamp - Send virtchnl get Tx timestamp latches message
+ * @vport: Virtual port structure
+ *
+ * Send virtchnl get Tx tstamp message to read the value of the HW timestamp.
+ * The message contains a list of indexes set in the Tx descriptors.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int idpf_ptp_get_tx_tstamp(struct idpf_vport *vport)
+{
+ struct virtchnl2_ptp_get_vport_tx_tstamp_latches *send_tx_tstamp_msg;
+ struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps;
+ struct idpf_vc_xn_params xn_params = {
+ .vc_op = VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP,
+ .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
+ .async = true,
+ .async_handler = idpf_ptp_get_tx_tstamp_async_handler,
+ };
+ struct idpf_ptp_tx_tstamp *ptp_tx_tstamp;
+ int reply_sz, size, msg_size;
+ struct list_head *head;
+ bool state_upd;
+ u16 id = 0;
+
+ tx_tstamp_caps = vport->tx_tstamp_caps;
+ head = &tx_tstamp_caps->latches_in_use;
+
+ size = struct_size(send_tx_tstamp_msg, tstamp_latches,
+ tx_tstamp_caps->num_entries);
+ send_tx_tstamp_msg = kzalloc(size, GFP_KERNEL);
+ if (!send_tx_tstamp_msg)
+ return -ENOMEM;
+
+ spin_lock_bh(&tx_tstamp_caps->latches_lock);
+ list_for_each_entry(ptp_tx_tstamp, head, list_member) {
+ u8 idx;
+
+ state_upd = idpf_ptp_update_tstamp_tracker(tx_tstamp_caps,
+ ptp_tx_tstamp->skb,
+ IDPF_PTP_REQUEST,
+ IDPF_PTP_READ_VALUE);
+ if (!state_upd)
+ continue;
+
+ idx = ptp_tx_tstamp->idx;
+ send_tx_tstamp_msg->tstamp_latches[id].index = idx;
+ id++;
+ }
+ spin_unlock_bh(&tx_tstamp_caps->latches_lock);
+
+ msg_size = struct_size(send_tx_tstamp_msg, tstamp_latches, id);
+ send_tx_tstamp_msg->vport_id = cpu_to_le32(vport->vport_id);
+ send_tx_tstamp_msg->num_latches = cpu_to_le16(id);
+ xn_params.send_buf.iov_base = send_tx_tstamp_msg;
+ xn_params.send_buf.iov_len = msg_size;
+
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ kfree(send_tx_tstamp_msg);
+
+ return min(reply_sz, 0);
+}
diff --git a/drivers/net/ethernet/intel/idpf/virtchnl2.h b/drivers/net/ethernet/intel/idpf/virtchnl2.h
index 4a3c4454d25a..02ae447cc24a 100644
--- a/drivers/net/ethernet/intel/idpf/virtchnl2.h
+++ b/drivers/net/ethernet/intel/idpf/virtchnl2.h
@@ -4,6 +4,8 @@
#ifndef _VIRTCHNL2_H_
#define _VIRTCHNL2_H_
+#include <linux/if_ether.h>
+
/* All opcodes associated with virtchnl2 are prefixed with virtchnl2 or
* VIRTCHNL2. Any future opcodes, offloads/capabilities, structures,
* and defines must be prefixed with virtchnl2 or VIRTCHNL2 to avoid confusion.
@@ -17,8 +19,6 @@
* must remain unchanged over time, so we specify explicit values for all enums.
*/
-#include "virtchnl2_lan_desc.h"
-
/* This macro is used to generate compilation errors if a structure
* is not exactly the correct length.
*/
@@ -62,12 +62,28 @@ enum virtchnl2_op {
VIRTCHNL2_OP_GET_PTYPE_INFO = 526,
/* Opcode 527 and 528 are reserved for VIRTCHNL2_OP_GET_PTYPE_ID and
* VIRTCHNL2_OP_GET_PTYPE_INFO_RAW.
- * Opcodes 529, 530, 531, 532 and 533 are reserved.
*/
+ VIRTCHNL2_OP_RDMA = 529,
+ /* Opcodes 530 through 533 are reserved. */
VIRTCHNL2_OP_LOOPBACK = 534,
VIRTCHNL2_OP_ADD_MAC_ADDR = 535,
VIRTCHNL2_OP_DEL_MAC_ADDR = 536,
VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE = 537,
+
+ /* TimeSync opcodes */
+ VIRTCHNL2_OP_PTP_GET_CAPS = 541,
+ VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP = 542,
+ VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME = 543,
+ VIRTCHNL2_OP_PTP_GET_CROSS_TIME = 544,
+ VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME = 545,
+ VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE = 546,
+ VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME = 547,
+ VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS = 548,
+ VIRTCHNL2_OP_GET_LAN_MEMORY_REGIONS = 549,
+ /* Opcode 550 is reserved */
+ VIRTCHNL2_OP_ADD_FLOW_RULE = 551,
+ VIRTCHNL2_OP_GET_FLOW_RULE = 552,
+ VIRTCHNL2_OP_DEL_FLOW_RULE = 553,
};
/**
@@ -141,22 +157,22 @@ enum virtchnl2_cap_seg {
VIRTCHNL2_CAP_SEG_TX_DOUBLE_TUNNEL = BIT(8),
};
-/* Receive Side Scaling Flow type capability flags */
-enum virtchnl2_cap_rss {
- VIRTCHNL2_CAP_RSS_IPV4_TCP = BIT(0),
- VIRTCHNL2_CAP_RSS_IPV4_UDP = BIT(1),
- VIRTCHNL2_CAP_RSS_IPV4_SCTP = BIT(2),
- VIRTCHNL2_CAP_RSS_IPV4_OTHER = BIT(3),
- VIRTCHNL2_CAP_RSS_IPV6_TCP = BIT(4),
- VIRTCHNL2_CAP_RSS_IPV6_UDP = BIT(5),
- VIRTCHNL2_CAP_RSS_IPV6_SCTP = BIT(6),
- VIRTCHNL2_CAP_RSS_IPV6_OTHER = BIT(7),
- VIRTCHNL2_CAP_RSS_IPV4_AH = BIT(8),
- VIRTCHNL2_CAP_RSS_IPV4_ESP = BIT(9),
- VIRTCHNL2_CAP_RSS_IPV4_AH_ESP = BIT(10),
- VIRTCHNL2_CAP_RSS_IPV6_AH = BIT(11),
- VIRTCHNL2_CAP_RSS_IPV6_ESP = BIT(12),
- VIRTCHNL2_CAP_RSS_IPV6_AH_ESP = BIT(13),
+/* Receive Side Scaling and Flow Steering Flow type capability flags */
+enum virtchnl2_flow_types {
+ VIRTCHNL2_FLOW_IPV4_TCP = BIT(0),
+ VIRTCHNL2_FLOW_IPV4_UDP = BIT(1),
+ VIRTCHNL2_FLOW_IPV4_SCTP = BIT(2),
+ VIRTCHNL2_FLOW_IPV4_OTHER = BIT(3),
+ VIRTCHNL2_FLOW_IPV6_TCP = BIT(4),
+ VIRTCHNL2_FLOW_IPV6_UDP = BIT(5),
+ VIRTCHNL2_FLOW_IPV6_SCTP = BIT(6),
+ VIRTCHNL2_FLOW_IPV6_OTHER = BIT(7),
+ VIRTCHNL2_FLOW_IPV4_AH = BIT(8),
+ VIRTCHNL2_FLOW_IPV4_ESP = BIT(9),
+ VIRTCHNL2_FLOW_IPV4_AH_ESP = BIT(10),
+ VIRTCHNL2_FLOW_IPV6_AH = BIT(11),
+ VIRTCHNL2_FLOW_IPV6_ESP = BIT(12),
+ VIRTCHNL2_FLOW_IPV6_AH_ESP = BIT(13),
};
/* Header split capability flags */
@@ -182,8 +198,9 @@ enum virtchnl2_cap_other {
VIRTCHNL2_CAP_RDMA = BIT_ULL(0),
VIRTCHNL2_CAP_SRIOV = BIT_ULL(1),
VIRTCHNL2_CAP_MACFILTER = BIT_ULL(2),
- VIRTCHNL2_CAP_FLOW_DIRECTOR = BIT_ULL(3),
- /* Queue based scheduling using split queue model */
+ /* Other capability 3 is available
+ * Queue based scheduling using split queue model
+ */
VIRTCHNL2_CAP_SPLITQ_QSCHED = BIT_ULL(4),
VIRTCHNL2_CAP_CRC = BIT_ULL(5),
VIRTCHNL2_CAP_ADQ = BIT_ULL(6),
@@ -197,16 +214,37 @@ enum virtchnl2_cap_other {
/* EDT: Earliest Departure Time capability used for Timing Wheel */
VIRTCHNL2_CAP_EDT = BIT_ULL(14),
VIRTCHNL2_CAP_ADV_RSS = BIT_ULL(15),
- VIRTCHNL2_CAP_FDIR = BIT_ULL(16),
+ /* Other capability 16 is available */
VIRTCHNL2_CAP_RX_FLEX_DESC = BIT_ULL(17),
VIRTCHNL2_CAP_PTYPE = BIT_ULL(18),
VIRTCHNL2_CAP_LOOPBACK = BIT_ULL(19),
/* Other capability 20 is reserved */
+ VIRTCHNL2_CAP_FLOW_STEER = BIT_ULL(21),
+ VIRTCHNL2_CAP_LAN_MEMORY_REGIONS = BIT_ULL(22),
/* this must be the last capability */
VIRTCHNL2_CAP_OEM = BIT_ULL(63),
};
+/**
+ * enum virtchnl2_action_types - Available actions for sideband flow steering
+ * @VIRTCHNL2_ACTION_DROP: Drop the packet
+ * @VIRTCHNL2_ACTION_PASSTHRU: Forward the packet to the next classifier/stage
+ * @VIRTCHNL2_ACTION_QUEUE: Forward the packet to a receive queue
+ * @VIRTCHNL2_ACTION_Q_GROUP: Forward the packet to a receive queue group
+ * @VIRTCHNL2_ACTION_MARK: Mark the packet with specific marker value
+ * @VIRTCHNL2_ACTION_COUNT: Increment the corresponding counter
+ */
+enum virtchnl2_action_types {
+ VIRTCHNL2_ACTION_DROP = BIT(0),
+ VIRTCHNL2_ACTION_PASSTHRU = BIT(1),
+ VIRTCHNL2_ACTION_QUEUE = BIT(2),
+ VIRTCHNL2_ACTION_Q_GROUP = BIT(3),
+ VIRTCHNL2_ACTION_MARK = BIT(4),
+ VIRTCHNL2_ACTION_COUNT = BIT(5),
+};
+
/* underlying device type */
enum virtchl2_device_type {
VIRTCHNL2_MEV_DEVICE = 0,
@@ -448,7 +486,7 @@ VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_version_info);
* @seg_caps: See enum virtchnl2_cap_seg.
* @hsplit_caps: See enum virtchnl2_cap_rx_hsplit_at.
* @rsc_caps: See enum virtchnl2_cap_rsc.
- * @rss_caps: See enum virtchnl2_cap_rss.
+ * @rss_caps: See enum virtchnl2_flow_types.
* @other_caps: See enum virtchnl2_cap_other.
* @mailbox_dyn_ctl: DYN_CTL register offset and vector id for mailbox
* provided by CP.
@@ -473,6 +511,8 @@ VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_version_info);
* segment offload.
* @max_hdr_buf_per_lso: Max number of header buffers that can be used for
* an LSO.
+ * @num_rdma_allocated_vectors: Maximum number of allocated RDMA vectors for
+ * the device.
* @pad1: Padding for future extensions.
*
* Dataplane driver sends this message to CP to negotiate capabilities and
@@ -520,7 +560,8 @@ struct virtchnl2_get_capabilities {
__le32 device_type;
u8 min_sso_packet_len;
u8 max_hdr_buf_per_lso;
- u8 pad1[10];
+ __le16 num_rdma_allocated_vectors;
+ u8 pad1[8];
};
VIRTCHNL2_CHECK_STRUCT_LEN(80, virtchnl2_get_capabilities);
@@ -555,11 +596,28 @@ VIRTCHNL2_CHECK_STRUCT_LEN(32, virtchnl2_queue_reg_chunk);
struct virtchnl2_queue_reg_chunks {
__le16 num_chunks;
u8 pad[6];
- struct virtchnl2_queue_reg_chunk chunks[];
+ struct virtchnl2_queue_reg_chunk chunks[] __counted_by_le(num_chunks);
};
VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_queue_reg_chunks);
/**
+ * enum virtchnl2_vport_flags - Vport flags that indicate vport capabilities.
+ * @VIRTCHNL2_VPORT_UPLINK_PORT: Representatives of underlying physical ports
+ * @VIRTCHNL2_VPORT_INLINE_FLOW_STEER: Inline flow steering enabled
+ * @VIRTCHNL2_VPORT_INLINE_FLOW_STEER_RXQ: Inline flow steering enabled
+ * with explicit Rx queue action
+ * @VIRTCHNL2_VPORT_SIDEBAND_FLOW_STEER: Sideband flow steering enabled
+ * @VIRTCHNL2_VPORT_ENABLE_RDMA: RDMA is enabled for this vport
+ */
+enum virtchnl2_vport_flags {
+ VIRTCHNL2_VPORT_UPLINK_PORT = BIT(0),
+ VIRTCHNL2_VPORT_INLINE_FLOW_STEER = BIT(1),
+ VIRTCHNL2_VPORT_INLINE_FLOW_STEER_RXQ = BIT(2),
+ VIRTCHNL2_VPORT_SIDEBAND_FLOW_STEER = BIT(3),
+ VIRTCHNL2_VPORT_ENABLE_RDMA = BIT(4),
+};
+
+/**
* struct virtchnl2_create_vport - Create vport config info.
* @vport_type: See enum virtchnl2_vport_type.
* @txq_model: See virtchnl2_queue_model.
@@ -577,10 +635,18 @@ VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_queue_reg_chunks);
* @max_mtu: Max MTU. CP populates this field on response.
* @vport_id: Vport id. CP populates this field on response.
* @default_mac_addr: Default MAC address.
- * @pad: Padding.
+ * @vport_flags: See enum virtchnl2_vport_flags.
* @rx_desc_ids: See VIRTCHNL2_RX_DESC_IDS definitions.
* @tx_desc_ids: See VIRTCHNL2_TX_DESC_IDS definitions.
* @pad1: Padding.
+ * @inline_flow_caps: Bit mask of supported inline-flow-steering
+ * flow types (See enum virtchnl2_flow_types)
+ * @sideband_flow_caps: Bit mask of supported sideband-flow-steering
+ * flow types (See enum virtchnl2_flow_types)
+ * @sideband_flow_actions: Bit mask of supported action types
+ * for sideband flow steering (See enum virtchnl2_action_types)
+ * @flow_steer_max_rules: Max rules allowed for inline and sideband
+ * flow steering combined
* @rss_algorithm: RSS algorithm.
* @rss_key_size: RSS key size.
* @rss_lut_size: RSS LUT size.
@@ -610,10 +676,14 @@ struct virtchnl2_create_vport {
__le16 max_mtu;
__le32 vport_id;
u8 default_mac_addr[ETH_ALEN];
- __le16 pad;
+ __le16 vport_flags;
__le64 rx_desc_ids;
__le64 tx_desc_ids;
- u8 pad1[72];
+ u8 pad1[48];
+ __le64 inline_flow_caps;
+ __le64 sideband_flow_caps;
+ __le32 sideband_flow_actions;
+ __le32 flow_steer_max_rules;
__le32 rss_algorithm;
__le16 rss_key_size;
__le16 rss_lut_size;
@@ -703,7 +773,7 @@ struct virtchnl2_config_tx_queues {
__le32 vport_id;
__le16 num_qinfo;
u8 pad[10];
- struct virtchnl2_txq_info qinfo[];
+ struct virtchnl2_txq_info qinfo[] __counted_by_le(num_qinfo);
};
VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_config_tx_queues);
@@ -782,7 +852,7 @@ struct virtchnl2_config_rx_queues {
__le32 vport_id;
__le16 num_qinfo;
u8 pad[18];
- struct virtchnl2_rxq_info qinfo[];
+ struct virtchnl2_rxq_info qinfo[] __counted_by_le(num_qinfo);
};
VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_config_rx_queues);
@@ -868,7 +938,7 @@ VIRTCHNL2_CHECK_STRUCT_LEN(32, virtchnl2_vector_chunk);
struct virtchnl2_vector_chunks {
__le16 num_vchunks;
u8 pad[14];
- struct virtchnl2_vector_chunk vchunks[];
+ struct virtchnl2_vector_chunk vchunks[] __counted_by_le(num_vchunks);
};
VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_vector_chunks);
@@ -912,7 +982,7 @@ struct virtchnl2_rss_lut {
__le16 lut_entries_start;
__le16 lut_entries;
u8 pad[4];
- __le32 lut[];
+ __le32 lut[] __counted_by_le(lut_entries);
};
VIRTCHNL2_CHECK_STRUCT_LEN(12, virtchnl2_rss_lut);
@@ -977,7 +1047,7 @@ struct virtchnl2_ptype {
u8 ptype_id_8;
u8 proto_id_count;
__le16 pad;
- __le16 proto_id[];
+ __le16 proto_id[] __counted_by(proto_id_count);
} __packed __aligned(2);
VIRTCHNL2_CHECK_STRUCT_LEN(6, virtchnl2_ptype);
@@ -1104,7 +1174,7 @@ struct virtchnl2_rss_key {
__le32 vport_id;
__le16 key_len;
u8 pad;
- u8 key_flex[];
+ u8 key_flex[] __counted_by_le(key_len);
} __packed;
VIRTCHNL2_CHECK_STRUCT_LEN(7, virtchnl2_rss_key);
@@ -1131,7 +1201,7 @@ VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_queue_chunk);
struct virtchnl2_queue_chunks {
__le16 num_chunks;
u8 pad[6];
- struct virtchnl2_queue_chunk chunks[];
+ struct virtchnl2_queue_chunk chunks[] __counted_by_le(num_chunks);
};
VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_queue_chunks);
@@ -1195,7 +1265,7 @@ struct virtchnl2_queue_vector_maps {
__le32 vport_id;
__le16 num_qv_maps;
u8 pad[10];
- struct virtchnl2_queue_vector qv_maps[];
+ struct virtchnl2_queue_vector qv_maps[] __counted_by_le(num_qv_maps);
};
VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_queue_vector_maps);
@@ -1247,7 +1317,7 @@ struct virtchnl2_mac_addr_list {
__le32 vport_id;
__le16 num_mac_addr;
u8 pad[2];
- struct virtchnl2_mac_addr mac_addr_list[];
+ struct virtchnl2_mac_addr mac_addr_list[] __counted_by_le(num_mac_addr);
};
VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_mac_addr_list);
@@ -1270,4 +1340,474 @@ struct virtchnl2_promisc_info {
};
VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_promisc_info);
+/**
+ * enum virtchnl2_ptp_caps - PTP capabilities
+ * @VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME: direct access to get the time of
+ * device clock
+ * @VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME_MB: mailbox access to get the time of
+ * device clock
+ * @VIRTCHNL2_CAP_PTP_GET_CROSS_TIME: direct access to cross timestamp
+ * @VIRTCHNL2_CAP_PTP_GET_CROSS_TIME_MB: mailbox access to cross timestamp
+ * @VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME: direct access to set the time of
+ * device clock
+ * @VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME_MB: mailbox access to set the time of
+ * device clock
+ * @VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK: direct access to adjust the time of device
+ * clock
+ * @VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK_MB: mailbox access to adjust the time of
+ * device clock
+ * @VIRTCHNL2_CAP_PTP_TX_TSTAMPS: direct access to the Tx timestamping
+ * @VIRTCHNL2_CAP_PTP_TX_TSTAMPS_MB: mailbox access to the Tx timestamping
+ *
+ * PF/VF negotiates a set of supported PTP capabilities with the Control Plane.
+ * There are two access methods - mailbox (_MB) and direct.
+ * PTP capabilities enables Main Timer operations: get/set/adjust Main Timer,
+ * cross timestamping and the Tx timestamping.
+ */
+enum virtchnl2_ptp_caps {
+ VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME = BIT(0),
+ VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME_MB = BIT(1),
+ VIRTCHNL2_CAP_PTP_GET_CROSS_TIME = BIT(2),
+ VIRTCHNL2_CAP_PTP_GET_CROSS_TIME_MB = BIT(3),
+ VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME = BIT(4),
+ VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME_MB = BIT(5),
+ VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK = BIT(6),
+ VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK_MB = BIT(7),
+ VIRTCHNL2_CAP_PTP_TX_TSTAMPS = BIT(8),
+ VIRTCHNL2_CAP_PTP_TX_TSTAMPS_MB = BIT(9),
+};
+
+/**
+ * struct virtchnl2_ptp_clk_reg_offsets - Offsets of device and PHY clocks
+ * registers.
+ * @dev_clk_ns_l: Device clock low register offset
+ * @dev_clk_ns_h: Device clock high register offset
+ * @phy_clk_ns_l: PHY clock low register offset
+ * @phy_clk_ns_h: PHY clock high register offset
+ * @cmd_sync_trigger: The command sync trigger register offset
+ * @pad: Padding for future extensions
+ */
+struct virtchnl2_ptp_clk_reg_offsets {
+ __le32 dev_clk_ns_l;
+ __le32 dev_clk_ns_h;
+ __le32 phy_clk_ns_l;
+ __le32 phy_clk_ns_h;
+ __le32 cmd_sync_trigger;
+ u8 pad[4];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_ptp_clk_reg_offsets);
+
+/**
+ * struct virtchnl2_ptp_cross_time_reg_offsets - Offsets of the device cross
+ * time registers.
+ * @sys_time_ns_l: System time low register offset
+ * @sys_time_ns_h: System time high register offset
+ * @cmd_sync_trigger: The command sync trigger register offset
+ * @pad: Padding for future extensions
+ */
+struct virtchnl2_ptp_cross_time_reg_offsets {
+ __le32 sys_time_ns_l;
+ __le32 sys_time_ns_h;
+ __le32 cmd_sync_trigger;
+ u8 pad[4];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_cross_time_reg_offsets);
+
+/**
+ * struct virtchnl2_ptp_clk_adj_reg_offsets - Offsets of device and PHY clocks
+ * adjustments registers.
+ * @dev_clk_cmd_type: Device clock command type register offset
+ * @dev_clk_incval_l: Device clock increment value low register offset
+ * @dev_clk_incval_h: Device clock increment value high registers offset
+ * @dev_clk_shadj_l: Device clock shadow adjust low register offset
+ * @dev_clk_shadj_h: Device clock shadow adjust high register offset
+ * @phy_clk_cmd_type: PHY timer command type register offset
+ * @phy_clk_incval_l: PHY timer increment value low register offset
+ * @phy_clk_incval_h: PHY timer increment value high register offset
+ * @phy_clk_shadj_l: PHY timer shadow adjust low register offset
+ * @phy_clk_shadj_h: PHY timer shadow adjust high register offset
+ */
+struct virtchnl2_ptp_clk_adj_reg_offsets {
+ __le32 dev_clk_cmd_type;
+ __le32 dev_clk_incval_l;
+ __le32 dev_clk_incval_h;
+ __le32 dev_clk_shadj_l;
+ __le32 dev_clk_shadj_h;
+ __le32 phy_clk_cmd_type;
+ __le32 phy_clk_incval_l;
+ __le32 phy_clk_incval_h;
+ __le32 phy_clk_shadj_l;
+ __le32 phy_clk_shadj_h;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(40, virtchnl2_ptp_clk_adj_reg_offsets);
+
+/**
+ * struct virtchnl2_ptp_tx_tstamp_latch_caps - PTP Tx timestamp latch
+ * capabilities.
+ * @tx_latch_reg_offset_l: Tx timestamp latch low register offset
+ * @tx_latch_reg_offset_h: Tx timestamp latch high register offset
+ * @index: Latch index provided to the Tx descriptor
+ * @pad: Padding for future extensions
+ */
+struct virtchnl2_ptp_tx_tstamp_latch_caps {
+ __le32 tx_latch_reg_offset_l;
+ __le32 tx_latch_reg_offset_h;
+ u8 index;
+ u8 pad[7];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_tx_tstamp_latch_caps);
+
+/**
+ * struct virtchnl2_ptp_get_vport_tx_tstamp_caps - Structure that defines Tx
+ * tstamp entries.
+ * @vport_id: Vport number
+ * @num_latches: Total number of latches
+ * @tstamp_ns_lo_bit: First bit for nanosecond part of the timestamp
+ * @tstamp_ns_hi_bit: Last bit for nanosecond part of the timestamp
+ * @pad: Padding for future tstamp granularity extensions
+ * @tstamp_latches: Capabilities of Tx timestamp entries
+ *
+ * PF/VF sends this message to negotiate the Tx timestamp latches for each
+ * Vport.
+ *
+ * Associated with VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS.
+ */
+struct virtchnl2_ptp_get_vport_tx_tstamp_caps {
+ __le32 vport_id;
+ __le16 num_latches;
+ u8 tstamp_ns_lo_bit;
+ u8 tstamp_ns_hi_bit;
+ u8 pad[8];
+
+ struct virtchnl2_ptp_tx_tstamp_latch_caps tstamp_latches[]
+ __counted_by_le(num_latches);
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_get_vport_tx_tstamp_caps);
+
+/**
+ * struct virtchnl2_ptp_get_caps - Get PTP capabilities
+ * @caps: PTP capability bitmap. See enum virtchnl2_ptp_caps
+ * @max_adj: The maximum possible frequency adjustment
+ * @base_incval: The default timer increment value
+ * @peer_mbx_q_id: ID of the PTP Device Control daemon queue
+ * @peer_id: Peer ID for PTP Device Control daemon
+ * @secondary_mbx: Indicates to the driver that it should create a secondary
+ * mailbox to interact with the control plane for PTP
+ * @pad: Padding for future extensions
+ * @clk_offsets: Main timer and PHY registers offsets
+ * @cross_time_offsets: Cross time registers offsets
+ * @clk_adj_offsets: Offsets needed to adjust the PHY and the main timer
+ *
+ * PF/VF sends this message to negotiate PTP capabilities. CP updates bitmap
+ * with supported features and fulfills appropriate structures.
+ * If HW uses primary MBX for PTP: secondary_mbx is set to false.
+ * If HW uses secondary MBX for PTP: secondary_mbx is set to true.
+ * The control plane has 2 mailboxes and the driver has 1; the send-to-peer
+ * path may be used to send a message using a valid peer_mbx_q_id and
+ * peer_id.
+ * If HW does not use the send-to-peer path: secondary_mbx is a don't-care
+ * field and peer_mbx_q_id holds an invalid value (0xFFFF).
+ *
+ * Associated with VIRTCHNL2_OP_PTP_GET_CAPS.
+ */
+struct virtchnl2_ptp_get_caps {
+ __le32 caps;
+ __le32 max_adj;
+ __le64 base_incval;
+ __le16 peer_mbx_q_id;
+ u8 peer_id;
+ u8 secondary_mbx;
+ u8 pad[4];
+
+ struct virtchnl2_ptp_clk_reg_offsets clk_offsets;
+ struct virtchnl2_ptp_cross_time_reg_offsets cross_time_offsets;
+ struct virtchnl2_ptp_clk_adj_reg_offsets clk_adj_offsets;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(104, virtchnl2_ptp_get_caps);
+
+/**
+ * struct virtchnl2_ptp_tx_tstamp_latch - Structure that describes tx tstamp
+ * values, index and validity.
+ * @tstamp: Timestamp value
+ * @index: Timestamp index from which the value is read
+ * @valid: Timestamp validity
+ * @pad: Padding for future extensions
+ */
+struct virtchnl2_ptp_tx_tstamp_latch {
+ __le64 tstamp;
+ u8 index;
+ u8 valid;
+ u8 pad[6];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_tx_tstamp_latch);
+
+/**
+ * struct virtchnl2_ptp_get_vport_tx_tstamp_latches - Tx timestamp latches
+ * associated with the vport.
+ * @vport_id: Number of vport that requests the timestamp
+ * @num_latches: Number of latches
+ * @get_devtime_with_txtstmp: Flag to request device time along with Tx timestamp
+ * @pad: Padding for future extensions
+ * @device_time: device time if get_devtime_with_txtstmp was set in request
+ * @tstamp_latches: PTP TX timestamp latch
+ *
+ * PF/VF sends this message to receive a specified number of timestamps
+ * entries.
+ *
+ * Associated with VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP.
+ */
+struct virtchnl2_ptp_get_vport_tx_tstamp_latches {
+ __le32 vport_id;
+ __le16 num_latches;
+ u8 get_devtime_with_txtstmp;
+ u8 pad[1];
+ __le64 device_time;
+
+ struct virtchnl2_ptp_tx_tstamp_latch tstamp_latches[]
+ __counted_by_le(num_latches);
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_get_vport_tx_tstamp_latches);
+
+/**
+ * struct virtchnl2_ptp_get_dev_clk_time - Associated with message
+ * VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME.
+ * @dev_time_ns: Device clock time value in nanoseconds
+ *
+ * PF/VF sends this message to receive the time from the main timer.
+ */
+struct virtchnl2_ptp_get_dev_clk_time {
+ __le64 dev_time_ns;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptp_get_dev_clk_time);
+
+/**
+ * struct virtchnl2_ptp_get_cross_time: Associated with message
+ * VIRTCHNL2_OP_PTP_GET_CROSS_TIME.
+ * @sys_time_ns: System counter value expressed in nanoseconds, read
+ * synchronously with device time
+ * @dev_time_ns: Device clock time value expressed in nanoseconds
+ *
+ * PF/VF sends this message to receive the cross time.
+ */
+struct virtchnl2_ptp_get_cross_time {
+ __le64 sys_time_ns;
+ __le64 dev_time_ns;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_get_cross_time);
+
+/**
+ * struct virtchnl2_ptp_set_dev_clk_time: Associated with message
+ * VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME.
+ * @dev_time_ns: Device time value expressed in nanoseconds to set
+ *
+ * PF/VF sends this message to set the time of the main timer.
+ */
+struct virtchnl2_ptp_set_dev_clk_time {
+ __le64 dev_time_ns;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptp_set_dev_clk_time);
+
+/**
+ * struct virtchnl2_ptp_adj_dev_clk_fine: Associated with message
+ * VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE.
+ * @incval: Source timer increment value per clock cycle
+ *
+ * PF/VF sends this message to adjust the frequency of the main timer by the
+ * indicated increment value.
+ */
+struct virtchnl2_ptp_adj_dev_clk_fine {
+ __le64 incval;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptp_adj_dev_clk_fine);
+
+/**
+ * struct virtchnl2_ptp_adj_dev_clk_time: Associated with message
+ * VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME.
+ * @delta: Offset in nanoseconds to adjust the time by
+ *
+ * PF/VF sends this message to adjust the time of the main timer by the delta.
+ */
+struct virtchnl2_ptp_adj_dev_clk_time {
+ __le64 delta;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptp_adj_dev_clk_time);
+
+/**
+ * struct virtchnl2_mem_region - MMIO memory region
+ * @start_offset: starting offset of the MMIO memory region
+ * @size: size of the MMIO memory region
+ */
+struct virtchnl2_mem_region {
+ __le64 start_offset;
+ __le64 size;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_mem_region);
+
+/**
+ * struct virtchnl2_get_lan_memory_regions - List of LAN MMIO memory regions
+ * @num_memory_regions: number of memory regions
+ * @pad: Padding
+ * @mem_reg: List with memory region info
+ *
+ * PF/VF sends this message to learn what LAN MMIO memory regions it should map.
+ */
+struct virtchnl2_get_lan_memory_regions {
+ __le16 num_memory_regions;
+ u8 pad[6];
+ struct virtchnl2_mem_region mem_reg[];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_get_lan_memory_regions);
+
+#define VIRTCHNL2_MAX_NUM_PROTO_HDRS 4
+#define VIRTCHNL2_MAX_SIZE_RAW_PACKET 256
+#define VIRTCHNL2_MAX_NUM_ACTIONS 8
+
+/**
+ * struct virtchnl2_proto_hdr - represent one protocol header
+ * @hdr_type: See enum virtchnl2_proto_hdr_type
+ * @pad: padding
+ * @buffer_spec: binary buffer based on header type.
+ * @buffer_mask: mask applied on buffer_spec.
+ *
+ * Structure to hold protocol headers based on hdr_type
+ */
+struct virtchnl2_proto_hdr {
+ __le32 hdr_type;
+ u8 pad[4];
+ u8 buffer_spec[64];
+ u8 buffer_mask[64];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(136, virtchnl2_proto_hdr);
+
+/**
+ * struct virtchnl2_proto_hdrs - struct to represent match criteria
+ * @tunnel_level: specify where protocol header(s) start from.
+ * must be 0 when sending a raw packet request.
+ * 0 - from the outer layer
+ * 1 - from the first inner layer
+ * 2 - from the second inner layer
+ * @pad: Padding bytes
+ * @count: total number of protocol headers in proto_hdr. 0 for raw packet.
+ * @proto_hdr: Array of protocol headers
+ * @raw: struct holding raw packet buffer when count is 0
+ */
+struct virtchnl2_proto_hdrs {
+ u8 tunnel_level;
+ u8 pad[3];
+ __le32 count;
+ union {
+ struct virtchnl2_proto_hdr
+ proto_hdr[VIRTCHNL2_MAX_NUM_PROTO_HDRS];
+ struct {
+ __le16 pkt_len;
+ u8 spec[VIRTCHNL2_MAX_SIZE_RAW_PACKET];
+ u8 mask[VIRTCHNL2_MAX_SIZE_RAW_PACKET];
+ } raw;
+ };
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(552, virtchnl2_proto_hdrs);
+
+/**
+ * struct virtchnl2_rule_action - struct representing single action for a flow
+ * @action_type: see enum virtchnl2_action_types
+ * @act_conf: union representing action depending on action_type.
+ * @act_conf.q_id: queue id to redirect the packets to.
+ * @act_conf.q_grp_id: queue group id to redirect the packets to.
+ * @act_conf.ctr_id: used for the count action. If the input value is
+ * 0xFFFFFFFF, the control plane assigns a new counter and
+ * returns the counter ID to the driver. Otherwise it must be
+ * an existing counter ID given to the driver for an earlier
+ * flow, and this flow will share that counter.
+ * @act_conf.mark_id: Value used to mark the packets. Used for mark action.
+ * @act_conf.reserved: Reserved for future use.
+ */
+struct virtchnl2_rule_action {
+ __le32 action_type;
+ union {
+ __le32 q_id;
+ __le32 q_grp_id;
+ __le32 ctr_id;
+ __le32 mark_id;
+ u8 reserved[8];
+ } act_conf;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(12, virtchnl2_rule_action);
+
+/**
+ * struct virtchnl2_rule_action_set - struct representing multiple actions
+ * @count: number of valid actions in the action set of a rule
+ * @actions: array of struct virtchnl2_rule_action
+ */
+struct virtchnl2_rule_action_set {
+ /* action count must be less than VIRTCHNL2_MAX_NUM_ACTIONS */
+ __le32 count;
+ struct virtchnl2_rule_action actions[VIRTCHNL2_MAX_NUM_ACTIONS];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(100, virtchnl2_rule_action_set);
+
+/**
+ * struct virtchnl2_flow_rule - represent one flow steering rule
+ * @proto_hdrs: array of protocol header buffers representing match criteria
+ * @action_set: series of actions to be applied for given rule
+ * @priority: rule priority.
+ * @pad: padding for future extensions.
+ */
+struct virtchnl2_flow_rule {
+ struct virtchnl2_proto_hdrs proto_hdrs;
+ struct virtchnl2_rule_action_set action_set;
+ __le32 priority;
+ u8 pad[8];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(664, virtchnl2_flow_rule);
+
+enum virtchnl2_flow_rule_status {
+ VIRTCHNL2_FLOW_RULE_SUCCESS = 1,
+ VIRTCHNL2_FLOW_RULE_NORESOURCE = 2,
+ VIRTCHNL2_FLOW_RULE_EXIST = 3,
+ VIRTCHNL2_FLOW_RULE_TIMEOUT = 4,
+ VIRTCHNL2_FLOW_RULE_FLOW_TYPE_NOT_SUPPORTED = 5,
+ VIRTCHNL2_FLOW_RULE_MATCH_KEY_NOT_SUPPORTED = 6,
+ VIRTCHNL2_FLOW_RULE_ACTION_NOT_SUPPORTED = 7,
+ VIRTCHNL2_FLOW_RULE_ACTION_COMBINATION_INVALID = 8,
+ VIRTCHNL2_FLOW_RULE_ACTION_DATA_INVALID = 9,
+ VIRTCHNL2_FLOW_RULE_NOT_ADDED = 10,
+};
+
+/**
+ * struct virtchnl2_flow_rule_info - structure representing a single flow rule
+ * @rule_id: rule_id associated with the flow_rule.
+ * @rule_cfg: structure representing rule.
+ * @status: status of rule programming. See enum virtchnl2_flow_rule_status.
+ */
+struct virtchnl2_flow_rule_info {
+ __le32 rule_id;
+ struct virtchnl2_flow_rule rule_cfg;
+ __le32 status;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(672, virtchnl2_flow_rule_info);
+
+/**
+ * struct virtchnl2_flow_rule_add_del - add/delete a flow steering rule
+ * @vport_id: vport id for which the rule is to be added or deleted.
+ * @count: Indicates number of rules to be added or deleted.
+ * @rule_info: Array of flow rules to be added or deleted.
+ *
+ * For VIRTCHNL2_OP_FLOW_RULE_ADD, rule_info contains the list of rules to be
+ * added. If rule_id is 0xFFFFFFFF, then the rule is programmed and not cached.
+ *
+ * For VIRTCHNL2_OP_FLOW_RULE_DEL, there are two possibilities. The structure
+ * can contain either an array of rule_ids or an array of match keys to be
+ * deleted. When match keys are used, the corresponding rule_ids must be
+ * 0xFFFFFFFF.
+ *
+ * The status member of each rule indicates the result. A maximum of 6 rules
+ * can be added or deleted using this method. The driver has to retry in case
+ * of any failure of the ADD or DEL opcode. The CP doesn't retry on failure.
+ */
+struct virtchnl2_flow_rule_add_del {
+ __le32 vport_id;
+ __le32 count;
+ struct virtchnl2_flow_rule_info rule_info[] __counted_by_le(count);
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_flow_rule_add_del);
+
#endif /* _VIRTCHNL_2_H_ */
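The flow steering structures above are consumed by the VIRTCHNL2_OP_FLOW_RULE_ADD and
VIRTCHNL2_OP_FLOW_RULE_DEL opcodes. Below is a minimal, hypothetical sketch of how a driver
could assemble a single-rule ADD message; the helper name example_build_rule_msg() and the
constants VIRTCHNL2_PROTO_HDR_IPV4 and VIRTCHNL2_ACTION_QUEUE are assumptions (the actual
enum values live elsewhere in the header), everything else maps directly onto the fields
documented in the structures above.

/*
 * Hypothetical sketch only: redirect IPv4 traffic on a vport to queue 3.
 * VIRTCHNL2_PROTO_HDR_IPV4 and VIRTCHNL2_ACTION_QUEUE are assumed enum
 * values; struct_size() comes from <linux/overflow.h>.
 */
static struct virtchnl2_flow_rule_add_del *
example_build_rule_msg(u32 vport_id, size_t *len)
{
	struct virtchnl2_flow_rule_add_del *msg;
	struct virtchnl2_flow_rule_info *info;

	*len = struct_size(msg, rule_info, 1);
	msg = kzalloc(*len, GFP_KERNEL);
	if (!msg)
		return NULL;

	msg->vport_id = cpu_to_le32(vport_id);
	msg->count = cpu_to_le32(1);

	info = &msg->rule_info[0];
	info->rule_id = cpu_to_le32(0xFFFFFFFF);	/* program, don't cache */

	/* one protocol header, matched from the outer layer */
	info->rule_cfg.proto_hdrs.tunnel_level = 0;
	info->rule_cfg.proto_hdrs.count = cpu_to_le32(1);
	info->rule_cfg.proto_hdrs.proto_hdr[0].hdr_type =
		cpu_to_le32(VIRTCHNL2_PROTO_HDR_IPV4);

	/* single action: redirect to queue 3 */
	info->rule_cfg.action_set.count = cpu_to_le32(1);
	info->rule_cfg.action_set.actions[0].action_type =
		cpu_to_le32(VIRTCHNL2_ACTION_QUEUE);
	info->rule_cfg.action_set.actions[0].act_conf.q_id = cpu_to_le32(3);

	return msg;
}

The resulting *len-byte buffer would then be sent with the ADD opcode, and the status field
of each rule_info entry checked once the control plane responds.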
diff --git a/drivers/net/ethernet/intel/idpf/xdp.c b/drivers/net/ethernet/intel/idpf/xdp.c
new file mode 100644
index 000000000000..958d16f87424
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/xdp.c
@@ -0,0 +1,486 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2025 Intel Corporation */
+
+#include "idpf.h"
+#include "idpf_virtchnl.h"
+#include "xdp.h"
+#include "xsk.h"
+
+static int idpf_rxq_for_each(const struct idpf_vport *vport,
+ int (*fn)(struct idpf_rx_queue *rxq, void *arg),
+ void *arg)
+{
+ bool splitq = idpf_is_queue_model_split(vport->rxq_model);
+
+ if (!vport->rxq_grps)
+ return -ENETDOWN;
+
+ for (u32 i = 0; i < vport->num_rxq_grp; i++) {
+ const struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ u32 num_rxq;
+
+ if (splitq)
+ num_rxq = rx_qgrp->splitq.num_rxq_sets;
+ else
+ num_rxq = rx_qgrp->singleq.num_rxq;
+
+ for (u32 j = 0; j < num_rxq; j++) {
+ struct idpf_rx_queue *q;
+ int err;
+
+ if (splitq)
+ q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
+ else
+ q = rx_qgrp->singleq.rxqs[j];
+
+ err = fn(q, arg);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int __idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq, void *arg)
+{
+ const struct idpf_vport *vport = rxq->q_vector->vport;
+ bool split = idpf_is_queue_model_split(vport->rxq_model);
+ int err;
+
+ err = __xdp_rxq_info_reg(&rxq->xdp_rxq, vport->netdev, rxq->idx,
+ rxq->q_vector->napi.napi_id,
+ rxq->rx_buf_size);
+ if (err)
+ return err;
+
+ if (idpf_queue_has(XSK, rxq)) {
+ err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
+ MEM_TYPE_XSK_BUFF_POOL,
+ rxq->pool);
+ if (err)
+ goto unreg;
+ } else {
+ const struct page_pool *pp;
+
+ pp = split ? rxq->bufq_sets[0].bufq.pp : rxq->pp;
+ xdp_rxq_info_attach_page_pool(&rxq->xdp_rxq, pp);
+ }
+
+ if (!split)
+ return 0;
+
+ rxq->xdpsqs = &vport->txqs[vport->xdp_txq_offset];
+ rxq->num_xdp_txq = vport->num_xdp_txq;
+
+ return 0;
+
+unreg:
+ xdp_rxq_info_unreg(&rxq->xdp_rxq);
+
+ return err;
+}
+
+int idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq)
+{
+ return __idpf_xdp_rxq_info_init(rxq, NULL);
+}
+
+int idpf_xdp_rxq_info_init_all(const struct idpf_vport *vport)
+{
+ return idpf_rxq_for_each(vport, __idpf_xdp_rxq_info_init, NULL);
+}
+
+static int __idpf_xdp_rxq_info_deinit(struct idpf_rx_queue *rxq, void *arg)
+{
+ if (idpf_is_queue_model_split((size_t)arg)) {
+ rxq->xdpsqs = NULL;
+ rxq->num_xdp_txq = 0;
+ }
+
+ if (!idpf_queue_has(XSK, rxq))
+ xdp_rxq_info_detach_mem_model(&rxq->xdp_rxq);
+
+ xdp_rxq_info_unreg(&rxq->xdp_rxq);
+
+ return 0;
+}
+
+void idpf_xdp_rxq_info_deinit(struct idpf_rx_queue *rxq, u32 model)
+{
+ __idpf_xdp_rxq_info_deinit(rxq, (void *)(size_t)model);
+}
+
+void idpf_xdp_rxq_info_deinit_all(const struct idpf_vport *vport)
+{
+ idpf_rxq_for_each(vport, __idpf_xdp_rxq_info_deinit,
+ (void *)(size_t)vport->rxq_model);
+}
+
+static int idpf_xdp_rxq_assign_prog(struct idpf_rx_queue *rxq, void *arg)
+{
+ struct bpf_prog *prog = arg;
+ struct bpf_prog *old;
+
+ if (prog)
+ bpf_prog_inc(prog);
+
+ old = rcu_replace_pointer(rxq->xdp_prog, prog, lockdep_rtnl_is_held());
+ if (old)
+ bpf_prog_put(old);
+
+ return 0;
+}
+
+void idpf_xdp_copy_prog_to_rqs(const struct idpf_vport *vport,
+ struct bpf_prog *xdp_prog)
+{
+ idpf_rxq_for_each(vport, idpf_xdp_rxq_assign_prog, xdp_prog);
+}
+
+static void idpf_xdp_tx_timer(struct work_struct *work);
+
+int idpf_xdpsqs_get(const struct idpf_vport *vport)
+{
+ struct libeth_xdpsq_timer **timers __free(kvfree) = NULL;
+ struct net_device *dev;
+ u32 sqs;
+
+ if (!idpf_xdp_enabled(vport))
+ return 0;
+
+ timers = kvcalloc(vport->num_xdp_txq, sizeof(*timers), GFP_KERNEL);
+ if (!timers)
+ return -ENOMEM;
+
+ for (u32 i = 0; i < vport->num_xdp_txq; i++) {
+ timers[i] = kzalloc_node(sizeof(*timers[i]), GFP_KERNEL,
+ cpu_to_mem(i));
+ if (!timers[i]) {
+ for (int j = i - 1; j >= 0; j--)
+ kfree(timers[j]);
+
+ return -ENOMEM;
+ }
+ }
+
+ dev = vport->netdev;
+ sqs = vport->xdp_txq_offset;
+
+ for (u32 i = sqs; i < vport->num_txq; i++) {
+ struct idpf_tx_queue *xdpsq = vport->txqs[i];
+
+ xdpsq->complq = xdpsq->txq_grp->complq;
+ kfree(xdpsq->refillq);
+ xdpsq->refillq = NULL;
+
+ idpf_queue_clear(FLOW_SCH_EN, xdpsq);
+ idpf_queue_clear(FLOW_SCH_EN, xdpsq->complq);
+ idpf_queue_set(NOIRQ, xdpsq);
+ idpf_queue_set(XDP, xdpsq);
+ idpf_queue_set(XDP, xdpsq->complq);
+
+ xdpsq->timer = timers[i - sqs];
+ libeth_xdpsq_get(&xdpsq->xdp_lock, dev, vport->xdpsq_share);
+ libeth_xdpsq_init_timer(xdpsq->timer, xdpsq, &xdpsq->xdp_lock,
+ idpf_xdp_tx_timer);
+
+ xdpsq->pending = 0;
+ xdpsq->xdp_tx = 0;
+ xdpsq->thresh = libeth_xdp_queue_threshold(xdpsq->desc_count);
+ }
+
+ return 0;
+}
+
+void idpf_xdpsqs_put(const struct idpf_vport *vport)
+{
+ struct net_device *dev;
+ u32 sqs;
+
+ if (!idpf_xdp_enabled(vport))
+ return;
+
+ dev = vport->netdev;
+ sqs = vport->xdp_txq_offset;
+
+ for (u32 i = sqs; i < vport->num_txq; i++) {
+ struct idpf_tx_queue *xdpsq = vport->txqs[i];
+
+ if (!idpf_queue_has_clear(XDP, xdpsq))
+ continue;
+
+ libeth_xdpsq_deinit_timer(xdpsq->timer);
+ libeth_xdpsq_put(&xdpsq->xdp_lock, dev);
+
+ kfree(xdpsq->timer);
+ xdpsq->refillq = NULL;
+ idpf_queue_clear(NOIRQ, xdpsq);
+ }
+}
+
+static int idpf_xdp_parse_cqe(const struct idpf_splitq_4b_tx_compl_desc *desc,
+ bool gen)
+{
+ u32 val;
+
+#ifdef __LIBETH_WORD_ACCESS
+ val = *(const u32 *)desc;
+#else
+ val = ((u32)le16_to_cpu(desc->q_head_compl_tag.q_head) << 16) |
+ le16_to_cpu(desc->qid_comptype_gen);
+#endif
+ if (!!(val & IDPF_TXD_COMPLQ_GEN_M) != gen)
+ return -ENODATA;
+
+ if (unlikely((val & GENMASK(IDPF_TXD_COMPLQ_GEN_S - 1, 0)) !=
+ FIELD_PREP(IDPF_TXD_COMPLQ_COMPL_TYPE_M,
+ IDPF_TXD_COMPLT_RS)))
+ return -EINVAL;
+
+ return upper_16_bits(val);
+}
+
+u32 idpf_xdpsq_poll(struct idpf_tx_queue *xdpsq, u32 budget)
+{
+ struct idpf_compl_queue *cq = xdpsq->complq;
+ u32 tx_ntc = xdpsq->next_to_clean;
+ u32 tx_cnt = xdpsq->desc_count;
+ u32 ntc = cq->next_to_clean;
+ u32 cnt = cq->desc_count;
+ u32 done_frames;
+ bool gen;
+
+ gen = idpf_queue_has(GEN_CHK, cq);
+
+ for (done_frames = 0; done_frames < budget; ) {
+ int ret;
+
+ ret = idpf_xdp_parse_cqe(&cq->comp_4b[ntc], gen);
+ if (ret >= 0) {
+ done_frames = ret > tx_ntc ? ret - tx_ntc :
+ ret + tx_cnt - tx_ntc;
+ goto next;
+ }
+
+ switch (ret) {
+ case -ENODATA:
+ goto out;
+ case -EINVAL:
+ break;
+ }
+
+next:
+ if (unlikely(++ntc == cnt)) {
+ ntc = 0;
+ gen = !gen;
+ idpf_queue_change(GEN_CHK, cq);
+ }
+ }
+
+out:
+ cq->next_to_clean = ntc;
+
+ return done_frames;
+}
+
+static u32 idpf_xdpsq_complete(void *_xdpsq, u32 budget)
+{
+ struct libeth_xdpsq_napi_stats ss = { };
+ struct idpf_tx_queue *xdpsq = _xdpsq;
+ u32 tx_ntc = xdpsq->next_to_clean;
+ u32 tx_cnt = xdpsq->desc_count;
+ struct xdp_frame_bulk bq;
+ struct libeth_cq_pp cp = {
+ .dev = xdpsq->dev,
+ .bq = &bq,
+ .xss = &ss,
+ .napi = true,
+ };
+ u32 done_frames;
+
+ done_frames = idpf_xdpsq_poll(xdpsq, budget);
+ if (unlikely(!done_frames))
+ return 0;
+
+ xdp_frame_bulk_init(&bq);
+
+ for (u32 i = 0; likely(i < done_frames); i++) {
+ libeth_xdp_complete_tx(&xdpsq->tx_buf[tx_ntc], &cp);
+
+ if (unlikely(++tx_ntc == tx_cnt))
+ tx_ntc = 0;
+ }
+
+ xdp_flush_frame_bulk(&bq);
+
+ xdpsq->next_to_clean = tx_ntc;
+ xdpsq->pending -= done_frames;
+ xdpsq->xdp_tx -= cp.xdp_tx;
+
+ return done_frames;
+}
+
+static u32 idpf_xdp_tx_prep(void *_xdpsq, struct libeth_xdpsq *sq)
+{
+ struct idpf_tx_queue *xdpsq = _xdpsq;
+ u32 free;
+
+ libeth_xdpsq_lock(&xdpsq->xdp_lock);
+
+ free = xdpsq->desc_count - xdpsq->pending;
+ if (free < xdpsq->thresh)
+ free += idpf_xdpsq_complete(xdpsq, xdpsq->thresh);
+
+ *sq = (struct libeth_xdpsq){
+ .sqes = xdpsq->tx_buf,
+ .descs = xdpsq->desc_ring,
+ .count = xdpsq->desc_count,
+ .lock = &xdpsq->xdp_lock,
+ .ntu = &xdpsq->next_to_use,
+ .pending = &xdpsq->pending,
+ .xdp_tx = &xdpsq->xdp_tx,
+ };
+
+ return free;
+}
+
+LIBETH_XDP_DEFINE_START();
+LIBETH_XDP_DEFINE_TIMER(static idpf_xdp_tx_timer, idpf_xdpsq_complete);
+LIBETH_XDP_DEFINE_FLUSH_TX(idpf_xdp_tx_flush_bulk, idpf_xdp_tx_prep,
+ idpf_xdp_tx_xmit);
+LIBETH_XDP_DEFINE_FLUSH_XMIT(static idpf_xdp_xmit_flush_bulk, idpf_xdp_tx_prep,
+ idpf_xdp_tx_xmit);
+LIBETH_XDP_DEFINE_END();
+
+int idpf_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags)
+{
+ const struct idpf_netdev_priv *np = netdev_priv(dev);
+ const struct idpf_vport *vport = np->vport;
+
+ if (unlikely(!netif_carrier_ok(dev) || !vport->link_up))
+ return -ENETDOWN;
+
+ return libeth_xdp_xmit_do_bulk(dev, n, frames, flags,
+ &vport->txqs[vport->xdp_txq_offset],
+ vport->num_xdp_txq,
+ idpf_xdp_xmit_flush_bulk,
+ idpf_xdp_tx_finalize);
+}
+
+static int idpf_xdpmo_rx_hash(const struct xdp_md *ctx, u32 *hash,
+ enum xdp_rss_hash_type *rss_type)
+{
+ const struct libeth_xdp_buff *xdp = (typeof(xdp))ctx;
+ struct idpf_xdp_rx_desc desc __uninitialized;
+ const struct idpf_rx_queue *rxq;
+ struct libeth_rx_pt pt;
+
+ rxq = libeth_xdp_buff_to_rq(xdp, typeof(*rxq), xdp_rxq);
+
+ idpf_xdp_get_qw0(&desc, xdp->desc);
+
+ pt = rxq->rx_ptype_lkup[idpf_xdp_rx_pt(&desc)];
+ if (!libeth_rx_pt_has_hash(rxq->xdp_rxq.dev, pt))
+ return -ENODATA;
+
+ idpf_xdp_get_qw2(&desc, xdp->desc);
+
+ return libeth_xdpmo_rx_hash(hash, rss_type, idpf_xdp_rx_hash(&desc),
+ pt);
+}
+
+static const struct xdp_metadata_ops idpf_xdpmo = {
+ .xmo_rx_hash = idpf_xdpmo_rx_hash,
+};
+
+void idpf_xdp_set_features(const struct idpf_vport *vport)
+{
+ if (!idpf_is_queue_model_split(vport->rxq_model))
+ return;
+
+ libeth_xdp_set_features_noredir(vport->netdev, &idpf_xdpmo,
+ idpf_get_max_tx_bufs(vport->adapter),
+ libeth_xsktmo);
+}
+
+static int idpf_xdp_setup_prog(struct idpf_vport *vport,
+ const struct netdev_bpf *xdp)
+{
+ const struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
+ struct bpf_prog *old, *prog = xdp->prog;
+ struct idpf_vport_config *cfg;
+ int ret;
+
+ cfg = vport->adapter->vport_config[vport->idx];
+
+ if (test_bit(IDPF_REMOVE_IN_PROG, vport->adapter->flags) ||
+ !test_bit(IDPF_VPORT_REG_NETDEV, cfg->flags) ||
+ !!vport->xdp_prog == !!prog) {
+ if (test_bit(IDPF_VPORT_UP, np->state))
+ idpf_xdp_copy_prog_to_rqs(vport, prog);
+
+ old = xchg(&vport->xdp_prog, prog);
+ if (old)
+ bpf_prog_put(old);
+
+ cfg->user_config.xdp_prog = prog;
+
+ return 0;
+ }
+
+ if (!vport->num_xdp_txq && vport->num_txq == cfg->max_q.max_txq) {
+ NL_SET_ERR_MSG_MOD(xdp->extack,
+ "No Tx queues available for XDP, please decrease the number of regular SQs");
+ return -ENOSPC;
+ }
+
+ old = cfg->user_config.xdp_prog;
+ cfg->user_config.xdp_prog = prog;
+
+ ret = idpf_initiate_soft_reset(vport, IDPF_SR_Q_CHANGE);
+ if (ret) {
+ NL_SET_ERR_MSG_MOD(xdp->extack,
+ "Could not reopen the vport after XDP setup");
+
+ cfg->user_config.xdp_prog = old;
+ old = prog;
+ }
+
+ if (old)
+ bpf_prog_put(old);
+
+ libeth_xdp_set_redirect(vport->netdev, vport->xdp_prog);
+
+ return ret;
+}
+
+int idpf_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ struct idpf_vport *vport;
+ int ret;
+
+ idpf_vport_ctrl_lock(dev);
+ vport = idpf_netdev_to_vport(dev);
+
+ if (!idpf_is_queue_model_split(vport->txq_model))
+ goto notsupp;
+
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ ret = idpf_xdp_setup_prog(vport, xdp);
+ break;
+ case XDP_SETUP_XSK_POOL:
+ ret = idpf_xsk_pool_setup(vport, xdp);
+ break;
+ default:
+notsupp:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ idpf_vport_ctrl_unlock(dev);
+
+ return ret;
+}
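idpf_xdpsq_poll() above turns the queue head reported in a completion descriptor into a
count of finished frames while accounting for ring wraparound. A stand-alone sketch of that
arithmetic (the helper name is illustrative, not part of the driver):

/* Number of descriptors completed when the reported head may have wrapped
 * past next_to_clean; mirrors the ternary in idpf_xdpsq_poll().
 */
static inline u32 example_done_frames(u32 head, u32 next_to_clean, u32 ring_size)
{
	return head > next_to_clean ? head - next_to_clean :
				      head + ring_size - next_to_clean;
}

/* e.g. head = 5, next_to_clean = 250, ring_size = 256 -> 11 frames done */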
diff --git a/drivers/net/ethernet/intel/idpf/xdp.h b/drivers/net/ethernet/intel/idpf/xdp.h
new file mode 100644
index 000000000000..479f5ef3c604
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/xdp.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2025 Intel Corporation */
+
+#ifndef _IDPF_XDP_H_
+#define _IDPF_XDP_H_
+
+#include <net/libeth/xdp.h>
+
+#include "idpf_txrx.h"
+
+int idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq);
+int idpf_xdp_rxq_info_init_all(const struct idpf_vport *vport);
+void idpf_xdp_rxq_info_deinit(struct idpf_rx_queue *rxq, u32 model);
+void idpf_xdp_rxq_info_deinit_all(const struct idpf_vport *vport);
+void idpf_xdp_copy_prog_to_rqs(const struct idpf_vport *vport,
+ struct bpf_prog *xdp_prog);
+
+int idpf_xdpsqs_get(const struct idpf_vport *vport);
+void idpf_xdpsqs_put(const struct idpf_vport *vport);
+
+u32 idpf_xdpsq_poll(struct idpf_tx_queue *xdpsq, u32 budget);
+bool idpf_xdp_tx_flush_bulk(struct libeth_xdp_tx_bulk *bq, u32 flags);
+
+/**
+ * idpf_xdp_tx_xmit - produce a single HW Tx descriptor out of XDP desc
+ * @desc: XDP descriptor to pull the DMA address and length from
+ * @i: descriptor index on the queue to fill
+ * @sq: XDP queue to produce the HW Tx descriptor on
+ * @priv: &xsk_tx_metadata_ops on XSk xmit or %NULL
+ */
+static inline void idpf_xdp_tx_xmit(struct libeth_xdp_tx_desc desc, u32 i,
+ const struct libeth_xdpsq *sq, u64 priv)
+{
+ struct idpf_flex_tx_desc *tx_desc = sq->descs;
+ u32 cmd;
+
+ cmd = FIELD_PREP(IDPF_FLEX_TXD_QW1_DTYPE_M,
+ IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2);
+ if (desc.flags & LIBETH_XDP_TX_LAST)
+ cmd |= FIELD_PREP(IDPF_FLEX_TXD_QW1_CMD_M,
+ IDPF_TX_DESC_CMD_EOP);
+ if (priv && (desc.flags & LIBETH_XDP_TX_CSUM))
+ cmd |= FIELD_PREP(IDPF_FLEX_TXD_QW1_CMD_M,
+ IDPF_TX_FLEX_DESC_CMD_CS_EN);
+
+ tx_desc = &tx_desc[i];
+ tx_desc->buf_addr = cpu_to_le64(desc.addr);
+#ifdef __LIBETH_WORD_ACCESS
+ *(u64 *)&tx_desc->qw1 = ((u64)desc.len << 48) | cmd;
+#else
+ tx_desc->qw1.buf_size = cpu_to_le16(desc.len);
+ tx_desc->qw1.cmd_dtype = cpu_to_le16(cmd);
+#endif
+}
+
+static inline void idpf_xdpsq_set_rs(const struct idpf_tx_queue *xdpsq)
+{
+ u32 ntu, cmd;
+
+ ntu = xdpsq->next_to_use;
+ if (unlikely(!ntu))
+ ntu = xdpsq->desc_count;
+
+ cmd = FIELD_PREP(IDPF_FLEX_TXD_QW1_CMD_M, IDPF_TX_DESC_CMD_RS);
+#ifdef __LIBETH_WORD_ACCESS
+ *(u64 *)&xdpsq->flex_tx[ntu - 1].q.qw1 |= cmd;
+#else
+ xdpsq->flex_tx[ntu - 1].q.qw1.cmd_dtype |= cpu_to_le16(cmd);
+#endif
+}
+
+static inline void idpf_xdpsq_update_tail(const struct idpf_tx_queue *xdpsq)
+{
+ dma_wmb();
+ writel_relaxed(xdpsq->next_to_use, xdpsq->tail);
+}
+
+/**
+ * idpf_xdp_tx_finalize - finalize sending over XDPSQ
+ * @_xdpsq: XDP Tx queue
+ * @sent: whether any frames were sent
+ * @flush: whether to update RS bit and the tail register
+ *
+ * Set the RS bit ("end of batch"), bump the tail, and queue the cleanup timer.
+ * To be called after a NAPI polling loop, at the end of .ndo_xdp_xmit() etc.
+ */
+static inline void idpf_xdp_tx_finalize(void *_xdpsq, bool sent, bool flush)
+{
+ struct idpf_tx_queue *xdpsq = _xdpsq;
+
+ if ((!flush || unlikely(!sent)) &&
+ likely(xdpsq->desc_count - 1 != xdpsq->pending))
+ return;
+
+ libeth_xdpsq_lock(&xdpsq->xdp_lock);
+
+ idpf_xdpsq_set_rs(xdpsq);
+ idpf_xdpsq_update_tail(xdpsq);
+
+ libeth_xdpsq_queue_timer(xdpsq->timer);
+
+ libeth_xdpsq_unlock(&xdpsq->xdp_lock);
+}
+
+struct idpf_xdp_rx_desc {
+ aligned_u64 qw0;
+#define IDPF_XDP_RX_BUFQ BIT_ULL(47)
+#define IDPF_XDP_RX_GEN BIT_ULL(46)
+#define IDPF_XDP_RX_LEN GENMASK_ULL(45, 32)
+#define IDPF_XDP_RX_PT GENMASK_ULL(25, 16)
+
+ aligned_u64 qw1;
+#define IDPF_XDP_RX_BUF GENMASK_ULL(47, 32)
+#define IDPF_XDP_RX_EOP BIT_ULL(1)
+
+ aligned_u64 qw2;
+#define IDPF_XDP_RX_HASH GENMASK_ULL(31, 0)
+
+ aligned_u64 qw3;
+} __aligned(4 * sizeof(u64));
+static_assert(sizeof(struct idpf_xdp_rx_desc) ==
+ sizeof(struct virtchnl2_rx_flex_desc_adv_nic_3));
+
+#define idpf_xdp_rx_bufq(desc) !!((desc)->qw0 & IDPF_XDP_RX_BUFQ)
+#define idpf_xdp_rx_gen(desc) !!((desc)->qw0 & IDPF_XDP_RX_GEN)
+#define idpf_xdp_rx_len(desc) FIELD_GET(IDPF_XDP_RX_LEN, (desc)->qw0)
+#define idpf_xdp_rx_pt(desc) FIELD_GET(IDPF_XDP_RX_PT, (desc)->qw0)
+#define idpf_xdp_rx_buf(desc) FIELD_GET(IDPF_XDP_RX_BUF, (desc)->qw1)
+#define idpf_xdp_rx_eop(desc) !!((desc)->qw1 & IDPF_XDP_RX_EOP)
+#define idpf_xdp_rx_hash(desc) FIELD_GET(IDPF_XDP_RX_HASH, (desc)->qw2)
+
+static inline void
+idpf_xdp_get_qw0(struct idpf_xdp_rx_desc *desc,
+ const struct virtchnl2_rx_flex_desc_adv_nic_3 *rxd)
+{
+#ifdef __LIBETH_WORD_ACCESS
+ desc->qw0 = ((const typeof(desc))rxd)->qw0;
+#else
+ desc->qw0 = ((u64)le16_to_cpu(rxd->pktlen_gen_bufq_id) << 32) |
+ ((u64)le16_to_cpu(rxd->ptype_err_fflags0) << 16);
+#endif
+}
+
+static inline void
+idpf_xdp_get_qw1(struct idpf_xdp_rx_desc *desc,
+ const struct virtchnl2_rx_flex_desc_adv_nic_3 *rxd)
+{
+#ifdef __LIBETH_WORD_ACCESS
+ desc->qw1 = ((const typeof(desc))rxd)->qw1;
+#else
+ desc->qw1 = ((u64)le16_to_cpu(rxd->buf_id) << 32) |
+ rxd->status_err0_qw1;
+#endif
+}
+
+static inline void
+idpf_xdp_get_qw2(struct idpf_xdp_rx_desc *desc,
+ const struct virtchnl2_rx_flex_desc_adv_nic_3 *rxd)
+{
+#ifdef __LIBETH_WORD_ACCESS
+ desc->qw2 = ((const typeof(desc))rxd)->qw2;
+#else
+ desc->qw2 = ((u64)rxd->hash3 << 24) |
+ ((u64)rxd->ff2_mirrid_hash2.hash2 << 16) |
+ le16_to_cpu(rxd->hash1);
+#endif
+}
+
+void idpf_xdp_set_features(const struct idpf_vport *vport);
+
+int idpf_xdp(struct net_device *dev, struct netdev_bpf *xdp);
+int idpf_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags);
+
+#endif /* _IDPF_XDP_H_ */
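The idpf_xdp_rx_* accessors above all follow the same pattern: a GENMASK_ULL() span selects
the bits of interest in a descriptor quadword and FIELD_GET() shifts them down. A
self-contained illustration, with a made-up helper name:

#include <linux/bits.h>
#include <linux/bitfield.h>

/* Extract bits 45:32 of qword 0, the packet length span (IDPF_XDP_RX_LEN). */
static inline u32 example_rx_len(u64 qw0)
{
	return FIELD_GET(GENMASK_ULL(45, 32), qw0);
}

/* example_rx_len(0x0000040000000000ULL) == 0x400, i.e. a 1024-byte frame */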
diff --git a/drivers/net/ethernet/intel/idpf/xsk.c b/drivers/net/ethernet/intel/idpf/xsk.c
new file mode 100644
index 000000000000..fd2cc43ab43c
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/xsk.c
@@ -0,0 +1,633 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2025 Intel Corporation */
+
+#include <net/libeth/xsk.h>
+
+#include "idpf.h"
+#include "xdp.h"
+#include "xsk.h"
+
+static void idpf_xsk_tx_timer(struct work_struct *work);
+
+static void idpf_xsk_setup_rxq(const struct idpf_vport *vport,
+ struct idpf_rx_queue *rxq)
+{
+ struct xsk_buff_pool *pool;
+
+ pool = xsk_get_pool_from_qid(vport->netdev, rxq->idx);
+ if (!pool || !pool->dev || !xsk_buff_can_alloc(pool, 1))
+ return;
+
+ rxq->pool = pool;
+
+ idpf_queue_set(XSK, rxq);
+}
+
+static void idpf_xsk_setup_bufq(const struct idpf_vport *vport,
+ struct idpf_buf_queue *bufq)
+{
+ struct xsk_buff_pool *pool;
+ u32 qid = U32_MAX;
+
+ for (u32 i = 0; i < vport->num_rxq_grp; i++) {
+ const struct idpf_rxq_group *grp = &vport->rxq_grps[i];
+
+ for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ if (&grp->splitq.bufq_sets[j].bufq == bufq) {
+ qid = grp->splitq.rxq_sets[0]->rxq.idx;
+ goto setup;
+ }
+ }
+ }
+
+setup:
+ pool = xsk_get_pool_from_qid(vport->netdev, qid);
+ if (!pool || !pool->dev || !xsk_buff_can_alloc(pool, 1))
+ return;
+
+ bufq->pool = pool;
+
+ idpf_queue_set(XSK, bufq);
+}
+
+static void idpf_xsk_setup_txq(const struct idpf_vport *vport,
+ struct idpf_tx_queue *txq)
+{
+ struct xsk_buff_pool *pool;
+ u32 qid;
+
+ idpf_queue_clear(XSK, txq);
+
+ if (!idpf_queue_has(XDP, txq))
+ return;
+
+ qid = txq->idx - vport->xdp_txq_offset;
+
+ pool = xsk_get_pool_from_qid(vport->netdev, qid);
+ if (!pool || !pool->dev)
+ return;
+
+ txq->pool = pool;
+ libeth_xdpsq_init_timer(txq->timer, txq, &txq->xdp_lock,
+ idpf_xsk_tx_timer);
+
+ idpf_queue_assign(NOIRQ, txq, xsk_uses_need_wakeup(pool));
+ idpf_queue_set(XSK, txq);
+}
+
+static void idpf_xsk_setup_complq(const struct idpf_vport *vport,
+ struct idpf_compl_queue *complq)
+{
+ const struct xsk_buff_pool *pool;
+ u32 qid;
+
+ idpf_queue_clear(XSK, complq);
+
+ if (!idpf_queue_has(XDP, complq))
+ return;
+
+ qid = complq->txq_grp->txqs[0]->idx - vport->xdp_txq_offset;
+
+ pool = xsk_get_pool_from_qid(vport->netdev, qid);
+ if (!pool || !pool->dev)
+ return;
+
+ idpf_queue_set(XSK, complq);
+}
+
+void idpf_xsk_setup_queue(const struct idpf_vport *vport, void *q,
+ enum virtchnl2_queue_type type)
+{
+ if (!idpf_xdp_enabled(vport))
+ return;
+
+ switch (type) {
+ case VIRTCHNL2_QUEUE_TYPE_RX:
+ idpf_xsk_setup_rxq(vport, q);
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
+ idpf_xsk_setup_bufq(vport, q);
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX:
+ idpf_xsk_setup_txq(vport, q);
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
+ idpf_xsk_setup_complq(vport, q);
+ break;
+ default:
+ break;
+ }
+}
+
+void idpf_xsk_clear_queue(void *q, enum virtchnl2_queue_type type)
+{
+ struct idpf_compl_queue *complq;
+ struct idpf_buf_queue *bufq;
+ struct idpf_rx_queue *rxq;
+ struct idpf_tx_queue *txq;
+
+ switch (type) {
+ case VIRTCHNL2_QUEUE_TYPE_RX:
+ rxq = q;
+ if (!idpf_queue_has_clear(XSK, rxq))
+ return;
+
+ rxq->pool = NULL;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
+ bufq = q;
+ if (!idpf_queue_has_clear(XSK, bufq))
+ return;
+
+ bufq->pool = NULL;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX:
+ txq = q;
+ if (!idpf_queue_has_clear(XSK, txq))
+ return;
+
+ idpf_queue_set(NOIRQ, txq);
+ txq->dev = txq->netdev->dev.parent;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
+ complq = q;
+ idpf_queue_clear(XSK, complq);
+ break;
+ default:
+ break;
+ }
+}
+
+void idpf_xsk_init_wakeup(struct idpf_q_vector *qv)
+{
+ libeth_xsk_init_wakeup(&qv->csd, &qv->napi);
+}
+
+void idpf_xsksq_clean(struct idpf_tx_queue *xdpsq)
+{
+ struct libeth_xdpsq_napi_stats ss = { };
+ u32 ntc = xdpsq->next_to_clean;
+ struct xdp_frame_bulk bq;
+ struct libeth_cq_pp cp = {
+ .dev = xdpsq->pool->dev,
+ .bq = &bq,
+ .xss = &ss,
+ };
+ u32 xsk_frames = 0;
+
+ xdp_frame_bulk_init(&bq);
+
+ while (ntc != xdpsq->next_to_use) {
+ struct libeth_sqe *sqe = &xdpsq->tx_buf[ntc];
+
+ if (sqe->type)
+ libeth_xdp_complete_tx(sqe, &cp);
+ else
+ xsk_frames++;
+
+ if (unlikely(++ntc == xdpsq->desc_count))
+ ntc = 0;
+ }
+
+ xdp_flush_frame_bulk(&bq);
+
+ if (xsk_frames)
+ xsk_tx_completed(xdpsq->pool, xsk_frames);
+}
+
+static noinline u32 idpf_xsksq_complete_slow(struct idpf_tx_queue *xdpsq,
+ u32 done)
+{
+ struct libeth_xdpsq_napi_stats ss = { };
+ u32 ntc = xdpsq->next_to_clean;
+ u32 cnt = xdpsq->desc_count;
+ struct xdp_frame_bulk bq;
+ struct libeth_cq_pp cp = {
+ .dev = xdpsq->pool->dev,
+ .bq = &bq,
+ .xss = &ss,
+ .napi = true,
+ };
+ u32 xsk_frames = 0;
+
+ xdp_frame_bulk_init(&bq);
+
+ for (u32 i = 0; likely(i < done); i++) {
+ struct libeth_sqe *sqe = &xdpsq->tx_buf[ntc];
+
+ if (sqe->type)
+ libeth_xdp_complete_tx(sqe, &cp);
+ else
+ xsk_frames++;
+
+ if (unlikely(++ntc == cnt))
+ ntc = 0;
+ }
+
+ xdp_flush_frame_bulk(&bq);
+
+ xdpsq->next_to_clean = ntc;
+ xdpsq->xdp_tx -= cp.xdp_tx;
+
+ return xsk_frames;
+}
+
+static __always_inline u32 idpf_xsksq_complete(void *_xdpsq, u32 budget)
+{
+ struct idpf_tx_queue *xdpsq = _xdpsq;
+ u32 tx_ntc = xdpsq->next_to_clean;
+ u32 tx_cnt = xdpsq->desc_count;
+ u32 done_frames;
+ u32 xsk_frames;
+
+ done_frames = idpf_xdpsq_poll(xdpsq, budget);
+ if (unlikely(!done_frames))
+ return 0;
+
+ if (likely(!xdpsq->xdp_tx)) {
+ tx_ntc += done_frames;
+ if (tx_ntc >= tx_cnt)
+ tx_ntc -= tx_cnt;
+
+ xdpsq->next_to_clean = tx_ntc;
+ xsk_frames = done_frames;
+
+ goto finalize;
+ }
+
+ xsk_frames = idpf_xsksq_complete_slow(xdpsq, done_frames);
+ if (xsk_frames)
+finalize:
+ xsk_tx_completed(xdpsq->pool, xsk_frames);
+
+ xdpsq->pending -= done_frames;
+
+ return done_frames;
+}
+
+static u32 idpf_xsk_tx_prep(void *_xdpsq, struct libeth_xdpsq *sq)
+{
+ struct idpf_tx_queue *xdpsq = _xdpsq;
+ u32 free;
+
+ libeth_xdpsq_lock(&xdpsq->xdp_lock);
+
+ free = xdpsq->desc_count - xdpsq->pending;
+ if (free < xdpsq->thresh)
+ free += idpf_xsksq_complete(xdpsq, xdpsq->thresh);
+
+ *sq = (struct libeth_xdpsq){
+ .pool = xdpsq->pool,
+ .sqes = xdpsq->tx_buf,
+ .descs = xdpsq->desc_ring,
+ .count = xdpsq->desc_count,
+ .lock = &xdpsq->xdp_lock,
+ .ntu = &xdpsq->next_to_use,
+ .pending = &xdpsq->pending,
+ .xdp_tx = &xdpsq->xdp_tx,
+ };
+
+ return free;
+}
+
+static u32 idpf_xsk_xmit_prep(void *_xdpsq, struct libeth_xdpsq *sq)
+{
+ struct idpf_tx_queue *xdpsq = _xdpsq;
+
+ *sq = (struct libeth_xdpsq){
+ .pool = xdpsq->pool,
+ .sqes = xdpsq->tx_buf,
+ .descs = xdpsq->desc_ring,
+ .count = xdpsq->desc_count,
+ .lock = &xdpsq->xdp_lock,
+ .ntu = &xdpsq->next_to_use,
+ .pending = &xdpsq->pending,
+ };
+
+ /*
+ * The queue is cleaned, the budget is already known, optimize out
+ * the second min() by passing the type limit.
+ */
+ return U32_MAX;
+}
+
+bool idpf_xsk_xmit(struct idpf_tx_queue *xsksq)
+{
+ u32 free;
+
+ libeth_xdpsq_lock(&xsksq->xdp_lock);
+
+ free = xsksq->desc_count - xsksq->pending;
+ if (free < xsksq->thresh)
+ free += idpf_xsksq_complete(xsksq, xsksq->thresh);
+
+ return libeth_xsk_xmit_do_bulk(xsksq->pool, xsksq,
+ min(free - 1, xsksq->thresh),
+ libeth_xsktmo, idpf_xsk_xmit_prep,
+ idpf_xdp_tx_xmit, idpf_xdp_tx_finalize);
+}
+
+LIBETH_XDP_DEFINE_START();
+LIBETH_XDP_DEFINE_TIMER(static idpf_xsk_tx_timer, idpf_xsksq_complete);
+LIBETH_XSK_DEFINE_FLUSH_TX(static idpf_xsk_tx_flush_bulk, idpf_xsk_tx_prep,
+ idpf_xdp_tx_xmit);
+LIBETH_XSK_DEFINE_RUN(static idpf_xsk_run_pass, idpf_xsk_run_prog,
+ idpf_xsk_tx_flush_bulk, idpf_rx_process_skb_fields);
+LIBETH_XSK_DEFINE_FINALIZE(static idpf_xsk_finalize_rx, idpf_xsk_tx_flush_bulk,
+ idpf_xdp_tx_finalize);
+LIBETH_XDP_DEFINE_END();
+
+static void idpf_xskfqe_init(const struct libeth_xskfq_fp *fq, u32 i)
+{
+ struct virtchnl2_splitq_rx_buf_desc *desc = fq->descs;
+
+ desc = &desc[i];
+#ifdef __LIBETH_WORD_ACCESS
+ *(u64 *)&desc->qword0 = i;
+#else
+ desc->qword0.buf_id = cpu_to_le16(i);
+#endif
+ desc->pkt_addr = cpu_to_le64(libeth_xsk_buff_xdp_get_dma(fq->fqes[i]));
+}
+
+static bool idpf_xskfq_refill_thresh(struct idpf_buf_queue *bufq, u32 count)
+{
+ struct libeth_xskfq_fp fq = {
+ .pool = bufq->pool,
+ .fqes = bufq->xsk_buf,
+ .descs = bufq->split_buf,
+ .ntu = bufq->next_to_use,
+ .count = bufq->desc_count,
+ };
+ u32 done;
+
+ done = libeth_xskfqe_alloc(&fq, count, idpf_xskfqe_init);
+ writel(fq.ntu, bufq->tail);
+
+ bufq->next_to_use = fq.ntu;
+ bufq->pending -= done;
+
+ return done == count;
+}
+
+static bool idpf_xskfq_refill(struct idpf_buf_queue *bufq)
+{
+ u32 count, rx_thresh = bufq->thresh;
+
+ count = ALIGN_DOWN(bufq->pending - 1, rx_thresh);
+
+ for (u32 i = 0; i < count; i += rx_thresh) {
+ if (unlikely(!idpf_xskfq_refill_thresh(bufq, rx_thresh)))
+ return false;
+ }
+
+ return true;
+}
+
+int idpf_xskfq_init(struct idpf_buf_queue *bufq)
+{
+ struct libeth_xskfq fq = {
+ .pool = bufq->pool,
+ .count = bufq->desc_count,
+ .nid = idpf_q_vector_to_mem(bufq->q_vector),
+ };
+ int ret;
+
+ ret = libeth_xskfq_create(&fq);
+ if (ret)
+ return ret;
+
+ bufq->xsk_buf = fq.fqes;
+ bufq->pending = fq.pending;
+ bufq->thresh = fq.thresh;
+ bufq->rx_buf_size = fq.buf_len;
+
+ if (!idpf_xskfq_refill(bufq))
+ netdev_err(bufq->pool->netdev,
+ "failed to allocate XSk buffers for qid %d\n",
+ bufq->pool->queue_id);
+
+ bufq->next_to_alloc = bufq->next_to_use;
+
+ idpf_queue_clear(HSPLIT_EN, bufq);
+ bufq->rx_hbuf_size = 0;
+
+ return 0;
+}
+
+void idpf_xskfq_rel(struct idpf_buf_queue *bufq)
+{
+ struct libeth_xskfq fq = {
+ .fqes = bufq->xsk_buf,
+ };
+
+ libeth_xskfq_destroy(&fq);
+
+ bufq->rx_buf_size = fq.buf_len;
+ bufq->thresh = fq.thresh;
+ bufq->pending = fq.pending;
+}
+
+struct idpf_xskfq_refill_set {
+ struct {
+ struct idpf_buf_queue *q;
+ u32 buf_id;
+ u32 pending;
+ } bufqs[IDPF_MAX_BUFQS_PER_RXQ_GRP];
+};
+
+static bool idpf_xskfq_refill_set(const struct idpf_xskfq_refill_set *set)
+{
+ bool ret = true;
+
+ for (u32 i = 0; i < ARRAY_SIZE(set->bufqs); i++) {
+ struct idpf_buf_queue *bufq = set->bufqs[i].q;
+ u32 ntc;
+
+ if (!bufq)
+ continue;
+
+ ntc = set->bufqs[i].buf_id;
+ if (unlikely(++ntc == bufq->desc_count))
+ ntc = 0;
+
+ bufq->next_to_clean = ntc;
+ bufq->pending += set->bufqs[i].pending;
+
+ if (bufq->pending > bufq->thresh)
+ ret &= idpf_xskfq_refill(bufq);
+ }
+
+ return ret;
+}
+
+int idpf_xskrq_poll(struct idpf_rx_queue *rxq, u32 budget)
+{
+ struct idpf_xskfq_refill_set set = { };
+ struct libeth_rq_napi_stats rs = { };
+ bool wake, gen, fail = false;
+ u32 ntc = rxq->next_to_clean;
+ struct libeth_xdp_buff *xdp;
+ LIBETH_XDP_ONSTACK_BULK(bq);
+ u32 cnt = rxq->desc_count;
+
+ wake = xsk_uses_need_wakeup(rxq->pool);
+ if (wake)
+ xsk_clear_rx_need_wakeup(rxq->pool);
+
+ gen = idpf_queue_has(GEN_CHK, rxq);
+
+ libeth_xsk_tx_init_bulk(&bq, rxq->xdp_prog, rxq->xdp_rxq.dev,
+ rxq->xdpsqs, rxq->num_xdp_txq);
+ xdp = rxq->xsk;
+
+ while (likely(rs.packets < budget)) {
+ const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc;
+ struct idpf_xdp_rx_desc desc __uninitialized;
+ struct idpf_buf_queue *bufq;
+ u32 bufq_id, buf_id;
+
+ rx_desc = &rxq->rx[ntc].flex_adv_nic_3_wb;
+
+ idpf_xdp_get_qw0(&desc, rx_desc);
+ if (idpf_xdp_rx_gen(&desc) != gen)
+ break;
+
+ dma_rmb();
+
+ bufq_id = idpf_xdp_rx_bufq(&desc);
+ bufq = set.bufqs[bufq_id].q;
+ if (!bufq) {
+ bufq = &rxq->bufq_sets[bufq_id].bufq;
+ set.bufqs[bufq_id].q = bufq;
+ }
+
+ idpf_xdp_get_qw1(&desc, rx_desc);
+ buf_id = idpf_xdp_rx_buf(&desc);
+
+ set.bufqs[bufq_id].buf_id = buf_id;
+ set.bufqs[bufq_id].pending++;
+
+ xdp = libeth_xsk_process_buff(xdp, bufq->xsk_buf[buf_id],
+ idpf_xdp_rx_len(&desc));
+
+ if (unlikely(++ntc == cnt)) {
+ ntc = 0;
+ gen = !gen;
+ idpf_queue_change(GEN_CHK, rxq);
+ }
+
+ if (!idpf_xdp_rx_eop(&desc) || unlikely(!xdp))
+ continue;
+
+ fail = !idpf_xsk_run_pass(xdp, &bq, rxq->napi, &rs, rx_desc);
+ xdp = NULL;
+
+ if (fail)
+ break;
+ }
+
+ idpf_xsk_finalize_rx(&bq);
+
+ rxq->next_to_clean = ntc;
+ rxq->xsk = xdp;
+
+ fail |= !idpf_xskfq_refill_set(&set);
+
+ u64_stats_update_begin(&rxq->stats_sync);
+ u64_stats_add(&rxq->q_stats.packets, rs.packets);
+ u64_stats_add(&rxq->q_stats.bytes, rs.bytes);
+ u64_stats_update_end(&rxq->stats_sync);
+
+ if (!wake)
+ return unlikely(fail) ? budget : rs.packets;
+
+ if (unlikely(fail))
+ xsk_set_rx_need_wakeup(rxq->pool);
+
+ return rs.packets;
+}
+
+int idpf_xsk_pool_setup(struct idpf_vport *vport, struct netdev_bpf *bpf)
+{
+ struct xsk_buff_pool *pool = bpf->xsk.pool;
+ u32 qid = bpf->xsk.queue_id;
+ bool restart;
+ int ret;
+
+ if (pool && !IS_ALIGNED(xsk_pool_get_rx_frame_size(pool),
+ LIBETH_RX_BUF_STRIDE)) {
+ NL_SET_ERR_MSG_FMT_MOD(bpf->extack,
+ "%s: HW doesn't support frames sizes not aligned to %u (qid %u: %u)",
+ netdev_name(vport->netdev),
+ LIBETH_RX_BUF_STRIDE, qid,
+ xsk_pool_get_rx_frame_size(pool));
+ return -EINVAL;
+ }
+
+ restart = idpf_xdp_enabled(vport) && netif_running(vport->netdev);
+ if (!restart)
+ goto pool;
+
+ ret = idpf_qp_switch(vport, qid, false);
+ if (ret) {
+ NL_SET_ERR_MSG_FMT_MOD(bpf->extack,
+ "%s: failed to disable queue pair %u: %pe",
+ netdev_name(vport->netdev), qid,
+ ERR_PTR(ret));
+ return ret;
+ }
+
+pool:
+ ret = libeth_xsk_setup_pool(vport->netdev, qid, pool);
+ if (ret) {
+ NL_SET_ERR_MSG_FMT_MOD(bpf->extack,
+ "%s: failed to configure XSk pool for pair %u: %pe",
+ netdev_name(vport->netdev), qid,
+ ERR_PTR(ret));
+ return ret;
+ }
+
+ if (!restart)
+ return 0;
+
+ ret = idpf_qp_switch(vport, qid, true);
+ if (ret) {
+ NL_SET_ERR_MSG_FMT_MOD(bpf->extack,
+ "%s: failed to enable queue pair %u: %pe",
+ netdev_name(vport->netdev), qid,
+ ERR_PTR(ret));
+ goto err_dis;
+ }
+
+ return 0;
+
+err_dis:
+ libeth_xsk_setup_pool(vport->netdev, qid, false);
+
+ return ret;
+}
+
+int idpf_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
+{
+ const struct idpf_netdev_priv *np = netdev_priv(dev);
+ const struct idpf_vport *vport = np->vport;
+ struct idpf_q_vector *q_vector;
+
+ if (unlikely(idpf_vport_ctrl_is_locked(dev)))
+ return -EBUSY;
+
+ if (unlikely(!vport->link_up))
+ return -ENETDOWN;
+
+ if (unlikely(!vport->num_xdp_txq))
+ return -ENXIO;
+
+ q_vector = idpf_find_rxq_vec(vport, qid);
+ if (unlikely(!q_vector->xsksq))
+ return -ENXIO;
+
+ libeth_xsk_wakeup(&q_vector->csd, qid);
+
+ return 0;
+}
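idpf_xskfq_refill() above replenishes the XSk fill queue in rx_thresh-sized chunks, rounding
the amount down so a partial chunk is deferred to the next poll; the pending - 1 appears to
keep one descriptor slot in reserve, which is an inference rather than something stated in
the code. The arithmetic in isolation (illustrative name, power-of-two rx_thresh assumed):

#include <linux/align.h>

/* How many buffers one refill pass posts; mirrors idpf_xskfq_refill(). */
static inline u32 example_refill_count(u32 pending, u32 rx_thresh)
{
	return ALIGN_DOWN(pending - 1, rx_thresh);
}

/* e.g. pending = 130, rx_thresh = 64 -> 128 buffers, posted as two 64-buffer chunks */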
diff --git a/drivers/net/ethernet/intel/idpf/xsk.h b/drivers/net/ethernet/intel/idpf/xsk.h
new file mode 100644
index 000000000000..b622d08c03e8
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/xsk.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2025 Intel Corporation */
+
+#ifndef _IDPF_XSK_H_
+#define _IDPF_XSK_H_
+
+#include <linux/types.h>
+
+enum virtchnl2_queue_type;
+struct idpf_buf_queue;
+struct idpf_q_vector;
+struct idpf_rx_queue;
+struct idpf_tx_queue;
+struct idpf_vport;
+struct net_device;
+struct netdev_bpf;
+
+void idpf_xsk_setup_queue(const struct idpf_vport *vport, void *q,
+ enum virtchnl2_queue_type type);
+void idpf_xsk_clear_queue(void *q, enum virtchnl2_queue_type type);
+void idpf_xsk_init_wakeup(struct idpf_q_vector *qv);
+
+int idpf_xskfq_init(struct idpf_buf_queue *bufq);
+void idpf_xskfq_rel(struct idpf_buf_queue *bufq);
+void idpf_xsksq_clean(struct idpf_tx_queue *xdpq);
+
+int idpf_xskrq_poll(struct idpf_rx_queue *rxq, u32 budget);
+bool idpf_xsk_xmit(struct idpf_tx_queue *xsksq);
+
+int idpf_xsk_pool_setup(struct idpf_vport *vport, struct netdev_bpf *xdp);
+int idpf_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags);
+
+#endif /* !_IDPF_XSK_H_ */
diff --git a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile
index 394c1e0656b9..6c1b702fd992 100644
--- a/drivers/net/ethernet/intel/igb/Makefile
+++ b/drivers/net/ethernet/intel/igb/Makefile
@@ -6,6 +6,6 @@
obj-$(CONFIG_IGB) += igb.o
-igb-objs := igb_main.o igb_ethtool.o e1000_82575.o \
- e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o \
- e1000_i210.o igb_ptp.o igb_hwmon.o
+igb-y := igb_main.o igb_ethtool.o e1000_82575.o \
+ e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o \
+ e1000_i210.o igb_ptp.o igb_hwmon.o igb_xsk.o
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 64dfc362d1dc..44a85ad749a4 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -2372,7 +2372,7 @@ static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
checksum += nvm_data;
}
- if (checksum != (u16) NVM_SUM) {
+ if (checksum != NVM_SUM) {
hw_dbg("NVM Checksum Invalid\n");
ret_val = -E1000_ERR_NVM;
goto out;
@@ -2406,7 +2406,7 @@ static s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
}
checksum += nvm_data;
}
- checksum = (u16) NVM_SUM - checksum;
+ checksum = NVM_SUM - checksum;
ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1,
&checksum);
if (ret_val)
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index 503b239868e8..9db29b231d6a 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -602,7 +602,7 @@ static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
}
checksum += nvm_data;
}
- checksum = (u16) NVM_SUM - checksum;
+ checksum = NVM_SUM - checksum;
ret_val = igb_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
&checksum);
if (ret_val) {
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h
index 6e110f28f922..529b7d18b662 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.h
@@ -63,6 +63,5 @@ enum e1000_mng_mode {
#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2
-void e1000_init_function_pointers_82575(struct e1000_hw *hw);
#endif
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index 2dcd64d6dec3..c8638502c2be 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -636,7 +636,7 @@ s32 igb_validate_nvm_checksum(struct e1000_hw *hw)
checksum += nvm_data;
}
- if (checksum != (u16) NVM_SUM) {
+ if (checksum != NVM_SUM) {
hw_dbg("NVM Checksum Invalid\n");
ret_val = -E1000_ERR_NVM;
goto out;
@@ -668,7 +668,7 @@ s32 igb_update_nvm_checksum(struct e1000_hw *hw)
}
checksum += nvm_data;
}
- checksum = (u16) NVM_SUM - checksum;
+ checksum = NVM_SUM - checksum;
ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum);
if (ret_val)
hw_dbg("NVM Write Error while updating checksum.\n");
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h
index 091cddf4ada8..4f652ab713b3 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.h
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h
@@ -7,7 +7,6 @@
s32 igb_acquire_nvm(struct e1000_hw *hw);
void igb_release_nvm(struct e1000_hw *hw);
s32 igb_read_mac_addr(struct e1000_hw *hw);
-s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num);
s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num,
u32 part_num_size);
s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 3c2dc7bdebb5..0fff1df81b7b 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -18,8 +18,10 @@
#include <linux/i2c-algo-bit.h>
#include <linux/pci.h>
#include <linux/mdio.h>
+#include <linux/lockdep.h>
#include <net/xdp.h>
+#include <net/xdp_sock_drv.h>
struct igb_adapter;
@@ -86,6 +88,7 @@ struct igb_adapter;
#define IGB_XDP_CONSUMED BIT(0)
#define IGB_XDP_TX BIT(1)
#define IGB_XDP_REDIR BIT(2)
+#define IGB_XDP_EXIT BIT(3)
struct vf_data_storage {
unsigned char vf_mac_addresses[ETH_ALEN];
@@ -214,7 +217,7 @@ static inline int igb_skb_pad(void)
#define IGB_MASTER_SLAVE e1000_ms_hw_default
#endif
-#define IGB_MNG_VLAN_NONE -1
+#define IGB_MNG_VLAN_NONE 0xFFFF
enum igb_tx_flags {
/* cmd_type flags */
@@ -255,6 +258,7 @@ enum igb_tx_flags {
enum igb_tx_buf_type {
IGB_TYPE_SKB = 0,
IGB_TYPE_XDP,
+ IGB_TYPE_XSK
};
/* wrapper around a pointer to a socket buffer,
@@ -320,6 +324,7 @@ struct igb_ring {
union { /* array of buffer info structs */
struct igb_tx_buffer *tx_buffer_info;
struct igb_rx_buffer *rx_buffer_info;
+ struct xdp_buff **rx_buffer_info_zc;
};
void *desc; /* descriptor ring memory */
unsigned long flags; /* ring specific flags */
@@ -357,6 +362,7 @@ struct igb_ring {
};
};
struct xdp_rxq_info xdp_rxq;
+ struct xsk_buff_pool *xsk_pool;
} ____cacheline_internodealigned_in_smp;
struct igb_q_vector {
@@ -384,7 +390,9 @@ enum e1000_ring_flags_t {
IGB_RING_FLAG_RX_SCTP_CSUM,
IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
IGB_RING_FLAG_TX_CTX_IDX,
- IGB_RING_FLAG_TX_DETECT_HANG
+ IGB_RING_FLAG_TX_DETECT_HANG,
+ IGB_RING_FLAG_TX_DISABLED,
+ IGB_RING_FLAG_RX_ALLOC_FAILED,
};
#define ring_uses_large_buffer(ring) \
@@ -618,7 +626,7 @@ struct igb_adapter {
struct delayed_work ptp_overflow_work;
struct work_struct ptp_tx_work;
struct sk_buff *ptp_tx_skb;
- struct hwtstamp_config tstamp_config;
+ struct kernel_hwtstamp_config tstamp_config;
unsigned long ptp_tx_start;
unsigned long last_rx_ptp_check;
unsigned long last_rx_timestamp;
@@ -715,6 +723,8 @@ enum igb_boards {
extern char igb_driver_name[];
+void igb_set_queue_napi(struct igb_adapter *adapter, int q_idx,
+ struct napi_struct *napi);
int igb_xmit_xdp_ring(struct igb_adapter *adapter,
struct igb_ring *ring,
struct xdp_frame *xdpf);
@@ -731,12 +741,21 @@ int igb_setup_tx_resources(struct igb_ring *);
int igb_setup_rx_resources(struct igb_ring *);
void igb_free_tx_resources(struct igb_ring *);
void igb_free_rx_resources(struct igb_ring *);
+void igb_clean_tx_ring(struct igb_ring *tx_ring);
+void igb_clean_rx_ring(struct igb_ring *rx_ring);
void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
+void igb_finalize_xdp(struct igb_adapter *adapter, unsigned int status);
+void igb_update_rx_stats(struct igb_q_vector *q_vector, unsigned int packets,
+ unsigned int bytes);
void igb_setup_tctl(struct igb_adapter *);
void igb_setup_rctl(struct igb_adapter *);
void igb_setup_srrctl(struct igb_adapter *, struct igb_ring *);
netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
+int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp);
+void igb_process_skb_fields(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb);
void igb_alloc_rx_buffers(struct igb_ring *, u16);
void igb_update_stats(struct igb_adapter *);
bool igb_has_link(struct igb_adapter *adapter);
@@ -752,8 +771,11 @@ void igb_ptp_tx_hang(struct igb_adapter *adapter);
void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
ktime_t *timestamp);
-int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
-int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
+int igb_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config);
+int igb_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
void igb_set_flag_queue_pairs(struct igb_adapter *, const u32);
unsigned int igb_get_max_rss_queues(struct igb_adapter *);
#ifdef CONFIG_IGB_HWMON
@@ -797,6 +819,33 @@ static inline struct netdev_queue *txring_txq(const struct igb_ring *tx_ring)
return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
}
+/* This function assumes __netif_tx_lock is held by the caller. */
+static inline void igb_xdp_ring_update_tail(struct igb_ring *ring)
+{
+ lockdep_assert_held(&txring_txq(ring)->_xmit_lock);
+
+ /* Force memory writes to complete before letting h/w know there
+ * are new descriptors to fetch.
+ */
+ wmb();
+ writel(ring->next_to_use, ring->tail);
+}
+
+static inline struct igb_ring *igb_xdp_tx_queue_mapping(struct igb_adapter *adapter)
+{
+ unsigned int r_idx = smp_processor_id();
+
+ if (r_idx >= adapter->num_tx_queues)
+ r_idx = r_idx % adapter->num_tx_queues;
+
+ return adapter->tx_ring[r_idx];
+}
+
+static inline bool igb_xdp_is_enabled(struct igb_adapter *adapter)
+{
+ return !!READ_ONCE(adapter->xdp_prog);
+}
+
int igb_add_filter(struct igb_adapter *adapter,
struct igb_nfc_filter *input);
int igb_erase_filter(struct igb_adapter *adapter,
@@ -807,4 +856,17 @@ int igb_add_mac_steering_filter(struct igb_adapter *adapter,
int igb_del_mac_steering_filter(struct igb_adapter *adapter,
const u8 *addr, u8 queue, u8 flags);
+struct xsk_buff_pool *igb_xsk_pool(struct igb_adapter *adapter,
+ struct igb_ring *ring);
+int igb_xsk_pool_setup(struct igb_adapter *adapter,
+ struct xsk_buff_pool *pool,
+ u16 qid);
+bool igb_alloc_rx_buffers_zc(struct igb_ring *rx_ring,
+ struct xsk_buff_pool *xsk_pool, u16 count);
+void igb_clean_rx_ring_zc(struct igb_ring *rx_ring);
+int igb_clean_rx_irq_zc(struct igb_q_vector *q_vector,
+ struct xsk_buff_pool *xsk_pool, const int budget);
+bool igb_xmit_zc(struct igb_ring *tx_ring, struct xsk_buff_pool *xsk_pool);
+int igb_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags);
+
#endif /* _IGB_H_ */
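The igb.h hunk above moves igb_xdp_ring_update_tail() and igb_xdp_tx_queue_mapping() into
the header so the XSk code can share them with the XDP path. A simplified, hypothetical
usage sketch of the lock / queue / tail-bump sequence follows; the real producers are
igb_xdp_xmit_back() and the ndo_xdp_xmit path, and error handling plus the TX_DISABLED
check are omitted here.

/* Hypothetical single-frame transmit; shows the locking pattern that the
 * lockdep_assert_held() in igb_xdp_ring_update_tail() enforces.
 */
static int example_xdp_xmit_one(struct igb_adapter *adapter,
				struct xdp_frame *xdpf)
{
	struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);
	struct netdev_queue *nq = txring_txq(tx_ring);
	int ret;

	__netif_tx_lock(nq, smp_processor_id());
	ret = igb_xmit_xdp_ring(adapter, tx_ring, xdpf);
	if (ret == IGB_XDP_TX)
		igb_xdp_ring_update_tail(tx_ring);	/* tail bump under the lock */
	__netif_tx_unlock(nq);

	return ret;
}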
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 99977a22b843..b507576b28b2 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -920,11 +920,11 @@ static int igb_set_ringparam(struct net_device *netdev,
}
if (adapter->num_tx_queues > adapter->num_rx_queues)
- temp_ring = vmalloc(array_size(sizeof(struct igb_ring),
- adapter->num_tx_queues));
+ temp_ring = vmalloc_array(adapter->num_tx_queues,
+ sizeof(struct igb_ring));
else
- temp_ring = vmalloc(array_size(sizeof(struct igb_ring),
- adapter->num_rx_queues));
+ temp_ring = vmalloc_array(adapter->num_rx_queues,
+ sizeof(struct igb_ring));
if (!temp_ring) {
err = -ENOMEM;
@@ -2081,11 +2081,8 @@ static void igb_diag_test(struct net_device *netdev,
} else {
dev_info(&adapter->pdev->dev, "online testing starting\n");
- /* PHY is powered down when interface is down */
- if (if_running && igb_link_test(adapter, &data[TEST_LINK]))
+ if (igb_link_test(adapter, &data[TEST_LINK]))
eth_test->flags |= ETH_TEST_FL_FAILED;
- else
- data[TEST_LINK] = 0;
/* Online tests aren't run; pass by default */
data[TEST_REG] = 0;
@@ -2284,7 +2281,7 @@ static int igb_get_sset_count(struct net_device *netdev, int sset)
case ETH_SS_PRIV_FLAGS:
return IGB_PRIV_FLAGS_STR_LEN;
default:
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
}
@@ -2381,21 +2378,17 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
}
static int igb_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct igb_adapter *adapter = netdev_priv(dev);
if (adapter->ptp_clock)
info->phc_index = ptp_clock_index(adapter->ptp_clock);
- else
- info->phc_index = -1;
switch (adapter->hw.mac.type) {
case e1000_82575:
info->so_timestamping =
- SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
+ SOF_TIMESTAMPING_TX_SOFTWARE;
return 0;
case e1000_82576:
case e1000_82580:
@@ -2405,8 +2398,6 @@ static int igb_get_ts_info(struct net_device *dev,
case e1000_i211:
info->so_timestamping =
SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
@@ -2506,9 +2497,11 @@ static int igb_get_ethtool_nfc_all(struct igb_adapter *adapter,
return 0;
}
-static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
- struct ethtool_rxnfc *cmd)
+static int igb_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct igb_adapter *adapter = netdev_priv(dev);
+
cmd->data = 0;
/* Report default options for RSS on igb */
@@ -2548,6 +2541,13 @@ static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
return 0;
}
+static u32 igb_get_rx_ring_count(struct net_device *dev)
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+
+ return adapter->num_rx_queues;
+}
+
static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
@@ -2555,10 +2555,6 @@ static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
int ret = -EOPNOTSUPP;
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = adapter->num_rx_queues;
- ret = 0;
- break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = adapter->nfc_filter_count;
ret = 0;
@@ -2569,9 +2565,6 @@ static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
case ETHTOOL_GRXCLSRLALL:
ret = igb_get_ethtool_nfc_all(adapter, cmd, rule_locs);
break;
- case ETHTOOL_GRXFH:
- ret = igb_get_rss_hash_opts(adapter, cmd);
- break;
default:
break;
}
@@ -2581,9 +2574,11 @@ static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
#define UDP_RSS_FLAGS (IGB_FLAG_RSS_FIELD_IPV4_UDP | \
IGB_FLAG_RSS_FIELD_IPV6_UDP)
-static int igb_set_rss_hash_opt(struct igb_adapter *adapter,
- struct ethtool_rxnfc *nfc)
+static int igb_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
{
+ struct igb_adapter *adapter = netdev_priv(dev);
u32 flags = adapter->flags;
/* RSS does not support anything other than hashing
@@ -3011,9 +3006,6 @@ static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
int ret = -EOPNOTSUPP;
switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- ret = igb_set_rss_hash_opt(adapter, cmd);
- break;
case ETHTOOL_SRXCLSRLINS:
ret = igb_add_ethtool_nfc_entry(adapter, cmd);
break;
@@ -3272,19 +3264,6 @@ static int igb_get_module_eeprom(struct net_device *netdev,
return 0;
}
-static int igb_ethtool_begin(struct net_device *netdev)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- pm_runtime_get_sync(&adapter->pdev->dev);
- return 0;
-}
-
-static void igb_ethtool_complete(struct net_device *netdev)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- pm_runtime_put(&adapter->pdev->dev);
-}
-
static u32 igb_get_rxfh_indir_size(struct net_device *netdev)
{
return IGB_RETA_SIZE;
@@ -3497,6 +3476,7 @@ static const struct ethtool_ops igb_ethtool_ops = {
.get_ts_info = igb_get_ts_info,
.get_rxnfc = igb_get_rxnfc,
.set_rxnfc = igb_set_rxnfc,
+ .get_rx_ring_count = igb_get_rx_ring_count,
.get_eee = igb_get_eee,
.set_eee = igb_set_eee,
.get_module_info = igb_get_module_info,
@@ -3504,12 +3484,12 @@ static const struct ethtool_ops igb_ethtool_ops = {
.get_rxfh_indir_size = igb_get_rxfh_indir_size,
.get_rxfh = igb_get_rxfh,
.set_rxfh = igb_set_rxfh,
+ .get_rxfh_fields = igb_get_rxfh_fields,
+ .set_rxfh_fields = igb_set_rxfh_fields,
.get_channels = igb_get_channels,
.set_channels = igb_set_channels,
.get_priv_flags = igb_get_priv_flags,
.set_priv_flags = igb_set_priv_flags,
- .begin = igb_ethtool_begin,
- .complete = igb_ethtool_complete,
.get_link_ksettings = igb_get_link_ksettings,
.set_link_ksettings = igb_set_link_ksettings,
};
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index a3f100769e39..dbea37269d2c 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -106,8 +106,6 @@ static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
-static int igb_probe(struct pci_dev *, const struct pci_device_id *);
-static void igb_remove(struct pci_dev *pdev);
static void igb_init_queue_configuration(struct igb_adapter *adapter);
static int igb_sw_init(struct igb_adapter *);
int igb_open(struct net_device *);
@@ -117,8 +115,6 @@ static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
-static void igb_clean_tx_ring(struct igb_ring *);
-static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(struct timer_list *);
static void igb_watchdog(struct timer_list *);
@@ -178,20 +174,6 @@ static int igb_vf_configure(struct igb_adapter *adapter, int vf);
static int igb_disable_sriov(struct pci_dev *dev, bool reinit);
#endif
-static int igb_suspend(struct device *);
-static int igb_resume(struct device *);
-static int igb_runtime_suspend(struct device *dev);
-static int igb_runtime_resume(struct device *dev);
-static int igb_runtime_idle(struct device *dev);
-#ifdef CONFIG_PM
-static const struct dev_pm_ops igb_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
- SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
- igb_runtime_idle)
-};
-#endif
-static void igb_shutdown(struct pci_dev *);
-static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
@@ -219,20 +201,6 @@ static const struct pci_error_handlers igb_err_handler = {
static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);
-static struct pci_driver igb_driver = {
- .name = igb_driver_name,
- .id_table = igb_pci_tbl,
- .probe = igb_probe,
- .remove = igb_remove,
-#ifdef CONFIG_PM
- .driver.pm = &igb_pm_ops,
-#endif
- .shutdown = igb_shutdown,
- .sriov_configure = igb_pci_sriov_configure,
- .err_handler = &igb_err_handler
-};
-
-MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL v2");
@@ -504,12 +472,17 @@ rx_ring_summary:
for (i = 0; i < rx_ring->count; i++) {
const char *next_desc;
- struct igb_rx_buffer *buffer_info;
- buffer_info = &rx_ring->rx_buffer_info[i];
+ dma_addr_t dma = (dma_addr_t)0;
+ struct igb_rx_buffer *buffer_info = NULL;
rx_desc = IGB_RX_DESC(rx_ring, i);
u0 = (struct my_u0 *)rx_desc;
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ if (!rx_ring->xsk_pool) {
+ buffer_info = &rx_ring->rx_buffer_info[i];
+ dma = buffer_info->dma;
+ }
+
if (i == rx_ring->next_to_use)
next_desc = " NTU";
else if (i == rx_ring->next_to_clean)
@@ -529,11 +502,11 @@ rx_ring_summary:
"R ", i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
- (u64)buffer_info->dma,
+ (u64)dma,
next_desc);
if (netif_msg_pktdata(adapter) &&
- buffer_info->dma && buffer_info->page) {
+ buffer_info && dma && buffer_info->page) {
print_hex_dump(KERN_INFO, "",
DUMP_PREFIX_ADDRESS,
16, 1,
@@ -647,6 +620,8 @@ struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
return adapter->netdev;
}
+static struct pci_driver igb_driver;
+
/**
* igb_init_module - Driver Registration Routine
*
@@ -664,6 +639,10 @@ static int __init igb_init_module(void)
dca_register_notify(&dca_notifier);
#endif
ret = pci_register_driver(&igb_driver);
+#ifdef CONFIG_IGB_DCA
+ if (ret)
+ dca_unregister_notify(&dca_notifier);
+#endif
return ret;
}
@@ -968,6 +947,9 @@ static int igb_request_msix(struct igb_adapter *adapter)
q_vector);
if (err)
goto err_free;
+
+ netif_napi_set_irq(&q_vector->napi,
+ adapter->msix_entries[vector].vector);
}
igb_configure_msix(adapter);
@@ -1215,7 +1197,8 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
return -ENOMEM;
/* initialize NAPI */
- netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll);
+ netif_napi_add_config(adapter->netdev, &q_vector->napi, igb_poll,
+ v_idx);
/* tie q_vector and adapter together */
adapter->q_vector[v_idx] = q_vector;
@@ -1231,7 +1214,7 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
/* initialize pointer to rings */
ring = q_vector->ring;
- /* intialize ITR */
+ /* initialize ITR */
if (rxr_count) {
/* rx or rx/tx vector */
if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
@@ -1548,8 +1531,7 @@ static void igb_update_mng_vlan(struct igb_adapter *adapter)
adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
}
- if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
- (vid != old_vid) &&
+ if (old_vid != IGB_MNG_VLAN_NONE && vid != old_vid &&
!test_bit(old_vid, adapter->active_vlans)) {
/* remove VID from filter table */
igb_vfta_set(hw, vid, pf_id, false, true);
@@ -2013,7 +1995,11 @@ static void igb_configure(struct igb_adapter *adapter)
*/
for (i = 0; i < adapter->num_rx_queues; i++) {
struct igb_ring *ring = adapter->rx_ring[i];
- igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
+ if (ring->xsk_pool)
+ igb_alloc_rx_buffers_zc(ring, ring->xsk_pool,
+ igb_desc_unused(ring));
+ else
+ igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
}
}
@@ -2113,6 +2099,22 @@ static void igb_check_swap_media(struct igb_adapter *adapter)
wr32(E1000_CTRL_EXT, ctrl_ext);
}
+void igb_set_queue_napi(struct igb_adapter *adapter, int vector,
+ struct napi_struct *napi)
+{
+ struct igb_q_vector *q_vector = adapter->q_vector[vector];
+
+ if (q_vector->rx.ring)
+ netif_queue_set_napi(adapter->netdev,
+ q_vector->rx.ring->queue_index,
+ NETDEV_QUEUE_TYPE_RX, napi);
+
+ if (q_vector->tx.ring)
+ netif_queue_set_napi(adapter->netdev,
+ q_vector->tx.ring->queue_index,
+ NETDEV_QUEUE_TYPE_TX, napi);
+}
+
/**
* igb_up - Open the interface and prepare it to handle traffic
* @adapter: board private structure
@@ -2120,6 +2122,7 @@ static void igb_check_swap_media(struct igb_adapter *adapter)
int igb_up(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
+ struct napi_struct *napi;
int i;
/* hardware has been reset, we need to reload some things */
@@ -2127,8 +2130,11 @@ int igb_up(struct igb_adapter *adapter)
clear_bit(__IGB_DOWN, &adapter->state);
- for (i = 0; i < adapter->num_q_vectors; i++)
- napi_enable(&(adapter->q_vector[i]->napi));
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ napi = &adapter->q_vector[i]->napi;
+ napi_enable(napi);
+ igb_set_queue_napi(adapter, i, napi);
+ }
if (adapter->flags & IGB_FLAG_HAS_MSIX)
igb_configure_msix(adapter);
@@ -2198,12 +2204,13 @@ void igb_down(struct igb_adapter *adapter)
for (i = 0; i < adapter->num_q_vectors; i++) {
if (adapter->q_vector[i]) {
napi_synchronize(&adapter->q_vector[i]->napi);
+ igb_set_queue_napi(adapter, i, NULL);
napi_disable(&adapter->q_vector[i]->napi);
}
}
- del_timer_sync(&adapter->watchdog_timer);
- del_timer_sync(&adapter->phy_info_timer);
+ timer_delete_sync(&adapter->watchdog_timer);
+ timer_delete_sync(&adapter->phy_info_timer);
/* record the stats before reset*/
spin_lock(&adapter->stats64_lock);
@@ -2513,7 +2520,7 @@ static int igb_set_features(struct net_device *netdev,
static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr, u16 vid,
- u16 flags,
+ u16 flags, bool *notified,
struct netlink_ext_ack *extack)
{
/* guarantee we can provide a unique filter for the unicast address */
@@ -2624,6 +2631,9 @@ static int igb_parse_cls_flower(struct igb_adapter *adapter,
return -EOPNOTSUPP;
}
+ if (flow_rule_match_has_control_flags(rule, extack))
+ return -EOPNOTSUPP;
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
struct flow_match_eth_addrs match;
@@ -2931,34 +2941,20 @@ static int igb_xdp_setup(struct net_device *dev, struct netdev_bpf *bpf)
static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
+ struct igb_adapter *adapter = netdev_priv(dev);
+
switch (xdp->command) {
case XDP_SETUP_PROG:
return igb_xdp_setup(dev, xdp);
+ case XDP_SETUP_XSK_POOL:
+ return igb_xsk_pool_setup(adapter, xdp->xsk.pool,
+ xdp->xsk.queue_id);
default:
return -EINVAL;
}
}
-static void igb_xdp_ring_update_tail(struct igb_ring *ring)
-{
- /* Force memory writes to complete before letting h/w know there
- * are new descriptors to fetch.
- */
- wmb();
- writel(ring->next_to_use, ring->tail);
-}
-
-static struct igb_ring *igb_xdp_tx_queue_mapping(struct igb_adapter *adapter)
-{
- unsigned int r_idx = smp_processor_id();
-
- if (r_idx >= adapter->num_tx_queues)
- r_idx = r_idx % adapter->num_tx_queues;
-
- return adapter->tx_ring[r_idx];
-}
-
-static int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp)
+int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp)
{
struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
int cpu = smp_processor_id();
@@ -2972,7 +2968,8 @@ static int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp)
/* During program transitions its possible adapter->xdp_prog is assigned
* but ring has not been configured yet. In this case simply abort xmit.
*/
- tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL;
+ tx_ring = igb_xdp_is_enabled(adapter) ?
+ igb_xdp_tx_queue_mapping(adapter) : NULL;
if (unlikely(!tx_ring))
return IGB_XDP_CONSUMED;
@@ -3005,10 +3002,14 @@ static int igb_xdp_xmit(struct net_device *dev, int n,
/* During program transitions its possible adapter->xdp_prog is assigned
* but ring has not been configured yet. In this case simply abort xmit.
*/
- tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL;
+ tx_ring = igb_xdp_is_enabled(adapter) ?
+ igb_xdp_tx_queue_mapping(adapter) : NULL;
if (unlikely(!tx_ring))
return -ENXIO;
+ if (unlikely(test_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags)))
+ return -ENXIO;
+
nq = txring_txq(tx_ring);
__netif_tx_lock(nq, cpu);
@@ -3025,11 +3026,11 @@ static int igb_xdp_xmit(struct net_device *dev, int n,
nxmit++;
}
- __netif_tx_unlock(nq);
-
if (unlikely(flags & XDP_XMIT_FLUSH))
igb_xdp_ring_update_tail(tx_ring);
+ __netif_tx_unlock(nq);
+
return nxmit;
}
@@ -3059,6 +3060,9 @@ static const struct net_device_ops igb_netdev_ops = {
.ndo_setup_tc = igb_setup_tc,
.ndo_bpf = igb_xdp,
.ndo_xdp_xmit = igb_xdp_xmit,
+ .ndo_xsk_wakeup = igb_xsk_wakeup,
+ .ndo_hwtstamp_get = igb_ptp_hwtstamp_get,
+ .ndo_hwtstamp_set = igb_ptp_hwtstamp_set,
};
/**
@@ -3355,7 +3359,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->priv_flags |= IFF_SUPP_NOFCS;
netdev->priv_flags |= IFF_UNICAST_FLT;
- netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_XSK_ZEROCOPY;
/* MTU range: 68 - 9216 */
netdev->min_mtu = ETH_MIN_MTU;
@@ -3881,8 +3886,8 @@ static void igb_remove(struct pci_dev *pdev)
* disable watchdog from being rescheduled.
*/
set_bit(__IGB_DOWN, &adapter->state);
- del_timer_sync(&adapter->watchdog_timer);
- del_timer_sync(&adapter->phy_info_timer);
+ timer_delete_sync(&adapter->watchdog_timer);
+ timer_delete_sync(&adapter->phy_info_timer);
cancel_work_sync(&adapter->reset_task);
cancel_work_sync(&adapter->watchdog_task);
@@ -3927,7 +3932,7 @@ static void igb_remove(struct pci_dev *pdev)
*
* This function initializes the vf specific data storage and then attempts to
* allocate the VFs. The reason for ordering it this way is because it is much
- * mor expensive time wise to disable SR-IOV than it is to allocate and free
+ * more expensive time wise to disable SR-IOV than it is to allocate and free
* the memory for the VFs.
**/
static void igb_probe_vfs(struct igb_adapter *adapter)
@@ -4134,8 +4139,9 @@ static int igb_sw_init(struct igb_adapter *adapter)
static int __igb_open(struct net_device *netdev, bool resuming)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
+ struct e1000_hw *hw = &adapter->hw;
+ struct napi_struct *napi;
int err;
int i;
@@ -4187,8 +4193,11 @@ static int __igb_open(struct net_device *netdev, bool resuming)
/* From here on the code is the same as igb_up() */
clear_bit(__IGB_DOWN, &adapter->state);
- for (i = 0; i < adapter->num_q_vectors; i++)
- napi_enable(&(adapter->q_vector[i]->napi));
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ napi = &adapter->q_vector[i]->napi;
+ napi_enable(napi);
+ igb_set_queue_napi(adapter, i, napi);
+ }
/* Clear any pending interrupts. */
rd32(E1000_TSICR);
@@ -4381,6 +4390,8 @@ void igb_configure_tx_ring(struct igb_adapter *adapter,
u64 tdba = ring->dma;
int reg_idx = ring->reg_idx;
+ WRITE_ONCE(ring->xsk_pool, igb_xsk_pool(adapter, ring));
+
wr32(E1000_TDLEN(reg_idx),
ring->count * sizeof(union e1000_adv_tx_desc));
wr32(E1000_TDBAL(reg_idx),
@@ -4737,12 +4748,17 @@ void igb_setup_srrctl(struct igb_adapter *adapter, struct igb_ring *ring)
struct e1000_hw *hw = &adapter->hw;
int reg_idx = ring->reg_idx;
u32 srrctl = 0;
+ u32 buf_size;
- srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
- if (ring_uses_large_buffer(ring))
- srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+ if (ring->xsk_pool)
+ buf_size = xsk_pool_get_rx_frame_size(ring->xsk_pool);
+ else if (ring_uses_large_buffer(ring))
+ buf_size = IGB_RXBUFFER_3072;
else
- srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+ buf_size = IGB_RXBUFFER_2048;
+
+ srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
+ srrctl |= buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT;
srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
if (hw->mac.type >= e1000_82580)
srrctl |= E1000_SRRCTL_TIMESTAMP;
@@ -4774,8 +4790,17 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
u32 rxdctl = 0;
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
- WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
- MEM_TYPE_PAGE_SHARED, NULL));
+ WRITE_ONCE(ring->xsk_pool, igb_xsk_pool(adapter, ring));
+ if (ring->xsk_pool) {
+ WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_XSK_BUFF_POOL,
+ NULL));
+ xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
+ } else {
+ WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED,
+ NULL));
+ }
/* disable the queue */
wr32(E1000_RXDCTL(reg_idx), 0);
@@ -4802,9 +4827,12 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
rxdctl |= IGB_RX_HTHRESH << 8;
rxdctl |= IGB_RX_WTHRESH << 16;
- /* initialize rx_buffer_info */
- memset(ring->rx_buffer_info, 0,
- sizeof(struct igb_rx_buffer) * ring->count);
+ if (ring->xsk_pool)
+ memset(ring->rx_buffer_info_zc, 0,
+ sizeof(*ring->rx_buffer_info_zc) * ring->count);
+ else
+ memset(ring->rx_buffer_info, 0,
+ sizeof(*ring->rx_buffer_info) * ring->count);
/* initialize Rx descriptor 0 */
rx_desc = IGB_RX_DESC(ring, 0);
@@ -4833,6 +4861,7 @@ static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
#if (PAGE_SIZE < 8192)
if (adapter->max_frame_size > IGB_MAX_FRAME_BUILD_SKB ||
+ IGB_2K_TOO_SMALL_WITH_PADDING ||
rd32(E1000_RCTL) & E1000_RCTL_SBP)
set_ring_uses_large_buffer(rx_ring);
#endif
@@ -4904,19 +4933,24 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter)
* igb_clean_tx_ring - Free Tx Buffers
* @tx_ring: ring to be cleaned
**/
-static void igb_clean_tx_ring(struct igb_ring *tx_ring)
+void igb_clean_tx_ring(struct igb_ring *tx_ring)
{
u16 i = tx_ring->next_to_clean;
struct igb_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
+ u32 xsk_frames = 0;
while (i != tx_ring->next_to_use) {
union e1000_adv_tx_desc *eop_desc, *tx_desc;
/* Free all the Tx ring sk_buffs or xdp frames */
- if (tx_buffer->type == IGB_TYPE_SKB)
+ if (tx_buffer->type == IGB_TYPE_SKB) {
dev_kfree_skb_any(tx_buffer->skb);
- else
+ } else if (tx_buffer->type == IGB_TYPE_XDP) {
xdp_return_frame(tx_buffer->xdpf);
+ } else if (tx_buffer->type == IGB_TYPE_XSK) {
+ xsk_frames++;
+ goto skip_for_xsk;
+ }
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -4947,6 +4981,7 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
DMA_TO_DEVICE);
}
+skip_for_xsk:
tx_buffer->next_to_watch = NULL;
/* move us one more past the eop_desc for start of next pkt */
@@ -4961,6 +4996,9 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
/* reset BQL for queue */
netdev_tx_reset_queue(txring_txq(tx_ring));
+ if (tx_ring->xsk_pool && xsk_frames)
+ xsk_tx_completed(tx_ring->xsk_pool, xsk_frames);
+
/* reset next_to_use and next_to_clean */
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
@@ -4991,8 +5029,13 @@ void igb_free_rx_resources(struct igb_ring *rx_ring)
rx_ring->xdp_prog = NULL;
xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
- vfree(rx_ring->rx_buffer_info);
- rx_ring->rx_buffer_info = NULL;
+ if (rx_ring->xsk_pool) {
+ vfree(rx_ring->rx_buffer_info_zc);
+ rx_ring->rx_buffer_info_zc = NULL;
+ } else {
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+ }
/* if not set, then don't free */
if (!rx_ring->desc)
@@ -5023,13 +5066,18 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
* igb_clean_rx_ring - Free Rx Buffers per Queue
* @rx_ring: ring to free buffers from
**/
-static void igb_clean_rx_ring(struct igb_ring *rx_ring)
+void igb_clean_rx_ring(struct igb_ring *rx_ring)
{
u16 i = rx_ring->next_to_clean;
dev_kfree_skb(rx_ring->skb);
rx_ring->skb = NULL;
+ if (rx_ring->xsk_pool) {
+ igb_clean_rx_ring_zc(rx_ring);
+ goto skip_for_xsk;
+ }
+
/* Free all the Rx ring sk_buffs */
while (i != rx_ring->next_to_alloc) {
struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
@@ -5057,6 +5105,7 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
i = 0;
}
+skip_for_xsk:
rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
@@ -5416,7 +5465,8 @@ static void igb_spoof_check(struct igb_adapter *adapter)
*/
static void igb_update_phy_info(struct timer_list *t)
{
- struct igb_adapter *adapter = from_timer(adapter, t, phy_info_timer);
+ struct igb_adapter *adapter = timer_container_of(adapter, t,
+ phy_info_timer);
igb_get_phy_info(&adapter->hw);
}
@@ -5506,7 +5556,8 @@ static void igb_check_lvmmc(struct igb_adapter *adapter)
**/
static void igb_watchdog(struct timer_list *t)
{
- struct igb_adapter *adapter = from_timer(adapter, t, watchdog_timer);
+ struct igb_adapter *adapter = timer_container_of(adapter, t,
+ watchdog_timer);
/* Do the rest outside of interrupt context */
schedule_work(&adapter->watchdog_task);
}
@@ -5706,11 +5757,29 @@ no_wait:
if (adapter->flags & IGB_FLAG_HAS_MSIX) {
u32 eics = 0;
- for (i = 0; i < adapter->num_q_vectors; i++)
- eics |= adapter->q_vector[i]->eims_value;
- wr32(E1000_EICS, eics);
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ struct igb_q_vector *q_vector = adapter->q_vector[i];
+ struct igb_ring *rx_ring;
+
+ if (!q_vector->rx.ring)
+ continue;
+
+ rx_ring = adapter->rx_ring[q_vector->rx.ring->queue_index];
+
+ if (test_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags)) {
+ eics |= q_vector->eims_value;
+ clear_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
+ }
+ }
+ if (eics)
+ wr32(E1000_EICS, eics);
} else {
- wr32(E1000_ICS, E1000_ICS_RXDMT0);
+ struct igb_ring *rx_ring = adapter->rx_ring[0];
+
+ if (test_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags)) {
+ clear_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
+ wr32(E1000_ICS, E1000_ICS_RXDMT0);
+ }
}
igb_spoof_check(adapter);
@@ -6483,6 +6552,9 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
+ if (unlikely(test_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags)))
+ return NETDEV_TX_BUSY;
+
/* record the location of the first descriptor for this packet */
first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
first->type = IGB_TYPE_SKB;
@@ -6638,7 +6710,7 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
struct igb_adapter *adapter = netdev_priv(netdev);
int max_frame = new_mtu + IGB_ETH_PKT_HDR_PAD;
- if (adapter->xdp_prog) {
+ if (igb_xdp_is_enabled(adapter)) {
int i;
for (i = 0; i < adapter->num_rx_queues; i++) {
@@ -6668,7 +6740,7 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
netdev_dbg(netdev, "changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (netif_running(netdev))
igb_up(adapter);
@@ -6984,10 +7056,20 @@ static void igb_extts(struct igb_adapter *adapter, int tsintr_tt)
static void igb_tsync_interrupt(struct igb_adapter *adapter)
{
+ const u32 mask = (TSINTR_SYS_WRAP | E1000_TSICR_TXTS |
+ TSINTR_TT0 | TSINTR_TT1 |
+ TSINTR_AUTT0 | TSINTR_AUTT1);
struct e1000_hw *hw = &adapter->hw;
u32 tsicr = rd32(E1000_TSICR);
struct ptp_clock_event event;
+ if (hw->mac.type == e1000_82580) {
+ /* 82580 has a hardware bug that requires an explicit
+ * write to clear the TimeSync interrupt cause.
+ */
+ wr32(E1000_TSICR, tsicr & mask);
+ }
+
if (tsicr & TSINTR_SYS_WRAP) {
event.type = PTP_CLOCK_PPS;
if (adapter->ptp_caps.pps)
@@ -8201,6 +8283,7 @@ static int igb_poll(struct napi_struct *napi, int budget)
struct igb_q_vector *q_vector = container_of(napi,
struct igb_q_vector,
napi);
+ struct xsk_buff_pool *xsk_pool;
bool clean_complete = true;
int work_done = 0;
@@ -8212,7 +8295,12 @@ static int igb_poll(struct napi_struct *napi, int budget)
clean_complete = igb_clean_tx_irq(q_vector, budget);
if (q_vector->rx.ring) {
- int cleaned = igb_clean_rx_irq(q_vector, budget);
+ int cleaned;
+
+ xsk_pool = READ_ONCE(q_vector->rx.ring->xsk_pool);
+ cleaned = xsk_pool ?
+ igb_clean_rx_irq_zc(q_vector, xsk_pool, budget) :
+ igb_clean_rx_irq(q_vector, budget);
work_done += cleaned;
if (cleaned >= budget)
@@ -8241,13 +8329,18 @@ static int igb_poll(struct napi_struct *napi, int budget)
**/
static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
{
- struct igb_adapter *adapter = q_vector->adapter;
- struct igb_ring *tx_ring = q_vector->tx.ring;
- struct igb_tx_buffer *tx_buffer;
- union e1000_adv_tx_desc *tx_desc;
unsigned int total_bytes = 0, total_packets = 0;
+ struct igb_adapter *adapter = q_vector->adapter;
unsigned int budget = q_vector->tx.work_limit;
+ struct igb_ring *tx_ring = q_vector->tx.ring;
unsigned int i = tx_ring->next_to_clean;
+ union e1000_adv_tx_desc *tx_desc;
+ struct igb_tx_buffer *tx_buffer;
+ struct xsk_buff_pool *xsk_pool;
+ int cpu = smp_processor_id();
+ bool xsk_xmit_done = true;
+ struct netdev_queue *nq;
+ u32 xsk_frames = 0;
if (test_bit(__IGB_DOWN, &adapter->state))
return true;
@@ -8278,10 +8371,14 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
total_packets += tx_buffer->gso_segs;
/* free the skb */
- if (tx_buffer->type == IGB_TYPE_SKB)
+ if (tx_buffer->type == IGB_TYPE_SKB) {
napi_consume_skb(tx_buffer->skb, napi_budget);
- else
+ } else if (tx_buffer->type == IGB_TYPE_XDP) {
xdp_return_frame(tx_buffer->xdpf);
+ } else if (tx_buffer->type == IGB_TYPE_XSK) {
+ xsk_frames++;
+ goto skip_for_xsk;
+ }
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -8313,6 +8410,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
}
}
+skip_for_xsk:
/* move us one more past the eop_desc for start of next pkt */
tx_buffer++;
tx_desc++;
@@ -8341,6 +8439,21 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
q_vector->tx.total_bytes += total_bytes;
q_vector->tx.total_packets += total_packets;
+ xsk_pool = READ_ONCE(tx_ring->xsk_pool);
+ if (xsk_pool) {
+ if (xsk_frames)
+ xsk_tx_completed(xsk_pool, xsk_frames);
+ if (xsk_uses_need_wakeup(xsk_pool))
+ xsk_set_tx_need_wakeup(xsk_pool);
+
+ nq = txring_txq(tx_ring);
+ __netif_tx_lock(nq, cpu);
+ /* Avoid transmit queue timeout since we share it with the slow path */
+ txq_trans_cond_update(nq);
+ xsk_xmit_done = igb_xmit_zc(tx_ring, xsk_pool);
+ __netif_tx_unlock(nq);
+ }
+
if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
struct e1000_hw *hw = &adapter->hw;
@@ -8403,7 +8516,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
}
}
- return !!budget;
+ return !!budget && xsk_xmit_done;
}
/**
@@ -8594,9 +8707,8 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
return skb;
}
-static struct sk_buff *igb_run_xdp(struct igb_adapter *adapter,
- struct igb_ring *rx_ring,
- struct xdp_buff *xdp)
+static int igb_run_xdp(struct igb_adapter *adapter, struct igb_ring *rx_ring,
+ struct xdp_buff *xdp)
{
int err, result = IGB_XDP_PASS;
struct bpf_prog *xdp_prog;
@@ -8636,7 +8748,7 @@ out_failure:
break;
}
xdp_out:
- return ERR_PTR(-result);
+ return result;
}
static unsigned int igb_rx_frame_truesize(struct igb_ring *rx_ring,
@@ -8762,10 +8874,6 @@ static bool igb_cleanup_headers(struct igb_ring *rx_ring,
union e1000_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
- /* XDP packets use error pointer so abort at this point */
- if (IS_ERR(skb))
- return true;
-
if (unlikely((igb_test_staterr(rx_desc,
E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
struct net_device *netdev = rx_ring->netdev;
@@ -8792,9 +8900,9 @@ static bool igb_cleanup_headers(struct igb_ring *rx_ring,
* order to populate the hash, checksum, VLAN, timestamp, protocol, and
* other fields within the skb.
**/
-static void igb_process_skb_fields(struct igb_ring *rx_ring,
- union e1000_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
+void igb_process_skb_fields(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
{
struct net_device *dev = rx_ring->netdev;
@@ -8876,17 +8984,50 @@ static void igb_put_rx_buffer(struct igb_ring *rx_ring,
rx_buffer->page = NULL;
}
+void igb_finalize_xdp(struct igb_adapter *adapter, unsigned int status)
+{
+ int cpu = smp_processor_id();
+ struct netdev_queue *nq;
+
+ if (status & IGB_XDP_REDIR)
+ xdp_do_flush();
+
+ if (status & IGB_XDP_TX) {
+ struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);
+
+ nq = txring_txq(tx_ring);
+ __netif_tx_lock(nq, cpu);
+ igb_xdp_ring_update_tail(tx_ring);
+ __netif_tx_unlock(nq);
+ }
+}
+
+void igb_update_rx_stats(struct igb_q_vector *q_vector, unsigned int packets,
+ unsigned int bytes)
+{
+ struct igb_ring *ring = q_vector->rx.ring;
+
+ u64_stats_update_begin(&ring->rx_syncp);
+ ring->rx_stats.packets += packets;
+ ring->rx_stats.bytes += bytes;
+ u64_stats_update_end(&ring->rx_syncp);
+
+ q_vector->rx.total_packets += packets;
+ q_vector->rx.total_bytes += bytes;
+}
+
static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
{
+ unsigned int total_bytes = 0, total_packets = 0;
struct igb_adapter *adapter = q_vector->adapter;
struct igb_ring *rx_ring = q_vector->rx.ring;
- struct sk_buff *skb = rx_ring->skb;
- unsigned int total_bytes = 0, total_packets = 0;
u16 cleaned_count = igb_desc_unused(rx_ring);
+ struct sk_buff *skb = rx_ring->skb;
unsigned int xdp_xmit = 0;
struct xdp_buff xdp;
u32 frame_sz = 0;
int rx_buf_pgcnt;
+ int xdp_res = 0;
/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
@@ -8944,12 +9085,10 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
/* At larger PAGE_SIZE, frame_sz depend on len size */
xdp.frame_sz = igb_rx_frame_truesize(rx_ring, size);
#endif
- skb = igb_run_xdp(adapter, rx_ring, &xdp);
+ xdp_res = igb_run_xdp(adapter, rx_ring, &xdp);
}
- if (IS_ERR(skb)) {
- unsigned int xdp_res = -PTR_ERR(skb);
-
+ if (xdp_res) {
if (xdp_res & (IGB_XDP_TX | IGB_XDP_REDIR)) {
xdp_xmit |= xdp_res;
igb_rx_buffer_flip(rx_ring, rx_buffer, size);
@@ -8968,9 +9107,10 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
&xdp, timestamp);
/* exit if we failed to retrieve a buffer */
- if (!skb) {
+ if (!xdp_res && !skb) {
rx_ring->rx_stats.alloc_failed++;
rx_buffer->pagecnt_bias++;
+ set_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
break;
}
@@ -8982,7 +9122,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
continue;
/* verify the packet layout is correct */
- if (igb_cleanup_headers(rx_ring, rx_desc, skb)) {
+ if (xdp_res || igb_cleanup_headers(rx_ring, rx_desc, skb)) {
skb = NULL;
continue;
}
@@ -9005,21 +9145,10 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
/* place incomplete frames back on ring for completion */
rx_ring->skb = skb;
- if (xdp_xmit & IGB_XDP_REDIR)
- xdp_do_flush();
-
- if (xdp_xmit & IGB_XDP_TX) {
- struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);
-
- igb_xdp_ring_update_tail(tx_ring);
- }
+ if (xdp_xmit)
+ igb_finalize_xdp(adapter, xdp_xmit);
- u64_stats_update_begin(&rx_ring->rx_syncp);
- rx_ring->rx_stats.packets += total_packets;
- rx_ring->rx_stats.bytes += total_bytes;
- u64_stats_update_end(&rx_ring->rx_syncp);
- q_vector->rx.total_packets += total_packets;
- q_vector->rx.total_bytes += total_bytes;
+ igb_update_rx_stats(q_vector, total_packets, total_bytes);
if (cleaned_count)
igb_alloc_rx_buffers(rx_ring, cleaned_count);
@@ -9041,6 +9170,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
page = dev_alloc_pages(igb_rx_pg_order(rx_ring));
if (unlikely(!page)) {
rx_ring->rx_stats.alloc_failed++;
+ set_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
return false;
}
@@ -9057,6 +9187,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
__free_pages(page, igb_rx_pg_order(rx_ring));
rx_ring->rx_stats.alloc_failed++;
+ set_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
return false;
}
@@ -9163,6 +9294,10 @@ static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
return -EIO;
break;
case SIOCSMIIREG:
+ if (igb_write_phy_reg(&adapter->hw, data->reg_num & 0x1F,
+ data->val_in))
+ return -EIO;
+ break;
default:
return -EOPNOTSUPP;
}
@@ -9182,10 +9317,6 @@ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
case SIOCGMIIREG:
case SIOCSMIIREG:
return igb_mii_ioctl(netdev, ifr, cmd);
- case SIOCGHWTSTAMP:
- return igb_ptp_get_ts_config(netdev, ifr);
- case SIOCSHWTSTAMP:
- return igb_ptp_set_ts_config(netdev, ifr);
default:
return -EOPNOTSUPP;
}
@@ -9453,12 +9584,12 @@ static void igb_deliver_wake_packet(struct net_device *netdev)
netif_rx(skb);
}
-static int __maybe_unused igb_suspend(struct device *dev)
+static int igb_suspend(struct device *dev)
{
return __igb_shutdown(to_pci_dev(dev), NULL, 0);
}
-static int __maybe_unused __igb_resume(struct device *dev, bool rpm)
+static int __igb_resume(struct device *dev, bool rpm)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -9468,7 +9599,6 @@ static int __maybe_unused __igb_resume(struct device *dev, bool rpm)
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
- pci_save_state(pdev);
if (!pci_device_is_present(pdev))
return -ENODEV;
@@ -9514,12 +9644,12 @@ static int __maybe_unused __igb_resume(struct device *dev, bool rpm)
return err;
}
-static int __maybe_unused igb_resume(struct device *dev)
+static int igb_resume(struct device *dev)
{
return __igb_resume(dev, false);
}
-static int __maybe_unused igb_runtime_idle(struct device *dev)
+static int igb_runtime_idle(struct device *dev)
{
struct net_device *netdev = dev_get_drvdata(dev);
struct igb_adapter *adapter = netdev_priv(netdev);
@@ -9530,12 +9660,12 @@ static int __maybe_unused igb_runtime_idle(struct device *dev)
return -EBUSY;
}
-static int __maybe_unused igb_runtime_suspend(struct device *dev)
+static int igb_runtime_suspend(struct device *dev)
{
return __igb_shutdown(to_pci_dev(dev), NULL, 1);
}
-static int __maybe_unused igb_runtime_resume(struct device *dev)
+static int igb_runtime_resume(struct device *dev)
{
return __igb_resume(dev, true);
}
@@ -9591,8 +9721,11 @@ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
+ rtnl_lock();
if (netif_running(netdev))
igb_down(adapter);
+ rtnl_unlock();
+
pci_disable_device(pdev);
/* Request a slot reset. */
@@ -9620,7 +9753,6 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
} else {
pci_set_master(pdev);
pci_restore_state(pdev);
- pci_save_state(pdev);
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
@@ -9651,12 +9783,21 @@ static void igb_io_resume(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
+ rtnl_lock();
if (netif_running(netdev)) {
+ if (!test_bit(__IGB_DOWN, &adapter->state)) {
+ dev_dbg(&pdev->dev, "Resuming from non-fatal error, do nothing.\n");
+ rtnl_unlock();
+ return;
+ }
+
if (igb_up(adapter)) {
dev_err(&pdev->dev, "igb_up failed after reset\n");
+ rtnl_unlock();
return;
}
}
+ rtnl_unlock();
netif_device_attach(netdev);
@@ -10157,4 +10298,20 @@ static void igb_nfc_filter_restore(struct igb_adapter *adapter)
spin_unlock(&adapter->nfc_lock);
}
+
+static _DEFINE_DEV_PM_OPS(igb_pm_ops, igb_suspend, igb_resume,
+ igb_runtime_suspend, igb_runtime_resume,
+ igb_runtime_idle);
+
+static struct pci_driver igb_driver = {
+ .name = igb_driver_name,
+ .id_table = igb_pci_tbl,
+ .probe = igb_probe,
+ .remove = igb_remove,
+ .driver.pm = pm_ptr(&igb_pm_ops),
+ .shutdown = igb_shutdown,
+ .sriov_configure = igb_pci_sriov_configure,
+ .err_handler = &igb_err_handler
+};
+
/* igb_main.c */
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index f94570556120..bd85d02ecadd 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -73,7 +73,7 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
static void igb_ptp_sdp_init(struct igb_adapter *adapter);
/* SYSTIM read access for the 82576 */
-static u64 igb_ptp_read_82576(const struct cyclecounter *cc)
+static u64 igb_ptp_read_82576(struct cyclecounter *cc)
{
struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
struct e1000_hw *hw = &igb->hw;
@@ -90,7 +90,7 @@ static u64 igb_ptp_read_82576(const struct cyclecounter *cc)
}
/* SYSTIM read access for the 82580 */
-static u64 igb_ptp_read_82580(const struct cyclecounter *cc)
+static u64 igb_ptp_read_82580(struct cyclecounter *cc)
{
struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
struct e1000_hw *hw = &igb->hw;
@@ -502,11 +502,10 @@ static int igb_ptp_feature_enable_82580(struct ptp_clock_info *ptp,
switch (rq->type) {
case PTP_CLK_REQ_EXTTS:
- /* Reject requests with unsupported flags */
- if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_FALLING_EDGE |
- PTP_STRICT_FLAGS))
+ /* Both the rising and falling edge are timestamped */
+ if (rq->extts.flags & PTP_STRICT_FLAGS &&
+ (rq->extts.flags & PTP_ENABLE_FEATURE) &&
+ (rq->extts.flags & PTP_EXTTS_EDGES) != PTP_EXTTS_EDGES)
return -EOPNOTSUPP;
if (on) {
@@ -652,13 +651,6 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
switch (rq->type) {
case PTP_CLK_REQ_EXTTS:
- /* Reject requests with unsupported flags */
- if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_FALLING_EDGE |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
/* Reject requests failing to enable both edges. */
if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
(rq->extts.flags & PTP_ENABLE_FEATURE) &&
@@ -848,14 +840,11 @@ static void igb_ptp_overflow_check(struct work_struct *work)
struct igb_adapter *igb =
container_of(work, struct igb_adapter, ptp_overflow_work.work);
struct timespec64 ts;
- u64 ns;
/* Update the timecounter */
- ns = timecounter_read(&igb->tc);
+ ts = ns_to_timespec64(timecounter_read(&igb->tc));
- ts = ns_to_timespec64(ns);
- pr_debug("igb overflow check at %lld.%09lu\n",
- (long long) ts.tv_sec, ts.tv_nsec);
+ pr_debug("igb overflow check at %ptSp\n", &ts);
schedule_delayed_work(&igb->ptp_overflow_work,
IGB_SYSTIM_OVERFLOW_PERIOD);
@@ -1102,21 +1091,22 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb)
}
/**
- * igb_ptp_get_ts_config - get hardware time stamping config
+ * igb_ptp_hwtstamp_get - get hardware time stamping config
* @netdev: netdev struct
- * @ifr: interface struct
+ * @config: timestamping configuration structure
*
* Get the hwtstamp_config settings to return to the user. Rather than attempt
* to deconstruct the settings from the registers, just return a shadow copy
* of the last known settings.
**/
-int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr)
+int igb_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- struct hwtstamp_config *config = &adapter->tstamp_config;
- return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
- -EFAULT : 0;
+ *config = adapter->tstamp_config;
+
+ return 0;
}
/**
@@ -1137,7 +1127,7 @@ int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr)
* level 2 or 4".
*/
static int igb_ptp_set_timestamp_mode(struct igb_adapter *adapter,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
struct e1000_hw *hw = &adapter->hw;
u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
@@ -1283,30 +1273,26 @@ static int igb_ptp_set_timestamp_mode(struct igb_adapter *adapter,
}
/**
- * igb_ptp_set_ts_config - set hardware time stamping config
+ * igb_ptp_hwtstamp_set - set hardware time stamping config
* @netdev: netdev struct
- * @ifr: interface struct
- *
+ * @config: timestamping configuration structure
+ * @extack: netlink extended ack structure for error reporting
**/
-int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
+int igb_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- struct hwtstamp_config config;
int err;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- err = igb_ptp_set_timestamp_mode(adapter, &config);
+ err = igb_ptp_set_timestamp_mode(adapter, config);
if (err)
return err;
/* save these settings for future reference */
- memcpy(&adapter->tstamp_config, &config,
- sizeof(adapter->tstamp_config));
+ adapter->tstamp_config = *config;
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
/**
@@ -1350,6 +1336,9 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.n_per_out = IGB_N_PEROUT;
adapter->ptp_caps.n_pins = IGB_N_SDP;
adapter->ptp_caps.pps = 0;
+ adapter->ptp_caps.supported_extts_flags = PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS;
adapter->ptp_caps.pin_config = adapter->sdp_config;
adapter->ptp_caps.adjfine = igb_ptp_adjfine_82580;
adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
@@ -1372,6 +1361,9 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.n_ext_ts = IGB_N_EXTTS;
adapter->ptp_caps.n_per_out = IGB_N_PEROUT;
adapter->ptp_caps.n_pins = IGB_N_SDP;
+ adapter->ptp_caps.supported_extts_flags = PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS;
adapter->ptp_caps.pps = 1;
adapter->ptp_caps.pin_config = adapter->sdp_config;
adapter->ptp_caps.adjfine = igb_ptp_adjfine_82580;
diff --git a/drivers/net/ethernet/intel/igb/igb_xsk.c b/drivers/net/ethernet/intel/igb/igb_xsk.c
new file mode 100644
index 000000000000..30ce5fbb5b77
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/igb_xsk.c
@@ -0,0 +1,562 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2018 Intel Corporation. */
+
+#include <linux/bpf_trace.h>
+#include <net/xdp_sock_drv.h>
+#include <net/xdp.h>
+
+#include "e1000_hw.h"
+#include "igb.h"
+
+static int igb_realloc_rx_buffer_info(struct igb_ring *ring, bool pool_present)
+{
+ int size = pool_present ?
+ sizeof(*ring->rx_buffer_info_zc) * ring->count :
+ sizeof(*ring->rx_buffer_info) * ring->count;
+ void *buff_info = vmalloc(size);
+
+ if (!buff_info)
+ return -ENOMEM;
+
+ if (pool_present) {
+ vfree(ring->rx_buffer_info);
+ ring->rx_buffer_info = NULL;
+ ring->rx_buffer_info_zc = buff_info;
+ } else {
+ vfree(ring->rx_buffer_info_zc);
+ ring->rx_buffer_info_zc = NULL;
+ ring->rx_buffer_info = buff_info;
+ }
+
+ return 0;
+}
+
+static void igb_txrx_ring_disable(struct igb_adapter *adapter, u16 qid)
+{
+ struct igb_ring *tx_ring = adapter->tx_ring[qid];
+ struct igb_ring *rx_ring = adapter->rx_ring[qid];
+ struct e1000_hw *hw = &adapter->hw;
+
+ set_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags);
+
+ wr32(E1000_TXDCTL(tx_ring->reg_idx), 0);
+ wr32(E1000_RXDCTL(rx_ring->reg_idx), 0);
+
+ synchronize_net();
+
+ /* Rx/Tx share the same napi context. */
+ napi_disable(&rx_ring->q_vector->napi);
+
+ igb_clean_tx_ring(tx_ring);
+ igb_clean_rx_ring(rx_ring);
+
+ memset(&rx_ring->rx_stats, 0, sizeof(rx_ring->rx_stats));
+ memset(&tx_ring->tx_stats, 0, sizeof(tx_ring->tx_stats));
+}
+
+static void igb_txrx_ring_enable(struct igb_adapter *adapter, u16 qid)
+{
+ struct igb_ring *tx_ring = adapter->tx_ring[qid];
+ struct igb_ring *rx_ring = adapter->rx_ring[qid];
+
+ igb_configure_tx_ring(adapter, tx_ring);
+ igb_configure_rx_ring(adapter, rx_ring);
+
+ synchronize_net();
+
+ clear_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags);
+
+ /* call igb_desc_unused which always leaves
+ * at least 1 descriptor unused to make sure
+ * next_to_use != next_to_clean
+ */
+ if (rx_ring->xsk_pool)
+ igb_alloc_rx_buffers_zc(rx_ring, rx_ring->xsk_pool,
+ igb_desc_unused(rx_ring));
+ else
+ igb_alloc_rx_buffers(rx_ring, igb_desc_unused(rx_ring));
+
+ /* Rx/Tx share the same napi context. */
+ napi_enable(&rx_ring->q_vector->napi);
+}
+
+struct xsk_buff_pool *igb_xsk_pool(struct igb_adapter *adapter,
+ struct igb_ring *ring)
+{
+ int qid = ring->queue_index;
+ struct xsk_buff_pool *pool;
+
+ pool = xsk_get_pool_from_qid(adapter->netdev, qid);
+
+ if (!igb_xdp_is_enabled(adapter))
+ return NULL;
+
+ return (pool && pool->dev) ? pool : NULL;
+}
+
+static int igb_xsk_pool_enable(struct igb_adapter *adapter,
+ struct xsk_buff_pool *pool,
+ u16 qid)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct igb_ring *rx_ring;
+ bool if_running;
+ int err;
+
+ if (qid >= adapter->num_rx_queues)
+ return -EINVAL;
+
+ if (qid >= netdev->real_num_rx_queues ||
+ qid >= netdev->real_num_tx_queues)
+ return -EINVAL;
+
+ err = xsk_pool_dma_map(pool, &adapter->pdev->dev, IGB_RX_DMA_ATTR);
+ if (err)
+ return err;
+
+ rx_ring = adapter->rx_ring[qid];
+ if_running = netif_running(adapter->netdev) && igb_xdp_is_enabled(adapter);
+ if (if_running)
+ igb_txrx_ring_disable(adapter, qid);
+
+ if (if_running) {
+ err = igb_realloc_rx_buffer_info(rx_ring, true);
+ if (!err) {
+ igb_txrx_ring_enable(adapter, qid);
+ /* Kick start the NAPI context so that receiving will start */
+ err = igb_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX);
+ }
+
+ if (err) {
+ xsk_pool_dma_unmap(pool, IGB_RX_DMA_ATTR);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int igb_xsk_pool_disable(struct igb_adapter *adapter, u16 qid)
+{
+ struct xsk_buff_pool *pool;
+ struct igb_ring *rx_ring;
+ bool if_running;
+ int err;
+
+ pool = xsk_get_pool_from_qid(adapter->netdev, qid);
+ if (!pool)
+ return -EINVAL;
+
+ rx_ring = adapter->rx_ring[qid];
+ if_running = netif_running(adapter->netdev) && igb_xdp_is_enabled(adapter);
+ if (if_running)
+ igb_txrx_ring_disable(adapter, qid);
+
+ xsk_pool_dma_unmap(pool, IGB_RX_DMA_ATTR);
+
+ if (if_running) {
+ err = igb_realloc_rx_buffer_info(rx_ring, false);
+ if (err)
+ return err;
+
+ igb_txrx_ring_enable(adapter, qid);
+ }
+
+ return 0;
+}
+
+int igb_xsk_pool_setup(struct igb_adapter *adapter,
+ struct xsk_buff_pool *pool,
+ u16 qid)
+{
+ return pool ? igb_xsk_pool_enable(adapter, pool, qid) :
+ igb_xsk_pool_disable(adapter, qid);
+}
+
+static u16 igb_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
+ union e1000_adv_rx_desc *rx_desc, u16 count)
+{
+ dma_addr_t dma;
+ u16 buffs;
+ int i;
+
+ /* nothing to do */
+ if (!count)
+ return 0;
+
+ buffs = xsk_buff_alloc_batch(pool, xdp, count);
+ for (i = 0; i < buffs; i++) {
+ dma = xsk_buff_xdp_get_dma(*xdp);
+ rx_desc->read.pkt_addr = cpu_to_le64(dma);
+ rx_desc->wb.upper.length = 0;
+
+ rx_desc++;
+ xdp++;
+ }
+
+ return buffs;
+}
+
+bool igb_alloc_rx_buffers_zc(struct igb_ring *rx_ring,
+ struct xsk_buff_pool *xsk_pool, u16 count)
+{
+ u32 nb_buffs_extra = 0, nb_buffs = 0;
+ union e1000_adv_rx_desc *rx_desc;
+ u16 ntu = rx_ring->next_to_use;
+ u16 total_count = count;
+ struct xdp_buff **xdp;
+
+ rx_desc = IGB_RX_DESC(rx_ring, ntu);
+ xdp = &rx_ring->rx_buffer_info_zc[ntu];
+
+ if (ntu + count >= rx_ring->count) {
+ nb_buffs_extra = igb_fill_rx_descs(xsk_pool, xdp, rx_desc,
+ rx_ring->count - ntu);
+ if (nb_buffs_extra != rx_ring->count - ntu) {
+ ntu += nb_buffs_extra;
+ goto exit;
+ }
+ rx_desc = IGB_RX_DESC(rx_ring, 0);
+ xdp = rx_ring->rx_buffer_info_zc;
+ ntu = 0;
+ count -= nb_buffs_extra;
+ }
+
+ nb_buffs = igb_fill_rx_descs(xsk_pool, xdp, rx_desc, count);
+ ntu += nb_buffs;
+ if (ntu == rx_ring->count)
+ ntu = 0;
+
+ /* clear the length for the next_to_use descriptor */
+ rx_desc = IGB_RX_DESC(rx_ring, ntu);
+ rx_desc->wb.upper.length = 0;
+
+exit:
+ if (rx_ring->next_to_use != ntu) {
+ rx_ring->next_to_use = ntu;
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(ntu, rx_ring->tail);
+ }
+
+ return total_count == (nb_buffs + nb_buffs_extra);
+}
+
+void igb_clean_rx_ring_zc(struct igb_ring *rx_ring)
+{
+ u16 ntc = rx_ring->next_to_clean;
+ u16 ntu = rx_ring->next_to_use;
+
+ while (ntc != ntu) {
+ struct xdp_buff *xdp = rx_ring->rx_buffer_info_zc[ntc];
+
+ xsk_buff_free(xdp);
+ ntc++;
+ if (ntc >= rx_ring->count)
+ ntc = 0;
+ }
+}
+
+static struct sk_buff *igb_construct_skb_zc(struct igb_ring *rx_ring,
+ struct xdp_buff *xdp,
+ ktime_t timestamp)
+{
+ unsigned int totalsize = xdp->data_end - xdp->data_meta;
+ unsigned int metasize = xdp->data - xdp->data_meta;
+ struct sk_buff *skb;
+
+ net_prefetch(xdp->data_meta);
+
+ /* allocate a skb to store the frags */
+ skb = napi_alloc_skb(&rx_ring->q_vector->napi, totalsize);
+ if (unlikely(!skb))
+ return NULL;
+
+ if (timestamp)
+ skb_hwtstamps(skb)->hwtstamp = timestamp;
+
+ memcpy(__skb_put(skb, totalsize), xdp->data_meta,
+ ALIGN(totalsize, sizeof(long)));
+
+ if (metasize) {
+ skb_metadata_set(skb, metasize);
+ __skb_pull(skb, metasize);
+ }
+
+ return skb;
+}
+
+static int igb_run_xdp_zc(struct igb_adapter *adapter, struct igb_ring *rx_ring,
+ struct xdp_buff *xdp, struct xsk_buff_pool *xsk_pool,
+ struct bpf_prog *xdp_prog)
+{
+ int err, result = IGB_XDP_PASS;
+ u32 act;
+
+ prefetchw(xdp->data_hard_start); /* xdp_frame write */
+
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
+
+ if (likely(act == XDP_REDIRECT)) {
+ err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
+ if (!err)
+ return IGB_XDP_REDIR;
+
+ if (xsk_uses_need_wakeup(xsk_pool) &&
+ err == -ENOBUFS)
+ result = IGB_XDP_EXIT;
+ else
+ result = IGB_XDP_CONSUMED;
+ goto out_failure;
+ }
+
+ switch (act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ result = igb_xdp_xmit_back(adapter, xdp);
+ if (result == IGB_XDP_CONSUMED)
+ goto out_failure;
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(adapter->netdev, xdp_prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+out_failure:
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+ fallthrough;
+ case XDP_DROP:
+ result = IGB_XDP_CONSUMED;
+ break;
+ }
+
+ return result;
+}
+
+int igb_clean_rx_irq_zc(struct igb_q_vector *q_vector,
+ struct xsk_buff_pool *xsk_pool, const int budget)
+{
+ struct igb_adapter *adapter = q_vector->adapter;
+ unsigned int total_bytes = 0, total_packets = 0;
+ struct igb_ring *rx_ring = q_vector->rx.ring;
+ u32 ntc = rx_ring->next_to_clean;
+ struct bpf_prog *xdp_prog;
+ unsigned int xdp_xmit = 0;
+ bool failure = false;
+ u16 entries_to_alloc;
+ struct sk_buff *skb;
+
+ /* xdp_prog cannot be NULL in the ZC path */
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+
+ while (likely(total_packets < budget)) {
+ union e1000_adv_rx_desc *rx_desc;
+ ktime_t timestamp = 0;
+ struct xdp_buff *xdp;
+ unsigned int size;
+ int xdp_res = 0;
+
+ rx_desc = IGB_RX_DESC(rx_ring, ntc);
+ size = le16_to_cpu(rx_desc->wb.upper.length);
+ if (!size)
+ break;
+
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we know the
+ * descriptor has been written back
+ */
+ dma_rmb();
+
+ xdp = rx_ring->rx_buffer_info_zc[ntc];
+ xsk_buff_set_size(xdp, size);
+ xsk_buff_dma_sync_for_cpu(xdp);
+
+ /* pull rx packet timestamp if available and valid */
+ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+ int ts_hdr_len;
+
+ ts_hdr_len = igb_ptp_rx_pktstamp(rx_ring->q_vector,
+ xdp->data,
+ &timestamp);
+
+ xdp->data += ts_hdr_len;
+ xdp->data_meta += ts_hdr_len;
+ size -= ts_hdr_len;
+ }
+
+ xdp_res = igb_run_xdp_zc(adapter, rx_ring, xdp, xsk_pool,
+ xdp_prog);
+
+ if (xdp_res) {
+ if (likely(xdp_res & (IGB_XDP_TX | IGB_XDP_REDIR))) {
+ xdp_xmit |= xdp_res;
+ } else if (xdp_res == IGB_XDP_EXIT) {
+ failure = true;
+ break;
+ } else if (xdp_res == IGB_XDP_CONSUMED) {
+ xsk_buff_free(xdp);
+ }
+
+ total_packets++;
+ total_bytes += size;
+ ntc++;
+ if (ntc == rx_ring->count)
+ ntc = 0;
+ continue;
+ }
+
+ skb = igb_construct_skb_zc(rx_ring, xdp, timestamp);
+
+ /* exit if we failed to retrieve a buffer */
+ if (!skb) {
+ rx_ring->rx_stats.alloc_failed++;
+ set_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
+ break;
+ }
+
+ xsk_buff_free(xdp);
+ ntc++;
+ if (ntc == rx_ring->count)
+ ntc = 0;
+
+ if (eth_skb_pad(skb))
+ continue;
+
+ /* probably a little skewed due to removing CRC */
+ total_bytes += skb->len;
+
+ /* populate checksum, timestamp, VLAN, and protocol */
+ igb_process_skb_fields(rx_ring, rx_desc, skb);
+
+ napi_gro_receive(&q_vector->napi, skb);
+
+ /* update budget accounting */
+ total_packets++;
+ }
+
+ rx_ring->next_to_clean = ntc;
+
+ if (xdp_xmit)
+ igb_finalize_xdp(adapter, xdp_xmit);
+
+ igb_update_rx_stats(q_vector, total_packets, total_bytes);
+
+ entries_to_alloc = igb_desc_unused(rx_ring);
+ if (entries_to_alloc >= IGB_RX_BUFFER_WRITE)
+ failure |= !igb_alloc_rx_buffers_zc(rx_ring, xsk_pool,
+ entries_to_alloc);
+
+ if (xsk_uses_need_wakeup(xsk_pool)) {
+ if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
+ xsk_set_rx_need_wakeup(xsk_pool);
+ else
+ xsk_clear_rx_need_wakeup(xsk_pool);
+
+ return (int)total_packets;
+ }
+ return failure ? budget : (int)total_packets;
+}
+
+bool igb_xmit_zc(struct igb_ring *tx_ring, struct xsk_buff_pool *xsk_pool)
+{
+ unsigned int budget = igb_desc_unused(tx_ring);
+ u32 cmd_type, olinfo_status, nb_pkts, i = 0;
+ struct xdp_desc *descs = xsk_pool->tx_descs;
+ union e1000_adv_tx_desc *tx_desc = NULL;
+ struct igb_tx_buffer *tx_buffer_info;
+ unsigned int total_bytes = 0;
+ dma_addr_t dma;
+
+ if (!netif_carrier_ok(tx_ring->netdev))
+ return true;
+
+ if (test_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags))
+ return true;
+
+ nb_pkts = xsk_tx_peek_release_desc_batch(xsk_pool, budget);
+ if (!nb_pkts)
+ return true;
+
+ for (; i < nb_pkts; i++) {
+ dma = xsk_buff_raw_get_dma(xsk_pool, descs[i].addr);
+ xsk_buff_raw_dma_sync_for_device(xsk_pool, dma, descs[i].len);
+
+ tx_buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+ tx_buffer_info->bytecount = descs[i].len;
+ tx_buffer_info->type = IGB_TYPE_XSK;
+ tx_buffer_info->xdpf = NULL;
+ tx_buffer_info->gso_segs = 1;
+ tx_buffer_info->time_stamp = jiffies;
+
+ tx_desc = IGB_TX_DESC(tx_ring, tx_ring->next_to_use);
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+
+ /* put descriptor type bits */
+ cmd_type = E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_DEXT |
+ E1000_ADVTXD_DCMD_IFCS;
+ olinfo_status = descs[i].len << E1000_ADVTXD_PAYLEN_SHIFT;
+
+ /* FIXME: This sets the Report Status (RS) bit for every
+ * descriptor. One nice to have optimization would be to set it
+ * only for the last descriptor in the whole batch. See Intel
+ * ice driver for an example on how to do it.
+ */
+ cmd_type |= descs[i].len | IGB_TXD_DCMD;
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+
+ total_bytes += descs[i].len;
+
+ tx_ring->next_to_use++;
+ tx_buffer_info->next_to_watch = tx_desc;
+ if (tx_ring->next_to_use == tx_ring->count)
+ tx_ring->next_to_use = 0;
+ }
+
+ netdev_tx_sent_queue(txring_txq(tx_ring), total_bytes);
+ igb_xdp_ring_update_tail(tx_ring);
+
+ return nb_pkts < budget;
+}
+
+int igb_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct igb_ring *ring;
+ u32 eics = 0;
+
+ if (test_bit(__IGB_DOWN, &adapter->state))
+ return -ENETDOWN;
+
+ if (!igb_xdp_is_enabled(adapter))
+ return -EINVAL;
+
+ if (qid >= adapter->num_tx_queues)
+ return -EINVAL;
+
+ ring = adapter->tx_ring[qid];
+
+ if (test_bit(IGB_RING_FLAG_TX_DISABLED, &ring->flags))
+ return -ENETDOWN;
+
+ if (!READ_ONCE(ring->xsk_pool))
+ return -EINVAL;
+
+ if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {
+ /* Cause software interrupt */
+ if (adapter->flags & IGB_FLAG_HAS_MSIX) {
+ eics |= ring->q_vector->eims_value;
+ wr32(E1000_EICS, eics);
+ } else {
+ wr32(E1000_ICS, E1000_ICS_RXDMT0);
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/intel/igbvf/Makefile b/drivers/net/ethernet/intel/igbvf/Makefile
index afd3e36eae75..902711d5e691 100644
--- a/drivers/net/ethernet/intel/igbvf/Makefile
+++ b/drivers/net/ethernet/intel/igbvf/Makefile
@@ -6,8 +6,4 @@
obj-$(CONFIG_IGBVF) += igbvf.o
-igbvf-objs := vf.o \
- mbx.o \
- ethtool.o \
- netdev.o
-
+igbvf-y := vf.o mbx.o ethtool.o netdev.o
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index 83b97989a6bd..9c08ebfad804 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -30,10 +30,12 @@ static const struct igbvf_stats igbvf_gstrings_stats[] = {
{ "rx_bytes", IGBVF_STAT(stats.gorc, stats.base_gorc) },
{ "tx_bytes", IGBVF_STAT(stats.gotc, stats.base_gotc) },
{ "multicast", IGBVF_STAT(stats.mprc, stats.base_mprc) },
- { "lbrx_bytes", IGBVF_STAT(stats.gorlbc, stats.base_gorlbc) },
{ "lbrx_packets", IGBVF_STAT(stats.gprlbc, stats.base_gprlbc) },
+ { "lbtx_packets", IGBVF_STAT(stats.gptlbc, stats.base_gptlbc) },
+ { "lbrx_bytes", IGBVF_STAT(stats.gorlbc, stats.base_gorlbc) },
+ { "lbtx_bytes", IGBVF_STAT(stats.gotlbc, stats.base_gotlbc) },
{ "tx_restart_queue", IGBVF_STAT(restart_queue, zero_base) },
- { "rx_long_byte_count", IGBVF_STAT(stats.gorc, stats.base_gorc) },
+ { "tx_timeout_count", IGBVF_STAT(tx_timeout_count, zero_base) },
{ "rx_csum_offload_good", IGBVF_STAT(hw_csum_good, zero_base) },
{ "rx_csum_offload_errors", IGBVF_STAT(hw_csum_err, zero_base) },
{ "rx_header_split", IGBVF_STAT(rx_hdr_split, zero_base) },
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
index 7b83678ba83a..da8e1fd47301 100644
--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
@@ -154,7 +154,6 @@ struct igbvf_ring {
/* board specific private data structure */
struct igbvf_adapter {
struct timer_list watchdog_timer;
- struct timer_list blink_timer;
struct work_struct reset_task;
struct work_struct watchdog_task;
@@ -162,15 +161,10 @@ struct igbvf_adapter {
const struct igbvf_info *ei;
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
- u32 bd_number;
u32 rx_buffer_len;
- u32 polling_interval;
- u16 mng_vlan_id;
u16 link_speed;
u16 link_duplex;
- spinlock_t tx_queue_lock; /* prevent concurrent tail updates */
-
/* track device up/down/testing state */
unsigned long state;
@@ -185,9 +179,6 @@ struct igbvf_adapter {
unsigned int restart_queue;
u32 txd_cmd;
- u32 tx_int_delay;
- u32 tx_abs_int_delay;
-
unsigned int total_tx_bytes;
unsigned int total_tx_packets;
unsigned int total_rx_bytes;
@@ -195,23 +186,15 @@ struct igbvf_adapter {
/* Tx stats */
u32 tx_timeout_count;
- u32 tx_fifo_head;
- u32 tx_head_addr;
- u32 tx_fifo_size;
- u32 tx_dma_failed;
/* Rx */
struct igbvf_ring *rx_ring;
- u32 rx_int_delay;
- u32 rx_abs_int_delay;
-
/* Rx stats */
u64 hw_csum_err;
u64 hw_csum_good;
u64 rx_hdr_split;
u32 alloc_rx_buff_failed;
- u32 rx_dma_failed;
unsigned int rx_ps_hdr_size;
u32 max_frame_size;
@@ -220,7 +203,6 @@ struct igbvf_adapter {
/* OS defined structs */
struct net_device *netdev;
struct pci_dev *pdev;
- spinlock_t stats_lock; /* prevent concurrent stats updates */
/* structs defined in e1000_hw.h */
struct e1000_hw hw;
@@ -232,26 +214,14 @@ struct igbvf_adapter {
struct e1000_vf_stats stats;
u64 zero_base;
- struct igbvf_ring test_tx_ring;
- struct igbvf_ring test_rx_ring;
- u32 test_icr;
-
u32 msg_enable;
struct msix_entry *msix_entries;
- int int_mode;
u32 eims_enable_mask;
u32 eims_other;
- u32 int_counter0;
- u32 int_counter1;
- u32 eeprom_wol;
u32 wol;
u32 pba;
- bool fc_autoneg;
-
- unsigned long led_status;
-
unsigned int flags;
unsigned long last_reset;
};
@@ -282,7 +252,6 @@ enum igbvf_state_t {
extern char igbvf_driver_name[];
-void igbvf_check_options(struct igbvf_adapter *);
void igbvf_set_ethtool_ops(struct net_device *);
int igbvf_up(struct igbvf_adapter *);
diff --git a/drivers/net/ethernet/intel/igbvf/mbx.h b/drivers/net/ethernet/intel/igbvf/mbx.h
index e5b31818d565..7637d21445bf 100644
--- a/drivers/net/ethernet/intel/igbvf/mbx.h
+++ b/drivers/net/ethernet/intel/igbvf/mbx.h
@@ -49,7 +49,6 @@
#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */
-void e1000_init_mbx_ops_generic(struct e1000_hw *hw);
s32 e1000_init_mbx_params_vf(struct e1000_hw *);
#endif /* _E1000_MBX_H_ */
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index b0cf310e6f7b..ac57212ab02b 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -855,8 +855,6 @@ static irqreturn_t igbvf_msix_other(int irq, void *data)
struct igbvf_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- adapter->int_counter1++;
-
hw->mac.get_link_status = 1;
if (!test_bit(__IGBVF_DOWN, &adapter->state))
mod_timer(&adapter->watchdog_timer, jiffies + 1);
@@ -899,8 +897,6 @@ static irqreturn_t igbvf_intr_msix_rx(int irq, void *data)
struct net_device *netdev = data;
struct igbvf_adapter *adapter = netdev_priv(netdev);
- adapter->int_counter0++;
-
/* Write the ITR value calculated at the end of the
* previous interrupt.
*/
@@ -1239,7 +1235,7 @@ static int igbvf_vlan_rx_add_vid(struct net_device *netdev,
spin_lock_bh(&hw->mbx_lock);
if (hw->mac.ops.set_vfta(hw, vid, true)) {
- dev_warn(&adapter->pdev->dev, "Vlan id %d\n is not added", vid);
+ dev_warn(&adapter->pdev->dev, "Vlan id %d is not added\n", vid);
spin_unlock_bh(&hw->mbx_lock);
return -EINVAL;
}
@@ -1592,7 +1588,7 @@ void igbvf_down(struct igbvf_adapter *adapter)
igbvf_irq_disable(adapter);
- del_timer_sync(&adapter->watchdog_timer);
+ timer_delete_sync(&adapter->watchdog_timer);
/* record the stats before reset*/
igbvf_update_stats(adapter);
@@ -1633,10 +1629,6 @@ static int igbvf_sw_init(struct igbvf_adapter *adapter)
adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
- adapter->tx_int_delay = 8;
- adapter->tx_abs_int_delay = 32;
- adapter->rx_int_delay = 0;
- adapter->rx_abs_int_delay = 8;
adapter->requested_itr = 3;
adapter->current_itr = IGBVF_START_ITR;
@@ -1656,12 +1648,9 @@ static int igbvf_sw_init(struct igbvf_adapter *adapter)
if (igbvf_alloc_queues(adapter))
return -ENOMEM;
- spin_lock_init(&adapter->tx_queue_lock);
-
/* Explicitly disable IRQ since the NIC can be in any state. */
igbvf_irq_disable(adapter);
- spin_lock_init(&adapter->stats_lock);
spin_lock_init(&adapter->hw.mbx_lock);
set_bit(__IGBVF_DOWN, &adapter->state);
@@ -1903,7 +1892,8 @@ static bool igbvf_has_link(struct igbvf_adapter *adapter)
**/
static void igbvf_watchdog(struct timer_list *t)
{
- struct igbvf_adapter *adapter = from_timer(adapter, t, watchdog_timer);
+ struct igbvf_adapter *adapter = timer_container_of(adapter, t,
+ watchdog_timer);
/* Do the rest outside of interrupt context */
schedule_work(&adapter->watchdog_task);
@@ -2434,7 +2424,7 @@ static int igbvf_change_mtu(struct net_device *netdev, int new_mtu)
netdev_dbg(netdev, "changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (netif_running(netdev))
igbvf_up(adapter);
@@ -2470,7 +2460,7 @@ static int igbvf_suspend(struct device *dev_d)
return 0;
}
-static int __maybe_unused igbvf_resume(struct device *dev_d)
+static int igbvf_resume(struct device *dev_d)
{
struct pci_dev *pdev = to_pci_dev(dev_d);
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -2714,7 +2704,6 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct igbvf_adapter *adapter;
struct e1000_hw *hw;
const struct igbvf_info *ei = igbvf_info_tbl[ent->driver_data];
- static int cards_found;
int err;
err = pci_enable_device_mem(pdev);
@@ -2786,8 +2775,6 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->watchdog_timeo = 5 * HZ;
strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
- adapter->bd_number = cards_found++;
-
netdev->hw_features = NETIF_F_SG |
NETIF_F_TSO |
NETIF_F_TSO6 |
@@ -2915,7 +2902,7 @@ static void igbvf_remove(struct pci_dev *pdev)
* disable it from being rescheduled.
*/
set_bit(__IGBVF_DOWN, &adapter->state);
- del_timer_sync(&adapter->watchdog_timer);
+ timer_delete_sync(&adapter->watchdog_timer);
cancel_work_sync(&adapter->reset_task);
cancel_work_sync(&adapter->watchdog_task);
@@ -2957,7 +2944,7 @@ static const struct pci_device_id igbvf_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl);
-static SIMPLE_DEV_PM_OPS(igbvf_pm_ops, igbvf_suspend, igbvf_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(igbvf_pm_ops, igbvf_suspend, igbvf_resume);
/* PCI Device API Driver */
static struct pci_driver igbvf_driver = {
@@ -2965,7 +2952,7 @@ static struct pci_driver igbvf_driver = {
.id_table = igbvf_pci_tbl,
.probe = igbvf_probe,
.remove = igbvf_remove,
- .driver.pm = &igbvf_pm_ops,
+ .driver.pm = pm_sleep_ptr(&igbvf_pm_ops),
.shutdown = igbvf_shutdown,
.err_handler = &igbvf_err_handler
};
@@ -3001,7 +2988,6 @@ static void __exit igbvf_exit_module(void)
}
module_exit(igbvf_exit_module);
-MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/intel/igc/Makefile b/drivers/net/ethernet/intel/igc/Makefile
index ebffd3054285..efc5e7983dad 100644
--- a/drivers/net/ethernet/intel/igc/Makefile
+++ b/drivers/net/ethernet/intel/igc/Makefile
@@ -6,7 +6,7 @@
#
obj-$(CONFIG_IGC) += igc.o
-igc-$(CONFIG_IGC_LEDS) += igc_leds.o
-igc-objs := igc_main.o igc_mac.o igc_i225.o igc_base.o igc_nvm.o igc_phy.o \
-igc_diag.o igc_ethtool.o igc_ptp.o igc_dump.o igc_tsn.o igc_xdp.o
+igc-y := igc_main.o igc_mac.o igc_i225.o igc_base.o igc_nvm.o igc_phy.o \
+ igc_diag.o igc_ethtool.o igc_ptp.o igc_dump.o igc_tsn.o igc_xdp.o
+igc-$(CONFIG_IGC_LEDS) += igc_leds.o
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 90316dc58630..a427f05814c1 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -40,6 +40,12 @@ void igc_ethtool_set_ops(struct net_device *);
#define IGC_MAX_TX_TSTAMP_REGS 4
+struct igc_fpe_t {
+ struct ethtool_mmsv mmsv;
+ u32 tx_min_frag_size;
+ bool tx_enabled;
+};
+
enum igc_mac_filter_type {
IGC_MAC_FILTER_TYPE_DST = 0,
IGC_MAC_FILTER_TYPE_SRC
@@ -72,13 +78,46 @@ struct igc_rx_packet_stats {
u64 other_packets;
};
+enum igc_tx_buffer_type {
+ IGC_TX_BUFFER_TYPE_SKB,
+ IGC_TX_BUFFER_TYPE_XDP,
+ IGC_TX_BUFFER_TYPE_XSK,
+};
+
+/* wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer
+ */
+struct igc_tx_buffer {
+ union igc_adv_tx_desc *next_to_watch;
+ unsigned long time_stamp;
+ enum igc_tx_buffer_type type;
+ union {
+ struct sk_buff *skb;
+ struct xdp_frame *xdpf;
+ };
+ unsigned int bytecount;
+ u16 gso_segs;
+ __be16 protocol;
+
+ DEFINE_DMA_UNMAP_ADDR(dma);
+ DEFINE_DMA_UNMAP_LEN(len);
+ u32 tx_flags;
+ bool xsk_pending_ts;
+};
+
struct igc_tx_timestamp_request {
- struct sk_buff *skb; /* reference to the packet being timestamped */
+ union { /* reference to the packet being timestamped */
+ struct sk_buff *skb;
+ struct igc_tx_buffer *xsk_tx_buffer;
+ };
+ enum igc_tx_buffer_type buffer_type;
unsigned long start; /* when the tstamp request started (jiffies) */
u32 mask; /* _TSYNCTXCTL_TXTT_{X} bit for this request */
u32 regl; /* which TXSTMPL_{X} register should be used */
u32 regh; /* which TXSTMPH_{X} register should be used */
u32 flags; /* flags that should be added to the tx_buffer */
+ u8 xsk_queue_index; /* Tx queue which requested the timestamp */
+ struct xsk_tx_metadata_compl xsk_meta; /* ref to xsk Tx metadata */
};
struct igc_inline_rx_tstamps {
@@ -125,6 +164,7 @@ struct igc_ring {
bool launchtime_enable; /* true if LaunchTime is enabled */
ktime_t last_tx_cycle; /* end of the cycle with a launchtime transmission */
ktime_t last_ff_cycle; /* Last cycle with an active first flag */
+ bool preemptible; /* True if preemptible queue, false if express queue */
u32 start_time;
u32 end_time;
@@ -169,7 +209,6 @@ struct igc_adapter {
struct net_device *netdev;
struct ethtool_keee eee;
- u16 eee_advert;
unsigned long state;
unsigned int flags;
@@ -227,6 +266,10 @@ struct igc_adapter {
*/
spinlock_t qbv_tx_lock;
+ bool strict_priority_enable;
+ u8 num_tc;
+ u16 queue_per_tc[IGC_MAX_TX_QUEUES];
+
/* OS defined structs */
struct pci_dev *pdev;
/* lock for statistics */
@@ -272,7 +315,7 @@ struct igc_adapter {
*/
spinlock_t ptp_tx_lock;
struct igc_tx_timestamp_request tx_tstamp[IGC_MAX_TX_TSTAMP_REGS];
- struct hwtstamp_config tstamp_config;
+ struct kernel_hwtstamp_config tstamp_config;
unsigned int ptp_flags;
/* System time value lock */
spinlock_t tmreg_lock;
@@ -283,6 +326,7 @@ struct igc_adapter {
struct timespec64 prev_ptp_time; /* Pre-reset PTP clock */
ktime_t ptp_reset_start; /* Reset time in clock mono */
struct system_time_snapshot snapshot;
+ struct mutex ptm_lock; /* Only allow one PTM transaction at a time */
char fw_version[32];
@@ -296,8 +340,12 @@ struct igc_adapter {
struct timespec64 period;
} perout[IGC_N_PEROUT];
+ struct igc_fpe_t fpe;
+
/* LEDs */
struct mutex led_mutex;
+ struct igc_led_classdev *leds;
+ bool leds_available;
};
void igc_up(struct igc_adapter *adapter);
@@ -322,6 +370,9 @@ void igc_disable_tx_ring(struct igc_ring *ring);
void igc_enable_tx_ring(struct igc_ring *ring);
int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags);
+/* AF_XDP TX metadata operations */
+extern const struct xsk_tx_metadata_ops igc_xsk_tx_metadata_ops;
+
/* igc_dump declarations */
void igc_rings_dump(struct igc_adapter *adapter);
void igc_regs_dump(struct igc_adapter *adapter);
@@ -346,17 +397,16 @@ extern char igc_driver_name[];
#define IGC_FLAG_RX_LEGACY BIT(16)
#define IGC_FLAG_TSN_QBV_ENABLED BIT(17)
#define IGC_FLAG_TSN_QAV_ENABLED BIT(18)
+#define IGC_FLAG_TSN_PREEMPT_ENABLED BIT(19)
+#define IGC_FLAG_TSN_REVERSE_TXQ_PRIO BIT(20)
-#define IGC_FLAG_TSN_ANY_ENABLED \
- (IGC_FLAG_TSN_QBV_ENABLED | IGC_FLAG_TSN_QAV_ENABLED)
+#define IGC_FLAG_TSN_ANY_ENABLED \
+ (IGC_FLAG_TSN_QBV_ENABLED | IGC_FLAG_TSN_QAV_ENABLED | \
+ IGC_FLAG_TSN_PREEMPT_ENABLED)
#define IGC_FLAG_RSS_FIELD_IPV4_UDP BIT(6)
#define IGC_FLAG_RSS_FIELD_IPV6_UDP BIT(7)
-#define IGC_MRQC_ENABLE_RSS_MQ 0x00000002
-#define IGC_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
-#define IGC_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
-
/* RX-desc Write-Back format RSS Type's */
enum igc_rss_type_num {
IGC_RSS_TYPE_NO_HASH = 0,
@@ -435,12 +485,30 @@ static inline u32 igc_rss_type(const union igc_adv_rx_desc *rx_desc)
* descriptors until either it has this many to write back, or the
* ITR timer expires.
*/
-#define IGC_RX_PTHRESH 8
-#define IGC_RX_HTHRESH 8
-#define IGC_TX_PTHRESH 8
-#define IGC_TX_HTHRESH 1
-#define IGC_RX_WTHRESH 4
-#define IGC_TX_WTHRESH 16
+#define IGC_RXDCTL_PTHRESH 8
+#define IGC_RXDCTL_HTHRESH 8
+#define IGC_RXDCTL_WTHRESH 4
+/* Ena specific Rx Queue */
+#define IGC_RXDCTL_QUEUE_ENABLE 0x02000000
+/* Receive Software Flush */
+#define IGC_RXDCTL_SWFLUSH 0x04000000
+
+#define IGC_TXDCTL_PTHRESH_MASK GENMASK(4, 0)
+#define IGC_TXDCTL_HTHRESH_MASK GENMASK(12, 8)
+#define IGC_TXDCTL_WTHRESH_MASK GENMASK(20, 16)
+#define IGC_TXDCTL_QUEUE_ENABLE_MASK GENMASK(25, 25)
+#define IGC_TXDCTL_SWFLUSH_MASK GENMASK(26, 26)
+#define IGC_TXDCTL_PRIORITY_MASK GENMASK(27, 27)
+
+#define IGC_TXDCTL_PTHRESH(x) FIELD_PREP(IGC_TXDCTL_PTHRESH_MASK, (x))
+#define IGC_TXDCTL_HTHRESH(x) FIELD_PREP(IGC_TXDCTL_HTHRESH_MASK, (x))
+#define IGC_TXDCTL_WTHRESH(x) FIELD_PREP(IGC_TXDCTL_WTHRESH_MASK, (x))
+/* Ena specific Tx Queue */
+#define IGC_TXDCTL_QUEUE_ENABLE FIELD_PREP(IGC_TXDCTL_QUEUE_ENABLE_MASK, 1)
+/* Transmit Software Flush */
+#define IGC_TXDCTL_SWFLUSH FIELD_PREP(IGC_TXDCTL_SWFLUSH_MASK, 1)
+#define IGC_TXDCTL_PRIORITY(x) FIELD_PREP(IGC_TXDCTL_PRIORITY_MASK, (x))
+#define IGC_TXDCTL_PRIORITY_HIGH IGC_TXDCTL_PRIORITY(1)
#define IGC_RX_DMA_ATTR \
(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
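As a hedged illustration of the FIELD_PREP()-based TXDCTL helpers introduced above, a threshold/enable value can be composed field by field, mirroring the defaults programmed later in igc_configure_tx_ring(); igc_example_txdctl() is a name invented for this sketch only:

#include <linux/bitfield.h>

/* Illustrative only, not part of the patch: pack the prefetch, host and
 * write-back thresholds plus the queue-enable bit into one TXDCTL value.
 * IGC_TXDCTL_QUEUE_ENABLE expands to bit 25, i.e. the same 0x02000000
 * value carried by the removed hard-coded define.
 */
static u32 igc_example_txdctl(void)
{
	return IGC_TXDCTL_PTHRESH(8) | IGC_TXDCTL_HTHRESH(1) |
	       IGC_TXDCTL_WTHRESH(16) | IGC_TXDCTL_QUEUE_ENABLE;
}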
@@ -507,32 +575,6 @@ enum igc_boards {
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGC_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
-enum igc_tx_buffer_type {
- IGC_TX_BUFFER_TYPE_SKB,
- IGC_TX_BUFFER_TYPE_XDP,
- IGC_TX_BUFFER_TYPE_XSK,
-};
-
-/* wrapper around a pointer to a socket buffer,
- * so a DMA handle can be stored along with the buffer
- */
-struct igc_tx_buffer {
- union igc_adv_tx_desc *next_to_watch;
- unsigned long time_stamp;
- enum igc_tx_buffer_type type;
- union {
- struct sk_buff *skb;
- struct xdp_frame *xdpf;
- };
- unsigned int bytecount;
- u16 gso_segs;
- __be16 protocol;
-
- DEFINE_DMA_UNMAP_ADDR(dma);
- DEFINE_DMA_UNMAP_LEN(len);
- u32 tx_flags;
-};
-
struct igc_rx_buffer {
union {
struct {
@@ -556,6 +598,14 @@ struct igc_xdp_buff {
struct igc_inline_rx_tstamps *rx_ts; /* data indication bit IGC_RXDADV_STAT_TSIP */
};
+struct igc_metadata_request {
+ struct igc_tx_buffer *tx_buffer;
+ struct xsk_tx_metadata *meta;
+ struct igc_ring *tx_ring;
+ u32 cmd_type;
+ u16 used_desc;
+};
+
struct igc_q_vector {
struct igc_adapter *adapter; /* backlink */
void __iomem *itr_register;
@@ -582,6 +632,7 @@ enum igc_filter_match_flags {
IGC_FILTER_FLAG_DST_MAC_ADDR = BIT(3),
IGC_FILTER_FLAG_USER_DATA = BIT(4),
IGC_FILTER_FLAG_VLAN_ETYPE = BIT(5),
+ IGC_FILTER_FLAG_DEFAULT_QUEUE = BIT(6),
};
struct igc_nfc_filter {
@@ -609,10 +660,14 @@ struct igc_nfc_rule {
bool flex;
};
-/* IGC supports a total of 32 NFC rules: 16 MAC address based, 8 VLAN priority
- * based, 8 ethertype based and 32 Flex filter based rules.
+/* IGC supports a total of 65 NFC rules, listed below in order of priority:
+ * - 16 MAC address based filtering rules (highest priority)
+ * - 8 ethertype based filtering rules
+ * - 32 Flex filter based filtering rules
+ * - 8 VLAN priority based filtering rules
+ * - 1 default queue rule (lowest priority)
*/
-#define IGC_MAX_RXNFC_RULES 64
+#define IGC_MAX_RXNFC_RULES 65
struct igc_flex_filter {
u8 index;
@@ -664,6 +719,7 @@ enum igc_ring_flags_t {
IGC_RING_FLAG_TX_DETECT_HANG,
IGC_RING_FLAG_AF_XDP_ZC,
IGC_RING_FLAG_TX_HWTSTAMP,
+ IGC_RING_FLAG_RX_ALLOC_FAILED,
};
#define ring_uses_large_buffer(ring) \
@@ -710,19 +766,26 @@ struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter,
u32 location);
int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule);
void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule);
-
+void igc_disable_empty_addr_recv(struct igc_adapter *adapter);
+int igc_enable_empty_addr_recv(struct igc_adapter *adapter);
+struct igc_ring *igc_get_tx_ring(struct igc_adapter *adapter, int cpu);
+void igc_flush_tx_descriptors(struct igc_ring *ring);
void igc_ptp_init(struct igc_adapter *adapter);
void igc_ptp_reset(struct igc_adapter *adapter);
void igc_ptp_suspend(struct igc_adapter *adapter);
void igc_ptp_stop(struct igc_adapter *adapter);
ktime_t igc_ptp_rx_pktstamp(struct igc_adapter *adapter, __le32 *buf);
-int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
-int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
+int igc_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config);
+int igc_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
void igc_ptp_tx_hang(struct igc_adapter *adapter);
void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts);
void igc_ptp_tx_tstamp_event(struct igc_adapter *adapter);
int igc_led_setup(struct igc_adapter *adapter);
+void igc_led_free(struct igc_adapter *adapter);
#define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring))
diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c
index 9fae8bdec2a7..1613b562d17c 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.c
+++ b/drivers/net/ethernet/intel/igc/igc_base.c
@@ -68,6 +68,10 @@ static s32 igc_init_nvm_params_base(struct igc_hw *hw)
u32 eecd = rd32(IGC_EECD);
u16 size;
+ /* failed to read reg and got all F's */
+ if (!(~eecd))
+ return -ENXIO;
+
size = FIELD_GET(IGC_EECD_SIZE_EX_MASK, eecd);
/* Added to a constant, "size" becomes the left-shift value
@@ -221,6 +225,8 @@ static s32 igc_get_invariants_base(struct igc_hw *hw)
/* NVM initialization */
ret_val = igc_init_nvm_params_base(hw);
+ if (ret_val)
+ goto out;
switch (hw->mac.type) {
case igc_i225:
ret_val = igc_init_nvm_params_i225(hw);
diff --git a/drivers/net/ethernet/intel/igc/igc_base.h b/drivers/net/ethernet/intel/igc/igc_base.h
index bf8cdfbba9ff..eaf17cd031c3 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.h
+++ b/drivers/net/ethernet/intel/igc/igc_base.h
@@ -49,6 +49,7 @@ struct igc_adv_tx_context_desc {
#define IGC_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */
#define IGC_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */
#define IGC_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
+#define IGC_ADVTXD_PAYLEN_MASK 0XFFFFC000 /* Adv desc PAYLEN mask */
#define IGC_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
#define IGC_RAR_ENTRIES 16
@@ -85,14 +86,6 @@ union igc_adv_rx_desc {
} wb; /* writeback */
};
-/* Additional Transmit Descriptor Control definitions */
-#define IGC_TXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Tx Queue */
-#define IGC_TXDCTL_SWFLUSH 0x04000000 /* Transmit Software Flush */
-
-/* Additional Receive Descriptor Control definitions */
-#define IGC_RXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Rx Queue */
-#define IGC_RXDCTL_SWFLUSH 0x04000000 /* Receive Software Flush */
-
/* SRRCTL bit definitions */
#define IGC_SRRCTL_BSIZEPKT_MASK GENMASK(6, 0)
#define IGC_SRRCTL_BSIZEPKT(x) FIELD_PREP(IGC_SRRCTL_BSIZEPKT_MASK, \
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index 5f92b3c7c3d4..498ba1522ca4 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -4,6 +4,8 @@
#ifndef _IGC_DEFINES_H_
#define _IGC_DEFINES_H_
+#include <linux/bitfield.h>
+
/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
#define REQ_TX_DESCRIPTOR_MULTIPLE 8
#define REQ_RX_DESCRIPTOR_MULTIPLE 8
@@ -176,7 +178,6 @@
/* PHY GPY 211 registers */
#define STANDARD_AN_REG_MASK 0x0007 /* MMD */
-#define ANEG_MULTIGBT_AN_CTRL 0x0020 /* MULTI GBT AN Control Register */
#define MMD_DEVADDR_SHIFT 16 /* Shift MMD to higher bits */
#define CR_2500T_FD_CAPS 0x0080 /* Advertise 2500T FD capability */
@@ -307,6 +308,8 @@
#define IGC_TXD_DTYP_C 0x00000000 /* Context Descriptor */
#define IGC_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
#define IGC_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
+#define IGC_TXD_POPTS_SMD_MASK 0x3000 /* Indicates whether it's SMD-V or SMD-R */
+
#define IGC_TXD_CMD_EOP 0x01000000 /* End of Packet */
#define IGC_TXD_CMD_IC 0x04000000 /* Insert Checksum */
#define IGC_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */
@@ -362,6 +365,8 @@
#define IGC_SRRCTL_TIMER0SEL(timer) (((timer) & 0x3) << 17)
/* Receive Descriptor bit definitions */
+#define IGC_RXD_STAT_SMD_TYPE_V 0x01 /* SMD-V Packet */
+#define IGC_RXD_STAT_SMD_TYPE_R 0x02 /* SMD-R Packet */
#define IGC_RXD_STAT_EOP 0x02 /* End of Packet */
#define IGC_RXD_STAT_IXSM 0x04 /* Ignore checksum */
#define IGC_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
@@ -371,17 +376,22 @@
#define IGC_RXDEXT_STATERR_LB 0x00040000
/* Advanced Receive Descriptor bit definitions */
-#define IGC_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */
+#define IGC_RXDADV_STAT_SMD_TYPE_MASK 0x06000
+#define IGC_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */
#define IGC_RXDEXT_STATERR_L4E 0x20000000
#define IGC_RXDEXT_STATERR_IPE 0x40000000
#define IGC_RXDEXT_STATERR_RXE 0x80000000
+#define IGC_MRQC_ENABLE_RSS_MQ 0x00000002
#define IGC_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
#define IGC_MRQC_RSS_FIELD_IPV4 0x00020000
#define IGC_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000
#define IGC_MRQC_RSS_FIELD_IPV6 0x00100000
#define IGC_MRQC_RSS_FIELD_IPV6_TCP 0x00200000
+#define IGC_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
+#define IGC_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
+#define IGC_MRQC_DEFAULT_QUEUE_MASK GENMASK(5, 3)
/* Header split receive */
#define IGC_RFCTL_IPV6_EX_DIS 0x00010000
@@ -395,15 +405,57 @@
#define IGC_RCTL_PMCF 0x00800000 /* pass MAC control frames */
#define IGC_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
-#define I225_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */
-#define I225_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */
-#define IGC_RXPBS_CFG_TS_EN 0x80000000 /* Timestamp in Rx buffer */
-
-#define IGC_TXPBSIZE_TSN 0x04145145 /* 5k bytes buffer for each queue */
+/* Mask for RX packet buffer size */
+#define IGC_RXPBSIZE_EXP_MASK GENMASK(5, 0)
+#define IGC_BMC2OSPBSIZE_MASK GENMASK(11, 6)
+#define IGC_RXPBSIZE_BE_MASK GENMASK(17, 12)
+/* Mask for timestamp in RX buffer */
+#define IGC_RXPBS_CFG_TS_EN_MASK GENMASK(31, 31)
+/* High-priority RX packet buffer size (KB). Used for Express traffic when preemption is enabled */
+#define IGC_RXPBSIZE_EXP(x) FIELD_PREP(IGC_RXPBSIZE_EXP_MASK, (x))
+/* BMC to OS packet buffer size in KB */
+#define IGC_BMC2OSPBSIZE(x) FIELD_PREP(IGC_BMC2OSPBSIZE_MASK, (x))
+/* Low-priority RX packet buffer size (KB). Used for BE traffic when preemption is enabled */
+#define IGC_RXPBSIZE_BE(x) FIELD_PREP(IGC_RXPBSIZE_BE_MASK, (x))
+/* Enable RX packet buffer for timestamp descriptor, saving 16 bytes per packet if set */
+#define IGC_RXPBS_CFG_TS_EN FIELD_PREP(IGC_RXPBS_CFG_TS_EN_MASK, 1)
+/* Default value following I225/I226 SW User Manual Section 8.3.1 */
+#define IGC_RXPBSIZE_EXP_BMC_DEFAULT ( \
+ IGC_RXPBSIZE_EXP(34) | IGC_BMC2OSPBSIZE(2))
+#define IGC_RXPBSIZE_EXP_BMC_BE_TSN ( \
+ IGC_RXPBSIZE_EXP(15) | IGC_BMC2OSPBSIZE(2) | IGC_RXPBSIZE_BE(15))
+
+/* Mask for TX packet buffer size */
+#define IGC_TXPB0SIZE_MASK GENMASK(5, 0)
+#define IGC_TXPB1SIZE_MASK GENMASK(11, 6)
+#define IGC_TXPB2SIZE_MASK GENMASK(17, 12)
+#define IGC_TXPB3SIZE_MASK GENMASK(23, 18)
+/* Mask for OS to BMC packet buffer size */
+#define IGC_OS2BMCPBSIZE_MASK GENMASK(29, 24)
+/* TX Packet buffer size in KB */
+#define IGC_TXPB0SIZE(x) FIELD_PREP(IGC_TXPB0SIZE_MASK, (x))
+#define IGC_TXPB1SIZE(x) FIELD_PREP(IGC_TXPB1SIZE_MASK, (x))
+#define IGC_TXPB2SIZE(x) FIELD_PREP(IGC_TXPB2SIZE_MASK, (x))
+#define IGC_TXPB3SIZE(x) FIELD_PREP(IGC_TXPB3SIZE_MASK, (x))
+/* OS to BMC packet buffer size in KB */
+#define IGC_OS2BMCPBSIZE(x) FIELD_PREP(IGC_OS2BMCPBSIZE_MASK, (x))
+/* Default value following I225/I226 SW User Manual Section 8.3.2 */
+#define IGC_TXPBSIZE_DEFAULT ( \
+ IGC_TXPB0SIZE(20) | IGC_TXPB1SIZE(0) | IGC_TXPB2SIZE(0) | \
+ IGC_TXPB3SIZE(0) | IGC_OS2BMCPBSIZE(4))
+#define IGC_TXPBSIZE_TSN ( \
+ IGC_TXPB0SIZE(7) | IGC_TXPB1SIZE(7) | IGC_TXPB2SIZE(7) | \
+ IGC_TXPB3SIZE(7) | IGC_OS2BMCPBSIZE(4))
#define IGC_DTXMXPKTSZ_TSN 0x19 /* 1600 bytes of max TX DMA packet size */
#define IGC_DTXMXPKTSZ_DEFAULT 0x98 /* 9728-byte Jumbo frames */
+/* Retry Buffer Control */
+#define IGC_RETX_CTL 0x041C
+#define IGC_RETX_CTL_WATERMARK_MASK 0xF
+#define IGC_RETX_CTL_QBVFULLTH_SHIFT 8 /* QBV Retry Buffer Full Threshold */
+#define IGC_RETX_CTL_QBVFULLEN 0x1000 /* Enable QBV Retry Buffer Full Threshold */
+
/* Transmit Scheduling Latency */
/* Latency between transmission scheduling (LaunchTime) and the time
* the packet is transmitted to the network, in nanoseconds.
@@ -532,12 +584,15 @@
/* Transmit Scheduling */
#define IGC_TQAVCTRL_TRANSMIT_MODE_TSN 0x00000001
+#define IGC_TQAVCTRL_PREEMPT_ENA 0x00000002
#define IGC_TQAVCTRL_ENHANCED_QAV 0x00000008
#define IGC_TQAVCTRL_FUTSCDDIS 0x00000080
+#define IGC_TQAVCTRL_MIN_FRAG_MASK 0x0000C000
#define IGC_TXQCTL_QUEUE_MODE_LAUNCHT 0x00000001
#define IGC_TXQCTL_STRICT_CYCLE 0x00000002
#define IGC_TXQCTL_STRICT_END 0x00000004
+#define IGC_TXQCTL_PREEMPTIBLE 0x00000008
#define IGC_TXQCTL_QAV_SEL_MASK 0x000000C0
#define IGC_TXQCTL_QAV_SEL_CBS0 0x00000080
#define IGC_TXQCTL_QAV_SEL_CBS1 0x000000C0
@@ -547,6 +602,15 @@
#define IGC_MAX_SR_QUEUES 2
+#define IGC_TXARB_TXQ_PRIO_0_MASK GENMASK(1, 0)
+#define IGC_TXARB_TXQ_PRIO_1_MASK GENMASK(3, 2)
+#define IGC_TXARB_TXQ_PRIO_2_MASK GENMASK(5, 4)
+#define IGC_TXARB_TXQ_PRIO_3_MASK GENMASK(7, 6)
+#define IGC_TXARB_TXQ_PRIO_0(x) FIELD_PREP(IGC_TXARB_TXQ_PRIO_0_MASK, (x))
+#define IGC_TXARB_TXQ_PRIO_1(x) FIELD_PREP(IGC_TXARB_TXQ_PRIO_1_MASK, (x))
+#define IGC_TXARB_TXQ_PRIO_2(x) FIELD_PREP(IGC_TXARB_TXQ_PRIO_2_MASK, (x))
+#define IGC_TXARB_TXQ_PRIO_3(x) FIELD_PREP(IGC_TXARB_TXQ_PRIO_3_MASK, (x))
+
/* Receive Checksum Control */
#define IGC_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */
#define IGC_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
@@ -558,7 +622,10 @@
#define IGC_PTM_CTRL_SHRT_CYC(usec) (((usec) & 0x3f) << 2)
#define IGC_PTM_CTRL_PTM_TO(usec) (((usec) & 0xff) << 8)
-#define IGC_PTM_SHORT_CYC_DEFAULT 1 /* Default short cycle interval */
+/* A short cycle time of 1us theoretically should work, but appears to be too
+ * short in practice.
+ */
+#define IGC_PTM_SHORT_CYC_DEFAULT 4 /* Default short cycle interval */
#define IGC_PTM_CYC_TIME_DEFAULT 5 /* Default PTM cycle time */
#define IGC_PTM_TIMEOUT_DEFAULT 255 /* Default timeout for PTM errors */
@@ -577,6 +644,7 @@
#define IGC_PTM_STAT_T4M1_OVFL BIT(3) /* T4 minus T1 overflow */
#define IGC_PTM_STAT_ADJUST_1ST BIT(4) /* 1588 timer adjusted during 1st PTM cycle */
#define IGC_PTM_STAT_ADJUST_CYC BIT(5) /* 1588 timer adjusted during non-1st PTM cycle */
+#define IGC_PTM_STAT_ALL GENMASK(5, 0) /* Used to clear all status */
/* PCIe PTM Cycle Control */
#define IGC_PTM_CYCLE_CTRL_CYC_TIME(msec) ((msec) & 0x3ff) /* PTM Cycle Time (msec) */
@@ -635,6 +703,16 @@
#define IGC_MDIC_READY 0x10000000
#define IGC_MDIC_ERROR 0x40000000
+/* EEE Link Ability */
+#define IGC_EEE_2500BT_MASK BIT(0)
+#define IGC_EEE_1000BT_MASK BIT(2)
+#define IGC_EEE_100BT_MASK BIT(1)
+
+/* EEE Link-Partner Ability */
+#define IGC_LP_EEE_2500BT_MASK BIT(0)
+#define IGC_LP_EEE_1000BT_MASK BIT(2)
+#define IGC_LP_EEE_100BT_MASK BIT(1)
+
#define IGC_N0_QUEUE -1
#define IGC_MAX_MAC_HDR_LEN 127
diff --git a/drivers/net/ethernet/intel/igc/igc_diag.c b/drivers/net/ethernet/intel/igc/igc_diag.c
index cc621970c0cd..a43d7244ee70 100644
--- a/drivers/net/ethernet/intel/igc/igc_diag.c
+++ b/drivers/net/ethernet/intel/igc/igc_diag.c
@@ -173,8 +173,7 @@ bool igc_link_test(struct igc_adapter *adapter, u64 *data)
*data = 0;
/* add delay to give enough time for autonegotiation to finish */
- if (adapter->hw.mac.autoneg)
- ssleep(5);
+ ssleep(5);
link_up = igc_has_link(adapter);
if (!link_up) {
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index 1a64f1ca6ca8..e94c1922b97a 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -8,6 +8,7 @@
#include "igc.h"
#include "igc_diag.h"
+#include "igc_tsn.h"
/* forward declaration */
struct igc_stats {
@@ -121,9 +122,11 @@ static const char igc_gstrings_test[][ETH_GSTRING_LEN] = {
#define IGC_STATS_LEN \
(IGC_GLOBAL_STATS_LEN + IGC_NETDEV_STATS_LEN + IGC_QUEUE_STATS_LEN)
+#define IGC_PRIV_FLAGS_LEGACY_RX BIT(0)
+#define IGC_PRIV_FLAGS_REVERSE_TSN_TXQ_PRIO BIT(1)
static const char igc_priv_flags_strings[][ETH_GSTRING_LEN] = {
-#define IGC_PRIV_FLAGS_LEGACY_RX BIT(0)
"legacy-rx",
+ "reverse-tsn-txq-prio",
};
#define IGC_PRIV_FLAGS_STR_LEN ARRAY_SIZE(igc_priv_flags_strings)
@@ -624,11 +627,11 @@ igc_ethtool_set_ringparam(struct net_device *netdev,
}
if (adapter->num_tx_queues > adapter->num_rx_queues)
- temp_ring = vmalloc(array_size(sizeof(struct igc_ring),
- adapter->num_tx_queues));
+ temp_ring = vmalloc_array(adapter->num_tx_queues,
+ sizeof(struct igc_ring));
else
- temp_ring = vmalloc(array_size(sizeof(struct igc_ring),
- adapter->num_rx_queues));
+ temp_ring = vmalloc_array(adapter->num_rx_queues,
+ sizeof(struct igc_ring));
if (!temp_ring) {
err = -ENOMEM;
@@ -807,7 +810,7 @@ static int igc_ethtool_get_sset_count(struct net_device *netdev, int sset)
case ETH_SS_PRIV_FLAGS:
return IGC_PRIV_FLAGS_STR_LEN;
default:
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
}
@@ -1044,9 +1047,11 @@ static int igc_ethtool_get_nfc_rules(struct igc_adapter *adapter,
return 0;
}
-static int igc_ethtool_get_rss_hash_opts(struct igc_adapter *adapter,
- struct ethtool_rxnfc *cmd)
+static int igc_ethtool_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct igc_adapter *adapter = netdev_priv(dev);
+
cmd->data = 0;
/* Report default options for RSS on igc */
@@ -1086,15 +1091,19 @@ static int igc_ethtool_get_rss_hash_opts(struct igc_adapter *adapter,
return 0;
}
+static u32 igc_ethtool_get_rx_ring_count(struct net_device *dev)
+{
+ struct igc_adapter *adapter = netdev_priv(dev);
+
+ return adapter->num_rx_queues;
+}
+
static int igc_ethtool_get_rxnfc(struct net_device *dev,
struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
struct igc_adapter *adapter = netdev_priv(dev);
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = adapter->num_rx_queues;
- return 0;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = adapter->nfc_rule_count;
return 0;
@@ -1102,8 +1111,6 @@ static int igc_ethtool_get_rxnfc(struct net_device *dev,
return igc_ethtool_get_nfc_rule(adapter, cmd);
case ETHTOOL_GRXCLSRLALL:
return igc_ethtool_get_nfc_rules(adapter, cmd, rule_locs);
- case ETHTOOL_GRXFH:
- return igc_ethtool_get_rss_hash_opts(adapter, cmd);
default:
return -EOPNOTSUPP;
}
@@ -1111,9 +1118,11 @@ static int igc_ethtool_get_rxnfc(struct net_device *dev,
#define UDP_RSS_FLAGS (IGC_FLAG_RSS_FIELD_IPV4_UDP | \
IGC_FLAG_RSS_FIELD_IPV6_UDP)
-static int igc_ethtool_set_rss_hash_opt(struct igc_adapter *adapter,
- struct ethtool_rxnfc *nfc)
+static int igc_ethtool_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
{
+ struct igc_adapter *adapter = netdev_priv(dev);
u32 flags = adapter->flags;
/* RSS does not support anything other than hashing
@@ -1278,6 +1287,24 @@ static void igc_ethtool_init_nfc_rule(struct igc_nfc_rule *rule,
rule->flex = true;
else
rule->flex = false;
+
+ /* The wildcard rule is only applied if:
+ * a) None of the other filtering rules match (match_flags is zero)
+ * b) The flow type is ETHER_FLOW only (no additional fields set)
+ * c) Mask for Source MAC address is not specified (all zeros)
+ * d) Mask for Destination MAC address is not specified (all zeros)
+ * e) Mask for L2 EtherType is not specified (zero)
+ *
+ * If all these conditions are met, the rule is treated as a wildcard
+ * rule. Default queue feature will be used, so that all packets that do
+ * not match any other rule will be routed to the default queue.
+ */
+ if (!rule->filter.match_flags &&
+ fsp->flow_type == ETHER_FLOW &&
+ is_zero_ether_addr(fsp->m_u.ether_spec.h_source) &&
+ is_zero_ether_addr(fsp->m_u.ether_spec.h_dest) &&
+ !fsp->m_u.ether_spec.h_proto)
+ rule->filter.match_flags = IGC_FILTER_FLAG_DEFAULT_QUEUE;
}
/**
@@ -1424,8 +1451,6 @@ static int igc_ethtool_set_rxnfc(struct net_device *dev,
struct igc_adapter *adapter = netdev_priv(dev);
switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- return igc_ethtool_set_rss_hash_opt(adapter, cmd);
case ETHTOOL_SRXCLSRLINS:
return igc_ethtool_add_nfc_rule(adapter, cmd);
case ETHTOOL_SRXCLSRLDEL:
@@ -1540,6 +1565,10 @@ static int igc_ethtool_set_channels(struct net_device *netdev,
if (ch->other_count != NON_Q_VECTORS)
return -EINVAL;
+ /* Do not allow channel reconfiguration when mqprio is enabled */
+ if (adapter->strict_priority_enable)
+ return -EINVAL;
+
/* Verify the number of channels doesn't exceed hw limits */
max_combined = igc_get_max_rss_queues(adapter);
if (count > max_combined)
@@ -1559,21 +1588,17 @@ static int igc_ethtool_set_channels(struct net_device *netdev,
}
static int igc_ethtool_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct igc_adapter *adapter = netdev_priv(dev);
if (adapter->ptp_clock)
info->phc_index = ptp_clock_index(adapter->ptp_clock);
- else
- info->phc_index = -1;
switch (adapter->hw.mac.type) {
case igc_i225:
info->so_timestamping =
SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
@@ -1599,6 +1624,9 @@ static u32 igc_ethtool_get_priv_flags(struct net_device *netdev)
if (adapter->flags & IGC_FLAG_RX_LEGACY)
priv_flags |= IGC_PRIV_FLAGS_LEGACY_RX;
+ if (adapter->flags & IGC_FLAG_TSN_REVERSE_TXQ_PRIO)
+ priv_flags |= IGC_PRIV_FLAGS_REVERSE_TSN_TXQ_PRIO;
+
return priv_flags;
}
@@ -1607,10 +1635,13 @@ static int igc_ethtool_set_priv_flags(struct net_device *netdev, u32 priv_flags)
struct igc_adapter *adapter = netdev_priv(netdev);
unsigned int flags = adapter->flags;
- flags &= ~IGC_FLAG_RX_LEGACY;
+ flags &= ~(IGC_FLAG_RX_LEGACY | IGC_FLAG_TSN_REVERSE_TXQ_PRIO);
if (priv_flags & IGC_PRIV_FLAGS_LEGACY_RX)
flags |= IGC_FLAG_RX_LEGACY;
+ if (priv_flags & IGC_PRIV_FLAGS_REVERSE_TSN_TXQ_PRIO)
+ flags |= IGC_FLAG_TSN_REVERSE_TXQ_PRIO;
+
if (flags != adapter->flags) {
adapter->flags = flags;
@@ -1627,13 +1658,85 @@ static int igc_ethtool_get_eee(struct net_device *netdev,
{
struct igc_adapter *adapter = netdev_priv(netdev);
struct igc_hw *hw = &adapter->hw;
- u32 eeer;
+ struct igc_phy_info *phy = &hw->phy;
+ u16 eee_advert, eee_lp_advert;
+ u32 eeer, ret_val;
+
+ /* EEE supported */
+ linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ edata->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ edata->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ edata->supported);
+
+ /* EEE Advertisement 1 - reg 7.60 */
+ ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK <<
+ MMD_DEVADDR_SHIFT) |
+ IGC_ANEG_EEE_AB1,
+ &eee_advert);
+ if (ret_val) {
+ netdev_err(adapter->netdev,
+ "Failed to read IEEE 7.60 register\n");
+ return -EINVAL;
+ }
+
+ if (eee_advert & IGC_EEE_1000BT_MASK)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ edata->advertised);
+
+ if (eee_advert & IGC_EEE_100BT_MASK)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ edata->advertised);
+
+ /* EEE Advertisement 2 - reg 7.62 */
+ ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK <<
+ MMD_DEVADDR_SHIFT) |
+ IGC_ANEG_EEE_AB2,
+ &eee_advert);
+ if (ret_val) {
+ netdev_err(adapter->netdev,
+ "Failed to read IEEE 7.62 register\n");
+ return -EINVAL;
+ }
+
+ if (eee_advert & IGC_EEE_2500BT_MASK)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ edata->advertised);
+
+ /* EEE Link-Partner Ability 1 - reg 7.61 */
+ ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK <<
+ MMD_DEVADDR_SHIFT) |
+ IGC_ANEG_EEE_LP_AB1,
+ &eee_lp_advert);
+ if (ret_val) {
+ netdev_err(adapter->netdev,
+ "Failed to read IEEE 7.61 register\n");
+ return -EINVAL;
+ }
+
+ if (eee_lp_advert & IGC_LP_EEE_1000BT_MASK)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ edata->lp_advertised);
- if (hw->dev_spec._base.eee_enable)
- mii_eee_cap1_mod_linkmode_t(edata->advertised,
- adapter->eee_advert);
+ if (eee_lp_advert & IGC_LP_EEE_100BT_MASK)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ edata->lp_advertised);
- *edata = adapter->eee;
+ /* EEE Link-Partner Ability 2 - reg 7.63 */
+ ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK <<
+ MMD_DEVADDR_SHIFT) |
+ IGC_ANEG_EEE_LP_AB2,
+ &eee_lp_advert);
+ if (ret_val) {
+ netdev_err(adapter->netdev,
+ "Failed to read IEEE 7.63 register\n");
+ return -EINVAL;
+ }
+
+ if (eee_lp_advert & IGC_LP_EEE_2500BT_MASK)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ edata->lp_advertised);
eeer = rd32(IGC_EEER);
@@ -1695,8 +1798,6 @@ static int igc_ethtool_set_eee(struct net_device *netdev,
return -EINVAL;
}
- adapter->eee_advert = linkmode_to_mii_eee_cap1_t(edata->advertised);
-
if (hw->dev_spec._base.eee_enable != edata->eee_enabled) {
hw->dev_spec._base.eee_enable = edata->eee_enabled;
adapter->flags |= IGC_FLAG_EEE;
@@ -1711,19 +1812,81 @@ static int igc_ethtool_set_eee(struct net_device *netdev,
return 0;
}
-static int igc_ethtool_begin(struct net_device *netdev)
+static int igc_ethtool_get_mm(struct net_device *netdev,
+ struct ethtool_mm_state *cmd)
{
struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_fpe_t *fpe = &adapter->fpe;
+
+ ethtool_mmsv_get_mm(&fpe->mmsv, cmd);
+ cmd->tx_min_frag_size = fpe->tx_min_frag_size;
+ cmd->rx_min_frag_size = IGC_RX_MIN_FRAG_SIZE;
- pm_runtime_get_sync(&adapter->pdev->dev);
return 0;
}
-static void igc_ethtool_complete(struct net_device *netdev)
+static int igc_ethtool_set_mm(struct net_device *netdev,
+ struct ethtool_mm_cfg *cmd,
+ struct netlink_ext_ack *extack)
{
struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_fpe_t *fpe = &adapter->fpe;
+
+ fpe->tx_min_frag_size = igc_fpe_get_supported_frag_size(cmd->tx_min_frag_size);
+ if (fpe->tx_min_frag_size != cmd->tx_min_frag_size)
+ NL_SET_ERR_MSG_MOD(extack,
+ "tx-min-frag-size value set is unsupported. Rounded up to supported value (64, 128, 192, 256)");
- pm_runtime_put(&adapter->pdev->dev);
+ if (fpe->mmsv.pmac_enabled != cmd->pmac_enabled) {
+ if (cmd->pmac_enabled)
+ static_branch_inc(&igc_fpe_enabled);
+ else
+ static_branch_dec(&igc_fpe_enabled);
+ }
+
+ ethtool_mmsv_set_mm(&fpe->mmsv, cmd);
+
+ return igc_tsn_offload_apply(adapter);
+}
+
+/**
+ * igc_ethtool_get_frame_ass_error - Get the frame assembly error count.
+ * @reg_value: Register value for IGC_PRMEXCPRCNT
+ * Return: The count of frame assembly errors.
+ */
+static u64 igc_ethtool_get_frame_ass_error(u32 reg_value)
+{
+ /* Out of order statistics */
+ u32 ooo_frame_cnt, ooo_frag_cnt;
+ u32 miss_frame_frag_cnt;
+
+ ooo_frame_cnt = FIELD_GET(IGC_PRMEXCPRCNT_OOO_FRAME_CNT, reg_value);
+ ooo_frag_cnt = FIELD_GET(IGC_PRMEXCPRCNT_OOO_FRAG_CNT, reg_value);
+ miss_frame_frag_cnt = FIELD_GET(IGC_PRMEXCPRCNT_MISS_FRAME_FRAG_CNT,
+ reg_value);
+
+ return ooo_frame_cnt + ooo_frag_cnt + miss_frame_frag_cnt;
+}
+
+static u64 igc_ethtool_get_frame_smd_error(u32 reg_value)
+{
+ return FIELD_GET(IGC_PRMEXCPRCNT_OOO_SMDC, reg_value);
+}
+
+static void igc_ethtool_get_mm_stats(struct net_device *dev,
+ struct ethtool_mm_stats *stats)
+{
+ struct igc_adapter *adapter = netdev_priv(dev);
+ struct igc_hw *hw = &adapter->hw;
+ u32 reg_value;
+
+ reg_value = rd32(IGC_PRMEXCPRCNT);
+
+ stats->MACMergeFrameAssErrorCount = igc_ethtool_get_frame_ass_error(reg_value);
+ stats->MACMergeFrameSmdErrorCount = igc_ethtool_get_frame_smd_error(reg_value);
+ stats->MACMergeFrameAssOkCount = rd32(IGC_PRMPTDRCNT);
+ stats->MACMergeFragCountRx = rd32(IGC_PRMEVNTRCNT);
+ stats->MACMergeFragCountTx = rd32(IGC_PRMEVNTTCNT);
}
static int igc_ethtool_get_link_ksettings(struct net_device *netdev,
@@ -1766,11 +1929,8 @@ static int igc_ethtool_get_link_ksettings(struct net_device *netdev,
ethtool_link_ksettings_add_link_mode(cmd, advertising, 2500baseT_Full);
/* set autoneg settings */
- if (hw->mac.autoneg == 1) {
- ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
- Autoneg);
- }
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
/* Set pause flow control settings */
ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
@@ -1823,10 +1983,7 @@ static int igc_ethtool_get_link_ksettings(struct net_device *netdev,
cmd->base.duplex = DUPLEX_UNKNOWN;
}
cmd->base.speed = speed;
- if (hw->mac.autoneg)
- cmd->base.autoneg = AUTONEG_ENABLE;
- else
- cmd->base.autoneg = AUTONEG_DISABLE;
+ cmd->base.autoneg = AUTONEG_ENABLE;
/* MDI-X => 2; MDI =>1; Invalid =>0 */
if (hw->phy.media_type == igc_media_type_copper)
@@ -1900,7 +2057,6 @@ igc_ethtool_set_link_ksettings(struct net_device *netdev,
advertised |= ADVERTISE_10_HALF;
if (cmd->base.autoneg == AUTONEG_ENABLE) {
- hw->mac.autoneg = 1;
hw->phy.autoneg_advertised = advertised;
if (adapter->fc_autoneg)
hw->fc.requested_mode = igc_fc_default;
@@ -1942,6 +2098,9 @@ static void igc_ethtool_diag_test(struct net_device *netdev,
netdev_info(adapter->netdev, "Offline testing starting");
set_bit(__IGC_TESTING, &adapter->state);
+ /* power up PHY for link test */
+ igc_power_up_phy_copper(&adapter->hw);
+
/* Link test performed before hardware reset so autoneg doesn't
* interfere with test result
*/
@@ -2015,9 +2174,12 @@ static const struct ethtool_ops igc_ethtool_ops = {
.set_coalesce = igc_ethtool_set_coalesce,
.get_rxnfc = igc_ethtool_get_rxnfc,
.set_rxnfc = igc_ethtool_set_rxnfc,
+ .get_rx_ring_count = igc_ethtool_get_rx_ring_count,
.get_rxfh_indir_size = igc_ethtool_get_rxfh_indir_size,
.get_rxfh = igc_ethtool_get_rxfh,
.set_rxfh = igc_ethtool_set_rxfh,
+ .get_rxfh_fields = igc_ethtool_get_rxfh_fields,
+ .set_rxfh_fields = igc_ethtool_set_rxfh_fields,
.get_ts_info = igc_ethtool_get_ts_info,
.get_channels = igc_ethtool_get_channels,
.set_channels = igc_ethtool_set_channels,
@@ -2025,11 +2187,12 @@ static const struct ethtool_ops igc_ethtool_ops = {
.set_priv_flags = igc_ethtool_set_priv_flags,
.get_eee = igc_ethtool_get_eee,
.set_eee = igc_ethtool_set_eee,
- .begin = igc_ethtool_begin,
- .complete = igc_ethtool_complete,
.get_link_ksettings = igc_ethtool_get_link_ksettings,
.set_link_ksettings = igc_ethtool_set_link_ksettings,
.self_test = igc_ethtool_diag_test,
+ .get_mm = igc_ethtool_get_mm,
+ .get_mm_stats = igc_ethtool_get_mm_stats,
+ .set_mm = igc_ethtool_set_mm,
};
void igc_ethtool_set_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h
index e1c572e0d4ef..be8a49a86d09 100644
--- a/drivers/net/ethernet/intel/igc/igc_hw.h
+++ b/drivers/net/ethernet/intel/igc/igc_hw.h
@@ -92,7 +92,6 @@ struct igc_mac_info {
bool asf_firmware_present;
bool arc_subsystem_valid;
- bool autoneg;
bool autoneg_failed;
bool get_link_status;
};
@@ -280,9 +279,4 @@ struct net_device *igc_get_hw_dev(struct igc_hw *hw);
#define hw_dbg(format, arg...) \
netdev_dbg(igc_get_hw_dev(hw), format, ##arg)
-s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value);
-s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value);
-void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value);
-void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value);
-
#endif /* _IGC_HW_H_ */
diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c
index 0dd61719f1ed..5226d10cc95b 100644
--- a/drivers/net/ethernet/intel/igc/igc_i225.c
+++ b/drivers/net/ethernet/intel/igc/igc_i225.c
@@ -435,7 +435,7 @@ static s32 igc_update_nvm_checksum_i225(struct igc_hw *hw)
}
checksum += nvm_data;
}
- checksum = (u16)NVM_SUM - checksum;
+ checksum = NVM_SUM - checksum;
ret_val = igc_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
&checksum);
if (ret_val) {
diff --git a/drivers/net/ethernet/intel/igc/igc_leds.c b/drivers/net/ethernet/intel/igc/igc_leds.c
index bf240c5daf86..3929b25b6ae6 100644
--- a/drivers/net/ethernet/intel/igc/igc_leds.c
+++ b/drivers/net/ethernet/intel/igc/igc_leds.c
@@ -236,8 +236,8 @@ static void igc_led_get_name(struct igc_adapter *adapter, int index, char *buf,
pci_dev_id(adapter->pdev), index);
}
-static void igc_setup_ldev(struct igc_led_classdev *ldev,
- struct net_device *netdev, int index)
+static int igc_setup_ldev(struct igc_led_classdev *ldev,
+ struct net_device *netdev, int index)
{
struct igc_adapter *adapter = netdev_priv(netdev);
struct led_classdev *led_cdev = &ldev->led;
@@ -257,24 +257,46 @@ static void igc_setup_ldev(struct igc_led_classdev *ldev,
led_cdev->hw_control_get = igc_led_hw_control_get;
led_cdev->hw_control_get_device = igc_led_hw_control_get_device;
- devm_led_classdev_register(&netdev->dev, led_cdev);
+ return led_classdev_register(&netdev->dev, led_cdev);
}
int igc_led_setup(struct igc_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- struct device *dev = &netdev->dev;
struct igc_led_classdev *leds;
- int i;
+ int i, err;
mutex_init(&adapter->led_mutex);
- leds = devm_kcalloc(dev, IGC_NUM_LEDS, sizeof(*leds), GFP_KERNEL);
+ leds = kcalloc(IGC_NUM_LEDS, sizeof(*leds), GFP_KERNEL);
if (!leds)
return -ENOMEM;
- for (i = 0; i < IGC_NUM_LEDS; i++)
- igc_setup_ldev(leds + i, netdev, i);
+ for (i = 0; i < IGC_NUM_LEDS; i++) {
+ err = igc_setup_ldev(leds + i, netdev, i);
+ if (err)
+ goto err;
+ }
+
+ adapter->leds = leds;
return 0;
+
+err:
+ for (i--; i >= 0; i--)
+ led_classdev_unregister(&((leds + i)->led));
+
+ kfree(leds);
+ return err;
+}
+
+void igc_led_free(struct igc_adapter *adapter)
+{
+ struct igc_led_classdev *leds = adapter->leds;
+ int i;
+
+ for (i = 0; i < IGC_NUM_LEDS; i++)
+ led_classdev_unregister(&((leds + i)->led));
+
+ kfree(leds);
}
diff --git a/drivers/net/ethernet/intel/igc/igc_mac.c b/drivers/net/ethernet/intel/igc/igc_mac.c
index a5c4b19d71a2..7ac6637f8db7 100644
--- a/drivers/net/ethernet/intel/igc/igc_mac.c
+++ b/drivers/net/ethernet/intel/igc/igc_mac.c
@@ -127,7 +127,7 @@ s32 igc_setup_link(struct igc_hw *hw)
goto out;
/* If requested flow control is set to default, set flow control
- * to the both 'rx' and 'tx' pause frames.
+ * to both 'rx' and 'tx' pause frames.
*/
if (hw->fc.requested_mode == igc_fc_default)
hw->fc.requested_mode = igc_fc_full;
@@ -386,14 +386,6 @@ s32 igc_check_for_copper_link(struct igc_hw *hw)
*/
igc_check_downshift(hw);
- /* If we are forcing speed/duplex, then we simply return since
- * we have already determined whether we have link or not.
- */
- if (!mac->autoneg) {
- ret_val = -IGC_ERR_CONFIG;
- goto out;
- }
-
/* Auto-Neg is enabled. Auto Speed Detection takes care
* of MAC speed/duplex configuration. So we only need to
* configure Collision Distance in the MAC.
@@ -468,173 +460,171 @@ s32 igc_config_fc_after_link_up(struct igc_hw *hw)
goto out;
}
- /* Check for the case where we have copper media and auto-neg is
- * enabled. In this case, we need to check and see if Auto-Neg
- * has completed, and if so, how the PHY and link partner has
- * flow control configured.
+ /* In auto-neg, we need to check and see if Auto-Neg has completed,
+ * and if so, how the PHY and link partner has flow control
+ * configured.
*/
- if (mac->autoneg) {
- /* Read the MII Status Register and check to see if AutoNeg
- * has completed. We read this twice because this reg has
- * some "sticky" (latched) bits.
- */
- ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
- &mii_status_reg);
- if (ret_val)
- goto out;
- ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
- &mii_status_reg);
- if (ret_val)
- goto out;
- if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
- hw_dbg("Copper PHY and Auto Neg has not completed.\n");
- goto out;
- }
+ /* Read the MII Status Register and check to see if AutoNeg
+ * has completed. We read this twice because this reg has
+ * some "sticky" (latched) bits.
+ */
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
+ &mii_status_reg);
+ if (ret_val)
+ goto out;
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
+ &mii_status_reg);
+ if (ret_val)
+ goto out;
- /* The AutoNeg process has completed, so we now need to
- * read both the Auto Negotiation Advertisement
- * Register (Address 4) and the Auto_Negotiation Base
- * Page Ability Register (Address 5) to determine how
- * flow control was negotiated.
- */
- ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
- &mii_nway_adv_reg);
- if (ret_val)
- goto out;
- ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
- &mii_nway_lp_ability_reg);
- if (ret_val)
- goto out;
- /* Two bits in the Auto Negotiation Advertisement Register
- * (Address 4) and two bits in the Auto Negotiation Base
- * Page Ability Register (Address 5) determine flow control
- * for both the PHY and the link partner. The following
- * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
- * 1999, describes these PAUSE resolution bits and how flow
- * control is determined based upon these settings.
- * NOTE: DC = Don't Care
- *
- * LOCAL DEVICE | LINK PARTNER
- * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
- *-------|---------|-------|---------|--------------------
- * 0 | 0 | DC | DC | igc_fc_none
- * 0 | 1 | 0 | DC | igc_fc_none
- * 0 | 1 | 1 | 0 | igc_fc_none
- * 0 | 1 | 1 | 1 | igc_fc_tx_pause
- * 1 | 0 | 0 | DC | igc_fc_none
- * 1 | DC | 1 | DC | igc_fc_full
- * 1 | 1 | 0 | 0 | igc_fc_none
- * 1 | 1 | 0 | 1 | igc_fc_rx_pause
- *
- * Are both PAUSE bits set to 1? If so, this implies
- * Symmetric Flow Control is enabled at both ends. The
- * ASM_DIR bits are irrelevant per the spec.
- *
- * For Symmetric Flow Control:
- *
- * LOCAL DEVICE | LINK PARTNER
- * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
- *-------|---------|-------|---------|--------------------
- * 1 | DC | 1 | DC | IGC_fc_full
- *
- */
- if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
- (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
- /* Now we need to check if the user selected RX ONLY
- * of pause frames. In this case, we had to advertise
- * FULL flow control because we could not advertise RX
- * ONLY. Hence, we must now check to see if we need to
- * turn OFF the TRANSMISSION of PAUSE frames.
- */
- if (hw->fc.requested_mode == igc_fc_full) {
- hw->fc.current_mode = igc_fc_full;
- hw_dbg("Flow Control = FULL.\n");
- } else {
- hw->fc.current_mode = igc_fc_rx_pause;
- hw_dbg("Flow Control = RX PAUSE frames only.\n");
- }
- }
+ if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
+ hw_dbg("Copper PHY and Auto Neg has not completed.\n");
+ goto out;
+ }
- /* For receiving PAUSE frames ONLY.
- *
- * LOCAL DEVICE | LINK PARTNER
- * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
- *-------|---------|-------|---------|--------------------
- * 0 | 1 | 1 | 1 | igc_fc_tx_pause
- */
- else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
- (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
- (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
- (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
- hw->fc.current_mode = igc_fc_tx_pause;
- hw_dbg("Flow Control = TX PAUSE frames only.\n");
- }
- /* For transmitting PAUSE frames ONLY.
- *
- * LOCAL DEVICE | LINK PARTNER
- * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
- *-------|---------|-------|---------|--------------------
- * 1 | 1 | 0 | 1 | igc_fc_rx_pause
- */
- else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
- (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
- !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
- (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
- hw->fc.current_mode = igc_fc_rx_pause;
- hw_dbg("Flow Control = RX PAUSE frames only.\n");
- }
- /* Per the IEEE spec, at this point flow control should be
- * disabled. However, we want to consider that we could
- * be connected to a legacy switch that doesn't advertise
- * desired flow control, but can be forced on the link
- * partner. So if we advertised no flow control, that is
- * what we will resolve to. If we advertised some kind of
- * receive capability (Rx Pause Only or Full Flow Control)
- * and the link partner advertised none, we will configure
- * ourselves to enable Rx Flow Control only. We can do
- * this safely for two reasons: If the link partner really
- * didn't want flow control enabled, and we enable Rx, no
- * harm done since we won't be receiving any PAUSE frames
- * anyway. If the intent on the link partner was to have
- * flow control enabled, then by us enabling RX only, we
- * can at least receive pause frames and process them.
- * This is a good idea because in most cases, since we are
- * predominantly a server NIC, more times than not we will
- * be asked to delay transmission of packets than asking
- * our link partner to pause transmission of frames.
+ /* The AutoNeg process has completed, so we now need to
+ * read both the Auto Negotiation Advertisement
+ * Register (Address 4) and the Auto_Negotiation Base
+ * Page Ability Register (Address 5) to determine how
+ * flow control was negotiated.
+ */
+ ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
+ &mii_nway_adv_reg);
+ if (ret_val)
+ goto out;
+ ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
+ &mii_nway_lp_ability_reg);
+ if (ret_val)
+ goto out;
+ /* Two bits in the Auto Negotiation Advertisement Register
+ * (Address 4) and two bits in the Auto Negotiation Base
+ * Page Ability Register (Address 5) determine flow control
+ * for both the PHY and the link partner. The following
+ * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
+ * 1999, describes these PAUSE resolution bits and how flow
+ * control is determined based upon these settings.
+ * NOTE: DC = Don't Care
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
+ *-------|---------|-------|---------|--------------------
+ * 0 | 0 | DC | DC | igc_fc_none
+ * 0 | 1 | 0 | DC | igc_fc_none
+ * 0 | 1 | 1 | 0 | igc_fc_none
+ * 0 | 1 | 1 | 1 | igc_fc_tx_pause
+ * 1 | 0 | 0 | DC | igc_fc_none
+ * 1 | DC | 1 | DC | igc_fc_full
+ * 1 | 1 | 0 | 0 | igc_fc_none
+ * 1 | 1 | 0 | 1 | igc_fc_rx_pause
+ *
+ * Are both PAUSE bits set to 1? If so, this implies
+ * Symmetric Flow Control is enabled at both ends. The
+ * ASM_DIR bits are irrelevant per the spec.
+ *
+ * For Symmetric Flow Control:
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | DC | 1 | DC | IGC_fc_full
+ *
+ */
+ if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
+ /* Now we need to check if the user selected RX ONLY
+ * of pause frames. In this case, we had to advertise
+ * FULL flow control because we could not advertise RX
+ * ONLY. Hence, we must now check to see if we need to
+ * turn OFF the TRANSMISSION of PAUSE frames.
*/
- else if ((hw->fc.requested_mode == igc_fc_none) ||
- (hw->fc.requested_mode == igc_fc_tx_pause) ||
- (hw->fc.strict_ieee)) {
- hw->fc.current_mode = igc_fc_none;
- hw_dbg("Flow Control = NONE.\n");
+ if (hw->fc.requested_mode == igc_fc_full) {
+ hw->fc.current_mode = igc_fc_full;
+ hw_dbg("Flow Control = FULL.\n");
} else {
hw->fc.current_mode = igc_fc_rx_pause;
hw_dbg("Flow Control = RX PAUSE frames only.\n");
}
+ }
- /* Now we need to do one last check... If we auto-
- * negotiated to HALF DUPLEX, flow control should not be
- * enabled per IEEE 802.3 spec.
- */
- ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex);
- if (ret_val) {
- hw_dbg("Error getting link speed and duplex\n");
- goto out;
- }
+ /* For receiving PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 0 | 1 | 1 | 1 | igc_fc_tx_pause
+ */
+ else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+ hw->fc.current_mode = igc_fc_tx_pause;
+ hw_dbg("Flow Control = TX PAUSE frames only.\n");
+ }
+ /* For transmitting PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | 1 | 0 | 1 | igc_fc_rx_pause
+ */
+ else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+ !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+ hw->fc.current_mode = igc_fc_rx_pause;
+ hw_dbg("Flow Control = RX PAUSE frames only.\n");
+ }
+ /* Per the IEEE spec, at this point flow control should be
+ * disabled. However, we want to consider that we could
+ * be connected to a legacy switch that doesn't advertise
+ * desired flow control, but can be forced on the link
+ * partner. So if we advertised no flow control, that is
+ * what we will resolve to. If we advertised some kind of
+ * receive capability (Rx Pause Only or Full Flow Control)
+ * and the link partner advertised none, we will configure
+ * ourselves to enable Rx Flow Control only. We can do
+ * this safely for two reasons: If the link partner really
+ * didn't want flow control enabled, and we enable Rx, no
+ * harm done since we won't be receiving any PAUSE frames
+ * anyway. If the intent on the link partner was to have
+ * flow control enabled, then by us enabling RX only, we
+ * can at least receive pause frames and process them.
+ * This is a good idea because in most cases, since we are
+ * predominantly a server NIC, more times than not we will
+ * be asked to delay transmission of packets than asking
+ * our link partner to pause transmission of frames.
+ */
+ else if ((hw->fc.requested_mode == igc_fc_none) ||
+ (hw->fc.requested_mode == igc_fc_tx_pause) ||
+ (hw->fc.strict_ieee)) {
+ hw->fc.current_mode = igc_fc_none;
+ hw_dbg("Flow Control = NONE.\n");
+ } else {
+ hw->fc.current_mode = igc_fc_rx_pause;
+ hw_dbg("Flow Control = RX PAUSE frames only.\n");
+ }
- if (duplex == HALF_DUPLEX)
- hw->fc.current_mode = igc_fc_none;
+ /* Now we need to do one last check... If we auto-
+ * negotiated to HALF DUPLEX, flow control should not be
+ * enabled per IEEE 802.3 spec.
+ */
+ ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex);
+ if (ret_val) {
+ hw_dbg("Error getting link speed and duplex\n");
+ goto out;
+ }
- /* Now we call a subroutine to actually force the MAC
- * controller to use the correct flow control settings.
- */
- ret_val = igc_force_mac_fc(hw);
- if (ret_val) {
- hw_dbg("Error forcing flow control settings\n");
- goto out;
- }
+ if (duplex == HALF_DUPLEX)
+ hw->fc.current_mode = igc_fc_none;
+
+ /* Now we call a subroutine to actually force the MAC
+ * controller to use the correct flow control settings.
+ */
+ ret_val = igc_force_mac_fc(hw);
+ if (ret_val) {
+ hw_dbg("Error forcing flow control settings\n");
+ goto out;
}
out:
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 35ad40a803cb..7aafa60ba0c8 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -12,6 +12,7 @@
#include <linux/bpf_trace.h>
#include <net/xdp_sock_drv.h>
#include <linux/pci.h>
+#include <linux/mdio.h>
#include <net/ipv6.h>
@@ -31,7 +32,6 @@
static int debug = -1;
-MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
module_param(debug, int, 0);
@@ -683,9 +683,9 @@ static void igc_configure_rx_ring(struct igc_adapter *adapter,
wr32(IGC_SRRCTL(reg_idx), srrctl);
- rxdctl |= IGC_RX_PTHRESH;
- rxdctl |= IGC_RX_HTHRESH << 8;
- rxdctl |= IGC_RX_WTHRESH << 16;
+ rxdctl |= IGC_RXDCTL_PTHRESH;
+ rxdctl |= IGC_RXDCTL_HTHRESH << 8;
+ rxdctl |= IGC_RXDCTL_WTHRESH << 16;
/* initialize rx_buffer_info */
memset(ring->rx_buffer_info, 0,
@@ -749,11 +749,9 @@ static void igc_configure_tx_ring(struct igc_adapter *adapter,
wr32(IGC_TDH(reg_idx), 0);
writel(0, ring->tail);
- txdctl |= IGC_TX_PTHRESH;
- txdctl |= IGC_TX_HTHRESH << 8;
- txdctl |= IGC_TX_WTHRESH << 16;
+ txdctl |= IGC_TXDCTL_PTHRESH(8) | IGC_TXDCTL_HTHRESH(1) |
+ IGC_TXDCTL_WTHRESH(16) | IGC_TXDCTL_QUEUE_ENABLE;
- txdctl |= IGC_TXDCTL_QUEUE_ENABLE;
wr32(IGC_TXDCTL(reg_idx), txdctl);
}
@@ -1092,10 +1090,12 @@ static int igc_init_empty_frame(struct igc_ring *ring,
dma = dma_map_single(ring->dev, skb->data, size, DMA_TO_DEVICE);
if (dma_mapping_error(ring->dev, dma)) {
- netdev_err_once(ring->netdev, "Failed to map DMA for TX\n");
+ net_err_ratelimited("%s: DMA mapping error for empty frame\n",
+ netdev_name(ring->netdev));
return -ENOMEM;
}
+ buffer->type = IGC_TX_BUFFER_TYPE_SKB;
buffer->skb = skb;
buffer->protocol = 0;
buffer->bytecount = skb->len;
@@ -1107,20 +1107,12 @@ static int igc_init_empty_frame(struct igc_ring *ring,
return 0;
}
-static int igc_init_tx_empty_descriptor(struct igc_ring *ring,
- struct sk_buff *skb,
- struct igc_tx_buffer *first)
+static void igc_init_tx_empty_descriptor(struct igc_ring *ring,
+ struct sk_buff *skb,
+ struct igc_tx_buffer *first)
{
union igc_adv_tx_desc *desc;
u32 cmd_type, olinfo_status;
- int err;
-
- if (!igc_desc_unused(ring))
- return -EBUSY;
-
- err = igc_init_empty_frame(ring, first, skb);
- if (err)
- return err;
cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
@@ -1139,8 +1131,6 @@ static int igc_init_tx_empty_descriptor(struct igc_ring *ring,
ring->next_to_use++;
if (ring->next_to_use == ring->count)
ring->next_to_use = 0;
-
- return 0;
}
#define IGC_EMPTY_FRAME_SIZE 60
@@ -1566,6 +1556,40 @@ static bool igc_request_tx_tstamp(struct igc_adapter *adapter, struct sk_buff *s
return false;
}
+static int igc_insert_empty_frame(struct igc_ring *tx_ring)
+{
+ struct igc_tx_buffer *empty_info;
+ struct sk_buff *empty_skb;
+ void *data;
+ int ret;
+
+ empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+ empty_skb = alloc_skb(IGC_EMPTY_FRAME_SIZE, GFP_ATOMIC);
+ if (unlikely(!empty_skb)) {
+ net_err_ratelimited("%s: skb alloc error for empty frame\n",
+ netdev_name(tx_ring->netdev));
+ return -ENOMEM;
+ }
+
+ data = skb_put(empty_skb, IGC_EMPTY_FRAME_SIZE);
+ memset(data, 0, IGC_EMPTY_FRAME_SIZE);
+
+ /* Prepare DMA mapping and Tx buffer information */
+ ret = igc_init_empty_frame(tx_ring, empty_info, empty_skb);
+ if (unlikely(ret)) {
+ dev_kfree_skb_any(empty_skb);
+ return ret;
+ }
+
+ /* Prepare advanced context descriptor for empty packet */
+ igc_tx_ctxtdesc(tx_ring, 0, false, 0, 0, 0);
+
+ /* Prepare advanced data descriptor for empty packet */
+ igc_init_tx_empty_descriptor(tx_ring, empty_skb, empty_info);
+
+ return 0;
+}
+
static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
struct igc_ring *tx_ring)
{
@@ -1585,6 +1609,7 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
* + 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD,
* + 2 desc gap to keep tail from touching head,
* + 1 desc for context descriptor,
+ * + 2 desc for inserting an empty packet for launch time,
* otherwise try next time
*/
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
@@ -1604,24 +1629,16 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
launch_time = igc_tx_launchtime(tx_ring, txtime, &first_flag, &insert_empty);
if (insert_empty) {
- struct igc_tx_buffer *empty_info;
- struct sk_buff *empty;
- void *data;
-
- empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
- empty = alloc_skb(IGC_EMPTY_FRAME_SIZE, GFP_ATOMIC);
- if (!empty)
- goto done;
-
- data = skb_put(empty, IGC_EMPTY_FRAME_SIZE);
- memset(data, 0, IGC_EMPTY_FRAME_SIZE);
-
- igc_tx_ctxtdesc(tx_ring, 0, false, 0, 0, 0);
-
- if (igc_init_tx_empty_descriptor(tx_ring,
- empty,
- empty_info) < 0)
- dev_kfree_skb_any(empty);
+ /* Reset the launch time if the required empty frame fails to
+ * be inserted. However, this packet is not dropped, so it
+ * "dirties" the current Qbv cycle. This ensures that the
+ * upcoming packet, which is scheduled in the next Qbv cycle,
+ * does not require an empty frame. This way, the launch time
+ * continues to function correctly despite the current failure
+ * to insert the empty frame.
+ */
+ if (igc_insert_empty_frame(tx_ring))
+ launch_time = 0;
}
done:
@@ -1649,7 +1666,8 @@ done:
if (igc_request_tx_tstamp(adapter, skb, &tstamp_flags)) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
tx_flags |= IGC_TX_FLAGS_TSTAMP | tstamp_flags;
- if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP_USE_CYCLES)
+ if (skb->sk &&
+ READ_ONCE(skb->sk->sk_tsflags) & SOF_TIMESTAMPING_BIND_PHC)
tx_flags |= IGC_TX_FLAGS_TSTAMP_TIMER_1;
} else {
adapter->tx_hwtstamp_skipped++;
@@ -1667,6 +1685,15 @@ done:
first->tx_flags = tx_flags;
first->protocol = protocol;
+ /* For preemptible queue, manually pad the skb so that HW includes
+ * padding bytes in mCRC calculation
+ */
+ if (tx_ring->preemptible && skb->len < ETH_ZLEN) {
+ if (skb_padto(skb, ETH_ZLEN))
+ goto out_drop;
+ skb_put(skb, ETH_ZLEN - skb->len);
+ }
+
tso = igc_tso(tx_ring, first, launch_time, first_flag, &hdr_len);
if (tso < 0)
goto out_drop;
@@ -2123,10 +2150,6 @@ static bool igc_cleanup_headers(struct igc_ring *rx_ring,
union igc_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
- /* XDP packets use error pointer so abort at this point */
- if (IS_ERR(skb))
- return true;
-
if (unlikely(igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_RXE))) {
struct net_device *netdev = rx_ring->netdev;
@@ -2191,6 +2214,7 @@ static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
page = dev_alloc_pages(igc_rx_pg_order(rx_ring));
if (unlikely(!page)) {
rx_ring->rx_stats.alloc_failed++;
+ set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
return false;
}
@@ -2207,6 +2231,7 @@ static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
__free_page(page);
rx_ring->rx_stats.alloc_failed++;
+ set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
return false;
}
@@ -2446,8 +2471,7 @@ unmap:
return -ENOMEM;
}
-static struct igc_ring *igc_xdp_get_tx_ring(struct igc_adapter *adapter,
- int cpu)
+struct igc_ring *igc_get_tx_ring(struct igc_adapter *adapter, int cpu)
{
int index = cpu;
@@ -2471,7 +2495,7 @@ static int igc_xdp_xmit_back(struct igc_adapter *adapter, struct xdp_buff *xdp)
if (unlikely(!xdpf))
return -EFAULT;
- ring = igc_xdp_get_tx_ring(adapter, cpu);
+ ring = igc_get_tx_ring(adapter, cpu);
nq = txring_txq(ring);
__netif_tx_lock(nq, cpu);
@@ -2513,8 +2537,7 @@ out_failure:
}
}
-static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter,
- struct xdp_buff *xdp)
+static int igc_xdp_run_prog(struct igc_adapter *adapter, struct xdp_buff *xdp)
{
struct bpf_prog *prog;
int res;
@@ -2528,11 +2551,11 @@ static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter,
res = __igc_xdp_run_prog(adapter, prog, xdp);
out:
- return ERR_PTR(-res);
+ return res;
}
/* This function assumes __netif_tx_lock is held by the caller. */
-static void igc_flush_tx_descriptors(struct igc_ring *ring)
+void igc_flush_tx_descriptors(struct igc_ring *ring)
{
/* Once tail pointer is updated, hardware can fetch the descriptors
* any time so we issue a write membar here to ensure all memory
@@ -2549,7 +2572,7 @@ static void igc_finalize_xdp(struct igc_adapter *adapter, int status)
struct igc_ring *ring;
if (status & IGC_XDP_TX) {
- ring = igc_xdp_get_tx_ring(adapter, cpu);
+ ring = igc_get_tx_ring(adapter, cpu);
nq = txring_txq(ring);
__netif_tx_lock(nq, cpu);
@@ -2583,6 +2606,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
struct sk_buff *skb = rx_ring->skb;
u16 cleaned_count = igc_desc_unused(rx_ring);
int xdp_status = 0, rx_buffer_pgcnt;
+ int xdp_res = 0;
while (likely(total_packets < budget)) {
struct igc_xdp_buff ctx = { .rx_ts = NULL };
@@ -2620,6 +2644,14 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
size -= IGC_TS_HDR_LEN;
}
+ if (igc_fpe_is_pmac_enabled(adapter) &&
+ igc_fpe_handle_mpacket(adapter, rx_desc, size, pktbuf)) {
+ /* Advance the ring next-to-clean */
+ igc_is_non_eop(rx_ring, rx_desc);
+ cleaned_count++;
+ continue;
+ }
+
if (!skb) {
xdp_init_buff(&ctx.xdp, truesize, &rx_ring->xdp_rxq);
xdp_prepare_buff(&ctx.xdp, pktbuf - igc_rx_offset(rx_ring),
@@ -2628,12 +2660,10 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
xdp_buff_clear_frags_flag(&ctx.xdp);
ctx.rx_desc = rx_desc;
- skb = igc_xdp_run_prog(adapter, &ctx.xdp);
+ xdp_res = igc_xdp_run_prog(adapter, &ctx.xdp);
}
- if (IS_ERR(skb)) {
- unsigned int xdp_res = -PTR_ERR(skb);
-
+ if (xdp_res) {
switch (xdp_res) {
case IGC_XDP_CONSUMED:
rx_buffer->pagecnt_bias++;
@@ -2655,9 +2685,10 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
skb = igc_construct_skb(rx_ring, rx_buffer, &ctx);
/* exit if we failed to retrieve a buffer */
- if (!skb) {
+ if (!xdp_res && !skb) {
rx_ring->rx_stats.alloc_failed++;
rx_buffer->pagecnt_bias++;
+ set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
break;
}
@@ -2669,7 +2700,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
continue;
/* verify the packet layout is correct */
- if (igc_cleanup_headers(rx_ring, rx_desc, skb)) {
+ if (xdp_res || igc_cleanup_headers(rx_ring, rx_desc, skb)) {
skb = NULL;
continue;
}
@@ -2704,16 +2735,16 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
}
static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring,
- struct xdp_buff *xdp)
+ struct igc_xdp_buff *ctx)
{
+ struct xdp_buff *xdp = &ctx->xdp;
unsigned int totalsize = xdp->data_end - xdp->data_meta;
unsigned int metasize = xdp->data - xdp->data_meta;
struct sk_buff *skb;
net_prefetch(xdp->data_meta);
- skb = __napi_alloc_skb(&ring->q_vector->napi, totalsize,
- GFP_ATOMIC | __GFP_NOWARN);
+ skb = napi_alloc_skb(&ring->q_vector->napi, totalsize);
if (unlikely(!skb))
return NULL;
@@ -2725,26 +2756,28 @@ static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring,
__skb_pull(skb, metasize);
}
+ if (ctx->rx_ts) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP_NETDEV;
+ skb_hwtstamps(skb)->netdev_data = ctx->rx_ts;
+ }
+
return skb;
}
static void igc_dispatch_skb_zc(struct igc_q_vector *q_vector,
union igc_adv_rx_desc *desc,
- struct xdp_buff *xdp,
- ktime_t timestamp)
+ struct igc_xdp_buff *ctx)
{
struct igc_ring *ring = q_vector->rx.ring;
struct sk_buff *skb;
- skb = igc_construct_skb_zc(ring, xdp);
+ skb = igc_construct_skb_zc(ring, ctx);
if (!skb) {
ring->rx_stats.alloc_failed++;
+ set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &ring->flags);
return;
}
- if (timestamp)
- skb_hwtstamps(skb)->hwtstamp = timestamp;
-
if (igc_cleanup_headers(ring, desc, skb))
return;
@@ -2780,7 +2813,6 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
union igc_adv_rx_desc *desc;
struct igc_rx_buffer *bi;
struct igc_xdp_buff *ctx;
- ktime_t timestamp = 0;
unsigned int size;
int res;
@@ -2810,15 +2842,17 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
*/
bi->xdp->data_meta += IGC_TS_HDR_LEN;
size -= IGC_TS_HDR_LEN;
+ } else {
+ ctx->rx_ts = NULL;
}
bi->xdp->data_end = bi->xdp->data + size;
- xsk_buff_dma_sync_for_cpu(bi->xdp, ring->xsk_pool);
+ xsk_buff_dma_sync_for_cpu(bi->xdp);
res = __igc_xdp_run_prog(adapter, prog, bi->xdp);
switch (res) {
case IGC_XDP_PASS:
- igc_dispatch_skb_zc(q_vector, desc, bi->xdp, timestamp);
+ igc_dispatch_skb_zc(q_vector, desc, ctx);
fallthrough;
case IGC_XDP_CONSUMED:
xsk_buff_free(bi->xdp);
@@ -2874,6 +2908,128 @@ static void igc_update_tx_stats(struct igc_q_vector *q_vector,
q_vector->tx.total_packets += packets;
}
+static void igc_xsk_request_timestamp(void *_priv)
+{
+ struct igc_metadata_request *meta_req = _priv;
+ struct igc_ring *tx_ring = meta_req->tx_ring;
+ struct igc_tx_timestamp_request *tstamp;
+ u32 tx_flags = IGC_TX_FLAGS_TSTAMP;
+ struct igc_adapter *adapter;
+ unsigned long lock_flags;
+ bool found = false;
+ int i;
+
+ if (test_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags)) {
+ adapter = netdev_priv(tx_ring->netdev);
+
+ spin_lock_irqsave(&adapter->ptp_tx_lock, lock_flags);
+
+ /* Search for available tstamp regs */
+ for (i = 0; i < IGC_MAX_TX_TSTAMP_REGS; i++) {
+ tstamp = &adapter->tx_tstamp[i];
+
+ /* tstamp->skb and tstamp->xsk_tx_buffer are in union.
+ * When tstamp->skb is equal to NULL,
+ * tstamp->xsk_tx_buffer is equal to NULL as well.
+ * This condition means that the particular tstamp reg
+ * is not occupied by other packet.
+ */
+ if (!tstamp->skb) {
+ found = true;
+ break;
+ }
+ }
+
+ /* Return if no available tstamp regs */
+ if (!found) {
+ adapter->tx_hwtstamp_skipped++;
+ spin_unlock_irqrestore(&adapter->ptp_tx_lock,
+ lock_flags);
+ return;
+ }
+
+ tstamp->start = jiffies;
+ tstamp->xsk_queue_index = tx_ring->queue_index;
+ tstamp->xsk_tx_buffer = meta_req->tx_buffer;
+ tstamp->buffer_type = IGC_TX_BUFFER_TYPE_XSK;
+
+ /* Hold the transmit completion until timestamp is ready */
+ meta_req->tx_buffer->xsk_pending_ts = true;
+
+ /* Keep the pointer to tx_timestamp, which is located in XDP
+ * metadata area. It is the location to store the value of
+ * tx hardware timestamp.
+ */
+ xsk_tx_metadata_to_compl(meta_req->meta, &tstamp->xsk_meta);
+
+ /* Set timestamp bit based on the _TSTAMP(_X) bit. */
+ tx_flags |= tstamp->flags;
+ meta_req->cmd_type |= IGC_SET_FLAG(tx_flags,
+ IGC_TX_FLAGS_TSTAMP,
+ (IGC_ADVTXD_MAC_TSTAMP));
+ meta_req->cmd_type |= IGC_SET_FLAG(tx_flags,
+ IGC_TX_FLAGS_TSTAMP_1,
+ (IGC_ADVTXD_TSTAMP_REG_1));
+ meta_req->cmd_type |= IGC_SET_FLAG(tx_flags,
+ IGC_TX_FLAGS_TSTAMP_2,
+ (IGC_ADVTXD_TSTAMP_REG_2));
+ meta_req->cmd_type |= IGC_SET_FLAG(tx_flags,
+ IGC_TX_FLAGS_TSTAMP_3,
+ (IGC_ADVTXD_TSTAMP_REG_3));
+
+ spin_unlock_irqrestore(&adapter->ptp_tx_lock, lock_flags);
+ }
+}
+
+static u64 igc_xsk_fill_timestamp(void *_priv)
+{
+ return *(u64 *)_priv;
+}
+
+static void igc_xsk_request_launch_time(u64 launch_time, void *_priv)
+{
+ struct igc_metadata_request *meta_req = _priv;
+ struct igc_ring *tx_ring = meta_req->tx_ring;
+ __le32 launch_time_offset;
+ bool insert_empty = false;
+ bool first_flag = false;
+ u16 used_desc = 0;
+
+ if (!tx_ring->launchtime_enable)
+ return;
+
+ launch_time_offset = igc_tx_launchtime(tx_ring,
+ ns_to_ktime(launch_time),
+ &first_flag, &insert_empty);
+ if (insert_empty) {
+ /* Disregard the launch time request if the required empty frame
+ * fails to be inserted.
+ */
+ if (igc_insert_empty_frame(tx_ring))
+ return;
+
+ meta_req->tx_buffer =
+ &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+ /* Inserting an empty packet requires two descriptors:
+ * one data descriptor and one context descriptor.
+ */
+ used_desc += 2;
+ }
+
+ /* Use one context descriptor to specify launch time and first flag. */
+ igc_tx_ctxtdesc(tx_ring, launch_time_offset, first_flag, 0, 0, 0);
+ used_desc += 1;
+
+ /* Update the number of used descriptors in this request */
+ meta_req->used_desc += used_desc;
+}
+
+const struct xsk_tx_metadata_ops igc_xsk_tx_metadata_ops = {
+ .tmo_request_timestamp = igc_xsk_request_timestamp,
+ .tmo_fill_timestamp = igc_xsk_fill_timestamp,
+ .tmo_request_launch_time = igc_xsk_request_launch_time,
+};
+
static void igc_xdp_xmit_zc(struct igc_ring *ring)
{
struct xsk_buff_pool *pool = ring->xsk_pool;
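The xsk_tx_metadata_ops registered above are driven from user space through AF_XDP TX metadata. A minimal, hedged user-space sketch follows (not part of the patch): it assumes the umem was registered with tx_metadata_len == sizeof(struct xsk_tx_metadata), and the flag and field names follow the if_xdp.h UAPI that introduced launch-time support — verify them against the headers actually in use.

#include <string.h>
#include <linux/if_xdp.h>

/* Fill per-descriptor TX metadata so the driver's tmo_request_timestamp()
 * and tmo_request_launch_time() callbacks above are invoked.
 */
static void fill_tx_metadata(void *umem_area, struct xdp_desc *desc,
			     __u64 launch_time_ns)
{
	/* Metadata sits immediately before the packet data in the umem frame. */
	struct xsk_tx_metadata *meta =
		(struct xsk_tx_metadata *)((char *)umem_area + desc->addr) - 1;

	memset(meta, 0, sizeof(*meta));
	meta->flags = XDP_TXMD_FLAGS_TIMESTAMP |	/* request completion timestamp */
		      XDP_TXMD_FLAGS_LAUNCH_TIME;	/* request launch-time offload */
	meta->request.launch_time = launch_time_ns;	/* absolute PHC time in ns */

	desc->options |= XDP_TX_METADATA;	/* mark descriptor as carrying metadata */
}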
@@ -2894,25 +3050,51 @@ static void igc_xdp_xmit_zc(struct igc_ring *ring)
ntu = ring->next_to_use;
budget = igc_desc_unused(ring);
- while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) {
- u32 cmd_type, olinfo_status;
+ /* Packets with launch time require one data descriptor and one context
+ * descriptor. When the launch time falls into the next Qbv cycle, we
+ * may need to insert an empty packet, which requires two more
+ * descriptors. Therefore, to be safe, we always ensure we have at least
+ * 4 descriptors available.
+ */
+ while (budget >= 4 && xsk_tx_peek_desc(pool, &xdp_desc)) {
+ struct igc_metadata_request meta_req;
+ struct xsk_tx_metadata *meta = NULL;
struct igc_tx_buffer *bi;
+ u32 olinfo_status;
dma_addr_t dma;
- cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
- IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
- xdp_desc.len;
+ meta_req.cmd_type = IGC_ADVTXD_DTYP_DATA |
+ IGC_ADVTXD_DCMD_DEXT |
+ IGC_ADVTXD_DCMD_IFCS |
+ IGC_TXD_DCMD | xdp_desc.len;
olinfo_status = xdp_desc.len << IGC_ADVTXD_PAYLEN_SHIFT;
dma = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
+ meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
xsk_buff_raw_dma_sync_for_device(pool, dma, xdp_desc.len);
+ bi = &ring->tx_buffer_info[ntu];
+
+ meta_req.tx_ring = ring;
+ meta_req.tx_buffer = bi;
+ meta_req.meta = meta;
+ meta_req.used_desc = 0;
+ xsk_tx_metadata_request(meta, &igc_xsk_tx_metadata_ops,
+ &meta_req);
+
+ /* xsk_tx_metadata_request() may have updated next_to_use */
+ ntu = ring->next_to_use;
+
+ /* xsk_tx_metadata_request() may have updated Tx buffer info */
+ bi = meta_req.tx_buffer;
+
+ /* xsk_tx_metadata_request() may use a few descriptors */
+ budget -= meta_req.used_desc;
tx_desc = IGC_TX_DESC(ring, ntu);
- tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ tx_desc->read.cmd_type_len = cpu_to_le32(meta_req.cmd_type);
tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
tx_desc->read.buffer_addr = cpu_to_le64(dma);
- bi = &ring->tx_buffer_info[ntu];
bi->type = IGC_TX_BUFFER_TYPE_XSK;
bi->protocol = 0;
bi->bytecount = xdp_desc.len;
@@ -2925,9 +3107,11 @@ static void igc_xdp_xmit_zc(struct igc_ring *ring)
ntu++;
if (ntu == ring->count)
ntu = 0;
+
+ ring->next_to_use = ntu;
+ budget--;
}
- ring->next_to_use = ntu;
if (tx_desc) {
igc_flush_tx_descriptors(ring);
xsk_tx_release(pool);
@@ -2975,6 +3159,18 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD)))
break;
+ if (igc_fpe_is_pmac_enabled(adapter) &&
+ igc_fpe_transmitted_smd_v(tx_desc))
+ ethtool_mmsv_event_handle(&adapter->fpe.mmsv,
+ ETHTOOL_MMSV_LD_SENT_VERIFY_MPACKET);
+
+ /* Hold the completions while there's a pending tx hardware
+ * timestamp request from XDP Tx metadata.
+ */
+ if (tx_buffer->type == IGC_TX_BUFFER_TYPE_XSK &&
+ tx_buffer->xsk_pending_ts)
+ break;
+
/* clear next_to_watch to prevent false hangs */
tx_buffer->next_to_watch = NULL;
@@ -3678,6 +3874,22 @@ static void igc_del_flex_filter(struct igc_adapter *adapter,
wr32(IGC_WUFC, wufc);
}
+static void igc_set_default_queue_filter(struct igc_adapter *adapter, u32 queue)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 mrqc = rd32(IGC_MRQC);
+
+ mrqc &= ~IGC_MRQC_DEFAULT_QUEUE_MASK;
+ mrqc |= FIELD_PREP(IGC_MRQC_DEFAULT_QUEUE_MASK, queue);
+ wr32(IGC_MRQC, mrqc);
+}
+
+static void igc_reset_default_queue_filter(struct igc_adapter *adapter)
+{
+ /* Reset the default queue to its default value which is Queue 0 */
+ igc_set_default_queue_filter(adapter, 0);
+}
+
static int igc_enable_nfc_rule(struct igc_adapter *adapter,
struct igc_nfc_rule *rule)
{
@@ -3716,6 +3928,9 @@ static int igc_enable_nfc_rule(struct igc_adapter *adapter,
return err;
}
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_DEFAULT_QUEUE)
+ igc_set_default_queue_filter(adapter, rule->action);
+
return 0;
}
@@ -3743,6 +3958,9 @@ static void igc_disable_nfc_rule(struct igc_adapter *adapter,
if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR)
igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST,
rule->filter.dst_addr);
+
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_DEFAULT_QUEUE)
+ igc_reset_default_queue_filter(adapter);
}
/**
@@ -3860,6 +4078,30 @@ static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr)
}
/**
+ * igc_enable_empty_addr_recv - Enable Rx of packets with all-zeroes MAC address
+ * @adapter: Pointer to the igc_adapter structure.
+ *
+ * Frame preemption verification requires that packets with the all-zeroes
+ * MAC address are allowed to be received by the driver. This function adds the
+ * all-zeroes destination address to the list of acceptable addresses.
+ *
+ * Return: 0 on success, negative value otherwise.
+ */
+int igc_enable_empty_addr_recv(struct igc_adapter *adapter)
+{
+ u8 empty[ETH_ALEN] = {};
+
+ return igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, empty, -1);
+}
+
+void igc_disable_empty_addr_recv(struct igc_adapter *adapter)
+{
+ u8 empty[ETH_ALEN] = {};
+
+ igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, empty);
+}
+
+/**
* igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
* @netdev: network interface device structure
*
@@ -4845,6 +5087,22 @@ static int igc_sw_init(struct igc_adapter *adapter)
return 0;
}
+static void igc_set_queue_napi(struct igc_adapter *adapter, int vector,
+ struct napi_struct *napi)
+{
+ struct igc_q_vector *q_vector = adapter->q_vector[vector];
+
+ if (q_vector->rx.ring)
+ netif_queue_set_napi(adapter->netdev,
+ q_vector->rx.ring->queue_index,
+ NETDEV_QUEUE_TYPE_RX, napi);
+
+ if (q_vector->tx.ring)
+ netif_queue_set_napi(adapter->netdev,
+ q_vector->tx.ring->queue_index,
+ NETDEV_QUEUE_TYPE_TX, napi);
+}
+
/**
* igc_up - Open the interface and prepare it to handle traffic
* @adapter: board private structure
@@ -4852,6 +5110,7 @@ static int igc_sw_init(struct igc_adapter *adapter)
void igc_up(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
+ struct napi_struct *napi;
int i = 0;
/* hardware has been reset, we need to reload some things */
@@ -4859,8 +5118,11 @@ void igc_up(struct igc_adapter *adapter)
clear_bit(__IGC_DOWN, &adapter->state);
- for (i = 0; i < adapter->num_q_vectors; i++)
- napi_enable(&adapter->q_vector[i]->napi);
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ napi = &adapter->q_vector[i]->napi;
+ napi_enable(napi);
+ igc_set_queue_napi(adapter, i, napi);
+ }
if (adapter->msix_entries)
igc_configure_msix(adapter);
@@ -5089,12 +5351,13 @@ void igc_down(struct igc_adapter *adapter)
for (i = 0; i < adapter->num_q_vectors; i++) {
if (adapter->q_vector[i]) {
napi_synchronize(&adapter->q_vector[i]->napi);
+ igc_set_queue_napi(adapter, i, NULL);
napi_disable(&adapter->q_vector[i]->napi);
}
}
- del_timer_sync(&adapter->watchdog_timer);
- del_timer_sync(&adapter->phy_info_timer);
+ timer_delete_sync(&adapter->watchdog_timer);
+ timer_delete_sync(&adapter->phy_info_timer);
/* record the stats before reset*/
spin_lock(&adapter->stats64_lock);
@@ -5113,6 +5376,9 @@ void igc_down(struct igc_adapter *adapter)
igc_disable_all_tx_rings_hw(adapter);
igc_clean_all_tx_rings(adapter);
igc_clean_all_rx_rings(adapter);
+
+ if (adapter->fpe.mmsv.pmac_enabled)
+ ethtool_mmsv_stop(&adapter->fpe.mmsv);
}
void igc_reinit_locked(struct igc_adapter *adapter)
@@ -5176,7 +5442,7 @@ static int igc_change_mtu(struct net_device *netdev, int new_mtu)
igc_down(adapter);
netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (netif_running(netdev))
igc_up(adapter);
@@ -5473,6 +5739,9 @@ static int igc_request_msix(struct igc_adapter *adapter)
q_vector);
if (err)
goto err_free;
+
+ netif_napi_set_irq(&q_vector->napi,
+ adapter->msix_entries[vector].vector);
}
igc_configure_msix(adapter);
@@ -5509,7 +5778,8 @@ static void igc_clear_interrupt_scheme(struct igc_adapter *adapter)
*/
static void igc_update_phy_info(struct timer_list *t)
{
- struct igc_adapter *adapter = from_timer(adapter, t, phy_info_timer);
+ struct igc_adapter *adapter = timer_container_of(adapter, t,
+ phy_info_timer);
igc_get_phy_info(&adapter->hw);
}
@@ -5551,7 +5821,8 @@ bool igc_has_link(struct igc_adapter *adapter)
*/
static void igc_watchdog(struct timer_list *t)
{
- struct igc_adapter *adapter = from_timer(adapter, t, watchdog_timer);
+ struct igc_adapter *adapter = timer_container_of(adapter, t,
+ watchdog_timer);
/* Do the rest outside of interrupt context */
schedule_work(&adapter->watchdog_task);
}
@@ -5634,6 +5905,10 @@ static void igc_watchdog_task(struct work_struct *work)
*/
igc_tsn_adjust_txtime_offset(adapter);
+ if (adapter->fpe.mmsv.pmac_enabled)
+ ethtool_mmsv_link_state_handle(&adapter->fpe.mmsv,
+ true);
+
if (adapter->link_speed != SPEED_1000)
goto no_wait;
@@ -5669,6 +5944,10 @@ no_wait:
netdev_info(netdev, "NIC Link is Down\n");
netif_carrier_off(netdev);
+ if (adapter->fpe.mmsv.pmac_enabled)
+ ethtool_mmsv_link_state_handle(&adapter->fpe.mmsv,
+ false);
+
/* link state has changed, schedule phy info update */
if (!test_bit(__IGC_DOWN, &adapter->state))
mod_timer(&adapter->phy_info_timer,
@@ -5708,11 +5987,29 @@ no_wait:
if (adapter->flags & IGC_FLAG_HAS_MSIX) {
u32 eics = 0;
- for (i = 0; i < adapter->num_q_vectors; i++)
- eics |= adapter->q_vector[i]->eims_value;
- wr32(IGC_EICS, eics);
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ struct igc_q_vector *q_vector = adapter->q_vector[i];
+ struct igc_ring *rx_ring;
+
+ if (!q_vector->rx.ring)
+ continue;
+
+ rx_ring = adapter->rx_ring[q_vector->rx.ring->queue_index];
+
+ if (test_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags)) {
+ eics |= q_vector->eims_value;
+ clear_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
+ }
+ }
+ if (eics)
+ wr32(IGC_EICS, eics);
} else {
- wr32(IGC_ICS, IGC_ICS_RXDMT0);
+ struct igc_ring *rx_ring = adapter->rx_ring[0];
+
+ if (test_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags)) {
+ clear_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
+ wr32(IGC_ICS, IGC_ICS_RXDMT0);
+ }
}
igc_ptp_tx_hang(adapter);
@@ -5897,6 +6194,7 @@ static int __igc_open(struct net_device *netdev, bool resuming)
struct igc_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = adapter->pdev;
struct igc_hw *hw = &adapter->hw;
+ struct napi_struct *napi;
int err = 0;
int i = 0;
@@ -5930,19 +6228,13 @@ static int __igc_open(struct net_device *netdev, bool resuming)
if (err)
goto err_req_irq;
- /* Notify the stack of the actual queue counts. */
- err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
- if (err)
- goto err_set_queues;
-
- err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
- if (err)
- goto err_set_queues;
-
clear_bit(__IGC_DOWN, &adapter->state);
- for (i = 0; i < adapter->num_q_vectors; i++)
- napi_enable(&adapter->q_vector[i]->napi);
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ napi = &adapter->q_vector[i]->napi;
+ napi_enable(napi);
+ igc_set_queue_napi(adapter, i, napi);
+ }
/* Clear any pending interrupts. */
rd32(IGC_ICR);
@@ -5959,8 +6251,6 @@ static int __igc_open(struct net_device *netdev, bool resuming)
return IGC_SUCCESS;
-err_set_queues:
- igc_free_irq(adapter);
err_req_irq:
igc_release_hw_control(adapter);
igc_power_down_phy_copper_base(&adapter->hw);
@@ -5977,6 +6267,17 @@ err_setup_tx:
int igc_open(struct net_device *netdev)
{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ int err;
+
+ /* Notify the stack of the actual queue counts. */
+ err = netif_set_real_num_queues(netdev, adapter->num_tx_queues,
+ adapter->num_rx_queues);
+ if (err) {
+ netdev_err(netdev, "error setting real queue count\n");
+ return err;
+ }
+
return __igc_open(netdev, false);
}
@@ -6024,24 +6325,6 @@ int igc_close(struct net_device *netdev)
return 0;
}
-/**
- * igc_ioctl - Access the hwtstamp interface
- * @netdev: network interface device structure
- * @ifr: interface request data
- * @cmd: ioctl command
- **/
-static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
- switch (cmd) {
- case SIOCGHWTSTAMP:
- return igc_ptp_get_ts_config(netdev, ifr);
- case SIOCSHWTSTAMP:
- return igc_ptp_set_ts_config(netdev, ifr);
- default:
- return -EOPNOTSUPP;
- }
-}
-
static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue,
bool enable)
{
@@ -6151,6 +6434,7 @@ static int igc_qbv_clear_schedule(struct igc_adapter *adapter)
ring->start_time = 0;
ring->end_time = NSEC_PER_SEC;
ring->max_sdu = 0;
+ ring->preemptible = false;
}
spin_lock_irqsave(&adapter->qbv_tx_lock, flags);
@@ -6207,21 +6491,6 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
size_t n;
int i;
- switch (qopt->cmd) {
- case TAPRIO_CMD_REPLACE:
- break;
- case TAPRIO_CMD_DESTROY:
- return igc_tsn_clear_schedule(adapter);
- case TAPRIO_CMD_STATS:
- igc_taprio_stats(adapter->netdev, &qopt->stats);
- return 0;
- case TAPRIO_CMD_QUEUE_STATS:
- igc_taprio_queue_stats(adapter->netdev, &qopt->queue_stats);
- return 0;
- default:
- return -EOPNOTSUPP;
- }
-
if (qopt->base_time < 0)
return -ERANGE;
@@ -6231,12 +6500,23 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
if (!validate_schedule(adapter, qopt))
return -EINVAL;
+ if (qopt->mqprio.preemptible_tcs &&
+ !(adapter->flags & IGC_FLAG_TSN_REVERSE_TXQ_PRIO)) {
+ NL_SET_ERR_MSG_MOD(qopt->extack,
+ "reverse-tsn-txq-prio private flag must be enabled before setting preemptible tc");
+ return -ENODEV;
+ }
+
+ igc_ptp_read(adapter, &now);
+
+ if (igc_tsn_is_taprio_activated_by_user(adapter) &&
+ is_base_time_past(qopt->base_time, &now))
+ adapter->qbv_config_change_errors++;
+
adapter->cycle_time = qopt->cycle_time;
adapter->base_time = qopt->base_time;
adapter->taprio_offload_enable = true;
- igc_ptp_read(adapter, &now);
-
for (n = 0; n < qopt->num_entries; n++) {
struct tc_taprio_sched_entry *e = &qopt->entries[n];
@@ -6318,6 +6598,8 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
ring->max_sdu = 0;
}
+ igc_fpe_save_preempt_queue(adapter, &qopt->mqprio);
+
return 0;
}
@@ -6330,7 +6612,23 @@ static int igc_tsn_enable_qbv_scheduling(struct igc_adapter *adapter,
if (hw->mac.type != igc_i225)
return -EOPNOTSUPP;
- err = igc_save_qbv_schedule(adapter, qopt);
+ switch (qopt->cmd) {
+ case TAPRIO_CMD_REPLACE:
+ err = igc_save_qbv_schedule(adapter, qopt);
+ break;
+ case TAPRIO_CMD_DESTROY:
+ err = igc_tsn_clear_schedule(adapter);
+ break;
+ case TAPRIO_CMD_STATS:
+ igc_taprio_stats(adapter->netdev, &qopt->stats);
+ return 0;
+ case TAPRIO_CMD_QUEUE_STATS:
+ igc_taprio_queue_stats(adapter->netdev, &qopt->queue_stats);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+
if (err)
return err;
@@ -6411,10 +6709,18 @@ static int igc_tc_query_caps(struct igc_adapter *adapter,
struct igc_hw *hw = &adapter->hw;
switch (base->type) {
+ case TC_SETUP_QDISC_MQPRIO: {
+ struct tc_mqprio_caps *caps = base->caps;
+
+ caps->validate_queue_counts = true;
+
+ return 0;
+ }
case TC_SETUP_QDISC_TAPRIO: {
struct tc_taprio_caps *caps = base->caps;
- caps->broken_mqprio = true;
+ if (!(adapter->flags & IGC_FLAG_TSN_REVERSE_TXQ_PRIO))
+ caps->broken_mqprio = true;
if (hw->mac.type == igc_i225) {
caps->supports_queue_max_sdu = true;
@@ -6428,6 +6734,96 @@ static int igc_tc_query_caps(struct igc_adapter *adapter,
}
}
+static void igc_save_mqprio_params(struct igc_adapter *adapter, u8 num_tc,
+ u16 *offset)
+{
+ int i;
+
+ adapter->strict_priority_enable = true;
+ adapter->num_tc = num_tc;
+
+ for (i = 0; i < num_tc; i++)
+ adapter->queue_per_tc[i] = offset[i];
+}
+
+static bool
+igc_tsn_is_tc_to_queue_priority_ordered(struct tc_mqprio_qopt_offload *mqprio)
+{
+ int num_tc = mqprio->qopt.num_tc;
+ int i;
+
+ for (i = 1; i < num_tc; i++) {
+ if (mqprio->qopt.offset[i - 1] > mqprio->qopt.offset[i])
+ return false;
+ }
+
+ return true;
+}
+
+static int igc_tsn_enable_mqprio(struct igc_adapter *adapter,
+ struct tc_mqprio_qopt_offload *mqprio)
+{
+ struct igc_hw *hw = &adapter->hw;
+ int err, i;
+
+ if (hw->mac.type != igc_i225)
+ return -EOPNOTSUPP;
+
+ if (!mqprio->qopt.num_tc) {
+ adapter->strict_priority_enable = false;
+ igc_fpe_clear_preempt_queue(adapter);
+ netdev_reset_tc(adapter->netdev);
+ goto apply;
+ }
+
+ /* There are as many TCs as Tx queues. */
+ if (mqprio->qopt.num_tc != adapter->num_tx_queues) {
+ NL_SET_ERR_MSG_FMT_MOD(mqprio->extack,
+ "Only %d traffic classes supported",
+ adapter->num_tx_queues);
+ return -EOPNOTSUPP;
+ }
+
+ /* Only one queue per TC is supported. */
+ for (i = 0; i < mqprio->qopt.num_tc; i++) {
+ if (mqprio->qopt.count[i] != 1) {
+ NL_SET_ERR_MSG_MOD(mqprio->extack,
+ "Only one queue per TC supported");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ if (!igc_tsn_is_tc_to_queue_priority_ordered(mqprio)) {
+ NL_SET_ERR_MSG_MOD(mqprio->extack,
+ "tc to queue mapping must preserve increasing priority (higher tc -> higher queue)");
+ return -EOPNOTSUPP;
+ }
+
+ igc_save_mqprio_params(adapter, mqprio->qopt.num_tc,
+ mqprio->qopt.offset);
+
+ err = netdev_set_num_tc(adapter->netdev, adapter->num_tc);
+ if (err)
+ return err;
+
+ for (i = 0; i < adapter->num_tc; i++) {
+ err = netdev_set_tc_queue(adapter->netdev, i, 1,
+ adapter->queue_per_tc[i]);
+ if (err)
+ return err;
+ }
+
+ /* In case the card is configured with less than four queues. */
+ for (; i < IGC_MAX_TX_QUEUES; i++)
+ adapter->queue_per_tc[i] = i;
+
+ mqprio->qopt.hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+ igc_fpe_save_preempt_queue(adapter, mqprio);
+
+apply:
+ return igc_tsn_offload_apply(adapter);
+}
+
static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
@@ -6447,6 +6843,9 @@ static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type,
case TC_SETUP_QDISC_CBS:
return igc_tsn_enable_cbs(adapter, type_data);
+ case TC_SETUP_QDISC_MQPRIO:
+ return igc_tsn_enable_mqprio(adapter, type_data);
+
default:
return -EOPNOTSUPP;
}
@@ -6482,7 +6881,7 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
return -EINVAL;
- ring = igc_xdp_get_tx_ring(adapter, cpu);
+ ring = igc_get_tx_ring(adapter, cpu);
nq = txring_txq(ring);
__netif_tx_lock(nq, cpu);
@@ -6576,53 +6975,15 @@ static const struct net_device_ops igc_netdev_ops = {
.ndo_fix_features = igc_fix_features,
.ndo_set_features = igc_set_features,
.ndo_features_check = igc_features_check,
- .ndo_eth_ioctl = igc_ioctl,
.ndo_setup_tc = igc_setup_tc,
.ndo_bpf = igc_bpf,
.ndo_xdp_xmit = igc_xdp_xmit,
.ndo_xsk_wakeup = igc_xsk_wakeup,
.ndo_get_tstamp = igc_get_tstamp,
+ .ndo_hwtstamp_get = igc_ptp_hwtstamp_get,
+ .ndo_hwtstamp_set = igc_ptp_hwtstamp_set,
};
-/* PCIe configuration access */
-void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
-{
- struct igc_adapter *adapter = hw->back;
-
- pci_read_config_word(adapter->pdev, reg, value);
-}
-
-void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
-{
- struct igc_adapter *adapter = hw->back;
-
- pci_write_config_word(adapter->pdev, reg, *value);
-}
-
-s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
-{
- struct igc_adapter *adapter = hw->back;
-
- if (!pci_is_pcie(adapter->pdev))
- return -IGC_ERR_CONFIG;
-
- pcie_capability_read_word(adapter->pdev, reg, value);
-
- return IGC_SUCCESS;
-}
-
-s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
-{
- struct igc_adapter *adapter = hw->back;
-
- if (!pci_is_pcie(adapter->pdev))
- return -IGC_ERR_CONFIG;
-
- pcie_capability_write_word(adapter->pdev, reg, *value);
-
- return IGC_SUCCESS;
-}
-
u32 igc_rd32(struct igc_hw *hw, u32 reg)
{
struct igc_adapter *igc = container_of(hw, struct igc_adapter, hw);
@@ -6788,6 +7149,17 @@ static int igc_probe(struct pci_dev *pdev,
adapter->port_num = hw->bus.func;
adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
+ /* PCI config space info */
+ hw->vendor_id = pdev->vendor;
+ hw->device_id = pdev->device;
+ hw->revision_id = pdev->revision;
+ hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ hw->subsystem_device_id = pdev->subsystem_device;
+
+ /* Disable ASPM L1.2 on I226 devices to avoid packet loss */
+ if (igc_is_device_id_i226(hw))
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2);
+
err = pci_save_state(pdev);
if (err)
goto err_ioremap;
@@ -6803,19 +7175,13 @@ static int igc_probe(struct pci_dev *pdev,
netdev->netdev_ops = &igc_netdev_ops;
netdev->xdp_metadata_ops = &igc_xdp_metadata_ops;
+ netdev->xsk_tx_metadata_ops = &igc_xsk_tx_metadata_ops;
igc_ethtool_set_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
netdev->mem_start = pci_resource_start(pdev, 0);
netdev->mem_end = pci_resource_end(pdev, 0);
- /* PCI config space info */
- hw->vendor_id = pdev->vendor;
- hw->device_id = pdev->device;
- hw->revision_id = pdev->revision;
- hw->subsystem_vendor_id = pdev->subsystem_vendor;
- hw->subsystem_device_id = pdev->subsystem_device;
-
/* Copy the default MAC and PHY function pointers */
memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
@@ -6866,6 +7232,9 @@ static int igc_probe(struct pci_dev *pdev,
netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
NETDEV_XDP_ACT_XSK_ZEROCOPY;
+ /* enable HW vlan tag insertion/stripping by default */
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
+
/* MTU range: 68 - 9216 */
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
@@ -6898,8 +7267,8 @@ static int igc_probe(struct pci_dev *pdev,
}
/* configure RXPBSIZE and TXPBSIZE */
- wr32(IGC_RXPBS, I225_RXPBSIZE_DEFAULT);
- wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);
+ wr32(IGC_RXPBS, IGC_RXPBSIZE_EXP_BMC_DEFAULT);
+ wr32(IGC_TXPBS, IGC_TXPBSIZE_DEFAULT);
timer_setup(&adapter->watchdog_timer, igc_watchdog, 0);
timer_setup(&adapter->phy_info_timer, igc_update_phy_info, 0);
@@ -6907,12 +7276,11 @@ static int igc_probe(struct pci_dev *pdev,
INIT_WORK(&adapter->reset_task, igc_reset_task);
INIT_WORK(&adapter->watchdog_task, igc_watchdog_task);
- hrtimer_init(&adapter->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- adapter->hrtimer.function = &igc_qbv_scheduling_timer;
+ hrtimer_setup(&adapter->hrtimer, &igc_qbv_scheduling_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
/* Initialize link properties that are user-changeable */
adapter->fc_autoneg = true;
- hw->mac.autoneg = true;
hw->phy.autoneg_advertised = 0xaf;
hw->fc.requested_mode = igc_fc_default;
@@ -6932,6 +7300,8 @@ static int igc_probe(struct pci_dev *pdev,
igc_tsn_clear_schedule(adapter);
+ igc_fpe_init(adapter);
+
/* reset the hardware with the new settings */
igc_reset(adapter);
@@ -6965,14 +7335,21 @@ static int igc_probe(struct pci_dev *pdev,
if (IS_ENABLED(CONFIG_IGC_LEDS)) {
err = igc_led_setup(adapter);
- if (err)
- goto err_register;
+ if (err) {
+ netdev_warn_once(netdev,
+ "LED init failed (%d); continuing without LED support\n",
+ err);
+ adapter->leds_available = false;
+ } else {
+ adapter->leds_available = true;
+ }
}
return 0;
err_register:
igc_release_hw_control(adapter);
+ igc_ptp_stop(adapter);
err_eeprom:
if (!igc_check_reset_block(hw))
igc_reset_phy(hw);
@@ -7014,13 +7391,16 @@ static void igc_remove(struct pci_dev *pdev)
set_bit(__IGC_DOWN, &adapter->state);
- del_timer_sync(&adapter->watchdog_timer);
- del_timer_sync(&adapter->phy_info_timer);
+ timer_delete_sync(&adapter->watchdog_timer);
+ timer_delete_sync(&adapter->phy_info_timer);
cancel_work_sync(&adapter->reset_task);
cancel_work_sync(&adapter->watchdog_task);
hrtimer_cancel(&adapter->hrtimer);
+ if (IS_ENABLED(CONFIG_IGC_LEDS) && adapter->leds_available)
+ igc_led_free(adapter);
+
/* Release control of h/w to f/w. If f/w is AMT enabled, this
* would have already happened in close and is redundant.
*/
@@ -7105,8 +7485,7 @@ static int __igc_shutdown(struct pci_dev *pdev, bool *enable_wake,
return 0;
}
-#ifdef CONFIG_PM
-static int __maybe_unused igc_runtime_suspend(struct device *dev)
+static int igc_runtime_suspend(struct device *dev)
{
return __igc_shutdown(to_pci_dev(dev), NULL, 1);
}
@@ -7141,7 +7520,7 @@ static void igc_deliver_wake_packet(struct net_device *netdev)
netif_rx(skb);
}
-static int __maybe_unused igc_resume(struct device *dev)
+static int __igc_resume(struct device *dev, bool rpm)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -7151,7 +7530,6 @@ static int __maybe_unused igc_resume(struct device *dev)
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
- pci_save_state(pdev);
if (!pci_device_is_present(pdev))
return -ENODEV;
@@ -7165,6 +7543,9 @@ static int __maybe_unused igc_resume(struct device *dev)
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
+ if (igc_is_device_id_i226(hw))
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2);
+
if (igc_init_interrupt_scheme(adapter, true)) {
netdev_err(netdev, "Unable to allocate memory for queues\n");
return -ENOMEM;
@@ -7183,23 +7564,30 @@ static int __maybe_unused igc_resume(struct device *dev)
wr32(IGC_WUS, ~0);
- rtnl_lock();
- if (!err && netif_running(netdev))
+ if (netif_running(netdev)) {
+ if (!rpm)
+ rtnl_lock();
err = __igc_open(netdev, true);
-
- if (!err)
- netif_device_attach(netdev);
- rtnl_unlock();
+ if (!rpm)
+ rtnl_unlock();
+ if (!err)
+ netif_device_attach(netdev);
+ }
return err;
}
-static int __maybe_unused igc_runtime_resume(struct device *dev)
+static int igc_resume(struct device *dev)
+{
+ return __igc_resume(dev, false);
+}
+
+static int igc_runtime_resume(struct device *dev)
{
- return igc_resume(dev);
+ return __igc_resume(dev, true);
}
-static int __maybe_unused igc_suspend(struct device *dev)
+static int igc_suspend(struct device *dev)
{
return __igc_shutdown(to_pci_dev(dev), NULL, 0);
}
@@ -7214,7 +7602,6 @@ static int __maybe_unused igc_runtime_idle(struct device *dev)
return -EBUSY;
}
-#endif /* CONFIG_PM */
static void igc_shutdown(struct pci_dev *pdev)
{
@@ -7242,14 +7629,18 @@ static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev,
struct net_device *netdev = pci_get_drvdata(pdev);
struct igc_adapter *adapter = netdev_priv(netdev);
+ rtnl_lock();
netif_device_detach(netdev);
- if (state == pci_channel_io_perm_failure)
+ if (state == pci_channel_io_perm_failure) {
+ rtnl_unlock();
return PCI_ERS_RESULT_DISCONNECT;
+ }
if (netif_running(netdev))
igc_down(adapter);
pci_disable_device(pdev);
+ rtnl_unlock();
/* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
@@ -7260,7 +7651,7 @@ static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev,
* @pdev: Pointer to PCI device
*
* Restart the card from scratch, as if from a cold-boot. Implementation
- * resembles the first-half of the igc_resume routine.
+ * resembles the first-half of the __igc_resume routine.
**/
static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev)
{
@@ -7275,11 +7666,13 @@ static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev)
} else {
pci_set_master(pdev);
pci_restore_state(pdev);
- pci_save_state(pdev);
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
+ if (igc_is_device_id_i226(hw))
+ pci_disable_link_state_locked(pdev, PCIE_LINK_STATE_L1_2);
+
/* In case of PCI error, adapter loses its HW address
* so we should re-assign it here.
*/
@@ -7299,7 +7692,7 @@ static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev)
*
* This callback is called when the error recovery driver tells us that
* its OK to resume normal operation. Implementation resembles the
- * second-half of the igc_resume routine.
+ * second-half of the __igc_resume routine.
*/
static void igc_io_resume(struct pci_dev *pdev)
{
@@ -7309,6 +7702,7 @@ static void igc_io_resume(struct pci_dev *pdev)
rtnl_lock();
if (netif_running(netdev)) {
if (igc_open(netdev)) {
+ rtnl_unlock();
netdev_err(netdev, "igc_open failed after reset\n");
return;
}
@@ -7329,22 +7723,16 @@ static const struct pci_error_handlers igc_err_handler = {
.resume = igc_io_resume,
};
-#ifdef CONFIG_PM
-static const struct dev_pm_ops igc_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(igc_suspend, igc_resume)
- SET_RUNTIME_PM_OPS(igc_runtime_suspend, igc_runtime_resume,
- igc_runtime_idle)
-};
-#endif
+static _DEFINE_DEV_PM_OPS(igc_pm_ops, igc_suspend, igc_resume,
+ igc_runtime_suspend, igc_runtime_resume,
+ igc_runtime_idle);
static struct pci_driver igc_driver = {
.name = igc_driver_name,
.id_table = igc_pci_tbl,
.probe = igc_probe,
.remove = igc_remove,
-#ifdef CONFIG_PM
- .driver.pm = &igc_pm_ops,
-#endif
+ .driver.pm = pm_ptr(&igc_pm_ops),
.shutdown = igc_shutdown,
.err_handler = &igc_err_handler,
};
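The CONFIG_PM guards and __maybe_unused annotations can be dropped above because the newer PM helpers keep the callbacks referenced unconditionally and discard them at the use site. A simplified approximation of the macros involved (see include/linux/pm.h for the exact definitions):

/* pm_ptr() compiles the ops away when CONFIG_PM is disabled. */
#define pm_ptr(_ptr)	(IS_ENABLED(CONFIG_PM) ? (_ptr) : NULL)

/*
 * _DEFINE_DEV_PM_OPS() roughly builds:
 *
 *	static const struct dev_pm_ops igc_pm_ops = {
 *		SYSTEM_SLEEP_PM_OPS(igc_suspend, igc_resume)
 *		RUNTIME_PM_OPS(igc_runtime_suspend, igc_runtime_resume,
 *			       igc_runtime_idle)
 *	};
 *
 * so the callbacks stay referenced (no "defined but not used" warnings)
 * even when CONFIG_PM is off, which is why the #ifdef blocks go away.
 */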
diff --git a/drivers/net/ethernet/intel/igc/igc_nvm.c b/drivers/net/ethernet/intel/igc/igc_nvm.c
index 58f81aba0144..a47b8d39238c 100644
--- a/drivers/net/ethernet/intel/igc/igc_nvm.c
+++ b/drivers/net/ethernet/intel/igc/igc_nvm.c
@@ -36,56 +36,6 @@ static s32 igc_poll_eerd_eewr_done(struct igc_hw *hw, int ee_reg)
}
/**
- * igc_acquire_nvm - Generic request for access to EEPROM
- * @hw: pointer to the HW structure
- *
- * Set the EEPROM access request bit and wait for EEPROM access grant bit.
- * Return successful if access grant bit set, else clear the request for
- * EEPROM access and return -IGC_ERR_NVM (-1).
- */
-s32 igc_acquire_nvm(struct igc_hw *hw)
-{
- s32 timeout = IGC_NVM_GRANT_ATTEMPTS;
- u32 eecd = rd32(IGC_EECD);
- s32 ret_val = 0;
-
- wr32(IGC_EECD, eecd | IGC_EECD_REQ);
- eecd = rd32(IGC_EECD);
-
- while (timeout) {
- if (eecd & IGC_EECD_GNT)
- break;
- udelay(5);
- eecd = rd32(IGC_EECD);
- timeout--;
- }
-
- if (!timeout) {
- eecd &= ~IGC_EECD_REQ;
- wr32(IGC_EECD, eecd);
- hw_dbg("Could not acquire NVM grant\n");
- ret_val = -IGC_ERR_NVM;
- }
-
- return ret_val;
-}
-
-/**
- * igc_release_nvm - Release exclusive access to EEPROM
- * @hw: pointer to the HW structure
- *
- * Stop any current commands to the EEPROM and clear the EEPROM request bit.
- */
-void igc_release_nvm(struct igc_hw *hw)
-{
- u32 eecd;
-
- eecd = rd32(IGC_EECD);
- eecd &= ~IGC_EECD_REQ;
- wr32(IGC_EECD, eecd);
-}
-
-/**
* igc_read_nvm_eerd - Reads EEPROM using EERD register
* @hw: pointer to the HW structure
* @offset: offset of word in the EEPROM to read
@@ -173,7 +123,7 @@ s32 igc_validate_nvm_checksum(struct igc_hw *hw)
checksum += nvm_data;
}
- if (checksum != (u16)NVM_SUM) {
+ if (checksum != NVM_SUM) {
hw_dbg("NVM Checksum Invalid\n");
ret_val = -IGC_ERR_NVM;
goto out;
@@ -205,7 +155,7 @@ s32 igc_update_nvm_checksum(struct igc_hw *hw)
}
checksum += nvm_data;
}
- checksum = (u16)NVM_SUM - checksum;
+ checksum = NVM_SUM - checksum;
ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum);
if (ret_val)
hw_dbg("NVM Write Error while updating checksum.\n");
diff --git a/drivers/net/ethernet/intel/igc/igc_nvm.h b/drivers/net/ethernet/intel/igc/igc_nvm.h
index f9fc2e9cfb03..ab78d0c64547 100644
--- a/drivers/net/ethernet/intel/igc/igc_nvm.h
+++ b/drivers/net/ethernet/intel/igc/igc_nvm.h
@@ -4,8 +4,6 @@
#ifndef _IGC_NVM_H_
#define _IGC_NVM_H_
-s32 igc_acquire_nvm(struct igc_hw *hw);
-void igc_release_nvm(struct igc_hw *hw);
s32 igc_read_mac_addr(struct igc_hw *hw);
s32 igc_read_nvm_eerd(struct igc_hw *hw, u16 offset, u16 words, u16 *data);
s32 igc_validate_nvm_checksum(struct igc_hw *hw);
diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c
index 861f37076861..6c4d204aecfa 100644
--- a/drivers/net/ethernet/intel/igc/igc_phy.c
+++ b/drivers/net/ethernet/intel/igc/igc_phy.c
@@ -240,7 +240,7 @@ static s32 igc_phy_setup_autoneg(struct igc_hw *hw)
/* Read the MULTI GBT AN Control Register - reg 7.32 */
ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK <<
MMD_DEVADDR_SHIFT) |
- ANEG_MULTIGBT_AN_CTRL,
+ IGC_ANEG_MULTIGBT_AN_CTRL,
&aneg_multigbt_an_ctrl);
if (ret_val)
@@ -380,7 +380,7 @@ static s32 igc_phy_setup_autoneg(struct igc_hw *hw)
ret_val = phy->ops.write_reg(hw,
(STANDARD_AN_REG_MASK <<
MMD_DEVADDR_SHIFT) |
- ANEG_MULTIGBT_AN_CTRL,
+ IGC_ANEG_MULTIGBT_AN_CTRL,
aneg_multigbt_an_ctrl);
return ret_val;
@@ -494,24 +494,12 @@ s32 igc_setup_copper_link(struct igc_hw *hw)
s32 ret_val = 0;
bool link;
- if (hw->mac.autoneg) {
- /* Setup autoneg and flow control advertisement and perform
- * autonegotiation.
- */
- ret_val = igc_copper_link_autoneg(hw);
- if (ret_val)
- goto out;
- } else {
- /* PHY will be set to 10H, 10F, 100H or 100F
- * depending on user settings.
- */
- hw_dbg("Forcing Speed and Duplex\n");
- ret_val = hw->phy.ops.force_speed_duplex(hw);
- if (ret_val) {
- hw_dbg("Error Forcing Speed and Duplex\n");
- goto out;
- }
- }
+ /* Setup autoneg and flow control advertisement and perform
+ * autonegotiation.
+ */
+ ret_val = igc_copper_link_autoneg(hw);
+ if (ret_val)
+ goto out;
/* Check link status. Wait up to 100 microseconds for link to become
* valid.
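A short note on the register addressing used in the hunks above, as a hedged worked example: the PHY read/write helpers take a Clause 45 style "MMD device : register" pair packed into one value, so the MULTI GBT AN Control register (MMD 7, register 0x20, i.e. reg 7.32) is addressed as shown below. The constant values are quoted from igc_defines.h as best recalled and should be verified against the source.

/*
 *	(STANDARD_AN_REG_MASK << MMD_DEVADDR_SHIFT) | IGC_ANEG_MULTIGBT_AN_CTRL
 *	== (0x0007 << 16) | 0x0020
 *
 * i.e. device address 7 (auto-negotiation MMD) in the upper half,
 * register offset 0x20 in the lower half.
 */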
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index 885faaa7b9de..b7b46d863bee 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -11,6 +11,7 @@
#include <linux/ktime.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
+#include <net/xdp_sock_drv.h>
#define INCVALUE_MASK 0x7fffffff
#define ISGN 0x80000000
@@ -256,13 +257,6 @@ static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp,
switch (rq->type) {
case PTP_CLK_REQ_EXTTS:
- /* Reject requests with unsupported flags */
- if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_FALLING_EDGE |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
/* Reject requests failing to enable both edges. */
if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
(rq->extts.flags & PTP_ENABLE_FEATURE) &&
@@ -299,10 +293,6 @@ static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp,
return 0;
case PTP_CLK_REQ_PEROUT:
- /* Reject requests with unsupported flags */
- if (rq->perout.flags)
- return -EOPNOTSUPP;
-
if (on) {
pin = ptp_find_pin(igc->ptp_clock, PTP_PF_PEROUT,
rq->perout.index);
@@ -545,6 +535,30 @@ static void igc_ptp_enable_rx_timestamp(struct igc_adapter *adapter)
wr32(IGC_TSYNCRXCTL, val);
}
+static void igc_ptp_free_tx_buffer(struct igc_adapter *adapter,
+ struct igc_tx_timestamp_request *tstamp)
+{
+ if (tstamp->buffer_type == IGC_TX_BUFFER_TYPE_XSK) {
+ /* Release the transmit completion */
+ tstamp->xsk_tx_buffer->xsk_pending_ts = false;
+
+ /* Note: tstamp->skb and tstamp->xsk_tx_buffer are in union.
+ * By setting tstamp->xsk_tx_buffer to NULL, tstamp->skb will
+ * become NULL as well.
+ */
+ tstamp->xsk_tx_buffer = NULL;
+ tstamp->buffer_type = 0;
+
+ /* Trigger txrx interrupt for transmit completion */
+ igc_xsk_wakeup(adapter->netdev, tstamp->xsk_queue_index, 0);
+
+ return;
+ }
+
+ dev_kfree_skb_any(tstamp->skb);
+ tstamp->skb = NULL;
+}
+
static void igc_ptp_clear_tx_tstamp(struct igc_adapter *adapter)
{
unsigned long flags;
@@ -555,8 +569,8 @@ static void igc_ptp_clear_tx_tstamp(struct igc_adapter *adapter)
for (i = 0; i < IGC_MAX_TX_TSTAMP_REGS; i++) {
struct igc_tx_timestamp_request *tstamp = &adapter->tx_tstamp[i];
- dev_kfree_skb_any(tstamp->skb);
- tstamp->skb = NULL;
+ if (tstamp->skb)
+ igc_ptp_free_tx_buffer(adapter, tstamp);
}
spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags);
@@ -612,7 +626,7 @@ static void igc_ptp_enable_tx_timestamp(struct igc_adapter *adapter)
* Return: 0 in case of success, negative errno code otherwise.
*/
static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
@@ -657,8 +671,9 @@ static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter,
static void igc_ptp_tx_timeout(struct igc_adapter *adapter,
struct igc_tx_timestamp_request *tstamp)
{
- dev_kfree_skb_any(tstamp->skb);
- tstamp->skb = NULL;
+ if (tstamp->skb)
+ igc_ptp_free_tx_buffer(adapter, tstamp);
+
adapter->tx_hwtstamp_timeouts++;
netdev_warn(adapter->netdev, "Tx timestamp timeout\n");
@@ -729,10 +744,21 @@ static void igc_ptp_tx_reg_to_stamp(struct igc_adapter *adapter,
shhwtstamps.hwtstamp =
ktime_add_ns(shhwtstamps.hwtstamp, adjust);
- tstamp->skb = NULL;
+ /* Copy the tx hardware timestamp into xdp metadata or skb */
+ if (tstamp->buffer_type == IGC_TX_BUFFER_TYPE_XSK) {
+ struct xsk_buff_pool *xsk_pool;
- skb_tstamp_tx(skb, &shhwtstamps);
- dev_kfree_skb_any(skb);
+ xsk_pool = adapter->tx_ring[tstamp->xsk_queue_index]->xsk_pool;
+ if (xsk_pool && xp_tx_metadata_enabled(xsk_pool)) {
+ xsk_tx_metadata_complete(&tstamp->xsk_meta,
+ &igc_xsk_tx_metadata_ops,
+ &shhwtstamps.hwtstamp);
+ }
+ } else {
+ skb_tstamp_tx(skb, &shhwtstamps);
+ }
+
+ igc_ptp_free_tx_buffer(adapter, tstamp);
}
/**
@@ -827,48 +853,46 @@ void igc_ptp_tx_tstamp_event(struct igc_adapter *adapter)
}
/**
- * igc_ptp_set_ts_config - set hardware time stamping config
+ * igc_ptp_hwtstamp_set - set hardware time stamping config
* @netdev: network interface device structure
- * @ifr: interface request data
+ * @config: timestamping configuration structure
+ * @extack: netlink extended ack structure for error reporting
*
**/
-int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
+int igc_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct igc_adapter *adapter = netdev_priv(netdev);
- struct hwtstamp_config config;
int err;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- err = igc_ptp_set_timestamp_mode(adapter, &config);
+ err = igc_ptp_set_timestamp_mode(adapter, config);
if (err)
return err;
/* save these settings for future reference */
- memcpy(&adapter->tstamp_config, &config,
- sizeof(adapter->tstamp_config));
+ adapter->tstamp_config = *config;
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
/**
- * igc_ptp_get_ts_config - get hardware time stamping config
+ * igc_ptp_hwtstamp_get - get hardware time stamping config
* @netdev: network interface device structure
- * @ifr: interface request data
+ * @config: timestamping configuration structure
*
* Get the hwtstamp_config settings to return to the user. Rather than attempt
* to deconstruct the settings from the registers, just return a shadow copy
* of the last known settings.
**/
-int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr)
+int igc_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
{
struct igc_adapter *adapter = netdev_priv(netdev);
- struct hwtstamp_config *config = &adapter->tstamp_config;
- return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
- -EFAULT : 0;
+ *config = adapter->tstamp_config;
+
+ return 0;
}
/* The two conditions below must be met for cross timestamping via
@@ -901,7 +925,11 @@ static bool igc_is_crosststamp_supported(struct igc_adapter *adapter)
static struct system_counterval_t igc_device_tstamp_to_system(u64 tstamp)
{
#if IS_ENABLED(CONFIG_X86_TSC) && !defined(CONFIG_UML)
- return convert_art_ns_to_tsc(tstamp);
+ return (struct system_counterval_t) {
+ .cs_id = CSID_X86_ART,
+ .cycles = tstamp,
+ .use_nsecs = true,
+ };
#else
return (struct system_counterval_t) { };
#endif
@@ -933,45 +961,62 @@ static void igc_ptm_log_error(struct igc_adapter *adapter, u32 ptm_stat)
}
}
+/* The PTM lock: adapter->ptm_lock must be held when calling igc_ptm_trigger() */
+static void igc_ptm_trigger(struct igc_hw *hw)
+{
+ u32 ctrl;
+
+ /* To "manually" start the PTM cycle we need to set the
+ * trigger (TRIG) bit
+ */
+ ctrl = rd32(IGC_PTM_CTRL);
+ ctrl |= IGC_PTM_CTRL_TRIG;
+ wr32(IGC_PTM_CTRL, ctrl);
+ /* Perform flush after write to CTRL register otherwise
+ * transaction may not start
+ */
+ wrfl();
+}
+
+/* The PTM lock: adapter->ptm_lock must be held when calling igc_ptm_reset() */
+static void igc_ptm_reset(struct igc_hw *hw)
+{
+ u32 ctrl;
+
+ ctrl = rd32(IGC_PTM_CTRL);
+ ctrl &= ~IGC_PTM_CTRL_TRIG;
+ wr32(IGC_PTM_CTRL, ctrl);
+ /* Write to clear all status */
+ wr32(IGC_PTM_STAT, IGC_PTM_STAT_ALL);
+}
+
static int igc_phc_get_syncdevicetime(ktime_t *device,
struct system_counterval_t *system,
void *ctx)
{
- u32 stat, t2_curr_h, t2_curr_l, ctrl;
struct igc_adapter *adapter = ctx;
struct igc_hw *hw = &adapter->hw;
+ u32 stat, t2_curr_h, t2_curr_l;
int err, count = 100;
ktime_t t1, t2_curr;
- /* Get a snapshot of system clocks to use as historic value. */
- ktime_get_snapshot(&adapter->snapshot);
-
+ /* Doing this in a loop because in the event of a
+ * badly timed (ha!) system clock adjustment, we may
+ * get PTM errors from the PCI root, but these errors
+ * are transitory. Repeating the process returns valid
+ * data eventually.
+ */
do {
- /* Doing this in a loop because in the event of a
- * badly timed (ha!) system clock adjustment, we may
- * get PTM errors from the PCI root, but these errors
- * are transitory. Repeating the process returns valid
- * data eventually.
- */
+ /* Get a snapshot of system clocks to use as historic value. */
+ ktime_get_snapshot(&adapter->snapshot);
- /* To "manually" start the PTM cycle we need to clear and
- * then set again the TRIG bit.
- */
- ctrl = rd32(IGC_PTM_CTRL);
- ctrl &= ~IGC_PTM_CTRL_TRIG;
- wr32(IGC_PTM_CTRL, ctrl);
- ctrl |= IGC_PTM_CTRL_TRIG;
- wr32(IGC_PTM_CTRL, ctrl);
-
- /* The cycle only starts "for real" when software notifies
- * that it has read the registers, this is done by setting
- * VALID bit.
- */
- wr32(IGC_PTM_STAT, IGC_PTM_STAT_VALID);
+ igc_ptm_trigger(hw);
err = readx_poll_timeout(rd32, IGC_PTM_STAT, stat,
stat, IGC_PTM_STAT_SLEEP,
IGC_PTM_STAT_TIMEOUT);
+ igc_ptm_reset(hw);
+
if (err < 0) {
netdev_err(adapter->netdev, "Timeout reading IGC_PTM_STAT register\n");
return err;
@@ -980,15 +1025,7 @@ static int igc_phc_get_syncdevicetime(ktime_t *device,
if ((stat & IGC_PTM_STAT_VALID) == IGC_PTM_STAT_VALID)
break;
- if (stat & ~IGC_PTM_STAT_VALID) {
- /* An error occurred, log it. */
- igc_ptm_log_error(adapter, stat);
- /* The STAT register is write-1-to-clear (W1C),
- * so write the previous error status to clear it.
- */
- wr32(IGC_PTM_STAT, stat);
- continue;
- }
+ igc_ptm_log_error(adapter, stat);
} while (--count);
if (!count) {
@@ -1020,9 +1057,16 @@ static int igc_ptp_getcrosststamp(struct ptp_clock_info *ptp,
{
struct igc_adapter *adapter = container_of(ptp, struct igc_adapter,
ptp_caps);
+ int ret;
- return get_device_system_crosststamp(igc_phc_get_syncdevicetime,
- adapter, &adapter->snapshot, cts);
+ /* This blocks until any in progress PTM transactions complete */
+ mutex_lock(&adapter->ptm_lock);
+
+ ret = get_device_system_crosststamp(igc_phc_get_syncdevicetime,
+ adapter, &adapter->snapshot, cts);
+ mutex_unlock(&adapter->ptm_lock);
+
+ return ret;
}
static int igc_ptp_getcyclesx64(struct ptp_clock_info *ptp,
@@ -1105,6 +1149,9 @@ void igc_ptp_init(struct igc_adapter *adapter)
adapter->ptp_caps.pin_config = adapter->sdp_config;
adapter->ptp_caps.n_ext_ts = IGC_N_EXTTS;
adapter->ptp_caps.n_per_out = IGC_N_PEROUT;
+ adapter->ptp_caps.supported_extts_flags = PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS;
adapter->ptp_caps.n_pins = IGC_N_SDP;
adapter->ptp_caps.verify = igc_ptp_verify_pin;
@@ -1121,6 +1168,7 @@ void igc_ptp_init(struct igc_adapter *adapter)
spin_lock_init(&adapter->ptp_tx_lock);
spin_lock_init(&adapter->free_timer_lock);
spin_lock_init(&adapter->tmreg_lock);
+ mutex_init(&adapter->ptm_lock);
adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
@@ -1133,6 +1181,7 @@ void igc_ptp_init(struct igc_adapter *adapter)
if (IS_ERR(adapter->ptp_clock)) {
adapter->ptp_clock = NULL;
netdev_err(netdev, "ptp_clock_register failed\n");
+ mutex_destroy(&adapter->ptm_lock);
} else if (adapter->ptp_clock) {
netdev_info(netdev, "PHC added\n");
adapter->ptp_flags |= IGC_PTP_ENABLED;
@@ -1162,10 +1211,12 @@ static void igc_ptm_stop(struct igc_adapter *adapter)
struct igc_hw *hw = &adapter->hw;
u32 ctrl;
+ mutex_lock(&adapter->ptm_lock);
ctrl = rd32(IGC_PTM_CTRL);
ctrl &= ~IGC_PTM_CTRL_EN;
wr32(IGC_PTM_CTRL, ctrl);
+ mutex_unlock(&adapter->ptm_lock);
}
/**
@@ -1196,13 +1247,18 @@ void igc_ptp_suspend(struct igc_adapter *adapter)
**/
void igc_ptp_stop(struct igc_adapter *adapter)
{
+ if (!(adapter->ptp_flags & IGC_PTP_ENABLED))
+ return;
+
igc_ptp_suspend(adapter);
+ adapter->ptp_flags &= ~IGC_PTP_ENABLED;
if (adapter->ptp_clock) {
ptp_clock_unregister(adapter->ptp_clock);
netdev_info(adapter->netdev, "PHC removed\n");
adapter->ptp_flags &= ~IGC_PTP_ENABLED;
}
+ mutex_destroy(&adapter->ptm_lock);
}
/**
@@ -1214,13 +1270,18 @@ void igc_ptp_stop(struct igc_adapter *adapter)
void igc_ptp_reset(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
- u32 cycle_ctrl, ctrl;
+ u32 cycle_ctrl, ctrl, stat;
unsigned long flags;
u32 timadj;
+ if (!(adapter->ptp_flags & IGC_PTP_ENABLED))
+ return;
+
/* reset the tstamp_config */
igc_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config);
+ mutex_lock(&adapter->ptm_lock);
+
spin_lock_irqsave(&adapter->tmreg_lock, flags);
switch (adapter->hw.mac.type) {
@@ -1249,14 +1310,19 @@ void igc_ptp_reset(struct igc_adapter *adapter)
ctrl = IGC_PTM_CTRL_EN |
IGC_PTM_CTRL_START_NOW |
IGC_PTM_CTRL_SHRT_CYC(IGC_PTM_SHORT_CYC_DEFAULT) |
- IGC_PTM_CTRL_PTM_TO(IGC_PTM_TIMEOUT_DEFAULT) |
- IGC_PTM_CTRL_TRIG;
+ IGC_PTM_CTRL_PTM_TO(IGC_PTM_TIMEOUT_DEFAULT);
wr32(IGC_PTM_CTRL, ctrl);
/* Force the first cycle to run. */
- wr32(IGC_PTM_STAT, IGC_PTM_STAT_VALID);
+ igc_ptm_trigger(hw);
+
+ if (readx_poll_timeout_atomic(rd32, IGC_PTM_STAT, stat,
+ stat, IGC_PTM_STAT_SLEEP,
+ IGC_PTM_STAT_TIMEOUT))
+ netdev_err(adapter->netdev, "Timeout reading IGC_PTM_STAT register\n");
+ igc_ptm_reset(hw);
break;
default:
/* No work to do. */
@@ -1273,5 +1339,7 @@ void igc_ptp_reset(struct igc_adapter *adapter)
out:
spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+ mutex_unlock(&adapter->ptm_lock);
+
wrfl();
}
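
The igc_ptp.c changes above serialize cross-timestamping against PTM start/stop with adapter->ptm_lock and replace the old "write VALID to kick the first cycle" trick with an explicit trigger followed by a bounded poll of IGC_PTM_STAT. A minimal sketch of that trigger-and-poll pattern, assuming the driver's rd32()/wr32() accessors and IGC_PTM_* definitions, with igc_ptm_trigger()/igc_ptm_reset() being the helpers introduced elsewhere in this series:

/* Illustrative only: run one PTM cycle and wait for a status bit,
 * mirroring the igc_ptp_reset() flow above.
 */
static int example_ptm_run_cycle(struct igc_hw *hw)
{
	u32 stat;

	igc_ptm_trigger(hw);		/* start a PTM dialog */

	/* Poll IGC_PTM_STAT until any bit is set, or time out. */
	if (readx_poll_timeout_atomic(rd32, IGC_PTM_STAT, stat, stat,
				      IGC_PTM_STAT_SLEEP,
				      IGC_PTM_STAT_TIMEOUT))
		return -ETIMEDOUT;

	igc_ptm_reset(hw);		/* re-arm for the next trigger */
	return 0;
}
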
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
index e5b893fc5b66..f343c6bfc6be 100644
--- a/drivers/net/ethernet/intel/igc/igc_regs.h
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -222,6 +222,22 @@
#define IGC_FTQF(_n) (0x059E0 + (4 * (_n))) /* 5-tuple Queue Fltr */
+/* Time sync registers - preemption statistics */
+#define IGC_PRMPTDRCNT 0x04284 /* Good RX Preempted Packets */
+#define IGC_PRMEVNTTCNT 0x04298 /* TX Preemption event counter */
+#define IGC_PRMEVNTRCNT 0x0429C /* RX Preemption event counter */
+
+/* Preemption Exception Counter */
+#define IGC_PRMEXCPRCNT 0x42A0
+/* Received out of order packets with SMD-C */
+#define IGC_PRMEXCPRCNT_OOO_SMDC 0x000000FF
+/* Received out of order packets with SMD-C and wrong Frame CNT */
+#define IGC_PRMEXCPRCNT_OOO_FRAME_CNT 0x0000FF00
+/* Received out of order packets with SMD-C and wrong Frag CNT */
+#define IGC_PRMEXCPRCNT_OOO_FRAG_CNT 0x00FF0000
+/* Received packets with SMD-S and wrong Frag CNT and Frame CNT */
+#define IGC_PRMEXCPRCNT_MISS_FRAME_FRAG_CNT 0xFF000000
+
/* Transmit Scheduling Registers */
#define IGC_TQAVCTRL 0x3570
#define IGC_TXQCTL(_n) (0x3344 + 0x4 * (_n))
@@ -238,6 +254,8 @@
#define IGC_TQAVCC(_n) (0x3004 + ((_n) * 0x40))
#define IGC_TQAVHC(_n) (0x300C + ((_n) * 0x40))
+#define IGC_TXARB 0x3354 /* Tx Arbitration Control TxARB - RW */
+
/* System Time Registers */
#define IGC_SYSTIML 0x0B600 /* System time register Low - RO */
#define IGC_SYSTIMH 0x0B604 /* System time register High - RO */
@@ -308,6 +326,16 @@
#define IGC_IPCNFG 0x0E38 /* Internal PHY Configuration */
#define IGC_EEE_SU 0x0E34 /* EEE Setup */
+/* MULTI GBT AN Control Register - reg. 7.32 */
+#define IGC_ANEG_MULTIGBT_AN_CTRL 0x0020
+
+/* EEE ANeg Advertisement Register - reg 7.60 and reg 7.62 */
+#define IGC_ANEG_EEE_AB1 0x003c
+#define IGC_ANEG_EEE_AB2 0x003e
+/* EEE ANeg Link-Partner Advertisement Register - reg 7.61 and reg 7.63 */
+#define IGC_ANEG_EEE_LP_AB1 0x003d
+#define IGC_ANEG_EEE_LP_AB2 0x003f
+
/* LTR registers */
#define IGC_LTRC 0x01A0 /* Latency Tolerance Reporting Control */
#define IGC_LTRMINV 0x5BB0 /* LTR Minimum Value */
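
The new IGC_PRMEXCPRCNT masks above carve one 32-bit register into four 8-bit exception counters. A short, illustrative sketch of unpacking them with FIELD_GET(); the helper and variable names are made up, only the register and mask macros come from this header:

/* Illustrative only: split the packed preemption exception counters. */
static void example_read_preempt_exceptions(struct igc_hw *hw)
{
	u32 val = rd32(IGC_PRMEXCPRCNT);
	u8 ooo_smdc = FIELD_GET(IGC_PRMEXCPRCNT_OOO_SMDC, val);
	u8 ooo_frame = FIELD_GET(IGC_PRMEXCPRCNT_OOO_FRAME_CNT, val);
	u8 ooo_frag = FIELD_GET(IGC_PRMEXCPRCNT_OOO_FRAG_CNT, val);
	u8 miss = FIELD_GET(IGC_PRMEXCPRCNT_MISS_FRAME_FRAG_CNT, val);

	pr_debug("preempt exceptions: %u %u %u %u\n",
		 ooo_smdc, ooo_frame, ooo_frag, miss);
}
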
diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c
index 22cefb1eeedf..8a110145bfee 100644
--- a/drivers/net/ethernet/intel/igc/igc_tsn.c
+++ b/drivers/net/ethernet/intel/igc/igc_tsn.c
@@ -2,9 +2,206 @@
/* Copyright (c) 2019 Intel Corporation */
#include "igc.h"
+#include "igc_base.h"
#include "igc_hw.h"
#include "igc_tsn.h"
+#define MIN_MULTPLIER_TX_MIN_FRAG 0
+#define MAX_MULTPLIER_TX_MIN_FRAG 3
+/* Frag size is based on Section 8.12.2 of the SW User Manual */
+#define TX_MIN_FRAG_SIZE 64
+#define TX_MAX_FRAG_SIZE (TX_MIN_FRAG_SIZE * \
+ (MAX_MULTPLIER_TX_MIN_FRAG + 1))
+
+enum tx_queue {
+ TX_QUEUE_0 = 0,
+ TX_QUEUE_1,
+ TX_QUEUE_2,
+ TX_QUEUE_3,
+};
+
+DEFINE_STATIC_KEY_FALSE(igc_fpe_enabled);
+
+static int igc_fpe_init_smd_frame(struct igc_ring *ring,
+ struct igc_tx_buffer *buffer,
+ struct sk_buff *skb)
+{
+ dma_addr_t dma = dma_map_single(ring->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+
+ if (dma_mapping_error(ring->dev, dma)) {
+ netdev_err_once(ring->netdev, "Failed to map DMA for TX\n");
+ return -ENOMEM;
+ }
+
+ buffer->skb = skb;
+ buffer->protocol = 0;
+ buffer->bytecount = skb->len;
+ buffer->gso_segs = 1;
+ buffer->time_stamp = jiffies;
+ dma_unmap_len_set(buffer, len, skb->len);
+ dma_unmap_addr_set(buffer, dma, dma);
+
+ return 0;
+}
+
+static int igc_fpe_init_tx_descriptor(struct igc_ring *ring,
+ struct sk_buff *skb,
+ enum igc_txd_popts_type type)
+{
+ u32 cmd_type, olinfo_status = 0;
+ struct igc_tx_buffer *buffer;
+ union igc_adv_tx_desc *desc;
+ int err;
+
+ if (!igc_desc_unused(ring))
+ return -EBUSY;
+
+ buffer = &ring->tx_buffer_info[ring->next_to_use];
+ err = igc_fpe_init_smd_frame(ring, buffer, skb);
+ if (err)
+ return err;
+
+ cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
+ IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
+ buffer->bytecount;
+
+ olinfo_status |= FIELD_PREP(IGC_ADVTXD_PAYLEN_MASK, buffer->bytecount);
+
+ switch (type) {
+ case SMD_V:
+ case SMD_R:
+ olinfo_status |= FIELD_PREP(IGC_TXD_POPTS_SMD_MASK, type);
+ break;
+ }
+
+ desc = IGC_TX_DESC(ring, ring->next_to_use);
+ desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+ desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(buffer, dma));
+
+ netdev_tx_sent_queue(txring_txq(ring), skb->len);
+
+ buffer->next_to_watch = desc;
+ ring->next_to_use = (ring->next_to_use + 1) % ring->count;
+
+ return 0;
+}
+
+static int igc_fpe_xmit_smd_frame(struct igc_adapter *adapter,
+ enum igc_txd_popts_type type)
+{
+ int cpu = smp_processor_id();
+ struct netdev_queue *nq;
+ struct igc_ring *ring;
+ struct sk_buff *skb;
+ int err;
+
+ ring = igc_get_tx_ring(adapter, cpu);
+ nq = txring_txq(ring);
+
+ skb = alloc_skb(SMD_FRAME_SIZE, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put_zero(skb, SMD_FRAME_SIZE);
+
+ __netif_tx_lock(nq, cpu);
+
+ err = igc_fpe_init_tx_descriptor(ring, skb, type);
+ igc_flush_tx_descriptors(ring);
+
+ __netif_tx_unlock(nq);
+
+ return err;
+}
+
+static void igc_fpe_configure_tx(struct ethtool_mmsv *mmsv, bool tx_enable)
+{
+ struct igc_fpe_t *fpe = container_of(mmsv, struct igc_fpe_t, mmsv);
+ struct igc_adapter *adapter;
+
+ adapter = container_of(fpe, struct igc_adapter, fpe);
+ adapter->fpe.tx_enabled = tx_enable;
+
+ /* Update config since tx_enabled affects preemptible queue configuration */
+ igc_tsn_offload_apply(adapter);
+}
+
+static void igc_fpe_send_mpacket(struct ethtool_mmsv *mmsv,
+ enum ethtool_mpacket type)
+{
+ struct igc_fpe_t *fpe = container_of(mmsv, struct igc_fpe_t, mmsv);
+ struct igc_adapter *adapter;
+ int err;
+
+ adapter = container_of(fpe, struct igc_adapter, fpe);
+
+ if (type == ETHTOOL_MPACKET_VERIFY) {
+ err = igc_fpe_xmit_smd_frame(adapter, SMD_V);
+ if (err && net_ratelimit())
+ netdev_err(adapter->netdev, "Error sending SMD-V frame\n");
+ } else if (type == ETHTOOL_MPACKET_RESPONSE) {
+ err = igc_fpe_xmit_smd_frame(adapter, SMD_R);
+ if (err && net_ratelimit())
+ netdev_err(adapter->netdev, "Error sending SMD-R frame\n");
+ }
+}
+
+static const struct ethtool_mmsv_ops igc_mmsv_ops = {
+ .configure_tx = igc_fpe_configure_tx,
+ .send_mpacket = igc_fpe_send_mpacket,
+};
+
+void igc_fpe_init(struct igc_adapter *adapter)
+{
+ adapter->fpe.tx_min_frag_size = TX_MIN_FRAG_SIZE;
+ adapter->fpe.tx_enabled = false;
+ ethtool_mmsv_init(&adapter->fpe.mmsv, adapter->netdev, &igc_mmsv_ops);
+}
+
+void igc_fpe_clear_preempt_queue(struct igc_adapter *adapter)
+{
+ for (int i = 0; i < adapter->num_tx_queues; i++) {
+ struct igc_ring *tx_ring = adapter->tx_ring[i];
+
+ tx_ring->preemptible = false;
+ }
+}
+
+static u32 igc_fpe_map_preempt_tc_to_queue(const struct igc_adapter *adapter,
+ unsigned long preemptible_tcs)
+{
+ struct net_device *dev = adapter->netdev;
+ u32 i, queue = 0;
+
+ for (i = 0; i < dev->num_tc; i++) {
+ u32 offset, count;
+
+ if (!(preemptible_tcs & BIT(i)))
+ continue;
+
+ offset = dev->tc_to_txq[i].offset;
+ count = dev->tc_to_txq[i].count;
+ queue |= GENMASK(offset + count - 1, offset);
+ }
+
+ return queue;
+}
+
+void igc_fpe_save_preempt_queue(struct igc_adapter *adapter,
+ const struct tc_mqprio_qopt_offload *mqprio)
+{
+ u32 preemptible_queue = igc_fpe_map_preempt_tc_to_queue(adapter,
+ mqprio->preemptible_tcs);
+
+ for (int i = 0; i < adapter->num_tx_queues; i++) {
+ struct igc_ring *tx_ring = adapter->tx_ring[i];
+
+ tx_ring->preemptible = !!(preemptible_queue & BIT(i));
+ }
+}
+
static bool is_any_launchtime(struct igc_adapter *adapter)
{
int i;
@@ -37,24 +234,33 @@ static unsigned int igc_tsn_new_flags(struct igc_adapter *adapter)
{
unsigned int new_flags = adapter->flags & ~IGC_FLAG_TSN_ANY_ENABLED;
- if (adapter->taprio_offload_enable)
- new_flags |= IGC_FLAG_TSN_QBV_ENABLED;
- if (is_any_launchtime(adapter))
+ if (adapter->taprio_offload_enable || is_any_launchtime(adapter) ||
+ adapter->strict_priority_enable)
new_flags |= IGC_FLAG_TSN_QBV_ENABLED;
if (is_cbs_enabled(adapter))
new_flags |= IGC_FLAG_TSN_QAV_ENABLED;
+ if (adapter->fpe.mmsv.pmac_enabled)
+ new_flags |= IGC_FLAG_TSN_PREEMPT_ENABLED;
+
return new_flags;
}
+static bool igc_tsn_is_tx_mode_in_tsn(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+
+ return !!(rd32(IGC_TQAVCTRL) & IGC_TQAVCTRL_TRANSMIT_MODE_TSN);
+}
+
void igc_tsn_adjust_txtime_offset(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
u16 txoffset;
- if (!is_any_launchtime(adapter))
+ if (!igc_tsn_is_tx_mode_in_tsn(adapter))
return;
switch (adapter->link_speed) {
@@ -78,6 +284,73 @@ void igc_tsn_adjust_txtime_offset(struct igc_adapter *adapter)
wr32(IGC_GTXOFFSET, txoffset);
}
+static void igc_tsn_restore_retx_default(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 retxctl;
+
+ retxctl = rd32(IGC_RETX_CTL) & IGC_RETX_CTL_WATERMARK_MASK;
+ wr32(IGC_RETX_CTL, retxctl);
+}
+
+bool igc_tsn_is_taprio_activated_by_user(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+
+ return (rd32(IGC_BASET_H) || rd32(IGC_BASET_L)) &&
+ adapter->taprio_offload_enable;
+}
+
+static void igc_tsn_tx_arb(struct igc_adapter *adapter, bool reverse_prio)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 txarb;
+
+ txarb = rd32(IGC_TXARB);
+
+ txarb &= ~(IGC_TXARB_TXQ_PRIO_0_MASK |
+ IGC_TXARB_TXQ_PRIO_1_MASK |
+ IGC_TXARB_TXQ_PRIO_2_MASK |
+ IGC_TXARB_TXQ_PRIO_3_MASK);
+
+ if (reverse_prio) {
+ txarb |= IGC_TXARB_TXQ_PRIO_0(TX_QUEUE_3);
+ txarb |= IGC_TXARB_TXQ_PRIO_1(TX_QUEUE_2);
+ txarb |= IGC_TXARB_TXQ_PRIO_2(TX_QUEUE_1);
+ txarb |= IGC_TXARB_TXQ_PRIO_3(TX_QUEUE_0);
+ } else {
+ txarb |= IGC_TXARB_TXQ_PRIO_0(TX_QUEUE_0);
+ txarb |= IGC_TXARB_TXQ_PRIO_1(TX_QUEUE_1);
+ txarb |= IGC_TXARB_TXQ_PRIO_2(TX_QUEUE_2);
+ txarb |= IGC_TXARB_TXQ_PRIO_3(TX_QUEUE_3);
+ }
+
+ wr32(IGC_TXARB, txarb);
+}
+
+/**
+ * igc_tsn_set_rxpbsize - Set the receive packet buffer size
+ * @adapter: Pointer to the igc_adapter structure
+ * @rxpbs_exp_bmc_be: Value to set the receive packet buffer size, including
+ * express buffer, BMC buffer, and Best Effort buffer
+ *
+ * The IGC_RXPBS register value may include allocations for the Express buffer,
+ * BMC buffer, Best Effort buffer, and the timestamp descriptor buffer
+ * (IGC_RXPBS_CFG_TS_EN).
+ */
+static void igc_tsn_set_rxpbsize(struct igc_adapter *adapter,
+ u32 rxpbs_exp_bmc_be)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 rxpbs = rd32(IGC_RXPBS);
+
+ rxpbs &= ~(IGC_RXPBSIZE_EXP_MASK | IGC_BMC2OSPBSIZE_MASK |
+ IGC_RXPBSIZE_BE_MASK);
+ rxpbs |= rxpbs_exp_bmc_be;
+
+ wr32(IGC_RXPBS, rxpbs);
+}
+
/* Returns the TSN specific registers to their default values after
* the adapter is reset.
*/
@@ -88,43 +361,113 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter)
int i;
wr32(IGC_GTXOFFSET, 0);
- wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);
+ wr32(IGC_TXPBS, IGC_TXPBSIZE_DEFAULT);
wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_DEFAULT);
+ igc_tsn_set_rxpbsize(adapter, IGC_RXPBSIZE_EXP_BMC_DEFAULT);
+
+ if (igc_is_device_id_i226(hw))
+ igc_tsn_restore_retx_default(adapter);
+
tqavctrl = rd32(IGC_TQAVCTRL);
tqavctrl &= ~(IGC_TQAVCTRL_TRANSMIT_MODE_TSN |
- IGC_TQAVCTRL_ENHANCED_QAV | IGC_TQAVCTRL_FUTSCDDIS);
+ IGC_TQAVCTRL_ENHANCED_QAV | IGC_TQAVCTRL_FUTSCDDIS |
+ IGC_TQAVCTRL_PREEMPT_ENA | IGC_TQAVCTRL_MIN_FRAG_MASK);
wr32(IGC_TQAVCTRL, tqavctrl);
for (i = 0; i < adapter->num_tx_queues; i++) {
+ int reg_idx = adapter->tx_ring[i]->reg_idx;
+ u32 txdctl;
+
wr32(IGC_TXQCTL(i), 0);
wr32(IGC_STQT(i), 0);
wr32(IGC_ENDQT(i), NSEC_PER_SEC);
+
+ txdctl = rd32(IGC_TXDCTL(reg_idx));
+ txdctl &= ~IGC_TXDCTL_PRIORITY_HIGH;
+ wr32(IGC_TXDCTL(reg_idx), txdctl);
}
wr32(IGC_QBVCYCLET_S, 0);
wr32(IGC_QBVCYCLET, NSEC_PER_SEC);
+ /* Restore the default Tx arbitration: Priority 0 has the highest
+ * priority and is assigned to queue 0 and so on and so forth.
+ */
+ igc_tsn_tx_arb(adapter, false);
+
adapter->flags &= ~IGC_FLAG_TSN_QBV_ENABLED;
return 0;
}
+/* To partially fix i226 HW errata, reduce MAC internal buffering from 192 Bytes
+ * to 88 Bytes by setting RETX_CTL register using the recommendation from:
+ * a) Ethernet Controller I225/I226 Specification Update Rev 2.1
+ * Item 9: TSN: Packet Transmission Might Cross the Qbv Window
+ * b) I225/6 SW User Manual Rev 1.2.4: Section 8.11.5 Retry Buffer Control
+ */
+static void igc_tsn_set_retx_qbvfullthreshold(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 retxctl, watermark;
+
+ retxctl = rd32(IGC_RETX_CTL);
+ watermark = retxctl & IGC_RETX_CTL_WATERMARK_MASK;
+ /* Set QBVFULLTH value using watermark and set QBVFULLEN */
+ retxctl |= (watermark << IGC_RETX_CTL_QBVFULLTH_SHIFT) |
+ IGC_RETX_CTL_QBVFULLEN;
+ wr32(IGC_RETX_CTL, retxctl);
+}
+
+static u8 igc_fpe_get_frag_size_mult(const struct igc_fpe_t *fpe)
+{
+ u8 mult = (fpe->tx_min_frag_size / TX_MIN_FRAG_SIZE) - 1;
+
+ return clamp_t(u8, mult, MIN_MULTPLIER_TX_MIN_FRAG,
+ MAX_MULTPLIER_TX_MIN_FRAG);
+}
+
+u32 igc_fpe_get_supported_frag_size(u32 frag_size)
+{
+ static const u32 supported_sizes[] = { 64, 128, 192, 256 };
+
+ /* Find the smallest supported size that is >= frag_size */
+ for (int i = 0; i < ARRAY_SIZE(supported_sizes); i++) {
+ if (frag_size <= supported_sizes[i])
+ return supported_sizes[i];
+ }
+
+ /* Should not happen */
+ return TX_MAX_FRAG_SIZE;
+}
+
static int igc_tsn_enable_offload(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
u32 tqavctrl, baset_l, baset_h;
u32 sec, nsec, cycle;
ktime_t base_time, systim;
+ u32 frag_size_mult;
int i;
wr32(IGC_TSAUXC, 0);
wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_TSN);
wr32(IGC_TXPBS, IGC_TXPBSIZE_TSN);
+ igc_tsn_set_rxpbsize(adapter, IGC_RXPBSIZE_EXP_BMC_BE_TSN);
+
+ if (igc_is_device_id_i226(hw))
+ igc_tsn_set_retx_qbvfullthreshold(adapter);
+
+ if (adapter->strict_priority_enable ||
+ adapter->flags & IGC_FLAG_TSN_REVERSE_TXQ_PRIO)
+ igc_tsn_tx_arb(adapter, true);
+
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igc_ring *ring = adapter->tx_ring[i];
+ u32 txdctl = rd32(IGC_TXDCTL(ring->reg_idx));
u32 txqctl = 0;
u16 cbs_value;
u32 tqavcc;
@@ -158,6 +501,22 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter)
if (ring->launchtime_enable)
txqctl |= IGC_TXQCTL_QUEUE_MODE_LAUNCHT;
+ if (!adapter->fpe.tx_enabled) {
+ /* fpe inactive: clear both flags */
+ txqctl &= ~IGC_TXQCTL_PREEMPTIBLE;
+ txdctl &= ~IGC_TXDCTL_PRIORITY_HIGH;
+ } else if (ring->preemptible) {
+ /* fpe active + preemptible: enable preemptible queue + set low priority */
+ txqctl |= IGC_TXQCTL_PREEMPTIBLE;
+ txdctl &= ~IGC_TXDCTL_PRIORITY_HIGH;
+ } else {
+ /* fpe active + express: enable express queue + set high priority */
+ txqctl &= ~IGC_TXQCTL_PREEMPTIBLE;
+ txdctl |= IGC_TXDCTL_PRIORITY_HIGH;
+ }
+
+ wr32(IGC_TXDCTL(ring->reg_idx), txdctl);
+
/* Skip configuring CBS for Q2 and Q3 */
if (i > 1)
goto skip_cbs;
@@ -245,10 +604,16 @@ skip_cbs:
wr32(IGC_TXQCTL(i), txqctl);
}
- tqavctrl = rd32(IGC_TQAVCTRL) & ~IGC_TQAVCTRL_FUTSCDDIS;
-
+ tqavctrl = rd32(IGC_TQAVCTRL) & ~(IGC_TQAVCTRL_FUTSCDDIS |
+ IGC_TQAVCTRL_PREEMPT_ENA | IGC_TQAVCTRL_MIN_FRAG_MASK);
tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN | IGC_TQAVCTRL_ENHANCED_QAV;
+ if (adapter->fpe.mmsv.pmac_enabled)
+ tqavctrl |= IGC_TQAVCTRL_PREEMPT_ENA;
+
+ frag_size_mult = igc_fpe_get_frag_size_mult(&adapter->fpe);
+ tqavctrl |= FIELD_PREP(IGC_TQAVCTRL_MIN_FRAG_MASK, frag_size_mult);
+
adapter->qbv_count++;
cycle = adapter->cycle_time;
@@ -262,14 +627,6 @@ skip_cbs:
s64 n = div64_s64(ktime_sub_ns(systim, base_time), cycle);
base_time = ktime_add_ns(base_time, (n + 1) * cycle);
-
- /* Increase the counter if scheduling into the past while
- * Gate Control List (GCL) is running.
- */
- if ((rd32(IGC_BASET_H) || rd32(IGC_BASET_L)) &&
- (adapter->tc_setup_type == TC_SETUP_QDISC_TAPRIO) &&
- (adapter->qbv_count > 1))
- adapter->qbv_config_change_errors++;
} else {
if (igc_is_device_id_i226(hw)) {
ktime_t adjust_time, expires_time;
@@ -317,6 +674,14 @@ int igc_tsn_reset(struct igc_adapter *adapter)
unsigned int new_flags;
int err = 0;
+ if (adapter->fpe.mmsv.pmac_enabled) {
+ err = igc_enable_empty_addr_recv(adapter);
+ if (err && net_ratelimit())
+ netdev_err(adapter->netdev, "Error adding empty address to MAC filter\n");
+ } else {
+ igc_disable_empty_addr_recv(adapter);
+ }
+
new_flags = igc_tsn_new_flags(adapter);
if (!(new_flags & IGC_FLAG_TSN_ANY_ENABLED))
@@ -331,15 +696,22 @@ int igc_tsn_reset(struct igc_adapter *adapter)
return err;
}
-int igc_tsn_offload_apply(struct igc_adapter *adapter)
+static bool igc_tsn_will_tx_mode_change(struct igc_adapter *adapter)
{
- struct igc_hw *hw = &adapter->hw;
+ bool any_tsn_enabled = !!(igc_tsn_new_flags(adapter) &
+ IGC_FLAG_TSN_ANY_ENABLED);
- /* Per I225/6 HW Design Section 7.5.2.1, transmit mode
- * cannot be changed dynamically. Require reset the adapter.
+ return (any_tsn_enabled && !igc_tsn_is_tx_mode_in_tsn(adapter)) ||
+ (!any_tsn_enabled && igc_tsn_is_tx_mode_in_tsn(adapter));
+}
+
+int igc_tsn_offload_apply(struct igc_adapter *adapter)
+{
+ /* Per I225/6 HW Design Section 7.5.2.1 guideline, if the tx mode changes
+ * from legacy->tsn or tsn->legacy, an adapter reset is needed.
*/
if (netif_running(adapter->netdev) &&
- (igc_is_device_id_i225(hw) || !adapter->qbv_count)) {
+ igc_tsn_will_tx_mode_change(adapter)) {
schedule_work(&adapter->reset_task);
return 0;
}
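
The fragment-size handling introduced in igc_tsn.c maps a requested byte count onto the 2-bit MIN_FRAG multiplier written to TQAVCTRL: the hardware fragment size is 64 * (mult + 1), so 64/128/192/256 bytes correspond to mult 0..3 and other requests round up to the next supported size. A standalone sketch of that mapping with a couple of worked values; the function name is illustrative, not the driver's:

#include <stdio.h>

/* Round a requested fragment size up to a supported value (64, 128,
 * 192 or 256 bytes) and return the multiplier: size = 64 * (mult + 1).
 */
static unsigned int example_frag_size_to_mult(unsigned int frag_size)
{
	static const unsigned int supported[] = { 64, 128, 192, 256 };

	for (unsigned int i = 0; i < sizeof(supported) / sizeof(supported[0]); i++)
		if (frag_size <= supported[i])
			return supported[i] / 64 - 1;

	return 3; /* clamp to the largest supported fragment size */
}

int main(void)
{
	/* 60 -> 0 (64B), 100 -> 1 (128B), 300 -> 3 (256B) */
	printf("%u %u %u\n", example_frag_size_to_mult(60),
	       example_frag_size_to_mult(100),
	       example_frag_size_to_mult(300));
	return 0;
}
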
diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.h b/drivers/net/ethernet/intel/igc/igc_tsn.h
index b53e6af560b7..a95b893459d7 100644
--- a/drivers/net/ethernet/intel/igc/igc_tsn.h
+++ b/drivers/net/ethernet/intel/igc/igc_tsn.h
@@ -4,8 +4,66 @@
#ifndef _IGC_TSN_H_
#define _IGC_TSN_H_
+#include <net/pkt_sched.h>
+
+#define IGC_RX_MIN_FRAG_SIZE 60
+#define SMD_FRAME_SIZE 60
+
+enum igc_txd_popts_type {
+ SMD_V = 0x01,
+ SMD_R = 0x02,
+};
+
+DECLARE_STATIC_KEY_FALSE(igc_fpe_enabled);
+
+void igc_fpe_init(struct igc_adapter *adapter);
+void igc_fpe_clear_preempt_queue(struct igc_adapter *adapter);
+void igc_fpe_save_preempt_queue(struct igc_adapter *adapter,
+ const struct tc_mqprio_qopt_offload *mqprio);
+u32 igc_fpe_get_supported_frag_size(u32 frag_size);
int igc_tsn_offload_apply(struct igc_adapter *adapter);
int igc_tsn_reset(struct igc_adapter *adapter);
void igc_tsn_adjust_txtime_offset(struct igc_adapter *adapter);
+bool igc_tsn_is_taprio_activated_by_user(struct igc_adapter *adapter);
+
+static inline bool igc_fpe_is_pmac_enabled(struct igc_adapter *adapter)
+{
+ return static_branch_unlikely(&igc_fpe_enabled) &&
+ adapter->fpe.mmsv.pmac_enabled;
+}
+
+static inline bool igc_fpe_handle_mpacket(struct igc_adapter *adapter,
+ union igc_adv_rx_desc *rx_desc,
+ unsigned int size, void *pktbuf)
+{
+ u32 status_error = le32_to_cpu(rx_desc->wb.upper.status_error);
+ int smd;
+
+ smd = FIELD_GET(IGC_RXDADV_STAT_SMD_TYPE_MASK, status_error);
+ if (smd != IGC_RXD_STAT_SMD_TYPE_V && smd != IGC_RXD_STAT_SMD_TYPE_R)
+ return false;
+
+ if (size == SMD_FRAME_SIZE && mem_is_zero(pktbuf, SMD_FRAME_SIZE)) {
+ struct ethtool_mmsv *mmsv = &adapter->fpe.mmsv;
+ enum ethtool_mmsv_event event;
+
+ if (smd == IGC_RXD_STAT_SMD_TYPE_V)
+ event = ETHTOOL_MMSV_LP_SENT_VERIFY_MPACKET;
+ else
+ event = ETHTOOL_MMSV_LP_SENT_RESPONSE_MPACKET;
+
+ ethtool_mmsv_event_handle(mmsv, event);
+ }
+
+ return true;
+}
+
+static inline bool igc_fpe_transmitted_smd_v(union igc_adv_tx_desc *tx_desc)
+{
+ u32 olinfo_status = le32_to_cpu(tx_desc->read.olinfo_status);
+ u8 smd = FIELD_GET(IGC_TXD_POPTS_SMD_MASK, olinfo_status);
+
+ return smd == SMD_V;
+}
#endif /* _IGC_TSN_H_ */
diff --git a/drivers/net/ethernet/intel/igc/igc_xdp.c b/drivers/net/ethernet/intel/igc/igc_xdp.c
index e27af72aada8..9eb47b4beb06 100644
--- a/drivers/net/ethernet/intel/igc/igc_xdp.c
+++ b/drivers/net/ethernet/intel/igc/igc_xdp.c
@@ -13,6 +13,8 @@ int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
struct net_device *dev = adapter->netdev;
bool if_running = netif_running(dev);
struct bpf_prog *old_prog;
+ bool need_update;
+ unsigned int i;
if (dev->mtu > ETH_DATA_LEN) {
/* For now, the driver doesn't support XDP functionality with
@@ -22,8 +24,14 @@ int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
return -EOPNOTSUPP;
}
- if (if_running)
- igc_close(dev);
+ need_update = !!adapter->xdp_prog != !!prog;
+ if (if_running && need_update) {
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ igc_disable_rx_ring(adapter->rx_ring[i]);
+ igc_disable_tx_ring(adapter->tx_ring[i]);
+ napi_disable(&adapter->rx_ring[i]->q_vector->napi);
+ }
+ }
old_prog = xchg(&adapter->xdp_prog, prog);
if (old_prog)
@@ -34,8 +42,13 @@ int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
else
xdp_features_clear_redirect_target(dev);
- if (if_running)
- igc_open(dev);
+ if (if_running && need_update) {
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ napi_enable(&adapter->rx_ring[i]->q_vector->napi);
+ igc_enable_tx_ring(adapter->tx_ring[i]);
+ igc_enable_rx_ring(adapter->rx_ring[i]);
+ }
+ }
return 0;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index 4fb0d9e3f2da..2e7738f41c58 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -1,15 +1,17 @@
# SPDX-License-Identifier: GPL-2.0
-# Copyright(c) 1999 - 2018 Intel Corporation.
+# Copyright(c) 1999 - 2024 Intel Corporation.
#
# Makefile for the Intel(R) 10GbE PCI Express ethernet driver
#
+subdir-ccflags-y += -I$(src)
obj-$(CONFIG_IXGBE) += ixgbe.o
-ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
- ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
- ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o \
- ixgbe_xsk.o
+ixgbe-y := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
+ ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
+ ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o \
+ ixgbe_xsk.o ixgbe_e610.o devlink/devlink.o ixgbe_fw_update.o \
+ devlink/region.o
ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
ixgbe_dcb_82599.o ixgbe_dcb_nl.o
diff --git a/drivers/net/ethernet/intel/ixgbe/devlink/devlink.c b/drivers/net/ethernet/intel/ixgbe/devlink/devlink.c
new file mode 100644
index 000000000000..d227f4d2a2d1
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/devlink/devlink.c
@@ -0,0 +1,558 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025, Intel Corporation. */
+
+#include "ixgbe.h"
+#include "devlink.h"
+#include "ixgbe_fw_update.h"
+
+struct ixgbe_info_ctx {
+ char buf[128];
+ struct ixgbe_orom_info pending_orom;
+ struct ixgbe_nvm_info pending_nvm;
+ struct ixgbe_netlist_info pending_netlist;
+ struct ixgbe_hw_dev_caps dev_caps;
+};
+
+enum ixgbe_devlink_version_type {
+ IXGBE_DL_VERSION_RUNNING,
+ IXGBE_DL_VERSION_STORED
+};
+
+static void ixgbe_info_get_dsn(struct ixgbe_adapter *adapter,
+ struct ixgbe_info_ctx *ctx)
+{
+ u8 dsn[8];
+
+ /* Copy the DSN into an array in Big Endian format */
+ put_unaligned_be64(pci_get_dsn(adapter->pdev), dsn);
+
+ snprintf(ctx->buf, sizeof(ctx->buf), "%8phD", dsn);
+}
+
+static void ixgbe_info_orom_ver(struct ixgbe_adapter *adapter,
+ struct ixgbe_info_ctx *ctx,
+ enum ixgbe_devlink_version_type type)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_nvm_version nvm_ver;
+
+ ctx->buf[0] = '\0';
+
+ if (hw->mac.type == ixgbe_mac_e610) {
+ struct ixgbe_orom_info *orom = &adapter->hw.flash.orom;
+
+ if (type == IXGBE_DL_VERSION_STORED &&
+ ctx->dev_caps.common_cap.nvm_update_pending_orom)
+ orom = &ctx->pending_orom;
+
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u",
+ orom->major, orom->build, orom->patch);
+ return;
+ }
+
+ ixgbe_get_oem_prod_version(hw, &nvm_ver);
+ if (nvm_ver.oem_valid) {
+ snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x",
+ nvm_ver.oem_major, nvm_ver.oem_minor,
+ nvm_ver.oem_release);
+
+ return;
+ }
+
+ ixgbe_get_orom_version(hw, &nvm_ver);
+ if (nvm_ver.or_valid)
+ snprintf(ctx->buf, sizeof(ctx->buf), "%d.%d.%d",
+ nvm_ver.or_major, nvm_ver.or_build, nvm_ver.or_patch);
+}
+
+static void ixgbe_info_eetrack(struct ixgbe_adapter *adapter,
+ struct ixgbe_info_ctx *ctx,
+ enum ixgbe_devlink_version_type type)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_nvm_version nvm_ver;
+
+ if (hw->mac.type == ixgbe_mac_e610) {
+ u32 eetrack = hw->flash.nvm.eetrack;
+
+ if (type == IXGBE_DL_VERSION_STORED &&
+ ctx->dev_caps.common_cap.nvm_update_pending_nvm)
+ eetrack = ctx->pending_nvm.eetrack;
+
+ snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", eetrack);
+ return;
+ }
+
+ ixgbe_get_oem_prod_version(hw, &nvm_ver);
+
+ /* No ETRACK version for OEM */
+ if (nvm_ver.oem_valid) {
+ ctx->buf[0] = '\0';
+ return;
+ }
+
+ ixgbe_get_etk_id(hw, &nvm_ver);
+ snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", nvm_ver.etk_id);
+}
+
+static void ixgbe_info_fw_api(struct ixgbe_adapter *adapter,
+ struct ixgbe_info_ctx *ctx)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u",
+ hw->api_maj_ver, hw->api_min_ver, hw->api_patch);
+}
+
+static void ixgbe_info_fw_build(struct ixgbe_adapter *adapter,
+ struct ixgbe_info_ctx *ctx)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", hw->fw_build);
+}
+
+static void ixgbe_info_fw_srev(struct ixgbe_adapter *adapter,
+ struct ixgbe_info_ctx *ctx,
+ enum ixgbe_devlink_version_type type)
+{
+ struct ixgbe_nvm_info *nvm = &adapter->hw.flash.nvm;
+
+ if (type == IXGBE_DL_VERSION_STORED &&
+ ctx->dev_caps.common_cap.nvm_update_pending_nvm)
+ nvm = &ctx->pending_nvm;
+
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u", nvm->srev);
+}
+
+static void ixgbe_info_orom_srev(struct ixgbe_adapter *adapter,
+ struct ixgbe_info_ctx *ctx,
+ enum ixgbe_devlink_version_type type)
+{
+ struct ixgbe_orom_info *orom = &adapter->hw.flash.orom;
+
+ if (type == IXGBE_DL_VERSION_STORED &&
+ ctx->dev_caps.common_cap.nvm_update_pending_orom)
+ orom = &ctx->pending_orom;
+
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u", orom->srev);
+}
+
+static void ixgbe_info_nvm_ver(struct ixgbe_adapter *adapter,
+ struct ixgbe_info_ctx *ctx,
+ enum ixgbe_devlink_version_type type)
+{
+ struct ixgbe_nvm_info *nvm = &adapter->hw.flash.nvm;
+
+ if (type == IXGBE_DL_VERSION_STORED &&
+ ctx->dev_caps.common_cap.nvm_update_pending_nvm)
+ nvm = &ctx->pending_nvm;
+
+ snprintf(ctx->buf, sizeof(ctx->buf), "%x.%02x", nvm->major, nvm->minor);
+}
+
+static void ixgbe_info_netlist_ver(struct ixgbe_adapter *adapter,
+ struct ixgbe_info_ctx *ctx,
+ enum ixgbe_devlink_version_type type)
+{
+ struct ixgbe_netlist_info *netlist = &adapter->hw.flash.netlist;
+
+ if (type == IXGBE_DL_VERSION_STORED &&
+ ctx->dev_caps.common_cap.nvm_update_pending_netlist)
+ netlist = &ctx->pending_netlist;
+
+ /* The netlist version fields are BCD formatted */
+ snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x-%x.%x.%x",
+ netlist->major, netlist->minor,
+ netlist->type >> 16, netlist->type & 0xFFFF,
+ netlist->rev, netlist->cust_ver);
+}
+
+static void ixgbe_info_netlist_build(struct ixgbe_adapter *adapter,
+ struct ixgbe_info_ctx *ctx,
+ enum ixgbe_devlink_version_type type)
+{
+ struct ixgbe_netlist_info *netlist = &adapter->hw.flash.netlist;
+
+ if (type == IXGBE_DL_VERSION_STORED &&
+ ctx->dev_caps.common_cap.nvm_update_pending_netlist)
+ netlist = &ctx->pending_netlist;
+
+ snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", netlist->hash);
+}
+
+static int ixgbe_set_ctx_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_info_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ bool *pending_orom, *pending_nvm, *pending_netlist;
+ int err;
+
+ err = ixgbe_discover_dev_caps(hw, &ctx->dev_caps);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unable to discover device capabilities");
+ return err;
+ }
+
+ pending_orom = &ctx->dev_caps.common_cap.nvm_update_pending_orom;
+ pending_nvm = &ctx->dev_caps.common_cap.nvm_update_pending_nvm;
+ pending_netlist = &ctx->dev_caps.common_cap.nvm_update_pending_netlist;
+
+ if (*pending_orom) {
+ err = ixgbe_get_inactive_orom_ver(hw, &ctx->pending_orom);
+ if (err)
+ *pending_orom = false;
+ }
+
+ if (*pending_nvm) {
+ err = ixgbe_get_inactive_nvm_ver(hw, &ctx->pending_nvm);
+ if (err)
+ *pending_nvm = false;
+ }
+
+ if (*pending_netlist) {
+ err = ixgbe_get_inactive_netlist_ver(hw, &ctx->pending_netlist);
+ if (err)
+ *pending_netlist = false;
+ }
+
+ return 0;
+}
+
+static int ixgbe_devlink_info_get_e610(struct ixgbe_adapter *adapter,
+ struct devlink_info_req *req,
+ struct ixgbe_info_ctx *ctx)
+{
+ int err;
+
+ ixgbe_info_fw_api(adapter, ctx);
+ err = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_MGMT_API,
+ ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_fw_build(adapter, ctx);
+ err = devlink_info_version_running_put(req, "fw.mgmt.build", ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_fw_srev(adapter, ctx, IXGBE_DL_VERSION_RUNNING);
+ err = devlink_info_version_running_put(req, "fw.mgmt.srev", ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_orom_srev(adapter, ctx, IXGBE_DL_VERSION_RUNNING);
+ err = devlink_info_version_running_put(req, "fw.undi.srev", ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_nvm_ver(adapter, ctx, IXGBE_DL_VERSION_RUNNING);
+ err = devlink_info_version_running_put(req, "fw.psid.api", ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_netlist_ver(adapter, ctx, IXGBE_DL_VERSION_RUNNING);
+ err = devlink_info_version_running_put(req, "fw.netlist", ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_netlist_build(adapter, ctx, IXGBE_DL_VERSION_RUNNING);
+ return devlink_info_version_running_put(req, "fw.netlist.build",
+ ctx->buf);
+}
+
+static int
+ixgbe_devlink_pending_info_get_e610(struct ixgbe_adapter *adapter,
+ struct devlink_info_req *req,
+ struct ixgbe_info_ctx *ctx)
+{
+ int err;
+
+ ixgbe_info_orom_ver(adapter, ctx, IXGBE_DL_VERSION_STORED);
+ err = devlink_info_version_stored_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_UNDI,
+ ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_eetrack(adapter, ctx, IXGBE_DL_VERSION_STORED);
+ err = devlink_info_version_stored_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID,
+ ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_fw_srev(adapter, ctx, IXGBE_DL_VERSION_STORED);
+ err = devlink_info_version_stored_put(req, "fw.mgmt.srev", ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_orom_srev(adapter, ctx, IXGBE_DL_VERSION_STORED);
+ err = devlink_info_version_stored_put(req, "fw.undi.srev", ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_nvm_ver(adapter, ctx, IXGBE_DL_VERSION_STORED);
+ err = devlink_info_version_stored_put(req, "fw.psid.api", ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_netlist_ver(adapter, ctx, IXGBE_DL_VERSION_STORED);
+ err = devlink_info_version_stored_put(req, "fw.netlist", ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_netlist_build(adapter, ctx, IXGBE_DL_VERSION_STORED);
+ return devlink_info_version_stored_put(req, "fw.netlist.build",
+ ctx->buf);
+}
+
+static int ixgbe_devlink_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct ixgbe_adapter *adapter = devlink_priv(devlink);
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_info_ctx *ctx;
+ int err;
+
+ ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ if (hw->mac.type == ixgbe_mac_e610)
+ ixgbe_refresh_fw_version(adapter);
+
+ ixgbe_info_get_dsn(adapter, ctx);
+ err = devlink_info_serial_number_put(req, ctx->buf);
+ if (err)
+ goto free_ctx;
+
+ err = hw->eeprom.ops.read_pba_string(hw, ctx->buf, sizeof(ctx->buf));
+ if (err)
+ goto free_ctx;
+
+ err = devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_BOARD_ID,
+ ctx->buf);
+ if (err)
+ goto free_ctx;
+
+ ixgbe_info_orom_ver(adapter, ctx, IXGBE_DL_VERSION_RUNNING);
+ err = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_UNDI,
+ ctx->buf);
+ if (err)
+ goto free_ctx;
+
+ ixgbe_info_eetrack(adapter, ctx, IXGBE_DL_VERSION_RUNNING);
+ err = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID,
+ ctx->buf);
+ if (err || hw->mac.type != ixgbe_mac_e610)
+ goto free_ctx;
+
+ err = ixgbe_set_ctx_dev_caps(hw, ctx, extack);
+ if (err)
+ goto free_ctx;
+
+ err = ixgbe_devlink_info_get_e610(adapter, req, ctx);
+ if (err)
+ goto free_ctx;
+
+ err = ixgbe_devlink_pending_info_get_e610(adapter, req, ctx);
+free_ctx:
+ kfree(ctx);
+ return err;
+}
+
+/**
+ * ixgbe_devlink_reload_empr_start - Start EMP reset to activate new firmware
+ * @devlink: pointer to the devlink instance to reload
+ * @netns_change: if true, the network namespace is changing
+ * @action: the action to perform. Must be DEVLINK_RELOAD_ACTION_FW_ACTIVATE
+ * @limit: limits on what reload should do, such as not resetting
+ * @extack: netlink extended ACK structure
+ *
+ * Allow user to activate new Embedded Management Processor firmware by
+ * issuing device specific EMP reset. Called in response to
+ * a DEVLINK_CMD_RELOAD with the DEVLINK_RELOAD_ACTION_FW_ACTIVATE.
+ *
+ * Note that teardown and rebuild of the driver state happens automatically as
+ * part of an interrupt and watchdog task. This is because all physical
+ * functions on the device must be able to reset when an EMP reset occurs from
+ * any source.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_devlink_reload_empr_start(struct devlink *devlink,
+ bool netns_change,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ struct netlink_ext_ack *extack)
+{
+ struct ixgbe_adapter *adapter = devlink_priv(devlink);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u8 pending;
+ int err;
+
+ if (hw->mac.type != ixgbe_mac_e610)
+ return -EOPNOTSUPP;
+
+ err = ixgbe_get_pending_updates(adapter, &pending, extack);
+ if (err)
+ return err;
+
+ /* Pending is a bitmask of which flash banks have a pending update,
+ * including the main NVM bank, the Option ROM bank, and the netlist
+ * bank. If any of these bits are set, then there is a pending update
+ * waiting to be activated.
+ */
+ if (!pending) {
+ NL_SET_ERR_MSG_MOD(extack, "No pending firmware update");
+ return -ECANCELED;
+ }
+
+ if (adapter->fw_emp_reset_disabled) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "EMP reset is not available. To activate firmware, a reboot or power cycle is needed");
+ return -ECANCELED;
+ }
+
+ err = ixgbe_aci_nvm_update_empr(hw);
+ if (err)
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to trigger EMP device reset to reload firmware");
+
+ return err;
+}
+
+/* Wait for 10 sec with a 0.5 sec tick. EMPR takes no less than half a sec */
+#define IXGBE_DEVLINK_RELOAD_TIMEOUT_SEC 20
+
+/**
+ * ixgbe_devlink_reload_empr_finish - finishes EMP reset
+ * @devlink: pointer to the devlink instance
+ * @action: the action to perform.
+ * @limit: limits on what reload should do
+ * @actions_performed: actions performed
+ * @extack: netlink extended ACK structure
+ *
+ * Wait for new NVM to be loaded during EMP reset.
+ *
+ * Return: -ETIME when timer is exceeded, 0 on success.
+ */
+static int ixgbe_devlink_reload_empr_finish(struct devlink *devlink,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ u32 *actions_performed,
+ struct netlink_ext_ack *extack)
+{
+ struct ixgbe_adapter *adapter = devlink_priv(devlink);
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i = 0;
+ u32 fwsm;
+
+ do {
+ /* Right after triggering the EMP reset the FWSM register may not be
+ * cleared yet, so start the loop with a delay to avoid checking a
+ * stale value.
+ */
+ mdelay(500);
+
+ fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
+
+ if (i++ >= IXGBE_DEVLINK_RELOAD_TIMEOUT_SEC)
+ return -ETIME;
+
+ } while (!(fwsm & IXGBE_FWSM_FW_VAL_BIT));
+
+ *actions_performed = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE);
+
+ adapter->flags2 &= ~(IXGBE_FLAG2_API_MISMATCH |
+ IXGBE_FLAG2_FW_ROLLBACK);
+
+ return 0;
+}
+
+static const struct devlink_ops ixgbe_devlink_ops = {
+ .info_get = ixgbe_devlink_info_get,
+ .supported_flash_update_params =
+ DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
+ .flash_update = ixgbe_flash_pldm_image,
+ .reload_actions = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
+ .reload_down = ixgbe_devlink_reload_empr_start,
+ .reload_up = ixgbe_devlink_reload_empr_finish,
+};
+
+/**
+ * ixgbe_allocate_devlink - Allocate devlink instance
+ * @dev: device to allocate devlink for
+ *
+ * Allocate a devlink instance for this physical function.
+ *
+ * Return: pointer to the device adapter structure on success,
+ * ERR_PTR(-ENOMEM) when allocation failed.
+ */
+struct ixgbe_adapter *ixgbe_allocate_devlink(struct device *dev)
+{
+ struct ixgbe_adapter *adapter;
+ struct devlink *devlink;
+
+ devlink = devlink_alloc(&ixgbe_devlink_ops, sizeof(*adapter), dev);
+ if (!devlink)
+ return ERR_PTR(-ENOMEM);
+
+ adapter = devlink_priv(devlink);
+ adapter->devlink = devlink;
+
+ return adapter;
+}
+
+/**
+ * ixgbe_devlink_set_switch_id - Set unique switch ID based on PCI DSN
+ * @adapter: pointer to the device adapter structure
+ * @ppid: struct with switch id information
+ */
+static void ixgbe_devlink_set_switch_id(struct ixgbe_adapter *adapter,
+ struct netdev_phys_item_id *ppid)
+{
+ u64 id = pci_get_dsn(adapter->pdev);
+
+ ppid->id_len = sizeof(id);
+ put_unaligned_be64(id, &ppid->id);
+}
+
+/**
+ * ixgbe_devlink_register_port - Register devlink port
+ * @adapter: pointer to the device adapter structure
+ *
+ * Create and register a devlink_port for this physical function.
+ *
+ * Return: 0 on success, error code on failure.
+ */
+int ixgbe_devlink_register_port(struct ixgbe_adapter *adapter)
+{
+ struct devlink_port *devlink_port = &adapter->devlink_port;
+ struct devlink *devlink = adapter->devlink;
+ struct device *dev = &adapter->pdev->dev;
+ struct devlink_port_attrs attrs = {};
+ int err;
+
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ attrs.phys.port_number = adapter->hw.bus.func;
+ attrs.no_phys_port_name = 1;
+ ixgbe_devlink_set_switch_id(adapter, &attrs.switch_id);
+
+ devlink_port_attrs_set(devlink_port, &attrs);
+
+ err = devl_port_register(devlink, devlink_port, 0);
+ if (err) {
+ dev_err(dev,
+ "devlink port registration failed, err %d\n", err);
+ }
+
+ return err;
+}
diff --git a/drivers/net/ethernet/intel/ixgbe/devlink/devlink.h b/drivers/net/ethernet/intel/ixgbe/devlink/devlink.h
new file mode 100644
index 000000000000..381558058048
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/devlink/devlink.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025, Intel Corporation. */
+
+#ifndef _IXGBE_DEVLINK_H_
+#define _IXGBE_DEVLINK_H_
+
+struct ixgbe_adapter *ixgbe_allocate_devlink(struct device *dev);
+int ixgbe_devlink_register_port(struct ixgbe_adapter *adapter);
+void ixgbe_devlink_init_regions(struct ixgbe_adapter *adapter);
+void ixgbe_devlink_destroy_regions(struct ixgbe_adapter *adapter);
+
+#endif /* _IXGBE_DEVLINK_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/devlink/region.c b/drivers/net/ethernet/intel/ixgbe/devlink/region.c
new file mode 100644
index 000000000000..478b4f435120
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/devlink/region.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025, Intel Corporation. */
+
+#include "ixgbe.h"
+#include "devlink.h"
+
+#define IXGBE_DEVLINK_READ_BLK_SIZE (1024 * 1024)
+
+static const struct devlink_region_ops ixgbe_nvm_region_ops;
+static const struct devlink_region_ops ixgbe_sram_region_ops;
+
+static int ixgbe_devlink_parse_region(struct ixgbe_hw *hw,
+ const struct devlink_region_ops *ops,
+ bool *read_shadow_ram, u32 *nvm_size)
+{
+ if (ops == &ixgbe_nvm_region_ops) {
+ *read_shadow_ram = false;
+ *nvm_size = hw->flash.flash_size;
+ } else if (ops == &ixgbe_sram_region_ops) {
+ *read_shadow_ram = true;
+ *nvm_size = hw->flash.sr_words * 2u;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_devlink_nvm_snapshot - Capture a snapshot of the NVM content
+ * @devlink: the devlink instance
+ * @ops: the devlink region being snapshotted
+ * @extack: extended ACK response structure
+ * @data: on exit points to snapshot data buffer
+ *
+ * This function is called in response to the DEVLINK_CMD_REGION_NEW cmd.
+ *
+ * Capture a snapshot of the whole requested NVM region.
+ *
+ * No need to worry about freeing @data, devlink core takes care of it.
+ *
+ * Return: 0 on success, -EOPNOTSUPP for unsupported regions, -EBUSY when
+ * the NVM cannot be locked, -ENOMEM when memory cannot be allocated, and
+ * -EIO when an error occurs during reading.
+ */
+static int ixgbe_devlink_nvm_snapshot(struct devlink *devlink,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack, u8 **data)
+{
+ struct ixgbe_adapter *adapter = devlink_priv(devlink);
+ struct ixgbe_hw *hw = &adapter->hw;
+ bool read_shadow_ram;
+ u8 *nvm_data, *buf;
+ u32 nvm_size, left;
+ u8 num_blks;
+ int err;
+
+ err = ixgbe_devlink_parse_region(hw, ops, &read_shadow_ram, &nvm_size);
+ if (err)
+ return err;
+
+ nvm_data = kvzalloc(nvm_size, GFP_KERNEL);
+ if (!nvm_data)
+ return -ENOMEM;
+
+ num_blks = DIV_ROUND_UP(nvm_size, IXGBE_DEVLINK_READ_BLK_SIZE);
+ buf = nvm_data;
+ left = nvm_size;
+
+ for (int i = 0; i < num_blks; i++) {
+ u32 read_sz = min_t(u32, IXGBE_DEVLINK_READ_BLK_SIZE, left);
+
+ /* Need to acquire the NVM lock on each loop iteration because reading
+ * the whole NVM takes longer than the maximum period the lock may be
+ * held, defined by IXGBE_NVM_TIMEOUT.
+ */
+ err = ixgbe_acquire_nvm(hw, LIBIE_AQC_RES_ACCESS_READ);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to acquire NVM semaphore");
+ kvfree(nvm_data);
+ return -EBUSY;
+ }
+
+ err = ixgbe_read_flat_nvm(hw, i * IXGBE_DEVLINK_READ_BLK_SIZE,
+ &read_sz, buf, read_shadow_ram);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to read RAM content");
+ ixgbe_release_nvm(hw);
+ kvfree(nvm_data);
+ return -EIO;
+ }
+
+ ixgbe_release_nvm(hw);
+
+ buf += read_sz;
+ left -= read_sz;
+ }
+
+ *data = nvm_data;
+ return 0;
+}
+
+/**
+ * ixgbe_devlink_devcaps_snapshot - Capture a snapshot of device capabilities
+ * @devlink: the devlink instance
+ * @ops: the devlink region being snapshotted
+ * @extack: extended ACK response structure
+ * @data: on exit points to snapshot data buffer
+ *
+ * This function is called in response to the DEVLINK_CMD_REGION_NEW for
+ * the device-caps devlink region.
+ *
+ * Capture a snapshot of the device capabilities reported by firmware.
+ *
+ * No need to worry about freeing @data, devlink core takes care of it.
+ *
+ * Return: 0 on success, -ENOMEM when memory cannot be allocated, or the
+ * return code of the read operation.
+ */
+static int ixgbe_devlink_devcaps_snapshot(struct devlink *devlink,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u8 **data)
+{
+ struct ixgbe_adapter *adapter = devlink_priv(devlink);
+ struct ixgbe_aci_cmd_list_caps_elem *caps;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int err;
+
+ caps = kvzalloc(IXGBE_ACI_MAX_BUFFER_SIZE, GFP_KERNEL);
+ if (!caps)
+ return -ENOMEM;
+
+ err = ixgbe_aci_list_caps(hw, caps, IXGBE_ACI_MAX_BUFFER_SIZE, NULL,
+ ixgbe_aci_opc_list_dev_caps);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to read device capabilities");
+ kvfree(caps);
+ return err;
+ }
+
+ *data = (u8 *)caps;
+ return 0;
+}
+
+/**
+ * ixgbe_devlink_nvm_read - Read a portion of NVM flash content
+ * @devlink: the devlink instance
+ * @ops: the devlink region to snapshot
+ * @extack: extended ACK response structure
+ * @offset: the offset to start at
+ * @size: the amount to read
+ * @data: the data buffer to read into
+ *
+ * This function is called in response to DEVLINK_CMD_REGION_READ to directly
+ * read a section of the NVM contents.
+ *
+ * Read from either the nvm-flash region or the shadow-ram region.
+ *
+ * Return: 0 on success, -EOPNOTSUPP for unsupported regions, -EBUSY when
+ * the NVM cannot be locked, -ERANGE when the buffer limit is exceeded, and
+ * -EIO when an error occurs during reading.
+ */
+static int ixgbe_devlink_nvm_read(struct devlink *devlink,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u64 offset, u32 size, u8 *data)
+{
+ struct ixgbe_adapter *adapter = devlink_priv(devlink);
+ struct ixgbe_hw *hw = &adapter->hw;
+ bool read_shadow_ram;
+ u32 nvm_size;
+ int err;
+
+ err = ixgbe_devlink_parse_region(hw, ops, &read_shadow_ram, &nvm_size);
+ if (err)
+ return err;
+
+ if (offset + size > nvm_size) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot read beyond the region size");
+ return -ERANGE;
+ }
+
+ err = ixgbe_acquire_nvm(hw, LIBIE_AQC_RES_ACCESS_READ);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
+ return -EBUSY;
+ }
+
+ err = ixgbe_read_flat_nvm(hw, (u32)offset, &size, data, read_shadow_ram);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents");
+ ixgbe_release_nvm(hw);
+ return -EIO;
+ }
+
+ ixgbe_release_nvm(hw);
+ return 0;
+}
+
+static const struct devlink_region_ops ixgbe_nvm_region_ops = {
+ .name = "nvm-flash",
+ .destructor = kvfree,
+ .snapshot = ixgbe_devlink_nvm_snapshot,
+ .read = ixgbe_devlink_nvm_read,
+};
+
+static const struct devlink_region_ops ixgbe_sram_region_ops = {
+ .name = "shadow-ram",
+ .destructor = kvfree,
+ .snapshot = ixgbe_devlink_nvm_snapshot,
+ .read = ixgbe_devlink_nvm_read,
+};
+
+static const struct devlink_region_ops ixgbe_devcaps_region_ops = {
+ .name = "device-caps",
+ .destructor = kvfree,
+ .snapshot = ixgbe_devlink_devcaps_snapshot,
+};
+
+/**
+ * ixgbe_devlink_init_regions - Initialize devlink regions
+ * @adapter: adapter instance
+ *
+ * Create devlink regions used to enable access to dump the contents of the
+ * flash memory of the device.
+ */
+void ixgbe_devlink_init_regions(struct ixgbe_adapter *adapter)
+{
+ struct devlink *devlink = adapter->devlink;
+ struct device *dev = &adapter->pdev->dev;
+ u64 nvm_size, sram_size;
+
+ if (adapter->hw.mac.type != ixgbe_mac_e610)
+ return;
+
+ nvm_size = adapter->hw.flash.flash_size;
+ adapter->nvm_region = devl_region_create(devlink, &ixgbe_nvm_region_ops,
+ 1, nvm_size);
+ if (IS_ERR(adapter->nvm_region)) {
+ dev_err(dev,
+ "Failed to create NVM devlink region, err %ld\n",
+ PTR_ERR(adapter->nvm_region));
+ adapter->nvm_region = NULL;
+ }
+
+ sram_size = adapter->hw.flash.sr_words * 2u;
+ adapter->sram_region = devl_region_create(devlink, &ixgbe_sram_region_ops,
+ 1, sram_size);
+ if (IS_ERR(adapter->sram_region)) {
+ dev_err(dev,
+ "Failed to create shadow-ram devlink region, err %ld\n",
+ PTR_ERR(adapter->sram_region));
+ adapter->sram_region = NULL;
+ }
+
+ adapter->devcaps_region = devl_region_create(devlink,
+ &ixgbe_devcaps_region_ops,
+ 10, IXGBE_ACI_MAX_BUFFER_SIZE);
+ if (IS_ERR(adapter->devcaps_region)) {
+ dev_err(dev,
+ "Failed to create device-caps devlink region, err %ld\n",
+ PTR_ERR(adapter->devcaps_region));
+ adapter->devcaps_region = NULL;
+ }
+}
+
+/**
+ * ixgbe_devlink_destroy_regions - Destroy devlink regions
+ * @adapter: adapter instance
+ *
+ * Remove previously created regions for this adapter instance.
+ */
+void ixgbe_devlink_destroy_regions(struct ixgbe_adapter *adapter)
+{
+ if (adapter->hw.mac.type != ixgbe_mac_e610)
+ return;
+
+ if (adapter->nvm_region)
+ devl_region_destroy(adapter->nvm_region);
+
+ if (adapter->sram_region)
+ devl_region_destroy(adapter->sram_region);
+
+ if (adapter->devcaps_region)
+ devl_region_destroy(adapter->devcaps_region);
+}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 559b443c409f..dce4936708eb 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#ifndef _IXGBE_H_
#define _IXGBE_H_
@@ -17,9 +17,12 @@
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
+#include <net/devlink.h>
+
#include "ixgbe_type.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb.h"
+#include "ixgbe_e610.h"
#if IS_ENABLED(CONFIG_FCOE)
#define IXGBE_FCOE
#include "ixgbe_fcoe.h"
@@ -173,6 +176,7 @@ enum ixgbe_tx_flags {
#define VMDQ_P(p) ((p) + adapter->ring_feature[RING_F_VMDQ].offset)
#define IXGBE_82599_VF_DEVICE_ID 0x10ED
#define IXGBE_X540_VF_DEVICE_ID 0x1515
+#define IXGBE_E610_VF_DEVICE_ID 0x57AD
#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
{ \
@@ -425,6 +429,10 @@ enum ixgbe_ring_f_enum {
#define IXGBE_BAD_L2A_QUEUE 3
#define IXGBE_MAX_MACVLANS 63
+#define IXGBE_MAX_TX_QUEUES 128
+#define IXGBE_MAX_TX_DESCRIPTORS 40
+#define IXGBE_MAX_TX_VF_HANGS 4
+
DECLARE_STATIC_KEY_FALSE(ixgbe_xdp_locking_key);
struct ixgbe_ring_feature {
@@ -503,9 +511,10 @@ struct ixgbe_q_vector {
struct ixgbe_ring_container rx, tx;
struct napi_struct napi;
+ struct rcu_head rcu; /* to avoid race with update stats on free */
+
cpumask_t affinity_mask;
int numa_node;
- struct rcu_head rcu; /* to avoid race with update stats on free */
char name[IFNAMSIZ + 9];
/* for dynamic allocation of rings associated with this q_vector */
@@ -610,6 +619,11 @@ struct ixgbe_adapter {
struct bpf_prog *xdp_prog;
struct pci_dev *pdev;
struct mii_bus *mii_bus;
+ struct devlink *devlink;
+ struct devlink_port devlink_port;
+ struct devlink_region *nvm_region;
+ struct devlink_region *sram_region;
+ struct devlink_region *devcaps_region;
unsigned long state;
@@ -654,6 +668,7 @@ struct ixgbe_adapter {
#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP BIT(9)
#define IXGBE_FLAG2_PTP_PPS_ENABLED BIT(10)
#define IXGBE_FLAG2_PHY_INTERRUPT BIT(11)
+#define IXGBE_FLAG2_FW_ASYNC_EVENT BIT(12)
#define IXGBE_FLAG2_VLAN_PROMISC BIT(13)
#define IXGBE_FLAG2_EEE_CAPABLE BIT(14)
#define IXGBE_FLAG2_EEE_ENABLED BIT(15)
@@ -661,6 +676,11 @@ struct ixgbe_adapter {
#define IXGBE_FLAG2_IPSEC_ENABLED BIT(17)
#define IXGBE_FLAG2_VF_IPSEC_ENABLED BIT(18)
#define IXGBE_FLAG2_AUTO_DISABLE_VF BIT(19)
+#define IXGBE_FLAG2_PHY_FW_LOAD_FAILED BIT(20)
+#define IXGBE_FLAG2_NO_MEDIA BIT(21)
+#define IXGBE_FLAG2_MOD_POWER_UNSUPPORTED BIT(22)
+#define IXGBE_FLAG2_API_MISMATCH BIT(23)
+#define IXGBE_FLAG2_FW_ROLLBACK BIT(24)
/* Tx fast path data */
int num_tx_queues;
@@ -737,6 +757,7 @@ struct ixgbe_adapter {
bool link_up;
unsigned long sfp_poll_time;
unsigned long link_check_timeout;
+ u32 link_down_events;
struct timer_list service_timer;
struct work_struct service_task;
@@ -749,6 +770,8 @@ struct ixgbe_adapter {
u32 atr_sample_rate;
spinlock_t fdir_perfect_lock;
+ bool fw_emp_reset_disabled;
+
#ifdef IXGBE_FCOE
struct ixgbe_fcoe fcoe;
#endif /* IXGBE_FCOE */
@@ -767,7 +790,7 @@ struct ixgbe_adapter {
struct ptp_clock_info ptp_caps;
struct work_struct ptp_tx_work;
struct sk_buff *ptp_tx_skb;
- struct hwtstamp_config tstamp_config;
+ struct kernel_hwtstamp_config tstamp_config;
unsigned long ptp_tx_start;
unsigned long last_overflow_check;
unsigned long last_rx_ptp_check;
@@ -792,13 +815,13 @@ struct ixgbe_adapter {
u32 timer_event_accumulator;
u32 vferr_refcount;
struct ixgbe_mac_addr *mac_table;
+ u8 tx_hang_count[IXGBE_MAX_TX_QUEUES];
struct kobject *info_kobj;
+ u16 lse_mask;
#ifdef CONFIG_IXGBE_HWMON
struct hwmon_buff *ixgbe_hwmon_buff;
#endif /* CONFIG_IXGBE_HWMON */
-#ifdef CONFIG_DEBUG_FS
struct dentry *ixgbe_dbg_adapter;
-#endif /*CONFIG_DEBUG_FS*/
u8 default_up;
/* Bitmask indicating in use pools */
@@ -823,6 +846,17 @@ struct ixgbe_adapter {
spinlock_t vfs_lock;
};
+struct ixgbe_netdevice_priv {
+ struct ixgbe_adapter *adapter;
+};
+
+static inline struct ixgbe_adapter *ixgbe_from_netdev(struct net_device *netdev)
+{
+ struct ixgbe_netdevice_priv *priv = netdev_priv(netdev);
+
+ return priv->adapter;
+}
+
static inline int ixgbe_determine_xdp_q_idx(int cpu)
{
if (static_key_enabled(&ixgbe_xdp_locking_key))
@@ -849,6 +883,7 @@ static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
return IXGBE_MAX_RSS_INDICES_X550;
default:
return 0;
@@ -874,6 +909,7 @@ enum ixgbe_state_t {
__IXGBE_PTP_RUNNING,
__IXGBE_PTP_TX_IN_PROGRESS,
__IXGBE_RESET_REQUESTED,
+ __IXGBE_PHY_INIT_COMPLETE,
};
struct ixgbe_cb {
@@ -896,6 +932,7 @@ enum ixgbe_boards {
board_x550em_x_fw,
board_x550em_a,
board_x550em_a_fw,
+ board_e610,
};
extern const struct ixgbe_info ixgbe_82598_info;
@@ -906,6 +943,7 @@ extern const struct ixgbe_info ixgbe_X550EM_x_info;
extern const struct ixgbe_info ixgbe_x550em_x_fw_info;
extern const struct ixgbe_info ixgbe_x550em_a_info;
extern const struct ixgbe_info ixgbe_x550em_a_fw_info;
+extern const struct ixgbe_info ixgbe_e610_info;
#ifdef CONFIG_IXGBE_DCB
extern const struct dcbnl_rtnl_ops ixgbe_dcbnl_ops;
#endif
@@ -934,6 +972,8 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter);
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
u16 subdevice_id);
+void ixgbe_set_fw_version_e610(struct ixgbe_adapter *adapter);
+void ixgbe_refresh_fw_version(struct ixgbe_adapter *adapter);
#ifdef CONFIG_PCI_IOV
void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter);
#endif
@@ -1044,8 +1084,11 @@ static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
rx_ring->last_rx_timestamp = jiffies;
}
-int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
-int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
+int ixgbe_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config);
+int ixgbe_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 283a23150a4d..406c15f58034 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -6,6 +6,7 @@
#include <linux/sched.h>
#include "ixgbe.h"
+#include "ixgbe_mbx.h"
#include "ixgbe_phy.h"
#define IXGBE_82598_MAX_TX_QUEUES 32
@@ -44,7 +45,7 @@ static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
goto out;
/*
- * if capababilities version is type 1 we can write the
+ * if capabilities version is type 1 we can write the
* timeout of 10ms to 250ms through the GCR register
*/
if (!(gcr & IXGBE_GCR_CAP_VER2)) {
@@ -750,7 +751,7 @@ mac_reset_top:
/*
* Store the original AUTOC value if it has not been
* stored off yet. Otherwise restore the stored original
- * AUTOC value since the reset operation sets back to deaults.
+ * AUTOC value since the reset operation sets back to defaults.
*/
autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
if (hw->mac.orig_link_settings_stored == false) {
@@ -1168,6 +1169,7 @@ static const struct ixgbe_eeprom_operations eeprom_ops_82598 = {
.calc_checksum = &ixgbe_calc_eeprom_checksum_generic,
.validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
.update_checksum = &ixgbe_update_eeprom_checksum_generic,
+ .read_pba_string = &ixgbe_read_pba_string_generic,
};
static const struct ixgbe_phy_operations phy_ops_82598 = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index cdaf087b4e85..3069b583fd81 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include <linux/pci.h>
#include <linux/delay.h>
@@ -198,7 +198,7 @@ static int prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked,
* @hw: pointer to hardware structure
* @autoc: value to write to AUTOC
* @locked: bool to indicate whether the SW/FW lock was already taken by
- * previous proc_autoc_read_82599.
+ * previous prot_autoc_read_82599.
*
* This part (82599) may need to hold a the SW/FW lock around all writes to
* AUTOC. Likewise after a write we need to do a pipeline reset.
@@ -1615,13 +1615,14 @@ int ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
break;
default:
break;
}
- /* store source and destination IP masks (big-enian) */
+ /* store source and destination IP masks (big-endian) */
IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
~input_mask->formatted.src_ip[0]);
IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
@@ -2229,6 +2230,7 @@ static const struct ixgbe_eeprom_operations eeprom_ops_82599 = {
.calc_checksum = &ixgbe_calc_eeprom_checksum_generic,
.validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
.update_checksum = &ixgbe_update_eeprom_checksum_generic,
+ .read_pba_string = &ixgbe_read_pba_string_generic,
};
static const struct ixgbe_phy_operations phy_ops_82599 = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 3be1bfb16498..3ea6765f9c5d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include <linux/pci.h>
#include <linux/delay.h>
@@ -58,6 +58,7 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
switch (hw->device_id) {
case IXGBE_DEV_ID_X550EM_A_SFP:
case IXGBE_DEV_ID_X550EM_A_SFP_N:
+ case IXGBE_DEV_ID_E610_SFP:
supported = false;
break;
default:
@@ -88,6 +89,8 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X550EM_A_10G_T:
case IXGBE_DEV_ID_X550EM_A_1G_T:
case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ case IXGBE_DEV_ID_E610_10G_T:
+ case IXGBE_DEV_ID_E610_2_5G_T:
supported = true;
break;
default:
@@ -241,7 +244,7 @@ int ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
*/
if (hw->phy.media_type == ixgbe_media_type_backplane) {
/* Need the SW/FW semaphore around AUTOC writes if 82599 and
- * LESM is on, likewise reset_pipeline requries the lock as
+ * LESM is on, likewise reset_pipeline requires the lock as
* it also writes AUTOC.
*/
ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
@@ -298,7 +301,7 @@ int ixgbe_start_hw_generic(struct ixgbe_hw *hw)
return ret_val;
}
- /* Cashe bit indicating need for crosstalk fix */
+ /* Cache bit indicating need for crosstalk fix */
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X550EM_x:
@@ -329,6 +332,7 @@ int ixgbe_start_hw_generic(struct ixgbe_hw *hw)
* Devices in the second generation:
* 82599
* X540
+ * E610
**/
int ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
{
@@ -469,9 +473,14 @@ int ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
}
}
- if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
+ if (hw->mac.type == ixgbe_mac_X550 ||
+ hw->mac.type == ixgbe_mac_X540 ||
+ hw->mac.type == ixgbe_mac_e610) {
if (hw->phy.id == 0)
hw->phy.ops.identify(hw);
+ }
+
+ if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL, MDIO_MMD_PCS, &i);
hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH, MDIO_MMD_PCS, &i);
hw->phy.ops.read_reg(hw, IXGBE_LDPCECL, MDIO_MMD_PCS, &i);
@@ -660,7 +669,11 @@ int ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
hw->bus.type = ixgbe_bus_type_pci_express;
/* Get the negotiated link width and speed from PCI config space */
- link_status = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_LINK_STATUS);
+ if (hw->mac.type == ixgbe_mac_e610)
+ link_status = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_LINK_STATUS_E610);
+ else
+ link_status = ixgbe_read_pci_cfg_word(hw,
+ IXGBE_PCI_LINK_STATUS);
hw->bus.width = ixgbe_convert_bus_width(link_status);
hw->bus.speed = ixgbe_convert_bus_speed(link_status);
@@ -1726,9 +1739,9 @@ int ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
}
}
- checksum = (u16)IXGBE_EEPROM_SUM - checksum;
+ checksum = IXGBE_EEPROM_SUM - checksum;
- return (int)checksum;
+ return checksum;
}
/**
@@ -2918,6 +2931,10 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
break;
+ case ixgbe_mac_e610:
+ pcie_offset = IXGBE_PCIE_MSIX_E610_CAPS;
+ max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
+ break;
default:
return 1;
}
@@ -3366,7 +3383,8 @@ int ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
*speed = IXGBE_LINK_SPEED_1GB_FULL;
break;
case IXGBE_LINKS_SPEED_100_82599:
- if ((hw->mac.type >= ixgbe_mac_X550) &&
+ if ((hw->mac.type >= ixgbe_mac_X550 ||
+ hw->mac.type == ixgbe_mac_e610) &&
(links_reg & IXGBE_LINKS_SPEED_NON_STD))
*speed = IXGBE_LINK_SPEED_5GB_FULL;
else
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index 6493abf189de..6639069ad528 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -194,6 +194,8 @@ u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg);
dev_err(&adapter->pdev->dev, format, ## arg)
#define e_dev_notice(format, arg...) \
dev_notice(&adapter->pdev->dev, format, ## arg)
+#define e_dbg(msglvl, format, arg...) \
+ netif_dbg(adapter, msglvl, adapter->netdev, format, ## arg)
#define e_info(msglvl, format, arg...) \
netif_info(adapter, msglvl, adapter->netdev, format, ## arg)
#define e_err(msglvl, format, arg...) \
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index e85f7d2e8810..3dd5a16a14df 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include "ixgbe.h"
#include <linux/dcbnl.h>
@@ -118,14 +118,14 @@ static int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max)
static u8 ixgbe_dcbnl_get_state(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
return !!(adapter->flags & IXGBE_FLAG_DCB_ENABLED);
}
static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
/* Fail command if not in CEE mode */
if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
@@ -142,7 +142,7 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
u8 *perm_addr)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
int i, j;
memset(perm_addr, 0xff, MAX_ADDR_LEN);
@@ -154,6 +154,7 @@ static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
case ixgbe_mac_X550:
+ case ixgbe_mac_e610:
for (j = 0; j < netdev->addr_len; j++, i++)
perm_addr[i] = adapter->hw.mac.san_addr[j];
break;
@@ -166,7 +167,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
u8 prio, u8 bwg_id, u8 bw_pct,
u8 up_map)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (prio != DCB_ATTR_VALUE_UNDEFINED)
adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type = prio;
@@ -183,7 +184,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
u8 bw_pct)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct;
}
@@ -192,7 +193,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
u8 prio, u8 bwg_id, u8 bw_pct,
u8 up_map)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (prio != DCB_ATTR_VALUE_UNDEFINED)
adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type = prio;
@@ -209,7 +210,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
u8 bw_pct)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct;
}
@@ -218,7 +219,7 @@ static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
u8 *prio, u8 *bwg_id, u8 *bw_pct,
u8 *up_map)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
*prio = adapter->dcb_cfg.tc_config[tc].path[0].prio_type;
*bwg_id = adapter->dcb_cfg.tc_config[tc].path[0].bwg_id;
@@ -229,7 +230,7 @@ static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
static void ixgbe_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
u8 *bw_pct)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
*bw_pct = adapter->dcb_cfg.bw_percentage[0][bwg_id];
}
@@ -238,7 +239,7 @@ static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc,
u8 *prio, u8 *bwg_id, u8 *bw_pct,
u8 *up_map)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
*prio = adapter->dcb_cfg.tc_config[tc].path[1].prio_type;
*bwg_id = adapter->dcb_cfg.tc_config[tc].path[1].bwg_id;
@@ -249,7 +250,7 @@ static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc,
static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
u8 *bw_pct)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
*bw_pct = adapter->dcb_cfg.bw_percentage[1][bwg_id];
}
@@ -257,7 +258,7 @@ static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
u8 setting)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc = setting;
if (adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc !=
@@ -268,14 +269,14 @@ static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
u8 *setting)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
*setting = adapter->dcb_cfg.tc_config[priority].dcb_pfc;
}
static void ixgbe_dcbnl_devreset(struct net_device *dev)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
usleep_range(1000, 2000);
@@ -294,7 +295,7 @@ static void ixgbe_dcbnl_devreset(struct net_device *dev)
static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg;
struct ixgbe_hw *hw = &adapter->hw;
int ret = DCB_NO_HW_CHG;
@@ -317,7 +318,7 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
#ifdef IXGBE_FCOE
- if (adapter->netdev->features & NETIF_F_FCOE_MTU)
+ if (adapter->netdev->fcoe_mtu)
max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
#endif
@@ -382,7 +383,7 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
switch (capid) {
case DCB_CAP_ATTR_PG:
@@ -419,7 +420,7 @@ static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap)
static int ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
switch (tcid) {
@@ -446,14 +447,14 @@ static int ixgbe_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num)
static u8 ixgbe_dcbnl_getpfcstate(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
return adapter->dcb_cfg.pfc_mode_enable;
}
static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
adapter->temp_dcb_cfg.pfc_mode_enable = state;
}
@@ -470,7 +471,7 @@ static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
*/
static int ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct dcb_app app = {
.selector = idtype,
.protocol = id,
@@ -485,7 +486,7 @@ static int ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
static int ixgbe_dcbnl_ieee_getets(struct net_device *dev,
struct ieee_ets *ets)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ieee_ets *my_ets = adapter->ixgbe_ieee_ets;
ets->ets_cap = adapter->dcb_cfg.num_tcs.pg_tcs;
@@ -505,7 +506,7 @@ static int ixgbe_dcbnl_ieee_getets(struct net_device *dev,
static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
struct ieee_ets *ets)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
int i, err;
__u8 max_tc = 0;
@@ -558,7 +559,7 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev,
struct ieee_pfc *pfc)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ieee_pfc *my_pfc = adapter->ixgbe_ieee_pfc;
int i;
@@ -583,7 +584,7 @@ static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev,
static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev,
struct ieee_pfc *pfc)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_hw *hw = &adapter->hw;
u8 *prio_tc;
int err;
@@ -615,7 +616,7 @@ static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev,
static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
struct dcb_app *app)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
int err;
if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
@@ -660,7 +661,7 @@ static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev,
struct dcb_app *app)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
int err;
if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
@@ -704,13 +705,13 @@ static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev,
static u8 ixgbe_dcbnl_getdcbx(struct net_device *dev)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
return adapter->dcbx_cap;
}
static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ieee_ets ets = {0};
struct ieee_pfc pfc = {0};
int err = 0;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c
new file mode 100644
index 000000000000..c2f8189a0738
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c
@@ -0,0 +1,4043 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2024 Intel Corporation. */
+
+#include "ixgbe_common.h"
+#include "ixgbe_e610.h"
+#include "ixgbe_x550.h"
+#include "ixgbe_type.h"
+#include "ixgbe_x540.h"
+#include "ixgbe_mbx.h"
+#include "ixgbe_phy.h"
+
+/**
+ * ixgbe_should_retry_aci_send_cmd_execute - decide if ACI command should
+ * be resent
+ * @opcode: ACI opcode
+ *
+ * Check if ACI command should be sent again depending on the provided opcode.
+ * It may happen when CSR is busy during link state changes.
+ *
+ * Return: true if the sending command routine should be repeated,
+ * otherwise false.
+ */
+static bool ixgbe_should_retry_aci_send_cmd_execute(u16 opcode)
+{
+ switch (opcode) {
+ case ixgbe_aci_opc_disable_rxen:
+ case ixgbe_aci_opc_get_phy_caps:
+ case ixgbe_aci_opc_get_link_status:
+ case ixgbe_aci_opc_get_link_topo:
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ixgbe_aci_send_cmd_execute - execute sending FW Admin Command to FW Admin
+ * Command Interface
+ * @hw: pointer to the HW struct
+ * @desc: descriptor describing the command
+ * @buf: buffer to use for indirect commands (NULL for direct commands)
+ * @buf_size: size of buffer for indirect commands (0 for direct commands)
+ *
+ * Admin Command is sent using CSR by setting descriptor and buffer in specific
+ * registers.
+ *
+ * Return: the exit code of the operation.
+ * * - 0 - success.
+ * * - -EIO - CSR mechanism is not enabled.
+ * * - -EBUSY - CSR mechanism is busy.
+ * * - -EINVAL - buf_size is too big or
+ * invalid argument buf or buf_size.
+ * * - -ETIME - Admin Command X command timeout.
+ * * - -EIO - Admin Command X invalid state of HICR register or
+ * Admin Command failed because a bad opcode was returned or
+ * Admin Command failed with error Y.
+ */
+static int ixgbe_aci_send_cmd_execute(struct ixgbe_hw *hw,
+ struct libie_aq_desc *desc,
+ void *buf, u16 buf_size)
+{
+ u16 opcode, buf_tail_size = buf_size % 4;
+ u32 *raw_desc = (u32 *)desc;
+ u32 hicr, i, buf_tail = 0;
+ bool valid_buf = false;
+
+ hw->aci.last_status = LIBIE_AQ_RC_OK;
+
+ /* It's necessary to check if mechanism is enabled */
+ hicr = IXGBE_READ_REG(hw, IXGBE_PF_HICR);
+
+ if (!(hicr & IXGBE_PF_HICR_EN))
+ return -EIO;
+
+ if (hicr & IXGBE_PF_HICR_C) {
+ hw->aci.last_status = LIBIE_AQ_RC_EBUSY;
+ return -EBUSY;
+ }
+
+ opcode = le16_to_cpu(desc->opcode);
+
+ if (buf_size > IXGBE_ACI_MAX_BUFFER_SIZE)
+ return -EINVAL;
+
+ if (buf)
+ desc->flags |= cpu_to_le16(LIBIE_AQ_FLAG_BUF);
+
+ if (desc->flags & cpu_to_le16(LIBIE_AQ_FLAG_BUF)) {
+ if ((buf && !buf_size) ||
+ (!buf && buf_size))
+ return -EINVAL;
+ if (buf && buf_size)
+ valid_buf = true;
+ }
+
+ if (valid_buf) {
+ if (buf_tail_size)
+ memcpy(&buf_tail, buf + buf_size - buf_tail_size,
+ buf_tail_size);
+
+ if (((buf_size + 3) & ~0x3) > LIBIE_AQ_LG_BUF)
+ desc->flags |= cpu_to_le16(LIBIE_AQ_FLAG_LB);
+
+ desc->datalen = cpu_to_le16(buf_size);
+
+ if (desc->flags & cpu_to_le16(LIBIE_AQ_FLAG_RD)) {
+ for (i = 0; i < buf_size / 4; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_PF_HIBA(i), ((u32 *)buf)[i]);
+ if (buf_tail_size)
+ IXGBE_WRITE_REG(hw, IXGBE_PF_HIBA(i), buf_tail);
+ }
+ }
+
+ /* Descriptor is written to specific registers */
+ for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_PF_HIDA(i), raw_desc[i]);
+
+ /* SW has to set PF_HICR.C bit and clear PF_HICR.SV and
+ * PF_HICR_EV
+ */
+ hicr = (IXGBE_READ_REG(hw, IXGBE_PF_HICR) | IXGBE_PF_HICR_C) &
+ ~(IXGBE_PF_HICR_SV | IXGBE_PF_HICR_EV);
+ IXGBE_WRITE_REG(hw, IXGBE_PF_HICR, hicr);
+
+#define MAX_SLEEP_RESP_US 1000
+#define MAX_TMOUT_RESP_SYNC_US 100000000
+
+ /* Wait for sync Admin Command response */
+ read_poll_timeout(IXGBE_READ_REG, hicr,
+ (hicr & IXGBE_PF_HICR_SV) ||
+ !(hicr & IXGBE_PF_HICR_C),
+ MAX_SLEEP_RESP_US, MAX_TMOUT_RESP_SYNC_US, true, hw,
+ IXGBE_PF_HICR);
+
+#define MAX_TMOUT_RESP_ASYNC_US 150000000
+
+ /* Wait for async Admin Command response */
+ read_poll_timeout(IXGBE_READ_REG, hicr,
+ (hicr & IXGBE_PF_HICR_EV) ||
+ !(hicr & IXGBE_PF_HICR_C),
+ MAX_SLEEP_RESP_US, MAX_TMOUT_RESP_ASYNC_US, true, hw,
+ IXGBE_PF_HICR);
+
+ /* Read sync Admin Command response */
+ if ((hicr & IXGBE_PF_HICR_SV)) {
+ for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++) {
+ raw_desc[i] = IXGBE_READ_REG(hw, IXGBE_PF_HIDA(i));
+ raw_desc[i] = raw_desc[i];
+ }
+ }
+
+ /* Read async Admin Command response */
+ if ((hicr & IXGBE_PF_HICR_EV) && !(hicr & IXGBE_PF_HICR_C)) {
+ for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++) {
+ raw_desc[i] = IXGBE_READ_REG(hw, IXGBE_PF_HIDA_2(i));
+ raw_desc[i] = raw_desc[i];
+ }
+ }
+
+ /* Handle timeout and invalid state of HICR register */
+ if (hicr & IXGBE_PF_HICR_C)
+ return -ETIME;
+
+ if (!(hicr & IXGBE_PF_HICR_SV) && !(hicr & IXGBE_PF_HICR_EV))
+ return -EIO;
+
+ /* For every command other than 0x0014 treat opcode mismatch
+ * as an error. Response to 0x0014 command read from HIDA_2
+ * is a descriptor of an event which is expected to contain
+ * a different opcode than the command.
+ */
+ if (desc->opcode != cpu_to_le16(opcode) &&
+ opcode != ixgbe_aci_opc_get_fw_event)
+ return -EIO;
+
+ if (desc->retval) {
+ hw->aci.last_status = (enum libie_aq_err)
+ le16_to_cpu(desc->retval);
+ return -EIO;
+ }
+
+ /* Write the response values back to the caller's buffer */
+ if (valid_buf) {
+ for (i = 0; i < buf_size / 4; i++)
+ ((u32 *)buf)[i] = IXGBE_READ_REG(hw, IXGBE_PF_HIBA(i));
+ if (buf_tail_size) {
+ buf_tail = IXGBE_READ_REG(hw, IXGBE_PF_HIBA(i));
+ memcpy(buf + buf_size - buf_tail_size, &buf_tail,
+ buf_tail_size);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_aci_send_cmd - send FW Admin Command to FW Admin Command Interface
+ * @hw: pointer to the HW struct
+ * @desc: descriptor describing the command
+ * @buf: buffer to use for indirect commands (NULL for direct commands)
+ * @buf_size: size of buffer for indirect commands (0 for direct commands)
+ *
+ * Helper function to send FW Admin Commands to the FW Admin Command Interface.
+ *
+ * Retry sending the FW Admin Command multiple times to the FW ACI
+ * if the EBUSY Admin Command error is returned.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_send_cmd(struct ixgbe_hw *hw, struct libie_aq_desc *desc,
+ void *buf, u16 buf_size)
+{
+ u16 opcode = le16_to_cpu(desc->opcode);
+ struct libie_aq_desc desc_cpy;
+ enum libie_aq_err last_status;
+ u8 idx = 0, *buf_cpy = NULL;
+ bool is_cmd_for_retry;
+ unsigned long timeout;
+ int err;
+
+ is_cmd_for_retry = ixgbe_should_retry_aci_send_cmd_execute(opcode);
+ if (is_cmd_for_retry) {
+ if (buf) {
+ buf_cpy = kmalloc(buf_size, GFP_KERNEL);
+ if (!buf_cpy)
+ return -ENOMEM;
+ memcpy(buf_cpy, buf, buf_size);
+ }
+ desc_cpy = *desc;
+ }
+
+ timeout = jiffies + msecs_to_jiffies(IXGBE_ACI_SEND_TIMEOUT_MS);
+ do {
+ mutex_lock(&hw->aci.lock);
+ err = ixgbe_aci_send_cmd_execute(hw, desc, buf, buf_size);
+ last_status = hw->aci.last_status;
+ mutex_unlock(&hw->aci.lock);
+
+ if (!is_cmd_for_retry || !err ||
+ last_status != LIBIE_AQ_RC_EBUSY)
+ break;
+
+ if (buf)
+ memcpy(buf, buf_cpy, buf_size);
+ *desc = desc_cpy;
+
+ msleep(IXGBE_ACI_SEND_DELAY_TIME_MS);
+ } while (++idx < IXGBE_ACI_SEND_MAX_EXECUTE &&
+ time_before(jiffies, timeout));
+
+ kfree(buf_cpy);
+
+ return err;
+}
+
+/**
+ * ixgbe_aci_check_event_pending - check if there are any pending events
+ * @hw: pointer to the HW struct
+ *
+ * Determine if there are any pending events.
+ *
+ * Return: true if there are any currently pending events,
+ * otherwise false.
+ */
+bool ixgbe_aci_check_event_pending(struct ixgbe_hw *hw)
+{
+ u32 ep_bit_mask = hw->bus.func ? GL_FWSTS_EP_PF1 : GL_FWSTS_EP_PF0;
+ u32 fwsts = IXGBE_READ_REG(hw, GL_FWSTS);
+
+ return (fwsts & ep_bit_mask) ? true : false;
+}
+
+/**
+ * ixgbe_aci_get_event - get an event from ACI
+ * @hw: pointer to the HW struct
+ * @e: event information structure
+ * @pending: optional flag signaling that there are more pending events
+ *
+ * Obtain an event from ACI and return its content
+ * through 'e' using ACI command (0x0014).
+ * Provide information if there are more events
+ * to retrieve through 'pending'.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_get_event(struct ixgbe_hw *hw, struct ixgbe_aci_event *e,
+ bool *pending)
+{
+ struct libie_aq_desc desc;
+ int err;
+
+ if (!e || (!e->msg_buf && e->buf_len))
+ return -EINVAL;
+
+ mutex_lock(&hw->aci.lock);
+
+ /* Check if there are any events pending */
+ if (!ixgbe_aci_check_event_pending(hw)) {
+ err = -ENOENT;
+ goto aci_get_event_exit;
+ }
+
+ /* Obtain pending event */
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_fw_event);
+ err = ixgbe_aci_send_cmd_execute(hw, &desc, e->msg_buf, e->buf_len);
+ if (err)
+ goto aci_get_event_exit;
+
+ /* Returned 0x0014 opcode indicates that no event was obtained */
+ if (desc.opcode == cpu_to_le16(ixgbe_aci_opc_get_fw_event)) {
+ err = -ENOENT;
+ goto aci_get_event_exit;
+ }
+
+ /* Determine size of event data */
+ e->msg_len = min_t(u16, le16_to_cpu(desc.datalen), e->buf_len);
+ /* Write event descriptor to event info structure */
+ memcpy(&e->desc, &desc, sizeof(e->desc));
+
+ /* Check if there are any further events pending */
+ if (pending)
+ *pending = ixgbe_aci_check_event_pending(hw);
+
+aci_get_event_exit:
+ mutex_unlock(&hw->aci.lock);
+
+ return err;
+}
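
/*
 * Illustrative sketch (not part of the patch): one hypothetical way a caller
 * could drain pending firmware events with ixgbe_aci_get_event(). Buffer
 * sizing via IXGBE_ACI_MAX_BUFFER_SIZE and the -ENOENT "no more events"
 * convention follow the code above.
 */
static void example_drain_aci_events(struct ixgbe_hw *hw)
{
	struct ixgbe_aci_event e = {};
	bool pending = true;

	e.buf_len = IXGBE_ACI_MAX_BUFFER_SIZE;
	e.msg_buf = kzalloc(e.buf_len, GFP_KERNEL);
	if (!e.msg_buf)
		return;

	/* 0 means an event was read; inspect e.desc and e.msg_buf here. */
	while (pending && !ixgbe_aci_get_event(hw, &e, &pending))
		;

	kfree(e.msg_buf);
}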
+
+/**
+ * ixgbe_fill_dflt_direct_cmd_desc - fill ACI descriptor with default values.
+ * @desc: pointer to the temp descriptor (non DMA mem)
+ * @opcode: the opcode can be used to decide which flags to turn off or on
+ *
+ * Helper function to fill the descriptor desc with default values
+ * and the provided opcode.
+ */
+void ixgbe_fill_dflt_direct_cmd_desc(struct libie_aq_desc *desc, u16 opcode)
+{
+ /* Zero out the desc. */
+ memset(desc, 0, sizeof(*desc));
+ desc->opcode = cpu_to_le16(opcode);
+ desc->flags = cpu_to_le16(LIBIE_AQ_FLAG_SI);
+}
+
+/**
+ * ixgbe_aci_get_fw_ver - Get the firmware version
+ * @hw: pointer to the HW struct
+ *
+ * Get the firmware version using ACI command (0x0001).
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_aci_get_fw_ver(struct ixgbe_hw *hw)
+{
+ struct libie_aqc_get_ver *resp;
+ struct libie_aq_desc desc;
+ int err;
+
+ resp = &desc.params.get_ver;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_ver);
+
+ err = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+ if (!err) {
+ hw->fw_branch = resp->fw_branch;
+ hw->fw_maj_ver = resp->fw_major;
+ hw->fw_min_ver = resp->fw_minor;
+ hw->fw_patch = resp->fw_patch;
+ hw->fw_build = le32_to_cpu(resp->fw_build);
+ hw->api_branch = resp->api_branch;
+ hw->api_maj_ver = resp->api_major;
+ hw->api_min_ver = resp->api_minor;
+ hw->api_patch = resp->api_patch;
+ }
+
+ return err;
+}
+
+/**
+ * ixgbe_aci_req_res - request a common resource
+ * @hw: pointer to the HW struct
+ * @res: resource ID
+ * @access: access type
+ * @sdp_number: resource number
+ * @timeout: the maximum time in ms that the driver may hold the resource
+ *
+ * Requests a common resource using the ACI command (0x0008).
+ * Specifies the maximum time the driver may hold the resource.
+ * If the requested resource is currently occupied by some other driver,
+ * a busy return value is returned and the timeout field value indicates the
+ * maximum time the current owner has to free it.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_aci_req_res(struct ixgbe_hw *hw, enum libie_aq_res_id res,
+ enum libie_aq_res_access_type access,
+ u8 sdp_number, u32 *timeout)
+{
+ struct libie_aqc_req_res *cmd_resp;
+ struct libie_aq_desc desc;
+ int err;
+
+ cmd_resp = &desc.params.res_owner;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_req_res);
+
+ cmd_resp->res_id = cpu_to_le16(res);
+ cmd_resp->access_type = cpu_to_le16(access);
+ cmd_resp->res_number = cpu_to_le32(sdp_number);
+ cmd_resp->timeout = cpu_to_le32(*timeout);
+ *timeout = 0;
+
+ err = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+ /* If the resource is held by some other driver, the command completes
+ * with a busy return value and the timeout field indicates the maximum
+ * time the current owner of the resource has to free it.
+ */
+ if (!err || hw->aci.last_status == LIBIE_AQ_RC_EBUSY)
+ *timeout = le32_to_cpu(cmd_resp->timeout);
+
+ return err;
+}
+
+/**
+ * ixgbe_aci_release_res - release a common resource using ACI
+ * @hw: pointer to the HW struct
+ * @res: resource ID
+ * @sdp_number: resource number
+ *
+ * Release a common resource using ACI command (0x0009).
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_aci_release_res(struct ixgbe_hw *hw, enum libie_aq_res_id res,
+ u8 sdp_number)
+{
+ struct libie_aqc_req_res *cmd;
+ struct libie_aq_desc desc;
+
+ cmd = &desc.params.res_owner;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_release_res);
+
+ cmd->res_id = cpu_to_le16(res);
+ cmd->res_number = cpu_to_le32(sdp_number);
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_acquire_res - acquire the ownership of a resource
+ * @hw: pointer to the HW structure
+ * @res: resource ID
+ * @access: access type (read or write)
+ * @timeout: timeout in milliseconds
+ *
+ * Make an attempt to acquire the ownership of a resource using
+ * the ixgbe_aci_req_res to utilize ACI.
+ * If some other driver has previously acquired the resource and performed
+ * any necessary updates, -EALREADY is returned and the caller does not
+ * obtain the resource and has no further work to do.
+ * If needed, the function polls until the current lock owner times out.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_acquire_res(struct ixgbe_hw *hw, enum libie_aq_res_id res,
+ enum libie_aq_res_access_type access, u32 timeout)
+{
+#define IXGBE_RES_POLLING_DELAY_MS 10
+ u32 delay = IXGBE_RES_POLLING_DELAY_MS;
+ u32 res_timeout = timeout;
+ u32 retry_timeout;
+ int err;
+
+ err = ixgbe_aci_req_res(hw, res, access, 0, &res_timeout);
+
+ /* A return code of -EALREADY means that another driver has
+ * previously acquired the resource and performed any necessary updates;
+ * in this case the caller does not obtain the resource and has no
+ * further work to do.
+ */
+ if (err == -EALREADY)
+ return err;
+
+ /* If necessary, poll until the current lock owner times out.
+ * Set retry_timeout to the timeout value reported by the FW in the
+ * response to the "Request Resource Ownership" (0x0008) Admin Command
+ * as it indicates the maximum time the current owner of the resource
+ * is allowed to hold it.
+ */
+ retry_timeout = res_timeout;
+ while (err && retry_timeout && res_timeout) {
+ msleep(delay);
+ retry_timeout = (retry_timeout > delay) ?
+ retry_timeout - delay : 0;
+ err = ixgbe_aci_req_res(hw, res, access, 0, &res_timeout);
+
+ /* Success - lock acquired.
+ * -EALREADY - lock free, no work to do.
+ */
+ if (!err || err == -EALREADY)
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * ixgbe_release_res - release a common resource
+ * @hw: pointer to the HW structure
+ * @res: resource ID
+ *
+ * Release a common resource using ixgbe_aci_release_res.
+ */
+void ixgbe_release_res(struct ixgbe_hw *hw, enum libie_aq_res_id res)
+{
+ u32 total_delay = 0;
+ int err;
+
+ err = ixgbe_aci_release_res(hw, res, 0);
+
+ /* There are some rare cases when trying to release the resource
+ * results in an admin command timeout, so handle them correctly.
+ */
+ while (err == -ETIME &&
+ total_delay < IXGBE_ACI_RELEASE_RES_TIMEOUT) {
+ usleep_range(1000, 1500);
+ err = ixgbe_aci_release_res(hw, res, 0);
+ total_delay++;
+ }
+}
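
/*
 * Illustrative sketch (not part of the patch): the expected acquire/use/
 * release pairing. The resource id and access type come from libie enums
 * not shown in this hunk, and the 3000 ms hold time is an arbitrary example.
 */
static int example_with_resource(struct ixgbe_hw *hw,
				 enum libie_aq_res_id res,
				 enum libie_aq_res_access_type access)
{
	int err;

	err = ixgbe_acquire_res(hw, res, access, 3000);
	if (err == -EALREADY)
		return 0;	/* another driver already did the work */
	if (err)
		return err;

	/* ... access the protected resource here ... */

	ixgbe_release_res(hw, res);

	return 0;
}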
+
+/**
+ * ixgbe_parse_e610_caps - Parse common device/function capabilities
+ * @hw: pointer to the HW struct
+ * @caps: pointer to common capabilities structure
+ * @elem: the capability element to parse
+ * @prefix: message prefix for tracing capabilities
+ *
+ * Given a capability element, extract relevant details into the common
+ * capability structure.
+ *
+ * Return: true if the capability matches one of the common capability ids,
+ * false otherwise.
+ */
+static bool ixgbe_parse_e610_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_caps *caps,
+ struct libie_aqc_list_caps_elem *elem,
+ const char *prefix)
+{
+ u32 logical_id = le32_to_cpu(elem->logical_id);
+ u32 phys_id = le32_to_cpu(elem->phys_id);
+ u32 number = le32_to_cpu(elem->number);
+ u16 cap = le16_to_cpu(elem->cap);
+
+ switch (cap) {
+ case LIBIE_AQC_CAPS_VALID_FUNCTIONS:
+ caps->valid_functions = number;
+ break;
+ case LIBIE_AQC_CAPS_SRIOV:
+ caps->sr_iov_1_1 = (number == 1);
+ break;
+ case LIBIE_AQC_CAPS_VMDQ:
+ caps->vmdq = (number == 1);
+ break;
+ case LIBIE_AQC_CAPS_DCB:
+ caps->dcb = (number == 1);
+ caps->active_tc_bitmap = logical_id;
+ caps->maxtc = phys_id;
+ break;
+ case LIBIE_AQC_CAPS_RSS:
+ caps->rss_table_size = number;
+ caps->rss_table_entry_width = logical_id;
+ break;
+ case LIBIE_AQC_CAPS_RXQS:
+ caps->num_rxq = number;
+ caps->rxq_first_id = phys_id;
+ break;
+ case LIBIE_AQC_CAPS_TXQS:
+ caps->num_txq = number;
+ caps->txq_first_id = phys_id;
+ break;
+ case LIBIE_AQC_CAPS_MSIX:
+ caps->num_msix_vectors = number;
+ caps->msix_vector_first_id = phys_id;
+ break;
+ case LIBIE_AQC_CAPS_NVM_VER:
+ break;
+ case LIBIE_AQC_CAPS_PENDING_NVM_VER:
+ caps->nvm_update_pending_nvm = true;
+ break;
+ case LIBIE_AQC_CAPS_PENDING_OROM_VER:
+ caps->nvm_update_pending_orom = true;
+ break;
+ case LIBIE_AQC_CAPS_PENDING_NET_VER:
+ caps->nvm_update_pending_netlist = true;
+ break;
+ case LIBIE_AQC_CAPS_NVM_MGMT:
+ caps->nvm_unified_update =
+ (number & IXGBE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
+ true : false;
+ break;
+ case LIBIE_AQC_CAPS_MAX_MTU:
+ caps->max_mtu = number;
+ break;
+ case LIBIE_AQC_CAPS_PCIE_RESET_AVOIDANCE:
+ caps->pcie_reset_avoidance = (number > 0);
+ break;
+ case LIBIE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT:
+ caps->reset_restrict_support = (number == 1);
+ break;
+ case LIBIE_AQC_CAPS_EXT_TOPO_DEV_IMG0:
+ case LIBIE_AQC_CAPS_EXT_TOPO_DEV_IMG1:
+ case LIBIE_AQC_CAPS_EXT_TOPO_DEV_IMG2:
+ case LIBIE_AQC_CAPS_EXT_TOPO_DEV_IMG3:
+ {
+ u8 index = cap - LIBIE_AQC_CAPS_EXT_TOPO_DEV_IMG0;
+
+ caps->ext_topo_dev_img_ver_high[index] = number;
+ caps->ext_topo_dev_img_ver_low[index] = logical_id;
+ caps->ext_topo_dev_img_part_num[index] =
+ FIELD_GET(IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_M, phys_id);
+ caps->ext_topo_dev_img_load_en[index] =
+ (phys_id & IXGBE_EXT_TOPO_DEV_IMG_LOAD_EN) != 0;
+ caps->ext_topo_dev_img_prog_en[index] =
+ (phys_id & IXGBE_EXT_TOPO_DEV_IMG_PROG_EN) != 0;
+ break;
+ }
+ default:
+ /* Not one of the recognized common capabilities */
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * ixgbe_parse_valid_functions_cap - Parse LIBIE_AQC_CAPS_VALID_FUNCTIONS caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse LIBIE_AQC_CAPS_VALID_FUNCTIONS for device capabilities.
+ */
+static void
+ixgbe_parse_valid_functions_cap(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ struct libie_aqc_list_caps_elem *cap)
+{
+ dev_p->num_funcs = hweight32(le32_to_cpu(cap->number));
+}
+
+/**
+ * ixgbe_parse_vf_dev_caps - Parse LIBIE_AQC_CAPS_VF device caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse LIBIE_AQC_CAPS_VF for device capabilities.
+ */
+static void ixgbe_parse_vf_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ struct libie_aqc_list_caps_elem *cap)
+{
+ dev_p->num_vfs_exposed = le32_to_cpu(cap->number);
+}
+
+/**
+ * ixgbe_parse_vsi_dev_caps - Parse LIBIE_AQC_CAPS_VSI device caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse LIBIE_AQC_CAPS_VSI for device capabilities.
+ */
+static void ixgbe_parse_vsi_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ struct libie_aqc_list_caps_elem *cap)
+{
+ dev_p->num_vsi_allocd_to_host = le32_to_cpu(cap->number);
+}
+
+/**
+ * ixgbe_parse_fdir_dev_caps - Parse LIBIE_AQC_CAPS_FD device caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse LIBIE_AQC_CAPS_FD for device capabilities.
+ */
+static void ixgbe_parse_fdir_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ struct libie_aqc_list_caps_elem *cap)
+{
+ dev_p->num_flow_director_fltr = le32_to_cpu(cap->number);
+}
+
+/**
+ * ixgbe_parse_dev_caps - Parse device capabilities
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @buf: buffer containing the device capability records
+ * @cap_count: the number of capabilities
+ *
+ * Helper function to parse the device (0x000B) capabilities list. For
+ * capabilities shared between device and function, this relies on
+ * ixgbe_parse_e610_caps.
+ *
+ * Loop through the list of provided capabilities and extract the relevant
+ * data into the device capabilities structure.
+ */
+static void ixgbe_parse_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ void *buf, u32 cap_count)
+{
+ struct libie_aqc_list_caps_elem *cap_resp;
+ u32 i;
+
+ cap_resp = (struct libie_aqc_list_caps_elem *)buf;
+
+ memset(dev_p, 0, sizeof(*dev_p));
+
+ for (i = 0; i < cap_count; i++) {
+ u16 cap = le16_to_cpu(cap_resp[i].cap);
+
+ ixgbe_parse_e610_caps(hw, &dev_p->common_cap, &cap_resp[i],
+ "dev caps");
+
+ switch (cap) {
+ case LIBIE_AQC_CAPS_VALID_FUNCTIONS:
+ ixgbe_parse_valid_functions_cap(hw, dev_p,
+ &cap_resp[i]);
+ break;
+ case LIBIE_AQC_CAPS_VF:
+ ixgbe_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
+ break;
+ case LIBIE_AQC_CAPS_VSI:
+ ixgbe_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
+ break;
+ case LIBIE_AQC_CAPS_FD:
+ ixgbe_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
+ break;
+ default:
+ /* Don't list common capabilities as unknown */
+ break;
+ }
+ }
+}
+
+/**
+ * ixgbe_parse_vf_func_caps - Parse LIBIE_AQC_CAPS_VF function caps
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @cap: pointer to the capability element to parse
+ *
+ * Extract function capabilities for LIBIE_AQC_CAPS_VF.
+ */
+static void ixgbe_parse_vf_func_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_func_caps *func_p,
+ struct libie_aqc_list_caps_elem *cap)
+{
+ func_p->num_allocd_vfs = le32_to_cpu(cap->number);
+ func_p->vf_base_id = le32_to_cpu(cap->logical_id);
+}
+
+/**
+ * ixgbe_get_num_per_func - determine number of resources per PF
+ * @hw: pointer to the HW structure
+ * @max: value to be evenly split between each PF
+ *
+ * Determine the number of valid functions by going through the bitmap returned
+ * from parsing capabilities and use this to calculate the number of resources
+ * per PF based on the max value passed in.
+ *
+ * Return: the number of resources per PF or 0, if no PFs are available.
+ */
+static u32 ixgbe_get_num_per_func(struct ixgbe_hw *hw, u32 max)
+{
+#define IXGBE_CAPS_VALID_FUNCS_M GENMASK(7, 0)
+ u8 funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
+ IXGBE_CAPS_VALID_FUNCS_M);
+
+ return funcs ? (max / funcs) : 0;
+}
+
+/**
+ * ixgbe_parse_vsi_func_caps - Parse LIBIE_AQC_CAPS_VSI function caps
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @cap: pointer to the capability element to parse
+ *
+ * Extract function capabilities for LIBIE_AQC_CAPS_VSI.
+ */
+static void ixgbe_parse_vsi_func_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_func_caps *func_p,
+ struct libie_aqc_list_caps_elem *cap)
+{
+ func_p->guar_num_vsi = ixgbe_get_num_per_func(hw, IXGBE_MAX_VSI);
+}
+
+/**
+ * ixgbe_parse_func_caps - Parse function capabilities
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @buf: buffer containing the function capability records
+ * @cap_count: the number of capabilities
+ *
+ * Helper function to parse the function (0x000A) capabilities list. For
+ * capabilities shared between device and function, this relies on
+ * ixgbe_parse_e610_caps.
+ *
+ * Loop through the list of provided capabilities and extract the relevant
+ * data into the function capabilities structure.
+ */
+static void ixgbe_parse_func_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_func_caps *func_p,
+ void *buf, u32 cap_count)
+{
+ struct libie_aqc_list_caps_elem *cap_resp;
+ u32 i;
+
+ cap_resp = (struct libie_aqc_list_caps_elem *)buf;
+
+ memset(func_p, 0, sizeof(*func_p));
+
+ for (i = 0; i < cap_count; i++) {
+ u16 cap = le16_to_cpu(cap_resp[i].cap);
+
+ ixgbe_parse_e610_caps(hw, &func_p->common_cap,
+ &cap_resp[i], "func caps");
+
+ switch (cap) {
+ case LIBIE_AQC_CAPS_VF:
+ ixgbe_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
+ break;
+ case LIBIE_AQC_CAPS_VSI:
+ ixgbe_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
+ break;
+ default:
+ /* Don't list common capabilities as unknown */
+ break;
+ }
+ }
+}
+
+/**
+ * ixgbe_aci_list_caps - query function/device capabilities
+ * @hw: pointer to the HW struct
+ * @buf: a buffer to hold the capabilities
+ * @buf_size: size of the buffer
+ * @cap_count: if not NULL, set to the number of capabilities reported
+ * @opc: capabilities type to discover, device or function
+ *
+ * Get the function (0x000A) or device (0x000B) capabilities description from
+ * firmware and store it in the buffer.
+ *
+ * If the cap_count pointer is not NULL, then it is set to the number of
+ * capabilities firmware will report. Note that if the buffer size is too
+ * small, it is possible the command will return -ENOMEM. The
+ * cap_count will still be updated in this case. It is recommended that the
+ * buffer size be set to IXGBE_ACI_MAX_BUFFER_SIZE (the largest possible
+ * buffer that firmware could return) to avoid this.
+ *
+ * Return: the exit code of the operation.
+ * Exit code of -ENOMEM means the buffer size is too small.
+ */
+int ixgbe_aci_list_caps(struct ixgbe_hw *hw, void *buf, u16 buf_size,
+ u32 *cap_count, enum ixgbe_aci_opc opc)
+{
+ struct libie_aqc_list_caps *cmd;
+ struct libie_aq_desc desc;
+ int err;
+
+ cmd = &desc.params.get_cap;
+
+ if (opc != ixgbe_aci_opc_list_func_caps &&
+ opc != ixgbe_aci_opc_list_dev_caps)
+ return -EINVAL;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, opc);
+ err = ixgbe_aci_send_cmd(hw, &desc, buf, buf_size);
+
+ if (cap_count)
+ *cap_count = le32_to_cpu(cmd->count);
+
+ return err;
+}
+
+/**
+ * ixgbe_discover_dev_caps - Read and extract device capabilities
+ * @hw: pointer to the hardware structure
+ * @dev_caps: pointer to device capabilities structure
+ *
+ * Read the device capabilities and extract them into the dev_caps structure
+ * for later use.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_discover_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_caps)
+{
+ u32 cap_count;
+ u8 *cbuf;
+ int err;
+
+ cbuf = kzalloc(IXGBE_ACI_MAX_BUFFER_SIZE, GFP_KERNEL);
+ if (!cbuf)
+ return -ENOMEM;
+
+ /* Although the driver doesn't know the number of capabilities the
+ * device will return, we can simply send a 4KB buffer, the maximum
+ * possible size that firmware can return.
+ */
+ cap_count = IXGBE_ACI_MAX_BUFFER_SIZE /
+ sizeof(struct libie_aqc_list_caps_elem);
+
+ err = ixgbe_aci_list_caps(hw, cbuf, IXGBE_ACI_MAX_BUFFER_SIZE,
+ &cap_count,
+ ixgbe_aci_opc_list_dev_caps);
+ if (!err)
+ ixgbe_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
+
+ kfree(cbuf);
+
+ return err;
+}
+
+/**
+ * ixgbe_discover_func_caps - Read and extract function capabilities
+ * @hw: pointer to the hardware structure
+ * @func_caps: pointer to function capabilities structure
+ *
+ * Read the function capabilities and extract them into the func_caps structure
+ * for later use.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_discover_func_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_func_caps *func_caps)
+{
+ u32 cap_count;
+ u8 *cbuf;
+ int err;
+
+ cbuf = kzalloc(IXGBE_ACI_MAX_BUFFER_SIZE, GFP_KERNEL);
+ if (!cbuf)
+ return -ENOMEM;
+
+ /* Although the driver doesn't know the number of capabilities the
+ * device will return, we can simply send a 4KB buffer, the maximum
+ * possible size that firmware can return.
+ */
+ cap_count = IXGBE_ACI_MAX_BUFFER_SIZE /
+ sizeof(struct libie_aqc_list_caps_elem);
+
+ err = ixgbe_aci_list_caps(hw, cbuf, IXGBE_ACI_MAX_BUFFER_SIZE,
+ &cap_count,
+ ixgbe_aci_opc_list_func_caps);
+ if (!err)
+ ixgbe_parse_func_caps(hw, func_caps, cbuf, cap_count);
+
+ kfree(cbuf);
+
+ return err;
+}
+
+/**
+ * ixgbe_get_caps - get info about the HW
+ * @hw: pointer to the hardware structure
+ *
+ * Retrieve both device and function capabilities.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_get_caps(struct ixgbe_hw *hw)
+{
+ int err;
+
+ err = ixgbe_discover_dev_caps(hw, &hw->dev_caps);
+ if (err)
+ return err;
+
+ return ixgbe_discover_func_caps(hw, &hw->func_caps);
+}
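
/*
 * Illustrative sketch (not part of the patch): capabilities are typically
 * discovered once at init and then read from the copies cached in struct
 * ixgbe_hw; the sanity check below is only an example.
 */
static int example_init_caps(struct ixgbe_hw *hw)
{
	int err = ixgbe_get_caps(hw);

	if (err)
		return err;

	/* Parsed values are now available in hw->dev_caps / hw->func_caps. */
	if (!hw->func_caps.common_cap.num_msix_vectors ||
	    !hw->dev_caps.num_funcs)
		return -EIO;

	return 0;
}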
+
+/**
+ * ixgbe_aci_disable_rxen - disable RX
+ * @hw: pointer to the HW struct
+ *
+ * Request a safe disable of Receive Enable using ACI command (0x000C).
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_disable_rxen(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_disable_rxen *cmd;
+ struct libie_aq_desc desc;
+
+ cmd = libie_aq_raw(&desc);
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_disable_rxen);
+
+ cmd->lport_num = hw->bus.func;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_aci_get_phy_caps - returns PHY capabilities
+ * @hw: pointer to the HW struct
+ * @qual_mods: report qualified modules
+ * @report_mode: report mode capabilities
+ * @pcaps: structure for PHY capabilities to be filled
+ *
+ * Returns the various PHY capabilities supported on the Port
+ * using ACI command (0x0600).
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_get_phy_caps(struct ixgbe_hw *hw, bool qual_mods, u8 report_mode,
+ struct ixgbe_aci_cmd_get_phy_caps_data *pcaps)
+{
+ struct ixgbe_aci_cmd_get_phy_caps *cmd;
+ u16 pcaps_size = sizeof(*pcaps);
+ struct libie_aq_desc desc;
+ int err;
+
+ cmd = libie_aq_raw(&desc);
+
+ if (!pcaps || (report_mode & ~IXGBE_ACI_REPORT_MODE_M))
+ return -EINVAL;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_phy_caps);
+
+ if (qual_mods)
+ cmd->param0 |= cpu_to_le16(IXGBE_ACI_GET_PHY_RQM);
+
+ cmd->param0 |= cpu_to_le16(report_mode);
+ err = ixgbe_aci_send_cmd(hw, &desc, pcaps, pcaps_size);
+ if (!err && report_mode == IXGBE_ACI_REPORT_TOPO_CAP_MEDIA) {
+ hw->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
+ hw->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
+ memcpy(hw->link.link_info.module_type, &pcaps->module_type,
+ sizeof(hw->link.link_info.module_type));
+ }
+
+ return err;
+}
+
+/**
+ * ixgbe_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
+ * @caps: PHY ability structure to copy data from
+ * @cfg: PHY configuration structure to copy data to
+ *
+ * Helper function to copy data from PHY capabilities data structure
+ * to PHY configuration data structure
+ */
+void ixgbe_copy_phy_caps_to_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *caps,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg)
+{
+ if (!caps || !cfg)
+ return;
+
+ memset(cfg, 0, sizeof(*cfg));
+ cfg->phy_type_low = caps->phy_type_low;
+ cfg->phy_type_high = caps->phy_type_high;
+ cfg->caps = caps->caps;
+ cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
+ cfg->eee_cap = caps->eee_cap;
+ cfg->eeer_value = caps->eeer_value;
+ cfg->link_fec_opt = caps->link_fec_options;
+ cfg->module_compliance_enforcement =
+ caps->module_compliance_enforcement;
+}
+
+/**
+ * ixgbe_aci_set_phy_cfg - set PHY configuration
+ * @hw: pointer to the HW struct
+ * @cfg: structure with PHY configuration data to be set
+ *
+ * Set the various PHY configuration parameters supported on the Port
+ * using ACI command (0x0601).
+ * One or more of the Set PHY config parameters may be ignored in an MFP
+ * mode as the PF may not have the privilege to set some of the PHY Config
+ * parameters.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_set_phy_cfg(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg)
+{
+ struct ixgbe_aci_cmd_set_phy_cfg *cmd;
+ struct libie_aq_desc desc;
+ int err;
+
+ if (!cfg)
+ return -EINVAL;
+
+ cmd = libie_aq_raw(&desc);
+ /* Ensure that only valid bits of cfg->caps can be turned on. */
+ cfg->caps &= IXGBE_ACI_PHY_ENA_VALID_MASK;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_phy_cfg);
+ cmd->lport_num = hw->bus.func;
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
+
+ err = ixgbe_aci_send_cmd(hw, &desc, cfg, sizeof(*cfg));
+ if (!err)
+ hw->phy.curr_user_phy_cfg = *cfg;
+
+ return err;
+}
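
/*
 * Illustrative sketch (not part of the patch): the usual flow is to query
 * the PHY abilities, convert them to a configuration, adjust it, then apply
 * it. The adjustment step is omitted here; a real caller would also prefer
 * a heap-allocated pcaps, as ixgbe_update_link_info() does below.
 */
static int example_reapply_phy_cfg(struct ixgbe_hw *hw)
{
	struct ixgbe_aci_cmd_get_phy_caps_data pcaps = {};
	struct ixgbe_aci_cmd_set_phy_cfg_data cfg = {};
	int err;

	err = ixgbe_aci_get_phy_caps(hw, false,
				     IXGBE_ACI_REPORT_TOPO_CAP_MEDIA, &pcaps);
	if (err)
		return err;

	ixgbe_copy_phy_caps_to_cfg(&pcaps, &cfg);

	return ixgbe_aci_set_phy_cfg(hw, &cfg);
}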
+
+/**
+ * ixgbe_aci_set_link_restart_an - set up link and restart AN
+ * @hw: pointer to the HW struct
+ * @ena_link: if true: enable link, if false: disable link
+ *
+ * Function sets up the link and restarts the Auto-Negotiation over the link.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_set_link_restart_an(struct ixgbe_hw *hw, bool ena_link)
+{
+ struct ixgbe_aci_cmd_restart_an *cmd;
+ struct libie_aq_desc desc;
+
+ cmd = libie_aq_raw(&desc);
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_restart_an);
+
+ cmd->cmd_flags = IXGBE_ACI_RESTART_AN_LINK_RESTART;
+ cmd->lport_num = hw->bus.func;
+ if (ena_link)
+ cmd->cmd_flags |= IXGBE_ACI_RESTART_AN_LINK_ENABLE;
+ else
+ cmd->cmd_flags &= ~IXGBE_ACI_RESTART_AN_LINK_ENABLE;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_is_media_cage_present - check if media cage is present
+ * @hw: pointer to the HW struct
+ *
+ * Identify presence of media cage using the ACI command (0x06E0).
+ *
+ * Return: true if media cage is present, else false. If no cage, then
+ * media type is backplane or BASE-T.
+ */
+static bool ixgbe_is_media_cage_present(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_link_topo *cmd;
+ struct libie_aq_desc desc;
+
+ cmd = libie_aq_raw(&desc);
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_topo);
+
+ cmd->addr.topo_params.node_type_ctx =
+ FIELD_PREP(IXGBE_ACI_LINK_TOPO_NODE_CTX_M,
+ IXGBE_ACI_LINK_TOPO_NODE_CTX_PORT);
+
+ /* Set node type. */
+ cmd->addr.topo_params.node_type_ctx |=
+ FIELD_PREP(IXGBE_ACI_LINK_TOPO_NODE_TYPE_M,
+ IXGBE_ACI_LINK_TOPO_NODE_TYPE_CAGE);
+
+ /* Node type cage can be used to determine if cage is present. If AQC
+ * returns error (ENOENT), then no cage present. If no cage present then
+ * connection type is backplane or BASE-T.
+ */
+ return !ixgbe_aci_get_netlist_node(hw, cmd, NULL, NULL);
+}
+
+/**
+ * ixgbe_get_media_type_from_phy_type - Gets media type based on phy type
+ * @hw: pointer to the HW struct
+ *
+ * Try to identify the media type based on the phy type.
+ * If more than one media type is set, ixgbe_media_type_unknown is returned.
+ * First, phy_type_low is checked, then phy_type_high.
+ * If neither identifies the media, ixgbe_media_type_unknown is returned.
+ *
+ * Return: type of a media based on phy type in form of enum.
+ */
+static enum ixgbe_media_type
+ixgbe_get_media_type_from_phy_type(struct ixgbe_hw *hw)
+{
+ struct ixgbe_link_status *hw_link_info;
+
+ if (!hw)
+ return ixgbe_media_type_unknown;
+
+ hw_link_info = &hw->link.link_info;
+ if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
+ /* If more than one media type is selected, report unknown */
+ return ixgbe_media_type_unknown;
+
+ if (hw_link_info->phy_type_low) {
+ /* 1G SGMII is a special case where some DA cable PHYs
+ * may show this as an option when it really shouldn't
+ * be since SGMII is meant to be between a MAC and a PHY
+ * in a backplane. Try to detect this case and handle it
+ */
+ if (hw_link_info->phy_type_low == IXGBE_PHY_TYPE_LOW_1G_SGMII &&
+ (hw_link_info->module_type[IXGBE_ACI_MOD_TYPE_IDENT] ==
+ IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
+ hw_link_info->module_type[IXGBE_ACI_MOD_TYPE_IDENT] ==
+ IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
+ return ixgbe_media_type_da;
+
+ switch (hw_link_info->phy_type_low) {
+ case IXGBE_PHY_TYPE_LOW_1000BASE_SX:
+ case IXGBE_PHY_TYPE_LOW_1000BASE_LX:
+ case IXGBE_PHY_TYPE_LOW_10GBASE_SR:
+ case IXGBE_PHY_TYPE_LOW_10GBASE_LR:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_SR:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_LR:
+ return ixgbe_media_type_fiber;
+ case IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
+ case IXGBE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
+ return ixgbe_media_type_fiber;
+ case IXGBE_PHY_TYPE_LOW_100BASE_TX:
+ case IXGBE_PHY_TYPE_LOW_1000BASE_T:
+ case IXGBE_PHY_TYPE_LOW_2500BASE_T:
+ case IXGBE_PHY_TYPE_LOW_5GBASE_T:
+ case IXGBE_PHY_TYPE_LOW_10GBASE_T:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_T:
+ return ixgbe_media_type_copper;
+ case IXGBE_PHY_TYPE_LOW_10G_SFI_DA:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_CR:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_CR_S:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_CR1:
+ return ixgbe_media_type_da;
+ case IXGBE_PHY_TYPE_LOW_25G_AUI_C2C:
+ if (ixgbe_is_media_cage_present(hw))
+ return ixgbe_media_type_aui;
+ fallthrough;
+ case IXGBE_PHY_TYPE_LOW_1000BASE_KX:
+ case IXGBE_PHY_TYPE_LOW_2500BASE_KX:
+ case IXGBE_PHY_TYPE_LOW_2500BASE_X:
+ case IXGBE_PHY_TYPE_LOW_5GBASE_KR:
+ case IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1:
+ case IXGBE_PHY_TYPE_LOW_10G_SFI_C2C:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_KR:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_KR1:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_KR_S:
+ return ixgbe_media_type_backplane;
+ }
+ } else {
+ switch (hw_link_info->phy_type_high) {
+ case IXGBE_PHY_TYPE_HIGH_10BASE_T:
+ return ixgbe_media_type_copper;
+ }
+ }
+ return ixgbe_media_type_unknown;
+}
+
+/**
+ * ixgbe_update_link_info - update status of the HW network link
+ * @hw: pointer to the HW struct
+ *
+ * Update the status of the HW network link.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_update_link_info(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data *pcaps;
+ struct ixgbe_link_status *li;
+ int err;
+
+ if (!hw)
+ return -EINVAL;
+
+ li = &hw->link.link_info;
+
+ err = ixgbe_aci_get_link_info(hw, true, NULL);
+ if (err)
+ return err;
+
+ if (!(li->link_info & IXGBE_ACI_MEDIA_AVAILABLE))
+ return 0;
+
+ pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
+ if (!pcaps)
+ return -ENOMEM;
+
+ err = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
+ pcaps);
+
+ if (!err)
+ memcpy(li->module_type, &pcaps->module_type,
+ sizeof(li->module_type));
+
+ kfree(pcaps);
+
+ return err;
+}
+
+/**
+ * ixgbe_get_link_status - get status of the HW network link
+ * @hw: pointer to the HW struct
+ * @link_up: pointer to bool (true/false = linkup/linkdown)
+ *
+ * Variable link_up is true if link is up, false if link is down.
+ * The variable link_up is invalid if the return status is non-zero. As a
+ * result of this call, link status reporting becomes enabled.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_get_link_status(struct ixgbe_hw *hw, bool *link_up)
+{
+ if (!hw || !link_up)
+ return -EINVAL;
+
+ if (hw->link.get_link_info) {
+ int err = ixgbe_update_link_info(hw);
+
+ if (err)
+ return err;
+ }
+
+ *link_up = hw->link.link_info.link_info & IXGBE_ACI_LINK_UP;
+
+ return 0;
+}
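
/*
 * Illustrative sketch (not part of the patch): poll the link and read the
 * speed that ixgbe_aci_get_link_info() cached in hw->link.link_info.
 */
static int example_check_link(struct ixgbe_hw *hw, bool *up, u16 *speed)
{
	int err = ixgbe_get_link_status(hw, up);

	if (err)
		return err;

	*speed = *up ? hw->link.link_info.link_speed : 0;

	return 0;
}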
+
+/**
+ * ixgbe_aci_get_link_info - get the link status
+ * @hw: pointer to the HW struct
+ * @ena_lse: enable/disable LinkStatusEvent reporting
+ * @link: pointer to link status structure - optional
+ *
+ * Get the current Link Status using ACI command (0x0607).
+ * The current link can be optionally provided to update
+ * the status.
+ *
+ * Return: the link status of the adapter.
+ */
+int ixgbe_aci_get_link_info(struct ixgbe_hw *hw, bool ena_lse,
+ struct ixgbe_link_status *link)
+{
+ struct ixgbe_aci_cmd_get_link_status_data link_data = {};
+ struct ixgbe_aci_cmd_get_link_status *resp;
+ struct ixgbe_link_status *li_old, *li;
+ struct ixgbe_fc_info *hw_fc_info;
+ struct libie_aq_desc desc;
+ bool tx_pause, rx_pause;
+ u8 cmd_flags;
+ int err;
+
+ if (!hw)
+ return -EINVAL;
+
+ li_old = &hw->link.link_info_old;
+ li = &hw->link.link_info;
+ hw_fc_info = &hw->fc;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_status);
+ cmd_flags = (ena_lse) ? IXGBE_ACI_LSE_ENA : IXGBE_ACI_LSE_DIS;
+ resp = libie_aq_raw(&desc);
+ resp->cmd_flags = cpu_to_le16(cmd_flags);
+ resp->lport_num = hw->bus.func;
+
+ err = ixgbe_aci_send_cmd(hw, &desc, &link_data, sizeof(link_data));
+ if (err)
+ return err;
+
+ /* Save off old link status information. */
+ *li_old = *li;
+
+ /* Update current link status information. */
+ li->link_speed = le16_to_cpu(link_data.link_speed);
+ li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
+ li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
+ li->link_info = link_data.link_info;
+ li->link_cfg_err = link_data.link_cfg_err;
+ li->an_info = link_data.an_info;
+ li->ext_info = link_data.ext_info;
+ li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
+ li->fec_info = link_data.cfg & IXGBE_ACI_FEC_MASK;
+ li->topo_media_conflict = link_data.topo_media_conflict;
+ li->pacing = link_data.cfg & (IXGBE_ACI_CFG_PACING_M |
+ IXGBE_ACI_CFG_PACING_TYPE_M);
+
+ /* Update fc info. */
+ tx_pause = !!(link_data.an_info & IXGBE_ACI_LINK_PAUSE_TX);
+ rx_pause = !!(link_data.an_info & IXGBE_ACI_LINK_PAUSE_RX);
+ if (tx_pause && rx_pause)
+ hw_fc_info->current_mode = ixgbe_fc_full;
+ else if (tx_pause)
+ hw_fc_info->current_mode = ixgbe_fc_tx_pause;
+ else if (rx_pause)
+ hw_fc_info->current_mode = ixgbe_fc_rx_pause;
+ else
+ hw_fc_info->current_mode = ixgbe_fc_none;
+
+ li->lse_ena = !!(le16_to_cpu(resp->cmd_flags) &
+ IXGBE_ACI_LSE_IS_ENABLED);
+
+ /* Save link status information. */
+ if (link)
+ *link = *li;
+
+ /* Flag cleared so calling functions don't call AQ again. */
+ hw->link.get_link_info = false;
+
+ return 0;
+}
+
+/**
+ * ixgbe_aci_set_event_mask - set event mask
+ * @hw: pointer to the HW struct
+ * @port_num: port number of the physical function
+ * @mask: event mask to be set
+ *
+ * Set the event mask using ACI command (0x0613).
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_set_event_mask(struct ixgbe_hw *hw, u8 port_num, u16 mask)
+{
+ struct ixgbe_aci_cmd_set_event_mask *cmd;
+ struct libie_aq_desc desc;
+
+ cmd = libie_aq_raw(&desc);
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_event_mask);
+
+ cmd->lport_num = port_num;
+
+ cmd->event_mask = cpu_to_le16(mask);
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_configure_lse - enable/disable link status events
+ * @hw: pointer to the HW struct
+ * @activate: true to enable LSE, false to disable it
+ * @mask: event mask to be set; a set bit means deactivation of the
+ * corresponding event
+ *
+ * Set the event mask and then enable or disable link status events.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_configure_lse(struct ixgbe_hw *hw, bool activate, u16 mask)
+{
+ int err;
+
+ err = ixgbe_aci_set_event_mask(hw, (u8)hw->bus.func, mask);
+ if (err)
+ return err;
+
+ /* Enable link status event generation by FW. */
+ return ixgbe_aci_get_link_info(hw, activate, NULL);
+}
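+
+/*
+ * Illustrative sketch, not part of this patch: given the mask semantics
+ * above (a set bit deactivates the corresponding event), a caller wanting
+ * notifications for every link change could enable LSE with a zero mask:
+ *
+ *	err = ixgbe_configure_lse(hw, true, 0);
+ *	if (err)
+ *		return err;
+ */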
+
+/**
+ * ixgbe_start_hw_e610 - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * Get firmware version and start the hardware using the generic
+ * start_hw() and ixgbe_start_hw_gen2() functions.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_start_hw_e610(struct ixgbe_hw *hw)
+{
+ int err;
+
+ err = ixgbe_aci_get_fw_ver(hw);
+ if (err)
+ return err;
+
+ err = ixgbe_start_hw_generic(hw);
+ if (err)
+ return err;
+
+ ixgbe_start_hw_gen2(hw);
+
+ return 0;
+}
+
+/**
+ * ixgbe_aci_set_port_id_led - set LED value for the given port
+ * @hw: pointer to the HW struct
+ * @orig_mode: set LED original mode
+ *
+ * Set LED value for the given port (0x06E9)
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_set_port_id_led(struct ixgbe_hw *hw, bool orig_mode)
+{
+ struct ixgbe_aci_cmd_set_port_id_led *cmd;
+ struct libie_aq_desc desc;
+
+ cmd = libie_aq_raw(&desc);
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_port_id_led);
+
+ cmd->lport_num = (u8)hw->bus.func;
+ cmd->lport_num_valid = IXGBE_ACI_PORT_ID_PORT_NUM_VALID;
+
+ if (orig_mode)
+ cmd->ident_mode = IXGBE_ACI_PORT_IDENT_LED_ORIG;
+ else
+ cmd->ident_mode = IXGBE_ACI_PORT_IDENT_LED_BLINK;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_get_media_type_e610 - Gets media type
+ * @hw: pointer to the HW struct
+ *
+ * To determine the media type, the function reads the PHY
+ * capabilities and then uses them to identify the PHY type by
+ * checking phy_type_high and phy_type_low.
+ *
+ * Return: the type of media in form of ixgbe_media_type enum
+ * or ixgbe_media_type_unknown in case of an error.
+ */
+enum ixgbe_media_type ixgbe_get_media_type_e610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
+ int rc;
+
+ rc = ixgbe_update_link_info(hw);
+ if (rc)
+ return ixgbe_media_type_unknown;
+
+ /* If there is no link but a PHY (dongle) is available, SW should use
+ * the Get PHY Caps admin command instead of Get Link Status, find the
+ * most significant bit set in the PHY types reported by the command,
+ * and use it to discover the media type.
+ */
+ if (!(hw->link.link_info.link_info & IXGBE_ACI_LINK_UP) &&
+ (hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE)) {
+ int highest_bit;
+
+ /* Get PHY Capabilities */
+ rc = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
+ &pcaps);
+ if (rc)
+ return ixgbe_media_type_unknown;
+
+ highest_bit = fls64(le64_to_cpu(pcaps.phy_type_high));
+ if (highest_bit) {
+ hw->link.link_info.phy_type_high =
+ BIT_ULL(highest_bit - 1);
+ hw->link.link_info.phy_type_low = 0;
+ } else {
+ highest_bit = fls64(le64_to_cpu(pcaps.phy_type_low));
+ if (highest_bit) {
+ hw->link.link_info.phy_type_low =
+ BIT_ULL(highest_bit - 1);
+ hw->link.link_info.phy_type_high = 0;
+ }
+ }
+ }
+
+ /* Based on link status or search above try to discover media type. */
+ hw->phy.media_type = ixgbe_get_media_type_from_phy_type(hw);
+
+ return hw->phy.media_type;
+}
+
+/**
+ * ixgbe_setup_link_e610 - Set up link
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait: true when waiting for completion is needed
+ *
+ * Set up the link with the specified speed.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_setup_link_e610(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait)
+{
+ /* Simply request FW to perform proper PHY setup */
+ return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
+}
+
+/**
+ * ixgbe_check_link_e610 - Determine link and speed status
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: true when link is up
+ * @link_up_wait_to_complete: true to wait for the link to come up
+ *
+ * Determine if the link is up and the current link speed
+ * using ACI command (0x0607).
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_check_link_e610(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete)
+{
+ int err;
+ u32 i;
+
+ if (!speed || !link_up)
+ return -EINVAL;
+
+ /* Set get_link_info flag to ensure that fresh
+ * link information will be obtained from FW
+ * by sending Get Link Status admin command.
+ */
+ hw->link.get_link_info = true;
+
+ /* Update link information in adapter context. */
+ err = ixgbe_get_link_status(hw, link_up);
+ if (err)
+ return err;
+
+ /* Wait for link up if it was requested. */
+ if (link_up_wait_to_complete && !(*link_up)) {
+ for (i = 0; i < hw->mac.max_link_up_time; i++) {
+ msleep(100);
+ hw->link.get_link_info = true;
+ err = ixgbe_get_link_status(hw, link_up);
+ if (err)
+ return err;
+ if (*link_up)
+ break;
+ }
+ }
+
+ /* Use link information in adapter context updated by the call
+ * to ixgbe_get_link_status() to determine current link speed.
+ * Link speed information is valid only when link up was
+ * reported by FW.
+ */
+ if (*link_up) {
+ switch (hw->link.link_info.link_speed) {
+ case IXGBE_ACI_LINK_SPEED_10MB:
+ *speed = IXGBE_LINK_SPEED_10_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_100MB:
+ *speed = IXGBE_LINK_SPEED_100_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_1000MB:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_2500MB:
+ *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_5GB:
+ *speed = IXGBE_LINK_SPEED_5GB_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_10GB:
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ break;
+ default:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ break;
+ }
+ } else {
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_get_link_capabilities_e610 - Determine link capabilities
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @autoneg: true when autoneg or autotry is enabled
+ *
+ * Determine speed and AN parameters of a link.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_get_link_capabilities_e610(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg)
+{
+ if (!speed || !autoneg)
+ return -EINVAL;
+
+ *autoneg = true;
+ *speed = hw->phy.speeds_supported;
+
+ return 0;
+}
+
+/**
+ * ixgbe_cfg_phy_fc - Configure PHY Flow Control (FC) data based on FC mode
+ * @hw: pointer to hardware structure
+ * @cfg: PHY configuration data to set FC mode
+ * @req_mode: FC mode to configure
+ *
+ * Configures PHY Flow Control according to the provided configuration.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_cfg_phy_fc(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg,
+ enum ixgbe_fc_mode req_mode)
+{
+ u8 pause_mask = 0x0;
+
+ if (!cfg)
+ return -EINVAL;
+
+ switch (req_mode) {
+ case ixgbe_fc_full:
+ pause_mask |= IXGBE_ACI_PHY_EN_TX_LINK_PAUSE;
+ pause_mask |= IXGBE_ACI_PHY_EN_RX_LINK_PAUSE;
+ break;
+ case ixgbe_fc_rx_pause:
+ pause_mask |= IXGBE_ACI_PHY_EN_RX_LINK_PAUSE;
+ break;
+ case ixgbe_fc_tx_pause:
+ pause_mask |= IXGBE_ACI_PHY_EN_TX_LINK_PAUSE;
+ break;
+ default:
+ break;
+ }
+
+ /* Clear the old pause settings. */
+ cfg->caps &= ~(IXGBE_ACI_PHY_EN_TX_LINK_PAUSE |
+ IXGBE_ACI_PHY_EN_RX_LINK_PAUSE);
+
+ /* Set the new capabilities. */
+ cfg->caps |= pause_mask;
+
+ return 0;
+}
+
+/**
+ * ixgbe_setup_fc_e610 - Set up flow control
+ * @hw: pointer to hardware structure
+ *
+ * Set up flow control. This has to be done during init time.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_setup_fc_e610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps = {};
+ struct ixgbe_aci_cmd_set_phy_cfg_data cfg = {};
+ int err;
+
+ /* Get the current PHY config */
+ err = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_ACTIVE_CFG, &pcaps);
+ if (err)
+ return err;
+
+ ixgbe_copy_phy_caps_to_cfg(&pcaps, &cfg);
+
+ /* Configure the set PHY data */
+ err = ixgbe_cfg_phy_fc(hw, &cfg, hw->fc.requested_mode);
+ if (err)
+ return err;
+
+ /* If the capabilities have changed, then set the new config */
+ if (cfg.caps != pcaps.caps) {
+ cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+
+ err = ixgbe_aci_set_phy_cfg(hw, &cfg);
+ if (err)
+ return err;
+ }
+
+ return err;
+}
+
+/**
+ * ixgbe_fc_autoneg_e610 - Configure flow control
+ * @hw: pointer to hardware structure
+ *
+ * Configure Flow Control.
+ */
+void ixgbe_fc_autoneg_e610(struct ixgbe_hw *hw)
+{
+ int err;
+
+ /* Get the current link status.
+ * Current FC mode will be stored in the hw context.
+ */
+ err = ixgbe_aci_get_link_info(hw, false, NULL);
+ if (err)
+ goto no_autoneg;
+
+ /* Check if the link is up */
+ if (!(hw->link.link_info.link_info & IXGBE_ACI_LINK_UP))
+ goto no_autoneg;
+
+ /* Check if auto-negotiation has completed */
+ if (!(hw->link.link_info.an_info & IXGBE_ACI_AN_COMPLETED))
+ goto no_autoneg;
+
+ hw->fc.fc_was_autonegged = true;
+ return;
+
+no_autoneg:
+ hw->fc.fc_was_autonegged = false;
+ hw->fc.current_mode = hw->fc.requested_mode;
+}
+
+/**
+ * ixgbe_disable_rx_e610 - Disable RX unit
+ * @hw: pointer to hardware structure
+ *
+ * Disable the RX DMA unit on E610 using ACI command (0x000C), falling
+ * back to a register write if the command fails.
+ */
+void ixgbe_disable_rx_e610(struct ixgbe_hw *hw)
+{
+ u32 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ u32 pfdtxgswc;
+ int err;
+
+ if (!(rxctrl & IXGBE_RXCTRL_RXEN))
+ return;
+
+ pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
+ if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
+ pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
+ hw->mac.set_lben = true;
+ } else {
+ hw->mac.set_lben = false;
+ }
+
+ err = ixgbe_aci_disable_rxen(hw);
+
+ /* If we fail - disable RX using register write */
+ if (err) {
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ if (rxctrl & IXGBE_RXCTRL_RXEN) {
+ rxctrl &= ~IXGBE_RXCTRL_RXEN;
+ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
+ }
+ }
+}
+
+/**
+ * ixgbe_fw_recovery_mode_e610 - Check FW NVM recovery mode
+ * @hw: pointer to hardware structure
+ *
+ * Check FW NVM recovery mode by reading the value of
+ * the dedicated register.
+ *
+ * Return: true if FW is in recovery mode, otherwise false.
+ */
+static bool ixgbe_fw_recovery_mode_e610(struct ixgbe_hw *hw)
+{
+ u32 fwsm = IXGBE_READ_REG(hw, IXGBE_GL_MNG_FWSM);
+
+ return !!(fwsm & IXGBE_GL_MNG_FWSM_RECOVERY_M);
+}
+
+/**
+ * ixgbe_fw_rollback_mode_e610 - Check FW NVM rollback mode
+ * @hw: pointer to hardware structure
+ *
+ * Check FW NVM rollback mode by reading the value of
+ * the dedicated register.
+ *
+ * Return: true if FW is in rollback mode, otherwise false.
+ */
+static bool ixgbe_fw_rollback_mode_e610(struct ixgbe_hw *hw)
+{
+ u32 fwsm = IXGBE_READ_REG(hw, IXGBE_GL_MNG_FWSM);
+
+ return !!(fwsm & IXGBE_GL_MNG_FWSM_ROLLBACK_M);
+}
+
+/**
+ * ixgbe_init_phy_ops_e610 - PHY specific init
+ * @hw: pointer to hardware structure
+ *
+ * Initialize any function pointers that were not able to be
+ * set during init_shared_code because the PHY type was not known.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_init_phy_ops_e610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ struct ixgbe_phy_info *phy = &hw->phy;
+
+ if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper)
+ phy->ops.set_phy_power = ixgbe_set_phy_power_e610;
+ else
+ phy->ops.set_phy_power = NULL;
+
+ /* Identify the PHY */
+ return phy->ops.identify(hw);
+}
+
+/**
+ * ixgbe_identify_phy_e610 - Identify PHY
+ * @hw: pointer to hardware structure
+ *
+ * Determine PHY type, supported speeds and PHY ID.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_identify_phy_e610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
+ u64 phy_type_low, phy_type_high;
+ int err;
+
+ /* Set PHY type */
+ hw->phy.type = ixgbe_phy_fw;
+
+ err = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_TOPO_CAP_MEDIA, &pcaps);
+ if (err)
+ return err;
+
+ if (!(pcaps.module_compliance_enforcement &
+ IXGBE_ACI_MOD_ENFORCE_STRICT_MODE)) {
+ /* Handle lenient mode */
+ err = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_TOPO_CAP_NO_MEDIA,
+ &pcaps);
+ if (err)
+ return err;
+ }
+
+ /* Determine supported speeds */
+ hw->phy.speeds_supported = IXGBE_LINK_SPEED_UNKNOWN;
+ phy_type_high = le64_to_cpu(pcaps.phy_type_high);
+ phy_type_low = le64_to_cpu(pcaps.phy_type_low);
+
+ if (phy_type_high & IXGBE_PHY_TYPE_HIGH_10BASE_T ||
+ phy_type_high & IXGBE_PHY_TYPE_HIGH_10M_SGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10_FULL;
+ if (phy_type_low & IXGBE_PHY_TYPE_LOW_100BASE_TX ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_100M_SGMII ||
+ phy_type_high & IXGBE_PHY_TYPE_HIGH_100M_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL;
+ if (phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_T ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_SX ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_LX ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_KX ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_1G_SGMII ||
+ phy_type_high & IXGBE_PHY_TYPE_HIGH_1G_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_1GB_FULL;
+ if (phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_T ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_X ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_KX ||
+ phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_SGMII ||
+ phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL;
+ if (phy_type_low & IXGBE_PHY_TYPE_LOW_5GBASE_T ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_5GBASE_KR ||
+ phy_type_high & IXGBE_PHY_TYPE_HIGH_5G_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL;
+ if (phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_T ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_DA ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_SR ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_LR ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1 ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_C2C ||
+ phy_type_high & IXGBE_PHY_TYPE_HIGH_10G_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10GB_FULL;
+
+ /* Initialize autoneg speeds */
+ if (!hw->phy.autoneg_advertised)
+ hw->phy.autoneg_advertised = hw->phy.speeds_supported;
+
+ /* Set PHY ID */
+ memcpy(&hw->phy.id, pcaps.phy_id_oui, sizeof(u32));
+
+ hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_10_FULL |
+ IXGBE_LINK_SPEED_100_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
+
+ return 0;
+}
+
+/**
+ * ixgbe_identify_module_e610 - Identify SFP module type
+ * @hw: pointer to hardware structure
+ *
+ * Identify the SFP module type.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_identify_module_e610(struct ixgbe_hw *hw)
+{
+ bool media_available;
+ u8 module_type;
+ int err;
+
+ err = ixgbe_update_link_info(hw);
+ if (err)
+ return err;
+
+ media_available =
+ (hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE);
+
+ if (media_available) {
+ hw->phy.sfp_type = ixgbe_sfp_type_unknown;
+
+ /* Get module type from hw context updated by
+ * ixgbe_update_link_info()
+ */
+ module_type = hw->link.link_info.module_type[IXGBE_ACI_MOD_TYPE_IDENT];
+
+ if ((module_type & IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE) ||
+ (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE)) {
+ hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
+ } else if (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_SR) {
+ hw->phy.sfp_type = ixgbe_sfp_type_sr;
+ } else if ((module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LR) ||
+ (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LRM)) {
+ hw->phy.sfp_type = ixgbe_sfp_type_lr;
+ }
+ } else {
+ hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_setup_phy_link_e610 - Sets up firmware-controlled PHYs
+ * @hw: pointer to hardware structure
+ *
+ * Set the parameters for the firmware-controlled PHYs.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_setup_phy_link_e610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
+ struct ixgbe_aci_cmd_set_phy_cfg_data pcfg;
+ u8 rmode = IXGBE_ACI_REPORT_TOPO_CAP_MEDIA;
+ u64 sup_phy_type_low, sup_phy_type_high;
+ u64 phy_type_low = 0, phy_type_high = 0;
+ int err;
+
+ err = ixgbe_aci_get_link_info(hw, false, NULL);
+ if (err)
+ return err;
+
+ /* If media is not available get default config. */
+ if (!(hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE))
+ rmode = IXGBE_ACI_REPORT_DFLT_CFG;
+
+ err = ixgbe_aci_get_phy_caps(hw, false, rmode, &pcaps);
+ if (err)
+ return err;
+
+ sup_phy_type_low = le64_to_cpu(pcaps.phy_type_low);
+ sup_phy_type_high = le64_to_cpu(pcaps.phy_type_high);
+
+ /* Get Active configuration to avoid unintended changes. */
+ err = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_ACTIVE_CFG,
+ &pcaps);
+ if (err)
+ return err;
+
+ ixgbe_copy_phy_caps_to_cfg(&pcaps, &pcfg);
+
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL) {
+ phy_type_high |= IXGBE_PHY_TYPE_HIGH_10BASE_T;
+ phy_type_high |= IXGBE_PHY_TYPE_HIGH_10M_SGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) {
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_100BASE_TX;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_100M_SGMII;
+ phy_type_high |= IXGBE_PHY_TYPE_HIGH_100M_USXGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_T;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_SX;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_LX;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_KX;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_1G_SGMII;
+ phy_type_high |= IXGBE_PHY_TYPE_HIGH_1G_USXGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL) {
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_T;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_X;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_KX;
+ phy_type_high |= IXGBE_PHY_TYPE_HIGH_2500M_SGMII;
+ phy_type_high |= IXGBE_PHY_TYPE_HIGH_2500M_USXGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) {
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_5GBASE_T;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_5GBASE_KR;
+ phy_type_high |= IXGBE_PHY_TYPE_HIGH_5G_USXGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) {
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_T;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_DA;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_SR;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_LR;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_C2C;
+ phy_type_high |= IXGBE_PHY_TYPE_HIGH_10G_USXGMII;
+ }
+
+ /* Mask the set values to avoid requesting unsupported link types. */
+ phy_type_low &= sup_phy_type_low;
+ pcfg.phy_type_low = cpu_to_le64(phy_type_low);
+ phy_type_high &= sup_phy_type_high;
+ pcfg.phy_type_high = cpu_to_le64(phy_type_high);
+
+ if (pcfg.phy_type_high != pcaps.phy_type_high ||
+ pcfg.phy_type_low != pcaps.phy_type_low ||
+ pcfg.caps != pcaps.caps) {
+ pcfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
+ pcfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+
+ err = ixgbe_aci_set_phy_cfg(hw, &pcfg);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_set_phy_power_e610 - Control power for copper PHY
+ * @hw: pointer to hardware structure
+ * @on: true for on, false for off
+ *
+ * Set the power on/off of the PHY
+ * by getting its capabilities and setting the appropriate
+ * configuration parameters.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_set_phy_power_e610(struct ixgbe_hw *hw, bool on)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = {};
+ struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = {};
+ int err;
+
+ err = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_ACTIVE_CFG,
+ &phy_caps);
+ if (err)
+ return err;
+
+ ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg);
+
+ if (on)
+ phy_cfg.caps &= ~IXGBE_ACI_PHY_ENA_LOW_POWER;
+ else
+ phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LOW_POWER;
+
+ /* PHY is already in requested power mode. */
+ if (phy_caps.caps == phy_cfg.caps)
+ return 0;
+
+ phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
+ phy_cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+
+ return ixgbe_aci_set_phy_cfg(hw, &phy_cfg);
+}
+
+/**
+ * ixgbe_enter_lplu_e610 - Transition to low power states
+ * @hw: pointer to hardware structure
+ *
+ * Configures Low Power Link Up on transition to low power states
+ * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting the
+ * X557 PHY immediately prior to entering LPLU.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_enter_lplu_e610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = {};
+ struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = {};
+ int err;
+
+ err = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_ACTIVE_CFG,
+ &phy_caps);
+ if (err)
+ return err;
+
+ ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg);
+
+ phy_cfg.low_power_ctrl_an |= IXGBE_ACI_PHY_EN_D3COLD_LOW_POWER_AUTONEG;
+
+ return ixgbe_aci_set_phy_cfg(hw, &phy_cfg);
+}
+
+/**
+ * ixgbe_init_eeprom_params_e610 - Initialize EEPROM params
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the EEPROM parameters ixgbe_eeprom_info within the ixgbe_hw
+ * struct in order to set up EEPROM access.
+ *
+ * Return: the operation exit code.
+ */
+int ixgbe_init_eeprom_params_e610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ u32 gens_stat;
+ u8 sr_size;
+
+ if (eeprom->type != ixgbe_eeprom_uninitialized)
+ return 0;
+
+ eeprom->type = ixgbe_flash;
+
+ gens_stat = IXGBE_READ_REG(hw, GLNVM_GENS);
+ sr_size = FIELD_GET(GLNVM_GENS_SR_SIZE_M, gens_stat);
+
+ /* Switching to words (sr_size contains power of 2). */
+ eeprom->word_size = BIT(sr_size) * IXGBE_SR_WORDS_IN_1KB;
+
+ hw_dbg(hw, "Eeprom params: type = %d, size = %d\n", eeprom->type,
+ eeprom->word_size);
+
+ return 0;
+}
+
+/**
+ * ixgbe_aci_get_netlist_node - get a node handle
+ * @hw: pointer to the hw struct
+ * @cmd: get_link_topo AQ structure
+ * @node_part_number: output node part number if node found
+ * @node_handle: output node handle parameter if node found
+ *
+ * Get the netlist node and assign it to the provided handle using
+ * ACI command (0x06E0).
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_get_netlist_node(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_get_link_topo *cmd,
+ u8 *node_part_number, u16 *node_handle)
+{
+ struct ixgbe_aci_cmd_get_link_topo *resp;
+ struct libie_aq_desc desc;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_topo);
+ resp = libie_aq_raw(&desc);
+ *resp = *cmd;
+
+ if (ixgbe_aci_send_cmd(hw, &desc, NULL, 0))
+ return -EOPNOTSUPP;
+
+ if (node_handle)
+ *node_handle = le16_to_cpu(resp->addr.handle);
+ if (node_part_number)
+ *node_part_number = resp->node_part_num;
+
+ return 0;
+}
+
+/**
+ * ixgbe_acquire_nvm - Generic request for acquiring the NVM ownership
+ * @hw: pointer to the HW structure
+ * @access: NVM access type (read or write)
+ *
+ * Request NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_acquire_nvm(struct ixgbe_hw *hw, enum libie_aq_res_access_type access)
+{
+ u32 fla;
+
+ /* Skip if we are in blank NVM programming mode */
+ fla = IXGBE_READ_REG(hw, IXGBE_GLNVM_FLA);
+ if ((fla & IXGBE_GLNVM_FLA_LOCKED_M) == 0)
+ return 0;
+
+ return ixgbe_acquire_res(hw, LIBIE_AQC_RES_ID_NVM, access,
+ IXGBE_NVM_TIMEOUT);
+}
+
+/**
+ * ixgbe_release_nvm - Generic request for releasing the NVM ownership
+ * @hw: pointer to the HW structure
+ *
+ * Release NVM ownership.
+ */
+void ixgbe_release_nvm(struct ixgbe_hw *hw)
+{
+ u32 fla;
+
+ /* Skip if we are in blank NVM programming mode */
+ fla = IXGBE_READ_REG(hw, IXGBE_GLNVM_FLA);
+ if ((fla & IXGBE_GLNVM_FLA_LOCKED_M) == 0)
+ return;
+
+ ixgbe_release_res(hw, LIBIE_AQC_RES_ID_NVM);
+}
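+
+/*
+ * Illustrative sketch, not part of this patch: ACI NVM reads are expected
+ * to be bracketed by the acquire/release pair above, e.g. (offset and buf
+ * are placeholders):
+ *
+ *	err = ixgbe_acquire_nvm(hw, LIBIE_AQC_RES_ACCESS_READ);
+ *	if (err)
+ *		return err;
+ *	err = ixgbe_aci_read_nvm(hw, IXGBE_ACI_NVM_START_POINT, offset,
+ *				 sizeof(buf), buf, true, true);
+ *	ixgbe_release_nvm(hw);
+ */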
+
+/**
+ * ixgbe_aci_read_nvm - read NVM
+ * @hw: pointer to the HW struct
+ * @module_typeid: module pointer location in words from the NVM beginning
+ * @offset: byte offset from the module beginning
+ * @length: length of the section to be read (in bytes from the offset)
+ * @data: command buffer (size [bytes] = length)
+ * @last_command: tells if this is the last command in a series
+ * @read_shadow_ram: tell if this is a shadow RAM read
+ *
+ * Read the NVM using ACI command (0x0701).
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_read_nvm(struct ixgbe_hw *hw, u16 module_typeid, u32 offset,
+ u16 length, void *data, bool last_command,
+ bool read_shadow_ram)
+{
+ struct ixgbe_aci_cmd_nvm *cmd;
+ struct libie_aq_desc desc;
+
+ if (offset > IXGBE_ACI_NVM_MAX_OFFSET)
+ return -EINVAL;
+
+ cmd = libie_aq_raw(&desc);
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_read);
+
+ if (!read_shadow_ram && module_typeid == IXGBE_ACI_NVM_START_POINT)
+ cmd->cmd_flags |= IXGBE_ACI_NVM_FLASH_ONLY;
+
+ /* If this is the last command in a series, set the proper flag. */
+ if (last_command)
+ cmd->cmd_flags |= IXGBE_ACI_NVM_LAST_CMD;
+ cmd->module_typeid = cpu_to_le16(module_typeid);
+ cmd->offset_low = cpu_to_le16(offset & 0xFFFF);
+ cmd->offset_high = (offset >> 16) & 0xFF;
+ cmd->length = cpu_to_le16(length);
+
+ return ixgbe_aci_send_cmd(hw, &desc, data, length);
+}
+
+/**
+ * ixgbe_aci_erase_nvm - erase NVM sector
+ * @hw: pointer to the HW struct
+ * @module_typeid: module pointer location in words from the NVM beginning
+ *
+ * Erase the NVM sector using the ACI command (0x0702).
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_erase_nvm(struct ixgbe_hw *hw, u16 module_typeid)
+{
+ struct ixgbe_aci_cmd_nvm *cmd;
+ struct libie_aq_desc desc;
+ __le16 len;
+ int err;
+
+ /* Read the module length from the Shadow RAM: module_typeid is 0,
+ * the offset of the module size is converted from bytes to words, and
+ * last_command and read_shadow_ram are both set to true.
+ */
+ err = ixgbe_aci_read_nvm(hw, 0, 2 * module_typeid + 2, 2, &len, true,
+ true);
+ if (err)
+ return err;
+
+ cmd = libie_aq_raw(&desc);
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_erase);
+
+ cmd->module_typeid = cpu_to_le16(module_typeid);
+ cmd->length = len;
+ cmd->offset_low = 0;
+ cmd->offset_high = 0;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_aci_update_nvm - update NVM
+ * @hw: pointer to the HW struct
+ * @module_typeid: module pointer location in words from the NVM beginning
+ * @offset: byte offset from the module beginning
+ * @length: length of the section to be written (in bytes from the offset)
+ * @data: command buffer (size [bytes] = length)
+ * @last_command: tells if this is the last command in a series
+ * @command_flags: command parameters
+ *
+ * Update the NVM using the ACI command (0x0703).
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_update_nvm(struct ixgbe_hw *hw, u16 module_typeid,
+ u32 offset, u16 length, void *data,
+ bool last_command, u8 command_flags)
+{
+ struct ixgbe_aci_cmd_nvm *cmd;
+ struct libie_aq_desc desc;
+
+ cmd = libie_aq_raw(&desc);
+
+ /* The highest byte of the offset must be zero. */
+ if (offset & 0xFF000000)
+ return -EINVAL;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_write);
+
+ cmd->cmd_flags |= command_flags;
+
+ /* If this is the last command in a series, set the proper flag. */
+ if (last_command)
+ cmd->cmd_flags |= IXGBE_ACI_NVM_LAST_CMD;
+ cmd->module_typeid = cpu_to_le16(module_typeid);
+ cmd->offset_low = cpu_to_le16(offset & 0xFFFF);
+ cmd->offset_high = FIELD_GET(IXGBE_ACI_NVM_OFFSET_HI_U_MASK, offset);
+ cmd->length = cpu_to_le16(length);
+
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
+
+ return ixgbe_aci_send_cmd(hw, &desc, data, length);
+}
+
+/**
+ * ixgbe_nvm_write_activate - NVM activate write
+ * @hw: pointer to the HW struct
+ * @cmd_flags: flags for write activate command
+ * @response_flags: response indicators from firmware
+ *
+ * Update the control word with the required banks' validity bits
+ * and dumps the Shadow RAM to flash using ACI command (0x0707).
+ *
+ * cmd_flags controls which banks to activate, the preservation level to use
+ * when activating the NVM bank, and whether an EMP reset is required for
+ * activation.
+ *
+ * Note that the 16-bit cmd_flags value is split between two separate 1-byte
+ * flag values in the descriptor.
+ *
+ * On successful return of the firmware command, the response_flags variable
+ * is updated with the flags reported by firmware indicating certain status,
+ * such as whether EMP reset is enabled.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_nvm_write_activate(struct ixgbe_hw *hw, u16 cmd_flags,
+ u8 *response_flags)
+{
+ struct ixgbe_aci_cmd_nvm *cmd;
+ struct libie_aq_desc desc;
+ int err;
+
+ cmd = libie_aq_raw(&desc);
+ ixgbe_fill_dflt_direct_cmd_desc(&desc,
+ ixgbe_aci_opc_nvm_write_activate);
+
+ cmd->cmd_flags = (u8)(cmd_flags & 0xFF);
+ cmd->offset_high = (u8)FIELD_GET(IXGBE_ACI_NVM_OFFSET_HI_A_MASK,
+ cmd_flags);
+
+ err = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+ if (!err && response_flags)
+ *response_flags = cmd->cmd_flags;
+
+ return err;
+}
+
+/**
+ * ixgbe_nvm_validate_checksum - validate checksum
+ * @hw: pointer to the HW struct
+ *
+ * Verify NVM PFA checksum validity using ACI command (0x0706).
+ * If the checksum verification fails, -EIO is returned.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_nvm_validate_checksum(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_nvm_checksum *cmd;
+ struct libie_aq_desc desc;
+ int err;
+
+ err = ixgbe_acquire_nvm(hw, LIBIE_AQC_RES_ACCESS_READ);
+ if (err)
+ return err;
+
+ cmd = libie_aq_raw(&desc);
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_checksum);
+ cmd->flags = IXGBE_ACI_NVM_CHECKSUM_VERIFY;
+
+ err = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+ ixgbe_release_nvm(hw);
+
+ if (!err && cmd->checksum !=
+ cpu_to_le16(IXGBE_ACI_NVM_CHECKSUM_CORRECT)) {
+ struct ixgbe_adapter *adapter = container_of(hw, struct ixgbe_adapter,
+ hw);
+
+ err = -EIO;
+ netdev_err(adapter->netdev, "Invalid Shadow Ram checksum");
+ }
+
+ return err;
+}
+
+/**
+ * ixgbe_discover_flash_size - Discover the available flash size
+ * @hw: pointer to the HW struct
+ *
+ * The device flash could be up to 16MB in size. However, it is possible that
+ * the actual size is smaller. Use bisection to determine the accessible size
+ * of flash memory.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_discover_flash_size(struct ixgbe_hw *hw)
+{
+ u32 min_size = 0, max_size = IXGBE_ACI_NVM_MAX_OFFSET + 1;
+ int err;
+
+ err = ixgbe_acquire_nvm(hw, LIBIE_AQC_RES_ACCESS_READ);
+ if (err)
+ return err;
+
+ while ((max_size - min_size) > 1) {
+ u32 offset = (max_size + min_size) / 2;
+ u32 len = 1;
+ u8 data;
+
+ err = ixgbe_read_flat_nvm(hw, offset, &len, &data, false);
+ if (err == -EIO &&
+ hw->aci.last_status == LIBIE_AQ_RC_EINVAL) {
+ err = 0;
+ max_size = offset;
+ } else if (!err) {
+ min_size = offset;
+ } else {
+ /* an unexpected error occurred */
+ goto err_read_flat_nvm;
+ }
+ }
+
+ hw->flash.flash_size = max_size;
+
+err_read_flat_nvm:
+ ixgbe_release_nvm(hw);
+
+ return err;
+}
+
+/**
+ * ixgbe_read_sr_base_address - Read the value of a Shadow RAM pointer word
+ * @hw: pointer to the HW structure
+ * @offset: the word offset of the Shadow RAM word to read
+ * @pointer: pointer value read from Shadow RAM
+ *
+ * Read the given Shadow RAM word, and convert it to a pointer value specified
+ * in bytes. This function assumes the specified offset is a valid pointer
+ * word.
+ *
+ * Each pointer word specifies whether it is stored in word size or 4KB
+ * sector size by using the highest bit. The reported pointer value will be in
+ * bytes, intended for flat NVM reads.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_read_sr_base_address(struct ixgbe_hw *hw, u16 offset,
+ u32 *pointer)
+{
+ u16 value;
+ int err;
+
+ err = ixgbe_read_ee_aci_e610(hw, offset, &value);
+ if (err)
+ return err;
+
+ /* Determine if the pointer is in 4KB or word units */
+ if (value & IXGBE_SR_NVM_PTR_4KB_UNITS)
+ *pointer = (value & ~IXGBE_SR_NVM_PTR_4KB_UNITS) * SZ_4K;
+ else
+ *pointer = value * sizeof(u16);
+
+ return 0;
+}
+
+/**
+ * ixgbe_read_sr_area_size - Read an area size from a Shadow RAM word
+ * @hw: pointer to the HW structure
+ * @offset: the word offset of the Shadow RAM to read
+ * @size: size value read from the Shadow RAM
+ *
+ * Read the given Shadow RAM word, and convert it to an area size value
+ * specified in bytes. This function assumes the specified offset is a valid
+ * area size word.
+ *
+ * Each area size word is specified in 4KB sector units. This function reports
+ * the size in bytes, intended for flat NVM reads.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_read_sr_area_size(struct ixgbe_hw *hw, u16 offset, u32 *size)
+{
+ u16 value;
+ int err;
+
+ err = ixgbe_read_ee_aci_e610(hw, offset, &value);
+ if (err)
+ return err;
+
+ /* Area sizes are always specified in 4KB units */
+ *size = value * SZ_4K;
+
+ return 0;
+}
+
+/**
+ * ixgbe_determine_active_flash_banks - Discover active bank for each module
+ * @hw: pointer to the HW struct
+ *
+ * Read the Shadow RAM control word and determine which banks are active for
+ * the NVM, OROM, and Netlist modules. Also read and calculate the associated
+ * pointer and size. These values are then cached into the ixgbe_flash_info
+ * structure for later use in order to calculate the correct offset to read
+ * from the active module.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_determine_active_flash_banks(struct ixgbe_hw *hw)
+{
+ struct ixgbe_bank_info *banks = &hw->flash.banks;
+ u16 ctrl_word;
+ int err;
+
+ err = ixgbe_read_ee_aci_e610(hw, IXGBE_E610_SR_NVM_CTRL_WORD,
+ &ctrl_word);
+ if (err)
+ return err;
+
+ if (FIELD_GET(IXGBE_SR_CTRL_WORD_1_M, ctrl_word) !=
+ IXGBE_SR_CTRL_WORD_VALID)
+ return -ENODATA;
+
+ if (!(ctrl_word & IXGBE_SR_CTRL_WORD_NVM_BANK))
+ banks->nvm_bank = IXGBE_1ST_FLASH_BANK;
+ else
+ banks->nvm_bank = IXGBE_2ND_FLASH_BANK;
+
+ if (!(ctrl_word & IXGBE_SR_CTRL_WORD_OROM_BANK))
+ banks->orom_bank = IXGBE_1ST_FLASH_BANK;
+ else
+ banks->orom_bank = IXGBE_2ND_FLASH_BANK;
+
+ if (!(ctrl_word & IXGBE_SR_CTRL_WORD_NETLIST_BANK))
+ banks->netlist_bank = IXGBE_1ST_FLASH_BANK;
+ else
+ banks->netlist_bank = IXGBE_2ND_FLASH_BANK;
+
+ err = ixgbe_read_sr_base_address(hw, IXGBE_E610_SR_1ST_NVM_BANK_PTR,
+ &banks->nvm_ptr);
+ if (err)
+ return err;
+
+ err = ixgbe_read_sr_area_size(hw, IXGBE_E610_SR_NVM_BANK_SIZE,
+ &banks->nvm_size);
+ if (err)
+ return err;
+
+ err = ixgbe_read_sr_base_address(hw, IXGBE_E610_SR_1ST_OROM_BANK_PTR,
+ &banks->orom_ptr);
+ if (err)
+ return err;
+
+ err = ixgbe_read_sr_area_size(hw, IXGBE_E610_SR_OROM_BANK_SIZE,
+ &banks->orom_size);
+ if (err)
+ return err;
+
+ err = ixgbe_read_sr_base_address(hw, IXGBE_E610_SR_NETLIST_BANK_PTR,
+ &banks->netlist_ptr);
+ if (err)
+ return err;
+
+ err = ixgbe_read_sr_area_size(hw, IXGBE_E610_SR_NETLIST_BANK_SIZE,
+ &banks->netlist_size);
+
+ return err;
+}
+
+/**
+ * ixgbe_get_flash_bank_offset - Get offset into requested flash bank
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from the active or inactive flash bank
+ * @module: the module to read from
+ *
+ * Based on the module, look up the module offset from the beginning of the
+ * flash.
+ *
+ * Return: the flash offset. Note that a value of zero is invalid and must be
+ * treated as an error.
+ */
+static int ixgbe_get_flash_bank_offset(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u16 module)
+{
+ struct ixgbe_bank_info *banks = &hw->flash.banks;
+ enum ixgbe_flash_bank active_bank;
+ bool second_bank_active;
+ u32 offset, size;
+
+ switch (module) {
+ case IXGBE_E610_SR_1ST_NVM_BANK_PTR:
+ offset = banks->nvm_ptr;
+ size = banks->nvm_size;
+ active_bank = banks->nvm_bank;
+ break;
+ case IXGBE_E610_SR_1ST_OROM_BANK_PTR:
+ offset = banks->orom_ptr;
+ size = banks->orom_size;
+ active_bank = banks->orom_bank;
+ break;
+ case IXGBE_E610_SR_NETLIST_BANK_PTR:
+ offset = banks->netlist_ptr;
+ size = banks->netlist_size;
+ active_bank = banks->netlist_bank;
+ break;
+ default:
+ return 0;
+ }
+
+ switch (active_bank) {
+ case IXGBE_1ST_FLASH_BANK:
+ second_bank_active = false;
+ break;
+ case IXGBE_2ND_FLASH_BANK:
+ second_bank_active = true;
+ break;
+ default:
+ return 0;
+ }
+
+ /* The second flash bank is stored immediately following the first
+ * bank. Based on whether the 1st or 2nd bank is active, and whether
+ * we want the active or inactive bank, calculate the desired offset.
+ */
+ switch (bank) {
+ case IXGBE_ACTIVE_FLASH_BANK:
+ return offset + (second_bank_active ? size : 0);
+ case IXGBE_INACTIVE_FLASH_BANK:
+ return offset + (second_bank_active ? 0 : size);
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_read_flash_module - Read a word from one of the main NVM modules
+ * @hw: pointer to the HW structure
+ * @bank: which bank of the module to read
+ * @module: the module to read
+ * @offset: the offset into the module in bytes
+ * @data: storage for the data read from the flash
+ * @length: bytes of data to read
+ *
+ * Read data from the specified flash module. The bank parameter indicates
+ * whether to read from the active or the inactive bank of that module.
+ *
+ * The data is read using flat NVM access and relies on the
+ * hw->flash.banks data being set up by ixgbe_determine_active_flash_banks()
+ * during initialization.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_read_flash_module(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u16 module, u32 offset, u8 *data, u32 length)
+{
+ u32 start;
+ int err;
+
+ start = ixgbe_get_flash_bank_offset(hw, bank, module);
+ if (!start)
+ return -EINVAL;
+
+ err = ixgbe_acquire_nvm(hw, LIBIE_AQC_RES_ACCESS_READ);
+ if (err)
+ return err;
+
+ err = ixgbe_read_flat_nvm(hw, start + offset, &length, data, false);
+
+ ixgbe_release_nvm(hw);
+
+ return err;
+}
+
+/**
+ * ixgbe_read_nvm_module - Read from the active main NVM module
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from active or inactive NVM module
+ * @offset: offset into the NVM module to read, in words
+ * @data: storage for returned word value
+ *
+ * Read the specified word from the active NVM module. This includes the CSS
+ * header at the start of the NVM module.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_read_nvm_module(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u32 offset, u16 *data)
+{
+ __le16 data_local;
+ int err;
+
+ err = ixgbe_read_flash_module(hw, bank, IXGBE_E610_SR_1ST_NVM_BANK_PTR,
+ offset * sizeof(data_local),
+ (u8 *)&data_local,
+ sizeof(data_local));
+ if (!err)
+ *data = le16_to_cpu(data_local);
+
+ return err;
+}
+
+/**
+ * ixgbe_read_netlist_module - Read data from the netlist module area
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from the active or inactive module
+ * @offset: offset into the netlist to read from
+ * @data: storage for returned word value
+ *
+ * Read a word from the specified netlist bank.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_read_netlist_module(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u32 offset, u16 *data)
+{
+ __le16 data_local;
+ int err;
+
+ err = ixgbe_read_flash_module(hw, bank, IXGBE_E610_SR_NETLIST_BANK_PTR,
+ offset * sizeof(data_local),
+ (u8 *)&data_local, sizeof(data_local));
+ if (!err)
+ *data = le16_to_cpu(data_local);
+
+ return err;
+}
+
+/**
+ * ixgbe_read_orom_module - Read from the active Option ROM module
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from active or inactive OROM module
+ * @offset: offset into the OROM module to read, in words
+ * @data: storage for returned word value
+ *
+ * Read the specified word from the active Option ROM module of the flash.
+ * Note that unlike the NVM module, the CSS data is stored at the end of the
+ * module instead of at the beginning.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_read_orom_module(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u32 offset, u16 *data)
+{
+ __le16 data_local;
+ int err;
+
+ err = ixgbe_read_flash_module(hw, bank, IXGBE_E610_SR_1ST_OROM_BANK_PTR,
+ offset * sizeof(data_local),
+ (u8 *)&data_local, sizeof(data_local));
+ if (!err)
+ *data = le16_to_cpu(data_local);
+
+ return err;
+}
+
+/**
+ * ixgbe_get_nvm_css_hdr_len - Read the CSS header length
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @hdr_len: storage for header length in words
+ *
+ * Read the CSS header length from the NVM CSS header and add the
+ * Authentication header size, and then convert to words.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_get_nvm_css_hdr_len(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u32 *hdr_len)
+{
+ u16 hdr_len_l, hdr_len_h;
+ u32 hdr_len_dword;
+ int err;
+
+ err = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_HDR_LEN_L,
+ &hdr_len_l);
+ if (err)
+ return err;
+
+ err = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_HDR_LEN_H,
+ &hdr_len_h);
+ if (err)
+ return err;
+
+ /* The CSS header length is in DWORDs, so convert it to words and add
+ * the authentication header size.
+ */
+ hdr_len_dword = (hdr_len_h << 16) | hdr_len_l;
+ *hdr_len = hdr_len_dword * 2 + IXGBE_NVM_AUTH_HEADER_LEN;
+
+ return 0;
+}
+
+/**
+ * ixgbe_read_nvm_sr_copy - Read a word from the Shadow RAM copy
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from the active or inactive NVM module
+ * @offset: offset into the Shadow RAM copy to read, in words
+ * @data: storage for returned word value
+ *
+ * Read the specified word from the copy of the Shadow RAM found in the
+ * specified NVM module.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_read_nvm_sr_copy(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u32 offset, u16 *data)
+{
+ u32 hdr_len;
+ int err;
+
+ err = ixgbe_get_nvm_css_hdr_len(hw, bank, &hdr_len);
+ if (err)
+ return err;
+
+ hdr_len = round_up(hdr_len, IXGBE_HDR_LEN_ROUNDUP);
+
+ return ixgbe_read_nvm_module(hw, bank, hdr_len + offset, data);
+}
+
+/**
+ * ixgbe_get_nvm_srev - Read the security revision from the NVM CSS header
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @srev: storage for security revision
+ *
+ * Read the security revision out of the CSS header of the active NVM module
+ * bank.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_get_nvm_srev(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank, u32 *srev)
+{
+ u16 srev_l, srev_h;
+ int err;
+
+ err = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_SREV_L, &srev_l);
+ if (err)
+ return err;
+
+ err = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_SREV_H, &srev_h);
+ if (err)
+ return err;
+
+ *srev = (srev_h << 16) | srev_l;
+
+ return 0;
+}
+
+/**
+ * ixgbe_get_orom_civd_data - Get the combo version information from Option ROM
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash module
+ * @civd: storage for the Option ROM CIVD data.
+ *
+ * Searches through the Option ROM flash contents to locate the CIVD data for
+ * the image.
+ *
+ * Return: -ENOMEM when cannot allocate memory, -EDOM for checksum violation,
+ * -ENODATA when cannot find proper data, -EIO for faulty read or
+ * 0 on success.
+ *
+ * On success @civd stores collected data.
+ */
+static int
+ixgbe_get_orom_civd_data(struct ixgbe_hw *hw, enum ixgbe_bank_select bank,
+ struct ixgbe_orom_civd_info *civd)
+{
+ u32 orom_size = hw->flash.banks.orom_size;
+ u8 *orom_data;
+ u32 offset;
+ int err;
+
+ orom_data = kzalloc(orom_size, GFP_KERNEL);
+ if (!orom_data)
+ return -ENOMEM;
+
+ err = ixgbe_read_flash_module(hw, bank,
+ IXGBE_E610_SR_1ST_OROM_BANK_PTR, 0,
+ orom_data, orom_size);
+ if (err) {
+ err = -EIO;
+ goto cleanup;
+ }
+
+ /* The CIVD section is located in the Option ROM aligned to 512 bytes.
+ * The first 4 bytes must contain the ASCII characters "$CIV".
+ * A simple modulo 256 sum of all of the bytes of the structure must
+ * equal 0.
+ */
+ for (offset = 0; offset + SZ_512 <= orom_size; offset += SZ_512) {
+ struct ixgbe_orom_civd_info *tmp;
+ u8 sum = 0;
+ u32 i;
+
+ BUILD_BUG_ON(sizeof(*tmp) > SZ_512);
+
+ tmp = (struct ixgbe_orom_civd_info *)&orom_data[offset];
+
+ /* Skip forward until we find a matching signature */
+ if (memcmp(IXGBE_OROM_CIV_SIGNATURE, tmp->signature,
+ sizeof(tmp->signature)))
+ continue;
+
+ /* Verify that the simple checksum is zero */
+ for (i = 0; i < sizeof(*tmp); i++)
+ sum += ((u8 *)tmp)[i];
+
+ if (sum) {
+ err = -EDOM;
+ goto cleanup;
+ }
+
+ *civd = *tmp;
+ err = 0;
+
+ goto cleanup;
+ }
+
+ err = -ENODATA;
+cleanup:
+ kfree(orom_data);
+ return err;
+}
+
+/**
+ * ixgbe_get_orom_srev - Read the security revision from the OROM CSS header
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from active or inactive flash module
+ * @srev: storage for security revision
+ *
+ * Read the security revision out of the CSS header of the active OROM module
+ * bank.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_get_orom_srev(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u32 *srev)
+{
+ u32 orom_size_word = hw->flash.banks.orom_size / 2;
+ u32 css_start, hdr_len;
+ u16 srev_l, srev_h;
+ int err;
+
+ err = ixgbe_get_nvm_css_hdr_len(hw, bank, &hdr_len);
+ if (err)
+ return err;
+
+ if (orom_size_word < hdr_len)
+ return -EINVAL;
+
+ /* Calculate how far into the Option ROM the CSS header starts. Note
+ * that ixgbe_read_orom_module takes a word offset.
+ */
+ css_start = orom_size_word - hdr_len;
+ err = ixgbe_read_orom_module(hw, bank,
+ css_start + IXGBE_NVM_CSS_SREV_L,
+ &srev_l);
+ if (err)
+ return err;
+
+ err = ixgbe_read_orom_module(hw, bank,
+ css_start + IXGBE_NVM_CSS_SREV_H,
+ &srev_h);
+ if (err)
+ return err;
+
+ *srev = srev_h << 16 | srev_l;
+
+ return 0;
+}
+
+/**
+ * ixgbe_get_orom_ver_info - Read Option ROM version information
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash module
+ * @orom: pointer to Option ROM info structure
+ *
+ * Read Option ROM version and security revision from the Option ROM flash
+ * section.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_get_orom_ver_info(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ struct ixgbe_orom_info *orom)
+{
+ struct ixgbe_orom_civd_info civd;
+ u32 combo_ver;
+ int err;
+
+ err = ixgbe_get_orom_civd_data(hw, bank, &civd);
+ if (err)
+ return err;
+
+ combo_ver = get_unaligned_le32(&civd.combo_ver);
+
+ orom->major = (u8)FIELD_GET(IXGBE_OROM_VER_MASK, combo_ver);
+ orom->patch = (u8)FIELD_GET(IXGBE_OROM_VER_PATCH_MASK, combo_ver);
+ orom->build = (u16)FIELD_GET(IXGBE_OROM_VER_BUILD_MASK, combo_ver);
+
+ return ixgbe_get_orom_srev(hw, bank, &orom->srev);
+}
+
+/**
+ * ixgbe_get_inactive_orom_ver - Read Option ROM version from the inactive bank
+ * @hw: pointer to the HW structure
+ * @orom: storage for Option ROM version information
+ *
+ * Read the Option ROM version and security revision data for the inactive
+ * section of flash. Used to access version data for a pending update that has
+ * not yet been activated.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_get_inactive_orom_ver(struct ixgbe_hw *hw,
+ struct ixgbe_orom_info *orom)
+{
+ return ixgbe_get_orom_ver_info(hw, IXGBE_INACTIVE_FLASH_BANK, orom);
+}
+
+/**
+ * ixgbe_get_nvm_ver_info - Read NVM version information
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @nvm: pointer to NVM info structure
+ *
+ * Read the NVM EETRACK ID and map version of the main NVM image bank, filling
+ * in the nvm info structure.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_get_nvm_ver_info(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ struct ixgbe_nvm_info *nvm)
+{
+ u16 eetrack_lo, eetrack_hi, ver;
+ int err;
+
+ err = ixgbe_read_nvm_sr_copy(hw, bank,
+ IXGBE_E610_SR_NVM_DEV_STARTER_VER, &ver);
+ if (err)
+ return err;
+
+ nvm->major = FIELD_GET(IXGBE_E610_NVM_VER_HI_MASK, ver);
+ nvm->minor = FIELD_GET(IXGBE_E610_NVM_VER_LO_MASK, ver);
+
+ err = ixgbe_read_nvm_sr_copy(hw, bank, IXGBE_E610_SR_NVM_EETRACK_LO,
+ &eetrack_lo);
+ if (err)
+ return err;
+
+ err = ixgbe_read_nvm_sr_copy(hw, bank, IXGBE_E610_SR_NVM_EETRACK_HI,
+ &eetrack_hi);
+ if (err)
+ return err;
+
+ nvm->eetrack = (eetrack_hi << 16) | eetrack_lo;
+
+ ixgbe_get_nvm_srev(hw, bank, &nvm->srev);
+
+ return 0;
+}
+
+/**
+ * ixgbe_get_inactive_nvm_ver - Read NVM version from the inactive bank
+ * @hw: pointer to the HW structure
+ * @nvm: storage for NVM version information
+ *
+ * Read the NVM EETRACK ID, Map version, and security revision of the
+ * inactive NVM bank. Used to access version data for a pending update that
+ * has not yet been activated.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_get_inactive_nvm_ver(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm)
+{
+ return ixgbe_get_nvm_ver_info(hw, IXGBE_INACTIVE_FLASH_BANK, nvm);
+}
+
+/**
+ * ixgbe_get_active_nvm_ver - Read NVM version from the active bank
+ * @hw: pointer to the HW structure
+ * @nvm: storage for NVM version information
+ *
+ * Reads the NVM EETRACK ID, Map version, and security revision of the
+ * active NVM bank.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_get_active_nvm_ver(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_info *nvm)
+{
+ return ixgbe_get_nvm_ver_info(hw, IXGBE_ACTIVE_FLASH_BANK, nvm);
+}
+
+/**
+ * ixgbe_get_netlist_info - Read the netlist version information
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @netlist: pointer to netlist version info structure
+ *
+ * Get the netlist version information from the requested bank. Reads the Link
+ * Topology section to find the Netlist ID block and extract the relevant
+ * information into the netlist version structure.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_get_netlist_info(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ struct ixgbe_netlist_info *netlist)
+{
+ u16 module_id, length, node_count, i;
+ u16 *id_blk;
+ int err;
+
+ err = ixgbe_read_netlist_module(hw, bank, IXGBE_NETLIST_TYPE_OFFSET,
+ &module_id);
+ if (err)
+ return err;
+
+ if (module_id != IXGBE_NETLIST_LINK_TOPO_MOD_ID)
+ return -EIO;
+
+ err = ixgbe_read_netlist_module(hw, bank, IXGBE_LINK_TOPO_MODULE_LEN,
+ &length);
+ if (err)
+ return err;
+
+ /* Sanity check that we have at least enough words to store the
+ * netlist ID block.
+ */
+ if (length < IXGBE_NETLIST_ID_BLK_SIZE)
+ return -EIO;
+
+ err = ixgbe_read_netlist_module(hw, bank, IXGBE_LINK_TOPO_NODE_COUNT,
+ &node_count);
+ if (err)
+ return err;
+
+ node_count &= IXGBE_LINK_TOPO_NODE_COUNT_M;
+
+ id_blk = kcalloc(IXGBE_NETLIST_ID_BLK_SIZE, sizeof(*id_blk), GFP_KERNEL);
+ if (!id_blk)
+ return -ENOMEM;
+
+ /* Read out the entire Netlist ID Block at once. */
+ err = ixgbe_read_flash_module(hw, bank, IXGBE_E610_SR_NETLIST_BANK_PTR,
+ IXGBE_NETLIST_ID_BLK_OFFSET(node_count) *
+ sizeof(*id_blk), (u8 *)id_blk,
+ IXGBE_NETLIST_ID_BLK_SIZE *
+ sizeof(*id_blk));
+ if (err)
+ goto free_id_blk;
+
+ for (i = 0; i < IXGBE_NETLIST_ID_BLK_SIZE; i++)
+ id_blk[i] = le16_to_cpu(((__le16 *)id_blk)[i]);
+
+ netlist->major = id_blk[IXGBE_NETLIST_ID_BLK_MAJOR_VER_HIGH] << 16 |
+ id_blk[IXGBE_NETLIST_ID_BLK_MAJOR_VER_LOW];
+ netlist->minor = id_blk[IXGBE_NETLIST_ID_BLK_MINOR_VER_HIGH] << 16 |
+ id_blk[IXGBE_NETLIST_ID_BLK_MINOR_VER_LOW];
+ netlist->type = id_blk[IXGBE_NETLIST_ID_BLK_TYPE_HIGH] << 16 |
+ id_blk[IXGBE_NETLIST_ID_BLK_TYPE_LOW];
+ netlist->rev = id_blk[IXGBE_NETLIST_ID_BLK_REV_HIGH] << 16 |
+ id_blk[IXGBE_NETLIST_ID_BLK_REV_LOW];
+ netlist->cust_ver = id_blk[IXGBE_NETLIST_ID_BLK_CUST_VER];
+ /* Read the leftmost 4 bytes of the SHA hash. */
+ netlist->hash = id_blk[IXGBE_NETLIST_ID_BLK_SHA_HASH_WORD(15)] << 16 |
+ id_blk[IXGBE_NETLIST_ID_BLK_SHA_HASH_WORD(14)];
+
+free_id_blk:
+ kfree(id_blk);
+ return err;
+}
+
+/**
+ * ixgbe_get_inactive_netlist_ver - Read netlist version from the inactive bank
+ * @hw: pointer to the HW struct
+ * @netlist: pointer to netlist version info structure
+ *
+ * Read the netlist version data from the inactive netlist bank. Used to
+ * extract version data of a pending flash update in order to display the
+ * version data.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_get_inactive_netlist_ver(struct ixgbe_hw *hw,
+ struct ixgbe_netlist_info *netlist)
+{
+ return ixgbe_get_netlist_info(hw, IXGBE_INACTIVE_FLASH_BANK, netlist);
+}
+
+/**
+ * ixgbe_get_flash_data - get flash data
+ * @hw: pointer to the HW struct
+ *
+ * Read and populate flash data such as the Shadow RAM size, the active
+ * flash banks, blank_nvm_mode and the NVM, OROM and netlist versions.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_get_flash_data(struct ixgbe_hw *hw)
+{
+ struct ixgbe_flash_info *flash = &hw->flash;
+ u32 fla, gens_stat;
+ u8 sr_size;
+ int err;
+
+ /* The SR size is stored regardless of the NVM programming mode
+ * as the blank mode may be used in the factory line.
+ */
+ gens_stat = IXGBE_READ_REG(hw, GLNVM_GENS);
+ sr_size = FIELD_GET(GLNVM_GENS_SR_SIZE_M, gens_stat);
+
+ /* Switching to words (sr_size contains power of 2) */
+ flash->sr_words = BIT(sr_size) * (SZ_1K / sizeof(u16));
+
+ /* Check if we are in the normal or blank NVM programming mode */
+ fla = IXGBE_READ_REG(hw, IXGBE_GLNVM_FLA);
+ if (fla & IXGBE_GLNVM_FLA_LOCKED_M) {
+ flash->blank_nvm_mode = false;
+ } else {
+ flash->blank_nvm_mode = true;
+ return -EIO;
+ }
+
+ err = ixgbe_discover_flash_size(hw);
+ if (err)
+ return err;
+
+ err = ixgbe_determine_active_flash_banks(hw);
+ if (err)
+ return err;
+
+ err = ixgbe_get_nvm_ver_info(hw, IXGBE_ACTIVE_FLASH_BANK,
+ &flash->nvm);
+ if (err)
+ return err;
+
+ err = ixgbe_get_orom_ver_info(hw, IXGBE_ACTIVE_FLASH_BANK,
+ &flash->orom);
+ if (err)
+ return err;
+
+ err = ixgbe_get_netlist_info(hw, IXGBE_ACTIVE_FLASH_BANK,
+ &flash->netlist);
+ return err;
+}
+
+/**
+ * ixgbe_aci_nvm_update_empr - update NVM using EMPR
+ * @hw: pointer to the HW struct
+ *
+ * Force EMP reset using ACI command (0x0709). This command allows SW to
+ * request an EMPR to activate new FW.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_nvm_update_empr(struct ixgbe_hw *hw)
+{
+ struct libie_aq_desc desc;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_update_empr);
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_nvm_set_pkg_data - NVM set package data
+ * @hw: pointer to the HW struct
+ * @del_pkg_data_flag: if set, the pkg_data currently stored by FW is
+ * deleted; when this bit is set, the buffer should have size 0
+ * @data: pointer to buffer
+ * @length: length of the buffer
+ *
+ * Set package data using ACI command (0x070A).
+ * This command is equivalent to the reception of
+ * a PLDM FW Update GetPackageData cmd. This command should be sent
+ * as part of the NVM update as the first cmd in the flow.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_nvm_set_pkg_data(struct ixgbe_hw *hw, bool del_pkg_data_flag,
+ u8 *data, u16 length)
+{
+ struct ixgbe_aci_cmd_nvm_pkg_data *cmd;
+ struct libie_aq_desc desc;
+
+ if (length != 0 && !data)
+ return -EINVAL;
+
+ cmd = libie_aq_raw(&desc);
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_pkg_data);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
+
+ if (del_pkg_data_flag)
+ cmd->cmd_flags |= IXGBE_ACI_NVM_PKG_DELETE;
+
+ return ixgbe_aci_send_cmd(hw, &desc, data, length);
+}
+
+/**
+ * ixgbe_nvm_pass_component_tbl - NVM pass component table
+ * @hw: pointer to the HW struct
+ * @data: pointer to buffer
+ * @length: length of the buffer
+ * @transfer_flag: parameter for determining stage of the update
+ * @comp_response: a pointer to the response from the 0x070B ACI.
+ * @comp_response_code: a pointer to the response code from the 0x070B ACI.
+ *
+ * Pass component table using ACI command (0x070B). This command is equivalent
+ * to the reception of a PLDM FW Update PassComponentTable cmd.
+ * This command should be sent once per component. It can be only sent after
+ * Set Package Data cmd and before actual update. FW will assume these
+ * commands are going to be sent until the TransferFlag is set to End or
+ * StartAndEnd.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_nvm_pass_component_tbl(struct ixgbe_hw *hw, u8 *data, u16 length,
+ u8 transfer_flag, u8 *comp_response,
+ u8 *comp_response_code)
+{
+ struct ixgbe_aci_cmd_nvm_pass_comp_tbl *cmd;
+ struct libie_aq_desc desc;
+ int err;
+
+ if (!data || !comp_response || !comp_response_code)
+ return -EINVAL;
+
+ cmd = libie_aq_raw(&desc);
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc,
+ ixgbe_aci_opc_nvm_pass_component_tbl);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
+
+ cmd->transfer_flag = transfer_flag;
+ err = ixgbe_aci_send_cmd(hw, &desc, data, length);
+ if (!err) {
+ *comp_response = cmd->component_response;
+ *comp_response_code = cmd->component_response_code;
+ }
+
+ return err;
+}
+
+/**
+ * ixgbe_read_sr_word_aci - Reads Shadow RAM via ACI
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM using ixgbe_read_flat_nvm.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_read_sr_word_aci(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+ u32 bytes = sizeof(u16);
+ u16 data_local;
+ int err;
+
+ err = ixgbe_read_flat_nvm(hw, offset * sizeof(u16), &bytes,
+ (u8 *)&data_local, true);
+ if (err)
+ return err;
+
+ *data = data_local;
+ return 0;
+}
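+
+/* Usage sketch (illustrative only): read the PFA pointer word. The offset is
+ * given in 16-bit Shadow RAM words; the helper converts it to a byte offset
+ * internally. The NVM resource is assumed to be held by the caller, as done
+ * by ixgbe_read_ee_aci_e610() below.
+ *
+ *	u16 pfa_ptr;
+ *	int err = ixgbe_read_sr_word_aci(hw, IXGBE_E610_SR_PFA_PTR, &pfa_ptr);
+ */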
+
+/**
+ * ixgbe_read_flat_nvm - Read portion of NVM by flat offset
+ * @hw: pointer to the HW struct
+ * @offset: offset from beginning of NVM
+ * @length: (in) number of bytes to read; (out) number of bytes actually read
+ * @data: buffer to return data in (sized to fit the specified length)
+ * @read_shadow_ram: if true, read from shadow RAM instead of NVM
+ *
+ * Reads a portion of the NVM, as a flat memory space. This function correctly
+ * breaks read requests across Shadow RAM sectors, prevents Shadow RAM size
+ * from being exceeded in case of Shadow RAM read requests and ensures that no
+ * single read request exceeds the maximum 4KB read for a single admin command.
+ *
+ * Note that the data pointer may be partially updated if some reads succeed
+ * before a failure.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_read_flat_nvm(struct ixgbe_hw *hw, u32 offset, u32 *length,
+ u8 *data, bool read_shadow_ram)
+{
+ u32 inlen = *length;
+ u32 bytes_read = 0;
+ bool last_cmd;
+ int err;
+
+ /* Verify the length of the read if this is for the Shadow RAM */
+ if (read_shadow_ram && ((offset + inlen) >
+ (hw->eeprom.word_size * 2u)))
+ return -EINVAL;
+
+ do {
+ u32 read_size, sector_offset;
+
+ /* ixgbe_aci_read_nvm cannot read more than 4KB at a time.
+ * Additionally, a read from the Shadow RAM may not cross over
+ * a sector boundary. Conveniently, the sector size is also 4KB.
+ */
+ sector_offset = offset % IXGBE_ACI_MAX_BUFFER_SIZE;
+ read_size = min_t(u32,
+ IXGBE_ACI_MAX_BUFFER_SIZE - sector_offset,
+ inlen - bytes_read);
+
+ last_cmd = !(bytes_read + read_size < inlen);
+
+ /* ixgbe_aci_read_nvm takes the length as a u16. Our read_size
+ * is calculated using a u32, but the IXGBE_ACI_MAX_BUFFER_SIZE
+ * maximum size guarantees that it will fit within the 2 bytes.
+ */
+ err = ixgbe_aci_read_nvm(hw, IXGBE_ACI_NVM_START_POINT,
+ offset, (u16)read_size,
+ data + bytes_read, last_cmd,
+ read_shadow_ram);
+ if (err)
+ break;
+
+ bytes_read += read_size;
+ offset += read_size;
+ } while (!last_cmd);
+
+ *length = bytes_read;
+ return err;
+}
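+
+/* Usage sketch (illustrative only, not a call made by the driver as-is):
+ * read the first 32 bytes of the Shadow RAM. Requests larger than 4 KB, or
+ * crossing a 4 KB sector boundary, are split into multiple ACI reads by the
+ * helper itself; the caller only provides the NVM ownership.
+ *
+ *	u32 len = 32;
+ *	u8 buf[32];
+ *	int err;
+ *
+ *	err = ixgbe_acquire_nvm(hw, LIBIE_AQC_RES_ACCESS_READ);
+ *	if (!err) {
+ *		err = ixgbe_read_flat_nvm(hw, 0, &len, buf, true);
+ *		ixgbe_release_nvm(hw);
+ *	}
+ */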
+
+/**
+ * ixgbe_read_sr_buf_aci - Read Shadow RAM buffer via ACI
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM words to read (0x000000 - 0x001FFF)
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Read 16 bit words (data buf) from the Shadow RAM. The NVM ownership must be
+ * acquired/released by the caller.
+ *
+ * Return: the operation exit code.
+ */
+int ixgbe_read_sr_buf_aci(struct ixgbe_hw *hw, u16 offset, u16 *words,
+ u16 *data)
+{
+ u32 bytes = *words * 2;
+ int err;
+
+ err = ixgbe_read_flat_nvm(hw, offset * 2, &bytes, (u8 *)data, true);
+ if (err)
+ return err;
+
+ *words = bytes / 2;
+
+ for (int i = 0; i < *words; i++)
+ data[i] = le16_to_cpu(((__le16 *)data)[i]);
+
+ return 0;
+}
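+
+/* Usage sketch (illustrative only), mirroring ixgbe_read_ee_aci_buffer_e610()
+ * below: read 8 Shadow RAM words starting at word offset 0x10 while holding
+ * the NVM resource. On return, "words" reports how many words were read.
+ *
+ *	u16 words = 8, buf[8];
+ *	int err;
+ *
+ *	err = ixgbe_acquire_nvm(hw, LIBIE_AQC_RES_ACCESS_READ);
+ *	if (!err) {
+ *		err = ixgbe_read_sr_buf_aci(hw, 0x10, &words, buf);
+ *		ixgbe_release_nvm(hw);
+ *	}
+ */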
+
+/**
+ * ixgbe_read_ee_aci_e610 - Read EEPROM word using the admin command.
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM using the ACI.
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding with reading.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_read_ee_aci_e610(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+ int err;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ err = hw->eeprom.ops.init_params(hw);
+ if (err)
+ return err;
+ }
+
+ err = ixgbe_acquire_nvm(hw, LIBIE_AQC_RES_ACCESS_READ);
+ if (err)
+ return err;
+
+ err = ixgbe_read_sr_word_aci(hw, offset, data);
+ ixgbe_release_nvm(hw);
+
+ return err;
+}
+
+/**
+ * ixgbe_read_ee_aci_buffer_e610 - Read EEPROM words via ACI
+ * @hw: pointer to hardware structure
+ * @offset: offset of words in the EEPROM to read
+ * @words: number of words to read
+ * @data: words to read from the EEPROM
+ *
+ * Read 16 bit words from the EEPROM via the ACI. Initialize the EEPROM params
+ * prior to the read. Acquire/release the NVM ownership.
+ *
+ * Return: the operation exit code.
+ */
+int ixgbe_read_ee_aci_buffer_e610(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ int err;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ err = hw->eeprom.ops.init_params(hw);
+ if (err)
+ return err;
+ }
+
+ err = ixgbe_acquire_nvm(hw, LIBIE_AQC_RES_ACCESS_READ);
+ if (err)
+ return err;
+
+ err = ixgbe_read_sr_buf_aci(hw, offset, &words, data);
+ ixgbe_release_nvm(hw);
+
+ return err;
+}
+
+/**
+ * ixgbe_validate_eeprom_checksum_e610 - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum_val: calculated checksum
+ *
+ * Performs checksum calculation and validates the EEPROM checksum. If the
+ * caller does not need checksum_val, the value can be NULL.
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_validate_eeprom_checksum_e610(struct ixgbe_hw *hw, u16 *checksum_val)
+{
+ int err;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ err = hw->eeprom.ops.init_params(hw);
+ if (err)
+ return err;
+ }
+
+ err = ixgbe_nvm_validate_checksum(hw);
+ if (err)
+ return err;
+
+ if (checksum_val) {
+ u16 tmp_checksum;
+
+ err = ixgbe_acquire_nvm(hw, LIBIE_AQC_RES_ACCESS_READ);
+ if (err)
+ return err;
+
+ err = ixgbe_read_sr_word_aci(hw, IXGBE_E610_SR_SW_CHECKSUM_WORD,
+ &tmp_checksum);
+ ixgbe_release_nvm(hw);
+
+ if (!err)
+ *checksum_val = tmp_checksum;
+ }
+
+ return err;
+}
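+
+/* Usage sketch (illustrative only): validate the checksum without retrieving
+ * the stored value, which the NULL checksum_val contract above allows.
+ *
+ *	int err = ixgbe_validate_eeprom_checksum_e610(hw, NULL);
+ */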
+
+/**
+ * ixgbe_reset_hw_e610 - Perform hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks
+ * and clears all interrupts, and performs a reset.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_reset_hw_e610(struct ixgbe_hw *hw)
+{
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
+ u32 ctrl, i;
+ int err;
+
+ /* Call adapter stop to disable tx/rx and clear interrupts */
+ err = hw->mac.ops.stop_adapter(hw);
+ if (err)
+ goto reset_hw_out;
+
+ /* Flush pending Tx transactions. */
+ ixgbe_clear_tx_pending(hw);
+
+ hw->phy.ops.init(hw);
+mac_reset_top:
+ err = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+ if (err)
+ return -EBUSY;
+ ctrl = IXGBE_CTRL_RST;
+ ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
+ IXGBE_WRITE_FLUSH(hw);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+ /* Poll for reset bit to self-clear indicating reset is complete */
+ for (i = 0; i < 10; i++) {
+ udelay(1);
+ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+ if (!(ctrl & IXGBE_CTRL_RST_MASK))
+ break;
+ }
+
+ if (ctrl & IXGBE_CTRL_RST_MASK) {
+ struct ixgbe_adapter *adapter = container_of(hw, struct ixgbe_adapter,
+ hw);
+
+ err = -EIO;
+ netdev_err(adapter->netdev, "Reset polling failed to complete.\n");
+ }
+
+ /* Double resets are required for recovery from certain error
+ * conditions. Between resets, it is necessary to stall to allow time
+ * for any pending HW events to complete.
+ */
+ msleep(100);
+ if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+ hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+ goto mac_reset_top;
+ }
+
+ /* Set the Rx packet buffer size. */
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), GENMASK(18, 17));
+
+ /* Store the permanent mac address */
+ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
+ /* Maximum number of Receive Address Registers. */
+#define IXGBE_MAX_NUM_RAR 128
+
+ /* Store MAC address from RAR0, clear receive address registers, and
+ * clear the multicast table. Also reset num_rar_entries to the
+ * maximum number of Receive Address Registers, since we modify this
+ * value when programming the SAN MAC address.
+ */
+ hw->mac.num_rar_entries = IXGBE_MAX_NUM_RAR;
+ hw->mac.ops.init_rx_addrs(hw);
+
+ /* Initialize bus function number */
+ hw->mac.ops.set_lan_id(hw);
+
+reset_hw_out:
+ return err;
+}
+
+/**
+ * ixgbe_get_pfa_module_tlv - Read sub module TLV from NVM PFA
+ * @hw: pointer to hardware structure
+ * @module_tlv: pointer to module TLV to return
+ * @module_tlv_len: pointer to module TLV length to return
+ * @module_type: module type requested
+ *
+ * Find the requested sub module TLV type from the Preserved Field
+ * Area (PFA) and return the TLV pointer and length. The caller can
+ * use these to read the variable length TLV value.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_get_pfa_module_tlv(struct ixgbe_hw *hw, u16 *module_tlv,
+ u16 *module_tlv_len, u16 module_type)
+{
+ u16 pfa_len, pfa_ptr, pfa_end_ptr;
+ u16 next_tlv;
+ int err;
+
+ err = ixgbe_read_ee_aci_e610(hw, IXGBE_E610_SR_PFA_PTR, &pfa_ptr);
+ if (err)
+ return err;
+
+ err = ixgbe_read_ee_aci_e610(hw, pfa_ptr, &pfa_len);
+ if (err)
+ return err;
+
+ /* Starting with first TLV after PFA length, iterate through the list
+ * of TLVs to find the requested one.
+ */
+ next_tlv = pfa_ptr + 1;
+ pfa_end_ptr = pfa_ptr + pfa_len;
+ while (next_tlv < pfa_end_ptr) {
+ u16 tlv_sub_module_type, tlv_len;
+
+ /* Read TLV type */
+ err = ixgbe_read_ee_aci_e610(hw, next_tlv,
+ &tlv_sub_module_type);
+ if (err)
+ break;
+
+ /* Read TLV length */
+ err = ixgbe_read_ee_aci_e610(hw, next_tlv + 1, &tlv_len);
+ if (err)
+ break;
+
+ if (tlv_sub_module_type == module_type) {
+ if (tlv_len) {
+ *module_tlv = next_tlv;
+ *module_tlv_len = tlv_len;
+ return 0;
+ }
+ return -EIO;
+ }
+ /* Check next TLV, i.e. current TLV pointer + length + 2 words
+ * (for current TLV's type and length).
+ */
+ next_tlv = next_tlv + tlv_len + 2;
+ }
+ /* Module does not exist */
+ return -ENODATA;
+}
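+
+/* PFA TLV layout assumed by the walker above (illustrative):
+ *
+ *	word 0:                  TLV sub-module type
+ *	word 1:                  TLV length (value length, in words)
+ *	words 2..(length + 1):   TLV value
+ *
+ * which is why the next TLV starts at the current one plus length plus two
+ * words.
+ */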
+
+/**
+ * ixgbe_read_pba_string_e610 - Read PBA string from NVM
+ * @hw: pointer to hardware structure
+ * @pba_num: stores the part number string from the NVM
+ * @pba_num_size: part number string buffer length
+ *
+ * Read the part number string from the NVM.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_read_pba_string_e610(struct ixgbe_hw *hw, u8 *pba_num,
+ u32 pba_num_size)
+{
+ u16 pba_tlv, pba_tlv_len;
+ u16 pba_word, pba_size;
+ int err;
+
+ *pba_num = '\0';
+
+ err = ixgbe_get_pfa_module_tlv(hw, &pba_tlv, &pba_tlv_len,
+ IXGBE_E610_SR_PBA_BLOCK_PTR);
+ if (err)
+ return err;
+
+ /* pba_size is the next word */
+ err = ixgbe_read_ee_aci_e610(hw, (pba_tlv + 2), &pba_size);
+ if (err)
+ return err;
+
+ if (pba_tlv_len < pba_size)
+ return -EINVAL;
+
+ /* Subtract one to get PBA word count (PBA Size word is included in
+ * total size).
+ */
+ pba_size--;
+
+ if (pba_num_size < (((u32)pba_size * 2) + 1))
+ return -EINVAL;
+
+ for (u16 i = 0; i < pba_size; i++) {
+ err = ixgbe_read_ee_aci_e610(hw, (pba_tlv + 2 + 1) + i,
+ &pba_word);
+ if (err)
+ return err;
+
+ pba_num[(i * 2)] = FIELD_GET(IXGBE_E610_SR_PBA_BLOCK_MASK,
+ pba_word);
+ pba_num[(i * 2) + 1] = pba_word & 0xFF;
+ }
+
+ pba_num[(pba_size * 2)] = '\0';
+
+ return err;
+}
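+
+/* PBA string layout assumed above (illustrative): each Shadow RAM word holds
+ * two ASCII characters, high byte first (assuming IXGBE_E610_SR_PBA_BLOCK_MASK
+ * selects the upper byte), so pba_size words decode to 2 * pba_size characters
+ * plus the NUL terminator, hence the pba_num_size check.
+ */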
+
+static int __fwlog_send_cmd(void *priv, struct libie_aq_desc *desc, void *buf,
+ u16 size)
+{
+ struct ixgbe_hw *hw = priv;
+
+ return ixgbe_aci_send_cmd(hw, desc, buf, size);
+}
+
+int ixgbe_fwlog_init(struct ixgbe_hw *hw)
+{
+ struct ixgbe_adapter *adapter = hw->back;
+ struct libie_fwlog_api api = {
+ .pdev = adapter->pdev,
+ .send_cmd = __fwlog_send_cmd,
+ .debugfs_root = adapter->ixgbe_dbg_adapter,
+ .priv = hw,
+ };
+
+ if (hw->mac.type != ixgbe_mac_e610)
+ return -EOPNOTSUPP;
+
+ return libie_fwlog_init(&hw->fwlog, &api);
+}
+
+void ixgbe_fwlog_deinit(struct ixgbe_hw *hw)
+{
+ if (hw->mac.type != ixgbe_mac_e610)
+ return;
+
+ libie_fwlog_deinit(&hw->fwlog);
+}
+
+static const struct ixgbe_mac_operations mac_ops_e610 = {
+ .init_hw = ixgbe_init_hw_generic,
+ .start_hw = ixgbe_start_hw_e610,
+ .clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic,
+ .enable_rx_dma = ixgbe_enable_rx_dma_generic,
+ .get_mac_addr = ixgbe_get_mac_addr_generic,
+ .get_device_caps = ixgbe_get_device_caps_generic,
+ .stop_adapter = ixgbe_stop_adapter_generic,
+ .set_lan_id = ixgbe_set_lan_id_multi_port_pcie,
+ .set_rxpba = ixgbe_set_rxpba_generic,
+ .check_link = ixgbe_check_link_e610,
+ .blink_led_start = ixgbe_blink_led_start_X540,
+ .blink_led_stop = ixgbe_blink_led_stop_X540,
+ .set_rar = ixgbe_set_rar_generic,
+ .clear_rar = ixgbe_clear_rar_generic,
+ .set_vmdq = ixgbe_set_vmdq_generic,
+ .set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic,
+ .clear_vmdq = ixgbe_clear_vmdq_generic,
+ .init_rx_addrs = ixgbe_init_rx_addrs_generic,
+ .update_mc_addr_list = ixgbe_update_mc_addr_list_generic,
+ .enable_mc = ixgbe_enable_mc_generic,
+ .disable_mc = ixgbe_disable_mc_generic,
+ .clear_vfta = ixgbe_clear_vfta_generic,
+ .set_vfta = ixgbe_set_vfta_generic,
+ .fc_enable = ixgbe_fc_enable_generic,
+ .set_fw_drv_ver = ixgbe_set_fw_drv_ver_x550,
+ .init_uta_tables = ixgbe_init_uta_tables_generic,
+ .set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing,
+ .set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing,
+ .set_source_address_pruning =
+ ixgbe_set_source_address_pruning_x550,
+ .set_ethertype_anti_spoofing =
+ ixgbe_set_ethertype_anti_spoofing_x550,
+ .disable_rx_buff = ixgbe_disable_rx_buff_generic,
+ .enable_rx_buff = ixgbe_enable_rx_buff_generic,
+ .enable_rx = ixgbe_enable_rx_generic,
+ .disable_rx = ixgbe_disable_rx_e610,
+ .led_on = ixgbe_led_on_generic,
+ .led_off = ixgbe_led_off_generic,
+ .init_led_link_act = ixgbe_init_led_link_act_generic,
+ .reset_hw = ixgbe_reset_hw_e610,
+ .get_fw_ver = ixgbe_aci_get_fw_ver,
+ .get_media_type = ixgbe_get_media_type_e610,
+ .setup_link = ixgbe_setup_link_e610,
+ .fw_recovery_mode = ixgbe_fw_recovery_mode_e610,
+ .fw_rollback_mode = ixgbe_fw_rollback_mode_e610,
+ .get_nvm_ver = ixgbe_get_active_nvm_ver,
+ .get_link_capabilities = ixgbe_get_link_capabilities_e610,
+ .get_bus_info = ixgbe_get_bus_info_generic,
+ .acquire_swfw_sync = ixgbe_acquire_swfw_sync_X540,
+ .release_swfw_sync = ixgbe_release_swfw_sync_X540,
+ .init_swfw_sync = ixgbe_init_swfw_sync_X540,
+ .prot_autoc_read = prot_autoc_read_generic,
+ .prot_autoc_write = prot_autoc_write_generic,
+ .setup_fc = ixgbe_setup_fc_e610,
+ .fc_autoneg = ixgbe_fc_autoneg_e610,
+ .enable_mdd = ixgbe_enable_mdd_x550,
+ .disable_mdd = ixgbe_disable_mdd_x550,
+ .restore_mdd_vf = ixgbe_restore_mdd_vf_x550,
+ .handle_mdd = ixgbe_handle_mdd_x550,
+};
+
+static const struct ixgbe_phy_operations phy_ops_e610 = {
+ .init = ixgbe_init_phy_ops_e610,
+ .identify = ixgbe_identify_phy_e610,
+ .identify_sfp = ixgbe_identify_module_e610,
+ .setup_link_speed = ixgbe_setup_phy_link_speed_generic,
+ .setup_link = ixgbe_setup_phy_link_e610,
+ .enter_lplu = ixgbe_enter_lplu_e610,
+};
+
+static const struct ixgbe_eeprom_operations eeprom_ops_e610 = {
+ .read = ixgbe_read_ee_aci_e610,
+ .read_buffer = ixgbe_read_ee_aci_buffer_e610,
+ .validate_checksum = ixgbe_validate_eeprom_checksum_e610,
+ .read_pba_string = ixgbe_read_pba_string_e610,
+ .init_params = ixgbe_init_eeprom_params_e610,
+};
+
+const struct ixgbe_info ixgbe_e610_info = {
+ .mac = ixgbe_mac_e610,
+ .get_invariants = ixgbe_get_invariants_X540,
+ .mac_ops = &mac_ops_e610,
+ .eeprom_ops = &eeprom_ops_e610,
+ .phy_ops = &phy_ops_e610,
+ .mbx_ops = &mbx_ops_generic,
+ .mvals = ixgbe_mvals_x550em_a,
+};
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h
new file mode 100644
index 000000000000..11916b979d28
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2024 Intel Corporation. */
+
+#ifndef _IXGBE_E610_H_
+#define _IXGBE_E610_H_
+
+#include "ixgbe_type.h"
+
+int ixgbe_aci_send_cmd(struct ixgbe_hw *hw, struct libie_aq_desc *desc,
+ void *buf, u16 buf_size);
+bool ixgbe_aci_check_event_pending(struct ixgbe_hw *hw);
+int ixgbe_aci_get_event(struct ixgbe_hw *hw, struct ixgbe_aci_event *e,
+ bool *pending);
+void ixgbe_fill_dflt_direct_cmd_desc(struct libie_aq_desc *desc, u16 opcode);
+int ixgbe_acquire_res(struct ixgbe_hw *hw, enum libie_aq_res_id res,
+ enum libie_aq_res_access_type access, u32 timeout);
+void ixgbe_release_res(struct ixgbe_hw *hw, enum libie_aq_res_id res);
+int ixgbe_aci_list_caps(struct ixgbe_hw *hw, void *buf, u16 buf_size,
+ u32 *cap_count, enum ixgbe_aci_opc opc);
+int ixgbe_discover_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_caps);
+int ixgbe_discover_func_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_func_caps *func_caps);
+int ixgbe_get_caps(struct ixgbe_hw *hw);
+int ixgbe_aci_disable_rxen(struct ixgbe_hw *hw);
+int ixgbe_aci_get_phy_caps(struct ixgbe_hw *hw, bool qual_mods, u8 report_mode,
+ struct ixgbe_aci_cmd_get_phy_caps_data *pcaps);
+void ixgbe_copy_phy_caps_to_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *caps,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg);
+int ixgbe_aci_set_phy_cfg(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg);
+int ixgbe_aci_set_link_restart_an(struct ixgbe_hw *hw, bool ena_link);
+int ixgbe_update_link_info(struct ixgbe_hw *hw);
+int ixgbe_get_link_status(struct ixgbe_hw *hw, bool *link_up);
+int ixgbe_aci_get_link_info(struct ixgbe_hw *hw, bool ena_lse,
+ struct ixgbe_link_status *link);
+int ixgbe_aci_set_event_mask(struct ixgbe_hw *hw, u8 port_num, u16 mask);
+int ixgbe_configure_lse(struct ixgbe_hw *hw, bool activate, u16 mask);
+int ixgbe_aci_set_port_id_led(struct ixgbe_hw *hw, bool orig_mode);
+enum ixgbe_media_type ixgbe_get_media_type_e610(struct ixgbe_hw *hw);
+int ixgbe_setup_link_e610(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait);
+int ixgbe_check_link_e610(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete);
+int ixgbe_get_link_capabilities_e610(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg);
+int ixgbe_cfg_phy_fc(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg,
+ enum ixgbe_fc_mode req_mode);
+int ixgbe_setup_fc_e610(struct ixgbe_hw *hw);
+void ixgbe_fc_autoneg_e610(struct ixgbe_hw *hw);
+void ixgbe_disable_rx_e610(struct ixgbe_hw *hw);
+int ixgbe_init_phy_ops_e610(struct ixgbe_hw *hw);
+int ixgbe_identify_phy_e610(struct ixgbe_hw *hw);
+int ixgbe_identify_module_e610(struct ixgbe_hw *hw);
+int ixgbe_setup_phy_link_e610(struct ixgbe_hw *hw);
+int ixgbe_set_phy_power_e610(struct ixgbe_hw *hw, bool on);
+int ixgbe_enter_lplu_e610(struct ixgbe_hw *hw);
+int ixgbe_init_eeprom_params_e610(struct ixgbe_hw *hw);
+int ixgbe_aci_get_netlist_node(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_get_link_topo *cmd,
+ u8 *node_part_number, u16 *node_handle);
+int ixgbe_acquire_nvm(struct ixgbe_hw *hw,
+ enum libie_aq_res_access_type access);
+void ixgbe_release_nvm(struct ixgbe_hw *hw);
+int ixgbe_aci_read_nvm(struct ixgbe_hw *hw, u16 module_typeid, u32 offset,
+ u16 length, void *data, bool last_command,
+ bool read_shadow_ram);
+int ixgbe_nvm_validate_checksum(struct ixgbe_hw *hw);
+int ixgbe_get_inactive_orom_ver(struct ixgbe_hw *hw,
+ struct ixgbe_orom_info *orom);
+int ixgbe_get_inactive_nvm_ver(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm);
+int ixgbe_get_inactive_netlist_ver(struct ixgbe_hw *hw,
+ struct ixgbe_netlist_info *netlist);
+int ixgbe_read_sr_word_aci(struct ixgbe_hw *hw, u16 offset, u16 *data);
+int ixgbe_read_flat_nvm(struct ixgbe_hw *hw, u32 offset, u32 *length,
+ u8 *data, bool read_shadow_ram);
+int ixgbe_read_sr_buf_aci(struct ixgbe_hw *hw, u16 offset, u16 *words,
+ u16 *data);
+int ixgbe_read_ee_aci_e610(struct ixgbe_hw *hw, u16 offset, u16 *data);
+int ixgbe_read_ee_aci_buffer_e610(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+int ixgbe_validate_eeprom_checksum_e610(struct ixgbe_hw *hw, u16 *checksum_val);
+int ixgbe_reset_hw_e610(struct ixgbe_hw *hw);
+int ixgbe_get_flash_data(struct ixgbe_hw *hw);
+int ixgbe_aci_nvm_update_empr(struct ixgbe_hw *hw);
+int ixgbe_nvm_set_pkg_data(struct ixgbe_hw *hw, bool del_pkg_data_flag,
+ u8 *data, u16 length);
+int ixgbe_nvm_pass_component_tbl(struct ixgbe_hw *hw, u8 *data, u16 length,
+ u8 transfer_flag, u8 *comp_response,
+ u8 *comp_response_code);
+int ixgbe_aci_erase_nvm(struct ixgbe_hw *hw, u16 module_typeid);
+int ixgbe_aci_update_nvm(struct ixgbe_hw *hw, u16 module_typeid,
+ u32 offset, u16 length, void *data,
+ bool last_command, u8 command_flags);
+int ixgbe_nvm_write_activate(struct ixgbe_hw *hw, u16 cmd_flags,
+ u8 *response_flags);
+int ixgbe_fwlog_init(struct ixgbe_hw *hw);
+void ixgbe_fwlog_deinit(struct ixgbe_hw *hw);
+
+#endif /* _IXGBE_E610_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 6e6e6f1847b6..2ad81f687a84 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
/* ethtool support for ixgbe */
@@ -213,7 +213,7 @@ static void ixgbe_set_advertising_10gtypes(struct ixgbe_hw *hw,
static int ixgbe_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
ixgbe_link_speed supported_link;
bool autoneg = false;
@@ -458,7 +458,7 @@ static int ixgbe_get_link_ksettings(struct net_device *netdev,
static int ixgbe_set_link_ksettings(struct net_device *netdev,
const struct ethtool_link_ksettings *cmd)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u32 advertised, old;
int err = 0;
@@ -535,7 +535,7 @@ static int ixgbe_set_link_ksettings(struct net_device *netdev,
static void ixgbe_get_pause_stats(struct net_device *netdev,
struct ethtool_pause_stats *stats)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw_stats *hwstats = &adapter->stats;
stats->tx_pause_frames = hwstats->lxontxc + hwstats->lxofftxc;
@@ -545,7 +545,7 @@ static void ixgbe_get_pause_stats(struct net_device *netdev,
static void ixgbe_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
if (ixgbe_device_supports_autoneg_fc(hw) &&
@@ -564,10 +564,26 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
}
}
+static void ixgbe_set_pauseparam_finalize(struct net_device *netdev,
+ struct ixgbe_fc_info *fc)
+{
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ /* If the thing changed then we'll update and use new autoneg. */
+ if (memcmp(fc, &hw->fc, sizeof(*fc))) {
+ hw->fc = *fc;
+ if (netif_running(netdev))
+ ixgbe_reinit_locked(adapter);
+ else
+ ixgbe_reset(adapter);
+ }
+}
+
static int ixgbe_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
struct ixgbe_fc_info fc = hw->fc;
@@ -592,27 +608,52 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
else
fc.requested_mode = ixgbe_fc_none;
- /* if the thing changed then we'll update and use new autoneg */
- if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
- hw->fc = fc;
- if (netif_running(netdev))
- ixgbe_reinit_locked(adapter);
- else
- ixgbe_reset(adapter);
+ ixgbe_set_pauseparam_finalize(netdev, &fc);
+
+ return 0;
+}
+
+static int ixgbe_set_pauseparam_e610(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_fc_info fc = hw->fc;
+
+ if (!ixgbe_device_supports_autoneg_fc(hw))
+ return -EOPNOTSUPP;
+
+ if (pause->autoneg == AUTONEG_DISABLE) {
+ netdev_info(netdev,
+ "Cannot disable autonegotiation on this device.\n");
+ return -EOPNOTSUPP;
}
+ fc.disable_fc_autoneg = false;
+
+ if (pause->rx_pause && pause->tx_pause)
+ fc.requested_mode = ixgbe_fc_full;
+ else if (pause->rx_pause)
+ fc.requested_mode = ixgbe_fc_rx_pause;
+ else if (pause->tx_pause)
+ fc.requested_mode = ixgbe_fc_tx_pause;
+ else
+ fc.requested_mode = ixgbe_fc_none;
+
+ ixgbe_set_pauseparam_finalize(netdev, &fc);
+
return 0;
}
static u32 ixgbe_get_msglevel(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
return adapter->msg_enable;
}
static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
adapter->msg_enable = data;
}
@@ -627,7 +668,7 @@ static int ixgbe_get_regs_len(struct net_device *netdev)
static void ixgbe_get_regs(struct net_device *netdev,
struct ethtool_regs *regs, void *p)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u32 *regs_buff = p;
u8 i;
@@ -690,6 +731,7 @@ static void ixgbe_get_regs(struct net_device *netdev,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
break;
@@ -991,16 +1033,24 @@ static void ixgbe_get_regs(struct net_device *netdev,
regs_buff[1144] = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
}
+static void ixgbe_get_link_ext_stats(struct net_device *netdev,
+ struct ethtool_link_ext_stats *stats)
+{
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
+
+ stats->link_down_events = adapter->link_down_events;
+}
+
static int ixgbe_get_eeprom_len(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
return adapter->hw.eeprom.word_size * 2;
}
static int ixgbe_get_eeprom(struct net_device *netdev,
struct ethtool_eeprom *eeprom, u8 *bytes)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u16 *eeprom_buff;
int first_word, last_word, eeprom_len;
@@ -1036,7 +1086,7 @@ static int ixgbe_get_eeprom(struct net_device *netdev,
static int ixgbe_set_eeprom(struct net_device *netdev,
struct ethtool_eeprom *eeprom, u8 *bytes)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u16 *eeprom_buff;
void *ptr;
@@ -1103,10 +1153,22 @@ err:
return ret_val;
}
+void ixgbe_refresh_fw_version(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ ixgbe_get_flash_data(hw);
+ ixgbe_set_fw_version_e610(adapter);
+}
+
static void ixgbe_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
+
+ /* need to refresh info for e610 in case fw reloads at runtime */
+ if (adapter->hw.mac.type == ixgbe_mac_e610)
+ ixgbe_refresh_fw_version(adapter);
strscpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
@@ -1160,7 +1222,7 @@ static void ixgbe_get_ringparam(struct net_device *netdev,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
@@ -1175,7 +1237,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_ring *temp_ring;
int i, j, err = 0;
u32 new_rx_count, new_tx_count;
@@ -1216,7 +1278,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
/* allocate temporary buffer to store rings in */
i = max_t(int, adapter->num_tx_queues + adapter->num_xdp_queues,
adapter->num_rx_queues);
- temp_ring = vmalloc(array_size(i, sizeof(struct ixgbe_ring)));
+ temp_ring = vmalloc_array(i, sizeof(struct ixgbe_ring));
if (!temp_ring) {
err = -ENOMEM;
@@ -1335,7 +1397,7 @@ static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
static void ixgbe_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct rtnl_link_stats64 temp;
const struct rtnl_link_stats64 *net_stats;
unsigned int start;
@@ -1613,6 +1675,7 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
toggle = 0x7FFFF30F;
test = reg_test_82599;
break;
@@ -1708,7 +1771,7 @@ static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data)
static irqreturn_t ixgbe_test_intr(int irq, void *data)
{
struct net_device *netdev = (struct net_device *) data;
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);
@@ -1874,6 +1937,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
reg_data |= IXGBE_DMATXCTL_TE;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
@@ -1935,6 +1999,7 @@ static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
reg_data |= IXGBE_MACC_FLU;
IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
@@ -2179,7 +2244,7 @@ out:
static void ixgbe_diag_test(struct net_device *netdev,
struct ethtool_test *eth_test, u64 *data)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
bool if_running = netif_running(netdev);
if (ixgbe_removed(adapter->hw.hw_addr)) {
@@ -2302,7 +2367,7 @@ static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
static void ixgbe_get_wol(struct net_device *netdev,
struct ethtool_wolinfo *wol)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
wol->supported = WAKE_UCAST | WAKE_MCAST |
WAKE_BCAST | WAKE_MAGIC;
@@ -2324,7 +2389,7 @@ static void ixgbe_get_wol(struct net_device *netdev,
static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE |
WAKE_FILTER))
@@ -2349,9 +2414,53 @@ static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return 0;
}
+static int ixgbe_set_wol_acpi(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 grc;
+
+ if (ixgbe_wol_exclusion(adapter, wol))
+ return wol->wolopts ? -EOPNOTSUPP : 0;
+
+ /* disable APM wakeup */
+ grc = IXGBE_READ_REG(hw, IXGBE_GRC_X550EM_a);
+ grc &= ~IXGBE_GRC_APME;
+ IXGBE_WRITE_REG(hw, IXGBE_GRC_X550EM_a, grc);
+
+ /* erase existing filters */
+ IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
+ adapter->wol = 0;
+
+ if (wol->wolopts & WAKE_UCAST)
+ adapter->wol |= IXGBE_WUFC_EX;
+ if (wol->wolopts & WAKE_MCAST)
+ adapter->wol |= IXGBE_WUFC_MC;
+ if (wol->wolopts & WAKE_BCAST)
+ adapter->wol |= IXGBE_WUFC_BC;
+
+ IXGBE_WRITE_REG(hw, IXGBE_WUC, IXGBE_WUC_PME_EN);
+ IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wol);
+
+ hw->wol_enabled = adapter->wol;
+ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
+ return 0;
+}
+
+static int ixgbe_set_wol_e610(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ if (wol->wolopts & (WAKE_UCAST | WAKE_MCAST | WAKE_BCAST))
+ return ixgbe_set_wol_acpi(netdev, wol);
+ else
+ return ixgbe_set_wol(netdev, wol);
+}
+
static int ixgbe_nway_reset(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (netif_running(netdev))
ixgbe_reinit_locked(adapter);
@@ -2362,7 +2471,7 @@ static int ixgbe_nway_reset(struct net_device *netdev)
static int ixgbe_set_phys_id(struct net_device *netdev,
enum ethtool_phys_id_state state)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
if (!hw->mac.ops.led_on || !hw->mac.ops.led_off)
@@ -2390,12 +2499,32 @@ static int ixgbe_set_phys_id(struct net_device *netdev,
return 0;
}
+static int ixgbe_set_phys_id_e610(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
+ bool led_active;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ led_active = true;
+ break;
+ case ETHTOOL_ID_INACTIVE:
+ led_active = false;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return ixgbe_aci_set_port_id_led(&adapter->hw, !led_active);
+}
+
static int ixgbe_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
/* only valid if in constant ITR mode */
if (adapter->rx_itr_setting <= 1)
@@ -2451,7 +2580,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_q_vector *q_vector;
int i;
u16 tx_itr_param, rx_itr_param, tx_itr_prev;
@@ -2624,9 +2753,11 @@ static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
return 0;
}
-static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
- struct ethtool_rxnfc *cmd)
+static int ixgbe_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
+
cmd->data = 0;
/* Report default options for RSS on ixgbe */
@@ -2674,18 +2805,21 @@ static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter)
return 64;
}
+static u32 ixgbe_get_rx_ring_count(struct net_device *dev)
+{
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
+
+ return min_t(u32, adapter->num_rx_queues,
+ ixgbe_rss_indir_tbl_max(adapter));
+}
+
static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
int ret = -EOPNOTSUPP;
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = min_t(int, adapter->num_rx_queues,
- ixgbe_rss_indir_tbl_max(adapter));
- ret = 0;
- break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = adapter->fdir_filter_count;
ret = 0;
@@ -2696,9 +2830,6 @@ static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
case ETHTOOL_GRXCLSRLALL:
ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs);
break;
- case ETHTOOL_GRXFH:
- ret = ixgbe_get_rss_hash_opts(adapter, cmd);
- break;
default:
break;
}
@@ -2950,9 +3081,11 @@ static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
#define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \
IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
-static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
- struct ethtool_rxnfc *nfc)
+static int ixgbe_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
{
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
u32 flags2 = adapter->flags2;
/*
@@ -3065,7 +3198,7 @@ static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
int ret = -EOPNOTSUPP;
switch (cmd->cmd) {
@@ -3075,9 +3208,6 @@ static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
case ETHTOOL_SRXCLSRLDEL:
ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd);
break;
- case ETHTOOL_SRXFH:
- ret = ixgbe_set_rss_hash_opt(adapter, cmd);
- break;
default:
break;
}
@@ -3092,7 +3222,7 @@ static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev)
static u32 ixgbe_rss_indir_size(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
return ixgbe_rss_indir_tbl_entries(adapter);
}
@@ -3112,7 +3242,7 @@ static void ixgbe_get_reta(struct ixgbe_adapter *adapter, u32 *indir)
static int ixgbe_get_rxfh(struct net_device *netdev,
struct ethtool_rxfh_param *rxfh)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
rxfh->hfunc = ETH_RSS_HASH_TOP;
@@ -3130,7 +3260,7 @@ static int ixgbe_set_rxfh(struct net_device *netdev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
int i;
u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
@@ -3170,9 +3300,9 @@ static int ixgbe_set_rxfh(struct net_device *netdev,
}
static int ixgbe_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
/* we always support timestamping disabled */
info->rx_filters = BIT(HWTSTAMP_FILTER_NONE);
@@ -3181,6 +3311,7 @@ static int ixgbe_get_ts_info(struct net_device *dev,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL);
break;
case ixgbe_mac_X540:
@@ -3196,16 +3327,12 @@ static int ixgbe_get_ts_info(struct net_device *dev,
info->so_timestamping =
SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
if (adapter->ptp_clock)
info->phc_index = ptp_clock_index(adapter->ptp_clock);
- else
- info->phc_index = -1;
info->tx_types =
BIT(HWTSTAMP_TX_OFF) |
@@ -3251,7 +3378,7 @@ static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
static void ixgbe_get_channels(struct net_device *dev,
struct ethtool_channels *ch)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
/* report maximum channels */
ch->max_combined = ixgbe_max_channels(adapter);
@@ -3288,7 +3415,7 @@ static void ixgbe_get_channels(struct net_device *dev,
static int ixgbe_set_channels(struct net_device *dev,
struct ethtool_channels *ch)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
unsigned int count = ch->combined_count;
u8 max_rss_indices = ixgbe_max_rss_indices(adapter);
@@ -3326,7 +3453,7 @@ static int ixgbe_set_channels(struct net_device *dev,
static int ixgbe_get_module_info(struct net_device *dev,
struct ethtool_modinfo *modinfo)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_hw *hw = &adapter->hw;
u8 sff8472_rev, addr_mode;
bool page_swap = false;
@@ -3372,7 +3499,7 @@ static int ixgbe_get_module_eeprom(struct net_device *dev,
struct ethtool_eeprom *ee,
u8 *data)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_hw *hw = &adapter->hw;
int status = -EFAULT;
u8 databyte = 0xFF;
@@ -3447,13 +3574,13 @@ ixgbe_get_eee_fw(struct ixgbe_adapter *adapter, struct ethtool_keee *edata)
for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
if (hw->phy.eee_speeds_supported & ixgbe_ls_map[i].mac_speed)
- linkmode_set_bit(ixgbe_lp_map[i].link_mode,
+ linkmode_set_bit(ixgbe_ls_map[i].link_mode,
edata->supported);
}
for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
if (hw->phy.eee_speeds_advertised & ixgbe_ls_map[i].mac_speed)
- linkmode_set_bit(ixgbe_lp_map[i].link_mode,
+ linkmode_set_bit(ixgbe_ls_map[i].link_mode,
edata->advertised);
}
@@ -3468,7 +3595,7 @@ ixgbe_get_eee_fw(struct ixgbe_adapter *adapter, struct ethtool_keee *edata)
static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))
@@ -3482,7 +3609,7 @@ static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_keee *edata)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
struct ethtool_keee eee_data;
int ret_val;
@@ -3537,7 +3664,7 @@ static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_keee *edata)
static u32 ixgbe_get_priv_flags(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
u32 priv_flags = 0;
if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
@@ -3554,7 +3681,7 @@ static u32 ixgbe_get_priv_flags(struct net_device *netdev)
static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
unsigned int flags2 = adapter->flags2;
unsigned int i;
@@ -3601,6 +3728,7 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
.set_wol = ixgbe_set_wol,
.nway_reset = ixgbe_nway_reset,
.get_link = ethtool_op_get_link,
+ .get_link_ext_stats = ixgbe_get_link_ext_stats,
.get_eeprom_len = ixgbe_get_eeprom_len,
.get_eeprom = ixgbe_get_eeprom,
.set_eeprom = ixgbe_set_eeprom,
@@ -3618,12 +3746,64 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
.get_ethtool_stats = ixgbe_get_ethtool_stats,
.get_coalesce = ixgbe_get_coalesce,
.set_coalesce = ixgbe_set_coalesce,
+ .get_rx_ring_count = ixgbe_get_rx_ring_count,
+ .get_rxnfc = ixgbe_get_rxnfc,
+ .set_rxnfc = ixgbe_set_rxnfc,
+ .get_rxfh_indir_size = ixgbe_rss_indir_size,
+ .get_rxfh_key_size = ixgbe_get_rxfh_key_size,
+ .get_rxfh = ixgbe_get_rxfh,
+ .set_rxfh = ixgbe_set_rxfh,
+ .get_rxfh_fields = ixgbe_get_rxfh_fields,
+ .set_rxfh_fields = ixgbe_set_rxfh_fields,
+ .get_eee = ixgbe_get_eee,
+ .set_eee = ixgbe_set_eee,
+ .get_channels = ixgbe_get_channels,
+ .set_channels = ixgbe_set_channels,
+ .get_priv_flags = ixgbe_get_priv_flags,
+ .set_priv_flags = ixgbe_set_priv_flags,
+ .get_ts_info = ixgbe_get_ts_info,
+ .get_module_info = ixgbe_get_module_info,
+ .get_module_eeprom = ixgbe_get_module_eeprom,
+ .get_link_ksettings = ixgbe_get_link_ksettings,
+ .set_link_ksettings = ixgbe_set_link_ksettings,
+};
+
+static const struct ethtool_ops ixgbe_ethtool_ops_e610 = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
+ .get_drvinfo = ixgbe_get_drvinfo,
+ .get_regs_len = ixgbe_get_regs_len,
+ .get_regs = ixgbe_get_regs,
+ .get_wol = ixgbe_get_wol,
+ .set_wol = ixgbe_set_wol_e610,
+ .nway_reset = ixgbe_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_link_ext_stats = ixgbe_get_link_ext_stats,
+ .get_eeprom_len = ixgbe_get_eeprom_len,
+ .get_eeprom = ixgbe_get_eeprom,
+ .set_eeprom = ixgbe_set_eeprom,
+ .get_ringparam = ixgbe_get_ringparam,
+ .set_ringparam = ixgbe_set_ringparam,
+ .get_pause_stats = ixgbe_get_pause_stats,
+ .get_pauseparam = ixgbe_get_pauseparam,
+ .set_pauseparam = ixgbe_set_pauseparam_e610,
+ .get_msglevel = ixgbe_get_msglevel,
+ .set_msglevel = ixgbe_set_msglevel,
+ .self_test = ixgbe_diag_test,
+ .get_strings = ixgbe_get_strings,
+ .set_phys_id = ixgbe_set_phys_id_e610,
+ .get_sset_count = ixgbe_get_sset_count,
+ .get_ethtool_stats = ixgbe_get_ethtool_stats,
+ .get_coalesce = ixgbe_get_coalesce,
+ .set_coalesce = ixgbe_set_coalesce,
+ .get_rx_ring_count = ixgbe_get_rx_ring_count,
.get_rxnfc = ixgbe_get_rxnfc,
.set_rxnfc = ixgbe_set_rxnfc,
.get_rxfh_indir_size = ixgbe_rss_indir_size,
.get_rxfh_key_size = ixgbe_get_rxfh_key_size,
.get_rxfh = ixgbe_get_rxfh,
.set_rxfh = ixgbe_set_rxfh,
+ .get_rxfh_fields = ixgbe_get_rxfh_fields,
+ .set_rxfh_fields = ixgbe_set_rxfh_fields,
.get_eee = ixgbe_get_eee,
.set_eee = ixgbe_set_eee,
.get_channels = ixgbe_get_channels,
@@ -3639,5 +3819,10 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
void ixgbe_set_ethtool_ops(struct net_device *netdev)
{
- netdev->ethtool_ops = &ixgbe_ethtool_ops;
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
+
+ if (adapter->hw.mac.type == ixgbe_mac_e610)
+ netdev->ethtool_ops = &ixgbe_ethtool_ops_e610;
+ else
+ netdev->ethtool_ops = &ixgbe_ethtool_ops;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index 18d63c8c2ff4..011fda9c6193 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -56,7 +56,7 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
if (xid >= netdev->fcoe_ddp_xid)
return 0;
- adapter = netdev_priv(netdev);
+ adapter = ixgbe_from_netdev(netdev);
fcoe = &adapter->fcoe;
ddp = &fcoe->ddp[xid];
if (!ddp->udl)
@@ -153,7 +153,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
if (!netdev || !sgl)
return 0;
- adapter = netdev_priv(netdev);
+ adapter = ixgbe_from_netdev(netdev);
if (xid >= netdev->fcoe_ddp_xid) {
e_warn(drv, "xid=0x%x out-of-range\n", xid);
return 0;
@@ -744,7 +744,7 @@ void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
* ixgbe_setup_fcoe_ddp_resources - setup all fcoe ddp context resources
* @adapter: ixgbe adapter
*
- * Sets up ddp context resouces
+ * Sets up ddp context resources
*
* Returns : 0 indicates success or -EINVAL on failure
*/
@@ -834,7 +834,7 @@ static void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter)
*/
int ixgbe_fcoe_enable(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_fcoe *fcoe = &adapter->fcoe;
atomic_inc(&fcoe->refcnt);
@@ -858,7 +858,7 @@ int ixgbe_fcoe_enable(struct net_device *netdev)
/* enable FCoE and notify stack */
adapter->flags |= IXGBE_FLAG_FCOE_ENABLED;
- netdev->features |= NETIF_F_FCOE_MTU;
+ netdev->fcoe_mtu = true;
netdev_features_change(netdev);
/* release existing queues and reallocate them */
@@ -881,7 +881,7 @@ int ixgbe_fcoe_enable(struct net_device *netdev)
*/
int ixgbe_fcoe_disable(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (!atomic_dec_and_test(&adapter->fcoe.refcnt))
return -EINVAL;
@@ -898,7 +898,7 @@ int ixgbe_fcoe_disable(struct net_device *netdev)
/* disable FCoE and notify stack */
adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
- netdev->features &= ~NETIF_F_FCOE_MTU;
+ netdev->fcoe_mtu = false;
netdev_features_change(netdev);
@@ -927,7 +927,7 @@ int ixgbe_fcoe_disable(struct net_device *netdev)
int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
{
u16 prefix = 0xffff;
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_mac_info *mac = &adapter->hw.mac;
switch (type) {
@@ -967,7 +967,7 @@ int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
struct netdev_fcoe_hbainfo *info)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u64 dsn;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.c
new file mode 100644
index 000000000000..e5479fc07a07
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.c
@@ -0,0 +1,707 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2025 Intel Corporation. */
+
+#include <linux/crc32.h>
+#include <linux/pldmfw.h>
+#include <linux/uuid.h>
+
+#include "ixgbe.h"
+#include "ixgbe_fw_update.h"
+
+struct ixgbe_fwu_priv {
+ struct pldmfw context;
+
+ struct ixgbe_adapter *adapter;
+ struct netlink_ext_ack *extack;
+
+ /* Track which NVM banks to activate at the end of the update */
+ u8 activate_flags;
+ bool emp_reset_available;
+};
+
+/**
+ * ixgbe_send_package_data - Send record package data to firmware
+ * @context: PLDM fw update structure
+ * @data: pointer to the package data
+ * @length: length of the package data
+ *
+ * Send a copy of the package data associated with the PLDM record matching
+ * this device to the firmware.
+ *
+ * Note that this function sends an AdminQ command that will fail unless the
+ * NVM resource has been acquired.
+ *
+ * Return: zero on success, or a negative error code on failure.
+ */
+static int ixgbe_send_package_data(struct pldmfw *context,
+ const u8 *data, u16 length)
+{
+ struct ixgbe_fwu_priv *priv = container_of(context,
+ struct ixgbe_fwu_priv,
+ context);
+ struct ixgbe_adapter *adapter = priv->adapter;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u8 *package_data;
+ int err;
+
+ package_data = kmemdup(data, length, GFP_KERNEL);
+ if (!package_data)
+ return -ENOMEM;
+
+ err = ixgbe_nvm_set_pkg_data(hw, false, package_data, length);
+
+ kfree(package_data);
+
+ return err;
+}
+
+/**
+ * ixgbe_check_component_response - Report firmware response to a component
+ * @adapter: device private data structure
+ * @response: indicates whether this component can be updated
+ * @code: code indicating reason for response
+ * @extack: netlink extended ACK structure
+ *
+ * Check whether firmware indicates if this component can be updated. Report
+ * a suitable error message over the netlink extended ACK if the component
+ * cannot be updated.
+ *
+ * Return: 0 if the component can be updated, or -ECANCELED if the
+ * firmware indicates the component cannot be updated.
+ */
+static int ixgbe_check_component_response(struct ixgbe_adapter *adapter,
+ u8 response, u8 code,
+ struct netlink_ext_ack *extack)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ switch (response) {
+ case IXGBE_ACI_NVM_PASS_COMP_CAN_BE_UPDATED:
+ /* Firmware indicated this update is good to proceed. */
+ return 0;
+ case IXGBE_ACI_NVM_PASS_COMP_CAN_MAY_BE_UPDATEABLE:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Firmware recommends not updating, as it may result in a downgrade. Continuing anyways");
+ return 0;
+ case IXGBE_ACI_NVM_PASS_COMP_CAN_NOT_BE_UPDATED:
+ NL_SET_ERR_MSG_MOD(extack, "Firmware has rejected updating.");
+ break;
+ case IXGBE_ACI_NVM_PASS_COMP_PARTIAL_CHECK:
+ if (hw->mac.ops.fw_recovery_mode &&
+ hw->mac.ops.fw_recovery_mode(hw))
+ return 0;
+ break;
+ }
+
+ switch (code) {
+ case IXGBE_ACI_NVM_PASS_COMP_STAMP_IDENTICAL_CODE:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Component comparison stamp is identical to running image");
+ break;
+ case IXGBE_ACI_NVM_PASS_COMP_STAMP_LOWER:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Component comparison stamp is lower than running image");
+ break;
+ case IXGBE_ACI_NVM_PASS_COMP_INVALID_STAMP_CODE:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Component comparison stamp is invalid");
+ break;
+ case IXGBE_ACI_NVM_PASS_COMP_CONFLICT_CODE:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Component table conflict occurred");
+ break;
+ case IXGBE_ACI_NVM_PASS_COMP_PRE_REQ_NOT_MET_CODE:
+ NL_SET_ERR_MSG_MOD(extack, "Component pre-requisites not met");
+ break;
+ case IXGBE_ACI_NVM_PASS_COMP_NOT_SUPPORTED_CODE:
+ NL_SET_ERR_MSG_MOD(extack, "Component not supported");
+ break;
+ case IXGBE_ACI_NVM_PASS_COMP_CANNOT_DOWNGRADE_CODE:
+ NL_SET_ERR_MSG_MOD(extack, "Component cannot be downgraded");
+ break;
+ case IXGBE_ACI_NVM_PASS_COMP_INCOMPLETE_IMAGE_CODE:
+ NL_SET_ERR_MSG_MOD(extack, "Incomplete component image");
+ break;
+ case IXGBE_ACI_NVM_PASS_COMP_VER_STR_IDENTICAL_CODE:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Component version is identical to running image");
+ break;
+ case IXGBE_ACI_NVM_PASS_COMP_VER_STR_LOWER_CODE:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Component version is lower than the running image");
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Received unexpected response code from firmware");
+ break;
+ }
+
+ return -ECANCELED;
+}
+
+/**
+ * ixgbe_send_component_table - Send PLDM component table to firmware
+ * @context: PLDM fw update structure
+ * @component: the component to process
+ * @transfer_flag: relative transfer order of this component
+ *
+ * Read relevant data from the component and forward it to the device
+ * firmware. Check the response to determine if the firmware indicates that
+ * the update can proceed.
+ *
+ * This function sends ACI commands related to the NVM, and assumes that
+ * the NVM resource has been acquired.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int ixgbe_send_component_table(struct pldmfw *context,
+ struct pldmfw_component *component,
+ u8 transfer_flag)
+{
+ struct ixgbe_fwu_priv *priv = container_of(context,
+ struct ixgbe_fwu_priv,
+ context);
+ struct ixgbe_adapter *adapter = priv->adapter;
+ struct netlink_ext_ack *extack = priv->extack;
+ struct ixgbe_aci_cmd_nvm_comp_tbl *comp_tbl;
+ u8 comp_response, comp_response_code;
+ struct ixgbe_hw *hw = &adapter->hw;
+ size_t length;
+ int err;
+
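+	/* Only the main NVM, Option ROM and netlist components are supported. */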
+ switch (component->identifier) {
+ case NVM_COMP_ID_OROM:
+ case NVM_COMP_ID_NVM:
+ case NVM_COMP_ID_NETLIST:
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unable to update due to unknown firmware component");
+ return -EOPNOTSUPP;
+ }
+
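+	/* The component table carries the version string in a trailing
+	 * flexible array, so size the allocation with struct_size().
+	 */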
+ length = struct_size(comp_tbl, cvs, component->version_len);
+ comp_tbl = kzalloc(length, GFP_KERNEL);
+ if (!comp_tbl)
+ return -ENOMEM;
+
+ comp_tbl->comp_class = cpu_to_le16(component->classification);
+ comp_tbl->comp_id = cpu_to_le16(component->identifier);
+ comp_tbl->comp_class_idx = FWU_COMP_CLASS_IDX_NOT_USE;
+ comp_tbl->comp_cmp_stamp = cpu_to_le32(component->comparison_stamp);
+ comp_tbl->cvs_type = component->version_type;
+ comp_tbl->cvs_len = component->version_len;
+
+ memcpy(comp_tbl->cvs, component->version_string,
+ component->version_len);
+
+ err = ixgbe_nvm_pass_component_tbl(hw, (u8 *)comp_tbl, length,
+ transfer_flag, &comp_response,
+ &comp_response_code);
+
+ kfree(comp_tbl);
+
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to transfer component table to firmware");
+ return -EIO;
+ }
+
+ return ixgbe_check_component_response(adapter,
+ comp_response,
+ comp_response_code, extack);
+}
+
+/**
+ * ixgbe_write_one_nvm_block - Write an NVM block and await completion response
+ * @adapter: the PF data structure
+ * @module: the module to write to
+ * @offset: offset in bytes
+ * @block_size: size of the block to write, up to 4k
+ * @block: pointer to block of data to write
+ * @last_cmd: whether this is the last command
+ * @extack: netlink extended ACK structure
+ *
+ * Write a block of data to a flash module, and wait for the completion
+ * response message from firmware.
+ *
+ * Note this function assumes the caller has acquired the NVM resource.
+ *
+ * On successful return, reset level indicates the device reset required to
+ * complete the update.
+ *
+ * 0 - IXGBE_ACI_NVM_POR_FLAG - A full power on is required
+ * 1 - IXGBE_ACI_NVM_PERST_FLAG - A cold PCIe reset is required
+ * 2 - IXGBE_ACI_NVM_EMPR_FLAG - An EMP reset is required
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int ixgbe_write_one_nvm_block(struct ixgbe_adapter *adapter,
+ u16 module, u32 offset,
+ u16 block_size, u8 *block, bool last_cmd,
+ struct netlink_ext_ack *extack)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ return ixgbe_aci_update_nvm(hw, module, offset, block_size, block,
+ last_cmd, 0);
+}
+
+/**
+ * ixgbe_write_nvm_module - Write data to an NVM module
+ * @adapter: the PF driver structure
+ * @module: the module id to program
+ * @component: the name of the component being updated
+ * @image: buffer of image data to write to the NVM
+ * @length: length of the buffer
+ * @extack: netlink extended ACK structure
+ *
+ * Loop over the data for a given NVM module and program it in 4 KB
+ * blocks. Notify devlink core of progress after each block is programmed.
+ *
+ * Note this function assumes the caller has acquired the NVM resource.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int ixgbe_write_nvm_module(struct ixgbe_adapter *adapter, u16 module,
+ const char *component, const u8 *image,
+ u32 length,
+ struct netlink_ext_ack *extack)
+{
+ struct devlink *devlink = adapter->devlink;
+ u32 offset = 0;
+ bool last_cmd;
+ u8 *block;
+ int err;
+
+ devlink_flash_update_status_notify(devlink, "Flashing",
+ component, 0, length);
+
+ block = kzalloc(IXGBE_ACI_MAX_BUFFER_SIZE, GFP_KERNEL);
+ if (!block)
+ return -ENOMEM;
+
+ do {
+ u32 block_size;
+
+ block_size = min_t(u32, IXGBE_ACI_MAX_BUFFER_SIZE,
+ length - offset);
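+		/* The last command is the one whose block reaches the end of
+		 * the image.
+		 */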
+ last_cmd = !(offset + block_size < length);
+
+ memcpy(block, image + offset, block_size);
+
+ err = ixgbe_write_one_nvm_block(adapter, module, offset,
+ block_size, block, last_cmd,
+ extack);
+ if (err)
+ break;
+
+ offset += block_size;
+
+ devlink_flash_update_status_notify(devlink, "Flashing",
+ component, offset, length);
+ } while (!last_cmd);
+
+ if (err)
+ devlink_flash_update_status_notify(devlink, "Flashing failed",
+ component, length, length);
+ else
+ devlink_flash_update_status_notify(devlink, "Flashing done",
+ component, length, length);
+
+ kfree(block);
+
+ return err;
+}
+
+/* Length in seconds to wait before timing out when erasing a flash module.
+ * Yes, erasing really can take minutes to complete.
+ */
+#define IXGBE_FW_ERASE_TIMEOUT 300
+
+/**
+ * ixgbe_erase_nvm_module - Erase an NVM module and await firmware completion
+ * @adapter: the PF data structure
+ * @module: the module to erase
+ * @component: name of the component being updated
+ * @extack: netlink extended ACK structure
+ *
+ * Erase the inactive NVM bank associated with this module, and wait for
+ * a completion response message from firmware.
+ *
+ * Note this function assumes the caller has acquired the NVM resource.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int ixgbe_erase_nvm_module(struct ixgbe_adapter *adapter, u16 module,
+ const char *component,
+ struct netlink_ext_ack *extack)
+{
+ struct devlink *devlink = adapter->devlink;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int err;
+
+ devlink_flash_update_timeout_notify(devlink, "Erasing", component,
+ IXGBE_FW_ERASE_TIMEOUT);
+
+ err = ixgbe_aci_erase_nvm(hw, module);
+ if (err)
+ devlink_flash_update_status_notify(devlink, "Erasing failed",
+ component, 0, 0);
+ else
+ devlink_flash_update_status_notify(devlink, "Erasing done",
+ component, 0, 0);
+
+ return err;
+}
+
+/**
+ * ixgbe_switch_flash_banks - Tell firmware to switch NVM banks
+ * @adapter: Pointer to the PF data structure
+ * @activate_flags: flags used for the activation command
+ * @emp_reset_available: on return, indicates if EMP reset is available
+ * @extack: netlink extended ACK structure
+ *
+ * Notify firmware to activate the newly written flash banks, and wait for the
+ * firmware response.
+ *
+ * Return: 0 on success or an error code on failure.
+ */
+static int ixgbe_switch_flash_banks(struct ixgbe_adapter *adapter,
+ u8 activate_flags,
+ bool *emp_reset_available,
+ struct netlink_ext_ack *extack)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u8 response_flags;
+ int err;
+
+ err = ixgbe_nvm_write_activate(hw, activate_flags, &response_flags);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to switch active flash banks");
+ return err;
+ }
+
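+	/* When reset_restrict_support is set, EMP reset availability depends
+	 * on the EMPR enable flag returned by firmware; otherwise an EMP
+	 * reset is always available.
+	 */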
+ if (emp_reset_available) {
+ if (hw->dev_caps.common_cap.reset_restrict_support)
+ *emp_reset_available =
+ response_flags & IXGBE_ACI_NVM_EMPR_ENA;
+ else
+ *emp_reset_available = true;
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_flash_component - Flash a component of the NVM
+ * @context: PLDM fw update structure
+ * @component: the component table to program
+ *
+ * Program the flash contents for a given component. First, determine the
+ * module id. Then, erase the secondary bank for this module. Finally, write
+ * the contents of the component to the NVM.
+ *
+ * Note this function assumes the caller has acquired the NVM resource.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int ixgbe_flash_component(struct pldmfw *context,
+ struct pldmfw_component *component)
+{
+ struct ixgbe_fwu_priv *priv = container_of(context,
+ struct ixgbe_fwu_priv,
+ context);
+ struct netlink_ext_ack *extack = priv->extack;
+ struct ixgbe_adapter *adapter = priv->adapter;
+ const char *name;
+ u16 module;
+ int err;
+ u8 flag;
+
+ switch (component->identifier) {
+ case NVM_COMP_ID_OROM:
+ module = IXGBE_E610_SR_1ST_OROM_BANK_PTR;
+ flag = IXGBE_ACI_NVM_ACTIV_SEL_OROM;
+ name = "fw.undi";
+ break;
+ case NVM_COMP_ID_NVM:
+ module = IXGBE_E610_SR_1ST_NVM_BANK_PTR;
+ flag = IXGBE_ACI_NVM_ACTIV_SEL_NVM;
+ name = "fw.mgmt";
+ break;
+ case NVM_COMP_ID_NETLIST:
+ module = IXGBE_E610_SR_NETLIST_BANK_PTR;
+ flag = IXGBE_ACI_NVM_ACTIV_SEL_NETLIST;
+ name = "fw.netlist";
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ /* Mark this component for activating at the end. */
+ priv->activate_flags |= flag;
+
+ err = ixgbe_erase_nvm_module(adapter, module, name, extack);
+ if (err)
+ return err;
+
+ return ixgbe_write_nvm_module(adapter, module, name,
+ component->component_data,
+ component->component_size, extack);
+}
+
+/**
+ * ixgbe_finalize_update - Perform last steps to complete device update
+ * @context: PLDM fw update structure
+ *
+ * Called as the last step of the update process. Complete the update by
+ * telling the firmware to switch active banks, and perform a reset if
+ * configured.
+ *
+ * Return: 0 on success, or an error code on failure.
+ */
+static int ixgbe_finalize_update(struct pldmfw *context)
+{
+ struct ixgbe_fwu_priv *priv = container_of(context,
+ struct ixgbe_fwu_priv,
+ context);
+ struct ixgbe_adapter *adapter = priv->adapter;
+ struct netlink_ext_ack *extack = priv->extack;
+ struct devlink *devlink = adapter->devlink;
+ int err;
+
+ /* Finally, notify firmware to activate the written NVM banks */
+ err = ixgbe_switch_flash_banks(adapter, priv->activate_flags,
+ &priv->emp_reset_available, extack);
+ if (err)
+ return err;
+
+ adapter->fw_emp_reset_disabled = !priv->emp_reset_available;
+
+ if (!adapter->fw_emp_reset_disabled)
+ devlink_flash_update_status_notify(devlink,
+ "Suggested is to activate new firmware by devlink reload, if it doesn't work then a power cycle is required",
+ NULL, 0, 0);
+
+ return 0;
+}
+
+static const struct pldmfw_ops ixgbe_fwu_ops_e610 = {
+ .match_record = &pldmfw_op_pci_match_record,
+ .send_package_data = &ixgbe_send_package_data,
+ .send_component_table = &ixgbe_send_component_table,
+ .flash_component = &ixgbe_flash_component,
+ .finalize_update = &ixgbe_finalize_update,
+};
+
+/**
+ * ixgbe_get_pending_updates - Check if any flash component has a pending update
+ * @adapter: the PF driver structure
+ * @pending: on return, bitmap of updates pending
+ * @extack: Netlink extended ACK
+ *
+ * Check if the device has any pending updates on any flash components.
+ *
+ * Return: 0 on success, or a negative error code on failure. On success,
+ * @pending is set to the bitmap of pending updates.
+ */
+int ixgbe_get_pending_updates(struct ixgbe_adapter *adapter, u8 *pending,
+ struct netlink_ext_ack *extack)
+{
+ struct ixgbe_hw_dev_caps *dev_caps;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int err;
+
+ dev_caps = kzalloc(sizeof(*dev_caps), GFP_KERNEL);
+ if (!dev_caps)
+ return -ENOMEM;
+
+ err = ixgbe_discover_dev_caps(hw, dev_caps);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unable to read device capabilities");
+ kfree(dev_caps);
+ return -EIO;
+ }
+
+ *pending = 0;
+
+ if (dev_caps->common_cap.nvm_update_pending_nvm)
+ *pending |= IXGBE_ACI_NVM_ACTIV_SEL_NVM;
+
+ if (dev_caps->common_cap.nvm_update_pending_orom)
+ *pending |= IXGBE_ACI_NVM_ACTIV_SEL_OROM;
+
+ if (dev_caps->common_cap.nvm_update_pending_netlist)
+ *pending |= IXGBE_ACI_NVM_ACTIV_SEL_NETLIST;
+
+ kfree(dev_caps);
+
+ return 0;
+}
+
+/**
+ * ixgbe_cancel_pending_update - Cancel any pending update for a component
+ * @adapter: the PF driver structure
+ * @component: if not NULL, the name of the component being updated
+ * @extack: Netlink extended ACK structure
+ *
+ * Cancel any pending update for the specified component. If component is
+ * NULL, all device updates will be canceled.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int ixgbe_cancel_pending_update(struct ixgbe_adapter *adapter,
+ const char *component,
+ struct netlink_ext_ack *extack)
+{
+ struct devlink *devlink = adapter->devlink;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u8 pending;
+ int err;
+
+ err = ixgbe_get_pending_updates(adapter, &pending, extack);
+ if (err)
+ return err;
+
+ /* If the flash_update request is for a specific component, ignore all
+ * of the other components.
+ */
+ if (component) {
+ if (strcmp(component, "fw.mgmt") == 0)
+ pending &= IXGBE_ACI_NVM_ACTIV_SEL_NVM;
+ else if (strcmp(component, "fw.undi") == 0)
+ pending &= IXGBE_ACI_NVM_ACTIV_SEL_OROM;
+ else if (strcmp(component, "fw.netlist") == 0)
+ pending &= IXGBE_ACI_NVM_ACTIV_SEL_NETLIST;
+ else
+ return -EINVAL;
+ }
+
+ /* There is no previous pending update, so this request may continue */
+ if (!pending)
+ return 0;
+
+ /* In order to allow overwriting a previous pending update, notify
+ * firmware to cancel that update by issuing the appropriate command.
+ */
+ devlink_flash_update_status_notify(devlink,
+ "Canceling previous pending update",
+ component, 0, 0);
+
+ err = ixgbe_acquire_nvm(hw, LIBIE_AQC_RES_ACCESS_WRITE);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to acquire device flash lock");
+ return -EIO;
+ }
+
+ pending |= IXGBE_ACI_NVM_REVERT_LAST_ACTIV;
+ err = ixgbe_switch_flash_banks(adapter, pending, NULL, extack);
+
+ ixgbe_release_nvm(hw);
+
+ return err;
+}
+
+/**
+ * ixgbe_flash_pldm_image - Write a PLDM-formatted firmware image to the device
+ * @devlink: pointer to devlink associated with the device to update
+ * @params: devlink flash update parameters
+ * @extack: netlink extended ACK structure
+ *
+ * Parse the data for a given firmware file, verifying that it is a valid PLDM
+ * formatted image that matches this device.
+ *
+ * Extract the device record Package Data and Component Tables and send them
+ * to the firmware. Extract and write the flash data for each of the three
+ * main flash components, "fw.mgmt", "fw.undi", and "fw.netlist". Notify
+ * firmware once the data is written to the inactive banks.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int ixgbe_flash_pldm_image(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct ixgbe_adapter *adapter = devlink_priv(devlink);
+ struct device *dev = &adapter->pdev->dev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_fwu_priv priv;
+ u8 preservation;
+ int err;
+
+ if (hw->mac.type != ixgbe_mac_e610)
+ return -EOPNOTSUPP;
+
+ switch (params->overwrite_mask) {
+ case 0:
+ /* preserve all settings and identifiers */
+ preservation = IXGBE_ACI_NVM_PRESERVE_ALL;
+ break;
+ case DEVLINK_FLASH_OVERWRITE_SETTINGS:
+ /* Overwrite settings, but preserve vital information such as
+ * device identifiers.
+ */
+ preservation = IXGBE_ACI_NVM_PRESERVE_SELECTED;
+ break;
+ case (DEVLINK_FLASH_OVERWRITE_SETTINGS |
+ DEVLINK_FLASH_OVERWRITE_IDENTIFIERS):
+ /* overwrite both settings and identifiers, preserve nothing */
+ preservation = IXGBE_ACI_NVM_NO_PRESERVATION;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Requested overwrite mask is not supported");
+ return -EOPNOTSUPP;
+ }
+
+	/* Capabilities cannot be read in recovery mode, so a missing
+	 * nvm_unified_update bit must not be treated as an error there.
+	 */
+ if (!hw->dev_caps.common_cap.nvm_unified_update &&
+ (hw->mac.ops.fw_recovery_mode &&
+ !hw->mac.ops.fw_recovery_mode(hw))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Current firmware does not support unified update");
+ return -EOPNOTSUPP;
+ }
+
+ memset(&priv, 0, sizeof(priv));
+
+ priv.context.ops = &ixgbe_fwu_ops_e610;
+ priv.context.dev = dev;
+ priv.extack = extack;
+ priv.adapter = adapter;
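+	/* Start from the preservation bits; each flashed component ORs its
+	 * activation flag into activate_flags in ixgbe_flash_component().
+	 */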
+ priv.activate_flags = preservation;
+
+ devlink_flash_update_status_notify(devlink,
+ "Preparing to flash", NULL, 0, 0);
+
+ err = ixgbe_cancel_pending_update(adapter, NULL, extack);
+ if (err)
+ return err;
+
+ err = ixgbe_acquire_nvm(hw, LIBIE_AQC_RES_ACCESS_WRITE);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to acquire device flash lock");
+ return -EIO;
+ }
+
+ err = pldmfw_flash_image(&priv.context, params->fw);
+ if (err == -ENOENT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Firmware image has no record matching this device");
+ } else if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to flash PLDM image");
+ }
+
+ ixgbe_release_nvm(hw);
+
+ return err;
+}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.h
new file mode 100644
index 000000000000..abdd708c93df
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2025 Intel Corporation. */
+
+#ifndef _IXGBE_FW_UPDATE_H_
+#define _IXGBE_FW_UPDATE_H_
+
+int ixgbe_flash_pldm_image(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack);
+int ixgbe_get_pending_updates(struct ixgbe_adapter *adapter, u8 *pending,
+ struct netlink_ext_ack *extack);
+#endif
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
index 866024f2b9ee..d1f4073b36f9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -9,7 +9,7 @@
#define IXGBE_IPSEC_KEY_BITS 160
static const char aes_gcm_name[] = "rfc4106(gcm(aes))";
-static void ixgbe_ipsec_del_sa(struct xfrm_state *xs);
+static void ixgbe_ipsec_del_sa(struct net_device *dev, struct xfrm_state *xs);
/**
* ixgbe_ipsec_set_tx_sa - set the Tx SA registers
@@ -321,7 +321,7 @@ void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter)
if (r->used) {
if (r->mode & IXGBE_RXTXMOD_VF)
- ixgbe_ipsec_del_sa(r->xs);
+ ixgbe_ipsec_del_sa(adapter->netdev, r->xs);
else
ixgbe_ipsec_set_rx_sa(hw, i, r->xs->id.spi,
r->key, r->salt,
@@ -330,7 +330,7 @@ void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter)
if (t->used) {
if (t->mode & IXGBE_RXTXMOD_VF)
- ixgbe_ipsec_del_sa(t->xs);
+ ixgbe_ipsec_del_sa(adapter->netdev, t->xs);
else
ixgbe_ipsec_set_tx_sa(hw, i, t->key, t->salt);
}
@@ -417,6 +417,7 @@ static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec,
/**
* ixgbe_ipsec_parse_proto_keys - find the key and salt based on the protocol
+ * @dev: pointer to net device
* @xs: pointer to xfrm_state struct
* @mykey: pointer to key array to populate
* @mysalt: pointer to salt value to populate
@@ -424,10 +425,10 @@ static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec,
* This copies the protocol keys and salt to our own data tables. The
* 82599 family only supports the one algorithm.
**/
-static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs,
+static int ixgbe_ipsec_parse_proto_keys(struct net_device *dev,
+ struct xfrm_state *xs,
u32 *mykey, u32 *mysalt)
{
- struct net_device *dev = xs->xso.real_dev;
unsigned char *key_data;
char *alg_name = NULL;
int key_len;
@@ -473,12 +474,13 @@ static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs,
/**
* ixgbe_ipsec_check_mgmt_ip - make sure there is no clash with mgmt IP filters
+ * @dev: pointer to net device
* @xs: pointer to transformer state struct
**/
-static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs)
+static int ixgbe_ipsec_check_mgmt_ip(struct net_device *dev,
+ struct xfrm_state *xs)
{
- struct net_device *dev = xs->xso.real_dev;
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_hw *hw = &adapter->hw;
u32 mfval, manc, reg;
int num_filters = 4;
@@ -556,14 +558,15 @@ static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs)
/**
* ixgbe_ipsec_add_sa - program device with a security association
+ * @dev: pointer to device to program
* @xs: pointer to transformer state struct
* @extack: extack point to fill failure reason
**/
-static int ixgbe_ipsec_add_sa(struct xfrm_state *xs,
+static int ixgbe_ipsec_add_sa(struct net_device *dev,
+ struct xfrm_state *xs,
struct netlink_ext_ack *extack)
{
- struct net_device *dev = xs->xso.real_dev;
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_ipsec *ipsec = adapter->ipsec;
struct ixgbe_hw *hw = &adapter->hw;
int checked, match, first;
@@ -581,7 +584,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs,
return -EINVAL;
}
- if (ixgbe_ipsec_check_mgmt_ip(xs)) {
+ if (ixgbe_ipsec_check_mgmt_ip(dev, xs)) {
NL_SET_ERR_MSG_MOD(extack, "IPsec IP addr clash with mgmt filters");
return -EINVAL;
}
@@ -615,7 +618,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs,
rsa.decrypt = xs->ealg || xs->aead;
/* get the key and salt */
- ret = ixgbe_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt);
+ ret = ixgbe_ipsec_parse_proto_keys(dev, xs, rsa.key, &rsa.salt);
if (ret) {
NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for Rx SA table");
return ret;
@@ -724,7 +727,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs,
if (xs->id.proto & IPPROTO_ESP)
tsa.encrypt = xs->ealg || xs->aead;
- ret = ixgbe_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt);
+ ret = ixgbe_ipsec_parse_proto_keys(dev, xs, tsa.key, &tsa.salt);
if (ret) {
NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for Tx SA table");
memset(&tsa, 0, sizeof(tsa));
@@ -752,12 +755,12 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs,
/**
* ixgbe_ipsec_del_sa - clear out this specific SA
+ * @dev: pointer to device to program
* @xs: pointer to transformer state struct
**/
-static void ixgbe_ipsec_del_sa(struct xfrm_state *xs)
+static void ixgbe_ipsec_del_sa(struct net_device *dev, struct xfrm_state *xs)
{
- struct net_device *dev = xs->xso.real_dev;
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_ipsec *ipsec = adapter->ipsec;
struct ixgbe_hw *hw = &adapter->hw;
u32 zerobuf[4] = {0, 0, 0, 0};
@@ -817,30 +820,9 @@ static void ixgbe_ipsec_del_sa(struct xfrm_state *xs)
}
}
-/**
- * ixgbe_ipsec_offload_ok - can this packet use the xfrm hw offload
- * @skb: current data packet
- * @xs: pointer to transformer state struct
- **/
-static bool ixgbe_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
-{
- if (xs->props.family == AF_INET) {
- /* Offload with IPv4 options is not supported yet */
- if (ip_hdr(skb)->ihl != 5)
- return false;
- } else {
- /* Offload with IPv6 extension headers is not support yet */
- if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
- return false;
- }
-
- return true;
-}
-
static const struct xfrmdev_ops ixgbe_xfrmdev_ops = {
.xdo_dev_state_add = ixgbe_ipsec_add_sa,
.xdo_dev_state_delete = ixgbe_ipsec_del_sa,
- .xdo_dev_offload_ok = ixgbe_ipsec_offload_ok,
};
/**
@@ -862,7 +844,8 @@ void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter, u32 vf)
continue;
if (ipsec->rx_tbl[i].mode & IXGBE_RXTXMOD_VF &&
ipsec->rx_tbl[i].vf == vf)
- ixgbe_ipsec_del_sa(ipsec->rx_tbl[i].xs);
+ ixgbe_ipsec_del_sa(adapter->netdev,
+ ipsec->rx_tbl[i].xs);
}
/* search tx sa table */
@@ -871,7 +854,8 @@ void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter, u32 vf)
continue;
if (ipsec->tx_tbl[i].mode & IXGBE_RXTXMOD_VF &&
ipsec->tx_tbl[i].vf == vf)
- ixgbe_ipsec_del_sa(ipsec->tx_tbl[i].xs);
+ ixgbe_ipsec_del_sa(adapter->netdev,
+ ipsec->tx_tbl[i].xs);
}
}
@@ -951,7 +935,7 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
memcpy(xs->aead->alg_name, aes_gcm_name, sizeof(aes_gcm_name));
/* set up the HW offload */
- err = ixgbe_ipsec_add_sa(xs, NULL);
+ err = ixgbe_ipsec_add_sa(adapter->netdev, xs, NULL);
if (err)
goto err_aead;
@@ -1055,7 +1039,7 @@ int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
xs = ipsec->tx_tbl[sa_idx].xs;
}
- ixgbe_ipsec_del_sa(xs);
+ ixgbe_ipsec_del_sa(adapter->netdev, xs);
/* remove the xs that was made-up in the add request */
kfree_sensitive(xs);
@@ -1073,7 +1057,7 @@ int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
struct ixgbe_tx_buffer *first,
struct ixgbe_ipsec_tx_data *itd)
{
- struct ixgbe_adapter *adapter = netdev_priv(tx_ring->netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(tx_ring->netdev);
struct ixgbe_ipsec *ipsec = adapter->ipsec;
struct xfrm_state *xs;
struct sec_path *sp;
@@ -1163,7 +1147,7 @@ void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
- struct ixgbe_adapter *adapter = netdev_priv(rx_ring->netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(rx_ring->netdev);
__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
__le16 ipsec_pkt_types = cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH |
IXGBE_RXDADV_PKTTYPE_IPSEC_ESP);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 0ee943db3dc9..a1d04914fbbc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include "ixgbe.h"
#include "ixgbe_sriov.h"
@@ -107,6 +107,7 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
if (num_tcs > 4) {
/*
* TCs : TC0/1 TC2/3 TC4-7
@@ -317,7 +318,7 @@ static int ixgbe_xdp_queues(struct ixgbe_adapter *adapter)
* ixgbe_set_dcb_sriov_queues: Allocate queues for SR-IOV devices w/ DCB
* @adapter: board private structure to initialize
*
- * When SR-IOV (Single Root IO Virtualiztion) is enabled, allocate queues
+ * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
* and VM pools where appropriate. Also assign queues based on DCB
* priorities and map accordingly..
*
@@ -491,7 +492,7 @@ static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
* ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices
* @adapter: board private structure to initialize
*
- * When SR-IOV (Single Root IO Virtualiztion) is enabled, allocate queues
+ * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
* and VM pools where appropriate. If RSS is available, then also try and
* enable RSS and map accordingly.
*
@@ -890,7 +891,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
q_vector->rx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS |
IXGBE_ITR_ADAPTIVE_LATENCY;
- /* intialize ITR */
+ /* initialize ITR */
if (txr_count && !rxr_count) {
/* tx only vector */
if (adapter->tx_itr_setting == 1)
@@ -981,7 +982,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);
#ifdef IXGBE_FCOE
- if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
+ if (adapter->netdev->fcoe_mtu) {
struct ixgbe_ring_feature *f;
f = &adapter->ring_feature[RING_F_FCOE];
if ((rxr_idx >= f->offset) &&
@@ -1292,7 +1293,8 @@ void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
/* set bits to identify this as an advanced context descriptor */
- type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
+ type_tucmd |= IXGBE_TXD_CMD_DEXT |
+ FIELD_PREP(IXGBE_ADVTXD_DTYP_MASK, IXGBE_ADVTXD_DTYP_CTXT);
context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
context_desc->fceof_saidx = cpu_to_le32(fceof_saidx);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index f985252c8c8d..034618e79169 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include <linux/types.h>
#include <linux/module.h>
@@ -9,6 +9,7 @@
#include <linux/string.h>
#include <linux/in.h>
#include <linux/interrupt.h>
+#include <linux/iopoll.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
@@ -42,11 +43,14 @@
#include "ixgbe.h"
#include "ixgbe_common.h"
+#include "ixgbe_e610.h"
#include "ixgbe_dcb_82599.h"
+#include "ixgbe_mbx.h"
#include "ixgbe_phy.h"
#include "ixgbe_sriov.h"
#include "ixgbe_model.h"
#include "ixgbe_txrx_common.h"
+#include "devlink/devlink.h"
char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
@@ -72,6 +76,7 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = {
[board_x550em_x_fw] = &ixgbe_x550em_x_fw_info,
[board_x550em_a] = &ixgbe_x550em_a_info,
[board_x550em_a_fw] = &ixgbe_x550em_a_fw_info,
+ [board_e610] = &ixgbe_e610_info,
};
/* ixgbe_pci_tbl - PCI Device ID Table
@@ -130,6 +135,11 @@ static const struct pci_device_id ixgbe_pci_tbl[] = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T), board_x550em_a_fw },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T_L), board_x550em_a_fw },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_BACKPLANE), board_e610},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_SFP), board_e610},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_10G_T), board_e610},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_2_5G_T), board_e610},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_SGMII), board_e610},
/* required last entry */
{0, }
};
@@ -162,7 +172,7 @@ static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
-MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
+MODULE_IMPORT_NS("LIBIE_FWLOG");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL v2");
@@ -173,6 +183,8 @@ static struct workqueue_struct *ixgbe_wq;
static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);
static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *);
+static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *);
+static void ixgbe_watchdog_update_link(struct ixgbe_adapter *);
static const struct net_device_ops ixgbe_netdev_ops;
@@ -236,8 +248,11 @@ static int ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
* bandwidth details should be gathered from the parent bus instead of from the
* device. Used to ensure that various locations all have the correct device ID
* checks.
+ *
+ * Return: true if information should be collected from the parent bus, false
+ * otherwise
*/
-static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw)
+static bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw)
{
switch (hw->device_id) {
case IXGBE_DEV_ID_82599_SFP_SF_QP:
@@ -876,6 +891,7 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
if (direction == -1) {
/* other causes */
msix_vector |= IXGBE_IVAR_ALLOC_VAL;
@@ -915,6 +931,7 @@ void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
mask = (qmask & 0xFFFFFFFF);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
mask = (qmask >> 32);
@@ -952,10 +969,6 @@ static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)
for (i = 0; i < adapter->num_tx_queues; i++)
clear_bit(__IXGBE_HANG_CHECK_ARMED,
&adapter->tx_ring[i]->state);
-
- for (i = 0; i < adapter->num_xdp_queues; i++)
- clear_bit(__IXGBE_HANG_CHECK_ARMED,
- &adapter->xdp_ring[i]->state);
}
static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
@@ -1025,7 +1038,49 @@ static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
return ((head <= tail) ? tail : tail + ring->count) - head;
}
-static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
+/**
+ * ixgbe_get_vf_idx - provide VF index number based on queue index
+ * @adapter: pointer to the adapter struct
+ * @queue: Tx queue identifier
+ * @vf: output VF index
+ *
+ * Provide the VF index number associated with the input queue.
+ *
+ * Return: 0 if the VF index was provided, or a negative error number.
+ */
+static int ixgbe_get_vf_idx(struct ixgbe_adapter *adapter, u16 queue, u16 *vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u8 queue_count;
+ u32 reg;
+
+ if (queue >= adapter->num_tx_queues)
+ return -EINVAL;
+
+ /* Determine number of queues by checking
+ * number of virtual functions
+ */
+ reg = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
+ switch (reg & IXGBE_GCR_EXT_VT_MODE_MASK) {
+ case IXGBE_GCR_EXT_VT_MODE_64:
+ queue_count = IXGBE_64VFS_QUEUES;
+ break;
+ case IXGBE_GCR_EXT_VT_MODE_32:
+ queue_count = IXGBE_32VFS_QUEUES;
+ break;
+ case IXGBE_GCR_EXT_VT_MODE_16:
+ queue_count = IXGBE_16VFS_QUEUES;
+ break;
+ default:
+ return -EINVAL;
+ }
+
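+	/* Tx queues are assigned to VFs in contiguous blocks, so the VF
+	 * index is the queue index divided by the per-VF queue count.
+	 */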
+ *vf = queue / queue_count;
+
+ return 0;
+}
+
+static bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
{
u32 tx_done = ixgbe_get_tx_completed(tx_ring);
u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
@@ -1081,7 +1136,7 @@ static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
static int ixgbe_tx_maxrate(struct net_device *netdev,
int queue_index, u32 maxrate)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u32 bcnrc_val = ixgbe_link_mbps(adapter);
@@ -1144,6 +1199,148 @@ void ixgbe_update_rx_ring_stats(struct ixgbe_ring *rx_ring,
}
/**
+ * ixgbe_pf_handle_tx_hang - handle Tx hang on PF
+ * @tx_ring: tx ring that hung
+ * @next: next_to_clean index of the hung ring
+ *
+ * Prints a message containing details about the tx hang.
+ */
+static void ixgbe_pf_handle_tx_hang(struct ixgbe_ring *tx_ring,
+ unsigned int next)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(tx_ring->netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ e_err(drv, "Detected Tx Unit Hang\n"
+ " Tx Queue <%d>\n"
+ " TDH, TDT <%x>, <%x>\n"
+ " next_to_use <%x>\n"
+ " next_to_clean <%x>\n"
+ "tx_buffer_info[next_to_clean]\n"
+ " time_stamp <%lx>\n"
+ " jiffies <%lx>\n",
+ tx_ring->queue_index,
+ IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
+ IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
+ tx_ring->next_to_use, next,
+ tx_ring->tx_buffer_info[next].time_stamp, jiffies);
+
+ netif_stop_subqueue(tx_ring->netdev,
+ tx_ring->queue_index);
+}
+
+/**
+ * ixgbe_vf_handle_tx_hang - handle Tx hang on VF
+ * @adapter: structure containing ring specific data
+ * @vf: VF index
+ *
+ * Print a message containing details about malicious driver detection.
+ * Set malicious VF link down if the detection happened several times.
+ */
+static void ixgbe_vf_handle_tx_hang(struct ixgbe_adapter *adapter, u16 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ if (adapter->hw.mac.type != ixgbe_mac_e610)
+ return;
+
+ e_warn(drv,
+ "Malicious Driver Detection tx hang detected on PF %d VF %d MAC: %pM",
+ hw->bus.func, vf, adapter->vfinfo[vf].vf_mac_addresses);
+
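+	/* Count hangs per VF and force the VF link down once the threshold
+	 * is reached, to contain the offending VF.
+	 */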
+ adapter->tx_hang_count[vf]++;
+ if (adapter->tx_hang_count[vf] == IXGBE_MAX_TX_VF_HANGS) {
+ ixgbe_set_vf_link_state(adapter, vf,
+ IFLA_VF_LINK_STATE_DISABLE);
+ adapter->tx_hang_count[vf] = 0;
+ }
+}
+
+static u32 ixgbe_poll_tx_icache(struct ixgbe_hw *hw, u16 queue, u16 idx)
+{
+ IXGBE_WRITE_REG(hw, IXGBE_TXDESCIC, queue * idx);
+ return IXGBE_READ_REG(hw, IXGBE_TXDESCIC);
+}
+
+/**
+ * ixgbe_check_illegal_queue - search for queue with illegal packet
+ * @adapter: structure containing ring specific data
+ * @queue: queue index
+ *
+ * Check whether a tx descriptor associated with the input queue
+ * contains an illegal packet.
+ *
+ * Return: true if the queue contains an illegal packet.
+ */
+static bool ixgbe_check_illegal_queue(struct ixgbe_adapter *adapter,
+ u16 queue)
+{
+ u32 hdr_len_reg, mss_len_reg, type_reg;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 mss_len, header_len, reg;
+
+ for (u16 i = 0; i < IXGBE_MAX_TX_DESCRIPTORS; i++) {
+ /* HW will clear bit IXGBE_TXDESCIC_READY when address
+ * is written to address field. HW will set this bit
+ * when iCache read is done, and data is ready at TIC_DWx.
+ * Set descriptor address.
+ */
+ read_poll_timeout(ixgbe_poll_tx_icache, reg,
+ !(reg & IXGBE_TXDESCIC_READY), 0, 0, false,
+ hw, queue, i);
+
+ /* read tx descriptor access registers */
+ hdr_len_reg = IXGBE_READ_REG(hw, IXGBE_TIC_DW2(IXGBE_VLAN_MACIP_LENS_REG));
+ type_reg = IXGBE_READ_REG(hw, IXGBE_TIC_DW2(IXGBE_TYPE_TUCMD_MLHL));
+ mss_len_reg = IXGBE_READ_REG(hw, IXGBE_TIC_DW2(IXGBE_MSS_L4LEN_IDX));
+
+ /* check if Advanced Context Descriptor */
+ if (FIELD_GET(IXGBE_ADVTXD_DTYP_MASK, type_reg) !=
+ IXGBE_ADVTXD_DTYP_CTXT)
+ continue;
+
+ /* check for illegal MSS and Header length */
+ mss_len = FIELD_GET(IXGBE_ADVTXD_MSS_MASK, mss_len_reg);
+ header_len = FIELD_GET(IXGBE_ADVTXD_HEADER_LEN_MASK,
+ hdr_len_reg);
+ if ((mss_len + header_len) > SZ_16K) {
+ e_warn(probe, "mss len + header len too long\n");
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/**
+ * ixgbe_handle_mdd_event - handle mdd event
+ * @adapter: structure containing ring specific data
+ * @tx_ring: tx descriptor ring to handle
+ *
+ * Reset the VF driver if a malicious VF is detected or an illegal packet
+ * is detected in any queue.
+ */
+static void ixgbe_handle_mdd_event(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *tx_ring)
+{
+ u16 vf, q;
+
+ if (adapter->vfinfo && ixgbe_check_mdd_event(adapter)) {
+ /* vf mdd info and malicious vf detected */
+ if (!ixgbe_get_vf_idx(adapter, tx_ring->queue_index, &vf))
+ ixgbe_vf_handle_tx_hang(adapter, vf);
+ } else {
+ /* malicious vf not detected */
+ for (q = 0; q < IXGBE_MAX_TX_QUEUES; q++) {
+ if (ixgbe_check_illegal_queue(adapter, q) &&
+ !ixgbe_get_vf_idx(adapter, q, &vf))
+ /* illegal queue detected */
+ ixgbe_vf_handle_tx_hang(adapter, vf);
+ }
+ }
+}
+
+/**
* ixgbe_clean_tx_irq - Reclaim resources after transmit completes
* @q_vector: structure containing interrupt and ring information
* @tx_ring: tx ring to clean
@@ -1249,27 +1446,14 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
total_bytes);
adapter->tx_ipsec += total_ipsec;
+ if (ring_is_xdp(tx_ring))
+ return !!budget;
+
if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
- /* schedule immediate reset if we believe we hung */
- struct ixgbe_hw *hw = &adapter->hw;
- e_err(drv, "Detected Tx Unit Hang %s\n"
- " Tx Queue <%d>\n"
- " TDH, TDT <%x>, <%x>\n"
- " next_to_use <%x>\n"
- " next_to_clean <%x>\n"
- "tx_buffer_info[next_to_clean]\n"
- " time_stamp <%lx>\n"
- " jiffies <%lx>\n",
- ring_is_xdp(tx_ring) ? "(XDP)" : "",
- tx_ring->queue_index,
- IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
- IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
- tx_ring->next_to_use, i,
- tx_ring->tx_buffer_info[i].time_stamp, jiffies);
-
- if (!ring_is_xdp(tx_ring))
- netif_stop_subqueue(tx_ring->netdev,
- tx_ring->queue_index);
+ if (adapter->hw.mac.type == ixgbe_mac_e610)
+ ixgbe_handle_mdd_event(adapter, tx_ring);
+
+ ixgbe_pf_handle_tx_hang(tx_ring, i);
e_info(probe,
"tx hang %d detected on queue %d, resetting adapter\n",
@@ -1282,9 +1466,6 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
return true;
}
- if (ring_is_xdp(tx_ring))
- return !!budget;
-
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
if (!__netif_txq_completed_wake(txq, total_packets, total_bytes,
@@ -1909,10 +2090,6 @@ bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
{
struct net_device *netdev = rx_ring->netdev;
- /* XDP packets use error pointer so abort at this point */
- if (IS_ERR(skb))
- return true;
-
/* Verify netdev is present, and that packet does not have any
* errors that would be unacceptable to the netdev.
*/
@@ -2095,7 +2272,7 @@ static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring,
/* hand second half of page back to the ring */
ixgbe_reuse_rx_page(rx_ring, rx_buffer);
} else {
- if (!IS_ERR(skb) && IXGBE_CB(skb)->dma == rx_buffer->dma) {
+ if (skb && IXGBE_CB(skb)->dma == rx_buffer->dma) {
/* the page has been released from the ring */
IXGBE_CB(skb)->page_released = true;
} else {
@@ -2189,7 +2366,7 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
struct sk_buff *skb;
/* Prefetch first cache line of first page. If xdp->data_meta
- * is unused, this points extactly as xdp->data, otherwise we
+ * is unused, this points exactly as xdp->data, otherwise we
* likely have a consumer accessing first few bytes of meta
* data, and then actual data.
*/
@@ -2220,9 +2397,9 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
return skb;
}
-static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *rx_ring,
- struct xdp_buff *xdp)
+static int ixgbe_run_xdp(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *rx_ring,
+ struct xdp_buff *xdp)
{
int err, result = IXGBE_XDP_PASS;
struct bpf_prog *xdp_prog;
@@ -2272,7 +2449,7 @@ out_failure:
break;
}
xdp_out:
- return ERR_PTR(-result);
+ return result;
}
static unsigned int ixgbe_rx_frame_truesize(struct ixgbe_ring *rx_ring,
@@ -2312,7 +2489,7 @@ static void ixgbe_rx_buffer_flip(struct ixgbe_ring *rx_ring,
* This function provides a "bounce buffer" approach to Rx interrupt
* processing. The advantage to this is that on systems that have
* expensive overhead for IOMMU access this provides a means of avoiding
- * it by maintaining the mapping of the page to the syste.
+ * it by maintaining the mapping of the page to the system.
*
* Returns amount of work completed
**/
@@ -2330,6 +2507,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
unsigned int offset = rx_ring->rx_offset;
unsigned int xdp_xmit = 0;
struct xdp_buff xdp;
+ int xdp_res = 0;
/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
@@ -2375,12 +2553,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
/* At larger PAGE_SIZE, frame_sz depend on len size */
xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, size);
#endif
- skb = ixgbe_run_xdp(adapter, rx_ring, &xdp);
+ xdp_res = ixgbe_run_xdp(adapter, rx_ring, &xdp);
}
- if (IS_ERR(skb)) {
- unsigned int xdp_res = -PTR_ERR(skb);
-
+ if (xdp_res) {
if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
xdp_xmit |= xdp_res;
ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size);
@@ -2400,7 +2576,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
}
/* exit if we failed to retrieve a buffer */
- if (!skb) {
+ if (!xdp_res && !skb) {
rx_ring->rx_stats.alloc_rx_buff_failed++;
rx_buffer->pagecnt_bias++;
break;
@@ -2414,7 +2590,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
continue;
/* verify the packet layout is correct */
- if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb))
+ if (xdp_res || ixgbe_cleanup_headers(rx_ring, rx_desc, skb))
continue;
/* probably a little skewed due to removing CRC */
@@ -2515,6 +2691,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
ixgbe_set_ivar(adapter, -1, 1, v_idx);
break;
default:
@@ -2528,6 +2705,9 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
IXGBE_EIMS_MAILBOX |
IXGBE_EIMS_LSC);
+ if (adapter->hw.mac.type == ixgbe_mac_e610)
+ mask &= ~IXGBE_EIMS_FW_EVENT;
+
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
}
@@ -2744,6 +2924,7 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
/*
* set the WDIS bit to not clear the timer bits and cause an
* immediate assertion of the interrupt
@@ -2966,6 +3147,226 @@ static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
}
}
+/**
+ * ixgbe_check_phy_fw_load - check if PHY FW load failed
+ * @adapter: pointer to adapter structure
+ * @link_cfg_err: bitmap from the link info structure
+ *
+ * Check if external PHY FW load failed and print an error message if it did.
+ */
+static void ixgbe_check_phy_fw_load(struct ixgbe_adapter *adapter,
+ u8 link_cfg_err)
+{
+ if (!(link_cfg_err & IXGBE_ACI_LINK_EXTERNAL_PHY_LOAD_FAILURE)) {
+ adapter->flags2 &= ~IXGBE_FLAG2_PHY_FW_LOAD_FAILED;
+ return;
+ }
+
+ if (adapter->flags2 & IXGBE_FLAG2_PHY_FW_LOAD_FAILED)
+ return;
+
+ if (link_cfg_err & IXGBE_ACI_LINK_EXTERNAL_PHY_LOAD_FAILURE) {
+ netdev_err(adapter->netdev, "Device failed to load the FW for the external PHY. Please download and install the latest NVM for your device and try again\n");
+ adapter->flags2 |= IXGBE_FLAG2_PHY_FW_LOAD_FAILED;
+ }
+}
+
+/**
+ * ixgbe_check_module_power - check module power level
+ * @adapter: pointer to adapter structure
+ * @link_cfg_err: bitmap from the link info structure
+ *
+ * Check module power level returned by a previous call to aci_get_link_info
+ * and print error messages if module power level is not supported.
+ */
+static void ixgbe_check_module_power(struct ixgbe_adapter *adapter,
+ u8 link_cfg_err)
+{
+ /* If module power level is supported, clear the flag. */
+ if (!(link_cfg_err & (IXGBE_ACI_LINK_INVAL_MAX_POWER_LIMIT |
+ IXGBE_ACI_LINK_MODULE_POWER_UNSUPPORTED))) {
+ adapter->flags2 &= ~IXGBE_FLAG2_MOD_POWER_UNSUPPORTED;
+ return;
+ }
+
+ /* If IXGBE_FLAG2_MOD_POWER_UNSUPPORTED was previously set and the
+ * above block didn't clear this bit, there's nothing to do.
+ */
+ if (adapter->flags2 & IXGBE_FLAG2_MOD_POWER_UNSUPPORTED)
+ return;
+
+ if (link_cfg_err & IXGBE_ACI_LINK_INVAL_MAX_POWER_LIMIT) {
+ netdev_err(adapter->netdev, "The installed module is incompatible with the device's NVM image. Cannot start link.\n");
+ adapter->flags2 |= IXGBE_FLAG2_MOD_POWER_UNSUPPORTED;
+ } else if (link_cfg_err & IXGBE_ACI_LINK_MODULE_POWER_UNSUPPORTED) {
+ netdev_err(adapter->netdev, "The module's power requirements exceed the device's power supply. Cannot start link.\n");
+ adapter->flags2 |= IXGBE_FLAG2_MOD_POWER_UNSUPPORTED;
+ }
+}
+
+/**
+ * ixgbe_check_link_cfg_err - check if link configuration failed
+ * @adapter: pointer to adapter structure
+ * @link_cfg_err: bitmap from the link info structure
+ *
+ * Print if any link configuration failure happens due to the value in the
+ * link_cfg_err parameter in the link info structure.
+ */
+static void ixgbe_check_link_cfg_err(struct ixgbe_adapter *adapter,
+ u8 link_cfg_err)
+{
+ ixgbe_check_module_power(adapter, link_cfg_err);
+ ixgbe_check_phy_fw_load(adapter, link_cfg_err);
+}
+
+/**
+ * ixgbe_process_link_status_event - process the link event
+ * @adapter: pointer to adapter structure
+ * @link_up: true if the physical link is up and false if it is down
+ * @link_speed: current link speed received from the link event
+ *
+ * Return: 0 on success or negative value on failure.
+ */
+static int
+ixgbe_process_link_status_event(struct ixgbe_adapter *adapter, bool link_up,
+ u16 link_speed)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int status;
+
+	/* Update the link info structures and re-enable link events;
+	 * don't bail on failure since other bookkeeping is still needed.
+ */
+ status = ixgbe_update_link_info(hw);
+ if (status)
+ e_dev_err("Failed to update link status, err %d aq_err %d\n",
+ status, hw->aci.last_status);
+
+ ixgbe_check_link_cfg_err(adapter, hw->link.link_info.link_cfg_err);
+
+ /* Check if the link state is up after updating link info, and treat
+ * this event as an UP event since the link is actually UP now.
+ */
+ if (hw->link.link_info.link_info & IXGBE_ACI_LINK_UP)
+ link_up = true;
+
+ /* Turn off PHY if media was removed. */
+ if (!(adapter->flags2 & IXGBE_FLAG2_NO_MEDIA) &&
+ !(hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE))
+ adapter->flags2 |= IXGBE_FLAG2_NO_MEDIA;
+
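+	/* Nothing to do if neither the link state nor the speed changed. */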
+ if (link_up == adapter->link_up &&
+ link_up == netif_carrier_ok(adapter->netdev) &&
+ link_speed == adapter->link_speed)
+ return 0;
+
+ adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
+ adapter->link_check_timeout = jiffies;
+ ixgbe_watchdog_update_link(adapter);
+
+ if (link_up)
+ ixgbe_watchdog_link_is_up(adapter);
+ else
+ ixgbe_watchdog_link_is_down(adapter);
+
+ return 0;
+}
+
+/**
+ * ixgbe_handle_link_status_event - handle link status event via ACI
+ * @adapter: pointer to adapter structure
+ * @e: event structure containing link status info
+ */
+static void
+ixgbe_handle_link_status_event(struct ixgbe_adapter *adapter,
+ struct ixgbe_aci_event *e)
+{
+ struct ixgbe_aci_cmd_get_link_status_data *link_data;
+ u16 link_speed;
+ bool link_up;
+
+ link_data = (struct ixgbe_aci_cmd_get_link_status_data *)e->msg_buf;
+
+ link_up = !!(link_data->link_info & IXGBE_ACI_LINK_UP);
+ link_speed = le16_to_cpu(link_data->link_speed);
+
+ if (ixgbe_process_link_status_event(adapter, link_up, link_speed))
+ e_dev_warn("Could not process link status event");
+}
+
+/**
+ * ixgbe_schedule_fw_event - schedule Firmware event
+ * @adapter: pointer to the adapter structure
+ *
+ * If the adapter is not in down, removing or resetting state,
+ * an event is scheduled.
+ */
+static void ixgbe_schedule_fw_event(struct ixgbe_adapter *adapter)
+{
+ if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
+ !test_bit(__IXGBE_REMOVING, &adapter->state) &&
+ !test_bit(__IXGBE_RESETTING, &adapter->state)) {
+ adapter->flags2 |= IXGBE_FLAG2_FW_ASYNC_EVENT;
+ ixgbe_service_event_schedule(adapter);
+ }
+}
+
+/**
+ * ixgbe_aci_event_cleanup - release msg_buf memory
+ * @event: pointer to the event holding msg_buf to be released
+ *
+ * Clean memory allocated for event's msg_buf. Implements auto memory cleanup.
+ */
+static void ixgbe_aci_event_cleanup(struct ixgbe_aci_event *event)
+{
+ kfree(event->msg_buf);
+}
+
+/**
+ * ixgbe_handle_fw_event - handle Firmware event
+ * @adapter: pointer to the adapter structure
+ *
+ * Obtain an event from the ACI and process it according to the type of the
+ * event and the opcode.
+ */
+static void ixgbe_handle_fw_event(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_aci_event event __cleanup(ixgbe_aci_event_cleanup);
+ struct ixgbe_hw *hw = &adapter->hw;
+ bool pending = false;
+ int err;
+
+ if (adapter->flags2 & IXGBE_FLAG2_FW_ASYNC_EVENT)
+ adapter->flags2 &= ~IXGBE_FLAG2_FW_ASYNC_EVENT;
+ event.buf_len = IXGBE_ACI_MAX_BUFFER_SIZE;
+ event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
+ if (!event.msg_buf)
+ return;
+
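+	/* Drain all pending events from the ACI; firmware indicates via
+	 * the pending flag whether more events remain.
+	 */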
+ do {
+ err = ixgbe_aci_get_event(hw, &event, &pending);
+ if (err)
+ break;
+
+ switch (le16_to_cpu(event.desc.opcode)) {
+ case ixgbe_aci_opc_get_link_status:
+ ixgbe_handle_link_status_event(adapter, &event);
+ break;
+ case ixgbe_aci_opc_temp_tca_event:
+ e_crit(drv, "%s\n", ixgbe_overheat_msg);
+ ixgbe_down(adapter);
+ break;
+ case libie_aqc_opc_fw_logs_event:
+ libie_get_fwlog_data(&hw->fwlog, event.msg_buf,
+ le16_to_cpu(event.desc.datalen));
+ break;
+ default:
+ e_warn(hw, "unknown FW async event captured\n");
+ break;
+ }
+ } while (pending);
+}
+
static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
u64 qmask)
{
@@ -2982,6 +3383,7 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
mask = (qmask & 0xFFFFFFFF);
if (mask)
IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
@@ -3035,6 +3437,9 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_e610:
+ mask |= IXGBE_EIMS_FW_EVENT;
+ fallthrough;
case ixgbe_mac_x550em_a:
if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
@@ -3091,12 +3496,16 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
if (eicr & IXGBE_EICR_MAILBOX)
ixgbe_msg_task(adapter);
+ if (eicr & IXGBE_EICR_FW_EVENT)
+ ixgbe_schedule_fw_event(adapter);
+
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
if (hw->phy.type == ixgbe_phy_x550em_ext_t &&
(eicr & IXGBE_EICR_GPI_SDP0_X540)) {
adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT;
@@ -3334,6 +3743,9 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
if (eicr & IXGBE_EICR_LSC)
ixgbe_check_lsc(adapter);
+ if (eicr & IXGBE_EICR_FW_EVENT)
+ ixgbe_schedule_fw_event(adapter);
+
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
ixgbe_check_sfp_event(adapter, eicr);
@@ -3342,6 +3754,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
if (eicr & IXGBE_EICR_ECC) {
e_info(link, "Received ECC Err, initiating reset\n");
set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
@@ -3442,6 +3855,7 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
@@ -3689,8 +4103,12 @@ void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
#endif
{
- int i;
bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+
+ if (hw->mac.ops.disable_mdd)
+ hw->mac.ops.disable_mdd(hw);
if (adapter->ixgbe_ieee_pfc)
pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
@@ -3712,6 +4130,9 @@ static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
for (i = 0; i < adapter->num_rx_queues; i++)
ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]);
}
+
+ if (hw->mac.ops.enable_mdd)
+ hw->mac.ops.enable_mdd(hw);
}
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
@@ -4359,6 +4780,7 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
if (adapter->num_vfs)
rdrxctl |= IXGBE_RDRXCTL_PSP;
fallthrough;
@@ -4434,7 +4856,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
static int ixgbe_vlan_rx_add_vid(struct net_device *netdev,
__be16 proto, u16 vid)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
/* add VID to filter table */
@@ -4493,7 +4915,7 @@ void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid)
static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
__be16 proto, u16 vid)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
/* remove VID from filter table */
@@ -4526,6 +4948,7 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbe_ring *ring = adapter->rx_ring[i];
@@ -4564,6 +4987,7 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbe_ring *ring = adapter->rx_ring[i];
@@ -4638,7 +5062,7 @@ static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset)
/* pull VLAN ID from VLVF */
vid = vlvf & VLAN_VID_MASK;
- /* only concern outselves with a certain range */
+ /* only concern ourselves with a certain range */
if (vid < vid_start || vid >= vid_end)
continue;
@@ -4716,7 +5140,7 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
**/
static int ixgbe_write_mc_addr_list(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
if (!netif_running(netdev))
@@ -4892,7 +5316,7 @@ int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
static int ixgbe_uc_sync(struct net_device *netdev, const unsigned char *addr)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
int ret;
ret = ixgbe_add_mac_filter(adapter, addr, VMDQ_P(0));
@@ -4902,7 +5326,7 @@ static int ixgbe_uc_sync(struct net_device *netdev, const unsigned char *addr)
static int ixgbe_uc_unsync(struct net_device *netdev, const unsigned char *addr)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
ixgbe_del_mac_filter(adapter, addr, VMDQ_P(0));
@@ -4920,7 +5344,7 @@ static int ixgbe_uc_unsync(struct net_device *netdev, const unsigned char *addr)
**/
void ixgbe_set_rx_mode(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
netdev_features_t features = netdev->features;
@@ -5022,7 +5446,7 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
static int ixgbe_udp_tunnel_sync(struct net_device *dev, unsigned int table)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_hw *hw = &adapter->hw;
struct udp_tunnel_info ti;
@@ -5080,7 +5504,7 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
netif_set_tso_max_size(adapter->netdev, 32768);
#ifdef IXGBE_FCOE
- if (adapter->netdev->features & NETIF_F_FCOE_MTU)
+ if (adapter->netdev->fcoe_mtu)
max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
#endif
@@ -5137,8 +5561,7 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
#ifdef IXGBE_FCOE
/* FCoE traffic class uses FCOE jumbo frames */
- if ((dev->features & NETIF_F_FCOE_MTU) &&
- (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
+ if (dev->fcoe_mtu && tc < IXGBE_FCOE_JUMBO_FRAME_SIZE &&
(pb == ixgbe_fcoe_get_tc(adapter)))
tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
#endif
@@ -5149,6 +5572,7 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
dv_id = IXGBE_DV_X540(link, tc);
break;
default:
@@ -5198,8 +5622,7 @@ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
#ifdef IXGBE_FCOE
/* FCoE traffic class uses FCOE jumbo frames */
- if ((dev->features & NETIF_F_FCOE_MTU) &&
- (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
+ if (dev->fcoe_mtu && tc < IXGBE_FCOE_JUMBO_FRAME_SIZE &&
(pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up)))
tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
#endif
@@ -5210,6 +5633,7 @@ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
dv_id = IXGBE_LOW_DV_X540(tc);
break;
default:
@@ -5512,6 +5936,48 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_enable_link_status_events - enable link status events
+ * @adapter: pointer to the adapter structure
+ * @mask: event mask to be set
+ *
+ * Enables link status events by invoking ixgbe_configure_lse()
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_enable_link_status_events(struct ixgbe_adapter *adapter,
+ u16 mask)
+{
+ int err;
+
+ err = ixgbe_configure_lse(&adapter->hw, true, mask);
+ if (err)
+ return err;
+
+ adapter->lse_mask = mask;
+ return 0;
+}
+
+/**
+ * ixgbe_disable_link_status_events - disable link status events
+ * @adapter: pointer to the adapter structure
+ *
+ * Disables link status events by invoking ixgbe_configure_lse()
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_disable_link_status_events(struct ixgbe_adapter *adapter)
+{
+ int err;
+
+ err = ixgbe_configure_lse(&adapter->hw, false, adapter->lse_mask);
+ if (err)
+ return err;
+
+ adapter->lse_mask = 0;
+ return 0;
+}
+
+/**
* ixgbe_sfp_link_config - set up SFP+ link
* @adapter: pointer to private adapter struct
**/
@@ -5534,13 +6000,21 @@ static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
* ixgbe_non_sfp_link_config - set up non-SFP+ link
* @hw: pointer to private hardware struct
*
- * Returns 0 on success, negative on failure
+ * Configure non-SFP link.
+ *
+ * Return: 0 on success, negative on failure
**/
static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
{
- u32 speed;
+ struct ixgbe_adapter *adapter = container_of(hw, struct ixgbe_adapter,
+ hw);
+ u16 mask = ~((u16)(IXGBE_ACI_LINK_EVENT_UPDOWN |
+ IXGBE_ACI_LINK_EVENT_MEDIA_NA |
+ IXGBE_ACI_LINK_EVENT_MODULE_QUAL_FAIL |
+ IXGBE_ACI_LINK_EVENT_PHY_FW_LOAD_FAIL));
bool autoneg, link_up = false;
int ret = -EIO;
+ u32 speed;
if (hw->mac.ops.check_link)
ret = hw->mac.ops.check_link(hw, &speed, &link_up, false);
@@ -5563,13 +6037,53 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
if (ret)
return ret;
- if (hw->mac.ops.setup_link)
+ if (hw->mac.ops.setup_link) {
+ if (adapter->hw.mac.type == ixgbe_mac_e610) {
+ ret = ixgbe_enable_link_status_events(adapter, mask);
+ if (ret)
+ return ret;
+ }
ret = hw->mac.ops.setup_link(hw, speed, link_up);
+ }
return ret;
}
/**
+ * ixgbe_check_media_subtask - check for media
+ * @adapter: pointer to adapter structure
+ *
+ * If media is available then initialize PHY user configuration. Configure the
+ * PHY if the interface is up.
+ */
+static void ixgbe_check_media_subtask(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ /* No need to check for media if it's already present */
+ if (!(adapter->flags2 & IXGBE_FLAG2_NO_MEDIA))
+ return;
+
+ /* Refresh link info and check if media is present */
+ if (ixgbe_update_link_info(hw))
+ return;
+
+ ixgbe_check_link_cfg_err(adapter, hw->link.link_info.link_cfg_err);
+
+ if (hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE) {
+ /* PHY settings are reset on media insertion; reconfigure
+ * the PHY to preserve settings.
+ */
+ if (!(ixgbe_non_sfp_link_config(&adapter->hw)))
+ adapter->flags2 &= ~IXGBE_FLAG2_NO_MEDIA;
+
+ /* A Link Status Event will be generated; the event handler
+ * will complete bringing the interface up
+ */
+ }
+}
+
+/**
* ixgbe_clear_vf_stats_counters - Clear out VF stats after reset
* @adapter: board private structure
*
@@ -5632,6 +6146,7 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
default:
IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
@@ -5982,6 +6497,7 @@ dma_engine_disable:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
(IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
~IXGBE_DMATXCTL_TE));
@@ -6200,7 +6716,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
- del_timer_sync(&adapter->service_timer);
+ timer_delete_sync(&adapter->service_timer);
if (adapter->num_vfs) {
/* Clear EITR Select mapping */
@@ -6226,6 +6742,8 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
ixgbe_clean_all_tx_rings(adapter);
ixgbe_clean_all_rx_rings(adapter);
+ if (adapter->hw.mac.type == ixgbe_mac_e610)
+ ixgbe_disable_link_status_events(adapter);
}
/**
@@ -6260,7 +6778,7 @@ static void ixgbe_set_eee_capable(struct ixgbe_adapter *adapter)
**/
static void ixgbe_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
/* Do the reset outside of interrupt context */
ixgbe_tx_timeout_reset(adapter);
@@ -6281,6 +6799,7 @@ static void ixgbe_init_dcb(struct ixgbe_adapter *adapter)
break;
case ixgbe_mac_X540:
case ixgbe_mac_X550:
+ case ixgbe_mac_e610:
adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
break;
@@ -6344,6 +6863,8 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
hw->subsystem_vendor_id = pdev->subsystem_vendor;
hw->subsystem_device_id = pdev->subsystem_device;
+ hw->mac.max_link_up_time = IXGBE_LINK_UP_TIME;
+
/* get_invariants needs the device IDs */
ii->get_invariants(hw);
@@ -6457,6 +6978,13 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
break;
}
+ /* Make sure the SWFW semaphore is in a valid state */
+ if (hw->mac.ops.init_swfw_sync)
+ hw->mac.ops.init_swfw_sync(hw);
+
+ if (hw->mac.type == ixgbe_mac_e610)
+ mutex_init(&hw->aci.lock);
+
#ifdef IXGBE_FCOE
/* FCoE support exists, always init the FCoE lock */
spin_lock_init(&adapter->fcoe.lock);
@@ -6506,7 +7034,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;
/* initialize eeprom parameters */
- if (ixgbe_init_eeprom_params_generic(hw)) {
+ if (hw->eeprom.ops.init_params(hw)) {
e_dev_err("EEPROM initialization failed\n");
return -EIO;
}
@@ -6822,7 +7350,7 @@ static int ixgbe_max_xdp_frame_size(struct ixgbe_adapter *adapter)
**/
static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (ixgbe_enabled_xdp_adapter(adapter)) {
int new_frame_size = new_mtu + IXGBE_PKT_HDR_PAD;
@@ -6847,7 +7375,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
netdev->mtu, new_mtu);
/* must set new MTU before calling down or up */
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (netif_running(netdev))
ixgbe_reinit_locked(adapter);
@@ -6869,7 +7397,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
**/
int ixgbe_open(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
int err, queues;
@@ -6911,6 +7439,19 @@ int ixgbe_open(struct net_device *netdev)
ixgbe_up_complete(adapter);
udp_tunnel_nic_reset_ntf(netdev);
+ if (adapter->hw.mac.type == ixgbe_mac_e610) {
+ int err = ixgbe_update_link_info(&adapter->hw);
+
+ if (err)
+ e_dev_err("Failed to update link info, err %d.\n", err);
+
+ ixgbe_check_link_cfg_err(adapter,
+ adapter->hw.link.link_info.link_cfg_err);
+
+ err = ixgbe_non_sfp_link_config(&adapter->hw);
+ if (err)
+ e_dev_err("Link setup failed, err %d.\n", err);
+ }
return 0;
@@ -6960,7 +7501,7 @@ static void ixgbe_close_suspend(struct ixgbe_adapter *adapter)
**/
int ixgbe_close(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
ixgbe_ptp_stop(adapter);
@@ -6974,7 +7515,7 @@ int ixgbe_close(struct net_device *netdev)
return 0;
}
-static int __maybe_unused ixgbe_resume(struct device *dev_d)
+static int ixgbe_resume(struct device *dev_d)
{
struct pci_dev *pdev = to_pci_dev(dev_d);
struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
@@ -7064,6 +7605,7 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
pci_wake_from_d3(pdev, !!wufc);
break;
default:
@@ -7082,7 +7624,7 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
return 0;
}
-static int __maybe_unused ixgbe_suspend(struct device *dev_d)
+static int ixgbe_suspend(struct device *dev_d)
{
struct pci_dev *pdev = to_pci_dev(dev_d);
int retval;
@@ -7211,6 +7753,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
hwstats->pxonrxc[i] +=
IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
break;
@@ -7223,11 +7766,12 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
for (i = 0; i < 16; i++) {
hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
- if ((hw->mac.type == ixgbe_mac_82599EB) ||
- (hw->mac.type == ixgbe_mac_X540) ||
- (hw->mac.type == ixgbe_mac_X550) ||
- (hw->mac.type == ixgbe_mac_X550EM_x) ||
- (hw->mac.type == ixgbe_mac_x550em_a)) {
+ if (hw->mac.type == ixgbe_mac_82599EB ||
+ hw->mac.type == ixgbe_mac_X540 ||
+ hw->mac.type == ixgbe_mac_X550 ||
+ hw->mac.type == ixgbe_mac_X550EM_x ||
+ hw->mac.type == ixgbe_mac_x550em_a ||
+ hw->mac.type == ixgbe_mac_e610) {
hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */
hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
@@ -7253,6 +7797,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
/* OS2BMC stats are X540 and later */
hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
@@ -7435,12 +7980,9 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
return;
/* Force detection of hung controller */
- if (netif_carrier_ok(adapter->netdev)) {
+ if (netif_carrier_ok(adapter->netdev))
for (i = 0; i < adapter->num_tx_queues; i++)
set_check_for_tx_hang(adapter->tx_ring[i]);
- for (i = 0; i < adapter->num_xdp_queues; i++)
- set_check_for_tx_hang(adapter->xdp_ring[i]);
- }
if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
/*
@@ -7553,6 +8095,7 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
case ixgbe_mac_82599EB: {
u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
@@ -7602,6 +8145,9 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
netif_carrier_on(netdev);
ixgbe_check_vf_rate_limit(adapter);
+ if (adapter->num_vfs && hw->mac.ops.enable_mdd)
+ hw->mac.ops.enable_mdd(hw);
+
/* enable transmits */
netif_tx_wake_all_queues(adapter->netdev);
@@ -7629,6 +8175,8 @@ static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
if (!netif_carrier_ok(netdev))
return;
+ adapter->link_down_events++;
+
/* poll for SFP+ cable when link is down */
if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
@@ -7654,13 +8202,6 @@ static bool ixgbe_ring_tx_pending(struct ixgbe_adapter *adapter)
return true;
}
- for (i = 0; i < adapter->num_xdp_queues; i++) {
- struct ixgbe_ring *ring = adapter->xdp_ring[i];
-
- if (ring->next_to_use != ring->next_to_clean)
- return true;
- }
-
return false;
}
@@ -7948,7 +8489,8 @@ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
**/
static void ixgbe_service_timer(struct timer_list *t)
{
- struct ixgbe_adapter *adapter = from_timer(adapter, t, service_timer);
+ struct ixgbe_adapter *adapter = timer_container_of(adapter, t,
+ service_timer);
unsigned long next_event_offset;
/* poll faster when waiting for link */
@@ -8003,6 +8545,34 @@ static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
rtnl_unlock();
}
+static int ixgbe_check_fw_api_mismatch(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ if (hw->mac.type != ixgbe_mac_e610)
+ return 0;
+
+ if (hw->mac.ops.get_fw_ver && hw->mac.ops.get_fw_ver(hw))
+ return 0;
+
+ if (hw->api_maj_ver > IXGBE_FW_API_VER_MAJOR) {
+ e_dev_err("The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
+
+ adapter->flags2 |= IXGBE_FLAG2_API_MISMATCH;
+ return -EOPNOTSUPP;
+ } else if (hw->api_maj_ver == IXGBE_FW_API_VER_MAJOR &&
+ hw->api_min_ver > IXGBE_FW_API_VER_MINOR + IXGBE_FW_API_VER_DIFF_ALLOWED) {
+ e_dev_info("The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
+ adapter->flags2 |= IXGBE_FLAG2_API_MISMATCH;
+ } else if (hw->api_maj_ver < IXGBE_FW_API_VER_MAJOR ||
+ hw->api_min_ver < IXGBE_FW_API_VER_MINOR - IXGBE_FW_API_VER_DIFF_ALLOWED) {
+ e_dev_info("The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
+ adapter->flags2 |= IXGBE_FLAG2_API_MISMATCH;
+ }
+
+ return 0;
+}
+
/**
* ixgbe_check_fw_error - Check firmware for errors
* @adapter: the adapter private structure
@@ -8013,12 +8583,14 @@ static bool ixgbe_check_fw_error(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 fwsm;
+ int err;
/* read fwsm.ext_err_ind register and log errors */
fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
+ /* skip if E610's FW is reloading; a warning in that case may be misleading */
if (fwsm & IXGBE_FWSM_EXT_ERR_IND_MASK ||
- !(fwsm & IXGBE_FWSM_FW_VAL_BIT))
+ (!(fwsm & IXGBE_FWSM_FW_VAL_BIT) && !(hw->mac.type == ixgbe_mac_e610)))
e_dev_warn("Warning firmware error detected FWSM: 0x%08X\n",
fwsm);
@@ -8026,10 +8598,53 @@ static bool ixgbe_check_fw_error(struct ixgbe_adapter *adapter)
e_dev_err("Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
return true;
}
+ if (!(adapter->flags2 & IXGBE_FLAG2_API_MISMATCH)) {
+ err = ixgbe_check_fw_api_mismatch(adapter);
+ if (err)
+ return true;
+ }
+
+ /* return here if FW rollback mode has been already detected */
+ if (adapter->flags2 & IXGBE_FLAG2_FW_ROLLBACK)
+ return false;
+
+ if (hw->mac.ops.fw_rollback_mode && hw->mac.ops.fw_rollback_mode(hw)) {
+ struct ixgbe_nvm_info *nvm_info = &adapter->hw.flash.nvm;
+ char ver_buff[64] = "";
+
+ if (hw->mac.ops.get_fw_ver && hw->mac.ops.get_fw_ver(hw))
+ goto no_version;
+
+ if (hw->mac.ops.get_nvm_ver &&
+ hw->mac.ops.get_nvm_ver(hw, nvm_info))
+ goto no_version;
+
+ snprintf(ver_buff, sizeof(ver_buff),
+ "Current version is NVM:%x.%x.%x, FW:%d.%d. ",
+ nvm_info->major, nvm_info->minor, nvm_info->eetrack,
+ hw->fw_maj_ver, hw->fw_min_ver);
+no_version:
+ e_dev_warn("Firmware rollback mode detected. %sDevice may exhibit limited functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware rollback mode.",
+ ver_buff);
+
+ adapter->flags2 |= IXGBE_FLAG2_FW_ROLLBACK;
+ }
return false;
}
+static void ixgbe_recovery_service_task(struct work_struct *work)
+{
+ struct ixgbe_adapter *adapter = container_of(work,
+ struct ixgbe_adapter,
+ service_task);
+
+ ixgbe_handle_fw_event(adapter);
+ ixgbe_service_event_complete(adapter);
+
+ mod_timer(&adapter->service_timer, jiffies + msecs_to_jiffies(100));
+}
+
/**
* ixgbe_service_task - manages and runs subtasks
* @work: pointer to work_struct containing our data
@@ -8049,11 +8664,21 @@ static void ixgbe_service_task(struct work_struct *work)
return;
}
if (ixgbe_check_fw_error(adapter)) {
- if (!test_bit(__IXGBE_DOWN, &adapter->state))
+ if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
+ if (adapter->mii_bus) {
+ mdiobus_unregister(adapter->mii_bus);
+ adapter->mii_bus = NULL;
+ }
unregister_netdev(adapter->netdev);
+ }
ixgbe_service_event_complete(adapter);
return;
}
+ if (adapter->hw.mac.type == ixgbe_mac_e610) {
+ if (adapter->flags2 & IXGBE_FLAG2_FW_ASYNC_EVENT)
+ ixgbe_handle_fw_event(adapter);
+ ixgbe_check_media_subtask(adapter);
+ }
ixgbe_reset_subtask(adapter);
ixgbe_phy_interrupt_subtask(adapter);
ixgbe_sfp_detection_subtask(adapter);
@@ -8635,7 +9260,7 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
switch (vlan_get_protocol(skb)) {
case htons(ETH_P_FCOE):
case htons(ETH_P_FIP):
- adapter = netdev_priv(dev);
+ adapter = ixgbe_from_netdev(dev);
if (!sb_dev && (adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
break;
@@ -8894,7 +9519,7 @@ static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb,
struct net_device *netdev,
struct ixgbe_ring *ring)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_ring *tx_ring;
/*
@@ -8926,7 +9551,7 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
**/
static int ixgbe_set_mac(struct net_device *netdev, void *p)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
struct sockaddr *addr = p;
@@ -8944,7 +9569,7 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p)
static int
ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u16 value;
int rc;
@@ -8970,7 +9595,7 @@ ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
u16 addr, u16 value)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
if (adapter->mii_bus) {
@@ -8990,13 +9615,9 @@ static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
switch (cmd) {
- case SIOCSHWTSTAMP:
- return ixgbe_ptp_set_ts_config(adapter, req);
- case SIOCGHWTSTAMP:
- return ixgbe_ptp_get_ts_config(adapter, req);
case SIOCGMIIPHY:
if (!adapter->hw.phy.ops.read_reg)
return -EOPNOTSUPP;
@@ -9016,7 +9637,7 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
static int ixgbe_add_sanmac_netdev(struct net_device *dev)
{
int err = 0;
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_hw *hw = &adapter->hw;
if (is_valid_ether_addr(hw->mac.san_addr)) {
@@ -9040,7 +9661,7 @@ static int ixgbe_add_sanmac_netdev(struct net_device *dev)
static int ixgbe_del_sanmac_netdev(struct net_device *dev)
{
int err = 0;
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_mac_info *mac = &adapter->hw.mac;
if (is_valid_ether_addr(mac->san_addr)) {
@@ -9071,7 +9692,7 @@ static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats,
static void ixgbe_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
int i;
rcu_read_lock();
@@ -9114,7 +9735,7 @@ static void ixgbe_get_stats64(struct net_device *netdev,
static int ixgbe_ndo_get_vf_stats(struct net_device *netdev, int vf,
struct ifla_vf_stats *vf_stats)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (vf < 0 || vf >= adapter->num_vfs)
return -EINVAL;
@@ -9231,7 +9852,7 @@ static int ixgbe_reassign_macvlan_pool(struct net_device *vdev,
static void ixgbe_defrag_macvlan_pools(struct net_device *dev)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct netdev_nested_priv priv = {
.data = (void *)adapter,
};
@@ -9252,7 +9873,7 @@ static void ixgbe_defrag_macvlan_pools(struct net_device *dev)
*/
int ixgbe_setup_tc(struct net_device *dev, u8 tc)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_hw *hw = &adapter->hw;
/* Hardware supports up to 8 traffic classes */
@@ -9810,7 +10431,7 @@ static LIST_HEAD(ixgbe_block_cb_list);
static int __ixgbe_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
switch (type) {
case TC_SETUP_BLOCK:
@@ -9838,7 +10459,7 @@ void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter)
#endif
void ixgbe_do_reset(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (netif_running(netdev))
ixgbe_reinit_locked(adapter);
@@ -9849,7 +10470,7 @@ void ixgbe_do_reset(struct net_device *netdev)
static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
netdev_features_t features)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
/* If Rx checksum is disabled, then RSC/LRO should also be disabled */
if (!(features & NETIF_F_RXCSUM))
@@ -9886,7 +10507,7 @@ static void ixgbe_reset_l2fw_offload(struct ixgbe_adapter *adapter)
static int ixgbe_set_features(struct net_device *netdev,
netdev_features_t features)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
netdev_features_t changed = netdev->features ^ features;
bool need_reset = false;
@@ -9957,12 +10578,12 @@ static int ixgbe_set_features(struct net_device *netdev,
static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr, u16 vid,
- u16 flags,
+ u16 flags, bool *notified,
struct netlink_ext_ack *extack)
{
/* guarantee we can provide a unique filter for the unicast address */
if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
u16 pool = VMDQ_P(0);
if (netdev_uc_count(dev) >= ixgbe_available_rars(adapter, pool))
@@ -10050,7 +10671,7 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
struct nlmsghdr *nlh, u16 flags,
struct netlink_ext_ack *extack)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct nlattr *attr, *br_spec;
int rem;
@@ -10061,15 +10682,10 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
if (!br_spec)
return -EINVAL;
- nla_for_each_nested(attr, br_spec, rem) {
- int status;
- __u16 mode;
+ nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
+ __u16 mode = nla_get_u16(attr);
+ int status = ixgbe_configure_bridge_mode(adapter, mode);
- if (nla_type(attr) != IFLA_BRIDGE_MODE)
- continue;
-
- mode = nla_get_u16(attr);
- status = ixgbe_configure_bridge_mode(adapter, mode);
if (status)
return status;
@@ -10083,7 +10699,7 @@ static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct net_device *dev,
u32 filter_mask, int nlflags)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
return 0;
@@ -10095,7 +10711,7 @@ static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(pdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(pdev);
struct ixgbe_fwd_adapter *accel;
int tcs = adapter->hw_tcs ? : 1;
int pool, err;
@@ -10192,7 +10808,7 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
{
struct ixgbe_fwd_adapter *accel = priv;
- struct ixgbe_adapter *adapter = netdev_priv(pdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(pdev);
unsigned int rxbase = accel->rx_base_queue;
unsigned int i;
@@ -10270,7 +10886,7 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
{
int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct bpf_prog *old_prog;
bool need_reset;
int num_queues;
@@ -10342,7 +10958,7 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
switch (xdp->command) {
case XDP_SETUP_PROG:
@@ -10377,7 +10993,7 @@ void ixgbe_xdp_ring_update_tail_locked(struct ixgbe_ring *ring)
static int ixgbe_xdp_xmit(struct net_device *dev, int n,
struct xdp_frame **frames, u32 flags)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_ring *ring;
int nxmit = 0;
int i;
@@ -10385,6 +11001,10 @@ static int ixgbe_xdp_xmit(struct net_device *dev, int n,
if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state)))
return -ENETDOWN;
+ if (!netif_carrier_ok(adapter->netdev) ||
+ !netif_running(adapter->netdev))
+ return -ENETDOWN;
+
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
return -EINVAL;
@@ -10465,6 +11085,8 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_bpf = ixgbe_xdp,
.ndo_xdp_xmit = ixgbe_xdp_xmit,
.ndo_xsk_wakeup = ixgbe_xsk_wakeup,
+ .ndo_hwtstamp_get = ixgbe_ptp_hwtstamp_get,
+ .ndo_hwtstamp_set = ixgbe_ptp_hwtstamp_set,
};
static void ixgbe_disable_txr_hw(struct ixgbe_adapter *adapter,
@@ -10667,7 +11289,7 @@ void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring)
* ixgbe_enumerate_functions - Get the number of ports this device has
* @adapter: adapter structure
*
- * This function enumerates the phsyical functions co-located on a single slot,
+ * This function enumerates the physical functions co-located on a single slot,
* in order to determine how many ports a device has. This is most useful in
* determining the required GT/s of PCIe bandwidth necessary for optimal
* performance.
@@ -10778,6 +11400,24 @@ bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
}
/**
+ * ixgbe_set_fw_version_e610 - Set FW version specifically on E610 adapters
+ * @adapter: the adapter private structure
+ *
+ * This function is used by probe and ethtool to determine the FW version to
+ * format for display. The FW version is taken from the EEPROM/NVM.
+ *
+ */
+void ixgbe_set_fw_version_e610(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_orom_info *orom = &adapter->hw.flash.orom;
+ struct ixgbe_nvm_info *nvm = &adapter->hw.flash.nvm;
+
+ snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
+ "%x.%02x 0x%x %d.%d.%d", nvm->major, nvm->minor,
+ nvm->eetrack, orom->major, orom->build, orom->patch);
+}
+
+/**
* ixgbe_set_fw_version - Set FW version
* @adapter: the adapter private structure
*
@@ -10789,6 +11429,11 @@ static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
struct ixgbe_nvm_version nvm_ver;
+ if (adapter->hw.mac.type == ixgbe_mac_e610) {
+ ixgbe_set_fw_version_e610(adapter);
+ return;
+ }
+
ixgbe_get_oem_prod_version(hw, &nvm_ver);
if (nvm_ver.oem_valid) {
snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
@@ -10813,6 +11458,66 @@ static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_recovery_probe - Handle FW recovery mode during probe
+ * @adapter: the adapter private structure
+ *
+ * Perform limited driver initialization when FW error is detected.
+ *
+ * Return: 0 on successful probe for E610, -EIO if recovery mode is detected
+ * for a non-E610 adapter, or an error status code in any other case.
+ */
+static int ixgbe_recovery_probe(struct ixgbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ bool disable_dev;
+ int err = -EIO;
+
+ if (hw->mac.type != ixgbe_mac_e610)
+ goto clean_up_probe;
+
+ ixgbe_get_hw_control(adapter);
+ mutex_init(&hw->aci.lock);
+ err = ixgbe_get_flash_data(&adapter->hw);
+ if (err)
+ goto shutdown_aci;
+
+ timer_setup(&adapter->service_timer, ixgbe_service_timer, 0);
+ INIT_WORK(&adapter->service_task, ixgbe_recovery_service_task);
+ set_bit(__IXGBE_SERVICE_INITED, &adapter->state);
+ clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
+
+ if (hw->mac.ops.get_bus_info)
+ hw->mac.ops.get_bus_info(hw);
+
+ pci_set_drvdata(pdev, adapter);
+ /* Create the devlink interface so the NIC can still be managed,
+ * e.g. so a new NVM image can be loaded
+ */
+ devl_lock(adapter->devlink);
+ ixgbe_devlink_register_port(adapter);
+ SET_NETDEV_DEVLINK_PORT(adapter->netdev,
+ &adapter->devlink_port);
+ ixgbe_devlink_init_regions(adapter);
+ devl_register(adapter->devlink);
+ devl_unlock(adapter->devlink);
+
+ return 0;
+shutdown_aci:
+ mutex_destroy(&adapter->hw.aci.lock);
+ ixgbe_release_hw_control(adapter);
+clean_up_probe:
+ disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
+ free_netdev(netdev);
+ devlink_free(adapter->devlink);
+ pci_release_mem_regions(pdev);
+ if (disable_dev)
+ pci_disable_device(pdev);
+ return err;
+}
+
+/**
* ixgbe_probe - Device Initialization Routine
* @pdev: PCI device information struct
* @ent: entry in ixgbe_pci_tbl
@@ -10826,6 +11531,7 @@ static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter)
static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev;
+ struct ixgbe_netdevice_priv *netdev_priv_wrapper;
struct ixgbe_adapter *adapter = NULL;
struct ixgbe_hw *hw;
const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
@@ -10875,9 +11581,17 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
#else
indices = IXGBE_MAX_RSS_INDICES;
#endif
+ } else if (ii->mac == ixgbe_mac_e610) {
+ indices = IXGBE_MAX_RSS_INDICES_X550;
+ }
+
+ adapter = ixgbe_allocate_devlink(&pdev->dev);
+ if (IS_ERR(adapter)) {
+ err = PTR_ERR(adapter);
+ goto err_devlink;
}
- netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
+ netdev = alloc_etherdev_mq(sizeof(*netdev_priv_wrapper), indices);
if (!netdev) {
err = -ENOMEM;
goto err_alloc_etherdev;
@@ -10885,7 +11599,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
SET_NETDEV_DEV(netdev, &pdev->dev);
- adapter = netdev_priv(netdev);
+ netdev_priv_wrapper = netdev_priv(netdev);
+ netdev_priv_wrapper->adapter = adapter;
adapter->netdev = netdev;
adapter->pdev = pdev;
@@ -10901,11 +11616,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_ioremap;
}
- netdev->netdev_ops = &ixgbe_netdev_ops;
- ixgbe_set_ethtool_ops(netdev);
- netdev->watchdog_timeo = 5 * HZ;
- strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
-
/* Setup hw api */
hw->mac.ops = *ii->mac_ops;
hw->mac.type = ii->mac;
@@ -10935,17 +11645,36 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->phy.mdio.mdio_read = ixgbe_mdio_read;
hw->phy.mdio.mdio_write = ixgbe_mdio_write;
+ netdev->netdev_ops = &ixgbe_netdev_ops;
+ ixgbe_set_ethtool_ops(netdev);
+ netdev->watchdog_timeo = 5 * HZ;
+ strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
+
/* setup the private structure */
err = ixgbe_sw_init(adapter, ii);
if (err)
goto err_sw_init;
+ if (ixgbe_check_fw_error(adapter))
+ return ixgbe_recovery_probe(adapter);
+
+ if (adapter->hw.mac.type == ixgbe_mac_e610) {
+ err = ixgbe_get_caps(&adapter->hw);
+ if (err)
+ dev_err(&pdev->dev, "ixgbe_get_caps failed %d\n", err);
+
+ err = ixgbe_get_flash_data(&adapter->hw);
+ if (err)
+ goto err_sw_init;
+ }
+
if (adapter->hw.mac.type == ixgbe_mac_82599EB)
adapter->flags2 |= IXGBE_FLAG2_AUTO_DISABLE_VF;
switch (adapter->hw.mac.type) {
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_e610:
netdev->udp_tunnel_nic_info = &ixgbe_udp_tunnels_x550;
break;
case ixgbe_mac_x550em_a:
@@ -10955,10 +11684,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
break;
}
- /* Make sure the SWFW semaphore is in a valid state */
- if (hw->mac.ops.init_swfw_sync)
- hw->mac.ops.init_swfw_sync(hw);
-
/* Make it possible the adapter to be woken up via WOL */
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
@@ -10966,6 +11691,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
break;
default:
@@ -11102,8 +11828,7 @@ skip_sriov:
NETIF_F_FCOE_CRC;
netdev->vlan_features |= NETIF_F_FSO |
- NETIF_F_FCOE_CRC |
- NETIF_F_FCOE_MTU;
+ NETIF_F_FCOE_CRC;
}
#endif /* IXGBE_FCOE */
if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
@@ -11111,11 +11836,6 @@ skip_sriov:
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
netdev->features |= NETIF_F_LRO;
- if (ixgbe_check_fw_error(adapter)) {
- err = -EIO;
- goto err_sw_init;
- }
-
/* make sure the EEPROM is good */
if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
e_dev_err("The EEPROM Checksum Is Not Valid\n");
@@ -11196,7 +11916,7 @@ skip_sriov:
if (expected_gts > 0)
ixgbe_check_minimum_link(adapter, expected_gts);
- err = ixgbe_read_pba_string_generic(hw, part_str, sizeof(part_str));
+ err = hw->eeprom.ops.read_pba_string(hw, part_str, sizeof(part_str));
if (err)
strscpy(part_str, "Unknown", sizeof(part_str));
if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
@@ -11222,6 +11942,11 @@ skip_sriov:
}
strcpy(netdev->name, "eth%d");
pci_set_drvdata(pdev, adapter);
+
+ devl_lock(adapter->devlink);
+ ixgbe_devlink_register_port(adapter);
+ SET_NETDEV_DEVLINK_PORT(adapter->netdev, &adapter->devlink_port);
+
err = register_netdev(netdev);
if (err)
goto err_register;
@@ -11276,14 +12001,25 @@ skip_sriov:
if (err)
goto err_netdev;
+ ixgbe_devlink_init_regions(adapter);
+ devl_register(adapter->devlink);
+ devl_unlock(adapter->devlink);
+
+ if (ixgbe_fwlog_init(hw))
+ e_dev_info("Firmware logging not supported\n");
+
return 0;
err_netdev:
unregister_netdev(netdev);
err_register:
+ devl_port_unregister(&adapter->devlink_port);
+ devl_unlock(adapter->devlink);
ixgbe_release_hw_control(adapter);
ixgbe_clear_interrupt_scheme(adapter);
err_sw_init:
+ if (hw->mac.type == ixgbe_mac_e610)
+ mutex_destroy(&adapter->hw.aci.lock);
ixgbe_disable_sriov(adapter);
adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
iounmap(adapter->io_addr);
@@ -11295,7 +12031,9 @@ err_ioremap:
disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
free_netdev(netdev);
err_alloc_etherdev:
+ devlink_free(adapter->devlink);
pci_release_mem_regions(pdev);
+err_devlink:
err_pci_reg:
err_dma:
if (!adapter || disable_dev)
@@ -11308,7 +12046,7 @@ err_dma:
* @pdev: PCI device information struct
*
* ixgbe_remove is called by the PCI subsystem to alert the driver
- * that it should release a PCI device. The could be caused by a
+ * that it should release a PCI device. This could be caused by a
* Hot-Plug event, or because the driver is going to be removed from
* memory.
**/
@@ -11324,11 +12062,18 @@ static void ixgbe_remove(struct pci_dev *pdev)
return;
netdev = adapter->netdev;
+ devl_lock(adapter->devlink);
+ devl_unregister(adapter->devlink);
+ ixgbe_devlink_destroy_regions(adapter);
+ ixgbe_fwlog_deinit(&adapter->hw);
ixgbe_dbg_adapter_exit(adapter);
set_bit(__IXGBE_REMOVING, &adapter->state);
cancel_work_sync(&adapter->service_task);
+ if (adapter->hw.mac.type == ixgbe_mac_e610)
+ ixgbe_disable_link_status_events(adapter);
+
if (adapter->mii_bus)
mdiobus_unregister(adapter->mii_bus);
@@ -11354,6 +12099,9 @@ static void ixgbe_remove(struct pci_dev *pdev)
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
+ devl_port_unregister(&adapter->devlink_port);
+ devl_unlock(adapter->devlink);
+
ixgbe_stop_ipsec_offload(adapter);
ixgbe_clear_interrupt_scheme(adapter);
@@ -11383,8 +12131,13 @@ static void ixgbe_remove(struct pci_dev *pdev)
disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
free_netdev(netdev);
+ if (adapter->hw.mac.type == ixgbe_mac_e610)
+ mutex_destroy(&adapter->hw.aci.lock);
+
if (disable_dev)
pci_disable_device(pdev);
+
+ devlink_free(adapter->devlink);
}
/**
@@ -11460,6 +12213,9 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
case ixgbe_mac_x550em_a:
device_id = IXGBE_DEV_ID_X550EM_A_VF;
break;
+ case ixgbe_mac_e610:
+ device_id = IXGBE_DEV_ID_E610_VF;
+ break;
default:
device_id = 0;
break;
@@ -11542,7 +12298,6 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
adapter->hw.hw_addr = adapter->io_addr;
pci_set_master(pdev);
pci_restore_state(pdev);
- pci_save_state(pdev);
pci_wake_from_d3(pdev, false);
@@ -11588,14 +12343,14 @@ static const struct pci_error_handlers ixgbe_err_handler = {
.resume = ixgbe_io_resume,
};
-static SIMPLE_DEV_PM_OPS(ixgbe_pm_ops, ixgbe_suspend, ixgbe_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(ixgbe_pm_ops, ixgbe_suspend, ixgbe_resume);
static struct pci_driver ixgbe_driver = {
.name = ixgbe_driver_name,
.id_table = ixgbe_pci_tbl,
.probe = ixgbe_probe,
.remove = ixgbe_remove,
- .driver.pm = &ixgbe_pm_ops,
+ .driver.pm = pm_sleep_ptr(&ixgbe_pm_ops),
.shutdown = ixgbe_shutdown,
.sriov_configure = ixgbe_pci_sriov_configure,
.err_handler = &ixgbe_err_handler
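The netdev_priv() -> ixgbe_from_netdev() conversions throughout the hunks above follow from the allocation change in ixgbe_probe(): the adapter is now allocated via ixgbe_allocate_devlink() and the net_device private area only carries a small wrapper (struct ixgbe_netdevice_priv). A minimal sketch of the assumed helper shape, for orientation only; the real definition lives in ixgbe.h and is not part of this diff:

/* Sketch only: assumed wrapper behind ixgbe_from_netdev() */
struct ixgbe_netdevice_priv {
	struct ixgbe_adapter *adapter;
};

static inline struct ixgbe_adapter *ixgbe_from_netdev(struct net_device *netdev)
{
	struct ixgbe_netdevice_priv *priv = netdev_priv(netdev);

	return priv->adapter;
}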
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
index d67d77e5dacc..788b5af07c70 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include <linux/pci.h>
#include <linux/delay.h>
@@ -283,6 +283,7 @@ static int ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset));
break;
default:
@@ -407,6 +408,7 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
hw->mac.type != ixgbe_mac_X550 &&
hw->mac.type != ixgbe_mac_X550EM_x &&
hw->mac.type != ixgbe_mac_x550em_a &&
+ hw->mac.type != ixgbe_mac_e610 &&
hw->mac.type != ixgbe_mac_X540)
return;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index bd205306934b..0334ed4b8fa3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -4,7 +4,7 @@
#ifndef _IXGBE_MBX_H_
#define _IXGBE_MBX_H_
-#include "ixgbe_type.h"
+#include <linux/types.h>
#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
@@ -34,7 +34,7 @@
#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
clear to send requests */
#define IXGBE_VT_MSGINFO_SHIFT 16
-/* bits 23:16 are used for exra info for certain messages */
+/* bits 23:16 are used for extra info for certain messages */
#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
/* definitions to support mailbox API version negotiation */
@@ -50,6 +50,9 @@ enum ixgbe_pfvf_api_rev {
ixgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */
ixgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */
ixgbe_mbox_api_14, /* API version 1.4, linux/freebsd VF driver */
+ ixgbe_mbox_api_15, /* API version 1.5, linux/freebsd VF driver */
+ ixgbe_mbox_api_16, /* API version 1.6, linux/freebsd VF driver */
+ ixgbe_mbox_api_17, /* API version 1.7, linux/freebsd VF driver */
/* This value should always be last */
ixgbe_mbox_api_unknown, /* indicates that API version is not known */
};
@@ -86,6 +89,12 @@ enum ixgbe_pfvf_api_rev {
#define IXGBE_VF_GET_LINK_STATE 0x10 /* get vf link state */
+/* mailbox API, version 1.6 VF requests */
+#define IXGBE_VF_GET_PF_LINK_STATE 0x11 /* request PF to send link info */
+
+/* mailbox API, version 1.7 VF requests */
+#define IXGBE_VF_FEATURES_NEGOTIATE 0x12 /* get features supported by PF */
+
/* length of permanent address message returned from PF */
#define IXGBE_VF_PERMADDR_MSG_LEN 4
/* word in permanent address message with the current multicast type */
@@ -96,6 +105,14 @@ enum ixgbe_pfvf_api_rev {
#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
+/* features negotiated between PF/VF */
+#define IXGBEVF_PF_SUP_IPSEC BIT(0)
+#define IXGBEVF_PF_SUP_ESX_MBX BIT(1)
+
+#define IXGBE_SUPPORTED_FEATURES IXGBEVF_PF_SUP_IPSEC
+
+struct ixgbe_hw;
+
int ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16);
int ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16);
int ixgbe_check_for_msg(struct ixgbe_hw *, u16);
@@ -105,6 +122,18 @@ int ixgbe_check_for_rst(struct ixgbe_hw *, u16);
void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
#endif /* CONFIG_PCI_IOV */
+struct ixgbe_mbx_operations {
+ int (*init_params)(struct ixgbe_hw *hw);
+ int (*read)(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 vf_number);
+ int (*write)(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 vf_number);
+ int (*read_posted)(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id);
+ int (*write_posted)(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 mbx_id);
+ int (*check_for_msg)(struct ixgbe_hw *hw, u16 vf_number);
+ int (*check_for_ack)(struct ixgbe_hw *hw, u16 vf_number);
+ int (*check_for_rst)(struct ixgbe_hw *hw, u16 vf_number);
+};
+
extern const struct ixgbe_mbx_operations mbx_ops_generic;
#endif /* _IXGBE_MBX_H_ */
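The PF-side handler ixgbe_negotiate_vf_features() (added in ixgbe_sriov.c further below) simply masks the VF's request with IXGBE_SUPPORTED_FEATURES. A hypothetical illustration of the message layout used by the new IXGBE_VF_FEATURES_NEGOTIATE command; only the word layout is taken from this patch, the VF-side transport is assumed:

/* Illustrative only: word layout of the features negotiation message */
u32 msgbuf[2];

msgbuf[0] = IXGBE_VF_FEATURES_NEGOTIATE;                   /* opcode, API 1.7+ */
msgbuf[1] = IXGBEVF_PF_SUP_IPSEC | IXGBEVF_PF_SUP_ESX_MBX; /* features requested */

/* The PF replies with msgbuf[1] &= IXGBE_SUPPORTED_FEATURES, so only the
 * features supported by both ends remain set.
 */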
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 07eaa3c3f4d3..2449e4cf2679 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include <linux/pci.h>
#include <linux/delay.h>
@@ -167,7 +167,7 @@ int ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
u16 reg, u16 val, bool lock)
{
u32 swfw_mask = hw->phy.phy_semaphore_mask;
- int max_retry = 1;
+ int max_retry = 3;
int retry = 0;
u8 reg_high;
u8 csum;
@@ -1117,7 +1117,7 @@ int ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
MDIO_MMD_AN, &autoneg_reg);
- if (hw->mac.type == ixgbe_mac_X550) {
+ if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_e610) {
/* Set or unset auto-negotiation 5G advertisement */
autoneg_reg &= ~IXGBE_MII_5GBASE_T_ADVERTISE;
if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) &&
@@ -1233,6 +1233,7 @@ static int ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw)
switch (hw->mac.type) {
case ixgbe_mac_X550:
+ case ixgbe_mac_e610:
hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL;
hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL;
break;
@@ -1322,7 +1323,7 @@ int ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
* @hw: pointer to hardware structure
*
* Restart autonegotiation and PHY and waits for completion.
- * This function always returns success, this is nessary since
+ * This function always returns success, this is necessary since
* it is called via a function pointer that could call other
* functions that could return an error.
**/
@@ -2284,7 +2285,7 @@ static int ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data, bool lock)
{
u32 swfw_mask = hw->phy.phy_semaphore_mask;
- u32 max_retry = 1;
+ u32 max_retry = 3;
u32 retry = 0;
int status;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index 14aa2ca51f70..81179c60af4e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -40,7 +40,7 @@
#define IXGBE_SFF_1GBASESX_CAPABLE 0x1
#define IXGBE_SFF_1GBASELX_CAPABLE 0x2
#define IXGBE_SFF_1GBASET_CAPABLE 0x8
-#define IXGBE_SFF_BASEBX10_CAPABLE 0x64
+#define IXGBE_SFF_BASEBX10_CAPABLE 0x40
#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 9339edbd9082..6885d2343c48 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -140,6 +140,7 @@
* proper mult and shift to convert the cycles into nanoseconds of time.
*/
#define IXGBE_X550_BASE_PERIOD 0xC80000000ULL
+#define IXGBE_E610_BASE_PERIOD 0x333333333ULL
#define INCVALUE_MASK 0x7FFFFFFF
#define ISGN 0x80000000
@@ -326,7 +327,7 @@ static void ixgbe_ptp_setup_sdp_X550(struct ixgbe_adapter *adapter)
* result of SYSTIME is 32bits of "billions of cycles" and 32 bits of
* "cycles", rather than seconds and nanoseconds.
*/
-static u64 ixgbe_ptp_read_X550(const struct cyclecounter *cc)
+static u64 ixgbe_ptp_read_X550(struct cyclecounter *cc)
{
struct ixgbe_adapter *adapter =
container_of(cc, struct ixgbe_adapter, hw_cc);
@@ -363,7 +364,7 @@ static u64 ixgbe_ptp_read_X550(const struct cyclecounter *cc)
* cyclecounter structure used to construct a ns counter from the
* arbitrary fixed point registers
*/
-static u64 ixgbe_ptp_read_82599(const struct cyclecounter *cc)
+static u64 ixgbe_ptp_read_82599(struct cyclecounter *cc)
{
struct ixgbe_adapter *adapter =
container_of(cc, struct ixgbe_adapter, hw_cc);
@@ -415,6 +416,7 @@ static void ixgbe_ptp_convert_to_hwtstamp(struct ixgbe_adapter *adapter,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
/* Upper 32 bits represent billions of cycles, lower 32 bits
* represent cycles. However, we use timespec64_to_ns for the
* correct math even though the units haven't been corrected
@@ -492,11 +494,13 @@ static int ixgbe_ptp_adjfine_X550(struct ptp_clock_info *ptp, long scaled_ppm)
struct ixgbe_adapter *adapter =
container_of(ptp, struct ixgbe_adapter, ptp_caps);
struct ixgbe_hw *hw = &adapter->hw;
+ u64 rate, base;
bool neg_adj;
- u64 rate;
u32 inca;
- neg_adj = diff_by_scaled_ppm(IXGBE_X550_BASE_PERIOD, scaled_ppm, &rate);
+ base = hw->mac.type == ixgbe_mac_e610 ? IXGBE_E610_BASE_PERIOD :
+ IXGBE_X550_BASE_PERIOD;
+ neg_adj = diff_by_scaled_ppm(base, scaled_ppm, &rate);
/* warn if rate is too large */
if (rate >= INCVALUE_MASK)
@@ -559,6 +563,7 @@ static int ixgbe_ptp_gettimex(struct ptp_clock_info *ptp,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
/* Upper 32 bits represent billions of cycles, lower 32 bits
* represent cycles. However, we use timespec64_to_ns for the
* correct math even though the units haven't been corrected
@@ -636,7 +641,7 @@ static int ixgbe_ptp_feature_enable(struct ptp_clock_info *ptp,
* disabled
*/
if (rq->type != PTP_CLK_REQ_PPS || !adapter->ptp_setup_sdp)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (on)
adapter->flags2 |= IXGBE_FLAG2_PTP_PPS_ENABLED;
@@ -931,20 +936,22 @@ void ixgbe_ptp_rx_rgtstamp(struct ixgbe_q_vector *q_vector,
}
/**
- * ixgbe_ptp_get_ts_config - get current hardware timestamping configuration
- * @adapter: pointer to adapter structure
- * @ifr: ioctl data
+ * ixgbe_ptp_hwtstamp_get - get current hardware timestamping configuration
+ * @netdev: pointer to net device structure
+ * @config: timestamping configuration structure
*
* This function returns the current timestamping settings. Rather than
* attempt to deconstruct registers to fill in the values, simply keep a copy
* of the old settings around, and return a copy when requested.
*/
-int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
+int ixgbe_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
{
- struct hwtstamp_config *config = &adapter->tstamp_config;
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
+
+ *config = adapter->tstamp_config;
- return copy_to_user(ifr->ifr_data, config,
- sizeof(*config)) ? -EFAULT : 0;
+ return 0;
}
/**
@@ -973,7 +980,7 @@ int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
* mode, if required to support the specifically requested mode.
*/
static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 tsync_tx_ctl = IXGBE_TSYNCTXCTL_ENABLED;
@@ -1067,6 +1074,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
/* enable timestamping all packets only if at least some
* packets were requested. Otherwise, play nice and disable
* timestamping
@@ -1123,31 +1131,29 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
}
/**
- * ixgbe_ptp_set_ts_config - user entry point for timestamp mode
- * @adapter: pointer to adapter struct
- * @ifr: ioctl data
+ * ixgbe_ptp_hwtstamp_set - user entry point for timestamp mode
+ * @netdev: pointer to net device structure
+ * @config: timestamping configuration structure
+ * @extack: netlink extended ack structure for error reporting
*
* Set hardware to requested mode. If unsupported, return an error with no
* changes. Otherwise, store the mode for future reference.
*/
-int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
+int ixgbe_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config config;
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
int err;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- err = ixgbe_ptp_set_timestamp_mode(adapter, &config);
+ err = ixgbe_ptp_set_timestamp_mode(adapter, config);
if (err)
return err;
/* save these settings for future reference */
- memcpy(&adapter->tstamp_config, &config,
- sizeof(adapter->tstamp_config));
+ adapter->tstamp_config = *config;
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
static void ixgbe_ptp_link_speed_adjust(struct ixgbe_adapter *adapter,
@@ -1233,6 +1239,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
fallthrough;
case ixgbe_mac_x550em_a:
case ixgbe_mac_X550:
+ case ixgbe_mac_e610:
cc.read = ixgbe_ptp_read_X550;
break;
case ixgbe_mac_X540:
@@ -1280,6 +1287,7 @@ static void ixgbe_ptp_init_systime(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
case ixgbe_mac_X550:
+ case ixgbe_mac_e610:
tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
/* Reset SYSTIME registers to 0 */
@@ -1407,6 +1415,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name);
adapter->ptp_caps.owner = THIS_MODULE;
adapter->ptp_caps.max_adj = 30000000;
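With ixgbe_ptp_hwtstamp_get()/ixgbe_ptp_hwtstamp_set() exposed through ndo_hwtstamp_get/ndo_hwtstamp_set (see the ixgbe_netdev_ops change above), the SIOCSHWTSTAMP/SIOCGHWTSTAMP ioctls are decoded by the networking core, which hands the driver a struct kernel_hwtstamp_config instead of a raw ifreq. User space is unaffected; a typical request still looks like this illustrative snippet (not part of the patch):

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

/* Enable TX timestamping and timestamp all received packets on ifname;
 * fd is any open socket, e.g. socket(AF_INET, SOCK_DGRAM, 0).
 */
static int enable_hw_timestamps(int fd, const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr = {};

	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	/* The core converts this into a kernel_hwtstamp_config and calls
	 * the driver's ndo_hwtstamp_set (ixgbe_ptp_hwtstamp_set here).
	 */
	return ioctl(fd, SIOCSHWTSTAMP, &ifr);
}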
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index fcfd0a075eee..ee133d6749b3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -18,6 +18,7 @@
#include "ixgbe.h"
#include "ixgbe_type.h"
+#include "ixgbe_mbx.h"
#include "ixgbe_sriov.h"
#ifdef CONFIG_PCI_IOV
@@ -206,6 +207,7 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs)
int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
{
unsigned int num_vfs = adapter->num_vfs, vf;
+ struct ixgbe_hw *hw = &adapter->hw;
unsigned long flags;
int rss;
@@ -236,6 +238,9 @@ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
return 0;
+ if (hw->mac.ops.disable_mdd)
+ hw->mac.ops.disable_mdd(hw);
+
#ifdef CONFIG_PCI_IOV
/*
* If our VFs are assigned we cannot shut down SR-IOV
@@ -495,7 +500,7 @@ static int ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 max_frame, u32 vf
int err = 0;
#ifdef CONFIG_FCOE
- if (dev->features & NETIF_F_FCOE_MTU)
+ if (dev->fcoe_mtu)
pf_max_frame = max_t(int, pf_max_frame,
IXGBE_FCOE_JUMBO_FRAME_SIZE);
@@ -505,6 +510,8 @@ static int ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 max_frame, u32 vf
case ixgbe_mbox_api_12:
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
+ case ixgbe_mbox_api_16:
+ case ixgbe_mbox_api_17:
/* Version 1.1 supports jumbo frames on VFs if PF has
* jumbo frames enabled which means legacy VFs are
* disabled
@@ -701,7 +708,7 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
u32 reg_val;
u32 queue;
- /* remove VLAN filters beloning to this VF */
+ /* remove VLAN filters belonging to this VF */
ixgbe_clear_vf_vlans(adapter, vf);
/* add back PF assigned VLAN or VLAN 0 */
@@ -857,7 +864,7 @@ static void ixgbe_set_vf_rx_tx(struct ixgbe_adapter *adapter, int vf)
int pf_max_frame = dev->mtu + ETH_HLEN;
#if IS_ENABLED(CONFIG_FCOE)
- if (dev->features & NETIF_F_FCOE_MTU)
+ if (dev->fcoe_mtu)
pf_max_frame = max_t(int, pf_max_frame,
IXGBE_FCOE_JUMBO_FRAME_SIZE);
#endif /* CONFIG_FCOE */
@@ -1041,13 +1048,15 @@ static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter,
case ixgbe_mbox_api_12:
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
+ case ixgbe_mbox_api_16:
+ case ixgbe_mbox_api_17:
adapter->vfinfo[vf].vf_api = api;
return 0;
default:
break;
}
- e_info(drv, "VF %d requested invalid api version %u\n", vf, api);
+ e_dbg(drv, "VF %d requested unsupported api version %u\n", vf, api);
return -1;
}
@@ -1067,6 +1076,8 @@ static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter,
case ixgbe_mbox_api_12:
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
+ case ixgbe_mbox_api_16:
+ case ixgbe_mbox_api_17:
break;
default:
return -1;
@@ -1107,6 +1118,8 @@ static int ixgbe_get_vf_reta(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
/* verify the PF is supporting the correct API */
switch (adapter->vfinfo[vf].vf_api) {
+ case ixgbe_mbox_api_17:
+ case ixgbe_mbox_api_16:
case ixgbe_mbox_api_14:
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_12:
@@ -1140,6 +1153,8 @@ static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter,
/* verify the PF is supporting the correct API */
switch (adapter->vfinfo[vf].vf_api) {
+ case ixgbe_mbox_api_17:
+ case ixgbe_mbox_api_16:
case ixgbe_mbox_api_14:
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_12:
@@ -1169,6 +1184,8 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter,
fallthrough;
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
+ case ixgbe_mbox_api_16:
+ case ixgbe_mbox_api_17:
break;
default:
return -EOPNOTSUPP;
@@ -1239,6 +1256,8 @@ static int ixgbe_get_vf_link_state(struct ixgbe_adapter *adapter,
case ixgbe_mbox_api_12:
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
+ case ixgbe_mbox_api_16:
+ case ixgbe_mbox_api_17:
break;
default:
return -EOPNOTSUPP;
@@ -1249,6 +1268,65 @@ static int ixgbe_get_vf_link_state(struct ixgbe_adapter *adapter,
return 0;
}
+/**
+ * ixgbe_send_vf_link_status - send link status data to VF
+ * @adapter: pointer to adapter struct
+ * @msgbuf: pointer to message buffers
+ * @vf: VF identifier
+ *
+ * Reply for IXGBE_VF_GET_PF_LINK_STATE mbox command sending link status data.
+ *
+ * Return: 0 on success or -EOPNOTSUPP when operation is not supported.
+ */
+static int ixgbe_send_vf_link_status(struct ixgbe_adapter *adapter,
+ u32 *msgbuf, u32 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ switch (adapter->vfinfo[vf].vf_api) {
+ case ixgbe_mbox_api_16:
+ case ixgbe_mbox_api_17:
+ if (hw->mac.type != ixgbe_mac_e610)
+ return -EOPNOTSUPP;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ /* Simply provide the stored values; the watchdog and link status events
+ * keep them fresh.
+ */
+ msgbuf[1] = adapter->link_speed;
+ msgbuf[2] = adapter->link_up;
+
+ return 0;
+}
+
+/**
+ * ixgbe_negotiate_vf_features - negotiate supported features with VF driver
+ * @adapter: pointer to adapter struct
+ * @msgbuf: pointer to message buffers
+ * @vf: VF identifier
+ *
+ * Return: 0 on success or -EOPNOTSUPP when operation is not supported.
+ */
+static int ixgbe_negotiate_vf_features(struct ixgbe_adapter *adapter,
+ u32 *msgbuf, u32 vf)
+{
+ u32 features = msgbuf[1];
+
+ switch (adapter->vfinfo[vf].vf_api) {
+ case ixgbe_mbox_api_17:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ features &= IXGBE_SUPPORTED_FEATURES;
+ msgbuf[1] = features;
+
+ return 0;
+}
+
static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
{
u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
@@ -1323,6 +1401,12 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
case IXGBE_VF_IPSEC_DEL:
retval = ixgbe_ipsec_vf_del_sa(adapter, msgbuf, vf);
break;
+ case IXGBE_VF_GET_PF_LINK_STATE:
+ retval = ixgbe_send_vf_link_status(adapter, msgbuf, vf);
+ break;
+ case IXGBE_VF_FEATURES_NEGOTIATE:
+ retval = ixgbe_negotiate_vf_features(adapter, msgbuf, vf);
+ break;
default:
e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
retval = -EIO;
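
Purely as an illustration of how a VF-side client could drive the new IXGBE_VF_FEATURES_NEGOTIATE message dispatched above (this sketch is not part of the patch; the real counterpart lives in the ixgbevf driver), the exchange might look roughly like the code below. The transport helper vf_mbx_roundtrip() is a hypothetical stand-in for a mailbox write/poll/read.

static int vf_negotiate_features(u32 wanted, u32 *granted)
{
	u32 msg[2];
	int err;

	msg[0] = IXGBE_VF_FEATURES_NEGOTIATE;
	msg[1] = wanted;

	/* hypothetical helper: send the request and read the PF reply back
	 * into msg[]
	 */
	err = vf_mbx_roundtrip(msg, ARRAY_SIZE(msg));
	if (err)
		return err;

	/* The PF masks the request with IXGBE_SUPPORTED_FEATURES, so only
	 * features supported by both ends remain set in the reply.
	 */
	*granted = msg[1];
	return 0;
}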
@@ -1352,12 +1436,59 @@ static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf)
ixgbe_write_mbx(hw, &msg, 1, vf);
}
+/**
+ * ixgbe_check_mdd_event - check for MDD event on all VFs
+ * @adapter: pointer to ixgbe adapter
+ *
+ * Return: true if there is a VF on which MDD event occurred, false otherwise.
+ */
+bool ixgbe_check_mdd_event(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ DECLARE_BITMAP(vf_bitmap, 64);
+ bool ret = false;
+ int i;
+
+ if (!hw->mac.ops.handle_mdd)
+ return false;
+
+ /* Did we have a malicious event? */
+ bitmap_zero(vf_bitmap, 64);
+ hw->mac.ops.handle_mdd(hw, vf_bitmap);
+
+ /* Log any blocked queues and restore the affected VFs */
+ for_each_set_bit(i, vf_bitmap, 64) {
+ dev_warn(&adapter->pdev->dev,
+ "Malicious event on VF %d tx:%x rx:%x\n", i,
+ IXGBE_READ_REG(hw, IXGBE_LVMMC_TX),
+ IXGBE_READ_REG(hw, IXGBE_LVMMC_RX));
+
+ if (hw->mac.ops.restore_mdd_vf) {
+ u32 ping;
+
+ hw->mac.ops.restore_mdd_vf(hw, i);
+
+ /* get the VF to rebuild its queues */
+ adapter->vfinfo[i].clear_to_send = 0;
+ ping = IXGBE_PF_CONTROL_MSG |
+ IXGBE_VT_MSGTYPE_CTS;
+ ixgbe_write_mbx(hw, &ping, 1, i);
+ }
+
+ ret = true;
+ }
+
+ return ret;
+}
+
void ixgbe_msg_task(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
unsigned long flags;
u32 vf;
+ ixgbe_check_mdd_event(adapter);
+
spin_lock_irqsave(&adapter->vfs_lock, flags);
for (vf = 0; vf < adapter->num_vfs; vf++) {
/* process any reset requests */
@@ -1417,7 +1548,7 @@ void ixgbe_set_all_vfs(struct ixgbe_adapter *adapter)
int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
int retval;
if (vf >= adapter->num_vfs)
@@ -1525,7 +1656,7 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
u8 qos, __be16 vlan_proto)
{
int err = 0;
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
return -EINVAL;
@@ -1643,7 +1774,7 @@ void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter)
int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
int max_tx_rate)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
int link_speed;
/* verify VF is active */
@@ -1678,7 +1809,7 @@ int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
if (vf >= adapter->num_vfs)
@@ -1756,7 +1887,7 @@ void ixgbe_set_vf_link_state(struct ixgbe_adapter *adapter, int vf, int state)
**/
int ixgbe_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
int ret = 0;
if (vf < 0 || vf >= adapter->num_vfs) {
@@ -1793,7 +1924,7 @@ int ixgbe_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state)
int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf,
bool setting)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
/* This operation is currently supported only for 82599 and x540
* devices.
@@ -1812,7 +1943,7 @@ int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf,
int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (vf >= adapter->num_vfs)
return -EINVAL;
@@ -1835,7 +1966,7 @@ int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
int ixgbe_ndo_get_vf_config(struct net_device *netdev,
int vf, struct ifla_vf_info *ivi)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (vf >= adapter->num_vfs)
return -EINVAL;
ivi->vf = vf;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index 0690ecb8dfa3..bc4cab976bf9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -15,6 +15,7 @@
#ifdef CONFIG_PCI_IOV
void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
#endif
+bool ixgbe_check_mdd_event(struct ixgbe_adapter *adapter);
void ixgbe_msg_task(struct ixgbe_adapter *adapter);
int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index ed440dd0c4f9..b1bfeb21537a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#ifndef _IXGBE_TYPE_H_
#define _IXGBE_TYPE_H_
@@ -7,6 +7,8 @@
#include <linux/types.h>
#include <linux/mdio.h>
#include <linux/netdevice.h>
+#include <linux/net/intel/libie/fwlog.h>
+#include "ixgbe_type_e610.h"
/* Device IDs */
#define IXGBE_DEV_ID_82598 0x10B6
@@ -71,12 +73,19 @@
#define IXGBE_DEV_ID_X550EM_A_1G_T 0x15E4
#define IXGBE_DEV_ID_X550EM_A_1G_T_L 0x15E5
+#define IXGBE_DEV_ID_E610_BACKPLANE 0x57AE
+#define IXGBE_DEV_ID_E610_SFP 0x57AF
+#define IXGBE_DEV_ID_E610_10G_T 0x57B0
+#define IXGBE_DEV_ID_E610_2_5G_T 0x57B1
+#define IXGBE_DEV_ID_E610_SGMII 0x57B2
+
/* VF Device IDs */
#define IXGBE_DEV_ID_82599_VF 0x10ED
#define IXGBE_DEV_ID_X540_VF 0x1515
#define IXGBE_DEV_ID_X550_VF 0x1565
#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8
#define IXGBE_DEV_ID_X550EM_A_VF 0x15C5
+#define IXGBE_DEV_ID_E610_VF 0x57AD
#define IXGBE_CAT(r, m) IXGBE_##r##_##m
@@ -230,7 +239,7 @@ struct ixgbe_thermal_sensor_data {
#define NVM_VER_INVALID 0xFFFF
#define NVM_ETK_VALID 0x8000
#define NVM_INVALID_PTR 0xFFFF
-#define NVM_VER_SIZE 32 /* version sting size */
+#define NVM_VER_SIZE 32 /* version string size */
struct ixgbe_nvm_version {
u32 etk_id;
@@ -394,6 +403,8 @@ struct ixgbe_nvm_version {
#define IXGBE_MRCTL(_i) (0x0F600 + ((_i) * 4))
#define IXGBE_VMRVLAN(_i) (0x0F610 + ((_i) * 4))
#define IXGBE_VMRVM(_i) (0x0F630 + ((_i) * 4))
+#define IXGBE_LVMMC_RX 0x2FA8
+#define IXGBE_LVMMC_TX 0x8108
#define IXGBE_WQBR_RX(_i) (0x2FB0 + ((_i) * 4)) /* 4 total */
#define IXGBE_WQBR_TX(_i) (0x8130 + ((_i) * 4)) /* 4 total */
#define IXGBE_L34T_IMIR(_i) (0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/
@@ -1034,6 +1045,7 @@ struct ixgbe_nvm_version {
#define IXGBE_GCR_EXT_VT_MODE_16 0x00000001
#define IXGBE_GCR_EXT_VT_MODE_32 0x00000002
#define IXGBE_GCR_EXT_VT_MODE_64 0x00000003
+#define IXGBE_GCR_EXT_VT_MODE_MASK 0x00000003
#define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \
IXGBE_GCR_EXT_VT_MODE_64)
@@ -1600,7 +1612,7 @@ enum {
#define IXGBE_EICR_PCI 0x00040000 /* PCI Exception */
#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */
#define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */
-#define IXGBE_EICR_LINKSEC 0x00200000 /* PN Threshold */
+#define IXGBE_EICR_FW_EVENT 0x00200000 /* Async FW event */
#define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */
#define IXGBE_EICR_TS 0x00800000 /* Thermal Sensor Event */
#define IXGBE_EICR_TIMESYNC 0x01000000 /* Timesync Event */
@@ -1636,6 +1648,7 @@ enum {
#define IXGBE_EICS_PCI IXGBE_EICR_PCI /* PCI Exception */
#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EICS_FW_EVENT IXGBE_EICR_FW_EVENT /* Async FW event */
#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
#define IXGBE_EICS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
#define IXGBE_EICS_GPI_SDP0(_hw) IXGBE_EICR_GPI_SDP0(_hw)
@@ -1654,6 +1667,7 @@ enum {
#define IXGBE_EIMS_PCI IXGBE_EICR_PCI /* PCI Exception */
#define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
#define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EIMS_FW_EVENT IXGBE_EICR_FW_EVENT /* Async FW event */
#define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
#define IXGBE_EIMS_TS IXGBE_EICR_TS /* Thermal Sensor Event */
#define IXGBE_EIMS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
@@ -1673,6 +1687,7 @@ enum {
#define IXGBE_EIMC_PCI IXGBE_EICR_PCI /* PCI Exception */
#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
#define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EIMC_FW_EVENT IXGBE_EICR_FW_EVENT /* Async FW event */
#define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
#define IXGBE_EIMC_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
#define IXGBE_EIMC_GPI_SDP0(_hw) IXGBE_EICR_GPI_SDP0(_hw)
@@ -2010,7 +2025,7 @@ enum {
/* EEPROM Addressing bits based on type (0-small, 1-large) */
#define IXGBE_EEC_ADDR_SIZE 0x00000400
#define IXGBE_EEC_SIZE 0x00007800 /* EEPROM Size */
-#define IXGBE_EERD_MAX_ADDR 0x00003FFF /* EERD alows 14 bits for addr. */
+#define IXGBE_EERD_MAX_ADDR 0x00003FFF /* EERD allows 14 bits for addr. */
#define IXGBE_EEC_SIZE_SHIFT 11
#define IXGBE_EEPROM_WORD_SIZE_SHIFT 6
@@ -2068,6 +2083,7 @@ enum {
#define IXGBE_SAN_MAC_ADDR_PTR 0x28
#define IXGBE_DEVICE_CAPS 0x2C
#define IXGBE_SERIAL_NUMBER_MAC_ADDR 0x11
+#define IXGBE_PCIE_MSIX_E610_CAPS 0xB2
#define IXGBE_PCIE_MSIX_82599_CAPS 0x72
#define IXGBE_MAX_MSIX_VECTORS_82599 0x40
#define IXGBE_PCIE_MSIX_82598_CAPS 0x62
@@ -2168,6 +2184,7 @@ enum {
#define IXGBE_PCI_DEVICE_STATUS 0xAA
#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020
#define IXGBE_PCI_LINK_STATUS 0xB2
+#define IXGBE_PCI_LINK_STATUS_E610 0x82
#define IXGBE_PCI_DEVICE_CONTROL2 0xC8
#define IXGBE_PCI_LINK_WIDTH 0x3F0
#define IXGBE_PCI_LINK_WIDTH_1 0x10
@@ -2179,7 +2196,6 @@ enum {
#define IXGBE_PCI_LINK_SPEED_5000 0x2
#define IXGBE_PCI_LINK_SPEED_8000 0x3
#define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E
-#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80
#define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005
#define IXGBE_PCIDEVCTRL2_TIMEO_MASK 0xf
@@ -2289,6 +2305,7 @@ enum {
#define IXGBE_RXMTRL_V2_MGMT_MSG 0x0D00
#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */
+#define IXGBE_FCTRL_TPE 0x00000080 /* Tag Promiscuous Ena*/
#define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/
#define IXGBE_FCTRL_UPE 0x00000200 /* Unicast Promiscuous Ena */
#define IXGBE_FCTRL_BAM 0x00000400 /* Broadcast Accept Mode */
@@ -2352,6 +2369,7 @@ enum {
/* Multiple Transmit Queue Command Register */
#define IXGBE_MTQC_RT_ENA 0x1 /* DCB Enable */
#define IXGBE_MTQC_VT_ENA 0x2 /* VMDQ2 Enable */
+#define IXGBE_MTQC_NUM_TC_OR_Q 0xC /* Number of TCs or TxQs per pool */
#define IXGBE_MTQC_64Q_1PB 0x0 /* 64 queues 1 pack buffer */
#define IXGBE_MTQC_32VF 0x8 /* 4 TX Queues per pool w/32VF's */
#define IXGBE_MTQC_64VF 0x4 /* 2 TX Queues per pool w/64VF's */
@@ -2732,6 +2750,28 @@ enum ixgbe_fdir_pballoc_type {
#define FW_PHY_INFO_ID_HI_MASK 0xFFFF0000u
#define FW_PHY_INFO_ID_LO_MASK 0x0000FFFFu
+/* There are only 3 options for VF creation on this device:
+ * 16 VF pool with 8 queues each
+ * 32 VF pool with 4 queues each
+ * 64 VF pool with 2 queues each
+ *
+ * Reading the VF registers that map a VF to its queues therefore depends on
+ * the chosen option. Define values that help deal with each scenario.
+ */
+/* Number of queues per VF based on the VF pool size */
+#define IXGBE_16VFS_QUEUES 8
+#define IXGBE_32VFS_QUEUES 4
+#define IXGBE_64VFS_QUEUES 2
+/* Mask for getting queue bits based on the VF pool size */
+#define IXGBE_16VFS_BITMASK GENMASK(IXGBE_16VFS_QUEUES - 1, 0)
+#define IXGBE_32VFS_BITMASK GENMASK(IXGBE_32VFS_QUEUES - 1, 0)
+#define IXGBE_64VFS_BITMASK GENMASK(IXGBE_64VFS_QUEUES - 1, 0)
+/* Convert queue index to register number.
+ * We have 4 registers with 32 queues in each.
+ */
+#define IXGBE_QUEUES_PER_REG 32
+#define IXGBE_QUEUES_REG_AMOUNT 4
+
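
As a hedged illustration of the queue layout described in the comment above (not code from this patch), translating a VF's relative queue index into the register index and bit position used by the per-queue registers could look like the helper below; vf_queue_to_reg() is hypothetical and the 16-VF/8-queue pool is assumed.

static void vf_queue_to_reg(u32 vf, u32 queue, u32 *reg_idx, u32 *bit)
{
	/* absolute queue number for the 16-VF pool */
	u32 abs_q = vf * IXGBE_16VFS_QUEUES + queue;

	/* one of IXGBE_QUEUES_REG_AMOUNT (4) registers ... */
	*reg_idx = abs_q / IXGBE_QUEUES_PER_REG;
	/* ... and the bit within that 32-queue register */
	*bit = abs_q % IXGBE_QUEUES_PER_REG;
}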
/* Host Interface Command Structures */
struct ixgbe_hic_hdr {
u8 cmd;
@@ -2897,6 +2937,13 @@ struct ixgbe_adv_tx_context_desc {
__le32 mss_l4len_idx;
};
+enum {
+ IXGBE_VLAN_MACIP_LENS_REG = 0,
+ IXGBE_FCEOF_SAIDX_REG = 1,
+ IXGBE_TYPE_TUCMD_MLHL = 2,
+ IXGBE_MSS_L4LEN_IDX = 3,
+};
+
/* Adv Transmit Descriptor Config Masks */
#define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buf length(bytes) */
#define IXGBE_ADVTXD_MAC_LINKSEC 0x00040000 /* Insert LinkSec */
@@ -2904,7 +2951,7 @@ struct ixgbe_adv_tx_context_desc {
#define IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK 0x000003FF /* IPSec SA index */
#define IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK 0x000001FF /* IPSec ESP length */
#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */
-#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */
+#define IXGBE_ADVTXD_DTYP_CTXT 0x2 /* Advanced Context Desc */
#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */
#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */
@@ -2953,6 +3000,8 @@ struct ixgbe_adv_tx_context_desc {
#define IXGBE_ADVTXD_FCOEF_EOF_MASK (3u << 10) /* FC EOF index */
#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
+#define IXGBE_ADVTXD_MSS_MASK GENMASK(31, IXGBE_ADVTXD_MSS_SHIFT)
+#define IXGBE_ADVTXD_HEADER_LEN_MASK GENMASK(8, 0)
/* Autonegotiation advertised speeds */
typedef u32 ixgbe_autoneg_advertised;
@@ -2971,6 +3020,29 @@ typedef u32 ixgbe_link_speed;
IXGBE_LINK_SPEED_1GB_FULL | \
IXGBE_LINK_SPEED_10GB_FULL)
+/* Physical layer type */
+typedef u64 ixgbe_physical_layer;
+#define IXGBE_PHYSICAL_LAYER_UNKNOWN 0
+#define IXGBE_PHYSICAL_LAYER_10GBASE_T 0x00001
+#define IXGBE_PHYSICAL_LAYER_1000BASE_T 0x00002
+#define IXGBE_PHYSICAL_LAYER_100BASE_TX 0x00004
+#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x00008
+#define IXGBE_PHYSICAL_LAYER_10GBASE_LR 0x00010
+#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x00020
+#define IXGBE_PHYSICAL_LAYER_10GBASE_SR 0x00040
+#define IXGBE_PHYSICAL_LAYER_10GBASE_KX4 0x00080
+#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4 0x00100
+#define IXGBE_PHYSICAL_LAYER_1000BASE_KX 0x00200
+#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x00400
+#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x00800
+#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x01000
+#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x02000
+#define IXGBE_PHYSICAL_LAYER_1000BASE_SX 0x04000
+#define IXGBE_PHYSICAL_LAYER_10BASE_T 0x08000
+#define IXGBE_PHYSICAL_LAYER_2500BASE_KX 0x10000
+#define IXGBE_PHYSICAL_LAYER_2500BASE_T 0x20000
+#define IXGBE_PHYSICAL_LAYER_5000BASE_T 0x40000
+
/* Flow Control Data Sheet defined values
* Calculation and defines taken from 802.1bb Annex O
*/
@@ -3146,6 +3218,8 @@ enum ixgbe_mac_type {
ixgbe_mac_X550,
ixgbe_mac_X550EM_x,
ixgbe_mac_x550em_a,
+ ixgbe_mac_e610,
+ ixgbe_mac_e610_vf,
ixgbe_num_macs
};
@@ -3225,7 +3299,9 @@ enum ixgbe_media_type {
ixgbe_media_type_copper,
ixgbe_media_type_backplane,
ixgbe_media_type_cx4,
- ixgbe_media_type_virtual
+ ixgbe_media_type_virtual,
+ ixgbe_media_type_da,
+ ixgbe_media_type_aui,
};
/* Flow Control Settings */
@@ -3234,7 +3310,8 @@ enum ixgbe_fc_mode {
ixgbe_fc_rx_pause,
ixgbe_fc_tx_pause,
ixgbe_fc_full,
- ixgbe_fc_default
+ ixgbe_fc_default,
+ ixgbe_fc_pfc,
};
/* Smart Speed Settings */
@@ -3404,6 +3481,8 @@ struct ixgbe_eeprom_operations {
int (*validate_checksum)(struct ixgbe_hw *, u16 *);
int (*update_checksum)(struct ixgbe_hw *);
int (*calc_checksum)(struct ixgbe_hw *);
+ int (*read_pba_string)(struct ixgbe_hw *hw, u8 *pba_num,
+ u32 pba_num_size);
};
struct ixgbe_mac_operations {
@@ -3412,6 +3491,7 @@ struct ixgbe_mac_operations {
int (*start_hw)(struct ixgbe_hw *);
int (*clear_hw_cntrs)(struct ixgbe_hw *);
enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
+ int (*get_fw_ver)(struct ixgbe_hw *hw);
int (*get_mac_addr)(struct ixgbe_hw *, u8 *);
int (*get_san_mac_addr)(struct ixgbe_hw *, u8 *);
int (*get_device_caps)(struct ixgbe_hw *, u16 *);
@@ -3480,6 +3560,8 @@ struct ixgbe_mac_operations {
int (*get_thermal_sensor_data)(struct ixgbe_hw *);
int (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
bool (*fw_recovery_mode)(struct ixgbe_hw *hw);
+ bool (*fw_rollback_mode)(struct ixgbe_hw *hw);
+ int (*get_nvm_ver)(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm);
void (*disable_rx)(struct ixgbe_hw *hw);
void (*enable_rx)(struct ixgbe_hw *hw);
void (*set_source_address_pruning)(struct ixgbe_hw *, bool,
@@ -3492,6 +3574,12 @@ struct ixgbe_mac_operations {
int (*dmac_config_tcs)(struct ixgbe_hw *hw);
int (*read_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32 *);
int (*write_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32);
+
+ /* MDD events */
+ void (*enable_mdd)(struct ixgbe_hw *hw);
+ void (*disable_mdd)(struct ixgbe_hw *hw);
+ void (*restore_mdd_vf)(struct ixgbe_hw *hw, u32 vf);
+ void (*handle_mdd)(struct ixgbe_hw *hw, unsigned long *vf_bitmap);
};
struct ixgbe_phy_operations {
@@ -3534,6 +3622,9 @@ struct ixgbe_link_operations {
struct ixgbe_link_info {
struct ixgbe_link_operations ops;
u8 addr;
+ struct ixgbe_link_status link_info;
+ struct ixgbe_link_status link_info_old;
+ u8 get_link_info;
};
struct ixgbe_eeprom_info {
@@ -3576,6 +3667,7 @@ struct ixgbe_mac_info {
u8 san_mac_rar_index;
struct ixgbe_thermal_sensor_data thermal_sensor_data;
bool set_lben;
+ u32 max_link_up_time;
u8 led_link_act;
};
@@ -3600,19 +3692,10 @@ struct ixgbe_phy_info {
bool reset_if_overtemp;
bool qsfp_shared_i2c_bus;
u32 nw_mng_if_sel;
-};
-
-#include "ixgbe_mbx.h"
-
-struct ixgbe_mbx_operations {
- int (*init_params)(struct ixgbe_hw *hw);
- int (*read)(struct ixgbe_hw *, u32 *, u16, u16);
- int (*write)(struct ixgbe_hw *, u32 *, u16, u16);
- int (*read_posted)(struct ixgbe_hw *, u32 *, u16, u16);
- int (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16);
- int (*check_for_msg)(struct ixgbe_hw *, u16);
- int (*check_for_ack)(struct ixgbe_hw *, u16);
- int (*check_for_rst)(struct ixgbe_hw *, u16);
+ u64 phy_type_low;
+ u64 phy_type_high;
+ u16 curr_user_speed_req;
+ struct ixgbe_aci_cmd_set_phy_cfg_data curr_user_phy_cfg;
};
struct ixgbe_mbx_stats {
@@ -3624,6 +3707,8 @@ struct ixgbe_mbx_stats {
u32 rsts;
};
+struct ixgbe_mbx_operations;
+
struct ixgbe_mbx_info {
const struct ixgbe_mbx_operations *ops;
struct ixgbe_mbx_stats stats;
@@ -3655,6 +3740,20 @@ struct ixgbe_hw {
bool allow_unsupported_sfp;
bool wol_enabled;
bool need_crosstalk_fix;
+ u8 api_branch;
+ u8 api_maj_ver;
+ u8 api_min_ver;
+ u8 api_patch;
+ u8 fw_branch;
+ u8 fw_maj_ver;
+ u8 fw_min_ver;
+ u8 fw_patch;
+ u32 fw_build;
+ struct ixgbe_aci_info aci;
+ struct ixgbe_flash_info flash;
+ struct ixgbe_hw_dev_caps dev_caps;
+ struct ixgbe_hw_func_caps func_caps;
+ struct libie_fwlog fwlog;
};
struct ixgbe_info {
@@ -3676,9 +3775,7 @@ struct ixgbe_info {
#define IXGBE_KRM_LINK_S1(P) ((P) ? 0x8200 : 0x4200)
#define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C)
#define IXGBE_KRM_AN_CNTL_1(P) ((P) ? 0x822C : 0x422C)
-#define IXGBE_KRM_AN_CNTL_4(P) ((P) ? 0x8238 : 0x4238)
#define IXGBE_KRM_AN_CNTL_8(P) ((P) ? 0x8248 : 0x4248)
-#define IXGBE_KRM_PCS_KX_AN(P) ((P) ? 0x9918 : 0x5918)
#define IXGBE_KRM_SGMII_CTRL(P) ((P) ? 0x82A0 : 0x42A0)
#define IXGBE_KRM_LP_BASE_PAGE_HIGH(P) ((P) ? 0x836C : 0x436C)
#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P) ? 0x8634 : 0x4634)
@@ -3688,7 +3785,6 @@ struct ixgbe_info {
#define IXGBE_KRM_PMD_FLX_MASK_ST20(P) ((P) ? 0x9054 : 0x5054)
#define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P) ? 0x9520 : 0x5520)
#define IXGBE_KRM_RX_ANA_CTL(P) ((P) ? 0x9A00 : 0x5A00)
-#define IXGBE_KRM_FLX_TMRS_CTRL_ST31(P) ((P) ? 0x9180 : 0x5180)
#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA ~(0x3 << 20)
#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR BIT(20)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h
new file mode 100644
index 000000000000..ff8d640a50b1
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h
@@ -0,0 +1,1032 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2024 Intel Corporation. */
+
+#ifndef _IXGBE_TYPE_E610_H_
+#define _IXGBE_TYPE_E610_H_
+
+#include <linux/net/intel/libie/adminq.h>
+
+#define BYTES_PER_DWORD 4
+
+/* General E610 defines */
+#define IXGBE_MAX_VSI 768
+
+/* Checksum and Shadow RAM pointers */
+#define IXGBE_E610_SR_NVM_CTRL_WORD 0x00
+#define IXGBE_E610_SR_PBA_BLOCK_PTR 0x16
+#define IXGBE_E610_SR_PBA_BLOCK_MASK GENMASK(15, 8)
+#define IXGBE_E610_SR_NVM_DEV_STARTER_VER 0x18
+#define IXGBE_E610_SR_NVM_EETRACK_LO 0x2D
+#define IXGBE_E610_SR_NVM_EETRACK_HI 0x2E
+#define IXGBE_E610_NVM_VER_LO_MASK GENMASK(7, 0)
+#define IXGBE_E610_NVM_VER_HI_MASK GENMASK(15, 12)
+#define IXGBE_E610_SR_SW_CHECKSUM_WORD 0x3F
+#define IXGBE_E610_SR_PFA_PTR 0x40
+#define IXGBE_E610_SR_1ST_NVM_BANK_PTR 0x42
+#define IXGBE_E610_SR_NVM_BANK_SIZE 0x43
+#define IXGBE_E610_SR_1ST_OROM_BANK_PTR 0x44
+#define IXGBE_E610_SR_OROM_BANK_SIZE 0x45
+#define IXGBE_E610_SR_NETLIST_BANK_PTR 0x46
+#define IXGBE_E610_SR_NETLIST_BANK_SIZE 0x47
+
+/* The OROM version topology */
+#define IXGBE_OROM_VER_PATCH_MASK GENMASK_ULL(7, 0)
+#define IXGBE_OROM_VER_BUILD_MASK GENMASK_ULL(23, 8)
+#define IXGBE_OROM_VER_MASK GENMASK_ULL(31, 24)
+
+/* CSS Header words */
+#define IXGBE_NVM_CSS_HDR_LEN_L 0x02
+#define IXGBE_NVM_CSS_HDR_LEN_H 0x03
+#define IXGBE_NVM_CSS_SREV_L 0x14
+#define IXGBE_NVM_CSS_SREV_H 0x15
+
+#define IXGBE_HDR_LEN_ROUNDUP 32
+
+/* Length of Authentication header section in words */
+#define IXGBE_NVM_AUTH_HEADER_LEN 0x08
+
+/* Shadow RAM related */
+#define IXGBE_SR_WORDS_IN_1KB 512
+
+/* The Netlist ID Block is located after all of the Link Topology nodes. */
+#define IXGBE_NETLIST_ID_BLK_SIZE 0x30
+#define IXGBE_NETLIST_ID_BLK_OFFSET(n) IXGBE_NETLIST_LINK_TOPO_OFFSET(0x0004 + 2 * (n))
+
+/* netlist ID block field offsets (word offsets) */
+#define IXGBE_NETLIST_ID_BLK_MAJOR_VER_LOW 0x02
+#define IXGBE_NETLIST_ID_BLK_MAJOR_VER_HIGH 0x03
+#define IXGBE_NETLIST_ID_BLK_MINOR_VER_LOW 0x04
+#define IXGBE_NETLIST_ID_BLK_MINOR_VER_HIGH 0x05
+#define IXGBE_NETLIST_ID_BLK_TYPE_LOW 0x06
+#define IXGBE_NETLIST_ID_BLK_TYPE_HIGH 0x07
+#define IXGBE_NETLIST_ID_BLK_REV_LOW 0x08
+#define IXGBE_NETLIST_ID_BLK_REV_HIGH 0x09
+#define IXGBE_NETLIST_ID_BLK_SHA_HASH_WORD(n) (0x0A + (n))
+#define IXGBE_NETLIST_ID_BLK_CUST_VER 0x2F
+
+/* The Link Topology Netlist section is stored as a series of words. It is
+ * stored in the NVM as a TLV, with the first two words containing the type
+ * and length.
+ */
+#define IXGBE_NETLIST_LINK_TOPO_MOD_ID 0x011B
+#define IXGBE_NETLIST_TYPE_OFFSET 0x0000
+#define IXGBE_NETLIST_LEN_OFFSET 0x0001
+
+/* The Link Topology section follows the TLV header. When reading the netlist
+ * using ixgbe_read_netlist_module, we need to account for the 2-word TLV
+ * header.
+ */
+#define IXGBE_NETLIST_LINK_TOPO_OFFSET(n) ((n) + 2)
+#define IXGBE_LINK_TOPO_MODULE_LEN IXGBE_NETLIST_LINK_TOPO_OFFSET(0x0000)
+#define IXGBE_LINK_TOPO_NODE_COUNT IXGBE_NETLIST_LINK_TOPO_OFFSET(0x0001)
+#define IXGBE_LINK_TOPO_NODE_COUNT_M GENMASK_ULL(9, 0)
+
+/* Firmware Status Register (GL_FWSTS) */
+#define GL_FWSTS 0x00083048 /* Reset Source: POR */
+#define GL_FWSTS_EP_PF0 BIT(24)
+#define GL_FWSTS_EP_PF1 BIT(25)
+
+/* Global NVM General Status Register */
+#define GLNVM_GENS 0x000B6100 /* Reset Source: POR */
+#define GLNVM_GENS_SR_SIZE_M GENMASK(7, 5)
+
+#define IXGBE_GL_MNG_FWSM 0x000B6134 /* Reset Source: POR */
+#define IXGBE_GL_MNG_FWSM_RECOVERY_M BIT(1)
+#define IXGBE_GL_MNG_FWSM_ROLLBACK_M BIT(2)
+
+/* Flash Access Register */
+#define IXGBE_GLNVM_FLA 0x000B6108 /* Reset Source: POR */
+#define IXGBE_GLNVM_FLA_LOCKED_S 6
+#define IXGBE_GLNVM_FLA_LOCKED_M BIT(6)
+
+/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
+#define IXGBE_SR_CTRL_WORD_1_M GENMASK(7, 6)
+#define IXGBE_SR_CTRL_WORD_VALID BIT(0)
+#define IXGBE_SR_CTRL_WORD_OROM_BANK BIT(3)
+#define IXGBE_SR_CTRL_WORD_NETLIST_BANK BIT(4)
+#define IXGBE_SR_CTRL_WORD_NVM_BANK BIT(5)
+#define IXGBE_SR_NVM_PTR_4KB_UNITS BIT(15)
+
+/* Admin Command Interface (ACI) registers */
+#define IXGBE_PF_HIDA(_i) (0x00085000 + ((_i) * 4))
+#define IXGBE_PF_HIDA_2(_i) (0x00085020 + ((_i) * 4))
+#define IXGBE_PF_HIBA(_i) (0x00084000 + ((_i) * 4))
+#define IXGBE_PF_HICR 0x00082048
+
+#define IXGBE_PF_HICR_EN BIT(0)
+#define IXGBE_PF_HICR_C BIT(1)
+#define IXGBE_PF_HICR_SV BIT(2)
+#define IXGBE_PF_HICR_EV BIT(3)
+
+#define IXGBE_FW_API_VER_MAJOR 0x01
+#define IXGBE_FW_API_VER_MINOR 0x07
+#define IXGBE_FW_API_VER_DIFF_ALLOWED 0x02
+
+#define IXGBE_ACI_DESC_SIZE 32
+#define IXGBE_ACI_DESC_SIZE_IN_DWORDS (IXGBE_ACI_DESC_SIZE / BYTES_PER_DWORD)
+
+#define IXGBE_ACI_MAX_BUFFER_SIZE 4096 /* Size in bytes */
+#define IXGBE_ACI_SEND_DELAY_TIME_MS 10
+#define IXGBE_ACI_SEND_MAX_EXECUTE 3
+#define IXGBE_ACI_SEND_TIMEOUT_MS \
+ (IXGBE_ACI_SEND_MAX_EXECUTE * IXGBE_ACI_SEND_DELAY_TIME_MS)
+/* [ms] timeout of waiting for sync response */
+#define IXGBE_ACI_SYNC_RESPONSE_TIMEOUT 100000
+/* [ms] timeout of waiting for async response */
+#define IXGBE_ACI_ASYNC_RESPONSE_TIMEOUT 150000
+/* [ms] timeout of waiting for resource release */
+#define IXGBE_ACI_RELEASE_RES_TIMEOUT 10000
+
+/* Admin Command Interface (ACI) opcodes */
+enum ixgbe_aci_opc {
+ ixgbe_aci_opc_get_ver = 0x0001,
+ ixgbe_aci_opc_driver_ver = 0x0002,
+ ixgbe_aci_opc_get_exp_err = 0x0005,
+
+ /* resource ownership */
+ ixgbe_aci_opc_req_res = 0x0008,
+ ixgbe_aci_opc_release_res = 0x0009,
+
+ /* device/function capabilities */
+ ixgbe_aci_opc_list_func_caps = 0x000A,
+ ixgbe_aci_opc_list_dev_caps = 0x000B,
+
+ /* safe disable of RXEN */
+ ixgbe_aci_opc_disable_rxen = 0x000C,
+
+ /* FW events */
+ ixgbe_aci_opc_get_fw_event = 0x0014,
+
+ /* PHY commands */
+ ixgbe_aci_opc_get_phy_caps = 0x0600,
+ ixgbe_aci_opc_set_phy_cfg = 0x0601,
+ ixgbe_aci_opc_restart_an = 0x0605,
+ ixgbe_aci_opc_get_link_status = 0x0607,
+ ixgbe_aci_opc_set_event_mask = 0x0613,
+ ixgbe_aci_opc_get_link_topo = 0x06E0,
+ ixgbe_aci_opc_get_link_topo_pin = 0x06E1,
+ ixgbe_aci_opc_read_i2c = 0x06E2,
+ ixgbe_aci_opc_write_i2c = 0x06E3,
+ ixgbe_aci_opc_read_mdio = 0x06E4,
+ ixgbe_aci_opc_write_mdio = 0x06E5,
+ ixgbe_aci_opc_set_gpio_by_func = 0x06E6,
+ ixgbe_aci_opc_get_gpio_by_func = 0x06E7,
+ ixgbe_aci_opc_set_port_id_led = 0x06E9,
+ ixgbe_aci_opc_set_gpio = 0x06EC,
+ ixgbe_aci_opc_get_gpio = 0x06ED,
+ ixgbe_aci_opc_sff_eeprom = 0x06EE,
+ ixgbe_aci_opc_prog_topo_dev_nvm = 0x06F2,
+ ixgbe_aci_opc_read_topo_dev_nvm = 0x06F3,
+
+ /* NVM commands */
+ ixgbe_aci_opc_nvm_read = 0x0701,
+ ixgbe_aci_opc_nvm_erase = 0x0702,
+ ixgbe_aci_opc_nvm_write = 0x0703,
+ ixgbe_aci_opc_nvm_cfg_read = 0x0704,
+ ixgbe_aci_opc_nvm_cfg_write = 0x0705,
+ ixgbe_aci_opc_nvm_checksum = 0x0706,
+ ixgbe_aci_opc_nvm_write_activate = 0x0707,
+ ixgbe_aci_opc_nvm_sr_dump = 0x0707,
+ ixgbe_aci_opc_nvm_save_factory_settings = 0x0708,
+ ixgbe_aci_opc_nvm_update_empr = 0x0709,
+ ixgbe_aci_opc_nvm_pkg_data = 0x070A,
+ ixgbe_aci_opc_nvm_pass_component_tbl = 0x070B,
+
+ /* Alternate Structure Commands */
+ ixgbe_aci_opc_write_alt_direct = 0x0900,
+ ixgbe_aci_opc_write_alt_indirect = 0x0901,
+ ixgbe_aci_opc_read_alt_direct = 0x0902,
+ ixgbe_aci_opc_read_alt_indirect = 0x0903,
+ ixgbe_aci_opc_done_alt_write = 0x0904,
+ ixgbe_aci_opc_clear_port_alt_write = 0x0906,
+
+ /* TCA Events */
+ ixgbe_aci_opc_temp_tca_event = 0x0C94,
+
+ /* debug commands */
+ ixgbe_aci_opc_debug_dump_internals = 0xFF08,
+
+ /* SystemDiagnostic commands */
+ ixgbe_aci_opc_set_health_status_config = 0xFF20,
+ ixgbe_aci_opc_get_supported_health_status_codes = 0xFF21,
+ ixgbe_aci_opc_get_health_status = 0xFF22,
+ ixgbe_aci_opc_clear_health_status = 0xFF23,
+};
+
+#define IXGBE_DRV_VER_STR_LEN_E610 32
+
+/* Get Expanded Error Code (0x0005, direct) */
+struct ixgbe_aci_cmd_get_exp_err {
+ __le32 reason;
+#define IXGBE_ACI_EXPANDED_ERROR_NOT_PROVIDED 0xFFFFFFFF
+ __le32 identifier;
+ u8 rsvd[8];
+};
+
+/* FW update timeout definitions are in milliseconds */
+#define IXGBE_NVM_TIMEOUT 180000
+
+/* Disable RXEN (direct 0x000C) */
+struct ixgbe_aci_cmd_disable_rxen {
+ u8 lport_num;
+ u8 reserved[15];
+};
+
+/* Get PHY capabilities (indirect 0x0600) */
+struct ixgbe_aci_cmd_get_phy_caps {
+ u8 lport_num;
+ u8 reserved;
+ __le16 param0;
+ /* 18.0 - Report qualified modules */
+#define IXGBE_ACI_GET_PHY_RQM BIT(0)
+ /* 18.1 - 18.3 : Report mode
+ * 000b - Report topology capabilities, without media
+ * 001b - Report topology capabilities, with media
+ * 010b - Report Active configuration
+ * 011b - Report PHY Type and FEC mode capabilities
+ * 100b - Report Default capabilities
+ */
+#define IXGBE_ACI_REPORT_MODE_M GENMASK(3, 1)
+#define IXGBE_ACI_REPORT_TOPO_CAP_NO_MEDIA 0
+#define IXGBE_ACI_REPORT_TOPO_CAP_MEDIA BIT(1)
+#define IXGBE_ACI_REPORT_ACTIVE_CFG BIT(2)
+#define IXGBE_ACI_REPORT_DFLT_CFG BIT(3)
+ __le32 reserved1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
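
To illustrate the report-mode encoding in param0 above (an editor's sketch under the assumption that the defines are used as-is, not code from this series), requesting the currently active PHY configuration might fill the command like this:

struct ixgbe_aci_cmd_get_phy_caps cmd = {};

cmd.lport_num = 0;	/* logical port of interest */
cmd.param0 = cpu_to_le16(IXGBE_ACI_REPORT_ACTIVE_CFG);	/* mode 010b */
/* addr_high/addr_low would point at a DMA buffer that receives
 * struct ixgbe_aci_cmd_get_phy_caps_data in the real command path.
 */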
+/* PHY type defines (extended):
+ * The first set of defines is for phy_type_low.
+ */
+#define IXGBE_PHY_TYPE_LOW_100BASE_TX BIT_ULL(0)
+#define IXGBE_PHY_TYPE_LOW_100M_SGMII BIT_ULL(1)
+#define IXGBE_PHY_TYPE_LOW_1000BASE_T BIT_ULL(2)
+#define IXGBE_PHY_TYPE_LOW_1000BASE_SX BIT_ULL(3)
+#define IXGBE_PHY_TYPE_LOW_1000BASE_LX BIT_ULL(4)
+#define IXGBE_PHY_TYPE_LOW_1000BASE_KX BIT_ULL(5)
+#define IXGBE_PHY_TYPE_LOW_1G_SGMII BIT_ULL(6)
+#define IXGBE_PHY_TYPE_LOW_2500BASE_T BIT_ULL(7)
+#define IXGBE_PHY_TYPE_LOW_2500BASE_X BIT_ULL(8)
+#define IXGBE_PHY_TYPE_LOW_2500BASE_KX BIT_ULL(9)
+#define IXGBE_PHY_TYPE_LOW_5GBASE_T BIT_ULL(10)
+#define IXGBE_PHY_TYPE_LOW_5GBASE_KR BIT_ULL(11)
+#define IXGBE_PHY_TYPE_LOW_10GBASE_T BIT_ULL(12)
+#define IXGBE_PHY_TYPE_LOW_10G_SFI_DA BIT_ULL(13)
+#define IXGBE_PHY_TYPE_LOW_10GBASE_SR BIT_ULL(14)
+#define IXGBE_PHY_TYPE_LOW_10GBASE_LR BIT_ULL(15)
+#define IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1 BIT_ULL(16)
+#define IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC BIT_ULL(17)
+#define IXGBE_PHY_TYPE_LOW_10G_SFI_C2C BIT_ULL(18)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_T BIT_ULL(19)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_CR BIT_ULL(20)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_CR_S BIT_ULL(21)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_CR1 BIT_ULL(22)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_SR BIT_ULL(23)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_LR BIT_ULL(24)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_KR BIT_ULL(25)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_KR_S BIT_ULL(26)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_KR1 BIT_ULL(27)
+#define IXGBE_PHY_TYPE_LOW_25G_AUI_AOC_ACC BIT_ULL(28)
+#define IXGBE_PHY_TYPE_LOW_25G_AUI_C2C BIT_ULL(29)
+#define IXGBE_PHY_TYPE_LOW_MAX_INDEX 29
+/* The second set of defines is for phy_type_high. */
+#define IXGBE_PHY_TYPE_HIGH_10BASE_T BIT_ULL(1)
+#define IXGBE_PHY_TYPE_HIGH_10M_SGMII BIT_ULL(2)
+#define IXGBE_PHY_TYPE_HIGH_2500M_SGMII BIT_ULL(56)
+#define IXGBE_PHY_TYPE_HIGH_100M_USXGMII BIT_ULL(57)
+#define IXGBE_PHY_TYPE_HIGH_1G_USXGMII BIT_ULL(58)
+#define IXGBE_PHY_TYPE_HIGH_2500M_USXGMII BIT_ULL(59)
+#define IXGBE_PHY_TYPE_HIGH_5G_USXGMII BIT_ULL(60)
+#define IXGBE_PHY_TYPE_HIGH_10G_USXGMII BIT_ULL(61)
+#define IXGBE_PHY_TYPE_HIGH_MAX_INDEX 61
+
+struct ixgbe_aci_cmd_get_phy_caps_data {
+ __le64 phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */
+ __le64 phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */
+ u8 caps;
+#define IXGBE_ACI_PHY_EN_TX_LINK_PAUSE BIT(0)
+#define IXGBE_ACI_PHY_EN_RX_LINK_PAUSE BIT(1)
+#define IXGBE_ACI_PHY_LOW_POWER_MODE BIT(2)
+#define IXGBE_ACI_PHY_EN_LINK BIT(3)
+#define IXGBE_ACI_PHY_AN_MODE BIT(4)
+#define IXGBE_ACI_PHY_EN_MOD_QUAL BIT(5)
+#define IXGBE_ACI_PHY_EN_LESM BIT(6)
+#define IXGBE_ACI_PHY_EN_AUTO_FEC BIT(7)
+#define IXGBE_ACI_PHY_CAPS_MASK GENMASK(7, 0)
+ u8 low_power_ctrl_an;
+#define IXGBE_ACI_PHY_EN_D3COLD_LOW_POWER_AUTONEG BIT(0)
+#define IXGBE_ACI_PHY_AN_EN_CLAUSE28 BIT(1)
+#define IXGBE_ACI_PHY_AN_EN_CLAUSE73 BIT(2)
+#define IXGBE_ACI_PHY_AN_EN_CLAUSE37 BIT(3)
+ __le16 eee_cap;
+#define IXGBE_ACI_PHY_EEE_EN_100BASE_TX BIT(0)
+#define IXGBE_ACI_PHY_EEE_EN_1000BASE_T BIT(1)
+#define IXGBE_ACI_PHY_EEE_EN_10GBASE_T BIT(2)
+#define IXGBE_ACI_PHY_EEE_EN_1000BASE_KX BIT(3)
+#define IXGBE_ACI_PHY_EEE_EN_10GBASE_KR BIT(4)
+#define IXGBE_ACI_PHY_EEE_EN_25GBASE_KR BIT(5)
+#define IXGBE_ACI_PHY_EEE_EN_10BASE_T BIT(11)
+ __le16 eeer_value;
+ u8 phy_id_oui[4]; /* PHY/Module ID connected on the port */
+ u8 phy_fw_ver[8];
+ u8 link_fec_options;
+#define IXGBE_ACI_PHY_FEC_10G_KR_40G_KR4_EN BIT(0)
+#define IXGBE_ACI_PHY_FEC_10G_KR_40G_KR4_REQ BIT(1)
+#define IXGBE_ACI_PHY_FEC_25G_RS_528_REQ BIT(2)
+#define IXGBE_ACI_PHY_FEC_25G_KR_REQ BIT(3)
+#define IXGBE_ACI_PHY_FEC_25G_RS_544_REQ BIT(4)
+#define IXGBE_ACI_PHY_FEC_25G_RS_CLAUSE91_EN BIT(6)
+#define IXGBE_ACI_PHY_FEC_25G_KR_CLAUSE74_EN BIT(7)
+#define IXGBE_ACI_PHY_FEC_MASK 0xdf
+ u8 module_compliance_enforcement;
+#define IXGBE_ACI_MOD_ENFORCE_STRICT_MODE BIT(0)
+ u8 extended_compliance_code;
+#define IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE 3
+ u8 module_type[IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE];
+#define IXGBE_ACI_MOD_TYPE_BYTE0_SFP_PLUS 0xA0
+#define IXGBE_ACI_MOD_TYPE_BYTE0_QSFP_PLUS 0x80
+#define IXGBE_ACI_MOD_TYPE_IDENT 1
+#define IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE BIT(0)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE BIT(1)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_SR BIT(4)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LR BIT(5)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LRM BIT(6)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_ER BIT(7)
+#define IXGBE_ACI_MOD_TYPE_BYTE2_SFP_PLUS 0xA0
+#define IXGBE_ACI_MOD_TYPE_BYTE2_QSFP_PLUS 0x86
+ u8 qualified_module_count;
+ u8 rsvd2[7]; /* Bytes 47:41 reserved */
+#define IXGBE_ACI_QUAL_MOD_COUNT_MAX 16
+ struct {
+ u8 v_oui[3];
+ u8 rsvd3;
+ u8 v_part[16];
+ __le32 v_rev;
+ __le64 rsvd4;
+ } qual_modules[IXGBE_ACI_QUAL_MOD_COUNT_MAX];
+};
+
+/* Set PHY capabilities (direct 0x0601)
+ * NOTE: This command must be followed by setup link and restart auto-neg
+ */
+struct ixgbe_aci_cmd_set_phy_cfg {
+ u8 lport_num;
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Set PHY config command data structure */
+struct ixgbe_aci_cmd_set_phy_cfg_data {
+ __le64 phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */
+ __le64 phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */
+ u8 caps;
+#define IXGBE_ACI_PHY_ENA_VALID_MASK 0xef
+#define IXGBE_ACI_PHY_ENA_TX_PAUSE_ABILITY BIT(0)
+#define IXGBE_ACI_PHY_ENA_RX_PAUSE_ABILITY BIT(1)
+#define IXGBE_ACI_PHY_ENA_LOW_POWER BIT(2)
+#define IXGBE_ACI_PHY_ENA_LINK BIT(3)
+#define IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT BIT(5)
+#define IXGBE_ACI_PHY_ENA_LESM BIT(6)
+#define IXGBE_ACI_PHY_ENA_AUTO_FEC BIT(7)
+ u8 low_power_ctrl_an;
+ __le16 eee_cap; /* Value from ixgbe_aci_get_phy_caps */
+ __le16 eeer_value; /* Use defines from ixgbe_aci_get_phy_caps */
+ u8 link_fec_opt; /* Use defines from ixgbe_aci_get_phy_caps */
+ u8 module_compliance_enforcement;
+};
+
+/* Restart AN command data structure (direct 0x0605)
+ * Also used for response, with only the lport_num field present.
+ */
+struct ixgbe_aci_cmd_restart_an {
+ u8 lport_num;
+ u8 reserved;
+ u8 cmd_flags;
+#define IXGBE_ACI_RESTART_AN_LINK_RESTART BIT(1)
+#define IXGBE_ACI_RESTART_AN_LINK_ENABLE BIT(2)
+ u8 reserved2[13];
+};
+
+/* Get link status (indirect 0x0607), also used for Link Status Event */
+struct ixgbe_aci_cmd_get_link_status {
+ u8 lport_num;
+ u8 reserved;
+ __le16 cmd_flags;
+#define IXGBE_ACI_LSE_M GENMASK(1, 0)
+#define IXGBE_ACI_LSE_NOP 0x0
+#define IXGBE_ACI_LSE_DIS 0x2
+#define IXGBE_ACI_LSE_ENA 0x3
+ /* only response uses this flag */
+#define IXGBE_ACI_LSE_IS_ENABLED 0x1
+ __le32 reserved2;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Get link status response data structure, also used for Link Status Event */
+struct ixgbe_aci_cmd_get_link_status_data {
+ u8 topo_media_conflict;
+#define IXGBE_ACI_LINK_TOPO_CONFLICT BIT(0)
+#define IXGBE_ACI_LINK_MEDIA_CONFLICT BIT(1)
+#define IXGBE_ACI_LINK_TOPO_CORRUPT BIT(2)
+#define IXGBE_ACI_LINK_TOPO_UNREACH_PRT BIT(4)
+#define IXGBE_ACI_LINK_TOPO_UNDRUTIL_PRT BIT(5)
+#define IXGBE_ACI_LINK_TOPO_UNDRUTIL_MEDIA BIT(6)
+#define IXGBE_ACI_LINK_TOPO_UNSUPP_MEDIA BIT(7)
+ u8 link_cfg_err;
+#define IXGBE_ACI_LINK_CFG_ERR BIT(0)
+#define IXGBE_ACI_LINK_CFG_COMPLETED BIT(1)
+#define IXGBE_ACI_LINK_ACT_PORT_OPT_INVAL BIT(2)
+#define IXGBE_ACI_LINK_FEAT_ID_OR_CONFIG_ID_INVAL BIT(3)
+#define IXGBE_ACI_LINK_TOPO_CRITICAL_SDP_ERR BIT(4)
+#define IXGBE_ACI_LINK_MODULE_POWER_UNSUPPORTED BIT(5)
+#define IXGBE_ACI_LINK_EXTERNAL_PHY_LOAD_FAILURE BIT(6)
+#define IXGBE_ACI_LINK_INVAL_MAX_POWER_LIMIT BIT(7)
+ u8 link_info;
+#define IXGBE_ACI_LINK_UP BIT(0) /* Link Status */
+#define IXGBE_ACI_LINK_FAULT BIT(1)
+#define IXGBE_ACI_LINK_FAULT_TX BIT(2)
+#define IXGBE_ACI_LINK_FAULT_RX BIT(3)
+#define IXGBE_ACI_LINK_FAULT_REMOTE BIT(4)
+#define IXGBE_ACI_LINK_UP_PORT BIT(5) /* External Port Link Status */
+#define IXGBE_ACI_MEDIA_AVAILABLE BIT(6)
+#define IXGBE_ACI_SIGNAL_DETECT BIT(7)
+ u8 an_info;
+#define IXGBE_ACI_AN_COMPLETED BIT(0)
+#define IXGBE_ACI_LP_AN_ABILITY BIT(1)
+#define IXGBE_ACI_PD_FAULT BIT(2) /* Parallel Detection Fault */
+#define IXGBE_ACI_FEC_EN BIT(3)
+#define IXGBE_ACI_PHY_LOW_POWER BIT(4) /* Low Power State */
+#define IXGBE_ACI_LINK_PAUSE_TX BIT(5)
+#define IXGBE_ACI_LINK_PAUSE_RX BIT(6)
+#define IXGBE_ACI_QUALIFIED_MODULE BIT(7)
+ u8 ext_info;
+#define IXGBE_ACI_LINK_PHY_TEMP_ALARM BIT(0)
+#define IXGBE_ACI_LINK_EXCESSIVE_ERRORS BIT(1) /* Excessive Link Errors */
+ /* Port Tx Suspended */
+#define IXGBE_ACI_LINK_TX_ACTIVE 0
+#define IXGBE_ACI_LINK_TX_DRAINED 1
+#define IXGBE_ACI_LINK_TX_FLUSHED 3
+ u8 lb_status;
+#define IXGBE_ACI_LINK_LB_PHY_LCL BIT(0)
+#define IXGBE_ACI_LINK_LB_PHY_RMT BIT(1)
+#define IXGBE_ACI_LINK_LB_MAC_LCL BIT(2)
+ __le16 max_frame_size;
+ u8 cfg;
+#define IXGBE_ACI_LINK_25G_KR_FEC_EN BIT(0)
+#define IXGBE_ACI_LINK_25G_RS_528_FEC_EN BIT(1)
+#define IXGBE_ACI_LINK_25G_RS_544_FEC_EN BIT(2)
+#define IXGBE_ACI_FEC_MASK GENMASK(2, 0)
+ /* Pacing Config */
+#define IXGBE_ACI_CFG_PACING_M GENMASK(6, 3)
+#define IXGBE_ACI_CFG_PACING_TYPE_M BIT(7)
+#define IXGBE_ACI_CFG_PACING_TYPE_AVG 0
+#define IXGBE_ACI_CFG_PACING_TYPE_FIXED IXGBE_ACI_CFG_PACING_TYPE_M
+ /* External Device Power Ability */
+ u8 power_desc;
+#define IXGBE_ACI_PWR_CLASS_M GENMASK(5, 0)
+#define IXGBE_ACI_LINK_PWR_BASET_LOW_HIGH 0
+#define IXGBE_ACI_LINK_PWR_BASET_HIGH 1
+#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_1 0
+#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_2 1
+#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_3 2
+#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_4 3
+ __le16 link_speed;
+#define IXGBE_ACI_LINK_SPEED_M GENMASK(10, 0)
+#define IXGBE_ACI_LINK_SPEED_10MB BIT(0)
+#define IXGBE_ACI_LINK_SPEED_100MB BIT(1)
+#define IXGBE_ACI_LINK_SPEED_1000MB BIT(2)
+#define IXGBE_ACI_LINK_SPEED_2500MB BIT(3)
+#define IXGBE_ACI_LINK_SPEED_5GB BIT(4)
+#define IXGBE_ACI_LINK_SPEED_10GB BIT(5)
+#define IXGBE_ACI_LINK_SPEED_20GB BIT(6)
+#define IXGBE_ACI_LINK_SPEED_25GB BIT(7)
+#define IXGBE_ACI_LINK_SPEED_40GB BIT(8)
+#define IXGBE_ACI_LINK_SPEED_50GB BIT(9)
+#define IXGBE_ACI_LINK_SPEED_100GB BIT(10)
+#define IXGBE_ACI_LINK_SPEED_200GB BIT(11)
+#define IXGBE_ACI_LINK_SPEED_UNKNOWN BIT(15)
+ __le16 reserved3;
+ u8 ext_fec_status;
+#define IXGBE_ACI_LINK_RS_272_FEC_EN BIT(0) /* RS 272 FEC enabled */
+ u8 reserved4;
+ __le64 phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */
+ __le64 phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */
+ /* Get link status version 2 link partner data */
+ __le64 lp_phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */
+ __le64 lp_phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */
+ u8 lp_fec_adv;
+#define IXGBE_ACI_LINK_LP_10G_KR_FEC_CAP BIT(0)
+#define IXGBE_ACI_LINK_LP_25G_KR_FEC_CAP BIT(1)
+#define IXGBE_ACI_LINK_LP_RS_528_FEC_CAP BIT(2)
+#define IXGBE_ACI_LINK_LP_50G_KR_272_FEC_CAP BIT(3)
+#define IXGBE_ACI_LINK_LP_100G_KR_272_FEC_CAP BIT(4)
+#define IXGBE_ACI_LINK_LP_200G_KR_272_FEC_CAP BIT(5)
+ u8 lp_fec_req;
+#define IXGBE_ACI_LINK_LP_10G_KR_FEC_REQ BIT(0)
+#define IXGBE_ACI_LINK_LP_25G_KR_FEC_REQ BIT(1)
+#define IXGBE_ACI_LINK_LP_RS_528_FEC_REQ BIT(2)
+#define IXGBE_ACI_LINK_LP_KR_272_FEC_REQ BIT(3)
+ u8 lp_flowcontrol;
+#define IXGBE_ACI_LINK_LP_PAUSE_ADV BIT(0)
+#define IXGBE_ACI_LINK_LP_ASM_DIR_ADV BIT(1)
+ u8 reserved5[5];
+} __packed;
+
+/* Set event mask command (direct 0x0613) */
+struct ixgbe_aci_cmd_set_event_mask {
+ u8 lport_num;
+ u8 reserved[7];
+ __le16 event_mask;
+#define IXGBE_ACI_LINK_EVENT_UPDOWN BIT(1)
+#define IXGBE_ACI_LINK_EVENT_MEDIA_NA BIT(2)
+#define IXGBE_ACI_LINK_EVENT_LINK_FAULT BIT(3)
+#define IXGBE_ACI_LINK_EVENT_PHY_TEMP_ALARM BIT(4)
+#define IXGBE_ACI_LINK_EVENT_EXCESSIVE_ERRORS BIT(5)
+#define IXGBE_ACI_LINK_EVENT_SIGNAL_DETECT BIT(6)
+#define IXGBE_ACI_LINK_EVENT_AN_COMPLETED BIT(7)
+#define IXGBE_ACI_LINK_EVENT_MODULE_QUAL_FAIL BIT(8)
+#define IXGBE_ACI_LINK_EVENT_PORT_TX_SUSPENDED BIT(9)
+#define IXGBE_ACI_LINK_EVENT_TOPO_CONFLICT BIT(10)
+#define IXGBE_ACI_LINK_EVENT_MEDIA_CONFLICT BIT(11)
+#define IXGBE_ACI_LINK_EVENT_PHY_FW_LOAD_FAIL BIT(12)
+ u8 reserved1[6];
+};
+
+struct ixgbe_aci_cmd_link_topo_params {
+ u8 lport_num;
+ u8 lport_num_valid;
+#define IXGBE_ACI_LINK_TOPO_PORT_NUM_VALID BIT(0)
+ u8 node_type_ctx;
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_M GENMASK(3, 0)
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_PHY 0
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_GPIO_CTRL 1
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_MUX_CTRL 2
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_LED_CTRL 3
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_LED 4
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_THERMAL 5
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_CAGE 6
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_MEZZ 7
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_ID_EEPROM 8
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_CLK_CTRL 9
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_CLK_MUX 10
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_GPS 11
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_S 4
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_M GENMASK(7, 4)
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_GLOBAL 0
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_BOARD 1
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_PORT 2
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_NODE 3
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_NODE_HANDLE 4
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_DIRECT_BUS_ACCESS 5
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_NODE_HANDLE_BUS_ADDRESS 6
+ u8 index;
+};
+
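
As a hedged example of how the node type and context masks above combine (not taken from this patch), the packed node_type_ctx byte could be composed with the bitfield helpers from <linux/bitfield.h>:

/* Illustrative only: describe a PHY node addressed in port context */
u8 node_type_ctx = FIELD_PREP(IXGBE_ACI_LINK_TOPO_NODE_TYPE_M,
			      IXGBE_ACI_LINK_TOPO_NODE_TYPE_PHY) |
		   FIELD_PREP(IXGBE_ACI_LINK_TOPO_NODE_CTX_M,
			      IXGBE_ACI_LINK_TOPO_NODE_CTX_PORT);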
+struct ixgbe_aci_cmd_link_topo_addr {
+ struct ixgbe_aci_cmd_link_topo_params topo_params;
+ __le16 handle;
+/* Used to decode the handle field */
+#define IXGBE_ACI_LINK_TOPO_HANDLE_BRD_TYPE_M BIT(9)
+#define IXGBE_ACI_LINK_TOPO_HANDLE_BRD_TYPE_LOM BIT(9)
+#define IXGBE_ACI_LINK_TOPO_HANDLE_BRD_TYPE_MEZZ 0
+};
+
+/* Get Link Topology Handle (direct, 0x06E0) */
+struct ixgbe_aci_cmd_get_link_topo {
+ struct ixgbe_aci_cmd_link_topo_addr addr;
+ u8 node_part_num;
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_PCA9575 0x21
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_ZL30632_80032 0x24
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_SI5384 0x25
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_C827 0x31
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_GEN_CLK_MUX 0x47
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_GEN_GPS 0x48
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_E610_PTC 0x49
+ u8 rsvd[9];
+};
+
+/* Get Link Topology Pin (direct, 0x06E1) */
+struct ixgbe_aci_cmd_get_link_topo_pin {
+ struct ixgbe_aci_cmd_link_topo_addr addr;
+ u8 input_io_params;
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_GPIO 0
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_RESET_N 1
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_INT_N 2
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_PRESENT_N 3
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_TX_DIS 4
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_MODSEL_N 5
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_LPMODE 6
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_TX_FAULT 7
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_RX_LOSS 8
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_RS0 9
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_RS1 10
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_EEPROM_WP 11
+/* 12 repeats intentionally due to two different uses depending on context */
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_LED 12
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_RED_LED 12
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_GREEN_LED 13
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_BLUE_LED 14
+#define IXGBE_ACI_LINK_TOPO_INPUT_IO_TYPE_GPIO 3
+/* Use IXGBE_ACI_LINK_TOPO_NODE_TYPE_* for the type values */
+ u8 output_io_params;
+/* Use IXGBE_ACI_LINK_TOPO_NODE_TYPE_* for the type values */
+ u8 output_io_flags;
+#define IXGBE_ACI_LINK_TOPO_OUTPUT_POLARITY BIT(5)
+#define IXGBE_ACI_LINK_TOPO_OUTPUT_VALUE BIT(6)
+#define IXGBE_ACI_LINK_TOPO_OUTPUT_DRIVEN BIT(7)
+ u8 rsvd[7];
+};
+
+/* Set Port Identification LED (direct, 0x06E9) */
+struct ixgbe_aci_cmd_set_port_id_led {
+ u8 lport_num;
+ u8 lport_num_valid;
+ u8 ident_mode;
+ u8 rsvd[13];
+};
+
+#define IXGBE_ACI_PORT_ID_PORT_NUM_VALID BIT(0)
+#define IXGBE_ACI_PORT_IDENT_LED_ORIG 0
+#define IXGBE_ACI_PORT_IDENT_LED_BLINK BIT(0)
+
+/* Read/Write SFF EEPROM command (indirect 0x06EE) */
+struct ixgbe_aci_cmd_sff_eeprom {
+ u8 lport_num;
+ u8 lport_num_valid;
+#define IXGBE_ACI_SFF_PORT_NUM_VALID BIT(0)
+ __le16 i2c_bus_addr;
+#define IXGBE_ACI_SFF_I2CBUS_7BIT_M GENMASK(6, 0)
+#define IXGBE_ACI_SFF_I2CBUS_10BIT_M GENMASK(9, 0)
+#define IXGBE_ACI_SFF_I2CBUS_TYPE_M BIT(10)
+#define IXGBE_ACI_SFF_I2CBUS_TYPE_7BIT 0
+#define IXGBE_ACI_SFF_I2CBUS_TYPE_10BIT IXGBE_ACI_SFF_I2CBUS_TYPE_M
+#define IXGBE_ACI_SFF_NO_PAGE_BANK_UPDATE 0
+#define IXGBE_ACI_SFF_UPDATE_PAGE 1
+#define IXGBE_ACI_SFF_UPDATE_BANK 2
+#define IXGBE_ACI_SFF_UPDATE_PAGE_BANK 3
+#define IXGBE_ACI_SFF_IS_WRITE BIT(15)
+ __le16 i2c_offset;
+ u8 module_bank;
+ u8 module_page;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* NVM Read command (indirect 0x0701)
+ * NVM Erase commands (direct 0x0702)
+ * NVM Write commands (indirect 0x0703)
+ * NVM Write Activate commands (direct 0x0707)
+ * NVM Shadow RAM Dump commands (direct 0x0707)
+ */
+struct ixgbe_aci_cmd_nvm {
+#define IXGBE_ACI_NVM_MAX_OFFSET 0xFFFFFF
+ __le16 offset_low;
+ u8 offset_high; /* For Write Activate offset_high is used as flags2 */
+#define IXGBE_ACI_NVM_OFFSET_HI_A_MASK GENMASK(15, 8)
+#define IXGBE_ACI_NVM_OFFSET_HI_U_MASK GENMASK(23, 16)
+ u8 cmd_flags;
+#define IXGBE_ACI_NVM_LAST_CMD BIT(0)
+#define IXGBE_ACI_NVM_PCIR_REQ BIT(0) /* Used by NVM Write reply */
+#define IXGBE_ACI_NVM_PRESERVE_ALL BIT(1)
+#define IXGBE_ACI_NVM_ACTIV_SEL_NVM BIT(3) /* Write Activate/SR Dump only */
+#define IXGBE_ACI_NVM_ACTIV_SEL_OROM BIT(4)
+#define IXGBE_ACI_NVM_ACTIV_SEL_NETLIST BIT(5)
+#define IXGBE_ACI_NVM_SPECIAL_UPDATE BIT(6)
+#define IXGBE_ACI_NVM_REVERT_LAST_ACTIV BIT(6) /* Write Activate only */
+#define IXGBE_ACI_NVM_FLASH_ONLY BIT(7)
+#define IXGBE_ACI_NVM_RESET_LVL_M GENMASK(1, 0) /* Write reply only */
+#define IXGBE_ACI_NVM_POR_FLAG 0
+#define IXGBE_ACI_NVM_PERST_FLAG 1
+#define IXGBE_ACI_NVM_EMPR_FLAG 2
+#define IXGBE_ACI_NVM_EMPR_ENA BIT(0) /* Write Activate reply only */
+#define IXGBE_ACI_NVM_NO_PRESERVATION 0x0
+#define IXGBE_ACI_NVM_PRESERVE_SELECTED 0x6
+
+ /* For Write Activate, several flags are sent in a separate flags2 byte.
+ * For simplicity of the software interface, the flags are passed as a
+ * 16-bit value, so these flags are all offset by 8 bits.
+ */
+#define IXGBE_ACI_NVM_ACTIV_REQ_EMPR BIT(8) /* NVM Write Activate only */
+ __le16 module_typeid;
+ __le16 length;
+#define IXGBE_ACI_NVM_ERASE_LEN 0xFFFF
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
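
A minimal sketch (an assumption, not part of this patch) of splitting the combined 16-bit flags value described above back into the wire fields for Write Activate, where offset_high doubles as flags2:

/* Hypothetical helper: low byte goes into cmd_flags, high byte (flags2)
 * into offset_high, matching the 8-bit offset convention noted above.
 */
static void nvm_write_activate_fill_flags(struct ixgbe_aci_cmd_nvm *cmd,
					  u16 flags)
{
	cmd->cmd_flags = flags & 0xFF;
	cmd->offset_high = (flags >> 8) & 0xFF;
}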
+/* NVM Module_Type ID, needed offset and read_len for
+ * struct ixgbe_aci_cmd_nvm.
+ */
+#define IXGBE_ACI_NVM_START_POINT 0
+
+/* NVM Checksum Command (direct, 0x0706) */
+struct ixgbe_aci_cmd_nvm_checksum {
+ u8 flags;
+#define IXGBE_ACI_NVM_CHECKSUM_VERIFY BIT(0)
+#define IXGBE_ACI_NVM_CHECKSUM_RECALC BIT(1)
+ u8 rsvd;
+ __le16 checksum; /* Used only by response */
+#define IXGBE_ACI_NVM_CHECKSUM_CORRECT 0xBABA
+ u8 rsvd2[12];
+};
+
+/* Used for NVM Set Package Data command - 0x070A */
+struct ixgbe_aci_cmd_nvm_pkg_data {
+ u8 reserved[3];
+ u8 cmd_flags;
+#define IXGBE_ACI_NVM_PKG_DELETE BIT(0) /* used for command call */
+
+ u32 reserved1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Used for Pass Component Table command - 0x070B */
+struct ixgbe_aci_cmd_nvm_pass_comp_tbl {
+ u8 component_response; /* Response only */
+#define IXGBE_ACI_NVM_PASS_COMP_CAN_BE_UPDATED 0x0
+#define IXGBE_ACI_NVM_PASS_COMP_CAN_MAY_BE_UPDATEABLE 0x1
+#define IXGBE_ACI_NVM_PASS_COMP_CAN_NOT_BE_UPDATED 0x2
+#define IXGBE_ACI_NVM_PASS_COMP_PARTIAL_CHECK 0x3
+ u8 component_response_code; /* Response only */
+#define IXGBE_ACI_NVM_PASS_COMP_CAN_BE_UPDATED_CODE 0x0
+#define IXGBE_ACI_NVM_PASS_COMP_STAMP_IDENTICAL_CODE 0x1
+#define IXGBE_ACI_NVM_PASS_COMP_STAMP_LOWER 0x2
+#define IXGBE_ACI_NVM_PASS_COMP_INVALID_STAMP_CODE 0x3
+#define IXGBE_ACI_NVM_PASS_COMP_CONFLICT_CODE 0x4
+#define IXGBE_ACI_NVM_PASS_COMP_PRE_REQ_NOT_MET_CODE 0x5
+#define IXGBE_ACI_NVM_PASS_COMP_NOT_SUPPORTED_CODE 0x6
+#define IXGBE_ACI_NVM_PASS_COMP_CANNOT_DOWNGRADE_CODE 0x7
+#define IXGBE_ACI_NVM_PASS_COMP_INCOMPLETE_IMAGE_CODE 0x8
+#define IXGBE_ACI_NVM_PASS_COMP_VER_STR_IDENTICAL_CODE 0xA
+#define IXGBE_ACI_NVM_PASS_COMP_VER_STR_LOWER_CODE 0xB
+ u8 reserved;
+ u8 transfer_flag;
+ __le32 reserved1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ixgbe_aci_cmd_nvm_comp_tbl {
+ __le16 comp_class;
+#define NVM_COMP_CLASS_ALL_FW 0x000A
+
+ __le16 comp_id;
+#define NVM_COMP_ID_OROM 0x5
+#define NVM_COMP_ID_NVM 0x6
+#define NVM_COMP_ID_NETLIST 0x8
+
+ u8 comp_class_idx;
+#define FWU_COMP_CLASS_IDX_NOT_USE 0x0
+
+ __le32 comp_cmp_stamp;
+ u8 cvs_type;
+#define NVM_CVS_TYPE_ASCII 0x1
+
+ u8 cvs_len;
+ u8 cvs[]; /* Component Version String */
+} __packed;
+
+/* E610-specific adapter context structures */
+
+struct ixgbe_link_status {
+ /* Refer to ixgbe_aci_phy_type for bits definition */
+ u64 phy_type_low;
+ u64 phy_type_high;
+ u16 max_frame_size;
+ u16 link_speed;
+ u16 req_speeds;
+ u8 topo_media_conflict;
+ u8 link_cfg_err;
+ u8 lse_ena; /* Link Status Event notification */
+ u8 link_info;
+ u8 an_info;
+ u8 ext_info;
+ u8 fec_info;
+ u8 pacing;
+ /* Refer to the module_type[IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE] defines in
+ * struct ixgbe_aci_cmd_get_phy_caps_data
+ */
+ u8 module_type[IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE];
+};
+
+/* Common HW capabilities for SW use */
+struct ixgbe_hw_caps {
+ /* Write CSR protection */
+ u64 wr_csr_prot;
+ u32 switching_mode;
+ /* switching mode supported - EVB switching (including cloud) */
+#define IXGBE_NVM_IMAGE_TYPE_EVB 0x0
+
+ /* Manageability mode & supported protocols over MCTP */
+ u32 mgmt_mode;
+#define IXGBE_MGMT_MODE_PASS_THRU_MODE_M GENMASK(3, 0)
+#define IXGBE_MGMT_MODE_CTL_INTERFACE_M GENMASK(7, 4)
+#define IXGBE_MGMT_MODE_REDIR_SB_INTERFACE_M GENMASK(11, 8)
+
+ u32 mgmt_protocols_mctp;
+#define IXGBE_MGMT_MODE_PROTO_RSVD BIT(0)
+#define IXGBE_MGMT_MODE_PROTO_PLDM BIT(1)
+#define IXGBE_MGMT_MODE_PROTO_OEM BIT(2)
+#define IXGBE_MGMT_MODE_PROTO_NC_SI BIT(3)
+
+ u32 os2bmc;
+ u32 valid_functions;
+ /* DCB capabilities */
+ u32 active_tc_bitmap;
+ u32 maxtc;
+
+ /* RSS related capabilities */
+ u32 rss_table_size; /* 512 for PFs and 64 for VFs */
+ u32 rss_table_entry_width; /* RSS Entry width in bits */
+
+ /* Tx/Rx queues */
+ u32 num_rxq; /* Number/Total Rx queues */
+ u32 rxq_first_id; /* First queue ID for Rx queues */
+ u32 num_txq; /* Number/Total Tx queues */
+ u32 txq_first_id; /* First queue ID for Tx queues */
+
+ /* MSI-X vectors */
+ u32 num_msix_vectors;
+ u32 msix_vector_first_id;
+
+ /* Max MTU for function or device */
+ u32 max_mtu;
+
+ /* WOL related */
+ u32 num_wol_proxy_fltr;
+ u32 wol_proxy_vsi_seid;
+
+ /* LED/SDP pin count */
+ u32 led_pin_num;
+ u32 sdp_pin_num;
+
+ /* LED/SDP - Supports up to 12 LED pins and 8 SDP signals */
+#define IXGBE_MAX_SUPPORTED_GPIO_LED 12
+#define IXGBE_MAX_SUPPORTED_GPIO_SDP 8
+ u8 led[IXGBE_MAX_SUPPORTED_GPIO_LED];
+ u8 sdp[IXGBE_MAX_SUPPORTED_GPIO_SDP];
+ /* SR-IOV virtualization */
+ u8 sr_iov_1_1; /* SR-IOV enabled */
+ /* VMDQ */
+ u8 vmdq; /* VMDQ supported */
+
+ /* EVB capabilities */
+ u8 evb_802_1_qbg; /* Edge Virtual Bridging */
+ u8 evb_802_1_qbh; /* Bridge Port Extension */
+
+ u8 dcb;
+ u8 iscsi;
+ u8 ieee_1588;
+ u8 mgmt_cem;
+
+ /* WoL and APM support */
+#define IXGBE_WOL_SUPPORT_M BIT(0)
+#define IXGBE_ACPI_PROG_MTHD_M BIT(1)
+#define IXGBE_PROXY_SUPPORT_M BIT(2)
+ u8 apm_wol_support;
+ u8 acpi_prog_mthd;
+ u8 proxy_support;
+ bool nvm_update_pending_nvm;
+ bool nvm_update_pending_orom;
+ bool nvm_update_pending_netlist;
+#define IXGBE_NVM_PENDING_NVM_IMAGE BIT(0)
+#define IXGBE_NVM_PENDING_OROM BIT(1)
+#define IXGBE_NVM_PENDING_NETLIST BIT(2)
+ bool sec_rev_disabled;
+ bool update_disabled;
+ bool nvm_unified_update;
+ bool netlist_auth;
+#define IXGBE_NVM_MGMT_SEC_REV_DISABLED BIT(0)
+#define IXGBE_NVM_MGMT_UPDATE_DISABLED BIT(1)
+#define IXGBE_NVM_MGMT_UNIFIED_UPD_SUPPORT BIT(3)
+#define IXGBE_NVM_MGMT_NETLIST_AUTH_SUPPORT BIT(5)
+ bool no_drop_policy_support;
+ /* PCIe reset avoidance */
+ bool pcie_reset_avoidance; /* false: not supported, true: supported */
+ /* Post update reset restriction */
+ bool reset_restrict_support; /* false: not supported, true: supported */
+
+ /* External topology device images within the NVM */
+#define IXGBE_EXT_TOPO_DEV_IMG_COUNT 4
+ u32 ext_topo_dev_img_ver_high[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+ u32 ext_topo_dev_img_ver_low[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+ u8 ext_topo_dev_img_part_num[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+#define IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_S 8
+#define IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_M GENMASK(15, 8)
+ bool ext_topo_dev_img_load_en[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+#define IXGBE_EXT_TOPO_DEV_IMG_LOAD_EN BIT(0)
+ bool ext_topo_dev_img_prog_en[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+#define IXGBE_EXT_TOPO_DEV_IMG_PROG_EN BIT(1)
+} __packed;
+
+#define IXGBE_OROM_CIV_SIGNATURE "$CIV"
+
+struct ixgbe_orom_civd_info {
+ u8 signature[4]; /* Must match ASCII '$CIV' characters */
+ u8 checksum; /* Simple modulo 256 sum of all structure bytes must equal 0 */
+ __le32 combo_ver; /* Combo Image Version number */
+ u8 combo_name_len; /* Length of the unicode combo image version string, max of 32 */
+ __le16 combo_name[32]; /* Unicode string representing the Combo Image version */
+} __packed;
+
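
As a worked example of the checksum rule stated in the structure comment (an editor's sketch, not code from this series), validating a $CIV block could look like:

/* Illustrative only: the modulo-256 sum of every byte of the structure,
 * including the checksum byte itself, must come out to zero.
 */
static bool orom_civd_checksum_ok(const struct ixgbe_orom_civd_info *civd)
{
	const u8 *bytes = (const u8 *)civd;
	u8 sum = 0;
	size_t i;

	for (i = 0; i < sizeof(*civd); i++)
		sum += bytes[i];

	return sum == 0;
}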
+/* Function specific capabilities */
+struct ixgbe_hw_func_caps {
+ u32 num_allocd_vfs; /* Number of allocated VFs */
+ u32 vf_base_id; /* Logical ID of the first VF */
+ u32 guar_num_vsi;
+ struct ixgbe_hw_caps common_cap;
+ bool no_drop_policy_ena;
+};
+
+/* Device wide capabilities */
+struct ixgbe_hw_dev_caps {
+ struct ixgbe_hw_caps common_cap;
+ u32 num_vfs_exposed; /* Total number of VFs exposed */
+ u32 num_vsi_allocd_to_host; /* Excluding EMP VSI */
+ u32 num_flow_director_fltr; /* Number of FD filters available */
+ u32 num_funcs;
+};
+
+/* ACI event information */
+struct ixgbe_aci_event {
+ struct libie_aq_desc desc;
+ u8 *msg_buf;
+ u16 msg_len;
+ u16 buf_len;
+};
+
+struct ixgbe_aci_info {
+ struct mutex lock; /* admin command interface lock */
+ enum libie_aq_err last_status; /* last status of sent admin command */
+};
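For illustration only, not part of the patch: the ACI state is deliberately small, a mutex that serializes admin commands plus the status of the last one. Assuming the hardware structure embeds it as hw->aci (as the E610 code does elsewhere in this series), a send path would be expected to take the lock around the descriptor exchange, roughly as sketched below; example_aci_send_locked() is a placeholder name, not a real driver function.

static int example_aci_send(struct ixgbe_hw *hw, struct libie_aq_desc *desc,
			    void *buf, u16 buf_size)
{
	int err;

	mutex_lock(&hw->aci.lock);
	/* The real, unlocked send routine goes here; it is expected to
	 * refresh hw->aci.last_status from the firmware reply before the
	 * lock is released.
	 */
	err = example_aci_send_locked(hw, desc, buf, buf_size);
	mutex_unlock(&hw->aci.lock);

	return err;
}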
+
+enum ixgbe_bank_select {
+ IXGBE_ACTIVE_FLASH_BANK,
+ IXGBE_INACTIVE_FLASH_BANK,
+};
+
+/* Option ROM version information */
+struct ixgbe_orom_info {
+ u8 major; /* Major version of OROM */
+ u8 patch; /* Patch version of OROM */
+ u16 build; /* Build version of OROM */
+ u32 srev; /* Security revision */
+};
+
+/* NVM version information */
+struct ixgbe_nvm_info {
+ u32 eetrack;
+ u32 srev;
+ u8 major;
+ u8 minor;
+} __packed;
+
+/* netlist version information */
+struct ixgbe_netlist_info {
+ u32 major; /* major high/low */
+ u32 minor; /* minor high/low */
+ u32 type; /* type high/low */
+ u32 rev; /* revision high/low */
+ u32 hash; /* SHA-1 hash word */
+ u16 cust_ver; /* customer version */
+} __packed;
+
+/* Enumeration of possible flash banks for the NVM, OROM, and Netlist modules
+ * of the flash image.
+ */
+enum ixgbe_flash_bank {
+ IXGBE_INVALID_FLASH_BANK,
+ IXGBE_1ST_FLASH_BANK,
+ IXGBE_2ND_FLASH_BANK,
+};
+
+/* information for accessing NVM, OROM, and Netlist flash banks */
+struct ixgbe_bank_info {
+ u32 nvm_ptr; /* Pointer to 1st NVM bank */
+ u32 nvm_size; /* Size of NVM bank */
+ u32 orom_ptr; /* Pointer to 1st OROM bank */
+ u32 orom_size; /* Size of OROM bank */
+ u32 netlist_ptr; /* Ptr to 1st Netlist bank */
+ u32 netlist_size; /* Size of Netlist bank */
+ enum ixgbe_flash_bank nvm_bank; /* Active NVM bank */
+ enum ixgbe_flash_bank orom_bank; /* Active OROM bank */
+ enum ixgbe_flash_bank netlist_bank; /* Active Netlist bank */
+};
+
+/* Flash Chip Information */
+struct ixgbe_flash_info {
+ struct ixgbe_orom_info orom; /* Option ROM version info */
+ u32 flash_size; /* Available flash size in bytes */
+ struct ixgbe_nvm_info nvm; /* NVM version information */
+ struct ixgbe_netlist_info netlist; /* Netlist version info */
+ struct ixgbe_bank_info banks; /* Flash Bank information */
+ u16 sr_words; /* Shadow RAM size in words */
+ u8 blank_nvm_mode; /* is NVM empty (no FW present) */
+};
+
+#endif /* _IXGBE_TYPE_E610_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index f1ffa398f6df..e67e2feb045b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -1,11 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include "ixgbe.h"
+#include "ixgbe_mbx.h"
#include "ixgbe_phy.h"
#include "ixgbe_x540.h"
@@ -46,7 +47,7 @@ int ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
}
/**
- * ixgbe_setup_mac_link_X540 - Set the auto advertised capabilitires
+ * ixgbe_setup_mac_link_X540 - Set the auto advertised capabilities
* @hw: pointer to hardware structure
* @speed: new link speed
* @autoneg_wait_to_complete: true when waiting for completion is needed
@@ -65,7 +66,9 @@ int ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
* Resets the hardware by resetting the transmit and receive units, masks
* and clears all interrupts, perform a PHY reset, and perform a link (MAC)
* reset.
- **/
+ *
+ * Return: 0 on success or negative value on failure
+ */
int ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
{
u32 swfw_mask = hw->phy.phy_semaphore_mask;
@@ -132,10 +135,14 @@ mac_reset_top:
hw->mac.num_rar_entries = IXGBE_X540_MAX_TX_QUEUES;
hw->mac.ops.init_rx_addrs(hw);
+ /* The following is not supported by E610. */
+ if (hw->mac.type == ixgbe_mac_e610)
+ return status;
+
/* Store the permanent SAN mac address */
hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
- /* Add the SAN MAC address to the RAR only if it's a valid address */
+ /* Add the SAN MAC address to RAR if it's a valid address */
if (is_valid_ether_addr(hw->mac.san_addr)) {
/* Save the SAN MAC RAR index */
hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
@@ -366,9 +373,9 @@ static int ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
}
}
- checksum = (u16)IXGBE_EEPROM_SUM - checksum;
+ checksum = IXGBE_EEPROM_SUM - checksum;
- return (int)checksum;
+ return checksum;
}
/**
@@ -887,6 +894,7 @@ static const struct ixgbe_eeprom_operations eeprom_ops_X540 = {
.calc_checksum = &ixgbe_calc_eeprom_checksum_X540,
.validate_checksum = &ixgbe_validate_eeprom_checksum_X540,
.update_checksum = &ixgbe_update_eeprom_checksum_X540,
+ .read_pba_string = &ixgbe_read_pba_string_generic,
};
static const struct ixgbe_phy_operations phy_ops_X540 = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
index b69a680d3ab5..6ed360c5b605 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
@@ -1,5 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
+
+#ifndef _IXGBE_X540_H_
+#define _IXGBE_X540_H_
#include "ixgbe_type.h"
@@ -17,3 +20,5 @@ int ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw);
int ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw);
+
+#endif /* _IXGBE_X540_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 2decb0710b6e..76d2fa3ef518 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -1,9 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include "ixgbe_x540.h"
+#include "ixgbe_x550.h"
#include "ixgbe_type.h"
#include "ixgbe_common.h"
+#include "ixgbe_mbx.h"
#include "ixgbe_phy.h"
static int ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *, ixgbe_link_speed);
@@ -18,7 +20,7 @@ static int ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw)
struct ixgbe_phy_info *phy = &hw->phy;
struct ixgbe_link_info *link = &hw->link;
- /* Start with X540 invariants, since so simular */
+ /* Start with X540 invariants, since so similar */
ixgbe_get_invariants_X540(hw);
if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper)
@@ -46,7 +48,7 @@ static int ixgbe_get_invariants_X550_a(struct ixgbe_hw *hw)
struct ixgbe_mac_info *mac = &hw->mac;
struct ixgbe_phy_info *phy = &hw->phy;
- /* Start with X540 invariants, since so simular */
+ /* Start with X540 invariants, since so similar */
ixgbe_get_invariants_X540(hw);
if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper)
@@ -683,7 +685,7 @@ static int ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
return 0;
}
-/** ixgbe_read_iosf_sb_reg_x550 - Writes a value to specified register of the
+/** ixgbe_read_iosf_sb_reg_x550 - Reads a value from the specified register of the
* IOSF device
* @hw: pointer to hardware structure
* @reg_addr: 32 bit PHY register to write
@@ -845,7 +847,7 @@ static int ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
/** ixgbe_read_ee_hostif_buffer_X550- Read EEPROM word(s) using hostif
* @hw: pointer to hardware structure
- * @offset: offset of word in the EEPROM to read
+ * @offset: offset of word in the EEPROM to read
* @words: number of words
* @data: word(s) read from the EEPROM
*
@@ -1058,9 +1060,9 @@ static int ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer,
return status;
}
- checksum = (u16)IXGBE_EEPROM_SUM - checksum;
+ checksum = IXGBE_EEPROM_SUM - checksum;
- return (int)checksum;
+ return checksum;
}
/** ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum
@@ -1161,7 +1163,7 @@ static int ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw,
return status;
}
-/** ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif
+/** ixgbe_write_ee_hostif_data_X550 - Write EEPROM word using hostif
* @hw: pointer to hardware structure
* @offset: offset of word in the EEPROM to write
* @data: word write to the EEPROM
@@ -1251,7 +1253,7 @@ static int ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw)
/**
* ixgbe_fw_recovery_mode_X550 - Check FW NVM recovery mode
- * @hw: pointer t hardware structure
+ * @hw: pointer to hardware structure
*
* Returns true if in FW NVM recovery mode.
*/
@@ -1265,7 +1267,7 @@ static bool ixgbe_fw_recovery_mode_X550(struct ixgbe_hw *hw)
/** ixgbe_disable_rx_x550 - Disable RX unit
*
- * Enables the Rx DMA unit for x550
+ * Disables the Rx DMA unit for x550
**/
static void ixgbe_disable_rx_x550(struct ixgbe_hw *hw)
{
@@ -1722,59 +1724,9 @@ static int ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
return -EINVAL;
}
- (void)mac->ops.write_iosf_sb_reg(hw,
- IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
-
- /* change mode enforcement rules to hybrid */
- (void)mac->ops.read_iosf_sb_reg(hw,
- IXGBE_KRM_FLX_TMRS_CTRL_ST31(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
- reg_val |= 0x0400;
-
- (void)mac->ops.write_iosf_sb_reg(hw,
- IXGBE_KRM_FLX_TMRS_CTRL_ST31(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
-
- /* manually control the config */
- (void)mac->ops.read_iosf_sb_reg(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
- reg_val |= 0x20002240;
-
- (void)mac->ops.write_iosf_sb_reg(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
-
- /* move the AN base page values */
- (void)mac->ops.read_iosf_sb_reg(hw,
- IXGBE_KRM_PCS_KX_AN(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
- reg_val |= 0x1;
-
- (void)mac->ops.write_iosf_sb_reg(hw,
- IXGBE_KRM_PCS_KX_AN(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
-
- /* set the AN37 over CB mode */
- (void)mac->ops.read_iosf_sb_reg(hw,
- IXGBE_KRM_AN_CNTL_4(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
- reg_val |= 0x20000000;
-
- (void)mac->ops.write_iosf_sb_reg(hw,
- IXGBE_KRM_AN_CNTL_4(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
-
- /* restart AN manually */
- (void)mac->ops.read_iosf_sb_reg(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
-
- (void)mac->ops.write_iosf_sb_reg(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ status = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
/* Toggle port SW reset by AN reset. */
status = ixgbe_restart_an_internal_phy_x550em(hw);
@@ -1802,7 +1754,7 @@ ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed,
ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
/* If no SFP module present, then return success. Return success since
- * SFP not present error is not excepted in the setup MAC link flow.
+ * SFP not present error is not accepted in the setup MAC link flow.
*/
if (ret_val == -ENOENT)
return 0;
@@ -1852,7 +1804,7 @@ ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
/* If no SFP module present, then return success. Return success since
- * SFP not present error is not excepted in the setup MAC link flow.
+ * SFP not present error is not accepted in the setup MAC link flow.
*/
if (ret_val == -ENOENT)
return 0;
@@ -2366,13 +2318,13 @@ static int ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
}
/**
- * ixgbe_get_lasi_ext_t_x550em - Determime external Base T PHY interrupt cause
+ * ixgbe_get_lasi_ext_t_x550em - Determine external Base T PHY interrupt cause
* @hw: pointer to hardware structure
* @lsc: pointer to boolean flag which indicates whether external Base T
* PHY interrupt is lsc
* @is_overtemp: indicate whether an overtemp event encountered
*
- * Determime if external Base T PHY interrupt cause is high temperature
+ * Determine if external Base T PHY interrupt cause is high temperature
* failure alarm or link status change.
**/
static int ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc,
@@ -2676,7 +2628,7 @@ static int ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up)
}
/** ixgbe_setup_internal_phy_t_x550em - Configure KR PHY to X557 link
- * @hw: point to hardware structure
+ * @hw: pointer to hardware structure
*
* Configures the link between the integrated KR PHY and the external X557 PHY
* The driver will call this function when it gets a link status change
@@ -2717,7 +2669,7 @@ static int ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
if (status)
return status;
- /* If link is not still up, then no setup is necessary so return */
+ /* If the link is still not up, no setup is necessary */
status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
if (status)
return status;
@@ -2793,7 +2745,7 @@ static int ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
return -EINVAL;
- /* To turn on the LED, set mode to ON. */
+ /* To turn off the LED, set mode to OFF. */
hw->phy.ops.read_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
MDIO_MMD_VEND1, &phy_data);
phy_data &= ~IXGBE_X557_LED_MANUAL_SET_MASK;
@@ -2816,12 +2768,12 @@ static int ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
* Sends driver version number to firmware through the manageability
* block. On success return 0
* else returns -EBUSY when encountering an error acquiring
- * semaphore, -EIO when command fails or -ENIVAL when incorrect
+ * semaphore, -EIO when command fails or -EINVAL when incorrect
* params passed.
**/
-static int ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
- u8 build, u8 sub, u16 len,
- const char *driver_ver)
+int ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
+ u8 build, u8 sub, u16 len,
+ const char *driver_ver)
{
struct ixgbe_hic_drv_info2 fw_cmd;
int ret_val;
@@ -2860,7 +2812,7 @@ static int ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
return ret_val;
}
-/** ixgbe_get_lcd_x550em - Determine lowest common denominator
+/** ixgbe_get_lcd_t_x550em - Determine lowest common denominator
* @hw: pointer to hardware structure
* @lcd_speed: pointer to lowest common link speed
*
@@ -3223,7 +3175,7 @@ static void ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw)
hw->phy.nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
/* If X552 (X550EM_a) and MDIO is connected to external PHY, then set
- * PHY address. This register field was has only been used for X552.
+ * PHY address. This register field has only been used for X552.
*/
if (hw->mac.type == ixgbe_mac_x550em_a &&
hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_MDIO_ACT) {
@@ -3364,7 +3316,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
return media_type;
}
-/** ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY.
+/** ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY.
** @hw: pointer to hardware structure
**/
static int ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
@@ -3554,14 +3506,14 @@ mac_reset_top:
return status;
}
-/** ixgbe_set_ethertype_anti_spoofing_X550 - Enable/Disable Ethertype
+/** ixgbe_set_ethertype_anti_spoofing_x550 - Enable/Disable Ethertype
* anti-spoofing
* @hw: pointer to hardware structure
* @enable: enable or disable switch for Ethertype anti-spoofing
* @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing
**/
-static void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
- bool enable, int vf)
+void ixgbe_set_ethertype_anti_spoofing_x550(struct ixgbe_hw *hw,
+ bool enable, int vf)
{
int vf_target_reg = vf >> 3;
int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT;
@@ -3576,14 +3528,14 @@ static void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
}
-/** ixgbe_set_source_address_pruning_X550 - Enable/Disbale src address pruning
+/** ixgbe_set_source_address_pruning_x550 - Enable/Disable src address pruning
* @hw: pointer to hardware structure
* @enable: enable or disable source address pruning
* @pool: Rx pool to set source address pruning for
**/
-static void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw,
- bool enable,
- unsigned int pool)
+void ixgbe_set_source_address_pruning_x550(struct ixgbe_hw *hw,
+ bool enable,
+ unsigned int pool)
{
u64 pfflp;
@@ -3783,7 +3735,7 @@ static int ixgbe_acquire_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
* @hw: pointer to hardware structure
* @mask: Mask to specify which semaphore to release
*
- * Release the SWFW semaphore and puts the shared PHY token as needed
+ * Releases the SWFW semaphore and puts back the shared PHY token as needed
*/
static void ixgbe_release_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
{
@@ -3804,7 +3756,7 @@ static void ixgbe_release_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
* @phy_data: Pointer to read data from PHY register
*
* Reads a value from a specified PHY register using the SWFW lock and PHY
- * Token. The PHY Token is needed since the MDIO is shared between to MAC
+ * Token. The PHY Token is needed since the MDIO is shared between two MAC
* instances.
*/
static int ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
@@ -3848,6 +3800,122 @@ static int ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
return status;
}
+static void ixgbe_set_mdd_x550(struct ixgbe_hw *hw, bool ena)
+{
+ u32 reg_dma, reg_rdr;
+
+ reg_dma = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+ reg_rdr = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
+
+ if (ena) {
+ reg_dma |= (IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
+ reg_rdr |= (IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
+ } else {
+ reg_dma &= ~(IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
+ reg_rdr &= ~(IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_dma);
+ IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_rdr);
+}
+
+/**
+ * ixgbe_enable_mdd_x550 - enable malicious driver detection
+ * @hw: pointer to hardware structure
+ */
+void ixgbe_enable_mdd_x550(struct ixgbe_hw *hw)
+{
+ ixgbe_set_mdd_x550(hw, true);
+}
+
+/**
+ * ixgbe_disable_mdd_x550 - disable malicious driver detection
+ * @hw: pointer to hardware structure
+ */
+void ixgbe_disable_mdd_x550(struct ixgbe_hw *hw)
+{
+ ixgbe_set_mdd_x550(hw, false);
+}
+
+/**
+ * ixgbe_restore_mdd_vf_x550 - restore VF that was disabled during MDD event
+ * @hw: pointer to hardware structure
+ * @vf: vf index
+ */
+void ixgbe_restore_mdd_vf_x550(struct ixgbe_hw *hw, u32 vf)
+{
+ u32 idx, reg, val, num_qs, start_q, bitmask;
+
+ /* Map VF to queues */
+ reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
+ switch (reg & IXGBE_MRQC_MRQE_MASK) {
+ case IXGBE_MRQC_VMDQRT8TCEN:
+ num_qs = IXGBE_16VFS_QUEUES;
+ bitmask = IXGBE_16VFS_BITMASK;
+ break;
+ case IXGBE_MRQC_VMDQRSS32EN:
+ case IXGBE_MRQC_VMDQRT4TCEN:
+ num_qs = IXGBE_32VFS_QUEUES;
+ bitmask = IXGBE_32VFS_BITMASK;
+ break;
+ default:
+ num_qs = IXGBE_64VFS_QUEUES;
+ bitmask = IXGBE_64VFS_BITMASK;
+ break;
+ }
+ start_q = vf * num_qs;
+
+ /* Release vf's queues by clearing WQBR_TX and WQBR_RX (RW1C) */
+ idx = start_q / IXGBE_QUEUES_PER_REG;
+ val = bitmask << (start_q % IXGBE_QUEUES_PER_REG);
+ IXGBE_WRITE_REG(hw, IXGBE_WQBR_TX(idx), val);
+ IXGBE_WRITE_REG(hw, IXGBE_WQBR_RX(idx), val);
+}
+
+/**
+ * ixgbe_handle_mdd_x550 - handle malicious driver detection event
+ * @hw: pointer to hardware structure
+ * @vf_bitmap: output vf bitmap of malicious vfs
+ */
+void ixgbe_handle_mdd_x550(struct ixgbe_hw *hw, unsigned long *vf_bitmap)
+{
+ u32 i, j, reg, q, div, vf;
+ unsigned long wqbr;
+
+ /* figure out pool size for mapping to vf's */
+ reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
+ switch (reg & IXGBE_MRQC_MRQE_MASK) {
+ case IXGBE_MRQC_VMDQRT8TCEN:
+ div = IXGBE_16VFS_QUEUES;
+ break;
+ case IXGBE_MRQC_VMDQRSS32EN:
+ case IXGBE_MRQC_VMDQRT4TCEN:
+ div = IXGBE_32VFS_QUEUES;
+ break;
+ default:
+ div = IXGBE_64VFS_QUEUES;
+ break;
+ }
+
+ /* Read WQBR_TX and WQBR_RX and check for malicious queues */
+ for (i = 0; i < IXGBE_QUEUES_REG_AMOUNT; i++) {
+ wqbr = IXGBE_READ_REG(hw, IXGBE_WQBR_TX(i)) |
+ IXGBE_READ_REG(hw, IXGBE_WQBR_RX(i));
+ if (!wqbr)
+ continue;
+
+ /* Get malicious queue */
+ for_each_set_bit(j, (unsigned long *)&wqbr,
+ IXGBE_QUEUES_PER_REG) {
+ /* Get queue from bitmask */
+ q = j + (i * IXGBE_QUEUES_PER_REG);
+ /* Map queue to vf */
+ vf = q / div;
+ set_bit(vf, vf_bitmap);
+ }
+ }
+}
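For illustration only, not part of the patch: the four MDD callbacks are meant to be used together. A service-task style caller could collect the offending VFs reported by handle_mdd() and then reopen their queues with restore_mdd_vf(). The sketch below assumes the usual ixgbe fields (adapter->hw, adapter->num_vfs); the exact wiring in the driver may differ.

static void example_poll_mdd(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	DECLARE_BITMAP(vf_bitmap, 64);
	unsigned int vf;

	if (!hw->mac.ops.handle_mdd || !hw->mac.ops.restore_mdd_vf)
		return;

	bitmap_zero(vf_bitmap, 64);

	/* Gather the VFs whose queues were blocked by the MDD logic. */
	hw->mac.ops.handle_mdd(hw, vf_bitmap);

	/* Reopen each offending VF's queues; a real driver would also
	 * log or rate-limit the event.
	 */
	for_each_set_bit(vf, vf_bitmap, adapter->num_vfs)
		hw->mac.ops.restore_mdd_vf(hw, vf);
}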
+
#define X550_COMMON_MAC \
.init_hw = &ixgbe_init_hw_generic, \
.start_hw = &ixgbe_start_hw_X540, \
@@ -3880,9 +3948,9 @@ static int ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, \
.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, \
.set_source_address_pruning = \
- &ixgbe_set_source_address_pruning_X550, \
+ &ixgbe_set_source_address_pruning_x550, \
.set_ethertype_anti_spoofing = \
- &ixgbe_set_ethertype_anti_spoofing_X550, \
+ &ixgbe_set_ethertype_anti_spoofing_x550, \
.disable_rx_buff = &ixgbe_disable_rx_buff_generic, \
.enable_rx_buff = &ixgbe_enable_rx_buff_generic, \
.get_thermal_sensor_data = NULL, \
@@ -3911,6 +3979,10 @@ static const struct ixgbe_mac_operations mac_ops_X550 = {
.prot_autoc_write = prot_autoc_write_generic,
.setup_fc = ixgbe_setup_fc_generic,
.fc_autoneg = ixgbe_fc_autoneg,
+ .enable_mdd = ixgbe_enable_mdd_x550,
+ .disable_mdd = ixgbe_disable_mdd_x550,
+ .restore_mdd_vf = ixgbe_restore_mdd_vf_x550,
+ .handle_mdd = ixgbe_handle_mdd_x550,
};
static const struct ixgbe_mac_operations mac_ops_X550EM_x = {
@@ -4007,6 +4079,7 @@ static const struct ixgbe_mac_operations mac_ops_x550em_a_fw = {
.validate_checksum = &ixgbe_validate_eeprom_checksum_X550, \
.update_checksum = &ixgbe_update_eeprom_checksum_X550, \
.calc_checksum = &ixgbe_calc_eeprom_checksum_X550, \
+ .read_pba_string = &ixgbe_read_pba_string_generic, \
static const struct ixgbe_eeprom_operations eeprom_ops_X550 = {
X550_COMMON_EEP
@@ -4096,7 +4169,7 @@ static const u32 ixgbe_mvals_X550EM_x[IXGBE_MVALS_IDX_LIMIT] = {
IXGBE_MVALS_INIT(X550EM_x)
};
-static const u32 ixgbe_mvals_x550em_a[IXGBE_MVALS_IDX_LIMIT] = {
+const u32 ixgbe_mvals_x550em_a[IXGBE_MVALS_IDX_LIMIT] = {
IXGBE_MVALS_INIT(X550EM_a)
};
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h
new file mode 100644
index 000000000000..2a11147fb1bc
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2024 Intel Corporation. */
+
+#ifndef _IXGBE_X550_H_
+#define _IXGBE_X550_H_
+
+#include "ixgbe_type.h"
+
+extern const u32 ixgbe_mvals_x550em_a[IXGBE_MVALS_IDX_LIMIT];
+
+int ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
+ u8 build, u8 sub, u16 len,
+ const char *driver_ver);
+void ixgbe_set_source_address_pruning_x550(struct ixgbe_hw *hw,
+ bool enable,
+ unsigned int pool);
+void ixgbe_set_ethertype_anti_spoofing_x550(struct ixgbe_hw *hw,
+ bool enable, int vf);
+
+void ixgbe_enable_mdd_x550(struct ixgbe_hw *hw);
+void ixgbe_disable_mdd_x550(struct ixgbe_hw *hw);
+void ixgbe_restore_mdd_vf_x550(struct ixgbe_hw *hw, u32 vf);
+void ixgbe_handle_mdd_x550(struct ixgbe_hw *hw, unsigned long *vf_bitmap);
+
+#endif /* _IXGBE_X550_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index d34d715c59eb..7b941505a9d0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -220,8 +220,7 @@ static struct sk_buff *ixgbe_construct_skb_zc(struct ixgbe_ring *rx_ring,
net_prefetch(xdp->data_meta);
/* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
- GFP_ATOMIC | __GFP_NOWARN);
+ skb = napi_alloc_skb(&rx_ring->q_vector->napi, totalsize);
if (unlikely(!skb))
return NULL;
@@ -304,7 +303,7 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
}
bi->xdp->data_end = bi->xdp->data + size;
- xsk_buff_dma_sync_for_cpu(bi->xdp, rx_ring->xsk_pool);
+ xsk_buff_dma_sync_for_cpu(bi->xdp);
xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, bi->xdp);
if (likely(xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR))) {
@@ -399,7 +398,7 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
dma_addr_t dma;
u32 cmd_type;
- while (budget-- > 0) {
+ while (likely(budget)) {
if (unlikely(!ixgbe_desc_unused(xdp_ring))) {
work_done = false;
break;
@@ -434,6 +433,8 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
xdp_ring->next_to_use++;
if (xdp_ring->next_to_use == xdp_ring->count)
xdp_ring->next_to_use = 0;
+
+ budget--;
}
if (tx_desc) {
@@ -509,7 +510,7 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_ring *ring;
if (test_bit(__IXGBE_DOWN, &adapter->state))
diff --git a/drivers/net/ethernet/intel/ixgbevf/Makefile b/drivers/net/ethernet/intel/ixgbevf/Makefile
index 186a4bb24fde..01d3e892f3fa 100644
--- a/drivers/net/ethernet/intel/ixgbevf/Makefile
+++ b/drivers/net/ethernet/intel/ixgbevf/Makefile
@@ -6,9 +6,5 @@
obj-$(CONFIG_IXGBEVF) += ixgbevf.o
-ixgbevf-objs := vf.o \
- mbx.o \
- ethtool.o \
- ixgbevf_main.o
+ixgbevf-y := vf.o mbx.o ethtool.o ixgbevf_main.o
ixgbevf-$(CONFIG_IXGBEVF_IPSEC) += ipsec.o
-
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 5f08779c0e4e..e177d1d58696 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#ifndef _IXGBEVF_DEFINES_H_
#define _IXGBEVF_DEFINES_H_
@@ -16,6 +16,9 @@
#define IXGBE_DEV_ID_X550_VF_HV 0x1564
#define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9
+#define IXGBE_DEV_ID_E610_VF 0x57AD
+#define IXGBE_SUBDEV_ID_E610_VF_HV 0x00FF
+
#define IXGBE_VF_IRQ_CLEAR_MASK 7
#define IXGBE_VF_MAX_TX_QUEUES 8
#define IXGBE_VF_MAX_RX_QUEUES 8
@@ -25,6 +28,7 @@
/* Link speed */
typedef u32 ixgbe_link_speed;
+#define IXGBE_LINK_SPEED_UNKNOWN 0
#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
#define IXGBE_LINK_SPEED_100_FULL 0x0008
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 7ac53171b041..537a60d5276f 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -276,9 +276,9 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
}
if (new_tx_count != adapter->tx_ring_count) {
- tx_ring = vmalloc(array_size(sizeof(*tx_ring),
- adapter->num_tx_queues +
- adapter->num_xdp_queues));
+ tx_ring = vmalloc_array(adapter->num_tx_queues +
+ adapter->num_xdp_queues,
+ sizeof(*tx_ring));
if (!tx_ring) {
err = -ENOMEM;
goto clear_reset;
@@ -867,19 +867,11 @@ static int ixgbevf_set_coalesce(struct net_device *netdev,
return 0;
}
-static int ixgbevf_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
- u32 *rules __always_unused)
+static u32 ixgbevf_get_rx_ring_count(struct net_device *dev)
{
struct ixgbevf_adapter *adapter = netdev_priv(dev);
- switch (info->cmd) {
- case ETHTOOL_GRXRINGS:
- info->data = adapter->num_rx_queues;
- return 0;
- default:
- hw_dbg(&adapter->hw, "Command parameters not supported\n");
- return -EOPNOTSUPP;
- }
+ return adapter->num_rx_queues;
}
static u32 ixgbevf_get_rxfh_indir_size(struct net_device *netdev)
@@ -987,7 +979,7 @@ static const struct ethtool_ops ixgbevf_ethtool_ops = {
.get_ethtool_stats = ixgbevf_get_ethtool_stats,
.get_coalesce = ixgbevf_get_coalesce,
.set_coalesce = ixgbevf_set_coalesce,
- .get_rxnfc = ixgbevf_get_rxnfc,
+ .get_rx_ring_count = ixgbevf_get_rx_ring_count,
.get_rxfh_indir_size = ixgbevf_get_rxfh_indir_size,
.get_rxfh_key_size = ixgbevf_get_rxfh_key_size,
.get_rxfh = ixgbevf_get_rxfh,
diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
index 66cf17f19408..fce35924ff8b 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
@@ -201,6 +201,7 @@ struct xfrm_state *ixgbevf_ipsec_find_rx_state(struct ixgbevf_ipsec *ipsec,
/**
* ixgbevf_ipsec_parse_proto_keys - find the key and salt based on the protocol
+ * @dev: pointer to net device to program
* @xs: pointer to xfrm_state struct
* @mykey: pointer to key array to populate
* @mysalt: pointer to salt value to populate
@@ -208,10 +209,10 @@ struct xfrm_state *ixgbevf_ipsec_find_rx_state(struct ixgbevf_ipsec *ipsec,
* This copies the protocol keys and salt to our own data tables. The
* 82599 family only supports the one algorithm.
**/
-static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs,
+static int ixgbevf_ipsec_parse_proto_keys(struct net_device *dev,
+ struct xfrm_state *xs,
u32 *mykey, u32 *mysalt)
{
- struct net_device *dev = xs->xso.real_dev;
unsigned char *key_data;
char *alg_name = NULL;
int key_len;
@@ -256,13 +257,14 @@ static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs,
/**
* ixgbevf_ipsec_add_sa - program device with a security association
+ * @dev: pointer to net device to program
* @xs: pointer to transformer state struct
* @extack: extack point to fill failure reason
**/
-static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs,
+static int ixgbevf_ipsec_add_sa(struct net_device *dev,
+ struct xfrm_state *xs,
struct netlink_ext_ack *extack)
{
- struct net_device *dev = xs->xso.real_dev;
struct ixgbevf_adapter *adapter;
struct ixgbevf_ipsec *ipsec;
u16 sa_idx;
@@ -271,6 +273,9 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs,
adapter = netdev_priv(dev);
ipsec = adapter->ipsec;
+ if (!(adapter->pf_features & IXGBEVF_PF_SUP_IPSEC))
+ return -EOPNOTSUPP;
+
if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported protocol for IPsec offload");
return -EINVAL;
@@ -310,7 +315,8 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs,
rsa.decrypt = xs->ealg || xs->aead;
/* get the key and salt */
- ret = ixgbevf_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt);
+ ret = ixgbevf_ipsec_parse_proto_keys(dev, xs, rsa.key,
+ &rsa.salt);
if (ret) {
NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for Rx SA table");
return ret;
@@ -363,7 +369,8 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs,
if (xs->id.proto & IPPROTO_ESP)
tsa.encrypt = xs->ealg || xs->aead;
- ret = ixgbevf_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt);
+ ret = ixgbevf_ipsec_parse_proto_keys(dev, xs, tsa.key,
+ &tsa.salt);
if (ret) {
NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for Tx SA table");
memset(&tsa, 0, sizeof(tsa));
@@ -388,11 +395,12 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs,
/**
* ixgbevf_ipsec_del_sa - clear out this specific SA
+ * @dev: pointer to net device to program
* @xs: pointer to transformer state struct
**/
-static void ixgbevf_ipsec_del_sa(struct xfrm_state *xs)
+static void ixgbevf_ipsec_del_sa(struct net_device *dev,
+ struct xfrm_state *xs)
{
- struct net_device *dev = xs->xso.real_dev;
struct ixgbevf_adapter *adapter;
struct ixgbevf_ipsec *ipsec;
u16 sa_idx;
@@ -400,6 +408,9 @@ static void ixgbevf_ipsec_del_sa(struct xfrm_state *xs)
adapter = netdev_priv(dev);
ipsec = adapter->ipsec;
+ if (!(adapter->pf_features & IXGBEVF_PF_SUP_IPSEC))
+ return;
+
if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) {
sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX;
@@ -428,30 +439,9 @@ static void ixgbevf_ipsec_del_sa(struct xfrm_state *xs)
}
}
-/**
- * ixgbevf_ipsec_offload_ok - can this packet use the xfrm hw offload
- * @skb: current data packet
- * @xs: pointer to transformer state struct
- **/
-static bool ixgbevf_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
-{
- if (xs->props.family == AF_INET) {
- /* Offload with IPv4 options is not supported yet */
- if (ip_hdr(skb)->ihl != 5)
- return false;
- } else {
- /* Offload with IPv6 extension headers is not support yet */
- if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
- return false;
- }
-
- return true;
-}
-
static const struct xfrmdev_ops ixgbevf_xfrmdev_ops = {
.xdo_dev_state_add = ixgbevf_ipsec_add_sa,
.xdo_dev_state_delete = ixgbevf_ipsec_del_sa,
- .xdo_dev_offload_ok = ixgbevf_ipsec_offload_ok,
};
/**
@@ -628,8 +618,11 @@ void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter *adapter)
size_t size;
switch (adapter->hw.api_version) {
+ case ixgbe_mbox_api_17:
+ if (!(adapter->pf_features & IXGBEVF_PF_SUP_IPSEC))
+ return;
+ break;
case ixgbe_mbox_api_14:
- case ixgbe_mbox_api_15:
break;
default:
return;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 130cb868774c..516a6fdd23d0 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#ifndef _IXGBEVF_H_
#define _IXGBEVF_H_
@@ -241,23 +241,7 @@ struct ixgbevf_q_vector {
char name[IFNAMSIZ + 9];
/* for dynamic allocation of rings associated with this q_vector */
- struct ixgbevf_ring ring[0] ____cacheline_internodealigned_in_smp;
-#ifdef CONFIG_NET_RX_BUSY_POLL
- unsigned int state;
-#define IXGBEVF_QV_STATE_IDLE 0
-#define IXGBEVF_QV_STATE_NAPI 1 /* NAPI owns this QV */
-#define IXGBEVF_QV_STATE_POLL 2 /* poll owns this QV */
-#define IXGBEVF_QV_STATE_DISABLED 4 /* QV is disabled */
-#define IXGBEVF_QV_OWNED (IXGBEVF_QV_STATE_NAPI | IXGBEVF_QV_STATE_POLL)
-#define IXGBEVF_QV_LOCKED (IXGBEVF_QV_OWNED | IXGBEVF_QV_STATE_DISABLED)
-#define IXGBEVF_QV_STATE_NAPI_YIELD 8 /* NAPI yielded this QV */
-#define IXGBEVF_QV_STATE_POLL_YIELD 16 /* poll yielded this QV */
-#define IXGBEVF_QV_YIELD (IXGBEVF_QV_STATE_NAPI_YIELD | \
- IXGBEVF_QV_STATE_POLL_YIELD)
-#define IXGBEVF_QV_USER_PEND (IXGBEVF_QV_STATE_POLL | \
- IXGBEVF_QV_STATE_POLL_YIELD)
- spinlock_t lock;
-#endif /* CONFIG_NET_RX_BUSY_POLL */
+ struct ixgbevf_ring ring[] ____cacheline_internodealigned_in_smp;
};
/* microsecond values for various ITR rates shifted by 2 to fit itr register
@@ -346,7 +330,6 @@ struct ixgbevf_adapter {
int num_rx_queues;
struct ixgbevf_ring *rx_ring[MAX_TX_QUEUES]; /* One per active queue */
u64 hw_csum_rx_error;
- u64 hw_rx_no_dma_resources;
int num_msix_vectors;
u64 alloc_rx_page_failed;
u64 alloc_rx_buff_failed;
@@ -363,8 +346,13 @@ struct ixgbevf_adapter {
/* structs defined in ixgbe_vf.h */
struct ixgbe_hw hw;
u16 msg_enable;
- /* Interrupt Throttle Rate */
- u32 eitr_param;
+
+ u32 pf_features;
+#define IXGBEVF_PF_SUP_IPSEC BIT(0)
+#define IXGBEVF_PF_SUP_ESX_MBX BIT(1)
+
+#define IXGBEVF_SUPPORTED_FEATURES (IXGBEVF_PF_SUP_IPSEC | \
+ IXGBEVF_PF_SUP_ESX_MBX)
struct ixgbevf_hw_stats stats;
@@ -418,6 +406,8 @@ enum ixgbevf_boards {
board_X550EM_x_vf,
board_X550EM_x_vf_hv,
board_x550em_a_vf,
+ board_e610_vf,
+ board_e610_vf_hv,
};
enum ixgbevf_xcast_modes {
@@ -434,12 +424,13 @@ extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_info;
extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;
extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops_legacy;
extern const struct ixgbevf_info ixgbevf_x550em_a_vf_info;
+extern const struct ixgbevf_info ixgbevf_e610_vf_info;
extern const struct ixgbevf_info ixgbevf_82599_vf_hv_info;
extern const struct ixgbevf_info ixgbevf_X540_vf_hv_info;
extern const struct ixgbevf_info ixgbevf_X550_vf_hv_info;
extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info;
-extern const struct ixgbe_mbx_operations ixgbevf_hv_mbx_ops;
+extern const struct ixgbevf_info ixgbevf_e610_vf_hv_info;
/* needed by ethtool.c */
extern const char ixgbevf_driver_name[];
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 9c960017a6de..d5ce20f47def 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
/******************************************************************************
Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
@@ -39,7 +39,7 @@ static const char ixgbevf_driver_string[] =
"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
static char ixgbevf_copyright[] =
- "Copyright (c) 2009 - 2018 Intel Corporation.";
+ "Copyright (c) 2009 - 2024 Intel Corporation.";
static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
[board_82599_vf] = &ixgbevf_82599_vf_info,
@@ -51,6 +51,8 @@ static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
[board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info,
[board_X550EM_x_vf_hv] = &ixgbevf_X550EM_x_vf_hv_info,
[board_x550em_a_vf] = &ixgbevf_x550em_a_vf_info,
+ [board_e610_vf] = &ixgbevf_e610_vf_info,
+ [board_e610_vf_hv] = &ixgbevf_e610_vf_hv_info,
};
/* ixgbevf_pci_tbl - PCI Device ID Table
@@ -71,12 +73,14 @@ static const struct pci_device_id ixgbevf_pci_tbl[] = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV), board_X550EM_x_vf_hv},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_VF), board_x550em_a_vf },
+ {PCI_VDEVICE_SUB(INTEL, IXGBE_DEV_ID_E610_VF, PCI_ANY_ID,
+ IXGBE_SUBDEV_ID_E610_VF_HV), board_e610_vf_hv},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_VF), board_e610_vf},
/* required last entry */
{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
-MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL v2");
@@ -733,10 +737,6 @@ static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
- /* XDP packets use error pointer so abort at this point */
- if (IS_ERR(skb))
- return true;
-
/* verify that the packet does not have any known errors */
if (unlikely(ixgbevf_test_staterr(rx_desc,
IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) {
@@ -1045,9 +1045,9 @@ static int ixgbevf_xmit_xdp_ring(struct ixgbevf_ring *ring,
return IXGBEVF_XDP_TX;
}
-static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *rx_ring,
- struct xdp_buff *xdp)
+static int ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *rx_ring,
+ struct xdp_buff *xdp)
{
int result = IXGBEVF_XDP_PASS;
struct ixgbevf_ring *xdp_ring;
@@ -1081,7 +1081,7 @@ out_failure:
break;
}
xdp_out:
- return ERR_PTR(-result);
+ return result;
}
static unsigned int ixgbevf_rx_frame_truesize(struct ixgbevf_ring *rx_ring,
@@ -1123,6 +1123,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
struct sk_buff *skb = rx_ring->skb;
bool xdp_xmit = false;
struct xdp_buff xdp;
+ int xdp_res = 0;
/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
@@ -1166,11 +1167,11 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
/* At larger PAGE_SIZE, frame_sz depend on len size */
xdp.frame_sz = ixgbevf_rx_frame_truesize(rx_ring, size);
#endif
- skb = ixgbevf_run_xdp(adapter, rx_ring, &xdp);
+ xdp_res = ixgbevf_run_xdp(adapter, rx_ring, &xdp);
}
- if (IS_ERR(skb)) {
- if (PTR_ERR(skb) == -IXGBEVF_XDP_TX) {
+ if (xdp_res) {
+ if (xdp_res == IXGBEVF_XDP_TX) {
xdp_xmit = true;
ixgbevf_rx_buffer_flip(rx_ring, rx_buffer,
size);
@@ -1190,7 +1191,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
}
/* exit if we failed to retrieve a buffer */
- if (!skb) {
+ if (!xdp_res && !skb) {
rx_ring->rx_stats.alloc_rx_buff_failed++;
rx_buffer->pagecnt_bias++;
break;
@@ -1204,7 +1205,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
continue;
/* verify the packet layout is correct */
- if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) {
+ if (xdp_res || ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) {
skb = NULL;
continue;
}
@@ -2270,10 +2271,36 @@ static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}
+/**
+ * ixgbevf_set_features - Set features supported by PF
+ * @adapter: pointer to the adapter struct
+ *
+ * Negotiate supported features with the PF and then set pf_features accordingly.
+ */
+static void ixgbevf_set_features(struct ixgbevf_adapter *adapter)
+{
+ u32 *pf_features = &adapter->pf_features;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int err;
+
+ err = hw->mac.ops.negotiate_features(hw, pf_features);
+ if (err && err != -EOPNOTSUPP)
+ netdev_dbg(adapter->netdev,
+ "PF feature negotiation failed.\n");
+
+ /* Also handle pre-API 1.7 cases */
+ if (hw->api_version == ixgbe_mbox_api_14)
+ *pf_features |= IXGBEVF_PF_SUP_IPSEC;
+ else if (hw->api_version == ixgbe_mbox_api_15)
+ *pf_features |= IXGBEVF_PF_SUP_ESX_MBX;
+}
+
static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
static const int api[] = {
+ ixgbe_mbox_api_17,
+ ixgbe_mbox_api_16,
ixgbe_mbox_api_15,
ixgbe_mbox_api_14,
ixgbe_mbox_api_13,
@@ -2293,7 +2320,9 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
idx++;
}
- if (hw->api_version >= ixgbe_mbox_api_15) {
+ ixgbevf_set_features(adapter);
+
+ if (adapter->pf_features & IXGBEVF_PF_SUP_ESX_MBX) {
hw->mbx.ops.init_params(hw);
memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
sizeof(struct ixgbe_mbx_operations));
@@ -2513,7 +2542,7 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
ixgbevf_napi_disable_all(adapter);
- del_timer_sync(&adapter->service_timer);
+ timer_delete_sync(&adapter->service_timer);
/* disable transmits in the hardware now that interrupts are off */
for (i = 0; i < adapter->num_tx_queues; i++) {
@@ -2650,6 +2679,8 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
case ixgbe_mbox_api_15:
+ case ixgbe_mbox_api_16:
+ case ixgbe_mbox_api_17:
if (adapter->xdp_prog &&
hw->mac.max_tx_queues == rss)
rss = rss > 3 ? 2 : 1;
@@ -3175,8 +3206,8 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
**/
static void ixgbevf_service_timer(struct timer_list *t)
{
- struct ixgbevf_adapter *adapter = from_timer(adapter, t,
- service_timer);
+ struct ixgbevf_adapter *adapter = timer_container_of(adapter, t,
+ service_timer);
/* Reset the timer */
mod_timer(&adapter->service_timer, (HZ * 2) + jiffies);
@@ -4292,7 +4323,7 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
netdev->mtu, new_mtu);
/* must set new MTU before calling down or up */
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (netif_running(netdev))
ixgbevf_reinit_locked(adapter);
@@ -4300,7 +4331,7 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
-static int __maybe_unused ixgbevf_suspend(struct device *dev_d)
+static int ixgbevf_suspend(struct device *dev_d)
{
struct net_device *netdev = dev_get_drvdata(dev_d);
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
@@ -4317,12 +4348,12 @@ static int __maybe_unused ixgbevf_suspend(struct device *dev_d)
return 0;
}
-static int __maybe_unused ixgbevf_resume(struct device *dev_d)
+static int ixgbevf_resume(struct device *dev_d)
{
struct pci_dev *pdev = to_pci_dev(dev_d);
struct net_device *netdev = pci_get_drvdata(pdev);
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
- u32 err;
+ int err;
adapter->hw.hw_addr = adapter->io_addr;
smp_mb__before_atomic();
@@ -4644,6 +4675,8 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
case ixgbe_mbox_api_15:
+ case ixgbe_mbox_api_16:
+ case ixgbe_mbox_api_17:
netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
(ETH_HLEN + ETH_FCS_LEN);
break;
@@ -4694,6 +4727,9 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
case ixgbe_mac_X540_vf:
dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n");
break;
+ case ixgbe_mac_e610_vf:
+ dev_info(&pdev->dev, "Intel(R) E610 Virtual Function\n");
+ break;
case ixgbe_mac_82599_vf:
default:
dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n");
@@ -4854,7 +4890,7 @@ static const struct pci_error_handlers ixgbevf_err_handler = {
.resume = ixgbevf_io_resume,
};
-static SIMPLE_DEV_PM_OPS(ixgbevf_pm_ops, ixgbevf_suspend, ixgbevf_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(ixgbevf_pm_ops, ixgbevf_suspend, ixgbevf_resume);
static struct pci_driver ixgbevf_driver = {
.name = ixgbevf_driver_name,
@@ -4863,7 +4899,7 @@ static struct pci_driver ixgbevf_driver = {
.remove = ixgbevf_remove,
/* Power Management Hooks */
- .driver.pm = &ixgbevf_pm_ops,
+ .driver.pm = pm_sleep_ptr(&ixgbevf_pm_ops),
.shutdown = ixgbevf_shutdown,
.err_handler = &ixgbevf_err_handler
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.c b/drivers/net/ethernet/intel/ixgbevf/mbx.c
index a55dd978f7ca..24d0237e7a99 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.c
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.c
@@ -505,15 +505,3 @@ const struct ixgbe_mbx_operations ixgbevf_mbx_ops_legacy = {
.check_for_ack = ixgbevf_check_for_ack_vf,
.check_for_rst = ixgbevf_check_for_rst_vf,
};
-
-/* Mailbox operations when running on Hyper-V.
- * On Hyper-V, PF/VF communication is not through the
- * hardware mailbox; this communication is through
- * a software mediated path.
- * Most mail box operations are noop while running on
- * Hyper-V.
- */
-const struct ixgbe_mbx_operations ixgbevf_hv_mbx_ops = {
- .init_params = ixgbevf_init_mbx_params_vf,
- .check_for_rst = ixgbevf_check_for_rst_vf,
-};
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h
index 835bbcc5cc8e..a8ed23ee66aa 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.h
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h
@@ -66,6 +66,8 @@ enum ixgbe_pfvf_api_rev {
ixgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */
ixgbe_mbox_api_14, /* API version 1.4, linux/freebsd VF driver */
ixgbe_mbox_api_15, /* API version 1.5, linux/freebsd VF driver */
+ ixgbe_mbox_api_16, /* API version 1.6, linux/freebsd VF driver */
+ ixgbe_mbox_api_17, /* API version 1.7, linux/freebsd VF driver */
/* This value should always be last */
ixgbe_mbox_api_unknown, /* indicates that API version is not known */
};
@@ -102,6 +104,12 @@ enum ixgbe_pfvf_api_rev {
#define IXGBE_VF_GET_LINK_STATE 0x10 /* get vf link state */
+/* mailbox API, version 1.6 VF requests */
+#define IXGBE_VF_GET_PF_LINK_STATE 0x11 /* request PF to send link info */
+
+/* mailbox API, version 1.7 VF requests */
+#define IXGBE_VF_FEATURES_NEGOTIATE	0x12 /* get features supported by PF */
+
/* length of permanent address message returned from PF */
#define IXGBE_VF_PERMADDR_MSG_LEN 4
/* word in permanent address message with the current multicast type */
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 1641d00d8ed3..74d320879513 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include "vf.h"
#include "ixgbevf.h"
@@ -255,7 +255,7 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
memset(msgbuf, 0, sizeof(msgbuf));
/* If index is one then this is the start of a new list and needs
- * indication to the PF so it can do it's own list management.
+ * indication to the PF so it can do its own list management.
* If it is zero then that tells the PF to just clear all of
* this VF's macvlans and there is no new list.
*/
@@ -313,6 +313,8 @@ int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
* is not supported for this device type.
*/
switch (hw->api_version) {
+ case ixgbe_mbox_api_17:
+ case ixgbe_mbox_api_16:
case ixgbe_mbox_api_15:
case ixgbe_mbox_api_14:
case ixgbe_mbox_api_13:
@@ -382,6 +384,8 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
* or if the operation is not supported for this device type.
*/
switch (hw->api_version) {
+ case ixgbe_mbox_api_17:
+ case ixgbe_mbox_api_16:
case ixgbe_mbox_api_15:
case ixgbe_mbox_api_14:
case ixgbe_mbox_api_13:
@@ -552,6 +556,8 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
case ixgbe_mbox_api_15:
+ case ixgbe_mbox_api_16:
+ case ixgbe_mbox_api_17:
break;
default:
return -EOPNOTSUPP;
@@ -625,6 +631,85 @@ static s32 ixgbevf_hv_get_link_state_vf(struct ixgbe_hw *hw, bool *link_state)
}
/**
+ * ixgbevf_get_pf_link_state - Get PF's link status
+ * @hw: pointer to the HW structure
+ * @speed: link speed
+ * @link_up: indicate if link is up/down
+ *
+ * Ask PF to provide link_up state and speed of the link.
+ *
+ * Return: IXGBE_ERR_MBX in the case of mailbox error,
+ * -EOPNOTSUPP if the op is not supported or 0 on success.
+ */
+static int ixgbevf_get_pf_link_state(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up)
+{
+ u32 msgbuf[3] = {};
+ int err;
+
+ switch (hw->api_version) {
+ case ixgbe_mbox_api_16:
+ case ixgbe_mbox_api_17:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ msgbuf[0] = IXGBE_VF_GET_PF_LINK_STATE;
+
+ err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
+ ARRAY_SIZE(msgbuf));
+ if (err || (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE)) {
+ err = IXGBE_ERR_MBX;
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ /* No need to set @link_up to false as it will be done by
+ * ixgbevf_check_mac_link_vf().
+ */
+ } else {
+ *speed = msgbuf[1];
+ *link_up = msgbuf[2];
+ }
+
+ return err;
+}
+
+/**
+ * ixgbevf_negotiate_features_vf - negotiate supported features with PF driver
+ * @hw: pointer to the HW structure
+ * @pf_features: bitmask of features supported by PF
+ *
+ * Return: IXGBE_ERR_MBX in the case of mailbox error,
+ * -EOPNOTSUPP if the op is not supported or 0 on success.
+ */
+static int ixgbevf_negotiate_features_vf(struct ixgbe_hw *hw, u32 *pf_features)
+{
+ u32 msgbuf[2] = {};
+ int err;
+
+ switch (hw->api_version) {
+ case ixgbe_mbox_api_17:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ msgbuf[0] = IXGBE_VF_FEATURES_NEGOTIATE;
+ msgbuf[1] = IXGBEVF_SUPPORTED_FEATURES;
+
+ err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
+ ARRAY_SIZE(msgbuf));
+
+ if (err || (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE)) {
+ err = IXGBE_ERR_MBX;
+ *pf_features = 0x0;
+ } else {
+ *pf_features = msgbuf[1];
+ }
+
+ return err;
+}
+
+/**
* ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address
* @hw: pointer to the HW structure
* @vlan: 12 bit VLAN ID
@@ -659,6 +744,58 @@ mbx_err:
}
/**
+ * ixgbe_read_vflinks - Read VFLINKS register
+ * @hw: pointer to the HW structure
+ * @speed: link speed
+ * @link_up: indicate if link is up/down
+ *
+ * Get linkup status and link speed from the VFLINKS register.
+ */
+static void ixgbe_read_vflinks(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up)
+{
+ u32 vflinks = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+
+ /* if link status is down no point in checking to see if PF is up */
+ if (!(vflinks & IXGBE_LINKS_UP)) {
+ *link_up = false;
+ return;
+ }
+
+ /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
+ * before the link status is correct
+ */
+ if (hw->mac.type == ixgbe_mac_82599_vf) {
+ for (int i = 0; i < 5; i++) {
+ udelay(100);
+ vflinks = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+
+ if (!(vflinks & IXGBE_LINKS_UP)) {
+ *link_up = false;
+ return;
+ }
+ }
+ }
+
+ /* We reached this point so there's link */
+ *link_up = true;
+
+ switch (vflinks & IXGBE_LINKS_SPEED_82599) {
+ case IXGBE_LINKS_SPEED_10G_82599:
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ break;
+ case IXGBE_LINKS_SPEED_1G_82599:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ case IXGBE_LINKS_SPEED_100_82599:
+ *speed = IXGBE_LINK_SPEED_100_FULL;
+ break;
+ default:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ }
+}
+
+/**
* ixgbevf_hv_set_vfta_vf - * Hyper-V variant - just a stub.
* @hw: unused
* @vlan: unused
@@ -702,10 +839,10 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
bool *link_up,
bool autoneg_wait_to_complete)
{
+ struct ixgbevf_adapter *adapter = hw->back;
struct ixgbe_mbx_info *mbx = &hw->mbx;
struct ixgbe_mac_info *mac = &hw->mac;
s32 ret_val = 0;
- u32 links_reg;
u32 in_msg = 0;
/* If we were hit with a reset drop the link */
@@ -715,43 +852,21 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
if (!mac->get_link_status)
goto out;
- /* if link status is down no point in checking to see if pf is up */
- links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
- if (!(links_reg & IXGBE_LINKS_UP))
- goto out;
-
- /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
- * before the link status is correct
- */
- if (mac->type == ixgbe_mac_82599_vf) {
- int i;
-
- for (i = 0; i < 5; i++) {
- udelay(100);
- links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
-
- if (!(links_reg & IXGBE_LINKS_UP))
- goto out;
- }
- }
-
- switch (links_reg & IXGBE_LINKS_SPEED_82599) {
- case IXGBE_LINKS_SPEED_10G_82599:
- *speed = IXGBE_LINK_SPEED_10GB_FULL;
- break;
- case IXGBE_LINKS_SPEED_1G_82599:
- *speed = IXGBE_LINK_SPEED_1GB_FULL;
- break;
- case IXGBE_LINKS_SPEED_100_82599:
- *speed = IXGBE_LINK_SPEED_100_FULL;
- break;
+ if (hw->mac.type == ixgbe_mac_e610_vf) {
+ ret_val = ixgbevf_get_pf_link_state(hw, speed, link_up);
+ if (ret_val)
+ goto out;
+ } else {
+ ixgbe_read_vflinks(hw, speed, link_up);
+ if (!*link_up)
+ goto out;
}
/* if the read failed it could just be a mailbox collision, best wait
* until we are called again and don't report an error
*/
if (mbx->ops.read(hw, &in_msg, 1)) {
- if (hw->api_version >= ixgbe_mbox_api_15)
+ if (adapter->pf_features & IXGBEVF_PF_SUP_ESX_MBX)
mac->get_link_status = false;
goto out;
}
@@ -951,6 +1066,8 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
case ixgbe_mbox_api_15:
+ case ixgbe_mbox_api_16:
+ case ixgbe_mbox_api_17:
break;
default:
return 0;
@@ -1005,6 +1122,7 @@ static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
.setup_link = ixgbevf_setup_mac_link_vf,
.check_link = ixgbevf_check_mac_link_vf,
.negotiate_api_version = ixgbevf_negotiate_api_version_vf,
+ .negotiate_features = ixgbevf_negotiate_features_vf,
.set_rar = ixgbevf_set_rar_vf,
.update_mc_addr_list = ixgbevf_update_mc_addr_list_vf,
.update_xcast_mode = ixgbevf_update_xcast_mode,
@@ -1076,3 +1194,13 @@ const struct ixgbevf_info ixgbevf_x550em_a_vf_info = {
.mac = ixgbe_mac_x550em_a_vf,
.mac_ops = &ixgbevf_mac_ops,
};
+
+const struct ixgbevf_info ixgbevf_e610_vf_info = {
+ .mac = ixgbe_mac_e610_vf,
+ .mac_ops = &ixgbevf_mac_ops,
+};
+
+const struct ixgbevf_info ixgbevf_e610_vf_hv_info = {
+ .mac = ixgbe_mac_e610_vf,
+ .mac_ops = &ixgbevf_hv_mac_ops,
+};
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index b4eef5b6c172..4f19b8900c29 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#ifndef __IXGBE_VF_H__
#define __IXGBE_VF_H__
@@ -26,6 +26,7 @@ struct ixgbe_mac_operations {
s32 (*stop_adapter)(struct ixgbe_hw *);
s32 (*get_bus_info)(struct ixgbe_hw *);
s32 (*negotiate_api_version)(struct ixgbe_hw *hw, int api);
+ int (*negotiate_features)(struct ixgbe_hw *hw, u32 *pf_features);
/* Link */
s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
@@ -54,6 +55,8 @@ enum ixgbe_mac_type {
ixgbe_mac_X550_vf,
ixgbe_mac_X550EM_x_vf,
ixgbe_mac_x550em_a_vf,
+ ixgbe_mac_e610,
+ ixgbe_mac_e610_vf,
ixgbe_num_macs
};
diff --git a/drivers/net/ethernet/intel/libeth/Kconfig b/drivers/net/ethernet/intel/libeth/Kconfig
new file mode 100644
index 000000000000..2445b979c499
--- /dev/null
+++ b/drivers/net/ethernet/intel/libeth/Kconfig
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (C) 2024-2025 Intel Corporation
+
+config LIBETH
+ tristate "Common Ethernet library (libeth)" if COMPILE_TEST
+ select PAGE_POOL
+ help
+ libeth is a common library containing routines shared between several
+ drivers, but not yet promoted to the generic kernel API.
+
+config LIBETH_XDP
+ tristate "Common XDP library (libeth_xdp)" if COMPILE_TEST
+ select LIBETH
+ help
+ XDP and XSk helpers based on libeth hotpath management.
diff --git a/drivers/net/ethernet/intel/libeth/Makefile b/drivers/net/ethernet/intel/libeth/Makefile
new file mode 100644
index 000000000000..350bc0b38bad
--- /dev/null
+++ b/drivers/net/ethernet/intel/libeth/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (C) 2024-2025 Intel Corporation
+
+obj-$(CONFIG_LIBETH) += libeth.o
+
+libeth-y := rx.o
+libeth-y += tx.o
+
+obj-$(CONFIG_LIBETH_XDP) += libeth_xdp.o
+
+libeth_xdp-y += xdp.o
+libeth_xdp-y += xsk.o
diff --git a/drivers/net/ethernet/intel/libeth/priv.h b/drivers/net/ethernet/intel/libeth/priv.h
new file mode 100644
index 000000000000..9b811d31015c
--- /dev/null
+++ b/drivers/net/ethernet/intel/libeth/priv.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2025 Intel Corporation */
+
+#ifndef __LIBETH_PRIV_H
+#define __LIBETH_PRIV_H
+
+#include <linux/types.h>
+
+/* XDP */
+
+enum xdp_action;
+struct libeth_xdp_buff;
+struct libeth_xdp_tx_frame;
+struct skb_shared_info;
+struct xdp_frame_bulk;
+
+extern const struct xsk_tx_metadata_ops libeth_xsktmo_slow;
+
+void libeth_xsk_tx_return_bulk(const struct libeth_xdp_tx_frame *bq,
+ u32 count);
+u32 libeth_xsk_prog_exception(struct libeth_xdp_buff *xdp, enum xdp_action act,
+ int ret);
+
+struct libeth_xdp_ops {
+ void (*bulk)(const struct skb_shared_info *sinfo,
+ struct xdp_frame_bulk *bq, bool frags);
+ void (*xsk)(struct libeth_xdp_buff *xdp);
+};
+
+void libeth_attach_xdp(const struct libeth_xdp_ops *ops);
+
+static inline void libeth_detach_xdp(void)
+{
+ libeth_attach_xdp(NULL);
+}
+
+#endif /* __LIBETH_PRIV_H */
diff --git a/drivers/net/ethernet/intel/libeth/rx.c b/drivers/net/ethernet/intel/libeth/rx.c
new file mode 100644
index 000000000000..62521a1f4ec9
--- /dev/null
+++ b/drivers/net/ethernet/intel/libeth/rx.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2024-2025 Intel Corporation */
+
+#define DEFAULT_SYMBOL_NAMESPACE "LIBETH"
+
+#include <linux/export.h>
+
+#include <net/libeth/rx.h>
+
+/* Rx buffer management */
+
+/**
+ * libeth_rx_hw_len_mtu - get the actual buffer size to be passed to HW
+ * @pp: &page_pool_params of the netdev to calculate the size for
+ * @max_len: maximum buffer size for a single descriptor
+ *
+ * Return: HW-writeable length per one buffer to pass it to the HW, accounting
+ * for the MTU of @pp->netdev, the HW-required alignment, the minimum and
+ * maximum allowed values, and the system's page size.
+ */
+static u32 libeth_rx_hw_len_mtu(const struct page_pool_params *pp, u32 max_len)
+{
+ u32 len;
+
+ len = READ_ONCE(pp->netdev->mtu) + LIBETH_RX_LL_LEN;
+ len = ALIGN(len, LIBETH_RX_BUF_STRIDE);
+ len = min3(len, ALIGN_DOWN(max_len ? : U32_MAX, LIBETH_RX_BUF_STRIDE),
+ pp->max_len);
+
+ return len;
+}
+
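A minimal userspace sketch of the same rounding and clamping, assuming a 128-byte stride; the helper names and values below are made up for illustration and are not part of libeth:

#include <stdint.h>
#include <stdio.h>

#define STRIDE 128u	/* stand-in for LIBETH_RX_BUF_STRIDE */

static uint32_t align_up(uint32_t x, uint32_t a)   { return (x + a - 1) / a * a; }
static uint32_t align_down(uint32_t x, uint32_t a) { return x / a * a; }
static uint32_t min_u32(uint32_t a, uint32_t b)    { return a < b ? a : b; }

/* MTU + link-layer overhead, rounded up to the stride, then clamped to the
 * per-descriptor maximum (0 means "no limit") and the per-page writeable len */
static uint32_t hw_len_mtu(uint32_t mtu, uint32_t ll_len, uint32_t max_len,
			   uint32_t pp_max_len)
{
	uint32_t len = align_up(mtu + ll_len, STRIDE);

	len = min_u32(len, align_down(max_len ? max_len : UINT32_MAX, STRIDE));
	return min_u32(len, pp_max_len);
}

int main(void)
{
	/* 1500-byte MTU, 18 bytes of L2 overhead -> 1536-byte HW buffers */
	printf("%u\n", hw_len_mtu(1500, 18, 0, 3072));
	return 0;
}
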
+/**
+ * libeth_rx_hw_len_truesize - get the short buffer size to be passed to HW
+ * @pp: &page_pool_params of the netdev to calculate the size for
+ * @max_len: maximum buffer size for a single descriptor
+ * @truesize: desired truesize for the buffers
+ *
+ * Return: HW-writeable length per one buffer to pass it to the HW ignoring the
+ * MTU and closest to the passed truesize. Can be used for "short" buffer
+ * queues to fragment pages more efficiently.
+ */
+static u32 libeth_rx_hw_len_truesize(const struct page_pool_params *pp,
+ u32 max_len, u32 truesize)
+{
+ u32 min, len;
+
+ min = SKB_HEAD_ALIGN(pp->offset + LIBETH_RX_BUF_STRIDE);
+ truesize = clamp(roundup_pow_of_two(truesize), roundup_pow_of_two(min),
+ PAGE_SIZE << LIBETH_RX_PAGE_ORDER);
+
+ len = SKB_WITH_OVERHEAD(truesize - pp->offset);
+ len = ALIGN_DOWN(len, LIBETH_RX_BUF_STRIDE) ? : LIBETH_RX_BUF_STRIDE;
+ len = min3(len, ALIGN_DOWN(max_len ? : U32_MAX, LIBETH_RX_BUF_STRIDE),
+ pp->max_len);
+
+ return len;
+}
+
+/**
+ * libeth_rx_page_pool_params - calculate params with the stack overhead
+ * @fq: buffer queue to calculate the size for
+ * @pp: &page_pool_params of the netdev
+ *
+ * Set the PP params to include all needed stack overhead (headroom, tailroom) and
+ * both the HW buffer length and the truesize for all types of buffers. For
+ * "short" buffers, truesize never exceeds the "wanted" one; for the rest,
+ * it can be up to the page size.
+ *
+ * Return: true on success, false on invalid input params.
+ */
+static bool libeth_rx_page_pool_params(struct libeth_fq *fq,
+ struct page_pool_params *pp)
+{
+ pp->offset = fq->xdp ? LIBETH_XDP_HEADROOM : LIBETH_SKB_HEADROOM;
+ /* HW-writeable / syncable length per one page */
+ pp->max_len = LIBETH_RX_PAGE_LEN(pp->offset);
+
+ /* HW-writeable length per buffer */
+ switch (fq->type) {
+ case LIBETH_FQE_MTU:
+ fq->buf_len = libeth_rx_hw_len_mtu(pp, fq->buf_len);
+ break;
+ case LIBETH_FQE_SHORT:
+ fq->buf_len = libeth_rx_hw_len_truesize(pp, fq->buf_len,
+ fq->truesize);
+ break;
+ case LIBETH_FQE_HDR:
+ fq->buf_len = ALIGN(LIBETH_MAX_HEAD, LIBETH_RX_BUF_STRIDE);
+ break;
+ default:
+ return false;
+ }
+
+ /* Buffer size to allocate */
+ fq->truesize = roundup_pow_of_two(SKB_HEAD_ALIGN(pp->offset +
+ fq->buf_len));
+
+ return true;
+}
+
+/**
+ * libeth_rx_page_pool_params_zc - calculate params without the stack overhead
+ * @fq: buffer queue to calculate the size for
+ * @pp: &page_pool_params of the netdev
+ *
+ * Set the PP params to exclude the stack overhead and set both the buffer
+ * length and the truesize, which are equal for the data buffers. Note that this
+ * requires separate header buffers to be always active and account the
+ * overhead.
+ * With the MTU == ``PAGE_SIZE``, this allows the kernel to enable the zerocopy
+ * mode.
+ *
+ * Return: true on success, false on invalid input params.
+ */
+static bool libeth_rx_page_pool_params_zc(struct libeth_fq *fq,
+ struct page_pool_params *pp)
+{
+ u32 mtu, max;
+
+ pp->offset = 0;
+ pp->max_len = PAGE_SIZE << LIBETH_RX_PAGE_ORDER;
+
+ switch (fq->type) {
+ case LIBETH_FQE_MTU:
+ mtu = READ_ONCE(pp->netdev->mtu);
+ break;
+ case LIBETH_FQE_SHORT:
+ mtu = fq->truesize;
+ break;
+ default:
+ return false;
+ }
+
+ mtu = roundup_pow_of_two(mtu);
+ max = min(rounddown_pow_of_two(fq->buf_len ? : U32_MAX),
+ pp->max_len);
+
+ fq->buf_len = clamp(mtu, LIBETH_RX_BUF_STRIDE, max);
+ fq->truesize = fq->buf_len;
+
+ return true;
+}
+
+/**
+ * libeth_rx_fq_create - create a PP with the default libeth settings
+ * @fq: buffer queue struct to fill
+ * @napi: &napi_struct covering this PP (no usage outside its poll loops)
+ *
+ * Return: %0 on success, -%errno on failure.
+ */
+int libeth_rx_fq_create(struct libeth_fq *fq, struct napi_struct *napi)
+{
+ struct page_pool_params pp = {
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .order = LIBETH_RX_PAGE_ORDER,
+ .pool_size = fq->count,
+ .nid = fq->nid,
+ .dev = napi->dev->dev.parent,
+ .netdev = napi->dev,
+ .napi = napi,
+ };
+ struct libeth_fqe *fqes;
+ struct page_pool *pool;
+ int ret;
+
+ pp.dma_dir = fq->xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
+
+ if (!fq->hsplit)
+ ret = libeth_rx_page_pool_params(fq, &pp);
+ else
+ ret = libeth_rx_page_pool_params_zc(fq, &pp);
+ if (!ret)
+ return -EINVAL;
+
+ pool = page_pool_create(&pp);
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
+
+ fqes = kvcalloc_node(fq->count, sizeof(*fqes), GFP_KERNEL, fq->nid);
+ if (!fqes) {
+ ret = -ENOMEM;
+ goto err_buf;
+ }
+
+ ret = xdp_reg_page_pool(pool);
+ if (ret)
+ goto err_mem;
+
+ fq->fqes = fqes;
+ fq->pp = pool;
+
+ return 0;
+
+err_mem:
+ kvfree(fqes);
+err_buf:
+ page_pool_destroy(pool);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(libeth_rx_fq_create);
+
+/**
+ * libeth_rx_fq_destroy - destroy a &page_pool created by libeth
+ * @fq: buffer queue to process
+ */
+void libeth_rx_fq_destroy(struct libeth_fq *fq)
+{
+ xdp_unreg_page_pool(fq->pp);
+ kvfree(fq->fqes);
+ page_pool_destroy(fq->pp);
+}
+EXPORT_SYMBOL_GPL(libeth_rx_fq_destroy);
+
+/**
+ * libeth_rx_recycle_slow - recycle libeth netmem
+ * @netmem: network memory to recycle
+ *
+ * To be used on exceptions or rare cases not requiring fast inline recycling.
+ */
+void __cold libeth_rx_recycle_slow(netmem_ref netmem)
+{
+ page_pool_put_full_netmem(netmem_get_pp(netmem), netmem, false);
+}
+EXPORT_SYMBOL_GPL(libeth_rx_recycle_slow);
+
+/* Converting abstract packet type numbers into a software structure with
+ * the packet parameters to do O(1) lookup on Rx.
+ */
+
+static const u16 libeth_rx_pt_xdp_oip[] = {
+ [LIBETH_RX_PT_OUTER_L2] = XDP_RSS_TYPE_NONE,
+ [LIBETH_RX_PT_OUTER_IPV4] = XDP_RSS_L3_IPV4,
+ [LIBETH_RX_PT_OUTER_IPV6] = XDP_RSS_L3_IPV6,
+};
+
+static const u16 libeth_rx_pt_xdp_iprot[] = {
+ [LIBETH_RX_PT_INNER_NONE] = XDP_RSS_TYPE_NONE,
+ [LIBETH_RX_PT_INNER_UDP] = XDP_RSS_L4_UDP,
+ [LIBETH_RX_PT_INNER_TCP] = XDP_RSS_L4_TCP,
+ [LIBETH_RX_PT_INNER_SCTP] = XDP_RSS_L4_SCTP,
+ [LIBETH_RX_PT_INNER_ICMP] = XDP_RSS_L4_ICMP,
+ [LIBETH_RX_PT_INNER_TIMESYNC] = XDP_RSS_TYPE_NONE,
+};
+
+static const u16 libeth_rx_pt_xdp_pl[] = {
+ [LIBETH_RX_PT_PAYLOAD_NONE] = XDP_RSS_TYPE_NONE,
+ [LIBETH_RX_PT_PAYLOAD_L2] = XDP_RSS_TYPE_NONE,
+ [LIBETH_RX_PT_PAYLOAD_L3] = XDP_RSS_TYPE_NONE,
+ [LIBETH_RX_PT_PAYLOAD_L4] = XDP_RSS_L4,
+};
+
+/**
+ * libeth_rx_pt_gen_hash_type - generate an XDP RSS hash type for a PT
+ * @pt: PT structure to evaluate
+ *
+ * Generates the ``hash_type`` field with XDP RSS type values from the parsed
+ * packet parameters if they're obtained dynamically at runtime.
+ */
+void libeth_rx_pt_gen_hash_type(struct libeth_rx_pt *pt)
+{
+ pt->hash_type = 0;
+ pt->hash_type |= libeth_rx_pt_xdp_oip[pt->outer_ip];
+ pt->hash_type |= libeth_rx_pt_xdp_iprot[pt->inner_prot];
+ pt->hash_type |= libeth_rx_pt_xdp_pl[pt->payload_layer];
+}
+EXPORT_SYMBOL_GPL(libeth_rx_pt_gen_hash_type);
+
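The three lookup tables above give an O(1) composition of the XDP RSS hash type from the pre-parsed packet-type fields. A rough userspace sketch of that composition, with invented enum values and bit masks standing in for the libeth/XDP ones:

#include <stdint.h>
#include <stdio.h>

/* Invented stand-ins for the libeth PT fields and the XDP RSS bits */
enum outer_ip { OUTER_L2, OUTER_IPV4, OUTER_IPV6 };
enum inner_pr { INNER_NONE, INNER_UDP, INNER_TCP };

#define RSS_L3_IPV4	0x1u
#define RSS_L3_IPV6	0x2u
#define RSS_L4_TCP	0x10u
#define RSS_L4_UDP	0x20u

static const uint16_t oip_tbl[]  = { [OUTER_L2] = 0, [OUTER_IPV4] = RSS_L3_IPV4,
				     [OUTER_IPV6] = RSS_L3_IPV6 };
static const uint16_t prot_tbl[] = { [INNER_NONE] = 0, [INNER_UDP] = RSS_L4_UDP,
				     [INNER_TCP] = RSS_L4_TCP };

/* O(1) composition: one table lookup per parsed field, OR-ed together */
static uint16_t gen_hash_type(enum outer_ip oip, enum inner_pr prot)
{
	return oip_tbl[oip] | prot_tbl[prot];
}

int main(void)
{
	printf("0x%x\n", (unsigned int)gen_hash_type(OUTER_IPV4, INNER_TCP));
	return 0;	/* prints 0x11 */
}
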
+/* Module */
+
+MODULE_DESCRIPTION("Common Ethernet library");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/intel/libeth/tx.c b/drivers/net/ethernet/intel/libeth/tx.c
new file mode 100644
index 000000000000..e0167f43d2a8
--- /dev/null
+++ b/drivers/net/ethernet/intel/libeth/tx.c
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2025 Intel Corporation */
+
+#define DEFAULT_SYMBOL_NAMESPACE "LIBETH"
+
+#include <net/libeth/xdp.h>
+
+#include "priv.h"
+
+/* Tx buffer completion */
+
+DEFINE_STATIC_CALL_NULL(bulk, libeth_xdp_return_buff_bulk);
+DEFINE_STATIC_CALL_NULL(xsk, libeth_xsk_buff_free_slow);
+
+/**
+ * libeth_tx_complete_any - perform Tx completion for one SQE of any type
+ * @sqe: Tx buffer to complete
+ * @cp: polling params
+ *
+ * Can be used to complete both regular and XDP SQEs, for example when
+ * destroying queues.
+ * When libeth_xdp is not loaded, XDPSQEs won't be handled.
+ */
+void libeth_tx_complete_any(struct libeth_sqe *sqe, struct libeth_cq_pp *cp)
+{
+ if (sqe->type >= __LIBETH_SQE_XDP_START)
+ __libeth_xdp_complete_tx(sqe, cp, static_call(bulk),
+ static_call(xsk));
+ else
+ libeth_tx_complete(sqe, cp);
+}
+EXPORT_SYMBOL_GPL(libeth_tx_complete_any);
+
+/* Module */
+
+void libeth_attach_xdp(const struct libeth_xdp_ops *ops)
+{
+ static_call_update(bulk, ops ? ops->bulk : NULL);
+ static_call_update(xsk, ops ? ops->xsk : NULL);
+}
+EXPORT_SYMBOL_GPL(libeth_attach_xdp);
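libeth_attach_xdp()/libeth_detach_xdp() let the optional libeth_xdp module plug its completion callbacks into the base library at runtime; while nothing is attached, the static calls above are NULL and XDP completions are skipped. A hedged userspace sketch of the same attach/detach idea using plain function pointers instead of static calls (all names below are invented):

#include <stdio.h>

/* Invented stand-ins for the libeth_xdp callbacks */
struct xdp_ops {
	void (*bulk)(int frame);
	void (*xsk)(int frame);
};

/* NULL by default, like the DEFINE_STATIC_CALL_NULL() targets above */
static const struct xdp_ops *active_ops;

static void attach_xdp(const struct xdp_ops *ops) { active_ops = ops; }
static void detach_xdp(void)                      { active_ops = NULL; }

/* Completion path: XDP frames are simply skipped while nothing is attached */
static void complete_frame(int frame, int is_xdp)
{
	if (is_xdp && active_ops)
		active_ops->bulk(frame);
	else if (!is_xdp)
		printf("regular completion for frame %d\n", frame);
}

static void bulk_cb(int frame) { printf("XDP completion for frame %d\n", frame); }

int main(void)
{
	static const struct xdp_ops ops = { .bulk = bulk_cb };

	complete_frame(1, 1);	/* XDP frame, nothing attached: ignored */
	attach_xdp(&ops);	/* analogous to the module init above */
	complete_frame(2, 1);	/* now handled by the attached callback */
	detach_xdp();
	return 0;
}
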
diff --git a/drivers/net/ethernet/intel/libeth/xdp.c b/drivers/net/ethernet/intel/libeth/xdp.c
new file mode 100644
index 000000000000..d4ac027d9584
--- /dev/null
+++ b/drivers/net/ethernet/intel/libeth/xdp.c
@@ -0,0 +1,451 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2025 Intel Corporation */
+
+#define DEFAULT_SYMBOL_NAMESPACE "LIBETH_XDP"
+
+#include <linux/export.h>
+
+#include <net/libeth/xdp.h>
+
+#include "priv.h"
+
+/* XDPSQ sharing */
+
+DEFINE_STATIC_KEY_FALSE(libeth_xdpsq_share);
+EXPORT_SYMBOL_GPL(libeth_xdpsq_share);
+
+void __libeth_xdpsq_get(struct libeth_xdpsq_lock *lock,
+ const struct net_device *dev)
+{
+ bool warn;
+
+ spin_lock_init(&lock->lock);
+ lock->share = true;
+
+ warn = !static_key_enabled(&libeth_xdpsq_share);
+ static_branch_inc(&libeth_xdpsq_share);
+
+ if (warn && net_ratelimit())
+ netdev_warn(dev, "XDPSQ sharing enabled, possible XDP Tx slowdown\n");
+}
+EXPORT_SYMBOL_GPL(__libeth_xdpsq_get);
+
+void __libeth_xdpsq_put(struct libeth_xdpsq_lock *lock,
+ const struct net_device *dev)
+{
+ static_branch_dec(&libeth_xdpsq_share);
+
+ if (!static_key_enabled(&libeth_xdpsq_share) && net_ratelimit())
+ netdev_notice(dev, "XDPSQ sharing disabled\n");
+
+ lock->share = false;
+}
+EXPORT_SYMBOL_GPL(__libeth_xdpsq_put);
+
+void __acquires(&lock->lock)
+__libeth_xdpsq_lock(struct libeth_xdpsq_lock *lock)
+{
+ spin_lock(&lock->lock);
+}
+EXPORT_SYMBOL_GPL(__libeth_xdpsq_lock);
+
+void __releases(&lock->lock)
+__libeth_xdpsq_unlock(struct libeth_xdpsq_lock *lock)
+{
+ spin_unlock(&lock->lock);
+}
+EXPORT_SYMBOL_GPL(__libeth_xdpsq_unlock);
+
+/* XDPSQ clean-up timers */
+
+/**
+ * libeth_xdpsq_init_timer - initialize an XDPSQ clean-up timer
+ * @timer: timer to initialize
+ * @xdpsq: queue this timer belongs to
+ * @lock: corresponding XDPSQ lock
+ * @poll: queue polling/completion function
+ *
+ * XDPSQ clean-up timers must be set up at queue configuration time, before the
+ * queue is used. Set the required pointers and the cleaning callback.
+ */
+void libeth_xdpsq_init_timer(struct libeth_xdpsq_timer *timer, void *xdpsq,
+ struct libeth_xdpsq_lock *lock,
+ void (*poll)(struct work_struct *work))
+{
+ timer->xdpsq = xdpsq;
+ timer->lock = lock;
+
+ INIT_DELAYED_WORK(&timer->dwork, poll);
+}
+EXPORT_SYMBOL_GPL(libeth_xdpsq_init_timer);
+
+/* ``XDP_TX`` bulking */
+
+static void __cold
+libeth_xdp_tx_return_one(const struct libeth_xdp_tx_frame *frm)
+{
+ if (frm->len_fl & LIBETH_XDP_TX_MULTI)
+ libeth_xdp_return_frags(frm->data + frm->soff, true);
+
+ libeth_xdp_return_va(frm->data, true);
+}
+
+static void __cold
+libeth_xdp_tx_return_bulk(const struct libeth_xdp_tx_frame *bq, u32 count)
+{
+ for (u32 i = 0; i < count; i++) {
+ const struct libeth_xdp_tx_frame *frm = &bq[i];
+
+ if (!(frm->len_fl & LIBETH_XDP_TX_FIRST))
+ continue;
+
+ libeth_xdp_tx_return_one(frm);
+ }
+}
+
+static void __cold libeth_trace_xdp_exception(const struct net_device *dev,
+ const struct bpf_prog *prog,
+ u32 act)
+{
+ trace_xdp_exception(dev, prog, act);
+}
+
+/**
+ * libeth_xdp_tx_exception - handle Tx exceptions of XDP frames
+ * @bq: XDP Tx frame bulk
+ * @sent: number of frames sent successfully (from this bulk)
+ * @flags: internal libeth_xdp flags (XSk, .ndo_xdp_xmit etc.)
+ *
+ * Cold helper used by __libeth_xdp_tx_flush_bulk(), do not call directly.
+ * Reports XDP Tx exceptions, frees the frames that won't be sent, or adjusts
+ * the Tx bulk to try again later.
+ */
+void __cold libeth_xdp_tx_exception(struct libeth_xdp_tx_bulk *bq, u32 sent,
+ u32 flags)
+{
+ const struct libeth_xdp_tx_frame *pos = &bq->bulk[sent];
+ u32 left = bq->count - sent;
+
+ if (!(flags & LIBETH_XDP_TX_NDO))
+ libeth_trace_xdp_exception(bq->dev, bq->prog, XDP_TX);
+
+ if (!(flags & LIBETH_XDP_TX_DROP)) {
+ memmove(bq->bulk, pos, left * sizeof(*bq->bulk));
+ bq->count = left;
+
+ return;
+ }
+
+ if (flags & LIBETH_XDP_TX_XSK)
+ libeth_xsk_tx_return_bulk(pos, left);
+ else if (!(flags & LIBETH_XDP_TX_NDO))
+ libeth_xdp_tx_return_bulk(pos, left);
+ else
+ libeth_xdp_xmit_return_bulk(pos, left, bq->dev);
+
+ bq->count = 0;
+}
+EXPORT_SYMBOL_GPL(libeth_xdp_tx_exception);
+
+/* .ndo_xdp_xmit() implementation */
+
+u32 __cold libeth_xdp_xmit_return_bulk(const struct libeth_xdp_tx_frame *bq,
+ u32 count, const struct net_device *dev)
+{
+ u32 n = 0;
+
+ for (u32 i = 0; i < count; i++) {
+ const struct libeth_xdp_tx_frame *frm = &bq[i];
+ dma_addr_t dma;
+
+ if (frm->flags & LIBETH_XDP_TX_FIRST)
+ dma = *libeth_xdp_xmit_frame_dma(frm->xdpf);
+ else
+ dma = dma_unmap_addr(frm, dma);
+
+ dma_unmap_page(dev->dev.parent, dma, dma_unmap_len(frm, len),
+ DMA_TO_DEVICE);
+
+ /* Actual xdp_frames are freed by the core */
+ n += !!(frm->flags & LIBETH_XDP_TX_FIRST);
+ }
+
+ return n;
+}
+EXPORT_SYMBOL_GPL(libeth_xdp_xmit_return_bulk);
+
+/* Rx polling path */
+
+/**
+ * libeth_xdp_load_stash - recreate an &xdp_buff from libeth_xdp buffer stash
+ * @dst: target &libeth_xdp_buff to initialize
+ * @src: source stash
+ *
+ * External helper used by libeth_xdp_init_buff(), do not call directly.
+ * Recreate an onstack &libeth_xdp_buff using the stash saved earlier.
+ * The only field untouched (rxq) is initialized later in the
+ * abovementioned function.
+ */
+void libeth_xdp_load_stash(struct libeth_xdp_buff *dst,
+ const struct libeth_xdp_buff_stash *src)
+{
+ dst->data = src->data;
+ dst->base.data_end = src->data + src->len;
+ dst->base.data_meta = src->data;
+ dst->base.data_hard_start = src->data - src->headroom;
+
+ dst->base.frame_sz = src->frame_sz;
+ dst->base.flags = src->flags;
+}
+EXPORT_SYMBOL_GPL(libeth_xdp_load_stash);
+
+/**
+ * libeth_xdp_save_stash - convert &xdp_buff to a libeth_xdp buffer stash
+ * @dst: target &libeth_xdp_buff_stash to initialize
+ * @src: source XDP buffer
+ *
+ * External helper used by libeth_xdp_save_buff(), do not call directly.
+ * Use the fields from the passed XDP buffer to initialize the stash on the
+ * queue, so that a partially received frame can be finished later during
+ * the next NAPI poll.
+ */
+void libeth_xdp_save_stash(struct libeth_xdp_buff_stash *dst,
+ const struct libeth_xdp_buff *src)
+{
+ dst->data = src->data;
+ dst->headroom = src->data - src->base.data_hard_start;
+ dst->len = src->base.data_end - src->data;
+
+ dst->frame_sz = src->base.frame_sz;
+ dst->flags = src->base.flags;
+
+ WARN_ON_ONCE(dst->flags != src->base.flags);
+}
+EXPORT_SYMBOL_GPL(libeth_xdp_save_stash);
+
+void __libeth_xdp_return_stash(struct libeth_xdp_buff_stash *stash)
+{
+ LIBETH_XDP_ONSTACK_BUFF(xdp);
+
+ libeth_xdp_load_stash(xdp, stash);
+ libeth_xdp_return_buff_slow(xdp);
+
+ stash->data = NULL;
+}
+EXPORT_SYMBOL_GPL(__libeth_xdp_return_stash);
+
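The stash is just the live buffer re-expressed as one pointer plus two offsets, so the load/save pair above is a lossless round trip. A small userspace sketch of that round trip with invented miniature structs (not the real &xdp_buff layout):

#include <stdio.h>

/* Invented miniature versions of the two representations: a "live" buffer
 * described by pointers, and a compact stash described by data + offsets */
struct live_buff {
	unsigned char *data;		/* start of the payload */
	unsigned char *data_end;	/* end of the payload */
	unsigned char *hard_start;	/* start of the whole buffer */
};

struct stash {
	unsigned char *data;
	unsigned int headroom;
	unsigned int len;
};

static void save(struct stash *dst, const struct live_buff *src)
{
	dst->data = src->data;
	dst->headroom = src->data - src->hard_start;
	dst->len = src->data_end - src->data;
}

static void load(struct live_buff *dst, const struct stash *src)
{
	dst->data = src->data;
	dst->data_end = src->data + src->len;
	dst->hard_start = src->data - src->headroom;
}

int main(void)
{
	unsigned char page[256];
	struct live_buff a = { page + 64, page + 64 + 100, page }, b;
	struct stash s;

	save(&s, &a);
	load(&b, &s);
	printf("headroom %u, len %u, round-trip ok: %d\n", s.headroom, s.len,
	       b.data == a.data && b.data_end == a.data_end &&
	       b.hard_start == a.hard_start);
	return 0;
}
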
+/**
+ * libeth_xdp_return_buff_slow - free &libeth_xdp_buff
+ * @xdp: buffer to free/return
+ *
+ * Slowpath version of libeth_xdp_return_buff() to be called on exceptions,
+ * queue clean-ups etc., without unwanted inlining.
+ */
+void __cold libeth_xdp_return_buff_slow(struct libeth_xdp_buff *xdp)
+{
+ __libeth_xdp_return_buff(xdp, false);
+}
+EXPORT_SYMBOL_GPL(libeth_xdp_return_buff_slow);
+
+/**
+ * libeth_xdp_buff_add_frag - add frag to XDP buffer
+ * @xdp: head XDP buffer
+ * @fqe: Rx buffer containing the frag
+ * @len: frag length reported by HW
+ *
+ * External helper used by libeth_xdp_process_buff(), do not call directly.
+ * Frees both head and frag buffers on error.
+ *
+ * Return: true on success, false on error (no space for a new frag).
+ */
+bool libeth_xdp_buff_add_frag(struct libeth_xdp_buff *xdp,
+ const struct libeth_fqe *fqe,
+ u32 len)
+{
+ netmem_ref netmem = fqe->netmem;
+
+ if (!xdp_buff_add_frag(&xdp->base, netmem,
+ fqe->offset + netmem_get_pp(netmem)->p.offset,
+ len, fqe->truesize))
+ goto recycle;
+
+ return true;
+
+recycle:
+ libeth_rx_recycle_slow(netmem);
+ libeth_xdp_return_buff_slow(xdp);
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(libeth_xdp_buff_add_frag);
+
+/**
+ * libeth_xdp_prog_exception - handle XDP prog exceptions
+ * @bq: XDP Tx bulk
+ * @xdp: buffer to process
+ * @act: original XDP prog verdict
+ * @ret: error code if redirect failed
+ *
+ * External helper used by __libeth_xdp_run_prog() and
+ * __libeth_xsk_run_prog_slow(), do not call directly.
+ * Reports invalid @act, XDP exception trace event and frees the buffer.
+ *
+ * Return: libeth_xdp XDP prog verdict.
+ */
+u32 __cold libeth_xdp_prog_exception(const struct libeth_xdp_tx_bulk *bq,
+ struct libeth_xdp_buff *xdp,
+ enum xdp_action act, int ret)
+{
+ if (act > XDP_REDIRECT)
+ bpf_warn_invalid_xdp_action(bq->dev, bq->prog, act);
+
+ libeth_trace_xdp_exception(bq->dev, bq->prog, act);
+
+ if (xdp->base.rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL)
+ return libeth_xsk_prog_exception(xdp, act, ret);
+
+ libeth_xdp_return_buff_slow(xdp);
+
+ return LIBETH_XDP_DROP;
+}
+EXPORT_SYMBOL_GPL(libeth_xdp_prog_exception);
+
+/* Tx buffer completion */
+
+static void libeth_xdp_put_netmem_bulk(netmem_ref netmem,
+ struct xdp_frame_bulk *bq)
+{
+ if (unlikely(bq->count == XDP_BULK_QUEUE_SIZE))
+ xdp_flush_frame_bulk(bq);
+
+ bq->q[bq->count++] = netmem;
+}
+
+/**
+ * libeth_xdp_return_buff_bulk - free &xdp_buff as part of a bulk
+ * @sinfo: shared info corresponding to the buffer
+ * @bq: XDP frame bulk to store the buffer
+ * @frags: whether the buffer has frags
+ *
+ * Same as xdp_return_frame_bulk(), but for &libeth_xdp_buff, speeds up Tx
+ * completion of ``XDP_TX`` buffers and allows freeing them in the same bulks
+ * as &xdp_frame buffers.
+ */
+void libeth_xdp_return_buff_bulk(const struct skb_shared_info *sinfo,
+ struct xdp_frame_bulk *bq, bool frags)
+{
+ if (!frags)
+ goto head;
+
+ for (u32 i = 0; i < sinfo->nr_frags; i++)
+ libeth_xdp_put_netmem_bulk(skb_frag_netmem(&sinfo->frags[i]),
+ bq);
+
+head:
+ libeth_xdp_put_netmem_bulk(virt_to_netmem(sinfo), bq);
+}
+EXPORT_SYMBOL_GPL(libeth_xdp_return_buff_bulk);
+
+/* Misc */
+
+/**
+ * libeth_xdp_queue_threshold - calculate XDP queue clean/refill threshold
+ * @count: number of descriptors in the queue
+ *
+ * The threshold is the limit at which RQs start to refill (when the number of
+ * empty buffers exceeds it) and SQs get cleaned up (when the number of free
+ * descriptors goes below it). To speed up hotpath processing, the threshold is
+ * always a power of two, closest to 1/4 of the queue length.
+ * Don't call it on hotpath, calculate and cache the threshold during the
+ * queue initialization.
+ *
+ * Return: the calculated threshold.
+ */
+u32 libeth_xdp_queue_threshold(u32 count)
+{
+ u32 quarter, low, high;
+
+ if (likely(is_power_of_2(count)))
+ return count >> 2;
+
+ quarter = DIV_ROUND_CLOSEST(count, 4);
+ low = rounddown_pow_of_two(quarter);
+ high = roundup_pow_of_two(quarter);
+
+ return high - quarter <= quarter - low ? high : low;
+}
+EXPORT_SYMBOL_GPL(libeth_xdp_queue_threshold);
+
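The same closest-power-of-two-to-a-quarter computation, as a standalone userspace sketch for sanity checking (helper names are ad hoc, not kernel APIs):

#include <stdint.h>
#include <stdio.h>

static int is_pow2(uint32_t x)
{
	return x && !(x & (x - 1));
}

static uint32_t rounddown_pow2(uint32_t x)
{
	while (!is_pow2(x))
		x &= x - 1;		/* clear the lowest set bit */
	return x;
}

static uint32_t roundup_pow2(uint32_t x)
{
	return is_pow2(x) ? x : rounddown_pow2(x) << 1;
}

/* closest power of two to count/4 (exactly count/4 when count is a pow-2) */
static uint32_t queue_threshold(uint32_t count)
{
	uint32_t quarter, low, high;

	if (is_pow2(count))
		return count >> 2;

	quarter = (count + 2) / 4;	/* DIV_ROUND_CLOSEST(count, 4) */
	low = rounddown_pow2(quarter);
	high = roundup_pow2(quarter);

	return high - quarter <= quarter - low ? high : low;
}

int main(void)
{
	printf("%u %u %u\n", queue_threshold(512), queue_threshold(1000),
	       queue_threshold(768));	/* prints: 128 256 256 */
	return 0;
}
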
+/**
+ * __libeth_xdp_set_features - set XDP features for netdev
+ * @dev: &net_device to configure
+ * @xmo: XDP metadata ops (Rx hints)
+ * @zc_segs: maximum number of S/G frags the HW can transmit
+ * @tmo: XSk Tx metadata ops (Tx hints)
+ *
+ * Set all the features libeth_xdp supports. Only the first argument is
+ * necessary; if @zc_segs is zero, XSk support won't be advertised.
+ * Use the non-underscored versions in drivers instead.
+ */
+void __libeth_xdp_set_features(struct net_device *dev,
+ const struct xdp_metadata_ops *xmo,
+ u32 zc_segs,
+ const struct xsk_tx_metadata_ops *tmo)
+{
+ xdp_set_features_flag(dev,
+ NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT |
+ (zc_segs ? NETDEV_XDP_ACT_XSK_ZEROCOPY : 0) |
+ NETDEV_XDP_ACT_RX_SG |
+ NETDEV_XDP_ACT_NDO_XMIT_SG);
+ dev->xdp_metadata_ops = xmo;
+
+ tmo = tmo == libeth_xsktmo ? &libeth_xsktmo_slow : tmo;
+
+ dev->xdp_zc_max_segs = zc_segs ? : 1;
+ dev->xsk_tx_metadata_ops = zc_segs ? tmo : NULL;
+}
+EXPORT_SYMBOL_GPL(__libeth_xdp_set_features);
+
+/**
+ * libeth_xdp_set_redirect - toggle the XDP redirect feature
+ * @dev: &net_device to configure
+ * @enable: whether XDP is enabled
+ *
+ * Use this when XDPSQs are not always available to dynamically enable
+ * and disable the redirect feature.
+ */
+void libeth_xdp_set_redirect(struct net_device *dev, bool enable)
+{
+ if (enable)
+ xdp_features_set_redirect_target(dev, true);
+ else
+ xdp_features_clear_redirect_target(dev);
+}
+EXPORT_SYMBOL_GPL(libeth_xdp_set_redirect);
+
+/* Module */
+
+static const struct libeth_xdp_ops xdp_ops __initconst = {
+ .bulk = libeth_xdp_return_buff_bulk,
+ .xsk = libeth_xsk_buff_free_slow,
+};
+
+static int __init libeth_xdp_module_init(void)
+{
+ libeth_attach_xdp(&xdp_ops);
+
+ return 0;
+}
+module_init(libeth_xdp_module_init);
+
+static void __exit libeth_xdp_module_exit(void)
+{
+ libeth_detach_xdp();
+}
+module_exit(libeth_xdp_module_exit);
+
+MODULE_DESCRIPTION("Common Ethernet library - XDP infra");
+MODULE_IMPORT_NS("LIBETH");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/intel/libeth/xsk.c b/drivers/net/ethernet/intel/libeth/xsk.c
new file mode 100644
index 000000000000..846e902e31b6
--- /dev/null
+++ b/drivers/net/ethernet/intel/libeth/xsk.c
@@ -0,0 +1,271 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2025 Intel Corporation */
+
+#define DEFAULT_SYMBOL_NAMESPACE "LIBETH_XDP"
+
+#include <linux/export.h>
+
+#include <net/libeth/xsk.h>
+
+#include "priv.h"
+
+/* ``XDP_TX`` bulking */
+
+void __cold libeth_xsk_tx_return_bulk(const struct libeth_xdp_tx_frame *bq,
+ u32 count)
+{
+ for (u32 i = 0; i < count; i++)
+ libeth_xsk_buff_free_slow(bq[i].xsk);
+}
+
+/* XSk TMO */
+
+const struct xsk_tx_metadata_ops libeth_xsktmo_slow = {
+ .tmo_request_checksum = libeth_xsktmo_req_csum,
+};
+
+/* Rx polling path */
+
+/**
+ * libeth_xsk_buff_free_slow - free an XSk Rx buffer
+ * @xdp: buffer to free
+ *
+ * Slowpath version of xsk_buff_free() to be used on exceptions, cleanups etc.
+ * to avoid unwanted inlining.
+ */
+void libeth_xsk_buff_free_slow(struct libeth_xdp_buff *xdp)
+{
+ xsk_buff_free(&xdp->base);
+}
+EXPORT_SYMBOL_GPL(libeth_xsk_buff_free_slow);
+
+/**
+ * libeth_xsk_buff_add_frag - add frag to XSk Rx buffer
+ * @head: head buffer
+ * @xdp: frag buffer
+ *
+ * External helper used by libeth_xsk_process_buff(), do not call directly.
+ * Frees both main and frag buffers on error.
+ *
+ * Return: main buffer with attached frag on success, %NULL on error (no space
+ * for a new frag).
+ */
+struct libeth_xdp_buff *libeth_xsk_buff_add_frag(struct libeth_xdp_buff *head,
+ struct libeth_xdp_buff *xdp)
+{
+ if (!xsk_buff_add_frag(&head->base, &xdp->base))
+ goto free;
+
+ return head;
+
+free:
+ libeth_xsk_buff_free_slow(xdp);
+ libeth_xsk_buff_free_slow(head);
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(libeth_xsk_buff_add_frag);
+
+/**
+ * libeth_xsk_buff_stats_frags - update onstack RQ stats with XSk frags info
+ * @rs: onstack stats to update
+ * @xdp: buffer to account
+ *
+ * External helper used by __libeth_xsk_run_pass(), do not call directly.
+ * Adds buffer's frags count and total len to the onstack stats.
+ */
+void libeth_xsk_buff_stats_frags(struct libeth_rq_napi_stats *rs,
+ const struct libeth_xdp_buff *xdp)
+{
+ libeth_xdp_buff_stats_frags(rs, xdp);
+}
+EXPORT_SYMBOL_GPL(libeth_xsk_buff_stats_frags);
+
+/**
+ * __libeth_xsk_run_prog_slow - process the non-``XDP_REDIRECT`` verdicts
+ * @xdp: buffer to process
+ * @bq: Tx bulk for queueing on ``XDP_TX``
+ * @act: verdict to process
+ * @ret: error code if ``XDP_REDIRECT`` failed
+ *
+ * External helper used by __libeth_xsk_run_prog(), do not call directly.
+ * ``XDP_REDIRECT`` is the most common and hottest verdict on XSk, thus
+ * it is processed inline. The rest goes here for out-of-line processing,
+ * together with redirect errors.
+ *
+ * Return: libeth_xdp XDP prog verdict.
+ */
+u32 __libeth_xsk_run_prog_slow(struct libeth_xdp_buff *xdp,
+ const struct libeth_xdp_tx_bulk *bq,
+ enum xdp_action act, int ret)
+{
+ switch (act) {
+ case XDP_DROP:
+ xsk_buff_free(&xdp->base);
+
+ return LIBETH_XDP_DROP;
+ case XDP_TX:
+ return LIBETH_XDP_TX;
+ case XDP_PASS:
+ return LIBETH_XDP_PASS;
+ default:
+ break;
+ }
+
+ return libeth_xdp_prog_exception(bq, xdp, act, ret);
+}
+EXPORT_SYMBOL_GPL(__libeth_xsk_run_prog_slow);
+
+/**
+ * libeth_xsk_prog_exception - handle XDP prog exceptions on XSk
+ * @xdp: buffer to process
+ * @act: verdict returned by the prog
+ * @ret: error code if ``XDP_REDIRECT`` failed
+ *
+ * Internal. Frees the buffer and, if the queue uses XSk wakeups, stops the
+ * current NAPI poll when there are no free buffers left.
+ *
+ * Return: libeth_xdp's XDP prog verdict.
+ */
+u32 __cold libeth_xsk_prog_exception(struct libeth_xdp_buff *xdp,
+ enum xdp_action act, int ret)
+{
+ const struct xdp_buff_xsk *xsk;
+ u32 __ret = LIBETH_XDP_DROP;
+
+ if (act != XDP_REDIRECT)
+ goto drop;
+
+ xsk = container_of(&xdp->base, typeof(*xsk), xdp);
+ if (xsk_uses_need_wakeup(xsk->pool) && ret == -ENOBUFS)
+ __ret = LIBETH_XDP_ABORTED;
+
+drop:
+ libeth_xsk_buff_free_slow(xdp);
+
+ return __ret;
+}
+
+/* Refill */
+
+/**
+ * libeth_xskfq_create - create an XSkFQ
+ * @fq: fill queue to initialize
+ *
+ * Allocates the FQEs and initializes the fields used by libeth_xdp: number
+ * of buffers to refill, refill threshold and buffer len.
+ *
+ * Return: %0 on success, -errno otherwise.
+ */
+int libeth_xskfq_create(struct libeth_xskfq *fq)
+{
+ fq->fqes = kvcalloc_node(fq->count, sizeof(*fq->fqes), GFP_KERNEL,
+ fq->nid);
+ if (!fq->fqes)
+ return -ENOMEM;
+
+ fq->pending = fq->count;
+ fq->thresh = libeth_xdp_queue_threshold(fq->count);
+ fq->buf_len = xsk_pool_get_rx_frame_size(fq->pool);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(libeth_xskfq_create);
+
+/**
+ * libeth_xskfq_destroy - destroy an XSkFQ
+ * @fq: fill queue to destroy
+ *
+ * Zeroes the used fields and frees the FQEs array.
+ */
+void libeth_xskfq_destroy(struct libeth_xskfq *fq)
+{
+ fq->buf_len = 0;
+ fq->thresh = 0;
+ fq->pending = 0;
+
+ kvfree(fq->fqes);
+}
+EXPORT_SYMBOL_GPL(libeth_xskfq_destroy);
+
+/* .ndo_xsk_wakeup */
+
+static void libeth_xsk_napi_sched(void *info)
+{
+ __napi_schedule_irqoff(info);
+}
+
+/**
+ * libeth_xsk_init_wakeup - initialize libeth XSk wakeup structure
+ * @csd: struct to initialize
+ * @napi: NAPI corresponding to this queue
+ *
+ * libeth_xdp uses inter-processor interrupts to perform XSk wakeups. In order
+ * to do that, the corresponding CSDs must be initialized when creating the
+ * queues.
+ */
+void libeth_xsk_init_wakeup(call_single_data_t *csd, struct napi_struct *napi)
+{
+ INIT_CSD(csd, libeth_xsk_napi_sched, napi);
+}
+EXPORT_SYMBOL_GPL(libeth_xsk_init_wakeup);
+
+/**
+ * libeth_xsk_wakeup - perform an XSk wakeup
+ * @csd: CSD corresponding to the queue
+ * @qid: the stack queue index
+ *
+ * Try to mark the NAPI as missed first, so that it will be rescheduled.
+ * If it is not currently scheduled, schedule it on the corresponding CPU via an IPI (or directly
+ * if already running on it).
+ */
+void libeth_xsk_wakeup(call_single_data_t *csd, u32 qid)
+{
+ struct napi_struct *napi = csd->info;
+
+ if (napi_if_scheduled_mark_missed(napi) ||
+ unlikely(!napi_schedule_prep(napi)))
+ return;
+
+ if (unlikely(qid >= nr_cpu_ids))
+ qid %= nr_cpu_ids;
+
+ if (qid != raw_smp_processor_id() && cpu_online(qid))
+ smp_call_function_single_async(qid, csd);
+ else
+ __napi_schedule(napi);
+}
+EXPORT_SYMBOL_GPL(libeth_xsk_wakeup);
+
+/* Pool setup */
+
+#define LIBETH_XSK_DMA_ATTR \
+ (DMA_ATTR_WEAK_ORDERING | DMA_ATTR_SKIP_CPU_SYNC)
+
+/**
+ * libeth_xsk_setup_pool - setup or destroy an XSk pool for a queue
+ * @dev: target &net_device
+ * @qid: stack queue index to configure
+ * @enable: whether to enable or disable the pool
+ *
+ * Check that @qid is valid and then map or unmap the pool.
+ *
+ * Return: %0 on success, -errno otherwise.
+ */
+int libeth_xsk_setup_pool(struct net_device *dev, u32 qid, bool enable)
+{
+ struct xsk_buff_pool *pool;
+
+ pool = xsk_get_pool_from_qid(dev, qid);
+ if (!pool)
+ return -EINVAL;
+
+ if (enable)
+ return xsk_pool_dma_map(pool, dev->dev.parent,
+ LIBETH_XSK_DMA_ATTR);
+ else
+ xsk_pool_dma_unmap(pool, LIBETH_XSK_DMA_ATTR);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(libeth_xsk_setup_pool);
diff --git a/drivers/net/ethernet/intel/libie/Kconfig b/drivers/net/ethernet/intel/libie/Kconfig
new file mode 100644
index 000000000000..70831c7e336e
--- /dev/null
+++ b/drivers/net/ethernet/intel/libie/Kconfig
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (C) 2024 Intel Corporation
+
+config LIBIE
+ tristate
+ select LIBETH
+ help
+ libie (Intel Ethernet library) is a common library built on top of
+ libeth and containing vendor-specific routines shared between several
+ Intel Ethernet drivers.
+
+config LIBIE_ADMINQ
+ tristate
+ help
+ Helper functions used by Intel Ethernet drivers for administration
+ queue command interface (aka adminq).
+
+config LIBIE_FWLOG
+ tristate
+ select LIBIE_ADMINQ
+ help
+ Library to support firmware logging on devices that support it.
+ Firmware logging uses the admin queue interface to communicate with
+ the device. Debugfs is the user interface used to configure logging
+ and dump all collected logs.
diff --git a/drivers/net/ethernet/intel/libie/Makefile b/drivers/net/ethernet/intel/libie/Makefile
new file mode 100644
index 000000000000..db57fc6780ea
--- /dev/null
+++ b/drivers/net/ethernet/intel/libie/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (C) 2024 Intel Corporation
+
+obj-$(CONFIG_LIBIE) += libie.o
+
+libie-y := rx.o
+
+obj-$(CONFIG_LIBIE_ADMINQ) += libie_adminq.o
+
+libie_adminq-y := adminq.o
+
+obj-$(CONFIG_LIBIE_FWLOG) += libie_fwlog.o
+
+libie_fwlog-y := fwlog.o
diff --git a/drivers/net/ethernet/intel/libie/adminq.c b/drivers/net/ethernet/intel/libie/adminq.c
new file mode 100644
index 000000000000..7b4ff479e7e5
--- /dev/null
+++ b/drivers/net/ethernet/intel/libie/adminq.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2025 Intel Corporation */
+
+#include <linux/module.h>
+#include <linux/net/intel/libie/adminq.h>
+
+static const char * const libie_aq_str_arr[] = {
+#define LIBIE_AQ_STR(x) \
+ [LIBIE_AQ_RC_##x] = "LIBIE_AQ_RC_" #x
+ LIBIE_AQ_STR(OK),
+ LIBIE_AQ_STR(EPERM),
+ LIBIE_AQ_STR(ENOENT),
+ LIBIE_AQ_STR(ESRCH),
+ LIBIE_AQ_STR(EIO),
+ LIBIE_AQ_STR(EAGAIN),
+ LIBIE_AQ_STR(ENOMEM),
+ LIBIE_AQ_STR(EACCES),
+ LIBIE_AQ_STR(EBUSY),
+ LIBIE_AQ_STR(EEXIST),
+ LIBIE_AQ_STR(EINVAL),
+ LIBIE_AQ_STR(ENOSPC),
+ LIBIE_AQ_STR(ENOSYS),
+ LIBIE_AQ_STR(EMODE),
+ LIBIE_AQ_STR(ENOSEC),
+ LIBIE_AQ_STR(EBADSIG),
+ LIBIE_AQ_STR(ESVN),
+ LIBIE_AQ_STR(EBADMAN),
+ LIBIE_AQ_STR(EBADBUF),
+#undef LIBIE_AQ_STR
+ "LIBIE_AQ_RC_UNKNOWN",
+};
+
+#define __LIBIE_AQ_STR_NUM (ARRAY_SIZE(libie_aq_str_arr) - 1)
+
+/**
+ * libie_aq_str - get error string based on aq error
+ * @err: admin queue error type
+ *
+ * Return: error string for passed error code
+ */
+const char *libie_aq_str(enum libie_aq_err err)
+{
+ if (err >= ARRAY_SIZE(libie_aq_str_arr) ||
+ !libie_aq_str_arr[err])
+ err = __LIBIE_AQ_STR_NUM;
+
+ return libie_aq_str_arr[err];
+}
+EXPORT_SYMBOL_NS_GPL(libie_aq_str, "LIBIE_ADMINQ");
+
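The pattern above, a designated-initializer string table with a trailing "unknown" entry used for out-of-range codes and holes, can be reproduced in a few lines of userspace C; the enum and strings below are invented, not the libie ones:

#include <stdio.h>

enum aq_err { AQ_RC_OK, AQ_RC_EPERM, AQ_RC_EBUSY, AQ_RC_ESVN = 7 /* gap before this */ };

static const char * const aq_str[] = {
	[AQ_RC_OK]    = "AQ_RC_OK",
	[AQ_RC_EPERM] = "AQ_RC_EPERM",
	[AQ_RC_EBUSY] = "AQ_RC_EBUSY",
	[AQ_RC_ESVN]  = "AQ_RC_ESVN",
	"AQ_RC_UNKNOWN",		/* last entry: fallback */
};

#define AQ_STR_NUM (sizeof(aq_str) / sizeof(aq_str[0]) - 1)

/* Out-of-range codes and holes in the enum both map to the fallback entry */
static const char *aq_err_str(unsigned int err)
{
	if (err >= sizeof(aq_str) / sizeof(aq_str[0]) || !aq_str[err])
		err = AQ_STR_NUM;

	return aq_str[err];
}

int main(void)
{
	printf("%s %s %s\n", aq_err_str(AQ_RC_EBUSY), aq_err_str(5),
	       aq_err_str(99));	/* AQ_RC_EBUSY AQ_RC_UNKNOWN AQ_RC_UNKNOWN */
	return 0;
}
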
+MODULE_DESCRIPTION("Intel(R) Ethernet common library - adminq helpers");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/intel/libie/fwlog.c b/drivers/net/ethernet/intel/libie/fwlog.c
new file mode 100644
index 000000000000..f39cc11cb7c5
--- /dev/null
+++ b/drivers/net/ethernet/intel/libie/fwlog.c
@@ -0,0 +1,1115 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022, Intel Corporation. */
+
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/net/intel/libie/fwlog.h>
+#include <linux/pci.h>
+#include <linux/random.h>
+#include <linux/vmalloc.h>
+
+#define DEFAULT_SYMBOL_NAMESPACE "LIBIE_FWLOG"
+
+/* create a define that has an extra module that doesn't really exist. this
+ * is so we can add a module 'all' to easily enable/disable all the modules
+ */
+#define LIBIE_NR_FW_LOG_MODULES (LIBIE_AQC_FW_LOG_ID_MAX + 1)
+
+/* the ordering in this array is important. it matches the ordering of the
+ * values in the FW so the index is the same value as in
+ * libie_aqc_fw_logging_mod
+ */
+static const char * const libie_fwlog_module_string[] = {
+ "general",
+ "ctrl",
+ "link",
+ "link_topo",
+ "dnl",
+ "i2c",
+ "sdp",
+ "mdio",
+ "adminq",
+ "hdma",
+ "lldp",
+ "dcbx",
+ "dcb",
+ "xlr",
+ "nvm",
+ "auth",
+ "vpd",
+ "iosf",
+ "parser",
+ "sw",
+ "scheduler",
+ "txq",
+ "rsvd",
+ "post",
+ "watchdog",
+ "task_dispatch",
+ "mng",
+ "synce",
+ "health",
+ "tsdrv",
+ "pfreg",
+ "mdlver",
+ "all",
+};
+
+/* the ordering in this array is important. it matches the ordering of the
+ * values in the FW so the index is the same value as in libie_fwlog_level
+ */
+static const char * const libie_fwlog_level_string[] = {
+ "none",
+ "error",
+ "warning",
+ "normal",
+ "verbose",
+};
+
+static const char * const libie_fwlog_log_size[] = {
+ "128K",
+ "256K",
+ "512K",
+ "1M",
+ "2M",
+};
+
+static bool libie_fwlog_ring_empty(struct libie_fwlog_ring *rings)
+{
+ return rings->head == rings->tail;
+}
+
+static void libie_fwlog_ring_increment(u16 *item, u16 size)
+{
+ *item = (*item + 1) & (size - 1);
+}
+
+static int libie_fwlog_alloc_ring_buffs(struct libie_fwlog_ring *rings)
+{
+ int i, nr_bytes;
+ u8 *mem;
+
+ nr_bytes = rings->size * LIBIE_AQ_MAX_BUF_LEN;
+ mem = vzalloc(nr_bytes);
+ if (!mem)
+ return -ENOMEM;
+
+ for (i = 0; i < rings->size; i++) {
+ struct libie_fwlog_data *ring = &rings->rings[i];
+
+ ring->data_size = LIBIE_AQ_MAX_BUF_LEN;
+ ring->data = mem;
+ mem += LIBIE_AQ_MAX_BUF_LEN;
+ }
+
+ return 0;
+}
+
+static void libie_fwlog_free_ring_buffs(struct libie_fwlog_ring *rings)
+{
+ int i;
+
+ for (i = 0; i < rings->size; i++) {
+ struct libie_fwlog_data *ring = &rings->rings[i];
+
+ /* the first ring is the base memory for the whole range so
+ * free it
+ */
+ if (!i)
+ vfree(ring->data);
+
+ ring->data = NULL;
+ ring->data_size = 0;
+ }
+}
+
+#define LIBIE_FWLOG_INDEX_TO_BYTES(n) ((128 * 1024) << (n))
+/**
+ * libie_fwlog_realloc_rings - reallocate the FW log rings
+ * @fwlog: pointer to the fwlog structure
+ * @index: the new index to use to allocate memory for the log data
+ *
+ */
+static void libie_fwlog_realloc_rings(struct libie_fwlog *fwlog, int index)
+{
+ struct libie_fwlog_ring ring;
+ int status, ring_size;
+
+ /* convert the number of bytes into a number of 4K buffers. externally
+ * the driver presents the interface to the FW log data as a number of
+ * bytes because that's easy for users to understand. internally the
+ * driver uses a ring of buffers because the driver doesn't know where
+ * the beginning and end of any line of log data is so the driver has
+ * to overwrite data as complete blocks. when the data is returned to
+ * the user the driver knows that the data is correct and the FW log
+ * can be correctly parsed by the tools
+ */
+ ring_size = LIBIE_FWLOG_INDEX_TO_BYTES(index) / LIBIE_AQ_MAX_BUF_LEN;
+ if (ring_size == fwlog->ring.size)
+ return;
+
+ /* allocate space for the new rings and buffers then release the
+ * old rings and buffers. that way if we don't have enough
+ * memory then we at least have what we had before
+ */
+ ring.rings = kcalloc(ring_size, sizeof(*ring.rings), GFP_KERNEL);
+ if (!ring.rings)
+ return;
+
+ ring.size = ring_size;
+
+ status = libie_fwlog_alloc_ring_buffs(&ring);
+ if (status) {
+ dev_warn(&fwlog->pdev->dev, "Unable to allocate memory for FW log ring data buffers\n");
+ libie_fwlog_free_ring_buffs(&ring);
+ kfree(ring.rings);
+ return;
+ }
+
+ libie_fwlog_free_ring_buffs(&fwlog->ring);
+ kfree(fwlog->ring.rings);
+
+ fwlog->ring.rings = ring.rings;
+ fwlog->ring.size = ring.size;
+ fwlog->ring.index = index;
+ fwlog->ring.head = 0;
+ fwlog->ring.tail = 0;
+}
+
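Assuming LIBIE_AQ_MAX_BUF_LEN is 4 KiB, as the "4K buffers" comment above suggests (an assumption, not confirmed by this hunk), the index-to-bytes-to-ring-size conversion works out as in this small userspace sketch:

#include <stdio.h>

#define BUF_LEN			4096u	/* assumed stand-in for LIBIE_AQ_MAX_BUF_LEN */
#define INDEX_TO_BYTES(n)	((128u * 1024u) << (n))

int main(void)
{
	/* index 0..4 maps to the 128K/256K/512K/1M/2M sizes exposed in debugfs */
	for (unsigned int i = 0; i < 5; i++)
		printf("index %u: %7u bytes -> %3u ring buffers\n",
		       i, INDEX_TO_BYTES(i), INDEX_TO_BYTES(i) / BUF_LEN);
	return 0;
}
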
+/**
+ * libie_fwlog_supported - return the cached indication of FW logging support
+ * @fwlog: pointer to the fwlog structure
+ *
+ * This will always return false if called before libie_init_hw(), so it must be
+ * called after libie_init_hw().
+ */
+static bool libie_fwlog_supported(struct libie_fwlog *fwlog)
+{
+ return fwlog->supported;
+}
+
+/**
+ * libie_aq_fwlog_set - Set FW logging configuration AQ command (0xFF30)
+ * @fwlog: pointer to the fwlog structure
+ * @entries: entries to configure
+ * @num_entries: number of @entries
+ * @options: options from libie_fwlog_cfg->options structure
+ * @log_resolution: logging resolution
+ */
+static int
+libie_aq_fwlog_set(struct libie_fwlog *fwlog,
+ struct libie_fwlog_module_entry *entries, u16 num_entries,
+ u16 options, u16 log_resolution)
+{
+ struct libie_aqc_fw_log_cfg_resp *fw_modules;
+ struct libie_aq_desc desc = {0};
+ struct libie_aqc_fw_log *cmd;
+ int status;
+ int i;
+
+ fw_modules = kcalloc(num_entries, sizeof(*fw_modules), GFP_KERNEL);
+ if (!fw_modules)
+ return -ENOMEM;
+
+ for (i = 0; i < num_entries; i++) {
+ fw_modules[i].module_identifier =
+ cpu_to_le16(entries[i].module_id);
+ fw_modules[i].log_level = entries[i].log_level;
+ }
+
+ desc.opcode = cpu_to_le16(libie_aqc_opc_fw_logs_config);
+ desc.flags = cpu_to_le16(LIBIE_AQ_FLAG_SI) |
+ cpu_to_le16(LIBIE_AQ_FLAG_RD);
+
+ cmd = libie_aq_raw(&desc);
+
+ cmd->cmd_flags = LIBIE_AQC_FW_LOG_CONF_SET_VALID;
+ cmd->ops.cfg.log_resolution = cpu_to_le16(log_resolution);
+ cmd->ops.cfg.mdl_cnt = cpu_to_le16(num_entries);
+
+ if (options & LIBIE_FWLOG_OPTION_ARQ_ENA)
+ cmd->cmd_flags |= LIBIE_AQC_FW_LOG_CONF_AQ_EN;
+ if (options & LIBIE_FWLOG_OPTION_UART_ENA)
+ cmd->cmd_flags |= LIBIE_AQC_FW_LOG_CONF_UART_EN;
+
+ status = fwlog->send_cmd(fwlog->priv, &desc, fw_modules,
+ sizeof(*fw_modules) * num_entries);
+
+ kfree(fw_modules);
+
+ return status;
+}
+
+/**
+ * libie_fwlog_set - Set the firmware logging settings
+ * @fwlog: pointer to the fwlog structure
+ * @cfg: config used to set firmware logging
+ *
+ * This function should be called whenever the driver needs to set the firmware
+ * logging configuration. It can be called on initialization, reset, or during
+ * runtime.
+ *
+ * If the PF wishes to receive FW logging then it must register via
+ * libie_fwlog_register. Note that libie_fwlog_register does not need to be called
+ * for init.
+ */
+static int libie_fwlog_set(struct libie_fwlog *fwlog,
+ struct libie_fwlog_cfg *cfg)
+{
+ if (!libie_fwlog_supported(fwlog))
+ return -EOPNOTSUPP;
+
+ return libie_aq_fwlog_set(fwlog, cfg->module_entries,
+ LIBIE_AQC_FW_LOG_ID_MAX, cfg->options,
+ cfg->log_resolution);
+}
+
+/**
+ * libie_aq_fwlog_register - Register PF for firmware logging events (0xFF31)
+ * @fwlog: pointer to the fwlog structure
+ * @reg: true to register and false to unregister
+ */
+static int libie_aq_fwlog_register(struct libie_fwlog *fwlog, bool reg)
+{
+ struct libie_aq_desc desc = {0};
+ struct libie_aqc_fw_log *cmd;
+
+ desc.opcode = cpu_to_le16(libie_aqc_opc_fw_logs_register);
+ desc.flags = cpu_to_le16(LIBIE_AQ_FLAG_SI);
+ cmd = libie_aq_raw(&desc);
+
+ if (reg)
+ cmd->cmd_flags = LIBIE_AQC_FW_LOG_AQ_REGISTER;
+
+ return fwlog->send_cmd(fwlog->priv, &desc, NULL, 0);
+}
+
+/**
+ * libie_fwlog_register - Register the PF for firmware logging
+ * @fwlog: pointer to the fwlog structure
+ *
+ * After this call the PF will start to receive firmware logging based on the
+ * configuration set in libie_fwlog_set.
+ */
+static int libie_fwlog_register(struct libie_fwlog *fwlog)
+{
+ int status;
+
+ if (!libie_fwlog_supported(fwlog))
+ return -EOPNOTSUPP;
+
+ status = libie_aq_fwlog_register(fwlog, true);
+ if (status)
+ dev_dbg(&fwlog->pdev->dev, "Failed to register for firmware logging events over ARQ\n");
+ else
+ fwlog->cfg.options |= LIBIE_FWLOG_OPTION_IS_REGISTERED;
+
+ return status;
+}
+
+/**
+ * libie_fwlog_unregister - Unregister the PF from firmware logging
+ * @fwlog: pointer to the fwlog structure
+ */
+static int libie_fwlog_unregister(struct libie_fwlog *fwlog)
+{
+ int status;
+
+ if (!libie_fwlog_supported(fwlog))
+ return -EOPNOTSUPP;
+
+ status = libie_aq_fwlog_register(fwlog, false);
+ if (status)
+ dev_dbg(&fwlog->pdev->dev, "Failed to unregister from firmware logging events over ARQ\n");
+ else
+ fwlog->cfg.options &= ~LIBIE_FWLOG_OPTION_IS_REGISTERED;
+
+ return status;
+}
+
+/**
+ * libie_fwlog_print_module_cfg - print current FW logging module configuration
+ * @cfg: pointer to the fwlog cfg structure
+ * @module: module to print
+ * @s: the seq file to put data into
+ */
+static void
+libie_fwlog_print_module_cfg(struct libie_fwlog_cfg *cfg, int module,
+ struct seq_file *s)
+{
+ struct libie_fwlog_module_entry *entry;
+
+ if (module != LIBIE_AQC_FW_LOG_ID_MAX) {
+ entry = &cfg->module_entries[module];
+
+ seq_printf(s, "\tModule: %s, Log Level: %s\n",
+ libie_fwlog_module_string[entry->module_id],
+ libie_fwlog_level_string[entry->log_level]);
+ } else {
+ int i;
+
+ for (i = 0; i < LIBIE_AQC_FW_LOG_ID_MAX; i++) {
+ entry = &cfg->module_entries[i];
+
+ seq_printf(s, "\tModule: %s, Log Level: %s\n",
+ libie_fwlog_module_string[entry->module_id],
+ libie_fwlog_level_string[entry->log_level]);
+ }
+ }
+}
+
+static int libie_find_module_by_dentry(struct dentry **modules, struct dentry *d)
+{
+ int i, module;
+
+ module = -1;
+ /* find the module based on the dentry */
+ for (i = 0; i < LIBIE_NR_FW_LOG_MODULES; i++) {
+ if (d == modules[i]) {
+ module = i;
+ break;
+ }
+ }
+
+ return module;
+}
+
+/**
+ * libie_debugfs_module_show - read from 'module' file
+ * @s: the opened file
+ * @v: pointer to the offset
+ */
+static int libie_debugfs_module_show(struct seq_file *s, void *v)
+{
+ struct libie_fwlog *fwlog = s->private;
+ const struct file *filp = s->file;
+ struct dentry *dentry;
+ int module;
+
+ dentry = file_dentry(filp);
+
+ module = libie_find_module_by_dentry(fwlog->debugfs_modules, dentry);
+ if (module < 0) {
+ dev_info(&fwlog->pdev->dev, "unknown module\n");
+ return -EINVAL;
+ }
+
+ libie_fwlog_print_module_cfg(&fwlog->cfg, module, s);
+
+ return 0;
+}
+
+static int libie_debugfs_module_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, libie_debugfs_module_show, inode->i_private);
+}
+
+/**
+ * libie_debugfs_module_write - write into 'module' file
+ * @filp: the opened file
+ * @buf: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ */
+static ssize_t
+libie_debugfs_module_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct libie_fwlog *fwlog = file_inode(filp)->i_private;
+ struct dentry *dentry = file_dentry(filp);
+ struct device *dev = &fwlog->pdev->dev;
+ char user_val[16], *cmd_buf;
+ int module, log_level, cnt;
+
+ /* don't allow partial writes or invalid input */
+ if (*ppos != 0 || count > 8)
+ return -EINVAL;
+
+ cmd_buf = memdup_user_nul(buf, count);
+ if (IS_ERR(cmd_buf))
+ return PTR_ERR(cmd_buf);
+
+ module = libie_find_module_by_dentry(fwlog->debugfs_modules, dentry);
+ if (module < 0) {
+ dev_info(dev, "unknown module\n");
+ return -EINVAL;
+ }
+
+ cnt = sscanf(cmd_buf, "%s", user_val);
+ if (cnt != 1)
+ return -EINVAL;
+
+ log_level = sysfs_match_string(libie_fwlog_level_string, user_val);
+ if (log_level < 0) {
+ dev_info(dev, "unknown log level '%s'\n", user_val);
+ return -EINVAL;
+ }
+
+ if (module != LIBIE_AQC_FW_LOG_ID_MAX) {
+ fwlog->cfg.module_entries[module].log_level = log_level;
+ } else {
+ /* the module 'all' is a shortcut so that we can set
+ * all of the modules to the same level quickly
+ */
+ int i;
+
+ for (i = 0; i < LIBIE_AQC_FW_LOG_ID_MAX; i++)
+ fwlog->cfg.module_entries[i].log_level = log_level;
+ }
+
+ return count;
+}
+
+static const struct file_operations libie_debugfs_module_fops = {
+ .owner = THIS_MODULE,
+ .open = libie_debugfs_module_open,
+ .read = seq_read,
+ .release = single_release,
+ .write = libie_debugfs_module_write,
+};
+
+/**
+ * libie_debugfs_nr_messages_read - read from 'nr_messages' file
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ */
+static ssize_t libie_debugfs_nr_messages_read(struct file *filp,
+ char __user *buffer, size_t count,
+ loff_t *ppos)
+{
+ struct libie_fwlog *fwlog = filp->private_data;
+ char buff[32] = {};
+
+ snprintf(buff, sizeof(buff), "%d\n",
+ fwlog->cfg.log_resolution);
+
+ return simple_read_from_buffer(buffer, count, ppos, buff, strlen(buff));
+}
+
+/**
+ * libie_debugfs_nr_messages_write - write into 'nr_messages' file
+ * @filp: the opened file
+ * @buf: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ */
+static ssize_t
+libie_debugfs_nr_messages_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct libie_fwlog *fwlog = filp->private_data;
+ struct device *dev = &fwlog->pdev->dev;
+ char user_val[8], *cmd_buf;
+ s16 nr_messages;
+ ssize_t ret;
+
+ /* don't allow partial writes or invalid input */
+ if (*ppos != 0 || count > 4)
+ return -EINVAL;
+
+ cmd_buf = memdup_user_nul(buf, count);
+ if (IS_ERR(cmd_buf))
+ return PTR_ERR(cmd_buf);
+
+ ret = sscanf(cmd_buf, "%s", user_val);
+ if (ret != 1)
+ return -EINVAL;
+
+ ret = kstrtos16(user_val, 0, &nr_messages);
+ if (ret)
+ return ret;
+
+ if (nr_messages < LIBIE_AQC_FW_LOG_MIN_RESOLUTION ||
+ nr_messages > LIBIE_AQC_FW_LOG_MAX_RESOLUTION) {
+ dev_err(dev, "Invalid FW log number of messages %d, value must be between %d - %d\n",
+ nr_messages, LIBIE_AQC_FW_LOG_MIN_RESOLUTION,
+ LIBIE_AQC_FW_LOG_MAX_RESOLUTION);
+ return -EINVAL;
+ }
+
+ fwlog->cfg.log_resolution = nr_messages;
+
+ return count;
+}
+
+static const struct file_operations libie_debugfs_nr_messages_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = libie_debugfs_nr_messages_read,
+ .write = libie_debugfs_nr_messages_write,
+};
+
+/**
+ * libie_debugfs_enable_read - read from 'enable' file
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ */
+static ssize_t libie_debugfs_enable_read(struct file *filp,
+ char __user *buffer, size_t count,
+ loff_t *ppos)
+{
+ struct libie_fwlog *fwlog = filp->private_data;
+ char buff[32] = {};
+
+ snprintf(buff, sizeof(buff), "%u\n",
+ (u16)(fwlog->cfg.options &
+ LIBIE_FWLOG_OPTION_IS_REGISTERED) >> 3);
+
+ return simple_read_from_buffer(buffer, count, ppos, buff, strlen(buff));
+}
+
+/**
+ * libie_debugfs_enable_write - write into 'enable' file
+ * @filp: the opened file
+ * @buf: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ */
+static ssize_t
+libie_debugfs_enable_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct libie_fwlog *fwlog = filp->private_data;
+ char user_val[8], *cmd_buf;
+ bool enable;
+ ssize_t ret;
+
+ /* don't allow partial writes or invalid input */
+ if (*ppos != 0 || count > 2)
+ return -EINVAL;
+
+ cmd_buf = memdup_user_nul(buf, count);
+ if (IS_ERR(cmd_buf))
+ return PTR_ERR(cmd_buf);
+
+ ret = sscanf(cmd_buf, "%s", user_val);
+ if (ret != 1)
+ return -EINVAL;
+
+ ret = kstrtobool(user_val, &enable);
+ if (ret)
+ goto enable_write_error;
+
+ if (enable)
+ fwlog->cfg.options |= LIBIE_FWLOG_OPTION_ARQ_ENA;
+ else
+ fwlog->cfg.options &= ~LIBIE_FWLOG_OPTION_ARQ_ENA;
+
+ ret = libie_fwlog_set(fwlog, &fwlog->cfg);
+ if (ret)
+ goto enable_write_error;
+
+ if (enable)
+ ret = libie_fwlog_register(fwlog);
+ else
+ ret = libie_fwlog_unregister(fwlog);
+
+ if (ret)
+ goto enable_write_error;
+
+ /* if we get here, nothing went wrong; return count since we didn't
+ * really write anything
+ */
+ ret = (ssize_t)count;
+
+enable_write_error:
+ /* This function always consumes all of the written input, or produces
+ * an error. Check and enforce this. Otherwise, the write operation
+ * won't complete properly.
+ */
+ if (WARN_ON(ret != (ssize_t)count && ret >= 0))
+ ret = -EIO;
+
+ return ret;
+}
+
+static const struct file_operations libie_debugfs_enable_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = libie_debugfs_enable_read,
+ .write = libie_debugfs_enable_write,
+};
+
+/**
+ * libie_debugfs_log_size_read - read from 'log_size' file
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ */
+static ssize_t libie_debugfs_log_size_read(struct file *filp,
+ char __user *buffer, size_t count,
+ loff_t *ppos)
+{
+ struct libie_fwlog *fwlog = filp->private_data;
+ char buff[32] = {};
+ int index;
+
+ index = fwlog->ring.index;
+ snprintf(buff, sizeof(buff), "%s\n", libie_fwlog_log_size[index]);
+
+ return simple_read_from_buffer(buffer, count, ppos, buff, strlen(buff));
+}
+
+/**
+ * libie_debugfs_log_size_write - write into 'log_size' file
+ * @filp: the opened file
+ * @buf: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ */
+static ssize_t
+libie_debugfs_log_size_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct libie_fwlog *fwlog = filp->private_data;
+ struct device *dev = &fwlog->pdev->dev;
+ char user_val[8], *cmd_buf;
+ ssize_t ret;
+ int index;
+
+ /* don't allow partial writes or invalid input */
+ if (*ppos != 0 || count > 5)
+ return -EINVAL;
+
+ cmd_buf = memdup_user_nul(buf, count);
+ if (IS_ERR(cmd_buf))
+ return PTR_ERR(cmd_buf);
+
+ ret = sscanf(cmd_buf, "%s", user_val);
+ if (ret != 1)
+ return -EINVAL;
+
+ index = sysfs_match_string(libie_fwlog_log_size, user_val);
+ if (index < 0) {
+ dev_info(dev, "Invalid log size '%s'. The value must be one of 128K, 256K, 512K, 1M, 2M\n",
+ user_val);
+ ret = -EINVAL;
+ goto log_size_write_error;
+ } else if (fwlog->cfg.options & LIBIE_FWLOG_OPTION_IS_REGISTERED) {
+ dev_info(dev, "FW logging is currently running. Please disable FW logging to change log_size\n");
+ ret = -EINVAL;
+ goto log_size_write_error;
+ }
+
+ /* free all the buffers and the tracking info and resize */
+ libie_fwlog_realloc_rings(fwlog, index);
+
+ /* if we get here, nothing went wrong; return count since we didn't
+ * really write anything
+ */
+ ret = (ssize_t)count;
+
+log_size_write_error:
+ /* This function always consumes all of the written input, or produces
+ * an error. Check and enforce this. Otherwise, the write operation
+ * won't complete properly.
+ */
+ if (WARN_ON(ret != (ssize_t)count && ret >= 0))
+ ret = -EIO;
+
+ return ret;
+}
+
+static const struct file_operations libie_debugfs_log_size_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = libie_debugfs_log_size_read,
+ .write = libie_debugfs_log_size_write,
+};
+
+/**
+ * libie_debugfs_data_read - read from 'data' file
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ */
+static ssize_t libie_debugfs_data_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct libie_fwlog *fwlog = filp->private_data;
+ int data_copied = 0;
+ bool done = false;
+
+ if (libie_fwlog_ring_empty(&fwlog->ring))
+ return 0;
+
+ while (!libie_fwlog_ring_empty(&fwlog->ring) && !done) {
+ struct libie_fwlog_data *log;
+ u16 cur_buf_len;
+
+ log = &fwlog->ring.rings[fwlog->ring.head];
+ cur_buf_len = log->data_size;
+ if (cur_buf_len >= count) {
+ done = true;
+ continue;
+ }
+
+ if (copy_to_user(buffer, log->data, cur_buf_len)) {
+ /* if there is an error then bail and return whatever
+ * the driver has copied so far
+ */
+ done = true;
+ continue;
+ }
+
+ data_copied += cur_buf_len;
+ buffer += cur_buf_len;
+ count -= cur_buf_len;
+ *ppos += cur_buf_len;
+ libie_fwlog_ring_increment(&fwlog->ring.head, fwlog->ring.size);
+ }
+
+ return data_copied;
+}
+
+/**
+ * libie_debugfs_data_write - write into 'data' file
+ * @filp: the opened file
+ * @buf: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ */
+static ssize_t
+libie_debugfs_data_write(struct file *filp, const char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct libie_fwlog *fwlog = filp->private_data;
+ struct device *dev = &fwlog->pdev->dev;
+ ssize_t ret;
+
+ /* don't allow partial writes */
+ if (*ppos != 0)
+ return 0;
+
+ /* any value is allowed to clear the buffer so no need to even look at
+ * what the value is
+ */
+ if (!(fwlog->cfg.options & LIBIE_FWLOG_OPTION_IS_REGISTERED)) {
+ fwlog->ring.head = 0;
+ fwlog->ring.tail = 0;
+ } else {
+ dev_info(dev, "Can't clear FW log data while FW log running\n");
+ ret = -EINVAL;
+ goto nr_buffs_write_error;
+ }
+
+ /* if we get here, nothing went wrong; return count since we didn't
+ * really write anything
+ */
+ ret = (ssize_t)count;
+
+nr_buffs_write_error:
+ /* This function always consumes all of the written input, or produces
+ * an error. Check and enforce this. Otherwise, the write operation
+ * won't complete properly.
+ */
+ if (WARN_ON(ret != (ssize_t)count && ret >= 0))
+ ret = -EIO;
+
+ return ret;
+}
+
+static const struct file_operations libie_debugfs_data_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = libie_debugfs_data_read,
+ .write = libie_debugfs_data_write,
+};
+
+/**
+ * libie_debugfs_fwlog_init - setup the debugfs directory
+ * @fwlog: pointer to the fwlog structure
+ * @root: debugfs root entry on which the fwlog directory will be registered
+ */
+static void libie_debugfs_fwlog_init(struct libie_fwlog *fwlog,
+ struct dentry *root)
+{
+ struct dentry *fw_modules_dir;
+ struct dentry **fw_modules;
+ int i;
+
+ /* allocate space for this first because if it fails then we don't
+ * need to unwind
+ */
+ fw_modules = kcalloc(LIBIE_NR_FW_LOG_MODULES, sizeof(*fw_modules),
+ GFP_KERNEL);
+ if (!fw_modules)
+ return;
+
+ fwlog->debugfs = debugfs_create_dir("fwlog", root);
+ if (IS_ERR(fwlog->debugfs))
+ goto err_create_module_files;
+
+ fw_modules_dir = debugfs_create_dir("modules", fwlog->debugfs);
+ if (IS_ERR(fw_modules_dir))
+ goto err_create_module_files;
+
+ for (i = 0; i < LIBIE_NR_FW_LOG_MODULES; i++) {
+ fw_modules[i] = debugfs_create_file(libie_fwlog_module_string[i],
+ 0600, fw_modules_dir, fwlog,
+ &libie_debugfs_module_fops);
+ if (IS_ERR(fw_modules[i]))
+ goto err_create_module_files;
+ }
+
+ debugfs_create_file("nr_messages", 0600, fwlog->debugfs, fwlog,
+ &libie_debugfs_nr_messages_fops);
+
+ fwlog->debugfs_modules = fw_modules;
+
+ debugfs_create_file("enable", 0600, fwlog->debugfs, fwlog,
+ &libie_debugfs_enable_fops);
+
+ debugfs_create_file("log_size", 0600, fwlog->debugfs, fwlog,
+ &libie_debugfs_log_size_fops);
+
+ debugfs_create_file("data", 0600, fwlog->debugfs, fwlog,
+ &libie_debugfs_data_fops);
+
+ return;
+
+err_create_module_files:
+ debugfs_remove_recursive(fwlog->debugfs);
+ kfree(fw_modules);
+}
+
+static bool libie_fwlog_ring_full(struct libie_fwlog_ring *rings)
+{
+ u16 head, tail;
+
+ head = rings->head;
+ tail = rings->tail;
+
+ if (head < tail && (tail - head == (rings->size - 1)))
+ return true;
+ else if (head > tail && (tail == (head - 1)))
+ return true;
+
+ return false;
+}
+
+/**
+ * libie_aq_fwlog_get - Get the current firmware logging configuration (0xFF32)
+ * @fwlog: pointer to the fwlog structure
+ * @cfg: firmware logging configuration to populate
+ */
+static int libie_aq_fwlog_get(struct libie_fwlog *fwlog,
+ struct libie_fwlog_cfg *cfg)
+{
+ struct libie_aqc_fw_log_cfg_resp *fw_modules;
+ struct libie_aq_desc desc = {0};
+ struct libie_aqc_fw_log *cmd;
+ u16 module_id_cnt;
+ int status;
+ void *buf;
+ int i;
+
+ memset(cfg, 0, sizeof(*cfg));
+
+ buf = kzalloc(LIBIE_AQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ desc.opcode = cpu_to_le16(libie_aqc_opc_fw_logs_query);
+ desc.flags = cpu_to_le16(LIBIE_AQ_FLAG_SI);
+ cmd = libie_aq_raw(&desc);
+
+ cmd->cmd_flags = LIBIE_AQC_FW_LOG_AQ_QUERY;
+
+ status = fwlog->send_cmd(fwlog->priv, &desc, buf, LIBIE_AQ_MAX_BUF_LEN);
+ if (status) {
+ dev_dbg(&fwlog->pdev->dev, "Failed to get FW log configuration\n");
+ goto status_out;
+ }
+
+ module_id_cnt = le16_to_cpu(cmd->ops.cfg.mdl_cnt);
+ if (module_id_cnt < LIBIE_AQC_FW_LOG_ID_MAX) {
+ dev_dbg(&fwlog->pdev->dev, "FW returned less than the expected number of FW log module IDs\n");
+ } else if (module_id_cnt > LIBIE_AQC_FW_LOG_ID_MAX) {
+ dev_dbg(&fwlog->pdev->dev, "FW returned more than expected number of FW log module IDs, setting module_id_cnt to software expected max %u\n",
+ LIBIE_AQC_FW_LOG_ID_MAX);
+ module_id_cnt = LIBIE_AQC_FW_LOG_ID_MAX;
+ }
+
+ cfg->log_resolution = le16_to_cpu(cmd->ops.cfg.log_resolution);
+ if (cmd->cmd_flags & LIBIE_AQC_FW_LOG_CONF_AQ_EN)
+ cfg->options |= LIBIE_FWLOG_OPTION_ARQ_ENA;
+ if (cmd->cmd_flags & LIBIE_AQC_FW_LOG_CONF_UART_EN)
+ cfg->options |= LIBIE_FWLOG_OPTION_UART_ENA;
+ if (cmd->cmd_flags & LIBIE_AQC_FW_LOG_QUERY_REGISTERED)
+ cfg->options |= LIBIE_FWLOG_OPTION_IS_REGISTERED;
+
+ fw_modules = (struct libie_aqc_fw_log_cfg_resp *)buf;
+
+ for (i = 0; i < module_id_cnt; i++) {
+ struct libie_aqc_fw_log_cfg_resp *fw_module = &fw_modules[i];
+
+ cfg->module_entries[i].module_id =
+ le16_to_cpu(fw_module->module_identifier);
+ cfg->module_entries[i].log_level = fw_module->log_level;
+ }
+
+status_out:
+ kfree(buf);
+ return status;
+}
+
+/**
+ * libie_fwlog_set_supported - Set if FW logging is supported by FW
+ * @fwlog: pointer to the fwlog structure
+ *
+ * If FW returns success to the libie_aq_fwlog_get call then it supports FW
+ * logging, else it doesn't. Set the fwlog_supported flag accordingly.
+ *
+ * This function is only meant to be called during driver init to determine if
+ * the FW supports FW logging.
+ */
+static void libie_fwlog_set_supported(struct libie_fwlog *fwlog)
+{
+ struct libie_fwlog_cfg *cfg;
+ int status;
+
+ fwlog->supported = false;
+
+ cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+ if (!cfg)
+ return;
+
+ status = libie_aq_fwlog_get(fwlog, cfg);
+ if (status)
+ dev_dbg(&fwlog->pdev->dev, "libie_aq_fwlog_get failed, FW logging is not supported on this version of FW, status %d\n",
+ status);
+ else
+ fwlog->supported = true;
+
+ kfree(cfg);
+}
+
+/**
+ * libie_fwlog_init - Initialize FW logging configuration
+ * @fwlog: pointer to the fwlog structure
+ * @api: API structure used to initialize fwlog
+ *
+ * This function should be called on driver initialization during
+ * libie_init_hw().
+ */
+int libie_fwlog_init(struct libie_fwlog *fwlog, struct libie_fwlog_api *api)
+{
+ fwlog->api = *api;
+ libie_fwlog_set_supported(fwlog);
+
+ if (libie_fwlog_supported(fwlog)) {
+ int status;
+
+ /* read the current config from the FW and store it */
+ status = libie_aq_fwlog_get(fwlog, &fwlog->cfg);
+ if (status)
+ return status;
+
+ fwlog->ring.rings = kcalloc(LIBIE_FWLOG_RING_SIZE_DFLT,
+ sizeof(*fwlog->ring.rings),
+ GFP_KERNEL);
+ if (!fwlog->ring.rings) {
+ dev_warn(&fwlog->pdev->dev, "Unable to allocate memory for FW log rings\n");
+ return -ENOMEM;
+ }
+
+ fwlog->ring.size = LIBIE_FWLOG_RING_SIZE_DFLT;
+ fwlog->ring.index = LIBIE_FWLOG_RING_SIZE_INDEX_DFLT;
+
+ status = libie_fwlog_alloc_ring_buffs(&fwlog->ring);
+ if (status) {
+ dev_warn(&fwlog->pdev->dev, "Unable to allocate memory for FW log ring data buffers\n");
+ libie_fwlog_free_ring_buffs(&fwlog->ring);
+ kfree(fwlog->ring.rings);
+ return status;
+ }
+
+ libie_debugfs_fwlog_init(fwlog, api->debugfs_root);
+ } else {
+ dev_warn(&fwlog->pdev->dev, "FW logging is not supported in this NVM image. Please update the NVM to get FW log support\n");
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(libie_fwlog_init);
+
+/**
+ * libie_fwlog_deinit - unroll FW logging configuration
+ * @fwlog: pointer to the fwlog structure
+ *
+ * This function should be called in libie_deinit_hw().
+ */
+void libie_fwlog_deinit(struct libie_fwlog *fwlog)
+{
+ int status;
+
+ /* make sure FW logging is disabled to not put the FW in a weird state
+ * for the next driver load
+ */
+ fwlog->cfg.options &= ~LIBIE_FWLOG_OPTION_ARQ_ENA;
+ status = libie_fwlog_set(fwlog, &fwlog->cfg);
+ if (status)
+ dev_warn(&fwlog->pdev->dev, "Unable to turn off FW logging, status: %d\n",
+ status);
+
+ kfree(fwlog->debugfs_modules);
+
+ fwlog->debugfs_modules = NULL;
+
+ status = libie_fwlog_unregister(fwlog);
+ if (status)
+ dev_warn(&fwlog->pdev->dev, "Unable to unregister FW logging, status: %d\n",
+ status);
+
+ if (fwlog->ring.rings) {
+ libie_fwlog_free_ring_buffs(&fwlog->ring);
+ kfree(fwlog->ring.rings);
+ }
+}
+EXPORT_SYMBOL_GPL(libie_fwlog_deinit);
+
+/**
+ * libie_get_fwlog_data - copy the FW log data from ARQ event
+ * @fwlog: fwlog that the FW log event is associated with
+ * @buf: event buffer pointer
+ * @len: length of the event descriptor
+ */
+void libie_get_fwlog_data(struct libie_fwlog *fwlog, u8 *buf, u16 len)
+{
+ struct libie_fwlog_data *log;
+
+ log = &fwlog->ring.rings[fwlog->ring.tail];
+
+ memset(log->data, 0, PAGE_SIZE);
+ log->data_size = len;
+
+ memcpy(log->data, buf, log->data_size);
+ libie_fwlog_ring_increment(&fwlog->ring.tail, fwlog->ring.size);
+
+ if (libie_fwlog_ring_full(&fwlog->ring)) {
+ /* the rings are full so bump the head to create room */
+ libie_fwlog_ring_increment(&fwlog->ring.head, fwlog->ring.size);
+ }
+}
+EXPORT_SYMBOL_GPL(libie_get_fwlog_data);
+
+void libie_fwlog_reregister(struct libie_fwlog *fwlog)
+{
+ if (!(fwlog->cfg.options & LIBIE_FWLOG_OPTION_IS_REGISTERED))
+ return;
+
+ if (libie_fwlog_register(fwlog))
+ fwlog->cfg.options &= ~LIBIE_FWLOG_OPTION_IS_REGISTERED;
+}
+EXPORT_SYMBOL_GPL(libie_fwlog_reregister);
+
+MODULE_DESCRIPTION("Intel(R) Ethernet common library");
+MODULE_LICENSE("GPL");
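
The fwlog ring above follows a classic head/tail convention: the tail marks the next slot to fill, the head marks the next slot to drain, and the ring counts as full when the tail sits one slot behind the head, at which point the oldest entry is dropped by bumping the head. A minimal standalone sketch of that convention, using made-up demo_* names rather than the libie helpers, might look like this:

/* Standalone sketch, not part of this patch: a head/tail ring that
 * treats "tail one slot behind head" as full, mirroring the convention
 * of libie_fwlog_ring_full()/libie_fwlog_ring_increment(). All demo_*
 * names are illustrative only.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_ring {
	uint16_t head;	/* next slot to drain */
	uint16_t tail;	/* next slot to fill */
	uint16_t size;	/* number of slots */
};

static void demo_ring_increment(uint16_t *idx, uint16_t size)
{
	*idx = (*idx + 1) % size;	/* wrap around */
}

static bool demo_ring_full(const struct demo_ring *r)
{
	return (uint16_t)((r->tail + 1) % r->size) == r->head;
}

int main(void)
{
	struct demo_ring r = { .head = 0, .tail = 0, .size = 4 };
	int i;

	for (i = 0; i < 6; i++) {
		/* a new FW log event would be copied into slot r.tail here */
		demo_ring_increment(&r.tail, r.size);
		if (demo_ring_full(&r))
			/* drop the oldest entry, as libie_get_fwlog_data() does */
			demo_ring_increment(&r.head, r.size);
		printf("event %d: head=%u tail=%u\n", i, r.head, r.tail);
	}
	return 0;
}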
diff --git a/drivers/net/ethernet/intel/libie/rx.c b/drivers/net/ethernet/intel/libie/rx.c
new file mode 100644
index 000000000000..6fda656afa9c
--- /dev/null
+++ b/drivers/net/ethernet/intel/libie/rx.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2024-2025 Intel Corporation */
+
+#define DEFAULT_SYMBOL_NAMESPACE "LIBIE"
+
+#include <linux/export.h>
+#include <linux/net/intel/libie/rx.h>
+
+/* O(1) converting i40e/ice/iavf's 8/10-bit hardware packet type to a parsed
+ * bitfield struct.
+ */
+
+/* A few supplementary definitions for when XDP hash types do not coincide
+ * with what can be generated from ptype definitions by means of preprocessor
+ * concatenation.
+ */
+#define XDP_RSS_L3_L2 XDP_RSS_TYPE_NONE
+#define XDP_RSS_L4_NONE XDP_RSS_TYPE_NONE
+#define XDP_RSS_L4_TIMESYNC XDP_RSS_TYPE_NONE
+#define XDP_RSS_TYPE_L3 XDP_RSS_TYPE_NONE
+#define XDP_RSS_TYPE_L4 XDP_RSS_L4
+
+#define LIBIE_RX_PT(oip, ofrag, tun, tp, tefr, iprot, pl) { \
+ .outer_ip = LIBETH_RX_PT_OUTER_##oip, \
+ .outer_frag = LIBETH_RX_PT_##ofrag, \
+ .tunnel_type = LIBETH_RX_PT_TUNNEL_IP_##tun, \
+ .tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_##tp, \
+ .tunnel_end_frag = LIBETH_RX_PT_##tefr, \
+ .inner_prot = LIBETH_RX_PT_INNER_##iprot, \
+ .payload_layer = LIBETH_RX_PT_PAYLOAD_##pl, \
+ .hash_type = XDP_RSS_L3_##oip | \
+ XDP_RSS_L4_##iprot | \
+ XDP_RSS_TYPE_##pl, \
+ }
+
+#define LIBIE_RX_PT_UNUSED { }
+
+#define __LIBIE_RX_PT_L2(iprot, pl) \
+ LIBIE_RX_PT(L2, NOT_FRAG, NONE, NONE, NOT_FRAG, iprot, pl)
+#define LIBIE_RX_PT_L2 __LIBIE_RX_PT_L2(NONE, L2)
+#define LIBIE_RX_PT_TS __LIBIE_RX_PT_L2(TIMESYNC, L2)
+#define LIBIE_RX_PT_L3 __LIBIE_RX_PT_L2(NONE, L3)
+
+#define LIBIE_RX_PT_IP_FRAG(oip) \
+ LIBIE_RX_PT(IPV##oip, FRAG, NONE, NONE, NOT_FRAG, NONE, L3)
+#define LIBIE_RX_PT_IP_L3(oip, tun, teprot, tefr) \
+ LIBIE_RX_PT(IPV##oip, NOT_FRAG, tun, teprot, tefr, NONE, L3)
+#define LIBIE_RX_PT_IP_L4(oip, tun, teprot, iprot) \
+ LIBIE_RX_PT(IPV##oip, NOT_FRAG, tun, teprot, NOT_FRAG, iprot, L4)
+
+#define LIBIE_RX_PT_IP_NOF(oip, tun, ver) \
+ LIBIE_RX_PT_IP_L3(oip, tun, ver, NOT_FRAG), \
+ LIBIE_RX_PT_IP_L4(oip, tun, ver, UDP), \
+ LIBIE_RX_PT_UNUSED, \
+ LIBIE_RX_PT_IP_L4(oip, tun, ver, TCP), \
+ LIBIE_RX_PT_IP_L4(oip, tun, ver, SCTP), \
+ LIBIE_RX_PT_IP_L4(oip, tun, ver, ICMP)
+
+/* IPv oip --> tun --> IPv ver */
+#define LIBIE_RX_PT_IP_TUN_VER(oip, tun, ver) \
+ LIBIE_RX_PT_IP_L3(oip, tun, ver, FRAG), \
+ LIBIE_RX_PT_IP_NOF(oip, tun, ver)
+
+/* Non Tunneled IPv oip */
+#define LIBIE_RX_PT_IP_RAW(oip) \
+ LIBIE_RX_PT_IP_FRAG(oip), \
+ LIBIE_RX_PT_IP_NOF(oip, NONE, NONE)
+
+/* IPv oip --> tun --> { IPv4, IPv6 } */
+#define LIBIE_RX_PT_IP_TUN(oip, tun) \
+ LIBIE_RX_PT_IP_TUN_VER(oip, tun, IPV4), \
+ LIBIE_RX_PT_IP_TUN_VER(oip, tun, IPV6)
+
+/* IPv oip --> GRE/NAT tun --> { x, IPv4, IPv6 } */
+#define LIBIE_RX_PT_IP_GRE(oip, tun) \
+ LIBIE_RX_PT_IP_L3(oip, tun, NONE, NOT_FRAG), \
+ LIBIE_RX_PT_IP_TUN(oip, tun)
+
+/* Non Tunneled IPv oip
+ * IPv oip --> { IPv4, IPv6 }
+ * IPv oip --> GRE/NAT --> { x, IPv4, IPv6 }
+ * IPv oip --> GRE/NAT --> MAC --> { x, IPv4, IPv6 }
+ * IPv oip --> GRE/NAT --> MAC/VLAN --> { x, IPv4, IPv6 }
+ */
+#define LIBIE_RX_PT_IP(oip) \
+ LIBIE_RX_PT_IP_RAW(oip), \
+ LIBIE_RX_PT_IP_TUN(oip, IP), \
+ LIBIE_RX_PT_IP_GRE(oip, GRENAT), \
+ LIBIE_RX_PT_IP_GRE(oip, GRENAT_MAC), \
+ LIBIE_RX_PT_IP_GRE(oip, GRENAT_MAC_VLAN)
+
+/* Lookup table mapping for O(1) parsing */
+const struct libeth_rx_pt libie_rx_pt_lut[LIBIE_RX_PT_NUM] = {
+ /* L2 packet types */
+ LIBIE_RX_PT_UNUSED,
+ LIBIE_RX_PT_L2,
+ LIBIE_RX_PT_TS,
+ LIBIE_RX_PT_L2,
+ LIBIE_RX_PT_UNUSED,
+ LIBIE_RX_PT_UNUSED,
+ LIBIE_RX_PT_L2,
+ LIBIE_RX_PT_L2,
+ LIBIE_RX_PT_UNUSED,
+ LIBIE_RX_PT_UNUSED,
+ LIBIE_RX_PT_L2,
+ LIBIE_RX_PT_UNUSED,
+
+ LIBIE_RX_PT_L3,
+ LIBIE_RX_PT_L3,
+ LIBIE_RX_PT_L3,
+ LIBIE_RX_PT_L3,
+ LIBIE_RX_PT_L3,
+ LIBIE_RX_PT_L3,
+ LIBIE_RX_PT_L3,
+ LIBIE_RX_PT_L3,
+ LIBIE_RX_PT_L3,
+ LIBIE_RX_PT_L3,
+
+ LIBIE_RX_PT_IP(4),
+ LIBIE_RX_PT_IP(6),
+};
+EXPORT_SYMBOL_GPL(libie_rx_pt_lut);
+
+MODULE_DESCRIPTION("Intel(R) Ethernet common library");
+MODULE_IMPORT_NS("LIBETH");
+MODULE_LICENSE("GPL");
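
The table above trades a little static data for O(1) parsing: the hardware packet-type index from the Rx descriptor is used directly as an array index, with every field of the parsed result precomputed at build time by the LIBIE_RX_PT_* macros. A minimal standalone sketch of the same idea, with made-up demo_* types rather than the actual libeth_rx_pt layout, might look like this:

/* Standalone sketch, not part of this patch: O(1) translation of a
 * hardware packet-type index into pre-parsed header attributes via a
 * static lookup table. demo_* names and table contents are
 * illustrative only.
 */
#include <stdio.h>

enum demo_l4 { DEMO_L4_NONE, DEMO_L4_UDP, DEMO_L4_TCP };

struct demo_rx_pt {
	unsigned int outer_ipv6:1;	/* outer header is IPv6 */
	unsigned int l4:2;		/* inner L4 protocol, enum demo_l4 */
};

/* index == packet-type value reported in the Rx descriptor */
static const struct demo_rx_pt demo_rx_pt_lut[] = {
	[0] = { 0, DEMO_L4_NONE },	/* reserved/unused */
	[1] = { 0, DEMO_L4_UDP },	/* IPv4 + UDP */
	[2] = { 0, DEMO_L4_TCP },	/* IPv4 + TCP */
	[3] = { 1, DEMO_L4_TCP },	/* IPv6 + TCP */
};

int main(void)
{
	unsigned int hw_ptype = 3;	/* pretend this came from a descriptor */
	struct demo_rx_pt pt = demo_rx_pt_lut[hw_ptype];

	printf("ptype %u: ipv6=%u l4=%u\n", hw_ptype, pt.outer_ipv6, pt.l4);
	return 0;
}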
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 1732ec3c3dbd..d8be0e4dcb07 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -946,15 +946,13 @@ jme_udpsum(struct sk_buff *skb)
if (skb->protocol != htons(ETH_P_IP))
return csum;
skb_set_network_header(skb, ETH_HLEN);
- if ((ip_hdr(skb)->protocol != IPPROTO_UDP) ||
- (skb->len < (ETH_HLEN +
- (ip_hdr(skb)->ihl << 2) +
- sizeof(struct udphdr)))) {
+
+ if (ip_hdr(skb)->protocol != IPPROTO_UDP ||
+ skb->len < (ETH_HLEN + ip_hdrlen(skb) + sizeof(struct udphdr))) {
skb_reset_network_header(skb);
return csum;
}
- skb_set_transport_header(skb,
- ETH_HLEN + (ip_hdr(skb)->ihl << 2));
+ skb_set_transport_header(skb, ETH_HLEN + ip_hdrlen(skb));
csum = udp_hdr(skb)->check;
skb_reset_transport_header(skb);
skb_reset_network_header(skb);
@@ -2301,7 +2299,7 @@ jme_change_mtu(struct net_device *netdev, int new_mtu)
{
struct jme_adapter *jme = netdev_priv(netdev);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
netdev_update_features(netdev);
jme_restart_rx_engine(jme);
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index 81cf3361a1e5..891a94d89f4b 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -900,7 +900,8 @@ static void korina_check_media(struct net_device *dev, unsigned int init_media)
static void korina_poll_media(struct timer_list *t)
{
- struct korina_private *lp = from_timer(lp, t, media_check_timer);
+ struct korina_private *lp = timer_container_of(lp, t,
+ media_check_timer);
struct net_device *dev = lp->dev;
korina_check_media(dev, 0);
@@ -1239,7 +1240,7 @@ static int korina_close(struct net_device *dev)
struct korina_private *lp = netdev_priv(dev);
u32 tmp;
- del_timer(&lp->media_check_timer);
+ timer_delete(&lp->media_check_timer);
/* Disable interrupts */
disable_irq(lp->rx_irq);
@@ -1403,7 +1404,7 @@ static struct platform_driver korina_driver = {
.of_match_table = of_match_ptr(korina_match),
},
.probe = korina_probe,
- .remove_new = korina_remove,
+ .remove = korina_remove,
};
module_platform_driver(korina_driver);
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 1d5b7bb6380f..83ce3bfefa5c 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -90,12 +90,10 @@ struct ltq_etop_priv {
struct net_device *netdev;
struct platform_device *pdev;
struct ltq_eth_data *pldata;
- struct resource *res;
struct mii_bus *mii_bus;
struct ltq_etop_chan ch[MAX_DMA_CHAN];
- int tx_free[MAX_DMA_CHAN >> 1];
int tx_burst_len;
int rx_burst_len;
@@ -217,9 +215,9 @@ ltq_etop_free_channel(struct net_device *dev, struct ltq_etop_chan *ch)
if (ch->dma.irq)
free_irq(ch->dma.irq, priv);
if (IS_RX(ch->idx)) {
- int desc;
+ struct ltq_dma_channel *dma = &ch->dma;
- for (desc = 0; desc < LTQ_DESC_NUM; desc++)
+ for (dma->desc = 0; dma->desc < LTQ_DESC_NUM; dma->desc++)
dev_kfree_skb_any(ch->skb[ch->dma.desc]);
}
}
@@ -482,7 +480,9 @@ ltq_etop_tx(struct sk_buff *skb, struct net_device *dev)
unsigned long flags;
u32 byte_offset;
- len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
+ if (skb_put_padto(skb, ETH_ZLEN))
+ return NETDEV_TX_OK;
+ len = skb->len;
if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
netdev_err(dev, "tx ring full\n");
@@ -519,7 +519,7 @@ ltq_etop_change_mtu(struct net_device *dev, int new_mtu)
struct ltq_etop_priv *priv = netdev_priv(dev);
unsigned long flags;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
spin_lock_irqsave(&priv->lock, flags);
ltq_etop_w32((ETOP_PLEN_UNDER << 16) | new_mtu, LTQ_ETOP_IGPLEN);
@@ -642,31 +642,14 @@ ltq_etop_probe(struct platform_device *pdev)
{
struct net_device *dev;
struct ltq_etop_priv *priv;
- struct resource *res;
int err;
int i;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "failed to get etop resource\n");
- err = -ENOENT;
- goto err_out;
- }
-
- res = devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), dev_name(&pdev->dev));
- if (!res) {
- dev_err(&pdev->dev, "failed to request etop resource\n");
- err = -EBUSY;
- goto err_out;
- }
-
- ltq_etop_membase = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!ltq_etop_membase) {
+ ltq_etop_membase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ltq_etop_membase)) {
dev_err(&pdev->dev, "failed to remap etop engine %d\n",
pdev->id);
- err = -ENOMEM;
+ err = PTR_ERR(ltq_etop_membase);
goto err_out;
}
@@ -675,11 +658,9 @@ ltq_etop_probe(struct platform_device *pdev)
err = -ENOMEM;
goto err_out;
}
- strcpy(dev->name, "eth%d");
dev->netdev_ops = &ltq_eth_netdev_ops;
dev->ethtool_ops = &ltq_etop_ethtool_ops;
priv = netdev_priv(dev);
- priv->res = res;
priv->pdev = pdev;
priv->pldata = dev_get_platdata(&pdev->dev);
priv->netdev = dev;
@@ -734,7 +715,7 @@ static void ltq_etop_remove(struct platform_device *pdev)
}
static struct platform_driver ltq_mii_driver = {
- .remove_new = ltq_etop_remove,
+ .remove = ltq_etop_remove,
.driver = {
.name = "ltq_etop",
},
diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
index 8bd4def3622e..b8766fb7a844 100644
--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -419,7 +419,7 @@ xrx200_change_mtu(struct net_device *net_dev, int new_mtu)
int curr_desc;
int ret = 0;
- net_dev->mtu = new_mtu;
+ WRITE_ONCE(net_dev->mtu, new_mtu);
priv->rx_buf_size = xrx200_buffer_size(new_mtu);
priv->rx_skb_size = xrx200_skb_size(priv->rx_buf_size);
@@ -440,7 +440,7 @@ xrx200_change_mtu(struct net_device *net_dev, int new_mtu)
buff = ch_rx->rx_buff[ch_rx->dma.desc];
ret = xrx200_alloc_buf(ch_rx, netdev_alloc_frag);
if (ret) {
- net_dev->mtu = old_mtu;
+ WRITE_ONCE(net_dev->mtu, old_mtu);
priv->rx_buf_size = xrx200_buffer_size(old_mtu);
priv->rx_skb_size = xrx200_skb_size(priv->rx_buf_size);
break;
@@ -669,7 +669,7 @@ MODULE_DEVICE_TABLE(of, xrx200_match);
static struct platform_driver xrx200_driver = {
.probe = xrx200_probe,
- .remove_new = xrx200_remove,
+ .remove = xrx200_remove,
.driver = {
.name = "lantiq,xrx200-net",
.of_match_table = xrx200_match,
diff --git a/drivers/net/ethernet/litex/litex_liteeth.c b/drivers/net/ethernet/litex/litex_liteeth.c
index ff54fbe41bcc..829a4b828f8e 100644
--- a/drivers/net/ethernet/litex/litex_liteeth.c
+++ b/drivers/net/ethernet/litex/litex_liteeth.c
@@ -309,7 +309,7 @@ MODULE_DEVICE_TABLE(of, liteeth_of_match);
static struct platform_driver liteeth_driver = {
.probe = liteeth_probe,
- .remove_new = liteeth_remove,
+ .remove = liteeth_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = liteeth_of_match,
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index 837295fecd17..50f7c59e8a04 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -34,7 +34,6 @@ config MV643XX_ETH
config MVMDIO
tristate "Marvell MDIO interface support"
depends on HAS_IOMEM
- select MDIO_DEVRES
select PHYLIB
help
This driver supports the MDIO interface found in the network
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index f0bdc06d253d..0ab52c57c648 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1333,7 +1333,8 @@ static void mib_counters_update(struct mv643xx_eth_private *mp)
static void mib_counters_timer_wrapper(struct timer_list *t)
{
- struct mv643xx_eth_private *mp = from_timer(mp, t, mib_counters_timer);
+ struct mv643xx_eth_private *mp = timer_container_of(mp, t,
+ mib_counters_timer);
mib_counters_update(mp);
mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
}
@@ -1698,13 +1699,9 @@ static void mv643xx_eth_get_strings(struct net_device *dev,
{
int i;
- if (stringset == ETH_SS_STATS) {
- for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
- memcpy(data + i * ETH_GSTRING_LEN,
- mv643xx_eth_stats[i].stat_string,
- ETH_GSTRING_LEN);
- }
- }
+ if (stringset == ETH_SS_STATS)
+ for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++)
+ ethtool_puts(&data, mv643xx_eth_stats[i].stat_string);
}
static void mv643xx_eth_get_ethtool_stats(struct net_device *dev,
@@ -2251,7 +2248,7 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
if (unlikely(mp->oom)) {
mp->oom = 0;
- del_timer(&mp->rx_oom);
+ timer_delete(&mp->rx_oom);
}
work_done = 0;
@@ -2310,7 +2307,7 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
static inline void oom_timer_wrapper(struct timer_list *t)
{
- struct mv643xx_eth_private *mp = from_timer(mp, t, rx_oom);
+ struct mv643xx_eth_private *mp = timer_container_of(mp, t, rx_oom);
napi_schedule(&mp->napi);
}
@@ -2525,7 +2522,7 @@ static int mv643xx_eth_stop(struct net_device *dev)
napi_disable(&mp->napi);
- del_timer_sync(&mp->rx_oom);
+ timer_delete_sync(&mp->rx_oom);
netif_carrier_off(dev);
if (dev->phydev)
@@ -2535,7 +2532,7 @@ static int mv643xx_eth_stop(struct net_device *dev)
port_reset(mp);
mv643xx_eth_get_stats(dev);
mib_counters_update(mp);
- del_timer_sync(&mp->mib_counters_timer);
+ timer_delete_sync(&mp->mib_counters_timer);
for (i = 0; i < mp->rxq_count; i++)
rxq_deinit(mp->rxq + i);
@@ -2562,7 +2559,7 @@ static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
{
struct mv643xx_eth_private *mp = netdev_priv(dev);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
mv643xx_eth_recalc_skb_size(mp);
tx_set_rate(mp, 1000000000, 16777216);
@@ -2708,9 +2705,15 @@ static struct platform_device *port_platdev[3];
static void mv643xx_eth_shared_of_remove(void)
{
+ struct mv643xx_eth_platform_data *pd;
int n;
for (n = 0; n < 3; n++) {
+ if (!port_platdev[n])
+ continue;
+ pd = dev_get_platdata(&port_platdev[n]->dev);
+ if (pd)
+ of_node_put(pd->phy_node);
platform_device_del(port_platdev[n]);
port_platdev[n] = NULL;
}
@@ -2773,8 +2776,10 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
}
ppdev = platform_device_alloc(MV643XX_ETH_NAME, dev_num);
- if (!ppdev)
- return -ENOMEM;
+ if (!ppdev) {
+ ret = -ENOMEM;
+ goto put_err;
+ }
ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
ppdev->dev.of_node = pnp;
@@ -2796,13 +2801,15 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
port_err:
platform_device_put(ppdev);
+put_err:
+ of_node_put(ppd.phy_node);
return ret;
}
static int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
{
struct mv643xx_eth_shared_platform_data *pd;
- struct device_node *pnp, *np = pdev->dev.of_node;
+ struct device_node *np = pdev->dev.of_node;
int ret;
/* bail out if not registered from DT */
@@ -2816,10 +2823,9 @@ static int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
mv643xx_eth_property(np, "tx-checksum-limit", pd->tx_csum_limit);
- for_each_available_child_of_node(np, pnp) {
+ for_each_available_child_of_node_scoped(np, pnp) {
ret = mv643xx_eth_shared_of_add_port(pdev, pnp);
if (ret) {
- of_node_put(pnp);
mv643xx_eth_shared_of_remove();
return ret;
}
@@ -2844,29 +2850,24 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
struct mv643xx_eth_shared_platform_data *pd;
struct mv643xx_eth_shared_private *msp;
const struct mbus_dram_target_info *dram;
- struct resource *res;
int ret;
if (!mv643xx_eth_version_printed++)
pr_notice("MV-643xx 10/100/1000 ethernet driver version %s\n",
mv643xx_eth_driver_version);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL)
- return -EINVAL;
-
msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL);
if (msp == NULL)
return -ENOMEM;
platform_set_drvdata(pdev, msp);
- msp->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (msp->base == NULL)
- return -ENOMEM;
+ msp->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(msp->base))
+ return PTR_ERR(msp->base);
- msp->clk = devm_clk_get(&pdev->dev, NULL);
- if (!IS_ERR(msp->clk))
- clk_prepare_enable(msp->clk);
+ msp->clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
+ if (IS_ERR(msp->clk))
+ return PTR_ERR(msp->clk);
/*
* (Re-)program MBUS remapping windows if we are asked to.
@@ -2877,7 +2878,7 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
ret = mv643xx_eth_shared_of_probe(pdev);
if (ret)
- goto err_put_clk;
+ return ret;
pd = dev_get_platdata(&pdev->dev);
msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
@@ -2885,25 +2886,16 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
infer_hw_params(msp);
return 0;
-
-err_put_clk:
- if (!IS_ERR(msp->clk))
- clk_disable_unprepare(msp->clk);
- return ret;
}
static void mv643xx_eth_shared_remove(struct platform_device *pdev)
{
- struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev);
-
mv643xx_eth_shared_of_remove();
- if (!IS_ERR(msp->clk))
- clk_disable_unprepare(msp->clk);
}
static struct platform_driver mv643xx_eth_shared_driver = {
.probe = mv643xx_eth_shared_probe,
- .remove_new = mv643xx_eth_shared_remove,
+ .remove = mv643xx_eth_shared_remove,
.driver = {
.name = MV643XX_ETH_SHARED_NAME,
.of_match_table = of_match_ptr(mv643xx_eth_shared_ids),
@@ -3308,7 +3300,7 @@ static void mv643xx_eth_shutdown(struct platform_device *pdev)
static struct platform_driver mv643xx_eth_driver = {
.probe = mv643xx_eth_probe,
- .remove_new = mv643xx_eth_remove,
+ .remove = mv643xx_eth_remove,
.shutdown = mv643xx_eth_shutdown,
.driver = {
.name = MV643XX_ETH_NAME,
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index 9190eff6c0bb..3f4447e68888 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -104,7 +104,7 @@ static int orion_mdio_wait_ready(const struct orion_mdio_ops *ops,
return 0;
} else {
/* wait_event_timeout does not guarantee a delay of at
- * least one whole jiffie, so timeout must be no less
+ * least one whole jiffy, so timeout must be no less
* than two.
*/
timeout = max(usecs_to_jiffies(MVMDIO_SMI_TIMEOUT), 2);
@@ -348,13 +348,12 @@ static int orion_mdio_probe(struct platform_device *pdev)
if (type == BUS_TYPE_XSMI)
orion_mdio_xsmi_set_mdc_freq(bus);
} else {
- dev->clk[0] = clk_get(&pdev->dev, NULL);
- if (PTR_ERR(dev->clk[0]) == -EPROBE_DEFER) {
- ret = -EPROBE_DEFER;
+ dev->clk[0] = clk_get_optional(&pdev->dev, NULL);
+ if (IS_ERR(dev->clk[0])) {
+ ret = PTR_ERR(dev->clk[0]);
goto out_clk;
}
- if (!IS_ERR(dev->clk[0]))
- clk_prepare_enable(dev->clk[0]);
+ clk_prepare_enable(dev->clk[0]);
}
@@ -422,8 +421,6 @@ static void orion_mdio_remove(struct platform_device *pdev)
mdiobus_unregister(bus);
for (i = 0; i < ARRAY_SIZE(dev->clk); i++) {
- if (IS_ERR(dev->clk[i]))
- break;
clk_disable_unprepare(dev->clk[i]);
clk_put(dev->clk[i]);
}
@@ -447,7 +444,7 @@ MODULE_DEVICE_TABLE(acpi, orion_mdio_acpi_match);
static struct platform_driver orion_mdio_driver = {
.probe = orion_mdio_probe,
- .remove_new = orion_mdio_remove,
+ .remove = orion_mdio_remove,
.driver = {
.name = "orion-mdio",
.of_match_table = orion_mdio_match,
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 40a5f1431e4e..7af44f858fa3 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -284,8 +284,12 @@
MVNETA_TXQ_BUCKET_REFILL_PERIOD))
#define MVNETA_LPI_CTRL_0 0x2cc0
+#define MVNETA_LPI_CTRL_0_TS (0xff << 8)
#define MVNETA_LPI_CTRL_1 0x2cc4
-#define MVNETA_LPI_REQUEST_ENABLE BIT(0)
+#define MVNETA_LPI_CTRL_1_REQUEST_ENABLE BIT(0)
+#define MVNETA_LPI_CTRL_1_REQUEST_FORCE BIT(1)
+#define MVNETA_LPI_CTRL_1_MANUAL_MODE BIT(2)
+#define MVNETA_LPI_CTRL_1_TW (0xfff << 4)
#define MVNETA_LPI_CTRL_2 0x2cc8
#define MVNETA_LPI_STATUS 0x2ccc
@@ -541,10 +545,6 @@ struct mvneta_port {
struct mvneta_bm_pool *pool_short;
int bm_win_id;
- bool eee_enabled;
- bool eee_active;
- bool tx_lpi_enabled;
-
u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];
u32 indir[MVNETA_RSS_LU_TABLE_SIZE];
@@ -1781,7 +1781,7 @@ static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
}
/* Set TXQ descriptors fields relevant for CSUM calculation */
-static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
+static u32 mvneta_txq_desc_csum(int l3_offs, __be16 l3_proto,
int ip_hdr_len, int l4_proto)
{
u32 command;
@@ -2342,7 +2342,7 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
prefetch(data);
xdp_buff_clear_frags_flag(xdp);
xdp_prepare_buff(xdp, data, pp->rx_offset_correction + MVNETA_MH_SIZE,
- data_len, false);
+ data_len, true);
}
static void
@@ -2396,6 +2396,7 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
struct xdp_buff *xdp, u32 desc_status)
{
struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+ u32 metasize = xdp->data - xdp->data_meta;
struct sk_buff *skb;
u8 num_frags;
@@ -2410,13 +2411,14 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
skb_reserve(skb, xdp->data - xdp->data_hard_start);
skb_put(skb, xdp->data_end - xdp->data);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
skb->ip_summed = mvneta_rx_csum(pp, desc_status);
if (unlikely(xdp_buff_has_frags(xdp)))
- xdp_update_skb_shared_info(skb, num_frags,
- sinfo->xdp_frags_size,
- num_frags * xdp->frame_sz,
- xdp_buff_is_frag_pfmemalloc(xdp));
+ xdp_update_skb_frags_info(skb, num_frags, sinfo->xdp_frags_size,
+ num_frags * xdp->frame_sz,
+ xdp_buff_get_skb_flags(xdp));
return skb;
}
@@ -2982,6 +2984,13 @@ out:
if (txq->count >= txq->tx_stop_threshold)
netif_tx_stop_queue(nq);
+ /* This is not really the true transmit point, since we batch
+ * up several before hitting the hardware, but is the best we
+ * can do without more complexity to walk the packets in the
+ * pending section of the transmit queue.
+ */
+ skb_tx_timestamp(skb);
+
if (!netdev_xmit_more() || netif_xmit_stopped(nq) ||
txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK)
mvneta_txq_pend_desc_add(pp, txq, frags);
@@ -3259,7 +3268,8 @@ static void mvneta_link_change(struct mvneta_port *pp)
{
u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
- phylink_mac_change(pp->phylink, !!(gmac_stat & MVNETA_GMAC_LINK_UP));
+ phylink_pcs_change(&pp->phylink_pcs,
+ !!(gmac_stat & MVNETA_GMAC_LINK_UP));
}
/* NAPI handler
@@ -3860,7 +3870,7 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
return -EINVAL;
}
- dev->mtu = mtu;
+ WRITE_ONCE(dev->mtu, mtu);
if (!netif_running(dev)) {
if (pp->bm_priv)
@@ -3959,23 +3969,30 @@ static struct mvneta_port *mvneta_pcs_to_port(struct phylink_pcs *pcs)
return container_of(pcs, struct mvneta_port, phylink_pcs);
}
-static int mvneta_pcs_validate(struct phylink_pcs *pcs,
- unsigned long *supported,
- const struct phylink_link_state *state)
+static unsigned int mvneta_pcs_inband_caps(struct phylink_pcs *pcs,
+ phy_interface_t interface)
{
- /* We only support QSGMII, SGMII, 802.3z and RGMII modes.
- * When in 802.3z mode, we must have AN enabled:
+ /* When operating in an 802.3z mode, we must have AN enabled:
* "Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ...
* When <PortType> = 1 (1000BASE-X) this field must be set to 1."
+ * Therefore, inband is "required".
*/
- if (phy_interface_mode_is_8023z(state->interface) &&
- !phylink_test(state->advertising, Autoneg))
- return -EINVAL;
+ if (phy_interface_mode_is_8023z(interface))
+ return LINK_INBAND_ENABLE;
- return 0;
+ /* QSGMII, SGMII and RGMII can be configured to use inband
+ * signalling of the AN result. Indicate these as "possible".
+ */
+ if (interface == PHY_INTERFACE_MODE_SGMII ||
+ interface == PHY_INTERFACE_MODE_QSGMII ||
+ phy_interface_mode_is_rgmii(interface))
+ return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
+
+ /* For any other modes, indicate that inband is not supported. */
+ return LINK_INBAND_DISABLE;
}
-static void mvneta_pcs_get_state(struct phylink_pcs *pcs,
+static void mvneta_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mvneta_port *pp = mvneta_pcs_to_port(pcs);
@@ -4070,7 +4087,7 @@ static void mvneta_pcs_an_restart(struct phylink_pcs *pcs)
}
static const struct phylink_pcs_ops mvneta_phylink_pcs_ops = {
- .pcs_validate = mvneta_pcs_validate,
+ .pcs_inband_caps = mvneta_pcs_inband_caps,
.pcs_get_state = mvneta_pcs_get_state,
.pcs_config = mvneta_pcs_config,
.pcs_an_restart = mvneta_pcs_an_restart,
@@ -4205,18 +4222,6 @@ static int mvneta_mac_finish(struct phylink_config *config, unsigned int mode,
return 0;
}
-static void mvneta_set_eee(struct mvneta_port *pp, bool enable)
-{
- u32 lpi_ctl1;
-
- lpi_ctl1 = mvreg_read(pp, MVNETA_LPI_CTRL_1);
- if (enable)
- lpi_ctl1 |= MVNETA_LPI_REQUEST_ENABLE;
- else
- lpi_ctl1 &= ~MVNETA_LPI_REQUEST_ENABLE;
- mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi_ctl1);
-}
-
static void mvneta_mac_link_down(struct phylink_config *config,
unsigned int mode, phy_interface_t interface)
{
@@ -4232,9 +4237,6 @@ static void mvneta_mac_link_down(struct phylink_config *config,
val |= MVNETA_GMAC_FORCE_LINK_DOWN;
mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
}
-
- pp->eee_active = false;
- mvneta_set_eee(pp, false);
}
static void mvneta_mac_link_up(struct phylink_config *config,
@@ -4283,11 +4285,56 @@ static void mvneta_mac_link_up(struct phylink_config *config,
}
mvneta_port_up(pp);
+}
+
+static void mvneta_mac_disable_tx_lpi(struct phylink_config *config)
+{
+ struct mvneta_port *pp = netdev_priv(to_net_dev(config->dev));
+ u32 lpi1;
- if (phy && pp->eee_enabled) {
- pp->eee_active = phy_init_eee(phy, false) >= 0;
- mvneta_set_eee(pp, pp->eee_active && pp->tx_lpi_enabled);
+ lpi1 = mvreg_read(pp, MVNETA_LPI_CTRL_1);
+ lpi1 &= ~(MVNETA_LPI_CTRL_1_REQUEST_ENABLE |
+ MVNETA_LPI_CTRL_1_REQUEST_FORCE |
+ MVNETA_LPI_CTRL_1_MANUAL_MODE);
+ mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi1);
+}
+
+static int mvneta_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
+ bool tx_clk_stop)
+{
+ struct mvneta_port *pp = netdev_priv(to_net_dev(config->dev));
+ u32 ts, tw, lpi0, lpi1, status;
+
+ status = mvreg_read(pp, MVNETA_GMAC_STATUS);
+ if (status & MVNETA_GMAC_SPEED_1000) {
+ /* At 1G speeds, the timer resolution is 1us, and
+ * 802.3 says tw is 16.5us. Round up to 17us.
+ */
+ tw = 17;
+ ts = timer;
+ } else {
+ /* At 100M speeds, the timer resolution is 10us, and
+ * 802.3 says tw is 30us.
+ */
+ tw = 3;
+ ts = DIV_ROUND_UP(timer, 10);
}
+
+ if (ts > 255)
+ ts = 255;
+
+ /* Configure ts */
+ lpi0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
+ lpi0 = u32_replace_bits(lpi0, ts, MVNETA_LPI_CTRL_0_TS);
+ mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi0);
+
+ /* Configure tw and enable LPI generation */
+ lpi1 = mvreg_read(pp, MVNETA_LPI_CTRL_1);
+ lpi1 = u32_replace_bits(lpi1, tw, MVNETA_LPI_CTRL_1_TW);
+ lpi1 |= MVNETA_LPI_CTRL_1_REQUEST_ENABLE;
+ mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi1);
+
+ return 0;
}
static const struct phylink_mac_ops mvneta_phylink_ops = {
@@ -4297,6 +4344,8 @@ static const struct phylink_mac_ops mvneta_phylink_ops = {
.mac_finish = mvneta_mac_finish,
.mac_link_down = mvneta_mac_link_down,
.mac_link_up = mvneta_mac_link_up,
+ .mac_disable_tx_lpi = mvneta_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = mvneta_mac_enable_tx_lpi,
};
static int mvneta_mdio_probe(struct mvneta_port *pp)
@@ -4384,6 +4433,7 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
if (pp->neta_armada3700)
return 0;
+ netdev_lock(port->napi.dev);
spin_lock(&pp->lock);
/*
* Configuring the driver for a new CPU while the driver is
@@ -4391,6 +4441,7 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
*/
if (pp->is_stopped) {
spin_unlock(&pp->lock);
+ netdev_unlock(port->napi.dev);
return 0;
}
netif_tx_stop_all_queues(pp->dev);
@@ -4410,7 +4461,7 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
/* Mask all ethernet port interrupts */
on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
- napi_enable(&port->napi);
+ napi_enable_locked(&port->napi);
/*
* Enable per-CPU interrupts on the CPU that is
@@ -4431,6 +4482,8 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
MVNETA_CAUSE_LINK_CHANGE);
netif_tx_start_all_queues(pp->dev);
spin_unlock(&pp->lock);
+ netdev_unlock(port->napi.dev);
+
return 0;
}
@@ -4563,7 +4616,7 @@ static int mvneta_stop(struct net_device *dev)
/* Inform that we are stopping so we don't want to setup the
* driver for new CPUs in the notifiers. The code of the
* notifier for CPU online is protected by the same spinlock,
- * so when we get the lock, the notifer work is done.
+ * so when we get the lock, the notifier work is done.
*/
spin_lock(&pp->lock);
pp->is_stopped = true;
@@ -4794,11 +4847,9 @@ static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
int i;
for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
- memcpy(data + i * ETH_GSTRING_LEN,
- mvneta_statistics[i].name, ETH_GSTRING_LEN);
+ ethtool_puts(&data, mvneta_statistics[i].name);
if (!pp->bm_priv) {
- data += ETH_GSTRING_LEN * ARRAY_SIZE(mvneta_statistics);
page_pool_ethtool_stats_get_strings(data);
}
}
@@ -4961,19 +5012,9 @@ static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev)
return MVNETA_RSS_LU_TABLE_SIZE;
}
-static int mvneta_ethtool_get_rxnfc(struct net_device *dev,
- struct ethtool_rxnfc *info,
- u32 *rules __always_unused)
+static u32 mvneta_ethtool_get_rx_ring_count(struct net_device *dev)
{
- switch (info->cmd) {
- case ETHTOOL_GRXRINGS:
- info->data = rxq_number;
- return 0;
- case ETHTOOL_GRXFH:
- return -EOPNOTSUPP;
- default:
- return -EOPNOTSUPP;
- }
+ return rxq_number;
}
static int mvneta_config_rss(struct mvneta_port *pp)
@@ -5100,14 +5141,6 @@ static int mvneta_ethtool_get_eee(struct net_device *dev,
struct ethtool_keee *eee)
{
struct mvneta_port *pp = netdev_priv(dev);
- u32 lpi_ctl0;
-
- lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
-
- eee->eee_enabled = pp->eee_enabled;
- eee->eee_active = pp->eee_active;
- eee->tx_lpi_enabled = pp->tx_lpi_enabled;
- eee->tx_lpi_timer = (lpi_ctl0) >> 8; // * scale;
return phylink_ethtool_get_eee(pp->phylink, eee);
}
@@ -5116,7 +5149,6 @@ static int mvneta_ethtool_set_eee(struct net_device *dev,
struct ethtool_keee *eee)
{
struct mvneta_port *pp = netdev_priv(dev);
- u32 lpi_ctl0;
/* The Armada 37x documents do not give limits for this other than
* it being an 8-bit register.
@@ -5124,16 +5156,6 @@ static int mvneta_ethtool_set_eee(struct net_device *dev,
if (eee->tx_lpi_enabled && eee->tx_lpi_timer > 255)
return -EINVAL;
- lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
- lpi_ctl0 &= ~(0xff << 8);
- lpi_ctl0 |= eee->tx_lpi_timer << 8;
- mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi_ctl0);
-
- pp->eee_enabled = eee->eee_enabled;
- pp->tx_lpi_enabled = eee->tx_lpi_enabled;
-
- mvneta_set_eee(pp, eee->tx_lpi_enabled && eee->eee_enabled);
-
return phylink_ethtool_set_eee(pp->phylink, eee);
}
@@ -5326,13 +5348,14 @@ static const struct ethtool_ops mvneta_eth_tool_ops = {
.get_ethtool_stats = mvneta_ethtool_get_stats,
.get_sset_count = mvneta_ethtool_get_sset_count,
.get_rxfh_indir_size = mvneta_ethtool_get_rxfh_indir_size,
- .get_rxnfc = mvneta_ethtool_get_rxnfc,
+ .get_rx_ring_count = mvneta_ethtool_get_rx_ring_count,
.get_rxfh = mvneta_ethtool_get_rxfh,
.set_rxfh = mvneta_ethtool_set_rxfh,
.get_link_ksettings = mvneta_ethtool_get_link_ksettings,
.set_link_ksettings = mvneta_ethtool_set_link_ksettings,
.get_wol = mvneta_ethtool_get_wol,
.set_wol = mvneta_ethtool_set_wol,
+ .get_ts_info = ethtool_op_get_ts_info,
.get_eee = mvneta_ethtool_get_eee,
.set_eee = mvneta_ethtool_set_eee,
};
@@ -5447,6 +5470,9 @@ static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
!phy_interface_mode_is_rgmii(phy_mode))
return -EINVAL;
+ /* Ensure LPI is disabled */
+ mvneta_mac_disable_tx_lpi(&pp->phylink_config);
+
return 0;
}
@@ -5531,13 +5557,19 @@ static int mvneta_probe(struct platform_device *pdev)
clk_prepare_enable(pp->clk_bus);
pp->phylink_pcs.ops = &mvneta_phylink_pcs_ops;
- pp->phylink_pcs.neg_mode = true;
pp->phylink_config.dev = &dev->dev;
pp->phylink_config.type = PHYLINK_NETDEV;
pp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 |
MAC_100 | MAC_1000FD | MAC_2500FD;
+ /* Setup EEE. Choose 250us idle. Only supported in SGMII modes. */
+ __set_bit(PHY_INTERFACE_MODE_QSGMII, pp->phylink_config.lpi_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII, pp->phylink_config.lpi_interfaces);
+ pp->phylink_config.lpi_capabilities = MAC_100FD | MAC_1000FD;
+ pp->phylink_config.lpi_timer_default = 250;
+ pp->phylink_config.eee_enabled_default = true;
+
phy_interface_set_rgmii(pp->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_QSGMII,
pp->phylink_config.supported_interfaces);
@@ -5882,7 +5914,7 @@ MODULE_DEVICE_TABLE(of, mvneta_match);
static struct platform_driver mvneta_driver = {
.probe = mvneta_probe,
- .remove_new = mvneta_remove,
+ .remove = mvneta_remove,
.driver = {
.name = MVNETA_DRIVER_NAME,
.of_match_table = mvneta_match,
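
For the mvneta_mac_enable_tx_lpi() hunk above, the ts/tw scaling is easy to check by hand: with the 250us default LPI timer set in mvneta_probe(), the 1us ticks at 1G give ts = 250 and a fixed tw of 17, while the 10us ticks at 100M give ts = DIV_ROUND_UP(250, 10) = 25 and a fixed tw of 3, both well under the 255 clamp. A small standalone sketch (not part of the patch) of that arithmetic:

/* Standalone sketch, not part of this patch: the ts/tw values that
 * mvneta_mac_enable_tx_lpi() above would derive for the 250us default
 * LPI timer configured in mvneta_probe().
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int timer = 250;	/* lpi_timer_default, microseconds */
	unsigned int ts_1g, ts_100m;

	/* 1G link: 1us timer ticks, tw fixed at 17 (16.5us rounded up) */
	ts_1g = timer > 255 ? 255 : timer;

	/* 100M link: 10us timer ticks, tw fixed at 3 (30us) */
	ts_100m = DIV_ROUND_UP(timer, 10);
	if (ts_100m > 255)
		ts_100m = 255;

	printf("1G:   ts=%u tw=17\n", ts_1g);	/* ts=250 */
	printf("100M: ts=%u tw=3\n", ts_100m);	/* ts=25 */
	return 0;
}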
diff --git a/drivers/net/ethernet/marvell/mvneta_bm.c b/drivers/net/ethernet/marvell/mvneta_bm.c
index 3f46a0fed048..6bb380494919 100644
--- a/drivers/net/ethernet/marvell/mvneta_bm.c
+++ b/drivers/net/ethernet/marvell/mvneta_bm.c
@@ -485,7 +485,7 @@ MODULE_DEVICE_TABLE(of, mvneta_bm_match);
static struct platform_driver mvneta_bm_driver = {
.probe = mvneta_bm_probe,
- .remove_new = mvneta_bm_remove,
+ .remove = mvneta_bm_remove,
.driver = {
.name = MVNETA_BM_DRIVER_NAME,
.of_match_table = mvneta_bm_match,
diff --git a/drivers/net/ethernet/marvell/mvneta_bm.h b/drivers/net/ethernet/marvell/mvneta_bm.h
index e47783ce77e0..57ac039df6f7 100644
--- a/drivers/net/ethernet/marvell/mvneta_bm.h
+++ b/drivers/net/ethernet/marvell/mvneta_bm.h
@@ -115,7 +115,7 @@ struct mvneta_bm_pool {
/* Packet size */
int pkt_size;
- /* Size of the buffer acces through DMA*/
+ /* Size of the buffer access through DMA */
u32 buf_size;
/* BPPE virtual base address */
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index e809f91c08fb..061fcd444d50 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -481,6 +481,11 @@
#define MVPP22_GMAC_INT_SUM_MASK 0xa4
#define MVPP22_GMAC_INT_SUM_MASK_LINK_STAT BIT(1)
#define MVPP22_GMAC_INT_SUM_MASK_PTP BIT(2)
+#define MVPP2_GMAC_LPI_CTRL0 0xc0
+#define MVPP2_GMAC_LPI_CTRL0_TS_MASK GENMASK(15, 8)
+#define MVPP2_GMAC_LPI_CTRL1 0xc4
+#define MVPP2_GMAC_LPI_CTRL1_REQ_EN BIT(0)
+#define MVPP2_GMAC_LPI_CTRL1_TW_MASK GENMASK(15, 4)
/* Per-port XGMAC registers. PPv2.2 and PPv2.3, only for GOP port 0,
* relative to port->base.
@@ -1088,7 +1093,7 @@ struct mvpp2 {
unsigned int max_port_rxqs;
/* Workqueue to gather hardware statistics */
- char queue_name[30];
+ char queue_name[31];
struct workqueue_struct *stats_queue;
/* Debugfs root entry */
@@ -1108,6 +1113,9 @@ struct mvpp2 {
/* Spinlocks for CM3 shared memory configuration */
spinlock_t mss_spinlock;
+
+ /* Spinlock for shared PRS parser memory and shadow table */
+ spinlock_t prs_spinlock;
};
struct mvpp2_pcpu_stats {
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
index 40aeaa7bd739..44b201817d94 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
@@ -324,7 +324,7 @@ static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = {
MVPP2_PRS_RI_VLAN_MASK),
/* Non IP flow, with vlan tag */
MVPP2_DEF_FLOW(MVPP22_FLOW_ETHERNET, MVPP2_FL_NON_IP_TAG,
- MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP22_CLS_HEK_TAGGED,
0, 0),
};
@@ -1522,29 +1522,19 @@ static int mvpp22_rss_context_create(struct mvpp2_port *port, u32 *rss_ctx)
return 0;
}
-int mvpp22_port_rss_ctx_create(struct mvpp2_port *port, u32 *port_ctx)
+int mvpp22_port_rss_ctx_create(struct mvpp2_port *port, u32 port_ctx)
{
u32 rss_ctx;
- int ret, i;
+ int ret;
ret = mvpp22_rss_context_create(port, &rss_ctx);
if (ret)
return ret;
- /* Find the first available context number in the port, starting from 1.
- * Context 0 on each port is reserved for the default context.
- */
- for (i = 1; i < MVPP22_N_RSS_TABLES; i++) {
- if (port->rss_ctx[i] < 0)
- break;
- }
-
- if (i == MVPP22_N_RSS_TABLES)
+ if (WARN_ON_ONCE(port->rss_ctx[port_ctx] >= 0))
return -EINVAL;
- port->rss_ctx[i] = rss_ctx;
- *port_ctx = i;
-
+ port->rss_ctx[port_ctx] = rss_ctx;
return 0;
}
@@ -1628,7 +1618,8 @@ int mvpp22_port_rss_ctx_indir_get(struct mvpp2_port *port, u32 port_ctx,
return 0;
}
-int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info)
+int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port,
+ const struct ethtool_rxfh_fields *info)
{
u16 hash_opts = 0;
u32 flow_type;
@@ -1666,7 +1657,8 @@ int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info)
return mvpp2_port_rss_hash_opts_set(port, flow_type, hash_opts);
}
-int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info)
+int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port,
+ struct ethtool_rxfh_fields *info)
{
unsigned long hash_opts;
u32 flow_type;
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
index 663157dc8062..caadf3aea95d 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
@@ -264,7 +264,7 @@ int mvpp22_port_rss_init(struct mvpp2_port *port);
int mvpp22_port_rss_enable(struct mvpp2_port *port);
int mvpp22_port_rss_disable(struct mvpp2_port *port);
-int mvpp22_port_rss_ctx_create(struct mvpp2_port *port, u32 *rss_ctx);
+int mvpp22_port_rss_ctx_create(struct mvpp2_port *port, u32 rss_ctx);
int mvpp22_port_rss_ctx_delete(struct mvpp2_port *port, u32 rss_ctx);
int mvpp22_port_rss_ctx_indir_set(struct mvpp2_port *port, u32 rss_ctx,
@@ -272,8 +272,10 @@ int mvpp22_port_rss_ctx_indir_set(struct mvpp2_port *port, u32 rss_ctx,
int mvpp22_port_rss_ctx_indir_get(struct mvpp2_port *port, u32 rss_ctx,
u32 *indir);
-int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info);
-int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info);
+int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port,
+ struct ethtool_rxfh_fields *info);
+int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port,
+ const struct ethtool_rxfh_fields *info);
void mvpp2_cls_init(struct mvpp2 *priv);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 23adf53c2aa1..33426fded919 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -953,13 +953,13 @@ static void mvpp2_bm_pool_update_fc(struct mvpp2_port *port,
static void mvpp2_bm_pool_update_priv_fc(struct mvpp2 *priv, bool en)
{
struct mvpp2_port *port;
- int i;
+ int i, j;
for (i = 0; i < priv->port_count; i++) {
port = priv->port_list[i];
if (port->priv->percpu_pools) {
- for (i = 0; i < port->nrxqs; i++)
- mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[i],
+ for (j = 0; j < port->nrxqs; j++)
+ mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[j],
port->tx_fc & en);
} else {
mvpp2_bm_pool_update_fc(port, port->pool_long, port->tx_fc & en);
@@ -1375,7 +1375,7 @@ static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
}
out_set:
- dev->mtu = mtu;
+ WRITE_ONCE(dev->mtu, mtu);
dev->wanted_features = dev->features;
netdev_update_features(dev);
@@ -1985,45 +1985,32 @@ static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset,
u8 *data)
{
struct mvpp2_port *port = netdev_priv(netdev);
+ const char *str;
int i, q;
if (sset != ETH_SS_STATS)
return;
- for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++) {
- strscpy(data, mvpp2_ethtool_mib_regs[i].string,
- ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++)
+ ethtool_puts(&data, mvpp2_ethtool_mib_regs[i].string);
- for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++) {
- strscpy(data, mvpp2_ethtool_port_regs[i].string,
- ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++)
+ ethtool_puts(&data, mvpp2_ethtool_port_regs[i].string);
- for (q = 0; q < port->ntxqs; q++) {
+ for (q = 0; q < port->ntxqs; q++)
for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++) {
- snprintf(data, ETH_GSTRING_LEN,
- mvpp2_ethtool_txq_regs[i].string, q);
- data += ETH_GSTRING_LEN;
+ str = mvpp2_ethtool_txq_regs[i].string;
+ ethtool_sprintf(&data, str, q);
}
- }
- for (q = 0; q < port->nrxqs; q++) {
+ for (q = 0; q < port->nrxqs; q++)
for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++) {
- snprintf(data, ETH_GSTRING_LEN,
- mvpp2_ethtool_rxq_regs[i].string,
- q);
- data += ETH_GSTRING_LEN;
+ str = mvpp2_ethtool_rxq_regs[i].string;
+ ethtool_sprintf(&data, str, q);
}
- }
- for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_xdp); i++) {
- strscpy(data, mvpp2_ethtool_xdp[i].string,
- ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_xdp); i++)
+ ethtool_puts(&data, mvpp2_ethtool_xdp[i].string);
}
static void
@@ -3434,12 +3421,13 @@ static void mvpp2_isr_handle_ptp(struct mvpp2_port *port)
mvpp2_isr_handle_ptp_queue(port, 1);
}
-static void mvpp2_isr_handle_link(struct mvpp2_port *port, bool link)
+static void mvpp2_isr_handle_link(struct mvpp2_port *port,
+ struct phylink_pcs *pcs, bool link)
{
struct net_device *dev = port->dev;
if (port->phylink) {
- phylink_mac_change(port->phylink, link);
+ phylink_pcs_change(pcs, link);
return;
}
@@ -3472,7 +3460,7 @@ static void mvpp2_isr_handle_xlg(struct mvpp2_port *port)
if (val & MVPP22_XLG_INT_STAT_LINK) {
val = readl(port->base + MVPP22_XLG_STATUS);
link = (val & MVPP22_XLG_STATUS_LINK_UP);
- mvpp2_isr_handle_link(port, link);
+ mvpp2_isr_handle_link(port, &port->pcs_xlg, link);
}
}
@@ -3488,7 +3476,7 @@ static void mvpp2_isr_handle_gmac_internal(struct mvpp2_port *port)
if (val & MVPP22_GMAC_INT_STAT_LINK) {
val = readl(port->base + MVPP2_GMAC_STATUS0);
link = (val & MVPP2_GMAC_STATUS0_LINK_UP);
- mvpp2_isr_handle_link(port, link);
+ mvpp2_isr_handle_link(port, &port->pcs_gmac, link);
}
}
}
@@ -3927,13 +3915,13 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
while (rx_done < rx_todo) {
struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
+ u32 rx_status, timestamp, metasize = 0;
struct mvpp2_bm_pool *bm_pool;
struct page_pool *pp = NULL;
struct sk_buff *skb;
unsigned int frag_size;
dma_addr_t dma_addr;
phys_addr_t phys_addr;
- u32 rx_status, timestamp;
int pool, rx_bytes, err, ret;
struct page *page;
void *data;
@@ -3995,7 +3983,7 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
xdp_init_buff(&xdp, PAGE_SIZE, xdp_rxq);
xdp_prepare_buff(&xdp, data,
MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM,
- rx_bytes, false);
+ rx_bytes, true);
ret = mvpp2_run_xdp(port, xdp_prog, &xdp, pp, &ps);
@@ -4011,9 +3999,14 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
ps.rx_bytes += rx_bytes;
continue;
}
+
+ metasize = xdp.data - xdp.data_meta;
}
- skb = build_skb(data, frag_size);
+ if (frag_size)
+ skb = build_skb(data, frag_size);
+ else
+ skb = slab_build_skb(data);
if (!skb) {
netdev_warn(port->dev, "skb build failed\n");
goto err_drop_frame;
@@ -4047,6 +4040,8 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
skb_reserve(skb, MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM);
skb_put(skb, rx_bytes);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
skb->ip_summed = mvpp2_rx_csum(port, rx_status);
skb->protocol = eth_type_trans(skb, dev);
@@ -4444,6 +4439,8 @@ out:
txq_pcpu->count += frags;
aggr_txq->count += frags;
+ skb_tx_timestamp(skb);
+
/* Enable transmit */
wmb();
mvpp2_aggr_txq_pend_desc_add(port, frags);
@@ -5178,38 +5175,40 @@ mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->tx_dropped = dev->stats.tx_dropped;
}
-static int mvpp2_set_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
+static int mvpp2_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config config;
+ struct mvpp2_port *port = netdev_priv(dev);
void __iomem *ptp;
u32 gcr, int_mask;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
+ if (!port->hwtstamp)
+ return -EOPNOTSUPP;
- if (config.tx_type != HWTSTAMP_TX_OFF &&
- config.tx_type != HWTSTAMP_TX_ON)
+ if (config->tx_type != HWTSTAMP_TX_OFF &&
+ config->tx_type != HWTSTAMP_TX_ON)
return -ERANGE;
ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id);
int_mask = gcr = 0;
- if (config.tx_type != HWTSTAMP_TX_OFF) {
+ if (config->tx_type != HWTSTAMP_TX_OFF) {
gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_TX_RESET;
int_mask |= MVPP22_PTP_INT_MASK_QUEUE1 |
MVPP22_PTP_INT_MASK_QUEUE0;
}
/* It seems we must also release the TX reset when enabling the TSU */
- if (config.rx_filter != HWTSTAMP_FILTER_NONE)
+ if (config->rx_filter != HWTSTAMP_FILTER_NONE)
gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_RX_RESET |
MVPP22_PTP_GCR_TX_RESET;
if (gcr & MVPP22_PTP_GCR_TSU_ENABLE)
mvpp22_tai_start(port->priv->tai);
- if (config.rx_filter != HWTSTAMP_FILTER_NONE) {
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ if (config->rx_filter != HWTSTAMP_FILTER_NONE) {
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
mvpp2_modify(ptp + MVPP22_PTP_GCR,
MVPP22_PTP_GCR_RX_RESET |
MVPP22_PTP_GCR_TX_RESET |
@@ -5230,45 +5229,39 @@ static int mvpp2_set_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
if (!(gcr & MVPP22_PTP_GCR_TSU_ENABLE))
mvpp22_tai_stop(port->priv->tai);
- port->tx_hwtstamp_type = config.tx_type;
-
- if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
- return -EFAULT;
+ port->tx_hwtstamp_type = config->tx_type;
return 0;
}
-static int mvpp2_get_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
+static int mvpp2_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *config)
{
- struct hwtstamp_config config;
-
- memset(&config, 0, sizeof(config));
+ struct mvpp2_port *port = netdev_priv(dev);
- config.tx_type = port->tx_hwtstamp_type;
- config.rx_filter = port->rx_hwtstamp ?
- HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
+ if (!port->hwtstamp)
+ return -EOPNOTSUPP;
- if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
- return -EFAULT;
+ config->tx_type = port->tx_hwtstamp_type;
+ config->rx_filter = port->rx_hwtstamp ? HWTSTAMP_FILTER_ALL :
+ HWTSTAMP_FILTER_NONE;
return 0;
}
static int mvpp2_ethtool_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct mvpp2_port *port = netdev_priv(dev);
+ ethtool_op_get_ts_info(dev, info);
if (!port->hwtstamp)
- return -EOPNOTSUPP;
+ return 0;
info->phc_index = mvpp22_tai_ptp_clock_index(port->priv->tai);
- info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
- SOF_TIMESTAMPING_TX_HARDWARE |
- SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
info->tx_types = BIT(HWTSTAMP_TX_OFF) |
BIT(HWTSTAMP_TX_ON);
info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
@@ -5281,18 +5274,6 @@ static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct mvpp2_port *port = netdev_priv(dev);
- switch (cmd) {
- case SIOCSHWTSTAMP:
- if (port->hwtstamp)
- return mvpp2_set_ts_config(port, ifr);
- break;
-
- case SIOCGHWTSTAMP:
- if (port->hwtstamp)
- return mvpp2_get_ts_config(port, ifr);
- break;
- }
-
if (!port->phylink)
return -ENOTSUPP;
@@ -5599,6 +5580,13 @@ static int mvpp2_ethtool_set_link_ksettings(struct net_device *dev,
return phylink_ethtool_ksettings_set(port->phylink, cmd);
}
+static u32 mvpp2_ethtool_get_rx_ring_count(struct net_device *dev)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ return port->nrxqs;
+}
+
static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
struct ethtool_rxnfc *info, u32 *rules)
{
@@ -5609,12 +5597,6 @@ static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
return -EOPNOTSUPP;
switch (info->cmd) {
- case ETHTOOL_GRXFH:
- ret = mvpp2_ethtool_rxfh_get(port, info);
- break;
- case ETHTOOL_GRXRINGS:
- info->data = port->nrxqs;
- break;
case ETHTOOL_GRXCLSRLCNT:
info->rule_cnt = port->n_rfs_rules;
break;
@@ -5649,9 +5631,6 @@ static int mvpp2_ethtool_set_rxnfc(struct net_device *dev,
return -EOPNOTSUPP;
switch (info->cmd) {
- case ETHTOOL_SRXFH:
- ret = mvpp2_ethtool_rxfh_set(port, info);
- break;
case ETHTOOL_SRXCLSRLINS:
ret = mvpp2_ethtool_cls_rule_ins(port, info);
break;
@@ -5692,38 +5671,125 @@ static int mvpp2_ethtool_get_rxfh(struct net_device *dev,
return ret;
}
+static bool mvpp2_ethtool_rxfh_okay(struct mvpp2_port *port,
+ const struct ethtool_rxfh_param *rxfh)
+{
+ if (!mvpp22_rss_is_supported(port))
+ return false;
+
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_CRC32)
+ return false;
+
+ if (rxfh->key)
+ return false;
+
+ return true;
+}
+
+static int mvpp2_create_rxfh_context(struct net_device *dev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ int ret = 0;
+
+ if (!mvpp2_ethtool_rxfh_okay(port, rxfh))
+ return -EOPNOTSUPP;
+
+ ctx->hfunc = ETH_RSS_HASH_CRC32;
+
+ ret = mvpp22_port_rss_ctx_create(port, rxfh->rss_context);
+ if (ret)
+ return ret;
+
+ if (!rxfh->indir)
+ ret = mvpp22_port_rss_ctx_indir_get(port, rxfh->rss_context,
+ ethtool_rxfh_context_indir(ctx));
+ else
+ ret = mvpp22_port_rss_ctx_indir_set(port, rxfh->rss_context,
+ rxfh->indir);
+ return ret;
+}
+
+static int mvpp2_modify_rxfh_context(struct net_device *dev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ int ret = 0;
+
+ if (!mvpp2_ethtool_rxfh_okay(port, rxfh))
+ return -EOPNOTSUPP;
+
+ if (rxfh->indir)
+ ret = mvpp22_port_rss_ctx_indir_set(port, rxfh->rss_context,
+ rxfh->indir);
+ return ret;
+}
+
+static int mvpp2_remove_rxfh_context(struct net_device *dev,
+ struct ethtool_rxfh_context *ctx,
+ u32 rss_context,
+ struct netlink_ext_ack *extack)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ return mvpp22_port_rss_ctx_delete(port, rss_context);
+}
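Here the legacy rss_context handling in .set_rxfh is split into dedicated create/modify/remove callbacks. The ethtool core now allocates struct ethtool_rxfh_context (bounded by .rxfh_max_num_contexts further down) and owns the shadow indirection table. A minimal sketch of a create callback for a hypothetical foo driver, relying only on helpers already used above:

static int foo_create_rxfh_context(struct net_device *dev,
				   struct ethtool_rxfh_context *ctx,
				   const struct ethtool_rxfh_param *rxfh,
				   struct netlink_ext_ack *extack)
{
	u32 *indir = ethtool_rxfh_context_indir(ctx);	/* core-owned table */
	u32 i;

	/* Report the hash function the hardware will actually use */
	ctx->hfunc = ETH_RSS_HASH_CRC32;

	/* Program rxfh->rss_context in hardware here, then mirror the
	 * table: apply rxfh->indir if given, otherwise publish a default,
	 * much like mvpp2_create_rxfh_context() above.
	 */
	for (i = 0; i < ctx->indir_size; i++)
		indir[i] = rxfh->indir ? rxfh->indir[i]
				       : i % dev->real_num_rx_queues;
	return 0;
}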
+
static int mvpp2_ethtool_set_rxfh(struct net_device *dev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
+ return mvpp2_modify_rxfh_context(dev, NULL, rxfh, extack);
+}
+
+static int mvpp2_ethtool_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *info)
+{
struct mvpp2_port *port = netdev_priv(dev);
- u32 *rss_context = &rxfh->rss_context;
- int ret = 0;
if (!mvpp22_rss_is_supported(port))
return -EOPNOTSUPP;
- if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
- rxfh->hfunc != ETH_RSS_HASH_CRC32)
+ return mvpp2_ethtool_rxfh_get(port, info);
+}
+
+static int mvpp2_ethtool_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *info,
+ struct netlink_ext_ack *extack)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (!mvpp22_rss_is_supported(port))
return -EOPNOTSUPP;
- if (rxfh->key)
+ return mvpp2_ethtool_rxfh_set(port, info);
+}
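The ETHTOOL_GRXFH/SRXFH cases disappear from get/set_rxnfc below because RSS hash-field configuration now has dedicated ops, and ETHTOOL_GRXRINGS is likewise replaced by .get_rx_ring_count. A sketch of the wiring, using the same signatures as above and a hypothetical foo driver:

static u32 foo_get_rx_ring_count(struct net_device *dev)
{
	return 8;	/* number of RX rings; placeholder value */
}

static int foo_get_rxfh_fields(struct net_device *dev,
			       struct ethtool_rxfh_fields *fields)
{
	/* report which header fields feed the RSS hash */
	return 0;
}

static int foo_set_rxfh_fields(struct net_device *dev,
			       const struct ethtool_rxfh_fields *fields,
			       struct netlink_ext_ack *extack)
{
	/* program the requested hash fields */
	return 0;
}

static const struct ethtool_ops foo_ethtool_ops = {
	.get_rx_ring_count = foo_get_rx_ring_count,
	.get_rxfh_fields   = foo_get_rxfh_fields,
	.set_rxfh_fields   = foo_set_rxfh_fields,
};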
+
+static int mvpp2_ethtool_get_eee(struct net_device *dev,
+ struct ethtool_keee *eee)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (!port->phylink)
return -EOPNOTSUPP;
- if (*rss_context && rxfh->rss_delete)
- return mvpp22_port_rss_ctx_delete(port, *rss_context);
+ return phylink_ethtool_get_eee(port->phylink, eee);
+}
- if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
- ret = mvpp22_port_rss_ctx_create(port, rss_context);
- if (ret)
- return ret;
- }
+static int mvpp2_ethtool_set_eee(struct net_device *dev,
+ struct ethtool_keee *eee)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
- if (rxfh->indir)
- ret = mvpp22_port_rss_ctx_indir_set(port, *rss_context,
- rxfh->indir);
+ if (!port->phylink)
+ return -EOPNOTSUPP;
- return ret;
+ return phylink_ethtool_set_eee(port->phylink, eee);
}
/* Device ops */
@@ -5742,10 +5808,12 @@ static const struct net_device_ops mvpp2_netdev_ops = {
.ndo_set_features = mvpp2_set_features,
.ndo_bpf = mvpp2_xdp,
.ndo_xdp_xmit = mvpp2_xdp_xmit,
+ .ndo_hwtstamp_get = mvpp2_hwtstamp_get,
+ .ndo_hwtstamp_set = mvpp2_hwtstamp_set,
};
static const struct ethtool_ops mvpp2_eth_tool_ops = {
- .cap_rss_ctx_supported = true,
+ .rxfh_max_num_contexts = MVPP22_N_RSS_TABLES,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
.nway_reset = mvpp2_ethtool_nway_reset,
@@ -5763,11 +5831,19 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = {
.set_pauseparam = mvpp2_ethtool_set_pause_param,
.get_link_ksettings = mvpp2_ethtool_get_link_ksettings,
.set_link_ksettings = mvpp2_ethtool_set_link_ksettings,
+ .get_rx_ring_count = mvpp2_ethtool_get_rx_ring_count,
.get_rxnfc = mvpp2_ethtool_get_rxnfc,
.set_rxnfc = mvpp2_ethtool_set_rxnfc,
.get_rxfh_indir_size = mvpp2_ethtool_get_rxfh_indir_size,
.get_rxfh = mvpp2_ethtool_get_rxfh,
.set_rxfh = mvpp2_ethtool_set_rxfh,
+ .get_rxfh_fields = mvpp2_ethtool_get_rxfh_fields,
+ .set_rxfh_fields = mvpp2_ethtool_set_rxfh_fields,
+ .create_rxfh_context = mvpp2_create_rxfh_context,
+ .modify_rxfh_context = mvpp2_modify_rxfh_context,
+ .remove_rxfh_context = mvpp2_remove_rxfh_context,
+ .get_eee = mvpp2_ethtool_get_eee,
+ .set_eee = mvpp2_ethtool_set_eee,
};
/* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
@@ -6153,7 +6229,14 @@ static struct mvpp2_port *mvpp2_pcs_gmac_to_port(struct phylink_pcs *pcs)
return container_of(pcs, struct mvpp2_port, pcs_gmac);
}
+static unsigned int mvpp2_xlg_pcs_inband_caps(struct phylink_pcs *pcs,
+ phy_interface_t interface)
+{
+ return LINK_INBAND_DISABLE;
+}
+
static void mvpp2_xlg_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mvpp2_port *port = mvpp2_pcs_xlg_to_port(pcs);
@@ -6186,26 +6269,35 @@ static int mvpp2_xlg_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
}
static const struct phylink_pcs_ops mvpp2_phylink_xlg_pcs_ops = {
+ .pcs_inband_caps = mvpp2_xlg_pcs_inband_caps,
.pcs_get_state = mvpp2_xlg_pcs_get_state,
.pcs_config = mvpp2_xlg_pcs_config,
};
-static int mvpp2_gmac_pcs_validate(struct phylink_pcs *pcs,
- unsigned long *supported,
- const struct phylink_link_state *state)
+static unsigned int mvpp2_gmac_pcs_inband_caps(struct phylink_pcs *pcs,
+ phy_interface_t interface)
{
- /* When in 802.3z mode, we must have AN enabled:
+ /* When operating in an 802.3z mode, we must have AN enabled:
* Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ...
* When <PortType> = 1 (1000BASE-X) this field must be set to 1.
+ * Therefore, inband is "required".
*/
- if (phy_interface_mode_is_8023z(state->interface) &&
- !phylink_test(state->advertising, Autoneg))
- return -EINVAL;
+ if (phy_interface_mode_is_8023z(interface))
+ return LINK_INBAND_ENABLE;
- return 0;
+ /* SGMII and RGMII can be configured to use inband signalling of the
+ * AN result. Indicate these as "possible".
+ */
+ if (interface == PHY_INTERFACE_MODE_SGMII ||
+ phy_interface_mode_is_rgmii(interface))
+ return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
+
+ /* For any other modes, indicate that inband is not supported. */
+ return LINK_INBAND_DISABLE;
}
static void mvpp2_gmac_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs);
@@ -6309,7 +6401,7 @@ static void mvpp2_gmac_pcs_an_restart(struct phylink_pcs *pcs)
}
static const struct phylink_pcs_ops mvpp2_phylink_gmac_pcs_ops = {
- .pcs_validate = mvpp2_gmac_pcs_validate,
+ .pcs_inband_caps = mvpp2_gmac_pcs_inband_caps,
.pcs_get_state = mvpp2_gmac_pcs_get_state,
.pcs_config = mvpp2_gmac_pcs_config,
.pcs_an_restart = mvpp2_gmac_pcs_an_restart,
@@ -6631,6 +6723,55 @@ static void mvpp2_mac_link_down(struct phylink_config *config,
mvpp2_port_disable(port);
}
+static void mvpp2_mac_disable_tx_lpi(struct phylink_config *config)
+{
+ struct mvpp2_port *port = mvpp2_phylink_to_port(config);
+
+ mvpp2_modify(port->base + MVPP2_GMAC_LPI_CTRL1,
+ MVPP2_GMAC_LPI_CTRL1_REQ_EN, 0);
+}
+
+static int mvpp2_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
+ bool tx_clk_stop)
+{
+ struct mvpp2_port *port = mvpp2_phylink_to_port(config);
+ u32 ts, tw, lpi1, status;
+
+ status = readl(port->base + MVPP2_GMAC_STATUS0);
+ if (status & MVPP2_GMAC_STATUS0_GMII_SPEED) {
+ /* At 1G speeds, the timer resolution is 1us, and
+ * 802.3 says tw is 16.5us. Round up to 17us.
+ */
+ tw = 17;
+ ts = timer;
+ } else {
+ /* At 100M speeds, the timer resolution is 10us, and
+ * 802.3 says tw is 30us.
+ */
+ tw = 3;
+ ts = DIV_ROUND_UP(timer, 10);
+ }
+
+ if (ts > 255)
+ ts = 255;
+
+ /* Configure ts */
+ mvpp2_modify(port->base + MVPP2_GMAC_LPI_CTRL0,
+ MVPP2_GMAC_LPI_CTRL0_TS_MASK,
+ FIELD_PREP(MVPP2_GMAC_LPI_CTRL0_TS_MASK, ts));
+
+ lpi1 = readl(port->base + MVPP2_GMAC_LPI_CTRL1);
+
+ /* Configure tw */
+ lpi1 = u32_replace_bits(lpi1, tw, MVPP2_GMAC_LPI_CTRL1_TW_MASK);
+
+ /* Enable LPI generation */
+ writel(lpi1 | MVPP2_GMAC_LPI_CTRL1_REQ_EN,
+ port->base + MVPP2_GMAC_LPI_CTRL1);
+
+ return 0;
+}
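To make the ts/tw encoding above concrete, here is the arithmetic for the 250us LPI timer that this patch later sets as the probe-time default. An illustrative helper, not part of the driver:

#include <linux/math.h>
#include <linux/minmax.h>

static void foo_lpi_encode(u32 timer_us, bool gig_speed, u32 *ts, u32 *tw)
{
	if (gig_speed) {
		*tw = 17;			/* 16.5us rounded up, 1us units */
		*ts = timer_us;			/* already in 1us units */
	} else {
		*tw = 3;			/* 30us expressed in 10us units */
		*ts = DIV_ROUND_UP(timer_us, 10);
	}
	*ts = min_t(u32, *ts, 255);		/* TS is an 8-bit field */

	/* 250us default: 1G -> ts = 250, tw = 17; 100M -> ts = 25, tw = 3 */
}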
+
static const struct phylink_mac_ops mvpp2_phylink_ops = {
.mac_select_pcs = mvpp2_select_pcs,
.mac_prepare = mvpp2_mac_prepare,
@@ -6638,6 +6779,8 @@ static const struct phylink_mac_ops mvpp2_phylink_ops = {
.mac_finish = mvpp2_mac_finish,
.mac_link_up = mvpp2_mac_link_up,
.mac_link_down = mvpp2_mac_link_down,
+ .mac_enable_tx_lpi = mvpp2_mac_enable_tx_lpi,
+ .mac_disable_tx_lpi = mvpp2_mac_disable_tx_lpi,
};
/* Work-around for ACPI */
@@ -6867,9 +7010,8 @@ static int mvpp2_port_probe(struct platform_device *pdev,
for (thread = 0; thread < priv->nthreads; thread++) {
port_pcpu = per_cpu_ptr(port->pcpu, thread);
- hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED_SOFT);
- port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
+ hrtimer_setup(&port_pcpu->tx_done_timer, mvpp2_hr_timer_cb, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED_SOFT);
port_pcpu->timer_scheduled = false;
port_pcpu->dev = dev;
}
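hrtimer_setup() folds hrtimer_init() and the callback assignment into a single call, so the timer is never observable with a NULL function pointer. A minimal sketch with a hypothetical foo_pcpu structure:

#include <linux/hrtimer.h>

struct foo_pcpu {
	struct hrtimer tx_done_timer;
};

static enum hrtimer_restart foo_hr_timer_cb(struct hrtimer *timer)
{
	/* deferred TX completion work would run here */
	return HRTIMER_NORESTART;
}

static void foo_init_pcpu_timer(struct foo_pcpu *pcpu)
{
	/* one call replaces hrtimer_init() plus the .function assignment */
	hrtimer_setup(&pcpu->tx_done_timer, foo_hr_timer_cb, CLOCK_MONOTONIC,
		      HRTIMER_MODE_REL_PINNED_SOFT);
}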
@@ -6903,11 +7045,10 @@ static int mvpp2_port_probe(struct platform_device *pdev,
/* 9704 == 9728 - 20 and rounding to 8 */
dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
device_set_node(&dev->dev, port_fwnode);
+ dev->dev_port = port->id;
port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops;
- port->pcs_gmac.neg_mode = true;
port->pcs_xlg.ops = &mvpp2_phylink_xlg_pcs_ops;
- port->pcs_xlg.neg_mode = true;
if (!mvpp2_use_acpi_compat_mode(port_fwnode)) {
port->phylink_config.dev = &dev->dev;
@@ -6915,6 +7056,15 @@ static int mvpp2_port_probe(struct platform_device *pdev,
port->phylink_config.mac_capabilities =
MAC_2500FD | MAC_1000FD | MAC_100 | MAC_10;
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ port->phylink_config.lpi_interfaces);
+
+ port->phylink_config.lpi_capabilities = MAC_1000FD | MAC_100FD;
+
+ /* Setup EEE. Choose 250us idle. */
+ port->phylink_config.lpi_timer_default = 250;
+ port->phylink_config.eee_enabled_default = true;
+
if (port->priv->global_tx_fc)
port->phylink_config.mac_capabilities |=
MAC_SYM_PAUSE | MAC_ASYM_PAUSE;
@@ -6989,6 +7139,8 @@ static int mvpp2_port_probe(struct platform_device *pdev,
goto err_free_port_pcpu;
}
port->phylink = phylink;
+
+ mvpp2_mac_disable_tx_lpi(&port->phylink_config);
} else {
dev_warn(&pdev->dev, "Use link irqs for port#%d. FW update required\n", port->id);
port->phylink = NULL;
@@ -7412,8 +7564,6 @@ static int mvpp2_get_sram(struct platform_device *pdev,
static int mvpp2_probe(struct platform_device *pdev)
{
- struct fwnode_handle *fwnode = pdev->dev.fwnode;
- struct fwnode_handle *port_fwnode;
struct mvpp2 *priv;
struct resource *res;
void __iomem *base;
@@ -7586,7 +7736,7 @@ static int mvpp2_probe(struct platform_device *pdev)
}
/* Map DTS-active ports. Should be done before FIFO mvpp2_init */
- fwnode_for_each_available_child_node(fwnode, port_fwnode) {
+ device_for_each_child_node_scoped(&pdev->dev, port_fwnode) {
if (!fwnode_property_read_u32(port_fwnode, "port-id", &i))
priv->port_map |= BIT(i);
}
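device_for_each_child_node_scoped() declares the child handle itself and drops the reference automatically at the end of each iteration and on any early exit, which is what lets the fwnode_handle_put() in the error path further down be removed. A sketch of the pattern with a hypothetical port counter:

#include <linux/property.h>

static unsigned int foo_count_ports(struct device *dev)
{
	unsigned int count = 0;

	/* "child" is declared by the macro and released automatically,
	 * even if the loop body breaks or returns early.
	 */
	device_for_each_child_node_scoped(dev, child) {
		u32 id;

		if (!fwnode_property_read_u32(child, "port-id", &id))
			count++;
	}

	return count;
}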
@@ -7594,8 +7744,9 @@ static int mvpp2_probe(struct platform_device *pdev)
if (mvpp2_read(priv, MVPP2_VER_ID_REG) == MVPP2_VER_PP23)
priv->hw_version = MVPP23;
- /* Init mss lock */
+ /* Init locks for shared packet processor resources */
spin_lock_init(&priv->mss_spinlock);
+ spin_lock_init(&priv->prs_spinlock);
/* Initialize network controller */
err = mvpp2_init(pdev, priv);
@@ -7609,7 +7760,7 @@ static int mvpp2_probe(struct platform_device *pdev)
goto err_axi_clk;
/* Initialize ports */
- fwnode_for_each_available_child_node(fwnode, port_fwnode) {
+ device_for_each_child_node_scoped(&pdev->dev, port_fwnode) {
err = mvpp2_port_probe(pdev, port_fwnode, priv);
if (err < 0)
goto err_port_probe;
@@ -7648,14 +7799,8 @@ static int mvpp2_probe(struct platform_device *pdev)
return 0;
err_port_probe:
- fwnode_handle_put(port_fwnode);
-
- i = 0;
- fwnode_for_each_available_child_node(fwnode, port_fwnode) {
- if (priv->port_list[i])
- mvpp2_port_remove(priv->port_list[i]);
- i++;
- }
+ for (i = 0; i < priv->port_count; i++)
+ mvpp2_port_remove(priv->port_list[i]);
err_axi_clk:
clk_disable_unprepare(priv->axi_clk);
err_mg_core_clk:
@@ -7672,18 +7817,13 @@ err_pp_clk:
static void mvpp2_remove(struct platform_device *pdev)
{
struct mvpp2 *priv = platform_get_drvdata(pdev);
- struct fwnode_handle *fwnode = pdev->dev.fwnode;
- int i = 0, poolnum = MVPP2_BM_POOLS_NUM;
- struct fwnode_handle *port_fwnode;
+ int i, poolnum = MVPP2_BM_POOLS_NUM;
mvpp2_dbgfs_cleanup(priv);
- fwnode_for_each_available_child_node(fwnode, port_fwnode) {
- if (priv->port_list[i]) {
- mutex_destroy(&priv->port_list[i]->gather_stats_lock);
- mvpp2_port_remove(priv->port_list[i]);
- }
- i++;
+ for (i = 0; i < priv->port_count; i++) {
+ mutex_destroy(&priv->port_list[i]->gather_stats_lock);
+ mvpp2_port_remove(priv->port_list[i]);
}
destroy_workqueue(priv->stats_queue);
@@ -7706,7 +7846,7 @@ static void mvpp2_remove(struct platform_device *pdev)
aggr_txq->descs_dma);
}
- if (is_acpi_node(port_fwnode))
+ if (!dev_of_node(&pdev->dev))
return;
clk_disable_unprepare(priv->axi_clk);
@@ -7739,7 +7879,7 @@ MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match);
static struct platform_driver mvpp2_driver = {
.probe = mvpp2_probe,
- .remove_new = mvpp2_remove,
+ .remove = mvpp2_remove,
.driver = {
.name = MVPP2_DRIVER_NAME,
.of_match_table = mvpp2_match,
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
index 9af22f497a40..93e978bdf303 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
@@ -23,6 +23,8 @@ static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
int i;
+ lockdep_assert_held(&priv->prs_spinlock);
+
if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
return -EINVAL;
@@ -43,11 +45,13 @@ static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
}
/* Initialize tcam entry from hw */
-int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe,
- int tid)
+static int __mvpp2_prs_init_from_hw(struct mvpp2 *priv,
+ struct mvpp2_prs_entry *pe, int tid)
{
int i;
+ lockdep_assert_held(&priv->prs_spinlock);
+
if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
return -EINVAL;
@@ -73,6 +77,18 @@ int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe,
return 0;
}
+int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe,
+ int tid)
+{
+ int err;
+
+ spin_lock_bh(&priv->prs_spinlock);
+ err = __mvpp2_prs_init_from_hw(priv, pe, tid);
+ spin_unlock_bh(&priv->prs_spinlock);
+
+ return err;
+}
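The parser code now follows the usual double-underscore convention: __mvpp2_prs_*() helpers assume prs_spinlock is already held (and assert it under lockdep), while the exported names take the lock themselves. The shape of the pattern, reduced to a hypothetical foo object:

#include <linux/spinlock.h>
#include <linux/lockdep.h>

struct foo {
	spinlock_t lock;
	int hw_state;
};

/* Caller must hold foo->lock; used by paths that batch several accesses. */
static int __foo_read(struct foo *f)
{
	lockdep_assert_held(&f->lock);
	return f->hw_state;
}

/* Standalone entry point: wraps the unlocked helper in the lock. */
static int foo_read(struct foo *f)
{
	int val;

	spin_lock_bh(&f->lock);
	val = __foo_read(f);
	spin_unlock_bh(&f->lock);

	return val;
}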
+
/* Invalidate tcam hw entry */
static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
{
@@ -374,7 +390,7 @@ static int mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
continue;
- mvpp2_prs_init_from_hw(priv, &pe, tid);
+ __mvpp2_prs_init_from_hw(priv, &pe, tid);
bits = mvpp2_prs_sram_ai_get(&pe);
/* Sram store classification lookup ID in AI bits [5:0] */
@@ -441,7 +457,7 @@ static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
/* Entry exists - update port only */
- mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL);
+ __mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL);
} else {
/* Entry doesn't exist - create new */
memset(&pe, 0, sizeof(pe));
@@ -469,14 +485,17 @@ static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
}
/* Set port to unicast or multicast promiscuous mode */
-void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port,
- enum mvpp2_prs_l2_cast l2_cast, bool add)
+static void __mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port,
+ enum mvpp2_prs_l2_cast l2_cast,
+ bool add)
{
struct mvpp2_prs_entry pe;
unsigned char cast_match;
unsigned int ri;
int tid;
+ lockdep_assert_held(&priv->prs_spinlock);
+
if (l2_cast == MVPP2_PRS_L2_UNI_CAST) {
cast_match = MVPP2_PRS_UCAST_VAL;
tid = MVPP2_PE_MAC_UC_PROMISCUOUS;
@@ -489,7 +508,7 @@ void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port,
/* promiscuous mode - Accept unknown unicast or multicast packets */
if (priv->prs_shadow[tid].valid) {
- mvpp2_prs_init_from_hw(priv, &pe, tid);
+ __mvpp2_prs_init_from_hw(priv, &pe, tid);
} else {
memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
@@ -522,6 +541,14 @@ void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port,
mvpp2_prs_hw_write(priv, &pe);
}
+void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port,
+ enum mvpp2_prs_l2_cast l2_cast, bool add)
+{
+ spin_lock_bh(&priv->prs_spinlock);
+ __mvpp2_prs_mac_promisc_set(priv, port, l2_cast, add);
+ spin_unlock_bh(&priv->prs_spinlock);
+}
+
/* Set entry for dsa packets */
static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
bool tagged, bool extend)
@@ -539,7 +566,7 @@ static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
if (priv->prs_shadow[tid].valid) {
/* Entry exists - update port only */
- mvpp2_prs_init_from_hw(priv, &pe, tid);
+ __mvpp2_prs_init_from_hw(priv, &pe, tid);
} else {
/* Entry doesn't exist - create new */
memset(&pe, 0, sizeof(pe));
@@ -610,7 +637,7 @@ static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
if (priv->prs_shadow[tid].valid) {
/* Entry exists - update port only */
- mvpp2_prs_init_from_hw(priv, &pe, tid);
+ __mvpp2_prs_init_from_hw(priv, &pe, tid);
} else {
/* Entry doesn't exist - create new */
memset(&pe, 0, sizeof(pe));
@@ -673,7 +700,7 @@ static int mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai)
priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
continue;
- mvpp2_prs_init_from_hw(priv, &pe, tid);
+ __mvpp2_prs_init_from_hw(priv, &pe, tid);
match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid);
if (!match)
continue;
@@ -726,7 +753,7 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
continue;
- mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
+ __mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
ri_bits = mvpp2_prs_sram_ri_get(&pe);
if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
MVPP2_PRS_RI_VLAN_DOUBLE)
@@ -760,7 +787,7 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
} else {
- mvpp2_prs_init_from_hw(priv, &pe, tid);
+ __mvpp2_prs_init_from_hw(priv, &pe, tid);
}
/* Update ports' mask */
mvpp2_prs_tcam_port_map_set(&pe, port_map);
@@ -800,7 +827,7 @@ static int mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1,
priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
continue;
- mvpp2_prs_init_from_hw(priv, &pe, tid);
+ __mvpp2_prs_init_from_hw(priv, &pe, tid);
match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid1) &&
mvpp2_prs_tcam_data_cmp(&pe, 4, tpid2);
@@ -849,7 +876,7 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
continue;
- mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
+ __mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
ri_bits = mvpp2_prs_sram_ri_get(&pe);
ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
@@ -880,7 +907,7 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
} else {
- mvpp2_prs_init_from_hw(priv, &pe, tid);
+ __mvpp2_prs_init_from_hw(priv, &pe, tid);
}
/* Update ports' mask */
@@ -1213,8 +1240,8 @@ static void mvpp2_prs_mac_init(struct mvpp2 *priv)
/* Create dummy entries for drop all and promiscuous modes */
mvpp2_prs_drop_fc(priv);
mvpp2_prs_mac_drop_all_set(priv, 0, false);
- mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false);
- mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false);
+ __mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false);
+ __mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false);
}
/* Set default entries for various types of dsa packets */
@@ -1533,12 +1560,6 @@ static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
struct mvpp2_prs_entry pe;
int err;
- priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool),
- MVPP2_PRS_DBL_VLANS_MAX,
- GFP_KERNEL);
- if (!priv->prs_double_vlans)
- return -ENOMEM;
-
/* Double VLAN: 0x88A8, 0x8100 */
err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021AD, ETH_P_8021Q,
MVPP2_PRS_PORT_MASK);
@@ -1941,7 +1962,7 @@ static int mvpp2_prs_vid_range_find(struct mvpp2_port *port, u16 vid, u16 mask)
port->priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID)
continue;
- mvpp2_prs_init_from_hw(port->priv, &pe, tid);
+ __mvpp2_prs_init_from_hw(port->priv, &pe, tid);
mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]);
mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]);
@@ -1970,6 +1991,8 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
memset(&pe, 0, sizeof(pe));
+ spin_lock_bh(&priv->prs_spinlock);
+
/* Scan TCAM and see if entry with this <vid,port> already exists */
tid = mvpp2_prs_vid_range_find(port, vid, mask);
@@ -1988,8 +2011,10 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
MVPP2_PRS_VLAN_FILT_MAX_ENTRY);
/* There isn't room for a new VID filter */
- if (tid < 0)
+ if (tid < 0) {
+ spin_unlock_bh(&priv->prs_spinlock);
return tid;
+ }
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
pe.index = tid;
@@ -1997,7 +2022,7 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
/* Mask all ports */
mvpp2_prs_tcam_port_map_set(&pe, 0);
} else {
- mvpp2_prs_init_from_hw(priv, &pe, tid);
+ __mvpp2_prs_init_from_hw(priv, &pe, tid);
}
/* Enable the current port */
@@ -2019,6 +2044,7 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
mvpp2_prs_hw_write(priv, &pe);
+ spin_unlock_bh(&priv->prs_spinlock);
return 0;
}
@@ -2028,15 +2054,16 @@ void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid)
struct mvpp2 *priv = port->priv;
int tid;
- /* Scan TCAM and see if entry with this <vid,port> already exist */
- tid = mvpp2_prs_vid_range_find(port, vid, 0xfff);
+ spin_lock_bh(&priv->prs_spinlock);
- /* No such entry */
- if (tid < 0)
- return;
+ /* Invalidate TCAM entry with this <vid,port>, if it exists */
+ tid = mvpp2_prs_vid_range_find(port, vid, 0xfff);
+ if (tid >= 0) {
+ mvpp2_prs_hw_inv(priv, tid);
+ priv->prs_shadow[tid].valid = false;
+ }
- mvpp2_prs_hw_inv(priv, tid);
- priv->prs_shadow[tid].valid = false;
+ spin_unlock_bh(&priv->prs_spinlock);
}
/* Remove all existing VID filters on this port */
@@ -2045,6 +2072,8 @@ void mvpp2_prs_vid_remove_all(struct mvpp2_port *port)
struct mvpp2 *priv = port->priv;
int tid;
+ spin_lock_bh(&priv->prs_spinlock);
+
for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
if (priv->prs_shadow[tid].valid) {
@@ -2052,6 +2081,8 @@ void mvpp2_prs_vid_remove_all(struct mvpp2_port *port)
priv->prs_shadow[tid].valid = false;
}
}
+
+ spin_unlock_bh(&priv->prs_spinlock);
}
/* Remove VID filtering entry for this port */
@@ -2060,10 +2091,14 @@ void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port)
unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
struct mvpp2 *priv = port->priv;
+ spin_lock_bh(&priv->prs_spinlock);
+
/* Invalidate the guard entry */
mvpp2_prs_hw_inv(priv, tid);
priv->prs_shadow[tid].valid = false;
+
+ spin_unlock_bh(&priv->prs_spinlock);
}
/* Add guard entry that drops packets when no VID is matched on this port */
@@ -2079,6 +2114,8 @@ void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port)
memset(&pe, 0, sizeof(pe));
+ spin_lock_bh(&priv->prs_spinlock);
+
pe.index = tid;
reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
@@ -2111,6 +2148,8 @@ void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port)
/* Update shadow table */
mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
mvpp2_prs_hw_write(priv, &pe);
+
+ spin_unlock_bh(&priv->prs_spinlock);
}
/* Parser default initialization */
@@ -2118,6 +2157,20 @@ int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv)
{
int err, index, i;
+ priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
+ sizeof(*priv->prs_shadow),
+ GFP_KERNEL);
+ if (!priv->prs_shadow)
+ return -ENOMEM;
+
+ priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool),
+ MVPP2_PRS_DBL_VLANS_MAX,
+ GFP_KERNEL);
+ if (!priv->prs_double_vlans)
+ return -ENOMEM;
+
+ spin_lock_bh(&priv->prs_spinlock);
+
/* Enable tcam table */
mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
@@ -2136,12 +2189,6 @@ int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv)
for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
mvpp2_prs_hw_inv(priv, index);
- priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
- sizeof(*priv->prs_shadow),
- GFP_KERNEL);
- if (!priv->prs_shadow)
- return -ENOMEM;
-
/* Always start from lookup = 0 */
for (index = 0; index < MVPP2_MAX_PORTS; index++)
mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
@@ -2158,26 +2205,13 @@ int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv)
mvpp2_prs_vid_init(priv);
err = mvpp2_prs_etype_init(priv);
- if (err)
- return err;
-
- err = mvpp2_prs_vlan_init(pdev, priv);
- if (err)
- return err;
-
- err = mvpp2_prs_pppoe_init(priv);
- if (err)
- return err;
-
- err = mvpp2_prs_ip6_init(priv);
- if (err)
- return err;
-
- err = mvpp2_prs_ip4_init(priv);
- if (err)
- return err;
+ err = err ? : mvpp2_prs_vlan_init(pdev, priv);
+ err = err ? : mvpp2_prs_pppoe_init(priv);
+ err = err ? : mvpp2_prs_ip6_init(priv);
+ err = err ? : mvpp2_prs_ip4_init(priv);
- return 0;
+ spin_unlock_bh(&priv->prs_spinlock);
+ return err;
}
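The error handling above uses the GNU conditional operator with an omitted middle operand: err keeps its value if it is already nonzero, and the next init routine is only called while err is still 0, so the chain stops at the first failure and falls through to the single unlock. One step expands to:

	err = err ? : mvpp2_prs_vlan_init(pdev, priv);

	/* ... which behaves exactly like: */
	if (!err)
		err = mvpp2_prs_vlan_init(pdev, priv);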
/* Compare MAC DA with tcam entry data */
@@ -2217,7 +2251,7 @@ mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
(priv->prs_shadow[tid].udf != udf_type))
continue;
- mvpp2_prs_init_from_hw(priv, &pe, tid);
+ __mvpp2_prs_init_from_hw(priv, &pe, tid);
entry_pmap = mvpp2_prs_tcam_port_map_get(&pe);
if (mvpp2_prs_mac_range_equals(&pe, da, mask) &&
@@ -2229,7 +2263,8 @@ mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
}
/* Update parser's mac da entry */
-int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add)
+static int __mvpp2_prs_mac_da_accept(struct mvpp2_port *port,
+ const u8 *da, bool add)
{
unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
struct mvpp2 *priv = port->priv;
@@ -2261,7 +2296,7 @@ int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add)
/* Mask all ports */
mvpp2_prs_tcam_port_map_set(&pe, 0);
} else {
- mvpp2_prs_init_from_hw(priv, &pe, tid);
+ __mvpp2_prs_init_from_hw(priv, &pe, tid);
}
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
@@ -2317,6 +2352,17 @@ int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add)
return 0;
}
+int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add)
+{
+ int err;
+
+ spin_lock_bh(&port->priv->prs_spinlock);
+ err = __mvpp2_prs_mac_da_accept(port, da, add);
+ spin_unlock_bh(&port->priv->prs_spinlock);
+
+ return err;
+}
+
int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
{
struct mvpp2_port *port = netdev_priv(dev);
@@ -2345,6 +2391,8 @@ void mvpp2_prs_mac_del_all(struct mvpp2_port *port)
unsigned long pmap;
int index, tid;
+ spin_lock_bh(&priv->prs_spinlock);
+
for (tid = MVPP2_PE_MAC_RANGE_START;
tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];
@@ -2354,7 +2402,7 @@ void mvpp2_prs_mac_del_all(struct mvpp2_port *port)
(priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
continue;
- mvpp2_prs_init_from_hw(priv, &pe, tid);
+ __mvpp2_prs_init_from_hw(priv, &pe, tid);
pmap = mvpp2_prs_tcam_port_map_get(&pe);
@@ -2375,14 +2423,17 @@ void mvpp2_prs_mac_del_all(struct mvpp2_port *port)
continue;
/* Remove entry from TCAM */
- mvpp2_prs_mac_da_accept(port, da, false);
+ __mvpp2_prs_mac_da_accept(port, da, false);
}
+
+ spin_unlock_bh(&priv->prs_spinlock);
}
int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
{
switch (type) {
case MVPP2_TAG_TYPE_EDSA:
+ spin_lock_bh(&priv->prs_spinlock);
/* Add port to EDSA entries */
mvpp2_prs_dsa_tag_set(priv, port, true,
MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
@@ -2393,9 +2444,11 @@ int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
mvpp2_prs_dsa_tag_set(priv, port, false,
MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
+ spin_unlock_bh(&priv->prs_spinlock);
break;
case MVPP2_TAG_TYPE_DSA:
+ spin_lock_bh(&priv->prs_spinlock);
/* Add port to DSA entries */
mvpp2_prs_dsa_tag_set(priv, port, true,
MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
@@ -2406,10 +2459,12 @@ int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
mvpp2_prs_dsa_tag_set(priv, port, false,
MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
+ spin_unlock_bh(&priv->prs_spinlock);
break;
case MVPP2_TAG_TYPE_MH:
case MVPP2_TAG_TYPE_NONE:
+ spin_lock_bh(&priv->prs_spinlock);
/* Remove port from EDSA and DSA entries */
mvpp2_prs_dsa_tag_set(priv, port, false,
MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
@@ -2419,6 +2474,7 @@ int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
mvpp2_prs_dsa_tag_set(priv, port, false,
MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
+ spin_unlock_bh(&priv->prs_spinlock);
break;
default:
@@ -2437,11 +2493,15 @@ int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask)
memset(&pe, 0, sizeof(pe));
+ spin_lock_bh(&priv->prs_spinlock);
+
tid = mvpp2_prs_tcam_first_free(priv,
MVPP2_PE_LAST_FREE_TID,
MVPP2_PE_FIRST_FREE_TID);
- if (tid < 0)
+ if (tid < 0) {
+ spin_unlock_bh(&priv->prs_spinlock);
return tid;
+ }
pe.index = tid;
@@ -2461,6 +2521,7 @@ int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask)
mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
mvpp2_prs_hw_write(priv, &pe);
+ spin_unlock_bh(&priv->prs_spinlock);
return 0;
}
@@ -2472,6 +2533,8 @@ int mvpp2_prs_def_flow(struct mvpp2_port *port)
memset(&pe, 0, sizeof(pe));
+ spin_lock_bh(&port->priv->prs_spinlock);
+
tid = mvpp2_prs_flow_find(port->priv, port->id);
/* Such entry does not exist */
@@ -2480,8 +2543,10 @@ int mvpp2_prs_def_flow(struct mvpp2_port *port)
tid = mvpp2_prs_tcam_first_free(port->priv,
MVPP2_PE_LAST_FREE_TID,
MVPP2_PE_FIRST_FREE_TID);
- if (tid < 0)
+ if (tid < 0) {
+ spin_unlock_bh(&port->priv->prs_spinlock);
return tid;
+ }
pe.index = tid;
@@ -2492,13 +2557,14 @@ int mvpp2_prs_def_flow(struct mvpp2_port *port)
/* Update shadow table */
mvpp2_prs_shadow_set(port->priv, pe.index, MVPP2_PRS_LU_FLOWS);
} else {
- mvpp2_prs_init_from_hw(port->priv, &pe, tid);
+ __mvpp2_prs_init_from_hw(port->priv, &pe, tid);
}
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
mvpp2_prs_tcam_port_map_set(&pe, (1 << port->id));
mvpp2_prs_hw_write(port->priv, &pe);
+ spin_unlock_bh(&port->priv->prs_spinlock);
return 0;
}
@@ -2509,11 +2575,14 @@ int mvpp2_prs_hits(struct mvpp2 *priv, int index)
if (index > MVPP2_PRS_TCAM_SRAM_SIZE)
return -EINVAL;
+ spin_lock_bh(&priv->prs_spinlock);
+
mvpp2_write(priv, MVPP2_PRS_TCAM_HIT_IDX_REG, index);
val = mvpp2_read(priv, MVPP2_PRS_TCAM_HIT_CNT_REG);
val &= MVPP2_PRS_TCAM_HIT_CNT_MASK;
+ spin_unlock_bh(&priv->prs_spinlock);
return val;
}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
index 7d0124b283da..01c6c0a2f283 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
@@ -47,7 +47,7 @@ static const char octep_gstrings_global_stats[][ETH_GSTRING_LEN] = {
"rx_err_pkts",
};
-#define OCTEP_GLOBAL_STATS_CNT (sizeof(octep_gstrings_global_stats) / ETH_GSTRING_LEN)
+#define OCTEP_GLOBAL_STATS_CNT ARRAY_SIZE(octep_gstrings_global_stats)
static const char octep_gstrings_tx_q_stats[][ETH_GSTRING_LEN] = {
"tx_packets_posted[Q-%u]",
@@ -56,7 +56,7 @@ static const char octep_gstrings_tx_q_stats[][ETH_GSTRING_LEN] = {
"tx_busy[Q-%u]",
};
-#define OCTEP_TX_Q_STATS_CNT (sizeof(octep_gstrings_tx_q_stats) / ETH_GSTRING_LEN)
+#define OCTEP_TX_Q_STATS_CNT ARRAY_SIZE(octep_gstrings_tx_q_stats)
static const char octep_gstrings_rx_q_stats[][ETH_GSTRING_LEN] = {
"rx_packets[Q-%u]",
@@ -64,7 +64,7 @@ static const char octep_gstrings_rx_q_stats[][ETH_GSTRING_LEN] = {
"rx_alloc_errors[Q-%u]",
};
-#define OCTEP_RX_Q_STATS_CNT (sizeof(octep_gstrings_rx_q_stats) / ETH_GSTRING_LEN)
+#define OCTEP_RX_Q_STATS_CNT ARRAY_SIZE(octep_gstrings_rx_q_stats)
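Each entry in these tables is a fixed char[ETH_GSTRING_LEN], so dividing the total size by ETH_GSTRING_LEN did give the element count, but ARRAY_SIZE() states the intent directly and stays correct if the element type ever changes. A small illustration with a hypothetical table:

static const char foo_stats_strings[][ETH_GSTRING_LEN] = {
	"rx_packets",
	"tx_packets",
};

/* Both evaluate to 2 for the table above; only ARRAY_SIZE() survives a
 * change of element type.
 */
#define FOO_STATS_CNT_OLD	(sizeof(foo_stats_strings) / ETH_GSTRING_LEN)
#define FOO_STATS_CNT		ARRAY_SIZE(foo_stats_strings)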
static void octep_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
@@ -80,32 +80,25 @@ static void octep_get_strings(struct net_device *netdev,
{
struct octep_device *oct = netdev_priv(netdev);
u16 num_queues = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
- char *strings = (char *)data;
+ const char *str;
int i, j;
switch (stringset) {
case ETH_SS_STATS:
- for (i = 0; i < OCTEP_GLOBAL_STATS_CNT; i++) {
- snprintf(strings, ETH_GSTRING_LEN,
- octep_gstrings_global_stats[i]);
- strings += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < OCTEP_GLOBAL_STATS_CNT; i++)
+ ethtool_puts(&data, octep_gstrings_global_stats[i]);
- for (i = 0; i < num_queues; i++) {
+ for (i = 0; i < num_queues; i++)
for (j = 0; j < OCTEP_TX_Q_STATS_CNT; j++) {
- snprintf(strings, ETH_GSTRING_LEN,
- octep_gstrings_tx_q_stats[j], i);
- strings += ETH_GSTRING_LEN;
+ str = octep_gstrings_tx_q_stats[j];
+ ethtool_sprintf(&data, str, i);
}
- }
- for (i = 0; i < num_queues; i++) {
+ for (i = 0; i < num_queues; i++)
for (j = 0; j < OCTEP_RX_Q_STATS_CNT; j++) {
- snprintf(strings, ETH_GSTRING_LEN,
- octep_gstrings_rx_q_stats[j], i);
- strings += ETH_GSTRING_LEN;
+ str = octep_gstrings_rx_q_stats[j];
+ ethtool_sprintf(&data, str, i);
}
- }
break;
default:
break;
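ethtool_puts() and ethtool_sprintf() write into the current ETH_GSTRING_LEN slot and advance the caller's data pointer, replacing the open-coded snprintf() plus pointer arithmetic above. A sketch of a stringset fill using a hypothetical queue-stat table:

#define FOO_NUM_QUEUES	4

static const char foo_q_stats[][ETH_GSTRING_LEN] = {
	"rx_packets[Q-%u]",
	"rx_bytes[Q-%u]",
};

static void foo_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	unsigned int i, j;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < FOO_NUM_QUEUES; i++)
		for (j = 0; j < ARRAY_SIZE(foo_q_stats); j++)
			/* fills one ETH_GSTRING_LEN slot, then advances data */
			ethtool_sprintf(&data, foo_q_stats[j], i);
}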
@@ -157,17 +150,14 @@ octep_get_ethtool_stats(struct net_device *netdev,
iface_rx_stats,
iface_tx_stats);
- for (q = 0; q < oct->num_oqs; q++) {
- struct octep_iq *iq = oct->iq[q];
- struct octep_oq *oq = oct->oq[q];
+ for (q = 0; q < OCTEP_MAX_QUEUES; q++) {
+ tx_packets += oct->stats_iq[q].instr_completed;
+ tx_bytes += oct->stats_iq[q].bytes_sent;
+ tx_busy_errors += oct->stats_iq[q].tx_busy;
- tx_packets += iq->stats.instr_completed;
- tx_bytes += iq->stats.bytes_sent;
- tx_busy_errors += iq->stats.tx_busy;
-
- rx_packets += oq->stats.packets;
- rx_bytes += oq->stats.bytes;
- rx_alloc_errors += oq->stats.alloc_failures;
+ rx_packets += oct->stats_oq[q].packets;
+ rx_bytes += oct->stats_oq[q].bytes;
+ rx_alloc_errors += oct->stats_oq[q].alloc_failures;
}
i = 0;
data[i++] = rx_packets;
@@ -205,22 +195,18 @@ octep_get_ethtool_stats(struct net_device *netdev,
data[i++] = iface_rx_stats->err_pkts;
/* Per Tx Queue stats */
- for (q = 0; q < oct->num_iqs; q++) {
- struct octep_iq *iq = oct->iq[q];
-
- data[i++] = iq->stats.instr_posted;
- data[i++] = iq->stats.instr_completed;
- data[i++] = iq->stats.bytes_sent;
- data[i++] = iq->stats.tx_busy;
+ for (q = 0; q < OCTEP_MAX_QUEUES; q++) {
+ data[i++] = oct->stats_iq[q].instr_posted;
+ data[i++] = oct->stats_iq[q].instr_completed;
+ data[i++] = oct->stats_iq[q].bytes_sent;
+ data[i++] = oct->stats_iq[q].tx_busy;
}
/* Per Rx Queue stats */
- for (q = 0; q < oct->num_oqs; q++) {
- struct octep_oq *oq = oct->oq[q];
-
- data[i++] = oq->stats.packets;
- data[i++] = oq->stats.bytes;
- data[i++] = oq->stats.alloc_failures;
+ for (q = 0; q < OCTEP_MAX_QUEUES; q++) {
+ data[i++] = oct->stats_oq[q].packets;
+ data[i++] = oct->stats_oq[q].bytes;
+ data[i++] = oct->stats_oq[q].alloc_failures;
}
}
@@ -451,6 +437,15 @@ static int octep_set_link_ksettings(struct net_device *netdev,
return 0;
}
+static void octep_get_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct octep_device *oct = netdev_priv(dev);
+
+ channel->max_combined = CFG_GET_PORTS_MAX_IO_RINGS(oct->conf);
+ channel->combined_count = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+}
+
static const struct ethtool_ops octep_ethtool_ops = {
.get_drvinfo = octep_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -459,6 +454,7 @@ static const struct ethtool_ops octep_ethtool_ops = {
.get_ethtool_stats = octep_get_ethtool_stats,
.get_link_ksettings = octep_get_link_ksettings,
.set_link_ksettings = octep_set_link_ksettings,
+ .get_channels = octep_get_channels,
};
void octep_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
index 7c9faa714a10..bcea3fc26a8c 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
@@ -822,7 +822,7 @@ static inline int octep_iq_full_check(struct octep_iq *iq)
if (unlikely(IQ_INSTR_SPACE(iq) >
OCTEP_WAKE_QUEUE_THRESHOLD)) {
netif_start_subqueue(iq->netdev, iq->q_no);
- iq->stats.restart_cnt++;
+ iq->stats->restart_cnt++;
return 0;
}
@@ -960,7 +960,7 @@ static netdev_tx_t octep_start_xmit(struct sk_buff *skb,
wmb();
/* Ring Doorbell to notify the NIC of new packets */
writel(iq->fill_cnt, iq->doorbell_reg);
- iq->stats.instr_posted += iq->fill_cnt;
+ iq->stats->instr_posted += iq->fill_cnt;
iq->fill_cnt = 0;
return NETDEV_TX_OK;
@@ -991,37 +991,24 @@ dma_map_err:
static void octep_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
- u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
struct octep_device *oct = netdev_priv(netdev);
+ u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
int q;
- if (netif_running(netdev))
- octep_ctrl_net_get_if_stats(oct,
- OCTEP_CTRL_NET_INVALID_VFID,
- &oct->iface_rx_stats,
- &oct->iface_tx_stats);
-
tx_packets = 0;
tx_bytes = 0;
rx_packets = 0;
rx_bytes = 0;
- for (q = 0; q < oct->num_oqs; q++) {
- struct octep_iq *iq = oct->iq[q];
- struct octep_oq *oq = oct->oq[q];
-
- tx_packets += iq->stats.instr_completed;
- tx_bytes += iq->stats.bytes_sent;
- rx_packets += oq->stats.packets;
- rx_bytes += oq->stats.bytes;
+ for (q = 0; q < OCTEP_MAX_QUEUES; q++) {
+ tx_packets += oct->stats_iq[q].instr_completed;
+ tx_bytes += oct->stats_iq[q].bytes_sent;
+ rx_packets += oct->stats_oq[q].packets;
+ rx_bytes += oct->stats_oq[q].bytes;
}
stats->tx_packets = tx_packets;
stats->tx_bytes = tx_bytes;
stats->rx_packets = rx_packets;
stats->rx_bytes = rx_bytes;
- stats->multicast = oct->iface_rx_stats.mcast_pkts;
- stats->rx_errors = oct->iface_rx_stats.err_pkts;
- stats->collisions = oct->iface_tx_stats.xscol;
- stats->tx_fifo_errors = oct->iface_tx_stats.undflw;
}
/**
@@ -1096,7 +1083,7 @@ static int octep_change_mtu(struct net_device *netdev, int new_mtu)
true);
if (!err) {
oct->link_info.mtu = new_mtu;
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
}
return err;
@@ -1137,6 +1124,59 @@ static int octep_set_features(struct net_device *dev, netdev_features_t features
return err;
}
+static bool octep_is_vf_valid(struct octep_device *oct, int vf)
+{
+ if (vf >= CFG_GET_ACTIVE_VFS(oct->conf)) {
+ netdev_err(oct->netdev, "Invalid VF ID %d\n", vf);
+ return false;
+ }
+
+ return true;
+}
+
+static int octep_get_vf_config(struct net_device *dev, int vf,
+ struct ifla_vf_info *ivi)
+{
+ struct octep_device *oct = netdev_priv(dev);
+
+ if (!octep_is_vf_valid(oct, vf))
+ return -EINVAL;
+
+ ivi->vf = vf;
+ ether_addr_copy(ivi->mac, oct->vf_info[vf].mac_addr);
+ ivi->spoofchk = true;
+ ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
+ ivi->trusted = false;
+
+ return 0;
+}
+
+static int octep_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
+{
+ struct octep_device *oct = netdev_priv(dev);
+ int err;
+
+ if (!octep_is_vf_valid(oct, vf))
+ return -EINVAL;
+
+ if (!is_valid_ether_addr(mac)) {
+ dev_err(&oct->pdev->dev, "Invalid MAC Address %pM\n", mac);
+ return -EADDRNOTAVAIL;
+ }
+
+ dev_dbg(&oct->pdev->dev, "set vf-%d mac to %pM\n", vf, mac);
+ ether_addr_copy(oct->vf_info[vf].mac_addr, mac);
+ oct->vf_info[vf].flags |= OCTEON_PFVF_FLAG_MAC_SET_BY_PF;
+
+ err = octep_ctrl_net_set_mac_addr(oct, vf, mac, true);
+ if (err)
+ dev_err(&oct->pdev->dev,
+ "Set VF%d MAC address failed via host control Mbox\n",
+ vf);
+
+ return err;
+}
+
static const struct net_device_ops octep_netdev_ops = {
.ndo_open = octep_open,
.ndo_stop = octep_stop,
@@ -1146,6 +1186,8 @@ static const struct net_device_ops octep_netdev_ops = {
.ndo_set_mac_address = octep_set_mac,
.ndo_change_mtu = octep_change_mtu,
.ndo_set_features = octep_set_features,
+ .ndo_get_vf_config = octep_get_vf_config,
+ .ndo_set_vf_mac = octep_set_vf_mac
};
/**
@@ -1197,7 +1239,7 @@ static void octep_hb_timeout_task(struct work_struct *work)
miss_cnt);
rtnl_lock();
if (netif_running(oct->netdev))
- octep_stop(oct->netdev);
+ dev_close(oct->netdev);
rtnl_unlock();
}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
index fee59e0e0138..81ac4267811c 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
@@ -220,6 +220,7 @@ struct octep_iface_link_info {
/* The Octeon VF device specific info data structure.*/
struct octep_pfvf_info {
u8 mac_addr[ETH_ALEN];
+ u32 flags;
u32 mbox_version;
};
@@ -257,11 +258,17 @@ struct octep_device {
/* Pointers to Octeon Tx queues */
struct octep_iq *iq[OCTEP_MAX_IQ];
+ /* Per iq stats */
+ struct octep_iq_stats stats_iq[OCTEP_MAX_IQ];
+
/* Rx queues (OQ: Output Queue) */
u16 num_oqs;
/* Pointers to Octeon Rx queues */
struct octep_oq *oq[OCTEP_MAX_OQ];
+ /* Per oq stats */
+ struct octep_oq_stats stats_oq[OCTEP_MAX_OQ];
+
/* Hardware port number of the PCIe interface */
u16 pcie_port;
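Moving the per-queue counters into struct octep_device gives them the lifetime of the PCI device rather than of the queue objects, which are freed and reallocated on every ifdown/ifup; the iq/oq structures now only carry a pointer wired up in octep_setup_iq()/octep_setup_oq() later in this patch. An illustrative aggregation over the persistent arrays (hypothetical helper):

static u64 foo_total_rx_packets(struct octep_device *oct)
{
	u64 total = 0;
	int q;

	/* stats_oq[] belongs to the device, so the totals keep counting
	 * across queue teardown and re-setup.
	 */
	for (q = 0; q < OCTEP_MAX_QUEUES; q++)
		total += oct->stats_oq[q].packets;

	return total;
}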
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
index 2e2c3be8a0b4..0867fab61b19 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
@@ -15,6 +15,7 @@
#include <linux/io.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
+#include <linux/vmalloc.h>
#include "octep_config.h"
#include "octep_main.h"
@@ -155,12 +156,23 @@ static void octep_pfvf_set_mac_addr(struct octep_device *oct, u32 vf_id,
{
int err;
+ if (oct->vf_info[vf_id].flags & OCTEON_PFVF_FLAG_MAC_SET_BY_PF) {
+ dev_err(&oct->pdev->dev,
+ "VF%d attempted to override administrative set MAC address\n",
+ vf_id);
+ rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
+ return;
+ }
+
err = octep_ctrl_net_set_mac_addr(oct, vf_id, cmd.s_set_mac.mac_addr, true);
if (err) {
rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
- dev_err(&oct->pdev->dev, "Set VF MAC address failed via host control Mbox\n");
+ dev_err(&oct->pdev->dev, "Set VF%d MAC address failed via host control Mbox\n",
+ vf_id);
return;
}
+
+ ether_addr_copy(oct->vf_info[vf_id].mac_addr, cmd.s_set_mac.mac_addr);
rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
}
@@ -170,12 +182,21 @@ static void octep_pfvf_get_mac_addr(struct octep_device *oct, u32 vf_id,
{
int err;
+ if (oct->vf_info[vf_id].flags & OCTEON_PFVF_FLAG_MAC_SET_BY_PF) {
+ dev_dbg(&oct->pdev->dev, "VF%d MAC address set by PF\n", vf_id);
+ ether_addr_copy(rsp->s_set_mac.mac_addr,
+ oct->vf_info[vf_id].mac_addr);
+ rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
+ return;
+ }
err = octep_ctrl_net_get_mac_addr(oct, vf_id, rsp->s_set_mac.mac_addr);
if (err) {
rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
- dev_err(&oct->pdev->dev, "Get VF MAC address failed via host control Mbox\n");
+ dev_err(&oct->pdev->dev, "Get VF%d MAC address failed via host control Mbox\n",
+ vf_id);
return;
}
+ ether_addr_copy(oct->vf_info[vf_id].mac_addr, rsp->s_set_mac.mac_addr);
rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
}
@@ -185,6 +206,8 @@ static void octep_pfvf_dev_remove(struct octep_device *oct, u32 vf_id,
{
int err;
+ /* Reset VF-specific information maintained by the PF */
+ memset(&oct->vf_info[vf_id], 0, sizeof(struct octep_pfvf_info));
err = octep_ctrl_net_dev_remove(oct, vf_id);
if (err) {
rsp->s.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h
index 0dc6eead292a..386a095a99bc 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h
@@ -8,8 +8,6 @@
#ifndef _OCTEP_PFVF_MBOX_H_
#define _OCTEP_PFVF_MBOX_H_
-/* VF flags */
-#define OCTEON_PFVF_FLAG_MAC_SET_BY_PF BIT_ULL(0) /* PF has set VF MAC address */
#define OCTEON_SDP_16K_HW_FRS 16380UL
#define OCTEON_SDP_64K_HW_FRS 65531UL
@@ -23,6 +21,10 @@ enum octep_pfvf_mbox_version {
#define OCTEP_PFVF_MBOX_VERSION_CURRENT OCTEP_PFVF_MBOX_VERSION_V2
+/* VF flags */
+/* PF has set VF MAC address */
+#define OCTEON_PFVF_FLAG_MAC_SET_BY_PF BIT(0)
+
enum octep_pfvf_mbox_opcode {
OCTEP_PFVF_MBOX_CMD_VERSION,
OCTEP_PFVF_MBOX_CMD_SET_MTU,
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
index 4746a6b258f0..82b6b19e76b4 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
@@ -87,7 +87,7 @@ static int octep_oq_refill(struct octep_device *oct, struct octep_oq *oq)
page = dev_alloc_page();
if (unlikely(!page)) {
dev_err(oq->dev, "refill: rx buffer alloc failed\n");
- oq->stats.alloc_failures++;
+ oq->stats->alloc_failures++;
break;
}
@@ -98,7 +98,7 @@ static int octep_oq_refill(struct octep_device *oct, struct octep_oq *oq)
"OQ-%d buffer refill: DMA mapping error!\n",
oq->q_no);
put_page(page);
- oq->stats.alloc_failures++;
+ oq->stats->alloc_failures++;
break;
}
oq->buff_info[refill_idx].page = page;
@@ -134,6 +134,7 @@ static int octep_setup_oq(struct octep_device *oct, int q_no)
oq->netdev = oct->netdev;
oq->dev = &oct->pdev->dev;
oq->q_no = q_no;
+ oq->stats = &oct->stats_oq[q_no];
oq->max_count = CFG_GET_OQ_NUM_DESC(oct->conf);
oq->ring_size_mask = oq->max_count - 1;
oq->buffer_size = CFG_GET_OQ_BUF_SIZE(oct->conf);
@@ -337,6 +338,51 @@ static int octep_oq_check_hw_for_pkts(struct octep_device *oct,
}
/**
+ * octep_oq_next_pkt() - Move to the next packet in Rx queue.
+ *
+ * @oq: Octeon Rx queue data structure.
+ * @buff_info: Current packet buffer info.
+ * @read_idx: Current packet index in the ring.
+ * @desc_used: Current packet descriptor number.
+ *
+ * Free the resources associated with a packet.
+ * Increment packet index in the ring and packet descriptor number.
+ */
+static void octep_oq_next_pkt(struct octep_oq *oq,
+ struct octep_rx_buffer *buff_info,
+ u32 *read_idx, u32 *desc_used)
+{
+ dma_unmap_page(oq->dev, oq->desc_ring[*read_idx].buffer_ptr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ buff_info->page = NULL;
+ (*read_idx)++;
+ (*desc_used)++;
+ if (*read_idx == oq->max_count)
+ *read_idx = 0;
+}
+
+/**
+ * octep_oq_drop_rx() - Free the resources associated with a packet.
+ *
+ * @oq: Octeon Rx queue data structure.
+ * @buff_info: Current packet buffer info.
+ * @read_idx: Current packet index in the ring.
+ * @desc_used: Current packet descriptor number.
+ *
+ */
+static void octep_oq_drop_rx(struct octep_oq *oq,
+ struct octep_rx_buffer *buff_info,
+ u32 *read_idx, u32 *desc_used)
+{
+ int data_len = buff_info->len - oq->max_single_buffer_size;
+
+ while (data_len > 0) {
+ octep_oq_next_pkt(oq, buff_info, read_idx, desc_used);
+ data_len -= oq->buffer_size;
+ };
+}
+
+/**
* __octep_oq_process_rx() - Process hardware Rx queue and push to stack.
*
* @oct: Octeon device private data structure.
@@ -367,10 +413,7 @@ static int __octep_oq_process_rx(struct octep_device *oct,
desc_used = 0;
for (pkt = 0; pkt < pkts_to_process; pkt++) {
buff_info = (struct octep_rx_buffer *)&oq->buff_info[read_idx];
- dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr,
- PAGE_SIZE, DMA_FROM_DEVICE);
resp_hw = page_address(buff_info->page);
- buff_info->page = NULL;
/* Swap the length field that is in Big-Endian to CPU */
buff_info->len = be64_to_cpu(resp_hw->length);
@@ -394,36 +437,33 @@ static int __octep_oq_process_rx(struct octep_device *oct,
data_offset = OCTEP_OQ_RESP_HW_SIZE;
rx_ol_flags = 0;
}
+
+ octep_oq_next_pkt(oq, buff_info, &read_idx, &desc_used);
+
+ skb = build_skb((void *)resp_hw, PAGE_SIZE);
+ if (!skb) {
+ octep_oq_drop_rx(oq, buff_info,
+ &read_idx, &desc_used);
+ oq->stats->alloc_failures++;
+ continue;
+ }
+ skb_reserve(skb, data_offset);
+
rx_bytes += buff_info->len;
if (buff_info->len <= oq->max_single_buffer_size) {
- skb = build_skb((void *)resp_hw, PAGE_SIZE);
- skb_reserve(skb, data_offset);
skb_put(skb, buff_info->len);
- read_idx++;
- desc_used++;
- if (read_idx == oq->max_count)
- read_idx = 0;
} else {
struct skb_shared_info *shinfo;
u16 data_len;
- skb = build_skb((void *)resp_hw, PAGE_SIZE);
- skb_reserve(skb, data_offset);
/* Head fragment includes response header(s);
* subsequent fragments contain only data.
*/
skb_put(skb, oq->max_single_buffer_size);
- read_idx++;
- desc_used++;
- if (read_idx == oq->max_count)
- read_idx = 0;
-
shinfo = skb_shinfo(skb);
data_len = buff_info->len - oq->max_single_buffer_size;
while (data_len) {
- dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr,
- PAGE_SIZE, DMA_FROM_DEVICE);
buff_info = (struct octep_rx_buffer *)
&oq->buff_info[read_idx];
if (data_len < oq->buffer_size) {
@@ -438,11 +478,8 @@ static int __octep_oq_process_rx(struct octep_device *oct,
buff_info->page, 0,
buff_info->len,
buff_info->len);
- buff_info->page = NULL;
- read_idx++;
- desc_used++;
- if (read_idx == oq->max_count)
- read_idx = 0;
+
+ octep_oq_next_pkt(oq, buff_info, &read_idx, &desc_used);
}
}
@@ -458,8 +495,8 @@ static int __octep_oq_process_rx(struct octep_device *oct,
oq->host_read_idx = read_idx;
oq->refill_count += desc_used;
- oq->stats.packets += pkt;
- oq->stats.bytes += rx_bytes;
+ oq->stats->packets += pkt;
+ oq->stats->bytes += rx_bytes;
return pkt;
}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h
index 3b08e2d560dc..b4696c93d0e6 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h
@@ -186,8 +186,8 @@ struct octep_oq {
*/
u8 __iomem *pkts_sent_reg;
- /* Statistics for this OQ. */
- struct octep_oq_stats stats;
+ /* Pointer to statistics for this OQ. */
+ struct octep_oq_stats *stats;
/* Packets pending to be processed */
u32 pkts_pending;
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
index 06851b78aa28..08ee90013fef 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
@@ -81,9 +81,9 @@ int octep_iq_process_completions(struct octep_iq *iq, u16 budget)
}
iq->pkts_processed += compl_pkts;
- iq->stats.instr_completed += compl_pkts;
- iq->stats.bytes_sent += compl_bytes;
- iq->stats.sgentry_sent += compl_sg;
+ iq->stats->instr_completed += compl_pkts;
+ iq->stats->bytes_sent += compl_bytes;
+ iq->stats->sgentry_sent += compl_sg;
iq->flush_index = fi;
netdev_tx_completed_queue(iq->netdev_q, compl_pkts, compl_bytes);
@@ -187,6 +187,7 @@ static int octep_setup_iq(struct octep_device *oct, int q_no)
iq->netdev = oct->netdev;
iq->dev = &oct->pdev->dev;
iq->q_no = q_no;
+ iq->stats = &oct->stats_iq[q_no];
iq->max_count = CFG_GET_IQ_NUM_DESC(oct->conf);
iq->ring_size_mask = iq->max_count - 1;
iq->fill_threshold = CFG_GET_IQ_DB_MIN(oct->conf);
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
index 875a2c34091f..58fb39dda977 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
@@ -170,8 +170,8 @@ struct octep_iq {
*/
u16 flush_index;
- /* Statistics for this input queue. */
- struct octep_iq_stats stats;
+ /* Pointer to statistics for this input queue. */
+ struct octep_iq_stats *stats;
/* Pointer to the Virtual Base addr of the input ring. */
struct octep_tx_desc_hw *desc_ring;
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
index a1979b45e355..241a7e7c7ad2 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
@@ -25,7 +25,7 @@ static const char octep_vf_gstrings_global_stats[][ETH_GSTRING_LEN] = {
"rx_dropped_bytes_fifo_full",
};
-#define OCTEP_VF_GLOBAL_STATS_CNT (sizeof(octep_vf_gstrings_global_stats) / ETH_GSTRING_LEN)
+#define OCTEP_VF_GLOBAL_STATS_CNT ARRAY_SIZE(octep_vf_gstrings_global_stats)
static const char octep_vf_gstrings_tx_q_stats[][ETH_GSTRING_LEN] = {
"tx_packets_posted[Q-%u]",
@@ -34,7 +34,7 @@ static const char octep_vf_gstrings_tx_q_stats[][ETH_GSTRING_LEN] = {
"tx_busy[Q-%u]",
};
-#define OCTEP_VF_TX_Q_STATS_CNT (sizeof(octep_vf_gstrings_tx_q_stats) / ETH_GSTRING_LEN)
+#define OCTEP_VF_TX_Q_STATS_CNT ARRAY_SIZE(octep_vf_gstrings_tx_q_stats)
static const char octep_vf_gstrings_rx_q_stats[][ETH_GSTRING_LEN] = {
"rx_packets[Q-%u]",
@@ -42,7 +42,7 @@ static const char octep_vf_gstrings_rx_q_stats[][ETH_GSTRING_LEN] = {
"rx_alloc_errors[Q-%u]",
};
-#define OCTEP_VF_RX_Q_STATS_CNT (sizeof(octep_vf_gstrings_rx_q_stats) / ETH_GSTRING_LEN)
+#define OCTEP_VF_RX_Q_STATS_CNT ARRAY_SIZE(octep_vf_gstrings_rx_q_stats)
static void octep_vf_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
@@ -58,32 +58,25 @@ static void octep_vf_get_strings(struct net_device *netdev,
{
struct octep_vf_device *oct = netdev_priv(netdev);
u16 num_queues = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
- char *strings = (char *)data;
+ const char *str;
int i, j;
switch (stringset) {
case ETH_SS_STATS:
- for (i = 0; i < OCTEP_VF_GLOBAL_STATS_CNT; i++) {
- snprintf(strings, ETH_GSTRING_LEN,
- octep_vf_gstrings_global_stats[i]);
- strings += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < OCTEP_VF_GLOBAL_STATS_CNT; i++)
+ ethtool_puts(&data, octep_vf_gstrings_global_stats[i]);
- for (i = 0; i < num_queues; i++) {
+ for (i = 0; i < num_queues; i++)
for (j = 0; j < OCTEP_VF_TX_Q_STATS_CNT; j++) {
- snprintf(strings, ETH_GSTRING_LEN,
- octep_vf_gstrings_tx_q_stats[j], i);
- strings += ETH_GSTRING_LEN;
+ str = octep_vf_gstrings_tx_q_stats[j];
+ ethtool_sprintf(&data, str, i);
}
- }
- for (i = 0; i < num_queues; i++) {
+ for (i = 0; i < num_queues; i++)
for (j = 0; j < OCTEP_VF_RX_Q_STATS_CNT; j++) {
- snprintf(strings, ETH_GSTRING_LEN,
- octep_vf_gstrings_rx_q_stats[j], i);
- strings += ETH_GSTRING_LEN;
+ str = octep_vf_gstrings_rx_q_stats[j];
+ ethtool_sprintf(&data, str, i);
}
- }
break;
default:
break;
@@ -121,12 +114,9 @@ static void octep_vf_get_ethtool_stats(struct net_device *netdev,
iface_tx_stats = &oct->iface_tx_stats;
iface_rx_stats = &oct->iface_rx_stats;
- for (q = 0; q < oct->num_oqs; q++) {
- struct octep_vf_iq *iq = oct->iq[q];
- struct octep_vf_oq *oq = oct->oq[q];
-
- tx_busy_errors += iq->stats.tx_busy;
- rx_alloc_errors += oq->stats.alloc_failures;
+ for (q = 0; q < OCTEP_VF_MAX_QUEUES; q++) {
+ tx_busy_errors += oct->stats_iq[q].tx_busy;
+ rx_alloc_errors += oct->stats_oq[q].alloc_failures;
}
i = 0;
data[i++] = rx_alloc_errors;
@@ -141,22 +131,18 @@ static void octep_vf_get_ethtool_stats(struct net_device *netdev,
data[i++] = iface_rx_stats->dropped_octets_fifo_full;
/* Per Tx Queue stats */
- for (q = 0; q < oct->num_iqs; q++) {
- struct octep_vf_iq *iq = oct->iq[q];
-
- data[i++] = iq->stats.instr_posted;
- data[i++] = iq->stats.instr_completed;
- data[i++] = iq->stats.bytes_sent;
- data[i++] = iq->stats.tx_busy;
+ for (q = 0; q < OCTEP_VF_MAX_QUEUES; q++) {
+ data[i++] = oct->stats_iq[q].instr_posted;
+ data[i++] = oct->stats_iq[q].instr_completed;
+ data[i++] = oct->stats_iq[q].bytes_sent;
+ data[i++] = oct->stats_iq[q].tx_busy;
}
/* Per Rx Queue stats */
for (q = 0; q < oct->num_oqs; q++) {
- struct octep_vf_oq *oq = oct->oq[q];
-
- data[i++] = oq->stats.packets;
- data[i++] = oq->stats.bytes;
- data[i++] = oq->stats.alloc_failures;
+ data[i++] = oct->stats_oq[q].packets;
+ data[i++] = oct->stats_oq[q].bytes;
+ data[i++] = oct->stats_oq[q].alloc_failures;
}
}
@@ -258,6 +244,15 @@ static int octep_vf_get_link_ksettings(struct net_device *netdev,
return 0;
}
+static void octep_vf_get_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct octep_vf_device *oct = netdev_priv(dev);
+
+ channel->max_combined = CFG_GET_PORTS_MAX_IO_RINGS(oct->conf);
+ channel->combined_count = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+}
+
static const struct ethtool_ops octep_vf_ethtool_ops = {
.get_drvinfo = octep_vf_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -265,6 +260,7 @@ static const struct ethtool_ops octep_vf_ethtool_ops = {
.get_sset_count = octep_vf_get_sset_count,
.get_ethtool_stats = octep_vf_get_ethtool_stats,
.get_link_ksettings = octep_vf_get_link_ksettings,
+ .get_channels = octep_vf_get_channels,
};
void octep_vf_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
index dd49d0b8b494..420c3f4cf741 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
@@ -18,8 +18,6 @@
#include "octep_vf_config.h"
#include "octep_vf_main.h"
-struct workqueue_struct *octep_vf_wq;
-
/* Supported Devices */
static const struct pci_device_id octep_vf_pci_id_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN93_VF)},
@@ -574,7 +572,7 @@ static int octep_vf_iq_full_check(struct octep_vf_iq *iq)
* caused queues to get re-enabled after
* being stopped
*/
- iq->stats.restart_cnt++;
+ iq->stats->restart_cnt++;
fallthrough;
case 1: /* Queue left enabled, since IQ is not yet full*/
return 0;
@@ -731,7 +729,7 @@ ring_dbell:
/* Flush the hw descriptors before writing to doorbell */
smp_wmb();
writel(iq->fill_cnt, iq->doorbell_reg);
- iq->stats.instr_posted += iq->fill_cnt;
+ iq->stats->instr_posted += iq->fill_cnt;
iq->fill_cnt = 0;
return NETDEV_TX_OK;
}
@@ -786,27 +784,16 @@ static void octep_vf_get_stats64(struct net_device *netdev,
tx_bytes = 0;
rx_packets = 0;
rx_bytes = 0;
- for (q = 0; q < oct->num_oqs; q++) {
- struct octep_vf_iq *iq = oct->iq[q];
- struct octep_vf_oq *oq = oct->oq[q];
-
- tx_packets += iq->stats.instr_completed;
- tx_bytes += iq->stats.bytes_sent;
- rx_packets += oq->stats.packets;
- rx_bytes += oq->stats.bytes;
+ for (q = 0; q < OCTEP_VF_MAX_QUEUES; q++) {
+ tx_packets += oct->stats_iq[q].instr_completed;
+ tx_bytes += oct->stats_iq[q].bytes_sent;
+ rx_packets += oct->stats_oq[q].packets;
+ rx_bytes += oct->stats_oq[q].bytes;
}
stats->tx_packets = tx_packets;
stats->tx_bytes = tx_bytes;
stats->rx_packets = rx_packets;
stats->rx_bytes = rx_bytes;
- if (!octep_vf_get_if_stats(oct)) {
- stats->multicast = oct->iface_rx_stats.mcast_pkts;
- stats->rx_errors = oct->iface_rx_stats.err_pkts;
- stats->rx_dropped = oct->iface_rx_stats.dropped_pkts_fifo_full +
- oct->iface_rx_stats.err_pkts;
- stats->rx_missed_errors = oct->iface_rx_stats.dropped_pkts_fifo_full;
- stats->tx_dropped = oct->iface_tx_stats.dropped;
- }
}
/**
@@ -846,7 +833,9 @@ static void octep_vf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
struct octep_vf_device *oct = netdev_priv(netdev);
netdev_hold(netdev, NULL, GFP_ATOMIC);
- schedule_work(&oct->tx_timeout_task);
+ if (!schedule_work(&oct->tx_timeout_task))
+ netdev_put(netdev, NULL);
+
}
static int octep_vf_set_mac(struct net_device *netdev, void *p)
@@ -881,7 +870,7 @@ static int octep_vf_change_mtu(struct net_device *netdev, int new_mtu)
err = octep_vf_mbox_set_mtu(oct, new_mtu);
if (!err) {
oct->link_info.mtu = new_mtu;
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
}
return err;
}
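The WRITE_ONCE() on netdev->mtu above is meant to pair with lockless readers using READ_ONCE(); a minimal sketch of such a reader (hypothetical helper, assuming the read side holds no lock):

	static unsigned int foo_frame_limit(const struct net_device *netdev)
	{
		/* Single annotated load, pairs with WRITE_ONCE() at the writer */
		return READ_ONCE(netdev->mtu) + ETH_HLEN;
	}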
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
index 5769f62545cd..b9f13506f462 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
@@ -246,11 +246,17 @@ struct octep_vf_device {
/* Pointers to Octeon Tx queues */
struct octep_vf_iq *iq[OCTEP_VF_MAX_IQ];
+ /* Per iq stats */
+ struct octep_vf_iq_stats stats_iq[OCTEP_VF_MAX_IQ];
+
/* Rx queues (OQ: Output Queue) */
u16 num_oqs;
/* Pointers to Octeon Rx queues */
struct octep_vf_oq *oq[OCTEP_VF_MAX_OQ];
+ /* Per oq stats */
+ struct octep_vf_oq_stats stats_oq[OCTEP_VF_MAX_OQ];
+
/* Hardware port number of the PCIe interface */
u16 pcie_port;
@@ -314,8 +320,6 @@ static inline u16 OCTEP_VF_MINOR_REV(struct octep_vf_device *oct)
#define octep_vf_read_csr64(octep_vf_dev, reg_off) \
readq((octep_vf_dev)->mmio.hw_addr + (reg_off))
-extern struct workqueue_struct *octep_vf_wq;
-
int octep_vf_device_setup(struct octep_vf_device *oct);
int octep_vf_setup_iqs(struct octep_vf_device *oct);
void octep_vf_free_iqs(struct octep_vf_device *oct);
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.c
index 2eab21e43048..445b626efe11 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.c
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.c
@@ -7,6 +7,7 @@
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
#include "octep_vf_config.h"
#include "octep_vf_main.h"
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
index 82821bc28634..d70c8be3cfc4 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
@@ -87,7 +87,7 @@ static int octep_vf_oq_refill(struct octep_vf_device *oct, struct octep_vf_oq *o
page = dev_alloc_page();
if (unlikely(!page)) {
dev_err(oq->dev, "refill: rx buffer alloc failed\n");
- oq->stats.alloc_failures++;
+ oq->stats->alloc_failures++;
break;
}
@@ -98,7 +98,7 @@ static int octep_vf_oq_refill(struct octep_vf_device *oct, struct octep_vf_oq *o
"OQ-%d buffer refill: DMA mapping error!\n",
oq->q_no);
put_page(page);
- oq->stats.alloc_failures++;
+ oq->stats->alloc_failures++;
break;
}
oq->buff_info[refill_idx].page = page;
@@ -134,6 +134,7 @@ static int octep_vf_setup_oq(struct octep_vf_device *oct, int q_no)
oq->netdev = oct->netdev;
oq->dev = &oct->pdev->dev;
oq->q_no = q_no;
+ oq->stats = &oct->stats_oq[q_no];
oq->max_count = CFG_GET_OQ_NUM_DESC(oct->conf);
oq->ring_size_mask = oq->max_count - 1;
oq->buffer_size = CFG_GET_OQ_BUF_SIZE(oct->conf);
@@ -458,8 +459,8 @@ static int __octep_vf_oq_process_rx(struct octep_vf_device *oct,
oq->host_read_idx = read_idx;
oq->refill_count += desc_used;
- oq->stats.packets += pkt;
- oq->stats.bytes += rx_bytes;
+ oq->stats->packets += pkt;
+ oq->stats->bytes += rx_bytes;
return pkt;
}
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h
index fe46838b5200..9e296b7d7e34 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h
@@ -187,7 +187,7 @@ struct octep_vf_oq {
u8 __iomem *pkts_sent_reg;
/* Statistics for this OQ. */
- struct octep_vf_oq_stats stats;
+ struct octep_vf_oq_stats *stats;
/* Packets pending to be processed */
u32 pkts_pending;
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c
index 47a5c054fdb6..8180e5ce3d7e 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c
@@ -82,9 +82,9 @@ int octep_vf_iq_process_completions(struct octep_vf_iq *iq, u16 budget)
}
iq->pkts_processed += compl_pkts;
- iq->stats.instr_completed += compl_pkts;
- iq->stats.bytes_sent += compl_bytes;
- iq->stats.sgentry_sent += compl_sg;
+ iq->stats->instr_completed += compl_pkts;
+ iq->stats->bytes_sent += compl_bytes;
+ iq->stats->sgentry_sent += compl_sg;
iq->flush_index = fi;
netif_subqueue_completed_wake(iq->netdev, iq->q_no, compl_pkts,
@@ -186,6 +186,7 @@ static int octep_vf_setup_iq(struct octep_vf_device *oct, int q_no)
iq->netdev = oct->netdev;
iq->dev = &oct->pdev->dev;
iq->q_no = q_no;
+ iq->stats = &oct->stats_iq[q_no];
iq->max_count = CFG_GET_IQ_NUM_DESC(oct->conf);
iq->ring_size_mask = iq->max_count - 1;
iq->fill_threshold = CFG_GET_IQ_DB_MIN(oct->conf);
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h
index f338b975103c..1cede90e3a5f 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h
@@ -129,7 +129,7 @@ struct octep_vf_iq {
u16 flush_index;
/* Statistics for this input queue. */
- struct octep_vf_iq_stats stats;
+ struct octep_vf_iq_stats *stats;
/* Pointer to the Virtual Base addr of the input ring. */
struct octep_vf_tx_desc_hw *desc_ring;
diff --git a/drivers/net/ethernet/marvell/octeontx2/Kconfig b/drivers/net/ethernet/marvell/octeontx2/Kconfig
index a32d85d6f599..35c4f5f64f58 100644
--- a/drivers/net/ethernet/marvell/octeontx2/Kconfig
+++ b/drivers/net/ethernet/marvell/octeontx2/Kconfig
@@ -46,3 +46,11 @@ config OCTEONTX2_VF
depends on OCTEONTX2_PF
help
This driver supports Marvell's OcteonTX2 NIC virtual function.
+
+config RVU_ESWITCH
+ tristate "Marvell RVU E-Switch support"
+ depends on OCTEONTX2_PF
+ default m
+ help
+ This driver supports Marvell's RVU E-Switch that
+ provides internal SRIOV packet steering and switching.
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
index 3cf4c8285c90..244de500963e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
@@ -11,4 +11,6 @@ rvu_mbox-y := mbox.o rvu_trace.o
rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \
rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o rvu_switch.o \
- rvu_sdp.o rvu_npc_hash.o mcs.o mcs_rvu_if.o mcs_cnf10kb.o
+ rvu_sdp.o rvu_npc_hash.o mcs.o mcs_rvu_if.o mcs_cnf10kb.o \
+ rvu_rep.o cn20k/mbox_init.o cn20k/nix.o cn20k/debugfs.o \
+ cn20k/npa.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index b86f3224f0b7..42044cd810b1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -21,8 +21,9 @@
#include "rvu.h"
#include "lmac_common.h"
-#define DRV_NAME "Marvell-CGX/RPM"
-#define DRV_STRING "Marvell CGX/RPM Driver"
+#define DRV_NAME "Marvell-CGX-RPM"
+
+#define CGX_RX_STAT_GLOBAL_INDEX 9
static LIST_HEAD(cgx_list);
@@ -64,8 +65,18 @@ static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en);
/* Supported devices */
static const struct pci_device_id cgx_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
- { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM) },
- { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM,
+ PCI_ANY_ID, PCI_SUBSYS_DEVID_CN10K_A) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM,
+ PCI_ANY_ID, PCI_SUBSYS_DEVID_CNF10K_A) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM,
+ PCI_ANY_ID, PCI_SUBSYS_DEVID_CNF10K_B) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM,
+ PCI_ANY_ID, PCI_SUBSYS_DEVID_CN10K_B) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM,
+ PCI_ANY_ID, PCI_SUBSYS_DEVID_CN20KA) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM,
+ PCI_ANY_ID, PCI_SUBSYS_DEVID_CNF20KA) },
{ 0, } /* end of table */
};
@@ -110,6 +121,11 @@ struct mac_ops *get_mac_ops(void *cgxd)
return ((struct cgx *)cgxd)->mac_ops;
}
+u32 cgx_get_fifo_len(void *cgxd)
+{
+ return ((struct cgx *)cgxd)->fifo_len;
+}
+
void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val)
{
writeq(val, cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) +
@@ -207,6 +223,24 @@ u8 cgx_lmac_get_p2x(int cgx_id, int lmac_id)
return (cfg & CMR_P2X_SEL_MASK) >> CMR_P2X_SEL_SHIFT;
}
+static u8 cgx_get_nix_resetbit(struct cgx *cgx)
+{
+ int first_lmac;
+ u8 p2x;
+
+ /* Non-98XX silicon supports only the NIX0 block */
+ if (cgx->pdev->subsystem_device != PCI_SUBSYS_DEVID_98XX)
+ return CGX_NIX0_RESET;
+
+ first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac);
+ p2x = cgx_lmac_get_p2x(cgx->cgx_id, first_lmac);
+
+ if (p2x == CMR_P2X_SEL_NIX1)
+ return CGX_NIX1_RESET;
+ else
+ return CGX_NIX0_RESET;
+}
+
/* Ensure the required lock for event queue(where asynchronous events are
* posted) is acquired before calling this API. Else an asynchronous event(with
* latest link status) can reach the destination before this function returns
@@ -499,7 +533,7 @@ static u32 cgx_get_lmac_fifo_len(void *cgxd, int lmac_id)
u8 num_lmacs;
u32 fifo_len;
- fifo_len = cgx->mac_ops->fifo_len;
+ fifo_len = cgx->fifo_len;
num_lmacs = cgx->mac_ops->get_nr_lmacs(cgx);
switch (num_lmacs) {
@@ -682,6 +716,11 @@ int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)
if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
+
+ /* pass lmac as 0 for CGX_CMR_RX_STAT9-12 */
+ if (idx >= CGX_RX_STAT_GLOBAL_INDEX)
+ lmac_id = 0;
+
*rx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8));
return 0;
}
@@ -701,6 +740,30 @@ u64 cgx_features_get(void *cgxd)
return ((struct cgx *)cgxd)->hw_features;
}
+int cgx_stats_reset(void *cgxd, int lmac_id)
+{
+ struct cgx *cgx = cgxd;
+ int stat_id;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+
+ for (stat_id = 0 ; stat_id < CGX_RX_STATS_COUNT; stat_id++) {
+ if (stat_id >= CGX_RX_STAT_GLOBAL_INDEX)
+ /* pass lmac as 0 for CGX_CMR_RX_STAT9-12 */
+ cgx_write(cgx, 0,
+ (CGXX_CMRX_RX_STAT0 + (stat_id * 8)), 0);
+ else
+ cgx_write(cgx, lmac_id,
+ (CGXX_CMRX_RX_STAT0 + (stat_id * 8)), 0);
+ }
+
+ for (stat_id = 0 ; stat_id < CGX_TX_STATS_COUNT; stat_id++)
+ cgx_write(cgx, lmac_id, CGXX_CMRX_TX_STAT0 + (stat_id * 8), 0);
+
+ return 0;
+}
+
static int cgx_set_fec_stats_count(struct cgx_link_user_info *linfo)
{
if (!linfo->fec)
@@ -1118,17 +1181,25 @@ static int cgx_link_usertable_index_map(int speed)
static void set_mod_args(struct cgx_set_link_mode_args *args,
u32 speed, u8 duplex, u8 autoneg, u64 mode)
{
- /* Fill default values incase of user did not pass
- * valid parameters
+ int mode_baseidx;
+ u8 cgx_mode;
+
+ if (args->multimode) {
+ args->mode |= mode;
+ return;
+ }
+
+ /* Derive mode_base_idx and mode fields based
+ * on cgx_mode value
*/
- if (args->duplex == DUPLEX_UNKNOWN)
- args->duplex = duplex;
- if (args->speed == SPEED_UNKNOWN)
- args->speed = speed;
- if (args->an == AUTONEG_UNKNOWN)
- args->an = autoneg;
+ cgx_mode = find_first_bit((unsigned long *)&mode,
+ CGX_MODE_MAX);
args->mode = mode;
- args->ports = 0;
+ mode_baseidx = cgx_mode - 41;
+ if (mode_baseidx > 0) {
+ args->mode_baseidx = 1;
+ args->mode = BIT_ULL(mode_baseidx);
+ }
}
static void otx2_map_ethtool_link_modes(u64 bitmask,
@@ -1136,16 +1207,16 @@ static void otx2_map_ethtool_link_modes(u64 bitmask,
{
switch (bitmask) {
case ETHTOOL_LINK_MODE_10baseT_Half_BIT:
- set_mod_args(args, 10, 1, 1, BIT_ULL(CGX_MODE_SGMII));
+ set_mod_args(args, 10, 1, 1, BIT_ULL(CGX_MODE_SGMII_10M_BIT));
break;
case ETHTOOL_LINK_MODE_10baseT_Full_BIT:
- set_mod_args(args, 10, 0, 1, BIT_ULL(CGX_MODE_SGMII));
+ set_mod_args(args, 10, 0, 1, BIT_ULL(CGX_MODE_SGMII_10M_BIT));
break;
case ETHTOOL_LINK_MODE_100baseT_Half_BIT:
- set_mod_args(args, 100, 1, 1, BIT_ULL(CGX_MODE_SGMII));
+ set_mod_args(args, 100, 1, 1, BIT_ULL(CGX_MODE_SGMII_100M_BIT));
break;
case ETHTOOL_LINK_MODE_100baseT_Full_BIT:
- set_mod_args(args, 100, 0, 1, BIT_ULL(CGX_MODE_SGMII));
+ set_mod_args(args, 100, 0, 1, BIT_ULL(CGX_MODE_SGMII_100M_BIT));
break;
case ETHTOOL_LINK_MODE_1000baseT_Half_BIT:
set_mod_args(args, 1000, 1, 1, BIT_ULL(CGX_MODE_SGMII));
@@ -1417,25 +1488,36 @@ int cgx_get_fwdata_base(u64 *base)
}
int cgx_set_link_mode(void *cgxd, struct cgx_set_link_mode_args args,
+ struct cgx_lmac_fwdata_s *linkmodes,
int cgx_id, int lmac_id)
{
struct cgx *cgx = cgxd;
u64 req = 0, resp;
+ u8 bit;
if (!cgx)
return -ENODEV;
- if (args.mode)
- otx2_map_ethtool_link_modes(args.mode, &args);
- if (!args.speed && args.duplex && !args.an)
- return -EINVAL;
+ for_each_set_bit(bit, args.advertising,
+ __ETHTOOL_LINK_MODE_MASK_NBITS)
+ otx2_map_ethtool_link_modes(bit, &args);
+
+ if (args.multimode) {
+ if (linkmodes->advertised_link_modes_own != CGX_CMD_OWN_NS)
+ return -EBUSY;
+
+ linkmodes->advertised_link_modes = args.mode;
+ /* Update ownership */
+ linkmodes->advertised_link_modes_own = CGX_CMD_OWN_FIRMWARE;
+ args.mode = GENMASK_ULL(41, 0);
+ }
req = FIELD_SET(CMDREG_ID, CGX_CMD_MODE_CHANGE, req);
req = FIELD_SET(CMDMODECHANGE_SPEED,
cgx_link_usertable_index_map(args.speed), req);
req = FIELD_SET(CMDMODECHANGE_DUPLEX, args.duplex, req);
req = FIELD_SET(CMDMODECHANGE_AN, args.an, req);
- req = FIELD_SET(CMDMODECHANGE_PORT, args.ports, req);
+ req = FIELD_SET(CMDMODECHANGE_MODE_BASEIDX, args.mode_baseidx, req);
req = FIELD_SET(CMDMODECHANGE_FLAGS, args.mode, req);
return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
@@ -1621,9 +1703,11 @@ unsigned long cgx_get_lmac_bmap(void *cgxd)
static int cgx_lmac_init(struct cgx *cgx)
{
+ u8 max_dmac_filters;
struct lmac *lmac;
+ int err, filter;
+ unsigned int i;
u64 lmac_list;
- int i, err;
/* lmac_list specifies which lmacs are enabled
* when bit n is set to 1, LMAC[n] is enabled
@@ -1649,7 +1733,7 @@ static int cgx_lmac_init(struct cgx *cgx)
err = -ENOMEM;
goto err_lmac_free;
}
- sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i);
+ sprintf(lmac->name, "cgx_fwi_%u_%u", cgx->cgx_id, i);
if (cgx->mac_ops->non_contiguous_serdes_lane) {
lmac->lmac_id = __ffs64(lmac_list);
lmac_list &= ~BIT_ULL(lmac->lmac_id);
@@ -1662,6 +1746,8 @@ static int cgx_lmac_init(struct cgx *cgx)
cgx->mac_ops->dmac_filter_count /
cgx->lmac_count;
+ max_dmac_filters = lmac->mac_to_index_bmap.max;
+
err = rvu_alloc_bitmap(&lmac->mac_to_index_bmap);
if (err)
goto err_name_free;
@@ -1691,8 +1777,19 @@ static int cgx_lmac_init(struct cgx *cgx)
set_bit(lmac->lmac_id, &cgx->lmac_bmap);
cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, true);
lmac->lmac_type = cgx->mac_ops->get_lmac_type(cgx, lmac->lmac_id);
+
+ /* Disable stale DMAC filters for sane state */
+ for (filter = 0; filter < max_dmac_filters; filter++)
+ cgx_lmac_addr_del(cgx->cgx_id, lmac->lmac_id, filter);
+
+ /* cgx_lmac_addr_del() does not clear the entry at index 0,
+ * so clear it explicitly
+ */
+ cgx_lmac_addr_reset(cgx->cgx_id, lmac->lmac_id);
}
+ /* Start X2P reset on given MAC block */
+ cgx->mac_ops->mac_x2p_reset(cgx, true);
return cgx_lmac_verify_fwi_version(cgx);
err_bitmap_free:
@@ -1738,7 +1835,7 @@ static void cgx_populate_features(struct cgx *cgx)
u64 cfg;
cfg = cgx_read(cgx, 0, CGX_CONST);
- cgx->mac_ops->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg);
+ cgx->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg);
cgx->max_lmac_per_mac = FIELD_GET(CGX_CONST_MAX_LMACS, cfg);
if (is_dev_rpm(cgx))
@@ -1758,6 +1855,45 @@ static u8 cgx_get_rxid_mapoffset(struct cgx *cgx)
return 0x60;
}
+static void cgx_x2p_reset(void *cgxd, bool enable)
+{
+ struct cgx *cgx = cgxd;
+ int lmac_id;
+ u64 cfg;
+
+ if (enable) {
+ for_each_set_bit(lmac_id, &cgx->lmac_bmap, cgx->max_lmac_per_mac)
+ cgx->mac_ops->mac_enadis_rx(cgx, lmac_id, false);
+
+ usleep_range(1000, 2000);
+
+ cfg = cgx_read(cgx, 0, CGXX_CMR_GLOBAL_CONFIG);
+ cfg |= cgx_get_nix_resetbit(cgx) | CGX_NSCI_DROP;
+ cgx_write(cgx, 0, CGXX_CMR_GLOBAL_CONFIG, cfg);
+ } else {
+ cfg = cgx_read(cgx, 0, CGXX_CMR_GLOBAL_CONFIG);
+ cfg &= ~(cgx_get_nix_resetbit(cgx) | CGX_NSCI_DROP);
+ cgx_write(cgx, 0, CGXX_CMR_GLOBAL_CONFIG, cfg);
+ }
+}
+
+static int cgx_enadis_rx(void *cgxd, int lmac_id, bool enable)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
+ if (enable)
+ cfg |= DATA_PKT_RX_EN;
+ else
+ cfg &= ~DATA_PKT_RX_EN;
+ cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
+ return 0;
+}
+
static struct mac_ops cgx_mac_ops = {
.name = "cgx",
.csr_offset = 0,
@@ -1788,6 +1924,9 @@ static struct mac_ops cgx_mac_ops = {
.pfc_config = cgx_lmac_pfc_config,
.mac_get_pfc_frm_cfg = cgx_lmac_get_pfc_frm_cfg,
.mac_reset = cgx_lmac_reset,
+ .mac_stats_reset = cgx_stats_reset,
+ .mac_x2p_reset = cgx_x2p_reset,
+ .mac_enadis_rx = cgx_enadis_rx,
};
static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -1824,6 +1963,12 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_disable_device;
}
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "DMA mask config failed, abort\n");
+ goto err_release_regions;
+ }
+
/* MAP configuration registers */
cgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
if (!cgx->reg_base) {
@@ -1832,6 +1977,14 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_release_regions;
}
+ if (!is_cn20k(pdev) &&
+ !is_cgx_mapped_to_nix(pdev->subsystem_device, cgx->cgx_id)) {
+ dev_notice(dev, "CGX %d not mapped to NIX, skipping probe\n",
+ cgx->cgx_id);
+ err = -ENODEV;
+ goto err_release_regions;
+ }
+
cgx->lmac_count = cgx->mac_ops->get_nr_lmacs(cgx);
if (!cgx->lmac_count) {
dev_notice(dev, "CGX %d LMAC count is zero, skipping probe\n", cgx->cgx_id);
@@ -1841,7 +1994,7 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
nvec = pci_msix_vec_count(cgx->pdev);
err = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
- if (err < 0 || err != nvec) {
+ if (err < 0) {
dev_err(dev, "Request for %d msix vectors failed, err %d\n",
nvec, err);
goto err_release_regions;
@@ -1852,7 +2005,7 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* init wq for processing linkup requests */
INIT_WORK(&cgx->cgx_cmd_work, cgx_lmac_linkup_work);
- cgx->cgx_cmd_workq = alloc_workqueue("cgx_cmd_workq", 0, 0);
+ cgx->cgx_cmd_workq = alloc_workqueue("cgx_cmd_workq", WQ_PERCPU, 0);
if (!cgx->cgx_cmd_workq) {
dev_err(dev, "alloc workqueue failed for cgx cmd");
err = -ENOMEM;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index 6f7d1dee5830..92ccf343dfe0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -32,6 +32,10 @@
#define CGX_LMAC_TYPE_MASK 0xF
#define CGXX_CMRX_INT 0x040
#define FW_CGX_INT BIT_ULL(1)
+#define CGXX_CMR_GLOBAL_CONFIG 0x08
+#define CGX_NIX0_RESET BIT_ULL(2)
+#define CGX_NIX1_RESET BIT_ULL(3)
+#define CGX_NSCI_DROP BIT_ULL(9)
#define CGXX_CMRX_INT_ENA_W1S 0x058
#define CGXX_CMRX_RX_ID_MAP 0x060
#define CGXX_CMRX_RX_STAT0 0x070
@@ -141,6 +145,7 @@ int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id);
int cgx_lmac_evh_unregister(void *cgxd, int lmac_id);
int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat);
int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat);
+int cgx_stats_reset(void *cgxd, int lmac_id);
int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable);
int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable);
int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr);
@@ -156,16 +161,13 @@ int cgx_get_link_info(void *cgxd, int lmac_id,
struct cgx_link_user_info *linfo);
int cgx_lmac_linkup_start(void *cgxd);
int cgx_get_fwdata_base(u64 *base);
-int cgx_lmac_get_pause_frm(void *cgxd, int lmac_id,
- u8 *tx_pause, u8 *rx_pause);
-int cgx_lmac_set_pause_frm(void *cgxd, int lmac_id,
- u8 tx_pause, u8 rx_pause);
void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable);
u8 cgx_lmac_get_p2x(int cgx_id, int lmac_id);
int cgx_set_fec(u64 fec, int cgx_id, int lmac_id);
int cgx_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp);
int cgx_get_phy_fec_stats(void *cgxd, int lmac_id);
int cgx_set_link_mode(void *cgxd, struct cgx_set_link_mode_args args,
+ struct cgx_lmac_fwdata_s *linkmodes,
int cgx_id, int lmac_id);
u64 cgx_features_get(void *cgxd);
struct mac_ops *get_mac_ops(void *cgxd);
@@ -184,4 +186,5 @@ int cgx_lmac_get_pfc_frm_cfg(void *cgxd, int lmac_id, u8 *tx_pause,
int verify_lmac_fc_cfg(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause,
int pfvf_idx);
int cgx_lmac_reset(void *cgxd, int lmac_id, u8 pf_req_flr);
+u32 cgx_get_fifo_len(void *cgxd);
#endif /* CGX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
index d4a27c882a5b..39352d451cc3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
@@ -95,7 +95,31 @@ enum CGX_MODE_ {
CGX_MODE_100G_C2M,
CGX_MODE_100G_CR4,
CGX_MODE_100G_KR4,
- CGX_MODE_MAX /* = 29 */
+ CGX_MODE_LAUI_2_C2C_BIT,
+ CGX_MODE_LAUI_2_C2M_BIT,
+ CGX_MODE_50GBASE_CR2_C_BIT,
+ CGX_MODE_50GBASE_KR2_C_BIT, /* = 30 */
+ CGX_MODE_100GAUI_2_C2C_BIT,
+ CGX_MODE_100GAUI_2_C2M_BIT,
+ CGX_MODE_100GBASE_CR2_BIT,
+ CGX_MODE_100GBASE_KR2_BIT,
+ CGX_MODE_SFI_1G_BIT,
+ CGX_MODE_25GBASE_CR_C_BIT,
+ CGX_MODE_25GBASE_KR_C_BIT,
+ CGX_MODE_SGMII_10M_BIT,
+ CGX_MODE_SGMII_100M_BIT, /* = 39 */
+ CGX_MODE_2500_BASEX_BIT = 42, /* Mode group 1 */
+ CGX_MODE_5000_BASEX_BIT,
+ CGX_MODE_O_USGMII_BIT,
+ CGX_MODE_Q_USGMII_BIT,
+ CGX_MODE_2_5G_USXGMII_BIT,
+ CGX_MODE_5G_USXGMII_BIT,
+ CGX_MODE_10G_SXGMII_BIT,
+ CGX_MODE_10G_DXGMII_BIT,
+ CGX_MODE_10G_QXGMII_BIT,
+ CGX_MODE_TP_BIT,
+ CGX_MODE_FIBER_BIT,
+ CGX_MODE_MAX /* = 53 */
};
/* REQUEST ID types. Input to firmware */
enum cgx_cmd_id {
@@ -258,7 +282,12 @@ struct cgx_lnk_sts {
#define CMDMODECHANGE_SPEED GENMASK_ULL(11, 8)
#define CMDMODECHANGE_DUPLEX GENMASK_ULL(12, 12)
#define CMDMODECHANGE_AN GENMASK_ULL(13, 13)
-#define CMDMODECHANGE_PORT GENMASK_ULL(21, 14)
+/* This field categorizes the mode ID (FLAGS) range to accommodate
+ * more modes.
+ * For mode IDs 0 - 41 this field is 0.
+ * For mode IDs 42 - 83 this field is 1.
+ */
+#define CMDMODECHANGE_MODE_BASEIDX GENMASK_ULL(21, 20)
#define CMDMODECHANGE_FLAGS GENMASK_ULL(63, 22)
/* LINK_BRING_UP command timeout */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/api.h b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/api.h
new file mode 100644
index 000000000000..4285b5d6a6a2
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/api.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#ifndef CN20K_API_H
+#define CN20K_API_H
+
+#include "../rvu.h"
+
+struct ng_rvu {
+ struct mbox_ops *rvu_mbox_ops;
+ struct qmem *pf_mbox_addr;
+ struct qmem *vf_mbox_addr;
+};
+
+/* Mbox related APIs */
+int cn20k_rvu_mbox_init(struct rvu *rvu, int type, int num);
+int cn20k_rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
+ int num, int type, unsigned long *pf_bmap);
+void cn20k_free_mbox_memory(struct rvu *rvu);
+int cn20k_register_afpf_mbox_intr(struct rvu *rvu);
+int cn20k_register_afvf_mbox_intr(struct rvu *rvu, int pf_vec_start);
+void cn20k_rvu_enable_mbox_intr(struct rvu *rvu);
+void cn20k_rvu_unregister_interrupts(struct rvu *rvu);
+int cn20k_mbox_setup(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ void *reg_base, int direction, int ndevs);
+void cn20k_rvu_enable_afvf_intr(struct rvu *rvu, int vfs);
+void cn20k_rvu_disable_afvf_intr(struct rvu *rvu, int vfs);
+#endif /* CN20K_API_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/debugfs.c
new file mode 100644
index 000000000000..498968bf4cf5
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/debugfs.c
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "struct.h"
+#include "debugfs.h"
+
+void print_nix_cn20k_sq_ctx(struct seq_file *m,
+ struct nix_cn20k_sq_ctx_s *sq_ctx)
+{
+ seq_printf(m, "W0: ena \t\t\t%d\nW0: qint_idx \t\t\t%d\n",
+ sq_ctx->ena, sq_ctx->qint_idx);
+ seq_printf(m, "W0: substream \t\t\t0x%03x\nW0: sdp_mcast \t\t\t%d\n",
+ sq_ctx->substream, sq_ctx->sdp_mcast);
+ seq_printf(m, "W0: cq \t\t\t\t%d\nW0: sqe_way_mask \t\t%d\n\n",
+ sq_ctx->cq, sq_ctx->sqe_way_mask);
+
+ seq_printf(m, "W1: smq \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: xoff\t\t\t%d\n",
+ sq_ctx->smq, sq_ctx->cq_ena, sq_ctx->xoff);
+ seq_printf(m, "W1: sso_ena \t\t\t%d\nW1: smq_rr_weight\t\t%d\n",
+ sq_ctx->sso_ena, sq_ctx->smq_rr_weight);
+ seq_printf(m, "W1: default_chan\t\t%d\nW1: sqb_count\t\t\t%d\n\n",
+ sq_ctx->default_chan, sq_ctx->sqb_count);
+
+ seq_printf(m, "W1: smq_rr_count_lb \t\t%d\n", sq_ctx->smq_rr_count_lb);
+ seq_printf(m, "W2: smq_rr_count_ub \t\t%d\n", sq_ctx->smq_rr_count_ub);
+ seq_printf(m, "W2: sqb_aura \t\t\t%d\nW2: sq_int \t\t\t%d\n",
+ sq_ctx->sqb_aura, sq_ctx->sq_int);
+ seq_printf(m, "W2: sq_int_ena \t\t\t%d\nW2: sqe_stype \t\t\t%d\n",
+ sq_ctx->sq_int_ena, sq_ctx->sqe_stype);
+
+ seq_printf(m, "W3: max_sqe_size\t\t%d\nW3: cq_limit\t\t\t%d\n",
+ sq_ctx->max_sqe_size, sq_ctx->cq_limit);
+ seq_printf(m, "W3: lmt_dis \t\t\t%d\nW3: mnq_dis \t\t\t%d\n",
+ sq_ctx->lmt_dis, sq_ctx->mnq_dis);
+ seq_printf(m, "W3: smq_next_sq\t\t\t%d\nW3: smq_lso_segnum\t\t%d\n",
+ sq_ctx->smq_next_sq, sq_ctx->smq_lso_segnum);
+ seq_printf(m, "W3: tail_offset \t\t%d\nW3: smenq_offset\t\t%d\n",
+ sq_ctx->tail_offset, sq_ctx->smenq_offset);
+ seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n",
+ sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld);
+
+ seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
+ sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
+ seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
+ seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
+ seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
+ seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
+ sq_ctx->smenq_next_sqb);
+
+ seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
+
+ seq_printf(m, "W9: vfi_lso_total\t\t%d\n", sq_ctx->vfi_lso_total);
+ seq_printf(m, "W9: vfi_lso_sizem1\t\t%d\nW9: vfi_lso_sb\t\t\t%d\n",
+ sq_ctx->vfi_lso_sizem1, sq_ctx->vfi_lso_sb);
+ seq_printf(m, "W9: vfi_lso_mps\t\t\t%d\nW9: vfi_lso_vlan0_ins_ena\t%d\n",
+ sq_ctx->vfi_lso_mps, sq_ctx->vfi_lso_vlan0_ins_ena);
+ seq_printf(m, "W9: vfi_lso_vlan1_ins_ena\t%d\nW9: vfi_lso_vld \t\t%d\n\n",
+ sq_ctx->vfi_lso_vlan1_ins_ena, sq_ctx->vfi_lso_vld);
+
+ seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
+ (u64)sq_ctx->scm_lso_rem);
+ seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
+ seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
+ seq_printf(m, "W13: aged_drop_octs \t\t\t%llu\n\n",
+ (u64)sq_ctx->aged_drop_octs);
+ seq_printf(m, "W13: aged_drop_pkts \t\t\t%llu\n\n",
+ (u64)sq_ctx->aged_drop_pkts);
+ seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
+ (u64)sq_ctx->dropped_octs);
+ seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
+ (u64)sq_ctx->dropped_pkts);
+}
+
+void print_nix_cn20k_cq_ctx(struct seq_file *m,
+ struct nix_cn20k_aq_enq_rsp *rsp)
+{
+ struct nix_cn20k_cq_ctx_s *cq_ctx = &rsp->cq;
+
+ seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);
+
+ seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr);
+ seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n",
+ cq_ctx->avg_con, cq_ctx->cint_idx);
+ seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n",
+ cq_ctx->cq_err, cq_ctx->qint_idx);
+ seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n",
+ cq_ctx->bpid, cq_ctx->bp_ena);
+
+ seq_printf(m, "W1: lbpid_high \t\t\t0x%03x\n", cq_ctx->lbpid_high);
+ seq_printf(m, "W1: lbpid_med \t\t\t0x%03x\n", cq_ctx->lbpid_med);
+ seq_printf(m, "W1: lbpid_low \t\t\t0x%03x\n", cq_ctx->lbpid_low);
+ seq_printf(m, "(W1: lbpid) \t\t\t0x%03x\n",
+ cq_ctx->lbpid_high << 6 | cq_ctx->lbpid_med << 3 |
+ cq_ctx->lbpid_low);
+ seq_printf(m, "W1: lbp_ena \t\t\t\t%d\n\n", cq_ctx->lbp_ena);
+
+ seq_printf(m, "W2: update_time \t\t%d\nW2:avg_level \t\t\t%d\n",
+ cq_ctx->update_time, cq_ctx->avg_level);
+ seq_printf(m, "W2: head \t\t\t%d\nW2:tail \t\t\t%d\n\n",
+ cq_ctx->head, cq_ctx->tail);
+
+ seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3:cq_err_int \t\t\t%d\n",
+ cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int);
+ seq_printf(m, "W3: qsize \t\t\t%d\nW3:stashing \t\t\t%d\n",
+ cq_ctx->qsize, cq_ctx->stashing);
+
+ seq_printf(m, "W3: caching \t\t\t%d\n", cq_ctx->caching);
+ seq_printf(m, "W3: lbp_frac \t\t\t%d\n", cq_ctx->lbp_frac);
+ seq_printf(m, "W3: stash_thresh \t\t\t%d\n",
+ cq_ctx->stash_thresh);
+
+ seq_printf(m, "W3: msh_valid \t\t\t%d\nW3:msh_dst \t\t\t%d\n",
+ cq_ctx->msh_valid, cq_ctx->msh_dst);
+
+ seq_printf(m, "W3: cpt_drop_err_en \t\t\t%d\n",
+ cq_ctx->cpt_drop_err_en);
+ seq_printf(m, "W3: ena \t\t\t%d\n",
+ cq_ctx->ena);
+ seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n",
+ cq_ctx->drop_ena, cq_ctx->drop);
+ seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp);
+
+ seq_printf(m, "W4: lbpid_ext \t\t\t\t%d\n\n", cq_ctx->lbpid_ext);
+ seq_printf(m, "W4: bpid_ext \t\t\t\t%d\n\n", cq_ctx->bpid_ext);
+}
+
+void print_npa_cn20k_aura_ctx(struct seq_file *m,
+ struct npa_cn20k_aq_enq_rsp *rsp)
+{
+ struct npa_cn20k_aura_s *aura = &rsp->aura;
+
+ seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);
+
+ seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n",
+ aura->ena, aura->pool_caching);
+ seq_printf(m, "W1: avg con\t\t%d\n", aura->avg_con);
+ seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n",
+ aura->pool_drop_ena, aura->aura_drop_ena);
+ seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n",
+ aura->bp_ena, aura->aura_drop);
+ seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n",
+ aura->shift, aura->avg_level);
+
+ seq_printf(m, "W2: count\t\t%llu\nW2: nix_bpid\t\t%d\n",
+ (u64)aura->count, aura->bpid);
+
+ seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
+ (u64)aura->limit, aura->bp, aura->fc_ena);
+
+ seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n",
+ aura->fc_up_crossing, aura->fc_stype);
+ seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits);
+
+ seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr);
+
+ seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n",
+ aura->pool_drop, aura->update_time);
+ seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n",
+ aura->err_int, aura->err_int_ena);
+ seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n",
+ aura->thresh_int, aura->thresh_int_ena);
+ seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n",
+ aura->thresh_up, aura->thresh_qint_idx);
+ seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx);
+
+ seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh);
+ seq_printf(m, "W6: fc_msh_dst\t\t%d\n", aura->fc_msh_dst);
+}
+
+void print_npa_cn20k_pool_ctx(struct seq_file *m,
+ struct npa_cn20k_aq_enq_rsp *rsp)
+{
+ struct npa_cn20k_pool_s *pool = &rsp->pool;
+
+ seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);
+
+ seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n",
+ pool->ena, pool->nat_align);
+ seq_printf(m, "W1: stack_caching\t%d\n",
+ pool->stack_caching);
+ seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n",
+ pool->buf_offset, pool->buf_size);
+
+ seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n",
+ pool->stack_max_pages, pool->stack_pages);
+
+ seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n",
+ pool->stack_offset, pool->shift, pool->avg_level);
+ seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n",
+ pool->avg_con, pool->fc_ena, pool->fc_stype);
+ seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n",
+ pool->fc_hyst_bits, pool->fc_up_crossing);
+ seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time);
+
+ seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr);
+
+ seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start);
+
+ seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end);
+
+ seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n",
+ pool->err_int, pool->err_int_ena);
+ seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int);
+ seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n",
+ pool->thresh_int_ena, pool->thresh_up);
+ seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d\n",
+ pool->thresh_qint_idx, pool->err_qint_idx);
+ seq_printf(m, "W8: fc_msh_dst\t\t%d\n", pool->fc_msh_dst);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/debugfs.h b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/debugfs.h
new file mode 100644
index 000000000000..a2e3a2cd6edb
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/debugfs.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 CGX driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#ifndef DEBUGFS_H
+#define DEBUGFS_H
+
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "struct.h"
+#include "../mbox.h"
+
+void print_nix_cn20k_sq_ctx(struct seq_file *m,
+ struct nix_cn20k_sq_ctx_s *sq_ctx);
+void print_nix_cn20k_cq_ctx(struct seq_file *m,
+ struct nix_cn20k_aq_enq_rsp *rsp);
+void print_npa_cn20k_aura_ctx(struct seq_file *m,
+ struct npa_cn20k_aq_enq_rsp *rsp);
+void print_npa_cn20k_pool_ctx(struct seq_file *m,
+ struct npa_cn20k_aq_enq_rsp *rsp);
+
+#endif
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/mbox_init.c b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/mbox_init.c
new file mode 100644
index 000000000000..bd3aab7770dd
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/mbox_init.c
@@ -0,0 +1,424 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+
+#include "rvu_trace.h"
+#include "mbox.h"
+#include "reg.h"
+#include "api.h"
+
+static irqreturn_t cn20k_afvf_mbox_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu_irq_data *rvu_irq_data = rvu_irq;
+ struct rvu *rvu = rvu_irq_data->rvu;
+ u64 intr;
+
+ /* Sync with mbox memory region */
+ rmb();
+
+ /* Clear interrupts */
+ intr = rvupf_read64(rvu, rvu_irq_data->intr_status);
+ rvupf_write64(rvu, rvu_irq_data->intr_status, intr);
+
+ if (intr)
+ trace_otx2_msg_interrupt(rvu->pdev, "VF(s) to AF", intr);
+
+ rvu_irq_data->afvf_queue_work_hdlr(&rvu->afvf_wq_info, rvu_irq_data->start,
+ rvu_irq_data->mdevs, intr);
+
+ return IRQ_HANDLED;
+}
+
+int cn20k_register_afvf_mbox_intr(struct rvu *rvu, int pf_vec_start)
+{
+ struct rvu_irq_data *irq_data;
+ int intr_vec, offset, vec = 0;
+ int err;
+
+ /* irq data for 4 VFPF intr vectors */
+ irq_data = devm_kcalloc(rvu->dev, 4,
+ sizeof(struct rvu_irq_data), GFP_KERNEL);
+ if (!irq_data)
+ return -ENOMEM;
+
+ for (intr_vec = RVU_MBOX_PF_INT_VEC_VFPF_MBOX0; intr_vec <=
+ RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1;
+ intr_vec++, vec++) {
+ switch (intr_vec) {
+ case RVU_MBOX_PF_INT_VEC_VFPF_MBOX0:
+ irq_data[vec].intr_status =
+ RVU_MBOX_PF_VFPF_INTX(0);
+ irq_data[vec].start = 0;
+ irq_data[vec].mdevs = 64;
+ break;
+ case RVU_MBOX_PF_INT_VEC_VFPF_MBOX1:
+ irq_data[vec].intr_status =
+ RVU_MBOX_PF_VFPF_INTX(1);
+ irq_data[vec].start = 64;
+ irq_data[vec].mdevs = 64;
+ break;
+ case RVU_MBOX_PF_INT_VEC_VFPF1_MBOX0:
+ irq_data[vec].intr_status =
+ RVU_MBOX_PF_VFPF1_INTX(0);
+ irq_data[vec].start = 0;
+ irq_data[vec].mdevs = 64;
+ break;
+ case RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1:
+ irq_data[vec].intr_status = RVU_MBOX_PF_VFPF1_INTX(1);
+ irq_data[vec].start = 64;
+ irq_data[vec].mdevs = 64;
+ break;
+ }
+ irq_data[vec].afvf_queue_work_hdlr =
+ rvu_queue_work;
+ offset = pf_vec_start + intr_vec;
+ irq_data[vec].vec_num = offset;
+ irq_data[vec].rvu = rvu;
+
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAF VFAF%d Mbox%d",
+ vec / 2, vec % 2);
+ err = request_irq(pci_irq_vector(rvu->pdev, offset),
+ rvu->ng_rvu->rvu_mbox_ops->afvf_intr_handler, 0,
+ &rvu->irq_name[offset * NAME_SIZE],
+ &irq_data[vec]);
+ if (err) {
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for AFVF mbox irq\n");
+ return err;
+ }
+ rvu->irq_allocated[offset] = true;
+ }
+
+ return 0;
+}
+
+/* CN20K mbox PFx => AF irq handler */
+static irqreturn_t cn20k_mbox_pf_common_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu_irq_data *rvu_irq_data = rvu_irq;
+ struct rvu *rvu = rvu_irq_data->rvu;
+ u64 intr;
+
+ /* Clear interrupts */
+ intr = rvu_read64(rvu, BLKADDR_RVUM, rvu_irq_data->intr_status);
+ rvu_write64(rvu, BLKADDR_RVUM, rvu_irq_data->intr_status, intr);
+
+ if (intr)
+ trace_otx2_msg_interrupt(rvu->pdev, "PF(s) to AF", intr);
+
+ /* Sync with mbox memory region */
+ rmb();
+
+ rvu_irq_data->rvu_queue_work_hdlr(&rvu->afpf_wq_info,
+ rvu_irq_data->start,
+ rvu_irq_data->mdevs, intr);
+
+ return IRQ_HANDLED;
+}
+
+void cn20k_rvu_enable_mbox_intr(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ /* Clear spurious irqs, if any */
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_MBOX_AF_PFAF_INT(0), INTR_MASK(hw->total_pfs));
+
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_MBOX_AF_PFAF_INT(1), INTR_MASK(hw->total_pfs - 64));
+
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_MBOX_AF_PFAF1_INT(0), INTR_MASK(hw->total_pfs));
+
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_MBOX_AF_PFAF1_INT(1), INTR_MASK(hw->total_pfs - 64));
+
+ /* Enable mailbox interrupts for all PFs except PF0, i.e. the AF itself */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF_INT_ENA_W1S(0),
+ INTR_MASK(hw->total_pfs) & ~1ULL);
+
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF_INT_ENA_W1S(1),
+ INTR_MASK(hw->total_pfs - 64));
+
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF1_INT_ENA_W1S(0),
+ INTR_MASK(hw->total_pfs) & ~1ULL);
+
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF1_INT_ENA_W1S(1),
+ INTR_MASK(hw->total_pfs - 64));
+}
+
+void cn20k_rvu_unregister_interrupts(struct rvu *rvu)
+{
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF_INT_ENA_W1C(0),
+ INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF_INT_ENA_W1C(1),
+ INTR_MASK(rvu->hw->total_pfs - 64));
+
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF1_INT_ENA_W1C(0),
+ INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF1_INT_ENA_W1C(1),
+ INTR_MASK(rvu->hw->total_pfs - 64));
+}
+
+int cn20k_register_afpf_mbox_intr(struct rvu *rvu)
+{
+ struct rvu_irq_data *irq_data;
+ int intr_vec, ret, vec = 0;
+
+ /* irq data for 4 PF intr vectors */
+ irq_data = devm_kcalloc(rvu->dev, 4,
+ sizeof(struct rvu_irq_data), GFP_KERNEL);
+ if (!irq_data)
+ return -ENOMEM;
+
+ for (intr_vec = RVU_AF_CN20K_INT_VEC_PFAF_MBOX0; intr_vec <=
+ RVU_AF_CN20K_INT_VEC_PFAF1_MBOX1; intr_vec++,
+ vec++) {
+ switch (intr_vec) {
+ case RVU_AF_CN20K_INT_VEC_PFAF_MBOX0:
+ irq_data[vec].intr_status =
+ RVU_MBOX_AF_PFAF_INT(0);
+ irq_data[vec].start = 0;
+ irq_data[vec].mdevs = 64;
+ break;
+ case RVU_AF_CN20K_INT_VEC_PFAF_MBOX1:
+ irq_data[vec].intr_status =
+ RVU_MBOX_AF_PFAF_INT(1);
+ irq_data[vec].start = 64;
+ irq_data[vec].mdevs = 96;
+ break;
+ case RVU_AF_CN20K_INT_VEC_PFAF1_MBOX0:
+ irq_data[vec].intr_status =
+ RVU_MBOX_AF_PFAF1_INT(0);
+ irq_data[vec].start = 0;
+ irq_data[vec].mdevs = 64;
+ break;
+ case RVU_AF_CN20K_INT_VEC_PFAF1_MBOX1:
+ irq_data[vec].intr_status =
+ RVU_MBOX_AF_PFAF1_INT(1);
+ irq_data[vec].start = 64;
+ irq_data[vec].mdevs = 96;
+ break;
+ }
+ irq_data[vec].rvu_queue_work_hdlr = rvu_queue_work;
+ irq_data[vec].vec_num = intr_vec;
+ irq_data[vec].rvu = rvu;
+
+ /* Register mailbox interrupt handler */
+ sprintf(&rvu->irq_name[intr_vec * NAME_SIZE],
+ "RVUAF PFAF%d Mbox%d",
+ vec / 2, vec % 2);
+ ret = request_irq(pci_irq_vector(rvu->pdev, intr_vec),
+ rvu->ng_rvu->rvu_mbox_ops->pf_intr_handler, 0,
+ &rvu->irq_name[intr_vec * NAME_SIZE],
+ &irq_data[vec]);
+ if (ret)
+ return ret;
+
+ rvu->irq_allocated[intr_vec] = true;
+ }
+
+ return 0;
+}
+
+int cn20k_rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
+ int num, int type, unsigned long *pf_bmap)
+{
+ int region;
+ u64 bar;
+
+ if (type == TYPE_AFVF) {
+ for (region = 0; region < num; region++) {
+ if (!test_bit(region, pf_bmap))
+ continue;
+
+ bar = (u64)phys_to_virt((u64)rvu->ng_rvu->vf_mbox_addr->base);
+ bar += region * MBOX_SIZE;
+ mbox_addr[region] = (void *)bar;
+
+ if (!mbox_addr[region])
+ return -ENOMEM;
+ }
+ return 0;
+ }
+
+ for (region = 0; region < num; region++) {
+ if (!test_bit(region, pf_bmap))
+ continue;
+
+ bar = (u64)phys_to_virt((u64)rvu->ng_rvu->pf_mbox_addr->base);
+ bar += region * MBOX_SIZE;
+
+ mbox_addr[region] = (void *)bar;
+
+ if (!mbox_addr[region])
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int rvu_alloc_mbox_memory(struct rvu *rvu, int type,
+ int ndevs, int mbox_size)
+{
+ struct qmem *mbox_addr;
+ dma_addr_t iova;
+ int pf, err;
+
+ /* Allocate contiguous memory for mailbox communication,
+ * e.g. AF <=> PFx mbox memory.
+ * The allocated memory is split into chunks of MBOX_SIZE
+ * and set up for each of the RVU PFs. In HW this memory is
+ * aliased to an offset within BAR2 of those PFs.
+ *
+ * The AF accesses mbox memory using direct physical addresses
+ * and the PFs access the same shared memory from BAR2.
+ *
+ * PF <=> VF mbox memory works in the same fashion.
+ * Both AF-PF and PF-VF mailboxes use the IOVA to maintain the mailbox msgs.
+ */
+
+ err = qmem_alloc(rvu->dev, &mbox_addr, ndevs, mbox_size);
+ if (err)
+ return -ENOMEM;
+
+ switch (type) {
+ case TYPE_AFPF:
+ rvu->ng_rvu->pf_mbox_addr = mbox_addr;
+ iova = (u64)mbox_addr->iova;
+ for (pf = 0; pf < ndevs; pf++) {
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFX_ADDR(pf),
+ (u64)iova);
+ iova += mbox_size;
+ }
+ break;
+ case TYPE_AFVF:
+ rvu->ng_rvu->vf_mbox_addr = mbox_addr;
+ rvupf_write64(rvu, RVU_PF_VF_MBOX_ADDR, (u64)mbox_addr->iova);
+ break;
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+static struct mbox_ops cn20k_mbox_ops = {
+ .pf_intr_handler = cn20k_mbox_pf_common_intr_handler,
+ .afvf_intr_handler = cn20k_afvf_mbox_intr_handler,
+};
+
+int cn20k_rvu_mbox_init(struct rvu *rvu, int type, int ndevs)
+{
+ int dev;
+
+ if (!is_cn20k(rvu->pdev))
+ return 0;
+
+ rvu->ng_rvu->rvu_mbox_ops = &cn20k_mbox_ops;
+
+ if (type == TYPE_AFVF) {
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_PF_VF_CFG, ilog2(MBOX_SIZE));
+ } else {
+ for (dev = 0; dev < ndevs; dev++)
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_MBOX_AF_PFX_CFG(dev), ilog2(MBOX_SIZE));
+ }
+
+ return rvu_alloc_mbox_memory(rvu, type, ndevs, MBOX_SIZE);
+}
+
+void cn20k_free_mbox_memory(struct rvu *rvu)
+{
+ if (!is_cn20k(rvu->pdev))
+ return;
+
+ qmem_free(rvu->dev, rvu->ng_rvu->pf_mbox_addr);
+ qmem_free(rvu->dev, rvu->ng_rvu->vf_mbox_addr);
+}
+
+void cn20k_rvu_disable_afvf_intr(struct rvu *rvu, int vfs)
+{
+ rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INT_ENA_W1CX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
+
+ if (vfs <= 64)
+ return;
+
+ rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
+}
+
+void cn20k_rvu_enable_afvf_intr(struct rvu *rvu, int vfs)
+{
+ /* Clear any pending interrupts and enable AF VF interrupts for
+ * the first 64 VFs.
+ */
+ rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INTX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INT_ENA_W1SX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INTX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(0), INTR_MASK(vfs));
+
+ /* FLR */
+ rvupf_write64(rvu, RVU_PF_VFFLR_INTX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(vfs));
+
+ /* Same for remaining VFs, if any. */
+ if (vfs <= 64)
+ return;
+
+ rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INTX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INTX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
+
+ rvupf_write64(rvu, RVU_PF_VFFLR_INTX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
+}
+
+int rvu_alloc_cint_qint_mem(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ int blkaddr, int nixlf)
+{
+ int qints, hwctx_size, err;
+ u64 cfg, ctx_cfg;
+
+ if (is_rvu_otx2(rvu) || is_cn20k(rvu->pdev))
+ return 0;
+
+ ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);
+ /* Alloc memory for CQINT's HW contexts */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
+ qints = (cfg >> 24) & 0xFFF;
+ hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
+ err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
+ if (err)
+ return -ENOMEM;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
+ (u64)pfvf->cq_ints_ctx->iova);
+
+ /* Alloc memory for QINT's HW contexts */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
+ qints = (cfg >> 12) & 0xFFF;
+ hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
+ err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
+ if (err)
+ return -ENOMEM;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
+ (u64)pfvf->nix_qints_ctx->iova);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/nix.c b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/nix.c
new file mode 100644
index 000000000000..aa2016fd1bba
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/nix.c
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "struct.h"
+#include "../rvu.h"
+
+int rvu_mbox_handler_nix_cn20k_aq_enq(struct rvu *rvu,
+ struct nix_cn20k_aq_enq_req *req,
+ struct nix_cn20k_aq_enq_rsp *rsp)
+{
+ return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req,
+ (struct nix_aq_enq_rsp *)rsp);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/npa.c b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/npa.c
new file mode 100644
index 000000000000..fe8f926c8b75
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/npa.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "struct.h"
+#include "../rvu.h"
+
+int rvu_mbox_handler_npa_cn20k_aq_enq(struct rvu *rvu,
+ struct npa_cn20k_aq_enq_req *req,
+ struct npa_cn20k_aq_enq_rsp *rsp)
+{
+ return rvu_npa_aq_enq_inst(rvu, (struct npa_aq_enq_req *)req,
+ (struct npa_aq_enq_rsp *)rsp);
+}
+EXPORT_SYMBOL(rvu_mbox_handler_npa_cn20k_aq_enq);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/reg.h b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/reg.h
new file mode 100644
index 000000000000..affb39803120
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/reg.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#ifndef RVU_MBOX_REG_H
+#define RVU_MBOX_REG_H
+#include "../rvu.h"
+#include "../rvu_reg.h"
+
+/* RVUM block registers */
+#define RVU_PF_DISC (0x0)
+#define RVU_PRIV_PFX_DISC(a) (0x8000208 | (a) << 16)
+#define RVU_PRIV_HWVFX_DISC(a) (0xD000000 | (a) << 12)
+
+/* Mbox Registers */
+/* RVU AF BAR0 Mbox registers for AF => PFx */
+#define RVU_MBOX_AF_PFX_ADDR(a) (0x5000 | (a) << 4)
+#define RVU_MBOX_AF_PFX_CFG(a) (0x6000 | (a) << 4)
+#define RVU_MBOX_AF_AFPFX_TRIGX(a) (0x9000 | (a) << 3)
+#define RVU_MBOX_AF_PFAF_INT(a) (0x2980 | (a) << 6)
+#define RVU_MBOX_AF_PFAF_INT_W1S(a) (0x2988 | (a) << 6)
+#define RVU_MBOX_AF_PFAF_INT_ENA_W1S(a) (0x2990 | (a) << 6)
+#define RVU_MBOX_AF_PFAF_INT_ENA_W1C(a) (0x2998 | (a) << 6)
+#define RVU_MBOX_AF_PFAF1_INT(a) (0x29A0 | (a) << 6)
+#define RVU_MBOX_AF_PFAF1_INT_W1S(a) (0x29A8 | (a) << 6)
+#define RVU_MBOX_AF_PFAF1_INT_ENA_W1S(a) (0x29B0 | (a) << 6)
+#define RVU_MBOX_AF_PFAF1_INT_ENA_W1C(a) (0x29B8 | (a) << 6)
+
+/* RVU PF => AF mbox registers */
+#define RVU_MBOX_PF_PFAF_TRIGX(a) (0xC00 | (a) << 3)
+#define RVU_MBOX_PF_INT (0xC20)
+#define RVU_MBOX_PF_INT_W1S (0xC28)
+#define RVU_MBOX_PF_INT_ENA_W1S (0xC30)
+#define RVU_MBOX_PF_INT_ENA_W1C (0xC38)
+
+#define RVU_AF_BAR2_SEL (0x9000000)
+#define RVU_AF_BAR2_PFID (0x16400)
+#define NIX_CINTX_INT_W1S(a) (0xd30 | (a) << 12)
+#define NIX_QINTX_CNT(a) (0xc00 | (a) << 12)
+
+#define RVU_MBOX_AF_VFAF_INT(a) (0x3000 | (a) << 6)
+#define RVU_MBOX_AF_VFAF_INT_W1S(a) (0x3008 | (a) << 6)
+#define RVU_MBOX_AF_VFAF_INT_ENA_W1S(a) (0x3010 | (a) << 6)
+#define RVU_MBOX_AF_VFAF_INT_ENA_W1C(a) (0x3018 | (a) << 6)
+#define RVU_MBOX_AF_VFAF1_INT(a) (0x3020 | (a) << 6)
+#define RVU_MBOX_AF_VFAF1_INT_W1S(a) (0x3028 | (a) << 6)
+#define RVU_MBOX_AF_VFAF1_IN_ENA_W1S(a) (0x3030 | (a) << 6)
+#define RVU_MBOX_AF_VFAF1_IN_ENA_W1C(a) (0x3038 | (a) << 6)
+
+#define RVU_MBOX_AF_AFVFX_TRIG(a, b) (0x10000 | (a) << 4 | (b) << 3)
+#define RVU_MBOX_AF_VFX_ADDR(a) (0x20000 | (a) << 4)
+#define RVU_MBOX_AF_VFX_CFG(a) (0x28000 | (a) << 4)
+
+#define RVU_MBOX_PF_VFX_PFVF_TRIGX(a) (0x2000 | (a) << 3)
+
+#define RVU_MBOX_PF_VFPF_INTX(a) (0x1000 | (a) << 3)
+#define RVU_MBOX_PF_VFPF_INT_W1SX(a) (0x1020 | (a) << 3)
+#define RVU_MBOX_PF_VFPF_INT_ENA_W1SX(a) (0x1040 | (a) << 3)
+#define RVU_MBOX_PF_VFPF_INT_ENA_W1CX(a) (0x1060 | (a) << 3)
+
+#define RVU_MBOX_PF_VFPF1_INTX(a) (0x1080 | (a) << 3)
+#define RVU_MBOX_PF_VFPF1_INT_W1SX(a) (0x10a0 | (a) << 3)
+#define RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(a) (0x10c0 | (a) << 3)
+#define RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(a) (0x10e0 | (a) << 3)
+
+#define RVU_MBOX_PF_VF_ADDR (0xC40)
+#define RVU_MBOX_PF_LMTLINE_ADDR (0xC48)
+#define RVU_MBOX_PF_VF_CFG (0xC60)
+
+#define RVU_MBOX_VF_VFPF_TRIGX(a) (0x3000 | (a) << 3)
+#define RVU_MBOX_VF_INT (0x20)
+#define RVU_MBOX_VF_INT_W1S (0x28)
+#define RVU_MBOX_VF_INT_ENA_W1S (0x30)
+#define RVU_MBOX_VF_INT_ENA_W1C (0x38)
+
+#define RVU_MBOX_VF_VFAF_TRIGX(a) (0x2000 | (a) << 3)
+#endif /* RVU_MBOX_REG_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/struct.h b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/struct.h
new file mode 100644
index 000000000000..763f6cabd7c2
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/struct.h
@@ -0,0 +1,380 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#ifndef STRUCT_H
+#define STRUCT_H
+
+#define NIX_MAX_CTX_SIZE 128
+
+/*
+ * CN20k RVU PF MBOX Interrupt Vector Enumeration
+ *
+ * Vectors 0 - 3 are compatible with pre-CN20K silicon, so the
+ * existing macros are reused.
+ */
+enum rvu_mbox_pf_int_vec_e {
+ RVU_MBOX_PF_INT_VEC_VFPF_MBOX0 = 0x4,
+ RVU_MBOX_PF_INT_VEC_VFPF_MBOX1 = 0x5,
+ RVU_MBOX_PF_INT_VEC_VFPF1_MBOX0 = 0x6,
+ RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1 = 0x7,
+ RVU_MBOX_PF_INT_VEC_AFPF_MBOX = 0x8,
+ RVU_MBOX_PF_INT_VEC_CNT = 0x9,
+};
+
+/* RVU Admin function Interrupt Vector Enumeration */
+enum rvu_af_cn20k_int_vec_e {
+ RVU_AF_CN20K_INT_VEC_POISON = 0x0,
+ RVU_AF_CN20K_INT_VEC_PFFLR0 = 0x1,
+ RVU_AF_CN20K_INT_VEC_PFFLR1 = 0x2,
+ RVU_AF_CN20K_INT_VEC_PFME0 = 0x3,
+ RVU_AF_CN20K_INT_VEC_PFME1 = 0x4,
+ RVU_AF_CN20K_INT_VEC_GEN = 0x5,
+ RVU_AF_CN20K_INT_VEC_PFAF_MBOX0 = 0x6,
+ RVU_AF_CN20K_INT_VEC_PFAF_MBOX1 = 0x7,
+ RVU_AF_CN20K_INT_VEC_PFAF1_MBOX0 = 0x8,
+ RVU_AF_CN20K_INT_VEC_PFAF1_MBOX1 = 0x9,
+ RVU_AF_CN20K_INT_VEC_CNT = 0xa,
+};
+
+struct nix_cn20k_sq_ctx_s {
+ u64 ena : 1; /* W0 */
+ u64 qint_idx : 6;
+ u64 substream : 20;
+ u64 sdp_mcast : 1;
+ u64 cq : 20;
+ u64 sqe_way_mask : 16;
+ u64 smq : 11; /* W1 */
+ u64 cq_ena : 1;
+ u64 xoff : 1;
+ u64 sso_ena : 1;
+ u64 smq_rr_weight : 14;
+ u64 default_chan : 12;
+ u64 sqb_count : 16;
+ u64 reserved_120_120 : 1;
+ u64 smq_rr_count_lb : 7;
+ u64 smq_rr_count_ub : 25; /* W2 */
+ u64 sqb_aura : 20;
+ u64 sq_int : 8;
+ u64 sq_int_ena : 8;
+ u64 sqe_stype : 2;
+ u64 reserved_191_191 : 1;
+ u64 max_sqe_size : 2; /* W3 */
+ u64 cq_limit : 8;
+ u64 lmt_dis : 1;
+ u64 mnq_dis : 1;
+ u64 smq_next_sq : 20;
+ u64 smq_lso_segnum : 8;
+ u64 tail_offset : 6;
+ u64 smenq_offset : 6;
+ u64 head_offset : 6;
+ u64 smenq_next_sqb_vld : 1;
+ u64 smq_pend : 1;
+ u64 smq_next_sq_vld : 1;
+ u64 reserved_253_255 : 3;
+ u64 next_sqb : 64; /* W4 */
+ u64 tail_sqb : 64; /* W5 */
+ u64 smenq_sqb : 64; /* W6 */
+ u64 smenq_next_sqb : 64; /* W7 */
+ u64 head_sqb : 64; /* W8 */
+ u64 reserved_576_583 : 8; /* W9 */
+ u64 vfi_lso_total : 18;
+ u64 vfi_lso_sizem1 : 3;
+ u64 vfi_lso_sb : 8;
+ u64 vfi_lso_mps : 14;
+ u64 vfi_lso_vlan0_ins_ena : 1;
+ u64 vfi_lso_vlan1_ins_ena : 1;
+ u64 vfi_lso_vld : 1;
+ u64 reserved_630_639 : 10;
+ u64 scm_lso_rem : 18; /* W10 */
+ u64 reserved_658_703 : 46;
+ u64 octs : 48; /* W11 */
+ u64 reserved_752_767 : 16;
+ u64 pkts : 48; /* W12 */
+ u64 reserved_816_831 : 16;
+ u64 aged_drop_octs : 32; /* W13 */
+ u64 aged_drop_pkts : 32;
+ u64 dropped_octs : 48; /* W14 */
+ u64 reserved_944_959 : 16;
+ u64 dropped_pkts : 48; /* W15 */
+ u64 reserved_1008_1023 : 16;
+};
+
+static_assert(sizeof(struct nix_cn20k_sq_ctx_s) == NIX_MAX_CTX_SIZE);
+
+struct nix_cn20k_cq_ctx_s {
+ u64 base : 64; /* W0 */
+ u64 lbp_ena : 1; /* W1 */
+ u64 lbpid_low : 3;
+ u64 bp_ena : 1;
+ u64 lbpid_med : 3;
+ u64 bpid : 9;
+ u64 lbpid_high : 3;
+ u64 qint_idx : 7;
+ u64 cq_err : 1;
+ u64 cint_idx : 7;
+ u64 avg_con : 9;
+ u64 wrptr : 20;
+ u64 tail : 20; /* W2 */
+ u64 head : 20;
+ u64 avg_level : 8;
+ u64 update_time : 16;
+ u64 bp : 8; /* W3 */
+ u64 drop : 8;
+ u64 drop_ena : 1;
+ u64 ena : 1;
+ u64 cpt_drop_err_en : 1;
+ u64 reserved_211_211 : 1;
+ u64 msh_dst : 11;
+ u64 msh_valid : 1;
+ u64 stash_thresh : 4;
+ u64 lbp_frac : 4;
+ u64 caching : 1;
+ u64 stashing : 1;
+ u64 reserved_234_235 : 2;
+ u64 qsize : 4;
+ u64 cq_err_int : 8;
+ u64 cq_err_int_ena : 8;
+ u64 bpid_ext : 2; /* W4 */
+ u64 reserved_258_259 : 2;
+ u64 lbpid_ext : 2;
+ u64 reserved_262_319 : 58;
+ u64 reserved_320_383 : 64; /* W5 */
+ u64 reserved_384_447 : 64; /* W6 */
+ u64 reserved_448_511 : 64; /* W7 */
+ u64 padding[8];
+};
+
+static_assert(sizeof(struct nix_cn20k_cq_ctx_s) == NIX_MAX_CTX_SIZE);
+
+struct nix_cn20k_rq_ctx_s {
+ u64 ena : 1;
+ u64 sso_ena : 1;
+ u64 ipsech_ena : 1;
+ u64 ena_wqwd : 1;
+ u64 cq : 20;
+ u64 reserved_24_34 : 11;
+ u64 port_il4_dis : 1;
+ u64 port_ol4_dis : 1;
+ u64 lenerr_dis : 1;
+ u64 csum_il4_dis : 1;
+ u64 csum_ol4_dis : 1;
+ u64 len_il4_dis : 1;
+ u64 len_il3_dis : 1;
+ u64 len_ol4_dis : 1;
+ u64 len_ol3_dis : 1;
+ u64 wqe_aura : 20;
+ u64 spb_aura : 20;
+ u64 lpb_aura : 20;
+ u64 sso_grp : 10;
+ u64 sso_tt : 2;
+ u64 pb_caching : 2;
+ u64 wqe_caching : 1;
+ u64 xqe_drop_ena : 1;
+ u64 spb_drop_ena : 1;
+ u64 lpb_drop_ena : 1;
+ u64 pb_stashing : 1;
+ u64 ipsecd_drop_en : 1;
+ u64 chi_ena : 1;
+ u64 reserved_125_127 : 3;
+ u64 band_prof_id_l : 10;
+ u64 sso_fc_ena : 1;
+ u64 policer_ena : 1;
+ u64 spb_sizem1 : 6;
+ u64 wqe_skip : 2;
+ u64 spb_high_sizem1 : 3;
+ u64 spb_ena : 1;
+ u64 lpb_sizem1 : 12;
+ u64 first_skip : 7;
+ u64 reserved_171_171 : 1;
+ u64 later_skip : 6;
+ u64 xqe_imm_size : 6;
+ u64 band_prof_id_h : 4;
+ u64 reserved_188_189 : 2;
+ u64 xqe_imm_copy : 1;
+ u64 xqe_hdr_split : 1;
+ u64 xqe_drop : 8;
+ u64 xqe_pass : 8;
+ u64 wqe_pool_drop : 8;
+ u64 wqe_pool_pass : 8;
+ u64 spb_aura_drop : 8;
+ u64 spb_aura_pass : 8;
+ u64 spb_pool_drop : 8;
+ u64 spb_pool_pass : 8;
+ u64 lpb_aura_drop : 8;
+ u64 lpb_aura_pass : 8;
+ u64 lpb_pool_drop : 8;
+ u64 lpb_pool_pass : 8;
+ u64 reserved_288_291 : 4;
+ u64 rq_int : 8;
+ u64 rq_int_ena : 8;
+ u64 qint_idx : 7;
+ u64 reserved_315_319 : 5;
+ u64 ltag : 24;
+ u64 good_utag : 8;
+ u64 bad_utag : 8;
+ u64 flow_tagw : 6;
+ u64 ipsec_vwqe : 1;
+ u64 vwqe_ena : 1;
+ u64 vtime_wait : 8;
+ u64 max_vsize_exp : 4;
+ u64 vwqe_skip : 2;
+ u64 reserved_382_383 : 2;
+ u64 octs : 48;
+ u64 reserved_432_447 : 16;
+ u64 pkts : 48;
+ u64 reserved_496_511 : 16;
+ u64 drop_octs : 48;
+ u64 reserved_560_575 : 16;
+ u64 drop_pkts : 48;
+ u64 reserved_624_639 : 16;
+ u64 re_pkts : 48;
+ u64 reserved_688_703 : 16;
+ u64 reserved_704_767 : 64;
+ u64 reserved_768_831 : 64;
+ u64 reserved_832_895 : 64;
+ u64 reserved_896_959 : 64;
+ u64 reserved_960_1023 : 64;
+};
+
+static_assert(sizeof(struct nix_cn20k_rq_ctx_s) == NIX_MAX_CTX_SIZE);
+
+struct npa_cn20k_aura_s {
+ u64 pool_addr; /* W0 */
+ u64 ena : 1; /* W1 */
+ u64 reserved_65 : 2;
+ u64 pool_caching : 1;
+ u64 reserved_68 : 16;
+ u64 avg_con : 9;
+ u64 reserved_93 : 1;
+ u64 pool_drop_ena : 1;
+ u64 aura_drop_ena : 1;
+ u64 bp_ena : 1;
+ u64 reserved_97_103 : 7;
+ u64 aura_drop : 8;
+ u64 shift : 6;
+ u64 reserved_118_119 : 2;
+ u64 avg_level : 8;
+ u64 count : 36; /* W2 */
+ u64 reserved_164_167 : 4;
+ u64 bpid : 12;
+ u64 reserved_180_191 : 12;
+ u64 limit : 36; /* W3 */
+ u64 reserved_228_231 : 4;
+ u64 bp : 7;
+ u64 reserved_239_243 : 5;
+ u64 fc_ena : 1;
+ u64 fc_up_crossing : 1;
+ u64 fc_stype : 2;
+ u64 fc_hyst_bits : 4;
+ u64 reserved_252_255 : 4;
+ u64 fc_addr; /* W4 */
+ u64 pool_drop : 8; /* W5 */
+ u64 update_time : 16;
+ u64 err_int : 8;
+ u64 err_int_ena : 8;
+ u64 thresh_int : 1;
+ u64 thresh_int_ena : 1;
+ u64 thresh_up : 1;
+ u64 reserved_363 : 1;
+ u64 thresh_qint_idx : 7;
+ u64 reserved_371 : 1;
+ u64 err_qint_idx : 7;
+ u64 reserved_379_383 : 5;
+ u64 thresh : 36; /* W6 */
+ u64 rsvd_423_420 : 4;
+ u64 fc_msh_dst : 11;
+ u64 reserved_435_438 : 4;
+ u64 op_dpc_ena : 1;
+ u64 op_dpc_set : 5;
+ u64 reserved_445_445 : 1;
+ u64 stream_ctx : 1;
+ u64 unified_ctx : 1;
+ u64 reserved_448_511; /* W7 */
+ u64 padding[8];
+};
+
+static_assert(sizeof(struct npa_cn20k_aura_s) == NIX_MAX_CTX_SIZE);
+
+struct npa_cn20k_pool_s {
+ u64 stack_base; /* W0 */
+ u64 ena : 1;
+ u64 nat_align : 1;
+ u64 reserved_66_67 : 2;
+ u64 stack_caching : 1;
+ u64 reserved_69_87 : 19;
+ u64 buf_offset : 12;
+ u64 reserved_100_103 : 4;
+ u64 buf_size : 12;
+ u64 reserved_116_119 : 4;
+ u64 ref_cnt_prof : 3;
+ u64 reserved_123_127 : 5;
+ u64 stack_max_pages : 32;
+ u64 stack_pages : 32;
+ u64 bp_0 : 7;
+ u64 bp_1 : 7;
+ u64 bp_2 : 7;
+ u64 bp_3 : 7;
+ u64 bp_4 : 7;
+ u64 bp_5 : 7;
+ u64 bp_6 : 7;
+ u64 bp_7 : 7;
+ u64 bp_ena_0 : 1;
+ u64 bp_ena_1 : 1;
+ u64 bp_ena_2 : 1;
+ u64 bp_ena_3 : 1;
+ u64 bp_ena_4 : 1;
+ u64 bp_ena_5 : 1;
+ u64 bp_ena_6 : 1;
+ u64 bp_ena_7 : 1;
+ u64 stack_offset : 4;
+ u64 reserved_260_263 : 4;
+ u64 shift : 6;
+ u64 reserved_270_271 : 2;
+ u64 avg_level : 8;
+ u64 avg_con : 9;
+ u64 fc_ena : 1;
+ u64 fc_stype : 2;
+ u64 fc_hyst_bits : 4;
+ u64 fc_up_crossing : 1;
+ u64 reserved_297_299 : 3;
+ u64 update_time : 16;
+ u64 reserved_316_319 : 4;
+ u64 fc_addr; /* W5 */
+ u64 ptr_start; /* W6 */
+ u64 ptr_end; /* W7 */
+ u64 bpid_0 : 12;
+ u64 reserved_524_535 : 12;
+ u64 err_int : 8;
+ u64 err_int_ena : 8;
+ u64 thresh_int : 1;
+ u64 thresh_int_ena : 1;
+ u64 thresh_up : 1;
+ u64 reserved_555 : 1;
+ u64 thresh_qint_idx : 7;
+ u64 reserved_563 : 1;
+ u64 err_qint_idx : 7;
+ u64 reserved_571_575 : 5;
+ u64 thresh : 36;
+ u64 rsvd_612_615 : 4;
+ u64 fc_msh_dst : 11;
+ u64 reserved_627_630 : 4;
+ u64 op_dpc_ena : 1;
+ u64 op_dpc_set : 5;
+ u64 reserved_637_637 : 1;
+ u64 stream_ctx : 1;
+ u64 reserved_639 : 1;
+ u64 reserved_640_703; /* W10 */
+ u64 reserved_704_767; /* W11 */
+ u64 reserved_768_831; /* W12 */
+ u64 reserved_832_895; /* W13 */
+ u64 reserved_896_959; /* W14 */
+ u64 reserved_960_1023; /* W15 */
+};
+
+static_assert(sizeof(struct npa_cn20k_pool_s) == NIX_MAX_CTX_SIZE);
+
+#endif /* STRUCT_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
index 2436c1ff9ba4..8a08bebf08c2 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
@@ -39,7 +39,7 @@ struct qmem {
void *base;
dma_addr_t iova;
int alloc_sz;
- u16 entry_sz;
+ u32 entry_sz;
u8 align;
u32 qsize;
};
@@ -156,8 +156,10 @@ enum nix_scheduler {
#define NIC_HW_MIN_FRS 40
#define NIC_HW_MAX_FRS 9212
#define SDP_HW_MAX_FRS 65535
+#define SDP_HW_MIN_FRS 16
#define CN10K_LMAC_LINK_MAX_FRS 16380 /* 16k - FCS */
#define CN10K_LBK_LINK_MAX_FRS 65535 /* 64k */
+#define SDP_LINK_CREDIT 0x320202
/* NIX RX action operation*/
#define NIX_RX_ACTIONOP_DROP (0x0ull)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
index 0b4cba03f2e8..6180e68e1765 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
@@ -72,7 +72,6 @@ struct mac_ops {
u8 irq_offset;
u8 int_ena_bit;
u8 lmac_fwi;
- u32 fifo_len;
bool non_contiguous_serdes_lane;
/* RPM & CGX differs in number of Receive/transmit stats */
u8 rx_stats_cnt;
@@ -132,6 +131,9 @@ struct mac_ops {
/* FEC stats */
int (*get_fec_stats)(void *cgxd, int lmac_id,
struct cgx_fec_stats_rsp *rsp);
+ int (*mac_stats_reset)(void *cgxd, int lmac_id);
+ void (*mac_x2p_reset)(void *cgxd, bool enable);
+ int (*mac_enadis_rx)(void *cgxd, int lmac_id, bool enable);
};
struct cgx {
@@ -141,6 +143,10 @@ struct cgx {
u8 lmac_count;
/* number of LMACs per MAC could be 4 or 8 */
u8 max_lmac_per_mac;
+ /* Length of the FIFO varies depending on the number
+ * of LMACs
+ */
+ u32 fifo_len;
#define MAX_LMAC_COUNT 8
struct lmac *lmac_idmap[MAX_LMAC_COUNT];
struct work_struct cgx_cmd_work;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
index 1e5aa5397504..75872d257eca 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
@@ -10,8 +10,11 @@
#include <linux/pci.h>
#include "rvu_reg.h"
+#include "cn20k/reg.h"
+#include "cn20k/api.h"
#include "mbox.h"
#include "rvu_trace.h"
+#include "rvu.h"
static const u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
@@ -28,8 +31,10 @@ void __otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
mdev->rsp_size = 0;
tx_hdr->num_msgs = 0;
tx_hdr->msg_size = 0;
+ tx_hdr->sig = 0;
rx_hdr->num_msgs = 0;
rx_hdr->msg_size = 0;
+ rx_hdr->sig = 0;
}
EXPORT_SYMBOL(__otx2_mbox_reset);
@@ -53,9 +58,98 @@ void otx2_mbox_destroy(struct otx2_mbox *mbox)
}
EXPORT_SYMBOL(otx2_mbox_destroy);
+int cn20k_mbox_setup(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ void *reg_base, int direction, int ndevs)
+{
+ switch (direction) {
+ case MBOX_DIR_AFPF:
+ case MBOX_DIR_PFVF:
+ mbox->tx_start = MBOX_DOWN_TX_START;
+ mbox->rx_start = MBOX_DOWN_RX_START;
+ mbox->tx_size = MBOX_DOWN_TX_SIZE;
+ mbox->rx_size = MBOX_DOWN_RX_SIZE;
+ break;
+ case MBOX_DIR_PFAF:
+ case MBOX_DIR_VFPF:
+ mbox->tx_start = MBOX_DOWN_RX_START;
+ mbox->rx_start = MBOX_DOWN_TX_START;
+ mbox->tx_size = MBOX_DOWN_RX_SIZE;
+ mbox->rx_size = MBOX_DOWN_TX_SIZE;
+ break;
+ case MBOX_DIR_AFPF_UP:
+ case MBOX_DIR_PFVF_UP:
+ mbox->tx_start = MBOX_UP_TX_START;
+ mbox->rx_start = MBOX_UP_RX_START;
+ mbox->tx_size = MBOX_UP_TX_SIZE;
+ mbox->rx_size = MBOX_UP_RX_SIZE;
+ break;
+ case MBOX_DIR_PFAF_UP:
+ case MBOX_DIR_VFPF_UP:
+ mbox->tx_start = MBOX_UP_RX_START;
+ mbox->rx_start = MBOX_UP_TX_START;
+ mbox->tx_size = MBOX_UP_RX_SIZE;
+ mbox->rx_size = MBOX_UP_TX_SIZE;
+ break;
+ default:
+ return -ENODEV;
+ }
+
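+ /* Pick the doorbell (trigger) CSR for this mailbox direction; the
+ * per-device doorbell address is derived as trigger | (devid << tr_shift).
+ */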
+ switch (direction) {
+ case MBOX_DIR_AFPF:
+ mbox->trigger = RVU_MBOX_AF_AFPFX_TRIGX(1);
+ mbox->tr_shift = 4;
+ break;
+ case MBOX_DIR_AFPF_UP:
+ mbox->trigger = RVU_MBOX_AF_AFPFX_TRIGX(0);
+ mbox->tr_shift = 4;
+ break;
+ case MBOX_DIR_PFAF:
+ mbox->trigger = RVU_MBOX_PF_PFAF_TRIGX(0);
+ mbox->tr_shift = 0;
+ break;
+ case MBOX_DIR_PFAF_UP:
+ mbox->trigger = RVU_MBOX_PF_PFAF_TRIGX(1);
+ mbox->tr_shift = 0;
+ break;
+ case MBOX_DIR_PFVF:
+ mbox->trigger = RVU_MBOX_PF_VFX_PFVF_TRIGX(1);
+ mbox->tr_shift = 4;
+ break;
+ case MBOX_DIR_PFVF_UP:
+ mbox->trigger = RVU_MBOX_PF_VFX_PFVF_TRIGX(0);
+ mbox->tr_shift = 4;
+ break;
+ case MBOX_DIR_VFPF:
+ mbox->trigger = RVU_MBOX_VF_VFPF_TRIGX(0);
+ mbox->tr_shift = 0;
+ break;
+ case MBOX_DIR_VFPF_UP:
+ mbox->trigger = RVU_MBOX_VF_VFPF_TRIGX(1);
+ mbox->tr_shift = 0;
+ break;
+ default:
+ return -ENODEV;
+ }
+ mbox->reg_base = reg_base;
+ mbox->pdev = pdev;
+
+ mbox->dev = kcalloc(ndevs, sizeof(struct otx2_mbox_dev), GFP_KERNEL);
+ if (!mbox->dev) {
+ otx2_mbox_destroy(mbox);
+ return -ENOMEM;
+ }
+ mbox->ndevs = ndevs;
+
+ return 0;
+}
+
static int otx2_mbox_setup(struct otx2_mbox *mbox, struct pci_dev *pdev,
void *reg_base, int direction, int ndevs)
{
+ if (is_cn20k(pdev))
+ return cn20k_mbox_setup(mbox, pdev, reg_base,
+ direction, ndevs);
+
switch (direction) {
case MBOX_DIR_AFPF:
case MBOX_DIR_PFVF:
@@ -188,14 +282,13 @@ int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid)
{
unsigned long timeout = jiffies + msecs_to_jiffies(MBOX_RSP_TIMEOUT);
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
- struct device *sender = &mbox->pdev->dev;
while (!time_after(jiffies, timeout)) {
if (mdev->num_msgs == mdev->msgs_acked)
return 0;
usleep_range(800, 1000);
}
- dev_dbg(sender, "timed out while waiting for rsp\n");
+ trace_otx2_msg_wait_rsp(mbox->pdev);
return -EIO;
}
EXPORT_SYMBOL(otx2_mbox_wait_for_rsp);
@@ -219,6 +312,7 @@ static void otx2_mbox_msg_send_data(struct otx2_mbox *mbox, int devid, u64 data)
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
struct mbox_hdr *tx_hdr, *rx_hdr;
void *hw_mbase = mdev->hwbase;
+ struct mbox_msghdr *msg;
u64 intr_val;
tx_hdr = hw_mbase + mbox->tx_start;
@@ -234,7 +328,10 @@ static void otx2_mbox_msg_send_data(struct otx2_mbox *mbox, int devid, u64 data)
spin_lock(&mdev->mbox_lock);
- tx_hdr->msg_size = mdev->msg_size;
+ if (!tx_hdr->sig) {
+ tx_hdr->msg_size = mdev->msg_size;
+ tx_hdr->num_msgs = mdev->num_msgs;
+ }
/* Reset header for next messages */
mdev->msg_size = 0;
@@ -248,10 +345,12 @@ static void otx2_mbox_msg_send_data(struct otx2_mbox *mbox, int devid, u64 data)
* messages. So this should be written after writing all the messages
* to the shared memory.
*/
- tx_hdr->num_msgs = mdev->num_msgs;
rx_hdr->num_msgs = 0;
- trace_otx2_msg_send(mbox->pdev, tx_hdr->num_msgs, tx_hdr->msg_size);
+ msg = (struct mbox_msghdr *)(hw_mbase + mbox->tx_start + msgs_offset);
+
+ trace_otx2_msg_send(mbox->pdev, tx_hdr->num_msgs, tx_hdr->msg_size,
+ msg->id, msg->pcifunc);
spin_unlock(&mdev->mbox_lock);
@@ -306,6 +405,7 @@ struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
{
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
struct mbox_msghdr *msghdr = NULL;
+ struct mbox_hdr *mboxhdr = NULL;
spin_lock(&mdev->mbox_lock);
size = ALIGN(size, MBOX_MSG_ALIGN);
@@ -329,6 +429,11 @@ struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
mdev->msg_size += size;
mdev->rsp_size += size_rsp;
msghdr->next_msgoff = mdev->msg_size + msgs_offset;
+
+ mboxhdr = mdev->mbase + mbox->tx_start;
+ /* Clear the msg header region */
+ memset(mboxhdr, 0, msgs_offset);
+
exit:
spin_unlock(&mdev->mbox_lock);
@@ -445,6 +550,14 @@ const char *otx2_mbox_id2name(u16 id)
#define M(_name, _id, _1, _2, _3) case _id: return # _name;
MBOX_MESSAGES
#undef M
+
+#define M(_name, _id, _1, _2, _3) case _id: return # _name;
+ MBOX_UP_CGX_MESSAGES
+#undef M
+
+#define M(_name, _id, _1, _2, _3) case _id: return # _name;
+ MBOX_UP_CPT_MESSAGES
+#undef M
default:
return "INVALID ID";
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index eb2a20b5a0d0..a3e273126e4e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -10,9 +10,11 @@
#include <linux/etherdevice.h>
#include <linux/sizes.h>
+#include <linux/ethtool.h>
#include "rvu_struct.h"
#include "common.h"
+#include "cn20k/struct.h"
#define MBOX_SIZE SZ_64K
@@ -50,6 +52,11 @@
#define MBOX_DIR_PFVF_UP 6 /* PF sends messages to VF */
#define MBOX_DIR_VFPF_UP 7 /* VF replies to PF */
+enum {
+ TYPE_AFVF,
+ TYPE_AFPF,
+};
+
struct otx2_mbox_dev {
void *mbase; /* This dev's mbox region */
void *hwbase;
@@ -78,6 +85,8 @@ struct otx2_mbox {
struct mbox_hdr {
u64 msg_size; /* Total msgs size embedded */
u16 num_msgs; /* No of msgs embedded */
+ u16 opt_msg;
+ u8 sig;
};
/* Header which precedes every msg and is also part of it */
@@ -139,10 +148,14 @@ M(MSIX_OFFSET, 0x005, msix_offset, msg_req, msix_offset_rsp) \
M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \
M(PTP_OP, 0x007, ptp_op, ptp_req, ptp_rsp) \
M(GET_HW_CAP, 0x008, get_hw_cap, msg_req, get_hw_cap_rsp) \
+M(NDC_SYNC_OP, 0x009, ndc_sync_op, ndc_sync_op, msg_rsp) \
M(LMTST_TBL_SETUP, 0x00a, lmtst_tbl_setup, lmtst_tbl_setup_req, \
msg_rsp) \
M(SET_VF_PERM, 0x00b, set_vf_perm, set_vf_perm, msg_rsp) \
M(PTP_GET_CAP, 0x00c, ptp_get_cap, msg_req, ptp_get_cap_rsp) \
+M(GET_REP_CNT, 0x00d, get_rep_cnt, msg_req, get_rep_cnt_rsp) \
+M(ESW_CFG, 0x00e, esw_cfg, esw_cfg_req, msg_rsp) \
+M(REP_EVENT_NOTIFY, 0x00f, rep_event_notify, rep_event, msg_rsp) \
/* CGX mbox IDs (range 0x200 - 0x3FF) */ \
M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp) \
M(CGX_STOP_RXTX, 0x201, cgx_stop_rxtx, msg_req, msg_rsp) \
@@ -174,6 +187,7 @@ M(CGX_FEC_STATS, 0x217, cgx_fec_stats, msg_req, cgx_fec_stats_rsp) \
M(CGX_SET_LINK_MODE, 0x218, cgx_set_link_mode, cgx_set_link_mode_req,\
cgx_set_link_mode_rsp) \
M(CGX_GET_PHY_FEC_STATS, 0x219, cgx_get_phy_fec_stats, msg_req, msg_rsp) \
+M(CGX_STATS_RST, 0x21A, cgx_stats_rst, msg_req, msg_rsp) \
M(CGX_FEATURES_GET, 0x21B, cgx_features_get, msg_req, \
cgx_features_info_msg) \
M(RPM_STATS, 0x21C, rpm_stats, msg_req, rpm_stats_rsp) \
@@ -189,6 +203,8 @@ M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, \
M(NPA_LF_FREE, 0x401, npa_lf_free, msg_req, msg_rsp) \
M(NPA_AQ_ENQ, 0x402, npa_aq_enq, npa_aq_enq_req, npa_aq_enq_rsp) \
M(NPA_HWCTX_DISABLE, 0x403, npa_hwctx_disable, hwctx_disable_req, msg_rsp)\
+M(NPA_CN20K_AQ_ENQ, 0x404, npa_cn20k_aq_enq, npa_cn20k_aq_enq_req, \
+ npa_cn20k_aq_enq_rsp) \
/* SSO/SSOW mbox IDs (range 0x600 - 0x7FF) */ \
/* TIM mbox IDs (range 0x800 - 0x9FF) */ \
/* CPT mbox IDs (range 0xA00 - 0xBFF) */ \
@@ -308,6 +324,10 @@ M(NIX_BANDPROF_FREE, 0x801e, nix_bandprof_free, nix_bandprof_free_req, \
msg_rsp) \
M(NIX_BANDPROF_GET_HWINFO, 0x801f, nix_bandprof_get_hwinfo, msg_req, \
nix_bandprof_get_hwinfo_rsp) \
+M(NIX_CPT_BP_ENABLE, 0x8020, nix_cpt_bp_enable, nix_bp_cfg_req, \
+ nix_bp_cfg_rsp) \
+M(NIX_CPT_BP_DISABLE, 0x8021, nix_cpt_bp_disable, nix_bp_cfg_req, \
+ msg_rsp) \
M(NIX_READ_INLINE_IPSEC_CFG, 0x8023, nix_read_inline_ipsec_cfg, \
msg_req, nix_inline_ipsec_cfg) \
M(NIX_MCAST_GRP_CREATE, 0x802b, nix_mcast_grp_create, nix_mcast_grp_create_req, \
@@ -317,6 +337,9 @@ M(NIX_MCAST_GRP_DESTROY, 0x802c, nix_mcast_grp_destroy, nix_mcast_grp_destroy_re
M(NIX_MCAST_GRP_UPDATE, 0x802d, nix_mcast_grp_update, \
nix_mcast_grp_update_req, \
nix_mcast_grp_update_rsp) \
+M(NIX_LF_STATS, 0x802e, nix_lf_stats, nix_stats_req, nix_stats_rsp) \
+M(NIX_CN20K_AQ_ENQ, 0x802f, nix_cn20k_aq_enq, nix_cn20k_aq_enq_req, \
+ nix_cn20k_aq_enq_rsp) \
/* MCS mbox IDs (range 0xA000 - 0xBFFF) */ \
M(MCS_ALLOC_RESOURCES, 0xa000, mcs_alloc_resources, mcs_alloc_rsrc_req, \
mcs_alloc_rsrc_rsp) \
@@ -378,12 +401,16 @@ M(CPT_INST_LMTST, 0xD00, cpt_inst_lmtst, cpt_inst_lmtst_req, msg_rsp)
#define MBOX_UP_MCS_MESSAGES \
M(MCS_INTR_NOTIFY, 0xE00, mcs_intr_notify, mcs_intr_info, msg_rsp)
+#define MBOX_UP_REP_MESSAGES \
+M(REP_EVENT_UP_NOTIFY, 0xEF0, rep_event_up_notify, rep_event, msg_rsp) \
+
enum {
#define M(_name, _id, _1, _2, _3) MBOX_MSG_ ## _name = _id,
MBOX_MESSAGES
MBOX_UP_CGX_MESSAGES
MBOX_UP_CPT_MESSAGES
MBOX_UP_MCS_MESSAGES
+MBOX_UP_REP_MESSAGES
#undef M
};
@@ -510,6 +537,8 @@ struct get_hw_cap_rsp {
u8 nix_fixed_txschq_mapping; /* Schq mapping fixed or flexible */
u8 nix_shaping; /* Is shaping and coloring supported */
u8 npc_hash_extract; /* Is hash extract supported */
+#define HW_CAP_MACSEC BIT_ULL(1)
+ u64 hw_caps;
};
/* CGX mbox message formats */
@@ -634,11 +663,17 @@ struct cgx_lmac_fwdata_s {
u64 supported_link_modes;
/* only applicable if AN is supported */
u64 advertised_fec;
- u64 advertised_link_modes;
+ u64 advertised_link_modes_own:1; /* CGX_CMD_OWN */
+ u64 advertised_link_modes:63;
/* Only applicable if SFP/QSFP slot is present */
struct sfp_eeprom_s sfp_eeprom;
struct phy_s phy;
-#define LMAC_FWDATA_RESERVED_MEM 1021
+ u32 lmac_type;
+ u32 portm_idx;
+ u64 mgmt_port:1;
+ u64 advertised_an:1;
+ u64 port;
+#define LMAC_FWDATA_RESERVED_MEM 1018
u64 reserved[LMAC_FWDATA_RESERVED_MEM];
};
@@ -651,12 +686,13 @@ struct cgx_set_link_mode_args {
u32 speed;
u8 duplex;
u8 an;
- u8 ports;
+ u8 mode_baseidx;
+ u8 multimode;
u64 mode;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
};
struct cgx_set_link_mode_req {
-#define AUTONEG_UNKNOWN 0xff
struct mbox_msghdr hdr;
struct cgx_set_link_mode_args args;
};
@@ -800,6 +836,39 @@ struct npa_aq_enq_rsp {
};
};
+struct npa_cn20k_aq_enq_req {
+ struct mbox_msghdr hdr;
+ u32 aura_id;
+ u8 ctype;
+ u8 op;
+ union {
+ /* Valid when op == WRITE/INIT and ctype == AURA.
+ * LF fills the pool_id in aura.pool_addr. AF will translate
+ * the pool_id to pool context pointer.
+ */
+ struct npa_cn20k_aura_s aura;
+ /* Valid when op == WRITE/INIT and ctype == POOL */
+ struct npa_cn20k_pool_s pool;
+ };
+ /* Mask data when op == WRITE (1=write, 0=don't write) */
+ union {
+ /* Valid when op == WRITE and ctype == AURA */
+ struct npa_cn20k_aura_s aura_mask;
+ /* Valid when op == WRITE and ctype == POOL */
+ struct npa_cn20k_pool_s pool_mask;
+ };
+};
+
+struct npa_cn20k_aq_enq_rsp {
+ struct mbox_msghdr hdr;
+ union {
+ /* Valid when op == READ and ctype == AURA */
+ struct npa_cn20k_aura_s aura;
+ /* Valid when op == READ and ctype == POOL */
+ struct npa_cn20k_pool_s pool;
+ };
+};
+
/* Disable all contexts of type 'ctype' */
struct hwctx_disable_req {
struct mbox_msghdr hdr;
@@ -908,6 +977,42 @@ struct nix_lf_free_req {
u64 flags;
};
+/* CN20K NIX AQ enqueue msg */
+struct nix_cn20k_aq_enq_req {
+ struct mbox_msghdr hdr;
+ u32 qidx;
+ u8 ctype;
+ u8 op;
+ union {
+ struct nix_cn20k_rq_ctx_s rq;
+ struct nix_cn20k_sq_ctx_s sq;
+ struct nix_cn20k_cq_ctx_s cq;
+ struct nix_rsse_s rss;
+ struct nix_rx_mce_s mce;
+ struct nix_bandprof_s prof;
+ };
+ union {
+ struct nix_cn20k_rq_ctx_s rq_mask;
+ struct nix_cn20k_sq_ctx_s sq_mask;
+ struct nix_cn20k_cq_ctx_s cq_mask;
+ struct nix_rsse_s rss_mask;
+ struct nix_rx_mce_s mce_mask;
+ struct nix_bandprof_s prof_mask;
+ };
+};
+
+struct nix_cn20k_aq_enq_rsp {
+ struct mbox_msghdr hdr;
+ union {
+ struct nix_cn20k_rq_ctx_s rq;
+ struct nix_cn20k_sq_ctx_s sq;
+ struct nix_cn20k_cq_ctx_s cq;
+ struct nix_rsse_s rss;
+ struct nix_rx_mce_s mce;
+ struct nix_bandprof_s prof;
+ };
+};
+
/* CN10K NIX AQ enqueue msg */
struct nix_cn10k_aq_enq_req {
struct mbox_msghdr hdr;
@@ -1213,10 +1318,8 @@ struct nix_bp_cfg_req {
/* bpid_per_chan = 1 assigns separate bp id for each channel */
};
-/* PF can be mapped to either CGX or LBK interface,
- * so maximum 64 channels are possible.
- */
-#define NIX_MAX_BPID_CHAN 64
+/* Maximum channels any single NIX interface can have */
+#define NIX_MAX_BPID_CHAN 256
struct nix_bp_cfg_rsp {
struct mbox_msghdr hdr;
u16 chan_bpid[NIX_MAX_BPID_CHAN]; /* Channel and bpid mapping */
@@ -1364,6 +1467,37 @@ struct nix_bandprof_get_hwinfo_rsp {
u32 policer_timeunit;
};
+struct nix_stats_req {
+ struct mbox_msghdr hdr;
+ u8 reset;
+ u16 pcifunc;
+ u64 rsvd;
+};
+
+struct nix_stats_rsp {
+ struct mbox_msghdr hdr;
+ u16 pcifunc;
+ struct {
+ u64 octs;
+ u64 ucast;
+ u64 bcast;
+ u64 mcast;
+ u64 drop;
+ u64 drop_octs;
+ u64 drop_mcast;
+ u64 drop_bcast;
+ u64 err;
+ u64 rsvd[5];
+ } rx;
+ struct {
+ u64 ucast;
+ u64 bcast;
+ u64 mcast;
+ u64 drop;
+ u64 octs;
+ } tx;
+};
+
/* NPC mbox message structs */
#define NPC_MCAM_ENTRY_INVALID 0xFFFF
@@ -1525,6 +1659,41 @@ struct ptp_get_cap_rsp {
u64 cap;
};
+struct get_rep_cnt_rsp {
+ struct mbox_msghdr hdr;
+ u16 rep_cnt;
+ u16 rep_pf_map[64];
+ u64 rsvd;
+};
+
+struct esw_cfg_req {
+ struct mbox_msghdr hdr;
+ u8 ena;
+ u64 rsvd;
+};
+
+struct rep_evt_data {
+ u8 port_state;
+ u8 vf_state;
+ u16 rx_mode;
+ u16 rx_flags;
+ u16 mtu;
+ u8 mac[ETH_ALEN];
+ u64 rsvd[5];
+};
+
+struct rep_event {
+ struct mbox_msghdr hdr;
+ u16 pcifunc;
+#define RVU_EVENT_PORT_STATE BIT_ULL(0)
+#define RVU_EVENT_PFVF_STATE BIT_ULL(1)
+#define RVU_EVENT_MTU_CHANGE BIT_ULL(2)
+#define RVU_EVENT_RX_MODE_CHANGE BIT_ULL(3)
+#define RVU_EVENT_MAC_ADDR_CHANGE BIT_ULL(4)
+ u16 event;
+ struct rep_evt_data evt_data;
+};
+
struct flow_msg {
unsigned char dmac[6];
unsigned char smac[6];
@@ -1563,6 +1732,7 @@ struct flow_msg {
u8 icmp_type;
u8 icmp_code;
__be16 tcp_flags;
+ u16 sq_id;
};
struct npc_install_flow_req {
@@ -1717,6 +1887,13 @@ struct lmtst_tbl_setup_req {
u64 rsvd[4];
};
+struct ndc_sync_op {
+ struct mbox_msghdr hdr;
+ u8 nix_lf_tx_sync;
+ u8 nix_lf_rx_sync;
+ u8 npa_lf_sync;
+};
+
/* CPT mailbox error codes
* Range 901 - 1000.
*/
@@ -1746,7 +1923,7 @@ struct cpt_lf_alloc_req_msg {
u16 nix_pf_func;
u16 sso_pf_func;
u16 eng_grpmsk;
- int blkaddr;
+ u8 blkaddr;
u8 ctx_ilen_valid : 1;
u8 ctx_ilen : 7;
};
@@ -1849,8 +2026,9 @@ struct cpt_flt_eng_info_req {
struct cpt_flt_eng_info_rsp {
struct mbox_msghdr hdr;
- u64 flt_eng_map[CPT_10K_AF_INT_VEC_RVU];
- u64 rcvrd_eng_map[CPT_10K_AF_INT_VEC_RVU];
+#define CPT_AF_MAX_FLT_INT_VECS 3
+ u64 flt_eng_map[CPT_AF_MAX_FLT_INT_VECS];
+ u64 rcvrd_eng_map[CPT_AF_MAX_FLT_INT_VECS];
u64 rsvd;
};
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
index d39d86e694cc..a80c8e7c94f2 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
@@ -97,7 +97,7 @@ int mcs_add_intr_wq_entry(struct mcs *mcs, struct mcs_intr_event *event)
if (pcifunc & RVU_PFVF_FUNC_MASK)
pfvf = &mcs->vf[rvu_get_hwvf(rvu, pcifunc)];
else
- pfvf = &mcs->pf[rvu_get_pf(pcifunc)];
+ pfvf = &mcs->pf[rvu_get_pf(rvu->pdev, pcifunc)];
event->intr_mask &= pfvf->intr_mask;
@@ -123,7 +123,7 @@ static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu)
struct mcs_intr_info *req;
int pf;
- pf = rvu_get_pf(event->pcifunc);
+ pf = rvu_get_pf(rvu->pdev, event->pcifunc);
mutex_lock(&rvu->mbox_lock);
@@ -143,6 +143,8 @@ static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu)
otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf);
+ otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pf);
+
mutex_unlock(&rvu->mbox_lock);
return 0;
@@ -191,7 +193,7 @@ int rvu_mbox_handler_mcs_intr_cfg(struct rvu *rvu,
if (pcifunc & RVU_PFVF_FUNC_MASK)
pfvf = &mcs->vf[rvu_get_hwvf(rvu, pcifunc)];
else
- pfvf = &mcs->pf[rvu_get_pf(pcifunc)];
+ pfvf = &mcs->pf[rvu_get_pf(rvu->pdev, pcifunc)];
mcs->pf_map[0] = pcifunc;
pfvf->intr_mask = req->intr_mask;
@@ -911,7 +913,7 @@ int rvu_mcs_init(struct rvu *rvu)
/* Initialize the wq for handling mcs interrupts */
INIT_LIST_HEAD(&rvu->mcs_intrq_head);
INIT_WORK(&rvu->mcs_intr_work, mcs_intr_handler_task);
- rvu->mcs_intr_wq = alloc_workqueue("mcs_intr_wq", 0, 0);
+ rvu->mcs_intr_wq = alloc_workqueue("mcs_intr_wq", WQ_PERCPU, 0);
if (!rvu->mcs_intr_wq) {
dev_err(rvu->dev, "mcs alloc workqueue failed\n");
return -ENOMEM;
@@ -925,7 +927,6 @@ void rvu_mcs_exit(struct rvu *rvu)
if (!rvu->mcs_intr_wq)
return;
- flush_workqueue(rvu->mcs_intr_wq);
destroy_workqueue(rvu->mcs_intr_wq);
rvu->mcs_intr_wq = NULL;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index d883157393ea..6c3aca6f278d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -63,8 +63,13 @@ enum npc_kpu_lb_ltype {
NPC_LT_LB_CUSTOM1 = 0xF,
};
+/* Don't modify ltypes up to IP6_EXT, otherwise length and checksum of IP
+ * headers may not be checked correctly. IPv4 ltypes and IPv6 ltypes must
+ * differ only at bit 0 so mask 0xE can be used to detect extended headers.
+ */
enum npc_kpu_lc_ltype {
- NPC_LT_LC_IP = 1,
+ NPC_LT_LC_PTP = 1,
+ NPC_LT_LC_IP,
NPC_LT_LC_IP_OPT,
NPC_LT_LC_IP6,
NPC_LT_LC_IP6_EXT,
@@ -72,7 +77,6 @@ enum npc_kpu_lc_ltype {
NPC_LT_LC_RARP,
NPC_LT_LC_MPLS,
NPC_LT_LC_NSH,
- NPC_LT_LC_PTP,
NPC_LT_LC_FCOE,
NPC_LT_LC_NGIO,
NPC_LT_LC_CUSTOM0 = 0xE,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
index bcc96eed2481..66749b3649c1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
@@ -545,8 +545,7 @@ static int ptp_probe(struct pci_dev *pdev,
spin_lock_init(&ptp->ptp_lock);
if (cn10k_ptp_errata(ptp)) {
ptp->read_ptp_tstmp = &read_ptp_tstmp_sec_nsec;
- hrtimer_init(&ptp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- ptp->hrtimer.function = ptp_reset_thresh;
+ hrtimer_setup(&ptp->hrtimer, ptp_reset_thresh, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
} else {
ptp->read_ptp_tstmp = &read_ptp_tstmp_nsec;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
index 76218f1cb459..2e9945446199 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
@@ -38,6 +38,9 @@ static struct mac_ops rpm_mac_ops = {
.pfc_config = rpm_lmac_pfc_config,
.mac_get_pfc_frm_cfg = rpm_lmac_get_pfc_frm_cfg,
.mac_reset = rpm_lmac_reset,
+ .mac_stats_reset = rpm_stats_reset,
+ .mac_x2p_reset = rpm_x2p_reset,
+ .mac_enadis_rx = rpm_enadis_rx,
};
static struct mac_ops rpm2_mac_ops = {
@@ -70,6 +73,9 @@ static struct mac_ops rpm2_mac_ops = {
.pfc_config = rpm_lmac_pfc_config,
.mac_get_pfc_frm_cfg = rpm_lmac_get_pfc_frm_cfg,
.mac_reset = rpm_lmac_reset,
+ .mac_stats_reset = rpm_stats_reset,
+ .mac_x2p_reset = rpm_x2p_reset,
+ .mac_enadis_rx = rpm_enadis_rx,
};
bool is_dev_rpm2(void *rpmd)
@@ -443,6 +449,21 @@ int rpm_get_tx_stats(void *rpmd, int lmac_id, int idx, u64 *tx_stat)
return 0;
}
+int rpm_stats_reset(void *rpmd, int lmac_id)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ cfg = rpm_read(rpm, 0, RPMX_MTI_STAT_STATN_CONTROL);
+ cfg |= RPMX_CMD_CLEAR_TX | RPMX_CMD_CLEAR_RX | BIT_ULL(lmac_id);
+ rpm_write(rpm, 0, RPMX_MTI_STAT_STATN_CONTROL, cfg);
+
+ return 0;
+}
+
u8 rpm_get_lmac_type(void *rpmd, int lmac_id)
{
rpm_t *rpm = rpmd;
@@ -450,7 +471,7 @@ u8 rpm_get_lmac_type(void *rpmd, int lmac_id)
int err;
req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_LINK_STS, req);
- err = cgx_fwi_cmd_generic(req, &resp, rpm, 0);
+ err = cgx_fwi_cmd_generic(req, &resp, rpm, lmac_id);
if (!err)
return FIELD_GET(RESP_LINKSTAT_LMAC_TYPE, resp);
return err;
@@ -463,7 +484,7 @@ u32 rpm_get_lmac_fifo_len(void *rpmd, int lmac_id)
u8 num_lmacs;
u32 fifo_len;
- fifo_len = rpm->mac_ops->fifo_len;
+ fifo_len = rpm->fifo_len;
num_lmacs = rpm->mac_ops->get_nr_lmacs(rpm);
switch (num_lmacs) {
@@ -516,9 +537,9 @@ u32 rpm2_get_lmac_fifo_len(void *rpmd, int lmac_id)
*/
max_lmac = (rpm_read(rpm, 0, CGX_CONST) >> 24) & 0xFF;
if (max_lmac > 4)
- fifo_len = rpm->mac_ops->fifo_len / 2;
+ fifo_len = rpm->fifo_len / 2;
else
- fifo_len = rpm->mac_ops->fifo_len;
+ fifo_len = rpm->fifo_len;
if (lmac_id < 4) {
num_lmacs = hweight8(lmac_info & 0xF);
@@ -682,46 +703,51 @@ int rpm_get_fec_stats(void *rpmd, int lmac_id, struct cgx_fec_stats_rsp *rsp)
if (rpm->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_NONE)
return 0;
+ /* The latched registers FCFECX_CW_HI/RSFEC_STAT_FAST_DATA_HI_CDC are
+ * common to all counters. Acquire the lock to serialize reads.
+ */
+ mutex_lock(&rpm->lock);
if (rpm->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_BASER) {
- val_lo = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_VL0_CCW_LO);
- val_hi = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_CW_HI);
+ val_lo = rpm_read(rpm, 0, RPMX_MTI_FCFECX_VL0_CCW_LO(lmac_id));
+ val_hi = rpm_read(rpm, 0, RPMX_MTI_FCFECX_CW_HI(lmac_id));
rsp->fec_corr_blks = (val_hi << 16 | val_lo);
- val_lo = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_VL0_NCCW_LO);
- val_hi = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_CW_HI);
+ val_lo = rpm_read(rpm, 0, RPMX_MTI_FCFECX_VL0_NCCW_LO(lmac_id));
+ val_hi = rpm_read(rpm, 0, RPMX_MTI_FCFECX_CW_HI(lmac_id));
rsp->fec_uncorr_blks = (val_hi << 16 | val_lo);
/* 50G uses 2 Physical serdes lines */
if (rpm->lmac_idmap[lmac_id]->link_info.lmac_type_id ==
LMAC_MODE_50G_R) {
- val_lo = rpm_read(rpm, lmac_id,
- RPMX_MTI_FCFECX_VL1_CCW_LO);
- val_hi = rpm_read(rpm, lmac_id,
- RPMX_MTI_FCFECX_CW_HI);
+ val_lo = rpm_read(rpm, 0,
+ RPMX_MTI_FCFECX_VL1_CCW_LO(lmac_id));
+ val_hi = rpm_read(rpm, 0,
+ RPMX_MTI_FCFECX_CW_HI(lmac_id));
rsp->fec_corr_blks += (val_hi << 16 | val_lo);
- val_lo = rpm_read(rpm, lmac_id,
- RPMX_MTI_FCFECX_VL1_NCCW_LO);
- val_hi = rpm_read(rpm, lmac_id,
- RPMX_MTI_FCFECX_CW_HI);
+ val_lo = rpm_read(rpm, 0,
+ RPMX_MTI_FCFECX_VL1_NCCW_LO(lmac_id));
+ val_hi = rpm_read(rpm, 0,
+ RPMX_MTI_FCFECX_CW_HI(lmac_id));
rsp->fec_uncorr_blks += (val_hi << 16 | val_lo);
}
} else {
/* enable RS-FEC capture */
- cfg = rpm_read(rpm, 0, RPMX_MTI_STAT_STATN_CONTROL);
+ cfg = rpm_read(rpm, 0, RPMX_MTI_RSFEC_STAT_STATN_CONTROL);
cfg |= RPMX_RSFEC_RX_CAPTURE | BIT(lmac_id);
- rpm_write(rpm, 0, RPMX_MTI_STAT_STATN_CONTROL, cfg);
+ rpm_write(rpm, 0, RPMX_MTI_RSFEC_STAT_STATN_CONTROL, cfg);
val_lo = rpm_read(rpm, 0,
RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_2);
- val_hi = rpm_read(rpm, 0, RPMX_MTI_STAT_DATA_HI_CDC);
+ val_hi = rpm_read(rpm, 0, RPMX_MTI_RSFEC_STAT_FAST_DATA_HI_CDC);
rsp->fec_corr_blks = (val_hi << 32 | val_lo);
val_lo = rpm_read(rpm, 0,
RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_3);
- val_hi = rpm_read(rpm, 0, RPMX_MTI_STAT_DATA_HI_CDC);
+ val_hi = rpm_read(rpm, 0, RPMX_MTI_RSFEC_STAT_FAST_DATA_HI_CDC);
rsp->fec_uncorr_blks = (val_hi << 32 | val_lo);
}
+ mutex_unlock(&rpm->lock);
return 0;
}
@@ -746,3 +772,41 @@ int rpm_lmac_reset(void *rpmd, int lmac_id, u8 pf_req_flr)
return 0;
}
+
+void rpm_x2p_reset(void *rpmd, bool enable)
+{
+ rpm_t *rpm = rpmd;
+ int lmac_id;
+ u64 cfg;
+
+ if (enable) {
+ for_each_set_bit(lmac_id, &rpm->lmac_bmap, rpm->max_lmac_per_mac)
+ rpm->mac_ops->mac_enadis_rx(rpm, lmac_id, false);
+
+ usleep_range(1000, 2000);
+
+ cfg = rpm_read(rpm, 0, RPMX_CMR_GLOBAL_CFG);
+ rpm_write(rpm, 0, RPMX_CMR_GLOBAL_CFG, cfg | RPM_NIX0_RESET);
+ } else {
+ cfg = rpm_read(rpm, 0, RPMX_CMR_GLOBAL_CFG);
+ cfg &= ~RPM_NIX0_RESET;
+ rpm_write(rpm, 0, RPMX_CMR_GLOBAL_CFG, cfg);
+ }
+}
+
+int rpm_enadis_rx(void *rpmd, int lmac_id, bool enable)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ if (enable)
+ cfg |= RPM_RX_EN;
+ else
+ cfg &= ~RPM_RX_EN;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
index b79cfbc6f877..b8d3972e096a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
@@ -17,6 +17,8 @@
/* Registers */
#define RPMX_CMRX_CFG 0x00
+#define RPMX_CMR_GLOBAL_CFG 0x08
+#define RPM_NIX0_RESET BIT_ULL(3)
#define RPMX_RX_TS_PREPEND BIT_ULL(22)
#define RPMX_TX_PTP_1S_SUPPORT BIT_ULL(17)
#define RPMX_CMRX_RX_ID_MAP 0x80
@@ -84,14 +86,18 @@
/* FEC stats */
#define RPMX_MTI_STAT_STATN_CONTROL 0x10018
#define RPMX_MTI_STAT_DATA_HI_CDC 0x10038
-#define RPMX_RSFEC_RX_CAPTURE BIT_ULL(27)
+#define RPMX_RSFEC_RX_CAPTURE BIT_ULL(28)
+#define RPMX_CMD_CLEAR_RX BIT_ULL(30)
+#define RPMX_CMD_CLEAR_TX BIT_ULL(31)
+#define RPMX_MTI_RSFEC_STAT_STATN_CONTROL 0x40018
+#define RPMX_MTI_RSFEC_STAT_FAST_DATA_HI_CDC 0x40000
#define RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_2 0x40050
#define RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_3 0x40058
-#define RPMX_MTI_FCFECX_VL0_CCW_LO 0x38618
-#define RPMX_MTI_FCFECX_VL0_NCCW_LO 0x38620
-#define RPMX_MTI_FCFECX_VL1_CCW_LO 0x38628
-#define RPMX_MTI_FCFECX_VL1_NCCW_LO 0x38630
-#define RPMX_MTI_FCFECX_CW_HI 0x38638
+#define RPMX_MTI_FCFECX_VL0_CCW_LO(a) (0x38618 + ((a) * 0x40))
+#define RPMX_MTI_FCFECX_VL0_NCCW_LO(a) (0x38620 + ((a) * 0x40))
+#define RPMX_MTI_FCFECX_VL1_CCW_LO(a) (0x38628 + ((a) * 0x40))
+#define RPMX_MTI_FCFECX_VL1_NCCW_LO(a) (0x38630 + ((a) * 0x40))
+#define RPMX_MTI_FCFECX_CW_HI(a) (0x38638 + ((a) * 0x40))
/* CN10KB CSR Declaration */
#define RPM2_CMRX_SW_INT 0x1b0
@@ -134,4 +140,7 @@ int rpm2_get_nr_lmacs(void *rpmd);
bool is_dev_rpm2(void *rpmd);
int rpm_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp);
int rpm_lmac_reset(void *rpmd, int lmac_id, u8 pf_req_flr);
+int rpm_stats_reset(void *rpmd, int lmac_id);
+void rpm_x2p_reset(void *rpmd, bool enable);
+int rpm_enadis_rx(void *rpmd, int lmac_id, bool enable);
#endif /* RPM_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index ff78251f92d4..2d78e08f985f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -20,6 +20,8 @@
#include "rvu_trace.h"
#include "rvu_npc_hash.h"
+#include "cn20k/reg.h"
+#include "cn20k/api.h"
#define DRV_NAME "rvu_af"
#define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver"
@@ -34,10 +36,8 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
int type, int num,
void (mbox_handler)(struct work_struct *),
void (mbox_up_handler)(struct work_struct *));
-enum {
- TYPE_AFVF,
- TYPE_AFPF,
-};
+static irqreturn_t rvu_mbox_pf_intr_handler(int irq, void *rvu_irq);
+static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq);
/* Supported devices */
static const struct pci_device_id rvu_id_table[] = {
@@ -294,7 +294,7 @@ int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc)
devnum = rvu_get_hwvf(rvu, pcifunc);
} else {
is_pf = true;
- devnum = rvu_get_pf(pcifunc);
+ devnum = rvu_get_pf(rvu->pdev, pcifunc);
}
/* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' or
@@ -359,7 +359,7 @@ static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf,
devnum = rvu_get_hwvf(rvu, pcifunc);
} else {
is_pf = true;
- devnum = rvu_get_pf(pcifunc);
+ devnum = rvu_get_pf(rvu->pdev, pcifunc);
}
block->fn_map[lf] = attach ? pcifunc : 0;
@@ -400,11 +400,6 @@ static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf,
rvu_write64(rvu, BLKADDR_RVUM, reg | (devnum << 16), num_lfs);
}
-inline int rvu_get_pf(u16 pcifunc)
-{
- return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
-}
-
void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf)
{
u64 cfg;
@@ -422,7 +417,7 @@ int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
int pf, func;
u64 cfg;
- pf = rvu_get_pf(pcifunc);
+ pf = rvu_get_pf(rvu->pdev, pcifunc);
func = pcifunc & RVU_PFVF_FUNC_MASK;
/* Get first HWVF attached to this PF */
@@ -437,7 +432,7 @@ struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc)
if (pcifunc & RVU_PFVF_FUNC_MASK)
return &rvu->hwvf[rvu_get_hwvf(rvu, pcifunc)];
else
- return &rvu->pf[rvu_get_pf(pcifunc)];
+ return &rvu->pf[rvu_get_pf(rvu->pdev, pcifunc)];
}
static bool is_pf_func_valid(struct rvu *rvu, u16 pcifunc)
@@ -445,7 +440,7 @@ static bool is_pf_func_valid(struct rvu *rvu, u16 pcifunc)
int pf, vf, nvfs;
u64 cfg;
- pf = rvu_get_pf(pcifunc);
+ pf = rvu_get_pf(rvu->pdev, pcifunc);
if (pf >= rvu->hw->total_pfs)
return false;
@@ -760,6 +755,11 @@ static void rvu_free_hw_resources(struct rvu *rvu)
rvu_reset_msix(rvu);
mutex_destroy(&rvu->rsrc_lock);
+
+ /* Free the QINT/CINT memory */
+ pfvf = &rvu->pf[RVU_AFPF];
+ qmem_free(rvu->dev, pfvf->nix_qints_ctx);
+ qmem_free(rvu->dev, pfvf->cq_ints_ctx);
}
static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
@@ -1162,6 +1162,10 @@ cpt:
}
rvu_program_channels(rvu);
+ cgx_start_linkup(rvu);
+
+ rvu_block_bcast_xon(rvu, BLKADDR_NIX0);
+ rvu_block_bcast_xon(rvu, BLKADDR_NIX1);
err = rvu_mcs_init(rvu);
if (err) {
@@ -1392,8 +1396,6 @@ static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
if (blkaddr < 0)
return;
- if (blktype == BLKTYPE_NIX)
- rvu_nix_reset_mac(pfvf, pcifunc);
block = &hw->block[blkaddr];
@@ -1406,6 +1408,10 @@ static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
if (lf < 0) /* This should never happen */
continue;
+ if (blktype == BLKTYPE_NIX) {
+ rvu_nix_reset_mac(pfvf, pcifunc);
+ rvu_npc_clear_ucast_entry(rvu, pcifunc, lf);
+ }
/* Disable the LF */
rvu_write64(rvu, blkaddr, block->lfcfg_reg |
(lf << block->lfshift), 0x00ULL);
@@ -1484,7 +1490,7 @@ int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc)
pf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
/* All CGX mapped PFs are set with assigned NIX block during init */
- if (is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
+ if (is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc))) {
blkaddr = pf->nix_blkaddr;
} else if (is_lbk_vf(rvu, pcifunc)) {
vf = pcifunc - 1;
@@ -1498,7 +1504,7 @@ int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc)
}
/* if SDP1 then the blkaddr is NIX1 */
- if (is_sdp_pfvf(pcifunc) && pf->sdp_info->node_id == 1)
+ if (is_sdp_pfvf(rvu, pcifunc) && pf->sdp_info->node_id == 1)
blkaddr = BLKADDR_NIX1;
switch (blkaddr) {
@@ -1643,7 +1649,7 @@ static int rvu_check_rsrc_availability(struct rvu *rvu,
if (req->ssow > block->lf.max) {
dev_err(&rvu->pdev->dev,
"Func 0x%x: Invalid SSOW req, %d > max %d\n",
- pcifunc, req->sso, block->lf.max);
+ pcifunc, req->ssow, block->lf.max);
return -EINVAL;
}
mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
@@ -2003,7 +2009,7 @@ int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
vf = pcifunc & RVU_PFVF_FUNC_MASK;
cfg = rvu_read64(rvu, BLKADDR_RVUM,
- RVU_PRIV_PFX_CFG(rvu_get_pf(pcifunc)));
+ RVU_PRIV_PFX_CFG(rvu_get_pf(rvu->pdev, pcifunc)));
numvfs = (cfg >> 12) & 0xFF;
if (vf && vf <= numvfs)
@@ -2014,6 +2020,13 @@ int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
return 0;
}
+int rvu_ndc_sync(struct rvu *rvu, int lfblkaddr, int lfidx, u64 lfoffset)
+{
+ /* Sync cached info for this LF in NDC to LLC/DRAM */
+ rvu_write64(rvu, lfblkaddr, lfoffset, BIT_ULL(12) | lfidx);
+ return rvu_poll_reg(rvu, lfblkaddr, lfoffset, BIT_ULL(12), true);
+}
+
int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req,
struct get_hw_cap_rsp *rsp)
{
@@ -2023,6 +2036,9 @@ int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req,
rsp->nix_shaping = hw->cap.nix_shaping;
rsp->npc_hash_extract = hw->cap.npc_hash_extract;
+ if (rvu->mcs_blk_cnt)
+ rsp->hw_caps = HW_CAP_MACSEC;
+
return 0;
}
@@ -2068,6 +2084,65 @@ int rvu_mbox_handler_set_vf_perm(struct rvu *rvu, struct set_vf_perm *req,
return 0;
}
+int rvu_mbox_handler_ndc_sync_op(struct rvu *rvu,
+ struct ndc_sync_op *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int err, lfidx, lfblkaddr;
+
+ if (req->npa_lf_sync) {
+ /* Get NPA LF data */
+ lfblkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
+ if (lfblkaddr < 0)
+ return NPA_AF_ERR_AF_LF_INVALID;
+
+ lfidx = rvu_get_lf(rvu, &hw->block[lfblkaddr], pcifunc, 0);
+ if (lfidx < 0)
+ return NPA_AF_ERR_AF_LF_INVALID;
+
+ /* Sync NPA NDC */
+ err = rvu_ndc_sync(rvu, lfblkaddr,
+ lfidx, NPA_AF_NDC_SYNC);
+ if (err)
+ dev_err(rvu->dev,
+ "NDC-NPA sync failed for LF %u\n", lfidx);
+ }
+
+ if (!req->nix_lf_tx_sync && !req->nix_lf_rx_sync)
+ return 0;
+
+ /* Get NIX LF data */
+ lfblkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (lfblkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ lfidx = rvu_get_lf(rvu, &hw->block[lfblkaddr], pcifunc, 0);
+ if (lfidx < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ if (req->nix_lf_tx_sync) {
+ /* Sync NIX TX NDC */
+ err = rvu_ndc_sync(rvu, lfblkaddr,
+ lfidx, NIX_AF_NDC_TX_SYNC);
+ if (err)
+ dev_err(rvu->dev,
+ "NDC-NIX-TX sync failed for LF %u\n", lfidx);
+ }
+
+ if (req->nix_lf_rx_sync) {
+ /* Sync NIX RX NDC */
+ err = rvu_ndc_sync(rvu, lfblkaddr,
+ lfidx, NIX_AF_NDC_RX_SYNC);
+ if (err)
+ dev_err(rvu->dev,
+ "NDC-NIX-RX sync failed for LF %u\n", lfidx);
+ }
+
+ return 0;
+}
+
static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid,
struct mbox_msghdr *req)
{
@@ -2106,7 +2181,7 @@ static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid,
if (rsp && err) \
rsp->hdr.rc = err; \
\
- trace_otx2_msg_process(mbox->pdev, _id, err); \
+ trace_otx2_msg_process(mbox->pdev, _id, err, req->pcifunc); \
return rsp ? err : -ENOMEM; \
}
MBOX_MESSAGES
@@ -2151,15 +2226,30 @@ static void __rvu_mbox_handler(struct rvu_work *mwork, int type, bool poll)
offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
+ if (req_hdr->sig && !(is_rvu_otx2(rvu) || is_cn20k(rvu->pdev))) {
+ req_hdr->opt_msg = mw->mbox_wrk[devid].num_msgs;
+ rvu_write64(rvu, BLKADDR_NIX0, RVU_AF_BAR2_SEL,
+ RVU_AF_BAR2_PFID);
+ if (type == TYPE_AFPF)
+ rvu_write64(rvu, BLKADDR_NIX0,
+ AF_BAR2_ALIASX(0, NIX_CINTX_INT_W1S(devid)),
+ 0x1);
+ else
+ rvu_write64(rvu, BLKADDR_NIX0,
+ AF_BAR2_ALIASX(0, NIX_QINTX_CNT(devid)),
+ 0x1);
+ usleep_range(5000, 6000);
+ goto done;
+ }
+
for (id = 0; id < mw->mbox_wrk[devid].num_msgs; id++) {
msg = mdev->mbase + offset;
/* Set which PF/VF sent this message based on mbox IRQ */
switch (type) {
case TYPE_AFPF:
- msg->pcifunc &=
- ~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT);
- msg->pcifunc |= (devid << RVU_PFVF_PF_SHIFT);
+ msg->pcifunc &= rvu_pcifunc_pf_mask(rvu->pdev);
+ msg->pcifunc |= rvu_make_pcifunc(rvu->pdev, devid, 0);
break;
case TYPE_AFVF:
msg->pcifunc &=
@@ -2177,16 +2267,17 @@ static void __rvu_mbox_handler(struct rvu_work *mwork, int type, bool poll)
if (msg->pcifunc & RVU_PFVF_FUNC_MASK)
dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n",
err, otx2_mbox_id2name(msg->id),
- msg->id, rvu_get_pf(msg->pcifunc),
+ msg->id, rvu_get_pf(rvu->pdev, msg->pcifunc),
(msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1);
else
dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n",
err, otx2_mbox_id2name(msg->id),
msg->id, devid);
}
+done:
mw->mbox_wrk[devid].num_msgs = 0;
- if (poll)
+ if (!is_cn20k(mbox->pdev) && poll)
otx2_mbox_wait_for_zero(mbox, devid);
/* Send mbox responses to VF/PF */
@@ -2292,13 +2383,21 @@ static inline void rvu_afvf_mbox_up_handler(struct work_struct *work)
__rvu_mbox_up_handler(mwork, TYPE_AFVF);
}
-static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
+static int rvu_get_mbox_regions(struct rvu *rvu, void __iomem **mbox_addr,
int num, int type, unsigned long *pf_bmap)
{
struct rvu_hwinfo *hw = rvu->hw;
int region;
u64 bar4;
+ /* On the cn20k platform the AF mailbox region is allocated by software
+ * and the corresponding IOVA is programmed into hardware, unlike earlier
+ * silicons where software uses the hardware region after ioremap.
+ */
+ if (is_cn20k(rvu->pdev))
+ return cn20k_rvu_get_mbox_regions(rvu, (void *)mbox_addr,
+ num, type, pf_bmap);
+
/* For cn10k platform VF mailbox regions of a PF follows after the
* PF <-> AF mailbox region. Whereas for Octeontx2 it is read from
* RVU_PF_VF_BAR4_ADDR register.
@@ -2317,7 +2416,7 @@ static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
bar4 = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR);
bar4 += region * MBOX_SIZE;
}
- mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE);
+ mbox_addr[region] = ioremap_wc(bar4, MBOX_SIZE);
if (!mbox_addr[region])
goto error;
}
@@ -2340,7 +2439,7 @@ static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
RVU_AF_PF_BAR4_ADDR);
bar4 += region * MBOX_SIZE;
}
- mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE);
+ mbox_addr[region] = ioremap_wc(bar4, MBOX_SIZE);
if (!mbox_addr[region])
goto error;
}
@@ -2348,20 +2447,26 @@ static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
error:
while (region--)
- iounmap((void __iomem *)mbox_addr[region]);
+ iounmap(mbox_addr[region]);
return -ENOMEM;
}
+static struct mbox_ops rvu_mbox_ops = {
+ .pf_intr_handler = rvu_mbox_pf_intr_handler,
+ .afvf_intr_handler = rvu_mbox_intr_handler,
+};
+
static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
int type, int num,
void (mbox_handler)(struct work_struct *),
void (mbox_up_handler)(struct work_struct *))
{
- int err = -EINVAL, i, dir, dir_up;
+ void __iomem **mbox_regions;
+ struct ng_rvu *ng_rvu_mbox;
+ int err, i, dir, dir_up;
void __iomem *reg_base;
struct rvu_work *mwork;
unsigned long *pf_bmap;
- void **mbox_regions;
const char *name;
u64 cfg;
@@ -2369,6 +2474,12 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
if (!pf_bmap)
return -ENOMEM;
+ ng_rvu_mbox = kzalloc(sizeof(*ng_rvu_mbox), GFP_KERNEL);
+ if (!ng_rvu_mbox) {
+ err = -ENOMEM;
+ goto free_bitmap;
+ }
+
/* RVU VFs */
if (type == TYPE_AFVF)
bitmap_set(pf_bmap, 0, num);
@@ -2382,12 +2493,20 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
}
}
+ rvu->ng_rvu = ng_rvu_mbox;
+
+ rvu->ng_rvu->rvu_mbox_ops = &rvu_mbox_ops;
+
+ err = cn20k_rvu_mbox_init(rvu, type, num);
+ if (err)
+ goto free_mem;
+
mutex_init(&rvu->mbox_lock);
- mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL);
+ mbox_regions = kcalloc(num, sizeof(void __iomem *), GFP_KERNEL);
if (!mbox_regions) {
err = -ENOMEM;
- goto free_bitmap;
+ goto free_qmem;
}
switch (type) {
@@ -2410,12 +2529,13 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
goto free_regions;
break;
default:
+ err = -EINVAL;
goto free_regions;
}
- mw->mbox_wq = alloc_workqueue(name,
- WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
- num);
+ mw->mbox_wq = alloc_workqueue("%s",
+ WQ_HIGHPRI | WQ_MEM_RECLAIM,
+ num, name);
if (!mw->mbox_wq) {
err = -ENOMEM;
goto unmap_regions;
@@ -2457,7 +2577,11 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
mwork->rvu = rvu;
INIT_WORK(&mwork->work, mbox_up_handler);
}
- goto free_regions;
+
+ kfree(mbox_regions);
+ bitmap_free(pf_bmap);
+
+ return 0;
exit:
destroy_workqueue(mw->mbox_wq);
@@ -2466,6 +2590,10 @@ unmap_regions:
iounmap((void __iomem *)mbox_regions[num]);
free_regions:
kfree(mbox_regions);
+free_qmem:
+ cn20k_free_mbox_memory(rvu);
+free_mem:
+ kfree(rvu->ng_rvu);
free_bitmap:
bitmap_free(pf_bmap);
return err;
@@ -2492,8 +2620,8 @@ static void rvu_mbox_destroy(struct mbox_wq_info *mw)
otx2_mbox_destroy(&mw->mbox_up);
}
-static void rvu_queue_work(struct mbox_wq_info *mw, int first,
- int mdevs, u64 intr)
+void rvu_queue_work(struct mbox_wq_info *mw, int first,
+ int mdevs, u64 intr)
{
struct otx2_mbox_dev *mdev;
struct otx2_mbox *mbox;
@@ -2567,7 +2695,7 @@ static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), intr);
rvu_queue_work(&rvu->afvf_wq_info, 64, vfs, intr);
- vfs -= 64;
+ vfs = 64;
}
intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(0));
@@ -2584,6 +2712,11 @@ static void rvu_enable_mbox_intr(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;
+ if (is_cn20k(rvu->pdev)) {
+ cn20k_rvu_enable_mbox_intr(rvu);
+ return;
+ }
+
/* Clear spurious irqs, if any */
rvu_write64(rvu, BLKADDR_RVUM,
RVU_AF_PFAF_MBOX_INT, INTR_MASK(hw->total_pfs));
@@ -2701,7 +2834,7 @@ static void rvu_flr_handler(struct work_struct *work)
cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
numvfs = (cfg >> 12) & 0xFF;
- pcifunc = pf << RVU_PFVF_PF_SHIFT;
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0);
for (vf = 0; vf < numvfs; vf++)
__rvu_flr_handler(rvu, (pcifunc | (vf + 1)));
@@ -2837,9 +2970,12 @@ static void rvu_unregister_interrupts(struct rvu *rvu)
rvu_cpt_unregister_interrupts(rvu);
- /* Disable the Mbox interrupt */
- rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C,
- INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+ if (!is_cn20k(rvu->pdev))
+ /* Disable the Mbox interrupt */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C,
+ INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+ else
+ cn20k_rvu_unregister_interrupts(rvu);
/* Disable the PF FLR interrupt */
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
@@ -2872,6 +3008,10 @@ static int rvu_afvf_msix_vectors_num_ok(struct rvu *rvu)
* VF interrupts can be handled. Offset equal to zero means
* that PF vectors are not configured and overlapping AF vectors.
*/
+ if (is_cn20k(rvu->pdev))
+ return (pfvf->msix.max >= RVU_AF_CN20K_INT_VEC_CNT +
+ RVU_MBOX_PF_INT_VEC_CNT) && offset;
+
return (pfvf->msix.max >= RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT) &&
offset;
}
@@ -2902,18 +3042,30 @@ static int rvu_register_interrupts(struct rvu *rvu)
return ret;
}
- /* Register mailbox interrupt handler */
- sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox");
- ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX),
- rvu_mbox_pf_intr_handler, 0,
- &rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu);
- if (ret) {
- dev_err(rvu->dev,
- "RVUAF: IRQ registration failed for mbox irq\n");
- goto fail;
- }
+ if (!is_cn20k(rvu->pdev)) {
+ /* Register mailbox interrupt handler */
+ sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE],
+ "RVUAF Mbox");
+ ret = request_irq(pci_irq_vector
+ (rvu->pdev, RVU_AF_INT_VEC_MBOX),
+ rvu->ng_rvu->rvu_mbox_ops->pf_intr_handler, 0,
+ &rvu->irq_name[RVU_AF_INT_VEC_MBOX *
+ NAME_SIZE], rvu);
+ if (ret) {
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for mbox\n");
+ goto fail;
+ }
- rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true;
+ rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true;
+ } else {
+ ret = cn20k_register_afpf_mbox_intr(rvu);
+ if (ret) {
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for mbox\n");
+ goto fail;
+ }
+ }
/* Enable mailbox interrupts from all PFs */
rvu_enable_mbox_intr(rvu);
@@ -2968,34 +3120,40 @@ static int rvu_register_interrupts(struct rvu *rvu)
/* Get PF MSIX vectors offset. */
pf_vec_start = rvu_read64(rvu, BLKADDR_RVUM,
RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;
+ if (!is_cn20k(rvu->pdev)) {
+ /* Register MBOX0 interrupt. */
+ offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX0;
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox0");
+ ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+ rvu->ng_rvu->rvu_mbox_ops->afvf_intr_handler, 0,
+ &rvu->irq_name[offset * NAME_SIZE],
+ rvu);
+ if (ret)
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for Mbox0\n");
- /* Register MBOX0 interrupt. */
- offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX0;
- sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox0");
- ret = request_irq(pci_irq_vector(rvu->pdev, offset),
- rvu_mbox_intr_handler, 0,
- &rvu->irq_name[offset * NAME_SIZE],
- rvu);
- if (ret)
- dev_err(rvu->dev,
- "RVUAF: IRQ registration failed for Mbox0\n");
-
- rvu->irq_allocated[offset] = true;
+ rvu->irq_allocated[offset] = true;
- /* Register MBOX1 interrupt. MBOX1 IRQ number follows MBOX0 so
- * simply increment current offset by 1.
- */
- offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX1;
- sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox1");
- ret = request_irq(pci_irq_vector(rvu->pdev, offset),
- rvu_mbox_intr_handler, 0,
- &rvu->irq_name[offset * NAME_SIZE],
- rvu);
- if (ret)
- dev_err(rvu->dev,
- "RVUAF: IRQ registration failed for Mbox1\n");
+ /* Register MBOX1 interrupt. MBOX1 IRQ number follows MBOX0 so
+ * simply increment current offset by 1.
+ */
+ offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX1;
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox1");
+ ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+ rvu->ng_rvu->rvu_mbox_ops->afvf_intr_handler, 0,
+ &rvu->irq_name[offset * NAME_SIZE],
+ rvu);
+ if (ret)
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for Mbox1\n");
- rvu->irq_allocated[offset] = true;
+ rvu->irq_allocated[offset] = true;
+ } else {
+ ret = cn20k_register_afvf_mbox_intr(rvu, pf_vec_start);
+ if (ret)
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for Mbox\n");
+ }
/* Register FLR interrupt handler for AF's VFs */
offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR0;
@@ -3106,6 +3264,9 @@ static void rvu_disable_afvf_intr(struct rvu *rvu)
{
int vfs = rvu->vfs;
+ if (is_cn20k(rvu->pdev))
+ return cn20k_rvu_disable_afvf_intr(rvu, vfs);
+
rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), INTR_MASK(vfs));
rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
@@ -3122,6 +3283,9 @@ static void rvu_enable_afvf_intr(struct rvu *rvu)
{
int vfs = rvu->vfs;
+ if (is_cn20k(rvu->pdev))
+ return cn20k_rvu_enable_afvf_intr(rvu, vfs);
+
/* Clear any pending interrupts and enable AF VF interrupts for
* the first 64 VFs.
*/
@@ -3366,6 +3530,9 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ptp_start(rvu, rvu->fwdata->sclk, rvu->fwdata->ptp_ext_clk_rate,
rvu->fwdata->ptp_ext_tstamp);
+ /* Alloc CINT and QINT memory */
+ rvu_alloc_cint_qint_mem(rvu, &rvu->pf[RVU_AFPF], BLKADDR_NIX0,
+ (rvu->hw->block[BLKADDR_NIX0].lf.max));
return 0;
err_dl:
rvu_unregister_dl(rvu);
@@ -3417,6 +3584,9 @@ static void rvu_remove(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL);
devm_kfree(&pdev->dev, rvu->hw);
+ if (is_cn20k(rvu->pdev))
+ cn20k_free_mbox_memory(rvu);
+ kfree(rvu->ng_rvu);
devm_kfree(&pdev->dev, rvu);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 35834687e40f..e85dac2c806d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -10,6 +10,7 @@
#include <linux/pci.h>
#include <net/devlink.h>
+#include <linux/soc/marvell/silicons.h>
#include "rvu_struct.h"
#include "rvu_devlink.h"
@@ -30,6 +31,8 @@
#define PCI_SUBSYS_DEVID_CNF10K_A 0xBA00
#define PCI_SUBSYS_DEVID_CNF10K_B 0xBC00
#define PCI_SUBSYS_DEVID_CN10K_B 0xBD00
+#define PCI_SUBSYS_DEVID_CN20KA 0xC220
+#define PCI_SUBSYS_DEVID_CNF20KA 0xC320
/* PCI BAR nos */
#define PCI_AF_REG_BAR_NUM 0
@@ -41,12 +44,39 @@
#define MAX_CPT_BLKS 2
/* PF_FUNC */
-#define RVU_PFVF_PF_SHIFT 10
-#define RVU_PFVF_PF_MASK 0x3F
-#define RVU_PFVF_FUNC_SHIFT 0
-#define RVU_PFVF_FUNC_MASK 0x3FF
+#define RVU_OTX2_PFVF_PF_SHIFT 10
+#define RVU_OTX2_PFVF_PF_MASK 0x3F
+#define RVU_PFVF_FUNC_SHIFT 0
+#define RVU_PFVF_FUNC_MASK 0x3FF
+#define RVU_CN20K_PFVF_PF_SHIFT 9
+#define RVU_CN20K_PFVF_PF_MASK 0x7F
+
+static inline u16 rvu_make_pcifunc(struct pci_dev *pdev, int pf, int func)
+{
+ if (is_cn20k(pdev))
+ return ((pf & RVU_CN20K_PFVF_PF_MASK) <<
+ RVU_CN20K_PFVF_PF_SHIFT) |
+ ((func & RVU_PFVF_FUNC_MASK) <<
+ RVU_PFVF_FUNC_SHIFT);
+ else
+ return ((pf & RVU_OTX2_PFVF_PF_MASK) <<
+ RVU_OTX2_PFVF_PF_SHIFT) |
+ ((func & RVU_PFVF_FUNC_MASK) <<
+ RVU_PFVF_FUNC_SHIFT);
+}
+
+static inline int rvu_pcifunc_pf_mask(struct pci_dev *pdev)
+{
+ if (is_cn20k(pdev))
+ return ~(RVU_CN20K_PFVF_PF_MASK << RVU_CN20K_PFVF_PF_SHIFT);
+ else
+ return ~(RVU_OTX2_PFVF_PF_MASK << RVU_OTX2_PFVF_PF_SHIFT);
+}
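For readers new to the PF_FUNC layout change, here is a standalone sketch of the arithmetic behind rvu_make_pcifunc(). It is illustrative only, not part of the patch, and assumes nothing beyond the shift/mask values defined above.

	/* Illustrative only: PF_FUNC encoding on OcteonTX2 vs CN20K,
	 * using the shift/mask definitions above.
	 */
	#include <stdio.h>

	int main(void)
	{
		unsigned int pf = 3, func = 2;
		unsigned int otx2  = ((pf & 0x3F) << 10) | ((func & 0x3FF) << 0);
		unsigned int cn20k = ((pf & 0x7F) << 9)  | ((func & 0x3FF) << 0);

		/* otx2=0xc02 cn20k=0x602 */
		printf("otx2=0x%x cn20k=0x%x\n", otx2, cn20k);
		return 0;
	}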
+
+#define RVU_AFPF 25
#ifdef CONFIG_DEBUG_FS
+
struct dump_ctx {
int lf;
int id;
@@ -76,6 +106,7 @@ struct rvu_debugfs {
struct dump_ctx nix_cq_ctx;
struct dump_ctx nix_rq_ctx;
struct dump_ctx nix_sq_ctx;
+ struct dump_ctx nix_tm_ctx;
struct cpt_ctx cpt_ctx[MAX_CPT_BLKS];
int npa_qsize_id;
int nix_qsize_id;
@@ -318,6 +349,7 @@ struct nix_mark_format {
/* smq(flush) to tl1 cir/pir info */
struct nix_smq_tree_ctx {
+ u16 schq;
u64 cir_off;
u64 cir_val;
u64 pir_off;
@@ -327,8 +359,6 @@ struct nix_smq_tree_ctx {
/* smq flush context */
struct nix_smq_flush_ctx {
int smq;
- u16 tl1_schq;
- u16 tl2_schq;
struct nix_smq_tree_ctx smq_tree_ctx[NIX_TXSCH_LVL_CNT];
};
@@ -399,6 +429,7 @@ struct hw_cap {
bool nix_multiple_dwrr_mtu; /* Multiple DWRR_MTU to choose from */
bool npc_hash_extract; /* Hash extract enabled ? */
bool npc_exact_match_enabled; /* Exact match supported ? */
+ bool cpt_rxc; /* Is CPT-RXC supported */
};
struct rvu_hwinfo {
@@ -443,6 +474,23 @@ struct mbox_wq_info {
struct workqueue_struct *mbox_wq;
};
+struct rvu_irq_data {
+ u64 intr_status;
+ void (*rvu_queue_work_hdlr)(struct mbox_wq_info *mw, int first,
+ int mdevs, u64 intr);
+ void (*afvf_queue_work_hdlr)(struct mbox_wq_info *mw, int first,
+ int mdevs, u64 intr);
+ struct rvu *rvu;
+ int vec_num;
+ int start;
+ int mdevs;
+};
+
+struct mbox_ops {
+ irqreturn_t (*pf_intr_handler)(int irq, void *rvu_irq);
+ irqreturn_t (*afvf_intr_handler)(int irq, void *rvu_irq);
+};
+
struct channel_fwdata {
struct sdp_node_info info;
u8 valid;
@@ -450,6 +498,14 @@ struct channel_fwdata {
u8 reserved[RVU_CHANL_INFO_RESERVED];
};
+struct altaf_intr_notify {
+ unsigned long flr_pf_bmap[2];
+ unsigned long flr_vf_bmap[2];
+ unsigned long gint_paddr;
+ unsigned long gint_iova_addr;
+ unsigned long reserved[6];
+};
+
struct rvu_fwdata {
#define RVU_FWDATA_HEADER_MAGIC 0xCFDA /* Custom Firmware Data*/
#define RVU_FWDATA_VERSION 0x0001
@@ -469,7 +525,8 @@ struct rvu_fwdata {
u32 ptp_ext_clk_rate;
u32 ptp_ext_tstamp;
struct channel_fwdata channel_data;
-#define FWDATA_RESERVED_MEM 958
+ struct altaf_intr_notify altaf_intr_info;
+#define FWDATA_RESERVED_MEM 946
u64 reserved[FWDATA_RESERVED_MEM];
#define CGX_MAX 9
#define CGX_LMACS_MAX 4
@@ -512,6 +569,11 @@ struct rvu_switch {
u16 start_entry;
};
+struct rep_evtq_ent {
+ struct list_head node;
+ struct rep_event event;
+};
+
struct rvu {
void __iomem *afreg_base;
void __iomem *pfreg_base;
@@ -524,6 +586,7 @@ struct rvu {
struct mutex alias_lock; /* Serialize bar2 alias access */
int vfs; /* Number of VFs attached to RVU */
u16 vf_devid; /* VF devices id */
+ bool def_rule_cntr_en;
int nix_blkaddr[MAX_NIX_BLKS];
/* Mbox */
@@ -593,6 +656,18 @@ struct rvu {
spinlock_t cpt_intr_lock;
struct mutex mbox_lock; /* Serialize mbox up and down msgs */
+ u16 rep_pcifunc;
+ bool altaf_ready;
+ int rep_cnt;
+ u16 *rep2pfvf_map;
+ u8 rep_mode;
+ struct work_struct rep_evt_work;
+ struct workqueue_struct *rep_evt_wq;
+ struct list_head rep_evtq_head;
+ /* Representor event lock */
+ spinlock_t rep_evtq_lock;
+
+ struct ng_rvu *ng_rvu;
};
static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
@@ -689,6 +764,49 @@ static inline bool is_cnf10ka_a0(struct rvu *rvu)
return false;
}
+static inline bool is_cn10ka_a0(struct rvu *rvu)
+{
+ struct pci_dev *pdev = rvu->pdev;
+
+ if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A &&
+ (pdev->revision & 0x0F) == 0x0)
+ return true;
+ return false;
+}
+
+static inline bool is_cn10ka_a1(struct rvu *rvu)
+{
+ struct pci_dev *pdev = rvu->pdev;
+
+ if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A &&
+ (pdev->revision & 0x0F) == 0x1)
+ return true;
+ return false;
+}
+
+static inline bool is_cn10kb(struct rvu *rvu)
+{
+ struct pci_dev *pdev = rvu->pdev;
+
+ if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B)
+ return true;
+ return false;
+}
+
+static inline bool is_cgx_mapped_to_nix(unsigned short id, u8 cgx_id)
+{
+ /* On CNF10KA and CNF10KB silicons only two CGX blocks are connected
+ * to NIX.
+ */
+ if (id == PCI_SUBSYS_DEVID_CNF10K_A || id == PCI_SUBSYS_DEVID_CNF10K_B)
+ return cgx_id <= 1;
+
+ return !(cgx_id && !(id == PCI_SUBSYS_DEVID_96XX ||
+ id == PCI_SUBSYS_DEVID_98XX ||
+ id == PCI_SUBSYS_DEVID_CN10K_A ||
+ id == PCI_SUBSYS_DEVID_CN10K_B));
+}
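The return expression above is compact; an equivalent, more explicit form (illustrative only, same truth table as the helper above) may make the intent clearer:

	/* Illustrative rewrite of is_cgx_mapped_to_nix(), not part of the patch. */
	static bool is_cgx_mapped_to_nix_alt(unsigned short id, u8 cgx_id)
	{
		/* CNF10KA/CNF10KB: only CGX0 and CGX1 feed NIX */
		if (id == PCI_SUBSYS_DEVID_CNF10K_A || id == PCI_SUBSYS_DEVID_CNF10K_B)
			return cgx_id <= 1;

		/* CGX0 is always NIX-mapped */
		if (cgx_id == 0)
			return true;

		/* Higher CGX ids are NIX-mapped only on these silicons */
		return id == PCI_SUBSYS_DEVID_96XX || id == PCI_SUBSYS_DEVID_98XX ||
		       id == PCI_SUBSYS_DEVID_CN10K_A || id == PCI_SUBSYS_DEVID_CN10K_B;
	}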
+
static inline bool is_rvu_npc_hash_extract_en(struct rvu *rvu)
{
u64 npc_const3;
@@ -789,7 +907,6 @@ int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc);
void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start);
bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc);
u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr);
-int rvu_get_pf(u16 pcifunc);
struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc);
void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf);
bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr);
@@ -799,6 +916,7 @@ int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf);
int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc);
int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero);
int rvu_get_num_lbk_chans(void);
+int rvu_ndc_sync(struct rvu *rvu, int lfblkid, int lfidx, u64 lfoffset);
int rvu_get_blkaddr_from_slot(struct rvu *rvu, int blktype, u16 pcifunc,
u16 global_slot, u16 *slot_in_block);
@@ -817,15 +935,33 @@ void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq);
/* SDP APIs */
int rvu_sdp_init(struct rvu *rvu);
-bool is_sdp_pfvf(u16 pcifunc);
-bool is_sdp_pf(u16 pcifunc);
+bool is_sdp_pfvf(struct rvu *rvu, u16 pcifunc);
+bool is_sdp_pf(struct rvu *rvu, u16 pcifunc);
bool is_sdp_vf(struct rvu *rvu, u16 pcifunc);
+static inline bool is_rep_dev(struct rvu *rvu, u16 pcifunc)
+{
+ if (rvu->rep_pcifunc && rvu->rep_pcifunc == pcifunc)
+ return true;
+
+ return false;
+}
+
+static inline int rvu_get_pf(struct pci_dev *pdev, u16 pcifunc)
+{
+ if (is_cn20k(pdev))
+ return (pcifunc >> RVU_CN20K_PFVF_PF_SHIFT) &
+ RVU_CN20K_PFVF_PF_MASK;
+ else
+ return (pcifunc >> RVU_OTX2_PFVF_PF_SHIFT) &
+ RVU_OTX2_PFVF_PF_MASK;
+}
+
/* CGX APIs */
static inline bool is_pf_cgxmapped(struct rvu *rvu, u8 pf)
{
return (pf >= PF_CGXMAP_BASE && pf <= rvu->cgx_mapped_pfs) &&
- !is_sdp_pf(pf << RVU_PFVF_PF_SHIFT);
+ !is_sdp_pf(rvu, rvu_make_pcifunc(rvu->pdev, pf, 0));
}
static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id)
@@ -837,7 +973,7 @@ static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id)
static inline bool is_cgx_vf(struct rvu *rvu, u16 pcifunc)
{
return ((pcifunc & RVU_PFVF_FUNC_MASK) &&
- is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)));
+ is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc)));
}
#define M(_name, _id, fn_name, req, rsp) \
@@ -845,6 +981,10 @@ int rvu_mbox_handler_ ## fn_name(struct rvu *, struct req *, struct rsp *);
MBOX_MESSAGES
#undef M
+/* Mbox APIs */
+void rvu_queue_work(struct mbox_wq_info *mw, int first,
+ int mdevs, u64 intr);
+
int rvu_cgx_init(struct rvu *rvu);
int rvu_cgx_exit(struct rvu *rvu);
void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu);
@@ -899,6 +1039,11 @@ int rvu_nix_mcast_get_mce_index(struct rvu *rvu, u16 pcifunc,
int rvu_nix_mcast_update_mcam_entry(struct rvu *rvu, u16 pcifunc,
u32 mcast_grp_idx, u16 mcam_index);
void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc);
+int rvu_alloc_cint_qint_mem(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ int blkaddr, int nixlf);
+void rvu_block_bcast_xon(struct rvu *rvu, int blkaddr);
+int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
+ struct nix_aq_enq_rsp *rsp);
/* NPC APIs */
void rvu_npc_freemem(struct rvu *rvu);
@@ -913,8 +1058,6 @@ void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
bool enable);
void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan);
-void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
- bool enable);
void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
u64 chan);
void rvu_npc_enable_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
@@ -929,13 +1072,19 @@ void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
int group, int alg_idx, int mcam_index);
-
+void __rvu_mcam_remove_counter_from_rule(struct rvu *rvu, u16 pcifunc,
+ struct rvu_npc_mcam_rule *rule);
+void __rvu_mcam_add_counter_to_rule(struct rvu *rvu, u16 pcifunc,
+ struct rvu_npc_mcam_rule *rule,
+ struct npc_install_flow_rsp *rsp);
void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc,
int blkaddr, int *alloc_cnt,
int *enable_cnt);
void rvu_npc_get_mcam_counter_alloc_info(struct rvu *rvu, u16 pcifunc,
int blkaddr, int *alloc_cnt,
int *enable_cnt);
+void rvu_npc_clear_ucast_entry(struct rvu *rvu, int pcifunc, int nixlf);
+
bool is_npc_intf_tx(u8 intf);
bool is_npc_intf_rx(u8 intf);
bool is_npc_interface_valid(struct rvu *rvu, u8 intf);
@@ -954,6 +1103,7 @@ void npc_set_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, u16 src, struct mcam_entry *entry,
u8 *intf, u8 *ena);
+int npc_config_cntr_default_entries(struct rvu *rvu, bool enable);
bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc);
bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature);
u32 rvu_cgx_get_fifolen(struct rvu *rvu);
@@ -966,6 +1116,7 @@ int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_
int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause);
void rvu_mac_reset(struct rvu *rvu, u16 pcifunc);
u32 rvu_cgx_get_lmac_fifolen(struct rvu *rvu, int cgx, int lmac);
+void cgx_start_linkup(struct rvu *rvu);
int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int nixlf,
int type);
bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr,
@@ -1014,7 +1165,8 @@ int rvu_ndc_fix_locked_cacheline(struct rvu *rvu, int blkaddr);
/* RVU Switch */
void rvu_switch_enable(struct rvu *rvu);
void rvu_switch_disable(struct rvu *rvu);
-void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc);
+void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc, bool ena);
+void rvu_switch_enable_lbk_link(struct rvu *rvu, u16 pcifunc, bool ena);
int rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir,
u64 pkind, u8 var_len_off, u8 var_len_off_mask,
@@ -1027,4 +1179,9 @@ int rvu_mcs_flr_handler(struct rvu *rvu, u16 pcifunc);
void rvu_mcs_ptp_cfg(struct rvu *rvu, u8 rpm_id, u8 lmac_id, bool ena);
void rvu_mcs_exit(struct rvu *rvu);
+/* Representor APIs */
+int rvu_rep_pf_init(struct rvu *rvu);
+int rvu_rep_install_mcam_rules(struct rvu *rvu);
+void rvu_rep_update_rules(struct rvu *rvu, u16 pcifunc, bool ena);
+int rvu_rep_notify_pfvf_state(struct rvu *rvu, u16 pcifunc, bool enable);
#endif /* RVU_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index e9bf9231b018..3abd750a4bd7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -34,7 +34,7 @@ static struct _req_type __maybe_unused \
return NULL; \
req->hdr.sig = OTX2_MBOX_REQ_SIG; \
req->hdr.id = _id; \
- trace_otx2_msg_alloc(rvu->pdev, _id, sizeof(*req)); \
+ trace_otx2_msg_alloc(rvu->pdev, _id, sizeof(*req), 0); \
return req; \
}
@@ -272,6 +272,8 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pfid);
+ otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid);
+
mutex_unlock(&rvu->mbox_lock);
} while (pfmap);
}
@@ -313,7 +315,7 @@ static int cgx_lmac_event_handler_init(struct rvu *rvu)
spin_lock_init(&rvu->cgx_evq_lock);
INIT_LIST_HEAD(&rvu->cgx_evq_head);
INIT_WORK(&rvu->cgx_evh_work, cgx_evhandler_task);
- rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0);
+ rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", WQ_PERCPU, 0);
if (!rvu->cgx_evh_wq) {
dev_err(rvu->dev, "alloc workqueue failed");
return -ENOMEM;
@@ -349,6 +351,7 @@ static void rvu_cgx_wq_destroy(struct rvu *rvu)
int rvu_cgx_init(struct rvu *rvu)
{
+ struct mac_ops *mac_ops;
int cgx, err;
void *cgxd;
@@ -375,6 +378,15 @@ int rvu_cgx_init(struct rvu *rvu)
if (err)
return err;
+ /* Clear X2P reset on all MAC blocks */
+ for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++) {
+ cgxd = rvu_cgx_pdata(cgx, rvu);
+ if (!cgxd)
+ continue;
+ mac_ops = get_mac_ops(cgxd);
+ mac_ops->mac_x2p_reset(cgxd, false);
+ }
+
/* Register for CGX events */
err = cgx_lmac_event_handler_init(rvu);
if (err)
@@ -382,10 +394,26 @@ int rvu_cgx_init(struct rvu *rvu)
mutex_init(&rvu->cgx_cfg_lock);
- /* Ensure event handler registration is completed, before
- * we turn on the links
- */
- mb();
+ return 0;
+}
+
+void cgx_start_linkup(struct rvu *rvu)
+{
+ unsigned long lmac_bmap;
+ struct mac_ops *mac_ops;
+ int cgx, lmac, err;
+ void *cgxd;
+
+ /* Enable receive on all LMACS */
+ for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
+ cgxd = rvu_cgx_pdata(cgx, rvu);
+ if (!cgxd)
+ continue;
+ mac_ops = get_mac_ops(cgxd);
+ lmac_bmap = cgx_get_lmac_bmap(cgxd);
+ for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx)
+ mac_ops->mac_enadis_rx(cgxd, lmac, true);
+ }
/* Do link up for all CGX ports */
for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
@@ -398,8 +426,6 @@ int rvu_cgx_init(struct rvu *rvu)
"Link up process failed to start on cgx %d\n",
cgx);
}
-
- return 0;
}
int rvu_cgx_exit(struct rvu *rvu)
@@ -431,7 +457,7 @@ int rvu_cgx_exit(struct rvu *rvu)
inline bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc)
{
if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
- !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
+ !is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc)))
return false;
return true;
}
@@ -458,7 +484,7 @@ void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable)
int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
{
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
void *cgxd;
@@ -475,7 +501,7 @@ int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
int rvu_cgx_tx_enable(struct rvu *rvu, u16 pcifunc, bool enable)
{
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
void *cgxd;
@@ -500,7 +526,7 @@ int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable)
void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc)
{
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
int i = 0, lmac_count = 0;
struct mac_ops *mac_ops;
u8 max_dmac_filters;
@@ -551,7 +577,7 @@ int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
static int rvu_lmac_get_stats(struct rvu *rvu, struct msg_req *req,
void *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
struct mac_ops *mac_ops;
int stat = 0, err = 0;
u64 tx_stat, rx_stat;
@@ -604,11 +630,40 @@ int rvu_mbox_handler_rpm_stats(struct rvu *rvu, struct msg_req *req,
return rvu_lmac_get_stats(rvu, req, (void *)rsp);
}
+int rvu_mbox_handler_cgx_stats_rst(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
+ struct rvu_pfvf *parent_pf;
+ struct mac_ops *mac_ops;
+ u8 cgx_idx, lmac;
+ void *cgxd;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return LMAC_AF_ERR_PERM_DENIED;
+
+ parent_pf = &rvu->pf[pf];
+	/* To ensure that resetting CGX stats won't affect VF stats,
+	 * check that this CGX is used only by the PF interface.
+	 * If not, return.
+	 */
+ if (parent_pf->cgx_users > 1) {
+ dev_info(rvu->dev, "CGX busy, could not reset statistics\n");
+ return 0;
+ }
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
+ cgxd = rvu_cgx_pdata(cgx_idx, rvu);
+ mac_ops = get_mac_ops(cgxd);
+
+ return mac_ops->mac_stats_reset(cgxd, lmac);
+}
+
int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu,
struct msg_req *req,
struct cgx_fec_stats_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
struct mac_ops *mac_ops;
u8 cgx_idx, lmac;
void *cgxd;
@@ -626,17 +681,20 @@ int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
struct cgx_mac_addr_set_or_get *req,
struct cgx_mac_addr_set_or_get *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
+ struct rvu_pfvf *pfvf;
u8 cgx_id, lmac_id;
- if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
- return -EPERM;
+ if (!is_pf_cgxmapped(rvu, pf))
+ return LMAC_AF_ERR_PF_NOT_MAPPED;
if (rvu_npc_exact_has_match_table(rvu))
return rvu_npc_exact_mac_addr_set(rvu, req, rsp);
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ pfvf = &rvu->pf[pf];
+ ether_addr_copy(pfvf->mac_addr, req->mac_addr);
cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);
return 0;
@@ -646,7 +704,7 @@ int rvu_mbox_handler_cgx_mac_addr_add(struct rvu *rvu,
struct cgx_mac_addr_add_req *req,
struct cgx_mac_addr_add_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u8 cgx_id, lmac_id;
int rc = 0;
@@ -670,7 +728,7 @@ int rvu_mbox_handler_cgx_mac_addr_del(struct rvu *rvu,
struct cgx_mac_addr_del_req *req,
struct msg_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u8 cgx_id, lmac_id;
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
@@ -688,7 +746,7 @@ int rvu_mbox_handler_cgx_mac_max_entries_get(struct rvu *rvu,
struct cgx_max_dmac_entries_get_rsp
*rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u8 cgx_id, lmac_id;
/* If msg is received from PFs(which are not mapped to CGX LMACs)
@@ -714,20 +772,12 @@ int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
struct cgx_mac_addr_set_or_get *req,
struct cgx_mac_addr_set_or_get *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
- u8 cgx_id, lmac_id;
- int rc = 0;
- u64 cfg;
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
- if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
- return -EPERM;
-
- rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ if (!is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, req->hdr.pcifunc)))
+ return LMAC_AF_ERR_PF_NOT_MAPPED;
- rsp->hdr.rc = rc;
- cfg = cgx_lmac_addr_get(cgx_id, lmac_id);
- /* copy 48 bit mac address to req->mac_addr */
- u64_to_ether_addr(cfg, rsp->mac_addr);
+ ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
return 0;
}
@@ -735,7 +785,7 @@ int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
u8 cgx_id, lmac_id;
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
@@ -754,7 +804,7 @@ int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u8 cgx_id, lmac_id;
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
@@ -773,7 +823,7 @@ int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
void *cgxd;
@@ -809,7 +859,7 @@ static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
int rvu_mbox_handler_cgx_ptp_rx_enable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
- if (!is_pf_cgxmapped(rvu, rvu_get_pf(req->hdr.pcifunc)))
+ if (!is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, req->hdr.pcifunc)))
return -EPERM;
return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, true);
@@ -823,7 +873,7 @@ int rvu_mbox_handler_cgx_ptp_rx_disable(struct rvu *rvu, struct msg_req *req,
static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
{
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
u8 cgx_id, lmac_id;
if (!is_cgx_config_permitted(rvu, pcifunc))
@@ -862,7 +912,7 @@ int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
u8 cgx_id, lmac_id;
int pf, err;
- pf = rvu_get_pf(req->hdr.pcifunc);
+ pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
if (!is_pf_cgxmapped(rvu, pf))
return -ENODEV;
@@ -878,7 +928,7 @@ int rvu_mbox_handler_cgx_features_get(struct rvu *rvu,
struct msg_req *req,
struct cgx_features_info_msg *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u8 cgx_idx, lmac;
void *cgxd;
@@ -894,13 +944,12 @@ int rvu_mbox_handler_cgx_features_get(struct rvu *rvu,
u32 rvu_cgx_get_fifolen(struct rvu *rvu)
{
- struct mac_ops *mac_ops;
- u32 fifo_len;
+ void *cgxd = rvu_first_cgx_pdata(rvu);
- mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
- fifo_len = mac_ops ? mac_ops->fifo_len : 0;
+ if (!cgxd)
+ return 0;
- return fifo_len;
+ return cgx_get_fifo_len(cgxd);
}
u32 rvu_cgx_get_lmac_fifolen(struct rvu *rvu, int cgx, int lmac)
@@ -921,7 +970,7 @@ u32 rvu_cgx_get_lmac_fifolen(struct rvu *rvu, int cgx, int lmac)
static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en)
{
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
@@ -951,7 +1000,7 @@ int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause)
{
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
u8 rx_pfc = 0, tx_pfc = 0;
struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
@@ -992,7 +1041,7 @@ int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
struct cgx_pause_frm_cfg *req,
struct cgx_pause_frm_cfg *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
int err = 0;
@@ -1019,7 +1068,7 @@ int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
int rvu_mbox_handler_cgx_get_phy_fec_stats(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u8 cgx_id, lmac_id;
if (!is_pf_cgxmapped(rvu, pf))
@@ -1052,7 +1101,7 @@ int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id,
/* Assumes LF of a PF and all of its VF belongs to the same
* NIX block
*/
- pcifunc = pf << RVU_PFVF_PF_SHIFT;
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0);
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (blkaddr < 0)
return 0;
@@ -1079,10 +1128,10 @@ int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start)
struct rvu_pfvf *parent_pf, *pfvf;
int cgx_users, err = 0;
- if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
+ if (!is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc)))
return 0;
- parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
+ parent_pf = &rvu->pf[rvu_get_pf(rvu->pdev, pcifunc)];
pfvf = rvu_get_pfvf(rvu, pcifunc);
mutex_lock(&rvu->cgx_cfg_lock);
@@ -1125,7 +1174,7 @@ int rvu_mbox_handler_cgx_set_fec_param(struct rvu *rvu,
struct fec_mode *req,
struct fec_mode *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u8 cgx_id, lmac_id;
if (!is_pf_cgxmapped(rvu, pf))
@@ -1141,7 +1190,7 @@ int rvu_mbox_handler_cgx_set_fec_param(struct rvu *rvu,
int rvu_mbox_handler_cgx_get_aux_link_info(struct rvu *rvu, struct msg_req *req,
struct cgx_fw_data *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u8 cgx_id, lmac_id;
if (!rvu->fwdata)
@@ -1168,7 +1217,8 @@ int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu,
struct cgx_set_link_mode_req *req,
struct cgx_set_link_mode_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
+ struct cgx_lmac_fwdata_s *linkmodes;
u8 cgx_idx, lmac;
void *cgxd;
@@ -1177,14 +1227,20 @@ int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu,
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
cgxd = rvu_cgx_pdata(cgx_idx, rvu);
- rsp->status = cgx_set_link_mode(cgxd, req->args, cgx_idx, lmac);
+ if (rvu->hw->lmac_per_cgx == CGX_LMACS_USX)
+ linkmodes = &rvu->fwdata->cgx_fw_data_usx[cgx_idx][lmac];
+ else
+ linkmodes = &rvu->fwdata->cgx_fw_data[cgx_idx][lmac];
+
+ rsp->status = cgx_set_link_mode(cgxd, req->args, linkmodes,
+ cgx_idx, lmac);
return 0;
}
int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req,
struct msg_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u8 cgx_id, lmac_id;
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
@@ -1202,7 +1258,7 @@ int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu,
struct cgx_mac_addr_update_req *req,
struct cgx_mac_addr_update_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u8 cgx_id, lmac_id;
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
@@ -1218,7 +1274,7 @@ int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu,
int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause,
u8 rx_pause, u16 pfc_en)
{
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
u8 rx_8023 = 0, tx_8023 = 0;
struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
@@ -1256,7 +1312,7 @@ int rvu_mbox_handler_cgx_prio_flow_ctrl_cfg(struct rvu *rvu,
struct cgx_pfc_cfg *req,
struct cgx_pfc_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
void *cgxd;
@@ -1281,7 +1337,7 @@ int rvu_mbox_handler_cgx_prio_flow_ctrl_cfg(struct rvu *rvu,
void rvu_mac_reset(struct rvu *rvu, u16 pcifunc)
{
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
struct mac_ops *mac_ops;
struct cgx *cgxd;
u8 cgx, lmac;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
index 7fa98aeb3663..d2163da28d18 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
@@ -13,19 +13,26 @@
/* RVU LMTST */
#define LMT_TBL_OP_READ 0
#define LMT_TBL_OP_WRITE 1
-#define LMT_MAP_TABLE_SIZE (128 * 1024)
#define LMT_MAPTBL_ENTRY_SIZE 16
+#define LMT_MAX_VFS 256
+
+#define LMT_MAP_ENTRY_ENA BIT_ULL(20)
+#define LMT_MAP_ENTRY_LINES GENMASK_ULL(18, 16)
/* Function to perform operations (read/write) on lmtst map table */
static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,
int lmt_tbl_op)
{
void __iomem *lmt_map_base;
- u64 tbl_base;
+ u64 tbl_base, cfg;
+ int pfs, vfs;
tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
+ cfg = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CFG);
+ vfs = 1 << (cfg & 0xF);
+ pfs = 1 << ((cfg >> 4) & 0x7);
- lmt_map_base = ioremap_wc(tbl_base, LMT_MAP_TABLE_SIZE);
+ lmt_map_base = ioremap_wc(tbl_base, pfs * vfs * LMT_MAPTBL_ENTRY_SIZE);
if (!lmt_map_base) {
dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
return -ENOMEM;
@@ -35,6 +42,13 @@ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,
*val = readq(lmt_map_base + index);
} else {
writeq((*val), (lmt_map_base + index));
+
+ cfg = FIELD_PREP(LMT_MAP_ENTRY_ENA, 0x1);
+ /* 2048 LMTLINES */
+ cfg |= FIELD_PREP(LMT_MAP_ENTRY_LINES, 0x6);
+
+ writeq(cfg, (lmt_map_base + (index + 8)));
+
/* Flushing the AP interceptor cache to make APR_LMT_MAP_ENTRY_S
* changes effective. Write 1 for flush and read is being used as a
* barrier and sets up a data dependency. Write to 0 after a write
@@ -52,7 +66,7 @@ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,
#define LMT_MAP_TBL_W1_OFF 8
static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc)
{
- return ((rvu_get_pf(pcifunc) * rvu->hw->total_vfs) +
+ return ((rvu_get_pf(rvu->pdev, pcifunc) * LMT_MAX_VFS) +
(pcifunc & RVU_PFVF_FUNC_MASK)) * LMT_MAPTBL_ENTRY_SIZE;
}
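As a worked example of how the mapping size and the per-PF/VF index are derived from APR_AF_LMT_CFG and the constants above; the cfg value below is hypothetical, and the field layout is taken from the code above (vfs = 1 << cfg[3:0], pfs = 1 << cfg[6:4]):

	/* Illustrative arithmetic only, not part of the patch. */
	#include <stdio.h>

	#define LMT_MAPTBL_ENTRY_SIZE	16
	#define LMT_MAX_VFS		256

	int main(void)
	{
		unsigned long long cfg = 0x58;	/* hypothetical APR_AF_LMT_CFG */
		unsigned int vfs = 1u << (cfg & 0xF);		/* 256 */
		unsigned int pfs = 1u << ((cfg >> 4) & 0x7);	/* 32 */
		unsigned int pf = 2, func = 3;

		/* 131072 bytes, i.e. the 128 KB previously hard-coded */
		printf("table size = %u bytes\n", pfs * vfs * LMT_MAPTBL_ENTRY_SIZE);
		/* (2 * 256 + 3) * 16 = 8240 */
		printf("index(pf=2, func=3) = %u\n",
		       (pf * LMT_MAX_VFS + func) * LMT_MAPTBL_ENTRY_SIZE);
		return 0;
	}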
@@ -69,7 +83,7 @@ static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc,
mutex_lock(&rvu->rsrc_lock);
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova);
- pf = rvu_get_pf(pcifunc) & 0x1F;
+ pf = rvu_get_pf(rvu->pdev, pcifunc) & RVU_OTX2_PFVF_PF_MASK;
val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 |
((pcifunc & RVU_PFVF_FUNC_MASK) & 0xFF);
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TXN_REQ, val);
@@ -141,7 +155,7 @@ int rvu_mbox_handler_lmtst_tbl_setup(struct rvu *rvu,
int err = 0;
u64 val;
- /* Check if PF_FUNC wants to use it's own local memory as LMTLINE
+ /* Check if PF_FUNC wants to use its own local memory as LMTLINE
* region, if so, convert that IOVA to physical address and
* populate LMT table with that address
*/
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
index f047185f38e0..f404117bf6c8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
@@ -19,6 +19,12 @@
/* Length of initial context fetch in 128 byte words */
#define CPT_CTX_ILEN 1ULL
+/* Interrupt vector count of CPT RVU and RAS interrupts */
+#define CPT_10K_AF_RVU_RAS_INT_VEC_CNT 2
+
+/* Default CPT_AF_RXC_CFG1:max_rxc_icb_cnt */
+#define CPT_DFLT_MAX_RXC_ICB_CNT 0xC0ULL
+
#define cpt_get_eng_sts(e_min, e_max, rsp, etype) \
({ \
u64 free_sts = 0, busy_sts = 0; \
@@ -37,6 +43,41 @@
(_rsp)->free_sts_##etype = free_sts; \
})
+#define MAX_AE GENMASK_ULL(47, 32)
+#define MAX_IE GENMASK_ULL(31, 16)
+#define MAX_SE GENMASK_ULL(15, 0)
+
+static u16 cpt_max_engines_get(struct rvu *rvu)
+{
+ u16 max_ses, max_ies, max_aes;
+ u64 reg;
+
+ reg = rvu_read64(rvu, BLKADDR_CPT0, CPT_AF_CONSTANTS1);
+ max_ses = FIELD_GET(MAX_SE, reg);
+ max_ies = FIELD_GET(MAX_IE, reg);
+ max_aes = FIELD_GET(MAX_AE, reg);
+
+ return max_ses + max_ies + max_aes;
+}
+
+/* The number of flt interrupt vectors depends on the number of engines the
+ * chip has. Each flt vector represents 64 engines.
+ */
+static int cpt_10k_flt_nvecs_get(struct rvu *rvu, u16 max_engs)
+{
+ int flt_vecs;
+
+ flt_vecs = DIV_ROUND_UP(max_engs, 64);
+
+ if (flt_vecs > CPT_10K_AF_INT_VEC_FLT_MAX) {
+ dev_warn_once(rvu->dev, "flt_vecs:%d exceeds the max vectors:%d\n",
+ flt_vecs, CPT_10K_AF_INT_VEC_FLT_MAX);
+ flt_vecs = CPT_10K_AF_INT_VEC_FLT_MAX;
+ }
+
+ return flt_vecs;
+}
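A small worked example of the vector count and per-vector enable masks that the register loops below derive from the engine count. This is illustrative only; the engine count is hypothetical, and INTR_MASK() here mirrors what the driver's helper is assumed to do (low n bits set, all ones for n >= 64).

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
	#define INTR_MASK(n)		((n) < 64 ? ((1ULL << (n)) - 1) : ~0ULL)

	int main(void)
	{
		unsigned int max_engs = 136;	/* hypothetical engine count */
		unsigned int flt_vecs = DIV_ROUND_UP(max_engs, 64);	/* 3 */
		unsigned int i, nr;

		for (i = 0; i < flt_vecs; i++) {
			nr = (max_engs > 64) ? 64 : max_engs;
			max_engs -= nr;
			/* FLT0/FLT1 cover 64 engines each, FLT2 covers 8 (mask 0xff) */
			printf("FLT%u: %u engines, mask 0x%llx\n", i, nr, INTR_MASK(nr));
		}
		return 0;
	}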
+
static irqreturn_t cpt_af_flt_intr_handler(int vec, void *ptr)
{
struct rvu_block *block = ptr;
@@ -150,17 +191,26 @@ static void cpt_10k_unregister_interrupts(struct rvu_block *block, int off)
{
struct rvu *rvu = block->rvu;
int blkaddr = block->addr;
- int i;
+ int i, flt_vecs;
+ u16 max_engs;
+ u8 nr;
+
+ max_engs = cpt_max_engines_get(rvu);
+ flt_vecs = cpt_10k_flt_nvecs_get(rvu, max_engs);
/* Disable all CPT AF interrupts */
- rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(0), ~0ULL);
- rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(1), ~0ULL);
- rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(2), 0xFFFF);
+ for (i = CPT_10K_AF_INT_VEC_FLT0; i < flt_vecs; i++) {
+ nr = (max_engs > 64) ? 64 : max_engs;
+ max_engs -= nr;
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i),
+ INTR_MASK(nr));
+ }
rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1);
rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1C, 0x1);
- for (i = 0; i < CPT_10K_AF_INT_VEC_CNT; i++)
+ /* CPT AF interrupt vectors are flt_int, rvu_int and ras_int. */
+ for (i = 0; i < flt_vecs + CPT_10K_AF_RVU_RAS_INT_VEC_CNT; i++)
if (rvu->irq_allocated[off + i]) {
free_irq(pci_irq_vector(rvu->pdev, off + i), block);
rvu->irq_allocated[off + i] = false;
@@ -206,12 +256,18 @@ void rvu_cpt_unregister_interrupts(struct rvu *rvu)
static int cpt_10k_register_interrupts(struct rvu_block *block, int off)
{
+ int rvu_intr_vec, ras_intr_vec;
struct rvu *rvu = block->rvu;
int blkaddr = block->addr;
irq_handler_t flt_fn;
- int i, ret;
+ int i, ret, flt_vecs;
+ u16 max_engs;
+ u8 nr;
+
+ max_engs = cpt_max_engines_get(rvu);
+ flt_vecs = cpt_10k_flt_nvecs_get(rvu, max_engs);
- for (i = CPT_10K_AF_INT_VEC_FLT0; i < CPT_10K_AF_INT_VEC_RVU; i++) {
+ for (i = CPT_10K_AF_INT_VEC_FLT0; i < flt_vecs; i++) {
sprintf(&rvu->irq_name[(off + i) * NAME_SIZE], "CPTAF FLT%d", i);
switch (i) {
@@ -229,20 +285,24 @@ static int cpt_10k_register_interrupts(struct rvu_block *block, int off)
flt_fn, &rvu->irq_name[(off + i) * NAME_SIZE]);
if (ret)
goto err;
- if (i == CPT_10K_AF_INT_VEC_FLT2)
- rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0xFFFF);
- else
- rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), ~0ULL);
+
+ nr = (max_engs > 64) ? 64 : max_engs;
+ max_engs -= nr;
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i),
+ INTR_MASK(nr));
}
- ret = rvu_cpt_do_register_interrupt(block, off + CPT_10K_AF_INT_VEC_RVU,
+ rvu_intr_vec = flt_vecs;
+ ras_intr_vec = rvu_intr_vec + 1;
+
+ ret = rvu_cpt_do_register_interrupt(block, off + rvu_intr_vec,
rvu_cpt_af_rvu_intr_handler,
"CPTAF RVU");
if (ret)
goto err;
rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1S, 0x1);
- ret = rvu_cpt_do_register_interrupt(block, off + CPT_10K_AF_INT_VEC_RAS,
+ ret = rvu_cpt_do_register_interrupt(block, off + ras_intr_vec,
rvu_cpt_af_ras_intr_handler,
"CPTAF RAS");
if (ret)
@@ -350,7 +410,7 @@ static bool is_cpt_pf(struct rvu *rvu, u16 pcifunc)
{
int cpt_pf_num = rvu->cpt_pf_num;
- if (rvu_get_pf(pcifunc) != cpt_pf_num)
+ if (rvu_get_pf(rvu->pdev, pcifunc) != cpt_pf_num)
return false;
if (pcifunc & RVU_PFVF_FUNC_MASK)
return false;
@@ -362,7 +422,7 @@ static bool is_cpt_vf(struct rvu *rvu, u16 pcifunc)
{
int cpt_pf_num = rvu->cpt_pf_num;
- if (rvu_get_pf(pcifunc) != cpt_pf_num)
+ if (rvu_get_pf(rvu->pdev, pcifunc) != cpt_pf_num)
return false;
if (!(pcifunc & RVU_PFVF_FUNC_MASK))
return false;
@@ -632,7 +692,9 @@ int rvu_mbox_handler_cpt_inline_ipsec_cfg(struct rvu *rvu,
return ret;
}
-static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req)
+static bool validate_and_update_reg_offset(struct rvu *rvu,
+ struct cpt_rd_wr_reg_msg *req,
+ u64 *reg_offset)
{
u64 offset = req->reg_offset;
int blkaddr, num_lfs, lf;
@@ -663,6 +725,11 @@ static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req)
if (lf < 0)
return false;
+ /* Translate local LF's offset to global CPT LF's offset to
+ * access LFX register.
+ */
+ *reg_offset = (req->reg_offset & 0xFF000) + (lf << 3);
+
return true;
} else if (!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK)) {
/* Registers that can be accessed from PF */
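The offset translation added in the hunk above keeps the LFX register block and substitutes the global LF slot; a minimal arithmetic sketch (offsets and LF number are hypothetical, not taken from the hardware manual):

	#include <stdio.h>

	int main(void)
	{
		unsigned long long req_offset = 0x41000ULL;	/* hypothetical local LFX offset */
		int lf = 5;					/* global CPT LF assigned to this VF */
		unsigned long long reg_offset =
			(req_offset & 0xFF000) + ((unsigned long long)lf << 3);

		printf("0x%llx\n", reg_offset);	/* 0x41028 */
		return 0;
	}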
@@ -673,6 +740,7 @@ static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req)
case CPT_AF_BLK_RST:
case CPT_AF_CONSTANTS1:
case CPT_AF_CTX_FLUSH_TIMER:
+ case CPT_AF_RXC_CFG1:
return true;
}
@@ -696,6 +764,7 @@ int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu,
struct cpt_rd_wr_reg_msg *req,
struct cpt_rd_wr_reg_msg *rsp)
{
+ u64 offset = req->reg_offset;
int blkaddr;
blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
@@ -707,23 +776,25 @@ int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu,
!is_cpt_vf(rvu, req->hdr.pcifunc))
return CPT_AF_ERR_ACCESS_DENIED;
+ if (!validate_and_update_reg_offset(rvu, req, &offset))
+ return CPT_AF_ERR_ACCESS_DENIED;
+
rsp->reg_offset = req->reg_offset;
rsp->ret_val = req->ret_val;
rsp->is_write = req->is_write;
- if (!is_valid_offset(rvu, req))
- return CPT_AF_ERR_ACCESS_DENIED;
-
if (req->is_write)
- rvu_write64(rvu, blkaddr, req->reg_offset, req->val);
+ rvu_write64(rvu, blkaddr, offset, req->val);
else
- rsp->val = rvu_read64(rvu, blkaddr, req->reg_offset);
+ rsp->val = rvu_read64(rvu, blkaddr, offset);
return 0;
}
static void get_ctx_pc(struct rvu *rvu, struct cpt_sts_rsp *rsp, int blkaddr)
{
+ struct rvu_hwinfo *hw = rvu->hw;
+
if (is_rvu_otx2(rvu))
return;
@@ -747,14 +818,16 @@ static void get_ctx_pc(struct rvu *rvu, struct cpt_sts_rsp *rsp, int blkaddr)
rsp->ctx_err = rvu_read64(rvu, blkaddr, CPT_AF_CTX_ERR);
rsp->ctx_enc_id = rvu_read64(rvu, blkaddr, CPT_AF_CTX_ENC_ID);
rsp->ctx_flush_timer = rvu_read64(rvu, blkaddr, CPT_AF_CTX_FLUSH_TIMER);
+ rsp->x2p_link_cfg0 = rvu_read64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(0));
+ rsp->x2p_link_cfg1 = rvu_read64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(1));
+ if (!hw->cap.cpt_rxc)
+ return;
rsp->rxc_time = rvu_read64(rvu, blkaddr, CPT_AF_RXC_TIME);
rsp->rxc_time_cfg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_TIME_CFG);
rsp->rxc_active_sts = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ACTIVE_STS);
rsp->rxc_zombie_sts = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ZOMBIE_STS);
rsp->rxc_dfrg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_DFRG);
- rsp->x2p_link_cfg0 = rvu_read64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(0));
- rsp->x2p_link_cfg1 = rvu_read64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(1));
}
static void get_eng_sts(struct rvu *rvu, struct cpt_sts_rsp *rsp, int blkaddr)
@@ -913,13 +986,17 @@ int rvu_mbox_handler_cpt_flt_eng_info(struct rvu *rvu, struct cpt_flt_eng_info_r
struct rvu_block *block;
unsigned long flags;
int blkaddr, vec;
+ int flt_vecs;
+ u16 max_engs;
blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
if (blkaddr < 0)
return blkaddr;
block = &rvu->hw->block[blkaddr];
- for (vec = 0; vec < CPT_10K_AF_INT_VEC_RVU; vec++) {
+ max_engs = cpt_max_engines_get(rvu);
+ flt_vecs = cpt_10k_flt_nvecs_get(rvu, max_engs);
+ for (vec = 0; vec < flt_vecs; vec++) {
spin_lock_irqsave(&rvu->cpt_intr_lock, flags);
rsp->flt_eng_map[vec] = block->cpt_flt_eng_map[vec];
rsp->rcvrd_eng_map[vec] = block->cpt_rcvrd_eng_map[vec];
@@ -935,10 +1012,11 @@ int rvu_mbox_handler_cpt_flt_eng_info(struct rvu *rvu, struct cpt_flt_eng_info_r
static void cpt_rxc_teardown(struct rvu *rvu, int blkaddr)
{
struct cpt_rxc_time_cfg_req req, prev;
+ struct rvu_hwinfo *hw = rvu->hw;
int timeout = 2000;
u64 reg;
- if (is_rvu_otx2(rvu))
+ if (!hw->cap.cpt_rxc)
return;
/* Set time limit to minimum values, so that rxc entries will be
@@ -1211,10 +1289,30 @@ unlock:
return 0;
}
+#define MAX_RXC_ICB_CNT GENMASK_ULL(40, 32)
+
int rvu_cpt_init(struct rvu *rvu)
{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u64 reg_val;
+
/* Retrieve CPT PF number */
rvu->cpt_pf_num = get_cpt_pf_num(rvu);
+ if (is_block_implemented(rvu->hw, BLKADDR_CPT0) && !is_rvu_otx2(rvu) &&
+ !is_cn10kb(rvu))
+ hw->cap.cpt_rxc = true;
+
+ if (hw->cap.cpt_rxc && !is_cn10ka_a0(rvu) && !is_cn10ka_a1(rvu)) {
+		/* Set CPT_AF_RXC_CFG1:max_rxc_icb_cnt to 0xc0 so as not to
+		 * affect inline inbound peak performance.
+		 */
+ reg_val = rvu_read64(rvu, BLKADDR_CPT0, CPT_AF_RXC_CFG1);
+ reg_val &= ~MAX_RXC_ICB_CNT;
+ reg_val |= FIELD_PREP(MAX_RXC_ICB_CNT,
+ CPT_DFLT_MAX_RXC_ICB_CNT);
+ rvu_write64(rvu, BLKADDR_CPT0, CPT_AF_RXC_CFG1, reg_val);
+ }
+
spin_lock_init(&rvu->cpt_intr_lock);
return 0;
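The CPT_AF_RXC_CFG1 update above is a standard read-modify-write of a single register field; a minimal sketch of the same pattern, using the GENMASK_ULL(40, 32) field defined in this file and a hypothetical starting register value:

	#include <stdio.h>

	#define MAX_RXC_ICB_CNT_SHIFT	32
	#define MAX_RXC_ICB_CNT_MASK	(0x1FFULL << MAX_RXC_ICB_CNT_SHIFT)	/* bits [40:32] */
	#define CPT_DFLT_MAX_RXC_ICB_CNT 0xC0ULL

	int main(void)
	{
		unsigned long long reg_val = 0x12345678ABCDULL;	/* hypothetical value */

		/* clear the field, then program the 0xC0 default into it */
		reg_val &= ~MAX_RXC_ICB_CNT_MASK;
		reg_val |= CPT_DFLT_MAX_RXC_ICB_CNT << MAX_RXC_ICB_CNT_SHIFT;
		printf("0x%llx\n", reg_val);
		return 0;
	}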
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index 2500f5ba4f5a..15d3cb0b9da6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -21,6 +21,8 @@
#include "rvu_npc_hash.h"
#include "mcs.h"
+#include "cn20k/debugfs.h"
+
#define DEBUGFS_DIR_NAME "octeontx2"
enum {
@@ -45,33 +47,6 @@ enum {
CGX_STAT18,
};
-/* NIX TX stats */
-enum nix_stat_lf_tx {
- TX_UCAST = 0x0,
- TX_BCAST = 0x1,
- TX_MCAST = 0x2,
- TX_DROP = 0x3,
- TX_OCTS = 0x4,
- TX_STATS_ENUM_LAST,
-};
-
-/* NIX RX stats */
-enum nix_stat_lf_rx {
- RX_OCTS = 0x0,
- RX_UCAST = 0x1,
- RX_BCAST = 0x2,
- RX_MCAST = 0x3,
- RX_DROP = 0x4,
- RX_DROP_OCTS = 0x5,
- RX_FCS = 0x6,
- RX_ERR = 0x7,
- RX_DRP_BCAST = 0x8,
- RX_DRP_MCAST = 0x9,
- RX_DRP_L3BCAST = 0xa,
- RX_DRP_L3MCAST = 0xb,
- RX_STATS_ENUM_LAST,
-};
-
static char *cgx_rx_stats_fields[] = {
[CGX_STAT0] = "Received packets",
[CGX_STAT1] = "Octets of received packets",
@@ -580,6 +555,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
u64 lmt_addr, val, tbl_base;
int pf, vf, num_vfs, hw_vfs;
void __iomem *lmt_map_base;
+ int apr_pfs, apr_vfs;
int buf_size = 10240;
size_t off = 0;
int index = 0;
@@ -595,8 +571,12 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
return -ENOMEM;
tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
+ val = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CFG);
+ apr_vfs = 1 << (val & 0xF);
+ apr_pfs = 1 << ((val >> 4) & 0x7);
- lmt_map_base = ioremap_wc(tbl_base, 128 * 1024);
+ lmt_map_base = ioremap_wc(tbl_base, apr_pfs * apr_vfs *
+ LMT_MAPTBL_ENTRY_SIZE);
if (!lmt_map_base) {
dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
kfree(buf);
@@ -618,7 +598,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d \t\t\t",
pf);
- index = pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE;
+ index = pf * apr_vfs * LMT_MAPTBL_ENTRY_SIZE;
off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t",
(tbl_base + index));
lmt_addr = readq(lmt_map_base + index);
@@ -631,7 +611,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
/* Reading num of VFs per PF */
rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs);
for (vf = 0; vf < num_vfs; vf++) {
- index = (pf * rvu->hw->total_vfs * 16) +
+ index = (pf * apr_vfs * LMT_MAPTBL_ENTRY_SIZE) +
((vf + 1) * LMT_MAPTBL_ENTRY_SIZE);
off += scnprintf(&buf[off], buf_size - 1 - off,
"PF%d:VF%d \t\t", pf, vf);
@@ -663,16 +643,16 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
RVU_DEBUG_FOPS(lmtst_map_table, lmtst_map_table_display, NULL);
-static void get_lf_str_list(struct rvu_block block, int pcifunc,
+static void get_lf_str_list(const struct rvu_block *block, int pcifunc,
char *lfs)
{
- int lf = 0, seq = 0, len = 0, prev_lf = block.lf.max;
+ int lf = 0, seq = 0, len = 0, prev_lf = block->lf.max;
- for_each_set_bit(lf, block.lf.bmap, block.lf.max) {
- if (lf >= block.lf.max)
+ for_each_set_bit(lf, block->lf.bmap, block->lf.max) {
+ if (lf >= block->lf.max)
break;
- if (block.fn_map[lf] != pcifunc)
+ if (block->fn_map[lf] != pcifunc)
continue;
if (lf == prev_lf + 1) {
@@ -710,7 +690,7 @@ static int get_max_column_width(struct rvu *rvu)
for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
- pcifunc = pf << 10 | vf;
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, vf);
if (!pcifunc)
continue;
@@ -719,7 +699,7 @@ static int get_max_column_width(struct rvu *rvu)
if (!strlen(block.name))
continue;
- get_lf_str_list(block, pcifunc, buf);
+ get_lf_str_list(&block, pcifunc, buf);
if (lf_str_size <= strlen(buf))
lf_str_size = strlen(buf) + 1;
}
@@ -781,7 +761,7 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
off = 0;
flag = 0;
- pcifunc = pf << 10 | vf;
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, vf);
if (!pcifunc)
continue;
@@ -803,7 +783,7 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
continue;
len = 0;
lfs[len] = '\0';
- get_lf_str_list(block, pcifunc, lfs);
+ get_lf_str_list(&block, pcifunc, lfs);
if (strlen(lfs))
flag = 1;
@@ -838,10 +818,10 @@ RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
{
+ char cgx[10], lmac[10], chan[10];
struct rvu *rvu = filp->private;
struct pci_dev *pdev = NULL;
struct mac_ops *mac_ops;
- char cgx[10], lmac[10];
struct rvu_pfvf *pfvf;
int pf, domain, blkid;
u8 cgx_id, lmac_id;
@@ -852,7 +832,7 @@ static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
/* There can be no CGX devices at all */
if (!mac_ops)
return 0;
- seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\n",
+ seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\tCHAN\n",
mac_ops->name);
for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
if (!is_pf_cgxmapped(rvu, pf))
@@ -864,7 +844,7 @@ static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
cgx[0] = 0;
lmac[0] = 0;
- pcifunc = pf << 10;
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0);
pfvf = rvu_get_pfvf(rvu, pcifunc);
if (pfvf->nix_blkaddr == BLKADDR_NIX0)
@@ -876,8 +856,11 @@ static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
&lmac_id);
sprintf(cgx, "%s%d", mac_ops->name, cgx_id);
sprintf(lmac, "LMAC%d", lmac_id);
- seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n",
- dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac);
+ sprintf(chan, "%d",
+ rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0));
+ seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\t%s\n",
+ dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac,
+ chan);
pci_dev_put(pdev);
}
@@ -886,6 +869,71 @@ static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
RVU_DEBUG_SEQ_FOPS(rvu_pf_cgx_map, rvu_pf_cgx_map_display, NULL);
+static int rvu_dbg_rvu_fwdata_display(struct seq_file *s, void *unused)
+{
+ struct rvu *rvu = s->private;
+ struct rvu_fwdata *fwdata;
+ u8 mac[ETH_ALEN];
+ int count = 0, i;
+
+ if (!rvu->fwdata)
+ return -EAGAIN;
+
+ fwdata = rvu->fwdata;
+ seq_puts(s, "\nRVU Firmware Data:\n");
+ seq_puts(s, "\n\t\tPTP INFORMATION\n");
+ seq_puts(s, "\t\t===============\n");
+ seq_printf(s, "\t\texternal clockrate \t :%x\n",
+ fwdata->ptp_ext_clk_rate);
+ seq_printf(s, "\t\texternal timestamp \t :%x\n",
+ fwdata->ptp_ext_tstamp);
+ seq_puts(s, "\n");
+
+ seq_puts(s, "\n\t\tSDP CHANNEL INFORMATION\n");
+ seq_puts(s, "\t\t=======================\n");
+ seq_printf(s, "\t\tValid \t\t\t :%x\n", fwdata->channel_data.valid);
+ seq_printf(s, "\t\tNode ID \t\t :%x\n",
+ fwdata->channel_data.info.node_id);
+ seq_printf(s, "\t\tNumber of VFs \t\t :%x\n",
+ fwdata->channel_data.info.max_vfs);
+ seq_printf(s, "\t\tNumber of PF-Rings \t :%x\n",
+ fwdata->channel_data.info.num_pf_rings);
+ seq_printf(s, "\t\tPF SRN \t\t\t :%x\n",
+ fwdata->channel_data.info.pf_srn);
+ seq_puts(s, "\n");
+
+ seq_puts(s, "\n\t\tPF-INDEX MACADDRESS\n");
+ seq_puts(s, "\t\t====================\n");
+ for (i = 0; i < PF_MACNUM_MAX; i++) {
+ u64_to_ether_addr(fwdata->pf_macs[i], mac);
+ if (!is_zero_ether_addr(mac)) {
+ seq_printf(s, "\t\t %d %pM\n", i, mac);
+ count++;
+ }
+ }
+
+ if (!count)
+ seq_puts(s, "\t\tNo valid address found\n");
+
+ seq_puts(s, "\n\t\tVF-INDEX MACADDRESS\n");
+ seq_puts(s, "\t\t====================\n");
+ count = 0;
+ for (i = 0; i < VF_MACNUM_MAX; i++) {
+ u64_to_ether_addr(fwdata->vf_macs[i], mac);
+ if (!is_zero_ether_addr(mac)) {
+ seq_printf(s, "\t\t %d %pM\n", i, mac);
+ count++;
+ }
+ }
+
+ if (!count)
+ seq_puts(s, "\t\tNo valid address found\n");
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(rvu_fwdata, rvu_fwdata_display, NULL);
+
static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blkaddr, int lf,
u16 *pcifunc)
{
@@ -941,19 +989,18 @@ static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf)
/* The 'qsize' entry dumps current Aura/Pool context Qsize
* and each context's current enable/disable status in a bitmap.
*/
-static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused,
+static int rvu_dbg_qsize_display(struct seq_file *s, void *unsused,
int blktype)
{
- void (*print_qsize)(struct seq_file *filp,
+ void (*print_qsize)(struct seq_file *s,
struct rvu_pfvf *pfvf) = NULL;
- struct dentry *current_dir;
struct rvu_pfvf *pfvf;
struct rvu *rvu;
int qsize_id;
u16 pcifunc;
int blkaddr;
- rvu = filp->private;
+ rvu = s->private;
switch (blktype) {
case BLKTYPE_NPA:
qsize_id = rvu->rvu_dbg.npa_qsize_id;
@@ -969,42 +1016,36 @@ static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused,
return -EINVAL;
}
- if (blktype == BLKTYPE_NPA) {
+ if (blktype == BLKTYPE_NPA)
blkaddr = BLKADDR_NPA;
- } else {
- current_dir = filp->file->f_path.dentry->d_parent;
- blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
- BLKADDR_NIX1 : BLKADDR_NIX0);
- }
+ else
+ blkaddr = debugfs_get_aux_num(s->file);
if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc))
return -EINVAL;
pfvf = rvu_get_pfvf(rvu, pcifunc);
- print_qsize(filp, pfvf);
+ print_qsize(s, pfvf);
return 0;
}
-static ssize_t rvu_dbg_qsize_write(struct file *filp,
+static ssize_t rvu_dbg_qsize_write(struct file *file,
const char __user *buffer, size_t count,
loff_t *ppos, int blktype)
{
char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix";
- struct seq_file *seqfile = filp->private_data;
+ struct seq_file *seqfile = file->private_data;
char *cmd_buf, *cmd_buf_tmp, *subtoken;
struct rvu *rvu = seqfile->private;
- struct dentry *current_dir;
int blkaddr;
u16 pcifunc;
int ret, lf;
- cmd_buf = memdup_user(buffer, count + 1);
+ cmd_buf = memdup_user_nul(buffer, count);
if (IS_ERR(cmd_buf))
return -ENOMEM;
- cmd_buf[count] = '\0';
-
cmd_buf_tmp = strchr(cmd_buf, '\n');
if (cmd_buf_tmp) {
*cmd_buf_tmp = '\0';
@@ -1022,13 +1063,10 @@ static ssize_t rvu_dbg_qsize_write(struct file *filp,
goto qsize_write_done;
}
- if (blktype == BLKTYPE_NPA) {
+ if (blktype == BLKTYPE_NPA)
blkaddr = BLKADDR_NPA;
- } else {
- current_dir = filp->f_path.dentry->d_parent;
- blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
- BLKADDR_NIX1 : BLKADDR_NIX0);
- }
+ else
+ blkaddr = debugfs_get_aux_num(file);
if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) {
ret = -EINVAL;
@@ -1065,6 +1103,11 @@ static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
struct npa_aura_s *aura = &rsp->aura;
struct rvu *rvu = m->private;
+ if (is_cn20k(rvu->pdev)) {
+ print_npa_cn20k_aura_ctx(m, (struct npa_cn20k_aq_enq_rsp *)rsp);
+ return;
+ }
+
seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);
seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n",
@@ -1113,6 +1156,11 @@ static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
struct npa_pool_s *pool = &rsp->pool;
struct rvu *rvu = m->private;
+ if (is_cn20k(rvu->pdev)) {
+ print_npa_cn20k_pool_ctx(m, (struct npa_cn20k_aq_enq_rsp *)rsp);
+ return;
+ }
+
seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);
seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n",
@@ -1605,6 +1653,370 @@ static void print_nix_cn10k_sq_ctx(struct seq_file *m,
(u64)sq_ctx->dropped_pkts);
}
+static void print_tm_tree(struct seq_file *m,
+ struct nix_aq_enq_rsp *rsp, u64 sq)
+{
+ struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
+ struct nix_hw *nix_hw = m->private;
+ struct rvu *rvu = nix_hw->rvu;
+ u16 p1, p2, p3, p4, schq;
+ int blkaddr;
+ u64 cfg;
+
+ if (!sq_ctx->ena)
+ return;
+
+ blkaddr = nix_hw->blkaddr;
+ schq = sq_ctx->smq;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_MDQX_PARENT(schq));
+ p1 = FIELD_GET(NIX_AF_MDQ_PARENT_MASK, cfg);
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_TL4X_PARENT(p1));
+ p2 = FIELD_GET(NIX_AF_TL4_PARENT_MASK, cfg);
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_TL3X_PARENT(p2));
+ p3 = FIELD_GET(NIX_AF_TL3_PARENT_MASK, cfg);
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_TL2X_PARENT(p3));
+ p4 = FIELD_GET(NIX_AF_TL2_PARENT_MASK, cfg);
+ seq_printf(m,
+ "SQ(%llu) -> SMQ(%u) -> TL4(%u) -> TL3(%u) -> TL2(%u) -> TL1(%u)\n",
+ sq, schq, p1, p2, p3, p4);
+}
+
+/* Dumps the given tm_tree registers */
+static int rvu_dbg_nix_tm_tree_display(struct seq_file *m, void *unused)
+{
+ int qidx, nixlf, rc, id, max_id = 0;
+ struct nix_hw *nix_hw = m->private;
+ struct rvu *rvu = nix_hw->rvu;
+ struct nix_aq_enq_req aq_req;
+ struct nix_aq_enq_rsp rsp;
+ struct rvu_pfvf *pfvf;
+ u16 pcifunc;
+
+ nixlf = rvu->rvu_dbg.nix_tm_ctx.lf;
+ id = rvu->rvu_dbg.nix_tm_ctx.id;
+
+ if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
+ return -EINVAL;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ max_id = pfvf->sq_ctx->qsize;
+
+ memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
+ aq_req.hdr.pcifunc = pcifunc;
+ aq_req.ctype = NIX_AQ_CTYPE_SQ;
+ aq_req.op = NIX_AQ_INSTOP_READ;
+ seq_printf(m, "pcifunc is 0x%x\n", pcifunc);
+ for (qidx = id; qidx < max_id; qidx++) {
+ aq_req.qidx = qidx;
+
+		/* Skip SQs if not initialized */
+ if (!test_bit(qidx, pfvf->sq_bmap))
+ continue;
+
+ rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
+
+ if (rc) {
+ seq_printf(m, "Failed to read SQ(%d) context\n",
+ aq_req.qidx);
+ continue;
+ }
+ print_tm_tree(m, &rsp, aq_req.qidx);
+ }
+ return 0;
+}
+
+static ssize_t rvu_dbg_nix_tm_tree_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *m = filp->private_data;
+ struct nix_hw *nix_hw = m->private;
+ struct rvu *rvu = nix_hw->rvu;
+ struct rvu_pfvf *pfvf;
+ u16 pcifunc;
+ u64 nixlf;
+ int ret;
+
+ ret = kstrtoull_from_user(buffer, count, 10, &nixlf);
+ if (ret)
+ return ret;
+
+ if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
+ return -EINVAL;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ if (!pfvf->sq_ctx) {
+ dev_warn(rvu->dev, "SQ context is not initialized\n");
+ return -EINVAL;
+ }
+
+ rvu->rvu_dbg.nix_tm_ctx.lf = nixlf;
+ return count;
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_tm_tree, nix_tm_tree_display, nix_tm_tree_write);
+
+static void print_tm_topo(struct seq_file *m, u64 schq, u32 lvl)
+{
+ struct nix_hw *nix_hw = m->private;
+ struct rvu *rvu = nix_hw->rvu;
+ int blkaddr, link, link_level;
+ struct rvu_hwinfo *hw;
+
+ hw = rvu->hw;
+ blkaddr = nix_hw->blkaddr;
+ if (lvl == NIX_TXSCH_LVL_MDQ) {
+ seq_printf(m, "NIX_AF_SMQ[%llu]_CFG =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq)));
+ seq_printf(m, "NIX_AF_SMQ[%llu]_STATUS =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_SMQX_STATUS(schq)));
+ seq_printf(m, "NIX_AF_MDQ[%llu]_OUT_MD_COUNT =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_MDQX_OUT_MD_COUNT(schq)));
+ seq_printf(m, "NIX_AF_MDQ[%llu]_SCHEDULE =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_MDQX_SCHEDULE(schq)));
+ seq_printf(m, "NIX_AF_MDQ[%llu]_SHAPE =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_MDQX_SHAPE(schq)));
+ seq_printf(m, "NIX_AF_MDQ[%llu]_CIR =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_MDQX_CIR(schq)));
+ seq_printf(m, "NIX_AF_MDQ[%llu]_PIR =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_MDQX_PIR(schq)));
+ seq_printf(m, "NIX_AF_MDQ[%llu]_SW_XOFF =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_MDQX_SW_XOFF(schq)));
+ seq_printf(m, "NIX_AF_MDQ[%llu]_PARENT =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_MDQX_PARENT(schq)));
+ seq_puts(m, "\n");
+ }
+
+ if (lvl == NIX_TXSCH_LVL_TL4) {
+ seq_printf(m, "NIX_AF_TL4[%llu]_SDP_LINK_CFG =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL4X_SDP_LINK_CFG(schq)));
+ seq_printf(m, "NIX_AF_TL4[%llu]_SCHEDULE =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL4X_SCHEDULE(schq)));
+ seq_printf(m, "NIX_AF_TL4[%llu]_SHAPE =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL4X_SHAPE(schq)));
+ seq_printf(m, "NIX_AF_TL4[%llu]_CIR =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL4X_CIR(schq)));
+ seq_printf(m, "NIX_AF_TL4[%llu]_PIR =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL4X_PIR(schq)));
+ seq_printf(m, "NIX_AF_TL4[%llu]_SW_XOFF =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL4X_SW_XOFF(schq)));
+ seq_printf(m, "NIX_AF_TL4[%llu]_TOPOLOGY =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL4X_TOPOLOGY(schq)));
+ seq_printf(m, "NIX_AF_TL4[%llu]_PARENT =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL4X_PARENT(schq)));
+ seq_printf(m, "NIX_AF_TL4[%llu]_MD_DEBUG0 =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL4X_MD_DEBUG0(schq)));
+ seq_printf(m, "NIX_AF_TL4[%llu]_MD_DEBUG1 =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL4X_MD_DEBUG1(schq)));
+ seq_puts(m, "\n");
+ }
+
+ if (lvl == NIX_TXSCH_LVL_TL3) {
+ seq_printf(m, "NIX_AF_TL3[%llu]_SCHEDULE =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL3X_SCHEDULE(schq)));
+ seq_printf(m, "NIX_AF_TL3[%llu]_SHAPE =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL3X_SHAPE(schq)));
+ seq_printf(m, "NIX_AF_TL3[%llu]_CIR =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL3X_CIR(schq)));
+ seq_printf(m, "NIX_AF_TL3[%llu]_PIR =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL3X_PIR(schq)));
+ seq_printf(m, "NIX_AF_TL3[%llu]_SW_XOFF =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL3X_SW_XOFF(schq)));
+ seq_printf(m, "NIX_AF_TL3[%llu]_TOPOLOGY =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL3X_TOPOLOGY(schq)));
+ seq_printf(m, "NIX_AF_TL3[%llu]_PARENT =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL3X_PARENT(schq)));
+ seq_printf(m, "NIX_AF_TL3[%llu]_MD_DEBUG0 =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL3X_MD_DEBUG0(schq)));
+ seq_printf(m, "NIX_AF_TL3[%llu]_MD_DEBUG1 =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL3X_MD_DEBUG1(schq)));
+
+ link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL)
+ & 0x01 ? NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
+ if (lvl == link_level) {
+ seq_printf(m,
+ "NIX_AF_TL3_TL2[%llu]_BP_STATUS =0x%llx\n",
+ schq, rvu_read64(rvu, blkaddr,
+ NIX_AF_TL3_TL2X_BP_STATUS(schq)));
+ for (link = 0; link < hw->cgx_links; link++)
+ seq_printf(m,
+ "NIX_AF_TL3_TL2[%llu]_LINK[%d]_CFG =0x%llx\n",
+ schq, link,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL3_TL2X_LINKX_CFG(schq, link)));
+ }
+ seq_puts(m, "\n");
+ }
+
+ if (lvl == NIX_TXSCH_LVL_TL2) {
+ seq_printf(m, "NIX_AF_TL2[%llu]_SHAPE =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL2X_SHAPE(schq)));
+ seq_printf(m, "NIX_AF_TL2[%llu]_CIR =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL2X_CIR(schq)));
+ seq_printf(m, "NIX_AF_TL2[%llu]_PIR =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL2X_PIR(schq)));
+ seq_printf(m, "NIX_AF_TL2[%llu]_SW_XOFF =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL2X_SW_XOFF(schq)));
+ seq_printf(m, "NIX_AF_TL2[%llu]_TOPOLOGY =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL2X_TOPOLOGY(schq)));
+ seq_printf(m, "NIX_AF_TL2[%llu]_PARENT =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL2X_PARENT(schq)));
+ seq_printf(m, "NIX_AF_TL2[%llu]_MD_DEBUG0 =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL2X_MD_DEBUG0(schq)));
+ seq_printf(m, "NIX_AF_TL2[%llu]_MD_DEBUG1 =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL2X_MD_DEBUG1(schq)));
+
+ link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL)
+ & 0x01 ? NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
+ if (lvl == link_level) {
+ seq_printf(m,
+ "NIX_AF_TL3_TL2[%llu]_BP_STATUS =0x%llx\n",
+ schq, rvu_read64(rvu, blkaddr,
+ NIX_AF_TL3_TL2X_BP_STATUS(schq)));
+ for (link = 0; link < hw->cgx_links; link++)
+ seq_printf(m,
+ "NIX_AF_TL3_TL2[%llu]_LINK[%d]_CFG =0x%llx\n",
+ schq, link, rvu_read64(rvu, blkaddr,
+ NIX_AF_TL3_TL2X_LINKX_CFG(schq, link)));
+ }
+ seq_puts(m, "\n");
+ }
+
+ if (lvl == NIX_TXSCH_LVL_TL1) {
+ seq_printf(m, "NIX_AF_TX_LINK[%llu]_NORM_CREDIT =0x%llx\n",
+ schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TX_LINKX_NORM_CREDIT(schq)));
+ seq_printf(m, "NIX_AF_TX_LINK[%llu]_HW_XOFF =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TX_LINKX_HW_XOFF(schq)));
+ seq_printf(m, "NIX_AF_TL1[%llu]_SCHEDULE =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL1X_SCHEDULE(schq)));
+ seq_printf(m, "NIX_AF_TL1[%llu]_SHAPE =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SHAPE(schq)));
+ seq_printf(m, "NIX_AF_TL1[%llu]_CIR =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq)));
+ seq_printf(m, "NIX_AF_TL1[%llu]_SW_XOFF =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq)));
+ seq_printf(m, "NIX_AF_TL1[%llu]_TOPOLOGY =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL1X_TOPOLOGY(schq)));
+ seq_printf(m, "NIX_AF_TL1[%llu]_MD_DEBUG0 =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL1X_MD_DEBUG0(schq)));
+ seq_printf(m, "NIX_AF_TL1[%llu]_MD_DEBUG1 =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL1X_MD_DEBUG1(schq)));
+ seq_printf(m, "NIX_AF_TL1[%llu]_DROPPED_PACKETS =0x%llx\n",
+ schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL1X_DROPPED_PACKETS(schq)));
+ seq_printf(m, "NIX_AF_TL1[%llu]_DROPPED_BYTES =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL1X_DROPPED_BYTES(schq)));
+ seq_printf(m, "NIX_AF_TL1[%llu]_RED_PACKETS =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL1X_RED_PACKETS(schq)));
+ seq_printf(m, "NIX_AF_TL1[%llu]_RED_BYTES =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL1X_RED_BYTES(schq)));
+ seq_printf(m, "NIX_AF_TL1[%llu]_YELLOW_PACKETS =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL1X_YELLOW_PACKETS(schq)));
+ seq_printf(m, "NIX_AF_TL1[%llu]_YELLOW_BYTES =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL1X_YELLOW_BYTES(schq)));
+ seq_printf(m, "NIX_AF_TL1[%llu]_GREEN_PACKETS =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL1X_GREEN_PACKETS(schq)));
+ seq_printf(m, "NIX_AF_TL1[%llu]_GREEN_BYTES =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL1X_GREEN_BYTES(schq)));
+ seq_puts(m, "\n");
+ }
+}
+
+/* Dumps given tm_topo registers */
+static int rvu_dbg_nix_tm_topo_display(struct seq_file *m, void *unused)
+{
+ struct nix_hw *nix_hw = m->private;
+ struct rvu *rvu = nix_hw->rvu;
+ struct nix_aq_enq_req aq_req;
+ struct nix_txsch *txsch;
+ int nixlf, lvl, schq;
+ u16 pcifunc;
+
+ nixlf = rvu->rvu_dbg.nix_tm_ctx.lf;
+
+ if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
+ return -EINVAL;
+
+ memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
+ aq_req.hdr.pcifunc = pcifunc;
+ aq_req.ctype = NIX_AQ_CTYPE_SQ;
+ aq_req.op = NIX_AQ_INSTOP_READ;
+ seq_printf(m, "pcifunc is 0x%x\n", pcifunc);
+
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ txsch = &nix_hw->txsch[lvl];
+ for (schq = 0; schq < txsch->schq.max; schq++) {
+ if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) == pcifunc)
+ print_tm_topo(m, schq, lvl);
+ }
+ }
+ return 0;
+}
+
+static ssize_t rvu_dbg_nix_tm_topo_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *m = filp->private_data;
+ struct nix_hw *nix_hw = m->private;
+ struct rvu *rvu = nix_hw->rvu;
+ struct rvu_pfvf *pfvf;
+ u16 pcifunc;
+ u64 nixlf;
+ int ret;
+
+ ret = kstrtoull_from_user(buffer, count, 10, &nixlf);
+ if (ret)
+ return ret;
+
+ if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
+ return -EINVAL;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ if (!pfvf->sq_ctx) {
+ dev_warn(rvu->dev, "SQ context is not initialized\n");
+ return -EINVAL;
+ }
+
+ rvu->rvu_dbg.nix_tm_ctx.lf = nixlf;
+ return count;
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_tm_topo, nix_tm_topo_display, nix_tm_topo_write);
+
/* Dumps given nix_sq's context */
static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
@@ -1612,10 +2024,16 @@ static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
struct nix_hw *nix_hw = m->private;
struct rvu *rvu = nix_hw->rvu;
+ if (is_cn20k(rvu->pdev)) {
+ print_nix_cn20k_sq_ctx(m, (struct nix_cn20k_sq_ctx_s *)sq_ctx);
+ return;
+ }
+
if (!is_rvu_otx2(rvu)) {
print_nix_cn10k_sq_ctx(m, (struct nix_cn10k_sq_ctx_s *)sq_ctx);
return;
}
+
seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n",
sq_ctx->sqe_way_mask, sq_ctx->cq);
seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
@@ -1706,7 +2124,9 @@ static void print_nix_cn10k_rq_ctx(struct seq_file *m,
seq_printf(m, "W1: ipsecd_drop_ena \t\t%d\nW1: chi_ena \t\t\t%d\n\n",
rq_ctx->ipsecd_drop_ena, rq_ctx->chi_ena);
- seq_printf(m, "W2: band_prof_id \t\t%d\n", rq_ctx->band_prof_id);
+ seq_printf(m, "W2: band_prof_id \t\t%d\n",
+ (u16)rq_ctx->band_prof_id_h << 10 | rq_ctx->band_prof_id);
+
seq_printf(m, "W2: policer_ena \t\t%d\n", rq_ctx->policer_ena);
seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n", rq_ctx->spb_sizem1);
seq_printf(m, "W2: wqe_skip \t\t\t%d\nW2: sqb_ena \t\t\t%d\n",
@@ -1828,6 +2248,11 @@ static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
struct nix_hw *nix_hw = m->private;
struct rvu *rvu = nix_hw->rvu;
+ if (is_cn20k(rvu->pdev)) {
+ print_nix_cn20k_cq_ctx(m, (struct nix_cn20k_aq_enq_rsp *)rsp);
+ return;
+ }
+
seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);
seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr);
@@ -1857,6 +2282,7 @@ static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int);
seq_printf(m, "W3: qsize \t\t\t%d\nW3:caching \t\t\t%d\n",
cq_ctx->qsize, cq_ctx->caching);
+
seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n",
cq_ctx->substream, cq_ctx->ena);
if (!is_rvu_otx2(rvu)) {
@@ -2218,7 +2644,10 @@ static void print_band_prof_ctx(struct seq_file *m,
(prof->rc_action == 1) ? "DROP" : "RED";
seq_printf(m, "W1: rc_action\t\t%s\n", str);
seq_printf(m, "W1: meter_algo\t\t%d\n", prof->meter_algo);
- seq_printf(m, "W1: band_prof_id\t%d\n", prof->band_prof_id);
+
+ seq_printf(m, "W1: band_prof_id\t%d\n",
+ (u16)prof->band_prof_id_h << 7 | prof->band_prof_id);
+
seq_printf(m, "W1: hl_en\t\t%d\n", prof->hl_en);
seq_printf(m, "W2: ts\t\t\t%lld\n", (u64)prof->ts);
@@ -2291,10 +2720,10 @@ static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused)
pcifunc = ipolicer->pfvf_map[idx];
if (!(pcifunc & RVU_PFVF_FUNC_MASK))
seq_printf(m, "Allocated to :: PF %d\n",
- rvu_get_pf(pcifunc));
+ rvu_get_pf(rvu->pdev, pcifunc));
else
seq_printf(m, "Allocated to :: PF %d VF %d\n",
- rvu_get_pf(pcifunc),
+ rvu_get_pf(rvu->pdev, pcifunc),
(pcifunc & RVU_PFVF_FUNC_MASK) - 1);
print_band_prof_ctx(m, &aq_rsp.prof);
}
@@ -2351,6 +2780,10 @@ static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
nix_hw = &rvu->hw->nix[1];
}
+ debugfs_create_file("tm_tree", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_tm_tree_fops);
+ debugfs_create_file("tm_topo", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_tm_topo_fops);
debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
&rvu_dbg_nix_sq_ctx_fops);
debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
@@ -2365,8 +2798,8 @@ static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
&rvu_dbg_nix_ndc_tx_hits_miss_fops);
debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
&rvu_dbg_nix_ndc_rx_hits_miss_fops);
- debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
- &rvu_dbg_nix_qsize_fops);
+ debugfs_create_file_aux_num("qsize", 0600, rvu->rvu_dbg.nix, rvu,
+ blkaddr, &rvu_dbg_nix_qsize_fops);
debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
&rvu_dbg_nix_band_prof_ctx_fops);
debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw,
@@ -2383,6 +2816,9 @@ static void rvu_dbg_npa_init(struct rvu *rvu)
&rvu_dbg_npa_aura_ctx_fops);
debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu,
&rvu_dbg_npa_pool_ctx_fops);
+
+ if (is_cn20k(rvu->pdev)) /* NDC not applicable for cn20k */
+ return;
debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu,
&rvu_dbg_npa_ndc_cache_fops);
debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa, rvu,
@@ -2515,28 +2951,14 @@ static int cgx_print_stats(struct seq_file *s, int lmac_id)
return err;
}
-static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id)
+static int rvu_dbg_derive_lmacid(struct seq_file *s)
{
- struct dentry *current_dir;
- char *buf;
-
- current_dir = filp->file->f_path.dentry->d_parent;
- buf = strrchr(current_dir->d_name.name, 'c');
- if (!buf)
- return -EINVAL;
-
- return kstrtoint(buf + 1, 10, lmac_id);
+ return debugfs_get_aux_num(s->file);
}
-static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
+static int rvu_dbg_cgx_stat_display(struct seq_file *s, void *unused)
{
- int lmac_id, err;
-
- err = rvu_dbg_derive_lmacid(filp, &lmac_id);
- if (!err)
- return cgx_print_stats(filp, lmac_id);
-
- return err;
+ return cgx_print_stats(s, rvu_dbg_derive_lmacid(s));
}
RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
@@ -2594,18 +3016,103 @@ static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id)
return 0;
}
-static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused)
+static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *s, void *unused)
{
- int err, lmac_id;
+ return cgx_print_dmac_flt(s, rvu_dbg_derive_lmacid(s));
+}
- err = rvu_dbg_derive_lmacid(filp, &lmac_id);
- if (!err)
- return cgx_print_dmac_flt(filp, lmac_id);
+RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL);
- return err;
+static int cgx_print_fwdata(struct seq_file *s, int lmac_id)
+{
+ struct cgx_lmac_fwdata_s *fwdata;
+ void *cgxd = s->private;
+ struct phy_s *phy;
+ struct rvu *rvu;
+ int cgx_id, i;
+
+ rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
+ if (!rvu)
+ return -ENODEV;
+
+ if (!rvu->fwdata)
+ return -EAGAIN;
+
+ cgx_id = cgx_get_cgxid(cgxd);
+
+ if (rvu->hw->lmac_per_cgx == CGX_LMACS_USX)
+ fwdata = &rvu->fwdata->cgx_fw_data_usx[cgx_id][lmac_id];
+ else
+ fwdata = &rvu->fwdata->cgx_fw_data[cgx_id][lmac_id];
+
+ seq_puts(s, "\nFIRMWARE SHARED:\n");
+ seq_puts(s, "\t\tSUPPORTED LINK INFORMATION\t\t\n");
+ seq_puts(s, "\t\t==========================\n");
+ seq_printf(s, "\t\t Link modes \t\t :%llx\n",
+ fwdata->supported_link_modes);
+ seq_printf(s, "\t\t Autoneg \t\t :%llx\n", fwdata->supported_an);
+ seq_printf(s, "\t\t FEC \t\t\t :%llx\n", fwdata->supported_fec);
+ seq_puts(s, "\n");
+
+ seq_puts(s, "\t\tADVERTISED LINK INFORMATION\t\t\n");
+ seq_puts(s, "\t\t==========================\n");
+ seq_printf(s, "\t\t Link modes \t\t :%llx\n",
+ (u64)fwdata->advertised_link_modes);
+ seq_printf(s, "\t\t Autoneg \t\t :%x\n", fwdata->advertised_an);
+ seq_printf(s, "\t\t FEC \t\t\t :%llx\n", fwdata->advertised_fec);
+ seq_puts(s, "\n");
+
+ seq_puts(s, "\t\tLMAC CONFIG\t\t\n");
+ seq_puts(s, "\t\t============\n");
+ seq_printf(s, "\t\t rw_valid \t\t :%x\n", fwdata->rw_valid);
+ seq_printf(s, "\t\t lmac_type \t\t :%x\n", fwdata->lmac_type);
+ seq_printf(s, "\t\t portm_idx \t\t :%x\n", fwdata->portm_idx);
+ seq_printf(s, "\t\t mgmt_port \t\t :%x\n", fwdata->mgmt_port);
+ seq_printf(s, "\t\t Link modes own \t :%llx\n",
+ (u64)fwdata->advertised_link_modes_own);
+ seq_puts(s, "\n");
+
+ seq_puts(s, "\n\t\tEEPROM DATA\n");
+ seq_puts(s, "\t\t===========\n");
+ seq_printf(s, "\t\t sff_id \t\t :%x\n", fwdata->sfp_eeprom.sff_id);
+ seq_puts(s, "\t\t data \t\t\t :\n");
+ seq_puts(s, "\t\t");
+ for (i = 0; i < SFP_EEPROM_SIZE; i++) {
+ seq_printf(s, "%x", fwdata->sfp_eeprom.buf[i]);
+ if ((i + 1) % 16 == 0) {
+ seq_puts(s, "\n");
+ seq_puts(s, "\t\t");
+ }
+ }
+ seq_puts(s, "\n");
+
+ phy = &fwdata->phy;
+ seq_puts(s, "\n\t\tPHY INFORMATION\n");
+ seq_puts(s, "\t\t===============\n");
+ seq_printf(s, "\t\t Mod type configurable \t\t :%x\n",
+ phy->misc.can_change_mod_type);
+ seq_printf(s, "\t\t Mod type \t\t\t :%x\n", phy->misc.mod_type);
+ seq_printf(s, "\t\t Support FEC \t\t\t :%x\n", phy->misc.has_fec_stats);
+ seq_printf(s, "\t\t RSFEC corrected words \t\t :%x\n",
+ phy->fec_stats.rsfec_corr_cws);
+ seq_printf(s, "\t\t RSFEC uncorrected words \t :%x\n",
+ phy->fec_stats.rsfec_uncorr_cws);
+ seq_printf(s, "\t\t BRFEC corrected words \t\t :%x\n",
+ phy->fec_stats.brfec_corr_blks);
+ seq_printf(s, "\t\t BRFEC uncorrected words \t :%x\n",
+ phy->fec_stats.brfec_uncorr_blks);
+ seq_puts(s, "\n");
+
+ return 0;
}
-RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL);
+static int rvu_dbg_cgx_fwdata_display(struct seq_file *s, void *unused)
+{
+ return cgx_print_fwdata(s, rvu_dbg_derive_lmacid(s));
+}
+
+RVU_DEBUG_SEQ_FOPS(cgx_fwdata, cgx_fwdata_display, NULL);
static void rvu_dbg_cgx_init(struct rvu *rvu)
{
@@ -2641,11 +3148,14 @@ static void rvu_dbg_cgx_init(struct rvu *rvu)
rvu->rvu_dbg.lmac =
debugfs_create_dir(dname, rvu->rvu_dbg.cgx);
- debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac,
- cgx, &rvu_dbg_cgx_stat_fops);
- debugfs_create_file("mac_filter", 0600,
- rvu->rvu_dbg.lmac, cgx,
+ debugfs_create_file_aux_num("stats", 0600, rvu->rvu_dbg.lmac,
+ cgx, lmac_id, &rvu_dbg_cgx_stat_fops);
+ debugfs_create_file_aux_num("mac_filter", 0600,
+ rvu->rvu_dbg.lmac, cgx, lmac_id,
&rvu_dbg_cgx_dmac_flt_fops);
+ debugfs_create_file("fwdata", 0600,
+ rvu->rvu_dbg.lmac, cgx,
+ &rvu_dbg_cgx_fwdata_fops);
}
}
}
@@ -2667,10 +3177,10 @@ static void rvu_print_npc_mcam_info(struct seq_file *s,
if (!(pcifunc & RVU_PFVF_FUNC_MASK))
seq_printf(s, "\n\t\t Device \t\t: PF%d\n",
- rvu_get_pf(pcifunc));
+ rvu_get_pf(rvu->pdev, pcifunc));
else
seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n",
- rvu_get_pf(pcifunc),
+ rvu_get_pf(rvu->pdev, pcifunc),
(pcifunc & RVU_PFVF_FUNC_MASK) - 1);
if (entry_acnt) {
@@ -2733,13 +3243,13 @@ static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued)
seq_puts(filp, "\n\t\t Current allocation\n");
seq_puts(filp, "\t\t====================\n");
for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
- pcifunc = (pf << RVU_PFVF_PF_SHIFT);
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0);
rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
numvfs = (cfg >> 12) & 0xFF;
for (vf = 0; vf < numvfs; vf++) {
- pcifunc = (pf << RVU_PFVF_PF_SHIFT) | (vf + 1);
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, (vf + 1));
rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
}
}
@@ -3010,7 +3520,7 @@ static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
mutex_lock(&mcam->lock);
list_for_each_entry(iter, &mcam->mcam_rules, list) {
- pf = (iter->owner >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
+ pf = rvu_get_pf(rvu->pdev, iter->owner);
seq_printf(s, "\n\tInstalled by: PF%d ", pf);
if (iter->owner & RVU_PFVF_FUNC_MASK) {
@@ -3028,7 +3538,7 @@ static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
rvu_dbg_npc_mcam_show_flows(s, iter);
if (is_npc_intf_rx(iter->intf)) {
target = iter->rx_action.pf_func;
- pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
+ pf = rvu_get_pf(rvu->pdev, target);
seq_printf(s, "\tForward to: PF%d ", pf);
if (target & RVU_PFVF_FUNC_MASK) {
@@ -3475,6 +3985,9 @@ static void rvu_dbg_cpt_init(struct rvu *rvu, int blkaddr)
static const char *rvu_get_dbg_dir_name(struct rvu *rvu)
{
+ if (is_cn20k(rvu->pdev))
+ return "cn20k";
+
if (!is_rvu_otx2(rvu))
return "cn10k";
else
@@ -3492,6 +4005,9 @@ void rvu_dbg_init(struct rvu *rvu)
debugfs_create_file("lmtst_map_table", 0444, rvu->rvu_dbg.root,
rvu, &rvu_dbg_lmtst_map_table_fops);
+ debugfs_create_file("rvu_fwdata", 0444, rvu->rvu_dbg.root, rvu,
+ &rvu_dbg_rvu_fwdata_fops);
+
if (!cgx_get_cgxcnt_max())
goto create;
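
The debugfs additions above create two writable seq_file entries, tm_tree and tm_topo: writing a NIX LF index in decimal selects the LF, and a subsequent read dumps its transmit-scheduler tree or the per-level TM registers for that LF. The patch also drops the old trick of recovering the LMAC index by parsing the parent directory name ("lmacN") and instead attaches the index at creation time with debugfs_create_file_aux_num(), reading it back via debugfs_get_aux_num(). A minimal sketch of that aux-number pattern, using a hypothetical "demo" attribute that is not part of this patch:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int demo_show(struct seq_file *s, void *unused)
{
	/* Recover the per-instance index stored when the file was created */
	seq_printf(s, "instance %lu\n",
		   (unsigned long)debugfs_get_aux_num(s->file));
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(demo);

static void demo_init(struct dentry *parent, void *priv, int idx)
{
	/* idx travels with the file, so it need not be encoded in the name */
	debugfs_create_file_aux_num("demo", 0400, parent, priv, idx,
				    &demo_fops);
}
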
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
index 96c04f7d93f8..0f9953eaf1b0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -207,7 +207,7 @@ static void rvu_nix_unregister_interrupts(struct rvu *rvu)
rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU] = false;
}
- for (i = NIX_AF_INT_VEC_AF_ERR; i < NIX_AF_INT_VEC_CNT; i++)
+ for (i = NIX_AF_INT_VEC_GEN; i < NIX_AF_INT_VEC_CNT; i++)
if (rvu->irq_allocated[offs + i]) {
free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu_dl);
rvu->irq_allocated[offs + i] = false;
@@ -505,7 +505,9 @@ static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl)
rvu_reporters->nix_event_ctx = nix_event_context;
rvu_reporters->rvu_hw_nix_intr_reporter =
- devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_intr_reporter_ops, 0, rvu);
+ devlink_health_reporter_create(rvu_dl->dl,
+ &rvu_hw_nix_intr_reporter_ops,
+ rvu);
if (IS_ERR(rvu_reporters->rvu_hw_nix_intr_reporter)) {
dev_warn(rvu->dev, "Failed to create hw_nix_intr reporter, err=%ld\n",
PTR_ERR(rvu_reporters->rvu_hw_nix_intr_reporter));
@@ -513,7 +515,9 @@ static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl)
}
rvu_reporters->rvu_hw_nix_gen_reporter =
- devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_gen_reporter_ops, 0, rvu);
+ devlink_health_reporter_create(rvu_dl->dl,
+ &rvu_hw_nix_gen_reporter_ops,
+ rvu);
if (IS_ERR(rvu_reporters->rvu_hw_nix_gen_reporter)) {
dev_warn(rvu->dev, "Failed to create hw_nix_gen reporter, err=%ld\n",
PTR_ERR(rvu_reporters->rvu_hw_nix_gen_reporter));
@@ -521,7 +525,9 @@ static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl)
}
rvu_reporters->rvu_hw_nix_err_reporter =
- devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_err_reporter_ops, 0, rvu);
+ devlink_health_reporter_create(rvu_dl->dl,
+ &rvu_hw_nix_err_reporter_ops,
+ rvu);
if (IS_ERR(rvu_reporters->rvu_hw_nix_err_reporter)) {
dev_warn(rvu->dev, "Failed to create hw_nix_err reporter, err=%ld\n",
PTR_ERR(rvu_reporters->rvu_hw_nix_err_reporter));
@@ -529,7 +535,9 @@ static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl)
}
rvu_reporters->rvu_hw_nix_ras_reporter =
- devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_ras_reporter_ops, 0, rvu);
+ devlink_health_reporter_create(rvu_dl->dl,
+ &rvu_hw_nix_ras_reporter_ops,
+ rvu);
if (IS_ERR(rvu_reporters->rvu_hw_nix_ras_reporter)) {
dev_warn(rvu->dev, "Failed to create hw_nix_ras reporter, err=%ld\n",
PTR_ERR(rvu_reporters->rvu_hw_nix_ras_reporter));
@@ -1051,7 +1059,9 @@ static int rvu_npa_register_reporters(struct rvu_devlink *rvu_dl)
rvu_reporters->npa_event_ctx = npa_event_context;
rvu_reporters->rvu_hw_npa_intr_reporter =
- devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_intr_reporter_ops, 0, rvu);
+ devlink_health_reporter_create(rvu_dl->dl,
+ &rvu_hw_npa_intr_reporter_ops,
+ rvu);
if (IS_ERR(rvu_reporters->rvu_hw_npa_intr_reporter)) {
dev_warn(rvu->dev, "Failed to create hw_npa_intr reporter, err=%ld\n",
PTR_ERR(rvu_reporters->rvu_hw_npa_intr_reporter));
@@ -1059,7 +1069,9 @@ static int rvu_npa_register_reporters(struct rvu_devlink *rvu_dl)
}
rvu_reporters->rvu_hw_npa_gen_reporter =
- devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_gen_reporter_ops, 0, rvu);
+ devlink_health_reporter_create(rvu_dl->dl,
+ &rvu_hw_npa_gen_reporter_ops,
+ rvu);
if (IS_ERR(rvu_reporters->rvu_hw_npa_gen_reporter)) {
dev_warn(rvu->dev, "Failed to create hw_npa_gen reporter, err=%ld\n",
PTR_ERR(rvu_reporters->rvu_hw_npa_gen_reporter));
@@ -1067,7 +1079,9 @@ static int rvu_npa_register_reporters(struct rvu_devlink *rvu_dl)
}
rvu_reporters->rvu_hw_npa_err_reporter =
- devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_err_reporter_ops, 0, rvu);
+ devlink_health_reporter_create(rvu_dl->dl,
+ &rvu_hw_npa_err_reporter_ops,
+ rvu);
if (IS_ERR(rvu_reporters->rvu_hw_npa_err_reporter)) {
dev_warn(rvu->dev, "Failed to create hw_npa_err reporter, err=%ld\n",
PTR_ERR(rvu_reporters->rvu_hw_npa_err_reporter));
@@ -1075,7 +1089,9 @@ static int rvu_npa_register_reporters(struct rvu_devlink *rvu_dl)
}
rvu_reporters->rvu_hw_npa_ras_reporter =
- devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_ras_reporter_ops, 0, rvu);
+ devlink_health_reporter_create(rvu_dl->dl,
+ &rvu_hw_npa_ras_reporter_ops,
+ rvu);
if (IS_ERR(rvu_reporters->rvu_hw_npa_ras_reporter)) {
dev_warn(rvu->dev, "Failed to create hw_npa_ras reporter, err=%ld\n",
PTR_ERR(rvu_reporters->rvu_hw_npa_ras_reporter));
@@ -1202,7 +1218,8 @@ static int rvu_af_dl_dwrr_mtu_validate(struct devlink *devlink, u32 id,
}
static int rvu_af_dl_dwrr_mtu_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct rvu_devlink *rvu_dl = devlink_priv(devlink);
struct rvu *rvu = rvu_dl->rvu;
@@ -1216,7 +1233,8 @@ static int rvu_af_dl_dwrr_mtu_set(struct devlink *devlink, u32 id,
}
static int rvu_af_dl_dwrr_mtu_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct rvu_devlink *rvu_dl = devlink_priv(devlink);
struct rvu *rvu = rvu_dl->rvu;
@@ -1237,11 +1255,13 @@ enum rvu_af_dl_param_id {
RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
RVU_AF_DEVLINK_PARAM_ID_NPC_MCAM_ZONE_PERCENT,
RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
+ RVU_AF_DEVLINK_PARAM_ID_NPC_DEF_RULE_CNTR_ENABLE,
RVU_AF_DEVLINK_PARAM_ID_NIX_MAXLF,
};
static int rvu_af_npc_exact_feature_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct rvu_devlink *rvu_dl = devlink_priv(devlink);
struct rvu *rvu = rvu_dl->rvu;
@@ -1256,7 +1276,8 @@ static int rvu_af_npc_exact_feature_get(struct devlink *devlink, u32 id,
}
static int rvu_af_npc_exact_feature_disable(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct rvu_devlink *rvu_dl = devlink_priv(devlink);
struct rvu *rvu = rvu_dl->rvu;
@@ -1295,7 +1316,8 @@ static int rvu_af_npc_exact_feature_validate(struct devlink *devlink, u32 id,
}
static int rvu_af_dl_npc_mcam_high_zone_percent_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct rvu_devlink *rvu_dl = devlink_priv(devlink);
struct rvu *rvu = rvu_dl->rvu;
@@ -1310,7 +1332,8 @@ static int rvu_af_dl_npc_mcam_high_zone_percent_get(struct devlink *devlink, u32
}
static int rvu_af_dl_npc_mcam_high_zone_percent_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct rvu_devlink *rvu_dl = devlink_priv(devlink);
struct rvu *rvu = rvu_dl->rvu;
@@ -1355,8 +1378,36 @@ static int rvu_af_dl_npc_mcam_high_zone_percent_validate(struct devlink *devlink
return 0;
}
+static int rvu_af_dl_npc_def_rule_cntr_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+
+ ctx->val.vbool = rvu->def_rule_cntr_en;
+
+ return 0;
+}
+
+static int rvu_af_dl_npc_def_rule_cntr_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ int err;
+
+ err = npc_config_cntr_default_entries(rvu, ctx->val.vbool);
+ if (!err)
+ rvu->def_rule_cntr_en = ctx->val.vbool;
+
+ return err;
+}
+
static int rvu_af_dl_nix_maxlf_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct rvu_devlink *rvu_dl = devlink_priv(devlink);
struct rvu *rvu = rvu_dl->rvu;
@@ -1367,7 +1418,8 @@ static int rvu_af_dl_nix_maxlf_get(struct devlink *devlink, u32 id,
}
static int rvu_af_dl_nix_maxlf_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct rvu_devlink *rvu_dl = devlink_priv(devlink);
struct rvu *rvu = rvu_dl->rvu;
@@ -1440,6 +1492,11 @@ static const struct devlink_param rvu_af_dl_params[] = {
rvu_af_dl_npc_mcam_high_zone_percent_get,
rvu_af_dl_npc_mcam_high_zone_percent_set,
rvu_af_dl_npc_mcam_high_zone_percent_validate),
+ DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_DEF_RULE_CNTR_ENABLE,
+ "npc_def_rule_cntr", DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ rvu_af_dl_npc_def_rule_cntr_get,
+ rvu_af_dl_npc_def_rule_cntr_set, NULL),
DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NIX_MAXLF,
"nix_maxlf", DEVLINK_PARAM_TYPE_U16,
BIT(DEVLINK_PARAM_CMODE_RUNTIME),
@@ -1464,6 +1521,9 @@ static int rvu_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
struct rvu *rvu = rvu_dl->rvu;
struct rvu_switch *rswitch;
+ if (rvu->rep_mode)
+ return -EOPNOTSUPP;
+
rswitch = &rvu->rswitch;
*mode = rswitch->mode;
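
The devlink hunks above track two core API changes — param .get/.set callbacks now receive a struct netlink_ext_ack *, and devlink_health_reporter_create() no longer takes the grace-period argument — and add a runtime bool parameter, npc_def_rule_cntr, which toggles counters on the default NPC rules through npc_config_cntr_default_entries(). A minimal, self-contained sketch of the updated callback shape (demo_flag and DEMO_PARAM_ID_FLAG are hypothetical names; registration of the array with the devlink core is omitted):

#include <linux/netlink.h>
#include <net/devlink.h>

static bool demo_flag;	/* hypothetical driver state backing the param */

static int demo_flag_get(struct devlink *devlink, u32 id,
			 struct devlink_param_gset_ctx *ctx,
			 struct netlink_ext_ack *extack)
{
	ctx->val.vbool = demo_flag;
	return 0;
}

static int demo_flag_set(struct devlink *devlink, u32 id,
			 struct devlink_param_gset_ctx *ctx,
			 struct netlink_ext_ack *extack)
{
	demo_flag = ctx->val.vbool;
	return 0;
}

enum {
	/* driver-specific IDs must start past the generic ones */
	DEMO_PARAM_ID_FLAG = DEVLINK_PARAM_GENERIC_ID_MAX + 1,
};

static const struct devlink_param demo_params[] = {
	DEVLINK_PARAM_DRIVER(DEMO_PARAM_ID_FLAG, "demo_flag",
			     DEVLINK_PARAM_TYPE_BOOL,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     demo_flag_get, demo_flag_set, NULL),
};
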
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 00af8888e329..2f485a930edd 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -31,6 +31,7 @@ static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc);
static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
u32 leaf_prof);
static const char *nix_get_ctx_name(int ctype);
+static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc);
enum mc_tbl_sz {
MC_TBL_SZ_256,
@@ -312,7 +313,10 @@ static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
/* TLs aggregating traffic are shared across PF and VFs */
if (lvl >= hw->cap.nix_tx_aggr_lvl) {
- if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
+ if ((nix_get_tx_link(rvu, map_func) !=
+ nix_get_tx_link(rvu, pcifunc)) &&
+ (rvu_get_pf(rvu->pdev, map_func) !=
+ rvu_get_pf(rvu->pdev, pcifunc)))
return false;
else
return true;
@@ -336,7 +340,7 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
bool from_vf;
int err;
- pf = rvu_get_pf(pcifunc);
+ pf = rvu_get_pf(rvu->pdev, pcifunc);
if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
type != NIX_INTF_TYPE_SDP)
return 0;
@@ -360,7 +364,6 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
rvu_npc_set_pkind(rvu, pkind, pfvf);
-
break;
case NIX_INTF_TYPE_LBK:
vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
@@ -414,7 +417,7 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
break;
case NIX_INTF_TYPE_SDP:
from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
- parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
+ parent_pf = &rvu->pf[rvu_get_pf(rvu->pdev, pcifunc)];
sdp_info = parent_pf->sdp_info;
if (!sdp_info) {
dev_err(rvu->dev, "Invalid sdp_info pointer\n");
@@ -567,9 +570,17 @@ void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc)
mutex_unlock(&rvu->rsrc_lock);
}
-int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
- struct nix_bp_cfg_req *req,
- struct msg_rsp *rsp)
+static u16 nix_get_channel(u16 chan, bool cpt_link)
+{
+ /* The CPT channel for a given link channel is assumed to be the
+ * link channel with BIT(11) set.
+ */
+ return cpt_link ? chan | BIT(11) : chan;
+}
+
+static int nix_bp_disable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct msg_rsp *rsp, bool cpt_link)
{
u16 pcifunc = req->hdr.pcifunc;
int blkaddr, pf, type, err;
@@ -577,13 +588,20 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
struct rvu_pfvf *pfvf;
struct nix_hw *nix_hw;
struct nix_bp *bp;
+ u16 chan_v;
u64 cfg;
- pf = rvu_get_pf(pcifunc);
+ pf = rvu_get_pf(rvu->pdev, pcifunc);
type = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
return 0;
+ if (is_sdp_pfvf(rvu, pcifunc))
+ type = NIX_INTF_TYPE_SDP;
+
+ if (cpt_link && !rvu->hw->cpt_links)
+ return 0;
+
pfvf = rvu_get_pfvf(rvu, pcifunc);
err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
if (err)
@@ -592,8 +610,9 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
bp = &nix_hw->bp;
chan_base = pfvf->rx_chan_base + req->chan_base;
for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
- rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
+ chan_v = nix_get_channel(chan, cpt_link);
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v));
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v),
cfg & ~BIT_ULL(16));
if (type == NIX_INTF_TYPE_LBK) {
@@ -612,6 +631,20 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
return 0;
}
+int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct msg_rsp *rsp)
+{
+ return nix_bp_disable(rvu, req, rsp, false);
+}
+
+int rvu_mbox_handler_nix_cpt_bp_disable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct msg_rsp *rsp)
+{
+ return nix_bp_disable(rvu, req, rsp, true);
+}
+
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
int type, int chan_id)
{
@@ -691,20 +724,22 @@ static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
return bpid;
}
-int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
- struct nix_bp_cfg_req *req,
- struct nix_bp_cfg_rsp *rsp)
+static int nix_bp_enable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct nix_bp_cfg_rsp *rsp,
+ bool cpt_link)
{
int blkaddr, pf, type, chan_id = 0;
u16 pcifunc = req->hdr.pcifunc;
struct rvu_pfvf *pfvf;
u16 chan_base, chan;
s16 bpid, bpid_base;
+ u16 chan_v;
u64 cfg;
- pf = rvu_get_pf(pcifunc);
+ pf = rvu_get_pf(rvu->pdev, pcifunc);
type = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
- if (is_sdp_pfvf(pcifunc))
+ if (is_sdp_pfvf(rvu, pcifunc))
type = NIX_INTF_TYPE_SDP;
/* Enable backpressure only for CGX mapped PFs and LBK/SDP interface */
@@ -712,6 +747,9 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
type != NIX_INTF_TYPE_SDP)
return 0;
+ if (cpt_link && !rvu->hw->cpt_links)
+ return 0;
+
pfvf = rvu_get_pfvf(rvu, pcifunc);
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
@@ -725,9 +763,11 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
return -EINVAL;
}
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
+ chan_v = nix_get_channel(chan, cpt_link);
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v));
cfg &= ~GENMASK_ULL(8, 0);
- rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v),
cfg | (bpid & GENMASK_ULL(8, 0)) | BIT_ULL(16));
chan_id++;
bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
@@ -745,6 +785,20 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
return 0;
}
+int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct nix_bp_cfg_rsp *rsp)
+{
+ return nix_bp_enable(rvu, req, rsp, false);
+}
+
+int rvu_mbox_handler_nix_cpt_bp_enable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct nix_bp_cfg_rsp *rsp)
+{
+ return nix_bp_enable(rvu, req, rsp, true);
+}
+
static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
u64 format, bool v4, u64 *fidx)
{
@@ -965,6 +1019,12 @@ static void nix_get_aq_req_smq(struct rvu *rvu, struct nix_aq_enq_req *req,
{
struct nix_cn10k_aq_enq_req *aq_req;
+ if (is_cn20k(rvu->pdev)) {
+ *smq = ((struct nix_cn20k_aq_enq_req *)req)->sq.smq;
+ *smq_mask = ((struct nix_cn20k_aq_enq_req *)req)->sq_mask.smq;
+ return;
+ }
+
if (!is_rvu_otx2(rvu)) {
aq_req = (struct nix_cn10k_aq_enq_req *)req;
*smq = aq_req->sq.smq;
@@ -1095,36 +1155,36 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
case NIX_AQ_INSTOP_WRITE:
if (req->ctype == NIX_AQ_CTYPE_RQ)
memcpy(mask, &req->rq_mask,
- sizeof(struct nix_rq_ctx_s));
+ NIX_MAX_CTX_SIZE);
else if (req->ctype == NIX_AQ_CTYPE_SQ)
memcpy(mask, &req->sq_mask,
- sizeof(struct nix_sq_ctx_s));
+ NIX_MAX_CTX_SIZE);
else if (req->ctype == NIX_AQ_CTYPE_CQ)
memcpy(mask, &req->cq_mask,
- sizeof(struct nix_cq_ctx_s));
+ NIX_MAX_CTX_SIZE);
else if (req->ctype == NIX_AQ_CTYPE_RSS)
memcpy(mask, &req->rss_mask,
- sizeof(struct nix_rsse_s));
+ NIX_MAX_CTX_SIZE);
else if (req->ctype == NIX_AQ_CTYPE_MCE)
memcpy(mask, &req->mce_mask,
- sizeof(struct nix_rx_mce_s));
+ NIX_MAX_CTX_SIZE);
else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
memcpy(mask, &req->prof_mask,
- sizeof(struct nix_bandprof_s));
+ NIX_MAX_CTX_SIZE);
fallthrough;
case NIX_AQ_INSTOP_INIT:
if (req->ctype == NIX_AQ_CTYPE_RQ)
- memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
+ memcpy(ctx, &req->rq, NIX_MAX_CTX_SIZE);
else if (req->ctype == NIX_AQ_CTYPE_SQ)
- memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
+ memcpy(ctx, &req->sq, NIX_MAX_CTX_SIZE);
else if (req->ctype == NIX_AQ_CTYPE_CQ)
- memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
+ memcpy(ctx, &req->cq, NIX_MAX_CTX_SIZE);
else if (req->ctype == NIX_AQ_CTYPE_RSS)
- memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
+ memcpy(ctx, &req->rss, NIX_MAX_CTX_SIZE);
else if (req->ctype == NIX_AQ_CTYPE_MCE)
- memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
+ memcpy(ctx, &req->mce, NIX_MAX_CTX_SIZE);
else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
- memcpy(ctx, &req->prof, sizeof(struct nix_bandprof_s));
+ memcpy(ctx, &req->prof, NIX_MAX_CTX_SIZE);
break;
case NIX_AQ_INSTOP_NOP:
case NIX_AQ_INSTOP_READ:
@@ -1189,22 +1249,22 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
if (req->op == NIX_AQ_INSTOP_READ) {
if (req->ctype == NIX_AQ_CTYPE_RQ)
memcpy(&rsp->rq, ctx,
- sizeof(struct nix_rq_ctx_s));
+ NIX_MAX_CTX_SIZE);
else if (req->ctype == NIX_AQ_CTYPE_SQ)
memcpy(&rsp->sq, ctx,
- sizeof(struct nix_sq_ctx_s));
+ NIX_MAX_CTX_SIZE);
else if (req->ctype == NIX_AQ_CTYPE_CQ)
memcpy(&rsp->cq, ctx,
- sizeof(struct nix_cq_ctx_s));
+ NIX_MAX_CTX_SIZE);
else if (req->ctype == NIX_AQ_CTYPE_RSS)
memcpy(&rsp->rss, ctx,
- sizeof(struct nix_rsse_s));
+ NIX_MAX_CTX_SIZE);
else if (req->ctype == NIX_AQ_CTYPE_MCE)
memcpy(&rsp->mce, ctx,
- sizeof(struct nix_rx_mce_s));
+ NIX_MAX_CTX_SIZE);
else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
memcpy(&rsp->prof, ctx,
- sizeof(struct nix_bandprof_s));
+ NIX_MAX_CTX_SIZE);
}
}
@@ -1235,8 +1295,8 @@ static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw,
/* Make copy of original context & mask which are required
* for resubmission
*/
- memcpy(&aq_req.cq_mask, &req->cq_mask, sizeof(struct nix_cq_ctx_s));
- memcpy(&aq_req.cq, &req->cq, sizeof(struct nix_cq_ctx_s));
+ memcpy(&aq_req.cq_mask, &req->cq_mask, NIX_MAX_CTX_SIZE);
+ memcpy(&aq_req.cq, &req->cq, NIX_MAX_CTX_SIZE);
/* exclude fields which HW can update */
aq_req.cq_mask.cq_err = 0;
@@ -1255,7 +1315,7 @@ static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw,
* updated fields are masked out for request and response
* comparison
*/
- for (word = 0; word < sizeof(struct nix_cq_ctx_s) / sizeof(u64);
+ for (word = 0; word < NIX_MAX_CTX_SIZE / sizeof(u64);
word++) {
*(u64 *)((u8 *)&aq_rsp.cq + word * 8) &=
(*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
@@ -1263,14 +1323,14 @@ static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw,
(*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
}
- if (memcmp(&aq_req.cq, &aq_rsp.cq, sizeof(struct nix_cq_ctx_s)))
+ if (memcmp(&aq_req.cq, &aq_rsp.cq, NIX_MAX_CTX_SIZE))
return NIX_AF_ERR_AQ_CTX_RETRY_WRITE;
return 0;
}
-static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
- struct nix_aq_enq_rsp *rsp)
+int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
+ struct nix_aq_enq_rsp *rsp)
{
struct nix_hw *nix_hw;
int err, retries = 5;
@@ -1614,8 +1674,14 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
cfg = NPC_TX_DEF_PKIND;
rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);
+ if (is_rep_dev(rvu, pcifunc)) {
+ pfvf->tx_chan_base = RVU_SWITCH_LBK_CHAN;
+ pfvf->tx_chan_cnt = 1;
+ goto exit;
+ }
+
intf = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
- if (is_sdp_pfvf(pcifunc))
+ if (is_sdp_pfvf(rvu, pcifunc))
intf = NIX_INTF_TYPE_SDP;
err = nix_interface_init(rvu, pcifunc, intf, nixlf, rsp,
@@ -1684,6 +1750,9 @@ int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req,
if (nixlf < 0)
return NIX_AF_ERR_AF_LF_INVALID;
+ if (is_rep_dev(rvu, pcifunc))
+ goto free_lf;
+
if (req->flags & NIX_LF_DISABLE_FLOWS)
rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
else
@@ -1695,6 +1764,7 @@ int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req,
nix_interface_deinit(rvu, pcifunc, nixlf);
+free_lf:
/* Reset this NIX LF */
err = rvu_lf_reset(rvu, block, nixlf);
if (err) {
@@ -1735,7 +1805,8 @@ int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
if (rc < 0) {
dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)",
- rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
+ rvu_get_pf(rvu->pdev, pcifunc),
+ pcifunc & RVU_PFVF_FUNC_MASK);
return NIX_AF_ERR_MARK_CFG_FAIL;
}
@@ -1987,7 +2058,7 @@ static void nix_clear_tx_xoff(struct rvu *rvu, int blkaddr,
static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
{
struct rvu_hwinfo *hw = rvu->hw;
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
u8 cgx_id = 0, lmac_id = 0;
if (is_lbk_vf(rvu, pcifunc)) {/* LBK links */
@@ -2005,9 +2076,10 @@ static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
int link, int *start, int *end)
{
struct rvu_hwinfo *hw = rvu->hw;
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
- if (is_lbk_vf(rvu, pcifunc)) { /* LBK links */
+ /* LBK links */
+ if (is_lbk_vf(rvu, pcifunc) || is_rep_dev(rvu, pcifunc)) {
*start = hw->cap.nix_txsch_per_cgx_lmac * link;
*end = *start + hw->cap.nix_txsch_per_lbk_lmac;
} else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
@@ -2259,14 +2331,13 @@ static void nix_smq_flush_fill_ctx(struct rvu *rvu, int blkaddr, int smq,
schq = smq;
for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) {
smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl];
+ smq_tree_ctx->schq = schq;
if (lvl == NIX_TXSCH_LVL_TL1) {
- smq_flush_ctx->tl1_schq = schq;
smq_tree_ctx->cir_off = NIX_AF_TL1X_CIR(schq);
smq_tree_ctx->pir_off = 0;
smq_tree_ctx->pir_val = 0;
parent_off = 0;
} else if (lvl == NIX_TXSCH_LVL_TL2) {
- smq_flush_ctx->tl2_schq = schq;
smq_tree_ctx->cir_off = NIX_AF_TL2X_CIR(schq);
smq_tree_ctx->pir_off = NIX_AF_TL2X_PIR(schq);
parent_off = NIX_AF_TL2X_PARENT(schq);
@@ -2301,8 +2372,8 @@ static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr,
{
struct nix_txsch *txsch;
struct nix_hw *nix_hw;
+ int tl2, tl2_schq;
u64 regoff;
- int tl2;
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
@@ -2310,16 +2381,17 @@ static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr,
/* loop through all TL2s with matching PF_FUNC */
txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
+ tl2_schq = smq_flush_ctx->smq_tree_ctx[NIX_TXSCH_LVL_TL2].schq;
for (tl2 = 0; tl2 < txsch->schq.max; tl2++) {
/* skip the smq(flush) TL2 */
- if (tl2 == smq_flush_ctx->tl2_schq)
+ if (tl2 == tl2_schq)
continue;
/* skip unused TL2s */
if (TXSCH_MAP_FLAGS(txsch->pfvf_map[tl2]) & NIX_TXSCHQ_FREE)
continue;
/* skip if PF_FUNC doesn't match */
if ((TXSCH_MAP_FUNC(txsch->pfvf_map[tl2]) & ~RVU_PFVF_FUNC_MASK) !=
- (TXSCH_MAP_FUNC(txsch->pfvf_map[smq_flush_ctx->tl2_schq] &
+ (TXSCH_MAP_FUNC(txsch->pfvf_map[tl2_schq] &
~RVU_PFVF_FUNC_MASK)))
continue;
/* enable/disable XOFF */
@@ -2361,10 +2433,12 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
int smq, u16 pcifunc, int nixlf)
{
struct nix_smq_flush_ctx *smq_flush_ctx;
- int pf = rvu_get_pf(pcifunc);
+ int err, restore_tx_en = 0, i;
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
u8 cgx_id = 0, lmac_id = 0;
- int err, restore_tx_en = 0;
- u64 cfg;
+ u16 tl2_tl3_link_schq;
+ u8 link, link_level;
+ u64 cfg, bmap = 0;
if (!is_rvu_otx2(rvu)) {
/* Skip SMQ flush if pkt count is zero */
@@ -2388,16 +2462,38 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, true);
nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, false);
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
- /* Do SMQ flush and set enqueue xoff */
- cfg |= BIT_ULL(50) | BIT_ULL(49);
- rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
-
/* Disable backpressure from physical link,
* otherwise SMQ flush may stall.
*/
rvu_cgx_enadis_rx_bp(rvu, pf, false);
+ link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
+ NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
+ tl2_tl3_link_schq = smq_flush_ctx->smq_tree_ctx[link_level].schq;
+ link = smq_flush_ctx->smq_tree_ctx[NIX_TXSCH_LVL_TL1].schq;
+
+ /* SMQ set enqueue xoff */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
+ cfg |= BIT_ULL(50);
+ rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
+
+ /* Clear all NIX_AF_TL3_TL2_LINK_CFG[ENA] for the TL3/TL2 queue */
+ for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) {
+ cfg = rvu_read64(rvu, blkaddr,
+ NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link));
+ if (!(cfg & BIT_ULL(12)))
+ continue;
+ bmap |= BIT_ULL(i);
+ cfg &= ~BIT_ULL(12);
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg);
+ }
+
+ /* Do SMQ flush and set enqueue xoff */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
+ cfg |= BIT_ULL(50) | BIT_ULL(49);
+ rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
+
/* Wait for flush to complete */
err = rvu_poll_reg(rvu, blkaddr,
NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
@@ -2406,6 +2502,17 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
"NIXLF%d: SMQ%d flush failed, txlink might be busy\n",
nixlf, smq);
+ /* Set NIX_AF_TL3_TL2_LINKX_CFG[ENA] for the TL3/TL2 queue */
+ for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) {
+ if (!(bmap & BIT_ULL(i)))
+ continue;
+ cfg = rvu_read64(rvu, blkaddr,
+ NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link));
+ cfg |= BIT_ULL(12);
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg);
+ }
+
/* clear XOFF on TL2s */
nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, true);
nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, false);
@@ -2497,9 +2604,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
}
mutex_unlock(&rvu->rsrc_lock);
- /* Sync cached info for this LF in NDC-TX to LLC/DRAM */
- rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
- err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
+ err = rvu_ndc_sync(rvu, blkaddr, nixlf, NIX_AF_NDC_TX_SYNC);
if (err)
dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
@@ -2723,11 +2828,11 @@ void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, u16 pcifunc,
{
struct rvu_hwinfo *hw = rvu->hw;
int lbk_link_start, lbk_links;
- u8 pf = rvu_get_pf(pcifunc);
+ u8 pf = rvu_get_pf(rvu->pdev, pcifunc);
int schq;
u64 cfg;
- if (!is_pf_cgxmapped(rvu, pf))
+ if (!is_pf_cgxmapped(rvu, pf) && !is_rep_dev(rvu, pcifunc))
return;
cfg = enable ? (BIT_ULL(12) | RVU_SWITCH_LBK_CHAN) : 0;
@@ -3093,7 +3198,8 @@ static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL);
if (err) {
dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
- rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
+ rvu_get_pf(rvu->pdev, pcifunc),
+ pcifunc & RVU_PFVF_FUNC_MASK);
return err;
}
return 0;
@@ -3361,7 +3467,7 @@ int nix_update_mce_list(struct rvu *rvu, u16 pcifunc,
dev_err(rvu->dev,
"%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
__func__, idx, mce_list->max,
- pcifunc >> RVU_PFVF_PF_SHIFT);
+ rvu_get_pf(rvu->pdev, pcifunc));
return -EINVAL;
}
@@ -3413,7 +3519,8 @@ void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type,
struct rvu_pfvf *pfvf;
if (!hw->cap.nix_rx_multicast ||
- !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc & ~RVU_PFVF_FUNC_MASK))) {
+ !is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev,
+ pcifunc & ~RVU_PFVF_FUNC_MASK))) {
*mce_list = NULL;
*mce_idx = 0;
return;
@@ -3447,13 +3554,13 @@ static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
int pf;
/* skip multicast pkt replication for AF's VFs & SDP links */
- if (is_lbk_vf(rvu, pcifunc) || is_sdp_pfvf(pcifunc))
+ if (is_lbk_vf(rvu, pcifunc) || is_sdp_pfvf(rvu, pcifunc))
return 0;
if (!hw->cap.nix_rx_multicast)
return 0;
- pf = rvu_get_pf(pcifunc);
+ pf = rvu_get_pf(rvu->pdev, pcifunc);
if (!is_pf_cgxmapped(rvu, pf))
return 0;
@@ -3522,7 +3629,7 @@ static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
for (idx = 0; idx < (numvfs + 1); idx++) {
/* idx-0 is for PF, followed by VFs */
- pcifunc = (pf << RVU_PFVF_PF_SHIFT);
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0);
pcifunc |= idx;
/* Add dummy entries now, so that we don't have to check
* for whether AQ_OP should be INIT/WRITE later on.
@@ -3864,6 +3971,11 @@ static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
return -ERANGE;
}
+/* Mask to match ipv6(NPC_LT_LC_IP6) and ipv6 ext(NPC_LT_LC_IP6_EXT) */
+#define NPC_LT_LC_IP6_MATCH_MSK ((~(NPC_LT_LC_IP6 ^ NPC_LT_LC_IP6_EXT)) & 0xf)
+/* Mask to match both ipv4(NPC_LT_LC_IP) and ipv4 ext(NPC_LT_LC_IP_OPT) */
+#define NPC_LT_LC_IP_MATCH_MSK ((~(NPC_LT_LC_IP ^ NPC_LT_LC_IP_OPT)) & 0xf)
+
static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
{
int idx, nr_field, key_off, field_marker, keyoff_marker;
@@ -3933,7 +4045,7 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
field->hdr_offset = 9; /* offset */
field->bytesm1 = 0; /* 1 byte */
field->ltype_match = NPC_LT_LC_IP;
- field->ltype_mask = 0xF;
+ field->ltype_mask = NPC_LT_LC_IP_MATCH_MSK;
break;
case NIX_FLOW_KEY_TYPE_IPV4:
case NIX_FLOW_KEY_TYPE_INNR_IPV4:
@@ -3960,8 +4072,7 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
field->bytesm1 = 3; /* DIP, 4 bytes */
}
}
-
- field->ltype_mask = 0xF; /* Match only IPv4 */
+ field->ltype_mask = NPC_LT_LC_IP_MATCH_MSK;
keyoff_marker = false;
break;
case NIX_FLOW_KEY_TYPE_IPV6:
@@ -3990,7 +4101,7 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
field->bytesm1 = 15; /* DIP,16 bytes */
}
}
- field->ltype_mask = 0xF; /* Match only IPv6 */
+ field->ltype_mask = NPC_LT_LC_IP6_MATCH_MSK;
break;
case NIX_FLOW_KEY_TYPE_TCP:
case NIX_FLOW_KEY_TYPE_UDP:
@@ -4356,8 +4467,6 @@ int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf)
ether_addr_copy(pfvf->default_mac, req->mac_addr);
- rvu_switch_update_rules(rvu, pcifunc);
-
return 0;
}
@@ -4455,7 +4564,7 @@ int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
static void nix_find_link_frs(struct rvu *rvu,
struct nix_frs_cfg *req, u16 pcifunc)
{
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
struct rvu_pfvf *pfvf;
int maxlen, minlen;
int numvfs, hwvf;
@@ -4502,7 +4611,7 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
{
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
int blkaddr, link = -1;
struct nix_hw *nix_hw;
struct rvu_pfvf *pfvf;
@@ -4518,7 +4627,7 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
if (!nix_hw)
return NIX_AF_ERR_INVALID_NIXBLK;
- if (is_lbk_vf(rvu, pcifunc))
+ if (is_lbk_vf(rvu, pcifunc) || is_rep_dev(rvu, pcifunc))
rvu_get_lbk_link_max_frs(rvu, &max_mtu);
else
rvu_get_lmac_link_max_frs(rvu, &max_mtu);
@@ -4546,6 +4655,8 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
/* For VFs of PF0 ingress is LBK port, so config LBK link */
pfvf = rvu_get_pfvf(rvu, pcifunc);
link = hw->cgx_links + pfvf->lbkid;
+ } else if (is_rep_dev(rvu, pcifunc)) {
+ link = hw->cgx_links + 0;
}
if (link < 0)
@@ -4619,6 +4730,9 @@ static void nix_link_config(struct rvu *rvu, int blkaddr,
rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs);
rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs);
+ /* Set SDP link credit */
+ rvu_write64(rvu, blkaddr, NIX_AF_SDP_LINK_CREDIT, SDP_LINK_CREDIT);
+
/* Set default min/max packet lengths allowed on NIX Rx links.
*
* With HW reset minlen value of 60byte, HW will treat ARP pkts
@@ -4637,7 +4751,7 @@ static void nix_link_config(struct rvu *rvu, int blkaddr,
if (hw->sdp_links) {
link = hw->cgx_links + hw->lbk_links;
rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
- SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
+ SDP_HW_MAX_FRS << 16 | SDP_HW_MIN_FRS);
}
/* Get MCS external bypass status for CN10K-B */
@@ -4942,7 +5056,7 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
(ltdefs->rx_apad1.ltype_match << 4) |
ltdefs->rx_apad1.ltype_mask);
- /* Receive ethertype defination register defines layer
+ /* Receive ethertype definition register defines layer
* information in NPC_RESULT_S to identify the Ethertype
* location in L2 header. Used for Ethertype overwriting
* in inline IPsec flow.
@@ -5129,7 +5243,7 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
{
u16 pcifunc = req->hdr.pcifunc;
struct rvu_pfvf *pfvf;
- int nixlf, err;
+ int nixlf, err, pf;
err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
if (err)
@@ -5145,7 +5259,11 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
pfvf = rvu_get_pfvf(rvu, pcifunc);
set_bit(NIXLF_INITIALIZED, &pfvf->flags);
- rvu_switch_update_rules(rvu, pcifunc);
+ rvu_switch_update_rules(rvu, pcifunc, true);
+
+ pf = rvu_get_pf(rvu->pdev, pcifunc);
+ if (is_pf_cgxmapped(rvu, pf) && rvu->rep_mode)
+ rvu_rep_notify_pfvf_state(rvu, pcifunc, true);
return rvu_cgx_start_stop_io(rvu, pcifunc, true);
}
@@ -5155,7 +5273,7 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
{
u16 pcifunc = req->hdr.pcifunc;
struct rvu_pfvf *pfvf;
- int nixlf, err;
+ int nixlf, err, pf;
err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
if (err)
@@ -5173,8 +5291,12 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
if (err)
return err;
+ rvu_switch_update_rules(rvu, pcifunc, false);
rvu_cgx_tx_enable(rvu, pcifunc, true);
+ pf = rvu_get_pf(rvu->pdev, pcifunc);
+ if (is_pf_cgxmapped(rvu, pf) && rvu->rep_mode)
+ rvu_rep_notify_pfvf_state(rvu, pcifunc, false);
return 0;
}
@@ -5184,7 +5306,7 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
struct hwctx_disable_req ctx_req;
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
u64 sa_base;
@@ -5202,6 +5324,9 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
+ if (is_pf_cgxmapped(rvu, pf) && rvu->rep_mode)
+ rvu_rep_notify_pfvf_state(rvu, pcifunc, false);
+
rvu_cgx_start_stop_io(rvu, pcifunc, false);
if (pfvf->sq_ctx) {
@@ -5270,7 +5395,7 @@ static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
int nixlf;
u64 cfg;
- pf = rvu_get_pf(pcifunc);
+ pf = rvu_get_pf(rvu->pdev, pcifunc);
if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
return 0;
@@ -5693,6 +5818,8 @@ static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw)
}
}
+#define NIX_BW_PROF_HI_MASK GENMASK(10, 7)
+
static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
struct nix_hw *nix_hw, u16 pcifunc)
{
@@ -5731,7 +5858,8 @@ static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
return -EINVAL;
ipolicer = &nix_hw->ipolicer[hi_layer];
- prof_idx = req->prof.band_prof_id;
+ prof_idx = FIELD_PREP(NIX_BW_PROF_HI_MASK, req->prof.band_prof_id_h);
+ prof_idx |= req->prof.band_prof_id;
if (prof_idx >= ipolicer->band_prof.max ||
ipolicer->pfvf_map[prof_idx] != pcifunc)
return -EINVAL;
@@ -5896,8 +6024,10 @@ static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu,
aq_req->op = NIX_AQ_INSTOP_WRITE;
aq_req->qidx = leaf_prof;
- aq_req->prof.band_prof_id = mid_prof;
+ aq_req->prof.band_prof_id = mid_prof & 0x7F;
aq_req->prof_mask.band_prof_id = GENMASK(6, 0);
+ aq_req->prof.band_prof_id_h = FIELD_GET(NIX_BW_PROF_HI_MASK, mid_prof);
+ aq_req->prof_mask.band_prof_id_h = GENMASK(3, 0);
aq_req->prof.hl_en = 1;
aq_req->prof_mask.hl_en = 1;
@@ -5906,6 +6036,8 @@ static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu,
(struct nix_aq_enq_rsp *)aq_rsp);
}
+#define NIX_RQ_PROF_HI_MASK GENMASK(13, 10)
+
int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
u16 rq_idx, u16 match_id)
{
@@ -5937,7 +6069,8 @@ int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
return 0;
/* Get the bandwidth profile ID mapped to this RQ */
- leaf_prof = aq_rsp.rq.band_prof_id;
+ leaf_prof = FIELD_PREP(NIX_RQ_PROF_HI_MASK, aq_rsp.rq.band_prof_id_h);
+ leaf_prof |= aq_rsp.rq.band_prof_id;
ipolicer = &nix_hw->ipolicer[BAND_PROF_LEAF_LAYER];
ipolicer->match_id[leaf_prof] = match_id;
@@ -5975,7 +6108,10 @@ int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
* to different RQs and marked with same match_id
* are rate limited in an aggregate fashion
*/
- mid_prof = aq_rsp.prof.band_prof_id;
+ mid_prof = FIELD_PREP(NIX_BW_PROF_HI_MASK,
+ aq_rsp.prof.band_prof_id_h);
+ mid_prof |= aq_rsp.prof.band_prof_id;
+
rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
&aq_req, &aq_rsp,
leaf_prof, mid_prof);
@@ -6097,7 +6233,8 @@ static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
if (!aq_rsp.prof.hl_en)
return;
- mid_prof = aq_rsp.prof.band_prof_id;
+ mid_prof = FIELD_PREP(NIX_BW_PROF_HI_MASK, aq_rsp.prof.band_prof_id_h);
+ mid_prof |= aq_rsp.prof.band_prof_id;
ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
ipolicer->ref_count[mid_prof]--;
/* If ref_count is zero, free mid layer profile */
@@ -6497,3 +6634,19 @@ unlock_grp:
return ret;
}
+
+/* On CN10K and older silicon variants, hardware may incorrectly
+ * assert XOFF on certain channels. Issue a write to NIX_AF_RX_CHANX_CFG
+ * to broadcast XON on those channels.
+ */
+void rvu_block_bcast_xon(struct rvu *rvu, int blkaddr)
+{
+ struct rvu_block *block = &rvu->hw->block[blkaddr];
+ u64 cfg;
+
+ if (!block->implemented || is_cn20k(rvu->pdev))
+ return;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(0));
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(0), cfg);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
index 4f5ca5ab13a4..e2a33e46b48a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
@@ -464,6 +464,23 @@ int rvu_mbox_handler_npa_lf_free(struct rvu *rvu, struct msg_req *req,
return 0;
}
+static void npa_aq_ndc_config(struct rvu *rvu, struct rvu_block *block)
+{
+ u64 cfg;
+
+ if (is_cn20k(rvu->pdev)) /* NDC not applicable to cn20k */
+ return;
+
+ /* Do not bypass NDC cache */
+ cfg = rvu_read64(rvu, block->addr, NPA_AF_NDC_CFG);
+ cfg &= ~0x03DULL;
+#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
+ /* Disable caching of stack pages */
+ cfg |= 0x10ULL;
+#endif
+ rvu_write64(rvu, block->addr, NPA_AF_NDC_CFG, cfg);
+}
+
static int npa_aq_init(struct rvu *rvu, struct rvu_block *block)
{
u64 cfg;
@@ -479,14 +496,7 @@ static int npa_aq_init(struct rvu *rvu, struct rvu_block *block)
rvu_write64(rvu, block->addr, NPA_AF_GEN_CFG, cfg);
#endif
- /* Do not bypass NDC cache */
- cfg = rvu_read64(rvu, block->addr, NPA_AF_NDC_CFG);
- cfg &= ~0x03DULL;
-#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
- /* Disable caching of stack pages */
- cfg |= 0x10ULL;
-#endif
- rvu_write64(rvu, block->addr, NPA_AF_NDC_CFG, cfg);
+ npa_aq_ndc_config(rvu, block);
/* For CN10K NPA BATCH DMA set 35 cache lines */
if (!is_rvu_otx2(rvu)) {
@@ -567,6 +577,9 @@ int rvu_ndc_fix_locked_cacheline(struct rvu *rvu, int blkaddr)
int bank, max_bank, line, max_line, err;
u64 reg, ndc_af_const;
+ if (is_cn20k(rvu->pdev)) /* NDC not applicable to cn20k */
+ return 0;
+
/* Set the ENABLE bit(63) to '0' */
reg = rvu_read64(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL);
rvu_write64(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL, reg & GENMASK_ULL(62, 0));
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index be709f83f331..c7c70429eb6c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -147,7 +147,9 @@ static int npc_get_ucast_mcam_index(struct npc_mcam *mcam, u16 pcifunc,
int npc_get_nixlf_mcam_index(struct npc_mcam *mcam,
u16 pcifunc, int nixlf, int type)
{
- int pf = rvu_get_pf(pcifunc);
+ struct rvu_hwinfo *hw = container_of(mcam, struct rvu_hwinfo, mcam);
+ struct rvu *rvu = hw->rvu;
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
int index;
/* Check if this is for a PF */
@@ -698,7 +700,7 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
/* RX_ACTION set to MCAST for CGX PF's */
if (hw->cap.nix_rx_multicast && pfvf->use_mce_list &&
- is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
+ is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc))) {
*(u64 *)&action = 0;
action.op = NIX_RX_ACTIONOP_MCAST;
pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
@@ -820,24 +822,6 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}
-void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
- bool enable)
-{
- struct npc_mcam *mcam = &rvu->hw->mcam;
- int blkaddr, index;
-
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
- if (blkaddr < 0)
- return;
-
- /* Get 'pcifunc' of PF device */
- pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
-
- index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf,
- NIXLF_BCAST_ENTRY);
- npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
-}
-
void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
u64 chan)
{
@@ -1125,6 +1109,7 @@ void npc_enadis_default_mce_entry(struct rvu *rvu, u16 pcifunc,
static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc,
int nixlf, bool enable)
{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
struct npc_mcam *mcam = &rvu->hw->mcam;
int index, blkaddr;
@@ -1133,9 +1118,12 @@ static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc,
return;
/* Ucast MCAM match entry of this PF/VF */
- index = npc_get_nixlf_mcam_index(mcam, pcifunc,
- nixlf, NIXLF_UCAST_ENTRY);
- npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
+ if (npc_is_feature_supported(rvu, BIT_ULL(NPC_DMAC),
+ pfvf->nix_rx_intf)) {
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_UCAST_ENTRY);
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
+ }
/* Nothing to do for VFs, on platforms where pkt replication
* is not supported
@@ -2181,7 +2169,6 @@ void rvu_npc_freemem(struct rvu *rvu)
kfree(pkind->rsrc.bmap);
npc_mcam_rsrcs_deinit(rvu);
- kfree(mcam->counters.bmap);
if (rvu->kpu_prfl_addr)
iounmap(rvu->kpu_prfl_addr);
else
@@ -2520,7 +2507,17 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
* - when available free entries are less.
 * Lower priority ones out of available free entries are always
* chosen when 'high vs low' question arises.
+ *
+ * For a VF, the base MCAM match rule is set by its PF, and all
+ * further MCAM rules installed by the VF on its own are
+ * concatenated with that base rule. Hence PF entries should be
+ * at lower priority than VF entries; otherwise the base rule is
+ * always hit and rules installed by the VF are of no use. So if
+ * the request is from a PF, allocate low priority entries.
*/
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+ goto lprio_alloc;
/* Get the search range for priority allocation request */
if (req->priority) {
@@ -2529,17 +2526,6 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
goto alloc;
}
- /* For a VF base MCAM match rule is set by its PF. And all the
- * further MCAM rules installed by VF on its own are
- * concatenated with the base rule set by its PF. Hence PF entries
- * should be at lower priority compared to VF entries. Otherwise
- * base rule is hit always and rules installed by VF will be of
- * no use. Hence if the request is from PF and NOT a priority
- * allocation request then allocate low priority entries.
- */
- if (!(pcifunc & RVU_PFVF_FUNC_MASK))
- goto lprio_alloc;
-
/* Find out the search range for non-priority allocation request
*
* Get MCAM free entry count in middle zone.
@@ -2569,6 +2555,18 @@ lprio_alloc:
reverse = true;
start = 0;
end = mcam->bmap_entries;
+ /* Ensure PF requests are always kept at the bottom. If a PF
+ * requests a higher/lower priority entry w.r.t. a reference
+ * entry, honour that criterion but still search for entries
+ * from the bottom rather than the mid zone.
+ */
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK) &&
+ req->priority == NPC_MCAM_HIGHER_PRIO)
+ end = req->ref_entry;
+
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK) &&
+ req->priority == NPC_MCAM_LOWER_PRIO)
+ start = req->ref_entry;
}
alloc:
@@ -2681,6 +2679,49 @@ void npc_mcam_rsrcs_reserve(struct rvu *rvu, int blkaddr, int entry_idx)
npc_mcam_set_bit(mcam, entry_idx);
}
+int npc_config_cntr_default_entries(struct rvu *rvu, bool enable)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct npc_install_flow_rsp rsp;
+ struct rvu_npc_mcam_rule *rule;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return -EINVAL;
+
+ mutex_lock(&mcam->lock);
+ list_for_each_entry(rule, &mcam->mcam_rules, list) {
+ if (!is_mcam_entry_enabled(rvu, mcam, blkaddr, rule->entry))
+ continue;
+ if (!rule->default_rule)
+ continue;
+ if (enable && !rule->has_cntr) { /* Alloc and map new counter */
+ __rvu_mcam_add_counter_to_rule(rvu, rule->owner,
+ rule, &rsp);
+ if (rsp.counter < 0) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate cntr for default rule (err=%d)\n",
+ __func__, rsp.counter);
+ break;
+ }
+ npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+ rule->entry, rsp.counter);
+ /* Reset counter before use */
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MATCH_STATX(rule->cntr), 0x0);
+ }
+
+ /* Free and unmap counter */
+ if (!enable && rule->has_cntr)
+ __rvu_mcam_remove_counter_from_rule(rvu, rule->owner,
+ rule);
+ }
+ mutex_unlock(&mcam->lock);
+
+ return 0;
+}
+
int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu,
struct npc_mcam_alloc_entry_req *req,
struct npc_mcam_alloc_entry_rsp *rsp)
@@ -2965,9 +3006,9 @@ int rvu_mbox_handler_npc_mcam_shift_entry(struct rvu *rvu,
return rc;
}
-int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu,
- struct npc_mcam_alloc_counter_req *req,
- struct npc_mcam_alloc_counter_rsp *rsp)
+static int __npc_mcam_alloc_counter(struct rvu *rvu,
+ struct npc_mcam_alloc_counter_req *req,
+ struct npc_mcam_alloc_counter_rsp *rsp)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
u16 pcifunc = req->hdr.pcifunc;
@@ -2988,11 +3029,9 @@ int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu,
if (!req->contig && req->count > NPC_MAX_NONCONTIG_COUNTERS)
return NPC_MCAM_INVALID_REQ;
- mutex_lock(&mcam->lock);
/* Check if unused counters are available or not */
if (!rvu_rsrc_free_count(&mcam->counters)) {
- mutex_unlock(&mcam->lock);
return NPC_MCAM_ALLOC_FAILED;
}
@@ -3025,12 +3064,27 @@ int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu,
}
}
- mutex_unlock(&mcam->lock);
return 0;
}
-int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu,
- struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp)
+int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu,
+ struct npc_mcam_alloc_counter_req *req,
+ struct npc_mcam_alloc_counter_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int err;
+
+ mutex_lock(&mcam->lock);
+
+ err = __npc_mcam_alloc_counter(rvu, req, rsp);
+
+ mutex_unlock(&mcam->lock);
+ return err;
+}
+
+static int __npc_mcam_free_counter(struct rvu *rvu,
+ struct npc_mcam_oper_counter_req *req,
+ struct msg_rsp *rsp)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
u16 index, entry = 0;
@@ -3040,10 +3094,8 @@ int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu,
if (blkaddr < 0)
return NPC_MCAM_INVALID_REQ;
- mutex_lock(&mcam->lock);
err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr);
if (err) {
- mutex_unlock(&mcam->lock);
return err;
}
@@ -3067,10 +3119,66 @@ int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu,
index, req->cntr);
}
- mutex_unlock(&mcam->lock);
return 0;
}
+int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu,
+ struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int err;
+
+ mutex_lock(&mcam->lock);
+
+ err = __npc_mcam_free_counter(rvu, req, rsp);
+
+ mutex_unlock(&mcam->lock);
+
+ return err;
+}
+
+void __rvu_mcam_remove_counter_from_rule(struct rvu *rvu, u16 pcifunc,
+ struct rvu_npc_mcam_rule *rule)
+{
+ struct npc_mcam_oper_counter_req free_req = { 0 };
+ struct msg_rsp free_rsp;
+
+ if (!rule->has_cntr)
+ return;
+
+ free_req.hdr.pcifunc = pcifunc;
+ free_req.cntr = rule->cntr;
+
+ __npc_mcam_free_counter(rvu, &free_req, &free_rsp);
+ rule->has_cntr = false;
+}
+
+void __rvu_mcam_add_counter_to_rule(struct rvu *rvu, u16 pcifunc,
+ struct rvu_npc_mcam_rule *rule,
+ struct npc_install_flow_rsp *rsp)
+{
+ struct npc_mcam_alloc_counter_req cntr_req = { 0 };
+ struct npc_mcam_alloc_counter_rsp cntr_rsp = { 0 };
+ int err;
+
+ cntr_req.hdr.pcifunc = pcifunc;
+ cntr_req.contig = true;
+ cntr_req.count = 1;
+
+ /* We try to allocate a counter to track the stats of this
+ * rule. If a counter cannot be allocated, proceed without one,
+ * since counters are more limited than entries.
+ */
+ err = __npc_mcam_alloc_counter(rvu, &cntr_req, &cntr_rsp);
+ if (!err && cntr_rsp.count) {
+ rule->cntr = cntr_rsp.cntr;
+ rule->has_cntr = true;
+ rsp->counter = rule->cntr;
+ } else {
+ rsp->counter = err;
+ }
+}
+
int rvu_mbox_handler_npc_mcam_unmap_counter(struct rvu *rvu,
struct npc_mcam_unmap_counter_req *req, struct msg_rsp *rsp)
{
@@ -3328,7 +3436,7 @@ int rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir,
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
int blkaddr, nixlf, rc, intf_mode;
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
u64 rxpkind, txpkind;
u8 cgx_id, lmac_id;
@@ -3468,3 +3576,33 @@ int rvu_mbox_handler_npc_mcam_entry_stats(struct rvu *rvu,
return 0;
}
+
+void rvu_npc_clear_ucast_entry(struct rvu *rvu, int pcifunc, int nixlf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule *rule;
+ int ucast_idx, blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_UCAST_ENTRY);
+
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, ucast_idx, false);
+
+ npc_set_mcam_action(rvu, mcam, blkaddr, ucast_idx, 0);
+
+ npc_clear_mcam_entry(rvu, mcam, blkaddr, ucast_idx);
+
+ mutex_lock(&mcam->lock);
+ list_for_each_entry(rule, &mcam->mcam_rules, list) {
+ if (rule->entry == ucast_idx) {
+ list_del(&rule->list);
+ kfree(rule);
+ break;
+ }
+ }
+ mutex_unlock(&mcam->lock);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index c181e7aa9eb6..b56395ac5a74 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -606,8 +606,8 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
if (!npc_check_field(rvu, blkaddr, NPC_LB, intf))
*features &= ~BIT_ULL(NPC_OUTER_VID);
- /* Set SPI flag only if AH/ESP and IPSEC_SPI are in the key */
- if (npc_check_field(rvu, blkaddr, NPC_IPSEC_SPI, intf) &&
+ /* Allow extracting the SPI field from AH and ESP headers at the same offset */
+ if (npc_is_field_present(rvu, NPC_IPSEC_SPI, intf) &&
(*features & (BIT_ULL(NPC_IPPROTO_ESP) | BIT_ULL(NPC_IPPROTO_AH))))
*features |= BIT_ULL(NPC_IPSEC_SPI);
@@ -1081,44 +1081,26 @@ static void rvu_mcam_add_rule(struct npc_mcam *mcam,
static void rvu_mcam_remove_counter_from_rule(struct rvu *rvu, u16 pcifunc,
struct rvu_npc_mcam_rule *rule)
{
- struct npc_mcam_oper_counter_req free_req = { 0 };
- struct msg_rsp free_rsp;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
- if (!rule->has_cntr)
- return;
+ mutex_lock(&mcam->lock);
- free_req.hdr.pcifunc = pcifunc;
- free_req.cntr = rule->cntr;
+ __rvu_mcam_remove_counter_from_rule(rvu, pcifunc, rule);
- rvu_mbox_handler_npc_mcam_free_counter(rvu, &free_req, &free_rsp);
- rule->has_cntr = false;
+ mutex_unlock(&mcam->lock);
}
static void rvu_mcam_add_counter_to_rule(struct rvu *rvu, u16 pcifunc,
struct rvu_npc_mcam_rule *rule,
struct npc_install_flow_rsp *rsp)
{
- struct npc_mcam_alloc_counter_req cntr_req = { 0 };
- struct npc_mcam_alloc_counter_rsp cntr_rsp = { 0 };
- int err;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
- cntr_req.hdr.pcifunc = pcifunc;
- cntr_req.contig = true;
- cntr_req.count = 1;
+ mutex_lock(&mcam->lock);
- /* we try to allocate a counter to track the stats of this
- * rule. If counter could not be allocated then proceed
- * without counter because counters are limited than entries.
- */
- err = rvu_mbox_handler_npc_mcam_alloc_counter(rvu, &cntr_req,
- &cntr_rsp);
- if (!err && cntr_rsp.count) {
- rule->cntr = cntr_rsp.cntr;
- rule->has_cntr = true;
- rsp->counter = rule->cntr;
- } else {
- rsp->counter = err;
- }
+ __rvu_mcam_add_counter_to_rule(rvu, pcifunc, rule, rsp);
+
+ mutex_unlock(&mcam->lock);
}
static int npc_mcast_update_action_index(struct rvu *rvu, struct npc_install_flow_req *req,
@@ -1187,6 +1169,8 @@ static int npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
action.pf_func = target;
action.op = NIX_RX_ACTIONOP_UCAST;
}
+ if (req->match_id)
+ action.match_id = req->match_id;
}
entry->action = *(u64 *)&action;
@@ -1414,6 +1398,7 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
struct npc_install_flow_rsp *rsp)
{
bool from_vf = !!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK);
+ bool from_rep_dev = !!is_rep_dev(rvu, req->hdr.pcifunc);
struct rvu_switch *rswitch = &rvu->rswitch;
int blkaddr, nixlf, err;
struct rvu_pfvf *pfvf;
@@ -1467,18 +1452,21 @@ process_flow:
* hence modify pcifunc accordingly.
*/
- /* AF installing for a PF/VF */
- if (!req->hdr.pcifunc)
+ if (!req->hdr.pcifunc) {
+ /* AF installing for a PF/VF */
target = req->vf;
- /* PF installing for its VF */
- else if (!from_vf && req->vf) {
+ } else if (!from_vf && req->vf && !from_rep_dev) {
+ /* PF installing for its VF */
target = (req->hdr.pcifunc & ~RVU_PFVF_FUNC_MASK) | req->vf;
pf_set_vfs_mac = req->default_rule &&
(req->features & BIT_ULL(NPC_DMAC));
- }
- /* msg received from PF/VF */
- else
+ } else if (from_rep_dev && req->vf) {
+ /* Representor device installing for a representee */
+ target = req->vf;
+ } else {
+ /* msg received from PF/VF */
target = req->hdr.pcifunc;
+ }
/* ignore chan_mask in case pf func is not AF, revisit later */
if (!is_pffunc_af(req->hdr.pcifunc))
@@ -1490,8 +1478,10 @@ process_flow:
pfvf = rvu_get_pfvf(rvu, target);
+ if (from_rep_dev)
+ req->channel = pfvf->rx_chan_base;
/* PF installing for its VF */
- if (req->hdr.pcifunc && !from_vf && req->vf)
+ if (req->hdr.pcifunc && !from_vf && req->vf && !from_rep_dev)
set_bit(PF_SET_VF_CFG, &pfvf->flags);
/* update req destination mac addr */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
index d2661e7fabdb..999f6d93c7fe 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
@@ -1465,7 +1465,7 @@ static int rvu_npc_exact_update_table_entry(struct rvu *rvu, u8 cgx_id, u8 lmac_
int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc)
{
struct npc_exact_table *table;
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
u8 cgx_id, lmac_id;
u32 drop_mcam_idx;
bool *promisc;
@@ -1512,7 +1512,7 @@ int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc)
int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc)
{
struct npc_exact_table *table;
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
u8 cgx_id, lmac_id;
u32 drop_mcam_idx;
bool *promisc;
@@ -1560,7 +1560,7 @@ int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc)
int rvu_npc_exact_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req,
struct msg_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u32 seq_id = req->index;
struct rvu_pfvf *pfvf;
u8 cgx_id, lmac_id;
@@ -1593,7 +1593,7 @@ int rvu_npc_exact_mac_addr_update(struct rvu *rvu,
struct cgx_mac_addr_update_req *req,
struct cgx_mac_addr_update_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
struct npc_exact_table_entry *entry;
struct npc_exact_table *table;
struct rvu_pfvf *pfvf;
@@ -1675,7 +1675,7 @@ int rvu_npc_exact_mac_addr_add(struct rvu *rvu,
struct cgx_mac_addr_add_req *req,
struct cgx_mac_addr_add_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
struct rvu_pfvf *pfvf;
u8 cgx_id, lmac_id;
int rc = 0;
@@ -1711,7 +1711,7 @@ int rvu_npc_exact_mac_addr_del(struct rvu *rvu,
struct cgx_mac_addr_del_req *req,
struct msg_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
int rc;
rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index);
@@ -1736,7 +1736,7 @@ int rvu_npc_exact_mac_addr_del(struct rvu *rvu,
int rvu_npc_exact_mac_addr_set(struct rvu *rvu, struct cgx_mac_addr_set_or_get *req,
struct cgx_mac_addr_set_or_get *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u32 seq_id = req->index;
struct rvu_pfvf *pfvf;
u8 cgx_id, lmac_id;
@@ -2001,7 +2001,7 @@ int rvu_npc_exact_init(struct rvu *rvu)
}
/* Filter rules are only for PF */
- pcifunc = RVU_PFFUNC(i, 0);
+ pcifunc = RVU_PFFUNC(rvu->pdev, i, 0);
dev_dbg(rvu->dev,
"%s:Drop rule cgx=%d lmac=%d chan(val=0x%llx, mask=0x%llx\n",
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h
index 57a09328d46b..cb25cf478f1f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h
@@ -139,9 +139,7 @@ static struct npc_mcam_kex_hash npc_mkex_hash_default __maybe_unused = {
#define NPC_MCAM_DROP_RULE_MAX 30
#define NPC_MCAM_SDP_DROP_RULE_IDX 0
-#define RVU_PFFUNC(pf, func) \
- ((((pf) & RVU_PFVF_PF_MASK) << RVU_PFVF_PF_SHIFT) | \
- (((func) & RVU_PFVF_FUNC_MASK) << RVU_PFVF_FUNC_SHIFT))
+#define RVU_PFFUNC(pdev, pf, func) rvu_make_pcifunc(pdev, pf, func)
enum npc_exact_opc_type {
NPC_EXACT_OPC_MEM,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index 086f05c0376f..62cdc714ba57 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -121,6 +121,7 @@
#define NPA_AF_LF_RST (0x0020)
#define NPA_AF_GEN_CFG (0x0030)
#define NPA_AF_NDC_CFG (0x0040)
+#define NPA_AF_NDC_SYNC (0x0050)
#define NPA_AF_INP_CTL (0x00D0)
#define NPA_AF_ACTIVE_CYCLES_PC (0x00F0)
#define NPA_AF_AVG_DELAY (0x0100)
@@ -239,6 +240,7 @@
#define NIX_AF_RX_CPTX_INST_ADDR (0x0310)
#define NIX_AF_RX_CPTX_INST_QSEL(a) (0x0320ull | (uint64_t)(a) << 3)
#define NIX_AF_RX_CPTX_CREDIT(a) (0x0360ull | (uint64_t)(a) << 3)
+#define NIX_AF_NDC_RX_SYNC (0x03E0)
#define NIX_AF_NDC_TX_SYNC (0x03F0)
#define NIX_AF_AQ_CFG (0x0400)
#define NIX_AF_AQ_BASE (0x0410)
@@ -429,6 +431,8 @@
#define NIX_AF_RX_ACTIVE_CYCLES_PCX(a) (0x4800 | (a) << 16)
#define NIX_AF_LINKX_CFG(a) (0x4010 | (a) << 17)
#define NIX_AF_MDQX_IN_MD_COUNT(a) (0x14e0 | (a) << 16)
+#define NIX_AF_SMQX_STATUS(a) (0x730 | (a) << 16)
+#define NIX_AF_MDQX_OUT_MD_COUNT(a) (0xdb0 | (a) << 16)
#define NIX_PRIV_AF_INT_CFG (0x8000000)
#define NIX_PRIV_LFX_CFG (0x8000010)
@@ -441,6 +445,12 @@
#define NIX_CONST_MAX_BPIDS GENMASK_ULL(23, 12)
#define NIX_CONST_SDP_CHANS GENMASK_ULL(11, 0)
+#define NIX_VLAN_ETYPE_MASK GENMASK_ULL(63, 48)
+
+#define NIX_AF_MDQ_PARENT_MASK GENMASK_ULL(24, 16)
+#define NIX_AF_TL4_PARENT_MASK GENMASK_ULL(23, 16)
+#define NIX_AF_TL3_PARENT_MASK GENMASK_ULL(23, 16)
+#define NIX_AF_TL2_PARENT_MASK GENMASK_ULL(20, 16)
/* SSO */
#define SSO_AF_CONST (0x1000)
@@ -536,6 +546,7 @@
#define CPT_AF_CTX_PSH_PC (0x49450ull)
#define CPT_AF_CTX_PSH_LATENCY_PC (0x49458ull)
#define CPT_AF_CTX_CAM_DATA(a) (0x49800ull | (u64)(a) << 3)
+#define CPT_AF_RXC_CFG1 (0x50000ull)
#define CPT_AF_RXC_TIME (0x50010ull)
#define CPT_AF_RXC_TIME_CFG (0x50018ull)
#define CPT_AF_RXC_DFRG (0x50020ull)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c
new file mode 100644
index 000000000000..4415d0ce9aef
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c
@@ -0,0 +1,469 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#include <linux/bitfield.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "rvu.h"
+#include "rvu_reg.h"
+
+#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
+static struct _req_type __maybe_unused \
+*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid) \
+{ \
+ struct _req_type *req; \
+ \
+ req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \
+ &rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
+ sizeof(struct _rsp_type)); \
+ if (!req) \
+ return NULL; \
+ req->hdr.sig = OTX2_MBOX_REQ_SIG; \
+ req->hdr.id = _id; \
+ return req; \
+}
+
+MBOX_UP_REP_MESSAGES
+#undef M
+
+static int rvu_rep_up_notify(struct rvu *rvu, struct rep_event *event)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, event->pcifunc);
+ struct rep_event *msg;
+ int pf;
+
+ pf = rvu_get_pf(rvu->pdev, event->pcifunc);
+
+ if (event->event & RVU_EVENT_MAC_ADDR_CHANGE)
+ ether_addr_copy(pfvf->mac_addr, event->evt_data.mac);
+
+ mutex_lock(&rvu->mbox_lock);
+ msg = otx2_mbox_alloc_msg_rep_event_up_notify(rvu, pf);
+ if (!msg) {
+ mutex_unlock(&rvu->mbox_lock);
+ return -ENOMEM;
+ }
+
+ msg->hdr.pcifunc = event->pcifunc;
+ msg->event = event->event;
+
+ memcpy(&msg->evt_data, &event->evt_data, sizeof(struct rep_evt_data));
+
+ otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pf);
+
+ otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf);
+
+ otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pf);
+
+ mutex_unlock(&rvu->mbox_lock);
+ return 0;
+}
+
+static void rvu_rep_wq_handler(struct work_struct *work)
+{
+ struct rvu *rvu = container_of(work, struct rvu, rep_evt_work);
+ struct rep_evtq_ent *qentry;
+ struct rep_event *event;
+ unsigned long flags;
+
+ do {
+ spin_lock_irqsave(&rvu->rep_evtq_lock, flags);
+ qentry = list_first_entry_or_null(&rvu->rep_evtq_head,
+ struct rep_evtq_ent,
+ node);
+ if (qentry)
+ list_del(&qentry->node);
+
+ spin_unlock_irqrestore(&rvu->rep_evtq_lock, flags);
+ if (!qentry)
+ break; /* nothing more to process */
+
+ event = &qentry->event;
+
+ rvu_rep_up_notify(rvu, event);
+ kfree(qentry);
+ } while (1);
+}
+
+int rvu_mbox_handler_rep_event_notify(struct rvu *rvu, struct rep_event *req,
+ struct msg_rsp *rsp)
+{
+ struct rep_evtq_ent *qentry;
+
+ qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
+ if (!qentry)
+ return -ENOMEM;
+
+ qentry->event = *req;
+ spin_lock(&rvu->rep_evtq_lock);
+ list_add_tail(&qentry->node, &rvu->rep_evtq_head);
+ spin_unlock(&rvu->rep_evtq_lock);
+ queue_work(rvu->rep_evt_wq, &rvu->rep_evt_work);
+ return 0;
+}
+
+int rvu_rep_notify_pfvf_state(struct rvu *rvu, u16 pcifunc, bool enable)
+{
+ struct rep_event *req;
+ int pf;
+
+ if (!is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc)))
+ return 0;
+
+ pf = rvu_get_pf(rvu->pdev, rvu->rep_pcifunc);
+
+ mutex_lock(&rvu->mbox_lock);
+ req = otx2_mbox_alloc_msg_rep_event_up_notify(rvu, pf);
+ if (!req) {
+ mutex_unlock(&rvu->mbox_lock);
+ return -ENOMEM;
+ }
+
+ req->hdr.pcifunc = rvu->rep_pcifunc;
+ req->event |= RVU_EVENT_PFVF_STATE;
+ req->pcifunc = pcifunc;
+ req->evt_data.vf_state = enable;
+
+ otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pf);
+ otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf);
+
+ mutex_unlock(&rvu->mbox_lock);
+ return 0;
+}
+
+#define RVU_LF_RX_STATS(reg) \
+ rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, reg))
+
+#define RVU_LF_TX_STATS(reg) \
+ rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, reg))
+
+int rvu_mbox_handler_nix_lf_stats(struct rvu *rvu,
+ struct nix_stats_req *req,
+ struct nix_stats_rsp *rsp)
+{
+ u16 pcifunc = req->pcifunc;
+ int nixlf, blkaddr, err;
+ struct msg_req rst_req;
+ struct msg_rsp rst_rsp;
+
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ if (err)
+ return 0;
+
+ if (req->reset) {
+ rst_req.hdr.pcifunc = pcifunc;
+ return rvu_mbox_handler_nix_stats_rst(rvu, &rst_req, &rst_rsp);
+ }
+ rsp->rx.octs = RVU_LF_RX_STATS(RX_OCTS);
+ rsp->rx.ucast = RVU_LF_RX_STATS(RX_UCAST);
+ rsp->rx.bcast = RVU_LF_RX_STATS(RX_BCAST);
+ rsp->rx.mcast = RVU_LF_RX_STATS(RX_MCAST);
+ rsp->rx.drop = RVU_LF_RX_STATS(RX_DROP);
+ rsp->rx.err = RVU_LF_RX_STATS(RX_ERR);
+ rsp->rx.drop_octs = RVU_LF_RX_STATS(RX_DROP_OCTS);
+ rsp->rx.drop_mcast = RVU_LF_RX_STATS(RX_DRP_MCAST);
+ rsp->rx.drop_bcast = RVU_LF_RX_STATS(RX_DRP_BCAST);
+
+ rsp->tx.octs = RVU_LF_TX_STATS(TX_OCTS);
+ rsp->tx.ucast = RVU_LF_TX_STATS(TX_UCAST);
+ rsp->tx.bcast = RVU_LF_TX_STATS(TX_BCAST);
+ rsp->tx.mcast = RVU_LF_TX_STATS(TX_MCAST);
+ rsp->tx.drop = RVU_LF_TX_STATS(TX_DROP);
+
+ rsp->pcifunc = req->pcifunc;
+ return 0;
+}
+
+static u16 rvu_rep_get_vlan_id(struct rvu *rvu, u16 pcifunc)
+{
+ int id;
+
+ for (id = 0; id < rvu->rep_cnt; id++)
+ if (rvu->rep2pfvf_map[id] == pcifunc)
+ return id;
+ return 0;
+}
+
+static int rvu_rep_tx_vlan_cfg(struct rvu *rvu, u16 pcifunc,
+ u16 vlan_tci, int *vidx)
+{
+ struct nix_vtag_config_rsp rsp = {};
+ struct nix_vtag_config req = {};
+ u64 etype = ETH_P_8021Q;
+ int err;
+
+ /* Insert vlan tag */
+ req.hdr.pcifunc = pcifunc;
+ req.vtag_size = VTAGSIZE_T4;
+ req.cfg_type = 0; /* tx vlan cfg */
+ req.tx.cfg_vtag0 = true;
+ req.tx.vtag0 = FIELD_PREP(NIX_VLAN_ETYPE_MASK, etype) | vlan_tci;
+
+ err = rvu_mbox_handler_nix_vtag_cfg(rvu, &req, &rsp);
+ if (err) {
+ dev_err(rvu->dev, "Tx vlan config failed\n");
+ return err;
+ }
+ *vidx = rsp.vtag0_idx;
+ return 0;
+}
+
+static int rvu_rep_rx_vlan_cfg(struct rvu *rvu, u16 pcifunc)
+{
+ struct nix_vtag_config req = {};
+ struct nix_vtag_config_rsp rsp;
+
+ /* config strip, capture and size */
+ req.hdr.pcifunc = pcifunc;
+ req.vtag_size = VTAGSIZE_T4;
+ req.cfg_type = 1; /* rx vlan cfg */
+ req.rx.vtag_type = NIX_AF_LFX_RX_VTAG_TYPE0;
+ req.rx.strip_vtag = true;
+ req.rx.capture_vtag = false;
+
+ return rvu_mbox_handler_nix_vtag_cfg(rvu, &req, &rsp);
+}
+
+static int rvu_rep_install_rx_rule(struct rvu *rvu, u16 pcifunc,
+ u16 entry, bool rte)
+{
+ struct npc_install_flow_req req = {};
+ struct npc_install_flow_rsp rsp = {};
+ struct rvu_pfvf *pfvf;
+ u16 vlan_tci, rep_id;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+ /* To steer the traffic from Representee to Representor */
+ rep_id = rvu_rep_get_vlan_id(rvu, pcifunc);
+ if (rte) {
+ vlan_tci = rep_id | BIT_ULL(8);
+ req.vf = rvu->rep_pcifunc;
+ req.op = NIX_RX_ACTIONOP_UCAST;
+ req.index = rep_id;
+ } else {
+ vlan_tci = rep_id;
+ req.vf = pcifunc;
+ req.op = NIX_RX_ACTION_DEFAULT;
+ }
+
+ rvu_rep_rx_vlan_cfg(rvu, req.vf);
+ req.entry = entry;
+ req.hdr.pcifunc = 0; /* AF is requester */
+ req.features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_VLAN_ETYPE_CTAG);
+ req.vtag0_valid = true;
+ req.vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE0;
+ req.packet.vlan_etype = cpu_to_be16(ETH_P_8021Q);
+ req.mask.vlan_etype = cpu_to_be16(ETH_P_8021Q);
+ req.packet.vlan_tci = cpu_to_be16(vlan_tci);
+ req.mask.vlan_tci = cpu_to_be16(0xffff);
+
+ req.channel = RVU_SWITCH_LBK_CHAN;
+ req.chan_mask = 0xffff;
+ req.intf = pfvf->nix_rx_intf;
+
+ return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
+}
+
+static int rvu_rep_install_tx_rule(struct rvu *rvu, u16 pcifunc, u16 entry,
+ bool rte)
+{
+ struct npc_install_flow_req req = {};
+ struct npc_install_flow_rsp rsp = {};
+ struct rvu_pfvf *pfvf;
+ int vidx, err;
+ u16 vlan_tci;
+ u8 lbkid;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ vlan_tci = rvu_rep_get_vlan_id(rvu, pcifunc);
+ if (rte)
+ vlan_tci |= BIT_ULL(8);
+
+ err = rvu_rep_tx_vlan_cfg(rvu, pcifunc, vlan_tci, &vidx);
+ if (err)
+ return err;
+
+ lbkid = pfvf->nix_blkaddr == BLKADDR_NIX0 ? 0 : 1;
+ req.hdr.pcifunc = 0; /* AF is requester */
+ if (rte) {
+ req.vf = pcifunc;
+ } else {
+ req.vf = rvu->rep_pcifunc;
+ req.packet.sq_id = vlan_tci;
+ req.mask.sq_id = 0xffff;
+ }
+
+ req.entry = entry;
+ req.intf = pfvf->nix_tx_intf;
+ req.op = NIX_TX_ACTIONOP_UCAST_CHAN;
+ req.index = (lbkid << 8) | RVU_SWITCH_LBK_CHAN;
+ req.set_cntr = 1;
+ req.vtag0_def = vidx;
+ req.vtag0_op = 1;
+ return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
+}
+
+int rvu_rep_install_mcam_rules(struct rvu *rvu)
+{
+ struct rvu_switch *rswitch = &rvu->rswitch;
+ u16 start = rswitch->start_entry;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc, entry = 0;
+ int pf, vf, numvfs;
+ int err, nixlf, i;
+ u8 rep;
+
+ for (pf = 1; pf < hw->total_pfs; pf++) {
+ if (!is_pf_cgxmapped(rvu, pf))
+ continue;
+
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0);
+ rvu_get_nix_blkaddr(rvu, pcifunc);
+ rep = true;
+ for (i = 0; i < 2; i++) {
+ err = rvu_rep_install_rx_rule(rvu, pcifunc,
+ start + entry, rep);
+ if (err)
+ return err;
+ rswitch->entry2pcifunc[entry++] = pcifunc;
+
+ err = rvu_rep_install_tx_rule(rvu, pcifunc,
+ start + entry, rep);
+ if (err)
+ return err;
+ rswitch->entry2pcifunc[entry++] = pcifunc;
+ rep = false;
+ }
+
+ rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL);
+ for (vf = 0; vf < numvfs; vf++) {
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, vf + 1);
+ rvu_get_nix_blkaddr(rvu, pcifunc);
+
+ /* Skip installing rules if nixlf is not attached */
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
+ if (err)
+ continue;
+ rep = true;
+ for (i = 0; i < 2; i++) {
+ err = rvu_rep_install_rx_rule(rvu, pcifunc,
+ start + entry,
+ rep);
+ if (err)
+ return err;
+ rswitch->entry2pcifunc[entry++] = pcifunc;
+
+ err = rvu_rep_install_tx_rule(rvu, pcifunc,
+ start + entry,
+ rep);
+ if (err)
+ return err;
+ rswitch->entry2pcifunc[entry++] = pcifunc;
+ rep = false;
+ }
+ }
+ }
+
+ /* Initialize the wq for handling REP events */
+ spin_lock_init(&rvu->rep_evtq_lock);
+ INIT_LIST_HEAD(&rvu->rep_evtq_head);
+ INIT_WORK(&rvu->rep_evt_work, rvu_rep_wq_handler);
+ rvu->rep_evt_wq = alloc_workqueue("rep_evt_wq", WQ_PERCPU, 0);
+ if (!rvu->rep_evt_wq) {
+ dev_err(rvu->dev, "REP workqueue allocation failed\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+void rvu_rep_update_rules(struct rvu *rvu, u16 pcifunc, bool ena)
+{
+ struct rvu_switch *rswitch = &rvu->rswitch;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u32 max = rswitch->used_entries;
+ int blkaddr;
+ u16 entry;
+
+ if (!rswitch->used_entries)
+ return;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+
+ if (blkaddr < 0)
+ return;
+
+ rvu_switch_enable_lbk_link(rvu, pcifunc, ena);
+ mutex_lock(&mcam->lock);
+ for (entry = 0; entry < max; entry++) {
+ if (rswitch->entry2pcifunc[entry] == pcifunc)
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, entry, ena);
+ }
+ mutex_unlock(&mcam->lock);
+}
+
+int rvu_rep_pf_init(struct rvu *rvu)
+{
+ u16 pcifunc = rvu->rep_pcifunc;
+ struct rvu_pfvf *pfvf;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ set_bit(NIXLF_INITIALIZED, &pfvf->flags);
+ rvu_switch_enable_lbk_link(rvu, pcifunc, true);
+ rvu_rep_rx_vlan_cfg(rvu, pcifunc);
+ return 0;
+}
+
+int rvu_mbox_handler_esw_cfg(struct rvu *rvu, struct esw_cfg_req *req,
+ struct msg_rsp *rsp)
+{
+ if (req->hdr.pcifunc != rvu->rep_pcifunc)
+ return 0;
+
+ rvu->rep_mode = req->ena;
+
+ if (!rvu->rep_mode)
+ rvu_npc_free_mcam_entries(rvu, req->hdr.pcifunc, -1);
+
+ return 0;
+}
+
+int rvu_mbox_handler_get_rep_cnt(struct rvu *rvu, struct msg_req *req,
+ struct get_rep_cnt_rsp *rsp)
+{
+ int pf, vf, numvfs, hwvf, rep = 0;
+ u16 pcifunc;
+
+ rvu->rep_pcifunc = req->hdr.pcifunc;
+ rsp->rep_cnt = rvu->cgx_mapped_pfs + rvu->cgx_mapped_vfs;
+ rvu->rep_cnt = rsp->rep_cnt;
+
+ rvu->rep2pfvf_map = devm_kzalloc(rvu->dev, rvu->rep_cnt *
+ sizeof(u16), GFP_KERNEL);
+ if (!rvu->rep2pfvf_map)
+ return -ENOMEM;
+
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ if (!is_pf_cgxmapped(rvu, pf))
+ continue;
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0);
+ rvu->rep2pfvf_map[rep] = pcifunc;
+ rsp->rep_pf_map[rep] = pcifunc;
+ rep++;
+ rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
+ for (vf = 0; vf < numvfs; vf++) {
+ rvu->rep2pfvf_map[rep] = pcifunc |
+ ((vf + 1) & RVU_PFVF_FUNC_MASK);
+ rsp->rep_pf_map[rep] = rvu->rep2pfvf_map[rep];
+ rep++;
+ }
+ }
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
index 38cfe148f4b7..e4a5f9fa6fd4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
@@ -17,9 +17,9 @@
/* SDP PF number */
static int sdp_pf_num[MAX_SDP] = {-1, -1};
-bool is_sdp_pfvf(u16 pcifunc)
+bool is_sdp_pfvf(struct rvu *rvu, u16 pcifunc)
{
- u16 pf = rvu_get_pf(pcifunc);
+ u16 pf = rvu_get_pf(rvu->pdev, pcifunc);
u32 found = 0, i = 0;
while (i < MAX_SDP) {
@@ -34,9 +34,9 @@ bool is_sdp_pfvf(u16 pcifunc)
return true;
}
-bool is_sdp_pf(u16 pcifunc)
+bool is_sdp_pf(struct rvu *rvu, u16 pcifunc)
{
- return (is_sdp_pfvf(pcifunc) &&
+ return (is_sdp_pfvf(rvu, pcifunc) &&
!(pcifunc & RVU_PFVF_FUNC_MASK));
}
@@ -46,7 +46,7 @@ bool is_sdp_vf(struct rvu *rvu, u16 pcifunc)
if (!(pcifunc & ~RVU_PFVF_FUNC_MASK))
return (rvu->vf_devid == RVU_SDP_VF_DEVID);
- return (is_sdp_pfvf(pcifunc) &&
+ return (is_sdp_pfvf(rvu, pcifunc) &&
!!(pcifunc & RVU_PFVF_FUNC_MASK));
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
index 5ef406c7e8a4..8e868f815de1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
@@ -13,6 +13,8 @@
#define RVU_MULTI_BLK_VER 0x7ULL
+#define NIX_MAX_CTX_SIZE 128
+
/* RVU Block Address Enumeration */
enum rvu_block_addr_e {
BLKADDR_RVUM = 0x0ULL,
@@ -33,7 +35,8 @@ enum rvu_block_addr_e {
BLKADDR_NDC_NIX1_RX = 0x10ULL,
BLKADDR_NDC_NIX1_TX = 0x11ULL,
BLKADDR_APR = 0x16ULL,
- BLK_COUNT = 0x17ULL,
+ BLKADDR_MBOX = 0x1bULL,
+ BLK_COUNT = 0x1cULL,
};
/* RVU Block Type Enumeration */
@@ -49,7 +52,8 @@ enum rvu_block_type_e {
BLKTYPE_TIM = 0x8,
BLKTYPE_CPT = 0x9,
BLKTYPE_NDC = 0xa,
- BLKTYPE_MAX = 0xa,
+ BLKTYPE_MBOX = 0x13,
+ BLKTYPE_MAX = 0x13,
};
/* RVU Admin function Interrupt Vector Enumeration */
@@ -71,13 +75,11 @@ enum cpt_af_int_vec_e {
CPT_AF_INT_VEC_CNT = 0x4,
};
-enum cpt_10k_af_int_vec_e {
+enum cpt_cn10k_flt_int_vec_e {
CPT_10K_AF_INT_VEC_FLT0 = 0x0,
CPT_10K_AF_INT_VEC_FLT1 = 0x1,
CPT_10K_AF_INT_VEC_FLT2 = 0x2,
- CPT_10K_AF_INT_VEC_RVU = 0x3,
- CPT_10K_AF_INT_VEC_RAS = 0x4,
- CPT_10K_AF_INT_VEC_CNT = 0x5,
+ CPT_10K_AF_INT_VEC_FLT_MAX = 0x3,
};
/* NPA Admin function Interrupt Vector Enumeration */
@@ -370,8 +372,12 @@ struct nix_cq_ctx_s {
u64 qsize : 4;
u64 cq_err_int : 8;
u64 cq_err_int_ena : 8;
+ /* Ensure all context sizes are 128 bytes */
+ u64 padding[12];
};
+static_assert(sizeof(struct nix_cq_ctx_s) == NIX_MAX_CTX_SIZE);
+
/* CN10K NIX Receive queue context structure */
struct nix_cn10k_rq_ctx_s {
u64 ena : 1;
@@ -413,7 +419,8 @@ struct nix_cn10k_rq_ctx_s {
u64 rsvd_171 : 1;
u64 later_skip : 6;
u64 xqe_imm_size : 6;
- u64 rsvd_189_184 : 6;
+ u64 band_prof_id_h : 4;
+ u64 rsvd_189_188 : 2;
u64 xqe_imm_copy : 1;
u64 xqe_hdr_split : 1;
u64 xqe_drop : 8; /* W3 */
@@ -460,6 +467,8 @@ struct nix_cn10k_rq_ctx_s {
u64 rsvd_1023_960; /* W15 */
};
+static_assert(sizeof(struct nix_cn10k_rq_ctx_s) == NIX_MAX_CTX_SIZE);
+
/* CN10K NIX Send queue context structure */
struct nix_cn10k_sq_ctx_s {
u64 ena : 1;
@@ -523,6 +532,8 @@ struct nix_cn10k_sq_ctx_s {
u64 rsvd_1023_1008 : 16;
};
+static_assert(sizeof(struct nix_cn10k_sq_ctx_s) == NIX_MAX_CTX_SIZE);
+
/* NIX Receive queue context structure */
struct nix_rq_ctx_s {
u64 ena : 1;
@@ -594,6 +605,8 @@ struct nix_rq_ctx_s {
u64 rsvd_1023_960; /* W15 */
};
+static_assert(sizeof(struct nix_rq_ctx_s) == NIX_MAX_CTX_SIZE);
+
/* NIX sqe sizes */
enum nix_maxsqesz {
NIX_MAXSQESZ_W16 = 0x0,
@@ -668,13 +681,18 @@ struct nix_sq_ctx_s {
u64 rsvd_1023_1008 : 16;
};
+static_assert(sizeof(struct nix_sq_ctx_s) == NIX_MAX_CTX_SIZE);
+
/* NIX Receive side scaling entry structure*/
struct nix_rsse_s {
uint32_t rq : 20;
uint32_t reserved_20_31 : 12;
-
+ /* Ensure all context sizes are at least 128 bytes */
+ u64 padding[15];
};
+static_assert(sizeof(struct nix_rsse_s) == NIX_MAX_CTX_SIZE);
+
/* NIX receive multicast/mirror entry structure */
struct nix_rx_mce_s {
uint64_t op : 2;
@@ -684,8 +702,12 @@ struct nix_rx_mce_s {
uint64_t rsvd_31_24 : 8;
uint64_t pf_func : 16;
uint64_t next : 16;
+ /* Ensure all context sizes are at least 128 bytes */
+ u64 padding[15];
};
+static_assert(sizeof(struct nix_rx_mce_s) == NIX_MAX_CTX_SIZE);
+
enum nix_band_prof_layers {
BAND_PROF_LEAF_LAYER = 0,
BAND_PROF_INVAL_LAYER = 1,
@@ -736,7 +758,8 @@ struct nix_bandprof_s {
uint64_t rc_action : 2;
uint64_t meter_algo : 2;
uint64_t band_prof_id : 7;
- uint64_t reserved_111_118 : 8;
+ uint64_t band_prof_id_h : 4;
+ uint64_t reserved_115_118 : 4;
uint64_t hl_en : 1;
uint64_t reserved_120_127 : 8;
uint64_t ts : 48; /* W2 */
@@ -769,6 +792,8 @@ struct nix_bandprof_s {
uint64_t reserved_1008_1023 : 16;
};
+static_assert(sizeof(struct nix_bandprof_s) == NIX_MAX_CTX_SIZE);
+
enum nix_lsoalg {
NIX_LSOALG_NOP,
NIX_LSOALG_ADD_SEGNUM,
@@ -825,4 +850,30 @@ enum nix_tx_vtag_op {
#define VTAG_STRIP BIT_ULL(4)
#define VTAG_CAPTURE BIT_ULL(5)
+/* NIX TX stats */
+enum nix_stat_lf_tx {
+ TX_UCAST = 0x0,
+ TX_BCAST = 0x1,
+ TX_MCAST = 0x2,
+ TX_DROP = 0x3,
+ TX_OCTS = 0x4,
+ TX_STATS_ENUM_LAST,
+};
+
+/* NIX RX stats */
+enum nix_stat_lf_rx {
+ RX_OCTS = 0x0,
+ RX_UCAST = 0x1,
+ RX_BCAST = 0x2,
+ RX_MCAST = 0x3,
+ RX_DROP = 0x4,
+ RX_DROP_OCTS = 0x5,
+ RX_FCS = 0x6,
+ RX_ERR = 0x7,
+ RX_DRP_BCAST = 0x8,
+ RX_DRP_MCAST = 0x9,
+ RX_DRP_L3BCAST = 0xa,
+ RX_DRP_L3MCAST = 0xb,
+ RX_STATS_ENUM_LAST,
+};
#endif /* RVU_STRUCT_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
index 854045ed3b06..49ce38685a7e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
@@ -8,7 +8,7 @@
#include <linux/bitfield.h>
#include "rvu.h"
-static void rvu_switch_enable_lbk_link(struct rvu *rvu, u16 pcifunc, bool enable)
+void rvu_switch_enable_lbk_link(struct rvu *rvu, u16 pcifunc, bool enable)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
struct nix_hw *nix_hw;
@@ -93,7 +93,7 @@ static int rvu_switch_install_rules(struct rvu *rvu)
if (!is_pf_cgxmapped(rvu, pf))
continue;
- pcifunc = pf << 10;
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0);
/* rvu_get_nix_blkaddr sets up the corresponding NIX block
* address and NIX RX and TX interfaces for a pcifunc.
* Generally it is called during attach call of a pcifunc but it
@@ -126,7 +126,7 @@ static int rvu_switch_install_rules(struct rvu *rvu)
rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL);
for (vf = 0; vf < numvfs; vf++) {
- pcifunc = pf << 10 | ((vf + 1) & 0x3FF);
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, (vf + 1));
rvu_get_nix_blkaddr(rvu, pcifunc);
err = rvu_switch_install_rx_rule(rvu, pcifunc, 0x0);
@@ -166,6 +166,8 @@ void rvu_switch_enable(struct rvu *rvu)
alloc_req.contig = true;
alloc_req.count = rvu->cgx_mapped_pfs + rvu->cgx_mapped_vfs;
+ if (rvu->rep_mode)
+ alloc_req.count = alloc_req.count * 4;
ret = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req,
&alloc_rsp);
if (ret) {
@@ -189,7 +191,12 @@ void rvu_switch_enable(struct rvu *rvu)
rswitch->used_entries = alloc_rsp.count;
rswitch->start_entry = alloc_rsp.entry;
- ret = rvu_switch_install_rules(rvu);
+ if (rvu->rep_mode) {
+ rvu_rep_pf_init(rvu);
+ ret = rvu_rep_install_mcam_rules(rvu);
+ } else {
+ ret = rvu_switch_install_rules(rvu);
+ }
if (ret)
goto uninstall_rules;
@@ -222,11 +229,14 @@ void rvu_switch_disable(struct rvu *rvu)
if (!rswitch->used_entries)
return;
+ if (rvu->rep_mode)
+ goto free_ents;
+
for (pf = 1; pf < hw->total_pfs; pf++) {
if (!is_pf_cgxmapped(rvu, pf))
continue;
- pcifunc = pf << 10;
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0);
err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF);
if (err)
dev_err(rvu->dev,
@@ -238,7 +248,7 @@ void rvu_switch_disable(struct rvu *rvu)
rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL);
for (vf = 0; vf < numvfs; vf++) {
- pcifunc = pf << 10 | ((vf + 1) & 0x3FF);
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, (vf + 1));
err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF);
if (err)
dev_err(rvu->dev,
@@ -249,6 +259,7 @@ void rvu_switch_disable(struct rvu *rvu)
}
}
+free_ents:
uninstall_req.start = rswitch->start_entry;
uninstall_req.end = rswitch->start_entry + rswitch->used_entries - 1;
free_req.all = 1;
@@ -258,12 +269,15 @@ void rvu_switch_disable(struct rvu *rvu)
kfree(rswitch->entry2pcifunc);
}
-void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc)
+void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc, bool ena)
{
struct rvu_switch *rswitch = &rvu->rswitch;
u32 max = rswitch->used_entries;
u16 entry;
+ if (rvu->rep_mode)
+ return rvu_rep_update_rules(rvu, pcifunc, ena);
+
if (!rswitch->used_entries)
return;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c
index 775fd4c35794..19e0d16b12f6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c
@@ -11,3 +11,5 @@
EXPORT_TRACEPOINT_SYMBOL(otx2_msg_alloc);
EXPORT_TRACEPOINT_SYMBOL(otx2_msg_interrupt);
EXPORT_TRACEPOINT_SYMBOL(otx2_msg_process);
+EXPORT_TRACEPOINT_SYMBOL(otx2_msg_status);
+EXPORT_TRACEPOINT_SYMBOL(otx2_parse_dump);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h
index 28984d0e848a..4cd0fc4b0d20 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h
@@ -18,33 +18,42 @@
#include "mbox.h"
TRACE_EVENT(otx2_msg_alloc,
- TP_PROTO(const struct pci_dev *pdev, u16 id, u64 size),
- TP_ARGS(pdev, id, size),
+ TP_PROTO(const struct pci_dev *pdev, u16 id, u64 size, u16 pcifunc),
+ TP_ARGS(pdev, id, size, pcifunc),
TP_STRUCT__entry(__string(dev, pci_name(pdev))
__field(u16, id)
__field(u64, size)
+ __field(u16, pcifunc)
),
- TP_fast_assign(__assign_str(dev, pci_name(pdev));
+ TP_fast_assign(__assign_str(dev);
__entry->id = id;
__entry->size = size;
+ __entry->pcifunc = pcifunc;
),
- TP_printk("[%s] msg:(%s) size:%lld\n", __get_str(dev),
- otx2_mbox_id2name(__entry->id), __entry->size)
+ TP_printk("[%s] msg:(%s) size:%lld pcifunc:0x%x\n", __get_str(dev),
+ otx2_mbox_id2name(__entry->id), __entry->size,
+ __entry->pcifunc)
);
TRACE_EVENT(otx2_msg_send,
- TP_PROTO(const struct pci_dev *pdev, u16 num_msgs, u64 msg_size),
- TP_ARGS(pdev, num_msgs, msg_size),
+ TP_PROTO(const struct pci_dev *pdev, u16 num_msgs, u64 msg_size,
+ u16 id, u16 pcifunc),
+ TP_ARGS(pdev, num_msgs, msg_size, id, pcifunc),
TP_STRUCT__entry(__string(dev, pci_name(pdev))
__field(u16, num_msgs)
__field(u64, msg_size)
+ __field(u16, id)
+ __field(u16, pcifunc)
),
- TP_fast_assign(__assign_str(dev, pci_name(pdev));
+ TP_fast_assign(__assign_str(dev);
__entry->num_msgs = num_msgs;
__entry->msg_size = msg_size;
+ __entry->id = id;
+ __entry->pcifunc = pcifunc;
),
- TP_printk("[%s] sent %d msg(s) of size:%lld\n", __get_str(dev),
- __entry->num_msgs, __entry->msg_size)
+ TP_printk("[%s] sent %d msg(s) of size:%lld msg:(%s) pcifunc:0x%x\n",
+ __get_str(dev), __entry->num_msgs, __entry->msg_size,
+ otx2_mbox_id2name(__entry->id), __entry->pcifunc)
);
TRACE_EVENT(otx2_msg_check,
@@ -55,7 +64,7 @@ TRACE_EVENT(otx2_msg_check,
__field(u16, rspid)
__field(int, rc)
),
- TP_fast_assign(__assign_str(dev, pci_name(pdev));
+ TP_fast_assign(__assign_str(dev);
__entry->reqid = reqid;
__entry->rspid = rspid;
__entry->rc = rc;
@@ -72,8 +81,8 @@ TRACE_EVENT(otx2_msg_interrupt,
__string(str, msg)
__field(u64, intr)
),
- TP_fast_assign(__assign_str(dev, pci_name(pdev));
- __assign_str(str, msg);
+ TP_fast_assign(__assign_str(dev);
+ __assign_str(str);
__entry->intr = intr;
),
TP_printk("[%s] mbox interrupt %s (0x%llx)\n", __get_str(dev),
@@ -81,18 +90,73 @@ TRACE_EVENT(otx2_msg_interrupt,
);
TRACE_EVENT(otx2_msg_process,
- TP_PROTO(const struct pci_dev *pdev, u16 id, int err),
- TP_ARGS(pdev, id, err),
+ TP_PROTO(const struct pci_dev *pdev, u16 id, int err, u16 pcifunc),
+ TP_ARGS(pdev, id, err, pcifunc),
TP_STRUCT__entry(__string(dev, pci_name(pdev))
__field(u16, id)
__field(int, err)
+ __field(u16, pcifunc)
),
- TP_fast_assign(__assign_str(dev, pci_name(pdev));
+ TP_fast_assign(__assign_str(dev);
__entry->id = id;
__entry->err = err;
+ __entry->pcifunc = pcifunc;
+ ),
+ TP_printk("[%s] msg:(%s) error:%d pcifunc:0x%x\n", __get_str(dev),
+ otx2_mbox_id2name(__entry->id),
+ __entry->err, __entry->pcifunc)
+);
+
+TRACE_EVENT(otx2_msg_wait_rsp,
+ TP_PROTO(const struct pci_dev *pdev),
+ TP_ARGS(pdev),
+ TP_STRUCT__entry(__string(dev, pci_name(pdev))
+ ),
+ TP_fast_assign(__assign_str(dev)
+ ),
+ TP_printk("[%s] timed out while waiting for response\n",
+ __get_str(dev))
+);
+
+TRACE_EVENT(otx2_msg_status,
+ TP_PROTO(const struct pci_dev *pdev, const char *msg, u16 num_msgs),
+ TP_ARGS(pdev, msg, num_msgs),
+ TP_STRUCT__entry(__string(dev, pci_name(pdev))
+ __string(str, msg)
+ __field(u16, num_msgs)
+ ),
+ TP_fast_assign(__assign_str(dev);
+ __assign_str(str);
+ __entry->num_msgs = num_msgs;
+ ),
+ TP_printk("[%s] %s num_msgs:%d\n", __get_str(dev),
+ __get_str(str), __entry->num_msgs)
+);
+
+TRACE_EVENT(otx2_parse_dump,
+ TP_PROTO(const struct pci_dev *pdev, char *msg, u64 *word),
+ TP_ARGS(pdev, msg, word),
+ TP_STRUCT__entry(__string(dev, pci_name(pdev))
+ __string(str, msg)
+ __field(u64, w0)
+ __field(u64, w1)
+ __field(u64, w2)
+ __field(u64, w3)
+ __field(u64, w4)
+ __field(u64, w5)
+ ),
+ TP_fast_assign(__assign_str(dev);
+ __assign_str(str);
+ __entry->w0 = *(word + 0);
+ __entry->w1 = *(word + 1);
+ __entry->w2 = *(word + 2);
+ __entry->w3 = *(word + 3);
+ __entry->w4 = *(word + 4);
+ __entry->w5 = *(word + 5);
),
- TP_printk("[%s] msg:(%s) error:%d\n", __get_str(dev),
- otx2_mbox_id2name(__entry->id), __entry->err)
+ TP_printk("[%s] nix parse %s W0:%#llx W1:%#llx W2:%#llx W3:%#llx W4:%#llx W5:%#llx\n",
+ __get_str(dev), __get_str(str), __entry->w0, __entry->w1, __entry->w2,
+ __entry->w3, __entry->w4, __entry->w5)
);
#endif /* __RVU_TRACE_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index 5664f768cb0c..883e9f4d601c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -5,14 +5,16 @@
obj-$(CONFIG_OCTEONTX2_PF) += rvu_nicpf.o otx2_ptp.o
obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o otx2_ptp.o
+obj-$(CONFIG_RVU_ESWITCH) += rvu_rep.o
rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
- otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o \
- otx2_devlink.o qos_sq.o qos.o
-rvu_nicvf-y := otx2_vf.o otx2_devlink.o
+ otx2_flows.o otx2_tc.o cn10k.o cn20k.o otx2_dmac_flt.o \
+ otx2_devlink.o qos_sq.o qos.o otx2_xsk.o
+rvu_nicvf-y := otx2_vf.o
+rvu_rep-y := rep.o
rvu_nicpf-$(CONFIG_DCB) += otx2_dcbnl.o
-rvu_nicvf-$(CONFIG_DCB) += otx2_dcbnl.o
rvu_nicpf-$(CONFIG_MACSEC) += cn10k_macsec.o
+rvu_nicpf-$(CONFIG_XFRM_OFFLOAD) += cn10k_ipsec.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
index c1c99d7054f8..3e1bf22cba69 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
@@ -14,6 +14,9 @@ static struct dev_hw_ops otx2_hw_ops = {
.sqe_flush = otx2_sqe_flush,
.aura_freeptr = otx2_aura_freeptr,
.refill_pool_ptrs = otx2_refill_pool_ptrs,
+ .pfaf_mbox_intr_handler = otx2_pfaf_mbox_intr_handler,
+ .aura_aq_init = otx2_aura_aq_init,
+ .pool_aq_init = otx2_pool_aq_init,
};
static struct dev_hw_ops cn10k_hw_ops = {
@@ -21,8 +24,22 @@ static struct dev_hw_ops cn10k_hw_ops = {
.sqe_flush = cn10k_sqe_flush,
.aura_freeptr = cn10k_aura_freeptr,
.refill_pool_ptrs = cn10k_refill_pool_ptrs,
+ .pfaf_mbox_intr_handler = otx2_pfaf_mbox_intr_handler,
+ .aura_aq_init = otx2_aura_aq_init,
+ .pool_aq_init = otx2_pool_aq_init,
};
+void otx2_init_hw_ops(struct otx2_nic *pfvf)
+{
+ if (!test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) {
+ pfvf->hw_ops = &otx2_hw_ops;
+ return;
+ }
+
+ pfvf->hw_ops = &cn10k_hw_ops;
+}
+EXPORT_SYMBOL(otx2_init_hw_ops);
+
int cn10k_lmtst_init(struct otx2_nic *pfvf)
{
@@ -30,12 +47,9 @@ int cn10k_lmtst_init(struct otx2_nic *pfvf)
struct otx2_lmt_info *lmt_info;
int err, cpu;
- if (!test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) {
- pfvf->hw_ops = &otx2_hw_ops;
+ if (!test_bit(CN10K_LMTST, &pfvf->hw.cap_flag))
return 0;
- }
- pfvf->hw_ops = &cn10k_hw_ops;
/* Total LMTLINES = num_online_cpus() * 32 (For Burst flush).*/
pfvf->tot_lmt_lines = (num_online_cpus() * LMT_BURST_SIZE);
pfvf->hw.lmt_info = alloc_percpu(struct otx2_lmt_info);
@@ -72,7 +86,7 @@ int cn10k_lmtst_init(struct otx2_nic *pfvf)
}
EXPORT_SYMBOL(cn10k_lmtst_init);
-int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
+int cn10k_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura)
{
struct nix_cn10k_aq_enq_req *aq;
struct otx2_nic *pfvf = dev;
@@ -88,7 +102,7 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
aq->sq.ena = 1;
aq->sq.smq = otx2_get_smq_idx(pfvf, qidx);
aq->sq.smq_rr_weight = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
- aq->sq.default_chan = pfvf->hw.tx_chan_base;
+ aq->sq.default_chan = pfvf->hw.tx_chan_base + chan_offset;
aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
aq->sq.sqb_aura = sqb_aura;
aq->sq.sq_int_ena = NIX_SQINT_BITS;
@@ -112,9 +126,12 @@ int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
struct otx2_nic *pfvf = dev;
int cnt = cq->pool_ptrs;
u64 ptrs[NPA_MAX_BURST];
+ struct otx2_pool *pool;
dma_addr_t bufptr;
int num_ptrs = 1;
+ pool = &pfvf->qset.pool[cq->cq_idx];
+
/* Refill pool with new buffers */
while (cq->pool_ptrs) {
if (otx2_alloc_buffer(pfvf, cq, &bufptr)) {
@@ -124,7 +141,9 @@ int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
break;
}
cq->pool_ptrs--;
- ptrs[num_ptrs] = (u64)bufptr + OTX2_HEAD_ROOM;
+ ptrs[num_ptrs] = pool->xsk_pool ?
+ (u64)bufptr : (u64)bufptr + OTX2_HEAD_ROOM;
+
num_ptrs++;
if (num_ptrs == NPA_MAX_BURST || cq->pool_ptrs == 0) {
__cn10k_aura_freeptr(pfvf, cq->cq_idx, ptrs,
@@ -203,6 +222,11 @@ int cn10k_alloc_leaf_profile(struct otx2_nic *pfvf, u16 *leaf)
rsp = (struct nix_bandprof_alloc_rsp *)
otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ rc = PTR_ERR(rsp);
+ goto out;
+ }
+
if (!rsp->prof_count[BAND_PROF_LEAF_LAYER]) {
rc = -EIO;
goto out;
@@ -317,6 +341,12 @@ int cn10k_map_unmap_rq_policer(struct otx2_nic *pfvf, int rq_idx,
aq->rq.band_prof_id = policer;
aq->rq_mask.band_prof_id = GENMASK(9, 0);
+ /* A policer id greater than 1023 implies the hardware supports
+ * more leaf profiles; in that case use band_prof_id_h for the 4 MSBs.
+ */
+ aq->rq.band_prof_id_h = policer >> 10;
+ aq->rq_mask.band_prof_id_h = GENMASK(3, 0);
+
/* Fill AQ info */
aq->qidx = rq_idx;
aq->ctype = NIX_AQ_CTYPE_RQ;
@@ -347,9 +377,12 @@ int cn10k_free_matchall_ipolicer(struct otx2_nic *pfvf)
mutex_lock(&pfvf->mbox.lock);
/* Remove RQ's policer mapping */
- for (qidx = 0; qidx < hw->rx_queues; qidx++)
- cn10k_map_unmap_rq_policer(pfvf, qidx,
- hw->matchall_ipolicer, false);
+ for (qidx = 0; qidx < hw->rx_queues; qidx++) {
+ rc = cn10k_map_unmap_rq_policer(pfvf, qidx, hw->matchall_ipolicer, false);
+ if (rc)
+ dev_warn(pfvf->dev, "Failed to unmap RQ %d's policer (error %d).",
+ qidx, rc);
+ }
rc = cn10k_free_leaf_profile(pfvf, hw->matchall_ipolicer);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
index c1861f7de254..945ab10bd4ed 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
@@ -26,7 +26,7 @@ static inline int mtu_to_dwrr_weight(struct otx2_nic *pfvf, int mtu)
int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx);
-int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
+int cn10k_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura);
int cn10k_lmtst_init(struct otx2_nic *pfvf);
int cn10k_free_all_ipolicers(struct otx2_nic *pfvf);
int cn10k_alloc_matchall_ipolicer(struct otx2_nic *pfvf);
@@ -39,4 +39,5 @@ int cn10k_alloc_leaf_profile(struct otx2_nic *pfvf, u16 *leaf);
int cn10k_set_ipolicer_rate(struct otx2_nic *pfvf, u16 profile,
u32 burst, u64 rate, bool pps);
int cn10k_free_leaf_profile(struct otx2_nic *pfvf, u16 leaf);
+void otx2_init_hw_ops(struct otx2_nic *pfvf);
#endif /* CN10K_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
new file mode 100644
index 000000000000..77543d472345
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
@@ -0,0 +1,1042 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell IPSEC offload driver
+ *
+ * Copyright (C) 2024 Marvell.
+ */
+
+#include <net/xfrm.h>
+#include <linux/netdevice.h>
+#include <linux/bitfield.h>
+#include <crypto/aead.h>
+#include <crypto/gcm.h>
+
+#include "otx2_common.h"
+#include "otx2_struct.h"
+#include "cn10k_ipsec.h"
+
+static bool is_dev_support_ipsec_offload(struct pci_dev *pdev)
+{
+ return is_dev_cn10ka_b0(pdev) || is_dev_cn10kb(pdev);
+}
+
+static bool cn10k_cpt_device_set_inuse(struct otx2_nic *pf)
+{
+ enum cn10k_cpt_hw_state_e state;
+
+ while (true) {
+ state = atomic_cmpxchg(&pf->ipsec.cpt_state,
+ CN10K_CPT_HW_AVAILABLE,
+ CN10K_CPT_HW_IN_USE);
+ if (state == CN10K_CPT_HW_AVAILABLE)
+ return true;
+ if (state == CN10K_CPT_HW_UNAVAILABLE)
+ return false;
+
+ mdelay(1);
+ }
+}
+
+static void cn10k_cpt_device_set_available(struct otx2_nic *pf)
+{
+ atomic_set(&pf->ipsec.cpt_state, CN10K_CPT_HW_AVAILABLE);
+}
+
+static void cn10k_cpt_device_set_unavailable(struct otx2_nic *pf)
+{
+ atomic_set(&pf->ipsec.cpt_state, CN10K_CPT_HW_UNAVAILABLE);
+}
+
+static int cn10k_outb_cptlf_attach(struct otx2_nic *pf)
+{
+ struct rsrc_attach *attach;
+ int ret = -ENOMEM;
+
+ mutex_lock(&pf->mbox.lock);
+ /* Get memory to put this msg */
+ attach = otx2_mbox_alloc_msg_attach_resources(&pf->mbox);
+ if (!attach)
+ goto unlock;
+
+ attach->cptlfs = true;
+ attach->modify = true;
+
+ /* Send attach request to AF */
+ ret = otx2_sync_mbox_msg(&pf->mbox);
+
+unlock:
+ mutex_unlock(&pf->mbox.lock);
+ return ret;
+}
+
+static int cn10k_outb_cptlf_detach(struct otx2_nic *pf)
+{
+ struct rsrc_detach *detach;
+ int ret = -ENOMEM;
+
+ mutex_lock(&pf->mbox.lock);
+ detach = otx2_mbox_alloc_msg_detach_resources(&pf->mbox);
+ if (!detach)
+ goto unlock;
+
+ detach->partial = true;
+ detach->cptlfs = true;
+
+ /* Send detach request to AF */
+ ret = otx2_sync_mbox_msg(&pf->mbox);
+
+unlock:
+ mutex_unlock(&pf->mbox.lock);
+ return ret;
+}
+
+static int cn10k_outb_cptlf_alloc(struct otx2_nic *pf)
+{
+ struct cpt_lf_alloc_req_msg *req;
+ int ret = -ENOMEM;
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cpt_lf_alloc(&pf->mbox);
+ if (!req)
+ goto unlock;
+
+ /* PF function */
+ req->nix_pf_func = pf->pcifunc;
+ /* Enable SE-IE Engine Group */
+ req->eng_grpmsk = 1 << CN10K_DEF_CPT_IPSEC_EGRP;
+
+ ret = otx2_sync_mbox_msg(&pf->mbox);
+
+unlock:
+ mutex_unlock(&pf->mbox.lock);
+ return ret;
+}
+
+static void cn10k_outb_cptlf_free(struct otx2_nic *pf)
+{
+ mutex_lock(&pf->mbox.lock);
+ otx2_mbox_alloc_msg_cpt_lf_free(&pf->mbox);
+ otx2_sync_mbox_msg(&pf->mbox);
+ mutex_unlock(&pf->mbox.lock);
+}
+
+static int cn10k_outb_cptlf_config(struct otx2_nic *pf)
+{
+ struct cpt_inline_ipsec_cfg_msg *req;
+ int ret = -ENOMEM;
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cpt_inline_ipsec_cfg(&pf->mbox);
+ if (!req)
+ goto unlock;
+
+ req->dir = CPT_INLINE_OUTBOUND;
+ req->enable = 1;
+ req->nix_pf_func = pf->pcifunc;
+ ret = otx2_sync_mbox_msg(&pf->mbox);
+unlock:
+ mutex_unlock(&pf->mbox.lock);
+ return ret;
+}
+
+static void cn10k_outb_cptlf_iq_enable(struct otx2_nic *pf)
+{
+ u64 reg_val;
+
+ /* Set Execution Enable of instruction queue */
+ reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG);
+ reg_val |= BIT_ULL(16);
+ otx2_write64(pf, CN10K_CPT_LF_INPROG, reg_val);
+
+ /* Set iqueue's enqueuing */
+ reg_val = otx2_read64(pf, CN10K_CPT_LF_CTL);
+ reg_val |= BIT_ULL(0);
+ otx2_write64(pf, CN10K_CPT_LF_CTL, reg_val);
+}
+
+static void cn10k_outb_cptlf_iq_disable(struct otx2_nic *pf)
+{
+ u32 inflight, grb_cnt, gwb_cnt;
+ u32 nq_ptr, dq_ptr;
+ int timeout = 20;
+ u64 reg_val;
+ int cnt;
+
+ /* Disable instructions enqueuing */
+ otx2_write64(pf, CN10K_CPT_LF_CTL, 0ull);
+
+ /* Wait for the instruction queue to drain, i.e. until the
+ * CPT_LF_INPROG.INFLIGHT count reads zero.
+ */
+ do {
+ reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG);
+ inflight = FIELD_GET(CPT_LF_INPROG_INFLIGHT, reg_val);
+ if (!inflight)
+ break;
+
+ usleep_range(10000, 20000);
+ if (timeout-- < 0) {
+ netdev_err(pf->netdev, "Timed out waiting for CPT IQ to drain\n");
+ break;
+ }
+ } while (1);
+
+ /* Disable executions in the LF's queue,
+ * the queue should be empty at this point
+ */
+ reg_val &= ~BIT_ULL(16);
+ otx2_write64(pf, CN10K_CPT_LF_INPROG, reg_val);
+
+ /* Wait for the queue's enqueue (NQ) and dequeue (DQ) pointers to converge */
+ cnt = 0;
+ do {
+ reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG);
+ if (reg_val & BIT_ULL(31))
+ cnt = 0;
+ else
+ cnt++;
+ reg_val = otx2_read64(pf, CN10K_CPT_LF_Q_GRP_PTR);
+ nq_ptr = FIELD_GET(CPT_LF_Q_GRP_PTR_NQ_PTR, reg_val);
+ dq_ptr = FIELD_GET(CPT_LF_Q_GRP_PTR_DQ_PTR, reg_val);
+ } while ((cnt < 10) && (nq_ptr != dq_ptr));
+
+ cnt = 0;
+ do {
+ reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG);
+ inflight = FIELD_GET(CPT_LF_INPROG_INFLIGHT, reg_val);
+ grb_cnt = FIELD_GET(CPT_LF_INPROG_GRB_CNT, reg_val);
+ gwb_cnt = FIELD_GET(CPT_LF_INPROG_GWB_CNT, reg_val);
+ if (inflight == 0 && gwb_cnt < 40 &&
+ (grb_cnt == 0 || grb_cnt == 40))
+ cnt++;
+ else
+ cnt = 0;
+ } while (cnt < 10);
+}
+
+/* Allocate memory for CPT outbound Instruction queue.
+ * Instruction queue memory format is:
+ * -----------------------------
+ * | Instruction Group memory |
+ * | (CPT_LF_Q_SIZE[SIZE_DIV40] |
+ * | x 16 Bytes) |
+ * | |
+ * ----------------------------- <-- CPT_LF_Q_BASE[ADDR]
+ * | Flow Control (128 Bytes) |
+ * | |
+ * -----------------------------
+ * | Instruction Memory |
+ * | (CPT_LF_Q_SIZE[SIZE_DIV40] |
+ * | x 40 x 64 Bytes) |
+ * | |
+ * -----------------------------
+ */
+static int cn10k_outb_cptlf_iq_alloc(struct otx2_nic *pf)
+{
+ struct cn10k_cpt_inst_queue *iq = &pf->ipsec.iq;
+
+ iq->size = CN10K_CPT_INST_QLEN_BYTES + CN10K_CPT_Q_FC_LEN +
+ CN10K_CPT_INST_GRP_QLEN_BYTES + OTX2_ALIGN;
+
+ iq->real_vaddr = dma_alloc_coherent(pf->dev, iq->size,
+ &iq->real_dma_addr, GFP_KERNEL);
+ if (!iq->real_vaddr)
+ return -ENOMEM;
+
+ /* iq->vaddr/dma_addr points to Flow Control location */
+ iq->vaddr = iq->real_vaddr + CN10K_CPT_INST_GRP_QLEN_BYTES;
+ iq->dma_addr = iq->real_dma_addr + CN10K_CPT_INST_GRP_QLEN_BYTES;
+
+ /* Align pointers */
+ iq->vaddr = PTR_ALIGN(iq->vaddr, OTX2_ALIGN);
+ iq->dma_addr = PTR_ALIGN(iq->dma_addr, OTX2_ALIGN);
+ return 0;
+}
+
+static void cn10k_outb_cptlf_iq_free(struct otx2_nic *pf)
+{
+ struct cn10k_cpt_inst_queue *iq = &pf->ipsec.iq;
+
+ if (iq->real_vaddr)
+ dma_free_coherent(pf->dev, iq->size, iq->real_vaddr,
+ iq->real_dma_addr);
+
+ iq->real_vaddr = NULL;
+ iq->vaddr = NULL;
+}
+
+static int cn10k_outb_cptlf_iq_init(struct otx2_nic *pf)
+{
+ u64 reg_val;
+ int ret;
+
+ /* Allocate Memory for CPT IQ */
+ ret = cn10k_outb_cptlf_iq_alloc(pf);
+ if (ret)
+ return ret;
+
+ /* Disable IQ */
+ cn10k_outb_cptlf_iq_disable(pf);
+
+ /* Set IQ base address */
+ otx2_write64(pf, CN10K_CPT_LF_Q_BASE, pf->ipsec.iq.dma_addr);
+
+ /* Set IQ size */
+ reg_val = FIELD_PREP(CPT_LF_Q_SIZE_DIV40, CN10K_CPT_SIZE_DIV40 +
+ CN10K_CPT_EXTRA_SIZE_DIV40);
+ otx2_write64(pf, CN10K_CPT_LF_Q_SIZE, reg_val);
+
+ return 0;
+}
+
+static int cn10k_outb_cptlf_init(struct otx2_nic *pf)
+{
+ int ret;
+
+ /* Initialize CPTLF Instruction Queue (IQ) */
+ ret = cn10k_outb_cptlf_iq_init(pf);
+ if (ret)
+ return ret;
+
+ /* Configure CPTLF for outbound ipsec offload */
+ ret = cn10k_outb_cptlf_config(pf);
+ if (ret)
+ goto iq_clean;
+
+ /* Enable CPTLF IQ */
+ cn10k_outb_cptlf_iq_enable(pf);
+ return 0;
+iq_clean:
+ cn10k_outb_cptlf_iq_free(pf);
+ return ret;
+}
+
+static int cn10k_outb_cpt_init(struct net_device *netdev)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ int ret;
+
+ /* Attach a CPT LF for outbound ipsec offload */
+ ret = cn10k_outb_cptlf_attach(pf);
+ if (ret)
+ return ret;
+
+ /* Allocate a CPT LF for outbound ipsec offload */
+ ret = cn10k_outb_cptlf_alloc(pf);
+ if (ret)
+ goto detach;
+
+ /* Initialize the CPTLF for outbound ipsec offload */
+ ret = cn10k_outb_cptlf_init(pf);
+ if (ret)
+ goto lf_free;
+
+ pf->ipsec.io_addr = (__force u64)otx2_get_regaddr(pf,
+ CN10K_CPT_LF_NQX(0));
+
+ /* Set ipsec offload enabled for this device */
+ pf->flags |= OTX2_FLAG_IPSEC_OFFLOAD_ENABLED;
+
+ cn10k_cpt_device_set_available(pf);
+ return 0;
+
+lf_free:
+ cn10k_outb_cptlf_free(pf);
+detach:
+ cn10k_outb_cptlf_detach(pf);
+ return ret;
+}
+
+static int cn10k_outb_cpt_clean(struct otx2_nic *pf)
+{
+ int ret;
+
+ if (!cn10k_cpt_device_set_inuse(pf)) {
+ netdev_err(pf->netdev, "CPT LF device unavailable\n");
+ return -ENODEV;
+ }
+
+ /* Set ipsec offload disabled for this device */
+ pf->flags &= ~OTX2_FLAG_IPSEC_OFFLOAD_ENABLED;
+
+ /* Disable CPTLF Instruction Queue (IQ) */
+ cn10k_outb_cptlf_iq_disable(pf);
+
+ /* Set IQ base address and size to 0 */
+ otx2_write64(pf, CN10K_CPT_LF_Q_BASE, 0);
+ otx2_write64(pf, CN10K_CPT_LF_Q_SIZE, 0);
+
+ /* Free CPTLF IQ */
+ cn10k_outb_cptlf_iq_free(pf);
+
+ /* Free and detach CPT LF */
+ cn10k_outb_cptlf_free(pf);
+ ret = cn10k_outb_cptlf_detach(pf);
+ if (ret)
+ netdev_err(pf->netdev, "Failed to detach CPT LF\n");
+
+ cn10k_cpt_device_set_unavailable(pf);
+ return ret;
+}
+
+static void cn10k_cpt_inst_flush(struct otx2_nic *pf, struct cpt_inst_s *inst,
+ u64 size)
+{
+ struct otx2_lmt_info *lmt_info;
+ u64 val = 0, tar_addr = 0;
+
+ lmt_info = per_cpu_ptr(pf->hw.lmt_info, smp_processor_id());
+ /* FIXME: val[0:10] LMT_ID.
+ * [12:15] no of LMTST - 1 in the burst.
+ * [19:63] data size of each LMTST in the burst except first.
+ */
+ val = (lmt_info->lmt_id & 0x7FF);
+ /* Target address for LMTST flush tells HW how many 128bit
+ * words are present.
+ * tar_addr[6:4] size of first LMTST - 1 in units of 128b.
+ */
+ tar_addr |= pf->ipsec.io_addr | (((size / 16) - 1) & 0x7) << 4;
+ dma_wmb();
+ memcpy((u64 *)lmt_info->lmt_addr, inst, size);
+ cn10k_lmt_flush(val, tar_addr);
+}
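+
+/* Illustration of the LMTST dispatch above, assuming a single 64-byte
+ * CPT_INST_S (the only case in this file):
+ * - val carries just the LMT_ID in bits [10:0]; the burst-count and
+ *   per-LMTST size fields stay zero for a one-LMTST burst.
+ * - tar_addr is the CPT_LF_NQ(0) I/O address with ((64 / 16) - 1) = 3
+ *   in bits [6:4], i.e. four 128-bit words follow.
+ */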
+
+static int cn10k_wait_for_cpt_response(struct otx2_nic *pf,
+ struct cpt_res_s *res)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(100);
+ u64 *completion_ptr = (u64 *)res;
+
+ do {
+ if (time_after(jiffies, timeout)) {
+ netdev_err(pf->netdev, "CPT response timeout\n");
+ return -EBUSY;
+ }
+ } while ((READ_ONCE(*completion_ptr) & CN10K_CPT_COMP_E_MASK) ==
+ CN10K_CPT_COMP_E_NOTDONE);
+
+ if (!(res->compcode == CN10K_CPT_COMP_E_GOOD ||
+ res->compcode == CN10K_CPT_COMP_E_WARN) || res->uc_compcode) {
+ netdev_err(pf->netdev, "compcode=%x doneint=%x\n",
+ res->compcode, res->doneint);
+ netdev_err(pf->netdev, "uc_compcode=%x uc_info=%llx esn=%llx\n",
+ res->uc_compcode, (u64)res->uc_info, res->esn);
+ }
+ return 0;
+}
+
+static int cn10k_outb_write_sa(struct otx2_nic *pf, struct qmem *sa_info)
+{
+ dma_addr_t res_iova, dptr_iova, sa_iova;
+ struct cn10k_tx_sa_s *sa_dptr;
+ struct cpt_inst_s inst = {};
+ struct cpt_res_s *res;
+ u32 sa_size, off;
+ u64 *sptr, *dptr;
+ u64 reg_val;
+ int ret;
+
+ sa_iova = sa_info->iova;
+ if (!sa_iova)
+ return -EINVAL;
+
+ res = dma_alloc_coherent(pf->dev, sizeof(struct cpt_res_s),
+ &res_iova, GFP_ATOMIC);
+ if (!res)
+ return -ENOMEM;
+
+ sa_size = sizeof(struct cn10k_tx_sa_s);
+ sa_dptr = dma_alloc_coherent(pf->dev, sa_size, &dptr_iova, GFP_ATOMIC);
+ if (!sa_dptr) {
+ dma_free_coherent(pf->dev, sizeof(struct cpt_res_s), res,
+ res_iova);
+ return -ENOMEM;
+ }
+
+ sptr = (__force u64 *)sa_info->base;
+ dptr = (__force u64 *)sa_dptr;
+ for (off = 0; off < (sa_size / 8); off++)
+ *(dptr + off) = (__force u64)cpu_to_be64(*(sptr + off));
+
+ res->compcode = CN10K_CPT_COMP_E_NOTDONE;
+ inst.res_addr = res_iova;
+ inst.dptr = (u64)dptr_iova;
+ inst.param2 = sa_size >> 3;
+ inst.dlen = sa_size;
+ inst.opcode_major = CN10K_IPSEC_MAJOR_OP_WRITE_SA;
+ inst.opcode_minor = CN10K_IPSEC_MINOR_OP_WRITE_SA;
+ inst.cptr = sa_iova;
+ inst.ctx_val = 1;
+ inst.egrp = CN10K_DEF_CPT_IPSEC_EGRP;
+
+ /* Check if CPT-LF available */
+ if (!cn10k_cpt_device_set_inuse(pf)) {
+ ret = -ENODEV;
+ goto free_mem;
+ }
+
+ cn10k_cpt_inst_flush(pf, &inst, sizeof(struct cpt_inst_s));
+ dma_wmb();
+ ret = cn10k_wait_for_cpt_response(pf, res);
+ if (ret)
+ goto set_available;
+
+ /* Trigger CTX flush to write dirty data back to DRAM */
+ reg_val = FIELD_PREP(CPT_LF_CTX_FLUSH_CPTR, sa_iova >> 7);
+ otx2_write64(pf, CN10K_CPT_LF_CTX_FLUSH, reg_val);
+
+set_available:
+ cn10k_cpt_device_set_available(pf);
+free_mem:
+ dma_free_coherent(pf->dev, sa_size, sa_dptr, dptr_iova);
+ dma_free_coherent(pf->dev, sizeof(struct cpt_res_s), res, res_iova);
+ return ret;
+}
+
+static int cn10k_ipsec_get_hw_ctx_offset(void)
+{
+ /* Offset of the hardware context within the SA, in 8-byte words */
+ return (offsetof(struct cn10k_tx_sa_s, hw_ctx) / sizeof(u64)) & 0x7F;
+}
+
+static int cn10k_ipsec_get_ctx_push_size(void)
+{
+ /* Context push size, rounded up to a multiple of 8 bytes */
+ return (roundup(offsetof(struct cn10k_tx_sa_s, hw_ctx), 8) / 8) & 0x7F;
+}
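+
+/* Worked example, assuming the struct cn10k_tx_sa_s layout in
+ * cn10k_ipsec.h: hw_ctx starts at word 31 (byte offset 248), so
+ * cn10k_ipsec_get_hw_ctx_offset() returns 31 and, since 248 is already
+ * 8-byte aligned, cn10k_ipsec_get_ctx_push_size() returns 31 as well.
+ */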
+
+static int cn10k_ipsec_get_aes_key_len(int key_len)
+{
+ /* key_len is aes key length in bytes */
+ switch (key_len) {
+ case 16:
+ return CN10K_IPSEC_SA_AES_KEY_LEN_128;
+ case 24:
+ return CN10K_IPSEC_SA_AES_KEY_LEN_192;
+ default:
+ return CN10K_IPSEC_SA_AES_KEY_LEN_256;
+ }
+}
+
+static void cn10k_outb_prepare_sa(struct xfrm_state *x,
+ struct cn10k_tx_sa_s *sa_entry)
+{
+ int key_len = (x->aead->alg_key_len + 7) / 8;
+ struct net_device *netdev = x->xso.dev;
+ u8 *key = x->aead->alg_key;
+ struct otx2_nic *pf;
+ u32 *tmp_salt;
+ u64 *tmp_key;
+ int idx;
+
+ memset(sa_entry, 0, sizeof(struct cn10k_tx_sa_s));
+
+ /* Context size, in units of 128 bytes */
+ pf = netdev_priv(netdev);
+ sa_entry->ctx_size = (pf->ipsec.sa_size / OTX2_ALIGN) & 0xF;
+ sa_entry->hw_ctx_off = cn10k_ipsec_get_hw_ctx_offset();
+ sa_entry->ctx_push_size = cn10k_ipsec_get_ctx_push_size();
+
+ /* Ucode to skip two words of CPT_CTX_HW_S */
+ sa_entry->ctx_hdr_size = 1;
+
+ /* Allow Atomic operation (AOP) */
+ sa_entry->aop_valid = 1;
+
+ /* Outbound ESP in transport/tunnel mode with AES-GCM encryption */
+ sa_entry->sa_dir = CN10K_IPSEC_SA_DIR_OUTB;
+ sa_entry->ipsec_protocol = CN10K_IPSEC_SA_IPSEC_PROTO_ESP;
+ sa_entry->enc_type = CN10K_IPSEC_SA_ENCAP_TYPE_AES_GCM;
+ sa_entry->iv_src = CN10K_IPSEC_SA_IV_SRC_PACKET;
+ if (x->props.mode == XFRM_MODE_TUNNEL)
+ sa_entry->ipsec_mode = CN10K_IPSEC_SA_IPSEC_MODE_TUNNEL;
+ else
+ sa_entry->ipsec_mode = CN10K_IPSEC_SA_IPSEC_MODE_TRANSPORT;
+
+ /* Last 4 bytes are salt */
+ key_len -= 4;
+ sa_entry->aes_key_len = cn10k_ipsec_get_aes_key_len(key_len);
+ memcpy(sa_entry->cipher_key, key, key_len);
+ tmp_key = (u64 *)sa_entry->cipher_key;
+
+ for (idx = 0; idx < key_len / 8; idx++)
+ tmp_key[idx] = (__force u64)cpu_to_be64(tmp_key[idx]);
+
+ memcpy(&sa_entry->iv_gcm_salt, key + key_len, 4);
+ tmp_salt = (u32 *)&sa_entry->iv_gcm_salt;
+ *tmp_salt = (__force u32)cpu_to_be32(*tmp_salt);
+
+ /* Write SA context data to memory before enabling */
+ wmb();
+
+ /* Enable SA */
+ sa_entry->sa_valid = 1;
+}
+
+static int cn10k_ipsec_validate_state(struct xfrm_state *x,
+ struct netlink_ext_ack *extack)
+{
+ if (x->props.aalgo != SADB_AALG_NONE) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload authenticated xfrm states");
+ return -EINVAL;
+ }
+ if (x->props.ealgo != SADB_X_EALG_AES_GCM_ICV16) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only AES-GCM-ICV16 xfrm state may be offloaded");
+ return -EINVAL;
+ }
+ if (x->props.calgo != SADB_X_CALG_NONE) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload compressed xfrm states");
+ return -EINVAL;
+ }
+ if (x->props.flags & XFRM_STATE_ESN) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload ESN xfrm states");
+ return -EINVAL;
+ }
+ if (x->props.family != AF_INET && x->props.family != AF_INET6) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only IPv4/v6 xfrm states may be offloaded");
+ return -EINVAL;
+ }
+ if (x->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload other than crypto-mode");
+ return -EINVAL;
+ }
+ if (x->props.mode != XFRM_MODE_TRANSPORT &&
+ x->props.mode != XFRM_MODE_TUNNEL) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only tunnel/transport xfrm states may be offloaded");
+ return -EINVAL;
+ }
+ if (x->id.proto != IPPROTO_ESP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only ESP xfrm state may be offloaded");
+ return -EINVAL;
+ }
+ if (x->encap) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Encapsulated xfrm state may not be offloaded");
+ return -EINVAL;
+ }
+ if (!x->aead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload xfrm states without aead");
+ return -EINVAL;
+ }
+
+ if (x->aead->alg_icv_len != 128) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload xfrm states with AEAD ICV length other than 128bit");
+ return -EINVAL;
+ }
+ if (x->aead->alg_key_len != 128 + 32 &&
+ x->aead->alg_key_len != 192 + 32 &&
+ x->aead->alg_key_len != 256 + 32) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload xfrm states with AEAD key length other than 128/192/256bit");
+ return -EINVAL;
+ }
+ if (x->tfcpad) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload xfrm states with tfc padding");
+ return -EINVAL;
+ }
+ if (!x->geniv) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload xfrm states without geniv");
+ return -EINVAL;
+ }
+ if (strcmp(x->geniv, "seqiv")) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload xfrm states with geniv other than seqiv");
+ return -EINVAL;
+ }
+ return 0;
+}
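+
+/* For illustration only: an xfrm state resembling the one below should
+ * pass the checks above (crypto offload, ESP, AES-GCM with 128-bit ICV,
+ * seqiv). Addresses, device name, SPI and key are placeholders.
+ *
+ *  ip xfrm state add src 10.0.0.1 dst 10.0.0.2 proto esp spi 0x100 \
+ *      mode transport aead 'rfc4106(gcm(aes))' \
+ *      0x0102030405060708090a0b0c0d0e0f1011121314 128 \
+ *      offload dev eth0 dir out
+ */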
+
+static int cn10k_ipsec_inb_add_state(struct xfrm_state *x,
+ struct netlink_ext_ack *extack)
+{
+ NL_SET_ERR_MSG_MOD(extack, "xfrm inbound offload not supported");
+ return -EOPNOTSUPP;
+}
+
+static int cn10k_ipsec_outb_add_state(struct net_device *dev,
+ struct xfrm_state *x,
+ struct netlink_ext_ack *extack)
+{
+ struct cn10k_tx_sa_s *sa_entry;
+ struct qmem *sa_info;
+ struct otx2_nic *pf;
+ int err;
+
+ err = cn10k_ipsec_validate_state(x, extack);
+ if (err)
+ return err;
+
+ pf = netdev_priv(dev);
+
+ err = qmem_alloc(pf->dev, &sa_info, pf->ipsec.sa_size, OTX2_ALIGN);
+ if (err)
+ return err;
+
+ sa_entry = (struct cn10k_tx_sa_s *)sa_info->base;
+ cn10k_outb_prepare_sa(x, sa_entry);
+
+ err = cn10k_outb_write_sa(pf, sa_info);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Error writing outbound SA");
+ qmem_free(pf->dev, sa_info);
+ return err;
+ }
+
+ x->xso.offload_handle = (unsigned long)sa_info;
+ /* Enable static branch when first SA setup */
+ if (!pf->ipsec.outb_sa_count)
+ static_branch_enable(&cn10k_ipsec_sa_enabled);
+ pf->ipsec.outb_sa_count++;
+ return 0;
+}
+
+static int cn10k_ipsec_add_state(struct net_device *dev,
+ struct xfrm_state *x,
+ struct netlink_ext_ack *extack)
+{
+ if (x->xso.dir == XFRM_DEV_OFFLOAD_IN)
+ return cn10k_ipsec_inb_add_state(x, extack);
+ else
+ return cn10k_ipsec_outb_add_state(dev, x, extack);
+}
+
+static void cn10k_ipsec_del_state(struct net_device *dev, struct xfrm_state *x)
+{
+ struct cn10k_tx_sa_s *sa_entry;
+ struct qmem *sa_info;
+ struct otx2_nic *pf;
+ int err;
+
+ if (x->xso.dir == XFRM_DEV_OFFLOAD_IN)
+ return;
+
+ pf = netdev_priv(dev);
+
+ sa_info = (struct qmem *)x->xso.offload_handle;
+ sa_entry = (struct cn10k_tx_sa_s *)sa_info->base;
+ memset(sa_entry, 0, sizeof(struct cn10k_tx_sa_s));
+ /* Disable SA in CPT h/w */
+ sa_entry->ctx_push_size = cn10k_ipsec_get_ctx_push_size();
+ sa_entry->ctx_size = (pf->ipsec.sa_size / OTX2_ALIGN) & 0xF;
+ sa_entry->aop_valid = 1;
+
+ err = cn10k_outb_write_sa(pf, sa_info);
+ if (err)
+ netdev_err(dev, "Error (%d) deleting SA\n", err);
+
+ x->xso.offload_handle = 0;
+ qmem_free(pf->dev, sa_info);
+
+ /* If no SAs remain, update netdev features to reflect the potential
+ * change in NETIF_F_HW_ESP.
+ */
+ if (!--pf->ipsec.outb_sa_count)
+ queue_work(pf->ipsec.sa_workq, &pf->ipsec.sa_work);
+}
+
+static const struct xfrmdev_ops cn10k_ipsec_xfrmdev_ops = {
+ .xdo_dev_state_add = cn10k_ipsec_add_state,
+ .xdo_dev_state_delete = cn10k_ipsec_del_state,
+};
+
+static void cn10k_ipsec_sa_wq_handler(struct work_struct *work)
+{
+ struct cn10k_ipsec *ipsec = container_of(work, struct cn10k_ipsec,
+ sa_work);
+ struct otx2_nic *pf = container_of(ipsec, struct otx2_nic, ipsec);
+
+ /* Disable static branch when no more SA enabled */
+ static_branch_disable(&cn10k_ipsec_sa_enabled);
+ rtnl_lock();
+ netdev_update_features(pf->netdev);
+ rtnl_unlock();
+}
+
+int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+
+ /* IPsec offload supported on cn10k */
+ if (!is_dev_support_ipsec_offload(pf->pdev))
+ return -EOPNOTSUPP;
+
+ /* Initialize CPT for outbound ipsec offload */
+ if (enable)
+ return cn10k_outb_cpt_init(netdev);
+
+ /* Don't do CPT cleanup if SA installed */
+ if (pf->ipsec.outb_sa_count) {
+ netdev_err(pf->netdev, "SAs are still installed on this device\n");
+ return -EBUSY;
+ }
+
+ return cn10k_outb_cpt_clean(pf);
+}
+
+int cn10k_ipsec_init(struct net_device *netdev)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ u32 sa_size;
+
+ if (!is_dev_support_ipsec_offload(pf->pdev))
+ return 0;
+
+ /* Round up each SA entry size to a multiple of 128 bytes */
+ sa_size = sizeof(struct cn10k_tx_sa_s) % OTX2_ALIGN ?
+ (sizeof(struct cn10k_tx_sa_s) / OTX2_ALIGN + 1) *
+ OTX2_ALIGN : sizeof(struct cn10k_tx_sa_s);
+ pf->ipsec.sa_size = sa_size;
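+ /* For the struct cn10k_tx_sa_s layout in cn10k_ipsec.h (37 64-bit
+ * words, i.e. 296 bytes), sa_size rounds up to 384 bytes here and
+ * ctx_size in cn10k_outb_prepare_sa() becomes 384 / 128 = 3.
+ */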
+
+ INIT_WORK(&pf->ipsec.sa_work, cn10k_ipsec_sa_wq_handler);
+ pf->ipsec.sa_workq = alloc_workqueue("cn10k_ipsec_sa_workq",
+ WQ_PERCPU, 0);
+ if (!pf->ipsec.sa_workq) {
+ netdev_err(pf->netdev, "SA alloc workqueue failed\n");
+ return -ENOMEM;
+ }
+
+ /* Set xfrm device ops */
+ netdev->xfrmdev_ops = &cn10k_ipsec_xfrmdev_ops;
+ netdev->hw_features |= NETIF_F_HW_ESP;
+ netdev->hw_enc_features |= NETIF_F_HW_ESP;
+
+ cn10k_cpt_device_set_unavailable(pf);
+ return 0;
+}
+EXPORT_SYMBOL(cn10k_ipsec_init);
+
+void cn10k_ipsec_clean(struct otx2_nic *pf)
+{
+ if (!is_dev_support_ipsec_offload(pf->pdev))
+ return;
+
+ if (!(pf->flags & OTX2_FLAG_IPSEC_OFFLOAD_ENABLED))
+ return;
+
+ if (pf->ipsec.sa_workq) {
+ destroy_workqueue(pf->ipsec.sa_workq);
+ pf->ipsec.sa_workq = NULL;
+ }
+
+ cn10k_outb_cpt_clean(pf);
+}
+EXPORT_SYMBOL(cn10k_ipsec_clean);
+
+static u16 cn10k_ipsec_get_ip_data_len(struct xfrm_state *x,
+ struct sk_buff *skb)
+{
+ struct ipv6hdr *ipv6h;
+ struct iphdr *iph;
+ u8 *src;
+
+ src = (u8 *)skb->data + ETH_HLEN;
+
+ if (x->props.family == AF_INET) {
+ iph = (struct iphdr *)src;
+ return ntohs(iph->tot_len);
+ }
+
+ ipv6h = (struct ipv6hdr *)src;
+ return ntohs(ipv6h->payload_len) + sizeof(struct ipv6hdr);
+}
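+
+/* Note: IPv4's tot_len already includes the IP header, while IPv6's
+ * payload_len excludes the fixed 40-byte header, hence the addition
+ * of sizeof(struct ipv6hdr) above.
+ */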
+
+/* Prepare CPT and NIX SQE scatter/gather subdescriptor structure.
+ * The NIX and CPT SG subdescriptors are the same size.
+ * Layout of a NIX SQE and CPT SG entry:
+ * -----------------------------
+ * | CPT Scatter Gather |
+ * | (SQE SIZE) |
+ * | |
+ * -----------------------------
+ * | NIX SQE |
+ * | (SQE SIZE) |
+ * | |
+ * -----------------------------
+ */
+bool otx2_sqe_add_sg_ipsec(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
+ struct sk_buff *skb, int num_segs, int *offset)
+{
+ struct cpt_sg_s *cpt_sg = NULL;
+ struct nix_sqe_sg_s *sg = NULL;
+ u64 dma_addr, *iova = NULL;
+ u64 *cpt_iova = NULL;
+ u16 *sg_lens = NULL;
+ int seg, len;
+
+ sq->sg[sq->head].num_segs = 0;
+ cpt_sg = (struct cpt_sg_s *)(sq->sqe_base - sq->sqe_size);
+
+ for (seg = 0; seg < num_segs; seg++) {
+ if ((seg % MAX_SEGS_PER_SG) == 0) {
+ sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
+ sg->ld_type = NIX_SEND_LDTYPE_LDD;
+ sg->subdc = NIX_SUBDC_SG;
+ sg->segs = 0;
+ sg_lens = (void *)sg;
+ iova = (void *)sg + sizeof(*sg);
+ /* The next subdescriptor always starts at a 16-byte boundary,
+ * so when sg->segs is 2 or 3, advance the offset by 16 bytes.
+ */
+ if ((num_segs - seg) >= (MAX_SEGS_PER_SG - 1))
+ *offset += sizeof(*sg) + (3 * sizeof(u64));
+ else
+ *offset += sizeof(*sg) + sizeof(u64);
+
+ cpt_sg += (seg / MAX_SEGS_PER_SG) * 4;
+ cpt_iova = (void *)cpt_sg + sizeof(*cpt_sg);
+ }
+ dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len);
+ if (dma_mapping_error(pfvf->dev, dma_addr))
+ return false;
+
+ sg_lens[seg % MAX_SEGS_PER_SG] = len;
+ sg->segs++;
+ *iova++ = dma_addr;
+ *cpt_iova++ = dma_addr;
+
+ /* Save DMA mapping info for later unmapping */
+ sq->sg[sq->head].dma_addr[seg] = dma_addr;
+ sq->sg[sq->head].size[seg] = len;
+ sq->sg[sq->head].num_segs++;
+
+ *cpt_sg = *(struct cpt_sg_s *)sg;
+ cpt_sg->rsvd_63_50 = 0;
+ }
+
+ sq->sg[sq->head].skb = (u64)skb;
+ return true;
+}
+
+static u16 cn10k_ipsec_get_param1(u8 iv_offset)
+{
+ u16 param1_val;
+
+ /* Set Crypto mode, disable L3/L4 checksum */
+ param1_val = CN10K_IPSEC_INST_PARAM1_DIS_L4_CSUM |
+ CN10K_IPSEC_INST_PARAM1_DIS_L3_CSUM;
+ param1_val |= (u16)iv_offset << CN10K_IPSEC_INST_PARAM1_IV_OFFSET_SHIFT;
+ return param1_val;
+}
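+
+/* Worked example: for an IPv4 packet in cn10k_ipsec_transmit() below,
+ * auth_offset = 20 and iv_offset = 28, so param1 =
+ * (28 << 8) | DIS_L4_CSUM | DIS_L3_CSUM = 0x1c03.
+ */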
+
+bool cn10k_ipsec_transmit(struct otx2_nic *pf, struct netdev_queue *txq,
+ struct otx2_snd_queue *sq, struct sk_buff *skb,
+ int num_segs, int size)
+{
+ struct cpt_inst_s inst;
+ struct cpt_res_s *res;
+ struct xfrm_state *x;
+ struct qmem *sa_info;
+ dma_addr_t dptr_iova;
+ struct sec_path *sp;
+ u8 encap_offset;
+ u8 auth_offset;
+ u8 gthr_size;
+ u8 iv_offset;
+ u16 dlen;
+
+ /* Check for IPSEC offload enabled */
+ if (!(pf->flags & OTX2_FLAG_IPSEC_OFFLOAD_ENABLED))
+ goto drop;
+
+ sp = skb_sec_path(skb);
+ if (unlikely(!sp->len))
+ goto drop;
+
+ x = xfrm_input_state(skb);
+ if (unlikely(!x))
+ goto drop;
+
+ if (x->props.mode != XFRM_MODE_TRANSPORT &&
+ x->props.mode != XFRM_MODE_TUNNEL)
+ goto drop;
+
+ dlen = cn10k_ipsec_get_ip_data_len(x, skb);
+ if (dlen == 0) {
+ if (netif_msg_tx_err(pf))
+ netdev_err(pf->netdev, "Invalid IP header, ip-length zero\n");
+ goto drop;
+ }
+
+ /* Check for valid SA context */
+ sa_info = (struct qmem *)x->xso.offload_handle;
+ if (!sa_info)
+ goto drop;
+
+ memset(&inst, 0, sizeof(struct cpt_inst_s));
+
+ /* Get authentication offset */
+ if (x->props.family == AF_INET)
+ auth_offset = sizeof(struct iphdr);
+ else
+ auth_offset = sizeof(struct ipv6hdr);
+
+ /* IV offset is after ESP header */
+ iv_offset = auth_offset + sizeof(struct ip_esp_hdr);
+ /* Encap will start after IV */
+ encap_offset = iv_offset + GCM_RFC4106_IV_SIZE;
+
+ /* CPT Instruction word-1 */
+ res = (struct cpt_res_s *)(sq->cpt_resp->base + (64 * sq->head));
+ res->compcode = 0;
+ inst.res_addr = sq->cpt_resp->iova + (64 * sq->head);
+
+ /* CPT Instruction word-2 */
+ inst.rvu_pf_func = pf->pcifunc;
+
+ /* CPT Instruction word-3:
+ * Set QORD to force CPT_RES_S write completion
+ */
+ inst.qord = 1;
+
+ /* CPT Instruction word-4 */
+ /* inst.dlen should not include ICV length */
+ inst.dlen = dlen + ETH_HLEN - (x->aead->alg_icv_len / 8);
+ inst.opcode_major = CN10K_IPSEC_MAJOR_OP_OUTB_IPSEC;
+ inst.param1 = cn10k_ipsec_get_param1(iv_offset);
+
+ inst.param2 = encap_offset <<
+ CN10K_IPSEC_INST_PARAM2_ENC_DATA_OFFSET_SHIFT;
+ inst.param2 |= (u16)auth_offset <<
+ CN10K_IPSEC_INST_PARAM2_AUTH_DATA_OFFSET_SHIFT;
+
+ /* CPT Instruction word-5 */
+ gthr_size = num_segs / MAX_SEGS_PER_SG;
+ gthr_size = (num_segs % MAX_SEGS_PER_SG) ? gthr_size + 1 : gthr_size;
+
+ gthr_size &= 0xF;
+ dptr_iova = (sq->sqe_ring->iova + (sq->head * (sq->sqe_size * 2)));
+ inst.dptr = dptr_iova | ((u64)gthr_size << 60);
+
+ /* CPT Instruction word-6 */
+ inst.rptr = inst.dptr;
+
+ /* CPT Instruction word-7 */
+ inst.cptr = sa_info->iova;
+ inst.ctx_val = 1;
+ inst.egrp = CN10K_DEF_CPT_IPSEC_EGRP;
+
+ /* CPT Instruction word-0 */
+ inst.nixtxl = (size / 16) - 1;
+ inst.dat_offset = ETH_HLEN;
+ inst.nixtx_offset = sq->sqe_size;
+
+ netdev_tx_sent_queue(txq, skb->len);
+
+ /* Finally Flush the CPT instruction */
+ sq->head++;
+ sq->head &= (sq->sqe_cnt - 1);
+ cn10k_cpt_inst_flush(pf, &inst, sizeof(struct cpt_inst_s));
+ return true;
+drop:
+ dev_kfree_skb_any(skb);
+ return false;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h
new file mode 100644
index 000000000000..43fbce0d6039
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h
@@ -0,0 +1,265 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell IPSEC offload driver
+ *
+ * Copyright (C) 2024 Marvell.
+ */
+
+#ifndef CN10K_IPSEC_H
+#define CN10K_IPSEC_H
+
+#include <linux/types.h>
+
+DECLARE_STATIC_KEY_FALSE(cn10k_ipsec_sa_enabled);
+
+/* CPT instruction size in bytes */
+#define CN10K_CPT_INST_SIZE 64
+
+/* CPT instruction (CPT_INST_S) queue length */
+#define CN10K_CPT_INST_QLEN 8200
+
+/* CPT instruction queue size passed to HW is in units of
+ * 40*CPT_INST_S messages.
+ */
+#define CN10K_CPT_SIZE_DIV40 (CN10K_CPT_INST_QLEN / 40)
+
+/* CPT needs 320 free entries */
+#define CN10K_CPT_INST_QLEN_EXTRA_BYTES (320 * CN10K_CPT_INST_SIZE)
+#define CN10K_CPT_EXTRA_SIZE_DIV40 (320 / 40)
+
+/* CPT instruction queue length in bytes */
+#define CN10K_CPT_INST_QLEN_BYTES \
+ ((CN10K_CPT_SIZE_DIV40 * 40 * CN10K_CPT_INST_SIZE) + \
+ CN10K_CPT_INST_QLEN_EXTRA_BYTES)
+
+/* CPT instruction group queue length in bytes */
+#define CN10K_CPT_INST_GRP_QLEN_BYTES \
+ ((CN10K_CPT_SIZE_DIV40 + CN10K_CPT_EXTRA_SIZE_DIV40) * 16)
+
+/* CPT FC length in bytes */
+#define CN10K_CPT_Q_FC_LEN 128
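+
+/* With the defaults above: SIZE_DIV40 = 8200 / 40 = 205 and
+ * EXTRA_SIZE_DIV40 = 8, so the instruction area is
+ * (205 * 40 + 320) * 64 = 545280 bytes, the group area is
+ * (205 + 8) * 16 = 3408 bytes, plus this 128-byte flow-control region
+ * and OTX2_ALIGN padding in cn10k_outb_cptlf_iq_alloc().
+ */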
+
+/* Default CPT engine group for ipsec offload */
+#define CN10K_DEF_CPT_IPSEC_EGRP 1
+
+/* CN10K CPT LF registers */
+#define CPT_LFBASE (BLKTYPE_CPT << RVU_FUNC_BLKADDR_SHIFT)
+#define CN10K_CPT_LF_CTL (CPT_LFBASE | 0x10)
+#define CN10K_CPT_LF_INPROG (CPT_LFBASE | 0x40)
+#define CN10K_CPT_LF_Q_BASE (CPT_LFBASE | 0xf0)
+#define CN10K_CPT_LF_Q_SIZE (CPT_LFBASE | 0x100)
+#define CN10K_CPT_LF_Q_INST_PTR (CPT_LFBASE | 0x110)
+#define CN10K_CPT_LF_Q_GRP_PTR (CPT_LFBASE | 0x120)
+#define CN10K_CPT_LF_NQX(a) (CPT_LFBASE | 0x400 | (a) << 3)
+#define CN10K_CPT_LF_CTX_FLUSH (CPT_LFBASE | 0x510)
+
+/* IPSEC Instruction opcodes */
+#define CN10K_IPSEC_MAJOR_OP_WRITE_SA 0x01UL
+#define CN10K_IPSEC_MINOR_OP_WRITE_SA 0x09UL
+#define CN10K_IPSEC_MAJOR_OP_OUTB_IPSEC 0x2AUL
+
+enum cn10k_cpt_comp_e {
+ CN10K_CPT_COMP_E_NOTDONE = 0x00,
+ CN10K_CPT_COMP_E_GOOD = 0x01,
+ CN10K_CPT_COMP_E_FAULT = 0x02,
+ CN10K_CPT_COMP_E_HWERR = 0x04,
+ CN10K_CPT_COMP_E_INSTERR = 0x05,
+ CN10K_CPT_COMP_E_WARN = 0x06,
+ CN10K_CPT_COMP_E_MASK = 0x3F
+};
+
+struct cn10k_cpt_inst_queue {
+ u8 *vaddr;
+ u8 *real_vaddr;
+ dma_addr_t dma_addr;
+ dma_addr_t real_dma_addr;
+ u32 size;
+};
+
+enum cn10k_cpt_hw_state_e {
+ CN10K_CPT_HW_UNAVAILABLE,
+ CN10K_CPT_HW_AVAILABLE,
+ CN10K_CPT_HW_IN_USE
+};
+
+struct cn10k_ipsec {
+ /* Outbound CPT */
+ u64 io_addr;
+ atomic_t cpt_state;
+ struct cn10k_cpt_inst_queue iq;
+
+ /* SA info */
+ u32 sa_size;
+ u32 outb_sa_count;
+ struct work_struct sa_work;
+ struct workqueue_struct *sa_workq;
+};
+
+/* CN10K IPSEC Security Association (SA) */
+/* SA direction */
+#define CN10K_IPSEC_SA_DIR_INB 0
+#define CN10K_IPSEC_SA_DIR_OUTB 1
+/* SA protocol */
+#define CN10K_IPSEC_SA_IPSEC_PROTO_AH 0
+#define CN10K_IPSEC_SA_IPSEC_PROTO_ESP 1
+/* SA Encryption Type */
+#define CN10K_IPSEC_SA_ENCAP_TYPE_AES_GCM 5
+/* SA IPSEC mode Transport/Tunnel */
+#define CN10K_IPSEC_SA_IPSEC_MODE_TRANSPORT 0
+#define CN10K_IPSEC_SA_IPSEC_MODE_TUNNEL 1
+/* SA AES Key Length */
+#define CN10K_IPSEC_SA_AES_KEY_LEN_128 1
+#define CN10K_IPSEC_SA_AES_KEY_LEN_192 2
+#define CN10K_IPSEC_SA_AES_KEY_LEN_256 3
+/* IV Source */
+#define CN10K_IPSEC_SA_IV_SRC_COUNTER 0
+#define CN10K_IPSEC_SA_IV_SRC_PACKET 3
+
+struct cn10k_tx_sa_s {
+ u64 esn_en : 1; /* W0 */
+ u64 rsvd_w0_1_8 : 8;
+ u64 hw_ctx_off : 7;
+ u64 ctx_id : 16;
+ u64 rsvd_w0_32_47 : 16;
+ u64 ctx_push_size : 7;
+ u64 rsvd_w0_55 : 1;
+ u64 ctx_hdr_size : 2;
+ u64 aop_valid : 1;
+ u64 rsvd_w0_59 : 1;
+ u64 ctx_size : 4;
+ u64 w1; /* W1 */
+ u64 sa_valid : 1; /* W2 */
+ u64 sa_dir : 1;
+ u64 rsvd_w2_2_3 : 2;
+ u64 ipsec_mode : 1;
+ u64 ipsec_protocol : 1;
+ u64 aes_key_len : 2;
+ u64 enc_type : 3;
+ u64 rsvd_w2_11_19 : 9;
+ u64 iv_src : 2;
+ u64 rsvd_w2_22_31 : 10;
+ u64 rsvd_w2_32_63 : 32;
+ u64 w3; /* W3 */
+ u8 cipher_key[32]; /* W4 - W7 */
+ u32 rsvd_w8_0_31; /* W8 : IV */
+ u32 iv_gcm_salt;
+ u64 rsvd_w9_w30[22]; /* W9 - W30 */
+ u64 hw_ctx[6]; /* W31 - W36 */
+};
+
+/* CPT instruction parameter-1 */
+#define CN10K_IPSEC_INST_PARAM1_DIS_L4_CSUM 0x1
+#define CN10K_IPSEC_INST_PARAM1_DIS_L3_CSUM 0x2
+#define CN10K_IPSEC_INST_PARAM1_CRYPTO_MODE 0x20
+#define CN10K_IPSEC_INST_PARAM1_IV_OFFSET_SHIFT 8
+
+/* CPT instruction parameter-2 */
+#define CN10K_IPSEC_INST_PARAM2_ENC_DATA_OFFSET_SHIFT 0
+#define CN10K_IPSEC_INST_PARAM2_AUTH_DATA_OFFSET_SHIFT 8
+
+/* CPT Instruction Structure */
+struct cpt_inst_s {
+ u64 nixtxl : 3; /* W0 */
+ u64 doneint : 1;
+ u64 rsvd_w0_4_15 : 12;
+ u64 dat_offset : 8;
+ u64 ext_param1 : 8;
+ u64 nixtx_offset : 20;
+ u64 rsvd_w0_52_63 : 12;
+ u64 res_addr; /* W1 */
+ u64 tag : 32; /* W2 */
+ u64 tt : 2;
+ u64 grp : 10;
+ u64 rsvd_w2_44_47 : 4;
+ u64 rvu_pf_func : 16;
+ u64 qord : 1; /* W3 */
+ u64 rsvd_w3_1_2 : 2;
+ u64 wqe_ptr : 61;
+ u64 dlen : 16; /* W4 */
+ u64 param2 : 16;
+ u64 param1 : 16;
+ u64 opcode_major : 8;
+ u64 opcode_minor : 8;
+ u64 dptr; /* W5 */
+ u64 rptr; /* W6 */
+ u64 cptr : 60; /* W7 */
+ u64 ctx_val : 1;
+ u64 egrp : 3;
+};
+
+/* CPT Instruction Result Structure */
+struct cpt_res_s {
+ u64 compcode : 7; /* W0 */
+ u64 doneint : 1;
+ u64 uc_compcode : 8;
+ u64 uc_info : 48;
+ u64 esn; /* W1 */
+};
+
+/* CPT SG structure */
+struct cpt_sg_s {
+ u64 seg1_size : 16;
+ u64 seg2_size : 16;
+ u64 seg3_size : 16;
+ u64 segs : 2;
+ u64 rsvd_63_50 : 14;
+};
+
+/* CPT LF_INPROG Register */
+#define CPT_LF_INPROG_INFLIGHT GENMASK_ULL(8, 0)
+#define CPT_LF_INPROG_GRB_CNT GENMASK_ULL(39, 32)
+#define CPT_LF_INPROG_GWB_CNT GENMASK_ULL(47, 40)
+
+/* CPT LF_Q_GRP_PTR Register */
+#define CPT_LF_Q_GRP_PTR_DQ_PTR GENMASK_ULL(14, 0)
+#define CPT_LF_Q_GRP_PTR_NQ_PTR GENMASK_ULL(46, 32)
+
+/* CPT LF_Q_SIZE Register */
+#define CPT_LF_Q_BASE_ADDR GENMASK_ULL(52, 7)
+
+/* CPT LF_Q_SIZE Register */
+#define CPT_LF_Q_SIZE_DIV40 GENMASK_ULL(14, 0)
+
+/* CPT LF CTX Flush Register */
+#define CPT_LF_CTX_FLUSH_CPTR GENMASK_ULL(45, 0)
+
+#ifdef CONFIG_XFRM_OFFLOAD
+int cn10k_ipsec_init(struct net_device *netdev);
+void cn10k_ipsec_clean(struct otx2_nic *pf);
+int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable);
+bool otx2_sqe_add_sg_ipsec(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
+ struct sk_buff *skb, int num_segs, int *offset);
+bool cn10k_ipsec_transmit(struct otx2_nic *pf, struct netdev_queue *txq,
+ struct otx2_snd_queue *sq, struct sk_buff *skb,
+ int num_segs, int size);
+#else
+static inline __maybe_unused int cn10k_ipsec_init(struct net_device *netdev)
+{
+ return 0;
+}
+
+static inline __maybe_unused void cn10k_ipsec_clean(struct otx2_nic *pf)
+{
+}
+
+static inline __maybe_unused
+int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable)
+{
+ return 0;
+}
+
+static inline bool __maybe_unused
+otx2_sqe_add_sg_ipsec(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
+ struct sk_buff *skb, int num_segs, int *offset)
+{
+ return true;
+}
+
+static inline bool __maybe_unused
+cn10k_ipsec_transmit(struct otx2_nic *pf, struct netdev_queue *txq,
+ struct otx2_snd_queue *sq, struct sk_buff *skb,
+ int num_segs, int size)
+{
+ return true;
+}
+#endif
+#endif // CN10K_IPSEC_H
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
index 6cc7a78968fc..4c7e0f345cb5 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
@@ -133,9 +133,7 @@ static const char *rsrc_name(enum mcs_rsrc_type rsrc_type)
return "SA";
default:
return "Unknown";
- };
-
- return "Unknown";
+ }
}
static int cn10k_mcs_alloc_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir,
@@ -533,7 +531,8 @@ static int cn10k_mcs_write_tx_secy(struct otx2_nic *pfvf,
if (sw_tx_sc->encrypt)
sectag_tci |= (MCS_TCI_E | MCS_TCI_C);
- policy = FIELD_PREP(MCS_TX_SECY_PLCY_MTU, secy->netdev->mtu);
+ policy = FIELD_PREP(MCS_TX_SECY_PLCY_MTU,
+ pfvf->netdev->mtu + OTX2_ETH_HLEN);
/* Write SecTag excluding AN bits(1..0) */
policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_TCI, sectag_tci >> 2);
policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_OFFSET, tag_offset);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c
new file mode 100644
index 000000000000..a60f8cf53feb
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c
@@ -0,0 +1,450 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#include "otx2_common.h"
+#include "otx2_reg.h"
+#include "otx2_struct.h"
+#include "cn10k.h"
+
+/* CN20K mbox AF => PFx irq handler */
+irqreturn_t cn20k_pfaf_mbox_intr_handler(int irq, void *pf_irq)
+{
+ struct otx2_nic *pf = pf_irq;
+ struct mbox *mw = &pf->mbox;
+ struct otx2_mbox_dev *mdev;
+ struct otx2_mbox *mbox;
+ struct mbox_hdr *hdr;
+ u64 pf_trig_val;
+
+ pf_trig_val = otx2_read64(pf, RVU_PF_INT) & 0x3ULL;
+
+ /* Clear the IRQ */
+ otx2_write64(pf, RVU_PF_INT, pf_trig_val);
+
+ if (pf_trig_val & BIT_ULL(0)) {
+ mbox = &mw->mbox_up;
+ mdev = &mbox->dev[0];
+ otx2_sync_mbox_bbuf(mbox, 0);
+
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ if (hdr->num_msgs)
+ queue_work(pf->mbox_wq, &mw->mbox_up_wrk);
+
+ trace_otx2_msg_interrupt(pf->pdev, "UP message from AF to PF",
+ BIT_ULL(0));
+ }
+
+ if (pf_trig_val & BIT_ULL(1)) {
+ mbox = &mw->mbox;
+ mdev = &mbox->dev[0];
+ otx2_sync_mbox_bbuf(mbox, 0);
+
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ if (hdr->num_msgs)
+ queue_work(pf->mbox_wq, &mw->mbox_wrk);
+ trace_otx2_msg_interrupt(pf->pdev, "DOWN reply from AF to PF",
+ BIT_ULL(1));
+ }
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t cn20k_vfaf_mbox_intr_handler(int irq, void *vf_irq)
+{
+ struct otx2_nic *vf = vf_irq;
+ struct otx2_mbox_dev *mdev;
+ struct otx2_mbox *mbox;
+ struct mbox_hdr *hdr;
+ u64 vf_trig_val;
+
+ vf_trig_val = otx2_read64(vf, RVU_VF_INT) & 0x3ULL;
+ /* Clear the IRQ */
+ otx2_write64(vf, RVU_VF_INT, vf_trig_val);
+
+ /* Read latest mbox data */
+ smp_rmb();
+
+ if (vf_trig_val & BIT_ULL(1)) {
+ /* Check for PF => VF response messages */
+ mbox = &vf->mbox.mbox;
+ mdev = &mbox->dev[0];
+ otx2_sync_mbox_bbuf(mbox, 0);
+
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ if (hdr->num_msgs)
+ queue_work(vf->mbox_wq, &vf->mbox.mbox_wrk);
+
+ trace_otx2_msg_interrupt(mbox->pdev, "DOWN reply from PF0 to VF",
+ BIT_ULL(1));
+ }
+
+ if (vf_trig_val & BIT_ULL(0)) {
+ /* Check for PF => VF notification messages */
+ mbox = &vf->mbox.mbox_up;
+ mdev = &mbox->dev[0];
+ otx2_sync_mbox_bbuf(mbox, 0);
+
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ if (hdr->num_msgs)
+ queue_work(vf->mbox_wq, &vf->mbox.mbox_up_wrk);
+
+ trace_otx2_msg_interrupt(mbox->pdev, "UP message from PF0 to VF",
+ BIT_ULL(0));
+ }
+
+ return IRQ_HANDLED;
+}
+
+void cn20k_enable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
+{
+ /* Clear PF <=> VF mailbox IRQ */
+ otx2_write64(pf, RVU_MBOX_PF_VFPF_INTX(0), ~0ull);
+ otx2_write64(pf, RVU_MBOX_PF_VFPF_INTX(1), ~0ull);
+ otx2_write64(pf, RVU_MBOX_PF_VFPF1_INTX(0), ~0ull);
+ otx2_write64(pf, RVU_MBOX_PF_VFPF1_INTX(1), ~0ull);
+
+ /* Enable PF <=> VF mailbox IRQ */
+ otx2_write64(pf, RVU_MBOX_PF_VFPF_INT_ENA_W1SX(0), INTR_MASK(numvfs));
+ otx2_write64(pf, RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(0), INTR_MASK(numvfs));
+ if (numvfs > 64) {
+ numvfs -= 64;
+ otx2_write64(pf, RVU_MBOX_PF_VFPF_INT_ENA_W1SX(1),
+ INTR_MASK(numvfs));
+ otx2_write64(pf, RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(1),
+ INTR_MASK(numvfs));
+ }
+}
+
+void cn20k_disable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
+{
+ int vector, intr_vec, vec = 0;
+
+ /* Disable PF <=> VF mailbox IRQ */
+ otx2_write64(pf, RVU_MBOX_PF_VFPF_INT_ENA_W1CX(0), ~0ull);
+ otx2_write64(pf, RVU_MBOX_PF_VFPF_INT_ENA_W1CX(1), ~0ull);
+ otx2_write64(pf, RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(0), ~0ull);
+ otx2_write64(pf, RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(1), ~0ull);
+
+ otx2_write64(pf, RVU_MBOX_PF_VFPF_INTX(0), ~0ull);
+ otx2_write64(pf, RVU_MBOX_PF_VFPF1_INTX(0), ~0ull);
+
+ if (numvfs > 64) {
+ otx2_write64(pf, RVU_MBOX_PF_VFPF_INTX(1), ~0ull);
+ otx2_write64(pf, RVU_MBOX_PF_VFPF1_INTX(1), ~0ull);
+ }
+
+ for (intr_vec = RVU_MBOX_PF_INT_VEC_VFPF_MBOX0; intr_vec <=
+ RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1; intr_vec++, vec++) {
+ vector = pci_irq_vector(pf->pdev, intr_vec);
+ free_irq(vector, pf->hw.pfvf_irq_devid[vec]);
+ }
+}
+
+irqreturn_t cn20k_pfvf_mbox_intr_handler(int irq, void *pf_irq)
+{
+ struct pf_irq_data *irq_data = pf_irq;
+ struct otx2_nic *pf = irq_data->pf;
+ struct mbox *mbox;
+ u64 intr;
+
+ /* Sync with mbox memory region */
+ rmb();
+
+ /* Clear interrupts */
+ intr = otx2_read64(pf, irq_data->intr_status);
+ otx2_write64(pf, irq_data->intr_status, intr);
+ mbox = pf->mbox_pfvf;
+
+ if (intr)
+ trace_otx2_msg_interrupt(pf->pdev, "VF(s) to PF", intr);
+
+ irq_data->pf_queue_work_hdlr(mbox, pf->mbox_pfvf_wq, irq_data->start,
+ irq_data->mdevs, intr);
+
+ return IRQ_HANDLED;
+}
+
+int cn20k_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
+{
+ struct otx2_hw *hw = &pf->hw;
+ struct pf_irq_data *irq_data;
+ int intr_vec, ret, vec = 0;
+ char *irq_name;
+
+ /* irq data for 4 PF intr vectors */
+ irq_data = devm_kcalloc(pf->dev, 4,
+ sizeof(struct pf_irq_data), GFP_KERNEL);
+ if (!irq_data)
+ return -ENOMEM;
+
+ for (intr_vec = RVU_MBOX_PF_INT_VEC_VFPF_MBOX0; intr_vec <=
+ RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1; intr_vec++, vec++) {
+ switch (intr_vec) {
+ case RVU_MBOX_PF_INT_VEC_VFPF_MBOX0:
+ irq_data[vec].intr_status =
+ RVU_MBOX_PF_VFPF_INTX(0);
+ irq_data[vec].start = 0;
+ irq_data[vec].mdevs = 64;
+ break;
+ case RVU_MBOX_PF_INT_VEC_VFPF_MBOX1:
+ irq_data[vec].intr_status =
+ RVU_MBOX_PF_VFPF_INTX(1);
+ irq_data[vec].start = 64;
+ irq_data[vec].mdevs = 96;
+ break;
+ case RVU_MBOX_PF_INT_VEC_VFPF1_MBOX0:
+ irq_data[vec].intr_status =
+ RVU_MBOX_PF_VFPF1_INTX(0);
+ irq_data[vec].start = 0;
+ irq_data[vec].mdevs = 64;
+ break;
+ case RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1:
+ irq_data[vec].intr_status =
+ RVU_MBOX_PF_VFPF1_INTX(1);
+ irq_data[vec].start = 64;
+ irq_data[vec].mdevs = 96;
+ break;
+ }
+ irq_data[vec].pf_queue_work_hdlr = otx2_queue_vf_work;
+ irq_data[vec].vec_num = intr_vec;
+ irq_data[vec].pf = pf;
+
+ /* Register mailbox interrupt handler */
+ irq_name = &hw->irq_name[intr_vec * NAME_SIZE];
+ if (pf->pcifunc)
+ snprintf(irq_name, NAME_SIZE,
+ "RVUPF%d_VF%d Mbox%d", rvu_get_pf(pf->pdev,
+ pf->pcifunc), vec / 2, vec % 2);
+ else
+ snprintf(irq_name, NAME_SIZE, "RVUPF_VF%d Mbox%d",
+ vec / 2, vec % 2);
+
+ hw->pfvf_irq_devid[vec] = &irq_data[vec];
+ ret = request_irq(pci_irq_vector(pf->pdev, intr_vec),
+ pf->hw_ops->pfvf_mbox_intr_handler, 0,
+ irq_name,
+ &irq_data[vec]);
+ if (ret) {
+ dev_err(pf->dev,
+ "RVUPF: IRQ registration failed for PFVF mbox irq\n");
+ return ret;
+ }
+ }
+
+ cn20k_enable_pfvf_mbox_intr(pf, numvfs);
+
+ return 0;
+}
+
+#define RQ_BP_LVL_AURA (255 - ((85 * 256) / 100)) /* BP when 85% is full */
+
+static u8 cn20k_aura_bpid_idx(struct otx2_nic *pfvf, int aura_id)
+{
+#ifdef CONFIG_DCB
+ return pfvf->queue_to_pfc_map[aura_id];
+#else
+ return 0;
+#endif
+}
+
+static int cn20k_aura_aq_init(struct otx2_nic *pfvf, int aura_id,
+ int pool_id, int numptrs)
+{
+ struct npa_cn20k_aq_enq_req *aq;
+ struct otx2_pool *pool;
+ u8 bpid_idx;
+ int err;
+
+ pool = &pfvf->qset.pool[pool_id];
+
+ /* Allocate memory for HW to update Aura count.
+ * Alloc one cache line, so that it fits all FC_STYPE modes.
+ */
+ if (!pool->fc_addr) {
+ err = qmem_alloc(pfvf->dev, &pool->fc_addr, 1, OTX2_ALIGN);
+ if (err)
+ return err;
+ }
+
+ /* Initialize this aura's context via AF */
+ aq = otx2_mbox_alloc_msg_npa_cn20k_aq_enq(&pfvf->mbox);
+ if (!aq) {
+ /* Shared mbox memory buffer is full, flush it and retry */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err)
+ return err;
+ aq = otx2_mbox_alloc_msg_npa_cn20k_aq_enq(&pfvf->mbox);
+ if (!aq)
+ return -ENOMEM;
+ }
+
+ aq->aura_id = aura_id;
+
+ /* Will be filled by AF with correct pool context address */
+ aq->aura.pool_addr = pool_id;
+ aq->aura.pool_caching = 1;
+ aq->aura.shift = ilog2(numptrs) - 8;
+ aq->aura.count = numptrs;
+ aq->aura.limit = numptrs;
+ aq->aura.avg_level = 255;
+ aq->aura.ena = 1;
+ aq->aura.fc_ena = 1;
+ aq->aura.fc_addr = pool->fc_addr->iova;
+ aq->aura.fc_hyst_bits = 0; /* Store count on all updates */
+
+ /* Enable backpressure for RQ aura */
+ if (aura_id < pfvf->hw.rqpool_cnt && !is_otx2_lbkvf(pfvf->pdev)) {
+ aq->aura.bp_ena = 0;
+ /* If NIX1 LF is attached then specify NIX1_RX.
+ *
+ * Below NPA_AURA_S[BP_ENA] is set according to the
+ * NPA_BPINTF_E enumeration given as:
+ * 0x0 + a*0x1 where 'a' is 0 for NIX0_RX and 1 for NIX1_RX so
+ * NIX0_RX is 0x0 + 0*0x1 = 0
+ * NIX1_RX is 0x0 + 1*0x1 = 1
+ * But in HRM it is given that
+ * "NPA_AURA_S[BP_ENA](w1[33:32]) - Enable aura backpressure to
+ * NIX-RX based on [BP] level. One bit per NIX-RX; index
+ * enumerated by NPA_BPINTF_E."
+ */
+ if (pfvf->nix_blkaddr == BLKADDR_NIX1)
+ aq->aura.bp_ena = 1;
+
+ bpid_idx = cn20k_aura_bpid_idx(pfvf, aura_id);
+ aq->aura.bpid = pfvf->bpid[bpid_idx];
+
+ /* Set backpressure level for RQ's Aura */
+ aq->aura.bp = RQ_BP_LVL_AURA;
+ }
+
+ /* Fill AQ info */
+ aq->ctype = NPA_AQ_CTYPE_AURA;
+ aq->op = NPA_AQ_INSTOP_INIT;
+
+ return 0;
+}
+
+static int cn20k_pool_aq_init(struct otx2_nic *pfvf, u16 pool_id,
+ int stack_pages, int numptrs, int buf_size,
+ int type)
+{
+ struct page_pool_params pp_params = { 0 };
+ struct npa_cn20k_aq_enq_req *aq;
+ struct otx2_pool *pool;
+ int err, sz;
+
+ pool = &pfvf->qset.pool[pool_id];
+ /* Alloc memory for stack which is used to store buffer pointers */
+ err = qmem_alloc(pfvf->dev, &pool->stack,
+ stack_pages, pfvf->hw.stack_pg_bytes);
+ if (err)
+ return err;
+
+ pool->rbsize = buf_size;
+
+ /* Initialize this pool's context via AF */
+ aq = otx2_mbox_alloc_msg_npa_cn20k_aq_enq(&pfvf->mbox);
+ if (!aq) {
+ /* Shared mbox memory buffer is full, flush it and retry */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err) {
+ qmem_free(pfvf->dev, pool->stack);
+ return err;
+ }
+ aq = otx2_mbox_alloc_msg_npa_cn20k_aq_enq(&pfvf->mbox);
+ if (!aq) {
+ qmem_free(pfvf->dev, pool->stack);
+ return -ENOMEM;
+ }
+ }
+
+ aq->aura_id = pool_id;
+ aq->pool.stack_base = pool->stack->iova;
+ aq->pool.stack_caching = 1;
+ aq->pool.ena = 1;
+ aq->pool.buf_size = buf_size / 128;
+ aq->pool.stack_max_pages = stack_pages;
+ aq->pool.shift = ilog2(numptrs) - 8;
+ aq->pool.ptr_start = 0;
+ aq->pool.ptr_end = ~0ULL;
+
+ /* Fill AQ info */
+ aq->ctype = NPA_AQ_CTYPE_POOL;
+ aq->op = NPA_AQ_INSTOP_INIT;
+
+ if (type != AURA_NIX_RQ) {
+ pool->page_pool = NULL;
+ return 0;
+ }
+
+ sz = ALIGN(ALIGN(SKB_DATA_ALIGN(buf_size), OTX2_ALIGN), PAGE_SIZE);
+ pp_params.order = get_order(sz);
+ pp_params.flags = PP_FLAG_DMA_MAP;
+ pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs);
+ pp_params.nid = NUMA_NO_NODE;
+ pp_params.dev = pfvf->dev;
+ pp_params.dma_dir = DMA_FROM_DEVICE;
+ pool->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(pool->page_pool)) {
+ netdev_err(pfvf->netdev, "Creation of page pool failed\n");
+ return PTR_ERR(pool->page_pool);
+ }
+
+ return 0;
+}
+
+static int cn20k_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura)
+{
+ struct nix_cn20k_aq_enq_req *aq;
+ struct otx2_nic *pfvf = dev;
+
+ /* Get memory to put this msg */
+ aq = otx2_mbox_alloc_msg_nix_cn20k_aq_enq(&pfvf->mbox);
+ if (!aq)
+ return -ENOMEM;
+
+ aq->sq.cq = pfvf->hw.rx_queues + qidx;
+ aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */
+ aq->sq.cq_ena = 1;
+ aq->sq.ena = 1;
+ aq->sq.smq = otx2_get_smq_idx(pfvf, qidx);
+ aq->sq.smq_rr_weight = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
+ aq->sq.default_chan = pfvf->hw.tx_chan_base + chan_offset;
+ aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
+ aq->sq.sqb_aura = sqb_aura;
+ aq->sq.sq_int_ena = NIX_SQINT_BITS;
+ aq->sq.qint_idx = 0;
+ /* Due to pipelining, a minimum of 2000 unused SQ CQEs must be
+ * maintained to avoid CQ overflow.
+ */
+ aq->sq.cq_limit = (SEND_CQ_SKID * 256) / (pfvf->qset.sqe_cnt);
+
+ /* Fill AQ info */
+ aq->qidx = qidx;
+ aq->ctype = NIX_AQ_CTYPE_SQ;
+ aq->op = NIX_AQ_INSTOP_INIT;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+
+static struct dev_hw_ops cn20k_hw_ops = {
+ .pfaf_mbox_intr_handler = cn20k_pfaf_mbox_intr_handler,
+ .vfaf_mbox_intr_handler = cn20k_vfaf_mbox_intr_handler,
+ .pfvf_mbox_intr_handler = cn20k_pfvf_mbox_intr_handler,
+ .sq_aq_init = cn20k_sq_aq_init,
+ .sqe_flush = cn10k_sqe_flush,
+ .aura_freeptr = cn10k_aura_freeptr,
+ .refill_pool_ptrs = cn10k_refill_pool_ptrs,
+ .aura_aq_init = cn20k_aura_aq_init,
+ .pool_aq_init = cn20k_pool_aq_init,
+};
+
+void cn20k_init(struct otx2_nic *pfvf)
+{
+ pfvf->hw_ops = &cn20k_hw_ops;
+}
+EXPORT_SYMBOL(cn20k_init);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.h
new file mode 100644
index 000000000000..832adaf8c57f
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#ifndef CN20K_H
+#define CN20K_H
+
+#include "otx2_common.h"
+
+void cn20k_init(struct otx2_nic *pfvf);
+int cn20k_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs);
+void cn20k_disable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs);
+void cn20k_enable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs);
+#endif /* CN20K_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index a85ac039d779..75ebb17419c4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -10,22 +10,30 @@
#include <net/page_pool/helpers.h>
#include <net/tso.h>
#include <linux/bitfield.h>
+#include <linux/dcbnl.h>
+#include <net/xfrm.h>
#include "otx2_reg.h"
#include "otx2_common.h"
#include "otx2_struct.h"
#include "cn10k.h"
+#include "otx2_xsk.h"
+
+static bool otx2_is_pfc_enabled(struct otx2_nic *pfvf)
+{
+ return IS_ENABLED(CONFIG_DCB) && !!pfvf->pfc_en;
+}
static void otx2_nix_rq_op_stats(struct queue_stats *stats,
struct otx2_nic *pfvf, int qidx)
{
u64 incr = (u64)qidx << 32;
- u64 *ptr;
+ void __iomem *ptr;
- ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_OCTS);
+ ptr = otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_OCTS);
stats->bytes = otx2_atomic64_add(incr, ptr);
- ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_PKTS);
+ ptr = otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_PKTS);
stats->pkts = otx2_atomic64_add(incr, ptr);
}
@@ -33,12 +41,12 @@ static void otx2_nix_sq_op_stats(struct queue_stats *stats,
struct otx2_nic *pfvf, int qidx)
{
u64 incr = (u64)qidx << 32;
- u64 *ptr;
+ void __iomem *ptr;
- ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_OCTS);
+ ptr = otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_OCTS);
stats->bytes = otx2_atomic64_add(incr, ptr);
- ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_PKTS);
+ ptr = otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_PKTS);
stats->pkts = otx2_atomic64_add(incr, ptr);
}
@@ -83,6 +91,7 @@ int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx)
otx2_nix_rq_op_stats(&rq->stats, pfvf, qidx);
return 1;
}
+EXPORT_SYMBOL(otx2_update_rq_stats);
int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx)
{
@@ -99,6 +108,7 @@ int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx)
otx2_nix_sq_op_stats(&sq->stats, pfvf, qidx);
return 1;
}
+EXPORT_SYMBOL(otx2_update_sq_stats);
void otx2_get_dev_stats(struct otx2_nic *pfvf)
{
@@ -114,7 +124,9 @@ void otx2_get_dev_stats(struct otx2_nic *pfvf)
dev_stats->rx_ucast_frames;
dev_stats->tx_bytes = OTX2_GET_TX_STATS(TX_OCTS);
- dev_stats->tx_drops = OTX2_GET_TX_STATS(TX_DROP);
+ dev_stats->tx_drops = OTX2_GET_TX_STATS(TX_DROP) +
+ (unsigned long)atomic_long_read(&dev_stats->tx_discards);
+
dev_stats->tx_bcast_frames = OTX2_GET_TX_STATS(TX_BCAST);
dev_stats->tx_mcast_frames = OTX2_GET_TX_STATS(TX_MCAST);
dev_stats->tx_ucast_frames = OTX2_GET_TX_STATS(TX_UCAST);
@@ -227,7 +239,7 @@ int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
u16 maxlen;
int err;
- maxlen = otx2_get_max_mtu(pfvf) + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
+ maxlen = pfvf->hw.max_mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
mutex_lock(&pfvf->mbox.lock);
req = otx2_mbox_alloc_msg_nix_set_hw_frs(&pfvf->mbox);
@@ -236,7 +248,7 @@ int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
return -ENOMEM;
}
- req->maxlen = pfvf->netdev->mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
+ req->maxlen = mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
/* Use max receive length supported by hardware for loopback devices */
if (is_otx2_lbkvf(pfvf->pdev))
@@ -246,13 +258,14 @@ int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
mutex_unlock(&pfvf->mbox.lock);
return err;
}
+EXPORT_SYMBOL(otx2_hw_set_mtu);
int otx2_config_pause_frm(struct otx2_nic *pfvf)
{
struct cgx_pause_frm_cfg *req;
int err;
- if (is_otx2_lbkvf(pfvf->pdev))
+ if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev))
return 0;
mutex_lock(&pfvf->mbox.lock);
@@ -307,19 +320,22 @@ fail:
return err;
}
-int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id)
+int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id, const u32 *ind_tbl)
{
struct otx2_rss_info *rss = &pfvf->hw.rss_info;
const int index = rss->rss_size * ctx_id;
struct mbox *mbox = &pfvf->mbox;
- struct otx2_rss_ctx *rss_ctx;
struct nix_aq_enq_req *aq;
int idx, err;
mutex_lock(&mbox->lock);
- rss_ctx = rss->rss_ctx[ctx_id];
+ ind_tbl = ind_tbl ?: rss->ind_tbl;
/* Get memory to put this msg */
for (idx = 0; idx < rss->rss_size; idx++) {
+ /* Ignore the queue if AF_XDP zero copy is enabled */
+ if (test_bit(ind_tbl[idx], pfvf->af_xdp_zc_qidx))
+ continue;
+
aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
if (!aq) {
/* The shared memory buffer can be full.
@@ -337,7 +353,7 @@ int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id)
}
}
- aq->rss.rq = rss_ctx->ind_tbl[idx];
+ aq->rss.rq = ind_tbl[idx];
/* Fill AQ info */
aq->qidx = index + idx;
@@ -375,30 +391,22 @@ void otx2_set_rss_key(struct otx2_nic *pfvf)
int otx2_rss_init(struct otx2_nic *pfvf)
{
struct otx2_rss_info *rss = &pfvf->hw.rss_info;
- struct otx2_rss_ctx *rss_ctx;
int idx, ret = 0;
- rss->rss_size = sizeof(*rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP]);
+ rss->rss_size = sizeof(*rss->ind_tbl);
/* Init RSS key if it is not setup already */
if (!rss->enable)
netdev_rss_key_fill(rss->key, sizeof(rss->key));
otx2_set_rss_key(pfvf);
- if (!netif_is_rxfh_configured(pfvf->netdev)) {
- /* Set RSS group 0 as default indirection table */
- rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP] = kzalloc(rss->rss_size,
- GFP_KERNEL);
- if (!rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP])
- return -ENOMEM;
-
- rss_ctx = rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP];
+ if (!netif_is_rxfh_configured(pfvf->netdev))
for (idx = 0; idx < rss->rss_size; idx++)
- rss_ctx->ind_tbl[idx] =
+ rss->ind_tbl[idx] =
ethtool_rxfh_indir_default(idx,
pfvf->hw.rx_queues);
- }
- ret = otx2_set_rss_table(pfvf, DEFAULT_RSS_CONTEXT_GROUP);
+
+ ret = otx2_set_rss_table(pfvf, DEFAULT_RSS_CONTEXT_GROUP, NULL);
if (ret)
return ret;
@@ -539,10 +547,13 @@ static int otx2_alloc_pool_buf(struct otx2_nic *pfvf, struct otx2_pool *pool,
}
static int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
- dma_addr_t *dma)
+ dma_addr_t *dma, int qidx, int idx)
{
u8 *buf;
+ if (pool->xsk_pool)
+ return otx2_xsk_pool_alloc_buf(pfvf, pool, dma, idx);
+
if (pool->page_pool)
return otx2_alloc_pool_buf(pfvf, pool, dma);
@@ -561,12 +572,12 @@ static int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
}
int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
- dma_addr_t *dma)
+ dma_addr_t *dma, int qidx, int idx)
{
int ret;
local_bh_disable();
- ret = __otx2_alloc_rbuf(pfvf, pool, dma);
+ ret = __otx2_alloc_rbuf(pfvf, pool, dma, qidx, idx);
local_bh_enable();
return ret;
}
@@ -574,7 +585,8 @@ int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
dma_addr_t *dma)
{
- if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma)))
+ if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma,
+ cq->cq_idx, cq->pool_ptrs - 1)))
return -ENOMEM;
return 0;
}
@@ -646,20 +658,31 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for
req->reg[2] = NIX_AF_MDQX_SCHEDULE(schq);
req->regval[2] = dwrr_val;
} else if (lvl == NIX_TXSCH_LVL_TL4) {
+ int sdp_chan = hw->tx_chan_base + prio;
+
+ if (is_otx2_sdp_rep(pfvf->pdev))
+ prio = 0;
parent = schq_list[NIX_TXSCH_LVL_TL3][prio];
req->reg[0] = NIX_AF_TL4X_PARENT(schq);
- req->regval[0] = parent << 16;
+ req->regval[0] = (u64)parent << 16;
req->num_regs++;
req->reg[1] = NIX_AF_TL4X_SCHEDULE(schq);
req->regval[1] = dwrr_val;
+ if (is_otx2_sdp_rep(pfvf->pdev)) {
+ req->num_regs++;
+ req->reg[2] = NIX_AF_TL4X_SDP_LINK_CFG(schq);
+ req->regval[2] = BIT_ULL(12) | BIT_ULL(13) |
+ (sdp_chan & 0xff);
+ }
} else if (lvl == NIX_TXSCH_LVL_TL3) {
parent = schq_list[NIX_TXSCH_LVL_TL2][prio];
req->reg[0] = NIX_AF_TL3X_PARENT(schq);
- req->regval[0] = parent << 16;
+ req->regval[0] = (u64)parent << 16;
req->num_regs++;
req->reg[1] = NIX_AF_TL3X_SCHEDULE(schq);
req->regval[1] = dwrr_val;
- if (lvl == hw->txschq_link_cfg_lvl) {
+ if (lvl == hw->txschq_link_cfg_lvl &&
+ !is_otx2_sdp_rep(pfvf->pdev)) {
req->num_regs++;
req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link);
/* Enable this queue and backpressure
@@ -670,13 +693,14 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for
} else if (lvl == NIX_TXSCH_LVL_TL2) {
parent = schq_list[NIX_TXSCH_LVL_TL1][prio];
req->reg[0] = NIX_AF_TL2X_PARENT(schq);
- req->regval[0] = parent << 16;
+ req->regval[0] = (u64)parent << 16;
req->num_regs++;
req->reg[1] = NIX_AF_TL2X_SCHEDULE(schq);
- req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24 | dwrr_val;
+ req->regval[1] = (u64)hw->txschq_aggr_lvl_rr_prio << 24 | dwrr_val;
- if (lvl == hw->txschq_link_cfg_lvl) {
+ if (lvl == hw->txschq_link_cfg_lvl &&
+ !is_otx2_sdp_rep(pfvf->pdev)) {
req->num_regs++;
req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link);
/* Enable this queue and backpressure
@@ -698,7 +722,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for
req->num_regs++;
req->reg[1] = NIX_AF_TL1X_TOPOLOGY(schq);
- req->regval[1] = (TXSCH_TL1_DFLT_RR_PRIO << 1);
+ req->regval[1] = hw->txschq_aggr_lvl_rr_prio << 1;
req->num_regs++;
req->reg[2] = NIX_AF_TL1X_CIR(schq);
@@ -735,6 +759,7 @@ EXPORT_SYMBOL(otx2_smq_flush);
int otx2_txsch_alloc(struct otx2_nic *pfvf)
{
+ int chan_cnt = pfvf->hw.tx_chan_cnt;
struct nix_txsch_alloc_req *req;
struct nix_txsch_alloc_rsp *rsp;
int lvl, schq, rc;
@@ -747,6 +772,12 @@ int otx2_txsch_alloc(struct otx2_nic *pfvf)
/* Request one schq per level */
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++)
req->schq[lvl] = 1;
+
+ if (is_otx2_sdp_rep(pfvf->pdev) && chan_cnt > 1) {
+ req->schq[NIX_TXSCH_LVL_SMQ] = chan_cnt;
+ req->schq[NIX_TXSCH_LVL_TL4] = chan_cnt;
+ }
+
rc = otx2_sync_mbox_msg(&pfvf->mbox);
if (rc)
return rc;
@@ -757,10 +788,12 @@ int otx2_txsch_alloc(struct otx2_nic *pfvf)
return PTR_ERR(rsp);
/* Setup transmit scheduler list */
- for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++)
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ pfvf->hw.txschq_cnt[lvl] = rsp->schq[lvl];
for (schq = 0; schq < rsp->schq[lvl]; schq++)
pfvf->hw.txschq_list[lvl][schq] =
rsp->schq_list[lvl][schq];
+ }
pfvf->hw.txschq_link_cfg_lvl = rsp->link_cfg_lvl;
pfvf->hw.txschq_aggr_lvl_rr_prio = rsp->aggr_lvl_rr_prio;
@@ -798,12 +831,15 @@ EXPORT_SYMBOL(otx2_txschq_free_one);
void otx2_txschq_stop(struct otx2_nic *pfvf)
{
- int lvl, schq;
+ int lvl, schq, idx;
/* free non QOS TLx nodes */
- for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++)
- otx2_txschq_free_one(pfvf, lvl,
- pfvf->hw.txschq_list[lvl][0]);
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ for (idx = 0; idx < pfvf->hw.txschq_cnt[lvl]; idx++) {
+ otx2_txschq_free_one(pfvf, lvl,
+ pfvf->hw.txschq_list[lvl][idx]);
+ }
+ }
/* Clear the txschq list */
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
@@ -817,9 +853,10 @@ void otx2_sqb_flush(struct otx2_nic *pfvf)
{
int qidx, sqe_tail, sqe_head;
struct otx2_snd_queue *sq;
- u64 incr, *ptr, val;
+ void __iomem *ptr;
+ u64 incr, val;
- ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
+ ptr = otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++) {
sq = &pfvf->qset.sq[qidx];
if (!sq->sqb_ptrs)
@@ -850,7 +887,7 @@ void otx2_sqb_flush(struct otx2_nic *pfvf)
#define RQ_PASS_LVL_AURA (255 - ((95 * 256) / 100)) /* RED when 95% is full */
#define RQ_DROP_LVL_AURA (255 - ((99 * 256) / 100)) /* Drop when 99% is full */
-static int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura)
+int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura)
{
struct otx2_qset *qset = &pfvf->qset;
struct nix_aq_enq_req *aq;
@@ -883,7 +920,7 @@ static int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura)
return otx2_sync_mbox_msg(&pfvf->mbox);
}
-int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
+int otx2_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura)
{
struct otx2_nic *pfvf = dev;
struct otx2_snd_queue *sq;
@@ -902,7 +939,7 @@ int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
aq->sq.ena = 1;
aq->sq.smq = otx2_get_smq_idx(pfvf, qidx);
aq->sq.smq_rr_quantum = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
- aq->sq.default_chan = pfvf->hw.tx_chan_base;
+ aq->sq.default_chan = pfvf->hw.tx_chan_base + chan_offset;
aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
aq->sq.sqb_aura = sqb_aura;
aq->sq.sq_int_ena = NIX_SQINT_BITS;
@@ -925,6 +962,7 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
struct otx2_qset *qset = &pfvf->qset;
struct otx2_snd_queue *sq;
struct otx2_pool *pool;
+ u8 chan_offset;
int err;
pool = &pfvf->qset.pool[sqb_aura];
@@ -936,6 +974,29 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
if (err)
return err;
+ /* Allocate memory for NIX SQE (which includes NIX SG) and CPT SG.
+ * The NIX and CPT SGs are the same size. Allocate memory for the
+ * CPT SG equal to the NIX SQE size to keep base addresses aligned.
+ * Layout of a NIX SQE and CPT SG entry:
+ * -----------------------------
+ * | CPT Scatter Gather |
+ * | (SQE SIZE) |
+ * | |
+ * -----------------------------
+ * | NIX SQE |
+ * | (SQE SIZE) |
+ * | |
+ * -----------------------------
+ */
+ err = qmem_alloc(pfvf->dev, &sq->sqe_ring, qset->sqe_cnt,
+ sq->sqe_size * 2);
+ if (err)
+ return err;
+
+ err = qmem_alloc(pfvf->dev, &sq->cpt_resp, qset->sqe_cnt, 64);
+ if (err)
+ return err;
+
if (qidx < pfvf->hw.tx_queues) {
err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt,
TSO_HEADER_SIZE);
@@ -970,8 +1031,13 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
sq->stats.bytes = 0;
sq->stats.pkts = 0;
+ /* Attach XSK_BUFF_POOL to XDP queue */
+ if (qidx > pfvf->hw.xdp_queues)
+ otx2_attach_xsk_buff(pfvf, sq, (qidx - pfvf->hw.xdp_queues));
- err = pfvf->hw_ops->sq_aq_init(pfvf, qidx, sqb_aura);
+
+ chan_offset = qidx % pfvf->hw.tx_chan_cnt;
+ err = pfvf->hw_ops->sq_aq_init(pfvf, qidx, chan_offset, sqb_aura);
if (err) {
kfree(sq->sg);
sq->sg = NULL;
@@ -982,12 +1048,13 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
}
-static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
+int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
{
struct otx2_qset *qset = &pfvf->qset;
int err, pool_id, non_xdp_queues;
struct nix_aq_enq_req *aq;
struct otx2_cq_queue *cq;
+ struct otx2_pool *pool;
cq = &qset->cq[qidx];
cq->cq_idx = qidx;
@@ -996,8 +1063,20 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
cq->cq_type = CQ_RX;
cq->cint_idx = qidx;
cq->cqe_cnt = qset->rqe_cnt;
- if (pfvf->xdp_prog)
+ if (pfvf->xdp_prog) {
xdp_rxq_info_reg(&cq->xdp_rxq, pfvf->netdev, qidx, 0);
+ pool = &qset->pool[qidx];
+ if (pool->xsk_pool) {
+ xdp_rxq_info_reg_mem_model(&cq->xdp_rxq,
+ MEM_TYPE_XSK_BUFF_POOL,
+ NULL);
+ xsk_pool_set_rxq_info(pool->xsk_pool, &cq->xdp_rxq);
+ } else if (pool->page_pool) {
+ xdp_rxq_info_reg_mem_model(&cq->xdp_rxq,
+ MEM_TYPE_PAGE_POOL,
+ pool->page_pool);
+ }
+ }
} else if (qidx < non_xdp_queues) {
cq->cq_type = CQ_TX;
cq->cint_idx = qidx - pfvf->hw.rx_queues;
@@ -1216,9 +1295,10 @@ void otx2_free_bufs(struct otx2_nic *pfvf, struct otx2_pool *pool,
pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
page = virt_to_head_page(phys_to_virt(pa));
-
if (pool->page_pool) {
page_pool_put_full_page(pool->page_pool, page, true);
+ } else if (pool->xsk_pool) {
+ /* Note: No way of identifying xdp_buff */
} else {
dma_unmap_page_attrs(pfvf->dev, iova, size,
DMA_FROM_DEVICE,
@@ -1233,6 +1313,7 @@ void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type)
int pool_id, pool_start = 0, pool_end = 0, size = 0;
struct otx2_pool *pool;
u64 iova;
+ int idx;
if (type == AURA_NIX_SQ) {
pool_start = otx2_get_pool_idx(pfvf, type, 0);
@@ -1247,16 +1328,21 @@ void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type)
/* Free SQB and RQB pointers from the aura pool */
for (pool_id = pool_start; pool_id < pool_end; pool_id++) {
- iova = otx2_aura_allocptr(pfvf, pool_id);
pool = &pfvf->qset.pool[pool_id];
+ iova = otx2_aura_allocptr(pfvf, pool_id);
while (iova) {
if (type == AURA_NIX_RQ)
iova -= OTX2_HEAD_ROOM;
-
otx2_free_bufs(pfvf, pool, iova, size);
-
iova = otx2_aura_allocptr(pfvf, pool_id);
}
+
+ for (idx = 0; idx < pool->xdp_cnt; idx++) {
+ if (!pool->xdp[idx])
+ continue;
+
+ xsk_buff_free(pool->xdp[idx]);
+ }
}
}
@@ -1273,7 +1359,8 @@ void otx2_aura_pool_free(struct otx2_nic *pfvf)
qmem_free(pfvf->dev, pool->stack);
qmem_free(pfvf->dev, pool->fc_addr);
page_pool_destroy(pool->page_pool);
- pool->page_pool = NULL;
+ devm_kfree(pfvf->dev, pool->xdp);
+ pool->xsk_pool = NULL;
}
devm_kfree(pfvf->dev, pfvf->qset.pool);
pfvf->qset.pool = NULL;
@@ -1282,6 +1369,13 @@ void otx2_aura_pool_free(struct otx2_nic *pfvf)
int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
int pool_id, int numptrs)
{
+ return pfvf->hw_ops->aura_aq_init(pfvf, aura_id, pool_id,
+ numptrs);
+}
+
+int otx2_aura_aq_init(struct otx2_nic *pfvf, int aura_id,
+ int pool_id, int numptrs)
+{
struct npa_aq_enq_req *aq;
struct otx2_pool *pool;
int err;
@@ -1359,7 +1453,15 @@ int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
int stack_pages, int numptrs, int buf_size, int type)
{
+ return pfvf->hw_ops->pool_aq_init(pfvf, pool_id, stack_pages, numptrs,
+ buf_size, type);
+}
+
+int otx2_pool_aq_init(struct otx2_nic *pfvf, u16 pool_id,
+ int stack_pages, int numptrs, int buf_size, int type)
+{
struct page_pool_params pp_params = { 0 };
+ struct xsk_buff_pool *xsk_pool;
struct npa_aq_enq_req *aq;
struct otx2_pool *pool;
int err;
@@ -1403,21 +1505,33 @@ int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
aq->ctype = NPA_AQ_CTYPE_POOL;
aq->op = NPA_AQ_INSTOP_INIT;
- if (type != AURA_NIX_RQ) {
- pool->page_pool = NULL;
+ if (type != AURA_NIX_RQ)
+ return 0;
+
+ if (!test_bit(pool_id, pfvf->af_xdp_zc_qidx)) {
+ pp_params.order = get_order(buf_size);
+ pp_params.flags = PP_FLAG_DMA_MAP;
+ pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs);
+ pp_params.nid = NUMA_NO_NODE;
+ pp_params.dev = pfvf->dev;
+ pp_params.dma_dir = DMA_FROM_DEVICE;
+ pool->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(pool->page_pool)) {
+ netdev_err(pfvf->netdev, "Creation of page pool failed\n");
+ return PTR_ERR(pool->page_pool);
+ }
return 0;
}
- pp_params.order = get_order(buf_size);
- pp_params.flags = PP_FLAG_DMA_MAP;
- pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs);
- pp_params.nid = NUMA_NO_NODE;
- pp_params.dev = pfvf->dev;
- pp_params.dma_dir = DMA_FROM_DEVICE;
- pool->page_pool = page_pool_create(&pp_params);
- if (IS_ERR(pool->page_pool)) {
- netdev_err(pfvf->netdev, "Creation of page pool failed\n");
- return PTR_ERR(pool->page_pool);
+ /* Set XSK pool to support AF_XDP zero-copy */
+ xsk_pool = xsk_get_pool_from_qid(pfvf->netdev, pool_id);
+ if (xsk_pool) {
+ pool->xsk_pool = xsk_pool;
+ pool->xdp_cnt = numptrs;
+ pool->xdp = devm_kcalloc(pfvf->dev,
+ numptrs, sizeof(struct xdp_buff *), GFP_KERNEL);
+ if (!pool->xdp)
+ return -ENOMEM;
}
return 0;
@@ -1478,9 +1592,18 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
}
for (ptr = 0; ptr < num_sqbs; ptr++) {
- err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
- if (err)
+ err = otx2_alloc_rbuf(pfvf, pool, &bufptr, pool_id, ptr);
+ if (err) {
+ if (pool->xsk_pool) {
+ ptr--;
+ while (ptr >= 0) {
+ xsk_buff_free(pool->xdp[ptr]);
+ ptr--;
+ }
+ }
goto err_mem;
+ }
+
pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr);
sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr;
}
@@ -1530,11 +1653,19 @@ int otx2_rq_aura_pool_init(struct otx2_nic *pfvf)
/* Allocate pointers and free them to aura/pool */
for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
pool = &pfvf->qset.pool[pool_id];
+
for (ptr = 0; ptr < num_ptrs; ptr++) {
- err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
- if (err)
+ err = otx2_alloc_rbuf(pfvf, pool, &bufptr, pool_id, ptr);
+ if (err) {
+ if (pool->xsk_pool) {
+ while (ptr)
+ xsk_buff_free(pool->xdp[--ptr]);
+ }
return -ENOMEM;
+ }
+
pfvf->hw_ops->aura_freeptr(pfvf, pool_id,
+ pool->xsk_pool ? bufptr :
bufptr + OTX2_HEAD_ROOM);
}
}
@@ -1693,18 +1824,43 @@ int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable)
return -ENOMEM;
req->chan_base = 0;
-#ifdef CONFIG_DCB
- req->chan_cnt = pfvf->pfc_en ? IEEE_8021QAZ_MAX_TCS : 1;
- req->bpid_per_chan = pfvf->pfc_en ? 1 : 0;
-#else
- req->chan_cnt = 1;
- req->bpid_per_chan = 0;
-#endif
+ if (otx2_is_pfc_enabled(pfvf)) {
+ req->chan_cnt = IEEE_8021QAZ_MAX_TCS;
+ req->bpid_per_chan = 1;
+ } else {
+ req->chan_cnt = pfvf->hw.rx_chan_cnt;
+ req->bpid_per_chan = 0;
+ }
return otx2_sync_mbox_msg(&pfvf->mbox);
}
EXPORT_SYMBOL(otx2_nix_config_bp);
+int otx2_nix_cpt_config_bp(struct otx2_nic *pfvf, bool enable)
+{
+ struct nix_bp_cfg_req *req;
+
+ if (enable)
+ req = otx2_mbox_alloc_msg_nix_cpt_bp_enable(&pfvf->mbox);
+ else
+ req = otx2_mbox_alloc_msg_nix_cpt_bp_disable(&pfvf->mbox);
+
+ if (!req)
+ return -ENOMEM;
+
+ req->chan_base = 0;
+ if (otx2_is_pfc_enabled(pfvf)) {
+ req->chan_cnt = IEEE_8021QAZ_MAX_TCS;
+ req->bpid_per_chan = 1;
+ } else {
+ req->chan_cnt = pfvf->hw.rx_chan_cnt;
+ req->bpid_per_chan = 0;
+ }
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+EXPORT_SYMBOL(otx2_nix_cpt_config_bp);
+
/* Mbox message handlers */
void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
struct cgx_stats_rsp *rsp)
@@ -1738,6 +1894,8 @@ void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
pfvf->hw.sqb_size = rsp->sqb_size;
pfvf->hw.rx_chan_base = rsp->rx_chan_base;
pfvf->hw.tx_chan_base = rsp->tx_chan_base;
+ pfvf->hw.rx_chan_cnt = rsp->rx_chan_cnt;
+ pfvf->hw.tx_chan_cnt = rsp->tx_chan_cnt;
pfvf->hw.lso_tsov4_idx = rsp->lso_tsov4_idx;
pfvf->hw.lso_tsov6_idx = rsp->lso_tsov6_idx;
pfvf->hw.cgx_links = rsp->cgx_links;
@@ -1782,6 +1940,7 @@ void otx2_free_cints(struct otx2_nic *pfvf, int n)
free_irq(vector, &qset->napi[qidx]);
}
}
+EXPORT_SYMBOL(otx2_free_cints);
void otx2_set_cints_affinity(struct otx2_nic *pfvf)
{
@@ -1837,6 +1996,10 @@ u16 otx2_get_max_mtu(struct otx2_nic *pfvf)
if (!rc) {
rsp = (struct nix_hw_info *)
otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ rc = PTR_ERR(rsp);
+ goto out;
+ }
/* HW counts VLAN insertion bytes (8 for double tag)
* irrespective of whether SQE is requesting to insert VLAN
@@ -1898,6 +2061,43 @@ int otx2_handle_ntuple_tc_features(struct net_device *netdev, netdev_features_t
}
EXPORT_SYMBOL(otx2_handle_ntuple_tc_features);
+int otx2_set_hw_capabilities(struct otx2_nic *pfvf)
+{
+ struct mbox *mbox = &pfvf->mbox;
+ struct otx2_hw *hw = &pfvf->hw;
+ struct get_hw_cap_rsp *rsp;
+ struct msg_req *req;
+ int ret = -ENOMEM;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_get_hw_cap(mbox);
+ if (!req)
+ goto fail;
+
+ ret = otx2_sync_mbox_msg(mbox);
+ if (ret)
+ goto fail;
+
+ rsp = (struct get_hw_cap_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
+ 0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ if (rsp->hw_caps & HW_CAP_MACSEC)
+ __set_bit(CN10K_HW_MACSEC, &hw->cap_flag);
+
+ mutex_unlock(&mbox->lock);
+
+ return 0;
+fail:
+ dev_err(pfvf->dev, "Cannot get MACSEC capability from AF\n");
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
int __weak \
otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \
@@ -1911,3 +2111,48 @@ EXPORT_SYMBOL(otx2_mbox_up_handler_ ## _fn_name);
MBOX_UP_CGX_MESSAGES
MBOX_UP_MCS_MESSAGES
#undef M
+
+dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
+ struct sk_buff *skb, int seg, int *len)
+{
+ enum dma_data_direction dir = DMA_TO_DEVICE;
+ const skb_frag_t *frag;
+ struct page *page;
+ int offset;
+
+ /* Crypto hardware needs write permission for IPsec crypto offload */
+ if (unlikely(xfrm_offload(skb))) {
+ dir = DMA_BIDIRECTIONAL;
+ skb = skb_unshare(skb, GFP_ATOMIC);
+ }
+
+ /* First segment is always skb->data */
+ if (!seg) {
+ page = virt_to_page(skb->data);
+ offset = offset_in_page(skb->data);
+ *len = skb_headlen(skb);
+ } else {
+ frag = &skb_shinfo(skb)->frags[seg - 1];
+ page = skb_frag_page(frag);
+ offset = skb_frag_off(frag);
+ *len = skb_frag_size(frag);
+ }
+ return otx2_dma_map_page(pfvf, page, offset, *len, dir);
+}
+
+void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg)
+{
+ enum dma_data_direction dir = DMA_TO_DEVICE;
+ struct sk_buff *skb = NULL;
+ int seg;
+
+ skb = (struct sk_buff *)sg->skb;
+ if (unlikely(xfrm_offload(skb)))
+ dir = DMA_BIDIRECTIONAL;
+
+ for (seg = 0; seg < sg->num_segs; seg++) {
+ otx2_dma_unmap_page(pfvf, sg->dma_addr[seg],
+ sg->size[seg], dir);
+ }
+ sg->num_segs = 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 7e16a341ec58..e616a727a3a9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -14,6 +14,7 @@
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
+#include <linux/soc/marvell/silicons.h>
#include <linux/soc/marvell/octeontx2/asm.h>
#include <net/macsec.h>
#include <net/pkt_cls.h>
@@ -21,14 +22,19 @@
#include <linux/time64.h>
#include <linux/dim.h>
#include <uapi/linux/if_macsec.h>
+#include <net/page_pool/helpers.h>
#include <mbox.h>
#include <npc.h>
#include "otx2_reg.h"
#include "otx2_txrx.h"
#include "otx2_devlink.h"
+#include <rvu.h>
#include <rvu_trace.h>
#include "qos.h"
+#include "rep.h"
+#include "cn10k_ipsec.h"
+#include "cn20k.h"
/* IPv4 flag more fragment bit */
#define IPV4_FLAG_MORE 0x20
@@ -39,8 +45,11 @@
#define PCI_DEVID_OCTEONTX2_RVU_AFVF 0xA0F8
#define PCI_SUBSYS_DEVID_96XX_RVU_PFVF 0xB200
+#define PCI_SUBSYS_DEVID_CN10K_A_RVU_PFVF 0xB900
#define PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF 0xBD00
+#define PCI_DEVID_OCTEONTX2_SDP_REP 0xA0F7
+
/* PCI BAR nos */
#define PCI_CFG_REG_BAR_NUM 2
#define PCI_MBOX_BAR_NUM 4
@@ -52,6 +61,15 @@
#define NIX_PF_PFC_PRIO_MAX 8
#endif
+/* Number of segments per SG structure */
+#define MAX_SEGS_PER_SG 3
+
+irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq);
+irqreturn_t cn20k_pfaf_mbox_intr_handler(int irq, void *pf_irq);
+irqreturn_t cn20k_vfaf_mbox_intr_handler(int irq, void *vf_irq);
+irqreturn_t cn20k_pfvf_mbox_intr_handler(int irq, void *pf_irq);
+irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq);
+
enum arua_mapped_qtypes {
AURA_NIX_RQ,
AURA_NIX_SQ,
@@ -76,10 +94,6 @@ struct otx2_lmt_info {
u64 lmt_addr;
u16 lmt_id;
};
-/* RSS configuration */
-struct otx2_rss_ctx {
- u8 ind_tbl[MAX_RSS_INDIR_TBL_SIZE];
-};
struct otx2_rss_info {
u8 enable;
@@ -87,7 +101,7 @@ struct otx2_rss_info {
u16 rss_size;
#define RSS_HASH_KEY_SIZE 44 /* 352 bit key */
u8 key[RSS_HASH_KEY_SIZE];
- struct otx2_rss_ctx *rss_ctx[MAX_RSS_GROUPS];
+ u32 ind_tbl[MAX_RSS_INDIR_TBL_SIZE];
};
/* NIX (or NPC) RX errors */
@@ -120,31 +134,10 @@ enum otx2_errcodes_re {
ERRCODE_IL4_CSUM = 0x22,
};
-/* NIX TX stats */
-enum nix_stat_lf_tx {
- TX_UCAST = 0x0,
- TX_BCAST = 0x1,
- TX_MCAST = 0x2,
- TX_DROP = 0x3,
- TX_OCTS = 0x4,
- TX_STATS_ENUM_LAST,
-};
-
-/* NIX RX stats */
-enum nix_stat_lf_rx {
- RX_OCTS = 0x0,
- RX_UCAST = 0x1,
- RX_BCAST = 0x2,
- RX_MCAST = 0x3,
- RX_DROP = 0x4,
- RX_DROP_OCTS = 0x5,
- RX_FCS = 0x6,
- RX_ERR = 0x7,
- RX_DRP_BCAST = 0x8,
- RX_DRP_MCAST = 0x9,
- RX_DRP_L3BCAST = 0xa,
- RX_DRP_L3MCAST = 0xb,
- RX_STATS_ENUM_LAST,
+enum otx2_xdp_action {
+ OTX2_XDP_TX = BIT(0),
+ OTX2_XDP_REDIRECT = BIT(1),
+ OTX2_AF_XDP_FRAME = BIT(2),
};
struct otx2_dev_stats {
@@ -161,6 +154,7 @@ struct otx2_dev_stats {
u64 tx_bcast_frames;
u64 tx_mcast_frames;
u64 tx_drops;
+ atomic_long_t tx_discards;
};
/* Driver counted stats */
@@ -224,15 +218,19 @@ struct otx2_hw {
/* NIX */
u8 txschq_link_cfg_lvl;
+ u8 txschq_cnt[NIX_TXSCH_LVL_CNT];
u8 txschq_aggr_lvl_rr_prio;
u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
u16 matchall_ipolicer;
u32 dwrr_mtu;
+ u32 max_mtu;
u8 smq_link_type;
/* HW settings, coalescing etc */
u16 rx_chan_base;
u16 tx_chan_base;
+ u8 rx_chan_cnt;
+ u8 tx_chan_cnt;
u16 cq_qcount_wait;
u16 cq_ecount_wait;
u16 rq_skid;
@@ -253,6 +251,7 @@ struct otx2_hw {
u16 nix_msixoff; /* Offset of NIX vectors */
char *irq_name;
cpumask_var_t *affinity_mask;
+ struct pf_irq_data *pfvf_irq_devid[4];
/* Stats */
struct otx2_dev_stats dev_stats;
@@ -346,12 +345,9 @@ struct otx2_flow_config {
u16 *def_ent;
u16 nr_flows;
#define OTX2_DEFAULT_FLOWCOUNT 16
-#define OTX2_MAX_UNICAST_FLOWS 8
+#define OTX2_DEFAULT_UNICAST_FLOWS 4
#define OTX2_MAX_VLAN_FLOWS 1
#define OTX2_MAX_TC_FLOWS OTX2_DEFAULT_FLOWCOUNT
-#define OTX2_MCAM_COUNT (OTX2_DEFAULT_FLOWCOUNT + \
- OTX2_MAX_UNICAST_FLOWS + \
- OTX2_MAX_VLAN_FLOWS)
u16 unicast_offset;
u16 rx_vlan_offset;
u16 vf_vlan_offset;
@@ -363,16 +359,28 @@ struct otx2_flow_config {
struct list_head flow_list;
u32 dmacflt_max_flows;
u16 max_flows;
+ refcount_t mark_flows;
struct list_head flow_list_tc;
+ u8 ucast_flt_cnt;
bool ntuple;
+ u16 ntuple_cnt;
};
struct dev_hw_ops {
- int (*sq_aq_init)(void *dev, u16 qidx, u16 sqb_aura);
+ int (*sq_aq_init)(void *dev, u16 qidx, u8 chan_offset,
+ u16 sqb_aura);
void (*sqe_flush)(void *dev, struct otx2_snd_queue *sq,
int size, int qidx);
int (*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq);
void (*aura_freeptr)(void *dev, int aura, u64 buf);
+ irqreturn_t (*pfaf_mbox_intr_handler)(int irq, void *pf_irq);
+ irqreturn_t (*vfaf_mbox_intr_handler)(int irq, void *pf_irq);
+ irqreturn_t (*pfvf_mbox_intr_handler)(int irq, void *pf_irq);
+ int (*aura_aq_init)(struct otx2_nic *pfvf, int aura_id,
+ int pool_id, int numptrs);
+ int (*pool_aq_init)(struct otx2_nic *pfvf, u16 pool_id,
+ int stack_pages, int numptrs, int buf_size,
+ int type);
};
#define CN10K_MCS_SA_PER_SC 4
@@ -440,6 +448,16 @@ struct cn10k_mcs_cfg {
struct list_head rxsc_list;
};
+struct pf_irq_data {
+ u64 intr_status;
+ void (*pf_queue_work_hdlr)(struct mbox *mb, struct workqueue_struct *mw,
+ int first, int mdevs, u64 intr);
+ struct otx2_nic *pf;
+ int vec_num;
+ int start;
+ int mdevs;
+};
+
struct otx2_nic {
void __iomem *reg_base;
struct net_device *netdev;
@@ -465,6 +483,10 @@ struct otx2_nic {
#define OTX2_FLAG_DMACFLTR_SUPPORT BIT_ULL(14)
#define OTX2_FLAG_PTP_ONESTEP_SYNC BIT_ULL(15)
#define OTX2_FLAG_ADPTV_INT_COAL_ENABLED BIT_ULL(16)
+#define OTX2_FLAG_TC_MARK_ENABLED BIT_ULL(17)
+#define OTX2_FLAG_REP_MODE_ENABLED BIT_ULL(18)
+#define OTX2_FLAG_PORT_UP BIT_ULL(19)
+#define OTX2_FLAG_IPSEC_OFFLOAD_ENABLED BIT_ULL(20)
u64 flags;
u64 *cq_op_addr;
@@ -479,6 +501,7 @@ struct otx2_nic {
struct mbox *mbox_pfvf;
struct workqueue_struct *mbox_wq;
struct workqueue_struct *mbox_pfvf_wq;
+ struct qmem *pfvf_mbox_addr;
u8 total_vfs;
u16 pcifunc; /* RVU PF_FUNC */
@@ -510,15 +533,15 @@ struct otx2_nic {
u32 nix_lmt_size;
struct otx2_ptp *ptp;
- struct hwtstamp_config tstamp;
+ struct kernel_hwtstamp_config tstamp;
unsigned long rq_bmap;
/* Devlink */
struct otx2_devlink *dl;
-#ifdef CONFIG_DCB
/* PFC */
u8 pfc_en;
+#ifdef CONFIG_DCB
u8 *queue_to_pfc_map;
u16 pfc_schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
bool pfc_alloc_status[NIX_PF_PFC_PRIO_MAX];
@@ -532,11 +555,24 @@ struct otx2_nic {
#if IS_ENABLED(CONFIG_MACSEC)
struct cn10k_mcs_cfg *macsec_cfg;
#endif
+
+#if IS_ENABLED(CONFIG_RVU_ESWITCH)
+ struct rep_dev **reps;
+ int rep_cnt;
+ u16 rep_pf_map[RVU_MAX_REP];
+ u16 esw_mode;
+#endif
+
+ /* Inline ipsec */
+ struct cn10k_ipsec ipsec;
+ /* af_xdp zero-copy */
+ unsigned long *af_xdp_zc_qidx;
};
static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
{
- return pdev->device == PCI_DEVID_OCTEONTX2_RVU_AFVF;
+ return (pdev->device == PCI_DEVID_OCTEONTX2_RVU_AFVF) ||
+ (pdev->device == PCI_DEVID_RVU_REP);
}
static inline bool is_96xx_A0(struct pci_dev *pdev)
@@ -551,6 +587,11 @@ static inline bool is_96xx_B0(struct pci_dev *pdev)
(pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
}
+static inline bool is_otx2_sdp_rep(struct pci_dev *pdev)
+{
+ return pdev->device == PCI_DEVID_OCTEONTX2_SDP_REP;
+}
+
/* REVID for PCIe devices.
* Bits 0..1: minor pass, bit 3..2: major pass
* bits 7..4: midr id
@@ -576,6 +617,15 @@ static inline bool is_dev_cn10kb(struct pci_dev *pdev)
return pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF;
}
+static inline bool is_dev_cn10ka_b0(struct pci_dev *pdev)
+{
+ if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_RVU_PFVF &&
+ (pdev->revision & 0xFF) == 0x54)
+ return true;
+
+ return false;
+}
+
static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
{
struct otx2_hw *hw = &pfvf->hw;
@@ -608,9 +658,6 @@ static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
__set_bit(CN10K_PTP_ONESTEP, &hw->cap_flag);
__set_bit(QOS_CIR_PIR_SUPPORT, &hw->cap_flag);
}
-
- if (is_dev_cn10kb(pfvf->pdev))
- __set_bit(CN10K_HW_MACSEC, &hw->cap_flag);
}
/* Register read/write APIs */
@@ -625,6 +672,9 @@ static inline void __iomem *otx2_get_regaddr(struct otx2_nic *nic, u64 offset)
case BLKTYPE_NPA:
blkaddr = BLKADDR_NPA;
break;
+ case BLKTYPE_CPT:
+ blkaddr = BLKADDR_CPT0;
+ break;
default:
blkaddr = BLKADDR_RVUM;
break;
@@ -706,8 +756,9 @@ static inline void otx2_write128(u64 lo, u64 hi, void __iomem *addr)
::[x0]"r"(lo), [x1]"r"(hi), [p1]"r"(addr));
}
-static inline u64 otx2_atomic64_add(u64 incr, u64 *ptr)
+static inline u64 otx2_atomic64_add(u64 incr, void __iomem *addr)
{
+ u64 __iomem *ptr = addr;
u64 result;
__asm__ volatile(".cpu generic+lse\n"
@@ -720,7 +771,11 @@ static inline u64 otx2_atomic64_add(u64 incr, u64 *ptr)
#else
#define otx2_write128(lo, hi, addr) writeq((hi) | (lo), addr)
-#define otx2_atomic64_add(incr, ptr) ({ *ptr += incr; })
+
+static inline u64 otx2_atomic64_add(u64 incr, void __iomem *addr)
+{
+ return 0;
+}
#endif
static inline void __cn10k_aura_freeptr(struct otx2_nic *pfvf, u64 aura,
@@ -770,7 +825,7 @@ static inline void cn10k_aura_freeptr(void *dev, int aura, u64 buf)
/* Alloc pointer from pool/aura */
static inline u64 otx2_aura_allocptr(struct otx2_nic *pfvf, int aura)
{
- u64 *ptr = (__force u64 *)otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_ALLOCX(0));
+ void __iomem *ptr = otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_ALLOCX(0));
u64 incr = (u64)aura | BIT_ULL(63);
return otx2_atomic64_add(incr, ptr);
@@ -845,6 +900,7 @@ static struct _req_type __maybe_unused \
*otx2_mbox_alloc_msg_ ## _fn_name(struct mbox *mbox) \
{ \
struct _req_type *req; \
+ u16 pcifunc = mbox->pfvf->pcifunc; \
\
req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \
&mbox->mbox, 0, sizeof(struct _req_type), \
@@ -853,7 +909,8 @@ static struct _req_type __maybe_unused \
return NULL; \
req->hdr.sig = OTX2_MBOX_REQ_SIG; \
req->hdr.id = _id; \
- trace_otx2_msg_alloc(mbox->mbox.pdev, _id, sizeof(*req)); \
+ req->hdr.pcifunc = pcifunc; \
+ trace_otx2_msg_alloc(mbox->mbox.pdev, _id, sizeof(*req), pcifunc); \
return req; \
}
@@ -873,21 +930,11 @@ MBOX_UP_MCS_MESSAGES
/* Time to wait before watchdog kicks off */
#define OTX2_TX_TIMEOUT (100 * HZ)
-#define RVU_PFVF_PF_SHIFT 10
-#define RVU_PFVF_PF_MASK 0x3F
-#define RVU_PFVF_FUNC_SHIFT 0
-#define RVU_PFVF_FUNC_MASK 0x3FF
-
static inline bool is_otx2_vf(u16 pcifunc)
{
return !!(pcifunc & RVU_PFVF_FUNC_MASK);
}
-static inline int rvu_get_pf(u16 pcifunc)
-{
- return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
-}
-
static inline dma_addr_t otx2_dma_map_page(struct otx2_nic *pfvf,
struct page *page,
size_t offset, size_t size,
@@ -913,15 +960,19 @@ static inline void otx2_dma_unmap_page(struct otx2_nic *pfvf,
static inline u16 otx2_get_smq_idx(struct otx2_nic *pfvf, u16 qidx)
{
u16 smq;
+ int idx;
+
#ifdef CONFIG_DCB
if (qidx < NIX_PF_PFC_PRIO_MAX && pfvf->pfc_alloc_status[qidx])
return pfvf->pfc_schq_list[NIX_TXSCH_LVL_SMQ][qidx];
#endif
/* check if qidx falls under QOS queues */
- if (qidx >= pfvf->hw.non_qos_queues)
+ if (qidx >= pfvf->hw.non_qos_queues) {
smq = pfvf->qos.qid_to_sqmap[qidx - pfvf->hw.non_qos_queues];
- else
- smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
+ } else {
+ idx = qidx % pfvf->hw.txschq_cnt[NIX_TXSCH_LVL_SMQ];
+ smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][idx];
+ }
return smq;
}
@@ -961,6 +1012,7 @@ void otx2_get_mac_from_af(struct net_device *netdev);
void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx);
int otx2_config_pause_frm(struct otx2_nic *pfvf);
void otx2_setup_segmentation(struct otx2_nic *pfvf);
+int otx2_reset_mac_stats(struct otx2_nic *pfvf);
/* RVU block related APIs */
int otx2_attach_npa_nix(struct otx2_nic *pfvf);
@@ -980,27 +1032,49 @@ void otx2_txschq_free_one(struct otx2_nic *pfvf, u16 lvl, u16 schq);
void otx2_free_pending_sqe(struct otx2_nic *pfvf);
void otx2_sqb_flush(struct otx2_nic *pfvf);
int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
- dma_addr_t *dma);
+ dma_addr_t *dma, int qidx, int idx);
int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
+int otx2_nix_cpt_config_bp(struct otx2_nic *pfvf, bool enable);
void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int qidx);
void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura);
-int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
-int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
+int otx2_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura);
+int cn10k_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura);
int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
dma_addr_t *dma);
int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
int stack_pages, int numptrs, int buf_size, int type);
int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
int pool_id, int numptrs);
+int otx2_init_rsrc(struct pci_dev *pdev, struct otx2_nic *pf);
+void otx2_free_queue_mem(struct otx2_qset *qset);
+int otx2_alloc_queue_mem(struct otx2_nic *pf);
+int otx2_init_hw_resources(struct otx2_nic *pfvf);
+void otx2_free_hw_resources(struct otx2_nic *pf);
+int otx2_wq_init(struct otx2_nic *pf);
+int otx2_check_pf_usable(struct otx2_nic *pf);
+int otx2_pfaf_mbox_init(struct otx2_nic *pf);
+int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af);
+int otx2_realloc_msix_vectors(struct otx2_nic *pf);
+void otx2_pfaf_mbox_destroy(struct otx2_nic *pf);
+void otx2_disable_mbox_intr(struct otx2_nic *pf);
+void otx2_disable_napi(struct otx2_nic *pf);
+irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq);
+int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura);
+int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx);
+int otx2_set_hw_capabilities(struct otx2_nic *pfvf);
+int otx2_aura_aq_init(struct otx2_nic *pfvf, int aura_id,
+ int pool_id, int numptrs);
+int otx2_pool_aq_init(struct otx2_nic *pfvf, u16 pool_id,
+ int stack_pages, int numptrs, int buf_size, int type);
/* RSS configuration APIs*/
int otx2_rss_init(struct otx2_nic *pfvf);
int otx2_set_flowkey_cfg(struct otx2_nic *pfvf);
void otx2_set_rss_key(struct otx2_nic *pfvf);
-int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id);
+int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id, const u32 *ind_tbl);
/* Mbox handlers */
void mbox_handler_msix_offset(struct otx2_nic *pfvf,
@@ -1034,8 +1108,11 @@ int otx2_open(struct net_device *netdev);
int otx2_stop(struct net_device *netdev);
int otx2_set_real_num_queues(struct net_device *netdev,
int tx_queues, int rx_queues);
-int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd);
-int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr);
+int otx2_config_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config);
+int otx2_config_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
/* MCAM filter related APIs */
int otx2_mcam_flow_init(struct otx2_nic *pf);
@@ -1057,13 +1134,17 @@ int otx2_del_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_add_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf);
-bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx);
+bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, struct xdp_frame *xdpf,
+ u64 iova, int len, u16 qidx, u16 flags);
+void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, struct xdp_frame *xdpf,
+ u64 dma_addr, int len, int *offset, u16 flags);
u16 otx2_get_max_mtu(struct otx2_nic *pfvf);
int otx2_handle_ntuple_tc_features(struct net_device *netdev,
netdev_features_t features);
int otx2_smq_flush(struct otx2_nic *pfvf, int smq);
void otx2_free_bufs(struct otx2_nic *pfvf, struct otx2_pool *pool,
u64 iova, int size);
+int otx2_mcam_entry_init(struct otx2_nic *pfvf);
/* tc support */
int otx2_init_tc(struct otx2_nic *nic);
@@ -1125,4 +1206,19 @@ u16 otx2_select_queue(struct net_device *netdev, struct sk_buff *skb,
int otx2_get_txq_by_classid(struct otx2_nic *pfvf, u16 classid);
void otx2_qos_config_txschq(struct otx2_nic *pfvf);
void otx2_clean_qos_queues(struct otx2_nic *pfvf);
+int rvu_event_up_notify(struct otx2_nic *pf, struct rep_event *info);
+int otx2_setup_tc_cls_flower(struct otx2_nic *nic,
+ struct flow_cls_offload *cls_flower);
+
+static inline int mcam_entry_cmp(const void *a, const void *b)
+{
+ return *(u16 *)a - *(u16 *)b;
+}
+
+dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
+ struct sk_buff *skb, int seg, int *len);
+void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg);
+int otx2_read_free_sqe(struct otx2_nic *pfvf, u16 qidx);
+void otx2_queue_vf_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
+ int first, int mdevs, u64 intr);
#endif /* OTX2_COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
index 28fb643d2917..f110dfa42360 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
@@ -54,6 +54,7 @@ int otx2_pfc_txschq_config(struct otx2_nic *pfvf)
return 0;
}
+EXPORT_SYMBOL(otx2_pfc_txschq_config);
static int otx2_pfc_txschq_alloc_one(struct otx2_nic *pfvf, u8 prio)
{
@@ -122,6 +123,7 @@ int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf)
return 0;
}
+EXPORT_SYMBOL(otx2_pfc_txschq_alloc);
static int otx2_pfc_txschq_stop_one(struct otx2_nic *pfvf, u8 prio)
{
@@ -260,6 +262,7 @@ update_sq_smq_map:
return 0;
}
+EXPORT_SYMBOL(otx2_pfc_txschq_update);
int otx2_pfc_txschq_stop(struct otx2_nic *pfvf)
{
@@ -282,6 +285,7 @@ int otx2_pfc_txschq_stop(struct otx2_nic *pfvf)
return 0;
}
+EXPORT_SYMBOL(otx2_pfc_txschq_stop);
int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf)
{
@@ -311,6 +315,11 @@ int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf)
if (!otx2_sync_mbox_msg(&pfvf->mbox)) {
rsp = (struct cgx_pfc_rsp *)
otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ err = PTR_ERR(rsp);
+ goto unlock;
+ }
+
if (req->rx_pause != rsp->rx_pause || req->tx_pause != rsp->tx_pause) {
dev_warn(pfvf->dev,
"Failed to config PFC\n");
@@ -321,6 +330,7 @@ unlock:
mutex_unlock(&pfvf->mbox.lock);
return err;
}
+EXPORT_SYMBOL(otx2_config_priority_flow_ctrl);
void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx,
bool pfc_enable)
@@ -385,6 +395,7 @@ out:
"Updating BPIDs in CQ and Aura contexts of RQ%d failed with err %d\n",
qidx, err);
}
+EXPORT_SYMBOL(otx2_update_bpid_in_rqctx);
static int otx2_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
{
@@ -424,6 +435,9 @@ process_pfc:
return err;
}
+ /* Disable backpressure on NIX-CPT by default */
+ otx2_nix_cpt_config_bp(pfvf, false);
+
/* Request Per channel Bpids */
if (pfc->pfc_en)
otx2_nix_config_bp(pfvf, true);
@@ -472,3 +486,4 @@ int otx2_dcbnl_set_ops(struct net_device *dev)
return 0;
}
+EXPORT_SYMBOL(otx2_dcbnl_set_ops);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
index 4e1130496573..a72694219df4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
@@ -32,7 +32,8 @@ static int otx2_dl_mcam_count_validate(struct devlink *devlink, u32 id,
}
static int otx2_dl_mcam_count_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct otx2_devlink *otx2_dl = devlink_priv(devlink);
struct otx2_nic *pfvf = otx2_dl->pfvf;
@@ -40,13 +41,15 @@ static int otx2_dl_mcam_count_set(struct devlink *devlink, u32 id,
if (!pfvf->flow_cfg)
return 0;
+ pfvf->flow_cfg->ntuple_cnt = ctx->val.vu16;
otx2_alloc_mcam_entries(pfvf, ctx->val.vu16);
return 0;
}
static int otx2_dl_mcam_count_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct otx2_devlink *otx2_dl = devlink_priv(devlink);
struct otx2_nic *pfvf = otx2_dl->pfvf;
@@ -63,9 +66,69 @@ static int otx2_dl_mcam_count_get(struct devlink *devlink, u32 id,
return 0;
}
+static int otx2_dl_ucast_flt_cnt_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct otx2_devlink *otx2_dl = devlink_priv(devlink);
+ struct otx2_nic *pfvf = otx2_dl->pfvf;
+ int err;
+
+ pfvf->flow_cfg->ucast_flt_cnt = ctx->val.vu8;
+
+ otx2_mcam_flow_del(pfvf);
+ err = otx2_mcam_entry_init(pfvf);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int otx2_dl_ucast_flt_cnt_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct otx2_devlink *otx2_dl = devlink_priv(devlink);
+ struct otx2_nic *pfvf = otx2_dl->pfvf;
+
+ ctx->val.vu8 = pfvf->flow_cfg ? pfvf->flow_cfg->ucast_flt_cnt : 0;
+
+ return 0;
+}
+
+static int otx2_dl_ucast_flt_cnt_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct otx2_devlink *otx2_dl = devlink_priv(devlink);
+ struct otx2_nic *pfvf = otx2_dl->pfvf;
+
+ /* Check for UNICAST filter support */
+ if (!(pfvf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unicast filter not enabled");
+ return -EINVAL;
+ }
+
+ if (!pfvf->flow_cfg) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "pfvf->flow_cfg not initialized");
+ return -EINVAL;
+ }
+
+ if (pfvf->flow_cfg->nr_flows) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot modify count when there are active rules");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
enum otx2_dl_param_id {
OTX2_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
OTX2_DEVLINK_PARAM_ID_MCAM_COUNT,
+ OTX2_DEVLINK_PARAM_ID_UCAST_FLT_CNT,
};
static const struct devlink_param otx2_dl_params[] = {
@@ -74,9 +137,63 @@ static const struct devlink_param otx2_dl_params[] = {
BIT(DEVLINK_PARAM_CMODE_RUNTIME),
otx2_dl_mcam_count_get, otx2_dl_mcam_count_set,
otx2_dl_mcam_count_validate),
+ DEVLINK_PARAM_DRIVER(OTX2_DEVLINK_PARAM_ID_UCAST_FLT_CNT,
+ "unicast_filter_count", DEVLINK_PARAM_TYPE_U8,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ otx2_dl_ucast_flt_cnt_get, otx2_dl_ucast_flt_cnt_set,
+ otx2_dl_ucast_flt_cnt_validate),
};
+#ifdef CONFIG_RVU_ESWITCH
+static int otx2_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
+{
+ struct otx2_devlink *otx2_dl = devlink_priv(devlink);
+ struct otx2_nic *pfvf = otx2_dl->pfvf;
+
+ if (!otx2_rep_dev(pfvf->pdev))
+ return -EOPNOTSUPP;
+
+ *mode = pfvf->esw_mode;
+
+ return 0;
+}
+
+static int otx2_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ struct netlink_ext_ack *extack)
+{
+ struct otx2_devlink *otx2_dl = devlink_priv(devlink);
+ struct otx2_nic *pfvf = otx2_dl->pfvf;
+ int ret = 0;
+
+ if (!otx2_rep_dev(pfvf->pdev))
+ return -EOPNOTSUPP;
+
+ if (pfvf->esw_mode == mode)
+ return 0;
+
+ switch (mode) {
+ case DEVLINK_ESWITCH_MODE_LEGACY:
+ rvu_rep_destroy(pfvf);
+ break;
+ case DEVLINK_ESWITCH_MODE_SWITCHDEV:
+ ret = rvu_rep_create(pfvf, extack);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (!ret)
+ pfvf->esw_mode = mode;
+
+ return ret;
+}
+#endif
+
static const struct devlink_ops otx2_devlink_ops = {
+#ifdef CONFIG_RVU_ESWITCH
+ .eswitch_mode_get = otx2_devlink_eswitch_mode_get,
+ .eswitch_mode_set = otx2_devlink_eswitch_mode_set,
+#endif
};
int otx2_register_dl(struct otx2_nic *pfvf)
@@ -112,6 +229,7 @@ err_dl:
devlink_free(dl);
return err;
}
+EXPORT_SYMBOL(otx2_register_dl);
void otx2_unregister_dl(struct otx2_nic *pfvf)
{
@@ -123,3 +241,4 @@ void otx2_unregister_dl(struct otx2_nic *pfvf)
ARRAY_SIZE(otx2_dl_params));
devlink_free(dl);
}
+EXPORT_SYMBOL(otx2_unregister_dl);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c
index 80d853b343f9..2046dd0da00d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c
@@ -28,6 +28,11 @@ static int otx2_dmacflt_do_add(struct otx2_nic *pf, const u8 *mac,
if (!err) {
rsp = (struct cgx_mac_addr_add_rsp *)
otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ mutex_unlock(&pf->mbox.lock);
+ return PTR_ERR(rsp);
+ }
+
*dmac_index = rsp->index;
}
@@ -200,6 +205,10 @@ int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u32 bit_pos)
rsp = (struct cgx_mac_addr_update_rsp *)
otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ rc = PTR_ERR(rsp);
+ goto out;
+ }
pf->flow_cfg->bmap_to_dmacindex[bit_pos] = rsp->index;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 7f786de61014..b90e23dc49de 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -15,6 +15,7 @@
#include "otx2_common.h"
#include "otx2_ptp.h"
+#include <cgx_fw_if.h>
#define DRV_NAME "rvu-nicpf"
#define DRV_VF_NAME "rvu-nicvf"
@@ -85,26 +86,22 @@ static void otx2_get_qset_strings(struct otx2_nic *pfvf, u8 **data, int qset)
int start_qidx = qset * pfvf->hw.rx_queues;
int qidx, stats;
- for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) {
- for (stats = 0; stats < otx2_n_queue_stats; stats++) {
- sprintf(*data, "rxq%d: %s", qidx + start_qidx,
- otx2_queue_stats[stats].name);
- *data += ETH_GSTRING_LEN;
- }
- }
+ for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++)
+ for (stats = 0; stats < otx2_n_queue_stats; stats++)
+ ethtool_sprintf(data, "rxq%d: %s", qidx + start_qidx,
+ otx2_queue_stats[stats].name);
- for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++) {
- for (stats = 0; stats < otx2_n_queue_stats; stats++) {
+ for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++)
+ for (stats = 0; stats < otx2_n_queue_stats; stats++)
if (qidx >= pfvf->hw.non_qos_queues)
- sprintf(*data, "txq_qos%d: %s",
- qidx + start_qidx - pfvf->hw.non_qos_queues,
- otx2_queue_stats[stats].name);
+ ethtool_sprintf(data, "txq_qos%d: %s",
+ qidx + start_qidx -
+ pfvf->hw.non_qos_queues,
+ otx2_queue_stats[stats].name);
else
- sprintf(*data, "txq%d: %s", qidx + start_qidx,
- otx2_queue_stats[stats].name);
- *data += ETH_GSTRING_LEN;
- }
- }
+ ethtool_sprintf(data, "txq%d: %s",
+ qidx + start_qidx,
+ otx2_queue_stats[stats].name);
}
static void otx2_get_strings(struct net_device *netdev, u32 sset, u8 *data)
@@ -115,36 +112,25 @@ static void otx2_get_strings(struct net_device *netdev, u32 sset, u8 *data)
if (sset != ETH_SS_STATS)
return;
- for (stats = 0; stats < otx2_n_dev_stats; stats++) {
- memcpy(data, otx2_dev_stats[stats].name, ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
- }
+ for (stats = 0; stats < otx2_n_dev_stats; stats++)
+ ethtool_puts(&data, otx2_dev_stats[stats].name);
- for (stats = 0; stats < otx2_n_drv_stats; stats++) {
- memcpy(data, otx2_drv_stats[stats].name, ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
- }
+ for (stats = 0; stats < otx2_n_drv_stats; stats++)
+ ethtool_puts(&data, otx2_drv_stats[stats].name);
otx2_get_qset_strings(pfvf, &data, 0);
if (!test_bit(CN10K_RPM, &pfvf->hw.cap_flag)) {
- for (stats = 0; stats < CGX_RX_STATS_COUNT; stats++) {
- sprintf(data, "cgx_rxstat%d: ", stats);
- data += ETH_GSTRING_LEN;
- }
+ for (stats = 0; stats < CGX_RX_STATS_COUNT; stats++)
+ ethtool_sprintf(&data, "cgx_rxstat%d: ", stats);
- for (stats = 0; stats < CGX_TX_STATS_COUNT; stats++) {
- sprintf(data, "cgx_txstat%d: ", stats);
- data += ETH_GSTRING_LEN;
- }
+ for (stats = 0; stats < CGX_TX_STATS_COUNT; stats++)
+ ethtool_sprintf(&data, "cgx_txstat%d: ", stats);
}
- strcpy(data, "reset_count");
- data += ETH_GSTRING_LEN;
- sprintf(data, "Fec Corrected Errors: ");
- data += ETH_GSTRING_LEN;
- sprintf(data, "Fec Uncorrected Errors: ");
- data += ETH_GSTRING_LEN;
+ ethtool_puts(&data, "reset_count");
+ ethtool_puts(&data, "Fec Corrected Errors: ");
+ ethtool_puts(&data, "Fec Uncorrected Errors: ");
}
static void otx2_get_qset_stats(struct otx2_nic *pfvf,
@@ -330,7 +316,7 @@ static void otx2_get_pauseparam(struct net_device *netdev,
struct otx2_nic *pfvf = netdev_priv(netdev);
struct cgx_pause_frm_cfg *req, *rsp;
- if (is_otx2_lbkvf(pfvf->pdev))
+ if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev))
return;
mutex_lock(&pfvf->mbox.lock);
@@ -343,6 +329,11 @@ static void otx2_get_pauseparam(struct net_device *netdev,
if (!otx2_sync_mbox_msg(&pfvf->mbox)) {
rsp = (struct cgx_pause_frm_cfg *)
otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return;
+ }
+
pause->rx_pause = rsp->rx_pause;
pause->tx_pause = rsp->tx_pause;
}
@@ -357,7 +348,7 @@ static int otx2_set_pauseparam(struct net_device *netdev,
if (pause->autoneg)
return -EOPNOTSUPP;
- if (is_otx2_lbkvf(pfvf->pdev))
+ if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev))
return -EOPNOTSUPP;
if (pause->rx_pause)
@@ -569,10 +560,13 @@ static int otx2_set_coalesce(struct net_device *netdev,
return 0;
}
-static int otx2_get_rss_hash_opts(struct otx2_nic *pfvf,
- struct ethtool_rxnfc *nfc)
+static int otx2_get_rss_hash_opts(struct net_device *dev,
+ struct ethtool_rxfh_fields *nfc)
{
- struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ struct otx2_nic *pfvf = netdev_priv(dev);
+ struct otx2_rss_info *rss;
+
+ rss = &pfvf->hw.rss_info;
if (!(rss->flowkey_cfg &
(NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6)))
@@ -619,12 +613,17 @@ static int otx2_get_rss_hash_opts(struct otx2_nic *pfvf,
return 0;
}
-static int otx2_set_rss_hash_opts(struct otx2_nic *pfvf,
- struct ethtool_rxnfc *nfc)
+static int otx2_set_rss_hash_opts(struct net_device *dev,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
{
- struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ struct otx2_nic *pfvf = netdev_priv(dev);
u32 rxh_l4 = RXH_L4_B_0_1 | RXH_L4_B_2_3;
- u32 rss_cfg = rss->flowkey_cfg;
+ struct otx2_rss_info *rss;
+ u32 rss_cfg;
+
+ rss = &pfvf->hw.rss_info;
+ rss_cfg = rss->flowkey_cfg;
if (!rss->enable) {
netdev_err(pfvf->netdev,
@@ -753,8 +752,6 @@ static int otx2_get_rxnfc(struct net_device *dev,
if (netif_running(dev) && ntuple)
ret = otx2_get_all_flows(pfvf, nfc, rules);
break;
- case ETHTOOL_GRXFH:
- return otx2_get_rss_hash_opts(pfvf, nfc);
default:
break;
}
@@ -769,9 +766,6 @@ static int otx2_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc)
pfvf->flow_cfg->ntuple = ntuple;
switch (nfc->cmd) {
- case ETHTOOL_SRXFH:
- ret = otx2_set_rss_hash_opts(pfvf, nfc);
- break;
case ETHTOOL_SRXCLSRLINS:
if (netif_running(dev) && ntuple)
ret = otx2_add_flow(pfvf, nfc);
@@ -802,35 +796,75 @@ static u32 otx2_get_rxfh_indir_size(struct net_device *dev)
return MAX_RSS_INDIR_TBL_SIZE;
}
-static int otx2_rss_ctx_delete(struct otx2_nic *pfvf, int ctx_id)
+static int otx2_create_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
- struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ struct otx2_nic *pfvf = netdev_priv(dev);
+ struct otx2_rss_info *rss;
+ unsigned int queues;
+ u32 *ind_tbl;
+ int idx;
+
+ rss = &pfvf->hw.rss_info;
+ queues = pfvf->hw.rx_queues;
+
+ if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+ ctx->hfunc = ETH_RSS_HASH_TOP;
+
+ if (!rss->enable) {
+ netdev_err(dev, "RSS is disabled, cannot change settings\n");
+ return -EIO;
+ }
- otx2_rss_ctx_flow_del(pfvf, ctx_id);
- kfree(rss->rss_ctx[ctx_id]);
- rss->rss_ctx[ctx_id] = NULL;
+ ind_tbl = rxfh->indir;
+ if (!ind_tbl) {
+ ind_tbl = ethtool_rxfh_context_indir(ctx);
+ for (idx = 0; idx < rss->rss_size; idx++)
+ ind_tbl[idx] = ethtool_rxfh_indir_default(idx, queues);
+ }
+ otx2_set_rss_table(pfvf, rxfh->rss_context, ind_tbl);
return 0;
}
-static int otx2_rss_ctx_create(struct otx2_nic *pfvf,
- u32 *rss_context)
+static int otx2_modify_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
- struct otx2_rss_info *rss = &pfvf->hw.rss_info;
- u8 ctx;
+ struct otx2_nic *pfvf = netdev_priv(dev);
- for (ctx = 0; ctx < MAX_RSS_GROUPS; ctx++) {
- if (!rss->rss_ctx[ctx])
- break;
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ if (!pfvf->hw.rss_info.enable) {
+ netdev_err(dev, "RSS is disabled, cannot change settings\n");
+ return -EIO;
}
- if (ctx == MAX_RSS_GROUPS)
- return -EINVAL;
- rss->rss_ctx[ctx] = kzalloc(sizeof(*rss->rss_ctx[ctx]), GFP_KERNEL);
- if (!rss->rss_ctx[ctx])
- return -ENOMEM;
- *rss_context = ctx;
+ if (rxfh->indir)
+ otx2_set_rss_table(pfvf, rxfh->rss_context, rxfh->indir);
+
+ return 0;
+}
+
+static int otx2_remove_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_context *ctx,
+ u32 rss_context,
+ struct netlink_ext_ack *extack)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+
+ if (!pfvf->hw.rss_info.enable) {
+ netdev_err(dev, "RSS is disabled, cannot change settings\n");
+ return -EIO;
+ }
+ otx2_rss_ctx_flow_del(pfvf, rss_context);
return 0;
}
@@ -839,23 +873,14 @@ static int otx2_set_rxfh(struct net_device *dev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
- u32 rss_context = DEFAULT_RSS_CONTEXT_GROUP;
struct otx2_nic *pfvf = netdev_priv(dev);
- struct otx2_rss_ctx *rss_ctx;
struct otx2_rss_info *rss;
- int ret, idx;
+ int idx;
if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (rxfh->rss_context)
- rss_context = rxfh->rss_context;
-
- if (rss_context != ETH_RXFH_CONTEXT_ALLOC &&
- rss_context >= MAX_RSS_GROUPS)
- return -EINVAL;
-
rss = &pfvf->hw.rss_info;
if (!rss->enable) {
@@ -867,21 +892,12 @@ static int otx2_set_rxfh(struct net_device *dev,
memcpy(rss->key, rxfh->key, sizeof(rss->key));
otx2_set_rss_key(pfvf);
}
- if (rxfh->rss_delete)
- return otx2_rss_ctx_delete(pfvf, rss_context);
-
- if (rss_context == ETH_RXFH_CONTEXT_ALLOC) {
- ret = otx2_rss_ctx_create(pfvf, &rss_context);
- rxfh->rss_context = rss_context;
- if (ret)
- return ret;
- }
+
if (rxfh->indir) {
- rss_ctx = rss->rss_ctx[rss_context];
for (idx = 0; idx < rss->rss_size; idx++)
- rss_ctx->ind_tbl[idx] = rxfh->indir[idx];
+ rss->ind_tbl[idx] = rxfh->indir[idx];
}
- otx2_set_rss_table(pfvf, rss_context);
+ otx2_set_rss_table(pfvf, DEFAULT_RSS_CONTEXT_GROUP, NULL);
return 0;
}
@@ -890,9 +906,7 @@ static int otx2_set_rxfh(struct net_device *dev,
static int otx2_get_rxfh(struct net_device *dev,
struct ethtool_rxfh_param *rxfh)
{
- u32 rss_context = DEFAULT_RSS_CONTEXT_GROUP;
struct otx2_nic *pfvf = netdev_priv(dev);
- struct otx2_rss_ctx *rss_ctx;
struct otx2_rss_info *rss;
u32 *indir = rxfh->indir;
int idx, rx_queues;
@@ -900,28 +914,21 @@ static int otx2_get_rxfh(struct net_device *dev,
rss = &pfvf->hw.rss_info;
rxfh->hfunc = ETH_RSS_HASH_TOP;
- if (rxfh->rss_context)
- rss_context = rxfh->rss_context;
-
if (!indir)
return 0;
- if (!rss->enable && rss_context == DEFAULT_RSS_CONTEXT_GROUP) {
+ if (!rss->enable) {
rx_queues = pfvf->hw.rx_queues;
for (idx = 0; idx < MAX_RSS_INDIR_TBL_SIZE; idx++)
indir[idx] = ethtool_rxfh_indir_default(idx, rx_queues);
return 0;
}
- if (rss_context >= MAX_RSS_GROUPS)
- return -ENOENT;
-
- rss_ctx = rss->rss_ctx[rss_context];
- if (!rss_ctx)
- return -ENOENT;
- if (indir) {
- for (idx = 0; idx < rss->rss_size; idx++)
- indir[idx] = rss_ctx->ind_tbl[idx];
+ for (idx = 0; idx < rss->rss_size; idx++) {
+ /* Ignore if the rx queue is AF_XDP zero-copy enabled */
+ if (test_bit(rss->ind_tbl[idx], pfvf->af_xdp_zc_qidx))
+ continue;
+ indir[idx] = rss->ind_tbl[idx];
}
if (rxfh->key)
memcpy(rxfh->key, rss->key, sizeof(rss->key));
@@ -947,14 +954,14 @@ static u32 otx2_get_link(struct net_device *netdev)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
- /* LBK link is internal and always UP */
- if (is_otx2_lbkvf(pfvf->pdev))
+ /* LBK and SDP links are internal and always UP */
+ if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev))
return 1;
return pfvf->linfo.link_up;
}
static int otx2_get_ts_info(struct net_device *netdev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
@@ -962,8 +969,6 @@ static int otx2_get_ts_info(struct net_device *netdev,
return ethtool_op_get_ts_info(netdev, info);
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
@@ -1074,6 +1079,11 @@ static int otx2_set_fecparam(struct net_device *netdev,
rsp = (struct fec_mode *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ err = PTR_ERR(rsp);
+ goto end;
+ }
+
if (rsp->fec >= 0)
pfvf->linfo.fec = rsp->fec;
else
@@ -1126,17 +1136,9 @@ static void otx2_get_link_mode_info(u64 link_mode_bmap,
*link_ksettings)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(otx2_link_modes) = { 0, };
- const int otx2_sgmii_features[6] = {
- ETHTOOL_LINK_MODE_10baseT_Half_BIT,
- ETHTOOL_LINK_MODE_10baseT_Full_BIT,
- ETHTOOL_LINK_MODE_100baseT_Half_BIT,
- ETHTOOL_LINK_MODE_100baseT_Full_BIT,
- ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
- ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
- };
/* CGX link modes to Ethtool link mode mapping */
- const int cgx_link_mode[27] = {
- 0, /* SGMII Mode */
+ const int cgx_link_mode[CGX_MODE_MAX] = {
+ 0, /* SGMII 1000baseT */
ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
@@ -1166,14 +1168,19 @@ static void otx2_get_link_mode_info(u64 link_mode_bmap,
};
u8 bit;
- for_each_set_bit(bit, (unsigned long *)&link_mode_bmap, 27) {
- /* SGMII mode is set */
- if (bit == 0)
- linkmode_set_bit_array(otx2_sgmii_features,
- ARRAY_SIZE(otx2_sgmii_features),
- otx2_link_modes);
- else
+ for_each_set_bit(bit, (unsigned long *)&link_mode_bmap, ARRAY_SIZE(cgx_link_mode)) {
+ if (bit == CGX_MODE_SGMII_10M_BIT) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, otx2_link_modes);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, otx2_link_modes);
+ } else if (bit == CGX_MODE_SGMII_100M_BIT) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, otx2_link_modes);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, otx2_link_modes);
+ } else if (bit == CGX_MODE_SGMII) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, otx2_link_modes);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, otx2_link_modes);
+ } else {
linkmode_set_bit(cgx_link_mode[bit], otx2_link_modes);
+ }
}
if (req_mode == OTX2_MODE_ADVERTISED)
@@ -1214,23 +1221,10 @@ static int otx2_get_link_ksettings(struct net_device *netdev,
return 0;
}
-static void otx2_get_advertised_mode(const struct ethtool_link_ksettings *cmd,
- u64 *mode)
-{
- u32 bit_pos;
-
- /* Firmware does not support requesting multiple advertised modes
- * return first set bit
- */
- bit_pos = find_first_bit(cmd->link_modes.advertising,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
- if (bit_pos != __ETHTOOL_LINK_MODE_MASK_NBITS)
- *mode = bit_pos;
-}
-
static int otx2_set_link_ksettings(struct net_device *netdev,
const struct ethtool_link_ksettings *cmd)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct otx2_nic *pf = netdev_priv(netdev);
struct ethtool_link_ksettings cur_ks;
struct cgx_set_link_mode_req *req;
@@ -1267,7 +1261,20 @@ static int otx2_set_link_ksettings(struct net_device *netdev,
*/
req->args.duplex = cmd->base.duplex ^ 0x1;
req->args.an = cmd->base.autoneg;
- otx2_get_advertised_mode(cmd, &req->args.mode);
+ /* Mask unsupported modes and send message to AF */
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mask);
+
+ linkmode_copy(req->args.advertising,
+ cmd->link_modes.advertising);
+ linkmode_andnot(req->args.advertising,
+ req->args.advertising, mask);
+
+ /* Inform AF that it needs to parse this differently */
+ if (bitmap_weight(req->args.advertising,
+ __ETHTOOL_LINK_MODE_MASK_NBITS) >= 2)
+ req->args.multimode = true;
err = otx2_sync_mbox_msg(&pf->mbox);
end:
@@ -1276,7 +1283,8 @@ end:
}
static void otx2_get_fec_stats(struct net_device *netdev,
- struct ethtool_fec_stats *fec_stats)
+ struct ethtool_fec_stats *fec_stats,
+ struct ethtool_fec_hist *hist)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
struct cgx_fw_data *rsp;
@@ -1309,12 +1317,12 @@ static void otx2_get_fec_stats(struct net_device *netdev,
}
static const struct ethtool_ops otx2_ethtool_ops = {
- .cap_rss_ctx_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USE_ADAPTIVE,
.supported_ring_params = ETHTOOL_RING_USE_RX_BUF_LEN |
ETHTOOL_RING_USE_CQE_SIZE,
+ .rxfh_max_num_contexts = MAX_RSS_GROUPS,
.get_link = otx2_get_link,
.get_drvinfo = otx2_get_drvinfo,
.get_strings = otx2_get_strings,
@@ -1332,6 +1340,11 @@ static const struct ethtool_ops otx2_ethtool_ops = {
.get_rxfh_indir_size = otx2_get_rxfh_indir_size,
.get_rxfh = otx2_get_rxfh,
.set_rxfh = otx2_set_rxfh,
+ .get_rxfh_fields = otx2_get_rss_hash_opts,
+ .set_rxfh_fields = otx2_set_rss_hash_opts,
+ .create_rxfh_context = otx2_create_rxfh,
+ .modify_rxfh_context = otx2_modify_rxfh,
+ .remove_rxfh_context = otx2_remove_rxfh,
.get_msglevel = otx2_get_msglevel,
.set_msglevel = otx2_set_msglevel,
.get_pauseparam = otx2_get_pauseparam,
@@ -1367,20 +1380,15 @@ static void otx2vf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
if (sset != ETH_SS_STATS)
return;
- for (stats = 0; stats < otx2_n_dev_stats; stats++) {
- memcpy(data, otx2_dev_stats[stats].name, ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
- }
+ for (stats = 0; stats < otx2_n_dev_stats; stats++)
+ ethtool_puts(&data, otx2_dev_stats[stats].name);
- for (stats = 0; stats < otx2_n_drv_stats; stats++) {
- memcpy(data, otx2_drv_stats[stats].name, ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
- }
+ for (stats = 0; stats < otx2_n_drv_stats; stats++)
+ ethtool_puts(&data, otx2_drv_stats[stats].name);
otx2_get_qset_strings(vf, &data, 0);
- strcpy(data, "reset_count");
- data += ETH_GSTRING_LEN;
+ ethtool_puts(&data, "reset_count");
}
static void otx2vf_get_ethtool_stats(struct net_device *netdev,
@@ -1421,7 +1429,7 @@ static int otx2vf_get_link_ksettings(struct net_device *netdev,
{
struct otx2_nic *pfvf = netdev_priv(netdev);
- if (is_otx2_lbkvf(pfvf->pdev)) {
+ if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev)) {
cmd->base.duplex = DUPLEX_FULL;
cmd->base.speed = SPEED_100000;
} else {
@@ -1431,12 +1439,12 @@ static int otx2vf_get_link_ksettings(struct net_device *netdev,
}
static const struct ethtool_ops otx2vf_ethtool_ops = {
- .cap_rss_ctx_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USE_ADAPTIVE,
.supported_ring_params = ETHTOOL_RING_USE_RX_BUF_LEN |
ETHTOOL_RING_USE_CQE_SIZE,
+ .rxfh_max_num_contexts = MAX_RSS_GROUPS,
.get_link = otx2_get_link,
.get_drvinfo = otx2vf_get_drvinfo,
.get_strings = otx2vf_get_strings,
@@ -1450,6 +1458,11 @@ static const struct ethtool_ops otx2vf_ethtool_ops = {
.get_rxfh_indir_size = otx2_get_rxfh_indir_size,
.get_rxfh = otx2_get_rxfh,
.set_rxfh = otx2_set_rxfh,
+ .get_rxfh_fields = otx2_get_rss_hash_opts,
+ .set_rxfh_fields = otx2_set_rss_hash_opts,
+ .create_rxfh_context = otx2_create_rxfh,
+ .modify_rxfh_context = otx2_modify_rxfh,
+ .remove_rxfh_context = otx2_remove_rxfh,
.get_ringparam = otx2_get_ringparam,
.set_ringparam = otx2_set_ringparam,
.get_coalesce = otx2_get_coalesce,
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
index 97a71e9b8563..64c6d9162ef6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
@@ -12,8 +12,6 @@
#define OTX2_DEFAULT_ACTION 0x1
-static int otx2_mcam_entry_init(struct otx2_nic *pfvf);
-
struct otx2_flow {
struct ethtool_rx_flow_spec flow_spec;
struct list_head list;
@@ -66,11 +64,6 @@ static int otx2_free_ntuple_mcam_entries(struct otx2_nic *pfvf)
return 0;
}
-static int mcam_entry_cmp(const void *a, const void *b)
-{
- return *(u16 *)a - *(u16 *)b;
-}
-
int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count)
{
struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
@@ -121,6 +114,8 @@ int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count)
rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
(&pfvf->mbox.mbox, 0, &req->hdr);
+ if (IS_ERR(rsp))
+ goto exit;
for (ent = 0; ent < rsp->count; ent++)
flow_cfg->flow_ent[ent + allocated] = rsp->entry_list[ent];
@@ -161,7 +156,7 @@ exit:
}
EXPORT_SYMBOL(otx2_alloc_mcam_entries);
-static int otx2_mcam_entry_init(struct otx2_nic *pfvf)
+int otx2_mcam_entry_init(struct otx2_nic *pfvf)
{
struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
struct npc_get_field_status_req *freq;
@@ -172,7 +167,7 @@ static int otx2_mcam_entry_init(struct otx2_nic *pfvf)
int ent, count;
vf_vlan_max_flows = pfvf->total_vfs * OTX2_PER_VF_VLAN_FLOWS;
- count = OTX2_MAX_UNICAST_FLOWS +
+ count = flow_cfg->ucast_flt_cnt +
OTX2_MAX_VLAN_FLOWS + vf_vlan_max_flows;
flow_cfg->def_ent = devm_kmalloc_array(pfvf->dev, count,
@@ -199,6 +194,10 @@ static int otx2_mcam_entry_init(struct otx2_nic *pfvf)
rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
(&pfvf->mbox.mbox, 0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return PTR_ERR(rsp);
+ }
if (rsp->count != req->count) {
netdev_info(pfvf->netdev,
@@ -214,7 +213,7 @@ static int otx2_mcam_entry_init(struct otx2_nic *pfvf)
flow_cfg->vf_vlan_offset = 0;
flow_cfg->unicast_offset = vf_vlan_max_flows;
flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset +
- OTX2_MAX_UNICAST_FLOWS;
+ flow_cfg->ucast_flt_cnt;
pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT;
/* Check if NPC_DMAC field is supported
@@ -234,6 +233,10 @@ static int otx2_mcam_entry_init(struct otx2_nic *pfvf)
frsp = (struct npc_get_field_status_rsp *)otx2_mbox_get_rsp
(&pfvf->mbox.mbox, 0, &freq->hdr);
+ if (IS_ERR(frsp)) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return PTR_ERR(frsp);
+ }
if (frsp->enable) {
pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT;
@@ -244,7 +247,7 @@ static int otx2_mcam_entry_init(struct otx2_nic *pfvf)
mutex_unlock(&pfvf->mbox.lock);
/* Allocate entries for Ntuple filters */
- count = otx2_alloc_mcam_entries(pfvf, OTX2_DEFAULT_FLOWCOUNT);
+ count = otx2_alloc_mcam_entries(pfvf, flow_cfg->ntuple_cnt);
if (count <= 0) {
otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
return 0;
@@ -252,8 +255,10 @@ static int otx2_mcam_entry_init(struct otx2_nic *pfvf)
pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;
+ refcount_set(&flow_cfg->mark_flows, 1);
return 0;
}
+EXPORT_SYMBOL(otx2_mcam_entry_init);
/* TODO : revisit on size */
#define OTX2_DMAC_FLTR_BITMAP_SZ (4 * 2048 + 32)
@@ -301,6 +306,9 @@ int otx2_mcam_flow_init(struct otx2_nic *pf)
INIT_LIST_HEAD(&pf->flow_cfg->flow_list);
INIT_LIST_HEAD(&pf->flow_cfg->flow_list_tc);
+ pf->flow_cfg->ucast_flt_cnt = OTX2_DEFAULT_UNICAST_FLOWS;
+ pf->flow_cfg->ntuple_cnt = OTX2_DEFAULT_FLOWCOUNT;
+
/* Allocate bare minimum number of MCAM entries needed for
* unicast and ntuple filters.
*/
@@ -313,7 +321,7 @@ int otx2_mcam_flow_init(struct otx2_nic *pf)
return 0;
pf->mac_table = devm_kzalloc(pf->dev, sizeof(struct otx2_mac_table)
- * OTX2_MAX_UNICAST_FLOWS, GFP_KERNEL);
+ * pf->flow_cfg->ucast_flt_cnt, GFP_KERNEL);
if (!pf->mac_table)
return -ENOMEM;
@@ -355,7 +363,7 @@ static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac)
return -ENOMEM;
/* don't have free mcam entries or uc list is greater than allotted */
- if (netdev_uc_count(pf->netdev) > OTX2_MAX_UNICAST_FLOWS)
+ if (netdev_uc_count(pf->netdev) > pf->flow_cfg->ucast_flt_cnt)
return -ENOMEM;
mutex_lock(&pf->mbox.lock);
@@ -366,7 +374,7 @@ static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac)
}
/* unicast offset starts at 32; entries 0..31 are for ntuple */
- for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
+ for (i = 0; i < pf->flow_cfg->ucast_flt_cnt; i++) {
if (pf->mac_table[i].inuse)
continue;
ether_addr_copy(pf->mac_table[i].addr, mac);
@@ -409,7 +417,7 @@ static bool otx2_get_mcamentry_for_mac(struct otx2_nic *pf, const u8 *mac,
{
int i;
- for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
+ for (i = 0; i < pf->flow_cfg->ucast_flt_cnt; i++) {
if (!pf->mac_table[i].inuse)
continue;
@@ -1393,6 +1401,7 @@ int otx2_destroy_mcam_flows(struct otx2_nic *pfvf)
}
pfvf->flags &= ~OTX2_FLAG_MCAM_ENTRIES_ALLOC;
+ flow_cfg->max_flows = 0;
mutex_unlock(&pfvf->mbox.lock);
return 0;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 3f46d5e0fb2e..a7feb4c392b3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -26,6 +26,8 @@
#include "cn10k.h"
#include "qos.h"
#include <rvu_trace.h>
+#include "cn10k_ipsec.h"
+#include "otx2_xsk.h"
#define DRV_NAME "rvu_nicpf"
#define DRV_STRING "Marvell RVU NIC Physical Function Driver"
@@ -67,7 +69,7 @@ static int otx2_change_mtu(struct net_device *netdev, int new_mtu)
netdev_info(netdev, "Changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (if_up)
err = otx2_open(netdev);
@@ -204,7 +206,8 @@ static int otx2_register_flr_me_intr(struct otx2_nic *pf, int numvfs)
/* Register ME interrupt handler */
irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME0 * NAME_SIZE];
- snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME0", rvu_get_pf(pf->pcifunc));
+ snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME0",
+ rvu_get_pf(pf->pdev, pf->pcifunc));
ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0),
otx2_pf_me_intr_handler, 0, irq_name, pf);
if (ret) {
@@ -214,7 +217,8 @@ static int otx2_register_flr_me_intr(struct otx2_nic *pf, int numvfs)
/* Register FLR interrupt handler */
irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR0 * NAME_SIZE];
- snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR0", rvu_get_pf(pf->pcifunc));
+ snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR0",
+ rvu_get_pf(pf->pdev, pf->pcifunc));
ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0),
otx2_pf_flr_intr_handler, 0, irq_name, pf);
if (ret) {
@@ -226,7 +230,7 @@ static int otx2_register_flr_me_intr(struct otx2_nic *pf, int numvfs)
if (numvfs > 64) {
irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME1 * NAME_SIZE];
snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME1",
- rvu_get_pf(pf->pcifunc));
+ rvu_get_pf(pf->pdev, pf->pcifunc));
ret = request_irq(pci_irq_vector
(pf->pdev, RVU_PF_INT_VEC_VFME1),
otx2_pf_me_intr_handler, 0, irq_name, pf);
@@ -236,7 +240,7 @@ static int otx2_register_flr_me_intr(struct otx2_nic *pf, int numvfs)
}
irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR1 * NAME_SIZE];
snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR1",
- rvu_get_pf(pf->pcifunc));
+ rvu_get_pf(pf->pdev, pf->pcifunc));
ret = request_irq(pci_irq_vector
(pf->pdev, RVU_PF_INT_VEC_VFFLR1),
otx2_pf_flr_intr_handler, 0, irq_name, pf);
@@ -292,8 +296,8 @@ static int otx2_pf_flr_init(struct otx2_nic *pf, int num_vfs)
return 0;
}
-static void otx2_queue_vf_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
- int first, int mdevs, u64 intr)
+void otx2_queue_vf_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
+ int first, int mdevs, u64 intr)
{
struct otx2_mbox_dev *mdev;
struct otx2_mbox *mbox;
@@ -450,7 +454,6 @@ static void otx2_pfvf_mbox_handler(struct work_struct *work)
struct mbox_msghdr *msg = NULL;
int offset, vf_idx, id, err;
struct otx2_mbox_dev *mdev;
- struct mbox_hdr *req_hdr;
struct otx2_mbox *mbox;
struct mbox *vf_mbox;
struct otx2_nic *pf;
@@ -461,9 +464,11 @@ static void otx2_pfvf_mbox_handler(struct work_struct *work)
mbox = &pf->mbox_pfvf[0].mbox;
mdev = &mbox->dev[vf_idx];
- req_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
- offset = ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
+ offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+
+ trace_otx2_msg_status(pf->pdev, "PF-VF down queue handler(forwarding)",
+ vf_mbox->num_msgs);
for (id = 0; id < vf_mbox->num_msgs; id++) {
msg = (struct mbox_msghdr *)(mdev->mbase + mbox->rx_start +
@@ -473,7 +478,7 @@ static void otx2_pfvf_mbox_handler(struct work_struct *work)
goto inval_msg;
/* Set VF's number in each of the msg */
- msg->pcifunc &= RVU_PFVF_FUNC_MASK;
+ msg->pcifunc &= ~RVU_PFVF_FUNC_MASK;
msg->pcifunc |= (vf_idx + 1) & RVU_PFVF_FUNC_MASK;
offset = msg->next_msgoff;
}
@@ -494,7 +499,6 @@ static void otx2_pfvf_mbox_up_handler(struct work_struct *work)
struct otx2_nic *pf = vf_mbox->pfvf;
struct otx2_mbox_dev *mdev;
int offset, id, vf_idx = 0;
- struct mbox_hdr *rsp_hdr;
struct mbox_msghdr *msg;
struct otx2_mbox *mbox;
@@ -502,8 +506,10 @@ static void otx2_pfvf_mbox_up_handler(struct work_struct *work)
mbox = &pf->mbox_pfvf[0].mbox_up;
mdev = &mbox->dev[vf_idx];
- rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
- offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+ offset = mbox->rx_start + ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+
+ trace_otx2_msg_status(pf->pdev, "PF-VF up queue handler(response)",
+ vf_mbox->up_num_msgs);
for (id = 0; id < vf_mbox->up_num_msgs; id++) {
msg = mdev->mbase + offset;
@@ -523,6 +529,7 @@ static void otx2_pfvf_mbox_up_handler(struct work_struct *work)
switch (msg->id) {
case MBOX_MSG_CGX_LINK_EVENT:
+ case MBOX_MSG_REP_EVENT_UP_NOTIFY:
break;
default:
if (msg->rc)
@@ -540,7 +547,7 @@ end:
}
}
-static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
+irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
{
struct otx2_nic *pf = (struct otx2_nic *)(pf_irq);
int vfs = pf->total_vfs;
@@ -569,6 +576,23 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
return IRQ_HANDLED;
}
+static void *cn20k_pfvf_mbox_alloc(struct otx2_nic *pf, int numvfs)
+{
+ struct qmem *mbox_addr;
+ int err;
+
+ err = qmem_alloc(&pf->pdev->dev, &mbox_addr, numvfs, MBOX_SIZE);
+ if (err) {
+ dev_err(pf->dev, "qmem alloc fail\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ otx2_write64(pf, RVU_PF_VF_MBOX_ADDR, (u64)mbox_addr->iova);
+ pf->pfvf_mbox_addr = mbox_addr;
+
+ return mbox_addr->base;
+}
+
static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs)
{
void __iomem *hwbase;
@@ -590,20 +614,27 @@ static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs)
if (!pf->mbox_pfvf_wq)
return -ENOMEM;
- /* On CN10K platform, PF <-> VF mailbox region follows after
- * PF <-> AF mailbox region.
+ /* For CN20K, PF allocates mbox memory in DRAM and writes PF/VF
+	 * regions/offsets in RVU_PF_VF_MBOX_ADDR; the RVU_PFX_FUNC_PFAF_MBOX
+ * gives the aliased address to access PF/VF mailbox regions.
*/
- if (test_bit(CN10K_MBOX, &pf->hw.cap_flag))
- base = pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM) +
- MBOX_SIZE;
- else
- base = readq((void __iomem *)((u64)pf->reg_base +
- RVU_PF_VF_BAR4_ADDR));
+ if (is_cn20k(pf->pdev)) {
+ hwbase = (void __iomem *)cn20k_pfvf_mbox_alloc(pf, numvfs);
+ } else {
+ /* On CN10K platform, PF <-> VF mailbox region follows after
+ * PF <-> AF mailbox region.
+ */
+ if (test_bit(CN10K_MBOX, &pf->hw.cap_flag))
+ base = pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM) +
+ MBOX_SIZE;
+ else
+ base = readq(pf->reg_base + RVU_PF_VF_BAR4_ADDR);
- hwbase = ioremap_wc(base, MBOX_SIZE * pf->total_vfs);
- if (!hwbase) {
- err = -ENOMEM;
- goto free_wq;
+ hwbase = ioremap_wc(base, MBOX_SIZE * pf->total_vfs);
+ if (!hwbase) {
+ err = -ENOMEM;
+ goto free_wq;
+ }
}
mbox = &pf->mbox_pfvf[0];
@@ -627,7 +658,7 @@ static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs)
return 0;
free_iomem:
- if (hwbase)
+ if (hwbase && !(is_cn20k(pf->pdev)))
iounmap(hwbase);
free_wq:
destroy_workqueue(pf->mbox_pfvf_wq);
@@ -646,8 +677,10 @@ static void otx2_pfvf_mbox_destroy(struct otx2_nic *pf)
pf->mbox_pfvf_wq = NULL;
}
- if (mbox->mbox.hwbase)
+ if (mbox->mbox.hwbase && !is_cn20k(pf->pdev))
iounmap(mbox->mbox.hwbase);
+ else
+ qmem_free(&pf->pdev->dev, pf->pfvf_mbox_addr);
otx2_mbox_destroy(&mbox->mbox);
}
@@ -671,6 +704,9 @@ static void otx2_disable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
{
int vector;
+ if (is_cn20k(pf->pdev))
+ return cn20k_disable_pfvf_mbox_intr(pf, numvfs);
+
/* Disable PF <=> VF mailbox IRQ */
otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), ~0ull);
otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), ~0ull);
@@ -692,11 +728,14 @@ static int otx2_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
char *irq_name;
int err;
+ if (is_cn20k(pf->pdev))
+ return cn20k_register_pfvf_mbox_intr(pf, numvfs);
+
/* Register MBOX0 interrupt handler */
irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX0 * NAME_SIZE];
if (pf->pcifunc)
snprintf(irq_name, NAME_SIZE,
- "RVUPF%d_VF Mbox0", rvu_get_pf(pf->pcifunc));
+ "RVUPF%d_VF Mbox0", rvu_get_pf(pf->pdev, pf->pcifunc));
else
snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox0");
err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0),
@@ -712,7 +751,8 @@ static int otx2_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX1 * NAME_SIZE];
if (pf->pcifunc)
snprintf(irq_name, NAME_SIZE,
- "RVUPF%d_VF Mbox1", rvu_get_pf(pf->pcifunc));
+ "RVUPF%d_VF Mbox1",
+ rvu_get_pf(pf->pdev, pf->pcifunc));
else
snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox1");
err = request_irq(pci_irq_vector(pf->pdev,
@@ -820,6 +860,9 @@ static void otx2_pfaf_mbox_handler(struct work_struct *work)
offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
pf = af_mbox->pfvf;
+ trace_otx2_msg_status(pf->pdev, "PF-AF down queue handler(response)",
+ num_msgs);
+
for (id = 0; id < num_msgs; id++) {
msg = (struct mbox_msghdr *)(mdev->mbase + offset);
otx2_process_pfaf_mbox_msg(pf, msg);
@@ -836,6 +879,9 @@ static void otx2_handle_link_event(struct otx2_nic *pf)
struct cgx_link_user_info *linfo = &pf->linfo;
struct net_device *netdev = pf->netdev;
+ if (pf->flags & OTX2_FLAG_PORT_UP)
+ return;
+
pr_info("%s NIC Link is %s %d Mbps %s duplex\n", netdev->name,
linfo->link_up ? "UP" : "DOWN", linfo->speed,
linfo->full_duplex ? "Full" : "Half");
@@ -848,6 +894,35 @@ static void otx2_handle_link_event(struct otx2_nic *pf)
}
}
+static int otx2_mbox_up_handler_rep_event_up_notify(struct otx2_nic *pf,
+ struct rep_event *info,
+ struct msg_rsp *rsp)
+{
+ struct net_device *netdev = pf->netdev;
+
+ if (info->event == RVU_EVENT_MTU_CHANGE) {
+ netdev->mtu = info->evt_data.mtu;
+ return 0;
+ }
+
+ if (info->event == RVU_EVENT_PORT_STATE) {
+ if (info->evt_data.port_state) {
+ pf->flags |= OTX2_FLAG_PORT_UP;
+ netif_carrier_on(netdev);
+ netif_tx_start_all_queues(netdev);
+ } else {
+ pf->flags &= ~OTX2_FLAG_PORT_UP;
+ netif_tx_stop_all_queues(netdev);
+ netif_carrier_off(netdev);
+ }
+ return 0;
+ }
+#ifdef CONFIG_RVU_ESWITCH
+ rvu_event_up_notify(pf, info);
+#endif
+ return 0;
+}
+
int otx2_mbox_up_handler_mcs_intr_notify(struct otx2_nic *pf,
struct mcs_intr_info *event,
struct msg_rsp *rsp)
@@ -917,6 +992,7 @@ static int otx2_process_mbox_msg_up(struct otx2_nic *pf,
}
MBOX_UP_CGX_MESSAGES
MBOX_UP_MCS_MESSAGES
+MBOX_UP_REP_MESSAGES
#undef M
break;
default:
@@ -942,6 +1018,9 @@ static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+ trace_otx2_msg_status(pf->pdev, "PF-AF up queue handler(notification)",
+ num_msgs);
+
for (id = 0; id < num_msgs; id++) {
msg = (struct mbox_msghdr *)(mdev->mbase + offset);
@@ -962,7 +1041,7 @@ static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
otx2_mbox_msg_send(mbox, 0);
}
-static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
+irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
{
struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
struct mbox *mw = &pf->mbox;
@@ -991,6 +1070,9 @@ static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
trace_otx2_msg_interrupt(pf->pdev, "UP message from AF to PF",
BIT_ULL(0));
+
+ trace_otx2_msg_status(pf->pdev, "PF-AF up work queued(interrupt)",
+ hdr->num_msgs);
}
if (mbox_data & MBOX_DOWN_MSG) {
@@ -1007,21 +1089,33 @@ static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
trace_otx2_msg_interrupt(pf->pdev, "DOWN reply from AF to PF",
BIT_ULL(0));
+
+ trace_otx2_msg_status(pf->pdev, "PF-AF down work queued(interrupt)",
+ hdr->num_msgs);
}
return IRQ_HANDLED;
}
-static void otx2_disable_mbox_intr(struct otx2_nic *pf)
+void otx2_disable_mbox_intr(struct otx2_nic *pf)
{
- int vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX);
+ int vector;
/* Disable AF => PF mailbox IRQ */
- otx2_write64(pf, RVU_PF_INT_ENA_W1C, BIT_ULL(0));
+ if (!is_cn20k(pf->pdev)) {
+ vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX);
+ otx2_write64(pf, RVU_PF_INT_ENA_W1C, BIT_ULL(0));
+ } else {
+ vector = pci_irq_vector(pf->pdev,
+ RVU_MBOX_PF_INT_VEC_AFPF_MBOX);
+ otx2_write64(pf, RVU_PF_INT_ENA_W1C,
+ BIT_ULL(0) | BIT_ULL(1));
+ }
free_irq(vector, pf);
}
+EXPORT_SYMBOL(otx2_disable_mbox_intr);
-static int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af)
+int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af)
{
struct otx2_hw *hw = &pf->hw;
struct msg_req *req;
@@ -1029,10 +1123,24 @@ static int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af)
int err;
/* Register mailbox interrupt handler */
- irq_name = &hw->irq_name[RVU_PF_INT_VEC_AFPF_MBOX * NAME_SIZE];
- snprintf(irq_name, NAME_SIZE, "RVUPFAF Mbox");
- err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX),
- otx2_pfaf_mbox_intr_handler, 0, irq_name, pf);
+ if (!is_cn20k(pf->pdev)) {
+ irq_name = &hw->irq_name[RVU_PF_INT_VEC_AFPF_MBOX * NAME_SIZE];
+ snprintf(irq_name, NAME_SIZE, "RVUPF%d AFPF Mbox",
+ rvu_get_pf(pf->pdev, pf->pcifunc));
+ err = request_irq(pci_irq_vector
+ (pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX),
+ pf->hw_ops->pfaf_mbox_intr_handler,
+ 0, irq_name, pf);
+ } else {
+ irq_name = &hw->irq_name[RVU_MBOX_PF_INT_VEC_AFPF_MBOX *
+ NAME_SIZE];
+ snprintf(irq_name, NAME_SIZE, "RVUPF%d AFPF Mbox",
+ rvu_get_pf(pf->pdev, pf->pcifunc));
+ err = request_irq(pci_irq_vector
+ (pf->pdev, RVU_MBOX_PF_INT_VEC_AFPF_MBOX),
+ pf->hw_ops->pfaf_mbox_intr_handler,
+ 0, irq_name, pf);
+ }
if (err) {
dev_err(pf->dev,
"RVUPF: IRQ registration failed for PFAF mbox irq\n");
@@ -1042,8 +1150,14 @@ static int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af)
/* Enable mailbox interrupt for msgs coming from AF.
* First clear to avoid spurious interrupts, if any.
*/
- otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
- otx2_write64(pf, RVU_PF_INT_ENA_W1S, BIT_ULL(0));
+ if (!is_cn20k(pf->pdev)) {
+ otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
+ otx2_write64(pf, RVU_PF_INT_ENA_W1S, BIT_ULL(0));
+ } else {
+ otx2_write64(pf, RVU_PF_INT, BIT_ULL(0) | BIT_ULL(1));
+ otx2_write64(pf, RVU_PF_INT_ENA_W1S, BIT_ULL(0) |
+ BIT_ULL(1));
+ }
if (!probe_af)
return 0;
@@ -1065,7 +1179,7 @@ static int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af)
return 0;
}
-static void otx2_pfaf_mbox_destroy(struct otx2_nic *pf)
+void otx2_pfaf_mbox_destroy(struct otx2_nic *pf)
{
struct mbox *mbox = &pf->mbox;
@@ -1074,14 +1188,15 @@ static void otx2_pfaf_mbox_destroy(struct otx2_nic *pf)
pf->mbox_wq = NULL;
}
- if (mbox->mbox.hwbase)
+ if (mbox->mbox.hwbase && !is_cn20k(pf->pdev))
iounmap((void __iomem *)mbox->mbox.hwbase);
otx2_mbox_destroy(&mbox->mbox);
otx2_mbox_destroy(&mbox->mbox_up);
}
+EXPORT_SYMBOL(otx2_pfaf_mbox_destroy);
-static int otx2_pfaf_mbox_init(struct otx2_nic *pf)
+int otx2_pfaf_mbox_init(struct otx2_nic *pf)
{
struct mbox *mbox = &pf->mbox;
void __iomem *hwbase;
@@ -1093,12 +1208,20 @@ static int otx2_pfaf_mbox_init(struct otx2_nic *pf)
if (!pf->mbox_wq)
return -ENOMEM;
- /* Mailbox is a reserved memory (in RAM) region shared between
- * admin function (i.e AF) and this PF, shouldn't be mapped as
- * device memory to allow unaligned accesses.
+ /* For CN20K, AF allocates mbox memory in DRAM and writes PF
+	 * regions/offsets in RVU_MBOX_AF_PFX_ADDR; the RVU_PFX_FUNC_PFAF_MBOX
+ * gives the aliased address to access AF/PF mailbox regions.
*/
- hwbase = ioremap_wc(pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM),
- MBOX_SIZE);
+ if (is_cn20k(pf->pdev))
+ hwbase = pf->reg_base + RVU_PFX_FUNC_PFAF_MBOX +
+ ((u64)BLKADDR_MBOX << RVU_FUNC_BLKADDR_SHIFT);
+ else
+ /* Mailbox is a reserved memory (in RAM) region shared between
+ * admin function (i.e AF) and this PF, shouldn't be mapped as
+ * device memory to allow unaligned accesses.
+ */
+ hwbase = ioremap_wc(pci_resource_start
+ (pf->pdev, PCI_MBOX_BAR_NUM), MBOX_SIZE);
if (!hwbase) {
dev_err(pf->dev, "Unable to map PFAF mailbox region\n");
err = -ENOMEM;
@@ -1150,6 +1273,23 @@ static int otx2_cgx_config_linkevents(struct otx2_nic *pf, bool enable)
return err;
}
+int otx2_reset_mac_stats(struct otx2_nic *pfvf)
+{
+ struct msg_req *req;
+ int err;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_stats_rst(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+}
+
static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable)
{
struct msg_req *msg;
@@ -1254,8 +1394,8 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
{
struct otx2_nic *pf = data;
struct otx2_snd_queue *sq;
- u64 val, *ptr;
- u64 qidx = 0;
+ void __iomem *ptr;
+ u64 val, qidx = 0;
/* CQ */
for (qidx = 0; qidx < pf->qset.cq_cnt; qidx++) {
@@ -1366,7 +1506,7 @@ done:
return IRQ_HANDLED;
}
-static irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq)
+irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq)
{
struct otx2_cq_poll *cq_poll = (struct otx2_cq_poll *)cq_irq;
struct otx2_nic *pf = (struct otx2_nic *)cq_poll->dev;
@@ -1385,20 +1525,25 @@ static irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq)
return IRQ_HANDLED;
}
+EXPORT_SYMBOL(otx2_cq_intr_handler);
-static void otx2_disable_napi(struct otx2_nic *pf)
+void otx2_disable_napi(struct otx2_nic *pf)
{
struct otx2_qset *qset = &pf->qset;
struct otx2_cq_poll *cq_poll;
+ struct work_struct *work;
int qidx;
for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
cq_poll = &qset->napi[qidx];
- cancel_work_sync(&cq_poll->dim.work);
+ work = &cq_poll->dim.work;
+ if (work->func)
+ cancel_work_sync(work);
napi_disable(&cq_poll->napi);
netif_napi_del(&cq_poll->napi);
}
}
+EXPORT_SYMBOL(otx2_disable_napi);
static void otx2_free_cq_res(struct otx2_nic *pf)
{
@@ -1430,6 +1575,8 @@ static void otx2_free_sq_res(struct otx2_nic *pf)
if (!sq->sqe)
continue;
qmem_free(pf->dev, sq->sqe);
+ qmem_free(pf->dev, sq->sqe_ring);
+ qmem_free(pf->dev, sq->cpt_resp);
qmem_free(pf->dev, sq->tso_hdrs);
kfree(sq->sg);
kfree(sq->sqb_ptrs);
@@ -1464,7 +1611,7 @@ static int otx2_get_rbuf_size(struct otx2_nic *pf, int mtu)
return ALIGN(rbuf_size, 2048);
}
-static int otx2_init_hw_resources(struct otx2_nic *pf)
+int otx2_init_hw_resources(struct otx2_nic *pf)
{
struct nix_lf_free_req *free_req;
struct mbox *mbox = &pf->mbox;
@@ -1480,10 +1627,11 @@ static int otx2_init_hw_resources(struct otx2_nic *pf)
hw->sqpool_cnt = otx2_get_total_tx_queues(pf);
hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt;
- /* Maximum hardware supported transmit length */
- pf->tx_max_pktlen = pf->netdev->max_mtu + OTX2_ETH_HLEN;
-
- pf->rbsize = otx2_get_rbuf_size(pf, pf->netdev->mtu);
+ if (!otx2_rep_dev(pf->pdev)) {
+ /* Maximum hardware supported transmit length */
+ pf->tx_max_pktlen = pf->netdev->max_mtu + OTX2_ETH_HLEN;
+ pf->rbsize = otx2_get_rbuf_size(pf, pf->netdev->mtu);
+ }
mutex_lock(&mbox->lock);
/* NPA init */
@@ -1496,6 +1644,9 @@ static int otx2_init_hw_resources(struct otx2_nic *pf)
if (err)
goto err_free_npa_lf;
+	/* Disable backpressure on NIX-CPT by default */
+ otx2_nix_cpt_config_bp(pf, false);
+
/* Enable backpressure for CGX mapped PF/VFs */
if (!is_otx2_lbkvf(pf->pdev))
otx2_nix_config_bp(pf, true);
@@ -1536,10 +1687,15 @@ static int otx2_init_hw_resources(struct otx2_nic *pf)
}
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
- err = otx2_txschq_config(pf, lvl, 0, false);
- if (err) {
- mutex_unlock(&mbox->lock);
- goto err_free_nix_queues;
+ int idx;
+
+ for (idx = 0; idx < pf->hw.txschq_cnt[lvl]; idx++) {
+ err = otx2_txschq_config(pf, lvl, idx, false);
+ if (err) {
+ dev_err(pf->dev, "Failed to config TXSCH\n");
+ mutex_unlock(&mbox->lock);
+ goto err_free_nix_queues;
+ }
}
}
@@ -1588,16 +1744,15 @@ exit:
mutex_unlock(&mbox->lock);
return err;
}
+EXPORT_SYMBOL(otx2_init_hw_resources);
-static void otx2_free_hw_resources(struct otx2_nic *pf)
+void otx2_free_hw_resources(struct otx2_nic *pf)
{
struct otx2_qset *qset = &pf->qset;
struct nix_lf_free_req *free_req;
struct mbox *mbox = &pf->mbox;
struct otx2_cq_queue *cq;
- struct otx2_pool *pool;
struct msg_req *req;
- int pool_id;
int qidx;
/* Ensure all SQE are processed */
@@ -1611,11 +1766,12 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
otx2_pfc_txschq_stop(pf);
#endif
- otx2_clean_qos_queues(pf);
+ if (!otx2_rep_dev(pf->pdev))
+ otx2_clean_qos_queues(pf);
mutex_lock(&mbox->lock);
/* Disable backpressure */
- if (!(pf->pcifunc & RVU_PFVF_FUNC_MASK))
+ if (!is_otx2_lbkvf(pf->pdev))
otx2_nix_config_bp(pf, false);
mutex_unlock(&mbox->lock);
@@ -1637,17 +1793,11 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
/* Free RQ buffer pointers */
otx2_free_aura_ptr(pf, AURA_NIX_RQ);
- for (qidx = 0; qidx < pf->hw.rx_queues; qidx++) {
- pool_id = otx2_get_pool_idx(pf, AURA_NIX_RQ, qidx);
- pool = &pf->qset.pool[pool_id];
- page_pool_destroy(pool->page_pool);
- pool->page_pool = NULL;
- }
-
otx2_free_cq_res(pf);
/* Free all ingress bandwidth profiles allocated */
- cn10k_free_all_ipolicers(pf);
+ if (!otx2_rep_dev(pf->pdev))
+ cn10k_free_all_ipolicers(pf);
mutex_lock(&mbox->lock);
/* Reset NIX LF */
@@ -1675,6 +1825,7 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
}
mutex_unlock(&mbox->lock);
}
+EXPORT_SYMBOL(otx2_free_hw_resources);
static bool otx2_promisc_use_mce_list(struct otx2_nic *pfvf)
{
@@ -1701,7 +1852,7 @@ static void otx2_do_set_rx_mode(struct otx2_nic *pf)
return;
if ((netdev->flags & IFF_PROMISC) ||
- (netdev_uc_count(netdev) > OTX2_MAX_UNICAST_FLOWS)) {
+ (netdev_uc_count(netdev) > pf->flow_cfg->ucast_flt_cnt)) {
promisc = true;
}
@@ -1757,15 +1908,24 @@ static void otx2_dim_work(struct work_struct *w)
dim->state = DIM_START_MEASURE;
}
-int otx2_open(struct net_device *netdev)
+void otx2_free_queue_mem(struct otx2_qset *qset)
+{
+ kfree(qset->sq);
+ qset->sq = NULL;
+ kfree(qset->cq);
+ qset->cq = NULL;
+ kfree(qset->rq);
+ qset->rq = NULL;
+ kfree(qset->napi);
+ qset->napi = NULL;
+}
+EXPORT_SYMBOL(otx2_free_queue_mem);
+
+int otx2_alloc_queue_mem(struct otx2_nic *pf)
{
- struct otx2_nic *pf = netdev_priv(netdev);
- struct otx2_cq_poll *cq_poll = NULL;
struct otx2_qset *qset = &pf->qset;
- int err = 0, qidx, vec;
- char *irq_name;
+ struct otx2_cq_poll *cq_poll;
- netif_carrier_off(netdev);
/* RQ and SQs are mapped to different CQs,
* so find out max CQ IRQs (i.e CINTs) needed.
@@ -1785,7 +1945,6 @@ int otx2_open(struct net_device *netdev)
/* CQ size of SQ */
qset->sqe_cnt = qset->sqe_cnt ? qset->sqe_cnt : Q_COUNT(Q_SIZE_4K);
- err = -ENOMEM;
qset->cq = kcalloc(pf->qset.cq_cnt,
sizeof(struct otx2_cq_queue), GFP_KERNEL);
if (!qset->cq)
@@ -1801,6 +1960,28 @@ int otx2_open(struct net_device *netdev)
if (!qset->rq)
goto err_free_mem;
+ return 0;
+
+err_free_mem:
+ otx2_free_queue_mem(qset);
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(otx2_alloc_queue_mem);
+
+int otx2_open(struct net_device *netdev)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ struct otx2_cq_poll *cq_poll = NULL;
+ struct otx2_qset *qset = &pf->qset;
+ int err = 0, qidx, vec;
+ char *irq_name;
+
+ netif_carrier_off(netdev);
+
+ err = otx2_alloc_queue_mem(pf);
+ if (err)
+ return err;
+
err = otx2_init_hw_resources(pf);
if (err)
goto err_free_mem;
@@ -1862,7 +2043,7 @@ int otx2_open(struct net_device *netdev)
if (err) {
dev_err(pf->dev,
"RVUPF%d: IRQ registration failed for QERR\n",
- rvu_get_pf(pf->pcifunc));
+ rvu_get_pf(pf->pdev, pf->pcifunc));
goto err_disable_napi;
}
@@ -1873,9 +2054,17 @@ int otx2_open(struct net_device *netdev)
vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START;
for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
irq_name = &pf->hw.irq_name[vec * NAME_SIZE];
+ int name_len;
- snprintf(irq_name, NAME_SIZE, "%s-rxtx-%d", pf->netdev->name,
- qidx);
+ name_len = snprintf(irq_name, NAME_SIZE, "%s-rxtx-%d",
+ pf->netdev->name, qidx);
+ if (name_len >= NAME_SIZE) {
+ dev_err(pf->dev,
+ "RVUPF%d: IRQ registration failed for CQ%d, irq name is too long\n",
+ rvu_get_pf(pf->pdev, pf->pcifunc), qidx);
+ err = -EINVAL;
+ goto err_free_cints;
+ }
err = request_irq(pci_irq_vector(pf->pdev, vec),
otx2_cq_intr_handler, 0, irq_name,
@@ -1883,7 +2072,7 @@ int otx2_open(struct net_device *netdev)
if (err) {
dev_err(pf->dev,
"RVUPF%d: IRQ registration failed for CQ%d\n",
- rvu_get_pf(pf->pcifunc), qidx);
+ rvu_get_pf(pf->pdev, pf->pcifunc), qidx);
goto err_free_cints;
}
vec++;
@@ -1911,6 +2100,7 @@ int otx2_open(struct net_device *netdev)
}
pf->flags &= ~OTX2_FLAG_INTF_DOWN;
+ pf->flags &= ~OTX2_FLAG_PORT_UP;
/* 'intf_down' may be checked on any cpu */
smp_wmb();
@@ -1958,10 +2148,7 @@ err_disable_napi:
otx2_disable_napi(pf);
otx2_free_hw_resources(pf);
err_free_mem:
- kfree(qset->sq);
- kfree(qset->cq);
- kfree(qset->rq);
- kfree(qset->napi);
+ otx2_free_queue_mem(qset);
return err;
}
EXPORT_SYMBOL(otx2_open);
@@ -1971,7 +2158,6 @@ int otx2_stop(struct net_device *netdev)
struct otx2_nic *pf = netdev_priv(netdev);
struct otx2_cq_poll *cq_poll = NULL;
struct otx2_qset *qset = &pf->qset;
- struct otx2_rss_info *rss;
int qidx, vec, wrk;
/* If the DOWN flag is set resources are already freed */
@@ -1989,10 +2175,7 @@ int otx2_stop(struct net_device *netdev)
otx2_rxtx_enable(pf, false);
/* Clear RSS enable flag */
- rss = &pf->hw.rss_info;
- rss->enable = false;
- if (!netif_is_rxfh_configured(netdev))
- kfree(rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP]);
+ pf->hw.rss_info.enable = false;
/* Cleanup Queue IRQ */
vec = pci_irq_vector(pf->pdev,
@@ -2026,11 +2209,7 @@ int otx2_stop(struct net_device *netdev)
for (qidx = 0; qidx < netdev->num_tx_queues; qidx++)
netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx));
-
- kfree(qset->sq);
- kfree(qset->cq);
- kfree(qset->rq);
- kfree(qset->napi);
+ otx2_free_queue_mem(qset);
/* Do not clear RQ/SQ ringsize settings */
memset_startat(qset, 0, sqe_cnt);
return 0;
@@ -2041,6 +2220,7 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct otx2_nic *pf = netdev_priv(netdev);
int qidx = skb_get_queue_mapping(skb);
+ struct otx2_dev_stats *dev_stats;
struct otx2_snd_queue *sq;
struct netdev_queue *txq;
int sq_idx;
@@ -2053,6 +2233,8 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
/* Check for minimum and maximum packet length */
if (skb->len <= ETH_HLEN ||
(!skb_shinfo(skb)->gso_size && skb->len > pf->tx_max_pktlen)) {
+ dev_stats = &pf->hw.dev_stats;
+ atomic_long_inc(&dev_stats->tx_discards);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -2060,7 +2242,7 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
sq = &pf->qset.sq[sq_idx];
txq = netdev_get_tx_queue(netdev, qidx);
- if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) {
+ if (!otx2_sq_append_skb(pf, txq, sq, skb, qidx)) {
netif_tx_stop_queue(txq);
/* Check again, incase SQBs got freed up */
@@ -2177,6 +2359,10 @@ static int otx2_set_features(struct net_device *netdev,
return otx2_enable_rxvlan(pf,
features & NETIF_F_HW_VLAN_CTAG_RX);
+ if (changed & NETIF_F_HW_ESP)
+ return cn10k_ipsec_ethtool_init(netdev,
+ features & NETIF_F_HW_ESP);
+
return otx2_handle_ntuple_tc_features(netdev, features);
}
@@ -2259,18 +2445,26 @@ static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable)
return 0;
}
-int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
+int otx2_config_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+
+ *config = pfvf->tstamp;
+ return 0;
+}
+EXPORT_SYMBOL(otx2_config_hwtstamp_get);
+
+int otx2_config_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
- struct hwtstamp_config config;
if (!pfvf->ptp)
return -ENODEV;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- switch (config.tx_type) {
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
if (pfvf->flags & OTX2_FLAG_PTP_ONESTEP_SYNC)
pfvf->flags &= ~OTX2_FLAG_PTP_ONESTEP_SYNC;
@@ -2279,8 +2473,11 @@ int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
otx2_config_hw_tx_tstamp(pfvf, false);
break;
case HWTSTAMP_TX_ONESTEP_SYNC:
- if (!test_bit(CN10K_PTP_ONESTEP, &pfvf->hw.cap_flag))
+ if (!test_bit(CN10K_PTP_ONESTEP, &pfvf->hw.cap_flag)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "One-step time stamping is not supported");
return -ERANGE;
+ }
pfvf->flags |= OTX2_FLAG_PTP_ONESTEP_SYNC;
schedule_delayed_work(&pfvf->ptp->synctstamp_work,
msecs_to_jiffies(500));
@@ -2292,7 +2489,7 @@ int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
return -ERANGE;
}
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
otx2_config_hw_rx_tstamp(pfvf, false);
break;
@@ -2311,35 +2508,17 @@ int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
otx2_config_hw_rx_tstamp(pfvf, true);
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
return -ERANGE;
}
- memcpy(&pfvf->tstamp, &config, sizeof(config));
-
- return copy_to_user(ifr->ifr_data, &config,
- sizeof(config)) ? -EFAULT : 0;
-}
-EXPORT_SYMBOL(otx2_config_hwtstamp);
+ pfvf->tstamp = *config;
-int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
-{
- struct otx2_nic *pfvf = netdev_priv(netdev);
- struct hwtstamp_config *cfg = &pfvf->tstamp;
-
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return otx2_config_hwtstamp(netdev, req);
- case SIOCGHWTSTAMP:
- return copy_to_user(req->ifr_data, cfg,
- sizeof(*cfg)) ? -EFAULT : 0;
- default:
- return -EOPNOTSUPP;
- }
+ return 0;
}
-EXPORT_SYMBOL(otx2_ioctl);
+EXPORT_SYMBOL(otx2_config_hwtstamp_set);
static int otx2_do_set_vf_mac(struct otx2_nic *pf, int vf, const u8 *mac)
{
@@ -2585,7 +2764,6 @@ static int otx2_get_vf_config(struct net_device *netdev, int vf,
static int otx2_xdp_xmit_tx(struct otx2_nic *pf, struct xdp_frame *xdpf,
int qidx)
{
- struct page *page;
u64 dma_addr;
int err = 0;
@@ -2595,11 +2773,11 @@ static int otx2_xdp_xmit_tx(struct otx2_nic *pf, struct xdp_frame *xdpf,
if (dma_mapping_error(pf->dev, dma_addr))
return -ENOMEM;
- err = otx2_xdp_sq_append_pkt(pf, dma_addr, xdpf->len, qidx);
+ err = otx2_xdp_sq_append_pkt(pf, xdpf, dma_addr, xdpf->len,
+ qidx, OTX2_XDP_REDIRECT);
if (!err) {
otx2_dma_unmap_page(pf, dma_addr, xdpf->len, DMA_TO_DEVICE);
- page = virt_to_page(xdpf->data);
- put_page(page);
+ xdp_return_frame(xdpf);
return -ENOMEM;
}
return 0;
@@ -2683,6 +2861,8 @@ static int otx2_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
switch (xdp->command) {
case XDP_SETUP_PROG:
return otx2_xdp_setup(pf, xdp->prog);
+ case XDP_SETUP_XSK_POOL:
+ return otx2_xsk_pool_setup(pf, xdp->xsk.pool, xdp->xsk.queue_id);
default:
return -EINVAL;
}
@@ -2755,17 +2935,19 @@ static const struct net_device_ops otx2_netdev_ops = {
.ndo_set_features = otx2_set_features,
.ndo_tx_timeout = otx2_tx_timeout,
.ndo_get_stats64 = otx2_get_stats64,
- .ndo_eth_ioctl = otx2_ioctl,
.ndo_set_vf_mac = otx2_set_vf_mac,
.ndo_set_vf_vlan = otx2_set_vf_vlan,
.ndo_get_vf_config = otx2_get_vf_config,
.ndo_bpf = otx2_xdp,
+ .ndo_xsk_wakeup = otx2_xsk_wakeup,
.ndo_xdp_xmit = otx2_xdp_xmit,
.ndo_setup_tc = otx2_setup_tc,
.ndo_set_vf_trust = otx2_ndo_set_vf_trust,
+ .ndo_hwtstamp_get = otx2_config_hwtstamp_get,
+ .ndo_hwtstamp_set = otx2_config_hwtstamp_set,
};
-static int otx2_wq_init(struct otx2_nic *pf)
+int otx2_wq_init(struct otx2_nic *pf)
{
pf->otx2_wq = create_singlethread_workqueue("otx2_wq");
if (!pf->otx2_wq)
@@ -2776,7 +2958,7 @@ static int otx2_wq_init(struct otx2_nic *pf)
return 0;
}
-static int otx2_check_pf_usable(struct otx2_nic *nic)
+int otx2_check_pf_usable(struct otx2_nic *nic)
{
u64 rev;
@@ -2794,7 +2976,7 @@ static int otx2_check_pf_usable(struct otx2_nic *nic)
return 0;
}
-static int otx2_realloc_msix_vectors(struct otx2_nic *pf)
+int otx2_realloc_msix_vectors(struct otx2_nic *pf)
{
struct otx2_hw *hw = &pf->hw;
int num_vec, err;
@@ -2816,6 +2998,7 @@ static int otx2_realloc_msix_vectors(struct otx2_nic *pf)
return otx2_register_mbox_intr(pf, false);
}
+EXPORT_SYMBOL(otx2_realloc_msix_vectors);
static int otx2_sriov_vfcfg_init(struct otx2_nic *pf)
{
@@ -2851,6 +3034,98 @@ static void otx2_sriov_vfcfg_cleanup(struct otx2_nic *pf)
}
}
+int otx2_init_rsrc(struct pci_dev *pdev, struct otx2_nic *pf)
+{
+ struct device *dev = &pdev->dev;
+ struct otx2_hw *hw = &pf->hw;
+ int num_vec, err;
+
+ num_vec = pci_msix_vec_count(pdev);
+ hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
+ GFP_KERNEL);
+ if (!hw->irq_name)
+ return -ENOMEM;
+
+ hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec,
+ sizeof(cpumask_var_t), GFP_KERNEL);
+ if (!hw->affinity_mask)
+ return -ENOMEM;
+
+ /* Map CSRs */
+ pf->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
+ if (!pf->reg_base) {
+ dev_err(dev, "Unable to map physical function CSRs, aborting\n");
+ return -ENOMEM;
+ }
+
+ err = otx2_check_pf_usable(pf);
+ if (err)
+ return err;
+
+ if (!is_cn20k(pf->pdev))
+ err = pci_alloc_irq_vectors(hw->pdev, RVU_PF_INT_VEC_CNT,
+ RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX);
+ else
+ err = pci_alloc_irq_vectors(hw->pdev, RVU_MBOX_PF_INT_VEC_CNT,
+ RVU_MBOX_PF_INT_VEC_CNT,
+ PCI_IRQ_MSIX);
+ if (err < 0) {
+ dev_err(dev, "%s: Failed to alloc %d IRQ vectors\n",
+ __func__, num_vec);
+ return err;
+ }
+
+ otx2_setup_dev_hw_settings(pf);
+
+ if (is_cn20k(pf->pdev))
+ cn20k_init(pf);
+ else
+ otx2_init_hw_ops(pf);
+
+ /* Init PF <=> AF mailbox stuff */
+ err = otx2_pfaf_mbox_init(pf);
+ if (err)
+ goto err_free_irq_vectors;
+
+ /* Register mailbox interrupt */
+ err = otx2_register_mbox_intr(pf, true);
+ if (err)
+ goto err_mbox_destroy;
+
+ /* Request AF to attach NPA and NIX LFs to this PF.
+ * NIX and NPA LFs are needed for this PF to function as a NIC.
+ */
+ err = otx2_attach_npa_nix(pf);
+ if (err)
+ goto err_disable_mbox_intr;
+
+ err = otx2_realloc_msix_vectors(pf);
+ if (err)
+ goto err_detach_rsrc;
+
+ err = cn10k_lmtst_init(pf);
+ if (err)
+ goto err_detach_rsrc;
+
+ return 0;
+
+err_detach_rsrc:
+ if (pf->hw.lmt_info)
+ free_percpu(pf->hw.lmt_info);
+ if (test_bit(CN10K_LMTST, &pf->hw.cap_flag))
+ qmem_free(pf->dev, pf->dync_lmt);
+ otx2_detach_resources(&pf->mbox);
+err_disable_mbox_intr:
+ otx2_disable_mbox_intr(pf);
+err_mbox_destroy:
+ otx2_pfaf_mbox_destroy(pf);
+err_free_irq_vectors:
+ pci_free_irq_vectors(hw->pdev);
+
+ return err;
+}
+EXPORT_SYMBOL(otx2_init_rsrc);
+
static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct device *dev = &pdev->dev;
@@ -2858,7 +3133,6 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct net_device *netdev;
struct otx2_nic *pf;
struct otx2_hw *hw;
- int num_vec;
err = pcim_enable_device(pdev);
if (err) {
@@ -2866,7 +3140,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return err;
}
- err = pci_request_regions(pdev, DRV_NAME);
+ err = pcim_request_all_regions(pdev, DRV_NAME);
if (err) {
dev_err(dev, "PCI request regions failed 0x%x\n", err);
return err;
@@ -2875,7 +3149,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
if (err) {
dev_err(dev, "DMA mask config failed, abort\n");
- goto err_release_regions;
+ return err;
}
pci_set_master(pdev);
@@ -2885,10 +3159,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
qos_txqs = min_t(int, qcount, OTX2_QOS_MAX_LEAF_NODES);
netdev = alloc_etherdev_mqs(sizeof(*pf), qcount + qos_txqs, qcount);
- if (!netdev) {
- err = -ENOMEM;
- goto err_release_regions;
- }
+ if (!netdev)
+ return -ENOMEM;
pci_set_drvdata(pdev, netdev);
SET_NETDEV_DEV(netdev, &pdev->dev);
@@ -2909,72 +3181,14 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Use CQE of 128 byte descriptor size by default */
hw->xqe_size = 128;
- num_vec = pci_msix_vec_count(pdev);
- hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
- GFP_KERNEL);
- if (!hw->irq_name) {
- err = -ENOMEM;
- goto err_free_netdev;
- }
-
- hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec,
- sizeof(cpumask_var_t), GFP_KERNEL);
- if (!hw->affinity_mask) {
- err = -ENOMEM;
- goto err_free_netdev;
- }
-
- /* Map CSRs */
- pf->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
- if (!pf->reg_base) {
- dev_err(dev, "Unable to map physical function CSRs, aborting\n");
- err = -ENOMEM;
- goto err_free_netdev;
- }
-
- err = otx2_check_pf_usable(pf);
+ err = otx2_init_rsrc(pdev, pf);
if (err)
goto err_free_netdev;
- err = pci_alloc_irq_vectors(hw->pdev, RVU_PF_INT_VEC_CNT,
- RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX);
- if (err < 0) {
- dev_err(dev, "%s: Failed to alloc %d IRQ vectors\n",
- __func__, num_vec);
- goto err_free_netdev;
- }
-
- otx2_setup_dev_hw_settings(pf);
-
- /* Init PF <=> AF mailbox stuff */
- err = otx2_pfaf_mbox_init(pf);
- if (err)
- goto err_free_irq_vectors;
-
- /* Register mailbox interrupt */
- err = otx2_register_mbox_intr(pf, true);
- if (err)
- goto err_mbox_destroy;
-
- /* Request AF to attach NPA and NIX LFs to this PF.
- * NIX and NPA LFs are needed for this PF to function as a NIC.
- */
- err = otx2_attach_npa_nix(pf);
- if (err)
- goto err_disable_mbox_intr;
-
- err = otx2_realloc_msix_vectors(pf);
- if (err)
- goto err_detach_rsrc;
-
err = otx2_set_real_num_queues(netdev, hw->tx_queues, hw->rx_queues);
if (err)
goto err_detach_rsrc;
- err = cn10k_lmtst_init(pf);
- if (err)
- goto err_detach_rsrc;
-
/* Assign default mac address */
otx2_get_mac_from_af(netdev);
@@ -3004,6 +3218,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_ptp_destroy;
+ otx2_set_hw_capabilities(pf);
+
err = cn10k_mcs_init(pf);
if (err)
goto err_del_mcam_entries;
@@ -3037,11 +3253,19 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
netdev->min_mtu = OTX2_MIN_MTU;
netdev->max_mtu = otx2_get_max_mtu(pf);
+ hw->max_mtu = netdev->max_mtu;
+
+ /* reset CGX/RPM MAC stats */
+ otx2_reset_mac_stats(pf);
+
+ err = cn10k_ipsec_init(netdev);
+ if (err)
+ goto err_mcs_free;
err = register_netdev(netdev);
if (err) {
dev_err(dev, "Failed to register netdevice\n");
- goto err_mcs_free;
+ goto err_ipsec_clean;
}
err = otx2_wq_init(pf);
@@ -3066,22 +3290,36 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Enable link notifications */
otx2_cgx_config_linkevents(pf, true);
+ pf->af_xdp_zc_qidx = bitmap_zalloc(qcount, GFP_KERNEL);
+ if (!pf->af_xdp_zc_qidx) {
+ err = -ENOMEM;
+ goto err_sriov_cleannup;
+ }
+
#ifdef CONFIG_DCB
err = otx2_dcbnl_set_ops(netdev);
if (err)
- goto err_pf_sriov_init;
+ goto err_free_zc_bmap;
#endif
otx2_qos_init(pf, qos_txqs);
return 0;
+#ifdef CONFIG_DCB
+err_free_zc_bmap:
+ bitmap_free(pf->af_xdp_zc_qidx);
+#endif
+err_sriov_cleannup:
+ otx2_sriov_vfcfg_cleanup(pf);
err_pf_sriov_init:
otx2_shutdown_tc(pf);
err_mcam_flow_del:
otx2_mcam_flow_del(pf);
err_unreg_netdev:
unregister_netdev(netdev);
+err_ipsec_clean:
+ cn10k_ipsec_clean(pf);
err_mcs_free:
cn10k_mcs_free(pf);
err_del_mcam_entries:
@@ -3094,17 +3332,12 @@ err_detach_rsrc:
if (test_bit(CN10K_LMTST, &pf->hw.cap_flag))
qmem_free(pf->dev, pf->dync_lmt);
otx2_detach_resources(&pf->mbox);
-err_disable_mbox_intr:
otx2_disable_mbox_intr(pf);
-err_mbox_destroy:
otx2_pfaf_mbox_destroy(pf);
-err_free_irq_vectors:
pci_free_irq_vectors(hw->pdev);
err_free_netdev:
pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
-err_release_regions:
- pci_release_regions(pdev);
return err;
}
@@ -3146,6 +3379,7 @@ static void otx2_vf_link_event_task(struct work_struct *work)
req = (struct cgx_link_info_msg *)msghdr;
req->hdr.id = MBOX_MSG_CGX_LINK_EVENT;
req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ req->hdr.pcifunc = pf->pcifunc;
memcpy(&req->link_info, &pf->linfo, sizeof(req->link_info));
otx2_mbox_wait_for_zero(&pf->mbox_pfvf[0].mbox_up, vf_idx);
@@ -3221,6 +3455,29 @@ static int otx2_sriov_configure(struct pci_dev *pdev, int numvfs)
return otx2_sriov_enable(pdev, numvfs);
}
+static void otx2_ndc_sync(struct otx2_nic *pf)
+{
+ struct mbox *mbox = &pf->mbox;
+ struct ndc_sync_op *req;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_ndc_sync_op(mbox);
+ if (!req) {
+ mutex_unlock(&mbox->lock);
+ return;
+ }
+
+ req->nix_lf_tx_sync = 1;
+ req->nix_lf_rx_sync = 1;
+ req->npa_lf_sync = 1;
+
+ if (!otx2_sync_mbox_msg(mbox))
+ dev_err(pf->dev, "NDC sync operation failed\n");
+
+ mutex_unlock(&mbox->lock);
+}
+
static void otx2_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -3259,6 +3516,7 @@ static void otx2_remove(struct pci_dev *pdev)
otx2_unregister_dl(pf);
unregister_netdev(netdev);
+ cn10k_ipsec_clean(pf);
cn10k_mcs_free(pf);
otx2_sriov_disable(pf->pdev);
otx2_sriov_vfcfg_cleanup(pf);
@@ -3269,6 +3527,7 @@ static void otx2_remove(struct pci_dev *pdev)
otx2_mcam_flow_del(pf);
otx2_shutdown_tc(pf);
otx2_shutdown_qos(pf);
+ otx2_ndc_sync(pf);
otx2_detach_resources(&pf->mbox);
if (pf->hw.lmt_info)
free_percpu(pf->hw.lmt_info);
@@ -3277,10 +3536,9 @@ static void otx2_remove(struct pci_dev *pdev)
otx2_disable_mbox_intr(pf);
otx2_pfaf_mbox_destroy(pf);
pci_free_irq_vectors(pf->pdev);
+ bitmap_free(pf->af_xdp_zc_qidx);
pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
-
- pci_release_regions(pdev);
}
static struct pci_driver otx2_pf_driver = {
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
index 63130ba37e9d..dedd586ed310 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
@@ -193,7 +193,7 @@ static int ptp_pps_on(struct otx2_ptp *ptp, int on, u64 period)
return otx2_sync_mbox_msg(&ptp->nic->mbox);
}
-static u64 ptp_cc_read(const struct cyclecounter *cc)
+static u64 ptp_cc_read(struct cyclecounter *cc)
{
struct otx2_ptp *ptp = container_of(cc, struct otx2_ptp, cycle_counter);
@@ -491,7 +491,7 @@ void otx2_ptp_destroy(struct otx2_nic *pfvf)
if (!ptp)
return;
- cancel_delayed_work(&pfvf->ptp->synctstamp_work);
+ cancel_delayed_work_sync(&pfvf->ptp->synctstamp_work);
ptp_clock_unregister(ptp->ptp_clock);
kfree(ptp);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
index 45a32e4b49d1..1cd576fd09c5 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
@@ -44,6 +44,17 @@
#define RVU_PF_VF_MBOX_ADDR (0xC40)
#define RVU_PF_LMTLINE_ADDR (0xC48)
+#define RVU_MBOX_PF_VFX_PFVF_TRIGX(a) (0x2000 | (a) << 3)
+#define RVU_MBOX_PF_VFPF_INTX(a) (0x1000 | (a) << 3)
+#define RVU_MBOX_PF_VFPF_INT_W1SX(a) (0x1020 | (a) << 3)
+#define RVU_MBOX_PF_VFPF_INT_ENA_W1SX(a) (0x1040 | (a) << 3)
+#define RVU_MBOX_PF_VFPF_INT_ENA_W1CX(a) (0x1060 | (a) << 3)
+
+#define RVU_MBOX_PF_VFPF1_INTX(a) (0x1080 | (a) << 3)
+#define RVU_MBOX_PF_VFPF1_INT_W1SX(a) (0x10a0 | (a) << 3)
+#define RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(a) (0x10c0 | (a) << 3)
+#define RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(a) (0x10e0 | (a) << 3)
+
/* RVU VF registers */
#define RVU_VF_VFPF_MBOX0 (0x00000)
#define RVU_VF_VFPF_MBOX1 (0x00008)
@@ -58,6 +69,11 @@
#define RVU_VF_MSIX_PBAX(a) (0xF0000 | (a) << 3)
#define RVU_VF_MBOX_REGION (0xC0000)
+/* CN20K RVU_MBOX_E: RVU PF/VF MBOX Address Range Enumeration */
+#define RVU_MBOX_AF_PFX_ADDR(a) (0x5000 | (a) << 4)
+#define RVU_PFX_FUNC_PFAF_MBOX (0x80000)
+#define RVU_PFX_FUNCX_VFAF_MBOX (0x40000)
+
#define RVU_FUNC_BLKADDR_SHIFT 20
#define RVU_FUNC_BLKADDR_MASK 0x1FULL
@@ -138,38 +154,12 @@
#define NIX_LF_CINTX_ENA_W1S(a) (NIX_LFBASE | 0xD40 | (a) << 12)
#define NIX_LF_CINTX_ENA_W1C(a) (NIX_LFBASE | 0xD50 | (a) << 12)
-/* NIX AF transmit scheduler registers */
-#define NIX_AF_SMQX_CFG(a) (0x700 | (a) << 16)
-#define NIX_AF_TL1X_SCHEDULE(a) (0xC00 | (a) << 16)
-#define NIX_AF_TL1X_CIR(a) (0xC20 | (a) << 16)
-#define NIX_AF_TL1X_TOPOLOGY(a) (0xC80 | (a) << 16)
-#define NIX_AF_TL2X_PARENT(a) (0xE88 | (a) << 16)
-#define NIX_AF_TL2X_SCHEDULE(a) (0xE00 | (a) << 16)
-#define NIX_AF_TL2X_TOPOLOGY(a) (0xE80 | (a) << 16)
-#define NIX_AF_TL2X_CIR(a) (0xE20 | (a) << 16)
-#define NIX_AF_TL2X_PIR(a) (0xE30 | (a) << 16)
-#define NIX_AF_TL3X_PARENT(a) (0x1088 | (a) << 16)
-#define NIX_AF_TL3X_SCHEDULE(a) (0x1000 | (a) << 16)
-#define NIX_AF_TL3X_SHAPE(a) (0x1010 | (a) << 16)
-#define NIX_AF_TL3X_CIR(a) (0x1020 | (a) << 16)
-#define NIX_AF_TL3X_PIR(a) (0x1030 | (a) << 16)
-#define NIX_AF_TL3X_TOPOLOGY(a) (0x1080 | (a) << 16)
-#define NIX_AF_TL4X_PARENT(a) (0x1288 | (a) << 16)
-#define NIX_AF_TL4X_SCHEDULE(a) (0x1200 | (a) << 16)
-#define NIX_AF_TL4X_SHAPE(a) (0x1210 | (a) << 16)
-#define NIX_AF_TL4X_CIR(a) (0x1220 | (a) << 16)
-#define NIX_AF_TL4X_PIR(a) (0x1230 | (a) << 16)
-#define NIX_AF_TL4X_TOPOLOGY(a) (0x1280 | (a) << 16)
-#define NIX_AF_MDQX_SCHEDULE(a) (0x1400 | (a) << 16)
-#define NIX_AF_MDQX_SHAPE(a) (0x1410 | (a) << 16)
-#define NIX_AF_MDQX_CIR(a) (0x1420 | (a) << 16)
-#define NIX_AF_MDQX_PIR(a) (0x1430 | (a) << 16)
-#define NIX_AF_MDQX_PARENT(a) (0x1480 | (a) << 16)
-#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b) (0x1700 | (a) << 16 | (b) << 3)
-
/* LMT LF registers */
#define LMT_LFBASE BIT_ULL(RVU_FUNC_BLKADDR_SHIFT)
#define LMT_LF_LMTLINEX(a) (LMT_LFBASE | 0x000 | (a) << 12)
#define LMT_LF_LMTCANCEL (LMT_LFBASE | 0x400)
+/* CN20K registers */
+#define RVU_PF_DISC (0x0)
+
#endif /* OTX2_REG_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
index f4655a8c0705..26a08d2cfbb1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -443,6 +443,7 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic,
struct flow_action_entry *act;
struct net_device *target;
struct otx2_nic *priv;
+ struct rep_dev *rdev;
u32 burst, mark = 0;
u8 nr_police = 0;
u8 num_intf = 1;
@@ -464,14 +465,19 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic,
return 0;
case FLOW_ACTION_REDIRECT_INGRESS:
target = act->dev;
- priv = netdev_priv(target);
- /* npc_install_flow_req doesn't support passing a target pcifunc */
- if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) {
- NL_SET_ERR_MSG_MOD(extack,
- "can't redirect to other pf/vf");
- return -EOPNOTSUPP;
+ if (target->dev.parent) {
+ priv = netdev_priv(target);
+ if (rvu_get_pf(nic->pdev, nic->pcifunc) !=
+ rvu_get_pf(nic->pdev, priv->pcifunc)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "can't redirect to other pf/vf");
+ return -EOPNOTSUPP;
+ }
+ req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK;
+ } else {
+ rdev = netdev_priv(target);
+ req->vf = rdev->pcifunc & RVU_PFVF_FUNC_MASK;
}
- req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK;
/* if op is already set; avoid overwriting the same */
if (!req->op)
@@ -511,7 +517,15 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic,
nr_police++;
break;
case FLOW_ACTION_MARK:
+ if (act->mark & ~OTX2_RX_MATCH_ID_MASK) {
+ NL_SET_ERR_MSG_MOD(extack, "Bad flow mark, only 16 bit supported");
+ return -EOPNOTSUPP;
+ }
mark = act->mark;
+ req->match_id = mark & OTX2_RX_MATCH_ID_MASK;
+ req->op = NIX_RX_ACTION_DEFAULT;
+ nic->flags |= OTX2_FLAG_TC_MARK_ENABLED;
+ refcount_inc(&nic->flow_cfg->mark_flows);
break;
case FLOW_ACTION_RX_QUEUE_MAPPING:
@@ -692,10 +706,6 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
u32 val;
flow_rule_match_control(rule, &match);
- if (match.mask->flags & FLOW_DIS_FIRST_FRAG) {
- NL_SET_ERR_MSG_MOD(extack, "HW doesn't support frag first/later");
- return -EOPNOTSUPP;
- }
if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
val = match.key->flags & FLOW_DIS_IS_FRAGMENT;
@@ -713,6 +723,10 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
return -EOPNOTSUPP;
}
}
+
+ if (!flow_rule_is_supp_control_flags(FLOW_DIS_IS_FRAGMENT,
+ match.mask->flags, extack))
+ return -EOPNOTSUPP;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
@@ -1187,6 +1201,11 @@ static int otx2_tc_del_flow(struct otx2_nic *nic,
return -EINVAL;
}
+	/* Disable TC MARK flag if there are no rules with skbedit mark action */
+ if (flow_node->req.match_id)
+ if (!refcount_dec_and_test(&flow_cfg->mark_flows))
+ nic->flags &= ~OTX2_FLAG_TC_MARK_ENABLED;
+
if (flow_node->is_act_police) {
__clear_bit(flow_node->rq, &nic->rq_bmap);
@@ -1287,6 +1306,7 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
req->channel = nic->hw.rx_chan_base;
req->entry = flow_cfg->flow_ent[mcam_idx];
req->intf = NIX_INTF_RX;
+ req->vf = nic->pcifunc;
req->set_cntr = 1;
new_node->entry = req->entry;
@@ -1306,7 +1326,6 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
free_leaf:
otx2_tc_del_from_flow_list(flow_cfg, new_node);
- kfree_rcu(new_node, rcu);
if (new_node->is_act_police) {
mutex_lock(&nic->mbox.lock);
@@ -1326,6 +1345,7 @@ free_leaf:
mutex_unlock(&nic->mbox.lock);
}
+ kfree_rcu(new_node, rcu);
return rc;
}
@@ -1387,8 +1407,8 @@ static int otx2_tc_get_flow_stats(struct otx2_nic *nic,
return 0;
}
-static int otx2_setup_tc_cls_flower(struct otx2_nic *nic,
- struct flow_cls_offload *cls_flower)
+int otx2_setup_tc_cls_flower(struct otx2_nic *nic,
+ struct flow_cls_offload *cls_flower)
{
switch (cls_flower->command) {
case FLOW_CLS_REPLACE:
@@ -1401,6 +1421,7 @@ static int otx2_setup_tc_cls_flower(struct otx2_nic *nic,
return -EOPNOTSUPP;
}
}
+EXPORT_SYMBOL(otx2_setup_tc_cls_flower);
static int otx2_tc_ingress_matchall_install(struct otx2_nic *nic,
struct tc_cls_matchall_offload *cls)
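
The otx2_tc.c hunks above cap skbedit mark values at 16 bits (OTX2_RX_MATCH_ID_MASK), copy the mark into the flow request's match_id, and keep a mark_flows refcount so the TC-mark RX flag stays set only while at least one such rule is installed. The standalone sketch below models that bookkeeping in plain user-space C; the names and the simplified flag handling are illustrative assumptions, not the driver's code.

/*
 * Standalone sketch (not driver code): models the 16-bit mark check and a
 * refcount that keeps an "RX mark enabled" flag set only while at least one
 * rule with a mark action exists.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RX_MATCH_ID_MASK 0x0000ffffu   /* mirrors OTX2_RX_MATCH_ID_MASK */

static unsigned int mark_flows;        /* stand-in for refcount_t mark_flows */
static bool mark_flag_enabled;         /* stand-in for OTX2_FLAG_TC_MARK_ENABLED */

static int add_mark_rule(uint32_t mark)
{
	if (mark & ~RX_MATCH_ID_MASK)
		return -1;              /* only 16-bit marks are supported */
	mark_flows++;
	mark_flag_enabled = true;
	return 0;
}

static void del_mark_rule(void)
{
	if (mark_flows && --mark_flows == 0)
		mark_flag_enabled = false;  /* last mark rule removed */
}

int main(void)
{
	printf("add 0x1234: %d\n", add_mark_rule(0x1234));    /* accepted */
	printf("add 0x12345: %d\n", add_mark_rule(0x12345));  /* rejected, >16 bit */
	del_mark_rule();
	printf("mark flag: %d\n", mark_flag_enabled);          /* back to 0 */
	return 0;
}
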
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index f828d32737af..625bb5a05344 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -11,6 +11,8 @@
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/ip6_checksum.h>
+#include <net/xfrm.h>
+#include <net/xdp.h>
#include "otx2_reg.h"
#include "otx2_common.h"
@@ -18,6 +20,7 @@
#include "otx2_txrx.h"
#include "otx2_ptp.h"
#include "cn10k.h"
+#include "otx2_xsk.h"
#define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx)))
#define PTP_PORT 0x13F
@@ -26,11 +29,30 @@
*/
#define PTP_SYNC_SEC_OFFSET 34
+DEFINE_STATIC_KEY_FALSE(cn10k_ipsec_sa_enabled);
+
+static int otx2_get_free_sqe(struct otx2_snd_queue *sq)
+{
+ return (sq->cons_head - sq->head - 1 + sq->sqe_cnt)
+ & (sq->sqe_cnt - 1);
+}
+
static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
struct bpf_prog *prog,
struct nix_cqe_rx_s *cqe,
struct otx2_cq_queue *cq,
- bool *need_xdp_flush);
+ u32 *metasize, bool *need_xdp_flush);
+
+static void otx2_sq_set_sqe_base(struct otx2_snd_queue *sq,
+ struct sk_buff *skb)
+{
+ if (static_branch_unlikely(&cn10k_ipsec_sa_enabled) &&
+ (xfrm_offload(skb)))
+ sq->sqe_base = sq->sqe_ring->base + sq->sqe_size +
+ (sq->head * (sq->sqe_size * 2));
+ else
+ sq->sqe_base = sq->sqe->base;
+}
static int otx2_nix_cq_op_status(struct otx2_nic *pfvf,
struct otx2_cq_queue *cq)
@@ -80,54 +102,24 @@ static unsigned int frag_num(unsigned int i)
#endif
}
-static dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
- struct sk_buff *skb, int seg, int *len)
-{
- const skb_frag_t *frag;
- struct page *page;
- int offset;
-
- /* First segment is always skb->data */
- if (!seg) {
- page = virt_to_page(skb->data);
- offset = offset_in_page(skb->data);
- *len = skb_headlen(skb);
- } else {
- frag = &skb_shinfo(skb)->frags[seg - 1];
- page = skb_frag_page(frag);
- offset = skb_frag_off(frag);
- *len = skb_frag_size(frag);
- }
- return otx2_dma_map_page(pfvf, page, offset, *len, DMA_TO_DEVICE);
-}
-
-static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg)
-{
- int seg;
-
- for (seg = 0; seg < sg->num_segs; seg++) {
- otx2_dma_unmap_page(pfvf, sg->dma_addr[seg],
- sg->size[seg], DMA_TO_DEVICE);
- }
- sg->num_segs = 0;
-}
-
static void otx2_xdp_snd_pkt_handler(struct otx2_nic *pfvf,
struct otx2_snd_queue *sq,
- struct nix_cqe_tx_s *cqe)
+ struct nix_cqe_tx_s *cqe,
+ int *xsk_frames)
{
struct nix_send_comp_s *snd_comp = &cqe->comp;
struct sg_list *sg;
- struct page *page;
- u64 pa;
sg = &sq->sg[snd_comp->sqe_id];
+ if (sg->flags & OTX2_AF_XDP_FRAME) {
+ (*xsk_frames)++;
+ return;
+ }
- pa = otx2_iova_to_phys(pfvf->iommu_domain, sg->dma_addr[0]);
- otx2_dma_unmap_page(pfvf, sg->dma_addr[0],
- sg->size[0], DMA_TO_DEVICE);
- page = virt_to_page(phys_to_virt(pa));
- put_page(page);
+ if (sg->flags & OTX2_XDP_REDIRECT)
+ otx2_dma_unmap_page(pfvf, sg->dma_addr[0], sg->size[0], DMA_TO_DEVICE);
+ xdp_return_frame((struct xdp_frame *)sg->skb);
+ sg->skb = (u64)NULL;
}
static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
@@ -343,18 +335,24 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
struct nix_rx_parse_s *parse = &cqe->parse;
struct nix_rx_sg_s *sg = &cqe->sg;
struct sk_buff *skb = NULL;
+ u64 *word = (u64 *)parse;
void *end, *start;
+ u32 metasize = 0;
u64 *seg_addr;
u16 *seg_size;
int seg;
if (unlikely(parse->errlev || parse->errcode)) {
- if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx))
+ if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx)) {
+ trace_otx2_parse_dump(pfvf->pdev, "Err:", word);
return;
+ }
}
+ trace_otx2_parse_dump(pfvf->pdev, "", word);
if (pfvf->xdp_prog)
- if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq, need_xdp_flush))
+ if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq,
+ &metasize, need_xdp_flush))
return;
skb = napi_get_frags(napi);
@@ -376,11 +374,18 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
}
otx2_set_rxhash(pfvf, cqe, skb);
- skb_record_rx_queue(skb, cq->cq_idx);
- if (pfvf->netdev->features & NETIF_F_RXCSUM)
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+ if (!(pfvf->flags & OTX2_FLAG_REP_MODE_ENABLED)) {
+ skb_record_rx_queue(skb, cq->cq_idx);
+ if (pfvf->netdev->features & NETIF_F_RXCSUM)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+
+ if (pfvf->flags & OTX2_FLAG_TC_MARK_ENABLED)
+ skb->mark = parse->match_id;
skb_mark_for_recycle(skb);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
napi_gro_frags(napi);
}
@@ -444,23 +449,42 @@ int otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
return cnt - cq->pool_ptrs;
}
+static void otx2_zc_submit_pkts(struct otx2_nic *pfvf, struct xsk_buff_pool *xsk_pool,
+ int *xsk_frames, int qidx, int budget)
+{
+ if (*xsk_frames)
+ xsk_tx_completed(xsk_pool, *xsk_frames);
+
+ if (xsk_uses_need_wakeup(xsk_pool))
+ xsk_set_tx_need_wakeup(xsk_pool);
+
+ otx2_zc_napi_handler(pfvf, xsk_pool, qidx, budget);
+}
+
static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
struct otx2_cq_queue *cq, int budget)
{
int tx_pkts = 0, tx_bytes = 0, qidx;
struct otx2_snd_queue *sq;
struct nix_cqe_tx_s *cqe;
+ struct net_device *ndev;
int processed_cqe = 0;
+ int xsk_frames = 0;
+
+ qidx = cq->cq_idx - pfvf->hw.rx_queues;
+ sq = &pfvf->qset.sq[qidx];
if (cq->pend_cqe >= budget)
goto process_cqe;
- if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
+ if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe) {
+ if (sq->xsk_pool)
+ otx2_zc_submit_pkts(pfvf, sq->xsk_pool, &xsk_frames,
+ qidx, budget);
return 0;
+ }
process_cqe:
- qidx = cq->cq_idx - pfvf->hw.rx_queues;
- sq = &pfvf->qset.sq[qidx];
while (likely(processed_cqe < budget) && cq->pend_cqe) {
cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq);
@@ -470,10 +494,8 @@ process_cqe:
break;
}
- qidx = cq->cq_idx - pfvf->hw.rx_queues;
-
if (cq->cq_type == CQ_XDP)
- otx2_xdp_snd_pkt_handler(pfvf, sq, cqe);
+ otx2_xdp_snd_pkt_handler(pfvf, sq, cqe, &xsk_frames);
else
otx2_snd_pkt_handler(pfvf, cq, &pfvf->qset.sq[qidx],
cqe, budget, &tx_pkts, &tx_bytes);
@@ -490,6 +512,13 @@ process_cqe:
otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
((u64)cq->cq_idx << 32) | processed_cqe);
+#if IS_ENABLED(CONFIG_RVU_ESWITCH)
+ if (pfvf->flags & OTX2_FLAG_REP_MODE_ENABLED)
+ ndev = pfvf->reps[qidx]->netdev;
+ else
+#endif
+ ndev = pfvf->netdev;
+
if (likely(tx_pkts)) {
struct netdev_queue *txq;
@@ -497,20 +526,26 @@ process_cqe:
if (qidx >= pfvf->hw.tx_queues)
qidx -= pfvf->hw.xdp_queues;
- txq = netdev_get_tx_queue(pfvf->netdev, qidx);
+ if (pfvf->flags & OTX2_FLAG_REP_MODE_ENABLED)
+ qidx = 0;
+ txq = netdev_get_tx_queue(ndev, qidx);
netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
/* Check if queue was stopped earlier due to ring full */
smp_mb();
if (netif_tx_queue_stopped(txq) &&
- netif_carrier_ok(pfvf->netdev))
+ netif_carrier_ok(ndev))
netif_tx_wake_queue(txq);
}
+
+ if (sq->xsk_pool)
+ otx2_zc_submit_pkts(pfvf, sq->xsk_pool, &xsk_frames, qidx, budget);
+
return 0;
}
static void otx2_adjust_adaptive_coalese(struct otx2_nic *pfvf, struct otx2_cq_poll *cq_poll)
{
- struct dim_sample dim_sample;
+ struct dim_sample dim_sample = { 0 };
u64 rx_frames, rx_bytes;
u64 tx_frames, tx_bytes;
@@ -524,15 +559,16 @@ static void otx2_adjust_adaptive_coalese(struct otx2_nic *pfvf, struct otx2_cq_p
rx_frames + tx_frames,
rx_bytes + tx_bytes,
&dim_sample);
- net_dim(&cq_poll->dim, dim_sample);
+ net_dim(&cq_poll->dim, &dim_sample);
}
int otx2_napi_handler(struct napi_struct *napi, int budget)
{
struct otx2_cq_queue *rx_cq = NULL;
+ struct otx2_cq_queue *cq = NULL;
+ struct otx2_pool *pool = NULL;
struct otx2_cq_poll *cq_poll;
int workdone = 0, cq_idx, i;
- struct otx2_cq_queue *cq;
struct otx2_qset *qset;
struct otx2_nic *pfvf;
int filled_cnt = -1;
@@ -557,6 +593,7 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
if (rx_cq && rx_cq->pool_ptrs)
filled_cnt = pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq);
+
/* Clear the IRQ */
otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0));
@@ -569,20 +606,31 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
if (pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED)
otx2_adjust_adaptive_coalese(pfvf, cq_poll);
+ if (likely(cq))
+ pool = &pfvf->qset.pool[cq->cq_idx];
+
if (unlikely(!filled_cnt)) {
struct refill_work *work;
struct delayed_work *dwork;
- work = &pfvf->refill_wrk[cq->cq_idx];
- dwork = &work->pool_refill_work;
- /* Schedule a task if no other task is running */
- if (!cq->refill_task_sched) {
- work->napi = napi;
- cq->refill_task_sched = true;
- schedule_delayed_work(dwork,
- msecs_to_jiffies(100));
+ if (likely(cq)) {
+ work = &pfvf->refill_wrk[cq->cq_idx];
+ dwork = &work->pool_refill_work;
+ /* Schedule a task if no other task is running */
+ if (!cq->refill_task_sched) {
+ work->napi = napi;
+ cq->refill_task_sched = true;
+ schedule_delayed_work(dwork,
+ msecs_to_jiffies(100));
+ }
+ /* Set the need-wakeup flag as buffers could not be refilled */
+ if (pool->xsk_pool)
+ xsk_set_rx_need_wakeup(pool->xsk_pool);
}
} else {
+ /* Clear wake-up, since buffers are filled successfully */
+ if (pool && pool->xsk_pool)
+ xsk_clear_rx_need_wakeup(pool->xsk_pool);
/* Re-enable interrupts */
otx2_write64(pfvf,
NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx),
@@ -591,6 +639,7 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
}
return workdone;
}
+EXPORT_SYMBOL(otx2_napi_handler);
void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq,
int size, int qidx)
@@ -609,7 +658,6 @@ void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq,
sq->head &= (sq->sqe_cnt - 1);
}
-#define MAX_SEGS_PER_SG 3
/* Add SQE scatter/gather subdescriptor structure */
static bool otx2_sqe_add_sg(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
struct sk_buff *skb, int num_segs, int *offset)
@@ -684,7 +732,7 @@ static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
} else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
__be16 l3_proto = vlan_get_protocol(skb);
struct udphdr *udph = udp_hdr(skb);
- u16 iplen;
+ __be16 iplen;
ext->lso_sb = skb_transport_offset(skb) +
sizeof(struct udphdr);
@@ -1138,18 +1186,19 @@ static void otx2_set_txtstamp(struct otx2_nic *pfvf, struct sk_buff *skb,
}
}
-bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
+bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq,
+ struct otx2_snd_queue *sq,
struct sk_buff *skb, u16 qidx)
{
- struct netdev_queue *txq = netdev_get_tx_queue(netdev, qidx);
- struct otx2_nic *pfvf = netdev_priv(netdev);
int offset, num_segs, free_desc;
struct nix_sqe_hdr_s *sqe_hdr;
+ struct otx2_nic *pfvf = dev;
+ bool ret;
/* Check if there is enough room between producer
* and consumer index.
*/
- free_desc = (sq->cons_head - sq->head - 1 + sq->sqe_cnt) & (sq->sqe_cnt - 1);
+ free_desc = otx2_get_free_sqe(sq);
if (free_desc < sq->sqe_thresh)
return false;
@@ -1161,6 +1210,7 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
/* If SKB doesn't fit in a single SQE, linearize it.
* TODO: Consider adding JUMP descriptor instead.
*/
+
if (unlikely(num_segs > OTX2_MAX_FRAGS_IN_SQE)) {
if (__skb_linearize(skb)) {
dev_kfree_skb_any(skb);
@@ -1171,12 +1221,18 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
if (skb_shinfo(skb)->gso_size && !is_hw_tso_supported(pfvf, skb)) {
/* Insert vlan tag before giving pkt to tso */
- if (skb_vlan_tag_present(skb))
+ if (skb_vlan_tag_present(skb)) {
skb = __vlan_hwaccel_push_inside(skb);
+ if (!skb)
+ return true;
+ }
otx2_sq_append_tso(pfvf, sq, skb, qidx);
return true;
}
+ /* Set sqe base address */
+ otx2_sq_set_sqe_base(sq, skb);
+
/* Set SQE's SEND_HDR.
* Do not clear the first 64bit as it contains constant info.
*/
@@ -1189,7 +1245,13 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
otx2_sqe_add_ext(pfvf, sq, skb, &offset);
/* Add SG subdesc with data frags */
- if (!otx2_sqe_add_sg(pfvf, sq, skb, num_segs, &offset)) {
+ if (static_branch_unlikely(&cn10k_ipsec_sa_enabled) &&
+ (xfrm_offload(skb)))
+ ret = otx2_sqe_add_sg_ipsec(pfvf, sq, skb, num_segs, &offset);
+ else
+ ret = otx2_sqe_add_sg(pfvf, sq, skb, num_segs, &offset);
+
+ if (!ret) {
otx2_dma_unmap_skb_frags(pfvf, &sq->sg[sq->head]);
return false;
}
@@ -1198,11 +1260,15 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
sqe_hdr->sizem1 = (offset / 16) - 1;
+ if (static_branch_unlikely(&cn10k_ipsec_sa_enabled) &&
+ (xfrm_offload(skb)))
+ return cn10k_ipsec_transmit(pfvf, txq, sq, skb, num_segs,
+ offset);
+
netdev_tx_sent_queue(txq, skb->len);
/* Flush SQE to HW */
pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
-
return true;
}
EXPORT_SYMBOL(otx2_sq_append_skb);
@@ -1215,15 +1281,19 @@ void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int q
u16 pool_id;
u64 iova;
- if (pfvf->xdp_prog)
+ pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, qidx);
+ pool = &pfvf->qset.pool[pool_id];
+
+ if (pfvf->xdp_prog) {
+ if (pool->page_pool)
+ xdp_rxq_info_unreg_mem_model(&cq->xdp_rxq);
+
xdp_rxq_info_unreg(&cq->xdp_rxq);
+ }
if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
return;
- pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, qidx);
- pool = &pfvf->qset.pool[pool_id];
-
while (cq->pend_cqe) {
cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq);
processed_cqe++;
@@ -1344,8 +1414,8 @@ void otx2_free_pending_sqe(struct otx2_nic *pfvf)
}
}
-static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr,
- int len, int *offset)
+void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, struct xdp_frame *xdpf,
+ u64 dma_addr, int len, int *offset, u16 flags)
{
struct nix_sqe_sg_s *sg = NULL;
u64 *iova = NULL;
@@ -1362,16 +1432,34 @@ static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr,
sq->sg[sq->head].dma_addr[0] = dma_addr;
sq->sg[sq->head].size[0] = len;
sq->sg[sq->head].num_segs = 1;
+ sq->sg[sq->head].flags = flags;
+ sq->sg[sq->head].skb = (u64)xdpf;
+}
+
+int otx2_read_free_sqe(struct otx2_nic *pfvf, u16 qidx)
+{
+ struct otx2_snd_queue *sq;
+ int free_sqe;
+
+ sq = &pfvf->qset.sq[qidx];
+ free_sqe = otx2_get_free_sqe(sq);
+ if (free_sqe < sq->sqe_thresh) {
+ netdev_warn(pfvf->netdev, "No free sqe for Send queue%d\n", qidx);
+ return 0;
+ }
+
+ return free_sqe - sq->sqe_thresh;
}
-bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx)
+bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, struct xdp_frame *xdpf,
+ u64 iova, int len, u16 qidx, u16 flags)
{
struct nix_sqe_hdr_s *sqe_hdr;
struct otx2_snd_queue *sq;
int offset, free_sqe;
sq = &pfvf->qset.sq[qidx];
- free_sqe = (sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb;
+ free_sqe = otx2_get_free_sqe(sq);
if (free_sqe < sq->sqe_thresh)
return false;
@@ -1390,7 +1478,7 @@ bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx)
offset = sizeof(*sqe_hdr);
- otx2_xdp_sqe_add_sg(sq, iova, len, &offset);
+ otx2_xdp_sqe_add_sg(sq, xdpf, iova, len, &offset, flags);
sqe_hdr->sizem1 = (offset / 16) - 1;
pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
@@ -1401,16 +1489,30 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
struct bpf_prog *prog,
struct nix_cqe_rx_s *cqe,
struct otx2_cq_queue *cq,
- bool *need_xdp_flush)
+ u32 *metasize, bool *need_xdp_flush)
{
+ struct xdp_buff xdp, *xsk_buff = NULL;
unsigned char *hard_start;
+ struct otx2_pool *pool;
+ struct xdp_frame *xdpf;
int qidx = cq->cq_idx;
- struct xdp_buff xdp;
struct page *page;
u64 iova, pa;
u32 act;
int err;
+ pool = &pfvf->qset.pool[qidx];
+
+ if (pool->xsk_pool) {
+ xsk_buff = pool->xdp[--cq->rbpool->xdp_top];
+ if (!xsk_buff)
+ return false;
+
+ xsk_buff->data_end = xsk_buff->data + cqe->sg.seg_size;
+ act = bpf_prog_run_xdp(prog, xsk_buff);
+ goto handle_xdp_verdict;
+ }
+
iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM;
pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
page = virt_to_page(phys_to_virt(pa));
@@ -1419,41 +1521,63 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
hard_start = (unsigned char *)phys_to_virt(pa);
xdp_prepare_buff(&xdp, hard_start, OTX2_HEAD_ROOM,
- cqe->sg.seg_size, false);
+ cqe->sg.seg_size, true);
act = bpf_prog_run_xdp(prog, &xdp);
+handle_xdp_verdict:
switch (act) {
case XDP_PASS:
+ *metasize = xdp.data - xdp.data_meta;
break;
case XDP_TX:
qidx += pfvf->hw.tx_queues;
cq->pool_ptrs++;
- return otx2_xdp_sq_append_pkt(pfvf, iova,
- cqe->sg.seg_size, qidx);
+ xdpf = xdp_convert_buff_to_frame(&xdp);
+ return otx2_xdp_sq_append_pkt(pfvf, xdpf,
+ cqe->sg.seg_addr,
+ cqe->sg.seg_size,
+ qidx, OTX2_XDP_TX);
case XDP_REDIRECT:
cq->pool_ptrs++;
- err = xdp_do_redirect(pfvf->netdev, &xdp, prog);
+ if (xsk_buff) {
+ err = xdp_do_redirect(pfvf->netdev, xsk_buff, prog);
+ if (!err) {
+ *need_xdp_flush = true;
+ return true;
+ }
+ return false;
+ }
- otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
- DMA_FROM_DEVICE);
+ err = xdp_do_redirect(pfvf->netdev, &xdp, prog);
if (!err) {
*need_xdp_flush = true;
return true;
}
- put_page(page);
+
+ otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
+ DMA_FROM_DEVICE);
+ xdpf = xdp_convert_buff_to_frame(&xdp);
+ xdp_return_frame(xdpf);
break;
default:
bpf_warn_invalid_xdp_action(pfvf->netdev, prog, act);
- break;
+ fallthrough;
case XDP_ABORTED:
- trace_xdp_exception(pfvf->netdev, prog, act);
- break;
+ if (act == XDP_ABORTED)
+ trace_xdp_exception(pfvf->netdev, prog, act);
+ fallthrough;
case XDP_DROP:
- otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
- DMA_FROM_DEVICE);
- put_page(page);
cq->pool_ptrs++;
+ if (xsk_buff) {
+ xsk_buff_free(xsk_buff);
+ } else if (pp_page_to_nmdesc(page)->pp) {
+ page_pool_recycle_direct(pool->page_pool, page);
+ } else {
+ otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
+ DMA_FROM_DEVICE);
+ put_page(page);
+ }
return true;
}
return false;
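
Several hunks above replace the aura-based free-descriptor estimate with otx2_get_free_sqe(), which derives the free slot count of the power-of-two SQE ring from the producer (head) and consumer (cons_head) indices, keeping one slot reserved so that equal indices unambiguously mean "empty". A minimal user-space check of that arithmetic, with made-up index values, is shown below.

/*
 * Minimal sketch of the ring arithmetic behind otx2_get_free_sqe(): with a
 * power-of-two ring of sqe_cnt slots, one slot stays unused so that
 * head == cons_head always means "empty". Not driver code; values are made up.
 */
#include <assert.h>
#include <stdio.h>

static unsigned int free_slots(unsigned int cons_head, unsigned int head,
			       unsigned int sqe_cnt)
{
	return (cons_head - head - 1 + sqe_cnt) & (sqe_cnt - 1);
}

int main(void)
{
	/* Empty ring of 8: 7 usable slots (one reserved). */
	assert(free_slots(0, 0, 8) == 7);
	/* Producer 3 ahead of consumer: 4 slots left. */
	assert(free_slots(0, 3, 8) == 4);
	/* Also correct once the producer index has wrapped past the consumer. */
	assert(free_slots(2, 7, 8) == 2);
	printf("ring arithmetic checks passed\n");
	return 0;
}
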
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index a82ffca8ce1b..acf259d72008 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -12,6 +12,7 @@
#include <linux/iommu.h>
#include <linux/if_vlan.h>
#include <net/xdp.h>
+#include <net/xdp_sock_drv.h>
#define LBK_CHAN_BASE 0x000
#define SDP_CHAN_BASE 0x700
@@ -62,6 +63,9 @@
#define CQ_OP_STAT_OP_ERR 63
#define CQ_OP_STAT_CQ_ERR 46
+/* Packet mark mask */
+#define OTX2_RX_MATCH_ID_MASK 0x0000ffff
+
struct queue_stats {
u64 bytes;
u64 pkts;
@@ -73,6 +77,7 @@ struct otx2_rcv_queue {
struct sg_list {
u16 num_segs;
+ u16 flags;
u64 skb;
u64 size[OTX2_MAX_FRAGS_IN_SQE];
u64 dma_addr[OTX2_MAX_FRAGS_IN_SQE];
@@ -98,6 +103,11 @@ struct otx2_snd_queue {
struct queue_stats stats;
u16 sqb_count;
u64 *sqb_ptrs;
+ /* SQE ring and CPT response queue for Inline IPSEC */
+ struct qmem *sqe_ring;
+ struct qmem *cpt_resp;
+ /* Buffer pool for af_xdp zero-copy */
+ struct xsk_buff_pool *xsk_pool;
} ____cacheline_aligned_in_smp;
enum cq_type {
@@ -121,7 +131,11 @@ struct otx2_pool {
struct qmem *stack;
struct qmem *fc_addr;
struct page_pool *page_pool;
+ struct xsk_buff_pool *xsk_pool;
+ struct xdp_buff **xdp;
+ u16 xdp_cnt;
u16 rbsize;
+ u16 xdp_top;
};
struct otx2_cq_queue {
@@ -138,6 +152,7 @@ struct otx2_cq_queue {
void *cqe_base;
struct qmem *cqe;
struct otx2_pool *rbpool;
+ bool xsk_zc_en;
struct xdp_rxq_info xdp_rxq;
} ____cacheline_aligned_in_smp;
@@ -164,7 +179,8 @@ static inline u64 otx2_iova_to_phys(void *iommu_domain, dma_addr_t dma_addr)
}
int otx2_napi_handler(struct napi_struct *napi, int budget);
-bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
+bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq,
+ struct otx2_snd_queue *sq,
struct sk_buff *skb, u16 qidx);
void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq,
int size, int qidx);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index cf0aa16d7540..f4fdbfba8667 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -14,6 +14,7 @@
#include "otx2_reg.h"
#include "otx2_ptp.h"
#include "cn10k.h"
+#include "cn10k_ipsec.h"
#define DRV_NAME "rvu_nicvf"
#define DRV_STRING "Marvell RVU NIC Virtual Function Driver"
@@ -21,6 +22,7 @@
static const struct pci_device_id otx2_vf_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AFVF) },
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_VF) },
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_SDP_REP) },
{ }
};
@@ -134,7 +136,7 @@ static int otx2vf_process_mbox_msg_up(struct otx2_nic *vf,
rsp->hdr.id = MBOX_MSG_CGX_LINK_EVENT;
rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
- rsp->hdr.pcifunc = 0;
+ rsp->hdr.pcifunc = req->pcifunc;
rsp->hdr.rc = 0;
err = otx2_mbox_up_handler_cgx_link_event(
vf, (struct cgx_link_info_msg *)req, rsp);
@@ -238,6 +240,10 @@ static void otx2vf_disable_mbox_intr(struct otx2_nic *vf)
/* Disable VF => PF mailbox IRQ */
otx2_write64(vf, RVU_VF_INT_ENA_W1C, BIT_ULL(0));
+
+ if (is_cn20k(vf->pdev))
+ otx2_write64(vf, RVU_VF_INT_ENA_W1C, BIT_ULL(0) | BIT_ULL(1));
+
free_irq(vector, vf);
}
@@ -250,9 +256,18 @@ static int otx2vf_register_mbox_intr(struct otx2_nic *vf, bool probe_pf)
/* Register mailbox interrupt handler */
irq_name = &hw->irq_name[RVU_VF_INT_VEC_MBOX * NAME_SIZE];
- snprintf(irq_name, NAME_SIZE, "RVUVFAF Mbox");
- err = request_irq(pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX),
- otx2vf_vfaf_mbox_intr_handler, 0, irq_name, vf);
+ snprintf(irq_name, NAME_SIZE, "RVUVF%d AFVF Mbox", ((vf->pcifunc &
+ RVU_PFVF_FUNC_MASK) - 1));
+
+ if (!is_cn20k(vf->pdev)) {
+ err = request_irq(pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX),
+ otx2vf_vfaf_mbox_intr_handler, 0, irq_name, vf);
+ } else {
+ err = request_irq(pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX),
+ vf->hw_ops->vfaf_mbox_intr_handler, 0, irq_name,
+ vf);
+ }
+
if (err) {
dev_err(vf->dev,
"RVUPF: IRQ registration failed for VFAF mbox irq\n");
@@ -262,8 +277,15 @@ static int otx2vf_register_mbox_intr(struct otx2_nic *vf, bool probe_pf)
/* Enable mailbox interrupt for msgs coming from PF.
* First clear to avoid spurious interrupts, if any.
*/
- otx2_write64(vf, RVU_VF_INT, BIT_ULL(0));
- otx2_write64(vf, RVU_VF_INT_ENA_W1S, BIT_ULL(0));
+ if (!is_cn20k(vf->pdev)) {
+ otx2_write64(vf, RVU_VF_INT, BIT_ULL(0));
+ otx2_write64(vf, RVU_VF_INT_ENA_W1S, BIT_ULL(0));
+ } else {
+ otx2_write64(vf, RVU_VF_INT, BIT_ULL(0) | BIT_ULL(1) |
+ BIT_ULL(2) | BIT_ULL(3));
+ otx2_write64(vf, RVU_VF_INT_ENA_W1S, BIT_ULL(0) |
+ BIT_ULL(1) | BIT_ULL(2) | BIT_ULL(3));
+ }
if (!probe_pf)
return 0;
@@ -313,7 +335,13 @@ static int otx2vf_vfaf_mbox_init(struct otx2_nic *vf)
if (!vf->mbox_wq)
return -ENOMEM;
- if (test_bit(CN10K_MBOX, &vf->hw.cap_flag)) {
+ /* For cn20k platform, VF mailbox region is in DRAM, aliased from the AF
+ * VF MBOX ADDR; MBOX is a separate RVU block.
+ */
+ if (is_cn20k(vf->pdev)) {
+ hwbase = vf->reg_base + RVU_VF_MBOX_REGION + ((u64)BLKADDR_MBOX <<
+ RVU_FUNC_BLKADDR_SHIFT);
+ } else if (test_bit(CN10K_MBOX, &vf->hw.cap_flag)) {
/* For cn10k platform, VF mailbox region is in its BAR2
* register space
*/
@@ -371,7 +399,7 @@ static int otx2vf_open(struct net_device *netdev)
/* LBKs do not receive link events so tell everyone we are up here */
vf = netdev_priv(netdev);
- if (is_otx2_lbkvf(vf->pdev)) {
+ if (is_otx2_lbkvf(vf->pdev) || is_otx2_sdp_rep(vf->pdev)) {
pr_info("%s NIC Link is UP\n", netdev->name);
netif_carrier_on(netdev);
netif_tx_start_all_queues(netdev);
@@ -389,13 +417,23 @@ static netdev_tx_t otx2vf_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct otx2_nic *vf = netdev_priv(netdev);
int qidx = skb_get_queue_mapping(skb);
+ struct otx2_dev_stats *dev_stats;
struct otx2_snd_queue *sq;
struct netdev_queue *txq;
+ /* Check for minimum and maximum packet length */
+ if (skb->len <= ETH_HLEN ||
+ (!skb_shinfo(skb)->gso_size && skb->len > vf->tx_max_pktlen)) {
+ dev_stats = &vf->hw.dev_stats;
+ atomic_long_inc(&dev_stats->tx_discards);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
sq = &vf->qset.sq[qidx];
txq = netdev_get_tx_queue(netdev, qidx);
- if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) {
+ if (!otx2_sq_append_skb(vf, txq, sq, skb, qidx)) {
netif_tx_stop_queue(txq);
/* Check again, in case SQBs got freed up */
@@ -456,7 +494,7 @@ static int otx2vf_change_mtu(struct net_device *netdev, int new_mtu)
netdev_info(netdev, "Changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (if_up)
err = otx2vf_open(netdev);
@@ -496,11 +534,12 @@ static const struct net_device_ops otx2vf_netdev_ops = {
.ndo_set_features = otx2vf_set_features,
.ndo_get_stats64 = otx2_get_stats64,
.ndo_tx_timeout = otx2_tx_timeout,
- .ndo_eth_ioctl = otx2_ioctl,
.ndo_setup_tc = otx2_setup_tc,
+ .ndo_hwtstamp_get = otx2_config_hwtstamp_get,
+ .ndo_hwtstamp_set = otx2_config_hwtstamp_set,
};
-static int otx2_wq_init(struct otx2_nic *vf)
+static int otx2_vf_wq_init(struct otx2_nic *vf)
{
vf->otx2_wq = create_singlethread_workqueue("otx2vf_wq");
if (!vf->otx2_wq)
@@ -546,7 +585,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return err;
}
- err = pci_request_regions(pdev, DRV_NAME);
+ err = pcim_request_all_regions(pdev, DRV_NAME);
if (err) {
dev_err(dev, "PCI request regions failed 0x%x\n", err);
return err;
@@ -555,7 +594,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
if (err) {
dev_err(dev, "DMA mask config failed, abort\n");
- goto err_release_regions;
+ return err;
}
pci_set_master(pdev);
@@ -563,10 +602,8 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
qcount = num_online_cpus();
qos_txqs = min_t(int, qcount, OTX2_QOS_MAX_LEAF_NODES);
netdev = alloc_etherdev_mqs(sizeof(*vf), qcount + qos_txqs, qcount);
- if (!netdev) {
- err = -ENOMEM;
- goto err_release_regions;
- }
+ if (!netdev)
+ return -ENOMEM;
pci_set_drvdata(pdev, netdev);
SET_NETDEV_DEV(netdev, &pdev->dev);
@@ -616,6 +653,12 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
otx2_setup_dev_hw_settings(vf);
+
+ if (is_cn20k(vf->pdev))
+ cn20k_init(vf);
+ else
+ otx2_init_hw_ops(vf);
+
/* Init VF <=> PF mailbox stuff */
err = otx2vf_vfaf_mbox_init(vf);
if (err)
@@ -671,6 +714,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
netdev->min_mtu = OTX2_MIN_MTU;
netdev->max_mtu = otx2_get_max_mtu(vf);
+ hw->max_mtu = netdev->max_mtu;
/* To distinguish, for LBK VFs set netdev name explicitly */
if (is_otx2_lbkvf(vf->pdev)) {
@@ -682,13 +726,26 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
snprintf(netdev->name, sizeof(netdev->name), "lbk%d", n);
}
+ if (is_otx2_sdp_rep(vf->pdev)) {
+ int n;
+
+ n = vf->pcifunc & RVU_PFVF_FUNC_MASK;
+ n -= 1;
+ snprintf(netdev->name, sizeof(netdev->name), "sdp%d-%d",
+ pdev->bus->number, n);
+ }
+
+ err = cn10k_ipsec_init(netdev);
+ if (err)
+ goto err_ptp_destroy;
+
err = register_netdev(netdev);
if (err) {
dev_err(dev, "Failed to register netdevice\n");
- goto err_ptp_destroy;
+ goto err_ipsec_clean;
}
- err = otx2_wq_init(vf);
+ err = otx2_vf_wq_init(vf);
if (err)
goto err_unreg_netdev;
@@ -706,19 +763,36 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_shutdown_tc;
+ vf->af_xdp_zc_qidx = bitmap_zalloc(qcount, GFP_KERNEL);
+ if (!vf->af_xdp_zc_qidx) {
+ err = -ENOMEM;
+ goto err_unreg_devlink;
+ }
+
#ifdef CONFIG_DCB
- err = otx2_dcbnl_set_ops(netdev);
- if (err)
- goto err_shutdown_tc;
+ /* Priority flow control is not supported for LBK and SDP VFs */
+ if (!(is_otx2_lbkvf(vf->pdev) || is_otx2_sdp_rep(vf->pdev))) {
+ err = otx2_dcbnl_set_ops(netdev);
+ if (err)
+ goto err_free_zc_bmap;
+ }
#endif
otx2_qos_init(vf, qos_txqs);
return 0;
+#ifdef CONFIG_DCB
+err_free_zc_bmap:
+ bitmap_free(vf->af_xdp_zc_qidx);
+#endif
+err_unreg_devlink:
+ otx2_unregister_dl(vf);
err_shutdown_tc:
otx2_shutdown_tc(vf);
err_unreg_netdev:
unregister_netdev(netdev);
+err_ipsec_clean:
+ cn10k_ipsec_clean(vf);
err_ptp_destroy:
otx2_ptp_destroy(vf);
err_detach_rsrc:
@@ -735,8 +809,6 @@ err_free_irq_vectors:
err_free_netdev:
pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
-err_release_regions:
- pci_release_regions(pdev);
return err;
}
@@ -771,6 +843,7 @@ static void otx2vf_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
if (vf->otx2_wq)
destroy_workqueue(vf->otx2_wq);
+ cn10k_ipsec_clean(vf);
otx2_ptp_destroy(vf);
otx2_mcam_flow_del(vf);
otx2_shutdown_tc(vf);
@@ -782,10 +855,9 @@ static void otx2vf_remove(struct pci_dev *pdev)
qmem_free(vf->dev, vf->dync_lmt);
otx2vf_vfaf_mbox_destroy(vf);
pci_free_irq_vectors(vf->pdev);
+ bitmap_free(vf->af_xdp_zc_qidx);
pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
-
- pci_release_regions(pdev);
}
static struct pci_driver otx2vf_driver = {
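
Among other changes, the otx2_vf.c diff adds a length guard to otx2vf_xmit(): runt frames and oversized non-GSO frames are dropped and accounted as tx_discards instead of being queued. The sketch below models only that guard; the constants and the counter are local stand-ins rather than driver state.

/*
 * Standalone sketch of the xmit length guard: frames no longer than an
 * Ethernet header, or longer than the TX limit without GSO, are dropped and
 * counted. ETH_HLEN_LOCAL and TX_MAX_PKTLEN are assumed example values.
 */
#include <stdbool.h>
#include <stdio.h>

#define ETH_HLEN_LOCAL   14
#define TX_MAX_PKTLEN    1522   /* assumed limit for this example only */

static unsigned long tx_discards;

/* Return true when the frame should be dropped before reaching the SQ. */
static bool drop_bad_length(unsigned int len, bool is_gso)
{
	if (len <= ETH_HLEN_LOCAL || (!is_gso && len > TX_MAX_PKTLEN)) {
		tx_discards++;
		return true;
	}
	return false;
}

int main(void)
{
	printf("%d %d %d\n",
	       drop_bad_length(10, false),     /* runt: dropped */
	       drop_bad_length(9000, false),   /* oversized, no GSO: dropped */
	       drop_bad_length(9000, true));   /* GSO frame: allowed */
	printf("discards=%lu\n", tx_discards);
	return 0;
}
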
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c
new file mode 100644
index 000000000000..7d67b4cbaf71
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c
@@ -0,0 +1,245 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#include <linux/bpf_trace.h>
+#include <linux/stringify.h>
+#include <net/xdp_sock_drv.h>
+#include <net/xdp.h>
+
+#include "otx2_common.h"
+#include "otx2_struct.h"
+#include "otx2_xsk.h"
+
+int otx2_xsk_pool_alloc_buf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+ dma_addr_t *dma, int idx)
+{
+ struct xdp_buff *xdp;
+ int delta;
+
+ xdp = xsk_buff_alloc(pool->xsk_pool);
+ if (!xdp)
+ return -ENOMEM;
+
+ pool->xdp[pool->xdp_top++] = xdp;
+ *dma = OTX2_DATA_ALIGN(xsk_buff_xdp_get_dma(xdp));
+ /* Adjust xdp->data for unaligned addresses */
+ delta = *dma - xsk_buff_xdp_get_dma(xdp);
+ xdp->data += delta;
+
+ return 0;
+}
+
+static int otx2_xsk_ctx_disable(struct otx2_nic *pfvf, u16 qidx, int aura_id)
+{
+ struct nix_cn10k_aq_enq_req *cn10k_rq_aq;
+ struct npa_aq_enq_req *aura_aq;
+ struct npa_aq_enq_req *pool_aq;
+ struct nix_aq_enq_req *rq_aq;
+
+ if (test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) {
+ cn10k_rq_aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
+ if (!cn10k_rq_aq)
+ return -ENOMEM;
+ cn10k_rq_aq->qidx = qidx;
+ cn10k_rq_aq->rq.ena = 0;
+ cn10k_rq_aq->rq_mask.ena = 1;
+ cn10k_rq_aq->ctype = NIX_AQ_CTYPE_RQ;
+ cn10k_rq_aq->op = NIX_AQ_INSTOP_WRITE;
+ } else {
+ rq_aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
+ if (!rq_aq)
+ return -ENOMEM;
+ rq_aq->qidx = qidx;
+ rq_aq->sq.ena = 0;
+ rq_aq->sq_mask.ena = 1;
+ rq_aq->ctype = NIX_AQ_CTYPE_RQ;
+ rq_aq->op = NIX_AQ_INSTOP_WRITE;
+ }
+
+ aura_aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
+ if (!aura_aq)
+ goto fail;
+
+ aura_aq->aura_id = aura_id;
+ aura_aq->aura.ena = 0;
+ aura_aq->aura_mask.ena = 1;
+ aura_aq->ctype = NPA_AQ_CTYPE_AURA;
+ aura_aq->op = NPA_AQ_INSTOP_WRITE;
+
+ pool_aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
+ if (!pool_aq)
+ goto fail;
+
+ pool_aq->aura_id = aura_id;
+ pool_aq->pool.ena = 0;
+ pool_aq->pool_mask.ena = 1;
+
+ pool_aq->ctype = NPA_AQ_CTYPE_POOL;
+ pool_aq->op = NPA_AQ_INSTOP_WRITE;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+
+fail:
+ otx2_mbox_reset(&pfvf->mbox.mbox, 0);
+ return -ENOMEM;
+}
+
+static void otx2_clean_up_rq(struct otx2_nic *pfvf, int qidx)
+{
+ struct otx2_qset *qset = &pfvf->qset;
+ struct otx2_cq_queue *cq;
+ struct otx2_pool *pool;
+ u64 iova;
+
+ /* If the DOWN flag is set, SQs are already freed */
+ if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
+ return;
+
+ cq = &qset->cq[qidx];
+ if (cq)
+ otx2_cleanup_rx_cqes(pfvf, cq, qidx);
+
+ pool = &pfvf->qset.pool[qidx];
+ iova = otx2_aura_allocptr(pfvf, qidx);
+ while (iova) {
+ iova -= OTX2_HEAD_ROOM;
+ otx2_free_bufs(pfvf, pool, iova, pfvf->rbsize);
+ iova = otx2_aura_allocptr(pfvf, qidx);
+ }
+
+ mutex_lock(&pfvf->mbox.lock);
+ otx2_xsk_ctx_disable(pfvf, qidx, qidx);
+ mutex_unlock(&pfvf->mbox.lock);
+}
+
+int otx2_xsk_pool_enable(struct otx2_nic *pf, struct xsk_buff_pool *pool, u16 qidx)
+{
+ u16 rx_queues = pf->hw.rx_queues;
+ u16 tx_queues = pf->hw.tx_queues;
+ int err;
+
+ if (qidx >= rx_queues || qidx >= tx_queues)
+ return -EINVAL;
+
+ err = xsk_pool_dma_map(pool, pf->dev, DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
+ if (err)
+ return err;
+
+ set_bit(qidx, pf->af_xdp_zc_qidx);
+ otx2_clean_up_rq(pf, qidx);
+ /* Reconfigure RSS table as 'qidx' cannot be part of RSS now */
+ otx2_set_rss_table(pf, DEFAULT_RSS_CONTEXT_GROUP, NULL);
+ /* Kick start the NAPI context so that receiving will start */
+ return otx2_xsk_wakeup(pf->netdev, qidx, XDP_WAKEUP_RX);
+}
+
+int otx2_xsk_pool_disable(struct otx2_nic *pf, u16 qidx)
+{
+ struct net_device *netdev = pf->netdev;
+ struct xsk_buff_pool *pool;
+ struct otx2_snd_queue *sq;
+
+ pool = xsk_get_pool_from_qid(netdev, qidx);
+ if (!pool)
+ return -EINVAL;
+
+ sq = &pf->qset.sq[qidx + pf->hw.tx_queues];
+ sq->xsk_pool = NULL;
+ otx2_clean_up_rq(pf, qidx);
+ clear_bit(qidx, pf->af_xdp_zc_qidx);
+ xsk_pool_dma_unmap(pool, DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
+ /* Reconfigure RSS table as 'qidx' needs to be part of RSS now */
+ otx2_set_rss_table(pf, DEFAULT_RSS_CONTEXT_GROUP, NULL);
+
+ return 0;
+}
+
+int otx2_xsk_pool_setup(struct otx2_nic *pf, struct xsk_buff_pool *pool, u16 qidx)
+{
+ if (pool)
+ return otx2_xsk_pool_enable(pf, pool, qidx);
+
+ return otx2_xsk_pool_disable(pf, qidx);
+}
+
+int otx2_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
+{
+ struct otx2_nic *pf = netdev_priv(dev);
+ struct otx2_cq_poll *cq_poll = NULL;
+ struct otx2_qset *qset = &pf->qset;
+
+ if (pf->flags & OTX2_FLAG_INTF_DOWN)
+ return -ENETDOWN;
+
+ if (queue_id >= pf->hw.rx_queues || queue_id >= pf->hw.tx_queues)
+ return -EINVAL;
+
+ cq_poll = &qset->napi[queue_id];
+ if (!cq_poll)
+ return -EINVAL;
+
+ /* Trigger interrupt */
+ if (!napi_if_scheduled_mark_missed(&cq_poll->napi)) {
+ otx2_write64(pf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx), BIT_ULL(0));
+ otx2_write64(pf, NIX_LF_CINTX_INT_W1S(cq_poll->cint_idx), BIT_ULL(0));
+ }
+
+ return 0;
+}
+
+void otx2_attach_xsk_buff(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, int qidx)
+{
+ if (test_bit(qidx, pfvf->af_xdp_zc_qidx))
+ sq->xsk_pool = xsk_get_pool_from_qid(pfvf->netdev, qidx);
+}
+
+static void otx2_xsk_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len,
+ u16 qidx)
+{
+ struct nix_sqe_hdr_s *sqe_hdr;
+ struct otx2_snd_queue *sq;
+ int offset;
+
+ sq = &pfvf->qset.sq[qidx];
+ memset(sq->sqe_base + 8, 0, sq->sqe_size - 8);
+
+ sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
+
+ if (!sqe_hdr->total) {
+ sqe_hdr->aura = sq->aura_id;
+ sqe_hdr->df = 1;
+ sqe_hdr->sq = qidx;
+ sqe_hdr->pnc = 1;
+ }
+ sqe_hdr->total = len;
+ sqe_hdr->sqe_id = sq->head;
+
+ offset = sizeof(*sqe_hdr);
+
+ otx2_xdp_sqe_add_sg(sq, NULL, iova, len, &offset, OTX2_AF_XDP_FRAME);
+ sqe_hdr->sizem1 = (offset / 16) - 1;
+ pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
+}
+
+void otx2_zc_napi_handler(struct otx2_nic *pfvf, struct xsk_buff_pool *pool,
+ int queue, int budget)
+{
+ struct xdp_desc *xdp_desc = pool->tx_descs;
+ int i, batch;
+
+ budget = min(budget, otx2_read_free_sqe(pfvf, queue));
+ batch = xsk_tx_peek_release_desc_batch(pool, budget);
+ if (!batch)
+ return;
+
+ for (i = 0; i < batch; i++) {
+ dma_addr_t dma_addr;
+
+ dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc[i].addr);
+ otx2_xsk_sq_append_pkt(pfvf, dma_addr, xdp_desc[i].len, queue);
+ }
+}
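
otx2_zc_napi_handler() at the end of the new otx2_xsk.c clamps the AF_XDP zero-copy TX budget to the number of free SQEs before pulling a whole batch of descriptors with xsk_tx_peek_release_desc_batch(). The fragment below is an illustrative user-space model of that clamp-then-drain pattern; the peek function is a stand-in, not the XSK API.

/*
 * Sketch of the batching pattern: the TX budget is clamped to the free SQE
 * count before a whole batch of descriptors is pulled and submitted. The
 * peek function is a stand-in for xsk_tx_peek_release_desc_batch().
 */
#include <stdio.h>

struct desc { unsigned long addr; unsigned int len; };

/* Stand-in for the XSK pool: pretend 10 descriptors are queued by user space. */
static int peek_batch(struct desc *out, int max)
{
	int queued = 10, n = max < queued ? max : queued;

	for (int i = 0; i < n; i++) {
		out[i].addr = 0x1000 + i * 0x800;
		out[i].len = 64;
	}
	return n;
}

static void zc_tx(int budget, int free_sqe)
{
	struct desc descs[64];
	int batch;

	if (free_sqe < budget)
		budget = free_sqe;       /* never post more SQEs than fit */
	batch = peek_batch(descs, budget);
	for (int i = 0; i < batch; i++)
		printf("submit addr=0x%lx len=%u\n", descs[i].addr, descs[i].len);
}

int main(void)
{
	zc_tx(16, 6);   /* only 6 SQEs free -> at most 6 descriptors submitted */
	return 0;
}
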
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.h
new file mode 100644
index 000000000000..8047fafee8fe
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU PF/VF Netdev Devlink
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#ifndef OTX2_XSK_H
+#define OTX2_XSK_H
+
+struct otx2_nic;
+struct xsk_buff_pool;
+
+int otx2_xsk_pool_setup(struct otx2_nic *pf, struct xsk_buff_pool *pool, u16 qid);
+int otx2_xsk_pool_enable(struct otx2_nic *pf, struct xsk_buff_pool *pool, u16 qid);
+int otx2_xsk_pool_disable(struct otx2_nic *pf, u16 qid);
+int otx2_xsk_pool_alloc_buf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+ dma_addr_t *dma, int idx);
+int otx2_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags);
+void otx2_zc_napi_handler(struct otx2_nic *pfvf, struct xsk_buff_pool *pool,
+ int queue, int budget);
+void otx2_attach_xsk_buff(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, int qidx);
+
+#endif /* OTX2_XSK_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
index 1723e9912ae0..5765bac119f0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
@@ -153,7 +153,6 @@ static void __otx2_qos_txschq_cfg(struct otx2_nic *pfvf,
num_regs++;
otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
-
} else if (level == NIX_TXSCH_LVL_TL4) {
otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
} else if (level == NIX_TXSCH_LVL_TL3) {
@@ -166,6 +165,11 @@ static void __otx2_qos_txschq_cfg(struct otx2_nic *pfvf,
otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
} else if (level == NIX_TXSCH_LVL_TL2) {
+ /* configure parent txschq */
+ cfg->reg[num_regs] = NIX_AF_TL2X_PARENT(node->schq);
+ cfg->regval[num_regs] = (u64)hw->tx_link << 16;
+ num_regs++;
+
/* configure link cfg */
if (level == pfvf->qos.link_cfg_lvl) {
cfg->reg[num_regs] = NIX_AF_TL3_TL2X_LINKX_CFG(node->schq, hw->tx_link);
@@ -176,7 +180,7 @@ static void __otx2_qos_txschq_cfg(struct otx2_nic *pfvf,
/* check if node is root */
if (node->qid == OTX2_QOS_QID_INNER && !node->parent) {
cfg->reg[num_regs] = NIX_AF_TL2X_SCHEDULE(node->schq);
- cfg->regval[num_regs] = TXSCH_TL1_DFLT_RR_PRIO << 24 |
+ cfg->regval[num_regs] = (u64)hw->txschq_aggr_lvl_rr_prio << 24 |
mtu_to_dwrr_weight(pfvf,
pfvf->tx_max_pktlen);
num_regs++;
@@ -545,6 +549,20 @@ otx2_qos_sw_create_leaf_node(struct otx2_nic *pfvf,
return node;
}
+static struct otx2_qos_node
+*otx2_sw_node_find_by_qid(struct otx2_nic *pfvf, u16 qid)
+{
+ struct otx2_qos_node *node = NULL;
+ int bkt;
+
+ hash_for_each(pfvf->qos.qos_hlist, bkt, node, hlist) {
+ if (node->qid == qid)
+ break;
+ }
+
+ return node;
+}
+
static struct otx2_qos_node *
otx2_sw_node_find(struct otx2_nic *pfvf, u32 classid)
{
@@ -917,6 +935,7 @@ static void otx2_qos_enadis_sq(struct otx2_nic *pfvf,
otx2_qos_disable_sq(pfvf, qid);
pfvf->qos.qid_to_sqmap[qid] = node->schq;
+ otx2_qos_txschq_config(pfvf, node);
otx2_qos_enable_sq(pfvf, qid);
}
@@ -1407,7 +1426,10 @@ static int otx2_qos_leaf_to_inner(struct otx2_nic *pfvf, u16 classid,
otx2_qos_read_txschq_cfg(pfvf, node, old_cfg);
/* delete the txschq nodes allocated for this node */
+ otx2_qos_disable_sq(pfvf, qid);
+ otx2_qos_free_hw_node_schq(pfvf, node);
otx2_qos_free_sw_node_schq(pfvf, node);
+ pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;
/* mark this node as htb inner node */
WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER);
@@ -1475,13 +1497,45 @@ out:
return ret;
}
+static int otx2_qos_cur_leaf_nodes(struct otx2_nic *pfvf)
+{
+ int last = find_last_bit(pfvf->qos.qos_sq_bmap, pfvf->hw.tc_tx_queues);
+
+ return last == pfvf->hw.tc_tx_queues ? 0 : last + 1;
+}
+
+static void otx2_reset_qdisc(struct net_device *dev, u16 qid)
+{
+ struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, qid);
+ struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
+
+ if (!qdisc)
+ return;
+
+ spin_lock_bh(qdisc_lock(qdisc));
+ qdisc_reset(qdisc);
+ spin_unlock_bh(qdisc_lock(qdisc));
+}
+
+static void otx2_cfg_smq(struct otx2_nic *pfvf, struct otx2_qos_node *node,
+ int qid)
+{
+ struct otx2_qos_node *tmp;
+
+ list_for_each_entry(tmp, &node->child_schq_list, list)
+ if (tmp->level == NIX_TXSCH_LVL_MDQ) {
+ otx2_qos_txschq_config(pfvf, tmp);
+ pfvf->qos.qid_to_sqmap[qid] = tmp->schq;
+ }
+}
+
static int otx2_qos_leaf_del(struct otx2_nic *pfvf, u16 *classid,
struct netlink_ext_ack *extack)
{
struct otx2_qos_node *node, *parent;
int dwrr_del_node = false;
+ u16 qid, moved_qid;
u64 prio;
- u16 qid;
netdev_dbg(pfvf->netdev, "TC_HTB_LEAF_DEL classid %04x\n", *classid);
@@ -1517,6 +1571,37 @@ static int otx2_qos_leaf_del(struct otx2_nic *pfvf, u16 *classid,
if (!parent->child_static_cnt)
parent->max_static_prio = 0;
+ moved_qid = otx2_qos_cur_leaf_nodes(pfvf);
+
+ /* last node just deleted */
+ if (moved_qid == 0 || moved_qid == qid)
+ return 0;
+
+ moved_qid--;
+
+ node = otx2_sw_node_find_by_qid(pfvf, moved_qid);
+ if (!node)
+ return 0;
+
+ /* stop traffic to the old queue and disable
+ * SQ associated with it
+ */
+ node->qid = OTX2_QOS_QID_INNER;
+ __clear_bit(moved_qid, pfvf->qos.qos_sq_bmap);
+ otx2_qos_disable_sq(pfvf, moved_qid);
+
+ otx2_reset_qdisc(pfvf->netdev, pfvf->hw.tx_queues + moved_qid);
+
+ /* enable SQ associated with qid and
+ * update the node
+ */
+ otx2_cfg_smq(pfvf, node, qid);
+
+ otx2_qos_enable_sq(pfvf, qid);
+ __set_bit(qid, pfvf->qos.qos_sq_bmap);
+ node->qid = qid;
+
+ *classid = node->classid;
return 0;
}
@@ -1553,7 +1638,9 @@ static int otx2_qos_leaf_del_last(struct otx2_nic *pfvf, u16 classid, bool force
if (!node->is_static)
dwrr_del_node = true;
+ WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER);
/* destroy the leaf node */
+ otx2_qos_disable_sq(pfvf, qid);
otx2_qos_destroy_node(pfvf, node);
pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;
@@ -1596,9 +1683,6 @@ static int otx2_qos_leaf_del_last(struct otx2_nic *pfvf, u16 classid, bool force
}
kfree(new_cfg);
- /* update tx_real_queues */
- otx2_qos_update_tx_netdev_queues(pfvf);
-
return 0;
}
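
The otx2_qos_leaf_del() changes above compact the leaf queue space: when a class other than the highest-numbered one is removed, the node holding the highest qid is disabled and re-enabled on the freed qid so the active send queues remain contiguous. The bitmap model below illustrates that compaction step only; it is a stand-in for the driver's qos_sq_bmap handling, not the driver code itself.

/*
 * Illustrative model of the leaf-delete compaction: after clearing the freed
 * qid in the bitmap, the highest remaining qid is moved into the freed slot
 * so that the in-use queue indices stay contiguous.
 */
#include <stdio.h>

static int find_last(unsigned long bmap)
{
	for (int i = (int)(8 * sizeof(bmap)) - 1; i >= 0; i--)
		if (bmap & (1UL << i))
			return i;
	return -1;
}

static unsigned long leaf_del(unsigned long bmap, int freed_qid)
{
	int last;

	bmap &= ~(1UL << freed_qid);        /* the deleted leaf's qid */
	last = find_last(bmap);
	if (last < 0 || last <= freed_qid)  /* nothing above the hole to move */
		return bmap;

	bmap &= ~(1UL << last);             /* stop the highest qid ... */
	bmap |= 1UL << freed_qid;           /* ... and reuse the freed slot */
	return bmap;
}

int main(void)
{
	/* qids 0..3 in use, delete qid 1 -> qid 3 takes its place. */
	printf("bitmap: 0x%lx\n", leaf_del(0xfUL, 1));  /* prints 0x7 */
	return 0;
}
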
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
index 9d887bfc3108..2872adabc830 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
@@ -82,7 +82,7 @@ static int otx2_qos_sq_aura_pool_init(struct otx2_nic *pfvf, int qidx)
}
for (ptr = 0; ptr < num_sqbs; ptr++) {
- err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
+ err = otx2_alloc_rbuf(pfvf, pool, &bufptr, pool_id, ptr);
if (err)
goto sqb_free;
pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr);
@@ -151,9 +151,10 @@ static void otx2_qos_sq_free_sqbs(struct otx2_nic *pfvf, int qidx)
static void otx2_qos_sqb_flush(struct otx2_nic *pfvf, int qidx)
{
int sqe_tail, sqe_head;
- u64 incr, *ptr, val;
+ void __iomem *ptr;
+ u64 incr, val;
- ptr = (__force u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
+ ptr = otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
incr = (u64)qidx << 32;
val = otx2_atomic64_add(incr, ptr);
sqe_head = (val >> 20) & 0x3F;
@@ -256,6 +257,26 @@ out:
return err;
}
+static int otx2_qos_nix_npa_ndc_sync(struct otx2_nic *pfvf)
+{
+ struct ndc_sync_op *req;
+ int rc;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_ndc_sync_op(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->nix_lf_tx_sync = true;
+ req->npa_lf_sync = true;
+ rc = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+ return rc;
+}
+
void otx2_qos_disable_sq(struct otx2_nic *pfvf, int qidx)
{
struct otx2_qset *qset = &pfvf->qset;
@@ -285,6 +306,8 @@ void otx2_qos_disable_sq(struct otx2_nic *pfvf, int qidx)
otx2_qos_sqb_flush(pfvf, sq_idx);
otx2_smq_flush(pfvf, otx2_get_smq_idx(pfvf, sq_idx));
+ /* NIX/NPA NDC sync */
+ otx2_qos_nix_npa_ndc_sync(pfvf);
otx2_cleanup_tx_cqes(pfvf, cq);
mutex_lock(&pfvf->mbox.lock);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/rep.c b/drivers/net/ethernet/marvell/octeontx2/nic/rep.c
new file mode 100644
index 000000000000..b476733a0234
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/rep.c
@@ -0,0 +1,879 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU representor driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/net_tstamp.h>
+#include <linux/sort.h>
+
+#include "otx2_common.h"
+#include "cn10k.h"
+#include "otx2_reg.h"
+#include "rep.h"
+
+#define DRV_NAME "rvu_rep"
+#define DRV_STRING "Marvell RVU Representor Driver"
+
+static const struct pci_device_id rvu_rep_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_RVU_REP) },
+ { }
+};
+
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_DESCRIPTION(DRV_STRING);
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, rvu_rep_id_table);
+
+static int rvu_rep_notify_pfvf(struct otx2_nic *priv, u16 event,
+ struct rep_event *data);
+
+static int rvu_rep_mcam_flow_init(struct rep_dev *rep)
+{
+ struct npc_mcam_alloc_entry_req *req;
+ struct npc_mcam_alloc_entry_rsp *rsp;
+ struct otx2_nic *priv = rep->mdev;
+ int ent, allocated = 0;
+ int count;
+
+ rep->flow_cfg = kcalloc(1, sizeof(struct otx2_flow_config), GFP_KERNEL);
+
+ if (!rep->flow_cfg)
+ return -ENOMEM;
+
+ count = OTX2_DEFAULT_FLOWCOUNT;
+
+ rep->flow_cfg->flow_ent = kcalloc(count, sizeof(u16), GFP_KERNEL);
+ if (!rep->flow_cfg->flow_ent)
+ return -ENOMEM;
+
+ while (allocated < count) {
+ req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&priv->mbox);
+ if (!req)
+ goto exit;
+
+ req->hdr.pcifunc = rep->pcifunc;
+ req->contig = false;
+ req->ref_entry = 0;
+ req->count = (count - allocated) > NPC_MAX_NONCONTIG_ENTRIES ?
+ NPC_MAX_NONCONTIG_ENTRIES : count - allocated;
+
+ if (otx2_sync_mbox_msg(&priv->mbox))
+ goto exit;
+
+ rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
+ (&priv->mbox.mbox, 0, &req->hdr);
+ if (IS_ERR(rsp))
+ goto exit;
+
+ for (ent = 0; ent < rsp->count; ent++)
+ rep->flow_cfg->flow_ent[ent + allocated] = rsp->entry_list[ent];
+
+ allocated += rsp->count;
+
+ if (rsp->count != req->count)
+ break;
+ }
+exit:
+ /* Multiple MCAM entry alloc requests could result in non-sequential
+ * MCAM entries in the flow_ent[] array. Sort them in an ascending
+ * order, otherwise user installed ntuple filter index and MCAM entry
+ * index will not be in sync.
+ */
+ if (allocated)
+ sort(&rep->flow_cfg->flow_ent[0], allocated,
+ sizeof(rep->flow_cfg->flow_ent[0]), mcam_entry_cmp, NULL);
+
+ mutex_unlock(&priv->mbox.lock);
+
+ rep->flow_cfg->max_flows = allocated;
+
+ if (allocated) {
+ rep->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
+ rep->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
+ rep->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;
+ }
+
+ INIT_LIST_HEAD(&rep->flow_cfg->flow_list);
+ INIT_LIST_HEAD(&rep->flow_cfg->flow_list_tc);
+ return 0;
+}
+
+static int rvu_rep_setup_tc_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ struct rep_dev *rep = cb_priv;
+ struct otx2_nic *priv = rep->mdev;
+
+ if (!(rep->flags & RVU_REP_VF_INITIALIZED))
+ return -EINVAL;
+
+ if (!(rep->flags & OTX2_FLAG_TC_FLOWER_SUPPORT))
+ rvu_rep_mcam_flow_init(rep);
+
+ priv->netdev = rep->netdev;
+ priv->flags = rep->flags;
+ priv->pcifunc = rep->pcifunc;
+ priv->flow_cfg = rep->flow_cfg;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return otx2_setup_tc_cls_flower(priv, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static LIST_HEAD(rvu_rep_block_cb_list);
+static int rvu_rep_setup_tc(struct net_device *netdev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct rvu_rep *rep = netdev_priv(netdev);
+
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return flow_block_cb_setup_simple(type_data,
+ &rvu_rep_block_cb_list,
+ rvu_rep_setup_tc_cb,
+ rep, rep, true);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+rvu_rep_sp_stats64(const struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct rep_dev *rep = netdev_priv(dev);
+ struct otx2_nic *priv = rep->mdev;
+ struct otx2_rcv_queue *rq;
+ struct otx2_snd_queue *sq;
+ u16 qidx = rep->rep_id;
+
+ otx2_update_rq_stats(priv, qidx);
+ rq = &priv->qset.rq[qidx];
+
+ otx2_update_sq_stats(priv, qidx);
+ sq = &priv->qset.sq[qidx];
+
+ stats->tx_bytes = sq->stats.bytes;
+ stats->tx_packets = sq->stats.pkts;
+ stats->rx_bytes = rq->stats.bytes;
+ stats->rx_packets = rq->stats.pkts;
+ return 0;
+}
+
+static bool
+rvu_rep_has_offload_stats(const struct net_device *dev, int attr_id)
+{
+ return attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT;
+}
+
+static int
+rvu_rep_get_offload_stats(int attr_id, const struct net_device *dev,
+ void *sp)
+{
+ if (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT)
+ return rvu_rep_sp_stats64(dev, (struct rtnl_link_stats64 *)sp);
+
+ return -EINVAL;
+}
+
+static int rvu_rep_dl_port_fn_hw_addr_get(struct devlink_port *port,
+ u8 *hw_addr, int *hw_addr_len,
+ struct netlink_ext_ack *extack)
+{
+ struct rep_dev *rep = container_of(port, struct rep_dev, dl_port);
+
+ ether_addr_copy(hw_addr, rep->mac);
+ *hw_addr_len = ETH_ALEN;
+ return 0;
+}
+
+static int rvu_rep_dl_port_fn_hw_addr_set(struct devlink_port *port,
+ const u8 *hw_addr, int hw_addr_len,
+ struct netlink_ext_ack *extack)
+{
+ struct rep_dev *rep = container_of(port, struct rep_dev, dl_port);
+ struct otx2_nic *priv = rep->mdev;
+ struct rep_event evt = {0};
+
+ eth_hw_addr_set(rep->netdev, hw_addr);
+ ether_addr_copy(rep->mac, hw_addr);
+
+ ether_addr_copy(evt.evt_data.mac, hw_addr);
+ evt.pcifunc = rep->pcifunc;
+ rvu_rep_notify_pfvf(priv, RVU_EVENT_MAC_ADDR_CHANGE, &evt);
+ return 0;
+}
+
+static const struct devlink_port_ops rvu_rep_dl_port_ops = {
+ .port_fn_hw_addr_get = rvu_rep_dl_port_fn_hw_addr_get,
+ .port_fn_hw_addr_set = rvu_rep_dl_port_fn_hw_addr_set,
+};
+
+static void
+rvu_rep_devlink_set_switch_id(struct otx2_nic *priv,
+ struct netdev_phys_item_id *ppid)
+{
+ struct pci_dev *pdev = priv->pdev;
+ u64 id;
+
+ id = pci_get_dsn(pdev);
+
+ ppid->id_len = sizeof(id);
+ put_unaligned_be64(id, &ppid->id);
+}
+
+static void rvu_rep_devlink_port_unregister(struct rep_dev *rep)
+{
+ devlink_port_unregister(&rep->dl_port);
+}
+
+static int rvu_rep_devlink_port_register(struct rep_dev *rep)
+{
+ struct devlink_port_attrs attrs = {};
+ struct otx2_nic *priv = rep->mdev;
+ struct devlink *dl = priv->dl->dl;
+ int err;
+
+ if (!(rep->pcifunc & RVU_PFVF_FUNC_MASK)) {
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ attrs.phys.port_number = rvu_get_pf(priv->pdev, rep->pcifunc);
+ } else {
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF;
+ attrs.pci_vf.pf = rvu_get_pf(priv->pdev, rep->pcifunc);
+ attrs.pci_vf.vf = rep->pcifunc & RVU_PFVF_FUNC_MASK;
+ }
+
+ rvu_rep_devlink_set_switch_id(priv, &attrs.switch_id);
+ devlink_port_attrs_set(&rep->dl_port, &attrs);
+
+ err = devl_port_register_with_ops(dl, &rep->dl_port, rep->rep_id,
+ &rvu_rep_dl_port_ops);
+ if (err) {
+ dev_err(rep->mdev->dev, "devlink_port_register failed: %d\n",
+ err);
+ return err;
+ }
+ return 0;
+}
+
+static int rvu_rep_get_repid(struct otx2_nic *priv, u16 pcifunc)
+{
+ int rep_id;
+
+ for (rep_id = 0; rep_id < priv->rep_cnt; rep_id++)
+ if (priv->rep_pf_map[rep_id] == pcifunc)
+ return rep_id;
+ return -EINVAL;
+}
+
+static int rvu_rep_notify_pfvf(struct otx2_nic *priv, u16 event,
+ struct rep_event *data)
+{
+ struct rep_event *req;
+
+ mutex_lock(&priv->mbox.lock);
+ req = otx2_mbox_alloc_msg_rep_event_notify(&priv->mbox);
+ if (!req) {
+ mutex_unlock(&priv->mbox.lock);
+ return -ENOMEM;
+ }
+ req->event = event;
+ req->pcifunc = data->pcifunc;
+
+ memcpy(&req->evt_data, &data->evt_data, sizeof(struct rep_evt_data));
+ otx2_sync_mbox_msg(&priv->mbox);
+ mutex_unlock(&priv->mbox.lock);
+ return 0;
+}
+
+static void rvu_rep_state_evt_handler(struct otx2_nic *priv,
+ struct rep_event *info)
+{
+ struct rep_dev *rep;
+ int rep_id;
+
+ rep_id = rvu_rep_get_repid(priv, info->pcifunc);
+ rep = priv->reps[rep_id];
+ if (info->evt_data.vf_state)
+ rep->flags |= RVU_REP_VF_INITIALIZED;
+ else
+ rep->flags &= ~RVU_REP_VF_INITIALIZED;
+}
+
+int rvu_event_up_notify(struct otx2_nic *pf, struct rep_event *info)
+{
+ if (info->event & RVU_EVENT_PFVF_STATE)
+ rvu_rep_state_evt_handler(pf, info);
+ return 0;
+}
+
+static int rvu_rep_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct rep_dev *rep = netdev_priv(dev);
+ struct otx2_nic *priv = rep->mdev;
+ struct rep_event evt = {0};
+
+ netdev_info(dev, "Changing MTU from %d to %d\n",
+ dev->mtu, new_mtu);
+ dev->mtu = new_mtu;
+
+ evt.evt_data.mtu = new_mtu;
+ evt.pcifunc = rep->pcifunc;
+ rvu_rep_notify_pfvf(priv, RVU_EVENT_MTU_CHANGE, &evt);
+ return 0;
+}
+
+static void rvu_rep_get_stats(struct work_struct *work)
+{
+ struct delayed_work *del_work = to_delayed_work(work);
+ struct nix_stats_req *req;
+ struct nix_stats_rsp *rsp;
+ struct rep_stats *stats;
+ struct otx2_nic *priv;
+ struct rep_dev *rep;
+ int err;
+
+ rep = container_of(del_work, struct rep_dev, stats_wrk);
+ priv = rep->mdev;
+
+ mutex_lock(&priv->mbox.lock);
+ req = otx2_mbox_alloc_msg_nix_lf_stats(&priv->mbox);
+ if (!req) {
+ mutex_unlock(&priv->mbox.lock);
+ return;
+ }
+ req->pcifunc = rep->pcifunc;
+ err = otx2_sync_mbox_msg_busy_poll(&priv->mbox);
+ if (err)
+ goto exit;
+
+ rsp = (struct nix_stats_rsp *)
+ otx2_mbox_get_rsp(&priv->mbox.mbox, 0, &req->hdr);
+
+ if (IS_ERR(rsp)) {
+ err = PTR_ERR(rsp);
+ goto exit;
+ }
+
+ stats = &rep->stats;
+ stats->rx_bytes = rsp->rx.octs;
+ stats->rx_frames = rsp->rx.ucast + rsp->rx.bcast +
+ rsp->rx.mcast;
+ stats->rx_drops = rsp->rx.drop;
+ stats->rx_mcast_frames = rsp->rx.mcast;
+ stats->tx_bytes = rsp->tx.octs;
+ stats->tx_frames = rsp->tx.ucast + rsp->tx.bcast + rsp->tx.mcast;
+ stats->tx_drops = rsp->tx.drop +
+ (unsigned long)atomic_long_read(&stats->tx_discards);
+exit:
+ mutex_unlock(&priv->mbox.lock);
+}
+
+static void rvu_rep_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct rep_dev *rep = netdev_priv(dev);
+
+ if (!(rep->flags & RVU_REP_VF_INITIALIZED))
+ return;
+
+ stats->rx_packets = rep->stats.rx_frames;
+ stats->rx_bytes = rep->stats.rx_bytes;
+ stats->rx_dropped = rep->stats.rx_drops;
+ stats->multicast = rep->stats.rx_mcast_frames;
+
+ stats->tx_packets = rep->stats.tx_frames;
+ stats->tx_bytes = rep->stats.tx_bytes;
+ stats->tx_dropped = rep->stats.tx_drops;
+
+ schedule_delayed_work(&rep->stats_wrk, msecs_to_jiffies(100));
+}
+
+static int rvu_eswitch_config(struct otx2_nic *priv, u8 ena)
+{
+ struct esw_cfg_req *req;
+
+ mutex_lock(&priv->mbox.lock);
+ req = otx2_mbox_alloc_msg_esw_cfg(&priv->mbox);
+ if (!req) {
+ mutex_unlock(&priv->mbox.lock);
+ return -ENOMEM;
+ }
+ req->ena = ena;
+ otx2_sync_mbox_msg(&priv->mbox);
+ mutex_unlock(&priv->mbox.lock);
+ return 0;
+}
+
+static netdev_tx_t rvu_rep_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct rep_dev *rep = netdev_priv(dev);
+ struct otx2_nic *pf = rep->mdev;
+ struct otx2_snd_queue *sq;
+ struct netdev_queue *txq;
+ struct rep_stats *stats;
+
+ /* Check for minimum and maximum packet length */
+ if (skb->len <= ETH_HLEN ||
+ (!skb_shinfo(skb)->gso_size && skb->len > pf->tx_max_pktlen)) {
+ stats = &rep->stats;
+ atomic_long_inc(&stats->tx_discards);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ sq = &pf->qset.sq[rep->rep_id];
+ txq = netdev_get_tx_queue(dev, 0);
+
+ if (!otx2_sq_append_skb(pf, txq, sq, skb, rep->rep_id)) {
+ netif_tx_stop_queue(txq);
+
+ /* Check again, in case SQBs got freed up */
+ smp_mb();
+ if (((sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb)
+ > sq->sqe_thresh)
+ netif_tx_wake_queue(txq);
+
+ return NETDEV_TX_BUSY;
+ }
+ return NETDEV_TX_OK;
+}
+
+static int rvu_rep_open(struct net_device *dev)
+{
+ struct rep_dev *rep = netdev_priv(dev);
+ struct otx2_nic *priv = rep->mdev;
+ struct rep_event evt = {0};
+
+ if (!(rep->flags & RVU_REP_VF_INITIALIZED))
+ return 0;
+
+ netif_carrier_on(dev);
+ netif_tx_start_all_queues(dev);
+
+ evt.event = RVU_EVENT_PORT_STATE;
+ evt.evt_data.port_state = 1;
+ evt.pcifunc = rep->pcifunc;
+ rvu_rep_notify_pfvf(priv, RVU_EVENT_PORT_STATE, &evt);
+ return 0;
+}
+
+static int rvu_rep_stop(struct net_device *dev)
+{
+ struct rep_dev *rep = netdev_priv(dev);
+ struct otx2_nic *priv = rep->mdev;
+ struct rep_event evt = {0};
+
+ if (!(rep->flags & RVU_REP_VF_INITIALIZED))
+ return 0;
+
+ netif_carrier_off(dev);
+ netif_tx_disable(dev);
+
+ evt.event = RVU_EVENT_PORT_STATE;
+ evt.pcifunc = rep->pcifunc;
+ rvu_rep_notify_pfvf(priv, RVU_EVENT_PORT_STATE, &evt);
+ return 0;
+}
+
+static const struct net_device_ops rvu_rep_netdev_ops = {
+ .ndo_open = rvu_rep_open,
+ .ndo_stop = rvu_rep_stop,
+ .ndo_start_xmit = rvu_rep_xmit,
+ .ndo_get_stats64 = rvu_rep_get_stats64,
+ .ndo_change_mtu = rvu_rep_change_mtu,
+ .ndo_has_offload_stats = rvu_rep_has_offload_stats,
+ .ndo_get_offload_stats = rvu_rep_get_offload_stats,
+ .ndo_setup_tc = rvu_rep_setup_tc,
+};
+
+static int rvu_rep_napi_init(struct otx2_nic *priv,
+ struct netlink_ext_ack *extack)
+{
+ struct otx2_qset *qset = &priv->qset;
+ struct otx2_cq_poll *cq_poll = NULL;
+ struct otx2_hw *hw = &priv->hw;
+ int err = 0, qidx, vec;
+ char *irq_name;
+
+ qset->napi = kcalloc(hw->cint_cnt, sizeof(*cq_poll), GFP_KERNEL);
+ if (!qset->napi)
+ return -ENOMEM;
+
+ /* Register NAPI handler */
+ for (qidx = 0; qidx < hw->cint_cnt; qidx++) {
+ cq_poll = &qset->napi[qidx];
+ cq_poll->cint_idx = qidx;
+ cq_poll->cq_ids[CQ_RX] =
+ (qidx < hw->rx_queues) ? qidx : CINT_INVALID_CQ;
+ cq_poll->cq_ids[CQ_TX] = (qidx < hw->tx_queues) ?
+ qidx + hw->rx_queues :
+ CINT_INVALID_CQ;
+ cq_poll->cq_ids[CQ_XDP] = CINT_INVALID_CQ;
+ cq_poll->cq_ids[CQ_QOS] = CINT_INVALID_CQ;
+
+ cq_poll->dev = (void *)priv;
+ netif_napi_add(priv->reps[qidx]->netdev, &cq_poll->napi,
+ otx2_napi_handler);
+ napi_enable(&cq_poll->napi);
+ }
+ /* Register CQ IRQ handlers */
+ vec = hw->nix_msixoff + NIX_LF_CINT_VEC_START;
+ for (qidx = 0; qidx < hw->cint_cnt; qidx++) {
+ irq_name = &hw->irq_name[vec * NAME_SIZE];
+
+ snprintf(irq_name, NAME_SIZE, "rep%d-rxtx-%d", qidx, qidx);
+
+ err = request_irq(pci_irq_vector(priv->pdev, vec),
+ otx2_cq_intr_handler, 0, irq_name,
+ &qset->napi[qidx]);
+ if (err) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "RVU REP IRQ registration failed for CQ%d",
+ qidx);
+ goto err_free_cints;
+ }
+ vec++;
+
+ /* Enable CQ IRQ */
+ otx2_write64(priv, NIX_LF_CINTX_INT(qidx), BIT_ULL(0));
+ otx2_write64(priv, NIX_LF_CINTX_ENA_W1S(qidx), BIT_ULL(0));
+ }
+ priv->flags &= ~OTX2_FLAG_INTF_DOWN;
+ return 0;
+
+err_free_cints:
+ otx2_free_cints(priv, qidx);
+ otx2_disable_napi(priv);
+ return err;
+}
+
+static void rvu_rep_free_cq_rsrc(struct otx2_nic *priv)
+{
+ struct otx2_qset *qset = &priv->qset;
+ struct otx2_cq_poll *cq_poll = NULL;
+ int qidx, vec;
+
+ /* Cleanup CQ NAPI and IRQ */
+ vec = priv->hw.nix_msixoff + NIX_LF_CINT_VEC_START;
+ for (qidx = 0; qidx < priv->hw.cint_cnt; qidx++) {
+ /* Disable interrupt */
+ otx2_write64(priv, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));
+
+ synchronize_irq(pci_irq_vector(priv->pdev, vec));
+
+ cq_poll = &qset->napi[qidx];
+ napi_synchronize(&cq_poll->napi);
+ vec++;
+ }
+ otx2_free_cints(priv, priv->hw.cint_cnt);
+ otx2_disable_napi(priv);
+}
+
+static void rvu_rep_rsrc_free(struct otx2_nic *priv)
+{
+ struct otx2_qset *qset = &priv->qset;
+ struct delayed_work *work;
+ int wrk;
+
+ for (wrk = 0; wrk < priv->qset.cq_cnt; wrk++) {
+ work = &priv->refill_wrk[wrk].pool_refill_work;
+ cancel_delayed_work_sync(work);
+ }
+ devm_kfree(priv->dev, priv->refill_wrk);
+
+ otx2_free_hw_resources(priv);
+ otx2_free_queue_mem(qset);
+}
+
+static int rvu_rep_rsrc_init(struct otx2_nic *priv)
+{
+ struct otx2_qset *qset = &priv->qset;
+ int err;
+
+ err = otx2_alloc_queue_mem(priv);
+ if (err)
+ return err;
+
+ priv->hw.max_mtu = otx2_get_max_mtu(priv);
+ priv->tx_max_pktlen = priv->hw.max_mtu + OTX2_ETH_HLEN;
+ priv->rbsize = ALIGN(priv->hw.rbuf_len, OTX2_ALIGN) + OTX2_HEAD_ROOM;
+
+ err = otx2_init_hw_resources(priv);
+ if (err)
+ goto err_free_rsrc;
+
+ /* Set maximum frame size allowed in HW */
+ err = otx2_hw_set_mtu(priv, priv->hw.max_mtu);
+ if (err) {
+ dev_err(priv->dev, "Failed to set HW MTU\n");
+ goto err_free_rsrc;
+ }
+ return 0;
+
+err_free_rsrc:
+ otx2_free_hw_resources(priv);
+ otx2_free_queue_mem(qset);
+ return err;
+}
+
+void rvu_rep_destroy(struct otx2_nic *priv)
+{
+ struct rep_dev *rep;
+ int rep_id;
+
+ rvu_eswitch_config(priv, false);
+ priv->flags |= OTX2_FLAG_INTF_DOWN;
+ rvu_rep_free_cq_rsrc(priv);
+ for (rep_id = 0; rep_id < priv->rep_cnt; rep_id++) {
+ rep = priv->reps[rep_id];
+ unregister_netdev(rep->netdev);
+ rvu_rep_devlink_port_unregister(rep);
+ free_netdev(rep->netdev);
+ kfree(rep->flow_cfg);
+ }
+ kfree(priv->reps);
+ rvu_rep_rsrc_free(priv);
+}
+
+int rvu_rep_create(struct otx2_nic *priv, struct netlink_ext_ack *extack)
+{
+ int rep_cnt = priv->rep_cnt;
+ struct net_device *ndev;
+ struct rep_dev *rep;
+ int rep_id, err;
+ u16 pcifunc;
+
+ err = rvu_rep_rsrc_init(priv);
+ if (err)
+ return -ENOMEM;
+
+ priv->reps = kcalloc(rep_cnt, sizeof(struct rep_dev *), GFP_KERNEL);
+ if (!priv->reps)
+ return -ENOMEM;
+
+ for (rep_id = 0; rep_id < rep_cnt; rep_id++) {
+ ndev = alloc_etherdev(sizeof(*rep));
+ if (!ndev) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "PFVF representor:%d creation failed",
+ rep_id);
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ rep = netdev_priv(ndev);
+ priv->reps[rep_id] = rep;
+ rep->mdev = priv;
+ rep->netdev = ndev;
+ rep->rep_id = rep_id;
+
+ ndev->min_mtu = OTX2_MIN_MTU;
+ ndev->max_mtu = priv->hw.max_mtu;
+ ndev->netdev_ops = &rvu_rep_netdev_ops;
+ pcifunc = priv->rep_pf_map[rep_id];
+ rep->pcifunc = pcifunc;
+
+ snprintf(ndev->name, sizeof(ndev->name), "Rpf%dvf%d",
+ rvu_get_pf(priv->pdev, pcifunc),
+ (pcifunc & RVU_PFVF_FUNC_MASK));
+
+ ndev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
+ NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6);
+
+ ndev->hw_features |= NETIF_F_HW_TC;
+ ndev->features |= ndev->hw_features;
+ eth_hw_addr_random(ndev);
+ err = rvu_rep_devlink_port_register(rep);
+ if (err) {
+ free_netdev(ndev);
+ goto exit;
+ }
+
+ SET_NETDEV_DEVLINK_PORT(ndev, &rep->dl_port);
+ err = register_netdev(ndev);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "PFVF representor registration failed");
+ rvu_rep_devlink_port_unregister(rep);
+ free_netdev(ndev);
+ goto exit;
+ }
+
+ INIT_DELAYED_WORK(&rep->stats_wrk, rvu_rep_get_stats);
+ }
+ err = rvu_rep_napi_init(priv, extack);
+ if (err)
+ goto exit;
+
+ rvu_eswitch_config(priv, true);
+ return 0;
+exit:
+ while (--rep_id >= 0) {
+ rep = priv->reps[rep_id];
+ unregister_netdev(rep->netdev);
+ rvu_rep_devlink_port_unregister(rep);
+ free_netdev(rep->netdev);
+ }
+ kfree(priv->reps);
+ rvu_rep_rsrc_free(priv);
+ return err;
+}
+
+static int rvu_get_rep_cnt(struct otx2_nic *priv)
+{
+ struct get_rep_cnt_rsp *rsp;
+ struct mbox_msghdr *msghdr;
+ struct msg_req *req;
+ int err, rep;
+
+ mutex_lock(&priv->mbox.lock);
+ req = otx2_mbox_alloc_msg_get_rep_cnt(&priv->mbox);
+ if (!req) {
+ mutex_unlock(&priv->mbox.lock);
+ return -ENOMEM;
+ }
+ err = otx2_sync_mbox_msg(&priv->mbox);
+ if (err)
+ goto exit;
+
+ msghdr = otx2_mbox_get_rsp(&priv->mbox.mbox, 0, &req->hdr);
+ if (IS_ERR(msghdr)) {
+ err = PTR_ERR(msghdr);
+ goto exit;
+ }
+
+ rsp = (struct get_rep_cnt_rsp *)msghdr;
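+ /* One TX and one RX queue on the parent PF is provisioned per representor */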
+ priv->hw.tx_queues = rsp->rep_cnt;
+ priv->hw.rx_queues = rsp->rep_cnt;
+ priv->rep_cnt = rsp->rep_cnt;
+ for (rep = 0; rep < priv->rep_cnt; rep++)
+ priv->rep_pf_map[rep] = rsp->rep_pf_map[rep];
+
+exit:
+ mutex_unlock(&priv->mbox.lock);
+ return err;
+}
+
+static int rvu_rep_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct otx2_nic *priv;
+ struct otx2_hw *hw;
+ int err;
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ return err;
+ }
+
+ err = pcim_request_all_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ return err;
+ }
+
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "DMA mask config failed, abort\n");
+ goto err_set_drv_data;
+ }
+
+ pci_set_master(pdev);
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ err = -ENOMEM;
+ goto err_set_drv_data;
+ }
+
+ pci_set_drvdata(pdev, priv);
+ priv->pdev = pdev;
+ priv->dev = dev;
+ priv->flags |= OTX2_FLAG_INTF_DOWN;
+ priv->flags |= OTX2_FLAG_REP_MODE_ENABLED;
+
+ hw = &priv->hw;
+ hw->pdev = pdev;
+ hw->max_queues = OTX2_MAX_CQ_CNT;
+ hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN;
+ hw->xqe_size = 128;
+
+ err = otx2_init_rsrc(pdev, priv);
+ if (err)
+ goto err_set_drv_data;
+
+ priv->iommu_domain = iommu_get_domain_for_dev(dev);
+
+ err = rvu_get_rep_cnt(priv);
+ if (err)
+ goto err_detach_rsrc;
+
+ err = otx2_register_dl(priv);
+ if (err)
+ goto err_detach_rsrc;
+
+ return 0;
+
+err_detach_rsrc:
+ if (priv->hw.lmt_info)
+ free_percpu(priv->hw.lmt_info);
+ if (test_bit(CN10K_LMTST, &priv->hw.cap_flag))
+ qmem_free(priv->dev, priv->dync_lmt);
+ otx2_detach_resources(&priv->mbox);
+ otx2_disable_mbox_intr(priv);
+ otx2_pfaf_mbox_destroy(priv);
+ pci_free_irq_vectors(pdev);
+err_set_drv_data:
+ pci_set_drvdata(pdev, NULL);
+ return err;
+}
+
+static void rvu_rep_remove(struct pci_dev *pdev)
+{
+ struct otx2_nic *priv = pci_get_drvdata(pdev);
+
+ otx2_unregister_dl(priv);
+ if (!(priv->flags & OTX2_FLAG_INTF_DOWN))
+ rvu_rep_destroy(priv);
+ otx2_detach_resources(&priv->mbox);
+ if (priv->hw.lmt_info)
+ free_percpu(priv->hw.lmt_info);
+ if (test_bit(CN10K_LMTST, &priv->hw.cap_flag))
+ qmem_free(priv->dev, priv->dync_lmt);
+ otx2_disable_mbox_intr(priv);
+ otx2_pfaf_mbox_destroy(priv);
+ pci_free_irq_vectors(priv->pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+static struct pci_driver rvu_rep_driver = {
+ .name = DRV_NAME,
+ .id_table = rvu_rep_id_table,
+ .probe = rvu_rep_probe,
+ .remove = rvu_rep_remove,
+ .shutdown = rvu_rep_remove,
+};
+
+static int __init rvu_rep_init_module(void)
+{
+ return pci_register_driver(&rvu_rep_driver);
+}
+
+static void __exit rvu_rep_cleanup_module(void)
+{
+ pci_unregister_driver(&rvu_rep_driver);
+}
+
+module_init(rvu_rep_init_module);
+module_exit(rvu_rep_cleanup_module);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/rep.h b/drivers/net/ethernet/marvell/octeontx2/nic/rep.h
new file mode 100644
index 000000000000..5bc9e2c7d800
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/rep.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU REPRESENTOR driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#ifndef REP_H
+#define REP_H
+
+#include <linux/pci.h>
+
+#include "otx2_reg.h"
+#include "otx2_txrx.h"
+#include "otx2_common.h"
+
+#define PCI_DEVID_RVU_REP 0xA0E0
+
+#define RVU_MAX_REP OTX2_MAX_CQ_CNT
+
+struct rep_stats {
+ u64 rx_bytes;
+ u64 rx_frames;
+ u64 rx_drops;
+ u64 rx_mcast_frames;
+
+ u64 tx_bytes;
+ u64 tx_frames;
+ u64 tx_drops;
+ atomic_long_t tx_discards;
+};
+
+struct rep_dev {
+ struct otx2_nic *mdev;
+ struct net_device *netdev;
+ struct rep_stats stats;
+ struct delayed_work stats_wrk;
+ struct devlink_port dl_port;
+ struct otx2_flow_config *flow_cfg;
+#define RVU_REP_VF_INITIALIZED BIT_ULL(0)
+ u64 flags;
+ u16 rep_id;
+ u16 pcifunc;
+ u8 mac[ETH_ALEN];
+};
+
+static inline bool otx2_rep_dev(struct pci_dev *pdev)
+{
+ return pdev->device == PCI_DEVID_RVU_REP;
+}
+
+int rvu_rep_create(struct otx2_nic *priv, struct netlink_ext_ack *extack);
+void rvu_rep_destroy(struct otx2_nic *priv);
+int rvu_event_up_notify(struct otx2_nic *pf, struct rep_event *info);
+#endif /* REP_H */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_counter.c b/drivers/net/ethernet/marvell/prestera/prestera_counter.c
index 4cd53a2dae46..634f4543c1d7 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_counter.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_counter.c
@@ -336,8 +336,7 @@ prestera_counter_block_get_by_idx(struct prestera_counter *counter, u32 idx)
static void prestera_counter_stats_work(struct work_struct *work)
{
- struct delayed_work *dl_work =
- container_of(work, struct delayed_work, work);
+ struct delayed_work *dl_work = to_delayed_work(work);
struct prestera_counter *counter =
container_of(dl_work, struct prestera_counter, stats_dw);
struct prestera_counter_block *block;
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flower.c b/drivers/net/ethernet/marvell/prestera/prestera_flower.c
index 8b9455d8a4f7..418101a93149 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_flower.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.c
@@ -229,6 +229,10 @@ static int prestera_flower_parse(struct prestera_flow_block *block,
flow_rule_match_control(f_rule, &match);
addr_type = match.key->addr_type;
+
+ if (flow_rule_has_control_flags(match.mask->flags,
+ f->common.extack))
+ return -EOPNOTSUPP;
}
if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_BASIC)) {
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_hw.c
index fc6f7d2746e8..197198ba61b1 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_hw.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.c
@@ -419,15 +419,6 @@ struct prestera_msg_vtcam_destroy_req {
__le32 vtcam_id;
};
-struct prestera_msg_vtcam_rule_add_req {
- struct prestera_msg_cmd cmd;
- __le32 key[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX];
- __le32 keymask[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX];
- __le32 vtcam_id;
- __le32 prio;
- __le32 n_act;
-};
-
struct prestera_msg_vtcam_rule_del_req {
struct prestera_msg_cmd cmd;
__le32 vtcam_id;
@@ -471,6 +462,16 @@ struct prestera_msg_acl_action {
};
};
+struct prestera_msg_vtcam_rule_add_req {
+ struct prestera_msg_cmd cmd;
+ __le32 key[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX];
+ __le32 keymask[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX];
+ __le32 vtcam_id;
+ __le32 prio;
+ __le32 n_act;
+ struct prestera_msg_acl_action actions_msg[] __counted_by_le(n_act);
+};
+
struct prestera_msg_counter_req {
struct prestera_msg_cmd cmd;
__le32 client;
@@ -702,12 +703,6 @@ struct prestera_msg_flood_domain_destroy_req {
__le32 flood_domain_idx;
};
-struct prestera_msg_flood_domain_ports_set_req {
- struct prestera_msg_cmd cmd;
- __le32 flood_domain_idx;
- __le32 ports_num;
-};
-
struct prestera_msg_flood_domain_ports_reset_req {
struct prestera_msg_cmd cmd;
__le32 flood_domain_idx;
@@ -725,6 +720,13 @@ struct prestera_msg_flood_domain_port {
__le16 port_type;
};
+struct prestera_msg_flood_domain_ports_set_req {
+ struct prestera_msg_cmd cmd;
+ __le32 flood_domain_idx;
+ __le32 ports_num;
+ struct prestera_msg_flood_domain_port ports[] __counted_by_le(ports_num);
+};
+
struct prestera_msg_mdb_create_req {
struct prestera_msg_cmd cmd;
__le32 flood_domain_idx;
@@ -1371,23 +1373,18 @@ int prestera_hw_vtcam_rule_add(struct prestera_switch *sw,
struct prestera_acl_hw_action_info *act,
u8 n_act, u32 *rule_id)
{
- struct prestera_msg_acl_action *actions_msg;
struct prestera_msg_vtcam_rule_add_req *req;
struct prestera_msg_vtcam_resp resp;
- void *buff;
- u32 size;
+ size_t size;
int err;
u8 i;
- size = sizeof(*req) + sizeof(*actions_msg) * n_act;
-
- buff = kzalloc(size, GFP_KERNEL);
- if (!buff)
+ size = struct_size(req, actions_msg, n_act);
+ req = kzalloc(size, GFP_KERNEL);
+ if (!req)
return -ENOMEM;
- req = buff;
req->n_act = __cpu_to_le32(n_act);
- actions_msg = buff + sizeof(*req);
/* put acl matches into the message */
memcpy(req->key, key, sizeof(req->key));
@@ -1395,7 +1392,7 @@ int prestera_hw_vtcam_rule_add(struct prestera_switch *sw,
/* put acl actions into the message */
for (i = 0; i < n_act; i++) {
- err = prestera_acl_rule_add_put_action(&actions_msg[i],
+ err = prestera_acl_rule_add_put_action(&req->actions_msg[i],
&act[i]);
if (err)
goto free_buff;
@@ -1411,7 +1408,7 @@ int prestera_hw_vtcam_rule_add(struct prestera_switch *sw,
*rule_id = __le32_to_cpu(resp.rule_id);
free_buff:
- kfree(buff);
+ kfree(req);
return err;
}
@@ -2461,14 +2458,13 @@ int prestera_hw_flood_domain_ports_set(struct prestera_flood_domain *domain)
{
struct prestera_flood_domain_port *flood_domain_port;
struct prestera_msg_flood_domain_ports_set_req *req;
- struct prestera_msg_flood_domain_port *ports;
struct prestera_switch *sw = domain->sw;
struct prestera_port *port;
u32 ports_num = 0;
- int buf_size;
- void *buff;
+ size_t buf_size;
u16 lag_id;
int err;
+ int i = 0;
list_for_each_entry(flood_domain_port, &domain->flood_domain_port_list,
flood_domain_port_node)
@@ -2477,15 +2473,11 @@ int prestera_hw_flood_domain_ports_set(struct prestera_flood_domain *domain)
if (!ports_num)
return -EINVAL;
- buf_size = sizeof(*req) + sizeof(*ports) * ports_num;
-
- buff = kmalloc(buf_size, GFP_KERNEL);
- if (!buff)
+ buf_size = struct_size(req, ports, ports_num);
+ req = kmalloc(buf_size, GFP_KERNEL);
+ if (!req)
return -ENOMEM;
- req = buff;
- ports = buff + sizeof(*req);
-
req->flood_domain_idx = __cpu_to_le32(domain->idx);
req->ports_num = __cpu_to_le32(ports_num);
@@ -2494,31 +2486,30 @@ int prestera_hw_flood_domain_ports_set(struct prestera_flood_domain *domain)
if (netif_is_lag_master(flood_domain_port->dev)) {
if (prestera_lag_id(sw, flood_domain_port->dev,
&lag_id)) {
- kfree(buff);
+ kfree(req);
return -EINVAL;
}
- ports->port_type =
+ req->ports[i].port_type =
__cpu_to_le16(PRESTERA_HW_FLOOD_DOMAIN_PORT_TYPE_LAG);
- ports->lag_id = __cpu_to_le16(lag_id);
+ req->ports[i].lag_id = __cpu_to_le16(lag_id);
} else {
port = prestera_port_dev_lower_find(flood_domain_port->dev);
- ports->port_type =
+ req->ports[i].port_type =
__cpu_to_le16(PRESTERA_HW_FDB_ENTRY_TYPE_REG_PORT);
- ports->dev_num = __cpu_to_le32(port->dev_id);
- ports->port_num = __cpu_to_le32(port->hw_id);
+ req->ports[i].dev_num = __cpu_to_le32(port->dev_id);
+ req->ports[i].port_num = __cpu_to_le32(port->hw_id);
}
- ports->vid = __cpu_to_le16(flood_domain_port->vid);
-
- ports++;
+ req->ports[i].vid = __cpu_to_le16(flood_domain_port->vid);
+ i++;
}
err = prestera_cmd(sw, PRESTERA_CMD_TYPE_FLOOD_DOMAIN_PORTS_SET,
&req->cmd, buf_size);
- kfree(buff);
+ kfree(req);
return err;
}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
index 4fb886c57cd7..65e7ef033bde 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -280,6 +280,7 @@ prestera_mac_select_pcs(struct phylink_config *config,
}
static void prestera_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct prestera_port *port = container_of(pcs, struct prestera_port,
@@ -395,7 +396,6 @@ static int prestera_port_sfp_bind(struct prestera_port *port)
continue;
port->phylink_pcs.ops = &prestera_pcs_ops;
- port->phylink_pcs.neg_mode = true;
port->phy_config.dev = &port->dev->dev;
port->phy_config.type = PHYLINK_NETDEV;
@@ -489,7 +489,7 @@ static int prestera_port_change_mtu(struct net_device *dev, int mtu)
if (err)
return err;
- dev->mtu = mtu;
+ WRITE_ONCE(dev->mtu, mtu);
return 0;
}
@@ -633,7 +633,8 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id)
if (err)
goto err_dl_port_register;
- dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_HW_TC;
+ dev->features |= NETIF_F_HW_TC;
+ dev->netns_immutable = true;
dev->netdev_ops = &prestera_netdev_ops;
dev->ethtool_ops = &prestera_ethtool_ops;
SET_NETDEV_DEV(dev, sw->dev->dev);
@@ -821,7 +822,7 @@ static void prestera_port_handle_event(struct prestera_switch *sw,
if (port->state_mac.oper) {
if (port->phy_link)
- phylink_mac_change(port->phy_link, true);
+ phylink_pcs_change(&port->phylink_pcs, true);
else
netif_carrier_on(port->dev);
@@ -829,7 +830,7 @@ static void prestera_port_handle_event(struct prestera_switch *sw,
queue_delayed_work(prestera_wq, caching_dw, 0);
} else {
if (port->phy_link)
- phylink_mac_change(port->phy_link, false);
+ phylink_pcs_change(&port->phylink_pcs, false);
else if (netif_running(port->dev) && netif_carrier_ok(port->dev))
netif_carrier_off(port->dev);
@@ -1499,7 +1500,7 @@ EXPORT_SYMBOL(prestera_device_unregister);
static int __init prestera_module_init(void)
{
- prestera_wq = alloc_workqueue("prestera", 0, 0);
+ prestera_wq = alloc_workqueue("prestera", WQ_PERCPU, 0);
if (!prestera_wq)
return -ENOMEM;
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_pci.c b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
index 35857dc19542..3e13322470da 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_pci.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
@@ -845,9 +845,9 @@ static int prestera_pci_probe(struct pci_dev *pdev,
goto err_pci_enable_device;
}
- err = pci_request_regions(pdev, driver_name);
+ err = pcim_request_all_regions(pdev, driver_name);
if (err) {
- dev_err(&pdev->dev, "pci_request_regions failed\n");
+ dev_err(&pdev->dev, "pcim_request_all_regions failed\n");
goto err_pci_request_regions;
}
@@ -898,7 +898,7 @@ static int prestera_pci_probe(struct pci_dev *pdev,
dev_info(fw->dev.dev, "Prestera FW is ready\n");
- fw->wq = alloc_workqueue("prestera_fw_wq", WQ_HIGHPRI, 1);
+ fw->wq = alloc_workqueue("prestera_fw_wq", WQ_HIGHPRI | WQ_PERCPU, 1);
if (!fw->wq) {
err = -ENOMEM;
goto err_wq_alloc;
@@ -938,7 +938,6 @@ err_pci_dev_alloc:
err_pp_ioremap:
err_mem_ioremap:
err_dma_mask:
- pci_release_regions(pdev);
err_pci_request_regions:
err_pci_enable_device:
return err;
@@ -953,7 +952,6 @@ static void prestera_pci_remove(struct pci_dev *pdev)
pci_free_irq_vectors(pdev);
destroy_workqueue(fw->wq);
prestera_fw_uninit(fw);
- pci_release_regions(pdev);
}
static const struct pci_device_id prestera_pci_devices[] = {
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
index cc2a9ae794be..39d9bf82c115 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
@@ -96,7 +96,7 @@ struct prestera_sdma {
struct dma_pool *desc_pool;
struct work_struct tx_work;
struct napi_struct rx_napi;
- struct net_device napi_dev;
+ struct net_device *napi_dev;
u32 map_addr;
u64 dma_mask;
/* protect SDMA with concurrent access from multiple CPUs */
@@ -654,13 +654,21 @@ static int prestera_sdma_switch_init(struct prestera_switch *sw)
if (err)
goto err_evt_register;
- init_dummy_netdev(&sdma->napi_dev);
+ sdma->napi_dev = alloc_netdev_dummy(0);
+ if (!sdma->napi_dev) {
+ dev_err(dev, "not able to initialize dummy device\n");
+ err = -ENOMEM;
+ goto err_alloc_dummy;
+ }
- netif_napi_add(&sdma->napi_dev, &sdma->rx_napi, prestera_sdma_rx_poll);
+ netif_napi_add(sdma->napi_dev, &sdma->rx_napi, prestera_sdma_rx_poll);
napi_enable(&sdma->rx_napi);
return 0;
+err_alloc_dummy:
+ prestera_hw_event_handler_unregister(sw, PRESTERA_EVENT_TYPE_RXTX,
+ prestera_rxtx_handle_event);
err_evt_register:
err_tx_init:
prestera_sdma_tx_fini(sdma);
@@ -677,6 +685,7 @@ static void prestera_sdma_switch_fini(struct prestera_switch *sw)
napi_disable(&sdma->rx_napi);
netif_napi_del(&sdma->rx_napi);
+ free_netdev(sdma->napi_dev);
prestera_hw_event_handler_unregister(sw, PRESTERA_EVENT_TYPE_RXTX,
prestera_rxtx_handle_event);
prestera_sdma_tx_fini(sdma);
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index dd6ca2e4fd51..68f8a1e36aa6 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -353,7 +353,7 @@ static void rxq_refill(struct net_device *dev)
static inline void rxq_refill_timer_wrapper(struct timer_list *t)
{
- struct pxa168_eth_private *pep = from_timer(pep, t, timeout);
+ struct pxa168_eth_private *pep = timer_container_of(pep, t, timeout);
napi_schedule(&pep->napi);
}
@@ -1175,7 +1175,7 @@ static int pxa168_eth_stop(struct net_device *dev)
/* Write to ICR to clear interrupts. */
wrl(pep, INT_W_CLEAR, 0);
napi_disable(&pep->napi);
- del_timer_sync(&pep->timeout);
+ timer_delete_sync(&pep->timeout);
netif_carrier_off(dev);
free_irq(dev->irq, dev);
rxq_deinit(dev);
@@ -1188,7 +1188,7 @@ static int pxa168_eth_change_mtu(struct net_device *dev, int mtu)
{
struct pxa168_eth_private *pep = netdev_priv(dev);
- dev->mtu = mtu;
+ WRITE_ONCE(dev->mtu, mtu);
set_port_config_ext(pep);
if (!netif_running(dev))
@@ -1229,9 +1229,9 @@ static int pxa168_rx_poll(struct napi_struct *napi, int budget)
int work_done = 0;
/*
- * We call txq_reclaim every time since in NAPI interupts are disabled
- * and due to this we miss the TX_DONE interrupt,which is not updated in
- * interrupt status register.
+ * We call txq_reclaim every time since in NAPI interrupts are disabled
+ * and due to this we miss the TX_DONE interrupt, which is not updated
+ * in interrupt status register.
*/
txq_reclaim(dev, 0);
if (netif_queue_stopped(dev)
@@ -1394,18 +1394,15 @@ static int pxa168_eth_probe(struct platform_device *pdev)
printk(KERN_NOTICE "PXA168 10/100 Ethernet Driver\n");
- clk = devm_clk_get(&pdev->dev, NULL);
+ clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "Fast Ethernet failed to get clock\n");
+ dev_err(&pdev->dev, "Fast Ethernet failed to get and enable clock\n");
return -ENODEV;
}
- clk_prepare_enable(clk);
dev = alloc_etherdev(sizeof(struct pxa168_eth_private));
- if (!dev) {
- err = -ENOMEM;
- goto err_clk;
- }
+ if (!dev)
+ return -ENOMEM;
platform_set_drvdata(pdev, dev);
pep = netdev_priv(dev);
@@ -1523,8 +1520,6 @@ err_free_mdio:
mdiobus_free(pep->smi_bus);
err_netdev:
free_netdev(dev);
-err_clk:
- clk_disable_unprepare(clk);
return err;
}
@@ -1542,7 +1537,6 @@ static void pxa168_eth_remove(struct platform_device *pdev)
if (dev->phydev)
phy_disconnect(dev->phydev);
- clk_disable_unprepare(pep->clk);
mdiobus_unregister(pep->smi_bus);
mdiobus_free(pep->smi_bus);
unregister_netdev(dev);
@@ -1579,7 +1573,7 @@ MODULE_DEVICE_TABLE(of, pxa168_eth_of_match);
static struct platform_driver pxa168_eth_driver = {
.probe = pxa168_eth_probe,
- .remove_new = pxa168_eth_remove,
+ .remove = pxa168_eth_remove,
.shutdown = pxa168_eth_shutdown,
.resume = pxa168_eth_resume,
.suspend = pxa168_eth_suspend,
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 1b43704baceb..05349a0b2db1 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -484,8 +484,7 @@ static void skge_get_strings(struct net_device *dev, u32 stringset, u8 *data)
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < ARRAY_SIZE(skge_stats); i++)
- memcpy(data + i * ETH_GSTRING_LEN,
- skge_stats[i].name, ETH_GSTRING_LEN);
+ ethtool_puts(&data, skge_stats[i].name);
break;
}
}
@@ -1495,7 +1494,7 @@ static int xm_check_link(struct net_device *dev)
*/
static void xm_link_timer(struct timer_list *t)
{
- struct skge_port *skge = from_timer(skge, t, link_timer);
+ struct skge_port *skge = timer_container_of(skge, t, link_timer);
struct net_device *dev = skge->netdev;
struct skge_hw *hw = skge->hw;
int port = skge->port;
@@ -2663,7 +2662,7 @@ static int skge_down(struct net_device *dev)
netif_tx_disable(dev);
if (is_genesis(hw) && hw->phy_type == SK_PHY_XMAC)
- del_timer_sync(&skge->link_timer);
+ timer_delete_sync(&skge->link_timer);
napi_disable(&skge->napi);
netif_carrier_off(dev);
@@ -2905,13 +2904,13 @@ static int skge_change_mtu(struct net_device *dev, int new_mtu)
int err;
if (!netif_running(dev)) {
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
skge_down(dev);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
err = skge_up(dev);
if (err)
@@ -3743,10 +3742,7 @@ static int skge_device_event(struct notifier_block *unused,
skge = netdev_priv(dev);
switch (event) {
case NETDEV_CHANGENAME:
- if (skge->debugfs)
- skge->debugfs = debugfs_rename(skge_debug,
- skge->debugfs,
- skge_debug, dev->name);
+ debugfs_change_name(skge->debugfs, "%s", dev->name);
break;
case NETDEV_GOING_DOWN:
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 07720841a8d7..3831f533b9db 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -34,6 +34,7 @@
#include <linux/mii.h>
#include <linux/of_net.h>
#include <linux/dmi.h>
+#include <linux/skbuff_ref.h>
#include <asm/irq.h>
@@ -129,6 +130,7 @@ static const struct pci_device_id sky2_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436C) }, /* 88E8072 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436D) }, /* 88E8055 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4370) }, /* 88E8075 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4373) }, /* 88E8075 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4380) }, /* 88E8057 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4381) }, /* 88E8059 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4382) }, /* 88E8079 */
@@ -2383,7 +2385,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
u32 imask;
if (!netif_running(dev)) {
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
netdev_update_features(dev);
return 0;
}
@@ -2406,7 +2408,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
sky2_rx_stop(sky2);
sky2_rx_clean(sky2);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
netdev_update_features(dev);
mode = DATA_BLIND_VAL(DATA_BLIND_DEF) | GM_SMOD_VLAN_ENA;
@@ -2959,7 +2961,7 @@ static int sky2_rx_hung(struct net_device *dev)
static void sky2_watchdog(struct timer_list *t)
{
- struct sky2_hw *hw = from_timer(hw, t, watchdog_timer);
+ struct sky2_hw *hw = timer_container_of(hw, t, watchdog_timer);
/* Check for lost IRQ once a second */
if (sky2_read32(hw, B0_ISRC)) {
@@ -3799,8 +3801,7 @@ static void sky2_get_strings(struct net_device *dev, u32 stringset, u8 * data)
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < ARRAY_SIZE(sky2_stats); i++)
- memcpy(data + i * ETH_GSTRING_LEN,
- sky2_stats[i].name, ETH_GSTRING_LEN);
+ ethtool_puts(&data, sky2_stats[i].name);
break;
}
}
@@ -4493,10 +4494,7 @@ static int sky2_device_event(struct notifier_block *unused,
switch (event) {
case NETDEV_CHANGENAME:
- if (sky2->debugfs) {
- sky2->debugfs = debugfs_rename(sky2_debug, sky2->debugfs,
- sky2_debug, dev->name);
- }
+ debugfs_change_name(sky2->debugfs, "%s", dev->name);
break;
case NETDEV_GOING_DOWN:
@@ -5054,7 +5052,7 @@ static int sky2_suspend(struct device *dev)
if (!hw)
return 0;
- del_timer_sync(&hw->watchdog_timer);
+ timer_delete_sync(&hw->watchdog_timer);
cancel_work_sync(&hw->restart_work);
rtnl_lock();
diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig
index da0db417ab69..2ba361f8ce7d 100644
--- a/drivers/net/ethernet/mediatek/Kconfig
+++ b/drivers/net/ethernet/mediatek/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config NET_VENDOR_MEDIATEK
bool "MediaTek devices"
- depends on ARCH_MEDIATEK || SOC_MT7621 || SOC_MT7620 || COMPILE_TEST
+ depends on ARCH_MEDIATEK || ARCH_AIROHA || SOC_MT7621 || SOC_MT7620 || COMPILE_TEST
help
If you have a Mediatek SoC with ethernet, say Y.
@@ -17,6 +17,7 @@ config NET_MEDIATEK_SOC
select PINCTRL
select PHYLINK
select DIMLIB
+ select GENERIC_ALLOCATOR
select PAGE_POOL
select PAGE_POOL_STATS
select PCS_MTK_LYNXI
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_path.c b/drivers/net/ethernet/mediatek/mtk_eth_path.c
index 7c27a19c4d8f..b4c01e2878f6 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_path.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_path.c
@@ -14,7 +14,7 @@
struct mtk_eth_muxc {
const char *name;
- int cap_bit;
+ u64 cap_bit;
int (*set_path)(struct mtk_eth *eth, u64 path);
};
@@ -31,6 +31,8 @@ static const char *mtk_eth_path_name(u64 path)
return "gmac2_rgmii";
case MTK_ETH_PATH_GMAC2_SGMII:
return "gmac2_sgmii";
+ case MTK_ETH_PATH_GMAC2_2P5GPHY:
+ return "gmac2_2p5gphy";
case MTK_ETH_PATH_GMAC2_GEPHY:
return "gmac2_gephy";
case MTK_ETH_PATH_GDM1_ESW:
@@ -127,6 +129,29 @@ static int set_mux_u3_gmac2_to_qphy(struct mtk_eth *eth, u64 path)
return 0;
}
+static int set_mux_gmac2_to_2p5gphy(struct mtk_eth *eth, u64 path)
+{
+ int ret;
+
+ if (path == MTK_ETH_PATH_GMAC2_2P5GPHY) {
+ ret = regmap_clear_bits(eth->ethsys, ETHSYS_SYSCFG0,
+ SYSCFG0_SGMII_GMAC2_V2);
+ if (ret)
+ return ret;
+
+ /* Setup mux to 2p5g PHY */
+ ret = regmap_clear_bits(eth->infra, TOP_MISC_NETSYS_PCS_MUX,
+ MUX_G2_USXGMII_SEL);
+ if (ret)
+ return ret;
+
+ dev_dbg(eth->dev, "path %s in %s updated\n",
+ mtk_eth_path_name(path), __func__);
+ }
+
+ return 0;
+}
+
static int set_mux_gmac1_gmac2_to_sgmii_rgmii(struct mtk_eth *eth, u64 path)
{
unsigned int val = 0;
@@ -210,6 +235,10 @@ static const struct mtk_eth_muxc mtk_eth_muxc[] = {
.cap_bit = MTK_ETH_MUX_U3_GMAC2_TO_QPHY,
.set_path = set_mux_u3_gmac2_to_qphy,
}, {
+ .name = "mux_gmac2_to_2p5gphy",
+ .cap_bit = MTK_ETH_MUX_GMAC2_TO_2P5GPHY,
+ .set_path = set_mux_gmac2_to_2p5gphy,
+ }, {
.name = "mux_gmac1_gmac2_to_sgmii_rgmii",
.cap_bit = MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII,
.set_path = set_mux_gmac1_gmac2_to_sgmii_rgmii,
@@ -260,6 +289,20 @@ int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id)
return mtk_eth_mux_setup(eth, path);
}
+int mtk_gmac_2p5gphy_path_setup(struct mtk_eth *eth, int mac_id)
+{
+ u64 path = 0;
+
+ if (mac_id == MTK_GMAC2_ID)
+ path = MTK_ETH_PATH_GMAC2_2P5GPHY;
+
+ if (!path)
+ return -EINVAL;
+
+ /* Setup proper MUXes along the path */
+ return mtk_eth_mux_setup(eth, path);
+}
+
int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id)
{
u64 path = 0;
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index caa13b9cedff..e68997a29191 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -27,6 +27,7 @@
#include <net/dsa.h>
#include <net/dst_metadata.h>
#include <net/page_pool/helpers.h>
+#include <linux/genalloc.h>
#include "mtk_eth_soc.h"
#include "mtk_wed.h"
@@ -80,7 +81,9 @@ static const struct mtk_reg_map mtk_reg_map = {
.fq_blen = 0x1b2c,
},
.gdm1_cnt = 0x2400,
- .gdma_to_ppe = 0x4444,
+ .gdma_to_ppe = {
+ [0] = 0x4444,
+ },
.ppe_base = 0x0c00,
.wdma_base = {
[0] = 0x2800,
@@ -110,16 +113,16 @@ static const struct mtk_reg_map mt7986_reg_map = {
.tx_irq_mask = 0x461c,
.tx_irq_status = 0x4618,
.pdma = {
- .rx_ptr = 0x6100,
- .rx_cnt_cfg = 0x6104,
- .pcrx_ptr = 0x6108,
- .glo_cfg = 0x6204,
- .rst_idx = 0x6208,
- .delay_irq = 0x620c,
- .irq_status = 0x6220,
- .irq_mask = 0x6228,
- .adma_rx_dbg0 = 0x6238,
- .int_grp = 0x6250,
+ .rx_ptr = 0x4100,
+ .rx_cnt_cfg = 0x4104,
+ .pcrx_ptr = 0x4108,
+ .glo_cfg = 0x4204,
+ .rst_idx = 0x4208,
+ .delay_irq = 0x420c,
+ .irq_status = 0x4220,
+ .irq_mask = 0x4228,
+ .adma_rx_dbg0 = 0x4238,
+ .int_grp = 0x4250,
},
.qdma = {
.qtx_cfg = 0x4400,
@@ -144,7 +147,10 @@ static const struct mtk_reg_map mt7986_reg_map = {
.tx_sch_rate = 0x4798,
},
.gdm1_cnt = 0x1c00,
- .gdma_to_ppe = 0x3333,
+ .gdma_to_ppe = {
+ [0] = 0x3333,
+ [1] = 0x4444,
+ },
.ppe_base = 0x2000,
.wdma_base = {
[0] = 0x4800,
@@ -192,7 +198,11 @@ static const struct mtk_reg_map mt7988_reg_map = {
.tx_sch_rate = 0x4798,
},
.gdm1_cnt = 0x1c00,
- .gdma_to_ppe = 0x3333,
+ .gdma_to_ppe = {
+ [0] = 0x3333,
+ [1] = 0x4444,
+ [2] = 0xcccc,
+ },
.ppe_base = 0x2000,
.wdma_base = {
[0] = 0x4800,
@@ -260,12 +270,8 @@ static const char * const mtk_clks_source_name[] = {
"ethwarp_wocpu2",
"ethwarp_wocpu1",
"ethwarp_wocpu0",
- "top_usxgmii0_sel",
- "top_usxgmii1_sel",
"top_sgm0_sel",
"top_sgm1_sel",
- "top_xfi_phy0_xtal_sel",
- "top_xfi_phy1_xtal_sel",
"top_eth_gmii_sel",
"top_eth_refck_50m_sel",
"top_eth_sys_200m_sel",
@@ -498,7 +504,7 @@ static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
static void mtk_setup_bridge_switch(struct mtk_eth *eth)
{
/* Force Port1 XGMAC Link Up */
- mtk_m32(eth, 0, MTK_XGMAC_FORCE_LINK(MTK_GMAC1_ID),
+ mtk_m32(eth, 0, MTK_XGMAC_FORCE_MODE(MTK_GMAC1_ID),
MTK_XGMAC_STS(MTK_GMAC1_ID));
/* Adjust GSW bridge IPG to 11 */
@@ -527,6 +533,26 @@ static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config,
return NULL;
}
+static int mtk_mac_prepare(struct phylink_config *config, unsigned int mode,
+ phy_interface_t iface)
+{
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+ struct mtk_eth *eth = mac->hw;
+
+ if (mtk_interface_mode_is_xgmii(eth, iface) &&
+ mac->id != MTK_GMAC1_ID) {
+ mtk_m32(mac->hw, XMAC_MCR_TRX_DISABLE,
+ XMAC_MCR_TRX_DISABLE, MTK_XMAC_MCR(mac->id));
+
+ mtk_m32(mac->hw, MTK_XGMAC_FORCE_MODE(mac->id) |
+ MTK_XGMAC_FORCE_LINK(mac->id),
+ MTK_XGMAC_FORCE_MODE(mac->id), MTK_XGMAC_STS(mac->id));
+ }
+
+ return 0;
+}
+
static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
@@ -568,6 +594,12 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
}
break;
case PHY_INTERFACE_MODE_INTERNAL:
+ if (mac->id == MTK_GMAC2_ID &&
+ MTK_HAS_CAPS(eth->soc->caps, MTK_2P5GPHY)) {
+ err = mtk_gmac_2p5gphy_path_setup(eth, mac->id);
+ if (err)
+ goto init_err;
+ }
break;
default:
goto err_phy;
@@ -639,12 +671,12 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
}
/* Setup gmac */
- if (mtk_is_netsys_v3_or_greater(eth) &&
- mac->interface == PHY_INTERFACE_MODE_INTERNAL) {
+ if (mtk_interface_mode_is_xgmii(eth, state->interface)) {
mtk_w32(mac->hw, MTK_GDMA_XGDM_SEL, MTK_GDMA_EG_CTRL(mac->id));
mtk_w32(mac->hw, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(mac->id));
- mtk_setup_bridge_switch(eth);
+ if (mac->id == MTK_GMAC1_ID)
+ mtk_setup_bridge_switch(eth);
}
return;
@@ -691,10 +723,19 @@ static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
{
struct mtk_mac *mac = container_of(config, struct mtk_mac,
phylink_config);
- u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
- mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK);
- mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
+ if (!mtk_interface_mode_is_xgmii(mac->hw, interface)) {
+ /* GMAC modes */
+ mtk_m32(mac->hw,
+ MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK, 0,
+ MTK_MAC_MCR(mac->id));
+ } else if (mac->id != MTK_GMAC1_ID) {
+ /* XGMAC except for built-in switch */
+ mtk_m32(mac->hw, XMAC_MCR_TRX_DISABLE, XMAC_MCR_TRX_DISABLE,
+ MTK_XMAC_MCR(mac->id));
+ mtk_m32(mac->hw, MTK_XGMAC_FORCE_LINK(mac->id), 0,
+ MTK_XGMAC_STS(mac->id));
+ }
}
static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
@@ -725,7 +766,7 @@ static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
case SPEED_100:
val |= MTK_QTX_SCH_MAX_RATE_EN |
FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
- FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 3);
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 3) |
FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
break;
case SPEED_1000:
@@ -748,13 +789,13 @@ static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
case SPEED_100:
val |= MTK_QTX_SCH_MAX_RATE_EN |
FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
- FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5);
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
break;
case SPEED_1000:
val |= MTK_QTX_SCH_MAX_RATE_EN |
- FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 10) |
- FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 6) |
FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
break;
default:
@@ -766,13 +807,12 @@ static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
}
-static void mtk_mac_link_up(struct phylink_config *config,
- struct phy_device *phy,
- unsigned int mode, phy_interface_t interface,
- int speed, int duplex, bool tx_pause, bool rx_pause)
+static void mtk_gdm_mac_link_up(struct mtk_mac *mac,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex, bool tx_pause,
+ bool rx_pause)
{
- struct mtk_mac *mac = container_of(config, struct mtk_mac,
- phylink_config);
u32 mcr;
mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
@@ -806,32 +846,145 @@ static void mtk_mac_link_up(struct phylink_config *config,
mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
}
+static void mtk_xgdm_mac_link_up(struct mtk_mac *mac,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex, bool tx_pause,
+ bool rx_pause)
+{
+ u32 mcr;
+
+ if (mac->id == MTK_GMAC1_ID)
+ return;
+
+ /* Eliminate the interference (before link-up) caused by PHY noise */
+ mtk_m32(mac->hw, XMAC_LOGIC_RST, 0, MTK_XMAC_LOGIC_RST(mac->id));
+ mdelay(20);
+ mtk_m32(mac->hw, XMAC_GLB_CNTCLR, XMAC_GLB_CNTCLR,
+ MTK_XMAC_CNT_CTRL(mac->id));
+
+ mtk_m32(mac->hw, MTK_XGMAC_FORCE_LINK(mac->id),
+ MTK_XGMAC_FORCE_LINK(mac->id), MTK_XGMAC_STS(mac->id));
+
+ mcr = mtk_r32(mac->hw, MTK_XMAC_MCR(mac->id));
+ mcr &= ~(XMAC_MCR_FORCE_TX_FC | XMAC_MCR_FORCE_RX_FC |
+ XMAC_MCR_TRX_DISABLE);
+ /* Configure pause modes -
+ * phylink will avoid these for half duplex
+ */
+ if (tx_pause)
+ mcr |= XMAC_MCR_FORCE_TX_FC;
+ if (rx_pause)
+ mcr |= XMAC_MCR_FORCE_RX_FC;
+
+ mtk_w32(mac->hw, mcr, MTK_XMAC_MCR(mac->id));
+}
+
+static void mtk_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex, bool tx_pause, bool rx_pause)
+{
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+
+ if (mtk_interface_mode_is_xgmii(mac->hw, interface))
+ mtk_xgdm_mac_link_up(mac, phy, mode, interface, speed, duplex,
+ tx_pause, rx_pause);
+ else
+ mtk_gdm_mac_link_up(mac, phy, mode, interface, speed, duplex,
+ tx_pause, rx_pause);
+}
+
+static void mtk_mac_disable_tx_lpi(struct phylink_config *config)
+{
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+ struct mtk_eth *eth = mac->hw;
+
+ mtk_m32(eth, MAC_MCR_EEE100M | MAC_MCR_EEE1G, 0, MTK_MAC_MCR(mac->id));
+}
+
+static int mtk_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
+ bool tx_clk_stop)
+{
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+ struct mtk_eth *eth = mac->hw;
+ u32 val;
+
+ if (mtk_interface_mode_is_xgmii(eth, mac->interface))
+ return -EOPNOTSUPP;
+
+ /* Tx idle timer in ms */
+ timer = DIV_ROUND_UP(timer, 1000);
+
+ /* If the timer is zero, then set LPI_MODE, which allows the
+ * system to enter LPI mode immediately rather than waiting for
+ * the LPI threshold.
+ */
+ if (!timer)
+ val = MAC_EEE_LPI_MODE;
+ else if (FIELD_FIT(MAC_EEE_LPI_TXIDLE_THD, timer))
+ val = FIELD_PREP(MAC_EEE_LPI_TXIDLE_THD, timer);
+ else
+ val = MAC_EEE_LPI_TXIDLE_THD;
+
+ if (tx_clk_stop)
+ val |= MAC_EEE_CKG_TXIDLE;
+
+ /* PHY Wake-up time, this field does not have a reset value, so use the
+ * reset value from MT7531 (36us for 100M and 17us for 1000M).
+ */
+ val |= FIELD_PREP(MAC_EEE_WAKEUP_TIME_1000, 17) |
+ FIELD_PREP(MAC_EEE_WAKEUP_TIME_100, 36);
+
+ mtk_w32(eth, val, MTK_MAC_EEECR(mac->id));
+ mtk_m32(eth, 0, MAC_MCR_EEE100M | MAC_MCR_EEE1G, MTK_MAC_MCR(mac->id));
+
+ return 0;
+}
+
static const struct phylink_mac_ops mtk_phylink_ops = {
+ .mac_prepare = mtk_mac_prepare,
.mac_select_pcs = mtk_mac_select_pcs,
.mac_config = mtk_mac_config,
.mac_finish = mtk_mac_finish,
.mac_link_down = mtk_mac_link_down,
.mac_link_up = mtk_mac_link_up,
+ .mac_disable_tx_lpi = mtk_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = mtk_mac_enable_tx_lpi,
};
+static void mtk_mdio_config(struct mtk_eth *eth)
+{
+ u32 val;
+
+ /* Configure MDC Divider */
+ val = FIELD_PREP(PPSC_MDC_CFG, eth->mdc_divider);
+
+ /* Configure MDC Turbo Mode */
+ if (mtk_is_netsys_v3_or_greater(eth))
+ mtk_m32(eth, 0, MISC_MDC_TURBO, MTK_MAC_MISC_V3);
+ else
+ val |= PPSC_MDC_TURBO;
+
+ mtk_m32(eth, PPSC_MDC_CFG, val, MTK_PPSC);
+}
+
static int mtk_mdio_init(struct mtk_eth *eth)
{
- unsigned int max_clk = 2500000, divider;
+ unsigned int max_clk = 2500000;
struct device_node *mii_np;
int ret;
u32 val;
- mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
+ mii_np = of_get_available_child_by_name(eth->dev->of_node, "mdio-bus");
if (!mii_np) {
dev_err(eth->dev, "no %s child node found", "mdio-bus");
return -ENODEV;
}
- if (!of_device_is_available(mii_np)) {
- ret = -ENODEV;
- goto err_put_node;
- }
-
eth->mii_bus = devm_mdiobus_alloc(eth->dev);
if (!eth->mii_bus) {
ret = -ENOMEM;
@@ -856,20 +1009,9 @@ static int mtk_mdio_init(struct mtk_eth *eth)
}
max_clk = val;
}
- divider = min_t(unsigned int, DIV_ROUND_UP(MDC_MAX_FREQ, max_clk), 63);
-
- /* Configure MDC Turbo Mode */
- if (mtk_is_netsys_v3_or_greater(eth))
- mtk_m32(eth, 0, MISC_MDC_TURBO, MTK_MAC_MISC_V3);
-
- /* Configure MDC Divider */
- val = FIELD_PREP(PPSC_MDC_CFG, divider);
- if (!mtk_is_netsys_v3_or_greater(eth))
- val |= PPSC_MDC_TURBO;
- mtk_m32(eth, PPSC_MDC_CFG, val, MTK_PPSC);
-
- dev_dbg(eth->dev, "MDC is running on %d Hz\n", MDC_MAX_FREQ / divider);
-
+ eth->mdc_divider = min_t(unsigned int, DIV_ROUND_UP(MDC_MAX_FREQ, max_clk), 63);
+ mtk_mdio_config(eth);
+ dev_dbg(eth->dev, "MDC is running on %d Hz\n", MDC_MAX_FREQ / eth->mdc_divider);
ret = of_mdiobus_register(eth->mii_bus, mii_np);
err_put_node:
@@ -1107,7 +1249,7 @@ static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
- if (mtk_is_netsys_v2_or_greater(eth)) {
+ if (mtk_is_netsys_v3_or_greater(eth)) {
rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
}
@@ -1126,56 +1268,85 @@ static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask)
return (void *)data;
}
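+/* Allocate a DMA ring, using the SoC SRAM gen_pool when requested and
+ * available, otherwise falling back to coherent DMA memory.
+ */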
+static void *mtk_dma_ring_alloc(struct mtk_eth *eth, size_t size,
+ dma_addr_t *dma_handle, bool use_sram)
+{
+ void *dma_ring;
+
+ if (use_sram && eth->sram_pool) {
+ dma_ring = (void *)gen_pool_alloc(eth->sram_pool, size);
+ if (!dma_ring)
+ return dma_ring;
+ *dma_handle = gen_pool_virt_to_phys(eth->sram_pool,
+ (unsigned long)dma_ring);
+ } else {
+ dma_ring = dma_alloc_coherent(eth->dma_dev, size, dma_handle,
+ GFP_KERNEL);
+ }
+
+ return dma_ring;
+}
+
+static void mtk_dma_ring_free(struct mtk_eth *eth, size_t size, void *dma_ring,
+ dma_addr_t dma_handle, bool in_sram)
+{
+ if (in_sram && eth->sram_pool)
+ gen_pool_free(eth->sram_pool, (unsigned long)dma_ring, size);
+ else
+ dma_free_coherent(eth->dma_dev, size, dma_ring, dma_handle);
+}
+
/* the qdma core needs scratch memory to be setup */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
const struct mtk_soc_data *soc = eth->soc;
dma_addr_t phy_ring_tail;
- int cnt = MTK_QDMA_RING_SIZE;
+ int cnt = soc->tx.fq_dma_size;
dma_addr_t dma_addr;
- int i;
+ int i, j, len;
+
+ eth->scratch_ring = mtk_dma_ring_alloc(eth, cnt * soc->tx.desc_size,
+ &eth->phy_scratch_ring, true);
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM))
- eth->scratch_ring = eth->sram_base;
- else
- eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
- cnt * soc->txrx.txd_size,
- &eth->phy_scratch_ring,
- GFP_KERNEL);
if (unlikely(!eth->scratch_ring))
return -ENOMEM;
- eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
- if (unlikely(!eth->scratch_head))
- return -ENOMEM;
+ phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);
- dma_addr = dma_map_single(eth->dma_dev,
- eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
- return -ENOMEM;
+ for (j = 0; j < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); j++) {
+ len = min_t(int, cnt - j * MTK_FQ_DMA_LENGTH, MTK_FQ_DMA_LENGTH);
+ eth->scratch_head[j] = kcalloc(len, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
- phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);
+ if (unlikely(!eth->scratch_head[j]))
+ return -ENOMEM;
- for (i = 0; i < cnt; i++) {
- dma_addr_t addr = dma_addr + i * MTK_QDMA_PAGE_SIZE;
- struct mtk_tx_dma_v2 *txd;
+ dma_addr = dma_map_single(eth->dma_dev,
+ eth->scratch_head[j], len * MTK_QDMA_PAGE_SIZE,
+ DMA_FROM_DEVICE);
- txd = eth->scratch_ring + i * soc->txrx.txd_size;
- txd->txd1 = addr;
- if (i < cnt - 1)
- txd->txd2 = eth->phy_scratch_ring +
- (i + 1) * soc->txrx.txd_size;
+ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
+ return -ENOMEM;
- txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
- if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA))
- txd->txd3 |= TX_DMA_PREP_ADDR64(addr);
- txd->txd4 = 0;
- if (mtk_is_netsys_v2_or_greater(eth)) {
- txd->txd5 = 0;
- txd->txd6 = 0;
- txd->txd7 = 0;
- txd->txd8 = 0;
+ for (i = 0; i < len; i++) {
+ struct mtk_tx_dma_v2 *txd;
+
+ txd = eth->scratch_ring + (j * MTK_FQ_DMA_LENGTH + i) * soc->tx.desc_size;
+ txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
+ if (j * MTK_FQ_DMA_LENGTH + i < cnt)
+ txd->txd2 = eth->phy_scratch_ring +
+ (j * MTK_FQ_DMA_LENGTH + i + 1) * soc->tx.desc_size;
+
+ txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
+ if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA))
+ txd->txd3 |= TX_DMA_PREP_ADDR64(dma_addr + i * MTK_QDMA_PAGE_SIZE);
+
+ txd->txd4 = 0;
+ if (mtk_is_netsys_v2_or_greater(eth)) {
+ txd->txd5 = 0;
+ txd->txd6 = 0;
+ txd->txd7 = 0;
+ txd->txd8 = 0;
+ }
}
}
@@ -1416,7 +1587,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
if (itxd == ring->last_free)
return -ENOMEM;
- itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
+ itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
memset(itx_buf, 0, sizeof(*itx_buf));
txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
@@ -1457,7 +1628,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
txd_info.size = min_t(unsigned int, frag_size,
- soc->txrx.dma_max_len);
+ soc->tx.dma_max_len);
txd_info.qid = queue;
txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
!(frag_size - txd_info.size);
@@ -1470,7 +1641,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
mtk_tx_set_dma_desc(dev, txd, &txd_info);
tx_buf = mtk_desc_to_tx_buf(ring, txd,
- soc->txrx.txd_size);
+ soc->tx.desc_size);
if (new_desc)
memset(tx_buf, 0, sizeof(*tx_buf));
tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
@@ -1513,7 +1684,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
} else {
int next_idx;
- next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
+ next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->tx.desc_size),
ring->dma_size);
mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
}
@@ -1522,7 +1693,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
err_dma:
do {
- tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
+ tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
/* unmap dma */
mtk_tx_unmap(eth, tx_buf, NULL, false);
@@ -1547,7 +1718,7 @@ static int mtk_cal_txd_req(struct mtk_eth *eth, struct sk_buff *skb)
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
frag = &skb_shinfo(skb)->frags[i];
nfrags += DIV_ROUND_UP(skb_frag_size(frag),
- eth->soc->txrx.dma_max_len);
+ eth->soc->tx.dma_max_len);
}
} else {
nfrags += skb_shinfo(skb)->nr_frags;
@@ -1590,6 +1761,13 @@ static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
bool gso = false;
int tx_num;
+ if (skb_vlan_tag_present(skb) &&
+ !eth_proto_is_802_3(eth_hdr(skb)->h_proto)) {
+ skb = __vlan_hwaccel_push_inside(skb);
+ if (!skb)
+ goto dropped;
+ }
+
/* normally we can rely on the stack not calling this more than once,
* however we have 2 queues running on the same ring so we need to lock
* the ring access
@@ -1635,8 +1813,9 @@ static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
drop:
spin_unlock(&eth->page_lock);
- stats->tx_dropped++;
dev_kfree_skb_any(skb);
+dropped:
+ stats->tx_dropped++;
return NETDEV_TX_OK;
}
@@ -1654,7 +1833,7 @@ static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
ring = &eth->rx_ring[i];
idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
- rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
+ rxd = ring->dma + idx * eth->soc->rx.desc_size;
if (rxd->rxd2 & RX_DMA_DONE) {
ring->calc_idx_update = true;
return ring;
@@ -1710,7 +1889,7 @@ static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
if (IS_ERR(pp))
return pp;
- err = __xdp_rxq_info_reg(xdp_q, &eth->dummy_dev, id,
+ err = __xdp_rxq_info_reg(xdp_q, eth->dummy_dev, id,
eth->rx_napi.napi_id, PAGE_SIZE);
if (err < 0)
goto err_free_pp;
@@ -1822,7 +2001,7 @@ static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
}
htxd = txd;
- tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
+ tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->tx.desc_size);
memset(tx_buf, 0, sizeof(*tx_buf));
htx_buf = tx_buf;
@@ -1841,7 +2020,7 @@ static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
goto unmap;
tx_buf = mtk_desc_to_tx_buf(ring, txd,
- soc->txrx.txd_size);
+ soc->tx.desc_size);
memset(tx_buf, 0, sizeof(*tx_buf));
n_desc++;
}
@@ -1879,7 +2058,7 @@ static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
} else {
int idx;
- idx = txd_to_idx(ring, txd, soc->txrx.txd_size);
+ idx = txd_to_idx(ring, txd, soc->tx.desc_size);
mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
MT7628_TX_CTX_IDX0);
}
@@ -1890,7 +2069,7 @@ static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
unmap:
while (htxd != txd) {
- tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size);
+ tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->tx.desc_size);
mtk_tx_unmap(eth, tx_buf, NULL, false);
htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
@@ -2009,6 +2188,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
struct mtk_rx_dma_v2 *rxd, trxd;
int done = 0, bytes = 0;
dma_addr_t dma_addr = DMA_MAPPING_ERROR;
+ int ppe_idx = 0;
while (done < budget) {
unsigned int pktlen, *rxdcsum;
@@ -2021,14 +2201,14 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
goto rx_done;
idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
- rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
+ rxd = ring->dma + idx * eth->soc->rx.desc_size;
data = ring->data[idx];
if (!mtk_rx_get_desc(eth, &trxd, rxd))
break;
/* find out which mac the packet come from. values start at 1 */
- if (mtk_is_netsys_v2_or_greater(eth)) {
+ if (mtk_is_netsys_v3_or_greater(eth)) {
u32 val = RX_DMA_GET_SPORT_V2(trxd.rxd5);
switch (val) {
@@ -2052,6 +2232,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
goto release_desc;
netdev = eth->netdev[mac];
+ ppe_idx = eth->mac[mac]->ppe_idx;
if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
goto release_desc;
@@ -2062,7 +2243,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
if (ring->page_pool) {
struct page *page = virt_to_head_page(data);
struct xdp_buff xdp;
- u32 ret;
+ u32 ret, metasize;
new_data = mtk_page_pool_get_buff(ring->page_pool,
&dma_addr,
@@ -2078,7 +2259,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_q);
xdp_prepare_buff(&xdp, data, MTK_PP_HEADROOM, pktlen,
- false);
+ true);
xdp_buff_clear_frags_flag(&xdp);
ret = mtk_xdp_run(eth, ring, &xdp, netdev);
@@ -2098,6 +2279,9 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
skb_reserve(skb, xdp.data - xdp.data_hard_start);
skb_put(skb, xdp.data_end - xdp.data);
+ metasize = xdp.data - xdp.data_meta;
+ if (metasize)
+ skb_metadata_set(skb, metasize);
skb_mark_for_recycle(skb);
} else {
if (ring->frag_size <= PAGE_SIZE)
@@ -2140,7 +2324,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
skb->dev = netdev;
bytes += skb->len;
- if (mtk_is_netsys_v2_or_greater(eth)) {
+ if (mtk_is_netsys_v3_or_greater(eth)) {
reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5);
hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
if (hash != MTK_RXD5_FOE_ENTRY)
@@ -2156,7 +2340,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
rxdcsum = &trxd.rxd4;
}
- if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
+ if (*rxdcsum & eth->soc->rx.dma_l4_valid)
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb_checksum_none_assert(skb);
@@ -2175,7 +2359,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
}
if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
- mtk_ppe_check_skb(eth->ppe[0], skb, hash);
+ mtk_ppe_check_skb(eth->ppe[ppe_idx], skb, hash);
skb_record_rx_queue(skb, 0);
napi_gro_receive(napi, skb);
@@ -2184,14 +2368,18 @@ skip_rx:
ring->data[idx] = new_data;
rxd->rxd1 = (unsigned int)dma_addr;
release_desc:
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) {
+ if (unlikely(dma_addr == DMA_MAPPING_ERROR))
+ addr64 = FIELD_GET(RX_DMA_ADDR64_MASK,
+ rxd->rxd2);
+ else
+ addr64 = RX_DMA_PREP_ADDR64(dma_addr);
+ }
+
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
rxd->rxd2 = RX_DMA_LSO;
else
- rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
-
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA) &&
- likely(dma_addr != DMA_MAPPING_ERROR))
- rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr);
+ rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size) | addr64;
ring->calc_idx = idx;
done++;
@@ -2210,7 +2398,7 @@ rx_done:
eth->rx_bytes += bytes;
dim_update_sample(eth->rx_events, eth->rx_packets, eth->rx_bytes,
&dim_sample);
- net_dim(&eth->rx_dim, dim_sample);
+ net_dim(&eth->rx_dim, &dim_sample);
if (xdp_flush)
xdp_do_flush();
@@ -2280,7 +2468,7 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
break;
tx_buf = mtk_desc_to_tx_buf(ring, desc,
- eth->soc->txrx.txd_size);
+ eth->soc->tx.desc_size);
if (!tx_buf->data)
break;
@@ -2331,7 +2519,7 @@ static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
}
mtk_tx_unmap(eth, tx_buf, &bq, true);
- desc = ring->dma + cpu * eth->soc->txrx.txd_size;
+ desc = ring->dma + cpu * eth->soc->tx.desc_size;
ring->last_free = desc;
atomic_inc(&ring->free_count);
@@ -2360,7 +2548,7 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget)
dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes,
&dim_sample);
- net_dim(&eth->tx_dim, dim_sample);
+ net_dim(&eth->tx_dim, &dim_sample);
if (mtk_queue_stopped(eth) &&
(atomic_read(&ring->free_count) > ring->thresh))
@@ -2421,7 +2609,7 @@ static int mtk_napi_rx(struct napi_struct *napi, int budget)
do {
int rx_done;
- mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask,
+ mtk_w32(eth, eth->soc->rx.irq_done_mask,
reg_map->pdma.irq_status);
rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
rx_done_total += rx_done;
@@ -2437,10 +2625,10 @@ static int mtk_napi_rx(struct napi_struct *napi, int budget)
return budget;
} while (mtk_r32(eth, reg_map->pdma.irq_status) &
- eth->soc->txrx.rx_irq_done_mask);
+ eth->soc->rx.irq_done_mask);
if (napi_complete_done(napi, rx_done_total))
- mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
+ mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
return rx_done_total;
}
@@ -2449,7 +2637,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
{
const struct mtk_soc_data *soc = eth->soc;
struct mtk_tx_ring *ring = &eth->tx_ring;
- int i, sz = soc->txrx.txd_size;
+ int i, sz = soc->tx.desc_size;
struct mtk_tx_dma_v2 *txd;
int ring_size;
u32 ofs, val;
@@ -2457,21 +2645,14 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
if (MTK_HAS_CAPS(soc->caps, MTK_QDMA))
ring_size = MTK_QDMA_RING_SIZE;
else
- ring_size = MTK_DMA_SIZE;
+ ring_size = soc->tx.dma_size;
ring->buf = kcalloc(ring_size, sizeof(*ring->buf),
GFP_KERNEL);
if (!ring->buf)
goto no_tx_mem;
- if (MTK_HAS_CAPS(soc->caps, MTK_SRAM)) {
- ring->dma = eth->sram_base + ring_size * sz;
- ring->phys = eth->phy_scratch_ring + ring_size * (dma_addr_t)sz;
- } else {
- ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
- &ring->phys, GFP_KERNEL);
- }
-
+ ring->dma = mtk_dma_ring_alloc(eth, ring_size * sz, &ring->phys, true);
if (!ring->dma)
goto no_tx_mem;
@@ -2570,16 +2751,16 @@ static void mtk_tx_clean(struct mtk_eth *eth)
kfree(ring->buf);
ring->buf = NULL;
}
- if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && ring->dma) {
- dma_free_coherent(eth->dma_dev,
- ring->dma_size * soc->txrx.txd_size,
- ring->dma, ring->phys);
+
+ if (ring->dma) {
+ mtk_dma_ring_free(eth, ring->dma_size * soc->tx.desc_size,
+ ring->dma, ring->phys, true);
ring->dma = NULL;
}
if (ring->dma_pdma) {
dma_free_coherent(eth->dma_dev,
- ring->dma_size * soc->txrx.txd_size,
+ ring->dma_size * soc->tx.desc_size,
ring->dma_pdma, ring->phys_pdma);
ring->dma_pdma = NULL;
}
@@ -2588,15 +2769,11 @@ static void mtk_tx_clean(struct mtk_eth *eth)
static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
{
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
+ const struct mtk_soc_data *soc = eth->soc;
struct mtk_rx_ring *ring;
- int rx_data_len, rx_dma_size, tx_ring_size;
+ int rx_data_len, rx_dma_size;
int i;
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
- tx_ring_size = MTK_QDMA_RING_SIZE;
- else
- tx_ring_size = MTK_DMA_SIZE;
-
if (rx_flag == MTK_RX_FLAGS_QDMA) {
if (ring_no)
return -EINVAL;
@@ -2610,7 +2787,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
rx_dma_size = MTK_HW_LRO_DMA_SIZE;
} else {
rx_data_len = ETH_DATA_LEN;
- rx_dma_size = MTK_DMA_SIZE;
+ rx_dma_size = soc->rx.dma_size;
}
ring->frag_size = mtk_max_frag_size(rx_data_len);
@@ -2631,20 +2808,10 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
ring->page_pool = pp;
}
- if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM) ||
- rx_flag != MTK_RX_FLAGS_NORMAL) {
- ring->dma = dma_alloc_coherent(eth->dma_dev,
- rx_dma_size * eth->soc->txrx.rxd_size,
- &ring->phys, GFP_KERNEL);
- } else {
- struct mtk_tx_ring *tx_ring = &eth->tx_ring;
-
- ring->dma = tx_ring->dma + tx_ring_size *
- eth->soc->txrx.txd_size * (ring_no + 1);
- ring->phys = tx_ring->phys + tx_ring_size *
- eth->soc->txrx.txd_size * (ring_no + 1);
- }
-
+ ring->dma = mtk_dma_ring_alloc(eth,
+ rx_dma_size * eth->soc->rx.desc_size,
+ &ring->phys,
+ rx_flag == MTK_RX_FLAGS_NORMAL);
if (!ring->dma)
return -ENOMEM;
@@ -2653,7 +2820,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
dma_addr_t dma_addr;
void *data;
- rxd = ring->dma + i * eth->soc->txrx.rxd_size;
+ rxd = ring->dma + i * eth->soc->rx.desc_size;
if (ring->page_pool) {
data = mtk_page_pool_get_buff(ring->page_pool,
&dma_addr, GFP_KERNEL);
@@ -2690,7 +2857,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
rxd->rxd3 = 0;
rxd->rxd4 = 0;
- if (mtk_is_netsys_v2_or_greater(eth)) {
+ if (mtk_is_netsys_v3_or_greater(eth)) {
rxd->rxd5 = 0;
rxd->rxd6 = 0;
rxd->rxd7 = 0;
@@ -2744,7 +2911,7 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, bool in_
if (!ring->data[i])
continue;
- rxd = ring->dma + i * eth->soc->txrx.rxd_size;
+ rxd = ring->dma + i * eth->soc->rx.desc_size;
if (!rxd->rxd1)
continue;
@@ -2759,10 +2926,9 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, bool in_
ring->data = NULL;
}
- if (!in_sram && ring->dma) {
- dma_free_coherent(eth->dma_dev,
- ring->dma_size * eth->soc->txrx.rxd_size,
- ring->dma, ring->phys);
+ if (ring->dma) {
+ mtk_dma_ring_free(eth, ring->dma_size * eth->soc->rx.desc_size,
+ ring->dma, ring->phys, in_sram);
ring->dma = NULL;
}
@@ -3117,20 +3283,29 @@ static int mtk_dma_init(struct mtk_eth *eth)
static void mtk_dma_free(struct mtk_eth *eth)
{
const struct mtk_soc_data *soc = eth->soc;
- int i;
+ int i, j, txqs = 1;
- for (i = 0; i < MTK_MAX_DEVS; i++)
- if (eth->netdev[i])
- netdev_reset_queue(eth->netdev[i]);
- if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) {
- dma_free_coherent(eth->dma_dev,
- MTK_QDMA_RING_SIZE * soc->txrx.txd_size,
- eth->scratch_ring, eth->phy_scratch_ring);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ txqs = MTK_QDMA_NUM_QUEUES;
+
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
+ if (!eth->netdev[i])
+ continue;
+
+ for (j = 0; j < txqs; j++)
+ netdev_tx_reset_subqueue(eth->netdev[i], j);
+ }
+
+ if (eth->scratch_ring) {
+ mtk_dma_ring_free(eth, soc->tx.fq_dma_size * soc->tx.desc_size,
+ eth->scratch_ring, eth->phy_scratch_ring,
+ true);
eth->scratch_ring = NULL;
eth->phy_scratch_ring = 0;
}
+
mtk_tx_clean(eth);
- mtk_rx_clean(eth, &eth->rx_ring[0], MTK_HAS_CAPS(soc->caps, MTK_SRAM));
+ mtk_rx_clean(eth, &eth->rx_ring[0], true);
mtk_rx_clean(eth, &eth->rx_ring_qdma, false);
if (eth->hwlro) {
@@ -3139,7 +3314,10 @@ static void mtk_dma_free(struct mtk_eth *eth)
mtk_rx_clean(eth, &eth->rx_ring[i], false);
}
- kfree(eth->scratch_head);
+ for (i = 0; i < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); i++) {
+ kfree(eth->scratch_head[i]);
+ eth->scratch_head[i] = NULL;
+ }
}
static bool mtk_hw_reset_check(struct mtk_eth *eth)
@@ -3168,13 +3346,60 @@ static void mtk_tx_timeout(struct net_device *dev, unsigned int txqueue)
schedule_work(&eth->pending_work);
}
+static int mtk_get_irqs(struct platform_device *pdev, struct mtk_eth *eth)
+{
+ int i;
+
+ /* future SoCs beginning with MT7988 should use named IRQs in dts */
+ eth->irq[MTK_FE_IRQ_TX] = platform_get_irq_byname_optional(pdev, "fe1");
+ eth->irq[MTK_FE_IRQ_RX] = platform_get_irq_byname_optional(pdev, "fe2");
+ if (eth->irq[MTK_FE_IRQ_TX] >= 0 && eth->irq[MTK_FE_IRQ_RX] >= 0)
+ return 0;
+
+ /* only use legacy mode if platform_get_irq_byname_optional returned -ENXIO */
+ if (eth->irq[MTK_FE_IRQ_TX] != -ENXIO)
+ return dev_err_probe(&pdev->dev, eth->irq[MTK_FE_IRQ_TX],
+ "Error requesting FE TX IRQ\n");
+
+ if (eth->irq[MTK_FE_IRQ_RX] != -ENXIO)
+ return dev_err_probe(&pdev->dev, eth->irq[MTK_FE_IRQ_RX],
+ "Error requesting FE RX IRQ\n");
+
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT))
+ dev_warn(&pdev->dev, "legacy DT: missing interrupt-names.");
+
+ /* legacy way:
+ * On MTK_SHARED_INT SoCs (MT7621 + MT7628) the first IRQ is taken
+ * from devicetree and used for both RX and TX - it is shared.
+ * On SoCs with non-shared IRQs the first entry is not used,
+ * the second is for TX, and the third is for RX.
+ */
+ for (i = 0; i < MTK_FE_IRQ_NUM; i++) {
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
+ if (i == MTK_FE_IRQ_SHARED)
+ eth->irq[MTK_FE_IRQ_SHARED] = platform_get_irq(pdev, i);
+ else
+ eth->irq[i] = eth->irq[MTK_FE_IRQ_SHARED];
+ } else {
+ eth->irq[i] = platform_get_irq(pdev, i + 1);
+ }
+
+ if (eth->irq[i] < 0) {
+ dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
+ return -ENXIO;
+ }
+ }
+
+ return 0;
+}
+
static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
{
struct mtk_eth *eth = _eth;
eth->rx_events++;
if (likely(napi_schedule_prep(&eth->rx_napi))) {
- mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
+ mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
__napi_schedule(&eth->rx_napi);
}
@@ -3200,9 +3425,9 @@ static irqreturn_t mtk_handle_irq(int irq, void *_eth)
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
if (mtk_r32(eth, reg_map->pdma.irq_mask) &
- eth->soc->txrx.rx_irq_done_mask) {
+ eth->soc->rx.irq_done_mask) {
if (mtk_r32(eth, reg_map->pdma.irq_status) &
- eth->soc->txrx.rx_irq_done_mask)
+ eth->soc->rx.irq_done_mask)
mtk_handle_irq_rx(irq, _eth);
}
if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
@@ -3220,10 +3445,10 @@ static void mtk_poll_controller(struct net_device *dev)
struct mtk_eth *eth = mac->hw;
mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
- mtk_handle_irq_rx(eth->irq[2], dev);
+ mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
+ mtk_handle_irq_rx(eth->irq[MTK_FE_IRQ_RX], dev);
mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
+ mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
}
#endif
@@ -3248,7 +3473,7 @@ static int mtk_start_dma(struct mtk_eth *eth)
if (mtk_is_netsys_v2_or_greater(eth))
val |= MTK_MUTLI_CNT | MTK_RESV_BUF |
MTK_WCOMP_EN | MTK_DMAD_WR_WDONE |
- MTK_CHK_DDONE_EN | MTK_LEAKY_BUCKET_EN;
+ MTK_CHK_DDONE_EN;
else
val |= MTK_RX_BT_32DWORDS;
mtk_w32(eth, val, reg_map->qdma.glo_cfg);
@@ -3266,37 +3491,27 @@ static int mtk_start_dma(struct mtk_eth *eth)
return 0;
}
-static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
+static void mtk_gdm_config(struct mtk_eth *eth, u32 id, u32 config)
{
- int i;
+ u32 val;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
return;
- for (i = 0; i < MTK_MAX_DEVS; i++) {
- u32 val;
-
- if (!eth->netdev[i])
- continue;
-
- val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
+ val = mtk_r32(eth, MTK_GDMA_FWD_CFG(id));
- /* default setup the forward port to send frame to PDMA */
- val &= ~0xffff;
+ /* default setup the forward port to send frame to PDMA */
+ val &= ~0xffff;
- /* Enable RX checksum */
- val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
+ /* Enable RX checksum */
+ val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
- val |= config;
+ val |= config;
- if (netdev_uses_dsa(eth->netdev[i]))
- val |= MTK_GDMA_SPECIAL_TAG;
+ if (eth->netdev[id] && netdev_uses_dsa(eth->netdev[id]))
+ val |= MTK_GDMA_SPECIAL_TAG;
- mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
- }
- /* Reset and enable PSE */
- mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
- mtk_w32(eth, 0, MTK_RST_GL);
+ mtk_w32(eth, val, MTK_GDMA_FWD_CFG(id));
}
@@ -3356,7 +3571,10 @@ static int mtk_open(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
- int i, err;
+ struct mtk_mac *target_mac;
+ int i, err, ppe_num;
+
+ ppe_num = eth->soc->ppe_num;
err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
if (err) {
@@ -3380,18 +3598,35 @@ static int mtk_open(struct net_device *dev)
for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
mtk_ppe_start(eth->ppe[i]);
- gdm_config = soc->offload_version ? soc->reg_map->gdma_to_ppe
- : MTK_GDMA_TO_PDMA;
- mtk_gdm_config(eth, gdm_config);
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
+ if (!eth->netdev[i])
+ continue;
+
+ target_mac = netdev_priv(eth->netdev[i]);
+ if (!soc->offload_version) {
+ target_mac->ppe_idx = 0;
+ gdm_config = MTK_GDMA_TO_PDMA;
+ } else if (ppe_num >= 3 && target_mac->id == 2) {
+ target_mac->ppe_idx = 2;
+ gdm_config = soc->reg_map->gdma_to_ppe[2];
+ } else if (ppe_num >= 2 && target_mac->id == 1) {
+ target_mac->ppe_idx = 1;
+ gdm_config = soc->reg_map->gdma_to_ppe[1];
+ } else {
+ target_mac->ppe_idx = 0;
+ gdm_config = soc->reg_map->gdma_to_ppe[0];
+ }
+ mtk_gdm_config(eth, target_mac->id, gdm_config);
+ }
napi_enable(&eth->tx_napi);
napi_enable(&eth->rx_napi);
mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_enable(eth, soc->txrx.rx_irq_done_mask);
+ mtk_rx_irq_enable(eth, soc->rx.irq_done_mask);
refcount_set(&eth->dma_refcnt, 1);
- }
- else
+ } else {
refcount_inc(&eth->dma_refcnt);
+ }
phylink_start(mac->phylink);
netif_tx_start_all_queues(dev);
@@ -3468,10 +3703,11 @@ static int mtk_stop(struct net_device *dev)
if (!refcount_dec_and_test(&eth->dma_refcnt))
return 0;
- mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
+ for (i = 0; i < MTK_MAX_DEVS; i++)
+ mtk_gdm_config(eth, i, MTK_GDMA_DROP_ALL);
mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
+ mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
napi_disable(&eth->tx_napi);
napi_disable(&eth->rx_napi);
@@ -3893,7 +4129,11 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset)
else
mtk_hw_reset(eth);
- if (mtk_is_netsys_v2_or_greater(eth)) {
+ /* No MT7628/88 support yet */
+ if (reset && !MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
+ mtk_mdio_config(eth);
+
+ if (mtk_is_netsys_v3_or_greater(eth)) {
/* Set FE to PDMAv2 if necessary */
val = mtk_r32(eth, MTK_FE_GLO_MISC);
mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC);
@@ -3947,17 +4187,33 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset)
/* FE int grouping */
mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
- mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4);
+ mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->pdma.int_grp + 4);
mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
- mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4);
+ mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->qdma.int_grp + 4);
mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
if (mtk_is_netsys_v3_or_greater(eth)) {
- /* PSE should not drop port1, port8 and port9 packets */
- mtk_w32(eth, 0x00000302, PSE_DROP_CFG);
+ /* PSE dummy page mechanism */
+ mtk_w32(eth, PSE_DUMMY_WORK_GDM(1) | PSE_DUMMY_WORK_GDM(2) |
+ PSE_DUMMY_WORK_GDM(3) | DUMMY_PAGE_THR, PSE_DUMY_REQ);
+
+ /* PSE free buffer drop threshold */
+ mtk_w32(eth, 0x00600009, PSE_IQ_REV(8));
+
+ /* PSE should not drop port8, port9 and port13 packets from
+ * WDMA Tx
+ */
+ mtk_w32(eth, 0x00002300, PSE_DROP_CFG);
+
+ /* PSE should drop packets to port8, port9 and port13 on WDMA Rx
+ * ring full
+ */
+ mtk_w32(eth, 0x00002300, PSE_PPE_DROP(0));
+ mtk_w32(eth, 0x00002300, PSE_PPE_DROP(1));
+ mtk_w32(eth, 0x00002300, PSE_PPE_DROP(2));
/* GDM and CDM Threshold */
- mtk_w32(eth, 0x00000707, MTK_CDMW0_THRES);
+ mtk_w32(eth, 0x08000707, MTK_CDMW0_THRES);
mtk_w32(eth, 0x00000077, MTK_CDMW1_THRES);
/* Disable GDM1 RX CRC stripping */
@@ -3974,7 +4230,7 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset)
mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
/* PSE should drop packets to port 8/9 on WDMA Rx ring full */
- mtk_w32(eth, 0x00000300, PSE_PPE0_DROP);
+ mtk_w32(eth, 0x00000300, PSE_PPE_DROP(0));
/* PSE Free Queue Flow Control */
mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
@@ -4055,7 +4311,7 @@ static int mtk_change_mtu(struct net_device *dev, int new_mtu)
}
mtk_set_mcr_max_rx(mac, length);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
@@ -4294,10 +4550,8 @@ static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
case ETH_SS_STATS: {
struct mtk_mac *mac = netdev_priv(dev);
- for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
- memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
+ ethtool_puts(&data, mtk_ethtool_stats[i].str);
if (mtk_page_pool_enabled(mac->hw))
page_pool_ethtool_stats_get_strings(data);
break;
@@ -4427,6 +4681,34 @@ static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
return ret;
}
+static void mtk_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+
+ phylink_ethtool_get_pauseparam(mac->phylink, pause);
+}
+
+static int mtk_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+
+ return phylink_ethtool_set_pauseparam(mac->phylink, pause);
+}
+
+static int mtk_get_eee(struct net_device *dev, struct ethtool_keee *eee)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+
+ return phylink_ethtool_get_eee(mac->phylink, eee);
+}
+
+static int mtk_set_eee(struct net_device *dev, struct ethtool_keee *eee)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+
+ return phylink_ethtool_set_eee(mac->phylink, eee);
+}
+
static u16 mtk_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev)
{
@@ -4455,8 +4737,12 @@ static const struct ethtool_ops mtk_ethtool_ops = {
.get_strings = mtk_get_strings,
.get_sset_count = mtk_get_sset_count,
.get_ethtool_stats = mtk_get_ethtool_stats,
+ .get_pauseparam = mtk_get_pauseparam,
+ .set_pauseparam = mtk_set_pauseparam,
.get_rxnfc = mtk_get_rxnfc,
- .set_rxnfc = mtk_set_rxnfc,
+ .set_rxnfc = mtk_set_rxnfc,
+ .get_eee = mtk_get_eee,
+ .set_eee = mtk_set_eee,
};
static const struct net_device_ops mtk_netdev_ops = {
@@ -4566,6 +4852,9 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
mac->phylink_config.type = PHYLINK_NETDEV;
mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
+ mac->phylink_config.lpi_capabilities = MAC_100FD | MAC_1000FD |
+ MAC_2500FD;
+ mac->phylink_config.lpi_timer_default = 1000;
/* MT7623 gmac0 is now missing its speed-specific PLL configuration
* in its .mac_config method (since state->speed is not valid there.
@@ -4604,7 +4893,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
}
if (mtk_is_netsys_v3_or_greater(mac->hw) &&
- MTK_HAS_CAPS(mac->hw->soc->caps, MTK_ESW_BIT) &&
+ MTK_HAS_CAPS(mac->hw->soc->caps, MTK_ESW) &&
id == MTK_GMAC1_ID) {
mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
MAC_SYM_PAUSE |
@@ -4624,6 +4913,11 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
mac->phylink = phylink;
+ if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_2P5GPHY) &&
+ id == MTK_GMAC2_ID)
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ mac->phylink_config.supported_interfaces);
+
SET_NETDEV_DEV(eth->netdev[id], eth->dev);
eth->netdev[id]->watchdog_timeo = 5 * HZ;
eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
@@ -4638,7 +4932,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
eth->netdev[id]->features |= eth->soc->hw_features;
eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
- eth->netdev[id]->irq = eth->irq[0];
+ eth->netdev[id]->irq = eth->irq[MTK_FE_IRQ_SHARED];
eth->netdev[id]->dev.of_node = np;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
@@ -4681,7 +4975,7 @@ void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
list_add_tail(&dev->close_list, &dev_list);
}
- dev_close_many(&dev_list, false);
+ netif_close_many(&dev_list, false);
eth->dma_dev = dma_dev;
@@ -4723,9 +5017,30 @@ static int mtk_sgmii_init(struct mtk_eth *eth)
return 0;
}
+static int mtk_setup_legacy_sram(struct mtk_eth *eth, struct resource *res)
+{
+ dev_warn(eth->dev, "legacy DT: using hard-coded SRAM offset.\n");
+
+ if (res->start + MTK_ETH_SRAM_OFFSET + MTK_ETH_NETSYS_V2_SRAM_SIZE - 1 >
+ res->end)
+ return -EINVAL;
+
+ eth->sram_pool = devm_gen_pool_create(eth->dev,
+ const_ilog2(MTK_ETH_SRAM_GRANULARITY),
+ NUMA_NO_NODE, dev_name(eth->dev));
+
+ if (IS_ERR(eth->sram_pool))
+ return PTR_ERR(eth->sram_pool);
+
+ return gen_pool_add_virt(eth->sram_pool,
+ (unsigned long)eth->base + MTK_ETH_SRAM_OFFSET,
+ res->start + MTK_ETH_SRAM_OFFSET,
+ MTK_ETH_NETSYS_V2_SRAM_SIZE, NUMA_NO_NODE);
+}
+
static int mtk_probe(struct platform_device *pdev)
{
- struct resource *res = NULL, *res_sram;
+ struct resource *res = NULL;
struct device_node *mac_np;
struct mtk_eth *eth;
int err, i;
@@ -4745,20 +5060,6 @@ static int mtk_probe(struct platform_device *pdev)
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
eth->ip_align = NET_IP_ALIGN;
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) {
- /* SRAM is actual memory and supports transparent access just like DRAM.
- * Hence we don't require __iomem being set and don't need to use accessor
- * functions to read from or write to SRAM.
- */
- if (mtk_is_netsys_v3_or_greater(eth)) {
- eth->sram_base = (void __force *)devm_platform_ioremap_resource(pdev, 1);
- if (IS_ERR(eth->sram_base))
- return PTR_ERR(eth->sram_base);
- } else {
- eth->sram_base = (void __force *)eth->base + MTK_ETH_SRAM_OFFSET;
- }
- }
-
if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) {
err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(36));
if (!err)
@@ -4833,16 +5134,21 @@ static int mtk_probe(struct platform_device *pdev)
err = -EINVAL;
goto err_destroy_sgmii;
}
+
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) {
- if (mtk_is_netsys_v3_or_greater(eth)) {
- res_sram = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res_sram) {
+ eth->sram_pool = of_gen_pool_get(pdev->dev.of_node,
+ "sram", 0);
+ if (!eth->sram_pool) {
+ if (!mtk_is_netsys_v3_or_greater(eth)) {
+ err = mtk_setup_legacy_sram(eth, res);
+ if (err)
+ goto err_destroy_sgmii;
+ } else {
+ dev_err(&pdev->dev,
+ "Could not get SRAM pool\n");
err = -EINVAL;
goto err_destroy_sgmii;
}
- eth->phy_scratch_ring = res_sram->start;
- } else {
- eth->phy_scratch_ring = res->start + MTK_ETH_SRAM_OFFSET;
}
}
}
@@ -4868,17 +5174,10 @@ static int mtk_probe(struct platform_device *pdev)
}
}
- for (i = 0; i < 3; i++) {
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
- eth->irq[i] = eth->irq[0];
- else
- eth->irq[i] = platform_get_irq(pdev, i);
- if (eth->irq[i] < 0) {
- dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
- err = -ENXIO;
- goto err_wed_exit;
- }
- }
+ err = mtk_get_irqs(pdev, eth);
+ if (err)
+ goto err_wed_exit;
+
for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
eth->clks[i] = devm_clk_get(eth->dev,
mtk_clks_source_name[i]);
@@ -4922,17 +5221,17 @@ static int mtk_probe(struct platform_device *pdev)
}
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
- err = devm_request_irq(eth->dev, eth->irq[0],
+ err = devm_request_irq(eth->dev, eth->irq[MTK_FE_IRQ_SHARED],
mtk_handle_irq, 0,
dev_name(eth->dev), eth);
} else {
- err = devm_request_irq(eth->dev, eth->irq[1],
+ err = devm_request_irq(eth->dev, eth->irq[MTK_FE_IRQ_TX],
mtk_handle_irq_tx, 0,
dev_name(eth->dev), eth);
if (err)
goto err_free_dev;
- err = devm_request_irq(eth->dev, eth->irq[2],
+ err = devm_request_irq(eth->dev, eth->irq[MTK_FE_IRQ_RX],
mtk_handle_irq_rx, 0,
dev_name(eth->dev), eth);
}
@@ -4947,23 +5246,24 @@ static int mtk_probe(struct platform_device *pdev)
}
if (eth->soc->offload_version) {
- u32 num_ppe = mtk_is_netsys_v2_or_greater(eth) ? 2 : 1;
+ u8 ppe_num = eth->soc->ppe_num;
- num_ppe = min_t(u32, ARRAY_SIZE(eth->ppe), num_ppe);
- for (i = 0; i < num_ppe; i++) {
- u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400;
+ ppe_num = min_t(u8, ARRAY_SIZE(eth->ppe), ppe_num);
+ for (i = 0; i < ppe_num; i++) {
+ u32 ppe_addr = eth->soc->reg_map->ppe_base;
+ ppe_addr += (i == 2 ? 0xc00 : i * 0x400);
eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr, i);
if (!eth->ppe[i]) {
err = -ENOMEM;
goto err_deinit_ppe;
}
- }
+ err = mtk_eth_offload_init(eth, i);
- err = mtk_eth_offload_init(eth);
- if (err)
- goto err_deinit_ppe;
+ if (err)
+ goto err_deinit_ppe;
+ }
}
for (i = 0; i < MTK_MAX_DEVS; i++) {
@@ -4977,15 +5277,20 @@ static int mtk_probe(struct platform_device *pdev)
} else
netif_info(eth, probe, eth->netdev[i],
"mediatek frame engine at 0x%08lx, irq %d\n",
- eth->netdev[i]->base_addr, eth->irq[0]);
+ eth->netdev[i]->base_addr, eth->irq[MTK_FE_IRQ_SHARED]);
}
/* we run 2 devices on the same DMA ring so we need a dummy device
* for NAPI to work
*/
- init_dummy_netdev(&eth->dummy_dev);
- netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx);
- netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx);
+ eth->dummy_dev = alloc_netdev_dummy(0);
+ if (!eth->dummy_dev) {
+ err = -ENOMEM;
+ dev_err(eth->dev, "failed to allocated dummy device\n");
+ goto err_unreg_netdev;
+ }
+ netif_napi_add(eth->dummy_dev, &eth->tx_napi, mtk_napi_tx);
+ netif_napi_add(eth->dummy_dev, &eth->rx_napi, mtk_napi_rx);
platform_set_drvdata(pdev, eth);
schedule_delayed_work(&eth->reset.monitor_work,
@@ -4993,6 +5298,8 @@ static int mtk_probe(struct platform_device *pdev)
return 0;
+err_unreg_netdev:
+ mtk_unreg_dev(eth);
err_deinit_ppe:
mtk_ppe_deinit(eth);
mtk_mdio_cleanup(eth);
@@ -5029,6 +5336,7 @@ static void mtk_remove(struct platform_device *pdev)
netif_napi_del(&eth->tx_napi);
netif_napi_del(&eth->rx_napi);
mtk_cleanup(eth);
+ free_netdev(eth->dummy_dev);
mtk_mdio_cleanup(eth);
}
@@ -5039,11 +5347,18 @@ static const struct mtk_soc_data mt2701_data = {
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
.version = 1,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- .rx_irq_done_mask = MTK_RX_DONE_INT,
- .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ .dma_size = MTK_DMA_SIZE(2K),
+ .fq_dma_size = MTK_DMA_SIZE(2K),
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_size = MTK_DMA_SIZE(2K),
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
@@ -5057,13 +5372,21 @@ static const struct mtk_soc_data mt7621_data = {
.required_pctl = false,
.version = 1,
.offload_version = 1,
+ .ppe_num = 1,
.hash_offset = 2,
.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- .rx_irq_done_mask = MTK_RX_DONE_INT,
- .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ .dma_size = MTK_DMA_SIZE(2K),
+ .fq_dma_size = MTK_DMA_SIZE(2K),
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_size = MTK_DMA_SIZE(2K),
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
@@ -5078,14 +5401,22 @@ static const struct mtk_soc_data mt7622_data = {
.required_pctl = false,
.version = 1,
.offload_version = 2,
+ .ppe_num = 1,
.hash_offset = 2,
.has_accounting = true,
.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- .rx_irq_done_mask = MTK_RX_DONE_INT,
- .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ .dma_size = MTK_DMA_SIZE(2K),
+ .fq_dma_size = MTK_DMA_SIZE(2K),
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_size = MTK_DMA_SIZE(2K),
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
@@ -5099,14 +5430,22 @@ static const struct mtk_soc_data mt7623_data = {
.required_pctl = true,
.version = 1,
.offload_version = 1,
+ .ppe_num = 1,
.hash_offset = 2,
.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
.disable_pll_modes = true,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- .rx_irq_done_mask = MTK_RX_DONE_INT,
- .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ .dma_size = MTK_DMA_SIZE(2K),
+ .fq_dma_size = MTK_DMA_SIZE(2K),
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_size = MTK_DMA_SIZE(2K),
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
@@ -5121,11 +5460,18 @@ static const struct mtk_soc_data mt7629_data = {
.required_pctl = false,
.has_accounting = true,
.version = 1,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- .rx_irq_done_mask = MTK_RX_DONE_INT,
- .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ .dma_size = MTK_DMA_SIZE(2K),
+ .fq_dma_size = MTK_DMA_SIZE(2K),
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_size = MTK_DMA_SIZE(2K),
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
@@ -5140,16 +5486,24 @@ static const struct mtk_soc_data mt7981_data = {
.required_pctl = false,
.version = 2,
.offload_version = 2,
+ .ppe_num = 2,
.hash_offset = 4,
.has_accounting = true,
.foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma_v2),
- .rxd_size = sizeof(struct mtk_rx_dma_v2),
- .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
- .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma_v2),
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
.dma_len_offset = 8,
+ .dma_size = MTK_DMA_SIZE(2K),
+ .fq_dma_size = MTK_DMA_SIZE(2K),
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ .dma_size = MTK_DMA_SIZE(2K),
},
};
@@ -5162,16 +5516,24 @@ static const struct mtk_soc_data mt7986_data = {
.required_pctl = false,
.version = 2,
.offload_version = 2,
+ .ppe_num = 2,
.hash_offset = 4,
.has_accounting = true,
.foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma_v2),
- .rxd_size = sizeof(struct mtk_rx_dma_v2),
- .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
- .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma_v2),
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
.dma_len_offset = 8,
+ .dma_size = MTK_DMA_SIZE(2K),
+ .fq_dma_size = MTK_DMA_SIZE(2K),
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ .dma_size = MTK_DMA_SIZE(2K),
},
};
@@ -5184,16 +5546,24 @@ static const struct mtk_soc_data mt7988_data = {
.required_pctl = false,
.version = 3,
.offload_version = 2,
+ .ppe_num = 3,
.hash_offset = 4,
.has_accounting = true,
.foe_entry_size = MTK_FOE_ENTRY_V3_SIZE,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma_v2),
- .rxd_size = sizeof(struct mtk_rx_dma_v2),
- .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
- .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma_v2),
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
.dma_len_offset = 8,
+ .dma_size = MTK_DMA_SIZE(2K),
+ .fq_dma_size = MTK_DMA_SIZE(4K),
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma_v2),
+ .irq_done_mask = MTK_RX_DONE_INT_V2,
+ .dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ .dma_len_offset = 8,
+ .dma_size = MTK_DMA_SIZE(2K),
},
};
@@ -5204,13 +5574,19 @@ static const struct mtk_soc_data rt5350_data = {
.required_clks = MT7628_CLKS_BITMAP,
.required_pctl = false,
.version = 1,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- .rx_irq_done_mask = MTK_RX_DONE_INT,
- .rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ .dma_size = MTK_DMA_SIZE(2K),
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID_PDMA,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
+ .dma_size = MTK_DMA_SIZE(2K),
},
};
@@ -5230,7 +5606,7 @@ MODULE_DEVICE_TABLE(of, of_mtk_match);
static struct platform_driver mtk_driver = {
.probe = mtk_probe,
- .remove_new = mtk_remove,
+ .remove = mtk_remove,
.driver = {
.name = "mtk_soc_eth",
.of_match_table = of_mtk_match,
@@ -5242,3 +5618,4 @@ module_platform_driver(mtk_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");
+MODULE_IMPORT_NS("NETDEV_INTERNAL");
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 9ae3b8a71d0e..0168e2fbc619 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -32,7 +32,9 @@
#define MTK_TX_DMA_BUF_LEN 0x3fff
#define MTK_TX_DMA_BUF_LEN_V2 0xffff
#define MTK_QDMA_RING_SIZE 2048
-#define MTK_DMA_SIZE 512
+#define MTK_DMA_SIZE(x) (SZ_##x)
+#define MTK_FQ_DMA_HEAD 32
+#define MTK_FQ_DMA_LENGTH 2048
#define MTK_RX_ETH_HLEN (ETH_HLEN + ETH_FCS_LEN)
#define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
#define MTK_DMA_DUMMY_DESC 0xffffffff
@@ -139,8 +141,10 @@
#define MTK_GDMA_MAC_ADRH(x) ({ typeof(x) _x = (x); (_x == MTK_GMAC3_ID) ? \
0x54C : 0x50C + (_x * 0x1000); })
-/* Internal SRAM offset */
-#define MTK_ETH_SRAM_OFFSET 0x40000
+/* legacy DT support for internal SRAM */
+#define MTK_ETH_SRAM_OFFSET 0x40000
+#define MTK_ETH_SRAM_GRANULARITY 32
+#define MTK_ETH_NETSYS_V2_SRAM_SIZE 0x40000
/* FE global misc reg*/
#define MTK_FE_GLO_MISC 0x124
@@ -149,7 +153,15 @@
#define PSE_FQFC_CFG1 0x100
#define PSE_FQFC_CFG2 0x104
#define PSE_DROP_CFG 0x108
-#define PSE_PPE0_DROP 0x110
+#define PSE_PPE_DROP(x) (0x110 + ((x) * 0x4))
+
+/* PSE Last FreeQ Page Request Control */
+#define PSE_DUMY_REQ 0x10C
+/* PSE_DUMY_REQ is not a typo; the register really is named that way in
+ * MediaTek's datasheet
+ */
+#define PSE_DUMMY_WORK_GDM(x) BIT(16 + (x))
+#define DUMMY_PAGE_THR 0x1
/* PSE Input Queue Reservation Register*/
#define PSE_IQ_REV(x) (0x140 + (((x) - 1) << 2))
@@ -327,8 +339,8 @@
/* QDMA descriptor txd3 */
#define TX_DMA_OWNER_CPU BIT(31)
#define TX_DMA_LS0 BIT(30)
-#define TX_DMA_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
-#define TX_DMA_PLEN1(x) ((x) & eth->soc->txrx.dma_max_len)
+#define TX_DMA_PLEN0(x) (((x) & eth->soc->tx.dma_max_len) << eth->soc->tx.dma_len_offset)
+#define TX_DMA_PLEN1(x) ((x) & eth->soc->tx.dma_max_len)
#define TX_DMA_SWC BIT(14)
#define TX_DMA_PQID GENMASK(3, 0)
#define TX_DMA_ADDR64_MASK GENMASK(3, 0)
@@ -348,8 +360,8 @@
/* QDMA descriptor rxd2 */
#define RX_DMA_DONE BIT(31)
#define RX_DMA_LSO BIT(30)
-#define RX_DMA_PREP_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
-#define RX_DMA_GET_PLEN0(x) (((x) >> eth->soc->txrx.dma_len_offset) & eth->soc->txrx.dma_max_len)
+#define RX_DMA_PREP_PLEN0(x) (((x) & eth->soc->rx.dma_max_len) << eth->soc->rx.dma_len_offset)
+#define RX_DMA_GET_PLEN0(x) (((x) >> eth->soc->rx.dma_len_offset) & eth->soc->rx.dma_max_len)
#define RX_DMA_VTAG BIT(15)
#define RX_DMA_ADDR64_MASK GENMASK(3, 0)
#if IS_ENABLED(CONFIG_64BIT)
@@ -421,7 +433,8 @@
/* XMAC status registers */
#define MTK_XGMAC_STS(x) (((x) == MTK_GMAC3_ID) ? 0x1001C : 0x1000C)
-#define MTK_XGMAC_FORCE_LINK(x) (((x) == MTK_GMAC2_ID) ? BIT(31) : BIT(15))
+#define MTK_XGMAC_FORCE_MODE(x) (((x) == MTK_GMAC2_ID) ? BIT(31) : BIT(15))
+#define MTK_XGMAC_FORCE_LINK(x) (((x) == MTK_GMAC2_ID) ? BIT(27) : BIT(11))
#define MTK_USXGMII_PCS_LINK BIT(8)
#define MTK_XGMAC_RX_FC BIT(5)
#define MTK_XGMAC_TX_FC BIT(4)
@@ -451,6 +464,8 @@
#define MAC_MCR_RX_FIFO_CLR_DIS BIT(12)
#define MAC_MCR_BACKOFF_EN BIT(9)
#define MAC_MCR_BACKPR_EN BIT(8)
+#define MAC_MCR_EEE1G BIT(7)
+#define MAC_MCR_EEE100M BIT(6)
#define MAC_MCR_FORCE_RX_FC BIT(5)
#define MAC_MCR_FORCE_TX_FC BIT(4)
#define MAC_MCR_SPEED_1000 BIT(3)
@@ -459,6 +474,15 @@
#define MAC_MCR_FORCE_LINK BIT(0)
#define MAC_MCR_FORCE_LINK_DOWN (MAC_MCR_FORCE_MODE)
+/* Mac EEE control registers */
+#define MTK_MAC_EEECR(x) (0x10104 + (x * 0x100))
+#define MAC_EEE_WAKEUP_TIME_1000 GENMASK(31, 24)
+#define MAC_EEE_WAKEUP_TIME_100 GENMASK(23, 16)
+#define MAC_EEE_LPI_TXIDLE_THD GENMASK(15, 8)
+#define MAC_EEE_CKG_TXIDLE BIT(3)
+#define MAC_EEE_CKG_RXLPI BIT(2)
+#define MAC_EEE_LPI_MODE BIT(0)
+
/* Mac status registers */
#define MTK_MAC_MSR(x) (0x10108 + (x * 0x100))
#define MAC_MSR_EEE1G BIT(7)
@@ -503,6 +527,21 @@
#define INTF_MODE_RGMII_1000 (TRGMII_MODE | TRGMII_CENTRAL_ALIGNED)
#define INTF_MODE_RGMII_10_100 0
+/* XFI Mac control registers */
+#define MTK_XMAC_BASE(x) (0x12000 + (((x) - 1) * 0x1000))
+#define MTK_XMAC_MCR(x) (MTK_XMAC_BASE(x))
+#define XMAC_MCR_TRX_DISABLE 0xf
+#define XMAC_MCR_FORCE_TX_FC BIT(5)
+#define XMAC_MCR_FORCE_RX_FC BIT(4)
+
+/* XFI Mac logic reset registers */
+#define MTK_XMAC_LOGIC_RST(x) (MTK_XMAC_BASE(x) + 0x10)
+#define XMAC_LOGIC_RST BIT(0)
+
+/* XFI Mac count global control */
+#define MTK_XMAC_CNT_CTRL(x) (MTK_XMAC_BASE(x) + 0x100)
+#define XMAC_GLB_CNTCLR BIT(0)
+
/* GPIO port control registers for GMAC 2*/
#define GPIO_OD33_CTRL8 0x4c0
#define GPIO_BIAS_CTRL 0xed0
@@ -566,6 +605,10 @@
#define GEPHY_MAC_SEL BIT(1)
/* Top misc registers */
+#define TOP_MISC_NETSYS_PCS_MUX 0x0
+#define NETSYS_PCS_MUX_MASK GENMASK(1, 0)
+#define MUX_G2_USXGMII_SEL BIT(1)
+
#define USB_PHY_SWITCH_REG 0x218
#define QPHY_SEL_MASK GENMASK(1, 0)
#define SGMII_QPHY_SEL 0x2
@@ -601,6 +644,11 @@
#define MTK_MAC_FSM(x) (0x1010C + ((x) * 0x100))
+#define MTK_FE_IRQ_SHARED 0
+#define MTK_FE_IRQ_TX 0
+#define MTK_FE_IRQ_RX 1
+#define MTK_FE_IRQ_NUM (MTK_FE_IRQ_RX + 1)
+
struct mtk_rx_dma {
unsigned int rxd1;
unsigned int rxd2;
@@ -722,12 +770,8 @@ enum mtk_clks_map {
MTK_CLK_ETHWARP_WOCPU2,
MTK_CLK_ETHWARP_WOCPU1,
MTK_CLK_ETHWARP_WOCPU0,
- MTK_CLK_TOP_USXGMII_SBUS_0_SEL,
- MTK_CLK_TOP_USXGMII_SBUS_1_SEL,
MTK_CLK_TOP_SGM_0_SEL,
MTK_CLK_TOP_SGM_1_SEL,
- MTK_CLK_TOP_XFI_PHY_0_XTAL_SEL,
- MTK_CLK_TOP_XFI_PHY_1_XTAL_SEL,
MTK_CLK_TOP_ETH_GMII_SEL,
MTK_CLK_TOP_ETH_REFCK_50M_SEL,
MTK_CLK_TOP_ETH_SYS_200M_SEL,
@@ -798,19 +842,9 @@ enum mtk_clks_map {
BIT_ULL(MTK_CLK_GP3) | BIT_ULL(MTK_CLK_XGP1) | \
BIT_ULL(MTK_CLK_XGP2) | BIT_ULL(MTK_CLK_XGP3) | \
BIT_ULL(MTK_CLK_CRYPTO) | \
- BIT_ULL(MTK_CLK_SGMII_TX_250M) | \
- BIT_ULL(MTK_CLK_SGMII_RX_250M) | \
- BIT_ULL(MTK_CLK_SGMII2_TX_250M) | \
- BIT_ULL(MTK_CLK_SGMII2_RX_250M) | \
BIT_ULL(MTK_CLK_ETHWARP_WOCPU2) | \
BIT_ULL(MTK_CLK_ETHWARP_WOCPU1) | \
BIT_ULL(MTK_CLK_ETHWARP_WOCPU0) | \
- BIT_ULL(MTK_CLK_TOP_USXGMII_SBUS_0_SEL) | \
- BIT_ULL(MTK_CLK_TOP_USXGMII_SBUS_1_SEL) | \
- BIT_ULL(MTK_CLK_TOP_SGM_0_SEL) | \
- BIT_ULL(MTK_CLK_TOP_SGM_1_SEL) | \
- BIT_ULL(MTK_CLK_TOP_XFI_PHY_0_XTAL_SEL) | \
- BIT_ULL(MTK_CLK_TOP_XFI_PHY_1_XTAL_SEL) | \
BIT_ULL(MTK_CLK_TOP_ETH_GMII_SEL) | \
BIT_ULL(MTK_CLK_TOP_ETH_REFCK_50M_SEL) | \
BIT_ULL(MTK_CLK_TOP_ETH_SYS_200M_SEL) | \
@@ -944,6 +978,7 @@ enum mkt_eth_capabilities {
MTK_RGMII_BIT = 0,
MTK_TRGMII_BIT,
MTK_SGMII_BIT,
+ MTK_2P5GPHY_BIT,
MTK_ESW_BIT,
MTK_GEPHY_BIT,
MTK_MUX_BIT,
@@ -964,6 +999,7 @@ enum mkt_eth_capabilities {
MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT,
MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT,
MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT,
+ MTK_ETH_MUX_GMAC2_TO_2P5GPHY_BIT,
MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII_BIT,
MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII_BIT,
@@ -973,6 +1009,7 @@ enum mkt_eth_capabilities {
MTK_ETH_PATH_GMAC1_SGMII_BIT,
MTK_ETH_PATH_GMAC2_RGMII_BIT,
MTK_ETH_PATH_GMAC2_SGMII_BIT,
+ MTK_ETH_PATH_GMAC2_2P5GPHY_BIT,
MTK_ETH_PATH_GMAC2_GEPHY_BIT,
MTK_ETH_PATH_GDM1_ESW_BIT,
};
@@ -981,6 +1018,7 @@ enum mkt_eth_capabilities {
#define MTK_RGMII BIT_ULL(MTK_RGMII_BIT)
#define MTK_TRGMII BIT_ULL(MTK_TRGMII_BIT)
#define MTK_SGMII BIT_ULL(MTK_SGMII_BIT)
+#define MTK_2P5GPHY BIT_ULL(MTK_2P5GPHY_BIT)
#define MTK_ESW BIT_ULL(MTK_ESW_BIT)
#define MTK_GEPHY BIT_ULL(MTK_GEPHY_BIT)
#define MTK_MUX BIT_ULL(MTK_MUX_BIT)
@@ -1003,6 +1041,8 @@ enum mkt_eth_capabilities {
BIT_ULL(MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT)
#define MTK_ETH_MUX_U3_GMAC2_TO_QPHY \
BIT_ULL(MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT)
+#define MTK_ETH_MUX_GMAC2_TO_2P5GPHY \
+ BIT_ULL(MTK_ETH_MUX_GMAC2_TO_2P5GPHY_BIT)
#define MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII \
BIT_ULL(MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII_BIT)
#define MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII \
@@ -1014,6 +1054,7 @@ enum mkt_eth_capabilities {
#define MTK_ETH_PATH_GMAC1_SGMII BIT_ULL(MTK_ETH_PATH_GMAC1_SGMII_BIT)
#define MTK_ETH_PATH_GMAC2_RGMII BIT_ULL(MTK_ETH_PATH_GMAC2_RGMII_BIT)
#define MTK_ETH_PATH_GMAC2_SGMII BIT_ULL(MTK_ETH_PATH_GMAC2_SGMII_BIT)
+#define MTK_ETH_PATH_GMAC2_2P5GPHY BIT_ULL(MTK_ETH_PATH_GMAC2_2P5GPHY_BIT)
#define MTK_ETH_PATH_GMAC2_GEPHY BIT_ULL(MTK_ETH_PATH_GMAC2_GEPHY_BIT)
#define MTK_ETH_PATH_GDM1_ESW BIT_ULL(MTK_ETH_PATH_GDM1_ESW_BIT)
@@ -1023,6 +1064,7 @@ enum mkt_eth_capabilities {
#define MTK_GMAC2_RGMII (MTK_ETH_PATH_GMAC2_RGMII | MTK_RGMII)
#define MTK_GMAC2_SGMII (MTK_ETH_PATH_GMAC2_SGMII | MTK_SGMII)
#define MTK_GMAC2_GEPHY (MTK_ETH_PATH_GMAC2_GEPHY | MTK_GEPHY)
+#define MTK_GMAC2_2P5GPHY (MTK_ETH_PATH_GMAC2_2P5GPHY | MTK_2P5GPHY)
#define MTK_GDM1_ESW (MTK_ETH_PATH_GDM1_ESW | MTK_ESW)
/* MUXes present on SoCs */
@@ -1042,6 +1084,10 @@ enum mkt_eth_capabilities {
(MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII | MTK_MUX | \
MTK_SHARED_SGMII)
+/* 2: GMAC2 -> 2P5GPHY */
+#define MTK_MUX_GMAC2_TO_2P5GPHY \
+ (MTK_ETH_MUX_GMAC2_TO_2P5GPHY | MTK_MUX | MTK_INFRA)
+
/* 0: GMACx -> GEPHY, 1: GMACx -> SGMII where x is 1 or 2 */
#define MTK_MUX_GMAC12_TO_GEPHY_SGMII \
(MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII | MTK_MUX)
@@ -1077,8 +1123,9 @@ enum mkt_eth_capabilities {
MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \
MTK_RSTCTRL_PPE1 | MTK_SRAM)
-#define MT7988_CAPS (MTK_36BIT_DMA | MTK_GDM1_ESW | MTK_QDMA | \
- MTK_RSTCTRL_PPE1 | MTK_RSTCTRL_PPE2 | MTK_SRAM)
+#define MT7988_CAPS (MTK_36BIT_DMA | MTK_GDM1_ESW | MTK_GMAC2_2P5GPHY | \
+ MTK_MUX_GMAC2_TO_2P5GPHY | MTK_QDMA | MTK_RSTCTRL_PPE1 | \
+ MTK_RSTCTRL_PPE2 | MTK_SRAM)
struct mtk_tx_dma_desc_info {
dma_addr_t addr;
@@ -1130,7 +1177,7 @@ struct mtk_reg_map {
u32 tx_sch_rate; /* tx scheduler rate control registers */
} qdma;
u32 gdm1_cnt;
- u32 gdma_to_ppe;
+ u32 gdma_to_ppe[3];
u32 ppe_base;
u32 wdma_base[3];
u32 pse_iq_sta;
@@ -1138,7 +1185,7 @@ struct mtk_reg_map {
};
/* struct mtk_eth_data - This is the structure holding all differences
- * among various plaforms
+ * among various platforms
* @reg_map Soc register map.
* @ana_rgc3: The offset for register ANA_RGC3 related to
* sgmiisys syscon
@@ -1153,10 +1200,9 @@ struct mtk_reg_map {
* @foe_entry_size Foe table entry size.
* @has_accounting Bool indicating support for accounting of
* offloaded flows.
- * @txd_size Tx DMA descriptor size.
- * @rxd_size Rx DMA descriptor size.
- * @rx_irq_done_mask Rx irq done register mask.
- * @rx_dma_l4_valid Rx DMA valid register mask.
+ * @desc_size Tx/Rx DMA descriptor size.
+ * @irq_done_mask Rx irq done register mask.
+ * @dma_l4_valid Rx DMA valid register mask.
* @dma_max_len Max DMA tx/rx buffer length.
* @dma_len_offset Tx/Rx DMA length field offset.
*/
@@ -1169,18 +1215,26 @@ struct mtk_soc_data {
u8 offload_version;
u8 hash_offset;
u8 version;
+ u8 ppe_num;
u16 foe_entry_size;
netdev_features_t hw_features;
bool has_accounting;
bool disable_pll_modes;
struct {
- u32 txd_size;
- u32 rxd_size;
- u32 rx_irq_done_mask;
- u32 rx_dma_l4_valid;
+ u32 desc_size;
u32 dma_max_len;
u32 dma_len_offset;
- } txrx;
+ u32 dma_size;
+ u32 fq_dma_size;
+ } tx;
+ struct {
+ u32 desc_size;
+ u32 irq_done_mask;
+ u32 dma_l4_valid;
+ u32 dma_max_len;
+ u32 dma_len_offset;
+ u32 dma_size;
+ } rx;
};
#define MTK_DMA_MONITOR_TIMEOUT msecs_to_jiffies(1000)
@@ -1191,8 +1245,9 @@ struct mtk_soc_data {
/* struct mtk_eth - This is the main data structure for holding the state
* of the driver
* @dev: The device pointer
- * @dev: The device pointer used for dma mapping/alloc
+ * @dma_dev: The device pointer used for dma mapping/alloc
* @base: The mapped register i/o base
+ * @sram_pool: Pointer to SRAM pool used for DMA descriptor rings
* @page_lock: Make sure that register operations are atomic
* @tx_irq_lock: Make sure that IRQ register operations are atomic
* @rx_irq_lock: Make sure that IRQ register operations are atomic
@@ -1231,21 +1286,21 @@ struct mtk_soc_data {
* @mii_bus: If there is a bus we need to create an instance for it
* @pending_work: The workqueue used to reset the dma ring
* @state: Initialization and runtime state of the device
- * @soc: Holding specific data among vaious SoCs
+ * @soc: Holding specific data among various SoCs
*/
struct mtk_eth {
struct device *dev;
struct device *dma_dev;
void __iomem *base;
- void *sram_base;
+ struct gen_pool *sram_pool;
spinlock_t page_lock;
spinlock_t tx_irq_lock;
spinlock_t rx_irq_lock;
- struct net_device dummy_dev;
+ struct net_device *dummy_dev;
struct net_device *netdev[MTK_MAX_DEVS];
struct mtk_mac *mac[MTK_MAX_DEVS];
- int irq[3];
+ int irq[MTK_FE_IRQ_NUM];
u32 msg_enable;
unsigned long sysclk;
struct regmap *ethsys;
@@ -1261,10 +1316,11 @@ struct mtk_eth {
struct napi_struct rx_napi;
void *scratch_ring;
dma_addr_t phy_scratch_ring;
- void *scratch_head;
+ void *scratch_head[MTK_FQ_DMA_HEAD];
struct clk *clks[MTK_CLK_MAX];
struct mii_bus *mii_bus;
+ unsigned int mdc_divider;
struct work_struct pending_work;
unsigned long state;
@@ -1286,7 +1342,7 @@ struct mtk_eth {
struct metadata_dst *dsa_meta[MTK_MAX_DSA_PORTS];
- struct mtk_ppe *ppe[2];
+ struct mtk_ppe *ppe[3];
struct rhashtable flow_table;
struct bpf_prog __rcu *prog;
@@ -1311,6 +1367,7 @@ struct mtk_eth {
struct mtk_mac {
int id;
phy_interface_t interface;
+ u8 ppe_idx;
int speed;
struct device_node *of_node;
struct phylink *phylink;
@@ -1421,6 +1478,23 @@ static inline u32 mtk_get_ib2_multicast_mask(struct mtk_eth *eth)
return MTK_FOE_IB2_MULTICAST;
}
+static inline bool mtk_interface_mode_is_xgmii(struct mtk_eth *eth,
+ phy_interface_t interface)
+{
+ if (!mtk_is_netsys_v3_or_greater(eth))
+ return false;
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_INTERNAL:
+ case PHY_INTERFACE_MODE_USXGMII:
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_5GBASER:
+ return true;
+ default:
+ return false;
+ }
+}
+
/* read the hardware status register */
void mtk_stats_update_mac(struct mtk_mac *mac);
@@ -1429,10 +1503,11 @@ u32 mtk_r32(struct mtk_eth *eth, unsigned reg);
u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned int reg);
int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id);
+int mtk_gmac_2p5gphy_path_setup(struct mtk_eth *eth, int mac_id);
int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id);
int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id);
-int mtk_eth_offload_init(struct mtk_eth *eth);
+int mtk_eth_offload_init(struct mtk_eth *eth, u8 id);
int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data);
int mtk_flow_offload_cmd(struct mtk_eth *eth, struct flow_cls_offload *cls,
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
index 6ce0db3a1a92..ada852adc5f7 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -8,8 +8,11 @@
#include <linux/platform_device.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
+
#include <net/dst_metadata.h>
#include <net/dsa.h>
+#include <net/ipv6.h>
+
#include "mtk_eth_soc.h"
#include "mtk_ppe.h"
#include "mtk_ppe_regs.h"
@@ -338,7 +341,6 @@ int mtk_foe_entry_set_ipv6_tuple(struct mtk_eth *eth,
{
int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
u32 *src, *dest;
- int i;
switch (type) {
case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
@@ -359,10 +361,8 @@ int mtk_foe_entry_set_ipv6_tuple(struct mtk_eth *eth,
return -EINVAL;
}
- for (i = 0; i < 4; i++)
- src[i] = be32_to_cpu(src_addr[i]);
- for (i = 0; i < 4; i++)
- dest[i] = be32_to_cpu(dest_addr[i]);
+ ipv6_addr_be32_to_cpu(src, src_addr);
+ ipv6_addr_be32_to_cpu(dest, dest_addr);
return 0;
}
@@ -580,7 +580,7 @@ mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
idle = cur_idle;
entry->data.ib1 &= ~ib1_ts_mask;
- entry->data.ib1 |= hwe->ib1 & ib1_ts_mask;
+ entry->data.ib1 |= ib1 & ib1_ts_mask;
}
}
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h
index 691806bca372..223f709e2704 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
@@ -8,7 +8,7 @@
#include <linux/bitfield.h>
#include <linux/rhashtable.h>
-#define MTK_PPE_ENTRIES_SHIFT 3
+#define MTK_PPE_ENTRIES_SHIFT 4
#define MTK_PPE_ENTRIES (1024 << MTK_PPE_ENTRIES_SHIFT)
#define MTK_PPE_HASH_MASK (MTK_PPE_ENTRIES - 1)
#define MTK_PPE_WAIT_TIMEOUT_US 1000000
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
index 1a97feca77f2..570ebf91f693 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
@@ -3,6 +3,9 @@
#include <linux/kernel.h>
#include <linux/debugfs.h>
+
+#include <net/ipv6.h>
+
#include "mtk_eth_soc.h"
struct mtk_flow_addr_info
@@ -47,16 +50,14 @@ static const char *mtk_foe_pkt_type_str(int type)
static void
mtk_print_addr(struct seq_file *m, u32 *addr, bool ipv6)
{
- __be32 n_addr[4];
- int i;
+ __be32 n_addr[IPV6_ADDR_WORDS];
if (!ipv6) {
seq_printf(m, "%pI4h", addr);
return;
}
- for (i = 0; i < ARRAY_SIZE(n_addr); i++)
- n_addr[i] = htonl(addr[i]);
+ ipv6_addr_cpu_to_be32(n_addr, addr);
seq_printf(m, "%pI6", n_addr);
}
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
index fbb5e9d5af13..e9bd32741983 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -34,8 +34,10 @@ struct mtk_flow_data {
u16 vlan_in;
struct {
- u16 id;
- __be16 proto;
+ struct {
+ u16 id;
+ __be16 proto;
+ } vlans[2];
u8 num;
} vlan;
struct {
@@ -99,7 +101,9 @@ mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_i
if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED))
return -1;
+ rcu_read_lock();
err = dev_fill_forward_path(dev, addr, &stack);
+ rcu_read_unlock();
if (err)
return err;
@@ -245,10 +249,10 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f,
int ppe_index)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct net_device *idev = NULL, *odev = NULL;
struct flow_action_entry *act;
struct mtk_flow_data data = {};
struct mtk_foe_entry foe;
- struct net_device *odev = NULL;
struct mtk_flow_entry *entry;
int offload_type = 0;
int wed_index = -1;
@@ -264,6 +268,17 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f,
struct flow_match_meta match;
flow_rule_match_meta(rule, &match);
+ if (mtk_is_netsys_v2_or_greater(eth)) {
+ idev = __dev_get_by_index(&init_net, match.key->ingress_ifindex);
+ if (idev && idev->netdev_ops == eth->netdev[0]->netdev_ops) {
+ struct mtk_mac *mac = netdev_priv(idev);
+
+ if (WARN_ON(mac->ppe_idx >= eth->soc->ppe_num))
+ return -EINVAL;
+
+ ppe_index = mac->ppe_idx;
+ }
+ }
} else {
return -EOPNOTSUPP;
}
@@ -273,6 +288,10 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f,
flow_rule_match_control(rule, &match);
addr_type = match.key->addr_type;
+
+ if (flow_rule_has_control_flags(match.mask->flags,
+ f->common.extack))
+ return -EOPNOTSUPP;
} else {
return -EOPNOTSUPP;
}
@@ -334,18 +353,19 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f,
case FLOW_ACTION_CSUM:
break;
case FLOW_ACTION_VLAN_PUSH:
- if (data.vlan.num == 1 ||
+ if (data.vlan.num + data.pppoe.num == 2 ||
act->vlan.proto != htons(ETH_P_8021Q))
return -EOPNOTSUPP;
- data.vlan.id = act->vlan.vid;
- data.vlan.proto = act->vlan.proto;
+ data.vlan.vlans[data.vlan.num].id = act->vlan.vid;
+ data.vlan.vlans[data.vlan.num].proto = act->vlan.proto;
data.vlan.num++;
break;
case FLOW_ACTION_VLAN_POP:
break;
case FLOW_ACTION_PPPOE_PUSH:
- if (data.pppoe.num == 1)
+ if (data.pppoe.num == 1 ||
+ data.vlan.num == 2)
return -EOPNOTSUPP;
data.pppoe.sid = act->pppoe.sid;
@@ -435,12 +455,9 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f,
if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
foe.bridge.vlan = data.vlan_in;
- if (data.vlan.num == 1) {
- if (data.vlan.proto != htons(ETH_P_8021Q))
- return -EOPNOTSUPP;
+ for (i = 0; i < data.vlan.num; i++)
+ mtk_foe_entry_set_vlan(eth, &foe, data.vlan.vlans[i].id);
- mtk_foe_entry_set_vlan(eth, &foe, data.vlan.id);
- }
if (data.pppoe.num == 1)
mtk_foe_entry_set_pppoe(eth, &foe, data.pppoe.sid);
@@ -633,7 +650,9 @@ int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
}
}
-int mtk_eth_offload_init(struct mtk_eth *eth)
+int mtk_eth_offload_init(struct mtk_eth *eth, u8 id)
{
+ if (!eth->ppe[id] || !eth->ppe[id]->foe_table)
+ return 0;
return rhashtable_init(&eth->flow_table, &mtk_flow_ht_params);
}
diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c
index 31aebeb2e285..b83886a41121 100644
--- a/drivers/net/ethernet/mediatek/mtk_star_emac.c
+++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c
@@ -1163,6 +1163,7 @@ static int mtk_star_tx_poll(struct napi_struct *napi, int budget)
struct net_device *ndev = priv->ndev;
unsigned int head = ring->head;
unsigned int entry = ring->tail;
+ unsigned long flags;
while (entry != head && count < (MTK_STAR_RING_NUM_DESCS - 1)) {
ret = mtk_star_tx_complete_one(priv);
@@ -1182,9 +1183,9 @@ static int mtk_star_tx_poll(struct napi_struct *napi, int budget)
netif_wake_queue(ndev);
if (napi_complete(napi)) {
- spin_lock(&priv->lock);
+ spin_lock_irqsave(&priv->lock, flags);
mtk_star_enable_dma_irq(priv, false, true);
- spin_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, flags);
}
return 0;
@@ -1341,16 +1342,16 @@ push_new_skb:
static int mtk_star_rx_poll(struct napi_struct *napi, int budget)
{
struct mtk_star_priv *priv;
+ unsigned long flags;
int work_done = 0;
priv = container_of(napi, struct mtk_star_priv, rx_napi);
work_done = mtk_star_rx(priv, budget);
- if (work_done < budget) {
- napi_complete_done(napi, work_done);
- spin_lock(&priv->lock);
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ spin_lock_irqsave(&priv->lock, flags);
mtk_star_enable_dma_irq(priv, true, false);
- spin_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, flags);
}
return work_done;
@@ -1427,15 +1428,10 @@ static int mtk_star_mdio_init(struct net_device *ndev)
of_node = dev->of_node;
- mdio_node = of_get_child_by_name(of_node, "mdio");
+ mdio_node = of_get_available_child_by_name(of_node, "mdio");
if (!mdio_node)
return -ENODEV;
- if (!of_device_is_available(mdio_node)) {
- ret = -ENODEV;
- goto out_put_node;
- }
-
priv->mii = devm_mdiobus_alloc(dev);
if (!priv->mii) {
ret = -ENOMEM;
@@ -1467,6 +1463,8 @@ static __maybe_unused int mtk_star_suspend(struct device *dev)
if (netif_running(ndev))
mtk_star_disable(ndev);
+ netif_device_detach(ndev);
+
clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
return 0;
@@ -1491,6 +1489,8 @@ static __maybe_unused int mtk_star_resume(struct device *dev)
clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
}
+ netif_device_attach(ndev);
+
return ret;
}
@@ -1524,6 +1524,7 @@ static int mtk_star_probe(struct platform_device *pdev)
{
struct device_node *of_node;
struct mtk_star_priv *priv;
+ struct phy_device *phydev;
struct net_device *ndev;
struct device *dev;
void __iomem *base;
@@ -1649,6 +1650,12 @@ static int mtk_star_probe(struct platform_device *pdev)
netif_napi_add(ndev, &priv->rx_napi, mtk_star_rx_poll);
netif_napi_add_tx(ndev, &priv->tx_napi, mtk_star_tx_poll);
+ phydev = of_phy_find_device(priv->phy_node);
+ if (phydev) {
+ phydev->mac_managed_pm = true;
+ put_device(&phydev->mdio.dev);
+ }
+
return devm_register_netdev(dev, ndev);
}
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
index 61334a71058c..1ed1f88dd7f8 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -59,7 +59,9 @@ struct mtk_wed_flow_block_priv {
static const struct mtk_wed_soc_data mt7622_data = {
.regmap = {
.tx_bm_tkid = 0x088,
- .wpdma_rx_ring0 = 0x770,
+ .wpdma_rx_ring = {
+ 0x770,
+ },
.reset_idx_tx_mask = GENMASK(3, 0),
.reset_idx_rx_mask = GENMASK(17, 16),
},
@@ -70,7 +72,9 @@ static const struct mtk_wed_soc_data mt7622_data = {
static const struct mtk_wed_soc_data mt7986_data = {
.regmap = {
.tx_bm_tkid = 0x0c8,
- .wpdma_rx_ring0 = 0x770,
+ .wpdma_rx_ring = {
+ 0x770,
+ },
.reset_idx_tx_mask = GENMASK(1, 0),
.reset_idx_rx_mask = GENMASK(7, 6),
},
@@ -81,7 +85,10 @@ static const struct mtk_wed_soc_data mt7986_data = {
static const struct mtk_wed_soc_data mt7988_data = {
.regmap = {
.tx_bm_tkid = 0x0c8,
- .wpdma_rx_ring0 = 0x7d0,
+ .wpdma_rx_ring = {
+ 0x7d0,
+ 0x7d8,
+ },
.reset_idx_tx_mask = GENMASK(1, 0),
.reset_idx_rx_mask = GENMASK(7, 6),
},
@@ -621,8 +628,8 @@ mtk_wed_amsdu_init(struct mtk_wed_device *dev)
return ret;
}
- /* eagle E1 PCIE1 tx ring 22 flow control issue */
- if (dev->wlan.id == 0x7991)
+ /* Kite and Eagle E1 PCIE1 tx ring 22 flow control issue */
+ if (dev->wlan.id == 0x7991 || dev->wlan.id == 0x7992)
wed_clr(dev, MTK_WED_AMSDU_FIFO, MTK_WED_AMSDU_IS_PRIOR0_RING);
wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_AMSDU_EN);
@@ -670,7 +677,7 @@ mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
void *buf;
int s;
- page = __dev_alloc_page(GFP_KERNEL);
+ page = __dev_alloc_page(GFP_KERNEL | GFP_DMA32);
if (!page)
return -ENOMEM;
@@ -793,7 +800,7 @@ mtk_wed_hwrro_buffer_alloc(struct mtk_wed_device *dev)
struct page *page;
int s;
- page = __dev_alloc_page(GFP_KERNEL);
+ page = __dev_alloc_page(GFP_KERNEL | GFP_DMA32);
if (!page)
return -ENOMEM;
@@ -1239,7 +1246,11 @@ mtk_wed_set_wpdma(struct mtk_wed_device *dev)
return;
wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
- wed_w32(dev, dev->hw->soc->regmap.wpdma_rx_ring0, dev->wlan.wpdma_rx);
+ wed_w32(dev, dev->hw->soc->regmap.wpdma_rx_ring[0],
+ dev->wlan.wpdma_rx[0]);
+ if (mtk_wed_is_v3_or_greater(dev->hw))
+ wed_w32(dev, dev->hw->soc->regmap.wpdma_rx_ring[1],
+ dev->wlan.wpdma_rx[1]);
if (!dev->wlan.hw_rro)
return;
@@ -1318,26 +1329,14 @@ mtk_wed_rro_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
static int
mtk_wed_rro_alloc(struct mtk_wed_device *dev)
{
- struct reserved_mem *rmem;
- struct device_node *np;
- int index;
-
- index = of_property_match_string(dev->hw->node, "memory-region-names",
- "wo-dlm");
- if (index < 0)
- return index;
+ struct resource res;
+ int ret;
- np = of_parse_phandle(dev->hw->node, "memory-region", index);
- if (!np)
- return -ENODEV;
-
- rmem = of_reserved_mem_lookup(np);
- of_node_put(np);
-
- if (!rmem)
- return -ENODEV;
+ ret = of_reserved_mem_region_to_resource_byname(dev->hw->node, "wo-dlm", &res);
+ if (ret)
+ return ret;
- dev->rro.miod_phys = rmem->base;
+ dev->rro.miod_phys = res.start;
dev->rro.fdbk_phys = MTK_WED_MIOD_COUNT + dev->rro.miod_phys;
return mtk_wed_rro_ring_alloc(dev, &dev->rro.ring,
@@ -2000,7 +1999,7 @@ mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask)
if (mtk_wed_is_v3_or_greater(dev->hw))
wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_TKID_ALI_EN);
- /* initail tx interrupt trigger */
+ /* initial tx interrupt trigger */
wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX,
MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN |
MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR |
@@ -2011,7 +2010,7 @@ mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask)
FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG,
dev->wlan.tx_tbit[1]));
- /* initail txfree interrupt trigger */
+ /* initial txfree interrupt trigger */
wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX_FREE,
MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN |
MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_CLR |
@@ -2335,6 +2334,16 @@ mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
if (!dev->rx_wdma[i].desc)
mtk_wed_wdma_rx_ring_setup(dev, i, 16, false);
+ if (dev->wlan.hw_rro) {
+ for (i = 0; i < MTK_WED_RX_PAGE_QUEUES; i++) {
+ u32 addr = MTK_WED_RRO_MSDU_PG_CTRL0(i) +
+ MTK_WED_RING_OFS_COUNT;
+
+ if (!wed_r32(dev, addr))
+ wed_w32(dev, addr, 1);
+ }
+ }
+
mtk_wed_hw_init(dev);
mtk_wed_configure_irq(dev, irq_mask);
@@ -2417,6 +2426,10 @@ mtk_wed_attach(struct mtk_wed_device *dev)
dev->version = hw->version;
dev->hw->pcie_base = mtk_wed_get_pcie_base(dev);
+ ret = dma_set_mask_and_coherent(hw->dev, DMA_BIT_MASK(32));
+ if (ret)
+ goto out;
+
if (hw->eth->dma_dev == hw->eth->dev &&
of_dma_is_coherent(hw->eth->dev->of_node))
mtk_eth_set_dma_device(hw->eth, hw->dev);
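Taken together, the GFP_DMA32 allocations earlier in this file and the dma_set_mask_and_coherent() call here confine WED buffer memory to the 32-bit addressable window: the mask declares what the device can address, the allocation flag makes sure the pages actually land below 4 GiB. A minimal sketch under that assumption:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Probe time: declare the device's 32-bit addressing limit. */
static int example_set_dma32(struct device *dev)
{
    return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}

/* Buffer-fill time: keep the backing page itself below the 4 GiB boundary. */
static struct page *example_alloc_dma32_page(void)
{
    return __dev_alloc_page(GFP_KERNEL | GFP_DMA32);
}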
@@ -2666,14 +2679,15 @@ mtk_wed_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_pri
{
struct mtk_wed_flow_block_priv *priv = cb_priv;
struct flow_cls_offload *cls = type_data;
- struct mtk_wed_hw *hw = priv->hw;
+ struct mtk_wed_hw *hw = NULL;
- if (!tc_can_offload(priv->dev))
+ if (!priv || !tc_can_offload(priv->dev))
return -EOPNOTSUPP;
if (type != TC_SETUP_CLSFLOWER)
return -EOPNOTSUPP;
+ hw = priv->hw;
return mtk_flow_offload_cmd(hw->eth, cls, hw->index);
}
@@ -2729,6 +2743,7 @@ mtk_wed_setup_tc_block(struct mtk_wed_hw *hw, struct net_device *dev,
flow_block_cb_remove(block_cb, f);
list_del(&block_cb->driver_list);
kfree(block_cb->cb_priv);
+ block_cb->cb_priv = NULL;
}
return 0;
default:
@@ -2792,7 +2807,6 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
if (!pdev)
goto err_of_node_put;
- get_device(&pdev->dev);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
goto err_put_device;
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.h b/drivers/net/ethernet/mediatek/mtk_wed.h
index c1f0479d7a71..b49aee9a8b65 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed.h
@@ -17,7 +17,7 @@ struct mtk_wed_wo;
struct mtk_wed_soc_data {
struct {
u32 tx_bm_tkid;
- u32 wpdma_rx_ring0;
+ u32 wpdma_rx_ring[MTK_WED_RX_QUEUES];
u32 reset_idx_tx_mask;
u32 reset_idx_rx_mask;
} regmap;
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
index ea0884186d76..fa6b21603416 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
@@ -10,7 +10,7 @@
#include <linux/of_reserved_mem.h>
#include <linux/mfd/syscon.h>
#include <linux/soc/mediatek/mtk_wed.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "mtk_wed_regs.h"
#include "mtk_wed_wo.h"
@@ -234,27 +234,23 @@ int mtk_wed_mcu_msg_update(struct mtk_wed_device *dev, int id, void *data,
}
static int
-mtk_wed_get_memory_region(struct mtk_wed_hw *hw, int index,
+mtk_wed_get_memory_region(struct mtk_wed_hw *hw, const char *name,
struct mtk_wed_wo_memory_region *region)
{
- struct reserved_mem *rmem;
- struct device_node *np;
-
- np = of_parse_phandle(hw->node, "memory-region", index);
- if (!np)
- return -ENODEV;
-
- rmem = of_reserved_mem_lookup(np);
- of_node_put(np);
+ struct resource res;
+ int ret;
- if (!rmem)
- return -ENODEV;
+ ret = of_reserved_mem_region_to_resource_byname(hw->node, name, &res);
+ if (ret)
+ return 0;
- region->phy_addr = rmem->base;
- region->size = rmem->size;
- region->addr = devm_ioremap(hw->dev, region->phy_addr, region->size);
+ region->phy_addr = res.start;
+ region->size = resource_size(&res);
+ region->addr = devm_ioremap_resource(hw->dev, &res);
+ if (IS_ERR(region->addr))
+ return PTR_ERR(region->addr);
- return !region->addr ? -EINVAL : 0;
+ return 0;
}
static int
@@ -319,13 +315,7 @@ mtk_wed_mcu_load_firmware(struct mtk_wed_wo *wo)
/* load firmware region metadata */
for (i = 0; i < ARRAY_SIZE(mem_region); i++) {
- int index = of_property_match_string(wo->hw->node,
- "memory-region-names",
- mem_region[i].name);
- if (index < 0)
- continue;
-
- ret = mtk_wed_get_memory_region(wo->hw, index, &mem_region[i]);
+ ret = mtk_wed_get_memory_region(wo->hw, mem_region[i].name, &mem_region[i]);
if (ret)
return ret;
}
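Both mtk_wed.c and mtk_wed_mcu.c drop the open-coded of_property_match_string() / of_parse_phandle() / of_reserved_mem_lookup() sequence in favour of of_reserved_mem_region_to_resource_byname(), which resolves a named memory-region phandle straight into a struct resource. A hedged sketch of typical consumer code (example_* names are illustrative):

#include <linux/of_reserved_mem.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/err.h>

static void __iomem *example_map_named_region(struct device *dev,
                                              struct device_node *np,
                                              const char *name,
                                              resource_size_t *size)
{
    struct resource res;
    int ret;

    /* Looks up "name" in memory-region-names and fills res with the
     * matching reserved-memory range. */
    ret = of_reserved_mem_region_to_resource_byname(np, name, &res);
    if (ret)
        return ERR_PTR(ret);

    *size = resource_size(&res);
    return devm_ioremap_resource(dev, &res);
}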
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.h b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
index 87a67fa3868d..c01b1e8428f6 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
@@ -91,8 +91,8 @@ enum mtk_wed_dummy_cr_idx {
#define MT7981_FIRMWARE_WO "mediatek/mt7981_wo.bin"
#define MT7986_FIRMWARE_WO0 "mediatek/mt7986_wo_0.bin"
#define MT7986_FIRMWARE_WO1 "mediatek/mt7986_wo_1.bin"
-#define MT7988_FIRMWARE_WO0 "mediatek/mt7988_wo_0.bin"
-#define MT7988_FIRMWARE_WO1 "mediatek/mt7988_wo_1.bin"
+#define MT7988_FIRMWARE_WO0 "mediatek/mt7988/mt7988_wo_0.bin"
+#define MT7988_FIRMWARE_WO1 "mediatek/mt7988/mt7988_wo_1.bin"
#define MTK_WO_MCU_CFG_LS_BASE 0
#define MTK_WO_MCU_CFG_LS_HW_VER_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x000)
diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig
index 825e05fb8607..0b1cb340206f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig
@@ -7,6 +7,7 @@ config MLX4_EN
tristate "Mellanox Technologies 1/10/40Gbit Ethernet support"
depends on PCI && NETDEVICES && ETHERNET && INET
depends on PTP_1588_CLOCK_OPTIONAL
+ select PAGE_POOL
select MLX4_CORE
help
This driver supports Mellanox Technologies ConnectX Ethernet
diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c
index b330020dc0d6..07b061a97a6e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c
@@ -526,28 +526,6 @@ out:
return res;
}
-u32 mlx4_zone_free_entries(struct mlx4_zone_allocator *zones, u32 uid, u32 obj, u32 count)
-{
- struct mlx4_zone_entry *zone;
- int res = 0;
-
- spin_lock(&zones->lock);
-
- zone = __mlx4_find_zone_by_uid(zones, uid);
-
- if (NULL == zone) {
- res = -1;
- goto out;
- }
-
- __mlx4_free_from_zone(zone, obj, count);
-
-out:
- spin_unlock(&zones->lock);
-
- return res;
-}
-
u32 mlx4_zone_free_entries_unique(struct mlx4_zone_allocator *zones, u32 obj, u32 count)
{
struct mlx4_zone_entry *zone;
@@ -682,9 +660,9 @@ static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device)
}
static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir,
- struct mlx4_db *db, int order)
+ struct mlx4_db *db, unsigned int order)
{
- int o;
+ unsigned int o;
int i;
for (o = order; o <= 1; ++o) {
@@ -712,7 +690,7 @@ found:
return 0;
}
-int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order)
+int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, unsigned int order)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_db_pgdir *pgdir;
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index 0d8a362c2673..edcc6f662618 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -236,7 +236,7 @@ static void dump_err_buf(struct mlx4_dev *dev)
static void poll_catas(struct timer_list *t)
{
- struct mlx4_priv *priv = from_timer(priv, t, catas_err.timer);
+ struct mlx4_priv *priv = timer_container_of(priv, t, catas_err.timer);
struct mlx4_dev *dev = &priv->dev;
u32 slave_read;
@@ -305,7 +305,7 @@ void mlx4_stop_catas_poll(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- del_timer_sync(&priv->catas_err.timer);
+ timer_delete_sync(&priv->catas_err.timer);
if (priv->catas_err.map) {
iounmap(priv->catas_err.map);
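The catas poller hunks are a mechanical conversion to the renamed timer API: from_timer() becomes timer_container_of() and del_timer_sync() becomes timer_delete_sync(), with no behavioural change. A minimal sketch of the pattern:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct example_dev {
    struct timer_list poll_timer;
};

static void example_poll(struct timer_list *t)
{
    /* timer_container_of() maps the timer back to its container,
     * exactly like the old from_timer(). */
    struct example_dev *ed = timer_container_of(ed, t, poll_timer);

    /* ... poll the hardware, then re-arm ... */
    mod_timer(&ed->poll_timer, jiffies + HZ);
}

static void example_stop(struct example_dev *ed)
{
    timer_delete_sync(&ed->poll_timer);     /* was del_timer_sync() */
}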
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index cd754cd76bde..2aeaafcfb993 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -38,7 +38,7 @@
/* mlx4_en_read_clock - read raw cycle counter (to be used by time counter)
*/
-static u64 mlx4_en_read_clock(const struct cyclecounter *tc)
+static u64 mlx4_en_read_clock(struct cyclecounter *tc)
{
struct mlx4_en_dev *mdev =
container_of(tc, struct mlx4_en_dev, cycles);
@@ -249,7 +249,7 @@ static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
static u32 freq_to_shift(u16 freq)
{
u32 freq_khz = freq * 1000;
- u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
+ u64 max_val_cycles = freq_khz * 1000ULL * MLX4_EN_WRAP_AROUND_SEC;
u64 max_val_cycles_rounded = 1ULL << fls64(max_val_cycles - 1);
/* calculate max possible multiplier in order to fit in 64bit */
u64 max_mul = div64_u64(ULLONG_MAX, max_val_cycles_rounded);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 1184ac5751e1..0e92956e84cf 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -126,6 +126,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
cq_idx = cq_idx % priv->rx_ring_num;
rx_cq = priv->rx_cq[cq_idx];
cq->vector = rx_cq->vector;
+ irq = mlx4_eq_get_irq(mdev->dev, cq->vector);
}
if (cq->type == RX)
@@ -142,18 +143,24 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
if (err)
goto free_eq;
+ cq->cq_idx = cq_idx;
cq->mcq.event = mlx4_en_cq_event;
switch (cq->type) {
case TX:
cq->mcq.comp = mlx4_en_tx_irq;
netif_napi_add_tx(cq->dev, &cq->napi, mlx4_en_poll_tx_cq);
+ netif_napi_set_irq(&cq->napi, irq);
napi_enable(&cq->napi);
+ netif_queue_set_napi(cq->dev, cq_idx, NETDEV_QUEUE_TYPE_TX, &cq->napi);
break;
case RX:
cq->mcq.comp = mlx4_en_rx_irq;
- netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq);
+ netif_napi_add_config(cq->dev, &cq->napi, mlx4_en_poll_rx_cq,
+ cq_idx);
+ netif_napi_set_irq(&cq->napi, irq);
napi_enable(&cq->napi);
+ netif_queue_set_napi(cq->dev, cq_idx, NETDEV_QUEUE_TYPE_RX, &cq->napi);
break;
case TX_XDP:
/* nothing regarding napi, it's shared with rx ring */
@@ -189,6 +196,14 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
{
if (cq->type != TX_XDP) {
+ enum netdev_queue_type qtype;
+
+ if (cq->type == RX)
+ qtype = NETDEV_QUEUE_TYPE_RX;
+ else
+ qtype = NETDEV_QUEUE_TYPE_TX;
+
+ netif_queue_set_napi(cq->dev, cq->cq_idx, qtype, NULL);
napi_disable(&cq->napi);
netif_napi_del(&cq->napi);
}
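The en_cq.c changes register each CQ's NAPI context with its queue and IRQ (netif_napi_set_irq(), netif_queue_set_napi(), and the persistent-config variant netif_napi_add_config()), and clear that mapping again before the NAPI is disabled, so the netdev netlink family reports consistent queue/NAPI/IRQ relationships. A hedged sketch of the activate/deactivate pairing with illustrative names:

#include <linux/netdevice.h>

static void example_activate_rx(struct net_device *dev,
                                struct napi_struct *napi,
                                int qid, int irq,
                                int (*poll)(struct napi_struct *, int))
{
    /* The config slot keyed by qid lets per-NAPI settings survive
     * queue reconfiguration. */
    netif_napi_add_config(dev, napi, poll, qid);
    netif_napi_set_irq(napi, irq);
    napi_enable(napi);
    netif_queue_set_napi(dev, qid, NETDEV_QUEUE_TYPE_RX, napi);
}

static void example_deactivate_rx(struct net_device *dev,
                                  struct napi_struct *napi, int qid)
{
    /* Unpublish the mapping before tearing the NAPI down. */
    netif_queue_set_napi(dev, qid, NETDEV_QUEUE_TYPE_RX, NULL);
    napi_disable(napi);
    netif_napi_del(napi);
}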
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
index 752a72499b4f..be80da03a594 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -290,9 +290,6 @@ static int mlx4_en_dcbnl_ieee_getets(struct net_device *dev,
struct mlx4_en_priv *priv = netdev_priv(dev);
struct ieee_ets *my_ets = &priv->ets;
- if (!my_ets)
- return -EINVAL;
-
ets->ets_cap = IEEE_8021QAZ_MAX_TCS;
ets->cbs = my_ets->cbs;
memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 619e1c3ef7f9..ad6298456639 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -450,7 +450,6 @@ static void mlx4_en_get_strings(struct net_device *dev,
uint32_t stringset, uint8_t *data)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- int index = 0;
int i, strings = 0;
struct bitmap_iterator it;
@@ -459,10 +458,10 @@ static void mlx4_en_get_strings(struct net_device *dev,
switch (stringset) {
case ETH_SS_TEST:
for (i = 0; i < MLX4_EN_NUM_SELF_TEST - 2; i++)
- strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
+ ethtool_puts(&data, mlx4_en_test_names[i]);
if (priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UC_LOOPBACK)
for (; i < MLX4_EN_NUM_SELF_TEST; i++)
- strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
+ ethtool_puts(&data, mlx4_en_test_names[i]);
break;
case ETH_SS_STATS:
@@ -470,74 +469,56 @@ static void mlx4_en_get_strings(struct net_device *dev,
for (i = 0; i < NUM_MAIN_STATS; i++, strings++,
bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
- strcpy(data + (index++) * ETH_GSTRING_LEN,
- main_strings[strings]);
+ ethtool_puts(&data, main_strings[strings]);
for (i = 0; i < NUM_PORT_STATS; i++, strings++,
bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
- strcpy(data + (index++) * ETH_GSTRING_LEN,
- main_strings[strings]);
+ ethtool_puts(&data, main_strings[strings]);
for (i = 0; i < NUM_PF_STATS; i++, strings++,
bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
- strcpy(data + (index++) * ETH_GSTRING_LEN,
- main_strings[strings]);
+ ethtool_puts(&data, main_strings[strings]);
for (i = 0; i < NUM_FLOW_STATS; i++, strings++,
bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
- strcpy(data + (index++) * ETH_GSTRING_LEN,
- main_strings[strings]);
+ ethtool_puts(&data, main_strings[strings]);
for (i = 0; i < NUM_PKT_STATS; i++, strings++,
bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
- strcpy(data + (index++) * ETH_GSTRING_LEN,
- main_strings[strings]);
+ ethtool_puts(&data, main_strings[strings]);
for (i = 0; i < NUM_XDP_STATS; i++, strings++,
bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
- strcpy(data + (index++) * ETH_GSTRING_LEN,
- main_strings[strings]);
+ ethtool_puts(&data, main_strings[strings]);
for (i = 0; i < NUM_PHY_STATS; i++, strings++,
bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
- strcpy(data + (index++) * ETH_GSTRING_LEN,
- main_strings[strings]);
+ ethtool_puts(&data, main_strings[strings]);
for (i = 0; i < priv->tx_ring_num[TX]; i++) {
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "tx%d_packets", i);
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "tx%d_bytes", i);
+ ethtool_sprintf(&data, "tx%d_packets", i);
+ ethtool_sprintf(&data, "tx%d_bytes", i);
}
for (i = 0; i < priv->rx_ring_num; i++) {
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "rx%d_packets", i);
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "rx%d_bytes", i);
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "rx%d_dropped", i);
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "rx%d_xdp_drop", i);
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "rx%d_xdp_redirect", i);
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "rx%d_xdp_redirect_fail", i);
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "rx%d_xdp_tx", i);
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "rx%d_xdp_tx_full", i);
+ ethtool_sprintf(&data, "rx%d_packets", i);
+ ethtool_sprintf(&data, "rx%d_bytes", i);
+ ethtool_sprintf(&data, "rx%d_dropped", i);
+ ethtool_sprintf(&data, "rx%d_xdp_drop", i);
+ ethtool_sprintf(&data, "rx%d_xdp_redirect", i);
+ ethtool_sprintf(&data, "rx%d_xdp_redirect_fail", i);
+ ethtool_sprintf(&data, "rx%d_xdp_tx", i);
+ ethtool_sprintf(&data, "rx%d_xdp_tx_full", i);
}
break;
case ETH_SS_PRIV_FLAGS:
for (i = 0; i < ARRAY_SIZE(mlx4_en_priv_flags); i++)
- strcpy(data + i * ETH_GSTRING_LEN,
- mlx4_en_priv_flags[i]);
+ ethtool_puts(&data, mlx4_en_priv_flags[i]);
break;
}
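The get_strings conversion above replaces manual "data + index * ETH_GSTRING_LEN" arithmetic with ethtool_puts()/ethtool_sprintf(), which copy one string per call and advance the cursor themselves. A short sketch of the idiom (the stat names are made up for illustration):

#include <linux/ethtool.h>
#include <linux/kernel.h>

static const char * const example_stats[] = { "rx_good", "rx_bad" };

static void example_get_strings(struct net_device *dev, u32 sset, u8 *data)
{
    int i;

    if (sset != ETH_SS_STATS)
        return;

    /* Each call fills one ETH_GSTRING_LEN slot and bumps the data pointer. */
    for (i = 0; i < ARRAY_SIZE(example_stats); i++)
        ethtool_puts(&data, example_stats[i]);

    for (i = 0; i < 4; i++)
        ethtool_sprintf(&data, "ring%d_packets", i);
}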
@@ -1746,6 +1727,13 @@ static int mlx4_en_get_num_flows(struct mlx4_en_priv *priv)
}
+static u32 mlx4_en_get_rx_ring_count(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ return priv->rx_ring_num;
+}
+
static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
@@ -1762,9 +1750,6 @@ static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
return -EINVAL;
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = priv->rx_ring_num;
- break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = mlx4_en_get_num_flows(priv);
break;
@@ -1903,7 +1888,7 @@ out:
}
static int mlx4_en_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
@@ -1916,6 +1901,7 @@ static int mlx4_en_get_ts_info(struct net_device *dev,
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
info->so_timestamping |=
SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
@@ -2055,20 +2041,20 @@ static int mlx4_en_get_module_info(struct net_device *dev,
switch (data[0] /* identifier */) {
case MLX4_MODULE_ID_QSFP:
modinfo->type = ETH_MODULE_SFF_8436;
- modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
break;
case MLX4_MODULE_ID_QSFP_PLUS:
if (data[1] >= 0x3) { /* revision id */
modinfo->type = ETH_MODULE_SFF_8636;
- modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
} else {
modinfo->type = ETH_MODULE_SFF_8436;
- modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
}
break;
case MLX4_MODULE_ID_QSFP28:
modinfo->type = ETH_MODULE_SFF_8636;
- modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
break;
case MLX4_MODULE_ID_SFP:
modinfo->type = ETH_MODULE_SFF_8472;
@@ -2172,6 +2158,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
.set_ringparam = mlx4_en_set_ringparam,
.get_rxnfc = mlx4_en_get_rxnfc,
.set_rxnfc = mlx4_en_set_rxnfc,
+ .get_rx_ring_count = mlx4_en_get_rx_ring_count,
.get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size,
.get_rxfh_key_size = mlx4_en_get_rxfh_key_size,
.get_rxfh = mlx4_en_get_rxfh,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 5d3fde63b273..81bf8908b897 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -43,6 +43,7 @@
#include <net/vxlan.h>
#include <net/devlink.h>
#include <net/rps.h>
+#include <net/netdev_queues.h>
#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
@@ -1179,9 +1180,9 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
mlx4_unregister_mac(mdev->dev, priv->port, mac);
hlist_del_rcu(&entry->hlist);
- kfree_rcu(entry, rcu);
en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n",
entry->mac, priv->port);
+ kfree_rcu(entry, rcu);
++removed;
}
}
@@ -1649,7 +1650,7 @@ int mlx4_en_start_port(struct net_device *dev)
sizeof(struct ethtool_flow_id) * MAX_NUM_OF_FS_RULES);
/* Calculate Rx buf size */
- dev->mtu = min(dev->mtu, priv->max_mtu);
+ WRITE_ONCE(dev->mtu, min(dev->mtu, priv->max_mtu));
mlx4_en_calc_rx_buf(dev);
en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);
@@ -2073,6 +2074,7 @@ static void mlx4_en_clear_stats(struct net_device *dev)
priv->rx_ring[i]->csum_ok = 0;
priv->rx_ring[i]->csum_none = 0;
priv->rx_ring[i]->csum_complete = 0;
+ priv->rx_ring[i]->alloc_fail = 0;
}
}
@@ -2394,7 +2396,7 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
!mlx4_en_check_xdp_mtu(dev, new_mtu))
return -EOPNOTSUPP;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
if (netif_running(dev)) {
mutex_lock(&mdev->state_lock);
@@ -2418,21 +2420,22 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
-static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+static int mlx4_en_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
- struct hwtstamp_config config;
-
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
/* device doesn't support time stamping */
- if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS))
+ if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "device doesn't support time stamping");
return -EINVAL;
+ }
/* TX HW timestamp */
- switch (config.tx_type) {
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
case HWTSTAMP_TX_ON:
break;
@@ -2441,7 +2444,7 @@ static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
}
/* RX HW timestamp */
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
break;
case HWTSTAMP_FILTER_ALL:
@@ -2459,39 +2462,27 @@ static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_NTP_ALL:
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
return -ERANGE;
}
if (mlx4_en_reset_config(dev, config, dev->features)) {
- config.tx_type = HWTSTAMP_TX_OFF;
- config.rx_filter = HWTSTAMP_FILTER_NONE;
+ config->tx_type = HWTSTAMP_TX_OFF;
+ config->rx_filter = HWTSTAMP_FILTER_NONE;
}
- return copy_to_user(ifr->ifr_data, &config,
- sizeof(config)) ? -EFAULT : 0;
+ return 0;
}
-static int mlx4_en_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+static int mlx4_en_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *config)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- return copy_to_user(ifr->ifr_data, &priv->hwtstamp_config,
- sizeof(priv->hwtstamp_config)) ? -EFAULT : 0;
-}
-
-static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return mlx4_en_hwtstamp_set(dev, ifr);
- case SIOCGHWTSTAMP:
- return mlx4_en_hwtstamp_get(dev, ifr);
- default:
- return -EOPNOTSUPP;
- }
+ *config = priv->hwtstamp_config;
+ return 0;
}
static netdev_features_t mlx4_en_fix_features(struct net_device *netdev,
@@ -2558,7 +2549,7 @@ static int mlx4_en_set_features(struct net_device *netdev,
}
if (reset) {
- ret = mlx4_en_reset_config(netdev, priv->hwtstamp_config,
+ ret = mlx4_en_reset_config(netdev, &priv->hwtstamp_config,
features);
if (ret)
return ret;
@@ -2668,8 +2659,7 @@ static int mlx4_udp_tunnel_sync(struct net_device *dev, unsigned int table)
static const struct udp_tunnel_nic_info mlx4_udp_tunnels = {
.sync_table = mlx4_udp_tunnel_sync,
- .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
- UDP_TUNNEL_NIC_INFO_IPV4_ONLY,
+ .flags = UDP_TUNNEL_NIC_INFO_IPV4_ONLY,
.tables = {
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
},
@@ -2843,7 +2833,6 @@ static const struct net_device_ops mlx4_netdev_ops = {
.ndo_set_mac_address = mlx4_en_set_mac,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = mlx4_en_change_mtu,
- .ndo_eth_ioctl = mlx4_en_ioctl,
.ndo_tx_timeout = mlx4_en_tx_timeout,
.ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid,
@@ -2857,6 +2846,8 @@ static const struct net_device_ops mlx4_netdev_ops = {
.ndo_features_check = mlx4_en_features_check,
.ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
.ndo_bpf = mlx4_xdp,
+ .ndo_hwtstamp_get = mlx4_en_hwtstamp_get,
+ .ndo_hwtstamp_set = mlx4_en_hwtstamp_set,
};
static const struct net_device_ops mlx4_netdev_ops_master = {
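With the hunks above, hardware timestamping moves off the SIOCSHWTSTAMP/SIOCGHWTSTAMP ioctl path: the core now performs the user-space copies and hands the driver a struct kernel_hwtstamp_config through the dedicated ndo_hwtstamp_get()/ndo_hwtstamp_set() callbacks, with a netlink extack for error reporting. A hedged sketch of a driver-side implementation (example_* names and the one-field private struct are illustrative):

#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/net_tstamp.h>

struct example_priv {
    struct kernel_hwtstamp_config hwtstamp_config;
};

static int example_hwtstamp_get(struct net_device *dev,
                                struct kernel_hwtstamp_config *cfg)
{
    struct example_priv *priv = netdev_priv(dev);

    *cfg = priv->hwtstamp_config;   /* core copies this back to user space */
    return 0;
}

static int example_hwtstamp_set(struct net_device *dev,
                                struct kernel_hwtstamp_config *cfg,
                                struct netlink_ext_ack *extack)
{
    struct example_priv *priv = netdev_priv(dev);

    if (cfg->tx_type != HWTSTAMP_TX_OFF && cfg->tx_type != HWTSTAMP_TX_ON) {
        NL_SET_ERR_MSG_MOD(extack, "unsupported tx_type");
        return -ERANGE;
    }
    /* ... program the hardware, normalising cfg->rx_filter as needed ... */
    priv->hwtstamp_config = *cfg;
    return 0;
}

static const struct net_device_ops example_netdev_ops = {
    .ndo_hwtstamp_get = example_hwtstamp_get,
    .ndo_hwtstamp_set = example_hwtstamp_set,
};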
@@ -3099,6 +3090,77 @@ void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
last_i += NUM_PHY_STATS;
}
+static void mlx4_get_queue_stats_rx(struct net_device *dev, int i,
+ struct netdev_queue_stats_rx *stats)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ const struct mlx4_en_rx_ring *ring;
+
+ spin_lock_bh(&priv->stats_lock);
+
+ if (!priv->port_up || mlx4_is_master(priv->mdev->dev))
+ goto out_unlock;
+
+ ring = priv->rx_ring[i];
+ stats->packets = READ_ONCE(ring->packets);
+ stats->bytes = READ_ONCE(ring->bytes);
+ stats->alloc_fail = READ_ONCE(ring->alloc_fail);
+
+out_unlock:
+ spin_unlock_bh(&priv->stats_lock);
+}
+
+static void mlx4_get_queue_stats_tx(struct net_device *dev, int i,
+ struct netdev_queue_stats_tx *stats)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ const struct mlx4_en_tx_ring *ring;
+
+ spin_lock_bh(&priv->stats_lock);
+
+ if (!priv->port_up || mlx4_is_master(priv->mdev->dev))
+ goto out_unlock;
+
+ ring = priv->tx_ring[TX][i];
+ stats->packets = READ_ONCE(ring->packets);
+ stats->bytes = READ_ONCE(ring->bytes);
+
+out_unlock:
+ spin_unlock_bh(&priv->stats_lock);
+}
+
+static void mlx4_get_base_stats(struct net_device *dev,
+ struct netdev_queue_stats_rx *rx,
+ struct netdev_queue_stats_tx *tx)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ spin_lock_bh(&priv->stats_lock);
+
+ if (!priv->port_up || mlx4_is_master(priv->mdev->dev))
+ goto out_unlock;
+
+ if (priv->rx_ring_num) {
+ rx->packets = 0;
+ rx->bytes = 0;
+ rx->alloc_fail = 0;
+ }
+
+ if (priv->tx_ring_num[TX]) {
+ tx->packets = 0;
+ tx->bytes = 0;
+ }
+
+out_unlock:
+ spin_unlock_bh(&priv->stats_lock);
+}
+
+static const struct netdev_stat_ops mlx4_stat_ops = {
+ .get_queue_stats_rx = mlx4_get_queue_stats_rx,
+ .get_queue_stats_tx = mlx4_get_queue_stats_tx,
+ .get_base_stats = mlx4_get_base_stats,
+};
+
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
struct mlx4_en_port_profile *prof)
{
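The block above exposes mlx4's per-ring counters through struct netdev_stat_ops, the interface behind the netdev netlink family's qstats. get_base_stats() reports whatever is not attributable to the currently visible queues; a driver that accounts everything per queue can simply zero those fields, as mlx4 does. The ops are hooked up via dev->stat_ops in the following hunk. A small sketch of the shape of such an implementation (the counters here are placeholders):

#include <net/netdev_queues.h>

static void example_get_queue_stats_rx(struct net_device *dev, int idx,
                                       struct netdev_queue_stats_rx *stats)
{
    /* Write only the fields the driver really tracks; untouched fields
     * count as "not collected". */
    stats->packets = 0;     /* e.g. READ_ONCE(rx_ring[idx]->packets) */
    stats->bytes = 0;
}

static void example_get_queue_stats_tx(struct net_device *dev, int idx,
                                       struct netdev_queue_stats_tx *stats)
{
    stats->packets = 0;
    stats->bytes = 0;
}

static void example_get_base_stats(struct net_device *dev,
                                   struct netdev_queue_stats_rx *rx,
                                   struct netdev_queue_stats_tx *tx)
{
    /* Everything is attributed per queue in this sketch. */
    rx->packets = 0;
    rx->bytes = 0;
    tx->packets = 0;
    tx->bytes = 0;
}

static const struct netdev_stat_ops example_stat_ops = {
    .get_queue_stats_rx = example_get_queue_stats_rx,
    .get_queue_stats_tx = example_get_queue_stats_tx,
    .get_base_stats = example_get_base_stats,
};

/* In probe, after allocating the netdev: dev->stat_ops = &example_stat_ops; */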
@@ -3262,6 +3324,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
+ dev->stat_ops = &mlx4_stat_ops;
dev->ethtool_ops = &mlx4_en_ethtool_ops;
/*
@@ -3439,7 +3502,7 @@ out:
}
int mlx4_en_reset_config(struct net_device *dev,
- struct hwtstamp_config ts_config,
+ struct kernel_hwtstamp_config *ts_config,
netdev_features_t features)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
@@ -3449,8 +3512,8 @@ int mlx4_en_reset_config(struct net_device *dev,
int port_up = 0;
int err = 0;
- if (priv->hwtstamp_config.tx_type == ts_config.tx_type &&
- priv->hwtstamp_config.rx_filter == ts_config.rx_filter &&
+ if (priv->hwtstamp_config.tx_type == ts_config->tx_type &&
+ priv->hwtstamp_config.rx_filter == ts_config->rx_filter &&
!DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
!DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS))
return 0; /* Nothing to change */
@@ -3469,7 +3532,7 @@ int mlx4_en_reset_config(struct net_device *dev,
mutex_lock(&mdev->state_lock);
memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
- memcpy(&new_prof.hwtstamp_config, &ts_config, sizeof(ts_config));
+ memcpy(&new_prof.hwtstamp_config, ts_config, sizeof(*ts_config));
err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
if (err)
@@ -3487,7 +3550,7 @@ int mlx4_en_reset_config(struct net_device *dev,
dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
else
dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
- } else if (ts_config.rx_filter == HWTSTAMP_FILTER_NONE) {
+ } else if (ts_config->rx_filter == HWTSTAMP_FILTER_NONE) {
/* RX time-stamping is OFF, update the RX vlan offload
* to the latest wanted state
*/
@@ -3508,7 +3571,7 @@ int mlx4_en_reset_config(struct net_device *dev,
* Regardless of the caller's choice,
* Turn Off RX vlan offload in case of time-stamping is ON
*/
- if (ts_config.rx_filter != HWTSTAMP_FILTER_NONE) {
+ if (ts_config->rx_filter != HWTSTAMP_FILTER_NONE) {
if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
en_warn(priv, "Turning off RX vlan offload since RX time-stamping is ON\n");
dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index eac49657bd07..13666d50b90f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -42,63 +42,49 @@
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <linux/irq.h>
+#include <linux/skbuff_ref.h>
#include <net/ip.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_checksum.h>
#endif
+#include <net/page_pool/helpers.h>
#include "mlx4_en.h"
-static int mlx4_alloc_page(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_alloc *frag,
- gfp_t gfp)
-{
- struct page *page;
- dma_addr_t dma;
-
- page = alloc_page(gfp);
- if (unlikely(!page))
- return -ENOMEM;
- dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE, priv->dma_dir);
- if (unlikely(dma_mapping_error(priv->ddev, dma))) {
- __free_page(page);
- return -ENOMEM;
- }
- frag->page = page;
- frag->dma = dma;
- frag->page_offset = priv->rx_headroom;
- return 0;
-}
-
static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring,
struct mlx4_en_rx_desc *rx_desc,
struct mlx4_en_rx_alloc *frags,
gfp_t gfp)
{
+ dma_addr_t dma;
int i;
for (i = 0; i < priv->num_frags; i++, frags++) {
if (!frags->page) {
- if (mlx4_alloc_page(priv, frags, gfp))
+ frags->page = page_pool_alloc_pages(ring->pp, gfp);
+ if (!frags->page) {
+ ring->alloc_fail++;
return -ENOMEM;
+ }
+ page_pool_fragment_page(frags->page, 1);
+ frags->page_offset = priv->rx_headroom;
+
ring->rx_alloc_pages++;
}
- rx_desc->data[i].addr = cpu_to_be64(frags->dma +
- frags->page_offset);
+ dma = page_pool_get_dma_addr(frags->page);
+ rx_desc->data[i].addr = cpu_to_be64(dma + frags->page_offset);
}
return 0;
}
static void mlx4_en_free_frag(const struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *ring,
struct mlx4_en_rx_alloc *frag)
{
- if (frag->page) {
- dma_unmap_page(priv->ddev, frag->dma,
- PAGE_SIZE, priv->dma_dir);
- __free_page(frag->page);
- }
+ if (frag->page)
+ page_pool_put_full_page(ring->pp, frag->page, false);
/* We need to clear all fields, otherwise a change of priv->log_rx_info
* could lead to see garbage later in frag->page.
*/
@@ -138,18 +124,6 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
(index << ring->log_stride);
struct mlx4_en_rx_alloc *frags = ring->rx_info +
(index << priv->log_rx_info);
- if (likely(ring->page_cache.index > 0)) {
- /* XDP uses a single page per frame */
- if (!frags->page) {
- ring->page_cache.index--;
- frags->page = ring->page_cache.buf[ring->page_cache.index].page;
- frags->dma = ring->page_cache.buf[ring->page_cache.index].dma;
- }
- frags->page_offset = XDP_PACKET_HEADROOM;
- rx_desc->data[0].addr = cpu_to_be64(frags->dma +
- XDP_PACKET_HEADROOM);
- return 0;
- }
return mlx4_en_alloc_frags(priv, ring, rx_desc, frags, gfp);
}
@@ -175,7 +149,7 @@ static void mlx4_en_free_rx_desc(const struct mlx4_en_priv *priv,
frags = ring->rx_info + (index << priv->log_rx_info);
for (nr = 0; nr < priv->num_frags; nr++) {
en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
- mlx4_en_free_frag(priv, frags + nr);
+ mlx4_en_free_frag(priv, ring, frags + nr);
}
}
@@ -265,6 +239,7 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
u32 size, u16 stride, int node, int queue_index)
{
struct mlx4_en_dev *mdev = priv->mdev;
+ struct page_pool_params pp = {};
struct mlx4_en_rx_ring *ring;
int err = -ENOMEM;
int tmp;
@@ -283,8 +258,27 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
ring->log_stride = ffs(ring->stride) - 1;
ring->buf_size = ring->size * ring->stride + TXBB_SIZE;
- if (xdp_rxq_info_reg(&ring->xdp_rxq, priv->dev, queue_index, 0) < 0)
+ pp.flags = PP_FLAG_DMA_MAP;
+ pp.pool_size = size * DIV_ROUND_UP(priv->rx_skb_size, PAGE_SIZE);
+ pp.nid = node;
+ pp.napi = &priv->rx_cq[queue_index]->napi;
+ pp.netdev = priv->dev;
+ pp.dev = &mdev->dev->persist->pdev->dev;
+ pp.dma_dir = priv->dma_dir;
+
+ ring->pp = page_pool_create(&pp);
+ if (IS_ERR(ring->pp)) {
+ err = PTR_ERR(ring->pp);
goto err_ring;
+ }
+
+ if (xdp_rxq_info_reg(&ring->xdp_rxq, priv->dev, queue_index, 0) < 0)
+ goto err_pp;
+
+ err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, MEM_TYPE_PAGE_POOL,
+ ring->pp);
+ if (err)
+ goto err_xdp_info;
tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
sizeof(struct mlx4_en_rx_alloc));
@@ -316,6 +310,8 @@ err_info:
ring->rx_info = NULL;
err_xdp_info:
xdp_rxq_info_unreg(&ring->xdp_rxq);
+err_pp:
+ page_pool_destroy(ring->pp);
err_ring:
kfree(ring);
*pring = NULL;
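The en_rx.c rewrite replaces the driver's private per-ring page cache and manual dma_map_page()/dma_unmap_page() handling with a per-ring page_pool created with PP_FLAG_DMA_MAP, so mapping, recycling and refcounting move into the core. A hedged sketch of the create/alloc/release flow the new code follows (parameters and helpers are illustrative):

#include <net/page_pool/helpers.h>
#include <net/page_pool/types.h>
#include <linux/dma-direction.h>
#include <linux/numa.h>

static struct page_pool *example_create_pool(struct device *dma_dev,
                                             struct net_device *netdev,
                                             struct napi_struct *napi,
                                             unsigned int ring_size)
{
    struct page_pool_params pp = {
        .flags     = PP_FLAG_DMA_MAP,   /* pool maps the pages for us */
        .pool_size = ring_size,
        .nid       = NUMA_NO_NODE,
        .dev       = dma_dev,
        .napi      = napi,
        .netdev    = netdev,
        .dma_dir   = DMA_FROM_DEVICE,
    };

    return page_pool_create(&pp);       /* ERR_PTR() on failure */
}

static int example_refill_slot(struct page_pool *pool, struct page **slot,
                               dma_addr_t *dma)
{
    struct page *page = page_pool_alloc_pages(pool, GFP_ATOMIC);

    if (!page)
        return -ENOMEM;
    *slot = page;
    *dma = page_pool_get_dma_addr(page);    /* already device-mapped */
    return 0;
}

static void example_release_slot(struct page_pool *pool, struct page *page)
{
    /* false: not called from the pool's NAPI context. */
    page_pool_put_full_page(pool, page, false);
}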
@@ -406,26 +402,6 @@ void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
}
}
-/* When the rx ring is running in page-per-packet mode, a released frame can go
- * directly into a small cache, to avoid unmapping or touching the page
- * allocator. In bpf prog performance scenarios, buffers are either forwarded
- * or dropped, never converted to skbs, so every page can come directly from
- * this cache when it is sized to be a multiple of the napi budget.
- */
-bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring,
- struct mlx4_en_rx_alloc *frame)
-{
- struct mlx4_en_page_cache *cache = &ring->page_cache;
-
- if (cache->index >= MLX4_EN_CACHE_SIZE)
- return false;
-
- cache->buf[cache->index].page = frame->page;
- cache->buf[cache->index].dma = frame->dma;
- cache->index++;
- return true;
-}
-
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring **pring,
u32 size, u16 stride)
@@ -442,6 +418,7 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
xdp_rxq_info_unreg(&ring->xdp_rxq);
mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
kvfree(ring->rx_info);
+ page_pool_destroy(ring->pp);
ring->rx_info = NULL;
kfree(ring);
*pring = NULL;
@@ -450,14 +427,6 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring)
{
- int i;
-
- for (i = 0; i < ring->page_cache.index; i++) {
- dma_unmap_page(priv->ddev, ring->page_cache.buf[i].dma,
- PAGE_SIZE, priv->dma_dir);
- put_page(ring->page_cache.buf[i].page);
- }
- ring->page_cache.index = 0;
mlx4_en_free_rx_buf(priv, ring);
if (ring->stride <= TXBB_SIZE)
ring->buf -= TXBB_SIZE;
@@ -484,7 +453,7 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
if (unlikely(!page))
goto fail;
- dma = frags->dma;
+ dma = page_pool_get_dma_addr(page);
dma_sync_single_range_for_cpu(priv->ddev, dma, frags->page_offset,
frag_size, priv->dma_dir);
@@ -493,8 +462,11 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
truesize += frag_info->frag_stride;
if (frag_info->frag_stride == PAGE_SIZE / 2) {
+ struct netmem_desc *desc = pp_page_to_nmdesc(page);
+
frags->page_offset ^= PAGE_SIZE / 2;
release = page_count(page) != 1 ||
+ atomic_long_read(&desc->pp_ref_count) != 1 ||
page_is_pfmemalloc(page) ||
page_to_nid(page) != numa_mem_id();
} else if (!priv->rx_headroom) {
@@ -508,10 +480,9 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
release = frags->page_offset + frag_info->frag_size > PAGE_SIZE;
}
if (release) {
- dma_unmap_page(priv->ddev, dma, PAGE_SIZE, priv->dma_dir);
frags->page = NULL;
} else {
- page_ref_inc(page);
+ page_pool_ref_page(page);
}
nr++;
@@ -781,7 +752,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
/* Get pointer to first fragment since we haven't
* skb yet and cast it to ethhdr struct
*/
- dma = frags[0].dma + frags[0].page_offset;
+ dma = page_pool_get_dma_addr(frags[0].page);
+ dma += frags[0].page_offset;
dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
DMA_FROM_DEVICE);
@@ -820,7 +792,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
void *orig_data;
u32 act;
- dma = frags[0].dma + frags[0].page_offset;
+ dma = page_pool_get_dma_addr(frags[0].page);
+ dma += frags[0].page_offset;
dma_sync_single_for_cpu(priv->ddev, dma,
priv->frag_info[0].frag_size,
DMA_FROM_DEVICE);
@@ -883,6 +856,7 @@ xdp_drop_no_cnt:
skb = napi_get_frags(&cq->napi);
if (unlikely(!skb))
goto next;
+ skb_mark_for_recycle(skb);
if (unlikely(ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL)) {
u64 timestamp = mlx4_en_get_cqe_ts(cqe);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 1ddb11cb25f9..87f35bcbeff8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -44,6 +44,7 @@
#include <linux/ipv6.h>
#include <linux/indirect_call_wrapper.h>
#include <net/ipv6.h>
+#include <net/page_pool/helpers.h>
#include "mlx4_en.h"
@@ -350,16 +351,10 @@ u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
int napi_mode)
{
struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
- struct mlx4_en_rx_alloc frame = {
- .page = tx_info->page,
- .dma = tx_info->map0_dma,
- };
-
- if (!napi_mode || !mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
- dma_unmap_page(priv->ddev, tx_info->map0_dma,
- PAGE_SIZE, priv->dma_dir);
- put_page(tx_info->page);
- }
+ struct page_pool *pool = ring->recycle_ring->pp;
+
+ /* Note that napi_mode = 0 means ndo_close() path, not budget = 0 */
+ page_pool_put_full_page(pool, tx_info->page, !!napi_mode);
return tx_info->nr_txbb;
}
@@ -450,6 +445,8 @@ int mlx4_en_process_tx_cq(struct net_device *dev,
if (unlikely(!priv->port_up))
return 0;
+ if (unlikely(!napi_budget) && cq->type == TX_XDP)
+ return 0;
netdev_txq_bql_complete_prefetchw(ring->tx_queue);
@@ -1194,7 +1191,7 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
tx_desc = ring->buf + (index << LOG_TXBB_SIZE);
data = &tx_desc->data;
- dma = frame->dma;
+ dma = page_pool_get_dma_addr(frame->page);
tx_info->page = frame->page;
frame->page = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 7b02ff61126d..4293f8e33f44 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -49,6 +49,8 @@
#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>
+#include <rdma/ib_verbs.h>
+
#include "mlx4.h"
#include "fw.h"
#include "icm.h"
@@ -169,30 +171,27 @@ module_param_array(port_type_array, int, &arr_argc, 0444);
MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default "
"1 for IB, 2 for Ethernet");
-struct mlx4_port_config {
- struct list_head list;
- enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
- struct pci_dev *pdev;
-};
-
static atomic_t pf_loading = ATOMIC_INIT(0);
static int mlx4_devlink_ierr_reset_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
ctx->val.vbool = !!mlx4_internal_err_reset;
return 0;
}
static int mlx4_devlink_ierr_reset_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
mlx4_internal_err_reset = ctx->val.vbool;
return 0;
}
static int mlx4_devlink_crdump_snapshot_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct mlx4_priv *priv = devlink_priv(devlink);
struct mlx4_dev *dev = &priv->dev;
@@ -202,7 +201,8 @@ static int mlx4_devlink_crdump_snapshot_get(struct devlink *devlink, u32 id,
}
static int mlx4_devlink_crdump_snapshot_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct mlx4_priv *priv = devlink_priv(devlink);
struct mlx4_dev *dev = &priv->dev;
@@ -1250,14 +1250,6 @@ err_out:
return err ? err : count;
}
-enum ibta_mtu {
- IB_MTU_256 = 1,
- IB_MTU_512 = 2,
- IB_MTU_1024 = 3,
- IB_MTU_2048 = 4,
- IB_MTU_4096 = 5
-};
-
static inline int int_to_ibta_mtu(int mtu)
{
switch (mtu) {
@@ -1270,7 +1262,7 @@ static inline int int_to_ibta_mtu(int mtu)
}
}
-static inline int ibta_mtu_to_int(enum ibta_mtu mtu)
+static inline int ibta_mtu_to_int(enum ib_mtu mtu)
{
switch (mtu) {
case IB_MTU_256: return 256;
@@ -4376,7 +4368,6 @@ static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
pci_set_master(pdev);
pci_restore_state(pdev);
- pci_save_state(pdev);
return PCI_ERS_RESULT_RECOVERED;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index d7d856d1758a..b213094ea30f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -1478,12 +1478,6 @@ void mlx4_zone_allocator_destroy(struct mlx4_zone_allocator *zone_alloc);
u32 mlx4_zone_alloc_entries(struct mlx4_zone_allocator *zones, u32 uid, int count,
int align, u32 skip_mask, u32 *puid);
-/* Free <count> objects, start from <obj> of the uid <uid> from zone_allocator
- * <zones>.
- */
-u32 mlx4_zone_free_entries(struct mlx4_zone_allocator *zones,
- u32 uid, u32 obj, u32 count);
-
/* If <zones> was allocated with MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP, instead of
* specifying the uid when freeing an object, zone allocator could figure it by
* itself. Other parameters are similar to mlx4_zone_free.
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index efe3f97b874f..aab97694f86b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -247,20 +247,11 @@ struct mlx4_en_tx_desc {
struct mlx4_en_rx_alloc {
struct page *page;
- dma_addr_t dma;
u32 page_offset;
};
#define MLX4_EN_CACHE_SIZE (2 * NAPI_POLL_WEIGHT)
-struct mlx4_en_page_cache {
- u32 index;
- struct {
- struct page *page;
- dma_addr_t dma;
- } buf[MLX4_EN_CACHE_SIZE];
-};
-
enum {
MLX4_EN_TX_RING_STATE_RECOVERING,
};
@@ -335,14 +326,14 @@ struct mlx4_en_rx_ring {
u16 stride;
u16 log_stride;
u16 cqn; /* index of port CQ associated with this ring */
+ u8 fcs_del;
u32 prod;
u32 cons;
u32 buf_size;
- u8 fcs_del;
+ struct page_pool *pp;
void *buf;
void *rx_info;
struct bpf_prog __rcu *xdp_prog;
- struct mlx4_en_page_cache page_cache;
unsigned long bytes;
unsigned long packets;
unsigned long csum_ok;
@@ -355,6 +346,7 @@ struct mlx4_en_rx_ring {
unsigned long xdp_tx;
unsigned long xdp_tx_full;
unsigned long dropped;
+ unsigned long alloc_fail;
int hwtstamp_rx_filter;
cpumask_var_t affinity_mask;
struct xdp_rxq_info xdp_rxq;
@@ -379,6 +371,7 @@ struct mlx4_en_cq {
#define MLX4_EN_OPCODE_ERROR 0x1e
const struct cpumask *aff_mask;
+ int cq_idx;
};
struct mlx4_en_port_profile {
@@ -395,7 +388,7 @@ struct mlx4_en_port_profile {
u8 num_up;
int rss_rings;
int inline_thold;
- struct hwtstamp_config hwtstamp_config;
+ struct kernel_hwtstamp_config hwtstamp_config;
};
struct mlx4_en_profile {
@@ -619,7 +612,7 @@ struct mlx4_en_priv {
bool wol;
struct device *ddev;
struct hlist_head mac_hash[MLX4_EN_MAC_HASH_SIZE];
- struct hwtstamp_config hwtstamp_config;
+ struct kernel_hwtstamp_config hwtstamp_config;
u32 counter_index;
#ifdef CONFIG_MLX4_EN_DCB
@@ -705,8 +698,6 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
struct mlx4_en_priv *priv, unsigned int length,
int tx_ind, bool *doorbell_pending);
void mlx4_en_xmit_doorbell(struct mlx4_en_tx_ring *ring);
-bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring,
- struct mlx4_en_rx_alloc *frame);
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring **pring,
@@ -789,7 +780,7 @@ void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev);
int mlx4_en_moderation_update(struct mlx4_en_priv *priv);
int mlx4_en_reset_config(struct net_device *dev,
- struct hwtstamp_config ts_config,
+ struct kernel_hwtstamp_config *ts_config,
netdev_features_t new_features);
void mlx4_en_update_pfc_stats_bitmap(struct mlx4_dev *dev,
struct mlx4_en_stats_bitmap *stats_bitmap,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index d7444782bfdd..698a5d1f0d7e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -106,7 +106,7 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
buddy->max_order = max_order;
spin_lock_init(&buddy->lock);
- buddy->bits = kcalloc(buddy->max_order + 1, sizeof(long *),
+ buddy->bits = kcalloc(buddy->max_order + 1, sizeof(*buddy->bits),
GFP_KERNEL);
buddy->num_free = kcalloc(buddy->max_order + 1, sizeof(*buddy->num_free),
GFP_KERNEL);
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 4e43f4a7d246..e3d0b13c1610 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -147,26 +147,6 @@ static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
return err;
}
-int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx)
-{
- struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
- struct mlx4_mac_table *table = &info->mac_table;
- int i;
-
- for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
- if (!table->refs[i])
- continue;
-
- if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
- *idx = i;
- return 0;
- }
- }
-
- return -ENOENT;
-}
-EXPORT_SYMBOL_GPL(mlx4_find_cached_mac);
-
static bool mlx4_need_mf_bond(struct mlx4_dev *dev)
{
int i, num_eth_ports = 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 685335832a93..3c3e84100d5a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -8,7 +8,6 @@ config MLX5_CORE
depends on PCI
select AUXILIARY_BUS
select NET_DEVLINK
- depends on VXLAN || !VXLAN
depends on MLXFW || !MLXFW
depends on PTP_1588_CLOCK_OPTIONAL
depends on PCI_HYPERV_INTERFACE || !PCI_HYPERV_INTERFACE
@@ -31,6 +30,7 @@ config MLX5_CORE_EN
bool "Mellanox 5th generation network adapters (ConnectX series) Ethernet support"
depends on NETDEVICES && ETHERNET && INET && PCI && MLX5_CORE
select PAGE_POOL
+ select PAGE_POOL_STATS
select DIMLIB
help
Ethernet support in Mellanox Technologies ConnectX-4 NIC.
@@ -80,8 +80,8 @@ config MLX5_BRIDGE
default y
help
mlx5 ConnectX offloads support for Ethernet Bridging (BRIDGE).
- Enable adding representors of mlx5 uplink and VF ports to Bridge and
- offloading rules for traffic between such ports. Supports VLANs (trunk and
+ Enable offloading FDB rules from a bridge device containing
+ representors of mlx5 uplink and VF ports. Supports VLANs (trunk and
access modes).
config MLX5_CLS_ACT
@@ -172,6 +172,16 @@ config MLX5_SW_STEERING
help
Build support for software-managed steering in the NIC.
+config MLX5_HW_STEERING
+ bool "Mellanox Technologies hardware-managed steering"
+ depends on MLX5_CORE_EN && MLX5_ESWITCH
+ default y
+ help
+ Build support for Hardware-Managed Flow Steering (HMFS) in the NIC.
+ HMFS is a new approach to managing steering rules where STEs are
+ written to ICM by HW (as opposed to SW in software-managed steering),
+ which allows higher rate of rule insertion.
+
config MLX5_SF
bool "Mellanox Technologies subfunction device support using auxiliary device"
depends on MLX5_CORE && MLX5_CORE_EN
@@ -197,3 +207,14 @@ config MLX5_DPLL
help
DPLL support in Mellanox Technologies ConnectX NICs.
+config MLX5_EN_PSP
+ bool "Mellanox Technologies support for PSP cryptography-offload acceleration"
+ depends on INET_PSP
+ depends on MLX5_CORE_EN
+ default y
+ help
+ mlx5 device offload support for Google PSP Security Protocol offload.
+ Adds support for PSP encryption offload and for SPI and key generation
+ interfaces to PSP Stack which supports PSP crypto offload.
+
+ If unsure, say Y.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 76dc5a9b9648..8ffa286a18f5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -17,7 +17,7 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
fs_counters.o fs_ft_pool.o rl.o lag/debugfs.o lag/lag.o dev.o events.o wq.o lib/gid.o \
lib/devcom.o lib/pci_vsc.o lib/dm.o lib/fs_ttc.o diag/fs_tracepoint.o \
diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o diag/reporter_vnic.o \
- fw_reset.o qos.o lib/tout.o lib/aso.o
+ fw_reset.o qos.o lib/tout.o lib/aso.o wc.o fs_pool.o lib/nv_param.o
#
# Netdev basic
@@ -29,7 +29,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en/rqt.o en/tir.o en/rss.o en/rx_res.o \
en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/pool.o \
en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o en/ptp.o \
en/qos.o en/htb.o en/trap.o en/fs_tt_redirect.o en/selq.o \
- lib/crypto.o lib/sd.o
+ lib/crypto.o lib/sd.o en/pcie_cong_event.o
#
# Netdev extra
@@ -60,6 +60,7 @@ mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en/tc/act/act.o en/tc/act/drop.o en/tc/a
ifneq ($(CONFIG_MLX5_TC_CT),)
mlx5_core-y += en/tc_ct.o en/tc/ct_fs_dmfs.o
mlx5_core-$(CONFIG_MLX5_SW_STEERING) += en/tc/ct_fs_smfs.o
+ mlx5_core-$(CONFIG_MLX5_HW_STEERING) += en/tc/ct_fs_hmfs.o
endif
mlx5_core-$(CONFIG_MLX5_TC_SAMPLE) += en/tc/sample.o
@@ -68,7 +69,7 @@ mlx5_core-$(CONFIG_MLX5_TC_SAMPLE) += en/tc/sample.o
# Core extra
#
mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o eswitch_offloads_termtbl.o \
- ecpf.o rdma.o esw/legacy.o \
+ ecpf.o rdma.o esw/legacy.o esw/adj_vport.o \
esw/devlink_port.o esw/vporttbl.o esw/qos.o esw/ipsec.o
mlx5_core-$(CONFIG_MLX5_ESWITCH) += esw/acl/helper.o \
@@ -84,7 +85,9 @@ mlx5_core-$(CONFIG_MLX5_BRIDGE) += esw/bridge.o esw/bridge_mcast.o esw/bridge
mlx5_core-$(CONFIG_HWMON) += hwmon.o
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
-mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o
+ifneq ($(CONFIG_VXLAN),)
+ mlx5_core-y += lib/vxlan.o
+endif
mlx5_core-$(CONFIG_PTP_1588_CLOCK) += lib/clock.o
mlx5_core-$(CONFIG_PCI_HYPERV_INTERFACE) += lib/hv.o lib/hv_vhca.o
@@ -109,16 +112,55 @@ mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/ktls_stats.o \
en_accel/fs_tcp.o en_accel/ktls.o en_accel/ktls_txrx.o \
en_accel/ktls_tx.o en_accel/ktls_rx.o
-mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o \
- steering/dr_matcher.o steering/dr_rule.o \
- steering/dr_icm_pool.o steering/dr_buddy.o \
- steering/dr_ste.o steering/dr_send.o \
- steering/dr_ste_v0.o steering/dr_ste_v1.o \
- steering/dr_ste_v2.o \
- steering/dr_cmd.o steering/dr_fw.o \
- steering/dr_action.o steering/fs_dr.o \
- steering/dr_definer.o steering/dr_ptrn.o \
- steering/dr_arg.o steering/dr_dbg.o lib/smfs.o
+mlx5_core-$(CONFIG_MLX5_EN_PSP) += en_accel/psp.o en_accel/psp_rxtx.o
+
+#
+# SW Steering
+#
+mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/sws/dr_domain.o \
+ steering/sws/dr_table.o \
+ steering/sws/dr_matcher.o \
+ steering/sws/dr_rule.o \
+ steering/sws/dr_icm_pool.o \
+ steering/sws/dr_buddy.o \
+ steering/sws/dr_ste.o \
+ steering/sws/dr_send.o \
+ steering/sws/dr_ste_v0.o \
+ steering/sws/dr_ste_v1.o \
+ steering/sws/dr_ste_v2.o \
+ steering/sws/dr_ste_v3.o \
+ steering/sws/dr_cmd.o \
+ steering/sws/dr_fw.o \
+ steering/sws/dr_action.o \
+ steering/sws/dr_definer.o \
+ steering/sws/dr_ptrn.o \
+ steering/sws/dr_arg.o \
+ steering/sws/dr_dbg.o \
+ steering/sws/fs_dr.o \
+ lib/smfs.o
+
+#
+# HW Steering
+#
+mlx5_core-$(CONFIG_MLX5_HW_STEERING) += steering/hws/cmd.o \
+ steering/hws/context.o \
+ steering/hws/pat_arg.o \
+ steering/hws/buddy.o \
+ steering/hws/pool.o \
+ steering/hws/table.o \
+ steering/hws/action.o \
+ steering/hws/rule.o \
+ steering/hws/matcher.o \
+ steering/hws/send.o \
+ steering/hws/definer.o \
+ steering/hws/bwc.o \
+ steering/hws/debug.o \
+ steering/hws/vport.o \
+ steering/hws/bwc_complex.o \
+ steering/hws/fs_hws_pools.o \
+ steering/hws/fs_hws.o \
+ steering/hws/action_ste_pool.o
+
#
# SF device
#
@@ -129,5 +171,10 @@ mlx5_core-$(CONFIG_MLX5_SF) += sf/vhca_event.o sf/dev/dev.o sf/dev/driver.o irq_
#
mlx5_core-$(CONFIG_MLX5_SF_MANAGER) += sf/cmd.o sf/hw_table.o sf/devlink.o
+#
+# TPH support
+#
+mlx5_core-$(CONFIG_PCIE_TPH) += lib/st.o
+
obj-$(CONFIG_MLX5_DPLL) += mlx5_dpll.o
mlx5_dpll-y := dpll.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 4957412ff1f6..5b08e5ffe0e2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -94,6 +94,11 @@ static u16 in_to_opcode(void *in)
return MLX5_GET(mbox_in, in, opcode);
}
+static u16 in_to_uid(void *in)
+{
+ return MLX5_GET(mbox_in, in, uid);
+}
+
/* Returns true for opcodes that might be triggered very frequently and throttle
* the command interface. Limit their command slots usage.
*/
@@ -176,6 +181,7 @@ static int cmd_alloc_index(struct mlx5_cmd *cmd, struct mlx5_cmd_work_ent *ent)
static void cmd_free_index(struct mlx5_cmd *cmd, int idx)
{
lockdep_assert_held(&cmd->alloc_lock);
+ cmd->ent_arr[idx] = NULL;
set_bit(idx, &cmd->vars.bitmask);
}
@@ -289,6 +295,10 @@ static void poll_timeout(struct mlx5_cmd_work_ent *ent)
return;
}
cond_resched();
+ if (mlx5_cmd_is_down(dev)) {
+ ent->ret = -ENXIO;
+ return;
+ }
} while (time_before(jiffies, poll_end));
ent->ret = -ETIMEDOUT;
@@ -754,6 +764,8 @@ static const char *cmd_status_str(u8 status)
return "bad resource";
case MLX5_CMD_STAT_RES_BUSY:
return "resource busy";
+ case MLX5_CMD_STAT_NOT_READY:
+ return "FW not ready";
case MLX5_CMD_STAT_LIM_ERR:
return "limits exceeded";
case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
@@ -787,6 +799,7 @@ static int cmd_status_to_err(u8 status)
case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO;
case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL;
case MLX5_CMD_STAT_RES_BUSY: return -EBUSY;
+ case MLX5_CMD_STAT_NOT_READY: return -EAGAIN;
case MLX5_CMD_STAT_LIM_ERR: return -ENOMEM;
case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL;
case MLX5_CMD_STAT_IX_ERR: return -EINVAL;
@@ -815,14 +828,16 @@ EXPORT_SYMBOL(mlx5_cmd_out_err);
static void cmd_status_print(struct mlx5_core_dev *dev, void *in, void *out)
{
u16 opcode, op_mod;
+ u8 status;
u16 uid;
opcode = in_to_opcode(in);
op_mod = MLX5_GET(mbox_in, in, op_mod);
- uid = MLX5_GET(mbox_in, in, uid);
+ uid = in_to_uid(in);
+ status = MLX5_GET(mbox_out, out, status);
if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY &&
- opcode != MLX5_CMD_OP_CREATE_UCTX)
+ opcode != MLX5_CMD_OP_CREATE_UCTX && status != MLX5_CMD_STAT_NOT_READY)
mlx5_cmd_out_err(dev, opcode, op_mod, out);
}
@@ -917,8 +932,7 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
static void cb_timeout_handler(struct work_struct *work)
{
- struct delayed_work *dwork = container_of(work, struct delayed_work,
- work);
+ struct delayed_work *dwork = to_delayed_work(work);
struct mlx5_cmd_work_ent *ent = container_of(dwork,
struct mlx5_cmd_work_ent,
cb_timeout_work);
@@ -969,19 +983,32 @@ static void cmd_work_handler(struct work_struct *work)
bool poll_cmd = ent->polling;
struct mlx5_cmd_layout *lay;
struct mlx5_core_dev *dev;
- unsigned long cb_timeout;
- struct semaphore *sem;
+ unsigned long timeout;
unsigned long flags;
int alloc_ret;
int cmd_mode;
+ complete(&ent->handling);
+
dev = container_of(cmd, struct mlx5_core_dev, cmd);
- cb_timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD));
+ timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD));
- complete(&ent->handling);
- sem = ent->page_queue ? &cmd->vars.pages_sem : &cmd->vars.sem;
- down(sem);
if (!ent->page_queue) {
+ if (down_timeout(&cmd->vars.sem, timeout)) {
+ mlx5_core_warn(dev, "%s(0x%x) timed out while waiting for a slot.\n",
+ mlx5_command_str(ent->op), ent->op);
+ if (ent->callback) {
+ ent->callback(-EBUSY, ent->context);
+ mlx5_free_cmd_msg(dev, ent->out);
+ free_msg(dev, ent->in);
+ cmd_ent_put(ent);
+ } else {
+ ent->ret = -EBUSY;
+ complete(&ent->done);
+ }
+ complete(&ent->slotted);
+ return;
+ }
alloc_ret = cmd_alloc_index(cmd, ent);
if (alloc_ret < 0) {
mlx5_core_err_rl(dev, "failed to allocate command entry\n");
@@ -994,10 +1021,12 @@ static void cmd_work_handler(struct work_struct *work)
ent->ret = -EAGAIN;
complete(&ent->done);
}
- up(sem);
+ up(&cmd->vars.sem);
+ complete(&ent->slotted);
return;
}
} else {
+ down(&cmd->vars.pages_sem);
ent->idx = cmd->vars.max_reg_cmds;
spin_lock_irqsave(&cmd->alloc_lock, flags);
clear_bit(ent->idx, &cmd->vars.bitmask);
@@ -1005,6 +1034,8 @@ static void cmd_work_handler(struct work_struct *work)
spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}
+ complete(&ent->slotted);
+
lay = get_inst(cmd, ent->idx);
ent->lay = lay;
memset(lay, 0, sizeof(*lay));
@@ -1023,7 +1054,7 @@ static void cmd_work_handler(struct work_struct *work)
ent->ts1 = ktime_get_ns();
cmd_mode = cmd->mode;
- if (ent->callback && schedule_delayed_work(&ent->cb_timeout_work, cb_timeout))
+ if (ent->callback && schedule_delayed_work(&ent->cb_timeout_work, timeout))
cmd_ent_get(ent);
set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
@@ -1044,7 +1075,7 @@ static void cmd_work_handler(struct work_struct *work)
poll_timeout(ent);
/* make sure we read the descriptor after ownership is SW */
rmb();
- mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, (ent->ret == -ETIMEDOUT));
+ mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, !!ent->ret);
}
}
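The cmd_work_handler change above replaces an unbounded down() with down_timeout(), so a caller that cannot get a regular command slot within the command timeout fails with -EBUSY rather than sleeping forever. A rough userspace analog of that bounded-wait pattern, using a POSIX semaphore (illustrative only):

	#include <errno.h>
	#include <semaphore.h>
	#include <stdio.h>
	#include <time.h>

	/* Zero free slots, so the timed wait below must fail. */
	static sem_t slots;

	static int acquire_slot(int timeout_sec)
	{
		struct timespec ts;

		clock_gettime(CLOCK_REALTIME, &ts);
		ts.tv_sec += timeout_sec;
		if (sem_timedwait(&slots, &ts)) {
			if (errno == ETIMEDOUT)
				return -EBUSY;	/* report "no slot", do not block forever */
			return -errno;
		}
		return 0;
	}

	int main(void)
	{
		sem_init(&slots, 0, 0);
		printf("acquire_slot: %d\n", acquire_slot(1));
		sem_destroy(&slots);
		return 0;
	}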
@@ -1143,6 +1174,9 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
ent->ret = -ECANCELED;
goto out_err;
}
+
+ wait_for_completion(&ent->slotted);
+
if (cmd->mode == CMD_MODE_POLLING || ent->polling)
wait_for_completion(&ent->done);
else if (!wait_for_completion_timeout(&ent->done, timeout))
@@ -1157,6 +1191,9 @@ out_err:
} else if (err == -ECANCELED) {
mlx5_core_warn(dev, "%s(0x%x) canceled on out of queue timeout.\n",
mlx5_command_str(ent->op), ent->op);
+ } else if (err == -EBUSY) {
+ mlx5_core_warn(dev, "%s(0x%x) timeout while waiting for command semaphore.\n",
+ mlx5_command_str(ent->op), ent->op);
}
mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
err, deliv_status_to_str(ent->status), ent->status);
@@ -1164,6 +1201,44 @@ out_err:
return err;
}
+/* Check if all command slots are stalled (timed out and not recovered).
+ * Returns true if every slot timed out on a recent command and has not yet
+ * been completed by FW (stalled state), false otherwise (at least one slot
+ * is not stalled).
+ *
+ * In this odd "all_stalled" situation, the check serves as a protection
+ * mechanism to avoid blocking the kernel for long periods of time when FW
+ * is not responding to commands.
+ */
+static bool mlx5_cmd_all_stalled(struct mlx5_core_dev *dev)
+{
+ struct mlx5_cmd *cmd = &dev->cmd;
+ bool all_stalled = true;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&cmd->alloc_lock, flags);
+
+ /* at least one command slot is free */
+ if (bitmap_weight(&cmd->vars.bitmask, cmd->vars.max_reg_cmds) > 0) {
+ all_stalled = false;
+ goto out;
+ }
+
+ for_each_clear_bit(i, &cmd->vars.bitmask, cmd->vars.max_reg_cmds) {
+ struct mlx5_cmd_work_ent *ent = dev->cmd.ent_arr[i];
+
+ if (!test_bit(MLX5_CMD_ENT_STATE_TIMEDOUT, &ent->state)) {
+ all_stalled = false;
+ break;
+ }
+ }
+out:
+ spin_unlock_irqrestore(&cmd->alloc_lock, flags);
+
+ return all_stalled;
+}
+
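A rough userspace analog of mlx5_cmd_all_stalled() (simplified, assumed names): set bits in the mask mean a slot is free, and the state counts as stalled only when no slot is free and every busy slot has already timed out.

	#include <stdbool.h>
	#include <stdio.h>

	#define NUM_SLOTS 8

	static bool all_stalled(unsigned long free_mask, const bool *timed_out)
	{
		int i;

		/* At least one free slot means new commands can still make progress. */
		if (free_mask & ((1UL << NUM_SLOTS) - 1))
			return false;

		/* Every busy (clear-bit) slot must have timed out to count as stalled. */
		for (i = 0; i < NUM_SLOTS; i++)
			if (!(free_mask & (1UL << i)) && !timed_out[i])
				return false;
		return true;
	}

	int main(void)
	{
		bool timed_out[NUM_SLOTS];
		int i;

		for (i = 0; i < NUM_SLOTS; i++)
			timed_out[i] = true;

		printf("%d\n", all_stalled(0x0, timed_out));	/* 1: all busy, all timed out */
		timed_out[3] = false;
		printf("%d\n", all_stalled(0x0, timed_out));	/* 0: slot 3 still in flight */
		return 0;
	}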
/* Notes:
* 1. Callback functions may not sleep
* 2. page queue commands do not support asynchronous completion
@@ -1194,6 +1269,15 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
if (callback && page_queue)
return -EINVAL;
+ if (!page_queue && mlx5_cmd_all_stalled(dev)) {
+ mlx5_core_err_rl(dev,
+ "All CMD slots are stalled, aborting command\n");
+ /* There is no reason to wait and block the whole kernel if FW
+ * is not currently responding on any slot; fail immediately.
+ */
+ return -EAGAIN;
+ }
+
ent = cmd_alloc_ent(cmd, in, out, uout, uout_size,
callback, context, page_queue);
if (IS_ERR(ent))
@@ -1208,6 +1292,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
ent->polling = force_polling;
init_completion(&ent->handling);
+ init_completion(&ent->slotted);
if (!callback)
init_completion(&ent->done);
@@ -1225,7 +1310,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
return 0; /* mlx5_cmd_comp_handler() will put(ent) */
err = wait_func(dev, ent);
- if (err == -ETIMEDOUT || err == -ECANCELED)
+ if (err == -ETIMEDOUT || err == -ECANCELED || err == -EBUSY)
goto out_free;
ds = ent->ts2 - ent->ts1;
@@ -1611,6 +1696,9 @@ static int cmd_comp_notifier(struct notifier_block *nb,
dev = container_of(cmd, struct mlx5_core_dev, cmd);
eqe = data;
+ if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
+ return NOTIFY_DONE;
+
mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false);
return NOTIFY_OK;
@@ -1660,6 +1748,13 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
if (test_bit(i, &vector)) {
ent = cmd->ent_arr[i];
+ if (forced && ent->ret == -ETIMEDOUT)
+ set_bit(MLX5_CMD_ENT_STATE_TIMEDOUT,
+ &ent->state);
+ else if (!forced) /* real FW completion */
+ clear_bit(MLX5_CMD_ENT_STATE_TIMEDOUT,
+ &ent->state);
+
/* if we already completed the command, ignore it */
if (!test_and_clear_bit(MLX5_CMD_ENT_STATE_PENDING_COMP,
&ent->state)) {
@@ -1734,6 +1829,10 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
}
}
+#define MLX5_MAX_MANAGE_PAGES_CMD_ENT 1
+#define MLX5_CMD_MASK ((1UL << (cmd->vars.max_reg_cmds + \
+ MLX5_MAX_MANAGE_PAGES_CMD_ENT)) - 1)
+
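Worked example of the mask arithmetic (illustrative value of log_sz): with log_sz = 5, max_reg_cmds = (1 << 5) - 1 = 31 regular slots plus the one reserved page-queue slot, so both the old expression (1UL << (1 << log_sz)) - 1 and the new (1UL << (max_reg_cmds + 1)) - 1 evaluate to a 32-bit mask:

	#include <stdio.h>

	int main(void)
	{
		unsigned int log_sz = 5;				/* example value only */
		unsigned long max_reg_cmds = (1UL << log_sz) - 1;	/* 31 */
		unsigned long old_mask = (1UL << (1 << log_sz)) - 1;
		unsigned long new_mask = (1UL << (max_reg_cmds + 1)) - 1;

		printf("old=0x%lx new=0x%lx\n", old_mask, new_mask);	/* both 0xffffffff */
		return 0;
	}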
static void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
{
struct mlx5_cmd *cmd = &dev->cmd;
@@ -1745,7 +1844,7 @@ static void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
/* wait for pending handlers to complete */
mlx5_eq_synchronize_cmd_irq(dev);
spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
- vector = ~dev->cmd.vars.bitmask & ((1ul << (1 << dev->cmd.vars.log_sz)) - 1);
+ vector = ~dev->cmd.vars.bitmask & MLX5_CMD_MASK;
if (!vector)
goto no_trig;
@@ -1835,6 +1934,17 @@ static int is_manage_pages(void *in)
return in_to_opcode(in) == MLX5_CMD_OP_MANAGE_PAGES;
}
+static bool mlx5_has_privileged_uid(struct mlx5_core_dev *dev)
+{
+ return !xa_empty(&dev->cmd.vars.privileged_uids);
+}
+
+static bool mlx5_cmd_is_privileged_uid(struct mlx5_core_dev *dev,
+ u16 uid)
+{
+ return !!xa_load(&dev->cmd.vars.privileged_uids, uid);
+}
+
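A userspace sketch of the privileged-UID lookup idea (a flat bitmap stands in for the xarray here; names are illustrative): command issuers whose UID is not registered as privileged get routed to the rate-limited semaphore path.

	#include <limits.h>
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* uid is 16 bits, so a flat bitmap of 65536 bits (8 KiB) can stand in
	 * for the xarray used by the driver. */
	static unsigned long priv_uids[65536 / (sizeof(unsigned long) * CHAR_BIT)];
	static unsigned int priv_count;

	static void add_privileged_uid(uint16_t uid)
	{
		priv_uids[uid / (sizeof(unsigned long) * CHAR_BIT)] |=
			1UL << (uid % (sizeof(unsigned long) * CHAR_BIT));
		priv_count++;
	}

	static bool is_privileged_uid(uint16_t uid)
	{
		return priv_uids[uid / (sizeof(unsigned long) * CHAR_BIT)] &
		       (1UL << (uid % (sizeof(unsigned long) * CHAR_BIT)));
	}

	int main(void)
	{
		add_privileged_uid(17);
		/* Unprivileged user commands would take the rate-limited path. */
		printf("uid 17 privileged: %d\n", is_privileged_uid(17));
		printf("uid 42 privileged: %d\n", is_privileged_uid(42));
		printf("any privileged uids registered: %d\n", priv_count > 0);
		return 0;
	}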
/* Notes:
* 1. Callback functions may not sleep
* 2. Page queue commands do not support asynchronous completion
@@ -1845,7 +1955,9 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
{
struct mlx5_cmd_msg *inb, *outb;
u16 opcode = in_to_opcode(in);
- bool throttle_op;
+ bool throttle_locked = false;
+ bool unpriv_locked = false;
+ u16 uid = in_to_uid(in);
int pages_queue;
gfp_t gfp;
u8 token;
@@ -1854,12 +1966,19 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, opcode))
return -ENXIO;
- throttle_op = mlx5_cmd_is_throttle_opcode(opcode);
- if (throttle_op) {
- /* atomic context may not sleep */
- if (callback)
- return -EINVAL;
- down(&dev->cmd.vars.throttle_sem);
+ if (!callback) {
+ /* The semaphore is already held for callback commands. It was
+ * acquired in mlx5_cmd_exec_cb()
+ */
+ if (uid && mlx5_has_privileged_uid(dev)) {
+ if (!mlx5_cmd_is_privileged_uid(dev, uid)) {
+ unpriv_locked = true;
+ down(&dev->cmd.vars.unprivileged_sem);
+ }
+ } else if (mlx5_cmd_is_throttle_opcode(opcode)) {
+ throttle_locked = true;
+ down(&dev->cmd.vars.throttle_sem);
+ }
}
pages_queue = is_manage_pages(in);
@@ -1887,8 +2006,8 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
pages_queue, token, force_polling);
- if (callback)
- return err;
+ if (callback && !err)
+ return 0;
if (err > 0) /* Failed in FW, command didn't execute */
err = deliv_status_to_err(err);
@@ -1903,8 +2022,11 @@ out_out:
out_in:
free_msg(dev, inb);
out_up:
- if (throttle_op)
+ if (throttle_locked)
up(&dev->cmd.vars.throttle_sem);
+ if (unpriv_locked)
+ up(&dev->cmd.vars.unprivileged_sem);
+
return err;
}
@@ -2065,10 +2187,23 @@ static void mlx5_cmd_exec_cb_handler(int status, void *_work)
{
struct mlx5_async_work *work = _work;
struct mlx5_async_ctx *ctx;
+ struct mlx5_core_dev *dev;
+ bool throttle_locked;
+ bool unpriv_locked;
ctx = work->ctx;
- status = cmd_status_err(ctx->dev, status, work->opcode, work->op_mod, work->out);
+ dev = ctx->dev;
+ throttle_locked = work->throttle_locked;
+ unpriv_locked = work->unpriv_locked;
+ status = cmd_status_err(dev, status, work->opcode, work->op_mod, work->out);
work->user_callback(status, work);
+ /* Can't access "work" from this point on. It could have been freed in
+ * the callback.
+ */
+ if (throttle_locked)
+ up(&dev->cmd.vars.throttle_sem);
+ if (unpriv_locked)
+ up(&dev->cmd.vars.unprivileged_sem);
if (atomic_dec_and_test(&ctx->num_inflight))
complete(&ctx->inflight_done);
}
@@ -2077,6 +2212,8 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
void *out, int out_size, mlx5_async_cbk_t callback,
struct mlx5_async_work *work)
{
+ struct mlx5_core_dev *dev = ctx->dev;
+ u16 uid;
int ret;
work->ctx = ctx;
@@ -2084,11 +2221,43 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
work->opcode = in_to_opcode(in);
work->op_mod = MLX5_GET(mbox_in, in, op_mod);
work->out = out;
+ work->throttle_locked = false;
+ work->unpriv_locked = false;
+ uid = in_to_uid(in);
+
if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight)))
return -EIO;
- ret = cmd_exec(ctx->dev, in, in_size, out, out_size,
+
+ if (uid && mlx5_has_privileged_uid(dev)) {
+ if (!mlx5_cmd_is_privileged_uid(dev, uid)) {
+ if (down_trylock(&dev->cmd.vars.unprivileged_sem)) {
+ ret = -EBUSY;
+ goto dec_num_inflight;
+ }
+ work->unpriv_locked = true;
+ }
+ } else if (mlx5_cmd_is_throttle_opcode(in_to_opcode(in))) {
+ if (down_trylock(&dev->cmd.vars.throttle_sem)) {
+ ret = -EBUSY;
+ goto dec_num_inflight;
+ }
+ work->throttle_locked = true;
+ }
+
+ ret = cmd_exec(dev, in, in_size, out, out_size,
mlx5_cmd_exec_cb_handler, work, false);
- if (ret && atomic_dec_and_test(&ctx->num_inflight))
+ if (ret)
+ goto sem_up;
+
+ return 0;
+
+sem_up:
+ if (work->throttle_locked)
+ up(&dev->cmd.vars.throttle_sem);
+ if (work->unpriv_locked)
+ up(&dev->cmd.vars.unprivileged_sem);
+dec_num_inflight:
+ if (atomic_dec_and_test(&ctx->num_inflight))
complete(&ctx->inflight_done);
return ret;
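A rough userspace sketch of the trylock-and-unwind pattern used in the callback path above (illustrative names; the driver uses kernel semaphores): the limiter is taken with a trylock so an atomic caller never sleeps, and it is released in reverse order when a later step fails.

	#include <errno.h>
	#include <semaphore.h>
	#include <stdbool.h>
	#include <stdio.h>

	static sem_t limiter;

	static int submit_async(bool rate_limited)
	{
		bool limiter_held = false;
		int ret;

		if (rate_limited) {
			if (sem_trywait(&limiter)) {	/* never sleep in atomic context */
				ret = -EBUSY;
				goto out;
			}
			limiter_held = true;
		}

		ret = -EIO;			/* pretend the submit itself failed */
		if (ret)
			goto release;

		return 0;

	release:
		if (limiter_held)
			sem_post(&limiter);	/* undo in reverse order */
	out:
		return ret;
	}

	int main(void)
	{
		sem_init(&limiter, 0, 1);
		printf("submit_async: %d\n", submit_async(true));
		sem_destroy(&limiter);
		return 0;
	}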
@@ -2319,15 +2488,21 @@ int mlx5_cmd_enable(struct mlx5_core_dev *dev)
cmd->state = MLX5_CMDIF_STATE_DOWN;
cmd->vars.max_reg_cmds = (1 << cmd->vars.log_sz) - 1;
- cmd->vars.bitmask = (1UL << cmd->vars.max_reg_cmds) - 1;
+ cmd->vars.bitmask = MLX5_CMD_MASK;
sema_init(&cmd->vars.sem, cmd->vars.max_reg_cmds);
sema_init(&cmd->vars.pages_sem, 1);
sema_init(&cmd->vars.throttle_sem, DIV_ROUND_UP(cmd->vars.max_reg_cmds, 2));
+ sema_init(&cmd->vars.unprivileged_sem,
+ DIV_ROUND_UP(cmd->vars.max_reg_cmds, 2));
+
+ xa_init(&cmd->vars.privileged_uids);
cmd->pool = dma_pool_create("mlx5_cmd", mlx5_core_dma_dev(dev), size, align, 0);
- if (!cmd->pool)
- return -ENOMEM;
+ if (!cmd->pool) {
+ err = -ENOMEM;
+ goto err_destroy_xa;
+ }
err = alloc_cmd_page(dev, cmd);
if (err)
@@ -2361,6 +2536,8 @@ err_cmd_page:
free_cmd_page(dev, cmd);
err_free_pool:
dma_pool_destroy(cmd->pool);
+err_destroy_xa:
+ xa_destroy(&dev->cmd.vars.privileged_uids);
return err;
}
@@ -2373,6 +2550,7 @@ void mlx5_cmd_disable(struct mlx5_core_dev *dev)
destroy_msg_cache(dev);
free_cmd_page(dev, cmd);
dma_pool_destroy(cmd->pool);
+ xa_destroy(&dev->cmd.vars.privileged_uids);
}
void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
@@ -2380,3 +2558,18 @@ void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
{
dev->cmd.state = cmdif_state;
}
+
+int mlx5_cmd_add_privileged_uid(struct mlx5_core_dev *dev, u16 uid)
+{
+ return xa_insert(&dev->cmd.vars.privileged_uids, uid,
+ xa_mk_value(uid), GFP_KERNEL);
+}
+EXPORT_SYMBOL(mlx5_cmd_add_privileged_uid);
+
+void mlx5_cmd_remove_privileged_uid(struct mlx5_core_dev *dev, u16 uid)
+{
+ void *data = xa_erase(&dev->cmd.vars.privileged_uids, uid);
+
+ WARN(!data, "Privileged UID %u does not exist\n", uid);
+}
+EXPORT_SYMBOL(mlx5_cmd_remove_privileged_uid);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index 4caa1b6f40ba..60f7ab1d72e7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -66,11 +66,12 @@ void mlx5_cq_tasklet_cb(struct tasklet_struct *t)
tasklet_schedule(&ctx->task);
}
-static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq,
- struct mlx5_eqe *eqe)
+void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq,
+ struct mlx5_eqe *eqe)
{
unsigned long flags;
struct mlx5_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv;
+ bool schedule_tasklet = false;
spin_lock_irqsave(&tasklet_ctx->lock, flags);
/* When migrating CQs between EQs will be implemented, please note
@@ -80,11 +81,29 @@ static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq,
*/
if (list_empty_careful(&cq->tasklet_ctx.list)) {
mlx5_cq_hold(cq);
+ /* If the tasklet CQ work list isn't empty, mlx5_cq_tasklet_cb()
+ * is scheduled/running and hasn't processed the list yet, so it
+ * will see this added CQ when it runs. If the list is empty,
+ * the tasklet needs to be scheduled to pick up the CQ. The
+ * spinlock avoids any race with the tasklet accessing the list.
+ */
+ schedule_tasklet = list_empty(&tasklet_ctx->list);
list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list);
}
spin_unlock_irqrestore(&tasklet_ctx->lock, flags);
+
+ if (schedule_tasklet)
+ tasklet_schedule(&tasklet_ctx->task);
+}
+EXPORT_SYMBOL(mlx5_add_cq_to_tasklet);
+
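The producer side of the scheduling decision described in the comment above can be sketched in userspace as follows (illustrative names): a wakeup is issued only when an item is added to an empty list, so at most one wakeup is outstanding while the list is non-empty.

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct node { struct node *next; int val; };

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static struct node *head;
	static int wakeups;

	static void enqueue(struct node *n)
	{
		bool need_wakeup;

		pthread_mutex_lock(&lock);
		need_wakeup = (head == NULL);	/* consumer already sees a non-empty list */
		n->next = head;
		head = n;
		pthread_mutex_unlock(&lock);

		if (need_wakeup)
			wakeups++;		/* stand-in for tasklet_schedule() */
	}

	int main(void)
	{
		struct node a = { .val = 1 }, b = { .val = 2 }, c = { .val = 3 };

		enqueue(&a);
		enqueue(&b);
		enqueue(&c);
		printf("wakeups issued: %d\n", wakeups);	/* 1, not 3 */
		return 0;
	}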
+static void mlx5_core_cq_dummy_cb(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe)
+{
+ mlx5_core_err(cq->eq->core.dev,
+ "CQ default completion callback, CQ #%u\n", cq->cqn);
}
+#define MLX5_CQ_INIT_CMD_SN cpu_to_be32(2 << 28)
/* Callers must verify outbox status in case of err */
int mlx5_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
u32 *in, int inlen, u32 *out, int outlen)
@@ -110,10 +129,19 @@ int mlx5_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
cq->arm_sn = 0;
cq->eq = eq;
cq->uid = MLX5_GET(create_cq_in, in, uid);
+
+ /* Kernel CQs must set the arm_db address prior to calling
+ * this function, allowing for the proper value to be
+ * initialized. User CQs are responsible for their own
+ * initialization since they do not use the arm_db field.
+ */
+ if (cq->arm_db)
+ *cq->arm_db = MLX5_CQ_INIT_CMD_SN;
+
refcount_set(&cq->refcount, 1);
init_completion(&cq->free);
if (!cq->comp)
- cq->comp = mlx5_add_cq_to_tasklet;
+ cq->comp = mlx5_core_cq_dummy_cb;
/* assuming CQ will be deleted before the EQ */
cq->tasklet_ctx.priv = &eq->tasklet_ctx;
INIT_LIST_HEAD(&cq->tasklet_ctx.list);
@@ -134,7 +162,6 @@ int mlx5_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
mlx5_core_dbg(dev, "failed adding CP 0x%x to debug file system\n",
cq->cqn);
- cq->uar = dev->priv.uar;
cq->irqn = eq->core.irqn;
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
index 09652dc89115..36806e813c33 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
@@ -143,8 +143,8 @@ static ssize_t average_read(struct file *filp, char __user *buf, size_t count,
return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}
-static ssize_t average_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *pos)
+static ssize_t reset_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *pos)
{
struct mlx5_cmd_stats *stats;
@@ -152,6 +152,11 @@ static ssize_t average_write(struct file *filp, const char __user *buf,
spin_lock_irq(&stats->lock);
stats->sum = 0;
stats->n = 0;
+ stats->failed = 0;
+ stats->failed_mbox_status = 0;
+ stats->last_failed_errno = 0;
+ stats->last_failed_mbox_status = 0;
+ stats->last_failed_syndrome = 0;
spin_unlock_irq(&stats->lock);
*pos += count;
@@ -159,11 +164,16 @@ static ssize_t average_write(struct file *filp, const char __user *buf,
return count;
}
-static const struct file_operations stats_fops = {
+static const struct file_operations reset_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .write = reset_write,
+};
+
+static const struct file_operations average_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = average_read,
- .write = average_write,
};
static ssize_t slots_read(struct file *filp, char __user *buf, size_t count,
@@ -228,8 +238,10 @@ void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
continue;
stats->root = debugfs_create_dir(namep, *cmd);
+ debugfs_create_file("reset", 0200, stats->root, stats,
+ &reset_fops);
debugfs_create_file("average", 0400, stats->root, stats,
- &stats_fops);
+ &average_fops);
debugfs_create_u64("n", 0400, stats->root, &stats->n);
debugfs_create_u64("failed", 0400, stats->root, &stats->failed);
debugfs_create_u64("failed_mbox_status", 0400, stats->root,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index 47e7c2639774..64c04f52990f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -228,8 +228,15 @@ enum {
MLX5_INTERFACE_PROTOCOL_VNET,
MLX5_INTERFACE_PROTOCOL_DPLL,
+ MLX5_INTERFACE_PROTOCOL_FWCTL,
};
+static bool is_fwctl_supported(struct mlx5_core_dev *dev)
+{
+ /* fwctl is most useful on PFs, prevent fwctl on SFs for now */
+ return MLX5_CAP_GEN(dev, uctx_cap) && !mlx5_core_is_sf(dev);
+}
+
static const struct mlx5_adev_device {
const char *suffix;
bool (*is_supported)(struct mlx5_core_dev *dev);
@@ -252,6 +259,8 @@ static const struct mlx5_adev_device {
.is_supported = &is_mp_supported },
[MLX5_INTERFACE_PROTOCOL_DPLL] = { .suffix = "dpll",
.is_supported = &is_dpll_supported },
+ [MLX5_INTERFACE_PROTOCOL_FWCTL] = { .suffix = "fwctl",
+ .is_supported = &is_fwctl_supported },
};
int mlx5_adev_idx_alloc(void)
@@ -349,7 +358,7 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
{
struct mlx5_priv *priv = &dev->priv;
struct auxiliary_device *adev;
- struct auxiliary_driver *adrv;
+ const struct auxiliary_driver *adrv;
int ret = 0, i;
devl_assert_locked(priv_to_devlink(dev));
@@ -406,7 +415,7 @@ void mlx5_detach_device(struct mlx5_core_dev *dev, bool suspend)
{
struct mlx5_priv *priv = &dev->priv;
struct auxiliary_device *adev;
- struct auxiliary_driver *adrv;
+ const struct auxiliary_driver *adrv;
pm_message_t pm = {};
int i;
@@ -555,10 +564,14 @@ int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev)
bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev)
{
- u64 fsystem_guid, psystem_guid;
+ u8 fsystem_guid[MLX5_SW_IMAGE_GUID_MAX_BYTES];
+ u8 psystem_guid[MLX5_SW_IMAGE_GUID_MAX_BYTES];
+ u8 flen;
+ u8 plen;
- fsystem_guid = mlx5_query_nic_system_image_guid(dev);
- psystem_guid = mlx5_query_nic_system_image_guid(peer_dev);
+ mlx5_query_nic_sw_system_image_guid(dev, fsystem_guid, &flen);
+ mlx5_query_nic_sw_system_image_guid(peer_dev, psystem_guid, &plen);
- return (fsystem_guid && psystem_guid && fsystem_guid == psystem_guid);
+ return plen && flen && flen == plen &&
+ !memcmp(fsystem_guid, psystem_guid, flen);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index 98d4306929f3..887adf4807d1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -10,6 +10,7 @@
#include "esw/qos.h"
#include "sf/dev/dev.h"
#include "sf/sf.h"
+#include "lib/nv_param.h"
static int mlx5_devlink_flash_update(struct devlink *devlink,
struct devlink_flash_update_params *params,
@@ -35,6 +36,55 @@ static u16 mlx5_fw_ver_subminor(u32 version)
return version & 0xffff;
}
+static int mlx5_devlink_serial_numbers_put(struct mlx5_core_dev *dev,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct pci_dev *pdev = dev->pdev;
+ unsigned int vpd_size, kw_len;
+ char *str, *end;
+ u8 *vpd_data;
+ int err = 0;
+ int start;
+
+ vpd_data = pci_vpd_alloc(pdev, &vpd_size);
+ if (IS_ERR(vpd_data))
+ return 0;
+
+ start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+ PCI_VPD_RO_KEYWORD_SERIALNO, &kw_len);
+ if (start >= 0) {
+ str = kstrndup(vpd_data + start, kw_len, GFP_KERNEL);
+ if (!str) {
+ err = -ENOMEM;
+ goto end;
+ }
+ end = strchrnul(str, ' ');
+ *end = '\0';
+ err = devlink_info_board_serial_number_put(req, str);
+ kfree(str);
+ if (err)
+ goto end;
+ }
+
+ start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size, "V3", &kw_len);
+ if (start >= 0) {
+ str = kstrndup(vpd_data + start, kw_len, GFP_KERNEL);
+ if (!str) {
+ err = -ENOMEM;
+ goto end;
+ }
+ err = devlink_info_serial_number_put(req, str);
+ kfree(str);
+ if (err)
+ goto end;
+ }
+
+end:
+ kfree(vpd_data);
+ return err;
+}
+
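Illustrative userspace sketch of the string handling in the new devlink hook (the PCI VPD helpers themselves are kernel APIs): duplicate at most kw_len bytes of the keyword payload and truncate at the first space before reporting it.

	#define _POSIX_C_SOURCE 200809L
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* Duplicate at most len bytes and cut the copy at the first space,
	 * mirroring the kstrndup() + strchrnul() sequence in the hook above. */
	static char *first_token(const char *payload, size_t len)
	{
		char *str = strndup(payload, len);
		char *end;

		if (!str)
			return NULL;
		end = strchr(str, ' ');
		if (end)
			*end = '\0';
		return str;
	}

	int main(void)
	{
		/* Fake VPD "SN" payload: serial number followed by padding. */
		const char payload[] = "MT2306XZ00AB   ";
		char *sn = first_token(payload, strlen(payload));

		printf("board serial: '%s'\n", sn ? sn : "(none)");
		free(sn);
		return 0;
	}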
#define DEVLINK_FW_STRING_LEN 32
static int
@@ -46,6 +96,13 @@ mlx5_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
u32 running_fw, stored_fw;
int err;
+ if (!mlx5_core_is_pf(dev))
+ return 0;
+
+ err = mlx5_devlink_serial_numbers_put(dev, req, extack);
+ if (err)
+ return err;
+
err = devlink_info_version_fixed_put(req, "fw.psid", dev->board_id);
if (err)
return err;
@@ -104,7 +161,7 @@ static int mlx5_devlink_reload_fw_activate(struct devlink *devlink, struct netli
if (err)
return err;
- mlx5_unload_one_devl_locked(dev, true);
+ mlx5_sync_reset_unload_flow(dev, true);
err = mlx5_health_wait_pci_up(dev);
if (err)
NL_SET_ERR_MSG_MOD(extack, "FW activate aborted, PCI reads fail after reset");
@@ -147,11 +204,6 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
return 0;
}
- if (mlx5_lag_is_active(dev)) {
- NL_SET_ERR_MSG_MOD(extack, "reload is unsupported in Lag mode");
- return -EOPNOTSUPP;
- }
-
if (mlx5_core_is_mp_slave(dev)) {
NL_SET_ERR_MSG_MOD(extack, "reload is unsupported for multi port slave");
return -EOPNOTSUPP;
@@ -320,11 +372,14 @@ static const struct devlink_ops mlx5_devlink_ops = {
.eswitch_encap_mode_get = mlx5_devlink_eswitch_encap_mode_get,
.rate_leaf_tx_share_set = mlx5_esw_devlink_rate_leaf_tx_share_set,
.rate_leaf_tx_max_set = mlx5_esw_devlink_rate_leaf_tx_max_set,
+ .rate_leaf_tc_bw_set = mlx5_esw_devlink_rate_leaf_tc_bw_set,
+ .rate_node_tc_bw_set = mlx5_esw_devlink_rate_node_tc_bw_set,
.rate_node_tx_share_set = mlx5_esw_devlink_rate_node_tx_share_set,
.rate_node_tx_max_set = mlx5_esw_devlink_rate_node_tx_max_set,
.rate_node_new = mlx5_esw_devlink_rate_node_new,
.rate_node_del = mlx5_esw_devlink_rate_node_del,
- .rate_leaf_parent_set = mlx5_esw_devlink_rate_parent_set,
+ .rate_leaf_parent_set = mlx5_esw_devlink_rate_leaf_parent_set,
+ .rate_node_parent_set = mlx5_esw_devlink_rate_node_parent_set,
#endif
#ifdef CONFIG_MLX5_SF_MANAGER
.port_new = mlx5_devlink_sf_port_new,
@@ -475,6 +530,25 @@ mlx5_devlink_hairpin_queue_size_validate(struct devlink *devlink, u32 id,
return 0;
}
+static int mlx5_devlink_num_doorbells_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *mdev = devlink_priv(devlink);
+ u32 val32 = val.vu32;
+ u32 max_num_channels;
+
+ max_num_channels = mlx5e_get_max_num_channels(mdev);
+ if (val32 > max_num_channels) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Requested num_doorbells (%u) exceeds max number of channels (%u)",
+ val32, max_num_channels);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static void mlx5_devlink_hairpin_params_init_values(struct devlink *devlink)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
@@ -554,6 +628,9 @@ static const struct devlink_param mlx5_devlink_eth_params[] = {
"hairpin_queue_size", DEVLINK_PARAM_TYPE_U32,
BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL,
mlx5_devlink_hairpin_queue_size_validate),
+ DEVLINK_PARAM_GENERIC(NUM_DOORBELLS,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL,
+ mlx5_devlink_num_doorbells_validate),
};
static int mlx5_devlink_eth_params_register(struct devlink *devlink)
@@ -577,6 +654,10 @@ static int mlx5_devlink_eth_params_register(struct devlink *devlink)
mlx5_devlink_hairpin_params_init_values(devlink);
+ value.vu32 = MLX5_DEFAULT_NUM_DOORBELLS;
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_NUM_DOORBELLS,
+ value);
return 0;
}
@@ -591,6 +672,105 @@ static void mlx5_devlink_eth_params_unregister(struct devlink *devlink)
ARRAY_SIZE(mlx5_devlink_eth_params));
}
+#define MLX5_PCIE_CONG_THRESH_MAX 10000
+#define MLX5_PCIE_CONG_THRESH_DEF_LOW 7500
+#define MLX5_PCIE_CONG_THRESH_DEF_HIGH 9000
+
+static int
+mlx5_devlink_pcie_cong_thresh_validate(struct devlink *devl, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ if (val.vu16 > MLX5_PCIE_CONG_THRESH_MAX) {
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Value %u > max supported (%u)",
+ val.vu16, MLX5_PCIE_CONG_THRESH_MAX);
+
+ return -EINVAL;
+ }
+
+ switch (id) {
+ case MLX5_DEVLINK_PARAM_ID_PCIE_CONG_IN_LOW:
+ case MLX5_DEVLINK_PARAM_ID_PCIE_CONG_IN_HIGH:
+ case MLX5_DEVLINK_PARAM_ID_PCIE_CONG_OUT_LOW:
+ case MLX5_DEVLINK_PARAM_ID_PCIE_CONG_OUT_HIGH:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static void mlx5_devlink_pcie_cong_init_values(struct devlink *devlink)
+{
+ union devlink_param_value value;
+ u32 id;
+
+ value.vu16 = MLX5_PCIE_CONG_THRESH_DEF_LOW;
+ id = MLX5_DEVLINK_PARAM_ID_PCIE_CONG_IN_LOW;
+ devl_param_driverinit_value_set(devlink, id, value);
+
+ value.vu16 = MLX5_PCIE_CONG_THRESH_DEF_HIGH;
+ id = MLX5_DEVLINK_PARAM_ID_PCIE_CONG_IN_HIGH;
+ devl_param_driverinit_value_set(devlink, id, value);
+
+ value.vu16 = MLX5_PCIE_CONG_THRESH_DEF_LOW;
+ id = MLX5_DEVLINK_PARAM_ID_PCIE_CONG_OUT_LOW;
+ devl_param_driverinit_value_set(devlink, id, value);
+
+ value.vu16 = MLX5_PCIE_CONG_THRESH_DEF_HIGH;
+ id = MLX5_DEVLINK_PARAM_ID_PCIE_CONG_OUT_HIGH;
+ devl_param_driverinit_value_set(devlink, id, value);
+}
+
+static const struct devlink_param mlx5_devlink_pcie_cong_params[] = {
+ DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_PCIE_CONG_IN_LOW,
+ "pcie_cong_inbound_low", DEVLINK_PARAM_TYPE_U16,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL,
+ mlx5_devlink_pcie_cong_thresh_validate),
+ DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_PCIE_CONG_IN_HIGH,
+ "pcie_cong_inbound_high", DEVLINK_PARAM_TYPE_U16,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL,
+ mlx5_devlink_pcie_cong_thresh_validate),
+ DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_PCIE_CONG_OUT_LOW,
+ "pcie_cong_outbound_low", DEVLINK_PARAM_TYPE_U16,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL,
+ mlx5_devlink_pcie_cong_thresh_validate),
+ DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_PCIE_CONG_OUT_HIGH,
+ "pcie_cong_outbound_high", DEVLINK_PARAM_TYPE_U16,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL,
+ mlx5_devlink_pcie_cong_thresh_validate),
+};
+
+static int mlx5_devlink_pcie_cong_params_register(struct devlink *devlink)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ int err;
+
+ if (!mlx5_pcie_cong_event_supported(dev))
+ return 0;
+
+ err = devl_params_register(devlink, mlx5_devlink_pcie_cong_params,
+ ARRAY_SIZE(mlx5_devlink_pcie_cong_params));
+ if (err)
+ return err;
+
+ mlx5_devlink_pcie_cong_init_values(devlink);
+
+ return 0;
+}
+
+static void mlx5_devlink_pcie_cong_params_unregister(struct devlink *devlink)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+
+ if (!mlx5_pcie_cong_event_supported(dev))
+ return;
+
+ devl_params_unregister(devlink, mlx5_devlink_pcie_cong_params,
+ ARRAY_SIZE(mlx5_devlink_pcie_cong_params));
+}
+
static int mlx5_devlink_enable_rdma_validate(struct devlink *devlink, u32 id,
union devlink_param_value val,
struct netlink_ext_ack *extack)
@@ -836,8 +1016,20 @@ int mlx5_devlink_params_register(struct devlink *devlink)
if (err)
goto max_uc_list_err;
+ err = mlx5_devlink_pcie_cong_params_register(devlink);
+ if (err)
+ goto pcie_cong_err;
+
+ err = mlx5_nv_param_register_dl_params(devlink);
+ if (err)
+ goto nv_param_err;
+
return 0;
+nv_param_err:
+ mlx5_devlink_pcie_cong_params_unregister(devlink);
+pcie_cong_err:
+ mlx5_devlink_max_uc_list_params_unregister(devlink);
max_uc_list_err:
mlx5_devlink_auxdev_params_unregister(devlink);
auxdev_reg_err:
@@ -848,6 +1040,8 @@ auxdev_reg_err:
void mlx5_devlink_params_unregister(struct devlink *devlink)
{
+ mlx5_nv_param_unregister_dl_params(devlink);
+ mlx5_devlink_pcie_cong_params_unregister(devlink);
mlx5_devlink_max_uc_list_params_unregister(devlink);
mlx5_devlink_auxdev_params_unregister(devlink);
devl_params_unregister(devlink, mlx5_devlink_params,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
index 961f75da6227..43b9bf8829cf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
@@ -22,6 +22,12 @@ enum mlx5_devlink_param_id {
MLX5_DEVLINK_PARAM_ID_ESW_MULTIPORT,
MLX5_DEVLINK_PARAM_ID_HAIRPIN_NUM_QUEUES,
MLX5_DEVLINK_PARAM_ID_HAIRPIN_QUEUE_SIZE,
+ MLX5_DEVLINK_PARAM_ID_PCIE_CONG_IN_LOW,
+ MLX5_DEVLINK_PARAM_ID_PCIE_CONG_IN_HIGH,
+ MLX5_DEVLINK_PARAM_ID_PCIE_CONG_OUT_LOW,
+ MLX5_DEVLINK_PARAM_ID_PCIE_CONG_OUT_HIGH,
+ MLX5_DEVLINK_PARAM_ID_CQE_COMPRESSION_TYPE,
+ MLX5_DEVLINK_PARAM_ID_SWP_L4_CSUM_MODE,
};
struct mlx5_trap_ctx {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/cmd_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/cmd_tracepoint.h
index 406ebe17405f..b4b3a43e56a0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/cmd_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/cmd_tracepoint.h
@@ -22,10 +22,10 @@ TRACE_EVENT(mlx5_cmd,
__field(u32, syndrome)
__field(int, err)
),
- TP_fast_assign(__assign_str(command_str, command_str);
+ TP_fast_assign(__assign_str(command_str);
__entry->opcode = opcode;
__entry->op_mod = op_mod;
- __assign_str(status_str, status_str);
+ __assign_str(status_str);
__entry->status = status;
__entry->syndrome = syndrome;
__entry->err = err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h
index f15718db5d0e..78e481b2c015 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h
@@ -25,7 +25,7 @@ TRACE_EVENT(mlx5e_rep_neigh_update,
struct in6_addr *pin6;
__be32 *p32;
- __assign_str(devname, nhe->neigh_dev->name);
+ __assign_str(devname);
__entry->neigh_connected = neigh_connected;
memcpy(__entry->ha, ha, ETH_ALEN);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h
index ac52ef37f38a..4b1ca228012b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h
@@ -86,7 +86,7 @@ TRACE_EVENT(mlx5e_tc_update_neigh_used_value,
struct in6_addr *pin6;
__be32 *p32;
- __assign_str(devname, nhe->neigh_dev->name);
+ __assign_str(devname);
__entry->neigh_used = neigh_used;
p32 = (__be32 *)__entry->v4;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
index ddf1b87f1bc0..d6e736c1fb24 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
@@ -203,10 +203,10 @@ TRACE_EVENT(mlx5_fs_set_fte,
fs_get_obj(__entry->fg, fte->node.parent);
__entry->group_index = __entry->fg->id;
__entry->index = fte->index;
- __entry->action = fte->action.action;
+ __entry->action = fte->act_dests.action.action;
__entry->mask_enable = __entry->fg->mask.match_criteria_enable;
- __entry->flow_tag = fte->flow_context.flow_tag;
- __entry->flow_source = fte->flow_context.flow_source;
+ __entry->flow_tag = fte->act_dests.flow_context.flow_tag;
+ __entry->flow_source = fte->act_dests.flow_context.flow_source;
memcpy(__entry->mask_outer,
MLX5_ADDR_OF(fte_match_param,
&__entry->fg->mask.match_criteria,
@@ -284,7 +284,7 @@ TRACE_EVENT(mlx5_fs_add_rule,
TP_fast_assign(
__entry->rule = rule;
fs_get_obj(__entry->fte, rule->node.parent);
- __entry->index = __entry->fte->dests_size - 1;
+ __entry->index = __entry->fte->act_dests.dests_size - 1;
__entry->sw_action = rule->sw_action;
memcpy(__entry->destination,
&rule->dest_attr,
@@ -292,7 +292,7 @@ TRACE_EVENT(mlx5_fs_add_rule,
if (rule->dest_attr.type &
MLX5_FLOW_DESTINATION_TYPE_COUNTER)
__entry->counter_id =
- rule->dest_attr.counter_id;
+ mlx5_fc_id(rule->dest_attr.counter);
),
TP_printk("rule=%p fte=%p index=%u sw_action=<%s> [dst] %s\n",
__entry->rule, __entry->fte, __entry->index,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index 080e7eab52c7..7bcf822a89f9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -54,7 +54,7 @@ static int mlx5_query_mtrc_caps(struct mlx5_fw_tracer *tracer)
if (!MLX5_GET(mtrc_cap, out, trace_to_memory)) {
mlx5_core_dbg(dev, "FWTracer: Device does not support logging traces to memory\n");
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
tracer->trc_ver = MLX5_GET(mtrc_cap, out, trc_ver);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h
index 3038be575923..50f8a7630f86 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h
@@ -55,12 +55,11 @@ TRACE_EVENT(mlx5_fw,
),
TP_fast_assign(
- __assign_str(dev_name,
- dev_name(tracer->dev->device));
+ __assign_str(dev_name);
__entry->trace_timestamp = trace_timestamp;
__entry->lost = lost;
__entry->event_id = event_id;
- __assign_str(msg, msg);
+ __assign_str(msg);
),
TP_printk("%s [0x%llx] %d [0x%x] %s",
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c
index c7216e84ef8c..7cae0c6e5e8a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. */
+#include <linux/mlx5/vport.h>
+
#include "reporter_vnic.h"
#include "en_stats.h"
#include "devlink.h"
@@ -13,6 +15,50 @@ struct mlx5_vnic_diag_stats {
__be64 query_vnic_env_out[MLX5_ST_SZ_QW(query_vnic_env_out)];
};
+static void mlx5_reporter_vnic_diagnose_counter_icm(struct mlx5_core_dev *dev,
+ struct devlink_fmsg *fmsg,
+ u16 vport_num, bool other_vport)
+{
+ u32 out_icm_reg[MLX5_ST_SZ_DW(vhca_icm_ctrl_reg)] = {};
+ u32 in_icm_reg[MLX5_ST_SZ_DW(vhca_icm_ctrl_reg)] = {};
+ u32 out_reg[MLX5_ST_SZ_DW(nic_cap_reg)] = {};
+ u32 in_reg[MLX5_ST_SZ_DW(nic_cap_reg)] = {};
+ u32 cur_alloc_icm;
+ int vhca_icm_ctrl;
+ u16 vhca_id;
+ int err;
+
+ err = mlx5_core_access_reg(dev, in_reg, sizeof(in_reg), out_reg,
+ sizeof(out_reg), MLX5_REG_NIC_CAP, 0, 0);
+ if (err) {
+ mlx5_core_warn(dev, "Reading nic_cap_reg failed. err = %d\n", err);
+ return;
+ }
+ vhca_icm_ctrl = MLX5_GET(nic_cap_reg, out_reg, vhca_icm_ctrl);
+ if (!vhca_icm_ctrl)
+ return;
+
+ MLX5_SET(vhca_icm_ctrl_reg, in_icm_reg, vhca_id_valid, other_vport);
+ if (other_vport) {
+ err = mlx5_vport_get_vhca_id(dev, vport_num, &vhca_id);
+ if (err) {
+ mlx5_core_warn(dev, "vport to vhca_id failed. vport_num = %d, err = %d\n",
+ vport_num, err);
+ return;
+ }
+ MLX5_SET(vhca_icm_ctrl_reg, in_icm_reg, vhca_id, vhca_id);
+ }
+ err = mlx5_core_access_reg(dev, in_icm_reg, sizeof(in_icm_reg),
+ out_icm_reg, sizeof(out_icm_reg),
+ MLX5_REG_VHCA_ICM_CTRL, 0, 0);
+ if (err) {
+ mlx5_core_warn(dev, "Reading vhca_icm_ctrl failed. err = %d\n", err);
+ return;
+ }
+ cur_alloc_icm = MLX5_GET(vhca_icm_ctrl_reg, out_icm_reg, cur_alloc_icm);
+ devlink_fmsg_u32_pair_put(fmsg, "icm_consumption", cur_alloc_icm);
+}
+
void mlx5_reporter_vnic_diagnose_counters(struct mlx5_core_dev *dev,
struct devlink_fmsg *fmsg,
u16 vport_num, bool other_vport)
@@ -59,6 +105,17 @@ void mlx5_reporter_vnic_diagnose_counters(struct mlx5_core_dev *dev,
devlink_fmsg_u64_pair_put(fmsg, "handled_pkt_steering_fail",
VNIC_ENV_GET64(&vnic, handled_pkt_steering_fail));
}
+ if (MLX5_CAP_GEN(dev, nic_cap_reg))
+ mlx5_reporter_vnic_diagnose_counter_icm(dev, fmsg, vport_num, other_vport);
+ if (MLX5_CAP_GEN(dev, vnic_env_cnt_bar_uar_access))
+ devlink_fmsg_u32_pair_put(fmsg, "bar_uar_access",
+ VNIC_ENV_GET(&vnic, bar_uar_access));
+ if (MLX5_CAP_GEN(dev, vnic_env_cnt_odp_page_fault)) {
+ devlink_fmsg_u32_pair_put(fmsg, "odp_local_triggered_page_fault",
+ VNIC_ENV_GET(&vnic, odp_local_triggered_page_fault));
+ devlink_fmsg_u32_pair_put(fmsg, "odp_remote_triggered_page_fault",
+ VNIC_ENV_GET(&vnic, odp_remote_triggered_page_fault));
+ }
devlink_fmsg_obj_nest_end(fmsg);
devlink_fmsg_pair_nest_end(fmsg);
@@ -87,11 +144,11 @@ void mlx5_reporter_vnic_create(struct mlx5_core_dev *dev)
health->vnic_reporter =
devlink_health_reporter_create(devlink,
&mlx5_reporter_vnic_ops,
- 0, dev);
+ dev);
if (IS_ERR(health->vnic_reporter))
mlx5_core_warn(dev,
- "Failed to create vnic reporter, err = %ld\n",
- PTR_ERR(health->vnic_reporter));
+ "Failed to create vnic reporter, err = %pe\n",
+ health->vnic_reporter);
}
void mlx5_reporter_vnic_destroy(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dpll.c b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
index 904e08de852e..1e5522a19483 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
@@ -166,9 +166,90 @@ static int mlx5_dpll_device_mode_get(const struct dpll_device *dpll,
return 0;
}
+enum {
+ MLX5_DPLL_SSM_CODE_PRC = 0b0010,
+ MLX5_DPLL_SSM_CODE_SSU_A = 0b0100,
+ MLX5_DPLL_SSM_CODE_SSU_B = 0b1000,
+ MLX5_DPLL_SSM_CODE_EEC1 = 0b1011,
+ MLX5_DPLL_SSM_CODE_PRTC = 0b0010,
+ MLX5_DPLL_SSM_CODE_EPRTC = 0b0010,
+ MLX5_DPLL_SSM_CODE_EEEC = 0b1011,
+ MLX5_DPLL_SSM_CODE_EPRC = 0b0010,
+};
+
+enum {
+ MLX5_DPLL_ENHANCED_SSM_CODE_PRC = 0xff,
+ MLX5_DPLL_ENHANCED_SSM_CODE_SSU_A = 0xff,
+ MLX5_DPLL_ENHANCED_SSM_CODE_SSU_B = 0xff,
+ MLX5_DPLL_ENHANCED_SSM_CODE_EEC1 = 0xff,
+ MLX5_DPLL_ENHANCED_SSM_CODE_PRTC = 0x20,
+ MLX5_DPLL_ENHANCED_SSM_CODE_EPRTC = 0x21,
+ MLX5_DPLL_ENHANCED_SSM_CODE_EEEC = 0x22,
+ MLX5_DPLL_ENHANCED_SSM_CODE_EPRC = 0x23,
+};
+
+#define __MLX5_DPLL_SSM_COMBINED_CODE(ssm_code, enhanced_ssm_code) \
+ ((ssm_code) | ((enhanced_ssm_code) << 8))
+
+#define MLX5_DPLL_SSM_COMBINED_CODE(type) \
+ __MLX5_DPLL_SSM_COMBINED_CODE(MLX5_DPLL_SSM_CODE_##type, \
+ MLX5_DPLL_ENHANCED_SSM_CODE_##type)
+
+static int mlx5_dpll_clock_quality_level_get(const struct dpll_device *dpll,
+ void *priv, unsigned long *qls,
+ struct netlink_ext_ack *extack)
+{
+ u8 network_option, ssm_code, enhanced_ssm_code;
+ u32 out[MLX5_ST_SZ_DW(msecq_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(msecq_reg)] = {};
+ struct mlx5_dpll *mdpll = priv;
+ int err;
+
+ err = mlx5_core_access_reg(mdpll->mdev, in, sizeof(in),
+ out, sizeof(out), MLX5_REG_MSECQ, 0, 0);
+ if (err)
+ return err;
+ network_option = MLX5_GET(msecq_reg, out, network_option);
+ if (network_option != 1)
+ goto errout;
+ ssm_code = MLX5_GET(msecq_reg, out, local_ssm_code);
+ enhanced_ssm_code = MLX5_GET(msecq_reg, out, local_enhanced_ssm_code);
+
+ switch (__MLX5_DPLL_SSM_COMBINED_CODE(ssm_code, enhanced_ssm_code)) {
+ case MLX5_DPLL_SSM_COMBINED_CODE(PRC):
+ __set_bit(DPLL_CLOCK_QUALITY_LEVEL_ITU_OPT1_PRC, qls);
+ return 0;
+ case MLX5_DPLL_SSM_COMBINED_CODE(SSU_A):
+ __set_bit(DPLL_CLOCK_QUALITY_LEVEL_ITU_OPT1_SSU_A, qls);
+ return 0;
+ case MLX5_DPLL_SSM_COMBINED_CODE(SSU_B):
+ __set_bit(DPLL_CLOCK_QUALITY_LEVEL_ITU_OPT1_SSU_B, qls);
+ return 0;
+ case MLX5_DPLL_SSM_COMBINED_CODE(EEC1):
+ __set_bit(DPLL_CLOCK_QUALITY_LEVEL_ITU_OPT1_EEC1, qls);
+ return 0;
+ case MLX5_DPLL_SSM_COMBINED_CODE(PRTC):
+ __set_bit(DPLL_CLOCK_QUALITY_LEVEL_ITU_OPT1_PRTC, qls);
+ return 0;
+ case MLX5_DPLL_SSM_COMBINED_CODE(EPRTC):
+ __set_bit(DPLL_CLOCK_QUALITY_LEVEL_ITU_OPT1_EPRTC, qls);
+ return 0;
+ case MLX5_DPLL_SSM_COMBINED_CODE(EEEC):
+ __set_bit(DPLL_CLOCK_QUALITY_LEVEL_ITU_OPT1_EEEC, qls);
+ return 0;
+ case MLX5_DPLL_SSM_COMBINED_CODE(EPRC):
+ __set_bit(DPLL_CLOCK_QUALITY_LEVEL_ITU_OPT1_EPRC, qls);
+ return 0;
+ }
+errout:
+ NL_SET_ERR_MSG_MOD(extack, "Invalid clock quality level obtained from firmware");
+ return -EINVAL;
+}
+
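Worked example of the combined-code encoding above: the SSM code sits in the low byte and the enhanced SSM code in the high byte, so PRTC and EPRC share the SSM nibble 0b0010 but differ in the enhanced code (0x20 vs 0x23). A tiny check (illustrative only):

	#include <stdio.h>

	#define SSM_CODE_PRTC          0x2	/* 0b0010 */
	#define SSM_CODE_EPRC          0x2	/* same SSM nibble as PRTC */
	#define ENHANCED_SSM_CODE_PRTC 0x20
	#define ENHANCED_SSM_CODE_EPRC 0x23

	#define COMBINED(ssm, enh) ((ssm) | ((enh) << 8))

	int main(void)
	{
		/* The enhanced code in the high byte disambiguates the two. */
		printf("PRTC combined: 0x%04x\n", COMBINED(SSM_CODE_PRTC, ENHANCED_SSM_CODE_PRTC));
		printf("EPRC combined: 0x%04x\n", COMBINED(SSM_CODE_EPRC, ENHANCED_SSM_CODE_EPRC));
		return 0;
	}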
static const struct dpll_device_ops mlx5_dpll_device_ops = {
.lock_status_get = mlx5_dpll_device_lock_status_get,
.mode_get = mlx5_dpll_device_mode_get,
+ .clock_quality_level_get = mlx5_dpll_clock_quality_level_get,
};
static int mlx5_dpll_pin_direction_get(const struct dpll_pin *pin,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 84db05fb9389..811178d8976c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -47,6 +47,7 @@
#include <linux/rhashtable.h>
#include <net/udp_tunnel.h>
#include <net/switchdev.h>
+#include <net/psp/types.h>
#include <net/xdp.h>
#include <linux/dim.h>
#include <linux/bits.h>
@@ -68,7 +69,7 @@ struct page_pool;
#define MLX5E_METADATA_ETHER_TYPE (0x8CE4)
#define MLX5E_METADATA_ETHER_LEN 8
-#define MLX5E_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
+#define MLX5E_ETH_HARD_MTU (ETH_HLEN + PSP_ENCAP_HLEN + PSP_TRL_SIZE + VLAN_HLEN + ETH_FCS_LEN)
#define MLX5E_HW2SW_MTU(params, hwmtu) ((hwmtu) - ((params)->hard_mtu))
#define MLX5E_SW2HW_MTU(params, swmtu) ((swmtu) + ((params)->hard_mtu))
@@ -80,11 +81,14 @@ struct page_pool;
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define MLX5E_RX_MAX_HEAD (256)
+#define MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE (8)
#define MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE (9)
#define MLX5E_SHAMPO_WQ_HEADER_PER_PAGE (PAGE_SIZE >> MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE)
-#define MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE (64)
-#define MLX5E_SHAMPO_WQ_RESRV_SIZE (64 * 1024)
-#define MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE (4096)
+#define MLX5E_SHAMPO_LOG_WQ_HEADER_PER_PAGE (PAGE_SHIFT - MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE)
+#define MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE_SHIFT (6)
+#define MLX5E_SHAMPO_WQ_RESRV_SIZE_BASE_SHIFT (12)
+#define MLX5E_SHAMPO_WQ_LOG_RESRV_SIZE (16)
+#define MLX5E_SHAMPO_WQ_RESRV_SIZE BIT(MLX5E_SHAMPO_WQ_LOG_RESRV_SIZE)
#define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \
(6 + MLX5_CAP_GEN(mdev, cache_line_128byte)) /* HW restriction */
@@ -93,8 +97,6 @@ struct page_pool;
#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) \
MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, order_base_2(MLX5E_RX_MAX_HEAD))
-#define MLX5_MPWRQ_MAX_LOG_WQE_SZ 18
-
/* Keep in sync with mlx5e_mpwrq_log_wqe_sz.
* These are theoretical maximums, which can be further restricted by
* capabilities. These values are used for static resource allocations and
@@ -129,7 +131,7 @@ struct page_pool;
#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2
#define MLX5E_DEFAULT_LRO_TIMEOUT 32
-#define MLX5E_LRO_TIMEOUT_ARR_SIZE 4
+#define MLX5E_DEFAULT_SHAMPO_TIMEOUT 1024
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3
@@ -146,25 +148,6 @@ struct page_pool;
#define MLX5E_TX_XSK_POLL_BUDGET 64
#define MLX5E_SQ_RECOVER_MIN_INTERVAL 500 /* msecs */
-#define MLX5E_KLM_UMR_WQE_SZ(sgl_len)\
- (sizeof(struct mlx5e_umr_wqe) +\
- (sizeof(struct mlx5_klm) * (sgl_len)))
-
-#define MLX5E_KLM_UMR_WQEBBS(klm_entries) \
- (DIV_ROUND_UP(MLX5E_KLM_UMR_WQE_SZ(klm_entries), MLX5_SEND_WQE_BB))
-
-#define MLX5E_KLM_UMR_DS_CNT(klm_entries)\
- (DIV_ROUND_UP(MLX5E_KLM_UMR_WQE_SZ(klm_entries), MLX5_SEND_WQE_DS))
-
-#define MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size)\
- (((wqe_size) - sizeof(struct mlx5e_umr_wqe)) / sizeof(struct mlx5_klm))
-
-#define MLX5E_KLM_ENTRIES_PER_WQE(wqe_size)\
- ALIGN_DOWN(MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT)
-
-#define MLX5E_MAX_KLM_PER_WQE(mdev) \
- MLX5E_KLM_ENTRIES_PER_WQE(MLX5_SEND_WQE_BB * mlx5e_get_max_sq_aligned_wqebbs(mdev))
-
#define mlx5e_state_dereference(priv, p) \
rcu_dereference_protected((p), lockdep_is_held(&(priv)->state_lock))
@@ -249,16 +232,22 @@ struct mlx5e_rx_wqe_cyc {
DECLARE_FLEX_ARRAY(struct mlx5_wqe_data_seg, data);
};
-struct mlx5e_umr_wqe {
+struct mlx5e_umr_wqe_hdr {
struct mlx5_wqe_ctrl_seg ctrl;
struct mlx5_wqe_umr_ctrl_seg uctrl;
struct mlx5_mkey_seg mkc;
+};
+
+struct mlx5e_umr_wqe {
+ struct mlx5e_umr_wqe_hdr hdr;
union {
DECLARE_FLEX_ARRAY(struct mlx5_mtt, inline_mtts);
DECLARE_FLEX_ARRAY(struct mlx5_klm, inline_klms);
DECLARE_FLEX_ARRAY(struct mlx5_ksm, inline_ksms);
};
};
+static_assert(offsetof(struct mlx5e_umr_wqe, inline_mtts) == sizeof(struct mlx5e_umr_wqe_hdr),
+ "struct members should be included in struct mlx5e_umr_wqe_hdr, not in struct mlx5e_umr_wqe");
enum mlx5e_priv_flag {
MLX5E_PFLAG_RX_CQE_BASED_MODER,
@@ -291,10 +280,6 @@ enum packet_merge {
struct mlx5e_packet_merge_param {
enum packet_merge type;
u32 timeout;
- struct {
- u8 match_criteria_type;
- u8 alignment_granularity;
- } shampo;
};
struct mlx5e_params {
@@ -320,6 +305,8 @@ struct mlx5e_params {
bool scatter_fcs_en;
bool rx_dim_enabled;
bool tx_dim_enabled;
+ bool rx_moder_use_cqe_mode;
+ bool tx_moder_use_cqe_mode;
u32 pflags;
struct bpf_prog *xdp_prog;
struct mlx5e_xsk *xsk;
@@ -358,6 +345,7 @@ struct mlx5e_cq {
/* data path - accessed per napi poll */
u16 event_ctr;
struct napi_struct *napi;
+ struct mlx5_uars_page *uar;
struct mlx5_core_cq mcq;
struct mlx5e_ch_stats *ch_stats;
@@ -389,7 +377,7 @@ struct mlx5e_sq_dma {
enum mlx5e_dma_map_type type;
};
-/* Keep this enum consistent with with the corresponding strings array
+/* Keep this enum consistent with the corresponding strings array
* declared in en/reporter_tx.c
*/
enum {
@@ -398,10 +386,8 @@ enum {
MLX5E_SQ_STATE_RECOVERING,
MLX5E_SQ_STATE_IPSEC,
MLX5E_SQ_STATE_DIM,
- MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE,
MLX5E_SQ_STATE_PENDING_XSK_TX,
MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC,
- MLX5E_SQ_STATE_XDP_MULTIBUF,
MLX5E_NUM_SQ_STATES, /* Must be kept last */
};
@@ -410,6 +396,7 @@ struct mlx5e_tx_mpwqe {
struct mlx5e_tx_wqe *wqe;
u32 bytes_count;
u8 ds_count;
+ u8 ds_count_max;
u8 pkt_count;
u8 inline_on;
};
@@ -430,7 +417,7 @@ struct mlx5e_txqsq {
u16 cc;
u16 skb_fifo_cc;
u32 dma_fifo_cc;
- struct dim dim; /* Adaptive Moderation */
+ struct dim *dim; /* Adaptive Moderation */
/* dirtied @xmit */
u16 pc ____cacheline_aligned_in_smp;
@@ -531,6 +518,12 @@ struct mlx5e_xdpsq {
struct mlx5e_channel *channel;
} ____cacheline_aligned_in_smp;
+struct mlx5e_xdp_buff {
+ struct xdp_buff xdp;
+ struct mlx5_cqe64 *cqe;
+ struct mlx5e_rq *rq;
+};
+
struct mlx5e_ktls_resync_resp;
struct mlx5e_icosq {
@@ -562,7 +555,7 @@ struct mlx5e_icosq {
} ____cacheline_aligned_in_smp;
struct mlx5e_frag_page {
- struct page *page;
+ netmem_ref netmem;
u16 frags;
};
@@ -639,17 +632,16 @@ struct mlx5e_dma_info {
};
struct mlx5e_shampo_hd {
- u32 mkey;
- struct mlx5e_dma_info *info;
struct mlx5e_frag_page *pages;
- u16 curr_page_index;
- u16 hd_per_wq;
+ u32 hd_per_wq;
+ u32 hd_per_page;
u16 hd_per_wqe;
+ u8 log_hd_per_page;
+ u8 log_hd_entry_size;
unsigned long *bitmap;
u16 pi;
u16 ci;
- __be32 key;
- u64 last_addr;
+ __be32 mkey_be;
};
struct mlx5e_hw_gro_data {
@@ -677,7 +669,7 @@ struct mlx5e_rq {
} wqe;
struct {
struct mlx5_wq_ll wq;
- struct mlx5e_umr_wqe umr_wqe;
+ struct mlx5e_umr_wqe_hdr umr_wqe;
struct mlx5e_mpw_info *info;
mlx5e_fp_skb_from_cqe_mpwrq skb_from_cqe_mpwrq;
__be32 umr_mkey_be;
@@ -707,7 +699,7 @@ struct mlx5e_rq {
struct mlx5e_rq_stats *stats;
struct mlx5e_cq cq;
struct mlx5e_cq_decomp cqd;
- struct hwtstamp_config *tstamp;
+ struct kernel_hwtstamp_config *hwtstamp_config;
struct mlx5_clock *clock;
struct mlx5e_icosq *icosq;
struct mlx5e_priv *priv;
@@ -722,18 +714,24 @@ struct mlx5e_rq {
int ix;
unsigned int hw_mtu;
- struct dim dim; /* Dynamic Interrupt Moderation */
+ struct dim *dim; /* Dynamic Interrupt Moderation */
/* XDP */
struct bpf_prog __rcu *xdp_prog;
struct mlx5e_xdpsq *xdpsq;
DECLARE_BITMAP(flags, 8);
+
+ /* page pools */
struct page_pool *page_pool;
+ struct page_pool *hd_page_pool;
+
+ struct mlx5e_xdp_buff mxbuf;
/* AF_XDP zero-copy */
struct xsk_buff_pool *xsk_pool;
struct work_struct recover_work;
+ struct work_struct rx_timeout_work;
/* control */
struct mlx5_wq_ctrl wq_ctrl;
@@ -771,7 +769,7 @@ struct mlx5e_channel {
u8 lag_port;
/* XDP_REDIRECT */
- struct mlx5e_xdpsq xdpsq;
+ struct mlx5e_xdpsq *xdpsq;
/* AF_XDP zero-copy */
struct mlx5e_rq xskrq;
@@ -789,14 +787,18 @@ struct mlx5e_channel {
/* control */
struct mlx5e_priv *priv;
struct mlx5_core_dev *mdev;
- struct hwtstamp_config *tstamp;
DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES);
int ix;
int vec_ix;
int sd_ix;
int cpu;
+ struct mlx5_sq_bfreg *bfreg;
/* Sync between icosq recovery and XSK enable/disable. */
struct mutex icosq_recovery_lock;
+
+ /* coalescing configuration */
+ struct dim_cq_moder rx_cq_moder;
+ struct dim_cq_moder tx_cq_moder;
};
struct mlx5e_ptp;
@@ -879,6 +881,8 @@ struct mlx5e_priv {
/* priv data path fields - start */
struct mlx5e_selq selq;
struct mlx5e_txqsq **txq2sq;
+ struct mlx5e_sq_stats **txq2sq_stats;
+
#ifdef CONFIG_MLX5_CORE_EN_DCB
struct mlx5e_dcbx_dp dcbx_dp;
#endif
@@ -916,12 +920,14 @@ struct mlx5e_priv {
u8 max_opened_tc;
bool tx_ptp_opened;
bool rx_ptp_opened;
- struct hwtstamp_config tstamp;
+ struct kernel_hwtstamp_config hwtstamp_config;
u16 q_counter[MLX5_SD_MAX_GROUP_SZ];
u16 drop_rq_q_counter;
struct notifier_block events_nb;
struct notifier_block blocking_events_nb;
+ struct mlx5e_pcie_cong_event *cong_event;
+
struct udp_tunnel_nic_info nic_info;
#ifdef CONFIG_MLX5_CORE_EN_DCB
struct mlx5e_dcbx dcbx;
@@ -935,6 +941,9 @@ struct mlx5e_priv {
#ifdef CONFIG_MLX5_EN_IPSEC
struct mlx5e_ipsec *ipsec;
#endif
+#ifdef CONFIG_MLX5_EN_PSP
+ struct mlx5e_psp *psp;
+#endif
#ifdef CONFIG_MLX5_EN_TLS
struct mlx5e_tls *tls;
#endif
@@ -949,6 +958,7 @@ struct mlx5e_priv {
struct mlx5e_mqprio_rl *mqprio_rl;
struct dentry *dfs_root;
struct mlx5_devcom_comp_dev *devcom;
+ struct ethtool_fec_hist_range *fec_ranges;
};
struct mlx5e_dev {
@@ -1008,7 +1018,8 @@ void mlx5e_build_ptys2ethtool_map(void);
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_shift,
enum mlx5e_mpwrq_umr_mode umr_mode);
-void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close);
+void mlx5e_shampo_fill_umr(struct mlx5e_rq *rq, int len);
+void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq);
void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s);
@@ -1018,8 +1029,11 @@ void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
u64 *buf);
void mlx5e_set_rx_mode_work(struct work_struct *work);
-int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr);
-int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr);
+int mlx5e_hwtstamp_set(struct mlx5e_priv *priv,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
+int mlx5e_hwtstamp_get(struct mlx5e_priv *priv,
+ struct kernel_hwtstamp_config *config);
int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val, bool rx_filter);
int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
@@ -1040,6 +1054,11 @@ void mlx5e_close_rq(struct mlx5e_rq *rq);
int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param, u16 q_counter);
void mlx5e_destroy_rq(struct mlx5e_rq *rq);
+bool mlx5e_reset_rx_moderation(struct dim_cq_moder *cq_moder, u8 cq_period_mode,
+ bool dim_enabled);
+bool mlx5e_reset_rx_channels_moderation(struct mlx5e_channels *chs, u8 cq_period_mode,
+ bool dim_enabled, bool keep_dim_state);
+
struct mlx5e_sq_param;
int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool,
@@ -1053,6 +1072,7 @@ struct mlx5e_create_cq_param {
struct mlx5e_ch_stats *ch_stats;
int node;
int ix;
+ struct mlx5_uars_page *uar;
};
struct mlx5e_cq_param;
@@ -1060,6 +1080,10 @@ int mlx5e_open_cq(struct mlx5_core_dev *mdev, struct dim_cq_moder moder,
struct mlx5e_cq_param *param, struct mlx5e_create_cq_param *ccp,
struct mlx5e_cq *cq);
void mlx5e_close_cq(struct mlx5e_cq *cq);
+int mlx5e_modify_cq_period_mode(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
+ u8 cq_period_mode);
+int mlx5e_modify_cq_moderation(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
+ u16 cq_period, u16 cq_max_count, u8 cq_period_mode);
int mlx5e_open_locked(struct net_device *netdev);
int mlx5e_close_locked(struct net_device *netdev);
@@ -1087,6 +1111,7 @@ int mlx5e_safe_switch_params(struct mlx5e_priv *priv,
void *context, bool reset);
int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv);
int mlx5e_num_channels_changed_ctx(struct mlx5e_priv *priv, void *context);
+int mlx5e_update_tc_and_tx_queues_ctx(struct mlx5e_priv *priv, void *context);
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);
int mlx5e_ptp_rx_manage_fs_ctx(struct mlx5e_priv *priv, void *ctx);
@@ -1118,6 +1143,11 @@ int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
void mlx5e_tx_err_cqe_work(struct work_struct *recover_work);
void mlx5e_close_txqsq(struct mlx5e_txqsq *sq);
+bool mlx5e_reset_tx_moderation(struct dim_cq_moder *cq_moder, u8 cq_period_mode,
+ bool dim_enabled);
+bool mlx5e_reset_tx_channels_moderation(struct mlx5e_channels *chs, u8 cq_period_mode,
+ bool dim_enabled, bool keep_dim_state);
+
static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
{
return MLX5_CAP_ETH(mdev, swp) &&
@@ -1129,7 +1159,9 @@ extern const struct ethtool_ops mlx5e_ethtool_ops;
int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey);
int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev, bool create_tises);
void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
-int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
+int mlx5e_modify_tirs_lb(struct mlx5_core_dev *mdev, bool enable_uc_lb,
+ bool enable_mc_lb);
+int mlx5e_refresh_tirs(struct mlx5_core_dev *mdev, bool enable_uc_lb,
bool enable_mc_lb);
void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc);
@@ -1143,7 +1175,6 @@ void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq);
int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn);
void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);
-int mlx5e_update_nic_rx(struct mlx5e_priv *priv);
void mlx5e_update_carrier(struct mlx5e_priv *priv);
int mlx5e_close(struct net_device *netdev);
int mlx5e_open(struct net_device *netdev);
@@ -1160,7 +1191,7 @@ void mlx5e_vxlan_set_netdev_info(struct mlx5e_priv *priv);
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
struct ethtool_drvinfo *drvinfo);
void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv,
- uint32_t stringset, uint8_t *data);
+ u32 stringset, u8 *data);
int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset);
void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
struct ethtool_stats *stats, u64 *data);
@@ -1168,35 +1199,30 @@ void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
struct ethtool_ringparam *param,
struct kernel_ethtool_ringparam *kernel_param);
int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
- struct ethtool_ringparam *param);
+ struct ethtool_ringparam *param,
+ struct netlink_ext_ack *extack);
void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
struct ethtool_channels *ch);
int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
struct ethtool_channels *ch);
int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
struct ethtool_coalesce *coal,
- struct kernel_ethtool_coalesce *kernel_coal);
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack);
int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
struct ethtool_coalesce *coal,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack);
-int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
- struct ethtool_link_ksettings *link_ksettings);
-int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
- const struct ethtool_link_ksettings *link_ksettings);
-int mlx5e_get_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh);
-int mlx5e_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh,
- struct netlink_ext_ack *extack);
+int mlx5e_get_per_queue_coalesce(struct net_device *dev, u32 queue,
+ struct ethtool_coalesce *coal);
+int mlx5e_set_per_queue_coalesce(struct net_device *dev, u32 queue,
+ struct ethtool_coalesce *coal);
u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv);
u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv);
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
- struct ethtool_ts_info *info);
+ struct kernel_ethtool_ts_info *info);
int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
struct ethtool_flash *flash);
-void mlx5e_ethtool_get_pauseparam(struct mlx5e_priv *priv,
- struct ethtool_pauseparam *pauseparam);
-int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
- struct ethtool_pauseparam *pauseparam);
/* mlx5e generic netdev management API */
static inline bool
@@ -1222,10 +1248,8 @@ int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
void mlx5e_netdev_attach_nic_profile(struct mlx5e_priv *priv);
void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv);
void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu);
-void mlx5e_rx_dim_work(struct work_struct *work);
-void mlx5e_tx_dim_work(struct work_struct *work);
-void mlx5e_set_xdp_feature(struct net_device *netdev);
+void mlx5e_set_xdp_feature(struct mlx5e_priv *priv);
netdev_features_t mlx5e_features_check(struct sk_buff *skb,
struct net_device *netdev,
netdev_features_t features);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c
index 874a1016623c..66e719e88503 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c
@@ -3,6 +3,7 @@
#include "channels.h"
#include "en.h"
+#include "en/dim.h"
#include "en/ptp.h"
unsigned int mlx5e_channels_get_num(struct mlx5e_channels *chs)
@@ -55,3 +56,85 @@ bool mlx5e_channels_get_ptp_rqn(struct mlx5e_channels *chs, u32 *rqn)
*rqn = c->rq.rqn;
return true;
}
+
+int mlx5e_channels_rx_change_dim(struct mlx5e_channels *chs, bool enable)
+{
+ int i;
+
+ for (i = 0; i < chs->num; i++) {
+ int err = mlx5e_dim_rx_change(&chs->c[i]->rq, enable);
+
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+int mlx5e_channels_tx_change_dim(struct mlx5e_channels *chs, bool enable)
+{
+ int i, tc;
+
+ for (i = 0; i < chs->num; i++) {
+ for (tc = 0; tc < mlx5e_get_dcb_num_tc(&chs->params); tc++) {
+ int err = mlx5e_dim_tx_change(&chs->c[i]->sq[tc], enable);
+
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+int mlx5e_channels_rx_toggle_dim(struct mlx5e_channels *chs)
+{
+ int i;
+
+ for (i = 0; i < chs->num; i++) {
+ /* If dim is enabled for the channel, reset the dim state so the
+ * collected statistics will be reset. This is useful for
+ * supporting legacy interfaces that allow things like changing
+ * the CQ period mode for all channels without disturbing
+ * individual channel configurations.
+ */
+ if (chs->c[i]->rq.dim) {
+ int err;
+
+ mlx5e_dim_rx_change(&chs->c[i]->rq, false);
+ err = mlx5e_dim_rx_change(&chs->c[i]->rq, true);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+int mlx5e_channels_tx_toggle_dim(struct mlx5e_channels *chs)
+{
+ int i, tc;
+
+ for (i = 0; i < chs->num; i++) {
+ for (tc = 0; tc < mlx5e_get_dcb_num_tc(&chs->params); tc++) {
+ int err;
+
+ /* If dim is enabled for the channel, reset the dim
+ * state so the collected statistics will be reset. This
+ * is useful for supporting legacy interfaces that allow
+ * things like changing the CQ period mode for all
+ * channels without disturbing individual channel
+ * configurations.
+ */
+ if (!chs->c[i]->sq[tc].dim)
+ continue;
+
+ mlx5e_dim_tx_change(&chs->c[i]->sq[tc], false);
+ err = mlx5e_dim_tx_change(&chs->c[i]->sq[tc], true);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
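
The four helpers added above implement DIM reset as a disable/enable cycle on every queue that currently has DIM active. Below is a minimal userspace sketch of that toggle pattern, assuming a toy dim context instead of the real mlx5e_dim_rx_change()/mlx5e_dim_tx_change() helpers; all names in it are illustrative, not driver API.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_dim {                /* stand-in for struct dim */
	unsigned long pkts;     /* accumulated statistics */
};

struct toy_queue {
	struct toy_dim *dim;    /* NULL while DIM is disabled */
};

/* Illustrative analogue of mlx5e_dim_rx_change(): enabling allocates a
 * fresh context (statistics restart from zero), disabling frees it. */
static int toy_dim_change(struct toy_queue *q, bool enable)
{
	if (enable == (q->dim != NULL))
		return 0;                 /* already in the requested state */
	if (enable) {
		q->dim = calloc(1, sizeof(*q->dim));
		return q->dim ? 0 : -1;
	}
	free(q->dim);
	q->dim = NULL;
	return 0;
}

/* Analogue of mlx5e_channels_rx_toggle_dim(): queues with DIM disabled are
 * skipped, active ones are bounced so their state is rebuilt. */
static int toy_toggle_dim(struct toy_queue *qs, int n)
{
	for (int i = 0; i < n; i++) {
		if (!qs[i].dim)
			continue;
		toy_dim_change(&qs[i], false);
		if (toy_dim_change(&qs[i], true))
			return -1;
	}
	return 0;
}

int main(void)
{
	struct toy_queue qs[2] = { { 0 }, { 0 } };

	toy_dim_change(&qs[0], true);     /* only queue 0 runs with DIM */
	qs[0].dim->pkts = 12345;          /* pretend stats accumulated  */
	toy_toggle_dim(qs, 2);
	printf("queue0 pkts after toggle: %lu\n", qs[0].dim->pkts); /* 0 */
	return 0;
}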
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h
index 6715aa9383b9..eda80f8c6c02 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h
@@ -15,5 +15,9 @@ void mlx5e_channels_get_regular_rqn(struct mlx5e_channels *chs, unsigned int ix,
void mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn,
u32 *vhca_id);
bool mlx5e_channels_get_ptp_rqn(struct mlx5e_channels *chs, u32 *rqn);
+int mlx5e_channels_rx_change_dim(struct mlx5e_channels *chs, bool enabled);
+int mlx5e_channels_tx_change_dim(struct mlx5e_channels *chs, bool enabled);
+int mlx5e_channels_rx_toggle_dim(struct mlx5e_channels *chs);
+int mlx5e_channels_tx_toggle_dim(struct mlx5e_channels *chs);
#endif /* __MLX5_EN_CHANNELS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h b/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h
index b59aee75de94..2c98a5299df3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h
@@ -26,7 +26,6 @@ struct mlx5e_dcbx {
u8 cap;
/* Buffer configuration */
- bool manual_buffer;
u32 cable_len;
u32 xoff;
u16 port_buff_cell_sz;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
index 0b1ac6e5c890..8818f65d1fbc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
@@ -40,11 +40,8 @@ void mlx5e_destroy_devlink(struct mlx5e_dev *mlx5e_dev)
static void
mlx5e_devlink_get_port_parent_id(struct mlx5_core_dev *dev, struct netdev_phys_item_id *ppid)
{
- u64 parent_id;
-
- parent_id = mlx5_query_nic_system_image_guid(dev);
- ppid->id_len = sizeof(parent_id);
- memcpy(ppid->id, &parent_id, sizeof(parent_id));
+ BUILD_BUG_ON(MLX5_SW_IMAGE_GUID_MAX_BYTES > MAX_PHYS_ITEM_ID_LEN);
+ mlx5_query_nic_sw_system_image_guid(dev, ppid->id, &ppid->id_len);
}
int mlx5e_devlink_port_register(struct mlx5e_dev *mlx5e_dev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/dim.h b/drivers/net/ethernet/mellanox/mlx5/core/en/dim.h
new file mode 100644
index 000000000000..110e2c6b7e51
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/dim.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved */
+
+#ifndef __MLX5_EN_DIM_H__
+#define __MLX5_EN_DIM_H__
+
+#include <linux/dim.h>
+#include <linux/types.h>
+#include <linux/mlx5/mlx5_ifc.h>
+
+/* Forward declarations */
+struct mlx5e_rq;
+struct mlx5e_txqsq;
+struct work_struct;
+
+/* convert a boolean value for cqe mode to appropriate dim constant
+ * true : DIM_CQ_PERIOD_MODE_START_FROM_CQE
+ * false : DIM_CQ_PERIOD_MODE_START_FROM_EQE
+ */
+static inline int mlx5e_dim_cq_period_mode(bool start_from_cqe)
+{
+ return start_from_cqe ? DIM_CQ_PERIOD_MODE_START_FROM_CQE :
+ DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+}
+
+static inline enum mlx5_cq_period_mode
+mlx5e_cq_period_mode(enum dim_cq_period_mode cq_period_mode)
+{
+ switch (cq_period_mode) {
+ case DIM_CQ_PERIOD_MODE_START_FROM_EQE:
+ return MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
+ case DIM_CQ_PERIOD_MODE_START_FROM_CQE:
+ return MLX5_CQ_PERIOD_MODE_START_FROM_CQE;
+ default:
+ WARN_ON_ONCE(true);
+ return MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
+ }
+}
+
+void mlx5e_rx_dim_work(struct work_struct *work);
+void mlx5e_tx_dim_work(struct work_struct *work);
+int mlx5e_dim_rx_change(struct mlx5e_rq *rq, bool enabled);
+int mlx5e_dim_tx_change(struct mlx5e_txqsq *sq, bool enabled);
+
+#endif /* __MLX5_EN_DIM_H__ */
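
The new dim.h above concentrates the two small translations used throughout the moderation rework: a boolean pflag to the generic dim period mode, and the dim mode to the device CQ period mode. A self-contained sketch of both mappings, assuming local stand-in enums rather than the real <linux/dim.h> and mlx5_ifc.h constants:

#include <stdbool.h>
#include <stdio.h>

/* Local stand-ins for the dim and mlx5 period-mode enums. */
enum dim_mode  { DIM_FROM_EQE, DIM_FROM_CQE };
enum mlx5_mode { MLX5_FROM_EQE, MLX5_FROM_CQE };

/* bool pflag -> generic dim mode, cf. mlx5e_dim_cq_period_mode() */
static enum dim_mode dim_period_mode(bool start_from_cqe)
{
	return start_from_cqe ? DIM_FROM_CQE : DIM_FROM_EQE;
}

/* generic dim mode -> device CQ period mode, cf. mlx5e_cq_period_mode() */
static enum mlx5_mode cq_period_mode(enum dim_mode m)
{
	return m == DIM_FROM_CQE ? MLX5_FROM_CQE : MLX5_FROM_EQE;
}

int main(void)
{
	bool cqe_based = true;  /* e.g. the CQE_BASED_MODER pflag */

	printf("dim=%d mlx5=%d\n", dim_period_mode(cqe_based),
	       cq_period_mode(dim_period_mode(cqe_based)));
	return 0;
}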
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index 4d6225e0eec7..c3408b3f7010 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -18,7 +18,8 @@ enum {
enum {
MLX5E_TC_PRIO = 0,
- MLX5E_NIC_PRIO
+ MLX5E_PROMISC_PRIO,
+ MLX5E_NIC_PRIO,
};
struct mlx5e_flow_table {
@@ -56,7 +57,7 @@ struct mlx5e_l2_table {
bool promisc_enabled;
};
-#define MLX5E_NUM_INDIR_TIRS (MLX5_NUM_TT - 1)
+#define MLX5E_NUM_INDIR_TIRS (MLX5_NUM_INDIR_TIRS)
#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
MLX5_HASH_FIELD_SEL_DST_IP)
@@ -68,9 +69,13 @@ struct mlx5e_l2_table {
MLX5_HASH_FIELD_SEL_DST_IP |\
MLX5_HASH_FIELD_SEL_IPSEC_SPI)
-/* NIC prio FTS */
+/* NIC promisc FT level */
enum {
MLX5E_PROMISC_FT_LEVEL,
+};
+
+/* NIC prio FTS */
+enum {
MLX5E_VLAN_FT_LEVEL,
MLX5E_L2_FT_LEVEL,
MLX5E_TTC_FT_LEVEL,
@@ -83,10 +88,11 @@ enum {
#ifdef CONFIG_MLX5_EN_ARFS
MLX5E_ARFS_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
#endif
-#ifdef CONFIG_MLX5_EN_IPSEC
- MLX5E_ACCEL_FS_POL_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
- MLX5E_ACCEL_FS_ESP_FT_LEVEL,
+#if defined(CONFIG_MLX5_EN_IPSEC) || defined(CONFIG_MLX5_EN_PSP)
+ MLX5E_ACCEL_FS_ESP_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL,
+ MLX5E_ACCEL_FS_POL_FT_LEVEL,
+ MLX5E_ACCEL_FS_POL_MISS_FT_LEVEL,
MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL,
#endif
};
@@ -126,7 +132,8 @@ struct mlx5e_ptp_fs;
void mlx5e_set_ttc_params(struct mlx5e_flow_steering *fs,
struct mlx5e_rx_res *rx_res,
- struct ttc_params *ttc_params, bool tunnel);
+ struct ttc_params *ttc_params, bool tunnel,
+ bool ipsec_rss);
void mlx5e_destroy_ttc_table(struct mlx5e_flow_steering *fs);
int mlx5e_create_ttc_table(struct mlx5e_flow_steering *fs,
@@ -154,6 +161,19 @@ struct mlx5e_tc_table *mlx5e_fs_get_tc(struct mlx5e_flow_steering *fs);
struct mlx5e_l2_table *mlx5e_fs_get_l2(struct mlx5e_flow_steering *fs);
struct mlx5_flow_namespace *mlx5e_fs_get_ns(struct mlx5e_flow_steering *fs, bool egress);
void mlx5e_fs_set_ns(struct mlx5e_flow_steering *fs, struct mlx5_flow_namespace *ns, bool egress);
+
+static inline bool mlx5e_fs_has_arfs(struct net_device *netdev)
+{
+ return IS_ENABLED(CONFIG_MLX5_EN_ARFS) &&
+ netdev->hw_features & NETIF_F_NTUPLE;
+}
+
+static inline bool mlx5e_fs_want_arfs(struct net_device *netdev)
+{
+ return IS_ENABLED(CONFIG_MLX5_EN_ARFS) &&
+ netdev->features & NETIF_F_NTUPLE;
+}
+
#ifdef CONFIG_MLX5_EN_RXNFC
struct mlx5e_ethtool_steering *mlx5e_fs_get_ethtool(struct mlx5e_flow_steering *fs);
#endif
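
The two inline helpers added to fs.h encode a capability-versus-request split: mlx5e_fs_has_arfs() checks hw_features (the device can offer aRFS at all), mlx5e_fs_want_arfs() checks features (NTUPLE is currently switched on), and both are short-circuited by the aRFS Kconfig option. A minimal sketch of that distinction with plain bit masks; the flag and struct names below are made up for illustration.

#include <stdbool.h>
#include <stdio.h>

#define F_NTUPLE (1u << 0)        /* stand-in for NETIF_F_NTUPLE */

struct toy_netdev {
	unsigned int hw_features; /* what the device supports      */
	unsigned int features;    /* what is enabled at the moment */
};

static bool has_arfs(const struct toy_netdev *nd)  /* cf. mlx5e_fs_has_arfs()  */
{
	return nd->hw_features & F_NTUPLE;
}

static bool want_arfs(const struct toy_netdev *nd) /* cf. mlx5e_fs_want_arfs() */
{
	return nd->features & F_NTUPLE;
}

int main(void)
{
	/* The device supports aRFS, but the user has ntuple turned off. */
	struct toy_netdev nd = { .hw_features = F_NTUPLE, .features = 0 };

	printf("has=%d want=%d\n", has_arfs(&nd), want_arfs(&nd));
	return 0;
}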
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_ethtool.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_ethtool.h
index 9e276fd3c0cf..c21fe36527a0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_ethtool.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_ethtool.h
@@ -11,6 +11,11 @@ int mlx5e_ethtool_alloc(struct mlx5e_ethtool_steering **ethtool);
void mlx5e_ethtool_free(struct mlx5e_ethtool_steering *ethtool);
void mlx5e_ethtool_init_steering(struct mlx5e_flow_steering *fs);
void mlx5e_ethtool_cleanup_steering(struct mlx5e_flow_steering *fs);
+int mlx5e_ethtool_set_rxfh_fields(struct mlx5e_priv *priv,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack);
+int mlx5e_ethtool_get_rxfh_fields(struct mlx5e_priv *priv,
+ struct ethtool_rxfh_fields *nfc);
int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd);
int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
struct ethtool_rxnfc *info, u32 *rule_locs);
@@ -20,6 +25,15 @@ static inline int mlx5e_ethtool_alloc(struct mlx5e_ethtool_steering **ethtool)
static inline void mlx5e_ethtool_free(struct mlx5e_ethtool_steering *ethtool) { }
static inline void mlx5e_ethtool_init_steering(struct mlx5e_flow_steering *fs) { }
static inline void mlx5e_ethtool_cleanup_steering(struct mlx5e_flow_steering *fs) { }
+static inline int
+mlx5e_ethtool_set_rxfh_fields(struct mlx5e_priv *priv,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
+{ return -EOPNOTSUPP; }
+static inline int
+mlx5e_ethtool_get_rxfh_fields(struct mlx5e_priv *priv,
+ struct ethtool_rxfh_fields *nfc)
+{ return -EOPNOTSUPP; }
static inline int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd)
{ return -EOPNOTSUPP; }
static inline int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
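
As with the existing rxnfc entry points, the new rxfh_fields prototypes get static inline stubs returning -EOPNOTSUPP when CONFIG_MLX5_EN_RXNFC is off, so call sites stay free of #ifdefs. A compressed sketch of that header pattern, assuming a hypothetical TOY_CONFIG_RXNFC option and function name:

#include <errno.h>
#include <stdio.h>

/* #define TOY_CONFIG_RXNFC 1  -- define to select the real implementation */

#ifdef TOY_CONFIG_RXNFC
int toy_set_rxfh_fields(int val);            /* real implementation elsewhere */
#else
/* Stub: callers simply observe -EOPNOTSUPP when the feature is compiled out. */
static inline int toy_set_rxfh_fields(int val)
{
	(void)val;
	return -EOPNOTSUPP;
}
#endif

int main(void)
{
	if (toy_set_rxfh_fields(42) == -EOPNOTSUPP)
		printf("feature compiled out, caller degrades gracefully\n");
	return 0;
}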
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
index 81523825faa2..cb972b2d46e2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
@@ -114,6 +114,7 @@ int mlx5e_health_recover_channels(struct mlx5e_priv *priv)
int err = 0;
rtnl_lock();
+ netdev_lock(priv->netdev);
mutex_lock(&priv->state_lock);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
@@ -123,6 +124,7 @@ int mlx5e_health_recover_channels(struct mlx5e_priv *priv)
out:
mutex_unlock(&priv->state_lock);
+ netdev_unlock(priv->netdev);
rtnl_unlock();
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
index b4f3bd7d346e..195863b2c013 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
@@ -138,8 +138,8 @@ void mlx5e_hv_vhca_stats_create(struct mlx5e_priv *priv)
if (IS_ERR_OR_NULL(agent)) {
if (IS_ERR(agent))
netdev_warn(priv->netdev,
- "Failed to create hv vhca stats agent, err = %ld\n",
- PTR_ERR(agent));
+ "Failed to create hv vhca stats agent, err = %pe\n",
+ agent);
kvfree(priv->stats_agent.buf);
return;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c b/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c
index 4e72ca8070e2..1de18c7e96ec 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c
@@ -6,6 +6,7 @@
#include <linux/xarray.h>
#include <linux/hashtable.h>
#include <linux/refcount.h>
+#include <linux/mlx5/driver.h>
#include "mapping.h"
@@ -24,7 +25,8 @@ struct mapping_ctx {
struct delayed_work dwork;
struct list_head pending_list;
spinlock_t pending_list_lock; /* Guards pending list */
- u64 id;
+ u8 id[MLX5_SW_IMAGE_GUID_MAX_BYTES];
+ u8 id_len;
u8 type;
struct list_head list;
refcount_t refcount;
@@ -220,13 +222,15 @@ mapping_create(size_t data_size, u32 max_id, bool delayed_removal)
}
struct mapping_ctx *
-mapping_create_for_id(u64 id, u8 type, size_t data_size, u32 max_id, bool delayed_removal)
+mapping_create_for_id(u8 *id, u8 id_len, u8 type, size_t data_size, u32 max_id,
+ bool delayed_removal)
{
struct mapping_ctx *ctx;
mutex_lock(&shared_ctx_lock);
list_for_each_entry(ctx, &shared_ctx_list, list) {
- if (ctx->id == id && ctx->type == type) {
+ if (ctx->type == type && ctx->id_len == id_len &&
+ !memcmp(id, ctx->id, id_len)) {
if (refcount_inc_not_zero(&ctx->refcount))
goto unlock;
break;
@@ -237,7 +241,8 @@ mapping_create_for_id(u64 id, u8 type, size_t data_size, u32 max_id, bool delaye
if (IS_ERR(ctx))
goto unlock;
- ctx->id = id;
+ memcpy(ctx->id, id, id_len);
+ ctx->id_len = id_len;
ctx->type = type;
list_add(&ctx->list, &shared_ctx_list);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h b/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h
index 4e2119f0f4c1..e86a103d58b9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h
@@ -27,6 +27,7 @@ void mapping_destroy(struct mapping_ctx *ctx);
/* adds a mapping with an id or gets an existing mapping with the same id
*/
struct mapping_ctx *
-mapping_create_for_id(u64 id, u8 type, size_t data_size, u32 max_id, bool delayed_removal);
+mapping_create_for_id(u8 *id, u8 id_len, u8 type, size_t data_size, u32 max_id,
+ bool delayed_removal);
#endif /* __MLX5_MAPPING_H__ */
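
mapping_create_for_id() now shares a context on a (type, id_len, id bytes) match instead of a single u64, which lets callers key it by the variable-length software image GUID. A userspace sketch of just that lookup rule; the struct and helper names are illustrative and the list handling is omitted.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define ID_MAX 16 /* stand-in for MLX5_SW_IMAGE_GUID_MAX_BYTES */

struct toy_ctx {
	unsigned char id[ID_MAX];
	unsigned char id_len;
	unsigned char type;
};

/* Same ordering as the patched lookup: cheap type and length checks first,
 * then a byte-wise compare limited to id_len bytes. */
static bool ctx_matches(const struct toy_ctx *ctx, const unsigned char *id,
			unsigned char id_len, unsigned char type)
{
	return ctx->type == type && ctx->id_len == id_len &&
	       !memcmp(ctx->id, id, id_len);
}

int main(void)
{
	struct toy_ctx ctx = { .id = { 0xaa, 0xbb, 0xcc }, .id_len = 3, .type = 1 };
	unsigned char want[] = { 0xaa, 0xbb, 0xcc };

	printf("match: %d\n", ctx_matches(&ctx, want, sizeof(want), 1));
	return 0;
}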
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index a3f31d9d527e..c9bdee9a8b30 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -6,9 +6,14 @@
#include "en/port.h"
#include "en_accel/en_accel.h"
#include "en_accel/ipsec.h"
+#include "en_accel/psp.h"
+#include <linux/dim.h>
#include <net/page_pool/types.h>
#include <net/xdp_sock_drv.h>
+#define MLX5_MPWRQ_MAX_LOG_WQE_SZ 18
+#define MLX5_REP_MPWRQ_MAX_LOG_WQE_SZ 17
+
static u8 mlx5e_mpwrq_min_page_shift(struct mlx5_core_dev *mdev)
{
u8 min_page_shift = MLX5_CAP_GEN_2(mdev, log_min_mkey_entity_size);
@@ -95,25 +100,29 @@ u8 mlx5e_mpwrq_umr_entry_size(enum mlx5e_mpwrq_umr_mode mode)
return sizeof(struct mlx5_ksm) * 4;
}
WARN_ONCE(1, "MPWRQ UMR mode %d is not known\n", mode);
- return 0;
+ return 1;
}
u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
enum mlx5e_mpwrq_umr_mode umr_mode)
{
u8 umr_entry_size = mlx5e_mpwrq_umr_entry_size(umr_mode);
- u8 max_pages_per_wqe, max_log_mpwqe_size;
+ u8 max_pages_per_wqe, max_log_wqe_size_calc;
+ u8 max_log_wqe_size_cap;
u16 max_wqe_size;
/* Keep in sync with MLX5_MPWRQ_MAX_PAGES_PER_WQE. */
max_wqe_size = mlx5e_get_max_sq_aligned_wqebbs(mdev) * MLX5_SEND_WQE_BB;
max_pages_per_wqe = ALIGN_DOWN(max_wqe_size - sizeof(struct mlx5e_umr_wqe),
MLX5_UMR_FLEX_ALIGNMENT) / umr_entry_size;
- max_log_mpwqe_size = ilog2(max_pages_per_wqe) + page_shift;
+ max_log_wqe_size_calc = ilog2(max_pages_per_wqe) + page_shift;
- WARN_ON_ONCE(max_log_mpwqe_size < MLX5E_ORDER2_MAX_PACKET_MTU);
+ WARN_ON_ONCE(max_log_wqe_size_calc < MLX5E_ORDER2_MAX_PACKET_MTU);
- return min_t(u8, max_log_mpwqe_size, MLX5_MPWRQ_MAX_LOG_WQE_SZ);
+ max_log_wqe_size_cap = mlx5_core_is_ecpf(mdev) ?
+ MLX5_REP_MPWRQ_MAX_LOG_WQE_SZ : MLX5_MPWRQ_MAX_LOG_WQE_SZ;
+
+ return min_t(u8, max_log_wqe_size_calc, max_log_wqe_size_cap);
}
u8 mlx5e_mpwrq_pages_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift,
@@ -406,25 +415,10 @@ u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5_core_dev *mdev,
return params->log_rq_mtu_frames - log_pkts_per_wqe;
}
-u8 mlx5e_shampo_get_log_hd_entry_size(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params)
+static u8 mlx5e_shampo_get_log_pkt_per_rsrv(struct mlx5e_params *params)
{
- return order_base_2(DIV_ROUND_UP(MLX5E_RX_MAX_HEAD, MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE));
-}
-
-u8 mlx5e_shampo_get_log_rsrv_size(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params)
-{
- return order_base_2(MLX5E_SHAMPO_WQ_RESRV_SIZE / MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE);
-}
-
-u8 mlx5e_shampo_get_log_pkt_per_rsrv(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params)
-{
- u32 resrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) *
- PAGE_SIZE;
-
- return order_base_2(DIV_ROUND_UP(resrv_size, params->sw_mtu));
+ return order_base_2(DIV_ROUND_UP(MLX5E_SHAMPO_WQ_RESRV_SIZE,
+ params->sw_mtu));
}
u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
@@ -513,77 +507,6 @@ int mlx5e_validate_params(struct mlx5_core_dev *mdev, struct mlx5e_params *param
return 0;
}
-static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
-{
- struct dim_cq_moder moder = {};
-
- moder.cq_period_mode = cq_period_mode;
- moder.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
- moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
- if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
- moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE;
-
- return moder;
-}
-
-static struct dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode)
-{
- struct dim_cq_moder moder = {};
-
- moder.cq_period_mode = cq_period_mode;
- moder.pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
- moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
- if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
- moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
-
- return moder;
-}
-
-static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode)
-{
- return cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE ?
- DIM_CQ_PERIOD_MODE_START_FROM_CQE :
- DIM_CQ_PERIOD_MODE_START_FROM_EQE;
-}
-
-void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
-{
- if (params->tx_dim_enabled) {
- u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);
-
- params->tx_cq_moderation = net_dim_get_def_tx_moderation(dim_period_mode);
- } else {
- params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode);
- }
-}
-
-void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
-{
- if (params->rx_dim_enabled) {
- u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);
-
- params->rx_cq_moderation = net_dim_get_def_rx_moderation(dim_period_mode);
- } else {
- params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode);
- }
-}
-
-void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
-{
- mlx5e_reset_tx_moderation(params, cq_period_mode);
- MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
- params->tx_cq_moderation.cq_period_mode ==
- MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
-}
-
-void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
-{
- mlx5e_reset_rx_moderation(params, cq_period_mode);
- MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
- params->rx_cq_moderation.cq_period_mode ==
- MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
-}
-
bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
{
u32 link_speed = 0;
@@ -689,6 +612,7 @@ void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e
.ch_stats = c->stats,
.node = cpu_to_node(c->cpu),
.ix = c->vec_ix,
+ .uar = c->bfreg->up,
};
}
@@ -888,7 +812,7 @@ static void mlx5e_build_common_cq_param(struct mlx5_core_dev *mdev,
{
void *cqc = param->cqc;
- MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
+ MLX5_SET(cqc, cqc, uar_page, mdev->priv.bfreg.up->index);
if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128)
MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);
}
@@ -897,12 +821,12 @@ static u32 mlx5e_shampo_get_log_cq_size(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk)
{
- int rsrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * PAGE_SIZE;
u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
- int pkt_per_rsrv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
+ int pkt_per_rsrv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(params));
int wq_size = BIT(mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
int wqe_size = BIT(log_stride_sz) * num_strides;
+ int rsrv_size = MLX5E_SHAMPO_WQ_RESRV_SIZE;
 /* +1 is for the case that the pkt_per_rsrv doesn't consume the reservation
* so we get a filler cqe for the rest of the reservation.
@@ -963,6 +887,7 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
{
void *rqc = param->rqc;
void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
+ u32 lro_timeout;
int ndsegs = 1;
int err;
@@ -988,22 +913,27 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
MLX5_SET(wq, wq, log_wqe_stride_size,
log_wqe_stride_size - MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
- if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
- MLX5_SET(wq, wq, shampo_enable, true);
- MLX5_SET(wq, wq, log_reservation_size,
- mlx5e_shampo_get_log_rsrv_size(mdev, params));
- MLX5_SET(wq, wq,
- log_max_num_of_packets_per_reservation,
- mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
- MLX5_SET(wq, wq, log_headers_entry_size,
- mlx5e_shampo_get_log_hd_entry_size(mdev, params));
- MLX5_SET(rqc, rqc, reservation_timeout,
- params->packet_merge.timeout);
- MLX5_SET(rqc, rqc, shampo_match_criteria_type,
- params->packet_merge.shampo.match_criteria_type);
- MLX5_SET(rqc, rqc, shampo_no_match_alignment_granularity,
- params->packet_merge.shampo.alignment_granularity);
- }
+ if (params->packet_merge.type != MLX5E_PACKET_MERGE_SHAMPO)
+ break;
+
+ MLX5_SET(wq, wq, shampo_enable, true);
+ MLX5_SET(wq, wq, log_reservation_size,
+ MLX5E_SHAMPO_WQ_LOG_RESRV_SIZE -
+ MLX5E_SHAMPO_WQ_RESRV_SIZE_BASE_SHIFT);
+ MLX5_SET(wq, wq,
+ log_max_num_of_packets_per_reservation,
+ mlx5e_shampo_get_log_pkt_per_rsrv(params));
+ MLX5_SET(wq, wq, log_headers_entry_size,
+ MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE -
+ MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE_SHIFT);
+ lro_timeout =
+ mlx5e_choose_lro_timeout(mdev,
+ MLX5E_DEFAULT_SHAMPO_TIMEOUT);
+ MLX5_SET(rqc, rqc, reservation_timeout, lro_timeout);
+ MLX5_SET(rqc, rqc, shampo_match_criteria_type,
+ MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_EXTENDED);
+ MLX5_SET(rqc, rqc, shampo_no_match_alignment_granularity,
+ MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_STRIDE);
break;
}
default: /* MLX5_WQ_TYPE_CYCLIC */
@@ -1075,7 +1005,8 @@ void mlx5e_build_sq_param(struct mlx5_core_dev *mdev,
bool allow_swp;
allow_swp = mlx5_geneve_tx_allowed(mdev) ||
- (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_CRYPTO);
+ (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_CRYPTO) ||
+ mlx5_is_psp_device(mdev);
mlx5e_build_sq_param_common(mdev, param);
MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
MLX5_SET(sqc, sqc, allow_swp, allow_swp);
@@ -1106,17 +1037,17 @@ u32 mlx5e_shampo_hd_per_wqe(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_rq_param *rq_param)
{
- int resv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * PAGE_SIZE;
u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, NULL));
- int pkt_per_resv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL);
+ int pkt_per_rsrv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(params));
int wqe_size = BIT(log_stride_sz) * num_strides;
+ int rsrv_size = MLX5E_SHAMPO_WQ_RESRV_SIZE;
u32 hd_per_wqe;
/* Assumption: hd_per_wqe % 8 == 0. */
- hd_per_wqe = (wqe_size / resv_size) * pkt_per_resv;
- mlx5_core_dbg(mdev, "%s hd_per_wqe = %d rsrv_size = %d wqe_size = %d pkt_per_resv = %d\n",
- __func__, hd_per_wqe, resv_size, wqe_size, pkt_per_resv);
+ hd_per_wqe = (wqe_size / rsrv_size) * pkt_per_rsrv;
+ mlx5_core_dbg(mdev, "%s hd_per_wqe = %d rsrv_size = %d wqe_size = %d pkt_per_rsrv = %d\n",
+ __func__, hd_per_wqe, rsrv_size, wqe_size, pkt_per_rsrv);
return hd_per_wqe;
}
@@ -1141,22 +1072,36 @@ static u32 mlx5e_shampo_icosq_sz(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_rq_param *rq_param)
{
- int max_num_of_umr_per_wqe, max_hd_per_wqe, max_klm_per_umr, rest;
+ int max_num_of_umr_per_wqe, max_hd_per_wqe, max_ksm_per_umr, rest;
void *wqc = MLX5_ADDR_OF(rqc, rq_param->rqc, wq);
int wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
u32 wqebbs;
- max_klm_per_umr = MLX5E_MAX_KLM_PER_WQE(mdev);
+ max_ksm_per_umr = MLX5E_MAX_KSM_PER_WQE(mdev);
max_hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rq_param);
- max_num_of_umr_per_wqe = max_hd_per_wqe / max_klm_per_umr;
- rest = max_hd_per_wqe % max_klm_per_umr;
- wqebbs = MLX5E_KLM_UMR_WQEBBS(max_klm_per_umr) * max_num_of_umr_per_wqe;
+ max_num_of_umr_per_wqe = max_hd_per_wqe / max_ksm_per_umr;
+ rest = max_hd_per_wqe % max_ksm_per_umr;
+ wqebbs = MLX5E_KSM_UMR_WQEBBS(max_ksm_per_umr) * max_num_of_umr_per_wqe;
if (rest)
- wqebbs += MLX5E_KLM_UMR_WQEBBS(rest);
+ wqebbs += MLX5E_KSM_UMR_WQEBBS(rest);
wqebbs *= wq_size;
return wqebbs;
}
+#define MLX5E_LRO_TIMEOUT_ARR_SIZE 4
+
+u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
+{
+ int i;
+
+ /* The supported periods are organized in ascending order */
+ for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++)
+ if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout)
+ break;
+
+ return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
+}
+
static u32 mlx5e_mpwrq_total_umr_wqebbs(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk)
@@ -1287,7 +1232,6 @@ static void mlx5e_build_async_icosq_param(struct mlx5_core_dev *mdev,
void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk,
struct mlx5e_sq_param *param)
{
void *sqc = param->sqc;
@@ -1296,7 +1240,6 @@ void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
mlx5e_build_sq_param_common(mdev, param);
MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);
- param->is_xdp_mb = !mlx5e_rx_is_linear_skb(mdev, params, xsk);
mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
}
@@ -1315,7 +1258,7 @@ int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
async_icosq_log_wq_sz = mlx5e_build_async_icosq_log_wq_sz(mdev);
mlx5e_build_sq_param(mdev, params, &cparam->txq_sq);
- mlx5e_build_xdpsq_param(mdev, params, NULL, &cparam->xdp_sq);
+ mlx5e_build_xdpsq_param(mdev, params, &cparam->xdp_sq);
mlx5e_build_icosq_param(mdev, icosq_log_wq_sz, &cparam->icosq);
mlx5e_build_async_icosq_param(mdev, async_icosq_log_wq_sz, &cparam->async_icosq);
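
mlx5e_choose_lro_timeout() above scans the device's four supported periods, which the firmware reports in ascending order, and returns the first one that covers the wanted timeout, falling back to the largest entry. The same selection over a plain array, as a standalone sketch with made-up period values:

#include <stdio.h>

#define N_PERIODS 4 /* cf. MLX5E_LRO_TIMEOUT_ARR_SIZE */

/* Smallest supported period >= wanted; if none is large enough the loop
 * stops at the last index, i.e. the largest supported period. */
static unsigned int choose_timeout(const unsigned int periods[N_PERIODS],
				   unsigned int wanted)
{
	int i;

	for (i = 0; i < N_PERIODS - 1; i++)
		if (periods[i] >= wanted)
			break;
	return periods[i];
}

int main(void)
{
	unsigned int periods[N_PERIODS] = { 8, 16, 32, 1024 }; /* example usecs */

	printf("%u\n", choose_timeout(periods, 20));   /* -> 32   */
	printf("%u\n", choose_timeout(periods, 9999)); /* -> 1024 */
	return 0;
}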
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index 9a781f18b57f..00617c65fe3c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -33,7 +33,6 @@ struct mlx5e_sq_param {
struct mlx5_wq_param wq;
bool is_mpw;
bool is_tls;
- bool is_xdp_mb;
u16 stop_room;
};
@@ -52,6 +51,7 @@ struct mlx5e_create_sq_param {
u32 tisn;
u8 tis_lst_sz;
u8 min_inline_mode;
+ u32 uar_page;
};
/* Striding RQ dynamic parameters */
@@ -77,11 +77,6 @@ u8 mlx5e_mpwrq_max_log_rq_pkts(struct mlx5_core_dev *mdev, u8 page_shift,
/* Parameter calculations */
-void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode);
-void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode);
-void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);
-void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);
-
bool slow_pci_heuristic(struct mlx5_core_dev *mdev);
int mlx5e_mpwrq_validate_regular(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *params,
@@ -101,18 +96,13 @@ bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
-u8 mlx5e_shampo_get_log_hd_entry_size(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params);
-u8 mlx5e_shampo_get_log_rsrv_size(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params);
-u8 mlx5e_shampo_get_log_pkt_per_rsrv(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params);
u32 mlx5e_shampo_hd_per_wqe(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_rq_param *rq_param);
u32 mlx5e_shampo_hd_per_wq(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_rq_param *rq_param);
+u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout);
u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
@@ -143,7 +133,6 @@ void mlx5e_build_tx_cq_param(struct mlx5_core_dev *mdev,
struct mlx5e_cq_param *param);
void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk,
struct mlx5e_sq_param *param);
int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/pcie_cong_event.c b/drivers/net/ethernet/mellanox/mlx5/core/en/pcie_cong_event.c
new file mode 100644
index 000000000000..2eb666a46f39
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/pcie_cong_event.c
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES.
+
+#include "../devlink.h"
+#include "en.h"
+#include "pcie_cong_event.h"
+
+#define MLX5E_CONG_HIGH_STATE 0x7
+
+enum {
+ MLX5E_INBOUND_CONG = BIT(0),
+ MLX5E_OUTBOUND_CONG = BIT(1),
+};
+
+struct mlx5e_pcie_cong_thresh {
+ u16 inbound_high;
+ u16 inbound_low;
+ u16 outbound_high;
+ u16 outbound_low;
+};
+
+struct mlx5e_pcie_cong_stats {
+ u32 pci_bw_inbound_high;
+ u32 pci_bw_inbound_low;
+ u32 pci_bw_outbound_high;
+ u32 pci_bw_outbound_low;
+ u32 pci_bw_stale_event;
+};
+
+struct mlx5e_pcie_cong_event {
+ u64 obj_id;
+
+ struct mlx5e_priv *priv;
+
+ /* For event notifier and workqueue. */
+ struct work_struct work;
+ struct mlx5_nb nb;
+
+ /* Stores last read state. */
+ u8 state;
+
+ /* For ethtool stats group. */
+ struct mlx5e_pcie_cong_stats stats;
+};
+
+static const struct counter_desc mlx5e_pcie_cong_stats_desc[] = {
+ { MLX5E_DECLARE_STAT(struct mlx5e_pcie_cong_stats,
+ pci_bw_inbound_high) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_pcie_cong_stats,
+ pci_bw_inbound_low) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_pcie_cong_stats,
+ pci_bw_outbound_high) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_pcie_cong_stats,
+ pci_bw_outbound_low) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_pcie_cong_stats,
+ pci_bw_stale_event) },
+};
+
+#define NUM_PCIE_CONG_COUNTERS ARRAY_SIZE(mlx5e_pcie_cong_stats_desc)
+
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pcie_cong)
+{
+ return priv->cong_event ? NUM_PCIE_CONG_COUNTERS : 0;
+}
+
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pcie_cong) {}
+
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pcie_cong)
+{
+ if (!priv->cong_event)
+ return;
+
+ for (int i = 0; i < NUM_PCIE_CONG_COUNTERS; i++)
+ ethtool_puts(data, mlx5e_pcie_cong_stats_desc[i].format);
+}
+
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pcie_cong)
+{
+ if (!priv->cong_event)
+ return;
+
+ for (int i = 0; i < NUM_PCIE_CONG_COUNTERS; i++) {
+ u32 ctr = MLX5E_READ_CTR32_CPU(&priv->cong_event->stats,
+ mlx5e_pcie_cong_stats_desc,
+ i);
+
+ mlx5e_ethtool_put_stat(data, ctr);
+ }
+}
+
+MLX5E_DEFINE_STATS_GRP(pcie_cong, 0);
+
+static int
+mlx5_cmd_pcie_cong_event_set(struct mlx5_core_dev *dev,
+ const struct mlx5e_pcie_cong_thresh *config,
+ u64 *obj_id)
+{
+ u32 in[MLX5_ST_SZ_DW(pcie_cong_event_cmd_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+ void *cong_obj;
+ void *hdr;
+ int err;
+
+ hdr = MLX5_ADDR_OF(pcie_cong_event_cmd_in, in, hdr);
+ cong_obj = MLX5_ADDR_OF(pcie_cong_event_cmd_in, in, cong_obj);
+
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
+ MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
+ MLX5_GENERAL_OBJECT_TYPES_PCIE_CONG_EVENT);
+
+ MLX5_SET(pcie_cong_event_obj, cong_obj, inbound_event_en, 1);
+ MLX5_SET(pcie_cong_event_obj, cong_obj, outbound_event_en, 1);
+
+ MLX5_SET(pcie_cong_event_obj, cong_obj,
+ inbound_cong_high_threshold, config->inbound_high);
+ MLX5_SET(pcie_cong_event_obj, cong_obj,
+ inbound_cong_low_threshold, config->inbound_low);
+
+ MLX5_SET(pcie_cong_event_obj, cong_obj,
+ outbound_cong_high_threshold, config->outbound_high);
+ MLX5_SET(pcie_cong_event_obj, cong_obj,
+ outbound_cong_low_threshold, config->outbound_low);
+
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+
+ mlx5_core_dbg(dev, "PCIe congestion event (obj_id=%llu) created. Config: in: [%u, %u], out: [%u, %u]\n",
+ *obj_id,
+ config->inbound_high, config->inbound_low,
+ config->outbound_high, config->outbound_low);
+
+ return 0;
+}
+
+static int mlx5_cmd_pcie_cong_event_destroy(struct mlx5_core_dev *dev,
+ u64 obj_id)
+{
+ u32 in[MLX5_ST_SZ_DW(pcie_cong_event_cmd_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+ void *hdr;
+
+ hdr = MLX5_ADDR_OF(pcie_cong_event_cmd_in, in, hdr);
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
+ MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
+ MLX5_GENERAL_OBJECT_TYPES_PCIE_CONG_EVENT);
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_id, obj_id);
+
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+static int mlx5_cmd_pcie_cong_event_query(struct mlx5_core_dev *dev,
+ u64 obj_id,
+ u32 *state)
+{
+ u32 in[MLX5_ST_SZ_DW(pcie_cong_event_cmd_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(pcie_cong_event_cmd_out)];
+ void *obj;
+ void *hdr;
+ u8 cong;
+ int err;
+
+ hdr = MLX5_ADDR_OF(pcie_cong_event_cmd_in, in, hdr);
+
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
+ MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
+ MLX5_GENERAL_OBJECT_TYPES_PCIE_CONG_EVENT);
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_id, obj_id);
+
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ obj = MLX5_ADDR_OF(pcie_cong_event_cmd_out, out, cong_obj);
+
+ if (state) {
+ cong = MLX5_GET(pcie_cong_event_obj, obj, inbound_cong_state);
+ if (cong == MLX5E_CONG_HIGH_STATE)
+ *state |= MLX5E_INBOUND_CONG;
+
+ cong = MLX5_GET(pcie_cong_event_obj, obj, outbound_cong_state);
+ if (cong == MLX5E_CONG_HIGH_STATE)
+ *state |= MLX5E_OUTBOUND_CONG;
+ }
+
+ return 0;
+}
+
+static void mlx5e_pcie_cong_event_work(struct work_struct *work)
+{
+ struct mlx5e_pcie_cong_event *cong_event;
+ struct mlx5_core_dev *dev;
+ struct mlx5e_priv *priv;
+ u32 new_cong_state = 0;
+ u32 changes;
+ int err;
+
+ cong_event = container_of(work, struct mlx5e_pcie_cong_event, work);
+ priv = cong_event->priv;
+ dev = priv->mdev;
+
+ err = mlx5_cmd_pcie_cong_event_query(dev, cong_event->obj_id,
+ &new_cong_state);
+ if (err) {
+ mlx5_core_warn(dev, "Error %d when querying PCIe cong event object (obj_id=%llu).\n",
+ err, cong_event->obj_id);
+ return;
+ }
+
+ changes = cong_event->state ^ new_cong_state;
+ if (!changes) {
+ cong_event->stats.pci_bw_stale_event++;
+ return;
+ }
+
+ cong_event->state = new_cong_state;
+
+ if (changes & MLX5E_INBOUND_CONG) {
+ if (new_cong_state & MLX5E_INBOUND_CONG)
+ cong_event->stats.pci_bw_inbound_high++;
+ else
+ cong_event->stats.pci_bw_inbound_low++;
+ }
+
+ if (changes & MLX5E_OUTBOUND_CONG) {
+ if (new_cong_state & MLX5E_OUTBOUND_CONG)
+ cong_event->stats.pci_bw_outbound_high++;
+ else
+ cong_event->stats.pci_bw_outbound_low++;
+ }
+}
+
+static int mlx5e_pcie_cong_event_handler(struct notifier_block *nb,
+ unsigned long event, void *eqe)
+{
+ struct mlx5e_pcie_cong_event *cong_event;
+
+ cong_event = mlx5_nb_cof(nb, struct mlx5e_pcie_cong_event, nb);
+ queue_work(cong_event->priv->wq, &cong_event->work);
+
+ return NOTIFY_OK;
+}
+
+static int
+mlx5e_pcie_cong_get_thresh_config(struct mlx5_core_dev *dev,
+ struct mlx5e_pcie_cong_thresh *config)
+{
+ u32 ids[4] = {
+ MLX5_DEVLINK_PARAM_ID_PCIE_CONG_IN_LOW,
+ MLX5_DEVLINK_PARAM_ID_PCIE_CONG_IN_HIGH,
+ MLX5_DEVLINK_PARAM_ID_PCIE_CONG_OUT_LOW,
+ MLX5_DEVLINK_PARAM_ID_PCIE_CONG_OUT_HIGH,
+ };
+ struct devlink *devlink = priv_to_devlink(dev);
+ union devlink_param_value val[4];
+
+ for (int i = 0; i < 4; i++) {
+ u32 id = ids[i];
+ int err;
+
+ err = devl_param_driverinit_value_get(devlink, id, &val[i]);
+ if (err)
+ return err;
+ }
+
+ config->inbound_low = val[0].vu16;
+ config->inbound_high = val[1].vu16;
+ config->outbound_low = val[2].vu16;
+ config->outbound_high = val[3].vu16;
+
+ return 0;
+}
+
+static int
+mlx5e_thresh_config_validate(struct mlx5_core_dev *mdev,
+ const struct mlx5e_pcie_cong_thresh *config)
+{
+ int err = 0;
+
+ if (config->inbound_low >= config->inbound_high) {
+ err = -EINVAL;
+ mlx5_core_err(mdev, "PCIe inbound congestion threshold configuration invalid: low (%u) >= high (%u).\n",
+ config->inbound_low, config->inbound_high);
+ }
+
+ if (config->outbound_low >= config->outbound_high) {
+ err = -EINVAL;
+ mlx5_core_err(mdev, "PCIe outbound congestion threshold configuration invalid: low (%u) >= high (%u).\n",
+ config->outbound_low, config->outbound_high);
+ }
+
+ return err;
+}
+
+int mlx5e_pcie_cong_event_init(struct mlx5e_priv *priv)
+{
+ struct mlx5e_pcie_cong_thresh thresh_config = {};
+ struct mlx5e_pcie_cong_event *cong_event;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int err;
+
+ if (!mlx5_pcie_cong_event_supported(mdev))
+ return 0;
+
+ err = mlx5e_pcie_cong_get_thresh_config(mdev, &thresh_config);
+ if (WARN_ON(err))
+ return err;
+
+ err = mlx5e_thresh_config_validate(mdev, &thresh_config);
+ if (err) {
+ mlx5_core_err(mdev, "PCIe congestion event feature disabled\n");
+ return err;
+ }
+
+ cong_event = kvzalloc_node(sizeof(*cong_event), GFP_KERNEL,
+ mdev->priv.numa_node);
+ if (!cong_event)
+ return -ENOMEM;
+
+ INIT_WORK(&cong_event->work, mlx5e_pcie_cong_event_work);
+ MLX5_NB_INIT(&cong_event->nb, mlx5e_pcie_cong_event_handler,
+ OBJECT_CHANGE);
+
+ cong_event->priv = priv;
+
+ err = mlx5_cmd_pcie_cong_event_set(mdev, &thresh_config,
+ &cong_event->obj_id);
+ if (err) {
+ mlx5_core_warn(mdev, "Error creating a PCIe congestion event object\n");
+ goto err_free;
+ }
+
+ err = mlx5_eq_notifier_register(mdev, &cong_event->nb);
+ if (err) {
+ mlx5_core_warn(mdev, "Error registering notifier for the PCIe congestion event\n");
+ goto err_obj_destroy;
+ }
+
+ priv->cong_event = cong_event;
+
+ return 0;
+
+err_obj_destroy:
+ mlx5_cmd_pcie_cong_event_destroy(mdev, cong_event->obj_id);
+err_free:
+ kvfree(cong_event);
+
+ return err;
+}
+
+void mlx5e_pcie_cong_event_cleanup(struct mlx5e_priv *priv)
+{
+ struct mlx5e_pcie_cong_event *cong_event = priv->cong_event;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ if (!cong_event)
+ return;
+
+ priv->cong_event = NULL;
+
+ mlx5_eq_notifier_unregister(mdev, &cong_event->nb);
+ cancel_work_sync(&cong_event->work);
+
+ if (mlx5_cmd_pcie_cong_event_destroy(mdev, cong_event->obj_id))
+ mlx5_core_warn(mdev, "Error destroying PCIe congestion event (obj_id=%llu)\n",
+ cong_event->obj_id);
+
+ kvfree(cong_event);
+}
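
The event work in the new file above folds the queried congestion bitmap into per-direction counters: XOR against the previously stored state isolates the bits that flipped, the new value of each flipped bit decides between the high and low counter, and an event that changes nothing only bumps the stale counter. A compact userspace sketch of that accounting, with local stand-ins for the state bits and stats struct:

#include <stdio.h>

#define IN_CONG  (1u << 0) /* cf. MLX5E_INBOUND_CONG  */
#define OUT_CONG (1u << 1) /* cf. MLX5E_OUTBOUND_CONG */

struct toy_stats {
	unsigned int in_high, in_low, out_high, out_low, stale;
};

static void account(unsigned int *state, unsigned int new_state,
		    struct toy_stats *s)
{
	unsigned int changes = *state ^ new_state; /* bits that flipped */

	if (!changes) {
		s->stale++;
		return;
	}
	*state = new_state;

	if (changes & IN_CONG) {
		if (new_state & IN_CONG)
			s->in_high++;
		else
			s->in_low++;
	}
	if (changes & OUT_CONG) {
		if (new_state & OUT_CONG)
			s->out_high++;
		else
			s->out_low++;
	}
}

int main(void)
{
	struct toy_stats s = { 0 };
	unsigned int state = 0;

	account(&state, IN_CONG, &s);  /* inbound goes high          */
	account(&state, IN_CONG, &s);  /* nothing changed -> stale   */
	account(&state, OUT_CONG, &s); /* inbound low, outbound high */
	printf("in %u/%u out %u/%u stale %u\n",
	       s.in_high, s.in_low, s.out_high, s.out_low, s.stale);
	return 0;
}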
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/pcie_cong_event.h b/drivers/net/ethernet/mellanox/mlx5/core/en/pcie_cong_event.h
new file mode 100644
index 000000000000..b1ea46bf648a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/pcie_cong_event.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. */
+
+#ifndef __MLX5_PCIE_CONG_EVENT_H__
+#define __MLX5_PCIE_CONG_EVENT_H__
+
+int mlx5e_pcie_cong_event_init(struct mlx5e_priv *priv);
+void mlx5e_pcie_cong_event_cleanup(struct mlx5e_priv *priv);
+
+#endif /* __MLX5_PCIE_CONG_EVENT_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
index dbe2b19a9570..6049ccf475bc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
@@ -41,7 +41,7 @@ void mlx5_port_query_eth_autoneg(struct mlx5_core_dev *dev, u8 *an_status,
*an_disable_cap = 0;
*an_disable_admin = 0;
- if (mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN, 1))
+ if (mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN, 1, 0))
return;
*an_status = MLX5_GET(ptys_reg, out, an_status);
@@ -80,6 +80,7 @@ int mlx5_port_set_eth_ptys(struct mlx5_core_dev *dev, bool an_disable,
int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
{
struct mlx5_port_eth_proto eproto;
+ const struct mlx5_link_info *info;
bool force_legacy = false;
bool ext;
int err;
@@ -94,9 +95,13 @@ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
if (err)
goto out;
}
- *speed = mlx5_port_ptys2speed(mdev, eproto.oper, force_legacy);
- if (!(*speed))
+ info = mlx5_port_ptys2info(mdev, eproto.oper, force_legacy);
+ if (!info) {
+ *speed = SPEED_UNKNOWN;
err = -EINVAL;
+ goto out;
+ }
+ *speed = info->speed;
out:
return err;
@@ -292,10 +297,20 @@ enum mlx5e_fec_supported_link_mode {
MLX5E_FEC_SUPPORTED_LINK_MODE_100G_2X,
MLX5E_FEC_SUPPORTED_LINK_MODE_200G_4X,
MLX5E_FEC_SUPPORTED_LINK_MODE_400G_8X,
+ MLX5E_FEC_SUPPORTED_LINK_MODE_100G_1X,
+ MLX5E_FEC_SUPPORTED_LINK_MODE_200G_2X,
+ MLX5E_FEC_SUPPORTED_LINK_MODE_400G_4X,
+ MLX5E_FEC_SUPPORTED_LINK_MODE_800G_8X,
+ MLX5E_FEC_SUPPORTED_LINK_MODE_200G_1X,
+ MLX5E_FEC_SUPPORTED_LINK_MODE_400G_2X,
+ MLX5E_FEC_SUPPORTED_LINK_MODE_800G_4X,
+ MLX5E_FEC_SUPPORTED_LINK_MODE_1600G_8X,
MLX5E_MAX_FEC_SUPPORTED_LINK_MODE,
};
#define MLX5E_FEC_FIRST_50G_PER_LANE_MODE MLX5E_FEC_SUPPORTED_LINK_MODE_50G_1X
+#define MLX5E_FEC_FIRST_100G_PER_LANE_MODE MLX5E_FEC_SUPPORTED_LINK_MODE_100G_1X
+#define MLX5E_FEC_FIRST_200G_PER_LANE_MODE MLX5E_FEC_SUPPORTED_LINK_MODE_200G_1X
#define MLX5E_FEC_OVERRIDE_ADMIN_POLICY(buf, policy, write, link) \
do { \
@@ -308,6 +323,19 @@ enum mlx5e_fec_supported_link_mode {
*_policy = MLX5_GET(pplm_reg, _buf, fec_override_admin_##link); \
} while (0)
+/* Returns true if FEC can be set for a given link mode. */
+static bool mlx5e_is_fec_supported_link_mode(struct mlx5_core_dev *dev,
+ enum mlx5e_fec_supported_link_mode link_mode)
+{
+ return link_mode < MLX5E_FEC_FIRST_50G_PER_LANE_MODE ||
+ (link_mode < MLX5E_FEC_FIRST_100G_PER_LANE_MODE &&
+ MLX5_CAP_PCAM_FEATURE(dev, fec_50G_per_lane_in_pplm)) ||
+ (link_mode < MLX5E_FEC_FIRST_200G_PER_LANE_MODE &&
+ MLX5_CAP_PCAM_FEATURE(dev, fec_100G_per_lane_in_pplm)) ||
+ (link_mode >= MLX5E_FEC_FIRST_200G_PER_LANE_MODE &&
+ MLX5_CAP_PCAM_FEATURE(dev, fec_200G_per_lane_in_pplm));
+}
+
/* get/set FEC admin field for a given speed */
static int mlx5e_fec_admin_field(u32 *pplm, u16 *fec_policy, bool write,
enum mlx5e_fec_supported_link_mode link_mode)
@@ -340,6 +368,30 @@ static int mlx5e_fec_admin_field(u32 *pplm, u16 *fec_policy, bool write,
case MLX5E_FEC_SUPPORTED_LINK_MODE_400G_8X:
MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 400g_8x);
break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_100G_1X:
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 100g_1x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_200G_2X:
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 200g_2x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_400G_4X:
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 400g_4x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_800G_8X:
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 800g_8x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_200G_1X:
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 200g_1x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_400G_2X:
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 400g_2x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_800G_4X:
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 800g_4x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_1600G_8X:
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 1600g_8x);
+ break;
default:
return -EINVAL;
}
@@ -381,6 +433,30 @@ static int mlx5e_get_fec_cap_field(u32 *pplm, u16 *fec_cap,
case MLX5E_FEC_SUPPORTED_LINK_MODE_400G_8X:
*fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 400g_8x);
break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_100G_1X:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 100g_1x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_200G_2X:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 200g_2x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_400G_4X:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 400g_4x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_800G_8X:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 800g_8x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_200G_1X:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 200g_1x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_400G_2X:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 400g_2x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_800G_4X:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 800g_4x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_1600G_8X:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 1600g_8x);
+ break;
default:
return -EINVAL;
}
@@ -389,7 +465,6 @@ static int mlx5e_get_fec_cap_field(u32 *pplm, u16 *fec_cap,
bool mlx5e_fec_in_caps(struct mlx5_core_dev *dev, int fec_policy)
{
- bool fec_50g_per_lane = MLX5_CAP_PCAM_FEATURE(dev, fec_50G_per_lane_in_pplm);
u32 out[MLX5_ST_SZ_DW(pplm_reg)] = {};
u32 in[MLX5_ST_SZ_DW(pplm_reg)] = {};
int sz = MLX5_ST_SZ_BYTES(pplm_reg);
@@ -407,7 +482,7 @@ bool mlx5e_fec_in_caps(struct mlx5_core_dev *dev, int fec_policy)
for (i = 0; i < MLX5E_MAX_FEC_SUPPORTED_LINK_MODE; i++) {
u16 fec_caps;
- if (i >= MLX5E_FEC_FIRST_50G_PER_LANE_MODE && !fec_50g_per_lane)
+ if (!mlx5e_is_fec_supported_link_mode(dev, i))
break;
mlx5e_get_fec_cap_field(out, &fec_caps, i);
@@ -420,7 +495,6 @@ bool mlx5e_fec_in_caps(struct mlx5_core_dev *dev, int fec_policy)
int mlx5e_get_fec_mode(struct mlx5_core_dev *dev, u32 *fec_mode_active,
u16 *fec_configured_mode)
{
- bool fec_50g_per_lane = MLX5_CAP_PCAM_FEATURE(dev, fec_50G_per_lane_in_pplm);
u32 out[MLX5_ST_SZ_DW(pplm_reg)] = {};
u32 in[MLX5_ST_SZ_DW(pplm_reg)] = {};
int sz = MLX5_ST_SZ_BYTES(pplm_reg);
@@ -445,7 +519,7 @@ int mlx5e_get_fec_mode(struct mlx5_core_dev *dev, u32 *fec_mode_active,
*fec_configured_mode = 0;
for (i = 0; i < MLX5E_MAX_FEC_SUPPORTED_LINK_MODE; i++) {
- if (i >= MLX5E_FEC_FIRST_50G_PER_LANE_MODE && !fec_50g_per_lane)
+ if (!mlx5e_is_fec_supported_link_mode(dev, i))
break;
mlx5e_fec_admin_field(out, fec_configured_mode, 0, i);
@@ -456,6 +530,26 @@ out:
return 0;
}
+static u16 mlx5e_remap_fec_conf_mode(enum mlx5e_fec_supported_link_mode link_mode,
+ u16 conf_fec)
+{
+ /* RS fec in ethtool is originally mapped to MLX5E_FEC_RS_528_514.
+ * For link modes up to 25G per lane, the value is kept.
+ * For 50G or 100G per lane, it's remapped to MLX5E_FEC_RS_544_514.
+ * For 200G per lane, remapped to MLX5E_FEC_RS_544_514_INTERLEAVED_QUAD.
+ */
+ if (conf_fec != BIT(MLX5E_FEC_RS_528_514))
+ return conf_fec;
+
+ if (link_mode >= MLX5E_FEC_FIRST_200G_PER_LANE_MODE)
+ return BIT(MLX5E_FEC_RS_544_514_INTERLEAVED_QUAD);
+
+ if (link_mode >= MLX5E_FEC_FIRST_50G_PER_LANE_MODE)
+ return BIT(MLX5E_FEC_RS_544_514);
+
+ return conf_fec;
+}
+
int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u16 fec_policy)
{
bool fec_50g_per_lane = MLX5_CAP_PCAM_FEATURE(dev, fec_50G_per_lane_in_pplm);
@@ -489,17 +583,10 @@ int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u16 fec_policy)
u16 conf_fec = fec_policy;
u16 fec_caps = 0;
- if (i >= MLX5E_FEC_FIRST_50G_PER_LANE_MODE && !fec_50g_per_lane)
+ if (!mlx5e_is_fec_supported_link_mode(dev, i))
break;
- /* RS fec in ethtool is mapped to MLX5E_FEC_RS_528_514
- * to link modes up to 25G per lane and to
- * MLX5E_FEC_RS_544_514 in the new link modes based on
- * 50 G per lane
- */
- if (conf_fec == (1 << MLX5E_FEC_RS_528_514) &&
- i >= MLX5E_FEC_FIRST_50G_PER_LANE_MODE)
- conf_fec = (1 << MLX5E_FEC_RS_544_514);
+ conf_fec = mlx5e_remap_fec_conf_mode(i, conf_fec);
mlx5e_get_fec_cap_field(out, &fec_caps, i);
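
mlx5e_remap_fec_conf_mode() keeps an ethtool RS request as RS(528,514) for link modes up to 25G per lane, upgrades it to RS(544,514) from the 50G-per-lane modes onward, and to the interleaved-quad variant from 200G per lane, while mlx5e_is_fec_supported_link_mode() gates each generation on its PCAM capability bit. A sketch of the remap by generation; the enum and bit values below are stand-ins, not the PPLM encoding.

#include <stdio.h>

/* Link-mode generations, ordered as in the driver's supported-mode enum. */
enum gen { GEN_25G_LANE, GEN_50G_LANE, GEN_100G_LANE, GEN_200G_LANE };

#define FEC_RS_528_514    (1u << 0)
#define FEC_RS_544_514    (1u << 1)
#define FEC_RS_544_514_IQ (1u << 2) /* interleaved quad */

/* cf. mlx5e_remap_fec_conf_mode(): only the legacy RS bit is remapped. */
static unsigned int remap_fec(enum gen g, unsigned int conf)
{
	if (conf != FEC_RS_528_514)
		return conf;
	if (g >= GEN_200G_LANE)
		return FEC_RS_544_514_IQ;
	if (g >= GEN_50G_LANE)
		return FEC_RS_544_514;
	return conf;
}

int main(void)
{
	printf("%u %u %u\n",
	       remap_fec(GEN_25G_LANE,  FEC_RS_528_514),  /* stays 528,514 */
	       remap_fec(GEN_100G_LANE, FEC_RS_528_514),  /* -> 544,514    */
	       remap_fec(GEN_200G_LANE, FEC_RS_528_514)); /* -> 544,514 IQ */
	return 0;
}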
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
index d1da225f35da..fa2283dd383b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
@@ -61,6 +61,7 @@ enum {
MLX5E_FEC_NOFEC,
MLX5E_FEC_FIRECODE,
MLX5E_FEC_RS_528_514,
+ MLX5E_FEC_RS_544_514_INTERLEAVED_QUAD = 4,
MLX5E_FEC_RS_544_514 = 7,
MLX5E_FEC_LLRS_272_257_1 = 9,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
index 8e25f4ef5ccc..4720523813b9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
@@ -272,8 +272,8 @@ static int port_update_shared_buffer(struct mlx5_core_dev *mdev,
/* Total shared buffer size is split in a ratio of 3:1 between
* lossy and lossless pools respectively.
*/
- lossy_epool_size = (shared_buffer_size / 4) * 3;
lossless_ipool_size = shared_buffer_size / 4;
+ lossy_epool_size = shared_buffer_size - lossless_ipool_size;
mlx5e_port_set_sbpr(mdev, 0, MLX5_EGRESS_DIR, MLX5_LOSSY_POOL, 0,
lossy_epool_size);
@@ -288,14 +288,12 @@ static int port_set_buffer(struct mlx5e_priv *priv,
u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz;
struct mlx5_core_dev *mdev = priv->mdev;
int sz = MLX5_ST_SZ_BYTES(pbmc_reg);
- u32 new_headroom_size = 0;
- u32 current_headroom_size;
+ u32 current_headroom_cells = 0;
+ u32 new_headroom_cells = 0;
void *in;
int err;
int i;
- current_headroom_size = port_buffer->headroom_size;
-
in = kzalloc(sz, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -306,12 +304,14 @@ static int port_set_buffer(struct mlx5e_priv *priv,
for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) {
void *buffer = MLX5_ADDR_OF(pbmc_reg, in, buffer[i]);
+ current_headroom_cells += MLX5_GET(bufferx_reg, buffer, size);
+
u64 size = port_buffer->buffer[i].size;
u64 xoff = port_buffer->buffer[i].xoff;
u64 xon = port_buffer->buffer[i].xon;
- new_headroom_size += size;
do_div(size, port_buff_cell_sz);
+ new_headroom_cells += size;
do_div(xoff, port_buff_cell_sz);
do_div(xon, port_buff_cell_sz);
MLX5_SET(bufferx_reg, buffer, size, size);
@@ -320,10 +320,8 @@ static int port_set_buffer(struct mlx5e_priv *priv,
MLX5_SET(bufferx_reg, buffer, xon_threshold, xon);
}
- new_headroom_size /= port_buff_cell_sz;
- current_headroom_size /= port_buff_cell_sz;
- err = port_update_shared_buffer(priv->mdev, current_headroom_size,
- new_headroom_size);
+ err = port_update_shared_buffer(priv->mdev, current_headroom_cells,
+ new_headroom_cells);
if (err)
goto out;
@@ -331,6 +329,9 @@ static int port_set_buffer(struct mlx5e_priv *priv,
if (err)
goto out;
+ /* RO bits should be set to 0 on write */
+ MLX5_SET(pbmc_reg, in, port_buffer_size, 0);
+
err = mlx5e_port_set_pbmc(mdev, in);
out:
kfree(in);
@@ -574,7 +575,6 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
if (err)
return err;
}
- priv->dcbx.xoff = xoff;
/* Apply the settings */
if (update_buffer) {
@@ -583,6 +583,8 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
return err;
}
+ priv->dcbx.xoff = xoff;
+
if (update_prio2buffer)
err = mlx5e_port_set_priority2buffer(priv->mdev, prio2buffer);
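
The shared-buffer hunk above changes the 3:1 split so only the lossless pool is computed with integer division and the lossy pool takes the remainder, instead of rounding both down and silently losing up to three cells. A tiny before/after sketch of the arithmetic on an example size:

#include <stdio.h>

int main(void)
{
	unsigned int total = 1023; /* shared buffer size in cells (example) */

	/* Old scheme: both pools rounded down, remainder dropped. */
	unsigned int old_lossy    = (total / 4) * 3;      /* 765 */
	unsigned int old_lossless = total / 4;            /* 255, 3 cells lost */

	/* New scheme: lossless rounds down, lossy takes whatever is left. */
	unsigned int new_lossless = total / 4;            /* 255 */
	unsigned int new_lossy    = total - new_lossless; /* 768 */

	printf("old: %u + %u = %u of %u\n", old_lossy, old_lossless,
	       old_lossy + old_lossless, total);
	printf("new: %u + %u = %u of %u\n", new_lossy, new_lossless,
	       new_lossy + new_lossless, total);
	return 0;
}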
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index d0af7271da34..424f8a2728a3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -8,6 +8,7 @@
#include "en/fs_tt_redirect.h"
#include <linux/list.h>
#include <linux/spinlock.h>
+#include <net/netdev_lock.h>
struct mlx5e_ptp_fs {
struct mlx5_flow_handle *l2_rule;
@@ -81,7 +82,7 @@ static struct mlx5e_skb_cb_hwtstamp *mlx5e_skb_cb_get_hwts(struct sk_buff *skb)
}
static void mlx5e_skb_cb_hwtstamp_tx(struct sk_buff *skb,
- struct mlx5e_ptp_cq_stats *cq_stats)
+ struct mlx5e_ptpsq *ptpsq)
{
struct skb_shared_hwtstamps hwts = {};
ktime_t diff;
@@ -91,8 +92,17 @@ static void mlx5e_skb_cb_hwtstamp_tx(struct sk_buff *skb,
/* Maximal allowed diff is 1 / 128 second */
if (diff > (NSEC_PER_SEC >> 7)) {
- cq_stats->abort++;
- cq_stats->abort_abs_diff_ns += diff;
+ struct mlx5e_txqsq *sq = &ptpsq->txqsq;
+
+ ptpsq->cq_stats->abort++;
+ ptpsq->cq_stats->abort_abs_diff_ns += diff;
+ if (diff > (NSEC_PER_SEC >> 1) &&
+ !test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
+ netdev_warn(sq->channel->netdev,
+ "PTP TX timestamp difference between CQE and port exceeds threshold: %lld ns, recovering SQ %u\n",
+ (s64)diff, sq->sqn);
+ queue_work(sq->priv->wq, &ptpsq->report_unhealthy_work);
+ }
return;
}
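
The hunk above layers two thresholds on the CQE-to-port timestamp gap, both derived from NSEC_PER_SEC by shifting: anything above >>7 (about 7.8 ms) is counted as an abort, and anything above >>1 (half a second) additionally schedules SQ recovery. A small sketch of that two-level classification, policy only, with no recovery logic:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000LL

/* 0 = ok, 1 = abort stats only, 2 = abort and recover the SQ */
static int classify_ts_diff(int64_t diff_ns)
{
	if (diff_ns <= (NSEC_PER_SEC >> 7)) /* up to ~7.8 ms */
		return 0;
	if (diff_ns <= (NSEC_PER_SEC >> 1)) /* up to 500 ms  */
		return 1;
	return 2;
}

int main(void)
{
	printf("%d %d %d\n",
	       classify_ts_diff(1000000),    /* 1 ms   -> 0 */
	       classify_ts_diff(10000000),   /* 10 ms  -> 1 */
	       classify_ts_diff(600000000)); /* 600 ms -> 2 */
	return 0;
}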
@@ -102,7 +112,7 @@ static void mlx5e_skb_cb_hwtstamp_tx(struct sk_buff *skb,
void mlx5e_skb_cb_hwtstamp_handler(struct sk_buff *skb, int hwtstamp_type,
ktime_t hwtstamp,
- struct mlx5e_ptp_cq_stats *cq_stats)
+ struct mlx5e_ptpsq *ptpsq)
{
switch (hwtstamp_type) {
case (MLX5E_SKB_CB_CQE_HWTSTAMP):
@@ -120,7 +130,7 @@ void mlx5e_skb_cb_hwtstamp_handler(struct sk_buff *skb, int hwtstamp_type,
!mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp)
return;
- mlx5e_skb_cb_hwtstamp_tx(skb, cq_stats);
+ mlx5e_skb_cb_hwtstamp_tx(skb, ptpsq);
memset(skb->cb, 0, sizeof(struct mlx5e_skb_cb_hwtstamp));
}
@@ -169,6 +179,7 @@ static void mlx5e_ptpsq_mark_ts_cqes_undelivered(struct mlx5e_ptpsq *ptpsq,
WARN_ON_ONCE(!pos->inuse);
pos->inuse = false;
list_del(&pos->entry);
+ ptpsq->cq_stats->lost_cqe++;
}
spin_unlock_bh(&cqe_list->tracker_list_lock);
}
@@ -207,7 +218,7 @@ static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, get_cqe_ts(cqe));
mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_PORT_HWTSTAMP,
- hwtstamp, ptpsq->cq_stats);
+ hwtstamp, ptpsq);
ptpsq->cq_stats->cqe++;
mlx5e_ptpsq_mark_ts_cqes_undelivered(ptpsq, hwtstamp);
@@ -325,21 +336,19 @@ static int mlx5e_ptp_alloc_txqsq(struct mlx5e_ptp *c, int txq_ix,
int node;
sq->pdev = c->pdev;
- sq->clock = &mdev->clock;
+ sq->clock = mdev->clock;
sq->mkey_be = c->mkey_be;
sq->netdev = c->netdev;
sq->priv = c->priv;
sq->mdev = mdev;
sq->ch_ix = MLX5E_PTP_CHANNEL_IX;
sq->txq_ix = txq_ix;
- sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
+ sq->uar_map = c->bfreg->map;
sq->min_inline_mode = params->tx_min_inline_mode;
sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
sq->stats = &c->priv->ptp_stats.sq[tc];
sq->ptpsq = ptpsq;
INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
- if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
- set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
sq->stop_room = param->stop_room;
sq->ptp_cyc2time = mlx5_sq_ts_translator(mdev);
@@ -448,8 +457,22 @@ static void mlx5e_ptpsq_unhealthy_work(struct work_struct *work)
{
struct mlx5e_ptpsq *ptpsq =
container_of(work, struct mlx5e_ptpsq, report_unhealthy_work);
+ struct mlx5e_txqsq *sq = &ptpsq->txqsq;
+
+ /* Recovering the PTP SQ means re-enabling NAPI, which requires the
+ * netdev instance lock. However, SQ closing has to wait for this work
+ * task to finish while also holding the same lock. So either get the
+ * lock or find that the SQ is no longer enabled and thus this work is
+ * not relevant anymore.
+ */
+ while (!netdev_trylock(sq->netdev)) {
+ if (!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))
+ return;
+ msleep(20);
+ }
mlx5e_reporter_tx_ptpsq_unhealthy(ptpsq);
+ netdev_unlock(sq->netdev);
}
static int mlx5e_ptp_open_txqsq(struct mlx5e_ptp *c, u32 tisn,
@@ -472,6 +495,7 @@ static int mlx5e_ptp_open_txqsq(struct mlx5e_ptp *c, u32 tisn,
csp.wq_ctrl = &txqsq->wq_ctrl;
csp.min_inline_mode = txqsq->min_inline_mode;
csp.ts_cqe_to_dest_cqn = ptpsq->ts_cq.mcq.cqn;
+ csp.uar_page = c->bfreg->index;
err = mlx5e_create_sq_rdy(c->mdev, sqp, &csp, 0, &txqsq->sqn);
if (err)
@@ -563,6 +587,7 @@ static int mlx5e_ptp_open_tx_cqs(struct mlx5e_ptp *c,
ccp.ch_stats = c->stats;
ccp.napi = &c->napi;
ccp.ix = MLX5E_PTP_CHANNEL_IX;
+ ccp.uar = c->bfreg->up;
cq_param = &cparams->txq_sq_param.cqp;
@@ -612,6 +637,7 @@ static int mlx5e_ptp_open_rx_cq(struct mlx5e_ptp *c,
ccp.ch_stats = c->stats;
ccp.napi = &c->napi;
ccp.ix = MLX5E_PTP_CHANNEL_IX;
+ ccp.uar = c->bfreg->up;
cq_param = &cparams->rq_param.cqp;
@@ -695,8 +721,8 @@ static int mlx5e_init_ptp_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
rq->pdev = c->pdev;
rq->netdev = priv->netdev;
rq->priv = priv;
- rq->clock = &mdev->clock;
- rq->tstamp = &priv->tstamp;
+ rq->clock = mdev->clock;
+ rq->hwtstamp_config = &priv->hwtstamp_config;
rq->mdev = mdev;
rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
rq->stats = &c->priv->ptp_stats.rq;
@@ -879,19 +905,19 @@ int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
c->priv = priv;
c->mdev = priv->mdev;
- c->tstamp = &priv->tstamp;
c->pdev = mlx5_core_dma_dev(priv->mdev);
c->netdev = priv->netdev;
c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey);
c->num_tc = mlx5e_get_dcb_num_tc(params);
c->stats = &priv->ptp_stats.ch;
c->lag_port = lag_port;
+ c->bfreg = &mdev->priv.bfreg;
err = mlx5e_ptp_set_state(c, params);
if (err)
goto err_free;
- netif_napi_add(netdev, &c->napi, mlx5e_ptp_napi_poll);
+ netif_napi_add_locked(netdev, &c->napi, mlx5e_ptp_napi_poll);
mlx5e_ptp_build_params(c, cparams, params);
@@ -909,7 +935,7 @@ int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
return 0;
err_napi_del:
- netif_napi_del(&c->napi);
+ netif_napi_del_locked(&c->napi);
err_free:
kvfree(cparams);
kvfree(c);
@@ -919,7 +945,7 @@ err_free:
void mlx5e_ptp_close(struct mlx5e_ptp *c)
{
mlx5e_ptp_close_queues(c);
- netif_napi_del(&c->napi);
+ netif_napi_del_locked(&c->napi);
kvfree(c);
}
@@ -928,7 +954,7 @@ void mlx5e_ptp_activate_channel(struct mlx5e_ptp *c)
{
int tc;
- napi_enable(&c->napi);
+ napi_enable_locked(&c->napi);
if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
for (tc = 0; tc < c->num_tc; tc++)
@@ -956,7 +982,7 @@ void mlx5e_ptp_deactivate_channel(struct mlx5e_ptp *c)
mlx5e_deactivate_txqsq(&c->ptpsq[tc].txqsq);
}
- napi_disable(&c->napi);
+ napi_disable_locked(&c->napi);
}
int mlx5e_ptp_get_rqn(struct mlx5e_ptp *c, u32 *rqn)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
index 883c044852f1..2a457a2ed707 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
@@ -64,8 +64,8 @@ struct mlx5e_ptp {
/* control */
struct mlx5e_priv *priv;
struct mlx5_core_dev *mdev;
- struct hwtstamp_config *tstamp;
DECLARE_BITMAP(state, MLX5E_PTP_STATE_NUM_STATES);
+ struct mlx5_sq_bfreg *bfreg;
};
static inline bool mlx5e_use_ptpsq(struct sk_buff *skb)
@@ -147,7 +147,7 @@ enum {
void mlx5e_skb_cb_hwtstamp_handler(struct sk_buff *skb, int hwtstamp_type,
ktime_t hwtstamp,
- struct mlx5e_ptp_cq_stats *cq_stats);
+ struct mlx5e_ptpsq *ptpsq);
void mlx5e_skb_cb_hwtstamp_init(struct sk_buff *skb);
#endif /* __MLX5_EN_PTP_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
index 6743806b8480..4e461cb03b83 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
@@ -170,6 +170,7 @@ int mlx5e_activate_qos_sq(void *data, u16 node_qid, u32 hw_id)
mlx5e_tx_disable_queue(netdev_get_tx_queue(priv->netdev, qid));
priv->txq2sq[qid] = sq;
+ priv->txq2sq_stats[qid] = sq->stats;
/* Make the change to txq2sq visible before the queue is started.
* As mlx5e_xmit runs under a spinlock, there is an implicit ACQUIRE,
@@ -186,6 +187,7 @@ int mlx5e_activate_qos_sq(void *data, u16 node_qid, u32 hw_id)
void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid)
{
struct mlx5e_txqsq *sq;
+ u16 txq_ix;
sq = mlx5e_get_qos_sq(priv, qid);
if (!sq) /* Handle the case when the SQ failed to open. */
@@ -194,7 +196,10 @@ void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid)
qos_dbg(sq->mdev, "Deactivate QoS SQ qid %u\n", qid);
mlx5e_deactivate_txqsq(sq);
- priv->txq2sq[mlx5e_qid_from_qos(&priv->channels, qid)] = NULL;
+ txq_ix = mlx5e_qid_from_qos(&priv->channels, qid);
+
+ priv->txq2sq[txq_ix] = NULL;
+ priv->txq2sq_stats[txq_ix] = NULL;
/* Make the change to txq2sq visible before the queue is started again.
* As mlx5e_xmit runs under a spinlock, there is an implicit ACQUIRE,
@@ -325,6 +330,7 @@ void mlx5e_qos_deactivate_queues(struct mlx5e_channel *c)
{
struct mlx5e_params *params = &c->priv->channels.params;
struct mlx5e_txqsq __rcu **qos_sqs;
+ u16 txq_ix;
int i;
qos_sqs = mlx5e_state_dereference(c->priv, c->qos_sqs);
@@ -342,8 +348,11 @@ void mlx5e_qos_deactivate_queues(struct mlx5e_channel *c)
qos_dbg(c->mdev, "Deactivate QoS SQ qid %u\n", qid);
mlx5e_deactivate_txqsq(sq);
+ txq_ix = mlx5e_qid_from_qos(&c->priv->channels, qid);
+
/* The queue is disabled, no synchronization with datapath is needed. */
- c->priv->txq2sq[mlx5e_qid_from_qos(&c->priv->channels, qid)] = NULL;
+ c->priv->txq2sq[txq_ix] = NULL;
+ c->priv->txq2sq_stats[txq_ix] = NULL;
}
}
@@ -365,7 +374,7 @@ void mlx5e_reactivate_qos_sq(struct mlx5e_priv *priv, u16 qid, struct netdev_que
void mlx5e_reset_qdisc(struct net_device *dev, u16 qid)
{
struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, qid);
- struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
+ struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
if (!qdisc)
return;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
index 5d128c5b4529..87a2ad69526d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
@@ -30,15 +30,11 @@ static bool mlx5_esw_bridge_dev_same_hw(struct net_device *dev, struct mlx5_eswi
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev, *esw_mdev;
- u64 system_guid, esw_system_guid;
mdev = priv->mdev;
esw_mdev = esw->dev;
- system_guid = mlx5_query_nic_system_image_guid(mdev);
- esw_system_guid = mlx5_query_nic_system_image_guid(esw_mdev);
-
- return system_guid == esw_system_guid;
+ return mlx5_same_hw_devs(mdev, esw_mdev);
}
static struct net_device *
@@ -48,15 +44,10 @@ mlx5_esw_bridge_lag_rep_get(struct net_device *dev, struct mlx5_eswitch *esw)
struct list_head *iter;
netdev_for_each_lower_dev(dev, lower, iter) {
- struct mlx5_core_dev *mdev;
- struct mlx5e_priv *priv;
-
if (!mlx5e_eswitch_rep(lower))
continue;
- priv = netdev_priv(lower);
- mdev = priv->mdev;
- if (mlx5_lag_is_shared_fdb(mdev) && mlx5_esw_bridge_dev_same_esw(lower, esw))
+ if (mlx5_esw_bridge_dev_same_esw(lower, esw))
return lower;
}
@@ -125,7 +116,7 @@ static bool mlx5_esw_bridge_is_local(struct net_device *dev, struct net_device *
priv = netdev_priv(rep);
mdev = priv->mdev;
if (netif_is_lag_master(dev))
- return mlx5_lag_is_shared_fdb(mdev) && mlx5_lag_is_master(mdev);
+ return mlx5_lag_is_master(mdev);
return true;
}
@@ -455,6 +446,9 @@ static int mlx5_esw_bridge_switchdev_event(struct notifier_block *nb,
if (!rep)
return NOTIFY_DONE;
+ if (netif_is_lag_master(dev) && !mlx5_lag_is_shared_fdb(esw->dev))
+ return NOTIFY_DONE;
+
switch (event) {
case SWITCHDEV_FDB_ADD_TO_BRIDGE:
fdb_info = container_of(info,
@@ -490,8 +484,8 @@ static int mlx5_esw_bridge_switchdev_event(struct notifier_block *nb,
fdb_info,
br_offloads);
if (IS_ERR(work)) {
- WARN_ONCE(1, "Failed to init switchdev work, err=%ld",
- PTR_ERR(work));
+ WARN_ONCE(1, "Failed to init switchdev work, err=%pe",
+ work);
return notifier_from_errno(PTR_ERR(work));
}
@@ -529,7 +523,8 @@ void mlx5e_rep_bridge_init(struct mlx5e_priv *priv)
br_offloads = mlx5_esw_bridge_init(esw);
rtnl_unlock();
if (IS_ERR(br_offloads)) {
- esw_warn(mdev, "Failed to init esw bridge (err=%ld)\n", PTR_ERR(br_offloads));
+ esw_warn(mdev, "Failed to init esw bridge (err=%pe)\n",
+ br_offloads);
return;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
index 25d751eba99b..0686fbdd5a05 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
@@ -170,16 +170,23 @@ static int mlx5e_rx_reporter_err_rq_cqe_recover(void *ctx)
static int mlx5e_rx_reporter_timeout_recover(void *ctx)
{
struct mlx5_eq_comp *eq;
+ struct mlx5e_priv *priv;
struct mlx5e_rq *rq;
int err;
rq = ctx;
+ priv = rq->priv;
+
+ mutex_lock(&priv->state_lock);
+
eq = rq->cq.mcq.eq;
err = mlx5e_health_channel_eq_recover(rq->netdev, eq, rq->cq.ch_stats);
if (err && rq->icosq)
clear_bit(MLX5E_SQ_STATE_ENABLED, &rq->icosq->state);
+ mutex_unlock(&priv->state_lock);
+
return err;
}
@@ -311,16 +318,15 @@ mlx5e_rx_reporter_diagnose_common_ptp_config(struct mlx5e_priv *priv, struct mlx
struct devlink_fmsg *fmsg)
{
mlx5e_health_fmsg_named_obj_nest_start(fmsg, "PTP");
- devlink_fmsg_u32_pair_put(fmsg, "filter_type", priv->tstamp.rx_filter);
+ devlink_fmsg_u32_pair_put(fmsg, "filter_type",
+ priv->hwtstamp_config.rx_filter);
mlx5e_rx_reporter_diagnose_generic_rq(&ptp_ch->rq, fmsg);
mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}
static void
-mlx5e_rx_reporter_diagnose_common_config(struct devlink_health_reporter *reporter,
- struct devlink_fmsg *fmsg)
+mlx5e_rx_reporter_diagnose_common_config(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg)
{
- struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
struct mlx5e_rq *generic_rq = &priv->channels.c[0]->rq;
struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
@@ -340,20 +346,100 @@ static void mlx5e_rx_reporter_build_diagnose_output_ptp_rq(struct mlx5e_rq *rq,
devlink_fmsg_obj_nest_end(fmsg);
}
-static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter,
- struct devlink_fmsg *fmsg,
- struct netlink_ext_ack *extack)
+static void mlx5e_rx_reporter_diagnose_rx_res_dir_tirns(struct mlx5e_rx_res *rx_res,
+ struct devlink_fmsg *fmsg)
{
- struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
- struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
+ unsigned int max_nch = mlx5e_rx_res_get_max_nch(rx_res);
int i;
- mutex_lock(&priv->state_lock);
+ devlink_fmsg_arr_pair_nest_start(fmsg, "Direct TIRs");
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
- goto unlock;
+ for (i = 0; i < max_nch; i++) {
+ devlink_fmsg_obj_nest_start(fmsg);
+
+ devlink_fmsg_u32_pair_put(fmsg, "ix", i);
+ devlink_fmsg_u32_pair_put(fmsg, "tirn", mlx5e_rx_res_get_tirn_direct(rx_res, i));
+ devlink_fmsg_u32_pair_put(fmsg, "rqtn", mlx5e_rx_res_get_rqtn_direct(rx_res, i));
+
+ devlink_fmsg_obj_nest_end(fmsg);
+ }
+
+ devlink_fmsg_arr_pair_nest_end(fmsg);
+}
+
+static void mlx5e_rx_reporter_diagnose_rx_res_rss_tirn(struct mlx5e_rss *rss, bool inner,
+ struct devlink_fmsg *fmsg)
+{
+ bool found_valid_tir = false;
+ int tt;
+
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+ if (!mlx5e_rss_valid_tir(rss, tt, inner))
+ continue;
+
+ if (!found_valid_tir) {
+ char *tir_msg = inner ? "Inner TIRs Numbers" : "TIRs Numbers";
+
+ found_valid_tir = true;
+ devlink_fmsg_arr_pair_nest_start(fmsg, tir_msg);
+ }
+
+ devlink_fmsg_obj_nest_start(fmsg);
+ devlink_fmsg_string_pair_put(fmsg, "tt", mlx5_ttc_get_name(tt));
+ devlink_fmsg_u32_pair_put(fmsg, "tirn", mlx5e_rss_get_tirn(rss, tt, inner));
+ devlink_fmsg_obj_nest_end(fmsg);
+ }
+
+ if (found_valid_tir)
+ devlink_fmsg_arr_pair_nest_end(fmsg);
+}
+
+static void mlx5e_rx_reporter_diagnose_rx_res_rss_ix(struct mlx5e_rx_res *rx_res, u32 rss_idx,
+ struct devlink_fmsg *fmsg)
+{
+ struct mlx5e_rss *rss = mlx5e_rx_res_rss_get(rx_res, rss_idx);
+
+ if (!rss)
+ return;
+
+ devlink_fmsg_obj_nest_start(fmsg);
+
+ devlink_fmsg_u32_pair_put(fmsg, "Index", rss_idx);
+ devlink_fmsg_u32_pair_put(fmsg, "rqtn", mlx5e_rss_get_rqtn(rss));
+ mlx5e_rx_reporter_diagnose_rx_res_rss_tirn(rss, false, fmsg);
+ if (mlx5e_rss_get_inner_ft_support(rss))
+ mlx5e_rx_reporter_diagnose_rx_res_rss_tirn(rss, true, fmsg);
+
+ devlink_fmsg_obj_nest_end(fmsg);
+}
+
+static void mlx5e_rx_reporter_diagnose_rx_res_rss(struct mlx5e_rx_res *rx_res,
+ struct devlink_fmsg *fmsg)
+{
+ int rss_ix;
+
+ devlink_fmsg_arr_pair_nest_start(fmsg, "RSS");
+ for (rss_ix = 0; rss_ix < MLX5E_MAX_NUM_RSS; rss_ix++)
+ mlx5e_rx_reporter_diagnose_rx_res_rss_ix(rx_res, rss_ix, fmsg);
+ devlink_fmsg_arr_pair_nest_end(fmsg);
+}
+
+static void mlx5e_rx_reporter_diagnose_rx_res(struct mlx5e_priv *priv,
+ struct devlink_fmsg *fmsg)
+{
+ struct mlx5e_rx_res *rx_res = priv->rx_res;
+
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RX resources");
+ mlx5e_rx_reporter_diagnose_rx_res_dir_tirns(rx_res, fmsg);
+ mlx5e_rx_reporter_diagnose_rx_res_rss(rx_res, fmsg);
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+}
+
+static void mlx5e_rx_reporter_diagnose_rqs(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg)
+{
+ struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
+ int i;
- mlx5e_rx_reporter_diagnose_common_config(reporter, fmsg);
devlink_fmsg_arr_pair_nest_start(fmsg, "RQs");
for (i = 0; i < priv->channels.num; i++) {
@@ -367,7 +453,24 @@ static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter,
}
if (ptp_ch && test_bit(MLX5E_PTP_STATE_RX, ptp_ch->state))
mlx5e_rx_reporter_build_diagnose_output_ptp_rq(&ptp_ch->rq, fmsg);
+
devlink_fmsg_arr_pair_nest_end(fmsg);
+}
+
+static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
+
+ mutex_lock(&priv->state_lock);
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ goto unlock;
+
+ mlx5e_rx_reporter_diagnose_common_config(priv, fmsg);
+ mlx5e_rx_reporter_diagnose_rqs(priv, fmsg);
+ mlx5e_rx_reporter_diagnose_rx_res(priv, fmsg);
unlock:
mutex_unlock(&priv->state_lock);
return 0;
@@ -549,25 +652,29 @@ void mlx5e_reporter_icosq_resume_recovery(struct mlx5e_channel *c)
mutex_unlock(&c->icosq_recovery_lock);
}
+#define MLX5E_REPORTER_RX_GRACEFUL_PERIOD 500
+#define MLX5E_REPORTER_RX_BURST_PERIOD 500
+
static const struct devlink_health_reporter_ops mlx5_rx_reporter_ops = {
.name = "rx",
.recover = mlx5e_rx_reporter_recover,
.diagnose = mlx5e_rx_reporter_diagnose,
.dump = mlx5e_rx_reporter_dump,
+ .default_graceful_period = MLX5E_REPORTER_RX_GRACEFUL_PERIOD,
+ .default_burst_period = MLX5E_REPORTER_RX_BURST_PERIOD,
};
-#define MLX5E_REPORTER_RX_GRACEFUL_PERIOD 500
-
void mlx5e_reporter_rx_create(struct mlx5e_priv *priv)
{
+ struct devlink_port *port = priv->netdev->devlink_port;
struct devlink_health_reporter *reporter;
- reporter = devlink_port_health_reporter_create(priv->netdev->devlink_port,
+ reporter = devlink_port_health_reporter_create(port,
&mlx5_rx_reporter_ops,
- MLX5E_REPORTER_RX_GRACEFUL_PERIOD, priv);
+ priv);
if (IS_ERR(reporter)) {
- netdev_warn(priv->netdev, "Failed to create rx reporter, err = %ld\n",
- PTR_ERR(reporter));
+ netdev_warn(priv->netdev, "Failed to create rx reporter, err = %pe\n",
+ reporter);
return;
}
priv->rx_reporter = reporter;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index 22918b2ef7f1..9e2cf191ed30 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -13,10 +13,8 @@ static const char * const sq_sw_state_type_name[] = {
[MLX5E_SQ_STATE_RECOVERING] = "recovering",
[MLX5E_SQ_STATE_IPSEC] = "ipsec",
[MLX5E_SQ_STATE_DIM] = "dim",
- [MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE] = "vlan_need_l2_inline",
[MLX5E_SQ_STATE_PENDING_XSK_TX] = "pending_xsk_tx",
[MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC] = "pending_tls_rx_resync",
- [MLX5E_SQ_STATE_XDP_MULTIBUF] = "xdp_multibuf",
};
static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq)
@@ -108,9 +106,7 @@ static int mlx5e_tx_reporter_err_cqe_recover(void *ctx)
mlx5e_reset_txqsq_cc_pc(sq);
sq->stats->recover++;
clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
- rtnl_lock();
mlx5e_activate_txqsq(sq);
- rtnl_unlock();
if (sq->channel)
mlx5e_trigger_napi_icosq(sq->channel);
@@ -146,7 +142,9 @@ static int mlx5e_tx_reporter_timeout_recover(void *ctx)
return err;
}
+ mutex_lock(&priv->state_lock);
err = mlx5e_safe_reopen_channels(priv);
+ mutex_unlock(&priv->state_lock);
if (!err) {
to_ctx->status = 1; /* all channels recovered */
return err;
@@ -182,16 +180,12 @@ static int mlx5e_tx_reporter_ptpsq_unhealthy_recover(void *ctx)
carrier_ok = netif_carrier_ok(netdev);
netif_carrier_off(netdev);
- rtnl_lock();
mlx5e_deactivate_priv_channels(priv);
- rtnl_unlock();
mlx5e_ptp_close(chs->ptp);
err = mlx5e_ptp_open(priv, &chs->params, chs->c[0]->lag_port, &chs->ptp);
- rtnl_lock();
mlx5e_activate_priv_channels(priv);
- rtnl_unlock();
/* return carrier back if needed */
if (carrier_ok)
@@ -317,6 +311,30 @@ out:
mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}
+static void
+mlx5e_tx_reporter_diagnose_tis_config(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg)
+{
+ struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
+ u8 num_tc = mlx5e_get_dcb_num_tc(&priv->channels.params);
+ u32 tc, i, tisn;
+
+ devlink_fmsg_arr_pair_nest_start(fmsg, "TIS Config");
+ for (i = 0; i < mlx5e_get_num_lag_ports(priv->mdev); i++) {
+ for (tc = 0; tc < num_tc; tc++) {
+ tisn = mlx5e_profile_get_tisn(priv->mdev, priv,
+ priv->profile, i, tc);
+
+ devlink_fmsg_obj_nest_start(fmsg);
+ devlink_fmsg_u32_pair_put(fmsg, "lag port", i);
+ devlink_fmsg_u32_pair_put(fmsg, "tc", tc);
+ devlink_fmsg_u32_pair_put(fmsg, "tisn", tisn);
+ devlink_fmsg_obj_nest_end(fmsg);
+ }
+ }
+ devlink_fmsg_arr_pair_nest_end(fmsg);
+}
+
static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter,
struct devlink_fmsg *fmsg,
struct netlink_ext_ack *extack)
@@ -332,6 +350,7 @@ static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter,
goto unlock;
mlx5e_tx_reporter_diagnose_common_config(reporter, fmsg);
+ mlx5e_tx_reporter_diagnose_tis_config(reporter, fmsg);
devlink_fmsg_arr_pair_nest_start(fmsg, "SQs");
for (i = 0; i < priv->channels.num; i++) {
@@ -520,26 +539,30 @@ void mlx5e_reporter_tx_ptpsq_unhealthy(struct mlx5e_ptpsq *ptpsq)
mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx);
}
+#define MLX5E_REPORTER_TX_GRACEFUL_PERIOD 500
+#define MLX5E_REPORTER_TX_BURST_PERIOD 500
+
static const struct devlink_health_reporter_ops mlx5_tx_reporter_ops = {
.name = "tx",
.recover = mlx5e_tx_reporter_recover,
.diagnose = mlx5e_tx_reporter_diagnose,
.dump = mlx5e_tx_reporter_dump,
+ .default_graceful_period = MLX5E_REPORTER_TX_GRACEFUL_PERIOD,
+ .default_burst_period = MLX5E_REPORTER_TX_BURST_PERIOD,
};
-#define MLX5_REPORTER_TX_GRACEFUL_PERIOD 500
-
void mlx5e_reporter_tx_create(struct mlx5e_priv *priv)
{
+ struct devlink_port *port = priv->netdev->devlink_port;
struct devlink_health_reporter *reporter;
- reporter = devlink_port_health_reporter_create(priv->netdev->devlink_port,
+ reporter = devlink_port_health_reporter_create(port,
&mlx5_tx_reporter_ops,
- MLX5_REPORTER_TX_GRACEFUL_PERIOD, priv);
+ priv);
if (IS_ERR(reporter)) {
netdev_warn(priv->netdev,
- "Failed to create tx reporter, err = %ld\n",
- PTR_ERR(reporter));
+ "Failed to create tx reporter, err = %pe\n",
+ reporter);
return;
}
priv->tx_reporter = reporter;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
index 5f742f896600..88b0e1050d1a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
@@ -75,18 +75,22 @@ struct mlx5e_rss {
struct mlx5e_tir *inner_tir[MLX5E_NUM_INDIR_TIRS];
struct mlx5e_rqt rqt;
struct mlx5_core_dev *mdev; /* primary */
- u32 drop_rqn;
- bool inner_ft_support;
+ struct mlx5e_rss_params params;
bool enabled;
refcount_t refcnt;
};
+bool mlx5e_rss_get_inner_ft_support(struct mlx5e_rss *rss)
+{
+ return rss->params.inner_ft_support;
+}
+
void mlx5e_rss_params_indir_modify_actual_size(struct mlx5e_rss *rss, u32 num_channels)
{
rss->indir.actual_table_size = mlx5e_rqt_size(rss->mdev, num_channels);
}
-int mlx5e_rss_params_indir_init(struct mlx5e_rss_params_indir *indir, struct mlx5_core_dev *mdev,
+int mlx5e_rss_params_indir_init(struct mlx5e_rss_params_indir *indir,
u32 actual_table_size, u32 max_table_size)
{
indir->table = kvmalloc_array(max_table_size, sizeof(*indir->table), GFP_KERNEL);
@@ -134,7 +138,8 @@ static struct mlx5e_rss *mlx5e_rss_init_copy(const struct mlx5e_rss *from)
if (!rss)
return ERR_PTR(-ENOMEM);
- err = mlx5e_rss_params_indir_init(&rss->indir, from->mdev, from->indir.actual_table_size,
+ err = mlx5e_rss_params_indir_init(&rss->indir,
+ from->indir.actual_table_size,
from->indir.max_table_size);
if (err)
goto err_free_rss;
@@ -156,6 +161,7 @@ static void mlx5e_rss_params_init(struct mlx5e_rss *rss)
{
enum mlx5_traffic_types tt;
+ rss->hash.symmetric = true;
rss->hash.hfunc = ETH_RSS_HASH_TOP;
netdev_rss_key_fill(rss->hash.toeplitz_hash_key,
sizeof(rss->hash.toeplitz_hash_key));
@@ -186,11 +192,12 @@ mlx5e_rss_get_tt_config(struct mlx5e_rss *rss, enum mlx5_traffic_types tt)
return rss_tt;
}
-static int mlx5e_rss_create_tir(struct mlx5e_rss *rss,
- enum mlx5_traffic_types tt,
- const struct mlx5e_packet_merge_param *init_pkt_merge_param,
- bool inner)
+static int
+mlx5e_rss_create_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
+ const struct mlx5e_packet_merge_param *pkt_merge_param,
+ bool inner)
{
+ bool rss_inner = rss->params.inner_ft_support;
struct mlx5e_rss_params_traffic_type rss_tt;
struct mlx5e_tir_builder *builder;
struct mlx5e_tir **tir_p;
@@ -198,7 +205,7 @@ static int mlx5e_rss_create_tir(struct mlx5e_rss *rss,
u32 rqtn;
int err;
- if (inner && !rss->inner_ft_support) {
+ if (inner && !rss_inner) {
mlx5e_rss_warn(rss->mdev,
"Cannot create inner indirect TIR[%d], RSS inner FT is not supported.\n",
tt);
@@ -221,9 +228,11 @@ static int mlx5e_rss_create_tir(struct mlx5e_rss *rss,
rqtn = mlx5e_rqt_get_rqtn(&rss->rqt);
mlx5e_tir_builder_build_rqt(builder, rss->mdev->mlx5e_res.hw_objs.td.tdn,
- rqtn, rss->inner_ft_support);
- mlx5e_tir_builder_build_packet_merge(builder, init_pkt_merge_param);
+ rqtn, rss_inner);
+ mlx5e_tir_builder_build_packet_merge(builder, pkt_merge_param);
rss_tt = mlx5e_rss_get_tt_config(rss, tt);
+ mlx5e_tir_builder_build_self_lb_block(builder, rss->params.self_lb_blk,
+ rss->params.self_lb_blk);
mlx5e_tir_builder_build_rss(builder, &rss->hash, &rss_tt, inner);
err = mlx5e_tir_init(tir, builder, rss->mdev, true);
@@ -258,15 +267,16 @@ static void mlx5e_rss_destroy_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types
*tir_p = NULL;
}
-static int mlx5e_rss_create_tirs(struct mlx5e_rss *rss,
- const struct mlx5e_packet_merge_param *init_pkt_merge_param,
- bool inner)
+static int
+mlx5e_rss_create_tirs(struct mlx5e_rss *rss,
+ const struct mlx5e_packet_merge_param *pkt_merge_param,
+ bool inner)
{
enum mlx5_traffic_types tt, max_tt;
int err;
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
- err = mlx5e_rss_create_tir(rss, tt, init_pkt_merge_param, inner);
+ err = mlx5e_rss_create_tir(rss, tt, pkt_merge_param, inner);
if (err)
goto err_destroy_tirs;
}
@@ -329,7 +339,7 @@ static int mlx5e_rss_update_tirs(struct mlx5e_rss *rss)
tt, err);
}
- if (!rss->inner_ft_support)
+ if (!rss->params.inner_ft_support)
continue;
err = mlx5e_rss_update_tir(rss, tt, true);
@@ -349,14 +359,16 @@ static int mlx5e_rss_init_no_tirs(struct mlx5e_rss *rss)
refcount_set(&rss->refcnt, 1);
return mlx5e_rqt_init_direct(&rss->rqt, rss->mdev, true,
- rss->drop_rqn, rss->indir.max_table_size);
+ rss->params.drop_rqn,
+ rss->indir.max_table_size);
}
-struct mlx5e_rss *mlx5e_rss_init(struct mlx5_core_dev *mdev, bool inner_ft_support, u32 drop_rqn,
- const struct mlx5e_packet_merge_param *init_pkt_merge_param,
- enum mlx5e_rss_init_type type, unsigned int nch,
- unsigned int max_nch)
+struct mlx5e_rss *
+mlx5e_rss_init(struct mlx5_core_dev *mdev,
+ const struct mlx5e_rss_params *params,
+ const struct mlx5e_rss_init_params *init_params)
{
+ u32 rqt_max_size, rqt_size;
struct mlx5e_rss *rss;
int err;
@@ -364,29 +376,31 @@ struct mlx5e_rss *mlx5e_rss_init(struct mlx5_core_dev *mdev, bool inner_ft_suppo
if (!rss)
return ERR_PTR(-ENOMEM);
- err = mlx5e_rss_params_indir_init(&rss->indir, mdev,
- mlx5e_rqt_size(mdev, nch),
- mlx5e_rqt_size(mdev, max_nch));
+ rqt_size = mlx5e_rqt_size(mdev, init_params->nch);
+ rqt_max_size = mlx5e_rqt_size(mdev, init_params->max_nch);
+ err = mlx5e_rss_params_indir_init(&rss->indir, rqt_size, rqt_max_size);
if (err)
goto err_free_rss;
rss->mdev = mdev;
- rss->inner_ft_support = inner_ft_support;
- rss->drop_rqn = drop_rqn;
+ rss->params = *params;
err = mlx5e_rss_init_no_tirs(rss);
if (err)
goto err_free_indir;
- if (type == MLX5E_RSS_INIT_NO_TIRS)
+ if (init_params->type == MLX5E_RSS_INIT_NO_TIRS)
goto out;
- err = mlx5e_rss_create_tirs(rss, init_pkt_merge_param, false);
+ err = mlx5e_rss_create_tirs(rss, init_params->pkt_merge_param,
+ false);
if (err)
goto err_destroy_rqt;
- if (inner_ft_support) {
- err = mlx5e_rss_create_tirs(rss, init_pkt_merge_param, true);
+ if (params->inner_ft_support) {
+ err = mlx5e_rss_create_tirs(rss,
+ init_params->pkt_merge_param,
+ true);
if (err)
goto err_destroy_tirs;
}
@@ -412,7 +426,7 @@ int mlx5e_rss_cleanup(struct mlx5e_rss *rss)
mlx5e_rss_destroy_tirs(rss, false);
- if (rss->inner_ft_support)
+ if (rss->params.inner_ft_support)
mlx5e_rss_destroy_tirs(rss, true);
mlx5e_rqt_destroy(&rss->rqt);
@@ -442,20 +456,30 @@ u32 mlx5e_rss_get_tirn(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
{
struct mlx5e_tir *tir;
- WARN_ON(inner && !rss->inner_ft_support);
+ WARN_ON(inner && !rss->params.inner_ft_support);
tir = rss_get_tir(rss, tt, inner);
WARN_ON(!tir);
return mlx5e_tir_get_tirn(tir);
}
+u32 mlx5e_rss_get_rqtn(struct mlx5e_rss *rss)
+{
+ return mlx5e_rqt_get_rqtn(&rss->rqt);
+}
+
+bool mlx5e_rss_valid_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types tt, bool inner)
+{
+ return !!rss_get_tir(rss, tt, inner);
+}
+
/* Fill the "tirn" output parameter.
* Create the requested TIR if it's its first usage.
*/
-int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss,
- enum mlx5_traffic_types tt,
- const struct mlx5e_packet_merge_param *init_pkt_merge_param,
- bool inner, u32 *tirn)
+int
+mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
+ const struct mlx5e_packet_merge_param *pkt_merge_param,
+ bool inner, u32 *tirn)
{
struct mlx5e_tir *tir;
@@ -463,7 +487,7 @@ int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss,
if (!tir) { /* TIR doesn't exist, create one */
int err;
- err = mlx5e_rss_create_tir(rss, tt, init_pkt_merge_param, inner);
+ err = mlx5e_rss_create_tir(rss, tt, pkt_merge_param, inner);
if (err)
return err;
tir = rss_get_tir(rss, tt, inner);
@@ -496,10 +520,11 @@ void mlx5e_rss_disable(struct mlx5e_rss *rss)
int err;
rss->enabled = false;
- err = mlx5e_rqt_redirect_direct(&rss->rqt, rss->drop_rqn, NULL);
+ err = mlx5e_rqt_redirect_direct(&rss->rqt, rss->params.drop_rqn, NULL);
if (err)
mlx5e_rss_warn(rss->mdev, "Failed to redirect RQT %#x to drop RQ %#x: err = %d\n",
- mlx5e_rqt_get_rqtn(&rss->rqt), rss->drop_rqn, err);
+ mlx5e_rqt_get_rqtn(&rss->rqt),
+ rss->params.drop_rqn, err);
}
int mlx5e_rss_packet_merge_set_param(struct mlx5e_rss *rss,
@@ -532,7 +557,7 @@ int mlx5e_rss_packet_merge_set_param(struct mlx5e_rss *rss,
}
inner_tir:
- if (!rss->inner_ft_support)
+ if (!rss->params.inner_ft_support)
continue;
tir = rss_get_tir(rss, tt, true);
@@ -551,7 +576,8 @@ inner_tir:
return final_err;
}
-int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc)
+void mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc,
+ bool *symmetric)
{
if (indir)
memcpy(indir, rss->indir.table,
@@ -564,11 +590,12 @@ int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc)
if (hfunc)
*hfunc = rss->hash.hfunc;
- return 0;
+ if (symmetric)
+ *symmetric = rss->hash.symmetric;
}
int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
- const u8 *key, const u8 *hfunc,
+ const u8 *key, const u8 *hfunc, const bool *symmetric,
u32 *rqns, u32 *vhca_ids, unsigned int num_rqns)
{
bool changed_indir = false;
@@ -608,6 +635,11 @@ int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
rss->indir.actual_table_size * sizeof(*rss->indir.table));
}
+ if (symmetric) {
+ rss->hash.symmetric = *symmetric;
+ changed_hash = true;
+ }
+
if (changed_indir && rss->enabled) {
err = mlx5e_rss_apply(rss, rqns, vhca_ids, num_rqns);
if (err) {
@@ -658,7 +690,7 @@ int mlx5e_rss_set_hash_fields(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
return err;
}
- if (!(rss->inner_ft_support))
+ if (!(rss->params.inner_ft_support))
return 0;
err = mlx5e_rss_update_tir(rss, tt, true);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
index d0df98963c8d..17664757a561 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
@@ -13,40 +13,57 @@ enum mlx5e_rss_init_type {
MLX5E_RSS_INIT_TIRS
};
+struct mlx5e_rss_init_params {
+ enum mlx5e_rss_init_type type;
+ const struct mlx5e_packet_merge_param *pkt_merge_param;
+ unsigned int nch;
+ unsigned int max_nch;
+};
+
+struct mlx5e_rss_params {
+ bool inner_ft_support;
+ u32 drop_rqn;
+ bool self_lb_blk;
+};
+
struct mlx5e_rss_params_traffic_type
mlx5e_rss_get_default_tt_config(enum mlx5_traffic_types tt);
struct mlx5e_rss;
-int mlx5e_rss_params_indir_init(struct mlx5e_rss_params_indir *indir, struct mlx5_core_dev *mdev,
+int mlx5e_rss_params_indir_init(struct mlx5e_rss_params_indir *indir,
u32 actual_table_size, u32 max_table_size);
void mlx5e_rss_params_indir_cleanup(struct mlx5e_rss_params_indir *indir);
void mlx5e_rss_params_indir_modify_actual_size(struct mlx5e_rss *rss, u32 num_channels);
-struct mlx5e_rss *mlx5e_rss_init(struct mlx5_core_dev *mdev, bool inner_ft_support, u32 drop_rqn,
- const struct mlx5e_packet_merge_param *init_pkt_merge_param,
- enum mlx5e_rss_init_type type, unsigned int nch,
- unsigned int max_nch);
+struct mlx5e_rss *
+mlx5e_rss_init(struct mlx5_core_dev *mdev,
+ const struct mlx5e_rss_params *params,
+ const struct mlx5e_rss_init_params *init_params);
int mlx5e_rss_cleanup(struct mlx5e_rss *rss);
void mlx5e_rss_refcnt_inc(struct mlx5e_rss *rss);
void mlx5e_rss_refcnt_dec(struct mlx5e_rss *rss);
unsigned int mlx5e_rss_refcnt_read(struct mlx5e_rss *rss);
+bool mlx5e_rss_get_inner_ft_support(struct mlx5e_rss *rss);
u32 mlx5e_rss_get_tirn(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
bool inner);
-int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss,
- enum mlx5_traffic_types tt,
- const struct mlx5e_packet_merge_param *init_pkt_merge_param,
- bool inner, u32 *tirn);
+bool mlx5e_rss_valid_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types tt, bool inner);
+u32 mlx5e_rss_get_rqtn(struct mlx5e_rss *rss);
+int
+mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
+ const struct mlx5e_packet_merge_param *pkt_merge_param,
+ bool inner, u32 *tirn);
void mlx5e_rss_enable(struct mlx5e_rss *rss, u32 *rqns, u32 *vhca_ids, unsigned int num_rqns);
void mlx5e_rss_disable(struct mlx5e_rss *rss);
int mlx5e_rss_packet_merge_set_param(struct mlx5e_rss *rss,
struct mlx5e_packet_merge_param *pkt_merge_param);
-int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc);
+void mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc,
+ bool *symmetric);
int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
- const u8 *key, const u8 *hfunc,
+ const u8 *key, const u8 *hfunc, const bool *symmetric,
u32 *rqns, u32 *vhca_ids, unsigned int num_rqns);
struct mlx5e_rss_params_hash mlx5e_rss_get_hash(struct mlx5e_rss *rss);
u8 mlx5e_rss_get_hash_fields(struct mlx5e_rss *rss, enum mlx5_traffic_types tt);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
index a86eade9a9e0..55c117b7d8c4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
@@ -5,8 +5,6 @@
#include "channels.h"
#include "params.h"
-#define MLX5E_MAX_NUM_RSS 16
-
struct mlx5e_rx_res {
struct mlx5_core_dev *mdev; /* primary */
enum mlx5e_rx_res_features features;
@@ -56,51 +54,74 @@ static int mlx5e_rx_res_rss_init_def(struct mlx5e_rx_res *res,
unsigned int init_nch)
{
bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
+ struct mlx5e_rss_init_params init_params;
+ struct mlx5e_rss_params rss_params;
struct mlx5e_rss *rss;
if (WARN_ON(res->rss[0]))
return -EINVAL;
- rss = mlx5e_rss_init(res->mdev, inner_ft_support, res->drop_rqn,
- &res->pkt_merge_param, MLX5E_RSS_INIT_TIRS, init_nch, res->max_nch);
+ init_params = (struct mlx5e_rss_init_params) {
+ .type = MLX5E_RSS_INIT_TIRS,
+ .pkt_merge_param = &res->pkt_merge_param,
+ .nch = init_nch,
+ .max_nch = res->max_nch,
+ };
+
+ rss_params = (struct mlx5e_rss_params) {
+ .inner_ft_support = inner_ft_support,
+ .drop_rqn = res->drop_rqn,
+ .self_lb_blk =
+ res->features & MLX5E_RX_RES_FEATURE_SELF_LB_BLOCK,
+ };
+
+ rss = mlx5e_rss_init(res->mdev, &rss_params, &init_params);
if (IS_ERR(rss))
return PTR_ERR(rss);
- mlx5e_rss_set_indir_uniform(rss, init_nch);
+ mlx5e_rss_set_indir_uniform(rss, init_params.nch);
res->rss[0] = rss;
return 0;
}
-int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 *rss_idx, unsigned int init_nch)
+int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 rss_idx, unsigned int init_nch)
{
bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
+ struct mlx5e_rss_init_params init_params;
+ struct mlx5e_rss_params rss_params;
struct mlx5e_rss *rss;
- int i;
- for (i = 1; i < MLX5E_MAX_NUM_RSS; i++)
- if (!res->rss[i])
- break;
-
- if (i == MLX5E_MAX_NUM_RSS)
+ if (WARN_ON_ONCE(res->rss[rss_idx]))
return -ENOSPC;
- rss = mlx5e_rss_init(res->mdev, inner_ft_support, res->drop_rqn,
- &res->pkt_merge_param, MLX5E_RSS_INIT_NO_TIRS, init_nch,
- res->max_nch);
+ init_params = (struct mlx5e_rss_init_params) {
+ .type = MLX5E_RSS_INIT_NO_TIRS,
+ .pkt_merge_param = &res->pkt_merge_param,
+ .nch = init_nch,
+ .max_nch = res->max_nch,
+ };
+
+ rss_params = (struct mlx5e_rss_params) {
+ .inner_ft_support = inner_ft_support,
+ .drop_rqn = res->drop_rqn,
+ .self_lb_blk =
+ res->features & MLX5E_RX_RES_FEATURE_SELF_LB_BLOCK,
+ };
+
+ rss = mlx5e_rss_init(res->mdev, &rss_params, &init_params);
if (IS_ERR(rss))
return PTR_ERR(rss);
- mlx5e_rss_set_indir_uniform(rss, init_nch);
+ mlx5e_rss_set_indir_uniform(rss, init_params.nch);
if (res->rss_active) {
u32 *vhca_ids = get_vhca_ids(res, 0);
mlx5e_rss_enable(rss, res->rss_rqns, vhca_ids, res->rss_nch);
}
- res->rss[i] = rss;
- *rss_idx = i;
+ res->rss[rss_idx] = rss;
return 0;
}
@@ -195,23 +216,22 @@ void mlx5e_rx_res_rss_set_indir_uniform(struct mlx5e_rx_res *res, unsigned int n
mlx5e_rss_set_indir_uniform(res->rss[0], nch);
}
-int mlx5e_rx_res_rss_get_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
- u32 *indir, u8 *key, u8 *hfunc)
+void mlx5e_rx_res_rss_get_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
+ u32 *indir, u8 *key, u8 *hfunc, bool *symmetric)
{
- struct mlx5e_rss *rss;
+ struct mlx5e_rss *rss = NULL;
- if (rss_idx >= MLX5E_MAX_NUM_RSS)
- return -EINVAL;
+ if (rss_idx < MLX5E_MAX_NUM_RSS)
+ rss = res->rss[rss_idx];
+ if (WARN_ON_ONCE(!rss))
+ return;
- rss = res->rss[rss_idx];
- if (!rss)
- return -ENOENT;
-
- return mlx5e_rss_get_rxfh(rss, indir, key, hfunc);
+ mlx5e_rss_get_rxfh(rss, indir, key, hfunc, symmetric);
}
int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
- const u32 *indir, const u8 *key, const u8 *hfunc)
+ const u32 *indir, const u8 *key, const u8 *hfunc,
+ const bool *symmetric)
{
u32 *vhca_ids = get_vhca_ids(res, 0);
struct mlx5e_rss *rss;
@@ -223,8 +243,8 @@ int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
if (!rss)
return -ENOENT;
- return mlx5e_rss_set_rxfh(rss, indir, key, hfunc, res->rss_rqns, vhca_ids,
- res->rss_nch);
+ return mlx5e_rss_set_rxfh(rss, indir, key, hfunc, symmetric,
+ res->rss_rqns, vhca_ids, res->rss_nch);
}
int mlx5e_rx_res_rss_get_hash_fields(struct mlx5e_rx_res *res, u32 rss_idx,
@@ -330,6 +350,7 @@ static struct mlx5e_rx_res *mlx5e_rx_res_alloc(struct mlx5_core_dev *mdev, unsig
static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res)
{
bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
+ bool self_lb_blk = res->features & MLX5E_RX_RES_FEATURE_SELF_LB_BLOCK;
struct mlx5e_tir_builder *builder;
int err = 0;
int ix;
@@ -360,6 +381,8 @@ static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res)
mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
inner_ft_support);
mlx5e_tir_builder_build_packet_merge(builder, &res->pkt_merge_param);
+ mlx5e_tir_builder_build_self_lb_block(builder, self_lb_blk,
+ self_lb_blk);
mlx5e_tir_builder_build_direct(builder);
err = mlx5e_tir_init(&res->channels[ix].direct_tir, builder, res->mdev, true);
@@ -447,7 +470,7 @@ static void mlx5e_rx_res_ptp_destroy(struct mlx5e_rx_res *res)
struct mlx5e_rx_res *
mlx5e_rx_res_create(struct mlx5_core_dev *mdev, enum mlx5e_rx_res_features features,
unsigned int max_nch, u32 drop_rqn,
- const struct mlx5e_packet_merge_param *init_pkt_merge_param,
+ const struct mlx5e_packet_merge_param *pkt_merge_param,
unsigned int init_nch)
{
bool multi_vhca = features & MLX5E_RX_RES_FEATURE_MULTI_VHCA;
@@ -463,7 +486,7 @@ mlx5e_rx_res_create(struct mlx5_core_dev *mdev, enum mlx5e_rx_res_features featu
res->max_nch = max_nch;
res->drop_rqn = drop_rqn;
- res->pkt_merge_param = *init_pkt_merge_param;
+ res->pkt_merge_param = *pkt_merge_param;
init_rwsem(&res->pkt_merge_param_sem);
err = mlx5e_rx_res_rss_init_def(res, init_nch);
@@ -497,6 +520,11 @@ void mlx5e_rx_res_destroy(struct mlx5e_rx_res *res)
mlx5e_rx_res_free(res);
}
+unsigned int mlx5e_rx_res_get_max_nch(struct mlx5e_rx_res *res)
+{
+ return res->max_nch;
+}
+
u32 mlx5e_rx_res_get_tirn_direct(struct mlx5e_rx_res *res, unsigned int ix)
{
return mlx5e_tir_get_tirn(&res->channels[ix].direct_tir);
@@ -522,7 +550,7 @@ u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res)
return mlx5e_tir_get_tirn(&res->ptp.tir);
}
-static u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix)
+u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix)
{
return mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt);
}
@@ -575,8 +603,6 @@ void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_chann
for (ix = 0; ix < nch; ix++)
mlx5e_rx_res_channel_activate_direct(res, chs, ix);
- for (ix = nch; ix < res->max_nch; ix++)
- mlx5e_rx_res_channel_deactivate_direct(res, ix);
if (res->features & MLX5E_RX_RES_FEATURE_PTP) {
u32 rqn;
@@ -599,7 +625,7 @@ void mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res *res)
mlx5e_rx_res_rss_disable(res);
- for (ix = 0; ix < res->max_nch; ix++)
+ for (ix = 0; ix < res->rss_nch; ix++)
mlx5e_rx_res_channel_deactivate_direct(res, ix);
if (res->features & MLX5E_RX_RES_FEATURE_PTP) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
index 7b1a9f0f1874..675780120a20 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
@@ -10,6 +10,8 @@
#include "fs.h"
#include "rss.h"
+#define MLX5E_MAX_NUM_RSS 16
+
struct mlx5e_rx_res;
struct mlx5e_channels;
@@ -19,13 +21,14 @@ enum mlx5e_rx_res_features {
MLX5E_RX_RES_FEATURE_INNER_FT = BIT(0),
MLX5E_RX_RES_FEATURE_PTP = BIT(1),
MLX5E_RX_RES_FEATURE_MULTI_VHCA = BIT(2),
+ MLX5E_RX_RES_FEATURE_SELF_LB_BLOCK = BIT(3),
};
/* Setup */
struct mlx5e_rx_res *
mlx5e_rx_res_create(struct mlx5_core_dev *mdev, enum mlx5e_rx_res_features features,
unsigned int max_nch, u32 drop_rqn,
- const struct mlx5e_packet_merge_param *init_pkt_merge_param,
+ const struct mlx5e_packet_merge_param *pkt_merge_param,
unsigned int init_nch);
void mlx5e_rx_res_destroy(struct mlx5e_rx_res *res);
@@ -34,6 +37,9 @@ u32 mlx5e_rx_res_get_tirn_direct(struct mlx5e_rx_res *res, unsigned int ix);
u32 mlx5e_rx_res_get_tirn_rss(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt);
u32 mlx5e_rx_res_get_tirn_rss_inner(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt);
u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res);
+u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix);
+unsigned int mlx5e_rx_res_get_max_nch(struct mlx5e_rx_res *res);
+bool mlx5_rx_res_rss_inner_ft_support(struct mlx5e_rx_res *res);
/* Activate/deactivate API */
void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs);
@@ -43,10 +49,12 @@ void mlx5e_rx_res_xsk_update(struct mlx5e_rx_res *res, struct mlx5e_channels *ch
/* Configuration API */
void mlx5e_rx_res_rss_set_indir_uniform(struct mlx5e_rx_res *res, unsigned int nch);
-int mlx5e_rx_res_rss_get_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
- u32 *indir, u8 *key, u8 *hfunc);
+void mlx5e_rx_res_rss_get_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
+ u32 *indir, u8 *key, u8 *hfunc,
+ bool *symmetric);
int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
- const u32 *indir, const u8 *key, const u8 *hfunc);
+ const u32 *indir, const u8 *key, const u8 *hfunc,
+ const bool *symmetric);
int mlx5e_rx_res_rss_get_hash_fields(struct mlx5e_rx_res *res, u32 rss_idx,
enum mlx5_traffic_types tt);
@@ -55,7 +63,7 @@ int mlx5e_rx_res_rss_set_hash_fields(struct mlx5e_rx_res *res, u32 rss_idx,
int mlx5e_rx_res_packet_merge_set_param(struct mlx5e_rx_res *res,
struct mlx5e_packet_merge_param *pkt_merge_param);
-int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 *rss_idx, unsigned int init_nch);
+int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 rss_idx, unsigned int init_nch);
int mlx5e_rx_res_rss_destroy(struct mlx5e_rx_res *res, u32 rss_idx);
int mlx5e_rx_res_rss_cnt(struct mlx5e_rx_res *res);
int mlx5e_rx_res_rss_index(struct mlx5e_rx_res *res, struct mlx5e_rss *rss);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
index d6c12d0ea55b..2e528b2c34d6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
@@ -73,11 +73,6 @@ struct mlx5e_tc_act {
bool is_terminating_action;
};
-struct mlx5e_tc_flow_action {
- unsigned int num_entries;
- struct flow_action_entry **entries;
-};
-
extern struct mlx5e_tc_act mlx5e_tc_act_drop;
extern struct mlx5e_tc_act mlx5e_tc_act_trap;
extern struct mlx5e_tc_act mlx5e_tc_act_accept;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
index feeb41693c17..b6cabe829f19 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
@@ -5,6 +5,16 @@
#include "en/tc_priv.h"
#include "en/tc_ct.h"
+static bool
+tc_act_can_offload_ct(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index,
+ struct mlx5_flow_attr *attr)
+{
+ return !((act->ct.action & TCA_CT_ACT_COMMIT) &&
+ flow_action_is_last_entry(parse_state->flow_action, act));
+}
+
static int
tc_act_parse_ct(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
@@ -56,6 +66,7 @@ tc_act_is_missable_ct(const struct flow_action_entry *act)
}
struct mlx5e_tc_act mlx5e_tc_act_ct = {
+ .can_offload = tc_act_can_offload_ct,
.parse_action = tc_act_parse_ct,
.post_parse = tc_act_post_parse_ct,
.is_multi_table_act = tc_act_is_multi_table_act_ct,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
index a13c5e707b83..9bdb5820c553 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
@@ -94,29 +94,30 @@ mlx5e_tc_act_vlan_add_push_action(struct mlx5e_priv *priv,
struct net_device **out_dev,
struct netlink_ext_ack *extack)
{
- struct net_device *vlan_dev = *out_dev;
- struct flow_action_entry vlan_act = {
- .id = FLOW_ACTION_VLAN_PUSH,
- .vlan.vid = vlan_dev_vlan_id(vlan_dev),
- .vlan.proto = vlan_dev_vlan_proto(vlan_dev),
- .vlan.prio = 0,
- };
- int err;
-
- err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, &attr->action, extack, NULL);
- if (err)
- return err;
-
- rcu_read_lock();
- *out_dev = dev_get_by_index_rcu(dev_net(vlan_dev), dev_get_iflink(vlan_dev));
- rcu_read_unlock();
- if (!*out_dev)
- return -ENODEV;
+ do {
+ struct net_device *vlan_dev = *out_dev;
+ struct flow_action_entry vlan_act = {
+ .id = FLOW_ACTION_VLAN_PUSH,
+ .vlan.vid = vlan_dev_vlan_id(vlan_dev),
+ .vlan.proto = vlan_dev_vlan_proto(vlan_dev),
+ .vlan.prio = 0,
+ };
+ int err;
+
+ err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr,
+ &attr->action, extack, NULL);
+ if (err)
+ return err;
- if (is_vlan_dev(*out_dev))
- err = mlx5e_tc_act_vlan_add_push_action(priv, attr, out_dev, extack);
+ rcu_read_lock();
+ *out_dev = dev_get_by_index_rcu(dev_net(vlan_dev),
+ dev_get_iflink(vlan_dev));
+ rcu_read_unlock();
+ if (!*out_dev)
+ return -ENODEV;
+ } while (is_vlan_dev(*out_dev));
- return err;
+ return 0;
}
int
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h
index bb6b1a979ba1..e5b30801314b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h
@@ -25,6 +25,8 @@ struct mlx5_ct_fs_ops {
struct mlx5_flow_attr *attr,
struct flow_rule *flow_rule);
void (*ct_rule_del)(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule);
+ int (*ct_rule_update)(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule,
+ struct mlx5_flow_spec *spec, struct mlx5_flow_attr *attr);
size_t priv_size;
};
@@ -46,4 +48,14 @@ mlx5_ct_fs_smfs_ops_get(void)
}
#endif /* IS_ENABLED(CONFIG_MLX5_SW_STEERING) */
+#if IS_ENABLED(CONFIG_MLX5_HW_STEERING)
+struct mlx5_ct_fs_ops *mlx5_ct_fs_hmfs_ops_get(void);
+#else
+static inline struct mlx5_ct_fs_ops *
+mlx5_ct_fs_hmfs_ops_get(void)
+{
+ return NULL;
+}
+#endif /* IS_ENABLED(CONFIG_MLX5_HW_STEERING) */
+
#endif /* __MLX5_EN_TC_CT_FS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_dmfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_dmfs.c
index ae4f55be48ce..64a82aafaaca 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_dmfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_dmfs.c
@@ -65,9 +65,30 @@ mlx5_ct_fs_dmfs_ct_rule_del(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_ru
kfree(dmfs_rule);
}
+static int mlx5_ct_fs_dmfs_ct_rule_update(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule,
+ struct mlx5_flow_spec *spec, struct mlx5_flow_attr *attr)
+{
+ struct mlx5_ct_fs_dmfs_rule *dmfs_rule = container_of(fs_rule,
+ struct mlx5_ct_fs_dmfs_rule,
+ fs_rule);
+ struct mlx5e_priv *priv = netdev_priv(fs->netdev);
+ struct mlx5_flow_handle *rule;
+
+ rule = mlx5_tc_rule_insert(priv, spec, attr);
+ if (IS_ERR(rule))
+ return PTR_ERR(rule);
+ mlx5_tc_rule_delete(priv, dmfs_rule->rule, dmfs_rule->attr);
+
+ dmfs_rule->rule = rule;
+ dmfs_rule->attr = attr;
+
+ return 0;
+}
+
static struct mlx5_ct_fs_ops dmfs_ops = {
.ct_rule_add = mlx5_ct_fs_dmfs_ct_rule_add,
.ct_rule_del = mlx5_ct_fs_dmfs_ct_rule_del,
+ .ct_rule_update = mlx5_ct_fs_dmfs_ct_rule_update,
.init = mlx5_ct_fs_dmfs_init,
.destroy = mlx5_ct_fs_dmfs_destroy,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c
new file mode 100644
index 000000000000..d3db6146fcad
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. */
+
+#include "en_tc.h"
+#include "en/tc_ct.h"
+#include "en/tc_priv.h"
+#include "en/tc/ct_fs.h"
+#include "fs_core.h"
+#include "steering/hws/fs_hws_pools.h"
+#include "steering/hws/mlx5hws.h"
+#include "steering/hws/table.h"
+
+struct mlx5_ct_fs_hmfs_matcher {
+ struct mlx5hws_bwc_matcher *hws_bwc_matcher;
+ refcount_t ref;
+};
+
+/* We need {ipv4, ipv6} x {tcp, udp, gre} matchers. */
+#define NUM_MATCHERS (2 * 3)
+
+struct mlx5_ct_fs_hmfs {
+ struct mlx5hws_table *ct_tbl;
+ struct mlx5hws_table *ct_nat_tbl;
+ struct mlx5_flow_table *ct_nat;
+ struct mlx5hws_action *fwd_action;
+ struct mlx5hws_action *last_action;
+ struct mlx5hws_context *ctx;
+ struct mutex lock; /* Guards matchers */
+ struct mlx5_ct_fs_hmfs_matcher matchers[NUM_MATCHERS];
+ struct mlx5_ct_fs_hmfs_matcher matchers_nat[NUM_MATCHERS];
+};
+
+struct mlx5_ct_fs_hmfs_rule {
+ struct mlx5_ct_fs_rule fs_rule;
+ struct mlx5hws_bwc_rule *hws_bwc_rule;
+ struct mlx5_ct_fs_hmfs_matcher *hmfs_matcher;
+ struct mlx5_fc *counter;
+};
+
+static u32 get_matcher_idx(bool ipv4, bool tcp, bool gre)
+{
+ return ipv4 * 3 + tcp * 2 + gre;
+}
+
+static int mlx5_ct_fs_hmfs_init(struct mlx5_ct_fs *fs, struct mlx5_flow_table *ct,
+ struct mlx5_flow_table *ct_nat, struct mlx5_flow_table *post_ct)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+ struct mlx5hws_table *ct_tbl, *ct_nat_tbl, *post_ct_tbl;
+ struct mlx5_ct_fs_hmfs *fs_hmfs = mlx5_ct_fs_priv(fs);
+
+ ct_tbl = ct->fs_hws_table.hws_table;
+ ct_nat_tbl = ct_nat->fs_hws_table.hws_table;
+ post_ct_tbl = post_ct->fs_hws_table.hws_table;
+ fs_hmfs->ct_nat = ct_nat;
+
+ if (!ct_tbl || !ct_nat_tbl || !post_ct_tbl) {
+ netdev_warn(fs->netdev, "ct_fs_hmfs: failed to init, missing backing hws tables");
+ return -EOPNOTSUPP;
+ }
+
+ netdev_dbg(fs->netdev, "using hmfs steering");
+
+ fs_hmfs->ct_tbl = ct_tbl;
+ fs_hmfs->ct_nat_tbl = ct_nat_tbl;
+ fs_hmfs->ctx = ct_tbl->ctx;
+ mutex_init(&fs_hmfs->lock);
+
+ fs_hmfs->fwd_action = mlx5hws_action_create_dest_table(ct_tbl->ctx, post_ct_tbl, flags);
+ if (!fs_hmfs->fwd_action) {
+ netdev_warn(fs->netdev, "ct_fs_hmfs: failed to create fwd action\n");
+ return -EINVAL;
+ }
+ fs_hmfs->last_action = mlx5hws_action_create_last(ct_tbl->ctx, flags);
+ if (!fs_hmfs->last_action) {
+ netdev_warn(fs->netdev, "ct_fs_hmfs: failed to create last action\n");
+ mlx5hws_action_destroy(fs_hmfs->fwd_action);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void mlx5_ct_fs_hmfs_destroy(struct mlx5_ct_fs *fs)
+{
+ struct mlx5_ct_fs_hmfs *fs_hmfs = mlx5_ct_fs_priv(fs);
+
+ mlx5hws_action_destroy(fs_hmfs->last_action);
+ mlx5hws_action_destroy(fs_hmfs->fwd_action);
+}
+
+static struct mlx5hws_bwc_matcher *
+mlx5_ct_fs_hmfs_matcher_create(struct mlx5_ct_fs *fs, struct mlx5hws_table *tbl,
+ struct mlx5_flow_spec *spec, bool ipv4, bool tcp, bool gre)
+{
+ u8 match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2 | MLX5_MATCH_OUTER_HEADERS;
+ struct mlx5hws_match_parameters mask = {
+ .match_buf = spec->match_criteria,
+ .match_sz = sizeof(spec->match_criteria),
+ };
+ u32 priority = get_matcher_idx(ipv4, tcp, gre); /* Static priority based on params. */
+ struct mlx5hws_bwc_matcher *hws_bwc_matcher;
+
+ hws_bwc_matcher = mlx5hws_bwc_matcher_create(tbl, priority, match_criteria_enable, &mask);
+ if (!hws_bwc_matcher)
+ return ERR_PTR(-EINVAL);
+
+ return hws_bwc_matcher;
+}
+
+static struct mlx5_ct_fs_hmfs_matcher *
+mlx5_ct_fs_hmfs_matcher_get(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
+ bool nat, bool ipv4, bool tcp, bool gre)
+{
+ struct mlx5_ct_fs_hmfs *fs_hmfs = mlx5_ct_fs_priv(fs);
+ u32 matcher_idx = get_matcher_idx(ipv4, tcp, gre);
+ struct mlx5_ct_fs_hmfs_matcher *hmfs_matcher;
+ struct mlx5hws_bwc_matcher *hws_bwc_matcher;
+ struct mlx5hws_table *tbl;
+
+ hmfs_matcher = nat ?
+ (fs_hmfs->matchers_nat + matcher_idx) :
+ (fs_hmfs->matchers + matcher_idx);
+
+ if (refcount_inc_not_zero(&hmfs_matcher->ref))
+ return hmfs_matcher;
+
+ mutex_lock(&fs_hmfs->lock);
+
+	/* Retry with the lock held, as the matcher might have already been created by another CPU. */
+ if (refcount_inc_not_zero(&hmfs_matcher->ref))
+ goto out_unlock;
+
+ tbl = nat ? fs_hmfs->ct_nat_tbl : fs_hmfs->ct_tbl;
+
+ hws_bwc_matcher = mlx5_ct_fs_hmfs_matcher_create(fs, tbl, spec, ipv4, tcp, gre);
+ if (IS_ERR(hws_bwc_matcher)) {
+ netdev_warn(fs->netdev,
+ "ct_fs_hmfs: failed to create bwc matcher (nat %d, ipv4 %d, tcp %d, gre %d), err: %pe\n",
+ nat, ipv4, tcp, gre, hws_bwc_matcher);
+
+ hmfs_matcher = ERR_CAST(hws_bwc_matcher);
+ goto out_unlock;
+ }
+
+ hmfs_matcher->hws_bwc_matcher = hws_bwc_matcher;
+ refcount_set(&hmfs_matcher->ref, 1);
+
+out_unlock:
+ mutex_unlock(&fs_hmfs->lock);
+ return hmfs_matcher;
+}
+
+static void
+mlx5_ct_fs_hmfs_matcher_put(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_hmfs_matcher *hmfs_matcher)
+{
+ struct mlx5_ct_fs_hmfs *fs_hmfs = mlx5_ct_fs_priv(fs);
+
+ if (!refcount_dec_and_mutex_lock(&hmfs_matcher->ref, &fs_hmfs->lock))
+ return;
+
+ mlx5hws_bwc_matcher_destroy(hmfs_matcher->hws_bwc_matcher);
+ mutex_unlock(&fs_hmfs->lock);
+}
+
+#define NUM_CT_HMFS_RULES 4
+
+static void mlx5_ct_fs_hmfs_fill_rule_actions(struct mlx5_ct_fs_hmfs *fs_hmfs,
+ struct mlx5_flow_attr *attr,
+ struct mlx5hws_rule_action *rule_actions)
+{
+ struct mlx5_fs_hws_action *mh_action = &attr->modify_hdr->fs_hws_action;
+
+ memset(rule_actions, 0, NUM_CT_HMFS_RULES * sizeof(*rule_actions));
+ rule_actions[0].action = mlx5_fc_get_hws_action(fs_hmfs->ctx, attr->counter);
+ rule_actions[0].counter.offset =
+ attr->counter->id - attr->counter->bulk->base_id;
+ /* Modify header is special, it may require extra arguments outside the action itself. */
+ if (mh_action->mh_data) {
+ rule_actions[1].modify_header.offset = mh_action->mh_data->offset;
+ rule_actions[1].modify_header.data = mh_action->mh_data->data;
+ }
+ rule_actions[1].action = mh_action->hws_action;
+ rule_actions[2].action = fs_hmfs->fwd_action;
+ rule_actions[3].action = fs_hmfs->last_action;
+}
+
+static struct mlx5_ct_fs_rule *
+mlx5_ct_fs_hmfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *attr, struct flow_rule *flow_rule)
+{
+ struct mlx5hws_rule_action rule_actions[NUM_CT_HMFS_RULES];
+ struct mlx5_ct_fs_hmfs *fs_hmfs = mlx5_ct_fs_priv(fs);
+ struct mlx5hws_match_parameters match_params = {
+ .match_buf = spec->match_value,
+ .match_sz = ARRAY_SIZE(spec->match_value),
+ };
+ struct mlx5_ct_fs_hmfs_matcher *hmfs_matcher;
+ struct mlx5_ct_fs_hmfs_rule *hmfs_rule;
+ bool nat, tcp, ipv4, gre;
+ int err;
+
+ if (!mlx5e_tc_ct_is_valid_flow_rule(fs->netdev, flow_rule))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ hmfs_rule = kzalloc(sizeof(*hmfs_rule), GFP_KERNEL);
+ if (!hmfs_rule)
+ return ERR_PTR(-ENOMEM);
+
+ nat = (attr->ft == fs_hmfs->ct_nat);
+ ipv4 = mlx5e_tc_get_ip_version(spec, true) == 4;
+ tcp = MLX5_GET(fte_match_param, spec->match_value,
+ outer_headers.ip_protocol) == IPPROTO_TCP;
+ gre = MLX5_GET(fte_match_param, spec->match_value,
+ outer_headers.ip_protocol) == IPPROTO_GRE;
+
+ hmfs_matcher = mlx5_ct_fs_hmfs_matcher_get(fs, spec, nat, ipv4, tcp, gre);
+ if (IS_ERR(hmfs_matcher)) {
+ err = PTR_ERR(hmfs_matcher);
+ goto err_free_rule;
+ }
+ hmfs_rule->hmfs_matcher = hmfs_matcher;
+
+ mlx5_ct_fs_hmfs_fill_rule_actions(fs_hmfs, attr, rule_actions);
+ hmfs_rule->counter = attr->counter;
+
+ hmfs_rule->hws_bwc_rule =
+ mlx5hws_bwc_rule_create(hmfs_matcher->hws_bwc_matcher, &match_params,
+ spec->flow_context.flow_source, rule_actions);
+ if (!hmfs_rule->hws_bwc_rule) {
+ err = -EINVAL;
+ goto err_put_matcher;
+ }
+
+ return &hmfs_rule->fs_rule;
+
+err_put_matcher:
+ mlx5_fc_put_hws_action(hmfs_rule->counter);
+ mlx5_ct_fs_hmfs_matcher_put(fs, hmfs_matcher);
+err_free_rule:
+ kfree(hmfs_rule);
+ return ERR_PTR(err);
+}
+
+static void mlx5_ct_fs_hmfs_ct_rule_del(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule)
+{
+ struct mlx5_ct_fs_hmfs_rule *hmfs_rule = container_of(fs_rule,
+ struct mlx5_ct_fs_hmfs_rule,
+ fs_rule);
+ mlx5hws_bwc_rule_destroy(hmfs_rule->hws_bwc_rule);
+ mlx5_fc_put_hws_action(hmfs_rule->counter);
+ mlx5_ct_fs_hmfs_matcher_put(fs, hmfs_rule->hmfs_matcher);
+ kfree(hmfs_rule);
+}
+
+static int mlx5_ct_fs_hmfs_ct_rule_update(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule,
+ struct mlx5_flow_spec *spec, struct mlx5_flow_attr *attr)
+{
+ struct mlx5_ct_fs_hmfs_rule *hmfs_rule = container_of(fs_rule,
+ struct mlx5_ct_fs_hmfs_rule,
+ fs_rule);
+ struct mlx5hws_rule_action rule_actions[NUM_CT_HMFS_RULES];
+ struct mlx5_ct_fs_hmfs *fs_hmfs = mlx5_ct_fs_priv(fs);
+ int err;
+
+ mlx5_ct_fs_hmfs_fill_rule_actions(fs_hmfs, attr, rule_actions);
+
+ err = mlx5hws_bwc_rule_action_update(hmfs_rule->hws_bwc_rule, rule_actions);
+ if (err) {
+ mlx5_fc_put_hws_action(attr->counter);
+ return err;
+ }
+
+ mlx5_fc_put_hws_action(hmfs_rule->counter);
+ hmfs_rule->counter = attr->counter;
+
+ return 0;
+}
+
+static struct mlx5_ct_fs_ops hmfs_ops = {
+ .ct_rule_add = mlx5_ct_fs_hmfs_ct_rule_add,
+ .ct_rule_del = mlx5_ct_fs_hmfs_ct_rule_del,
+ .ct_rule_update = mlx5_ct_fs_hmfs_ct_rule_update,
+
+ .init = mlx5_ct_fs_hmfs_init,
+ .destroy = mlx5_ct_fs_hmfs_destroy,
+
+ .priv_size = sizeof(struct mlx5_ct_fs_hmfs),
+};
+
+struct mlx5_ct_fs_ops *mlx5_ct_fs_hmfs_ops_get(void)
+{
+ return &hmfs_ops;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
index 8c531f4ec912..4d6924b644c9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
@@ -13,7 +13,6 @@
#define INIT_ERR_PREFIX "ct_fs_smfs init failed"
#define ct_dbg(fmt, args...)\
netdev_dbg(fs->netdev, "ct_fs_smfs debug: " fmt "\n", ##args)
-#define MLX5_CT_TCP_FLAGS_MASK cpu_to_be16(be32_to_cpu(TCP_FLAG_RST | TCP_FLAG_FIN) >> 16)
struct mlx5_ct_fs_smfs_matcher {
struct mlx5dr_matcher *dr_matcher;
@@ -149,8 +148,8 @@ mlx5_ct_fs_smfs_matcher_get(struct mlx5_ct_fs *fs, bool nat, bool ipv4, bool tcp
dr_matcher = mlx5_ct_fs_smfs_matcher_create(fs, tbl, ipv4, tcp, gre, prio);
if (IS_ERR(dr_matcher)) {
netdev_warn(fs->netdev,
- "ct_fs_smfs: failed to create matcher (nat %d, ipv4 %d, tcp %d, gre %d), err: %ld\n",
- nat, ipv4, tcp, gre, PTR_ERR(dr_matcher));
+ "ct_fs_smfs: failed to create matcher (nat %d, ipv4 %d, tcp %d, gre %d), err: %pe\n",
+ nat, ipv4, tcp, gre, dr_matcher);
smfs_matcher = ERR_CAST(dr_matcher);
goto out_unlock;
@@ -220,78 +219,6 @@ mlx5_ct_fs_smfs_destroy(struct mlx5_ct_fs *fs)
mlx5_smfs_action_destroy(fs_smfs->fwd_action);
}
-static inline bool
-mlx5_tc_ct_valid_used_dissector_keys(const u64 used_keys)
-{
-#define DISS_BIT(name) BIT_ULL(FLOW_DISSECTOR_KEY_ ## name)
- const u64 basic_keys = DISS_BIT(BASIC) | DISS_BIT(CONTROL) |
- DISS_BIT(META);
- const u64 ipv4_tcp = basic_keys | DISS_BIT(IPV4_ADDRS) |
- DISS_BIT(PORTS) | DISS_BIT(TCP);
- const u64 ipv6_tcp = basic_keys | DISS_BIT(IPV6_ADDRS) |
- DISS_BIT(PORTS) | DISS_BIT(TCP);
- const u64 ipv4_udp = basic_keys | DISS_BIT(IPV4_ADDRS) |
- DISS_BIT(PORTS);
- const u64 ipv6_udp = basic_keys | DISS_BIT(IPV6_ADDRS) |
- DISS_BIT(PORTS);
- const u64 ipv4_gre = basic_keys | DISS_BIT(IPV4_ADDRS);
- const u64 ipv6_gre = basic_keys | DISS_BIT(IPV6_ADDRS);
-
- return (used_keys == ipv4_tcp || used_keys == ipv4_udp || used_keys == ipv6_tcp ||
- used_keys == ipv6_udp || used_keys == ipv4_gre || used_keys == ipv6_gre);
-}
-
-static bool
-mlx5_ct_fs_smfs_ct_validate_flow_rule(struct mlx5_ct_fs *fs, struct flow_rule *flow_rule)
-{
- struct flow_match_ipv4_addrs ipv4_addrs;
- struct flow_match_ipv6_addrs ipv6_addrs;
- struct flow_match_control control;
- struct flow_match_basic basic;
- struct flow_match_ports ports;
- struct flow_match_tcp tcp;
-
- if (!mlx5_tc_ct_valid_used_dissector_keys(flow_rule->match.dissector->used_keys)) {
- ct_dbg("rule uses unexpected dissectors (0x%016llx)",
- flow_rule->match.dissector->used_keys);
- return false;
- }
-
- flow_rule_match_basic(flow_rule, &basic);
- flow_rule_match_control(flow_rule, &control);
- flow_rule_match_ipv4_addrs(flow_rule, &ipv4_addrs);
- flow_rule_match_ipv6_addrs(flow_rule, &ipv6_addrs);
- if (basic.key->ip_proto != IPPROTO_GRE)
- flow_rule_match_ports(flow_rule, &ports);
- if (basic.key->ip_proto == IPPROTO_TCP)
- flow_rule_match_tcp(flow_rule, &tcp);
-
- if (basic.mask->n_proto != htons(0xFFFF) ||
- (basic.key->n_proto != htons(ETH_P_IP) && basic.key->n_proto != htons(ETH_P_IPV6)) ||
- basic.mask->ip_proto != 0xFF ||
- (basic.key->ip_proto != IPPROTO_UDP && basic.key->ip_proto != IPPROTO_TCP &&
- basic.key->ip_proto != IPPROTO_GRE)) {
- ct_dbg("rule uses unexpected basic match (n_proto 0x%04x/0x%04x, ip_proto 0x%02x/0x%02x)",
- ntohs(basic.key->n_proto), ntohs(basic.mask->n_proto),
- basic.key->ip_proto, basic.mask->ip_proto);
- return false;
- }
-
- if (basic.key->ip_proto != IPPROTO_GRE &&
- (ports.mask->src != htons(0xFFFF) || ports.mask->dst != htons(0xFFFF))) {
- ct_dbg("rule uses ports match (src 0x%04x, dst 0x%04x)",
- ports.mask->src, ports.mask->dst);
- return false;
- }
-
- if (basic.key->ip_proto == IPPROTO_TCP && tcp.mask->flags != MLX5_CT_TCP_FLAGS_MASK) {
- ct_dbg("rule uses unexpected tcp match (flags 0x%02x)", tcp.mask->flags);
- return false;
- }
-
- return true;
-}
-
static struct mlx5_ct_fs_rule *
mlx5_ct_fs_smfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr, struct flow_rule *flow_rule)
@@ -304,7 +231,7 @@ mlx5_ct_fs_smfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
int num_actions = 0, err;
bool nat, tcp, ipv4, gre;
- if (!mlx5_ct_fs_smfs_ct_validate_flow_rule(fs, flow_rule))
+ if (!mlx5e_tc_ct_is_valid_flow_rule(fs->netdev, flow_rule))
return ERR_PTR(-EOPNOTSUPP);
smfs_rule = kzalloc(sizeof(*smfs_rule), GFP_KERNEL);
@@ -318,7 +245,7 @@ mlx5_ct_fs_smfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
}
actions[num_actions++] = smfs_rule->count_action;
- actions[num_actions++] = attr->modify_hdr->action.dr_action;
+ actions[num_actions++] = attr->modify_hdr->fs_dr_action.dr_action;
actions[num_actions++] = fs_smfs->fwd_action;
nat = (attr->ft == fs_smfs->ct_nat);
@@ -368,9 +295,35 @@ mlx5_ct_fs_smfs_ct_rule_del(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_ru
kfree(smfs_rule);
}
+static int mlx5_ct_fs_smfs_ct_rule_update(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule,
+ struct mlx5_flow_spec *spec, struct mlx5_flow_attr *attr)
+{
+ struct mlx5_ct_fs_smfs_rule *smfs_rule = container_of(fs_rule,
+ struct mlx5_ct_fs_smfs_rule,
+ fs_rule);
+ struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);
+ struct mlx5dr_action *actions[3]; /* We only need to create 3 actions, see below. */
+ struct mlx5dr_rule *rule;
+
+ actions[0] = smfs_rule->count_action;
+ actions[1] = attr->modify_hdr->fs_dr_action.dr_action;
+ actions[2] = fs_smfs->fwd_action;
+
+ rule = mlx5_smfs_rule_create(smfs_rule->smfs_matcher->dr_matcher, spec,
+ ARRAY_SIZE(actions), actions, spec->flow_context.flow_source);
+ if (!rule)
+ return -EINVAL;
+
+ mlx5_smfs_rule_destroy(smfs_rule->rule);
+ smfs_rule->rule = rule;
+
+ return 0;
+}
+
static struct mlx5_ct_fs_ops fs_smfs_ops = {
.ct_rule_add = mlx5_ct_fs_smfs_ct_rule_add,
.ct_rule_del = mlx5_ct_fs_smfs_ct_rule_del,
+ .ct_rule_update = mlx5_ct_fs_smfs_ct_rule_update,
.init = mlx5_ct_fs_smfs_init,
.destroy = mlx5_ct_fs_smfs_destroy,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c
index 8afcec0c5d3c..991f47050643 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c
@@ -93,8 +93,8 @@ mlx5e_int_port_create_rx_rule(struct mlx5_eswitch *esw,
flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
&flow_act, dest, 1);
if (IS_ERR(flow_rule))
- mlx5_core_warn(esw->dev, "ft offloads: Failed to add internal vport rx rule err %ld\n",
- PTR_ERR(flow_rule));
+ mlx5_core_warn(esw->dev, "ft offloads: Failed to add internal vport rx rule err %pe\n",
+ flow_rule);
kvfree(spec);
@@ -307,7 +307,8 @@ mlx5e_tc_int_port_init(struct mlx5e_priv *priv)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_tc_int_port_priv *int_port_priv;
- u64 mapping_id;
+ u8 mapping_id[MLX5_SW_IMAGE_GUID_MAX_BYTES];
+ u8 id_len;
if (!mlx5e_tc_int_port_supported(esw))
return NULL;
@@ -316,14 +317,15 @@ mlx5e_tc_int_port_init(struct mlx5e_priv *priv)
if (!int_port_priv)
return NULL;
- mapping_id = mlx5_query_nic_system_image_guid(priv->mdev);
+ mlx5_query_nic_sw_system_image_guid(priv->mdev, mapping_id, &id_len);
- int_port_priv->metadata_mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_INT_PORT,
+ int_port_priv->metadata_mapping = mapping_create_for_id(mapping_id, id_len,
+ MAPPING_TYPE_INT_PORT,
sizeof(u32) * 2,
(1 << ESW_VPORT_BITS) - 1, true);
if (IS_ERR(int_port_priv->metadata_mapping)) {
- mlx5_core_warn(priv->mdev, "Can't allocate metadata mapping of int port offload, err=%ld\n",
- PTR_ERR(int_port_priv->metadata_mapping));
+ mlx5_core_warn(priv->mdev, "Can't allocate metadata mapping of int port offload, err=%pe\n",
+ int_port_priv->metadata_mapping);
goto err_mapping;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
index 8218c892b161..7819fb297280 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
@@ -593,3 +593,8 @@ mlx5e_tc_meter_get_stats(struct mlx5e_flow_meter_handle *meter,
*drops = packets2;
*lastuse = max_t(u64, lastuse1, lastuse2);
}
+
+int mlx5e_flow_meter_get_base_id(struct mlx5e_flow_meter_handle *meter)
+{
+ return meter->meters_obj->base_id;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h
index 9b795cd106bb..d6afb6556875 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h
@@ -72,4 +72,17 @@ void
mlx5e_tc_meter_get_stats(struct mlx5e_flow_meter_handle *meter,
u64 *bytes, u64 *packets, u64 *drops, u64 *lastuse);
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
+
+int mlx5e_flow_meter_get_base_id(struct mlx5e_flow_meter_handle *meter);
+
+#else /* CONFIG_MLX5_CLS_ACT */
+
+static inline int
+mlx5e_flow_meter_get_base_id(struct mlx5e_flow_meter_handle *meter)
+{
+ return 0;
+}
+#endif /* CONFIG_MLX5_CLS_ACT */
+
#endif /* __MLX5_EN_FLOW_METER_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index fadfa8b50beb..fc0e57403d25 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -69,6 +69,8 @@ struct mlx5_tc_ct_priv {
struct rhashtable ct_tuples_nat_ht;
struct mlx5_flow_table *ct;
struct mlx5_flow_table *ct_nat;
+ struct mlx5_flow_group *ct_nat_miss_group;
+ struct mlx5_flow_handle *ct_nat_miss_rule;
struct mlx5e_post_act *post_act;
struct mutex control_lock; /* guards parallel adds/dels */
struct mapping_ctx *zone_mapping;
@@ -141,6 +143,8 @@ struct mlx5_ct_counter {
enum {
MLX5_CT_ENTRY_FLAG_VALID,
+ MLX5_CT_ENTRY_IN_CT_TABLE,
+ MLX5_CT_ENTRY_IN_CT_NAT_TABLE,
};
struct mlx5_ct_entry {
@@ -198,9 +202,15 @@ static const struct rhashtable_params tuples_nat_ht_params = {
};
static bool
-mlx5_tc_ct_entry_has_nat(struct mlx5_ct_entry *entry)
+mlx5_tc_ct_entry_in_ct_table(struct mlx5_ct_entry *entry)
{
- return !!(entry->tuple_nat_node.next);
+ return test_bit(MLX5_CT_ENTRY_IN_CT_TABLE, &entry->flags);
+}
+
+static bool
+mlx5_tc_ct_entry_in_ct_nat_table(struct mlx5_ct_entry *entry)
+{
+ return test_bit(MLX5_CT_ENTRY_IN_CT_NAT_TABLE, &entry->flags);
}
static int
@@ -526,8 +536,10 @@ static void
mlx5_tc_ct_entry_del_rules(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5_ct_entry *entry)
{
- mlx5_tc_ct_entry_del_rule(ct_priv, entry, true);
- mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
+ if (mlx5_tc_ct_entry_in_ct_nat_table(entry))
+ mlx5_tc_ct_entry_del_rule(ct_priv, entry, true);
+ if (mlx5_tc_ct_entry_in_ct_table(entry))
+ mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
atomic_dec(&ct_priv->debugfs.stats.offloaded);
}
@@ -814,7 +826,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
&zone_rule->mh,
zone_restore_id,
nat,
- mlx5_tc_ct_entry_has_nat(entry));
+ mlx5_tc_ct_entry_in_ct_nat_table(entry));
if (err) {
ct_dbg("Failed to create ct entry mod hdr");
goto err_mod_hdr;
@@ -854,7 +866,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
return 0;
err_rule:
- mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, zone_rule->attr, zone_rule->mh);
+ mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, attr, zone_rule->mh);
mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
err_mod_hdr:
kfree(attr);
@@ -864,15 +876,14 @@ err_attr:
}
static int
-mlx5_tc_ct_entry_replace_rule(struct mlx5_tc_ct_priv *ct_priv,
- struct flow_rule *flow_rule,
- struct mlx5_ct_entry *entry,
- bool nat, u8 zone_restore_id)
+mlx5_tc_ct_entry_update_rule(struct mlx5_tc_ct_priv *ct_priv,
+ struct flow_rule *flow_rule,
+ struct mlx5_ct_entry *entry,
+ bool nat, u8 zone_restore_id)
{
struct mlx5_ct_zone_rule *zone_rule = &entry->zone_rules[nat];
struct mlx5_flow_attr *attr = zone_rule->attr, *old_attr;
struct mlx5e_mod_hdr_handle *mh;
- struct mlx5_ct_fs_rule *rule;
struct mlx5_flow_spec *spec;
int err;
@@ -888,31 +899,28 @@ mlx5_tc_ct_entry_replace_rule(struct mlx5_tc_ct_priv *ct_priv,
*old_attr = *attr;
err = mlx5_tc_ct_entry_create_mod_hdr(ct_priv, attr, flow_rule, &mh, zone_restore_id,
- nat, mlx5_tc_ct_entry_has_nat(entry));
+ nat, mlx5_tc_ct_entry_in_ct_nat_table(entry));
if (err) {
- ct_dbg("Failed to create ct entry mod hdr");
+ ct_dbg("Failed to create ct entry mod hdr, err: %d", err);
goto err_mod_hdr;
}
mlx5_tc_ct_set_tuple_match(ct_priv, spec, flow_rule);
mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, entry->tuple.zone, MLX5_CT_ZONE_MASK);
- rule = ct_priv->fs_ops->ct_rule_add(ct_priv->fs, spec, attr, flow_rule);
- if (IS_ERR(rule)) {
- err = PTR_ERR(rule);
- ct_dbg("Failed to add replacement ct entry rule, nat: %d", nat);
+ err = ct_priv->fs_ops->ct_rule_update(ct_priv->fs, zone_rule->rule, spec, attr);
+ if (err) {
+ ct_dbg("Failed to update ct entry rule, nat: %d, err: %d", nat, err);
goto err_rule;
}
- ct_priv->fs_ops->ct_rule_del(ct_priv->fs, zone_rule->rule);
- zone_rule->rule = rule;
mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, old_attr, zone_rule->mh);
zone_rule->mh = mh;
mlx5_put_label_mapping(ct_priv, old_attr->ct_attr.ct_labels_id);
kfree(old_attr);
kvfree(spec);
- ct_dbg("Replaced ct entry rule in zone %d", entry->tuple.zone);
+ ct_dbg("Updated ct entry rule in zone %d", entry->tuple.zone);
return 0;
@@ -920,6 +928,7 @@ err_rule:
mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, zone_rule->attr, mh);
mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
err_mod_hdr:
+ *attr = *old_attr;
kfree(old_attr);
err_attr:
kvfree(spec);
@@ -957,11 +966,13 @@ static void mlx5_tc_ct_entry_remove_from_tuples(struct mlx5_ct_entry *entry)
{
struct mlx5_tc_ct_priv *ct_priv = entry->ct_priv;
- rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
- &entry->tuple_nat_node,
- tuples_nat_ht_params);
- rhashtable_remove_fast(&ct_priv->ct_tuples_ht, &entry->tuple_node,
- tuples_ht_params);
+ if (mlx5_tc_ct_entry_in_ct_nat_table(entry))
+ rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
+ &entry->tuple_nat_node,
+ tuples_nat_ht_params);
+ if (mlx5_tc_ct_entry_in_ct_table(entry))
+ rhashtable_remove_fast(&ct_priv->ct_tuples_ht, &entry->tuple_node,
+ tuples_ht_params);
}
static void mlx5_tc_ct_entry_del(struct mlx5_ct_entry *entry)
@@ -1015,7 +1026,7 @@ mlx5_tc_ct_counter_create(struct mlx5_tc_ct_priv *ct_priv)
return ERR_PTR(-ENOMEM);
counter->is_shared = false;
- counter->counter = mlx5_fc_create_ex(ct_priv->dev, true);
+ counter->counter = mlx5_fc_create(ct_priv->dev, true);
if (IS_ERR(counter->counter)) {
ct_dbg("Failed to create counter for ct entry");
ret = PTR_ERR(counter->counter);
@@ -1100,54 +1111,63 @@ mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
return err;
}
- err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, false,
- zone_restore_id);
- if (err)
- goto err_orig;
+ if (mlx5_tc_ct_entry_in_ct_table(entry)) {
+ err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, false,
+ zone_restore_id);
+ if (err)
+ goto err_orig;
+ }
- err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, true,
- zone_restore_id);
- if (err)
- goto err_nat;
+ if (mlx5_tc_ct_entry_in_ct_nat_table(entry)) {
+ err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, true,
+ zone_restore_id);
+ if (err)
+ goto err_nat;
+ }
atomic_inc(&ct_priv->debugfs.stats.offloaded);
return 0;
err_nat:
- mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
+ if (mlx5_tc_ct_entry_in_ct_table(entry))
+ mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
err_orig:
mlx5_tc_ct_counter_put(ct_priv, entry);
return err;
}
static int
-mlx5_tc_ct_entry_replace_rules(struct mlx5_tc_ct_priv *ct_priv,
- struct flow_rule *flow_rule,
- struct mlx5_ct_entry *entry,
- u8 zone_restore_id)
+mlx5_tc_ct_entry_update_rules(struct mlx5_tc_ct_priv *ct_priv,
+ struct flow_rule *flow_rule,
+ struct mlx5_ct_entry *entry,
+ u8 zone_restore_id)
{
- int err;
+ int err = 0;
- err = mlx5_tc_ct_entry_replace_rule(ct_priv, flow_rule, entry, false,
- zone_restore_id);
- if (err)
- return err;
+ if (mlx5_tc_ct_entry_in_ct_table(entry)) {
+ err = mlx5_tc_ct_entry_update_rule(ct_priv, flow_rule, entry, false,
+ zone_restore_id);
+ if (err)
+ return err;
+ }
- err = mlx5_tc_ct_entry_replace_rule(ct_priv, flow_rule, entry, true,
- zone_restore_id);
- if (err)
- mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
+ if (mlx5_tc_ct_entry_in_ct_nat_table(entry)) {
+ err = mlx5_tc_ct_entry_update_rule(ct_priv, flow_rule, entry, true,
+ zone_restore_id);
+ if (err && mlx5_tc_ct_entry_in_ct_table(entry))
+ mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
+ }
return err;
}
static int
-mlx5_tc_ct_block_flow_offload_replace(struct mlx5_ct_ft *ft, struct flow_rule *flow_rule,
- struct mlx5_ct_entry *entry, unsigned long cookie)
+mlx5_tc_ct_block_flow_offload_update(struct mlx5_ct_ft *ft, struct flow_rule *flow_rule,
+ struct mlx5_ct_entry *entry, unsigned long cookie)
{
struct mlx5_tc_ct_priv *ct_priv = ft->ct_priv;
int err;
- err = mlx5_tc_ct_entry_replace_rules(ct_priv, flow_rule, entry, ft->zone_restore_id);
+ err = mlx5_tc_ct_entry_update_rules(ct_priv, flow_rule, entry, ft->zone_restore_id);
if (!err)
return 0;
@@ -1175,6 +1195,7 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
struct flow_action_entry *meta_action;
unsigned long cookie = flow->cookie;
struct mlx5_ct_entry *entry;
+ bool has_nat;
int err;
meta_action = mlx5_tc_ct_get_ct_metadata_action(flow_rule);
@@ -1192,7 +1213,7 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
entry->restore_cookie = meta_action->ct_metadata.cookie;
spin_unlock_bh(&ct_priv->ht_lock);
- err = mlx5_tc_ct_block_flow_offload_replace(ft, flow_rule, entry, cookie);
+ err = mlx5_tc_ct_block_flow_offload_update(ft, flow_rule, entry, cookie);
mlx5_tc_ct_entry_put(entry);
return err;
}
@@ -1216,6 +1237,8 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
err = mlx5_tc_ct_rule_to_tuple_nat(&entry->tuple_nat, flow_rule);
if (err)
goto err_set;
+ has_nat = memcmp(&entry->tuple, &entry->tuple_nat,
+ sizeof(entry->tuple));
spin_lock_bh(&ct_priv->ht_lock);
@@ -1224,18 +1247,24 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
if (err)
goto err_entries;
- err = rhashtable_lookup_insert_fast(&ct_priv->ct_tuples_ht,
- &entry->tuple_node,
- tuples_ht_params);
- if (err)
- goto err_tuple;
-
- if (memcmp(&entry->tuple, &entry->tuple_nat, sizeof(entry->tuple))) {
+ if (has_nat) {
err = rhashtable_lookup_insert_fast(&ct_priv->ct_tuples_nat_ht,
&entry->tuple_nat_node,
tuples_nat_ht_params);
if (err)
goto err_tuple_nat;
+
+ set_bit(MLX5_CT_ENTRY_IN_CT_NAT_TABLE, &entry->flags);
+ }
+
+ if (!mlx5_tc_ct_entry_in_ct_nat_table(entry)) {
+ err = rhashtable_lookup_insert_fast(&ct_priv->ct_tuples_ht,
+ &entry->tuple_node,
+ tuples_ht_params);
+ if (err)
+ goto err_tuple;
+
+ set_bit(MLX5_CT_ENTRY_IN_CT_TABLE, &entry->flags);
}
spin_unlock_bh(&ct_priv->ht_lock);
@@ -1251,17 +1280,10 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
err_rules:
spin_lock_bh(&ct_priv->ht_lock);
- if (mlx5_tc_ct_entry_has_nat(entry))
- rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
- &entry->tuple_nat_node, tuples_nat_ht_params);
-err_tuple_nat:
- rhashtable_remove_fast(&ct_priv->ct_tuples_ht,
- &entry->tuple_node,
- tuples_ht_params);
err_tuple:
- rhashtable_remove_fast(&ft->ct_entries_ht,
- &entry->node,
- cts_ht_params);
+ mlx5_tc_ct_entry_remove_from_tuples(entry);
+err_tuple_nat:
+ rhashtable_remove_fast(&ft->ct_entries_ht, &entry->node, cts_ht_params);
err_entries:
spin_unlock_bh(&ct_priv->ht_lock);
err_set:
@@ -1330,6 +1352,32 @@ mlx5_tc_ct_block_flow_offload_stats(struct mlx5_ct_ft *ft,
return 0;
}
+static bool
+mlx5_tc_ct_filter_legacy_non_nic_flows(struct mlx5_ct_ft *ft,
+ struct flow_cls_offload *flow)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
+ struct mlx5_tc_ct_priv *ct_priv = ft->ct_priv;
+ struct flow_match_meta match;
+ struct net_device *netdev;
+ bool same_dev = false;
+
+ if (!is_mdev_legacy_mode(ct_priv->dev) ||
+ !flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
+ return true;
+
+ flow_rule_match_meta(rule, &match);
+
+ if (!(match.key->ingress_ifindex & match.mask->ingress_ifindex))
+ return true;
+
+ netdev = dev_get_by_index(&init_net, match.key->ingress_ifindex);
+ same_dev = ct_priv->netdev == netdev;
+ dev_put(netdev);
+
+ return same_dev;
+}
+
static int
mlx5_tc_ct_block_flow_offload(enum tc_setup_type type, void *type_data,
void *cb_priv)
@@ -1342,6 +1390,9 @@ mlx5_tc_ct_block_flow_offload(enum tc_setup_type type, void *type_data,
switch (f->command) {
case FLOW_CLS_REPLACE:
+ if (!mlx5_tc_ct_filter_legacy_non_nic_flows(ft, f))
+ return -EOPNOTSUPP;
+
return mlx5_tc_ct_block_flow_offload_add(ft, f);
case FLOW_CLS_DESTROY:
return mlx5_tc_ct_block_flow_offload_del(ft, f);
@@ -2046,10 +2097,19 @@ mlx5_tc_ct_fs_init(struct mlx5_tc_ct_priv *ct_priv)
struct mlx5_ct_fs_ops *fs_ops = mlx5_ct_fs_dmfs_ops_get();
int err;
- if (ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB &&
- ct_priv->dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS) {
- ct_dbg("Using SMFS ct flow steering provider");
- fs_ops = mlx5_ct_fs_smfs_ops_get();
+ if (ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB) {
+ if (ct_priv->dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_HMFS) {
+ ct_dbg("Using HMFS ct flow steering provider");
+ fs_ops = mlx5_ct_fs_hmfs_ops_get();
+ } else if (ct_priv->dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS) {
+ ct_dbg("Using SMFS ct flow steering provider");
+ fs_ops = mlx5_ct_fs_smfs_ops_get();
+ }
+
+ if (!fs_ops) {
+ ct_dbg("Requested flow steering mode is not enabled.");
+ return -EOPNOTSUPP;
+ }
}
ct_priv->fs = kzalloc(sizeof(*ct_priv->fs) + fs_ops->priv_size, GFP_KERNEL);
@@ -2149,6 +2209,76 @@ mlx5_ct_tc_remove_dbgfs(struct mlx5_tc_ct_priv *ct_priv)
debugfs_remove_recursive(ct_priv->debugfs.root);
}
+static struct mlx5_flow_handle *
+tc_ct_add_miss_rule(struct mlx5_flow_table *ft,
+ struct mlx5_flow_table *next_ft)
+{
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_act act = {};
+
+ act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL | FLOW_ACT_NO_APPEND;
+ act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = next_ft;
+
+ return mlx5_add_flow_rules(ft, NULL, &act, &dest, 1);
+}
+
+static int
+tc_ct_add_ct_table_miss_rule(struct mlx5_flow_table *from,
+ struct mlx5_flow_table *to,
+ struct mlx5_flow_group **miss_group,
+ struct mlx5_flow_handle **miss_rule)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *group;
+ struct mlx5_flow_handle *rule;
+ unsigned int max_fte = from->max_fte;
+ u32 *flow_group_in;
+ int err = 0;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ /* create miss group */
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index,
+ max_fte - 2);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
+ max_fte - 1);
+ group = mlx5_create_flow_group(from, flow_group_in);
+ if (IS_ERR(group)) {
+ err = PTR_ERR(group);
+ goto err_miss_grp;
+ }
+
+ /* add miss rule to next fdb */
+ rule = tc_ct_add_miss_rule(from, to);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ goto err_miss_rule;
+ }
+
+ *miss_group = group;
+ *miss_rule = rule;
+ kvfree(flow_group_in);
+ return 0;
+
+err_miss_rule:
+ mlx5_destroy_flow_group(group);
+err_miss_grp:
+ kvfree(flow_group_in);
+ return err;
+}
+
+static void
+tc_ct_del_ct_table_miss_rule(struct mlx5_flow_group *miss_group,
+ struct mlx5_flow_handle *miss_rule)
+{
+ mlx5_del_flow_rules(miss_rule);
+ mlx5_destroy_flow_group(miss_group);
+}
+
#define INIT_ERR_PREFIX "tc ct offload init failed"
struct mlx5_tc_ct_priv *
@@ -2157,9 +2287,10 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
enum mlx5_flow_namespace_type ns_type,
struct mlx5e_post_act *post_act)
{
+ u8 mapping_id[MLX5_SW_IMAGE_GUID_MAX_BYTES];
struct mlx5_tc_ct_priv *ct_priv;
struct mlx5_core_dev *dev;
- u64 mapping_id;
+ u8 id_len;
int err;
dev = priv->mdev;
@@ -2171,16 +2302,18 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
if (!ct_priv)
goto err_alloc;
- mapping_id = mlx5_query_nic_system_image_guid(dev);
+ mlx5_query_nic_sw_system_image_guid(dev, mapping_id, &id_len);
- ct_priv->zone_mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_ZONE,
+ ct_priv->zone_mapping = mapping_create_for_id(mapping_id, id_len,
+ MAPPING_TYPE_ZONE,
sizeof(u16), 0, true);
if (IS_ERR(ct_priv->zone_mapping)) {
err = PTR_ERR(ct_priv->zone_mapping);
goto err_mapping_zone;
}
- ct_priv->labels_mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_LABELS,
+ ct_priv->labels_mapping = mapping_create_for_id(mapping_id, id_len,
+ MAPPING_TYPE_LABELS,
sizeof(u32) * 4, 0, true);
if (IS_ERR(ct_priv->labels_mapping)) {
err = PTR_ERR(ct_priv->labels_mapping);
@@ -2212,6 +2345,12 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
goto err_ct_nat_tbl;
}
+ err = tc_ct_add_ct_table_miss_rule(ct_priv->ct_nat, ct_priv->ct,
+ &ct_priv->ct_nat_miss_group,
+ &ct_priv->ct_nat_miss_rule);
+ if (err)
+ goto err_ct_zone_ht;
+
ct_priv->post_act = post_act;
mutex_init(&ct_priv->control_lock);
if (rhashtable_init(&ct_priv->zone_ht, &zone_params))
@@ -2273,6 +2412,7 @@ mlx5_tc_ct_clean(struct mlx5_tc_ct_priv *ct_priv)
ct_priv->fs_ops->destroy(ct_priv->fs);
kfree(ct_priv->fs);
+ tc_ct_del_ct_table_miss_rule(ct_priv->ct_nat_miss_group, ct_priv->ct_nat_miss_rule);
mlx5_chains_destroy_global_table(chains, ct_priv->ct_nat);
mlx5_chains_destroy_global_table(chains, ct_priv->ct);
mapping_destroy(ct_priv->zone_mapping);
@@ -2325,3 +2465,74 @@ out_inc_drop:
atomic_inc(&ct_priv->debugfs.stats.rx_dropped);
return false;
}
+
+static bool mlx5e_tc_ct_valid_used_dissector_keys(const u64 used_keys)
+{
+#define DISS_BIT(name) BIT_ULL(FLOW_DISSECTOR_KEY_ ## name)
+ const u64 basic_keys = DISS_BIT(BASIC) | DISS_BIT(CONTROL) |
+ DISS_BIT(META);
+ const u64 ipv4_tcp = basic_keys | DISS_BIT(IPV4_ADDRS) |
+ DISS_BIT(PORTS) | DISS_BIT(TCP);
+ const u64 ipv6_tcp = basic_keys | DISS_BIT(IPV6_ADDRS) |
+ DISS_BIT(PORTS) | DISS_BIT(TCP);
+ const u64 ipv4_udp = basic_keys | DISS_BIT(IPV4_ADDRS) |
+ DISS_BIT(PORTS);
+ const u64 ipv6_udp = basic_keys | DISS_BIT(IPV6_ADDRS) |
+ DISS_BIT(PORTS);
+ const u64 ipv4_gre = basic_keys | DISS_BIT(IPV4_ADDRS);
+ const u64 ipv6_gre = basic_keys | DISS_BIT(IPV6_ADDRS);
+
+ return (used_keys == ipv4_tcp || used_keys == ipv4_udp || used_keys == ipv6_tcp ||
+ used_keys == ipv6_udp || used_keys == ipv4_gre || used_keys == ipv6_gre);
+}
+
+bool mlx5e_tc_ct_is_valid_flow_rule(const struct net_device *dev, struct flow_rule *flow_rule)
+{
+ struct flow_match_ipv4_addrs ipv4_addrs;
+ struct flow_match_ipv6_addrs ipv6_addrs;
+ struct flow_match_control control;
+ struct flow_match_basic basic;
+ struct flow_match_ports ports;
+ struct flow_match_tcp tcp;
+
+ if (!mlx5e_tc_ct_valid_used_dissector_keys(flow_rule->match.dissector->used_keys)) {
+ netdev_dbg(dev, "ct_debug: rule uses unexpected dissectors (0x%016llx)",
+ flow_rule->match.dissector->used_keys);
+ return false;
+ }
+
+ flow_rule_match_basic(flow_rule, &basic);
+ flow_rule_match_control(flow_rule, &control);
+ flow_rule_match_ipv4_addrs(flow_rule, &ipv4_addrs);
+ flow_rule_match_ipv6_addrs(flow_rule, &ipv6_addrs);
+ if (basic.key->ip_proto != IPPROTO_GRE)
+ flow_rule_match_ports(flow_rule, &ports);
+ if (basic.key->ip_proto == IPPROTO_TCP)
+ flow_rule_match_tcp(flow_rule, &tcp);
+
+ if (basic.mask->n_proto != htons(0xFFFF) ||
+ (basic.key->n_proto != htons(ETH_P_IP) && basic.key->n_proto != htons(ETH_P_IPV6)) ||
+ basic.mask->ip_proto != 0xFF ||
+ (basic.key->ip_proto != IPPROTO_UDP && basic.key->ip_proto != IPPROTO_TCP &&
+ basic.key->ip_proto != IPPROTO_GRE)) {
+ netdev_dbg(dev, "ct_debug: rule uses unexpected basic match (n_proto 0x%04x/0x%04x, ip_proto 0x%02x/0x%02x)",
+ ntohs(basic.key->n_proto), ntohs(basic.mask->n_proto),
+ basic.key->ip_proto, basic.mask->ip_proto);
+ return false;
+ }
+
+ if (basic.key->ip_proto != IPPROTO_GRE &&
+ (ports.mask->src != htons(0xFFFF) || ports.mask->dst != htons(0xFFFF))) {
+ netdev_dbg(dev, "ct_debug: rule uses ports match (src 0x%04x, dst 0x%04x)",
+ ports.mask->src, ports.mask->dst);
+ return false;
+ }
+
+ if (basic.key->ip_proto == IPPROTO_TCP && tcp.mask->flags != MLX5_CT_TCP_FLAGS_MASK) {
+ netdev_dbg(dev, "ct_debug: rule uses unexpected tcp match (flags 0x%02x)",
+ tcp.mask->flags);
+ return false;
+ }
+
+ return true;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
index b66c5f98067f..5e9dbdd4a5e9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
@@ -128,6 +128,9 @@ bool
mlx5e_tc_ct_restore_flow(struct mlx5_tc_ct_priv *ct_priv,
struct sk_buff *skb, u8 zone_restore_id);
+#define MLX5_CT_TCP_FLAGS_MASK cpu_to_be16(be32_to_cpu(TCP_FLAG_RST | TCP_FLAG_FIN) >> 16)
+bool mlx5e_tc_ct_is_valid_flow_rule(const struct net_device *dev, struct flow_rule *flow_rule);
+
#else /* CONFIG_MLX5_TC_CT */
static inline struct mlx5_tc_ct_priv *
@@ -202,5 +205,12 @@ mlx5e_tc_ct_restore_flow(struct mlx5_tc_ct_priv *ct_priv,
return false;
}
+static inline bool
+mlx5e_tc_ct_is_valid_flow_rule(const struct net_device *dev,
+ struct flow_rule *flow_rule)
+{
+ return false;
+}
+
#endif /* !IS_ENABLED(CONFIG_MLX5_TC_CT) */
#endif /* __MLX5_EN_TC_CT_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
index 6cc23af66b5b..efb34de4cb7a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
@@ -109,6 +109,7 @@ struct mlx5e_tc_flow {
struct completion init_done;
struct completion del_hw_done;
struct mlx5_flow_attr *attr;
+ struct mlx5_flow_attr *extra_split_attr;
struct list_head attrs;
u32 chain_mapping;
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index 8dfb57f712b0..a14f216048cd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -1,7 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2018 Mellanox Technologies. */
-#include <net/inet_ecn.h>
+#include <net/flow.h>
+#include <net/inet_dscp.h>
#include <net/vxlan.h>
#include <net/gre.h>
#include <net/geneve.h>
@@ -31,8 +32,7 @@ static void mlx5e_tc_tun_route_attr_cleanup(struct mlx5e_tc_tun_route_attr *attr
{
if (attr->n)
neigh_release(attr->n);
- if (attr->route_dev)
- dev_put(attr->route_dev);
+ dev_put(attr->route_dev);
}
struct mlx5e_tc_tunnel *mlx5e_get_tc_tun(struct net_device *tunnel_dev)
@@ -68,16 +68,14 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv,
* while holding rcu read lock. Take the net_device for correctness
* sake.
*/
- if (uplink_upper)
- dev_hold(uplink_upper);
+ dev_hold(uplink_upper);
rcu_read_unlock();
dst_is_lag_dev = (uplink_upper &&
netif_is_lag_master(uplink_upper) &&
real_dev == uplink_upper &&
mlx5_lag_is_sriov(priv->mdev));
- if (uplink_upper)
- dev_put(uplink_upper);
+ dev_put(uplink_upper);
/* if the egress device isn't on the same HW e-switch or
* it's a LAG device, use the uplink
@@ -236,7 +234,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
int err;
/* add the IP fields */
- attr.fl.fl4.flowi4_tos = tun_key->tos & ~INET_ECN_MASK;
+ attr.fl.fl4.flowi4_dscp = inet_dsfield_to_dscp(tun_key->tos);
attr.fl.fl4.daddr = tun_key->u.ipv4.dst;
attr.fl.fl4.saddr = tun_key->u.ipv4.src;
attr.ttl = tun_key->ttl;
@@ -352,7 +350,7 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv,
int err;
/* add the IP fields */
- attr.fl.fl4.flowi4_tos = tun_key->tos & ~INET_ECN_MASK;
+ attr.fl.fl4.flowi4_dscp = inet_dsfield_to_dscp(tun_key->tos);
attr.fl.fl4.daddr = tun_key->u.ipv4.dst;
attr.fl.fl4.saddr = tun_key->u.ipv4.src;
attr.ttl = tun_key->ttl;
@@ -850,6 +848,12 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev,
flow_rule_match_enc_control(rule, &match);
addr_type = match.key->addr_type;
+ if (flow_rule_has_enc_control_flags(match.mask->flags,
+ extack)) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
/* For tunnel addr_type used same key id`s as for non-tunnel */
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
struct flow_match_ipv4_addrs match;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
index 92065568bb19..6873c1201803 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
@@ -117,7 +117,7 @@ bool mlx5e_tc_tun_encap_info_equal_generic(struct mlx5e_encap_key *a,
bool mlx5e_tc_tun_encap_info_equal_options(struct mlx5e_encap_key *a,
struct mlx5e_encap_key *b,
- __be16 tun_flags);
+ u32 tun_type);
#endif /* CONFIG_MLX5_ESWITCH */
#endif //__MLX5_EN_TC_TUNNEL_H__
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
index f1d1e1542e81..0735d10f2bac 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
@@ -5,6 +5,7 @@
#include <net/nexthop.h>
#include <net/ip_tunnels.h>
#include "tc_tun_encap.h"
+#include "fs_core.h"
#include "en_tc.h"
#include "tc_tun.h"
#include "rep/tc.h"
@@ -24,17 +25,24 @@ static int mlx5e_set_int_port_tunnel(struct mlx5e_priv *priv,
route_dev = dev_get_by_index(dev_net(e->out_dev), e->route_dev_ifindex);
- if (!route_dev || !netif_is_ovs_master(route_dev) ||
- attr->parse_attr->filter_dev == e->out_dev)
+ if (!route_dev || !netif_is_ovs_master(route_dev))
goto out;
+ if (priv->mdev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_DMFS &&
+ mlx5e_eswitch_uplink_rep(attr->parse_attr->filter_dev) &&
+ (attr->esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)) {
+ mlx5_core_warn(priv->mdev,
+ "Matching on external port with encap + fwd to table actions is not allowed for firmware steering\n");
+ err = -EINVAL;
+ goto out;
+ }
+
err = mlx5e_set_fwd_to_int_port_actions(priv, attr, e->route_dev_ifindex,
MLX5E_TC_INT_PORT_EGRESS,
&attr->action, out_index);
out:
- if (route_dev)
- dev_put(route_dev);
+ dev_put(route_dev);
return err;
}
@@ -164,8 +172,8 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
&reformat_params,
MLX5_FLOW_NAMESPACE_FDB);
if (IS_ERR(e->pkt_reformat)) {
- mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %lu\n",
- PTR_ERR(e->pkt_reformat));
+ mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %pe\n",
+ e->pkt_reformat);
return;
}
e->flags |= MLX5_ENCAP_ENTRY_VALID;
@@ -587,7 +595,7 @@ bool mlx5e_tc_tun_encap_info_equal_generic(struct mlx5e_encap_key *a,
bool mlx5e_tc_tun_encap_info_equal_options(struct mlx5e_encap_key *a,
struct mlx5e_encap_key *b,
- __be16 tun_flags)
+ u32 tun_type)
{
struct ip_tunnel_info *a_info;
struct ip_tunnel_info *b_info;
@@ -596,8 +604,8 @@ bool mlx5e_tc_tun_encap_info_equal_options(struct mlx5e_encap_key *a,
if (!mlx5e_tc_tun_encap_info_equal_generic(a, b))
return false;
- a_has_opts = !!(a->ip_tun_key->tun_flags & tun_flags);
- b_has_opts = !!(b->ip_tun_key->tun_flags & tun_flags);
+ a_has_opts = test_bit(tun_type, a->ip_tun_key->tun_flags);
+ b_has_opts = test_bit(tun_type, b->ip_tun_key->tun_flags);
/* keys are equal when both don't have any options attached */
if (!a_has_opts && !b_has_opts)
@@ -744,8 +752,7 @@ static int mlx5e_set_vf_tunnel(struct mlx5_eswitch *esw,
}
out:
- if (route_dev)
- dev_put(route_dev);
+ dev_put(route_dev);
return err;
}
@@ -779,8 +786,7 @@ static int mlx5e_update_vf_tunnel(struct mlx5_eswitch *esw,
mlx5e_tc_match_to_reg_mod_hdr_change(esw->dev, mod_hdr_acts, VPORT_TO_REG, act_id, data);
out:
- if (route_dev)
- dev_put(route_dev);
+ dev_put(route_dev);
return err;
}
@@ -1839,8 +1845,8 @@ static int mlx5e_tc_tun_fib_event(struct notifier_block *nb, unsigned long event
queue_work(priv->wq, &fib_work->work);
} else if (IS_ERR(fib_work)) {
NL_SET_ERR_MSG_MOD(info->extack, "Failed to init fib work");
- mlx5_core_warn(priv->mdev, "Failed to init fib work, %ld\n",
- PTR_ERR(fib_work));
+ mlx5_core_warn(priv->mdev, "Failed to init fib work, %pe\n",
+ fib_work);
}
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
index 2bcd10b6d653..bf969212cc77 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
@@ -106,12 +106,13 @@ static int mlx5e_gen_ip_tunnel_header_geneve(char buf[],
memset(geneveh, 0, sizeof(*geneveh));
geneveh->ver = MLX5E_GENEVE_VER;
geneveh->opt_len = tun_info->options_len / 4;
- geneveh->oam = !!(tun_info->key.tun_flags & TUNNEL_OAM);
- geneveh->critical = !!(tun_info->key.tun_flags & TUNNEL_CRIT_OPT);
+ geneveh->oam = test_bit(IP_TUNNEL_OAM_BIT, tun_info->key.tun_flags);
+ geneveh->critical = test_bit(IP_TUNNEL_CRIT_OPT_BIT,
+ tun_info->key.tun_flags);
mlx5e_tunnel_id_to_vni(tun_info->key.tun_id, geneveh->vni);
geneveh->proto_type = htons(ETH_P_TEB);
- if (tun_info->key.tun_flags & TUNNEL_GENEVE_OPT) {
+ if (test_bit(IP_TUNNEL_GENEVE_OPT_BIT, tun_info->key.tun_flags)) {
if (!geneveh->opt_len)
return -EOPNOTSUPP;
ip_tunnel_info_opts_get(geneveh->options, tun_info);
@@ -188,7 +189,7 @@ static int mlx5e_tc_tun_parse_geneve_options(struct mlx5e_priv *priv,
/* make sure that we're talking about GENEVE options */
- if (enc_opts.key->dst_opt_type != TUNNEL_GENEVE_OPT) {
+ if (enc_opts.key->dst_opt_type != IP_TUNNEL_GENEVE_OPT_BIT) {
NL_SET_ERR_MSG_MOD(extack,
"Matching on GENEVE options: option type is not GENEVE");
netdev_warn(priv->netdev,
@@ -337,7 +338,8 @@ static int mlx5e_tc_tun_parse_geneve(struct mlx5e_priv *priv,
static bool mlx5e_tc_tun_encap_info_equal_geneve(struct mlx5e_encap_key *a,
struct mlx5e_encap_key *b)
{
- return mlx5e_tc_tun_encap_info_equal_options(a, b, TUNNEL_GENEVE_OPT);
+ return mlx5e_tc_tun_encap_info_equal_options(a, b,
+ IP_TUNNEL_GENEVE_OPT_BIT);
}
struct mlx5e_tc_tunnel geneve_tunnel = {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c
index ada14f0574dc..579eda89fc76 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c
@@ -31,12 +31,16 @@ static int mlx5e_gen_ip_tunnel_header_gretap(char buf[],
const struct ip_tunnel_key *tun_key = &e->tun_info->key;
struct gre_base_hdr *greh = (struct gre_base_hdr *)(buf);
__be32 tun_id = tunnel_id_to_key32(tun_key->tun_id);
+ IP_TUNNEL_DECLARE_FLAGS(unsupp) = { };
int hdr_len;
*ip_proto = IPPROTO_GRE;
/* the HW does not calculate GRE csum or sequences */
- if (tun_key->tun_flags & (TUNNEL_CSUM | TUNNEL_SEQ))
+ __set_bit(IP_TUNNEL_CSUM_BIT, unsupp);
+ __set_bit(IP_TUNNEL_SEQ_BIT, unsupp);
+
+ if (ip_tunnel_flags_intersect(tun_key->tun_flags, unsupp))
return -EOPNOTSUPP;
greh->protocol = htons(ETH_P_TEB);
@@ -44,7 +48,7 @@ static int mlx5e_gen_ip_tunnel_header_gretap(char buf[],
/* GRE key */
hdr_len = mlx5e_tc_tun_calc_hlen_gretap(e);
greh->flags = gre_tnl_flags_to_gre_flags(tun_key->tun_flags);
- if (tun_key->tun_flags & TUNNEL_KEY) {
+ if (test_bit(IP_TUNNEL_KEY_BIT, tun_key->tun_flags)) {
__be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);
*ptr = tun_id;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
index a184d739d5f8..7a18a469961d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
@@ -90,7 +90,7 @@ static int mlx5e_gen_ip_tunnel_header_vxlan(char buf[],
const struct vxlan_metadata *md;
struct vxlanhdr *vxh;
- if ((tun_key->tun_flags & TUNNEL_VXLAN_OPT) &&
+ if (test_bit(IP_TUNNEL_VXLAN_OPT_BIT, tun_key->tun_flags) &&
e->tun_info->options_len != sizeof(*md))
return -EOPNOTSUPP;
vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));
@@ -99,7 +99,7 @@ static int mlx5e_gen_ip_tunnel_header_vxlan(char buf[],
udp->dest = tun_key->tp_dst;
vxh->vx_flags = VXLAN_HF_VNI;
vxh->vx_vni = vxlan_vni_field(tun_id);
- if (tun_key->tun_flags & TUNNEL_VXLAN_OPT) {
+ if (test_bit(IP_TUNNEL_VXLAN_OPT_BIT, tun_key->tun_flags)) {
md = ip_tunnel_info_opts(e->tun_info);
vxlan_build_gbp_hdr(vxh, md);
}
@@ -125,7 +125,7 @@ static int mlx5e_tc_tun_parse_vxlan_gbp_option(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
- if (enc_opts.key->dst_opt_type != TUNNEL_VXLAN_OPT) {
+ if (enc_opts.key->dst_opt_type != IP_TUNNEL_VXLAN_OPT_BIT) {
NL_SET_ERR_MSG_MOD(extack, "Wrong VxLAN option type: not GBP");
return -EOPNOTSUPP;
}
@@ -140,7 +140,7 @@ static int mlx5e_tc_tun_parse_vxlan_gbp_option(struct mlx5e_priv *priv,
gbp_mask = (u32 *)&enc_opts.mask->data[0];
if (*gbp_mask & ~VXLAN_GBP_MASK) {
- NL_SET_ERR_MSG_FMT_MOD(extack, "Wrong VxLAN GBP mask(0x%08X)\n", *gbp_mask);
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Wrong VxLAN GBP mask(0x%08X)", *gbp_mask);
return -EINVAL;
}
@@ -165,9 +165,6 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
struct flow_match_enc_keyid enc_keyid;
void *misc_c, *misc_v;
- misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
- misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
-
if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID))
return 0;
@@ -182,6 +179,30 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
err = mlx5e_tc_tun_parse_vxlan_gbp_option(priv, spec, f);
if (err)
return err;
+
+ /* We can't mix custom tunnel headers with symbolic ones and we
+ * don't have a symbolic field name for GBP, so we use custom
+ * tunnel headers in this case. We need hardware support to
+ * match on custom tunnel headers, but we already know it's
+ * supported because the previous call successfully checked for
+ * that.
+ */
+ misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ misc_parameters_5);
+ misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters_5);
+
+ /* Shift by 8 to account for the reserved bits in the vxlan
+ * header after the VNI.
+ */
+ MLX5_SET(fte_match_set_misc5, misc_c, tunnel_header_1,
+ be32_to_cpu(enc_keyid.mask->keyid) << 8);
+ MLX5_SET(fte_match_set_misc5, misc_v, tunnel_header_1,
+ be32_to_cpu(enc_keyid.key->keyid) << 8);
+
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_5;
+
+ return 0;
}
/* match on VNI is required */
@@ -195,6 +216,11 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
+ misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ misc_parameters);
+ misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters);
+
MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
be32_to_cpu(enc_keyid.mask->keyid));
MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
@@ -208,7 +234,8 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
static bool mlx5e_tc_tun_encap_info_equal_vxlan(struct mlx5e_encap_key *a,
struct mlx5e_encap_key *b)
{
- return mlx5e_tc_tun_encap_info_equal_options(a, b, TUNNEL_VXLAN_OPT);
+ return mlx5e_tc_tun_encap_info_equal_options(a, b,
+ IP_TUNNEL_VXLAN_OPT_BIT);
}
static int mlx5e_tc_tun_get_remote_ifindex(struct net_device *mirred_dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c
index d4239e3b3c88..0b55e77f19c8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c
@@ -23,6 +23,9 @@ struct mlx5e_tir_builder *mlx5e_tir_builder_alloc(bool modify)
struct mlx5e_tir_builder *builder;
builder = kvzalloc(sizeof(*builder), GFP_KERNEL);
+ if (!builder)
+ return NULL;
+
builder->modify = modify;
return builder;
@@ -121,7 +124,7 @@ void mlx5e_tir_builder_build_rss(struct mlx5e_tir_builder *builder,
const size_t len = MLX5_FLD_SZ_BYTES(tirc, rx_hash_toeplitz_key);
void *rss_key = MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key);
- MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
+ MLX5_SET(tirc, tirc, rx_hash_symmetric, rss_hash->symmetric);
memcpy(rss_key, rss_hash->toeplitz_hash_key, len);
}
@@ -143,6 +146,31 @@ void mlx5e_tir_builder_build_direct(struct mlx5e_tir_builder *builder)
MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
}
+static void mlx5e_tir_context_self_lb_block(void *tirc, bool enable_uc_lb,
+ bool enable_mc_lb)
+{
+ u8 lb_flags = 0;
+
+ if (enable_uc_lb)
+ lb_flags = MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
+ if (enable_mc_lb)
+ lb_flags |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST;
+
+ MLX5_SET(tirc, tirc, self_lb_block, lb_flags);
+}
+
+void mlx5e_tir_builder_build_self_lb_block(struct mlx5e_tir_builder *builder,
+ bool enable_uc_lb,
+ bool enable_mc_lb)
+{
+ void *tirc = mlx5e_tir_builder_get_tirc(builder);
+
+ if (builder->modify)
+ MLX5_SET(modify_tir_in, builder->in, bitmask.self_lb_en, 1);
+
+ mlx5e_tir_context_self_lb_block(tirc, enable_uc_lb, enable_mc_lb);
+}
+
void mlx5e_tir_builder_build_tls(struct mlx5e_tir_builder *builder)
{
void *tirc = mlx5e_tir_builder_get_tirc(builder);
@@ -150,9 +178,7 @@ void mlx5e_tir_builder_build_tls(struct mlx5e_tir_builder *builder)
WARN_ON(builder->modify);
MLX5_SET(tirc, tirc, tls_en, 1);
- MLX5_SET(tirc, tirc, self_lb_block,
- MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST |
- MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST);
+ mlx5e_tir_context_self_lb_block(tirc, true, true);
}
int mlx5e_tir_init(struct mlx5e_tir *tir, struct mlx5e_tir_builder *builder,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h
index 857a84bcd53a..958eeb959a19 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h
@@ -9,6 +9,7 @@
struct mlx5e_rss_params_hash {
u8 hfunc;
u8 toeplitz_hash_key[40];
+ bool symmetric;
};
struct mlx5e_rss_params_traffic_type {
@@ -34,6 +35,9 @@ void mlx5e_tir_builder_build_rss(struct mlx5e_tir_builder *builder,
const struct mlx5e_rss_params_traffic_type *rss_tt,
bool inner);
void mlx5e_tir_builder_build_direct(struct mlx5e_tir_builder *builder);
+void mlx5e_tir_builder_build_self_lb_block(struct mlx5e_tir_builder *builder,
+ bool enable_uc_lb,
+ bool enable_mc_lb);
void mlx5e_tir_builder_build_tls(struct mlx5e_tir_builder *builder);
struct mlx5_core_dev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
index 53ca16cb9c41..da8c44f46edb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
@@ -46,8 +46,8 @@ static void mlx5e_init_trap_rq(struct mlx5e_trap *t, struct mlx5e_params *params
rq->pdev = t->pdev;
rq->netdev = priv->netdev;
rq->priv = priv;
- rq->clock = &mdev->clock;
- rq->tstamp = &priv->tstamp;
+ rq->clock = mdev->clock;
+ rq->hwtstamp_config = &priv->hwtstamp_config;
rq->mdev = mdev;
rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
rq->stats = &priv->trap_stats.rq;
@@ -76,6 +76,7 @@ static int mlx5e_open_trap_rq(struct mlx5e_priv *priv, struct mlx5e_trap *t)
ccp.ch_stats = t->stats;
ccp.napi = &t->napi;
ccp.ix = 0;
+ ccp.uar = mdev->priv.bfreg.up;
err = mlx5e_open_cq(priv->mdev, trap_moder, &rq_param->cqp, &ccp, &rq->cq);
if (err)
return err;
@@ -143,13 +144,12 @@ static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv)
t->priv = priv;
t->mdev = priv->mdev;
- t->tstamp = &priv->tstamp;
t->pdev = mlx5_core_dma_dev(priv->mdev);
t->netdev = priv->netdev;
t->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey);
t->stats = &priv->trap_stats.ch;
- netif_napi_add(netdev, &t->napi, mlx5e_trap_napi_poll);
+ netif_napi_add_locked(netdev, &t->napi, mlx5e_trap_napi_poll);
err = mlx5e_open_trap_rq(priv, t);
if (unlikely(err))
@@ -164,7 +164,7 @@ static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv)
err_close_trap_rq:
mlx5e_close_trap_rq(&t->rq);
err_napi_del:
- netif_napi_del(&t->napi);
+ netif_napi_del_locked(&t->napi);
kvfree(t);
return ERR_PTR(err);
}
@@ -173,13 +173,13 @@ void mlx5e_close_trap(struct mlx5e_trap *trap)
{
mlx5e_tir_destroy(&trap->tir);
mlx5e_close_trap_rq(&trap->rq);
- netif_napi_del(&trap->napi);
+ netif_napi_del_locked(&trap->napi);
kvfree(trap);
}
static void mlx5e_activate_trap(struct mlx5e_trap *trap)
{
- napi_enable(&trap->napi);
+ napi_enable_locked(&trap->napi);
mlx5e_activate_rq(&trap->rq);
mlx5e_trigger_napi_sched(&trap->napi);
}
@@ -189,7 +189,7 @@ void mlx5e_deactivate_trap(struct mlx5e_priv *priv)
struct mlx5e_trap *trap = priv->en_trap;
mlx5e_deactivate_rq(&trap->rq);
- napi_disable(&trap->napi);
+ napi_disable_locked(&trap->napi);
}
static struct mlx5e_trap *mlx5e_add_trap_queue(struct mlx5e_priv *priv)
@@ -285,6 +285,7 @@ int mlx5e_handle_trap_event(struct mlx5e_priv *priv, struct mlx5_trap_ctx *trap_
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
return 0;
+ netdev_lock(priv->netdev);
switch (trap_ctx->action) {
case DEVLINK_TRAP_ACTION_TRAP:
err = mlx5e_handle_action_trap(priv, trap_ctx->id);
@@ -297,6 +298,7 @@ int mlx5e_handle_trap_event(struct mlx5e_priv *priv, struct mlx5_trap_ctx *trap_
trap_ctx->action);
err = -EINVAL;
}
+ netdev_unlock(priv->netdev);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.h b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.h
index aa3f17658c6d..394e917ea2b0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.h
@@ -22,7 +22,6 @@ struct mlx5e_trap {
/* control */
struct mlx5e_priv *priv;
struct mlx5_core_dev *mdev;
- struct hwtstamp_config *tstamp;
DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES);
struct mlx5e_params params;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index 879d698b6119..7e191e1569e8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -6,6 +6,8 @@
#include "en.h"
#include <linux/indirect_call_wrapper.h>
+#include <net/ip6_checksum.h>
+#include <net/tcp.h>
#define MLX5E_TX_WQE_EMPTY_DS_COUNT (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS)
@@ -34,6 +36,25 @@
#define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)
+#define MLX5E_KSM_UMR_WQE_SZ(sgl_len)\
+ (sizeof(struct mlx5e_umr_wqe) +\
+ (sizeof(struct mlx5_ksm) * (sgl_len)))
+
+#define MLX5E_KSM_UMR_WQEBBS(ksm_entries) \
+ (DIV_ROUND_UP(MLX5E_KSM_UMR_WQE_SZ(ksm_entries), MLX5_SEND_WQE_BB))
+
+#define MLX5E_KSM_UMR_DS_CNT(ksm_entries)\
+ (DIV_ROUND_UP(MLX5E_KSM_UMR_WQE_SZ(ksm_entries), MLX5_SEND_WQE_DS))
+
+#define MLX5E_KSM_MAX_ENTRIES_PER_WQE(wqe_size)\
+ (((wqe_size) - sizeof(struct mlx5e_umr_wqe)) / sizeof(struct mlx5_ksm))
+
+#define MLX5E_KSM_ENTRIES_PER_WQE(wqe_size)\
+ ALIGN_DOWN(MLX5E_KSM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT)
+
+#define MLX5E_MAX_KSM_PER_WQE(mdev) \
+ MLX5E_KSM_ENTRIES_PER_WQE(MLX5_SEND_WQE_BB * mlx5e_get_max_sq_aligned_wqebbs(mdev))
+
static inline
ktime_t mlx5e_cqe_ts_to_ns(cqe_ts_to_ns func, struct mlx5_clock *clock, u64 cqe_ts)
{
@@ -71,7 +92,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
void mlx5e_free_rx_missing_descs(struct mlx5e_rq *rq);
-static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
+static inline bool mlx5e_rx_hw_stamp(struct kernel_hwtstamp_config *config)
{
return config->rx_filter == HWTSTAMP_FILTER_ALL;
}
@@ -193,6 +214,19 @@ static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size)
return pi;
}
+static inline u16 mlx5e_txqsq_get_next_pi_anysize(struct mlx5e_txqsq *sq,
+ u16 *size)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ u16 pi, contig_wqebbs;
+
+ pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+ *size = min_t(u16, contig_wqebbs, sq->max_sq_mpw_wqebbs);
+
+ return pi;
+}
+
void mlx5e_txqsq_wake(struct mlx5e_txqsq *sq);
static inline u16 mlx5e_shampo_get_cqe_header_index(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
@@ -275,10 +309,7 @@ mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map,
static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
{
- struct mlx5_core_cq *mcq;
-
- mcq = &cq->mcq;
- mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc);
+ mlx5_cq_arm(&cq->mcq, MLX5_CQ_DB_REQ_NOT, cq->uar->map, cq->wq.cc);
}
static inline struct mlx5e_sq_dma *
@@ -288,14 +319,24 @@ mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
}
static inline void
-mlx5e_dma_push(struct mlx5e_txqsq *sq, dma_addr_t addr, u32 size,
- enum mlx5e_dma_map_type map_type)
+mlx5e_dma_push_single(struct mlx5e_txqsq *sq, dma_addr_t addr, u32 size)
{
struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, sq->dma_fifo_pc++);
dma->addr = addr;
dma->size = size;
- dma->type = map_type;
+ dma->type = MLX5E_DMA_MAP_SINGLE;
+}
+
+static inline void
+mlx5e_dma_push_netmem(struct mlx5e_txqsq *sq, netmem_ref netmem,
+ dma_addr_t addr, u32 size)
+{
+ struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, sq->dma_fifo_pc++);
+
+ netmem_dma_unmap_addr_set(netmem, dma, addr, addr);
+ dma->size = size;
+ dma->type = MLX5E_DMA_MAP_PAGE;
}
static inline
@@ -328,7 +369,8 @@ mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
break;
case MLX5E_DMA_MAP_PAGE:
- dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
+ netmem_dma_unmap_page_attrs(pdev, dma->addr, dma->size,
+ DMA_TO_DEVICE, 0);
break;
default:
WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
@@ -337,9 +379,9 @@ mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq);
-static inline bool mlx5e_tx_mpwqe_is_full(struct mlx5e_tx_mpwqe *session, u8 max_sq_mpw_wqebbs)
+static inline bool mlx5e_tx_mpwqe_is_full(struct mlx5e_tx_mpwqe *session)
{
- return session->ds_count == max_sq_mpw_wqebbs * MLX5_SEND_WQEBB_NUM_DS;
+ return session->ds_count == session->ds_count_max;
}
static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)
@@ -460,6 +502,41 @@ mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
}
}
+static inline void
+mlx5e_swp_encap_csum_partial(struct mlx5_core_dev *mdev, struct sk_buff *skb, bool tunnel)
+{
+ const struct iphdr *ip = tunnel ? inner_ip_hdr(skb) : ip_hdr(skb);
+ const struct ipv6hdr *ip6;
+ struct tcphdr *th;
+ struct udphdr *uh;
+ int len;
+
+ if (!MLX5_CAP_ETH(mdev, swp_csum_l4_partial) || !skb_is_gso(skb))
+ return;
+
+ if (skb_is_gso_tcp(skb)) {
+ th = inner_tcp_hdr(skb);
+ len = skb_shinfo(skb)->gso_size + inner_tcp_hdrlen(skb);
+
+ if (ip->version == 4) {
+ th->check = ~tcp_v4_check(len, ip->saddr, ip->daddr, 0);
+ } else {
+ ip6 = tunnel ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
+ th->check = ~tcp_v6_check(len, &ip6->saddr, &ip6->daddr, 0);
+ }
+ } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+ uh = (struct udphdr *)skb_inner_transport_header(skb);
+ len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr);
+
+ if (ip->version == 4) {
+ uh->check = ~udp_v4_check(len, ip->saddr, ip->daddr, 0);
+ } else {
+ ip6 = tunnel ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
+ uh->check = ~udp_v6_check(len, &ip6->saddr, &ip6->daddr, 0);
+ }
+ }
+}
+
#define MLX5E_STOP_ROOM(wqebbs) ((wqebbs) * 2 - 1)
static inline u16 mlx5e_stop_room_for_wqe(struct mlx5_core_dev *mdev, u16 wqe_size)
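/* A minimal user-space sketch of the pseudo-header folding that
 * mlx5e_swp_encap_csum_partial() above relies on: the inner TCP/UDP checksum
 * field is seeded with the pseudo-header sum so the HW can finish the
 * checksum over each GSO segment. Addresses and lengths below are made up;
 * this is an assumption-level illustration, not driver code.
 */
#include <stdint.h>
#include <stdio.h>

/* RFC 1071 style one's-complement sum of the IPv4 pseudo-header. */
static uint16_t pseudo_hdr_sum(uint32_t saddr, uint32_t daddr,
			       uint8_t proto, uint16_t l4_len)
{
	uint32_t sum = 0;

	sum += saddr >> 16;
	sum += saddr & 0xffff;
	sum += daddr >> 16;
	sum += daddr & 0xffff;
	sum += proto;	/* zero pad byte + protocol number */
	sum += l4_len;	/* L4 header + payload length */

	while (sum >> 16)	/* fold carries back into 16 bits */
		sum = (sum & 0xffff) + (sum >> 16);

	return (uint16_t)sum;
}

int main(void)
{
	/* 192.0.2.1 -> 198.51.100.2, TCP (6), 1480 bytes of L4 data */
	printf("seed: 0x%04x\n", pseudo_hdr_sum(0xC0000201, 0xC6336402, 6, 1480));
	return 0;
}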
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 82b5ca1be4f3..80f9fc10877a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -179,7 +179,7 @@ static int mlx5e_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
{
const struct mlx5e_xdp_buff *_ctx = (void *)ctx;
- if (unlikely(!mlx5e_rx_hw_stamp(_ctx->rq->tstamp)))
+ if (unlikely(!mlx5e_rx_hw_stamp(_ctx->rq->hwtstamp_config)))
return -ENODATA;
*timestamp = mlx5e_cqe_ts_to_ns(_ctx->rq->ptp_cyc2time,
@@ -289,9 +289,9 @@ static u64 mlx5e_xsk_fill_timestamp(void *_priv)
ts = get_cqe_ts(priv->cqe);
if (mlx5_is_real_time_rq(priv->cq->mdev) || mlx5_is_real_time_sq(priv->cq->mdev))
- return mlx5_real_time_cyc2time(&priv->cq->mdev->clock, ts);
+ return mlx5_real_time_cyc2time(priv->cq->mdev->clock, ts);
- return mlx5_timecounter_cyc2time(&priv->cq->mdev->clock, ts);
+ return mlx5_timecounter_cyc2time(priv->cq->mdev->clock, ts);
}
static void mlx5e_xsk_request_checksum(u16 csum_start, u16 csum_offset, void *priv)
@@ -390,6 +390,7 @@ static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq)
.wqe = wqe,
.bytes_count = 0,
.ds_count = MLX5E_TX_WQE_EMPTY_DS_COUNT,
+ .ds_count_max = sq->max_sq_mpw_wqebbs * MLX5_SEND_WQEBB_NUM_DS,
.pkt_count = 0,
.inline_on = mlx5e_xdp_get_inline_state(sq, session->inline_on),
};
@@ -501,7 +502,7 @@ mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptx
mlx5e_xdp_mpwqe_add_dseg(sq, p, stats);
- if (unlikely(mlx5e_xdp_mpwqe_is_full(session, sq->max_sq_mpw_wqebbs)))
+ if (unlikely(mlx5e_xdp_mpwqe_is_full(session)))
mlx5e_xdp_mpwqe_complete(sq);
stats->xmit++;
@@ -546,6 +547,7 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
bool inline_ok;
bool linear;
u16 pi;
+ int i;
struct mlx5e_xdpsq_stats *stats = sq->stats;
@@ -565,7 +567,7 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
linear = !!(dma_len - inline_hdr_sz);
ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT + linear + !!inline_hdr_sz;
- /* check_result must be 0 if sinfo is passed. */
+ /* check_result must be 0 if xdptxd->has_frags is true. */
if (!check_result) {
int stop_room = 1;
@@ -612,41 +614,33 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
- if (test_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state)) {
- int i;
-
- memset(&cseg->trailer, 0, sizeof(cseg->trailer));
- memset(eseg, 0, sizeof(*eseg) - sizeof(eseg->trailer));
-
- eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
+ memset(&cseg->trailer, 0, sizeof(cseg->trailer));
+ memset(eseg, 0, sizeof(*eseg) - sizeof(eseg->trailer));
- for (i = 0; i < num_frags; i++) {
- skb_frag_t *frag = &xdptxdf->sinfo->frags[i];
- dma_addr_t addr;
+ eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
- addr = xdptxdf->dma_arr ? xdptxdf->dma_arr[i] :
- page_pool_get_dma_addr(skb_frag_page(frag)) +
- skb_frag_off(frag);
+ for (i = 0; i < num_frags; i++) {
+ skb_frag_t *frag = &xdptxdf->sinfo->frags[i];
+ dma_addr_t addr;
- dseg->addr = cpu_to_be64(addr);
- dseg->byte_count = cpu_to_be32(skb_frag_size(frag));
- dseg->lkey = sq->mkey_be;
- dseg++;
- }
+ addr = xdptxdf->dma_arr ? xdptxdf->dma_arr[i] :
+ page_pool_get_dma_addr(skb_frag_page(frag)) +
+ skb_frag_off(frag);
- cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
+ dseg->addr = cpu_to_be64(addr);
+ dseg->byte_count = cpu_to_be32(skb_frag_size(frag));
+ dseg->lkey = sq->mkey_be;
+ dseg++;
+ }
- sq->db.wqe_info[pi] = (struct mlx5e_xdp_wqe_info) {
- .num_wqebbs = num_wqebbs,
- .num_pkts = 1,
- };
+ cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
- sq->pc += num_wqebbs;
- } else {
- cseg->fm_ce_se = 0;
+ sq->db.wqe_info[pi] = (struct mlx5e_xdp_wqe_info) {
+ .num_wqebbs = num_wqebbs,
+ .num_pkts = 1,
+ };
- sq->pc++;
- }
+ sq->pc += num_wqebbs;
xsk_tx_metadata_request(meta, &mlx5e_xsk_tx_metadata_ops, eseg);
@@ -713,10 +707,11 @@ static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);
page = xdpi.page.page;
- /* No need to check ((page->pp_magic & ~0x3UL) == PP_SIGNATURE)
- * as we know this is a page_pool page.
+ /* No need to check page_pool_page_is_pp() as we
+ * know this is a page_pool page.
*/
- page_pool_recycle_direct(page->pp, page);
+ page_pool_recycle_direct(pp_page_to_nmdesc(page)->pp,
+ page);
} while (++n < num);
break;
@@ -865,7 +860,7 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
if (unlikely(sq_num >= priv->channels.num))
return -ENXIO;
- sq = &priv->channels.c[sq_num]->xdpsq;
+ sq = priv->channels.c[sq_num]->xdpsq;
for (i = 0; i < n; i++) {
struct mlx5e_xmit_data_frags xdptxdf = {};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index e054db1e10f8..46ab0a9e8cdd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -45,12 +45,6 @@
(MLX5E_XDP_INLINE_WQE_MAX_DS_CNT * MLX5_SEND_WQE_DS - \
sizeof(struct mlx5_wqe_inline_seg))
-struct mlx5e_xdp_buff {
- struct xdp_buff xdp;
- struct mlx5_cqe64 *cqe;
- struct mlx5e_rq *rq;
-};
-
/* XDP packets can be transmitted in different ways. On completion, we need to
* distinguish between them to clean up things in a proper way.
*/
@@ -182,13 +176,13 @@ static inline bool mlx5e_xdp_get_inline_state(struct mlx5e_xdpsq *sq, bool cur)
return cur;
}
-static inline bool mlx5e_xdp_mpwqe_is_full(struct mlx5e_tx_mpwqe *session, u8 max_sq_mpw_wqebbs)
+static inline bool mlx5e_xdp_mpwqe_is_full(struct mlx5e_tx_mpwqe *session)
{
if (session->inline_on)
return session->ds_count + MLX5E_XDP_INLINE_WQE_MAX_DS_CNT >
- max_sq_mpw_wqebbs * MLX5_SEND_WQEBB_NUM_DS;
+ session->ds_count_max;
- return mlx5e_tx_mpwqe_is_full(session, max_sq_mpw_wqebbs);
+ return mlx5e_tx_mpwqe_is_full(session);
}
struct mlx5e_xdp_wqe_info {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
index b8dd74453655..2b05536d564a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
@@ -123,7 +123,7 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
bitmap_zero(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
wi->consumed_strides = 0;
- umr_wqe->ctrl.opmod_idx_opcode =
+ umr_wqe->hdr.ctrl.opmod_idx_opcode =
cpu_to_be32((icosq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) | MLX5_OPCODE_UMR);
/* Optimized for speed: keep in sync with mlx5e_mpwrq_umr_entry_size. */
@@ -134,7 +134,7 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
offset = offset * sizeof(struct mlx5_klm) * 2 / MLX5_OCTWORD;
else if (unlikely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_TRIPLE))
offset = offset * sizeof(struct mlx5_ksm) * 4 / MLX5_OCTWORD;
- umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset);
+ umr_wqe->hdr.uctrl.xlt_offset = cpu_to_be16(offset);
icosq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
.wqe_type = MLX5E_ICOSQ_WQE_UMR_RX,
@@ -144,7 +144,7 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
icosq->pc += rq->mpwqe.umr_wqebbs;
- icosq->doorbell_cseg = &umr_wqe->ctrl;
+ icosq->doorbell_cseg = &umr_wqe->hdr.ctrl;
return 0;
@@ -270,7 +270,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
/* mxbuf->rq is set on allocation, but cqe is per-packet so set it here */
mxbuf->cqe = cqe;
xsk_buff_set_size(&mxbuf->xdp, cqe_bcnt);
- xsk_buff_dma_sync_for_cpu(&mxbuf->xdp, rq->xsk_pool);
+ xsk_buff_dma_sync_for_cpu(&mxbuf->xdp);
net_prefetch(mxbuf->xdp.data);
/* Possible flows:
@@ -319,7 +319,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
/* mxbuf->rq is set on allocation, but cqe is per-packet so set it here */
mxbuf->cqe = cqe;
xsk_buff_set_size(&mxbuf->xdp, cqe_bcnt);
- xsk_buff_dma_sync_for_cpu(&mxbuf->xdp, rq->xsk_pool);
+ xsk_buff_dma_sync_for_cpu(&mxbuf->xdp);
net_prefetch(mxbuf->xdp.data);
prog = rcu_dereference(rq->xdp_prog);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index 06592b9f0424..5981c71cae2d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -28,8 +28,10 @@ bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
struct mlx5_core_dev *mdev)
{
- /* AF_XDP doesn't support frames larger than PAGE_SIZE. */
- if (xsk->chunk_size > PAGE_SIZE || xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE) {
+ /* AF_XDP doesn't support frames larger than PAGE_SIZE,
+ * and xsk->chunk_size is limited to 65535 bytes.
+ */
+ if ((size_t)xsk->chunk_size > PAGE_SIZE || xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE) {
mlx5_core_err(mdev, "XSK chunk size %u out of bounds [%u, %lu]\n", xsk->chunk_size,
MLX5E_MIN_XSK_CHUNK_SIZE, PAGE_SIZE);
return false;
@@ -52,7 +54,7 @@ static void mlx5e_build_xsk_cparam(struct mlx5_core_dev *mdev,
struct mlx5e_channel_param *cparam)
{
mlx5e_build_rq_param(mdev, params, xsk, &cparam->rq);
- mlx5e_build_xdpsq_param(mdev, params, xsk, &cparam->xdp_sq);
+ mlx5e_build_xdpsq_param(mdev, params, &cparam->xdp_sq);
}
static int mlx5e_init_xsk_rq(struct mlx5e_channel *c,
@@ -69,8 +71,8 @@ static int mlx5e_init_xsk_rq(struct mlx5e_channel *c,
rq->pdev = c->pdev;
rq->netdev = c->netdev;
rq->priv = c->priv;
- rq->tstamp = c->tstamp;
- rq->clock = &mdev->clock;
+ rq->hwtstamp_config = &c->priv->hwtstamp_config;
+ rq->clock = mdev->clock;
rq->icosq = &c->icosq;
rq->ix = c->ix;
rq->channel = c;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index caa34b9c161e..8bef99e8367e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -42,6 +42,8 @@
#include <en_accel/macsec.h>
#include "en.h"
#include "en/txrx.h"
+#include "en_accel/psp.h"
+#include "en_accel/psp_rxtx.h"
#if IS_ENABLED(CONFIG_GENEVE)
#include <net/geneve.h>
@@ -102,8 +104,14 @@ static inline void
mlx5e_udp_gso_handle_tx_skb(struct sk_buff *skb)
{
int payload_len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr);
+ struct udphdr *udphdr;
- udp_hdr(skb)->len = htons(payload_len);
+ if (skb->encapsulation)
+ udphdr = (struct udphdr *)skb_inner_transport_header(skb);
+ else
+ udphdr = udp_hdr(skb);
+
+ udphdr->len = htons(payload_len);
}
struct mlx5e_accel_tx_state {
@@ -113,6 +121,9 @@ struct mlx5e_accel_tx_state {
#ifdef CONFIG_MLX5_EN_IPSEC
struct mlx5e_accel_tx_ipsec_state ipsec;
#endif
+#ifdef CONFIG_MLX5_EN_PSP
+ struct mlx5e_accel_tx_psp_state psp_st;
+#endif
};
static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
@@ -131,6 +142,13 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
return false;
#endif
+#ifdef CONFIG_MLX5_EN_PSP
+ if (mlx5e_psp_is_offload(skb, dev)) {
+ if (unlikely(!mlx5e_psp_handle_tx_skb(dev, skb, &state->psp_st)))
+ return false;
+ }
+#endif
+
#ifdef CONFIG_MLX5_EN_IPSEC
if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state) && xfrm_offload(skb)) {
if (unlikely(!mlx5e_ipsec_handle_tx_skb(dev, skb, &state->ipsec)))
@@ -151,8 +169,14 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
}
static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq,
+ struct sk_buff *skb,
struct mlx5e_accel_tx_state *state)
{
+#ifdef CONFIG_MLX5_EN_PSP
+ if (mlx5e_psp_is_offload_state(&state->psp_st))
+ return mlx5e_psp_tx_ids_len(&state->psp_st);
+#endif
+
#ifdef CONFIG_MLX5_EN_IPSEC
if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state))
return mlx5e_ipsec_tx_ids_len(&state->ipsec);
@@ -166,8 +190,14 @@ static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq,
static inline void mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
struct sk_buff *skb,
+ struct mlx5e_accel_tx_state *accel,
struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
+#ifdef CONFIG_MLX5_EN_PSP
+ if (mlx5e_psp_is_offload_state(&accel->psp_st))
+ mlx5e_psp_tx_build_eseg(priv, skb, &accel->psp_st, eseg);
+#endif
+
#ifdef CONFIG_MLX5_EN_IPSEC
if (xfrm_offload(skb))
mlx5e_ipsec_tx_build_eseg(priv, skb, eseg);
@@ -193,6 +223,11 @@ static inline void mlx5e_accel_tx_finish(struct mlx5e_txqsq *sq,
mlx5e_ktls_handle_tx_wqe(&wqe->ctrl, &state->tls);
#endif
+#ifdef CONFIG_MLX5_EN_PSP
+ if (mlx5e_psp_is_offload_state(&state->psp_st))
+ mlx5e_psp_handle_tx_wqe(wqe, &state->psp_st, inlseg);
+#endif
+
#ifdef CONFIG_MLX5_EN_IPSEC
if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state) &&
state->ipsec.xo && state->ipsec.tailen)
@@ -202,21 +237,40 @@ static inline void mlx5e_accel_tx_finish(struct mlx5e_txqsq *sq,
static inline int mlx5e_accel_init_rx(struct mlx5e_priv *priv)
{
- return mlx5e_ktls_init_rx(priv);
+ int err;
+
+ err = mlx5_accel_psp_fs_init_rx_tables(priv);
+ if (err)
+ goto out;
+
+ err = mlx5e_ktls_init_rx(priv);
+ if (err)
+ mlx5_accel_psp_fs_cleanup_rx_tables(priv);
+
+out:
+ return err;
}
static inline void mlx5e_accel_cleanup_rx(struct mlx5e_priv *priv)
{
mlx5e_ktls_cleanup_rx(priv);
+ mlx5_accel_psp_fs_cleanup_rx_tables(priv);
}
static inline int mlx5e_accel_init_tx(struct mlx5e_priv *priv)
{
+ int err;
+
+ err = mlx5_accel_psp_fs_init_tx_tables(priv);
+ if (err)
+ return err;
+
return mlx5e_ktls_init_tx(priv);
}
static inline void mlx5e_accel_cleanup_tx(struct mlx5e_priv *priv)
{
mlx5e_ktls_cleanup_tx(priv);
+ mlx5_accel_psp_fs_cleanup_tx_tables(priv);
}
#endif /* __MLX5E_EN_ACCEL_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
index c7d191f66ad1..1febdc5b81f9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
@@ -73,7 +73,7 @@ void mlx5e_accel_fs_del_sk(struct mlx5_flow_handle *rule)
struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_flow_steering *fs,
struct sock *sk, u32 tirn,
- uint32_t flow_tag)
+ u32 flow_tag)
{
struct mlx5e_accel_fs_tcp *fs_tcp = mlx5e_fs_get_accel_tcp(fs);
struct mlx5_flow_destination dest = {};
@@ -138,7 +138,7 @@ struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_flow_steering *fs,
flow = mlx5_add_flow_rules(ft->t, spec, &flow_act, &dest, 1);
if (IS_ERR(flow))
- fs_err(fs, "mlx5_add_flow_rules() failed, flow is %ld\n", PTR_ERR(flow));
+ fs_err(fs, "mlx5_add_flow_rules() failed, flow is %pe\n", flow);
out:
kvfree(spec);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h
index a032bff482a6..7e899c716267 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h
@@ -11,14 +11,14 @@ int mlx5e_accel_fs_tcp_create(struct mlx5e_flow_steering *fs);
void mlx5e_accel_fs_tcp_destroy(struct mlx5e_flow_steering *fs);
struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_flow_steering *fs,
struct sock *sk, u32 tirn,
- uint32_t flow_tag);
+ u32 flow_tag);
void mlx5e_accel_fs_del_sk(struct mlx5_flow_handle *rule);
#else
static inline int mlx5e_accel_fs_tcp_create(struct mlx5e_flow_steering *fs) { return 0; }
static inline void mlx5e_accel_fs_tcp_destroy(struct mlx5e_flow_steering *fs) {}
static inline struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_flow_steering *fs,
struct sock *sk, u32 tirn,
- uint32_t flow_tag)
+ u32 flow_tag)
{ return ERR_PTR(-EOPNOTSUPP); }
static inline void mlx5e_accel_fs_del_sk(struct mlx5_flow_handle *rule) {}
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index c54fd01ea635..35d9530037a6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -36,6 +36,7 @@
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <net/netevent.h>
+#include <net/ipv6_stubs.h>
#include "en.h"
#include "eswitch.h"
@@ -67,7 +68,6 @@ static void mlx5e_ipsec_handle_sw_limits(struct work_struct *_work)
return;
spin_lock_bh(&x->lock);
- xfrm_state_check_expire(x);
if (x->km.state == XFRM_STATE_EXPIRED) {
sa_entry->attrs.drop = true;
spin_unlock_bh(&x->lock);
@@ -75,6 +75,13 @@ static void mlx5e_ipsec_handle_sw_limits(struct work_struct *_work)
mlx5e_accel_ipsec_fs_modify(sa_entry);
return;
}
+
+ if (x->km.state != XFRM_STATE_VALID) {
+ spin_unlock_bh(&x->lock);
+ return;
+ }
+
+ xfrm_state_check_expire(x);
spin_unlock_bh(&x->lock);
queue_delayed_work(sa_entry->ipsec->wq, &dwork->dwork,
@@ -88,25 +95,14 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
u32 esn, esn_msb;
u8 overlap;
- switch (x->xso.type) {
- case XFRM_DEV_OFFLOAD_PACKET:
- switch (x->xso.dir) {
- case XFRM_DEV_OFFLOAD_IN:
- esn = x->replay_esn->seq;
- esn_msb = x->replay_esn->seq_hi;
- break;
- case XFRM_DEV_OFFLOAD_OUT:
- esn = x->replay_esn->oseq;
- esn_msb = x->replay_esn->oseq_hi;
- break;
- default:
- WARN_ON(true);
- return false;
- }
- break;
- case XFRM_DEV_OFFLOAD_CRYPTO:
- /* Already parsed by XFRM core */
+ switch (x->xso.dir) {
+ case XFRM_DEV_OFFLOAD_IN:
esn = x->replay_esn->seq;
+ esn_msb = x->replay_esn->seq_hi;
+ break;
+ case XFRM_DEV_OFFLOAD_OUT:
+ esn = x->replay_esn->oseq;
+ esn_msb = x->replay_esn->oseq_hi;
break;
default:
WARN_ON(true);
@@ -115,11 +111,15 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
overlap = sa_entry->esn_state.overlap;
- if (esn >= x->replay_esn->replay_window)
- seq_bottom = esn - x->replay_esn->replay_window + 1;
+ if (!x->replay_esn->replay_window) {
+ seq_bottom = esn;
+ } else {
+ if (esn >= x->replay_esn->replay_window)
+ seq_bottom = esn - x->replay_esn->replay_window + 1;
- if (x->xso.type == XFRM_DEV_OFFLOAD_CRYPTO)
- esn_msb = xfrm_replay_seqhi(x, htonl(seq_bottom));
+ if (x->xso.type == XFRM_DEV_OFFLOAD_CRYPTO)
+ esn_msb = xfrm_replay_seqhi(x, htonl(seq_bottom));
+ }
if (sa_entry->esn_state.esn_msb)
sa_entry->esn_state.esn = esn;
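/* The hunk above derives the bottom of the anti-replay window before asking
 * xfrm_replay_seqhi() for the sequence-number high half, now also covering a
 * zero-sized window. A standalone sketch of that bottom-of-window arithmetic
 * (assumed semantics, illustrative values only):
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t window_bottom(uint32_t esn, uint32_t replay_window)
{
	if (!replay_window)
		return esn;		/* no window: the bottom is the ESN itself */
	if (esn >= replay_window)
		return esn - replay_window + 1;
	return 0;			/* window has not moved past sequence 0 yet */
}

int main(void)
{
	printf("bottom(100, 32) = %u\n", window_bottom(100, 32));	/* 69 */
	printf("bottom(10, 32)  = %u\n", window_bottom(10, 32));	/* 0 */
	return 0;
}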
@@ -260,10 +260,15 @@ static void mlx5e_ipsec_init_macs(struct mlx5e_ipsec_sa_entry *sa_entry,
struct mlx5_accel_esp_xfrm_attrs *attrs)
{
struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
+ struct mlx5e_ipsec_addr *addrs = &attrs->addrs;
+ struct net_device *netdev = sa_entry->dev;
struct xfrm_state *x = sa_entry->x;
- struct net_device *netdev;
+ struct dst_entry *rt_dst_entry;
+ struct flowi4 fl4 = {};
+ struct flowi6 fl6 = {};
struct neighbour *n;
u8 addr[ETH_ALEN];
+ struct rtable *rt;
const void *pkey;
u8 *dst, *src;
@@ -271,25 +276,94 @@ static void mlx5e_ipsec_init_macs(struct mlx5e_ipsec_sa_entry *sa_entry,
attrs->type != XFRM_DEV_OFFLOAD_PACKET)
return;
- netdev = x->xso.real_dev;
-
mlx5_query_mac_address(mdev, addr);
switch (attrs->dir) {
case XFRM_DEV_OFFLOAD_IN:
src = attrs->dmac;
dst = attrs->smac;
- pkey = &attrs->saddr.a4;
+
+ switch (addrs->family) {
+ case AF_INET:
+ fl4.flowi4_proto = x->sel.proto;
+ fl4.daddr = addrs->saddr.a4;
+ fl4.saddr = addrs->daddr.a4;
+ pkey = &addrs->saddr.a4;
+ break;
+ case AF_INET6:
+ fl6.flowi6_proto = x->sel.proto;
+ memcpy(fl6.daddr.s6_addr32, addrs->saddr.a6, 16);
+ memcpy(fl6.saddr.s6_addr32, addrs->daddr.a6, 16);
+ pkey = &addrs->saddr.a6;
+ break;
+ default:
+ return;
+ }
break;
case XFRM_DEV_OFFLOAD_OUT:
src = attrs->smac;
dst = attrs->dmac;
- pkey = &attrs->daddr.a4;
+ switch (addrs->family) {
+ case AF_INET:
+ fl4.flowi4_proto = x->sel.proto;
+ fl4.daddr = addrs->daddr.a4;
+ fl4.saddr = addrs->saddr.a4;
+ pkey = &addrs->daddr.a4;
+ break;
+ case AF_INET6:
+ fl6.flowi6_proto = x->sel.proto;
+ memcpy(fl6.daddr.s6_addr32, addrs->daddr.a6, 16);
+ memcpy(fl6.saddr.s6_addr32, addrs->saddr.a6, 16);
+ pkey = &addrs->daddr.a6;
+ break;
+ default:
+ return;
+ }
break;
default:
return;
}
ether_addr_copy(src, addr);
+
+ /* The destination can refer to a routed network, so perform a FIB
+ * lookup to resolve the nexthop and get its MAC. Neighbour resolution
+ * is used as a fallback.
+ */
+ switch (addrs->family) {
+ case AF_INET:
+ rt = ip_route_output_key(dev_net(netdev), &fl4);
+ if (IS_ERR(rt))
+ goto neigh;
+
+ if (rt->rt_type != RTN_UNICAST) {
+ ip_rt_put(rt);
+ goto neigh;
+ }
+ rt_dst_entry = &rt->dst;
+ break;
+ case AF_INET6:
+ rt_dst_entry = ipv6_stub->ipv6_dst_lookup_flow(
+ dev_net(netdev), NULL, &fl6, NULL);
+ if (IS_ERR(rt_dst_entry))
+ goto neigh;
+ break;
+ default:
+ return;
+ }
+
+ n = dst_neigh_lookup(rt_dst_entry, pkey);
+ if (!n) {
+ dst_release(rt_dst_entry);
+ goto neigh;
+ }
+
+ neigh_ha_snapshot(addr, n, netdev);
+ ether_addr_copy(dst, addr);
+ dst_release(rt_dst_entry);
+ neigh_release(n);
+ return;
+
+neigh:
n = neigh_lookup(&arp_tbl, pkey, netdev);
if (!n) {
n = neigh_create(&arp_tbl, pkey, netdev);
@@ -304,6 +378,16 @@ static void mlx5e_ipsec_init_macs(struct mlx5e_ipsec_sa_entry *sa_entry,
neigh_release(n);
}
+static void mlx5e_ipsec_state_mask(struct mlx5e_ipsec_addr *addrs)
+{
+ /*
+ * State doesn't have subnet prefixes in outer headers.
+ * The match is performed on exact source/destination addresses.
+ */
+ memset(addrs->smask.m6, 0xFF, sizeof(__be32) * 4);
+ memset(addrs->dmask.m6, 0xFF, sizeof(__be32) * 4);
+}
+
void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
struct mlx5_accel_esp_xfrm_attrs *attrs)
{
@@ -375,9 +459,11 @@ skip_replay_window:
attrs->spi = be32_to_cpu(x->id.spi);
/* source , destination ips */
- memcpy(&attrs->saddr, x->props.saddr.a6, sizeof(attrs->saddr));
- memcpy(&attrs->daddr, x->id.daddr.a6, sizeof(attrs->daddr));
- attrs->family = x->props.family;
+ memcpy(&attrs->addrs.saddr, x->props.saddr.a6,
+ sizeof(attrs->addrs.saddr));
+ memcpy(&attrs->addrs.daddr, x->id.daddr.a6, sizeof(attrs->addrs.daddr));
+ attrs->addrs.family = x->props.family;
+ mlx5e_ipsec_state_mask(&attrs->addrs);
attrs->type = x->xso.type;
attrs->reqid = x->props.reqid;
attrs->upspec.dport = ntohs(x->sel.dport);
@@ -429,7 +515,8 @@ static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev,
}
if (x->encap) {
if (!(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ESPINUDP)) {
- NL_SET_ERR_MSG_MOD(extack, "Encapsulation is not supported");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Encapsulation is not supported");
return -EINVAL;
}
@@ -680,17 +767,18 @@ static int mlx5e_ipsec_create_dwork(struct mlx5e_ipsec_sa_entry *sa_entry)
return 0;
}
-static int mlx5e_xfrm_add_state(struct xfrm_state *x,
+static int mlx5e_xfrm_add_state(struct net_device *dev,
+ struct xfrm_state *x,
struct netlink_ext_ack *extack)
{
struct mlx5e_ipsec_sa_entry *sa_entry = NULL;
- struct net_device *netdev = x->xso.real_dev;
+ bool allow_tunnel_mode = false;
struct mlx5e_ipsec *ipsec;
struct mlx5e_priv *priv;
gfp_t gfp;
int err;
- priv = netdev_priv(netdev);
+ priv = netdev_priv(dev);
if (!priv->ipsec)
return -EOPNOTSUPP;
@@ -701,6 +789,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
return -ENOMEM;
sa_entry->x = x;
+ sa_entry->dev = dev;
sa_entry->ipsec = ipsec;
/* Check if this SA is originated from acquire flow temporary SA */
if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
@@ -715,15 +804,36 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
goto err_xfrm;
}
+ err = mlx5_eswitch_block_mode(priv->mdev);
+ if (err)
+ goto unblock_ipsec;
+
+ if (x->props.mode == XFRM_MODE_TUNNEL &&
+ x->xso.type == XFRM_DEV_OFFLOAD_PACKET) {
+ allow_tunnel_mode = mlx5e_ipsec_fs_tunnel_allowed(sa_entry);
+ if (!allow_tunnel_mode) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Packet offload tunnel mode is disabled due to encap settings");
+ err = -EINVAL;
+ goto unblock_mode;
+ }
+ }
+
/* check esn */
if (x->props.flags & XFRM_STATE_ESN)
mlx5e_ipsec_update_esn_state(sa_entry);
+ else
+ /* According to RFC4303, section "3.3.3. Sequence Number Generation",
+ * the first packet sent using a given SA will contain a sequence
+ * number of 1.
+ */
+ sa_entry->esn_state.esn = 1;
mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &sa_entry->attrs);
err = mlx5_ipsec_create_work(sa_entry);
if (err)
- goto unblock_ipsec;
+ goto unblock_encap;
err = mlx5e_ipsec_create_dwork(sa_entry);
if (err)
@@ -738,14 +848,6 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
if (err)
goto err_hw_ctx;
- if (x->props.mode == XFRM_MODE_TUNNEL &&
- x->xso.type == XFRM_DEV_OFFLOAD_PACKET &&
- !mlx5e_ipsec_fs_tunnel_enabled(sa_entry)) {
- NL_SET_ERR_MSG_MOD(extack, "Packet offload tunnel mode is disabled due to encap settings");
- err = -EINVAL;
- goto err_add_rule;
- }
-
/* We use *_bh() variant because xfrm_timer_handler(), which runs
* in softirq context, can reach our state delete logic and we need
* xa_erase_bh() there.
@@ -761,13 +863,20 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
queue_delayed_work(ipsec->wq, &sa_entry->dwork->dwork,
MLX5_IPSEC_RESCHED);
- if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET &&
- x->props.mode == XFRM_MODE_TUNNEL)
- xa_set_mark(&ipsec->sadb, sa_entry->ipsec_obj_id,
- MLX5E_IPSEC_TUNNEL_SA);
+ if (allow_tunnel_mode) {
+ xa_lock_bh(&ipsec->sadb);
+ __xa_set_mark(&ipsec->sadb, sa_entry->ipsec_obj_id,
+ MLX5E_IPSEC_TUNNEL_SA);
+ xa_unlock_bh(&ipsec->sadb);
+ }
out:
x->xso.offload_handle = (unsigned long)sa_entry;
+ if (allow_tunnel_mode)
+ mlx5_eswitch_unblock_encap(priv->mdev);
+
+ mlx5_eswitch_unblock_mode(priv->mdev);
+
return 0;
err_add_rule:
@@ -780,6 +889,11 @@ release_work:
if (sa_entry->work)
kfree(sa_entry->work->data);
kfree(sa_entry->work);
+unblock_encap:
+ if (allow_tunnel_mode)
+ mlx5_eswitch_unblock_encap(priv->mdev);
+unblock_mode:
+ mlx5_eswitch_unblock_mode(priv->mdev);
unblock_ipsec:
mlx5_eswitch_unblock_ipsec(priv->mdev);
err_xfrm:
@@ -788,10 +902,9 @@ err_xfrm:
return err;
}
-static void mlx5e_xfrm_del_state(struct xfrm_state *x)
+static void mlx5e_xfrm_del_state(struct net_device *dev, struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
- struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
struct mlx5e_ipsec_sa_entry *old;
@@ -800,15 +913,9 @@ static void mlx5e_xfrm_del_state(struct xfrm_state *x)
old = xa_erase_bh(&ipsec->sadb, sa_entry->ipsec_obj_id);
WARN_ON(old != sa_entry);
-
- if (attrs->mode == XFRM_MODE_TUNNEL &&
- attrs->type == XFRM_DEV_OFFLOAD_PACKET)
- /* Make sure that no ARP requests are running in parallel */
- flush_workqueue(ipsec->wq);
-
}
-static void mlx5e_xfrm_free_state(struct xfrm_state *x)
+static void mlx5e_xfrm_free_state(struct net_device *dev, struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
@@ -841,8 +948,6 @@ static int mlx5e_ipsec_netevent_event(struct notifier_block *nb,
struct mlx5e_ipsec_sa_entry *sa_entry;
struct mlx5e_ipsec *ipsec;
struct neighbour *n = ptr;
- struct net_device *netdev;
- struct xfrm_state *x;
unsigned long idx;
if (event != NETEVENT_NEIGH_UPDATE || !(n->nud_state & NUD_VALID))
@@ -852,21 +957,19 @@ static int mlx5e_ipsec_netevent_event(struct notifier_block *nb,
xa_for_each_marked(&ipsec->sadb, idx, sa_entry, MLX5E_IPSEC_TUNNEL_SA) {
attrs = &sa_entry->attrs;
- if (attrs->family == AF_INET) {
- if (!neigh_key_eq32(n, &attrs->saddr.a4) &&
- !neigh_key_eq32(n, &attrs->daddr.a4))
+ if (attrs->addrs.family == AF_INET) {
+ if (!neigh_key_eq32(n, &attrs->addrs.saddr.a4) &&
+ !neigh_key_eq32(n, &attrs->addrs.daddr.a4))
continue;
} else {
- if (!neigh_key_eq128(n, &attrs->saddr.a4) &&
- !neigh_key_eq128(n, &attrs->daddr.a4))
+ if (!neigh_key_eq128(n, &attrs->addrs.saddr.a4) &&
+ !neigh_key_eq128(n, &attrs->addrs.daddr.a4))
continue;
}
- x = sa_entry->x;
- netdev = x->xso.real_dev;
data = sa_entry->work->data;
- neigh_ha_snapshot(data->addr, n, netdev);
+ neigh_ha_snapshot(data->addr, n, sa_entry->dev);
queue_work(ipsec->wq, &sa_entry->work->work);
}
@@ -952,21 +1055,6 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
priv->ipsec = NULL;
}
-static bool mlx5e_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
-{
- if (x->props.family == AF_INET) {
- /* Offload with IPv4 options is not supported yet */
- if (ip_hdr(skb)->ihl > 5)
- return false;
- } else {
- /* Offload with IPv6 extension headers is not support yet */
- if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
- return false;
- }
-
- return true;
-}
-
static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
@@ -989,35 +1077,94 @@ static void mlx5e_xfrm_update_stats(struct xfrm_state *x)
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
struct net *net = dev_net(x->xso.dev);
+ u64 trailer_packets = 0, trailer_bytes = 0;
+ u64 replay_packets = 0, replay_bytes = 0;
+ u64 auth_packets = 0, auth_bytes = 0;
+ u64 success_packets, success_bytes;
u64 packets, bytes, lastuse;
+ size_t headers;
lockdep_assert(lockdep_is_held(&x->lock) ||
- lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_cfg_mutex) ||
- lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_state_lock));
+ lockdep_is_held(&net->xfrm.xfrm_cfg_mutex) ||
+ lockdep_is_held(&net->xfrm.xfrm_state_lock));
if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
return;
if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
- mlx5_fc_query_cached(ipsec_rule->auth.fc, &bytes, &packets, &lastuse);
- x->stats.integrity_failed += packets;
- XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR, packets);
-
- mlx5_fc_query_cached(ipsec_rule->trailer.fc, &bytes, &packets, &lastuse);
- XFRM_ADD_STATS(net, LINUX_MIB_XFRMINHDRERROR, packets);
+ mlx5_fc_query_cached(ipsec_rule->auth.fc, &auth_bytes,
+ &auth_packets, &lastuse);
+ x->stats.integrity_failed += auth_packets;
+ XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR, auth_packets);
+
+ mlx5_fc_query_cached(ipsec_rule->trailer.fc, &trailer_bytes,
+ &trailer_packets, &lastuse);
+ XFRM_ADD_STATS(net, LINUX_MIB_XFRMINHDRERROR, trailer_packets);
}
if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
return;
+ if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
+ mlx5_fc_query_cached(ipsec_rule->replay.fc, &replay_bytes,
+ &replay_packets, &lastuse);
+ x->stats.replay += replay_packets;
+ XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR, replay_packets);
+ }
+
mlx5_fc_query_cached(ipsec_rule->fc, &bytes, &packets, &lastuse);
- x->curlft.packets += packets;
- x->curlft.bytes += bytes;
+ success_packets = packets - auth_packets - trailer_packets - replay_packets;
+ x->curlft.packets += success_packets;
+ /* The NIC counts all bytes that pass through flow steering and has no
+ * way to count only the payload size, which is what the SA needs.
+ *
+ * To overcome this HW limitation, approximate the payload size by
+ * subtracting the headers that are always present.
+ */
+ headers = sizeof(struct ethhdr);
+ if (sa_entry->attrs.addrs.family == AF_INET)
+ headers += sizeof(struct iphdr);
+ else
+ headers += sizeof(struct ipv6hdr);
- if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
- mlx5_fc_query_cached(ipsec_rule->replay.fc, &bytes, &packets, &lastuse);
- x->stats.replay += packets;
- XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR, packets);
+ success_bytes = bytes - auth_bytes - trailer_bytes - replay_bytes;
+ x->curlft.bytes += success_bytes - headers * success_packets;
+}
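/* The byte accounting above is an approximation: the flow counters see whole
 * frames, so the SA payload is estimated as counted bytes minus a fixed
 * per-packet header size. A worked example with assumed numbers (IPv4, so
 * 14B Ethernet + 20B IP headers are subtracted per successful packet):
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t packets = 1000, bytes = 1500000;	/* all counted frames */
	uint64_t auth_packets = 5, auth_bytes = 7500;	/* integrity drops    */
	uint64_t trailer_packets = 0, trailer_bytes = 0;
	uint64_t replay_packets = 2, replay_bytes = 3000;
	uint64_t headers = 14 + 20;			/* ethhdr + iphdr     */

	uint64_t ok_packets = packets - auth_packets - trailer_packets - replay_packets;
	uint64_t ok_bytes = bytes - auth_bytes - trailer_bytes - replay_bytes;

	printf("packets credited: %llu\n", (unsigned long long)ok_packets);
	printf("payload credited: %llu\n",
	       (unsigned long long)(ok_bytes - headers * ok_packets));
	return 0;
}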
+
+static __be32 word_to_mask(int prefix)
+{
+ if (prefix < 0)
+ return 0;
+
+ if (!prefix || prefix > 31)
+ return cpu_to_be32(0xFFFFFFFF);
+
+ return cpu_to_be32(((1U << prefix) - 1) << (32 - prefix));
+}
+
+static void mlx5e_ipsec_policy_mask(struct mlx5e_ipsec_addr *addrs,
+ struct xfrm_selector *sel)
+{
+ int i;
+
+ if (addrs->family == AF_INET) {
+ addrs->smask.m4 = word_to_mask(sel->prefixlen_s);
+ addrs->saddr.a4 &= addrs->smask.m4;
+ addrs->dmask.m4 = word_to_mask(sel->prefixlen_d);
+ addrs->daddr.a4 &= addrs->dmask.m4;
+ return;
+ }
+
+ for (i = 0; i < 4; i++) {
+ if (sel->prefixlen_s != 32 * i)
+ addrs->smask.m6[i] =
+ word_to_mask(sel->prefixlen_s - 32 * i);
+ addrs->saddr.a6[i] &= addrs->smask.m6[i];
+
+ if (sel->prefixlen_d != 32 * i)
+ addrs->dmask.m6[i] =
+ word_to_mask(sel->prefixlen_d - 32 * i);
+ addrs->daddr.a6[i] &= addrs->dmask.m6[i];
}
}
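/* word_to_mask()/mlx5e_ipsec_policy_mask() above turn an XFRM selector prefix
 * length into a big-endian mask and pre-mask the match addresses. The same
 * conversion in a standalone, host-order form (sketch; the driver additionally
 * treats prefix 0 and >31 as an exact-match, all-ones mask):
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t prefix_to_mask(int prefix)
{
	if (prefix <= 0)
		return 0;
	if (prefix >= 32)
		return 0xffffffffu;
	return ~((1u << (32 - prefix)) - 1);	/* prefix leading one bits */
}

int main(void)
{
	printf("/24 -> 0x%08x\n", prefix_to_mask(24));	/* 0xffffff00 */
	printf("/19 -> 0x%08x\n", prefix_to_mask(19));	/* 0xffffe000 */
	return 0;
}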
@@ -1093,9 +1240,10 @@ mlx5e_ipsec_build_accel_pol_attrs(struct mlx5e_ipsec_pol_entry *pol_entry,
sel = &x->selector;
memset(attrs, 0, sizeof(*attrs));
- memcpy(&attrs->saddr, sel->saddr.a6, sizeof(attrs->saddr));
- memcpy(&attrs->daddr, sel->daddr.a6, sizeof(attrs->daddr));
- attrs->family = sel->family;
+ memcpy(&attrs->addrs.saddr, sel->saddr.a6, sizeof(attrs->addrs.saddr));
+ memcpy(&attrs->addrs.daddr, sel->daddr.a6, sizeof(attrs->addrs.daddr));
+ attrs->addrs.family = sel->family;
+ mlx5e_ipsec_policy_mask(&attrs->addrs, sel);
attrs->dir = x->xdo.dir;
attrs->action = x->action;
attrs->type = XFRM_DEV_OFFLOAD_PACKET;
@@ -1111,7 +1259,7 @@ mlx5e_ipsec_build_accel_pol_attrs(struct mlx5e_ipsec_pol_entry *pol_entry,
static int mlx5e_xfrm_add_policy(struct xfrm_policy *x,
struct netlink_ext_ack *extack)
{
- struct net_device *netdev = x->xdo.real_dev;
+ struct net_device *netdev = x->xdo.dev;
struct mlx5e_ipsec_pol_entry *pol_entry;
struct mlx5e_priv *priv;
int err;
@@ -1173,7 +1321,6 @@ static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
.xdo_dev_state_add = mlx5e_xfrm_add_state,
.xdo_dev_state_delete = mlx5e_xfrm_del_state,
.xdo_dev_state_free = mlx5e_xfrm_free_state,
- .xdo_dev_offload_ok = mlx5e_ipsec_offload_ok,
.xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
.xdo_dev_state_update_stats = mlx5e_xfrm_update_stats,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 7d943e93cf6d..f8eaaf37963b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -76,27 +76,36 @@ struct mlx5_replay_esn {
u8 trigger : 1;
};
-struct mlx5_accel_esp_xfrm_attrs {
- u32 spi;
- u32 mode;
- struct aes_gcm_keymat aes_gcm;
-
+struct mlx5e_ipsec_addr {
union {
__be32 a4;
__be32 a6[4];
} saddr;
-
+ union {
+ __be32 m4;
+ __be32 m6[4];
+ } smask;
union {
__be32 a4;
__be32 a6[4];
} daddr;
+ union {
+ __be32 m4;
+ __be32 m6[4];
+ } dmask;
+ u8 family;
+};
+struct mlx5_accel_esp_xfrm_attrs {
+ u32 spi;
+ u32 mode;
+ struct aes_gcm_keymat aes_gcm;
+ struct mlx5e_ipsec_addr addrs;
struct upspec upspec;
u8 dir : 2;
u8 type : 2;
u8 drop : 1;
u8 encap : 1;
- u8 family;
struct mlx5_replay_esn replay_esn;
u32 authsize;
u32 reqid;
@@ -128,6 +137,7 @@ struct mlx5e_ipsec_hw_stats {
u64 ipsec_rx_bytes;
u64 ipsec_rx_drop_pkts;
u64 ipsec_rx_drop_bytes;
+ u64 ipsec_rx_drop_mismatch_sa_sel;
u64 ipsec_tx_pkts;
u64 ipsec_tx_bytes;
u64 ipsec_tx_drop_pkts;
@@ -175,6 +185,7 @@ struct mlx5e_ipsec_rx_create_attr {
u32 family;
int prio;
int pol_level;
+ int pol_miss_level;
int sa_level;
int status_level;
enum mlx5_flow_namespace_type chains_ns;
@@ -184,6 +195,7 @@ struct mlx5e_ipsec_ft {
struct mutex mutex; /* Protect changes to this struct */
struct mlx5_flow_table *pol;
struct mlx5_flow_table *sa;
+ struct mlx5_flow_table *sa_sel;
struct mlx5_flow_table *status;
u32 refcnt;
};
@@ -195,6 +207,8 @@ struct mlx5e_ipsec_drop {
struct mlx5e_ipsec_rule {
struct mlx5_flow_handle *rule;
+ struct mlx5_flow_handle *status_pass;
+ struct mlx5_flow_handle *sa_sel;
struct mlx5_modify_hdr *modify_hdr;
struct mlx5_pkt_reformat *pkt_reformat;
struct mlx5_fc *fc;
@@ -206,6 +220,7 @@ struct mlx5e_ipsec_rule {
struct mlx5e_ipsec_miss {
struct mlx5_flow_group *group;
struct mlx5_flow_handle *rule;
+ struct mlx5_fc *fc;
};
struct mlx5e_ipsec_tx_create_attr {
@@ -260,6 +275,7 @@ struct mlx5e_ipsec_limits {
struct mlx5e_ipsec_sa_entry {
struct mlx5e_ipsec_esn_state esn_state;
struct xfrm_state *x;
+ struct net_device *dev;
struct mlx5e_ipsec *ipsec;
struct mlx5_accel_esp_xfrm_attrs attrs;
void (*set_iv_op)(struct sk_buff *skb, struct xfrm_state *x,
@@ -274,18 +290,8 @@ struct mlx5e_ipsec_sa_entry {
};
struct mlx5_accel_pol_xfrm_attrs {
- union {
- __be32 a4;
- __be32 a6[4];
- } saddr;
-
- union {
- __be32 a4;
- __be32 a6[4];
- } daddr;
-
+ struct mlx5e_ipsec_addr addrs;
struct upspec upspec;
- u8 family;
u8 action;
u8 type : 2;
u8 dir : 2;
@@ -313,7 +319,7 @@ void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry);
int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry);
void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry);
void mlx5e_accel_ipsec_fs_modify(struct mlx5e_ipsec_sa_entry *sa_entry);
-bool mlx5e_ipsec_fs_tunnel_enabled(struct mlx5e_ipsec_sa_entry *sa_entry);
+bool mlx5e_ipsec_fs_tunnel_allowed(struct mlx5e_ipsec_sa_entry *sa_entry);
int mlx5_ipsec_create_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry);
void mlx5_ipsec_free_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry);
@@ -336,6 +342,7 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *slave_priv,
struct mlx5e_priv *master_priv);
void mlx5e_ipsec_send_event(struct mlx5e_priv *priv, int event);
+void mlx5e_ipsec_disable_events(struct mlx5e_priv *priv);
static inline struct mlx5_core_dev *
mlx5e_ipsec_sa2dev(struct mlx5e_ipsec_sa_entry *sa_entry)
@@ -381,6 +388,10 @@ static inline void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *sl
static inline void mlx5e_ipsec_send_event(struct mlx5e_priv *priv, int event)
{
}
+
+static inline void mlx5e_ipsec_disable_events(struct mlx5e_priv *priv)
+{
+}
#endif
#endif /* __MLX5E_IPSEC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index 41a2543a52cd..feef86fff4bf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -16,6 +16,16 @@
#define MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_SIZE 16
#define IPSEC_TUNNEL_DEFAULT_TTL 0x40
+#define MLX5_IPSEC_FS_SA_SELECTOR_MAX_NUM_GROUPS 16
+
+enum {
+ MLX5_IPSEC_ASO_OK,
+ MLX5_IPSEC_ASO_BAD_REPLY,
+
+ /* For crypto offload, set by driver */
+ MLX5_IPSEC_ASO_SW_CRYPTO_OFFLOAD = 0xAA,
+};
+
struct mlx5e_ipsec_fc {
struct mlx5_fc *cnt;
struct mlx5_fc *drop;
@@ -33,6 +43,9 @@ struct mlx5e_ipsec_tx {
};
struct mlx5e_ipsec_status_checks {
+ struct mlx5_flow_group *pass_group;
+ struct mlx5_flow_handle *packet_offload_pass_rule;
+ struct mlx5_flow_handle *crypto_offload_pass_rule;
struct mlx5_flow_group *drop_all_group;
struct mlx5e_ipsec_drop all;
};
@@ -41,11 +54,14 @@ struct mlx5e_ipsec_rx {
struct mlx5e_ipsec_ft ft;
struct mlx5e_ipsec_miss pol;
struct mlx5e_ipsec_miss sa;
- struct mlx5e_ipsec_rule status;
- struct mlx5e_ipsec_status_checks status_drops;
+ struct mlx5e_ipsec_miss sa_sel;
+ struct mlx5e_ipsec_status_checks status_checks;
struct mlx5e_ipsec_fc *fc;
struct mlx5_fs_chains *chains;
+ struct mlx5_flow_table *pol_miss_ft;
+ struct mlx5_flow_handle *pol_miss_rule;
u8 allow_tunnel_mode : 1;
+ u8 ttc_rules_added : 1;
};
/* IPsec RX flow steering */
@@ -130,11 +146,12 @@ static void ipsec_chains_put_table(struct mlx5_fs_chains *chains, u32 prio)
static struct mlx5_flow_table *ipsec_ft_create(struct mlx5_flow_namespace *ns,
int level, int prio,
+ int num_reserved_entries,
int max_num_groups, u32 flags)
{
struct mlx5_flow_table_attr ft_attr = {};
- ft_attr.autogroup.num_reserved_entries = 1;
+ ft_attr.autogroup.num_reserved_entries = num_reserved_entries;
ft_attr.autogroup.max_num_groups = max_num_groups;
ft_attr.max_fte = NUM_IPSEC_FTE;
ft_attr.level = level;
@@ -147,22 +164,35 @@ static struct mlx5_flow_table *ipsec_ft_create(struct mlx5_flow_namespace *ns,
static void ipsec_rx_status_drop_destroy(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx)
{
- mlx5_del_flow_rules(rx->status_drops.all.rule);
- mlx5_fc_destroy(ipsec->mdev, rx->status_drops.all.fc);
- mlx5_destroy_flow_group(rx->status_drops.drop_all_group);
+ mlx5_del_flow_rules(rx->status_checks.all.rule);
+ mlx5_fc_destroy(ipsec->mdev, rx->status_checks.all.fc);
+ mlx5_destroy_flow_group(rx->status_checks.drop_all_group);
}
static void ipsec_rx_status_pass_destroy(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx)
{
- mlx5_del_flow_rules(rx->status.rule);
+ mlx5_del_flow_rules(rx->status_checks.packet_offload_pass_rule);
+ mlx5_del_flow_rules(rx->status_checks.crypto_offload_pass_rule);
+}
- if (rx != ipsec->rx_esw)
- return;
+static void ipsec_rx_rule_add_match_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5_flow_spec *spec)
+{
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
-#ifdef CONFIG_MLX5_ESWITCH
- mlx5_chains_put_table(esw_chains(ipsec->mdev->priv.eswitch), 0, 1, 0);
-#endif
+ if (rx == ipsec->rx_esw) {
+ mlx5_esw_ipsec_rx_rule_add_match_obj(sa_entry, spec);
+ } else {
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters_2.metadata_reg_c_2);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters_2.metadata_reg_c_2,
+ sa_entry->ipsec_obj_id | BIT(31));
+
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
+ }
}
static int rx_add_rule_drop_auth_trailer(struct mlx5e_ipsec_sa_entry *sa_entry,
@@ -194,17 +224,14 @@ static int rx_add_rule_drop_auth_trailer(struct mlx5e_ipsec_sa_entry *sa_entry,
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
flow_act.flags = FLOW_ACT_NO_APPEND;
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(flow_counter);
+ dest.counter = flow_counter;
if (rx == ipsec->rx_esw)
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.ipsec_syndrome);
MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 1);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_2);
- MLX5_SET(fte_match_param, spec->match_value,
- misc_parameters_2.metadata_reg_c_2,
- sa_entry->ipsec_obj_id | BIT(31));
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+ ipsec_rx_rule_add_match_obj(sa_entry, rx, spec);
rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -223,7 +250,7 @@ static int rx_add_rule_drop_auth_trailer(struct mlx5e_ipsec_sa_entry *sa_entry,
}
sa_entry->ipsec_rule.trailer.fc = flow_counter;
- dest.counter_id = mlx5_fc_id(flow_counter);
+ dest.counter = flow_counter;
MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 2);
rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
@@ -275,16 +302,14 @@ static int rx_add_rule_drop_replay(struct mlx5e_ipsec_sa_entry *sa_entry, struct
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
flow_act.flags = FLOW_ACT_NO_APPEND;
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(flow_counter);
+ dest.counter = flow_counter;
if (rx == ipsec->rx_esw)
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4);
MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 1);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_2);
- MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_2,
- sa_entry->ipsec_obj_id | BIT(31));
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+ ipsec_rx_rule_add_match_obj(sa_entry, rx, spec);
rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -348,7 +373,7 @@ static int ipsec_rx_status_drop_all_create(struct mlx5e_ipsec *ipsec,
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(flow_counter);
+ dest.counter = flow_counter;
if (rx == ipsec->rx_esw)
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
@@ -359,9 +384,9 @@ static int ipsec_rx_status_drop_all_create(struct mlx5e_ipsec *ipsec,
goto err_rule;
}
- rx->status_drops.drop_all_group = g;
- rx->status_drops.all.rule = rule;
- rx->status_drops.all.fc = flow_counter;
+ rx->status_checks.drop_all_group = g;
+ rx->status_checks.all.rule = rule;
+ rx->status_checks.all.fc = flow_counter;
kvfree(flow_group_in);
kvfree(spec);
@@ -377,9 +402,52 @@ err_out:
return err;
}
-static int ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx,
- struct mlx5_flow_destination *dest)
+static int ipsec_rx_status_pass_group_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_table *ft = rx->ft.status;
+ struct mlx5_flow_group *fg;
+ void *match_criteria;
+ u32 *flow_group_in;
+ int err = 0;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_MISC_PARAMETERS_2);
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
+ match_criteria);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ misc_parameters_2.ipsec_syndrome);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ misc_parameters_2.metadata_reg_c_4);
+
+ MLX5_SET(create_flow_group_in, flow_group_in,
+ start_flow_index, ft->max_fte - 3);
+ MLX5_SET(create_flow_group_in, flow_group_in,
+ end_flow_index, ft->max_fte - 2);
+
+ fg = mlx5_create_flow_group(ft, flow_group_in);
+ if (IS_ERR(fg)) {
+ err = PTR_ERR(fg);
+ mlx5_core_warn(ipsec->mdev,
+ "Failed to create rx status pass flow group, err=%d\n",
+ err);
+ }
+ rx->status_checks.pass_group = fg;
+
+ kvfree(flow_group_in);
+ return err;
+}
+
+static struct mlx5_flow_handle *
+ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5_flow_destination *dest,
+ u8 aso_ok)
{
struct mlx5_flow_act flow_act = {};
struct mlx5_flow_handle *rule;
@@ -388,7 +456,7 @@ static int ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
misc_parameters_2.ipsec_syndrome);
@@ -397,11 +465,11 @@ static int ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
MLX5_SET(fte_match_param, spec->match_value,
misc_parameters_2.ipsec_syndrome, 0);
MLX5_SET(fte_match_param, spec->match_value,
- misc_parameters_2.metadata_reg_c_4, 0);
+ misc_parameters_2.metadata_reg_c_4, aso_ok);
if (rx == ipsec->rx_esw)
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
- flow_act.flags = FLOW_ACT_NO_APPEND;
+ flow_act.flags = FLOW_ACT_NO_APPEND | FLOW_ACT_IGNORE_FLOW_LEVEL;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
@@ -412,19 +480,19 @@ static int ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
goto err_rule;
}
- rx->status.rule = rule;
kvfree(spec);
- return 0;
+ return rule;
err_rule:
kvfree(spec);
- return err;
+ return ERR_PTR(err);
}
static void mlx5_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx)
{
ipsec_rx_status_pass_destroy(ipsec, rx);
+ mlx5_destroy_flow_group(rx->status_checks.pass_group);
ipsec_rx_status_drop_destroy(ipsec, rx);
}
@@ -432,19 +500,44 @@ static int mlx5_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx,
struct mlx5_flow_destination *dest)
{
+ struct mlx5_flow_destination pol_dest[2];
+ struct mlx5_flow_handle *rule;
int err;
err = ipsec_rx_status_drop_all_create(ipsec, rx);
if (err)
return err;
- err = ipsec_rx_status_pass_create(ipsec, rx, dest);
+ err = ipsec_rx_status_pass_group_create(ipsec, rx);
if (err)
- goto err_pass_create;
+ goto err_pass_group_create;
+
+ rule = ipsec_rx_status_pass_create(ipsec, rx, dest,
+ MLX5_IPSEC_ASO_SW_CRYPTO_OFFLOAD);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ goto err_crypto_offload_pass_create;
+ }
+ rx->status_checks.crypto_offload_pass_rule = rule;
+
+ pol_dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ pol_dest[0].ft = rx->ft.pol;
+ pol_dest[1] = dest[1];
+ rule = ipsec_rx_status_pass_create(ipsec, rx, pol_dest,
+ MLX5_IPSEC_ASO_OK);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ goto err_packet_offload_pass_create;
+ }
+ rx->status_checks.packet_offload_pass_rule = rule;
return 0;
-err_pass_create:
+err_packet_offload_pass_create:
+ mlx5_del_flow_rules(rx->status_checks.crypto_offload_pass_rule);
+err_crypto_offload_pass_create:
+ mlx5_destroy_flow_group(rx->status_checks.pass_group);
+err_pass_group_create:
ipsec_rx_status_drop_destroy(ipsec, rx);
return err;
}
@@ -493,35 +586,56 @@ out:
return err;
}
+static struct mlx5_flow_destination
+ipsec_rx_decrypted_pkt_def_dest(struct mlx5_ttc_table *ttc, u32 family)
+{
+ struct mlx5_flow_destination dest;
+
+ if (!mlx5_ttc_has_esp_flow_group(ttc))
+ return mlx5_ttc_get_default_dest(ttc, family2tt(family));
+
+ dest.ft = mlx5_get_ttc_flow_table(ttc);
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+
+ return dest;
+}
+
+static void ipsec_rx_update_default_dest(struct mlx5e_ipsec_rx *rx,
+ struct mlx5_flow_destination *old_dest,
+ struct mlx5_flow_destination *new_dest)
+{
+ mlx5_modify_rule_destination(rx->pol_miss_rule, new_dest, old_dest);
+ mlx5_modify_rule_destination(rx->status_checks.crypto_offload_pass_rule,
+ new_dest, old_dest);
+}
+
static void handle_ipsec_rx_bringup(struct mlx5e_ipsec *ipsec, u32 family)
{
struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, XFRM_DEV_OFFLOAD_PACKET);
struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(ipsec->fs, false);
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
struct mlx5_flow_destination old_dest, new_dest;
- old_dest = mlx5_ttc_get_default_dest(mlx5e_fs_get_ttc(ipsec->fs, false),
- family2tt(family));
+ old_dest = ipsec_rx_decrypted_pkt_def_dest(ttc, family);
mlx5_ipsec_fs_roce_rx_create(ipsec->mdev, ipsec->roce, ns, &old_dest, family,
MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL, MLX5E_NIC_PRIO);
new_dest.ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, family);
new_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- mlx5_modify_rule_destination(rx->status.rule, &new_dest, &old_dest);
- mlx5_modify_rule_destination(rx->sa.rule, &new_dest, &old_dest);
+ ipsec_rx_update_default_dest(rx, &old_dest, &new_dest);
}
static void handle_ipsec_rx_cleanup(struct mlx5e_ipsec *ipsec, u32 family)
{
struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, XFRM_DEV_OFFLOAD_PACKET);
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
struct mlx5_flow_destination old_dest, new_dest;
old_dest.ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, family);
old_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- new_dest = mlx5_ttc_get_default_dest(mlx5e_fs_get_ttc(ipsec->fs, false),
- family2tt(family));
- mlx5_modify_rule_destination(rx->sa.rule, &new_dest, &old_dest);
- mlx5_modify_rule_destination(rx->status.rule, &new_dest, &old_dest);
+ new_dest = ipsec_rx_decrypted_pkt_def_dest(ttc, family);
+ ipsec_rx_update_default_dest(rx, &old_dest, &new_dest);
mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, ipsec->mdev);
}
@@ -570,20 +684,18 @@ static void ipsec_mpv_work_handler(struct work_struct *_work)
complete(&work->master_priv->ipsec->comp);
}
-static void ipsec_rx_ft_disconnect(struct mlx5e_ipsec *ipsec, u32 family)
+static void ipsec_rx_ft_disconnect(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx, u32 family)
{
struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
+ if (rx->ttc_rules_added)
+ mlx5_ttc_destroy_ipsec_rules(ttc);
mlx5_ttc_fwd_default_dest(ttc, family2tt(family));
}
-static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx, u32 family)
+static void ipsec_rx_policy_destroy(struct mlx5e_ipsec_rx *rx)
{
- /* disconnect */
- if (rx != ipsec->rx_esw)
- ipsec_rx_ft_disconnect(ipsec, family);
-
if (rx->chains) {
ipsec_chains_destroy(rx->chains);
} else {
@@ -592,6 +704,29 @@ static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
mlx5_destroy_flow_table(rx->ft.pol);
}
+ if (rx->pol_miss_rule) {
+ mlx5_del_flow_rules(rx->pol_miss_rule);
+ mlx5_destroy_flow_table(rx->pol_miss_ft);
+ }
+}
+
+static void ipsec_rx_sa_selector_destroy(struct mlx5_core_dev *mdev,
+ struct mlx5e_ipsec_rx *rx)
+{
+ mlx5_del_flow_rules(rx->sa_sel.rule);
+ mlx5_fc_destroy(mdev, rx->sa_sel.fc);
+ rx->sa_sel.fc = NULL;
+ mlx5_destroy_flow_group(rx->sa_sel.group);
+ mlx5_destroy_flow_table(rx->ft.sa_sel);
+}
+
+static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx, u32 family)
+{
+ /* disconnect */
+ if (rx != ipsec->rx_esw)
+ ipsec_rx_ft_disconnect(ipsec, rx, family);
+
mlx5_del_flow_rules(rx->sa.rule);
mlx5_destroy_flow_group(rx->sa.group);
mlx5_destroy_flow_table(rx->ft.sa);
@@ -600,7 +735,17 @@ static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
mlx5_ipsec_rx_status_destroy(ipsec, rx);
mlx5_destroy_flow_table(rx->ft.status);
+ ipsec_rx_sa_selector_destroy(mdev, rx);
+
+ ipsec_rx_policy_destroy(rx);
+
mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, mdev);
+
+#ifdef CONFIG_MLX5_ESWITCH
+ if (rx == ipsec->rx_esw)
+ mlx5_chains_put_table(esw_chains(ipsec->mdev->priv.eswitch),
+ 0, 1, 0);
+#endif
}
static void ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
@@ -620,6 +765,7 @@ static void ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
attr->family = family;
attr->prio = MLX5E_NIC_PRIO;
attr->pol_level = MLX5E_ACCEL_FS_POL_FT_LEVEL;
+ attr->pol_miss_level = MLX5E_ACCEL_FS_POL_MISS_FT_LEVEL;
attr->sa_level = MLX5E_ACCEL_FS_ESP_FT_LEVEL;
attr->status_level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL;
attr->chains_ns = MLX5_FLOW_NAMESPACE_KERNEL;
@@ -636,7 +782,7 @@ static int ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
if (rx == ipsec->rx_esw)
return mlx5_esw_ipsec_rx_status_pass_dest_get(ipsec, dest);
- *dest = mlx5_ttc_get_default_dest(attr->ttc, family2tt(attr->family));
+ *dest = ipsec_rx_decrypted_pkt_def_dest(attr->ttc, attr->family);
err = mlx5_ipsec_fs_roce_rx_create(ipsec->mdev, ipsec->roce, attr->ns, dest,
attr->family, MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL,
attr->prio);
@@ -652,22 +798,254 @@ static int ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
return 0;
}
+static void ipsec_rx_sa_miss_dest_get(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5e_ipsec_rx_create_attr *attr,
+ struct mlx5_flow_destination *dest,
+ struct mlx5_flow_destination *miss_dest)
+{
+ if (rx == ipsec->rx_esw)
+ *miss_dest = *dest;
+ else
+ *miss_dest =
+ mlx5_ttc_get_default_dest(attr->ttc,
+ family2tt(attr->family));
+}
+
+static void ipsec_rx_default_dest_get(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5_flow_destination *dest)
+{
+ dest->type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest->ft = rx->pol_miss_ft;
+}
+
static void ipsec_rx_ft_connect(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx,
struct mlx5e_ipsec_rx_create_attr *attr)
{
struct mlx5_flow_destination dest = {};
+ struct mlx5_ttc_table *ttc, *inner_ttc;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = rx->ft.pol;
- mlx5_ttc_fwd_dest(attr->ttc, family2tt(attr->family), &dest);
+ dest.ft = rx->ft.sa;
+ if (mlx5_ttc_fwd_dest(attr->ttc, family2tt(attr->family), &dest))
+ return;
+
+ ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
+ inner_ttc = mlx5e_fs_get_ttc(ipsec->fs, true);
+ rx->ttc_rules_added = !mlx5_ttc_create_ipsec_rules(ttc, inner_ttc);
}
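+/* Create a single-entry table whose only rule forwards RX policy misses to
+ * the supplied destination. In switchdev mode no extra table is needed and
+ * the root chain table is used directly.
+ */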
+static int ipsec_rx_chains_create_miss(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5e_ipsec_rx_create_attr *attr,
+ struct mlx5_flow_destination *dest)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+ MLX5_DECLARE_FLOW_ACT(flow_act);
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_table *ft;
+ int err;
+
+ if (rx == ipsec->rx_esw) {
+		/* No need to create a miss table in switchdev mode,
+		 * just point it at the root chain table.
+ */
+ rx->pol_miss_ft = dest->ft;
+ return 0;
+ }
+
+ ft_attr.max_fte = 1;
+ ft_attr.autogroup.max_num_groups = 1;
+ ft_attr.level = attr->pol_miss_level;
+ ft_attr.prio = attr->prio;
+
+ ft = mlx5_create_auto_grouped_flow_table(attr->ns, &ft_attr);
+ if (IS_ERR(ft))
+ return PTR_ERR(ft);
+
+ rule = mlx5_add_flow_rules(ft, NULL, &flow_act, dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ goto err_rule;
+ }
+
+ rx->pol_miss_ft = ft;
+ rx->pol_miss_rule = rule;
+
+ return 0;
+
+err_rule:
+ mlx5_destroy_flow_table(ft);
+ return err;
+}
+
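+/* Set up the RX policy level. The policy-miss table is created first so
+ * that both the chains path (MLX5_IPSEC_CAP_PRIO) and the plain flow-table
+ * path can use it as their miss destination.
+ */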
+static int ipsec_rx_policy_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5e_ipsec_rx_create_attr *attr,
+ struct mlx5_flow_destination *dest)
+{
+ struct mlx5_flow_destination default_dest;
+ struct mlx5_core_dev *mdev = ipsec->mdev;
+ struct mlx5_flow_table *ft;
+ int err;
+
+ err = ipsec_rx_chains_create_miss(ipsec, rx, attr, dest);
+ if (err)
+ return err;
+
+ ipsec_rx_default_dest_get(ipsec, rx, &default_dest);
+
+ if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO) {
+ rx->chains = ipsec_chains_create(mdev,
+ default_dest.ft,
+ attr->chains_ns,
+ attr->prio,
+ attr->sa_level,
+ &rx->ft.pol);
+ if (IS_ERR(rx->chains))
+ err = PTR_ERR(rx->chains);
+ } else {
+ ft = ipsec_ft_create(attr->ns, attr->pol_level,
+ attr->prio, 1, 2, 0);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ goto err_out;
+ }
+ rx->ft.pol = ft;
+
+ err = ipsec_miss_create(mdev, rx->ft.pol, &rx->pol,
+ &default_dest);
+ if (err)
+ mlx5_destroy_flow_table(rx->ft.pol);
+ }
+
+ if (!err)
+ return 0;
+
+err_out:
+ if (rx->pol_miss_rule) {
+ mlx5_del_flow_rules(rx->pol_miss_rule);
+ mlx5_destroy_flow_table(rx->pol_miss_ft);
+ }
+ return err;
+}
+
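+/* The SA selector table is created here with only its catch-all miss
+ * group: a drop rule with a dedicated counter accounts for decrypted
+ * packets whose upper-layer protocol does not match the SA selector.
+ * Per-SA forwarding rules are added later by rx_add_rule_sa_selector().
+ */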
+static int ipsec_rx_sa_selector_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5e_ipsec_rx_create_attr *attr)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_core_dev *mdev = ipsec->mdev;
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_destination dest;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *fg;
+ u32 *flow_group_in;
+ struct mlx5_fc *fc;
+ int err;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ ft = ipsec_ft_create(attr->ns, attr->status_level, attr->prio, 1,
+ MLX5_IPSEC_FS_SA_SELECTOR_MAX_NUM_GROUPS, 0);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ mlx5_core_err(mdev, "Failed to create RX SA selector flow table, err=%d\n",
+ err);
+ goto err_ft;
+ }
+
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index,
+ ft->max_fte - 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
+ ft->max_fte - 1);
+ fg = mlx5_create_flow_group(ft, flow_group_in);
+ if (IS_ERR(fg)) {
+ err = PTR_ERR(fg);
+ mlx5_core_err(mdev, "Failed to create RX SA selector miss group, err=%d\n",
+ err);
+ goto err_fg;
+ }
+
+ fc = mlx5_fc_create(mdev, false);
+ if (IS_ERR(fc)) {
+ err = PTR_ERR(fc);
+ mlx5_core_err(mdev,
+ "Failed to create ipsec RX SA selector miss rule counter, err=%d\n",
+ err);
+ goto err_cnt;
+ }
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest.counter = fc;
+ flow_act.action =
+ MLX5_FLOW_CONTEXT_ACTION_COUNT | MLX5_FLOW_CONTEXT_ACTION_DROP;
+
+ rule = mlx5_add_flow_rules(ft, NULL, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "Failed to create RX SA selector miss drop rule, err=%d\n",
+ err);
+ goto err_rule;
+ }
+
+ rx->ft.sa_sel = ft;
+ rx->sa_sel.group = fg;
+ rx->sa_sel.fc = fc;
+ rx->sa_sel.rule = rule;
+
+ kvfree(flow_group_in);
+
+ return 0;
+
+err_rule:
+ mlx5_fc_destroy(mdev, fc);
+err_cnt:
+ mlx5_destroy_flow_group(fg);
+err_fg:
+ mlx5_destroy_flow_table(ft);
+err_ft:
+ kvfree(flow_group_in);
+ return err;
+}
+
+/* The decryption processing is as follows:
+ *
+ * +----------+ +-------------+
+ * | | | |
+ * | Kernel <--------------+----------+ policy miss <------------+
+ * | | ^ | | ^
+ * +----^-----+ | +-------------+ |
+ * | crypto |
+ * miss offload ok allow/default
+ * ^ ^ ^
+ * | | packet |
+ * +----+---------+ +----+-------------+ offload ok +------+---+
+ * | | | | (no UPSPEC) | |
+ * | SA (decrypt) +-----> status +--->------->----+ policy |
+ * | | | | | |
+ * +--------------+ ++---------+-------+ +-^----+---+
+ * | | | |
+ * v packet +-->->---+ v
+ * | offload ok match |
+ * fails (with UPSPEC) | block
+ * | | +-------------+-+ |
+ * v v | | miss v
+ * drop +---> SA sel +--------->drop
+ * | |
+ * +---------------+
+ */
+
static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx, u32 family)
{
+ struct mlx5_flow_destination dest[2], miss_dest;
struct mlx5e_ipsec_rx_create_attr attr;
- struct mlx5_flow_destination dest[2];
struct mlx5_flow_table *ft;
u32 flags = 0;
int err;
@@ -678,81 +1056,63 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
if (err)
return err;
- ft = ipsec_ft_create(attr.ns, attr.status_level, attr.prio, 3, 0);
+ ft = ipsec_ft_create(attr.ns, attr.status_level, attr.prio, 3, 4, 0);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_fs_ft_status;
}
rx->ft.status = ft;
- dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[1].counter_id = mlx5_fc_id(rx->fc->cnt);
- err = mlx5_ipsec_rx_status_create(ipsec, rx, dest);
+ err = ipsec_rx_sa_selector_create(ipsec, rx, &attr);
if (err)
- goto err_add;
+ goto err_fs_ft_sa_sel;
/* Create FT */
if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
- rx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
+ rx->allow_tunnel_mode =
+ mlx5_eswitch_block_encap(mdev, rx == ipsec->rx_esw);
+
if (rx->allow_tunnel_mode)
flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
- ft = ipsec_ft_create(attr.ns, attr.sa_level, attr.prio, 2, flags);
+ ft = ipsec_ft_create(attr.ns, attr.sa_level, attr.prio, 1, 2, flags);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_fs_ft;
}
rx->ft.sa = ft;
- err = ipsec_miss_create(mdev, rx->ft.sa, &rx->sa, dest);
+ ipsec_rx_sa_miss_dest_get(ipsec, rx, &attr, &dest[0], &miss_dest);
+ err = ipsec_miss_create(mdev, rx->ft.sa, &rx->sa, &miss_dest);
if (err)
goto err_fs;
- if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO) {
- rx->chains = ipsec_chains_create(mdev, rx->ft.sa,
- attr.chains_ns,
- attr.prio,
- attr.pol_level,
- &rx->ft.pol);
- if (IS_ERR(rx->chains)) {
- err = PTR_ERR(rx->chains);
- goto err_pol_ft;
- }
-
- goto connect;
- }
+ err = ipsec_rx_policy_create(ipsec, rx, &attr, &dest[0]);
+ if (err)
+ goto err_policy;
- ft = ipsec_ft_create(attr.ns, attr.pol_level, attr.prio, 2, 0);
- if (IS_ERR(ft)) {
- err = PTR_ERR(ft);
- goto err_pol_ft;
- }
- rx->ft.pol = ft;
- memset(dest, 0x00, 2 * sizeof(*dest));
- dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest[0].ft = rx->ft.sa;
- err = ipsec_miss_create(mdev, rx->ft.pol, &rx->pol, dest);
+ dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[1].counter = rx->fc->cnt;
+ err = mlx5_ipsec_rx_status_create(ipsec, rx, dest);
if (err)
- goto err_pol_miss;
+ goto err_add;
-connect:
/* connect */
if (rx != ipsec->rx_esw)
ipsec_rx_ft_connect(ipsec, rx, &attr);
return 0;
-err_pol_miss:
- mlx5_destroy_flow_table(rx->ft.pol);
-err_pol_ft:
+err_add:
+ ipsec_rx_policy_destroy(rx);
+err_policy:
mlx5_del_flow_rules(rx->sa.rule);
mlx5_destroy_flow_group(rx->sa.group);
err_fs:
mlx5_destroy_flow_table(rx->ft.sa);
-err_fs_ft:
if (rx->allow_tunnel_mode)
mlx5_eswitch_unblock_encap(mdev);
- mlx5_del_flow_rules(rx->status.rule);
- mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
-err_add:
+err_fs_ft:
+ ipsec_rx_sa_selector_destroy(mdev, rx);
+err_fs_ft_sa_sel:
mlx5_destroy_flow_table(rx->ft.status);
err_fs_ft_status:
mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, mdev);
@@ -874,7 +1234,7 @@ static int ipsec_counter_rule_tx(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(tx->fc->cnt);
+ dest.counter = tx->fc->cnt;
fte = mlx5_add_flow_rules(tx->ft.status, spec, &flow_act, &dest, 1);
if (IS_ERR(fte)) {
err = PTR_ERR(fte);
@@ -942,7 +1302,7 @@ static int tx_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
int err;
ipsec_tx_create_attr_set(ipsec, tx, &attr);
- ft = ipsec_ft_create(tx->ns, attr.cnt_level, attr.prio, 1, 0);
+ ft = ipsec_ft_create(tx->ns, attr.cnt_level, attr.prio, 1, 1, 0);
if (IS_ERR(ft))
return PTR_ERR(ft);
tx->ft.status = ft;
@@ -952,10 +1312,12 @@ static int tx_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
goto err_status_rule;
if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
- tx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
+ tx->allow_tunnel_mode =
+ mlx5_eswitch_block_encap(mdev, tx == ipsec->tx_esw);
+
if (tx->allow_tunnel_mode)
flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
- ft = ipsec_ft_create(tx->ns, attr.sa_level, attr.prio, 4, flags);
+ ft = ipsec_ft_create(tx->ns, attr.sa_level, attr.prio, 1, 4, flags);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_sa_ft;
@@ -983,7 +1345,7 @@ static int tx_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
goto connect_roce;
}
- ft = ipsec_ft_create(tx->ns, attr.pol_level, attr.prio, 2, 0);
+ ft = ipsec_ft_create(tx->ns, attr.pol_level, attr.prio, 1, 2, 0);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_pol_ft;
@@ -1151,9 +1513,14 @@ static void tx_ft_put_policy(struct mlx5e_ipsec *ipsec, u32 prio, int type)
mutex_unlock(&tx->ft.mutex);
}
-static void setup_fte_addr4(struct mlx5_flow_spec *spec, __be32 *saddr,
- __be32 *daddr)
+static void setup_fte_addr4(struct mlx5_flow_spec *spec,
+ struct mlx5e_ipsec_addr *addrs)
{
+ __be32 *saddr = &addrs->saddr.a4;
+ __be32 *smask = &addrs->smask.m4;
+ __be32 *daddr = &addrs->daddr.a4;
+ __be32 *dmask = &addrs->dmask.m4;
+
if (!*saddr && !*daddr)
return;
@@ -1165,21 +1532,26 @@ static void setup_fte_addr4(struct mlx5_flow_spec *spec, __be32 *saddr,
if (*saddr) {
memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4), saddr, 4);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
- outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4), smask, 4);
}
if (*daddr) {
memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4), daddr, 4);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
- outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4), dmask, 4);
}
}
-static void setup_fte_addr6(struct mlx5_flow_spec *spec, __be32 *saddr,
- __be32 *daddr)
+static void setup_fte_addr6(struct mlx5_flow_spec *spec,
+ struct mlx5e_ipsec_addr *addrs)
{
+ __be32 *saddr = addrs->saddr.a6;
+ __be32 *smask = addrs->smask.m6;
+ __be32 *daddr = addrs->daddr.a6;
+ __be32 *dmask = addrs->dmask.m6;
+
if (addr6_all_zero(saddr) && addr6_all_zero(daddr))
return;
@@ -1191,15 +1563,15 @@ static void setup_fte_addr6(struct mlx5_flow_spec *spec, __be32 *saddr,
if (!addr6_all_zero(saddr)) {
memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), saddr, 16);
- memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
- outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), 0xff, 16);
+		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), smask, 16);
}
if (!addr6_all_zero(daddr)) {
memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), daddr, 16);
- memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
- outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 0xff, 16);
+		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), dmask, 16);
}
}
@@ -1341,7 +1713,8 @@ static int setup_modify_header(struct mlx5e_ipsec *ipsec, int type, u32 val, u8
MLX5_ACTION_TYPE_SET);
MLX5_SET(set_action_in, action[2], field,
MLX5_ACTION_IN_FIELD_METADATA_REG_C_4);
- MLX5_SET(set_action_in, action[2], data, 0);
+ MLX5_SET(set_action_in, action[2], data,
+ MLX5_IPSEC_ASO_SW_CRYPTO_OFFLOAD);
MLX5_SET(set_action_in, action[2], offset, 0);
MLX5_SET(set_action_in, action[2], length, 32);
}
@@ -1360,8 +1733,8 @@ static int setup_modify_header(struct mlx5e_ipsec *ipsec, int type, u32 val, u8
modify_hdr = mlx5_modify_header_alloc(mdev, ns_type, num_of_actions, action);
if (IS_ERR(modify_hdr)) {
- mlx5_core_err(mdev, "Failed to allocate modify_header %ld\n",
- PTR_ERR(modify_hdr));
+ mlx5_core_err(mdev, "Failed to allocate modify_header %pe\n",
+ modify_hdr);
return PTR_ERR(modify_hdr);
}
@@ -1388,7 +1761,7 @@ setup_pkt_tunnel_reformat(struct mlx5_core_dev *mdev,
if (attrs->dir == XFRM_DEV_OFFLOAD_OUT) {
bfflen += sizeof(*esp_hdr) + 8;
- switch (attrs->family) {
+ switch (attrs->addrs.family) {
case AF_INET:
bfflen += sizeof(*iphdr);
break;
@@ -1405,7 +1778,7 @@ setup_pkt_tunnel_reformat(struct mlx5_core_dev *mdev,
return -ENOMEM;
eth_hdr = (struct ethhdr *)reformatbf;
- switch (attrs->family) {
+ switch (attrs->addrs.family) {
case AF_INET:
eth_hdr->h_proto = htons(ETH_P_IP);
break;
@@ -1428,11 +1801,11 @@ setup_pkt_tunnel_reformat(struct mlx5_core_dev *mdev,
reformat_params->param_0 = attrs->authsize;
hdr = reformatbf + sizeof(*eth_hdr);
- switch (attrs->family) {
+ switch (attrs->addrs.family) {
case AF_INET:
iphdr = (struct iphdr *)hdr;
- memcpy(&iphdr->saddr, &attrs->saddr.a4, 4);
- memcpy(&iphdr->daddr, &attrs->daddr.a4, 4);
+ memcpy(&iphdr->saddr, &attrs->addrs.saddr.a4, 4);
+ memcpy(&iphdr->daddr, &attrs->addrs.daddr.a4, 4);
iphdr->version = 4;
iphdr->ihl = 5;
iphdr->ttl = IPSEC_TUNNEL_DEFAULT_TTL;
@@ -1441,8 +1814,8 @@ setup_pkt_tunnel_reformat(struct mlx5_core_dev *mdev,
break;
case AF_INET6:
ipv6hdr = (struct ipv6hdr *)hdr;
- memcpy(&ipv6hdr->saddr, &attrs->saddr.a6, 16);
- memcpy(&ipv6hdr->daddr, &attrs->daddr.a6, 16);
+ memcpy(&ipv6hdr->saddr, &attrs->addrs.saddr.a6, 16);
+ memcpy(&ipv6hdr->daddr, &attrs->addrs.daddr.a6, 16);
ipv6hdr->nexthdr = IPPROTO_ESP;
ipv6hdr->version = 6;
ipv6hdr->hop_limit = IPSEC_TUNNEL_DEFAULT_TTL;
@@ -1476,7 +1849,7 @@ static int get_reformat_type(struct mlx5_accel_esp_xfrm_attrs *attrs)
return MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT_OVER_UDP;
return MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT;
case XFRM_DEV_OFFLOAD_OUT:
- if (attrs->family == AF_INET) {
+ if (attrs->addrs.family == AF_INET) {
if (attrs->encap)
return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV4;
return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4;
@@ -1577,6 +1950,85 @@ static int setup_pkt_reformat(struct mlx5e_ipsec *ipsec,
return 0;
}
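+/* Install the two rules needed for SAs with an upper-layer protocol
+ * selector: a status-table rule that matches a clean IPsec syndrome and
+ * zero reg_c_4 and forwards to the SA selector table, and an SA selector
+ * rule that matches the upspec and forwards back to the policy table.
+ * Packets failing the selector hit the table miss and are dropped.
+ */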
+static int rx_add_rule_sa_selector(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5e_ipsec_rx *rx,
+ struct upspec *upspec)
+{
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+ struct mlx5_core_dev *mdev = ipsec->mdev;
+ struct mlx5_flow_destination dest[2];
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters_2.ipsec_syndrome);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters_2.metadata_reg_c_4);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters_2.ipsec_syndrome, 0);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters_2.metadata_reg_c_4, 0);
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+
+ ipsec_rx_rule_add_match_obj(sa_entry, rx, spec);
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ flow_act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL;
+ dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[0].ft = rx->ft.sa_sel;
+ dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[1].counter = rx->fc->cnt;
+
+ rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx pass rule, err=%d\n",
+ err);
+ goto err_add_status_pass_rule;
+ }
+
+ sa_entry->ipsec_rule.status_pass = rule;
+
+ MLX5_SET(fte_match_param, spec->match_criteria,
+ misc_parameters_2.ipsec_syndrome, 0);
+ MLX5_SET(fte_match_param, spec->match_criteria,
+ misc_parameters_2.metadata_reg_c_4, 0);
+
+ setup_fte_upper_proto_match(spec, upspec);
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[0].ft = rx->ft.pol;
+
+ rule = mlx5_add_flow_rules(rx->ft.sa_sel, spec, &flow_act, &dest[0], 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx sa selector rule, err=%d\n",
+ err);
+ goto err_add_sa_sel_rule;
+ }
+
+ sa_entry->ipsec_rule.sa_sel = rule;
+
+ kvfree(spec);
+ return 0;
+
+err_add_sa_sel_rule:
+ mlx5_del_flow_rules(sa_entry->ipsec_rule.status_pass);
+err_add_status_pass_rule:
+ kvfree(spec);
+ return err;
+}
+
static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
@@ -1590,7 +2042,7 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
struct mlx5_fc *counter;
int err = 0;
- rx = rx_ft_get(mdev, ipsec, attrs->family, attrs->type);
+ rx = rx_ft_get(mdev, ipsec, attrs->addrs.family, attrs->type);
if (IS_ERR(rx))
return PTR_ERR(rx);
@@ -1600,16 +2052,15 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
goto err_alloc;
}
- if (attrs->family == AF_INET)
- setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
+ if (attrs->addrs.family == AF_INET)
+ setup_fte_addr4(spec, &attrs->addrs);
else
- setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
+ setup_fte_addr6(spec, &attrs->addrs);
setup_fte_spi(spec, attrs->spi, attrs->encap);
if (!attrs->encap)
setup_fte_esp(spec);
setup_fte_no_frags(spec);
- setup_fte_upper_proto_match(spec, &attrs->upspec);
if (!attrs->drop) {
if (rx != ipsec->rx_esw)
@@ -1650,13 +2101,20 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[0].ft = rx->ft.status;
dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[1].counter_id = mlx5_fc_id(counter);
+ dest[1].counter = counter;
rule = mlx5_add_flow_rules(rx->ft.sa, spec, &flow_act, dest, 2);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
mlx5_core_err(mdev, "fail to add RX ipsec rule err=%d\n", err);
goto err_add_flow;
}
+
+ if (attrs->upspec.proto && attrs->type == XFRM_DEV_OFFLOAD_PACKET) {
+ err = rx_add_rule_sa_selector(sa_entry, rx, &attrs->upspec);
+ if (err)
+ goto err_add_sa_sel;
+ }
+
if (attrs->type == XFRM_DEV_OFFLOAD_PACKET)
err = rx_add_rule_drop_replay(sa_entry, rx);
if (err)
@@ -1680,6 +2138,11 @@ err_drop_reason:
mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.replay.fc);
}
err_add_replay:
+ if (sa_entry->ipsec_rule.sa_sel) {
+ mlx5_del_flow_rules(sa_entry->ipsec_rule.sa_sel);
+ mlx5_del_flow_rules(sa_entry->ipsec_rule.status_pass);
+ }
+err_add_sa_sel:
mlx5_del_flow_rules(rule);
err_add_flow:
mlx5_fc_destroy(mdev, counter);
@@ -1692,7 +2155,7 @@ err_pkt_reformat:
err_mod_header:
kvfree(spec);
err_alloc:
- rx_ft_put(ipsec, attrs->family, attrs->type);
+ rx_ft_put(ipsec, attrs->addrs.family, attrs->type);
return err;
}
@@ -1719,23 +2182,21 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
goto err_alloc;
}
- if (attrs->family == AF_INET)
- setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
- else
- setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
-
setup_fte_no_frags(spec);
setup_fte_upper_proto_match(spec, &attrs->upspec);
switch (attrs->type) {
case XFRM_DEV_OFFLOAD_CRYPTO:
+ if (attrs->addrs.family == AF_INET)
+ setup_fte_addr4(spec, &attrs->addrs);
+ else
+ setup_fte_addr6(spec, &attrs->addrs);
setup_fte_spi(spec, attrs->spi, false);
setup_fte_esp(spec);
setup_fte_reg_a(spec);
break;
case XFRM_DEV_OFFLOAD_PACKET:
- if (attrs->reqid)
- setup_fte_reg_c4(spec, attrs->reqid);
+ setup_fte_reg_c4(spec, attrs->reqid);
err = setup_pkt_reformat(ipsec, attrs, &flow_act);
if (err)
goto err_pkt_reformat;
@@ -1763,7 +2224,7 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
dest[0].ft = tx->ft.status;
dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[1].counter_id = mlx5_fc_id(counter);
+ dest[1].counter = counter;
rule = mlx5_add_flow_rules(tx->ft.sa, spec, &flow_act, dest, 2);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -1813,10 +2274,10 @@ static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
}
tx = ipsec_tx(ipsec, attrs->type);
- if (attrs->family == AF_INET)
- setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
+ if (attrs->addrs.family == AF_INET)
+ setup_fte_addr4(spec, &attrs->addrs);
else
- setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
+ setup_fte_addr6(spec, &attrs->addrs);
setup_fte_no_frags(spec);
setup_fte_upper_proto_match(spec, &attrs->upspec);
@@ -1836,7 +2297,7 @@ static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[dstn].counter_id = mlx5_fc_id(tx->fc->drop);
+ dest[dstn].counter = tx->fc->drop;
dstn++;
break;
default:
@@ -1886,12 +2347,12 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
struct mlx5e_ipsec_rx *rx;
int err, dstn = 0;
- ft = rx_ft_get_policy(mdev, pol_entry->ipsec, attrs->family, attrs->prio,
- attrs->type);
+ ft = rx_ft_get_policy(mdev, pol_entry->ipsec, attrs->addrs.family,
+ attrs->prio, attrs->type);
if (IS_ERR(ft))
return PTR_ERR(ft);
- rx = ipsec_rx(pol_entry->ipsec, attrs->family, attrs->type);
+ rx = ipsec_rx(pol_entry->ipsec, attrs->addrs.family, attrs->type);
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
@@ -1899,10 +2360,10 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
goto err_alloc;
}
- if (attrs->family == AF_INET)
- setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
+ if (attrs->addrs.family == AF_INET)
+ setup_fte_addr4(spec, &attrs->addrs);
else
- setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
+ setup_fte_addr6(spec, &attrs->addrs);
setup_fte_no_frags(spec);
setup_fte_upper_proto_match(spec, &attrs->upspec);
@@ -1914,7 +2375,7 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
case XFRM_POLICY_BLOCK:
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[dstn].counter_id = mlx5_fc_id(rx->fc->drop);
+ dest[dstn].counter = rx->fc->drop;
dstn++;
break;
default:
@@ -1926,8 +2387,7 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
flow_act.flags |= FLOW_ACT_NO_APPEND;
if (rx == ipsec->rx_esw && rx->chains)
flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
- dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest[dstn].ft = rx->ft.sa;
+ ipsec_rx_default_dest_get(ipsec, rx, &dest[dstn]);
dstn++;
rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, dstn);
if (IS_ERR(rule)) {
@@ -1943,7 +2403,8 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
err_action:
kvfree(spec);
err_alloc:
- rx_ft_put_policy(pol_entry->ipsec, attrs->family, attrs->prio, attrs->type);
+ rx_ft_put_policy(pol_entry->ipsec, attrs->addrs.family, attrs->prio,
+ attrs->type);
return err;
}
@@ -2064,6 +2525,7 @@ void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv, void *ipsec_stats)
stats->ipsec_rx_bytes = 0;
stats->ipsec_rx_drop_pkts = 0;
stats->ipsec_rx_drop_bytes = 0;
+ stats->ipsec_rx_drop_mismatch_sa_sel = 0;
stats->ipsec_tx_pkts = 0;
stats->ipsec_tx_bytes = 0;
stats->ipsec_tx_drop_pkts = 0;
@@ -2073,6 +2535,9 @@ void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv, void *ipsec_stats)
mlx5_fc_query(mdev, fc->cnt, &stats->ipsec_rx_pkts, &stats->ipsec_rx_bytes);
mlx5_fc_query(mdev, fc->drop, &stats->ipsec_rx_drop_pkts,
&stats->ipsec_rx_drop_bytes);
+ if (ipsec->rx_ipv4->sa_sel.fc)
+ mlx5_fc_query(mdev, ipsec->rx_ipv4->sa_sel.fc,
+ &stats->ipsec_rx_drop_mismatch_sa_sel, &bytes);
fc = ipsec->tx->fc;
mlx5_fc_query(mdev, fc->cnt, &stats->ipsec_tx_pkts, &stats->ipsec_tx_bytes);
@@ -2101,6 +2566,11 @@ void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv, void *ipsec_stats)
stats->ipsec_tx_drop_pkts += packets;
stats->ipsec_tx_drop_bytes += bytes;
}
+
+ if (ipsec->rx_esw->sa_sel.fc &&
+ !mlx5_fc_query(mdev, ipsec->rx_esw->sa_sel.fc,
+ &packets, &bytes))
+ stats->ipsec_rx_drop_mismatch_sa_sel += packets;
}
}
@@ -2198,12 +2668,18 @@ void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
mlx5_del_flow_rules(ipsec_rule->auth.rule);
mlx5_fc_destroy(mdev, ipsec_rule->auth.fc);
+ if (ipsec_rule->sa_sel) {
+ mlx5_del_flow_rules(ipsec_rule->sa_sel);
+ mlx5_del_flow_rules(ipsec_rule->status_pass);
+ }
+
if (ipsec_rule->replay.rule) {
mlx5_del_flow_rules(ipsec_rule->replay.rule);
mlx5_fc_destroy(mdev, ipsec_rule->replay.fc);
}
mlx5_esw_ipsec_rx_id_mapping_remove(sa_entry);
- rx_ft_put(sa_entry->ipsec, sa_entry->attrs.family, sa_entry->attrs.type);
+ rx_ft_put(sa_entry->ipsec, sa_entry->attrs.addrs.family,
+ sa_entry->attrs.type);
}
int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
@@ -2239,7 +2715,8 @@ void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
mlx5e_ipsec_unblock_tc_offload(pol_entry->ipsec->mdev);
if (pol_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
- rx_ft_put_policy(pol_entry->ipsec, pol_entry->attrs.family,
+ rx_ft_put_policy(pol_entry->ipsec,
+ pol_entry->attrs.addrs.family,
pol_entry->attrs.prio, pol_entry->attrs.type);
return;
}
@@ -2373,18 +2850,24 @@ void mlx5e_accel_ipsec_fs_modify(struct mlx5e_ipsec_sa_entry *sa_entry)
memcpy(sa_entry, &sa_entry_shadow, sizeof(*sa_entry));
}
-bool mlx5e_ipsec_fs_tunnel_enabled(struct mlx5e_ipsec_sa_entry *sa_entry)
+bool mlx5e_ipsec_fs_tunnel_allowed(struct mlx5e_ipsec_sa_entry *sa_entry)
{
- struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
- struct mlx5e_ipsec_rx *rx;
- struct mlx5e_ipsec_tx *tx;
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+ struct xfrm_state *x = sa_entry->x;
+ bool from_fdb;
- rx = ipsec_rx(sa_entry->ipsec, attrs->family, attrs->type);
- tx = ipsec_tx(sa_entry->ipsec, attrs->type);
- if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
- return tx->allow_tunnel_mode;
+ if (x->xso.dir == XFRM_DEV_OFFLOAD_OUT) {
+ struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, x->xso.type);
+
+ from_fdb = (tx == ipsec->tx_esw);
+ } else {
+ struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, x->props.family,
+ x->xso.type);
+
+ from_fdb = (rx == ipsec->rx_esw);
+ }
- return rx->allow_tunnel_mode;
+ return mlx5_eswitch_block_encap(ipsec->mdev, from_fdb);
}
void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *slave_priv,
@@ -2410,9 +2893,30 @@ void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *slave_priv,
void mlx5e_ipsec_send_event(struct mlx5e_priv *priv, int event)
{
- if (!priv->ipsec)
- return; /* IPsec not supported */
+ if (!priv->ipsec || mlx5_devcom_comp_get_size(priv->devcom) < 2)
+ return; /* IPsec not supported or no peers */
mlx5_devcom_send_event(priv->devcom, event, event, priv);
wait_for_completion(&priv->ipsec->comp);
}
+
+void mlx5e_ipsec_disable_events(struct mlx5e_priv *priv)
+{
+ struct mlx5_devcom_comp_dev *tmp = NULL;
+ struct mlx5e_priv *peer_priv;
+
+ if (!priv->devcom)
+ return;
+
+ if (!mlx5_devcom_for_each_peer_begin(priv->devcom))
+ goto out;
+
+ peer_priv = mlx5_devcom_get_next_peer_data(priv->devcom, &tmp);
+ if (peer_priv)
+ complete_all(&peer_priv->ipsec->comp);
+
+ mlx5_devcom_for_each_peer_end(priv->devcom);
+out:
+ mlx5_devcom_unregister_component(priv->devcom);
+ priv->devcom = NULL;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
index 6e00afe4671b..ef7322d381af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
@@ -42,8 +42,7 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
if (MLX5_CAP_IPSEC(mdev, ipsec_full_offload) &&
(mdev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_DMFS ||
- (mdev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS &&
- is_mdev_legacy_mode(mdev)))) {
+ is_mdev_legacy_mode(mdev))) {
if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
reformat_add_esp_trasport) &&
MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
@@ -51,9 +50,10 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
MLX5_CAP_FLOWTABLE_NIC_RX(mdev, decap))
caps |= MLX5_IPSEC_CAP_PACKET_OFFLOAD;
- if ((MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ignore_flow_level) &&
- MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ignore_flow_level)) ||
- MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, ignore_flow_level))
+ if (IS_ENABLED(CONFIG_MLX5_CLS_ACT) &&
+ ((MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ignore_flow_level) &&
+ MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ignore_flow_level)) ||
+ MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, ignore_flow_level)))
caps |= MLX5_IPSEC_CAP_PRIO;
if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
@@ -90,8 +90,9 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
EXPORT_SYMBOL_GPL(mlx5_ipsec_device_caps);
static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
- struct mlx5_accel_esp_xfrm_attrs *attrs)
+ struct mlx5e_ipsec_sa_entry *sa_entry)
{
+ struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
void *aso_ctx;
aso_ctx = MLX5_ADDR_OF(ipsec_obj, obj, ipsec_aso);
@@ -119,13 +120,18 @@ static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
* active.
*/
MLX5_SET(ipsec_obj, obj, aso_return_reg, MLX5_IPSEC_ASO_REG_C_4_5);
- if (attrs->dir == XFRM_DEV_OFFLOAD_OUT)
+ if (attrs->dir == XFRM_DEV_OFFLOAD_OUT) {
MLX5_SET(ipsec_aso, aso_ctx, mode, MLX5_IPSEC_ASO_INC_SN);
+ if (!attrs->replay_esn.trigger)
+ MLX5_SET(ipsec_aso, aso_ctx, mode_parameter,
+ sa_entry->esn_state.esn);
+ }
if (attrs->lft.hard_packet_limit != XFRM_INF) {
MLX5_SET(ipsec_aso, aso_ctx, remove_flow_pkt_cnt,
attrs->lft.hard_packet_limit);
MLX5_SET(ipsec_aso, aso_ctx, hard_lft_arm, 1);
+ MLX5_SET(ipsec_aso, aso_ctx, remove_flow_enable, 1);
}
if (attrs->lft.soft_packet_limit != XFRM_INF) {
@@ -173,7 +179,7 @@ static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
res = &mdev->mlx5e_res.hw_objs;
if (attrs->type == XFRM_DEV_OFFLOAD_PACKET)
- mlx5e_ipsec_packet_setup(obj, res->pdn, attrs);
+ mlx5e_ipsec_packet_setup(obj, res->pdn, sa_entry);
err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
if (!err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index 727fa7c18523..6056106edcc6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -327,6 +327,10 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
if (unlikely(!sa_entry)) {
rcu_read_unlock();
atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_sadb_miss);
+ /* Clear secpath to prevent invalid dereference
+ * in downstream XFRM policy checks.
+ */
+ secpath_reset(skb);
return;
}
xfrm_state_hold(sa_entry->x);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index 82064614846f..45b0d19e735c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -40,7 +40,7 @@
#include "en/txrx.h"
/* Bit31: IPsec marker, Bit30: reserved, Bit29-24: IPsec syndrome, Bit23-0: IPsec obj id */
-#define MLX5_IPSEC_METADATA_MARKER(metadata) (((metadata) >> 31) & 0x1)
+#define MLX5_IPSEC_METADATA_MARKER(metadata) ((((metadata) >> 30) & 0x3) == 0x2)
#define MLX5_IPSEC_METADATA_SYNDROM(metadata) (((metadata) >> 24) & GENMASK(5, 0))
#define MLX5_IPSEC_METADATA_HANDLE(metadata) ((metadata) & GENMASK(23, 0))
@@ -97,18 +97,11 @@ mlx5e_ipsec_feature_check(struct sk_buff *skb, netdev_features_t features)
if (!x || !x->xso.offload_handle)
goto out_disable;
- if (xo->inner_ipproto) {
- /* Cannot support tunnel packet over IPsec tunnel mode
- * because we cannot offload three IP header csum
- */
- if (x->props.mode == XFRM_MODE_TUNNEL)
- goto out_disable;
-
- /* Only support UDP or TCP L4 checksum */
- if (xo->inner_ipproto != IPPROTO_UDP &&
- xo->inner_ipproto != IPPROTO_TCP)
- goto out_disable;
- }
+ /* Only support UDP or TCP L4 checksum */
+ if (xo->inner_ipproto &&
+ xo->inner_ipproto != IPPROTO_UDP &&
+ xo->inner_ipproto != IPPROTO_TCP)
+ goto out_disable;
return features;
@@ -123,6 +116,7 @@ static inline bool
mlx5e_ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5_wqe_eth_seg *eseg)
{
+ struct mlx5_core_dev *mdev = sq->mdev;
u8 inner_ipproto;
if (!mlx5e_ipsec_eseg_meta(eseg))
@@ -132,9 +126,12 @@ mlx5e_ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
inner_ipproto = xfrm_offload(skb)->inner_ipproto;
if (inner_ipproto) {
eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM;
- if (inner_ipproto == IPPROTO_TCP || inner_ipproto == IPPROTO_UDP)
+ if (inner_ipproto == IPPROTO_TCP || inner_ipproto == IPPROTO_UDP) {
+ mlx5e_swp_encap_csum_partial(mdev, skb, true);
eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM;
+ }
} else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+ mlx5e_swp_encap_csum_partial(mdev, skb, false);
eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
sq->stats->csum_partial_inner++;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
index dd36b04e30a0..93be388068f8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
@@ -42,6 +42,7 @@ static const struct counter_desc mlx5e_ipsec_hw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_drop_pkts) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_drop_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_drop_mismatch_sa_sel) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_tx_pkts) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_tx_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_tx_drop_pkts) },
@@ -78,13 +79,10 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ipsec_hw)
unsigned int i;
if (!priv->ipsec)
- return idx;
+ return;
for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- mlx5e_ipsec_hw_stats_desc[i].format);
-
- return idx;
+ ethtool_puts(data, mlx5e_ipsec_hw_stats_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_hw)
@@ -92,14 +90,14 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_hw)
int i;
if (!priv->ipsec)
- return idx;
+ return;
mlx5e_accel_ipsec_fs_read_stats(priv, &priv->ipsec->hw_stats);
for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR_ATOMIC64(&priv->ipsec->hw_stats,
- mlx5e_ipsec_hw_stats_desc, i);
-
- return idx;
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR_ATOMIC64(&priv->ipsec->hw_stats,
+ mlx5e_ipsec_hw_stats_desc, i));
}
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_sw)
@@ -115,9 +113,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ipsec_sw)
if (priv->ipsec)
for (i = 0; i < NUM_IPSEC_SW_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- mlx5e_ipsec_sw_stats_desc[i].format);
- return idx;
+ ethtool_puts(data, mlx5e_ipsec_sw_stats_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_sw)
@@ -126,9 +122,10 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_sw)
if (priv->ipsec)
for (i = 0; i < NUM_IPSEC_SW_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR_ATOMIC64(&priv->ipsec->sw_stats,
- mlx5e_ipsec_sw_stats_desc, i);
- return idx;
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR_ATOMIC64(
+ &priv->ipsec->sw_stats,
+ mlx5e_ipsec_sw_stats_desc, i));
}
MLX5E_DEFINE_STATS_GRP(ipsec_hw, 0);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
index adc6d8ea0960..07a04a142a2e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
@@ -95,8 +95,8 @@ int mlx5e_ktls_init(struct mlx5e_priv *priv);
void mlx5e_ktls_cleanup(struct mlx5e_priv *priv);
int mlx5e_ktls_get_count(struct mlx5e_priv *priv);
-int mlx5e_ktls_get_strings(struct mlx5e_priv *priv, uint8_t *data);
-int mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 *data);
+void mlx5e_ktls_get_strings(struct mlx5e_priv *priv, u8 **data);
+void mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 **data);
#else
static inline void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
@@ -144,15 +144,9 @@ static inline bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev)
static inline int mlx5e_ktls_init(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_ktls_cleanup(struct mlx5e_priv *priv) { }
static inline int mlx5e_ktls_get_count(struct mlx5e_priv *priv) { return 0; }
-static inline int mlx5e_ktls_get_strings(struct mlx5e_priv *priv, uint8_t *data)
-{
- return 0;
-}
+static inline void mlx5e_ktls_get_strings(struct mlx5e_priv *priv, u8 **data) { }
-static inline int mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 *data)
-{
- return 0;
-}
+static inline void mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 **data) { }
#endif
#endif /* __MLX5E_TLS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
index 65ccb33edafb..da2d1eb52c13 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
@@ -320,7 +320,6 @@ err_dma_unmap:
err_free:
kfree(buf);
err_out:
- priv_rx->rq_stats->tls_resync_req_skip++;
return err;
}
@@ -339,14 +338,19 @@ static void resync_handle_work(struct work_struct *work)
if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) {
mlx5e_ktls_priv_rx_put(priv_rx);
+ priv_rx->rq_stats->tls_resync_req_skip++;
+ tls_offload_rx_resync_async_request_cancel(&resync->core);
return;
}
c = resync->priv->channels.c[priv_rx->rxq];
sq = &c->async_icosq;
- if (resync_post_get_progress_params(sq, priv_rx))
+ if (resync_post_get_progress_params(sq, priv_rx)) {
+ priv_rx->rq_stats->tls_resync_req_skip++;
+ tls_offload_rx_resync_async_request_cancel(&resync->core);
mlx5e_ktls_priv_rx_put(priv_rx);
+ }
}
static void resync_init(struct mlx5e_ktls_rx_resync_ctx *resync,
@@ -425,14 +429,21 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
{
struct mlx5e_ktls_rx_resync_buf *buf = wi->tls_get_params.buf;
struct mlx5e_ktls_offload_context_rx *priv_rx;
+ struct tls_offload_resync_async *async_resync;
+ struct tls_offload_context_rx *rx_ctx;
u8 tracker_state, auth_state, *ctx;
struct device *dev;
u32 hw_seq;
priv_rx = buf->priv_rx;
dev = mlx5_core_dma_dev(sq->channel->mdev);
- if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags)))
+ rx_ctx = tls_offload_ctx_rx(tls_get_ctx(priv_rx->sk));
+ async_resync = rx_ctx->resync_async;
+ if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) {
+ priv_rx->rq_stats->tls_resync_req_skip++;
+ tls_offload_rx_resync_async_request_cancel(async_resync);
goto out;
+ }
dma_sync_single_for_cpu(dev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE,
DMA_FROM_DEVICE);
@@ -443,11 +454,13 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
if (tracker_state != MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING ||
auth_state != MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD) {
priv_rx->rq_stats->tls_resync_req_skip++;
+ tls_offload_rx_resync_async_request_cancel(async_resync);
goto out;
}
hw_seq = MLX5_GET(tls_progress_params, ctx, hw_resync_tcp_sn);
- tls_offload_rx_resync_async_request_end(priv_rx->sk, cpu_to_be32(hw_seq));
+ tls_offload_rx_resync_async_request_end(async_resync,
+ cpu_to_be32(hw_seq));
priv_rx->rq_stats->tls_resync_req_end++;
out:
mlx5e_ktls_priv_rx_put(priv_rx);
@@ -472,8 +485,10 @@ static bool resync_queue_get_psv(struct sock *sk)
resync = &priv_rx->resync;
mlx5e_ktls_priv_rx_get(priv_rx);
- if (unlikely(!queue_work(resync->priv->tls->rx_wq, &resync->work)))
+ if (unlikely(!queue_work(resync->priv->tls->rx_wq, &resync->work))) {
mlx5e_ktls_priv_rx_put(priv_rx);
+ return false;
+ }
return true;
}
@@ -482,6 +497,7 @@ static bool resync_queue_get_psv(struct sock *sk)
static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)
{
struct ethhdr *eth = (struct ethhdr *)(skb->data);
+ struct tls_offload_resync_async *resync_async;
struct net_device *netdev = rq->netdev;
struct net *net = dev_net(netdev);
struct sock *sk = NULL;
@@ -498,9 +514,9 @@ static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)
depth += sizeof(struct iphdr);
th = (void *)iph + sizeof(struct iphdr);
- sk = inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
- iph->saddr, th->source, iph->daddr,
- th->dest, netdev->ifindex);
+ sk = inet_lookup_established(net, iph->saddr, th->source,
+ iph->daddr, th->dest,
+ netdev->ifindex);
#if IS_ENABLED(CONFIG_IPV6)
} else {
struct ipv6hdr *ipv6h = (struct ipv6hdr *)iph;
@@ -508,8 +524,7 @@ static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)
depth += sizeof(struct ipv6hdr);
th = (void *)ipv6h + sizeof(struct ipv6hdr);
- sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
- &ipv6h->saddr, th->source,
+ sk = __inet6_lookup_established(net, &ipv6h->saddr, th->source,
&ipv6h->daddr, ntohs(th->dest),
netdev->ifindex, 0);
#endif
@@ -528,7 +543,8 @@ static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)
seq = th->seq;
datalen = skb->len - depth;
- tls_offload_rx_resync_async_request_start(sk, seq, datalen);
+ resync_async = tls_offload_ctx_rx(tls_get_ctx(sk))->resync_async;
+ tls_offload_rx_resync_async_request_start(resync_async, seq, datalen);
rq->stats->tls_resync_req_start++;
unref:
@@ -557,6 +573,18 @@ void mlx5e_ktls_rx_resync(struct net_device *netdev, struct sock *sk,
resync_handle_seq_match(priv_rx, c);
}
+void
+mlx5e_ktls_rx_resync_async_request_cancel(struct mlx5e_icosq_wqe_info *wi)
+{
+ struct mlx5e_ktls_offload_context_rx *priv_rx;
+ struct mlx5e_ktls_rx_resync_buf *buf;
+
+ buf = wi->tls_get_params.buf;
+ priv_rx = buf->priv_rx;
+ priv_rx->rq_stats->tls_resync_req_skip++;
+ tls_offload_rx_resync_async_request_cancel(&priv_rx->resync.core);
+}
+
/* End of resync section */
void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c
index 7c1c0eb16787..60be2d72eb9e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c
@@ -58,35 +58,31 @@ int mlx5e_ktls_get_count(struct mlx5e_priv *priv)
return ARRAY_SIZE(mlx5e_ktls_sw_stats_desc);
}
-int mlx5e_ktls_get_strings(struct mlx5e_priv *priv, uint8_t *data)
+void mlx5e_ktls_get_strings(struct mlx5e_priv *priv, u8 **data)
{
- unsigned int i, n, idx = 0;
+ unsigned int i, n;
if (!priv->tls)
- return 0;
+ return;
n = mlx5e_ktls_get_count(priv);
for (i = 0; i < n; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- mlx5e_ktls_sw_stats_desc[i].format);
-
- return n;
+ ethtool_puts(data, mlx5e_ktls_sw_stats_desc[i].format);
}
-int mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 *data)
+void mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 **data)
{
- unsigned int i, n, idx = 0;
+ unsigned int i, n;
if (!priv->tls)
- return 0;
+ return;
n = mlx5e_ktls_get_count(priv);
for (i = 0; i < n; i++)
- data[idx++] = MLX5E_READ_CTR_ATOMIC64(&priv->tls->sw_stats,
- mlx5e_ktls_sw_stats_desc,
- i);
-
- return n;
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR_ATOMIC64(&priv->tls->sw_stats,
+ mlx5e_ktls_sw_stats_desc, i));
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index d61be26a4df1..08f06984407b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -660,7 +660,7 @@ tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
while (remaining > 0) {
skb_frag_t *frag = &record->frags[i];
- get_page(skb_frag_page(frag));
+ page_ref_inc(skb_frag_page(frag));
remaining -= skb_frag_size(frag);
info->frags[i++] = *frag;
}
@@ -744,7 +744,7 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn)
dseg->addr = cpu_to_be64(dma_addr);
dseg->lkey = sq->mkey_be;
dseg->byte_count = cpu_to_be32(fsz);
- mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
+ mlx5e_dma_push_netmem(sq, skb_frag_netmem(frag), dma_addr, fsz);
tx_fill_wi(sq, pi, MLX5E_KTLS_DUMP_WQEBBS, fsz, skb_frag_page(frag));
sq->pc += MLX5E_KTLS_DUMP_WQEBBS;
@@ -763,7 +763,7 @@ void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
stats = sq->stats;
mlx5e_tx_dma_unmap(sq->pdev, dma);
- put_page(wi->resync_dump_frag_page);
+ page_ref_dec(wi->resync_dump_frag_page);
stats->tls_dump_packets++;
stats->tls_dump_bytes += wi->num_bytes;
}
@@ -816,12 +816,12 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
err_out:
for (; i < info.nr_frags; i++)
- /* The put_page() here undoes the page ref obtained in tx_sync_info_get().
+ /* The page_ref_dec() here undoes the page ref obtained in tx_sync_info_get().
* Page refs obtained for the DUMP WQEs above (by page_ref_add) will be
* released only upon their completions (or in mlx5e_free_txqsq_descs,
* if channel closes).
*/
- put_page(skb_frag_page(&info.frags[i]));
+ page_ref_dec(skb_frag_page(&info.frags[i]));
return MLX5E_KTLS_SYNC_FAIL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
index f87b65c560ea..cb08799769ee 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
@@ -29,6 +29,10 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
struct mlx5e_tx_wqe_info *wi,
u32 *dma_fifo_cc);
+
+void
+mlx5e_ktls_rx_resync_async_request_cancel(struct mlx5e_icosq_wqe_info *wi);
+
static inline bool
mlx5e_ktls_tx_try_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
struct mlx5e_tx_wqe_info *wi,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
index b2cabd6ab86c..528b04d4de41 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
@@ -339,9 +339,13 @@ static int mlx5e_macsec_init_sa_fs(struct macsec_context *ctx,
{
struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
struct mlx5_macsec_fs *macsec_fs = priv->mdev->macsec_fs;
+ const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
struct mlx5_macsec_rule_attrs rule_attrs;
union mlx5_macsec_rule *macsec_rule;
+ if (is_tx && tx_sc->encoding_sa != sa->assoc_num)
+ return 0;
+
rule_attrs.macsec_obj_id = sa->macsec_obj_id;
rule_attrs.sci = sa->sci;
rule_attrs.assoc_num = sa->assoc_num;
@@ -1640,6 +1644,7 @@ static const struct macsec_ops macsec_offload_ops = {
.mdo_add_secy = mlx5e_macsec_add_secy,
.mdo_upd_secy = mlx5e_macsec_upd_secy,
.mdo_del_secy = mlx5e_macsec_del_secy,
+ .rx_uses_md_dst = true,
};
bool mlx5e_macsec_handle_tx_skb(struct mlx5e_macsec *macsec, struct sk_buff *skb)
@@ -1671,7 +1676,7 @@ void mlx5e_macsec_tx_build_eseg(struct mlx5e_macsec *macsec,
if (!fs_id)
return;
- eseg->flow_table_metadata = cpu_to_be32(MLX5_ETH_WQE_FT_META_MACSEC | fs_id << 2);
+ eseg->flow_table_metadata = cpu_to_be32(MLX5_MACSEC_TX_METADATA(fs_id));
}
void mlx5e_macsec_offload_handle_rx_skb(struct net_device *netdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c
index 4559ee16a11a..4bb47d48061d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c
@@ -38,16 +38,13 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(macsec_hw)
unsigned int i;
if (!priv->macsec)
- return idx;
+ return;
if (!mlx5e_is_macsec_device(priv->mdev))
- return idx;
+ return;
for (i = 0; i < NUM_MACSEC_HW_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- mlx5e_macsec_hw_stats_desc[i].format);
-
- return idx;
+ ethtool_puts(data, mlx5e_macsec_hw_stats_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(macsec_hw)
@@ -56,19 +53,18 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(macsec_hw)
int i;
if (!priv->macsec)
- return idx;
+ return;
if (!mlx5e_is_macsec_device(priv->mdev))
- return idx;
+ return;
macsec_fs = priv->mdev->macsec_fs;
mlx5_macsec_fs_get_stats_fill(macsec_fs, mlx5_macsec_fs_get_stats(macsec_fs));
for (i = 0; i < NUM_MACSEC_HW_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_macsec_fs_get_stats(macsec_fs),
- mlx5e_macsec_hw_stats_desc,
- i);
-
- return idx;
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_CPU(
+ mlx5_macsec_fs_get_stats(macsec_fs),
+ mlx5e_macsec_hw_stats_desc, i));
}
MLX5E_DEFINE_STATS_GRP(macsec_hw, 0);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.c
new file mode 100644
index 000000000000..38e7c77cc851
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.c
@@ -0,0 +1,1155 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+#include <linux/mlx5/device.h>
+#include <net/psp.h>
+#include <linux/psp.h>
+#include "mlx5_core.h"
+#include "psp.h"
+#include "lib/crypto.h"
+#include "en_accel/psp.h"
+#include "fs_core.h"
+
+enum accel_fs_psp_type {
+ ACCEL_FS_PSP4,
+ ACCEL_FS_PSP6,
+ ACCEL_FS_PSP_NUM_TYPES,
+};
+
+enum accel_psp_syndrome {
+ PSP_OK = 0,
+ PSP_ICV_FAIL,
+ PSP_BAD_TRAILER,
+};
+
+struct mlx5e_psp_tx {
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *fg;
+ struct mlx5_flow_handle *rule;
+ struct mutex mutex; /* Protect PSP TX steering */
+ u32 refcnt;
+ struct mlx5_fc *tx_counter;
+};
+
+struct mlx5e_psp_rx_err {
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_handle *auth_fail_rule;
+ struct mlx5_flow_handle *err_rule;
+ struct mlx5_flow_handle *bad_rule;
+ struct mlx5_modify_hdr *copy_modify_hdr;
+};
+
+struct mlx5e_accel_fs_psp_prot {
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *miss_group;
+ struct mlx5_flow_handle *miss_rule;
+ struct mlx5_flow_destination default_dest;
+ struct mlx5e_psp_rx_err rx_err;
+ u32 refcnt;
+ struct mutex prot_mutex; /* protect ESP4/ESP6 protocol */
+ struct mlx5_flow_handle *def_rule;
+};
+
+struct mlx5e_accel_fs_psp {
+ struct mlx5e_accel_fs_psp_prot fs_prot[ACCEL_FS_PSP_NUM_TYPES];
+ struct mlx5_fc *rx_counter;
+ struct mlx5_fc *rx_auth_fail_counter;
+ struct mlx5_fc *rx_err_counter;
+ struct mlx5_fc *rx_bad_counter;
+};
+
+struct mlx5e_psp_fs {
+ struct mlx5_core_dev *mdev;
+ struct mlx5e_psp_tx *tx_fs;
+ /* Rx manage */
+ struct mlx5e_flow_steering *fs;
+ struct mlx5e_accel_fs_psp *rx_fs;
+};
+
+/* PSP RX flow steering */
+static enum mlx5_traffic_types fs_psp2tt(enum accel_fs_psp_type i)
+{
+ if (i == ACCEL_FS_PSP4)
+ return MLX5_TT_IPV4_UDP;
+
+ return MLX5_TT_IPV6_UDP;
+}
+
+static void accel_psp_fs_rx_err_del_rules(struct mlx5e_psp_fs *fs,
+ struct mlx5e_psp_rx_err *rx_err)
+{
+ if (rx_err->bad_rule) {
+ mlx5_del_flow_rules(rx_err->bad_rule);
+ rx_err->bad_rule = NULL;
+ }
+
+ if (rx_err->err_rule) {
+ mlx5_del_flow_rules(rx_err->err_rule);
+ rx_err->err_rule = NULL;
+ }
+
+ if (rx_err->auth_fail_rule) {
+ mlx5_del_flow_rules(rx_err->auth_fail_rule);
+ rx_err->auth_fail_rule = NULL;
+ }
+
+ if (rx_err->rule) {
+ mlx5_del_flow_rules(rx_err->rule);
+ rx_err->rule = NULL;
+ }
+
+ if (rx_err->copy_modify_hdr) {
+ mlx5_modify_header_dealloc(fs->mdev, rx_err->copy_modify_hdr);
+ rx_err->copy_modify_hdr = NULL;
+ }
+}
+
+static void accel_psp_fs_rx_err_destroy_ft(struct mlx5e_psp_fs *fs,
+ struct mlx5e_psp_rx_err *rx_err)
+{
+ accel_psp_fs_rx_err_del_rules(fs, rx_err);
+
+ if (rx_err->ft) {
+ mlx5_destroy_flow_table(rx_err->ft);
+ rx_err->ft = NULL;
+ }
+}
+
+static void accel_psp_setup_syndrome_match(struct mlx5_flow_spec *spec,
+ enum accel_psp_syndrome syndrome)
+{
+ void *misc_params_2;
+
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
+ misc_params_2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
+ MLX5_SET_TO_ONES(fte_match_set_misc2, misc_params_2, psp_syndrome);
+ misc_params_2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
+ MLX5_SET(fte_match_set_misc2, misc_params_2, psp_syndrome, syndrome);
+}
+
+static int accel_psp_fs_rx_err_add_rule(struct mlx5e_psp_fs *fs,
+ struct mlx5e_accel_fs_psp_prot *fs_prot,
+ struct mlx5e_psp_rx_err *rx_err)
+{
+ u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
+ struct mlx5_core_dev *mdev = fs->mdev;
+ struct mlx5_flow_destination dest[2];
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_modify_hdr *modify_hdr;
+ struct mlx5_flow_handle *fte;
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+
+ spec = kzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ /* Action to copy 7 bit psp_syndrome to regB[23:29] */
+ MLX5_SET(copy_action_in, action, action_type, MLX5_ACTION_TYPE_COPY);
+ MLX5_SET(copy_action_in, action, src_field, MLX5_ACTION_IN_FIELD_PSP_SYNDROME);
+ MLX5_SET(copy_action_in, action, src_offset, 0);
+ MLX5_SET(copy_action_in, action, length, 7);
+ MLX5_SET(copy_action_in, action, dst_field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
+ MLX5_SET(copy_action_in, action, dst_offset, 23);
+
+ modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL,
+ 1, action);
+ if (IS_ERR(modify_hdr)) {
+ err = PTR_ERR(modify_hdr);
+ mlx5_core_err(mdev,
+ "fail to alloc psp copy modify_header_id err=%d\n", err);
+ goto out_spec;
+ }
+
+ accel_psp_setup_syndrome_match(spec, PSP_OK);
+ /* create fte */
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ flow_act.modify_hdr = modify_hdr;
+ dest[0].type = fs_prot->default_dest.type;
+ dest[0].ft = fs_prot->default_dest.ft;
+ dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[1].counter = fs->rx_fs->rx_counter;
+ fte = mlx5_add_flow_rules(rx_err->ft, spec, &flow_act, dest, 2);
+ if (IS_ERR(fte)) {
+ err = PTR_ERR(fte);
+ mlx5_core_err(mdev, "fail to add psp rx err copy rule err=%d\n", err);
+ goto out;
+ }
+ rx_err->rule = fte;
+
+ /* add auth fail drop rule */
+ memset(spec, 0, sizeof(*spec));
+ memset(&flow_act, 0, sizeof(flow_act));
+ accel_psp_setup_syndrome_match(spec, PSP_ICV_FAIL);
+ /* create fte */
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ dest[0].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[0].counter = fs->rx_fs->rx_auth_fail_counter;
+ fte = mlx5_add_flow_rules(rx_err->ft, spec, &flow_act, dest, 1);
+ if (IS_ERR(fte)) {
+ err = PTR_ERR(fte);
+ mlx5_core_err(mdev, "fail to add psp rx auth fail drop rule err=%d\n",
+ err);
+ goto out_drop_rule;
+ }
+ rx_err->auth_fail_rule = fte;
+
+ /* add framing drop rule */
+ memset(spec, 0, sizeof(*spec));
+ memset(&flow_act, 0, sizeof(flow_act));
+ accel_psp_setup_syndrome_match(spec, PSP_BAD_TRAILER);
+ /* create fte */
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ dest[0].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[0].counter = fs->rx_fs->rx_err_counter;
+ fte = mlx5_add_flow_rules(rx_err->ft, spec, &flow_act, dest, 1);
+ if (IS_ERR(fte)) {
+ err = PTR_ERR(fte);
+ mlx5_core_err(mdev, "fail to add psp rx framing err drop rule err=%d\n",
+ err);
+ goto out_drop_auth_fail_rule;
+ }
+ rx_err->err_rule = fte;
+
+	/* add catch-all drop rule for any remaining error syndromes */
+ memset(spec, 0, sizeof(*spec));
+ memset(&flow_act, 0, sizeof(flow_act));
+ /* create fte */
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ dest[0].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[0].counter = fs->rx_fs->rx_bad_counter;
+ fte = mlx5_add_flow_rules(rx_err->ft, spec, &flow_act, dest, 1);
+ if (IS_ERR(fte)) {
+ err = PTR_ERR(fte);
+ mlx5_core_err(mdev, "fail to add psp rx misc. err drop rule err=%d\n",
+ err);
+ goto out_drop_error_rule;
+ }
+ rx_err->bad_rule = fte;
+
+ rx_err->copy_modify_hdr = modify_hdr;
+
+ goto out_spec;
+
+out_drop_error_rule:
+ mlx5_del_flow_rules(rx_err->err_rule);
+ rx_err->err_rule = NULL;
+out_drop_auth_fail_rule:
+ mlx5_del_flow_rules(rx_err->auth_fail_rule);
+ rx_err->auth_fail_rule = NULL;
+out_drop_rule:
+ mlx5_del_flow_rules(rx_err->rule);
+ rx_err->rule = NULL;
+out:
+ mlx5_modify_header_dealloc(mdev, modify_hdr);
+out_spec:
+ kfree(spec);
+ return err;
+}
+
+static int accel_psp_fs_rx_err_create_ft(struct mlx5e_psp_fs *fs,
+ struct mlx5e_accel_fs_psp_prot *fs_prot,
+ struct mlx5e_psp_rx_err *rx_err)
+{
+ struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(fs->fs, false);
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_table *ft;
+ int err;
+
+ ft_attr.max_fte = 2;
+ ft_attr.autogroup.max_num_groups = 2;
+ ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL; // MLX5E_ACCEL_FS_TCP_FT_LEVEL
+ ft_attr.prio = MLX5E_NIC_PRIO;
+ ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+		mlx5_core_err(fs->mdev, "fail to create psp rx err ft err=%d\n", err);
+ return err;
+ }
+
+ rx_err->ft = ft;
+ err = accel_psp_fs_rx_err_add_rule(fs, fs_prot, rx_err);
+ if (err)
+ goto out_err;
+
+ return 0;
+
+out_err:
+ mlx5_destroy_flow_table(ft);
+ rx_err->ft = NULL;
+ return err;
+}
+
+static void accel_psp_fs_rx_fs_destroy(struct mlx5e_accel_fs_psp_prot *fs_prot)
+{
+ if (fs_prot->def_rule) {
+ mlx5_del_flow_rules(fs_prot->def_rule);
+ fs_prot->def_rule = NULL;
+ }
+
+ if (fs_prot->miss_rule) {
+ mlx5_del_flow_rules(fs_prot->miss_rule);
+ fs_prot->miss_rule = NULL;
+ }
+
+ if (fs_prot->miss_group) {
+ mlx5_destroy_flow_group(fs_prot->miss_group);
+ fs_prot->miss_group = NULL;
+ }
+
+ if (fs_prot->ft) {
+ mlx5_destroy_flow_table(fs_prot->ft);
+ fs_prot->ft = NULL;
+ }
+}
+
+static void setup_fte_udp_psp(struct mlx5_flow_spec *spec, u16 udp_port)
+{
+ spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria, udp_dport, 0xffff);
+ MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, udp_dport, udp_port);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, spec->match_criteria, ip_protocol);
+ MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, ip_protocol, IPPROTO_UDP);
+}
+
+static int accel_psp_fs_rx_create_ft(struct mlx5e_psp_fs *fs,
+ struct mlx5e_accel_fs_psp_prot *fs_prot)
+{
+ struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(fs->fs, false);
+ u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_modify_hdr *modify_hdr = NULL;
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_core_dev *mdev = fs->mdev;
+ struct mlx5_flow_group *miss_group;
+ MLX5_DECLARE_FLOW_ACT(flow_act);
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ struct mlx5_flow_table *ft;
+ u32 *flow_group_in;
+ int err = 0;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!flow_group_in || !spec) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* Create FT */
+ ft_attr.max_fte = 2;
+ ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_LEVEL;
+ ft_attr.prio = MLX5E_NIC_PRIO;
+ ft_attr.autogroup.num_reserved_entries = 1;
+ ft_attr.autogroup.max_num_groups = 1;
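+	/* the reserved (last) entry is used below for the miss group and rule */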
+ ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ mlx5_core_err(mdev, "fail to create psp rx ft err=%d\n", err);
+ goto out_err;
+ }
+ fs_prot->ft = ft;
+
+ /* Create miss_group */
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
+ miss_group = mlx5_create_flow_group(ft, flow_group_in);
+ if (IS_ERR(miss_group)) {
+ err = PTR_ERR(miss_group);
+ mlx5_core_err(mdev, "fail to create psp rx miss_group err=%d\n", err);
+ goto out_err;
+ }
+ fs_prot->miss_group = miss_group;
+
+ /* Create miss rule */
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, &fs_prot->default_dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "fail to create psp rx miss_rule err=%d\n", err);
+ goto out_err;
+ }
+ fs_prot->miss_rule = rule;
+
+ /* Add default Rx psp rule */
+ setup_fte_udp_psp(spec, PSP_DEFAULT_UDP_PORT);
+ flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_PSP;
+	/* Set bits [31:30] as the PSP marker */
+	/* Bits [29:23] (psp_syndrome) are set in the error FT */
+#define MLX5E_PSP_MARKER_BIT (BIT(30) | BIT(31))
+ MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
+ MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
+ MLX5_SET(set_action_in, action, data, MLX5E_PSP_MARKER_BIT);
+ MLX5_SET(set_action_in, action, offset, 0);
+ MLX5_SET(set_action_in, action, length, 32);
+
+ modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL, 1, action);
+ if (IS_ERR(modify_hdr)) {
+ err = PTR_ERR(modify_hdr);
+ mlx5_core_err(mdev, "fail to alloc psp set modify_header_id err=%d\n", err);
+ modify_hdr = NULL;
+ goto out_err;
+ }
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
+ MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ flow_act.modify_hdr = modify_hdr;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = fs_prot->rx_err.ft;
+ rule = mlx5_add_flow_rules(fs_prot->ft, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev,
+ "fail to add psp rule Rx decryption, err=%d, flow_act.action = %#04X\n",
+ err, flow_act.action);
+ goto out_err;
+ }
+
+ fs_prot->def_rule = rule;
+ goto out;
+
+out_err:
+ accel_psp_fs_rx_fs_destroy(fs_prot);
+out:
+ kvfree(flow_group_in);
+ kvfree(spec);
+ return err;
+}
+
+static int accel_psp_fs_rx_destroy(struct mlx5e_psp_fs *fs, enum accel_fs_psp_type type)
+{
+ struct mlx5e_accel_fs_psp_prot *fs_prot;
+ struct mlx5e_accel_fs_psp *accel_psp;
+
+ accel_psp = fs->rx_fs;
+
+	/* The netdev unreg already happened, so all offloaded rules are already removed */
+ fs_prot = &accel_psp->fs_prot[type];
+
+ accel_psp_fs_rx_fs_destroy(fs_prot);
+
+ accel_psp_fs_rx_err_destroy_ft(fs, &fs_prot->rx_err);
+
+ return 0;
+}
+
+static int accel_psp_fs_rx_create(struct mlx5e_psp_fs *fs, enum accel_fs_psp_type type)
+{
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs->fs, false);
+ struct mlx5e_accel_fs_psp_prot *fs_prot;
+ struct mlx5e_accel_fs_psp *accel_psp;
+ int err;
+
+ accel_psp = fs->rx_fs;
+ fs_prot = &accel_psp->fs_prot[type];
+
+ fs_prot->default_dest = mlx5_ttc_get_default_dest(ttc, fs_psp2tt(type));
+
+ err = accel_psp_fs_rx_err_create_ft(fs, fs_prot, &fs_prot->rx_err);
+ if (err)
+ return err;
+
+ err = accel_psp_fs_rx_create_ft(fs, fs_prot);
+ if (err)
+ accel_psp_fs_rx_err_destroy_ft(fs, &fs_prot->rx_err);
+
+ return err;
+}
+
+static int accel_psp_fs_rx_ft_get(struct mlx5e_psp_fs *fs, enum accel_fs_psp_type type)
+{
+ struct mlx5e_accel_fs_psp_prot *fs_prot;
+ struct mlx5_flow_destination dest = {};
+ struct mlx5e_accel_fs_psp *accel_psp;
+ struct mlx5_ttc_table *ttc;
+ int err = 0;
+
+ if (!fs || !fs->rx_fs)
+ return -EINVAL;
+
+ ttc = mlx5e_fs_get_ttc(fs->fs, false);
+ accel_psp = fs->rx_fs;
+ fs_prot = &accel_psp->fs_prot[type];
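+	/* lazily create the RX tables on first reference; later calls only bump the refcount */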
+ mutex_lock(&fs_prot->prot_mutex);
+ if (fs_prot->refcnt++)
+ goto out;
+
+ /* create FT */
+ err = accel_psp_fs_rx_create(fs, type);
+ if (err) {
+ fs_prot->refcnt--;
+ goto out;
+ }
+
+ /* connect */
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = fs_prot->ft;
+ mlx5_ttc_fwd_dest(ttc, fs_psp2tt(type), &dest);
+
+out:
+ mutex_unlock(&fs_prot->prot_mutex);
+ return err;
+}
+
+static void accel_psp_fs_rx_ft_put(struct mlx5e_psp_fs *fs, enum accel_fs_psp_type type)
+{
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs->fs, false);
+ struct mlx5e_accel_fs_psp_prot *fs_prot;
+ struct mlx5e_accel_fs_psp *accel_psp;
+
+ accel_psp = fs->rx_fs;
+ fs_prot = &accel_psp->fs_prot[type];
+ mutex_lock(&fs_prot->prot_mutex);
+ if (--fs_prot->refcnt)
+ goto out;
+
+ /* disconnect */
+ mlx5_ttc_fwd_default_dest(ttc, fs_psp2tt(type));
+
+ /* remove FT */
+ accel_psp_fs_rx_destroy(fs, type);
+
+out:
+ mutex_unlock(&fs_prot->prot_mutex);
+}
+
+static void accel_psp_fs_cleanup_rx(struct mlx5e_psp_fs *fs)
+{
+ struct mlx5e_accel_fs_psp_prot *fs_prot;
+ struct mlx5e_accel_fs_psp *accel_psp;
+ enum accel_fs_psp_type i;
+
+ if (!fs->rx_fs)
+ return;
+
+ accel_psp = fs->rx_fs;
+ mlx5_fc_destroy(fs->mdev, accel_psp->rx_bad_counter);
+ mlx5_fc_destroy(fs->mdev, accel_psp->rx_err_counter);
+ mlx5_fc_destroy(fs->mdev, accel_psp->rx_auth_fail_counter);
+ mlx5_fc_destroy(fs->mdev, accel_psp->rx_counter);
+ for (i = 0; i < ACCEL_FS_PSP_NUM_TYPES; i++) {
+ fs_prot = &accel_psp->fs_prot[i];
+ mutex_destroy(&fs_prot->prot_mutex);
+ WARN_ON(fs_prot->refcnt);
+ }
+ kfree(fs->rx_fs);
+ fs->rx_fs = NULL;
+}
+
+static int accel_psp_fs_init_rx(struct mlx5e_psp_fs *fs)
+{
+ struct mlx5e_accel_fs_psp_prot *fs_prot;
+ struct mlx5e_accel_fs_psp *accel_psp;
+ struct mlx5_core_dev *mdev = fs->mdev;
+ struct mlx5_fc *flow_counter;
+ enum accel_fs_psp_type i;
+ int err;
+
+ accel_psp = kzalloc(sizeof(*accel_psp), GFP_KERNEL);
+ if (!accel_psp)
+ return -ENOMEM;
+
+ for (i = 0; i < ACCEL_FS_PSP_NUM_TYPES; i++) {
+ fs_prot = &accel_psp->fs_prot[i];
+ mutex_init(&fs_prot->prot_mutex);
+ }
+
+ flow_counter = mlx5_fc_create(mdev, false);
+ if (IS_ERR(flow_counter)) {
+ mlx5_core_warn(mdev,
+ "fail to create psp rx flow counter err=%pe\n",
+ flow_counter);
+ err = PTR_ERR(flow_counter);
+ goto out_err;
+ }
+ accel_psp->rx_counter = flow_counter;
+
+ flow_counter = mlx5_fc_create(mdev, false);
+ if (IS_ERR(flow_counter)) {
+ mlx5_core_warn(mdev,
+ "fail to create psp rx auth fail flow counter err=%pe\n",
+ flow_counter);
+ err = PTR_ERR(flow_counter);
+ goto out_counter_err;
+ }
+ accel_psp->rx_auth_fail_counter = flow_counter;
+
+ flow_counter = mlx5_fc_create(mdev, false);
+ if (IS_ERR(flow_counter)) {
+ mlx5_core_warn(mdev,
+ "fail to create psp rx error flow counter err=%pe\n",
+ flow_counter);
+ err = PTR_ERR(flow_counter);
+ goto out_auth_fail_counter_err;
+ }
+ accel_psp->rx_err_counter = flow_counter;
+
+ flow_counter = mlx5_fc_create(mdev, false);
+ if (IS_ERR(flow_counter)) {
+ mlx5_core_warn(mdev,
+ "fail to create psp rx bad flow counter err=%pe\n",
+ flow_counter);
+ err = PTR_ERR(flow_counter);
+ goto out_err_counter_err;
+ }
+ accel_psp->rx_bad_counter = flow_counter;
+
+ fs->rx_fs = accel_psp;
+
+ return 0;
+
+out_err_counter_err:
+ mlx5_fc_destroy(mdev, accel_psp->rx_err_counter);
+ accel_psp->rx_err_counter = NULL;
+out_auth_fail_counter_err:
+ mlx5_fc_destroy(mdev, accel_psp->rx_auth_fail_counter);
+ accel_psp->rx_auth_fail_counter = NULL;
+out_counter_err:
+ mlx5_fc_destroy(mdev, accel_psp->rx_counter);
+ accel_psp->rx_counter = NULL;
+out_err:
+ for (i = 0; i < ACCEL_FS_PSP_NUM_TYPES; i++) {
+ fs_prot = &accel_psp->fs_prot[i];
+ mutex_destroy(&fs_prot->prot_mutex);
+ }
+ kfree(accel_psp);
+ fs->rx_fs = NULL;
+
+ return err;
+}
+
+void mlx5_accel_psp_fs_cleanup_rx_tables(struct mlx5e_priv *priv)
+{
+ int i;
+
+ if (!priv->psp)
+ return;
+
+ for (i = 0; i < ACCEL_FS_PSP_NUM_TYPES; i++)
+ accel_psp_fs_rx_ft_put(priv->psp->fs, i);
+}
+
+int mlx5_accel_psp_fs_init_rx_tables(struct mlx5e_priv *priv)
+{
+ struct mlx5e_psp_fs *fs;
+ int err, i;
+
+ if (!priv->psp)
+ return 0;
+
+ fs = priv->psp->fs;
+ for (i = 0; i < ACCEL_FS_PSP_NUM_TYPES; i++) {
+ err = accel_psp_fs_rx_ft_get(fs, i);
+ if (err)
+ goto out_err;
+ }
+
+ return 0;
+
+out_err:
+ i--;
+ while (i >= 0) {
+ accel_psp_fs_rx_ft_put(fs, i);
+ --i;
+ }
+
+ return err;
+}
+
+static int accel_psp_fs_tx_create_ft_table(struct mlx5e_psp_fs *fs)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_core_dev *mdev = fs->mdev;
+ struct mlx5_flow_act flow_act = {};
+ u32 *in, *mc, *outer_headers_c;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ struct mlx5e_psp_tx *tx_fs;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *fg;
+ int err = 0;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!spec || !in) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ ft_attr.max_fte = 1;
+#define MLX5E_PSP_PRIO 0
+ ft_attr.prio = MLX5E_PSP_PRIO;
+#define MLX5E_PSP_LEVEL 0
+ ft_attr.level = MLX5E_PSP_LEVEL;
+ ft_attr.autogroup.max_num_groups = 1;
+
+ tx_fs = fs->tx_fs;
+ ft = mlx5_create_flow_table(tx_fs->ns, &ft_attr);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ mlx5_core_err(mdev, "PSP: fail to add psp tx flow table, err = %d\n", err);
+ goto out;
+ }
+
+ mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+ outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc, outer_headers);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_dport);
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ fg = mlx5_create_flow_group(ft, in);
+ if (IS_ERR(fg)) {
+ err = PTR_ERR(fg);
+ mlx5_core_err(mdev, "PSP: fail to add psp tx flow group, err = %d\n", err);
+ goto err_create_fg;
+ }
+
+ setup_fte_udp_psp(spec, PSP_DEFAULT_UDP_PORT);
+ flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_PSP;
+ flow_act.flags |= FLOW_ACT_NO_APPEND;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW |
+ MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest.counter = tx_fs->tx_counter;
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "PSP: fail to add psp tx flow rule, err = %d\n", err);
+ goto err_add_flow_rule;
+ }
+
+ tx_fs->ft = ft;
+ tx_fs->fg = fg;
+ tx_fs->rule = rule;
+ goto out;
+
+err_add_flow_rule:
+ mlx5_destroy_flow_group(fg);
+err_create_fg:
+ mlx5_destroy_flow_table(ft);
+out:
+ kvfree(in);
+ kvfree(spec);
+ return err;
+}
+
+static void accel_psp_fs_tx_destroy(struct mlx5e_psp_tx *tx_fs)
+{
+ if (!tx_fs->ft)
+ return;
+
+ mlx5_del_flow_rules(tx_fs->rule);
+ mlx5_destroy_flow_group(tx_fs->fg);
+ mlx5_destroy_flow_table(tx_fs->ft);
+}
+
+static int accel_psp_fs_tx_ft_get(struct mlx5e_psp_fs *fs)
+{
+ struct mlx5e_psp_tx *tx_fs = fs->tx_fs;
+ int err = 0;
+
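+	/* lazily create the TX table on first reference; later calls only bump the refcount */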
+ mutex_lock(&tx_fs->mutex);
+ if (tx_fs->refcnt++)
+ goto out;
+
+ err = accel_psp_fs_tx_create_ft_table(fs);
+ if (err)
+ tx_fs->refcnt--;
+out:
+ mutex_unlock(&tx_fs->mutex);
+ return err;
+}
+
+static void accel_psp_fs_tx_ft_put(struct mlx5e_psp_fs *fs)
+{
+ struct mlx5e_psp_tx *tx_fs = fs->tx_fs;
+
+ mutex_lock(&tx_fs->mutex);
+ if (--tx_fs->refcnt)
+ goto out;
+
+ accel_psp_fs_tx_destroy(tx_fs);
+out:
+ mutex_unlock(&tx_fs->mutex);
+}
+
+static void accel_psp_fs_cleanup_tx(struct mlx5e_psp_fs *fs)
+{
+ struct mlx5e_psp_tx *tx_fs = fs->tx_fs;
+
+ if (!tx_fs)
+ return;
+
+ mlx5_fc_destroy(fs->mdev, tx_fs->tx_counter);
+ mutex_destroy(&tx_fs->mutex);
+ WARN_ON(tx_fs->refcnt);
+ kfree(tx_fs);
+ fs->tx_fs = NULL;
+}
+
+static int accel_psp_fs_init_tx(struct mlx5e_psp_fs *fs)
+{
+ struct mlx5_core_dev *mdev = fs->mdev;
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_fc *flow_counter;
+ struct mlx5e_psp_tx *tx_fs;
+
+ ns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_EGRESS_IPSEC);
+ if (!ns)
+ return -EOPNOTSUPP;
+
+ tx_fs = kzalloc(sizeof(*tx_fs), GFP_KERNEL);
+ if (!tx_fs)
+ return -ENOMEM;
+
+ flow_counter = mlx5_fc_create(mdev, false);
+ if (IS_ERR(flow_counter)) {
+ mlx5_core_warn(mdev,
+ "fail to create psp tx flow counter err=%pe\n",
+ flow_counter);
+ kfree(tx_fs);
+ return PTR_ERR(flow_counter);
+ }
+ tx_fs->tx_counter = flow_counter;
+ mutex_init(&tx_fs->mutex);
+ tx_fs->ns = ns;
+ fs->tx_fs = tx_fs;
+ return 0;
+}
+
+static void
+mlx5e_accel_psp_fs_get_stats_fill(struct mlx5e_priv *priv,
+ struct mlx5e_psp_stats *stats)
+{
+ struct mlx5e_psp_tx *tx_fs = priv->psp->fs->tx_fs;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_accel_fs_psp *accel_psp;
+
+ accel_psp = (struct mlx5e_accel_fs_psp *)priv->psp->fs->rx_fs;
+
+ if (tx_fs->tx_counter)
+ mlx5_fc_query(mdev, tx_fs->tx_counter, &stats->psp_tx_pkts,
+ &stats->psp_tx_bytes);
+
+ if (accel_psp->rx_counter)
+ mlx5_fc_query(mdev, accel_psp->rx_counter, &stats->psp_rx_pkts,
+ &stats->psp_rx_bytes);
+
+ if (accel_psp->rx_auth_fail_counter)
+ mlx5_fc_query(mdev, accel_psp->rx_auth_fail_counter,
+ &stats->psp_rx_pkts_auth_fail,
+ &stats->psp_rx_bytes_auth_fail);
+
+ if (accel_psp->rx_err_counter)
+ mlx5_fc_query(mdev, accel_psp->rx_err_counter,
+ &stats->psp_rx_pkts_frame_err,
+ &stats->psp_rx_bytes_frame_err);
+
+ if (accel_psp->rx_bad_counter)
+ mlx5_fc_query(mdev, accel_psp->rx_bad_counter,
+ &stats->psp_rx_pkts_drop,
+ &stats->psp_rx_bytes_drop);
+}
+
+void mlx5_accel_psp_fs_cleanup_tx_tables(struct mlx5e_priv *priv)
+{
+ if (!priv->psp)
+ return;
+
+ accel_psp_fs_tx_ft_put(priv->psp->fs);
+}
+
+int mlx5_accel_psp_fs_init_tx_tables(struct mlx5e_priv *priv)
+{
+ if (!priv->psp)
+ return 0;
+
+ return accel_psp_fs_tx_ft_get(priv->psp->fs);
+}
+
+static void mlx5e_accel_psp_fs_cleanup(struct mlx5e_psp_fs *fs)
+{
+ accel_psp_fs_cleanup_rx(fs);
+ accel_psp_fs_cleanup_tx(fs);
+ kfree(fs);
+}
+
+static struct mlx5e_psp_fs *mlx5e_accel_psp_fs_init(struct mlx5e_priv *priv)
+{
+ struct mlx5e_psp_fs *fs;
+ int err = 0;
+
+ fs = kzalloc(sizeof(*fs), GFP_KERNEL);
+ if (!fs)
+ return ERR_PTR(-ENOMEM);
+
+ fs->mdev = priv->mdev;
+ err = accel_psp_fs_init_tx(fs);
+ if (err)
+ goto err_tx;
+
+ fs->fs = priv->fs;
+ err = accel_psp_fs_init_rx(fs);
+ if (err)
+ goto err_rx;
+
+ return fs;
+
+err_rx:
+ accel_psp_fs_cleanup_tx(fs);
+err_tx:
+ kfree(fs);
+ return ERR_PTR(err);
+}
+
+static int
+mlx5e_psp_set_config(struct psp_dev *psd, struct psp_dev_config *conf,
+ struct netlink_ext_ack *extack)
+{
+ return 0; /* TODO: this should actually do things to the device */
+}
+
+static int
+mlx5e_psp_generate_key_spi(struct mlx5_core_dev *mdev,
+ enum mlx5_psp_gen_spi_in_key_size keysz,
+ unsigned int keysz_bytes,
+ struct psp_key_parsed *key)
+{
+ u32 out[MLX5_ST_SZ_DW(psp_gen_spi_out) + MLX5_ST_SZ_DW(key_spi)] = {};
+ u32 in[MLX5_ST_SZ_DW(psp_gen_spi_in)] = {};
+ void *outkey;
+ int err;
+
+ WARN_ON_ONCE(keysz_bytes > PSP_MAX_KEY);
+
+ MLX5_SET(psp_gen_spi_in, in, opcode, MLX5_CMD_OP_PSP_GEN_SPI);
+ MLX5_SET(psp_gen_spi_in, in, key_size, keysz);
+ MLX5_SET(psp_gen_spi_in, in, num_of_spi, 1);
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ outkey = MLX5_ADDR_OF(psp_gen_spi_out, out, key_spi);
+ key->spi = cpu_to_be32(MLX5_GET(key_spi, outkey, spi));
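+	/* the key is returned right-aligned within the 32-byte key field */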
+ memcpy(key->key, MLX5_ADDR_OF(key_spi, outkey, key) + 32 - keysz_bytes,
+ keysz_bytes);
+
+ return 0;
+}
+
+static int
+mlx5e_psp_rx_spi_alloc(struct psp_dev *psd, u32 version,
+ struct psp_key_parsed *assoc,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_priv *priv = netdev_priv(psd->main_netdev);
+ enum mlx5_psp_gen_spi_in_key_size keysz;
+ u8 keysz_bytes;
+
+ switch (version) {
+ case PSP_VERSION_HDR0_AES_GCM_128:
+ keysz = MLX5_PSP_GEN_SPI_IN_KEY_SIZE_128;
+ keysz_bytes = 16;
+ break;
+ case PSP_VERSION_HDR0_AES_GCM_256:
+ keysz = MLX5_PSP_GEN_SPI_IN_KEY_SIZE_256;
+ keysz_bytes = 32;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return mlx5e_psp_generate_key_spi(priv->mdev, keysz, keysz_bytes, assoc);
+}
+
+struct psp_key {
+ u32 id;
+};
+
+static int mlx5e_psp_assoc_add(struct psp_dev *psd, struct psp_assoc *pas,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_priv *priv = netdev_priv(psd->main_netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct psp_key_parsed *tx = &pas->tx;
+ struct mlx5e_psp *psp = priv->psp;
+ struct psp_key *nkey;
+ int err;
+
+ mdev = priv->mdev;
+ nkey = (struct psp_key *)pas->drv_data;
+
+ err = mlx5_create_encryption_key(mdev, tx->key,
+ psp_key_size(pas->version),
+ MLX5_ACCEL_OBJ_PSP_KEY,
+ &nkey->id);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to create encryption key (err = %d)\n", err);
+ return err;
+ }
+
+ atomic_inc(&psp->tx_key_cnt);
+ return 0;
+}
+
+static void mlx5e_psp_assoc_del(struct psp_dev *psd, struct psp_assoc *pas)
+{
+ struct mlx5e_priv *priv = netdev_priv(psd->main_netdev);
+ struct mlx5e_psp *psp = priv->psp;
+ struct psp_key *nkey;
+
+ nkey = (struct psp_key *)pas->drv_data;
+ mlx5_destroy_encryption_key(priv->mdev, nkey->id);
+ atomic_dec(&psp->tx_key_cnt);
+}
+
+static int mlx5e_psp_rotate_key(struct mlx5_core_dev *mdev)
+{
+ u32 in[MLX5_ST_SZ_DW(psp_rotate_key_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(psp_rotate_key_out)];
+
+ MLX5_SET(psp_rotate_key_in, in, opcode,
+ MLX5_CMD_OP_PSP_ROTATE_KEY);
+
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+static int
+mlx5e_psp_key_rotate(struct psp_dev *psd, struct netlink_ext_ack *exack)
+{
+ struct mlx5e_priv *priv = netdev_priv(psd->main_netdev);
+
+ /* no support for protecting against external rotations */
+ psd->generation = 0;
+
+ return mlx5e_psp_rotate_key(priv->mdev);
+}
+
+static void
+mlx5e_psp_get_stats(struct psp_dev *psd, struct psp_dev_stats *stats)
+{
+ struct mlx5e_priv *priv = netdev_priv(psd->main_netdev);
+ struct mlx5e_psp_stats nstats;
+
+ mlx5e_accel_psp_fs_get_stats_fill(priv, &nstats);
+ stats->rx_packets = nstats.psp_rx_pkts;
+ stats->rx_bytes = nstats.psp_rx_bytes;
+ stats->rx_auth_fail = nstats.psp_rx_pkts_auth_fail;
+ stats->rx_error = nstats.psp_rx_pkts_frame_err;
+ stats->rx_bad = nstats.psp_rx_pkts_drop;
+ stats->tx_packets = nstats.psp_tx_pkts;
+ stats->tx_bytes = nstats.psp_tx_bytes;
+ stats->tx_error = atomic_read(&priv->psp->tx_drop);
+}
+
+static struct psp_dev_ops mlx5_psp_ops = {
+ .set_config = mlx5e_psp_set_config,
+ .rx_spi_alloc = mlx5e_psp_rx_spi_alloc,
+ .tx_key_add = mlx5e_psp_assoc_add,
+ .tx_key_del = mlx5e_psp_assoc_del,
+ .key_rotate = mlx5e_psp_key_rotate,
+ .get_stats = mlx5e_psp_get_stats,
+};
+
+void mlx5e_psp_unregister(struct mlx5e_priv *priv)
+{
+ if (!priv->psp || !priv->psp->psp)
+ return;
+
+ psp_dev_unregister(priv->psp->psp);
+}
+
+void mlx5e_psp_register(struct mlx5e_priv *priv)
+{
+ /* FW Caps missing */
+ if (!priv->psp)
+ return;
+
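+	/* per-association driver data holds the 32-bit encryption key id */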
+ priv->psp->caps.assoc_drv_spc = sizeof(u32);
+ priv->psp->caps.versions = 1 << PSP_VERSION_HDR0_AES_GCM_128;
+ if (MLX5_CAP_PSP(priv->mdev, psp_crypto_esp_aes_gcm_256_encrypt) &&
+ MLX5_CAP_PSP(priv->mdev, psp_crypto_esp_aes_gcm_256_decrypt))
+ priv->psp->caps.versions |= 1 << PSP_VERSION_HDR0_AES_GCM_256;
+
+ priv->psp->psp = psp_dev_create(priv->netdev, &mlx5_psp_ops,
+ &priv->psp->caps, NULL);
+ if (IS_ERR(priv->psp->psp))
+ mlx5_core_err(priv->mdev, "PSP failed to register due to %pe\n",
+ priv->psp->psp);
+}
+
+int mlx5e_psp_init(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_psp_fs *fs;
+ struct mlx5e_psp *psp;
+ int err;
+
+ if (!mlx5_is_psp_device(mdev)) {
+ mlx5_core_dbg(mdev, "PSP offload not supported\n");
+ return 0;
+ }
+
+ if (!MLX5_CAP_ETH(mdev, swp)) {
+ mlx5_core_dbg(mdev, "SWP not supported\n");
+ return 0;
+ }
+
+ if (!MLX5_CAP_ETH(mdev, swp_csum)) {
+ mlx5_core_dbg(mdev, "SWP checksum not supported\n");
+ return 0;
+ }
+
+ if (!MLX5_CAP_ETH(mdev, swp_csum_l4_partial)) {
+ mlx5_core_dbg(mdev, "SWP L4 partial checksum not supported\n");
+ return 0;
+ }
+
+ if (!MLX5_CAP_ETH(mdev, swp_lso)) {
+ mlx5_core_dbg(mdev, "PSP LSO not supported\n");
+ return 0;
+ }
+
+ psp = kzalloc(sizeof(*psp), GFP_KERNEL);
+ if (!psp)
+ return -ENOMEM;
+
+ priv->psp = psp;
+ fs = mlx5e_accel_psp_fs_init(priv);
+ if (IS_ERR(fs)) {
+ err = PTR_ERR(fs);
+ goto out_err;
+ }
+
+ psp->fs = fs;
+
+ mlx5_core_dbg(priv->mdev, "PSP attached to netdevice\n");
+ return 0;
+
+out_err:
+ priv->psp = NULL;
+ kfree(psp);
+ return err;
+}
+
+void mlx5e_psp_cleanup(struct mlx5e_priv *priv)
+{
+ struct mlx5e_psp *psp = priv->psp;
+
+ if (!psp)
+ return;
+
+ WARN_ON(atomic_read(&psp->tx_key_cnt));
+ mlx5e_accel_psp_fs_cleanup(psp->fs);
+ priv->psp = NULL;
+ kfree(psp);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.h
new file mode 100644
index 000000000000..6b62fef0d9a7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5E_ACCEL_PSP_H__
+#define __MLX5E_ACCEL_PSP_H__
+#if IS_ENABLED(CONFIG_MLX5_EN_PSP)
+#include <net/psp/types.h>
+#include "en.h"
+
+struct mlx5e_psp_stats {
+ u64 psp_rx_pkts;
+ u64 psp_rx_bytes;
+ u64 psp_rx_pkts_auth_fail;
+ u64 psp_rx_bytes_auth_fail;
+ u64 psp_rx_pkts_frame_err;
+ u64 psp_rx_bytes_frame_err;
+ u64 psp_rx_pkts_drop;
+ u64 psp_rx_bytes_drop;
+ u64 psp_tx_pkts;
+ u64 psp_tx_bytes;
+ u64 psp_tx_pkts_drop;
+ u64 psp_tx_bytes_drop;
+};
+
+struct mlx5e_psp {
+ struct psp_dev *psp;
+ struct psp_dev_caps caps;
+ struct mlx5e_psp_fs *fs;
+ atomic_t tx_key_cnt;
+ atomic_t tx_drop;
+};
+
+static inline bool mlx5_is_psp_device(struct mlx5_core_dev *mdev)
+{
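+	/* require the general PSP cap plus AES-GCM-128 crypto offload in both directions */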
+ if (!MLX5_CAP_GEN(mdev, psp))
+ return false;
+
+ if (!MLX5_CAP_PSP(mdev, psp_crypto_offload) ||
+ !MLX5_CAP_PSP(mdev, psp_crypto_esp_aes_gcm_128_encrypt) ||
+ !MLX5_CAP_PSP(mdev, psp_crypto_esp_aes_gcm_128_decrypt))
+ return false;
+
+ return true;
+}
+
+int mlx5_accel_psp_fs_init_rx_tables(struct mlx5e_priv *priv);
+void mlx5_accel_psp_fs_cleanup_rx_tables(struct mlx5e_priv *priv);
+int mlx5_accel_psp_fs_init_tx_tables(struct mlx5e_priv *priv);
+void mlx5_accel_psp_fs_cleanup_tx_tables(struct mlx5e_priv *priv);
+void mlx5e_psp_register(struct mlx5e_priv *priv);
+void mlx5e_psp_unregister(struct mlx5e_priv *priv);
+int mlx5e_psp_init(struct mlx5e_priv *priv);
+void mlx5e_psp_cleanup(struct mlx5e_priv *priv);
+#else
+static inline int mlx5_accel_psp_fs_init_rx_tables(struct mlx5e_priv *priv)
+{
+ return 0;
+}
+
+static inline void mlx5_accel_psp_fs_cleanup_rx_tables(struct mlx5e_priv *priv) { }
+static inline int mlx5_accel_psp_fs_init_tx_tables(struct mlx5e_priv *priv)
+{
+ return 0;
+}
+
+static inline void mlx5_accel_psp_fs_cleanup_tx_tables(struct mlx5e_priv *priv) { }
+static inline bool mlx5_is_psp_device(struct mlx5_core_dev *mdev)
+{
+ return false;
+}
+
+static inline void mlx5e_psp_register(struct mlx5e_priv *priv) { }
+static inline void mlx5e_psp_unregister(struct mlx5e_priv *priv) { }
+static inline int mlx5e_psp_init(struct mlx5e_priv *priv) { return 0; }
+static inline void mlx5e_psp_cleanup(struct mlx5e_priv *priv) { }
+#endif /* CONFIG_MLX5_EN_PSP */
+#endif /* __MLX5E_ACCEL_PSP_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.c
new file mode 100644
index 000000000000..c17ea0fcd8ef
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <net/protocol.h>
+#include <net/udp.h>
+#include <net/ip6_checksum.h>
+#include <net/psp/types.h>
+
+#include "en.h"
+#include "psp.h"
+#include "en_accel/psp_rxtx.h"
+#include "en_accel/psp.h"
+
+enum {
+ MLX5E_PSP_OFFLOAD_RX_SYNDROME_DECRYPTED,
+ MLX5E_PSP_OFFLOAD_RX_SYNDROME_AUTH_FAILED,
+ MLX5E_PSP_OFFLOAD_RX_SYNDROME_BAD_TRAILER,
+};
+
+static void mlx5e_psp_set_swp(struct sk_buff *skb,
+ struct mlx5e_accel_tx_psp_state *psp_st,
+ struct mlx5_wqe_eth_seg *eseg)
+{
+ /* Tunnel Mode:
+ * SWP: OutL3 InL3 InL4
+ * Pkt: MAC IP ESP IP L4
+ *
+ * Transport Mode:
+ * SWP: OutL3 OutL4
+ * Pkt: MAC IP ESP L4
+ *
+	 * Tunnel (VXLAN TCP/UDP) over Transport Mode
+ * SWP: OutL3 InL3 InL4
+ * Pkt: MAC IP ESP UDP VXLAN IP L4
+ */
+ u8 inner_ipproto = 0;
+ struct ethhdr *eth;
+
+ /* Shared settings */
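+	/* SWP offsets below are expressed in 2-byte words */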
+ eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
+ if (skb->protocol == htons(ETH_P_IPV6))
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
+
+ if (skb->inner_protocol_type == ENCAP_TYPE_IPPROTO) {
+ inner_ipproto = skb->inner_ipproto;
+ /* Set SWP additional flags for packet of type IP|UDP|PSP|[ TCP | UDP ] */
+ switch (inner_ipproto) {
+ case IPPROTO_UDP:
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
+ fallthrough;
+ case IPPROTO_TCP:
+ eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
+ break;
+ default:
+ break;
+ }
+ } else {
+		/* IP-in-IP tunneling, e.g. VXLAN */
+ if (skb->inner_protocol_type != ENCAP_TYPE_ETHER)
+ return;
+
+ eth = (struct ethhdr *)skb_inner_mac_header(skb);
+ switch (ntohs(eth->h_proto)) {
+ case ETH_P_IP:
+ inner_ipproto = ((struct iphdr *)((char *)skb->data +
+ skb_inner_network_offset(skb)))->protocol;
+ break;
+ case ETH_P_IPV6:
+ inner_ipproto = ((struct ipv6hdr *)((char *)skb->data +
+ skb_inner_network_offset(skb)))->nexthdr;
+ break;
+ default:
+ break;
+ }
+
+		/* Tunnel (VXLAN TCP/UDP) over transport-mode PSP, i.e. the PSP payload is a VXLAN tunnel */
+ switch (inner_ipproto) {
+ case IPPROTO_UDP:
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
+ fallthrough;
+ case IPPROTO_TCP:
+ eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
+ eseg->swp_inner_l4_offset =
+ (skb->csum_start + skb->head - skb->data) / 2;
+ if (skb->protocol == htons(ETH_P_IPV6))
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
+ break;
+ default:
+ break;
+ }
+
+ psp_st->inner_ipproto = inner_ipproto;
+ }
+}
+
+static bool mlx5e_psp_set_state(struct mlx5e_priv *priv,
+ struct sk_buff *skb,
+ struct mlx5e_accel_tx_psp_state *psp_st)
+{
+ struct psp_assoc *pas;
+ bool ret = false;
+
+ rcu_read_lock();
+ pas = psp_skb_get_assoc_rcu(skb);
+ if (!pas)
+ goto out;
+
+ ret = true;
+ psp_st->tailen = PSP_TRL_SIZE;
+ psp_st->spi = pas->tx.spi;
+ psp_st->ver = pas->version;
+ psp_st->keyid = *(u32 *)pas->drv_data;
+
+out:
+ rcu_read_unlock();
+ return ret;
+}
+
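+/* Returns true when the skb was consumed (dropped), false to continue normal RX handling */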
+bool mlx5e_psp_offload_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
+ struct mlx5_cqe64 *cqe)
+{
+ u32 psp_meta_data = be32_to_cpu(cqe->ft_metadata);
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ u16 dev_id = priv->psp->psp->id;
+ bool strip_icv = true;
+ u8 generation = 0;
+
+	/* TBD: report errors as SW counters to ethtool; any further handling? */
+ if (MLX5_PSP_METADATA_SYNDROME(psp_meta_data) != MLX5E_PSP_OFFLOAD_RX_SYNDROME_DECRYPTED)
+ goto drop;
+
+ if (psp_dev_rcv(skb, dev_id, generation, strip_icv))
+ goto drop;
+
+ skb->decrypted = 1;
+ return false;
+
+drop:
+ kfree_skb(skb);
+ return true;
+}
+
+void mlx5e_psp_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb,
+ struct mlx5e_accel_tx_psp_state *psp_st,
+ struct mlx5_wqe_eth_seg *eseg)
+{
+ if (!mlx5_is_psp_device(priv->mdev))
+ return;
+
+ if (unlikely(skb->protocol != htons(ETH_P_IP) &&
+ skb->protocol != htons(ETH_P_IPV6)))
+ return;
+
+ mlx5e_psp_set_swp(skb, psp_st, eseg);
+	/* Special workaround for PSP LSO on ConnectX-7 */
+ eseg->swp_outer_l3_offset = 0;
+ eseg->swp_inner_l3_offset = 0;
+
+ eseg->flow_table_metadata |= cpu_to_be32(psp_st->keyid);
+ eseg->trailer |= cpu_to_be32(MLX5_ETH_WQE_INSERT_TRAILER) |
+ cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_L4_ASSOC);
+}
+
+void mlx5e_psp_handle_tx_wqe(struct mlx5e_tx_wqe *wqe,
+ struct mlx5e_accel_tx_psp_state *psp_st,
+ struct mlx5_wqe_inline_seg *inlseg)
+{
+ inlseg->byte_count = cpu_to_be32(psp_st->tailen | MLX5_INLINE_SEG);
+}
+
+bool mlx5e_psp_handle_tx_skb(struct net_device *netdev,
+ struct sk_buff *skb,
+ struct mlx5e_accel_tx_psp_state *psp_st)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct net *net = sock_net(skb->sk);
+ const struct ipv6hdr *ip6;
+ struct tcphdr *th;
+
+ if (!mlx5e_psp_set_state(priv, skb, psp_st))
+ return true;
+
+	/* PSP-encapsulate the packet */
+ if (!psp_dev_encapsulate(net, skb, psp_st->spi, psp_st->ver, 0)) {
+ kfree_skb_reason(skb, SKB_DROP_REASON_PSP_OUTPUT);
+ atomic_inc(&priv->psp->tx_drop);
+ return false;
+ }
+ if (skb_is_gso(skb)) {
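+		/* refresh the inner TCP pseudo-header checksum (IPv6) after PSP encapsulation */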
+ ip6 = ipv6_hdr(skb);
+ th = inner_tcp_hdr(skb);
+
+ th->check = ~tcp_v6_check(skb_shinfo(skb)->gso_size + inner_tcp_hdrlen(skb), &ip6->saddr,
+ &ip6->daddr, 0);
+ }
+
+ return true;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.h
new file mode 100644
index 000000000000..70289c921bd6
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5E_PSP_RXTX_H__
+#define __MLX5E_PSP_RXTX_H__
+
+#include <linux/skbuff.h>
+#include <net/xfrm.h>
+#include <net/psp.h>
+#include "en.h"
+#include "en/txrx.h"
+
+/* Bits 31:30: PSP marker, bits 29:23: PSP syndrome, bits 22:0: PSP obj id */
+#define MLX5_PSP_METADATA_MARKER(metadata) ((((metadata) >> 30) & 0x3) == 0x3)
+#define MLX5_PSP_METADATA_SYNDROME(metadata) (((metadata) >> 23) & GENMASK(6, 0))
+#define MLX5_PSP_METADATA_HANDLE(metadata) ((metadata) & GENMASK(22, 0))
+
+struct mlx5e_accel_tx_psp_state {
+ u32 tailen;
+ u32 keyid;
+ __be32 spi;
+ u8 inner_ipproto;
+ u8 ver;
+};
+
+#ifdef CONFIG_MLX5_EN_PSP
+static inline bool mlx5e_psp_is_offload_state(struct mlx5e_accel_tx_psp_state *psp_state)
+{
+ return (psp_state->tailen != 0);
+}
+
+static inline bool mlx5e_psp_is_offload(struct sk_buff *skb, struct net_device *netdev)
+{
+ bool ret;
+
+ rcu_read_lock();
+ ret = !!psp_skb_get_assoc_rcu(skb);
+ rcu_read_unlock();
+ return ret;
+}
+
+bool mlx5e_psp_handle_tx_skb(struct net_device *netdev,
+ struct sk_buff *skb,
+ struct mlx5e_accel_tx_psp_state *psp_st);
+
+void mlx5e_psp_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb,
+ struct mlx5e_accel_tx_psp_state *psp_st,
+ struct mlx5_wqe_eth_seg *eseg);
+
+void mlx5e_psp_handle_tx_wqe(struct mlx5e_tx_wqe *wqe,
+ struct mlx5e_accel_tx_psp_state *psp_st,
+ struct mlx5_wqe_inline_seg *inlseg);
+
+static inline bool mlx5e_psp_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5e_accel_tx_psp_state *psp_st,
+ struct mlx5_wqe_eth_seg *eseg)
+{
+ u8 inner_ipproto;
+
+ if (!mlx5e_psp_is_offload_state(psp_st))
+ return false;
+
+ inner_ipproto = psp_st->inner_ipproto;
+ eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
+ if (inner_ipproto) {
+ eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM;
+ if (inner_ipproto == IPPROTO_TCP || inner_ipproto == IPPROTO_UDP)
+ eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM;
+ if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
+ sq->stats->csum_partial_inner++;
+ } else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+ eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM;
+ sq->stats->csum_partial_inner++;
+ }
+
+ return true;
+}
+
+static inline unsigned int mlx5e_psp_tx_ids_len(struct mlx5e_accel_tx_psp_state *psp_st)
+{
+ return psp_st->tailen;
+}
+
+static inline bool mlx5e_psp_is_rx_flow(struct mlx5_cqe64 *cqe)
+{
+ return MLX5_PSP_METADATA_MARKER(be32_to_cpu(cqe->ft_metadata));
+}
+
+bool mlx5e_psp_offload_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
+ struct mlx5_cqe64 *cqe);
+#else
+static inline bool mlx5e_psp_is_offload_state(struct mlx5e_accel_tx_psp_state *psp_state)
+{
+ return false;
+}
+
+static inline bool mlx5e_psp_is_offload(struct sk_buff *skb, struct net_device *netdev)
+{
+ return false;
+}
+
+static inline bool mlx5e_psp_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5e_accel_tx_psp_state *psp_st,
+ struct mlx5_wqe_eth_seg *eseg)
+{
+ return false;
+}
+
+static inline bool mlx5e_psp_is_rx_flow(struct mlx5_cqe64 *cqe)
+{
+ return false;
+}
+
+static inline bool mlx5e_psp_offload_handle_rx_skb(struct net_device *netdev,
+ struct sk_buff *skb,
+ struct mlx5_cqe64 *cqe)
+{
+ return false;
+}
+#endif /* CONFIG_MLX5_EN_PSP */
+#endif /* __MLX5E_PSP_RXTX_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index 6ed3a32b7e22..5a2ac7b6f260 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -30,6 +30,7 @@
* SOFTWARE.
*/
+#include "devlink.h"
#include "en.h"
#include "lib/crypto.h"
@@ -140,9 +141,22 @@ err_close_tises:
return err;
}
+static unsigned int
+mlx5e_get_devlink_param_num_doorbells(struct mlx5_core_dev *dev)
+{
+ const u32 param_id = DEVLINK_PARAM_GENERIC_ID_NUM_DOORBELLS;
+ struct devlink *devlink = priv_to_devlink(dev);
+ union devlink_param_value val;
+ int err;
+
+ err = devl_param_driverinit_value_get(devlink, param_id, &val);
+ return err ? MLX5_DEFAULT_NUM_DOORBELLS : val.vu32;
+}
+
int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev, bool create_tises)
{
struct mlx5e_hw_objs *res = &mdev->mlx5e_res.hw_objs;
+ unsigned int num_doorbells, i;
int err;
err = mlx5_core_alloc_pd(mdev, &res->pdn);
@@ -163,17 +177,30 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev, bool create_tises)
goto err_dealloc_transport_domain;
}
- err = mlx5_alloc_bfreg(mdev, &res->bfreg, false, false);
- if (err) {
- mlx5_core_err(mdev, "alloc bfreg failed, %d\n", err);
+ num_doorbells = min(mlx5e_get_devlink_param_num_doorbells(mdev),
+ mlx5e_get_max_num_channels(mdev));
+ res->bfregs = kcalloc(num_doorbells, sizeof(*res->bfregs), GFP_KERNEL);
+ if (!res->bfregs) {
+ err = -ENOMEM;
goto err_destroy_mkey;
}
+ for (i = 0; i < num_doorbells; i++) {
+ err = mlx5_alloc_bfreg(mdev, res->bfregs + i, false, false);
+ if (err) {
+ mlx5_core_warn(mdev,
+ "could only allocate %d/%d doorbells, err %d.\n",
+ i, num_doorbells, err);
+ break;
+ }
+ }
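+	/* keep only as many doorbells as were successfully allocated */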
+ res->num_bfregs = i;
+
if (create_tises) {
err = mlx5e_create_tises(mdev, res->tisn);
if (err) {
mlx5_core_err(mdev, "alloc tises failed, %d\n", err);
- goto err_destroy_bfreg;
+ goto err_destroy_bfregs;
}
res->tisn_valid = true;
}
@@ -183,15 +210,17 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev, bool create_tises)
mdev->mlx5e_res.dek_priv = mlx5_crypto_dek_init(mdev);
if (IS_ERR(mdev->mlx5e_res.dek_priv)) {
- mlx5_core_err(mdev, "crypto dek init failed, %ld\n",
- PTR_ERR(mdev->mlx5e_res.dek_priv));
+ mlx5_core_err(mdev, "crypto dek init failed, %pe\n",
+ mdev->mlx5e_res.dek_priv);
mdev->mlx5e_res.dek_priv = NULL;
}
return 0;
-err_destroy_bfreg:
- mlx5_free_bfreg(mdev, &res->bfreg);
+err_destroy_bfregs:
+ for (i = 0; i < res->num_bfregs; i++)
+ mlx5_free_bfreg(mdev, res->bfregs + i);
+ kfree(res->bfregs);
err_destroy_mkey:
mlx5_core_destroy_mkey(mdev, res->mkey);
err_dealloc_transport_domain:
@@ -209,52 +238,52 @@ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev)
mdev->mlx5e_res.dek_priv = NULL;
if (res->tisn_valid)
mlx5e_destroy_tises(mdev, res->tisn);
- mlx5_free_bfreg(mdev, &res->bfreg);
+ for (unsigned int i = 0; i < res->num_bfregs; i++)
+ mlx5_free_bfreg(mdev, res->bfregs + i);
+ kfree(res->bfregs);
mlx5_core_destroy_mkey(mdev, res->mkey);
mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
mlx5_core_dealloc_pd(mdev, res->pdn);
memset(res, 0, sizeof(*res));
}
-int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
- bool enable_mc_lb)
+int mlx5e_modify_tirs_lb(struct mlx5_core_dev *mdev, bool enable_uc_lb,
+ bool enable_mc_lb)
{
- struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_tir_builder *builder;
struct mlx5e_tir *tir;
- u8 lb_flags = 0;
- int err = 0;
- u32 tirn = 0;
- int inlen;
- void *in;
+ int err = 0;
- inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
- in = kvzalloc(inlen, GFP_KERNEL);
- if (!in)
+ builder = mlx5e_tir_builder_alloc(true);
+ if (!builder)
return -ENOMEM;
- if (enable_uc_lb)
- lb_flags = MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
-
- if (enable_mc_lb)
- lb_flags |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST;
-
- if (lb_flags)
- MLX5_SET(modify_tir_in, in, ctx.self_lb_block, lb_flags);
-
- MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
+ mlx5e_tir_builder_build_self_lb_block(builder, enable_uc_lb,
+ enable_mc_lb);
mutex_lock(&mdev->mlx5e_res.hw_objs.td.list_lock);
list_for_each_entry(tir, &mdev->mlx5e_res.hw_objs.td.tirs_list, list) {
- tirn = tir->tirn;
- err = mlx5_core_modify_tir(mdev, tirn, in);
- if (err)
+ err = mlx5e_tir_modify(tir, builder);
+ if (err) {
+ mlx5_core_err(mdev,
+ "modify tir(0x%x) enable_lb uc(%d) mc(%d) failed, %d\n",
+ mlx5e_tir_get_tirn(tir),
+ enable_uc_lb, enable_mc_lb, err);
break;
+ }
}
mutex_unlock(&mdev->mlx5e_res.hw_objs.td.list_lock);
- kvfree(in);
- if (err)
- netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err);
+ mlx5e_tir_builder_free(builder);
return err;
}
+
+int mlx5e_refresh_tirs(struct mlx5_core_dev *mdev, bool enable_uc_lb,
+ bool enable_mc_lb)
+{
+ if (MLX5_CAP_GEN(mdev, tis_tir_td_order))
+ return 0; /* refresh not needed */
+
+ return mlx5e_modify_tirs_lb(mdev, enable_uc_lb, enable_mc_lb);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 8705cffc747f..fddf7c207f8e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -31,14 +31,15 @@
*/
#include <linux/device.h>
#include <linux/netdevice.h>
+#include <linux/units.h>
#include "en.h"
#include "en/port.h"
#include "en/port_buffer.h"
#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
-#define MLX5E_100MB (100000)
-#define MLX5E_1GB (1000000)
+#define MLX5E_100MB_TO_KB (100 * MEGA / KILO)
+#define MLX5E_1GB_TO_KB (GIGA / KILO)
#define MLX5E_CEE_STATE_UP 1
#define MLX5E_CEE_STATE_DOWN 0
@@ -362,6 +363,7 @@ static int mlx5e_dcbnl_ieee_getpfc(struct net_device *dev,
static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
struct ieee_pfc *pfc)
{
+ u8 buffer_ownership = MLX5_BUF_OWNERSHIP_UNKNOWN;
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
u32 old_cable_len = priv->dcbx.cable_len;
@@ -389,7 +391,14 @@ static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
if (MLX5_BUFFER_SUPPORTED(mdev)) {
pfc_new.pfc_en = (changed & MLX5E_PORT_BUFFER_PFC) ? pfc->pfc_en : curr_pfc_en;
- if (priv->dcbx.manual_buffer)
+ ret = mlx5_query_port_buffer_ownership(mdev,
+ &buffer_ownership);
+ if (ret)
+ netdev_err(dev,
+ "%s, Failed to get buffer ownership: %d\n",
+ __func__, ret);
+
+ if (buffer_ownership == MLX5_BUF_OWNERSHIP_SW_OWNED)
ret = mlx5e_port_manual_buffer_config(priv, changed,
dev->mtu, &pfc_new,
NULL, NULL);
@@ -564,10 +573,10 @@ static int mlx5e_dcbnl_ieee_getmaxrate(struct net_device *netdev,
for (i = 0; i <= mlx5_max_tc(mdev); i++) {
switch (max_bw_unit[i]) {
case MLX5_100_MBPS_UNIT:
- maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_100MB;
+ maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_100MB_TO_KB;
break;
case MLX5_GBPS_UNIT:
- maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_1GB;
+ maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_1GB_TO_KB;
break;
case MLX5_BW_NO_LIMIT:
break;
@@ -587,32 +596,55 @@ static int mlx5e_dcbnl_ieee_setmaxrate(struct net_device *netdev,
struct mlx5_core_dev *mdev = priv->mdev;
u8 max_bw_value[IEEE_8021QAZ_MAX_TCS];
u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS];
- __u64 upper_limit_mbps = roundup(255 * MLX5E_100MB, MLX5E_1GB);
+ u64 upper_limit_100mbps;
+ u64 upper_limit_gbps;
int i;
+ struct {
+ int scale;
+ const char *units_str;
+ } units[] = {
+ [MLX5_100_MBPS_UNIT] = {
+ .scale = 100,
+ .units_str = "Mbps",
+ },
+ [MLX5_GBPS_UNIT] = {
+ .scale = 1,
+ .units_str = "Gbps",
+ },
+ };
memset(max_bw_value, 0, sizeof(max_bw_value));
memset(max_bw_unit, 0, sizeof(max_bw_unit));
+ upper_limit_100mbps = U8_MAX * MLX5E_100MB_TO_KB;
+ upper_limit_gbps = U8_MAX * MLX5E_1GB_TO_KB;
for (i = 0; i <= mlx5_max_tc(mdev); i++) {
if (!maxrate->tc_maxrate[i]) {
max_bw_unit[i] = MLX5_BW_NO_LIMIT;
continue;
}
- if (maxrate->tc_maxrate[i] < upper_limit_mbps) {
+ if (maxrate->tc_maxrate[i] <= upper_limit_100mbps) {
max_bw_value[i] = div_u64(maxrate->tc_maxrate[i],
- MLX5E_100MB);
+ MLX5E_100MB_TO_KB);
max_bw_value[i] = max_bw_value[i] ? max_bw_value[i] : 1;
max_bw_unit[i] = MLX5_100_MBPS_UNIT;
- } else {
+ } else if (maxrate->tc_maxrate[i] <= upper_limit_gbps) {
max_bw_value[i] = div_u64(maxrate->tc_maxrate[i],
- MLX5E_1GB);
+ MLX5E_1GB_TO_KB);
max_bw_unit[i] = MLX5_GBPS_UNIT;
+ } else {
+ netdev_err(netdev,
+ "tc_%d maxrate %llu Kbps exceeds limit %llu\n",
+ i, maxrate->tc_maxrate[i],
+ upper_limit_gbps);
+ return -EINVAL;
}
}
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- netdev_dbg(netdev, "%s: tc_%d <=> max_bw %d Gbps\n",
- __func__, i, max_bw_value[i]);
+ netdev_dbg(netdev, "%s: tc_%d <=> max_bw %u %s\n", __func__, i,
+ max_bw_value[i] * units[max_bw_unit[i]].scale,
+ units[max_bw_unit[i]].units_str);
}
return mlx5_modify_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit);
@@ -982,7 +1014,6 @@ static int mlx5e_dcbnl_setbuffer(struct net_device *dev,
if (!changed)
return 0;
- priv->dcbx.manual_buffer = true;
err = mlx5e_port_manual_buffer_config(priv, changed, dev->mtu, NULL,
buffer_size, prio2buffer);
return err;
@@ -1147,6 +1178,7 @@ static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
bool reset = true;
int err;
+ netdev_lock(priv->netdev);
mutex_lock(&priv->state_lock);
new_params = priv->channels.params;
@@ -1162,6 +1194,7 @@ static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
&trust_state, reset);
mutex_unlock(&priv->state_lock);
+ netdev_unlock(priv->netdev);
return err;
}
@@ -1250,7 +1283,6 @@ void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
priv->dcbx.cap |= DCB_CAP_DCBX_HOST;
priv->dcbx.port_buff_cell_sz = mlx5e_query_port_buffers_cell_size(priv);
- priv->dcbx.manual_buffer = false;
priv->dcbx.cable_len = MLX5E_DEFAULT_CABLE_LEN;
mlx5e_ets_init(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
index ca9cfbf57d8f..d1d629697e28 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
@@ -30,21 +30,22 @@
* SOFTWARE.
*/
-#include <linux/dim.h>
#include "en.h"
+#include "en/dim.h"
static void
mlx5e_complete_dim_work(struct dim *dim, struct dim_cq_moder moder,
struct mlx5_core_dev *mdev, struct mlx5_core_cq *mcq)
{
- mlx5_core_modify_cq_moderation(mdev, mcq, moder.usec, moder.pkts);
+ mlx5e_modify_cq_moderation(mdev, mcq, moder.usec, moder.pkts,
+ mlx5e_cq_period_mode(moder.cq_period_mode));
dim->state = DIM_START_MEASURE;
}
void mlx5e_rx_dim_work(struct work_struct *work)
{
struct dim *dim = container_of(work, struct dim, work);
- struct mlx5e_rq *rq = container_of(dim, struct mlx5e_rq, dim);
+ struct mlx5e_rq *rq = dim->priv;
struct dim_cq_moder cur_moder =
net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
@@ -54,9 +55,95 @@ void mlx5e_rx_dim_work(struct work_struct *work)
void mlx5e_tx_dim_work(struct work_struct *work)
{
struct dim *dim = container_of(work, struct dim, work);
- struct mlx5e_txqsq *sq = container_of(dim, struct mlx5e_txqsq, dim);
+ struct mlx5e_txqsq *sq = dim->priv;
struct dim_cq_moder cur_moder =
net_dim_get_tx_moderation(dim->mode, dim->profile_ix);
mlx5e_complete_dim_work(dim, cur_moder, sq->cq.mdev, &sq->cq.mcq);
}
+
+static struct dim *mlx5e_dim_enable(struct mlx5_core_dev *mdev,
+ void (*work_fun)(struct work_struct *), int cpu,
+ u8 cq_period_mode, struct mlx5_core_cq *mcq,
+ void *queue)
+{
+ struct dim *dim;
+ int err;
+
+ dim = kvzalloc_node(sizeof(*dim), GFP_KERNEL, cpu_to_node(cpu));
+ if (!dim)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_WORK(&dim->work, work_fun);
+
+ dim->mode = cq_period_mode;
+ dim->priv = queue;
+
+ err = mlx5e_modify_cq_period_mode(mdev, mcq, dim->mode);
+ if (err) {
+ kvfree(dim);
+ return ERR_PTR(err);
+ }
+
+ return dim;
+}
+
+static void mlx5e_dim_disable(struct dim *dim)
+{
+ cancel_work_sync(&dim->work);
+ kvfree(dim);
+}
+
+int mlx5e_dim_rx_change(struct mlx5e_rq *rq, bool enable)
+{
+ if (enable == !!rq->dim)
+ return 0;
+
+ if (enable) {
+ struct mlx5e_channel *c = rq->channel;
+ struct dim *dim;
+
+ dim = mlx5e_dim_enable(rq->mdev, mlx5e_rx_dim_work, c->cpu,
+ c->rx_cq_moder.cq_period_mode, &rq->cq.mcq, rq);
+ if (IS_ERR(dim))
+ return PTR_ERR(dim);
+
+ rq->dim = dim;
+
+ __set_bit(MLX5E_RQ_STATE_DIM, &rq->state);
+ } else {
+ __clear_bit(MLX5E_RQ_STATE_DIM, &rq->state);
+ synchronize_net();
+ mlx5e_dim_disable(rq->dim);
+ rq->dim = NULL;
+ }
+
+ return 0;
+}
+
+int mlx5e_dim_tx_change(struct mlx5e_txqsq *sq, bool enable)
+{
+ if (enable == !!sq->dim)
+ return 0;
+
+ if (enable) {
+ struct mlx5e_channel *c = sq->channel;
+ struct dim *dim;
+
+ dim = mlx5e_dim_enable(sq->mdev, mlx5e_tx_dim_work, c->cpu,
+ c->tx_cq_moder.cq_period_mode, &sq->cq.mcq, sq);
+ if (IS_ERR(dim))
+ return PTR_ERR(dim);
+
+ sq->dim = dim;
+
+ __set_bit(MLX5E_SQ_STATE_DIM, &sq->state);
+ } else {
+ __clear_bit(MLX5E_SQ_STATE_DIM, &sq->state);
+ synchronize_net();
+ mlx5e_dim_disable(sq->dim);
+ sq->dim = NULL;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 67a29826bb57..d3fef1e7e2f7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -30,15 +30,21 @@
* SOFTWARE.
*/
+#include <linux/dim.h>
#include <linux/ethtool_netlink.h>
+#include <net/netdev_queues.h>
#include "en.h"
+#include "en/channels.h"
+#include "en/dim.h"
#include "en/port.h"
#include "en/params.h"
#include "en/ptp.h"
#include "lib/clock.h"
#include "en/fs_ethtool.h"
+#define LANES_UNKNOWN 0
+
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
struct ethtool_drvinfo *drvinfo)
{
@@ -80,17 +86,15 @@ struct ptys2ethtool_config ptys2ext_ethtool_table[MLX5E_EXT_LINK_MODES_NUMBER];
({ \
struct ptys2ethtool_config *cfg; \
const unsigned int modes[] = { __VA_ARGS__ }; \
- unsigned int i, bit, idx; \
+ unsigned int i; \
cfg = &ptys2##table##_ethtool_table[reg_]; \
bitmap_zero(cfg->supported, \
__ETHTOOL_LINK_MODE_MASK_NBITS); \
bitmap_zero(cfg->advertised, \
__ETHTOOL_LINK_MODE_MASK_NBITS); \
for (i = 0 ; i < ARRAY_SIZE(modes) ; ++i) { \
- bit = modes[i] % 64; \
- idx = modes[i] / 64; \
- __set_bit(bit, &cfg->supported[idx]); \
- __set_bit(bit, &cfg->advertised[idx]); \
+ bitmap_set(cfg->supported, modes[i], 1); \
+ bitmap_set(cfg->advertised, modes[i], 1); \
} \
})
@@ -136,6 +140,10 @@ void mlx5e_build_ptys2ethtool_map(void)
ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_LR4, legacy,
ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100BASE_TX, legacy,
+ ETHTOOL_LINK_MODE_100baseT_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_T, legacy,
+ ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_T, legacy,
ETHTOOL_LINK_MODE_10000baseT_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_CR, legacy,
@@ -201,6 +209,12 @@ void mlx5e_build_ptys2ethtool_map(void)
ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT,
ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_400GAUI_8_400GBASE_CR8, ext,
+ ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GAUI_1_100GBASE_CR_KR, ext,
ETHTOOL_LINK_MODE_100000baseKR_Full_BIT,
ETHTOOL_LINK_MODE_100000baseSR_Full_BIT,
@@ -219,14 +233,45 @@ void mlx5e_build_ptys2ethtool_map(void)
ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT,
ETHTOOL_LINK_MODE_400000baseDR4_Full_BIT,
ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT);
-}
-
-static void mlx5e_ethtool_get_speed_arr(struct mlx5_core_dev *mdev,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_800GAUI_8_800GBASE_CR8_KR8, ext,
+ ETHTOOL_LINK_MODE_800000baseCR8_Full_BIT,
+ ETHTOOL_LINK_MODE_800000baseKR8_Full_BIT,
+ ETHTOOL_LINK_MODE_800000baseDR8_Full_BIT,
+ ETHTOOL_LINK_MODE_800000baseDR8_2_Full_BIT,
+ ETHTOOL_LINK_MODE_800000baseSR8_Full_BIT,
+ ETHTOOL_LINK_MODE_800000baseVR8_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_200GAUI_1_200GBASE_CR1_KR1, ext,
+ ETHTOOL_LINK_MODE_200000baseCR_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseDR_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseDR_2_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseSR_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseVR_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_400GAUI_2_400GBASE_CR2_KR2, ext,
+ ETHTOOL_LINK_MODE_400000baseCR2_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseKR2_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseDR2_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseDR2_2_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseSR2_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseVR2_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_800GAUI_4_800GBASE_CR4_KR4, ext,
+ ETHTOOL_LINK_MODE_800000baseCR4_Full_BIT,
+ ETHTOOL_LINK_MODE_800000baseKR4_Full_BIT,
+ ETHTOOL_LINK_MODE_800000baseDR4_Full_BIT,
+ ETHTOOL_LINK_MODE_800000baseDR4_2_Full_BIT,
+ ETHTOOL_LINK_MODE_800000baseSR4_Full_BIT,
+ ETHTOOL_LINK_MODE_800000baseVR4_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1600TAUI_8_1600TBASE_CR8_KR8, ext,
+ ETHTOOL_LINK_MODE_1600000baseCR8_Full_BIT,
+ ETHTOOL_LINK_MODE_1600000baseKR8_Full_BIT,
+ ETHTOOL_LINK_MODE_1600000baseDR8_Full_BIT,
+ ETHTOOL_LINK_MODE_1600000baseDR8_2_Full_BIT);
+}
+
+static void mlx5e_ethtool_get_speed_arr(bool ext,
struct ptys2ethtool_config **arr,
u32 *size)
{
- bool ext = mlx5_ptys_ext_supported(mdev);
-
*arr = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table;
*size = ext ? ARRAY_SIZE(ptys2ext_ethtool_table) :
ARRAY_SIZE(ptys2legacy_ethtool_table);
@@ -269,8 +314,7 @@ void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv, u32 stringset, u8 *data)
switch (stringset) {
case ETH_SS_PRIV_FLAGS:
for (i = 0; i < MLX5E_NUM_PFLAGS; i++)
- strcpy(data + i * ETH_GSTRING_LEN,
- mlx5e_priv_flags[i].name);
+ ethtool_puts(&data, mlx5e_priv_flags[i].name);
break;
case ETH_SS_TEST:
@@ -327,11 +371,6 @@ void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE;
param->rx_pending = 1 << priv->channels.params.log_rq_mtu_frames;
param->tx_pending = 1 << priv->channels.params.log_sq_size;
-
- kernel_param->tcp_data_split =
- (priv->channels.params.packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) ?
- ETHTOOL_TCP_DATA_SPLIT_ENABLED :
- ETHTOOL_TCP_DATA_SPLIT_DISABLED;
}
static void mlx5e_get_ringparam(struct net_device *dev,
@@ -344,36 +383,47 @@ static void mlx5e_get_ringparam(struct net_device *dev,
mlx5e_ethtool_get_ringparam(priv, param, kernel_param);
}
+static bool mlx5e_ethtool_set_tcp_data_split(struct mlx5e_priv *priv,
+ u8 tcp_data_split,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *dev = priv->netdev;
+
+ if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED &&
+ !(dev->features & NETIF_F_GRO_HW)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "TCP-data-split is not supported when GRO HW is disabled");
+ return false;
+ }
+
+ /* Might need to disable HW-GRO if it was kept on due to hds. */
+ if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_DISABLED &&
+ dev->cfg->hds_config == ETHTOOL_TCP_DATA_SPLIT_ENABLED)
+ netdev_update_features(priv->netdev);
+
+ return true;
+}
+
int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
- struct ethtool_ringparam *param)
+ struct ethtool_ringparam *param,
+ struct netlink_ext_ack *extack)
{
struct mlx5e_params new_params;
u8 log_rq_size;
u8 log_sq_size;
int err = 0;
- if (param->rx_jumbo_pending) {
- netdev_info(priv->netdev, "%s: rx_jumbo_pending not supported\n",
- __func__);
- return -EINVAL;
- }
- if (param->rx_mini_pending) {
- netdev_info(priv->netdev, "%s: rx_mini_pending not supported\n",
- __func__);
- return -EINVAL;
- }
-
if (param->rx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)) {
- netdev_info(priv->netdev, "%s: rx_pending (%d) < min (%d)\n",
- __func__, param->rx_pending,
- 1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE);
+ NL_SET_ERR_MSG_FMT_MOD(extack, "rx (%d) < min (%d)",
+ param->rx_pending,
+ 1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE);
return -EINVAL;
}
if (param->tx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)) {
- netdev_info(priv->netdev, "%s: tx_pending (%d) < min (%d)\n",
- __func__, param->tx_pending,
- 1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE);
+ NL_SET_ERR_MSG_FMT_MOD(extack, "tx (%d) < min (%d)",
+ param->tx_pending,
+ 1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE);
return -EINVAL;
}
@@ -399,6 +449,9 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
unlock:
mutex_unlock(&priv->state_lock);
+ if (!err)
+ netdev_update_features(priv->netdev);
+
return err;
}
@@ -409,7 +462,12 @@ static int mlx5e_set_ringparam(struct net_device *dev,
{
struct mlx5e_priv *priv = netdev_priv(dev);
- return mlx5e_ethtool_set_ringparam(priv, param);
+ if (!mlx5e_ethtool_set_tcp_data_split(priv,
+ kernel_param->tcp_data_split,
+ extack))
+ return -EINVAL;
+
+ return mlx5e_ethtool_set_ringparam(priv, param, extack);
}
void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
@@ -436,7 +494,6 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
unsigned int count = ch->combined_count;
struct mlx5e_params new_params;
bool arfs_enabled;
- int rss_cnt;
bool opened;
int err = 0;
@@ -490,17 +547,6 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
goto out;
}
- /* Don't allow changing the number of channels if non-default RSS contexts exist,
- * the kernel doesn't protect against set_channels operations that break them.
- */
- rss_cnt = mlx5e_rx_res_rss_cnt(priv->rx_res) - 1;
- if (rss_cnt) {
- err = -EINVAL;
- netdev_err(priv->netdev, "%s: Non-default RSS contexts exist (%d), cannot change the number of channels\n",
- __func__, rss_cnt);
- goto out;
- }
-
/* Don't allow changing the number of channels if MQPRIO mode channel offload is active,
* because it defines a partition over the channels queues.
*/
@@ -516,7 +562,7 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
- arfs_enabled = opened && (priv->netdev->features & NETIF_F_NTUPLE);
+ arfs_enabled = opened && mlx5e_fs_want_arfs(priv->netdev);
if (arfs_enabled)
mlx5e_arfs_disable(priv->fs);
@@ -548,27 +594,27 @@ static int mlx5e_set_channels(struct net_device *dev,
int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
struct ethtool_coalesce *coal,
- struct kernel_ethtool_coalesce *kernel_coal)
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
{
struct dim_cq_moder *rx_moder, *tx_moder;
- if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
+ if (!MLX5_CAP_GEN(priv->mdev, cq_moderation)) {
+ NL_SET_ERR_MSG_MOD(extack, "CQ moderation not supported");
return -EOPNOTSUPP;
+ }
rx_moder = &priv->channels.params.rx_cq_moderation;
coal->rx_coalesce_usecs = rx_moder->usec;
coal->rx_max_coalesced_frames = rx_moder->pkts;
coal->use_adaptive_rx_coalesce = priv->channels.params.rx_dim_enabled;
+ kernel_coal->use_cqe_mode_rx = priv->channels.params.rx_moder_use_cqe_mode;
tx_moder = &priv->channels.params.tx_cq_moderation;
coal->tx_coalesce_usecs = tx_moder->usec;
coal->tx_max_coalesced_frames = tx_moder->pkts;
coal->use_adaptive_tx_coalesce = priv->channels.params.tx_dim_enabled;
-
- kernel_coal->use_cqe_mode_rx =
- MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_BASED_MODER);
- kernel_coal->use_cqe_mode_tx =
- MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_CQE_BASED_MODER);
+ kernel_coal->use_cqe_mode_tx = priv->channels.params.tx_moder_use_cqe_mode;
return 0;
}
@@ -580,14 +626,76 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal);
+ return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal, extack);
+}
+
+static int mlx5e_ethtool_get_per_queue_coalesce(struct mlx5e_priv *priv, u32 queue,
+ struct ethtool_coalesce *coal)
+{
+ struct dim_cq_moder cur_moder;
+ struct mlx5e_channels *chs;
+ struct mlx5e_channel *c;
+
+ if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&priv->state_lock);
+
+ chs = &priv->channels;
+ if (chs->num <= queue) {
+ mutex_unlock(&priv->state_lock);
+ return -EINVAL;
+ }
+
+ c = chs->c[queue];
+
+ coal->use_adaptive_rx_coalesce = !!c->rq.dim;
+ if (coal->use_adaptive_rx_coalesce) {
+ cur_moder = net_dim_get_rx_moderation(c->rq.dim->mode,
+ c->rq.dim->profile_ix);
+
+ coal->rx_coalesce_usecs = cur_moder.usec;
+ coal->rx_max_coalesced_frames = cur_moder.pkts;
+ } else {
+ coal->rx_coalesce_usecs = c->rx_cq_moder.usec;
+ coal->rx_max_coalesced_frames = c->rx_cq_moder.pkts;
+ }
+
+ coal->use_adaptive_tx_coalesce = !!c->sq[0].dim;
+ if (coal->use_adaptive_tx_coalesce) {
+		/* NOTE: Only the DIM coalesce profile of the first channel is
+		 * reported; the current interface cannot expose this
+		 * information for every TC.
+		 */
+ cur_moder = net_dim_get_tx_moderation(c->sq[0].dim->mode,
+ c->sq[0].dim->profile_ix);
+
+ coal->tx_coalesce_usecs = cur_moder.usec;
+ coal->tx_max_coalesced_frames = cur_moder.pkts;
+
+ } else {
+ coal->tx_coalesce_usecs = c->tx_cq_moder.usec;
+ coal->tx_max_coalesced_frames = c->tx_cq_moder.pkts;
+ }
+
+ mutex_unlock(&priv->state_lock);
+
+ return 0;
+}
+
+int mlx5e_get_per_queue_coalesce(struct net_device *dev, u32 queue,
+ struct ethtool_coalesce *coal)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ return mlx5e_ethtool_get_per_queue_coalesce(priv, queue, coal);
}
#define MLX5E_MAX_COAL_TIME MLX5_MAX_CQ_PERIOD
#define MLX5E_MAX_COAL_FRAMES MLX5_MAX_CQ_COUNT
static void
-mlx5e_set_priv_channels_tx_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal)
+mlx5e_set_priv_channels_tx_coalesce(struct mlx5e_priv *priv, struct dim_cq_moder *moder)
{
int tc;
int i;
@@ -595,38 +703,35 @@ mlx5e_set_priv_channels_tx_coalesce(struct mlx5e_priv *priv, struct ethtool_coal
for (i = 0; i < priv->channels.num; ++i) {
struct mlx5e_channel *c = priv->channels.c[i];
struct mlx5_core_dev *mdev = c->mdev;
+ enum mlx5_cq_period_mode mode;
+
+ mode = mlx5e_cq_period_mode(moder->cq_period_mode);
+ c->tx_cq_moder = *moder;
for (tc = 0; tc < c->num_tc; tc++) {
- mlx5_core_modify_cq_moderation(mdev,
- &c->sq[tc].cq.mcq,
- coal->tx_coalesce_usecs,
- coal->tx_max_coalesced_frames);
+ mlx5e_modify_cq_moderation(mdev, &c->sq[tc].cq.mcq,
+ moder->usec, moder->pkts,
+ mode);
}
}
}
static void
-mlx5e_set_priv_channels_rx_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal)
+mlx5e_set_priv_channels_rx_coalesce(struct mlx5e_priv *priv, struct dim_cq_moder *moder)
{
int i;
for (i = 0; i < priv->channels.num; ++i) {
struct mlx5e_channel *c = priv->channels.c[i];
struct mlx5_core_dev *mdev = c->mdev;
+ enum mlx5_cq_period_mode mode;
- mlx5_core_modify_cq_moderation(mdev, &c->rq.cq.mcq,
- coal->rx_coalesce_usecs,
- coal->rx_max_coalesced_frames);
- }
-}
+ mode = mlx5e_cq_period_mode(moder->cq_period_mode);
+ c->rx_cq_moder = *moder;
-/* convert a boolean value of cq_mode to mlx5 period mode
- * true : MLX5_CQ_PERIOD_MODE_START_FROM_CQE
- * false : MLX5_CQ_PERIOD_MODE_START_FROM_EQE
- */
-static int cqe_mode_to_period_mode(bool val)
-{
- return val ? MLX5_CQ_PERIOD_MODE_START_FROM_CQE : MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
+ mlx5e_modify_cq_moderation(mdev, &c->rq.cq.mcq, moder->usec, moder->pkts,
+ mode);
+ }
}
int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
@@ -636,89 +741,108 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
{
struct dim_cq_moder *rx_moder, *tx_moder;
struct mlx5_core_dev *mdev = priv->mdev;
+ bool rx_dim_enabled, tx_dim_enabled;
struct mlx5e_params new_params;
bool reset_rx, reset_tx;
- bool reset = true;
u8 cq_period_mode;
int err = 0;
- if (!MLX5_CAP_GEN(mdev, cq_moderation))
+ if (!MLX5_CAP_GEN(mdev, cq_moderation) ||
+ !MLX5_CAP_GEN(mdev, cq_period_mode_modify)) {
+ NL_SET_ERR_MSG_MOD(extack, "CQ moderation not supported");
return -EOPNOTSUPP;
+ }
if (coal->tx_coalesce_usecs > MLX5E_MAX_COAL_TIME ||
coal->rx_coalesce_usecs > MLX5E_MAX_COAL_TIME) {
- netdev_info(priv->netdev, "%s: maximum coalesce time supported is %lu usecs\n",
- __func__, MLX5E_MAX_COAL_TIME);
+ NL_SET_ERR_MSG_FMT_MOD(
+ extack,
+ "Max coalesce time %lu usecs, tx-usecs (%u) rx-usecs (%u)",
+ MLX5E_MAX_COAL_TIME, coal->tx_coalesce_usecs,
+ coal->rx_coalesce_usecs);
return -ERANGE;
}
if (coal->tx_max_coalesced_frames > MLX5E_MAX_COAL_FRAMES ||
coal->rx_max_coalesced_frames > MLX5E_MAX_COAL_FRAMES) {
- netdev_info(priv->netdev, "%s: maximum coalesced frames supported is %lu\n",
- __func__, MLX5E_MAX_COAL_FRAMES);
+ NL_SET_ERR_MSG_FMT_MOD(
+ extack,
+ "Max coalesce frames %lu, tx-frames (%u) rx-frames (%u)",
+ MLX5E_MAX_COAL_FRAMES, coal->tx_max_coalesced_frames,
+ coal->rx_max_coalesced_frames);
return -ERANGE;
}
if ((kernel_coal->use_cqe_mode_rx || kernel_coal->use_cqe_mode_tx) &&
!MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe)) {
- NL_SET_ERR_MSG_MOD(extack, "cqe_mode_rx/tx is not supported on this device");
+ NL_SET_ERR_MSG_MOD(extack, "cqe-mode-rx/tx is not supported on this device");
return -EOPNOTSUPP;
}
+ rx_dim_enabled = !!coal->use_adaptive_rx_coalesce;
+ tx_dim_enabled = !!coal->use_adaptive_tx_coalesce;
+
mutex_lock(&priv->state_lock);
new_params = priv->channels.params;
- rx_moder = &new_params.rx_cq_moderation;
- rx_moder->usec = coal->rx_coalesce_usecs;
- rx_moder->pkts = coal->rx_max_coalesced_frames;
- new_params.rx_dim_enabled = !!coal->use_adaptive_rx_coalesce;
+ cq_period_mode = mlx5e_dim_cq_period_mode(kernel_coal->use_cqe_mode_rx);
+ reset_rx = mlx5e_reset_rx_channels_moderation(&priv->channels, cq_period_mode,
+ rx_dim_enabled, false);
+ MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_BASED_MODER, cq_period_mode);
- tx_moder = &new_params.tx_cq_moderation;
- tx_moder->usec = coal->tx_coalesce_usecs;
- tx_moder->pkts = coal->tx_max_coalesced_frames;
- new_params.tx_dim_enabled = !!coal->use_adaptive_tx_coalesce;
+ cq_period_mode = mlx5e_dim_cq_period_mode(kernel_coal->use_cqe_mode_tx);
+ reset_tx = mlx5e_reset_tx_channels_moderation(&priv->channels, cq_period_mode,
+ tx_dim_enabled, false);
+ MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_TX_CQE_BASED_MODER, cq_period_mode);
- reset_rx = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled;
- reset_tx = !!coal->use_adaptive_tx_coalesce != priv->channels.params.tx_dim_enabled;
+ reset_rx |= rx_dim_enabled != new_params.rx_dim_enabled;
+ reset_tx |= tx_dim_enabled != new_params.tx_dim_enabled;
- cq_period_mode = cqe_mode_to_period_mode(kernel_coal->use_cqe_mode_rx);
- if (cq_period_mode != rx_moder->cq_period_mode) {
- mlx5e_set_rx_cq_mode_params(&new_params, cq_period_mode);
- reset_rx = true;
- }
+ /* Solely used for global ethtool get coalesce */
+ rx_moder = &new_params.rx_cq_moderation;
+ new_params.rx_dim_enabled = rx_dim_enabled;
+ new_params.rx_moder_use_cqe_mode = kernel_coal->use_cqe_mode_rx;
- cq_period_mode = cqe_mode_to_period_mode(kernel_coal->use_cqe_mode_tx);
- if (cq_period_mode != tx_moder->cq_period_mode) {
- mlx5e_set_tx_cq_mode_params(&new_params, cq_period_mode);
- reset_tx = true;
- }
+ tx_moder = &new_params.tx_cq_moderation;
+ new_params.tx_dim_enabled = tx_dim_enabled;
+ new_params.tx_moder_use_cqe_mode = kernel_coal->use_cqe_mode_tx;
if (reset_rx) {
- u8 mode = MLX5E_GET_PFLAG(&new_params,
- MLX5E_PFLAG_RX_CQE_BASED_MODER);
+ mlx5e_channels_rx_change_dim(&priv->channels, false);
+ mlx5e_reset_rx_moderation(rx_moder, new_params.rx_moder_use_cqe_mode,
+ rx_dim_enabled);
+
+ mlx5e_set_priv_channels_rx_coalesce(priv, rx_moder);
+ } else if (!rx_dim_enabled) {
+ rx_moder->usec = coal->rx_coalesce_usecs;
+ rx_moder->pkts = coal->rx_max_coalesced_frames;
- mlx5e_reset_rx_moderation(&new_params, mode);
+ mlx5e_set_priv_channels_rx_coalesce(priv, rx_moder);
}
+
if (reset_tx) {
- u8 mode = MLX5E_GET_PFLAG(&new_params,
- MLX5E_PFLAG_TX_CQE_BASED_MODER);
+ mlx5e_channels_tx_change_dim(&priv->channels, false);
+ mlx5e_reset_tx_moderation(tx_moder, new_params.tx_moder_use_cqe_mode,
+ tx_dim_enabled);
- mlx5e_reset_tx_moderation(&new_params, mode);
- }
+ mlx5e_set_priv_channels_tx_coalesce(priv, tx_moder);
+ } else if (!tx_dim_enabled) {
+ tx_moder->usec = coal->tx_coalesce_usecs;
+ tx_moder->pkts = coal->tx_max_coalesced_frames;
- /* If DIM state hasn't changed, it's possible to modify interrupt
- * moderation parameters on the fly, even if the channels are open.
- */
- if (!reset_rx && !reset_tx && test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- if (!coal->use_adaptive_rx_coalesce)
- mlx5e_set_priv_channels_rx_coalesce(priv, coal);
- if (!coal->use_adaptive_tx_coalesce)
- mlx5e_set_priv_channels_tx_coalesce(priv, coal);
- reset = false;
+ mlx5e_set_priv_channels_tx_coalesce(priv, tx_moder);
}
- err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, reset);
+ /* DIM enable/disable Rx and Tx channels */
+ err = mlx5e_channels_rx_change_dim(&priv->channels, rx_dim_enabled);
+ if (err)
+ goto state_unlock;
+ err = mlx5e_channels_tx_change_dim(&priv->channels, tx_dim_enabled);
+ if (err)
+ goto state_unlock;
+ err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, false);
+state_unlock:
mutex_unlock(&priv->state_lock);
return err;
}
@@ -733,37 +857,101 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
return mlx5e_ethtool_set_coalesce(priv, coal, kernel_coal, extack);
}
-static void ptys2ethtool_supported_link(struct mlx5_core_dev *mdev,
- unsigned long *supported_modes,
- u32 eth_proto_cap)
+static int mlx5e_ethtool_set_per_queue_coalesce(struct mlx5e_priv *priv, u32 queue,
+ struct ethtool_coalesce *coal)
{
- unsigned long proto_cap = eth_proto_cap;
- struct ptys2ethtool_config *table;
- u32 max_size;
- int proto;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ bool rx_dim_enabled, tx_dim_enabled;
+ struct mlx5e_channels *chs;
+ struct mlx5e_channel *c;
+ int err = 0;
+ int tc;
- mlx5e_ethtool_get_speed_arr(mdev, &table, &max_size);
- for_each_set_bit(proto, &proto_cap, max_size)
- bitmap_or(supported_modes, supported_modes,
- table[proto].supported,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ if (!MLX5_CAP_GEN(mdev, cq_moderation))
+ return -EOPNOTSUPP;
+
+ if (coal->tx_coalesce_usecs > MLX5E_MAX_COAL_TIME ||
+ coal->rx_coalesce_usecs > MLX5E_MAX_COAL_TIME) {
+ netdev_info(priv->netdev, "%s: maximum coalesce time supported is %lu usecs\n",
+ __func__, MLX5E_MAX_COAL_TIME);
+ return -ERANGE;
+ }
+
+ if (coal->tx_max_coalesced_frames > MLX5E_MAX_COAL_FRAMES ||
+ coal->rx_max_coalesced_frames > MLX5E_MAX_COAL_FRAMES) {
+ netdev_info(priv->netdev, "%s: maximum coalesced frames supported is %lu\n",
+ __func__, MLX5E_MAX_COAL_FRAMES);
+ return -ERANGE;
+ }
+
+ rx_dim_enabled = !!coal->use_adaptive_rx_coalesce;
+ tx_dim_enabled = !!coal->use_adaptive_tx_coalesce;
+
+ mutex_lock(&priv->state_lock);
+
+ chs = &priv->channels;
+ if (chs->num <= queue) {
+ mutex_unlock(&priv->state_lock);
+ return -EINVAL;
+ }
+
+ c = chs->c[queue];
+
+ err = mlx5e_dim_rx_change(&c->rq, rx_dim_enabled);
+ if (err)
+ goto state_unlock;
+
+ for (tc = 0; tc < c->num_tc; tc++) {
+ err = mlx5e_dim_tx_change(&c->sq[tc], tx_dim_enabled);
+ if (err)
+ goto state_unlock;
+ }
+
+ if (!rx_dim_enabled) {
+ c->rx_cq_moder.usec = coal->rx_coalesce_usecs;
+ c->rx_cq_moder.pkts = coal->rx_max_coalesced_frames;
+
+ mlx5_core_modify_cq_moderation(mdev, &c->rq.cq.mcq,
+ coal->rx_coalesce_usecs,
+ coal->rx_max_coalesced_frames);
+ }
+
+ if (!tx_dim_enabled) {
+ c->tx_cq_moder.usec = coal->tx_coalesce_usecs;
+ c->tx_cq_moder.pkts = coal->tx_max_coalesced_frames;
+
+ for (tc = 0; tc < c->num_tc; tc++)
+ mlx5_core_modify_cq_moderation(mdev, &c->sq[tc].cq.mcq,
+ coal->tx_coalesce_usecs,
+ coal->tx_max_coalesced_frames);
+ }
+
+state_unlock:
+ mutex_unlock(&priv->state_lock);
+ return err;
+}
+
+int mlx5e_set_per_queue_coalesce(struct net_device *dev, u32 queue,
+ struct ethtool_coalesce *coal)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ return mlx5e_ethtool_set_per_queue_coalesce(priv, queue, coal);
}
-static void ptys2ethtool_adver_link(unsigned long *advertising_modes,
- u32 eth_proto_cap, bool ext)
+static void ptys2ethtool_process_link(u32 eth_eproto, bool ext, bool advertised,
+ unsigned long *modes)
{
- unsigned long proto_cap = eth_proto_cap;
+ unsigned long eproto = eth_eproto;
struct ptys2ethtool_config *table;
u32 max_size;
int proto;
- table = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table;
- max_size = ext ? ARRAY_SIZE(ptys2ext_ethtool_table) :
- ARRAY_SIZE(ptys2legacy_ethtool_table);
-
- for_each_set_bit(proto, &proto_cap, max_size)
- bitmap_or(advertising_modes, advertising_modes,
- table[proto].advertised,
+ mlx5e_ethtool_get_speed_arr(ext, &table, &max_size);
+ for_each_set_bit(proto, &eproto, max_size)
+ bitmap_or(modes, modes,
+ advertised ?
+ table[proto].advertised : table[proto].supported,
__ETHTOOL_LINK_MODE_MASK_NBITS);
}
@@ -773,6 +961,7 @@ static const u32 pplm_fec_2_ethtool[] = {
[MLX5E_FEC_RS_528_514] = ETHTOOL_FEC_RS,
[MLX5E_FEC_RS_544_514] = ETHTOOL_FEC_RS,
[MLX5E_FEC_LLRS_272_257_1] = ETHTOOL_FEC_LLRS,
+ [MLX5E_FEC_RS_544_514_INTERLEAVED_QUAD] = ETHTOOL_FEC_RS,
};
static u32 pplm2ethtool_fec(u_long fec_mode, unsigned long size)
@@ -916,50 +1105,51 @@ static void ptys2ethtool_supported_advertised_port(struct mlx5_core_dev *mdev,
}
}
-static void get_speed_duplex(struct net_device *netdev,
- u32 eth_proto_oper, bool force_legacy,
- u16 data_rate_oper,
- struct ethtool_link_ksettings *link_ksettings)
+static void get_link_properties(struct net_device *netdev,
+ u32 eth_proto_oper, bool force_legacy,
+ u16 data_rate_oper,
+ struct ethtool_link_ksettings *link_ksettings)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- u32 speed = SPEED_UNKNOWN;
+ const struct mlx5_link_info *info;
u8 duplex = DUPLEX_UNKNOWN;
+ u32 speed = SPEED_UNKNOWN;
+ u32 lanes = LANES_UNKNOWN;
if (!netif_carrier_ok(netdev))
goto out;
- speed = mlx5_port_ptys2speed(priv->mdev, eth_proto_oper, force_legacy);
- if (!speed) {
- if (data_rate_oper)
- speed = 100 * data_rate_oper;
- else
- speed = SPEED_UNKNOWN;
- goto out;
- }
-
- duplex = DUPLEX_FULL;
+ info = mlx5_port_ptys2info(priv->mdev, eth_proto_oper, force_legacy);
+ if (info) {
+ speed = info->speed;
+ lanes = info->lanes;
+ duplex = DUPLEX_FULL;
+ } else if (data_rate_oper)
+ speed = 100 * data_rate_oper;
out:
- link_ksettings->base.speed = speed;
link_ksettings->base.duplex = duplex;
+ link_ksettings->base.speed = speed;
+ link_ksettings->lanes = lanes;
}
static void get_supported(struct mlx5_core_dev *mdev, u32 eth_proto_cap,
struct ethtool_link_ksettings *link_ksettings)
{
unsigned long *supported = link_ksettings->link_modes.supported;
- ptys2ethtool_supported_link(mdev, supported, eth_proto_cap);
+ bool ext = mlx5_ptys_ext_supported(mdev);
+
+ ptys2ethtool_process_link(eth_proto_cap, ext, false, supported);
ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause);
}
-static void get_advertising(u32 eth_proto_cap, u8 tx_pause, u8 rx_pause,
+static void get_advertising(u32 eth_proto_admin, u8 tx_pause, u8 rx_pause,
struct ethtool_link_ksettings *link_ksettings,
bool ext)
{
unsigned long *advertising = link_ksettings->link_modes.advertising;
- ptys2ethtool_adver_link(advertising, eth_proto_cap, ext);
-
+ ptys2ethtool_process_link(eth_proto_admin, ext, true, advertising);
if (rx_pause)
ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause);
if (tx_pause ^ rx_pause)
@@ -1015,11 +1205,11 @@ static void get_lp_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_lp,
unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising;
bool ext = mlx5_ptys_ext_supported(mdev);
- ptys2ethtool_adver_link(lp_advertising, eth_proto_lp, ext);
+ ptys2ethtool_process_link(eth_proto_lp, ext, true, lp_advertising);
}
-int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
- struct ethtool_link_ksettings *link_ksettings)
+static int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
+ struct ethtool_link_ksettings *link_ksettings)
{
struct mlx5_core_dev *mdev = priv->mdev;
u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {};
@@ -1037,7 +1227,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
bool ext;
int err;
- err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1);
+ err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1, 0);
if (err) {
netdev_err(priv->netdev, "%s: query port ptys failed: %d\n",
__func__, err);
@@ -1077,8 +1267,8 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
get_supported(mdev, eth_proto_cap, link_ksettings);
get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings,
admin_ext);
- get_speed_duplex(priv->netdev, eth_proto_oper, !admin_ext,
- data_rate_oper, link_ksettings);
+ get_link_properties(priv->netdev, eth_proto_oper, !admin_ext,
+ data_rate_oper, link_ksettings);
eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
connector_type = connector_type < MLX5E_CONNECTOR_TYPE_NUMBER ?
@@ -1141,7 +1331,8 @@ static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes)
u32 i, ptys_modes = 0;
for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
- if (*ptys2legacy_ethtool_table[i].advertised == 0)
+ if (bitmap_empty(ptys2legacy_ethtool_table[i].advertised,
+ __ETHTOOL_LINK_MODE_MASK_NBITS))
continue;
if (bitmap_intersects(ptys2legacy_ethtool_table[i].advertised,
link_modes,
@@ -1155,18 +1346,18 @@ static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes)
static u32 mlx5e_ethtool2ptys_ext_adver_link(const unsigned long *link_modes)
{
u32 i, ptys_modes = 0;
- unsigned long modes[2];
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(modes);
for (i = 0; i < MLX5E_EXT_LINK_MODES_NUMBER; ++i) {
- if (ptys2ext_ethtool_table[i].advertised[0] == 0 &&
- ptys2ext_ethtool_table[i].advertised[1] == 0)
+ if (bitmap_empty(ptys2ext_ethtool_table[i].advertised,
+ __ETHTOOL_LINK_MODE_MASK_NBITS))
continue;
- memset(modes, 0, sizeof(modes));
+ bitmap_zero(modes, __ETHTOOL_LINK_MODE_MASK_NBITS);
bitmap_and(modes, ptys2ext_ethtool_table[i].advertised,
link_modes, __ETHTOOL_LINK_MODE_MASK_NBITS);
- if (modes[0] == ptys2ext_ethtool_table[i].advertised[0] &&
- modes[1] == ptys2ext_ethtool_table[i].advertised[1])
+ if (bitmap_equal(modes, ptys2ext_ethtool_table[i].advertised,
+ __ETHTOOL_LINK_MODE_MASK_NBITS))
ptys_modes |= MLX5E_PROT_MASK(i);
}
return ptys_modes;
@@ -1182,28 +1373,22 @@ static bool ext_link_mode_requested(const unsigned long *adver)
return bitmap_intersects(modes, adver, __ETHTOOL_LINK_MODE_MASK_NBITS);
}
-static bool ext_requested(u8 autoneg, const unsigned long *adver, bool ext_supported)
-{
- bool ext_link_mode = ext_link_mode_requested(adver);
-
- return autoneg == AUTONEG_ENABLE ? ext_link_mode : ext_supported;
-}
-
-int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
- const struct ethtool_link_ksettings *link_ksettings)
+static int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
+ const struct ethtool_link_ksettings *link_ksettings)
{
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_port_eth_proto eproto;
+ struct mlx5_link_info info = {};
const unsigned long *adver;
bool an_changes = false;
u8 an_disable_admin;
bool ext_supported;
+ bool ext_requested;
u8 an_disable_cap;
bool an_disable;
u32 link_modes;
u8 an_status;
u8 autoneg;
- u32 speed;
bool ext;
int err;
@@ -1211,13 +1396,15 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
adver = link_ksettings->link_modes.advertising;
autoneg = link_ksettings->base.autoneg;
- speed = link_ksettings->base.speed;
+ info.speed = link_ksettings->base.speed;
+ info.lanes = link_ksettings->lanes;
ext_supported = mlx5_ptys_ext_supported(mdev);
- ext = ext_requested(autoneg, adver, ext_supported);
- if (!ext_supported && ext)
+ ext_requested = ext_link_mode_requested(adver);
+ if (!ext_supported && ext_requested)
return -EOPNOTSUPP;
+ ext = autoneg == AUTONEG_ENABLE ? ext_requested : ext_supported;
ethtool2ptys_adver_func = ext ? mlx5e_ethtool2ptys_ext_adver_link :
mlx5e_ethtool2ptys_adver_link;
err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto);
@@ -1227,7 +1414,7 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
goto out;
}
link_modes = autoneg == AUTONEG_ENABLE ? ethtool2ptys_adver_func(adver) :
- mlx5_port_speed2linkmodes(mdev, speed, !ext);
+ mlx5_port_info2linkmodes(mdev, &info, !ext);
err = mlx5e_speed_validate(priv->netdev, ext, link_modes, autoneg);
if (err)
@@ -1251,7 +1438,12 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
if (!an_changes && link_modes == eproto.admin)
goto out;
- mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext);
+ err = mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext);
+ if (err) {
+ netdev_err(priv->netdev, "%s: failed to set ptys reg: %d\n", __func__, err);
+ goto out;
+ }
+
mlx5_toggle_port_link(mdev);
out:
@@ -1290,63 +1482,150 @@ static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev)
return mlx5e_ethtool_get_rxfh_indir_size(priv);
}
-int mlx5e_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
+static int mlx5e_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- u32 rss_context = rxfh->rss_context;
- int err;
+ bool symmetric;
mutex_lock(&priv->state_lock);
- err = mlx5e_rx_res_rss_get_rxfh(priv->rx_res, rss_context,
- rxfh->indir, rxfh->key, &rxfh->hfunc);
+ mlx5e_rx_res_rss_get_rxfh(priv->rx_res, 0, rxfh->indir, rxfh->key,
+ &rxfh->hfunc, &symmetric);
mutex_unlock(&priv->state_lock);
- return err;
+
+ if (symmetric)
+ rxfh->input_xfrm = RXH_XFRM_SYM_OR_XOR;
+
+ return 0;
}
-int mlx5e_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh,
- struct netlink_ext_ack *extack)
+static int mlx5e_rxfh_hfunc_check(struct mlx5e_priv *priv,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
- u32 *rss_context = &rxfh->rss_context;
- u8 hfunc = rxfh->hfunc;
unsigned int count;
- int err;
-
- mutex_lock(&priv->state_lock);
count = priv->channels.params.num_channels;
- if (hfunc == ETH_RSS_HASH_XOR) {
+ if (rxfh->hfunc == ETH_RSS_HASH_XOR) {
unsigned int xor8_max_channels = mlx5e_rqt_max_num_channels_allowed_for_xor8();
if (count > xor8_max_channels) {
- err = -EINVAL;
- netdev_err(priv->netdev, "%s: Cannot set RSS hash function to XOR, current number of channels (%d) exceeds the maximum allowed for XOR8 RSS hfunc (%d)\n",
- __func__, count, xor8_max_channels);
- goto unlock;
+ NL_SET_ERR_MSG_FMT_MOD(
+ extack,
+ "Number of channels (%u) exceeds the max for XOR8 RSS (%u)",
+ count, xor8_max_channels);
+ return -EINVAL;
}
}
- if (*rss_context && rxfh->rss_delete) {
- err = mlx5e_rx_res_rss_destroy(priv->rx_res, *rss_context);
+ return 0;
+}
+
+static int mlx5e_set_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ bool symmetric = rxfh->input_xfrm == RXH_XFRM_SYM_OR_XOR;
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ u8 hfunc = rxfh->hfunc;
+ int err;
+
+ mutex_lock(&priv->state_lock);
+
+ err = mlx5e_rxfh_hfunc_check(priv, rxfh, extack);
+ if (err)
goto unlock;
- }
- if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
- err = mlx5e_rx_res_rss_init(priv->rx_res, rss_context, count);
- if (err)
- goto unlock;
- }
+ err = mlx5e_rx_res_rss_set_rxfh(priv->rx_res, rxfh->rss_context,
+ rxfh->indir, rxfh->key,
+ hfunc == ETH_RSS_HASH_NO_CHANGE ? NULL : &hfunc,
+ rxfh->input_xfrm == RXH_XFRM_NO_CHANGE ? NULL : &symmetric);
- err = mlx5e_rx_res_rss_set_rxfh(priv->rx_res, *rss_context,
+unlock:
+ mutex_unlock(&priv->state_lock);
+ return err;
+}
+
+static int mlx5e_create_rxfh_context(struct net_device *dev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ bool symmetric = rxfh->input_xfrm == RXH_XFRM_SYM_OR_XOR;
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ u8 hfunc = rxfh->hfunc;
+ int err;
+
+ mutex_lock(&priv->state_lock);
+
+ err = mlx5e_rxfh_hfunc_check(priv, rxfh, extack);
+ if (err)
+ goto unlock;
+
+ err = mlx5e_rx_res_rss_init(priv->rx_res, rxfh->rss_context,
+ priv->channels.params.num_channels);
+ if (err)
+ goto unlock;
+
+ err = mlx5e_rx_res_rss_set_rxfh(priv->rx_res, rxfh->rss_context,
rxfh->indir, rxfh->key,
- hfunc == ETH_RSS_HASH_NO_CHANGE ? NULL : &hfunc);
+ hfunc == ETH_RSS_HASH_NO_CHANGE ? NULL : &hfunc,
+ rxfh->input_xfrm == RXH_XFRM_NO_CHANGE ? NULL : &symmetric);
+ if (err)
+ goto unlock;
+
+ mlx5e_rx_res_rss_get_rxfh(priv->rx_res, rxfh->rss_context,
+ ethtool_rxfh_context_indir(ctx),
+ ethtool_rxfh_context_key(ctx),
+ &ctx->hfunc, &symmetric);
+ if (symmetric)
+ ctx->input_xfrm = RXH_XFRM_SYM_OR_XOR;
unlock:
mutex_unlock(&priv->state_lock);
return err;
}
+static int mlx5e_modify_rxfh_context(struct net_device *dev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ bool symmetric = rxfh->input_xfrm == RXH_XFRM_SYM_OR_XOR;
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ u8 hfunc = rxfh->hfunc;
+ int err;
+
+ mutex_lock(&priv->state_lock);
+
+ err = mlx5e_rxfh_hfunc_check(priv, rxfh, extack);
+ if (err)
+ goto unlock;
+
+ err = mlx5e_rx_res_rss_set_rxfh(priv->rx_res, rxfh->rss_context,
+ rxfh->indir, rxfh->key,
+ hfunc == ETH_RSS_HASH_NO_CHANGE ? NULL : &hfunc,
+ rxfh->input_xfrm == RXH_XFRM_NO_CHANGE ? NULL : &symmetric);
+
+unlock:
+ mutex_unlock(&priv->state_lock);
+ return err;
+}
+
+static int mlx5e_remove_rxfh_context(struct net_device *dev,
+ struct ethtool_rxfh_context *ctx,
+ u32 rss_context,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int err;
+
+ mutex_lock(&priv->state_lock);
+ err = mlx5e_rx_res_rss_destroy(priv->rx_res, rss_context);
+ mutex_unlock(&priv->state_lock);
+ return err;
+}
+
#define MLX5E_PFC_PREVEN_AUTO_TOUT_MSEC 100
#define MLX5E_PFC_PREVEN_TOUT_MAX_MSEC 8000
#define MLX5E_PFC_PREVEN_MINOR_PRECENT 85
@@ -1446,8 +1725,8 @@ static void mlx5e_get_pause_stats(struct net_device *netdev,
mlx5e_stats_pause_get(priv, pause_stats);
}
-void mlx5e_ethtool_get_pauseparam(struct mlx5e_priv *priv,
- struct ethtool_pauseparam *pauseparam)
+static void mlx5e_ethtool_get_pauseparam(struct mlx5e_priv *priv,
+ struct ethtool_pauseparam *pauseparam)
{
struct mlx5_core_dev *mdev = priv->mdev;
int err;
@@ -1468,8 +1747,8 @@ static void mlx5e_get_pauseparam(struct net_device *netdev,
mlx5e_ethtool_get_pauseparam(priv, pauseparam);
}
-int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
- struct ethtool_pauseparam *pauseparam)
+static int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
+ struct ethtool_pauseparam *pauseparam)
{
struct mlx5_core_dev *mdev = priv->mdev;
int err;
@@ -1500,7 +1779,7 @@ static int mlx5e_set_pauseparam(struct net_device *netdev,
}
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct mlx5_core_dev *mdev = priv->mdev;
@@ -1511,6 +1790,7 @@ int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
return 0;
info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
@@ -1524,7 +1804,7 @@ int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
}
static int mlx5e_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -1655,11 +1935,12 @@ static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
}
static void mlx5e_get_fec_stats(struct net_device *netdev,
- struct ethtool_fec_stats *fec_stats)
+ struct ethtool_fec_stats *fec_stats,
+ struct ethtool_fec_hist *hist)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- mlx5e_stats_fec_get(priv, fec_stats);
+ mlx5e_stats_fec_get(priv, fec_stats, hist);
}
static int mlx5e_get_fecparam(struct net_device *netdev,
@@ -1751,7 +2032,7 @@ static int mlx5e_get_module_info(struct net_device *netdev,
int size_read = 0;
u8 data[4] = {0};
- size_read = mlx5_query_module_eeprom(dev, 0, 2, data);
+ size_read = mlx5_query_module_eeprom(dev, 0, 2, data, NULL);
if (size_read < 2)
return -EIO;
@@ -1793,6 +2074,7 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev,
struct mlx5_core_dev *mdev = priv->mdev;
int offset = ee->offset;
int size_read;
+ u8 status = 0;
int i = 0;
if (!ee->len)
@@ -1802,15 +2084,15 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev,
while (i < ee->len) {
size_read = mlx5_query_module_eeprom(mdev, offset, ee->len - i,
- data + i);
-
+ data + i, &status);
if (!size_read)
/* Done reading */
return 0;
if (size_read < 0) {
- netdev_err(priv->netdev, "%s: mlx5_query_eeprom failed:0x%x\n",
- __func__, size_read);
+ netdev_err(netdev,
+ "%s: mlx5_query_eeprom failed:0x%x, status %u\n",
+ __func__, size_read, status);
return size_read;
}
@@ -1830,6 +2112,7 @@ static int mlx5e_get_module_eeprom_by_page(struct net_device *netdev,
struct mlx5_core_dev *mdev = priv->mdev;
u8 *data = page_data->data;
int size_read;
+ u8 status = 0;
int i = 0;
if (!page_data->length)
@@ -1843,18 +2126,19 @@ static int mlx5e_get_module_eeprom_by_page(struct net_device *netdev,
query.page = page_data->page;
while (i < page_data->length) {
query.size = page_data->length - i;
- size_read = mlx5_query_module_eeprom_by_page(mdev, &query, data + i);
+ size_read = mlx5_query_module_eeprom_by_page(mdev, &query,
+ data + i, &status);
/* Done reading, return how many bytes was read */
if (!size_read)
return i;
- if (size_read == -EINVAL)
- return -EINVAL;
if (size_read < 0) {
- netdev_err(priv->netdev, "%s: mlx5_query_module_eeprom_by_page failed:0x%x\n",
- __func__, size_read);
- return i;
+ NL_SET_ERR_MSG_FMT_MOD(
+ extack,
+ "Query module eeprom by page failed, read %u bytes, err %d, status %u",
+ i, size_read, status);
+ return size_read;
}
i += size_read;
@@ -1879,14 +2163,9 @@ int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
if (err)
return err;
- dev_hold(dev);
- rtnl_unlock();
-
err = mlx5_firmware_flash(mdev, fw, NULL);
release_firmware(fw);
- rtnl_lock();
- dev_put(dev);
return err;
}
@@ -1908,7 +2187,7 @@ static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable,
if (enable && !MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe))
return -EOPNOTSUPP;
- cq_period_mode = cqe_mode_to_period_mode(enable);
+ cq_period_mode = mlx5e_dim_cq_period_mode(enable);
current_cq_period_mode = is_rx_cq ?
priv->channels.params.rx_cq_moderation.cq_period_mode :
@@ -1918,12 +2197,22 @@ static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable,
return 0;
new_params = priv->channels.params;
- if (is_rx_cq)
- mlx5e_set_rx_cq_mode_params(&new_params, cq_period_mode);
- else
- mlx5e_set_tx_cq_mode_params(&new_params, cq_period_mode);
+ if (is_rx_cq) {
+ mlx5e_reset_rx_channels_moderation(&priv->channels, cq_period_mode,
+ false, true);
+ mlx5e_channels_rx_toggle_dim(&priv->channels);
+ MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
+ cq_period_mode);
+ } else {
+ mlx5e_reset_tx_channels_moderation(&priv->channels, cq_period_mode,
+ false, true);
+ mlx5e_channels_tx_toggle_dim(&priv->channels);
+ MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
+ cq_period_mode);
+ }
- return mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true);
+ /* Update pflags of existing channels without resetting them */
+ return mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, false);
}
static int set_pflag_tx_cqe_based_moder(struct net_device *netdev, bool enable)
@@ -1990,7 +2279,7 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
if (!MLX5_CAP_GEN(mdev, cqe_compression))
return -EOPNOTSUPP;
- rx_filter = priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE;
+ rx_filter = priv->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE;
err = mlx5e_modify_rx_cqe_compression_locked(priv, enable, rx_filter);
if (err)
return err;
@@ -2005,7 +2294,6 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable)
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_params new_params;
- int err;
if (enable) {
/* Checking the regular RQ here; mlx5e_validate_xsk_param called
@@ -2026,14 +2314,7 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable)
MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_STRIDING_RQ, enable);
mlx5e_set_rq_type(mdev, &new_params);
- err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true);
- if (err)
- return err;
-
- /* update XDP supported features */
- mlx5e_set_xdp_feature(netdev);
-
- return 0;
+ return mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true);
}
static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable)
@@ -2124,7 +2405,7 @@ static int set_pflag_tx_port_ts(struct net_device *netdev, bool enable)
*/
err = mlx5e_safe_switch_params(priv, &new_params,
- mlx5e_num_channels_changed_ctx, NULL, true);
+ mlx5e_update_tc_and_tx_queues_ctx, NULL, true);
if (!err)
priv->tx_ptp_opened = true;
@@ -2194,21 +2475,35 @@ static u32 mlx5e_get_priv_flags(struct net_device *netdev)
return priv->channels.params.pflags;
}
+static int mlx5e_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *info)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ return mlx5e_ethtool_get_rxfh_fields(priv, info);
+}
+
+static int mlx5e_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ return mlx5e_ethtool_set_rxfh_fields(priv, cmd, extack);
+}
+
+static u32 mlx5e_get_rx_ring_count(struct net_device *dev)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ return priv->channels.params.num_channels;
+}
+
static int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
u32 *rule_locs)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- /* ETHTOOL_GRXRINGS is needed by ethtool -x which is not part
- * of rxnfc. We keep this logic out of mlx5e_ethtool_get_rxnfc,
- * to avoid breaking "ethtool -x" when mlx5e_ethtool_get_rxnfc
- * is compiled out via CONFIG_MLX5_EN_RXNFC=n.
- */
- if (info->cmd == ETHTOOL_GRXRINGS) {
- info->data = priv->channels.params.num_channels;
- return 0;
- }
-
return mlx5e_ethtool_get_rxnfc(priv, info, rule_locs);
}
@@ -2422,12 +2717,25 @@ static void mlx5e_get_rmon_stats(struct net_device *netdev,
mlx5e_stats_rmon_get(priv, rmon_stats, ranges);
}
+static void mlx5e_get_ts_stats(struct net_device *netdev,
+ struct ethtool_ts_stats *ts_stats)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ mlx5e_stats_ts_get(priv, ts_stats);
+}
+
const struct ethtool_ops mlx5e_ethtool_ops = {
- .cap_rss_ctx_supported = true,
+ .cap_link_lanes_supported = true,
+ .rxfh_per_ctx_fields = true,
+ .rxfh_per_ctx_key = true,
+ .rxfh_max_num_contexts = MLX5E_MAX_NUM_RSS,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USE_ADAPTIVE |
ETHTOOL_COALESCE_USE_CQE,
+ .supported_input_xfrm = RXH_XFRM_SYM_OR_XOR,
+ .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT,
.get_drvinfo = mlx5e_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_link_ext_state = mlx5e_get_link_ext_state,
@@ -2440,14 +2748,22 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.set_channels = mlx5e_set_channels,
.get_coalesce = mlx5e_get_coalesce,
.set_coalesce = mlx5e_set_coalesce,
+ .get_per_queue_coalesce = mlx5e_get_per_queue_coalesce,
+ .set_per_queue_coalesce = mlx5e_set_per_queue_coalesce,
.get_link_ksettings = mlx5e_get_link_ksettings,
.set_link_ksettings = mlx5e_set_link_ksettings,
.get_rxfh_key_size = mlx5e_get_rxfh_key_size,
.get_rxfh_indir_size = mlx5e_get_rxfh_indir_size,
.get_rxfh = mlx5e_get_rxfh,
.set_rxfh = mlx5e_set_rxfh,
+ .get_rxfh_fields = mlx5e_get_rxfh_fields,
+ .set_rxfh_fields = mlx5e_set_rxfh_fields,
+ .create_rxfh_context = mlx5e_create_rxfh_context,
+ .modify_rxfh_context = mlx5e_modify_rxfh_context,
+ .remove_rxfh_context = mlx5e_remove_rxfh_context,
.get_rxnfc = mlx5e_get_rxnfc,
.set_rxnfc = mlx5e_set_rxnfc,
+ .get_rx_ring_count = mlx5e_get_rx_ring_count,
.get_tunable = mlx5e_get_tunable,
.set_tunable = mlx5e_set_tunable,
.get_pause_stats = mlx5e_get_pause_stats,
@@ -2471,5 +2787,6 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.get_eth_mac_stats = mlx5e_get_eth_mac_stats,
.get_eth_ctrl_stats = mlx5e_get_eth_ctrl_stats,
.get_rmon_stats = mlx5e_get_rmon_stats,
+ .get_ts_stats = mlx5e_get_ts_stats,
.get_link_ext_stats = mlx5e_get_link_ext_stats
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 777d311d44ef..8928d2dcd43f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -484,7 +484,9 @@ static int mlx5e_vlan_rx_add_svid(struct mlx5e_flow_steering *fs,
}
/* Need to fix some features.. */
+ netdev_lock(netdev);
netdev_update_features(netdev);
+ netdev_unlock(netdev);
return err;
}
@@ -521,7 +523,9 @@ int mlx5e_fs_vlan_rx_kill_vid(struct mlx5e_flow_steering *fs,
} else if (be16_to_cpu(proto) == ETH_P_8021AD) {
clear_bit(vid, fs->vlan->active_svlans);
mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
+ netdev_lock(netdev);
netdev_update_features(netdev);
+ netdev_unlock(netdev);
}
return 0;
@@ -776,7 +780,7 @@ static int mlx5e_create_promisc_table(struct mlx5e_flow_steering *fs)
ft_attr.max_fte = MLX5E_PROMISC_TABLE_SIZE;
ft_attr.autogroup.max_num_groups = 1;
ft_attr.level = MLX5E_PROMISC_FT_LEVEL;
- ft_attr.prio = MLX5E_NIC_PRIO;
+ ft_attr.prio = MLX5E_PROMISC_PRIO;
ft->t = mlx5_create_auto_grouped_flow_table(fs->ns, &ft_attr);
if (IS_ERR(ft->t)) {
@@ -896,12 +900,14 @@ static void mlx5e_set_inner_ttc_params(struct mlx5e_flow_steering *fs,
int tt;
memset(ttc_params, 0, sizeof(*ttc_params));
- ttc_params->ns = mlx5_get_flow_namespace(fs->mdev,
- MLX5_FLOW_NAMESPACE_KERNEL);
+ ttc_params->ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
ft_attr->level = MLX5E_INNER_TTC_FT_LEVEL;
ft_attr->prio = MLX5E_NIC_PRIO;
for (tt = 0; tt < MLX5_NUM_TT; tt++) {
+ if (mlx5_ttc_is_decrypted_esp_tt(tt))
+ continue;
+
ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
ttc_params->dests[tt].tir_num =
tt == MLX5_TT_ANY ?
@@ -911,21 +917,34 @@ static void mlx5e_set_inner_ttc_params(struct mlx5e_flow_steering *fs,
}
}
+static bool mlx5e_ipsec_rss_supported(struct mlx5_core_dev *mdev)
+{
+ return MLX5_CAP_NIC_RX_FT_FIELD_SUPPORT_2(mdev, ipsec_next_header) &&
+ MLX5_CAP_NIC_RX_FT_FIELD_SUPPORT_2(mdev, outer_l4_type_ext) &&
+ MLX5_CAP_NIC_RX_FT_FIELD_SUPPORT_2(mdev, inner_l4_type_ext);
+}
+
void mlx5e_set_ttc_params(struct mlx5e_flow_steering *fs,
struct mlx5e_rx_res *rx_res,
- struct ttc_params *ttc_params, bool tunnel)
+ struct ttc_params *ttc_params, bool tunnel,
+ bool ipsec_rss)
{
struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
int tt;
memset(ttc_params, 0, sizeof(*ttc_params));
- ttc_params->ns = mlx5_get_flow_namespace(fs->mdev,
- MLX5_FLOW_NAMESPACE_KERNEL);
+ ttc_params->ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
ft_attr->level = MLX5E_TTC_FT_LEVEL;
ft_attr->prio = MLX5E_NIC_PRIO;
+ ttc_params->ipsec_rss = ipsec_rss &&
+ mlx5e_ipsec_rss_supported(fs->mdev);
+
for (tt = 0; tt < MLX5_NUM_TT; tt++) {
+ if (mlx5_ttc_is_decrypted_esp_tt(tt))
+ continue;
+
ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
ttc_params->dests[tt].tir_num =
tt == MLX5_TT_ANY ?
@@ -1291,7 +1310,7 @@ int mlx5e_create_ttc_table(struct mlx5e_flow_steering *fs,
{
struct ttc_params ttc_params = {};
- mlx5e_set_ttc_params(fs, rx_res, &ttc_params, true);
+ mlx5e_set_ttc_params(fs, rx_res, &ttc_params, true, true);
fs->ttc = mlx5_create_ttc_table(fs->mdev, &ttc_params);
return PTR_ERR_OR_ZERO(fs->ttc);
}
@@ -1309,8 +1328,7 @@ int mlx5e_create_flow_steering(struct mlx5e_flow_steering *fs,
return -EOPNOTSUPP;
mlx5e_fs_set_ns(fs, ns, false);
- err = mlx5e_arfs_create_tables(fs, rx_res,
- !!(netdev->hw_features & NETIF_F_NTUPLE));
+ err = mlx5e_arfs_create_tables(fs, rx_res, mlx5e_fs_has_arfs(netdev));
if (err) {
fs_err(fs, "Failed to create arfs tables, err=%d\n", err);
netdev->hw_features &= ~NETIF_F_NTUPLE;
@@ -1357,7 +1375,7 @@ err_destroy_ttc_table:
err_destroy_inner_ttc_table:
mlx5e_destroy_inner_ttc_table(fs);
err_destroy_arfs_tables:
- mlx5e_arfs_destroy_tables(fs, !!(netdev->hw_features & NETIF_F_NTUPLE));
+ mlx5e_arfs_destroy_tables(fs, mlx5e_fs_has_arfs(netdev));
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index 3eccdadc0357..63bdef5b4ba5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -704,7 +704,7 @@ static int validate_flow(struct mlx5e_priv *priv,
num_tuples += ret;
break;
default:
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
if ((fs->flow_type & FLOW_EXT)) {
ret = validate_vlan(fs);
@@ -734,7 +734,7 @@ mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
if (num_tuples <= 0) {
netdev_warn(priv->netdev, "%s: flow is not valid %d\n",
__func__, num_tuples);
- return num_tuples;
+ return num_tuples < 0 ? num_tuples : -EINVAL;
}
eth_ft = get_flow_table(priv, fs, num_tuples);
@@ -884,25 +884,27 @@ static int flow_type_to_traffic_type(u32 flow_type)
case ESP_V6_FLOW:
return MLX5_TT_IPV6_IPSEC_ESP;
case IPV4_FLOW:
+ case IP_USER_FLOW:
return MLX5_TT_IPV4;
case IPV6_FLOW:
+ case IPV6_USER_FLOW:
return MLX5_TT_IPV6;
default:
return -EINVAL;
}
}
-static int mlx5e_set_rss_hash_opt(struct mlx5e_priv *priv,
- struct ethtool_rxnfc *nfc)
+int mlx5e_ethtool_set_rxfh_fields(struct mlx5e_priv *priv,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
{
u8 rx_hash_field = 0;
u32 flow_type = 0;
- u32 rss_idx = 0;
+ u32 rss_idx;
int err;
int tt;
- if (nfc->flow_type & FLOW_RSS)
- rss_idx = nfc->rss_context;
+ rss_idx = nfc->rss_context;
flow_type = flow_type_mask(nfc->flow_type);
tt = flow_type_to_traffic_type(flow_type);
@@ -939,16 +941,15 @@ static int mlx5e_set_rss_hash_opt(struct mlx5e_priv *priv,
return err;
}
-static int mlx5e_get_rss_hash_opt(struct mlx5e_priv *priv,
- struct ethtool_rxnfc *nfc)
+int mlx5e_ethtool_get_rxfh_fields(struct mlx5e_priv *priv,
+ struct ethtool_rxfh_fields *nfc)
{
int hash_field = 0;
u32 flow_type = 0;
- u32 rss_idx = 0;
+ u32 rss_idx;
int tt;
- if (nfc->flow_type & FLOW_RSS)
- rss_idx = nfc->rss_context;
+ rss_idx = nfc->rss_context;
flow_type = flow_type_mask(nfc->flow_type);
tt = flow_type_to_traffic_type(flow_type);
@@ -984,9 +985,6 @@ int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd)
case ETHTOOL_SRXCLSRLDEL:
err = mlx5e_ethtool_flow_remove(priv, cmd->fs.location);
break;
- case ETHTOOL_SRXFH:
- err = mlx5e_set_rss_hash_opt(priv, cmd);
- break;
default:
err = -EOPNOTSUPP;
break;
@@ -1011,9 +1009,6 @@ int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
case ETHTOOL_GRXCLSRLALL:
err = mlx5e_ethtool_get_all_flows(priv, info, rule_locs);
break;
- case ETHTOOL_GRXFH:
- err = mlx5e_get_rss_hash_opt(priv, info);
- break;
default:
err = -EOPNOTSUPP;
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 319930c04093..6168f0814414 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -30,6 +30,7 @@
* SOFTWARE.
*/
+#include <linux/dim.h>
#include <net/tc_act/tc_gact.h>
#include <linux/mlx5/fs.h>
#include <net/vxlan.h>
@@ -38,15 +39,20 @@
#include <linux/debugfs.h>
#include <linux/if_bridge.h>
#include <linux/filter.h>
+#include <net/netdev_lock.h>
+#include <net/netdev_queues.h>
+#include <net/netdev_rx_queue.h>
#include <net/page_pool/types.h>
#include <net/pkt_sched.h>
#include <net/xdp_sock_drv.h>
#include "eswitch.h"
#include "en.h"
+#include "en/dim.h"
#include "en/txrx.h"
#include "en_tc.h"
#include "en_rep.h"
#include "en_accel/ipsec.h"
+#include "en_accel/psp.h"
#include "en_accel/macsec.h"
#include "en_accel/en_accel.h"
#include "en_accel/ktls.h"
@@ -71,6 +77,29 @@
#include "en/trap.h"
#include "lib/devcom.h"
#include "lib/sd.h"
+#include "en/pcie_cong_event.h"
+
+static bool mlx5e_hw_gro_supported(struct mlx5_core_dev *mdev)
+{
+ if (!MLX5_CAP_GEN(mdev, shampo) ||
+ !MLX5_CAP_SHAMPO(mdev, shampo_header_split_data_merge))
+ return false;
+
+	/* Our HW-GRO implementation relies on a "KSM Mkey" for
+	 * mapping the SHAMPO header buffer.
+	 */
+ if (!MLX5_CAP_GEN(mdev, fixed_buffer_size))
+ return false;
+
+ if (!MLX5_CAP_GEN_2(mdev, min_mkey_log_entity_size_fixed_buffer_valid))
+ return false;
+
+ if (MLX5_CAP_GEN_2(mdev, min_mkey_log_entity_size_fixed_buffer) >
+ MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE)
+ return false;
+
+ return true;
+}
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_shift,
enum mlx5e_mpwrq_umr_mode umr_mode)
@@ -204,13 +233,17 @@ static int mlx5e_devcom_event_mpv(int event, void *my_data, void *event_data)
static int mlx5e_devcom_init_mpv(struct mlx5e_priv *priv, u64 *data)
{
+ struct mlx5_devcom_match_attr attr = {
+ .key.val = *data,
+ };
+
priv->devcom = mlx5_devcom_register_component(priv->mdev->priv.devc,
MLX5_DEVCOM_MPV,
- *data,
+ &attr,
mlx5e_devcom_event_mpv,
priv);
- if (IS_ERR(priv->devcom))
- return PTR_ERR(priv->devcom);
+ if (!priv->devcom)
+ return -EINVAL;
if (mlx5_core_is_mp_master(priv->mdev)) {
mlx5_devcom_send_event(priv->devcom, MPV_DEVCOM_MASTER_UP,
@@ -223,7 +256,7 @@ static int mlx5e_devcom_init_mpv(struct mlx5e_priv *priv, u64 *data)
static void mlx5e_devcom_cleanup_mpv(struct mlx5e_priv *priv)
{
- if (IS_ERR_OR_NULL(priv->devcom))
+ if (!priv->devcom)
return;
if (mlx5_core_is_mp_master(priv->mdev)) {
@@ -233,6 +266,7 @@ static void mlx5e_devcom_cleanup_mpv(struct mlx5e_priv *priv)
}
mlx5_devcom_unregister_component(priv->devcom);
+ priv->devcom = NULL;
}
static int blocking_event(struct notifier_block *nb, unsigned long event, void *data)
@@ -287,8 +321,8 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
struct mlx5e_icosq *sq,
struct mlx5e_umr_wqe *wqe)
{
- struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
- struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
+ struct mlx5_wqe_ctrl_seg *cseg = &wqe->hdr.ctrl;
+ struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->hdr.uctrl;
u16 octowords;
u8 ds_cnt;
@@ -306,52 +340,6 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
}
-static int mlx5e_rq_shampo_hd_alloc(struct mlx5e_rq *rq, int node)
-{
- rq->mpwqe.shampo = kvzalloc_node(sizeof(*rq->mpwqe.shampo),
- GFP_KERNEL, node);
- if (!rq->mpwqe.shampo)
- return -ENOMEM;
- return 0;
-}
-
-static void mlx5e_rq_shampo_hd_free(struct mlx5e_rq *rq)
-{
- kvfree(rq->mpwqe.shampo);
-}
-
-static int mlx5e_rq_shampo_hd_info_alloc(struct mlx5e_rq *rq, int node)
-{
- struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
-
- shampo->bitmap = bitmap_zalloc_node(shampo->hd_per_wq, GFP_KERNEL,
- node);
- shampo->info = kvzalloc_node(array_size(shampo->hd_per_wq,
- sizeof(*shampo->info)),
- GFP_KERNEL, node);
- shampo->pages = kvzalloc_node(array_size(shampo->hd_per_wq,
- sizeof(*shampo->pages)),
- GFP_KERNEL, node);
- if (!shampo->bitmap || !shampo->info || !shampo->pages)
- goto err_nomem;
-
- return 0;
-
-err_nomem:
- kvfree(shampo->info);
- kvfree(shampo->bitmap);
- kvfree(shampo->pages);
-
- return -ENOMEM;
-}
-
-static void mlx5e_rq_shampo_hd_info_free(struct mlx5e_rq *rq)
-{
- kvfree(rq->mpwqe.shampo->bitmap);
- kvfree(rq->mpwqe.shampo->info);
- kvfree(rq->mpwqe.shampo->pages);
-}
-
static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node)
{
int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
@@ -374,7 +362,9 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node)
bitmap_fill(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
}
- mlx5e_build_umr_wqe(rq, rq->icosq, &rq->mpwqe.umr_wqe);
+ mlx5e_build_umr_wqe(rq, rq->icosq,
+ container_of(&rq->mpwqe.umr_wqe,
+ struct mlx5e_umr_wqe, hdr));
return 0;
}
@@ -502,8 +492,8 @@ static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
return err;
}
-static int mlx5e_create_umr_klm_mkey(struct mlx5_core_dev *mdev,
- u64 nentries,
+static int mlx5e_create_umr_ksm_mkey(struct mlx5_core_dev *mdev,
+ u64 nentries, u8 log_entry_size,
u32 *umr_mkey)
{
int inlen;
@@ -523,12 +513,13 @@ static int mlx5e_create_umr_klm_mkey(struct mlx5_core_dev *mdev,
MLX5_SET(mkc, mkc, umr_en, 1);
MLX5_SET(mkc, mkc, lw, 1);
MLX5_SET(mkc, mkc, lr, 1);
- MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS);
+ MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KSM);
mlx5e_mkey_set_relaxed_ordering(mdev, mkc);
MLX5_SET(mkc, mkc, qpn, 0xffffff);
MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.hw_objs.pdn);
MLX5_SET(mkc, mkc, translations_octword_size, nentries);
- MLX5_SET(mkc, mkc, length64, 1);
+ MLX5_SET(mkc, mkc, log_page_size, log_entry_size);
+ MLX5_SET64(mkc, mkc, len, nentries << log_entry_size);
err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);
kvfree(in);
@@ -561,17 +552,26 @@ static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq
}
static int mlx5e_create_rq_hd_umr_mkey(struct mlx5_core_dev *mdev,
- struct mlx5e_rq *rq)
+ u16 hd_per_wq, __be32 *umr_mkey)
{
- u32 max_klm_size = BIT(MLX5_CAP_GEN(mdev, log_max_klm_list_size));
+ u32 max_ksm_size = BIT(MLX5_CAP_GEN(mdev, log_max_klm_list_size));
+ u32 mkey;
+ int err;
- if (max_klm_size < rq->mpwqe.shampo->hd_per_wq) {
- mlx5_core_err(mdev, "max klm list size 0x%x is smaller than shampo header buffer list size 0x%x\n",
- max_klm_size, rq->mpwqe.shampo->hd_per_wq);
+ if (max_ksm_size < hd_per_wq) {
+ mlx5_core_err(mdev, "max ksm list size 0x%x is smaller than shampo header buffer list size 0x%x\n",
+ max_ksm_size, hd_per_wq);
return -EINVAL;
}
- return mlx5e_create_umr_klm_mkey(mdev, rq->mpwqe.shampo->hd_per_wq,
- &rq->mpwqe.shampo->mkey);
+
+ err = mlx5e_create_umr_ksm_mkey(mdev, hd_per_wq,
+ MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE,
+ &mkey);
+ if (err)
+ return err;
+
+ *umr_mkey = cpu_to_be32(mkey);
+ return 0;
}
static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
@@ -682,6 +682,27 @@ static void mlx5e_rq_err_cqe_work(struct work_struct *recover_work)
mlx5e_reporter_rq_cqe_err(rq);
}
+static void mlx5e_rq_timeout_work(struct work_struct *timeout_work)
+{
+ struct mlx5e_rq *rq = container_of(timeout_work,
+ struct mlx5e_rq,
+ rx_timeout_work);
+
+ /* Acquire netdev instance lock to synchronize with channel close and
+ * reopen flows. Either successfully obtain the lock, or detect that
+ * channels are closing for another reason, making this work no longer
+ * necessary.
+ */
+ while (!netdev_trylock(rq->netdev)) {
+ if (!test_bit(MLX5E_STATE_CHANNELS_ACTIVE, &rq->priv->state))
+ return;
+ msleep(20);
+ }
+
+ mlx5e_reporter_rx_timeout(rq);
+ netdev_unlock(rq->netdev);
+}
+
static int mlx5e_alloc_mpwqe_rq_drop_page(struct mlx5e_rq *rq)
{
rq->wqe_overflow.page = alloc_page(GFP_KERNEL);
@@ -714,8 +735,8 @@ static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param
rq->pdev = c->pdev;
rq->netdev = c->netdev;
rq->priv = c->priv;
- rq->tstamp = c->tstamp;
- rq->clock = &mdev->clock;
+ rq->hwtstamp_config = &c->priv->hwtstamp_config;
+ rq->clock = mdev->clock;
rq->icosq = &c->icosq;
rq->ix = c->ix;
rq->channel = c;
@@ -733,6 +754,35 @@ static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param
xdp_frag_size);
}
+static int mlx5e_rq_shampo_hd_info_alloc(struct mlx5e_rq *rq, u16 hd_per_wq,
+ int node)
+{
+ struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
+
+ shampo->hd_per_wq = hd_per_wq;
+
+ shampo->bitmap = bitmap_zalloc_node(hd_per_wq, GFP_KERNEL, node);
+ shampo->pages = kvzalloc_node(array_size(hd_per_wq,
+ sizeof(*shampo->pages)),
+ GFP_KERNEL, node);
+ if (!shampo->bitmap || !shampo->pages)
+ goto err_nomem;
+
+ return 0;
+
+err_nomem:
+ kvfree(shampo->pages);
+ bitmap_free(shampo->bitmap);
+
+ return -ENOMEM;
+}
+
+static void mlx5e_rq_shampo_hd_info_free(struct mlx5e_rq *rq)
+{
+ kvfree(rq->mpwqe.shampo->pages);
+ bitmap_free(rq->mpwqe.shampo->bitmap);
+}
+
static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_rq_param *rqp,
@@ -741,43 +791,93 @@ static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
int node)
{
void *wqc = MLX5_ADDR_OF(rqc, rqp->rqc, wq);
+ u8 log_hd_per_page, log_hd_entry_size;
+ u16 hd_per_wq, hd_per_wqe;
+ u32 hd_pool_size;
int wq_size;
int err;
if (!test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
return 0;
- err = mlx5e_rq_shampo_hd_alloc(rq, node);
- if (err)
- goto out;
- rq->mpwqe.shampo->hd_per_wq =
- mlx5e_shampo_hd_per_wq(mdev, params, rqp);
- err = mlx5e_create_rq_hd_umr_mkey(mdev, rq);
+
+ rq->mpwqe.shampo = kvzalloc_node(sizeof(*rq->mpwqe.shampo),
+ GFP_KERNEL, node);
+ if (!rq->mpwqe.shampo)
+ return -ENOMEM;
+
+ /* split headers data structures */
+ hd_per_wq = mlx5e_shampo_hd_per_wq(mdev, params, rqp);
+ err = mlx5e_rq_shampo_hd_info_alloc(rq, hd_per_wq, node);
if (err)
- goto err_shampo_hd;
- err = mlx5e_rq_shampo_hd_info_alloc(rq, node);
+ goto err_shampo_hd_info_alloc;
+
+ err = mlx5e_create_rq_hd_umr_mkey(mdev, hd_per_wq,
+ &rq->mpwqe.shampo->mkey_be);
if (err)
- goto err_shampo_info;
+ goto err_umr_mkey;
+
+ hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rqp);
+ wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
+
+ BUILD_BUG_ON(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE > PAGE_SHIFT);
+ if (hd_per_wqe >= MLX5E_SHAMPO_WQ_HEADER_PER_PAGE) {
+ log_hd_per_page = MLX5E_SHAMPO_LOG_WQ_HEADER_PER_PAGE;
+ log_hd_entry_size = MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE;
+ } else {
+ log_hd_per_page = order_base_2(hd_per_wqe);
+ log_hd_entry_size = order_base_2(PAGE_SIZE / hd_per_wqe);
+ }
+
+ rq->mpwqe.shampo->hd_per_wqe = hd_per_wqe;
+ rq->mpwqe.shampo->hd_per_page = BIT(log_hd_per_page);
+ rq->mpwqe.shampo->log_hd_per_page = log_hd_per_page;
+ rq->mpwqe.shampo->log_hd_entry_size = log_hd_entry_size;
+
+ hd_pool_size = (hd_per_wqe * wq_size) >> log_hd_per_page;
+
+ if (netif_rxq_has_unreadable_mp(rq->netdev, rq->ix)) {
+ /* Separate page pool for shampo headers */
+ struct page_pool_params pp_params = { };
+
+ pp_params.order = 0;
+ pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pp_params.pool_size = hd_pool_size;
+ pp_params.nid = node;
+ pp_params.dev = rq->pdev;
+ pp_params.napi = rq->cq.napi;
+ pp_params.netdev = rq->netdev;
+ pp_params.dma_dir = rq->buff.map_dir;
+ pp_params.max_len = PAGE_SIZE;
+
+ rq->hd_page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(rq->hd_page_pool)) {
+ err = PTR_ERR(rq->hd_page_pool);
+ rq->hd_page_pool = NULL;
+ goto err_hds_page_pool;
+ }
+ } else {
+ /* Common page pool, reserve space for headers. */
+ *pool_size += hd_pool_size;
+ rq->hd_page_pool = NULL;
+ }
+
+ /* gro only data structures */
rq->hw_gro_data = kvzalloc_node(sizeof(*rq->hw_gro_data), GFP_KERNEL, node);
if (!rq->hw_gro_data) {
err = -ENOMEM;
goto err_hw_gro_data;
}
- rq->mpwqe.shampo->key =
- cpu_to_be32(rq->mpwqe.shampo->mkey);
- rq->mpwqe.shampo->hd_per_wqe =
- mlx5e_shampo_hd_per_wqe(mdev, params, rqp);
- wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
- *pool_size += (rq->mpwqe.shampo->hd_per_wqe * wq_size) /
- MLX5E_SHAMPO_WQ_HEADER_PER_PAGE;
+
return 0;
err_hw_gro_data:
+ page_pool_destroy(rq->hd_page_pool);
+err_hds_page_pool:
+ mlx5_core_destroy_mkey(mdev, be32_to_cpu(rq->mpwqe.shampo->mkey_be));
+err_umr_mkey:
mlx5e_rq_shampo_hd_info_free(rq);
-err_shampo_info:
- mlx5_core_destroy_mkey(mdev, rq->mpwqe.shampo->mkey);
-err_shampo_hd:
- mlx5e_rq_shampo_hd_free(rq);
-out:
+err_shampo_hd_info_alloc:
+ kvfree(rq->mpwqe.shampo);
return err;
}
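The SHAMPO allocation above packs header buffers into pages: when one WQE needs at least a full page worth of headers it keeps the fixed maximum entry size, otherwise it shrinks the per-page header count and grows the entry size so a WQE's headers still fill exactly one page. A user-space sketch of that sizing decision; the constants are assumed values standing in for the MLX5E_SHAMPO_* defines:

    #include <stdio.h>

    #define PAGE_SIZE 4096u
    #define LOG_MAX_HEADER_ENTRY_SIZE 8u               /* assumed: 256-byte entries */
    #define LOG_WQ_HEADER_PER_PAGE (12u - LOG_MAX_HEADER_ENTRY_SIZE)
    #define WQ_HEADER_PER_PAGE (1u << LOG_WQ_HEADER_PER_PAGE)

    static unsigned int order_base_2(unsigned int n)  /* smallest k with 2^k >= n */
    {
    	unsigned int k = 0;

    	while ((1u << k) < n)
    		k++;
    	return k;
    }

    int main(void)
    {
    	unsigned int examples[] = { 4, 8, 16, 64 };

    	for (unsigned int i = 0; i < 4; i++) {
    		unsigned int hd_per_wqe = examples[i];
    		unsigned int log_hd_per_page, log_hd_entry_size;

    		if (hd_per_wqe >= WQ_HEADER_PER_PAGE) {
    			log_hd_per_page = LOG_WQ_HEADER_PER_PAGE;
    			log_hd_entry_size = LOG_MAX_HEADER_ENTRY_SIZE;
    		} else {
    			log_hd_per_page = order_base_2(hd_per_wqe);
    			log_hd_entry_size = order_base_2(PAGE_SIZE / hd_per_wqe);
    		}

    		printf("hd_per_wqe=%-3u -> %u headers/page, %u B/header\n",
    		       hd_per_wqe, 1u << log_hd_per_page, 1u << log_hd_entry_size);
    	}
    	return 0;
    }

With these assumed constants, 4 headers per WQE yield 1024-byte entries and 64 yield 256-byte entries, so each case still consumes whole pages.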
@@ -787,9 +887,12 @@ static void mlx5e_rq_free_shampo(struct mlx5e_rq *rq)
return;
kvfree(rq->hw_gro_data);
+ if (rq->hd_page_pool != rq->page_pool)
+ page_pool_destroy(rq->hd_page_pool);
mlx5e_rq_shampo_hd_info_free(rq);
- mlx5_core_destroy_mkey(rq->mdev, rq->mpwqe.shampo->mkey);
- mlx5e_rq_shampo_hd_free(rq);
+ mlx5_core_destroy_mkey(rq->mdev,
+ be32_to_cpu(rq->mpwqe.shampo->mkey_be));
+ kvfree(rq->mpwqe.shampo);
}
static int mlx5e_alloc_rq(struct mlx5e_params *params,
@@ -807,6 +910,7 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
rqp->wq.db_numa_node = node;
INIT_WORK(&rq->recover_work, mlx5e_rq_err_cqe_work);
+ INIT_WORK(&rq->rx_timeout_work, mlx5e_rq_timeout_work);
if (params->xdp_prog)
bpf_prog_inc(params->xdp_prog);
@@ -892,6 +996,8 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
if (xsk) {
err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
MEM_TYPE_XSK_BUFF_POOL, NULL);
+ if (err)
+ goto err_free_by_rq_type;
xsk_pool_set_rxq_info(rq->xsk_pool, &rq->xdp_rxq);
} else {
/* Create a page_pool and register it with rxq */
@@ -906,6 +1012,11 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
pp_params.netdev = rq->netdev;
pp_params.dma_dir = rq->buff.map_dir;
pp_params.max_len = PAGE_SIZE;
+ pp_params.queue_idx = rq->ix;
+
+ /* Shampo header data split allows for unreadable netmem */
+ if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
+ pp_params.flags |= PP_FLAG_ALLOW_UNREADABLE_NETMEM;
/* page_pool can be used even when there is no rq->xdp_prog,
* given page_pool does not handle DMA mapping there is no
@@ -918,12 +1029,15 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
rq->page_pool = NULL;
goto err_free_by_rq_type;
}
- if (xdp_rxq_info_is_reg(&rq->xdp_rxq))
+ if (!rq->hd_page_pool)
+ rq->hd_page_pool = rq->page_pool;
+ if (xdp_rxq_info_is_reg(&rq->xdp_rxq)) {
err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
MEM_TYPE_PAGE_POOL, rq->page_pool);
+ if (err)
+ goto err_destroy_page_pool;
+ }
}
- if (err)
- goto err_destroy_page_pool;
for (i = 0; i < wq_sz; i++) {
if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
@@ -960,17 +1074,6 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
}
}
- INIT_WORK(&rq->dim.work, mlx5e_rx_dim_work);
-
- switch (params->rx_cq_moderation.cq_period_mode) {
- case MLX5_CQ_PERIOD_MODE_START_FROM_CQE:
- rq->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
- break;
- case MLX5_CQ_PERIOD_MODE_START_FROM_EQE:
- default:
- rq->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
- }
-
return 0;
err_destroy_page_pool:
@@ -1000,29 +1103,31 @@ err_rq_xdp_prog:
static void mlx5e_free_rq(struct mlx5e_rq *rq)
{
- struct bpf_prog *old_prog;
-
- if (xdp_rxq_info_is_reg(&rq->xdp_rxq)) {
- old_prog = rcu_dereference_protected(rq->xdp_prog,
- lockdep_is_held(&rq->priv->state_lock));
- if (old_prog)
- bpf_prog_put(old_prog);
- }
+ kvfree(rq->dim);
+ page_pool_destroy(rq->page_pool);
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ mlx5e_rq_free_shampo(rq);
kvfree(rq->mpwqe.info);
mlx5_core_destroy_mkey(rq->mdev, be32_to_cpu(rq->mpwqe.umr_mkey_be));
mlx5e_free_mpwqe_rq_drop_page(rq);
- mlx5e_rq_free_shampo(rq);
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
mlx5e_free_wqe_alloc_info(rq);
}
- xdp_rxq_info_unreg(&rq->xdp_rxq);
- page_pool_destroy(rq->page_pool);
mlx5_wq_destroy(&rq->wq_ctrl);
+
+ if (xdp_rxq_info_is_reg(&rq->xdp_rxq)) {
+ struct bpf_prog *old_prog;
+
+ old_prog = rcu_dereference_protected(rq->xdp_prog,
+ lockdep_is_held(&rq->priv->state_lock));
+ if (old_prog)
+ bpf_prog_put(old_prog);
+ }
+ xdp_rxq_info_unreg(&rq->xdp_rxq);
}
int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param, u16 q_counter)
@@ -1060,7 +1165,8 @@ int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param, u16 q_cou
if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) {
MLX5_SET(wq, wq, log_headers_buffer_entry_num,
order_base_2(rq->mpwqe.shampo->hd_per_wq));
- MLX5_SET(wq, wq, headers_mkey, rq->mpwqe.shampo->mkey);
+ MLX5_SET(wq, wq, headers_mkey,
+ be32_to_cpu(rq->mpwqe.shampo->mkey_be));
}
mlx5_fill_page_frag_array(&rq->wq_ctrl.buf,
@@ -1108,7 +1214,7 @@ static void mlx5e_flush_rq_cq(struct mlx5e_rq *rq)
struct mlx5_cqe64 *cqe;
if (test_bit(MLX5E_RQ_STATE_MINI_CQE_ENHANCED, &rq->state)) {
- while ((cqe = mlx5_cqwq_get_cqe_enahnced_comp(cqwq)))
+ while ((cqe = mlx5_cqwq_get_cqe_enhanced_comp(cqwq)))
mlx5_cqwq_pop(cqwq);
} else {
while ((cqe = mlx5_cqwq_get_cqe(cqwq)))
@@ -1190,7 +1296,8 @@ int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time)
netdev_warn(rq->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
rq->ix, rq->rqn, mlx5e_rqwq_get_cur_sz(rq), min_wqes);
- mlx5e_reporter_rx_timeout(rq);
+ queue_work(rq->priv->wq, &rq->rx_timeout_work);
+
return -ETIMEDOUT;
}
@@ -1216,18 +1323,17 @@ void mlx5e_free_rx_missing_descs(struct mlx5e_rq *rq)
head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
}
+ rq->mpwqe.actual_wq_head = wq->head;
+ rq->mpwqe.umr_in_progress = 0;
+ rq->mpwqe.umr_completed = 0;
+
if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) {
+ struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
u16 len;
- len = (rq->mpwqe.shampo->pi - rq->mpwqe.shampo->ci) &
- (rq->mpwqe.shampo->hd_per_wq - 1);
- mlx5e_shampo_dealloc_hd(rq, len, rq->mpwqe.shampo->ci, false);
- rq->mpwqe.shampo->pi = rq->mpwqe.shampo->ci;
+ len = (shampo->pi - shampo->ci) & shampo->hd_per_wq;
+ mlx5e_shampo_fill_umr(rq, len);
}
-
- rq->mpwqe.actual_wq_head = wq->head;
- rq->mpwqe.umr_in_progress = 0;
- rq->mpwqe.umr_completed = 0;
}
void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
@@ -1252,8 +1358,7 @@ void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
}
if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
- mlx5e_shampo_dealloc_hd(rq, rq->mpwqe.shampo->hd_per_wq,
- 0, true);
+ mlx5e_shampo_dealloc_hd(rq);
} else {
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
u16 missing = mlx5_wq_cyc_missing(wq);
@@ -1300,8 +1405,21 @@ int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
if (MLX5_CAP_ETH(mdev, cqe_checksum_full))
__set_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state);
- if (params->rx_dim_enabled)
- __set_bit(MLX5E_RQ_STATE_DIM, &rq->state);
+ if (rq->channel && !params->rx_dim_enabled) {
+ rq->channel->rx_cq_moder = params->rx_cq_moderation;
+ } else if (rq->channel) {
+ u8 cq_period_mode;
+
+ cq_period_mode = params->rx_moder_use_cqe_mode ?
+ DIM_CQ_PERIOD_MODE_START_FROM_CQE :
+ DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ mlx5e_reset_rx_moderation(&rq->channel->rx_cq_moder, cq_period_mode,
+ params->rx_dim_enabled);
+
+ err = mlx5e_dim_rx_change(rq, params->rx_dim_enabled);
+ if (err)
+ goto err_destroy_rq;
+ }
/* We disable csum_complete when XDP is enabled since
* XDP programs might manipulate packets which will render
@@ -1347,8 +1465,10 @@ void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
void mlx5e_close_rq(struct mlx5e_rq *rq)
{
- cancel_work_sync(&rq->dim.work);
+ if (rq->dim)
+ cancel_work_sync(&rq->dim->work);
cancel_work_sync(&rq->recover_work);
+ cancel_work_sync(&rq->rx_timeout_work);
mlx5e_destroy_rq(rq);
mlx5e_free_rx_descs(rq);
mlx5e_free_rq(rq);
@@ -1429,7 +1549,7 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
sq->pdev = c->pdev;
sq->mkey_be = c->mkey_be;
sq->channel = c;
- sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
+ sq->uar_map = c->bfreg->map;
sq->min_inline_mode = params->tx_min_inline_mode;
sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu) - ETH_FCS_LEN;
sq->xsk_pool = xsk_pool;
@@ -1514,7 +1634,7 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
int err;
sq->channel = c;
- sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
+ sq->uar_map = c->bfreg->map;
sq->reserved_room = param->stop_room;
param->wq.db_numa_node = cpu_to_node(c->cpu);
@@ -1591,7 +1711,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
int err;
sq->pdev = c->pdev;
- sq->clock = &mdev->clock;
+ sq->clock = mdev->clock;
sq->mkey_be = c->mkey_be;
sq->netdev = c->netdev;
sq->mdev = c->mdev;
@@ -1599,13 +1719,11 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
sq->priv = c->priv;
sq->ch_ix = c->ix;
sq->txq_ix = txq_ix;
- sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
+ sq->uar_map = c->bfreg->map;
sq->min_inline_mode = params->tx_min_inline_mode;
sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
sq->max_sq_mpw_wqebbs = mlx5e_get_max_sq_aligned_wqebbs(mdev);
INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
- if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
- set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
if (mlx5_ipsec_device_caps(c->priv->mdev))
set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
if (param->is_mpw)
@@ -1623,9 +1741,6 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
if (err)
goto err_sq_wq_destroy;
- INIT_WORK(&sq->dim.work, mlx5e_tx_dim_work);
- sq->dim.mode = params->tx_cq_moderation.cq_period_mode;
-
return 0;
err_sq_wq_destroy:
@@ -1636,6 +1751,7 @@ err_sq_wq_destroy:
void mlx5e_free_txqsq(struct mlx5e_txqsq *sq)
{
+ kvfree(sq->dim);
mlx5e_free_txqsq_db(sq);
mlx5_wq_destroy(&sq->wq_ctrl);
}
@@ -1679,7 +1795,7 @@ static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
MLX5_SET(sqc, sqc, flush_in_error_en, 1);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
- MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.hw_objs.bfreg.index);
+ MLX5_SET(wq, wq, uar_page, csp->uar_page);
MLX5_SET(wq, wq, log_wq_pg_sz, csp->wq_ctrl->buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(wq, wq, dbr_addr, csp->wq_ctrl->db.dma);
@@ -1783,6 +1899,7 @@ int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix,
csp.cqn = sq->cq.mcq.cqn;
csp.wq_ctrl = &sq->wq_ctrl;
csp.min_inline_mode = sq->min_inline_mode;
+ csp.uar_page = c->bfreg->index;
err = mlx5e_create_sq_rdy(c->mdev, param, &csp, qos_queue_group_id, &sq->sqn);
if (err)
goto err_free_txqsq;
@@ -1791,11 +1908,27 @@ int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix,
if (tx_rate)
mlx5e_set_sq_maxrate(c->netdev, sq, tx_rate);
- if (params->tx_dim_enabled)
- sq->state |= BIT(MLX5E_SQ_STATE_DIM);
+ if (sq->channel && !params->tx_dim_enabled) {
+ sq->channel->tx_cq_moder = params->tx_cq_moderation;
+ } else if (sq->channel) {
+ u8 cq_period_mode;
+
+ cq_period_mode = params->tx_moder_use_cqe_mode ?
+ DIM_CQ_PERIOD_MODE_START_FROM_CQE :
+ DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ mlx5e_reset_tx_moderation(&sq->channel->tx_cq_moder,
+ cq_period_mode,
+ params->tx_dim_enabled);
+
+ err = mlx5e_dim_tx_change(sq, params->tx_dim_enabled);
+ if (err)
+ goto err_destroy_sq;
+ }
return 0;
+err_destroy_sq:
+ mlx5e_destroy_sq(c->mdev, sq->sqn);
err_free_txqsq:
mlx5e_free_txqsq(sq);
@@ -1847,7 +1980,8 @@ void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
struct mlx5_core_dev *mdev = sq->mdev;
struct mlx5_rate_limit rl = {0};
- cancel_work_sync(&sq->dim.work);
+ if (sq->dim)
+ cancel_work_sync(&sq->dim->work);
cancel_work_sync(&sq->recover_work);
mlx5e_destroy_sq(mdev, sq->sqn);
if (sq->rate_limit) {
@@ -1863,7 +1997,63 @@ void mlx5e_tx_err_cqe_work(struct work_struct *recover_work)
struct mlx5e_txqsq *sq = container_of(recover_work, struct mlx5e_txqsq,
recover_work);
+ /* Recovering queues means re-enabling NAPI, which requires the netdev
+ * instance lock. However, SQ closing flows have to wait for work tasks
+ * to finish while also holding the netdev instance lock. So either get
+ * the lock or find that the SQ is no longer enabled and thus this work
+ * is not relevant anymore.
+ */
+ while (!netdev_trylock(sq->netdev)) {
+ if (!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))
+ return;
+ msleep(20);
+ }
+
mlx5e_reporter_tx_err_cqe(sq);
+ netdev_unlock(sq->netdev);
+}
+
+static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
+{
+ return (struct dim_cq_moder) {
+ .cq_period_mode = cq_period_mode,
+ .pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS,
+ .usec = cq_period_mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE ?
+ MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE :
+ MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC,
+ };
+}
+
+bool mlx5e_reset_tx_moderation(struct dim_cq_moder *cq_moder, u8 cq_period_mode,
+ bool dim_enabled)
+{
+ bool reset_needed = cq_moder->cq_period_mode != cq_period_mode;
+
+ if (dim_enabled)
+ *cq_moder = net_dim_get_def_tx_moderation(cq_period_mode);
+ else
+ *cq_moder = mlx5e_get_def_tx_moderation(cq_period_mode);
+
+ return reset_needed;
+}
+
+bool mlx5e_reset_tx_channels_moderation(struct mlx5e_channels *chs, u8 cq_period_mode,
+ bool dim_enabled, bool keep_dim_state)
+{
+ bool reset = false;
+ int i, tc;
+
+ for (i = 0; i < chs->num; i++) {
+ for (tc = 0; tc < mlx5e_get_dcb_num_tc(&chs->params); tc++) {
+ if (keep_dim_state)
+ dim_enabled = !!chs->c[i]->sq[tc].dim;
+
+ reset |= mlx5e_reset_tx_moderation(&chs->c[i]->tx_cq_moder,
+ cq_period_mode, dim_enabled);
+ }
+ }
+
+ return reset;
}
static int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
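mlx5e_reset_tx_moderation() above loads either the DIM defaults or the driver's static defaults and reports whether the CQ period mode actually changed, which callers use to decide if the channel CQs must be reprogrammed. A small sketch of that contract; the struct layout and the numeric defaults are simplified stand-ins, not the driver's values:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    enum { PERIOD_MODE_EQE, PERIOD_MODE_CQE };

    struct cq_moder {
    	uint8_t cq_period_mode;
    	uint16_t pkts;
    	uint16_t usec;
    };

    /* Static defaults used when adaptive moderation (DIM) is off; illustrative. */
    static struct cq_moder def_tx_moderation(uint8_t mode)
    {
    	return (struct cq_moder) {
    		.cq_period_mode = mode,
    		.pkts = 128,
    		.usec = mode == PERIOD_MODE_CQE ? 64 : 16,
    	};
    }

    /* Returns true when the period mode changed, i.e. the CQ needs reprogramming. */
    static bool reset_tx_moderation(struct cq_moder *m, uint8_t mode, bool dim_enabled)
    {
    	bool reset_needed = m->cq_period_mode != mode;

    	if (dim_enabled)  /* pretend DIM profile defaults */
    		*m = (struct cq_moder) { .cq_period_mode = mode, .pkts = 64, .usec = 8 };
    	else
    		*m = def_tx_moderation(mode);

    	return reset_needed;
    }

    int main(void)
    {
    	struct cq_moder m = { .cq_period_mode = PERIOD_MODE_EQE };

    	printf("reset needed: %d\n", reset_tx_moderation(&m, PERIOD_MODE_CQE, false));
    	return 0;
    }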
@@ -1880,6 +2070,7 @@ static int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params
csp.cqn = sq->cq.mcq.cqn;
csp.wq_ctrl = &sq->wq_ctrl;
csp.min_inline_mode = params->tx_min_inline_mode;
+ csp.uar_page = c->bfreg->index;
err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn);
if (err)
goto err_free_icosq;
@@ -1940,43 +2131,15 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
csp.cqn = sq->cq.mcq.cqn;
csp.wq_ctrl = &sq->wq_ctrl;
csp.min_inline_mode = sq->min_inline_mode;
+ csp.uar_page = c->bfreg->index;
set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
- if (param->is_xdp_mb)
- set_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state);
-
err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn);
if (err)
goto err_free_xdpsq;
mlx5e_set_xmit_fp(sq, param->is_mpw);
- if (!param->is_mpw && !test_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state)) {
- unsigned int ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT + 1;
- unsigned int inline_hdr_sz = 0;
- int i;
-
- if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
- inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
- ds_cnt++;
- }
-
- /* Pre initialize fixed WQE fields */
- for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
- struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, i);
- struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
- struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
-
- sq->db.wqe_info[i] = (struct mlx5e_xdp_wqe_info) {
- .num_wqebbs = 1,
- .num_pkts = 1,
- };
-
- cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
- eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
- }
- }
-
return 0;
err_free_xdpsq:
@@ -1998,9 +2161,48 @@ void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
mlx5e_free_xdpsq(sq);
}
+static struct mlx5e_xdpsq *mlx5e_open_xdpredirect_sq(struct mlx5e_channel *c,
+ struct mlx5e_params *params,
+ struct mlx5e_channel_param *cparam,
+ struct mlx5e_create_cq_param *ccp)
+{
+ struct mlx5e_xdpsq *xdpsq;
+ int err;
+
+ xdpsq = kvzalloc_node(sizeof(*xdpsq), GFP_KERNEL, cpu_to_node(c->cpu));
+ if (!xdpsq)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlx5e_open_cq(c->mdev, params->tx_cq_moderation,
+ &cparam->xdp_sq.cqp, ccp, &xdpsq->cq);
+ if (err)
+ goto err_free_xdpsq;
+
+ err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL, xdpsq, true);
+ if (err)
+ goto err_close_xdpsq_cq;
+
+ return xdpsq;
+
+err_close_xdpsq_cq:
+ mlx5e_close_cq(&xdpsq->cq);
+err_free_xdpsq:
+ kvfree(xdpsq);
+
+ return ERR_PTR(err);
+}
+
+static void mlx5e_close_xdpredirect_sq(struct mlx5e_xdpsq *xdpsq)
+{
+ mlx5e_close_xdpsq(xdpsq);
+ mlx5e_close_cq(&xdpsq->cq);
+ kvfree(xdpsq);
+}
+
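mlx5e_open_xdpredirect_sq() above allocates the redirect SQ itself and reports failure through the returned pointer (ERR_PTR/PTR_ERR), so callers can tell "not opened" (NULL) apart from an error, and mlx5e_close_xdpredirect_sq() unwinds in reverse order. A minimal user-space sketch of the same encode/decode idiom; this reimplements the kernel's ERR_PTR trick purely for illustration:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Tiny reimplementation of the ERR_PTR idiom: errors live in the top page. */
    static void *err_ptr(long err) { return (void *)(intptr_t)err; }
    static long ptr_err(const void *p) { return (long)(intptr_t)p; }
    static int is_err(const void *p) { return (uintptr_t)p >= (uintptr_t)-4095; }

    struct xdpsq { int sqn; };

    static struct xdpsq *open_xdpredirect_sq(int fail)
    {
    	struct xdpsq *sq = malloc(sizeof(*sq));

    	if (!sq)
    		return err_ptr(-ENOMEM);
    	if (fail) {             /* stands in for CQ/SQ creation failing */
    		free(sq);
    		return err_ptr(-EIO);
    	}
    	sq->sqn = 42;
    	return sq;
    }

    int main(void)
    {
    	struct xdpsq *sq = open_xdpredirect_sq(0);

    	if (is_err(sq)) {
    		printf("open failed: %ld\n", ptr_err(sq));
    		return 1;
    	}
    	printf("opened sqn=%d\n", sq->sqn);
    	free(sq);
    	return 0;
    }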
static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
struct net_device *netdev,
struct workqueue_struct *workqueue,
+ struct mlx5_uars_page *uar,
struct mlx5e_cq_param *param,
struct mlx5e_cq *cq)
{
@@ -2017,7 +2219,6 @@ static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
mcq->set_ci_db = cq->wq_ctrl.db.db;
mcq->arm_db = cq->wq_ctrl.db.db + 1;
*mcq->set_ci_db = 0;
- *mcq->arm_db = 0;
mcq->vector = param->eq_ix;
mcq->comp = mlx5e_completion_event;
mcq->event = mlx5e_cq_error_event;
@@ -2032,6 +2233,7 @@ static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
cq->mdev = mdev;
cq->netdev = netdev;
cq->workqueue = workqueue;
+ cq->uar = uar;
return 0;
}
@@ -2047,7 +2249,8 @@ static int mlx5e_alloc_cq(struct mlx5_core_dev *mdev,
param->wq.db_numa_node = ccp->node;
param->eq_ix = ccp->ix;
- err = mlx5e_alloc_cq_common(mdev, ccp->netdev, ccp->wq, param, cq);
+ err = mlx5e_alloc_cq_common(mdev, ccp->netdev, ccp->wq,
+ ccp->uar, param, cq);
cq->napi = ccp->napi;
cq->ch_stats = ccp->ch_stats;
@@ -2089,9 +2292,10 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
(__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
- MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode);
+ MLX5_SET(cqc, cqc, cq_period_mode, mlx5e_cq_period_mode(param->cq_period_mode));
+
MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
- MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
+ MLX5_SET(cqc, cqc, uar_page, cq->uar->index);
MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
@@ -2127,8 +2331,10 @@ int mlx5e_open_cq(struct mlx5_core_dev *mdev, struct dim_cq_moder moder,
if (err)
goto err_free_cq;
- if (MLX5_CAP_GEN(mdev, cq_moderation))
- mlx5_core_modify_cq_moderation(mdev, &cq->mcq, moder.usec, moder.pkts);
+ if (MLX5_CAP_GEN(mdev, cq_moderation) &&
+ MLX5_CAP_GEN(mdev, cq_period_mode_modify))
+ mlx5e_modify_cq_moderation(mdev, &cq->mcq, moder.usec, moder.pkts,
+ mlx5e_cq_period_mode(moder.cq_period_mode));
return 0;
err_free_cq:
@@ -2143,6 +2349,40 @@ void mlx5e_close_cq(struct mlx5e_cq *cq)
mlx5e_free_cq(cq);
}
+int mlx5e_modify_cq_period_mode(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
+ u8 cq_period_mode)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_cq_in)] = {};
+ void *cqc;
+
+ MLX5_SET(modify_cq_in, in, cqn, cq->cqn);
+ cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);
+ MLX5_SET(cqc, cqc, cq_period_mode, mlx5e_cq_period_mode(cq_period_mode));
+ MLX5_SET(modify_cq_in, in,
+ modify_field_select_resize_field_select.modify_field_select.modify_field_select,
+ MLX5_CQ_MODIFY_PERIOD_MODE);
+
+ return mlx5_core_modify_cq(dev, cq, in, sizeof(in));
+}
+
+int mlx5e_modify_cq_moderation(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
+ u16 cq_period, u16 cq_max_count, u8 cq_period_mode)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_cq_in)] = {};
+ void *cqc;
+
+ MLX5_SET(modify_cq_in, in, cqn, cq->cqn);
+ cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);
+ MLX5_SET(cqc, cqc, cq_period, cq_period);
+ MLX5_SET(cqc, cqc, cq_max_count, cq_max_count);
+ MLX5_SET(cqc, cqc, cq_period_mode, cq_period_mode);
+ MLX5_SET(modify_cq_in, in,
+ modify_field_select_resize_field_select.modify_field_select.modify_field_select,
+ MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT | MLX5_CQ_MODIFY_PERIOD_MODE);
+
+ return mlx5_core_modify_cq(dev, cq, in, sizeof(in));
+}
+
static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
struct mlx5e_params *params,
struct mlx5e_create_cq_param *ccp,
@@ -2351,6 +2591,7 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
struct mlx5e_params *params,
struct mlx5e_channel_param *cparam)
{
+ const struct net_device_ops *netdev_ops = c->netdev->netdev_ops;
struct dim_cq_moder icocq_moder = {0, 0};
struct mlx5e_create_cq_param ccp;
int err;
@@ -2371,15 +2612,18 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
if (err)
goto err_close_icosq_cq;
- err = mlx5e_open_cq(c->mdev, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &ccp,
- &c->xdpsq.cq);
- if (err)
- goto err_close_tx_cqs;
+ if (netdev_ops->ndo_xdp_xmit && c->xdp) {
+ c->xdpsq = mlx5e_open_xdpredirect_sq(c, params, cparam, &ccp);
+ if (IS_ERR(c->xdpsq)) {
+ err = PTR_ERR(c->xdpsq);
+ goto err_close_tx_cqs;
+ }
+ }
err = mlx5e_open_cq(c->mdev, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,
&c->rq.cq);
if (err)
- goto err_close_xdp_tx_cqs;
+ goto err_close_xdpredirect_sq;
err = c->xdp ? mlx5e_open_cq(c->mdev, params->tx_cq_moderation, &cparam->xdp_sq.cqp,
&ccp, &c->rq_xdpsq.cq) : 0;
@@ -2391,7 +2635,7 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
err = mlx5e_open_icosq(c, params, &cparam->async_icosq, &c->async_icosq,
mlx5e_async_icosq_err_cqe_work);
if (err)
- goto err_close_xdpsq_cq;
+ goto err_close_rq_xdpsq_cq;
mutex_init(&c->icosq_recovery_lock);
@@ -2415,16 +2659,8 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
goto err_close_rq;
}
- err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL, &c->xdpsq, true);
- if (err)
- goto err_close_xdp_sq;
-
return 0;
-err_close_xdp_sq:
- if (c->xdp)
- mlx5e_close_xdpsq(&c->rq_xdpsq);
-
err_close_rq:
mlx5e_close_rq(&c->rq);
@@ -2437,15 +2673,16 @@ err_close_icosq:
err_close_async_icosq:
mlx5e_close_icosq(&c->async_icosq);
-err_close_xdpsq_cq:
+err_close_rq_xdpsq_cq:
if (c->xdp)
mlx5e_close_cq(&c->rq_xdpsq.cq);
err_close_rx_cq:
mlx5e_close_cq(&c->rq.cq);
-err_close_xdp_tx_cqs:
- mlx5e_close_cq(&c->xdpsq.cq);
+err_close_xdpredirect_sq:
+ if (c->xdpsq)
+ mlx5e_close_xdpredirect_sq(c->xdpsq);
err_close_tx_cqs:
mlx5e_close_tx_cqs(c);
@@ -2461,7 +2698,6 @@ err_close_async_icosq_cq:
static void mlx5e_close_queues(struct mlx5e_channel *c)
{
- mlx5e_close_xdpsq(&c->xdpsq);
if (c->xdp)
mlx5e_close_xdpsq(&c->rq_xdpsq);
/* The same ICOSQ is used for UMRs for both RQ and XSKRQ. */
@@ -2474,7 +2710,8 @@ static void mlx5e_close_queues(struct mlx5e_channel *c)
if (c->xdp)
mlx5e_close_cq(&c->rq_xdpsq.cq);
mlx5e_close_cq(&c->rq.cq);
- mlx5e_close_cq(&c->xdpsq.cq);
+ if (c->xdpsq)
+ mlx5e_close_xdpredirect_sq(c->xdpsq);
mlx5e_close_tx_cqs(c);
mlx5e_close_cq(&c->icosq.cq);
mlx5e_close_cq(&c->async_icosq.cq);
@@ -2525,13 +2762,27 @@ void mlx5e_trigger_napi_sched(struct napi_struct *napi)
local_bh_enable();
}
+static void mlx5e_channel_pick_doorbell(struct mlx5e_channel *c)
+{
+ struct mlx5e_hw_objs *hw_objs = &c->mdev->mlx5e_res.hw_objs;
+
+ /* No dedicated Ethernet doorbells, use the global one. */
+ if (hw_objs->num_bfregs == 0) {
+ c->bfreg = &c->mdev->priv.bfreg;
+ return;
+ }
+
+ /* Round-robin between doorbells. */
+ c->bfreg = hw_objs->bfregs + c->vec_ix % hw_objs->num_bfregs;
+}
+
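mlx5e_channel_pick_doorbell() above spreads channels over the dedicated Ethernet doorbells with a simple modulo on the vector index, falling back to the device-global doorbell when none are reserved. A standalone sketch of that distribution; the doorbell count is a made-up example, the driver reads it from the device:

    #include <stdio.h>

    /* Round-robin mapping of channel vectors onto N doorbells (-1 means "global"). */
    static int pick_doorbell(int vec_ix, int num_bfregs)
    {
    	if (num_bfregs == 0)
    		return -1;
    	return vec_ix % num_bfregs;
    }

    int main(void)
    {
    	int num_bfregs = 4; /* illustrative */

    	for (int vec_ix = 0; vec_ix < 8; vec_ix++)
    		printf("channel vec %d -> bfreg %d\n",
    		       vec_ix, pick_doorbell(vec_ix, num_bfregs));
    	return 0;
    }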
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct mlx5e_params *params,
- struct mlx5e_channel_param *cparam,
struct xsk_buff_pool *xsk_pool,
struct mlx5e_channel **cp)
{
struct net_device *netdev = priv->netdev;
+ struct mlx5e_channel_param *cparam;
struct mlx5_core_dev *mdev;
struct mlx5e_xsk_param xsk;
struct mlx5e_channel *c;
@@ -2553,12 +2804,18 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
return err;
c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
- if (!c)
- return -ENOMEM;
+ cparam = kvzalloc(sizeof(*cparam), GFP_KERNEL);
+ if (!c || !cparam) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ err = mlx5e_build_channel_param(mdev, params, cparam);
+ if (err)
+ goto err_free;
c->priv = priv;
c->mdev = mdev;
- c->tstamp = &priv->tstamp;
c->ix = ix;
c->vec_ix = vec_ix;
c->sd_ix = mlx5_sd_ch_ix_get_dev_ix(mdev, ix);
@@ -2572,8 +2829,10 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->aff_mask = irq_get_effective_affinity_mask(irq);
c->lag_port = mlx5e_enumerate_lag_port(mdev, ix);
- netif_napi_add(netdev, &c->napi, mlx5e_napi_poll);
- netif_napi_set_irq(&c->napi, irq);
+ mlx5e_channel_pick_doorbell(c);
+
+ netif_napi_add_config_locked(netdev, &c->napi, mlx5e_napi_poll, ix);
+ netif_napi_set_irq_locked(&c->napi, irq);
err = mlx5e_open_queues(c, params, cparam);
if (unlikely(err))
@@ -2588,14 +2847,17 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
*cp = c;
+ kvfree(cparam);
return 0;
err_close_queues:
mlx5e_close_queues(c);
err_napi_del:
- netif_napi_del(&c->napi);
+ netif_napi_del_locked(&c->napi);
+err_free:
+ kvfree(cparam);
kvfree(c);
return err;
@@ -2605,7 +2867,7 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c)
{
int tc;
- napi_enable(&c->napi);
+ napi_enable_locked(&c->napi);
for (tc = 0; tc < c->num_tc; tc++)
mlx5e_activate_txqsq(&c->sq[tc]);
@@ -2637,7 +2899,7 @@ static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
mlx5e_deactivate_txqsq(&c->sq[tc]);
mlx5e_qos_deactivate_queues(c);
- napi_disable(&c->napi);
+ napi_disable_locked(&c->napi);
}
static void mlx5e_close_channel(struct mlx5e_channel *c)
@@ -2646,7 +2908,7 @@ static void mlx5e_close_channel(struct mlx5e_channel *c)
mlx5e_close_xsk(c);
mlx5e_close_queues(c);
mlx5e_qos_close_queues(c);
- netif_napi_del(&c->napi);
+ netif_napi_del_locked(&c->napi);
kvfree(c);
}
@@ -2654,20 +2916,14 @@ static void mlx5e_close_channel(struct mlx5e_channel *c)
int mlx5e_open_channels(struct mlx5e_priv *priv,
struct mlx5e_channels *chs)
{
- struct mlx5e_channel_param *cparam;
int err = -ENOMEM;
int i;
chs->num = chs->params.num_channels;
chs->c = kcalloc(chs->num, sizeof(struct mlx5e_channel *), GFP_KERNEL);
- cparam = kvzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
- if (!chs->c || !cparam)
- goto err_free;
-
- err = mlx5e_build_channel_param(priv->mdev, &chs->params, cparam);
- if (err)
- goto err_free;
+ if (!chs->c)
+ goto err_out;
for (i = 0; i < chs->num; i++) {
struct xsk_buff_pool *xsk_pool = NULL;
@@ -2675,7 +2931,7 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
if (chs->params.xdp_prog)
xsk_pool = mlx5e_xsk_get_pool(&chs->params, chs->params.xsk, i);
- err = mlx5e_open_channel(priv, i, &chs->params, cparam, xsk_pool, &chs->c[i]);
+ err = mlx5e_open_channel(priv, i, &chs->params, xsk_pool, &chs->c[i]);
if (err)
goto err_close_channels;
}
@@ -2693,7 +2949,6 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
}
mlx5e_health_channels_update(priv);
- kvfree(cparam);
return 0;
err_close_ptp:
@@ -2704,9 +2959,8 @@ err_close_channels:
for (i--; i >= 0; i--)
mlx5e_close_channel(chs->c[i]);
-err_free:
kfree(chs->c);
- kvfree(cparam);
+err_out:
chs->num = 0;
return err;
}
@@ -2901,7 +3155,31 @@ int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv)
return err;
}
-static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
+static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv,
+ struct mlx5e_params *params)
+{
+ int ix;
+
+ for (ix = 0; ix < params->num_channels; ix++) {
+ int num_comp_vectors, irq, vec_ix;
+ struct mlx5_core_dev *mdev;
+
+ mdev = mlx5_sd_ch_ix_get_dev(priv->mdev, ix);
+ num_comp_vectors = mlx5_comp_vectors_max(mdev);
+ cpumask_clear(priv->scratchpad.cpumask);
+ vec_ix = mlx5_sd_ch_ix_get_vec_ix(mdev, ix);
+
+ for (irq = vec_ix; irq < num_comp_vectors; irq += params->num_channels) {
+ int cpu = mlx5_comp_vector_get_cpu(mdev, irq);
+
+ cpumask_set_cpu(cpu, priv->scratchpad.cpumask);
+ }
+
+ netif_set_xps_queue(priv->netdev, priv->scratchpad.cpumask, ix);
+ }
+}
+
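The relocated mlx5e_set_default_xps_cpumasks() above builds one CPU mask per channel by striding through the completion vectors with a step equal to the channel count, so each channel collects the CPUs of every vector it would service. A user-space sketch of the striping; here the channel index doubles as its starting vector and the vector-to-CPU table is made up, whereas the driver derives both per device:

    #include <stdio.h>

    int main(void)
    {
    	int num_comp_vectors = 8;
    	int num_channels = 3;
    	/* Hypothetical vector->CPU mapping; queried per IRQ in the driver. */
    	int vector_cpu[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };

    	for (int ch = 0; ch < num_channels; ch++) {
    		printf("channel %d XPS cpus:", ch);
    		for (int irq = ch; irq < num_comp_vectors; irq += num_channels)
    			printf(" %d", vector_cpu[irq]);
    		printf("\n");
    	}
    	return 0;
    }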
+static int mlx5e_update_tc_and_tx_queues(struct mlx5e_priv *priv)
{
struct netdev_tc_txq old_tc_to_txq[TC_MAX_QUEUE], *tc_to_txq;
struct net_device *netdev = priv->netdev;
@@ -2925,22 +3203,10 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
err = mlx5e_update_tx_netdev_queues(priv);
if (err)
goto err_tcs;
- err = netif_set_real_num_rx_queues(netdev, nch);
- if (err) {
- netdev_warn(netdev, "netif_set_real_num_rx_queues failed, %d\n", err);
- goto err_txqs;
- }
+ mlx5e_set_default_xps_cpumasks(priv, &priv->channels.params);
return 0;
-err_txqs:
- /* netif_set_real_num_rx_queues could fail only when nch increased. Only
- * one of nch and ntc is changed in this function. That means, the call
- * to netif_set_real_num_tx_queues below should not fail, because it
- * decreases the number of TX queues.
- */
- WARN_ON_ONCE(netif_set_real_num_tx_queues(netdev, old_num_txqs));
-
err_tcs:
WARN_ON_ONCE(mlx5e_netdev_set_tcs(netdev, old_num_txqs / old_ntc, old_ntc,
old_tc_to_txq));
@@ -2948,42 +3214,32 @@ err_out:
return err;
}
-static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_update_netdev_queues);
-
-static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv,
- struct mlx5e_params *params)
-{
- int ix;
-
- for (ix = 0; ix < params->num_channels; ix++) {
- int num_comp_vectors, irq, vec_ix;
- struct mlx5_core_dev *mdev;
-
- mdev = mlx5_sd_ch_ix_get_dev(priv->mdev, ix);
- num_comp_vectors = mlx5_comp_vectors_max(mdev);
- cpumask_clear(priv->scratchpad.cpumask);
- vec_ix = mlx5_sd_ch_ix_get_vec_ix(mdev, ix);
-
- for (irq = vec_ix; irq < num_comp_vectors; irq += params->num_channels) {
- int cpu = mlx5_comp_vector_get_cpu(mdev, irq);
-
- cpumask_set_cpu(cpu, priv->scratchpad.cpumask);
- }
-
- netif_set_xps_queue(priv->netdev, priv->scratchpad.cpumask, ix);
- }
-}
+MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_update_tc_and_tx_queues);
static int mlx5e_num_channels_changed(struct mlx5e_priv *priv)
{
u16 count = priv->channels.params.num_channels;
+ struct net_device *netdev = priv->netdev;
+ int old_num_rxqs;
int err;
- err = mlx5e_update_netdev_queues(priv);
- if (err)
+ old_num_rxqs = netdev->real_num_rx_queues;
+ err = netif_set_real_num_rx_queues(netdev, count);
+ if (err) {
+ netdev_warn(netdev, "%s: netif_set_real_num_rx_queues failed, %d\n",
+ __func__, err);
return err;
-
- mlx5e_set_default_xps_cpumasks(priv, &priv->channels.params);
+ }
+ err = mlx5e_update_tc_and_tx_queues(priv);
+ if (err) {
+ /* mlx5e_update_tc_and_tx_queues can fail if channels or TCs number increases.
+ * Since channel number changed, it increased. That means, the call to
+ * netif_set_real_num_rx_queues below should not fail, because it
+ * decreases the number of RX queues.
+ */
+ WARN_ON_ONCE(netif_set_real_num_rx_queues(netdev, old_num_rxqs));
+ return err;
+ }
/* This function may be called on attach, before priv->rx_res is created. */
if (priv->rx_res) {
@@ -3011,6 +3267,7 @@ static void mlx5e_build_txq_maps(struct mlx5e_priv *priv)
struct mlx5e_txqsq *sq = &c->sq[tc];
priv->txq2sq[sq->txq_ix] = sq;
+ priv->txq2sq_stats[sq->txq_ix] = sq->stats;
}
}
@@ -3025,6 +3282,7 @@ static void mlx5e_build_txq_maps(struct mlx5e_priv *priv)
struct mlx5e_txqsq *sq = &c->ptpsq[tc].txqsq;
priv->txq2sq[sq->txq_ix] = sq;
+ priv->txq2sq_stats[sq->txq_ix] = sq->stats;
}
out:
@@ -3107,16 +3365,17 @@ static int mlx5e_switch_priv_params(struct mlx5e_priv *priv,
}
}
+ mlx5e_set_xdp_feature(priv);
return 0;
}
static int mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
+ struct mlx5e_channels *old_chs,
struct mlx5e_channels *new_chs,
mlx5e_fp_preactivate preactivate,
void *context)
{
struct net_device *netdev = priv->netdev;
- struct mlx5e_channels old_chs;
int carrier_ok;
int err = 0;
@@ -3125,7 +3384,6 @@ static int mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
mlx5e_deactivate_priv_channels(priv);
- old_chs = priv->channels;
priv->channels = *new_chs;
/* New channels are ready to roll, call the preactivate hook if needed
@@ -3134,12 +3392,14 @@ static int mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
if (preactivate) {
err = preactivate(priv, context);
if (err) {
- priv->channels = old_chs;
+ priv->channels = *old_chs;
goto out;
}
}
- mlx5e_close_channels(&old_chs);
+ mlx5e_set_xdp_feature(priv);
+ if (!MLX5_CAP_GEN(priv->mdev, tis_tir_td_order))
+ mlx5e_close_channels(old_chs);
priv->profile->update_rx(priv);
mlx5e_selq_apply(&priv->selq);
@@ -3158,16 +3418,20 @@ int mlx5e_safe_switch_params(struct mlx5e_priv *priv,
mlx5e_fp_preactivate preactivate,
void *context, bool reset)
{
- struct mlx5e_channels *new_chs;
+ struct mlx5e_channels *old_chs, *new_chs;
int err;
reset &= test_bit(MLX5E_STATE_OPENED, &priv->state);
if (!reset)
return mlx5e_switch_priv_params(priv, params, preactivate, context);
+ old_chs = kzalloc(sizeof(*old_chs), GFP_KERNEL);
new_chs = kzalloc(sizeof(*new_chs), GFP_KERNEL);
- if (!new_chs)
- return -ENOMEM;
+ if (!old_chs || !new_chs) {
+ err = -ENOMEM;
+ goto err_free_chs;
+ }
+
new_chs->params = *params;
mlx5e_selq_prepare_params(&priv->selq, &new_chs->params);
@@ -3176,11 +3440,18 @@ int mlx5e_safe_switch_params(struct mlx5e_priv *priv,
if (err)
goto err_cancel_selq;
- err = mlx5e_switch_priv_channels(priv, new_chs, preactivate, context);
+ *old_chs = priv->channels;
+
+ err = mlx5e_switch_priv_channels(priv, old_chs, new_chs,
+ preactivate, context);
if (err)
goto err_close;
+ if (MLX5_CAP_GEN(priv->mdev, tis_tir_td_order))
+ mlx5e_close_channels(old_chs);
+
kfree(new_chs);
+ kfree(old_chs);
return 0;
err_close:
@@ -3188,7 +3459,9 @@ err_close:
err_cancel_selq:
mlx5e_selq_cancel(&priv->selq);
+err_free_chs:
kfree(new_chs);
+ kfree(old_chs);
return err;
}
@@ -3199,8 +3472,8 @@ int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv)
void mlx5e_timestamp_init(struct mlx5e_priv *priv)
{
- priv->tstamp.tx_type = HWTSTAMP_TX_OFF;
- priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE;
+ priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
+ priv->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
}
static void mlx5e_modify_admin_state(struct mlx5_core_dev *mdev,
@@ -3344,7 +3617,8 @@ static int mlx5e_alloc_drop_cq(struct mlx5e_priv *priv,
param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
param->wq.db_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
- return mlx5e_alloc_cq_common(priv->mdev, priv->netdev, priv->wq, param, cq);
+ return mlx5e_alloc_cq_common(priv->mdev, priv->netdev, priv->wq,
+ mdev->priv.bfreg.up, param, cq);
}
int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
@@ -3516,7 +3790,7 @@ static int mlx5e_setup_tc_mqprio_dcb(struct mlx5e_priv *priv,
mlx5e_params_mqprio_dcb_set(&new_params, tc ? tc : 1);
err = mlx5e_safe_switch_params(priv, &new_params,
- mlx5e_num_channels_changed_ctx, NULL, true);
+ mlx5e_update_tc_and_tx_queues_ctx, NULL, true);
if (!err && priv->mqprio_rl) {
mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
@@ -3617,10 +3891,8 @@ static struct mlx5e_mqprio_rl *mlx5e_mqprio_rl_create(struct mlx5_core_dev *mdev
static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv,
struct tc_mqprio_qopt_offload *mqprio)
{
- mlx5e_fp_preactivate preactivate;
struct mlx5e_params new_params;
struct mlx5e_mqprio_rl *rl;
- bool nch_changed;
int err;
err = mlx5e_mqprio_channel_validate(priv, mqprio);
@@ -3634,10 +3906,8 @@ static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv,
new_params = priv->channels.params;
mlx5e_params_mqprio_channel_set(&new_params, mqprio, rl);
- nch_changed = mlx5e_get_dcb_num_tc(&priv->channels.params) > 1;
- preactivate = nch_changed ? mlx5e_num_channels_changed_ctx :
- mlx5e_update_netdev_queues_ctx;
- err = mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, true);
+ err = mlx5e_safe_switch_params(priv, &new_params,
+ mlx5e_update_tc_and_tx_queues_ctx, NULL, true);
if (err) {
if (rl) {
mlx5e_mqprio_rl_cleanup(rl);
@@ -3661,8 +3931,11 @@ static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
/* MQPRIO is another toplevel qdisc that can't be attached
* simultaneously with the offloaded HTB.
*/
- if (WARN_ON(mlx5e_selq_is_htb_enabled(&priv->selq)))
- return -EINVAL;
+ if (mlx5e_selq_is_htb_enabled(&priv->selq)) {
+ NL_SET_ERR_MSG_MOD(mqprio->extack,
+ "MQPRIO cannot be configured when HTB offload is enabled.");
+ return -EOPNOTSUPP;
+ }
switch (mqprio->mode) {
case TC_MQPRIO_MODE_DCB:
@@ -3753,6 +4026,11 @@ void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s)
s->rx_bytes += rq_stats->bytes;
s->multicast += rq_stats->mcast_packets;
}
+
+#ifdef CONFIG_MLX5_EN_PSP
+ if (priv->psp)
+ s->tx_dropped += atomic_read(&priv->psp->tx_drop);
+#endif
}
void
@@ -3790,7 +4068,8 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
mlx5e_fold_sw_stats64(priv, stats);
}
- stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;
+ stats->rx_missed_errors = priv->stats.qcnt.rx_out_of_buffer;
+ stats->rx_dropped = PPORT_2863_GET(pstats, if_in_discards);
stats->rx_length_errors =
PPORT_802_3_GET(pstats, a_in_range_length_errors) +
@@ -3897,10 +4176,6 @@ static int set_feature_hw_gro(struct net_device *netdev, bool enable)
if (enable) {
new_params.packet_merge.type = MLX5E_PACKET_MERGE_SHAMPO;
- new_params.packet_merge.shampo.match_criteria_type =
- MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_EXTENDED;
- new_params.packet_merge.shampo.alignment_granularity =
- MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_STRIDE;
} else if (new_params.packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
new_params.packet_merge.type = MLX5E_PACKET_MERGE_NONE;
} else {
@@ -3960,6 +4235,47 @@ static int set_feature_rx_all(struct net_device *netdev, bool enable)
return mlx5_set_port_fcs(mdev, !enable);
}
+static struct dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode)
+{
+ return (struct dim_cq_moder) {
+ .cq_period_mode = cq_period_mode,
+ .pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS,
+ .usec = cq_period_mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE ?
+ MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE :
+ MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC,
+ };
+}
+
+bool mlx5e_reset_rx_moderation(struct dim_cq_moder *cq_moder, u8 cq_period_mode,
+ bool dim_enabled)
+{
+ bool reset_needed = cq_moder->cq_period_mode != cq_period_mode;
+
+ if (dim_enabled)
+ *cq_moder = net_dim_get_def_rx_moderation(cq_period_mode);
+ else
+ *cq_moder = mlx5e_get_def_rx_moderation(cq_period_mode);
+
+ return reset_needed;
+}
+
+bool mlx5e_reset_rx_channels_moderation(struct mlx5e_channels *chs, u8 cq_period_mode,
+ bool dim_enabled, bool keep_dim_state)
+{
+ bool reset = false;
+ int i;
+
+ for (i = 0; i < chs->num; i++) {
+ if (keep_dim_state)
+ dim_enabled = !!chs->c[i]->rq.dim;
+
+ reset |= mlx5e_reset_rx_moderation(&chs->c[i]->rx_cq_moder,
+ cq_period_mode, dim_enabled);
+ }
+
+ return reset;
+}
+
static int mlx5e_set_rx_port_ts(struct mlx5_core_dev *mdev, bool enable)
{
u32 in[MLX5_ST_SZ_DW(pcmr_reg)] = {};
@@ -4095,23 +4411,23 @@ static int mlx5e_handle_feature(struct net_device *netdev,
return 0;
}
-void mlx5e_set_xdp_feature(struct net_device *netdev)
+void mlx5e_set_xdp_feature(struct mlx5e_priv *priv)
{
- struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_params *params = &priv->channels.params;
- xdp_features_t val;
+ struct net_device *netdev = priv->netdev;
+ xdp_features_t val = 0;
- if (params->packet_merge.type != MLX5E_PACKET_MERGE_NONE) {
- xdp_clear_features_flag(netdev);
- return;
- }
+ if (netdev->netdev_ops->ndo_bpf &&
+ params->packet_merge.type == MLX5E_PACKET_MERGE_NONE)
+ val = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_XSK_ZEROCOPY |
+ NETDEV_XDP_ACT_RX_SG;
+
+ if (netdev->netdev_ops->ndo_xdp_xmit && params->xdp_prog)
+ val |= NETDEV_XDP_ACT_NDO_XMIT |
+ NETDEV_XDP_ACT_NDO_XMIT_SG;
- val = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
- NETDEV_XDP_ACT_XSK_ZEROCOPY |
- NETDEV_XDP_ACT_RX_SG |
- NETDEV_XDP_ACT_NDO_XMIT |
- NETDEV_XDP_ACT_NDO_XMIT_SG;
- xdp_set_features_flag(netdev, val);
+ xdp_set_features_flag_locked(netdev, val);
}
int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
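mlx5e_set_xdp_feature() above now advertises the XDP feature bits conditionally: the base RX set only while no LRO/HW-GRO packet merge is active, and the NDO_XMIT bits only once an XDP program is attached. A small sketch of that decision table, omitting the ndo_bpf/ndo_xdp_xmit presence checks; the flag values are placeholders for the NETDEV_XDP_ACT_* bits:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins for NETDEV_XDP_ACT_* flags. */
    #define XDP_ACT_BASIC        (1u << 0)
    #define XDP_ACT_REDIRECT     (1u << 1)
    #define XDP_ACT_XSK_ZEROCOPY (1u << 2)
    #define XDP_ACT_RX_SG        (1u << 3)
    #define XDP_ACT_NDO_XMIT     (1u << 4)
    #define XDP_ACT_NDO_XMIT_SG  (1u << 5)

    static uint32_t xdp_features(bool packet_merge_active, bool xdp_prog_attached)
    {
    	uint32_t val = 0;

    	if (!packet_merge_active)
    		val = XDP_ACT_BASIC | XDP_ACT_REDIRECT |
    		      XDP_ACT_XSK_ZEROCOPY | XDP_ACT_RX_SG;

    	if (xdp_prog_attached)
    		val |= XDP_ACT_NDO_XMIT | XDP_ACT_NDO_XMIT_SG;

    	return val;
    }

    int main(void)
    {
    	printf("LRO on,  no prog: 0x%02x\n", xdp_features(true, false));
    	printf("LRO off, no prog: 0x%02x\n", xdp_features(false, false));
    	printf("LRO off, prog:    0x%02x\n", xdp_features(false, true));
    	return 0;
    }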
@@ -4122,13 +4438,19 @@ int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
#define MLX5E_HANDLE_FEATURE(feature, handler) \
mlx5e_handle_feature(netdev, &oper_features, feature, handler)
- err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
- err |= MLX5E_HANDLE_FEATURE(NETIF_F_GRO_HW, set_feature_hw_gro);
+ if (features & (NETIF_F_GRO_HW | NETIF_F_LRO)) {
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_GRO_HW, set_feature_hw_gro);
+ } else {
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_GRO_HW, set_feature_hw_gro);
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
+ }
err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,
set_feature_cvlan_filter);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_hw_tc);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all);
- err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
#ifdef CONFIG_MLX5_EN_ARFS
err |= MLX5E_HANDLE_FEATURE(NETIF_F_NTUPLE, set_feature_arfs);
@@ -4140,9 +4462,6 @@ int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
return -EINVAL;
}
- /* update XDP supported features */
- mlx5e_set_xdp_feature(netdev);
-
return 0;
}
@@ -4169,12 +4488,17 @@ static netdev_features_t mlx5e_fix_uplink_rep_features(struct net_device *netdev
if (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
netdev_warn(netdev, "Disabling HW_VLAN CTAG FILTERING, not supported in switchdev mode\n");
+ features &= ~NETIF_F_HW_MACSEC;
+ if (netdev->features & NETIF_F_HW_MACSEC)
+ netdev_warn(netdev, "Disabling HW MACsec offload, not supported in switchdev mode\n");
+
return features;
}
static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
netdev_features_t features)
{
+ struct netdev_config *cfg = netdev->cfg_pending;
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_vlan_table *vlan;
struct mlx5e_params *params;
@@ -4241,11 +4565,18 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
}
}
+ /* The header-data split ring param requires HW GRO to stay enabled. */
+ if (cfg && cfg->hds_config == ETHTOOL_TCP_DATA_SPLIT_ENABLED &&
+ !(features & NETIF_F_GRO_HW)) {
+ netdev_warn(netdev, "Keeping HW-GRO enabled, TCP header-data split depends on it\n");
+ features |= NETIF_F_GRO_HW;
+ }
+
if (mlx5e_is_uplink_rep(priv)) {
features = mlx5e_fix_uplink_rep_features(netdev, features);
- features |= NETIF_F_NETNS_LOCAL;
+ netdev->netns_immutable = true;
} else {
- features &= ~NETIF_F_NETNS_LOCAL;
+ netdev->netns_immutable = false;
}
mutex_unlock(&priv->state_lock);
@@ -4383,8 +4714,12 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
err = mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, reset);
out:
- netdev->mtu = params->sw_mtu;
+ WRITE_ONCE(netdev->mtu, params->sw_mtu);
mutex_unlock(&priv->state_lock);
+
+ if (!err)
+ netdev_update_features(netdev);
+
return err;
}
@@ -4434,22 +4769,23 @@ static int mlx5e_hwstamp_config_ptp_rx(struct mlx5e_priv *priv, bool ptp_rx)
&new_params.ptp_rx, true);
}
-int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
+int mlx5e_hwtstamp_set(struct mlx5e_priv *priv,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config config;
bool rx_cqe_compress_def;
bool ptp_rx;
int err;
if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz) ||
- (mlx5_clock_get_ptp_index(priv->mdev) == -1))
+ (mlx5_clock_get_ptp_index(priv->mdev) == -1)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Timestamps are not supported on this device");
return -EOPNOTSUPP;
-
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
+ }
/* TX HW timestamp */
- switch (config.tx_type) {
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
case HWTSTAMP_TX_ON:
break;
@@ -4461,7 +4797,7 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
rx_cqe_compress_def = priv->channels.params.rx_cqe_compress_def;
/* RX HW timestamp */
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
ptp_rx = false;
break;
@@ -4480,7 +4816,7 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_NTP_ALL:
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
/* ptp_rx is set if both HW TS is set and CQE
* compression is set
*/
@@ -4493,47 +4829,50 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX))
err = mlx5e_hwstamp_config_no_ptp_rx(priv,
- config.rx_filter != HWTSTAMP_FILTER_NONE);
+ config->rx_filter != HWTSTAMP_FILTER_NONE);
else
err = mlx5e_hwstamp_config_ptp_rx(priv, ptp_rx);
if (err)
goto err_unlock;
- memcpy(&priv->tstamp, &config, sizeof(config));
+ priv->hwtstamp_config = *config;
mutex_unlock(&priv->state_lock);
/* might need to fix some features */
netdev_update_features(priv->netdev);
- return copy_to_user(ifr->ifr_data, &config,
- sizeof(config)) ? -EFAULT : 0;
+ return 0;
err_unlock:
mutex_unlock(&priv->state_lock);
return err;
}
-int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr)
+static int mlx5e_hwtstamp_set_ndo(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config *cfg = &priv->tstamp;
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ return mlx5e_hwtstamp_set(priv, config, extack);
+}
+int mlx5e_hwtstamp_get(struct mlx5e_priv *priv,
+ struct kernel_hwtstamp_config *config)
+{
if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
return -EOPNOTSUPP;
- return copy_to_user(ifr->ifr_data, cfg, sizeof(*cfg)) ? -EFAULT : 0;
+ *config = priv->hwtstamp_config;
+
+ return 0;
}
-static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int mlx5e_hwtstamp_get_ndo(struct net_device *dev,
+ struct kernel_hwtstamp_config *config)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return mlx5e_hwstamp_set(priv, ifr);
- case SIOCGHWTSTAMP:
- return mlx5e_hwstamp_get(priv, ifr);
- default:
- return -EOPNOTSUPP;
- }
+ return mlx5e_hwtstamp_get(priv, config);
}
#ifdef CONFIG_MLX5_ESWITCH
@@ -4738,7 +5077,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
/* Verify if UDP port is being offloaded by HW */
if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, port))
- return features;
+ return vxlan_features_check(skb, features);
#if IS_ENABLED(CONFIG_GENEVE)
/* Support Geneve offload for default UDP port */
@@ -4753,7 +5092,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
}
out:
- /* Disable CSUM and GSO if the udp dport is not offloaded by HW */
+ /* Disable CSUM and GSO if skb cannot be offloaded by HW */
return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
@@ -4764,7 +5103,6 @@ netdev_features_t mlx5e_features_check(struct sk_buff *skb,
struct mlx5e_priv *priv = netdev_priv(netdev);
features = vlan_features_check(skb, features);
- features = vxlan_features_check(skb, features);
/* Validate if the tunneled packet is being offloaded by HW */
if (skb->encapsulation &&
@@ -4781,21 +5119,19 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
struct net_device *netdev = priv->netdev;
int i;
- /* Take rtnl_lock to ensure no change in netdev->real_num_tx_queues
- * through this flow. However, channel closing flows have to wait for
- * this work to finish while holding rtnl lock too. So either get the
- * lock or find that channels are being closed for other reason and
- * this work is not relevant anymore.
+ /* Recovering the TX queues implies re-enabling NAPI, which requires
+ * the netdev instance lock.
+ * However, channel closing flows have to wait for this work to finish
+ * while holding the same lock. So either get the lock or find that
+ * channels are being closed for another reason and this work is not
+ * relevant anymore.
*/
- while (!rtnl_trylock()) {
+ while (!netdev_trylock(netdev)) {
if (!test_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state))
return;
msleep(20);
}
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
- goto unlock;
-
for (i = 0; i < netdev->real_num_tx_queues; i++) {
struct netdev_queue *dev_queue =
netdev_get_tx_queue(netdev, i);
@@ -4809,8 +5145,7 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
break;
}
-unlock:
- rtnl_unlock();
+ netdev_unlock(netdev);
}
static void mlx5e_tx_timeout(struct net_device *dev, unsigned int txqueue)
@@ -4925,11 +5260,9 @@ static int mlx5e_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
u8 mode, setting;
- int err;
- err = mlx5_eswitch_get_vepa(mdev->priv.eswitch, &setting);
- if (err)
- return err;
+ if (mlx5_eswitch_get_vepa(mdev->priv.eswitch, &setting))
+ return -EOPNOTSUPP;
mode = setting ? BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB;
return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
mode,
@@ -4950,10 +5283,7 @@ static int mlx5e_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
if (!br_spec)
return -EINVAL;
- nla_for_each_nested(attr, br_spec, rem) {
- if (nla_type(attr) != IFLA_BRIDGE_MODE)
- continue;
-
+ nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
mode = nla_get_u16(attr);
if (mode > BRIDGE_MODE_VEPA)
return -EINVAL;
@@ -4983,13 +5313,14 @@ const struct net_device_ops mlx5e_netdev_ops = {
.ndo_set_features = mlx5e_set_features,
.ndo_fix_features = mlx5e_fix_features,
.ndo_change_mtu = mlx5e_change_nic_mtu,
- .ndo_eth_ioctl = mlx5e_ioctl,
.ndo_set_tx_maxrate = mlx5e_set_tx_maxrate,
.ndo_features_check = mlx5e_features_check,
.ndo_tx_timeout = mlx5e_tx_timeout,
.ndo_bpf = mlx5e_xdp,
.ndo_xdp_xmit = mlx5e_xdp_xmit,
.ndo_xsk_wakeup = mlx5e_xsk_wakeup,
+ .ndo_hwtstamp_get = mlx5e_hwtstamp_get_ndo,
+ .ndo_hwtstamp_set = mlx5e_hwtstamp_set_ndo,
#ifdef CONFIG_MLX5_EN_ARFS
.ndo_rx_flow_steer = mlx5e_rx_flow_steer,
#endif
@@ -5011,23 +5342,10 @@ const struct net_device_ops mlx5e_netdev_ops = {
#endif
};
-static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
-{
- int i;
-
- /* The supported periods are organized in ascending order */
- for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++)
- if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout)
- break;
-
- return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
-}
-
void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu)
{
struct mlx5e_params *params = &priv->channels.params;
struct mlx5_core_dev *mdev = priv->mdev;
- u8 rx_cq_period_mode;
params->sw_mtu = mtu;
params->hard_mtu = MLX5E_ETH_HARD_MTU;
@@ -5061,13 +5379,16 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
params->packet_merge.timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
/* CQ moderation params */
- rx_cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
- MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
- MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
- params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
- params->tx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
- mlx5e_set_rx_cq_mode_params(params, rx_cq_period_mode);
- mlx5e_set_tx_cq_mode_params(params, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
+ params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation) &&
+ MLX5_CAP_GEN(mdev, cq_period_mode_modify);
+ params->tx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation) &&
+ MLX5_CAP_GEN(mdev, cq_period_mode_modify);
+ params->rx_moder_use_cqe_mode = !!MLX5_CAP_GEN(mdev, cq_period_start_from_cqe);
+ params->tx_moder_use_cqe_mode = false;
+ mlx5e_reset_rx_moderation(&params->rx_cq_moderation, params->rx_moder_use_cqe_mode,
+ params->rx_dim_enabled);
+ mlx5e_reset_tx_moderation(&params->tx_cq_moderation, params->tx_moder_use_cqe_mode,
+ params->tx_dim_enabled);
/* TX inline */
mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
@@ -5120,8 +5441,7 @@ void mlx5e_vxlan_set_netdev_info(struct mlx5e_priv *priv)
priv->nic_info.set_port = mlx5e_vxlan_set_port;
priv->nic_info.unset_port = mlx5e_vxlan_unset_port;
- priv->nic_info.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
- UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN;
+ priv->nic_info.flags = UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN;
priv->nic_info.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN;
/* Don't count the space hard-coded to the IANA port */
priv->nic_info.tables[0].n_entries =
@@ -5141,6 +5461,257 @@ static bool mlx5e_tunnel_any_tx_proto_supported(struct mlx5_core_dev *mdev)
return (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev));
}
+static void mlx5e_get_queue_stats_rx(struct net_device *dev, int i,
+ struct netdev_queue_stats_rx *stats)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_channel_stats *channel_stats;
+ struct mlx5e_rq_stats *xskrq_stats;
+ struct mlx5e_rq_stats *rq_stats;
+
+ if (mlx5e_is_uplink_rep(priv) || !priv->stats_nch)
+ return;
+
+ channel_stats = priv->channel_stats[i];
+ xskrq_stats = &channel_stats->xskrq;
+ rq_stats = &channel_stats->rq;
+
+ stats->packets = rq_stats->packets + xskrq_stats->packets;
+ stats->bytes = rq_stats->bytes + xskrq_stats->bytes;
+ stats->alloc_fail = rq_stats->buff_alloc_err +
+ xskrq_stats->buff_alloc_err;
+}
+
+static void mlx5e_get_queue_stats_tx(struct net_device *dev, int i,
+ struct netdev_queue_stats_tx *stats)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_sq_stats *sq_stats;
+
+ if (!priv->stats_nch)
+ return;
+
+ /* no special case needed for ptp htb etc since txq2sq_stats is kept up
+ * to date for active sq_stats, otherwise get_base_stats takes care of
+ * inactive sqs.
+ */
+ sq_stats = priv->txq2sq_stats[i];
+ stats->packets = sq_stats->packets;
+ stats->bytes = sq_stats->bytes;
+}
+
+static void mlx5e_get_base_stats(struct net_device *dev,
+ struct netdev_queue_stats_rx *rx,
+ struct netdev_queue_stats_tx *tx)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_ptp *ptp_channel;
+ int i, tc;
+
+ if (!mlx5e_is_uplink_rep(priv)) {
+ rx->packets = 0;
+ rx->bytes = 0;
+ rx->alloc_fail = 0;
+
+ for (i = priv->channels.params.num_channels; i < priv->stats_nch; i++) {
+ struct netdev_queue_stats_rx rx_i = {0};
+
+ mlx5e_get_queue_stats_rx(dev, i, &rx_i);
+
+ rx->packets += rx_i.packets;
+ rx->bytes += rx_i.bytes;
+ rx->alloc_fail += rx_i.alloc_fail;
+ }
+
+ /* always report PTP RX stats from base as there is no
+ * corresponding channel to report them under in
+ * mlx5e_get_queue_stats_rx.
+ */
+ if (priv->rx_ptp_opened) {
+ struct mlx5e_rq_stats *rq_stats = &priv->ptp_stats.rq;
+
+ rx->packets += rq_stats->packets;
+ rx->bytes += rq_stats->bytes;
+ }
+ }
+
+ tx->packets = 0;
+ tx->bytes = 0;
+
+ for (i = 0; i < priv->stats_nch; i++) {
+ struct mlx5e_channel_stats *channel_stats = priv->channel_stats[i];
+
+ /* Handle two cases:
+ *
+ * 1. Channels which are active. For these, report only the
+ * deactivated TCs.
+ *
+ * 2. Channels which were deactivated
+ * (i >= priv->channels.params.num_channels). These must have all
+ * of their TCs [0 .. priv->max_opened_tc) examined, because
+ * deactivated channels are not in the range
+ * [0..real_num_tx_queues) and will not have their stats reported
+ * by mlx5e_get_queue_stats_tx.
+ */
+ if (i < priv->channels.params.num_channels)
+ tc = mlx5e_get_dcb_num_tc(&priv->channels.params);
+ else
+ tc = 0;
+
+ for (; tc < priv->max_opened_tc; tc++) {
+ struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[tc];
+
+ tx->packets += sq_stats->packets;
+ tx->bytes += sq_stats->bytes;
+ }
+ }
+
+ /* If PTP TX was opened at some point and has since either:
+ * - been shut down and set to NULL, or
+ * - simply been disabled (bit unset),
+ *
+ * report stats directly from the ptp_stats structures as these queues
+ * are now unavailable and there is no txq index to retrieve these
+ * stats via calls to mlx5e_get_queue_stats_tx.
+ */
+ ptp_channel = priv->channels.ptp;
+ if (priv->tx_ptp_opened && (!ptp_channel || !test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state))) {
+ for (tc = 0; tc < priv->max_opened_tc; tc++) {
+ struct mlx5e_sq_stats *sq_stats = &priv->ptp_stats.sq[tc];
+
+ tx->packets += sq_stats->packets;
+ tx->bytes += sq_stats->bytes;
+ }
+ }
+}
+
+static const struct netdev_stat_ops mlx5e_stat_ops = {
+ .get_queue_stats_rx = mlx5e_get_queue_stats_rx,
+ .get_queue_stats_tx = mlx5e_get_queue_stats_tx,
+ .get_base_stats = mlx5e_get_base_stats,
+};
+
+struct mlx5_qmgmt_data {
+ struct mlx5e_channel *c;
+ struct mlx5e_channel_param cparam;
+};
+
+static int mlx5e_queue_mem_alloc(struct net_device *dev, void *newq,
+ int queue_index)
+{
+ struct mlx5_qmgmt_data *new = (struct mlx5_qmgmt_data *)newq;
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_channels *chs = &priv->channels;
+ struct mlx5e_params params = chs->params;
+ struct mlx5_core_dev *mdev;
+ int err;
+
+ mutex_lock(&priv->state_lock);
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ err = -ENODEV;
+ goto unlock;
+ }
+
+ if (queue_index >= chs->num) {
+ err = -ERANGE;
+ goto unlock;
+ }
+
+ if (MLX5E_GET_PFLAG(&chs->params, MLX5E_PFLAG_TX_PORT_TS) ||
+ chs->params.ptp_rx ||
+ chs->params.xdp_prog ||
+ priv->htb) {
+ netdev_err(priv->netdev,
+ "Cloning channels with Port/rx PTP, XDP or HTB is not supported\n");
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+
+ mdev = mlx5_sd_ch_ix_get_dev(priv->mdev, queue_index);
+ err = mlx5e_build_channel_param(mdev, &params, &new->cparam);
+ if (err)
+ goto unlock;
+
+ err = mlx5e_open_channel(priv, queue_index, &params, NULL, &new->c);
+unlock:
+ mutex_unlock(&priv->state_lock);
+ return err;
+}
+
+static void mlx5e_queue_mem_free(struct net_device *dev, void *mem)
+{
+ struct mlx5_qmgmt_data *data = (struct mlx5_qmgmt_data *)mem;
+
+ /* Not expected to happen since mlx5e_queue_start never fails,
+ * but free the channel here just in case.
+ */
+ if (data->c)
+ mlx5e_close_channel(data->c);
+}
+
+static int mlx5e_queue_stop(struct net_device *dev, void *oldq, int queue_index)
+{
+ /* In mlx5 a txq cannot be simply stopped in isolation, only restarted.
+ * mlx5e_queue_start does not fail, so the old queue is stopped there instead.
+ * TODO: Improve this.
+ */
+ return 0;
+}
+
+static int mlx5e_queue_start(struct net_device *dev, void *newq,
+ int queue_index)
+{
+ struct mlx5_qmgmt_data *new = (struct mlx5_qmgmt_data *)newq;
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_channel *old;
+
+ mutex_lock(&priv->state_lock);
+
+ /* stop and close the old */
+ old = priv->channels.c[queue_index];
+ mlx5e_deactivate_priv_channels(priv);
+ /* close old before activating new, to avoid napi conflict */
+ mlx5e_close_channel(old);
+
+ /* start the new */
+ priv->channels.c[queue_index] = new->c;
+ mlx5e_activate_priv_channels(priv);
+ mutex_unlock(&priv->state_lock);
+ return 0;
+}
+
+static struct device *mlx5e_queue_get_dma_dev(struct net_device *dev,
+ int queue_index)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_channels *channels;
+ struct device *pdev = NULL;
+ struct mlx5e_channel *ch;
+
+ channels = &priv->channels;
+
+ mutex_lock(&priv->state_lock);
+
+ if (queue_index >= channels->num)
+ goto out;
+
+ ch = channels->c[queue_index];
+ pdev = ch->pdev;
+out:
+ mutex_unlock(&priv->state_lock);
+
+ return pdev;
+}
+
+static const struct netdev_queue_mgmt_ops mlx5e_queue_mgmt_ops = {
+ .ndo_queue_mem_size = sizeof(struct mlx5_qmgmt_data),
+ .ndo_queue_mem_alloc = mlx5e_queue_mem_alloc,
+ .ndo_queue_mem_free = mlx5e_queue_mem_free,
+ .ndo_queue_start = mlx5e_queue_start,
+ .ndo_queue_stop = mlx5e_queue_stop,
+ .ndo_queue_get_dma_dev = mlx5e_queue_get_dma_dev,
+};
+
static void mlx5e_build_nic_netdev(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -5151,13 +5722,17 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
SET_NETDEV_DEV(netdev, mdev->device);
netdev->netdev_ops = &mlx5e_netdev_ops;
+ netdev->queue_mgmt_ops = &mlx5e_queue_mgmt_ops;
netdev->xdp_metadata_ops = &mlx5e_xdp_metadata_ops;
netdev->xsk_tx_metadata_ops = &mlx5e_xsk_tx_metadata_ops;
+ netdev->request_ops_lock = true;
+ netdev_lockdep_set_classes(netdev);
mlx5e_dcbnl_build_netdev(netdev);
netdev->watchdog_timeo = 15 * HZ;
+ netdev->stat_ops = &mlx5e_stat_ops;
netdev->ethtool_ops = &mlx5e_ethtool_ops;
netdev->vlan_features |= NETIF_F_SG;
@@ -5190,6 +5765,11 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
MLX5E_MPWRQ_UMR_MODE_ALIGNED))
netdev->vlan_features |= NETIF_F_LRO;
+ if (mlx5e_hw_gro_supported(mdev) &&
+ mlx5e_check_fragmented_striding_rq_cap(mdev, PAGE_SHIFT,
+ MLX5E_MPWRQ_UMR_MODE_ALIGNED))
+ netdev->vlan_features |= NETIF_F_GRO_HW;
+
netdev->hw_features = netdev->vlan_features;
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
@@ -5262,8 +5842,10 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
netdev->hw_features |= NETIF_F_HW_TC;
#endif
-#ifdef CONFIG_MLX5_EN_ARFS
+#if IS_ENABLED(CONFIG_MLX5_EN_ARFS)
netdev->hw_features |= NETIF_F_NTUPLE;
+#elif IS_ENABLED(CONFIG_MLX5_EN_RXNFC)
+ netdev->features |= NETIF_F_NTUPLE;
#endif
}
@@ -5272,8 +5854,10 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->priv_flags |= IFF_UNICAST_FLT;
+ netdev->netmem_tx = true;
+
netif_set_tso_max_size(netdev, GSO_MAX_SIZE);
- mlx5e_set_xdp_feature(netdev);
+ mlx5e_set_xdp_feature(priv);
mlx5e_set_netdev_dev_addr(netdev);
mlx5e_macsec_build_netdev(priv);
mlx5e_ipsec_build_netdev(priv);
@@ -5353,6 +5937,10 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
}
priv->fs = fs;
+ err = mlx5e_psp_init(priv);
+ if (err)
+ mlx5_core_err(mdev, "PSP initialization failed, %d\n", err);
+
err = mlx5e_ktls_init(priv);
if (err)
mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
@@ -5365,8 +5953,9 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
if (take_rtnl)
rtnl_lock();
+ mlx5e_psp_register(priv);
/* update XDP supported features */
- mlx5e_set_xdp_feature(netdev);
+ mlx5e_set_xdp_feature(priv);
if (take_rtnl)
rtnl_unlock();
@@ -5377,7 +5966,9 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
mlx5e_health_destroy_reporters(priv);
+ mlx5e_psp_unregister(priv);
mlx5e_ktls_cleanup(priv);
+ mlx5e_psp_cleanup(priv);
mlx5e_fs_cleanup(priv->fs);
debugfs_remove_recursive(priv->dfs_root);
priv->fs = NULL;
@@ -5437,7 +6028,7 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
err_tc_nic_cleanup:
mlx5e_tc_nic_cleanup(priv);
err_destroy_flow_steering:
- mlx5e_destroy_flow_steering(priv->fs, !!(priv->netdev->hw_features & NETIF_F_NTUPLE),
+ mlx5e_destroy_flow_steering(priv->fs, mlx5e_fs_has_arfs(priv->netdev),
priv->profile);
err_destroy_rx_res:
mlx5e_rx_res_destroy(priv->rx_res);
@@ -5453,7 +6044,7 @@ static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
{
mlx5e_accel_cleanup_rx(priv);
mlx5e_tc_nic_cleanup(priv);
- mlx5e_destroy_flow_steering(priv->fs, !!(priv->netdev->hw_features & NETIF_F_NTUPLE),
+ mlx5e_destroy_flow_steering(priv->fs, mlx5e_fs_has_arfs(priv->netdev),
priv->profile);
mlx5e_rx_res_destroy(priv->rx_res);
priv->rx_res = NULL;
@@ -5518,6 +6109,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
if (mlx5e_monitor_counter_supported(priv))
mlx5e_monitor_counter_init(priv);
+ mlx5e_pcie_cong_event_init(priv);
mlx5e_hv_vhca_stats_create(priv);
if (netdev->reg_state != NETREG_REGISTERED)
return;
@@ -5526,9 +6118,11 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
mlx5e_nic_set_rx_mode(priv);
rtnl_lock();
+ netdev_lock(netdev);
if (netif_running(netdev))
mlx5e_open(netdev);
udp_tunnel_nic_reset_ntf(priv->netdev);
+ netdev_unlock(netdev);
netif_device_attach(netdev);
rtnl_unlock();
}
@@ -5541,23 +6135,27 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
mlx5e_dcbnl_delete_app(priv);
rtnl_lock();
+ netdev_lock(priv->netdev);
if (netif_running(priv->netdev))
mlx5e_close(priv->netdev);
netif_device_detach(priv->netdev);
+ if (priv->en_trap) {
+ mlx5e_deactivate_trap(priv);
+ mlx5e_close_trap(priv->en_trap);
+ priv->en_trap = NULL;
+ }
+ netdev_unlock(priv->netdev);
rtnl_unlock();
mlx5e_nic_set_rx_mode(priv);
+ mlx5e_pcie_cong_event_cleanup(priv);
mlx5e_hv_vhca_stats_destroy(priv);
if (mlx5e_monitor_counter_supported(priv))
mlx5e_monitor_counter_cleanup(priv);
+ mlx5e_ipsec_disable_events(priv);
mlx5e_disable_blocking_events(priv);
- if (priv->en_trap) {
- mlx5e_deactivate_trap(priv);
- mlx5e_close_trap(priv->en_trap);
- priv->en_trap = NULL;
- }
mlx5e_disable_async_events(priv);
mlx5_lag_remove_netdev(mdev, priv->netdev);
mlx5_vxlan_reset_to_default(mdev->vxlan);
@@ -5565,9 +6163,9 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
mlx5e_ipsec_cleanup(priv);
}
-int mlx5e_update_nic_rx(struct mlx5e_priv *priv)
+static int mlx5e_update_nic_rx(struct mlx5e_priv *priv)
{
- return mlx5e_refresh_tirs(priv, false, false);
+ return mlx5e_refresh_tirs(priv->mdev, false, false);
}
static const struct mlx5e_profile mlx5e_nic_profile = {
@@ -5688,19 +6286,32 @@ int mlx5e_priv_init(struct mlx5e_priv *priv,
if (!priv->txq2sq)
goto err_destroy_workqueue;
+ priv->txq2sq_stats = kcalloc_node(num_txqs, sizeof(*priv->txq2sq_stats), GFP_KERNEL, node);
+ if (!priv->txq2sq_stats)
+ goto err_free_txq2sq;
+
priv->tx_rates = kcalloc_node(num_txqs, sizeof(*priv->tx_rates), GFP_KERNEL, node);
if (!priv->tx_rates)
- goto err_free_txq2sq;
+ goto err_free_txq2sq_stats;
priv->channel_stats =
kcalloc_node(nch, sizeof(*priv->channel_stats), GFP_KERNEL, node);
if (!priv->channel_stats)
goto err_free_tx_rates;
+ priv->fec_ranges = kcalloc(ETHTOOL_FEC_HIST_MAX,
+ sizeof(*priv->fec_ranges), GFP_KERNEL);
+ if (!priv->fec_ranges)
+ goto err_free_channel_stats;
+
return 0;
+err_free_channel_stats:
+ kfree(priv->channel_stats);
err_free_tx_rates:
kfree(priv->tx_rates);
+err_free_txq2sq_stats:
+ kfree(priv->txq2sq_stats);
err_free_txq2sq:
kfree(priv->txq2sq);
err_destroy_workqueue:
@@ -5720,10 +6331,12 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
if (!priv->mdev)
return;
+ kfree(priv->fec_ranges);
for (i = 0; i < priv->stats_nch; i++)
kvfree(priv->channel_stats[i]);
kfree(priv->channel_stats);
kfree(priv->tx_rates);
+ kfree(priv->txq2sq_stats);
kfree(priv->txq2sq);
destroy_workqueue(priv->wq);
mlx5e_selq_cleanup(&priv->selq);
@@ -5733,6 +6346,11 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
kfree(priv->htb_qos_sq_stats[i]);
kvfree(priv->htb_qos_sq_stats);
+ if (priv->mqprio_rl) {
+ mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
+ mlx5e_mqprio_rl_free(priv->mqprio_rl);
+ }
+
memset(priv, 0, sizeof(*priv));
}
@@ -5800,7 +6418,9 @@ static void mlx5e_update_features(struct net_device *netdev)
return; /* features will be updated on netdev registration */
rtnl_lock();
+ netdev_lock(netdev);
netdev_update_features(netdev);
+ netdev_unlock(netdev);
rtnl_unlock();
}
@@ -5811,7 +6431,7 @@ static void mlx5e_reset_channels(struct net_device *netdev)
int mlx5e_attach_netdev(struct mlx5e_priv *priv)
{
- const bool take_rtnl = priv->netdev->reg_state == NETREG_REGISTERED;
+ const bool need_lock = priv->netdev->reg_state == NETREG_REGISTERED;
const struct mlx5e_profile *profile = priv->profile;
int max_nch;
int err;
@@ -5853,15 +6473,19 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
* 2. Set our default XPS cpumask.
* 3. Build the RQT.
*
- * rtnl_lock is required by netif_set_real_num_*_queues in case the
+ * Locking is required by netif_set_real_num_*_queues in case the
* netdev has been registered by this point (if this function was called
* in the reload or resume flow).
*/
- if (take_rtnl)
+ if (need_lock) {
rtnl_lock();
+ netdev_lock(priv->netdev);
+ }
err = mlx5e_num_channels_changed(priv);
- if (take_rtnl)
+ if (need_lock) {
+ netdev_unlock(priv->netdev);
rtnl_unlock();
+ }
if (err)
goto out;
@@ -6058,7 +6682,7 @@ static int mlx5e_resume(struct auxiliary_device *adev)
return 0;
}
-static int _mlx5e_suspend(struct auxiliary_device *adev)
+static int _mlx5e_suspend(struct auxiliary_device *adev, bool pre_netdev_reg)
{
struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev);
struct mlx5e_priv *priv = mlx5e_dev->priv;
@@ -6067,7 +6691,7 @@ static int _mlx5e_suspend(struct auxiliary_device *adev)
struct mlx5_core_dev *pos;
int i;
- if (!netif_device_present(netdev)) {
+ if (!pre_netdev_reg && !netif_device_present(netdev)) {
if (test_bit(MLX5E_STATE_DESTROYING, &priv->state))
mlx5_sd_for_each_dev(i, mdev, pos)
mlx5e_destroy_mdev_resources(pos);
@@ -6090,7 +6714,7 @@ static int mlx5e_suspend(struct auxiliary_device *adev, pm_message_t state)
actual_adev = mlx5_sd_get_adev(mdev, adev, edev->idx);
if (actual_adev)
- err = _mlx5e_suspend(actual_adev);
+ err = _mlx5e_suspend(actual_adev, false);
mlx5_sd_cleanup(mdev);
return err;
@@ -6157,7 +6781,7 @@ static int _mlx5e_probe(struct auxiliary_device *adev)
return 0;
err_resume:
- _mlx5e_suspend(adev);
+ _mlx5e_suspend(adev, true);
err_profile_cleanup:
profile->cleanup(priv);
err_destroy_netdev:
@@ -6196,9 +6820,27 @@ static void _mlx5e_remove(struct auxiliary_device *adev)
mlx5_core_uplink_netdev_set(mdev, NULL);
mlx5e_dcbnl_delete_app(priv);
- unregister_netdev(priv->netdev);
- _mlx5e_suspend(adev);
- priv->profile->cleanup(priv);
+ /* When unloading the driver, the netdev is still registered
+ * if it came from legacy mode. If it came from switchdev mode,
+ * it was already unregistered before changing to the NIC profile.
+ */
+ if (priv->netdev->reg_state == NETREG_REGISTERED) {
+ mlx5e_psp_unregister(priv);
+ unregister_netdev(priv->netdev);
+ _mlx5e_suspend(adev, false);
+ } else {
+ struct mlx5_core_dev *pos;
+ int i;
+
+ if (test_bit(MLX5E_STATE_DESTROYING, &priv->state))
+ mlx5_sd_for_each_dev(i, mdev, pos)
+ mlx5e_destroy_mdev_resources(pos);
+ else
+ _mlx5e_suspend(adev, true);
+ }
+ /* Avoid cleanup if profile rollback failed. */
+ if (priv->profile)
+ priv->profile->cleanup(priv);
mlx5e_destroy_netdev(priv);
mlx5e_devlink_port_unregister(mlx5e_dev);
mlx5e_destroy_devlink(mlx5e_dev);
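
[Editorial note, not part of the patch] The netdev_queue_mgmt_ops added above imply a fixed ordering: resources for the replacement channel are allocated and the channel opened in ndo_queue_mem_alloc, the swap happens in ndo_queue_start (which also closes the old channel, since mlx5 cannot stop a txq in isolation), and ndo_queue_mem_free only has to clean up if the new channel was never started. A minimal, driver-agnostic sketch of a caller driving these ops follows; it is illustrative only, assumes <linux/netdevice.h> and <linux/slab.h>, and is not the actual core restart code.

static int example_queue_restart(struct net_device *dev, int idx)
{
	const struct netdev_queue_mgmt_ops *ops = dev->queue_mgmt_ops;
	void *new_mem, *old_mem;
	int err;

	new_mem = kzalloc(ops->ndo_queue_mem_size, GFP_KERNEL);
	old_mem = kzalloc(ops->ndo_queue_mem_size, GFP_KERNEL);
	if (!new_mem || !old_mem) {
		err = -ENOMEM;
		goto out;
	}

	/* Allocate and open the replacement queue first. */
	err = ops->ndo_queue_mem_alloc(dev, new_mem, idx);
	if (err)
		goto out;

	/* Stop the old queue (a no-op for mlx5), then swap in the new one. */
	err = ops->ndo_queue_stop(dev, old_mem, idx);
	if (err)
		goto out_free_new;

	err = ops->ndo_queue_start(dev, new_mem, idx);
	if (err)
		goto out_free_new;

	ops->ndo_queue_mem_free(dev, old_mem);
	goto out;

out_free_new:
	ops->ndo_queue_mem_free(dev, new_mem);
out:
	kfree(new_mem);
	kfree(old_mem);
	return err;
}
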
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 05527418fa64..ee9595109649 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -30,8 +30,10 @@
* SOFTWARE.
*/
+#include <linux/dim.h>
#include <linux/debugfs.h>
#include <linux/mlx5/fs.h>
+#include <net/netdev_lock.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
@@ -40,6 +42,7 @@
#include "eswitch.h"
#include "en.h"
+#include "en/dim.h"
#include "en_rep.h"
#include "en/params.h"
#include "en/txrx.h"
@@ -63,6 +66,7 @@
#define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
#define MLX5E_REP_PARAMS_DEF_NUM_CHANNELS 1
+#define MLX5E_REP_PARAMS_DEF_LOG_RQ_SIZE 0x8
static const char mlx5e_rep_driver_name[] = "mlx5e_rep";
@@ -135,9 +139,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw_rep)
int i;
for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- sw_rep_stats_desc[i].format);
- return idx;
+ ethtool_puts(data, sw_rep_stats_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw_rep)
@@ -145,9 +147,9 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw_rep)
int i;
for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
- sw_rep_stats_desc, i);
- return idx;
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_CPU(&priv->stats.sw,
+ sw_rep_stats_desc, i));
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw_rep)
@@ -176,11 +178,9 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport_rep)
int i;
for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_rep_stats_desc[i].format);
+ ethtool_puts(data, vport_rep_stats_desc[i].format);
for (i = 0; i < NUM_VPORT_REP_LOOPBACK_COUNTERS(priv->mdev); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- vport_rep_loopback_stats_desc[i].format);
- return idx;
+ ethtool_puts(data, vport_rep_loopback_stats_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport_rep)
@@ -188,12 +188,14 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport_rep)
int i;
for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.rep_stats,
- vport_rep_stats_desc, i);
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_CPU(&priv->stats.rep_stats,
+ vport_rep_stats_desc, i));
for (i = 0; i < NUM_VPORT_REP_LOOPBACK_COUNTERS(priv->mdev); i++)
- data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.rep_stats,
- vport_rep_loopback_stats_desc, i);
- return idx;
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR64_CPU(&priv->stats.rep_stats,
+ vport_rep_loopback_stats_desc, i));
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport_rep)
@@ -275,8 +277,42 @@ out:
kvfree(out);
}
+static int mlx5e_rep_query_aggr_q_counter(struct mlx5_core_dev *dev, int vport, void *out)
+{
+ u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {};
+
+ MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
+ MLX5_SET(query_q_counter_in, in, other_vport, 1);
+ MLX5_SET(query_q_counter_in, in, vport_number, vport);
+ MLX5_SET(query_q_counter_in, in, aggregate, 1);
+
+ return mlx5_cmd_exec_inout(dev, query_q_counter, in, out);
+}
+
+static void mlx5e_rep_update_vport_q_counter(struct mlx5e_priv *priv)
+{
+ struct mlx5e_rep_stats *rep_stats = &priv->stats.rep_stats;
+ u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {};
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
+ int err;
+
+ if (!MLX5_CAP_GEN(priv->mdev, q_counter_other_vport) ||
+ !MLX5_CAP_GEN(priv->mdev, q_counter_aggregation))
+ return;
+
+ err = mlx5e_rep_query_aggr_q_counter(priv->mdev, rep->vport, out);
+ if (err) {
+ netdev_warn(priv->netdev, "failed reading stats on vport %d, error %d\n",
+ rep->vport, err);
+ return;
+ }
+
+ rep_stats->rx_vport_out_of_buffer = MLX5_GET(query_q_counter_out, out, out_of_buffer);
+}
+
static void mlx5e_rep_get_strings(struct net_device *dev,
- u32 stringset, uint8_t *data)
+ u32 stringset, u8 *data)
{
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -326,7 +362,7 @@ mlx5e_rep_set_ringparam(struct net_device *dev,
{
struct mlx5e_priv *priv = netdev_priv(dev);
- return mlx5e_ethtool_set_ringparam(priv, param);
+ return mlx5e_ethtool_set_ringparam(priv, param, extack);
}
static void mlx5e_rep_get_channels(struct net_device *dev,
@@ -352,7 +388,7 @@ static int mlx5e_rep_get_coalesce(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal);
+ return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal, extack);
}
static int mlx5e_rep_set_coalesce(struct net_device *netdev,
@@ -394,6 +430,8 @@ static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
.set_channels = mlx5e_rep_set_channels,
.get_coalesce = mlx5e_rep_get_coalesce,
.set_coalesce = mlx5e_rep_set_coalesce,
+ .get_per_queue_coalesce = mlx5e_get_per_queue_coalesce,
+ .set_per_queue_coalesce = mlx5e_set_per_queue_coalesce,
.get_rxfh_key_size = mlx5e_rep_get_rxfh_key_size,
.get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
};
@@ -564,7 +602,8 @@ mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
if (c->xdp)
sqs[num_sqs++] = c->rq_xdpsq.sqn;
- sqs[num_sqs++] = c->xdpsq.sqn;
+ if (c->xdpsq)
+ sqs[num_sqs++] = c->xdpsq->sqn;
}
}
if (ptp_sq) {
@@ -765,6 +804,7 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = {
.ndo_stop = mlx5e_rep_close,
.ndo_start_xmit = mlx5e_xmit,
.ndo_setup_tc = mlx5e_rep_setup_tc,
+ .ndo_set_mac_address = eth_mac_addr,
.ndo_get_stats64 = mlx5e_rep_get_stats,
.ndo_has_offload_stats = mlx5e_rep_has_offload_stats,
.ndo_get_offload_stats = mlx5e_rep_get_offload_stats,
@@ -804,10 +844,6 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_params *params;
- u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
- MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
- MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
-
params = &priv->channels.params;
params->num_channels = MLX5E_REP_PARAMS_DEF_NUM_CHANNELS;
@@ -822,6 +858,8 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
/* RQ */
mlx5e_build_rq_params(mdev, params);
+ if (!mlx5e_is_uplink_rep(priv) && mlx5_core_is_ecpf(mdev))
+ params->log_rq_mtu_frames = MLX5E_REP_PARAMS_DEF_LOG_RQ_SIZE;
/* If netdev is already registered (e.g. move from nic profile to uplink,
* RTNL lock must be held before triggering netdev notifiers.
@@ -829,13 +867,13 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
if (take_rtnl)
rtnl_lock();
/* update XDP supported features */
- mlx5e_set_xdp_feature(netdev);
+ mlx5e_set_xdp_feature(priv);
if (take_rtnl)
rtnl_unlock();
/* CQ moderation params */
params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
- mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
+ params->rx_moder_use_cqe_mode = !!MLX5_CAP_GEN(mdev, cq_period_start_from_cqe);
params->mqprio.num_tc = 1;
if (rep->vport != MLX5_VPORT_UPLINK)
@@ -849,10 +887,14 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev,
{
SET_NETDEV_DEV(netdev, mdev->device);
netdev->netdev_ops = &mlx5e_netdev_ops_rep;
+ netdev->request_ops_lock = true;
+ netdev_lockdep_set_classes(netdev);
eth_hw_addr_random(netdev);
netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;
netdev->watchdog_timeo = 15 * HZ;
+ if (mlx5_core_is_ecpf(mdev))
+ netdev->tx_queue_len = 1 << MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE;
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
netdev->hw_features |= NETIF_F_HW_TC;
@@ -866,7 +908,8 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev,
netdev->hw_features |= NETIF_F_RXCSUM;
netdev->features |= netdev->hw_features;
- netdev->features |= NETIF_F_NETNS_LOCAL;
+
+ netdev->netns_immutable = true;
}
static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
@@ -931,7 +974,7 @@ static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
MLX5_FLOW_NAMESPACE_KERNEL), false);
/* The inner_ttc in the ttc params is intentionally not set */
- mlx5e_set_ttc_params(priv->fs, priv->rx_res, &ttc_params, false);
+ mlx5e_set_ttc_params(priv->fs, priv->rx_res, &ttc_params, false, false);
if (rep->vport != MLX5_VPORT_UPLINK)
/* To give uplink rep TTC a lower level for chaining from root ft */
@@ -1231,6 +1274,12 @@ static int mlx5e_update_rep_rx(struct mlx5e_priv *priv)
return 0;
}
+static void mlx5e_rep_stats_update_ndo_stats(struct mlx5e_priv *priv)
+{
+ mlx5e_stats_update_ndo_stats(priv);
+ mlx5e_rep_update_vport_q_counter(priv);
+}
+
static int mlx5e_rep_event_mpesw(struct mlx5e_priv *priv)
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
@@ -1299,9 +1348,11 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
netdev->wanted_features |= NETIF_F_HW_TC;
rtnl_lock();
+ netdev_lock(netdev);
if (netif_running(netdev))
mlx5e_open(netdev);
udp_tunnel_nic_reset_ntf(priv->netdev);
+ netdev_unlock(netdev);
netif_device_attach(netdev);
rtnl_unlock();
}
@@ -1311,9 +1362,11 @@ static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
struct mlx5_core_dev *mdev = priv->mdev;
rtnl_lock();
+ netdev_lock(priv->netdev);
if (netif_running(priv->netdev))
mlx5e_close(priv->netdev);
netif_device_detach(priv->netdev);
+ netdev_unlock(priv->netdev);
rtnl_unlock();
mlx5e_rep_bridge_cleanup(priv);
@@ -1394,11 +1447,11 @@ static void mlx5e_rep_vnic_reporter_create(struct mlx5e_priv *priv,
reporter = devl_port_health_reporter_create(dl_port,
&mlx5_rep_vnic_reporter_ops,
- 0, rpriv);
+ rpriv);
if (IS_ERR(reporter)) {
mlx5_core_err(priv->mdev,
- "Failed to create representor vnic reporter, err = %ld\n",
- PTR_ERR(reporter));
+ "Failed to create representor vnic reporter, err = %pe\n",
+ reporter);
return;
}
@@ -1423,7 +1476,7 @@ static const struct mlx5e_profile mlx5e_rep_profile = {
.enable = mlx5e_rep_enable,
.disable = mlx5e_rep_disable,
.update_rx = mlx5e_update_rep_rx,
- .update_stats = mlx5e_stats_update_ndo_stats,
+ .update_stats = mlx5e_rep_stats_update_ndo_stats,
.rx_handlers = &mlx5e_rx_handlers_rep,
.max_tc = 1,
.stats_grps = mlx5e_rep_stats_grps,
@@ -1453,12 +1506,21 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
static int
mlx5e_vport_uplink_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
- struct mlx5e_priv *priv = netdev_priv(mlx5_uplink_netdev_get(dev));
struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
+ struct net_device *netdev;
+ struct mlx5e_priv *priv;
+ int err;
+
+ netdev = mlx5_uplink_netdev_get(dev);
+ if (!netdev)
+ return 0;
+ priv = netdev_priv(netdev);
rpriv->netdev = priv->netdev;
- return mlx5e_netdev_change_profile(priv, &mlx5e_uplink_rep_profile,
- rpriv);
+ err = mlx5e_netdev_change_profile(priv, &mlx5e_uplink_rep_profile,
+ rpriv);
+ mlx5_uplink_netdev_put(dev, netdev);
+ return err;
}
static void
@@ -1469,6 +1531,21 @@ mlx5e_vport_uplink_rep_unload(struct mlx5e_rep_priv *rpriv)
priv = netdev_priv(netdev);
+ /* This bit is set when using devlink to change the eswitch mode from
+ * switchdev to legacy. Since the uplink netdev ifindex must be kept,
+ * we only detach the uplink representor profile and attach the NIC
+ * profile; the netdev is unregistered later, when the NIC auxiliary
+ * driver is unloaded.
+ * Devlink eswitch mode changes are explicitly blocked if any IPSec
+ * rules are offloaded, but other cases, such as driver unload and
+ * devlink reload, cannot be blocked. For those cases the netdev must
+ * be unregistered before the profile change, to avoid a resource leak:
+ * the offloaded rules would otherwise have no chance to be unoffloaded
+ * before the cleanup triggered by detaching the uplink representor
+ * profile.
+ */
+ if (!(priv->mdev->priv.flags & MLX5_PRIV_FLAGS_SWITCH_LEGACY))
+ unregister_netdev(netdev);
+
mlx5e_netdev_attach_nic_profile(priv);
}
@@ -1570,8 +1647,16 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
{
struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
struct net_device *netdev = rpriv->netdev;
- struct mlx5e_priv *priv = netdev_priv(netdev);
- void *ppriv = priv->ppriv;
+ struct mlx5e_priv *priv;
+ void *ppriv;
+
+ if (!netdev) {
+ ppriv = rpriv;
+ goto free_ppriv;
+ }
+
+ priv = netdev_priv(netdev);
+ ppriv = priv->ppriv;
if (rep->vport == MLX5_VPORT_UPLINK) {
mlx5e_vport_uplink_rep_unload(rpriv);
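
[Editorial note, not part of the patch] In the rep stats hunks above, the strcpy()-plus-index pattern is replaced by ethtool_puts(), which copies each string into the current ETH_GSTRING_LEN slot and advances the cursor itself, so the fill routines no longer have to track or return an index. A minimal sketch of that pattern with made-up counter names, for illustration only:

static void example_fill_strings(u8 **data)
{
	static const char * const names[] = { "rx_packets", "rx_bytes" };
	int i;

	/* ethtool_puts() advances *data past each ETH_GSTRING_LEN slot. */
	for (i = 0; i < ARRAY_SIZE(names); i++)
		ethtool_puts(data, names[i]);
}
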
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index d601b5faaed5..1f6930c77437 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -51,6 +51,7 @@
#include "ipoib/ipoib.h"
#include "en_accel/ipsec.h"
#include "en_accel/macsec.h"
+#include "en_accel/psp_rxtx.h"
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/ktls_txrx.h"
#include "en/xdp.h"
@@ -273,33 +274,32 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
#define MLX5E_PAGECNT_BIAS_MAX (PAGE_SIZE / 64)
-static int mlx5e_page_alloc_fragmented(struct mlx5e_rq *rq,
+static int mlx5e_page_alloc_fragmented(struct page_pool *pp,
struct mlx5e_frag_page *frag_page)
{
- struct page *page;
+ netmem_ref netmem = page_pool_dev_alloc_netmems(pp);
- page = page_pool_dev_alloc_pages(rq->page_pool);
- if (unlikely(!page))
+ if (unlikely(!netmem))
return -ENOMEM;
- page_pool_fragment_page(page, MLX5E_PAGECNT_BIAS_MAX);
+ page_pool_fragment_netmem(netmem, MLX5E_PAGECNT_BIAS_MAX);
*frag_page = (struct mlx5e_frag_page) {
- .page = page,
+ .netmem = netmem,
.frags = 0,
};
return 0;
}
-static void mlx5e_page_release_fragmented(struct mlx5e_rq *rq,
+static void mlx5e_page_release_fragmented(struct page_pool *pp,
struct mlx5e_frag_page *frag_page)
{
u16 drain_count = MLX5E_PAGECNT_BIAS_MAX - frag_page->frags;
- struct page *page = frag_page->page;
+ netmem_ref netmem = frag_page->netmem;
- if (page_pool_unref_page(page, drain_count) == 0)
- page_pool_put_unrefed_page(rq->page_pool, page, -1, true);
+ if (page_pool_unref_netmem(netmem, drain_count) == 0)
+ page_pool_put_unrefed_netmem(pp, netmem, -1, true);
}
static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
@@ -313,7 +313,8 @@ static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
* offset) should just use the new one without replenishing again
* by themselves.
*/
- err = mlx5e_page_alloc_fragmented(rq, frag->frag_page);
+ err = mlx5e_page_alloc_fragmented(rq->page_pool,
+ frag->frag_page);
return err;
}
@@ -332,7 +333,7 @@ static inline void mlx5e_put_rx_frag(struct mlx5e_rq *rq,
struct mlx5e_wqe_frag_info *frag)
{
if (mlx5e_frag_can_release(frag))
- mlx5e_page_release_fragmented(rq, frag->frag_page);
+ mlx5e_page_release_fragmented(rq->page_pool, frag->frag_page);
}
static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix)
@@ -358,7 +359,7 @@ static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe,
frag->flags &= ~BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
headroom = i == 0 ? rq->buff.headroom : 0;
- addr = page_pool_get_dma_addr(frag->frag_page->page);
+ addr = page_pool_get_dma_addr_netmem(frag->frag_page->netmem);
wqe->data[i].addr = cpu_to_be64(addr + frag->offset + headroom);
}
@@ -499,9 +500,10 @@ mlx5e_add_skb_shared_info_frag(struct mlx5e_rq *rq, struct skb_shared_info *sinf
struct xdp_buff *xdp, struct mlx5e_frag_page *frag_page,
u32 frag_offset, u32 len)
{
+ netmem_ref netmem = frag_page->netmem;
skb_frag_t *frag;
- dma_addr_t addr = page_pool_get_dma_addr(frag_page->page);
+ dma_addr_t addr = page_pool_get_dma_addr_netmem(netmem);
dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len, rq->buff.map_dir);
if (!xdp_buff_has_frags(xdp)) {
@@ -514,32 +516,42 @@ mlx5e_add_skb_shared_info_frag(struct mlx5e_rq *rq, struct skb_shared_info *sinf
}
frag = &sinfo->frags[sinfo->nr_frags++];
- skb_frag_fill_page_desc(frag, frag_page->page, frag_offset, len);
+ skb_frag_fill_netmem_desc(frag, netmem, frag_offset, len);
- if (page_is_pfmemalloc(frag_page->page))
+ if (netmem_is_pfmemalloc(netmem))
xdp_buff_set_frag_pfmemalloc(xdp);
sinfo->xdp_frags_size += len;
}
static inline void
mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb,
- struct page *page, u32 frag_offset, u32 len,
+ struct mlx5e_frag_page *frag_page,
+ u32 frag_offset, u32 len,
unsigned int truesize)
{
- dma_addr_t addr = page_pool_get_dma_addr(page);
+ dma_addr_t addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
+ u8 next_frag = skb_shinfo(skb)->nr_frags;
+ netmem_ref netmem = frag_page->netmem;
dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len,
rq->buff.map_dir);
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- page, frag_offset, len, truesize);
+
+ if (skb_can_coalesce_netmem(skb, next_frag, netmem, frag_offset)) {
+ skb_coalesce_rx_frag(skb, next_frag - 1, len, truesize);
+ return;
+ }
+
+ frag_page->frags++;
+ skb_add_rx_frag_netmem(skb, next_frag, netmem,
+ frag_offset, len, truesize);
}
static inline void
mlx5e_copy_skb_header(struct mlx5e_rq *rq, struct sk_buff *skb,
- struct page *page, dma_addr_t addr,
+ netmem_ref netmem, dma_addr_t addr,
int offset_from, int dma_offset, u32 headlen)
{
- const void *from = page_address(page) + offset_from;
+ const void *from = netmem_address(netmem) + offset_from;
/* Aligning len to sizeof(long) optimizes memcpy performance */
unsigned int len = ALIGN(headlen, sizeof(long));
@@ -576,7 +588,8 @@ mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi)
struct mlx5e_frag_page *frag_page;
frag_page = &wi->alloc_units.frag_pages[i];
- mlx5e_page_release_fragmented(rq, frag_page);
+ mlx5e_page_release_fragmented(rq->page_pool,
+ frag_page);
}
}
}
@@ -619,100 +632,100 @@ static int bitmap_find_window(unsigned long *bitmap, int len,
return min(len, count);
}
-static void build_klm_umr(struct mlx5e_icosq *sq, struct mlx5e_umr_wqe *umr_wqe,
- __be32 key, u16 offset, u16 klm_len, u16 wqe_bbs)
+static void build_ksm_umr(struct mlx5e_icosq *sq, struct mlx5e_umr_wqe *umr_wqe,
+ __be32 key, u16 offset, u16 ksm_len)
{
- memset(umr_wqe, 0, offsetof(struct mlx5e_umr_wqe, inline_klms));
- umr_wqe->ctrl.opmod_idx_opcode =
+ memset(umr_wqe, 0, offsetof(struct mlx5e_umr_wqe, inline_ksms));
+ umr_wqe->hdr.ctrl.opmod_idx_opcode =
cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
MLX5_OPCODE_UMR);
- umr_wqe->ctrl.umr_mkey = key;
- umr_wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT)
- | MLX5E_KLM_UMR_DS_CNT(klm_len));
- umr_wqe->uctrl.flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
- umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset);
- umr_wqe->uctrl.xlt_octowords = cpu_to_be16(klm_len);
- umr_wqe->uctrl.mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
+ umr_wqe->hdr.ctrl.umr_mkey = key;
+ umr_wqe->hdr.ctrl.qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT)
+ | MLX5E_KSM_UMR_DS_CNT(ksm_len));
+ umr_wqe->hdr.uctrl.flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
+ umr_wqe->hdr.uctrl.xlt_offset = cpu_to_be16(offset);
+ umr_wqe->hdr.uctrl.xlt_octowords = cpu_to_be16(ksm_len);
+ umr_wqe->hdr.uctrl.mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
+}
+
+static struct mlx5e_frag_page *mlx5e_shampo_hd_to_frag_page(struct mlx5e_rq *rq,
+ int header_index)
+{
+ struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
+
+ return &shampo->pages[header_index >> shampo->log_hd_per_page];
+}
+
+static u64 mlx5e_shampo_hd_offset(struct mlx5e_rq *rq, int header_index)
+{
+ struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
+ u32 hd_per_page = shampo->hd_per_page;
+
+ return (header_index & (hd_per_page - 1)) << shampo->log_hd_entry_size;
}
+static void mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index);
+
static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
struct mlx5e_icosq *sq,
- u16 klm_entries, u16 index)
+ u16 ksm_entries, u16 index)
{
struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
- u16 entries, pi, header_offset, err, wqe_bbs, new_entries;
+ u16 pi, header_offset, err, wqe_bbs;
u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey;
- u16 page_index = shampo->curr_page_index;
- struct mlx5e_frag_page *frag_page;
- u64 addr = shampo->last_addr;
- struct mlx5e_dma_info *dma_info;
struct mlx5e_umr_wqe *umr_wqe;
int headroom, i;
headroom = rq->buff.headroom;
- new_entries = klm_entries - (shampo->pi & (MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT - 1));
- entries = ALIGN(klm_entries, MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT);
- wqe_bbs = MLX5E_KLM_UMR_WQEBBS(entries);
+ wqe_bbs = MLX5E_KSM_UMR_WQEBBS(ksm_entries);
pi = mlx5e_icosq_get_next_pi(sq, wqe_bbs);
umr_wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
- build_klm_umr(sq, umr_wqe, shampo->key, index, entries, wqe_bbs);
-
- frag_page = &shampo->pages[page_index];
-
- for (i = 0; i < entries; i++, index++) {
- dma_info = &shampo->info[index];
- if (i >= klm_entries || (index < shampo->pi && shampo->pi - index <
- MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT))
- goto update_klm;
- header_offset = (index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) <<
- MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE;
- if (!(header_offset & (PAGE_SIZE - 1))) {
- page_index = (page_index + 1) & (shampo->hd_per_wq - 1);
- frag_page = &shampo->pages[page_index];
-
- err = mlx5e_page_alloc_fragmented(rq, frag_page);
- if (unlikely(err))
- goto err_unmap;
+ build_ksm_umr(sq, umr_wqe, shampo->mkey_be, index, ksm_entries);
- addr = page_pool_get_dma_addr(frag_page->page);
-
- dma_info->addr = addr;
- dma_info->frag_page = frag_page;
- } else {
- dma_info->addr = addr + header_offset;
- dma_info->frag_page = frag_page;
+ for (i = 0; i < ksm_entries; i++, index++) {
+ struct mlx5e_frag_page *frag_page;
+ u64 addr;
+
+ frag_page = mlx5e_shampo_hd_to_frag_page(rq, index);
+ header_offset = mlx5e_shampo_hd_offset(rq, index);
+ if (!header_offset) {
+ err = mlx5e_page_alloc_fragmented(rq->hd_page_pool,
+ frag_page);
+ if (err)
+ goto err_unmap;
}
-update_klm:
- umr_wqe->inline_klms[i].bcount =
- cpu_to_be32(MLX5E_RX_MAX_HEAD);
- umr_wqe->inline_klms[i].key = cpu_to_be32(lkey);
- umr_wqe->inline_klms[i].va =
- cpu_to_be64(dma_info->addr + headroom);
+ addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
+ umr_wqe->inline_ksms[i] = (struct mlx5_ksm) {
+ .key = cpu_to_be32(lkey),
+ .va = cpu_to_be64(addr + header_offset + headroom),
+ };
}
sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
.wqe_type = MLX5E_ICOSQ_WQE_SHAMPO_HD_UMR,
.num_wqebbs = wqe_bbs,
- .shampo.len = new_entries,
+ .shampo.len = ksm_entries,
};
- shampo->pi = (shampo->pi + new_entries) & (shampo->hd_per_wq - 1);
- shampo->curr_page_index = page_index;
- shampo->last_addr = addr;
+ shampo->pi = (shampo->pi + ksm_entries) & (shampo->hd_per_wq - 1);
sq->pc += wqe_bbs;
- sq->doorbell_cseg = &umr_wqe->ctrl;
+ sq->doorbell_cseg = &umr_wqe->hdr.ctrl;
return 0;
err_unmap:
while (--i >= 0) {
- dma_info = &shampo->info[--index];
- if (!(i & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1))) {
- dma_info->addr = ALIGN_DOWN(dma_info->addr, PAGE_SIZE);
- mlx5e_page_release_fragmented(rq, dma_info->frag_page);
+ --index;
+ header_offset = mlx5e_shampo_hd_offset(rq, index);
+ if (!header_offset) {
+ struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, index);
+
+ mlx5e_page_release_fragmented(rq->hd_page_pool,
+ frag_page);
}
}
+
rq->stats->buff_alloc_err++;
return err;
}
@@ -720,37 +733,38 @@ err_unmap:
static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq)
{
struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
- u16 klm_entries, num_wqe, index, entries_before;
+ u16 ksm_entries, num_wqe, index, entries_before;
struct mlx5e_icosq *sq = rq->icosq;
- int i, err, max_klm_entries, len;
+ int i, err, max_ksm_entries, len;
- max_klm_entries = MLX5E_MAX_KLM_PER_WQE(rq->mdev);
- klm_entries = bitmap_find_window(shampo->bitmap,
+ max_ksm_entries = MLX5E_MAX_KSM_PER_WQE(rq->mdev);
+ ksm_entries = bitmap_find_window(shampo->bitmap,
shampo->hd_per_wqe,
shampo->hd_per_wq, shampo->pi);
- if (!klm_entries)
+ ksm_entries = ALIGN_DOWN(ksm_entries, shampo->hd_per_page);
+ if (!ksm_entries)
return 0;
- klm_entries += (shampo->pi & (MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT - 1));
- index = ALIGN_DOWN(shampo->pi, MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT);
+ /* pi is aligned to MLX5E_SHAMPO_WQ_HEADER_PER_PAGE */
+ index = shampo->pi;
entries_before = shampo->hd_per_wq - index;
- if (unlikely(entries_before < klm_entries))
- num_wqe = DIV_ROUND_UP(entries_before, max_klm_entries) +
- DIV_ROUND_UP(klm_entries - entries_before, max_klm_entries);
+ if (unlikely(entries_before < ksm_entries))
+ num_wqe = DIV_ROUND_UP(entries_before, max_ksm_entries) +
+ DIV_ROUND_UP(ksm_entries - entries_before, max_ksm_entries);
else
- num_wqe = DIV_ROUND_UP(klm_entries, max_klm_entries);
+ num_wqe = DIV_ROUND_UP(ksm_entries, max_ksm_entries);
for (i = 0; i < num_wqe; i++) {
- len = (klm_entries > max_klm_entries) ? max_klm_entries :
- klm_entries;
+ len = (ksm_entries > max_ksm_entries) ? max_ksm_entries :
+ ksm_entries;
if (unlikely(index + len > shampo->hd_per_wq))
len = shampo->hd_per_wq - index;
err = mlx5e_build_shampo_hd_umr(rq, sq, len, index);
if (unlikely(err))
return err;
index = (index + len) & (rq->mpwqe.shampo->hd_per_wq - 1);
- klm_entries -= len;
+ ksm_entries -= len;
}
return 0;
@@ -783,10 +797,11 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
for (i = 0; i < rq->mpwqe.pages_per_wqe; i++, frag_page++) {
dma_addr_t addr;
- err = mlx5e_page_alloc_fragmented(rq, frag_page);
+ err = mlx5e_page_alloc_fragmented(rq->page_pool, frag_page);
if (unlikely(err))
goto err_unmap;
- addr = page_pool_get_dma_addr(frag_page->page);
+
+ addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
umr_wqe->inline_mtts[i] = (struct mlx5_mtt) {
.ptag = cpu_to_be64(addr | MLX5_EN_WR),
};
@@ -806,12 +821,12 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
bitmap_zero(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
wi->consumed_strides = 0;
- umr_wqe->ctrl.opmod_idx_opcode =
+ umr_wqe->hdr.ctrl.opmod_idx_opcode =
cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
MLX5_OPCODE_UMR);
offset = (ix * rq->mpwqe.mtts_per_wqe) * sizeof(struct mlx5_mtt) / MLX5_OCTWORD;
- umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset);
+ umr_wqe->hdr.uctrl.xlt_offset = cpu_to_be16(offset);
sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
.wqe_type = MLX5E_ICOSQ_WQE_UMR_RX,
@@ -821,14 +836,14 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
sq->pc += rq->mpwqe.umr_wqebbs;
- sq->doorbell_cseg = &umr_wqe->ctrl;
+ sq->doorbell_cseg = &umr_wqe->hdr.ctrl;
return 0;
err_unmap:
while (--i >= 0) {
frag_page--;
- mlx5e_page_release_fragmented(rq, frag_page);
+ mlx5e_page_release_fragmented(rq->page_pool, frag_page);
}
bitmap_fill(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
@@ -839,44 +854,26 @@ err:
return err;
}
-/* This function is responsible to dealloc SHAMPO header buffer.
- * close == true specifies that we are in the middle of closing RQ operation so
- * we go over all the entries and if they are not in use we free them,
- * otherwise we only go over a specific range inside the header buffer that are
- * not in use.
- */
-void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close)
+static void
+mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index)
{
struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
- struct mlx5e_frag_page *deleted_page = NULL;
- int hd_per_wq = shampo->hd_per_wq;
- struct mlx5e_dma_info *hd_info;
- int i, index = start;
-
- for (i = 0; i < len; i++, index++) {
- if (index == hd_per_wq)
- index = 0;
-
- if (close && !test_bit(index, shampo->bitmap))
- continue;
- hd_info = &shampo->info[index];
- hd_info->addr = ALIGN_DOWN(hd_info->addr, PAGE_SIZE);
- if (hd_info->frag_page && hd_info->frag_page != deleted_page) {
- deleted_page = hd_info->frag_page;
- mlx5e_page_release_fragmented(rq, hd_info->frag_page);
- }
+ if (((header_index + 1) & (shampo->hd_per_page - 1)) == 0) {
+ struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index);
- hd_info->frag_page = NULL;
+ mlx5e_page_release_fragmented(rq->hd_page_pool, frag_page);
}
+ clear_bit(header_index, shampo->bitmap);
+}
- if (start + len > hd_per_wq) {
- len -= hd_per_wq - start;
- bitmap_clear(shampo->bitmap, start, hd_per_wq - start);
- start = 0;
- }
+void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq)
+{
+ struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
+ int i;
- bitmap_clear(shampo->bitmap, start, len);
+ for_each_set_bit(i, shampo->bitmap, rq->mpwqe.shampo->hd_per_wq)
+ mlx5e_free_rx_shampo_hd_entry(rq, i);
}
static void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
@@ -917,7 +914,7 @@ INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
if (!rq->xsk_pool) {
count = mlx5e_refill_rx_wqes(rq, head, wqe_bulk);
- } else if (likely(!rq->xsk_pool->dma_need_sync)) {
+ } else if (likely(!dma_dev_need_sync(rq->pdev))) {
mlx5e_xsk_free_rx_wqes(rq, head, wqe_bulk);
count = mlx5e_xsk_alloc_rx_wqes_batched(rq, head, wqe_bulk);
} else {
@@ -971,26 +968,31 @@ void mlx5e_free_icosq_descs(struct mlx5e_icosq *sq)
sq->cc = sqcc;
}
-static void mlx5e_handle_shampo_hd_umr(struct mlx5e_shampo_umr umr,
- struct mlx5e_icosq *sq)
+void mlx5e_shampo_fill_umr(struct mlx5e_rq *rq, int len)
{
- struct mlx5e_channel *c = container_of(sq, struct mlx5e_channel, icosq);
- struct mlx5e_shampo_hd *shampo;
- /* assume 1:1 relationship between RQ and icosq */
- struct mlx5e_rq *rq = &c->rq;
- int end, from, len = umr.len;
+ struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
+ int end, from, full_len = len;
- shampo = rq->mpwqe.shampo;
end = shampo->hd_per_wq;
from = shampo->ci;
- if (from + len > shampo->hd_per_wq) {
+ if (from + len > end) {
len -= end - from;
bitmap_set(shampo->bitmap, from, end - from);
from = 0;
}
bitmap_set(shampo->bitmap, from, len);
- shampo->ci = (shampo->ci + umr.len) & (shampo->hd_per_wq - 1);
+ shampo->ci = (shampo->ci + full_len) & (shampo->hd_per_wq - 1);
+}
+
+static void mlx5e_handle_shampo_hd_umr(struct mlx5e_shampo_umr umr,
+ struct mlx5e_icosq *sq)
+{
+ struct mlx5e_channel *c = container_of(sq, struct mlx5e_channel, icosq);
+ /* assume 1:1 relationship between RQ and icosq */
+ struct mlx5e_rq *rq = &c->rq;
+
+ mlx5e_shampo_fill_umr(rq, umr.len);
}
int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
@@ -1035,6 +1037,10 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
netdev_WARN_ONCE(cq->netdev,
"Bad OP in ICOSQ CQE: 0x%x\n",
get_cqe_opcode(cqe));
+#ifdef CONFIG_MLX5_EN_TLS
+ if (wi->wqe_type == MLX5E_ICOSQ_WQE_GET_PSV_TLS)
+ mlx5e_ktls_rx_resync_async_request_cancel(wi);
+#endif
mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
(struct mlx5_err_cqe *)cqe);
mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
@@ -1105,6 +1111,8 @@ INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
if (rq->page_pool)
page_pool_nid_changed(rq->page_pool, numa_mem_id());
+ if (rq->hd_page_pool)
+ page_pool_nid_changed(rq->hd_page_pool, numa_mem_id());
head = rq->mpwqe.actual_wq_head;
i = missing;
@@ -1159,8 +1167,9 @@ static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp)
}
}
-static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
- u32 cqe_bcnt)
+static unsigned int mlx5e_lro_update_hdr(struct sk_buff *skb,
+ struct mlx5_cqe64 *cqe,
+ u32 cqe_bcnt)
{
struct ethhdr *eth = (struct ethhdr *)(skb->data);
struct tcphdr *tcp;
@@ -1191,9 +1200,8 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
check = csum_partial(tcp, tcp->doff * 4,
csum_unfold((__force __sum16)cqe->check_sum));
/* Almost done, don't forget the pseudo header */
- tcp->check = csum_tcpudp_magic(ipv4->saddr, ipv4->daddr,
- tot_len - sizeof(struct iphdr),
- IPPROTO_TCP, check);
+ tcp->check = tcp_v4_check(tot_len - sizeof(struct iphdr),
+ ipv4->saddr, ipv4->daddr, check);
} else {
u16 payload_len = tot_len - sizeof(struct ipv6hdr);
struct ipv6hdr *ipv6 = ip_p;
@@ -1208,17 +1216,20 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
check = csum_partial(tcp, tcp->doff * 4,
csum_unfold((__force __sum16)cqe->check_sum));
/* Almost done, don't forget the pseudo header */
- tcp->check = csum_ipv6_magic(&ipv6->saddr, &ipv6->daddr, payload_len,
- IPPROTO_TCP, check);
+ tcp->check = tcp_v6_check(payload_len, &ipv6->saddr,
+ &ipv6->daddr, check);
}
+
+ return (unsigned int)((unsigned char *)tcp + tcp->doff * 4 - skb->data);
}
static void *mlx5e_shampo_get_packet_hd(struct mlx5e_rq *rq, u16 header_index)
{
- struct mlx5e_dma_info *last_head = &rq->mpwqe.shampo->info[header_index];
- u16 head_offset = (last_head->addr & (PAGE_SIZE - 1)) + rq->buff.headroom;
+ struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index);
+ u16 head_offset = mlx5e_shampo_hd_offset(rq, header_index);
+ void *addr = netmem_address(frag_page->netmem);
- return page_address(last_head->frag_page->page) + head_offset;
+ return addr + head_offset + rq->buff.headroom;
}
static void mlx5e_shampo_update_ipv4_udp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4)
@@ -1285,8 +1296,12 @@ static void mlx5e_shampo_update_ipv4_tcp_hdr(struct mlx5e_rq *rq, struct iphdr *
tcp->check = ~tcp_v4_check(skb->len - tcp_off, ipv4->saddr,
ipv4->daddr, 0);
skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
- if (ntohs(ipv4->id) == rq->hw_gro_data->second_ip_id)
- skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;
+ if (ntohs(ipv4->id) == rq->hw_gro_data->second_ip_id) {
+ bool encap = rq->hw_gro_data->fk.control.flags & FLOW_DIS_ENCAPSULATION;
+
+ skb_shinfo(skb)->gso_type |= encap ? SKB_GSO_TCP_FIXEDID_INNER :
+ SKB_GSO_TCP_FIXEDID;
+ }
skb->csum_start = (unsigned char *)tcp - skb->head;
skb->csum_offset = offsetof(struct tcphdr, check);
@@ -1517,6 +1532,11 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
+ if (unlikely(mlx5e_psp_is_rx_flow(cqe))) {
+ /* TBD: PSP csum complete corrections; for now take the csum_unnecessary path */
+ goto csum_unnecessary;
+ }
+
if (test_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state))
return; /* CQE csum covers all received bytes */
@@ -1545,7 +1565,7 @@ csum_none:
#define MLX5E_CE_BIT_MASK 0x80
-static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
+static inline bool mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
u32 cqe_bcnt,
struct mlx5e_rq *rq,
struct sk_buff *skb)
@@ -1559,6 +1579,11 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
if (unlikely(get_cqe_tls_offload(cqe)))
mlx5e_ktls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt);
+ if (unlikely(mlx5e_psp_is_rx_flow(cqe))) {
+ if (mlx5e_psp_offload_handle_rx_skb(netdev, skb, cqe))
+ return true;
+ }
+
if (unlikely(mlx5_ipsec_is_rx_flow(cqe)))
mlx5e_ipsec_offload_handle_rx_skb(netdev, skb,
be32_to_cpu(cqe->ft_metadata));
@@ -1567,8 +1592,10 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
mlx5e_macsec_offload_handle_rx_skb(netdev, skb, cqe);
if (lro_num_seg > 1) {
- mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
- skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
+ unsigned int hdrlen = mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
+
+ skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt - hdrlen, lro_num_seg);
+ skb_shinfo(skb)->gso_segs = lro_num_seg;
/* Subtract one since we already counted this as one
* "regular" packet in mlx5e_complete_rx_cqe()
*/
@@ -1577,7 +1604,7 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
stats->lro_bytes += cqe_bcnt;
}
- if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp)))
+ if (unlikely(mlx5e_rx_hw_stamp(rq->hwtstamp_config)))
skb_hwtstamps(skb)->hwtstamp = mlx5e_cqe_ts_to_ns(rq->ptp_cyc2time,
rq->clock, get_cqe_ts(cqe));
skb_record_rx_queue(skb, rq->ix);
@@ -1602,9 +1629,11 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
if (unlikely(mlx5e_skb_is_multicast(skb)))
stats->mcast_packets++;
+
+ return false;
}
-static void mlx5e_shampo_complete_rx_cqe(struct mlx5e_rq *rq,
+static bool mlx5e_shampo_complete_rx_cqe(struct mlx5e_rq *rq,
struct mlx5_cqe64 *cqe,
u32 cqe_bcnt,
struct sk_buff *skb)
@@ -1612,20 +1641,22 @@ static void mlx5e_shampo_complete_rx_cqe(struct mlx5e_rq *rq,
struct mlx5e_rq_stats *stats = rq->stats;
stats->packets++;
- stats->gro_packets++;
stats->bytes += cqe_bcnt;
- stats->gro_bytes += cqe_bcnt;
if (NAPI_GRO_CB(skb)->count != 1)
- return;
- mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
+ return false;
+
+ if (mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb))
+ return true;
+
skb_reset_network_header(skb);
if (!skb_flow_dissect_flow_keys(skb, &rq->hw_gro_data->fk, 0)) {
napi_gro_receive(rq->cq.napi, skb);
rq->hw_gro_data->skb = NULL;
}
+ return false;
}
-static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
+static inline bool mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
struct mlx5_cqe64 *cqe,
u32 cqe_bcnt,
struct sk_buff *skb)
@@ -1634,7 +1665,7 @@ static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
stats->packets++;
stats->bytes += cqe_bcnt;
- mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
+ return mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
}
static inline
@@ -1681,28 +1712,28 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
dma_addr_t addr;
u32 frag_size;
- va = page_address(frag_page->page) + wi->offset;
+ va = netmem_address(frag_page->netmem) + wi->offset;
data = va + rx_headroom;
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
- addr = page_pool_get_dma_addr(frag_page->page);
+ addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset,
frag_size, rq->buff.map_dir);
net_prefetch(data);
prog = rcu_dereference(rq->xdp_prog);
if (prog) {
- struct mlx5e_xdp_buff mxbuf;
+ struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf;
net_prefetchw(va); /* xdp_frame data area */
mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
- cqe_bcnt, &mxbuf);
- if (mlx5e_xdp_handle(rq, prog, &mxbuf))
+ cqe_bcnt, mxbuf);
+ if (mlx5e_xdp_handle(rq, prog, mxbuf))
return NULL; /* page/packet was consumed by XDP */
- rx_headroom = mxbuf.xdp.data - mxbuf.xdp.data_hard_start;
- metasize = mxbuf.xdp.data - mxbuf.xdp.data_meta;
- cqe_bcnt = mxbuf.xdp.data_end - mxbuf.xdp.data;
+ rx_headroom = mxbuf->xdp.data - mxbuf->xdp.data_hard_start;
+ metasize = mxbuf->xdp.data - mxbuf->xdp.data_meta;
+ cqe_bcnt = mxbuf->xdp.data_end - mxbuf->xdp.data;
}
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
@@ -1721,11 +1752,11 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
struct mlx5_cqe64 *cqe, u32 cqe_bcnt)
{
struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
+ struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf;
struct mlx5e_wqe_frag_info *head_wi = wi;
u16 rx_headroom = rq->buff.headroom;
struct mlx5e_frag_page *frag_page;
struct skb_shared_info *sinfo;
- struct mlx5e_xdp_buff mxbuf;
u32 frag_consumed_bytes;
struct bpf_prog *prog;
struct sk_buff *skb;
@@ -1735,18 +1766,18 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
frag_page = wi->frag_page;
- va = page_address(frag_page->page) + wi->offset;
+ va = netmem_address(frag_page->netmem) + wi->offset;
frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);
- addr = page_pool_get_dma_addr(frag_page->page);
+ addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset,
rq->buff.frame0_sz, rq->buff.map_dir);
net_prefetchw(va); /* xdp_frame data area */
net_prefetch(va + rx_headroom);
mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
- frag_consumed_bytes, &mxbuf);
- sinfo = xdp_get_shared_info_from_buff(&mxbuf.xdp);
+ frag_consumed_bytes, mxbuf);
+ sinfo = xdp_get_shared_info_from_buff(&mxbuf->xdp);
truesize = 0;
cqe_bcnt -= frag_consumed_bytes;
@@ -1758,8 +1789,9 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);
- mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf.xdp, frag_page,
- wi->offset, frag_consumed_bytes);
+ mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf->xdp,
+ frag_page, wi->offset,
+ frag_consumed_bytes);
truesize += frag_info->frag_stride;
cqe_bcnt -= frag_consumed_bytes;
@@ -1768,31 +1800,45 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
}
prog = rcu_dereference(rq->xdp_prog);
- if (prog && mlx5e_xdp_handle(rq, prog, &mxbuf)) {
- if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
- struct mlx5e_wqe_frag_info *pwi;
+ if (prog) {
+ u8 nr_frags_free, old_nr_frags = sinfo->nr_frags;
+
+ if (mlx5e_xdp_handle(rq, prog, mxbuf)) {
+ if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT,
+ rq->flags)) {
+ struct mlx5e_wqe_frag_info *pwi;
+
+ wi -= old_nr_frags - sinfo->nr_frags;
+
+ for (pwi = head_wi; pwi < wi; pwi++)
+ pwi->frag_page->frags++;
+ }
+ return NULL; /* page/packet was consumed by XDP */
+ }
- for (pwi = head_wi; pwi < wi; pwi++)
- pwi->frag_page->frags++;
+ nr_frags_free = old_nr_frags - sinfo->nr_frags;
+ if (unlikely(nr_frags_free)) {
+ wi -= nr_frags_free;
+ truesize -= nr_frags_free * frag_info->frag_stride;
}
- return NULL; /* page/packet was consumed by XDP */
}
- skb = mlx5e_build_linear_skb(rq, mxbuf.xdp.data_hard_start, rq->buff.frame0_sz,
- mxbuf.xdp.data - mxbuf.xdp.data_hard_start,
- mxbuf.xdp.data_end - mxbuf.xdp.data,
- mxbuf.xdp.data - mxbuf.xdp.data_meta);
+ skb = mlx5e_build_linear_skb(
+ rq, mxbuf->xdp.data_hard_start, rq->buff.frame0_sz,
+ mxbuf->xdp.data - mxbuf->xdp.data_hard_start,
+ mxbuf->xdp.data_end - mxbuf->xdp.data,
+ mxbuf->xdp.data - mxbuf->xdp.data_meta);
if (unlikely(!skb))
return NULL;
skb_mark_for_recycle(skb);
head_wi->frag_page->frags++;
- if (xdp_buff_has_frags(&mxbuf.xdp)) {
+ if (xdp_buff_has_frags(&mxbuf->xdp)) {
/* sinfo->nr_frags is reset by build_skb, calculate again. */
- xdp_update_skb_shared_info(skb, wi - head_wi - 1,
- sinfo->xdp_frags_size, truesize,
- xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp));
+ xdp_update_skb_frags_info(skb, wi - head_wi - 1,
+ sinfo->xdp_frags_size, truesize,
+ xdp_buff_get_skb_flags(&mxbuf->xdp));
for (struct mlx5e_wqe_frag_info *pwi = head_wi + 1; pwi < wi; pwi++)
pwi->frag_page->frags++;
@@ -1848,7 +1894,8 @@ static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
goto wq_cyc_pop;
}
- mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
+ goto wq_cyc_pop;
if (mlx5e_cqe_regb_chain(cqe))
if (!mlx5e_tc_update_skb_nic(cqe, skb)) {
@@ -1895,7 +1942,8 @@ static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
goto wq_cyc_pop;
}
- mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
+ goto wq_cyc_pop;
if (rep->vlan && skb_vlan_tag_present(skb))
skb_vlan_pop(skb);
@@ -1944,7 +1992,8 @@ static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64
if (!skb)
goto mpwrq_cqe_out;
- mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
+ goto mpwrq_cqe_out;
mlx5e_rep_tc_receive(cqe, rq, skb);
@@ -1964,30 +2013,24 @@ const struct mlx5e_rx_handlers mlx5e_rx_handlers_rep = {
#endif
static void
-mlx5e_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq,
- struct mlx5e_frag_page *frag_page,
- u32 data_bcnt, u32 data_offset)
+mlx5e_shampo_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq,
+ struct mlx5e_frag_page *frag_page,
+ u32 data_bcnt, u32 data_offset)
{
net_prefetchw(skb->data);
- while (data_bcnt) {
+ do {
/* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - data_offset, data_bcnt);
- unsigned int truesize;
+ unsigned int truesize = pg_consumed_bytes;
- if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
- truesize = pg_consumed_bytes;
- else
- truesize = ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));
-
- frag_page->frags++;
- mlx5e_add_skb_frag(rq, skb, frag_page->page, data_offset,
+ mlx5e_add_skb_frag(rq, skb, frag_page, data_offset,
pg_consumed_bytes, truesize);
data_bcnt -= pg_consumed_bytes;
data_offset = 0;
frag_page++;
- }
+ } while (data_bcnt);
}
static struct sk_buff *
@@ -1998,11 +2041,12 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
struct mlx5e_frag_page *frag_page = &wi->alloc_units.frag_pages[page_idx];
u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt);
struct mlx5e_frag_page *head_page = frag_page;
+ struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf;
u32 frag_offset = head_offset;
u32 byte_cnt = cqe_bcnt;
struct skb_shared_info *sinfo;
- struct mlx5e_xdp_buff mxbuf;
unsigned int truesize = 0;
+ u32 pg_consumed_bytes;
struct bpf_prog *prog;
struct sk_buff *skb;
u32 linear_frame_sz;
@@ -2014,12 +2058,14 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
if (prog) {
/* area for bpf_xdp_[store|load]_bytes */
- net_prefetchw(page_address(frag_page->page) + frag_offset);
- if (unlikely(mlx5e_page_alloc_fragmented(rq, &wi->linear_page))) {
+ net_prefetchw(netmem_address(frag_page->netmem) + frag_offset);
+ if (unlikely(mlx5e_page_alloc_fragmented(rq->page_pool,
+ &wi->linear_page))) {
rq->stats->buff_alloc_err++;
return NULL;
}
- va = page_address(wi->linear_page.page);
+
+ va = netmem_address(wi->linear_page.netmem);
net_prefetchw(va); /* xdp_frame data area */
linear_hr = XDP_PACKET_HEADROOM;
linear_data_len = 0;
@@ -2047,20 +2093,23 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
}
}
- mlx5e_fill_mxbuf(rq, cqe, va, linear_hr, linear_frame_sz, linear_data_len, &mxbuf);
+ mlx5e_fill_mxbuf(rq, cqe, va, linear_hr, linear_frame_sz,
+ linear_data_len, mxbuf);
- sinfo = xdp_get_shared_info_from_buff(&mxbuf.xdp);
+ sinfo = xdp_get_shared_info_from_buff(&mxbuf->xdp);
while (byte_cnt) {
/* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
- u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
+ pg_consumed_bytes =
+ min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
truesize += pg_consumed_bytes;
else
truesize += ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));
- mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf.xdp, frag_page, frag_offset,
+ mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf->xdp,
+ frag_page, frag_offset,
pg_consumed_bytes);
byte_cnt -= pg_consumed_bytes;
frag_offset = 0;
@@ -2068,55 +2117,77 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
}
if (prog) {
- if (mlx5e_xdp_handle(rq, prog, &mxbuf)) {
+ u8 nr_frags_free, old_nr_frags = sinfo->nr_frags;
+ u32 len;
+
+ if (mlx5e_xdp_handle(rq, prog, mxbuf)) {
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
struct mlx5e_frag_page *pfp;
+ frag_page -= old_nr_frags - sinfo->nr_frags;
+
for (pfp = head_page; pfp < frag_page; pfp++)
pfp->frags++;
wi->linear_page.frags++;
}
- mlx5e_page_release_fragmented(rq, &wi->linear_page);
+ mlx5e_page_release_fragmented(rq->page_pool,
+ &wi->linear_page);
return NULL; /* page/packet was consumed by XDP */
}
- skb = mlx5e_build_linear_skb(rq, mxbuf.xdp.data_hard_start,
- linear_frame_sz,
- mxbuf.xdp.data - mxbuf.xdp.data_hard_start, 0,
- mxbuf.xdp.data - mxbuf.xdp.data_meta);
+ nr_frags_free = old_nr_frags - sinfo->nr_frags;
+ if (unlikely(nr_frags_free)) {
+ frag_page -= nr_frags_free;
+ truesize -= (nr_frags_free - 1) * PAGE_SIZE +
+ ALIGN(pg_consumed_bytes,
+ BIT(rq->mpwqe.log_stride_sz));
+ }
+
+ len = mxbuf->xdp.data_end - mxbuf->xdp.data;
+
+ skb = mlx5e_build_linear_skb(
+ rq, mxbuf->xdp.data_hard_start, linear_frame_sz,
+ mxbuf->xdp.data - mxbuf->xdp.data_hard_start, len,
+ mxbuf->xdp.data - mxbuf->xdp.data_meta);
if (unlikely(!skb)) {
- mlx5e_page_release_fragmented(rq, &wi->linear_page);
+ mlx5e_page_release_fragmented(rq->page_pool,
+ &wi->linear_page);
return NULL;
}
skb_mark_for_recycle(skb);
wi->linear_page.frags++;
- mlx5e_page_release_fragmented(rq, &wi->linear_page);
+ mlx5e_page_release_fragmented(rq->page_pool, &wi->linear_page);
- if (xdp_buff_has_frags(&mxbuf.xdp)) {
+ if (xdp_buff_has_frags(&mxbuf->xdp)) {
struct mlx5e_frag_page *pagep;
/* sinfo->nr_frags is reset by build_skb, calculate again. */
- xdp_update_skb_shared_info(skb, frag_page - head_page,
- sinfo->xdp_frags_size, truesize,
- xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp));
+ xdp_update_skb_frags_info(skb, frag_page - head_page,
+ sinfo->xdp_frags_size,
+ truesize,
+ xdp_buff_get_skb_flags(&mxbuf->xdp));
pagep = head_page;
do
pagep->frags++;
while (++pagep < frag_page);
+
+ headlen = min_t(u16, MLX5E_RX_MAX_HEAD - len,
+ skb->data_len);
+ __pskb_pull_tail(skb, headlen);
}
- __pskb_pull_tail(skb, headlen);
} else {
dma_addr_t addr;
- if (xdp_buff_has_frags(&mxbuf.xdp)) {
+ if (xdp_buff_has_frags(&mxbuf->xdp)) {
struct mlx5e_frag_page *pagep;
- xdp_update_skb_shared_info(skb, sinfo->nr_frags,
- sinfo->xdp_frags_size, truesize,
- xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp));
+ xdp_update_skb_frags_info(skb, sinfo->nr_frags,
+ sinfo->xdp_frags_size,
+ truesize,
+ xdp_buff_get_skb_flags(&mxbuf->xdp));
pagep = frag_page - sinfo->nr_frags;
do
@@ -2124,8 +2195,8 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
while (++pagep < frag_page);
}
/* copy header */
- addr = page_pool_get_dma_addr(head_page->page);
- mlx5e_copy_skb_header(rq, skb, head_page->page, addr,
+ addr = page_pool_get_dma_addr_netmem(head_page->netmem);
+ mlx5e_copy_skb_header(rq, skb, head_page->netmem, addr,
head_offset, head_offset, headlen);
/* skb linear part was allocated with headlen and aligned to long */
skb->tail += headlen;
@@ -2155,31 +2226,31 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
return NULL;
}
- va = page_address(frag_page->page) + head_offset;
+ va = netmem_address(frag_page->netmem) + head_offset;
data = va + rx_headroom;
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
- addr = page_pool_get_dma_addr(frag_page->page);
+ addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
dma_sync_single_range_for_cpu(rq->pdev, addr, head_offset,
frag_size, rq->buff.map_dir);
net_prefetch(data);
prog = rcu_dereference(rq->xdp_prog);
if (prog) {
- struct mlx5e_xdp_buff mxbuf;
+ struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf;
net_prefetchw(va); /* xdp_frame data area */
mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
- cqe_bcnt, &mxbuf);
- if (mlx5e_xdp_handle(rq, prog, &mxbuf)) {
+ cqe_bcnt, mxbuf);
+ if (mlx5e_xdp_handle(rq, prog, mxbuf)) {
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
frag_page->frags++;
return NULL; /* page/packet was consumed by XDP */
}
- rx_headroom = mxbuf.xdp.data - mxbuf.xdp.data_hard_start;
- metasize = mxbuf.xdp.data - mxbuf.xdp.data_meta;
- cqe_bcnt = mxbuf.xdp.data_end - mxbuf.xdp.data;
+ rx_headroom = mxbuf->xdp.data - mxbuf->xdp.data_hard_start;
+ metasize = mxbuf->xdp.data - mxbuf->xdp.data_meta;
+ cqe_bcnt = mxbuf->xdp.data_end - mxbuf->xdp.data;
}
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
@@ -2197,29 +2268,34 @@ static struct sk_buff *
mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
struct mlx5_cqe64 *cqe, u16 header_index)
{
- struct mlx5e_dma_info *head = &rq->mpwqe.shampo->info[header_index];
- u16 head_offset = head->addr & (PAGE_SIZE - 1);
+ struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index);
+ u16 head_offset = mlx5e_shampo_hd_offset(rq, header_index);
+ struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
u16 head_size = cqe->shampo.header_size;
u16 rx_headroom = rq->buff.headroom;
struct sk_buff *skb = NULL;
+ dma_addr_t page_dma_addr;
+ dma_addr_t dma_addr;
void *hdr, *data;
u32 frag_size;
- hdr = page_address(head->frag_page->page) + head_offset;
+ page_dma_addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
+ dma_addr = page_dma_addr + head_offset;
+
+ hdr = netmem_address(frag_page->netmem) + head_offset;
data = hdr + rx_headroom;
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + head_size);
- if (likely(frag_size <= BIT(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE))) {
+ if (likely(frag_size <= BIT(shampo->log_hd_entry_size))) {
/* build SKB around header */
- dma_sync_single_range_for_cpu(rq->pdev, head->addr, 0, frag_size, rq->buff.map_dir);
- prefetchw(hdr);
- prefetch(data);
+ dma_sync_single_range_for_cpu(rq->pdev, dma_addr, 0, frag_size, rq->buff.map_dir);
+ net_prefetchw(hdr);
+ net_prefetch(data);
skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size, 0);
-
if (unlikely(!skb))
return NULL;
- head->frag_page->frags++;
+ frag_page->frags++;
} else {
/* allocate SKB and copy header for large header */
rq->stats->gro_large_hds++;
@@ -2230,8 +2306,8 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
return NULL;
}
- prefetchw(skb->data);
- mlx5e_copy_skb_header(rq, skb, head->frag_page->page, head->addr,
+ net_prefetchw(skb->data);
+ mlx5e_copy_skb_header(rq, skb, frag_page->netmem, dma_addr,
head_offset + rx_headroom,
rx_headroom, head_size);
/* skb linear part was allocated with headlen and aligned to long */
@@ -2261,12 +2337,19 @@ mlx5e_shampo_flush_skb(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, bool match)
{
struct sk_buff *skb = rq->hw_gro_data->skb;
struct mlx5e_rq_stats *stats = rq->stats;
+ u16 gro_count = NAPI_GRO_CB(skb)->count;
- stats->gro_skbs++;
if (likely(skb_shinfo(skb)->nr_frags))
mlx5e_shampo_align_fragment(skb, rq->mpwqe.log_stride_sz);
- if (NAPI_GRO_CB(skb)->count > 1)
+ if (gro_count > 1) {
+ stats->gro_skbs++;
+ stats->gro_packets += gro_count;
+ stats->gro_bytes += skb->data_len + skb_headlen(skb) * gro_count;
+
mlx5e_shampo_update_hdr(rq, cqe, match);
+ } else {
+ skb_shinfo(skb)->gso_size = 0;
+ }
napi_gro_receive(rq->cq.napi, skb);
rq->hw_gro_data->skb = NULL;
}
@@ -2276,22 +2359,10 @@ mlx5e_hw_gro_skb_has_enough_space(struct sk_buff *skb, u16 data_bcnt)
{
int nr_frags = skb_shinfo(skb)->nr_frags;
- return PAGE_SIZE * nr_frags + data_bcnt <= GRO_LEGACY_MAX_SIZE;
-}
-
-static void
-mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index)
-{
- struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
- u64 addr = shampo->info[header_index].addr;
-
- if (((header_index + 1) & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) == 0) {
- struct mlx5e_dma_info *dma_info = &shampo->info[header_index];
-
- dma_info->addr = ALIGN_DOWN(addr, PAGE_SIZE);
- mlx5e_page_release_fragmented(rq, dma_info->frag_page);
- }
- bitmap_clear(shampo->bitmap, header_index, 1);
+ if (PAGE_SIZE >= GRO_LEGACY_MAX_SIZE)
+ return skb->len + data_bcnt <= GRO_LEGACY_MAX_SIZE;
+ else
+ return PAGE_SIZE * nr_frags + data_bcnt <= GRO_LEGACY_MAX_SIZE;
}
static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
@@ -2327,19 +2398,29 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
goto mpwrq_cqe_out;
}
- stats->gro_match_packets += match;
-
if (*skb && (!match || !(mlx5e_hw_gro_skb_has_enough_space(*skb, data_bcnt)))) {
match = false;
mlx5e_shampo_flush_skb(rq, cqe, match);
}
if (!*skb) {
- if (likely(head_size))
+ if (likely(head_size)) {
*skb = mlx5e_skb_from_cqe_shampo(rq, wi, cqe, header_index);
- else
- *skb = mlx5e_skb_from_cqe_mpwrq_nonlinear(rq, wi, cqe, cqe_bcnt,
- data_offset, page_idx);
+ } else {
+ struct mlx5e_frag_page *frag_page;
+
+ frag_page = &wi->alloc_units.frag_pages[page_idx];
+ /* Drop packets with header in unreadable data area to
+ * prevent the kernel from touching it.
+ */
+ if (unlikely(netmem_is_net_iov(frag_page->netmem)))
+ goto free_hd_entry;
+ *skb = mlx5e_skb_from_cqe_mpwrq_nonlinear(rq, wi, cqe,
+ cqe_bcnt,
+ data_offset,
+ page_idx);
+ }
+
if (unlikely(!*skb))
goto free_hd_entry;
@@ -2359,21 +2440,36 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
}
if (likely(head_size)) {
- struct mlx5e_frag_page *frag_page;
+ if (data_bcnt) {
+ struct mlx5e_frag_page *frag_page;
- frag_page = &wi->alloc_units.frag_pages[page_idx];
- mlx5e_fill_skb_data(*skb, rq, frag_page, data_bcnt, data_offset);
+ frag_page = &wi->alloc_units.frag_pages[page_idx];
+ mlx5e_shampo_fill_skb_data(*skb, rq, frag_page, data_bcnt, data_offset);
+ } else {
+ stats->hds_nodata_packets++;
+ stats->hds_nodata_bytes += head_size;
+ }
+ } else {
+ stats->hds_nosplit_packets++;
+ stats->hds_nosplit_bytes += data_bcnt;
}
- mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb);
- if (flush)
+ if (mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb)) {
+ *skb = NULL;
+ goto free_hd_entry;
+ }
+ if (flush && rq->hw_gro_data->skb)
mlx5e_shampo_flush_skb(rq, cqe, match);
free_hd_entry:
- mlx5e_free_rx_shampo_hd_entry(rq, header_index);
+ if (likely(head_size))
+ mlx5e_free_rx_shampo_hd_entry(rq, header_index);
mpwrq_cqe_out:
if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
return;
+ if (unlikely(!cstrides))
+ return;
+
wq = &rq->mpwqe.wq;
wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
@@ -2419,7 +2515,8 @@ static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cq
if (!skb)
goto mpwrq_cqe_out;
- mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
+ goto mpwrq_cqe_out;
if (mlx5e_cqe_regb_chain(cqe))
if (!mlx5e_tc_update_skb_nic(cqe, skb)) {
@@ -2446,7 +2543,7 @@ static int mlx5e_rx_cq_process_enhanced_cqe_comp(struct mlx5e_rq *rq,
struct mlx5e_cq_decomp *cqd = &rq->cqd;
int work_done = 0;
- cqe = mlx5_cqwq_get_cqe_enahnced_comp(cqwq);
+ cqe = mlx5_cqwq_get_cqe_enhanced_comp(cqwq);
if (!cqe)
return work_done;
@@ -2476,7 +2573,7 @@ static int mlx5e_rx_cq_process_enhanced_cqe_comp(struct mlx5e_rq *rq,
rq, cqe);
work_done++;
} while (work_done < budget_rem &&
- (cqe = mlx5_cqwq_get_cqe_enahnced_comp(cqwq)));
+ (cqe = mlx5_cqwq_get_cqe_enhanced_comp(cqwq)));
/* last cqe might be title on next poll bulk */
if (title_cqe) {
@@ -2559,7 +2656,6 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
u32 cqe_bcnt,
struct sk_buff *skb)
{
- struct hwtstamp_config *tstamp;
struct mlx5e_rq_stats *stats;
struct net_device *netdev;
struct mlx5e_priv *priv;
@@ -2583,7 +2679,6 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
}
priv = mlx5i_epriv(netdev);
- tstamp = &priv->tstamp;
stats = &priv->channel_stats[rq->ix]->rq;
flags_rqpn = be32_to_cpu(cqe->flags_rqpn);
@@ -2619,7 +2714,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
stats->csum_none++;
}
- if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
+ if (unlikely(mlx5e_rx_hw_stamp(&priv->hwtstamp_config)))
skb_hwtstamps(skb)->hwtstamp = mlx5e_cqe_ts_to_ns(rq->ptp_cyc2time,
rq->clock, get_cqe_ts(cqe));
skb_record_rx_queue(skb, rq->ix);
@@ -2752,7 +2847,8 @@ static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe
if (!skb)
goto wq_cyc_pop;
- mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
+ goto wq_cyc_pop;
skb_push(skb, ETH_HLEN);
mlx5_devlink_trap_report(rq->mdev, trap_id, skb,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 08a75654f5f1..fcad464bc4d5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -36,6 +36,7 @@
#include "en.h"
#include "en/port.h"
#include "eswitch.h"
+#include "lib/mlx5.h"
static int mlx5e_test_health_info(struct mlx5e_priv *priv)
{
@@ -165,6 +166,9 @@ mlx5e_test_loopback_validate(struct sk_buff *skb,
struct udphdr *udph;
struct iphdr *iph;
+ if (skb_linearize(skb))
+ goto out;
+
/* We are only going to peek, no need to clone the SKB */
if (MLX5E_TEST_PKT_SIZE - ETH_HLEN > skb_headlen(skb))
goto out;
@@ -210,7 +214,7 @@ static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv,
return err;
}
- err = mlx5e_refresh_tirs(priv, true, false);
+ err = mlx5e_modify_tirs_lb(priv->mdev, true, false);
if (err)
goto out;
@@ -239,7 +243,7 @@ static void mlx5e_test_loopback_cleanup(struct mlx5e_priv *priv,
mlx5_nic_vport_update_local_lb(priv->mdev, false);
dev_remove_pack(&lbtp->pt);
- mlx5e_refresh_tirs(priv, false, false);
+ mlx5e_modify_tirs_lb(priv->mdev, false, false);
}
static int mlx5e_cond_loopback(struct mlx5e_priv *priv)
@@ -247,6 +251,9 @@ static int mlx5e_cond_loopback(struct mlx5e_priv *priv)
if (is_mdev_switchdev_mode(priv->mdev))
return -EOPNOTSUPP;
+ if (mlx5_get_sd(priv->mdev))
+ return -EOPNOTSUPP;
+
return 0;
}
@@ -359,7 +366,7 @@ int mlx5e_self_test_fill_strings(struct mlx5e_priv *priv, u8 *data)
if (st.cond_func && st.cond_func(priv))
continue;
if (data)
- strcpy(data + count * ETH_GSTRING_LEN, st.name);
+ ethtool_puts(&data, st.name);
count++;
}
return count;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index f3d0898bdbc6..a2802cfc9b98 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -37,9 +37,12 @@
#include "en/ptp.h"
#include "en/port.h"
-#ifdef CONFIG_PAGE_POOL_STATS
#include <net/page_pool/helpers.h>
-#endif
+
+void mlx5e_ethtool_put_stat(u64 **data, u64 val)
+{
+ *(*data)++ = val;
+}
static unsigned int stats_grps_num(struct mlx5e_priv *priv)
{
@@ -90,17 +93,17 @@ void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx)
int i;
for (i = 0; i < num_stats_grps; i++)
- idx = stats_grps[i]->fill_stats(priv, data, idx);
+ stats_grps[i]->fill_stats(priv, &data);
}
void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data)
{
mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
const unsigned int num_stats_grps = stats_grps_num(priv);
- int i, idx = 0;
+ int i;
for (i = 0; i < num_stats_grps; i++)
- idx = stats_grps[i]->fill_strings(priv, data, idx);
+ stats_grps[i]->fill_strings(priv, &data);
}
/* Concrete NIC Stats */
@@ -136,8 +139,11 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_skbs) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_match_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_large_hds) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nodata_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nodata_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nosplit_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nosplit_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
@@ -188,7 +194,6 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
#endif
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) },
-#ifdef CONFIG_PAGE_POOL_STATS
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_fast) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow_high_order) },
@@ -200,7 +205,6 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring_full) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_released_ref) },
-#endif
#ifdef CONFIG_MLX5_EN_TLS
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) },
@@ -257,8 +261,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw)
int i;
for (i = 0; i < NUM_SW_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
- return idx;
+ ethtool_puts(data, sw_stats_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw)
@@ -266,8 +269,9 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw)
int i;
for (i = 0; i < NUM_SW_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw, sw_stats_desc, i);
- return idx;
+ mlx5e_ethtool_put_stat(data,
+ MLX5E_READ_CTR64_CPU(&priv->stats.sw,
+ sw_stats_desc, i));
}
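The hunks above convert the ethtool fill helpers from index-returning functions into cursor-advancing ones: strings go through ethtool_puts()/ethtool_sprintf() and values through the new mlx5e_ethtool_put_stat(), each of which writes one entry and moves the caller's pointer. The following is a minimal standalone userspace sketch of that pattern, not the kernel API; GSTRING_LEN, put_string() and put_stat() are hypothetical names used only to illustrate why the fill routines no longer need to carry an idx.

#include <stdio.h>
#include <string.h>

#define GSTRING_LEN 32

static void put_string(char **data, const char *s)
{
	strncpy(*data, s, GSTRING_LEN - 1);
	(*data)[GSTRING_LEN - 1] = '\0';
	*data += GSTRING_LEN;		/* advance cursor by one fixed-size slot */
}

static void put_stat(unsigned long long **data, unsigned long long val)
{
	*(*data)++ = val;		/* store value, advance cursor */
}

int main(void)
{
	char names[3 * GSTRING_LEN];
	unsigned long long vals[3];
	char *np = names;
	unsigned long long *vp = vals;

	put_string(&np, "rx_packets");
	put_string(&np, "rx_bytes");
	put_string(&np, "rx_drops");
	put_stat(&vp, 100);
	put_stat(&vp, 6400);
	put_stat(&vp, 0);

	for (int i = 0; i < 3; i++)
		printf("%-12s %llu\n", &names[i * GSTRING_LEN], vals[i]);
	return 0;
}

Because each helper advances the shared cursor, nested loops over counter groups compose without any index bookkeeping, which is what lets the fill_strings/fill_stats callbacks above drop their return values.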
static void mlx5e_stats_grp_sw_update_stats_xdp_red(struct mlx5e_sw_stats *s,
@@ -338,8 +342,11 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
s->rx_gro_packets += rq_stats->gro_packets;
s->rx_gro_bytes += rq_stats->gro_bytes;
s->rx_gro_skbs += rq_stats->gro_skbs;
- s->rx_gro_match_packets += rq_stats->gro_match_packets;
s->rx_gro_large_hds += rq_stats->gro_large_hds;
+ s->rx_hds_nodata_packets += rq_stats->hds_nodata_packets;
+ s->rx_hds_nodata_bytes += rq_stats->hds_nodata_bytes;
+ s->rx_hds_nosplit_packets += rq_stats->hds_nosplit_packets;
+ s->rx_hds_nosplit_bytes += rq_stats->hds_nosplit_bytes;
s->rx_ecn_mark += rq_stats->ecn_mark;
s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
s->rx_csum_none += rq_stats->csum_none;
@@ -366,7 +373,6 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
s->rx_arfs_err += rq_stats->arfs_err;
#endif
s->rx_recover += rq_stats->recover;
-#ifdef CONFIG_PAGE_POOL_STATS
s->rx_pp_alloc_fast += rq_stats->pp_alloc_fast;
s->rx_pp_alloc_slow += rq_stats->pp_alloc_slow;
s->rx_pp_alloc_empty += rq_stats->pp_alloc_empty;
@@ -378,7 +384,6 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
s->rx_pp_recycle_ring += rq_stats->pp_recycle_ring;
s->rx_pp_recycle_ring_full += rq_stats->pp_recycle_ring_full;
s->rx_pp_recycle_released_ref += rq_stats->pp_recycle_released_ref;
-#endif
#ifdef CONFIG_MLX5_EN_TLS
s->rx_tls_decrypted_packets += rq_stats->tls_decrypted_packets;
s->rx_tls_decrypted_bytes += rq_stats->tls_decrypted_bytes;
@@ -485,7 +490,6 @@ static void mlx5e_stats_grp_sw_update_stats_qos(struct mlx5e_priv *priv,
}
}
-#ifdef CONFIG_PAGE_POOL_STATS
static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
{
struct mlx5e_rq_stats *rq_stats = c->rq.stats;
@@ -508,11 +512,6 @@ static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
rq_stats->pp_recycle_ring_full = stats.recycle_stats.ring_full;
rq_stats->pp_recycle_released_ref = stats.recycle_stats.released_refcnt;
}
-#else
-static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
-{
-}
-#endif
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
{
@@ -591,14 +590,10 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qcnt)
int i;
for (i = 0; i < NUM_Q_COUNTERS && q_counter_any(priv); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- q_stats_desc[i].format);
+ ethtool_puts(data, q_stats_desc[i].format);
for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- drop_rq_stats_desc[i].format);
-
- return idx;
+ ethtool_puts(data, drop_rq_stats_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qcnt)
@@ -606,12 +601,13 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qcnt)
int i;
for (i = 0; i < NUM_Q_COUNTERS && q_counter_any(priv); i++)
- data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
- q_stats_desc, i);
+ mlx5e_ethtool_put_stat(data,
+ MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
+ q_stats_desc, i));
for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
- data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
- drop_rq_stats_desc, i);
- return idx;
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
+ drop_rq_stats_desc, i));
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qcnt)
@@ -685,18 +681,13 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vnic_env)
int i;
for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- vnic_env_stats_steer_desc[i].format);
+ ethtool_puts(data, vnic_env_stats_steer_desc[i].format);
for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- vnic_env_stats_dev_oob_desc[i].format);
+ ethtool_puts(data, vnic_env_stats_dev_oob_desc[i].format);
for (i = 0; i < NUM_VNIC_ENV_DROP_COUNTERS(priv->mdev); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- vnic_env_stats_drop_desc[i].format);
-
- return idx;
+ ethtool_puts(data, vnic_env_stats_drop_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vnic_env)
@@ -704,18 +695,22 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vnic_env)
int i;
for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
- data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vnic.query_vnic_env_out,
- vnic_env_stats_steer_desc, i);
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR64_BE(priv->stats.vnic.query_vnic_env_out,
+ vnic_env_stats_steer_desc, i));
for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
- data[idx++] = MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
- vnic_env_stats_dev_oob_desc, i);
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
+ vnic_env_stats_dev_oob_desc, i));
for (i = 0; i < NUM_VNIC_ENV_DROP_COUNTERS(priv->mdev); i++)
- data[idx++] = MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
- vnic_env_stats_drop_desc, i);
-
- return idx;
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
+ vnic_env_stats_drop_desc, i));
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vnic_env)
@@ -798,13 +793,10 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport)
int i;
for (i = 0; i < NUM_VPORT_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_stats_desc[i].format);
+ ethtool_puts(data, vport_stats_desc[i].format);
for (i = 0; i < NUM_VPORT_LOOPBACK_COUNTERS(priv->mdev); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- vport_loopback_stats_desc[i].format);
-
- return idx;
+ ethtool_puts(data, vport_loopback_stats_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport)
@@ -812,14 +804,16 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport)
int i;
for (i = 0; i < NUM_VPORT_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
- vport_stats_desc, i);
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
+ vport_stats_desc, i));
for (i = 0; i < NUM_VPORT_LOOPBACK_COUNTERS(priv->mdev); i++)
- data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
- vport_loopback_stats_desc, i);
-
- return idx;
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
+ vport_loopback_stats_desc, i));
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport)
@@ -868,8 +862,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(802_3)
int i;
for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_802_3_stats_desc[i].format);
- return idx;
+ ethtool_puts(data, pport_802_3_stats_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(802_3)
@@ -877,9 +870,10 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(802_3)
int i;
for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters,
- pport_802_3_stats_desc, i);
- return idx;
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_BE(
+ &priv->stats.pport.IEEE_802_3_counters,
+ pport_802_3_stats_desc, i));
}
#define MLX5_BASIC_PPCNT_SUPPORTED(mdev) \
@@ -1029,8 +1023,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2863)
int i;
for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2863_stats_desc[i].format);
- return idx;
+ ethtool_puts(data, pport_2863_stats_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2863)
@@ -1038,9 +1031,10 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2863)
int i;
for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters,
- pport_2863_stats_desc, i);
- return idx;
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_BE(
+ &priv->stats.pport.RFC_2863_counters,
+ pport_2863_stats_desc, i));
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2863)
@@ -1088,8 +1082,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2819)
int i;
for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2819_stats_desc[i].format);
- return idx;
+ ethtool_puts(data, pport_2819_stats_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2819)
@@ -1097,9 +1090,10 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2819)
int i;
for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
- pport_2819_stats_desc, i);
- return idx;
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_BE(
+ &priv->stats.pport.RFC_2819_counters,
+ pport_2819_stats_desc, i));
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2819)
@@ -1172,6 +1166,62 @@ void mlx5e_stats_rmon_get(struct mlx5e_priv *priv,
*ranges = mlx5e_rmon_ranges;
}
+void mlx5e_stats_ts_get(struct mlx5e_priv *priv,
+ struct ethtool_ts_stats *ts_stats)
+{
+ int i, j;
+
+ mutex_lock(&priv->state_lock);
+
+ if (priv->tx_ptp_opened) {
+ struct mlx5e_ptp *ptp = priv->channels.ptp;
+
+ ts_stats->pkts = 0;
+ ts_stats->err = 0;
+ ts_stats->lost = 0;
+
+ if (!ptp)
+ goto out;
+
+ /* Aggregate stats across all TCs */
+ for (i = 0; i < ptp->num_tc; i++) {
+ struct mlx5e_ptp_cq_stats *stats =
+ ptp->ptpsq[i].cq_stats;
+
+ ts_stats->pkts += stats->cqe;
+ ts_stats->err += stats->abort + stats->err_cqe +
+ stats->late_cqe;
+ ts_stats->lost += stats->lost_cqe;
+ }
+ } else {
+ /* DMA layer will always successfully timestamp packets. Other
+ * counters do not make sense for this layer.
+ */
+ ts_stats->pkts = 0;
+
+ /* Aggregate stats across all SQs */
+ for (j = 0; j < priv->channels.num; j++) {
+ struct mlx5e_channel *c = priv->channels.c[j];
+
+ for (i = 0; i < c->num_tc; i++) {
+ struct mlx5e_sq_stats *stats = c->sq[i].stats;
+
+ ts_stats->pkts += stats->timestamps;
+ }
+ }
+ }
+
+out:
+ mutex_unlock(&priv->state_lock);
+}
+
+#define PPORT_PHY_LAYER_OFF(c) \
+ MLX5_BYTE_OFF(ppcnt_reg, \
+ counter_set.phys_layer_cntrs.c)
+static const struct counter_desc pport_phy_layer_cntrs_stats_desc[] = {
+ { "link_down_events_phy", PPORT_PHY_LAYER_OFF(link_down_events) }
+};
+
#define PPORT_PHY_STATISTICAL_OFF(c) \
MLX5_BYTE_OFF(ppcnt_reg, \
counter_set.phys_layer_statistical_cntrs.c##_high)
@@ -1188,25 +1238,45 @@ pport_phy_statistical_err_lanes_stats_desc[] = {
{ "rx_err_lane_3_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane3) },
};
+#define PPORT_PHY_RECOVERY_OFF(c) \
+ MLX5_BYTE_OFF(ppcnt_reg, counter_set.phys_layer_recovery_cntrs.c)
+static const struct counter_desc
+pport_phy_recovery_cntrs_stats_desc[] = {
+ { "total_success_recovery_phy",
+ PPORT_PHY_RECOVERY_OFF(total_successful_recovery_events) }
+};
+
+#define NUM_PPORT_PHY_LAYER_COUNTERS \
+ ARRAY_SIZE(pport_phy_layer_cntrs_stats_desc)
#define NUM_PPORT_PHY_STATISTICAL_COUNTERS \
ARRAY_SIZE(pport_phy_statistical_stats_desc)
#define NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS \
ARRAY_SIZE(pport_phy_statistical_err_lanes_stats_desc)
+#define NUM_PPORT_PHY_RECOVERY_COUNTERS \
+ ARRAY_SIZE(pport_phy_recovery_cntrs_stats_desc)
+
+#define NUM_PPORT_PHY_STATISTICAL_LOOPBACK_COUNTERS(dev) \
+ (MLX5_CAP_PCAM_FEATURE(dev, ppcnt_statistical_group) ? \
+ NUM_PPORT_PHY_STATISTICAL_COUNTERS : 0)
+#define NUM_PPORT_PHY_STATISTICAL_PER_LANE_LOOPBACK_COUNTERS(dev) \
+ (MLX5_CAP_PCAM_FEATURE(dev, per_lane_error_counters) ? \
+ NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS : 0)
+#define NUM_PPORT_PHY_RECOVERY_LOOPBACK_COUNTERS(dev) \
+ (MLX5_CAP_PCAM_FEATURE(dev, ppcnt_recovery_counters) ? \
+ NUM_PPORT_PHY_RECOVERY_COUNTERS : 0)
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(phy)
{
struct mlx5_core_dev *mdev = priv->mdev;
int num_stats;
- /* "1" for link_down_events special counter */
- num_stats = 1;
+ num_stats = NUM_PPORT_PHY_LAYER_COUNTERS;
- num_stats += MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group) ?
- NUM_PPORT_PHY_STATISTICAL_COUNTERS : 0;
+ num_stats += NUM_PPORT_PHY_STATISTICAL_LOOPBACK_COUNTERS(mdev);
- num_stats += MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters) ?
- NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS : 0;
+ num_stats += NUM_PPORT_PHY_STATISTICAL_PER_LANE_LOOPBACK_COUNTERS(mdev);
+ num_stats += NUM_PPORT_PHY_RECOVERY_LOOPBACK_COUNTERS(mdev);
return num_stats;
}
@@ -1215,21 +1285,22 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(phy)
struct mlx5_core_dev *mdev = priv->mdev;
int i;
- strcpy(data + (idx++) * ETH_GSTRING_LEN, "link_down_events_phy");
+ for (i = 0; i < NUM_PPORT_PHY_LAYER_COUNTERS; i++)
+ ethtool_puts(data, pport_phy_layer_cntrs_stats_desc[i].format);
- if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
- return idx;
+ for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_LOOPBACK_COUNTERS(mdev); i++)
+ ethtool_puts(data, pport_phy_statistical_stats_desc[i].format);
- for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pport_phy_statistical_stats_desc[i].format);
+ for (i = 0;
+ i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_LOOPBACK_COUNTERS(mdev);
+ i++)
+ ethtool_puts(data,
+ pport_phy_statistical_err_lanes_stats_desc[i]
+ .format);
- if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
- for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pport_phy_statistical_err_lanes_stats_desc[i].format);
-
- return idx;
+ for (i = 0; i < NUM_PPORT_PHY_RECOVERY_LOOPBACK_COUNTERS(mdev); i++)
+ ethtool_puts(data,
+ pport_phy_recovery_cntrs_stats_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(phy)
@@ -1237,25 +1308,35 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(phy)
struct mlx5_core_dev *mdev = priv->mdev;
int i;
- /* link_down_events_phy has special handling since it is not stored in __be64 format */
- data[idx++] = MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters,
- counter_set.phys_layer_cntrs.link_down_events);
-
- if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
- return idx;
-
- for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
- data[idx++] =
- MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
- pport_phy_statistical_stats_desc, i);
-
- if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
- for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
- data[idx++] =
- MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
- pport_phy_statistical_err_lanes_stats_desc,
- i);
- return idx;
+ for (i = 0; i < NUM_PPORT_PHY_LAYER_COUNTERS; i++)
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR32_BE(&priv->stats.pport
+ .phy_counters,
+ pport_phy_layer_cntrs_stats_desc, i));
+
+ for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_LOOPBACK_COUNTERS(mdev); i++)
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR64_BE(
+ &priv->stats.pport.phy_statistical_counters,
+ pport_phy_statistical_stats_desc, i));
+
+ for (i = 0;
+ i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_LOOPBACK_COUNTERS(mdev);
+ i++)
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR64_BE(
+ &priv->stats.pport.phy_statistical_counters,
+ pport_phy_statistical_err_lanes_stats_desc, i));
+
+ for (i = 0; i < NUM_PPORT_PHY_RECOVERY_LOOPBACK_COUNTERS(mdev); i++)
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR32_BE(
+ &priv->stats.pport.phy_recovery_counters,
+ pport_phy_recovery_cntrs_stats_desc, i));
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy)
@@ -1271,12 +1352,21 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy)
MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
- if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
- return;
+ if (MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) {
+ out = pstats->phy_statistical_counters;
+ MLX5_SET(ppcnt_reg, in, grp,
+ MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0,
+ 0);
+ }
- out = pstats->phy_statistical_counters;
- MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
- mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+ if (MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_recovery_counters)) {
+ out = pstats->phy_recovery_counters;
+ MLX5_SET(ppcnt_reg, in, grp,
+ MLX5_PHYSICAL_LAYER_RECOVERY_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0,
+ 0);
+ }
}
void mlx5e_get_link_ext_stats(struct net_device *dev,
@@ -1356,16 +1446,13 @@ static void fec_set_rs_stats(struct ethtool_fec_stats *fec_stats, u32 *ppcnt)
}
static void fec_set_block_stats(struct mlx5e_priv *priv,
+ int mode,
struct ethtool_fec_stats *fec_stats)
{
struct mlx5_core_dev *mdev = priv->mdev;
u32 out[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
- int mode = fec_active_mode(mdev);
-
- if (mode == MLX5E_FEC_NOFEC)
- return;
MLX5_SET(ppcnt_reg, in, local_port, 1);
MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
@@ -1376,6 +1463,7 @@ static void fec_set_block_stats(struct mlx5e_priv *priv,
case MLX5E_FEC_RS_528_514:
case MLX5E_FEC_RS_544_514:
case MLX5E_FEC_LLRS_272_257_1:
+ case MLX5E_FEC_RS_544_514_INTERLEAVED_QUAD:
fec_set_rs_stats(fec_stats, out);
return;
case MLX5E_FEC_FIRECODE:
@@ -1403,14 +1491,132 @@ static void fec_set_corrected_bits_total(struct mlx5e_priv *priv,
phy_corrected_bits);
}
+#define MLX5_RS_HISTOGRAM_ENTRIES \
+ (MLX5_FLD_SZ_BYTES(rs_histogram_cntrs, hist) / \
+ MLX5_FLD_SZ_BYTES(rs_histogram_cntrs, hist[0]))
+
+enum {
+ MLX5E_HISTOGRAM_FEC_RS_544_514 = 1,
+ MLX5E_HISTOGRAM_FEC_LLRS = 2,
+ MLX5E_HISTOGRAM_FEC_RS_528_514 = 3,
+};
+
+static bool fec_rs_validate_hist_type(int mode, int hist_type)
+{
+ switch (mode) {
+ case MLX5E_FEC_RS_528_514:
+ return hist_type == MLX5E_HISTOGRAM_FEC_RS_528_514;
+ case MLX5E_FEC_RS_544_514_INTERLEAVED_QUAD:
+ case MLX5E_FEC_RS_544_514:
+ return hist_type == MLX5E_HISTOGRAM_FEC_RS_544_514;
+ case MLX5E_FEC_LLRS_272_257_1:
+ return hist_type == MLX5E_HISTOGRAM_FEC_LLRS;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+static u8
+fec_rs_histogram_fill_ranges(struct mlx5e_priv *priv, int mode,
+ const struct ethtool_fec_hist_range **ranges)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 out[MLX5_ST_SZ_DW(pphcr_reg)] = {0};
+ u32 in[MLX5_ST_SZ_DW(pphcr_reg)] = {0};
+ int sz = MLX5_ST_SZ_BYTES(pphcr_reg);
+ u8 hist_type, num_of_bins;
+
+ memset(priv->fec_ranges, 0,
+ ETHTOOL_FEC_HIST_MAX * sizeof(*priv->fec_ranges));
+ MLX5_SET(pphcr_reg, in, local_port, 1);
+ if (mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPHCR, 0, 0))
+ return 0;
+
+ hist_type = MLX5_GET(pphcr_reg, out, active_hist_type);
+ if (!fec_rs_validate_hist_type(mode, hist_type))
+ return 0;
+
+ num_of_bins = MLX5_GET(pphcr_reg, out, num_of_bins);
+ if (WARN_ON_ONCE(num_of_bins > MLX5_RS_HISTOGRAM_ENTRIES))
+ return 0;
+
+ for (int i = 0; i < num_of_bins; i++) {
+ void *bin_range = MLX5_ADDR_OF(pphcr_reg, out, bin_range[i]);
+
+ priv->fec_ranges[i].high = MLX5_GET(bin_range_layout, bin_range,
+ high_val);
+ priv->fec_ranges[i].low = MLX5_GET(bin_range_layout, bin_range,
+ low_val);
+ }
+ *ranges = priv->fec_ranges;
+
+ return num_of_bins;
+}
+
+static void fec_rs_histogram_fill_stats(struct mlx5e_priv *priv,
+ u8 num_of_bins,
+ struct ethtool_fec_hist *hist)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 out[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
+ u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+ void *rs_histogram_cntrs;
+
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_RS_FEC_HISTOGRAM_GROUP);
+ if (mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0))
+ return;
+
+ rs_histogram_cntrs = MLX5_ADDR_OF(ppcnt_reg, out,
+ counter_set.rs_histogram_cntrs);
+ /* Guaranteed that num_of_bins is less than MLX5E_FEC_RS_HIST_MAX
+ * by fec_rs_histogram_fill_ranges().
+ */
+ for (int i = 0; i < num_of_bins; i++)
+ hist->values[i].sum = MLX5_GET64(rs_histogram_cntrs,
+ rs_histogram_cntrs,
+ hist[i]);
+}
+
+static void fec_set_histograms_stats(struct mlx5e_priv *priv, int mode,
+ struct ethtool_fec_hist *hist)
+{
+ u8 num_of_bins;
+
+ switch (mode) {
+ case MLX5E_FEC_RS_528_514:
+ case MLX5E_FEC_RS_544_514:
+ case MLX5E_FEC_LLRS_272_257_1:
+ case MLX5E_FEC_RS_544_514_INTERLEAVED_QUAD:
+ num_of_bins =
+ fec_rs_histogram_fill_ranges(priv, mode, &hist->ranges);
+ if (num_of_bins)
+ return fec_rs_histogram_fill_stats(priv, num_of_bins,
+ hist);
+ break;
+ default:
+ return;
+ }
+}
+
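The FEC histogram added above is reported as two parallel per-bin arrays: fec_rs_histogram_fill_ranges() publishes one low/high range descriptor per bin and fec_rs_histogram_fill_stats() fills the accumulated codeword count for each bin. A standalone sketch of that shape follows; the types and values are hypothetical, not the kernel's ethtool_fec_hist structures.

#include <stdio.h>

#define FEC_HIST_MAX 8

struct hist_range { unsigned int low, high; };

int main(void)
{
	/* one range descriptor per bin ... */
	struct hist_range ranges[FEC_HIST_MAX] = {
		{ 0, 0 }, { 1, 3 }, { 4, 7 }, { 8, 15 },
	};
	/* ... and one accumulated counter per bin */
	unsigned long long sums[FEC_HIST_MAX] = { 12000, 340, 25, 1 };
	int num_of_bins = 4;

	for (int i = 0; i < num_of_bins; i++)
		printf("bin %d: %u-%u symbol errors -> %llu codewords\n",
		       i, ranges[i].low, ranges[i].high, sums[i]);
	return 0;
}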
void mlx5e_stats_fec_get(struct mlx5e_priv *priv,
- struct ethtool_fec_stats *fec_stats)
+ struct ethtool_fec_stats *fec_stats,
+ struct ethtool_fec_hist *hist)
{
- if (!MLX5_CAP_PCAM_FEATURE(priv->mdev, ppcnt_statistical_group))
+ int mode = fec_active_mode(priv->mdev);
+
+ if (mode == MLX5E_FEC_NOFEC ||
+ !MLX5_CAP_PCAM_FEATURE(priv->mdev, ppcnt_statistical_group))
return;
fec_set_corrected_bits_total(priv, fec_stats);
- fec_set_block_stats(priv, fec_stats);
+ fec_set_block_stats(priv, mode, fec_stats);
+
+ if (MLX5_CAP_PCAM_REG(priv->mdev, pphcr))
+ fec_set_histograms_stats(priv, mode, hist);
}
#define PPORT_ETH_EXT_OFF(c) \
@@ -1436,9 +1642,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(eth_ext)
if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pport_eth_ext_stats_desc[i].format);
- return idx;
+ ethtool_puts(data, pport_eth_ext_stats_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(eth_ext)
@@ -1447,10 +1651,11 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(eth_ext)
if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
- data[idx++] =
- MLX5E_READ_CTR64_BE(&priv->stats.pport.eth_ext_counters,
- pport_eth_ext_stats_desc, i);
- return idx;
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR64_BE(
+ &priv->stats.pport.eth_ext_counters,
+ pport_eth_ext_stats_desc, i));
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(eth_ext)
@@ -1516,19 +1721,16 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pcie)
if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pcie_perf_stats_desc[i].format);
+ ethtool_puts(data, pcie_perf_stats_desc[i].format);
if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pcie_perf_stats_desc64[i].format);
+ ethtool_puts(data, pcie_perf_stats_desc64[i].format);
if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pcie_perf_stall_stats_desc[i].format);
- return idx;
+ ethtool_puts(data,
+ pcie_perf_stall_stats_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pcie)
@@ -1537,22 +1739,27 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pcie)
if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
- data[idx++] =
- MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
- pcie_perf_stats_desc, i);
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR32_BE(
+ &priv->stats.pcie.pcie_perf_counters,
+ pcie_perf_stats_desc, i));
if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
- data[idx++] =
- MLX5E_READ_CTR64_BE(&priv->stats.pcie.pcie_perf_counters,
- pcie_perf_stats_desc64, i);
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR64_BE(
+ &priv->stats.pcie.pcie_perf_counters,
+ pcie_perf_stats_desc64, i));
if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
- data[idx++] =
- MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
- pcie_perf_stall_stats_desc, i);
- return idx;
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR32_BE(
+ &priv->stats.pcie.pcie_perf_counters,
+ pcie_perf_stall_stats_desc, i));
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pcie)
@@ -1609,18 +1816,18 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_port_buff_congest)
int i, prio;
if (!MLX5_CAP_GEN(mdev, sbcam_reg))
- return idx;
+ return;
for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- pport_per_tc_prio_stats_desc[i].format, prio);
+ ethtool_sprintf(data,
+ pport_per_tc_prio_stats_desc[i].format,
+ prio);
for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS; i++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- pport_per_tc_congest_prio_stats_desc[i].format, prio);
+ ethtool_sprintf(data,
+ pport_per_tc_congest_prio_stats_desc[i].format,
+ prio);
}
-
- return idx;
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_port_buff_congest)
@@ -1630,20 +1837,24 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_port_buff_congest)
int i, prio;
if (!MLX5_CAP_GEN(mdev, sbcam_reg))
- return idx;
+ return;
for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
- data[idx++] =
- MLX5E_READ_CTR64_BE(&pport->per_tc_prio_counters[prio],
- pport_per_tc_prio_stats_desc, i);
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR64_BE(
+ &pport->per_tc_prio_counters[prio],
+ pport_per_tc_prio_stats_desc, i));
for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS ; i++)
- data[idx++] =
- MLX5E_READ_CTR64_BE(&pport->per_tc_congest_prio_counters[prio],
- pport_per_tc_congest_prio_stats_desc, i);
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR64_BE(
+ &pport->per_tc_congest_prio_counters
+ [prio],
+ pport_per_tc_congest_prio_stats_desc,
+ i));
}
-
- return idx;
}
static void mlx5e_grp_per_tc_prio_update_stats(struct mlx5e_priv *priv)
@@ -1728,35 +1939,33 @@ static int mlx5e_grp_per_prio_traffic_get_num_stats(void)
return NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * NUM_PPORT_PRIO;
}
-static int mlx5e_grp_per_prio_traffic_fill_strings(struct mlx5e_priv *priv,
- u8 *data,
- int idx)
+static void mlx5e_grp_per_prio_traffic_fill_strings(struct mlx5e_priv *priv,
+ u8 **data)
{
int i, prio;
for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- pport_per_prio_traffic_stats_desc[i].format, prio);
+ ethtool_sprintf(data,
+ pport_per_prio_traffic_stats_desc[i].format,
+ prio);
}
-
- return idx;
}
-static int mlx5e_grp_per_prio_traffic_fill_stats(struct mlx5e_priv *priv,
- u64 *data,
- int idx)
+static void mlx5e_grp_per_prio_traffic_fill_stats(struct mlx5e_priv *priv,
+ u64 **data)
{
int i, prio;
for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
- data[idx++] =
- MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
- pport_per_prio_traffic_stats_desc, i);
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR64_BE(
+ &priv->stats.pport
+ .per_prio_counters[prio],
+ pport_per_prio_traffic_stats_desc, i));
}
-
- return idx;
}
static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
@@ -1816,9 +2025,8 @@ static int mlx5e_grp_per_prio_pfc_get_num_stats(struct mlx5e_priv *priv)
NUM_PPORT_PFC_STALL_COUNTERS(priv);
}
-static int mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv,
- u8 *data,
- int idx)
+static void mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv,
+ u8 **data)
{
unsigned long pfc_combined;
int i, prio;
@@ -1829,28 +2037,26 @@ static int mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv,
char pfc_string[ETH_GSTRING_LEN];
snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio);
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- pport_per_prio_pfc_stats_desc[i].format, pfc_string);
+ ethtool_sprintf(data,
+ pport_per_prio_pfc_stats_desc[i].format,
+ pfc_string);
}
}
if (mlx5e_query_global_pause_combined(priv)) {
for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- pport_per_prio_pfc_stats_desc[i].format, "global");
+ ethtool_sprintf(data,
+ pport_per_prio_pfc_stats_desc[i].format,
+ "global");
}
}
for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pport_pfc_stall_stats_desc[i].format);
-
- return idx;
+ ethtool_puts(data, pport_pfc_stall_stats_desc[i].format);
}
-static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
- u64 *data,
- int idx)
+static void mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
+ u64 **data)
{
unsigned long pfc_combined;
int i, prio;
@@ -1858,25 +2064,30 @@ static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
pfc_combined = mlx5e_query_pfc_combined(priv);
for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
- data[idx++] =
- MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
- pport_per_prio_pfc_stats_desc, i);
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR64_BE(
+ &priv->stats.pport
+ .per_prio_counters[prio],
+ pport_per_prio_pfc_stats_desc, i));
}
}
if (mlx5e_query_global_pause_combined(priv)) {
for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
- data[idx++] =
- MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
- pport_per_prio_pfc_stats_desc, i);
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR64_BE(
+ &priv->stats.pport.per_prio_counters[0],
+ pport_per_prio_pfc_stats_desc, i));
}
}
for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
- data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
- pport_pfc_stall_stats_desc, i);
-
- return idx;
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_BE(
+ &priv->stats.pport.per_prio_counters[0],
+ pport_pfc_stall_stats_desc, i));
}
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_prio)
@@ -1887,16 +2098,14 @@ static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_prio)
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_prio)
{
- idx = mlx5e_grp_per_prio_traffic_fill_strings(priv, data, idx);
- idx = mlx5e_grp_per_prio_pfc_fill_strings(priv, data, idx);
- return idx;
+ mlx5e_grp_per_prio_traffic_fill_strings(priv, data);
+ mlx5e_grp_per_prio_pfc_fill_strings(priv, data);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_prio)
{
- idx = mlx5e_grp_per_prio_traffic_fill_stats(priv, data, idx);
- idx = mlx5e_grp_per_prio_pfc_fill_stats(priv, data, idx);
- return idx;
+ mlx5e_grp_per_prio_traffic_fill_stats(priv, data);
+ mlx5e_grp_per_prio_pfc_fill_stats(priv, data);
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_prio)
@@ -1944,12 +2153,10 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pme)
int i;
for (i = 0; i < NUM_PME_STATUS_STATS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_status_desc[i].format);
+ ethtool_puts(data, mlx5e_pme_status_desc[i].format);
for (i = 0; i < NUM_PME_ERR_STATS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format);
-
- return idx;
+ ethtool_puts(data, mlx5e_pme_error_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pme)
@@ -1960,14 +2167,14 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pme)
mlx5_get_pme_stats(priv->mdev, &pme_stats);
for (i = 0; i < NUM_PME_STATUS_STATS; i++)
- data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.status_counters,
- mlx5e_pme_status_desc, i);
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_CPU(pme_stats.status_counters,
+ mlx5e_pme_status_desc, i));
for (i = 0; i < NUM_PME_ERR_STATS; i++)
- data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.error_counters,
- mlx5e_pme_error_desc, i);
-
- return idx;
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_CPU(pme_stats.error_counters,
+ mlx5e_pme_error_desc, i));
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pme) { return; }
@@ -1979,12 +2186,12 @@ static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(tls)
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(tls)
{
- return idx + mlx5e_ktls_get_strings(priv, data + idx * ETH_GSTRING_LEN);
+ mlx5e_ktls_get_strings(priv, data);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(tls)
{
- return idx + mlx5e_ktls_get_stats(priv, data + idx);
+ mlx5e_ktls_get_stats(priv, data);
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(tls) { return; }
@@ -2005,8 +2212,11 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_bytes) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_skbs) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_match_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_large_hds) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nodata_packets) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nodata_bytes) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nosplit_packets) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nosplit_bytes) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
@@ -2025,7 +2235,6 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
#endif
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) },
-#ifdef CONFIG_PAGE_POOL_STATS
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_fast) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_slow) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_slow_high_order) },
@@ -2037,7 +2246,6 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring_full) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_released_ref) },
-#endif
#ifdef CONFIG_MLX5_EN_TLS
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) },
@@ -2063,6 +2271,7 @@ static const struct counter_desc sq_stats_desc[] = {
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, timestamps) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
#ifdef CONFIG_MLX5_EN_TLS
@@ -2175,6 +2384,7 @@ static const struct counter_desc ptp_cq_stats_desc[] = {
{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort) },
{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) },
{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, late_cqe) },
+ { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, lost_cqe) },
};
static const struct counter_desc ptp_rq_stats_desc[] = {
@@ -2214,6 +2424,7 @@ static const struct counter_desc qos_sq_stats_desc[] = {
{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, nop) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, timestamps) },
{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
#ifdef CONFIG_MLX5_EN_TLS
@@ -2264,10 +2475,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qos)
for (qid = 0; qid < max_qos_sqs; qid++)
for (i = 0; i < NUM_QOS_SQ_STATS; i++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- qos_sq_stats_desc[i].format, qid);
-
- return idx;
+ ethtool_sprintf(data, qos_sq_stats_desc[i].format, qid);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qos)
@@ -2284,10 +2492,10 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qos)
struct mlx5e_sq_stats *s = READ_ONCE(stats[qid]);
for (i = 0; i < NUM_QOS_SQ_STATS; i++)
- data[idx++] = MLX5E_READ_CTR64_CPU(s, qos_sq_stats_desc, i);
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR64_CPU(s, qos_sq_stats_desc, i));
}
-
- return idx;
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qos) { return; }
@@ -2312,29 +2520,28 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ptp)
int i, tc;
if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
- return idx;
+ return;
for (i = 0; i < NUM_PTP_CH_STATS; i++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- "%s", ptp_ch_stats_desc[i].format);
+ ethtool_puts(data, ptp_ch_stats_desc[i].format);
if (priv->tx_ptp_opened) {
for (tc = 0; tc < priv->max_opened_tc; tc++)
for (i = 0; i < NUM_PTP_SQ_STATS; i++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- ptp_sq_stats_desc[i].format, tc);
+ ethtool_sprintf(data,
+ ptp_sq_stats_desc[i].format,
+ tc);
for (tc = 0; tc < priv->max_opened_tc; tc++)
for (i = 0; i < NUM_PTP_CQ_STATS; i++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- ptp_cq_stats_desc[i].format, tc);
+ ethtool_sprintf(data,
+ ptp_cq_stats_desc[i].format,
+ tc);
}
if (priv->rx_ptp_opened) {
for (i = 0; i < NUM_PTP_RQ_STATS; i++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- ptp_rq_stats_desc[i].format, MLX5E_PTP_CHANNEL_IX);
+ ethtool_puts(data, ptp_rq_stats_desc[i].format);
}
- return idx;
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ptp)
@@ -2342,33 +2549,35 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ptp)
int i, tc;
if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
- return idx;
+ return;
for (i = 0; i < NUM_PTP_CH_STATS; i++)
- data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->ptp_stats.ch,
- ptp_ch_stats_desc, i);
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_CPU(&priv->ptp_stats.ch,
+ ptp_ch_stats_desc, i));
if (priv->tx_ptp_opened) {
for (tc = 0; tc < priv->max_opened_tc; tc++)
for (i = 0; i < NUM_PTP_SQ_STATS; i++)
- data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->ptp_stats.sq[tc],
- ptp_sq_stats_desc, i);
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_CPU(
+ &priv->ptp_stats.sq[tc],
+ ptp_sq_stats_desc, i));
for (tc = 0; tc < priv->max_opened_tc; tc++)
for (i = 0; i < NUM_PTP_CQ_STATS; i++)
- data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->ptp_stats.cq[tc],
- ptp_cq_stats_desc, i);
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_CPU(
+ &priv->ptp_stats.cq[tc],
+ ptp_cq_stats_desc, i));
}
if (priv->rx_ptp_opened) {
for (i = 0; i < NUM_PTP_RQ_STATS; i++)
- data[idx++] =
+ mlx5e_ethtool_put_stat(
+ data,
MLX5E_READ_CTR64_CPU(&priv->ptp_stats.rq,
- ptp_rq_stats_desc, i);
+ ptp_rq_stats_desc, i));
}
- return idx;
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ptp) { return; }
@@ -2394,38 +2603,29 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(channels)
for (i = 0; i < max_nch; i++)
for (j = 0; j < NUM_CH_STATS; j++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- ch_stats_desc[j].format, i);
+ ethtool_sprintf(data, ch_stats_desc[j].format, i);
for (i = 0; i < max_nch; i++) {
for (j = 0; j < NUM_RQ_STATS; j++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- rq_stats_desc[j].format, i);
+ ethtool_sprintf(data, rq_stats_desc[j].format, i);
for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- xskrq_stats_desc[j].format, i);
+ ethtool_sprintf(data, xskrq_stats_desc[j].format, i);
for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- rq_xdpsq_stats_desc[j].format, i);
+ ethtool_sprintf(data, rq_xdpsq_stats_desc[j].format, i);
}
for (tc = 0; tc < priv->max_opened_tc; tc++)
for (i = 0; i < max_nch; i++)
for (j = 0; j < NUM_SQ_STATS; j++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- sq_stats_desc[j].format,
- i + tc * max_nch);
+ ethtool_sprintf(data, sq_stats_desc[j].format,
+ i + tc * max_nch);
for (i = 0; i < max_nch; i++) {
for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- xsksq_stats_desc[j].format, i);
+ ethtool_sprintf(data, xsksq_stats_desc[j].format, i);
for (j = 0; j < NUM_XDPSQ_STATS; j++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- xdpsq_stats_desc[j].format, i);
+ ethtool_sprintf(data, xdpsq_stats_desc[j].format, i);
}
-
- return idx;
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels)
@@ -2436,44 +2636,50 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels)
for (i = 0; i < max_nch; i++)
for (j = 0; j < NUM_CH_STATS; j++)
- data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->ch,
- ch_stats_desc, j);
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_CPU(
+ &priv->channel_stats[i]->ch,
+ ch_stats_desc, j));
for (i = 0; i < max_nch; i++) {
for (j = 0; j < NUM_RQ_STATS; j++)
- data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->rq,
- rq_stats_desc, j);
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_CPU(
+ &priv->channel_stats[i]->rq,
+ rq_stats_desc, j));
for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
- data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xskrq,
- xskrq_stats_desc, j);
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_CPU(
+ &priv->channel_stats[i]->xskrq,
+ xskrq_stats_desc, j));
for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
- data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->rq_xdpsq,
- rq_xdpsq_stats_desc, j);
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_CPU(
+ &priv->channel_stats[i]->rq_xdpsq,
+ rq_xdpsq_stats_desc, j));
}
for (tc = 0; tc < priv->max_opened_tc; tc++)
for (i = 0; i < max_nch; i++)
for (j = 0; j < NUM_SQ_STATS; j++)
- data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->sq[tc],
- sq_stats_desc, j);
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR64_CPU(
+ &priv->channel_stats[i]->sq[tc],
+ sq_stats_desc, j));
for (i = 0; i < max_nch; i++) {
for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
- data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xsksq,
- xsksq_stats_desc, j);
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_CPU(
+ &priv->channel_stats[i]->xsksq,
+ xsksq_stats_desc, j));
for (j = 0; j < NUM_XDPSQ_STATS; j++)
- data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xdpsq,
- xdpsq_stats_desc, j);
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_CPU(
+ &priv->channel_stats[i]->xdpsq,
+ xdpsq_stats_desc, j));
}
-
- return idx;
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(channels) { return; }
@@ -2522,6 +2728,7 @@ mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
#ifdef CONFIG_MLX5_MACSEC
&MLX5E_STATS_GRP(macsec_hw),
#endif
+ &MLX5E_STATS_GRP(pcie_cong),
};
unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 12b3607afecd..09f155acb461 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -54,7 +54,7 @@
#define MLX5E_DECLARE_PTP_TX_STAT(type, fld) "ptp_tx%d_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_PTP_CH_STAT(type, fld) "ptp_ch_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_PTP_CQ_STAT(type, fld) "ptp_cq%d_"#fld, offsetof(type, fld)
-#define MLX5E_DECLARE_PTP_RQ_STAT(type, fld) "ptp_rq%d_"#fld, offsetof(type, fld)
+#define MLX5E_DECLARE_PTP_RQ_STAT(type, fld) "ptp_rq0_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_QOS_TX_STAT(type, fld) "qos_tx%d_"#fld, offsetof(type, fld)
@@ -71,11 +71,13 @@ struct mlx5e_priv;
struct mlx5e_stats_grp {
u16 update_stats_mask;
int (*get_num_stats)(struct mlx5e_priv *priv);
- int (*fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx);
- int (*fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx);
+ void (*fill_strings)(struct mlx5e_priv *priv, u8 **data);
+ void (*fill_stats)(struct mlx5e_priv *priv, u64 **data);
void (*update_stats)(struct mlx5e_priv *priv);
};
+void mlx5e_ethtool_put_stat(u64 **data, u64 val);
+
typedef const struct mlx5e_stats_grp *const mlx5e_stats_grp_t;
#define MLX5E_STATS_GRP_OP(grp, name) mlx5e_stats_grp_ ## grp ## _ ## name
@@ -87,10 +89,10 @@ typedef const struct mlx5e_stats_grp *const mlx5e_stats_grp_t;
void MLX5E_STATS_GRP_OP(grp, update_stats)(struct mlx5e_priv *priv)
#define MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(grp) \
- int MLX5E_STATS_GRP_OP(grp, fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx)
+ void MLX5E_STATS_GRP_OP(grp, fill_strings)(struct mlx5e_priv *priv, u8 **data)
#define MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(grp) \
- int MLX5E_STATS_GRP_OP(grp, fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx)
+ void MLX5E_STATS_GRP_OP(grp, fill_stats)(struct mlx5e_priv *priv, u64 **data)
#define MLX5E_STATS_GRP(grp) mlx5e_stats_grp_ ## grp
@@ -115,7 +117,8 @@ void mlx5e_stats_update_ndo_stats(struct mlx5e_priv *priv);
void mlx5e_stats_pause_get(struct mlx5e_priv *priv,
struct ethtool_pause_stats *pause_stats);
void mlx5e_stats_fec_get(struct mlx5e_priv *priv,
- struct ethtool_fec_stats *fec_stats);
+ struct ethtool_fec_stats *fec_stats,
+ struct ethtool_fec_hist *hist);
void mlx5e_stats_eth_phy_get(struct mlx5e_priv *priv,
struct ethtool_eth_phy_stats *phy_stats);
@@ -126,6 +129,8 @@ void mlx5e_stats_eth_ctrl_get(struct mlx5e_priv *priv,
void mlx5e_stats_rmon_get(struct mlx5e_priv *priv,
struct ethtool_rmon_stats *rmon,
const struct ethtool_rmon_hist_range **ranges);
+void mlx5e_stats_ts_get(struct mlx5e_priv *priv,
+ struct ethtool_ts_stats *ts_stats);
void mlx5e_get_link_ext_stats(struct net_device *dev,
struct ethtool_link_ext_stats *stats);
@@ -149,8 +154,11 @@ struct mlx5e_sw_stats {
u64 rx_gro_packets;
u64 rx_gro_bytes;
u64 rx_gro_skbs;
- u64 rx_gro_match_packets;
u64 rx_gro_large_hds;
+ u64 rx_hds_nodata_packets;
+ u64 rx_hds_nodata_bytes;
+ u64 rx_hds_nosplit_packets;
+ u64 rx_hds_nosplit_bytes;
u64 rx_mcast_packets;
u64 rx_ecn_mark;
u64 rx_removed_vlan_packets;
@@ -208,7 +216,6 @@ struct mlx5e_sw_stats {
u64 ch_aff_change;
u64 ch_force_irq;
u64 ch_eq_rearm;
-#ifdef CONFIG_PAGE_POOL_STATS
u64 rx_pp_alloc_fast;
u64 rx_pp_alloc_slow;
u64 rx_pp_alloc_slow_high_order;
@@ -220,7 +227,6 @@ struct mlx5e_sw_stats {
u64 rx_pp_recycle_ring;
u64 rx_pp_recycle_ring_full;
u64 rx_pp_recycle_released_ref;
-#endif
#ifdef CONFIG_MLX5_EN_TLS
u64 tx_tls_encrypted_packets;
u64 tx_tls_encrypted_bytes;
@@ -302,6 +308,9 @@ struct mlx5e_vport_stats {
#define PPORT_PHY_STATISTICAL_GET(pstats, c) \
MLX5_GET64(ppcnt_reg, (pstats)->phy_statistical_counters, \
counter_set.phys_layer_statistical_cntrs.c##_high)
+#define PPORT_PHY_RECOVERY_GET(pstats, c) \
+ MLX5_GET64(ppcnt_reg, (pstats)->phy_recovery_counters, \
+ counter_set.phys_layer_recovery_cntrs.c)
#define PPORT_PER_PRIO_GET(pstats, prio, c) \
MLX5_GET64(ppcnt_reg, pstats->per_prio_counters[prio], \
counter_set.eth_per_prio_grp_data_layout.c##_high)
@@ -317,6 +326,7 @@ struct mlx5e_pport_stats {
__be64 per_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
__be64 phy_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
__be64 phy_statistical_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
+ __be64 phy_recovery_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
__be64 eth_ext_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
__be64 per_tc_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
__be64 per_tc_congest_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
@@ -348,8 +358,11 @@ struct mlx5e_rq_stats {
u64 gro_packets;
u64 gro_bytes;
u64 gro_skbs;
- u64 gro_match_packets;
u64 gro_large_hds;
+ u64 hds_nodata_packets;
+ u64 hds_nodata_bytes;
+ u64 hds_nosplit_packets;
+ u64 hds_nosplit_bytes;
u64 mcast_packets;
u64 ecn_mark;
u64 removed_vlan_packets;
@@ -371,7 +384,6 @@ struct mlx5e_rq_stats {
u64 arfs_err;
#endif
u64 recover;
-#ifdef CONFIG_PAGE_POOL_STATS
u64 pp_alloc_fast;
u64 pp_alloc_slow;
u64 pp_alloc_slow_high_order;
@@ -383,7 +395,6 @@ struct mlx5e_rq_stats {
u64 pp_recycle_ring;
u64 pp_recycle_ring_full;
u64 pp_recycle_released_ref;
-#endif
#ifdef CONFIG_MLX5_EN_TLS
u64 tls_decrypted_packets;
u64 tls_decrypted_bytes;
@@ -429,6 +440,7 @@ struct mlx5e_sq_stats {
u64 stopped;
u64 dropped;
u64 recover;
+ u64 timestamps;
/* dirtied @completion */
u64 cqes ____cacheline_aligned_in_smp;
u64 wake;
@@ -461,6 +473,7 @@ struct mlx5e_ptp_cq_stats {
u64 abort;
u64 abort_abs_diff_ns;
u64 late_cqe;
+ u64 lost_cqe;
};
struct mlx5e_rep_stats {
@@ -478,6 +491,7 @@ struct mlx5e_rep_stats {
u64 tx_vport_rdma_multicast_bytes;
u64 vport_loopback_packets;
u64 vport_loopback_bytes;
+ u64 rx_vport_out_of_buffer;
};
struct mlx5e_stats {
@@ -498,6 +512,7 @@ static inline void mlx5e_stats_copy_rep_stats(struct rtnl_link_stats64 *vf_vport
vf_vport->tx_packets = rep_stats->vport_tx_packets;
vf_vport->rx_bytes = rep_stats->vport_rx_bytes;
vf_vport->tx_bytes = rep_stats->vport_tx_bytes;
+ vf_vport->rx_missed_errors = rep_stats->rx_vport_out_of_buffer;
}
extern mlx5e_stats_grp_t mlx5e_nic_stats_grps[];
@@ -521,5 +536,6 @@ extern MLX5E_DECLARE_STATS_GRP(ipsec_hw);
extern MLX5E_DECLARE_STATS_GRP(ipsec_sw);
extern MLX5E_DECLARE_STATS_GRP(ptp);
extern MLX5E_DECLARE_STATS_GRP(macsec_hw);
+extern MLX5E_DECLARE_STATS_GRP(pcie_cong);
#endif /* __MLX5_EN_STATS_H__ */
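Note on the callback change above (illustration only, not part of the patch): fill_strings()/fill_stats() now receive a cursor (u8 ** / u64 **) and advance it through ethtool_puts()/ethtool_sprintf() and mlx5e_ethtool_put_stat() instead of returning an index. A minimal stand-alone sketch of the same cursor idiom, where put_stat() is a hypothetical user-space stand-in for mlx5e_ethtool_put_stat():

#include <stdint.h>
#include <stdio.h>

/* store a value at the cursor and advance the caller's cursor */
static void put_stat(uint64_t **data, uint64_t val)
{
	*(*data)++ = val;
}

/* each "group" appends its values; no shared idx bookkeeping is needed */
static void fill_group_a(uint64_t **data)
{
	for (int i = 0; i < 3; i++)
		put_stat(data, 100 + i);
}

static void fill_group_b(uint64_t **data)
{
	put_stat(data, 42);
}

int main(void)
{
	uint64_t buf[8], *cur = buf;

	fill_group_a(&cur);
	fill_group_b(&cur);
	for (uint64_t *p = buf; p < cur; p++)
		printf("%llu\n", (unsigned long long)*p);
	return 0;
}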
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 31ed26cac9bf..a8773b2342c2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -66,6 +66,7 @@
#include "lib/devcom.h"
#include "lib/geneve.h"
#include "lib/fs_chains.h"
+#include "lib/mlx5.h"
#include "diag/en_tc_tracepoint.h"
#include <asm/div64.h>
#include "lag/lag.h"
@@ -757,11 +758,11 @@ static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
struct mlx5e_priv *priv = hp->func_priv;
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_rss_params_indir indir;
+ u32 rqt_size;
int err;
- err = mlx5e_rss_params_indir_init(&indir, mdev,
- mlx5e_rqt_size(mdev, hp->num_channels),
- mlx5e_rqt_size(mdev, hp->num_channels));
+ rqt_size = mlx5e_rqt_size(mdev, hp->num_channels);
+ err = mlx5e_rss_params_indir_init(&indir, rqt_size, rqt_size);
if (err)
return err;
@@ -835,9 +836,11 @@ static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
memset(ttc_params, 0, sizeof(*ttc_params));
- ttc_params->ns = mlx5_get_flow_namespace(hp->func_mdev,
- MLX5_FLOW_NAMESPACE_KERNEL);
+ ttc_params->ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
for (tt = 0; tt < MLX5_NUM_TT; tt++) {
+ if (mlx5_ttc_is_decrypted_esp_tt(tt))
+ continue;
+
ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
ttc_params->dests[tt].tir_num =
tt == MLX5_TT_ANY ?
@@ -1283,7 +1286,7 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[dest_ix].counter_id = mlx5_fc_id(attr->counter);
+ dest[dest_ix].counter = attr->counter;
dest_ix++;
}
@@ -1741,10 +1744,115 @@ has_encap_dests(struct mlx5_flow_attr *attr)
}
static int
+extra_split_attr_dests_needed(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr)
+{
+ bool int_dest = false, ext_dest = false;
+ struct mlx5_esw_flow_attr *esw_attr;
+ int i;
+
+ if (flow->attr != attr ||
+ !list_is_first(&attr->list, &flow->attrs))
+ return 0;
+
+ esw_attr = attr->esw_attr;
+ if (!esw_attr->split_count ||
+ esw_attr->split_count == esw_attr->out_count - 1)
+ return 0;
+
+ if (esw_attr->dest_int_port &&
+ (esw_attr->dests[esw_attr->split_count].flags &
+ MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE))
+ return esw_attr->split_count + 1;
+
+ for (i = esw_attr->split_count; i < esw_attr->out_count; i++) {
+ /* an external dest with encap is considered internal by firmware */
+ if (esw_attr->dests[i].vport == MLX5_VPORT_UPLINK &&
+ !(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP))
+ ext_dest = true;
+ else
+ int_dest = true;
+
+ if (ext_dest && int_dest)
+ return esw_attr->split_count;
+ }
+
+ return 0;
+}
+
+static int
+extra_split_attr_dests(struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr, int split_count)
+{
+ struct mlx5e_post_act *post_act = get_post_action(flow->priv);
+ struct mlx5e_tc_flow_parse_attr *parse_attr, *parse_attr2;
+ struct mlx5_esw_flow_attr *esw_attr, *esw_attr2;
+ struct mlx5e_post_act_handle *handle;
+ struct mlx5_flow_attr *attr2;
+ int i, j, err;
+
+ if (IS_ERR(post_act))
+ return PTR_ERR(post_act);
+
+ attr2 = mlx5_alloc_flow_attr(mlx5e_get_flow_namespace(flow));
+ parse_attr2 = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
+ if (!attr2 || !parse_attr2) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+ attr2->parse_attr = parse_attr2;
+
+ handle = mlx5e_tc_post_act_add(post_act, attr2);
+ if (IS_ERR(handle)) {
+ err = PTR_ERR(handle);
+ goto err_free;
+ }
+
+ esw_attr = attr->esw_attr;
+ esw_attr2 = attr2->esw_attr;
+ esw_attr2->in_rep = esw_attr->in_rep;
+
+ parse_attr = attr->parse_attr;
+ parse_attr2->filter_dev = parse_attr->filter_dev;
+
+ for (i = split_count, j = 0; i < esw_attr->out_count; i++, j++)
+ esw_attr2->dests[j] = esw_attr->dests[i];
+
+ esw_attr2->out_count = j;
+ attr2->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+
+ err = mlx5e_tc_post_act_offload(post_act, handle);
+ if (err)
+ goto err_post_act_offload;
+
+ err = mlx5e_tc_post_act_set_handle(flow->priv->mdev, handle,
+ &parse_attr->mod_hdr_acts);
+ if (err)
+ goto err_post_act_set_handle;
+
+ esw_attr->out_count = split_count;
+ attr->extra_split_ft = mlx5e_tc_post_act_get_ft(post_act);
+ flow->extra_split_attr = attr2;
+
+ attr2->post_act_handle = handle;
+
+ return 0;
+
+err_post_act_set_handle:
+ mlx5e_tc_post_act_unoffload(post_act, handle);
+err_post_act_offload:
+ mlx5e_tc_post_act_del(post_act, handle);
+err_free:
+ kvfree(parse_attr2);
+ kfree(attr2);
+ return err;
+}
+
+static int
post_process_attr(struct mlx5e_tc_flow *flow,
struct mlx5_flow_attr *attr,
struct netlink_ext_ack *extack)
{
+ int extra_split;
bool vf_tun;
int err = 0;
@@ -1758,6 +1866,13 @@ post_process_attr(struct mlx5e_tc_flow *flow,
goto err_out;
}
+ extra_split = extra_split_attr_dests_needed(flow, attr);
+ if (extra_split > 0) {
+ err = extra_split_attr_dests(flow, attr, extra_split);
+ if (err)
+ goto err_out;
+ }
+
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
err = mlx5e_tc_attach_mod_hdr(flow->priv, flow, attr);
if (err)
@@ -1917,9 +2032,8 @@ err_out:
return err;
}
-static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
+static bool mlx5_flow_has_geneve_opt(struct mlx5_flow_spec *spec)
{
- struct mlx5_flow_spec *spec = &flow->attr->parse_attr->spec;
void *headers_v = MLX5_ADDR_OF(fte_match_param,
spec->match_value,
misc_parameters_3);
@@ -1958,7 +2072,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
}
complete_all(&flow->del_hw_done);
- if (mlx5_flow_has_geneve_opt(flow))
+ if (mlx5_flow_has_geneve_opt(&attr->parse_attr->spec))
mlx5_geneve_tlv_option_del(priv->mdev->geneve);
if (flow->decap_route)
@@ -1972,6 +2086,11 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
mlx5e_tc_act_stats_del_flow(get_act_stats_handle(priv), flow);
free_flow_post_acts(flow);
+ if (flow->extra_split_attr) {
+ mlx5_free_flow_attr_actions(flow, flow->extra_split_attr);
+ kvfree(flow->extra_split_attr->parse_attr);
+ kfree(flow->extra_split_attr);
+ }
mlx5_free_flow_attr_actions(flow, attr);
kvfree(attr->esw_attr->rx_tun_attr);
@@ -2458,12 +2577,13 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
err = mlx5e_tc_tun_parse(filter_dev, priv, tmp_spec, f, match_level);
if (err) {
- kvfree(tmp_spec);
NL_SET_ERR_MSG_MOD(extack, "Failed to parse tunnel attributes");
netdev_warn(priv->netdev, "Failed to parse tunnel attributes");
- return err;
+ } else {
+ err = mlx5e_tc_set_attr_rx_tun(flow, tmp_spec);
}
- err = mlx5e_tc_set_attr_rx_tun(flow, tmp_spec);
+ if (mlx5_flow_has_geneve_opt(tmp_spec))
+ mlx5_geneve_tlv_option_del(priv->mdev->geneve);
kvfree(tmp_spec);
if (err)
return err;
@@ -2802,12 +2922,6 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
flow_rule_match_control(rule, &match);
addr_type = match.key->addr_type;
- /* the HW doesn't support frag first/later */
- if (match.mask->flags & FLOW_DIS_FIRST_FRAG) {
- NL_SET_ERR_MSG_MOD(extack, "Match on frag first/later is not supported");
- return -EOPNOTSUPP;
- }
-
if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
@@ -2820,6 +2934,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
else
*match_level = MLX5_MATCH_L3;
}
+
+ if (!flow_rule_is_supp_control_flags(FLOW_DIS_IS_FRAGMENT,
+ match.mask->flags, extack))
+ return -EOPNOTSUPP;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
@@ -3496,15 +3614,11 @@ static bool same_port_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv
bool mlx5e_same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
{
struct mlx5_core_dev *fmdev, *pmdev;
- u64 fsystem_guid, psystem_guid;
fmdev = priv->mdev;
pmdev = peer_priv->mdev;
- fsystem_guid = mlx5_query_nic_system_image_guid(fmdev);
- psystem_guid = mlx5_query_nic_system_image_guid(pmdev);
-
- return (fsystem_guid == psystem_guid);
+ return mlx5_same_hw_devs(fmdev, pmdev);
}
static int
@@ -5119,10 +5233,11 @@ static void mlx5e_tc_nic_destroy_miss_table(struct mlx5e_priv *priv)
int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
{
struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
+ u8 mapping_id[MLX5_SW_IMAGE_GUID_MAX_BYTES];
struct mlx5_core_dev *dev = priv->mdev;
struct mapping_ctx *chains_mapping;
struct mlx5_chains_attr attr = {};
- u64 mapping_id;
+ u8 id_len;
int err;
mlx5e_mod_hdr_tbl_init(&tc->mod_hdr);
@@ -5138,11 +5253,13 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
lockdep_set_class(&tc->ht.mutex, &tc_ht_lock_key);
lockdep_init_map(&tc->ht.run_work.lockdep_map, "tc_ht_wq_key", &tc_ht_wq_key, 0);
- mapping_id = mlx5_query_nic_system_image_guid(dev);
+ mlx5_query_nic_sw_system_image_guid(dev, mapping_id, &id_len);
- chains_mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_CHAIN,
+ chains_mapping = mapping_create_for_id(mapping_id, id_len,
+ MAPPING_TYPE_CHAIN,
sizeof(struct mlx5_mapped_obj),
- MLX5E_TC_TABLE_CHAIN_TAG_MASK, true);
+ MLX5E_TC_TABLE_CHAIN_TAG_MASK,
+ true);
if (IS_ERR(chains_mapping)) {
err = PTR_ERR(chains_mapping);
@@ -5273,13 +5390,15 @@ void mlx5e_tc_ht_cleanup(struct rhashtable *tc_ht)
int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv)
{
const size_t sz_enc_opts = sizeof(struct tunnel_match_enc_opts);
+ u8 mapping_id[MLX5_SW_IMAGE_GUID_MAX_BYTES];
+ struct mlx5_devcom_match_attr attr = {};
struct netdev_phys_item_id ppid;
struct mlx5e_rep_priv *rpriv;
struct mapping_ctx *mapping;
struct mlx5_eswitch *esw;
struct mlx5e_priv *priv;
- u64 mapping_id, key;
int err = 0;
+ u8 id_len;
rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
priv = netdev_priv(rpriv->netdev);
@@ -5297,9 +5416,9 @@ int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv)
uplink_priv->tc_psample = mlx5e_tc_sample_init(esw, uplink_priv->post_act);
- mapping_id = mlx5_query_nic_system_image_guid(esw->dev);
+ mlx5_query_nic_sw_system_image_guid(esw->dev, mapping_id, &id_len);
- mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_TUNNEL,
+ mapping = mapping_create_for_id(mapping_id, id_len, MAPPING_TYPE_TUNNEL,
sizeof(struct tunnel_match_key),
TUNNEL_INFO_BITS_MASK, true);
@@ -5312,8 +5431,10 @@ int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv)
/* Two last values are reserved for stack devices slow path table mark
* and bridge ingress push mark.
*/
- mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_TUNNEL_ENC_OPTS,
- sz_enc_opts, ENC_OPTS_BITS_MASK - 2, true);
+ mapping = mapping_create_for_id(mapping_id, id_len,
+ MAPPING_TYPE_TUNNEL_ENC_OPTS,
+ sz_enc_opts, ENC_OPTS_BITS_MASK - 2,
+ true);
if (IS_ERR(mapping)) {
err = PTR_ERR(mapping);
goto err_enc_opts_mapping;
@@ -5332,10 +5453,12 @@ int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv)
goto err_action_counter;
}
- err = dev_get_port_parent_id(priv->netdev, &ppid, false);
+ err = netif_get_port_parent_id(priv->netdev, &ppid, false);
if (!err) {
- memcpy(&key, &ppid.id, sizeof(key));
- mlx5_esw_offloads_devcom_init(esw, key);
+ memcpy(&attr.key.buf, &ppid.id, ppid.id_len);
+ attr.flags = MLX5_DEVCOM_MATCH_FLAGS_NS;
+ attr.net = mlx5_core_net(esw->dev);
+ mlx5_esw_offloads_devcom_init(esw, &attr);
}
return 0;
@@ -5464,6 +5587,7 @@ static bool mlx5e_tc_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct tunnel_match_enc_opts enc_opts = {};
struct mlx5_rep_uplink_priv *uplink_priv;
+ IP_TUNNEL_DECLARE_FLAGS(flags) = { };
struct mlx5e_rep_priv *uplink_rpriv;
struct metadata_dst *tun_dst;
struct tunnel_match_key key;
@@ -5471,6 +5595,8 @@ static bool mlx5e_tc_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb
struct net_device *dev;
int err;
+ __set_bit(IP_TUNNEL_KEY_BIT, flags);
+
enc_opts_id = tunnel_id & ENC_OPTS_BITS_MASK;
tun_id = tunnel_id >> ENC_OPTS_BITS;
@@ -5503,14 +5629,14 @@ static bool mlx5e_tc_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb
case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
tun_dst = __ip_tun_set_dst(key.enc_ipv4.src, key.enc_ipv4.dst,
key.enc_ip.tos, key.enc_ip.ttl,
- key.enc_tp.dst, TUNNEL_KEY,
+ key.enc_tp.dst, flags,
key32_to_tunnel_id(key.enc_key_id.keyid),
enc_opts.key.len);
break;
case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
tun_dst = __ipv6_tun_set_dst(&key.enc_ipv6.src, &key.enc_ipv6.dst,
key.enc_ip.tos, key.enc_ip.ttl,
- key.enc_tp.dst, 0, TUNNEL_KEY,
+ key.enc_tp.dst, 0, flags,
key32_to_tunnel_id(key.enc_key_id.keyid),
enc_opts.key.len);
break;
@@ -5528,11 +5654,16 @@ static bool mlx5e_tc_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb
tun_dst->u.tun_info.key.tp_src = key.enc_tp.src;
- if (enc_opts.key.len)
+ if (enc_opts.key.len) {
+ ip_tunnel_flags_zero(flags);
+ if (enc_opts.key.dst_opt_type)
+ __set_bit(enc_opts.key.dst_opt_type, flags);
+
ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
enc_opts.key.data,
enc_opts.key.len,
- enc_opts.key.dst_opt_type);
+ flags);
+ }
skb_dst_set(skb, (struct dst_entry *)tun_dst);
dev = dev_get_by_index(&init_net, key.filter_ifindex);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index c24bda56b2b5..e1b8cb78369f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -86,6 +86,7 @@ struct mlx5_flow_attr {
u32 dest_chain;
struct mlx5_flow_table *ft;
struct mlx5_flow_table *dest_ft;
+ struct mlx5_flow_table *extra_split_ft;
u8 inner_match_level;
u8 outer_match_level;
u8 tun_ip_version;
@@ -139,7 +140,7 @@ struct mlx5_rx_tun_attr {
#define MLX5E_TC_TABLE_CHAIN_TAG_BITS 16
#define MLX5E_TC_TABLE_CHAIN_TAG_MASK GENMASK(MLX5E_TC_TABLE_CHAIN_TAG_BITS - 1, 0)
-#define MLX5E_TC_MAX_INT_PORT_NUM (8)
+#define MLX5E_TC_MAX_INT_PORT_NUM (32)
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index e21a3b4128ce..14884b9ea7f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -39,6 +39,7 @@
#include "ipoib/ipoib.h"
#include "en_accel/en_accel.h"
#include "en_accel/ipsec_rxtx.h"
+#include "en_accel/psp_rxtx.h"
#include "en_accel/macsec.h"
#include "en/ptp.h"
#include <net/ipv6.h>
@@ -120,6 +121,11 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_accel_tx_state *accel,
struct mlx5_wqe_eth_seg *eseg)
{
+#ifdef CONFIG_MLX5_EN_PSP
+ if (unlikely(mlx5e_psp_txwqe_build_eseg_csum(sq, skb, &accel->psp_st, eseg)))
+ return;
+#endif
+
if (unlikely(mlx5e_ipsec_txwqe_build_eseg_csum(sq, skb, eseg)))
return;
@@ -153,7 +159,11 @@ mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb, int *hopbyhop)
*hopbyhop = 0;
if (skb->encapsulation) {
- ihs = skb_inner_tcp_all_headers(skb);
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
+ ihs = skb_inner_transport_offset(skb) +
+ sizeof(struct udphdr);
+ else
+ ihs = skb_inner_tcp_all_headers(skb);
stats->tso_inner_packets++;
stats->tso_inner_bytes += skb->len - ihs;
} else {
@@ -192,7 +202,7 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
dseg->lkey = sq->mkey_be;
dseg->byte_count = cpu_to_be32(headlen);
- mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
+ mlx5e_dma_push_single(sq, dma_addr, headlen);
num_dma++;
dseg++;
}
@@ -210,7 +220,7 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
dseg->lkey = sq->mkey_be;
dseg->byte_count = cpu_to_be32(fsz);
- mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
+ mlx5e_dma_push_netmem(sq, skb_frag_netmem(frag), dma_addr, fsz);
num_dma++;
dseg++;
}
@@ -246,14 +256,13 @@ mlx5e_tx_wqe_inline_mode(struct mlx5e_txqsq *sq, struct sk_buff *skb,
u8 mode;
#ifdef CONFIG_MLX5_EN_TLS
- if (accel && accel->tls.tls_tisn)
+ if (accel->tls.tls_tisn)
return MLX5_INLINE_MODE_TCP_UDP;
#endif
mode = sq->min_inline_mode;
- if (skb_vlan_tag_present(skb) &&
- test_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state))
+ if (skb_vlan_tag_present(skb))
mode = max_t(u8, MLX5_INLINE_MODE_L2, mode);
return mode;
@@ -294,7 +303,7 @@ static void mlx5e_sq_xmit_prepare(struct mlx5e_txqsq *sq, struct sk_buff *skb,
stats->packets++;
}
- attr->insz = mlx5e_accel_tx_ids_len(sq, accel);
+ attr->insz = mlx5e_accel_tx_ids_len(sq, skb, accel);
stats->bytes += attr->num_bytes;
}
@@ -333,10 +342,11 @@ static void mlx5e_sq_calc_wqe_attr(struct sk_buff *skb, const struct mlx5e_tx_at
};
}
-static void mlx5e_tx_skb_update_hwts_flags(struct sk_buff *skb)
+static void mlx5e_tx_skb_update_ts_flags(struct sk_buff *skb)
{
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ skb_tx_timestamp(skb);
}
static void mlx5e_tx_check_stop(struct mlx5e_txqsq *sq)
@@ -388,7 +398,7 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | attr->opcode);
cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | wqe_attr->ds_cnt);
- mlx5e_tx_skb_update_hwts_flags(skb);
+ mlx5e_tx_skb_update_ts_flags(skb);
sq->pc += wi->num_wqebbs;
@@ -478,12 +488,6 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
}
eseg->inline_hdr.sz |= cpu_to_be16(ihs);
dseg += wqe_attr->ds_cnt_inl;
- } else if (skb_vlan_tag_present(skb)) {
- eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
- if (skb->vlan_proto == cpu_to_be16(ETH_P_8021AD))
- eseg->insert.type |= cpu_to_be16(MLX5_ETH_WQE_SVLAN);
- eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb));
- stats->added_vlan_packets++;
}
dseg += wqe_attr->ds_cnt_ids;
@@ -521,9 +525,9 @@ static void mlx5e_tx_mpwqe_session_start(struct mlx5e_txqsq *sq,
{
struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
struct mlx5e_tx_wqe *wqe;
- u16 pi;
+ u16 pi, num_wqebbs;
- pi = mlx5e_txqsq_get_next_pi(sq, sq->max_sq_mpw_wqebbs);
+ pi = mlx5e_txqsq_get_next_pi_anysize(sq, &num_wqebbs);
wqe = MLX5E_TX_FETCH_WQE(sq, pi);
net_prefetchw(wqe->data);
@@ -531,6 +535,7 @@ static void mlx5e_tx_mpwqe_session_start(struct mlx5e_txqsq *sq,
.wqe = wqe,
.bytes_count = 0,
.ds_count = MLX5E_TX_WQE_EMPTY_DS_COUNT,
+ .ds_count_max = num_wqebbs * MLX5_SEND_WQEBB_NUM_DS,
.pkt_count = 0,
.inline_on = 0,
};
@@ -617,12 +622,12 @@ mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
sq->stats->xmit_more += xmit_more;
- mlx5e_dma_push(sq, txd.dma_addr, txd.len, MLX5E_DMA_MAP_SINGLE);
+ mlx5e_dma_push_single(sq, txd.dma_addr, txd.len);
mlx5e_skb_fifo_push(&sq->db.skb_fifo, skb);
mlx5e_tx_mpwqe_add_dseg(sq, &txd);
- mlx5e_tx_skb_update_hwts_flags(skb);
+ mlx5e_tx_skb_update_ts_flags(skb);
- if (unlikely(mlx5e_tx_mpwqe_is_full(&sq->mpwqe, sq->max_sq_mpw_wqebbs))) {
+ if (unlikely(mlx5e_tx_mpwqe_is_full(&sq->mpwqe))) {
/* Might stop the queue and affect the retval of __netdev_tx_sent_queue. */
cseg = mlx5e_tx_mpwqe_session_complete(sq);
@@ -638,7 +643,6 @@ mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
return;
err_unmap:
- mlx5e_dma_unmap_wqe_err(sq, 1);
sq->stats->dropped++;
dev_kfree_skb_any(skb);
mlx5e_tx_flush(sq);
@@ -655,7 +659,7 @@ static void mlx5e_cqe_ts_id_eseg(struct mlx5e_ptpsq *ptpsq, struct sk_buff *skb,
struct mlx5_wqe_eth_seg *eseg)
{
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
- eseg->flow_table_metadata =
+ eseg->flow_table_metadata |=
cpu_to_be32(mlx5e_ptp_metadata_fifo_peek(&ptpsq->metadata_freelist));
}
@@ -663,7 +667,7 @@ static void mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *
struct sk_buff *skb, struct mlx5e_accel_tx_state *accel,
struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
- mlx5e_accel_tx_eseg(priv, skb, eseg, ihs);
+ mlx5e_accel_tx_eseg(priv, skb, accel, eseg, ihs);
mlx5e_txwqe_build_eseg_csum(sq, skb, accel, eseg);
if (unlikely(sq->ptpsq))
mlx5e_cqe_ts_id_eseg(sq->ptpsq, skb, eseg);
@@ -749,11 +753,13 @@ static void mlx5e_consume_skb(struct mlx5e_txqsq *sq, struct sk_buff *skb,
u64 ts = get_cqe_ts(cqe);
hwts.hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, ts);
- if (sq->ptpsq)
+ if (sq->ptpsq) {
mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_CQE_HWTSTAMP,
- hwts.hwtstamp, sq->ptpsq->cq_stats);
- else
+ hwts.hwtstamp, sq->ptpsq);
+ } else {
skb_tstamp_tx(skb, &hwts);
+ sq->stats->timestamps++;
+ }
}
napi_consume_skb(skb, napi_budget);
@@ -976,6 +982,7 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_tx_attr attr;
struct mlx5i_tx_wqe *wqe;
+ struct mlx5e_accel_tx_state accel = {};
struct mlx5_wqe_datagram_seg *datagram;
struct mlx5_wqe_ctrl_seg *cseg;
struct mlx5_wqe_eth_seg *eseg;
@@ -986,7 +993,7 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
int num_dma;
u16 pi;
- mlx5e_sq_xmit_prepare(sq, skb, NULL, &attr);
+ mlx5e_sq_xmit_prepare(sq, skb, &accel, &attr);
mlx5i_sq_calc_wqe_attr(skb, &attr, &wqe_attr);
pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs);
@@ -1003,7 +1010,7 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
mlx5i_txwqe_build_datagram(av, dqpn, dqkey, datagram);
- mlx5e_txwqe_build_eseg_csum(sq, skb, NULL, eseg);
+ mlx5e_txwqe_build_eseg_csum(sq, skb, &accel, eseg);
eseg->mss = attr.mss;
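Note on the GSO change in mlx5e_tx_get_gso_ihs() above (illustration only, not part of the patch): for encapsulated SKB_GSO_UDP_L4 packets the inline header size now stops after the inner UDP header instead of covering the inner TCP headers. A toy user-space model of that branch, where the struct fields and the 8-byte UDP header length stand in for the real skb helpers:

#include <stdio.h>

struct toy_skb {
	int inner_transport_offset;	/* models skb_inner_transport_offset() */
	int inner_tcp_all_headers;	/* models skb_inner_tcp_all_headers() */
	int gso_udp_l4;			/* models SKB_GSO_UDP_L4 in gso_type */
};

static int tx_get_gso_ihs(const struct toy_skb *skb)
{
	const int udphdr_len = 8;	/* sizeof(struct udphdr) */

	if (skb->gso_udp_l4)
		return skb->inner_transport_offset + udphdr_len;
	return skb->inner_tcp_all_headers;
}

int main(void)
{
	struct toy_skb skb = {
		.inner_transport_offset = 78,
		.inner_tcp_all_headers = 98,
		.gso_udp_l4 = 1,
	};

	printf("ihs = %d\n", tx_get_gso_ihs(&skb));	/* 86 for the UDP case */
	return 0;
}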
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index a7d9b7cb4297..76108299ea57 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -55,7 +55,7 @@ static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq)
return;
dim_update_sample(sq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample);
- net_dim(&sq->dim, dim_sample);
+ net_dim(sq->dim, &dim_sample);
}
static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq)
@@ -67,7 +67,7 @@ static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq)
return;
dim_update_sample(rq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample);
- net_dim(&rq->dim, dim_sample);
+ net_dim(rq->dim, &dim_sample);
}
void mlx5e_trigger_irq(struct mlx5e_icosq *sq)
@@ -165,7 +165,8 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
if (unlikely(!budget))
goto out;
- busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq.cq);
+ if (c->xdpsq)
+ busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq->cq);
if (c->xdp)
busy |= mlx5e_poll_xdpsq_cq(&c->rq_xdpsq.cq);
@@ -236,7 +237,8 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
mlx5e_cq_arm(&rq->cq);
mlx5e_cq_arm(&c->icosq.cq);
mlx5e_cq_arm(&c->async_icosq.cq);
- mlx5e_cq_arm(&c->xdpsq.cq);
+ if (c->xdpsq)
+ mlx5e_cq_arm(&c->xdpsq->cq);
if (xsk_open) {
mlx5e_handle_rx_dim(xskrq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 40a6cb052a2d..25499da177bc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -32,9 +32,7 @@ enum {
MLX5_EQ_STATE_ALWAYS_ARMED = 0xb,
};
-enum {
- MLX5_EQ_DOORBEL_OFFSET = 0x40,
-};
+#define MLX5_EQ_DOORBELL_OFFSET 0x40
/* budget must be smaller than MLX5_NUM_SPARE_EQE to guarantee that we update
* the ci before we polled all the entries in the EQ. MLX5_NUM_SPARE_EQE is
@@ -114,14 +112,10 @@ static int mlx5_eq_comp_int(struct notifier_block *nb,
struct mlx5_eq *eq = &eq_comp->core;
struct mlx5_eqe *eqe;
int num_eqes = 0;
- u32 cqn = -1;
- eqe = next_eqe_sw(eq);
- if (!eqe)
- goto out;
-
- do {
+ while ((eqe = next_eqe_sw(eq))) {
struct mlx5_core_cq *cq;
+ u32 cqn;
/* Make sure we read EQ entry contents after we've
* checked the ownership bit.
@@ -142,14 +136,12 @@ static int mlx5_eq_comp_int(struct notifier_block *nb,
++eq->cons_index;
- } while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));
+ if (++num_eqes >= MLX5_EQ_POLLING_BUDGET)
+ break;
+ }
-out:
eq_update_ci(eq, 1);
- if (cqn != -1)
- tasklet_schedule(&eq_comp->tasklet_ctx.task);
-
return 0;
}
@@ -215,11 +207,7 @@ static int mlx5_eq_async_int(struct notifier_block *nb,
recovery = action == ASYNC_EQ_RECOVER;
mlx5_eq_async_int_lock(eq_async, recovery, &flags);
- eqe = next_eqe_sw(eq);
- if (!eqe)
- goto out;
-
- do {
+ while ((eqe = next_eqe_sw(eq))) {
/*
* Make sure we read EQ entry contents after we've
* checked the ownership bit.
@@ -231,9 +219,10 @@ static int mlx5_eq_async_int(struct notifier_block *nb,
++eq->cons_index;
- } while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));
+ if (++num_eqes >= MLX5_EQ_POLLING_BUDGET)
+ break;
+ }
-out:
eq_update_ci(eq, 1);
mlx5_eq_async_int_unlock(eq_async, recovery, &flags);
@@ -318,7 +307,7 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
MLX5_SET(eqc, eqc, log_eq_size, eq->fbc.log_sz);
- MLX5_SET(eqc, eqc, uar_page, priv->uar->index);
+ MLX5_SET(eqc, eqc, uar_page, priv->bfreg.up->index);
MLX5_SET(eqc, eqc, intr, vecidx);
MLX5_SET(eqc, eqc, log_page_size,
eq->frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
@@ -331,7 +320,7 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
eq->eqn = MLX5_GET(create_eq_out, out, eq_number);
eq->irqn = pci_irq_vector(dev->pdev, vecidx);
eq->dev = dev;
- eq->doorbell = priv->uar->map + MLX5_EQ_DOORBEL_OFFSET;
+ eq->doorbell = priv->bfreg.up->map + MLX5_EQ_DOORBELL_OFFSET;
err = mlx5_debug_eq_add(dev, eq);
if (err)
@@ -594,6 +583,9 @@ static void gather_async_events_mask(struct mlx5_core_dev *dev, u64 mask[4])
async_event_mask |=
(1ull << MLX5_EVENT_TYPE_OBJECT_CHANGE);
+ if (mlx5_pcie_cong_event_supported(dev))
+ async_event_mask |= (1ull << MLX5_EVENT_TYPE_OBJECT_CHANGE);
+
mask[0] = async_event_mask;
if (MLX5_CAP_GEN(dev, event_cap))
@@ -688,6 +680,12 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
if (err)
goto err2;
+ /* Skip page EQ creation when the device does not issue page requests */
+ if (MLX5_CAP_GEN(dev, page_request_disable)) {
+ mlx5_core_dbg(dev, "Skip page EQ creation\n");
+ return 0;
+ }
+
param = (struct mlx5_eq_param) {
.irq = table->ctrl_irq,
.nent = /* TODO: sriov max_vf + */ 1,
@@ -708,7 +706,7 @@ err2:
err1:
mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
- mlx5_ctrl_irq_release(table->ctrl_irq);
+ mlx5_ctrl_irq_release(dev, table->ctrl_irq);
return err;
}
@@ -716,14 +714,15 @@ static void destroy_async_eqs(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
- cleanup_async_eq(dev, &table->pages_eq, "pages");
+ if (!MLX5_CAP_GEN(dev, page_request_disable))
+ cleanup_async_eq(dev, &table->pages_eq, "pages");
cleanup_async_eq(dev, &table->async_eq, "async");
mlx5_cmd_allowed_opcode(dev, MLX5_CMD_OP_DESTROY_EQ);
mlx5_cmd_use_polling(dev);
cleanup_async_eq(dev, &table->cmd_eq, "cmd");
mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
- mlx5_ctrl_irq_release(table->ctrl_irq);
+ mlx5_ctrl_irq_release(dev, table->ctrl_irq);
}
struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev)
@@ -803,15 +802,8 @@ EXPORT_SYMBOL(mlx5_eq_get_eqe);
void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm)
{
- __be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
- u32 val;
-
eq->cons_index += cc;
- val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);
-
- __raw_writel((__force u32)cpu_to_be32(val), addr);
- /* We still want ordering, just not swabbing, so add a barrier */
- wmb();
+ eq_update_ci(eq, arm);
}
EXPORT_SYMBOL(mlx5_eq_update_ci);
@@ -828,28 +820,9 @@ static void comp_irq_release_pci(struct mlx5_core_dev *dev, u16 vecidx)
mlx5_irq_release_vector(irq);
}
-static int mlx5_cpumask_default_spread(int numa_node, int index)
+static int mlx5_cpumask_default_spread(struct mlx5_core_dev *dev, int index)
{
- const struct cpumask *prev = cpu_none_mask;
- const struct cpumask *mask;
- int found_cpu = 0;
- int i = 0;
- int cpu;
-
- rcu_read_lock();
- for_each_numa_hop_mask(mask, numa_node) {
- for_each_cpu_andnot(cpu, mask, prev) {
- if (i++ == index) {
- found_cpu = cpu;
- goto spread_done;
- }
- }
- prev = mask;
- }
-
-spread_done:
- rcu_read_unlock();
- return found_cpu;
+ return cpumask_local_spread(index, dev->priv.numa_node);
}
static struct cpu_rmap *mlx5_eq_table_get_pci_rmap(struct mlx5_core_dev *dev)
@@ -873,7 +846,7 @@ static int comp_irq_request_pci(struct mlx5_core_dev *dev, u16 vecidx)
int cpu;
rmap = mlx5_eq_table_get_pci_rmap(dev);
- cpu = mlx5_cpumask_default_spread(dev->priv.numa_node, vecidx);
+ cpu = mlx5_cpumask_default_spread(dev, vecidx);
irq = mlx5_irq_request_vector(dev, cpu, vecidx, &rmap);
if (IS_ERR(irq))
return PTR_ERR(irq);
@@ -899,21 +872,27 @@ static void comp_irq_release_sf(struct mlx5_core_dev *dev, u16 vecidx)
static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx)
{
+ struct mlx5_irq_pool *pool = mlx5_irq_table_get_comp_irq_pool(dev);
struct mlx5_eq_table *table = dev->priv.eq_table;
- struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev);
- struct irq_affinity_desc af_desc = {};
+ struct irq_affinity_desc *af_desc;
struct mlx5_irq *irq;
- /* In case SF irq pool does not exist, fallback to the PF irqs*/
+ /* In case SF irq pool does not exist, fallback to the PF irqs */
if (!mlx5_irq_pool_is_sf_pool(pool))
return comp_irq_request_pci(dev, vecidx);
- af_desc.is_managed = 1;
- cpumask_copy(&af_desc.mask, cpu_online_mask);
- cpumask_andnot(&af_desc.mask, &af_desc.mask, &table->used_cpus);
- irq = mlx5_irq_affinity_request(pool, &af_desc);
- if (IS_ERR(irq))
+ af_desc = kvzalloc(sizeof(*af_desc), GFP_KERNEL);
+ if (!af_desc)
+ return -ENOMEM;
+
+ af_desc->is_managed = false;
+ cpumask_copy(&af_desc->mask, cpu_online_mask);
+ cpumask_andnot(&af_desc->mask, &af_desc->mask, &table->used_cpus);
+ irq = mlx5_irq_affinity_request(dev, pool, af_desc);
+ if (IS_ERR(irq)) {
+ kvfree(af_desc);
return PTR_ERR(irq);
+ }
cpumask_or(&table->used_cpus, &table->used_cpus, mlx5_irq_get_affinity_mask(irq));
mlx5_core_dbg(pool->dev, "IRQ %u mapped to cpu %*pbl, %u EQs on this irq\n",
@@ -921,6 +900,8 @@ static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx)
cpumask_pr_args(mlx5_irq_get_affinity_mask(irq)),
mlx5_irq_read_locked(irq) / MLX5_EQ_REFS_PER_IRQ);
+ kvfree(af_desc);
+
return xa_err(xa_store(&table->comp_irqs, vecidx, irq, GFP_KERNEL));
}
@@ -1073,6 +1054,12 @@ int mlx5_comp_eqn_get(struct mlx5_core_dev *dev, u16 vecidx, int *eqn)
struct mlx5_eq_comp *eq;
int ret = 0;
+ if (vecidx >= table->max_comp_eqs) {
+ mlx5_core_dbg(dev, "Requested vector index %u should be less than %u",
+ vecidx, table->max_comp_eqs);
+ return -EINVAL;
+ }
+
mutex_lock(&table->comp_lock);
eq = xa_load(&table->comp_eqs, vecidx);
if (eq) {
@@ -1138,7 +1125,7 @@ int mlx5_comp_vector_get_cpu(struct mlx5_core_dev *dev, int vector)
if (mask)
cpu = cpumask_first(mask);
else
- cpu = mlx5_cpumask_default_spread(dev->priv.numa_node, vector);
+ cpu = mlx5_cpumask_default_spread(dev, vector);
return cpu;
}
@@ -1180,7 +1167,6 @@ static int get_num_eqs(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *eq_table = dev->priv.eq_table;
int max_dev_eqs;
- int max_eqs_sf;
int num_eqs;
/* If ethernet is disabled we use just a single completion vector to
@@ -1190,14 +1176,16 @@ static int get_num_eqs(struct mlx5_core_dev *dev)
if (!mlx5_core_is_eth_enabled(dev) && mlx5_eth_supported(dev))
return 1;
- max_dev_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
- MLX5_CAP_GEN(dev, max_num_eqs) :
- 1 << MLX5_CAP_GEN(dev, log_max_eq);
+ max_dev_eqs = mlx5_max_eq_cap_get(dev);
num_eqs = min_t(int, mlx5_irq_table_get_num_comp(eq_table->irq_table),
max_dev_eqs - MLX5_MAX_ASYNC_EQS);
if (mlx5_core_is_sf(dev)) {
- max_eqs_sf = min_t(int, MLX5_COMP_EQS_PER_SF,
+ int max_eqs_sf = MLX5_CAP_GEN_2(dev, sf_eq_usage) ?
+ MLX5_CAP_GEN_2(dev, max_num_eqs_24b) :
+ MLX5_COMP_EQS_PER_SF;
+
+ max_eqs_sf = min_t(int, max_eqs_sf,
mlx5_irq_table_get_sfs_vec(eq_table->irq_table));
num_eqs = min_t(int, num_eqs, max_eqs_sf);
}
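Note on the EQ polling rework above (illustration only, not part of the patch): the old "prime with next_eqe_sw(), then do/while" shape becomes a single while ((eqe = next_eqe_sw(eq))) loop with the budget check inside it. A small stand-alone model of that loop shape, where next_item() and BUDGET are toy stand-ins for next_eqe_sw() and MLX5_EQ_POLLING_BUDGET:

#include <stdio.h>

#define BUDGET 4

static int queue[] = { 1, 2, 3, 4, 5, 6, 7 };
static int head;

/* returns the next pending entry, or NULL when the queue is drained */
static int *next_item(void)
{
	if (head >= (int)(sizeof(queue) / sizeof(queue[0])))
		return NULL;
	return &queue[head++];
}

int main(void)
{
	int *item, polled = 0;

	while ((item = next_item())) {	/* no priming read before the loop */
		printf("handle %d\n", *item);
		if (++polled >= BUDGET)	/* budget check moved inside the loop */
			break;
	}
	printf("polled %d entries\n", polled);
	return 0;
}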
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
index 6b4c9ffad95b..c9a1654d83a2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
@@ -87,8 +87,8 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
drop_counter = mlx5_fc_create(esw->dev, false);
if (IS_ERR(drop_counter)) {
esw_warn(esw->dev,
- "vport[%d] configure egress drop rule counter err(%ld)\n",
- vport->vport, PTR_ERR(drop_counter));
+ "vport[%d] configure egress drop rule counter err(%pe)\n",
+ vport->vport, drop_counter);
drop_counter = NULL;
}
vport->egress.legacy.drop_counter = drop_counter;
@@ -135,7 +135,7 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
if (drop_counter) {
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- drop_ctr_dst.counter_id = mlx5_fc_id(drop_counter);
+ drop_ctr_dst.counter = drop_counter;
dst = &drop_ctr_dst;
dest_num++;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c
index d599e50af346..3ce455c2535c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c
@@ -27,7 +27,7 @@ esw_acl_table_create(struct mlx5_eswitch *esw, struct mlx5_vport *vport, int ns,
esw_debug(dev, "Create vport[%d] %s ACL table\n", vport_num,
ns == MLX5_FLOW_NAMESPACE_ESW_INGRESS ? "ingress" : "egress");
- root_ns = mlx5_get_flow_vport_acl_namespace(dev, ns, vport->index);
+ root_ns = mlx5_get_flow_vport_namespace(dev, ns, vport->index);
if (!root_ns) {
esw_warn(dev, "Failed to get E-Switch root namespace for vport (%d)\n",
vport_num);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
index 093ed86a0acd..1c37098e09ea 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
@@ -260,7 +260,7 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
if (counter) {
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- drop_ctr_dst.counter_id = mlx5_fc_id(counter);
+ drop_ctr_dst.counter = counter;
dst = &drop_ctr_dst;
dest_num++;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
index 50d2ea323979..a436ce895e45 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
@@ -6,6 +6,9 @@
#include "helper.h"
#include "ofld.h"
+static int
+acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
+
static bool
esw_acl_ingress_prio_tag_enabled(struct mlx5_eswitch *esw,
const struct mlx5_vport *vport)
@@ -123,18 +126,31 @@ static int esw_acl_ingress_src_port_drop_create(struct mlx5_eswitch *esw,
{
struct mlx5_flow_act flow_act = {};
struct mlx5_flow_handle *flow_rule;
+ bool created = false;
int err = 0;
+ if (!vport->ingress.acl) {
+ err = acl_ingress_ofld_setup(esw, vport);
+ if (err)
+ return err;
+ created = true;
+ }
+
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
flow_act.fg = vport->ingress.offloads.drop_grp;
flow_rule = mlx5_add_flow_rules(vport->ingress.acl, NULL, &flow_act, NULL, 0);
if (IS_ERR(flow_rule)) {
err = PTR_ERR(flow_rule);
- goto out;
+ goto err_out;
}
vport->ingress.offloads.drop_rule = flow_rule;
-out:
+
+ return 0;
+err_out:
+ /* Only destroy the ingress ACL created in this function. */
+ if (created)
+ esw_acl_ingress_ofld_cleanup(esw, vport);
return err;
}
@@ -299,16 +315,12 @@ static void esw_acl_ingress_ofld_groups_destroy(struct mlx5_vport *vport)
}
}
-int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
+static int
+acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
int num_ftes = 0;
int err;
- if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
- !esw_acl_ingress_prio_tag_enabled(esw, vport))
- return 0;
-
esw_acl_ingress_allow_rule_destroy(vport);
if (mlx5_eswitch_vport_match_metadata_enabled(esw))
@@ -347,6 +359,15 @@ group_err:
return err;
}
+int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+{
+ if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
+ !esw_acl_ingress_prio_tag_enabled(esw, vport))
+ return 0;
+
+ return acl_ingress_ofld_setup(esw, vport);
+}
+
void esw_acl_ingress_ofld_cleanup(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/adj_vport.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/adj_vport.c
new file mode 100644
index 000000000000..250af09b5af2
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/adj_vport.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "fs_core.h"
+#include "eswitch.h"
+
+int mlx5_esw_adj_vport_modify(struct mlx5_core_dev *dev, u16 vport,
+ bool connect)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)] = {};
+
+ MLX5_SET(modify_vport_state_in, in, opcode,
+ MLX5_CMD_OP_MODIFY_VPORT_STATE);
+ MLX5_SET(modify_vport_state_in, in, op_mod,
+ MLX5_VPORT_STATE_OP_MOD_ESW_VPORT);
+ MLX5_SET(modify_vport_state_in, in, other_vport, 1);
+ MLX5_SET(modify_vport_state_in, in, vport_number, vport);
+ MLX5_SET(modify_vport_state_in, in, ingress_connect_valid, 1);
+ MLX5_SET(modify_vport_state_in, in, egress_connect_valid, 1);
+ MLX5_SET(modify_vport_state_in, in, ingress_connect, connect);
+ MLX5_SET(modify_vport_state_in, in, egress_connect, connect);
+ MLX5_SET(modify_vport_state_in, in, admin_state, connect);
+ return mlx5_cmd_exec_in(dev, modify_vport_state, in);
+}
+
+static void mlx5_esw_destroy_esw_vport(struct mlx5_core_dev *dev, u16 vport)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_esw_vport_in)] = {};
+
+ MLX5_SET(destroy_esw_vport_in, in, opcode,
+ MLX5_CMD_OPCODE_DESTROY_ESW_VPORT);
+ MLX5_SET(destroy_esw_vport_in, in, vport_num, vport);
+
+ mlx5_cmd_exec_in(dev, destroy_esw_vport, in);
+}
+
+static int mlx5_esw_create_esw_vport(struct mlx5_core_dev *dev, u16 vhca_id,
+ u16 *vport_num)
+{
+ u32 out[MLX5_ST_SZ_DW(create_esw_vport_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(create_esw_vport_in)] = {};
+ int err;
+
+ MLX5_SET(create_esw_vport_in, in, opcode,
+ MLX5_CMD_OPCODE_CREATE_ESW_VPORT);
+ MLX5_SET(create_esw_vport_in, in, managed_vhca_id, vhca_id);
+
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (!err)
+ *vport_num = MLX5_GET(create_esw_vport_out, out, vport_num);
+
+ return err;
+}
+
+static int mlx5_esw_adj_vport_create(struct mlx5_eswitch *esw, u16 vhca_id,
+ const void *rid_info_reg)
+{
+ struct mlx5_vport *vport;
+ u16 vport_num;
+ int err;
+
+ err = mlx5_esw_create_esw_vport(esw->dev, vhca_id, &vport_num);
+ if (err) {
+ esw_warn(esw->dev,
+ "Failed to create adjacent vport for vhca_id %d, err %d\n",
+ vhca_id, err);
+ return err;
+ }
+
+ esw_debug(esw->dev, "Created adjacent vport[%d] %d for vhca_id 0x%x\n",
+ esw->last_vport_idx, vport_num, vhca_id);
+
+ err = mlx5_esw_vport_alloc(esw, esw->last_vport_idx++, vport_num);
+ if (err)
+ goto destroy_esw_vport;
+
+ xa_set_mark(&esw->vports, vport_num, MLX5_ESW_VPT_VF);
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
+ vport->adjacent = true;
+ vport->vhca_id = vhca_id;
+
+ vport->adj_info.parent_pci_devfn =
+ MLX5_GET(function_vhca_rid_info_reg, rid_info_reg,
+ parent_pci_device_function);
+ vport->adj_info.function_id =
+ MLX5_GET(function_vhca_rid_info_reg, rid_info_reg, function_id);
+
+ mlx5_fs_vport_egress_acl_ns_add(esw->dev->priv.steering, vport->index);
+ mlx5_fs_vport_ingress_acl_ns_add(esw->dev->priv.steering, vport->index);
+ err = mlx5_esw_offloads_rep_add(esw, vport);
+ if (err)
+ goto acl_ns_remove;
+
+ return 0;
+
+acl_ns_remove:
+ mlx5_fs_vport_ingress_acl_ns_remove(esw->dev->priv.steering,
+ vport->index);
+ mlx5_fs_vport_egress_acl_ns_remove(esw->dev->priv.steering,
+ vport->index);
+ mlx5_esw_vport_free(esw, vport);
+destroy_esw_vport:
+ mlx5_esw_destroy_esw_vport(esw->dev, vport_num);
+ return err;
+}
+
+static void mlx5_esw_adj_vport_destroy(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ u16 vport_num = vport->vport;
+
+ esw_debug(esw->dev, "Destroying adjacent vport %d for vhca_id 0x%x\n",
+ vport_num, vport->vhca_id);
+
+ mlx5_esw_offloads_rep_remove(esw, vport);
+ mlx5_fs_vport_egress_acl_ns_remove(esw->dev->priv.steering,
+ vport->index);
+ mlx5_fs_vport_ingress_acl_ns_remove(esw->dev->priv.steering,
+ vport->index);
+ mlx5_esw_vport_free(esw, vport);
+ /* Reset the vport index back so new adj vports can use this index.
+ * Once the vport count can change incrementally, this will need updating.
+ */
+ esw->last_vport_idx--;
+ mlx5_esw_destroy_esw_vport(esw->dev, vport_num);
+}
+
+void mlx5_esw_adjacent_vhcas_cleanup(struct mlx5_eswitch *esw)
+{
+ struct mlx5_vport *vport;
+ unsigned long i;
+
+ if (!MLX5_CAP_GEN_2(esw->dev, delegated_vhca_max))
+ return;
+
+ mlx5_esw_for_each_vf_vport(esw, i, vport, U16_MAX) {
+ if (!vport->adjacent)
+ continue;
+ mlx5_esw_adj_vport_destroy(esw, vport);
+ }
+}
+
+void mlx5_esw_adjacent_vhcas_setup(struct mlx5_eswitch *esw)
+{
+ u32 delegated_vhca_max = MLX5_CAP_GEN_2(esw->dev, delegated_vhca_max);
+ u32 in[MLX5_ST_SZ_DW(query_delegated_vhca_in)] = {};
+ int outlen, err, i = 0;
+ u8 *out;
+ u32 count;
+
+ if (!delegated_vhca_max)
+ return;
+
+ outlen = MLX5_ST_SZ_BYTES(query_delegated_vhca_out) +
+ delegated_vhca_max *
+ MLX5_ST_SZ_BYTES(delegated_function_vhca_rid_info);
+
+ esw_debug(esw->dev, "delegated_vhca_max=%d\n", delegated_vhca_max);
+
+ out = kvzalloc(outlen, GFP_KERNEL);
+ if (!out)
+ return;
+
+ MLX5_SET(query_delegated_vhca_in, in, opcode,
+ MLX5_CMD_OPCODE_QUERY_DELEGATED_VHCA);
+
+ err = mlx5_cmd_exec(esw->dev, in, sizeof(in), out, outlen);
+ if (err) {
+ kvfree(out);
+ esw_warn(esw->dev, "Failed to query delegated vhca, err %d\n",
+ err);
+ return;
+ }
+
+ count = MLX5_GET(query_delegated_vhca_out, out, functions_count);
+ esw_debug(esw->dev, "Delegated vhca functions count %d\n", count);
+
+ for (i = 0; i < count; i++) {
+ const void *rid_info, *rid_info_reg;
+ u16 vhca_id;
+
+ rid_info = MLX5_ADDR_OF(query_delegated_vhca_out, out,
+ delegated_function_vhca_rid_info[i]);
+
+ rid_info_reg = MLX5_ADDR_OF(delegated_function_vhca_rid_info,
+ rid_info, function_vhca_rid_info);
+
+ vhca_id = MLX5_GET(function_vhca_rid_info_reg, rid_info_reg,
+ vhca_id);
+ esw_debug(esw->dev, "Delegating vhca_id 0x%x\n", vhca_id);
+
+ err = mlx5_esw_adj_vport_create(esw, vhca_id, rid_info_reg);
+ if (err) {
+ esw_warn(esw->dev,
+ "Failed to init adjacent vhca 0x%x, err %d\n",
+ vhca_id, err);
+ break;
+ }
+ }
+
+ kvfree(out);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
index 1b9bc32efd6f..60e10047770f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
@@ -81,7 +81,8 @@ mlx5_esw_bridge_table_create(int max_fte, u32 level, struct mlx5_eswitch *esw)
ft_attr.prio = FDB_BR_OFFLOAD;
fdb = mlx5_create_flow_table(ns, &ft_attr);
if (IS_ERR(fdb))
- esw_warn(dev, "Failed to create bridge FDB Table (err=%ld)\n", PTR_ERR(fdb));
+ esw_warn(dev, "Failed to create bridge FDB Table (err=%pe)\n",
+ fdb);
return fdb;
}
@@ -121,8 +122,8 @@ mlx5_esw_bridge_ingress_vlan_proto_fg_create(unsigned int from, unsigned int to,
kvfree(in);
if (IS_ERR(fg))
esw_warn(esw->dev,
- "Failed to create VLAN(proto=%x) flow group for bridge ingress table (err=%ld)\n",
- vlan_proto, PTR_ERR(fg));
+ "Failed to create VLAN(proto=%x) flow group for bridge ingress table (err=%pe)\n",
+ vlan_proto, fg);
return fg;
}
@@ -180,8 +181,8 @@ mlx5_esw_bridge_ingress_vlan_proto_filter_fg_create(unsigned int from, unsigned
fg = mlx5_create_flow_group(ingress_ft, in);
if (IS_ERR(fg))
esw_warn(esw->dev,
- "Failed to create bridge ingress table VLAN filter flow group (err=%ld)\n",
- PTR_ERR(fg));
+ "Failed to create bridge ingress table VLAN filter flow group (err=%pe)\n",
+ fg);
kvfree(in);
return fg;
}
@@ -237,8 +238,8 @@ mlx5_esw_bridge_ingress_mac_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow
fg = mlx5_create_flow_group(ingress_ft, in);
if (IS_ERR(fg))
esw_warn(esw->dev,
- "Failed to create MAC flow group for bridge ingress table (err=%ld)\n",
- PTR_ERR(fg));
+ "Failed to create MAC flow group for bridge ingress table (err=%pe)\n",
+ fg);
kvfree(in);
return fg;
@@ -274,8 +275,8 @@ mlx5_esw_bridge_egress_vlan_proto_fg_create(unsigned int from, unsigned int to,
fg = mlx5_create_flow_group(egress_ft, in);
if (IS_ERR(fg))
esw_warn(esw->dev,
- "Failed to create VLAN flow group for bridge egress table (err=%ld)\n",
- PTR_ERR(fg));
+ "Failed to create VLAN flow group for bridge egress table (err=%pe)\n",
+ fg);
kvfree(in);
return fg;
}
@@ -324,8 +325,8 @@ mlx5_esw_bridge_egress_mac_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_
fg = mlx5_create_flow_group(egress_ft, in);
if (IS_ERR(fg))
esw_warn(esw->dev,
- "Failed to create bridge egress table MAC flow group (err=%ld)\n",
- PTR_ERR(fg));
+ "Failed to create bridge egress table MAC flow group (err=%pe)\n",
+ fg);
kvfree(in);
return fg;
}
@@ -354,8 +355,8 @@ mlx5_esw_bridge_egress_miss_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow
fg = mlx5_create_flow_group(egress_ft, in);
if (IS_ERR(fg))
esw_warn(esw->dev,
- "Failed to create bridge egress table miss flow group (err=%ld)\n",
- PTR_ERR(fg));
+ "Failed to create bridge egress table miss flow group (err=%pe)\n",
+ fg);
kvfree(in);
return fg;
}
@@ -501,8 +502,8 @@ mlx5_esw_bridge_egress_table_init(struct mlx5_esw_bridge_offloads *br_offloads,
if (mlx5_esw_bridge_pkt_reformat_vlan_pop_supported(esw)) {
miss_fg = mlx5_esw_bridge_egress_miss_fg_create(esw, egress_ft);
if (IS_ERR(miss_fg)) {
- esw_warn(esw->dev, "Failed to create miss flow group (err=%ld)\n",
- PTR_ERR(miss_fg));
+ esw_warn(esw->dev, "Failed to create miss flow group (err=%pe)\n",
+ miss_fg);
miss_fg = NULL;
goto skip_miss_flow;
}
@@ -510,8 +511,8 @@ mlx5_esw_bridge_egress_table_init(struct mlx5_esw_bridge_offloads *br_offloads,
miss_pkt_reformat = mlx5_esw_bridge_pkt_reformat_vlan_pop_create(esw);
if (IS_ERR(miss_pkt_reformat)) {
esw_warn(esw->dev,
- "Failed to alloc packet reformat REMOVE_HEADER (err=%ld)\n",
- PTR_ERR(miss_pkt_reformat));
+ "Failed to alloc packet reformat REMOVE_HEADER (err=%pe)\n",
+ miss_pkt_reformat);
miss_pkt_reformat = NULL;
mlx5_destroy_flow_group(miss_fg);
miss_fg = NULL;
@@ -522,8 +523,8 @@ mlx5_esw_bridge_egress_table_init(struct mlx5_esw_bridge_offloads *br_offloads,
br_offloads->skip_ft,
miss_pkt_reformat);
if (IS_ERR(miss_handle)) {
- esw_warn(esw->dev, "Failed to create miss flow (err=%ld)\n",
- PTR_ERR(miss_handle));
+ esw_warn(esw->dev, "Failed to create miss flow (err=%pe)\n",
+ miss_handle);
miss_handle = NULL;
mlx5_packet_reformat_dealloc(esw->dev, miss_pkt_reformat);
miss_pkt_reformat = NULL;
@@ -570,7 +571,8 @@ mlx5_esw_bridge_egress_table_cleanup(struct mlx5_esw_bridge *bridge)
static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_flow_with_esw_create(u16 vport_num, const unsigned char *addr,
- struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
+ struct mlx5_esw_bridge_vlan *vlan,
+ struct mlx5_fc *counter,
struct mlx5_esw_bridge *bridge,
struct mlx5_eswitch *esw)
{
@@ -628,7 +630,7 @@ mlx5_esw_bridge_ingress_flow_with_esw_create(u16 vport_num, const unsigned char
dests[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dests[0].ft = bridge->egress_ft;
dests[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dests[1].counter_id = counter_id;
+ dests[1].counter = counter;
handle = mlx5_add_flow_rules(br_offloads->ingress_ft, rule_spec, &flow_act, dests,
ARRAY_SIZE(dests));
@@ -639,17 +641,19 @@ mlx5_esw_bridge_ingress_flow_with_esw_create(u16 vport_num, const unsigned char
static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_flow_create(u16 vport_num, const unsigned char *addr,
- struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
+ struct mlx5_esw_bridge_vlan *vlan,
+ struct mlx5_fc *counter,
struct mlx5_esw_bridge *bridge)
{
- return mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter_id,
+ return mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter,
bridge, bridge->br_offloads->esw);
}
static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_flow_peer_create(u16 vport_num, u16 esw_owner_vhca_id,
const unsigned char *addr,
- struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
+ struct mlx5_esw_bridge_vlan *vlan,
+ struct mlx5_fc *counter,
struct mlx5_esw_bridge *bridge)
{
struct mlx5_devcom_comp_dev *devcom = bridge->br_offloads->esw->devcom, *pos;
@@ -671,7 +675,7 @@ mlx5_esw_bridge_ingress_flow_peer_create(u16 vport_num, u16 esw_owner_vhca_id,
goto out;
}
- handle = mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter_id,
+ handle = mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter,
bridge, peer_esw);
out:
@@ -1045,8 +1049,8 @@ mlx5_esw_bridge_vlan_push_create(u16 vlan_proto, struct mlx5_esw_bridge_vlan *vl
&reformat_params,
MLX5_FLOW_NAMESPACE_FDB);
if (IS_ERR(pkt_reformat)) {
- esw_warn(esw->dev, "Failed to alloc packet reformat INSERT_HEADER (err=%ld)\n",
- PTR_ERR(pkt_reformat));
+ esw_warn(esw->dev, "Failed to alloc packet reformat INSERT_HEADER (err=%pe)\n",
+ pkt_reformat);
return PTR_ERR(pkt_reformat);
}
@@ -1073,8 +1077,8 @@ mlx5_esw_bridge_vlan_pop_create(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_e
pkt_reformat = mlx5_esw_bridge_pkt_reformat_vlan_pop_create(esw);
if (IS_ERR(pkt_reformat)) {
- esw_warn(esw->dev, "Failed to alloc packet reformat REMOVE_HEADER (err=%ld)\n",
- PTR_ERR(pkt_reformat));
+ esw_warn(esw->dev, "Failed to alloc packet reformat REMOVE_HEADER (err=%pe)\n",
+ pkt_reformat);
return PTR_ERR(pkt_reformat);
}
@@ -1385,10 +1389,9 @@ mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, u16 esw_ow
handle = peer ?
mlx5_esw_bridge_ingress_flow_peer_create(vport_num, esw_owner_vhca_id,
- addr, vlan, mlx5_fc_id(counter),
- bridge) :
+ addr, vlan, counter, bridge) :
mlx5_esw_bridge_ingress_flow_create(vport_num, addr, vlan,
- mlx5_fc_id(counter), bridge);
+ counter, bridge);
if (IS_ERR(handle)) {
err = PTR_ERR(handle);
esw_warn(esw->dev, "Failed to create ingress flow(vport=%u,err=%d,peer=%d)\n",
@@ -1861,7 +1864,7 @@ int mlx5_esw_bridge_port_mdb_add(struct net_device *dev, u16 vport_num, u16 esw_
"Failed to lookup bridge port to add MDB (MAC=%pM,vport=%u)\n",
addr, vport_num);
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Failed to lookup bridge port to add MDB (MAC=%pM,vport=%u)\n",
+ "Failed to lookup bridge port to add MDB (MAC=%pM,vport=%u)",
addr, vport_num);
return -EINVAL;
}
@@ -1874,7 +1877,7 @@ int mlx5_esw_bridge_port_mdb_add(struct net_device *dev, u16 vport_num, u16 esw_
"Failed to lookup bridge port vlan metadata to create MDB (MAC=%pM,vid=%u,vport=%u)\n",
addr, vid, vport_num);
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Failed to lookup bridge port vlan metadata to create MDB (MAC=%pM,vid=%u,vport=%u)\n",
+ "Failed to lookup vlan metadata for MDB (MAC=%pM,vid=%u,vport=%u)",
addr, vid, vport_num);
return -EINVAL;
}
@@ -1882,7 +1885,7 @@ int mlx5_esw_bridge_port_mdb_add(struct net_device *dev, u16 vport_num, u16 esw_
err = mlx5_esw_bridge_port_mdb_attach(dev, port, addr, vid);
if (err) {
- NL_SET_ERR_MSG_FMT_MOD(extack, "Failed to add MDB (MAC=%pM,vid=%u,vport=%u)\n",
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Failed to add MDB (MAC=%pM,vid=%u,vport=%u)",
addr, vid, vport_num);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
index d8e739cbcbce..89a58dee50b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
@@ -7,11 +7,7 @@
static void
mlx5_esw_get_port_parent_id(struct mlx5_core_dev *dev, struct netdev_phys_item_id *ppid)
{
- u64 parent_id;
-
- parent_id = mlx5_query_nic_system_image_guid(dev);
- ppid->id_len = sizeof(parent_id);
- memcpy(ppid->id, &parent_id, sizeof(parent_id));
+ mlx5_query_nic_sw_system_image_guid(dev, ppid->id, &ppid->id_len);
}
static bool mlx5_esw_devlink_port_supported(struct mlx5_eswitch *esw, u16 vport_num)
@@ -27,12 +23,13 @@ static void mlx5_esw_offloads_pf_vf_devlink_port_attrs_set(struct mlx5_eswitch *
{
struct mlx5_core_dev *dev = esw->dev;
struct netdev_phys_item_id ppid = {};
+ struct mlx5_vport *vport;
u32 controller_num = 0;
bool external;
u16 pfnum;
mlx5_esw_get_port_parent_id(dev, &ppid);
- pfnum = mlx5_get_dev_index(dev);
+ pfnum = PCI_FUNC(dev->pdev->devfn);
external = mlx5_core_is_ecpf_esw_manager(dev);
if (external)
controller_num = dev->priv.eswitch->offloads.host_number + 1;
@@ -42,15 +39,25 @@ static void mlx5_esw_offloads_pf_vf_devlink_port_attrs_set(struct mlx5_eswitch *
dl_port->attrs.switch_id.id_len = ppid.id_len;
devlink_port_attrs_pci_pf_set(dl_port, controller_num, pfnum, external);
} else if (mlx5_eswitch_is_vf_vport(esw, vport_num)) {
+ u16 func_id = vport_num - 1;
+
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
memcpy(dl_port->attrs.switch_id.id, ppid.id, ppid.id_len);
dl_port->attrs.switch_id.id_len = ppid.id_len;
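+		/* Adjacent vports report the parent PF's devfn and the
+		 * delegated function id instead of the local numbering.
+		 */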
+ if (vport->adjacent) {
+ func_id = vport->adj_info.function_id;
+ pfnum = vport->adj_info.parent_pci_devfn;
+ }
+
devlink_port_attrs_pci_vf_set(dl_port, controller_num, pfnum,
- vport_num - 1, external);
+ func_id, external);
} else if (mlx5_core_is_ec_vf_vport(esw->dev, vport_num)) {
+ u16 base_vport = mlx5_core_ec_vf_vport_base(dev);
+
memcpy(dl_port->attrs.switch_id.id, ppid.id, ppid.id_len);
dl_port->attrs.switch_id.id_len = ppid.id_len;
devlink_port_attrs_pci_vf_set(dl_port, 0, pfnum,
- vport_num - 1, false);
+ vport_num - base_vport, false);
}
}
@@ -98,6 +105,8 @@ static const struct devlink_port_ops mlx5_esw_pf_vf_dl_port_ops = {
.port_fn_ipsec_packet_get = mlx5_devlink_port_fn_ipsec_packet_get,
.port_fn_ipsec_packet_set = mlx5_devlink_port_fn_ipsec_packet_set,
#endif /* CONFIG_XFRM_OFFLOAD */
+ .port_fn_max_io_eqs_get = mlx5_devlink_port_fn_max_io_eqs_get,
+ .port_fn_max_io_eqs_set = mlx5_devlink_port_fn_max_io_eqs_set,
};
static void mlx5_esw_offloads_sf_devlink_port_attrs_set(struct mlx5_eswitch *esw,
@@ -108,7 +117,7 @@ static void mlx5_esw_offloads_sf_devlink_port_attrs_set(struct mlx5_eswitch *esw
struct netdev_phys_item_id ppid = {};
u16 pfnum;
- pfnum = mlx5_get_dev_index(dev);
+ pfnum = PCI_FUNC(dev->pdev->devfn);
mlx5_esw_get_port_parent_id(dev, &ppid);
memcpy(dl_port->attrs.switch_id.id, &ppid.id[0], ppid.id_len);
dl_port->attrs.switch_id.id_len = ppid.id_len;
@@ -143,6 +152,8 @@ static const struct devlink_port_ops mlx5_esw_dl_sf_port_ops = {
.port_fn_state_get = mlx5_devlink_sf_port_fn_state_get,
.port_fn_state_set = mlx5_devlink_sf_port_fn_state_set,
#endif
+ .port_fn_max_io_eqs_get = mlx5_devlink_port_fn_max_io_eqs_get,
+ .port_fn_max_io_eqs_set = mlx5_devlink_port_fn_max_io_eqs_set,
};
int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
@@ -183,7 +194,7 @@ rate_err:
return err;
}
-void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_vport *vport)
{
struct mlx5_devlink_port *dl_port;
@@ -191,7 +202,7 @@ void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, struct
return;
dl_port = vport->dl_port;
- mlx5_esw_qos_vport_update_group(esw, vport, NULL, NULL);
+ mlx5_esw_qos_vport_update_parent(vport, NULL, NULL);
devl_rate_leaf_destroy(&dl_port->dl_port);
devl_port_unregister(&dl_port->dl_port);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/qos_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/qos_tracepoint.h
index 458baf0c6415..43550a416a6f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/qos_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/qos_tracepoint.h
@@ -9,107 +9,111 @@
#include <linux/tracepoint.h>
#include "eswitch.h"
+#include "qos.h"
TRACE_EVENT(mlx5_esw_vport_qos_destroy,
- TP_PROTO(const struct mlx5_vport *vport),
- TP_ARGS(vport),
- TP_STRUCT__entry(__string(devname, dev_name(vport->dev->device))
+ TP_PROTO(const struct mlx5_core_dev *dev, const struct mlx5_vport *vport),
+ TP_ARGS(dev, vport),
+ TP_STRUCT__entry(__string(devname, dev_name(dev->device))
__field(unsigned short, vport_id)
- __field(unsigned int, tsar_ix)
+ __field(unsigned int, sched_elem_ix)
),
- TP_fast_assign(__assign_str(devname, dev_name(vport->dev->device));
+ TP_fast_assign(__assign_str(devname);
__entry->vport_id = vport->vport;
- __entry->tsar_ix = vport->qos.esw_tsar_ix;
+ __entry->sched_elem_ix = mlx5_esw_qos_vport_get_sched_elem_ix(vport);
),
- TP_printk("(%s) vport=%hu tsar_ix=%u\n",
- __get_str(devname), __entry->vport_id, __entry->tsar_ix
+ TP_printk("(%s) vport=%hu sched_elem_ix=%u\n",
+ __get_str(devname), __entry->vport_id, __entry->sched_elem_ix
)
);
DECLARE_EVENT_CLASS(mlx5_esw_vport_qos_template,
- TP_PROTO(const struct mlx5_vport *vport, u32 bw_share, u32 max_rate),
- TP_ARGS(vport, bw_share, max_rate),
- TP_STRUCT__entry(__string(devname, dev_name(vport->dev->device))
+ TP_PROTO(const struct mlx5_core_dev *dev, const struct mlx5_vport *vport,
+ u32 bw_share, u32 max_rate),
+ TP_ARGS(dev, vport, bw_share, max_rate),
+ TP_STRUCT__entry(__string(devname, dev_name(dev->device))
__field(unsigned short, vport_id)
- __field(unsigned int, tsar_ix)
+ __field(unsigned int, sched_elem_ix)
__field(unsigned int, bw_share)
__field(unsigned int, max_rate)
- __field(void *, group)
+ __field(void *, parent)
),
- TP_fast_assign(__assign_str(devname, dev_name(vport->dev->device));
+ TP_fast_assign(__assign_str(devname);
__entry->vport_id = vport->vport;
- __entry->tsar_ix = vport->qos.esw_tsar_ix;
+ __entry->sched_elem_ix = mlx5_esw_qos_vport_get_sched_elem_ix(vport);
__entry->bw_share = bw_share;
__entry->max_rate = max_rate;
- __entry->group = vport->qos.group;
+ __entry->parent = mlx5_esw_qos_vport_get_parent(vport);
),
- TP_printk("(%s) vport=%hu tsar_ix=%u bw_share=%u, max_rate=%u group=%p\n",
- __get_str(devname), __entry->vport_id, __entry->tsar_ix,
- __entry->bw_share, __entry->max_rate, __entry->group
+ TP_printk("(%s) vport=%hu sched_elem_ix=%u bw_share=%u, max_rate=%u parent=%p\n",
+ __get_str(devname), __entry->vport_id, __entry->sched_elem_ix,
+ __entry->bw_share, __entry->max_rate, __entry->parent
)
);
DEFINE_EVENT(mlx5_esw_vport_qos_template, mlx5_esw_vport_qos_create,
- TP_PROTO(const struct mlx5_vport *vport, u32 bw_share, u32 max_rate),
- TP_ARGS(vport, bw_share, max_rate)
+ TP_PROTO(const struct mlx5_core_dev *dev, const struct mlx5_vport *vport,
+ u32 bw_share, u32 max_rate),
+ TP_ARGS(dev, vport, bw_share, max_rate)
);
DEFINE_EVENT(mlx5_esw_vport_qos_template, mlx5_esw_vport_qos_config,
- TP_PROTO(const struct mlx5_vport *vport, u32 bw_share, u32 max_rate),
- TP_ARGS(vport, bw_share, max_rate)
+ TP_PROTO(const struct mlx5_core_dev *dev, const struct mlx5_vport *vport,
+ u32 bw_share, u32 max_rate),
+ TP_ARGS(dev, vport, bw_share, max_rate)
);
-DECLARE_EVENT_CLASS(mlx5_esw_group_qos_template,
+DECLARE_EVENT_CLASS(mlx5_esw_node_qos_template,
TP_PROTO(const struct mlx5_core_dev *dev,
- const struct mlx5_esw_rate_group *group,
+ const struct mlx5_esw_sched_node *node,
unsigned int tsar_ix),
- TP_ARGS(dev, group, tsar_ix),
+ TP_ARGS(dev, node, tsar_ix),
TP_STRUCT__entry(__string(devname, dev_name(dev->device))
- __field(const void *, group)
+ __field(const void *, node)
__field(unsigned int, tsar_ix)
),
- TP_fast_assign(__assign_str(devname, dev_name(dev->device));
- __entry->group = group;
+ TP_fast_assign(__assign_str(devname);
+ __entry->node = node;
__entry->tsar_ix = tsar_ix;
),
- TP_printk("(%s) group=%p tsar_ix=%u\n",
- __get_str(devname), __entry->group, __entry->tsar_ix
+ TP_printk("(%s) node=%p tsar_ix=%u\n",
+ __get_str(devname), __entry->node, __entry->tsar_ix
)
);
-DEFINE_EVENT(mlx5_esw_group_qos_template, mlx5_esw_group_qos_create,
+DEFINE_EVENT(mlx5_esw_node_qos_template, mlx5_esw_node_qos_create,
TP_PROTO(const struct mlx5_core_dev *dev,
- const struct mlx5_esw_rate_group *group,
+ const struct mlx5_esw_sched_node *node,
unsigned int tsar_ix),
- TP_ARGS(dev, group, tsar_ix)
+ TP_ARGS(dev, node, tsar_ix)
);
-DEFINE_EVENT(mlx5_esw_group_qos_template, mlx5_esw_group_qos_destroy,
+DEFINE_EVENT(mlx5_esw_node_qos_template, mlx5_esw_node_qos_destroy,
TP_PROTO(const struct mlx5_core_dev *dev,
- const struct mlx5_esw_rate_group *group,
+ const struct mlx5_esw_sched_node *node,
unsigned int tsar_ix),
- TP_ARGS(dev, group, tsar_ix)
+ TP_ARGS(dev, node, tsar_ix)
);
-TRACE_EVENT(mlx5_esw_group_qos_config,
+TRACE_EVENT(mlx5_esw_node_qos_config,
TP_PROTO(const struct mlx5_core_dev *dev,
- const struct mlx5_esw_rate_group *group,
+ const struct mlx5_esw_sched_node *node,
unsigned int tsar_ix, u32 bw_share, u32 max_rate),
- TP_ARGS(dev, group, tsar_ix, bw_share, max_rate),
+ TP_ARGS(dev, node, tsar_ix, bw_share, max_rate),
TP_STRUCT__entry(__string(devname, dev_name(dev->device))
- __field(const void *, group)
+ __field(const void *, node)
__field(unsigned int, tsar_ix)
__field(unsigned int, bw_share)
__field(unsigned int, max_rate)
),
- TP_fast_assign(__assign_str(devname, dev_name(dev->device));
- __entry->group = group;
+ TP_fast_assign(__assign_str(devname);
+ __entry->node = node;
__entry->tsar_ix = tsar_ix;
__entry->bw_share = bw_share;
__entry->max_rate = max_rate;
),
- TP_printk("(%s) group=%p tsar_ix=%u bw_share=%u max_rate=%u\n",
- __get_str(devname), __entry->group, __entry->tsar_ix,
+ TP_printk("(%s) node=%p tsar_ix=%u bw_share=%u max_rate=%u\n",
+ __get_str(devname), __entry->node, __entry->tsar_ix,
__entry->bw_share, __entry->max_rate
)
);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
index 5a0047bdcb51..3cfe743610d3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
@@ -10,9 +10,9 @@
#endif
enum {
- MLX5_ESW_IPSEC_RX_POL_FT_LEVEL,
MLX5_ESW_IPSEC_RX_ESP_FT_LEVEL,
MLX5_ESW_IPSEC_RX_ESP_FT_CHK_LEVEL,
+ MLX5_ESW_IPSEC_RX_POL_FT_LEVEL,
};
enum {
@@ -85,6 +85,19 @@ err_header_alloc:
return err;
}
+void mlx5_esw_ipsec_rx_rule_add_match_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5_flow_spec *spec)
+{
+ MLX5_SET(fte_match_param, spec->match_criteria,
+ misc_parameters_2.metadata_reg_c_1,
+ ESW_IPSEC_RX_MAPPED_ID_MATCH_MASK);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters_2.metadata_reg_c_1,
+ sa_entry->rx_mapped_id << ESW_ZONE_ID_BITS);
+
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
+}
+
void mlx5_esw_ipsec_rx_id_mapping_remove(struct mlx5e_ipsec_sa_entry *sa_entry)
{
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
@@ -150,11 +163,11 @@ void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev)
unsigned long i;
int err;
- xa_for_each(&esw->offloads.vport_reps, i, rep) {
- rpriv = rep->rep_data[REP_ETH].priv;
- if (!rpriv || !rpriv->netdev)
+ mlx5_esw_for_each_rep(esw, i, rep) {
+ if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
continue;
+ rpriv = rep->rep_data[REP_ETH].priv;
rhashtable_walk_enter(&rpriv->tc_ht, &iter);
rhashtable_walk_start(&iter);
while ((flow = rhashtable_walk_next(&iter)) != NULL) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
index ac9c65b89166..514c15258b1d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
@@ -20,6 +20,8 @@ int mlx5_esw_ipsec_rx_ipsec_obj_id_search(struct mlx5e_priv *priv, u32 id,
void mlx5_esw_ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_tx_create_attr *attr);
void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev);
+void mlx5_esw_ipsec_rx_rule_add_match_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5_flow_spec *spec);
#else
static inline void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx_create_attr *attr) {}
@@ -48,5 +50,8 @@ static inline void mlx5_esw_ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_tx_create_attr *attr) {}
static inline void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev) {}
+static inline void
+mlx5_esw_ipsec_rx_rule_add_match_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5_flow_spec *spec) {}
#endif /* CONFIG_MLX5_ESWITCH */
#endif /* __MLX5_ESW_IPSEC_FS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
index 255bc8b749f9..929adeb50a98 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
@@ -66,7 +66,6 @@ static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw)
esw->fdb_table.legacy.addr_grp = NULL;
esw->fdb_table.legacy.allmulti_grp = NULL;
esw->fdb_table.legacy.promisc_grp = NULL;
- atomic64_set(&esw->user_count, 0);
}
static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
@@ -96,7 +95,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
if (!flow_group_in)
return -ENOMEM;
- ft_attr.max_fte = POOL_NEXT_SIZE;
+ ft_attr.max_fte = MLX5_FS_MAX_POOL_SIZE;
ft_attr.prio = LEGACY_FDB_PRIO;
fdb = mlx5_create_flow_table(root_ns, &ft_attr);
if (IS_ERR(fdb)) {
@@ -176,20 +175,10 @@ static void esw_destroy_legacy_vepa_table(struct mlx5_eswitch *esw)
static int esw_create_legacy_table(struct mlx5_eswitch *esw)
{
- int err;
-
memset(&esw->fdb_table.legacy, 0, sizeof(struct legacy_fdb));
atomic64_set(&esw->user_count, 0);
- err = esw_create_legacy_vepa_table(esw);
- if (err)
- return err;
-
- err = esw_create_legacy_fdb_table(esw);
- if (err)
- esw_destroy_legacy_vepa_table(esw);
-
- return err;
+ return esw_create_legacy_fdb_table(esw);
}
static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw)
@@ -259,15 +248,22 @@ static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw,
if (!setting) {
esw_cleanup_vepa_rules(esw);
+ esw_destroy_legacy_vepa_table(esw);
return 0;
}
if (esw->fdb_table.legacy.vepa_uplink_rule)
return 0;
+ err = esw_create_legacy_vepa_table(esw);
+ if (err)
+ return err;
+
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
- if (!spec)
- return -ENOMEM;
+ if (!spec) {
+ err = -ENOMEM;
+ goto out;
+ }
/* Uplink rule forward uplink traffic to FDB */
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
@@ -303,8 +299,10 @@ static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw,
out:
kvfree(spec);
- if (err)
+ if (err) {
esw_cleanup_vepa_rules(esw);
+ esw_destroy_legacy_vepa_table(esw);
+ }
return err;
}
@@ -319,7 +317,7 @@ int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting)
return -EPERM;
mutex_lock(&esw->state_lock);
- if (esw->mode != MLX5_ESWITCH_LEGACY) {
+ if (esw->mode != MLX5_ESWITCH_LEGACY || !mlx5_esw_is_fdb_created(esw)) {
err = -EOPNOTSUPP;
goto out;
}
@@ -339,7 +337,7 @@ int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting)
if (!mlx5_esw_allowed(esw))
return -EPERM;
- if (esw->mode != MLX5_ESWITCH_LEGACY)
+ if (esw->mode != MLX5_ESWITCH_LEGACY || !mlx5_esw_is_fdb_created(esw))
return -EOPNOTSUPP;
*setting = esw->fdb_table.legacy.vepa_uplink_rule ? 1 : 0;
@@ -513,15 +511,11 @@ int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
u32 max_rate, u32 min_rate)
{
struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
- int err;
if (!mlx5_esw_allowed(esw))
return -EPERM;
if (IS_ERR(evport))
return PTR_ERR(evport);
- mutex_lock(&esw->state_lock);
- err = mlx5_esw_qos_set_vport_rate(esw, evport, max_rate, min_rate);
- mutex_unlock(&esw->state_lock);
- return err;
+ return mlx5_esw_qos_set_vport_rate(evport, max_rate, min_rate);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index d2ebe56c3977..4278bcb04c72 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -11,694 +11,1482 @@
/* Minimum supported BW share value by the HW is 1 Mbit/sec */
#define MLX5_MIN_BW_SHARE 1
-#define MLX5_RATE_TO_BW_SHARE(rate, divider, limit) \
- min_t(u32, max_t(u32, DIV_ROUND_UP(rate, divider), MLX5_MIN_BW_SHARE), limit)
+/* Holds rate nodes associated with an E-Switch. */
+struct mlx5_qos_domain {
+ /* Serializes access to all qos changes in the qos domain. */
+ struct mutex lock;
+ /* List of all mlx5_esw_sched_nodes. */
+ struct list_head nodes;
+};
-struct mlx5_esw_rate_group {
- u32 tsar_ix;
+static void esw_qos_lock(struct mlx5_eswitch *esw)
+{
+ mutex_lock(&esw->qos.domain->lock);
+}
+
+static void esw_qos_unlock(struct mlx5_eswitch *esw)
+{
+ mutex_unlock(&esw->qos.domain->lock);
+}
+
+static void esw_assert_qos_lock_held(struct mlx5_eswitch *esw)
+{
+ lockdep_assert_held(&esw->qos.domain->lock);
+}
+
+static struct mlx5_qos_domain *esw_qos_domain_alloc(void)
+{
+ struct mlx5_qos_domain *qos_domain;
+
+ qos_domain = kzalloc(sizeof(*qos_domain), GFP_KERNEL);
+ if (!qos_domain)
+ return NULL;
+
+ mutex_init(&qos_domain->lock);
+ INIT_LIST_HEAD(&qos_domain->nodes);
+
+ return qos_domain;
+}
+
+static int esw_qos_domain_init(struct mlx5_eswitch *esw)
+{
+ esw->qos.domain = esw_qos_domain_alloc();
+
+ return esw->qos.domain ? 0 : -ENOMEM;
+}
+
+static void esw_qos_domain_release(struct mlx5_eswitch *esw)
+{
+ kfree(esw->qos.domain);
+ esw->qos.domain = NULL;
+}
+
+enum sched_node_type {
+ SCHED_NODE_TYPE_VPORTS_TSAR,
+ SCHED_NODE_TYPE_VPORT,
+ SCHED_NODE_TYPE_TC_ARBITER_TSAR,
+ SCHED_NODE_TYPE_RATE_LIMITER,
+ SCHED_NODE_TYPE_VPORT_TC,
+ SCHED_NODE_TYPE_VPORTS_TC_TSAR,
+};
+
+static const char * const sched_node_type_str[] = {
+ [SCHED_NODE_TYPE_VPORTS_TSAR] = "vports TSAR",
+ [SCHED_NODE_TYPE_VPORT] = "vport",
+ [SCHED_NODE_TYPE_TC_ARBITER_TSAR] = "TC Arbiter TSAR",
+ [SCHED_NODE_TYPE_RATE_LIMITER] = "Rate Limiter",
+ [SCHED_NODE_TYPE_VPORT_TC] = "vport TC",
+ [SCHED_NODE_TYPE_VPORTS_TC_TSAR] = "vports TC TSAR",
+};
+
+struct mlx5_esw_sched_node {
+ u32 ix;
+ /* Bandwidth parameters. */
u32 max_rate;
u32 min_rate;
+ /* A computed value indicating relative min_rate between node's children. */
u32 bw_share;
- struct list_head list;
+ /* The parent node in the rate hierarchy. */
+ struct mlx5_esw_sched_node *parent;
+ /* Entry in the parent node's children list. */
+ struct list_head entry;
+ /* The type of this node in the rate hierarchy. */
+ enum sched_node_type type;
+ /* The eswitch this node belongs to. */
+ struct mlx5_eswitch *esw;
+ /* The children nodes of this node, empty list for leaf nodes. */
+ struct list_head children;
+ /* Valid only if this node is associated with a vport. */
+ struct mlx5_vport *vport;
+ /* Level in the hierarchy. The root node level is 1. */
+ u8 level;
+ /* Valid only when this node represents a traffic class. */
+ u8 tc;
+ /* Valid only for a TC arbiter node or vport TC arbiter. */
+ u32 tc_bw[DEVLINK_RATE_TCS_MAX];
};
-static int esw_qos_tsar_config(struct mlx5_core_dev *dev, u32 *sched_ctx,
- u32 tsar_ix, u32 max_rate, u32 bw_share)
+static void esw_qos_node_attach_to_parent(struct mlx5_esw_sched_node *node)
{
- u32 bitmask = 0;
+ if (!node->parent) {
+ /* Root children are assigned a depth level of 2. */
+ node->level = 2;
+ list_add_tail(&node->entry, &node->esw->qos.domain->nodes);
+ } else {
+ node->level = node->parent->level + 1;
+ list_add_tail(&node->entry, &node->parent->children);
+ }
+}
- if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
- return -EOPNOTSUPP;
+static int esw_qos_num_tcs(struct mlx5_core_dev *dev)
+{
+ int num_tcs = mlx5_max_tc(dev) + 1;
- MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_rate);
- MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
- bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
- bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE;
+ return num_tcs < DEVLINK_RATE_TCS_MAX ? num_tcs : DEVLINK_RATE_TCS_MAX;
+}
- return mlx5_modify_scheduling_element_cmd(dev,
- SCHEDULING_HIERARCHY_E_SWITCH,
- sched_ctx,
- tsar_ix,
- bitmask);
+static void
+esw_qos_node_set_parent(struct mlx5_esw_sched_node *node, struct mlx5_esw_sched_node *parent)
+{
+ list_del_init(&node->entry);
+ node->parent = parent;
+ if (parent)
+ node->esw = parent->esw;
+ esw_qos_node_attach_to_parent(node);
}
-static int esw_qos_group_config(struct mlx5_eswitch *esw, struct mlx5_esw_rate_group *group,
- u32 max_rate, u32 bw_share, struct netlink_ext_ack *extack)
+static void esw_qos_nodes_set_parent(struct list_head *nodes,
+ struct mlx5_esw_sched_node *parent)
+{
+ struct mlx5_esw_sched_node *node, *tmp;
+
+ list_for_each_entry_safe(node, tmp, nodes, entry) {
+ esw_qos_node_set_parent(node, parent);
+ if (!list_empty(&node->children) &&
+ parent->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR) {
+ struct mlx5_esw_sched_node *child;
+
+ list_for_each_entry(child, &node->children, entry) {
+ struct mlx5_vport *vport = child->vport;
+
+ if (vport)
+ vport->qos.sched_node->parent = parent;
+ }
+ }
+ }
+}
+
+void mlx5_esw_qos_vport_qos_free(struct mlx5_vport *vport)
+{
+ if (vport->qos.sched_nodes) {
+ int num_tcs = esw_qos_num_tcs(vport->qos.sched_node->esw->dev);
+ int i;
+
+ for (i = 0; i < num_tcs; i++)
+ kfree(vport->qos.sched_nodes[i]);
+ kfree(vport->qos.sched_nodes);
+ }
+
+ kfree(vport->qos.sched_node);
+ memset(&vport->qos, 0, sizeof(vport->qos));
+}
+
+u32 mlx5_esw_qos_vport_get_sched_elem_ix(const struct mlx5_vport *vport)
+{
+ if (!vport->qos.sched_node)
+ return 0;
+
+ return vport->qos.sched_node->ix;
+}
+
+struct mlx5_esw_sched_node *
+mlx5_esw_qos_vport_get_parent(const struct mlx5_vport *vport)
+{
+ if (!vport->qos.sched_node)
+ return NULL;
+
+ return vport->qos.sched_node->parent;
+}
+
+static void esw_qos_sched_elem_warn(struct mlx5_esw_sched_node *node, int err, const char *op)
+{
+ switch (node->type) {
+ case SCHED_NODE_TYPE_VPORTS_TC_TSAR:
+ esw_warn(node->esw->dev,
+ "E-Switch %s %s scheduling element failed (tc=%d,err=%d)\n",
+ op, sched_node_type_str[node->type], node->tc, err);
+ break;
+ case SCHED_NODE_TYPE_VPORT_TC:
+ esw_warn(node->esw->dev,
+ "E-Switch %s %s scheduling element failed (vport=%d,tc=%d,err=%d)\n",
+ op,
+ sched_node_type_str[node->type],
+ node->vport->vport, node->tc, err);
+ break;
+ case SCHED_NODE_TYPE_VPORT:
+ esw_warn(node->esw->dev,
+ "E-Switch %s %s scheduling element failed (vport=%d,err=%d)\n",
+ op, sched_node_type_str[node->type], node->vport->vport, err);
+ break;
+ case SCHED_NODE_TYPE_RATE_LIMITER:
+ case SCHED_NODE_TYPE_TC_ARBITER_TSAR:
+ case SCHED_NODE_TYPE_VPORTS_TSAR:
+ esw_warn(node->esw->dev,
+ "E-Switch %s %s scheduling element failed (err=%d)\n",
+ op, sched_node_type_str[node->type], err);
+ break;
+ default:
+ esw_warn(node->esw->dev,
+ "E-Switch %s scheduling element failed (err=%d)\n",
+ op, err);
+ break;
+ }
+}
+
+static int esw_qos_node_create_sched_element(struct mlx5_esw_sched_node *node, void *ctx,
+ struct netlink_ext_ack *extack)
{
- u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
- struct mlx5_core_dev *dev = esw->dev;
int err;
- err = esw_qos_tsar_config(dev, sched_ctx,
- group->tsar_ix,
- max_rate, bw_share);
- if (err)
- NL_SET_ERR_MSG_MOD(extack, "E-Switch modify group TSAR element failed");
+ err = mlx5_create_scheduling_element_cmd(node->esw->dev, SCHEDULING_HIERARCHY_E_SWITCH, ctx,
+ &node->ix);
+ if (err) {
+ esw_qos_sched_elem_warn(node, err, "create");
+ NL_SET_ERR_MSG_MOD(extack, "E-Switch create scheduling element failed");
+ }
+
+ return err;
+}
- trace_mlx5_esw_group_qos_config(dev, group, group->tsar_ix, bw_share, max_rate);
+static int esw_qos_node_destroy_sched_element(struct mlx5_esw_sched_node *node,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ err = mlx5_destroy_scheduling_element_cmd(node->esw->dev,
+ SCHEDULING_HIERARCHY_E_SWITCH,
+ node->ix);
+ if (err) {
+ esw_qos_sched_elem_warn(node, err, "destroy");
+ NL_SET_ERR_MSG_MOD(extack, "E-Switch destroying scheduling element failed.");
+ }
return err;
}
-static int esw_qos_vport_config(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport,
- u32 max_rate, u32 bw_share,
- struct netlink_ext_ack *extack)
+static int esw_qos_sched_elem_config(struct mlx5_esw_sched_node *node, u32 max_rate, u32 bw_share,
+ struct netlink_ext_ack *extack)
{
u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
- struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_core_dev *dev = node->esw->dev;
+ u32 bitmask = 0;
int err;
- if (!vport->qos.enabled)
- return -EIO;
+ if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
+ return -EOPNOTSUPP;
+
+ if (bw_share && (!MLX5_CAP_QOS(dev, esw_bw_share) ||
+ MLX5_CAP_QOS(dev, max_tsar_bw_share) < MLX5_MIN_BW_SHARE))
+ return -EOPNOTSUPP;
+
+ if (node->max_rate == max_rate && node->bw_share == bw_share)
+ return 0;
+
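+	/* Only flag the fields that actually changed in the modify bitmask. */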
+ if (node->max_rate != max_rate) {
+ MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_rate);
+ bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
+ }
+ if (node->bw_share != bw_share) {
+ MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
+ bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE;
+ }
- err = esw_qos_tsar_config(dev, sched_ctx, vport->qos.esw_tsar_ix,
- max_rate, bw_share);
+ err = mlx5_modify_scheduling_element_cmd(dev,
+ SCHEDULING_HIERARCHY_E_SWITCH,
+ sched_ctx,
+ node->ix,
+ bitmask);
if (err) {
- esw_warn(esw->dev,
- "E-Switch modify TSAR vport element failed (vport=%d,err=%d)\n",
- vport->vport, err);
- NL_SET_ERR_MSG_MOD(extack, "E-Switch modify TSAR vport element failed");
+ esw_qos_sched_elem_warn(node, err, "modify");
+ NL_SET_ERR_MSG_MOD(extack, "E-Switch modify scheduling element failed");
+
return err;
}
- trace_mlx5_esw_vport_qos_config(vport, bw_share, max_rate);
+ node->max_rate = max_rate;
+ node->bw_share = bw_share;
+ if (node->type == SCHED_NODE_TYPE_VPORTS_TSAR)
+ trace_mlx5_esw_node_qos_config(dev, node, node->ix, bw_share, max_rate);
+ else if (node->type == SCHED_NODE_TYPE_VPORT)
+ trace_mlx5_esw_vport_qos_config(dev, node->vport, bw_share, max_rate);
return 0;
}
+static int esw_qos_create_rate_limit_element(struct mlx5_esw_sched_node *node,
+ struct netlink_ext_ack *extack)
+{
+ u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
+
+ if (!mlx5_qos_element_type_supported(
+ node->esw->dev,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_RATE_LIMIT,
+ SCHEDULING_HIERARCHY_E_SWITCH))
+ return -EOPNOTSUPP;
+
+ MLX5_SET(scheduling_context, sched_ctx, max_average_bw, node->max_rate);
+ MLX5_SET(scheduling_context, sched_ctx, element_type,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_RATE_LIMIT);
+
+ return esw_qos_node_create_sched_element(node, sched_ctx, extack);
+}
+
static u32 esw_qos_calculate_min_rate_divider(struct mlx5_eswitch *esw,
- struct mlx5_esw_rate_group *group,
- bool group_level)
+ struct mlx5_esw_sched_node *parent)
{
+ struct list_head *nodes = parent ? &parent->children : &esw->qos.domain->nodes;
u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
- struct mlx5_vport *evport;
+ struct mlx5_esw_sched_node *node;
u32 max_guarantee = 0;
- unsigned long i;
-
- if (group_level) {
- struct mlx5_esw_rate_group *group;
- list_for_each_entry(group, &esw->qos.groups, list) {
- if (group->min_rate < max_guarantee)
- continue;
- max_guarantee = group->min_rate;
- }
- } else {
- mlx5_esw_for_each_vport(esw, i, evport) {
- if (!evport->enabled || !evport->qos.enabled ||
- evport->qos.group != group || evport->qos.min_rate < max_guarantee)
- continue;
- max_guarantee = evport->qos.min_rate;
- }
+ /* Find max min_rate across all nodes.
+ * This will correspond to fw_max_bw_share in the final bw_share calculation.
+ */
+ list_for_each_entry(node, nodes, entry) {
+ if (node->esw == esw && node->ix != esw->qos.root_tsar_ix &&
+ node->min_rate > max_guarantee)
+ max_guarantee = node->min_rate;
}
if (max_guarantee)
return max_t(u32, max_guarantee / fw_max_bw_share, 1);
- /* If vports min rate divider is 0 but their group has bw_share configured, then
- * need to set bw_share for vports to minimal value.
+	/* If no node has min_rate configured, a divider of 0 sets all
+ * nodes' bw_share to 0, effectively disabling min guarantees.
*/
- if (!group_level && !max_guarantee && group && group->bw_share)
- return 1;
return 0;
}
-static u32 esw_qos_calc_bw_share(u32 min_rate, u32 divider, u32 fw_max)
+static u32 esw_qos_calc_bw_share(u32 value, u32 divider, u32 fw_max)
{
- if (divider)
- return MLX5_RATE_TO_BW_SHARE(min_rate, divider, fw_max);
-
- return 0;
+ if (!divider)
+ return 0;
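+	/* Scale by the divider and clamp to [MLX5_MIN_BW_SHARE, fw_max]. */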
+ return min_t(u32, fw_max,
+ max_t(u32,
+ DIV_ROUND_UP(value, divider), MLX5_MIN_BW_SHARE));
}
-static int esw_qos_normalize_vports_min_rate(struct mlx5_eswitch *esw,
- struct mlx5_esw_rate_group *group,
- struct netlink_ext_ack *extack)
+static void esw_qos_update_sched_node_bw_share(struct mlx5_esw_sched_node *node,
+ u32 divider,
+ struct netlink_ext_ack *extack)
{
- u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
- u32 divider = esw_qos_calculate_min_rate_divider(esw, group, false);
- struct mlx5_vport *evport;
- unsigned long i;
+ u32 fw_max_bw_share = MLX5_CAP_QOS(node->esw->dev, max_tsar_bw_share);
u32 bw_share;
- int err;
- mlx5_esw_for_each_vport(esw, i, evport) {
- if (!evport->enabled || !evport->qos.enabled || evport->qos.group != group)
- continue;
- bw_share = esw_qos_calc_bw_share(evport->qos.min_rate, divider, fw_max_bw_share);
+ bw_share = esw_qos_calc_bw_share(node->min_rate, divider, fw_max_bw_share);
- if (bw_share == evport->qos.bw_share)
+ esw_qos_sched_elem_config(node, node->max_rate, bw_share, extack);
+}
+
+static void esw_qos_normalize_min_rate(struct mlx5_eswitch *esw,
+ struct mlx5_esw_sched_node *parent,
+ struct netlink_ext_ack *extack)
+{
+ struct list_head *nodes = parent ? &parent->children : &esw->qos.domain->nodes;
+ u32 divider = esw_qos_calculate_min_rate_divider(esw, parent);
+ struct mlx5_esw_sched_node *node;
+
+ list_for_each_entry(node, nodes, entry) {
+ if (node->esw != esw || node->ix == esw->qos.root_tsar_ix)
continue;
- err = esw_qos_vport_config(esw, evport, evport->qos.max_rate, bw_share, extack);
- if (err)
- return err;
+ /* Vports TC TSARs don't have a minimum rate configured,
+ * so there's no need to update the bw_share on them.
+ */
+ if (node->type != SCHED_NODE_TYPE_VPORTS_TC_TSAR) {
+ esw_qos_update_sched_node_bw_share(node, divider,
+ extack);
+ }
+
+ if (list_empty(&node->children))
+ continue;
- evport->qos.bw_share = bw_share;
+ esw_qos_normalize_min_rate(node->esw, node, extack);
}
+}
+
+static u32 esw_qos_calculate_tc_bw_divider(u32 *tc_bw)
+{
+ u32 total = 0;
+ int i;
+
+ for (i = 0; i < DEVLINK_RATE_TCS_MAX; i++)
+ total += tc_bw[i];
+
+ /* If total is zero, tc-bw config is disabled and we shouldn't reach
+ * here.
+ */
+ return WARN_ON(!total) ? 1 : total;
+}
+
+static int esw_qos_set_node_min_rate(struct mlx5_esw_sched_node *node,
+ u32 min_rate, struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw = node->esw;
+
+ if (min_rate == node->min_rate)
+ return 0;
+
+ node->min_rate = min_rate;
+ esw_qos_normalize_min_rate(esw, node->parent, extack);
return 0;
}
-static int esw_qos_normalize_groups_min_rate(struct mlx5_eswitch *esw, u32 divider,
- struct netlink_ext_ack *extack)
+static int
+esw_qos_create_node_sched_elem(struct mlx5_core_dev *dev, u32 parent_element_id,
+ u32 max_rate, u32 bw_share, u32 *tsar_ix)
{
- u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
- struct mlx5_esw_rate_group *group;
- u32 bw_share;
- int err;
+ u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
+ void *attr;
+
+ if (!mlx5_qos_element_type_supported(dev,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR,
+ SCHEDULING_HIERARCHY_E_SWITCH) ||
+ !mlx5_qos_tsar_type_supported(dev,
+ TSAR_ELEMENT_TSAR_TYPE_DWRR,
+ SCHEDULING_HIERARCHY_E_SWITCH))
+ return -EOPNOTSUPP;
- list_for_each_entry(group, &esw->qos.groups, list) {
- bw_share = esw_qos_calc_bw_share(group->min_rate, divider, fw_max_bw_share);
+ MLX5_SET(scheduling_context, tsar_ctx, element_type,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
+ MLX5_SET(scheduling_context, tsar_ctx, parent_element_id,
+ parent_element_id);
+ MLX5_SET(scheduling_context, tsar_ctx, max_average_bw, max_rate);
+ MLX5_SET(scheduling_context, tsar_ctx, bw_share, bw_share);
+ attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes);
+ MLX5_SET(tsar_element, attr, tsar_type, TSAR_ELEMENT_TSAR_TYPE_DWRR);
- if (bw_share == group->bw_share)
- continue;
+ return mlx5_create_scheduling_element_cmd(dev,
+ SCHEDULING_HIERARCHY_E_SWITCH,
+ tsar_ctx,
+ tsar_ix);
+}
- err = esw_qos_group_config(esw, group, group->max_rate, bw_share, extack);
- if (err)
- return err;
+static int
+esw_qos_vport_create_sched_element(struct mlx5_esw_sched_node *vport_node,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *parent = vport_node->parent;
+ u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
+ struct mlx5_core_dev *dev = vport_node->esw->dev;
+ void *attr;
- group->bw_share = bw_share;
+ if (!mlx5_qos_element_type_supported(
+ dev,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT,
+ SCHEDULING_HIERARCHY_E_SWITCH))
+ return -EOPNOTSUPP;
- /* All the group's vports need to be set with default bw_share
- * to enable them with QOS
- */
- err = esw_qos_normalize_vports_min_rate(esw, group, extack);
+ MLX5_SET(scheduling_context, sched_ctx, element_type,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
+ attr = MLX5_ADDR_OF(scheduling_context, sched_ctx, element_attributes);
+ MLX5_SET(vport_element, attr, vport_number, vport_node->vport->vport);
+ MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
+ parent ? parent->ix : vport_node->esw->qos.root_tsar_ix);
+ MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
+ vport_node->max_rate);
+
+ return esw_qos_node_create_sched_element(vport_node, sched_ctx, extack);
+}
- if (err)
- return err;
+static int
+esw_qos_vport_tc_create_sched_element(struct mlx5_esw_sched_node *vport_tc_node,
+ u32 rate_limit_elem_ix,
+ struct netlink_ext_ack *extack)
+{
+ u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
+ struct mlx5_core_dev *dev = vport_tc_node->esw->dev;
+ void *attr;
+
+ if (!mlx5_qos_element_type_supported(
+ dev,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC,
+ SCHEDULING_HIERARCHY_E_SWITCH))
+ return -EOPNOTSUPP;
+
+ MLX5_SET(scheduling_context, sched_ctx, element_type,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC);
+ attr = MLX5_ADDR_OF(scheduling_context, sched_ctx, element_attributes);
+ MLX5_SET(vport_tc_element, attr, vport_number,
+ vport_tc_node->vport->vport);
+ MLX5_SET(vport_tc_element, attr, traffic_class, vport_tc_node->tc);
+ MLX5_SET(scheduling_context, sched_ctx, max_bw_obj_id,
+ rate_limit_elem_ix);
+ MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
+ vport_tc_node->parent->ix);
+ MLX5_SET(scheduling_context, sched_ctx, bw_share,
+ vport_tc_node->bw_share);
+
+ return esw_qos_node_create_sched_element(vport_tc_node, sched_ctx,
+ extack);
+}
+
+static struct mlx5_esw_sched_node *
+__esw_qos_alloc_node(struct mlx5_eswitch *esw, u32 tsar_ix, enum sched_node_type type,
+ struct mlx5_esw_sched_node *parent)
+{
+ struct mlx5_esw_sched_node *node;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return NULL;
+
+ node->esw = esw;
+ node->ix = tsar_ix;
+ node->type = type;
+ node->parent = parent;
+ INIT_LIST_HEAD(&node->children);
+ esw_qos_node_attach_to_parent(node);
+ if (!parent) {
+ /* The caller is responsible for inserting the node into the
+ * parent list if necessary. This function can also be used with
+ * a NULL parent, which doesn't necessarily indicate that it
+ * refers to the root scheduling element.
+ */
+ list_del_init(&node->entry);
}
- return 0;
+ return node;
}
-static int esw_qos_set_vport_min_rate(struct mlx5_eswitch *esw, struct mlx5_vport *evport,
- u32 min_rate, struct netlink_ext_ack *extack)
+static void __esw_qos_free_node(struct mlx5_esw_sched_node *node)
{
- u32 fw_max_bw_share, previous_min_rate;
- bool min_rate_supported;
+ list_del(&node->entry);
+ kfree(node);
+}
+
+static void esw_qos_destroy_node(struct mlx5_esw_sched_node *node, struct netlink_ext_ack *extack)
+{
+ esw_qos_node_destroy_sched_element(node, extack);
+ __esw_qos_free_node(node);
+}
+
+static int esw_qos_create_vports_tc_node(struct mlx5_esw_sched_node *parent,
+ u8 tc, struct netlink_ext_ack *extack)
+{
+ u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
+ struct mlx5_core_dev *dev = parent->esw->dev;
+ struct mlx5_esw_sched_node *vports_tc_node;
+ void *attr;
int err;
- lockdep_assert_held(&esw->state_lock);
- fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
- min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) &&
- fw_max_bw_share >= MLX5_MIN_BW_SHARE;
- if (min_rate && !min_rate_supported)
+ if (!mlx5_qos_element_type_supported(
+ dev,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR,
+ SCHEDULING_HIERARCHY_E_SWITCH) ||
+ !mlx5_qos_tsar_type_supported(dev,
+ TSAR_ELEMENT_TSAR_TYPE_DWRR,
+ SCHEDULING_HIERARCHY_E_SWITCH))
return -EOPNOTSUPP;
- if (min_rate == evport->qos.min_rate)
- return 0;
- previous_min_rate = evport->qos.min_rate;
- evport->qos.min_rate = min_rate;
- err = esw_qos_normalize_vports_min_rate(esw, evport->qos.group, extack);
+ vports_tc_node = __esw_qos_alloc_node(parent->esw, 0,
+ SCHED_NODE_TYPE_VPORTS_TC_TSAR,
+ parent);
+ if (!vports_tc_node) {
+ NL_SET_ERR_MSG_MOD(extack, "E-Switch alloc node failed");
+ esw_warn(dev, "Failed to alloc vports TC node (tc=%d)\n", tc);
+ return -ENOMEM;
+ }
+
+ attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes);
+ MLX5_SET(tsar_element, attr, tsar_type, TSAR_ELEMENT_TSAR_TYPE_DWRR);
+ MLX5_SET(tsar_element, attr, traffic_class, tc);
+ MLX5_SET(scheduling_context, tsar_ctx, parent_element_id, parent->ix);
+ MLX5_SET(scheduling_context, tsar_ctx, element_type,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
+
+ err = esw_qos_node_create_sched_element(vports_tc_node, tsar_ctx,
+ extack);
if (err)
- evport->qos.min_rate = previous_min_rate;
+ goto err_create_sched_element;
+ vports_tc_node->tc = tc;
+
+ return 0;
+
+err_create_sched_element:
+ __esw_qos_free_node(vports_tc_node);
return err;
}
-static int esw_qos_set_vport_max_rate(struct mlx5_eswitch *esw, struct mlx5_vport *evport,
- u32 max_rate, struct netlink_ext_ack *extack)
+static void
+esw_qos_tc_arbiter_get_bw_shares(struct mlx5_esw_sched_node *tc_arbiter_node,
+ u32 *tc_bw)
{
- u32 act_max_rate = max_rate;
- bool max_rate_supported;
- int err;
+ memcpy(tc_bw, tc_arbiter_node->tc_bw, sizeof(tc_arbiter_node->tc_bw));
+}
- lockdep_assert_held(&esw->state_lock);
- max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit);
+static void
+esw_qos_set_tc_arbiter_bw_shares(struct mlx5_esw_sched_node *tc_arbiter_node,
+ u32 *tc_bw, struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw = tc_arbiter_node->esw;
+ struct mlx5_esw_sched_node *vports_tc_node;
+ u32 divider, fw_max_bw_share;
- if (max_rate && !max_rate_supported)
- return -EOPNOTSUPP;
- if (max_rate == evport->qos.max_rate)
- return 0;
+ fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
+ divider = esw_qos_calculate_tc_bw_divider(tc_bw);
+ list_for_each_entry(vports_tc_node, &tc_arbiter_node->children, entry) {
+ u8 tc = vports_tc_node->tc;
+ u32 bw_share;
+
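+		/* Weight this TC's share against the total (divider) so the
+		 * resulting bw_share is relative to fw_max_bw_share.
+		 */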
+ tc_arbiter_node->tc_bw[tc] = tc_bw[tc];
+ bw_share = tc_bw[tc] * fw_max_bw_share;
+ bw_share = esw_qos_calc_bw_share(bw_share, divider,
+ fw_max_bw_share);
+ esw_qos_sched_elem_config(vports_tc_node, 0, bw_share, extack);
+ }
+}
- /* If parent group has rate limit need to set to group
- * value when new max rate is 0.
- */
- if (evport->qos.group && !max_rate)
- act_max_rate = evport->qos.group->max_rate;
+static void
+esw_qos_destroy_vports_tc_nodes(struct mlx5_esw_sched_node *tc_arbiter_node,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *vports_tc_node, *tmp;
- err = esw_qos_vport_config(esw, evport, act_max_rate, evport->qos.bw_share, extack);
+ list_for_each_entry_safe(vports_tc_node, tmp,
+ &tc_arbiter_node->children, entry)
+ esw_qos_destroy_node(vports_tc_node, extack);
+}
- if (!err)
- evport->qos.max_rate = max_rate;
+static int
+esw_qos_create_vports_tc_nodes(struct mlx5_esw_sched_node *tc_arbiter_node,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw = tc_arbiter_node->esw;
+ int err, i, num_tcs = esw_qos_num_tcs(esw->dev);
+
+ for (i = 0; i < num_tcs; i++) {
+ err = esw_qos_create_vports_tc_node(tc_arbiter_node, i, extack);
+ if (err)
+ goto err_tc_node_create;
+ }
+
+ return 0;
+err_tc_node_create:
+ esw_qos_destroy_vports_tc_nodes(tc_arbiter_node, NULL);
return err;
}
-static int esw_qos_set_group_min_rate(struct mlx5_eswitch *esw, struct mlx5_esw_rate_group *group,
- u32 min_rate, struct netlink_ext_ack *extack)
+static int esw_qos_create_tc_arbiter_sched_elem(
+ struct mlx5_esw_sched_node *tc_arbiter_node,
+ struct netlink_ext_ack *extack)
{
- u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
- struct mlx5_core_dev *dev = esw->dev;
- u32 previous_min_rate, divider;
- int err;
+ u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
+ u32 tsar_parent_ix;
+ void *attr;
- if (!(MLX5_CAP_QOS(dev, esw_bw_share) && fw_max_bw_share >= MLX5_MIN_BW_SHARE))
+ if (!mlx5_qos_tsar_type_supported(tc_arbiter_node->esw->dev,
+ TSAR_ELEMENT_TSAR_TYPE_TC_ARB,
+ SCHEDULING_HIERARCHY_E_SWITCH)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "E-Switch TC Arbiter scheduling element is not supported");
return -EOPNOTSUPP;
+ }
- if (min_rate == group->min_rate)
- return 0;
+ attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes);
+ MLX5_SET(tsar_element, attr, tsar_type, TSAR_ELEMENT_TSAR_TYPE_TC_ARB);
+ tsar_parent_ix = tc_arbiter_node->parent ? tc_arbiter_node->parent->ix :
+ tc_arbiter_node->esw->qos.root_tsar_ix;
+ MLX5_SET(scheduling_context, tsar_ctx, parent_element_id,
+ tsar_parent_ix);
+ MLX5_SET(scheduling_context, tsar_ctx, element_type,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
+ MLX5_SET(scheduling_context, tsar_ctx, max_average_bw,
+ tc_arbiter_node->max_rate);
+ MLX5_SET(scheduling_context, tsar_ctx, bw_share,
+ tc_arbiter_node->bw_share);
- previous_min_rate = group->min_rate;
- group->min_rate = min_rate;
- divider = esw_qos_calculate_min_rate_divider(esw, group, true);
- err = esw_qos_normalize_groups_min_rate(esw, divider, extack);
+ return esw_qos_node_create_sched_element(tc_arbiter_node, tsar_ctx,
+ extack);
+}
+
+static struct mlx5_esw_sched_node *
+__esw_qos_create_vports_sched_node(struct mlx5_eswitch *esw, struct mlx5_esw_sched_node *parent,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *node;
+ u32 tsar_ix;
+ int err;
+
+ err = esw_qos_create_node_sched_elem(esw->dev, esw->qos.root_tsar_ix, 0,
+ 0, &tsar_ix);
if (err) {
- group->min_rate = previous_min_rate;
- NL_SET_ERR_MSG_MOD(extack, "E-Switch group min rate setting failed");
+ NL_SET_ERR_MSG_MOD(extack, "E-Switch create TSAR for node failed");
+ return ERR_PTR(err);
+ }
- /* Attempt restoring previous configuration */
- divider = esw_qos_calculate_min_rate_divider(esw, group, true);
- if (esw_qos_normalize_groups_min_rate(esw, divider, extack))
- NL_SET_ERR_MSG_MOD(extack, "E-Switch BW share restore failed");
+ node = __esw_qos_alloc_node(esw, tsar_ix, SCHED_NODE_TYPE_VPORTS_TSAR, parent);
+ if (!node) {
+ NL_SET_ERR_MSG_MOD(extack, "E-Switch alloc node failed");
+ err = -ENOMEM;
+ goto err_alloc_node;
}
- return err;
+ list_add_tail(&node->entry, &esw->qos.domain->nodes);
+ esw_qos_normalize_min_rate(esw, NULL, extack);
+ trace_mlx5_esw_node_qos_create(esw->dev, node, node->ix);
+
+ return node;
+
+err_alloc_node:
+ if (mlx5_destroy_scheduling_element_cmd(esw->dev,
+ SCHEDULING_HIERARCHY_E_SWITCH,
+ tsar_ix))
+ NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy TSAR for node failed");
+ return ERR_PTR(err);
}
-static int esw_qos_set_group_max_rate(struct mlx5_eswitch *esw,
- struct mlx5_esw_rate_group *group,
- u32 max_rate, struct netlink_ext_ack *extack)
+static int esw_qos_get(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack);
+static void esw_qos_put(struct mlx5_eswitch *esw);
+
+static struct mlx5_esw_sched_node *
+esw_qos_create_vports_sched_node(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
{
- struct mlx5_vport *vport;
- unsigned long i;
+ struct mlx5_esw_sched_node *node;
int err;
- if (group->max_rate == max_rate)
- return 0;
+ esw_assert_qos_lock_held(esw);
+ if (!MLX5_CAP_QOS(esw->dev, log_esw_max_sched_depth))
+ return ERR_PTR(-EOPNOTSUPP);
- err = esw_qos_group_config(esw, group, max_rate, group->bw_share, extack);
+ err = esw_qos_get(esw, extack);
if (err)
- return err;
+ return ERR_PTR(err);
- group->max_rate = max_rate;
+ node = __esw_qos_create_vports_sched_node(esw, NULL, extack);
+ if (IS_ERR(node))
+ esw_qos_put(esw);
- /* Any unlimited vports in the group should be set
- * with the value of the group.
- */
- mlx5_esw_for_each_vport(esw, i, vport) {
- if (!vport->enabled || !vport->qos.enabled ||
- vport->qos.group != group || vport->qos.max_rate)
- continue;
+ return node;
+}
- err = esw_qos_vport_config(esw, vport, max_rate, vport->qos.bw_share, extack);
- if (err)
- NL_SET_ERR_MSG_MOD(extack,
- "E-Switch vport implicit rate limit setting failed");
- }
+static void __esw_qos_destroy_node(struct mlx5_esw_sched_node *node, struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw = node->esw;
- return err;
+ if (node->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR)
+ esw_qos_destroy_vports_tc_nodes(node, extack);
+
+ trace_mlx5_esw_node_qos_destroy(esw->dev, node, node->ix);
+ esw_qos_destroy_node(node, extack);
+ esw_qos_normalize_min_rate(esw, NULL, extack);
}
-static int esw_qos_vport_create_sched_element(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport,
- u32 max_rate, u32 bw_share)
+static int esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
{
- u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
- struct mlx5_esw_rate_group *group = vport->qos.group;
struct mlx5_core_dev *dev = esw->dev;
- u32 parent_tsar_ix;
- void *vport_elem;
int err;
- parent_tsar_ix = group ? group->tsar_ix : esw->qos.root_tsar_ix;
- MLX5_SET(scheduling_context, sched_ctx, element_type,
- SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
- vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx, element_attributes);
- MLX5_SET(vport_element, vport_elem, vport_number, vport->vport);
- MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_tsar_ix);
- MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_rate);
- MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
+ if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
+ return -EOPNOTSUPP;
- err = mlx5_create_scheduling_element_cmd(dev,
- SCHEDULING_HIERARCHY_E_SWITCH,
- sched_ctx,
- &vport->qos.esw_tsar_ix);
+ err = esw_qos_create_node_sched_elem(esw->dev, 0, 0, 0,
+ &esw->qos.root_tsar_ix);
if (err) {
- esw_warn(esw->dev, "E-Switch create TSAR vport element failed (vport=%d,err=%d)\n",
- vport->vport, err);
+ esw_warn(dev, "E-Switch create root TSAR failed (%d)\n", err);
return err;
}
+ refcount_set(&esw->qos.refcnt, 1);
+
return 0;
}
-static int esw_qos_update_group_scheduling_element(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport,
- struct mlx5_esw_rate_group *curr_group,
- struct mlx5_esw_rate_group *new_group,
- struct netlink_ext_ack *extack)
+static void esw_qos_destroy(struct mlx5_eswitch *esw)
{
- u32 max_rate;
int err;
err = mlx5_destroy_scheduling_element_cmd(esw->dev,
SCHEDULING_HIERARCHY_E_SWITCH,
- vport->qos.esw_tsar_ix);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy TSAR vport element failed");
- return err;
- }
+ esw->qos.root_tsar_ix);
+ if (err)
+ esw_warn(esw->dev, "E-Switch destroy root TSAR failed (%d)\n", err);
+}
- vport->qos.group = new_group;
- max_rate = vport->qos.max_rate ? vport->qos.max_rate : new_group->max_rate;
+static int esw_qos_get(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
+{
+ int err = 0;
- /* If vport is unlimited, we set the group's value.
- * Therefore, if the group is limited it will apply to
- * the vport as well and if not, vport will remain unlimited.
- */
- err = esw_qos_vport_create_sched_element(esw, vport, max_rate, vport->qos.bw_share);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "E-Switch vport group set failed.");
- goto err_sched;
+ esw_assert_qos_lock_held(esw);
+ if (!refcount_inc_not_zero(&esw->qos.refcnt)) {
+ /* esw_qos_create() set refcount to 1 only on success.
+ * No need to decrement on failure.
+ */
+ err = esw_qos_create(esw, extack);
}
- return 0;
-
-err_sched:
- vport->qos.group = curr_group;
- max_rate = vport->qos.max_rate ? vport->qos.max_rate : curr_group->max_rate;
- if (esw_qos_vport_create_sched_element(esw, vport, max_rate, vport->qos.bw_share))
- esw_warn(esw->dev, "E-Switch vport group restore failed (vport=%d)\n",
- vport->vport);
-
return err;
}
-static int esw_qos_vport_update_group(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport,
- struct mlx5_esw_rate_group *group,
- struct netlink_ext_ack *extack)
+static void esw_qos_put(struct mlx5_eswitch *esw)
{
- struct mlx5_esw_rate_group *new_group, *curr_group;
- int err;
+ esw_assert_qos_lock_held(esw);
+ if (refcount_dec_and_test(&esw->qos.refcnt))
+ esw_qos_destroy(esw);
+}
- if (!vport->enabled)
- return -EINVAL;
+static void
+esw_qos_tc_arbiter_scheduling_teardown(struct mlx5_esw_sched_node *node,
+ struct netlink_ext_ack *extack)
+{
+ /* Clean up all Vports TC nodes within the TC arbiter node. */
+ esw_qos_destroy_vports_tc_nodes(node, extack);
+ /* Destroy the scheduling element for the TC arbiter node itself. */
+ esw_qos_node_destroy_sched_element(node, extack);
+}
- curr_group = vport->qos.group;
- new_group = group ?: esw->qos.group0;
- if (curr_group == new_group)
- return 0;
+static int esw_qos_tc_arbiter_scheduling_setup(struct mlx5_esw_sched_node *node,
+ struct netlink_ext_ack *extack)
+{
+ u32 curr_ix = node->ix;
+ int err;
- err = esw_qos_update_group_scheduling_element(esw, vport, curr_group, new_group, extack);
+ err = esw_qos_create_tc_arbiter_sched_elem(node, extack);
if (err)
return err;
+ /* Initialize the vports TC nodes within created TC arbiter TSAR. */
+ err = esw_qos_create_vports_tc_nodes(node, extack);
+ if (err)
+ goto err_vports_tc_nodes;
- /* Recalculate bw share weights of old and new groups */
- if (vport->qos.bw_share || new_group->bw_share) {
- esw_qos_normalize_vports_min_rate(esw, curr_group, extack);
- esw_qos_normalize_vports_min_rate(esw, new_group, extack);
- }
+ node->type = SCHED_NODE_TYPE_TC_ARBITER_TSAR;
return 0;
+
+err_vports_tc_nodes:
+ /* If initialization fails, clean up the scheduling element
+ * for the TC arbiter node.
+ */
+ esw_qos_node_destroy_sched_element(node, NULL);
+ node->ix = curr_ix;
+ return err;
}
-static struct mlx5_esw_rate_group *
-__esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
+static int
+esw_qos_create_vport_tc_sched_node(struct mlx5_vport *vport,
+ u32 rate_limit_elem_ix,
+ struct mlx5_esw_sched_node *vports_tc_node,
+ struct netlink_ext_ack *extack)
{
- u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
- struct mlx5_esw_rate_group *group;
- u32 divider;
+ struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
+ struct mlx5_esw_sched_node *vport_tc_node;
+ u8 tc = vports_tc_node->tc;
int err;
- group = kzalloc(sizeof(*group), GFP_KERNEL);
- if (!group)
- return ERR_PTR(-ENOMEM);
+ vport_tc_node = __esw_qos_alloc_node(vport_node->esw, 0,
+ SCHED_NODE_TYPE_VPORT_TC,
+ vports_tc_node);
+ if (!vport_tc_node)
+ return -ENOMEM;
+
+ vport_tc_node->min_rate = vport_node->min_rate;
+ vport_tc_node->tc = tc;
+ vport_tc_node->vport = vport;
+ err = esw_qos_vport_tc_create_sched_element(vport_tc_node,
+ rate_limit_elem_ix,
+ extack);
+ if (err)
+ goto err_out;
- MLX5_SET(scheduling_context, tsar_ctx, parent_element_id,
- esw->qos.root_tsar_ix);
- err = mlx5_create_scheduling_element_cmd(esw->dev,
- SCHEDULING_HIERARCHY_E_SWITCH,
- tsar_ctx,
- &group->tsar_ix);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "E-Switch create TSAR for group failed");
- goto err_sched_elem;
- }
+ vport->qos.sched_nodes[tc] = vport_tc_node;
+
+ return 0;
+err_out:
+ __esw_qos_free_node(vport_tc_node);
+ return err;
+}
- list_add_tail(&group->list, &esw->qos.groups);
+static void
+esw_qos_destroy_vport_tc_sched_elements(struct mlx5_vport *vport,
+ struct netlink_ext_ack *extack)
+{
+ int i, num_tcs = esw_qos_num_tcs(vport->qos.sched_node->esw->dev);
- divider = esw_qos_calculate_min_rate_divider(esw, group, true);
- if (divider) {
- err = esw_qos_normalize_groups_min_rate(esw, divider, extack);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "E-Switch groups normalization failed");
- goto err_min_rate;
+ for (i = 0; i < num_tcs; i++) {
+ if (vport->qos.sched_nodes[i]) {
+ __esw_qos_destroy_node(vport->qos.sched_nodes[i],
+ extack);
}
}
- trace_mlx5_esw_group_qos_create(esw->dev, group, group->tsar_ix);
-
- return group;
-err_min_rate:
- list_del(&group->list);
- if (mlx5_destroy_scheduling_element_cmd(esw->dev,
- SCHEDULING_HIERARCHY_E_SWITCH,
- group->tsar_ix))
- NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy TSAR for group failed");
-err_sched_elem:
- kfree(group);
- return ERR_PTR(err);
+ kfree(vport->qos.sched_nodes);
+ vport->qos.sched_nodes = NULL;
}
-static int esw_qos_get(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack);
-static void esw_qos_put(struct mlx5_eswitch *esw);
-
-static struct mlx5_esw_rate_group *
-esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
+static int
+esw_qos_create_vport_tc_sched_elements(struct mlx5_vport *vport,
+ enum sched_node_type type,
+ struct netlink_ext_ack *extack)
{
- struct mlx5_esw_rate_group *group;
- int err;
+ struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
+ struct mlx5_esw_sched_node *tc_arbiter_node, *vports_tc_node;
+ int err, num_tcs = esw_qos_num_tcs(vport_node->esw->dev);
+ u32 rate_limit_elem_ix;
+
+ vport->qos.sched_nodes = kcalloc(num_tcs,
+ sizeof(struct mlx5_esw_sched_node *),
+ GFP_KERNEL);
+ if (!vport->qos.sched_nodes) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Allocating the vport TC scheduling elements failed.");
+ return -ENOMEM;
+ }
- if (!MLX5_CAP_QOS(esw->dev, log_esw_max_sched_depth))
- return ERR_PTR(-EOPNOTSUPP);
+ rate_limit_elem_ix = type == SCHED_NODE_TYPE_RATE_LIMITER ?
+ vport_node->ix : 0;
+ tc_arbiter_node = type == SCHED_NODE_TYPE_RATE_LIMITER ?
+ vport_node->parent : vport_node;
+ list_for_each_entry(vports_tc_node, &tc_arbiter_node->children, entry) {
+ err = esw_qos_create_vport_tc_sched_node(vport,
+ rate_limit_elem_ix,
+ vports_tc_node,
+ extack);
+ if (err)
+ goto err_create_vport_tc;
+ }
- err = esw_qos_get(esw, extack);
- if (err)
- return ERR_PTR(err);
+ return 0;
- group = __esw_qos_create_rate_group(esw, extack);
- if (IS_ERR(group))
- esw_qos_put(esw);
+err_create_vport_tc:
+ esw_qos_destroy_vport_tc_sched_elements(vport, NULL);
- return group;
+ return err;
}
-static int __esw_qos_destroy_rate_group(struct mlx5_eswitch *esw,
- struct mlx5_esw_rate_group *group,
- struct netlink_ext_ack *extack)
+static int
+esw_qos_vport_tc_enable(struct mlx5_vport *vport, enum sched_node_type type,
+ struct netlink_ext_ack *extack)
{
- u32 divider;
+ struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
+ struct mlx5_esw_sched_node *parent = vport_node->parent;
int err;
- list_del(&group->list);
+ if (type == SCHED_NODE_TYPE_TC_ARBITER_TSAR) {
+ int new_level, max_level;
- divider = esw_qos_calculate_min_rate_divider(esw, NULL, true);
- err = esw_qos_normalize_groups_min_rate(esw, divider, extack);
- if (err)
- NL_SET_ERR_MSG_MOD(extack, "E-Switch groups' normalization failed");
+ /* Increase the parent's level by 2 to account for both the
+ * TC arbiter and the vports TC scheduling element.
+ */
+ new_level = (parent ? parent->level : 2) + 2;
+ max_level = 1 << MLX5_CAP_QOS(vport_node->esw->dev,
+ log_esw_max_sched_depth);
+ if (new_level > max_level) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "TC arbitration on leafs is not supported beyond max depth %d",
+ max_level);
+ return -EOPNOTSUPP;
+ }
+ }
- err = mlx5_destroy_scheduling_element_cmd(esw->dev,
- SCHEDULING_HIERARCHY_E_SWITCH,
- group->tsar_ix);
+ esw_assert_qos_lock_held(vport->dev->priv.eswitch);
+
+ if (type == SCHED_NODE_TYPE_RATE_LIMITER)
+ err = esw_qos_create_rate_limit_element(vport_node, extack);
+ else
+ err = esw_qos_tc_arbiter_scheduling_setup(vport_node, extack);
if (err)
- NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy TSAR_ID failed");
+ return err;
- trace_mlx5_esw_group_qos_destroy(esw->dev, group, group->tsar_ix);
+ /* Rate limiters impact multiple nodes not directly connected to them
+ * and are not direct members of the QoS hierarchy.
+ * Unlink it from the parent to reflect that.
+ */
+ if (type == SCHED_NODE_TYPE_RATE_LIMITER) {
+ list_del_init(&vport_node->entry);
+ vport_node->level = 0;
+ }
- kfree(group);
+ err = esw_qos_create_vport_tc_sched_elements(vport, type, extack);
+ if (err)
+ goto err_sched_nodes;
+ return 0;
+
+err_sched_nodes:
+ if (type == SCHED_NODE_TYPE_RATE_LIMITER) {
+ esw_qos_node_destroy_sched_element(vport_node, NULL);
+ esw_qos_node_attach_to_parent(vport_node);
+ } else {
+ esw_qos_tc_arbiter_scheduling_teardown(vport_node, NULL);
+ }
return err;
}
-static int esw_qos_destroy_rate_group(struct mlx5_eswitch *esw,
- struct mlx5_esw_rate_group *group,
- struct netlink_ext_ack *extack)
+static void esw_qos_vport_tc_disable(struct mlx5_vport *vport,
+ struct netlink_ext_ack *extack)
{
- int err;
+ struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
+ enum sched_node_type curr_type = vport_node->type;
- err = __esw_qos_destroy_rate_group(esw, group, extack);
- esw_qos_put(esw);
+ esw_qos_destroy_vport_tc_sched_elements(vport, extack);
- return err;
+ if (curr_type == SCHED_NODE_TYPE_RATE_LIMITER)
+ esw_qos_node_destroy_sched_element(vport_node, extack);
+ else
+ esw_qos_tc_arbiter_scheduling_teardown(vport_node, extack);
}
-static bool esw_qos_element_type_supported(struct mlx5_core_dev *dev, int type)
+static int esw_qos_set_vport_tcs_min_rate(struct mlx5_vport *vport,
+ u32 min_rate,
+ struct netlink_ext_ack *extack)
{
- switch (type) {
- case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR:
- return MLX5_CAP_QOS(dev, esw_element_type) &
- ELEMENT_TYPE_CAP_MASK_TASR;
- case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT:
- return MLX5_CAP_QOS(dev, esw_element_type) &
- ELEMENT_TYPE_CAP_MASK_VPORT;
- case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC:
- return MLX5_CAP_QOS(dev, esw_element_type) &
- ELEMENT_TYPE_CAP_MASK_VPORT_TC;
- case SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC:
- return MLX5_CAP_QOS(dev, esw_element_type) &
- ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC;
+ struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
+ int err, i, num_tcs = esw_qos_num_tcs(vport_node->esw->dev);
+
+ for (i = 0; i < num_tcs; i++) {
+ err = esw_qos_set_node_min_rate(vport->qos.sched_nodes[i],
+ min_rate, extack);
+ if (err)
+ goto err_out;
}
- return false;
+ vport_node->min_rate = min_rate;
+
+ return 0;
+err_out:
+ for (--i; i >= 0; i--) {
+ esw_qos_set_node_min_rate(vport->qos.sched_nodes[i],
+ vport_node->min_rate, extack);
+ }
+ return err;
}
-static int esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
+static void esw_qos_vport_disable(struct mlx5_vport *vport, struct netlink_ext_ack *extack)
{
- u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
- struct mlx5_core_dev *dev = esw->dev;
- __be32 *attr;
- int err;
+ struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
+ enum sched_node_type curr_type = vport_node->type;
- if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
- return -EOPNOTSUPP;
+ if (curr_type == SCHED_NODE_TYPE_VPORT)
+ esw_qos_node_destroy_sched_element(vport_node, extack);
+ else
+ esw_qos_vport_tc_disable(vport, extack);
- if (!esw_qos_element_type_supported(dev, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR))
- return -EOPNOTSUPP;
+ vport_node->bw_share = 0;
+ memset(vport_node->tc_bw, 0, sizeof(vport_node->tc_bw));
+ list_del_init(&vport_node->entry);
+ esw_qos_normalize_min_rate(vport_node->esw, vport_node->parent, extack);
- MLX5_SET(scheduling_context, tsar_ctx, element_type,
- SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
+ trace_mlx5_esw_vport_qos_destroy(vport_node->esw->dev, vport);
+}
- attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes);
- *attr = cpu_to_be32(TSAR_ELEMENT_TSAR_TYPE_DWRR << 16);
+static int esw_qos_vport_enable(struct mlx5_vport *vport,
+ enum sched_node_type type,
+ struct mlx5_esw_sched_node *parent,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
+ int err;
- err = mlx5_create_scheduling_element_cmd(dev,
- SCHEDULING_HIERARCHY_E_SWITCH,
- tsar_ctx,
- &esw->qos.root_tsar_ix);
- if (err) {
- esw_warn(dev, "E-Switch create root TSAR failed (%d)\n", err);
+ esw_assert_qos_lock_held(vport->dev->priv.eswitch);
+
+ esw_qos_node_set_parent(vport_node, parent);
+ if (type == SCHED_NODE_TYPE_VPORT)
+ err = esw_qos_vport_create_sched_element(vport_node, extack);
+ else
+ err = esw_qos_vport_tc_enable(vport, type, extack);
+ if (err)
return err;
- }
- INIT_LIST_HEAD(&esw->qos.groups);
- if (MLX5_CAP_QOS(dev, log_esw_max_sched_depth)) {
- esw->qos.group0 = __esw_qos_create_rate_group(esw, extack);
- if (IS_ERR(esw->qos.group0)) {
- esw_warn(dev, "E-Switch create rate group 0 failed (%ld)\n",
- PTR_ERR(esw->qos.group0));
- err = PTR_ERR(esw->qos.group0);
- goto err_group0;
- }
- }
- refcount_set(&esw->qos.refcnt, 1);
+ vport_node->type = type;
+ esw_qos_normalize_min_rate(vport_node->esw, parent, extack);
+ trace_mlx5_esw_vport_qos_create(vport->dev, vport, vport_node->max_rate,
+ vport_node->bw_share);
return 0;
+}
+
+static int mlx5_esw_qos_vport_enable(struct mlx5_vport *vport, enum sched_node_type type,
+ struct mlx5_esw_sched_node *parent, u32 max_rate,
+ u32 min_rate, struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
+ struct mlx5_esw_sched_node *sched_node;
+ struct mlx5_eswitch *parent_esw;
+ int err;
+
+ esw_assert_qos_lock_held(esw);
+ err = esw_qos_get(esw, extack);
+ if (err)
+ return err;
+
+ parent_esw = parent ? parent->esw : esw;
+ sched_node = __esw_qos_alloc_node(parent_esw, 0, type, parent);
+ if (!sched_node) {
+ esw_qos_put(esw);
+ return -ENOMEM;
+ }
+ if (!parent)
+ list_add_tail(&sched_node->entry, &esw->qos.domain->nodes);
-err_group0:
- if (mlx5_destroy_scheduling_element_cmd(esw->dev, SCHEDULING_HIERARCHY_E_SWITCH,
- esw->qos.root_tsar_ix))
- esw_warn(esw->dev, "E-Switch destroy root TSAR failed.\n");
+ sched_node->max_rate = max_rate;
+ sched_node->min_rate = min_rate;
+ sched_node->vport = vport;
+ vport->qos.sched_node = sched_node;
+ err = esw_qos_vport_enable(vport, type, parent, extack);
+ if (err) {
+ __esw_qos_free_node(sched_node);
+ esw_qos_put(esw);
+ vport->qos.sched_node = NULL;
+ }
return err;
}
-static void esw_qos_destroy(struct mlx5_eswitch *esw)
+static void mlx5_esw_qos_vport_disable_locked(struct mlx5_vport *vport)
{
- int err;
+ struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
- if (esw->qos.group0)
- __esw_qos_destroy_rate_group(esw, esw->qos.group0, NULL);
+ esw_assert_qos_lock_held(esw);
+ if (!vport->qos.sched_node)
+ return;
- err = mlx5_destroy_scheduling_element_cmd(esw->dev,
- SCHEDULING_HIERARCHY_E_SWITCH,
- esw->qos.root_tsar_ix);
- if (err)
- esw_warn(esw->dev, "E-Switch destroy root TSAR failed (%d)\n", err);
+ esw_qos_vport_disable(vport, NULL);
+ mlx5_esw_qos_vport_qos_free(vport);
+ esw_qos_put(esw);
}
-static int esw_qos_get(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
+void mlx5_esw_qos_vport_disable(struct mlx5_vport *vport)
{
- int err = 0;
+ struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
+ struct mlx5_esw_sched_node *parent;
lockdep_assert_held(&esw->state_lock);
+ esw_qos_lock(esw);
+ if (!vport->qos.sched_node)
+ goto unlock;
- if (!refcount_inc_not_zero(&esw->qos.refcnt)) {
- /* esw_qos_create() set refcount to 1 only on success.
- * No need to decrement on failure.
- */
- err = esw_qos_create(esw, extack);
- }
+ parent = vport->qos.sched_node->parent;
+ WARN(parent, "Disabling QoS on port before detaching it from node");
+
+ mlx5_esw_qos_vport_disable_locked(vport);
+unlock:
+ esw_qos_unlock(esw);
+}
+
+static int mlx5_esw_qos_set_vport_max_rate(struct mlx5_vport *vport, u32 max_rate,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
+
+ esw_assert_qos_lock_held(vport->dev->priv.eswitch);
+
+ if (!vport_node)
+ return mlx5_esw_qos_vport_enable(vport, SCHED_NODE_TYPE_VPORT, NULL, max_rate, 0,
+ extack);
+ else
+ return esw_qos_sched_elem_config(vport_node, max_rate, vport_node->bw_share,
+ extack);
+}
+static int mlx5_esw_qos_set_vport_min_rate(struct mlx5_vport *vport, u32 min_rate,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
+
+ esw_assert_qos_lock_held(vport->dev->priv.eswitch);
+
+ if (!vport_node)
+ return mlx5_esw_qos_vport_enable(vport, SCHED_NODE_TYPE_VPORT, NULL, 0, min_rate,
+ extack);
+ else if (vport_node->type == SCHED_NODE_TYPE_RATE_LIMITER)
+ return esw_qos_set_vport_tcs_min_rate(vport, min_rate, extack);
+ else
+ return esw_qos_set_node_min_rate(vport_node, min_rate, extack);
+}
+
+int mlx5_esw_qos_set_vport_rate(struct mlx5_vport *vport, u32 max_rate, u32 min_rate)
+{
+ struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
+ int err;
+
+ esw_qos_lock(esw);
+ err = mlx5_esw_qos_set_vport_min_rate(vport, min_rate, NULL);
+ if (!err)
+ err = mlx5_esw_qos_set_vport_max_rate(vport, max_rate, NULL);
+ esw_qos_unlock(esw);
return err;
}
-static void esw_qos_put(struct mlx5_eswitch *esw)
+bool mlx5_esw_qos_get_vport_rate(struct mlx5_vport *vport, u32 *max_rate, u32 *min_rate)
{
- lockdep_assert_held(&esw->state_lock);
- if (refcount_dec_and_test(&esw->qos.refcnt))
- esw_qos_destroy(esw);
+ struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
+ bool enabled;
+
+ esw_qos_lock(esw);
+ enabled = !!vport->qos.sched_node;
+ if (enabled) {
+ *max_rate = vport->qos.sched_node->max_rate;
+ *min_rate = vport->qos.sched_node->min_rate;
+ }
+ esw_qos_unlock(esw);
+ return enabled;
+}
+
+static int esw_qos_vport_tc_check_type(enum sched_node_type curr_type,
+ enum sched_node_type new_type,
+ struct netlink_ext_ack *extack)
+{
+ if (curr_type == SCHED_NODE_TYPE_TC_ARBITER_TSAR &&
+ new_type == SCHED_NODE_TYPE_RATE_LIMITER) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot switch from vport-level TC arbitration to node-level TC arbitration");
+ return -EOPNOTSUPP;
+ }
+
+ if (curr_type == SCHED_NODE_TYPE_RATE_LIMITER &&
+ new_type == SCHED_NODE_TYPE_TC_ARBITER_TSAR) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot switch from node-level TC arbitration to vport-level TC arbitration");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
}
-static int esw_qos_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
- u32 max_rate, u32 bw_share, struct netlink_ext_ack *extack)
+static int esw_qos_vport_update(struct mlx5_vport *vport,
+ enum sched_node_type type,
+ struct mlx5_esw_sched_node *parent,
+ struct netlink_ext_ack *extack)
{
+ struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
+ struct mlx5_esw_sched_node *curr_parent = vport_node->parent;
+ enum sched_node_type curr_type = vport_node->type;
+ u32 curr_tc_bw[DEVLINK_RATE_TCS_MAX] = {0};
int err;
- lockdep_assert_held(&esw->state_lock);
- if (vport->qos.enabled)
+ esw_assert_qos_lock_held(vport->dev->priv.eswitch);
+ if (curr_type == type && curr_parent == parent)
return 0;
- err = esw_qos_get(esw, extack);
+ err = esw_qos_vport_tc_check_type(curr_type, type, extack);
if (err)
return err;
- vport->qos.group = esw->qos.group0;
+ if (curr_type == SCHED_NODE_TYPE_TC_ARBITER_TSAR && curr_type == type)
+ esw_qos_tc_arbiter_get_bw_shares(vport_node, curr_tc_bw);
- err = esw_qos_vport_create_sched_element(esw, vport, max_rate, bw_share);
- if (err)
- goto err_out;
-
- vport->qos.enabled = true;
- trace_mlx5_esw_vport_qos_create(vport, bw_share, max_rate);
+ esw_qos_vport_disable(vport, extack);
- return 0;
+ err = esw_qos_vport_enable(vport, type, parent, extack);
+ if (err) {
+ esw_qos_vport_enable(vport, curr_type, curr_parent, NULL);
+ extack = NULL;
+ }
-err_out:
- esw_qos_put(esw);
+ if (curr_type == SCHED_NODE_TYPE_TC_ARBITER_TSAR && curr_type == type) {
+ esw_qos_set_tc_arbiter_bw_shares(vport_node, curr_tc_bw,
+ extack);
+ }
return err;
}
-void mlx5_esw_qos_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+static int esw_qos_vport_update_parent(struct mlx5_vport *vport, struct mlx5_esw_sched_node *parent,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
+ struct mlx5_esw_sched_node *curr_parent;
+ enum sched_node_type type;
+
+ esw_assert_qos_lock_held(esw);
+ curr_parent = vport->qos.sched_node->parent;
+ if (curr_parent == parent)
+ return 0;
+
+ /* Set vport QoS type based on parent node type if different from
+ * default QoS; otherwise, use the vport's current QoS type.
+ */
+ if (parent && parent->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR)
+ type = SCHED_NODE_TYPE_RATE_LIMITER;
+ else if (curr_parent &&
+ curr_parent->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR)
+ type = SCHED_NODE_TYPE_VPORT;
+ else
+ type = vport->qos.sched_node->type;
+
+ return esw_qos_vport_update(vport, type, parent, extack);
+}
+
+static void
+esw_qos_switch_vport_tcs_to_vport(struct mlx5_esw_sched_node *tc_arbiter_node,
+ struct mlx5_esw_sched_node *node,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *vports_tc_node, *vport_tc_node, *tmp;
+
+ vports_tc_node = list_first_entry(&tc_arbiter_node->children,
+ struct mlx5_esw_sched_node,
+ entry);
+
+ list_for_each_entry_safe(vport_tc_node, tmp, &vports_tc_node->children,
+ entry)
+ esw_qos_vport_update_parent(vport_tc_node->vport, node, extack);
+}
+
+static int esw_qos_switch_tc_arbiter_node_to_vports(
+ struct mlx5_esw_sched_node *tc_arbiter_node,
+ struct mlx5_esw_sched_node *node,
+ struct netlink_ext_ack *extack)
{
+ u32 parent_tsar_ix = node->parent ?
+ node->parent->ix : node->esw->qos.root_tsar_ix;
int err;
- lockdep_assert_held(&esw->state_lock);
- if (!vport->qos.enabled)
- return;
- WARN(vport->qos.group && vport->qos.group != esw->qos.group0,
- "Disabling QoS on port before detaching it from group");
+ err = esw_qos_create_node_sched_elem(node->esw->dev, parent_tsar_ix,
+ node->max_rate, node->bw_share,
+ &node->ix);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to create scheduling element for vports node when disabling vports TC QoS");
+ return err;
+ }
- err = mlx5_destroy_scheduling_element_cmd(esw->dev,
+ node->type = SCHED_NODE_TYPE_VPORTS_TSAR;
+
+ /* Disable TC QoS for vports in the arbiter node. */
+ esw_qos_switch_vport_tcs_to_vport(tc_arbiter_node, node, extack);
+
+ return 0;
+}
+
+static int esw_qos_switch_vports_node_to_tc_arbiter(
+ struct mlx5_esw_sched_node *node,
+ struct mlx5_esw_sched_node *tc_arbiter_node,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *vport_node, *tmp;
+ struct mlx5_vport *vport;
+ int err;
+
+ /* Enable TC QoS for each vport in the node. */
+ list_for_each_entry_safe(vport_node, tmp, &node->children, entry) {
+ vport = vport_node->vport;
+ err = esw_qos_vport_update_parent(vport, tc_arbiter_node,
+ extack);
+ if (err)
+ goto err_out;
+ }
+
+ /* Destroy the current vports node TSAR. */
+ err = mlx5_destroy_scheduling_element_cmd(node->esw->dev,
SCHEDULING_HIERARCHY_E_SWITCH,
- vport->qos.esw_tsar_ix);
+ node->ix);
if (err)
- esw_warn(esw->dev, "E-Switch destroy TSAR vport element failed (vport=%d,err=%d)\n",
- vport->vport, err);
+ goto err_out;
- memset(&vport->qos, 0, sizeof(vport->qos));
- trace_mlx5_esw_vport_qos_destroy(vport);
+ return 0;
+err_out:
+ /* Restore vports back into the node if an error occurs. */
+ esw_qos_switch_vport_tcs_to_vport(tc_arbiter_node, node, NULL);
- esw_qos_put(esw);
+ return err;
}
-int mlx5_esw_qos_set_vport_rate(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
- u32 max_rate, u32 min_rate)
+static struct mlx5_esw_sched_node *
+esw_qos_move_node(struct mlx5_esw_sched_node *curr_node)
{
+ struct mlx5_esw_sched_node *new_node;
+
+ new_node = __esw_qos_alloc_node(curr_node->esw, curr_node->ix,
+ curr_node->type, NULL);
+ if (!new_node)
+ return ERR_PTR(-ENOMEM);
+
+ esw_qos_nodes_set_parent(&curr_node->children, new_node);
+ return new_node;
+}
+
+static int esw_qos_node_disable_tc_arbitration(struct mlx5_esw_sched_node *node,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *curr_node;
int err;
- lockdep_assert_held(&esw->state_lock);
- err = esw_qos_vport_enable(esw, vport, 0, 0, NULL);
+ if (node->type != SCHED_NODE_TYPE_TC_ARBITER_TSAR)
+ return 0;
+
+ /* Allocate a new rate node to hold the current state, which will allow
+ * for restoring the vports back to this node after disabling TC
+ * arbitration.
+ */
+ curr_node = esw_qos_move_node(node);
+ if (IS_ERR(curr_node)) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed setting up vports node");
+ return PTR_ERR(curr_node);
+ }
+
+ /* Disable TC QoS for all vports, and assign them back to the node. */
+ err = esw_qos_switch_tc_arbiter_node_to_vports(curr_node, node, extack);
if (err)
- return err;
+ goto err_out;
- err = esw_qos_set_vport_min_rate(esw, vport, min_rate, NULL);
- if (!err)
- err = esw_qos_set_vport_max_rate(esw, vport, max_rate, NULL);
+ /* Clean up the TC arbiter node after disabling TC QoS for vports. */
+ esw_qos_tc_arbiter_scheduling_teardown(curr_node, extack);
+ goto out;
+err_out:
+ esw_qos_nodes_set_parent(&curr_node->children, node);
+out:
+ __esw_qos_free_node(curr_node);
+ return err;
+}
+
+static int esw_qos_node_enable_tc_arbitration(struct mlx5_esw_sched_node *node,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *curr_node, *child;
+ int err, new_level, max_level;
+ if (node->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR)
+ return 0;
+
+ /* Increase the hierarchy level by one to account for the additional
+ * vports TC scheduling node, and verify that the new level does not
+ * exceed the maximum allowed depth.
+ */
+ new_level = node->level + 1;
+ max_level = 1 << MLX5_CAP_QOS(node->esw->dev, log_esw_max_sched_depth);
+ if (new_level > max_level) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "TC arbitration on nodes is not supported beyond max depth %d",
+ max_level);
+ return -EOPNOTSUPP;
+ }
+
+ /* Ensure the node does not contain non-leaf children before assigning
+ * TC bandwidth.
+ */
+ if (!list_empty(&node->children)) {
+ list_for_each_entry(child, &node->children, entry) {
+ if (!child->vport) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot configure TC bandwidth on a node with non-leaf children");
+ return -EOPNOTSUPP;
+ }
+ }
+ }
+
+ /* Allocate a new node that will store the information of the current
+ * node. This will be used later to restore the node if necessary.
+ */
+ curr_node = esw_qos_move_node(node);
+ if (IS_ERR(curr_node)) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed setting up node TC QoS");
+ return PTR_ERR(curr_node);
+ }
+
+ /* Initialize the TC arbiter node for QoS management.
+ * This step prepares the node for handling Traffic Class arbitration.
+ */
+ err = esw_qos_tc_arbiter_scheduling_setup(node, extack);
+ if (err)
+ goto err_setup;
+
+ /* Enable TC QoS for each vport within the current node. */
+ err = esw_qos_switch_vports_node_to_tc_arbiter(curr_node, node, extack);
+ if (err)
+ goto err_switch_vports;
+ goto out;
+
+err_switch_vports:
+ esw_qos_tc_arbiter_scheduling_teardown(node, NULL);
+ node->ix = curr_node->ix;
+ node->type = curr_node->type;
+err_setup:
+ esw_qos_nodes_set_parent(&curr_node->children, node);
+out:
+ __esw_qos_free_node(curr_node);
return err;
}
@@ -722,6 +1510,7 @@ static u32 mlx5_esw_qos_lag_link_speed_get_locked(struct mlx5_core_dev *mdev)
speed = lksettings.base.speed;
out:
+ mlx5_uplink_netdev_put(mdev, slave);
return speed;
}
@@ -768,10 +1557,8 @@ static int mlx5_esw_qos_link_speed_verify(struct mlx5_core_dev *mdev,
int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps)
{
- u32 ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
struct mlx5_vport *vport;
u32 link_speed_max;
- u32 bitmask;
int err;
vport = mlx5_eswitch_get_vport(esw, vport_num);
@@ -789,21 +1576,9 @@ int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32
return err;
}
- mutex_lock(&esw->state_lock);
- if (!vport->qos.enabled) {
- /* Eswitch QoS wasn't enabled yet. Enable it and vport QoS. */
- err = esw_qos_vport_enable(esw, vport, rate_mbps, vport->qos.bw_share, NULL);
- } else {
- MLX5_SET(scheduling_context, ctx, max_average_bw, rate_mbps);
-
- bitmask = MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
- err = mlx5_modify_scheduling_element_cmd(esw->dev,
- SCHEDULING_HIERARCHY_E_SWITCH,
- ctx,
- vport->qos.esw_tsar_ix,
- bitmask);
- }
- mutex_unlock(&esw->state_lock);
+ esw_qos_lock(esw);
+ err = mlx5_esw_qos_set_vport_max_rate(vport, rate_mbps, NULL);
+ esw_qos_unlock(esw);
return err;
}
@@ -841,6 +1616,71 @@ static int esw_qos_devlink_rate_to_mbps(struct mlx5_core_dev *mdev, const char *
return 0;
}
+static bool esw_qos_validate_unsupported_tc_bw(struct mlx5_eswitch *esw,
+ u32 *tc_bw)
+{
+ int i, num_tcs = esw_qos_num_tcs(esw->dev);
+
+ for (i = num_tcs; i < DEVLINK_RATE_TCS_MAX; i++) {
+ if (tc_bw[i])
+ return false;
+ }
+
+ return true;
+}
+
+static bool esw_qos_vport_validate_unsupported_tc_bw(struct mlx5_vport *vport,
+ u32 *tc_bw)
+{
+ struct mlx5_esw_sched_node *node = vport->qos.sched_node;
+ struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
+
+ esw = (node && node->parent) ? node->parent->esw : esw;
+
+ return esw_qos_validate_unsupported_tc_bw(esw, tc_bw);
+}
+
+static bool esw_qos_tc_bw_disabled(u32 *tc_bw)
+{
+ int i;
+
+ for (i = 0; i < DEVLINK_RATE_TCS_MAX; i++) {
+ if (tc_bw[i])
+ return false;
+ }
+
+ return true;
+}
+
+static void esw_vport_qos_prune_empty(struct mlx5_vport *vport)
+{
+ struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
+
+ esw_assert_qos_lock_held(vport->dev->priv.eswitch);
+ if (!vport_node)
+ return;
+
+ if (vport_node->parent || vport_node->max_rate ||
+ vport_node->min_rate || !esw_qos_tc_bw_disabled(vport_node->tc_bw))
+ return;
+
+ mlx5_esw_qos_vport_disable_locked(vport);
+}
+
+int mlx5_esw_qos_init(struct mlx5_eswitch *esw)
+{
+ if (esw->qos.domain)
+ return 0; /* Nothing to change. */
+
+ return esw_qos_domain_init(esw);
+}
+
+void mlx5_esw_qos_cleanup(struct mlx5_eswitch *esw)
+{
+ if (esw->qos.domain)
+ esw_qos_domain_release(esw);
+}
+
/* Eswitch devlink rate API */
int mlx5_esw_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void *priv,
@@ -858,14 +1698,13 @@ int mlx5_esw_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void
if (err)
return err;
- mutex_lock(&esw->state_lock);
- err = esw_qos_vport_enable(esw, vport, 0, 0, extack);
+ esw_qos_lock(esw);
+ err = mlx5_esw_qos_set_vport_min_rate(vport, tx_share, extack);
if (err)
- goto unlock;
-
- err = esw_qos_set_vport_min_rate(esw, vport, tx_share, extack);
-unlock:
- mutex_unlock(&esw->state_lock);
+ goto out;
+ esw_vport_qos_prune_empty(vport);
+out:
+ esw_qos_unlock(esw);
return err;
}
@@ -884,57 +1723,139 @@ int mlx5_esw_devlink_rate_leaf_tx_max_set(struct devlink_rate *rate_leaf, void *
if (err)
return err;
- mutex_lock(&esw->state_lock);
- err = esw_qos_vport_enable(esw, vport, 0, 0, extack);
+ esw_qos_lock(esw);
+ err = mlx5_esw_qos_set_vport_max_rate(vport, tx_max, extack);
if (err)
+ goto out;
+ esw_vport_qos_prune_empty(vport);
+out:
+ esw_qos_unlock(esw);
+ return err;
+}
+
+int mlx5_esw_devlink_rate_leaf_tc_bw_set(struct devlink_rate *rate_leaf,
+ void *priv,
+ u32 *tc_bw,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *vport_node;
+ struct mlx5_vport *vport = priv;
+ struct mlx5_eswitch *esw;
+ bool disable;
+ int err = 0;
+
+ esw = vport->dev->priv.eswitch;
+ if (!mlx5_esw_allowed(esw))
+ return -EPERM;
+
+ disable = esw_qos_tc_bw_disabled(tc_bw);
+ esw_qos_lock(esw);
+
+ if (!esw_qos_vport_validate_unsupported_tc_bw(vport, tc_bw)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "E-Switch traffic classes number is not supported");
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+
+ vport_node = vport->qos.sched_node;
+ if (disable && !vport_node)
+ goto unlock;
+
+ if (disable) {
+ if (vport_node->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR)
+ err = esw_qos_vport_update(vport, SCHED_NODE_TYPE_VPORT,
+ vport_node->parent, extack);
+ esw_vport_qos_prune_empty(vport);
+ goto unlock;
+ }
+
+ if (!vport_node) {
+ err = mlx5_esw_qos_vport_enable(vport,
+ SCHED_NODE_TYPE_TC_ARBITER_TSAR,
+ NULL, 0, 0, extack);
+ vport_node = vport->qos.sched_node;
+ } else {
+ err = esw_qos_vport_update(vport,
+ SCHED_NODE_TYPE_TC_ARBITER_TSAR,
+ vport_node->parent, extack);
+ }
+ if (!err)
+ esw_qos_set_tc_arbiter_bw_shares(vport_node, tc_bw, extack);
+unlock:
+ esw_qos_unlock(esw);
+ return err;
+}
+
+int mlx5_esw_devlink_rate_node_tc_bw_set(struct devlink_rate *rate_node,
+ void *priv,
+ u32 *tc_bw,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *node = priv;
+ struct mlx5_eswitch *esw = node->esw;
+ bool disable;
+ int err;
+
+ if (!esw_qos_validate_unsupported_tc_bw(esw, tc_bw)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "E-Switch traffic classes number is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ disable = esw_qos_tc_bw_disabled(tc_bw);
+ esw_qos_lock(esw);
+ if (disable) {
+ err = esw_qos_node_disable_tc_arbitration(node, extack);
goto unlock;
+ }
- err = esw_qos_set_vport_max_rate(esw, vport, tx_max, extack);
+ err = esw_qos_node_enable_tc_arbitration(node, extack);
+ if (!err)
+ esw_qos_set_tc_arbiter_bw_shares(node, tc_bw, extack);
unlock:
- mutex_unlock(&esw->state_lock);
+ esw_qos_unlock(esw);
return err;
}
int mlx5_esw_devlink_rate_node_tx_share_set(struct devlink_rate *rate_node, void *priv,
u64 tx_share, struct netlink_ext_ack *extack)
{
- struct mlx5_core_dev *dev = devlink_priv(rate_node->devlink);
- struct mlx5_eswitch *esw = dev->priv.eswitch;
- struct mlx5_esw_rate_group *group = priv;
+ struct mlx5_esw_sched_node *node = priv;
+ struct mlx5_eswitch *esw = node->esw;
int err;
- err = esw_qos_devlink_rate_to_mbps(dev, "tx_share", &tx_share, extack);
+ err = esw_qos_devlink_rate_to_mbps(esw->dev, "tx_share", &tx_share, extack);
if (err)
return err;
- mutex_lock(&esw->state_lock);
- err = esw_qos_set_group_min_rate(esw, group, tx_share, extack);
- mutex_unlock(&esw->state_lock);
+ esw_qos_lock(esw);
+ err = esw_qos_set_node_min_rate(node, tx_share, extack);
+ esw_qos_unlock(esw);
return err;
}
int mlx5_esw_devlink_rate_node_tx_max_set(struct devlink_rate *rate_node, void *priv,
u64 tx_max, struct netlink_ext_ack *extack)
{
- struct mlx5_core_dev *dev = devlink_priv(rate_node->devlink);
- struct mlx5_eswitch *esw = dev->priv.eswitch;
- struct mlx5_esw_rate_group *group = priv;
+ struct mlx5_esw_sched_node *node = priv;
+ struct mlx5_eswitch *esw = node->esw;
int err;
- err = esw_qos_devlink_rate_to_mbps(dev, "tx_max", &tx_max, extack);
+ err = esw_qos_devlink_rate_to_mbps(esw->dev, "tx_max", &tx_max, extack);
if (err)
return err;
- mutex_lock(&esw->state_lock);
- err = esw_qos_set_group_max_rate(esw, group, tx_max, extack);
- mutex_unlock(&esw->state_lock);
+ esw_qos_lock(esw);
+ err = esw_qos_sched_elem_config(node, tx_max, node->bw_share, extack);
+ esw_qos_unlock(esw);
return err;
}
int mlx5_esw_devlink_rate_node_new(struct devlink_rate *rate_node, void **priv,
struct netlink_ext_ack *extack)
{
- struct mlx5_esw_rate_group *group;
+ struct mlx5_esw_sched_node *node;
struct mlx5_eswitch *esw;
int err = 0;
@@ -942,7 +1863,7 @@ int mlx5_esw_devlink_rate_node_new(struct devlink_rate *rate_node, void **priv,
if (IS_ERR(esw))
return PTR_ERR(esw);
- mutex_lock(&esw->state_lock);
+ esw_qos_lock(esw);
if (esw->mode != MLX5_ESWITCH_OFFLOADS) {
NL_SET_ERR_MSG_MOD(extack,
"Rate node creation supported only in switchdev mode");
@@ -950,66 +1871,237 @@ int mlx5_esw_devlink_rate_node_new(struct devlink_rate *rate_node, void **priv,
goto unlock;
}
- group = esw_qos_create_rate_group(esw, extack);
- if (IS_ERR(group)) {
- err = PTR_ERR(group);
+ node = esw_qos_create_vports_sched_node(esw, extack);
+ if (IS_ERR(node)) {
+ err = PTR_ERR(node);
goto unlock;
}
- *priv = group;
+ *priv = node;
unlock:
- mutex_unlock(&esw->state_lock);
+ esw_qos_unlock(esw);
return err;
}
int mlx5_esw_devlink_rate_node_del(struct devlink_rate *rate_node, void *priv,
struct netlink_ext_ack *extack)
{
- struct mlx5_esw_rate_group *group = priv;
- struct mlx5_eswitch *esw;
+ struct mlx5_esw_sched_node *node = priv;
+ struct mlx5_eswitch *esw = node->esw;
+
+ esw_qos_lock(esw);
+ __esw_qos_destroy_node(node, extack);
+ esw_qos_put(esw);
+ esw_qos_unlock(esw);
+ return 0;
+}
+
+int mlx5_esw_qos_vport_update_parent(struct mlx5_vport *vport, struct mlx5_esw_sched_node *parent,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
+ int err = 0;
+
+ if (parent && parent->esw != esw) {
+ NL_SET_ERR_MSG_MOD(extack, "Cross E-Switch scheduling is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ esw_qos_lock(esw);
+ if (!vport->qos.sched_node && parent) {
+ enum sched_node_type type;
+
+ type = parent->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR ?
+ SCHED_NODE_TYPE_RATE_LIMITER : SCHED_NODE_TYPE_VPORT;
+ err = mlx5_esw_qos_vport_enable(vport, type, parent, 0, 0,
+ extack);
+ } else if (vport->qos.sched_node) {
+ err = esw_qos_vport_update_parent(vport, parent, extack);
+ }
+ esw_qos_unlock(esw);
+ return err;
+}
+
+int mlx5_esw_devlink_rate_leaf_parent_set(struct devlink_rate *devlink_rate,
+ struct devlink_rate *parent,
+ void *priv, void *parent_priv,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *node = parent ? parent_priv : NULL;
+ struct mlx5_vport *vport = priv;
int err;
- esw = mlx5_devlink_eswitch_get(rate_node->devlink);
- if (IS_ERR(esw))
- return PTR_ERR(esw);
+ err = mlx5_esw_qos_vport_update_parent(vport, node, extack);
+ if (!err) {
+ struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
+
+ esw_qos_lock(esw);
+ esw_vport_qos_prune_empty(vport);
+ esw_qos_unlock(esw);
+ }
- mutex_lock(&esw->state_lock);
- err = esw_qos_destroy_rate_group(esw, group, extack);
- mutex_unlock(&esw->state_lock);
return err;
}
-int mlx5_esw_qos_vport_update_group(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport,
- struct mlx5_esw_rate_group *group,
- struct netlink_ext_ack *extack)
+static bool esw_qos_is_node_empty(struct mlx5_esw_sched_node *node)
{
- int err = 0;
+ if (list_empty(&node->children))
+ return true;
- mutex_lock(&esw->state_lock);
- if (!vport->qos.enabled && !group)
- goto unlock;
+ if (node->type != SCHED_NODE_TYPE_TC_ARBITER_TSAR)
+ return false;
+
+ node = list_first_entry(&node->children, struct mlx5_esw_sched_node,
+ entry);
+
+ return esw_qos_is_node_empty(node);
+}
+
+static int
+mlx5_esw_qos_node_validate_set_parent(struct mlx5_esw_sched_node *node,
+ struct mlx5_esw_sched_node *parent,
+ struct netlink_ext_ack *extack)
+{
+ u8 new_level, max_level;
+
+ if (parent && parent->esw != node->esw) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot assign node to another E-Switch");
+ return -EOPNOTSUPP;
+ }
+
+ if (!esw_qos_is_node_empty(node)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot reassign a node that contains rate objects");
+ return -EOPNOTSUPP;
+ }
+
+ if (parent && parent->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot attach a node to a parent with TC bandwidth configured");
+ return -EOPNOTSUPP;
+ }
+
+ new_level = parent ? parent->level + 1 : 2;
+ if (node->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR) {
+ /* Increase by one to account for the vports TC scheduling
+ * element.
+ */
+ new_level += 1;
+ }
+
+ max_level = 1 << MLX5_CAP_QOS(node->esw->dev, log_esw_max_sched_depth);
+ if (new_level > max_level) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Node hierarchy depth %d exceeds the maximum supported level %d",
+ new_level, max_level);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
+esw_qos_tc_arbiter_node_update_parent(struct mlx5_esw_sched_node *node,
+ struct mlx5_esw_sched_node *parent,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *curr_parent = node->parent;
+ u32 curr_tc_bw[DEVLINK_RATE_TCS_MAX] = {0};
+ struct mlx5_eswitch *esw = node->esw;
+ int err;
+
+ esw_qos_tc_arbiter_get_bw_shares(node, curr_tc_bw);
+ esw_qos_tc_arbiter_scheduling_teardown(node, extack);
+ esw_qos_node_set_parent(node, parent);
+ err = esw_qos_tc_arbiter_scheduling_setup(node, extack);
+ if (err) {
+ esw_qos_node_set_parent(node, curr_parent);
+ if (esw_qos_tc_arbiter_scheduling_setup(node, extack)) {
+ esw_warn(esw->dev, "Node restore QoS failed\n");
+ return err;
+ }
+ }
+ esw_qos_set_tc_arbiter_bw_shares(node, curr_tc_bw, extack);
- err = esw_qos_vport_enable(esw, vport, 0, 0, extack);
- if (!err)
- err = esw_qos_vport_update_group(esw, vport, group, extack);
-unlock:
- mutex_unlock(&esw->state_lock);
return err;
}
-int mlx5_esw_devlink_rate_parent_set(struct devlink_rate *devlink_rate,
- struct devlink_rate *parent,
- void *priv, void *parent_priv,
- struct netlink_ext_ack *extack)
+static int esw_qos_vports_node_update_parent(struct mlx5_esw_sched_node *node,
+ struct mlx5_esw_sched_node *parent,
+ struct netlink_ext_ack *extack)
{
- struct mlx5_esw_rate_group *group;
- struct mlx5_vport *vport = priv;
+ struct mlx5_esw_sched_node *curr_parent = node->parent;
+ struct mlx5_eswitch *esw = node->esw;
+ u32 parent_ix;
+ int err;
+
+ parent_ix = parent ? parent->ix : node->esw->qos.root_tsar_ix;
+ mlx5_destroy_scheduling_element_cmd(esw->dev,
+ SCHEDULING_HIERARCHY_E_SWITCH,
+ node->ix);
+ err = esw_qos_create_node_sched_elem(esw->dev, parent_ix,
+ node->max_rate, 0, &node->ix);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to create a node under the new hierarchy.");
+ if (esw_qos_create_node_sched_elem(esw->dev, curr_parent->ix,
+ node->max_rate,
+ node->bw_share,
+ &node->ix))
+ esw_warn(esw->dev, "Node restore QoS failed\n");
+
+ return err;
+ }
+ esw_qos_node_set_parent(node, parent);
+ node->bw_share = 0;
+
+ return 0;
+}
+
+static int mlx5_esw_qos_node_update_parent(struct mlx5_esw_sched_node *node,
+ struct mlx5_esw_sched_node *parent,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *curr_parent;
+ struct mlx5_eswitch *esw = node->esw;
+ int err;
+
+ err = mlx5_esw_qos_node_validate_set_parent(node, parent, extack);
+ if (err)
+ return err;
+
+ esw_qos_lock(esw);
+ curr_parent = node->parent;
+ if (node->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR) {
+ err = esw_qos_tc_arbiter_node_update_parent(node, parent,
+ extack);
+ } else {
+ err = esw_qos_vports_node_update_parent(node, parent, extack);
+ }
+
+ if (err)
+ goto out;
+
+ esw_qos_normalize_min_rate(esw, curr_parent, extack);
+ esw_qos_normalize_min_rate(esw, parent, extack);
+
+out:
+ esw_qos_unlock(esw);
+
+ return err;
+}
+
+int mlx5_esw_devlink_rate_node_parent_set(struct devlink_rate *devlink_rate,
+ struct devlink_rate *parent,
+ void *priv, void *parent_priv,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *node = priv, *parent_node;
if (!parent)
- return mlx5_esw_qos_vport_update_group(vport->dev->priv.eswitch,
- vport, NULL, extack);
+ return mlx5_esw_qos_node_update_parent(node, NULL, extack);
- group = parent_priv;
- return mlx5_esw_qos_vport_update_group(vport->dev->priv.eswitch, vport, group, extack);
+ parent_node = parent_priv;
+ return mlx5_esw_qos_node_update_parent(node, parent_node, extack);
}
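
For reference, a minimal, hypothetical caller of the reworked per-vport helpers above might look like the sketch below. It is illustrative only: the vport lookup, the Mbps units and the esw_debug() logging are assumptions based on the surrounding code, not part of this patch.

static int example_limit_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
	u32 max_rate, min_rate;
	int err;

	if (IS_ERR(vport))
		return PTR_ERR(vport);

	/* Cap the vport at 10000 Mbps and guarantee 1000 Mbps. */
	err = mlx5_esw_qos_set_vport_rate(vport, 10000, 1000);
	if (err)
		return err;

	/* Returns false if QoS was never enabled on this vport. */
	if (mlx5_esw_qos_get_vport_rate(vport, &max_rate, &min_rate))
		esw_debug(esw->dev, "vport %u: max %u min %u\n",
			  vport_num, max_rate, min_rate);
	return 0;
}
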
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h
index 0141e9d52037..0a50982b0e27 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h
@@ -6,14 +6,29 @@
#ifdef CONFIG_MLX5_ESWITCH
-int mlx5_esw_qos_set_vport_rate(struct mlx5_eswitch *esw, struct mlx5_vport *evport,
- u32 max_rate, u32 min_rate);
-void mlx5_esw_qos_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
+int mlx5_esw_qos_init(struct mlx5_eswitch *esw);
+void mlx5_esw_qos_cleanup(struct mlx5_eswitch *esw);
+
+int mlx5_esw_qos_set_vport_rate(struct mlx5_vport *evport, u32 max_rate, u32 min_rate);
+bool mlx5_esw_qos_get_vport_rate(struct mlx5_vport *vport, u32 *max_rate, u32 *min_rate);
+void mlx5_esw_qos_vport_disable(struct mlx5_vport *vport);
+
+void mlx5_esw_qos_vport_qos_free(struct mlx5_vport *vport);
+u32 mlx5_esw_qos_vport_get_sched_elem_ix(const struct mlx5_vport *vport);
+struct mlx5_esw_sched_node *mlx5_esw_qos_vport_get_parent(const struct mlx5_vport *vport);
int mlx5_esw_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void *priv,
u64 tx_share, struct netlink_ext_ack *extack);
int mlx5_esw_devlink_rate_leaf_tx_max_set(struct devlink_rate *rate_leaf, void *priv,
u64 tx_max, struct netlink_ext_ack *extack);
+int mlx5_esw_devlink_rate_leaf_tc_bw_set(struct devlink_rate *rate_node,
+ void *priv,
+ u32 *tc_bw,
+ struct netlink_ext_ack *extack);
+int mlx5_esw_devlink_rate_node_tc_bw_set(struct devlink_rate *rate_node,
+ void *priv,
+ u32 *tc_bw,
+ struct netlink_ext_ack *extack);
int mlx5_esw_devlink_rate_node_tx_share_set(struct devlink_rate *rate_node, void *priv,
u64 tx_share, struct netlink_ext_ack *extack);
int mlx5_esw_devlink_rate_node_tx_max_set(struct devlink_rate *rate_node, void *priv,
@@ -22,10 +37,14 @@ int mlx5_esw_devlink_rate_node_new(struct devlink_rate *rate_node, void **priv,
struct netlink_ext_ack *extack);
int mlx5_esw_devlink_rate_node_del(struct devlink_rate *rate_node, void *priv,
struct netlink_ext_ack *extack);
-int mlx5_esw_devlink_rate_parent_set(struct devlink_rate *devlink_rate,
- struct devlink_rate *parent,
- void *priv, void *parent_priv,
- struct netlink_ext_ack *extack);
+int mlx5_esw_devlink_rate_leaf_parent_set(struct devlink_rate *devlink_rate,
+ struct devlink_rate *parent,
+ void *priv, void *parent_priv,
+ struct netlink_ext_ack *extack);
+int mlx5_esw_devlink_rate_node_parent_set(struct devlink_rate *devlink_rate,
+ struct devlink_rate *parent,
+ void *priv, void *parent_priv,
+ struct netlink_ext_ack *extack);
#endif
#endif
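
The old single parent_set callback is now split into leaf and node variants, and the new tc_bw setters are exported alongside them. A rough sketch of how these could be wired into a devlink_ops table is shown below; the .rate_leaf_tc_bw_set/.rate_node_tc_bw_set field names are assumed to match the devlink rate API, and the actual registration is outside the scope of this patch.

static const struct devlink_ops example_devlink_rate_ops = {
	.rate_leaf_tx_share_set	= mlx5_esw_devlink_rate_leaf_tx_share_set,
	.rate_leaf_tx_max_set	= mlx5_esw_devlink_rate_leaf_tx_max_set,
	.rate_leaf_tc_bw_set	= mlx5_esw_devlink_rate_leaf_tc_bw_set,
	.rate_leaf_parent_set	= mlx5_esw_devlink_rate_leaf_parent_set,
	.rate_node_tx_share_set	= mlx5_esw_devlink_rate_node_tx_share_set,
	.rate_node_tx_max_set	= mlx5_esw_devlink_rate_node_tx_max_set,
	.rate_node_tc_bw_set	= mlx5_esw_devlink_rate_node_tc_bw_set,
	.rate_node_new		= mlx5_esw_devlink_rate_node_new,
	.rate_node_del		= mlx5_esw_devlink_rate_node_del,
	.rate_node_parent_set	= mlx5_esw_devlink_rate_node_parent_set,
};
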
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/vporttbl.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/vporttbl.c
index 749c3957a128..407062096a82 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/vporttbl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/vporttbl.c
@@ -45,8 +45,8 @@ esw_vport_tbl_create(struct mlx5_eswitch *esw, struct mlx5_flow_namespace *ns,
ft_attr.flags = vport_ns->flags;
fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
if (IS_ERR(fdb)) {
- esw_warn(esw->dev, "Failed to create per vport FDB Table err %ld\n",
- PTR_ERR(fdb));
+ esw_warn(esw->dev, "Failed to create per vport FDB Table err %pe\n",
+ fdb);
}
return fdb;
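
This hunk, and the eswitch.c hunk that follows, switch error logging from PTR_ERR()/%ld to the %pe pointer format, which prints the symbolic error name when CONFIG_SYMBOLIC_ERRNAME is enabled (and the raw errno otherwise). A minimal sketch of the difference, assuming fdb holds ERR_PTR(-ENOMEM):

	esw_warn(esw->dev, "err %ld\n", PTR_ERR(fdb));	/* logs: err -12 */
	esw_warn(esw->dev, "err %pe\n", fdb);		/* logs: err -ENOMEM */
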
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 1789800faaeb..4b7a1ce7f406 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -257,8 +257,8 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u16 vport, bool rx_rule,
&flow_act, &dest, 1);
if (IS_ERR(flow_rule)) {
esw_warn(esw->dev,
- "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
- dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
+ "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%pe)\n",
+ dmac_v, dmac_c, vport, flow_rule);
flow_rule = NULL;
}
@@ -820,6 +820,7 @@ static int mlx5_esw_vport_caps_get(struct mlx5_eswitch *esw, struct mlx5_vport *
hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
vport->info.roce_enabled = MLX5_GET(cmd_hca_cap, hca_caps, roce);
+ vport->vhca_id = MLX5_GET(cmd_hca_cap, hca_caps, vhca_id);
if (!MLX5_CAP_GEN_MAX(esw->dev, hca_cap_2))
goto out_free;
@@ -839,6 +840,18 @@ out_free:
return err;
}
+bool mlx5_esw_vport_vhca_id(struct mlx5_eswitch *esw, u16 vportn, u16 *vhca_id)
+{
+ struct mlx5_vport *vport;
+
+ vport = mlx5_eswitch_get_vport(esw, vportn);
+ if (IS_ERR(vport) || MLX5_VPORT_INVAL_VHCA_ID(vport))
+ return false;
+
+ *vhca_id = vport->vhca_id;
+ return true;
+}
+
static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
bool vst_mode_steering = esw_vst_mode_is_steering(esw);
@@ -862,13 +875,10 @@ static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
vport_num, 1,
vport->info.link_state);
- /* Host PF has its own mac/guid. */
- if (vport_num) {
- mlx5_modify_nic_vport_mac_address(esw->dev, vport_num,
- vport->info.mac);
- mlx5_modify_nic_vport_node_guid(esw->dev, vport_num,
- vport->info.node_guid);
- }
+ mlx5_query_nic_vport_mac_address(esw->dev, vport_num, true,
+ vport->info.mac);
+ mlx5_query_nic_vport_node_guid(esw->dev, vport_num, true,
+ &vport->info.node_guid);
flags = (vport->info.vlan || vport->info.qos) ?
SET_VLAN_STRIP | SET_VLAN_INSERT : 0;
@@ -894,7 +904,7 @@ static void esw_vport_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport
vport_num, 1,
MLX5_VPORT_ADMIN_STATE_DOWN);
- mlx5_esw_qos_vport_disable(esw, vport);
+ mlx5_esw_qos_vport_disable(vport);
esw_vport_cleanup_acl(esw, vport);
}
@@ -929,17 +939,11 @@ int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
if (!mlx5_esw_is_manager_vport(esw, vport_num) &&
MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
- ret = mlx5_esw_vport_vhca_id_set(esw, vport_num);
+ ret = mlx5_esw_vport_vhca_id_map(esw, vport);
if (ret)
goto err_vhca_mapping;
}
- /* External controller host PF has factory programmed MAC.
- * Read it from the device.
- */
- if (mlx5_core_is_ecpf(esw->dev) && vport_num == MLX5_VPORT_PF)
- mlx5_query_nic_vport_mac_address(esw->dev, vport_num, true, vport->info.mac);
-
esw_vport_change_handle_locked(vport);
esw->enabled_vports++;
@@ -973,7 +977,7 @@ void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
if (!mlx5_esw_is_manager_vport(esw, vport_num) &&
MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
- mlx5_esw_vport_vhca_id_clear(esw, vport_num);
+ mlx5_esw_vport_vhca_id_unmap(esw, vport);
if (vport->vport != MLX5_VPORT_PF &&
(vport->info.ipsec_crypto_enabled || vport->info.ipsec_packet_enabled))
@@ -1038,6 +1042,25 @@ const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
return ERR_PTR(err);
}
+static int mlx5_esw_host_functions_enabled_query(struct mlx5_eswitch *esw)
+{
+ const u32 *query_host_out;
+
+ if (!mlx5_core_is_ecpf_esw_manager(esw->dev))
+ return 0;
+
+ query_host_out = mlx5_esw_query_functions(esw->dev);
+ if (IS_ERR(query_host_out))
+ return PTR_ERR(query_host_out);
+
+ esw->esw_funcs.host_funcs_disabled =
+ MLX5_GET(query_esw_functions_out, query_host_out,
+ host_params_context.host_pf_not_exist);
+
+ kvfree(query_host_out);
+ return 0;
+}
+
static void mlx5_eswitch_event_handler_register(struct mlx5_eswitch *esw)
{
if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev)) {
@@ -1061,7 +1084,7 @@ static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
unsigned long i;
mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
- memset(&vport->qos, 0, sizeof(vport->qos));
+ mlx5_esw_qos_vport_qos_free(vport);
memset(&vport->info, 0, sizeof(vport->info));
vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
}
@@ -1073,7 +1096,7 @@ static void mlx5_eswitch_clear_ec_vf_vports_info(struct mlx5_eswitch *esw)
unsigned long i;
mlx5_esw_for_each_ec_vf_vport(esw, i, vport, esw->esw_funcs.num_ec_vfs) {
- memset(&vport->qos, 0, sizeof(vport->qos));
+ mlx5_esw_qos_vport_qos_free(vport);
memset(&vport->info, 0, sizeof(vport->info));
vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
}
@@ -1185,7 +1208,8 @@ void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs)
unsigned long i;
mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) {
- if (!vport->enabled)
+ /* Adjacent VFs are unloaded separately */
+ if (!vport->enabled || vport->adjacent)
continue;
mlx5_eswitch_unload_pf_vf_vport(esw, vport->vport);
}
@@ -1204,6 +1228,42 @@ static void mlx5_eswitch_unload_ec_vf_vports(struct mlx5_eswitch *esw,
}
}
+static void mlx5_eswitch_unload_adj_vf_vports(struct mlx5_eswitch *esw)
+{
+ struct mlx5_vport *vport;
+ unsigned long i;
+
+ mlx5_esw_for_each_vf_vport(esw, i, vport, U16_MAX) {
+ if (!vport->enabled || !vport->adjacent)
+ continue;
+ mlx5_eswitch_unload_pf_vf_vport(esw, vport->vport);
+ }
+}
+
+static int
+mlx5_eswitch_load_adj_vf_vports(struct mlx5_eswitch *esw,
+ enum mlx5_eswitch_vport_event enabled_events)
+{
+ struct mlx5_vport *vport;
+ unsigned long i;
+ int err;
+
+ mlx5_esw_for_each_vf_vport(esw, i, vport, U16_MAX) {
+ if (!vport->adjacent)
+ continue;
+ err = mlx5_eswitch_load_pf_vf_vport(esw, vport->vport,
+ enabled_events);
+ if (err)
+ goto unload_adj_vf_vport;
+ }
+
+ return 0;
+
+unload_adj_vf_vport:
+ mlx5_eswitch_unload_adj_vf_vports(esw);
+ return err;
+}
+
int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
enum mlx5_eswitch_vport_event enabled_events)
{
@@ -1278,29 +1338,34 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
esw->mode == MLX5_ESWITCH_LEGACY;
/* Enable PF vport */
- if (pf_needed) {
+ if (pf_needed && mlx5_esw_host_functions_enabled(esw->dev)) {
ret = mlx5_eswitch_load_pf_vf_vport(esw, MLX5_VPORT_PF,
enabled_events);
if (ret)
return ret;
}
- /* Enable external host PF HCA */
- ret = host_pf_enable_hca(esw->dev);
- if (ret)
- goto pf_hca_err;
+ if (mlx5_esw_host_functions_enabled(esw->dev)) {
+ /* Enable external host PF HCA */
+ ret = host_pf_enable_hca(esw->dev);
+ if (ret)
+ goto pf_hca_err;
+ }
/* Enable ECPF vport */
if (mlx5_ecpf_vport_exists(esw->dev)) {
ret = mlx5_eswitch_load_pf_vf_vport(esw, MLX5_VPORT_ECPF, enabled_events);
if (ret)
goto ecpf_err;
- if (mlx5_core_ec_sriov_enabled(esw->dev)) {
- ret = mlx5_eswitch_load_ec_vf_vports(esw, esw->esw_funcs.num_ec_vfs,
- enabled_events);
- if (ret)
- goto ec_vf_err;
- }
+ }
+
+ /* Enable ECVF vports */
+ if (mlx5_core_ec_sriov_enabled(esw->dev)) {
+ ret = mlx5_eswitch_load_ec_vf_vports(esw,
+ esw->esw_funcs.num_ec_vfs,
+ enabled_events);
+ if (ret)
+ goto ec_vf_err;
}
/* Enable VF vports */
@@ -1308,8 +1373,16 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
enabled_events);
if (ret)
goto vf_err;
+
+ /* Enable adjacent VF vports */
+ ret = mlx5_eswitch_load_adj_vf_vports(esw, enabled_events);
+ if (ret)
+ goto unload_vf_vports;
+
return 0;
+unload_vf_vports:
+ mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
vf_err:
if (mlx5_core_ec_sriov_enabled(esw->dev))
mlx5_eswitch_unload_ec_vf_vports(esw, esw->esw_funcs.num_ec_vfs);
@@ -1317,9 +1390,10 @@ ec_vf_err:
if (mlx5_ecpf_vport_exists(esw->dev))
mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_ECPF);
ecpf_err:
- host_pf_disable_hca(esw->dev);
+ if (mlx5_esw_host_functions_enabled(esw->dev))
+ host_pf_disable_hca(esw->dev);
pf_hca_err:
- if (pf_needed)
+ if (pf_needed && mlx5_esw_host_functions_enabled(esw->dev))
mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF);
return ret;
}
@@ -1329,18 +1403,24 @@ pf_hca_err:
*/
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
{
+ mlx5_eswitch_unload_adj_vf_vports(esw);
+
mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
+ if (mlx5_core_ec_sriov_enabled(esw->dev))
+ mlx5_eswitch_unload_ec_vf_vports(esw,
+ esw->esw_funcs.num_ec_vfs);
+
if (mlx5_ecpf_vport_exists(esw->dev)) {
- if (mlx5_core_ec_sriov_enabled(esw->dev))
- mlx5_eswitch_unload_ec_vf_vports(esw, esw->esw_funcs.num_vfs);
mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_ECPF);
}
- host_pf_disable_hca(esw->dev);
+ if (mlx5_esw_host_functions_enabled(esw->dev))
+ host_pf_disable_hca(esw->dev);
- if (mlx5_core_is_ecpf_esw_manager(esw->dev) ||
- esw->mode == MLX5_ESWITCH_LEGACY)
+ if ((mlx5_core_is_ecpf_esw_manager(esw->dev) ||
+ esw->mode == MLX5_ESWITCH_LEGACY) &&
+ mlx5_esw_host_functions_enabled(esw->dev))
mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF);
}
@@ -1394,22 +1474,79 @@ static void mlx5_esw_mode_change_notify(struct mlx5_eswitch *esw, u16 mode)
info.new_mode = mode;
- blocking_notifier_call_chain(&esw->n_head, 0, &info);
+ blocking_notifier_call_chain(&esw->dev->priv.esw_n_head, 0, &info);
+}
+
+static int mlx5_esw_egress_acls_init(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+ int total_vports = mlx5_eswitch_get_total_vports(dev);
+ int err;
+ int i;
+
+ for (i = 0; i < total_vports; i++) {
+ err = mlx5_fs_vport_egress_acl_ns_add(steering, i);
+ if (err)
+ goto acl_ns_remove;
+ }
+ return 0;
+
+acl_ns_remove:
+ while (i--)
+ mlx5_fs_vport_egress_acl_ns_remove(steering, i);
+ return err;
+}
+
+static void mlx5_esw_egress_acls_cleanup(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+ int total_vports = mlx5_eswitch_get_total_vports(dev);
+ int i;
+
+ for (i = total_vports - 1; i >= 0; i--)
+ mlx5_fs_vport_egress_acl_ns_remove(steering, i);
+}
+
+static int mlx5_esw_ingress_acls_init(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+ int total_vports = mlx5_eswitch_get_total_vports(dev);
+ int err;
+ int i;
+
+ for (i = 0; i < total_vports; i++) {
+ err = mlx5_fs_vport_ingress_acl_ns_add(steering, i);
+ if (err)
+ goto acl_ns_remove;
+ }
+ return 0;
+
+acl_ns_remove:
+ while (i--)
+ mlx5_fs_vport_ingress_acl_ns_remove(steering, i);
+ return err;
+}
+
+static void mlx5_esw_ingress_acls_cleanup(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+ int total_vports = mlx5_eswitch_get_total_vports(dev);
+ int i;
+
+ for (i = total_vports - 1; i >= 0; i--)
+ mlx5_fs_vport_ingress_acl_ns_remove(steering, i);
}
static int mlx5_esw_acls_ns_init(struct mlx5_eswitch *esw)
{
struct mlx5_core_dev *dev = esw->dev;
- int total_vports;
int err;
if (esw->flags & MLX5_ESWITCH_VPORT_ACL_NS_CREATED)
return 0;
- total_vports = mlx5_eswitch_get_total_vports(dev);
-
if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
- err = mlx5_fs_egress_acls_init(dev, total_vports);
+ err = mlx5_esw_egress_acls_init(dev);
if (err)
return err;
} else {
@@ -1417,7 +1554,7 @@ static int mlx5_esw_acls_ns_init(struct mlx5_eswitch *esw)
}
if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
- err = mlx5_fs_ingress_acls_init(dev, total_vports);
+ err = mlx5_esw_ingress_acls_init(dev);
if (err)
goto err;
} else {
@@ -1428,7 +1565,7 @@ static int mlx5_esw_acls_ns_init(struct mlx5_eswitch *esw)
err:
if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
- mlx5_fs_egress_acls_cleanup(dev);
+ mlx5_esw_egress_acls_cleanup(dev);
return err;
}
@@ -1438,9 +1575,9 @@ static void mlx5_esw_acls_ns_cleanup(struct mlx5_eswitch *esw)
esw->flags &= ~MLX5_ESWITCH_VPORT_ACL_NS_CREATED;
if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
- mlx5_fs_ingress_acls_cleanup(dev);
+ mlx5_esw_ingress_acls_cleanup(dev);
if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
- mlx5_fs_egress_acls_cleanup(dev);
+ mlx5_esw_egress_acls_cleanup(dev);
}
/**
@@ -1481,15 +1618,18 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs)
MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE);
mlx5_eq_notifier_register(esw->dev, &esw->nb);
+ err = mlx5_esw_qos_init(esw);
+ if (err)
+ goto err_esw_init;
+
if (esw->mode == MLX5_ESWITCH_LEGACY) {
err = esw_legacy_enable(esw);
} else {
- mlx5_rescan_drivers(esw->dev);
err = esw_offloads_enable(esw);
}
if (err)
- goto abort;
+ goto err_esw_init;
esw->fdb_table.flags |= MLX5_ESW_FDB_CREATED;
@@ -1503,7 +1643,8 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs)
return 0;
-abort:
+err_esw_init:
+ mlx5_eq_notifier_unregister(esw->dev, &esw->nb);
mlx5_esw_acls_ns_cleanup(esw);
return err;
}
@@ -1665,7 +1806,8 @@ int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *
void *hca_caps;
int err;
- if (!mlx5_core_is_ecpf(dev)) {
+ if (!mlx5_core_is_ecpf(dev) ||
+ !mlx5_esw_host_functions_enabled(dev)) {
*max_sfs = 0;
return 0;
}
@@ -1687,8 +1829,7 @@ out_free:
return err;
}
-static int mlx5_esw_vport_alloc(struct mlx5_eswitch *esw,
- int index, u16 vport_num)
+int mlx5_esw_vport_alloc(struct mlx5_eswitch *esw, int index, u16 vport_num)
{
struct mlx5_vport *vport;
int err;
@@ -1701,6 +1842,7 @@ static int mlx5_esw_vport_alloc(struct mlx5_eswitch *esw,
vport->vport = vport_num;
vport->index = index;
vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
+ vport->vhca_id = MLX5_VHCA_ID_INVALID;
INIT_WORK(&vport->vport_change_handler, esw_vport_change_handler);
err = xa_insert(&esw->vports, vport_num, vport, GFP_KERNEL);
if (err)
@@ -1714,8 +1856,9 @@ insert_err:
return err;
}
-static void mlx5_esw_vport_free(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+void mlx5_esw_vport_free(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
+ esw->total_vports--;
xa_erase(&esw->vports, vport->vport);
kfree(vport);
}
@@ -1741,21 +1884,23 @@ static int mlx5_esw_vports_init(struct mlx5_eswitch *esw)
xa_init(&esw->vports);
- err = mlx5_esw_vport_alloc(esw, idx, MLX5_VPORT_PF);
- if (err)
- goto err;
- if (esw->first_host_vport == MLX5_VPORT_PF)
- xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
- idx++;
-
- for (i = 0; i < mlx5_core_max_vfs(dev); i++) {
- err = mlx5_esw_vport_alloc(esw, idx, idx);
+ if (mlx5_esw_host_functions_enabled(dev)) {
+ err = mlx5_esw_vport_alloc(esw, idx, MLX5_VPORT_PF);
if (err)
goto err;
- xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_VF);
- xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
+ if (esw->first_host_vport == MLX5_VPORT_PF)
+ xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
idx++;
+ for (i = 0; i < mlx5_core_max_vfs(dev); i++) {
+ err = mlx5_esw_vport_alloc(esw, idx, idx);
+ if (err)
+ goto err;
+ xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_VF);
+ xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
+ idx++;
+ }
}
+
base_sf_num = mlx5_sf_start_function_id(dev);
for (i = 0; i < mlx5_sf_max_functions(dev); i++) {
err = mlx5_esw_vport_alloc(esw, idx, base_sf_num + i);
@@ -1797,6 +1942,9 @@ static int mlx5_esw_vports_init(struct mlx5_eswitch *esw)
err = mlx5_esw_vport_alloc(esw, idx, MLX5_VPORT_UPLINK);
if (err)
goto err;
+
+ /* Adjacent vports or other dynamically created vports will use this */
+ esw->last_vport_idx = ++idx;
return 0;
err:
@@ -1805,7 +1953,8 @@ err:
}
static int mlx5_devlink_esw_multiport_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
@@ -1820,7 +1969,8 @@ static int mlx5_devlink_esw_multiport_set(struct devlink *devlink, u32 id,
}
static int mlx5_devlink_esw_multiport_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
@@ -1854,6 +2004,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
goto free_esw;
esw->dev = dev;
+ dev->priv.eswitch = esw;
esw->manager_vport = mlx5_eswitch_manager_vport(dev);
esw->first_host_vport = mlx5_eswitch_first_host_vport_num(dev);
@@ -1864,15 +2015,23 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
goto abort;
}
+ err = mlx5_esw_host_functions_enabled_query(esw);
+ if (err)
+ goto abort;
+
err = mlx5_esw_vports_init(esw);
if (err)
goto abort;
- dev->priv.eswitch = esw;
err = esw_offloads_init(esw);
if (err)
goto reps_err;
+ esw->mode = MLX5_ESWITCH_LEGACY;
+ err = mlx5_esw_qos_init(esw);
+ if (err)
+ goto reps_err;
+
mutex_init(&esw->offloads.encap_tbl_lock);
hash_init(esw->offloads.encap_tbl);
mutex_init(&esw->offloads.decap_tbl_lock);
@@ -1886,14 +2045,12 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
refcount_set(&esw->qos.refcnt, 0);
esw->enabled_vports = 0;
- esw->mode = MLX5_ESWITCH_LEGACY;
esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) &&
MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
else
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
- BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head);
esw_info(dev,
"Total vports %d, per vport: max uc(%d) max mc(%d)\n",
@@ -1923,6 +2080,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
esw_info(esw->dev, "cleanup\n");
+ mlx5_esw_qos_cleanup(esw);
destroy_workqueue(esw->work_queue);
WARN_ON(refcount_read(&esw->qos.refcnt));
mutex_destroy(&esw->state_lock);
@@ -2059,6 +2217,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
u16 vport, struct ifla_vf_info *ivi)
{
struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
+ u32 max_rate, min_rate;
if (IS_ERR(evport))
return PTR_ERR(evport);
@@ -2067,15 +2226,19 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
ivi->vf = vport - 1;
mutex_lock(&esw->state_lock);
+
+ mlx5_query_nic_vport_mac_address(esw->dev, vport, true,
+ evport->info.mac);
ether_addr_copy(ivi->mac, evport->info.mac);
ivi->linkstate = evport->info.link_state;
ivi->vlan = evport->info.vlan;
ivi->qos = evport->info.qos;
ivi->spoofchk = evport->info.spoofchk;
ivi->trusted = evport->info.trusted;
- if (evport->qos.enabled) {
- ivi->min_tx_rate = evport->qos.min_rate;
- ivi->max_tx_rate = evport->qos.max_rate;
+
+ if (mlx5_esw_qos_get_vport_rate(evport, &max_rate, &min_rate)) {
+ ivi->max_tx_rate = max_rate;
+ ivi->min_tx_rate = min_rate;
}
mutex_unlock(&esw->state_lock);
@@ -2216,14 +2379,16 @@ bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS);
}
-int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *nb)
+int mlx5_esw_event_notifier_register(struct mlx5_core_dev *dev,
+ struct notifier_block *nb)
{
- return blocking_notifier_chain_register(&esw->n_head, nb);
+ return blocking_notifier_chain_register(&dev->priv.esw_n_head, nb);
}
-void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *nb)
+void mlx5_esw_event_notifier_unregister(struct mlx5_core_dev *dev,
+ struct notifier_block *nb)
{
- blocking_notifier_chain_unregister(&esw->n_head, nb);
+ blocking_notifier_chain_unregister(&dev->priv.esw_n_head, nb);
}
/**
@@ -2393,3 +2558,11 @@ void mlx5_eswitch_unblock_ipsec(struct mlx5_core_dev *dev)
dev->num_ipsec_offloads--;
mutex_unlock(&esw->state_lock);
}
+
+bool mlx5_esw_host_functions_enabled(const struct mlx5_core_dev *dev)
+{
+ if (!dev->priv.eswitch)
+ return true;
+
+ return !dev->priv.eswitch->esw_funcs.host_funcs_disabled;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 349e28a6dd8d..ad1073f7b79f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -197,6 +197,11 @@ static inline struct mlx5_vport *mlx5_devlink_port_vport_get(struct devlink_port
return mlx5_devlink_port_get(dl_port)->vport;
}
+#define MLX5_VHCA_ID_INVALID (-1)
+
+#define MLX5_VPORT_INVAL_VHCA_ID(vport) \
+ ((vport)->vhca_id == MLX5_VHCA_ID_INVALID)
+
struct mlx5_vport {
struct mlx5_core_dev *dev;
struct hlist_head uc_list[MLX5_L2_ADDR_HASH_SIZE];
@@ -209,20 +214,35 @@ struct mlx5_vport {
struct vport_egress egress;
u32 default_metadata;
u32 metadata;
+ int vhca_id;
+
+ bool adjacent; /* delegated vhca from adjacent function */
+ struct {
+ u16 parent_pci_devfn; /* Adjacent parent PCI device function */
+ u16 function_id; /* Function ID of the delegated VPort */
+ } adj_info;
struct mlx5_vport_info info;
+ /* Protected with the E-Switch qos domain lock. The Vport QoS can
+ * either be disabled (sched_node is NULL) or be in one of three states:
+ * 1. Regular QoS (sched_node is a vport node).
+ * 2. TC QoS enabled on the vport (sched_node is a TC arbiter).
+ * 3. TC QoS enabled on the vport's parent node
+ * (sched_node is a rate limit node).
+ * When TC is enabled in either mode, the vport owns vport TC scheduling
+ * nodes.
+ */
struct {
- bool enabled;
- u32 esw_tsar_ix;
- u32 bw_share;
- u32 min_rate;
- u32 max_rate;
- struct mlx5_esw_rate_group *group;
+ /* Vport scheduling node. */
+ struct mlx5_esw_sched_node *sched_node;
+ /* Array of vport traffic class scheduling nodes. */
+ struct mlx5_esw_sched_node **sched_nodes;
} qos;
u16 vport;
bool enabled;
+ bool max_eqs_set;
enum mlx5_eswitch_vport_event enabled_events;
int index;
struct mlx5_devlink_port *dl_port;
@@ -244,6 +264,9 @@ struct mlx5_eswitch_fdb {
struct offloads_fdb {
struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_table *drop_root;
+ struct mlx5_flow_handle *drop_root_rule;
+ struct mlx5_fc *drop_root_fc;
struct mlx5_flow_table *tc_miss_table;
struct mlx5_flow_table *slow_fdb;
struct mlx5_flow_group *send_to_vport_grp;
@@ -315,6 +338,7 @@ struct mlx5_host_work {
struct mlx5_esw_functions {
struct mlx5_nb nb;
+ bool host_funcs_disabled;
u16 num_vfs;
u16 num_ec_vfs;
};
@@ -332,6 +356,7 @@ enum {
};
struct dentry;
+struct mlx5_qos_domain;
struct mlx5_eswitch {
struct mlx5_core_dev *dev;
@@ -358,20 +383,19 @@ struct mlx5_eswitch {
struct rw_semaphore mode_lock;
atomic64_t user_count;
+ /* Protected with the E-Switch qos domain lock. */
struct {
- u32 root_tsar_ix;
- struct mlx5_esw_rate_group *group0;
- struct list_head groups; /* Protected by esw->state_lock */
-
- /* Protected by esw->state_lock.
- * Initially 0, meaning no QoS users and QoS is disabled.
- */
+ /* Initially 0, meaning no QoS users and QoS is disabled. */
refcount_t refcnt;
+ u32 root_tsar_ix;
+ struct mlx5_qos_domain *domain;
} qos;
struct mlx5_esw_bridge_offloads *br_offloads;
struct mlx5_esw_offload offloads;
+ u32 last_vport_idx;
int mode;
+ bool offloads_inactive;
u16 manager_vport;
u16 first_host_vport;
u8 num_peers;
@@ -379,7 +403,6 @@ struct mlx5_eswitch {
struct {
u32 large_group_num;
} params;
- struct blocking_notifier_head n_head;
struct xarray paired;
struct mlx5_devcom_comp_dev *devcom;
u16 enabled_ipsec_vf_count;
@@ -404,6 +427,8 @@ int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32
/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
+int mlx5_esw_vport_alloc(struct mlx5_eswitch *esw, int index, u16 vport_num);
+void mlx5_esw_vport_free(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
#define MLX5_ESWITCH_IGNORE_NUM_VFS (-1)
int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs);
@@ -411,7 +436,8 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs);
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf);
void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw);
void mlx5_eswitch_disable(struct mlx5_eswitch *esw);
-void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key);
+void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw,
+ const struct mlx5_devcom_match_attr *attr);
void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw);
bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw);
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
@@ -426,10 +452,8 @@ int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
u16 vport_num, bool setting);
int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
u32 max_rate, u32 min_rate);
-int mlx5_esw_qos_vport_update_group(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport,
- struct mlx5_esw_rate_group *group,
- struct netlink_ext_ack *extack);
+int mlx5_esw_qos_vport_update_parent(struct mlx5_vport *vport, struct mlx5_esw_sched_node *node,
+ struct netlink_ext_ack *extack);
int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting);
int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting);
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
@@ -573,6 +597,15 @@ int mlx5_devlink_port_fn_ipsec_packet_get(struct devlink_port *port, bool *is_en
int mlx5_devlink_port_fn_ipsec_packet_set(struct devlink_port *port, bool enable,
struct netlink_ext_ack *extack);
#endif /* CONFIG_XFRM_OFFLOAD */
+int mlx5_devlink_port_fn_max_io_eqs_get(struct devlink_port *port,
+ u32 *max_io_eqs,
+ struct netlink_ext_ack *extack);
+int mlx5_devlink_port_fn_max_io_eqs_set(struct devlink_port *port,
+ u32 max_io_eqs,
+ struct netlink_ext_ack *extack);
+int mlx5_devlink_port_fn_max_io_eqs_set_sf_default(struct devlink_port *port,
+ struct netlink_ext_ack *extack);
+
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);
int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
@@ -602,6 +635,11 @@ bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev);
+void mlx5_esw_adjacent_vhcas_setup(struct mlx5_eswitch *esw);
+void mlx5_esw_adjacent_vhcas_cleanup(struct mlx5_eswitch *esw);
+int mlx5_esw_adj_vport_modify(struct mlx5_core_dev *dev, u16 vport,
+ bool connect);
+
#define MLX5_DEBUG_ESWITCH_MASK BIT(3)
#define esw_info(__dev, format, ...) \
@@ -706,6 +744,9 @@ void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
MLX5_CAP_GEN_2((esw->dev), ec_vf_vport_base) +\
(last) - 1)
+#define mlx5_esw_for_each_rep(esw, i, rep) \
+ xa_for_each(&((esw)->offloads.vport_reps), i, rep)
+
struct mlx5_eswitch *__must_check
mlx5_devlink_eswitch_get(struct devlink *devlink);
@@ -796,17 +837,25 @@ int mlx5_esw_offloads_sf_devlink_port_init(struct mlx5_eswitch *esw, struct mlx5
void mlx5_esw_offloads_sf_devlink_port_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
-void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
+void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_vport *vport);
struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *sf_base_id);
-int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num);
-void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num);
+int mlx5_esw_vport_vhca_id_map(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport);
+void mlx5_esw_vport_vhca_id_unmap(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport);
int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num);
+bool mlx5_esw_vport_vhca_id(struct mlx5_eswitch *esw, u16 vportn, u16 *vhca_id);
+
+void mlx5_esw_offloads_rep_remove(struct mlx5_eswitch *esw,
+ const struct mlx5_vport *vport);
+int mlx5_esw_offloads_rep_add(struct mlx5_eswitch *esw,
+ const struct mlx5_vport *vport);
/**
- * mlx5_esw_event_info - Indicates eswitch mode changed/changing.
+ * struct mlx5_esw_event_info - Indicates eswitch mode changed/changing.
*
* @new_mode: New mode of eswitch.
*/
@@ -814,8 +863,10 @@ struct mlx5_esw_event_info {
u16 new_mode;
};
-int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *n);
-void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *n);
+int mlx5_esw_event_notifier_register(struct mlx5_core_dev *dev,
+ struct notifier_block *n);
+void mlx5_esw_event_notifier_unregister(struct mlx5_core_dev *dev,
+ struct notifier_block *n);
bool mlx5_esw_hold(struct mlx5_core_dev *dev);
void mlx5_esw_release(struct mlx5_core_dev *dev);
@@ -833,9 +884,9 @@ int mlx5_eswitch_offloads_single_fdb_add_one(struct mlx5_eswitch *master_esw,
struct mlx5_eswitch *slave_esw, int max_slaves);
void mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
struct mlx5_eswitch *slave_esw);
-int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw);
+int mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw);
-bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev);
+bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev, bool from_fdb);
void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev);
int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev);
@@ -877,9 +928,7 @@ int mlx5_esw_ipsec_vf_packet_offload_set(struct mlx5_eswitch *esw, struct mlx5_v
bool enable);
int mlx5_esw_ipsec_vf_packet_offload_supported(struct mlx5_core_dev *dev,
u16 vport_num);
-void mlx5_esw_vport_ipsec_offload_enable(struct mlx5_eswitch *esw);
-void mlx5_esw_vport_ipsec_offload_disable(struct mlx5_eswitch *esw);
-
+bool mlx5_esw_host_functions_enabled(const struct mlx5_core_dev *dev);
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
@@ -887,7 +936,9 @@ static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; }
static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf) {}
static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {}
-static inline void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key) {}
+static inline void
+mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw,
+ const struct mlx5_devcom_match_attr *attr) {}
static inline void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) {}
static inline bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw) { return false; }
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
@@ -925,12 +976,13 @@ mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
static inline int mlx5_eswitch_get_npeers(struct mlx5_eswitch *esw) { return 0; }
static inline int
-mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
+mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw)
{
return 0;
}
-static inline bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev)
+static inline bool
+mlx5_eswitch_block_encap(struct mlx5_core_dev *dev, bool from_fdb)
{
return true;
}
@@ -947,6 +999,19 @@ static inline bool mlx5_eswitch_block_ipsec(struct mlx5_core_dev *dev)
}
static inline void mlx5_eswitch_unblock_ipsec(struct mlx5_core_dev *dev) {}
+
+static inline bool
+mlx5_esw_host_functions_enabled(const struct mlx5_core_dev *dev)
+{
+ return true;
+}
+
+static inline bool
+mlx5_esw_vport_vhca_id(struct mlx5_eswitch *esw, u16 vportn, u16 *vhca_id)
+{
+ return false;
+}
+
#endif /* CONFIG_MLX5_ESWITCH */
#endif /* __MLX5_ESWITCH_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 844d3e3a65dd..8de6c7f6c294 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -53,9 +53,6 @@
#include "lag/lag.h"
#include "en/tc/post_meter.h"
-#define mlx5_esw_for_each_rep(esw, i, rep) \
- xa_for_each(&((esw)->offloads.vport_reps), i, rep)
-
/* There are two match-all miss flows, one for unicast dst mac and
* one for multicast.
*/
@@ -67,6 +64,9 @@
#define MLX5_ESW_FT_OFFLOADS_DROP_RULE (1)
+#define MLX5_ESW_MAX_CTRL_EQS 4
+#define MLX5_ESW_DEFAULT_SF_COMP_EQS 8
+
static struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = {
.max_fte = MLX5_ESW_VPORT_TBL_SIZE,
.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS,
@@ -610,6 +610,13 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
}
}
+ if (attr->extra_split_ft) {
+ flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[*i].ft = attr->extra_split_ft;
+ (*i)++;
+ }
+
out:
return err;
}
@@ -641,6 +648,7 @@ esw_setup_meter(struct mlx5_flow_attr *attr, struct mlx5_flow_act *flow_act)
meter = attr->meter_attr.meter;
flow_act->exe_aso.type = attr->exe_aso_type;
flow_act->exe_aso.object_id = meter->obj_id;
+ flow_act->exe_aso.base_id = mlx5e_flow_meter_get_base_id(meter);
flow_act->exe_aso.flow_meter.meter_idx = meter->idx;
flow_act->exe_aso.flow_meter.init_color = MLX5_FLOW_METER_COLOR_GREEN;
/* use metadata reg 5 for packet color */
@@ -714,7 +722,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[i].counter_id = mlx5_fc_id(attr->counter);
+ dest[i].counter = attr->counter;
i++;
}
@@ -1008,8 +1016,8 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(on_esw),
spec, &flow_act, &dest, 1);
if (IS_ERR(flow_rule))
- esw_warn(on_esw->dev, "FDB: Failed to add send to vport rule err %ld\n",
- PTR_ERR(flow_rule));
+ esw_warn(on_esw->dev, "FDB: Failed to add send to vport rule err %pe\n",
+ flow_rule);
out:
kvfree(spec);
return flow_rule;
@@ -1057,8 +1065,8 @@ mlx5_eswitch_add_send_to_vport_meta_rule(struct mlx5_eswitch *esw, u16 vport_num
flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
spec, &flow_act, &dest, 1);
if (IS_ERR(flow_rule))
- esw_warn(esw->dev, "FDB: Failed to add send to vport meta rule vport %d, err %ld\n",
- vport_num, PTR_ERR(flow_rule));
+ esw_warn(esw->dev, "FDB: Failed to add send to vport meta rule vport %d, err %pe\n",
+ vport_num, flow_rule);
kvfree(spec);
return flow_rule;
@@ -1174,19 +1182,19 @@ static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw,
static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
struct mlx5_core_dev *peer_dev)
{
+ struct mlx5_eswitch *peer_esw = peer_dev->priv.eswitch;
struct mlx5_flow_destination dest = {};
struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_handle **flows;
- /* total vports is the same for both e-switches */
- int nvports = esw->total_vports;
struct mlx5_flow_handle *flow;
+ struct mlx5_vport *peer_vport;
struct mlx5_flow_spec *spec;
- struct mlx5_vport *vport;
int err, pfindex;
unsigned long i;
void *misc;
- if (!MLX5_VPORT_MANAGER(esw->dev) && !mlx5_core_is_ecpf_esw_manager(esw->dev))
+ if (!MLX5_VPORT_MANAGER(peer_dev) &&
+ !mlx5_core_is_ecpf_esw_manager(peer_dev))
return 0;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
@@ -1195,7 +1203,7 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
peer_miss_rules_setup(esw, peer_dev, spec, &dest);
- flows = kvcalloc(nvports, sizeof(*flows), GFP_KERNEL);
+ flows = kvcalloc(peer_esw->total_vports, sizeof(*flows), GFP_KERNEL);
if (!flows) {
err = -ENOMEM;
goto alloc_flows_err;
@@ -1205,10 +1213,11 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
misc_parameters);
- if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
- esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
- spec, MLX5_VPORT_PF);
+ if (mlx5_core_is_ecpf_esw_manager(peer_dev) &&
+ mlx5_esw_host_functions_enabled(peer_dev)) {
+ peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF);
+ esw_set_peer_miss_rule_source_port(esw, peer_esw, spec,
+ MLX5_VPORT_PF);
flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
spec, &flow_act, &dest, 1);
@@ -1216,11 +1225,11 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
err = PTR_ERR(flow);
goto add_pf_flow_err;
}
- flows[vport->index] = flow;
+ flows[peer_vport->index] = flow;
}
- if (mlx5_ecpf_vport_exists(esw->dev)) {
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
+ if (mlx5_ecpf_vport_exists(peer_dev)) {
+ peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_ECPF);
MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
spec, &flow_act, &dest, 1);
@@ -1228,36 +1237,39 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
err = PTR_ERR(flow);
goto add_ecpf_flow_err;
}
- flows[vport->index] = flow;
+ flows[peer_vport->index] = flow;
}
- mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
- esw_set_peer_miss_rule_source_port(esw,
- peer_dev->priv.eswitch,
- spec, vport->vport);
+ if (mlx5_esw_host_functions_enabled(esw->dev)) {
+ mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport,
+ mlx5_core_max_vfs(peer_dev)) {
+ esw_set_peer_miss_rule_source_port(esw, peer_esw,
+ spec,
+ peer_vport->vport);
- flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
- spec, &flow_act, &dest, 1);
- if (IS_ERR(flow)) {
- err = PTR_ERR(flow);
- goto add_vf_flow_err;
+ flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
+ spec, &flow_act, &dest, 1);
+ if (IS_ERR(flow)) {
+ err = PTR_ERR(flow);
+ goto add_vf_flow_err;
+ }
+ flows[peer_vport->index] = flow;
}
- flows[vport->index] = flow;
}
- if (mlx5_core_ec_sriov_enabled(esw->dev)) {
- mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
- if (i >= mlx5_core_max_ec_vfs(peer_dev))
- break;
- esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
- spec, vport->vport);
+ if (mlx5_core_ec_sriov_enabled(peer_dev)) {
+ mlx5_esw_for_each_ec_vf_vport(peer_esw, i, peer_vport,
+ mlx5_core_max_ec_vfs(peer_dev)) {
+ esw_set_peer_miss_rule_source_port(esw, peer_esw,
+ spec,
+ peer_vport->vport);
flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
spec, &flow_act, &dest, 1);
if (IS_ERR(flow)) {
err = PTR_ERR(flow);
goto add_ec_vf_flow_err;
}
- flows[vport->index] = flow;
+ flows[peer_vport->index] = flow;
}
}
@@ -1274,25 +1286,29 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
return 0;
add_ec_vf_flow_err:
- mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
- if (!flows[vport->index])
+ mlx5_esw_for_each_ec_vf_vport(peer_esw, i, peer_vport,
+ mlx5_core_max_ec_vfs(peer_dev)) {
+ if (!flows[peer_vport->index])
continue;
- mlx5_del_flow_rules(flows[vport->index]);
+ mlx5_del_flow_rules(flows[peer_vport->index]);
}
add_vf_flow_err:
- mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
- if (!flows[vport->index])
+ mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport,
+ mlx5_core_max_vfs(peer_dev)) {
+ if (!flows[peer_vport->index])
continue;
- mlx5_del_flow_rules(flows[vport->index]);
+ mlx5_del_flow_rules(flows[peer_vport->index]);
}
- if (mlx5_ecpf_vport_exists(esw->dev)) {
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
- mlx5_del_flow_rules(flows[vport->index]);
+ if (mlx5_ecpf_vport_exists(peer_dev)) {
+ peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_ECPF);
+ mlx5_del_flow_rules(flows[peer_vport->index]);
}
add_ecpf_flow_err:
- if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
- mlx5_del_flow_rules(flows[vport->index]);
+
+ if (mlx5_core_is_ecpf_esw_manager(peer_dev) &&
+ mlx5_esw_host_functions_enabled(peer_dev)) {
+ peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF);
+ mlx5_del_flow_rules(flows[peer_vport->index]);
}
add_pf_flow_err:
esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
@@ -1305,37 +1321,34 @@ alloc_flows_err:
static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
struct mlx5_core_dev *peer_dev)
{
+ struct mlx5_eswitch *peer_esw = peer_dev->priv.eswitch;
u16 peer_index = mlx5_get_dev_index(peer_dev);
struct mlx5_flow_handle **flows;
- struct mlx5_vport *vport;
+ struct mlx5_vport *peer_vport;
unsigned long i;
flows = esw->fdb_table.offloads.peer_miss_rules[peer_index];
if (!flows)
return;
- if (mlx5_core_ec_sriov_enabled(esw->dev)) {
- mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
- /* The flow for a particular vport could be NULL if the other ECPF
- * has fewer or no VFs enabled
- */
- if (!flows[vport->index])
- continue;
- mlx5_del_flow_rules(flows[vport->index]);
- }
+ if (mlx5_core_ec_sriov_enabled(peer_dev)) {
+ mlx5_esw_for_each_ec_vf_vport(peer_esw, i, peer_vport,
+ mlx5_core_max_ec_vfs(peer_dev))
+ mlx5_del_flow_rules(flows[peer_vport->index]);
}
- mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev))
- mlx5_del_flow_rules(flows[vport->index]);
+ mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport,
+ mlx5_core_max_vfs(peer_dev))
+ mlx5_del_flow_rules(flows[peer_vport->index]);
- if (mlx5_ecpf_vport_exists(esw->dev)) {
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
- mlx5_del_flow_rules(flows[vport->index]);
+ if (mlx5_ecpf_vport_exists(peer_dev)) {
+ peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_ECPF);
+ mlx5_del_flow_rules(flows[peer_vport->index]);
}
- if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
- mlx5_del_flow_rules(flows[vport->index]);
+ if (mlx5_core_is_ecpf_esw_manager(peer_dev)) {
+ peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF);
+ mlx5_del_flow_rules(flows[peer_vport->index]);
}
kvfree(flows);
@@ -1564,6 +1577,7 @@ esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
attr.max_grp_num = esw->params.large_group_num;
attr.default_ft = miss_fdb;
attr.mapping = esw->offloads.reg_c0_obj_pool;
+ attr.fs_base_prio = FDB_BYPASS_PATH;
chains = mlx5_chains_create(dev, &attr);
if (IS_ERR(chains)) {
@@ -1965,7 +1979,6 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
/* Holds true only as long as DMFS is the default */
mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
MLX5_FLOW_STEERING_MODE_DMFS);
- atomic64_set(&esw->user_count, 0);
}
static int esw_get_nr_ft_offloads_steering_src_ports(struct mlx5_eswitch *esw)
@@ -2146,7 +2159,9 @@ mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
&flow_act, dest, 1);
if (IS_ERR(flow_rule)) {
- esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
+ esw_warn(esw->dev,
+ "fs offloads: Failed to add vport rx rule err %pe\n",
+ flow_rule);
goto out;
}
@@ -2165,8 +2180,8 @@ static int esw_create_vport_rx_drop_rule(struct mlx5_eswitch *esw)
&flow_act, NULL, 0);
if (IS_ERR(flow_rule)) {
esw_warn(esw->dev,
- "fs offloads: Failed to add vport rx drop rule err %ld\n",
- PTR_ERR(flow_rule));
+ "fs offloads: Failed to add vport rx drop rule err %pe\n",
+ flow_rule);
return PTR_ERR(flow_rule);
}
@@ -2322,18 +2337,161 @@ out_free:
return err;
}
+static void esw_mode_change(struct mlx5_eswitch *esw, u16 mode)
+{
+ mlx5_devcom_comp_lock(esw->dev->priv.hca_devcom_comp);
+ if (esw->dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV ||
+ mlx5_core_mp_enabled(esw->dev)) {
+ esw->mode = mode;
+ mlx5_rescan_drivers_locked(esw->dev);
+ mlx5_devcom_comp_unlock(esw->dev->priv.hca_devcom_comp);
+ return;
+ }
+
+ esw->dev->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
+ mlx5_rescan_drivers_locked(esw->dev);
+ esw->mode = mode;
+ esw->dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
+ mlx5_rescan_drivers_locked(esw->dev);
+ mlx5_devcom_comp_unlock(esw->dev->priv.hca_devcom_comp);
+}
+
+static void mlx5_esw_fdb_drop_destroy(struct mlx5_eswitch *esw)
+{
+ if (!esw->fdb_table.offloads.drop_root)
+ return;
+
+ esw_debug(esw->dev, "Destroying FDB drop root table %#x fc %#x\n",
+ esw->fdb_table.offloads.drop_root->id,
+ esw->fdb_table.offloads.drop_root_fc->id);
+ mlx5_del_flow_rules(esw->fdb_table.offloads.drop_root_rule);
+ /* Don't free flow counter here, can be reused on a later activation */
+ mlx5_destroy_flow_table(esw->fdb_table.offloads.drop_root);
+ esw->fdb_table.offloads.drop_root_rule = NULL;
+ esw->fdb_table.offloads.drop_root = NULL;
+}
+
+static int mlx5_esw_fdb_drop_create(struct mlx5_eswitch *esw)
+{
+ struct mlx5_flow_destination drop_fc_dst = {};
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_destination *dst = NULL;
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_flow_namespace *root_ns;
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *flow_rule;
+ struct mlx5_flow_table *table;
+ int err = 0, dst_num = 0;
+
+ if (esw->fdb_table.offloads.drop_root)
+ return 0;
+
+ root_ns = esw->fdb_table.offloads.ns;
+
+ ft_attr.prio = FDB_DROP_ROOT;
+ ft_attr.max_fte = 1;
+ ft_attr.autogroup.max_num_groups = 1;
+ table = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
+ if (IS_ERR(table)) {
+ esw_warn(dev, "Failed to create fdb drop root table, err %pe\n",
+ table);
+ return PTR_ERR(table);
+ }
+
+ /* Drop FC reusable, create once on first deactivation of FDB */
+ if (!esw->fdb_table.offloads.drop_root_fc) {
+ struct mlx5_fc *counter = mlx5_fc_create(dev, 0);
+
+ err = PTR_ERR_OR_ZERO(counter);
+ if (err)
+ esw_warn(esw->dev, "create fdb drop fc err %d\n", err);
+ else
+ esw->fdb_table.offloads.drop_root_fc = counter;
+ }
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+
+ if (esw->fdb_table.offloads.drop_root_fc) {
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ drop_fc_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ drop_fc_dst.counter = esw->fdb_table.offloads.drop_root_fc;
+ dst = &drop_fc_dst;
+ dst_num++;
+ }
+
+ flow_rule = mlx5_add_flow_rules(table, NULL, &flow_act, dst, dst_num);
+ err = PTR_ERR_OR_ZERO(flow_rule);
+ if (err) {
+ esw_warn(esw->dev,
+ "fs offloads: Failed to add vport rx drop rule err %d\n",
+ err);
+ goto err_flow_rule;
+ }
+
+ esw->fdb_table.offloads.drop_root = table;
+ esw->fdb_table.offloads.drop_root_rule = flow_rule;
+ esw_debug(esw->dev, "Created FDB drop root table %#x fc %#x\n",
+ table->id, dst ? dst->counter->id : 0);
+ return 0;
+
+err_flow_rule:
+ /* no need to free drop fc, esw_offloads_steering_cleanup will do it */
+ mlx5_destroy_flow_table(table);
+ return err;
+}
+
+static void mlx5_esw_fdb_active(struct mlx5_eswitch *esw)
+{
+ struct mlx5_vport *vport;
+ unsigned long i;
+
+ mlx5_esw_fdb_drop_destroy(esw);
+ mlx5_mpfs_enable(esw->dev);
+
+ mlx5_esw_for_each_vf_vport(esw, i, vport, U16_MAX) {
+ if (!vport->adjacent)
+ continue;
+ esw_debug(esw->dev, "Connecting vport %d to eswitch\n",
+ vport->vport);
+ mlx5_esw_adj_vport_modify(esw->dev, vport->vport, true);
+ }
+
+ esw->offloads_inactive = false;
+ esw_warn(esw->dev, "MPFS/FDB active\n");
+}
+
+static void mlx5_esw_fdb_inactive(struct mlx5_eswitch *esw)
+{
+ struct mlx5_vport *vport;
+ unsigned long i;
+
+ mlx5_mpfs_disable(esw->dev);
+ mlx5_esw_fdb_drop_create(esw);
+
+ mlx5_esw_for_each_vf_vport(esw, i, vport, U16_MAX) {
+ if (!vport->adjacent)
+ continue;
+ esw_debug(esw->dev, "Disconnecting vport %u from eswitch\n",
+ vport->vport);
+
+ mlx5_esw_adj_vport_modify(esw->dev, vport->vport, false);
+ }
+
+ esw->offloads_inactive = true;
+ esw_warn(esw->dev, "MPFS/FDB inactive\n");
+}
+
static int esw_offloads_start(struct mlx5_eswitch *esw,
struct netlink_ext_ack *extack)
{
int err;
- esw->mode = MLX5_ESWITCH_OFFLOADS;
+ esw_mode_change(esw, MLX5_ESWITCH_OFFLOADS);
err = mlx5_eswitch_enable_locked(esw, esw->dev->priv.sriov.num_vfs);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
"Failed setting eswitch to offloads");
- esw->mode = MLX5_ESWITCH_LEGACY;
- mlx5_rescan_drivers(esw->dev);
+ esw_mode_change(esw, MLX5_ESWITCH_LEGACY);
return err;
}
if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
@@ -2347,7 +2505,20 @@ static int esw_offloads_start(struct mlx5_eswitch *esw,
return 0;
}
-static int mlx5_esw_offloads_rep_init(struct mlx5_eswitch *esw, const struct mlx5_vport *vport)
+void mlx5_esw_offloads_rep_remove(struct mlx5_eswitch *esw,
+ const struct mlx5_vport *vport)
+{
+ struct mlx5_eswitch_rep *rep = xa_load(&esw->offloads.vport_reps,
+ vport->vport);
+
+ if (!rep)
+ return;
+ xa_erase(&esw->offloads.vport_reps, vport->vport);
+ kfree(rep);
+}
+
+int mlx5_esw_offloads_rep_add(struct mlx5_eswitch *esw,
+ const struct mlx5_vport *vport)
{
struct mlx5_eswitch_rep *rep;
int rep_type;
@@ -2359,9 +2530,19 @@ static int mlx5_esw_offloads_rep_init(struct mlx5_eswitch *esw, const struct mlx
rep->vport = vport->vport;
rep->vport_index = vport->index;
- for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
- atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
-
+ for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
+ if (!esw->offloads.rep_ops[rep_type]) {
+ atomic_set(&rep->rep_data[rep_type].state,
+ REP_UNREGISTERED);
+ continue;
+ }
+ /* Dynamic/delegated vports add their representors after
+ * mlx5_eswitch_register_vport_reps, so mark them as registered
+ * for them to be loaded later with the others.
+ */
+ rep->esw = esw;
+ atomic_set(&rep->rep_data[rep_type].state, REP_REGISTERED);
+ }
err = xa_insert(&esw->offloads.vport_reps, rep->vport, rep, GFP_KERNEL);
if (err)
goto insert_err;
@@ -2399,7 +2580,7 @@ static int esw_offloads_init_reps(struct mlx5_eswitch *esw)
xa_init(&esw->offloads.vport_reps);
mlx5_esw_for_each_vport(esw, i, vport) {
- err = mlx5_esw_offloads_rep_init(esw, vport);
+ err = mlx5_esw_offloads_rep_add(esw, vport);
if (err)
goto err;
}
@@ -2411,7 +2592,8 @@ err:
}
static int esw_port_metadata_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
struct mlx5_eswitch *esw = dev->priv.eswitch;
@@ -2436,7 +2618,8 @@ done:
}
static int esw_port_metadata_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
@@ -2502,12 +2685,25 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw)
esw_offloads_cleanup_reps(esw);
}
+static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep, u8 rep_type)
+{
+ if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
+ REP_REGISTERED, REP_LOADED) == REP_REGISTERED)
+ return esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
+
+ return 0;
+}
+
static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep, u8 rep_type)
{
if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
- REP_LOADED, REP_REGISTERED) == REP_LOADED)
+ REP_LOADED, REP_REGISTERED) == REP_LOADED) {
+ if (rep_type == REP_ETH)
+ __esw_offloads_unload_rep(esw, rep, REP_IB);
esw->offloads.rep_ops[rep_type]->unload(rep);
+ }
}
static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
@@ -2526,13 +2722,11 @@ static int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num)
int err;
rep = mlx5_eswitch_get_rep(esw, vport_num);
- for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
- if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
- REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
- err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
- if (err)
- goto err_reps;
- }
+ for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
+ err = __esw_offloads_load_rep(esw, rep, rep_type);
+ if (err)
+ goto err_reps;
+ }
return 0;
@@ -2598,7 +2792,7 @@ int mlx5_esw_offloads_load_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vpor
return err;
load_err:
- mlx5_esw_offloads_devlink_port_unregister(esw, vport);
+ mlx5_esw_offloads_devlink_port_unregister(vport);
return err;
}
@@ -2609,7 +2803,7 @@ void mlx5_esw_offloads_unload_rep(struct mlx5_eswitch *esw, struct mlx5_vport *v
mlx5_esw_offloads_rep_unload(esw, vport->vport);
- mlx5_esw_offloads_devlink_port_unregister(esw, vport);
+ mlx5_esw_offloads_devlink_port_unregister(vport);
}
static int esw_set_slave_root_fdb(struct mlx5_core_dev *master,
@@ -2791,9 +2985,9 @@ static int esw_set_master_egress_rule(struct mlx5_core_dev *master,
if (IS_ERR(vport))
return PTR_ERR(vport);
- egress_ns = mlx5_get_flow_vport_acl_namespace(master,
- MLX5_FLOW_NAMESPACE_ESW_EGRESS,
- vport->index);
+ egress_ns = mlx5_get_flow_vport_namespace(master,
+ MLX5_FLOW_NAMESPACE_ESW_EGRESS,
+ vport->index);
if (!egress_ns)
return -EINVAL;
@@ -3038,7 +3232,8 @@ err_out:
return err;
}
-void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key)
+void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw,
+ const struct mlx5_devcom_match_attr *attr)
{
int i;
@@ -3057,10 +3252,10 @@ void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key)
esw->num_peers = 0;
esw->devcom = mlx5_devcom_register_component(esw->dev->priv.devc,
MLX5_DEVCOM_ESW_OFFLOADS,
- key,
+ attr,
mlx5_esw_offloads_devcom_event,
esw);
- if (IS_ERR(esw->devcom))
+ if (!esw->devcom)
return;
mlx5_devcom_send_event(esw->devcom,
@@ -3071,7 +3266,7 @@ void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key)
void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
{
- if (IS_ERR_OR_NULL(esw->devcom))
+ if (!esw->devcom)
return;
mlx5_devcom_send_event(esw->devcom,
@@ -3277,7 +3472,7 @@ static void esw_destroy_offloads_acl_tables(struct mlx5_eswitch *esw)
esw_vport_destroy_offloads_acl_tables(esw, vport);
}
-int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
+int mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw)
{
struct mlx5_eswitch_rep *rep;
unsigned long i;
@@ -3290,13 +3485,13 @@ int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
return 0;
- ret = mlx5_esw_offloads_rep_load(esw, MLX5_VPORT_UPLINK);
+ ret = __esw_offloads_load_rep(esw, rep, REP_IB);
if (ret)
return ret;
mlx5_esw_for_each_rep(esw, i, rep) {
if (atomic_read(&rep->rep_data[REP_ETH].state) == REP_LOADED)
- mlx5_esw_offloads_rep_load(esw, rep->vport);
+ __esw_offloads_load_rep(esw, rep, REP_IB);
}
return 0;
@@ -3370,6 +3565,10 @@ create_indir_err:
static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
{
+ mlx5_esw_fdb_drop_destroy(esw);
+ if (esw->fdb_table.offloads.drop_root_fc)
+ mlx5_fc_destroy(esw->dev, esw->fdb_table.offloads.drop_root_fc);
+ esw->fdb_table.offloads.drop_root_fc = NULL;
esw_destroy_vport_rx_drop_rule(esw);
esw_destroy_vport_rx_drop_group(esw);
esw_destroy_vport_rx_group(esw);
@@ -3488,14 +3687,19 @@ bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 cont
int esw_offloads_enable(struct mlx5_eswitch *esw)
{
+ u8 mapping_id[MLX5_SW_IMAGE_GUID_MAX_BYTES];
struct mapping_ctx *reg_c0_obj_pool;
struct mlx5_vport *vport;
unsigned long i;
- u64 mapping_id;
+ u8 id_len;
int err;
mutex_init(&esw->offloads.termtbl_mutex);
- mlx5_rdma_enable_roce(esw->dev);
+ mlx5_esw_adjacent_vhcas_setup(esw);
+
+ err = mlx5_rdma_enable_roce(esw->dev);
+ if (err)
+ goto err_roce;
err = mlx5_esw_host_number_init(esw);
if (err)
@@ -3509,9 +3713,10 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
if (err)
goto err_vport_metadata;
- mapping_id = mlx5_query_nic_system_image_guid(esw->dev);
+ mlx5_query_nic_sw_system_image_guid(esw->dev, mapping_id, &id_len);
- reg_c0_obj_pool = mapping_create_for_id(mapping_id, MAPPING_TYPE_CHAIN,
+ reg_c0_obj_pool = mapping_create_for_id(mapping_id, id_len,
+ MAPPING_TYPE_CHAIN,
sizeof(struct mlx5_mapped_obj),
ESW_REG_C0_USER_DATA_METADATA_MASK,
true);
@@ -3526,6 +3731,11 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
if (err)
goto err_steering_init;
+ if (esw->offloads_inactive)
+ mlx5_esw_fdb_inactive(esw);
+ else
+ mlx5_esw_fdb_active(esw);
+
/* Representor will control the vport link state */
mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;
@@ -3556,6 +3766,8 @@ err_vport_metadata:
esw_offloads_metadata_uninit(esw);
err_metadata:
mlx5_rdma_disable_roce(esw->dev);
+err_roce:
+ mlx5_esw_adjacent_vhcas_cleanup(esw);
mutex_destroy(&esw->offloads.termtbl_mutex);
return err;
}
@@ -3565,7 +3777,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
{
int err;
- esw->mode = MLX5_ESWITCH_LEGACY;
+ esw_mode_change(esw, MLX5_ESWITCH_LEGACY);
/* If changing from switchdev to legacy mode without sriov enabled,
* no need to create legacy fdb.
@@ -3589,6 +3801,10 @@ void esw_offloads_disable(struct mlx5_eswitch *esw)
mapping_destroy(esw->offloads.reg_c0_obj_pool);
esw_offloads_metadata_uninit(esw);
mlx5_rdma_disable_roce(esw->dev);
+ mlx5_esw_adjacent_vhcas_cleanup(esw);
+ /* must be done after vhcas cleanup to avoid adjacent vports connecting */
+ if (esw->offloads_inactive)
+ mlx5_esw_fdb_active(esw); /* legacy mode always active */
mutex_destroy(&esw->offloads.termtbl_mutex);
}
@@ -3599,6 +3815,7 @@ static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
*mlx5_mode = MLX5_ESWITCH_LEGACY;
break;
case DEVLINK_ESWITCH_MODE_SWITCHDEV:
+ case DEVLINK_ESWITCH_MODE_SWITCHDEV_INACTIVE:
*mlx5_mode = MLX5_ESWITCH_OFFLOADS;
break;
default:
@@ -3608,14 +3825,17 @@ static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
return 0;
}
-static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
+static int esw_mode_to_devlink(struct mlx5_eswitch *esw, u16 *mode)
{
- switch (mlx5_mode) {
+ switch (esw->mode) {
case MLX5_ESWITCH_LEGACY:
*mode = DEVLINK_ESWITCH_MODE_LEGACY;
break;
case MLX5_ESWITCH_OFFLOADS:
- *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
+ if (esw->offloads_inactive)
+ *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV_INACTIVE;
+ else
+ *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
break;
default:
return -EINVAL;
@@ -3698,6 +3918,68 @@ void mlx5_eswitch_unblock_mode(struct mlx5_core_dev *dev)
up_write(&esw->mode_lock);
}
+/* Returns false only when uplink netdev exists and its netns is different from
+ * devlink's netns. True otherwise, so entering switchdev mode is allowed.
+ */
+static bool mlx5_devlink_netdev_netns_immutable_set(struct devlink *devlink,
+ bool immutable)
+{
+ struct mlx5_core_dev *mdev = devlink_priv(devlink);
+ struct net_device *netdev;
+ bool ret;
+
+ netdev = mlx5_uplink_netdev_get(mdev);
+ if (!netdev)
+ return true;
+
+ rtnl_lock();
+ netdev->netns_immutable = immutable;
+ ret = net_eq(dev_net(netdev), devlink_net(devlink));
+ rtnl_unlock();
+
+ mlx5_uplink_netdev_put(mdev, netdev);
+ return ret;
+}
+
+/* Returns true when only changing between active and inactive switchdev mode */
+static bool mlx5_devlink_switchdev_active_mode_change(struct mlx5_eswitch *esw,
+ u16 devlink_mode)
+{
+ /* current mode is not switchdev */
+ if (esw->mode != MLX5_ESWITCH_OFFLOADS)
+ return false;
+
+ /* new mode is not switchdev */
+ if (devlink_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV &&
+ devlink_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV_INACTIVE)
+ return false;
+
+ /* already inactive: no change in current state */
+ if (devlink_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV_INACTIVE &&
+ esw->offloads_inactive)
+ return false;
+
+ /* already active: no change in current state */
+ if (devlink_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV &&
+ !esw->offloads_inactive)
+ return false;
+
+ down_write(&esw->mode_lock);
+ esw->offloads_inactive = !esw->offloads_inactive;
+ esw->eswitch_operation_in_progress = true;
+ up_write(&esw->mode_lock);
+
+ if (esw->offloads_inactive)
+ mlx5_esw_fdb_inactive(esw);
+ else
+ mlx5_esw_fdb_active(esw);
+
+ down_write(&esw->mode_lock);
+ esw->eswitch_operation_in_progress = false;
+ up_write(&esw->mode_lock);
+ return true;
+}
+
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
struct netlink_ext_ack *extack)
{
@@ -3712,12 +3994,16 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
if (esw_mode_from_devlink(mode, &mlx5_mode))
return -EINVAL;
- if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV && mlx5_get_sd(esw->dev)) {
+ if (mlx5_mode == MLX5_ESWITCH_OFFLOADS && mlx5_get_sd(esw->dev)) {
NL_SET_ERR_MSG_MOD(extack,
"Can't change E-Switch mode to switchdev when multi-PF netdev (Socket Direct) is configured.");
return -EPERM;
}
+ /* Avoid try_lock, active/inactive mode change is not restricted */
+ if (mlx5_devlink_switchdev_active_mode_change(esw, mode))
+ return 0;
+
mlx5_lag_disable_change(esw->dev);
err = mlx5_esw_try_lock(esw);
if (err < 0) {
@@ -3740,23 +4026,36 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
esw->eswitch_operation_in_progress = true;
up_write(&esw->mode_lock);
+ if (mlx5_mode == MLX5_ESWITCH_OFFLOADS &&
+ !mlx5_devlink_netdev_netns_immutable_set(devlink, true)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can't change E-Switch mode to switchdev when netdev net namespace has diverged from the devlink's.");
+ err = -EINVAL;
+ goto skip;
+ }
+
+ if (mlx5_mode == MLX5_ESWITCH_LEGACY)
+ esw->dev->priv.flags |= MLX5_PRIV_FLAGS_SWITCH_LEGACY;
mlx5_eswitch_disable_locked(esw);
- if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) {
+ if (mlx5_mode == MLX5_ESWITCH_OFFLOADS) {
if (mlx5_devlink_trap_get_num_active(esw->dev)) {
NL_SET_ERR_MSG_MOD(extack,
"Can't change mode while devlink traps are active");
err = -EOPNOTSUPP;
goto skip;
}
+ esw->offloads_inactive =
+ (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV_INACTIVE);
err = esw_offloads_start(esw, extack);
- } else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) {
+ } else if (mlx5_mode == MLX5_ESWITCH_LEGACY) {
err = esw_offloads_stop(esw, extack);
- mlx5_rescan_drivers(esw->dev);
} else {
err = -EINVAL;
}
skip:
+ if (mlx5_mode == MLX5_ESWITCH_OFFLOADS && err)
+ mlx5_devlink_netdev_netns_immutable_set(devlink, false);
down_write(&esw->mode_lock);
esw->eswitch_operation_in_progress = false;
unlock:
@@ -3774,7 +4073,7 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
if (IS_ERR(esw))
return PTR_ERR(esw);
- return esw_mode_to_devlink(esw->mode, mode);
+ return esw_mode_to_devlink(esw, mode);
}
static int mlx5_esw_vports_inline_set(struct mlx5_eswitch *esw, u8 mlx5_mode,
@@ -3896,23 +4195,25 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
}
-bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev)
+bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev, bool from_fdb)
{
struct mlx5_eswitch *esw = dev->priv.eswitch;
+ enum devlink_eswitch_encap_mode encap;
+ bool allow_tunnel = false;
if (!mlx5_esw_allowed(esw))
return true;
down_write(&esw->mode_lock);
- if (esw->mode != MLX5_ESWITCH_LEGACY &&
- esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
- up_write(&esw->mode_lock);
- return false;
+ encap = esw->offloads.encap;
+ if (esw->mode == MLX5_ESWITCH_LEGACY ||
+ (encap == DEVLINK_ESWITCH_ENCAP_MODE_NONE && !from_fdb)) {
+ allow_tunnel = true;
+ esw->offloads.num_block_encap++;
}
-
- esw->offloads.num_block_encap++;
up_write(&esw->mode_lock);
- return true;
+
+ return allow_tunnel;
}
void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev)
@@ -4017,7 +4318,8 @@ mlx5_eswitch_vport_has_rep(const struct mlx5_eswitch *esw, u16 vport_num)
{
/* Currently, only ECPF based device has representor for host PF. */
if (vport_num == MLX5_VPORT_PF &&
- !mlx5_core_is_ecpf_esw_manager(esw->dev))
+ (!mlx5_core_is_ecpf_esw_manager(esw->dev) ||
+ !mlx5_esw_host_functions_enabled(esw->dev)))
return false;
if (vport_num == MLX5_VPORT_ECPF &&
@@ -4119,48 +4421,28 @@ u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);
-static int mlx5_esw_query_vport_vhca_id(struct mlx5_eswitch *esw, u16 vport_num, u16 *vhca_id)
-{
- int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
- void *query_ctx;
- void *hca_caps;
- int err;
-
- *vhca_id = 0;
-
- query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
- if (!query_ctx)
- return -ENOMEM;
-
- err = mlx5_vport_get_other_func_general_cap(esw->dev, vport_num, query_ctx);
- if (err)
- goto out_free;
-
- hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
- *vhca_id = MLX5_GET(cmd_hca_cap, hca_caps, vhca_id);
-
-out_free:
- kfree(query_ctx);
- return err;
-}
-
-int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num)
+int mlx5_esw_vport_vhca_id_map(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
u16 *old_entry, *vhca_map_entry, vhca_id;
- int err;
- err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
- if (err) {
- esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%u,err=%d)\n",
- vport_num, err);
- return err;
+ if (WARN_ONCE(MLX5_VPORT_INVAL_VHCA_ID(vport),
+ "vport %d vhca_id is not set", vport->vport)) {
+ int err;
+
+ err = mlx5_vport_get_vhca_id(vport->dev, vport->vport,
+ &vhca_id);
+ if (err)
+ return err;
+ vport->vhca_id = vhca_id;
}
+ vhca_id = vport->vhca_id;
vhca_map_entry = kmalloc(sizeof(*vhca_map_entry), GFP_KERNEL);
if (!vhca_map_entry)
return -ENOMEM;
- *vhca_map_entry = vport_num;
+ *vhca_map_entry = vport->vport;
old_entry = xa_store(&esw->offloads.vhca_map, vhca_id, vhca_map_entry, GFP_KERNEL);
if (xa_is_err(old_entry)) {
kfree(vhca_map_entry);
@@ -4170,17 +4452,12 @@ int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num)
return 0;
}
-void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num)
+void mlx5_esw_vport_vhca_id_unmap(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
- u16 *vhca_map_entry, vhca_id;
- int err;
-
- err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
- if (err)
- esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%hu,err=%d)\n",
- vport_num, err);
+ u16 *vhca_map_entry;
- vhca_map_entry = xa_erase(&esw->offloads.vhca_map, vhca_id);
+ vhca_map_entry = xa_erase(&esw->offloads.vhca_map, vport->vhca_id);
kfree(vhca_map_entry);
}
@@ -4215,6 +4492,9 @@ int mlx5_devlink_port_fn_hw_addr_get(struct devlink_port *port,
struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
mutex_lock(&esw->state_lock);
+
+ mlx5_query_nic_vport_mac_address(esw->dev, vport->vport, true,
+ vport->info.mac);
ether_addr_copy(hw_addr, vport->info.mac);
*hw_addr_len = ETH_ALEN;
mutex_unlock(&esw->state_lock);
@@ -4568,3 +4848,122 @@ unlock:
return err;
}
#endif /* CONFIG_XFRM_OFFLOAD */
+
+int
+mlx5_devlink_port_fn_max_io_eqs_get(struct devlink_port *port, u32 *max_io_eqs,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
+ int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ u16 vport_num = vport->vport;
+ struct mlx5_eswitch *esw;
+ void *query_ctx;
+ void *hca_caps;
+ u32 max_eqs;
+ int err;
+
+ esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
+ if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Device doesn't support VHCA management");
+ return -EOPNOTSUPP;
+ }
+
+ if (!MLX5_CAP_GEN_2(esw->dev, max_num_eqs_24b)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Device doesn't support getting the max number of EQs");
+ return -EOPNOTSUPP;
+ }
+
+ query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
+ if (!query_ctx)
+ return -ENOMEM;
+
+ mutex_lock(&esw->state_lock);
+ err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx,
+ MLX5_CAP_GENERAL_2);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed getting HCA caps");
+ goto out;
+ }
+
+ hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
+ max_eqs = MLX5_GET(cmd_hca_cap_2, hca_caps, max_num_eqs_24b);
+ if (max_eqs < MLX5_ESW_MAX_CTRL_EQS)
+ *max_io_eqs = 0;
+ else
+ *max_io_eqs = max_eqs - MLX5_ESW_MAX_CTRL_EQS;
+out:
+ mutex_unlock(&esw->state_lock);
+ kfree(query_ctx);
+ return err;
+}
+
+int
+mlx5_devlink_port_fn_max_io_eqs_set(struct devlink_port *port, u32 max_io_eqs,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
+ int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ u16 vport_num = vport->vport;
+ struct mlx5_eswitch *esw;
+ void *query_ctx;
+ void *hca_caps;
+ u16 max_eqs;
+ int err;
+
+ esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
+ if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Device doesn't support VHCA management");
+ return -EOPNOTSUPP;
+ }
+
+ if (!MLX5_CAP_GEN_2(esw->dev, max_num_eqs_24b)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Device doesn't support changing the max number of EQs");
+ return -EOPNOTSUPP;
+ }
+
+ if (check_add_overflow(max_io_eqs, MLX5_ESW_MAX_CTRL_EQS, &max_eqs)) {
+ NL_SET_ERR_MSG_MOD(extack, "Supplied value out of range");
+ return -EINVAL;
+ }
+
+ query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
+ if (!query_ctx)
+ return -ENOMEM;
+
+ mutex_lock(&esw->state_lock);
+ err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx,
+ MLX5_CAP_GENERAL_2);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed getting HCA caps");
+ goto out;
+ }
+
+ hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
+ MLX5_SET(cmd_hca_cap_2, hca_caps, max_num_eqs_24b, max_eqs);
+
+ if (mlx5_esw_is_sf_vport(esw, vport_num))
+ MLX5_SET(cmd_hca_cap_2, hca_caps, sf_eq_usage, 1);
+
+ err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport_num,
+ MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE2);
+ if (err)
+ NL_SET_ERR_MSG_MOD(extack, "Failed setting HCA caps");
+ vport->max_eqs_set = true;
+out:
+ mutex_unlock(&esw->state_lock);
+ kfree(query_ctx);
+ return err;
+}
+
+int
+mlx5_devlink_port_fn_max_io_eqs_set_sf_default(struct devlink_port *port,
+ struct netlink_ext_ack *extack)
+{
+ return mlx5_devlink_port_fn_max_io_eqs_set(port,
+ MLX5_ESW_DEFAULT_SF_COMP_EQS,
+ extack);
+}
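
mlx5_devlink_port_fn_max_io_eqs_set() above leans on check_add_overflow() to do two things at once: add the control-EQ reservation and reject any request whose sum does not fit the 16-bit max_num_eqs_24b field, since max_io_eqs arrives from devlink as a u32. A minimal sketch of that guard, using a hypothetical CTRL_EQS constant in place of MLX5_ESW_MAX_CTRL_EQS:

    #include <linux/overflow.h>
    #include <linux/types.h>

    #define CTRL_EQS 4	/* hypothetical stand-in for MLX5_ESW_MAX_CTRL_EQS */

    /* Returns 0 and stores the combined value, or -EINVAL when the sum does
     * not fit the 16-bit destination (overflow is detected against the type
     * of *max_eqs, not the operands). */
    static int combine_eqs(u32 max_io_eqs, u16 *max_eqs)
    {
            if (check_add_overflow(max_io_eqs, (u32)CTRL_EQS, max_eqs))
                    return -EINVAL;
            return 0;
    }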
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c
index d91ea53eb394..01c5f5990f9a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/events.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c
@@ -6,6 +6,7 @@
#include "mlx5_core.h"
#include "lib/eq.h"
#include "lib/events.h"
+#include "hwmon.h"
struct mlx5_event_nb {
struct mlx5_nb nb;
@@ -153,21 +154,50 @@ static int any_notifier(struct notifier_block *nb,
return NOTIFY_OK;
}
+#if IS_ENABLED(CONFIG_HWMON)
+static void print_sensor_names_in_bit_set(struct mlx5_core_dev *dev, struct mlx5_hwmon *hwmon,
+ u64 bit_set, int bit_set_offset)
+{
+ unsigned long *bit_set_ptr = (unsigned long *)&bit_set;
+ int num_bits = sizeof(bit_set) * BITS_PER_BYTE;
+ int i;
+
+ for_each_set_bit(i, bit_set_ptr, num_bits) {
+ const char *sensor_name = hwmon_get_sensor_name(hwmon, i + bit_set_offset);
+
+ mlx5_core_warn(dev, "Sensor name[%d]: %s\n", i + bit_set_offset, sensor_name);
+ }
+}
+#endif /* CONFIG_HWMON */
+
/* type == MLX5_EVENT_TYPE_TEMP_WARN_EVENT */
static int temp_warn(struct notifier_block *nb, unsigned long type, void *data)
{
struct mlx5_event_nb *event_nb = mlx5_nb_cof(nb, struct mlx5_event_nb, nb);
struct mlx5_events *events = event_nb->ctx;
+ struct mlx5_core_dev *dev = events->dev;
struct mlx5_eqe *eqe = data;
u64 value_lsb;
u64 value_msb;
value_lsb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_lsb);
+ /* Bits 1-63 are not supported for NICs,
+ * hence read only bit 0 (ASIC) from the LSB.
+ */
+ value_lsb &= 0x1;
value_msb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_msb);
- mlx5_core_warn(events->dev,
- "High temperature on sensors with bit set %llx %llx",
- value_msb, value_lsb);
+ if (net_ratelimit()) {
+ mlx5_core_warn(dev, "High temperature on sensors with bit set %#llx %#llx.\n",
+ value_msb, value_lsb);
+#if IS_ENABLED(CONFIG_HWMON)
+ if (dev->hwmon) {
+ print_sensor_names_in_bit_set(dev, dev->hwmon, value_lsb, 0);
+ print_sensor_names_in_bit_set(dev, dev->hwmon, value_msb,
+ sizeof(value_lsb) * BITS_PER_BYTE);
+ }
+#endif
+ }
return NOTIFY_OK;
}
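
temp_warn() now masks the LSB word down to bit 0 and, when hwmon is registered, walks the set bits of both 64-bit warning words to print sensor names, offsetting the MSB word by 64. A small sketch of that bit-walk, assuming 64-bit longs and using a plain pr_warn() in place of hwmon_get_sensor_name():

    #include <linux/bitops.h>
    #include <linux/printk.h>

    /* Report each set bit of one 64-bit warning word as a sensor index;
     * offset is 0 for the LSB word and 64 for the MSB word. */
    static void report_sensor_bits(u64 word, int offset)
    {
            unsigned long bits = word;	/* assumes BITS_PER_LONG == 64 */
            int i;

            for_each_set_bit(i, &bits, BITS_PER_TYPE(u64))
                    pr_warn("sensor[%d] over temperature\n", i + offset);
    }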
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
index c4de6bf8d1b6..ccef64fb40b6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
@@ -421,6 +421,13 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
__be64 *pas;
u32 i;
+ conn->cq.mcq.cqe_sz = 64;
+ conn->cq.mcq.set_ci_db = conn->cq.wq_ctrl.db.db;
+ conn->cq.mcq.arm_db = conn->cq.wq_ctrl.db.db + 1;
+ *conn->cq.mcq.set_ci_db = 0;
+ conn->cq.mcq.vector = 0;
+ conn->cq.mcq.comp = mlx5_fpga_conn_cq_complete;
+
cq_size = roundup_pow_of_two(cq_size);
MLX5_SET(cqc, temp_cqc, log_cq_size, ilog2(cq_size));
@@ -468,16 +475,7 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
if (err)
goto err_cqwq;
- conn->cq.mcq.cqe_sz = 64;
- conn->cq.mcq.set_ci_db = conn->cq.wq_ctrl.db.db;
- conn->cq.mcq.arm_db = conn->cq.wq_ctrl.db.db + 1;
- *conn->cq.mcq.set_ci_db = 0;
- *conn->cq.mcq.arm_db = 0;
- conn->cq.mcq.vector = 0;
- conn->cq.mcq.comp = mlx5_fpga_conn_cq_complete;
- conn->cq.mcq.uar = fdev->conn_res.uar;
tasklet_setup(&conn->cq.tasklet, mlx5_fpga_conn_cq_tasklet);
-
mlx5_fpga_dbg(fdev, "Created CQ #0x%x\n", conn->cq.mcq.cqn);
goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
index e5c1012921d2..1ec61164e6b5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
@@ -211,7 +211,7 @@ int mlx5_fpga_device_start(struct mlx5_core_dev *mdev)
max_num_qps = MLX5_CAP_FPGA(mdev, shell_caps.max_num_qps);
if (!max_num_qps) {
mlx5_fpga_err(fdev, "FPGA reports 0 QPs in SHELL_CAPS\n");
- err = -ENOTSUPP;
+ err = -EOPNOTSUPP;
goto out;
}
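
The fpga/core.c hunk swaps ENOTSUPP for EOPNOTSUPP. ENOTSUPP (524) is a kernel-internal errno with no userspace string, so if it ever reaches userspace it is reported as "Unknown error 524", while EOPNOTSUPP (95) is part of the UAPI errno set. A tiny userspace illustration (the exact strings depend on the C library):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            printf("%s\n", strerror(95));	/* EOPNOTSUPP: "Operation not supported" */
            printf("%s\n", strerror(524));	/* ENOTSUPP: "Unknown error 524" on glibc */
            return 0;
    }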
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 9b8599c200e2..ced747bef641 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -217,7 +217,8 @@ static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
int err;
if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
- underlay_qpn == 0)
+ underlay_qpn == 0 &&
+ (ft->type != FS_FT_RDMA_RX && ft->type != FS_FT_RDMA_TX))
return 0;
if (ft->type == FS_FT_FDB &&
@@ -238,6 +239,10 @@ static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
MLX5_SET(set_flow_table_root_in, in, other_vport,
!!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
+ MLX5_SET(set_flow_table_root_in, in, eswitch_owner_vhca_id,
+ ft->esw_owner_vhca_id);
+ MLX5_SET(set_flow_table_root_in, in, other_eswitch,
+ !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));
err = mlx5_cmd_exec_in(dev, set_flow_table_root, in);
if (!err &&
@@ -301,6 +306,10 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
MLX5_SET(create_flow_table_in, in, vport_number, ft->vport);
MLX5_SET(create_flow_table_in, in, other_vport,
!!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
+ MLX5_SET(create_flow_table_in, in, eswitch_owner_vhca_id,
+ ft->esw_owner_vhca_id);
+ MLX5_SET(create_flow_table_in, in, other_eswitch,
+ !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));
MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
en_decap);
@@ -359,6 +368,10 @@ static int mlx5_cmd_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport);
MLX5_SET(destroy_flow_table_in, in, other_vport,
!!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
+ MLX5_SET(destroy_flow_table_in, in, eswitch_owner_vhca_id,
+ ft->esw_owner_vhca_id);
+ MLX5_SET(destroy_flow_table_in, in, other_eswitch,
+ !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));
err = mlx5_cmd_exec_in(dev, destroy_flow_table, in);
if (!err)
@@ -393,6 +406,10 @@ static int mlx5_cmd_modify_flow_table(struct mlx5_flow_root_namespace *ns,
MLX5_SET(modify_flow_table_in, in, vport_number, ft->vport);
MLX5_SET(modify_flow_table_in, in, other_vport,
!!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
+ MLX5_SET(modify_flow_table_in, in, eswitch_owner_vhca_id,
+ ft->esw_owner_vhca_id);
+ MLX5_SET(modify_flow_table_in, in, other_eswitch,
+ !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));
MLX5_SET(modify_flow_table_in, in, modify_field_select,
MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
if (next_ft) {
@@ -428,6 +445,10 @@ static int mlx5_cmd_create_flow_group(struct mlx5_flow_root_namespace *ns,
MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
MLX5_SET(create_flow_group_in, in, other_vport,
!!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
+ MLX5_SET(create_flow_group_in, in, eswitch_owner_vhca_id,
+ ft->esw_owner_vhca_id);
+ MLX5_SET(create_flow_group_in, in, other_eswitch,
+ !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));
err = mlx5_cmd_exec_inout(dev, create_flow_group, in, out);
if (!err)
fg->id = MLX5_GET(create_flow_group_out, out,
@@ -450,6 +471,10 @@ static int mlx5_cmd_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport);
MLX5_SET(destroy_flow_group_in, in, other_vport,
!!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
+ MLX5_SET(destroy_flow_group_in, in, eswitch_owner_vhca_id,
+ ft->esw_owner_vhca_id);
+ MLX5_SET(destroy_flow_group_in, in, other_eswitch,
+ !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));
return mlx5_cmd_exec_in(dev, destroy_flow_group, in);
}
@@ -463,7 +488,7 @@ static int mlx5_set_extended_dest(struct mlx5_core_dev *dev,
int num_encap = 0;
*extended_dest = false;
- if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
+ if (!(fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
return 0;
list_for_each_entry(dst, &fte->node.children, node.list) {
@@ -502,17 +527,17 @@ mlx5_cmd_set_fte_flow_meter(struct fs_fte *fte, void *in_flow_context)
execute_aso[0]);
MLX5_SET(execute_aso, execute_aso, valid, 1);
MLX5_SET(execute_aso, execute_aso, aso_object_id,
- fte->action.exe_aso.object_id);
+ fte->act_dests.action.exe_aso.object_id);
exe_aso_ctrl = MLX5_ADDR_OF(execute_aso, execute_aso, exe_aso_ctrl);
MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, return_reg_id,
- fte->action.exe_aso.return_reg_id);
+ fte->act_dests.action.exe_aso.return_reg_id);
MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, aso_type,
- fte->action.exe_aso.type);
+ fte->act_dests.action.exe_aso.type);
MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, init_color,
- fte->action.exe_aso.flow_meter.init_color);
+ fte->act_dests.action.exe_aso.flow_meter.init_color);
MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, meter_id,
- fte->action.exe_aso.flow_meter.meter_idx);
+ fte->act_dests.action.exe_aso.flow_meter.meter_idx);
}
static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
@@ -526,7 +551,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
struct mlx5_flow_rule *dst;
void *in_flow_context, *vlan;
void *in_match_value;
- int reformat_id = 0;
+ u32 reformat_id = 0;
unsigned int inlen;
int dst_cnt_size;
u32 *in, action;
@@ -541,7 +566,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
else
dst_cnt_size = MLX5_ST_SZ_BYTES(extended_dest_format);
- inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->dests_size * dst_cnt_size;
+ inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->act_dests.dests_size * dst_cnt_size;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -553,84 +578,85 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(set_fte_in, in, table_id, ft->id);
MLX5_SET(set_fte_in, in, flow_index, fte->index);
MLX5_SET(set_fte_in, in, ignore_flow_level,
- !!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL));
+ !!(fte->act_dests.action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL));
MLX5_SET(set_fte_in, in, vport_number, ft->vport);
MLX5_SET(set_fte_in, in, other_vport,
!!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
+ MLX5_SET(set_fte_in, in, eswitch_owner_vhca_id, ft->esw_owner_vhca_id);
+ MLX5_SET(set_fte_in, in, other_eswitch,
+ !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));
in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
MLX5_SET(flow_context, in_flow_context, group_id, group_id);
MLX5_SET(flow_context, in_flow_context, flow_tag,
- fte->flow_context.flow_tag);
+ fte->act_dests.flow_context.flow_tag);
MLX5_SET(flow_context, in_flow_context, flow_source,
- fte->flow_context.flow_source);
+ fte->act_dests.flow_context.flow_source);
MLX5_SET(flow_context, in_flow_context, uplink_hairpin_en,
- !!(fte->flow_context.flags & FLOW_CONTEXT_UPLINK_HAIRPIN_EN));
+ !!(fte->act_dests.flow_context.flags & FLOW_CONTEXT_UPLINK_HAIRPIN_EN));
MLX5_SET(flow_context, in_flow_context, extended_destination,
extended_dest);
- action = fte->action.action;
+ action = fte->act_dests.action.action;
if (extended_dest)
action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
MLX5_SET(flow_context, in_flow_context, action, action);
- if (!extended_dest && fte->action.pkt_reformat) {
- struct mlx5_pkt_reformat *pkt_reformat = fte->action.pkt_reformat;
-
- if (pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_SW) {
- reformat_id = mlx5_fs_dr_action_get_pkt_reformat_id(pkt_reformat);
- if (reformat_id < 0) {
- mlx5_core_err(dev,
- "Unsupported SW-owned pkt_reformat type (%d) in FW-owned table\n",
- pkt_reformat->reformat_type);
- err = reformat_id;
- goto err_out;
- }
- } else {
- reformat_id = fte->action.pkt_reformat->id;
+ if (!extended_dest && fte->act_dests.action.pkt_reformat) {
+ struct mlx5_pkt_reformat *pkt_reformat =
+ fte->act_dests.action.pkt_reformat;
+
+ err = mlx5_fs_get_packet_reformat_id(pkt_reformat,
+ &reformat_id);
+ if (err) {
+ mlx5_core_err(dev,
+ "Unsupported pkt_reformat type (%d)\n",
+ pkt_reformat->reformat_type);
+ goto err_out;
}
}
- MLX5_SET(flow_context, in_flow_context, packet_reformat_id, (u32)reformat_id);
+ MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
+ reformat_id);
- if (fte->action.modify_hdr) {
- if (fte->action.modify_hdr->owner == MLX5_FLOW_RESOURCE_OWNER_SW) {
+ if (fte->act_dests.action.modify_hdr) {
+ if (fte->act_dests.action.modify_hdr->owner == MLX5_FLOW_RESOURCE_OWNER_SW) {
mlx5_core_err(dev, "Can't use SW-owned modify_hdr in FW-owned table\n");
err = -EOPNOTSUPP;
goto err_out;
}
MLX5_SET(flow_context, in_flow_context, modify_header_id,
- fte->action.modify_hdr->id);
+ fte->act_dests.action.modify_hdr->id);
}
MLX5_SET(flow_context, in_flow_context, encrypt_decrypt_type,
- fte->action.crypto.type);
+ fte->act_dests.action.crypto.type);
MLX5_SET(flow_context, in_flow_context, encrypt_decrypt_obj_id,
- fte->action.crypto.obj_id);
+ fte->act_dests.action.crypto.obj_id);
vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);
- MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype);
- MLX5_SET(vlan, vlan, vid, fte->action.vlan[0].vid);
- MLX5_SET(vlan, vlan, prio, fte->action.vlan[0].prio);
+ MLX5_SET(vlan, vlan, ethtype, fte->act_dests.action.vlan[0].ethtype);
+ MLX5_SET(vlan, vlan, vid, fte->act_dests.action.vlan[0].vid);
+ MLX5_SET(vlan, vlan, prio, fte->act_dests.action.vlan[0].prio);
vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2);
- MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[1].ethtype);
- MLX5_SET(vlan, vlan, vid, fte->action.vlan[1].vid);
- MLX5_SET(vlan, vlan, prio, fte->action.vlan[1].prio);
+ MLX5_SET(vlan, vlan, ethtype, fte->act_dests.action.vlan[1].ethtype);
+ MLX5_SET(vlan, vlan, vid, fte->act_dests.action.vlan[1].vid);
+ MLX5_SET(vlan, vlan, prio, fte->act_dests.action.vlan[1].prio);
in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
match_value);
memcpy(in_match_value, &fte->val, sizeof(fte->val));
in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
int list_size = 0;
list_for_each_entry(dst, &fte->node.children, node.list) {
@@ -706,7 +732,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
list_size);
}
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
log_max_flow_counter,
ft->type));
@@ -718,7 +744,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
continue;
MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
- dst->dest_attr.counter_id);
+ mlx5_fc_id(dst->dest_attr.counter));
in_dests += dst_cnt_size;
list_size++;
}
@@ -731,8 +757,8 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
list_size);
}
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
- if (fte->action.exe_aso.type == MLX5_EXE_ASO_FLOW_METER) {
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
+ if (fte->act_dests.action.exe_aso.type == MLX5_EXE_ASO_FLOW_METER) {
mlx5_cmd_set_fte_flow_meter(fte, in_flow_context);
} else {
err = -EOPNOTSUPP;
@@ -789,6 +815,10 @@ static int mlx5_cmd_delete_fte(struct mlx5_flow_root_namespace *ns,
MLX5_SET(delete_fte_in, in, vport_number, ft->vport);
MLX5_SET(delete_fte_in, in, other_vport,
!!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
+ MLX5_SET(delete_fte_in, in, eswitch_owner_vhca_id,
+ ft->esw_owner_vhca_id);
+ MLX5_SET(delete_fte_in, in, other_eswitch,
+ !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));
return mlx5_cmd_exec_in(dev, delete_fte, in);
}
@@ -1071,7 +1101,7 @@ static int mlx5_cmd_create_match_definer(struct mlx5_flow_root_namespace *ns,
static u32 mlx5_cmd_get_capabilities(struct mlx5_flow_root_namespace *ns,
enum fs_flow_table_type ft_type)
{
- return 0;
+ return MLX5_FLOW_STEERING_CAP_DUPLICATE_MATCH;
}
static const struct mlx5_flow_cmds mlx5_flow_cmds = {
@@ -1141,6 +1171,8 @@ const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type typ
case FS_FT_RDMA_RX:
case FS_FT_RDMA_TX:
case FS_FT_PORT_SEL:
+ case FS_FT_RDMA_TRANSPORT_RX:
+ case FS_FT_RDMA_TRANSPORT_TX:
return mlx5_fs_cmd_get_fw_cmds();
default:
return mlx5_fs_cmd_get_stub_cmds();
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
index 53e0e5137d3f..7eb7b3ffe3d8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
@@ -124,4 +124,12 @@ const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void);
int mlx5_fs_cmd_set_l2table_entry_silent(struct mlx5_core_dev *dev, u8 silent_mode);
int mlx5_fs_cmd_set_tx_flow_table_root(struct mlx5_core_dev *dev, u32 ft_id, bool disconnect);
+
+static inline bool mlx5_fs_cmd_is_fw_term_table(struct mlx5_flow_table *ft)
+{
+ if (ft->flags & MLX5_FLOW_TABLE_TERMINATION)
+ return true;
+
+ return false;
+}
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index cf085a478e3e..0a6031a64c6f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -113,13 +113,16 @@
#define ETHTOOL_PRIO_NUM_LEVELS 1
#define ETHTOOL_NUM_PRIOS 11
#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
-/* Promiscuous, Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}}, IPsec policy,
- * {IPsec RoCE MPV,Alias table},IPsec RoCE policy
+/* Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}}, IPsec policy,
+ * IPsec policy miss, {IPsec RoCE MPV,Alias table},IPsec RoCE policy
*/
#define KERNEL_NIC_PRIO_NUM_LEVELS 11
#define KERNEL_NIC_NUM_PRIOS 1
-/* One more level for tc */
-#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
+/* One more level for tc, and one more for promisc */
+#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 2)
+
+#define KERNEL_NIC_PROMISC_NUM_PRIOS 1
+#define KERNEL_NIC_PROMISC_NUM_LEVELS 1
#define KERNEL_NIC_TC_NUM_PRIOS 1
#define KERNEL_NIC_TC_NUM_LEVELS 3
@@ -187,6 +190,8 @@ static struct init_tree_node {
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS,
KERNEL_NIC_TC_NUM_LEVELS),
+ ADD_MULTIPLE_PRIO(KERNEL_NIC_PROMISC_NUM_PRIOS,
+ KERNEL_NIC_PROMISC_NUM_LEVELS),
ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
KERNEL_NIC_PRIO_NUM_LEVELS))),
ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
@@ -605,12 +610,37 @@ static void modify_fte(struct fs_fte *fte)
dev = get_dev(&fte->node);
root = find_root(&ft->node);
- err = root->cmds->update_fte(root, ft, fg, fte->modify_mask, fte);
+ err = root->cmds->update_fte(root, ft, fg, fte->act_dests.modify_mask, fte);
if (err)
mlx5_core_warn(dev,
"%s can't del rule fg id=%d fte_index=%d\n",
__func__, fg->id, fte->index);
- fte->modify_mask = 0;
+ fte->act_dests.modify_mask = 0;
+}
+
+static void del_sw_hw_dup_rule(struct fs_node *node)
+{
+ struct mlx5_flow_rule *rule;
+ struct fs_fte *fte;
+
+ fs_get_obj(rule, node);
+ fs_get_obj(fte, rule->node.parent);
+ trace_mlx5_fs_del_rule(rule);
+
+ if (is_fwd_next_action(rule->sw_action)) {
+ mutex_lock(&rule->dest_attr.ft->lock);
+ list_del(&rule->next_ft);
+ mutex_unlock(&rule->dest_attr.ft->lock);
+ }
+
+ /* If a pending rule is being deleted, it means
+ * this is a NO APPEND rule, so there are no partial deletions:
+ * all the rules of the mlx5_flow_handle are going to be deleted,
+ * and the rules aren't shared with any other mlx5_flow_handle instance,
+ * so there is no need to do any bookkeeping like in del_sw_hw_rule().
+ */
+
+ kfree(rule);
}
static void del_sw_hw_rule(struct fs_node *node)
@@ -628,29 +658,30 @@ static void del_sw_hw_rule(struct fs_node *node)
}
if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER) {
- --fte->dests_size;
- fte->modify_mask |=
+ --fte->act_dests.dests_size;
+ fte->act_dests.modify_mask |=
BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
- fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ fte->act_dests.action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ mlx5_fc_local_put(rule->dest_attr.counter);
goto out;
}
if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_PORT) {
- --fte->dests_size;
- fte->modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
- fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+ --fte->act_dests.dests_size;
+ fte->act_dests.modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
+ fte->act_dests.action.action &= ~MLX5_FLOW_CONTEXT_ACTION_ALLOW;
goto out;
}
if (is_fwd_dest_type(rule->dest_attr.type)) {
- --fte->dests_size;
- --fte->fwd_dests;
+ --fte->act_dests.dests_size;
+ --fte->act_dests.fwd_dests;
- if (!fte->fwd_dests)
- fte->action.action &=
+ if (!fte->act_dests.fwd_dests)
+ fte->act_dests.action.action &=
~MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- fte->modify_mask |=
+ fte->act_dests.modify_mask |=
BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
goto out;
}
@@ -658,12 +689,33 @@ out:
kfree(rule);
}
+static void switch_to_pending_act_dests(struct fs_fte *fte)
+{
+ struct fs_node *iter;
+
+ memcpy(&fte->act_dests, &fte->dup->act_dests, sizeof(fte->act_dests));
+
+ list_bulk_move_tail(&fte->node.children,
+ fte->dup->children.next,
+ fte->dup->children.prev);
+
+ list_for_each_entry(iter, &fte->node.children, list)
+ iter->del_sw_func = del_sw_hw_rule;
+
+ /* Make sure the fte isn't deleted
+ * as mlx5_del_flow_rules() decreases the refcount
+ * of the fte to trigger deletion.
+ */
+ tree_get_node(&fte->node);
+}
+
static void del_hw_fte(struct fs_node *node)
{
struct mlx5_flow_root_namespace *root;
struct mlx5_flow_table *ft;
struct mlx5_flow_group *fg;
struct mlx5_core_dev *dev;
+ bool pending_used = false;
struct fs_fte *fte;
int err;
@@ -672,16 +724,35 @@ static void del_hw_fte(struct fs_node *node)
fs_get_obj(ft, fg->node.parent);
trace_mlx5_fs_del_fte(fte);
- WARN_ON(fte->dests_size);
+ WARN_ON(fte->act_dests.dests_size);
dev = get_dev(&ft->node);
root = find_root(&ft->node);
+
+ if (fte->dup && !list_empty(&fte->dup->children)) {
+ switch_to_pending_act_dests(fte);
+ pending_used = true;
+ } else {
+ /* Avoid double call to del_hw_fte */
+ node->del_hw_func = NULL;
+ }
+
if (node->active) {
- err = root->cmds->delete_fte(root, ft, fte);
- if (err)
- mlx5_core_warn(dev,
- "flow steering can't delete fte in index %d of flow group id %d\n",
- fte->index, fg->id);
- node->active = false;
+ if (pending_used) {
+ err = root->cmds->update_fte(root, ft, fg,
+ fte->act_dests.modify_mask, fte);
+ if (err)
+ mlx5_core_warn(dev,
+ "flow steering can't update to pending rule in index %d of flow group id %d\n",
+ fte->index, fg->id);
+ fte->act_dests.modify_mask = 0;
+ } else {
+ err = root->cmds->delete_fte(root, ft, fte);
+ if (err)
+ mlx5_core_warn(dev,
+ "flow steering can't delete fte in index %d of flow group id %d\n",
+ fte->index, fg->id);
+ node->active = false;
+ }
}
}
@@ -700,6 +771,7 @@ static void del_sw_fte(struct fs_node *node)
rhash_fte);
WARN_ON(err);
ida_free(&fg->fte_allocator, fte->index - fg->start_index);
+ kvfree(fte->dup);
kmem_cache_free(steering->ftes_cache, fte);
}
@@ -754,11 +826,17 @@ static int insert_fte(struct mlx5_flow_group *fg, struct fs_fte *fte)
return index;
fte->index = index + fg->start_index;
+retry_insert:
ret = rhashtable_insert_fast(&fg->ftes_hash,
&fte->hash,
rhash_fte);
- if (ret)
+ if (ret) {
+ if (ret == -EBUSY) {
+ cond_resched();
+ goto retry_insert;
+ }
goto err_ida_remove;
+ }
tree_add_node(&fte->node, &fg->node);
list_add_tail(&fte->node.list, &fg->node.children);
@@ -782,8 +860,8 @@ static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
memcpy(fte->val, &spec->match_value, sizeof(fte->val));
fte->node.type = FS_TYPE_FLOW_ENTRY;
- fte->action = *flow_act;
- fte->flow_context = spec->flow_context;
+ fte->act_dests.action = *flow_act;
+ fte->act_dests.flow_context = spec->flow_context;
tree_init_node(&fte->node, del_hw_fte, del_sw_fte);
@@ -861,10 +939,10 @@ static struct mlx5_flow_group *alloc_insert_flow_group(struct mlx5_flow_table *f
return fg;
}
-static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport,
- enum fs_flow_table_type table_type,
- enum fs_flow_table_op_mod op_mod,
- u32 flags)
+static struct mlx5_flow_table *
+alloc_flow_table(struct mlx5_flow_table_attr *ft_attr, u16 vport,
+ enum fs_flow_table_type table_type,
+ enum fs_flow_table_op_mod op_mod)
{
struct mlx5_flow_table *ft;
int ret;
@@ -879,12 +957,13 @@ static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport,
return ERR_PTR(ret);
}
- ft->level = level;
+ ft->level = ft_attr->level;
ft->node.type = FS_TYPE_FLOW_TABLE;
ft->op_mod = op_mod;
ft->type = table_type;
ft->vport = vport;
- ft->flags = flags;
+ ft->esw_owner_vhca_id = ft_attr->esw_owner_vhca_id;
+ ft->flags = ft_attr->flags;
INIT_LIST_HEAD(&ft->fwd_rules);
mutex_init(&ft->lock);
@@ -1103,18 +1182,45 @@ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
return err;
}
+static bool rule_is_pending(struct fs_fte *fte, struct mlx5_flow_rule *rule)
+{
+ struct mlx5_flow_rule *tmp_rule;
+ struct fs_node *iter;
+
+ if (!fte->dup || list_empty(&fte->dup->children))
+ return false;
+
+ list_for_each_entry(iter, &fte->dup->children, list) {
+ tmp_rule = container_of(iter, struct mlx5_flow_rule, node);
+
+ if (tmp_rule == rule)
+ return true;
+ }
+
+ return false;
+}
+
static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
struct mlx5_flow_destination *dest)
{
struct mlx5_flow_root_namespace *root;
+ struct fs_fte_action *act_dests;
struct mlx5_flow_table *ft;
struct mlx5_flow_group *fg;
+ bool pending = false;
struct fs_fte *fte;
int modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
int err = 0;
fs_get_obj(fte, rule->node.parent);
- if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
+
+ pending = rule_is_pending(fte, rule);
+ if (pending)
+ act_dests = &fte->dup->act_dests;
+ else
+ act_dests = &fte->act_dests;
+
+ if (!(act_dests->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
return -EINVAL;
down_write_ref_node(&fte->node, false);
fs_get_obj(fg, fte->node.parent);
@@ -1122,8 +1228,9 @@ static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
memcpy(&rule->dest_attr, dest, sizeof(*dest));
root = find_root(&ft->node);
- err = root->cmds->update_fte(root, ft, fg,
- modify_mask, fte);
+ if (!pending)
+ err = root->cmds->update_fte(root, ft, fg,
+ modify_mask, fte);
up_write_ref_node(&fte->node, false);
return err;
@@ -1264,10 +1371,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
/* The level is related to the
* priority level range.
*/
- ft = alloc_flow_table(ft_attr->level,
- vport,
- root->table_type,
- op_mod, ft_attr->flags);
+ ft = alloc_flow_table(ft_attr, vport, root->table_type, op_mod);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto unlock_root;
@@ -1355,7 +1459,7 @@ mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
struct mlx5_flow_table *ft;
int autogroups_max_fte;
- ft = mlx5_create_flow_table(ns, ft_attr);
+ ft = mlx5_create_vport_flow_table(ns, ft_attr, ft_attr->vport);
if (IS_ERR(ft))
return ft;
@@ -1453,6 +1557,16 @@ static struct mlx5_flow_handle *alloc_handle(int num_rules)
return handle;
}
+static void destroy_flow_handle_dup(struct mlx5_flow_handle *handle,
+ int i)
+{
+ for (; --i >= 0;) {
+ list_del(&handle->rule[i]->node.list);
+ kfree(handle->rule[i]);
+ }
+ kfree(handle);
+}
+
static void destroy_flow_handle(struct fs_fte *fte,
struct mlx5_flow_handle *handle,
struct mlx5_flow_destination *dest,
@@ -1460,7 +1574,7 @@ static void destroy_flow_handle(struct fs_fte *fte,
{
for (; --i >= 0;) {
if (refcount_dec_and_test(&handle->rule[i]->node.refcount)) {
- fte->dests_size--;
+ fte->act_dests.dests_size--;
list_del(&handle->rule[i]->node.list);
kfree(handle->rule[i]);
}
@@ -1469,6 +1583,61 @@ static void destroy_flow_handle(struct fs_fte *fte,
}
static struct mlx5_flow_handle *
+create_flow_handle_dup(struct list_head *children,
+ struct mlx5_flow_destination *dest,
+ int dest_num,
+ struct fs_fte_action *act_dests)
+{
+ static int dst = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
+ static int count = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
+ struct mlx5_flow_rule *rule = NULL;
+ struct mlx5_flow_handle *handle;
+ int i = 0;
+ int type;
+
+ handle = alloc_handle((dest_num) ? dest_num : 1);
+ if (!handle)
+ return NULL;
+
+ do {
+ rule = alloc_rule(dest + i);
+ if (!rule)
+ goto free_rules;
+
+ /* Add dest to the dests list - flow tables need to be at the
+ * end of the list for forward-to-next-prio rules.
+ */
+ tree_init_node(&rule->node, NULL, del_sw_hw_dup_rule);
+ if (dest &&
+ dest[i].type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
+ list_add(&rule->node.list, children);
+ else
+ list_add_tail(&rule->node.list, children);
+
+ if (dest) {
+ act_dests->dests_size++;
+
+ if (is_fwd_dest_type(dest[i].type))
+ act_dests->fwd_dests++;
+
+ type = dest[i].type ==
+ MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ act_dests->modify_mask |= type ? count : dst;
+ }
+ handle->rule[i] = rule;
+ } while (++i < dest_num);
+
+ return handle;
+
+free_rules:
+ destroy_flow_handle_dup(handle, i);
+ act_dests->dests_size = 0;
+ act_dests->fwd_dests = 0;
+
+ return NULL;
+}
+
+static struct mlx5_flow_handle *
create_flow_handle(struct fs_fte *fte,
struct mlx5_flow_destination *dest,
int dest_num,
@@ -1510,10 +1679,10 @@ create_flow_handle(struct fs_fte *fte,
else
list_add_tail(&rule->node.list, &fte->node.children);
if (dest) {
- fte->dests_size++;
+ fte->act_dests.dests_size++;
if (is_fwd_dest_type(dest[i].type))
- fte->fwd_dests++;
+ fte->act_dests.fwd_dests++;
type = dest[i].type ==
MLX5_FLOW_DESTINATION_TYPE_COUNTER;
@@ -1664,14 +1833,35 @@ static int create_auto_flow_group(struct mlx5_flow_table *ft,
return err;
}
+int mlx5_fs_get_packet_reformat_id(struct mlx5_pkt_reformat *pkt_reformat,
+ u32 *id)
+{
+ switch (pkt_reformat->owner) {
+ case MLX5_FLOW_RESOURCE_OWNER_FW:
+ *id = pkt_reformat->id;
+ return 0;
+ case MLX5_FLOW_RESOURCE_OWNER_SW:
+ return mlx5_fs_dr_action_get_pkt_reformat_id(pkt_reformat, id);
+ case MLX5_FLOW_RESOURCE_OWNER_HWS:
+ return mlx5_fs_hws_action_get_pkt_reformat_id(pkt_reformat, id);
+ default:
+ return -EINVAL;
+ }
+}
+
static bool mlx5_pkt_reformat_cmp(struct mlx5_pkt_reformat *p1,
struct mlx5_pkt_reformat *p2)
{
- return p1->owner == p2->owner &&
- (p1->owner == MLX5_FLOW_RESOURCE_OWNER_FW ?
- p1->id == p2->id :
- mlx5_fs_dr_action_get_pkt_reformat_id(p1) ==
- mlx5_fs_dr_action_get_pkt_reformat_id(p2));
+ int err1, err2;
+ u32 id1, id2;
+
+ if (p1->owner != p2->owner)
+ return false;
+
+ err1 = mlx5_fs_get_packet_reformat_id(p1, &id1);
+ err2 = mlx5_fs_get_packet_reformat_id(p2, &id2);
+
+ return !err1 && !err2 && id1 == id2;
}
static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
@@ -1774,17 +1964,17 @@ static int check_conflicting_ftes(struct fs_fte *fte,
const struct mlx5_flow_context *flow_context,
const struct mlx5_flow_act *flow_act)
{
- if (check_conflicting_actions(flow_act, &fte->action)) {
+ if (check_conflicting_actions(flow_act, &fte->act_dests.action)) {
mlx5_core_warn(get_dev(&fte->node),
"Found two FTEs with conflicting actions\n");
return -EEXIST;
}
if ((flow_context->flags & FLOW_CONTEXT_HAS_TAG) &&
- fte->flow_context.flow_tag != flow_context->flow_tag) {
+ fte->act_dests.flow_context.flow_tag != flow_context->flow_tag) {
mlx5_core_warn(get_dev(&fte->node),
"FTE flow tag %u already exists with different flow tag %u\n",
- fte->flow_context.flow_tag,
+ fte->act_dests.flow_context.flow_tag,
flow_context->flow_tag);
return -EEXIST;
}
@@ -1808,12 +1998,12 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
if (ret)
return ERR_PTR(ret);
- old_action = fte->action.action;
- fte->action.action |= flow_act->action;
+ old_action = fte->act_dests.action.action;
+ fte->act_dests.action.action |= flow_act->action;
handle = add_rule_fte(fte, fg, dest, dest_num,
old_action != flow_act->action);
if (IS_ERR(handle)) {
- fte->action.action = old_action;
+ fte->act_dests.action.action = old_action;
return handle;
}
trace_mlx5_fs_set_fte(fte, false);
@@ -1946,13 +2136,22 @@ lookup_fte_locked(struct mlx5_flow_group *g,
fte_tmp = NULL;
goto out;
}
+
+ nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
+
if (!fte_tmp->node.active) {
+ up_write_ref_node(&fte_tmp->node, false);
+
+ if (take_write)
+ up_write_ref_node(&g->node, false);
+ else
+ up_read_ref_node(&g->node);
+
tree_put_node(&fte_tmp->node, false);
- fte_tmp = NULL;
- goto out;
+
+ return NULL;
}
- nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
out:
if (take_write)
up_write_ref_node(&g->node, false);
@@ -1961,6 +2160,62 @@ out:
return fte_tmp;
}
+/* Native capability lacks support for adding an additional match with the same value
+ * to the same flow group. To accommodate the NO APPEND flag in these scenarios,
+ * we include the new rule in the existing flow table entry (fte) without immediate
+ * hardware commitment. When a request is made to delete the corresponding hardware rule,
+ * we then commit the pending rule to hardware.
+ */
+static struct mlx5_flow_handle *
+add_rule_dup_match_fte(struct fs_fte *fte,
+ const struct mlx5_flow_spec *spec,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_flow_destination *dest,
+ int dest_num)
+{
+ struct mlx5_flow_handle *handle;
+ struct fs_fte_dup *dup;
+ int i = 0;
+
+ if (!fte->dup) {
+ dup = kvzalloc(sizeof(*dup), GFP_KERNEL);
+ if (!dup)
+ return ERR_PTR(-ENOMEM);
+ /* dup will be freed when the fte is freed;
+ * this way we don't allocate/free dup on every rule creation
+ * or deletion.
+ */
+ INIT_LIST_HEAD(&dup->children);
+ fte->dup = dup;
+ }
+
+ if (!list_empty(&fte->dup->children)) {
+ mlx5_core_warn(get_dev(&fte->node),
+ "Can have only a single duplicate rule\n");
+
+ return ERR_PTR(-EEXIST);
+ }
+
+ fte->dup->act_dests.action = *flow_act;
+ fte->dup->act_dests.flow_context = spec->flow_context;
+ fte->dup->act_dests.dests_size = 0;
+ fte->dup->act_dests.fwd_dests = 0;
+ fte->dup->act_dests.modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
+
+ handle = create_flow_handle_dup(&fte->dup->children,
+ dest, dest_num,
+ &fte->dup->act_dests);
+ if (!handle)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < handle->num_rules; i++) {
+ tree_add_node(&handle->rule[i]->node, &fte->node);
+ trace_mlx5_fs_add_rule(handle->rule[i]);
+ }
+
+ return handle;
+}
+
static struct mlx5_flow_handle *
try_add_to_existing_fg(struct mlx5_flow_table *ft,
struct list_head *match_head,
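
The duplicate-match handling introduced above boils down to a stage-then-promote pattern: a FLOW_ACT_NO_APPEND rule with an identical match value is parked on fte->dup instead of being written to hardware, and it is only committed (via update_fte) when the rule currently in hardware is deleted. A self-contained sketch of that pattern, with simplified types that are not the driver's:

    #include <stdbool.h>
    #include <stdlib.h>

    struct entry_cfg { int action; int dest; };

    struct entry {
            struct entry_cfg active;	/* what is committed to "hardware" */
            struct entry_cfg *pending;	/* staged duplicate, not yet committed */
    };

    /* Stage a duplicate configuration instead of rejecting it; only a single
     * pending duplicate is allowed, as in add_rule_dup_match_fte(). */
    static int entry_stage_dup(struct entry *e, const struct entry_cfg *cfg)
    {
            if (e->pending)
                    return -1;
            e->pending = malloc(sizeof(*e->pending));
            if (!e->pending)
                    return -1;
            *e->pending = *cfg;
            return 0;
    }

    /* Deleting the committed rule promotes the staged one instead of removing
     * the entry, mirroring switch_to_pending_act_dests(). */
    static bool entry_delete_active(struct entry *e)
    {
            if (e->pending) {
                    e->active = *e->pending;
                    free(e->pending);
                    e->pending = NULL;
                    return false;	/* entry survives with the promoted config */
            }
            return true;		/* no duplicate staged: really delete */
    }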
@@ -1971,10 +2226,12 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
int ft_version)
{
struct mlx5_flow_steering *steering = get_steering(&ft->node);
+ struct mlx5_flow_root_namespace *root = find_root(&ft->node);
struct mlx5_flow_group *g;
struct mlx5_flow_handle *rule;
struct match_list *iter;
bool take_write = false;
+ bool try_again = false;
struct fs_fte *fte;
u64 version = 0;
int err;
@@ -1984,7 +2241,9 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
return ERR_PTR(-ENOMEM);
search_again_locked:
- if (flow_act->flags & FLOW_ACT_NO_APPEND)
+ if (flow_act->flags & FLOW_ACT_NO_APPEND &&
+ (root->cmds->get_capabilities(root, root->table_type) &
+ MLX5_FLOW_STEERING_CAP_DUPLICATE_MATCH))
goto skip_search;
version = matched_fgs_get_version(match_head);
/* Try to find an fte with identical match value and attempt update its
@@ -1997,7 +2256,10 @@ search_again_locked:
fte_tmp = lookup_fte_locked(g, spec->match_value, take_write);
if (!fte_tmp)
continue;
- rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp);
+ if (flow_act->flags & FLOW_ACT_NO_APPEND)
+ rule = add_rule_dup_match_fte(fte_tmp, spec, flow_act, dest, dest_num);
+ else
+ rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp);
/* No error check needed here, because insert_fte() is not called */
up_write_ref_node(&fte_tmp->node, false);
tree_put_node(&fte_tmp->node, false);
@@ -2034,6 +2296,7 @@ skip_search:
nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
if (!g->node.active) {
+ try_again = true;
up_write_ref_node(&g->node, false);
continue;
}
@@ -2055,7 +2318,8 @@ skip_search:
tree_put_node(&fte->node, false);
return rule;
}
- rule = ERR_PTR(-ENOENT);
+ err = try_again ? -EAGAIN : -ENOENT;
+ rule = ERR_PTR(err);
out:
kmem_cache_free(steering->ftes_cache, fte);
return rule;
@@ -2265,12 +2529,10 @@ void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
tree_remove_node(&handle->rule[i]->node, true);
if (list_empty(&fte->node.children)) {
fte->node.del_hw_func(&fte->node);
- /* Avoid double call to del_hw_fte */
- fte->node.del_hw_func = NULL;
up_write_ref_node(&fte->node, false);
tree_put_node(&fte->node, false);
- } else if (fte->dests_size) {
- if (fte->modify_mask)
+ } else if (fte->act_dests.dests_size) {
+ if (fte->act_dests.modify_mask)
modify_fte(fte);
up_write_ref_node(&fte->node, false);
} else {
@@ -2481,6 +2743,7 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
break;
case MLX5_FLOW_NAMESPACE_RDMA_TX:
root_ns = steering->rdma_tx_root_ns;
+ prio = RDMA_TX_BYPASS_PRIO;
break;
case MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS:
root_ns = steering->rdma_rx_root_ns;
@@ -2528,36 +2791,56 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
}
EXPORT_SYMBOL(mlx5_get_flow_namespace);
-struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
- enum mlx5_flow_namespace_type type,
- int vport)
+struct mlx5_vport_acl_root_ns {
+ u16 vport_idx;
+ struct mlx5_flow_root_namespace *root_ns;
+};
+
+struct mlx5_flow_namespace *
+mlx5_get_flow_vport_namespace(struct mlx5_core_dev *dev,
+ enum mlx5_flow_namespace_type type, int vport_idx)
{
struct mlx5_flow_steering *steering = dev->priv.steering;
+ struct mlx5_vport_acl_root_ns *vport_ns;
if (!steering)
return NULL;
switch (type) {
case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
- if (vport >= steering->esw_egress_acl_vports)
- return NULL;
- if (steering->esw_egress_root_ns &&
- steering->esw_egress_root_ns[vport])
- return &steering->esw_egress_root_ns[vport]->ns;
+ vport_ns = xa_load(&steering->esw_egress_root_ns, vport_idx);
+ if (vport_ns)
+ return &vport_ns->root_ns->ns;
else
return NULL;
case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
- if (vport >= steering->esw_ingress_acl_vports)
+ vport_ns = xa_load(&steering->esw_ingress_root_ns, vport_idx);
+ if (vport_ns)
+ return &vport_ns->root_ns->ns;
+ else
+ return NULL;
+ case MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_RX:
+ if (vport_idx >= steering->rdma_transport_rx_vports)
+ return NULL;
+ if (steering->rdma_transport_rx_root_ns &&
+ steering->rdma_transport_rx_root_ns[vport_idx])
+ return &steering->rdma_transport_rx_root_ns[vport_idx]->ns;
+ else
return NULL;
- if (steering->esw_ingress_root_ns &&
- steering->esw_ingress_root_ns[vport])
- return &steering->esw_ingress_root_ns[vport]->ns;
+ case MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_TX:
+ if (vport_idx >= steering->rdma_transport_tx_vports)
+ return NULL;
+
+ if (steering->rdma_transport_tx_root_ns &&
+ steering->rdma_transport_tx_root_ns[vport_idx])
+ return &steering->rdma_transport_tx_root_ns[vport_idx]->ns;
else
return NULL;
default:
return NULL;
}
}
+EXPORT_SYMBOL(mlx5_get_flow_vport_namespace);
static struct fs_prio *_fs_create_prio(struct mlx5_flow_namespace *ns,
unsigned int prio,
@@ -2963,6 +3246,211 @@ out_err:
return err;
}
+static int
+init_rdma_transport_rx_root_ns_one(struct mlx5_flow_steering *steering,
+ int vport_idx)
+{
+ struct mlx5_flow_root_namespace *root_ns;
+ struct fs_prio *prio;
+ int ret;
+ int i;
+
+ steering->rdma_transport_rx_root_ns[vport_idx] =
+ create_root_ns(steering, FS_FT_RDMA_TRANSPORT_RX);
+ if (!steering->rdma_transport_rx_root_ns[vport_idx])
+ return -ENOMEM;
+
+ root_ns = steering->rdma_transport_rx_root_ns[vport_idx];
+
+ for (i = 0; i < MLX5_RDMA_TRANSPORT_BYPASS_PRIO; i++) {
+ prio = fs_create_prio(&root_ns->ns, i, 1);
+ if (IS_ERR(prio)) {
+ ret = PTR_ERR(prio);
+ goto err;
+ }
+ }
+ set_prio_attrs(root_ns);
+ return 0;
+
+err:
+ cleanup_root_ns(root_ns);
+ return ret;
+}
+
+static int
+init_rdma_transport_tx_root_ns_one(struct mlx5_flow_steering *steering,
+ int vport_idx)
+{
+ struct mlx5_flow_root_namespace *root_ns;
+ struct fs_prio *prio;
+ int ret;
+ int i;
+
+ steering->rdma_transport_tx_root_ns[vport_idx] =
+ create_root_ns(steering, FS_FT_RDMA_TRANSPORT_TX);
+ if (!steering->rdma_transport_tx_root_ns[vport_idx])
+ return -ENOMEM;
+
+ root_ns = steering->rdma_transport_tx_root_ns[vport_idx];
+
+ for (i = 0; i < MLX5_RDMA_TRANSPORT_BYPASS_PRIO; i++) {
+ prio = fs_create_prio(&root_ns->ns, i, 1);
+ if (IS_ERR(prio)) {
+ ret = PTR_ERR(prio);
+ goto err;
+ }
+ }
+ set_prio_attrs(root_ns);
+ return 0;
+
+err:
+ cleanup_root_ns(root_ns);
+ return ret;
+}
+
+static bool mlx5_fs_ns_is_empty(struct mlx5_flow_namespace *ns)
+{
+ struct fs_prio *iter_prio;
+
+ fs_for_each_prio(iter_prio, ns) {
+ if (iter_prio->num_ft)
+ return false;
+ }
+
+ return true;
+}
+
+int mlx5_fs_set_root_dev(struct mlx5_core_dev *dev,
+ struct mlx5_core_dev *new_dev,
+ enum fs_flow_table_type table_type)
+{
+ struct mlx5_flow_root_namespace **root;
+ int total_vports;
+ int i;
+
+ switch (table_type) {
+ case FS_FT_RDMA_TRANSPORT_TX:
+ root = dev->priv.steering->rdma_transport_tx_root_ns;
+ total_vports = dev->priv.steering->rdma_transport_tx_vports;
+ break;
+ case FS_FT_RDMA_TRANSPORT_RX:
+ root = dev->priv.steering->rdma_transport_rx_root_ns;
+ total_vports = dev->priv.steering->rdma_transport_rx_vports;
+ break;
+ default:
+ WARN_ON_ONCE(true);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < total_vports; i++) {
+ mutex_lock(&root[i]->chain_lock);
+ if (!mlx5_fs_ns_is_empty(&root[i]->ns)) {
+ mutex_unlock(&root[i]->chain_lock);
+ goto err;
+ }
+ root[i]->dev = new_dev;
+ mutex_unlock(&root[i]->chain_lock);
+ }
+ return 0;
+err:
+ while (i--) {
+ mutex_lock(&root[i]->chain_lock);
+ root[i]->dev = dev;
+ mutex_unlock(&root[i]->chain_lock);
+ }
+ /* If you hit this error, destroy all flow tables and try again */
+ mlx5_core_err(dev, "Failed to set root device for RDMA TRANSPORT\n");
+ return -EINVAL;
+}
+EXPORT_SYMBOL(mlx5_fs_set_root_dev);
+
+static int init_rdma_transport_rx_root_ns(struct mlx5_flow_steering *steering)
+{
+ struct mlx5_core_dev *dev = steering->dev;
+ int total_vports;
+ int err;
+ int i;
+
+ /* In case the eswitch is not supported and we are working in legacy mode */
+ total_vports = mlx5_eswitch_get_total_vports(dev) ?: 1;
+
+ steering->rdma_transport_rx_root_ns =
+ kcalloc(total_vports,
+ sizeof(*steering->rdma_transport_rx_root_ns),
+ GFP_KERNEL);
+ if (!steering->rdma_transport_rx_root_ns)
+ return -ENOMEM;
+
+ for (i = 0; i < total_vports; i++) {
+ err = init_rdma_transport_rx_root_ns_one(steering, i);
+ if (err)
+ goto cleanup_root_ns;
+ }
+ steering->rdma_transport_rx_vports = total_vports;
+ return 0;
+
+cleanup_root_ns:
+ while (i--)
+ cleanup_root_ns(steering->rdma_transport_rx_root_ns[i]);
+ kfree(steering->rdma_transport_rx_root_ns);
+ steering->rdma_transport_rx_root_ns = NULL;
+ return err;
+}
+
+static int init_rdma_transport_tx_root_ns(struct mlx5_flow_steering *steering)
+{
+ struct mlx5_core_dev *dev = steering->dev;
+ int total_vports;
+ int err;
+ int i;
+
+ /* In case the eswitch is not supported and we are working in legacy mode */
+ total_vports = mlx5_eswitch_get_total_vports(dev) ?: 1;
+
+ steering->rdma_transport_tx_root_ns =
+ kcalloc(total_vports,
+ sizeof(*steering->rdma_transport_tx_root_ns),
+ GFP_KERNEL);
+ if (!steering->rdma_transport_tx_root_ns)
+ return -ENOMEM;
+
+ for (i = 0; i < total_vports; i++) {
+ err = init_rdma_transport_tx_root_ns_one(steering, i);
+ if (err)
+ goto cleanup_root_ns;
+ }
+ steering->rdma_transport_tx_vports = total_vports;
+ return 0;
+
+cleanup_root_ns:
+ while (i--)
+ cleanup_root_ns(steering->rdma_transport_tx_root_ns[i]);
+ kfree(steering->rdma_transport_tx_root_ns);
+ steering->rdma_transport_tx_root_ns = NULL;
+ return err;
+}
+
+static void cleanup_rdma_transport_roots_ns(struct mlx5_flow_steering *steering)
+{
+ int i;
+
+ if (steering->rdma_transport_rx_root_ns) {
+ for (i = 0; i < steering->rdma_transport_rx_vports; i++)
+ cleanup_root_ns(steering->rdma_transport_rx_root_ns[i]);
+
+ kfree(steering->rdma_transport_rx_root_ns);
+ steering->rdma_transport_rx_root_ns = NULL;
+ }
+
+ if (steering->rdma_transport_tx_root_ns) {
+ for (i = 0; i < steering->rdma_transport_tx_vports; i++)
+ cleanup_root_ns(steering->rdma_transport_tx_root_ns[i]);
+
+ kfree(steering->rdma_transport_tx_root_ns);
+ steering->rdma_transport_tx_root_ns = NULL;
+ }
+}
+
/* FT and tc chains are stored in the same array so we can re-use the
* mlx5_get_fdb_sub_ns() and tc api for FT chains.
* When creating a new ns for each chain store it in the first available slot.
@@ -3086,6 +3574,11 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
if (!steering->fdb_root_ns)
return -ENOMEM;
+ maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_DROP_ROOT, 1);
+ err = PTR_ERR_OR_ZERO(maj_prio);
+ if (err)
+ goto out_err;
+
err = create_fdb_bypass(steering);
if (err)
goto out_err;
@@ -3143,118 +3636,102 @@ out_err:
return err;
}
-static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
+static void
+mlx5_fs_remove_vport_acl_root_ns(struct xarray *esw_acl_root_ns, u16 vport_idx)
{
- struct fs_prio *prio;
-
- steering->esw_egress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
- if (!steering->esw_egress_root_ns[vport])
- return -ENOMEM;
+ struct mlx5_vport_acl_root_ns *vport_ns;
- /* create 1 prio*/
- prio = fs_create_prio(&steering->esw_egress_root_ns[vport]->ns, 0, 1);
- return PTR_ERR_OR_ZERO(prio);
+ vport_ns = xa_erase(esw_acl_root_ns, vport_idx);
+ if (vport_ns) {
+ cleanup_root_ns(vport_ns->root_ns);
+ kfree(vport_ns);
+ }
}
-static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
+static int
+mlx5_fs_add_vport_acl_root_ns(struct mlx5_flow_steering *steering,
+ struct xarray *esw_acl_root_ns,
+ enum fs_flow_table_type table_type,
+ u16 vport_idx)
{
+ struct mlx5_vport_acl_root_ns *vport_ns;
struct fs_prio *prio;
+ int err;
- steering->esw_ingress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
- if (!steering->esw_ingress_root_ns[vport])
- return -ENOMEM;
+ /* Sanity check that one of the intended xarrays is used */
+ if (WARN_ON(esw_acl_root_ns != &steering->esw_egress_root_ns &&
+ esw_acl_root_ns != &steering->esw_ingress_root_ns))
+ return -EINVAL;
- /* create 1 prio*/
- prio = fs_create_prio(&steering->esw_ingress_root_ns[vport]->ns, 0, 1);
- return PTR_ERR_OR_ZERO(prio);
-}
+ if (table_type != FS_FT_ESW_EGRESS_ACL &&
+ table_type != FS_FT_ESW_INGRESS_ACL) {
+ mlx5_core_err(steering->dev,
+ "Invalid table type %d for egress/ingress ACLs\n",
+ table_type);
+ return -EINVAL;
+ }
-int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports)
-{
- struct mlx5_flow_steering *steering = dev->priv.steering;
- int err;
- int i;
+ if (xa_load(esw_acl_root_ns, vport_idx))
+ return -EEXIST;
- steering->esw_egress_root_ns =
- kcalloc(total_vports,
- sizeof(*steering->esw_egress_root_ns),
- GFP_KERNEL);
- if (!steering->esw_egress_root_ns)
+ vport_ns = kzalloc(sizeof(*vport_ns), GFP_KERNEL);
+ if (!vport_ns)
return -ENOMEM;
- for (i = 0; i < total_vports; i++) {
- err = init_egress_acl_root_ns(steering, i);
- if (err)
- goto cleanup_root_ns;
+ vport_ns->root_ns = create_root_ns(steering, table_type);
+ if (!vport_ns->root_ns) {
+ err = -ENOMEM;
+ goto kfree_vport_ns;
}
- steering->esw_egress_acl_vports = total_vports;
+
+ /* create 1 prio */
+ prio = fs_create_prio(&vport_ns->root_ns->ns, 0, 1);
+ if (IS_ERR(prio)) {
+ err = PTR_ERR(prio);
+ goto cleanup_root_ns;
+ }
+
+ vport_ns->vport_idx = vport_idx;
+ err = xa_insert(esw_acl_root_ns, vport_idx, vport_ns, GFP_KERNEL);
+ if (err)
+ goto cleanup_root_ns;
return 0;
cleanup_root_ns:
- for (i--; i >= 0; i--)
- cleanup_root_ns(steering->esw_egress_root_ns[i]);
- kfree(steering->esw_egress_root_ns);
- steering->esw_egress_root_ns = NULL;
+ cleanup_root_ns(vport_ns->root_ns);
+kfree_vport_ns:
+ kfree(vport_ns);
return err;
}
-void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev)
+int mlx5_fs_vport_egress_acl_ns_add(struct mlx5_flow_steering *steering,
+ u16 vport_idx)
{
- struct mlx5_flow_steering *steering = dev->priv.steering;
- int i;
-
- if (!steering->esw_egress_root_ns)
- return;
-
- for (i = 0; i < steering->esw_egress_acl_vports; i++)
- cleanup_root_ns(steering->esw_egress_root_ns[i]);
-
- kfree(steering->esw_egress_root_ns);
- steering->esw_egress_root_ns = NULL;
+ return mlx5_fs_add_vport_acl_root_ns(steering,
+ &steering->esw_egress_root_ns,
+ FS_FT_ESW_EGRESS_ACL, vport_idx);
}
-int mlx5_fs_ingress_acls_init(struct mlx5_core_dev *dev, int total_vports)
+int mlx5_fs_vport_ingress_acl_ns_add(struct mlx5_flow_steering *steering,
+ u16 vport_idx)
{
- struct mlx5_flow_steering *steering = dev->priv.steering;
- int err;
- int i;
-
- steering->esw_ingress_root_ns =
- kcalloc(total_vports,
- sizeof(*steering->esw_ingress_root_ns),
- GFP_KERNEL);
- if (!steering->esw_ingress_root_ns)
- return -ENOMEM;
-
- for (i = 0; i < total_vports; i++) {
- err = init_ingress_acl_root_ns(steering, i);
- if (err)
- goto cleanup_root_ns;
- }
- steering->esw_ingress_acl_vports = total_vports;
- return 0;
-
-cleanup_root_ns:
- for (i--; i >= 0; i--)
- cleanup_root_ns(steering->esw_ingress_root_ns[i]);
- kfree(steering->esw_ingress_root_ns);
- steering->esw_ingress_root_ns = NULL;
- return err;
+ return mlx5_fs_add_vport_acl_root_ns(steering,
+ &steering->esw_ingress_root_ns,
+ FS_FT_ESW_INGRESS_ACL, vport_idx);
}
-void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev)
+void mlx5_fs_vport_egress_acl_ns_remove(struct mlx5_flow_steering *steering,
+ int vport_idx)
{
- struct mlx5_flow_steering *steering = dev->priv.steering;
- int i;
-
- if (!steering->esw_ingress_root_ns)
- return;
-
- for (i = 0; i < steering->esw_ingress_acl_vports; i++)
- cleanup_root_ns(steering->esw_ingress_root_ns[i]);
+ mlx5_fs_remove_vport_acl_root_ns(&steering->esw_egress_root_ns,
+ vport_idx);
+}
- kfree(steering->esw_ingress_root_ns);
- steering->esw_ingress_root_ns = NULL;
+void mlx5_fs_vport_ingress_acl_ns_remove(struct mlx5_flow_steering *steering,
+ int vport_idx)
+{
+ mlx5_fs_remove_vport_acl_root_ns(&steering->esw_ingress_root_ns,
+ vport_idx);
}
u32 mlx5_fs_get_capabilities(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type type)
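
The egress/ingress ACL namespaces above move from fixed, kcalloc()-sized per-vport arrays to xarrays keyed by vport index, so a root namespace is only allocated for vports that actually get an ACL. A distilled sketch of that lookup/insert/erase pattern (names simplified, not the driver's):

    #include <linux/xarray.h>
    #include <linux/slab.h>

    struct acl_ns {
            u16 vport_idx;
            /* ... root namespace state ... */
    };

    static DEFINE_XARRAY(acl_ns_xa);	/* keyed by vport index */

    /* Allocate lazily, only for vports that actually need an ACL namespace. */
    static int acl_ns_add(u16 vport_idx)
    {
            struct acl_ns *ns;
            int err;

            if (xa_load(&acl_ns_xa, vport_idx))
                    return -EEXIST;

            ns = kzalloc(sizeof(*ns), GFP_KERNEL);
            if (!ns)
                    return -ENOMEM;

            ns->vport_idx = vport_idx;
            err = xa_insert(&acl_ns_xa, vport_idx, ns, GFP_KERNEL);
            if (err)
                    kfree(ns);
            return err;
    }

    static void acl_ns_remove(u16 vport_idx)
    {
            struct acl_ns *ns = xa_erase(&acl_ns_xa, vport_idx);

            kfree(ns);
    }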
@@ -3300,45 +3777,54 @@ static int mlx5_fs_mode_validate(struct devlink *devlink, u32 id,
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
char *value = val.vstr;
- int err = 0;
+ u8 eswitch_mode;
- if (!strcmp(value, "dmfs")) {
+ eswitch_mode = mlx5_eswitch_mode(dev);
+ if (eswitch_mode == MLX5_ESWITCH_OFFLOADS) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Changing fs mode is not supported when eswitch offloads enabled.");
+ return -EOPNOTSUPP;
+ }
+
+ if (!strcmp(value, "dmfs"))
return 0;
- } else if (!strcmp(value, "smfs")) {
- u8 eswitch_mode;
- bool smfs_cap;
- eswitch_mode = mlx5_eswitch_mode(dev);
- smfs_cap = mlx5_fs_dr_is_supported(dev);
+ if (!strcmp(value, "smfs")) {
+ bool smfs_cap = mlx5_fs_dr_is_supported(dev);
if (!smfs_cap) {
- err = -EOPNOTSUPP;
NL_SET_ERR_MSG_MOD(extack,
"Software managed steering is not supported by current device");
+ return -EOPNOTSUPP;
}
+ } else if (!strcmp(value, "hmfs")) {
+ bool hmfs_cap = mlx5_fs_hws_is_supported(dev);
- else if (eswitch_mode == MLX5_ESWITCH_OFFLOADS) {
+ if (!hmfs_cap) {
NL_SET_ERR_MSG_MOD(extack,
- "Software managed steering is not supported when eswitch offloads enabled.");
- err = -EOPNOTSUPP;
+ "Hardware steering is not supported by current device");
+ return -EOPNOTSUPP;
}
} else {
NL_SET_ERR_MSG_MOD(extack,
- "Bad parameter: supported values are [\"dmfs\", \"smfs\"]");
- err = -EINVAL;
+ "Bad parameter: supported values are [\"dmfs\", \"smfs\", \"hmfs\"]");
+ return -EINVAL;
}
- return err;
+ return 0;
}
static int mlx5_fs_mode_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
enum mlx5_flow_steering_mode mode;
if (!strcmp(ctx->val.vstr, "smfs"))
mode = MLX5_FLOW_STEERING_MODE_SMFS;
+ else if (!strcmp(ctx->val.vstr, "hmfs"))
+ mode = MLX5_FLOW_STEERING_MODE_HMFS;
else
mode = MLX5_FLOW_STEERING_MODE_DMFS;
dev->priv.steering->mode = mode;
@@ -3347,14 +3833,22 @@ static int mlx5_fs_mode_set(struct devlink *devlink, u32 id,
}
static int mlx5_fs_mode_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
- if (dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS)
- strcpy(ctx->val.vstr, "smfs");
- else
- strcpy(ctx->val.vstr, "dmfs");
+ switch (dev->priv.steering->mode) {
+ case MLX5_FLOW_STEERING_MODE_SMFS:
+ strscpy(ctx->val.vstr, "smfs", sizeof(ctx->val.vstr));
+ break;
+ case MLX5_FLOW_STEERING_MODE_HMFS:
+ strscpy(ctx->val.vstr, "hmfs", sizeof(ctx->val.vstr));
+ break;
+ default:
+ strscpy(ctx->val.vstr, "dmfs", sizeof(ctx->val.vstr));
+ }
+
return 0;
}
@@ -3370,6 +3864,11 @@ void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev)
{
struct mlx5_flow_steering *steering = dev->priv.steering;
+ WARN_ON(!xa_empty(&steering->esw_egress_root_ns));
+ WARN_ON(!xa_empty(&steering->esw_ingress_root_ns));
+ xa_destroy(&steering->esw_egress_root_ns);
+ xa_destroy(&steering->esw_ingress_root_ns);
+
cleanup_root_ns(steering->root_ns);
cleanup_fdb_root_ns(steering);
cleanup_root_ns(steering->port_sel_root_ns);
@@ -3378,6 +3877,7 @@ void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev)
cleanup_root_ns(steering->rdma_rx_root_ns);
cleanup_root_ns(steering->rdma_tx_root_ns);
cleanup_root_ns(steering->egress_root_ns);
+ cleanup_rdma_transport_roots_ns(steering);
devl_params_unregister(priv_to_devlink(dev), mlx5_fs_params,
ARRAY_SIZE(mlx5_fs_params));
@@ -3429,8 +3929,7 @@ int mlx5_fs_core_init(struct mlx5_core_dev *dev)
goto err;
}
- if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) &&
- MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)) {
+ if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support)) {
err = init_rdma_rx_root_ns(steering);
if (err)
goto err;
@@ -3448,6 +3947,20 @@ int mlx5_fs_core_init(struct mlx5_core_dev *dev)
goto err;
}
+ if (MLX5_CAP_FLOWTABLE_RDMA_TRANSPORT_RX(dev, ft_support)) {
+ err = init_rdma_transport_rx_root_ns(steering);
+ if (err)
+ goto err;
+ }
+
+ if (MLX5_CAP_FLOWTABLE_RDMA_TRANSPORT_TX(dev, ft_support)) {
+ err = init_rdma_transport_tx_root_ns(steering);
+ if (err)
+ goto err;
+ }
+
+ xa_init(&steering->esw_egress_root_ns);
+ xa_init(&steering->esw_ingress_root_ns);
return 0;
err:
@@ -3469,6 +3982,7 @@ void mlx5_fs_core_free(struct mlx5_core_dev *dev)
int mlx5_fs_core_alloc(struct mlx5_core_dev *dev)
{
struct mlx5_flow_steering *steering;
+ char name[80];
int err = 0;
err = mlx5_init_fc_stats(dev);
@@ -3490,13 +4004,17 @@ int mlx5_fs_core_alloc(struct mlx5_core_dev *dev)
if (mlx5_fs_dr_is_supported(dev))
steering->mode = MLX5_FLOW_STEERING_MODE_SMFS;
+ else if (mlx5_fs_hws_is_supported(dev))
+ steering->mode = MLX5_FLOW_STEERING_MODE_HMFS;
else
steering->mode = MLX5_FLOW_STEERING_MODE_DMFS;
- steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
+ snprintf(name, sizeof(name), "%s-mlx5_fs_fgs", dev_name(dev->device));
+ steering->fgs_cache = kmem_cache_create(name,
sizeof(struct mlx5_flow_group), 0,
0, NULL);
- steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
+ snprintf(name, sizeof(name), "%s-mlx5_fs_ftes", dev_name(dev->device));
+ steering->ftes_cache = kmem_cache_create(name, sizeof(struct fs_fte), 0,
0, NULL);
if (!steering->ftes_cache || !steering->fgs_cache) {
err = -ENOMEM;
@@ -3589,14 +4107,16 @@ out:
}
EXPORT_SYMBOL(mlx5_fs_remove_rx_underlay_qpn);
-static struct mlx5_flow_root_namespace
-*get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type ns_type)
+struct mlx5_flow_root_namespace *
+mlx5_get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type ns_type)
{
struct mlx5_flow_namespace *ns;
if (ns_type == MLX5_FLOW_NAMESPACE_ESW_EGRESS ||
- ns_type == MLX5_FLOW_NAMESPACE_ESW_INGRESS)
- ns = mlx5_get_flow_vport_acl_namespace(dev, ns_type, 0);
+ ns_type == MLX5_FLOW_NAMESPACE_ESW_INGRESS ||
+ ns_type == MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_TX ||
+ ns_type == MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_RX)
+ ns = mlx5_get_flow_vport_namespace(dev, ns_type, 0);
else
ns = mlx5_get_flow_namespace(dev, ns_type);
if (!ns)
@@ -3613,7 +4133,7 @@ struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
struct mlx5_modify_hdr *modify_hdr;
int err;
- root = get_root_namespace(dev, ns_type);
+ root = mlx5_get_root_namespace(dev, ns_type);
if (!root)
return ERR_PTR(-EOPNOTSUPP);
@@ -3638,7 +4158,7 @@ void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
{
struct mlx5_flow_root_namespace *root;
- root = get_root_namespace(dev, modify_hdr->ns_type);
+ root = mlx5_get_root_namespace(dev, modify_hdr->ns_type);
if (WARN_ON(!root))
return;
root->cmds->modify_header_dealloc(root, modify_hdr);
@@ -3654,7 +4174,7 @@ struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
struct mlx5_flow_root_namespace *root;
int err;
- root = get_root_namespace(dev, ns_type);
+ root = mlx5_get_root_namespace(dev, ns_type);
if (!root)
return ERR_PTR(-EOPNOTSUPP);
@@ -3680,7 +4200,7 @@ void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
{
struct mlx5_flow_root_namespace *root;
- root = get_root_namespace(dev, pkt_reformat->ns_type);
+ root = mlx5_get_root_namespace(dev, pkt_reformat->ns_type);
if (WARN_ON(!root))
return;
root->cmds->packet_reformat_dealloc(root, pkt_reformat);
@@ -3702,7 +4222,7 @@ mlx5_create_match_definer(struct mlx5_core_dev *dev,
struct mlx5_flow_definer *definer;
int id;
- root = get_root_namespace(dev, ns_type);
+ root = mlx5_get_root_namespace(dev, ns_type);
if (!root)
return ERR_PTR(-EOPNOTSUPP);
@@ -3726,7 +4246,7 @@ void mlx5_destroy_match_definer(struct mlx5_core_dev *dev,
{
struct mlx5_flow_root_namespace *root;
- root = get_root_namespace(dev, definer->ns_type);
+ root = mlx5_get_root_namespace(dev, definer->ns_type);
if (WARN_ON(!root))
return;
@@ -3771,6 +4291,8 @@ int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
if (mode == MLX5_FLOW_STEERING_MODE_SMFS)
cmds = mlx5_fs_cmd_get_dr_cmds();
+ else if (mode == MLX5_FLOW_STEERING_MODE_HMFS)
+ cmds = mlx5_fs_cmd_get_hws_cmds();
else
cmds = mlx5_fs_cmd_get_fw_cmds();
if (!cmds)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 78eb6b7097e1..1c6591425260 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -37,7 +37,8 @@
#include <linux/mlx5/fs.h>
#include <linux/rhashtable.h>
#include <linux/llist.h>
-#include <steering/fs_dr.h>
+#include <steering/sws/fs_dr.h>
+#include <steering/hws/fs_hws.h>
#define FDB_TC_MAX_CHAIN 3
#define FDB_FT_CHAIN (FDB_TC_MAX_CHAIN + 1)
@@ -57,13 +58,15 @@ struct mlx5_flow_definer {
enum mlx5_flow_resource_owner {
MLX5_FLOW_RESOURCE_OWNER_FW,
MLX5_FLOW_RESOURCE_OWNER_SW,
+ MLX5_FLOW_RESOURCE_OWNER_HWS,
};
struct mlx5_modify_hdr {
enum mlx5_flow_namespace_type ns_type;
enum mlx5_flow_resource_owner owner;
union {
- struct mlx5_fs_dr_action action;
+ struct mlx5_fs_dr_action fs_dr_action;
+ struct mlx5_fs_hws_action fs_hws_action;
u32 id;
};
};
@@ -73,7 +76,8 @@ struct mlx5_pkt_reformat {
int reformat_type; /* from mlx5_ifc */
enum mlx5_flow_resource_owner owner;
union {
- struct mlx5_fs_dr_action action;
+ struct mlx5_fs_dr_action fs_dr_action;
+ struct mlx5_fs_hws_action fs_hws_action;
u32 id;
};
};
@@ -99,20 +103,6 @@ enum fs_node_type {
FS_TYPE_FLOW_DEST
};
-enum fs_flow_table_type {
- FS_FT_NIC_RX = 0x0,
- FS_FT_NIC_TX = 0x1,
- FS_FT_ESW_EGRESS_ACL = 0x2,
- FS_FT_ESW_INGRESS_ACL = 0x3,
- FS_FT_FDB = 0X4,
- FS_FT_SNIFFER_RX = 0X5,
- FS_FT_SNIFFER_TX = 0X6,
- FS_FT_RDMA_RX = 0X7,
- FS_FT_RDMA_TX = 0X8,
- FS_FT_PORT_SEL = 0X9,
- FS_FT_MAX_TYPE = FS_FT_PORT_SEL,
-};
-
enum fs_flow_table_op_mod {
FS_FT_OP_MOD_NORMAL,
FS_FT_OP_MOD_LAG_DEMUX,
@@ -124,13 +114,15 @@ enum fs_fte_status {
enum mlx5_flow_steering_mode {
MLX5_FLOW_STEERING_MODE_DMFS,
- MLX5_FLOW_STEERING_MODE_SMFS
+ MLX5_FLOW_STEERING_MODE_SMFS,
+ MLX5_FLOW_STEERING_MODE_HMFS,
};
enum mlx5_flow_steering_capabilty {
MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX = 1UL << 0,
MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX = 1UL << 1,
MLX5_FLOW_STEERING_CAP_MATCH_RANGES = 1UL << 2,
+ MLX5_FLOW_STEERING_CAP_DUPLICATE_MATCH = 1UL << 3,
};
struct mlx5_flow_steering {
@@ -141,16 +133,18 @@ struct mlx5_flow_steering {
struct mlx5_flow_root_namespace *root_ns;
struct mlx5_flow_root_namespace *fdb_root_ns;
struct mlx5_flow_namespace **fdb_sub_ns;
- struct mlx5_flow_root_namespace **esw_egress_root_ns;
- struct mlx5_flow_root_namespace **esw_ingress_root_ns;
+ struct xarray esw_egress_root_ns;
+ struct xarray esw_ingress_root_ns;
struct mlx5_flow_root_namespace *sniffer_tx_root_ns;
struct mlx5_flow_root_namespace *sniffer_rx_root_ns;
struct mlx5_flow_root_namespace *rdma_rx_root_ns;
struct mlx5_flow_root_namespace *rdma_tx_root_ns;
struct mlx5_flow_root_namespace *egress_root_ns;
struct mlx5_flow_root_namespace *port_sel_root_ns;
- int esw_egress_acl_vports;
- int esw_ingress_acl_vports;
+ struct mlx5_flow_root_namespace **rdma_transport_rx_root_ns;
+ struct mlx5_flow_root_namespace **rdma_transport_tx_root_ns;
+ int rdma_transport_rx_vports;
+ int rdma_transport_tx_vports;
};
struct fs_node {
@@ -187,9 +181,13 @@ struct mlx5_flow_handle {
/* Type of children is mlx5_flow_group */
struct mlx5_flow_table {
struct fs_node node;
- struct mlx5_fs_dr_table fs_dr_table;
+ union {
+ struct mlx5_fs_dr_table fs_dr_table;
+ struct mlx5_fs_hws_table fs_hws_table;
+ };
u32 id;
u16 vport;
+ u16 esw_owner_vhca_id;
unsigned int max_fte;
unsigned int level;
enum fs_flow_table_type type;
@@ -228,20 +226,32 @@ struct mlx5_ft_underlay_qp {
MLX5_BYTE_OFF(fte_match_param, \
MLX5_FTE_MATCH_PARAM_RESERVED)))
+struct fs_fte_action {
+ int modify_mask;
+ u32 dests_size;
+ u32 fwd_dests;
+ struct mlx5_flow_context flow_context;
+ struct mlx5_flow_act action;
+};
+
+struct fs_fte_dup {
+ struct list_head children;
+ struct fs_fte_action act_dests;
+};
+
/* Type of children is mlx5_flow_rule */
struct fs_fte {
struct fs_node node;
- struct mlx5_fs_dr_rule fs_dr_rule;
+ union {
+ struct mlx5_fs_dr_rule fs_dr_rule;
+ struct mlx5_fs_hws_rule fs_hws_rule;
+ };
u32 val[MLX5_ST_SZ_DW_MATCH_PARAM];
- u32 dests_size;
- u32 fwd_dests;
+ struct fs_fte_action act_dests;
+ struct fs_fte_dup *dup;
u32 index;
- struct mlx5_flow_context flow_context;
- struct mlx5_flow_act action;
enum fs_fte_status status;
- struct mlx5_fc *counter;
struct rhash_head hash;
- int modify_mask;
};
/* Type of children is mlx5_flow_table/namespace */
@@ -268,7 +278,10 @@ struct mlx5_flow_group_mask {
/* Type of children is fs_fte */
struct mlx5_flow_group {
struct fs_node node;
- struct mlx5_fs_dr_matcher fs_dr_matcher;
+ union {
+ struct mlx5_fs_dr_matcher fs_dr_matcher;
+ struct mlx5_fs_hws_matcher fs_hws_matcher;
+ };
struct mlx5_flow_group_mask mask;
u32 start_index;
u32 max_ftes;
@@ -281,7 +294,10 @@ struct mlx5_flow_group {
struct mlx5_flow_root_namespace {
struct mlx5_flow_namespace ns;
enum mlx5_flow_steering_mode mode;
- struct mlx5_fs_dr_domain fs_dr_domain;
+ union {
+ struct mlx5_fs_dr_domain fs_dr_domain;
+ struct mlx5_fs_hws_context fs_hws_context;
+ };
enum fs_flow_table_type table_type;
struct mlx5_core_dev *dev;
struct mlx5_flow_table *root_ft;
@@ -291,6 +307,37 @@ struct mlx5_flow_root_namespace {
const struct mlx5_flow_cmds *cmds;
};
+enum mlx5_fc_type {
+ MLX5_FC_TYPE_ACQUIRED = 0,
+ MLX5_FC_TYPE_LOCAL,
+};
+
+struct mlx5_fc_cache {
+ u64 packets;
+ u64 bytes;
+ u64 lastuse;
+};
+
+struct mlx5_fc {
+ u32 id;
+ bool aging;
+ enum mlx5_fc_type type;
+ struct mlx5_fc_bulk *bulk;
+ struct mlx5_fc_cache cache;
+ refcount_t fc_local_refcount;
+ /* last{packets,bytes} are used for calculating deltas since last reading. */
+ u64 lastpackets;
+ u64 lastbytes;
+};
+
+struct mlx5_fc_bulk {
+ struct mlx5_fs_bulk fs_bulk;
+ u32 base_id;
+ struct mlx5_fs_hws_data hws_data;
+ struct mlx5_fc fcs[];
+};
+
+u32 mlx5_fc_get_base_id(struct mlx5_fc *counter);
int mlx5_init_fc_stats(struct mlx5_core_dev *dev);
void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev);
void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
@@ -313,15 +360,22 @@ void mlx5_fs_core_free(struct mlx5_core_dev *dev);
int mlx5_fs_core_init(struct mlx5_core_dev *dev);
void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev);
-int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports);
-void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev);
-int mlx5_fs_ingress_acls_init(struct mlx5_core_dev *dev, int total_vports);
-void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev);
+int mlx5_fs_vport_egress_acl_ns_add(struct mlx5_flow_steering *steering,
+ u16 vport_idx);
+int mlx5_fs_vport_ingress_acl_ns_add(struct mlx5_flow_steering *steering,
+ u16 vport_idx);
+void mlx5_fs_vport_egress_acl_ns_remove(struct mlx5_flow_steering *steering,
+ int vport_idx);
+void mlx5_fs_vport_ingress_acl_ns_remove(struct mlx5_flow_steering *steering,
+ int vport_idx);
u32 mlx5_fs_get_capabilities(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type type);
struct mlx5_flow_root_namespace *find_root(struct fs_node *node);
+int mlx5_fs_get_packet_reformat_id(struct mlx5_pkt_reformat *pkt_reformat,
+ u32 *id);
+
#define fs_get_obj(v, _node) {v = container_of((_node), typeof(*v), node); }
#define fs_list_for_each_entry(pos, root) \
@@ -368,7 +422,11 @@ struct mlx5_flow_root_namespace *find_root(struct fs_node *node);
(type == FS_FT_RDMA_RX) ? MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) : \
(type == FS_FT_RDMA_TX) ? MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, cap) : \
(type == FS_FT_PORT_SEL) ? MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) : \
- (BUILD_BUG_ON_ZERO(FS_FT_PORT_SEL != FS_FT_MAX_TYPE))\
+ (type == FS_FT_FDB_RX) ? MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) : \
+ (type == FS_FT_FDB_TX) ? MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) : \
+ (type == FS_FT_RDMA_TRANSPORT_RX) ? MLX5_CAP_FLOWTABLE_RDMA_TRANSPORT_RX(mdev, cap) : \
+ (type == FS_FT_RDMA_TRANSPORT_TX) ? MLX5_CAP_FLOWTABLE_RDMA_TRANSPORT_TX(mdev, cap) : \
+ (BUILD_BUG_ON_ZERO(FS_FT_RDMA_TRANSPORT_TX != FS_FT_MAX_TYPE))\
)
#endif
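
Aside: the hunk above extends the per-table-type capability lookup and moves the BUILD_BUG_ON_ZERO() guard to the new last enum value. A standalone sketch of that pattern follows (the MY_FT_* names and MY_CAP() macro are invented for illustration, they are not the driver's macro):

#include <linux/build_bug.h>

enum my_ft_type { MY_FT_A, MY_FT_B, MY_FT_MAX_TYPE = MY_FT_B };

/* The last arm contributes 0 at runtime, but BUILD_BUG_ON_ZERO() makes
 * every use of MY_CAP() fail to compile once MY_FT_MAX_TYPE is no longer
 * the last type handled by the ternary chain.
 */
#define MY_CAP(type, cap_a, cap_b)			\
	((type) == MY_FT_A ? (cap_a) :			\
	 (type) == MY_FT_B ? (cap_b) :			\
	 (BUILD_BUG_ON_ZERO(MY_FT_B != MY_FT_MAX_TYPE)))

Because the condition is evaluated unconditionally at compile time, forgetting to extend the chain when a new table type is added breaks the build rather than silently returning 0, which is the same protection the driver macro relies on.
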
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 0c26d707eed2..83001eda3884 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -32,119 +32,35 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
-#include <linux/rbtree.h>
#include "mlx5_core.h"
#include "fs_core.h"
+#include "fs_pool.h"
#include "fs_cmd.h"
#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)
-#define MLX5_FC_BULK_QUERY_ALLOC_PERIOD msecs_to_jiffies(180 * 1000)
/* Max number of counters to query in bulk read is 32K */
#define MLX5_SW_MAX_COUNTERS_BULK BIT(15)
#define MLX5_INIT_COUNTERS_BULK 8
#define MLX5_FC_POOL_MAX_THRESHOLD BIT(18)
#define MLX5_FC_POOL_USED_BUFF_RATIO 10
-struct mlx5_fc_cache {
- u64 packets;
- u64 bytes;
- u64 lastuse;
+struct mlx5_fc_stats {
+ struct xarray counters;
+
+ struct workqueue_struct *wq;
+ struct delayed_work work;
+ unsigned long sampling_interval; /* jiffies */
+ u32 *bulk_query_out;
+ int bulk_query_len;
+ bool bulk_query_alloc_failed;
+ unsigned long next_bulk_query_alloc;
+ struct mlx5_fs_pool fc_pool;
};
-struct mlx5_fc {
- struct list_head list;
- struct llist_node addlist;
- struct llist_node dellist;
-
- /* last{packets,bytes} members are used when calculating the delta since
- * last reading
- */
- u64 lastpackets;
- u64 lastbytes;
-
- struct mlx5_fc_bulk *bulk;
- u32 id;
- bool aging;
-
- struct mlx5_fc_cache cache ____cacheline_aligned_in_smp;
-};
-
-static void mlx5_fc_pool_init(struct mlx5_fc_pool *fc_pool, struct mlx5_core_dev *dev);
-static void mlx5_fc_pool_cleanup(struct mlx5_fc_pool *fc_pool);
-static struct mlx5_fc *mlx5_fc_pool_acquire_counter(struct mlx5_fc_pool *fc_pool);
-static void mlx5_fc_pool_release_counter(struct mlx5_fc_pool *fc_pool, struct mlx5_fc *fc);
-
-/* locking scheme:
- *
- * It is the responsibility of the user to prevent concurrent calls or bad
- * ordering to mlx5_fc_create(), mlx5_fc_destroy() and accessing a reference
- * to struct mlx5_fc.
- * e.g en_tc.c is protected by RTNL lock of its caller, and will never call a
- * dump (access to struct mlx5_fc) after a counter is destroyed.
- *
- * access to counter list:
- * - create (user context)
- * - mlx5_fc_create() only adds to an addlist to be used by
- * mlx5_fc_stats_work(). addlist is a lockless single linked list
- * that doesn't require any additional synchronization when adding single
- * node.
- * - spawn thread to do the actual destroy
- *
- * - destroy (user context)
- * - add a counter to lockless dellist
- * - spawn thread to do the actual del
- *
- * - dump (user context)
- * user should not call dump after destroy
- *
- * - query (single thread workqueue context)
- * destroy/dump - no conflict (see destroy)
- * query/dump - packets and bytes might be inconsistent (since update is not
- * atomic)
- * query/create - no conflict (see create)
- * since every create/destroy spawn the work, only after necessary time has
- * elapsed, the thread will actually query the hardware.
- */
-
-static struct list_head *mlx5_fc_counters_lookup_next(struct mlx5_core_dev *dev,
- u32 id)
-{
- struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
- unsigned long next_id = (unsigned long)id + 1;
- struct mlx5_fc *counter;
- unsigned long tmp;
-
- rcu_read_lock();
- /* skip counters that are in idr, but not yet in counters list */
- idr_for_each_entry_continue_ul(&fc_stats->counters_idr,
- counter, tmp, next_id) {
- if (!list_empty(&counter->list))
- break;
- }
- rcu_read_unlock();
-
- return counter ? &counter->list : &fc_stats->counters;
-}
-
-static void mlx5_fc_stats_insert(struct mlx5_core_dev *dev,
- struct mlx5_fc *counter)
-{
- struct list_head *next = mlx5_fc_counters_lookup_next(dev, counter->id);
-
- list_add_tail(&counter->list, next);
-}
-
-static void mlx5_fc_stats_remove(struct mlx5_core_dev *dev,
- struct mlx5_fc *counter)
-{
- struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
-
- list_del(&counter->list);
-
- spin_lock(&fc_stats->counters_idr_lock);
- WARN_ON(!idr_remove(&fc_stats->counters_idr, counter->id));
- spin_unlock(&fc_stats->counters_idr_lock);
-}
+static void mlx5_fc_pool_init(struct mlx5_fs_pool *fc_pool, struct mlx5_core_dev *dev);
+static void mlx5_fc_pool_cleanup(struct mlx5_fs_pool *fc_pool);
+static struct mlx5_fc *mlx5_fc_pool_acquire_counter(struct mlx5_fs_pool *fc_pool);
+static void mlx5_fc_pool_release_counter(struct mlx5_fs_pool *fc_pool, struct mlx5_fc *fc);
static int get_init_bulk_query_len(struct mlx5_core_dev *dev)
{
@@ -174,47 +90,64 @@ static void update_counter_cache(int index, u32 *bulk_raw_data,
cache->lastuse = jiffies;
}
-static void mlx5_fc_stats_query_counter_range(struct mlx5_core_dev *dev,
- struct mlx5_fc *first,
- u32 last_id)
+/* Synchronization notes
+ *
+ * Access to counter array:
+ * - create - mlx5_fc_create() (user context)
+ * - inserts the counter into the xarray.
+ *
+ * - destroy - mlx5_fc_destroy() (user context)
+ * - erases the counter from the xarray and releases it.
+ *
+ * - query - mlx5_fc_query(), mlx5_fc_query_cached{,_raw}() (user context)
+ * - user should not access a counter after destroy.
+ *
+ * - bulk query (single thread workqueue context)
+ * - create: query relies on 'lastuse' to avoid updating counters added
+ * around the same time as the current bulk cmd.
+ * - destroy: destroyed counters will not be accessed, even if they are
+ * destroyed during a bulk query command.
+ */
+static void mlx5_fc_stats_query_all_counters(struct mlx5_core_dev *dev)
{
- struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
- bool query_more_counters = (first->id <= last_id);
- int cur_bulk_len = fc_stats->bulk_query_len;
+ struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats;
+ u32 bulk_len = fc_stats->bulk_query_len;
+ XA_STATE(xas, &fc_stats->counters, 0);
u32 *data = fc_stats->bulk_query_out;
- struct mlx5_fc *counter = first;
+ struct mlx5_fc *counter;
+ u32 last_bulk_id = 0;
+ u64 bulk_query_time;
u32 bulk_base_id;
- int bulk_len;
int err;
- while (query_more_counters) {
- /* first id must be aligned to 4 when using bulk query */
- bulk_base_id = counter->id & ~0x3;
-
- /* number of counters to query inc. the last counter */
- bulk_len = min_t(int, cur_bulk_len,
- ALIGN(last_id - bulk_base_id + 1, 4));
-
- err = mlx5_cmd_fc_bulk_query(dev, bulk_base_id, bulk_len,
- data);
- if (err) {
- mlx5_core_err(dev, "Error doing bulk query: %d\n", err);
- return;
- }
- query_more_counters = false;
-
- list_for_each_entry_from(counter, &fc_stats->counters, list) {
- int counter_index = counter->id - bulk_base_id;
- struct mlx5_fc_cache *cache = &counter->cache;
-
- if (counter->id >= bulk_base_id + bulk_len) {
- query_more_counters = true;
- break;
+ xas_lock(&xas);
+ xas_for_each(&xas, counter, U32_MAX) {
+ if (xas_retry(&xas, counter))
+ continue;
+ if (unlikely(counter->id >= last_bulk_id)) {
+ /* Start new bulk query. */
+ /* First id must be aligned to 4 when using bulk query. */
+ bulk_base_id = counter->id & ~0x3;
+ last_bulk_id = bulk_base_id + bulk_len;
+ /* The lock is released while querying the hw and reacquired after. */
+ xas_unlock(&xas);
+ /* The same id needs to be processed again in the next loop iteration. */
+ xas_reset(&xas);
+ bulk_query_time = jiffies;
+ err = mlx5_cmd_fc_bulk_query(dev, bulk_base_id, bulk_len, data);
+ if (err) {
+ mlx5_core_err(dev, "Error doing bulk query: %d\n", err);
+ return;
}
-
- update_counter_cache(counter_index, data, cache);
+ xas_lock(&xas);
+ continue;
}
+ /* Do not update counters added after bulk query was started. */
+ if (time_after64(bulk_query_time, counter->cache.lastuse))
+ update_counter_cache(counter->id - bulk_base_id, data,
+ &counter->cache);
}
+ xas_unlock(&xas);
}
static void mlx5_fc_free(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
@@ -225,7 +158,10 @@ static void mlx5_fc_free(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
static void mlx5_fc_release(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
- struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+ struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats;
+
+ if (WARN_ON(counter->type == MLX5_FC_TYPE_LOCAL))
+ return;
if (counter->bulk)
mlx5_fc_pool_release_counter(&fc_stats->fc_pool, counter);
@@ -233,85 +169,55 @@ static void mlx5_fc_release(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
mlx5_fc_free(dev, counter);
}
-static void mlx5_fc_stats_bulk_query_size_increase(struct mlx5_core_dev *dev)
+static void mlx5_fc_stats_bulk_query_buf_realloc(struct mlx5_core_dev *dev,
+ int bulk_query_len)
{
- struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
- int max_bulk_len = get_max_bulk_query_len(dev);
- unsigned long now = jiffies;
+ struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats;
u32 *bulk_query_out_tmp;
- int max_out_len;
+ int out_len;
- if (fc_stats->bulk_query_alloc_failed &&
- time_before(now, fc_stats->next_bulk_query_alloc))
- return;
-
- max_out_len = mlx5_cmd_fc_get_bulk_query_out_len(max_bulk_len);
- bulk_query_out_tmp = kzalloc(max_out_len, GFP_KERNEL);
+ out_len = mlx5_cmd_fc_get_bulk_query_out_len(bulk_query_len);
+ bulk_query_out_tmp = kvzalloc(out_len, GFP_KERNEL);
if (!bulk_query_out_tmp) {
mlx5_core_warn_once(dev,
- "Can't increase flow counters bulk query buffer size, insufficient memory, bulk_size(%d)\n",
- max_bulk_len);
- fc_stats->bulk_query_alloc_failed = true;
- fc_stats->next_bulk_query_alloc =
- now + MLX5_FC_BULK_QUERY_ALLOC_PERIOD;
+ "Can't increase flow counters bulk query buffer size, alloc failed, bulk_query_len(%d)\n",
+ bulk_query_len);
return;
}
- kfree(fc_stats->bulk_query_out);
+ kvfree(fc_stats->bulk_query_out);
fc_stats->bulk_query_out = bulk_query_out_tmp;
- fc_stats->bulk_query_len = max_bulk_len;
- if (fc_stats->bulk_query_alloc_failed) {
- mlx5_core_info(dev,
- "Flow counters bulk query buffer size increased, bulk_size(%d)\n",
- max_bulk_len);
- fc_stats->bulk_query_alloc_failed = false;
- }
+ fc_stats->bulk_query_len = bulk_query_len;
+ mlx5_core_info(dev,
+ "Flow counters bulk query buffer size increased, bulk_query_len(%d)\n",
+ bulk_query_len);
}
-static void mlx5_fc_stats_work(struct work_struct *work)
+static int mlx5_fc_num_counters(struct mlx5_fc_stats *fc_stats)
{
- struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev,
- priv.fc_stats.work.work);
- struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
- /* Take dellist first to ensure that counters cannot be deleted before
- * they are inserted.
- */
- struct llist_node *dellist = llist_del_all(&fc_stats->dellist);
- struct llist_node *addlist = llist_del_all(&fc_stats->addlist);
- struct mlx5_fc *counter = NULL, *last = NULL, *tmp;
- unsigned long now = jiffies;
-
- if (addlist || !list_empty(&fc_stats->counters))
- queue_delayed_work(fc_stats->wq, &fc_stats->work,
- fc_stats->sampling_interval);
-
- llist_for_each_entry(counter, addlist, addlist) {
- mlx5_fc_stats_insert(dev, counter);
- fc_stats->num_counters++;
- }
-
- llist_for_each_entry_safe(counter, tmp, dellist, dellist) {
- mlx5_fc_stats_remove(dev, counter);
+ struct mlx5_fc *counter;
+ int num_counters = 0;
+ unsigned long id;
- mlx5_fc_release(dev, counter);
- fc_stats->num_counters--;
- }
+ xa_for_each(&fc_stats->counters, id, counter)
+ num_counters++;
+ return num_counters;
+}
- if (fc_stats->bulk_query_len < get_max_bulk_query_len(dev) &&
- fc_stats->num_counters > get_init_bulk_query_len(dev))
- mlx5_fc_stats_bulk_query_size_increase(dev);
+static void mlx5_fc_stats_work(struct work_struct *work)
+{
+ struct mlx5_fc_stats *fc_stats = container_of(work, struct mlx5_fc_stats,
+ work.work);
+ struct mlx5_core_dev *dev = fc_stats->fc_pool.dev;
- if (time_before(now, fc_stats->next_query) ||
- list_empty(&fc_stats->counters))
- return;
- last = list_last_entry(&fc_stats->counters, struct mlx5_fc, list);
+ queue_delayed_work(fc_stats->wq, &fc_stats->work, fc_stats->sampling_interval);
- counter = list_first_entry(&fc_stats->counters, struct mlx5_fc,
- list);
- if (counter)
- mlx5_fc_stats_query_counter_range(dev, counter, last->id);
+ /* Grow the bulk query buffer to max if not maxed and enough counters are present. */
+ if (unlikely(fc_stats->bulk_query_len < get_max_bulk_query_len(dev) &&
+ mlx5_fc_num_counters(fc_stats) > get_init_bulk_query_len(dev)))
+ mlx5_fc_stats_bulk_query_buf_realloc(dev, get_max_bulk_query_len(dev));
- fc_stats->next_query = now + fc_stats->sampling_interval;
+ mlx5_fc_stats_query_all_counters(dev);
}
static struct mlx5_fc *mlx5_fc_single_alloc(struct mlx5_core_dev *dev)
@@ -334,7 +240,7 @@ static struct mlx5_fc *mlx5_fc_single_alloc(struct mlx5_core_dev *dev)
static struct mlx5_fc *mlx5_fc_acquire(struct mlx5_core_dev *dev, bool aging)
{
- struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+ struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats;
struct mlx5_fc *counter;
if (aging && MLX5_CAP_GEN(dev, flow_counter_bulk_alloc) != 0) {
@@ -346,16 +252,15 @@ static struct mlx5_fc *mlx5_fc_acquire(struct mlx5_core_dev *dev, bool aging)
return mlx5_fc_single_alloc(dev);
}
-struct mlx5_fc *mlx5_fc_create_ex(struct mlx5_core_dev *dev, bool aging)
+struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
{
struct mlx5_fc *counter = mlx5_fc_acquire(dev, aging);
- struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+ struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats;
int err;
if (IS_ERR(counter))
return counter;
- INIT_LIST_HEAD(&counter->list);
counter->aging = aging;
if (aging) {
@@ -365,18 +270,9 @@ struct mlx5_fc *mlx5_fc_create_ex(struct mlx5_core_dev *dev, bool aging)
counter->lastbytes = counter->cache.bytes;
counter->lastpackets = counter->cache.packets;
- idr_preload(GFP_KERNEL);
- spin_lock(&fc_stats->counters_idr_lock);
-
- err = idr_alloc_u32(&fc_stats->counters_idr, counter, &id, id,
- GFP_NOWAIT);
-
- spin_unlock(&fc_stats->counters_idr_lock);
- idr_preload_end();
- if (err)
+ err = xa_err(xa_store(&fc_stats->counters, id, counter, GFP_KERNEL));
+ if (err != 0)
goto err_out_alloc;
-
- llist_add(&counter->addlist, &fc_stats->addlist);
}
return counter;
@@ -385,16 +281,6 @@ err_out_alloc:
mlx5_fc_release(dev, counter);
return ERR_PTR(err);
}
-
-struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
-{
- struct mlx5_fc *counter = mlx5_fc_create_ex(dev, aging);
- struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
-
- if (aging)
- mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
- return counter;
-}
EXPORT_SYMBOL(mlx5_fc_create);
u32 mlx5_fc_id(struct mlx5_fc *counter)
@@ -405,39 +291,32 @@ EXPORT_SYMBOL(mlx5_fc_id);
void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
- struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+ struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats;
if (!counter)
return;
- if (counter->aging) {
- llist_add(&counter->dellist, &fc_stats->dellist);
- mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
- return;
- }
-
+ if (counter->aging)
+ xa_erase(&fc_stats->counters, counter->id);
mlx5_fc_release(dev, counter);
}
EXPORT_SYMBOL(mlx5_fc_destroy);
int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
{
- struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
- int init_bulk_len;
- int init_out_len;
+ struct mlx5_fc_stats *fc_stats;
+
+ fc_stats = kzalloc(sizeof(*fc_stats), GFP_KERNEL);
+ if (!fc_stats)
+ return -ENOMEM;
+ dev->priv.fc_stats = fc_stats;
- spin_lock_init(&fc_stats->counters_idr_lock);
- idr_init(&fc_stats->counters_idr);
- INIT_LIST_HEAD(&fc_stats->counters);
- init_llist_head(&fc_stats->addlist);
- init_llist_head(&fc_stats->dellist);
+ xa_init(&fc_stats->counters);
- init_bulk_len = get_init_bulk_query_len(dev);
- init_out_len = mlx5_cmd_fc_get_bulk_query_out_len(init_bulk_len);
- fc_stats->bulk_query_out = kzalloc(init_out_len, GFP_KERNEL);
+ /* Allocate initial (small) bulk query buffer. */
+ mlx5_fc_stats_bulk_query_buf_realloc(dev, get_init_bulk_query_len(dev));
if (!fc_stats->bulk_query_out)
- return -ENOMEM;
- fc_stats->bulk_query_len = init_bulk_len;
+ goto err_bulk;
fc_stats->wq = create_singlethread_workqueue("mlx5_fc");
if (!fc_stats->wq)
@@ -447,34 +326,35 @@ int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
INIT_DELAYED_WORK(&fc_stats->work, mlx5_fc_stats_work);
mlx5_fc_pool_init(&fc_stats->fc_pool, dev);
+ queue_delayed_work(fc_stats->wq, &fc_stats->work, MLX5_FC_STATS_PERIOD);
return 0;
err_wq_create:
- kfree(fc_stats->bulk_query_out);
+ kvfree(fc_stats->bulk_query_out);
+err_bulk:
+ kfree(fc_stats);
return -ENOMEM;
}
void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
{
- struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
- struct llist_node *tmplist;
+ struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats;
struct mlx5_fc *counter;
- struct mlx5_fc *tmp;
+ unsigned long id;
- cancel_delayed_work_sync(&dev->priv.fc_stats.work);
- destroy_workqueue(dev->priv.fc_stats.wq);
- dev->priv.fc_stats.wq = NULL;
+ cancel_delayed_work_sync(&fc_stats->work);
+ destroy_workqueue(fc_stats->wq);
+ fc_stats->wq = NULL;
- tmplist = llist_del_all(&fc_stats->addlist);
- llist_for_each_entry_safe(counter, tmp, tmplist, addlist)
- mlx5_fc_release(dev, counter);
-
- list_for_each_entry_safe(counter, tmp, &fc_stats->counters, list)
+ xa_for_each(&fc_stats->counters, id, counter) {
+ xa_erase(&fc_stats->counters, id);
mlx5_fc_release(dev, counter);
+ }
+ xa_destroy(&fc_stats->counters);
mlx5_fc_pool_cleanup(&fc_stats->fc_pool);
- idr_destroy(&fc_stats->counters_idr);
- kfree(fc_stats->bulk_query_out);
+ kvfree(fc_stats->bulk_query_out);
+ kfree(fc_stats);
}
int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
@@ -518,7 +398,7 @@ void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
struct delayed_work *dwork,
unsigned long delay)
{
- struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+ struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats;
queue_delayed_work(fc_stats->wq, dwork, delay);
}
@@ -526,21 +406,13 @@ void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
unsigned long interval)
{
- struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+ struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats;
fc_stats->sampling_interval = min_t(unsigned long, interval,
fc_stats->sampling_interval);
}
-/* Flow counter bluks */
-
-struct mlx5_fc_bulk {
- struct list_head pool_list;
- u32 base_id;
- int bulk_len;
- unsigned long *bitmask;
- struct mlx5_fc fcs[] __counted_by(bulk_len);
-};
+/* Flow counter bulks */
static void mlx5_fc_init(struct mlx5_fc *counter, struct mlx5_fc_bulk *bulk,
u32 id)
@@ -549,16 +421,16 @@ static void mlx5_fc_init(struct mlx5_fc *counter, struct mlx5_fc_bulk *bulk,
counter->id = id;
}
-static int mlx5_fc_bulk_get_free_fcs_amount(struct mlx5_fc_bulk *bulk)
+u32 mlx5_fc_get_base_id(struct mlx5_fc *counter)
{
- return bitmap_weight(bulk->bitmask, bulk->bulk_len);
+ return counter->bulk->base_id;
}
-static struct mlx5_fc_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev)
+static struct mlx5_fs_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev,
+ void *pool_ctx)
{
enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask;
- struct mlx5_fc_bulk *bulk;
- int err = -ENOMEM;
+ struct mlx5_fc_bulk *fc_bulk;
int bulk_len;
u32 base_id;
int i;
@@ -566,207 +438,160 @@ static struct mlx5_fc_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev)
alloc_bitmask = MLX5_CAP_GEN(dev, flow_counter_bulk_alloc);
bulk_len = alloc_bitmask > 0 ? MLX5_FC_BULK_NUM_FCS(alloc_bitmask) : 1;
- bulk = kvzalloc(struct_size(bulk, fcs, bulk_len), GFP_KERNEL);
- if (!bulk)
- goto err_alloc_bulk;
+ fc_bulk = kvzalloc(struct_size(fc_bulk, fcs, bulk_len), GFP_KERNEL);
+ if (!fc_bulk)
+ return NULL;
- bulk->bitmask = kvcalloc(BITS_TO_LONGS(bulk_len), sizeof(unsigned long),
- GFP_KERNEL);
- if (!bulk->bitmask)
- goto err_alloc_bitmask;
-
- err = mlx5_cmd_fc_bulk_alloc(dev, alloc_bitmask, &base_id);
- if (err)
- goto err_mlx5_cmd_bulk_alloc;
+ if (mlx5_fs_bulk_init(dev, &fc_bulk->fs_bulk, bulk_len))
+ goto fc_bulk_free;
- bulk->base_id = base_id;
- bulk->bulk_len = bulk_len;
- for (i = 0; i < bulk_len; i++) {
- mlx5_fc_init(&bulk->fcs[i], bulk, base_id + i);
- set_bit(i, bulk->bitmask);
- }
+ if (mlx5_cmd_fc_bulk_alloc(dev, alloc_bitmask, &base_id))
+ goto fs_bulk_cleanup;
+ fc_bulk->base_id = base_id;
+ for (i = 0; i < bulk_len; i++)
+ mlx5_fc_init(&fc_bulk->fcs[i], fc_bulk, base_id + i);
- return bulk;
+ refcount_set(&fc_bulk->hws_data.hws_action_refcount, 0);
+ mutex_init(&fc_bulk->hws_data.lock);
+ return &fc_bulk->fs_bulk;
-err_mlx5_cmd_bulk_alloc:
- kvfree(bulk->bitmask);
-err_alloc_bitmask:
- kvfree(bulk);
-err_alloc_bulk:
- return ERR_PTR(err);
+fs_bulk_cleanup:
+ mlx5_fs_bulk_cleanup(&fc_bulk->fs_bulk);
+fc_bulk_free:
+ kvfree(fc_bulk);
+ return NULL;
}
static int
-mlx5_fc_bulk_destroy(struct mlx5_core_dev *dev, struct mlx5_fc_bulk *bulk)
+mlx5_fc_bulk_destroy(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *fs_bulk)
{
- if (mlx5_fc_bulk_get_free_fcs_amount(bulk) < bulk->bulk_len) {
+ struct mlx5_fc_bulk *fc_bulk = container_of(fs_bulk,
+ struct mlx5_fc_bulk,
+ fs_bulk);
+
+ if (mlx5_fs_bulk_get_free_amount(fs_bulk) < fs_bulk->bulk_len) {
mlx5_core_err(dev, "Freeing bulk before all counters were released\n");
return -EBUSY;
}
- mlx5_cmd_fc_free(dev, bulk->base_id);
- kvfree(bulk->bitmask);
- kvfree(bulk);
+ mlx5_cmd_fc_free(dev, fc_bulk->base_id);
+ mlx5_fs_bulk_cleanup(fs_bulk);
+ kvfree(fc_bulk);
return 0;
}
-static struct mlx5_fc *mlx5_fc_bulk_acquire_fc(struct mlx5_fc_bulk *bulk)
+static void mlx5_fc_pool_update_threshold(struct mlx5_fs_pool *fc_pool)
{
- int free_fc_index = find_first_bit(bulk->bitmask, bulk->bulk_len);
-
- if (free_fc_index >= bulk->bulk_len)
- return ERR_PTR(-ENOSPC);
-
- clear_bit(free_fc_index, bulk->bitmask);
- return &bulk->fcs[free_fc_index];
-}
-
-static int mlx5_fc_bulk_release_fc(struct mlx5_fc_bulk *bulk, struct mlx5_fc *fc)
-{
- int fc_index = fc->id - bulk->base_id;
-
- if (test_bit(fc_index, bulk->bitmask))
- return -EINVAL;
-
- set_bit(fc_index, bulk->bitmask);
- return 0;
+ fc_pool->threshold = min_t(int, MLX5_FC_POOL_MAX_THRESHOLD,
+ fc_pool->used_units / MLX5_FC_POOL_USED_BUFF_RATIO);
}
/* Flow counters pool API */
-static void mlx5_fc_pool_init(struct mlx5_fc_pool *fc_pool, struct mlx5_core_dev *dev)
-{
- fc_pool->dev = dev;
- mutex_init(&fc_pool->pool_lock);
- INIT_LIST_HEAD(&fc_pool->fully_used);
- INIT_LIST_HEAD(&fc_pool->partially_used);
- INIT_LIST_HEAD(&fc_pool->unused);
- fc_pool->available_fcs = 0;
- fc_pool->used_fcs = 0;
- fc_pool->threshold = 0;
-}
+static const struct mlx5_fs_pool_ops mlx5_fc_pool_ops = {
+ .bulk_destroy = mlx5_fc_bulk_destroy,
+ .bulk_create = mlx5_fc_bulk_create,
+ .update_threshold = mlx5_fc_pool_update_threshold,
+};
-static void mlx5_fc_pool_cleanup(struct mlx5_fc_pool *fc_pool)
+static void
+mlx5_fc_pool_init(struct mlx5_fs_pool *fc_pool, struct mlx5_core_dev *dev)
{
- struct mlx5_core_dev *dev = fc_pool->dev;
- struct mlx5_fc_bulk *bulk;
- struct mlx5_fc_bulk *tmp;
-
- list_for_each_entry_safe(bulk, tmp, &fc_pool->fully_used, pool_list)
- mlx5_fc_bulk_destroy(dev, bulk);
- list_for_each_entry_safe(bulk, tmp, &fc_pool->partially_used, pool_list)
- mlx5_fc_bulk_destroy(dev, bulk);
- list_for_each_entry_safe(bulk, tmp, &fc_pool->unused, pool_list)
- mlx5_fc_bulk_destroy(dev, bulk);
+ mlx5_fs_pool_init(fc_pool, dev, &mlx5_fc_pool_ops, NULL);
}
-static void mlx5_fc_pool_update_threshold(struct mlx5_fc_pool *fc_pool)
+static void mlx5_fc_pool_cleanup(struct mlx5_fs_pool *fc_pool)
{
- fc_pool->threshold = min_t(int, MLX5_FC_POOL_MAX_THRESHOLD,
- fc_pool->used_fcs / MLX5_FC_POOL_USED_BUFF_RATIO);
+ mlx5_fs_pool_cleanup(fc_pool);
}
-static struct mlx5_fc_bulk *
-mlx5_fc_pool_alloc_new_bulk(struct mlx5_fc_pool *fc_pool)
+static struct mlx5_fc *
+mlx5_fc_pool_acquire_counter(struct mlx5_fs_pool *fc_pool)
{
- struct mlx5_core_dev *dev = fc_pool->dev;
- struct mlx5_fc_bulk *new_bulk;
+ struct mlx5_fs_pool_index pool_index = {};
+ struct mlx5_fc_bulk *fc_bulk;
+ int err;
- new_bulk = mlx5_fc_bulk_create(dev);
- if (!IS_ERR(new_bulk))
- fc_pool->available_fcs += new_bulk->bulk_len;
- mlx5_fc_pool_update_threshold(fc_pool);
- return new_bulk;
+ err = mlx5_fs_pool_acquire_index(fc_pool, &pool_index);
+ if (err)
+ return ERR_PTR(err);
+ fc_bulk = container_of(pool_index.fs_bulk, struct mlx5_fc_bulk, fs_bulk);
+ return &fc_bulk->fcs[pool_index.index];
}
static void
-mlx5_fc_pool_free_bulk(struct mlx5_fc_pool *fc_pool, struct mlx5_fc_bulk *bulk)
+mlx5_fc_pool_release_counter(struct mlx5_fs_pool *fc_pool, struct mlx5_fc *fc)
{
+ struct mlx5_fs_bulk *fs_bulk = &fc->bulk->fs_bulk;
+ struct mlx5_fs_pool_index pool_index = {};
struct mlx5_core_dev *dev = fc_pool->dev;
- fc_pool->available_fcs -= bulk->bulk_len;
- mlx5_fc_bulk_destroy(dev, bulk);
- mlx5_fc_pool_update_threshold(fc_pool);
+ pool_index.fs_bulk = fs_bulk;
+ pool_index.index = fc->id - fc->bulk->base_id;
+ if (mlx5_fs_pool_release_index(fc_pool, &pool_index))
+ mlx5_core_warn(dev, "Attempted to release a counter which is not acquired\n");
}
-static struct mlx5_fc *
-mlx5_fc_pool_acquire_from_list(struct list_head *src_list,
- struct list_head *next_list,
- bool move_non_full_bulk)
+/**
+ * mlx5_fc_local_create - Allocate mlx5_fc struct for a counter which
+ * was already acquired using its counter id and bulk data.
+ *
+ * @counter_id: id of the counter that was already acquired
+ * @offset: counter offset from bulk base
+ * @bulk_size: size of the bulk the counter was allocated from
+ *
+ * Return: Pointer to mlx5_fc on success, ERR_PTR otherwise.
+ */
+struct mlx5_fc *
+mlx5_fc_local_create(u32 counter_id, u32 offset, u32 bulk_size)
{
- struct mlx5_fc_bulk *bulk;
- struct mlx5_fc *fc;
+ struct mlx5_fc_bulk *fc_bulk;
+ struct mlx5_fc *counter;
- if (list_empty(src_list))
- return ERR_PTR(-ENODATA);
+ counter = kzalloc(sizeof(*counter), GFP_KERNEL);
+ if (!counter)
+ return ERR_PTR(-ENOMEM);
+ fc_bulk = kzalloc(sizeof(*fc_bulk), GFP_KERNEL);
+ if (!fc_bulk) {
+ kfree(counter);
+ return ERR_PTR(-ENOMEM);
+ }
- bulk = list_first_entry(src_list, struct mlx5_fc_bulk, pool_list);
- fc = mlx5_fc_bulk_acquire_fc(bulk);
- if (move_non_full_bulk || mlx5_fc_bulk_get_free_fcs_amount(bulk) == 0)
- list_move(&bulk->pool_list, next_list);
- return fc;
+ counter->type = MLX5_FC_TYPE_LOCAL;
+ counter->id = counter_id;
+ fc_bulk->base_id = counter_id - offset;
+ fc_bulk->fs_bulk.bulk_len = bulk_size;
+ refcount_set(&fc_bulk->hws_data.hws_action_refcount, 0);
+ mutex_init(&fc_bulk->hws_data.lock);
+ counter->bulk = fc_bulk;
+ refcount_set(&counter->fc_local_refcount, 1);
+ return counter;
}
+EXPORT_SYMBOL(mlx5_fc_local_create);
-static struct mlx5_fc *
-mlx5_fc_pool_acquire_counter(struct mlx5_fc_pool *fc_pool)
-{
- struct mlx5_fc_bulk *new_bulk;
- struct mlx5_fc *fc;
-
- mutex_lock(&fc_pool->pool_lock);
-
- fc = mlx5_fc_pool_acquire_from_list(&fc_pool->partially_used,
- &fc_pool->fully_used, false);
- if (IS_ERR(fc))
- fc = mlx5_fc_pool_acquire_from_list(&fc_pool->unused,
- &fc_pool->partially_used,
- true);
- if (IS_ERR(fc)) {
- new_bulk = mlx5_fc_pool_alloc_new_bulk(fc_pool);
- if (IS_ERR(new_bulk)) {
- fc = ERR_CAST(new_bulk);
- goto out;
- }
- fc = mlx5_fc_bulk_acquire_fc(new_bulk);
- list_add(&new_bulk->pool_list, &fc_pool->partially_used);
- }
- fc_pool->available_fcs--;
- fc_pool->used_fcs++;
-
-out:
- mutex_unlock(&fc_pool->pool_lock);
- return fc;
+void mlx5_fc_local_destroy(struct mlx5_fc *counter)
+{
+ kfree(counter->bulk);
+ kfree(counter);
}
+EXPORT_SYMBOL(mlx5_fc_local_destroy);
-static void
-mlx5_fc_pool_release_counter(struct mlx5_fc_pool *fc_pool, struct mlx5_fc *fc)
+void mlx5_fc_local_get(struct mlx5_fc *counter)
{
- struct mlx5_core_dev *dev = fc_pool->dev;
- struct mlx5_fc_bulk *bulk = fc->bulk;
- int bulk_free_fcs_amount;
+ if (!counter || counter->type != MLX5_FC_TYPE_LOCAL)
+ return;
- mutex_lock(&fc_pool->pool_lock);
+ refcount_inc(&counter->fc_local_refcount);
+}
- if (mlx5_fc_bulk_release_fc(bulk, fc)) {
- mlx5_core_warn(dev, "Attempted to release a counter which is not acquired\n");
- goto unlock;
- }
+void mlx5_fc_local_put(struct mlx5_fc *counter)
+{
+ if (!counter || counter->type != MLX5_FC_TYPE_LOCAL)
+ return;
- fc_pool->available_fcs++;
- fc_pool->used_fcs--;
-
- bulk_free_fcs_amount = mlx5_fc_bulk_get_free_fcs_amount(bulk);
- if (bulk_free_fcs_amount == 1)
- list_move_tail(&bulk->pool_list, &fc_pool->partially_used);
- if (bulk_free_fcs_amount == bulk->bulk_len) {
- list_del(&bulk->pool_list);
- if (fc_pool->available_fcs > fc_pool->threshold)
- mlx5_fc_pool_free_bulk(fc_pool, bulk);
- else
- list_add(&bulk->pool_list, &fc_pool->unused);
- }
+ if (!refcount_dec_and_test(&counter->fc_local_refcount))
+ return;
-unlock:
- mutex_unlock(&fc_pool->pool_lock);
+ mlx5_fc_local_destroy(counter);
}
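
Aside: the mlx5_fc_local_create()/get()/put() helpers added above wrap a counter id that was allocated elsewhere in a refcounted struct mlx5_fc. A rough caller sketch based only on the signatures introduced in this patch (the id, offset and bulk size are placeholders, and the declarations are assumed to be exported alongside the other counter helpers in linux/mlx5/fs.h):

#include <linux/err.h>
#include <linux/mlx5/fs.h>

/* Hypothetical caller; values below are made up for illustration. */
static int example_wrap_local_counter(void)
{
	/* HW counter id 0x46 at offset 0x6 in a bulk of 128 (base id 0x40). */
	struct mlx5_fc *fc = mlx5_fc_local_create(0x46, 0x6, 128);

	if (IS_ERR(fc))
		return PTR_ERR(fc);

	mlx5_fc_local_get(fc);	/* a second user takes a reference */
	mlx5_fc_local_put(fc);	/* ...and drops it again */
	mlx5_fc_local_put(fc);	/* last reference: counter and bulk wrapper are freed */
	return 0;
}

Creation leaves fc_local_refcount at 1, so the creator's final mlx5_fc_local_put() (or an explicit mlx5_fc_local_destroy()) releases both allocations.
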
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c
index c14590acc772..f6abfd00d7e6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c
@@ -50,10 +50,12 @@ mlx5_ft_pool_get_avail_sz(struct mlx5_core_dev *dev, enum fs_flow_table_type tab
int i, found_i = -1;
for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--) {
- if (dev->priv.ft_pool->ft_left[i] && FT_POOLS[i] >= desired_size &&
+ if (dev->priv.ft_pool->ft_left[i] &&
+ (FT_POOLS[i] >= desired_size ||
+ desired_size == MLX5_FS_MAX_POOL_SIZE) &&
FT_POOLS[i] <= max_ft_size) {
found_i = i;
- if (desired_size != POOL_NEXT_SIZE)
+ if (desired_size != MLX5_FS_MAX_POOL_SIZE)
break;
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h
index 25f4274b372b..173e312db720 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h
@@ -7,8 +7,6 @@
#include <linux/mlx5/driver.h>
#include "fs_core.h"
-#define POOL_NEXT_SIZE 0
-
int mlx5_ft_pool_init(struct mlx5_core_dev *dev);
void mlx5_ft_pool_destroy(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.c
new file mode 100644
index 000000000000..f6c226664602
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.c
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include <mlx5_core.h>
+#include "fs_pool.h"
+
+int mlx5_fs_bulk_init(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *fs_bulk,
+ int bulk_len)
+{
+ int i;
+
+ fs_bulk->bitmask = kvcalloc(BITS_TO_LONGS(bulk_len), sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!fs_bulk->bitmask)
+ return -ENOMEM;
+
+ fs_bulk->bulk_len = bulk_len;
+ for (i = 0; i < bulk_len; i++)
+ set_bit(i, fs_bulk->bitmask);
+
+ return 0;
+}
+
+void mlx5_fs_bulk_cleanup(struct mlx5_fs_bulk *fs_bulk)
+{
+ kvfree(fs_bulk->bitmask);
+}
+
+int mlx5_fs_bulk_get_free_amount(struct mlx5_fs_bulk *bulk)
+{
+ return bitmap_weight(bulk->bitmask, bulk->bulk_len);
+}
+
+static int mlx5_fs_bulk_acquire_index(struct mlx5_fs_bulk *fs_bulk,
+ struct mlx5_fs_pool_index *pool_index)
+{
+ int free_index = find_first_bit(fs_bulk->bitmask, fs_bulk->bulk_len);
+
+ WARN_ON_ONCE(!pool_index || !fs_bulk);
+ if (free_index >= fs_bulk->bulk_len)
+ return -ENOSPC;
+
+ clear_bit(free_index, fs_bulk->bitmask);
+ pool_index->fs_bulk = fs_bulk;
+ pool_index->index = free_index;
+ return 0;
+}
+
+static int mlx5_fs_bulk_release_index(struct mlx5_fs_bulk *fs_bulk, int index)
+{
+ if (test_bit(index, fs_bulk->bitmask))
+ return -EINVAL;
+
+ set_bit(index, fs_bulk->bitmask);
+ return 0;
+}
+
+void mlx5_fs_pool_init(struct mlx5_fs_pool *pool, struct mlx5_core_dev *dev,
+ const struct mlx5_fs_pool_ops *ops, void *pool_ctx)
+{
+ WARN_ON_ONCE(!ops || !ops->bulk_destroy || !ops->bulk_create ||
+ !ops->update_threshold);
+ pool->dev = dev;
+ pool->pool_ctx = pool_ctx;
+ mutex_init(&pool->pool_lock);
+ INIT_LIST_HEAD(&pool->fully_used);
+ INIT_LIST_HEAD(&pool->partially_used);
+ INIT_LIST_HEAD(&pool->unused);
+ pool->available_units = 0;
+ pool->used_units = 0;
+ pool->threshold = 0;
+ pool->ops = ops;
+}
+
+void mlx5_fs_pool_cleanup(struct mlx5_fs_pool *pool)
+{
+ struct mlx5_core_dev *dev = pool->dev;
+ struct mlx5_fs_bulk *bulk;
+ struct mlx5_fs_bulk *tmp;
+
+ list_for_each_entry_safe(bulk, tmp, &pool->fully_used, pool_list)
+ pool->ops->bulk_destroy(dev, bulk);
+ list_for_each_entry_safe(bulk, tmp, &pool->partially_used, pool_list)
+ pool->ops->bulk_destroy(dev, bulk);
+ list_for_each_entry_safe(bulk, tmp, &pool->unused, pool_list)
+ pool->ops->bulk_destroy(dev, bulk);
+}
+
+static struct mlx5_fs_bulk *
+mlx5_fs_pool_alloc_new_bulk(struct mlx5_fs_pool *fs_pool)
+{
+ struct mlx5_core_dev *dev = fs_pool->dev;
+ struct mlx5_fs_bulk *new_bulk;
+
+ new_bulk = fs_pool->ops->bulk_create(dev, fs_pool->pool_ctx);
+ if (new_bulk)
+ fs_pool->available_units += new_bulk->bulk_len;
+ fs_pool->ops->update_threshold(fs_pool);
+ return new_bulk;
+}
+
+static void
+mlx5_fs_pool_free_bulk(struct mlx5_fs_pool *fs_pool, struct mlx5_fs_bulk *bulk)
+{
+ struct mlx5_core_dev *dev = fs_pool->dev;
+
+ fs_pool->available_units -= bulk->bulk_len;
+ fs_pool->ops->bulk_destroy(dev, bulk);
+ fs_pool->ops->update_threshold(fs_pool);
+}
+
+static int
+mlx5_fs_pool_acquire_from_list(struct list_head *src_list,
+ struct list_head *next_list,
+ bool move_non_full_bulk,
+ struct mlx5_fs_pool_index *pool_index)
+{
+ struct mlx5_fs_bulk *fs_bulk;
+ int err;
+
+ if (list_empty(src_list))
+ return -ENODATA;
+
+ fs_bulk = list_first_entry(src_list, struct mlx5_fs_bulk, pool_list);
+ err = mlx5_fs_bulk_acquire_index(fs_bulk, pool_index);
+ if (move_non_full_bulk || mlx5_fs_bulk_get_free_amount(fs_bulk) == 0)
+ list_move(&fs_bulk->pool_list, next_list);
+ return err;
+}
+
+int mlx5_fs_pool_acquire_index(struct mlx5_fs_pool *fs_pool,
+ struct mlx5_fs_pool_index *pool_index)
+{
+ struct mlx5_fs_bulk *new_bulk;
+ int err;
+
+ mutex_lock(&fs_pool->pool_lock);
+
+ err = mlx5_fs_pool_acquire_from_list(&fs_pool->partially_used,
+ &fs_pool->fully_used, false,
+ pool_index);
+ if (err)
+ err = mlx5_fs_pool_acquire_from_list(&fs_pool->unused,
+ &fs_pool->partially_used,
+ true, pool_index);
+ if (err) {
+ new_bulk = mlx5_fs_pool_alloc_new_bulk(fs_pool);
+ if (!new_bulk) {
+ err = -ENOENT;
+ goto out;
+ }
+ err = mlx5_fs_bulk_acquire_index(new_bulk, pool_index);
+ WARN_ON_ONCE(err);
+ list_add(&new_bulk->pool_list, &fs_pool->partially_used);
+ }
+ fs_pool->available_units--;
+ fs_pool->used_units++;
+
+out:
+ mutex_unlock(&fs_pool->pool_lock);
+ return err;
+}
+
+int mlx5_fs_pool_release_index(struct mlx5_fs_pool *fs_pool,
+ struct mlx5_fs_pool_index *pool_index)
+{
+ struct mlx5_fs_bulk *bulk = pool_index->fs_bulk;
+ int bulk_free_amount;
+ int err;
+
+ mutex_lock(&fs_pool->pool_lock);
+
+ /* TBD: would rather return void if the original code had no warn here */
+ err = mlx5_fs_bulk_release_index(bulk, pool_index->index);
+ if (err)
+ goto unlock;
+
+ fs_pool->available_units++;
+ fs_pool->used_units--;
+
+ bulk_free_amount = mlx5_fs_bulk_get_free_amount(bulk);
+ if (bulk_free_amount == 1)
+ list_move_tail(&bulk->pool_list, &fs_pool->partially_used);
+ if (bulk_free_amount == bulk->bulk_len) {
+ list_del(&bulk->pool_list);
+ if (fs_pool->available_units > fs_pool->threshold)
+ mlx5_fs_pool_free_bulk(fs_pool, bulk);
+ else
+ list_add(&bulk->pool_list, &fs_pool->unused);
+ }
+
+unlock:
+ mutex_unlock(&fs_pool->pool_lock);
+ return err;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.h
new file mode 100644
index 000000000000..f04ec3107498
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef __MLX5_FS_POOL_H__
+#define __MLX5_FS_POOL_H__
+
+#include <linux/mlx5/driver.h>
+
+struct mlx5_fs_bulk {
+ struct list_head pool_list;
+ int bulk_len;
+ unsigned long *bitmask;
+};
+
+struct mlx5_fs_pool_index {
+ struct mlx5_fs_bulk *fs_bulk;
+ int index;
+};
+
+struct mlx5_fs_pool;
+
+struct mlx5_fs_pool_ops {
+ int (*bulk_destroy)(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *bulk);
+ struct mlx5_fs_bulk * (*bulk_create)(struct mlx5_core_dev *dev,
+ void *pool_ctx);
+ void (*update_threshold)(struct mlx5_fs_pool *pool);
+};
+
+struct mlx5_fs_pool {
+ struct mlx5_core_dev *dev;
+ void *pool_ctx;
+ const struct mlx5_fs_pool_ops *ops;
+ struct mutex pool_lock; /* protects pool lists */
+ struct list_head fully_used;
+ struct list_head partially_used;
+ struct list_head unused;
+ int available_units;
+ int used_units;
+ int threshold;
+};
+
+int mlx5_fs_bulk_init(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *fs_bulk,
+ int bulk_len);
+void mlx5_fs_bulk_cleanup(struct mlx5_fs_bulk *fs_bulk);
+int mlx5_fs_bulk_get_free_amount(struct mlx5_fs_bulk *bulk);
+
+void mlx5_fs_pool_init(struct mlx5_fs_pool *pool, struct mlx5_core_dev *dev,
+ const struct mlx5_fs_pool_ops *ops, void *pool_ctx);
+void mlx5_fs_pool_cleanup(struct mlx5_fs_pool *pool);
+int mlx5_fs_pool_acquire_index(struct mlx5_fs_pool *fs_pool,
+ struct mlx5_fs_pool_index *pool_index);
+int mlx5_fs_pool_release_index(struct mlx5_fs_pool *fs_pool,
+ struct mlx5_fs_pool_index *pool_index);
+
+#endif /* __MLX5_FS_POOL_H__ */
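
Aside: a consumer of the new fs_pool API embeds struct mlx5_fs_bulk at the start of its own bulk type and supplies the three ops, as the flow-counter rework above does. A compressed, hypothetical sketch under those assumptions (my_bulk, the bulk length of 64 and the threshold ratio are invented; HW object allocation is elided):

#include <linux/slab.h>
#include "fs_pool.h"

struct my_bulk {
	struct mlx5_fs_bulk fs_bulk;	/* managed via mlx5_fs_bulk_*() */
	u32 base_id;
	u32 obj_ids[];
};

static struct mlx5_fs_bulk *my_bulk_create(struct mlx5_core_dev *dev,
					   void *pool_ctx)
{
	struct my_bulk *b = kvzalloc(struct_size(b, obj_ids, 64), GFP_KERNEL);

	if (!b)
		return NULL;
	if (mlx5_fs_bulk_init(dev, &b->fs_bulk, 64)) {
		kvfree(b);
		return NULL;
	}
	/* here: allocate 64 HW objects and fill b->base_id / b->obj_ids */
	return &b->fs_bulk;
}

static int my_bulk_destroy(struct mlx5_core_dev *dev,
			   struct mlx5_fs_bulk *fs_bulk)
{
	struct my_bulk *b = container_of(fs_bulk, struct my_bulk, fs_bulk);

	/* here: free the HW objects behind b->base_id */
	mlx5_fs_bulk_cleanup(fs_bulk);
	kvfree(b);
	return 0;
}

static void my_update_threshold(struct mlx5_fs_pool *pool)
{
	pool->threshold = pool->used_units / 10;
}

static const struct mlx5_fs_pool_ops my_pool_ops = {
	.bulk_create = my_bulk_create,
	.bulk_destroy = my_bulk_destroy,
	.update_threshold = my_update_threshold,
};

/* Typical flow:
 *	struct mlx5_fs_pool_index idx = {};
 *
 *	mlx5_fs_pool_init(&pool, dev, &my_pool_ops, NULL);
 *	if (!mlx5_fs_pool_acquire_index(&pool, &idx))
 *		use(container_of(idx.fs_bulk, struct my_bulk, fs_bulk), idx.index);
 *	mlx5_fs_pool_release_index(&pool, &idx);
 *	mlx5_fs_pool_cleanup(&pool);
 */
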
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index e7faf7e73ca4..eeb4437975f2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -224,6 +224,7 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
if (MLX5_CAP_GEN(dev, mcam_reg)) {
mlx5_get_mcam_access_reg_group(dev, MLX5_MCAM_REGS_FIRST_128);
mlx5_get_mcam_access_reg_group(dev, MLX5_MCAM_REGS_0x9100_0x917F);
+ mlx5_get_mcam_access_reg_group(dev, MLX5_MCAM_REGS_0x9180_0x91FF);
}
if (MLX5_CAP_GEN(dev, qcam_reg))
@@ -280,10 +281,29 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err;
}
+ if (MLX5_CAP_GEN(dev, shampo)) {
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_SHAMPO, HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+ }
+
+ if (MLX5_CAP_GEN(dev, adv_rdma)) {
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_ADV_RDMA,
+ HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+ }
+
+ if (MLX5_CAP_GEN(dev, psp)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_PSP);
+ if (err)
+ return err;
+ }
+
return 0;
}
-int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id)
+int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, u32 *sw_owner_id)
{
u32 in[MLX5_ST_SZ_DW(init_hca_in)] = {};
int i;
@@ -373,6 +393,10 @@ int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev)
do {
if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED)
break;
+ if (pci_channel_offline(dev->pdev)) {
+ mlx5_core_err(dev, "PCI channel offline, stop waiting for NIC IFC\n");
+ return -EACCES;
+ }
cond_resched();
} while (!time_after(jiffies, end));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index 2911aa34a5be..2bceb42c98cc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -6,13 +6,15 @@
#include "fw_reset.h"
#include "diag/fw_tracer.h"
#include "lib/tout.h"
+#include "sf/sf.h"
enum {
MLX5_FW_RESET_FLAGS_RESET_REQUESTED,
MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST,
MLX5_FW_RESET_FLAGS_PENDING_COMP,
MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS,
- MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED
+ MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED,
+ MLX5_FW_RESET_FLAGS_UNLOAD_EVENT,
};
struct mlx5_fw_reset {
@@ -25,7 +27,9 @@ struct mlx5_fw_reset {
struct work_struct reset_reload_work;
struct work_struct reset_now_work;
struct work_struct reset_abort_work;
+ struct delayed_work reset_timeout_work;
unsigned long reset_flags;
+ u8 reset_method;
struct timer_list timer;
struct completion done;
int ret;
@@ -34,6 +38,7 @@ struct mlx5_fw_reset {
enum {
MLX5_FW_RST_STATE_IDLE = 0,
MLX5_FW_RST_STATE_TOGGLE_REQ = 4,
+ MLX5_FW_RST_STATE_DROP_MODE = 5,
};
enum {
@@ -52,7 +57,8 @@ static void mlx5_set_fw_rst_ack(struct mlx5_core_dev *dev)
}
static int mlx5_fw_reset_enable_remote_dev_reset_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
struct mlx5_fw_reset *fw_reset;
@@ -67,7 +73,8 @@ static int mlx5_fw_reset_enable_remote_dev_reset_set(struct devlink *devlink, u3
}
static int mlx5_fw_reset_enable_remote_dev_reset_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
struct mlx5_fw_reset *fw_reset;
@@ -94,7 +101,7 @@ static int mlx5_reg_mfrl_set(struct mlx5_core_dev *dev, u8 reset_level,
}
static int mlx5_reg_mfrl_query(struct mlx5_core_dev *dev, u8 *reset_level,
- u8 *reset_type, u8 *reset_state)
+ u8 *reset_type, u8 *reset_state, u8 *reset_method)
{
u32 out[MLX5_ST_SZ_DW(mfrl_reg)] = {};
u32 in[MLX5_ST_SZ_DW(mfrl_reg)] = {};
@@ -110,13 +117,26 @@ static int mlx5_reg_mfrl_query(struct mlx5_core_dev *dev, u8 *reset_level,
*reset_type = MLX5_GET(mfrl_reg, out, reset_type);
if (reset_state)
*reset_state = MLX5_GET(mfrl_reg, out, reset_state);
+ if (reset_method)
+ *reset_method = MLX5_GET(mfrl_reg, out, pci_reset_req_method);
return 0;
}
int mlx5_fw_reset_query(struct mlx5_core_dev *dev, u8 *reset_level, u8 *reset_type)
{
- return mlx5_reg_mfrl_query(dev, reset_level, reset_type, NULL);
+ return mlx5_reg_mfrl_query(dev, reset_level, reset_type, NULL, NULL);
+}
+
+static int mlx5_fw_reset_get_reset_method(struct mlx5_core_dev *dev,
+ u8 *reset_method)
+{
+ if (!MLX5_CAP_GEN(dev, pcie_reset_using_hotreset_method)) {
+ *reset_method = MLX5_MFRL_REG_PCI_RESET_METHOD_LINK_TOGGLE;
+ return 0;
+ }
+
+ return mlx5_reg_mfrl_query(dev, NULL, NULL, NULL, reset_method);
}
static int mlx5_fw_reset_get_reset_state_err(struct mlx5_core_dev *dev,
@@ -124,7 +144,7 @@ static int mlx5_fw_reset_get_reset_state_err(struct mlx5_core_dev *dev,
{
u8 reset_state;
- if (mlx5_reg_mfrl_query(dev, NULL, NULL, &reset_state))
+ if (mlx5_reg_mfrl_query(dev, NULL, NULL, &reset_state, NULL))
goto out;
if (!reset_state)
@@ -203,23 +223,25 @@ int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev)
return mlx5_reg_mfrl_set(dev, MLX5_MFRL_REG_RESET_LEVEL0, 0, 0, false);
}
-static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev, bool unloaded)
+static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev)
{
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+ struct devlink *devlink = priv_to_devlink(dev);
/* if this is the driver that initiated the fw reset, devlink completed the reload */
if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags)) {
complete(&fw_reset->done);
} else {
- if (!unloaded)
- mlx5_unload_one(dev, false);
+ mlx5_sync_reset_unload_flow(dev, false);
if (mlx5_health_wait_pci_up(dev))
mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n");
else
mlx5_load_one(dev, true);
- devlink_remote_reload_actions_performed(priv_to_devlink(dev), 0,
+ devl_lock(devlink);
+ devlink_remote_reload_actions_performed(devlink, 0,
BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE));
+ devl_unlock(devlink);
}
}
@@ -227,7 +249,7 @@ static void mlx5_stop_sync_reset_poll(struct mlx5_core_dev *dev)
{
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
- del_timer_sync(&fw_reset->timer);
+ timer_delete_sync(&fw_reset->timer);
}
static int mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool poll_health)
@@ -239,6 +261,8 @@ static int mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool
return -EALREADY;
}
+ if (current_work() != &fw_reset->reset_timeout_work.work)
+ cancel_delayed_work(&fw_reset->reset_timeout_work);
mlx5_stop_sync_reset_poll(dev);
if (poll_health)
mlx5_start_health_poll(dev);
@@ -253,13 +277,14 @@ static void mlx5_sync_reset_reload_work(struct work_struct *work)
mlx5_sync_reset_clear_reset_requested(dev, false);
mlx5_enter_error_state(dev, true);
- mlx5_fw_reset_complete_reload(dev, false);
+ mlx5_fw_reset_complete_reload(dev);
}
#define MLX5_RESET_POLL_INTERVAL (HZ / 10)
static void poll_sync_reset(struct timer_list *t)
{
- struct mlx5_fw_reset *fw_reset = from_timer(fw_reset, t, timer);
+ struct mlx5_fw_reset *fw_reset = timer_container_of(fw_reset, t,
+ timer);
struct mlx5_core_dev *dev = fw_reset->dev;
u32 fatal_error;
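poll_sync_reset() above (and poll_health() later in this series) picks up the timer_container_of() rename; it is the same container_of() pattern that from_timer() used, just with a clearer name. A compilable sketch of how the callback recovers its enclosing structure, with stand-in types:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct my_timer { int pending; };

struct fw_reset {
	unsigned long flags;
	struct my_timer timer;
};

/* timer_container_of() is container_of() specialised for timer_list;
 * the callback recovers its enclosing object from the timer pointer. */
static void poll_cb(struct my_timer *t)
{
	struct fw_reset *fw_reset = container_of(t, struct fw_reset, timer);

	printf("flags = %lu\n", fw_reset->flags);
}

int main(void)
{
	struct fw_reset fr = { .flags = 42 };

	poll_cb(&fr.timer);
	return 0;
}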
@@ -309,6 +334,11 @@ static int mlx5_sync_reset_set_reset_requested(struct mlx5_core_dev *dev)
}
mlx5_stop_health_poll(dev, true);
mlx5_start_sync_reset_poll(dev);
+
+ if (!test_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS,
+ &fw_reset->reset_flags))
+ schedule_delayed_work(&fw_reset->reset_timeout_work,
+ msecs_to_jiffies(mlx5_tout_ms(dev, PCI_SYNC_UPDATE)));
return 0;
}
@@ -326,15 +356,12 @@ static void mlx5_fw_live_patch_event(struct work_struct *work)
}
#if IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)
-static int mlx5_check_hotplug_interrupt(struct mlx5_core_dev *dev)
+static int mlx5_check_hotplug_interrupt(struct mlx5_core_dev *dev,
+ struct pci_dev *bridge)
{
- struct pci_dev *bridge = dev->pdev->bus->self;
u16 reg16;
int err;
- if (!bridge)
- return -EOPNOTSUPP;
-
err = pcie_capability_read_word(bridge, PCI_EXP_SLTCTL, &reg16);
if (err)
return err;
@@ -394,20 +421,34 @@ static int mlx5_check_dev_ids(struct mlx5_core_dev *dev, u16 dev_id)
return 0;
}
-static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev)
+static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev,
+ u8 reset_method)
{
+ struct pci_dev *bridge = dev->pdev->bus->self;
u16 dev_id;
int err;
+ if (!bridge) {
+ mlx5_core_warn(dev, "PCI bus bridge is not accessible\n");
+ return false;
+ }
+
if (!MLX5_CAP_GEN(dev, fast_teardown)) {
mlx5_core_warn(dev, "fast teardown is not supported by firmware\n");
return false;
}
-#if IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)
- err = mlx5_check_hotplug_interrupt(dev);
- if (err)
+ if (!mlx5_core_is_ecpf(dev) && !mlx5_sf_table_empty(dev)) {
+ mlx5_core_warn(dev, "SFs should be removed before reset\n");
return false;
+ }
+
+#if IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)
+ if (reset_method != MLX5_MFRL_REG_PCI_RESET_METHOD_HOT_RESET) {
+ err = mlx5_check_hotplug_interrupt(dev, bridge);
+ if (err)
+ return false;
+ }
#endif
err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, &dev_id);
@@ -423,8 +464,12 @@ static void mlx5_sync_reset_request_event(struct work_struct *work)
struct mlx5_core_dev *dev = fw_reset->dev;
int err;
- if (test_bit(MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST, &fw_reset->reset_flags) ||
- !mlx5_is_reset_now_capable(dev)) {
+ err = mlx5_fw_reset_get_reset_method(dev, &fw_reset->reset_method);
+ if (err)
+ mlx5_core_warn(dev, "Failed reading MFRL, err %d\n", err);
+
+ if (err || test_bit(MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST, &fw_reset->reset_flags) ||
+ !mlx5_is_reset_now_capable(dev, fw_reset->reset_method)) {
err = mlx5_fw_reset_set_reset_sync_nack(dev);
mlx5_core_warn(dev, "PCI Sync FW Update Reset Nack %s",
err ? "Failed" : "Sent");
@@ -440,21 +485,15 @@ static void mlx5_sync_reset_request_event(struct work_struct *work)
mlx5_core_warn(dev, "PCI Sync FW Update Reset Ack. Device reset is expected.\n");
}
-static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev)
+static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev, u16 dev_id)
{
struct pci_bus *bridge_bus = dev->pdev->bus;
struct pci_dev *bridge = bridge_bus->self;
unsigned long timeout;
struct pci_dev *sdev;
- u16 reg16, dev_id;
int cap, err;
+ u16 reg16;
- err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, &dev_id);
- if (err)
- return pcibios_err_to_errno(err);
- err = mlx5_check_dev_ids(dev, dev_id);
- if (err)
- return err;
cap = pci_find_capability(bridge, PCI_CAP_ID_EXP);
if (!cap)
return -EOPNOTSUPP;
@@ -524,64 +563,61 @@ restore:
return err;
}
-static void mlx5_sync_reset_now_event(struct work_struct *work)
+static int mlx5_pci_reset_bus(struct mlx5_core_dev *dev)
{
- struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset,
- reset_now_work);
- struct mlx5_core_dev *dev = fw_reset->dev;
- int err;
+ if (!MLX5_CAP_GEN(dev, pcie_reset_using_hotreset_method))
+ return -EOPNOTSUPP;
- if (mlx5_sync_reset_clear_reset_requested(dev, false))
- return;
+ return pci_reset_bus(dev->pdev);
+}
- mlx5_core_warn(dev, "Sync Reset now. Device is going to reset.\n");
+static int mlx5_sync_pci_reset(struct mlx5_core_dev *dev, u8 reset_method)
+{
+ u16 dev_id;
+ int err;
- err = mlx5_cmd_fast_teardown_hca(dev);
- if (err) {
- mlx5_core_warn(dev, "Fast teardown failed, no reset done, err %d\n", err);
- goto done;
- }
+ err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, &dev_id);
+ if (err)
+ return pcibios_err_to_errno(err);
+ err = mlx5_check_dev_ids(dev, dev_id);
+ if (err)
+ return err;
- err = mlx5_pci_link_toggle(dev);
- if (err) {
- mlx5_core_warn(dev, "mlx5_pci_link_toggle failed, no reset done, err %d\n", err);
- set_bit(MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED, &fw_reset->reset_flags);
+ switch (reset_method) {
+ case MLX5_MFRL_REG_PCI_RESET_METHOD_LINK_TOGGLE:
+ err = mlx5_pci_link_toggle(dev, dev_id);
+ if (err)
+ mlx5_core_warn(dev, "mlx5_pci_link_toggle failed\n");
+ break;
+ case MLX5_MFRL_REG_PCI_RESET_METHOD_HOT_RESET:
+ err = mlx5_pci_reset_bus(dev);
+ if (err)
+ mlx5_core_warn(dev, "mlx5_pci_reset_bus failed\n");
+ break;
+ default:
+ return -EOPNOTSUPP;
}
- mlx5_enter_error_state(dev, true);
-done:
- fw_reset->ret = err;
- mlx5_fw_reset_complete_reload(dev, false);
+ return err;
}
-static void mlx5_sync_reset_unload_event(struct work_struct *work)
+void mlx5_sync_reset_unload_flow(struct mlx5_core_dev *dev, bool locked)
{
- struct mlx5_fw_reset *fw_reset;
- struct mlx5_core_dev *dev;
+ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
unsigned long timeout;
+ int poll_freq = 20;
bool reset_action;
u8 rst_state;
int err;
- fw_reset = container_of(work, struct mlx5_fw_reset, reset_unload_work);
- dev = fw_reset->dev;
-
- if (mlx5_sync_reset_clear_reset_requested(dev, false))
- return;
-
- mlx5_core_warn(dev, "Sync Reset Unload. Function is forced down.\n");
-
- err = mlx5_cmd_fast_teardown_hca(dev);
- if (err)
- mlx5_core_warn(dev, "Fast teardown failed, unloading, err %d\n", err);
- else
- mlx5_enter_error_state(dev, true);
-
- if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags))
+ if (locked)
mlx5_unload_one_devl_locked(dev, false);
else
mlx5_unload_one(dev, false);
+ if (!test_bit(MLX5_FW_RESET_FLAGS_UNLOAD_EVENT, &fw_reset->reset_flags))
+ return;
+
mlx5_set_fw_rst_ack(dev);
mlx5_core_warn(dev, "Sync Reset Unload done, device reset expected\n");
@@ -594,7 +630,12 @@ static void mlx5_sync_reset_unload_event(struct work_struct *work)
reset_action = true;
break;
}
- msleep(20);
+ if (rst_state == MLX5_FW_RST_STATE_DROP_MODE) {
+ mlx5_core_info(dev, "Sync Reset Drop mode ack\n");
+ mlx5_set_fw_rst_ack(dev);
+ poll_freq = 1000;
+ }
+ msleep(poll_freq);
} while (!time_after(jiffies, timeout));
if (!reset_action) {
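When the device reports MLX5_FW_RST_STATE_DROP_MODE, the loop above acknowledges it and relaxes the poll period from 20 ms to 1 s while still honouring the overall timeout. A rough userspace sketch of that back-off pattern; the state machine here is invented purely for illustration:

#include <stdio.h>
#include <unistd.h>

enum { ST_IDLE, ST_DROP_MODE, ST_READY };

static int read_state(int iter)
{
	return iter < 3 ? ST_IDLE : iter < 5 ? ST_DROP_MODE : ST_READY;
}

int main(void)
{
	int poll_ms = 20;	/* fast polling at first */
	int iter = 0, state;

	do {
		state = read_state(iter++);
		if (state == ST_READY)
			break;
		if (state == ST_DROP_MODE) {
			/* acknowledge drop mode and back off to 1 s polling */
			poll_ms = 1000;
		}
		usleep(poll_ms * 1000);
	} while (iter < 10);	/* stand-in for the jiffies timeout */

	printf("done after %d iterations, final poll interval %d ms\n", iter, poll_ms);
	return 0;
}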
@@ -604,17 +645,73 @@ static void mlx5_sync_reset_unload_event(struct work_struct *work)
goto done;
}
- mlx5_core_warn(dev, "Sync Reset, got reset action. rst_state = %u\n", rst_state);
+ mlx5_core_warn(dev, "Sync Reset, got reset action. rst_state = %u\n",
+ rst_state);
if (rst_state == MLX5_FW_RST_STATE_TOGGLE_REQ) {
- err = mlx5_pci_link_toggle(dev);
+ err = mlx5_sync_pci_reset(dev, fw_reset->reset_method);
if (err) {
- mlx5_core_warn(dev, "mlx5_pci_link_toggle failed, err %d\n", err);
+ mlx5_core_warn(dev, "mlx5_sync_pci_reset failed, err %d\n",
+ err);
fw_reset->ret = err;
}
}
done:
- mlx5_fw_reset_complete_reload(dev, true);
+ clear_bit(MLX5_FW_RESET_FLAGS_UNLOAD_EVENT, &fw_reset->reset_flags);
+}
+
+static void mlx5_sync_reset_now_event(struct work_struct *work)
+{
+ struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset,
+ reset_now_work);
+ struct mlx5_core_dev *dev = fw_reset->dev;
+ int err;
+
+ if (mlx5_sync_reset_clear_reset_requested(dev, false))
+ return;
+
+ mlx5_core_warn(dev, "Sync Reset now. Device is going to reset.\n");
+
+ err = mlx5_cmd_fast_teardown_hca(dev);
+ if (err) {
+ mlx5_core_warn(dev, "Fast teardown failed, no reset done, err %d\n", err);
+ goto done;
+ }
+
+ err = mlx5_sync_pci_reset(dev, fw_reset->reset_method);
+ if (err) {
+ mlx5_core_warn(dev, "mlx5_sync_pci_reset failed, no reset done, err %d\n", err);
+ set_bit(MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED, &fw_reset->reset_flags);
+ }
+
+ mlx5_enter_error_state(dev, true);
+done:
+ fw_reset->ret = err;
+ mlx5_fw_reset_complete_reload(dev);
+}
+
+static void mlx5_sync_reset_unload_event(struct work_struct *work)
+{
+ struct mlx5_fw_reset *fw_reset;
+ struct mlx5_core_dev *dev;
+ int err;
+
+ fw_reset = container_of(work, struct mlx5_fw_reset, reset_unload_work);
+ dev = fw_reset->dev;
+
+ if (mlx5_sync_reset_clear_reset_requested(dev, false))
+ return;
+
+ set_bit(MLX5_FW_RESET_FLAGS_UNLOAD_EVENT, &fw_reset->reset_flags);
+ mlx5_core_warn(dev, "Sync Reset Unload. Function is forced down.\n");
+
+ err = mlx5_cmd_fast_teardown_hca(dev);
+ if (err)
+ mlx5_core_warn(dev, "Fast teardown failed, unloading, err %d\n", err);
+ else
+ mlx5_enter_error_state(dev, true);
+
+ mlx5_fw_reset_complete_reload(dev);
}
static void mlx5_sync_reset_abort_event(struct work_struct *work)
@@ -651,6 +748,19 @@ static void mlx5_sync_reset_events_handle(struct mlx5_fw_reset *fw_reset, struct
}
}
+static void mlx5_sync_reset_timeout_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = container_of(work, struct delayed_work,
+ work);
+ struct mlx5_fw_reset *fw_reset =
+ container_of(dwork, struct mlx5_fw_reset, reset_timeout_work);
+ struct mlx5_core_dev *dev = fw_reset->dev;
+
+ if (mlx5_sync_reset_clear_reset_requested(dev, true))
+ return;
+ mlx5_core_warn(dev, "PCI Sync FW Update Reset Timeout.\n");
+}
+
static int fw_reset_event_notifier(struct notifier_block *nb, unsigned long action, void *data)
{
struct mlx5_fw_reset *fw_reset = mlx5_nb_cof(nb, struct mlx5_fw_reset, nb);
@@ -734,6 +844,7 @@ void mlx5_drain_fw_reset(struct mlx5_core_dev *dev)
cancel_work_sync(&fw_reset->reset_reload_work);
cancel_work_sync(&fw_reset->reset_now_work);
cancel_work_sync(&fw_reset->reset_abort_work);
+ cancel_delayed_work(&fw_reset->reset_timeout_work);
}
static const struct devlink_param mlx5_fw_reset_devlink_params[] = {
@@ -777,6 +888,8 @@ int mlx5_fw_reset_init(struct mlx5_core_dev *dev)
INIT_WORK(&fw_reset->reset_reload_work, mlx5_sync_reset_reload_work);
INIT_WORK(&fw_reset->reset_now_work, mlx5_sync_reset_now_event);
INIT_WORK(&fw_reset->reset_abort_work, mlx5_sync_reset_abort_event);
+ INIT_DELAYED_WORK(&fw_reset->reset_timeout_work,
+ mlx5_sync_reset_timeout_work);
init_completion(&fw_reset->done);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
index ea527d06a85f..d5b28525c960 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
@@ -12,6 +12,7 @@ int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel,
int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev);
int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev);
+void mlx5_sync_reset_unload_flow(struct mlx5_core_dev *dev, bool locked);
int mlx5_fw_reset_verify_fw_complete(struct mlx5_core_dev *dev,
struct netlink_ext_ack *extack);
void mlx5_fw_reset_events_start(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index ad38e31822df..aeeb136f5ebc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -96,6 +96,11 @@ static int mlx5_health_get_rfr(u8 rfr_severity)
return rfr_severity >> MLX5_RFR_BIT_OFFSET;
}
+static int mlx5_health_get_crr(u8 rfr_severity)
+{
+ return (rfr_severity >> MLX5_CRR_BIT_OFFSET) & 0x01;
+}
+
static bool sensor_fw_synd_rfr(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
@@ -248,6 +253,10 @@ recover_from_sw_reset:
do {
if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED)
break;
+ if (pci_channel_offline(dev->pdev)) {
+ mlx5_core_err(dev, "PCI channel offline, stop waiting for NIC IFC\n");
+ goto unlock;
+ }
msleep(20);
} while (!time_after(jiffies, end));
@@ -317,6 +326,10 @@ int mlx5_health_wait_pci_up(struct mlx5_core_dev *dev)
mlx5_core_warn(dev, "device is being removed, stop waiting for PCI\n");
return -ENODEV;
}
+ if (pci_channel_offline(dev->pdev)) {
+ mlx5_core_err(dev, "PCI channel offline, stop waiting for PCI\n");
+ return -EACCES;
+ }
msleep(100);
}
return 0;
@@ -367,6 +380,8 @@ static const char *hsynd_str(u8 synd)
return "High temperature";
case MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_PCI_POISONED_ERR:
return "ICM fetch PCI data poisoned error";
+ case MLX5_INITIAL_SEG_HEALTH_SYNDROME_TRUST_LOCKDOWN_ERR:
+ return "Trust lockdown error";
default:
return "unrecognized error";
}
@@ -434,12 +449,15 @@ static void print_health_info(struct mlx5_core_dev *dev)
mlx5_log(dev, severity, "time %u\n", ioread32be(&h->time));
mlx5_log(dev, severity, "hw_id 0x%08x\n", ioread32be(&h->hw_id));
mlx5_log(dev, severity, "rfr %d\n", mlx5_health_get_rfr(rfr_severity));
+ mlx5_log(dev, severity, "crr %d\n", mlx5_health_get_crr(rfr_severity));
mlx5_log(dev, severity, "severity %d (%s)\n", severity, mlx5_loglevel_str(severity));
mlx5_log(dev, severity, "irisc_index %d\n", ioread8(&h->irisc_index));
mlx5_log(dev, severity, "synd 0x%x: %s\n", ioread8(&h->synd),
hsynd_str(ioread8(&h->synd)));
mlx5_log(dev, severity, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd));
mlx5_log(dev, severity, "raw fw_ver 0x%08x\n", ioread32be(&h->fw_ver));
+ if (mlx5_health_get_crr(rfr_severity))
+ mlx5_core_warn(dev, "Cold reset is required\n");
}
static int
@@ -651,57 +669,64 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
}
}
+#define MLX5_FW_REPORTER_ECPF_GRACEFUL_PERIOD 180000
+#define MLX5_FW_REPORTER_PF_GRACEFUL_PERIOD 60000
+#define MLX5_FW_REPORTER_VF_GRACEFUL_PERIOD 30000
+#define MLX5_FW_REPORTER_DEFAULT_GRACEFUL_PERIOD \
+ MLX5_FW_REPORTER_VF_GRACEFUL_PERIOD
+
+static
+const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ecpf_ops = {
+ .name = "fw_fatal",
+ .recover = mlx5_fw_fatal_reporter_recover,
+ .dump = mlx5_fw_fatal_reporter_dump,
+ .default_graceful_period =
+ MLX5_FW_REPORTER_ECPF_GRACEFUL_PERIOD,
+};
+
static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_pf_ops = {
.name = "fw_fatal",
.recover = mlx5_fw_fatal_reporter_recover,
.dump = mlx5_fw_fatal_reporter_dump,
+ .default_graceful_period = MLX5_FW_REPORTER_PF_GRACEFUL_PERIOD,
};
static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ops = {
.name = "fw_fatal",
.recover = mlx5_fw_fatal_reporter_recover,
+ .default_graceful_period =
+ MLX5_FW_REPORTER_DEFAULT_GRACEFUL_PERIOD,
};
-#define MLX5_FW_REPORTER_ECPF_GRACEFUL_PERIOD 180000
-#define MLX5_FW_REPORTER_PF_GRACEFUL_PERIOD 60000
-#define MLX5_FW_REPORTER_VF_GRACEFUL_PERIOD 30000
-#define MLX5_FW_REPORTER_DEFAULT_GRACEFUL_PERIOD MLX5_FW_REPORTER_VF_GRACEFUL_PERIOD
-
void mlx5_fw_reporters_create(struct mlx5_core_dev *dev)
{
const struct devlink_health_reporter_ops *fw_fatal_ops;
struct mlx5_core_health *health = &dev->priv.health;
const struct devlink_health_reporter_ops *fw_ops;
struct devlink *devlink = priv_to_devlink(dev);
- u64 grace_period;
- fw_fatal_ops = &mlx5_fw_fatal_reporter_pf_ops;
fw_ops = &mlx5_fw_reporter_pf_ops;
if (mlx5_core_is_ecpf(dev)) {
- grace_period = MLX5_FW_REPORTER_ECPF_GRACEFUL_PERIOD;
+ fw_fatal_ops = &mlx5_fw_fatal_reporter_ecpf_ops;
} else if (mlx5_core_is_pf(dev)) {
- grace_period = MLX5_FW_REPORTER_PF_GRACEFUL_PERIOD;
+ fw_fatal_ops = &mlx5_fw_fatal_reporter_pf_ops;
} else {
/* VF or SF */
- grace_period = MLX5_FW_REPORTER_DEFAULT_GRACEFUL_PERIOD;
fw_fatal_ops = &mlx5_fw_fatal_reporter_ops;
fw_ops = &mlx5_fw_reporter_ops;
}
- health->fw_reporter =
- devl_health_reporter_create(devlink, fw_ops, 0, dev);
+ health->fw_reporter = devl_health_reporter_create(devlink, fw_ops, dev);
if (IS_ERR(health->fw_reporter))
- mlx5_core_warn(dev, "Failed to create fw reporter, err = %ld\n",
- PTR_ERR(health->fw_reporter));
-
- health->fw_fatal_reporter =
- devl_health_reporter_create(devlink,
- fw_fatal_ops,
- grace_period,
- dev);
+ mlx5_core_warn(dev, "Failed to create fw reporter, err = %pe\n",
+ health->fw_reporter);
+
+ health->fw_fatal_reporter = devl_health_reporter_create(devlink,
+ fw_fatal_ops,
+ dev);
if (IS_ERR(health->fw_fatal_reporter))
- mlx5_core_warn(dev, "Failed to create fw fatal reporter, err = %ld\n",
- PTR_ERR(health->fw_fatal_reporter));
+ mlx5_core_warn(dev, "Failed to create fw fatal reporter, err = %pe\n",
+ health->fw_fatal_reporter);
}
static void mlx5_fw_reporters_destroy(struct mlx5_core_dev *dev)
@@ -761,7 +786,8 @@ static void mlx5_health_log_ts_update(struct work_struct *work)
static void poll_health(struct timer_list *t)
{
- struct mlx5_core_dev *dev = from_timer(dev, t, priv.health.timer);
+ struct mlx5_core_dev *dev = timer_container_of(dev, t,
+ priv.health.timer);
struct mlx5_core_health *health = &dev->priv.health;
struct health_buffer __iomem *h = health->health;
u32 fatal_error;
@@ -791,14 +817,17 @@ static void poll_health(struct timer_list *t)
health->prev = count;
if (health->miss_counter == MAX_MISSES) {
mlx5_core_err(dev, "device's health compromised - reached miss count\n");
+ health->synd = ioread8(&h->synd);
print_health_info(dev);
queue_work(health->wq, &health->report_work);
}
prev_synd = health->synd;
health->synd = ioread8(&h->synd);
- if (health->synd && health->synd != prev_synd)
+ if (health->synd && health->synd != prev_synd) {
+ print_health_info(dev);
queue_work(health->wq, &health->report_work);
+ }
out:
mod_timer(&health->timer, get_next_poll_jiffies(dev));
@@ -826,7 +855,7 @@ void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health)
if (disable_health)
set_bit(MLX5_DROP_HEALTH_WORK, &health->flags);
- del_timer_sync(&health->timer);
+ timer_delete_sync(&health->timer);
}
void mlx5_start_health_fw_log_up(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/hwmon.c b/drivers/net/ethernet/mellanox/mlx5/core/hwmon.c
index 353f81dccd1c..4ba2636d7fb6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/hwmon.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/hwmon.c
@@ -416,3 +416,8 @@ void mlx5_hwmon_dev_unregister(struct mlx5_core_dev *mdev)
mlx5_hwmon_free(hwmon);
mdev->hwmon = NULL;
}
+
+const char *hwmon_get_sensor_name(struct mlx5_hwmon *hwmon, int channel)
+{
+ return hwmon->temp_channel_desc[channel].sensor_name;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/hwmon.h b/drivers/net/ethernet/mellanox/mlx5/core/hwmon.h
index 999654a9b9da..f38271c22c10 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/hwmon.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/hwmon.h
@@ -10,6 +10,7 @@
int mlx5_hwmon_dev_register(struct mlx5_core_dev *mdev);
void mlx5_hwmon_dev_unregister(struct mlx5_core_dev *mdev);
+const char *hwmon_get_sensor_name(struct mlx5_hwmon *hwmon, int channel);
#else
static inline int mlx5_hwmon_dev_register(struct mlx5_core_dev *mdev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index 779d92b762d3..3b2f54ca30a8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -74,7 +74,7 @@ static int mlx5i_set_ringparam(struct net_device *dev,
{
struct mlx5e_priv *priv = mlx5i_epriv(dev);
- return mlx5e_ethtool_set_ringparam(priv, param);
+ return mlx5e_ethtool_set_ringparam(priv, param, extack);
}
static void mlx5i_get_ringparam(struct net_device *dev,
@@ -132,11 +132,11 @@ static int mlx5i_get_coalesce(struct net_device *netdev,
{
struct mlx5e_priv *priv = mlx5i_epriv(netdev);
- return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal);
+ return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal, extack);
}
static int mlx5i_get_ts_info(struct net_device *netdev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct mlx5e_priv *priv = mlx5i_epriv(netdev);
@@ -215,7 +215,7 @@ static int mlx5i_get_link_ksettings(struct net_device *netdev,
int speed, ret;
ret = mlx5_query_ib_port_oper(mdev, &ib_link_width_oper, &ib_proto_oper,
- 1);
+ 1, 0);
if (ret)
return ret;
@@ -238,6 +238,23 @@ static u32 mlx5i_flow_type_mask(u32 flow_type)
return flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
}
+static int mlx5i_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+
+ return mlx5e_ethtool_set_rxfh_fields(priv, cmd, extack);
+}
+
+static int mlx5i_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *info)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+
+ return mlx5e_ethtool_get_rxfh_fields(priv, info);
+}
+
static int mlx5i_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
struct mlx5e_priv *priv = mlx5i_epriv(dev);
@@ -249,21 +266,18 @@ static int mlx5i_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
return mlx5e_ethtool_set_rxnfc(priv, cmd);
}
+static u32 mlx5i_get_rx_ring_count(struct net_device *dev)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+
+ return priv->channels.params.num_channels;
+}
+
static int mlx5i_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
u32 *rule_locs)
{
struct mlx5e_priv *priv = mlx5i_epriv(dev);
- /* ETHTOOL_GRXRINGS is needed by ethtool -x which is not part
- * of rxnfc. We keep this logic out of mlx5e_ethtool_get_rxnfc,
- * to avoid breaking "ethtool -x" when mlx5e_ethtool_get_rxnfc
- * is compiled out via CONFIG_MLX5_EN_RXNFC=n.
- */
- if (info->cmd == ETHTOOL_GRXRINGS) {
- info->data = priv->channels.params.num_channels;
- return 0;
- }
-
return mlx5e_ethtool_get_rxnfc(priv, info, rule_locs);
}
@@ -283,8 +297,11 @@ const struct ethtool_ops mlx5i_ethtool_ops = {
.get_coalesce = mlx5i_get_coalesce,
.set_coalesce = mlx5i_set_coalesce,
.get_ts_info = mlx5i_get_ts_info,
+ .get_rxfh_fields = mlx5i_get_rxfh_fields,
+ .set_rxfh_fields = mlx5i_set_rxfh_fields,
.get_rxnfc = mlx5i_get_rxnfc,
.set_rxnfc = mlx5i_set_rxnfc,
+ .get_rx_ring_count = mlx5i_get_rx_ring_count,
.get_link_ksettings = mlx5i_get_link_ksettings,
.get_link = ethtool_op_get_link,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index d77be1b4dd9c..0a6003fe60e9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -32,6 +32,7 @@
#include <rdma/ib_verbs.h>
#include <linux/mlx5/fs.h>
+#include <net/netdev_lock.h>
#include "en.h"
#include "en/params.h"
#include "ipoib.h"
@@ -44,6 +45,23 @@ static int mlx5i_open(struct net_device *netdev);
static int mlx5i_close(struct net_device *netdev);
static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu);
+int mlx5i_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_priv *epriv = mlx5i_epriv(dev);
+
+ return mlx5e_hwtstamp_set(epriv, config, extack);
+}
+
+int mlx5i_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct mlx5e_priv *epriv = mlx5i_epriv(dev);
+
+ return mlx5e_hwtstamp_get(epriv, config);
+}
+
static const struct net_device_ops mlx5i_netdev_ops = {
.ndo_open = mlx5i_open,
.ndo_stop = mlx5i_close,
@@ -51,7 +69,8 @@ static const struct net_device_ops mlx5i_netdev_ops = {
.ndo_init = mlx5i_dev_init,
.ndo_uninit = mlx5i_dev_cleanup,
.ndo_change_mtu = mlx5i_change_mtu,
- .ndo_eth_ioctl = mlx5i_ioctl,
+ .ndo_hwtstamp_get = mlx5i_hwtstamp_get,
+ .ndo_hwtstamp_set = mlx5i_hwtstamp_set,
};
/* IPoIB mlx5 netdev profile */
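The IPoIB netdev drops its ndo_eth_ioctl multiplexer in favour of the dedicated ndo_hwtstamp_get/ndo_hwtstamp_set callbacks, which receive a kernel_hwtstamp_config directly. A generic, compilable sketch of replacing a command-switch ioctl with per-operation callbacks; the structures are simplified stand-ins, not the kernel API:

#include <errno.h>
#include <stdio.h>

struct hwtstamp_config { int tx_type; int rx_filter; };

/* Old shape: one entry point demultiplexing on a command code. */
static int dev_ioctl(int cmd, struct hwtstamp_config *cfg)
{
	switch (cmd) {
	case 1: cfg->tx_type = 1; return 0;	/* "set" */
	case 2: return 0;			/* "get" */
	default: return -EOPNOTSUPP;
	}
}

/* New shape: dedicated callbacks, as with ndo_hwtstamp_get/set. */
struct dev_ops {
	int (*hwtstamp_set)(struct hwtstamp_config *cfg);
	int (*hwtstamp_get)(struct hwtstamp_config *cfg);
};

static int my_hwtstamp_set(struct hwtstamp_config *cfg) { cfg->tx_type = 1; return 0; }
static int my_hwtstamp_get(struct hwtstamp_config *cfg) { (void)cfg; return 0; }

int main(void)
{
	struct dev_ops ops = { .hwtstamp_set = my_hwtstamp_set, .hwtstamp_get = my_hwtstamp_get };
	struct hwtstamp_config cfg = { 0 };

	(void)dev_ioctl(1, &cfg);	/* legacy path */
	ops.hwtstamp_set(&cfg);		/* new path: no command code needed */
	printf("tx_type = %d\n", cfg.tx_type);
	return 0;
}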
@@ -102,6 +121,8 @@ int mlx5i_init(struct mlx5_core_dev *mdev, struct net_device *netdev)
netdev->netdev_ops = &mlx5i_netdev_ops;
netdev->ethtool_ops = &mlx5i_ethtool_ops;
+ netdev->request_ops_lock = true;
+ netdev_lockdep_set_classes(netdev);
return 0;
}
@@ -313,7 +334,7 @@ void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, u32 qpn)
int mlx5i_update_nic_rx(struct mlx5e_priv *priv)
{
- return mlx5e_refresh_tirs(priv, true, true);
+ return mlx5e_refresh_tirs(priv->mdev, true, true);
}
int mlx5i_create_tis(struct mlx5_core_dev *mdev, u32 underlay_qpn, u32 *tisn)
@@ -372,7 +393,7 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
mlx5e_fs_set_ns(priv->fs, ns, false);
err = mlx5e_arfs_create_tables(priv->fs, priv->rx_res,
- !!(priv->netdev->hw_features & NETIF_F_NTUPLE));
+ mlx5e_fs_has_arfs(priv->netdev));
if (err) {
netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
err);
@@ -391,8 +412,7 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
return 0;
err_destroy_arfs_tables:
- mlx5e_arfs_destroy_tables(priv->fs,
- !!(priv->netdev->hw_features & NETIF_F_NTUPLE));
+ mlx5e_arfs_destroy_tables(priv->fs, mlx5e_fs_has_arfs(priv->netdev));
return err;
}
@@ -400,14 +420,14 @@ err_destroy_arfs_tables:
static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
{
mlx5e_destroy_ttc_table(priv->fs);
- mlx5e_arfs_destroy_tables(priv->fs,
- !!(priv->netdev->hw_features & NETIF_F_NTUPLE));
+ mlx5e_arfs_destroy_tables(priv->fs, mlx5e_fs_has_arfs(priv->netdev));
mlx5e_ethtool_cleanup_steering(priv->fs);
}
static int mlx5i_init_rx(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
+ enum mlx5e_rx_res_features features;
int err;
priv->fs = mlx5e_fs_init(priv->profile, mdev,
@@ -426,7 +446,9 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
goto err_destroy_q_counters;
}
- priv->rx_res = mlx5e_rx_res_create(priv->mdev, 0, priv->max_nch, priv->drop_rq.rqn,
+ features = MLX5E_RX_RES_FEATURE_SELF_LB_BLOCK;
+ priv->rx_res = mlx5e_rx_res_create(priv->mdev, features, priv->max_nch,
+ priv->drop_rq.rqn,
&priv->channels.params.packet_merge,
priv->channels.params.num_channels);
if (IS_ERR(priv->rx_res)) {
@@ -531,7 +553,7 @@ static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu)
if (err)
goto out;
- netdev->mtu = new_params.sw_mtu;
+ WRITE_ONCE(netdev->mtu, new_params.sw_mtu);
out:
mutex_unlock(&priv->state_lock);
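netdev->mtu is now updated with WRITE_ONCE() because readers may load it without holding priv->state_lock; the annotation guarantees a single untorn store. A small sketch of the READ_ONCE/WRITE_ONCE pairing using GCC-style volatile-cast stand-ins, not the kernel macros themselves:

#include <stdio.h>

#define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE(x)       (*(volatile __typeof__(x) *)&(x))

struct netdev { unsigned int mtu; };

int main(void)
{
	struct netdev nd = { .mtu = 1500 };

	/* Writer side: performed under the state lock in the driver. */
	WRITE_ONCE(nd.mtu, 9000);

	/* Reader side: may run without the lock; sees the old or new
	 * value, never a torn one. */
	printf("mtu = %u\n", READ_ONCE(nd.mtu));
	return 0;
}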
@@ -556,20 +578,6 @@ int mlx5i_dev_init(struct net_device *dev)
return 0;
}
-int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- struct mlx5e_priv *priv = mlx5i_epriv(dev);
-
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return mlx5e_hwstamp_set(priv, ifr);
- case SIOCGHWTSTAMP:
- return mlx5e_hwstamp_get(priv, ifr);
- default:
- return -EOPNOTSUPP;
- }
-}
-
void mlx5i_dev_cleanup(struct net_device *dev)
{
struct mlx5e_priv *priv = mlx5i_epriv(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
index 2ab6437a1c49..d67d5a72bb41 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
@@ -88,7 +88,11 @@ struct net_device *mlx5i_pkey_get_netdev(struct net_device *netdev, u32 qpn);
/* Shared ndo functions */
int mlx5i_dev_init(struct net_device *dev);
void mlx5i_dev_cleanup(struct net_device *dev);
-int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+int mlx5i_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
+int mlx5i_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *config);
/* Parent profile functions */
int mlx5i_init(struct mlx5_core_dev *mdev, struct net_device *netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
index f87471306f6b..04444dad3a0d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
@@ -140,7 +140,6 @@ static int mlx5i_pkey_close(struct net_device *netdev);
static int mlx5i_pkey_dev_init(struct net_device *dev);
static void mlx5i_pkey_dev_cleanup(struct net_device *netdev);
static int mlx5i_pkey_change_mtu(struct net_device *netdev, int new_mtu);
-static int mlx5i_pkey_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static const struct net_device_ops mlx5i_pkey_netdev_ops = {
.ndo_open = mlx5i_pkey_open,
@@ -149,7 +148,8 @@ static const struct net_device_ops mlx5i_pkey_netdev_ops = {
.ndo_get_stats64 = mlx5i_get_stats,
.ndo_uninit = mlx5i_pkey_dev_cleanup,
.ndo_change_mtu = mlx5i_pkey_change_mtu,
- .ndo_eth_ioctl = mlx5i_pkey_ioctl,
+ .ndo_hwtstamp_get = mlx5i_hwtstamp_get,
+ .ndo_hwtstamp_set = mlx5i_hwtstamp_set,
};
/* Child NDOs */
@@ -184,11 +184,6 @@ static int mlx5i_pkey_dev_init(struct net_device *dev)
return mlx5i_dev_init(dev);
}
-static int mlx5i_pkey_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- return mlx5i_ioctl(dev, ifr, cmd);
-}
-
static void mlx5i_pkey_dev_cleanup(struct net_device *netdev)
{
mlx5i_parent_put(netdev);
@@ -280,7 +275,7 @@ static int mlx5i_pkey_change_mtu(struct net_device *netdev, int new_mtu)
struct mlx5e_priv *priv = mlx5i_epriv(netdev);
mutex_lock(&priv->state_lock);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
mutex_unlock(&priv->state_lock);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
index 612e666ec263..14d339eceb92 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
@@ -47,26 +47,41 @@ static int cpu_get_least_loaded(struct mlx5_irq_pool *pool,
static struct mlx5_irq *
irq_pool_request_irq(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc)
{
- struct irq_affinity_desc auto_desc = {};
+ struct irq_affinity_desc *auto_desc;
+ struct mlx5_irq *irq;
u32 irq_index;
int err;
+ auto_desc = kvzalloc(sizeof(*auto_desc), GFP_KERNEL);
+ if (!auto_desc)
+ return ERR_PTR(-ENOMEM);
+
err = xa_alloc(&pool->irqs, &irq_index, NULL, pool->xa_num_irqs, GFP_KERNEL);
- if (err)
+ if (err) {
+ kvfree(auto_desc);
return ERR_PTR(err);
+ }
+
if (pool->irqs_per_cpu) {
if (cpumask_weight(&af_desc->mask) > 1)
/* if req_mask contains more than one CPU, set the least loaded CPU
 * of req_mask
 */
cpumask_set_cpu(cpu_get_least_loaded(pool, &af_desc->mask),
- &auto_desc.mask);
+ &auto_desc->mask);
else
cpu_get(pool, cpumask_first(&af_desc->mask));
}
- return mlx5_irq_alloc(pool, irq_index,
- cpumask_empty(&auto_desc.mask) ? af_desc : &auto_desc,
- NULL);
+
+ irq = mlx5_irq_alloc(pool, irq_index,
+ cpumask_empty(&auto_desc->mask) ? af_desc : auto_desc,
+ NULL);
+ if (IS_ERR(irq))
+ xa_erase(&pool->irqs, irq_index);
+
+ kvfree(auto_desc);
+
+ return irq;
}
/* Looking for the IRQ with the smallest refcount that fits req_mask.
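irq_pool_request_irq() now heap-allocates the scratch irq_affinity_desc (a cpumask can be large with a high CONFIG_NR_CPUS) and erases the reserved xarray slot if the IRQ allocation fails. A compact sketch of that reserve-then-undo error handling, with invented helper names:

#include <stdio.h>
#include <stdlib.h>

/* Toy "slot table" standing in for the xarray reservation. */
static int slots[4];

static int slot_reserve(void)
{
	for (int i = 0; i < 4; i++)
		if (!slots[i]) { slots[i] = 1; return i; }
	return -1;
}

static void slot_release(int idx) { slots[idx] = 0; }

static void *irq_alloc(int fail) { return fail ? NULL : malloc(1); }

int main(void)
{
	void *scratch = calloc(1, 64);	/* heap, not stack, like the kvzalloc'd desc */
	if (!scratch)
		return 1;

	int idx = slot_reserve();
	void *irq = irq_alloc(/*fail=*/1);
	if (!irq)
		slot_release(idx);	/* undo the reservation on failure */

	free(scratch);			/* always freed, success or not */
	printf("slot %d %s\n", idx, irq ? "kept" : "released");
	free(irq);
	return 0;
}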
@@ -112,15 +127,18 @@ irq_pool_find_least_loaded(struct mlx5_irq_pool *pool, const struct cpumask *req
/**
* mlx5_irq_affinity_request - request an IRQ according to the given mask.
+ * @dev: mlx5 core device which is requesting the IRQ.
* @pool: IRQ pool to request from.
* @af_desc: affinity descriptor for this IRQ.
*
* This function returns a pointer to IRQ, or ERR_PTR in case of error.
*/
struct mlx5_irq *
-mlx5_irq_affinity_request(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc)
+mlx5_irq_affinity_request(struct mlx5_core_dev *dev, struct mlx5_irq_pool *pool,
+ struct irq_affinity_desc *af_desc)
{
struct mlx5_irq *least_loaded_irq, *new_irq;
+ int ret;
mutex_lock(&pool->lock);
least_loaded_irq = irq_pool_find_least_loaded(pool, &af_desc->mask);
@@ -132,8 +150,8 @@ mlx5_irq_affinity_request(struct mlx5_irq_pool *pool, struct irq_affinity_desc *
if (IS_ERR(new_irq)) {
if (!least_loaded_irq) {
/* We failed to create an IRQ and we didn't find an IRQ */
- mlx5_core_err(pool->dev, "Didn't find a matching IRQ. err = %ld\n",
- PTR_ERR(new_irq));
+ mlx5_core_err(pool->dev, "Didn't find a matching IRQ. err = %pe\n",
+ new_irq);
mutex_unlock(&pool->lock);
return new_irq;
}
@@ -153,17 +171,30 @@ out:
mlx5_irq_read_locked(least_loaded_irq) / MLX5_EQ_REFS_PER_IRQ);
unlock:
mutex_unlock(&pool->lock);
+ if (mlx5_irq_pool_is_sf_pool(pool)) {
+ ret = auxiliary_device_sysfs_irq_add(mlx5_sf_coredev_to_adev(dev),
+ mlx5_irq_get_irq(least_loaded_irq));
+ if (ret) {
+ mlx5_core_err(dev, "Failed to create sysfs entry for irq %d, ret = %d\n",
+ mlx5_irq_get_irq(least_loaded_irq), ret);
+ mlx5_irq_put(least_loaded_irq);
+ least_loaded_irq = ERR_PTR(ret);
+ }
+ }
return least_loaded_irq;
}
void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *irq)
{
- struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev);
+ struct mlx5_irq_pool *pool = mlx5_irq_get_pool(irq);
int cpu;
cpu = cpumask_first(mlx5_irq_get_affinity_mask(irq));
synchronize_irq(pci_irq_vector(pool->dev->pdev,
mlx5_irq_get_index(irq)));
+ if (mlx5_irq_pool_is_sf_pool(pool))
+ auxiliary_device_sysfs_irq_remove(mlx5_sf_coredev_to_adev(dev),
+ mlx5_irq_get_irq(irq));
if (mlx5_irq_put(irq))
if (pool->irqs_per_cpu)
cpu_put(pool, cpu);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c
index f4b777d4e108..62b6faa4276a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c
@@ -105,20 +105,20 @@ static int mapping_show(struct seq_file *file, void *priv)
struct mlx5_lag *ldev;
bool hash = false;
bool lag_active;
+ int i, idx = 0;
int num_ports;
- int i;
ldev = mlx5_lag_dev(dev);
mutex_lock(&ldev->lock);
lag_active = __mlx5_lag_is_active(ldev);
if (lag_active) {
if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &ldev->mode_flags)) {
- mlx5_infer_tx_enabled(&ldev->tracker, ldev->ports, ports,
+ mlx5_infer_tx_enabled(&ldev->tracker, ldev, ports,
&num_ports);
hash = true;
} else {
- for (i = 0; i < ldev->ports; i++)
- ports[i] = ldev->v2p_map[i];
+ mlx5_ldev_for_each(i, 0, ldev)
+ ports[idx++] = ldev->v2p_map[i];
num_ports = ldev->ports;
}
}
@@ -144,11 +144,8 @@ static int members_show(struct seq_file *file, void *priv)
ldev = mlx5_lag_dev(dev);
mutex_lock(&ldev->lock);
- for (i = 0; i < ldev->ports; i++) {
- if (!ldev->pf[i].dev)
- continue;
+ mlx5_ldev_for_each(i, 0, ldev)
seq_printf(file, "%s\n", dev_name(ldev->pf[i].dev->device));
- }
mutex_unlock(&ldev->lock);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index 69d482f7c5a2..1ac933cd8f02 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -35,6 +35,7 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/vport.h>
+#include "lib/mlx5.h"
#include "lib/devcom.h"
#include "mlx5_core.h"
#include "eswitch.h"
@@ -43,10 +44,6 @@
#include "mp.h"
#include "mpesw.h"
-enum {
- MLX5_LAG_EGRESS_PORT_1 = 1,
- MLX5_LAG_EGRESS_PORT_2,
-};
/* General purpose, use for short periods of time.
* Beware of lock dependencies (preferably, no locks should be acquired
@@ -72,7 +69,7 @@ static u8 lag_active_port_bits(struct mlx5_lag *ldev)
int num_enabled;
int idx;
- mlx5_infer_tx_enabled(&ldev->tracker, ldev->ports, enabled_ports,
+ mlx5_infer_tx_enabled(&ldev->tracker, ldev, enabled_ports,
&num_enabled);
for (idx = 0; idx < num_enabled; idx++)
active_port |= BIT_MASK(enabled_ports[idx]);
@@ -80,23 +77,30 @@ static u8 lag_active_port_bits(struct mlx5_lag *ldev)
return active_port;
}
-static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 *ports, int mode,
- unsigned long flags)
+static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, struct mlx5_lag *ldev,
+ int mode, unsigned long flags)
{
bool fdb_sel_mode = test_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE,
&flags);
int port_sel_mode = get_port_sel_mode(mode, flags);
u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {};
+ u8 *ports = ldev->v2p_map;
+ int idx0, idx1;
void *lag_ctx;
lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx);
MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG);
MLX5_SET(lagc, lag_ctx, fdb_selection_mode, fdb_sel_mode);
+ idx0 = mlx5_lag_get_dev_index_by_seq(ldev, 0);
+ idx1 = mlx5_lag_get_dev_index_by_seq(ldev, 1);
+
+ if (idx0 < 0 || idx1 < 0)
+ return -EINVAL;
switch (port_sel_mode) {
case MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY:
- MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[0]);
- MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[1]);
+ MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[idx0]);
+ MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[idx1]);
break;
case MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT:
if (!MLX5_CAP_PORT_SELECTION(dev, port_select_flow_table_bypass))
@@ -113,17 +117,23 @@ static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 *ports, int mode,
return mlx5_cmd_exec_in(dev, create_lag, in);
}
-static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, u8 num_ports,
+static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, struct mlx5_lag *ldev,
u8 *ports)
{
u32 in[MLX5_ST_SZ_DW(modify_lag_in)] = {};
void *lag_ctx = MLX5_ADDR_OF(modify_lag_in, in, ctx);
+ int idx0, idx1;
+
+ idx0 = mlx5_lag_get_dev_index_by_seq(ldev, 0);
+ idx1 = mlx5_lag_get_dev_index_by_seq(ldev, 1);
+ if (idx0 < 0 || idx1 < 0)
+ return -EINVAL;
MLX5_SET(modify_lag_in, in, opcode, MLX5_CMD_OP_MODIFY_LAG);
MLX5_SET(modify_lag_in, in, field_select, 0x1);
- MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[0]);
- MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[1]);
+ MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[idx0]);
+ MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[idx1]);
return mlx5_cmd_exec_in(dev, modify_lag, in);
}
@@ -148,33 +158,31 @@ int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev)
}
EXPORT_SYMBOL(mlx5_cmd_destroy_vport_lag);
-static void mlx5_infer_tx_disabled(struct lag_tracker *tracker, u8 num_ports,
+static void mlx5_infer_tx_disabled(struct lag_tracker *tracker, struct mlx5_lag *ldev,
u8 *ports, int *num_disabled)
{
int i;
*num_disabled = 0;
- for (i = 0; i < num_ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev)
if (!tracker->netdev_state[i].tx_enabled ||
!tracker->netdev_state[i].link_up)
ports[(*num_disabled)++] = i;
- }
}
-void mlx5_infer_tx_enabled(struct lag_tracker *tracker, u8 num_ports,
+void mlx5_infer_tx_enabled(struct lag_tracker *tracker, struct mlx5_lag *ldev,
u8 *ports, int *num_enabled)
{
int i;
*num_enabled = 0;
- for (i = 0; i < num_ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev)
if (tracker->netdev_state[i].tx_enabled &&
tracker->netdev_state[i].link_up)
ports[(*num_enabled)++] = i;
- }
if (*num_enabled == 0)
- mlx5_infer_tx_disabled(tracker, num_ports, ports, num_enabled);
+ mlx5_infer_tx_disabled(tracker, ldev, ports, num_enabled);
}
static void mlx5_lag_print_mapping(struct mlx5_core_dev *dev,
@@ -192,7 +200,7 @@ static void mlx5_lag_print_mapping(struct mlx5_core_dev *dev,
int j;
if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) {
- mlx5_infer_tx_enabled(tracker, ldev->ports, enabled_ports,
+ mlx5_infer_tx_enabled(tracker, ldev, enabled_ports,
&num_enabled);
for (i = 0; i < num_enabled; i++) {
err = scnprintf(buf + written, 4, "%d, ", enabled_ports[i] + 1);
@@ -203,7 +211,7 @@ static void mlx5_lag_print_mapping(struct mlx5_core_dev *dev,
buf[written - 2] = 0;
mlx5_core_info(dev, "lag map active ports: %s\n", buf);
} else {
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
for (j = 0; j < ldev->buckets; j++) {
idx = i * ldev->buckets + j;
err = scnprintf(buf + written, 10,
@@ -224,9 +232,13 @@ static void mlx5_do_bond_work(struct work_struct *work);
static void mlx5_ldev_free(struct kref *ref)
{
struct mlx5_lag *ldev = container_of(ref, struct mlx5_lag, ref);
+ struct net *net;
+
+ if (ldev->nb.notifier_call) {
+ net = read_pnet(&ldev->net);
+ unregister_netdevice_notifier_net(net, &ldev->nb);
+ }
- if (ldev->nb.notifier_call)
- unregister_netdevice_notifier_net(&init_net, &ldev->nb);
mlx5_lag_mp_cleanup(ldev);
cancel_delayed_work_sync(&ldev->bond_work);
destroy_workqueue(ldev->wq);
@@ -264,7 +276,8 @@ static struct mlx5_lag *mlx5_lag_dev_alloc(struct mlx5_core_dev *dev)
INIT_DELAYED_WORK(&ldev->bond_work, mlx5_do_bond_work);
ldev->nb.notifier_call = mlx5_lag_netdev_event;
- if (register_netdevice_notifier_net(&init_net, &ldev->nb)) {
+ write_pnet(&ldev->net, mlx5_core_net(dev));
+ if (register_netdevice_notifier_net(read_pnet(&ldev->net), &ldev->nb)) {
ldev->nb.notifier_call = NULL;
mlx5_core_err(dev, "Failed to register LAG netdev notifier\n");
}
@@ -286,13 +299,55 @@ int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
{
int i;
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
if (ldev->pf[i].netdev == ndev)
return i;
return -ENOENT;
}
+int mlx5_lag_get_dev_index_by_seq(struct mlx5_lag *ldev, int seq)
+{
+ int i, num = 0;
+
+ if (!ldev)
+ return -ENOENT;
+
+ mlx5_ldev_for_each(i, 0, ldev) {
+ if (num == seq)
+ return i;
+ num++;
+ }
+ return -ENOENT;
+}
+
+int mlx5_lag_num_devs(struct mlx5_lag *ldev)
+{
+ int i, num = 0;
+
+ if (!ldev)
+ return 0;
+
+ mlx5_ldev_for_each(i, 0, ldev) {
+ (void)i;
+ num++;
+ }
+ return num;
+}
+
+int mlx5_lag_num_netdevs(struct mlx5_lag *ldev)
+{
+ int i, num = 0;
+
+ if (!ldev)
+ return 0;
+
+ mlx5_ldev_for_each(i, 0, ldev)
+ if (ldev->pf[i].netdev)
+ num++;
+ return num;
+}
+
static bool __mlx5_lag_is_roce(struct mlx5_lag *ldev)
{
return ldev->mode == MLX5_LAG_MODE_ROCE;
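The LAG code stops iterating 0..ldev->ports and instead walks only the populated function slots via mlx5_ldev_for_each()/mlx5_ldev_for_each_reverse(); mlx5_lag_get_dev_index_by_seq() then maps a logical sequence number back to a slot index. An illustrative macro (not the driver's) showing the idea of skipping empty slots:

#include <stdio.h>

#define MAX_PORTS 4

struct ldev { void *pf[MAX_PORTS]; };

/* Stand-in for mlx5_ldev_for_each(): visit only populated slots,
 * starting from a given index. */
#define ldev_for_each(i, start, l) \
	for ((i) = (start); (i) < MAX_PORTS; (i)++) \
		if ((l)->pf[i])

int main(void)
{
	int a = 1, b = 2;
	struct ldev l = { .pf = { &a, NULL, &b, NULL } };
	int i, num = 0;

	ldev_for_each(i, 0, &l)
		num++;
	printf("populated ports: %d\n", num);	/* prints 2 */
	return 0;
}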
@@ -310,7 +365,7 @@ static bool __mlx5_lag_is_sriov(struct mlx5_lag *ldev)
* with mapping that points to active ports.
*/
static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
- u8 num_ports,
+ struct mlx5_lag *ldev,
u8 buckets,
u8 *ports)
{
@@ -323,7 +378,7 @@ static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
int i;
int j;
- for (i = 0; i < num_ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
if (tracker->netdev_state[i].tx_enabled &&
tracker->netdev_state[i].link_up)
enabled[enabled_ports_num++] = i;
@@ -334,15 +389,16 @@ static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
/* Use native mapping by default where each port's buckets
* point to the native port: 1 1 1 .. 1 2 2 2 ... 2 3 3 3 ... 3 etc
*/
- for (i = 0; i < num_ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev) {
for (j = 0; j < buckets; j++) {
idx = i * buckets + j;
- ports[idx] = MLX5_LAG_EGRESS_PORT_1 + i;
+ ports[idx] = i + 1;
}
+ }
/* If all ports are disabled/enabled keep native mapping */
- if (enabled_ports_num == num_ports ||
- disabled_ports_num == num_ports)
+ if (enabled_ports_num == ldev->ports ||
+ disabled_ports_num == ldev->ports)
return;
/* Go over the disabled ports and for each assign a random active port */
@@ -358,7 +414,7 @@ static bool mlx5_lag_has_drop_rule(struct mlx5_lag *ldev)
{
int i;
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
if (ldev->pf[i].has_drop)
return true;
return false;
@@ -368,7 +424,7 @@ static void mlx5_lag_drop_rule_cleanup(struct mlx5_lag *ldev)
{
int i;
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
if (!ldev->pf[i].has_drop)
continue;
@@ -396,7 +452,7 @@ static void mlx5_lag_drop_rule_setup(struct mlx5_lag *ldev,
if (!ldev->tracker.has_inactive)
return;
- mlx5_infer_tx_disabled(tracker, ldev->ports, disabled_ports, &num_disabled);
+ mlx5_infer_tx_disabled(tracker, ldev, disabled_ports, &num_disabled);
for (i = 0; i < num_disabled; i++) {
disabled_index = disabled_ports[i];
@@ -428,10 +484,15 @@ static int mlx5_cmd_modify_active_port(struct mlx5_core_dev *dev, u8 ports)
static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 *ports)
{
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ struct mlx5_core_dev *dev0;
u8 active_ports;
int ret;
+ if (idx < 0)
+ return -EINVAL;
+
+ dev0 = ldev->pf[idx].dev;
if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &ldev->mode_flags)) {
ret = mlx5_lag_port_sel_modify(ldev, ports);
if (ret ||
@@ -442,22 +503,58 @@ static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 *ports)
return mlx5_cmd_modify_active_port(dev0, active_ports);
}
- return mlx5_cmd_modify_lag(dev0, ldev->ports, ports);
+ return mlx5_cmd_modify_lag(dev0, ldev, ports);
+}
+
+static struct net_device *mlx5_lag_active_backup_get_netdev(struct mlx5_core_dev *dev)
+{
+ struct net_device *ndev = NULL;
+ struct mlx5_lag *ldev;
+ unsigned long flags;
+ int i, last_idx;
+
+ spin_lock_irqsave(&lag_lock, flags);
+ ldev = mlx5_lag_dev(dev);
+
+ if (!ldev)
+ goto unlock;
+
+ mlx5_ldev_for_each(i, 0, ldev)
+ if (ldev->tracker.netdev_state[i].tx_enabled)
+ ndev = ldev->pf[i].netdev;
+ if (!ndev) {
+ last_idx = mlx5_lag_get_dev_index_by_seq(ldev, ldev->ports - 1);
+ if (last_idx < 0)
+ goto unlock;
+ ndev = ldev->pf[last_idx].netdev;
+ }
+
+ dev_hold(ndev);
+
+unlock:
+ spin_unlock_irqrestore(&lag_lock, flags);
+
+ return ndev;
}
void mlx5_modify_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker)
{
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
u8 ports[MLX5_MAX_PORTS * MLX5_LAG_MAX_HASH_BUCKETS] = {};
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_core_dev *dev0;
int idx;
int err;
int i;
int j;
- mlx5_infer_tx_affinity_mapping(tracker, ldev->ports, ldev->buckets, ports);
+ if (first_idx < 0)
+ return;
+
+ dev0 = ldev->pf[first_idx].dev;
+ mlx5_infer_tx_affinity_mapping(tracker, ldev, ldev->buckets, ports);
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
for (j = 0; j < ldev->buckets; j++) {
idx = i * ldev->buckets + j;
if (ports[idx] == ldev->v2p_map[idx])
@@ -477,15 +574,36 @@ void mlx5_modify_lag(struct mlx5_lag *ldev,
}
}
- if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP &&
- !(ldev->mode == MLX5_LAG_MODE_ROCE))
- mlx5_lag_drop_rule_setup(ldev, tracker);
+ if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
+ struct net_device *ndev = mlx5_lag_active_backup_get_netdev(dev0);
+
+ if (ldev->mode != MLX5_LAG_MODE_ROCE)
+ mlx5_lag_drop_rule_setup(ldev, tracker);
+ /* Only sriov and roce lag should have tracker->tx_type set so
+ * no need to check the mode
+ */
+ blocking_notifier_call_chain(&dev0->priv.lag_nh,
+ MLX5_DRIVER_EVENT_ACTIVE_BACKUP_LAG_CHANGE_LOWERSTATE,
+ ndev);
+ dev_put(ndev);
+ }
}
-static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev,
- unsigned long *flags)
+static int mlx5_lag_set_port_sel_mode(struct mlx5_lag *ldev,
+ enum mlx5_lag_mode mode,
+ unsigned long *flags)
{
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ struct mlx5_core_dev *dev0;
+
+ if (first_idx < 0)
+ return -EINVAL;
+
+ if (mode == MLX5_LAG_MODE_MPESW ||
+ mode == MLX5_LAG_MODE_MULTIPATH)
+ return 0;
+
+ dev0 = ldev->pf[first_idx].dev;
if (!MLX5_CAP_PORT_SELECTION(dev0, port_select_flow_table)) {
if (ldev->ports > 2)
@@ -501,30 +619,10 @@ static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev,
return 0;
}
-static void mlx5_lag_set_port_sel_mode_offloads(struct mlx5_lag *ldev,
- struct lag_tracker *tracker,
- enum mlx5_lag_mode mode,
- unsigned long *flags)
-{
- struct lag_func *dev0 = &ldev->pf[MLX5_LAG_P1];
-
- if (mode == MLX5_LAG_MODE_MPESW)
- return;
-
- if (MLX5_CAP_PORT_SELECTION(dev0->dev, port_select_flow_table) &&
- tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH) {
- if (ldev->ports > 2)
- ldev->buckets = MLX5_LAG_MAX_HASH_BUCKETS;
- set_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, flags);
- }
-}
-
static int mlx5_lag_set_flags(struct mlx5_lag *ldev, enum mlx5_lag_mode mode,
struct lag_tracker *tracker, bool shared_fdb,
unsigned long *flags)
{
- bool roce_lag = mode == MLX5_LAG_MODE_ROCE;
-
*flags = 0;
if (shared_fdb) {
set_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, flags);
@@ -534,11 +632,7 @@ static int mlx5_lag_set_flags(struct mlx5_lag *ldev, enum mlx5_lag_mode mode,
if (mode == MLX5_LAG_MODE_MPESW)
set_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE, flags);
- if (roce_lag)
- return mlx5_lag_set_port_sel_mode_roce(ldev, flags);
-
- mlx5_lag_set_port_sel_mode_offloads(ldev, tracker, mode, flags);
- return 0;
+ return mlx5_lag_set_port_sel_mode(ldev, mode, flags);
}
char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags)
@@ -555,12 +649,18 @@ char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags)
static int mlx5_lag_create_single_fdb(struct mlx5_lag *ldev)
{
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
- struct mlx5_eswitch *master_esw = dev0->priv.eswitch;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ struct mlx5_eswitch *master_esw;
+ struct mlx5_core_dev *dev0;
+ int i, j;
int err;
- int i;
- for (i = MLX5_LAG_P1 + 1; i < ldev->ports; i++) {
+ if (first_idx < 0)
+ return -EINVAL;
+
+ dev0 = ldev->pf[first_idx].dev;
+ master_esw = dev0->priv.eswitch;
+ mlx5_ldev_for_each(i, first_idx + 1, ldev) {
struct mlx5_eswitch *slave_esw = ldev->pf[i].dev->priv.eswitch;
err = mlx5_eswitch_offloads_single_fdb_add_one(master_esw,
@@ -570,9 +670,9 @@ static int mlx5_lag_create_single_fdb(struct mlx5_lag *ldev)
}
return 0;
err:
- for (; i > MLX5_LAG_P1; i--)
+ mlx5_ldev_for_each_reverse(j, i, first_idx + 1, ldev)
mlx5_eswitch_offloads_single_fdb_del_one(master_esw,
- ldev->pf[i].dev->priv.eswitch);
+ ldev->pf[j].dev->priv.eswitch);
return err;
}
@@ -582,16 +682,21 @@ static int mlx5_create_lag(struct mlx5_lag *ldev,
unsigned long flags)
{
bool shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags);
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
+ struct mlx5_core_dev *dev0;
int err;
+ if (first_idx < 0)
+ return -EINVAL;
+
+ dev0 = ldev->pf[first_idx].dev;
if (tracker)
mlx5_lag_print_mapping(dev0, ldev, tracker, flags);
mlx5_core_info(dev0, "shared_fdb:%d mode:%s\n",
shared_fdb, mlx5_get_str_port_sel_mode(mode, flags));
- err = mlx5_cmd_create_lag(dev0, ldev->v2p_map, mode, flags);
+ err = mlx5_cmd_create_lag(dev0, ldev, mode, flags);
if (err) {
mlx5_core_err(dev0,
"Failed to create LAG (%d)\n",
@@ -613,6 +718,7 @@ static int mlx5_create_lag(struct mlx5_lag *ldev,
mlx5_core_err(dev0,
"Failed to deactivate RoCE LAG; driver restart required\n");
}
+ BLOCKING_INIT_NOTIFIER_HEAD(&dev0->priv.lag_nh);
return err;
}
@@ -622,17 +728,22 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
enum mlx5_lag_mode mode,
bool shared_fdb)
{
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
bool roce_lag = mode == MLX5_LAG_MODE_ROCE;
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_core_dev *dev0;
unsigned long flags = 0;
int err;
+ if (first_idx < 0)
+ return -EINVAL;
+
+ dev0 = ldev->pf[first_idx].dev;
err = mlx5_lag_set_flags(ldev, mode, tracker, shared_fdb, &flags);
if (err)
return err;
if (mode != MLX5_LAG_MODE_MPESW) {
- mlx5_infer_tx_affinity_mapping(tracker, ldev->ports, ldev->buckets, ldev->v2p_map);
+ mlx5_infer_tx_affinity_mapping(tracker, ldev, ldev->buckets, ldev->v2p_map);
if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) {
err = mlx5_lag_port_sel_create(ldev, tracker->hash_type,
ldev->v2p_map);
@@ -670,20 +781,26 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
int mlx5_deactivate_lag(struct mlx5_lag *ldev)
{
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
- struct mlx5_eswitch *master_esw = dev0->priv.eswitch;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
bool roce_lag = __mlx5_lag_is_roce(ldev);
unsigned long flags = ldev->mode_flags;
+ struct mlx5_eswitch *master_esw;
+ struct mlx5_core_dev *dev0;
int err;
int i;
+ if (first_idx < 0)
+ return -EINVAL;
+
+ dev0 = ldev->pf[first_idx].dev;
+ master_esw = dev0->priv.eswitch;
ldev->mode = MLX5_LAG_MODE_NONE;
ldev->mode_flags = 0;
mlx5_lag_mp_reset(ldev);
if (test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags)) {
- for (i = MLX5_LAG_P1 + 1; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, first_idx + 1, ldev)
mlx5_eswitch_offloads_single_fdb_del_one(master_esw,
ldev->pf[i].dev->priv.eswitch);
clear_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags);
@@ -713,39 +830,42 @@ int mlx5_deactivate_lag(struct mlx5_lag *ldev)
return 0;
}
-#define MLX5_LAG_OFFLOADS_SUPPORTED_PORTS 4
bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
{
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
#ifdef CONFIG_MLX5_ESWITCH
struct mlx5_core_dev *dev;
u8 mode;
#endif
+ bool roce_support;
int i;
- for (i = 0; i < ldev->ports; i++)
- if (!ldev->pf[i].dev)
- return false;
+ if (first_idx < 0 || mlx5_lag_num_devs(ldev) != ldev->ports)
+ return false;
#ifdef CONFIG_MLX5_ESWITCH
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
dev = ldev->pf[i].dev;
if (mlx5_eswitch_num_vfs(dev->priv.eswitch) && !is_mdev_switchdev_mode(dev))
return false;
}
- dev = ldev->pf[MLX5_LAG_P1].dev;
+ dev = ldev->pf[first_idx].dev;
mode = mlx5_eswitch_mode(dev);
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
if (mlx5_eswitch_mode(ldev->pf[i].dev) != mode)
return false;
- if (mode == MLX5_ESWITCH_OFFLOADS && ldev->ports > MLX5_LAG_OFFLOADS_SUPPORTED_PORTS)
- return false;
#else
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
if (mlx5_sriov_is_enabled(ldev->pf[i].dev))
return false;
#endif
+ roce_support = mlx5_get_roce_state(ldev->pf[first_idx].dev);
+ mlx5_ldev_for_each(i, first_idx + 1, ldev)
+ if (mlx5_get_roce_state(ldev->pf[i].dev) != roce_support)
+ return false;
+
return true;
}
@@ -753,10 +873,7 @@ void mlx5_lag_add_devices(struct mlx5_lag *ldev)
{
int i;
- for (i = 0; i < ldev->ports; i++) {
- if (!ldev->pf[i].dev)
- continue;
-
+ mlx5_ldev_for_each(i, 0, ldev) {
if (ldev->pf[i].dev->priv.flags &
MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
continue;
@@ -770,10 +887,7 @@ void mlx5_lag_remove_devices(struct mlx5_lag *ldev)
{
int i;
- for (i = 0; i < ldev->ports; i++) {
- if (!ldev->pf[i].dev)
- continue;
-
+ mlx5_ldev_for_each(i, 0, ldev) {
if (ldev->pf[i].dev->priv.flags &
MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
continue;
@@ -786,11 +900,16 @@ void mlx5_lag_remove_devices(struct mlx5_lag *ldev)
void mlx5_disable_lag(struct mlx5_lag *ldev)
{
bool shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags);
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ struct mlx5_core_dev *dev0;
bool roce_lag;
int err;
int i;
+ if (idx < 0)
+ return;
+
+ dev0 = ldev->pf[idx].dev;
roce_lag = __mlx5_lag_is_roce(ldev);
if (shared_fdb) {
@@ -800,7 +919,7 @@ void mlx5_disable_lag(struct mlx5_lag *ldev)
dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
}
- for (i = 1; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, idx + 1, ldev)
mlx5_nic_vport_disable_roce(ldev->pf[i].dev);
}
@@ -812,17 +931,21 @@ void mlx5_disable_lag(struct mlx5_lag *ldev)
mlx5_lag_add_devices(ldev);
if (shared_fdb)
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
if (!(ldev->pf[i].dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV))
- mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
+ mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
}
-static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
+bool mlx5_lag_shared_fdb_supported(struct mlx5_lag *ldev)
{
+ int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct mlx5_core_dev *dev;
int i;
- for (i = MLX5_LAG_P1 + 1; i < ldev->ports; i++) {
+ if (idx < 0)
+ return false;
+
+ mlx5_ldev_for_each(i, idx + 1, ldev) {
dev = ldev->pf[i].dev;
if (is_mdev_switchdev_mode(dev) &&
mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch) &&
@@ -834,7 +957,7 @@ static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
return false;
}
- dev = ldev->pf[MLX5_LAG_P1].dev;
+ dev = ldev->pf[idx].dev;
if (is_mdev_switchdev_mode(dev) &&
mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch) &&
mlx5_esw_offloads_devcom_is_ready(dev->priv.eswitch) &&
@@ -850,11 +973,11 @@ static bool mlx5_lag_is_roce_lag(struct mlx5_lag *ldev)
bool roce_lag = true;
int i;
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
roce_lag = roce_lag && !mlx5_sriov_is_enabled(ldev->pf[i].dev);
#ifdef CONFIG_MLX5_ESWITCH
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
roce_lag = roce_lag && is_mdev_legacy_mode(ldev->pf[i].dev);
#endif
@@ -875,12 +998,18 @@ static bool mlx5_lag_should_disable_lag(struct mlx5_lag *ldev, bool do_bond)
static void mlx5_do_bond(struct mlx5_lag *ldev)
{
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct lag_tracker tracker = { };
+ struct mlx5_core_dev *dev0;
+ struct net_device *ndev;
bool do_bond, roce_lag;
int err;
int i;
+ if (idx < 0)
+ return;
+
+ dev0 = ldev->pf[idx].dev;
if (!mlx5_lag_is_ready(ldev)) {
do_bond = false;
} else {
@@ -894,7 +1023,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
}
if (do_bond && !__mlx5_lag_is_active(ldev)) {
- bool shared_fdb = mlx5_shared_fdb_supported(ldev);
+ bool shared_fdb = mlx5_lag_shared_fdb_supported(ldev);
roce_lag = mlx5_lag_is_roce_lag(ldev);
@@ -908,21 +1037,27 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
if (err) {
if (shared_fdb || roce_lag)
mlx5_lag_add_devices(ldev);
+ if (shared_fdb) {
+ mlx5_ldev_for_each(i, 0, ldev)
+ mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
+ }
return;
} else if (roce_lag) {
dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
- for (i = 1; i < ldev->ports; i++)
- mlx5_nic_vport_enable_roce(ldev->pf[i].dev);
+ mlx5_ldev_for_each(i, idx + 1, ldev) {
+ if (mlx5_get_roce_state(ldev->pf[i].dev))
+ mlx5_nic_vport_enable_roce(ldev->pf[i].dev);
+ }
} else if (shared_fdb) {
int i;
dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
- for (i = 0; i < ldev->ports; i++) {
- err = mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
+ mlx5_ldev_for_each(i, 0, ldev) {
+ err = mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
if (err)
break;
}
@@ -932,12 +1067,22 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
mlx5_rescan_drivers_locked(dev0);
mlx5_deactivate_lag(ldev);
mlx5_lag_add_devices(ldev);
- for (i = 0; i < ldev->ports; i++)
- mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
+ mlx5_ldev_for_each(i, 0, ldev)
+ mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
mlx5_core_err(dev0, "Failed to enable lag\n");
return;
}
}
+ if (tracker.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
+ ndev = mlx5_lag_active_backup_get_netdev(dev0);
+ /* Only sriov and roce lag should have tracker->tx_type
+ * set so no need to check the mode
+ */
+ blocking_notifier_call_chain(&dev0->priv.lag_nh,
+ MLX5_DRIVER_EVENT_ACTIVE_BACKUP_LAG_CHANGE_LOWERSTATE,
+ ndev);
+ dev_put(ndev);
+ }
} else if (mlx5_lag_should_modify_lag(ldev, do_bond)) {
mlx5_modify_lag(ldev, &tracker);
} else if (mlx5_lag_should_disable_lag(ldev, do_bond)) {
@@ -955,12 +1100,9 @@ struct mlx5_devcom_comp_dev *mlx5_lag_get_devcom_comp(struct mlx5_lag *ldev)
int i;
mutex_lock(&ldev->lock);
- for (i = 0; i < ldev->ports; i++) {
- if (ldev->pf[i].dev) {
- devcom = ldev->pf[i].dev->priv.hca_devcom_comp;
- break;
- }
- }
+ i = mlx5_get_next_ldev_func(ldev, 0);
+ if (i < MLX5_MAX_PORTS)
+ devcom = ldev->pf[i].dev->priv.hca_devcom_comp;
mutex_unlock(&ldev->lock);
return devcom;
}
@@ -1013,7 +1155,7 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
u8 bond_status = 0;
int num_slaves = 0;
int changed = 0;
- int idx;
+ int i, idx = -1;
if (!netif_is_lag_master(upper))
return 0;
@@ -1028,8 +1170,13 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
*/
rcu_read_lock();
for_each_netdev_in_bond_rcu(upper, ndev_tmp) {
- idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp);
- if (idx >= 0) {
+ mlx5_ldev_for_each(i, 0, ldev) {
+ if (ldev->pf[i].netdev == ndev_tmp) {
+ idx++;
+ break;
+ }
+ }
+ if (i < MLX5_MAX_PORTS) {
slave = bond_slave_get_rcu(ndev_tmp);
if (slave)
has_inactive |= bond_is_slave_inactive(slave);
@@ -1179,15 +1326,12 @@ static int mlx5_lag_netdev_event(struct notifier_block *this,
}
static void mlx5_ldev_add_netdev(struct mlx5_lag *ldev,
- struct mlx5_core_dev *dev,
- struct net_device *netdev)
+ struct mlx5_core_dev *dev,
+ struct net_device *netdev)
{
unsigned int fn = mlx5_get_dev_index(dev);
unsigned long flags;
- if (fn >= ldev->ports)
- return;
-
spin_lock_irqsave(&lag_lock, flags);
ldev->pf[fn].netdev = netdev;
ldev->tracker.netdev_state[fn].link_up = 0;
@@ -1202,7 +1346,7 @@ static void mlx5_ldev_remove_netdev(struct mlx5_lag *ldev,
int i;
spin_lock_irqsave(&lag_lock, flags);
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
if (ldev->pf[i].netdev == netdev) {
ldev->pf[i].netdev = NULL;
break;
@@ -1212,13 +1356,10 @@ static void mlx5_ldev_remove_netdev(struct mlx5_lag *ldev,
}
static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev,
- struct mlx5_core_dev *dev)
+ struct mlx5_core_dev *dev)
{
unsigned int fn = mlx5_get_dev_index(dev);
- if (fn >= ldev->ports)
- return;
-
ldev->pf[fn].dev = dev;
dev->priv.lag = ldev;
}
@@ -1226,16 +1367,13 @@ static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev,
static void mlx5_ldev_remove_mdev(struct mlx5_lag *ldev,
struct mlx5_core_dev *dev)
{
- int i;
-
- for (i = 0; i < ldev->ports; i++)
- if (ldev->pf[i].dev == dev)
- break;
+ int fn;
- if (i == ldev->ports)
+ fn = mlx5_get_dev_index(dev);
+ if (ldev->pf[fn].dev != dev)
return;
- ldev->pf[i].dev = NULL;
+ ldev->pf[fn].dev = NULL;
dev->priv.lag = NULL;
}
@@ -1272,6 +1410,37 @@ static int __mlx5_lag_dev_add_mdev(struct mlx5_core_dev *dev)
return 0;
}
+static void mlx5_lag_unregister_hca_devcom_comp(struct mlx5_core_dev *dev)
+{
+ mlx5_devcom_unregister_component(dev->priv.hca_devcom_comp);
+}
+
+static int mlx5_lag_register_hca_devcom_comp(struct mlx5_core_dev *dev)
+{
+ struct mlx5_devcom_match_attr attr = {
+ .flags = MLX5_DEVCOM_MATCH_FLAGS_NS,
+ .net = mlx5_core_net(dev),
+ };
+ u8 len __always_unused;
+
+ mlx5_query_nic_sw_system_image_guid(dev, attr.key.buf, &len);
+
+ /* This component is used to sync adding core_dev to lag_dev and to sync
+ * changes of mlx5_adev_devices between the LAG layer and other layers.
+ */
+ dev->priv.hca_devcom_comp =
+ mlx5_devcom_register_component(dev->priv.devc,
+ MLX5_DEVCOM_HCA_PORTS,
+ &attr, NULL, dev);
+ if (!dev->priv.hca_devcom_comp) {
+ mlx5_core_err(dev,
+ "Failed to register devcom HCA component\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
void mlx5_lag_remove_mdev(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
@@ -1293,6 +1462,7 @@ recheck:
}
mlx5_ldev_remove_mdev(ldev, dev);
mutex_unlock(&ldev->lock);
+ mlx5_lag_unregister_hca_devcom_comp(dev);
mlx5_ldev_put(ldev);
}
@@ -1303,7 +1473,7 @@ void mlx5_lag_add_mdev(struct mlx5_core_dev *dev)
if (!mlx5_lag_is_supported(dev))
return;
- if (IS_ERR_OR_NULL(dev->priv.hca_devcom_comp))
+ if (mlx5_lag_register_hca_devcom_comp(dev))
return;
recheck:
@@ -1343,7 +1513,7 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
struct net_device *netdev)
{
struct mlx5_lag *ldev;
- int i;
+ int num = 0;
ldev = mlx5_lag_dev(dev);
if (!ldev)
@@ -1351,17 +1521,33 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
mutex_lock(&ldev->lock);
mlx5_ldev_add_netdev(ldev, dev, netdev);
-
- for (i = 0; i < ldev->ports; i++)
- if (!ldev->pf[i].netdev)
- break;
-
- if (i >= ldev->ports)
+ num = mlx5_lag_num_netdevs(ldev);
+ if (num >= ldev->ports)
set_bit(MLX5_LAG_FLAG_NDEVS_READY, &ldev->state_flags);
mutex_unlock(&ldev->lock);
mlx5_queue_bond_work(ldev, 0);
}
+int mlx5_get_pre_ldev_func(struct mlx5_lag *ldev, int start_idx, int end_idx)
+{
+ int i;
+
+ for (i = start_idx; i >= end_idx; i--)
+ if (ldev->pf[i].dev)
+ return i;
+ return -1;
+}
+
+int mlx5_get_next_ldev_func(struct mlx5_lag *ldev, int start_idx)
+{
+ int i;
+
+ for (i = start_idx; i < MLX5_MAX_PORTS; i++)
+ if (ldev->pf[i].dev)
+ return i;
+ return MLX5_MAX_PORTS;
+}
+
bool mlx5_lag_is_roce(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
@@ -1412,12 +1598,13 @@ bool mlx5_lag_is_master(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
unsigned long flags;
- bool res;
+ bool res = false;
+ int idx;
spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
- res = ldev && __mlx5_lag_is_active(ldev) &&
- dev == ldev->pf[MLX5_LAG_P1].dev;
+ idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ res = ldev && __mlx5_lag_is_active(ldev) && idx >= 0 && dev == ldev->pf[idx].dev;
spin_unlock_irqrestore(&lag_lock, flags);
return res;
@@ -1487,38 +1674,6 @@ void mlx5_lag_enable_change(struct mlx5_core_dev *dev)
mlx5_queue_bond_work(ldev, 0);
}
-struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
-{
- struct net_device *ndev = NULL;
- struct mlx5_lag *ldev;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&lag_lock, flags);
- ldev = mlx5_lag_dev(dev);
-
- if (!(ldev && __mlx5_lag_is_roce(ldev)))
- goto unlock;
-
- if (ldev->tracker.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
- for (i = 0; i < ldev->ports; i++)
- if (ldev->tracker.netdev_state[i].tx_enabled)
- ndev = ldev->pf[i].netdev;
- if (!ndev)
- ndev = ldev->pf[ldev->ports - 1].netdev;
- } else {
- ndev = ldev->pf[MLX5_LAG_P1].netdev;
- }
- if (ndev)
- dev_hold(ndev);
-
-unlock:
- spin_unlock_irqrestore(&lag_lock, flags);
-
- return ndev;
-}
-EXPORT_SYMBOL(mlx5_lag_get_roce_netdev);
-
u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
struct net_device *slave)
{
@@ -1532,8 +1687,8 @@ u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
if (!(ldev && __mlx5_lag_is_roce(ldev)))
goto unlock;
- for (i = 0; i < ldev->ports; i++) {
- if (ldev->pf[MLX5_LAG_P1].netdev == slave) {
+ mlx5_ldev_for_each(i, 0, ldev) {
+ if (ldev->pf[i].netdev == slave) {
port = i;
break;
}
@@ -1571,13 +1726,13 @@ struct mlx5_core_dev *mlx5_lag_get_next_peer_mdev(struct mlx5_core_dev *dev, int
if (!ldev)
goto unlock;
- if (*i == ldev->ports)
+ if (*i == MLX5_MAX_PORTS)
goto unlock;
- for (idx = *i; idx < ldev->ports; idx++)
+ mlx5_ldev_for_each(idx, *i, ldev)
if (ldev->pf[idx].dev != dev)
break;
- if (idx == ldev->ports) {
+ if (idx == MLX5_MAX_PORTS) {
*i = idx;
goto unlock;
}
@@ -1598,10 +1753,10 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
{
int outlen = MLX5_ST_SZ_BYTES(query_cong_statistics_out);
struct mlx5_core_dev **mdev;
+ int ret = 0, i, j, idx = 0;
struct mlx5_lag *ldev;
unsigned long flags;
int num_ports;
- int ret, i, j;
void *out;
out = kvzalloc(outlen, GFP_KERNEL);
@@ -1620,8 +1775,8 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
ldev = mlx5_lag_dev(dev);
if (ldev && __mlx5_lag_is_active(ldev)) {
num_ports = ldev->ports;
- for (i = 0; i < ldev->ports; i++)
- mdev[i] = ldev->pf[i].dev;
+ mlx5_ldev_for_each(i, 0, ldev)
+ mdev[idx++] = ldev->pf[i].dev;
} else {
num_ports = 1;
mdev[MLX5_LAG_P1] = dev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
index 50fcb1eee574..4918eee2b3da 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
@@ -67,6 +67,7 @@ struct mlx5_lag {
struct workqueue_struct *wq;
struct delayed_work bond_work;
struct notifier_block nb;
+ possible_net_t net;
struct lag_mp lag_mp;
struct mlx5_lag_port_sel port_sel;
/* Protect lag fields/state changes */
@@ -92,6 +93,7 @@ mlx5_lag_is_ready(struct mlx5_lag *ldev)
return test_bit(MLX5_LAG_FLAG_NDEVS_READY, &ldev->state_flags);
}
+bool mlx5_lag_shared_fdb_supported(struct mlx5_lag *ldev);
bool mlx5_lag_check_prereq(struct mlx5_lag *ldev);
void mlx5_modify_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker);
@@ -103,7 +105,7 @@ int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
struct net_device *ndev);
char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags);
-void mlx5_infer_tx_enabled(struct lag_tracker *tracker, u8 num_ports,
+void mlx5_infer_tx_enabled(struct lag_tracker *tracker, struct mlx5_lag *ldev,
u8 *ports, int *num_enabled);
void mlx5_ldev_add_debugfs(struct mlx5_core_dev *dev);
@@ -119,9 +121,24 @@ static inline bool mlx5_lag_is_supported(struct mlx5_core_dev *dev)
if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
!MLX5_CAP_GEN(dev, lag_master) ||
MLX5_CAP_GEN(dev, num_lag_ports) < 2 ||
+ mlx5_get_dev_index(dev) >= MLX5_MAX_PORTS ||
MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_MAX_PORTS)
return false;
return true;
}
+#define mlx5_ldev_for_each(i, start_index, ldev) \
+ for (int tmp = start_index; tmp = mlx5_get_next_ldev_func(ldev, tmp), \
+ i = tmp, tmp < MLX5_MAX_PORTS; tmp++)
+
+#define mlx5_ldev_for_each_reverse(i, start_index, end_index, ldev) \
+ for (int tmp = start_index, tmp1 = end_index; \
+ tmp = mlx5_get_pre_ldev_func(ldev, tmp, tmp1), \
+ i = tmp, tmp >= tmp1; tmp--)
+
+int mlx5_get_pre_ldev_func(struct mlx5_lag *ldev, int start_idx, int end_idx);
+int mlx5_get_next_ldev_func(struct mlx5_lag *ldev, int start_idx);
+int mlx5_lag_get_dev_index_by_seq(struct mlx5_lag *ldev, int seq);
+int mlx5_lag_num_devs(struct mlx5_lag *ldev);
+int mlx5_lag_num_netdevs(struct mlx5_lag *ldev);
#endif /* __MLX5_LAG_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
index b1aa494c76ba..aee17fcf3b36 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
@@ -17,7 +17,10 @@ static bool __mlx5_lag_is_multipath(struct mlx5_lag *ldev)
#define MLX5_LAG_MULTIPATH_OFFLOADS_SUPPORTED_PORTS 2
static bool mlx5_lag_multipath_check_prereq(struct mlx5_lag *ldev)
{
- if (!mlx5_lag_is_ready(ldev))
+ int idx0 = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ int idx1 = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P2);
+
+ if (idx0 < 0 || idx1 < 0 || !mlx5_lag_is_ready(ldev))
return false;
if (__mlx5_lag_is_active(ldev) && !__mlx5_lag_is_multipath(ldev))
@@ -26,8 +29,8 @@ static bool mlx5_lag_multipath_check_prereq(struct mlx5_lag *ldev)
if (ldev->ports > MLX5_LAG_MULTIPATH_OFFLOADS_SUPPORTED_PORTS)
return false;
- return mlx5_esw_multipath_prereq(ldev->pf[MLX5_LAG_P1].dev,
- ldev->pf[MLX5_LAG_P2].dev);
+ return mlx5_esw_multipath_prereq(ldev->pf[idx0].dev,
+ ldev->pf[idx1].dev);
}
bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev)
@@ -50,43 +53,45 @@ bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev)
static void mlx5_lag_set_port_affinity(struct mlx5_lag *ldev,
enum mlx5_lag_port_affinity port)
{
+ int idx0 = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ int idx1 = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P2);
struct lag_tracker tracker = {};
- if (!__mlx5_lag_is_multipath(ldev))
+ if (idx0 < 0 || idx1 < 0 || !__mlx5_lag_is_multipath(ldev))
return;
switch (port) {
case MLX5_LAG_NORMAL_AFFINITY:
- tracker.netdev_state[MLX5_LAG_P1].tx_enabled = true;
- tracker.netdev_state[MLX5_LAG_P2].tx_enabled = true;
- tracker.netdev_state[MLX5_LAG_P1].link_up = true;
- tracker.netdev_state[MLX5_LAG_P2].link_up = true;
+ tracker.netdev_state[idx0].tx_enabled = true;
+ tracker.netdev_state[idx1].tx_enabled = true;
+ tracker.netdev_state[idx0].link_up = true;
+ tracker.netdev_state[idx1].link_up = true;
break;
case MLX5_LAG_P1_AFFINITY:
- tracker.netdev_state[MLX5_LAG_P1].tx_enabled = true;
- tracker.netdev_state[MLX5_LAG_P1].link_up = true;
- tracker.netdev_state[MLX5_LAG_P2].tx_enabled = false;
- tracker.netdev_state[MLX5_LAG_P2].link_up = false;
+ tracker.netdev_state[idx0].tx_enabled = true;
+ tracker.netdev_state[idx0].link_up = true;
+ tracker.netdev_state[idx1].tx_enabled = false;
+ tracker.netdev_state[idx1].link_up = false;
break;
case MLX5_LAG_P2_AFFINITY:
- tracker.netdev_state[MLX5_LAG_P1].tx_enabled = false;
- tracker.netdev_state[MLX5_LAG_P1].link_up = false;
- tracker.netdev_state[MLX5_LAG_P2].tx_enabled = true;
- tracker.netdev_state[MLX5_LAG_P2].link_up = true;
+ tracker.netdev_state[idx0].tx_enabled = false;
+ tracker.netdev_state[idx0].link_up = false;
+ tracker.netdev_state[idx1].tx_enabled = true;
+ tracker.netdev_state[idx1].link_up = true;
break;
default:
- mlx5_core_warn(ldev->pf[MLX5_LAG_P1].dev,
+ mlx5_core_warn(ldev->pf[idx0].dev,
"Invalid affinity port %d", port);
return;
}
- if (tracker.netdev_state[MLX5_LAG_P1].tx_enabled)
- mlx5_notifier_call_chain(ldev->pf[MLX5_LAG_P1].dev->priv.events,
+ if (tracker.netdev_state[idx0].tx_enabled)
+ mlx5_notifier_call_chain(ldev->pf[idx0].dev->priv.events,
MLX5_DEV_EVENT_PORT_AFFINITY,
(void *)0);
- if (tracker.netdev_state[MLX5_LAG_P2].tx_enabled)
- mlx5_notifier_call_chain(ldev->pf[MLX5_LAG_P2].dev->priv.events,
+ if (tracker.netdev_state[idx1].tx_enabled)
+ mlx5_notifier_call_chain(ldev->pf[idx1].dev->priv.events,
MLX5_DEV_EVENT_PORT_AFFINITY,
(void *)0);
@@ -150,9 +155,14 @@ mlx5_lag_get_next_fib_dev(struct mlx5_lag *ldev,
static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, unsigned long event,
struct fib_entry_notifier_info *fen_info)
{
+ int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct net_device *nh_dev0, *nh_dev1;
struct fib_info *fi = fen_info->fi;
struct lag_mp *mp = &ldev->lag_mp;
+ int i, dev_idx = 0;
+
+ if (idx < 0)
+ return;
/* Handle delete event */
if (event == FIB_EVENT_ENTRY_DEL) {
@@ -179,17 +189,19 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, unsigned long event,
}
if (nh_dev0 == nh_dev1) {
- mlx5_core_warn(ldev->pf[MLX5_LAG_P1].dev,
+ mlx5_core_warn(ldev->pf[idx].dev,
"Multipath offload doesn't support routes with multiple nexthops of the same device");
return;
}
if (!nh_dev1) {
if (__mlx5_lag_is_active(ldev)) {
- int i = mlx5_lag_dev_get_netdev_idx(ldev, nh_dev0);
-
- i++;
- mlx5_lag_set_port_affinity(ldev, i);
+ mlx5_ldev_for_each(i, 0, ldev) {
+ dev_idx++;
+ if (ldev->pf[i].netdev == nh_dev0)
+ break;
+ }
+ mlx5_lag_set_port_affinity(ldev, dev_idx);
mlx5_lag_fib_set(mp, fi, fen_info->dst, fen_info->dst_len);
}
@@ -214,6 +226,7 @@ static void mlx5_lag_fib_nexthop_event(struct mlx5_lag *ldev,
struct fib_info *fi)
{
struct lag_mp *mp = &ldev->lag_mp;
+ int i, dev_idx = 0;
/* Check the nh event is related to the route */
if (!mp->fib.mfi || mp->fib.mfi != fi)
@@ -221,11 +234,15 @@ static void mlx5_lag_fib_nexthop_event(struct mlx5_lag *ldev,
/* nh added/removed */
if (event == FIB_EVENT_NH_DEL) {
- int i = mlx5_lag_dev_get_netdev_idx(ldev, fib_nh->fib_nh_dev);
+ mlx5_ldev_for_each(i, 0, ldev) {
+ if (ldev->pf[i].netdev == fib_nh->fib_nh_dev)
+ break;
+ dev_idx++;
+ }
- if (i >= 0) {
- i = (i + 1) % 2 + 1; /* peer port */
- mlx5_lag_set_port_affinity(ldev, i);
+ if (dev_idx >= 0) {
+ dev_idx = (dev_idx + 1) % 2 + 1; /* peer port */
+ mlx5_lag_set_port_affinity(ldev, dev_idx);
}
} else if (event == FIB_EVENT_NH_ADD &&
fib_info_num_path(fi) == 2) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
index 82889f30506e..aad52d3a90e6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
@@ -15,7 +15,7 @@ static void mlx5_mpesw_metadata_cleanup(struct mlx5_lag *ldev)
u32 pf_metadata;
int i;
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
dev = ldev->pf[i].dev;
esw = dev->priv.eswitch;
pf_metadata = ldev->lag_mpesw.pf_metadata[i];
@@ -36,7 +36,7 @@ static int mlx5_mpesw_metadata_set(struct mlx5_lag *ldev)
u32 pf_metadata;
int i, err;
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
dev = ldev->pf[i].dev;
esw = dev->priv.eswitch;
pf_metadata = mlx5_esw_match_metadata_alloc(esw);
@@ -52,7 +52,7 @@ static int mlx5_mpesw_metadata_set(struct mlx5_lag *ldev)
goto err_metadata;
}
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
dev = ldev->pf[i].dev;
mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_MULTIPORT_ESW,
(void *)0);
@@ -65,23 +65,22 @@ err_metadata:
return err;
}
-#define MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS 4
static int enable_mpesw(struct mlx5_lag *ldev)
{
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ struct mlx5_core_dev *dev0;
int err;
int i;
- if (ldev->mode != MLX5_LAG_MODE_NONE)
+ if (idx < 0 || ldev->mode != MLX5_LAG_MODE_NONE)
return -EINVAL;
- if (ldev->ports > MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS)
- return -EOPNOTSUPP;
-
+ dev0 = ldev->pf[idx].dev;
if (mlx5_eswitch_mode(dev0) != MLX5_ESWITCH_OFFLOADS ||
!MLX5_CAP_PORT_SELECTION(dev0, port_select_flow_table) ||
!MLX5_CAP_GEN(dev0, create_lag_when_not_master_up) ||
- !mlx5_lag_check_prereq(ldev))
+ !mlx5_lag_check_prereq(ldev) ||
+ !mlx5_lag_shared_fdb_supported(ldev))
return -EOPNOTSUPP;
err = mlx5_mpesw_metadata_set(ldev);
@@ -98,8 +97,8 @@ static int enable_mpesw(struct mlx5_lag *ldev)
dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
- for (i = 0; i < ldev->ports; i++) {
- err = mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
+ mlx5_ldev_for_each(i, 0, ldev) {
+ err = mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
if (err)
goto err_rescan_drivers;
}
@@ -112,8 +111,8 @@ err_rescan_drivers:
mlx5_deactivate_lag(ldev);
err_add_devices:
mlx5_lag_add_devices(ldev);
- for (i = 0; i < ldev->ports; i++)
- mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
+ mlx5_ldev_for_each(i, 0, ldev)
+ mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
mlx5_mpesw_metadata_cleanup(ldev);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
index 101b3bb90863..d832a12ffec0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
@@ -39,15 +39,18 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
struct mlx5_lag_definer *lag_definer,
u8 *ports)
{
- struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_destination dest = {};
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_namespace *ns;
- int err, i;
- int idx;
- int j;
+ struct mlx5_core_dev *dev;
+ int err, i, j, k, idx;
+ if (first_idx < 0)
+ return -EINVAL;
+
+ dev = ldev->pf[first_idx].dev;
ft_attr.max_fte = ldev->ports * ldev->buckets;
ft_attr.level = MLX5_LAG_FT_LEVEL_DEFINER;
@@ -74,7 +77,7 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
dest.type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
flow_act.flags |= FLOW_ACT_NO_APPEND;
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
for (j = 0; j < ldev->buckets; j++) {
u8 affinity;
@@ -88,9 +91,13 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
&dest, 1);
if (IS_ERR(lag_definer->rules[idx])) {
err = PTR_ERR(lag_definer->rules[idx]);
- while (i--)
- while (j--)
+ mlx5_ldev_for_each_reverse(k, i, 0, ldev) {
+ while (j--) {
+ idx = k * ldev->buckets + j;
mlx5_del_flow_rules(lag_definer->rules[idx]);
+ }
+ j = ldev->buckets;
+ }
goto destroy_fg;
}
}
@@ -291,11 +298,16 @@ static struct mlx5_lag_definer *
mlx5_lag_create_definer(struct mlx5_lag *ldev, enum netdev_lag_hash hash,
enum mlx5_traffic_types tt, bool tunnel, u8 *ports)
{
- struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct mlx5_lag_definer *lag_definer;
+ struct mlx5_core_dev *dev;
u32 *match_definer_mask;
int format_id, err;
+ if (first_idx < 0)
+ return ERR_PTR(-EINVAL);
+
+ dev = ldev->pf[first_idx].dev;
lag_definer = kzalloc(sizeof(*lag_definer), GFP_KERNEL);
if (!lag_definer)
return ERR_PTR(-ENOMEM);
@@ -337,12 +349,15 @@ free_lag_definer:
static void mlx5_lag_destroy_definer(struct mlx5_lag *ldev,
struct mlx5_lag_definer *lag_definer)
{
- struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
- int idx;
- int i;
- int j;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ struct mlx5_core_dev *dev;
+ int idx, i, j;
- for (i = 0; i < ldev->ports; i++) {
+ if (first_idx < 0)
+ return;
+
+ dev = ldev->pf[first_idx].dev;
+ mlx5_ldev_for_each(i, first_idx, ldev) {
for (j = 0; j < ldev->buckets; j++) {
idx = i * ldev->buckets + j;
mlx5_del_flow_rules(lag_definer->rules[idx]);
@@ -449,13 +464,11 @@ static void set_tt_map(struct mlx5_lag_port_sel *port_sel,
static void mlx5_lag_set_inner_ttc_params(struct mlx5_lag *ldev,
struct ttc_params *ttc_params)
{
- struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
struct mlx5_flow_table_attr *ft_attr;
int tt;
- ttc_params->ns = mlx5_get_flow_namespace(dev,
- MLX5_FLOW_NAMESPACE_PORT_SEL);
+ ttc_params->ns_type = MLX5_FLOW_NAMESPACE_PORT_SEL;
ft_attr = &ttc_params->ft_attr;
ft_attr->level = MLX5_LAG_FT_LEVEL_INNER_TTC;
@@ -470,13 +483,11 @@ static void mlx5_lag_set_inner_ttc_params(struct mlx5_lag *ldev,
static void mlx5_lag_set_outer_ttc_params(struct mlx5_lag *ldev,
struct ttc_params *ttc_params)
{
- struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
struct mlx5_flow_table_attr *ft_attr;
int tt;
- ttc_params->ns = mlx5_get_flow_namespace(dev,
- MLX5_FLOW_NAMESPACE_PORT_SEL);
+ ttc_params->ns_type = MLX5_FLOW_NAMESPACE_PORT_SEL;
ft_attr = &ttc_params->ft_attr;
ft_attr->level = MLX5_LAG_FT_LEVEL_TTC;
@@ -501,10 +512,15 @@ static void mlx5_lag_set_outer_ttc_params(struct mlx5_lag *ldev,
static int mlx5_lag_create_ttc_table(struct mlx5_lag *ldev)
{
- struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
struct ttc_params ttc_params = {};
+ struct mlx5_core_dev *dev;
+
+ if (first_idx < 0)
+ return -EINVAL;
+ dev = ldev->pf[first_idx].dev;
mlx5_lag_set_outer_ttc_params(ldev, &ttc_params);
port_sel->outer.ttc = mlx5_create_ttc_table(dev, &ttc_params);
return PTR_ERR_OR_ZERO(port_sel->outer.ttc);
@@ -512,10 +528,15 @@ static int mlx5_lag_create_ttc_table(struct mlx5_lag *ldev)
static int mlx5_lag_create_inner_ttc_table(struct mlx5_lag *ldev)
{
- struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
struct ttc_params ttc_params = {};
+ struct mlx5_core_dev *dev;
+ if (first_idx < 0)
+ return -EINVAL;
+
+ dev = ldev->pf[first_idx].dev;
mlx5_lag_set_inner_ttc_params(ldev, &ttc_params);
port_sel->inner.ttc = mlx5_create_inner_ttc_table(dev, &ttc_params);
return PTR_ERR_OR_ZERO(port_sel->inner.ttc);
@@ -530,7 +551,7 @@ int mlx5_lag_port_sel_create(struct mlx5_lag *ldev,
set_tt_map(port_sel, hash_type);
err = mlx5_lag_create_definers(ldev, hash_type, ports);
if (err)
- return err;
+ goto clear_port_sel;
if (port_sel->tunnel) {
err = mlx5_lag_create_inner_ttc_table(ldev);
@@ -549,6 +570,8 @@ destroy_inner:
mlx5_destroy_ttc_table(port_sel->inner.ttc);
destroy_definers:
mlx5_lag_destroy_definers(ldev);
+clear_port_sel:
+ memset(port_sel, 0, sizeof(*port_sel));
return err;
}
@@ -565,7 +588,7 @@ static int __mlx5_lag_modify_definers_destinations(struct mlx5_lag *ldev,
dest.type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
for (j = 0; j < ldev->buckets; j++) {
idx = i * ldev->buckets + j;
if (ldev->v2p_map[idx] == ports[idx])
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
index 58bd749b5e4d..129725159a93 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
@@ -100,7 +100,7 @@ static int create_aso_cq(struct mlx5_aso_cq *cq, void *cqc_data)
MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
- MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
+ MLX5_SET(cqc, cqc, uar_page, mdev->priv.bfreg.up->index);
MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
@@ -129,7 +129,7 @@ static int mlx5_aso_create_cq(struct mlx5_core_dev *mdev, int numa_node,
return -ENOMEM;
MLX5_SET(cqc, cqc_data, log_cq_size, 1);
- MLX5_SET(cqc, cqc_data, uar_page, mdev->priv.uar->index);
+ MLX5_SET(cqc, cqc_data, uar_page, mdev->priv.bfreg.up->index);
if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128)
MLX5_SET(cqc, cqc_data, cqe_sz, CQE_STRIDE_128_PAD);
@@ -163,7 +163,7 @@ static int mlx5_aso_alloc_sq(struct mlx5_core_dev *mdev, int numa_node,
struct mlx5_wq_param param;
int err;
- sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
+ sq->uar_map = mdev->priv.bfreg.map;
param.db_numa_node = numa_node;
param.buf_numa_node = numa_node;
@@ -203,7 +203,7 @@ static int create_aso_sq(struct mlx5_core_dev *mdev, int pdn,
MLX5_SET(sqc, sqc, ts_format, ts_format);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
- MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.hw_objs.bfreg.index);
+ MLX5_SET(wq, wq, uar_page, mdev->priv.bfreg.index);
MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 0361741632a6..0ba0ef8bae42 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -38,6 +38,12 @@
#include "lib/eq.h"
#include "en.h"
#include "clock.h"
+#ifdef CONFIG_X86
+#include <linux/timekeeping.h>
+#include <linux/cpufeature.h>
+#endif /* CONFIG_X86 */
+
+#define MLX5_RT_CLOCK_IDENTITY_SIZE MLX5_FLD_SZ_BYTES(mrtcq_reg, rt_clock_identity)
enum {
MLX5_PIN_MODE_IN = 0x0,
@@ -73,6 +79,56 @@ enum {
MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MAX = 200000,
};
+struct mlx5_clock_dev_state {
+ struct mlx5_core_dev *mdev;
+ struct mlx5_devcom_comp_dev *compdev;
+ struct mlx5_nb pps_nb;
+ struct work_struct out_work;
+};
+
+struct mlx5_clock_priv {
+ struct mlx5_clock clock;
+ struct mlx5_core_dev *mdev;
+ struct mutex lock; /* protects mdev; used in PTP callbacks */
+ struct mlx5_core_dev *event_mdev;
+};
+
+static struct mlx5_clock_priv *clock_priv(struct mlx5_clock *clock)
+{
+ return container_of(clock, struct mlx5_clock_priv, clock);
+}
+
+static void mlx5_clock_lockdep_assert(struct mlx5_clock *clock)
+{
+ if (!clock->shared)
+ return;
+
+ lockdep_assert(lockdep_is_held(&clock_priv(clock)->lock));
+}
+
+static struct mlx5_core_dev *mlx5_clock_mdev_get(struct mlx5_clock *clock)
+{
+ mlx5_clock_lockdep_assert(clock);
+
+ return clock_priv(clock)->mdev;
+}
+
+static void mlx5_clock_lock(struct mlx5_clock *clock)
+{
+ if (!clock->shared)
+ return;
+
+ mutex_lock(&clock_priv(clock)->lock);
+}
+
+static void mlx5_clock_unlock(struct mlx5_clock *clock)
+{
+ if (!clock->shared)
+ return;
+
+ mutex_unlock(&clock_priv(clock)->lock);
+}
+
static bool mlx5_real_time_mode(struct mlx5_core_dev *mdev)
{
return (mlx5_is_real_time_rq(mdev) || mlx5_is_real_time_sq(mdev));
@@ -90,6 +146,22 @@ static bool mlx5_modify_mtutc_allowed(struct mlx5_core_dev *mdev)
return MLX5_CAP_MCAM_FEATURE(mdev, ptpcyc2realtime_modify);
}
+static int mlx5_clock_identity_get(struct mlx5_core_dev *mdev,
+ u8 identify[MLX5_RT_CLOCK_IDENTITY_SIZE])
+{
+ u32 out[MLX5_ST_SZ_DW(mrtcq_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(mrtcq_reg)] = {};
+ int err;
+
+ err = mlx5_core_access_reg(mdev, in, sizeof(in),
+ out, sizeof(out), MLX5_REG_MRTCQ, 0, 0);
+ if (!err)
+ memcpy(identify, MLX5_ADDR_OF(mrtcq_reg, out, rt_clock_identity),
+ MLX5_RT_CLOCK_IDENTITY_SIZE);
+
+ return err;
+}
+
static u32 mlx5_ptp_shift_constant(u32 dev_freq_khz)
{
/* Optimal shift constant leads to corrections above just 1 scaled ppm.
@@ -115,21 +187,30 @@ static u32 mlx5_ptp_shift_constant(u32 dev_freq_khz)
ilog2((U32_MAX / NSEC_PER_MSEC) * dev_freq_khz));
}
+static s32 mlx5_clock_getmaxphase(struct mlx5_core_dev *mdev)
+{
+ return MLX5_CAP_MCAM_FEATURE(mdev, mtutc_time_adjustment_extended_range) ?
+ MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MAX :
+ MLX5_MTUTC_OPERATION_ADJUST_TIME_MAX;
+}
+
static s32 mlx5_ptp_getmaxphase(struct ptp_clock_info *ptp)
{
struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
struct mlx5_core_dev *mdev;
+ s32 ret;
- mdev = container_of(clock, struct mlx5_core_dev, clock);
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
+ ret = mlx5_clock_getmaxphase(mdev);
+ mlx5_clock_unlock(clock);
- return MLX5_CAP_MCAM_FEATURE(mdev, mtutc_time_adjustment_extended_range) ?
- MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MAX :
- MLX5_MTUTC_OPERATION_ADJUST_TIME_MAX;
+ return ret;
}
static bool mlx5_is_mtutc_time_adj_cap(struct mlx5_core_dev *mdev, s64 delta)
{
- s64 max = mlx5_ptp_getmaxphase(&mdev->clock.ptp_info);
+ s64 max = mlx5_clock_getmaxphase(mdev);
if (delta < -max || delta > max)
return false;
@@ -148,6 +229,152 @@ static int mlx5_set_mtutc(struct mlx5_core_dev *dev, u32 *mtutc, u32 size)
MLX5_REG_MTUTC, 0, 1);
}
+#ifdef CONFIG_X86
+static bool mlx5_is_ptm_source_time_available(struct mlx5_core_dev *dev)
+{
+ u32 out[MLX5_ST_SZ_DW(mtptm_reg)] = {0};
+ u32 in[MLX5_ST_SZ_DW(mtptm_reg)] = {0};
+ int err;
+
+ if (!MLX5_CAP_MCAM_REG3(dev, mtptm))
+ return false;
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_MTPTM,
+ 0, 0);
+ if (err)
+ return false;
+
+ return !!MLX5_GET(mtptm_reg, out, psta);
+}
+
+static int mlx5_mtctr_read(struct mlx5_core_dev *mdev,
+ bool real_time_mode,
+ struct system_counterval_t *sys_counterval,
+ u64 *device)
+{
+ u32 out[MLX5_ST_SZ_DW(mtctr_reg)] = {0};
+ u32 in[MLX5_ST_SZ_DW(mtctr_reg)] = {0};
+ u64 host;
+ int err;
+
+ MLX5_SET(mtctr_reg, in, first_clock_timestamp_request,
+ MLX5_MTCTR_REQUEST_PTM_ROOT_CLOCK);
+ MLX5_SET(mtctr_reg, in, second_clock_timestamp_request,
+ real_time_mode ? MLX5_MTCTR_REQUEST_REAL_TIME_CLOCK :
+ MLX5_MTCTR_REQUEST_FREE_RUNNING_COUNTER);
+
+ err = mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
+ MLX5_REG_MTCTR, 0, 0);
+ if (err)
+ return err;
+
+ if (!MLX5_GET(mtctr_reg, out, first_clock_valid) ||
+ !MLX5_GET(mtctr_reg, out, second_clock_valid))
+ return -EINVAL;
+
+ host = MLX5_GET64(mtctr_reg, out, first_clock_timestamp);
+ *sys_counterval = (struct system_counterval_t) {
+ .cycles = host,
+ .cs_id = CSID_X86_ART,
+ .use_nsecs = true,
+ };
+ *device = MLX5_GET64(mtctr_reg, out, second_clock_timestamp);
+
+ return 0;
+}
+
+static int mlx5_mtctr_syncdevicetime(ktime_t *device_time,
+ struct system_counterval_t *sys_counterval,
+ void *ctx)
+{
+ struct mlx5_core_dev *mdev = ctx;
+ bool real_time_mode;
+ u64 device;
+ int err;
+
+ real_time_mode = mlx5_real_time_mode(mdev);
+
+ err = mlx5_mtctr_read(mdev, real_time_mode, sys_counterval, &device);
+ if (err)
+ return err;
+
+ if (real_time_mode)
+ *device_time = ns_to_ktime(REAL_TIME_TO_NS(device >> 32, device & U32_MAX));
+ else
+ *device_time = mlx5_timecounter_cyc2time(mdev->clock, device);
+
+ return 0;
+}
+
+static int
+mlx5_mtctr_syncdevicecyclestime(ktime_t *device_time,
+ struct system_counterval_t *sys_counterval,
+ void *ctx)
+{
+ struct mlx5_core_dev *mdev = ctx;
+ u64 device;
+ int err;
+
+ err = mlx5_mtctr_read(mdev, false, sys_counterval, &device);
+ if (err)
+ return err;
+ *device_time = ns_to_ktime(device);
+
+ return 0;
+}
+
+static int mlx5_ptp_getcrosststamp(struct ptp_clock_info *ptp,
+ struct system_device_crosststamp *cts)
+{
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
+ struct system_time_snapshot history_begin = {0};
+ struct mlx5_core_dev *mdev;
+ int err;
+
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
+
+ if (!mlx5_is_ptm_source_time_available(mdev)) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ ktime_get_snapshot(&history_begin);
+
+ err = get_device_system_crosststamp(mlx5_mtctr_syncdevicetime, mdev,
+ &history_begin, cts);
+unlock:
+ mlx5_clock_unlock(clock);
+ return err;
+}
+
+static int mlx5_ptp_getcrosscycles(struct ptp_clock_info *ptp,
+ struct system_device_crosststamp *cts)
+{
+ struct mlx5_clock *clock =
+ container_of(ptp, struct mlx5_clock, ptp_info);
+ struct system_time_snapshot history_begin = {0};
+ struct mlx5_core_dev *mdev;
+ int err;
+
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
+
+ if (!mlx5_is_ptm_source_time_available(mdev)) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ ktime_get_snapshot(&history_begin);
+
+ err = get_device_system_crosststamp(mlx5_mtctr_syncdevicecyclestime,
+ mdev, &history_begin, cts);
+unlock:
+ mlx5_clock_unlock(clock);
+ return err;
+}
+#endif /* CONFIG_X86 */
+
static u64 mlx5_read_time(struct mlx5_core_dev *dev,
struct ptp_system_timestamp *sts,
bool real_time)
@@ -174,12 +401,11 @@ static u64 mlx5_read_time(struct mlx5_core_dev *dev,
(u64)timer_l | (u64)timer_h1 << 32;
}
-static u64 read_internal_timer(const struct cyclecounter *cc)
+static u64 read_internal_timer(struct cyclecounter *cc)
{
struct mlx5_timer *timer = container_of(cc, struct mlx5_timer, cycles);
struct mlx5_clock *clock = container_of(timer, struct mlx5_clock, timer);
- struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
- clock);
+ struct mlx5_core_dev *mdev = mlx5_clock_mdev_get(clock);
return mlx5_read_time(mdev, NULL, false) & cc->mask;
}
@@ -187,7 +413,7 @@ static u64 read_internal_timer(const struct cyclecounter *cc)
static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
{
struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
- struct mlx5_clock *clock = &mdev->clock;
+ struct mlx5_clock *clock = mdev->clock;
struct mlx5_timer *timer;
u32 sign;
@@ -210,12 +436,10 @@ static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
static void mlx5_pps_out(struct work_struct *work)
{
- struct mlx5_pps *pps_info = container_of(work, struct mlx5_pps,
- out_work);
- struct mlx5_clock *clock = container_of(pps_info, struct mlx5_clock,
- pps_info);
- struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
- clock);
+ struct mlx5_clock_dev_state *clock_state = container_of(work, struct mlx5_clock_dev_state,
+ out_work);
+ struct mlx5_core_dev *mdev = clock_state->mdev;
+ struct mlx5_clock *clock = mdev->clock;
u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
unsigned long flags;
int i;
@@ -237,17 +461,17 @@ static void mlx5_pps_out(struct work_struct *work)
}
}
-static void mlx5_timestamp_overflow(struct work_struct *work)
+static long mlx5_timestamp_overflow(struct ptp_clock_info *ptp_info)
{
- struct delayed_work *dwork = to_delayed_work(work);
struct mlx5_core_dev *mdev;
struct mlx5_timer *timer;
struct mlx5_clock *clock;
unsigned long flags;
- timer = container_of(dwork, struct mlx5_timer, overflow_work);
- clock = container_of(timer, struct mlx5_clock, timer);
- mdev = container_of(clock, struct mlx5_core_dev, clock);
+ clock = container_of(ptp_info, struct mlx5_clock, ptp_info);
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
+ timer = &clock->timer;
if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
goto out;
@@ -258,7 +482,8 @@ static void mlx5_timestamp_overflow(struct work_struct *work)
write_sequnlock_irqrestore(&clock->lock, flags);
out:
- schedule_delayed_work(&timer->overflow_work, timer->overflow_period);
+ mlx5_clock_unlock(clock);
+ return timer->overflow_period;
}
static int mlx5_ptp_settime_real_time(struct mlx5_core_dev *mdev,
@@ -277,15 +502,12 @@ static int mlx5_ptp_settime_real_time(struct mlx5_core_dev *mdev,
return mlx5_set_mtutc(mdev, in, sizeof(in));
}
-static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts)
+static int mlx5_clock_settime(struct mlx5_core_dev *mdev, struct mlx5_clock *clock,
+ const struct timespec64 *ts)
{
- struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
struct mlx5_timer *timer = &clock->timer;
- struct mlx5_core_dev *mdev;
unsigned long flags;
- mdev = container_of(clock, struct mlx5_core_dev, clock);
-
if (mlx5_modify_mtutc_allowed(mdev)) {
int err = mlx5_ptp_settime_real_time(mdev, ts);
@@ -301,6 +523,20 @@ static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64
return 0;
}
+static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts)
+{
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
+ struct mlx5_core_dev *mdev;
+ int err;
+
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
+ err = mlx5_clock_settime(mdev, clock, ts);
+ mlx5_clock_unlock(clock);
+
+ return err;
+}
+
static
struct timespec64 mlx5_ptp_gettimex_real_time(struct mlx5_core_dev *mdev,
struct ptp_system_timestamp *sts)
@@ -317,23 +553,39 @@ static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
struct ptp_system_timestamp *sts)
{
struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
- struct mlx5_timer *timer = &clock->timer;
struct mlx5_core_dev *mdev;
- unsigned long flags;
u64 cycles, ns;
- mdev = container_of(clock, struct mlx5_core_dev, clock);
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
if (mlx5_real_time_mode(mdev)) {
*ts = mlx5_ptp_gettimex_real_time(mdev, sts);
goto out;
}
- write_seqlock_irqsave(&clock->lock, flags);
cycles = mlx5_read_time(mdev, sts, false);
- ns = timecounter_cyc2time(&timer->tc, cycles);
- write_sequnlock_irqrestore(&clock->lock, flags);
+ ns = mlx5_timecounter_cyc2time(clock, cycles);
*ts = ns_to_timespec64(ns);
out:
+ mlx5_clock_unlock(clock);
+ return 0;
+}
+
+static int mlx5_ptp_getcyclesx(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
+ ptp_info);
+ struct mlx5_core_dev *mdev;
+ u64 cycles;
+
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
+
+ cycles = mlx5_read_time(mdev, sts, false);
+ *ts = ns_to_timespec64(cycles);
+ mlx5_clock_unlock(clock);
return 0;
}
@@ -364,14 +616,16 @@ static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
struct mlx5_timer *timer = &clock->timer;
struct mlx5_core_dev *mdev;
unsigned long flags;
+ int err = 0;
- mdev = container_of(clock, struct mlx5_core_dev, clock);
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
if (mlx5_modify_mtutc_allowed(mdev)) {
- int err = mlx5_ptp_adjtime_real_time(mdev, delta);
+ err = mlx5_ptp_adjtime_real_time(mdev, delta);
if (err)
- return err;
+ goto unlock;
}
write_seqlock_irqsave(&clock->lock, flags);
@@ -379,17 +633,23 @@ static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
mlx5_update_clock_info_page(mdev);
write_sequnlock_irqrestore(&clock->lock, flags);
- return 0;
+unlock:
+ mlx5_clock_unlock(clock);
+ return err;
}
static int mlx5_ptp_adjphase(struct ptp_clock_info *ptp, s32 delta)
{
struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
struct mlx5_core_dev *mdev;
+ int err;
- mdev = container_of(clock, struct mlx5_core_dev, clock);
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
+ err = mlx5_ptp_adjtime_real_time(mdev, delta);
+ mlx5_clock_unlock(clock);
- return mlx5_ptp_adjtime_real_time(mdev, delta);
+ return err;
}
static int mlx5_ptp_freq_adj_real_time(struct mlx5_core_dev *mdev, long scaled_ppm)
@@ -418,15 +678,17 @@ static int mlx5_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
struct mlx5_timer *timer = &clock->timer;
struct mlx5_core_dev *mdev;
unsigned long flags;
+ int err = 0;
u32 mult;
- mdev = container_of(clock, struct mlx5_core_dev, clock);
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
if (mlx5_modify_mtutc_allowed(mdev)) {
- int err = mlx5_ptp_freq_adj_real_time(mdev, scaled_ppm);
+ err = mlx5_ptp_freq_adj_real_time(mdev, scaled_ppm);
if (err)
- return err;
+ goto unlock;
}
mult = (u32)adjust_by_scaled_ppm(timer->nominal_c_mult, scaled_ppm);
@@ -436,8 +698,11 @@ static int mlx5_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
timer->cycles.mult = mult;
mlx5_update_clock_info_page(mdev);
write_sequnlock_irqrestore(&clock->lock, flags);
+ ptp_schedule_worker(clock->ptp, timer->overflow_period);
- return 0;
+unlock:
+ mlx5_clock_unlock(clock);
+ return err;
}
static int mlx5_extts_configure(struct ptp_clock_info *ptp,
@@ -446,25 +711,14 @@ static int mlx5_extts_configure(struct ptp_clock_info *ptp,
{
struct mlx5_clock *clock =
container_of(ptp, struct mlx5_clock, ptp_info);
- struct mlx5_core_dev *mdev =
- container_of(clock, struct mlx5_core_dev, clock);
u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+ struct mlx5_core_dev *mdev;
u32 field_select = 0;
u8 pin_mode = 0;
u8 pattern = 0;
int pin = -1;
int err = 0;
- if (!MLX5_PPS_CAP(mdev))
- return -EOPNOTSUPP;
-
- /* Reject requests with unsupported flags */
- if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_FALLING_EDGE |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
/* Reject requests to enable time stamping on both edges. */
if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
(rq->extts.flags & PTP_ENABLE_FEATURE) &&
@@ -488,6 +742,14 @@ static int mlx5_extts_configure(struct ptp_clock_info *ptp,
field_select = MLX5_MTPPS_FS_ENABLE;
}
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
+
+ if (!MLX5_PPS_CAP(mdev)) {
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+
MLX5_SET(mtpps_reg, in, pin, pin);
MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
MLX5_SET(mtpps_reg, in, pattern, pattern);
@@ -496,15 +758,23 @@ static int mlx5_extts_configure(struct ptp_clock_info *ptp,
err = mlx5_set_mtpps(mdev, in, sizeof(in));
if (err)
- return err;
+ goto unlock;
+
+ err = mlx5_set_mtppse(mdev, pin, 0, MLX5_EVENT_MODE_REPETETIVE & on);
+ if (err)
+ goto unlock;
- return mlx5_set_mtppse(mdev, pin, 0,
- MLX5_EVENT_MODE_REPETETIVE & on);
+ clock->pps_info.pin_armed[pin] = on;
+ clock_priv(clock)->event_mdev = mdev;
+
+unlock:
+ mlx5_clock_unlock(clock);
+ return err;
}
static u64 find_target_cycles(struct mlx5_core_dev *mdev, s64 target_ns)
{
- struct mlx5_clock *clock = &mdev->clock;
+ struct mlx5_clock *clock = mdev->clock;
u64 cycles_now, cycles_delta;
u64 nsec_now, nsec_delta;
struct mlx5_timer *timer;
@@ -563,7 +833,7 @@ static int mlx5_perout_conf_out_pulse_duration(struct mlx5_core_dev *mdev,
struct ptp_clock_request *rq,
u32 *out_pulse_duration_ns)
{
- struct mlx5_pps *pps_info = &mdev->clock.pps_info;
+ struct mlx5_pps *pps_info = &mdev->clock->pps_info;
u32 out_pulse_duration;
struct timespec64 ts;
@@ -596,7 +866,7 @@ static int perout_conf_npps_real_time(struct mlx5_core_dev *mdev, struct ptp_clo
u32 *field_select, u32 *out_pulse_duration_ns,
u64 *period, u64 *time_stamp)
{
- struct mlx5_pps *pps_info = &mdev->clock.pps_info;
+ struct mlx5_pps *pps_info = &mdev->clock->pps_info;
struct ptp_clock_time *time = &rq->perout.start;
struct timespec64 ts;
@@ -619,38 +889,24 @@ static int perout_conf_npps_real_time(struct mlx5_core_dev *mdev, struct ptp_clo
return 0;
}
-static bool mlx5_perout_verify_flags(struct mlx5_core_dev *mdev, unsigned int flags)
-{
- return ((!mlx5_npps_real_time_supported(mdev) && flags) ||
- (mlx5_npps_real_time_supported(mdev) && flags & ~PTP_PEROUT_DUTY_CYCLE));
-}
-
static int mlx5_perout_configure(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq,
int on)
{
struct mlx5_clock *clock =
container_of(ptp, struct mlx5_clock, ptp_info);
- struct mlx5_core_dev *mdev =
- container_of(clock, struct mlx5_core_dev, clock);
- bool rt_mode = mlx5_real_time_mode(mdev);
u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
u32 out_pulse_duration_ns = 0;
+ struct mlx5_core_dev *mdev;
u32 field_select = 0;
u64 npps_period = 0;
u64 time_stamp = 0;
u8 pin_mode = 0;
u8 pattern = 0;
+ bool rt_mode;
int pin = -1;
int err = 0;
- if (!MLX5_PPS_CAP(mdev))
- return -EOPNOTSUPP;
-
- /* Reject requests with unsupported flags */
- if (mlx5_perout_verify_flags(mdev, rq->perout.flags))
- return -EOPNOTSUPP;
-
if (rq->perout.index >= clock->ptp_info.n_pins)
return -EINVAL;
@@ -659,14 +915,23 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
if (pin < 0)
return -EBUSY;
- if (on) {
- bool rt_mode = mlx5_real_time_mode(mdev);
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
+ rt_mode = mlx5_real_time_mode(mdev);
+
+ if (!MLX5_PPS_CAP(mdev)) {
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+ if (on) {
pin_mode = MLX5_PIN_MODE_OUT;
pattern = MLX5_OUT_PATTERN_PERIODIC;
- if (rt_mode && rq->perout.start.sec > U32_MAX)
- return -EINVAL;
+ if (rt_mode && rq->perout.start.sec > U32_MAX) {
+ err = -EINVAL;
+ goto unlock;
+ }
field_select |= MLX5_MTPPS_FS_PIN_MODE |
MLX5_MTPPS_FS_PATTERN |
@@ -679,7 +944,7 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
else
err = perout_conf_1pps(mdev, rq, &time_stamp, rt_mode);
if (err)
- return err;
+ goto unlock;
}
MLX5_SET(mtpps_reg, in, pin, pin);
@@ -692,13 +957,16 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
MLX5_SET(mtpps_reg, in, out_pulse_duration_ns, out_pulse_duration_ns);
err = mlx5_set_mtpps(mdev, in, sizeof(in));
if (err)
- return err;
+ goto unlock;
if (rt_mode)
- return 0;
+ goto unlock;
- return mlx5_set_mtppse(mdev, pin, 0,
- MLX5_EVENT_MODE_REPETETIVE & on);
+ err = mlx5_set_mtppse(mdev, pin, 0, MLX5_EVENT_MODE_REPETETIVE & on);
+
+unlock:
+ mlx5_clock_unlock(clock);
+ return err;
}
static int mlx5_pps_configure(struct ptp_clock_info *ptp,
@@ -771,6 +1039,7 @@ static const struct ptp_clock_info mlx5_ptp_clock_info = {
.settime64 = mlx5_ptp_settime,
.enable = NULL,
.verify = NULL,
+ .do_aux_work = mlx5_timestamp_overflow,
};
static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin,
@@ -784,10 +1053,8 @@ static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin,
mtpps_size, MLX5_REG_MTPPS, 0, 0);
}
-static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin)
+static int mlx5_get_pps_pin_mode(struct mlx5_core_dev *mdev, u8 pin)
{
- struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock);
-
u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {};
u8 mode;
int err;
@@ -806,8 +1073,9 @@ static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin)
return PTP_PF_NONE;
}
-static void mlx5_init_pin_config(struct mlx5_clock *clock)
+static void mlx5_init_pin_config(struct mlx5_core_dev *mdev)
{
+ struct mlx5_clock *clock = mdev->clock;
int i;
if (!clock->ptp_info.n_pins)
@@ -823,20 +1091,27 @@ static void mlx5_init_pin_config(struct mlx5_clock *clock)
clock->ptp_info.verify = mlx5_ptp_verify;
clock->ptp_info.pps = 1;
+ clock->ptp_info.supported_extts_flags = PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS;
+
+ if (mlx5_npps_real_time_supported(mdev))
+ clock->ptp_info.supported_perout_flags = PTP_PEROUT_DUTY_CYCLE;
+
for (i = 0; i < clock->ptp_info.n_pins; i++) {
snprintf(clock->ptp_info.pin_config[i].name,
sizeof(clock->ptp_info.pin_config[i].name),
"mlx5_pps%d", i);
clock->ptp_info.pin_config[i].index = i;
- clock->ptp_info.pin_config[i].func = mlx5_get_pps_pin_mode(clock, i);
+ clock->ptp_info.pin_config[i].func = mlx5_get_pps_pin_mode(mdev, i);
clock->ptp_info.pin_config[i].chan = 0;
}
}
static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev)
{
- struct mlx5_clock *clock = &mdev->clock;
u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+ struct mlx5_clock *clock = mdev->clock;
mlx5_query_mtpps(mdev, out, sizeof(out));
@@ -886,16 +1161,16 @@ static u64 perout_conf_next_event_timer(struct mlx5_core_dev *mdev,
static int mlx5_pps_event(struct notifier_block *nb,
unsigned long type, void *data)
{
- struct mlx5_clock *clock = mlx5_nb_cof(nb, struct mlx5_clock, pps_nb);
+ struct mlx5_clock_dev_state *clock_state = mlx5_nb_cof(nb, struct mlx5_clock_dev_state,
+ pps_nb);
+ struct mlx5_core_dev *mdev = clock_state->mdev;
+ struct mlx5_clock *clock = mdev->clock;
struct ptp_clock_event ptp_event;
struct mlx5_eqe *eqe = data;
int pin = eqe->data.pps.pin;
- struct mlx5_core_dev *mdev;
unsigned long flags;
u64 ns;
- mdev = container_of(clock, struct mlx5_core_dev, clock);
-
switch (clock->ptp_info.pin_config[pin].func) {
case PTP_PF_EXTTS:
ptp_event.index = pin;
@@ -915,11 +1190,15 @@ static int mlx5_pps_event(struct notifier_block *nb,
ptp_clock_event(clock->ptp, &ptp_event);
break;
case PTP_PF_PEROUT:
+ if (clock->shared) {
+ mlx5_core_warn(mdev, " Received unexpected PPS out event\n");
+ break;
+ }
ns = perout_conf_next_event_timer(mdev, clock);
write_seqlock_irqsave(&clock->lock, flags);
clock->pps_info.start[pin] = ns;
write_sequnlock_irqrestore(&clock->lock, flags);
- schedule_work(&clock->pps_info.out_work);
+ schedule_work(&clock_state->out_work);
break;
default:
mlx5_core_err(mdev, " Unhandled clock PPS event, func %d\n",
@@ -931,7 +1210,7 @@ static int mlx5_pps_event(struct notifier_block *nb,
static void mlx5_timecounter_init(struct mlx5_core_dev *mdev)
{
- struct mlx5_clock *clock = &mdev->clock;
+ struct mlx5_clock *clock = mdev->clock;
struct mlx5_timer *timer = &clock->timer;
u32 dev_freq;
@@ -947,10 +1226,10 @@ static void mlx5_timecounter_init(struct mlx5_core_dev *mdev)
ktime_to_ns(ktime_get_real()));
}
-static void mlx5_init_overflow_period(struct mlx5_clock *clock)
+static void mlx5_init_overflow_period(struct mlx5_core_dev *mdev)
{
- struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock);
struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
+ struct mlx5_clock *clock = mdev->clock;
struct mlx5_timer *timer = &clock->timer;
u64 overflow_cycles;
u64 frac = 0;
@@ -971,12 +1250,11 @@ static void mlx5_init_overflow_period(struct mlx5_clock *clock)
do_div(ns, NSEC_PER_SEC / HZ);
timer->overflow_period = ns;
- INIT_DELAYED_WORK(&timer->overflow_work, mlx5_timestamp_overflow);
- if (timer->overflow_period)
- schedule_delayed_work(&timer->overflow_work, 0);
- else
+ if (!timer->overflow_period) {
+ timer->overflow_period = HZ;
mlx5_core_warn(mdev,
- "invalid overflow period, overflow_work is not scheduled\n");
+ "invalid overflow period, overflow_work is scheduled once per second\n");
+ }
if (clock_info)
clock_info->overflow_period = timer->overflow_period;
@@ -984,7 +1262,7 @@ static void mlx5_init_overflow_period(struct mlx5_clock *clock)
static void mlx5_init_clock_info(struct mlx5_core_dev *mdev)
{
- struct mlx5_clock *clock = &mdev->clock;
+ struct mlx5_clock *clock = mdev->clock;
struct mlx5_ib_clock_info *info;
struct mlx5_timer *timer;
@@ -1007,7 +1285,7 @@ static void mlx5_init_clock_info(struct mlx5_core_dev *mdev)
static void mlx5_init_timer_max_freq_adjustment(struct mlx5_core_dev *mdev)
{
- struct mlx5_clock *clock = &mdev->clock;
+ struct mlx5_clock *clock = mdev->clock;
u32 out[MLX5_ST_SZ_DW(mtutc_reg)] = {};
u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
u8 log_max_freq_adjustment = 0;
@@ -1026,7 +1304,8 @@ static void mlx5_init_timer_max_freq_adjustment(struct mlx5_core_dev *mdev)
static void mlx5_init_timer_clock(struct mlx5_core_dev *mdev)
{
- struct mlx5_clock *clock = &mdev->clock;
+ struct mlx5_clock *clock = mdev->clock;
+ bool expose_cycles;
/* Configure the PHC */
clock->ptp_info = mlx5_ptp_clock_info;
@@ -1034,40 +1313,48 @@ static void mlx5_init_timer_clock(struct mlx5_core_dev *mdev)
if (MLX5_CAP_MCAM_REG(mdev, mtutc))
mlx5_init_timer_max_freq_adjustment(mdev);
+ expose_cycles = !MLX5_CAP_GEN(mdev, disciplined_fr_counter) ||
+ !mlx5_real_time_mode(mdev);
+
+#ifdef CONFIG_X86
+ if (MLX5_CAP_MCAM_REG3(mdev, mtptm) &&
+ MLX5_CAP_MCAM_REG3(mdev, mtctr) && boot_cpu_has(X86_FEATURE_ART)) {
+ clock->ptp_info.getcrosststamp = mlx5_ptp_getcrosststamp;
+ if (expose_cycles)
+ clock->ptp_info.getcrosscycles =
+ mlx5_ptp_getcrosscycles;
+ }
+#endif /* CONFIG_X86 */
+
+ if (expose_cycles)
+ clock->ptp_info.getcyclesx64 = mlx5_ptp_getcyclesx;
+
mlx5_timecounter_init(mdev);
mlx5_init_clock_info(mdev);
- mlx5_init_overflow_period(clock);
+ mlx5_init_overflow_period(mdev);
if (mlx5_real_time_mode(mdev)) {
struct timespec64 ts;
ktime_get_real_ts64(&ts);
- mlx5_ptp_settime(&clock->ptp_info, &ts);
+ mlx5_clock_settime(mdev, clock, &ts);
}
}
static void mlx5_init_pps(struct mlx5_core_dev *mdev)
{
- struct mlx5_clock *clock = &mdev->clock;
-
if (!MLX5_PPS_CAP(mdev))
return;
mlx5_get_pps_caps(mdev);
- mlx5_init_pin_config(clock);
+ mlx5_init_pin_config(mdev);
}
-void mlx5_init_clock(struct mlx5_core_dev *mdev)
+static void mlx5_init_clock_dev(struct mlx5_core_dev *mdev)
{
- struct mlx5_clock *clock = &mdev->clock;
-
- if (!MLX5_CAP_GEN(mdev, device_frequency_khz)) {
- mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
- return;
- }
+ struct mlx5_clock *clock = mdev->clock;
seqlock_init(&clock->lock);
- INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out);
/* Initialize the device clock */
mlx5_init_timer_clock(mdev);
@@ -1076,33 +1363,27 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
mlx5_init_pps(mdev);
clock->ptp = ptp_clock_register(&clock->ptp_info,
- &mdev->pdev->dev);
+ clock->shared ? NULL : &mdev->pdev->dev);
if (IS_ERR(clock->ptp)) {
- mlx5_core_warn(mdev, "ptp_clock_register failed %ld\n",
- PTR_ERR(clock->ptp));
+ mlx5_core_warn(mdev, "%sptp_clock_register failed %pe\n",
+ clock->shared ? "shared clock " : "",
+ clock->ptp);
clock->ptp = NULL;
}
- MLX5_NB_INIT(&clock->pps_nb, mlx5_pps_event, PPS_EVENT);
- mlx5_eq_notifier_register(mdev, &clock->pps_nb);
+ if (clock->ptp)
+ ptp_schedule_worker(clock->ptp, 0);
}
-void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
+static void mlx5_destroy_clock_dev(struct mlx5_core_dev *mdev)
{
- struct mlx5_clock *clock = &mdev->clock;
-
- if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
- return;
+ struct mlx5_clock *clock = mdev->clock;
- mlx5_eq_notifier_unregister(mdev, &clock->pps_nb);
if (clock->ptp) {
ptp_clock_unregister(clock->ptp);
clock->ptp = NULL;
}
- cancel_work_sync(&clock->pps_info.out_work);
- cancel_delayed_work_sync(&clock->timer.overflow_work);
-
if (mdev->clock_info) {
free_page((unsigned long)mdev->clock_info);
mdev->clock_info = NULL;
@@ -1110,3 +1391,253 @@ void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
kfree(clock->ptp_info.pin_config);
}
+
+static void mlx5_clock_free(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_clock_priv *cpriv = clock_priv(mdev->clock);
+
+ mlx5_destroy_clock_dev(mdev);
+ mutex_destroy(&cpriv->lock);
+ kfree(cpriv);
+ mdev->clock = NULL;
+}
+
+static int mlx5_clock_alloc(struct mlx5_core_dev *mdev, bool shared)
+{
+ struct mlx5_clock_priv *cpriv;
+ struct mlx5_clock *clock;
+
+ cpriv = kzalloc(sizeof(*cpriv), GFP_KERNEL);
+ if (!cpriv)
+ return -ENOMEM;
+
+ mutex_init(&cpriv->lock);
+ cpriv->mdev = mdev;
+ clock = &cpriv->clock;
+ clock->shared = shared;
+ mdev->clock = clock;
+ mlx5_clock_lock(clock);
+ mlx5_init_clock_dev(mdev);
+ mlx5_clock_unlock(clock);
+
+ if (!clock->shared)
+ return 0;
+
+ if (!clock->ptp) {
+ mlx5_core_warn(mdev, "failed to create ptp dev shared by multiple functions");
+ mlx5_clock_free(mdev);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void mlx5_shared_clock_register(struct mlx5_core_dev *mdev,
+ u8 identity[MLX5_RT_CLOCK_IDENTITY_SIZE])
+{
+ struct mlx5_core_dev *peer_dev, *next = NULL;
+ struct mlx5_devcom_match_attr attr = {};
+ struct mlx5_devcom_comp_dev *compd;
+ struct mlx5_devcom_comp_dev *pos;
+
+ BUILD_BUG_ON(MLX5_RT_CLOCK_IDENTITY_SIZE > MLX5_DEVCOM_MATCH_KEY_MAX);
+ memcpy(attr.key.buf, identity, MLX5_RT_CLOCK_IDENTITY_SIZE);
+
+ compd = mlx5_devcom_register_component(mdev->priv.devc,
+ MLX5_DEVCOM_SHARED_CLOCK,
+ &attr, NULL, mdev);
+ if (!compd)
+ return;
+
+ mdev->clock_state->compdev = compd;
+
+ mlx5_devcom_comp_lock(mdev->clock_state->compdev);
+ mlx5_devcom_for_each_peer_entry(mdev->clock_state->compdev, peer_dev, pos) {
+ if (peer_dev->clock) {
+ next = peer_dev;
+ break;
+ }
+ }
+
+ if (next) {
+ mdev->clock = next->clock;
+ /* clock info is shared among all the functions using the same clock */
+ mdev->clock_info = next->clock_info;
+ } else {
+ mlx5_clock_alloc(mdev, true);
+ }
+ mlx5_devcom_comp_unlock(mdev->clock_state->compdev);
+
+ if (!mdev->clock) {
+ mlx5_devcom_unregister_component(mdev->clock_state->compdev);
+ mdev->clock_state->compdev = NULL;
+ }
+}
+
+static void mlx5_shared_clock_unregister(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_core_dev *peer_dev, *next = NULL;
+ struct mlx5_clock *clock = mdev->clock;
+ struct mlx5_devcom_comp_dev *pos;
+
+ mlx5_devcom_comp_lock(mdev->clock_state->compdev);
+ mlx5_devcom_for_each_peer_entry(mdev->clock_state->compdev, peer_dev, pos) {
+ if (peer_dev->clock && peer_dev != mdev) {
+ next = peer_dev;
+ break;
+ }
+ }
+
+ if (next) {
+ struct mlx5_clock_priv *cpriv = clock_priv(clock);
+
+ mlx5_clock_lock(clock);
+ if (mdev == cpriv->mdev)
+ cpriv->mdev = next;
+ mlx5_clock_unlock(clock);
+ } else {
+ mlx5_clock_free(mdev);
+ }
+
+ mdev->clock = NULL;
+ mdev->clock_info = NULL;
+ mlx5_devcom_comp_unlock(mdev->clock_state->compdev);
+
+ mlx5_devcom_unregister_component(mdev->clock_state->compdev);
+}
+
+static void mlx5_clock_arm_pps_in_event(struct mlx5_clock *clock,
+ struct mlx5_core_dev *new_mdev,
+ struct mlx5_core_dev *old_mdev)
+{
+ struct ptp_clock_info *ptp_info = &clock->ptp_info;
+ struct mlx5_clock_priv *cpriv = clock_priv(clock);
+ int i;
+
+ for (i = 0; i < ptp_info->n_pins; i++) {
+ if (ptp_info->pin_config[i].func != PTP_PF_EXTTS ||
+ !clock->pps_info.pin_armed[i])
+ continue;
+
+ if (new_mdev) {
+ mlx5_set_mtppse(new_mdev, i, 0, MLX5_EVENT_MODE_REPETETIVE);
+ cpriv->event_mdev = new_mdev;
+ } else {
+ cpriv->event_mdev = NULL;
+ }
+
+ if (old_mdev)
+ mlx5_set_mtppse(old_mdev, i, 0, MLX5_EVENT_MODE_DISABLE);
+ }
+}
+
+void mlx5_clock_load(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_clock *clock = mdev->clock;
+ struct mlx5_clock_priv *cpriv;
+
+ if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
+ return;
+
+ INIT_WORK(&mdev->clock_state->out_work, mlx5_pps_out);
+ MLX5_NB_INIT(&mdev->clock_state->pps_nb, mlx5_pps_event, PPS_EVENT);
+ mlx5_eq_notifier_register(mdev, &mdev->clock_state->pps_nb);
+
+ if (!clock->shared) {
+ mlx5_clock_arm_pps_in_event(clock, mdev, NULL);
+ return;
+ }
+
+ cpriv = clock_priv(clock);
+ mlx5_devcom_comp_lock(mdev->clock_state->compdev);
+ mlx5_clock_lock(clock);
+ if (mdev == cpriv->mdev && mdev != cpriv->event_mdev)
+ mlx5_clock_arm_pps_in_event(clock, mdev, cpriv->event_mdev);
+ mlx5_clock_unlock(clock);
+ mlx5_devcom_comp_unlock(mdev->clock_state->compdev);
+}
+
+void mlx5_clock_unload(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_core_dev *peer_dev, *next = NULL;
+ struct mlx5_clock *clock = mdev->clock;
+ struct mlx5_devcom_comp_dev *pos;
+
+ if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
+ return;
+
+ if (!clock->shared) {
+ mlx5_clock_arm_pps_in_event(clock, NULL, mdev);
+ goto out;
+ }
+
+ mlx5_devcom_comp_lock(mdev->clock_state->compdev);
+ mlx5_devcom_for_each_peer_entry(mdev->clock_state->compdev, peer_dev, pos) {
+ if (peer_dev->clock && peer_dev != mdev) {
+ next = peer_dev;
+ break;
+ }
+ }
+
+ mlx5_clock_lock(clock);
+ if (mdev == clock_priv(clock)->event_mdev)
+ mlx5_clock_arm_pps_in_event(clock, next, mdev);
+ mlx5_clock_unlock(clock);
+ mlx5_devcom_comp_unlock(mdev->clock_state->compdev);
+
+out:
+ mlx5_eq_notifier_unregister(mdev, &mdev->clock_state->pps_nb);
+ cancel_work_sync(&mdev->clock_state->out_work);
+}
+
+static struct mlx5_clock null_clock;
+
+int mlx5_init_clock(struct mlx5_core_dev *mdev)
+{
+ u8 identity[MLX5_RT_CLOCK_IDENTITY_SIZE];
+ struct mlx5_clock_dev_state *clock_state;
+ int err;
+
+ if (!MLX5_CAP_GEN(mdev, device_frequency_khz)) {
+ mdev->clock = &null_clock;
+ mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
+ return 0;
+ }
+
+ clock_state = kzalloc(sizeof(*clock_state), GFP_KERNEL);
+ if (!clock_state)
+ return -ENOMEM;
+ clock_state->mdev = mdev;
+ mdev->clock_state = clock_state;
+
+ if (MLX5_CAP_MCAM_REG3(mdev, mrtcq) && mlx5_real_time_mode(mdev)) {
+ if (mlx5_clock_identity_get(mdev, identity))
+ mlx5_core_warn(mdev, "failed to get rt clock identity, create ptp dev per function\n");
+ else
+ mlx5_shared_clock_register(mdev, identity);
+ }
+
+ if (!mdev->clock) {
+ err = mlx5_clock_alloc(mdev, false);
+ if (err) {
+ kfree(clock_state);
+ mdev->clock_state = NULL;
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
+{
+ if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
+ return;
+
+ if (mdev->clock->shared)
+ mlx5_shared_clock_unregister(mdev);
+ else
+ mlx5_clock_free(mdev);
+ kfree(mdev->clock_state);
+ mdev->clock_state = NULL;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h
index bd95b9f8d143..aff3aed62c74 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h
@@ -33,6 +33,34 @@
#ifndef __LIB_CLOCK_H__
#define __LIB_CLOCK_H__
+#include <linux/ptp_clock_kernel.h>
+
+#define MAX_PIN_NUM 8
+struct mlx5_pps {
+ u8 pin_caps[MAX_PIN_NUM];
+ u64 start[MAX_PIN_NUM];
+ u8 enabled;
+ u64 min_npps_period;
+ u64 min_out_pulse_duration_ns;
+ bool pin_armed[MAX_PIN_NUM];
+};
+
+struct mlx5_timer {
+ struct cyclecounter cycles;
+ struct timecounter tc;
+ u32 nominal_c_mult;
+ unsigned long overflow_period;
+};
+
+struct mlx5_clock {
+ seqlock_t lock;
+ struct ptp_clock *ptp;
+ struct ptp_clock_info ptp_info;
+ struct mlx5_pps pps_info;
+ struct mlx5_timer timer;
+ bool shared;
+};
+
static inline bool mlx5_is_real_time_rq(struct mlx5_core_dev *mdev)
{
u8 rq_ts_format_cap = MLX5_CAP_GEN(mdev, rq_ts_format);
@@ -54,12 +82,14 @@ static inline bool mlx5_is_real_time_sq(struct mlx5_core_dev *mdev)
typedef ktime_t (*cqe_ts_to_ns)(struct mlx5_clock *, u64);
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
-void mlx5_init_clock(struct mlx5_core_dev *mdev);
+int mlx5_init_clock(struct mlx5_core_dev *mdev);
void mlx5_cleanup_clock(struct mlx5_core_dev *mdev);
+void mlx5_clock_load(struct mlx5_core_dev *mdev);
+void mlx5_clock_unload(struct mlx5_core_dev *mdev);
static inline int mlx5_clock_get_ptp_index(struct mlx5_core_dev *mdev)
{
- return mdev->clock.ptp ? ptp_clock_index(mdev->clock.ptp) : -1;
+ return mdev->clock->ptp ? ptp_clock_index(mdev->clock->ptp) : -1;
}
static inline ktime_t mlx5_timecounter_cyc2time(struct mlx5_clock *clock,
@@ -87,8 +117,10 @@ static inline ktime_t mlx5_real_time_cyc2time(struct mlx5_clock *clock,
return ns_to_ktime(time);
}
#else
-static inline void mlx5_init_clock(struct mlx5_core_dev *mdev) {}
+static inline int mlx5_init_clock(struct mlx5_core_dev *mdev) { return 0; }
static inline void mlx5_cleanup_clock(struct mlx5_core_dev *mdev) {}
+static inline void mlx5_clock_load(struct mlx5_core_dev *mdev) {}
+static inline void mlx5_clock_unload(struct mlx5_core_dev *mdev) {}
static inline int mlx5_clock_get_ptp_index(struct mlx5_core_dev *mdev)
{
return -1;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.h
index c819c047bb9c..4821163a547f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.h
@@ -8,6 +8,7 @@ enum {
MLX5_ACCEL_OBJ_TLS_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_TLS,
MLX5_ACCEL_OBJ_IPSEC_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_IPSEC,
MLX5_ACCEL_OBJ_MACSEC_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_MACSEC,
+ MLX5_ACCEL_OBJ_PSP_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_PSP,
MLX5_ACCEL_OBJ_TYPE_KEY_NUM,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
index 7b0766c89f4c..e749618229bc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
@@ -4,6 +4,7 @@
#include <linux/mlx5/vport.h>
#include <linux/list.h>
#include "lib/devcom.h"
+#include "lib/mlx5.h"
#include "mlx5_core.h"
static LIST_HEAD(devcom_dev_list);
@@ -22,11 +23,17 @@ struct mlx5_devcom_dev {
struct kref ref;
};
+struct mlx5_devcom_key {
+ u32 flags;
+ union mlx5_devcom_match_key key;
+ possible_net_t net;
+};
+
struct mlx5_devcom_comp {
struct list_head comp_list;
enum mlx5_devcom_component id;
- u64 key;
struct list_head comp_dev_list_head;
+ struct mlx5_devcom_key key;
mlx5_devcom_event_handler_t handler;
struct kref ref;
bool ready;
@@ -69,20 +76,18 @@ mlx5_devcom_dev_alloc(struct mlx5_core_dev *dev)
struct mlx5_devcom_dev *
mlx5_devcom_register_device(struct mlx5_core_dev *dev)
{
- struct mlx5_devcom_dev *devc;
+ struct mlx5_devcom_dev *devc = NULL;
mutex_lock(&dev_list_lock);
if (devcom_dev_exists(dev)) {
- devc = ERR_PTR(-EEXIST);
+ mlx5_core_err(dev, "devcom device already exists");
goto out;
}
devc = mlx5_devcom_dev_alloc(dev);
- if (!devc) {
- devc = ERR_PTR(-ENOMEM);
+ if (!devc)
goto out;
- }
list_add_tail(&devc->list, &devcom_dev_list);
out:
@@ -103,21 +108,27 @@ mlx5_devcom_dev_release(struct kref *ref)
void mlx5_devcom_unregister_device(struct mlx5_devcom_dev *devc)
{
- if (!IS_ERR_OR_NULL(devc))
- kref_put(&devc->ref, mlx5_devcom_dev_release);
+ if (!devc)
+ return;
+
+ kref_put(&devc->ref, mlx5_devcom_dev_release);
}
static struct mlx5_devcom_comp *
-mlx5_devcom_comp_alloc(u64 id, u64 key, mlx5_devcom_event_handler_t handler)
+mlx5_devcom_comp_alloc(u64 id, const struct mlx5_devcom_match_attr *attr,
+ mlx5_devcom_event_handler_t handler)
{
struct mlx5_devcom_comp *comp;
comp = kzalloc(sizeof(*comp), GFP_KERNEL);
if (!comp)
- return ERR_PTR(-ENOMEM);
+ return NULL;
comp->id = id;
- comp->key = key;
+ comp->key.key = attr->key;
+ comp->key.flags = attr->flags;
+ if (attr->flags & MLX5_DEVCOM_MATCH_FLAGS_NS)
+ write_pnet(&comp->key.net, attr->net);
comp->handler = handler;
init_rwsem(&comp->sem);
lockdep_register_key(&comp->lock_key);
@@ -149,7 +160,7 @@ devcom_alloc_comp_dev(struct mlx5_devcom_dev *devc,
devcom = kzalloc(sizeof(*devcom), GFP_KERNEL);
if (!devcom)
- return ERR_PTR(-ENOMEM);
+ return NULL;
kref_get(&devc->ref);
devcom->devc = devc;
@@ -180,21 +191,34 @@ devcom_free_comp_dev(struct mlx5_devcom_comp_dev *devcom)
static bool
devcom_component_equal(struct mlx5_devcom_comp *devcom,
enum mlx5_devcom_component id,
- u64 key)
+ const struct mlx5_devcom_match_attr *attr)
{
- return devcom->id == id && devcom->key == key;
+ if (devcom->id != id)
+ return false;
+
+ if (devcom->key.flags != attr->flags)
+ return false;
+
+ if (memcmp(&devcom->key.key, &attr->key, sizeof(devcom->key.key)))
+ return false;
+
+ if (devcom->key.flags & MLX5_DEVCOM_MATCH_FLAGS_NS &&
+ !net_eq(read_pnet(&devcom->key.net), attr->net))
+ return false;
+
+ return true;
}
static struct mlx5_devcom_comp *
devcom_component_get(struct mlx5_devcom_dev *devc,
enum mlx5_devcom_component id,
- u64 key,
+ const struct mlx5_devcom_match_attr *attr,
mlx5_devcom_event_handler_t handler)
{
struct mlx5_devcom_comp *comp;
devcom_for_each_component(comp) {
- if (devcom_component_equal(comp, id, key)) {
+ if (devcom_component_equal(comp, id, attr)) {
if (handler == comp->handler) {
kref_get(&comp->ref);
return comp;
@@ -212,35 +236,32 @@ devcom_component_get(struct mlx5_devcom_dev *devc,
struct mlx5_devcom_comp_dev *
mlx5_devcom_register_component(struct mlx5_devcom_dev *devc,
enum mlx5_devcom_component id,
- u64 key,
+ const struct mlx5_devcom_match_attr *attr,
mlx5_devcom_event_handler_t handler,
void *data)
{
- struct mlx5_devcom_comp_dev *devcom;
+ struct mlx5_devcom_comp_dev *devcom = NULL;
struct mlx5_devcom_comp *comp;
- if (IS_ERR_OR_NULL(devc))
- return ERR_PTR(-EINVAL);
+ if (!devc)
+ return NULL;
mutex_lock(&comp_list_lock);
- comp = devcom_component_get(devc, id, key, handler);
- if (IS_ERR(comp)) {
- devcom = ERR_PTR(-EINVAL);
+ comp = devcom_component_get(devc, id, attr, handler);
+ if (IS_ERR(comp))
goto out_unlock;
- }
if (!comp) {
- comp = mlx5_devcom_comp_alloc(id, key, handler);
- if (IS_ERR(comp)) {
- devcom = ERR_CAST(comp);
+ comp = mlx5_devcom_comp_alloc(id, attr, handler);
+ if (!comp)
goto out_unlock;
- }
+
list_add_tail(&comp->comp_list, &devcom_comp_list);
}
mutex_unlock(&comp_list_lock);
devcom = devcom_alloc_comp_dev(devc, comp, data);
- if (IS_ERR(devcom))
+ if (!devcom)
kref_put(&comp->ref, mlx5_devcom_comp_release);
return devcom;
@@ -252,8 +273,10 @@ out_unlock:
void mlx5_devcom_unregister_component(struct mlx5_devcom_comp_dev *devcom)
{
- if (!IS_ERR_OR_NULL(devcom))
- devcom_free_comp_dev(devcom);
+ if (!devcom)
+ return;
+
+ devcom_free_comp_dev(devcom);
}
int mlx5_devcom_comp_get_size(struct mlx5_devcom_comp_dev *devcom)
@@ -272,7 +295,7 @@ int mlx5_devcom_send_event(struct mlx5_devcom_comp_dev *devcom,
int err = 0;
void *data;
- if (IS_ERR_OR_NULL(devcom))
+ if (!devcom)
return -ENODEV;
comp = devcom->comp;
@@ -314,7 +337,7 @@ void mlx5_devcom_comp_set_ready(struct mlx5_devcom_comp_dev *devcom, bool ready)
bool mlx5_devcom_comp_is_ready(struct mlx5_devcom_comp_dev *devcom)
{
- if (IS_ERR_OR_NULL(devcom))
+ if (!devcom)
return false;
return READ_ONCE(devcom->comp->ready);
@@ -324,7 +347,7 @@ bool mlx5_devcom_for_each_peer_begin(struct mlx5_devcom_comp_dev *devcom)
{
struct mlx5_devcom_comp *comp;
- if (IS_ERR_OR_NULL(devcom))
+ if (!devcom)
return false;
comp = devcom->comp;
@@ -397,21 +420,21 @@ void *mlx5_devcom_get_next_peer_data_rcu(struct mlx5_devcom_comp_dev *devcom,
void mlx5_devcom_comp_lock(struct mlx5_devcom_comp_dev *devcom)
{
- if (IS_ERR_OR_NULL(devcom))
+ if (!devcom)
return;
down_write(&devcom->comp->sem);
}
void mlx5_devcom_comp_unlock(struct mlx5_devcom_comp_dev *devcom)
{
- if (IS_ERR_OR_NULL(devcom))
+ if (!devcom)
return;
up_write(&devcom->comp->sem);
}
int mlx5_devcom_comp_trylock(struct mlx5_devcom_comp_dev *devcom)
{
- if (IS_ERR_OR_NULL(devcom))
+ if (!devcom)
return 0;
return down_write_trylock(&devcom->comp->sem);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
index d58032dd0df7..91e5ae529d5c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
@@ -6,11 +6,28 @@
#include <linux/mlx5/driver.h>
+enum mlx5_devcom_match_flags {
+ MLX5_DEVCOM_MATCH_FLAGS_NS = BIT(0),
+};
+
+#define MLX5_DEVCOM_MATCH_KEY_MAX 32
+union mlx5_devcom_match_key {
+ u64 val;
+ u8 buf[MLX5_DEVCOM_MATCH_KEY_MAX];
+};
+
+struct mlx5_devcom_match_attr {
+ u32 flags;
+ union mlx5_devcom_match_key key;
+ struct net *net;
+};
+
enum mlx5_devcom_component {
MLX5_DEVCOM_ESW_OFFLOADS,
MLX5_DEVCOM_MPV,
MLX5_DEVCOM_HCA_PORTS,
MLX5_DEVCOM_SD_GROUP,
+ MLX5_DEVCOM_SHARED_CLOCK,
MLX5_DEVCOM_NUM_COMPONENTS,
};
@@ -24,7 +41,7 @@ void mlx5_devcom_unregister_device(struct mlx5_devcom_dev *devc);
struct mlx5_devcom_comp_dev *
mlx5_devcom_register_component(struct mlx5_devcom_dev *devc,
enum mlx5_devcom_component id,
- u64 key,
+ const struct mlx5_devcom_match_attr *attr,
mlx5_devcom_event_handler_t handler,
void *data);
void mlx5_devcom_unregister_component(struct mlx5_devcom_comp_dev *devcom);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
index 7c5516b0a844..8115071c34a4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
@@ -30,7 +30,7 @@ struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev)
dm = kzalloc(sizeof(*dm), GFP_KERNEL);
if (!dm)
- return ERR_PTR(-ENOMEM);
+ return NULL;
spin_lock_init(&dm->lock);
@@ -96,7 +96,7 @@ err_modify_hdr:
err_steering:
kfree(dm);
- return ERR_PTR(-ENOMEM);
+ return NULL;
}
void mlx5_dm_cleanup(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
index 4b7f7131c560..b1edc71ffc6d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
@@ -72,7 +72,7 @@ static inline void eq_update_ci(struct mlx5_eq *eq, int arm)
__raw_writel((__force u32)cpu_to_be32(val), addr);
/* We still want ordering, just not swabbing, so add a barrier */
- mb();
+ wmb();
}
int mlx5_eq_table_init(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
index a80ecb672f33..0a3c260af377 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
@@ -161,7 +161,8 @@ mlx5_chains_create_table(struct mlx5_fs_chains *chains,
ft_attr.flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
- sz = (chain == mlx5_chains_get_nf_ft_chain(chains)) ? FT_TBL_SZ : POOL_NEXT_SIZE;
+ sz = (chain == mlx5_chains_get_nf_ft_chain(chains)) ?
+ FT_TBL_SZ : MLX5_FS_MAX_POOL_SIZE;
ft_attr.max_fte = sz;
/* We use chains_default_ft(chains) as the table's next_ft till
@@ -196,6 +197,11 @@ mlx5_chains_create_table(struct mlx5_fs_chains *chains,
ns = mlx5_get_flow_namespace(chains->dev, chains->ns);
}
+ if (!ns) {
+ mlx5_core_warn(chains->dev, "Failed to get flow namespace\n");
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
ft_attr.autogroup.num_reserved_entries = 2;
ft_attr.autogroup.max_num_groups = chains->group_num;
ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
@@ -699,7 +705,7 @@ mlx5_chains_create_global_table(struct mlx5_fs_chains *chains)
goto err_ignore;
}
- chain = mlx5_chains_get_chain_range(chains),
+ chain = mlx5_chains_get_chain_range(chains);
prio = mlx5_chains_get_prio_range(chains);
level = mlx5_chains_get_level_range(chains);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
index b78f2ba25c19..7adad784ad46 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
@@ -9,29 +9,36 @@
#include "mlx5_core.h"
#include "lib/fs_ttc.h"
-#define MLX5_TTC_NUM_GROUPS 3
-#define MLX5_TTC_GROUP1_SIZE (BIT(3) + MLX5_NUM_TUNNEL_TT)
-#define MLX5_TTC_GROUP2_SIZE BIT(1)
-#define MLX5_TTC_GROUP3_SIZE BIT(0)
-#define MLX5_TTC_TABLE_SIZE (MLX5_TTC_GROUP1_SIZE +\
- MLX5_TTC_GROUP2_SIZE +\
- MLX5_TTC_GROUP3_SIZE)
-
-#define MLX5_INNER_TTC_NUM_GROUPS 3
-#define MLX5_INNER_TTC_GROUP1_SIZE BIT(3)
-#define MLX5_INNER_TTC_GROUP2_SIZE BIT(1)
-#define MLX5_INNER_TTC_GROUP3_SIZE BIT(0)
-#define MLX5_INNER_TTC_TABLE_SIZE (MLX5_INNER_TTC_GROUP1_SIZE +\
- MLX5_INNER_TTC_GROUP2_SIZE +\
- MLX5_INNER_TTC_GROUP3_SIZE)
+#define MLX5_TTC_MAX_NUM_GROUPS 7
+#define MLX5_TTC_GROUP_TCPUDP_SIZE (MLX5_TT_IPV6_UDP + 1)
+
+struct mlx5_fs_ttc_groups {
+ bool use_l4_type;
+ int num_groups;
+ int group_size[MLX5_TTC_MAX_NUM_GROUPS];
+};
+
+static int mlx5_fs_ttc_table_size(const struct mlx5_fs_ttc_groups *groups)
+{
+ int i, sz = 0;
+
+ for (i = 0; i < groups->num_groups; i++)
+ sz += groups->group_size[i];
+
+ return sz;
+}
/* L3/L4 traffic type classifier */
struct mlx5_ttc_table {
int num_groups;
+ const struct mlx5_fs_ttc_groups *groups;
+ struct mlx5_core_dev *mdev;
struct mlx5_flow_table *t;
struct mlx5_flow_group **g;
struct mlx5_ttc_rule rules[MLX5_NUM_TT];
struct mlx5_flow_handle *tunnel_rules[MLX5_NUM_TUNNEL_TT];
+ u32 refcnt;
+ struct mutex mutex; /* Protect adding rules for ipsec crypto offload */
};
struct mlx5_flow_table *mlx5_get_ttc_flow_table(struct mlx5_ttc_table *ttc)
@@ -58,6 +65,25 @@ static void mlx5_cleanup_ttc_rules(struct mlx5_ttc_table *ttc)
}
}
+static const char *mlx5_traffic_types_names[MLX5_NUM_TT] = {
+ [MLX5_TT_IPV4_TCP] = "TT_IPV4_TCP",
+ [MLX5_TT_IPV6_TCP] = "TT_IPV6_TCP",
+ [MLX5_TT_IPV4_UDP] = "TT_IPV4_UDP",
+ [MLX5_TT_IPV6_UDP] = "TT_IPV6_UDP",
+ [MLX5_TT_IPV4_IPSEC_AH] = "TT_IPV4_IPSEC_AH",
+ [MLX5_TT_IPV6_IPSEC_AH] = "TT_IPV6_IPSEC_AH",
+ [MLX5_TT_IPV4_IPSEC_ESP] = "TT_IPV4_IPSEC_ESP",
+ [MLX5_TT_IPV6_IPSEC_ESP] = "TT_IPV6_IPSEC_ESP",
+ [MLX5_TT_IPV4] = "TT_IPV4",
+ [MLX5_TT_IPV6] = "TT_IPV6",
+ [MLX5_TT_ANY] = "TT_ANY"
+};
+
+const char *mlx5_ttc_get_name(enum mlx5_traffic_types tt)
+{
+ return mlx5_traffic_types_names[tt];
+}
+
struct mlx5_etype_proto {
u16 etype;
u8 proto;
@@ -138,6 +164,97 @@ static struct mlx5_etype_proto ttc_tunnel_rules[] = {
};
+enum TTC_GROUP_TYPE {
+ TTC_GROUPS_DEFAULT = 0,
+ TTC_GROUPS_USE_L4_TYPE = 1,
+ TTC_GROUPS_DEFAULT_ESP = 2,
+ TTC_GROUPS_USE_L4_TYPE_ESP = 3,
+};
+
+static const struct mlx5_fs_ttc_groups ttc_groups[] = {
+ [TTC_GROUPS_DEFAULT] = {
+ .num_groups = 3,
+ .group_size = {
+ BIT(3) + MLX5_NUM_TUNNEL_TT,
+ BIT(1),
+ BIT(0),
+ },
+ },
+ [TTC_GROUPS_USE_L4_TYPE] = {
+ .use_l4_type = true,
+ .num_groups = 4,
+ .group_size = {
+ MLX5_TTC_GROUP_TCPUDP_SIZE,
+ BIT(3) + MLX5_NUM_TUNNEL_TT - MLX5_TTC_GROUP_TCPUDP_SIZE,
+ BIT(1),
+ BIT(0),
+ },
+ },
+ [TTC_GROUPS_DEFAULT_ESP] = {
+ .num_groups = 6,
+ .group_size = {
+ MLX5_TTC_GROUP_TCPUDP_SIZE + BIT(1) +
+ MLX5_NUM_TUNNEL_TT,
+ BIT(2), /* decrypted outer L4 */
+ BIT(2), /* decrypted inner L4 */
+ BIT(1), /* ESP */
+ BIT(1),
+ BIT(0),
+ },
+ },
+ [TTC_GROUPS_USE_L4_TYPE_ESP] = {
+ .use_l4_type = true,
+ .num_groups = 7,
+ .group_size = {
+ MLX5_TTC_GROUP_TCPUDP_SIZE,
+ BIT(1) + MLX5_NUM_TUNNEL_TT,
+ BIT(2), /* decrypted outer L4 */
+ BIT(2), /* decrypted inner L4 */
+ BIT(1), /* ESP */
+ BIT(1),
+ BIT(0),
+ },
+ },
+};
+
+static const struct mlx5_fs_ttc_groups inner_ttc_groups[] = {
+ [TTC_GROUPS_DEFAULT] = {
+ .num_groups = 3,
+ .group_size = {
+ BIT(3),
+ BIT(1),
+ BIT(0),
+ },
+ },
+ [TTC_GROUPS_USE_L4_TYPE] = {
+ .use_l4_type = true,
+ .num_groups = 4,
+ .group_size = {
+ MLX5_TTC_GROUP_TCPUDP_SIZE,
+ BIT(3) - MLX5_TTC_GROUP_TCPUDP_SIZE,
+ BIT(1),
+ BIT(0),
+ },
+ },
+};
+
+static const struct mlx5_fs_ttc_groups *
+mlx5_ttc_get_fs_groups(bool use_l4_type, bool ipsec_rss)
+{
+ if (!ipsec_rss)
+ return use_l4_type ? &ttc_groups[TTC_GROUPS_USE_L4_TYPE] :
+ &ttc_groups[TTC_GROUPS_DEFAULT];
+
+ return use_l4_type ? &ttc_groups[TTC_GROUPS_USE_L4_TYPE_ESP] :
+ &ttc_groups[TTC_GROUPS_DEFAULT_ESP];
+}
+
+bool mlx5_ttc_has_esp_flow_group(struct mlx5_ttc_table *ttc)
+{
+ return ttc->groups == &ttc_groups[TTC_GROUPS_DEFAULT_ESP] ||
+ ttc->groups == &ttc_groups[TTC_GROUPS_USE_L4_TYPE_ESP];
+}
+
u8 mlx5_get_proto_by_tunnel_type(enum mlx5_tunnel_types tt)
{
return ttc_tunnel_rules[tt].proto;
@@ -188,18 +305,59 @@ static u8 mlx5_etype_to_ipv(u16 ethertype)
return 0;
}
-static struct mlx5_flow_handle *
-mlx5_generate_ttc_rule(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
- struct mlx5_flow_destination *dest, u16 etype, u8 proto)
+static void mlx5_fs_ttc_set_match_ipv_outer(struct mlx5_core_dev *mdev,
+ struct mlx5_flow_spec *spec,
+ u16 etype)
{
int match_ipv_outer =
- MLX5_CAP_FLOWTABLE_NIC_RX(dev,
+ MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
ft_field_support.outer_ip_version);
+ u8 ipv;
+
+ ipv = mlx5_etype_to_ipv(etype);
+ if (match_ipv_outer && ipv) {
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.ip_version);
+ MLX5_SET(fte_match_param, spec->match_value,
+ outer_headers.ip_version, ipv);
+ } else {
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.ethertype);
+ MLX5_SET(fte_match_param, spec->match_value,
+ outer_headers.ethertype, etype);
+ }
+
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+}
+
+static void mlx5_fs_ttc_set_match_proto(void *headers_c, void *headers_v,
+ u8 proto, bool use_l4_type)
+{
+ int l4_type;
+
+ if (use_l4_type && (proto == IPPROTO_TCP || proto == IPPROTO_UDP)) {
+ if (proto == IPPROTO_TCP)
+ l4_type = MLX5_PACKET_L4_TYPE_TCP;
+ else
+ l4_type = MLX5_PACKET_L4_TYPE_UDP;
+
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, l4_type);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_type, l4_type);
+ } else {
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, proto);
+ }
+}
+
+static struct mlx5_flow_handle *
+mlx5_generate_ttc_rule(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
+ struct mlx5_flow_destination *dest, u16 etype, u8 proto,
+ bool use_l4_type, bool ipsec_rss)
+{
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
int err = 0;
- u8 ipv;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
@@ -207,19 +365,24 @@ mlx5_generate_ttc_rule(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
if (proto) {
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
- MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, proto);
+ mlx5_fs_ttc_set_match_proto(MLX5_ADDR_OF(fte_match_param,
+ spec->match_criteria,
+ outer_headers),
+ MLX5_ADDR_OF(fte_match_param,
+ spec->match_value,
+ outer_headers),
+ proto, use_l4_type);
}
- ipv = mlx5_etype_to_ipv(etype);
- if (match_ipv_outer && ipv) {
- spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
- MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, ipv);
- } else if (etype) {
- spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
- MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype);
+ if (etype)
+ mlx5_fs_ttc_set_match_ipv_outer(dev, spec, etype);
+
+ if (ipsec_rss && proto == IPPROTO_ESP) {
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters_2.ipsec_next_header);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters_2.ipsec_next_header, 0);
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
}
rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
@@ -234,7 +397,8 @@ mlx5_generate_ttc_rule(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
static int mlx5_generate_ttc_table_rules(struct mlx5_core_dev *dev,
struct ttc_params *params,
- struct mlx5_ttc_table *ttc)
+ struct mlx5_ttc_table *ttc,
+ bool use_l4_type)
{
struct mlx5_flow_handle **trules;
struct mlx5_ttc_rule *rules;
@@ -247,11 +411,16 @@ static int mlx5_generate_ttc_table_rules(struct mlx5_core_dev *dev,
for (tt = 0; tt < MLX5_NUM_TT; tt++) {
struct mlx5_ttc_rule *rule = &rules[tt];
+ if (mlx5_ttc_is_decrypted_esp_tt(tt))
+ continue;
+
if (test_bit(tt, params->ignore_dests))
continue;
rule->rule = mlx5_generate_ttc_rule(dev, ft, &params->dests[tt],
ttc_rules[tt].etype,
- ttc_rules[tt].proto);
+ ttc_rules[tt].proto,
+ use_l4_type,
+ params->ipsec_rss);
if (IS_ERR(rule->rule)) {
err = PTR_ERR(rule->rule);
rule->rule = NULL;
@@ -273,7 +442,8 @@ static int mlx5_generate_ttc_table_rules(struct mlx5_core_dev *dev,
trules[tt] = mlx5_generate_ttc_rule(dev, ft,
&params->tunnel_dests[tt],
ttc_tunnel_rules[tt].etype,
- ttc_tunnel_rules[tt].proto);
+ ttc_tunnel_rules[tt].proto,
+ use_l4_type, false);
if (IS_ERR(trules[tt])) {
err = PTR_ERR(trules[tt]);
trules[tt] = NULL;
@@ -288,16 +458,85 @@ del_rules:
return err;
}
+static int mlx5_create_ttc_table_ipsec_groups(struct mlx5_ttc_table *ttc,
+ bool use_ipv,
+ u32 *in, int *next_ix)
+{
+ u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+ const struct mlx5_fs_ttc_groups *groups = ttc->groups;
+ int ix = *next_ix;
+
+ MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0);
+
+ /* decrypted ESP outer group */
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.l4_type_ext);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += groups->group_size[ttc->num_groups];
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
+ if (IS_ERR(ttc->g[ttc->num_groups]))
+ goto err;
+ ttc->num_groups++;
+
+ MLX5_SET(fte_match_param, mc, outer_headers.l4_type_ext, 0);
+
+ /* decrypted ESP inner group */
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
+ if (use_ipv)
+ MLX5_SET(fte_match_param, mc, outer_headers.ip_version, 0);
+ else
+ MLX5_SET(fte_match_param, mc, outer_headers.ethertype, 0);
+ MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_version);
+ MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.l4_type_ext);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += groups->group_size[ttc->num_groups];
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
+ if (IS_ERR(ttc->g[ttc->num_groups]))
+ goto err;
+ ttc->num_groups++;
+
+ MLX5_SET(fte_match_param, mc, inner_headers.ip_version, 0);
+ MLX5_SET(fte_match_param, mc, inner_headers.l4_type_ext, 0);
+
+ /* undecrypted ESP group */
+ MLX5_SET_CFG(in, match_criteria_enable,
+ MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2);
+ if (use_ipv)
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_version);
+ else
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
+ MLX5_SET_TO_ONES(fte_match_param, mc,
+ misc_parameters_2.ipsec_next_header);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += groups->group_size[ttc->num_groups];
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
+ if (IS_ERR(ttc->g[ttc->num_groups]))
+ goto err;
+ ttc->num_groups++;
+
+ *next_ix = ix;
+
+ return 0;
+
+err:
+ return PTR_ERR(ttc->g[ttc->num_groups]);
+}
+
static int mlx5_create_ttc_table_groups(struct mlx5_ttc_table *ttc,
bool use_ipv)
{
+ const struct mlx5_fs_ttc_groups *groups = ttc->groups;
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
int ix = 0;
u32 *in;
int err;
u8 *mc;
- ttc->g = kcalloc(MLX5_TTC_NUM_GROUPS, sizeof(*ttc->g), GFP_KERNEL);
+ ttc->g = kcalloc(groups->num_groups, sizeof(*ttc->g), GFP_KERNEL);
if (!ttc->g)
return -ENOMEM;
in = kvzalloc(inlen, GFP_KERNEL);
@@ -307,26 +546,51 @@ static int mlx5_create_ttc_table_groups(struct mlx5_ttc_table *ttc,
return -ENOMEM;
}
- /* L4 Group */
mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
if (use_ipv)
MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_version);
else
MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+
+ /* TCP UDP group */
+ if (groups->use_l4_type) {
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.l4_type);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += groups->group_size[ttc->num_groups];
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
+ if (IS_ERR(ttc->g[ttc->num_groups]))
+ goto err;
+ ttc->num_groups++;
+
+ MLX5_SET(fte_match_param, mc, outer_headers.l4_type, 0);
+ }
+
+ /* L4 Group */
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5_TTC_GROUP1_SIZE;
+ ix += groups->group_size[ttc->num_groups];
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
if (IS_ERR(ttc->g[ttc->num_groups]))
goto err;
ttc->num_groups++;
+ if (mlx5_ttc_has_esp_flow_group(ttc)) {
+ err = mlx5_create_ttc_table_ipsec_groups(ttc, use_ipv, in, &ix);
+ if (err)
+ goto err;
+
+ MLX5_SET(fte_match_param, mc,
+ misc_parameters_2.ipsec_next_header, 0);
+ }
+
/* L3 Group */
MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0);
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5_TTC_GROUP2_SIZE;
+ ix += groups->group_size[ttc->num_groups];
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
if (IS_ERR(ttc->g[ttc->num_groups]))
@@ -336,7 +600,7 @@ static int mlx5_create_ttc_table_groups(struct mlx5_ttc_table *ttc,
/* Any Group */
memset(in, 0, inlen);
MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5_TTC_GROUP3_SIZE;
+ ix += groups->group_size[ttc->num_groups];
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
if (IS_ERR(ttc->g[ttc->num_groups]))
@@ -358,7 +622,7 @@ static struct mlx5_flow_handle *
mlx5_generate_inner_ttc_rule(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft,
struct mlx5_flow_destination *dest,
- u16 etype, u8 proto)
+ u16 etype, u8 proto, bool use_l4_type)
{
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_handle *rule;
@@ -379,8 +643,13 @@ mlx5_generate_inner_ttc_rule(struct mlx5_core_dev *dev,
if (proto) {
spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_protocol);
- MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_protocol, proto);
+ mlx5_fs_ttc_set_match_proto(MLX5_ADDR_OF(fte_match_param,
+ spec->match_criteria,
+ inner_headers),
+ MLX5_ADDR_OF(fte_match_param,
+ spec->match_value,
+ inner_headers),
+ proto, use_l4_type);
}
rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
@@ -395,7 +664,8 @@ mlx5_generate_inner_ttc_rule(struct mlx5_core_dev *dev,
static int mlx5_generate_inner_ttc_table_rules(struct mlx5_core_dev *dev,
struct ttc_params *params,
- struct mlx5_ttc_table *ttc)
+ struct mlx5_ttc_table *ttc,
+ bool use_l4_type)
{
struct mlx5_ttc_rule *rules;
struct mlx5_flow_table *ft;
@@ -408,12 +678,16 @@ static int mlx5_generate_inner_ttc_table_rules(struct mlx5_core_dev *dev,
for (tt = 0; tt < MLX5_NUM_TT; tt++) {
struct mlx5_ttc_rule *rule = &rules[tt];
+ if (mlx5_ttc_is_decrypted_esp_tt(tt))
+ continue;
+
if (test_bit(tt, params->ignore_dests))
continue;
rule->rule = mlx5_generate_inner_ttc_rule(dev, ft,
&params->dests[tt],
ttc_rules[tt].etype,
- ttc_rules[tt].proto);
+ ttc_rules[tt].proto,
+ use_l4_type);
if (IS_ERR(rule->rule)) {
err = PTR_ERR(rule->rule);
rule->rule = NULL;
@@ -430,7 +704,8 @@ del_rules:
return err;
}
-static int mlx5_create_inner_ttc_table_groups(struct mlx5_ttc_table *ttc)
+static int mlx5_create_inner_ttc_table_groups(struct mlx5_ttc_table *ttc,
+ const struct mlx5_fs_ttc_groups *groups)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
int ix = 0;
@@ -438,8 +713,7 @@ static int mlx5_create_inner_ttc_table_groups(struct mlx5_ttc_table *ttc)
int err;
u8 *mc;
- ttc->g = kcalloc(MLX5_INNER_TTC_NUM_GROUPS, sizeof(*ttc->g),
- GFP_KERNEL);
+ ttc->g = kcalloc(groups->num_groups, sizeof(*ttc->g), GFP_KERNEL);
if (!ttc->g)
return -ENOMEM;
in = kvzalloc(inlen, GFP_KERNEL);
@@ -449,13 +723,28 @@ static int mlx5_create_inner_ttc_table_groups(struct mlx5_ttc_table *ttc)
return -ENOMEM;
}
- /* L4 Group */
mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
- MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_version);
MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
+
+ /* TCP UDP group */
+ if (groups->use_l4_type) {
+ MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.l4_type);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += groups->group_size[ttc->num_groups];
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
+ if (IS_ERR(ttc->g[ttc->num_groups]))
+ goto err;
+ ttc->num_groups++;
+
+ MLX5_SET(fte_match_param, mc, inner_headers.l4_type, 0);
+ }
+
+ /* L4 Group */
+ MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5_INNER_TTC_GROUP1_SIZE;
+ ix += groups->group_size[ttc->num_groups];
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
if (IS_ERR(ttc->g[ttc->num_groups]))
@@ -465,7 +754,7 @@ static int mlx5_create_inner_ttc_table_groups(struct mlx5_ttc_table *ttc)
/* L3 Group */
MLX5_SET(fte_match_param, mc, inner_headers.ip_protocol, 0);
MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5_INNER_TTC_GROUP2_SIZE;
+ ix += groups->group_size[ttc->num_groups];
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
if (IS_ERR(ttc->g[ttc->num_groups]))
@@ -475,7 +764,7 @@ static int mlx5_create_inner_ttc_table_groups(struct mlx5_ttc_table *ttc)
/* Any Group */
memset(in, 0, inlen);
MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5_INNER_TTC_GROUP3_SIZE;
+ ix += groups->group_size[ttc->num_groups];
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
if (IS_ERR(ttc->g[ttc->num_groups]))
@@ -496,27 +785,52 @@ err:
struct mlx5_ttc_table *mlx5_create_inner_ttc_table(struct mlx5_core_dev *dev,
struct ttc_params *params)
{
+ const struct mlx5_fs_ttc_groups *groups;
+ struct mlx5_flow_namespace *ns;
struct mlx5_ttc_table *ttc;
+ bool use_l4_type;
int err;
+ switch (params->ns_type) {
+ case MLX5_FLOW_NAMESPACE_PORT_SEL:
+ use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) &&
+ MLX5_CAP_PORT_SELECTION_FT_FIELD_SUPPORT_2(dev, inner_l4_type);
+ break;
+ case MLX5_FLOW_NAMESPACE_KERNEL:
+ use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) &&
+ MLX5_CAP_NIC_RX_FT_FIELD_SUPPORT_2(dev, inner_l4_type);
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+
ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL);
if (!ttc)
return ERR_PTR(-ENOMEM);
+ ns = mlx5_get_flow_namespace(dev, params->ns_type);
+ if (!ns) {
+ kvfree(ttc);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ groups = use_l4_type ? &inner_ttc_groups[TTC_GROUPS_USE_L4_TYPE] :
+ &inner_ttc_groups[TTC_GROUPS_DEFAULT];
+
WARN_ON_ONCE(params->ft_attr.max_fte);
- params->ft_attr.max_fte = MLX5_INNER_TTC_TABLE_SIZE;
- ttc->t = mlx5_create_flow_table(params->ns, &params->ft_attr);
+ params->ft_attr.max_fte = mlx5_fs_ttc_table_size(groups);
+ ttc->t = mlx5_create_flow_table(ns, &params->ft_attr);
if (IS_ERR(ttc->t)) {
err = PTR_ERR(ttc->t);
kvfree(ttc);
return ERR_PTR(err);
}
- err = mlx5_create_inner_ttc_table_groups(ttc);
+ err = mlx5_create_inner_ttc_table_groups(ttc, groups);
if (err)
goto destroy_ft;
- err = mlx5_generate_inner_ttc_table_rules(dev, params, ttc);
+ err = mlx5_generate_inner_ttc_table_rules(dev, params, ttc, use_l4_type);
if (err)
goto destroy_ft;
@@ -540,6 +854,7 @@ void mlx5_destroy_ttc_table(struct mlx5_ttc_table *ttc)
kfree(ttc->g);
mlx5_destroy_flow_table(ttc->t);
+ mutex_destroy(&ttc->mutex);
kvfree(ttc);
}
@@ -549,16 +864,39 @@ struct mlx5_ttc_table *mlx5_create_ttc_table(struct mlx5_core_dev *dev,
bool match_ipv_outer =
MLX5_CAP_FLOWTABLE_NIC_RX(dev,
ft_field_support.outer_ip_version);
+ struct mlx5_flow_namespace *ns;
struct mlx5_ttc_table *ttc;
+ bool use_l4_type;
int err;
+ switch (params->ns_type) {
+ case MLX5_FLOW_NAMESPACE_PORT_SEL:
+ use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) &&
+ MLX5_CAP_PORT_SELECTION_FT_FIELD_SUPPORT_2(dev, outer_l4_type);
+ break;
+ case MLX5_FLOW_NAMESPACE_KERNEL:
+ use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) &&
+ MLX5_CAP_NIC_RX_FT_FIELD_SUPPORT_2(dev, outer_l4_type);
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+
ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL);
if (!ttc)
return ERR_PTR(-ENOMEM);
+ ns = mlx5_get_flow_namespace(dev, params->ns_type);
+ if (!ns) {
+ kvfree(ttc);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ ttc->groups = mlx5_ttc_get_fs_groups(use_l4_type, params->ipsec_rss);
+
WARN_ON_ONCE(params->ft_attr.max_fte);
- params->ft_attr.max_fte = MLX5_TTC_TABLE_SIZE;
- ttc->t = mlx5_create_flow_table(params->ns, &params->ft_attr);
+ params->ft_attr.max_fte = mlx5_fs_ttc_table_size(ttc->groups);
+ ttc->t = mlx5_create_flow_table(ns, &params->ft_attr);
if (IS_ERR(ttc->t)) {
err = PTR_ERR(ttc->t);
kvfree(ttc);
@@ -569,10 +907,13 @@ struct mlx5_ttc_table *mlx5_create_ttc_table(struct mlx5_core_dev *dev,
if (err)
goto destroy_ft;
- err = mlx5_generate_ttc_table_rules(dev, params, ttc);
+ err = mlx5_generate_ttc_table_rules(dev, params, ttc, use_l4_type);
if (err)
goto destroy_ft;
+ ttc->mdev = dev;
+ mutex_init(&ttc->mutex);
+
return ttc;
destroy_ft:
@@ -606,3 +947,194 @@ int mlx5_ttc_fwd_default_dest(struct mlx5_ttc_table *ttc,
return mlx5_ttc_fwd_dest(ttc, type, &dest);
}
+
+static void _mlx5_ttc_destroy_ipsec_rules(struct mlx5_ttc_table *ttc)
+{
+ enum mlx5_traffic_types i;
+
+ for (i = MLX5_TT_DECRYPTED_ESP_OUTER_IPV4_TCP;
+ i <= MLX5_TT_DECRYPTED_ESP_INNER_IPV6_UDP; i++) {
+ if (!ttc->rules[i].rule)
+ continue;
+
+ mlx5_del_flow_rules(ttc->rules[i].rule);
+ ttc->rules[i].rule = NULL;
+ }
+}
+
+void mlx5_ttc_destroy_ipsec_rules(struct mlx5_ttc_table *ttc)
+{
+ if (!mlx5_ttc_has_esp_flow_group(ttc))
+ return;
+
+ mutex_lock(&ttc->mutex);
+ if (--ttc->refcnt)
+ goto unlock;
+
+ _mlx5_ttc_destroy_ipsec_rules(ttc);
+unlock:
+ mutex_unlock(&ttc->mutex);
+}
+
+static int mlx5_ttc_get_tt_attrs(enum mlx5_traffic_types type,
+ u16 *etype, int *l4_type_ext,
+ enum mlx5_traffic_types *tir_tt)
+{
+ switch (type) {
+ case MLX5_TT_DECRYPTED_ESP_OUTER_IPV4_TCP:
+ case MLX5_TT_DECRYPTED_ESP_INNER_IPV4_TCP:
+ *etype = ETH_P_IP;
+ *l4_type_ext = MLX5_PACKET_L4_TYPE_EXT_TCP;
+ *tir_tt = MLX5_TT_IPV4_TCP;
+ break;
+ case MLX5_TT_DECRYPTED_ESP_OUTER_IPV6_TCP:
+ case MLX5_TT_DECRYPTED_ESP_INNER_IPV6_TCP:
+ *etype = ETH_P_IPV6;
+ *l4_type_ext = MLX5_PACKET_L4_TYPE_EXT_TCP;
+ *tir_tt = MLX5_TT_IPV6_TCP;
+ break;
+ case MLX5_TT_DECRYPTED_ESP_OUTER_IPV4_UDP:
+ case MLX5_TT_DECRYPTED_ESP_INNER_IPV4_UDP:
+ *etype = ETH_P_IP;
+ *l4_type_ext = MLX5_PACKET_L4_TYPE_EXT_UDP;
+ *tir_tt = MLX5_TT_IPV4_UDP;
+ break;
+ case MLX5_TT_DECRYPTED_ESP_OUTER_IPV6_UDP:
+ case MLX5_TT_DECRYPTED_ESP_INNER_IPV6_UDP:
+ *etype = ETH_P_IPV6;
+ *l4_type_ext = MLX5_PACKET_L4_TYPE_EXT_UDP;
+ *tir_tt = MLX5_TT_IPV6_UDP;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct mlx5_flow_handle *
+mlx5_ttc_create_ipsec_outer_rule(struct mlx5_ttc_table *ttc,
+ enum mlx5_traffic_types type)
+{
+ struct mlx5_flow_destination dest;
+ MLX5_DECLARE_FLOW_ACT(flow_act);
+ enum mlx5_traffic_types tir_tt;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ int l4_type_ext;
+ u16 etype;
+ int err;
+
+ err = mlx5_ttc_get_tt_attrs(type, &etype, &l4_type_ext, &tir_tt);
+ if (err)
+ return ERR_PTR(err);
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return ERR_PTR(-ENOMEM);
+
+ mlx5_fs_ttc_set_match_ipv_outer(ttc->mdev, spec, etype);
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.l4_type_ext);
+ MLX5_SET(fte_match_param, spec->match_value,
+ outer_headers.l4_type_ext, l4_type_ext);
+
+ dest = mlx5_ttc_get_default_dest(ttc, tir_tt);
+
+ rule = mlx5_add_flow_rules(ttc->t, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(ttc->mdev, "%s: add rule failed\n", __func__);
+ }
+
+ kvfree(spec);
+ return err ? ERR_PTR(err) : rule;
+}
+
+static struct mlx5_flow_handle *
+mlx5_ttc_create_ipsec_inner_rule(struct mlx5_ttc_table *ttc,
+ struct mlx5_ttc_table *inner_ttc,
+ enum mlx5_traffic_types type)
+{
+ struct mlx5_flow_destination dest;
+ MLX5_DECLARE_FLOW_ACT(flow_act);
+ enum mlx5_traffic_types tir_tt;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ int l4_type_ext;
+ u16 etype;
+ int err;
+
+ err = mlx5_ttc_get_tt_attrs(type, &etype, &l4_type_ext, &tir_tt);
+ if (err)
+ return ERR_PTR(err);
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return ERR_PTR(-ENOMEM);
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ inner_headers.ip_version);
+ MLX5_SET(fte_match_param, spec->match_value,
+ inner_headers.ip_version, mlx5_etype_to_ipv(etype));
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ inner_headers.l4_type_ext);
+ MLX5_SET(fte_match_param, spec->match_value,
+ inner_headers.l4_type_ext, l4_type_ext);
+
+ dest = mlx5_ttc_get_default_dest(inner_ttc, tir_tt);
+
+ spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
+
+ rule = mlx5_add_flow_rules(ttc->t, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(ttc->mdev, "%s: add rule failed\n", __func__);
+ }
+
+ kvfree(spec);
+ return err ? ERR_PTR(err) : rule;
+}
+
+int mlx5_ttc_create_ipsec_rules(struct mlx5_ttc_table *ttc,
+ struct mlx5_ttc_table *inner_ttc)
+{
+ struct mlx5_flow_handle *rule;
+ enum mlx5_traffic_types i;
+
+ if (!mlx5_ttc_has_esp_flow_group(ttc))
+ return 0;
+
+ mutex_lock(&ttc->mutex);
+ if (ttc->refcnt)
+ goto skip;
+
+ for (i = MLX5_TT_DECRYPTED_ESP_OUTER_IPV4_TCP;
+ i <= MLX5_TT_DECRYPTED_ESP_OUTER_IPV6_UDP; i++) {
+ rule = mlx5_ttc_create_ipsec_outer_rule(ttc, i);
+ if (IS_ERR(rule))
+ goto err_out;
+
+ ttc->rules[i].rule = rule;
+ }
+
+ for (i = MLX5_TT_DECRYPTED_ESP_INNER_IPV4_TCP;
+ i <= MLX5_TT_DECRYPTED_ESP_INNER_IPV6_UDP; i++) {
+ rule = mlx5_ttc_create_ipsec_inner_rule(ttc, inner_ttc, i);
+ if (IS_ERR(rule))
+ goto err_out;
+
+ ttc->rules[i].rule = rule;
+ }
+
+skip:
+ ttc->refcnt++;
+ mutex_unlock(&ttc->mutex);
+ return 0;
+
+err_out:
+ _mlx5_ttc_destroy_ipsec_rules(ttc);
+ mutex_unlock(&ttc->mutex);
+ return PTR_ERR(rule);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h
index 85fef0cd1c07..95f6e56724a2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h
@@ -18,6 +18,14 @@ enum mlx5_traffic_types {
MLX5_TT_IPV4,
MLX5_TT_IPV6,
MLX5_TT_ANY,
+ MLX5_TT_DECRYPTED_ESP_OUTER_IPV4_TCP,
+ MLX5_TT_DECRYPTED_ESP_OUTER_IPV6_TCP,
+ MLX5_TT_DECRYPTED_ESP_OUTER_IPV4_UDP,
+ MLX5_TT_DECRYPTED_ESP_OUTER_IPV6_UDP,
+ MLX5_TT_DECRYPTED_ESP_INNER_IPV4_TCP,
+ MLX5_TT_DECRYPTED_ESP_INNER_IPV6_TCP,
+ MLX5_TT_DECRYPTED_ESP_INNER_IPV4_UDP,
+ MLX5_TT_DECRYPTED_ESP_INNER_IPV6_UDP,
MLX5_NUM_TT,
MLX5_NUM_INDIR_TIRS = MLX5_TT_ANY,
};
@@ -40,15 +48,17 @@ struct mlx5_ttc_rule {
struct mlx5_ttc_table;
struct ttc_params {
- struct mlx5_flow_namespace *ns;
+ enum mlx5_flow_namespace_type ns_type;
struct mlx5_flow_table_attr ft_attr;
struct mlx5_flow_destination dests[MLX5_NUM_TT];
DECLARE_BITMAP(ignore_dests, MLX5_NUM_TT);
bool inner_ttc;
DECLARE_BITMAP(ignore_tunnel_dests, MLX5_NUM_TUNNEL_TT);
struct mlx5_flow_destination tunnel_dests[MLX5_NUM_TUNNEL_TT];
+ bool ipsec_rss;
};
+const char *mlx5_ttc_get_name(enum mlx5_traffic_types tt);
struct mlx5_flow_table *mlx5_get_ttc_flow_table(struct mlx5_ttc_table *ttc);
struct mlx5_ttc_table *mlx5_create_ttc_table(struct mlx5_core_dev *dev,
@@ -69,4 +79,14 @@ int mlx5_ttc_fwd_default_dest(struct mlx5_ttc_table *ttc,
bool mlx5_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev);
u8 mlx5_get_proto_by_tunnel_type(enum mlx5_tunnel_types tt);
+bool mlx5_ttc_has_esp_flow_group(struct mlx5_ttc_table *ttc);
+int mlx5_ttc_create_ipsec_rules(struct mlx5_ttc_table *ttc,
+ struct mlx5_ttc_table *inner_ttc);
+void mlx5_ttc_destroy_ipsec_rules(struct mlx5_ttc_table *ttc);
+static inline bool mlx5_ttc_is_decrypted_esp_tt(enum mlx5_traffic_types tt)
+{
+ return tt >= MLX5_TT_DECRYPTED_ESP_OUTER_IPV4_TCP &&
+ tt <= MLX5_TT_DECRYPTED_ESP_INNER_IPV6_UDP;
+}
+
#endif /* __MLX5_FS_TTC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c
index 234cd00f71a1..d524f0220513 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c
@@ -164,6 +164,8 @@ ipsec_fs_roce_rx_rule_setup(struct mlx5_core_dev *mdev,
roce->rule = rule;
memset(spec, 0, sizeof(*spec));
+ if (default_dst->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
+ flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
rule = mlx5_add_flow_rules(roce->ft, spec, &flow_act, default_dst, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -178,6 +180,8 @@ ipsec_fs_roce_rx_rule_setup(struct mlx5_core_dev *mdev,
goto out;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ if (default_dst->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
+ flow_act.flags &= ~FLOW_ACT_IGNORE_FLOW_LEVEL;
dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
dst.ft = roce->ft_rdma;
rule = mlx5_add_flow_rules(roce->nic_master_ft, NULL, &flow_act, &dst,
@@ -386,7 +390,8 @@ static int ipsec_fs_roce_tx_mpv_create(struct mlx5_core_dev *mdev,
return -EOPNOTSUPP;
peer_priv = mlx5_devcom_get_next_peer_data(*ipsec_roce->devcom, &tmp);
- if (!peer_priv) {
+ if (!peer_priv || !peer_priv->ipsec) {
+ mlx5_core_err(mdev, "IPsec not supported on master device\n");
err = -EOPNOTSUPP;
goto release_peer;
}
@@ -455,7 +460,8 @@ static int ipsec_fs_roce_rx_mpv_create(struct mlx5_core_dev *mdev,
return -EOPNOTSUPP;
peer_priv = mlx5_devcom_get_next_peer_data(*ipsec_roce->devcom, &tmp);
- if (!peer_priv) {
+ if (!peer_priv || !peer_priv->ipsec) {
+ mlx5_core_err(mdev, "IPsec not supported on master device\n");
err = -EOPNOTSUPP;
goto release_peer;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c
index 4a078113e292..e6be2f01daf4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c
@@ -45,11 +45,7 @@
#define MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI 0x8
#define MLX5_SECTAG_HEADER_SIZE_WITH_SCI (MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI + MACSEC_SCI_LEN)
-/* MACsec RX flow steering */
-#define MLX5_ETH_WQE_FT_META_MACSEC_MASK 0x3E
-
/* MACsec fs_id handling for steering */
-#define macsec_fs_set_tx_fs_id(fs_id) (MLX5_ETH_WQE_FT_META_MACSEC | (fs_id) << 2)
#define macsec_fs_set_rx_fs_id(fs_id) ((fs_id) | BIT(30))
struct mlx5_sectag_header {
@@ -497,7 +493,7 @@ static int macsec_fs_tx_create(struct mlx5_macsec_fs *macsec_fs)
memset(&dest, 0, sizeof(struct mlx5_flow_destination));
memset(&flow_act, 0, sizeof(flow_act));
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(tx_tables->check_miss_rule_counter);
+ dest.counter = tx_tables->check_miss_rule_counter;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
rule = mlx5_add_flow_rules(tx_tables->ft_check, NULL, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
@@ -519,7 +515,7 @@ static int macsec_fs_tx_create(struct mlx5_macsec_fs *macsec_fs)
flow_act.flags = FLOW_ACT_NO_APPEND;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW | MLX5_FLOW_CONTEXT_ACTION_COUNT;
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(tx_tables->check_rule_counter);
+ dest.counter = tx_tables->check_rule_counter;
rule = mlx5_add_flow_rules(tx_tables->ft_check, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -597,7 +593,7 @@ static int macsec_fs_tx_setup_fte(struct mlx5_macsec_fs *macsec_fs,
MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_a,
MLX5_ETH_WQE_FT_META_MACSEC_MASK);
MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_a,
- macsec_fs_set_tx_fs_id(id));
+ MLX5_MACSEC_TX_METADATA(id));
*fs_id = id;
flow_act->crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_MACSEC;
@@ -1200,7 +1196,7 @@ static int macsec_fs_rx_create_check_decap_rule(struct mlx5_macsec_fs *macsec_fs
flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
roce_dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- roce_dest[dstn].counter_id = mlx5_fc_id(rx_tables->check_rule_counter);
+ roce_dest[dstn].counter = rx_tables->check_rule_counter;
rule = mlx5_add_flow_rules(rx_tables->ft_check, spec, flow_act, roce_dest, dstn + 1);
if (IS_ERR(rule)) {
@@ -1592,7 +1588,7 @@ static int macsec_fs_rx_create(struct mlx5_macsec_fs *macsec_fs)
memset(&flow_act, 0, sizeof(flow_act));
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(rx_tables->check_miss_rule_counter);
+ dest.counter = rx_tables->check_miss_rule_counter;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
rule = mlx5_add_flow_rules(rx_tables->ft_check, NULL, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
@@ -2219,9 +2215,11 @@ static int mlx5_macsec_fs_add_roce_rule_tx(struct mlx5_macsec_fs *macsec_fs, u32
MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_A);
- MLX5_SET(set_action_in, action, data, macsec_fs_set_tx_fs_id(fs_id));
- MLX5_SET(set_action_in, action, offset, 0);
- MLX5_SET(set_action_in, action, length, 32);
+ MLX5_SET(set_action_in, action, data,
+ mlx5_macsec_fs_set_tx_fs_id(fs_id));
+ MLX5_SET(set_action_in, action, offset,
+ MLX5_ETH_WQE_FT_META_MACSEC_SHIFT);
+ MLX5_SET(set_action_in, action, length, 8);
modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_RDMA_TX_MACSEC,
1, action);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.h
index 34b80c3ef6a5..15acaff43641 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.h
@@ -12,6 +12,21 @@
#define MLX5_MACSEC_METADATA_MARKER(metadata) ((((metadata) >> 30) & 0x3) == 0x1)
#define MLX5_MACSEC_RX_METADAT_HANDLE(metadata) ((metadata) & MLX5_MACSEC_RX_FS_ID_MASK)
+/* MACsec TX flow steering */
+#define MLX5_ETH_WQE_FT_META_MACSEC_MASK \
+ (MLX5_ETH_WQE_FT_META_MACSEC | MLX5_ETH_WQE_FT_META_MACSEC_FS_ID_MASK)
+#define MLX5_ETH_WQE_FT_META_MACSEC_SHIFT MLX5_ETH_WQE_FT_META_SHIFT
+
+/* MACsec fs_id handling for steering */
+#define mlx5_macsec_fs_set_tx_fs_id(fs_id) \
+ (((MLX5_ETH_WQE_FT_META_MACSEC) >> MLX5_ETH_WQE_FT_META_MACSEC_SHIFT) \
+ | ((fs_id) << 2))
+
+#define MLX5_MACSEC_TX_METADATA(fs_id) \
+ (mlx5_macsec_fs_set_tx_fs_id(fs_id) << \
+ MLX5_ETH_WQE_FT_META_MACSEC_SHIFT)
+
+/* MACsec fs_id uses 4 bits, supports up to 16 interfaces */
#define MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES 16
struct mlx5_macsec_fs;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
index 37d5f445598c..74ea5da58b7e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
@@ -45,14 +45,22 @@ int mlx5_crdump_enable(struct mlx5_core_dev *dev);
void mlx5_crdump_disable(struct mlx5_core_dev *dev);
int mlx5_crdump_collect(struct mlx5_core_dev *dev, u32 *cr_data);
-static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
+static inline struct net_device *mlx5_uplink_netdev_get(struct mlx5_core_dev *mdev)
{
- return devlink_net(priv_to_devlink(dev));
+ struct mlx5e_resources *mlx5e_res = &mdev->mlx5e_res;
+ struct net_device *netdev;
+
+ mutex_lock(&mlx5e_res->uplink_netdev_lock);
+ netdev = mlx5e_res->uplink_netdev;
+ netdev_hold(netdev, &mlx5e_res->tracker, GFP_KERNEL);
+ mutex_unlock(&mlx5e_res->uplink_netdev_lock);
+ return netdev;
}
-static inline struct net_device *mlx5_uplink_netdev_get(struct mlx5_core_dev *mdev)
+static inline void mlx5_uplink_netdev_put(struct mlx5_core_dev *mdev,
+ struct net_device *netdev)
{
- return mdev->mlx5e_res.uplink_netdev;
+ netdev_put(netdev, &mdev->mlx5e_res.tracker);
}
struct mlx5_sd;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
index 4450091e181a..4a88a42ae4f7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
@@ -65,13 +65,14 @@ static int del_l2table_entry_cmd(struct mlx5_core_dev *dev, u32 index)
/* UC L2 table hash node */
struct l2table_node {
struct l2addr_node node;
- u32 index; /* index in HW l2 table */
+ int index; /* index in HW l2 table */
int ref_count;
};
struct mlx5_mpfs {
struct hlist_head hash[MLX5_L2_ADDR_HASH_SIZE];
struct mutex lock; /* Synchronize l2 table access */
+ bool enabled;
u32 size;
unsigned long *bitmap;
};
@@ -114,6 +115,8 @@ int mlx5_mpfs_init(struct mlx5_core_dev *dev)
return -ENOMEM;
}
+ mpfs->enabled = true;
+
dev->priv.mpfs = mpfs;
return 0;
}
@@ -135,7 +138,7 @@ int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac)
struct mlx5_mpfs *mpfs = dev->priv.mpfs;
struct l2table_node *l2addr;
int err = 0;
- u32 index;
+ int index;
if (!mpfs)
return 0;
@@ -148,30 +151,34 @@ int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac)
goto out;
}
- err = alloc_l2table_index(mpfs, &index);
- if (err)
- goto out;
-
l2addr = l2addr_hash_add(mpfs->hash, mac, struct l2table_node, GFP_KERNEL);
if (!l2addr) {
err = -ENOMEM;
- goto hash_add_err;
+ goto out;
}
- err = set_l2table_entry_cmd(dev, index, mac);
- if (err)
- goto set_table_entry_err;
+ index = -1;
+
+ if (mpfs->enabled) {
+ err = alloc_l2table_index(mpfs, &index);
+ if (err)
+ goto hash_del;
+ err = set_l2table_entry_cmd(dev, index, mac);
+ if (err)
+ goto free_l2table_index;
+ mlx5_core_dbg(dev, "MPFS entry %pM, set @index (%d)\n",
+ l2addr->node.addr, index);
+ }
l2addr->index = index;
l2addr->ref_count = 1;
mlx5_core_dbg(dev, "MPFS mac added %pM, index (%d)\n", mac, index);
goto out;
-
-set_table_entry_err:
- l2addr_hash_del(l2addr);
-hash_add_err:
+free_l2table_index:
free_l2table_index(mpfs, index);
+hash_del:
+ l2addr_hash_del(l2addr);
out:
mutex_unlock(&mpfs->lock);
return err;
@@ -183,7 +190,7 @@ int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac)
struct mlx5_mpfs *mpfs = dev->priv.mpfs;
struct l2table_node *l2addr;
int err = 0;
- u32 index;
+ int index;
if (!mpfs)
return 0;
@@ -200,12 +207,87 @@ int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac)
goto unlock;
index = l2addr->index;
- del_l2table_entry_cmd(dev, index);
+ if (index >= 0) {
+ del_l2table_entry_cmd(dev, index);
+ free_l2table_index(mpfs, index);
+ mlx5_core_dbg(dev, "MPFS entry %pM, deleted @index (%d)\n",
+ mac, index);
+ }
l2addr_hash_del(l2addr);
- free_l2table_index(mpfs, index);
mlx5_core_dbg(dev, "MPFS mac deleted %pM, index (%d)\n", mac, index);
unlock:
mutex_unlock(&mpfs->lock);
return err;
}
EXPORT_SYMBOL(mlx5_mpfs_del_mac);
+
+int mlx5_mpfs_enable(struct mlx5_core_dev *dev)
+{
+ struct mlx5_mpfs *mpfs = dev->priv.mpfs;
+ struct l2table_node *l2addr;
+ struct hlist_node *n;
+ int err = 0, i;
+
+ if (!mpfs)
+ return -ENODEV;
+
+ mutex_lock(&mpfs->lock);
+ if (mpfs->enabled)
+ goto out;
+ mpfs->enabled = true;
+ mlx5_core_dbg(dev, "MPFS enabling mpfs\n");
+
+ mlx5_mpfs_foreach(l2addr, n, mpfs, i) {
+ u32 index;
+
+ err = alloc_l2table_index(mpfs, &index);
+ if (err) {
+			mlx5_core_err(dev, "Failed to allocate MPFS index for %pM, err(%d)\n",
+ l2addr->node.addr, err);
+ goto out;
+ }
+
+ err = set_l2table_entry_cmd(dev, index, l2addr->node.addr);
+ if (err) {
+ mlx5_core_err(dev, "Failed to set MPFS l2table entry for %pM index=%d, err(%d)\n",
+ l2addr->node.addr, index, err);
+ free_l2table_index(mpfs, index);
+ goto out;
+ }
+
+ l2addr->index = index;
+ mlx5_core_dbg(dev, "MPFS entry %pM, set @index (%d)\n",
+ l2addr->node.addr, l2addr->index);
+ }
+out:
+ mutex_unlock(&mpfs->lock);
+ return err;
+}
+
+void mlx5_mpfs_disable(struct mlx5_core_dev *dev)
+{
+ struct mlx5_mpfs *mpfs = dev->priv.mpfs;
+ struct l2table_node *l2addr;
+ struct hlist_node *n;
+ int i;
+
+ if (!mpfs)
+ return;
+
+ mutex_lock(&mpfs->lock);
+ if (!mpfs->enabled)
+ goto unlock;
+ mlx5_mpfs_foreach(l2addr, n, mpfs, i) {
+ if (l2addr->index < 0)
+ continue;
+ del_l2table_entry_cmd(dev, l2addr->index);
+ free_l2table_index(mpfs, l2addr->index);
+ mlx5_core_dbg(dev, "MPFS entry %pM, deleted @index (%d)\n",
+ l2addr->node.addr, l2addr->index);
+ l2addr->index = -1;
+ }
+ mpfs->enabled = false;
+ mlx5_core_dbg(dev, "MPFS disabled\n");
+unlock:
+ mutex_unlock(&mpfs->lock);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h
index 4a293542a7aa..9c63838ce1f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h
@@ -45,6 +45,10 @@ struct l2addr_node {
u8 addr[ETH_ALEN];
};
+#define mlx5_mpfs_foreach(hs, tmp, mpfs, i) \
+ for (i = 0; i < MLX5_L2_ADDR_HASH_SIZE; i++) \
+ hlist_for_each_entry_safe(hs, tmp, &(mpfs)->hash[i], node.hlist)
+
#define for_each_l2hash_node(hn, tmp, hash, i) \
for (i = 0; i < MLX5_L2_ADDR_HASH_SIZE; i++) \
hlist_for_each_entry_safe(hn, tmp, &(hash)[i], hlist)
@@ -82,11 +86,16 @@ struct l2addr_node {
})
#ifdef CONFIG_MLX5_MPFS
+struct mlx5_core_dev;
int mlx5_mpfs_init(struct mlx5_core_dev *dev);
void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev);
+int mlx5_mpfs_enable(struct mlx5_core_dev *dev);
+void mlx5_mpfs_disable(struct mlx5_core_dev *dev);
#else /* #ifndef CONFIG_MLX5_MPFS */
static inline int mlx5_mpfs_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev) {}
+static inline int mlx5_mpfs_enable(struct mlx5_core_dev *dev) { return 0; }
+static inline void mlx5_mpfs_disable(struct mlx5_core_dev *dev) {}
#endif
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.c
new file mode 100644
index 000000000000..19bb620b7436
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.c
@@ -0,0 +1,799 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include "nv_param.h"
+#include "mlx5_core.h"
+
+enum {
+ MLX5_CLASS_0_CTRL_ID_NV_GLOBAL_PCI_CONF = 0x80,
+ MLX5_CLASS_0_CTRL_ID_NV_GLOBAL_PCI_CAP = 0x81,
+ MLX5_CLASS_0_CTRL_ID_NV_SW_OFFLOAD_CONFIG = 0x10a,
+ MLX5_CLASS_0_CTRL_ID_NV_SW_OFFLOAD_CAP = 0x10b,
+ MLX5_CLASS_0_CTRL_ID_NV_SW_ACCELERATE_CONF = 0x11d,
+
+ MLX5_CLASS_3_CTRL_ID_NV_PF_PCI_CONF = 0x80,
+};
+
+struct mlx5_ifc_configuration_item_type_class_global_bits {
+ u8 type_class[0x8];
+ u8 parameter_index[0x18];
+};
+
+struct mlx5_ifc_configuration_item_type_class_per_host_pf_bits {
+ u8 type_class[0x8];
+ u8 pf_index[0x6];
+ u8 pci_bus_index[0x8];
+ u8 parameter_index[0xa];
+};
+
+union mlx5_ifc_config_item_type_auto_bits {
+ struct mlx5_ifc_configuration_item_type_class_global_bits
+ configuration_item_type_class_global;
+ struct mlx5_ifc_configuration_item_type_class_per_host_pf_bits
+ configuration_item_type_class_per_host_pf;
+ u8 reserved_at_0[0x20];
+};
+
+enum {
+ MLX5_ACCESS_MODE_NEXT = 0,
+ MLX5_ACCESS_MODE_CURRENT,
+ MLX5_ACCESS_MODE_DEFAULT,
+};
+
+struct mlx5_ifc_config_item_bits {
+ u8 valid[0x2];
+ u8 priority[0x2];
+ u8 header_type[0x2];
+ u8 ovr_en[0x1];
+ u8 rd_en[0x1];
+ u8 access_mode[0x2];
+ u8 reserved_at_a[0x1];
+ u8 writer_id[0x5];
+ u8 version[0x4];
+ u8 reserved_at_14[0x2];
+ u8 host_id_valid[0x1];
+ u8 length[0x9];
+
+ union mlx5_ifc_config_item_type_auto_bits type;
+
+ u8 reserved_at_40[0x10];
+ u8 crc16[0x10];
+};
+
+struct mlx5_ifc_mnvda_reg_bits {
+ struct mlx5_ifc_config_item_bits configuration_item_header;
+
+ u8 configuration_item_data[64][0x20];
+};
+
+struct mlx5_ifc_nv_global_pci_conf_bits {
+ u8 sriov_valid[0x1];
+ u8 reserved_at_1[0x10];
+ u8 per_pf_total_vf[0x1];
+ u8 reserved_at_12[0xe];
+
+ u8 sriov_en[0x1];
+ u8 reserved_at_21[0xf];
+ u8 total_vfs[0x10];
+
+ u8 reserved_at_40[0x20];
+};
+
+struct mlx5_ifc_nv_global_pci_cap_bits {
+ u8 max_vfs_per_pf_valid[0x1];
+ u8 reserved_at_1[0x13];
+ u8 per_pf_total_vf_supported[0x1];
+ u8 reserved_at_15[0xb];
+
+ u8 sriov_support[0x1];
+ u8 reserved_at_21[0xf];
+ u8 max_vfs_per_pf[0x10];
+
+ u8 reserved_at_40[0x60];
+};
+
+struct mlx5_ifc_nv_pf_pci_conf_bits {
+ u8 reserved_at_0[0x9];
+ u8 pf_total_vf_en[0x1];
+ u8 reserved_at_a[0x16];
+
+ u8 reserved_at_20[0x20];
+
+ u8 reserved_at_40[0x10];
+ u8 total_vf[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_nv_sw_offload_conf_bits {
+ u8 ip_over_vxlan_port[0x10];
+ u8 tunnel_ecn_copy_offload_disable[0x1];
+ u8 pci_atomic_mode[0x3];
+ u8 sr_enable[0x1];
+ u8 ptp_cyc2realtime[0x1];
+ u8 vector_calc_disable[0x1];
+ u8 uctx_en[0x1];
+ u8 prio_tag_required_en[0x1];
+ u8 esw_fdb_ipv4_ttl_modify_enable[0x1];
+ u8 mkey_by_name[0x1];
+ u8 ip_over_vxlan_en[0x1];
+ u8 one_qp_per_recovery[0x1];
+ u8 cqe_compression[0x3];
+ u8 tunnel_udp_entropy_proto_disable[0x1];
+ u8 reserved_at_21[0x1];
+ u8 ar_enable[0x1];
+ u8 log_max_outstanding_wqe[0x5];
+ u8 vf_migration[0x2];
+ u8 log_tx_psn_win[0x6];
+ u8 lro_log_timeout3[0x4];
+ u8 lro_log_timeout2[0x4];
+ u8 lro_log_timeout1[0x4];
+ u8 lro_log_timeout0[0x4];
+};
+
+struct mlx5_ifc_nv_sw_offload_cap_bits {
+ u8 reserved_at_0[0x19];
+ u8 swp_l4_csum_mode_l4_only[0x1];
+ u8 reserved_at_1a[0x6];
+};
+
+struct mlx5_ifc_nv_sw_accelerate_conf_bits {
+ u8 swp_l4_csum_mode[0x2];
+ u8 reserved_at_2[0x3e];
+};
+
+#define MNVDA_HDR_SZ \
+ (MLX5_ST_SZ_BYTES(mnvda_reg) - \
+ MLX5_BYTE_OFF(mnvda_reg, configuration_item_data))
+
+#define MLX5_SET_CFG_ITEM_TYPE(_cls_name, _mnvda_ptr, _field, _val) \
+ MLX5_SET(mnvda_reg, _mnvda_ptr, \
+ configuration_item_header.type.configuration_item_type_class_##_cls_name._field, \
+ _val)
+
+#define MLX5_SET_CFG_HDR_LEN(_mnvda_ptr, _cls_name) \
+ MLX5_SET(mnvda_reg, _mnvda_ptr, configuration_item_header.length, \
+ MLX5_ST_SZ_BYTES(_cls_name))
+
+#define MLX5_GET_CFG_HDR_LEN(_mnvda_ptr) \
+ MLX5_GET(mnvda_reg, _mnvda_ptr, configuration_item_header.length)
+
+static int mlx5_nv_param_read(struct mlx5_core_dev *dev, void *mnvda,
+ size_t len)
+{
+ u32 param_idx, type_class;
+ u32 header_len;
+ void *cls_ptr;
+ int err;
+
+ if (WARN_ON(len > MLX5_ST_SZ_BYTES(mnvda_reg)) || len < MNVDA_HDR_SZ)
+ return -EINVAL; /* A caller bug */
+
+ err = mlx5_core_access_reg(dev, mnvda, len, mnvda, len, MLX5_REG_MNVDA,
+ 0, 0);
+ if (!err)
+ return 0;
+
+ cls_ptr = MLX5_ADDR_OF(mnvda_reg, mnvda,
+ configuration_item_header.type.configuration_item_type_class_global);
+
+ type_class = MLX5_GET(configuration_item_type_class_global, cls_ptr,
+ type_class);
+ param_idx = MLX5_GET(configuration_item_type_class_global, cls_ptr,
+ parameter_index);
+ header_len = MLX5_GET_CFG_HDR_LEN(mnvda);
+
+ mlx5_core_warn(dev, "Failed to read mnvda reg: type_class 0x%x, param_idx 0x%x, header_len %u, err %d\n",
+ type_class, param_idx, header_len, err);
+
+ return -EOPNOTSUPP;
+}
+
+static int mlx5_nv_param_write(struct mlx5_core_dev *dev, void *mnvda,
+ size_t len)
+{
+ if (WARN_ON(len > MLX5_ST_SZ_BYTES(mnvda_reg)) || len < MNVDA_HDR_SZ)
+ return -EINVAL;
+
+ if (WARN_ON(MLX5_GET_CFG_HDR_LEN(mnvda) == 0))
+ return -EINVAL;
+
+ return mlx5_core_access_reg(dev, mnvda, len, mnvda, len, MLX5_REG_MNVDA,
+ 0, 1);
+}
+
+static int
+mlx5_nv_param_read_sw_offload_conf(struct mlx5_core_dev *dev, void *mnvda,
+ size_t len)
+{
+ MLX5_SET_CFG_ITEM_TYPE(global, mnvda, type_class, 0);
+ MLX5_SET_CFG_ITEM_TYPE(global, mnvda, parameter_index,
+ MLX5_CLASS_0_CTRL_ID_NV_SW_OFFLOAD_CONFIG);
+ MLX5_SET_CFG_HDR_LEN(mnvda, nv_sw_offload_conf);
+
+ return mlx5_nv_param_read(dev, mnvda, len);
+}
+
+static int
+mlx5_nv_param_read_sw_offload_cap(struct mlx5_core_dev *dev, void *mnvda,
+ size_t len)
+{
+ MLX5_SET_CFG_ITEM_TYPE(global, mnvda, type_class, 0);
+ MLX5_SET_CFG_ITEM_TYPE(global, mnvda, parameter_index,
+ MLX5_CLASS_0_CTRL_ID_NV_SW_OFFLOAD_CAP);
+ MLX5_SET_CFG_HDR_LEN(mnvda, nv_sw_offload_cap);
+
+ return mlx5_nv_param_read(dev, mnvda, len);
+}
+
+static int
+mlx5_nv_param_read_sw_accelerate_conf(struct mlx5_core_dev *dev, void *mnvda,
+ size_t len, int access_mode)
+{
+ MLX5_SET_CFG_ITEM_TYPE(global, mnvda, type_class, 0);
+ MLX5_SET_CFG_ITEM_TYPE(global, mnvda, parameter_index,
+ MLX5_CLASS_0_CTRL_ID_NV_SW_ACCELERATE_CONF);
+ MLX5_SET_CFG_HDR_LEN(mnvda, nv_sw_accelerate_conf);
+ MLX5_SET(mnvda_reg, mnvda, configuration_item_header.access_mode,
+ access_mode);
+
+ return mlx5_nv_param_read(dev, mnvda, len);
+}
+
+static const char *const
+ cqe_compress_str[] = { "balanced", "aggressive" };
+
+static int
+mlx5_nv_param_devlink_cqe_compress_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {};
+ u8 value = U8_MAX;
+ void *data;
+ int err;
+
+ err = mlx5_nv_param_read_sw_offload_conf(dev, mnvda, sizeof(mnvda));
+ if (err)
+ return err;
+
+ data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
+ value = MLX5_GET(nv_sw_offload_conf, data, cqe_compression);
+
+ if (value >= ARRAY_SIZE(cqe_compress_str))
+ return -EOPNOTSUPP;
+
+ strscpy(ctx->val.vstr, cqe_compress_str[value], sizeof(ctx->val.vstr));
+ return 0;
+}
+
+static int
+mlx5_nv_param_devlink_cqe_compress_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cqe_compress_str); i++) {
+ if (!strcmp(val.vstr, cqe_compress_str[i]))
+ return 0;
+ }
+
+ NL_SET_ERR_MSG_MOD(extack,
+ "Invalid value, supported values are balanced/aggressive");
+ return -EOPNOTSUPP;
+}
+
+static int
+mlx5_nv_param_devlink_cqe_compress_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {};
+ int err = 0;
+ void *data;
+ u8 value;
+
+ if (!strcmp(ctx->val.vstr, "aggressive"))
+ value = 1;
+	else /* balanced: can't be anything else, already validated above */
+ value = 0;
+
+ err = mlx5_nv_param_read_sw_offload_conf(dev, mnvda, sizeof(mnvda));
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to read sw_offload_conf mnvda reg");
+ return err;
+ }
+
+ data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
+ MLX5_SET(nv_sw_offload_conf, data, cqe_compression, value);
+
+ return mlx5_nv_param_write(dev, mnvda, sizeof(mnvda));
+}
+
+enum swp_l4_csum_mode {
+ SWP_L4_CSUM_MODE_DEFAULT = 0,
+ SWP_L4_CSUM_MODE_FULL_CSUM = 1,
+ SWP_L4_CSUM_MODE_L4_ONLY = 2,
+};
+
+static const char *const
+ swp_l4_csum_mode_str[] = { "default", "full_csum", "l4_only" };
+
+static int
+mlx5_swp_l4_csum_mode_get(struct devlink *devlink, u32 id,
+ int access_mode, u8 *value,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {};
+ void *data;
+ int err;
+
+ err = mlx5_nv_param_read_sw_accelerate_conf(dev, mnvda, sizeof(mnvda),
+ access_mode);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to read sw_accelerate_conf mnvda reg");
+ return err;
+ }
+
+ data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
+ *value = MLX5_GET(nv_sw_accelerate_conf, data, swp_l4_csum_mode);
+
+ if (*value >= ARRAY_SIZE(swp_l4_csum_mode_str)) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Invalid swp_l4_csum_mode value %u read from device",
+ *value);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+mlx5_devlink_swp_l4_csum_mode_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ u8 value;
+ int err;
+
+ err = mlx5_swp_l4_csum_mode_get(devlink, id, MLX5_ACCESS_MODE_NEXT,
+ &value, extack);
+ if (err)
+ return err;
+
+ strscpy(ctx->val.vstr, swp_l4_csum_mode_str[value],
+ sizeof(ctx->val.vstr));
+ return 0;
+}
+
+static int
+mlx5_devlink_swp_l4_csum_mode_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ u32 cap[MLX5_ST_SZ_DW(mnvda_reg)] = {};
+ void *data;
+ int err, i;
+
+ for (i = 0; i < ARRAY_SIZE(swp_l4_csum_mode_str); i++) {
+ if (!strcmp(val.vstr, swp_l4_csum_mode_str[i]))
+ break;
+ }
+
+ if (i >= ARRAY_SIZE(swp_l4_csum_mode_str) ||
+ i == SWP_L4_CSUM_MODE_DEFAULT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Invalid value, supported values are full_csum/l4_only");
+ return -EINVAL;
+ }
+
+ if (i == SWP_L4_CSUM_MODE_L4_ONLY) {
+ err = mlx5_nv_param_read_sw_offload_cap(dev, cap, sizeof(cap));
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to read sw_offload_cap");
+ return err;
+ }
+
+ data = MLX5_ADDR_OF(mnvda_reg, cap, configuration_item_data);
+ if (!MLX5_GET(nv_sw_offload_cap, data, swp_l4_csum_mode_l4_only)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "l4_only mode is not supported on this device");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
+static int
+mlx5_swp_l4_csum_mode_set(struct devlink *devlink, u32 id, u8 value,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {};
+ void *data;
+ int err;
+
+ err = mlx5_nv_param_read_sw_accelerate_conf(dev, mnvda, sizeof(mnvda),
+ MLX5_ACCESS_MODE_NEXT);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to read sw_accelerate_conf mnvda reg");
+ return err;
+ }
+
+ data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
+ MLX5_SET(nv_sw_accelerate_conf, data, swp_l4_csum_mode, value);
+
+ err = mlx5_nv_param_write(dev, mnvda, sizeof(mnvda));
+ if (err)
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to write sw_accelerate_conf mnvda reg");
+
+ return err;
+}
+
+static int
+mlx5_devlink_swp_l4_csum_mode_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ u8 value;
+
+ if (!strcmp(ctx->val.vstr, "full_csum"))
+ value = SWP_L4_CSUM_MODE_FULL_CSUM;
+ else
+ value = SWP_L4_CSUM_MODE_L4_ONLY;
+
+ return mlx5_swp_l4_csum_mode_set(devlink, id, value, extack);
+}
+
+static int
+mlx5_devlink_swp_l4_csum_mode_get_default(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ u8 value;
+ int err;
+
+ err = mlx5_swp_l4_csum_mode_get(devlink, id, MLX5_ACCESS_MODE_DEFAULT,
+ &value, extack);
+ if (err)
+ return err;
+
+ strscpy(ctx->val.vstr, swp_l4_csum_mode_str[value],
+ sizeof(ctx->val.vstr));
+ return 0;
+}
+
+static int
+mlx5_devlink_swp_l4_csum_mode_set_default(struct devlink *devlink, u32 id,
+ enum devlink_param_cmode cmode,
+ struct netlink_ext_ack *extack)
+{
+ u8 value;
+ int err;
+
+ err = mlx5_swp_l4_csum_mode_get(devlink, id, MLX5_ACCESS_MODE_DEFAULT,
+ &value, extack);
+ if (err)
+ return err;
+
+ return mlx5_swp_l4_csum_mode_set(devlink, id, value, extack);
+}
+
+static int mlx5_nv_param_read_global_pci_conf(struct mlx5_core_dev *dev,
+ void *mnvda, size_t len)
+{
+ MLX5_SET_CFG_ITEM_TYPE(global, mnvda, type_class, 0);
+ MLX5_SET_CFG_ITEM_TYPE(global, mnvda, parameter_index,
+ MLX5_CLASS_0_CTRL_ID_NV_GLOBAL_PCI_CONF);
+ MLX5_SET_CFG_HDR_LEN(mnvda, nv_global_pci_conf);
+
+ return mlx5_nv_param_read(dev, mnvda, len);
+}
+
+static int mlx5_nv_param_read_global_pci_cap(struct mlx5_core_dev *dev,
+ void *mnvda, size_t len)
+{
+ MLX5_SET_CFG_ITEM_TYPE(global, mnvda, type_class, 0);
+ MLX5_SET_CFG_ITEM_TYPE(global, mnvda, parameter_index,
+ MLX5_CLASS_0_CTRL_ID_NV_GLOBAL_PCI_CAP);
+ MLX5_SET_CFG_HDR_LEN(mnvda, nv_global_pci_cap);
+
+ return mlx5_nv_param_read(dev, mnvda, len);
+}
+
+static int mlx5_nv_param_read_per_host_pf_conf(struct mlx5_core_dev *dev,
+ void *mnvda, size_t len)
+{
+ MLX5_SET_CFG_ITEM_TYPE(per_host_pf, mnvda, type_class, 3);
+ MLX5_SET_CFG_ITEM_TYPE(per_host_pf, mnvda, parameter_index,
+ MLX5_CLASS_3_CTRL_ID_NV_PF_PCI_CONF);
+ MLX5_SET_CFG_HDR_LEN(mnvda, nv_pf_pci_conf);
+
+ return mlx5_nv_param_read(dev, mnvda, len);
+}
+
+static int mlx5_devlink_enable_sriov_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {};
+ bool sriov_en = false;
+ void *data;
+ int err;
+
+ err = mlx5_nv_param_read_global_pci_cap(dev, mnvda, sizeof(mnvda));
+ if (err)
+ return err;
+
+ data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
+ if (!MLX5_GET(nv_global_pci_cap, data, sriov_support)) {
+ ctx->val.vbool = false;
+ return 0;
+ }
+
+ memset(mnvda, 0, sizeof(mnvda));
+ err = mlx5_nv_param_read_global_pci_conf(dev, mnvda, sizeof(mnvda));
+ if (err)
+ return err;
+
+ data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
+ sriov_en = MLX5_GET(nv_global_pci_conf, data, sriov_en);
+ if (!MLX5_GET(nv_global_pci_conf, data, per_pf_total_vf)) {
+ ctx->val.vbool = sriov_en;
+ return 0;
+ }
+
+ /* SRIOV is per PF */
+ memset(mnvda, 0, sizeof(mnvda));
+ err = mlx5_nv_param_read_per_host_pf_conf(dev, mnvda, sizeof(mnvda));
+ if (err)
+ return err;
+
+ data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
+ ctx->val.vbool = sriov_en &&
+ MLX5_GET(nv_pf_pci_conf, data, pf_total_vf_en);
+ return 0;
+}
+
+static int mlx5_devlink_enable_sriov_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {};
+ bool per_pf_support;
+ void *cap, *data;
+ int err;
+
+ err = mlx5_nv_param_read_global_pci_cap(dev, mnvda, sizeof(mnvda));
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to read global PCI capability");
+ return err;
+ }
+
+ cap = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
+ per_pf_support = MLX5_GET(nv_global_pci_cap, cap,
+ per_pf_total_vf_supported);
+
+ if (!MLX5_GET(nv_global_pci_cap, cap, sriov_support)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "SRIOV is not supported on this device");
+ return -EOPNOTSUPP;
+ }
+
+ if (!per_pf_support) {
+ /* We don't allow global SRIOV setting on per PF devlink */
+ NL_SET_ERR_MSG_MOD(extack,
+ "SRIOV is not per PF on this device");
+ return -EOPNOTSUPP;
+ }
+
+ memset(mnvda, 0, sizeof(mnvda));
+ err = mlx5_nv_param_read_global_pci_conf(dev, mnvda, sizeof(mnvda));
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unable to read global PCI configuration");
+ return err;
+ }
+
+ data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
+
+	/* Set up per-PF SRIOV mode */
+ MLX5_SET(nv_global_pci_conf, data, sriov_valid, 1);
+ MLX5_SET(nv_global_pci_conf, data, sriov_en, 1);
+ MLX5_SET(nv_global_pci_conf, data, per_pf_total_vf, 1);
+
+ err = mlx5_nv_param_write(dev, mnvda, sizeof(mnvda));
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unable to write global PCI configuration");
+ return err;
+ }
+
+ /* enable/disable sriov on this PF */
+ memset(mnvda, 0, sizeof(mnvda));
+ err = mlx5_nv_param_read_per_host_pf_conf(dev, mnvda, sizeof(mnvda));
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unable to read per host PF configuration");
+ return err;
+ }
+ MLX5_SET(nv_pf_pci_conf, data, pf_total_vf_en, ctx->val.vbool);
+ return mlx5_nv_param_write(dev, mnvda, sizeof(mnvda));
+}
+
+static int mlx5_devlink_total_vfs_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {};
+ void *data;
+ int err;
+
+ data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
+
+ err = mlx5_nv_param_read_global_pci_cap(dev, mnvda, sizeof(mnvda));
+ if (err)
+ return err;
+
+ if (!MLX5_GET(nv_global_pci_cap, data, sriov_support)) {
+ ctx->val.vu32 = 0;
+ return 0;
+ }
+
+ memset(mnvda, 0, sizeof(mnvda));
+ err = mlx5_nv_param_read_global_pci_conf(dev, mnvda, sizeof(mnvda));
+ if (err)
+ return err;
+
+ if (!MLX5_GET(nv_global_pci_conf, data, per_pf_total_vf)) {
+ ctx->val.vu32 = MLX5_GET(nv_global_pci_conf, data, total_vfs);
+ return 0;
+ }
+
+ /* SRIOV is per PF */
+ memset(mnvda, 0, sizeof(mnvda));
+ err = mlx5_nv_param_read_per_host_pf_conf(dev, mnvda, sizeof(mnvda));
+ if (err)
+ return err;
+
+ ctx->val.vu32 = MLX5_GET(nv_pf_pci_conf, data, total_vf);
+
+ return 0;
+}
+
+static int mlx5_devlink_total_vfs_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)];
+ void *data;
+ int err;
+
+ err = mlx5_nv_param_read_global_pci_cap(dev, mnvda, sizeof(mnvda));
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to read global pci cap");
+ return err;
+ }
+
+ data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
+ if (!MLX5_GET(nv_global_pci_cap, data, sriov_support)) {
+ NL_SET_ERR_MSG_MOD(extack, "Not configurable on this device");
+ return -EOPNOTSUPP;
+ }
+
+ if (!MLX5_GET(nv_global_pci_cap, data, per_pf_total_vf_supported)) {
+ /* We don't allow global SRIOV setting on per PF devlink */
+ NL_SET_ERR_MSG_MOD(extack,
+ "SRIOV is not per PF on this device");
+ return -EOPNOTSUPP;
+ }
+
+ memset(mnvda, 0, sizeof(mnvda));
+ err = mlx5_nv_param_read_global_pci_conf(dev, mnvda, sizeof(mnvda));
+ if (err)
+ return err;
+
+ MLX5_SET(nv_global_pci_conf, data, sriov_valid, 1);
+ MLX5_SET(nv_global_pci_conf, data, per_pf_total_vf, 1);
+
+ err = mlx5_nv_param_write(dev, mnvda, sizeof(mnvda));
+ if (err)
+ return err;
+
+ memset(mnvda, 0, sizeof(mnvda));
+ err = mlx5_nv_param_read_per_host_pf_conf(dev, mnvda, sizeof(mnvda));
+ if (err)
+ return err;
+
+ data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
+ MLX5_SET(nv_pf_pci_conf, data, total_vf, ctx->val.vu32);
+ return mlx5_nv_param_write(dev, mnvda, sizeof(mnvda));
+}
+
+static int mlx5_devlink_total_vfs_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ u32 cap[MLX5_ST_SZ_DW(mnvda_reg)];
+ void *data;
+ u16 max;
+ int err;
+
+ data = MLX5_ADDR_OF(mnvda_reg, cap, configuration_item_data);
+
+ err = mlx5_nv_param_read_global_pci_cap(dev, cap, sizeof(cap));
+ if (err)
+ return err;
+
+ if (!MLX5_GET(nv_global_pci_cap, data, max_vfs_per_pf_valid))
+ return 0; /* optimistic, but set might fail later */
+
+ max = MLX5_GET(nv_global_pci_cap, data, max_vfs_per_pf);
+ if (val.vu16 > max) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Max allowed by device is %u", max);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct devlink_param mlx5_nv_param_devlink_params[] = {
+ DEVLINK_PARAM_GENERIC(ENABLE_SRIOV, BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+ mlx5_devlink_enable_sriov_get,
+ mlx5_devlink_enable_sriov_set, NULL),
+ DEVLINK_PARAM_GENERIC(TOTAL_VFS, BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+ mlx5_devlink_total_vfs_get,
+ mlx5_devlink_total_vfs_set,
+ mlx5_devlink_total_vfs_validate),
+ DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_CQE_COMPRESSION_TYPE,
+ "cqe_compress_type", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+ mlx5_nv_param_devlink_cqe_compress_get,
+ mlx5_nv_param_devlink_cqe_compress_set,
+ mlx5_nv_param_devlink_cqe_compress_validate),
+ DEVLINK_PARAM_DRIVER_WITH_DEFAULTS(MLX5_DEVLINK_PARAM_ID_SWP_L4_CSUM_MODE,
+ "swp_l4_csum_mode", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+ mlx5_devlink_swp_l4_csum_mode_get,
+ mlx5_devlink_swp_l4_csum_mode_set,
+ mlx5_devlink_swp_l4_csum_mode_validate,
+ mlx5_devlink_swp_l4_csum_mode_get_default,
+ mlx5_devlink_swp_l4_csum_mode_set_default),
+};
+
+int mlx5_nv_param_register_dl_params(struct devlink *devlink)
+{
+ if (!mlx5_core_is_pf(devlink_priv(devlink)))
+ return 0;
+
+ return devl_params_register(devlink, mlx5_nv_param_devlink_params,
+ ARRAY_SIZE(mlx5_nv_param_devlink_params));
+}
+
+void mlx5_nv_param_unregister_dl_params(struct devlink *devlink)
+{
+ if (!mlx5_core_is_pf(devlink_priv(devlink)))
+ return;
+
+ devl_params_unregister(devlink, mlx5_nv_param_devlink_params,
+ ARRAY_SIZE(mlx5_nv_param_devlink_params));
+}
+
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.h
new file mode 100644
index 000000000000..9f4922ff7745
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_NV_PARAM_H
+#define __MLX5_NV_PARAM_H
+
+#include <linux/mlx5/driver.h>
+#include "devlink.h"
+
+int mlx5_nv_param_register_dl_params(struct devlink *devlink);
+void mlx5_nv_param_unregister_dl_params(struct devlink *devlink);
+
+#endif
+
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c
index 6b774e0c2766..432c98f2626d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c
@@ -24,6 +24,11 @@
pci_write_config_dword((dev)->pdev, (dev)->vsc_addr + (offset), (val))
#define VSC_MAX_RETRIES 2048
+/* Reading VSC registers can take a relatively long time.
+ * Yield the CPU every 128 registers read.
+ */
+#define VSC_GW_READ_BLOCK_COUNT 128
+
enum {
VSC_CTRL_OFFSET = 0x4,
VSC_COUNTER_OFFSET = 0x8,
@@ -74,6 +79,10 @@ int mlx5_vsc_gw_lock(struct mlx5_core_dev *dev)
ret = -EBUSY;
goto pci_unlock;
}
+ if (pci_channel_offline(dev->pdev)) {
+ ret = -EACCES;
+ goto pci_unlock;
+ }
/* Check if semaphore is already locked */
ret = vsc_read(dev, VSC_SEMAPHORE_OFFSET, &lock_val);
@@ -269,6 +278,7 @@ int mlx5_vsc_gw_read_block_fast(struct mlx5_core_dev *dev, u32 *data,
{
unsigned int next_read_addr = 0;
unsigned int read_addr = 0;
+ unsigned int count = 0;
while (read_addr < length) {
if (mlx5_vsc_gw_read_fast(dev, read_addr, &next_read_addr,
@@ -276,6 +286,10 @@ int mlx5_vsc_gw_read_block_fast(struct mlx5_core_dev *dev, u32 *data,
return read_addr;
read_addr = next_read_addr;
+ if (++count == VSC_GW_READ_BLOCK_COUNT) {
+ cond_resched();
+ count = 0;
+ }
}
return length;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
index dd5d186dc614..8e17daae48af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
@@ -100,10 +100,6 @@ static bool ft_create_alias_supported(struct mlx5_core_dev *dev)
static bool mlx5_sd_is_supported(struct mlx5_core_dev *dev, u8 host_buses)
{
- /* Feature is currently implemented for PFs only */
- if (!mlx5_core_is_pf(dev))
- return false;
-
/* Honor the SW implementation limit */
if (host_buses > MLX5_SD_MAX_GROUP_SZ)
return false;
@@ -130,7 +126,7 @@ static bool mlx5_sd_is_supported(struct mlx5_core_dev *dev, u8 host_buses)
}
static int mlx5_query_sd(struct mlx5_core_dev *dev, bool *sdm,
- u8 *host_buses, u8 *sd_group)
+ u8 *host_buses)
{
u32 out[MLX5_ST_SZ_DW(mpir_reg)];
int err;
@@ -139,10 +135,6 @@ static int mlx5_query_sd(struct mlx5_core_dev *dev, bool *sdm,
if (err)
return err;
- err = mlx5_query_nic_vport_sd_group(dev, sd_group);
- if (err)
- return err;
-
*sdm = MLX5_GET(mpir_reg, out, sdm);
*host_buses = MLX5_GET(mpir_reg, out, host_buses);
@@ -162,17 +154,29 @@ static int sd_init(struct mlx5_core_dev *dev)
bool sdm;
int err;
- if (!MLX5_CAP_MCAM_REG(dev, mpir))
+ /* Feature is currently implemented for PFs only */
+ if (!mlx5_core_is_pf(dev))
+ return 0;
+
+ /* Block on embedded CPU PFs */
+ if (mlx5_core_is_ecpf(dev))
return 0;
- err = mlx5_query_sd(dev, &sdm, &host_buses, &sd_group);
+ err = mlx5_query_nic_vport_sd_group(dev, &sd_group);
if (err)
return err;
- if (!sdm)
+ if (!sd_group)
return 0;
- if (!sd_group)
+ if (!MLX5_CAP_MCAM_REG(dev, mpir))
+ return 0;
+
+ err = mlx5_query_sd(dev, &sdm, &host_buses);
+ if (err)
+ return err;
+
+ if (!sdm)
return 0;
group_id = mlx5_sd_group_id(dev, sd_group);
@@ -206,15 +210,19 @@ static void sd_cleanup(struct mlx5_core_dev *dev)
static int sd_register(struct mlx5_core_dev *dev)
{
struct mlx5_devcom_comp_dev *devcom, *pos;
+ struct mlx5_devcom_match_attr attr = {};
struct mlx5_core_dev *peer, *primary;
struct mlx5_sd *sd, *primary_sd;
int err, i;
sd = mlx5_get_sd(dev);
+ attr.key.val = sd->group_id;
+ attr.flags = MLX5_DEVCOM_MATCH_FLAGS_NS;
+ attr.net = mlx5_core_net(dev);
devcom = mlx5_devcom_register_component(dev->priv.devc, MLX5_DEVCOM_SD_GROUP,
- sd->group_id, NULL, dev);
- if (IS_ERR(devcom))
- return PTR_ERR(devcom);
+ &attr, NULL, dev);
+ if (!devcom)
+ return -EINVAL;
sd->devcom = devcom;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.h
index 452d0df339ac..404f3d4b6380 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.h
@@ -4,8 +4,8 @@
#ifndef __MLX5_LIB_SMFS_H__
#define __MLX5_LIB_SMFS_H__
-#include "steering/mlx5dr.h"
-#include "steering/dr_types.h"
+#include "steering/sws/mlx5dr.h"
+#include "steering/sws/dr_types.h"
struct mlx5dr_matcher *
mlx5_smfs_matcher_create(struct mlx5dr_table *table, u32 priority, struct mlx5_flow_spec *spec);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/st.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/st.c
new file mode 100644
index 000000000000..ef06fe6cbb51
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/st.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ */
+
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/device.h>
+
+#include "mlx5_core.h"
+#include "lib/mlx5.h"
+
+struct mlx5_st_idx_data {
+ refcount_t usecount;
+ u16 tag;
+};
+
+struct mlx5_st {
+ /* serialize access upon alloc/free flows */
+ struct mutex lock;
+ struct xa_limit index_limit;
+ struct xarray idx_xa; /* key == index, value == struct mlx5_st_idx_data */
+ u8 direct_mode : 1;
+};
+
+struct mlx5_st *mlx5_st_create(struct mlx5_core_dev *dev)
+{
+ struct pci_dev *pdev = dev->pdev;
+ struct mlx5_st *st;
+ u8 direct_mode = 0;
+ u16 num_entries;
+ u32 tbl_loc;
+ int ret;
+
+ if (!MLX5_CAP_GEN(dev, mkey_pcie_tph))
+ return NULL;
+
+#ifdef CONFIG_MLX5_SF
+ if (mlx5_core_is_sf(dev))
+ return dev->priv.parent_mdev->st;
+#endif
+
+	/* Check whether the device exposes the PCIe TPH capability */
+ if (!pdev->tph_cap)
+ return NULL;
+
+ tbl_loc = pcie_tph_get_st_table_loc(pdev);
+ if (tbl_loc == PCI_TPH_LOC_NONE)
+ direct_mode = 1;
+
+ if (!direct_mode) {
+ num_entries = pcie_tph_get_st_table_size(pdev);
+		/* We need a reserved entry for non-TPH cases */
+ if (num_entries < 2)
+ return NULL;
+ }
+
+	/* This fails if the OS doesn't support ST */
+ ret = pcie_enable_tph(pdev, PCI_TPH_ST_DS_MODE);
+ if (ret)
+ return NULL;
+
+ st = kzalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ goto end;
+
+ mutex_init(&st->lock);
+ xa_init_flags(&st->idx_xa, XA_FLAGS_ALLOC);
+ st->direct_mode = direct_mode;
+ if (st->direct_mode)
+ return st;
+
+	/* entry 0 is reserved for non-TPH cases */
+ st->index_limit.min = MLX5_MKC_PCIE_TPH_NO_STEERING_TAG_INDEX + 1;
+ st->index_limit.max = num_entries - 1;
+
+ return st;
+
+end:
+ pcie_disable_tph(dev->pdev);
+ return NULL;
+}
+
+void mlx5_st_destroy(struct mlx5_core_dev *dev)
+{
+ struct mlx5_st *st = dev->st;
+
+ if (mlx5_core_is_sf(dev) || !st)
+ return;
+
+ pcie_disable_tph(dev->pdev);
+ WARN_ON_ONCE(!xa_empty(&st->idx_xa));
+ kfree(st);
+}
+
+int mlx5_st_alloc_index(struct mlx5_core_dev *dev, enum tph_mem_type mem_type,
+ unsigned int cpu_uid, u16 *st_index)
+{
+ struct mlx5_st_idx_data *idx_data;
+ struct mlx5_st *st = dev->st;
+ unsigned long index;
+ u32 xa_id;
+ u16 tag;
+ int ret;
+
+ if (!st)
+ return -EOPNOTSUPP;
+
+ ret = pcie_tph_get_cpu_st(dev->pdev, mem_type, cpu_uid, &tag);
+ if (ret)
+ return ret;
+
+ if (st->direct_mode) {
+ *st_index = tag;
+ return 0;
+ }
+
+ mutex_lock(&st->lock);
+
+ xa_for_each(&st->idx_xa, index, idx_data) {
+ if (tag == idx_data->tag) {
+ refcount_inc(&idx_data->usecount);
+ *st_index = index;
+ goto end;
+ }
+ }
+
+ idx_data = kzalloc(sizeof(*idx_data), GFP_KERNEL);
+ if (!idx_data) {
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ refcount_set(&idx_data->usecount, 1);
+ idx_data->tag = tag;
+
+ ret = xa_alloc(&st->idx_xa, &xa_id, idx_data, st->index_limit, GFP_KERNEL);
+ if (ret)
+ goto clean_idx_data;
+
+ ret = pcie_tph_set_st_entry(dev->pdev, xa_id, tag);
+ if (ret)
+ goto clean_idx_xa;
+
+ *st_index = xa_id;
+ goto end;
+
+clean_idx_xa:
+ xa_erase(&st->idx_xa, xa_id);
+clean_idx_data:
+ kfree(idx_data);
+end:
+ mutex_unlock(&st->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mlx5_st_alloc_index);
+
+int mlx5_st_dealloc_index(struct mlx5_core_dev *dev, u16 st_index)
+{
+ struct mlx5_st_idx_data *idx_data;
+ struct mlx5_st *st = dev->st;
+ int ret = 0;
+
+ if (!st)
+ return -EOPNOTSUPP;
+
+ if (st->direct_mode)
+ return 0;
+
+ mutex_lock(&st->lock);
+ idx_data = xa_load(&st->idx_xa, st_index);
+ if (WARN_ON_ONCE(!idx_data)) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ if (refcount_dec_and_test(&idx_data->usecount)) {
+ xa_erase(&st->idx_xa, st_index);
+		/* We leave PCI config space as it was; no mkey will refer to it */
+ }
+
+end:
+ mutex_unlock(&st->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mlx5_st_dealloc_index);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
index d55e15c1f380..304912637c35 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
@@ -149,7 +149,7 @@ struct mlx5_vxlan *mlx5_vxlan_create(struct mlx5_core_dev *mdev)
struct mlx5_vxlan *vxlan;
if (!MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) || !mlx5_core_is_pf(mdev))
- return ERR_PTR(-ENOTSUPP);
+ return ERR_PTR(-EOPNOTSUPP);
vxlan = kzalloc(sizeof(*vxlan), GFP_KERNEL);
if (!vxlan)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 331ce47f51a1..1ab569ce3fcf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -368,6 +368,10 @@ int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_ty
u16 opmod = (cap_type << 1) | (cap_mode & 0x01);
int err;
+ if (WARN_ON(!dev->caps.hca[cap_type]))
+ /* this cap_type must be added to mlx5_hca_caps_alloc() */
+ return -EINVAL;
+
memset(in, 0, sizeof(in));
out = kzalloc(out_sz, GFP_KERNEL);
if (!out)
@@ -454,8 +458,8 @@ static int handle_hca_cap_atomic(struct mlx5_core_dev *dev, void *set_ctx)
static int handle_hca_cap_odp(struct mlx5_core_dev *dev, void *set_ctx)
{
+ bool do_set = false, mem_page_fault = false;
void *set_hca_cap;
- bool do_set = false;
int err;
if (!IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) ||
@@ -470,6 +474,17 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev, void *set_ctx)
memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_ODP]->cur,
MLX5_ST_SZ_BYTES(odp_cap));
+ /* For best performance, enable memory scheme ODP only when
+ * it has page prefetch enabled.
+ */
+ if (MLX5_CAP_ODP_MAX(dev, mem_page_fault) &&
+ MLX5_CAP_ODP_MAX(dev, memory_page_fault_scheme_cap.page_prefetch)) {
+ mem_page_fault = true;
+ do_set = true;
+ MLX5_SET(odp_cap, set_hca_cap, mem_page_fault, mem_page_fault);
+ goto set;
+ }
+
#define ODP_CAP_SET_MAX(dev, field) \
do { \
u32 _res = MLX5_CAP_ODP_MAX(dev, field); \
@@ -479,25 +494,28 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev, void *set_ctx)
} \
} while (0)
- ODP_CAP_SET_MAX(dev, ud_odp_caps.srq_receive);
- ODP_CAP_SET_MAX(dev, rc_odp_caps.srq_receive);
- ODP_CAP_SET_MAX(dev, xrc_odp_caps.srq_receive);
- ODP_CAP_SET_MAX(dev, xrc_odp_caps.send);
- ODP_CAP_SET_MAX(dev, xrc_odp_caps.receive);
- ODP_CAP_SET_MAX(dev, xrc_odp_caps.write);
- ODP_CAP_SET_MAX(dev, xrc_odp_caps.read);
- ODP_CAP_SET_MAX(dev, xrc_odp_caps.atomic);
- ODP_CAP_SET_MAX(dev, dc_odp_caps.srq_receive);
- ODP_CAP_SET_MAX(dev, dc_odp_caps.send);
- ODP_CAP_SET_MAX(dev, dc_odp_caps.receive);
- ODP_CAP_SET_MAX(dev, dc_odp_caps.write);
- ODP_CAP_SET_MAX(dev, dc_odp_caps.read);
- ODP_CAP_SET_MAX(dev, dc_odp_caps.atomic);
-
- if (!do_set)
- return 0;
-
- return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ODP);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.ud_odp_caps.srq_receive);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.rc_odp_caps.srq_receive);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.xrc_odp_caps.srq_receive);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.xrc_odp_caps.send);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.xrc_odp_caps.receive);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.xrc_odp_caps.write);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.xrc_odp_caps.read);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.xrc_odp_caps.atomic);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.dc_odp_caps.srq_receive);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.dc_odp_caps.send);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.dc_odp_caps.receive);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.dc_odp_caps.write);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.dc_odp_caps.read);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.dc_odp_caps.atomic);
+
+set:
+ if (do_set)
+ err = set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ODP);
+
+ mlx5_core_dbg(dev, "Using ODP %s scheme\n",
+ mem_page_fault ? "memory" : "transport");
+ return err;
}
static int max_uc_list_get_devlink_param(struct mlx5_core_dev *dev)
@@ -535,6 +553,7 @@ EXPORT_SYMBOL(mlx5_is_roce_on);
static int handle_hca_cap_2(struct mlx5_core_dev *dev, void *set_ctx)
{
+ bool do_set = false;
void *set_hca_cap;
int err;
@@ -545,17 +564,27 @@ static int handle_hca_cap_2(struct mlx5_core_dev *dev, void *set_ctx)
if (err)
return err;
- if (!MLX5_CAP_GEN_2_MAX(dev, sw_vhca_id_valid) ||
- !(dev->priv.sw_vhca_id > 0))
- return 0;
-
set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
capability);
memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_GENERAL_2]->cur,
MLX5_ST_SZ_BYTES(cmd_hca_cap_2));
- MLX5_SET(cmd_hca_cap_2, set_hca_cap, sw_vhca_id_valid, 1);
- return set_caps(dev, set_ctx, MLX5_CAP_GENERAL_2);
+ if (MLX5_CAP_GEN_2_MAX(dev, sw_vhca_id_valid) &&
+ dev->priv.sw_vhca_id > 0) {
+ MLX5_SET(cmd_hca_cap_2, set_hca_cap, sw_vhca_id_valid, 1);
+ do_set = true;
+ }
+
+ if (MLX5_CAP_GEN_2_MAX(dev, lag_per_mp_group)) {
+ MLX5_SET(cmd_hca_cap_2, set_hca_cap, lag_per_mp_group, 1);
+ do_set = true;
+ }
+
+	/* Some FW versions support querying MLX5_CAP_GENERAL_2 capabilities
+	 * but don't support setting them. Skip the unnecessary update to
+	 * hca_cap_2 when no changes were introduced.
+	 */
+ return do_set ? set_caps(dev, set_ctx, MLX5_CAP_GENERAL_2) : 0;
}
static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
@@ -619,6 +648,9 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
if (MLX5_CAP_GEN_MAX(dev, pci_sync_for_fw_update_with_driver_unload))
MLX5_SET(cmd_hca_cap, set_hca_cap,
pci_sync_for_fw_update_with_driver_unload, 1);
+ if (MLX5_CAP_GEN_MAX(dev, pcie_reset_using_hotreset_method))
+ MLX5_SET(cmd_hca_cap, set_hca_cap,
+ pcie_reset_using_hotreset_method, 1);
if (MLX5_CAP_GEN_MAX(dev, num_vhca_ports))
MLX5_SET(cmd_hca_cap,
@@ -647,6 +679,10 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_current_uc_list,
ilog2(max_uc_list));
+ /* enable absolute native port num */
+ if (MLX5_CAP_GEN_MAX(dev, abs_native_port_num))
+ MLX5_SET(cmd_hca_cap, set_hca_cap, abs_native_port_num, 1);
+
return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
}
@@ -923,6 +959,9 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev,
}
mlx5_pci_vsc_init(dev);
+
+ pci_enable_ptm(pdev, NULL);
+
return 0;
err_clr_master:
@@ -939,41 +978,19 @@ static void mlx5_pci_close(struct mlx5_core_dev *dev)
* before removing the pci bars
*/
mlx5_drain_health_wq(dev);
+ pci_disable_ptm(dev->pdev);
iounmap(dev->iseg);
release_bar(dev->pdev);
mlx5_pci_disable_device(dev);
}
-static void mlx5_register_hca_devcom_comp(struct mlx5_core_dev *dev)
-{
- /* This component is use to sync adding core_dev to lag_dev and to sync
- * changes of mlx5_adev_devices between LAG layer and other layers.
- */
- if (!mlx5_lag_is_supported(dev))
- return;
-
- dev->priv.hca_devcom_comp =
- mlx5_devcom_register_component(dev->priv.devc, MLX5_DEVCOM_HCA_PORTS,
- mlx5_query_nic_system_image_guid(dev),
- NULL, dev);
- if (IS_ERR(dev->priv.hca_devcom_comp))
- mlx5_core_err(dev, "Failed to register devcom HCA component\n");
-}
-
-static void mlx5_unregister_hca_devcom_comp(struct mlx5_core_dev *dev)
-{
- mlx5_devcom_unregister_component(dev->priv.hca_devcom_comp);
-}
-
static int mlx5_init_once(struct mlx5_core_dev *dev)
{
int err;
dev->priv.devc = mlx5_devcom_register_device(dev);
- if (IS_ERR(dev->priv.devc))
- mlx5_core_warn(dev, "failed to register devcom device %ld\n",
- PTR_ERR(dev->priv.devc));
- mlx5_register_hca_devcom_comp(dev);
+ if (!dev->priv.devc)
+ mlx5_core_warn(dev, "failed to register devcom device\n");
err = mlx5_query_board_id(dev);
if (err) {
@@ -993,23 +1010,21 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
goto err_irq_cleanup;
}
- err = mlx5_events_init(dev);
- if (err) {
- mlx5_core_err(dev, "failed to initialize events\n");
- goto err_eq_cleanup;
- }
-
err = mlx5_fw_reset_init(dev);
if (err) {
mlx5_core_err(dev, "failed to initialize fw reset events\n");
- goto err_events_cleanup;
+ goto err_eq_cleanup;
}
mlx5_cq_debugfs_init(dev);
mlx5_init_reserved_gids(dev);
- mlx5_init_clock(dev);
+ err = mlx5_init_clock(dev);
+ if (err) {
+ mlx5_core_err(dev, "failed to initialize hardware clock\n");
+ goto err_tables_cleanup;
+ }
dev->vxlan = mlx5_vxlan_create(dev);
dev->geneve = mlx5_geneve_create(dev);
@@ -1017,7 +1032,7 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
err = mlx5_init_rl_table(dev);
if (err) {
mlx5_core_err(dev, "Failed to init rate limiting\n");
- goto err_tables_cleanup;
+ goto err_clock_cleanup;
}
err = mlx5_mpfs_init(dev);
@@ -1069,9 +1084,7 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
}
dev->dm = mlx5_dm_create(dev);
- if (IS_ERR(dev->dm))
- mlx5_core_warn(dev, "Failed to init device memory %ld\n", PTR_ERR(dev->dm));
-
+ dev->st = mlx5_st_create(dev);
dev->tracer = mlx5_fw_tracer_create(dev);
dev->hv_vhca = mlx5_hv_vhca_create(dev);
dev->rsc_dump = mlx5_rsc_dump_create(dev);
@@ -1094,21 +1107,19 @@ err_mpfs_cleanup:
mlx5_mpfs_cleanup(dev);
err_rl_cleanup:
mlx5_cleanup_rl_table(dev);
-err_tables_cleanup:
+err_clock_cleanup:
mlx5_geneve_destroy(dev->geneve);
mlx5_vxlan_destroy(dev->vxlan);
mlx5_cleanup_clock(dev);
+err_tables_cleanup:
mlx5_cleanup_reserved_gids(dev);
mlx5_cq_debugfs_cleanup(dev);
mlx5_fw_reset_cleanup(dev);
-err_events_cleanup:
- mlx5_events_cleanup(dev);
err_eq_cleanup:
mlx5_eq_table_cleanup(dev);
err_irq_cleanup:
mlx5_irq_table_cleanup(dev);
err_devcom:
- mlx5_unregister_hca_devcom_comp(dev);
mlx5_devcom_unregister_device(dev->priv.devc);
return err;
@@ -1119,6 +1130,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
mlx5_rsc_dump_destroy(dev);
mlx5_hv_vhca_destroy(dev->hv_vhca);
mlx5_fw_tracer_destroy(dev->tracer);
+ mlx5_st_destroy(dev);
mlx5_dm_cleanup(dev);
mlx5_fs_core_free(dev);
mlx5_sf_table_cleanup(dev);
@@ -1135,10 +1147,8 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
mlx5_cleanup_reserved_gids(dev);
mlx5_cq_debugfs_cleanup(dev);
mlx5_fw_reset_cleanup(dev);
- mlx5_events_cleanup(dev);
mlx5_eq_table_cleanup(dev);
mlx5_irq_table_cleanup(dev);
- mlx5_unregister_hca_devcom_comp(dev);
mlx5_devcom_unregister_device(dev->priv.devc);
}
@@ -1176,24 +1186,24 @@ static int mlx5_function_enable(struct mlx5_core_dev *dev, bool boot, u64 timeou
dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev);
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_UP);
- mlx5_start_health_poll(dev);
-
err = mlx5_core_enable_hca(dev, 0);
if (err) {
mlx5_core_err(dev, "enable hca failed\n");
- goto stop_health_poll;
+ goto err_cmd_cleanup;
}
+ mlx5_start_health_poll(dev);
+
err = mlx5_core_set_issi(dev);
if (err) {
mlx5_core_err(dev, "failed to set issi\n");
- goto err_disable_hca;
+ goto stop_health_poll;
}
err = mlx5_satisfy_startup_pages(dev, 1);
if (err) {
mlx5_core_err(dev, "failed to allocate boot pages\n");
- goto err_disable_hca;
+ goto stop_health_poll;
}
err = mlx5_tout_query_dtor(dev);
@@ -1206,10 +1216,9 @@ static int mlx5_function_enable(struct mlx5_core_dev *dev, bool boot, u64 timeou
reclaim_boot_pages:
mlx5_reclaim_startup_pages(dev);
-err_disable_hca:
- mlx5_core_disable_hca(dev, 0);
stop_health_poll:
mlx5_stop_health_poll(dev, boot);
+ mlx5_core_disable_hca(dev, 0);
err_cmd_cleanup:
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
mlx5_cmd_disable(dev);
@@ -1220,8 +1229,8 @@ err_cmd_cleanup:
static void mlx5_function_disable(struct mlx5_core_dev *dev, bool boot)
{
mlx5_reclaim_startup_pages(dev);
- mlx5_core_disable_hca(dev, 0);
mlx5_stop_health_poll(dev, boot);
+ mlx5_core_disable_hca(dev, 0);
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
mlx5_cmd_disable(dev);
}
@@ -1298,6 +1307,9 @@ static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
if (!err)
mlx5_function_disable(dev, boot);
+ else
+ mlx5_stop_health_poll(dev, boot);
+
return err;
}
@@ -1305,10 +1317,9 @@ static int mlx5_load(struct mlx5_core_dev *dev)
{
int err;
- dev->priv.uar = mlx5_get_uars_page(dev);
- if (IS_ERR(dev->priv.uar)) {
- mlx5_core_err(dev, "Failed allocating uar, aborting\n");
- err = PTR_ERR(dev->priv.uar);
+ err = mlx5_alloc_bfreg(dev, &dev->priv.bfreg, false, false);
+ if (err) {
+ mlx5_core_err(dev, "Failed allocating bfreg, %d\n", err);
return err;
}
@@ -1327,6 +1338,8 @@ static int mlx5_load(struct mlx5_core_dev *dev)
goto err_eq_table;
}
+ mlx5_clock_load(dev);
+
err = mlx5_fw_tracer_init(dev->tracer);
if (err) {
mlx5_core_err(dev, "Failed to init FW tracer %d\n", err);
@@ -1364,12 +1377,6 @@ static int mlx5_load(struct mlx5_core_dev *dev)
mlx5_vhca_event_start(dev);
- err = mlx5_sf_hw_table_create(dev);
- if (err) {
- mlx5_core_err(dev, "sf table create failed %d\n", err);
- goto err_vhca;
- }
-
err = mlx5_ec_init(dev);
if (err) {
mlx5_core_err(dev, "Failed to init embedded CPU\n");
@@ -1398,8 +1405,6 @@ err_sriov:
mlx5_lag_remove_mdev(dev);
mlx5_ec_cleanup(dev);
err_ec:
- mlx5_sf_hw_table_destroy(dev);
-err_vhca:
mlx5_vhca_event_stop(dev);
err_set_hca:
mlx5_fs_core_cleanup(dev);
@@ -1410,13 +1415,14 @@ err_fpga_start:
mlx5_hv_vhca_cleanup(dev->hv_vhca);
mlx5_fw_reset_events_stop(dev);
mlx5_fw_tracer_cleanup(dev->tracer);
+ mlx5_clock_unload(dev);
mlx5_eq_table_destroy(dev);
err_eq_table:
mlx5_irq_table_destroy(dev);
err_irq_table:
mlx5_pagealloc_stop(dev);
mlx5_events_stop(dev);
- mlx5_put_uars_page(dev, dev->priv.uar);
+ mlx5_free_bfreg(dev, &dev->priv.bfreg);
return err;
}
@@ -1424,23 +1430,24 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
{
mlx5_eswitch_disable(dev->priv.eswitch);
mlx5_devlink_traps_unregister(priv_to_devlink(dev));
+ mlx5_vhca_event_stop(dev);
mlx5_sf_dev_table_destroy(dev);
mlx5_sriov_detach(dev);
mlx5_lag_remove_mdev(dev);
mlx5_ec_cleanup(dev);
mlx5_sf_hw_table_destroy(dev);
- mlx5_vhca_event_stop(dev);
mlx5_fs_core_cleanup(dev);
mlx5_fpga_device_stop(dev);
mlx5_rsc_dump_cleanup(dev);
mlx5_hv_vhca_cleanup(dev->hv_vhca);
mlx5_fw_reset_events_stop(dev);
mlx5_fw_tracer_cleanup(dev->tracer);
+ mlx5_clock_unload(dev);
mlx5_eq_table_destroy(dev);
mlx5_irq_table_destroy(dev);
mlx5_pagealloc_stop(dev);
mlx5_events_stop(dev);
- mlx5_put_uars_page(dev, dev->priv.uar);
+ mlx5_free_bfreg(dev, &dev->priv.bfreg);
}
int mlx5_init_one_devl_locked(struct mlx5_core_dev *dev)
@@ -1680,6 +1687,8 @@ int mlx5_init_one_light(struct mlx5_core_dev *dev)
struct devlink *devlink = priv_to_devlink(dev);
int err;
+ devl_lock(devlink);
+ devl_register(devlink);
dev->state = MLX5_DEVICE_STATE_UP;
err = mlx5_function_enable(dev, true, mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT));
if (err) {
@@ -1693,27 +1702,21 @@ int mlx5_init_one_light(struct mlx5_core_dev *dev)
goto query_hca_caps_err;
}
- devl_lock(devlink);
- devl_register(devlink);
-
err = mlx5_devlink_params_register(priv_to_devlink(dev));
if (err) {
mlx5_core_warn(dev, "mlx5_devlink_param_reg err = %d\n", err);
- goto params_reg_err;
+ goto query_hca_caps_err;
}
devl_unlock(devlink);
return 0;
-params_reg_err:
- devl_unregister(devlink);
- devl_unlock(devlink);
query_hca_caps_err:
- devl_unregister(devlink);
- devl_unlock(devlink);
mlx5_function_disable(dev, true);
out:
dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
+ devl_unregister(devlink);
+ devl_unlock(devlink);
return err;
}
@@ -1763,9 +1766,12 @@ static const int types[] = {
MLX5_CAP_VDPA_EMULATION,
MLX5_CAP_IPSEC,
MLX5_CAP_PORT_SELECTION,
+ MLX5_CAP_PSP,
MLX5_CAP_MACSEC,
MLX5_CAP_ADV_VIRTUALIZATION,
MLX5_CAP_CRYPTO,
+ MLX5_CAP_SHAMPO,
+ MLX5_CAP_ADV_RDMA,
};
static void mlx5_hca_caps_free(struct mlx5_core_dev *dev)
@@ -1810,6 +1816,50 @@ static int vhca_id_show(struct seq_file *file, void *priv)
DEFINE_SHOW_ATTRIBUTE(vhca_id);
+static int mlx5_notifiers_init(struct mlx5_core_dev *dev)
+{
+ int err;
+
+ err = mlx5_events_init(dev);
+ if (err) {
+ mlx5_core_err(dev, "failed to initialize events\n");
+ return err;
+ }
+
+ BLOCKING_INIT_NOTIFIER_HEAD(&dev->priv.esw_n_head);
+ mlx5_vhca_state_notifier_init(dev);
+
+ err = mlx5_sf_hw_notifier_init(dev);
+ if (err)
+ goto err_sf_hw_notifier;
+
+ err = mlx5_sf_notifiers_init(dev);
+ if (err)
+ goto err_sf_notifiers;
+
+ err = mlx5_sf_dev_notifier_init(dev);
+ if (err)
+ goto err_sf_dev_notifier;
+
+ return 0;
+
+err_sf_dev_notifier:
+ mlx5_sf_notifiers_cleanup(dev);
+err_sf_notifiers:
+ mlx5_sf_hw_notifier_cleanup(dev);
+err_sf_hw_notifier:
+ mlx5_events_cleanup(dev);
+ return err;
+}
+
+static void mlx5_notifiers_cleanup(struct mlx5_core_dev *dev)
+{
+ mlx5_sf_dev_notifier_cleanup(dev);
+ mlx5_sf_notifiers_cleanup(dev);
+ mlx5_sf_hw_notifier_cleanup(dev);
+ mlx5_events_cleanup(dev);
+}
+
int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
{
struct mlx5_priv *priv = &dev->priv;
@@ -1820,6 +1870,7 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
mutex_init(&dev->intf_state_mutex);
lockdep_set_class(&dev->intf_state_mutex, &dev->lock_key);
mutex_init(&dev->mlx5e_res.uplink_netdev_lock);
+ mutex_init(&dev->wc_state_lock);
mutex_init(&priv->bfregs.reg_head.lock);
mutex_init(&priv->bfregs.wc_head.lock);
@@ -1864,6 +1915,10 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
if (err)
goto err_hca_caps;
+ err = mlx5_notifiers_init(dev);
+ if (err)
+ goto err_hca_caps;
+
/* The conjunction of sw_vhca_id with sw_owner_id will be a global
* unique id per function which uses mlx5_core.
* Those values are supplied to FW as part of the init HCA command to
@@ -1906,6 +1961,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
if (priv->sw_vhca_id > 0)
ida_free(&sw_vhca_ida, dev->priv.sw_vhca_id);
+ mlx5_notifiers_cleanup(dev);
mlx5_hca_caps_free(dev);
mlx5_adev_cleanup(dev);
mlx5_pagealloc_cleanup(dev);
@@ -1917,6 +1973,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
mutex_destroy(&priv->alloc_mutex);
mutex_destroy(&priv->bfregs.wc_head.lock);
mutex_destroy(&priv->bfregs.reg_head.lock);
+ mutex_destroy(&dev->wc_state_lock);
mutex_destroy(&dev->mlx5e_res.uplink_netdev_lock);
mutex_destroy(&dev->intf_state_mutex);
lockdep_unregister_key(&dev->lock_key);
@@ -2080,7 +2137,6 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
pci_set_master(pdev);
pci_restore_state(pdev);
- pci_save_state(pdev);
err = wait_vital(pdev);
if (err) {
@@ -2141,7 +2197,6 @@ static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
/* Panic tear down fw command will stop the PCI bus communication
* with the HCA, so the health poll is no longer needed.
*/
- mlx5_drain_health_wq(dev);
mlx5_stop_health_poll(dev, false);
ret = mlx5_cmd_fast_teardown_hca(dev);
@@ -2176,6 +2231,7 @@ static void shutdown(struct pci_dev *pdev)
mlx5_core_info(dev, "Shutdown was called\n");
set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
+ mlx5_drain_health_wq(dev);
err = mlx5_try_fast_unload(dev);
if (err)
mlx5_unload_one(dev, false);
@@ -2216,6 +2272,8 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
{ PCI_VDEVICE(MELLANOX, 0x101f) }, /* ConnectX-6 LX */
{ PCI_VDEVICE(MELLANOX, 0x1021) }, /* ConnectX-7 */
{ PCI_VDEVICE(MELLANOX, 0x1023) }, /* ConnectX-8 */
+ { PCI_VDEVICE(MELLANOX, 0x1025) }, /* ConnectX-9 */
+ { PCI_VDEVICE(MELLANOX, 0x1027) }, /* ConnectX-10 */
{ PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */
{ PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */
{ PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 58732f44940f..cfebc110c02f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -114,6 +114,26 @@ struct mlx5_cmd_alias_obj_create_attr {
u8 access_key[ACCESS_KEY_LEN];
};
+struct mlx5_port_eth_proto {
+ u32 cap;
+ u32 admin;
+ u32 oper;
+};
+
+struct mlx5_module_eeprom_query_params {
+ u16 size;
+ u16 offset;
+ u16 i2c_address;
+ u32 page;
+ u32 bank;
+ u32 module_number;
+};
+
+struct mlx5_link_info {
+ u32 speed;
+ u32 lanes;
+};
+
static inline void mlx5_printk(struct mlx5_core_dev *dev, int level, const char *format, ...)
{
struct device *device = dev->device;
@@ -205,7 +225,7 @@ int mlx5_cmd_enable(struct mlx5_core_dev *dev);
void mlx5_cmd_disable(struct mlx5_core_dev *dev);
void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
enum mlx5_cmdif_state cmdif_state);
-int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id);
+int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, u32 *sw_owner_id);
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev);
@@ -224,6 +244,8 @@ void mlx5_sriov_disable(struct pci_dev *pdev, bool num_vf_change);
int mlx5_core_sriov_set_msix_vec_count(struct pci_dev *vf, int msix_vec_count);
int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id);
int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id);
+bool mlx5_qos_element_type_supported(struct mlx5_core_dev *dev, int type, u8 hierarchy);
+bool mlx5_qos_tsar_type_supported(struct mlx5_core_dev *dev, int type, u8 hierarchy);
int mlx5_create_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
void *context, u32 *element_id);
int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
@@ -278,6 +300,89 @@ int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode);
struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev);
void mlx5_dm_cleanup(struct mlx5_core_dev *dev);
+#ifdef CONFIG_PCIE_TPH
+struct mlx5_st *mlx5_st_create(struct mlx5_core_dev *dev);
+void mlx5_st_destroy(struct mlx5_core_dev *dev);
+#else
+static inline struct mlx5_st *
+mlx5_st_create(struct mlx5_core_dev *dev) { return NULL; }
+static inline void mlx5_st_destroy(struct mlx5_core_dev *dev) { return; }
+#endif
+
+void mlx5_toggle_port_link(struct mlx5_core_dev *dev);
+int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
+ enum mlx5_port_status status);
+int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
+ enum mlx5_port_status *status);
+int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration);
+
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port);
+int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause);
+int mlx5_query_port_pause(struct mlx5_core_dev *dev,
+ u32 *rx_pause, u32 *tx_pause);
+
+int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx);
+int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx,
+ u8 *pfc_en_rx);
+
+int mlx5_set_port_stall_watermark(struct mlx5_core_dev *dev,
+ u16 stall_critical_watermark,
+ u16 stall_minor_watermark);
+int mlx5_query_port_stall_watermark(struct mlx5_core_dev *dev,
+ u16 *stall_critical_watermark,
+ u16 *stall_minor_watermark);
+
+int mlx5_max_tc(struct mlx5_core_dev *mdev);
+int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc);
+int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
+ u8 prio, u8 *tc);
+int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group);
+int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
+ u8 tc, u8 *tc_group);
+int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw);
+int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
+ u8 tc, u8 *bw_pct);
+int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
+ u8 *max_bw_value,
+ u8 *max_bw_unit);
+int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
+ u8 *max_bw_value,
+ u8 *max_bw_unit);
+int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode);
+int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode);
+
+int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out, int outlen);
+int mlx5_set_ports_check(struct mlx5_core_dev *mdev, u32 *in, int inlen);
+int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable);
+void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported,
+ bool *enabled);
+int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
+ u16 offset, u16 size, u8 *data, u8 *status);
+int
+mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev,
+ struct mlx5_module_eeprom_query_params *params,
+ u8 *data, u8 *status);
+
+int mlx5_query_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *out);
+int mlx5_set_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *in);
+int mlx5_set_trust_state(struct mlx5_core_dev *mdev, u8 trust_state);
+int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state);
+int mlx5_query_port_buffer_ownership(struct mlx5_core_dev *mdev,
+ u8 *buffer_ownership);
+int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, u8 dscp, u8 prio);
+int mlx5_query_dscp2prio(struct mlx5_core_dev *mdev, u8 *dscp2prio);
+
+int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext,
+ struct mlx5_port_eth_proto *eproto);
+bool mlx5_ptys_ext_supported(struct mlx5_core_dev *mdev);
+const struct mlx5_link_info *mlx5_port_ptys2info(struct mlx5_core_dev *mdev,
+ u32 eth_proto_oper,
+ bool force_legacy);
+u32 mlx5_port_info2linkmodes(struct mlx5_core_dev *mdev,
+ struct mlx5_link_info *info,
+ bool force_legacy);
+int mlx5_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
+
#define MLX5_PPS_CAP(mdev) (MLX5_CAP_GEN((mdev), pps) && \
MLX5_CAP_GEN((mdev), pps_modify) && \
MLX5_CAP_MCAM_FEATURE((mdev), mtpps_fs) && \
@@ -320,6 +425,12 @@ static inline bool mlx5_core_is_sf(const struct mlx5_core_dev *dev)
return dev->coredev_type == MLX5_COREDEV_SF;
}
+static inline struct auxiliary_device *
+mlx5_sf_coredev_to_adev(struct mlx5_core_dev *mdev)
+{
+ return container_of(mdev->device, struct auxiliary_device, dev);
+}
+
int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx);
void mlx5_mdev_uninit(struct mlx5_core_dev *dev);
int mlx5_init_one(struct mlx5_core_dev *dev);
@@ -333,6 +444,8 @@ int mlx5_init_one_light(struct mlx5_core_dev *dev);
void mlx5_uninit_one_light(struct mlx5_core_dev *dev);
void mlx5_unload_one_light(struct mlx5_core_dev *dev);
+void mlx5_query_nic_sw_system_image_guid(struct mlx5_core_dev *mdev, u8 *buf,
+ u8 *len);
int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap, u16 vport,
u16 opmod);
#define mlx5_vport_get_other_func_general_cap(dev, vport, out) \
@@ -383,4 +496,27 @@ static inline int mlx5_vport_to_func_id(const struct mlx5_core_dev *dev, u16 vpo
: vport;
}
+static inline int mlx5_max_eq_cap_get(const struct mlx5_core_dev *dev)
+{
+ if (MLX5_CAP_GEN_2(dev, max_num_eqs_24b))
+ return MLX5_CAP_GEN_2(dev, max_num_eqs_24b);
+
+ if (MLX5_CAP_GEN(dev, max_num_eqs))
+ return MLX5_CAP_GEN(dev, max_num_eqs);
+
+ return 1 << MLX5_CAP_GEN(dev, log_max_eq);
+}
+
+static inline bool mlx5_pcie_cong_event_supported(struct mlx5_core_dev *dev)
+{
+ u64 features = MLX5_CAP_GEN_2_64(dev, general_obj_types_127_64);
+
+ if (!(features & MLX5_HCA_CAP_2_GENERAL_OBJECT_TYPES_PCIE_CONG_EVENT))
+ return false;
+
+ if (dev->sd)
+ return false;
+
+ return true;
+}
#endif /* __MLX5_CORE_H__ */
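mlx5_max_eq_cap_get() above prefers the newer 24-bit max_num_eqs_24b capability, falls back to the legacy max_num_eqs field, and finally derives the count from log_max_eq. A stand-alone model of that fallback order, using an illustrative struct rather than the real capability layout:

#include <stdio.h>

struct fake_caps {
	unsigned int max_num_eqs_24b;	/* newer, wider field */
	unsigned int max_num_eqs;	/* legacy field, 0 if unsupported */
	unsigned int log_max_eq;	/* always-valid fallback */
};

static int max_eq_cap_get(const struct fake_caps *c)
{
	if (c->max_num_eqs_24b)
		return c->max_num_eqs_24b;
	if (c->max_num_eqs)
		return c->max_num_eqs;
	return 1 << c->log_max_eq;
}

int main(void)
{
	struct fake_caps legacy = { .max_num_eqs = 1024, .log_max_eq = 7 };
	struct fake_caps newer = { .max_num_eqs_24b = 1 << 20, .log_max_eq = 7 };

	printf("legacy device: %d EQs, newer device: %d EQs\n",
	       max_eq_cap_get(&legacy), max_eq_cap_get(&newer));
	return 0;
}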
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
index 1088114e905d..586688da9940 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
@@ -10,12 +10,15 @@
struct mlx5_irq;
struct cpu_rmap;
+struct mlx5_irq_pool;
int mlx5_irq_table_init(struct mlx5_core_dev *dev);
void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev);
int mlx5_irq_table_create(struct mlx5_core_dev *dev);
void mlx5_irq_table_destroy(struct mlx5_core_dev *dev);
void mlx5_irq_table_free_irqs(struct mlx5_core_dev *dev);
+struct mlx5_irq_pool *
+mlx5_irq_table_get_comp_irq_pool(struct mlx5_core_dev *dev);
int mlx5_irq_table_get_num_comp(struct mlx5_irq_table *table);
int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table);
struct mlx5_irq_table *mlx5_irq_table_get(struct mlx5_core_dev *dev);
@@ -25,7 +28,7 @@ int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int devfn,
int mlx5_get_default_msix_vec_count(struct mlx5_core_dev *dev, int num_vfs);
struct mlx5_irq *mlx5_ctrl_irq_request(struct mlx5_core_dev *dev);
-void mlx5_ctrl_irq_release(struct mlx5_irq *ctrl_irq);
+void mlx5_ctrl_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *ctrl_irq);
struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx,
struct irq_affinity_desc *af_desc,
struct cpu_rmap **rmap);
@@ -36,13 +39,14 @@ int mlx5_irq_attach_nb(struct mlx5_irq *irq, struct notifier_block *nb);
int mlx5_irq_detach_nb(struct mlx5_irq *irq, struct notifier_block *nb);
struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq);
int mlx5_irq_get_index(struct mlx5_irq *irq);
+int mlx5_irq_get_irq(const struct mlx5_irq *irq);
-struct mlx5_irq_pool;
#ifdef CONFIG_MLX5_SF
struct mlx5_irq *mlx5_irq_affinity_irq_request_auto(struct mlx5_core_dev *dev,
struct cpumask *used_cpus, u16 vecidx);
-struct mlx5_irq *mlx5_irq_affinity_request(struct mlx5_irq_pool *pool,
- struct irq_affinity_desc *af_desc);
+struct mlx5_irq *
+mlx5_irq_affinity_request(struct mlx5_core_dev *dev, struct mlx5_irq_pool *pool,
+ struct irq_affinity_desc *af_desc);
void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *irq);
#else
static inline
@@ -53,7 +57,8 @@ struct mlx5_irq *mlx5_irq_affinity_irq_request_auto(struct mlx5_core_dev *dev,
}
static inline struct mlx5_irq *
-mlx5_irq_affinity_request(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc)
+mlx5_irq_affinity_request(struct mlx5_core_dev *dev, struct mlx5_irq_pool *pool,
+ struct irq_affinity_desc *af_desc)
{
return ERR_PTR(-EOPNOTSUPP);
}
@@ -61,6 +66,7 @@ mlx5_irq_affinity_request(struct mlx5_irq_pool *pool, struct irq_affinity_desc *
static inline
void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *irq)
{
+ mlx5_irq_release_vector(irq);
}
#endif
#endif /* __MLX5_IRQ_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index dcf58efac159..cd68c4b2c0bf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -291,7 +291,7 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 function)
static int alloc_system_page(struct mlx5_core_dev *dev, u32 function)
{
struct device *device = mlx5_core_dma_dev(dev);
- int nid = dev_to_node(device);
+ int nid = dev->priv.numa_node;
struct page *page;
u64 zero_addr = 1;
u64 addr;
@@ -489,9 +489,12 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
u32 func_id;
u32 npages;
u32 i = 0;
+ int err;
- if (!mlx5_cmd_is_down(dev))
- return mlx5_cmd_do(dev, in, in_size, out, out_size);
+ err = mlx5_cmd_do(dev, in, in_size, out, out_size);
+ /* If FW is gone (-ENXIO), proceed to forceful reclaim */
+ if (err != -ENXIO)
+ return err;
/* No hard feelings, we want our pages back! */
npages = MLX5_GET(manage_pages_in, in, input_num_entries);
@@ -608,6 +611,11 @@ enum {
RELEASE_ALL_PAGES_MASK = 0x4000,
};
+/* This limit is based on the capability of the firmware as it cannot release
+ * more than 50000 pages back to the host in one go.
+ */
+#define MAX_RECLAIM_NPAGES (-50000)
+
static int req_pages_handler(struct notifier_block *nb,
unsigned long type, void *data)
{
@@ -639,7 +647,16 @@ static int req_pages_handler(struct notifier_block *nb,
req->dev = dev;
req->func_id = func_id;
- req->npages = npages;
+
+ /* npages > 0 means HCA asking host to allocate/give pages,
+ * npages < 0 means HCA asking host to reclaim back the pages allocated.
+ * Here we are restricting the maximum number of pages that can be
+ * reclaimed to be MAX_RECLAIM_NPAGES. Note that MAX_RECLAIM_NPAGES is
+ * a negative value.
+ * Since MAX_RECLAIM_NPAGES is negative, we are using max() to restrict
+ * req->npages (and not min()).
+ */
+ req->npages = max_t(s32, npages, MAX_RECLAIM_NPAGES);
req->ec_function = ec_function;
req->release_all = release_all;
INIT_WORK(&req->work, pages_work_handler);
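Because reclaim requests carry a negative npages, max_t() (not min_t()) is the correct clamp against MAX_RECLAIM_NPAGES. A stand-alone illustration with made-up page counts:

#include <stdio.h>

#define MAX_RECLAIM_NPAGES (-50000)

static int clamp_npages(int npages)
{
	return npages > MAX_RECLAIM_NPAGES ? npages : MAX_RECLAIM_NPAGES;
}

int main(void)
{
	printf("give 4096 pages      -> %d\n", clamp_npages(4096));	/* unchanged */
	printf("reclaim 10000 pages  -> %d\n", clamp_npages(-10000));	/* unchanged */
	printf("reclaim 200000 pages -> %d\n", clamp_npages(-200000));	/* clamped to -50000 */
	return 0;
}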
@@ -660,6 +677,9 @@ int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
npages, boot ? "boot" : "init", func_id);
+ if (!npages)
+ return 0;
+
return give_pages(dev, func_id, npages, 0, mlx5_core_is_ecpf(dev));
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 6bac8ad70ba6..aa3b5878e3da 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -16,6 +16,7 @@
#endif
#define MLX5_SFS_PER_CTRL_IRQ 64
+#define MLX5_MAX_MSIX_PER_SF 256
#define MLX5_IRQ_CTRL_SF_MAX 8
/* min num of vectors for SFs to be enabled */
#define MLX5_IRQ_VEC_COMP_BASE_SF 2
@@ -53,7 +54,7 @@ static int mlx5_core_func_to_vport(const struct mlx5_core_dev *dev,
/**
* mlx5_get_default_msix_vec_count - Get the default number of MSI-X vectors
- * to be ssigned to each VF.
+ * to be assigned to each VF.
* @dev: PF to work on
* @num_vfs: Number of enabled VFs
*/
@@ -147,7 +148,7 @@ out:
* Free the IRQ and other resources such as rmap from the system.
* BUT doesn't free or remove reference from mlx5.
* This function is very important for the shutdown flow, where we need to
- * cleanup system resoruces but keep mlx5 objects alive,
+ * cleanup system resources but keep mlx5 objects alive,
* see mlx5_irq_table_free_irqs().
*/
static void mlx5_system_free_irq(struct mlx5_irq *irq)
@@ -323,10 +324,8 @@ err_xa:
free_irq(irq->map.virq, &irq->nh);
err_req_irq:
#ifdef CONFIG_RFS_ACCEL
- if (i && rmap && *rmap) {
- free_irq_cpu_rmap(*rmap);
- *rmap = NULL;
- }
+ if (i && rmap && *rmap)
+ irq_cpu_rmap_remove(*rmap, irq->map.virq);
err_irq_rmap:
#endif
if (i && pci_msix_can_alloc_dyn(dev->pdev))
@@ -367,11 +366,21 @@ struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq)
return irq->mask;
}
+int mlx5_irq_get_irq(const struct mlx5_irq *irq)
+{
+ return irq->map.virq;
+}
+
int mlx5_irq_get_index(struct mlx5_irq *irq)
{
return irq->map.index;
}
+struct mlx5_irq_pool *mlx5_irq_get_pool(struct mlx5_irq *irq)
+{
+ return irq->pool;
+}
+
/* irq_pool API */
/* requesting an irq from a given pool according to given index */
@@ -399,18 +408,20 @@ static struct mlx5_irq_pool *sf_ctrl_irq_pool_get(struct mlx5_irq_table *irq_tab
return irq_table->sf_ctrl_pool;
}
-static struct mlx5_irq_pool *sf_irq_pool_get(struct mlx5_irq_table *irq_table)
+static struct mlx5_irq_pool *
+sf_comp_irq_pool_get(struct mlx5_irq_table *irq_table)
{
return irq_table->sf_comp_pool;
}
-struct mlx5_irq_pool *mlx5_irq_pool_get(struct mlx5_core_dev *dev)
+struct mlx5_irq_pool *
+mlx5_irq_table_get_comp_irq_pool(struct mlx5_core_dev *dev)
{
struct mlx5_irq_table *irq_table = mlx5_irq_table_get(dev);
struct mlx5_irq_pool *pool = NULL;
if (mlx5_core_is_sf(dev))
- pool = sf_irq_pool_get(irq_table);
+ pool = sf_comp_irq_pool_get(irq_table);
/* In some configs, there won't be a pool of SFs IRQs. Hence, returning
* the PF IRQs pool in case the SF pool doesn't exist.
@@ -440,11 +451,12 @@ static void _mlx5_irq_release(struct mlx5_irq *irq)
/**
* mlx5_ctrl_irq_release - release a ctrl IRQ back to the system.
+ * @dev: mlx5 device that is releasing the IRQ.
* @ctrl_irq: ctrl IRQ to be released.
*/
-void mlx5_ctrl_irq_release(struct mlx5_irq *ctrl_irq)
+void mlx5_ctrl_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *ctrl_irq)
{
- _mlx5_irq_release(ctrl_irq);
+ mlx5_irq_affinity_irq_release(dev, ctrl_irq);
}
/**
@@ -456,26 +468,32 @@ void mlx5_ctrl_irq_release(struct mlx5_irq *ctrl_irq)
struct mlx5_irq *mlx5_ctrl_irq_request(struct mlx5_core_dev *dev)
{
struct mlx5_irq_pool *pool = ctrl_irq_pool_get(dev);
- struct irq_affinity_desc af_desc;
+ struct irq_affinity_desc *af_desc;
struct mlx5_irq *irq;
- cpumask_copy(&af_desc.mask, cpu_online_mask);
- af_desc.is_managed = false;
+ af_desc = kvzalloc(sizeof(*af_desc), GFP_KERNEL);
+ if (!af_desc)
+ return ERR_PTR(-ENOMEM);
+
+ cpumask_copy(&af_desc->mask, cpu_online_mask);
+ af_desc->is_managed = false;
if (!mlx5_irq_pool_is_sf_pool(pool)) {
/* In case we are allocating a control IRQ from a pci device's pool.
* This can happen also for a SF if the SFs pool is empty.
*/
if (!pool->xa_num_irqs.max) {
- cpumask_clear(&af_desc.mask);
+ cpumask_clear(&af_desc->mask);
/* In case we only have a single IRQ for PF/VF */
- cpumask_set_cpu(cpumask_first(cpu_online_mask), &af_desc.mask);
+ cpumask_set_cpu(cpumask_first(cpu_online_mask), &af_desc->mask);
}
/* Allocate the IRQ in index 0. The vector was already allocated */
- irq = irq_pool_request_vector(pool, 0, &af_desc, NULL);
+ irq = irq_pool_request_vector(pool, 0, af_desc, NULL);
} else {
- irq = mlx5_irq_affinity_request(pool, &af_desc);
+ irq = mlx5_irq_affinity_request(dev, pool, af_desc);
}
+ kvfree(af_desc);
+
return irq;
}
@@ -508,58 +526,6 @@ struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx,
}
/**
- * mlx5_msix_alloc - allocate msix interrupt
- * @dev: mlx5 device from which to request
- * @handler: interrupt handler
- * @affdesc: affinity descriptor
- * @name: interrupt name
- *
- * Returns: struct msi_map with result encoded.
- * Note: the caller must make sure to release the irq by calling
- * mlx5_msix_free() if shutdown was initiated.
- */
-struct msi_map mlx5_msix_alloc(struct mlx5_core_dev *dev,
- irqreturn_t (*handler)(int, void *),
- const struct irq_affinity_desc *affdesc,
- const char *name)
-{
- struct msi_map map;
- int err;
-
- if (!dev->pdev) {
- map.virq = 0;
- map.index = -EINVAL;
- return map;
- }
-
- map = pci_msix_alloc_irq_at(dev->pdev, MSI_ANY_INDEX, affdesc);
- if (!map.virq)
- return map;
-
- err = request_irq(map.virq, handler, 0, name, NULL);
- if (err) {
- mlx5_core_warn(dev, "err %d\n", err);
- pci_msix_free_irq(dev->pdev, map);
- map.virq = 0;
- map.index = -ENOMEM;
- }
- return map;
-}
-EXPORT_SYMBOL(mlx5_msix_alloc);
-
-/**
- * mlx5_msix_free - free a previously allocated msix interrupt
- * @dev: mlx5 device associated with interrupt
- * @map: map previously returned by mlx5_msix_alloc()
- */
-void mlx5_msix_free(struct mlx5_core_dev *dev, struct msi_map map)
-{
- free_irq(map.virq, NULL);
- pci_msix_free_irq(dev->pdev, map);
-}
-EXPORT_SYMBOL(mlx5_msix_free);
-
-/**
* mlx5_irq_release_vector - release one IRQ back to the system.
* @irq: the irq to release.
*/
@@ -586,16 +552,26 @@ struct mlx5_irq *mlx5_irq_request_vector(struct mlx5_core_dev *dev, u16 cpu,
{
struct mlx5_irq_table *table = mlx5_irq_table_get(dev);
struct mlx5_irq_pool *pool = table->pcif_pool;
- struct irq_affinity_desc af_desc;
int offset = MLX5_IRQ_VEC_COMP_BASE;
+ struct irq_affinity_desc *af_desc;
+ struct mlx5_irq *irq;
+
+ af_desc = kvzalloc(sizeof(*af_desc), GFP_KERNEL);
+ if (!af_desc)
+ return ERR_PTR(-ENOMEM);
if (!pool->xa_num_irqs.max)
offset = 0;
- af_desc.is_managed = false;
- cpumask_clear(&af_desc.mask);
- cpumask_set_cpu(cpu, &af_desc.mask);
- return mlx5_irq_request(dev, vecidx + offset, &af_desc, rmap);
+ af_desc->is_managed = false;
+ cpumask_clear(&af_desc->mask);
+ cpumask_set_cpu(cpu, &af_desc->mask);
+
+ irq = mlx5_irq_request(dev, vecidx + offset, af_desc, rmap);
+
+ kvfree(af_desc);
+
+ return irq;
}
static struct mlx5_irq_pool *
@@ -617,7 +593,7 @@ irq_pool_alloc(struct mlx5_core_dev *dev, int start, int size, char *name,
pool->min_threshold = min_threshold * MLX5_EQ_REFS_PER_IRQ;
pool->max_threshold = max_threshold * MLX5_EQ_REFS_PER_IRQ;
mlx5_core_dbg(dev, "pool->name = %s, pool->size = %d, pool->start = %d",
- name, size, start);
+ name ? name : "mlx5_pcif_pool", size, start);
return pool;
}
@@ -626,7 +602,7 @@ static void irq_pool_free(struct mlx5_irq_pool *pool)
struct mlx5_irq *irq;
unsigned long index;
- /* There are cases in which we are destrying the irq_table before
+ /* There are cases in which we are destroying the irq_table before
* freeing all the IRQs, fast teardown for example. Hence, free the irqs
* which might not have been freed.
*/
@@ -638,11 +614,11 @@ static void irq_pool_free(struct mlx5_irq_pool *pool)
kvfree(pool);
}
-static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pcif_vec)
+static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pcif_vec,
+ bool dynamic_vec)
{
struct mlx5_irq_table *table = dev->priv.irq_table;
- int num_sf_ctrl_by_msix;
- int num_sf_ctrl_by_sfs;
+ int sf_vec_available = sf_vec;
int num_sf_ctrl;
int err;
@@ -655,16 +631,21 @@ static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pcif_vec)
if (!mlx5_sf_max_functions(dev))
return 0;
if (sf_vec < MLX5_IRQ_VEC_COMP_BASE_SF) {
- mlx5_core_dbg(dev, "Not enught IRQs for SFs. SF may run at lower performance\n");
+ mlx5_core_dbg(dev, "Not enough IRQs for SFs. SF may run at lower performance\n");
return 0;
}
/* init sf_ctrl_pool */
- num_sf_ctrl_by_msix = DIV_ROUND_UP(sf_vec, MLX5_COMP_EQS_PER_SF);
- num_sf_ctrl_by_sfs = DIV_ROUND_UP(mlx5_sf_max_functions(dev),
- MLX5_SFS_PER_CTRL_IRQ);
- num_sf_ctrl = min_t(int, num_sf_ctrl_by_msix, num_sf_ctrl_by_sfs);
+ num_sf_ctrl = DIV_ROUND_UP(mlx5_sf_max_functions(dev),
+ MLX5_SFS_PER_CTRL_IRQ);
num_sf_ctrl = min_t(int, MLX5_IRQ_CTRL_SF_MAX, num_sf_ctrl);
+ if (!dynamic_vec && (num_sf_ctrl + 1) > sf_vec_available) {
+ mlx5_core_dbg(dev,
+ "Not enough IRQs for SFs control and completion pool, required=%d avail=%d\n",
+ num_sf_ctrl + 1, sf_vec_available);
+ return 0;
+ }
+
table->sf_ctrl_pool = irq_pool_alloc(dev, pcif_vec, num_sf_ctrl,
"mlx5_sf_ctrl",
MLX5_EQ_SHARE_IRQ_MIN_CTRL,
@@ -673,9 +654,11 @@ static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pcif_vec)
err = PTR_ERR(table->sf_ctrl_pool);
goto err_pf;
}
- /* init sf_comp_pool */
+ sf_vec_available -= num_sf_ctrl;
+
+ /* init sf_comp_pool, remaining vectors are for the SF completions */
table->sf_comp_pool = irq_pool_alloc(dev, pcif_vec + num_sf_ctrl,
- sf_vec - num_sf_ctrl, "mlx5_sf_comp",
+ sf_vec_available, "mlx5_sf_comp",
MLX5_EQ_SHARE_IRQ_MIN_COMP,
MLX5_EQ_SHARE_IRQ_MAX_COMP);
if (IS_ERR(table->sf_comp_pool)) {
@@ -763,9 +746,8 @@ int mlx5_irq_table_get_num_comp(struct mlx5_irq_table *table)
int mlx5_irq_table_create(struct mlx5_core_dev *dev)
{
- int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
- MLX5_CAP_GEN(dev, max_num_eqs) :
- 1 << MLX5_CAP_GEN(dev, log_max_eq);
+ int num_eqs = mlx5_max_eq_cap_get(dev);
+ bool dynamic_vec;
int total_vec;
int pcif_vec;
int req_vec;
@@ -775,22 +757,31 @@ int mlx5_irq_table_create(struct mlx5_core_dev *dev)
if (mlx5_core_is_sf(dev))
return 0;
+ /* PCI PF vector usage is limited by the number of online CPUs, device EQs and
+ * PCI MSI-X capability.
+ */
pcif_vec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() + 1;
pcif_vec = min_t(int, pcif_vec, num_eqs);
+ pcif_vec = min_t(int, pcif_vec, pci_msix_vec_count(dev->pdev));
total_vec = pcif_vec;
if (mlx5_sf_max_functions(dev))
- total_vec += MLX5_IRQ_CTRL_SF_MAX +
- MLX5_COMP_EQS_PER_SF * mlx5_sf_max_functions(dev);
+ total_vec += MLX5_MAX_MSIX_PER_SF * mlx5_sf_max_functions(dev);
total_vec = min_t(int, total_vec, pci_msix_vec_count(dev->pdev));
- pcif_vec = min_t(int, pcif_vec, pci_msix_vec_count(dev->pdev));
req_vec = pci_msix_can_alloc_dyn(dev->pdev) ? 1 : total_vec;
n = pci_alloc_irq_vectors(dev->pdev, 1, req_vec, PCI_IRQ_MSIX);
if (n < 0)
return n;
- err = irq_pools_init(dev, total_vec - pcif_vec, pcif_vec);
+ /* Further limit the pools' vectors based on the platform for the non-dynamic case */
+ dynamic_vec = pci_msix_can_alloc_dyn(dev->pdev);
+ if (!dynamic_vec) {
+ pcif_vec = min_t(int, n, pcif_vec);
+ total_vec = min_t(int, n, total_vec);
+ }
+
+ err = irq_pools_init(dev, total_vec - pcif_vec, pcif_vec, dynamic_vec);
if (err)
pci_free_irq_vectors(dev->pdev);
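The reworked budgeting above bounds PF vectors by ports x online CPUs + 1, the device EQ count and the MSI-X capability, then reserves up to MLX5_MAX_MSIX_PER_SF vectors per SF, and on platforms without dynamic MSI-X further clamps both figures to what pci_alloc_irq_vectors() actually granted. A stand-alone model of that arithmetic with hypothetical numbers:

#include <stdio.h>

#define MAX_MSIX_PER_SF 256

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
	int num_ports = 1, online_cpus = 16, num_eqs = 1024, msix_count = 512;
	int max_sfs = 64, granted = 300;	/* result of pci_alloc_irq_vectors() */
	int dynamic_vec = 0;			/* platform without dynamic MSI-X */

	int pcif_vec = num_ports * online_cpus + 1;
	pcif_vec = min_int(pcif_vec, num_eqs);
	pcif_vec = min_int(pcif_vec, msix_count);

	int total_vec = pcif_vec + MAX_MSIX_PER_SF * max_sfs;
	total_vec = min_int(total_vec, msix_count);

	if (!dynamic_vec) {
		pcif_vec = min_int(granted, pcif_vec);
		total_vec = min_int(granted, total_vec);
	}

	printf("pcif_vec=%d sf_vec=%d\n", pcif_vec, total_vec - pcif_vec);
	return 0;
}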
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h
index c4d377f8df30..cc064425fe16 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h
@@ -28,7 +28,6 @@ struct mlx5_irq_pool {
struct mlx5_core_dev *dev;
};
-struct mlx5_irq_pool *mlx5_irq_pool_get(struct mlx5_core_dev *dev);
static inline bool mlx5_irq_pool_is_sf_pool(struct mlx5_irq_pool *pool)
{
return !strncmp("mlx5_sf", pool->name, strlen("mlx5_sf"));
@@ -40,5 +39,6 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
int mlx5_irq_get_locked(struct mlx5_irq *irq);
int mlx5_irq_read_locked(struct mlx5_irq *irq);
int mlx5_irq_put(struct mlx5_irq *irq);
+struct mlx5_irq_pool *mlx5_irq_get_pool(struct mlx5_irq *irq);
#endif /* __PCI_IRQ_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 7fba1c46e2ac..85a9e534f442 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -144,11 +144,13 @@ int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps)
EXPORT_SYMBOL_GPL(mlx5_set_port_caps);
int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
- int ptys_size, int proto_mask, u8 local_port)
+ int ptys_size, int proto_mask,
+ u8 local_port, u8 plane_index)
{
u32 in[MLX5_ST_SZ_DW(ptys_reg)] = {0};
MLX5_SET(ptys_reg, in, local_port, local_port);
+ MLX5_SET(ptys_reg, in, plane_ind, plane_index);
MLX5_SET(ptys_reg, in, proto_mask, proto_mask);
return mlx5_core_access_reg(dev, in, sizeof(in), ptys,
ptys_size, MLX5_REG_PTYS, 0, 0);
@@ -167,13 +169,13 @@ int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration)
}
int mlx5_query_ib_port_oper(struct mlx5_core_dev *dev, u16 *link_width_oper,
- u16 *proto_oper, u8 local_port)
+ u16 *proto_oper, u8 local_port, u8 plane_index)
{
u32 out[MLX5_ST_SZ_DW(ptys_reg)];
int err;
err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_IB,
- local_port);
+ local_port, plane_index);
if (err)
return err;
@@ -194,7 +196,6 @@ void mlx5_toggle_port_link(struct mlx5_core_dev *dev)
if (ps == MLX5_PORT_UP)
mlx5_set_port_admin_status(dev, MLX5_PORT_UP);
}
-EXPORT_SYMBOL_GPL(mlx5_toggle_port_link);
int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
enum mlx5_port_status status)
@@ -208,7 +209,6 @@ int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
return mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PAOS, 0, 1);
}
-EXPORT_SYMBOL_GPL(mlx5_set_port_admin_status);
int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
enum mlx5_port_status *status)
@@ -225,7 +225,6 @@ int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
*status = MLX5_GET(paos_reg, out, admin_status);
return 0;
}
-EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status);
static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, u16 *admin_mtu,
u16 *max_mtu, u16 *oper_mtu, u8 port)
@@ -255,7 +254,6 @@ int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port)
return mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PMTU, 0, 1);
}
-EXPORT_SYMBOL_GPL(mlx5_set_port_mtu);
void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu,
u8 port)
@@ -291,11 +289,11 @@ int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num)
}
static int mlx5_query_module_id(struct mlx5_core_dev *dev, int module_num,
- u8 *module_id)
+ u8 *module_id, u8 *status)
{
u32 in[MLX5_ST_SZ_DW(mcia_reg)] = {};
u32 out[MLX5_ST_SZ_DW(mcia_reg)];
- int err, status;
+ int err;
u8 *ptr;
MLX5_SET(mcia_reg, in, i2c_device_address, MLX5_I2C_ADDR_LOW);
@@ -310,12 +308,12 @@ static int mlx5_query_module_id(struct mlx5_core_dev *dev, int module_num,
if (err)
return err;
- status = MLX5_GET(mcia_reg, out, status);
- if (status) {
- mlx5_core_err(dev, "query_mcia_reg failed: status: 0x%x\n",
- status);
+ if (MLX5_GET(mcia_reg, out, status)) {
+ if (status)
+ *status = MLX5_GET(mcia_reg, out, status);
return -EIO;
}
+
ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0);
*module_id = ptr[0];
@@ -372,13 +370,14 @@ static int mlx5_mcia_max_bytes(struct mlx5_core_dev *dev)
}
static int mlx5_query_mcia(struct mlx5_core_dev *dev,
- struct mlx5_module_eeprom_query_params *params, u8 *data)
+ struct mlx5_module_eeprom_query_params *params,
+ u8 *data, u8 *status)
{
u32 in[MLX5_ST_SZ_DW(mcia_reg)] = {};
u32 out[MLX5_ST_SZ_DW(mcia_reg)];
- int status, err;
void *ptr;
u16 size;
+ int err;
size = min_t(int, params->size, mlx5_mcia_max_bytes(dev));
@@ -394,12 +393,9 @@ static int mlx5_query_mcia(struct mlx5_core_dev *dev,
if (err)
return err;
- status = MLX5_GET(mcia_reg, out, status);
- if (status) {
- mlx5_core_err(dev, "query_mcia_reg failed: status: 0x%x\n",
- status);
+ *status = MLX5_GET(mcia_reg, out, status);
+ if (*status)
return -EIO;
- }
ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0);
memcpy(data, ptr, size);
@@ -408,7 +404,7 @@ static int mlx5_query_mcia(struct mlx5_core_dev *dev,
}
int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
- u16 offset, u16 size, u8 *data)
+ u16 offset, u16 size, u8 *data, u8 *status)
{
struct mlx5_module_eeprom_query_params query = {0};
u8 module_id;
@@ -418,7 +414,8 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
if (err)
return err;
- err = mlx5_query_module_id(dev, query.module_number, &module_id);
+ err = mlx5_query_module_id(dev, query.module_number, &module_id,
+ status);
if (err)
return err;
@@ -443,13 +440,12 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
query.size = size;
query.offset = offset;
- return mlx5_query_mcia(dev, &query, data);
+ return mlx5_query_mcia(dev, &query, data, status);
}
-EXPORT_SYMBOL_GPL(mlx5_query_module_eeprom);
int mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev,
struct mlx5_module_eeprom_query_params *params,
- u8 *data)
+ u8 *data, u8 *status)
{
int err;
@@ -463,9 +459,8 @@ int mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev,
return -EINVAL;
}
- return mlx5_query_mcia(dev, params, data);
+ return mlx5_query_mcia(dev, params, data, status);
}
-EXPORT_SYMBOL_GPL(mlx5_query_module_eeprom_by_page);
static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc,
int pvlc_size, u8 local_port)
@@ -516,7 +511,6 @@ int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause)
return mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PFCC, 0, 1);
}
-EXPORT_SYMBOL_GPL(mlx5_set_port_pause);
int mlx5_query_port_pause(struct mlx5_core_dev *dev,
u32 *rx_pause, u32 *tx_pause)
@@ -536,7 +530,6 @@ int mlx5_query_port_pause(struct mlx5_core_dev *dev,
return 0;
}
-EXPORT_SYMBOL_GPL(mlx5_query_port_pause);
int mlx5_set_port_stall_watermark(struct mlx5_core_dev *dev,
u16 stall_critical_watermark,
@@ -595,7 +588,6 @@ int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx)
return mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PFCC, 0, 1);
}
-EXPORT_SYMBOL_GPL(mlx5_set_port_pfc);
int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx)
{
@@ -614,7 +606,6 @@ int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx)
return 0;
}
-EXPORT_SYMBOL_GPL(mlx5_query_port_pfc);
int mlx5_max_tc(struct mlx5_core_dev *mdev)
{
@@ -665,7 +656,6 @@ int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc)
return 0;
}
-EXPORT_SYMBOL_GPL(mlx5_set_port_prio_tc);
int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
u8 prio, u8 *tc)
@@ -687,7 +677,6 @@ int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
return err;
}
-EXPORT_SYMBOL_GPL(mlx5_query_port_prio_tc);
static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
int inlen)
@@ -726,7 +715,6 @@ int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group)
return mlx5_set_port_qetcr_reg(mdev, in, sizeof(in));
}
-EXPORT_SYMBOL_GPL(mlx5_set_port_tc_group);
int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
u8 tc, u8 *tc_group)
@@ -747,7 +735,6 @@ int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
return 0;
}
-EXPORT_SYMBOL_GPL(mlx5_query_port_tc_group);
int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw)
{
@@ -761,7 +748,6 @@ int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw)
return mlx5_set_port_qetcr_reg(mdev, in, sizeof(in));
}
-EXPORT_SYMBOL_GPL(mlx5_set_port_tc_bw_alloc);
int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
u8 tc, u8 *bw_pct)
@@ -782,7 +768,6 @@ int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
return 0;
}
-EXPORT_SYMBOL_GPL(mlx5_query_port_tc_bw_alloc);
int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
u8 *max_bw_value,
@@ -806,7 +791,6 @@ int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
return mlx5_set_port_qetcr_reg(mdev, in, sizeof(in));
}
-EXPORT_SYMBOL_GPL(mlx5_modify_port_ets_rate_limit);
int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
u8 *max_bw_value,
@@ -832,7 +816,6 @@ int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
return 0;
}
-EXPORT_SYMBOL_GPL(mlx5_query_port_ets_rate_limit);
int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode)
{
@@ -843,7 +826,6 @@ int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode)
MLX5_SET(set_wol_rol_in, in, wol_mode, wol_mode);
return mlx5_cmd_exec_in(mdev, set_wol_rol, in);
}
-EXPORT_SYMBOL_GPL(mlx5_set_port_wol);
int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode)
{
@@ -858,7 +840,6 @@ int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode)
return err;
}
-EXPORT_SYMBOL_GPL(mlx5_query_port_wol);
int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out, int outlen)
{
@@ -986,6 +967,26 @@ int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state)
return err;
}
+int mlx5_query_port_buffer_ownership(struct mlx5_core_dev *mdev,
+ u8 *buffer_ownership)
+{
+ u32 out[MLX5_ST_SZ_DW(pfcc_reg)] = {};
+ int err;
+
+ if (!MLX5_CAP_PCAM_FEATURE(mdev, buffer_ownership)) {
+ *buffer_ownership = MLX5_BUF_OWNERSHIP_UNKNOWN;
+ return 0;
+ }
+
+ err = mlx5_query_pfcc_reg(mdev, out, sizeof(out));
+ if (err)
+ return err;
+
+ *buffer_ownership = MLX5_GET(pfcc_reg, out, buf_ownership);
+
+ return 0;
+}
+
int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, u8 dscp, u8 prio)
{
int sz = MLX5_ST_SZ_BYTES(qpdpm_reg);
@@ -1056,53 +1057,58 @@ out:
}
/* speed in units of 1Mb */
-static const u32 mlx5e_link_speed[MLX5E_LINK_MODES_NUMBER] = {
- [MLX5E_1000BASE_CX_SGMII] = 1000,
- [MLX5E_1000BASE_KX] = 1000,
- [MLX5E_10GBASE_CX4] = 10000,
- [MLX5E_10GBASE_KX4] = 10000,
- [MLX5E_10GBASE_KR] = 10000,
- [MLX5E_20GBASE_KR2] = 20000,
- [MLX5E_40GBASE_CR4] = 40000,
- [MLX5E_40GBASE_KR4] = 40000,
- [MLX5E_56GBASE_R4] = 56000,
- [MLX5E_10GBASE_CR] = 10000,
- [MLX5E_10GBASE_SR] = 10000,
- [MLX5E_10GBASE_ER] = 10000,
- [MLX5E_40GBASE_SR4] = 40000,
- [MLX5E_40GBASE_LR4] = 40000,
- [MLX5E_50GBASE_SR2] = 50000,
- [MLX5E_100GBASE_CR4] = 100000,
- [MLX5E_100GBASE_SR4] = 100000,
- [MLX5E_100GBASE_KR4] = 100000,
- [MLX5E_100GBASE_LR4] = 100000,
- [MLX5E_100BASE_TX] = 100,
- [MLX5E_1000BASE_T] = 1000,
- [MLX5E_10GBASE_T] = 10000,
- [MLX5E_25GBASE_CR] = 25000,
- [MLX5E_25GBASE_KR] = 25000,
- [MLX5E_25GBASE_SR] = 25000,
- [MLX5E_50GBASE_CR2] = 50000,
- [MLX5E_50GBASE_KR2] = 50000,
+static const struct mlx5_link_info mlx5e_link_info[MLX5E_LINK_MODES_NUMBER] = {
+ [MLX5E_1000BASE_CX_SGMII] = {.speed = 1000, .lanes = 1},
+ [MLX5E_1000BASE_KX] = {.speed = 1000, .lanes = 1},
+ [MLX5E_10GBASE_CX4] = {.speed = 10000, .lanes = 4},
+ [MLX5E_10GBASE_KX4] = {.speed = 10000, .lanes = 4},
+ [MLX5E_10GBASE_KR] = {.speed = 10000, .lanes = 1},
+ [MLX5E_20GBASE_KR2] = {.speed = 20000, .lanes = 2},
+ [MLX5E_40GBASE_CR4] = {.speed = 40000, .lanes = 4},
+ [MLX5E_40GBASE_KR4] = {.speed = 40000, .lanes = 4},
+ [MLX5E_56GBASE_R4] = {.speed = 56000, .lanes = 4},
+ [MLX5E_10GBASE_CR] = {.speed = 10000, .lanes = 1},
+ [MLX5E_10GBASE_SR] = {.speed = 10000, .lanes = 1},
+ [MLX5E_10GBASE_ER] = {.speed = 10000, .lanes = 1},
+ [MLX5E_40GBASE_SR4] = {.speed = 40000, .lanes = 4},
+ [MLX5E_40GBASE_LR4] = {.speed = 40000, .lanes = 4},
+ [MLX5E_50GBASE_SR2] = {.speed = 50000, .lanes = 2},
+ [MLX5E_100GBASE_CR4] = {.speed = 100000, .lanes = 4},
+ [MLX5E_100GBASE_SR4] = {.speed = 100000, .lanes = 4},
+ [MLX5E_100GBASE_KR4] = {.speed = 100000, .lanes = 4},
+ [MLX5E_100GBASE_LR4] = {.speed = 100000, .lanes = 4},
+ [MLX5E_100BASE_TX] = {.speed = 100, .lanes = 1},
+ [MLX5E_1000BASE_T] = {.speed = 1000, .lanes = 1},
+ [MLX5E_10GBASE_T] = {.speed = 10000, .lanes = 1},
+ [MLX5E_25GBASE_CR] = {.speed = 25000, .lanes = 1},
+ [MLX5E_25GBASE_KR] = {.speed = 25000, .lanes = 1},
+ [MLX5E_25GBASE_SR] = {.speed = 25000, .lanes = 1},
+ [MLX5E_50GBASE_CR2] = {.speed = 50000, .lanes = 2},
+ [MLX5E_50GBASE_KR2] = {.speed = 50000, .lanes = 2},
};
-static const u32 mlx5e_ext_link_speed[MLX5E_EXT_LINK_MODES_NUMBER] = {
- [MLX5E_SGMII_100M] = 100,
- [MLX5E_1000BASE_X_SGMII] = 1000,
- [MLX5E_5GBASE_R] = 5000,
- [MLX5E_10GBASE_XFI_XAUI_1] = 10000,
- [MLX5E_40GBASE_XLAUI_4_XLPPI_4] = 40000,
- [MLX5E_25GAUI_1_25GBASE_CR_KR] = 25000,
- [MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2] = 50000,
- [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR] = 50000,
- [MLX5E_CAUI_4_100GBASE_CR4_KR4] = 100000,
- [MLX5E_100GAUI_2_100GBASE_CR2_KR2] = 100000,
- [MLX5E_200GAUI_4_200GBASE_CR4_KR4] = 200000,
- [MLX5E_400GAUI_8_400GBASE_CR8] = 400000,
- [MLX5E_100GAUI_1_100GBASE_CR_KR] = 100000,
- [MLX5E_200GAUI_2_200GBASE_CR2_KR2] = 200000,
- [MLX5E_400GAUI_4_400GBASE_CR4_KR4] = 400000,
- [MLX5E_800GAUI_8_800GBASE_CR8_KR8] = 800000,
+static const struct mlx5_link_info
+mlx5e_ext_link_info[MLX5E_EXT_LINK_MODES_NUMBER] = {
+ [MLX5E_SGMII_100M] = {.speed = 100, .lanes = 1},
+ [MLX5E_1000BASE_X_SGMII] = {.speed = 1000, .lanes = 1},
+ [MLX5E_5GBASE_R] = {.speed = 5000, .lanes = 1},
+ [MLX5E_10GBASE_XFI_XAUI_1] = {.speed = 10000, .lanes = 1},
+ [MLX5E_40GBASE_XLAUI_4_XLPPI_4] = {.speed = 40000, .lanes = 4},
+ [MLX5E_25GAUI_1_25GBASE_CR_KR] = {.speed = 25000, .lanes = 1},
+ [MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2] = {.speed = 50000, .lanes = 2},
+ [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR] = {.speed = 50000, .lanes = 1},
+ [MLX5E_CAUI_4_100GBASE_CR4_KR4] = {.speed = 100000, .lanes = 4},
+ [MLX5E_100GAUI_2_100GBASE_CR2_KR2] = {.speed = 100000, .lanes = 2},
+ [MLX5E_200GAUI_4_200GBASE_CR4_KR4] = {.speed = 200000, .lanes = 4},
+ [MLX5E_400GAUI_8_400GBASE_CR8] = {.speed = 400000, .lanes = 8},
+ [MLX5E_100GAUI_1_100GBASE_CR_KR] = {.speed = 100000, .lanes = 1},
+ [MLX5E_200GAUI_2_200GBASE_CR2_KR2] = {.speed = 200000, .lanes = 2},
+ [MLX5E_400GAUI_4_400GBASE_CR4_KR4] = {.speed = 400000, .lanes = 4},
+ [MLX5E_800GAUI_8_800GBASE_CR8_KR8] = {.speed = 800000, .lanes = 8},
+ [MLX5E_200GAUI_1_200GBASE_CR1_KR1] = {.speed = 200000, .lanes = 1},
+ [MLX5E_400GAUI_2_400GBASE_CR2_KR2] = {.speed = 400000, .lanes = 2},
+ [MLX5E_800GAUI_4_800GBASE_CR4_KR4] = {.speed = 800000, .lanes = 4},
+ [MLX5E_1600TAUI_8_1600TBASE_CR8_KR8] = {.speed = 1600000, .lanes = 8},
};
int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext,
@@ -1114,7 +1120,7 @@ int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext,
if (!eproto)
return -EINVAL;
- err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN, port);
+ err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN, port, 0);
if (err)
return err;
@@ -1140,54 +1146,65 @@ bool mlx5_ptys_ext_supported(struct mlx5_core_dev *mdev)
return !!eproto.cap;
}
-static void mlx5e_port_get_speed_arr(struct mlx5_core_dev *mdev,
- const u32 **arr, u32 *size,
- bool force_legacy)
+static void mlx5e_port_get_link_mode_info_arr(struct mlx5_core_dev *mdev,
+ const struct mlx5_link_info **arr,
+ u32 *size,
+ bool force_legacy)
{
bool ext = force_legacy ? false : mlx5_ptys_ext_supported(mdev);
- *size = ext ? ARRAY_SIZE(mlx5e_ext_link_speed) :
- ARRAY_SIZE(mlx5e_link_speed);
- *arr = ext ? mlx5e_ext_link_speed : mlx5e_link_speed;
+ *size = ext ? ARRAY_SIZE(mlx5e_ext_link_info) :
+ ARRAY_SIZE(mlx5e_link_info);
+ *arr = ext ? mlx5e_ext_link_info : mlx5e_link_info;
}
-u32 mlx5_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper,
- bool force_legacy)
+const struct mlx5_link_info *mlx5_port_ptys2info(struct mlx5_core_dev *mdev,
+ u32 eth_proto_oper,
+ bool force_legacy)
{
unsigned long temp = eth_proto_oper;
- const u32 *table;
- u32 speed = 0;
+ const struct mlx5_link_info *table;
u32 max_size;
int i;
- mlx5e_port_get_speed_arr(mdev, &table, &max_size, force_legacy);
+ mlx5e_port_get_link_mode_info_arr(mdev, &table, &max_size,
+ force_legacy);
i = find_first_bit(&temp, max_size);
- if (i < max_size)
- speed = table[i];
- return speed;
+
+ /* mlx5e_link_info has holes. Check that the speed
+ * is not zero as an indication of one.
+ */
+ if (i < max_size && table[i].speed)
+ return &table[i];
+
+ return NULL;
}
-u32 mlx5_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed,
- bool force_legacy)
+u32 mlx5_port_info2linkmodes(struct mlx5_core_dev *mdev,
+ struct mlx5_link_info *info,
+ bool force_legacy)
{
+ const struct mlx5_link_info *table;
u32 link_modes = 0;
- const u32 *table;
u32 max_size;
int i;
- mlx5e_port_get_speed_arr(mdev, &table, &max_size, force_legacy);
+ mlx5e_port_get_link_mode_info_arr(mdev, &table, &max_size,
+ force_legacy);
for (i = 0; i < max_size; ++i) {
- if (table[i] == speed)
- link_modes |= MLX5E_PROT_MASK(i);
+ if (table[i].speed == info->speed) {
+ if (!info->lanes || table[i].lanes == info->lanes)
+ link_modes |= MLX5E_PROT_MASK(i);
+ }
}
return link_modes;
}
int mlx5_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
{
+ const struct mlx5_link_info *table;
struct mlx5_port_eth_proto eproto;
u32 max_speed = 0;
- const u32 *table;
u32 max_size;
bool ext;
int err;
@@ -1198,10 +1215,10 @@ int mlx5_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
if (err)
return err;
- mlx5e_port_get_speed_arr(mdev, &table, &max_size, false);
+ mlx5e_port_get_link_mode_info_arr(mdev, &table, &max_size, false);
for (i = 0; i < max_size; ++i)
if (eproto.cap & MLX5E_PROT_MASK(i))
- max_speed = max(max_speed, table[i]);
+ max_speed = max(max_speed, table[i].speed);
*speed = max_speed;
return 0;
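Replacing the plain speed arrays with struct mlx5_link_info lets a (speed, lanes) pair be mapped back to a link-mode bitmask, where lanes == 0 means "any lane count" and zero-speed entries mark holes in the table. A stand-alone model with a three-entry illustrative table:

#include <stdio.h>

struct link_info { unsigned int speed; unsigned int lanes; };

static const struct link_info table[] = {
	[0] = { .speed = 25000,  .lanes = 1 },
	[1] = { 0 },				/* hole: unused bit position */
	[2] = { .speed = 100000, .lanes = 2 },
};

static unsigned int info2linkmodes(unsigned int speed, unsigned int lanes)
{
	unsigned int modes = 0;

	for (unsigned int i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		if (table[i].speed != speed)
			continue;
		if (!lanes || table[i].lanes == lanes)	/* lanes == 0: any lane count */
			modes |= 1u << i;
	}
	return modes;
}

int main(void)
{
	printf("100G, 2 lanes -> 0x%x\n", info2linkmodes(100000, 2));
	printf("100G, any     -> 0x%x\n", info2linkmodes(100000, 0));
	printf("25G, 4 lanes  -> 0x%x\n", info2linkmodes(25000, 4));	/* no match */
	return 0;
}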
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/qos.c
index 8bce730b5c5b..6be9981bb6b1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qos.c
@@ -28,6 +28,11 @@ int mlx5_qos_create_leaf_node(struct mlx5_core_dev *mdev, u32 parent_id,
{
u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
+ if (!mlx5_qos_element_type_supported(mdev,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_QUEUE_GROUP,
+ SCHEDULING_HIERARCHY_NIC))
+ return -EOPNOTSUPP;
+
MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_id);
MLX5_SET(scheduling_context, sched_ctx, element_type,
SCHEDULING_CONTEXT_ELEMENT_TYPE_QUEUE_GROUP);
@@ -44,6 +49,14 @@ int mlx5_qos_create_inner_node(struct mlx5_core_dev *mdev, u32 parent_id,
u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
void *attr;
+ if (!mlx5_qos_element_type_supported(mdev,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR,
+ SCHEDULING_HIERARCHY_NIC) ||
+ !mlx5_qos_tsar_type_supported(mdev,
+ TSAR_ELEMENT_TSAR_TYPE_DWRR,
+ SCHEDULING_HIERARCHY_NIC))
+ return -EOPNOTSUPP;
+
MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_id);
MLX5_SET(scheduling_context, sched_ctx, element_type,
SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
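The guards added above reject unsupported element and TSAR types with -EOPNOTSUPP before any scheduling-context command reaches the firmware. A stand-alone model of that capability gating, with illustrative mask names:

#include <stdio.h>
#include <errno.h>

#define CAP_MASK_TSAR        (1u << 0)
#define CAP_MASK_QUEUE_GROUP (1u << 1)

static int create_element(unsigned int nic_element_type_cap, unsigned int wanted_mask)
{
	if (!(nic_element_type_cap & wanted_mask))
		return -EOPNOTSUPP;	/* bail out before talking to firmware */
	/* ... build the scheduling context and issue the command here ... */
	return 0;
}

int main(void)
{
	unsigned int caps = CAP_MASK_TSAR;	/* device without QUEUE_GROUP support */

	printf("TSAR:        %d\n", create_element(caps, CAP_MASK_TSAR));
	printf("QUEUE_GROUP: %d\n", create_element(caps, CAP_MASK_QUEUE_GROUP));
	return 0;
}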
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
index a42f6cd99b74..5c552b71e371 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
@@ -118,8 +118,8 @@ static void mlx5_rdma_make_default_gid(struct mlx5_core_dev *dev, union ib_gid *
static int mlx5_rdma_add_roce_addr(struct mlx5_core_dev *dev)
{
+ u8 mac[ETH_ALEN] = {};
union ib_gid gid;
- u8 mac[ETH_ALEN];
mlx5_rdma_make_default_gid(dev, &gid);
return mlx5_core_roce_gid_set(dev, 0,
@@ -140,17 +140,17 @@ void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev)
mlx5_nic_vport_disable_roce(dev);
}
-void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
+int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
{
int err;
if (!MLX5_CAP_GEN(dev, roce))
- return;
+ return 0;
err = mlx5_nic_vport_enable_roce(dev);
if (err) {
mlx5_core_err(dev, "Failed to enable RoCE: %d\n", err);
- return;
+ return err;
}
err = mlx5_rdma_add_roce_addr(dev);
@@ -165,10 +165,11 @@ void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
goto del_roce_addr;
}
- return;
+ return err;
del_roce_addr:
mlx5_rdma_del_roce_addr(dev);
disable_roce:
mlx5_nic_vport_disable_roce(dev);
+ return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.h b/drivers/net/ethernet/mellanox/mlx5/core/rdma.h
index 750cff2a71a4..3d9e76c3d42f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.h
@@ -8,12 +8,12 @@
#ifdef CONFIG_MLX5_ESWITCH
-void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev);
+int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev);
void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev);
#else /* CONFIG_MLX5_ESWITCH */
-static inline void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) {}
+static inline int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev) {}
#endif /* CONFIG_MLX5_ESWITCH */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
index 9f8b4005f4bd..39a209b9b684 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
@@ -34,6 +34,68 @@
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
+bool mlx5_qos_tsar_type_supported(struct mlx5_core_dev *dev, int type, u8 hierarchy)
+{
+ int cap;
+
+ switch (hierarchy) {
+ case SCHEDULING_HIERARCHY_E_SWITCH:
+ cap = MLX5_CAP_QOS(dev, esw_tsar_type);
+ break;
+ case SCHEDULING_HIERARCHY_NIC:
+ cap = MLX5_CAP_QOS(dev, nic_tsar_type);
+ break;
+ default:
+ return false;
+ }
+
+ switch (type) {
+ case TSAR_ELEMENT_TSAR_TYPE_DWRR:
+ return cap & TSAR_TYPE_CAP_MASK_DWRR;
+ case TSAR_ELEMENT_TSAR_TYPE_ROUND_ROBIN:
+ return cap & TSAR_TYPE_CAP_MASK_ROUND_ROBIN;
+ case TSAR_ELEMENT_TSAR_TYPE_ETS:
+ return cap & TSAR_TYPE_CAP_MASK_ETS;
+ case TSAR_ELEMENT_TSAR_TYPE_TC_ARB:
+ return cap & TSAR_TYPE_CAP_MASK_TC_ARB;
+ }
+
+ return false;
+}
+
+bool mlx5_qos_element_type_supported(struct mlx5_core_dev *dev, int type, u8 hierarchy)
+{
+ int cap;
+
+ switch (hierarchy) {
+ case SCHEDULING_HIERARCHY_E_SWITCH:
+ cap = MLX5_CAP_QOS(dev, esw_element_type);
+ break;
+ case SCHEDULING_HIERARCHY_NIC:
+ cap = MLX5_CAP_QOS(dev, nic_element_type);
+ break;
+ default:
+ return false;
+ }
+
+ switch (type) {
+ case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR:
+ return cap & ELEMENT_TYPE_CAP_MASK_TSAR;
+ case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT:
+ return cap & ELEMENT_TYPE_CAP_MASK_VPORT;
+ case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC:
+ return cap & ELEMENT_TYPE_CAP_MASK_VPORT_TC;
+ case SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC:
+ return cap & ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC;
+ case SCHEDULING_CONTEXT_ELEMENT_TYPE_QUEUE_GROUP:
+ return cap & ELEMENT_TYPE_CAP_MASK_QUEUE_GROUP;
+ case SCHEDULING_CONTEXT_ELEMENT_TYPE_RATE_LIMIT:
+ return cap & ELEMENT_TYPE_CAP_MASK_RATE_LIMIT;
+ }
+
+ return false;
+}
+
/* Scheduling element fw management */
int mlx5_create_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
void *ctx, u32 *element_id)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
index 99219ea52c4b..f310bde3d11f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
@@ -16,7 +16,6 @@ struct mlx5_sf_dev_table {
struct xarray devices;
phys_addr_t base_address;
u64 sf_bar_length;
- struct notifier_block nb;
struct workqueue_struct *active_wq;
struct work_struct work;
u8 stop_active_wq:1;
@@ -156,18 +155,23 @@ static void mlx5_sf_dev_del(struct mlx5_core_dev *dev, struct mlx5_sf_dev *sf_de
static int
mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_code, void *data)
{
- struct mlx5_sf_dev_table *table = container_of(nb, struct mlx5_sf_dev_table, nb);
+ struct mlx5_core_dev *dev = container_of(nb, struct mlx5_core_dev,
+ priv.sf_dev_nb);
+ struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;
const struct mlx5_vhca_state_event *event = data;
struct mlx5_sf_dev *sf_dev;
u16 max_functions;
u16 sf_index;
u16 base_id;
- max_functions = mlx5_sf_max_functions(table->dev);
+ if (!table)
+ return 0;
+
+ max_functions = mlx5_sf_max_functions(dev);
if (!max_functions)
return 0;
- base_id = mlx5_sf_start_function_id(table->dev);
+ base_id = mlx5_sf_start_function_id(dev);
if (event->function_id < base_id || event->function_id >= (base_id + max_functions))
return 0;
@@ -177,19 +181,19 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_
case MLX5_VHCA_STATE_INVALID:
case MLX5_VHCA_STATE_ALLOCATED:
if (sf_dev)
- mlx5_sf_dev_del(table->dev, sf_dev, sf_index);
+ mlx5_sf_dev_del(dev, sf_dev, sf_index);
break;
case MLX5_VHCA_STATE_TEARDOWN_REQUEST:
if (sf_dev)
- mlx5_sf_dev_del(table->dev, sf_dev, sf_index);
+ mlx5_sf_dev_del(dev, sf_dev, sf_index);
else
- mlx5_core_err(table->dev,
+ mlx5_core_err(dev,
"SF DEV: teardown state for invalid dev index=%d sfnum=0x%x\n",
sf_index, event->sw_function_id);
break;
case MLX5_VHCA_STATE_ACTIVE:
if (!sf_dev)
- mlx5_sf_dev_add(table->dev, sf_index, event->function_id,
+ mlx5_sf_dev_add(dev, sf_index, event->function_id,
event->sw_function_id);
break;
default:
@@ -315,6 +319,15 @@ static void mlx5_sf_dev_destroy_active_works(struct mlx5_sf_dev_table *table)
}
}
+int mlx5_sf_dev_notifier_init(struct mlx5_core_dev *dev)
+{
+ if (mlx5_core_is_sf(dev))
+ return 0;
+
+ dev->priv.sf_dev_nb.notifier_call = mlx5_sf_dev_state_change_handler;
+ return mlx5_vhca_event_notifier_register(dev, &dev->priv.sf_dev_nb);
+}
+
void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
{
struct mlx5_sf_dev_table *table;
@@ -329,17 +342,12 @@ void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
goto table_err;
}
- table->nb.notifier_call = mlx5_sf_dev_state_change_handler;
table->dev = dev;
table->sf_bar_length = 1 << (MLX5_CAP_GEN(dev, log_min_sf_size) + 12);
table->base_address = pci_resource_start(dev->pdev, 2);
xa_init(&table->devices);
dev->priv.sf_dev_table = table;
- err = mlx5_vhca_event_notifier_register(dev, &table->nb);
- if (err)
- goto vhca_err;
-
err = mlx5_sf_dev_create_active_works(table);
if (err)
goto add_active_err;
@@ -351,10 +359,8 @@ void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
arm_err:
mlx5_sf_dev_destroy_active_works(table);
-add_active_err:
- mlx5_vhca_event_notifier_unregister(dev, &table->nb);
mlx5_vhca_event_work_queues_flush(dev);
-vhca_err:
+add_active_err:
kfree(table);
dev->priv.sf_dev_table = NULL;
table_err:
@@ -372,6 +378,14 @@ static void mlx5_sf_dev_destroy_all(struct mlx5_sf_dev_table *table)
}
}
+void mlx5_sf_dev_notifier_cleanup(struct mlx5_core_dev *dev)
+{
+ if (mlx5_core_is_sf(dev))
+ return;
+
+ mlx5_vhca_event_notifier_unregister(dev, &dev->priv.sf_dev_nb);
+}
+
void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev)
{
struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;
@@ -380,8 +394,6 @@ void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev)
return;
mlx5_sf_dev_destroy_active_works(table);
- mlx5_vhca_event_notifier_unregister(dev, &table->nb);
- mlx5_vhca_event_work_queues_flush(dev);
/* Now that event handler is not running, it is safe to destroy
* the sf device without race.
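With the notifier_block moved from the table into dev->priv, container_of() recovers the mlx5_core_dev in the callback and a missing table is simply ignored, so the notifier can be registered once at mdev init and outlive table create/destroy. A stand-alone model of that container_of dispatch, using simplified stand-in types:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct notifier_block { int (*call)(struct notifier_block *nb, void *data); };

struct fake_table { int dummy; };

struct fake_dev {
	struct notifier_block sf_dev_nb;
	struct fake_table *sf_dev_table;	/* may be NULL */
};

static int state_change_handler(struct notifier_block *nb, void *data)
{
	struct fake_dev *dev = container_of(nb, struct fake_dev, sf_dev_nb);

	if (!dev->sf_dev_table)
		return 0;	/* notifier registered, table not created yet */
	printf("handling event for dev %p\n", (void *)dev);
	return 0;
}

int main(void)
{
	struct fake_dev dev = { .sf_dev_nb = { .call = state_change_handler } };

	dev.sf_dev_nb.call(&dev.sf_dev_nb, NULL);	/* ignored: no table */
	dev.sf_dev_table = &(struct fake_table){ 0 };
	dev.sf_dev_nb.call(&dev.sf_dev_nb, NULL);	/* handled */
	return 0;
}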
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h
index b99131e95e37..3ab0449c770c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h
@@ -25,7 +25,9 @@ struct mlx5_sf_peer_devlink_event_ctx {
int err;
};
+int mlx5_sf_dev_notifier_init(struct mlx5_core_dev *dev);
void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev);
+void mlx5_sf_dev_notifier_cleanup(struct mlx5_core_dev *dev);
void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev);
int mlx5_sf_driver_register(void);
@@ -35,10 +37,19 @@ bool mlx5_sf_dev_allocated(const struct mlx5_core_dev *dev);
#else
+static inline int mlx5_sf_dev_notifier_init(struct mlx5_core_dev *dev)
+{
+ return 0;
+}
+
static inline void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
{
}
+static inline void mlx5_sf_dev_notifier_cleanup(struct mlx5_core_dev *dev)
+{
+}
+
static inline void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev)
{
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/diag/dev_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/diag/dev_tracepoint.h
index 7f7c9af5deed..9b0f44253f33 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/diag/dev_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/diag/dev_tracepoint.h
@@ -22,13 +22,13 @@ DECLARE_EVENT_CLASS(mlx5_sf_dev_template,
__field(u16, hw_fn_id)
__field(u32, sfnum)
),
- TP_fast_assign(__assign_str(devname, dev_name(dev->device));
+ TP_fast_assign(__assign_str(devname);
__entry->sfdev = sfdev;
__entry->aux_id = aux_id;
__entry->hw_fn_id = sfdev->fn_id;
__entry->sfnum = sfdev->sfnum;
),
- TP_printk("(%s) sfdev=%pK aux_id=%d hw_id=0x%x sfnum=%u\n",
+ TP_printk("(%s) sfdev=%p aux_id=%d hw_id=0x%x sfnum=%u\n",
__get_str(devname), __entry->sfdev,
__entry->aux_id, __entry->hw_fn_id,
__entry->sfnum)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
index 7ebe71280827..b706f1486504 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
@@ -60,6 +60,13 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia
goto remap_err;
}
+ /* Peer devlink logic expects to work on an unregistered devlink instance. */
+ err = mlx5_core_peer_devlink_set(sf_dev, devlink);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_core_peer_devlink_set err=%d\n", err);
+ goto peer_devlink_set_err;
+ }
+
if (MLX5_ESWITCH_MANAGER(sf_dev->parent_mdev))
err = mlx5_init_one_light(mdev);
else
@@ -69,20 +76,10 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia
goto init_one_err;
}
- err = mlx5_core_peer_devlink_set(sf_dev, devlink);
- if (err) {
- mlx5_core_warn(mdev, "mlx5_core_peer_devlink_set err=%d\n", err);
- goto peer_devlink_set_err;
- }
-
return 0;
-peer_devlink_set_err:
- if (mlx5_dev_is_lightweight(sf_dev->mdev))
- mlx5_uninit_one_light(sf_dev->mdev);
- else
- mlx5_uninit_one(sf_dev->mdev);
init_one_err:
+peer_devlink_set_err:
iounmap(mdev->iseg);
remap_err:
mlx5_mdev_uninit(mdev);
@@ -115,6 +112,7 @@ static void mlx5_sf_dev_shutdown(struct auxiliary_device *adev)
struct mlx5_core_dev *mdev = sf_dev->mdev;
set_bit(MLX5_BREAK_FW_WAIT, &mdev->intf_state);
+ mlx5_drain_health_wq(mdev);
mlx5_unload_one(mdev, false);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
index 6c11e075cab0..b82323b8449e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
@@ -31,9 +31,6 @@ struct mlx5_sf_table {
struct mlx5_core_dev *dev; /* To refer from notifier context. */
struct xarray function_ids; /* function id based lookup. */
struct mutex sf_state_lock; /* Serializes sf state among user cmds & vhca event handler. */
- struct notifier_block esw_nb;
- struct notifier_block vhca_nb;
- struct notifier_block mdev_nb;
};
static struct mlx5_sf *
@@ -161,6 +158,7 @@ int mlx5_devlink_sf_port_fn_state_get(struct devlink_port *dl_port,
static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf,
struct netlink_ext_ack *extack)
{
+ struct mlx5_vport *vport;
int err;
if (mlx5_sf_is_active(sf))
@@ -170,6 +168,13 @@ static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf,
return -EBUSY;
}
+ vport = mlx5_devlink_port_vport_get(&sf->dl_port.dl_port);
+ if (!vport->max_eqs_set && MLX5_CAP_GEN_2(dev, max_num_eqs_24b)) {
+ err = mlx5_devlink_port_fn_max_io_eqs_set_sf_default(&sf->dl_port.dl_port,
+ extack);
+ if (err)
+ return err;
+ }
err = mlx5_cmd_sf_enable_hca(dev, sf->hw_fn_id);
if (err)
return err;
@@ -249,6 +254,7 @@ static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
return 0;
esw_err:
+ mlx5_sf_function_id_erase(table, sf);
mlx5_sf_free(table, sf);
return err;
}
@@ -276,7 +282,7 @@ mlx5_sf_new_check_attr(struct mlx5_core_dev *dev, const struct devlink_port_new_
NL_SET_ERR_MSG_MOD(extack, "External controller is unsupported");
return -EOPNOTSUPP;
}
- if (new_attr->pfnum != mlx5_get_dev_index(dev)) {
+ if (new_attr->pfnum != PCI_FUNC(dev->pdev->devfn)) {
NL_SET_ERR_MSG_MOD(extack, "Invalid pfnum supplied");
return -EOPNOTSUPP;
}
@@ -318,7 +324,11 @@ int mlx5_devlink_sf_port_new(struct devlink *devlink,
static void mlx5_sf_dealloc(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{
+ struct mlx5_vport *vport;
+
mutex_lock(&table->sf_state_lock);
+ vport = mlx5_devlink_port_vport_get(&sf->dl_port.dl_port);
+ vport->max_eqs_set = false;
mlx5_sf_function_id_erase(table, sf);
@@ -378,11 +388,16 @@ static bool mlx5_sf_state_update_check(const struct mlx5_sf *sf, u8 new_state)
static int mlx5_sf_vhca_event(struct notifier_block *nb, unsigned long opcode, void *data)
{
- struct mlx5_sf_table *table = container_of(nb, struct mlx5_sf_table, vhca_nb);
+ struct mlx5_core_dev *dev = container_of(nb, struct mlx5_core_dev,
+ priv.sf_table_vhca_nb);
+ struct mlx5_sf_table *table = dev->priv.sf_table;
const struct mlx5_vhca_state_event *event = data;
bool update = false;
struct mlx5_sf *sf;
+ if (!table)
+ return 0;
+
mutex_lock(&table->sf_state_lock);
sf = mlx5_sf_lookup_by_function_id(table, event->function_id);
if (!sf)
@@ -394,7 +409,7 @@ static int mlx5_sf_vhca_event(struct notifier_block *nb, unsigned long opcode, v
update = mlx5_sf_state_update_check(sf, event->new_vhca_state);
if (update)
sf->hw_state = event->new_vhca_state;
- trace_mlx5_sf_update_state(table->dev, sf->port_index, sf->controller,
+ trace_mlx5_sf_update_state(dev, sf->port_index, sf->controller,
sf->hw_fn_id, sf->hw_state);
unlock:
mutex_unlock(&table->sf_state_lock);
@@ -412,12 +427,16 @@ static void mlx5_sf_del_all(struct mlx5_sf_table *table)
static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, void *data)
{
- struct mlx5_sf_table *table = container_of(nb, struct mlx5_sf_table, esw_nb);
+ struct mlx5_core_dev *dev = container_of(nb, struct mlx5_core_dev,
+ priv.sf_table_esw_nb);
const struct mlx5_esw_event_info *mode = data;
+ if (!dev->priv.sf_table)
+ return 0;
+
switch (mode->new_mode) {
case MLX5_ESWITCH_LEGACY:
- mlx5_sf_del_all(table);
+ mlx5_sf_del_all(dev->priv.sf_table);
break;
default:
break;
@@ -428,15 +447,16 @@ static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, voi
static int mlx5_sf_mdev_event(struct notifier_block *nb, unsigned long event, void *data)
{
- struct mlx5_sf_table *table = container_of(nb, struct mlx5_sf_table, mdev_nb);
+ struct mlx5_core_dev *dev = container_of(nb, struct mlx5_core_dev,
+ priv.sf_table_mdev_nb);
struct mlx5_sf_peer_devlink_event_ctx *event_ctx = data;
+ struct mlx5_sf_table *table = dev->priv.sf_table;
int ret = NOTIFY_DONE;
struct mlx5_sf *sf;
- if (event != MLX5_DRIVER_EVENT_SF_PEER_DEVLINK)
+ if (!table || event != MLX5_DRIVER_EVENT_SF_PEER_DEVLINK)
return NOTIFY_DONE;
-
mutex_lock(&table->sf_state_lock);
sf = mlx5_sf_lookup_by_function_id(table, event_ctx->fn_id);
if (!sf)
@@ -451,10 +471,40 @@ out:
return ret;
}
+int mlx5_sf_notifiers_init(struct mlx5_core_dev *dev)
+{
+ int err;
+
+ if (mlx5_core_is_sf(dev))
+ return 0;
+
+ dev->priv.sf_table_esw_nb.notifier_call = mlx5_sf_esw_event;
+ err = mlx5_esw_event_notifier_register(dev, &dev->priv.sf_table_esw_nb);
+ if (err)
+ return err;
+
+ dev->priv.sf_table_vhca_nb.notifier_call = mlx5_sf_vhca_event;
+ err = mlx5_vhca_event_notifier_register(dev,
+ &dev->priv.sf_table_vhca_nb);
+ if (err)
+ goto vhca_err;
+
+ dev->priv.sf_table_mdev_nb.notifier_call = mlx5_sf_mdev_event;
+ err = mlx5_blocking_notifier_register(dev, &dev->priv.sf_table_mdev_nb);
+ if (err)
+ goto mdev_err;
+
+ return 0;
+mdev_err:
+ mlx5_vhca_event_notifier_unregister(dev, &dev->priv.sf_table_vhca_nb);
+vhca_err:
+ mlx5_esw_event_notifier_unregister(dev, &dev->priv.sf_table_esw_nb);
+ return err;
+}
+
int mlx5_sf_table_init(struct mlx5_core_dev *dev)
{
struct mlx5_sf_table *table;
- int err;
if (!mlx5_sf_table_supported(dev) || !mlx5_vhca_event_supported(dev))
return 0;
@@ -467,28 +517,18 @@ int mlx5_sf_table_init(struct mlx5_core_dev *dev)
table->dev = dev;
xa_init(&table->function_ids);
dev->priv.sf_table = table;
- table->esw_nb.notifier_call = mlx5_sf_esw_event;
- err = mlx5_esw_event_notifier_register(dev->priv.eswitch, &table->esw_nb);
- if (err)
- goto reg_err;
-
- table->vhca_nb.notifier_call = mlx5_sf_vhca_event;
- err = mlx5_vhca_event_notifier_register(table->dev, &table->vhca_nb);
- if (err)
- goto vhca_err;
-
- table->mdev_nb.notifier_call = mlx5_sf_mdev_event;
- mlx5_blocking_notifier_register(dev, &table->mdev_nb);
return 0;
+}
-vhca_err:
- mlx5_esw_event_notifier_unregister(dev->priv.eswitch, &table->esw_nb);
-reg_err:
- mutex_destroy(&table->sf_state_lock);
- kfree(table);
- dev->priv.sf_table = NULL;
- return err;
+void mlx5_sf_notifiers_cleanup(struct mlx5_core_dev *dev)
+{
+ if (mlx5_core_is_sf(dev))
+ return;
+
+ mlx5_blocking_notifier_unregister(dev, &dev->priv.sf_table_mdev_nb);
+ mlx5_vhca_event_notifier_unregister(dev, &dev->priv.sf_table_vhca_nb);
+ mlx5_esw_event_notifier_unregister(dev, &dev->priv.sf_table_esw_nb);
}
void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev)
@@ -498,10 +538,17 @@ void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev)
if (!table)
return;
- mlx5_blocking_notifier_unregister(dev, &table->mdev_nb);
- mlx5_vhca_event_notifier_unregister(table->dev, &table->vhca_nb);
- mlx5_esw_event_notifier_unregister(dev->priv.eswitch, &table->esw_nb);
mutex_destroy(&table->sf_state_lock);
WARN_ON(!xa_empty(&table->function_ids));
kfree(table);
}
+
+bool mlx5_sf_table_empty(const struct mlx5_core_dev *dev)
+{
+ struct mlx5_sf_table *table = dev->priv.sf_table;
+
+ if (!table)
+ return true;
+
+ return xa_empty(&table->function_ids);
+}
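
The devlink.c changes above move the esw/vhca/mdev notifier blocks from the SF table into the per-device private data, so each handler recovers the device with container_of() on its embedded notifier_block and bails out if the table is not (or no longer) allocated. A minimal sketch of that pattern with placeholder types, not the mlx5 structures:

/* Sketch only; demo_* names are invented for illustration. */
#include <linux/notifier.h>
#include <linux/container_of.h>

struct demo_dev {
	struct notifier_block state_nb;	/* registered once, for the device lifetime */
	void *table;			/* NULL whenever the table is not allocated */
};

static int demo_state_event(struct notifier_block *nb, unsigned long event,
			    void *data)
{
	struct demo_dev *dev = container_of(nb, struct demo_dev, state_nb);

	if (!dev->table)	/* notifier outlives the table, so check first */
		return NOTIFY_DONE;

	/* ... handle the event against dev->table ... */
	return NOTIFY_OK;
}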
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/diag/sf_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/diag/sf_tracepoint.h
index 8bf1cd90930d..302ce00da5a9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/diag/sf_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/diag/sf_tracepoint.h
@@ -24,7 +24,7 @@ TRACE_EVENT(mlx5_sf_add,
__field(u16, hw_fn_id)
__field(u32, sfnum)
),
- TP_fast_assign(__assign_str(devname, dev_name(dev->device));
+ TP_fast_assign(__assign_str(devname);
__entry->port_index = port_index;
__entry->controller = controller;
__entry->hw_fn_id = hw_fn_id;
@@ -46,7 +46,7 @@ TRACE_EVENT(mlx5_sf_free,
__field(u32, controller)
__field(u16, hw_fn_id)
),
- TP_fast_assign(__assign_str(devname, dev_name(dev->device));
+ TP_fast_assign(__assign_str(devname);
__entry->port_index = port_index;
__entry->controller = controller;
__entry->hw_fn_id = hw_fn_id;
@@ -67,7 +67,7 @@ TRACE_EVENT(mlx5_sf_hwc_alloc,
__field(u16, hw_fn_id)
__field(u32, sfnum)
),
- TP_fast_assign(__assign_str(devname, dev_name(dev->device));
+ TP_fast_assign(__assign_str(devname);
__entry->controller = controller;
__entry->hw_fn_id = hw_fn_id;
__entry->sfnum = sfnum;
@@ -84,7 +84,7 @@ TRACE_EVENT(mlx5_sf_hwc_free,
TP_STRUCT__entry(__string(devname, dev_name(dev->device))
__field(u16, hw_fn_id)
),
- TP_fast_assign(__assign_str(devname, dev_name(dev->device));
+ TP_fast_assign(__assign_str(devname);
__entry->hw_fn_id = hw_fn_id;
),
TP_printk("(%s) hw_id=0x%x\n", __get_str(devname), __entry->hw_fn_id)
@@ -97,7 +97,7 @@ TRACE_EVENT(mlx5_sf_hwc_deferred_free,
TP_STRUCT__entry(__string(devname, dev_name(dev->device))
__field(u16, hw_fn_id)
),
- TP_fast_assign(__assign_str(devname, dev_name(dev->device));
+ TP_fast_assign(__assign_str(devname);
__entry->hw_fn_id = hw_fn_id;
),
TP_printk("(%s) hw_id=0x%x\n", __get_str(devname), __entry->hw_fn_id)
@@ -113,7 +113,7 @@ DECLARE_EVENT_CLASS(mlx5_sf_state_template,
__field(unsigned int, port_index)
__field(u32, controller)
__field(u16, hw_fn_id)),
- TP_fast_assign(__assign_str(devname, dev_name(dev->device));
+ TP_fast_assign(__assign_str(devname);
__entry->port_index = port_index;
__entry->controller = controller;
__entry->hw_fn_id = hw_fn_id;
@@ -152,7 +152,7 @@ TRACE_EVENT(mlx5_sf_update_state,
__field(u16, hw_fn_id)
__field(u8, state)
),
- TP_fast_assign(__assign_str(devname, dev_name(dev->device));
+ TP_fast_assign(__assign_str(devname);
__entry->port_index = port_index;
__entry->controller = controller;
__entry->hw_fn_id = hw_fn_id;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/diag/vhca_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/diag/vhca_tracepoint.h
index fd814a190b8b..6352cb004a18 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/diag/vhca_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/diag/vhca_tracepoint.h
@@ -20,7 +20,7 @@ TRACE_EVENT(mlx5_sf_vhca_event,
__field(u32, sfnum)
__field(u8, vhca_state)
),
- TP_fast_assign(__assign_str(devname, dev_name(dev->device));
+ TP_fast_assign(__assign_str(devname);
__entry->hw_fn_id = event->function_id;
__entry->sfnum = event->sw_function_id;
__entry->vhca_state = event->new_vhca_state;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
index 1f613320fe07..bd968f3b3855 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
@@ -30,9 +30,7 @@ enum mlx5_sf_hwc_index {
};
struct mlx5_sf_hw_table {
- struct mlx5_core_dev *dev;
struct mutex table_lock; /* Serializes sf deletion and vhca state change handler. */
- struct notifier_block vhca_nb;
struct mlx5_sf_hwc_table hwc[MLX5_SF_HWC_MAX];
};
@@ -71,14 +69,16 @@ mlx5_sf_table_fn_to_hwc(struct mlx5_sf_hw_table *table, u16 fn_id)
return NULL;
}
-static int mlx5_sf_hw_table_id_alloc(struct mlx5_sf_hw_table *table, u32 controller,
+static int mlx5_sf_hw_table_id_alloc(struct mlx5_core_dev *dev,
+ struct mlx5_sf_hw_table *table,
+ u32 controller,
u32 usr_sfnum)
{
struct mlx5_sf_hwc_table *hwc;
int free_idx = -1;
int i;
- hwc = mlx5_sf_controller_to_hwc(table->dev, controller);
+ hwc = mlx5_sf_controller_to_hwc(dev, controller);
if (!hwc->sfs)
return -ENOSPC;
@@ -100,11 +100,13 @@ static int mlx5_sf_hw_table_id_alloc(struct mlx5_sf_hw_table *table, u32 control
return free_idx;
}
-static void mlx5_sf_hw_table_id_free(struct mlx5_sf_hw_table *table, u32 controller, int id)
+static void mlx5_sf_hw_table_id_free(struct mlx5_core_dev *dev,
+ struct mlx5_sf_hw_table *table,
+ u32 controller, int id)
{
struct mlx5_sf_hwc_table *hwc;
- hwc = mlx5_sf_controller_to_hwc(table->dev, controller);
+ hwc = mlx5_sf_controller_to_hwc(dev, controller);
hwc->sfs[id].allocated = false;
hwc->sfs[id].pending_delete = false;
}
@@ -120,7 +122,7 @@ int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 controller, u32 usr
return -EOPNOTSUPP;
mutex_lock(&table->table_lock);
- sw_id = mlx5_sf_hw_table_id_alloc(table, controller, usr_sfnum);
+ sw_id = mlx5_sf_hw_table_id_alloc(dev, table, controller, usr_sfnum);
if (sw_id < 0) {
err = sw_id;
goto exist_err;
@@ -151,7 +153,7 @@ int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 controller, u32 usr
vhca_err:
mlx5_cmd_dealloc_sf(dev, hw_fn_id);
err:
- mlx5_sf_hw_table_id_free(table, controller, sw_id);
+ mlx5_sf_hw_table_id_free(dev, table, controller, sw_id);
exist_err:
mutex_unlock(&table->table_lock);
return err;
@@ -165,7 +167,7 @@ void mlx5_sf_hw_table_sf_free(struct mlx5_core_dev *dev, u32 controller, u16 id)
mutex_lock(&table->table_lock);
hw_fn_id = mlx5_sf_sw_to_hw_id(dev, controller, id);
mlx5_cmd_dealloc_sf(dev, hw_fn_id);
- mlx5_sf_hw_table_id_free(table, controller, id);
+ mlx5_sf_hw_table_id_free(dev, table, controller, id);
mutex_unlock(&table->table_lock);
}
@@ -216,10 +218,12 @@ static void mlx5_sf_hw_table_hwc_dealloc_all(struct mlx5_core_dev *dev,
}
}
-static void mlx5_sf_hw_table_dealloc_all(struct mlx5_sf_hw_table *table)
+static void mlx5_sf_hw_table_dealloc_all(struct mlx5_core_dev *dev,
+ struct mlx5_sf_hw_table *table)
{
- mlx5_sf_hw_table_hwc_dealloc_all(table->dev, &table->hwc[MLX5_SF_HWC_EXTERNAL]);
- mlx5_sf_hw_table_hwc_dealloc_all(table->dev, &table->hwc[MLX5_SF_HWC_LOCAL]);
+ mlx5_sf_hw_table_hwc_dealloc_all(dev,
+ &table->hwc[MLX5_SF_HWC_EXTERNAL]);
+ mlx5_sf_hw_table_hwc_dealloc_all(dev, &table->hwc[MLX5_SF_HWC_LOCAL]);
}
static int mlx5_sf_hw_table_hwc_init(struct mlx5_sf_hwc_table *hwc, u16 max_fn, u16 base_id)
@@ -301,7 +305,6 @@ int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev)
}
mutex_init(&table->table_lock);
- table->dev = dev;
dev->priv.sf_hw_table = table;
base_id = mlx5_sf_start_function_id(dev);
@@ -338,19 +341,22 @@ void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev)
mlx5_sf_hw_table_hwc_cleanup(&table->hwc[MLX5_SF_HWC_LOCAL]);
mutex_destroy(&table->table_lock);
kfree(table);
+ dev->priv.sf_hw_table = NULL;
res_unregister:
mlx5_sf_hw_table_res_unregister(dev);
}
static int mlx5_sf_hw_vhca_event(struct notifier_block *nb, unsigned long opcode, void *data)
{
- struct mlx5_sf_hw_table *table = container_of(nb, struct mlx5_sf_hw_table, vhca_nb);
+ struct mlx5_core_dev *dev = container_of(nb, struct mlx5_core_dev,
+ priv.sf_hw_table_vhca_nb);
+ struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
const struct mlx5_vhca_state_event *event = data;
struct mlx5_sf_hwc_table *hwc;
struct mlx5_sf_hw *sf_hw;
u16 sw_id;
- if (event->new_vhca_state != MLX5_VHCA_STATE_ALLOCATED)
+ if (!table || event->new_vhca_state != MLX5_VHCA_STATE_ALLOCATED)
return 0;
hwc = mlx5_sf_table_fn_to_hwc(table, event->function_id);
@@ -365,20 +371,28 @@ static int mlx5_sf_hw_vhca_event(struct notifier_block *nb, unsigned long opcode
* Hence recycle the sf hardware id for reuse.
*/
if (sf_hw->allocated && sf_hw->pending_delete)
- mlx5_sf_hw_table_hwc_sf_free(table->dev, hwc, sw_id);
+ mlx5_sf_hw_table_hwc_sf_free(dev, hwc, sw_id);
mutex_unlock(&table->table_lock);
return 0;
}
-int mlx5_sf_hw_table_create(struct mlx5_core_dev *dev)
+int mlx5_sf_hw_notifier_init(struct mlx5_core_dev *dev)
{
- struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
-
- if (!table)
+ if (mlx5_core_is_sf(dev))
return 0;
- table->vhca_nb.notifier_call = mlx5_sf_hw_vhca_event;
- return mlx5_vhca_event_notifier_register(dev, &table->vhca_nb);
+ dev->priv.sf_hw_table_vhca_nb.notifier_call = mlx5_sf_hw_vhca_event;
+ return mlx5_vhca_event_notifier_register(dev,
+ &dev->priv.sf_hw_table_vhca_nb);
+}
+
+void mlx5_sf_hw_notifier_cleanup(struct mlx5_core_dev *dev)
+{
+ if (mlx5_core_is_sf(dev))
+ return;
+
+ mlx5_vhca_event_notifier_unregister(dev,
+ &dev->priv.sf_hw_table_vhca_nb);
}
void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev)
@@ -388,9 +402,8 @@ void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev)
if (!table)
return;
- mlx5_vhca_event_notifier_unregister(dev, &table->vhca_nb);
/* Dealloc SFs whose firmware event has been missed. */
- mlx5_sf_hw_table_dealloc_all(table);
+ mlx5_sf_hw_table_dealloc_all(dev, table);
}
bool mlx5_sf_hw_table_supported(const struct mlx5_core_dev *dev)
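
The hw_table.c hunks above drop the cached table->dev back-pointer and instead pass struct mlx5_core_dev * into every helper, so the table struct carries only bookkeeping state. A minimal sketch of that direction, with invented names:

/* Sketch only; demo_* types stand in for the driver's structures. */
struct demo_dev { int dummy; };

struct demo_table {
	int allocated;	/* bookkeeping only -- no back-pointer to the device */
};

/* Helpers take the device explicitly instead of dereferencing table->dev. */
static void demo_table_dealloc_all(struct demo_dev *dev, struct demo_table *table)
{
	if (!dev || !table->allocated)
		return;
	table->allocated = 0;	/* device commands would be issued via 'dev' here */
}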
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h
index 860f9ddb7107..d8a934a0e968 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h
@@ -12,11 +12,15 @@
int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev);
void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev);
-int mlx5_sf_hw_table_create(struct mlx5_core_dev *dev);
+int mlx5_sf_hw_notifier_init(struct mlx5_core_dev *dev);
+void mlx5_sf_hw_notifier_cleanup(struct mlx5_core_dev *dev);
void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev);
+int mlx5_sf_notifiers_init(struct mlx5_core_dev *dev);
int mlx5_sf_table_init(struct mlx5_core_dev *dev);
+void mlx5_sf_notifiers_cleanup(struct mlx5_core_dev *dev);
void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev);
+bool mlx5_sf_table_empty(const struct mlx5_core_dev *dev);
int mlx5_devlink_sf_port_new(struct devlink *devlink,
const struct devlink_port_new_attrs *add_attr,
@@ -43,24 +47,42 @@ static inline void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev)
{
}
-static inline int mlx5_sf_hw_table_create(struct mlx5_core_dev *dev)
+static inline int mlx5_sf_hw_notifier_init(struct mlx5_core_dev *dev)
{
return 0;
}
+static inline void mlx5_sf_hw_notifier_cleanup(struct mlx5_core_dev *dev)
+{
+}
+
static inline void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev)
{
}
+static inline int mlx5_sf_notifiers_init(struct mlx5_core_dev *dev)
+{
+ return 0;
+}
+
static inline int mlx5_sf_table_init(struct mlx5_core_dev *dev)
{
return 0;
}
+static inline void mlx5_sf_notifiers_cleanup(struct mlx5_core_dev *dev)
+{
+}
+
static inline void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev)
{
}
+static inline bool mlx5_sf_table_empty(const struct mlx5_core_dev *dev)
+{
+ return true;
+}
+
#endif
#endif
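
sf.h above keeps a parallel block of empty static inline stubs for configurations where SF support is compiled out, so callers in the core driver need no #ifdefs of their own. A minimal sketch of that stub pattern with a placeholder config symbol and names:

/* Sketch only; CONFIG_DEMO_FEATURE and demo_* are invented names. */
struct demo_dev;

#ifdef CONFIG_DEMO_FEATURE
int demo_feature_init(struct demo_dev *dev);
void demo_feature_cleanup(struct demo_dev *dev);
#else
static inline int demo_feature_init(struct demo_dev *dev)
{
	return 0;	/* report success so callers stay #ifdef-free */
}

static inline void demo_feature_cleanup(struct demo_dev *dev)
{
}
#endif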
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c
index cda01ba441ae..b04cf6cf8956 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c
@@ -9,15 +9,9 @@
#define CREATE_TRACE_POINTS
#include "diag/vhca_tracepoint.h"
-struct mlx5_vhca_state_notifier {
- struct mlx5_core_dev *dev;
- struct mlx5_nb nb;
- struct blocking_notifier_head n_head;
-};
-
struct mlx5_vhca_event_work {
struct work_struct work;
- struct mlx5_vhca_state_notifier *notifier;
+ struct mlx5_core_dev *dev;
struct mlx5_vhca_state_event event;
};
@@ -95,16 +89,14 @@ mlx5_vhca_event_notify(struct mlx5_core_dev *dev, struct mlx5_vhca_state_event *
mlx5_vhca_event_arm(dev, event->function_id);
trace_mlx5_sf_vhca_event(dev, event);
- blocking_notifier_call_chain(&dev->priv.vhca_state_notifier->n_head, 0, event);
+ blocking_notifier_call_chain(&dev->priv.vhca_state_n_head, 0, event);
}
static void mlx5_vhca_state_work_handler(struct work_struct *_work)
{
struct mlx5_vhca_event_work *work = container_of(_work, struct mlx5_vhca_event_work, work);
- struct mlx5_vhca_state_notifier *notifier = work->notifier;
- struct mlx5_core_dev *dev = notifier->dev;
- mlx5_vhca_event_notify(dev, &work->event);
+ mlx5_vhca_event_notify(work->dev, &work->event);
kfree(work);
}
@@ -116,8 +108,8 @@ void mlx5_vhca_events_work_enqueue(struct mlx5_core_dev *dev, int idx, struct wo
static int
mlx5_vhca_state_change_notifier(struct notifier_block *nb, unsigned long type, void *data)
{
- struct mlx5_vhca_state_notifier *notifier =
- mlx5_nb_cof(nb, struct mlx5_vhca_state_notifier, nb);
+ struct mlx5_core_dev *dev = mlx5_nb_cof(nb, struct mlx5_core_dev,
+ priv.vhca_state_nb);
struct mlx5_vhca_event_work *work;
struct mlx5_eqe *eqe = data;
int wq_idx;
@@ -126,10 +118,10 @@ mlx5_vhca_state_change_notifier(struct notifier_block *nb, unsigned long type, v
if (!work)
return NOTIFY_DONE;
INIT_WORK(&work->work, &mlx5_vhca_state_work_handler);
- work->notifier = notifier;
+ work->dev = dev;
work->event.function_id = be16_to_cpu(eqe->data.vhca_state.function_id);
wq_idx = work->event.function_id % MLX5_DEV_MAX_WQS;
- mlx5_vhca_events_work_enqueue(notifier->dev, wq_idx, &work->work);
+ mlx5_vhca_events_work_enqueue(dev, wq_idx, &work->work);
return NOTIFY_OK;
}
@@ -145,9 +137,15 @@ void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *set_hca_cap)
MLX5_SET(cmd_hca_cap, set_hca_cap, event_on_vhca_state_teardown_request, 1);
}
+void mlx5_vhca_state_notifier_init(struct mlx5_core_dev *dev)
+{
+ BLOCKING_INIT_NOTIFIER_HEAD(&dev->priv.vhca_state_n_head);
+ MLX5_NB_INIT(&dev->priv.vhca_state_nb, mlx5_vhca_state_change_notifier,
+ VHCA_STATE_CHANGE);
+}
+
int mlx5_vhca_event_init(struct mlx5_core_dev *dev)
{
- struct mlx5_vhca_state_notifier *notifier;
char wq_name[MLX5_CMD_WQ_MAX_NAME];
struct mlx5_vhca_events *events;
int err, i;
@@ -160,7 +158,6 @@ int mlx5_vhca_event_init(struct mlx5_core_dev *dev)
return -ENOMEM;
events->dev = dev;
- dev->priv.vhca_events = events;
for (i = 0; i < MLX5_DEV_MAX_WQS; i++) {
snprintf(wq_name, MLX5_CMD_WQ_MAX_NAME, "mlx5_vhca_event%d", i);
events->handler[i].wq = create_singlethread_workqueue(wq_name);
@@ -169,20 +166,10 @@ int mlx5_vhca_event_init(struct mlx5_core_dev *dev)
goto err_create_wq;
}
}
+ dev->priv.vhca_events = events;
- notifier = kzalloc(sizeof(*notifier), GFP_KERNEL);
- if (!notifier) {
- err = -ENOMEM;
- goto err_notifier;
- }
-
- dev->priv.vhca_state_notifier = notifier;
- notifier->dev = dev;
- BLOCKING_INIT_NOTIFIER_HEAD(&notifier->n_head);
- MLX5_NB_INIT(&notifier->nb, mlx5_vhca_state_change_notifier, VHCA_STATE_CHANGE);
return 0;
-err_notifier:
err_create_wq:
for (--i; i >= 0; i--)
destroy_workqueue(events->handler[i].wq);
@@ -211,8 +198,6 @@ void mlx5_vhca_event_cleanup(struct mlx5_core_dev *dev)
if (!mlx5_vhca_event_supported(dev))
return;
- kfree(dev->priv.vhca_state_notifier);
- dev->priv.vhca_state_notifier = NULL;
vhca_events = dev->priv.vhca_events;
for (i = 0; i < MLX5_DEV_MAX_WQS; i++)
destroy_workqueue(vhca_events->handler[i].wq);
@@ -221,34 +206,30 @@ void mlx5_vhca_event_cleanup(struct mlx5_core_dev *dev)
void mlx5_vhca_event_start(struct mlx5_core_dev *dev)
{
- struct mlx5_vhca_state_notifier *notifier;
-
- if (!dev->priv.vhca_state_notifier)
+ if (!mlx5_vhca_event_supported(dev))
return;
- notifier = dev->priv.vhca_state_notifier;
- mlx5_eq_notifier_register(dev, &notifier->nb);
+ mlx5_eq_notifier_register(dev, &dev->priv.vhca_state_nb);
}
void mlx5_vhca_event_stop(struct mlx5_core_dev *dev)
{
- struct mlx5_vhca_state_notifier *notifier;
-
- if (!dev->priv.vhca_state_notifier)
+ if (!mlx5_vhca_event_supported(dev))
return;
- notifier = dev->priv.vhca_state_notifier;
- mlx5_eq_notifier_unregister(dev, &notifier->nb);
+ mlx5_eq_notifier_unregister(dev, &dev->priv.vhca_state_nb);
+
+ /* Flush workqueues of all pending events. */
+ mlx5_vhca_event_work_queues_flush(dev);
}
int mlx5_vhca_event_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb)
{
- if (!dev->priv.vhca_state_notifier)
- return -EOPNOTSUPP;
- return blocking_notifier_chain_register(&dev->priv.vhca_state_notifier->n_head, nb);
+ return blocking_notifier_chain_register(&dev->priv.vhca_state_n_head,
+ nb);
}
void mlx5_vhca_event_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb)
{
- blocking_notifier_chain_unregister(&dev->priv.vhca_state_notifier->n_head, nb);
+ blocking_notifier_chain_unregister(&dev->priv.vhca_state_n_head, nb);
}
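
With the hunks above, the blocking notifier head lives directly in dev->priv and is initialized unconditionally, so register/unregister no longer need to test for a separately allocated notifier object. A minimal sketch of the blocking-notifier-chain usage this relies on; everything except the <linux/notifier.h> API names is a placeholder:

/* Sketch only; demo_* names are invented. */
#include <linux/notifier.h>

struct demo_priv {
	struct blocking_notifier_head state_chain;
};

static void demo_priv_init(struct demo_priv *priv)
{
	/* one-time init, analogous to mlx5_vhca_state_notifier_init() above */
	BLOCKING_INIT_NOTIFIER_HEAD(&priv->state_chain);
}

static int demo_register(struct demo_priv *priv, struct notifier_block *nb)
{
	/* the head always exists, so no NULL check is needed */
	return blocking_notifier_chain_register(&priv->state_chain, nb);
}

static void demo_notify(struct demo_priv *priv, void *event)
{
	blocking_notifier_call_chain(&priv->state_chain, 0, event);
}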
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h
index 1725ba64f8af..52790423874c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h
@@ -18,6 +18,7 @@ static inline bool mlx5_vhca_event_supported(const struct mlx5_core_dev *dev)
}
void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *set_hca_cap);
+void mlx5_vhca_state_notifier_init(struct mlx5_core_dev *dev);
int mlx5_vhca_event_init(struct mlx5_core_dev *dev);
void mlx5_vhca_event_cleanup(struct mlx5_core_dev *dev);
void mlx5_vhca_event_start(struct mlx5_core_dev *dev);
@@ -37,6 +38,10 @@ static inline void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *s
{
}
+static inline void mlx5_vhca_state_notifier_init(struct mlx5_core_dev *dev)
+{
+}
+
static inline int mlx5_vhca_event_init(struct mlx5_core_dev *dev)
{
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h
deleted file mode 100644
index e2fc69867088..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
-/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
-
-#ifndef _DR_STE_V1_
-#define _DR_STE_V1_
-
-#include "dr_types.h"
-#include "dr_ste.h"
-
-bool dr_ste_v1_is_miss_addr_set(u8 *hw_ste_p);
-void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr);
-u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p);
-void dr_ste_v1_set_byte_mask(u8 *hw_ste_p, u16 byte_mask);
-u16 dr_ste_v1_get_byte_mask(u8 *hw_ste_p);
-void dr_ste_v1_set_next_lu_type(u8 *hw_ste_p, u16 lu_type);
-u16 dr_ste_v1_get_next_lu_type(u8 *hw_ste_p);
-void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size);
-void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type, bool is_rx, u16 gvmi);
-void dr_ste_v1_prepare_for_postsend(u8 *hw_ste_p, u32 ste_size);
-void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, u8 *action_type_set,
- u32 actions_caps, u8 *last_ste,
- struct mlx5dr_ste_actions_attr *attr, u32 *added_stes);
-void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, u8 *action_type_set,
- u32 actions_caps, u8 *last_ste,
- struct mlx5dr_ste_actions_attr *attr, u32 *added_stes);
-void dr_ste_v1_set_action_set(u8 *d_action, u8 hw_field, u8 shifter,
- u8 length, u32 data);
-void dr_ste_v1_set_action_add(u8 *d_action, u8 hw_field, u8 shifter,
- u8 length, u32 data);
-void dr_ste_v1_set_action_copy(u8 *d_action, u8 dst_hw_field, u8 dst_shifter,
- u8 dst_len, u8 src_hw_field, u8 src_shifter);
-int dr_ste_v1_set_action_decap_l3_list(void *data, u32 data_sz, u8 *hw_action,
- u32 hw_action_sz, u16 *used_hw_action_num);
-int dr_ste_v1_alloc_modify_hdr_ptrn_arg(struct mlx5dr_action *action);
-void dr_ste_v1_free_modify_hdr_ptrn_arg(struct mlx5dr_action *action);
-void dr_ste_v1_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_eth_l2_src_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_eth_l2_dst_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_eth_l2_tnl_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_mpls_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_tnl_gre_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_tnl_mpls_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_general_purpose_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_eth_l4_misc_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_register_0_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_register_1_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_flex_parser_0_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_flex_parser_1_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-
-#endif /* _DR_STE_V1_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/Makefile
new file mode 100644
index 000000000000..c78512eed8d7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+subdir-ccflags-y += -I$(src)/..
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
new file mode 100644
index 000000000000..fe56b59e24c5
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
@@ -0,0 +1,2651 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "internal.h"
+
+#define MLX5HWS_ACTION_METER_INIT_COLOR_OFFSET 1
+
+/* Header removal size limited to 128B (64 words) */
+#define MLX5HWS_ACTION_REMOVE_HEADER_MAX_SIZE 128
+
+/* This is the longest supported action sequence for FDB table:
+ * DECAP, POP_VLAN, MODIFY, CTR, ASO, PUSH_VLAN, MODIFY, ENCAP, Term.
+ */
+static const u32 action_order_arr[MLX5HWS_ACTION_TYP_MAX] = {
+ BIT(MLX5HWS_ACTION_TYP_REMOVE_HEADER) |
+ BIT(MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2) |
+ BIT(MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2),
+ BIT(MLX5HWS_ACTION_TYP_POP_VLAN),
+ BIT(MLX5HWS_ACTION_TYP_POP_VLAN),
+ BIT(MLX5HWS_ACTION_TYP_MODIFY_HDR),
+ BIT(MLX5HWS_ACTION_TYP_PUSH_VLAN),
+ BIT(MLX5HWS_ACTION_TYP_PUSH_VLAN),
+ BIT(MLX5HWS_ACTION_TYP_INSERT_HEADER) |
+ BIT(MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2) |
+ BIT(MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3),
+ BIT(MLX5HWS_ACTION_TYP_CTR),
+ BIT(MLX5HWS_ACTION_TYP_TAG),
+ BIT(MLX5HWS_ACTION_TYP_ASO_METER),
+ BIT(MLX5HWS_ACTION_TYP_MODIFY_HDR),
+ BIT(MLX5HWS_ACTION_TYP_TBL) |
+ BIT(MLX5HWS_ACTION_TYP_VPORT) |
+ BIT(MLX5HWS_ACTION_TYP_DROP) |
+ BIT(MLX5HWS_ACTION_TYP_SAMPLER) |
+ BIT(MLX5HWS_ACTION_TYP_RANGE) |
+ BIT(MLX5HWS_ACTION_TYP_DEST_ARRAY),
+ BIT(MLX5HWS_ACTION_TYP_LAST),
+};
+
+static const char * const mlx5hws_action_type_str[] = {
+ [MLX5HWS_ACTION_TYP_LAST] = "LAST",
+ [MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2] = "TNL_L2_TO_L2",
+ [MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2] = "L2_TO_TNL_L2",
+ [MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2] = "TNL_L3_TO_L2",
+ [MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3] = "L2_TO_TNL_L3",
+ [MLX5HWS_ACTION_TYP_DROP] = "DROP",
+ [MLX5HWS_ACTION_TYP_TBL] = "TBL",
+ [MLX5HWS_ACTION_TYP_CTR] = "CTR",
+ [MLX5HWS_ACTION_TYP_TAG] = "TAG",
+ [MLX5HWS_ACTION_TYP_MODIFY_HDR] = "MODIFY_HDR",
+ [MLX5HWS_ACTION_TYP_VPORT] = "VPORT",
+ [MLX5HWS_ACTION_TYP_MISS] = "DEFAULT_MISS",
+ [MLX5HWS_ACTION_TYP_POP_VLAN] = "POP_VLAN",
+ [MLX5HWS_ACTION_TYP_PUSH_VLAN] = "PUSH_VLAN",
+ [MLX5HWS_ACTION_TYP_ASO_METER] = "ASO_METER",
+ [MLX5HWS_ACTION_TYP_DEST_ARRAY] = "DEST_ARRAY",
+ [MLX5HWS_ACTION_TYP_INSERT_HEADER] = "INSERT_HEADER",
+ [MLX5HWS_ACTION_TYP_REMOVE_HEADER] = "REMOVE_HEADER",
+ [MLX5HWS_ACTION_TYP_SAMPLER] = "SAMPLER",
+ [MLX5HWS_ACTION_TYP_RANGE] = "RANGE",
+};
+
+static_assert(ARRAY_SIZE(mlx5hws_action_type_str) == MLX5HWS_ACTION_TYP_MAX,
+ "Missing mlx5hws_action_type_str");
+
+const char *mlx5hws_action_type_to_str(enum mlx5hws_action_type action_type)
+{
+ return mlx5hws_action_type_str[action_type];
+}
+
+enum mlx5hws_action_type mlx5hws_action_get_type(struct mlx5hws_action *action)
+{
+ return action->type;
+}
+
+struct mlx5_core_dev *mlx5hws_action_get_dev(struct mlx5hws_action *action)
+{
+ return action->ctx->mdev;
+}
+
+static int hws_action_get_shared_stc_nic(struct mlx5hws_context *ctx,
+ enum mlx5hws_context_shared_stc_type stc_type,
+ u8 tbl_type)
+{
+ struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
+ struct mlx5hws_action_shared_stc *shared_stc;
+ int ret;
+
+ mutex_lock(&ctx->ctrl_lock);
+ if (ctx->common_res.shared_stc[stc_type]) {
+ ctx->common_res.shared_stc[stc_type]->refcount++;
+ mutex_unlock(&ctx->ctrl_lock);
+ return 0;
+ }
+
+ shared_stc = kzalloc(sizeof(*shared_stc), GFP_KERNEL);
+ if (!shared_stc) {
+ ret = -ENOMEM;
+ goto unlock_and_out;
+ }
+ switch (stc_type) {
+ case MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3:
+ stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE;
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW5;
+ stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+ stc_attr.remove_header.decap = 0;
+ stc_attr.remove_header.start_anchor = MLX5_HEADER_ANCHOR_PACKET_START;
+ stc_attr.remove_header.end_anchor = MLX5_HEADER_ANCHOR_IPV6_IPV4;
+ break;
+ case MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP:
+ stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS;
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW5;
+ stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
+ stc_attr.remove_words.start_anchor = MLX5_HEADER_ANCHOR_FIRST_VLAN_START;
+ stc_attr.remove_words.num_of_words = MLX5HWS_ACTION_HDR_LEN_L2_VLAN;
+ break;
+ default:
+ mlx5hws_err(ctx, "No such stc_type: %d\n", stc_type);
+ pr_warn("HWS: Invalid stc_type: %d\n", stc_type);
+ ret = -EINVAL;
+ goto free_shared_stc;
+ }
+
+ ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
+ &shared_stc->stc_chunk);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate shared decap l2 STC\n");
+ goto free_shared_stc;
+ }
+
+ ctx->common_res.shared_stc[stc_type] = shared_stc;
+ ctx->common_res.shared_stc[stc_type]->refcount = 1;
+
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return 0;
+
+free_shared_stc:
+ kfree(shared_stc);
+unlock_and_out:
+ mutex_unlock(&ctx->ctrl_lock);
+ return ret;
+}
+
+static int hws_action_get_shared_stc(struct mlx5hws_action *action,
+ enum mlx5hws_context_shared_stc_type stc_type)
+{
+ struct mlx5hws_context *ctx = action->ctx;
+ int ret;
+
+ if (stc_type >= MLX5HWS_CONTEXT_SHARED_STC_MAX) {
+ pr_warn("HWS: Invalid shared stc_type: %d\n", stc_type);
+ return -EINVAL;
+ }
+
+ if (unlikely(!(action->flags & MLX5HWS_ACTION_FLAG_HWS_FDB))) {
+ pr_warn("HWS: Invalid action->flags: %d\n", action->flags);
+ return -EINVAL;
+ }
+
+ ret = hws_action_get_shared_stc_nic(ctx, stc_type, MLX5HWS_TABLE_TYPE_FDB);
+ if (ret) {
+ mlx5hws_err(ctx,
+ "Failed to allocate memory for FDB shared STCs (type: %d)\n",
+ stc_type);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hws_action_put_shared_stc(struct mlx5hws_action *action,
+ enum mlx5hws_context_shared_stc_type stc_type)
+{
+ enum mlx5hws_table_type tbl_type = MLX5HWS_TABLE_TYPE_FDB;
+ struct mlx5hws_action_shared_stc *shared_stc;
+ struct mlx5hws_context *ctx = action->ctx;
+
+ if (stc_type >= MLX5HWS_CONTEXT_SHARED_STC_MAX) {
+ pr_warn("HWS: Invalid shared stc_type: %d\n", stc_type);
+ return;
+ }
+
+ mutex_lock(&ctx->ctrl_lock);
+ if (--ctx->common_res.shared_stc[stc_type]->refcount) {
+ mutex_unlock(&ctx->ctrl_lock);
+ return;
+ }
+
+ shared_stc = ctx->common_res.shared_stc[stc_type];
+
+ mlx5hws_action_free_single_stc(ctx, tbl_type, &shared_stc->stc_chunk);
+ kfree(shared_stc);
+ ctx->common_res.shared_stc[stc_type] = NULL;
+ mutex_unlock(&ctx->ctrl_lock);
+}
+
+static void hws_action_print_combo(struct mlx5hws_context *ctx,
+ enum mlx5hws_action_type *user_actions)
+{
+ mlx5hws_err(ctx, "Invalid action_type sequence");
+ while (*user_actions != MLX5HWS_ACTION_TYP_LAST) {
+ mlx5hws_err(ctx, " %s", mlx5hws_action_type_to_str(*user_actions));
+ user_actions++;
+ }
+ mlx5hws_err(ctx, "\n");
+}
+
+bool mlx5hws_action_check_combo(struct mlx5hws_context *ctx,
+ enum mlx5hws_action_type *user_actions,
+ enum mlx5hws_table_type table_type)
+{
+ const u32 *order_arr = action_order_arr;
+ bool valid_combo;
+ u8 order_idx = 0;
+ u8 user_idx = 0;
+
+ if (table_type >= MLX5HWS_TABLE_TYPE_MAX) {
+ mlx5hws_err(ctx, "Invalid table_type %d", table_type);
+ return false;
+ }
+
+ while (order_arr[order_idx] != BIT(MLX5HWS_ACTION_TYP_LAST)) {
+ /* User action order validated, move to the next user action */
+ if (BIT(user_actions[user_idx]) & order_arr[order_idx])
+ user_idx++;
+
+ /* Iterate to the next supported action in the order */
+ order_idx++;
+ }
+
+ /* Combination is valid if all user actions were processed */
+ valid_combo = user_actions[user_idx] == MLX5HWS_ACTION_TYP_LAST;
+ if (!valid_combo)
+ hws_action_print_combo(ctx, user_actions);
+
+ return valid_combo;
+}
+
+static bool
+hws_action_fixup_stc_attr(struct mlx5hws_context *ctx,
+ struct mlx5hws_cmd_stc_modify_attr *stc_attr,
+ struct mlx5hws_cmd_stc_modify_attr *fixup_stc_attr,
+ enum mlx5hws_table_type table_type,
+ bool is_mirror)
+{
+ struct mlx5hws_pool *pool;
+ bool use_fixup = false;
+ u32 fw_tbl_type;
+ u32 base_id;
+
+ fw_tbl_type = mlx5hws_table_get_res_fw_ft_type(table_type, is_mirror);
+
+ switch (stc_attr->action_type) {
+ case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE:
+ if (is_mirror && stc_attr->ste_table.ignore_tx) {
+ fixup_stc_attr->action_type = MLX5_IFC_STC_ACTION_TYPE_DROP;
+ fixup_stc_attr->action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ fixup_stc_attr->stc_offset = stc_attr->stc_offset;
+ use_fixup = true;
+ break;
+ }
+ pool = stc_attr->ste_table.ste_pool;
+ if (!is_mirror)
+ base_id = mlx5hws_pool_get_base_id(pool);
+ else
+ base_id = mlx5hws_pool_get_base_mirror_id(pool);
+
+ *fixup_stc_attr = *stc_attr;
+ fixup_stc_attr->ste_table.ste_obj_id = base_id;
+ use_fixup = true;
+ break;
+
+ case MLX5_IFC_STC_ACTION_TYPE_TAG:
+ if (fw_tbl_type == FS_FT_FDB_TX) {
+ fixup_stc_attr->action_type = MLX5_IFC_STC_ACTION_TYPE_NOP;
+ fixup_stc_attr->action_offset = MLX5HWS_ACTION_OFFSET_DW5;
+ fixup_stc_attr->stc_offset = stc_attr->stc_offset;
+ use_fixup = true;
+ }
+ break;
+
+ case MLX5_IFC_STC_ACTION_TYPE_ALLOW:
+ if (fw_tbl_type == FS_FT_FDB_TX || fw_tbl_type == FS_FT_FDB_RX) {
+ fixup_stc_attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT;
+ fixup_stc_attr->action_offset = stc_attr->action_offset;
+ fixup_stc_attr->stc_offset = stc_attr->stc_offset;
+ fixup_stc_attr->vport.esw_owner_vhca_id = ctx->caps->vhca_id;
+ fixup_stc_attr->vport.vport_num = ctx->caps->eswitch_manager_vport_number;
+ fixup_stc_attr->vport.eswitch_owner_vhca_id_valid =
+ ctx->caps->merged_eswitch;
+ use_fixup = true;
+ }
+ break;
+
+ case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT:
+ if (stc_attr->vport.vport_num != MLX5_VPORT_UPLINK)
+ break;
+
+ if (fw_tbl_type == FS_FT_FDB_TX || fw_tbl_type == FS_FT_FDB_RX) {
+ /* The FW doesn't allow to go to wire in the TX/RX by JUMP_TO_VPORT */
+ fixup_stc_attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_UPLINK;
+ fixup_stc_attr->action_offset = stc_attr->action_offset;
+ fixup_stc_attr->stc_offset = stc_attr->stc_offset;
+ fixup_stc_attr->vport.vport_num = 0;
+ fixup_stc_attr->vport.esw_owner_vhca_id = stc_attr->vport.esw_owner_vhca_id;
+ fixup_stc_attr->vport.eswitch_owner_vhca_id_valid =
+ stc_attr->vport.eswitch_owner_vhca_id_valid;
+ }
+ use_fixup = true;
+ break;
+
+ default:
+ break;
+ }
+
+ return use_fixup;
+}
+
+int mlx5hws_action_alloc_single_stc(struct mlx5hws_context *ctx,
+ struct mlx5hws_cmd_stc_modify_attr *stc_attr,
+ u32 table_type,
+ struct mlx5hws_pool_chunk *stc)
+__must_hold(&ctx->ctrl_lock)
+{
+ struct mlx5hws_cmd_stc_modify_attr cleanup_stc_attr = {0};
+ struct mlx5hws_cmd_stc_modify_attr fixup_stc_attr = {0};
+ struct mlx5hws_pool *stc_pool = ctx->stc_pool;
+ bool use_fixup;
+ u32 obj_0_id;
+ int ret;
+
+ ret = mlx5hws_pool_chunk_alloc(stc_pool, stc);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate single action STC\n");
+ return ret;
+ }
+
+ stc_attr->stc_offset = stc->offset;
+
+ /* Dynamic reparse not supported, overwrite and use default */
+ if (!mlx5hws_context_cap_dynamic_reparse(ctx))
+ stc_attr->reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+
+ obj_0_id = mlx5hws_pool_get_base_id(stc_pool);
+
+ /* According to table/action limitation change the stc_attr */
+ use_fixup = hws_action_fixup_stc_attr(ctx, stc_attr, &fixup_stc_attr, table_type, false);
+ ret = mlx5hws_cmd_stc_modify(ctx->mdev, obj_0_id,
+ use_fixup ? &fixup_stc_attr : stc_attr);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to modify STC action_type %d tbl_type %d\n",
+ stc_attr->action_type, table_type);
+ goto free_chunk;
+ }
+
+ /* Modify the FDB peer */
+ if (table_type == MLX5HWS_TABLE_TYPE_FDB) {
+ u32 obj_1_id;
+
+ obj_1_id = mlx5hws_pool_get_base_mirror_id(stc_pool);
+
+ use_fixup = hws_action_fixup_stc_attr(ctx, stc_attr,
+ &fixup_stc_attr,
+ table_type, true);
+ ret = mlx5hws_cmd_stc_modify(ctx->mdev, obj_1_id,
+ use_fixup ? &fixup_stc_attr : stc_attr);
+ if (ret) {
+ mlx5hws_err(ctx,
+ "Failed to modify peer STC action_type %d tbl_type %d\n",
+ stc_attr->action_type, table_type);
+ goto clean_obj_0;
+ }
+ }
+
+ return 0;
+
+clean_obj_0:
+ cleanup_stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_DROP;
+ cleanup_stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ cleanup_stc_attr.stc_offset = stc->offset;
+ mlx5hws_cmd_stc_modify(ctx->mdev, obj_0_id, &cleanup_stc_attr);
+free_chunk:
+ mlx5hws_pool_chunk_free(stc_pool, stc);
+ return ret;
+}
+
+void mlx5hws_action_free_single_stc(struct mlx5hws_context *ctx,
+ u32 table_type,
+ struct mlx5hws_pool_chunk *stc)
+__must_hold(&ctx->ctrl_lock)
+{
+ struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
+ struct mlx5hws_pool *stc_pool = ctx->stc_pool;
+ u32 obj_id;
+
+ /* Modify the STC not to point to an object */
+ stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_DROP;
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ stc_attr.stc_offset = stc->offset;
+ obj_id = mlx5hws_pool_get_base_id(stc_pool);
+ mlx5hws_cmd_stc_modify(ctx->mdev, obj_id, &stc_attr);
+
+ if (table_type == MLX5HWS_TABLE_TYPE_FDB) {
+ obj_id = mlx5hws_pool_get_base_mirror_id(stc_pool);
+ mlx5hws_cmd_stc_modify(ctx->mdev, obj_id, &stc_attr);
+ }
+
+ mlx5hws_pool_chunk_free(stc_pool, stc);
+}
+
+static u32 hws_action_get_mh_stc_type(struct mlx5hws_context *ctx,
+ __be64 pattern)
+{
+ u8 action_type = MLX5_GET(set_action_in, &pattern, action_type);
+
+ switch (action_type) {
+ case MLX5_MODIFICATION_TYPE_SET:
+ return MLX5_IFC_STC_ACTION_TYPE_SET;
+ case MLX5_MODIFICATION_TYPE_ADD:
+ return MLX5_IFC_STC_ACTION_TYPE_ADD;
+ case MLX5_MODIFICATION_TYPE_COPY:
+ return MLX5_IFC_STC_ACTION_TYPE_COPY;
+ case MLX5_MODIFICATION_TYPE_ADD_FIELD:
+ return MLX5_IFC_STC_ACTION_TYPE_ADD_FIELD;
+ default:
+ mlx5hws_err(ctx, "Unsupported action type: 0x%x\n", action_type);
+ return MLX5_IFC_STC_ACTION_TYPE_NOP;
+ }
+}
+
+static void hws_action_fill_stc_attr(struct mlx5hws_action *action,
+ u32 obj_id,
+ struct mlx5hws_cmd_stc_modify_attr *attr)
+{
+ attr->reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+
+ switch (action->type) {
+ case MLX5HWS_ACTION_TYP_TAG:
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_TAG;
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_DW5;
+ break;
+ case MLX5HWS_ACTION_TYP_DROP:
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_DROP;
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ break;
+ case MLX5HWS_ACTION_TYP_MISS:
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_ALLOW;
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ break;
+ case MLX5HWS_ACTION_TYP_CTR:
+ attr->id = obj_id;
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_COUNTER;
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_DW0;
+ break;
+ case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
+ case MLX5HWS_ACTION_TYP_MODIFY_HDR:
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_DW6;
+ attr->reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+ if (action->modify_header.require_reparse)
+ attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
+
+ if (action->modify_header.num_of_actions == 1) {
+ attr->modify_action.data = action->modify_header.single_action;
+ attr->action_type = hws_action_get_mh_stc_type(action->ctx,
+ attr->modify_action.data);
+
+ if (attr->action_type == MLX5_IFC_STC_ACTION_TYPE_ADD ||
+ attr->action_type == MLX5_IFC_STC_ACTION_TYPE_SET)
+ MLX5_SET(set_action_in, &attr->modify_action.data, data, 0);
+ } else {
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_ACC_MODIFY_LIST;
+ attr->modify_header.arg_id = action->modify_header.arg_id;
+ attr->modify_header.pattern_id = action->modify_header.pat_id;
+ }
+ break;
+ case MLX5HWS_ACTION_TYP_TBL:
+ case MLX5HWS_ACTION_TYP_DEST_ARRAY:
+ case MLX5HWS_ACTION_TYP_SAMPLER:
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT;
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ attr->dest_table_id = obj_id;
+ break;
+ case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2:
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE;
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_DW5;
+ attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
+ attr->remove_header.decap = 1;
+ attr->remove_header.start_anchor = MLX5_HEADER_ANCHOR_PACKET_START;
+ attr->remove_header.end_anchor = MLX5_HEADER_ANCHOR_INNER_MAC;
+ break;
+ case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
+ case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
+ case MLX5HWS_ACTION_TYP_INSERT_HEADER:
+ attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
+ if (!action->reformat.require_reparse)
+ attr->reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT;
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_DW6;
+ attr->insert_header.encap = action->reformat.encap;
+ attr->insert_header.insert_anchor = action->reformat.anchor;
+ attr->insert_header.arg_id = action->reformat.arg_id;
+ attr->insert_header.header_size = action->reformat.header_size;
+ attr->insert_header.insert_offset = action->reformat.offset;
+ break;
+ case MLX5HWS_ACTION_TYP_ASO_METER:
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_DW6;
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_ASO;
+ attr->aso.aso_type = ASO_OPC_MOD_POLICER;
+ attr->aso.devx_obj_id = obj_id;
+ attr->aso.return_reg_id = action->aso.return_reg_id;
+ break;
+ case MLX5HWS_ACTION_TYP_VPORT:
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT;
+ attr->vport.vport_num = action->vport.vport_num;
+ attr->vport.esw_owner_vhca_id = action->vport.esw_owner_vhca_id;
+ attr->vport.eswitch_owner_vhca_id_valid = action->vport.esw_owner_vhca_id_valid;
+ break;
+ case MLX5HWS_ACTION_TYP_POP_VLAN:
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS;
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_DW5;
+ attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
+ attr->remove_words.start_anchor = MLX5_HEADER_ANCHOR_FIRST_VLAN_START;
+ attr->remove_words.num_of_words = MLX5HWS_ACTION_HDR_LEN_L2_VLAN / 2;
+ break;
+ case MLX5HWS_ACTION_TYP_PUSH_VLAN:
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT;
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_DW6;
+ attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
+ attr->insert_header.encap = 0;
+ attr->insert_header.is_inline = 1;
+ attr->insert_header.insert_anchor = MLX5_HEADER_ANCHOR_PACKET_START;
+ attr->insert_header.insert_offset = MLX5HWS_ACTION_HDR_LEN_L2_MACS;
+ attr->insert_header.header_size = MLX5HWS_ACTION_HDR_LEN_L2_VLAN;
+ break;
+ case MLX5HWS_ACTION_TYP_REMOVE_HEADER:
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS;
+ attr->remove_header.decap = 0; /* the mode we support decap is 0 */
+ attr->remove_words.start_anchor = action->remove_header.anchor;
+ /* the size is in already in words */
+ attr->remove_words.num_of_words = action->remove_header.size;
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_DW5;
+ attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
+ break;
+ default:
+ mlx5hws_err(action->ctx, "Invalid action type %d\n", action->type);
+ }
+}
+
+static int
+hws_action_create_stcs(struct mlx5hws_action *action, u32 obj_id)
+{
+ struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
+ struct mlx5hws_context *ctx = action->ctx;
+ int ret;
+
+ hws_action_fill_stc_attr(action, obj_id, &stc_attr);
+
+ /* Block unsupported parallel obj modify over the same base */
+ mutex_lock(&ctx->ctrl_lock);
+
+ /* Allocate STC for FDB */
+ if (action->flags & MLX5HWS_ACTION_FLAG_HWS_FDB) {
+ ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr,
+ MLX5HWS_TABLE_TYPE_FDB,
+ &action->stc);
+ if (ret)
+ goto out_err;
+ }
+
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return 0;
+
+out_err:
+ mutex_unlock(&ctx->ctrl_lock);
+ return ret;
+}
+
+static void
+hws_action_destroy_stcs(struct mlx5hws_action *action)
+{
+ struct mlx5hws_context *ctx = action->ctx;
+
+ /* Block unsupported parallel obj modify over the same base */
+ mutex_lock(&ctx->ctrl_lock);
+
+ if (action->flags & MLX5HWS_ACTION_FLAG_HWS_FDB)
+ mlx5hws_action_free_single_stc(ctx, MLX5HWS_TABLE_TYPE_FDB,
+ &action->stc);
+
+ mutex_unlock(&ctx->ctrl_lock);
+}
+
+static bool hws_action_is_flag_hws_fdb(u32 flags)
+{
+ return flags & MLX5HWS_ACTION_FLAG_HWS_FDB;
+}
+
+static bool
+hws_action_validate_hws_action(struct mlx5hws_context *ctx, u32 flags)
+{
+ if (!(ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT)) {
+ mlx5hws_err(ctx, "Cannot create HWS action since HWS is not supported\n");
+ return false;
+ }
+
+ if ((flags & MLX5HWS_ACTION_FLAG_HWS_FDB) && !ctx->caps->eswitch_manager) {
+ mlx5hws_err(ctx, "Cannot create HWS action for FDB for non-eswitch-manager\n");
+ return false;
+ }
+
+ return true;
+}
+
+static struct mlx5hws_action *
+hws_action_create_generic_bulk(struct mlx5hws_context *ctx,
+ u32 flags,
+ enum mlx5hws_action_type action_type,
+ u8 bulk_sz)
+{
+ struct mlx5hws_action *action;
+ int i;
+
+ if (!hws_action_is_flag_hws_fdb(flags)) {
+ mlx5hws_err(ctx,
+ "Action (type: %d) flags must specify only HWS FDB\n", action_type);
+ return NULL;
+ }
+
+ if (!hws_action_validate_hws_action(ctx, flags))
+ return NULL;
+
+ action = kcalloc(bulk_sz, sizeof(*action), GFP_KERNEL);
+ if (!action)
+ return NULL;
+
+ for (i = 0; i < bulk_sz; i++) {
+ action[i].ctx = ctx;
+ action[i].flags = flags;
+ action[i].type = action_type;
+ }
+
+ return action;
+}
+
+static struct mlx5hws_action *
+hws_action_create_generic(struct mlx5hws_context *ctx,
+ u32 flags,
+ enum mlx5hws_action_type action_type)
+{
+ return hws_action_create_generic_bulk(ctx, flags, action_type, 1);
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_dest_table_num(struct mlx5hws_context *ctx,
+ u32 table_id,
+ u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_TBL);
+ if (!action)
+ return NULL;
+
+ ret = hws_action_create_stcs(action, table_id);
+ if (ret)
+ goto free_action;
+
+ action->dest_obj.obj_id = table_id;
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_dest_table(struct mlx5hws_context *ctx,
+ struct mlx5hws_table *tbl,
+ u32 flags)
+{
+ return mlx5hws_action_create_dest_table_num(ctx, tbl->ft_id, flags);
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_dest_drop(struct mlx5hws_context *ctx, u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_DROP);
+ if (!action)
+ return NULL;
+
+ ret = hws_action_create_stcs(action, 0);
+ if (ret)
+ goto free_action;
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_default_miss(struct mlx5hws_context *ctx, u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_MISS);
+ if (!action)
+ return NULL;
+
+ ret = hws_action_create_stcs(action, 0);
+ if (ret)
+ goto free_action;
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_tag(struct mlx5hws_context *ctx, u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_TAG);
+ if (!action)
+ return NULL;
+
+ ret = hws_action_create_stcs(action, 0);
+ if (ret)
+ goto free_action;
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+static struct mlx5hws_action *
+hws_action_create_aso(struct mlx5hws_context *ctx,
+ enum mlx5hws_action_type action_type,
+ u32 obj_id,
+ u8 return_reg_id,
+ u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ action = hws_action_create_generic(ctx, flags, action_type);
+ if (!action)
+ return NULL;
+
+ action->aso.obj_id = obj_id;
+ action->aso.return_reg_id = return_reg_id;
+
+ ret = hws_action_create_stcs(action, obj_id);
+ if (ret)
+ goto free_action;
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_aso_meter(struct mlx5hws_context *ctx,
+ u32 obj_id,
+ u8 return_reg_id,
+ u32 flags)
+{
+ return hws_action_create_aso(ctx, MLX5HWS_ACTION_TYP_ASO_METER,
+ obj_id, return_reg_id, flags);
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_counter(struct mlx5hws_context *ctx,
+ u32 obj_id,
+ u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_CTR);
+ if (!action)
+ return NULL;
+
+ ret = hws_action_create_stcs(action, obj_id);
+ if (ret)
+ goto free_action;
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_dest_vport(struct mlx5hws_context *ctx,
+ u16 vport_num,
+ bool vhca_id_valid,
+ u16 vhca_id,
+ u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ if (!(flags & MLX5HWS_ACTION_FLAG_HWS_FDB)) {
+ mlx5hws_err(ctx, "Vport action is supported for FDB only\n");
+ return NULL;
+ }
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_VPORT);
+ if (!action)
+ return NULL;
+
+ if (!ctx->caps->merged_eswitch && vhca_id_valid && vhca_id != ctx->caps->vhca_id) {
+ mlx5hws_err(ctx, "Non merged eswitch cannot send to other vhca\n");
+ goto free_action;
+ }
+
+ action->vport.vport_num = vport_num;
+ action->vport.esw_owner_vhca_id_valid = vhca_id_valid;
+
+ if (vhca_id_valid)
+ action->vport.esw_owner_vhca_id = vhca_id;
+
+ ret = hws_action_create_stcs(action, 0);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed creating stc for vport %d\n", vport_num);
+ goto free_action;
+ }
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_push_vlan(struct mlx5hws_context *ctx, u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_PUSH_VLAN);
+ if (!action)
+ return NULL;
+
+ ret = hws_action_create_stcs(action, 0);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed creating stc for push vlan\n");
+ goto free_action;
+ }
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_pop_vlan(struct mlx5hws_context *ctx, u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_POP_VLAN);
+ if (!action)
+ return NULL;
+
+ ret = hws_action_get_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to create remove stc for reformat\n");
+ goto free_action;
+ }
+
+ ret = hws_action_create_stcs(action, 0);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed creating stc for pop vlan\n");
+ goto free_shared;
+ }
+
+ return action;
+
+free_shared:
+ hws_action_put_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP);
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+static int
+hws_action_handle_insert_with_ptr(struct mlx5hws_action *action,
+ u8 num_of_hdrs,
+ struct mlx5hws_action_reformat_header *hdrs,
+ u32 log_bulk_sz)
+{
+ size_t max_sz = 0;
+ u32 arg_id;
+ int ret, i;
+
+ for (i = 0; i < num_of_hdrs; i++) {
+ if (hdrs[i].sz % W_SIZE != 0) {
+ mlx5hws_err(action->ctx,
+ "Header data size should be in WORD granularity\n");
+ return -EINVAL;
+ }
+ max_sz = max(hdrs[i].sz, max_sz);
+ }
+
+ /* Allocate single shared arg object for all headers */
+ ret = mlx5hws_arg_create(action->ctx,
+ hdrs->data,
+ max_sz,
+ log_bulk_sz,
+ action->flags & MLX5HWS_ACTION_FLAG_SHARED,
+ &arg_id);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < num_of_hdrs; i++) {
+ action[i].reformat.arg_id = arg_id;
+ action[i].reformat.header_size = hdrs[i].sz;
+ action[i].reformat.num_of_hdrs = num_of_hdrs;
+ action[i].reformat.max_hdr_sz = max_sz;
+ action[i].reformat.require_reparse = true;
+
+ if (action[i].type == MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2 ||
+ action[i].type == MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3) {
+ action[i].reformat.anchor = MLX5_HEADER_ANCHOR_PACKET_START;
+ action[i].reformat.offset = 0;
+ action[i].reformat.encap = 1;
+ }
+
+ ret = hws_action_create_stcs(&action[i], 0);
+ if (ret) {
+ mlx5hws_err(action->ctx, "Failed to create stc for reformat\n");
+ goto free_stc;
+ }
+ }
+
+ return 0;
+
+free_stc:
+ while (i--)
+ hws_action_destroy_stcs(&action[i]);
+
+ mlx5hws_arg_destroy(action->ctx, arg_id);
+ return ret;
+}
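+
+/*
+ * Note: all headers in the bulk share a single argument object sized for
+ * the largest header; each per-header action records only its own header
+ * size and the shared arg_id.
+ */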
+
+static int
+hws_action_handle_l2_to_tunnel_l3(struct mlx5hws_action *action,
+ u8 num_of_hdrs,
+ struct mlx5hws_action_reformat_header *hdrs,
+ u32 log_bulk_sz)
+{
+ int ret;
+
+ /* The action is remove-l2-header + insert-l3-header */
+ ret = hws_action_get_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3);
+ if (ret) {
+ mlx5hws_err(action->ctx, "Failed to create remove stc for reformat\n");
+ return ret;
+ }
+
+ /* Reuse the insert with pointer for the L2L3 header */
+ ret = hws_action_handle_insert_with_ptr(action,
+ num_of_hdrs,
+ hdrs,
+ log_bulk_sz);
+ if (ret)
+ goto put_shared_stc;
+
+ return 0;
+
+put_shared_stc:
+ hws_action_put_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3);
+ return ret;
+}
+
+static void hws_action_prepare_decap_l3_actions(size_t data_sz,
+ u8 *mh_data,
+ int *num_of_actions)
+{
+ int actions;
+ u32 i;
+
+ /* Remove L2L3 outer headers */
+ MLX5_SET(stc_ste_param_remove, mh_data, action_type,
+ MLX5_MODIFICATION_TYPE_REMOVE);
+ MLX5_SET(stc_ste_param_remove, mh_data, decap, 0x1);
+ MLX5_SET(stc_ste_param_remove, mh_data, remove_start_anchor,
+ MLX5_HEADER_ANCHOR_PACKET_START);
+ MLX5_SET(stc_ste_param_remove, mh_data, remove_end_anchor,
+ MLX5_HEADER_ANCHOR_INNER_IPV6_IPV4);
+ mh_data += MLX5HWS_ACTION_DOUBLE_SIZE; /* Assume every action is 2 dw */
+ actions = 1;
+
+ /* Add the new header using inline actions, 4 bytes at a time. The
+ * header is added in reverse order at the beginning of the packet to
+ * avoid incorrect parsing by the HW. Since the header is 14B or 18B,
+ * an extra two bytes are padded and later removed.
+ */
+ for (i = 0; i < data_sz / MLX5HWS_ACTION_INLINE_DATA_SIZE + 1; i++) {
+ MLX5_SET(stc_ste_param_insert, mh_data, action_type,
+ MLX5_MODIFICATION_TYPE_INSERT);
+ MLX5_SET(stc_ste_param_insert, mh_data, inline_data, 0x1);
+ MLX5_SET(stc_ste_param_insert, mh_data, insert_anchor,
+ MLX5_HEADER_ANCHOR_PACKET_START);
+ MLX5_SET(stc_ste_param_insert, mh_data, insert_size, 2);
+ mh_data += MLX5HWS_ACTION_DOUBLE_SIZE;
+ actions++;
+ }
+
+ /* Remove first 2 extra bytes */
+ MLX5_SET(stc_ste_param_remove_words, mh_data, action_type,
+ MLX5_MODIFICATION_TYPE_REMOVE_WORDS);
+ MLX5_SET(stc_ste_param_remove_words, mh_data, remove_start_anchor,
+ MLX5_HEADER_ANCHOR_PACKET_START);
+ /* The hardware expects the size here in words (2 bytes each) */
+ MLX5_SET(stc_ste_param_remove_words, mh_data, remove_size, 1);
+ actions++;
+
+ *num_of_actions = actions;
+}
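+
+/*
+ * Worked example: for a 14B L2 header (no VLAN) the list is one remove of
+ * the outer L2/L3 headers, 14 / 4 + 1 = 4 inline inserts of 4B each (16B,
+ * i.e. 2B of padding), and one remove_words of the padded 2 bytes, for six
+ * actions in total (DECAP_L3_NUM_ACTIONS_W_NO_VLAN). An 18B header (with
+ * VLAN) needs five inserts, i.e. seven actions (DECAP_L3_NUM_ACTIONS_W_VLAN).
+ */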
+
+static int
+hws_action_handle_tunnel_l3_to_l2(struct mlx5hws_action *action,
+ u8 num_of_hdrs,
+ struct mlx5hws_action_reformat_header *hdrs,
+ u32 log_bulk_sz)
+{
+ u8 mh_data[MLX5HWS_ACTION_REFORMAT_DATA_SIZE] = {0};
+ struct mlx5hws_context *ctx = action->ctx;
+ u32 arg_id, pat_id;
+ int num_of_actions;
+ int mh_data_size;
+ int ret, i;
+
+ for (i = 0; i < num_of_hdrs; i++) {
+ if (hdrs[i].sz != MLX5HWS_ACTION_HDR_LEN_L2 &&
+ hdrs[i].sz != MLX5HWS_ACTION_HDR_LEN_L2_W_VLAN) {
+ mlx5hws_err(ctx, "Data size is not supported for decap-l3\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Create a full modify header action list in case the action is shared */
+ hws_action_prepare_decap_l3_actions(hdrs->sz, mh_data, &num_of_actions);
+ if (action->flags & MLX5HWS_ACTION_FLAG_SHARED)
+ mlx5hws_action_prepare_decap_l3_data(hdrs->data, mh_data, num_of_actions);
+
+ /* All DecapL3 cases require the same max arg size */
+ ret = mlx5hws_arg_create_modify_header_arg(ctx,
+ (__be64 *)mh_data,
+ num_of_actions,
+ log_bulk_sz,
+ action->flags & MLX5HWS_ACTION_FLAG_SHARED,
+ &arg_id);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < num_of_hdrs; i++) {
+ memset(mh_data, 0, MLX5HWS_ACTION_REFORMAT_DATA_SIZE);
+ hws_action_prepare_decap_l3_actions(hdrs[i].sz, mh_data, &num_of_actions);
+ mh_data_size = num_of_actions * MLX5HWS_MODIFY_ACTION_SIZE;
+
+ ret = mlx5hws_pat_get_pattern(ctx, (__be64 *)mh_data, mh_data_size, &pat_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate pattern for DecapL3\n");
+ goto free_stc_and_pat;
+ }
+
+ action[i].modify_header.max_num_of_actions = num_of_actions;
+ action[i].modify_header.num_of_actions = num_of_actions;
+ action[i].modify_header.num_of_patterns = num_of_hdrs;
+ action[i].modify_header.arg_id = arg_id;
+ action[i].modify_header.pat_id = pat_id;
+ action[i].modify_header.require_reparse =
+ mlx5hws_pat_require_reparse((__be64 *)mh_data, num_of_actions);
+
+ ret = hws_action_create_stcs(&action[i], 0);
+ if (ret) {
+ mlx5hws_pat_put_pattern(ctx, pat_id);
+ goto free_stc_and_pat;
+ }
+ }
+
+ return 0;
+
+free_stc_and_pat:
+ while (i--) {
+ hws_action_destroy_stcs(&action[i]);
+ mlx5hws_pat_put_pattern(ctx, action[i].modify_header.pat_id);
+ }
+
+ mlx5hws_arg_destroy(action->ctx, arg_id);
+ return ret;
+}
+
+static int
+hws_action_create_reformat_hws(struct mlx5hws_action *action,
+ u8 num_of_hdrs,
+ struct mlx5hws_action_reformat_header *hdrs,
+ u32 bulk_size)
+{
+ int ret;
+
+ switch (action->type) {
+ case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2:
+ ret = hws_action_create_stcs(action, 0);
+ break;
+ case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
+ ret = hws_action_handle_insert_with_ptr(action, num_of_hdrs, hdrs, bulk_size);
+ break;
+ case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
+ ret = hws_action_handle_l2_to_tunnel_l3(action, num_of_hdrs, hdrs, bulk_size);
+ break;
+ case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
+ ret = hws_action_handle_tunnel_l3_to_l2(action, num_of_hdrs, hdrs, bulk_size);
+ break;
+ default:
+ mlx5hws_err(action->ctx, "Invalid HWS reformat action type\n");
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_reformat(struct mlx5hws_context *ctx,
+ enum mlx5hws_action_type reformat_type,
+ u8 num_of_hdrs,
+ struct mlx5hws_action_reformat_header *hdrs,
+ u32 log_bulk_size,
+ u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ if (!num_of_hdrs) {
+ mlx5hws_err(ctx, "Reformat num_of_hdrs cannot be zero\n");
+ return NULL;
+ }
+
+ action = hws_action_create_generic_bulk(ctx, flags, reformat_type, num_of_hdrs);
+ if (!action)
+ return NULL;
+
+ if ((flags & MLX5HWS_ACTION_FLAG_SHARED) && (log_bulk_size || num_of_hdrs > 1)) {
+ mlx5hws_err(ctx, "Reformat flags don't fit HWS (flags: 0x%x)\n", flags);
+ goto free_action;
+ }
+
+ ret = hws_action_create_reformat_hws(action, num_of_hdrs, hdrs, log_bulk_size);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to create HWS reformat action\n");
+ goto free_action;
+ }
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
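+
+/*
+ * Illustrative usage sketch (not part of this patch): a shared single-header
+ * L2-to-tunnel-L2 encap; encap_hdr/encap_hdr_len are the caller-built
+ * encapsulation header:
+ *
+ *	struct mlx5hws_action_reformat_header hdr = {
+ *		.sz = encap_hdr_len,
+ *		.data = encap_hdr,
+ *	};
+ *
+ *	encap = mlx5hws_action_create_reformat(ctx,
+ *				MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2,
+ *				1, &hdr, 0,
+ *				MLX5HWS_ACTION_FLAG_HWS_FDB |
+ *				MLX5HWS_ACTION_FLAG_SHARED);
+ */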
+
+static int
+hws_action_create_modify_header_hws(struct mlx5hws_action *action,
+ u8 num_of_patterns,
+ struct mlx5hws_action_mh_pattern *pattern,
+ u32 log_bulk_size)
+{
+ u16 num_actions, max_mh_actions = 0, hw_max_actions;
+ struct mlx5hws_context *ctx = action->ctx;
+ int i, ret, size_in_bytes;
+ u32 pat_id, arg_id = 0;
+ __be64 *new_pattern;
+ size_t pat_max_sz;
+
+ pat_max_sz = MLX5HWS_ARG_CHUNK_SIZE_MAX * MLX5HWS_ARG_DATA_SIZE;
+ hw_max_actions = pat_max_sz / MLX5HWS_MODIFY_ACTION_SIZE;
+ size_in_bytes = pat_max_sz * sizeof(__be64);
+ new_pattern = kcalloc(num_of_patterns, size_in_bytes, GFP_KERNEL);
+ if (!new_pattern)
+ return -ENOMEM;
+
+ /* Calculate maximum number of mh actions for shared arg allocation */
+ for (i = 0; i < num_of_patterns; i++) {
+ size_t new_num_actions;
+ size_t cur_num_actions;
+ u32 nop_locations;
+
+ cur_num_actions = pattern[i].sz / MLX5HWS_MODIFY_ACTION_SIZE;
+
+ ret = mlx5hws_pat_calc_nop(pattern[i].data, cur_num_actions,
+ hw_max_actions, &new_num_actions,
+ &nop_locations,
+ &new_pattern[i * pat_max_sz]);
+ if (ret) {
+ mlx5hws_err(ctx, "Too many actions after nop insertion\n");
+ goto free_new_pat;
+ }
+
+ action[i].modify_header.nop_locations = nop_locations;
+ action[i].modify_header.num_of_actions = new_num_actions;
+
+ max_mh_actions = max(max_mh_actions, new_num_actions);
+ }
+
+ if (mlx5hws_arg_get_arg_log_size(max_mh_actions) >= MLX5HWS_ARG_CHUNK_SIZE_MAX) {
+ mlx5hws_err(ctx, "Num of actions (%d) bigger than allowed\n",
+ max_mh_actions);
+ ret = -EINVAL;
+ goto free_new_pat;
+ }
+
+ /* Allocate single shared arg for all patterns based on the max size */
+ if (max_mh_actions > 1) {
+ ret = mlx5hws_arg_create_modify_header_arg(ctx,
+ pattern->data,
+ max_mh_actions,
+ log_bulk_size,
+ action->flags &
+ MLX5HWS_ACTION_FLAG_SHARED,
+ &arg_id);
+ if (ret)
+ goto free_new_pat;
+ }
+
+ for (i = 0; i < num_of_patterns; i++) {
+ if (!mlx5hws_pat_verify_actions(ctx, pattern[i].data, pattern[i].sz)) {
+ mlx5hws_err(ctx, "Fail to verify pattern modify actions\n");
+ ret = -EINVAL;
+ goto free_stc_and_pat;
+ }
+ num_actions = pattern[i].sz / MLX5HWS_MODIFY_ACTION_SIZE;
+ action[i].modify_header.num_of_patterns = num_of_patterns;
+ action[i].modify_header.max_num_of_actions = max_mh_actions;
+
+ action[i].modify_header.require_reparse =
+ mlx5hws_pat_require_reparse(pattern[i].data, num_actions);
+
+ if (num_actions == 1) {
+ pat_id = 0;
+ /* Optimize single modify action to be used inline */
+ action[i].modify_header.single_action = pattern[i].data[0];
+ action[i].modify_header.single_action_type =
+ MLX5_GET(set_action_in, pattern[i].data, action_type);
+ } else {
+ /* Multiple modify actions require a pattern */
+ if (unlikely(action[i].modify_header.nop_locations)) {
+ size_t pattern_sz;
+
+ pattern_sz = action[i].modify_header.num_of_actions *
+ MLX5HWS_MODIFY_ACTION_SIZE;
+ ret =
+ mlx5hws_pat_get_pattern(ctx,
+ &new_pattern[i * pat_max_sz],
+ pattern_sz, &pat_id);
+ } else {
+ ret = mlx5hws_pat_get_pattern(ctx,
+ pattern[i].data,
+ pattern[i].sz,
+ &pat_id);
+ }
+ if (ret) {
+ mlx5hws_err(ctx,
+ "Failed to allocate pattern for modify header\n");
+ goto free_stc_and_pat;
+ }
+
+ action[i].modify_header.arg_id = arg_id;
+ action[i].modify_header.pat_id = pat_id;
+ }
+ /* Allocate STC for each action representing a header */
+ ret = hws_action_create_stcs(&action[i], 0);
+ if (ret) {
+ if (pat_id)
+ mlx5hws_pat_put_pattern(ctx, pat_id);
+ goto free_stc_and_pat;
+ }
+ }
+
+ kfree(new_pattern);
+ return 0;
+
+free_stc_and_pat:
+ while (i--) {
+ hws_action_destroy_stcs(&action[i]);
+ if (action[i].modify_header.pat_id)
+ mlx5hws_pat_put_pattern(ctx, action[i].modify_header.pat_id);
+ }
+
+ if (arg_id)
+ mlx5hws_arg_destroy(ctx, arg_id);
+free_new_pat:
+ kfree(new_pattern);
+ return ret;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_modify_header(struct mlx5hws_context *ctx,
+ u8 num_of_patterns,
+ struct mlx5hws_action_mh_pattern *patterns,
+ u32 log_bulk_size,
+ u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ if (!num_of_patterns) {
+ mlx5hws_err(ctx, "Invalid number of patterns\n");
+ return NULL;
+ }
+ action = hws_action_create_generic_bulk(ctx, flags,
+ MLX5HWS_ACTION_TYP_MODIFY_HDR,
+ num_of_patterns);
+ if (!action)
+ return NULL;
+
+ if ((flags & MLX5HWS_ACTION_FLAG_SHARED) && (log_bulk_size || num_of_patterns > 1)) {
+ mlx5hws_err(ctx, "Action cannot be shared with requested pattern or size\n");
+ goto free_action;
+ }
+
+ ret = hws_action_create_modify_header_hws(action,
+ num_of_patterns,
+ patterns,
+ log_bulk_size);
+ if (ret)
+ goto free_action;
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_dest_array(struct mlx5hws_context *ctx, size_t num_dest,
+ struct mlx5hws_action_dest_attr *dests,
+ u32 flags)
+{
+ struct mlx5hws_cmd_set_fte_dest *dest_list = NULL;
+ struct mlx5hws_cmd_ft_create_attr ft_attr = {0};
+ struct mlx5hws_cmd_set_fte_attr fte_attr = {0};
+ struct mlx5hws_cmd_forward_tbl *fw_island;
+ struct mlx5hws_action *action;
+ int ret, last_dest_idx = -1;
+ u32 i;
+
+ if (num_dest <= 1) {
+ mlx5hws_err(ctx, "Action must have multiple dests\n");
+ return NULL;
+ }
+
+ if (flags == (MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED)) {
+ ft_attr.type = FS_FT_FDB;
+ ft_attr.level = ctx->caps->fdb_ft.max_level - 1;
+ } else {
+ mlx5hws_err(ctx, "Action flags not supported\n");
+ return NULL;
+ }
+
+ dest_list = kcalloc(num_dest, sizeof(*dest_list), GFP_KERNEL);
+ if (!dest_list)
+ return NULL;
+
+ for (i = 0; i < num_dest; i++) {
+ enum mlx5hws_action_type action_type = dests[i].dest->type;
+ struct mlx5hws_action *reformat_action = dests[i].reformat;
+
+ switch (action_type) {
+ case MLX5HWS_ACTION_TYP_TBL:
+ dest_list[i].destination_type =
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest_list[i].destination_id = dests[i].dest->dest_obj.obj_id;
+ fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ fte_attr.ignore_flow_level = 1;
+ if (dests[i].is_wire_ft)
+ last_dest_idx = i;
+ break;
+ case MLX5HWS_ACTION_TYP_VPORT:
+ dest_list[i].destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest_list[i].destination_id = dests[i].dest->vport.vport_num;
+ fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ if (ctx->caps->merged_eswitch) {
+ dest_list[i].ext_flags |=
+ MLX5HWS_CMD_EXT_DEST_ESW_OWNER_VHCA_ID;
+ dest_list[i].esw_owner_vhca_id =
+ dests[i].dest->vport.esw_owner_vhca_id;
+ }
+ break;
+ default:
+ mlx5hws_err(ctx, "Unsupported action in dest_array\n");
+ goto free_dest_list;
+ }
+
+ if (reformat_action) {
+ mlx5hws_err(ctx, "dest_array with reformat action - unsupported\n");
+ goto free_dest_list;
+ }
+ }
+
+ if (last_dest_idx != -1)
+ swap(dest_list[last_dest_idx], dest_list[num_dest - 1]);
+
+ fte_attr.dests_num = num_dest;
+ fte_attr.dests = dest_list;
+
+ fw_island = mlx5hws_cmd_forward_tbl_create(ctx->mdev, &ft_attr, &fte_attr);
+ if (!fw_island)
+ goto free_dest_list;
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_DEST_ARRAY);
+ if (!action)
+ goto destroy_fw_island;
+
+ ret = hws_action_create_stcs(action, fw_island->ft_id);
+ if (ret)
+ goto free_action;
+
+ action->dest_array.fw_island = fw_island;
+ action->dest_array.num_dest = num_dest;
+ action->dest_array.dest_list = dest_list;
+
+ return action;
+
+free_action:
+ kfree(action);
+destroy_fw_island:
+ mlx5hws_cmd_forward_tbl_destroy(ctx->mdev, fw_island);
+free_dest_list:
+ for (i = 0; i < num_dest; i++) {
+ if (dest_list[i].ext_reformat_id)
+ mlx5hws_cmd_packet_reformat_destroy(ctx->mdev,
+ dest_list[i].ext_reformat_id);
+ }
+ kfree(dest_list);
+ return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_insert_header(struct mlx5hws_context *ctx,
+ u8 num_of_hdrs,
+ struct mlx5hws_action_insert_header *hdrs,
+ u32 log_bulk_size,
+ u32 flags)
+{
+ struct mlx5hws_action_reformat_header *reformat_hdrs;
+ struct mlx5hws_action *action;
+ int ret;
+ int i;
+
+ action = hws_action_create_generic_bulk(ctx, flags,
+ MLX5HWS_ACTION_TYP_INSERT_HEADER,
+ num_of_hdrs);
+ if (!action)
+ return NULL;
+
+ reformat_hdrs = kcalloc(num_of_hdrs, sizeof(*reformat_hdrs), GFP_KERNEL);
+ if (!reformat_hdrs)
+ goto free_action;
+
+ for (i = 0; i < num_of_hdrs; i++) {
+ if (hdrs[i].offset % W_SIZE != 0) {
+ mlx5hws_err(ctx, "Header offset should be in WORD granularity\n");
+ goto free_reformat_hdrs;
+ }
+
+ action[i].reformat.anchor = hdrs[i].anchor;
+ action[i].reformat.encap = hdrs[i].encap;
+ action[i].reformat.offset = hdrs[i].offset;
+
+ reformat_hdrs[i].sz = hdrs[i].hdr.sz;
+ reformat_hdrs[i].data = hdrs[i].hdr.data;
+ }
+
+ ret = hws_action_handle_insert_with_ptr(action, num_of_hdrs,
+ reformat_hdrs, log_bulk_size);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to create HWS reformat action\n");
+ goto free_reformat_hdrs;
+ }
+
+ kfree(reformat_hdrs);
+
+ return action;
+
+free_reformat_hdrs:
+ kfree(reformat_hdrs);
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_remove_header(struct mlx5hws_context *ctx,
+ struct mlx5hws_action_remove_header_attr *attr,
+ u32 flags)
+{
+ struct mlx5hws_action *action;
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_REMOVE_HEADER);
+ if (!action)
+ return NULL;
+
+ /* Support only removing a header by anchor with a given size */
+ if (attr->size % W_SIZE != 0) {
+ mlx5hws_err(ctx,
+ "Invalid size, HW supports header remove in WORD granularity\n");
+ goto free_action;
+ }
+
+ if (attr->size > MLX5HWS_ACTION_REMOVE_HEADER_MAX_SIZE) {
+ mlx5hws_err(ctx, "Header removal size limited to %u bytes\n",
+ MLX5HWS_ACTION_REMOVE_HEADER_MAX_SIZE);
+ goto free_action;
+ }
+
+ action->remove_header.anchor = attr->anchor;
+ action->remove_header.size = attr->size / W_SIZE;
+
+ if (hws_action_create_stcs(action, 0))
+ goto free_action;
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+static struct mlx5hws_definer *
+hws_action_create_dest_match_range_definer(struct mlx5hws_context *ctx)
+{
+ struct mlx5hws_definer *definer;
+ __be32 *tag;
+ int ret;
+
+ definer = kzalloc(sizeof(*definer), GFP_KERNEL);
+ if (!definer)
+ return NULL;
+
+ definer->dw_selector[0] = MLX5_IFC_DEFINER_FORMAT_OFFSET_OUTER_ETH_PKT_LEN / 4;
+ /* Set DW0 tag mask */
+ tag = (__force __be32 *)definer->mask.jumbo;
+ tag[MLX5HWS_RULE_JUMBO_MATCH_TAG_OFFSET_DW0] = htonl(0xffffUL << 16);
+
+ mutex_lock(&ctx->ctrl_lock);
+
+ ret = mlx5hws_definer_get_obj(ctx, definer);
+ if (ret < 0) {
+ mutex_unlock(&ctx->ctrl_lock);
+ kfree(definer);
+ return NULL;
+ }
+
+ mutex_unlock(&ctx->ctrl_lock);
+ definer->obj_id = ret;
+
+ return definer;
+}
+
+static struct mlx5hws_range_action_table *
+hws_action_create_dest_match_range_table(struct mlx5hws_context *ctx,
+ struct mlx5hws_definer *definer,
+ u32 miss_ft_id)
+{
+ struct mlx5hws_cmd_rtc_create_attr rtc_attr = {0};
+ struct mlx5hws_range_action_table *table_ste;
+ struct mlx5hws_pool_attr pool_attr = {0};
+ struct mlx5hws_pool *ste_pool, *stc_pool;
+ u32 *rtc_0_id, *rtc_1_id;
+ u32 obj_id;
+ int ret;
+
+ /* Check if STE range is supported */
+ if (!IS_BIT_SET(ctx->caps->supp_ste_format_gen_wqe, MLX5_IFC_RTC_STE_FORMAT_RANGE)) {
+ mlx5hws_err(ctx, "Range STE format not supported\n");
+ return NULL;
+ }
+
+ table_ste = kzalloc(sizeof(*table_ste), GFP_KERNEL);
+ if (!table_ste)
+ return NULL;
+
+ mutex_lock(&ctx->ctrl_lock);
+
+ pool_attr.table_type = MLX5HWS_TABLE_TYPE_FDB;
+ pool_attr.pool_type = MLX5HWS_POOL_TYPE_STE;
+ pool_attr.alloc_log_sz = 1;
+ table_ste->pool = mlx5hws_pool_create(ctx, &pool_attr);
+ if (!table_ste->pool) {
+ mlx5hws_err(ctx, "Failed to allocate memory ste pool\n");
+ goto free_ste;
+ }
+
+ /* Allocate RTC */
+ rtc_0_id = &table_ste->rtc_0_id;
+ rtc_1_id = &table_ste->rtc_1_id;
+ ste_pool = table_ste->pool;
+
+ rtc_attr.log_size = 0;
+ rtc_attr.log_depth = 0;
+ rtc_attr.miss_ft_id = miss_ft_id;
+ rtc_attr.num_hash_definer = 1;
+ rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH;
+ rtc_attr.access_index_mode = MLX5_IFC_RTC_STE_ACCESS_MODE_BY_HASH;
+ rtc_attr.match_definer_0 = ctx->caps->trivial_match_definer;
+ rtc_attr.fw_gen_wqe = true;
+ rtc_attr.is_scnd_range = true;
+
+ obj_id = mlx5hws_pool_get_base_id(ste_pool);
+
+ rtc_attr.pd = ctx->pd_num;
+ rtc_attr.ste_base = obj_id;
+ rtc_attr.reparse_mode = mlx5hws_context_get_reparse_mode(ctx);
+ rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(MLX5HWS_TABLE_TYPE_FDB, false);
+
+ /* STC is a single resource (obj_id), use any STC for the ID */
+ stc_pool = ctx->stc_pool;
+ obj_id = mlx5hws_pool_get_base_id(stc_pool);
+ rtc_attr.stc_base = obj_id;
+
+ ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_0_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to create RTC");
+ goto pool_destroy;
+ }
+
+ /* Create mirror RTC */
+ obj_id = mlx5hws_pool_get_base_mirror_id(ste_pool);
+ rtc_attr.ste_base = obj_id;
+ rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(MLX5HWS_TABLE_TYPE_FDB, true);
+
+ obj_id = mlx5hws_pool_get_base_mirror_id(stc_pool);
+ rtc_attr.stc_base = obj_id;
+
+ ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_1_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to create mirror RTC");
+ goto destroy_rtc_0;
+ }
+
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return table_ste;
+
+destroy_rtc_0:
+ mlx5hws_cmd_rtc_destroy(ctx->mdev, *rtc_0_id);
+pool_destroy:
+ mlx5hws_pool_destroy(table_ste->pool);
+free_ste:
+ mutex_unlock(&ctx->ctrl_lock);
+ kfree(table_ste);
+ return NULL;
+}
+
+static void hws_action_destroy_dest_match_range_table(
+ struct mlx5hws_context *ctx,
+ struct mlx5hws_range_action_table *table_ste)
+{
+ mutex_lock(&ctx->ctrl_lock);
+
+ mlx5hws_cmd_rtc_destroy(ctx->mdev, table_ste->rtc_1_id);
+ mlx5hws_cmd_rtc_destroy(ctx->mdev, table_ste->rtc_0_id);
+ mlx5hws_pool_destroy(table_ste->pool);
+ kfree(table_ste);
+
+ mutex_unlock(&ctx->ctrl_lock);
+}
+
+static int hws_action_create_dest_match_range_fill_table(
+ struct mlx5hws_context *ctx,
+ struct mlx5hws_range_action_table *table_ste,
+ struct mlx5hws_action *hit_ft_action,
+ struct mlx5hws_definer *range_definer, u32 min, u32 max)
+{
+ struct mlx5hws_wqe_gta_data_seg_ste match_wqe_data = {0};
+ struct mlx5hws_wqe_gta_data_seg_ste range_wqe_data = {0};
+ struct mlx5hws_wqe_gta_ctrl_seg wqe_ctrl = {0};
+ u32 no_use, used_rtc_0_id, used_rtc_1_id, ret;
+ struct mlx5hws_context_common_res *common_res;
+ struct mlx5hws_send_ste_attr ste_attr = {0};
+ struct mlx5hws_send_engine *queue;
+ __be32 *wqe_data_arr;
+
+ mutex_lock(&ctx->ctrl_lock);
+
+ /* Get the control queue */
+ queue = &ctx->send_queue[ctx->queues - 1];
+ if (unlikely(mlx5hws_send_engine_err(queue))) {
+ ret = -EIO;
+ goto error;
+ }
+
+ /* Init default send STE attributes */
+ ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_ACTIVATE;
+ ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
+ ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
+ ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
+ ste_attr.send_attr.user_data = &no_use;
+ ste_attr.send_attr.rule = NULL;
+ ste_attr.send_attr.fence = 1;
+ ste_attr.send_attr.notify_hw = true;
+ ste_attr.rtc_0 = table_ste->rtc_0_id;
+ ste_attr.rtc_1 = table_ste->rtc_1_id;
+ ste_attr.used_id_rtc_0 = &used_rtc_0_id;
+ ste_attr.used_id_rtc_1 = &used_rtc_1_id;
+
+ common_res = &ctx->common_res;
+
+ /* init an empty match STE which will always hit */
+ ste_attr.wqe_ctrl = &wqe_ctrl;
+ ste_attr.wqe_data = &match_wqe_data;
+ ste_attr.send_attr.match_definer_id = ctx->caps->trivial_match_definer;
+
+ /* Fill WQE control data */
+ wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_CTRL] =
+ htonl(common_res->default_stc->nop_ctr.offset);
+ wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_DW5] =
+ htonl(common_res->default_stc->nop_dw5.offset);
+ wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] =
+ htonl(common_res->default_stc->nop_dw6.offset);
+ wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] =
+ htonl(common_res->default_stc->nop_dw7.offset);
+ wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_CTRL] |=
+ htonl(MLX5HWS_ACTION_STC_IDX_LAST_COMBO2 << 29);
+ wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_HIT] =
+ htonl(hit_ft_action->stc.offset);
+
+ wqe_data_arr = (__force __be32 *)&range_wqe_data;
+
+ ste_attr.range_wqe_data = &range_wqe_data;
+ ste_attr.send_attr.len += MLX5HWS_WQE_SZ_GTA_DATA;
+ ste_attr.send_attr.range_definer_id = mlx5hws_definer_get_id(range_definer);
+
+ /* Fill range matching fields:
+ * min/max_value_2 correspond to match_dw_0 in the definer;
+ * min_value_2 is set in DW0 of the STE and max_value_2 in DW1 of the STE.
+ */
+ wqe_data_arr[MLX5HWS_MATCHER_OFFSET_TAG_DW0] = htonl(min << 16);
+ wqe_data_arr[MLX5HWS_MATCHER_OFFSET_TAG_DW1] = htonl(max << 16);
+
+ /* Send WQEs to FW */
+ mlx5hws_send_stes_fw(ctx, queue, &ste_attr);
+
+ /* Poll for completion */
+ ret = mlx5hws_send_queue_action(ctx, ctx->queues - 1,
+ MLX5HWS_SEND_QUEUE_ACTION_DRAIN_SYNC);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to drain control queue");
+ goto error;
+ }
+
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return 0;
+
+error:
+ mutex_unlock(&ctx->ctrl_lock);
+ return ret;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_dest_match_range(struct mlx5hws_context *ctx,
+ u32 field,
+ struct mlx5_flow_table *hit_ft,
+ struct mlx5_flow_table *miss_ft,
+ u32 min, u32 max, u32 flags)
+{
+ struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
+ struct mlx5hws_range_action_table *table_ste;
+ struct mlx5hws_action *hit_ft_action;
+ struct mlx5hws_definer *definer;
+ struct mlx5hws_action *action;
+ u32 miss_ft_id = miss_ft->id;
+ u32 hit_ft_id = hit_ft->id;
+ int ret;
+
+ if (field != MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN ||
+ min > 0xffff || max > 0xffff) {
+ mlx5hws_err(ctx, "Invalid match range parameters\n");
+ return NULL;
+ }
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_RANGE);
+ if (!action)
+ return NULL;
+
+ definer = hws_action_create_dest_match_range_definer(ctx);
+ if (!definer)
+ goto free_action;
+
+ table_ste = hws_action_create_dest_match_range_table(ctx, definer, miss_ft_id);
+ if (!table_ste)
+ goto destroy_definer;
+
+ hit_ft_action = mlx5hws_action_create_dest_table_num(ctx, hit_ft_id, flags);
+ if (!hit_ft_action)
+ goto destroy_table_ste;
+
+ ret = hws_action_create_dest_match_range_fill_table(ctx, table_ste,
+ hit_ft_action,
+ definer, min, max);
+ if (ret)
+ goto destroy_hit_ft_action;
+
+ action->range.table_ste = table_ste;
+ action->range.definer = definer;
+ action->range.hit_ft_action = hit_ft_action;
+
+ /* Allocate STC for jumps to STE */
+ mutex_lock(&ctx->ctrl_lock);
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE;
+ stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+ stc_attr.ste_table.ste_pool = table_ste->pool;
+ stc_attr.ste_table.match_definer_id = ctx->caps->trivial_match_definer;
+
+ ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, MLX5HWS_TABLE_TYPE_FDB,
+ &action->stc);
+ if (ret)
+ goto error_unlock;
+
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return action;
+
+error_unlock:
+ mutex_unlock(&ctx->ctrl_lock);
+destroy_hit_ft_action:
+ mlx5hws_action_destroy(hit_ft_action);
+destroy_table_ste:
+ hws_action_destroy_dest_match_range_table(ctx, table_ste);
+destroy_definer:
+ mlx5hws_definer_free(ctx, definer);
+free_action:
+ kfree(action);
+ mlx5hws_err(ctx, "Failed to create action dest match range");
+ return NULL;
+}
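+
+/*
+ * Illustrative usage sketch (not part of this patch): send packets whose
+ * outer packet length is between 64 and 1500 to hit_ft and everything else
+ * to miss_ft (hit_ft/miss_ft are caller-owned flow tables):
+ *
+ *	range = mlx5hws_action_create_dest_match_range(ctx,
+ *				MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN,
+ *				hit_ft, miss_ft, 64, 1500,
+ *				MLX5HWS_ACTION_FLAG_HWS_FDB);
+ */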
+
+struct mlx5hws_action *
+mlx5hws_action_create_last(struct mlx5hws_context *ctx, u32 flags)
+{
+ return hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_LAST);
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_flow_sampler(struct mlx5hws_context *ctx,
+ u32 sampler_id, u32 flags)
+{
+ struct mlx5hws_cmd_ft_create_attr ft_attr = {0};
+ struct mlx5hws_cmd_set_fte_attr fte_attr = {0};
+ struct mlx5hws_cmd_forward_tbl *fw_island;
+ struct mlx5hws_cmd_set_fte_dest dest;
+ struct mlx5hws_action *action;
+ int ret;
+
+ if (flags != (MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED)) {
+ mlx5hws_err(ctx, "Unsupported flags for flow sampler\n");
+ return NULL;
+ }
+
+ ft_attr.type = FS_FT_FDB;
+ ft_attr.level = ctx->caps->fdb_ft.max_level - 1;
+
+ dest.destination_type = MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
+ dest.destination_id = sampler_id;
+
+ fte_attr.dests_num = 1;
+ fte_attr.dests = &dest;
+ fte_attr.action_flags = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ fte_attr.ignore_flow_level = 1;
+
+ fw_island = mlx5hws_cmd_forward_tbl_create(ctx->mdev, &ft_attr, &fte_attr);
+ if (!fw_island)
+ return NULL;
+
+ action = hws_action_create_generic(ctx, flags,
+ MLX5HWS_ACTION_TYP_SAMPLER);
+ if (!action)
+ goto destroy_fw_island;
+
+ ret = hws_action_create_stcs(action, fw_island->ft_id);
+ if (ret)
+ goto free_action;
+
+ action->flow_sampler.fw_island = fw_island;
+
+ return action;
+
+free_action:
+ kfree(action);
+destroy_fw_island:
+ mlx5hws_cmd_forward_tbl_destroy(ctx->mdev, fw_island);
+ return NULL;
+}
+
+static void hws_action_destroy_hws(struct mlx5hws_action *action)
+{
+ u32 ext_reformat_id;
+ bool shared_arg;
+ u32 obj_id;
+ u32 i;
+
+ switch (action->type) {
+ case MLX5HWS_ACTION_TYP_MISS:
+ case MLX5HWS_ACTION_TYP_TAG:
+ case MLX5HWS_ACTION_TYP_DROP:
+ case MLX5HWS_ACTION_TYP_CTR:
+ case MLX5HWS_ACTION_TYP_TBL:
+ case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2:
+ case MLX5HWS_ACTION_TYP_ASO_METER:
+ case MLX5HWS_ACTION_TYP_PUSH_VLAN:
+ case MLX5HWS_ACTION_TYP_REMOVE_HEADER:
+ case MLX5HWS_ACTION_TYP_VPORT:
+ hws_action_destroy_stcs(action);
+ break;
+ case MLX5HWS_ACTION_TYP_POP_VLAN:
+ hws_action_destroy_stcs(action);
+ hws_action_put_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP);
+ break;
+ case MLX5HWS_ACTION_TYP_DEST_ARRAY:
+ hws_action_destroy_stcs(action);
+ mlx5hws_cmd_forward_tbl_destroy(action->ctx->mdev, action->dest_array.fw_island);
+ for (i = 0; i < action->dest_array.num_dest; i++) {
+ ext_reformat_id = action->dest_array.dest_list[i].ext_reformat_id;
+ if (ext_reformat_id)
+ mlx5hws_cmd_packet_reformat_destroy(action->ctx->mdev,
+ ext_reformat_id);
+ }
+ kfree(action->dest_array.dest_list);
+ break;
+ case MLX5HWS_ACTION_TYP_SAMPLER:
+ hws_action_destroy_stcs(action);
+ mlx5hws_cmd_forward_tbl_destroy(action->ctx->mdev,
+ action->flow_sampler.fw_island);
+ break;
+ case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
+ case MLX5HWS_ACTION_TYP_MODIFY_HDR:
+ shared_arg = false;
+ for (i = 0; i < action->modify_header.num_of_patterns; i++) {
+ hws_action_destroy_stcs(&action[i]);
+ if (action[i].modify_header.num_of_actions > 1) {
+ mlx5hws_pat_put_pattern(action[i].ctx,
+ action[i].modify_header.pat_id);
+ /* Save shared arg object to be freed after */
+ obj_id = action[i].modify_header.arg_id;
+ shared_arg = true;
+ }
+ }
+ if (shared_arg)
+ mlx5hws_arg_destroy(action->ctx, obj_id);
+ break;
+ case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
+ hws_action_put_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3);
+ for (i = 0; i < action->reformat.num_of_hdrs; i++)
+ hws_action_destroy_stcs(&action[i]);
+ mlx5hws_arg_destroy(action->ctx, action->reformat.arg_id);
+ break;
+ case MLX5HWS_ACTION_TYP_INSERT_HEADER:
+ case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
+ for (i = 0; i < action->reformat.num_of_hdrs; i++)
+ hws_action_destroy_stcs(&action[i]);
+ mlx5hws_arg_destroy(action->ctx, action->reformat.arg_id);
+ break;
+ case MLX5HWS_ACTION_TYP_RANGE:
+ hws_action_destroy_stcs(action);
+ hws_action_destroy_dest_match_range_table(action->ctx, action->range.table_ste);
+ mlx5hws_definer_free(action->ctx, action->range.definer);
+ mlx5hws_action_destroy(action->range.hit_ft_action);
+ break;
+ case MLX5HWS_ACTION_TYP_LAST:
+ break;
+ default:
+ pr_warn("HWS: Invalid action type: %d\n", action->type);
+ }
+}
+
+int mlx5hws_action_destroy(struct mlx5hws_action *action)
+{
+ hws_action_destroy_hws(action);
+
+ kfree(action);
+ return 0;
+}
+
+int mlx5hws_action_get_default_stc(struct mlx5hws_context *ctx, u8 tbl_type)
+__must_hold(&ctx->ctrl_lock)
+{
+ struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
+ struct mlx5hws_action_default_stc *default_stc;
+ int ret;
+
+ if (ctx->common_res.default_stc) {
+ ctx->common_res.default_stc->refcount++;
+ return 0;
+ }
+
+ default_stc = kzalloc(sizeof(*default_stc), GFP_KERNEL);
+ if (!default_stc)
+ return -ENOMEM;
+
+ stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_NOP;
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW0;
+ stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+ ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
+ &default_stc->nop_ctr);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate default counter STC\n");
+ goto free_default_stc;
+ }
+
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW5;
+ ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
+ &default_stc->nop_dw5);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate default NOP DW5 STC\n");
+ goto free_nop_ctr;
+ }
+
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW6;
+ ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
+ &default_stc->nop_dw6);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate default NOP DW6 STC\n");
+ goto free_nop_dw5;
+ }
+
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW7;
+ ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
+ &default_stc->nop_dw7);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate default NOP DW7 STC\n");
+ goto free_nop_dw6;
+ }
+
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_ALLOW;
+
+ ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
+ &default_stc->default_hit);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate default allow STC\n");
+ goto free_nop_dw7;
+ }
+
+ ctx->common_res.default_stc = default_stc;
+ ctx->common_res.default_stc->refcount++;
+
+ return 0;
+
+free_nop_dw7:
+ mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw7);
+free_nop_dw6:
+ mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw6);
+free_nop_dw5:
+ mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw5);
+free_nop_ctr:
+ mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_ctr);
+free_default_stc:
+ kfree(default_stc);
+ return ret;
+}
+
+void mlx5hws_action_put_default_stc(struct mlx5hws_context *ctx, u8 tbl_type)
+__must_hold(&ctx->ctrl_lock)
+{
+ struct mlx5hws_action_default_stc *default_stc;
+
+ default_stc = ctx->common_res.default_stc;
+ if (--default_stc->refcount)
+ return;
+
+ mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->default_hit);
+ mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw7);
+ mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw6);
+ mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw5);
+ mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_ctr);
+ kfree(default_stc);
+ ctx->common_res.default_stc = NULL;
+}
+
+static void hws_action_modify_write(struct mlx5hws_send_engine *queue,
+ u32 arg_idx,
+ u8 *arg_data,
+ u16 num_of_actions,
+ u32 nop_locations)
+{
+ u8 *new_arg_data = NULL;
+ int i, j;
+
+ if (unlikely(nop_locations)) {
+ new_arg_data = kcalloc(num_of_actions,
+ MLX5HWS_MODIFY_ACTION_SIZE, GFP_KERNEL);
+ if (unlikely(!new_arg_data))
+ return;
+
+ for (i = 0, j = 0; j < num_of_actions; i++, j++) {
+ if (BIT(i) & nop_locations)
+ j++;
+ memcpy(&new_arg_data[j * MLX5HWS_MODIFY_ACTION_SIZE],
+ &arg_data[i * MLX5HWS_MODIFY_ACTION_SIZE],
+ MLX5HWS_MODIFY_ACTION_SIZE);
+ }
+ }
+
+ mlx5hws_arg_write(queue, NULL, arg_idx,
+ new_arg_data ? new_arg_data : arg_data,
+ num_of_actions * MLX5HWS_MODIFY_ACTION_SIZE);
+
+ kfree(new_arg_data);
+}
+
+void mlx5hws_action_prepare_decap_l3_data(u8 *src, u8 *dst, u16 num_of_actions)
+{
+ u8 *e_src;
+ int i;
+
+ /* num_of_actions = remove l3l2 + 4/5 inserts + remove extra 2 bytes.
+ * Copy from the end of src to the start of dst.
+ * Move e_src to the end of the header; 2 is the leftover from 14B or 18B.
+ */
+ if (num_of_actions == DECAP_L3_NUM_ACTIONS_W_NO_VLAN)
+ e_src = src + MLX5HWS_ACTION_HDR_LEN_L2;
+ else
+ e_src = src + MLX5HWS_ACTION_HDR_LEN_L2_W_VLAN;
+
+ /* Move dst over the first remove action + zero data */
+ dst += MLX5HWS_ACTION_DOUBLE_SIZE;
+ /* Move dst over the first insert ctrl action */
+ dst += MLX5HWS_ACTION_DOUBLE_SIZE / 2;
+ /* Actions:
+ * no vlan: r_h-insert_4b-insert_4b-insert_4b-insert_4b-remove_2b.
+ * with vlan: r_h-insert_4b-insert_4b-insert_4b-insert_4b-insert_4b-remove_2b.
+ * The loop excludes the last insertion.
+ */
+ for (i = 0; i < num_of_actions - 3; i++) {
+ e_src -= MLX5HWS_ACTION_INLINE_DATA_SIZE;
+ memcpy(dst, e_src, MLX5HWS_ACTION_INLINE_DATA_SIZE); /* data */
+ dst += MLX5HWS_ACTION_DOUBLE_SIZE;
+ }
+ /* Copy the last 2 bytes after a gap of 2 bytes which will be removed */
+ e_src -= MLX5HWS_ACTION_INLINE_DATA_SIZE / 2;
+ dst += MLX5HWS_ACTION_INLINE_DATA_SIZE / 2;
+ memcpy(dst, e_src, 2);
+}
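+
+/*
+ * Worked example: for a 14B header (no VLAN, num_of_actions == 6) the loop
+ * copies three 4B chunks in reverse order starting from the end of src;
+ * the remaining first two bytes are copied last, after a 2-byte gap that
+ * the trailing remove_words action strips from the packet.
+ */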
+
+static int
+hws_action_get_shared_stc_offset(struct mlx5hws_context_common_res *common_res,
+ enum mlx5hws_context_shared_stc_type stc_type)
+{
+ return common_res->shared_stc[stc_type]->stc_chunk.offset;
+}
+
+static struct mlx5hws_actions_wqe_setter *
+hws_action_setter_find_first(struct mlx5hws_actions_wqe_setter *setter,
+ u8 req_flags)
+{
+ /* Use a new setter if requested flags are taken */
+ while (setter->flags & req_flags)
+ setter++;
+
+ /* Use the current setter if the required flags are not used */
+ return setter;
+}
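+
+/*
+ * Example: if the current setter already carries ASF_DOUBLE and the caller
+ * requests ASF_DOUBLE | ASF_REMOVE, the search advances to the next free
+ * setter, i.e. the next action STE.
+ */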
+
+static void
+hws_action_apply_stc(struct mlx5hws_actions_apply_data *apply,
+ enum mlx5hws_action_stc_idx stc_idx,
+ u8 action_idx)
+{
+ struct mlx5hws_action *action = apply->rule_action[action_idx].action;
+
+ apply->wqe_ctrl->stc_ix[stc_idx] = htonl(action->stc.offset);
+}
+
+static void
+hws_action_setter_push_vlan(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ struct mlx5hws_rule_action *rule_action;
+
+ rule_action = &apply->rule_action[setter->idx_double];
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = rule_action->push_vlan.vlan_hdr;
+
+ hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_DW6, setter->idx_double);
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
+}
+
+static void
+hws_action_setter_modify_header(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ struct mlx5hws_rule_action *rule_action;
+ struct mlx5hws_action *action;
+ u32 arg_sz, arg_idx;
+ u8 *single_action;
+ u8 max_actions;
+ __be32 stc_idx;
+
+ rule_action = &apply->rule_action[setter->idx_double];
+ action = rule_action->action;
+
+ stc_idx = htonl(action->stc.offset);
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = stc_idx;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
+
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
+
+ if (action->modify_header.num_of_actions == 1) {
+ if (action->modify_header.single_action_type ==
+ MLX5_MODIFICATION_TYPE_COPY ||
+ action->modify_header.single_action_type ==
+ MLX5_MODIFICATION_TYPE_ADD_FIELD) {
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = 0;
+ return;
+ }
+
+ if (action->flags & MLX5HWS_ACTION_FLAG_SHARED)
+ single_action = (u8 *)&action->modify_header.single_action;
+ else
+ single_action = rule_action->modify_header.data;
+
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] =
+ *(__be32 *)MLX5_ADDR_OF(set_action_in, single_action, data);
+ return;
+ }
+
+ /* Argument offset is a multiple of the number of args required for these actions */
+ max_actions = action->modify_header.max_num_of_actions;
+ arg_sz = mlx5hws_arg_get_arg_size(max_actions);
+ arg_idx = rule_action->modify_header.offset * arg_sz;
+
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx);
+
+ if (!(action->flags & MLX5HWS_ACTION_FLAG_SHARED)) {
+ apply->require_dep = 1;
+ hws_action_modify_write(apply->queue,
+ action->modify_header.arg_id + arg_idx,
+ rule_action->modify_header.data,
+ action->modify_header.num_of_actions,
+ action->modify_header.nop_locations);
+ }
+}
+
+static void
+hws_action_setter_insert_ptr(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ struct mlx5hws_rule_action *rule_action;
+ struct mlx5hws_action *action;
+ u32 arg_idx, arg_sz;
+ __be32 stc_idx;
+
+ rule_action = &apply->rule_action[setter->idx_double];
+ action = rule_action->action + rule_action->reformat.hdr_idx;
+
+ /* Argument offset is a multiple of the args required for the header size */
+ arg_sz = mlx5hws_arg_data_size_to_arg_size(action->reformat.max_hdr_sz);
+ arg_idx = rule_action->reformat.offset * arg_sz;
+
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx);
+
+ stc_idx = htonl(action->stc.offset);
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = stc_idx;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
+
+ if (!(action->flags & MLX5HWS_ACTION_FLAG_SHARED)) {
+ apply->require_dep = 1;
+ mlx5hws_arg_write(apply->queue, NULL,
+ action->reformat.arg_id + arg_idx,
+ rule_action->reformat.data,
+ action->reformat.header_size);
+ }
+}
+
+static void
+hws_action_setter_tnl_l3_to_l2(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ struct mlx5hws_rule_action *rule_action;
+ struct mlx5hws_action *action;
+ u32 arg_sz, arg_idx;
+ __be32 stc_idx;
+
+ rule_action = &apply->rule_action[setter->idx_double];
+ action = rule_action->action + rule_action->reformat.hdr_idx;
+
+ /* Argument offset is a multiple of the args required for the number of actions */
+ arg_sz = mlx5hws_arg_get_arg_size(action->modify_header.max_num_of_actions);
+ arg_idx = rule_action->reformat.offset * arg_sz;
+
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx);
+
+ stc_idx = htonl(action->stc.offset);
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = stc_idx;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
+
+ if (!(action->flags & MLX5HWS_ACTION_FLAG_SHARED)) {
+ apply->require_dep = 1;
+ mlx5hws_arg_decapl3_write(apply->queue,
+ action->modify_header.arg_id + arg_idx,
+ rule_action->reformat.data,
+ action->modify_header.num_of_actions);
+ }
+}
+
+static void
+hws_action_setter_aso(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ struct mlx5hws_rule_action *rule_action;
+ u32 exe_aso_ctrl;
+ u32 offset;
+
+ rule_action = &apply->rule_action[setter->idx_double];
+
+ switch (rule_action->action->type) {
+ case MLX5HWS_ACTION_TYP_ASO_METER:
+ /* exe_aso_ctrl format:
+ * [STC only and reserved bits 29b][init_color 2b][meter_id 1b]
+ */
+ offset = rule_action->aso_meter.offset / MLX5_ASO_METER_NUM_PER_OBJ;
+ exe_aso_ctrl = rule_action->aso_meter.offset % MLX5_ASO_METER_NUM_PER_OBJ;
+ exe_aso_ctrl |= rule_action->aso_meter.init_color <<
+ MLX5HWS_ACTION_METER_INIT_COLOR_OFFSET;
+ break;
+ default:
+ mlx5hws_err(rule_action->action->ctx,
+ "Unsupported ASO action type: %d\n", rule_action->action->type);
+ return;
+ }
+
+ /* aso_object_offset format: [24B] */
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = htonl(offset);
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(exe_aso_ctrl);
+
+ hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_DW6, setter->idx_double);
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
+}
+
+static void
+hws_action_setter_tag(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ struct mlx5hws_rule_action *rule_action;
+
+ rule_action = &apply->rule_action[setter->idx_single];
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = htonl(rule_action->tag.value);
+ hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_DW5, setter->idx_single);
+}
+
+static void
+hws_action_setter_ctrl_ctr(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ struct mlx5hws_rule_action *rule_action;
+
+ rule_action = &apply->rule_action[setter->idx_ctr];
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW0] = htonl(rule_action->counter.offset);
+ hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_CTRL, setter->idx_ctr);
+}
+
+static void
+hws_action_setter_single(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = 0;
+ hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_DW5, setter->idx_single);
+}
+
+static void
+hws_action_setter_single_double_pop(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = 0;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW5] =
+ htonl(hws_action_get_shared_stc_offset(apply->common_res,
+ MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP));
+}
+
+static void
+hws_action_setter_hit(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_HIT_LSB] = 0;
+ hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_HIT, setter->idx_hit);
+}
+
+static void
+hws_action_setter_default_hit(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_HIT_LSB] = 0;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_HIT] =
+ htonl(apply->common_res->default_stc->default_hit.offset);
+}
+
+static void
+hws_action_setter_hit_next_action(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_HIT_LSB] = htonl(apply->next_direct_idx << 6);
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_HIT] = htonl(apply->jump_to_action_stc);
+}
+
+static void
+hws_action_setter_common_decap(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = 0;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW5] =
+ htonl(hws_action_get_shared_stc_offset(apply->common_res,
+ MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3));
+}
+
+static void
+hws_action_setter_range(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ /* Always jump to index zero */
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_HIT_LSB] = 0;
+ hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_HIT, setter->idx_hit);
+}
+
+int mlx5hws_action_template_process(struct mlx5hws_action_template *at)
+{
+ struct mlx5hws_actions_wqe_setter *start_setter = at->setters + 1;
+ enum mlx5hws_action_type *action_type = at->action_type_arr;
+ struct mlx5hws_actions_wqe_setter *setter = at->setters;
+ struct mlx5hws_actions_wqe_setter *pop_setter = NULL;
+ struct mlx5hws_actions_wqe_setter *last_setter;
+ int i;
+
+ /* Note: Given action combination must be valid */
+
+ /* Check if action were already processed */
+ if (at->num_of_action_stes)
+ return 0;
+
+ for (i = 0; i < MLX5HWS_ACTION_MAX_STE; i++)
+ setter[i].set_hit = &hws_action_setter_hit_next_action;
+
+ /* The same action template setters can be used with jumbo or match
+ * STE, to support both cases we reserve the first setter for cases
+ * with jumbo STE to allow jump to the first action STE.
+ * This extra setter can be reduced in some cases on rule creation.
+ */
+ setter = start_setter;
+ last_setter = start_setter;
+
+ for (i = 0; i < at->num_actions; i++) {
+ switch (action_type[i]) {
+ case MLX5HWS_ACTION_TYP_DROP:
+ case MLX5HWS_ACTION_TYP_TBL:
+ case MLX5HWS_ACTION_TYP_DEST_ARRAY:
+ case MLX5HWS_ACTION_TYP_SAMPLER:
+ case MLX5HWS_ACTION_TYP_VPORT:
+ case MLX5HWS_ACTION_TYP_MISS:
+ /* Hit action */
+ last_setter->flags |= ASF_HIT;
+ last_setter->set_hit = &hws_action_setter_hit;
+ last_setter->idx_hit = i;
+ break;
+
+ case MLX5HWS_ACTION_TYP_RANGE:
+ last_setter->flags |= ASF_HIT;
+ last_setter->set_hit = &hws_action_setter_range;
+ last_setter->idx_hit = i;
+ break;
+
+ case MLX5HWS_ACTION_TYP_POP_VLAN:
+ /* Single remove header to header */
+ if (pop_setter) {
+ /* We have 2 pops, use the shared */
+ pop_setter->set_single = &hws_action_setter_single_double_pop;
+ break;
+ }
+ setter = hws_action_setter_find_first(last_setter,
+ ASF_SINGLE1 | ASF_MODIFY |
+ ASF_INSERT);
+ setter->flags |= ASF_SINGLE1 | ASF_REMOVE;
+ setter->set_single = &hws_action_setter_single;
+ setter->idx_single = i;
+ pop_setter = setter;
+ break;
+
+ case MLX5HWS_ACTION_TYP_PUSH_VLAN:
+ /* Double insert inline */
+ setter = hws_action_setter_find_first(last_setter, ASF_DOUBLE | ASF_REMOVE);
+ setter->flags |= ASF_DOUBLE | ASF_INSERT;
+ setter->set_double = &hws_action_setter_push_vlan;
+ setter->idx_double = i;
+ break;
+
+ case MLX5HWS_ACTION_TYP_MODIFY_HDR:
+ /* Double modify header list */
+ setter = hws_action_setter_find_first(last_setter, ASF_DOUBLE | ASF_REMOVE);
+ setter->flags |= ASF_DOUBLE | ASF_MODIFY;
+ setter->set_double = &hws_action_setter_modify_header;
+ setter->idx_double = i;
+ break;
+
+ case MLX5HWS_ACTION_TYP_ASO_METER:
+ /* Double ASO action */
+ setter = hws_action_setter_find_first(last_setter, ASF_DOUBLE);
+ setter->flags |= ASF_DOUBLE;
+ setter->set_double = &hws_action_setter_aso;
+ setter->idx_double = i;
+ break;
+
+ case MLX5HWS_ACTION_TYP_REMOVE_HEADER:
+ case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2:
+ /* Single remove header to header */
+ setter = hws_action_setter_find_first(last_setter,
+ ASF_SINGLE1 | ASF_MODIFY);
+ setter->flags |= ASF_SINGLE1 | ASF_REMOVE;
+ setter->set_single = &hws_action_setter_single;
+ setter->idx_single = i;
+ break;
+
+ case MLX5HWS_ACTION_TYP_INSERT_HEADER:
+ case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
+ /* Double insert header with pointer */
+ setter = hws_action_setter_find_first(last_setter, ASF_DOUBLE | ASF_REMOVE);
+ setter->flags |= ASF_DOUBLE | ASF_INSERT;
+ setter->set_double = &hws_action_setter_insert_ptr;
+ setter->idx_double = i;
+ break;
+
+ case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
+ /* Single remove + Double insert header with pointer */
+ setter = hws_action_setter_find_first(last_setter,
+ ASF_SINGLE1 | ASF_DOUBLE);
+ setter->flags |= ASF_SINGLE1 | ASF_DOUBLE;
+ setter->set_double = &hws_action_setter_insert_ptr;
+ setter->idx_double = i;
+ setter->set_single = &hws_action_setter_common_decap;
+ setter->idx_single = i;
+ break;
+
+ case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
+ /* Double modify header list with remove and push inline */
+ setter = hws_action_setter_find_first(last_setter, ASF_DOUBLE | ASF_REMOVE);
+ setter->flags |= ASF_DOUBLE | ASF_MODIFY | ASF_INSERT;
+ setter->set_double = &hws_action_setter_tnl_l3_to_l2;
+ setter->idx_double = i;
+ break;
+
+ case MLX5HWS_ACTION_TYP_TAG:
+ /* Single TAG action, search for any room from the start */
+ setter = hws_action_setter_find_first(start_setter, ASF_SINGLE1);
+ setter->flags |= ASF_SINGLE1;
+ setter->set_single = &hws_action_setter_tag;
+ setter->idx_single = i;
+ break;
+
+ case MLX5HWS_ACTION_TYP_CTR:
+ /* Control counter action.
+ * TODO: Currently the counter is executed first. Support is needed
+ * for a single action counter action which is done last.
+ * Example: Decap + CTR
+ */
+ setter = hws_action_setter_find_first(start_setter, ASF_CTR);
+ setter->flags |= ASF_CTR;
+ setter->set_ctr = &hws_action_setter_ctrl_ctr;
+ setter->idx_ctr = i;
+ break;
+ default:
+ pr_warn("HWS: Invalid action type in processingaction template: action_type[%d]=%d\n",
+ i, action_type[i]);
+ return -EOPNOTSUPP;
+ }
+
+ last_setter = max(setter, last_setter);
+ }
+
+ /* Set default hit on the last STE if no hit action provided */
+ if (!(last_setter->flags & ASF_HIT))
+ last_setter->set_hit = &hws_action_setter_default_hit;
+
+ at->num_of_action_stes = last_setter - start_setter + 1;
+
+ /* Check if action template doesn't require any action DWs */
+ at->only_term = (at->num_of_action_stes == 1) &&
+ !(last_setter->flags & ~(ASF_CTR | ASF_HIT));
+
+ return 0;
+}
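+
+/*
+ * Worked example (derived from the logic above): for the action type array
+ * { CTR, MODIFY_HDR, TBL, LAST }, the counter takes the ctr slot of the
+ * first action setter, the modify header takes its double slot, and the
+ * table destination sets the hit on the same setter, so num_of_action_stes
+ * is 1 and only_term is false (the setter carries more than CTR/HIT flags).
+ */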
+
+struct mlx5hws_action_template *
+mlx5hws_action_template_create(enum mlx5hws_action_type action_type[])
+{
+ struct mlx5hws_action_template *at;
+ u8 num_actions = 0;
+ int i;
+
+ at = kzalloc(sizeof(*at), GFP_KERNEL);
+ if (!at)
+ return NULL;
+
+ while (action_type[num_actions++] != MLX5HWS_ACTION_TYP_LAST)
+ ;
+
+ at->num_actions = num_actions - 1;
+ at->action_type_arr = kcalloc(num_actions, sizeof(*action_type), GFP_KERNEL);
+ if (!at->action_type_arr)
+ goto free_at;
+
+ for (i = 0; i < num_actions; i++)
+ at->action_type_arr[i] = action_type[i];
+
+ return at;
+
+free_at:
+ kfree(at);
+ return NULL;
+}
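+
+/*
+ * Illustrative usage sketch (not part of this patch): build and process a
+ * template for counter + modify header + forward-to-table:
+ *
+ *	enum mlx5hws_action_type types[] = {
+ *		MLX5HWS_ACTION_TYP_CTR,
+ *		MLX5HWS_ACTION_TYP_MODIFY_HDR,
+ *		MLX5HWS_ACTION_TYP_TBL,
+ *		MLX5HWS_ACTION_TYP_LAST,
+ *	};
+ *
+ *	at = mlx5hws_action_template_create(types);
+ *	if (at)
+ *		mlx5hws_action_template_process(at);
+ */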
+
+int mlx5hws_action_template_destroy(struct mlx5hws_action_template *at)
+{
+ kfree(at->action_type_arr);
+ kfree(at);
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h
new file mode 100644
index 000000000000..55a079fdd08f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h
@@ -0,0 +1,316 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef HWS_ACTION_H_
+#define HWS_ACTION_H_
+
+/* Max number of STEs needed for a rule (including match) */
+#define MLX5HWS_ACTION_MAX_STE 20
+
+/* Max number of internal subactions of ipv6_ext */
+#define MLX5HWS_ACTION_IPV6_EXT_MAX_SA 4
+
+enum mlx5hws_action_stc_idx {
+ MLX5HWS_ACTION_STC_IDX_CTRL = 0,
+ MLX5HWS_ACTION_STC_IDX_HIT = 1,
+ MLX5HWS_ACTION_STC_IDX_DW5 = 2,
+ MLX5HWS_ACTION_STC_IDX_DW6 = 3,
+ MLX5HWS_ACTION_STC_IDX_DW7 = 4,
+ MLX5HWS_ACTION_STC_IDX_MAX = 5,
+ /* STC Jumbo STE combo: CTR, Hit */
+ MLX5HWS_ACTION_STC_IDX_LAST_JUMBO_STE = 1,
+ /* STC combo1: CTR, SINGLE, DOUBLE, Hit */
+ MLX5HWS_ACTION_STC_IDX_LAST_COMBO1 = 3,
+ /* STC combo2: CTR, 3 x SINGLE, Hit */
+ MLX5HWS_ACTION_STC_IDX_LAST_COMBO2 = 4,
+ /* STC combo3: CTR, TRIPLE, Hit */
+ MLX5HWS_ACTION_STC_IDX_LAST_COMBO3 = 2,
+};
+
+enum mlx5hws_action_offset {
+ MLX5HWS_ACTION_OFFSET_DW0 = 0,
+ MLX5HWS_ACTION_OFFSET_DW5 = 5,
+ MLX5HWS_ACTION_OFFSET_DW6 = 6,
+ MLX5HWS_ACTION_OFFSET_DW7 = 7,
+ MLX5HWS_ACTION_OFFSET_HIT = 3,
+ MLX5HWS_ACTION_OFFSET_HIT_LSB = 4,
+};
+
+enum {
+ MLX5HWS_ACTION_DOUBLE_SIZE = 8,
+ MLX5HWS_ACTION_INLINE_DATA_SIZE = 4,
+ MLX5HWS_ACTION_HDR_LEN_L2_MACS = 12,
+ MLX5HWS_ACTION_HDR_LEN_L2_VLAN = 4,
+ MLX5HWS_ACTION_HDR_LEN_L2_ETHER = 2,
+ MLX5HWS_ACTION_HDR_LEN_L2 = (MLX5HWS_ACTION_HDR_LEN_L2_MACS +
+ MLX5HWS_ACTION_HDR_LEN_L2_ETHER),
+ MLX5HWS_ACTION_HDR_LEN_L2_W_VLAN = (MLX5HWS_ACTION_HDR_LEN_L2 +
+ MLX5HWS_ACTION_HDR_LEN_L2_VLAN),
+ MLX5HWS_ACTION_REFORMAT_DATA_SIZE = 64,
+ DECAP_L3_NUM_ACTIONS_W_NO_VLAN = 6,
+ DECAP_L3_NUM_ACTIONS_W_VLAN = 7,
+};
+
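+/* Each action STE has three single-DW action slots (DW5-DW7); ASF_DOUBLE
+ * occupies the SINGLE2 and SINGLE3 slots and ASF_TRIPLE occupies all three.
+ */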
+enum mlx5hws_action_setter_flag {
+ ASF_SINGLE1 = 1 << 0,
+ ASF_SINGLE2 = 1 << 1,
+ ASF_SINGLE3 = 1 << 2,
+ ASF_DOUBLE = ASF_SINGLE2 | ASF_SINGLE3,
+ ASF_TRIPLE = ASF_SINGLE1 | ASF_DOUBLE,
+ ASF_INSERT = 1 << 3,
+ ASF_REMOVE = 1 << 4,
+ ASF_MODIFY = 1 << 5,
+ ASF_CTR = 1 << 6,
+ ASF_HIT = 1 << 7,
+};
+
+struct mlx5hws_action_default_stc {
+ struct mlx5hws_pool_chunk nop_ctr;
+ struct mlx5hws_pool_chunk nop_dw5;
+ struct mlx5hws_pool_chunk nop_dw6;
+ struct mlx5hws_pool_chunk nop_dw7;
+ struct mlx5hws_pool_chunk default_hit;
+ u32 refcount; /* protected by context ctrl lock */
+};
+
+struct mlx5hws_action_shared_stc {
+ struct mlx5hws_pool_chunk stc_chunk;
+ u32 refcount; /* protected by context ctrl lock */
+};
+
+struct mlx5hws_actions_apply_data {
+ struct mlx5hws_send_engine *queue;
+ struct mlx5hws_rule_action *rule_action;
+ __be32 *wqe_data;
+ struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl;
+ u32 jump_to_action_stc;
+ struct mlx5hws_context_common_res *common_res;
+ enum mlx5hws_table_type tbl_type;
+ u32 next_direct_idx;
+ u8 require_dep;
+};
+
+struct mlx5hws_actions_wqe_setter;
+
+typedef void (*mlx5hws_action_setter_fp)(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter);
+
+struct mlx5hws_actions_wqe_setter {
+ mlx5hws_action_setter_fp set_single;
+ mlx5hws_action_setter_fp set_double;
+ mlx5hws_action_setter_fp set_triple;
+ mlx5hws_action_setter_fp set_hit;
+ mlx5hws_action_setter_fp set_ctr;
+ u8 idx_single;
+ u8 idx_double;
+ u8 idx_triple;
+ u8 idx_ctr;
+ u8 idx_hit;
+ u8 stage_idx;
+ u8 flags;
+};
+
+struct mlx5hws_action_template {
+ struct mlx5hws_actions_wqe_setter setters[MLX5HWS_ACTION_MAX_STE];
+ enum mlx5hws_action_type *action_type_arr;
+ u8 num_of_action_stes;
+ u8 num_actions;
+ u8 only_term;
+};
+
+struct mlx5hws_range_action_table {
+ struct mlx5hws_pool *pool;
+ u32 rtc_0_id;
+ u32 rtc_1_id;
+};
+
+struct mlx5hws_action {
+ u8 type;
+ u8 flags;
+ struct mlx5hws_context *ctx;
+ union {
+ struct {
+ struct mlx5hws_pool_chunk stc;
+ union {
+ struct {
+ u32 pat_id;
+ u32 arg_id;
+ __be64 single_action;
+ u32 nop_locations;
+ u8 num_of_patterns;
+ u8 single_action_type;
+ u8 num_of_actions;
+ u8 max_num_of_actions;
+ u8 require_reparse;
+ } modify_header;
+ struct {
+ u32 arg_id;
+ u32 header_size;
+ u16 max_hdr_sz;
+ u8 num_of_hdrs;
+ u8 anchor;
+ u8 e_anchor;
+ u8 offset;
+ bool encap;
+ u8 require_reparse;
+ } reformat;
+ struct {
+ u32 obj_id;
+ u8 return_reg_id;
+ } aso;
+ struct {
+ u16 vport_num;
+ u16 esw_owner_vhca_id;
+ bool esw_owner_vhca_id_valid;
+ } vport;
+ struct {
+ u32 obj_id;
+ } dest_obj;
+ struct {
+ struct mlx5hws_cmd_forward_tbl *fw_island;
+ size_t num_dest;
+ struct mlx5hws_cmd_set_fte_dest *dest_list;
+ } dest_array;
+ struct {
+ struct mlx5hws_cmd_forward_tbl *fw_island;
+ } flow_sampler;
+ struct {
+ u8 type;
+ u8 start_anchor;
+ u8 end_anchor;
+ u8 num_of_words;
+ bool decap;
+ } insert_hdr;
+ struct {
+ /* PRM start anchor from which header will be removed */
+ u8 anchor;
+ /* Header remove offset in bytes, from the start
+ * anchor to the location where remove header starts.
+ */
+ u8 offset;
+ /* Indicates the removed header size in bytes */
+ size_t size;
+ } remove_header;
+ struct {
+ struct mlx5hws_range_action_table *table_ste;
+ struct mlx5hws_action *hit_ft_action;
+ struct mlx5hws_definer *definer;
+ } range;
+ };
+ };
+
+ struct ibv_flow_action *flow_action;
+ u32 obj_id;
+ struct ibv_qp *qp;
+ };
+};
+
+const char *mlx5hws_action_type_to_str(enum mlx5hws_action_type action_type);
+
+int mlx5hws_action_get_default_stc(struct mlx5hws_context *ctx,
+ u8 tbl_type);
+
+void mlx5hws_action_put_default_stc(struct mlx5hws_context *ctx,
+ u8 tbl_type);
+
+void mlx5hws_action_prepare_decap_l3_data(u8 *src, u8 *dst,
+ u16 num_of_actions);
+
+int mlx5hws_action_template_process(struct mlx5hws_action_template *at);
+
+bool mlx5hws_action_check_combo(struct mlx5hws_context *ctx,
+ enum mlx5hws_action_type *user_actions,
+ enum mlx5hws_table_type table_type);
+
+int mlx5hws_action_alloc_single_stc(struct mlx5hws_context *ctx,
+ struct mlx5hws_cmd_stc_modify_attr *stc_attr,
+ u32 table_type,
+ struct mlx5hws_pool_chunk *stc);
+
+void mlx5hws_action_free_single_stc(struct mlx5hws_context *ctx,
+ u32 table_type,
+ struct mlx5hws_pool_chunk *stc);
+
+static inline void
+mlx5hws_action_setter_default_single(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = 0;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW5] =
+ htonl(apply->common_res->default_stc->nop_dw5.offset);
+}
+
+static inline void
+mlx5hws_action_setter_default_double(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = 0;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] =
+ htonl(apply->common_res->default_stc->nop_dw6.offset);
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] =
+ htonl(apply->common_res->default_stc->nop_dw7.offset);
+}
+
+static inline void
+mlx5hws_action_setter_default_ctr(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW0] = 0;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_CTRL] =
+ htonl(apply->common_res->default_stc->nop_ctr.offset);
+}
+
+static inline void
+mlx5hws_action_apply_setter(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter,
+ bool is_jumbo)
+{
+ u8 num_of_actions;
+
+ /* Set control counter */
+ if (setter->set_ctr)
+ setter->set_ctr(apply, setter);
+ else
+ mlx5hws_action_setter_default_ctr(apply, setter);
+
+ if (!is_jumbo) {
+ if (unlikely(setter->set_triple)) {
+ /* Set triple on match */
+ setter->set_triple(apply, setter);
+ num_of_actions = MLX5HWS_ACTION_STC_IDX_LAST_COMBO3;
+ } else {
+ /* Set single and double on match */
+ if (setter->set_single)
+ setter->set_single(apply, setter);
+ else
+ mlx5hws_action_setter_default_single(apply, setter);
+
+ if (setter->set_double)
+ setter->set_double(apply, setter);
+ else
+ mlx5hws_action_setter_default_double(apply, setter);
+
+ num_of_actions = setter->set_double ?
+ MLX5HWS_ACTION_STC_IDX_LAST_COMBO1 :
+ MLX5HWS_ACTION_STC_IDX_LAST_COMBO2;
+ }
+ } else {
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = 0;
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = 0;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW5] = 0;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = 0;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
+ num_of_actions = MLX5HWS_ACTION_STC_IDX_LAST_JUMBO_STE;
+ }
+
+ /* Set next/final hit action */
+ setter->set_hit(apply, setter);
+
+ /* Set number of actions */
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_CTRL] |=
+ htonl(num_of_actions << 29);
+}
+
+#endif /* HWS_ACTION_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.c
new file mode 100644
index 000000000000..5766a9c82f96
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.c
@@ -0,0 +1,467 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2025 NVIDIA Corporation & Affiliates */
+
+#include "internal.h"
+
+static const char *
+hws_pool_opt_to_str(enum mlx5hws_pool_optimize opt)
+{
+ switch (opt) {
+ case MLX5HWS_POOL_OPTIMIZE_NONE:
+ return "rx-and-tx";
+ case MLX5HWS_POOL_OPTIMIZE_ORIG:
+ return "rx-only";
+ case MLX5HWS_POOL_OPTIMIZE_MIRROR:
+ return "tx-only";
+ default:
+ return "unknown";
+ }
+}
+
+static int
+hws_action_ste_table_create_pool(struct mlx5hws_context *ctx,
+ struct mlx5hws_action_ste_table *action_tbl,
+ enum mlx5hws_pool_optimize opt, size_t log_sz)
+{
+ struct mlx5hws_pool_attr pool_attr = { 0 };
+
+ pool_attr.pool_type = MLX5HWS_POOL_TYPE_STE;
+ pool_attr.table_type = MLX5HWS_TABLE_TYPE_FDB;
+ pool_attr.flags = MLX5HWS_POOL_FLAG_BUDDY;
+ pool_attr.opt_type = opt;
+ pool_attr.alloc_log_sz = log_sz;
+
+ action_tbl->pool = mlx5hws_pool_create(ctx, &pool_attr);
+ if (!action_tbl->pool) {
+ mlx5hws_err(ctx, "Failed to allocate STE pool\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hws_action_ste_table_create_single_rtc(
+ struct mlx5hws_context *ctx,
+ struct mlx5hws_action_ste_table *action_tbl,
+ enum mlx5hws_pool_optimize opt, size_t log_sz, bool tx)
+{
+ struct mlx5hws_cmd_rtc_create_attr rtc_attr = { 0 };
+ u32 *rtc_id;
+
+ rtc_attr.log_depth = 0;
+ rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET;
+ /* Action STEs use the default always hit definer. */
+ rtc_attr.match_definer_0 = ctx->caps->trivial_match_definer;
+ rtc_attr.is_frst_jumbo = false;
+ rtc_attr.miss_ft_id = 0;
+ rtc_attr.pd = ctx->pd_num;
+ rtc_attr.reparse_mode = mlx5hws_context_get_reparse_mode(ctx);
+
+ if (tx) {
+ rtc_attr.table_type = FS_FT_FDB_TX;
+ rtc_attr.ste_base =
+ mlx5hws_pool_get_base_mirror_id(action_tbl->pool);
+ rtc_attr.stc_base =
+ mlx5hws_pool_get_base_mirror_id(ctx->stc_pool);
+ rtc_attr.log_size =
+ opt == MLX5HWS_POOL_OPTIMIZE_ORIG ? 0 : log_sz;
+ rtc_id = &action_tbl->rtc_1_id;
+ } else {
+ rtc_attr.table_type = FS_FT_FDB_RX;
+ rtc_attr.ste_base = mlx5hws_pool_get_base_id(action_tbl->pool);
+ rtc_attr.stc_base = mlx5hws_pool_get_base_id(ctx->stc_pool);
+ rtc_attr.log_size =
+ opt == MLX5HWS_POOL_OPTIMIZE_MIRROR ? 0 : log_sz;
+ rtc_id = &action_tbl->rtc_0_id;
+ }
+
+ return mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_id);
+}
+
+static int
+hws_action_ste_table_create_rtcs(struct mlx5hws_context *ctx,
+ struct mlx5hws_action_ste_table *action_tbl,
+ enum mlx5hws_pool_optimize opt, size_t log_sz)
+{
+ int err;
+
+ err = hws_action_ste_table_create_single_rtc(ctx, action_tbl, opt,
+ log_sz, false);
+ if (err)
+ return err;
+
+ err = hws_action_ste_table_create_single_rtc(ctx, action_tbl, opt,
+ log_sz, true);
+ if (err) {
+ mlx5hws_cmd_rtc_destroy(ctx->mdev, action_tbl->rtc_0_id);
+ return err;
+ }
+
+ return 0;
+}
+
+static void
+hws_action_ste_table_destroy_rtcs(struct mlx5hws_action_ste_table *action_tbl)
+{
+ mlx5hws_cmd_rtc_destroy(action_tbl->pool->ctx->mdev,
+ action_tbl->rtc_1_id);
+ mlx5hws_cmd_rtc_destroy(action_tbl->pool->ctx->mdev,
+ action_tbl->rtc_0_id);
+}
+
+static int
+hws_action_ste_table_create_stc(struct mlx5hws_context *ctx,
+ struct mlx5hws_action_ste_table *action_tbl)
+{
+ struct mlx5hws_cmd_stc_modify_attr stc_attr = { 0 };
+
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE;
+ stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+ stc_attr.ste_table.ste_pool = action_tbl->pool;
+ stc_attr.ste_table.match_definer_id = ctx->caps->trivial_match_definer;
+
+ return mlx5hws_action_alloc_single_stc(ctx, &stc_attr,
+ MLX5HWS_TABLE_TYPE_FDB,
+ &action_tbl->stc);
+}
+
+static struct mlx5hws_action_ste_table *
+hws_action_ste_table_alloc(struct mlx5hws_action_ste_pool_element *parent_elem)
+{
+ enum mlx5hws_pool_optimize opt = parent_elem->opt;
+ struct mlx5hws_context *ctx = parent_elem->ctx;
+ struct mlx5hws_action_ste_table *action_tbl;
+ size_t log_sz;
+ int err;
+
+ log_sz = min(parent_elem->log_sz ?
+ parent_elem->log_sz +
+ MLX5HWS_ACTION_STE_TABLE_STEP_LOG_SZ :
+ MLX5HWS_ACTION_STE_TABLE_INIT_LOG_SZ,
+ MLX5HWS_ACTION_STE_TABLE_MAX_LOG_SZ);
+
+ action_tbl = kzalloc(sizeof(*action_tbl), GFP_KERNEL);
+ if (!action_tbl)
+ return ERR_PTR(-ENOMEM);
+
+ err = hws_action_ste_table_create_pool(ctx, action_tbl, opt, log_sz);
+ if (err)
+ goto free_tbl;
+
+ err = hws_action_ste_table_create_rtcs(ctx, action_tbl, opt, log_sz);
+ if (err)
+ goto destroy_pool;
+
+ err = hws_action_ste_table_create_stc(ctx, action_tbl);
+ if (err)
+ goto destroy_rtcs;
+
+ action_tbl->parent_elem = parent_elem;
+ INIT_LIST_HEAD(&action_tbl->list_node);
+ action_tbl->last_used = jiffies;
+ list_add(&action_tbl->list_node, &parent_elem->available);
+ parent_elem->log_sz = log_sz;
+
+ mlx5hws_dbg(ctx,
+ "Allocated %s action STE table log_sz %zu; STEs (%d, %d); RTCs (%d, %d); STC %d\n",
+ hws_pool_opt_to_str(opt), log_sz,
+ mlx5hws_pool_get_base_id(action_tbl->pool),
+ mlx5hws_pool_get_base_mirror_id(action_tbl->pool),
+ action_tbl->rtc_0_id, action_tbl->rtc_1_id,
+ action_tbl->stc.offset);
+
+ return action_tbl;
+
+destroy_rtcs:
+ hws_action_ste_table_destroy_rtcs(action_tbl);
+destroy_pool:
+ mlx5hws_pool_destroy(action_tbl->pool);
+free_tbl:
+ kfree(action_tbl);
+
+ return ERR_PTR(err);
+}
+
+static void
+hws_action_ste_table_destroy(struct mlx5hws_action_ste_table *action_tbl)
+{
+ struct mlx5hws_context *ctx = action_tbl->parent_elem->ctx;
+
+ mlx5hws_dbg(ctx,
+ "Destroying %s action STE table: STEs (%d, %d); RTCs (%d, %d); STC %d\n",
+ hws_pool_opt_to_str(action_tbl->parent_elem->opt),
+ mlx5hws_pool_get_base_id(action_tbl->pool),
+ mlx5hws_pool_get_base_mirror_id(action_tbl->pool),
+ action_tbl->rtc_0_id, action_tbl->rtc_1_id,
+ action_tbl->stc.offset);
+
+ mlx5hws_action_free_single_stc(ctx, MLX5HWS_TABLE_TYPE_FDB,
+ &action_tbl->stc);
+ hws_action_ste_table_destroy_rtcs(action_tbl);
+ mlx5hws_pool_destroy(action_tbl->pool);
+
+ list_del(&action_tbl->list_node);
+ kfree(action_tbl);
+}
+
+static int
+hws_action_ste_pool_element_init(struct mlx5hws_context *ctx,
+ struct mlx5hws_action_ste_pool_element *elem,
+ enum mlx5hws_pool_optimize opt)
+{
+ elem->ctx = ctx;
+ elem->opt = opt;
+ INIT_LIST_HEAD(&elem->available);
+ INIT_LIST_HEAD(&elem->full);
+
+ return 0;
+}
+
+static void hws_action_ste_pool_element_destroy(
+ struct mlx5hws_action_ste_pool_element *elem)
+{
+ struct mlx5hws_action_ste_table *action_tbl, *p;
+
+ /* This should be empty, but attempt to free its elements anyway. */
+ list_for_each_entry_safe(action_tbl, p, &elem->full, list_node)
+ hws_action_ste_table_destroy(action_tbl);
+
+ list_for_each_entry_safe(action_tbl, p, &elem->available, list_node)
+ hws_action_ste_table_destroy(action_tbl);
+}
+
+static int hws_action_ste_pool_init(struct mlx5hws_context *ctx,
+ struct mlx5hws_action_ste_pool *pool)
+{
+ enum mlx5hws_pool_optimize opt;
+ int err;
+
+ mutex_init(&pool->lock);
+
+ /* Rules which are added for both RX and TX must use the same action STE
+ * indices for both. If we were to use a single table, then RX-only and
+ * TX-only rules would waste the unused entries. Thus, we use separate
+ * table sets for the three cases.
+ */
+ for (opt = MLX5HWS_POOL_OPTIMIZE_NONE; opt < MLX5HWS_POOL_OPTIMIZE_MAX;
+ opt++) {
+ err = hws_action_ste_pool_element_init(ctx, &pool->elems[opt],
+ opt);
+ if (err)
+ goto destroy_elems;
+ pool->elems[opt].parent_pool = pool;
+ }
+
+ return 0;
+
+destroy_elems:
+ while (opt-- > MLX5HWS_POOL_OPTIMIZE_NONE)
+ hws_action_ste_pool_element_destroy(&pool->elems[opt]);
+
+ return err;
+}
+
+static void hws_action_ste_pool_destroy(struct mlx5hws_action_ste_pool *pool)
+{
+ int opt;
+
+ for (opt = MLX5HWS_POOL_OPTIMIZE_MAX - 1;
+ opt >= MLX5HWS_POOL_OPTIMIZE_NONE; opt--)
+ hws_action_ste_pool_element_destroy(&pool->elems[opt]);
+}
+
+static void hws_action_ste_pool_element_collect_stale(
+ struct mlx5hws_action_ste_pool_element *elem, struct list_head *cleanup)
+{
+ struct mlx5hws_action_ste_table *action_tbl, *p;
+ unsigned long expire_time, now;
+
+ expire_time = secs_to_jiffies(MLX5HWS_ACTION_STE_POOL_EXPIRE_SECONDS);
+ now = jiffies;
+
+ list_for_each_entry_safe(action_tbl, p, &elem->available, list_node) {
+ if (mlx5hws_pool_full(action_tbl->pool) &&
+ time_before(action_tbl->last_used + expire_time, now))
+ list_move(&action_tbl->list_node, cleanup);
+ }
+}
+
+static void hws_action_ste_table_cleanup_list(struct list_head *cleanup)
+{
+ struct mlx5hws_action_ste_table *action_tbl, *p;
+
+ list_for_each_entry_safe(action_tbl, p, cleanup, list_node)
+ hws_action_ste_table_destroy(action_tbl);
+}
+
+static void hws_action_ste_pool_cleanup(struct work_struct *work)
+{
+ enum mlx5hws_pool_optimize opt;
+ struct mlx5hws_context *ctx;
+ LIST_HEAD(cleanup);
+ int i;
+
+ ctx = container_of(work, struct mlx5hws_context,
+ action_ste_cleanup.work);
+
+ for (i = 0; i < ctx->queues; i++) {
+ struct mlx5hws_action_ste_pool *p = &ctx->action_ste_pool[i];
+
+ mutex_lock(&p->lock);
+ for (opt = MLX5HWS_POOL_OPTIMIZE_NONE;
+ opt < MLX5HWS_POOL_OPTIMIZE_MAX; opt++)
+ hws_action_ste_pool_element_collect_stale(
+ &p->elems[opt], &cleanup);
+ mutex_unlock(&p->lock);
+ }
+
+ hws_action_ste_table_cleanup_list(&cleanup);
+
+ schedule_delayed_work(&ctx->action_ste_cleanup,
+ secs_to_jiffies(
+ MLX5HWS_ACTION_STE_POOL_CLEANUP_SECONDS));
+}
+
+int mlx5hws_action_ste_pool_init(struct mlx5hws_context *ctx)
+{
+ struct mlx5hws_action_ste_pool *pool;
+ size_t queues = ctx->queues;
+ int i, err;
+
+ pool = kcalloc(queues, sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return -ENOMEM;
+
+ for (i = 0; i < queues; i++) {
+ err = hws_action_ste_pool_init(ctx, &pool[i]);
+ if (err)
+ goto free_pool;
+ }
+
+ ctx->action_ste_pool = pool;
+
+ INIT_DELAYED_WORK(&ctx->action_ste_cleanup,
+ hws_action_ste_pool_cleanup);
+ schedule_delayed_work(
+ &ctx->action_ste_cleanup,
+ secs_to_jiffies(MLX5HWS_ACTION_STE_POOL_CLEANUP_SECONDS));
+
+ return 0;
+
+free_pool:
+ while (i--)
+ hws_action_ste_pool_destroy(&pool[i]);
+ kfree(pool);
+
+ return err;
+}
+
+void mlx5hws_action_ste_pool_uninit(struct mlx5hws_context *ctx)
+{
+ size_t queues = ctx->queues;
+ int i;
+
+ cancel_delayed_work_sync(&ctx->action_ste_cleanup);
+
+ for (i = 0; i < queues; i++)
+ hws_action_ste_pool_destroy(&ctx->action_ste_pool[i]);
+
+ kfree(ctx->action_ste_pool);
+}
+
+static struct mlx5hws_action_ste_pool_element *
+hws_action_ste_choose_elem(struct mlx5hws_action_ste_pool *pool,
+ bool skip_rx, bool skip_tx)
+{
+ if (skip_rx)
+ return &pool->elems[MLX5HWS_POOL_OPTIMIZE_MIRROR];
+
+ if (skip_tx)
+ return &pool->elems[MLX5HWS_POOL_OPTIMIZE_ORIG];
+
+ return &pool->elems[MLX5HWS_POOL_OPTIMIZE_NONE];
+}
+
+static int
+hws_action_ste_table_chunk_alloc(struct mlx5hws_action_ste_table *action_tbl,
+ struct mlx5hws_action_ste_chunk *chunk)
+{
+ int err;
+
+ err = mlx5hws_pool_chunk_alloc(action_tbl->pool, &chunk->ste);
+ if (err)
+ return err;
+
+ chunk->action_tbl = action_tbl;
+ action_tbl->last_used = jiffies;
+
+ return 0;
+}
+
+int mlx5hws_action_ste_chunk_alloc(struct mlx5hws_action_ste_pool *pool,
+ bool skip_rx, bool skip_tx,
+ struct mlx5hws_action_ste_chunk *chunk)
+{
+ struct mlx5hws_action_ste_pool_element *elem;
+ struct mlx5hws_action_ste_table *action_tbl;
+ bool found;
+ int err;
+
+ if (skip_rx && skip_tx)
+ return -EINVAL;
+
+ mutex_lock(&pool->lock);
+
+ elem = hws_action_ste_choose_elem(pool, skip_rx, skip_tx);
+
+ mlx5hws_dbg(elem->ctx,
+ "Allocating action STEs skip_rx %d skip_tx %d order %d\n",
+ skip_rx, skip_tx, chunk->ste.order);
+
+ found = false;
+ list_for_each_entry(action_tbl, &elem->available, list_node) {
+ if (!hws_action_ste_table_chunk_alloc(action_tbl, chunk)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ action_tbl = hws_action_ste_table_alloc(elem);
+ if (IS_ERR(action_tbl)) {
+ err = PTR_ERR(action_tbl);
+ goto out;
+ }
+
+ err = hws_action_ste_table_chunk_alloc(action_tbl, chunk);
+ if (err)
+ goto out;
+ }
+
+ if (mlx5hws_pool_empty(action_tbl->pool))
+ list_move(&action_tbl->list_node, &elem->full);
+
+ err = 0;
+
+out:
+ mutex_unlock(&pool->lock);
+
+ return err;
+}
+
+void mlx5hws_action_ste_chunk_free(struct mlx5hws_action_ste_chunk *chunk)
+{
+ struct mutex *lock = &chunk->action_tbl->parent_elem->parent_pool->lock;
+
+ mlx5hws_dbg(chunk->action_tbl->pool->ctx,
+ "Freeing action STEs offset %d order %d\n",
+ chunk->ste.offset, chunk->ste.order);
+
+ mutex_lock(lock);
+ mlx5hws_pool_chunk_free(chunk->action_tbl->pool, &chunk->ste);
+ chunk->action_tbl->last_used = jiffies;
+ list_move(&chunk->action_tbl->list_node,
+ &chunk->action_tbl->parent_elem->available);
+ mutex_unlock(lock);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.h
new file mode 100644
index 000000000000..a8ba97359e31
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2025 NVIDIA Corporation & Affiliates */
+
+#ifndef ACTION_STE_POOL_H_
+#define ACTION_STE_POOL_H_
+
+#define MLX5HWS_ACTION_STE_TABLE_INIT_LOG_SZ 10
+#define MLX5HWS_ACTION_STE_TABLE_STEP_LOG_SZ 1
+#define MLX5HWS_ACTION_STE_TABLE_MAX_LOG_SZ 20
+
+#define MLX5HWS_ACTION_STE_POOL_CLEANUP_SECONDS 300
+#define MLX5HWS_ACTION_STE_POOL_EXPIRE_SECONDS 300
+
+struct mlx5hws_action_ste_pool_element;
+
+struct mlx5hws_action_ste_table {
+ struct mlx5hws_action_ste_pool_element *parent_elem;
+ /* Wraps the RTC and STE range for this given action. */
+ struct mlx5hws_pool *pool;
+ /* Match STEs use this STC to jump to this pool's RTC. */
+ struct mlx5hws_pool_chunk stc;
+ u32 rtc_0_id;
+ u32 rtc_1_id;
+ struct list_head list_node;
+ unsigned long last_used;
+};
+
+struct mlx5hws_action_ste_pool_element {
+ struct mlx5hws_context *ctx;
+ struct mlx5hws_action_ste_pool *parent_pool;
+ size_t log_sz; /* Size of the largest table so far. */
+ enum mlx5hws_pool_optimize opt;
+ struct list_head available;
+ struct list_head full;
+};
+
+/* Central repository of action STEs. The context contains one of these pools
+ * per queue.
+ */
+struct mlx5hws_action_ste_pool {
+ /* Protects the entire pool. We have one pool per queue and only one
+ * operation can be active per rule at a given time. Thus this lock
+ * protects solely against concurrent garbage collection and we expect
+ * very little contention.
+ */
+ struct mutex lock;
+ struct mlx5hws_action_ste_pool_element elems[MLX5HWS_POOL_OPTIMIZE_MAX];
+};
+
+/* A chunk of STEs and the table it was allocated from. Used by rules. */
+struct mlx5hws_action_ste_chunk {
+ struct mlx5hws_action_ste_table *action_tbl;
+ struct mlx5hws_pool_chunk ste;
+};
+
+int mlx5hws_action_ste_pool_init(struct mlx5hws_context *ctx);
+
+void mlx5hws_action_ste_pool_uninit(struct mlx5hws_context *ctx);
+
+/* Callers are expected to fill chunk->ste.order. On success, this function
+ * populates chunk->action_tbl and chunk->ste.offset.
+ */
+int mlx5hws_action_ste_chunk_alloc(struct mlx5hws_action_ste_pool *pool,
+ bool skip_rx, bool skip_tx,
+ struct mlx5hws_action_ste_chunk *chunk);
+
+void mlx5hws_action_ste_chunk_free(struct mlx5hws_action_ste_chunk *chunk);
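+
+/* Illustrative call sequence (hypothetical caller, for documentation only;
+ * "pool" and "order" are placeholders):
+ *
+ * struct mlx5hws_action_ste_chunk chunk = {};
+ * int err;
+ *
+ * chunk.ste.order = order;
+ * err = mlx5hws_action_ste_chunk_alloc(pool, false, false, &chunk);
+ * if (!err) {
+ *         ... use chunk.action_tbl and chunk.ste.offset ...
+ *         mlx5hws_action_ste_chunk_free(&chunk);
+ * }
+ */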
+
+#endif /* ACTION_STE_POOL_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/buddy.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/buddy.c
new file mode 100644
index 000000000000..b9aef80ba094
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/buddy.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "internal.h"
+#include "buddy.h"
+
+static int hws_buddy_init(struct mlx5hws_buddy_mem *buddy, u32 max_order)
+{
+ int i, s, ret = 0;
+
+ buddy->max_order = max_order;
+
+ buddy->bitmap = kcalloc(buddy->max_order + 1,
+ sizeof(*buddy->bitmap),
+ GFP_KERNEL);
+ if (!buddy->bitmap)
+ return -ENOMEM;
+
+ buddy->num_free = kcalloc(buddy->max_order + 1,
+ sizeof(*buddy->num_free),
+ GFP_KERNEL);
+ if (!buddy->num_free) {
+ ret = -ENOMEM;
+ goto err_out_free_bits;
+ }
+
+ for (i = 0; i <= (int)buddy->max_order; ++i) {
+ s = 1 << (buddy->max_order - i);
+
+ buddy->bitmap[i] = bitmap_zalloc(s, GFP_KERNEL);
+ if (!buddy->bitmap[i]) {
+ ret = -ENOMEM;
+ goto err_out_free_num_free;
+ }
+ }
+
+ bitmap_set(buddy->bitmap[buddy->max_order], 0, 1);
+ buddy->num_free[buddy->max_order] = 1;
+
+ return 0;
+
+err_out_free_num_free:
+ for (i = 0; i <= (int)buddy->max_order; ++i)
+ bitmap_free(buddy->bitmap[i]);
+
+ kfree(buddy->num_free);
+
+err_out_free_bits:
+ kfree(buddy->bitmap);
+ return ret;
+}
+
+struct mlx5hws_buddy_mem *mlx5hws_buddy_create(u32 max_order)
+{
+ struct mlx5hws_buddy_mem *buddy;
+
+ buddy = kzalloc(sizeof(*buddy), GFP_KERNEL);
+ if (!buddy)
+ return NULL;
+
+ if (hws_buddy_init(buddy, max_order))
+ goto free_buddy;
+
+ return buddy;
+
+free_buddy:
+ kfree(buddy);
+ return NULL;
+}
+
+void mlx5hws_buddy_cleanup(struct mlx5hws_buddy_mem *buddy)
+{
+ int i;
+
+ for (i = 0; i <= (int)buddy->max_order; ++i)
+ bitmap_free(buddy->bitmap[i]);
+
+ kfree(buddy->num_free);
+ kfree(buddy->bitmap);
+}
+
+static int hws_buddy_find_free_seg(struct mlx5hws_buddy_mem *buddy,
+ u32 start_order,
+ u32 *segment,
+ u32 *order)
+{
+ unsigned int seg, order_iter, m;
+
+ for (order_iter = start_order;
+ order_iter <= buddy->max_order; ++order_iter) {
+ if (!buddy->num_free[order_iter])
+ continue;
+
+ m = 1 << (buddy->max_order - order_iter);
+ seg = find_first_bit(buddy->bitmap[order_iter], m);
+
+ if (WARN(seg >= m,
+ "ICM Buddy: failed finding free mem for order %d\n",
+ order_iter))
+ return -ENOMEM;
+
+ break;
+ }
+
+ if (order_iter > buddy->max_order)
+ return -ENOMEM;
+
+ *segment = seg;
+ *order = order_iter;
+ return 0;
+}
+
+int mlx5hws_buddy_alloc_mem(struct mlx5hws_buddy_mem *buddy, u32 order)
+{
+ u32 seg, order_iter, err;
+
+ err = hws_buddy_find_free_seg(buddy, order, &seg, &order_iter);
+ if (err)
+ return err;
+
+ bitmap_clear(buddy->bitmap[order_iter], seg, 1);
+ --buddy->num_free[order_iter];
+
+ while (order_iter > order) {
+ --order_iter;
+ seg <<= 1;
+ bitmap_set(buddy->bitmap[order_iter], seg ^ 1, 1);
+ ++buddy->num_free[order_iter];
+ }
+
+ seg <<= order;
+
+ return seg;
+}
+
+void mlx5hws_buddy_free_mem(struct mlx5hws_buddy_mem *buddy, u32 seg, u32 order)
+{
+ seg >>= order;
+
+ while (test_bit(seg ^ 1, buddy->bitmap[order])) {
+ bitmap_clear(buddy->bitmap[order], seg ^ 1, 1);
+ --buddy->num_free[order];
+ seg >>= 1;
+ ++order;
+ }
+
+ bitmap_set(buddy->bitmap[order], seg, 1);
+ ++buddy->num_free[order];
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/buddy.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/buddy.h
new file mode 100644
index 000000000000..ef6b223677aa
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/buddy.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef HWS_BUDDY_H_
+#define HWS_BUDDY_H_
+
+struct mlx5hws_buddy_mem {
+ unsigned long **bitmap;
+ unsigned int *num_free;
+ u32 max_order;
+};
+
+struct mlx5hws_buddy_mem *mlx5hws_buddy_create(u32 max_order);
+
+void mlx5hws_buddy_cleanup(struct mlx5hws_buddy_mem *buddy);
+
+int mlx5hws_buddy_alloc_mem(struct mlx5hws_buddy_mem *buddy, u32 order);
+
+void mlx5hws_buddy_free_mem(struct mlx5hws_buddy_mem *buddy, u32 seg, u32 order);
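+
+/* Worked example (for documentation only): with max_order = 2 the allocator
+ * starts with a single free order-2 block. mlx5hws_buddy_alloc_mem(buddy, 0)
+ * splits it twice and returns segment 0, leaving segment 1 free at order 0
+ * and segments 2-3 free as one order-1 block. Freeing segment 0 at order 0
+ * merges the buddies back into the original order-2 block.
+ */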
+
+#endif /* HWS_BUDDY_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
new file mode 100644
index 000000000000..6ef0c4be27e1
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
@@ -0,0 +1,1226 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "internal.h"
+
+static u16 hws_bwc_gen_queue_idx(struct mlx5hws_context *ctx)
+{
+ /* assign random queue */
+ return get_random_u8() % mlx5hws_bwc_queues(ctx);
+}
+
+static u16
+hws_bwc_get_burst_th(struct mlx5hws_context *ctx, u16 queue_id)
+{
+ return min(ctx->send_queue[queue_id].num_entries / 2,
+ MLX5HWS_BWC_MATCHER_REHASH_BURST_TH);
+}
+
+static struct mutex *
+hws_bwc_get_queue_lock(struct mlx5hws_context *ctx, u16 idx)
+{
+ return &ctx->bwc_send_queue_locks[idx];
+}
+
+static void hws_bwc_lock_all_queues(struct mlx5hws_context *ctx)
+{
+ u16 bwc_queues = mlx5hws_bwc_queues(ctx);
+ struct mutex *queue_lock; /* Protect the queue */
+ int i;
+
+ for (i = 0; i < bwc_queues; i++) {
+ queue_lock = hws_bwc_get_queue_lock(ctx, i);
+ mutex_lock(queue_lock);
+ }
+}
+
+static void hws_bwc_unlock_all_queues(struct mlx5hws_context *ctx)
+{
+ u16 bwc_queues = mlx5hws_bwc_queues(ctx);
+ struct mutex *queue_lock; /* Protect the queue */
+ int i = bwc_queues;
+
+ while (i--) {
+ queue_lock = hws_bwc_get_queue_lock(ctx, i);
+ mutex_unlock(queue_lock);
+ }
+}
+
+static void hws_bwc_matcher_init_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
+ u32 priority,
+ u8 size_log_rx, u8 size_log_tx,
+ struct mlx5hws_matcher_attr *attr)
+{
+ memset(attr, 0, sizeof(*attr));
+
+ attr->priority = priority;
+ attr->optimize_using_rule_idx = 0;
+ attr->mode = MLX5HWS_MATCHER_RESOURCE_MODE_RULE;
+ attr->optimize_flow_src = MLX5HWS_MATCHER_FLOW_SRC_ANY;
+ attr->insert_mode = MLX5HWS_MATCHER_INSERT_BY_HASH;
+ attr->distribute_mode = MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH;
+ attr->size[MLX5HWS_MATCHER_SIZE_TYPE_RX].rule.num_log = size_log_rx;
+ attr->size[MLX5HWS_MATCHER_SIZE_TYPE_TX].rule.num_log = size_log_tx;
+ attr->resizable = true;
+ attr->max_num_of_at_attach = MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM;
+}
+
+static int
+hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mlx5hws_matcher *matcher = bwc_matcher->matcher;
+ int drain_error = 0, move_error = 0, poll_error = 0;
+ u16 bwc_queues = mlx5hws_bwc_queues(ctx);
+ struct mlx5hws_rule_attr rule_attr;
+ struct mlx5hws_bwc_rule *bwc_rule;
+ struct mlx5hws_send_engine *queue;
+ struct list_head *rules_list;
+ u32 pending_rules;
+ int i, ret = 0;
+ bool drain;
+
+ mlx5hws_bwc_rule_fill_attr(bwc_matcher, 0, 0, &rule_attr);
+
+ for (i = 0; i < bwc_queues; i++) {
+ if (list_empty(&bwc_matcher->rules[i]))
+ continue;
+
+ pending_rules = 0;
+ rule_attr.queue_id = mlx5hws_bwc_get_queue_id(ctx, i);
+ rules_list = &bwc_matcher->rules[i];
+
+ list_for_each_entry(bwc_rule, rules_list, list_node) {
+ ret = mlx5hws_matcher_resize_rule_move(matcher,
+ bwc_rule->rule,
+ &rule_attr);
+ if (unlikely(ret)) {
+ if (!move_error) {
+ mlx5hws_err(ctx,
+ "Moving BWC rule: move failed (%d), attempting to move rest of the rules\n",
+ ret);
+ move_error = ret;
+ }
+ /* Rule wasn't queued, no need to poll */
+ continue;
+ }
+
+ pending_rules++;
+ drain = pending_rules >=
+ hws_bwc_get_burst_th(ctx, rule_attr.queue_id);
+ ret = mlx5hws_bwc_queue_poll(ctx,
+ rule_attr.queue_id,
+ &pending_rules,
+ drain);
+ if (unlikely(ret)) {
+ if (ret == -ETIMEDOUT) {
+ mlx5hws_err(ctx,
+ "Moving BWC rule: timeout polling for completions (%d), aborting rehash\n",
+ ret);
+ return ret;
+ }
+ if (!poll_error) {
+ mlx5hws_err(ctx,
+ "Moving BWC rule: polling for completions failed (%d), attempting to move rest of the rules\n",
+ ret);
+ poll_error = ret;
+ }
+ }
+ }
+
+ if (pending_rules) {
+ queue = &ctx->send_queue[rule_attr.queue_id];
+ mlx5hws_send_engine_flush_queue(queue);
+ ret = mlx5hws_bwc_queue_poll(ctx,
+ rule_attr.queue_id,
+ &pending_rules,
+ true);
+ if (unlikely(ret)) {
+ if (ret == -ETIMEDOUT) {
+ mlx5hws_err(ctx,
+ "Moving bwc rule: timeout draining completions (%d), aborting rehash\n",
+ ret);
+ return ret;
+ }
+ if (!drain_error) {
+ mlx5hws_err(ctx,
+ "Moving bwc rule: drain failed (%d), attempting to move rest of the rules\n",
+ ret);
+ drain_error = ret;
+ }
+ }
+ }
+ }
+
+ /* Return the first error that happened */
+ if (unlikely(move_error))
+ return move_error;
+ if (unlikely(poll_error))
+ return poll_error;
+ if (unlikely(drain_error))
+ return drain_error;
+
+ return ret;
+}
+
+static int hws_bwc_matcher_move_all(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ switch (bwc_matcher->matcher_type) {
+ case MLX5HWS_BWC_MATCHER_SIMPLE:
+ return hws_bwc_matcher_move_all_simple(bwc_matcher);
+ case MLX5HWS_BWC_MATCHER_COMPLEX_FIRST:
+ return mlx5hws_bwc_matcher_complex_move_first(bwc_matcher);
+ case MLX5HWS_BWC_MATCHER_COMPLEX_SUBMATCHER:
+ return mlx5hws_bwc_matcher_complex_move(bwc_matcher);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int hws_bwc_matcher_move(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mlx5hws_matcher_attr matcher_attr = {0};
+ struct mlx5hws_matcher *old_matcher;
+ struct mlx5hws_matcher *new_matcher;
+ int ret;
+
+ hws_bwc_matcher_init_attr(bwc_matcher,
+ bwc_matcher->priority,
+ bwc_matcher->rx_size.size_log,
+ bwc_matcher->tx_size.size_log,
+ &matcher_attr);
+
+ old_matcher = bwc_matcher->matcher;
+ new_matcher = mlx5hws_matcher_create(old_matcher->tbl,
+ &bwc_matcher->mt, 1,
+ bwc_matcher->at,
+ bwc_matcher->num_of_at,
+ &matcher_attr);
+ if (!new_matcher) {
+ mlx5hws_err(ctx, "Rehash error: matcher creation failed\n");
+ return -ENOMEM;
+ }
+
+ ret = mlx5hws_matcher_resize_set_target(old_matcher, new_matcher);
+ if (ret) {
+ mlx5hws_err(ctx, "Rehash error: failed setting resize target\n");
+ return ret;
+ }
+
+ ret = hws_bwc_matcher_move_all(bwc_matcher);
+ if (ret)
+ mlx5hws_err(ctx, "Rehash error: moving rules failed, attempting to remove the old matcher\n");
+
+ /* Error during rehash can't be rolled back.
+ * The best option here is to allow the rehash to complete and remove
+ * the old matcher - can't leave the matcher in the 'in_resize' state.
+ */
+
+ bwc_matcher->matcher = new_matcher;
+ mlx5hws_matcher_destroy(old_matcher);
+
+ return ret;
+}
+
+int mlx5hws_bwc_matcher_create_simple(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_table *table,
+ u32 priority,
+ u8 match_criteria_enable,
+ struct mlx5hws_match_parameters *mask,
+ enum mlx5hws_action_type action_types[])
+{
+ enum mlx5hws_action_type init_action_types[1] = { MLX5HWS_ACTION_TYP_LAST };
+ struct mlx5hws_context *ctx = table->ctx;
+ u16 bwc_queues = mlx5hws_bwc_queues(ctx);
+ struct mlx5hws_matcher_attr attr = {0};
+ int i;
+
+ bwc_matcher->rules = kcalloc(bwc_queues, sizeof(*bwc_matcher->rules), GFP_KERNEL);
+ if (!bwc_matcher->rules)
+ goto err;
+
+ for (i = 0; i < bwc_queues; i++)
+ INIT_LIST_HEAD(&bwc_matcher->rules[i]);
+
+ hws_bwc_matcher_init_attr(bwc_matcher,
+ priority,
+ bwc_matcher->rx_size.size_log,
+ bwc_matcher->tx_size.size_log,
+ &attr);
+
+ bwc_matcher->matcher_type = MLX5HWS_BWC_MATCHER_SIMPLE;
+ bwc_matcher->priority = priority;
+
+ bwc_matcher->size_of_at_array = MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM;
+ bwc_matcher->at = kcalloc(bwc_matcher->size_of_at_array,
+ sizeof(*bwc_matcher->at), GFP_KERNEL);
+ if (!bwc_matcher->at)
+ goto free_bwc_matcher_rules;
+
+ /* create dummy action template */
+ bwc_matcher->at[0] =
+ mlx5hws_action_template_create(action_types ?
+ action_types : init_action_types);
+ if (!bwc_matcher->at[0]) {
+ mlx5hws_err(table->ctx, "BWC matcher: failed creating action template\n");
+ goto free_bwc_matcher_at_array;
+ }
+
+ bwc_matcher->num_of_at = 1;
+
+ bwc_matcher->mt = mlx5hws_match_template_create(ctx,
+ mask->match_buf,
+ mask->match_sz,
+ match_criteria_enable);
+ if (!bwc_matcher->mt) {
+ mlx5hws_err(table->ctx, "BWC matcher: failed creating match template\n");
+ goto free_at;
+ }
+
+ bwc_matcher->matcher = mlx5hws_matcher_create(table,
+ &bwc_matcher->mt, 1,
+ &bwc_matcher->at[0],
+ bwc_matcher->num_of_at,
+ &attr);
+ if (!bwc_matcher->matcher) {
+ mlx5hws_err(table->ctx, "BWC matcher: failed creating HWS matcher\n");
+ goto free_mt;
+ }
+
+ return 0;
+
+free_mt:
+ mlx5hws_match_template_destroy(bwc_matcher->mt);
+free_at:
+ mlx5hws_action_template_destroy(bwc_matcher->at[0]);
+free_bwc_matcher_at_array:
+ kfree(bwc_matcher->at);
+free_bwc_matcher_rules:
+ kfree(bwc_matcher->rules);
+err:
+ return -EINVAL;
+}
+
+static void
+hws_bwc_matcher_init_size_rxtx(struct mlx5hws_bwc_matcher_size *size)
+{
+ size->size_log = MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG;
+ atomic_set(&size->num_of_rules, 0);
+ atomic_set(&size->rehash_required, false);
+}
+
+static void hws_bwc_matcher_init_size(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ hws_bwc_matcher_init_size_rxtx(&bwc_matcher->rx_size);
+ hws_bwc_matcher_init_size_rxtx(&bwc_matcher->tx_size);
+}
+
+struct mlx5hws_bwc_matcher *
+mlx5hws_bwc_matcher_create(struct mlx5hws_table *table,
+ u32 priority,
+ u8 match_criteria_enable,
+ struct mlx5hws_match_parameters *mask)
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher;
+ bool is_complex;
+ int ret;
+
+ if (!mlx5hws_context_bwc_supported(table->ctx)) {
+ mlx5hws_err(table->ctx,
+ "BWC matcher: context created w/o BWC API compatibility\n");
+ return NULL;
+ }
+
+ bwc_matcher = kzalloc(sizeof(*bwc_matcher), GFP_KERNEL);
+ if (!bwc_matcher)
+ return NULL;
+
+ hws_bwc_matcher_init_size(bwc_matcher);
+
+ /* Check if the required match params can be all matched
+ * in single STE, otherwise complex matcher is needed.
+ */
+
+ is_complex = mlx5hws_bwc_match_params_is_complex(table->ctx, match_criteria_enable, mask);
+ if (is_complex)
+ ret = mlx5hws_bwc_matcher_create_complex(bwc_matcher,
+ table,
+ priority,
+ match_criteria_enable,
+ mask);
+ else
+ ret = mlx5hws_bwc_matcher_create_simple(bwc_matcher,
+ table,
+ priority,
+ match_criteria_enable,
+ mask,
+ NULL);
+ if (ret)
+ goto free_bwc_matcher;
+
+ return bwc_matcher;
+
+free_bwc_matcher:
+ kfree(bwc_matcher);
+
+ return NULL;
+}
+
+int mlx5hws_bwc_matcher_destroy_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ int i;
+
+ mlx5hws_matcher_destroy(bwc_matcher->matcher);
+ bwc_matcher->matcher = NULL;
+
+ for (i = 0; i < bwc_matcher->num_of_at; i++)
+ mlx5hws_action_template_destroy(bwc_matcher->at[i]);
+ kfree(bwc_matcher->at);
+
+ mlx5hws_match_template_destroy(bwc_matcher->mt);
+ kfree(bwc_matcher->rules);
+
+ return 0;
+}
+
+int mlx5hws_bwc_matcher_destroy(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ u32 rx_rules = atomic_read(&bwc_matcher->rx_size.num_of_rules);
+ u32 tx_rules = atomic_read(&bwc_matcher->tx_size.num_of_rules);
+
+ if (rx_rules || tx_rules)
+ mlx5hws_err(bwc_matcher->matcher->tbl->ctx,
+ "BWC matcher destroy: matcher still has %u RX and %u TX rules\n",
+ rx_rules, tx_rules);
+
+ if (bwc_matcher->matcher_type == MLX5HWS_BWC_MATCHER_COMPLEX_FIRST)
+ mlx5hws_bwc_matcher_destroy_complex(bwc_matcher);
+ else
+ mlx5hws_bwc_matcher_destroy_simple(bwc_matcher);
+
+ kfree(bwc_matcher);
+ return 0;
+}
+
+int mlx5hws_bwc_queue_poll(struct mlx5hws_context *ctx,
+ u16 queue_id,
+ u32 *pending_rules,
+ bool drain)
+{
+ unsigned long timeout = jiffies +
+ secs_to_jiffies(MLX5HWS_BWC_POLLING_TIMEOUT);
+ struct mlx5hws_flow_op_result comp[MLX5HWS_BWC_MATCHER_REHASH_BURST_TH];
+ u16 burst_th = hws_bwc_get_burst_th(ctx, queue_id);
+ bool got_comp = *pending_rules >= burst_th;
+ bool queue_full;
+ int err = 0;
+ int ret;
+ int i;
+
+ /* Check if there are any completions at all */
+ if (!got_comp && !drain)
+ return 0;
+
+ queue_full = mlx5hws_send_engine_full(&ctx->send_queue[queue_id]);
+ while (queue_full || ((got_comp || drain) && *pending_rules)) {
+ ret = mlx5hws_send_queue_poll(ctx, queue_id, comp, burst_th);
+ if (unlikely(ret < 0)) {
+ mlx5hws_err(ctx, "BWC poll error: polling queue %d returned %d\n",
+ queue_id, ret);
+ return -EINVAL;
+ }
+
+ if (ret) {
+ (*pending_rules) -= ret;
+ for (i = 0; i < ret; i++) {
+ if (unlikely(comp[i].status != MLX5HWS_FLOW_OP_SUCCESS)) {
+ mlx5hws_err(ctx,
+ "BWC poll error: polling queue %d returned completion with error\n",
+ queue_id);
+ err = -EINVAL;
+ }
+ }
+ queue_full = false;
+ }
+
+ got_comp = !!ret;
+
+ if (unlikely(!got_comp && time_after(jiffies, timeout))) {
+ mlx5hws_err(ctx, "BWC poll error: polling queue %d - TIMEOUT\n", queue_id);
+ return -ETIMEDOUT;
+ }
+ }
+
+ return err;
+}
+
+void
+mlx5hws_bwc_rule_fill_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
+ u16 bwc_queue_idx,
+ u32 flow_source,
+ struct mlx5hws_rule_attr *rule_attr)
+{
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+
+ /* no use of INSERT_BY_INDEX in bwc rule */
+ rule_attr->rule_idx = 0;
+
+ /* notify HW at each rule insertion/deletion */
+ rule_attr->burst = 0;
+
+ /* We don't need user data, but the API requires it to exist */
+ rule_attr->user_data = (void *)0xFACADE;
+
+ rule_attr->queue_id = mlx5hws_bwc_get_queue_id(ctx, bwc_queue_idx);
+ rule_attr->flow_source = flow_source;
+}
+
+struct mlx5hws_bwc_rule *
+mlx5hws_bwc_rule_alloc(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ struct mlx5hws_bwc_rule *bwc_rule;
+
+ bwc_rule = kzalloc(sizeof(*bwc_rule), GFP_KERNEL);
+ if (unlikely(!bwc_rule))
+ goto out_err;
+
+ bwc_rule->rule = kzalloc(sizeof(*bwc_rule->rule), GFP_KERNEL);
+ if (unlikely(!bwc_rule->rule))
+ goto free_rule;
+
+ bwc_rule->bwc_matcher = bwc_matcher;
+ return bwc_rule;
+
+free_rule:
+ kfree(bwc_rule);
+out_err:
+ return NULL;
+}
+
+void mlx5hws_bwc_rule_free(struct mlx5hws_bwc_rule *bwc_rule)
+{
+ if (likely(bwc_rule->rule))
+ kfree(bwc_rule->rule);
+ kfree(bwc_rule);
+}
+
+static void hws_bwc_rule_list_add(struct mlx5hws_bwc_rule *bwc_rule, u16 idx)
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+
+ bwc_rule->bwc_queue_idx = idx;
+ list_add(&bwc_rule->list_node, &bwc_matcher->rules[idx]);
+}
+
+static void hws_bwc_rule_list_remove(struct mlx5hws_bwc_rule *bwc_rule)
+{
+ list_del_init(&bwc_rule->list_node);
+}
+
+static int
+hws_bwc_rule_destroy_hws_async(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ return mlx5hws_rule_destroy(bwc_rule->rule, attr);
+}
+
+static int
+hws_bwc_rule_destroy_hws_sync(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_rule_attr *rule_attr)
+{
+ struct mlx5hws_context *ctx = bwc_rule->bwc_matcher->matcher->tbl->ctx;
+ u32 expected_completions = 1;
+ int ret;
+
+ ret = hws_bwc_rule_destroy_hws_async(bwc_rule, rule_attr);
+ if (unlikely(ret))
+ return ret;
+
+ ret = mlx5hws_bwc_queue_poll(ctx, rule_attr->queue_id,
+ &expected_completions, true);
+ if (unlikely(ret))
+ return ret;
+
+ if (unlikely(bwc_rule->rule->status != MLX5HWS_RULE_STATUS_DELETED &&
+ bwc_rule->rule->status != MLX5HWS_RULE_STATUS_DELETING)) {
+ mlx5hws_err(ctx, "Failed destroying BWC rule: rule status %d\n",
+ bwc_rule->rule->status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void hws_bwc_rule_cnt_dec(struct mlx5hws_bwc_rule *bwc_rule)
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+
+ if (!bwc_rule->skip_rx)
+ atomic_dec(&bwc_matcher->rx_size.num_of_rules);
+ if (!bwc_rule->skip_tx)
+ atomic_dec(&bwc_matcher->tx_size.num_of_rules);
+}
+
+static int
+hws_bwc_matcher_rehash_shrink(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ struct mlx5hws_bwc_matcher_size *rx_size = &bwc_matcher->rx_size;
+ struct mlx5hws_bwc_matcher_size *tx_size = &bwc_matcher->tx_size;
+
+ /* It is possible that another thread has added a rule.
+ * Need to check again if we really need rehash/shrink.
+ */
+ if (atomic_read(&rx_size->num_of_rules) ||
+ atomic_read(&tx_size->num_of_rules))
+ return 0;
+
+ /* Nothing to shrink if the RX/TX sizes are already at their initial size. */
+ if (rx_size->size_log == MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG &&
+ tx_size->size_log == MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG)
+ return 0;
+
+ /* Now we've done all the checking - do the shrinking:
+ * - reset match RTC size to the initial size
+ * - create new matcher
+ * - move the rules, which will not do anything as the matcher is empty
+ * - destroy the old matcher
+ */
+
+ rx_size->size_log = MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG;
+ tx_size->size_log = MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG;
+
+ return hws_bwc_matcher_move(bwc_matcher);
+}
+
+static int hws_bwc_rule_cnt_dec_with_shrink(struct mlx5hws_bwc_rule *bwc_rule,
+ u16 bwc_queue_idx)
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mutex *queue_lock; /* Protect the queue */
+ int ret;
+
+ hws_bwc_rule_cnt_dec(bwc_rule);
+
+ if (atomic_read(&bwc_matcher->rx_size.num_of_rules) ||
+ atomic_read(&bwc_matcher->tx_size.num_of_rules))
+ return 0;
+
+ /* Matcher has no more rules - shrink it to save ICM. */
+
+ queue_lock = hws_bwc_get_queue_lock(ctx, bwc_queue_idx);
+ mutex_unlock(queue_lock);
+
+ hws_bwc_lock_all_queues(ctx);
+ ret = hws_bwc_matcher_rehash_shrink(bwc_matcher);
+ hws_bwc_unlock_all_queues(ctx);
+
+ mutex_lock(queue_lock);
+
+ if (unlikely(ret))
+ mlx5hws_err(ctx,
+ "BWC rule deletion: shrinking empty matcher failed (%d)\n",
+ ret);
+
+ return ret;
+}
+
+int mlx5hws_bwc_rule_destroy_simple(struct mlx5hws_bwc_rule *bwc_rule)
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ u16 idx = bwc_rule->bwc_queue_idx;
+ struct mlx5hws_rule_attr attr;
+ struct mutex *queue_lock; /* Protect the queue */
+ int ret;
+
+ mlx5hws_bwc_rule_fill_attr(bwc_matcher, idx, 0, &attr);
+
+ queue_lock = hws_bwc_get_queue_lock(ctx, idx);
+
+ mutex_lock(queue_lock);
+
+ ret = hws_bwc_rule_destroy_hws_sync(bwc_rule, &attr);
+ hws_bwc_rule_list_remove(bwc_rule);
+ hws_bwc_rule_cnt_dec_with_shrink(bwc_rule, idx);
+
+ mutex_unlock(queue_lock);
+
+ return ret;
+}
+
+int mlx5hws_bwc_rule_destroy(struct mlx5hws_bwc_rule *bwc_rule)
+{
+ bool is_complex = bwc_rule->bwc_matcher->matcher_type ==
+ MLX5HWS_BWC_MATCHER_COMPLEX_FIRST;
+ int ret = 0;
+
+ if (is_complex)
+ ret = mlx5hws_bwc_rule_destroy_complex(bwc_rule);
+ else
+ ret = mlx5hws_bwc_rule_destroy_simple(bwc_rule);
+
+ mlx5hws_bwc_rule_free(bwc_rule);
+ return ret;
+}
+
+static int
+hws_bwc_rule_create_async(struct mlx5hws_bwc_rule *bwc_rule,
+ u32 *match_param,
+ u8 at_idx,
+ struct mlx5hws_rule_action rule_actions[],
+ struct mlx5hws_rule_attr *rule_attr)
+{
+ return mlx5hws_rule_create(bwc_rule->bwc_matcher->matcher,
+ 0, /* only one match template supported */
+ match_param,
+ at_idx,
+ rule_actions,
+ rule_attr,
+ bwc_rule->rule);
+}
+
+static int
+hws_bwc_rule_create_sync(struct mlx5hws_bwc_rule *bwc_rule,
+ u32 *match_param,
+ u8 at_idx,
+ struct mlx5hws_rule_action rule_actions[],
+ struct mlx5hws_rule_attr *rule_attr)
+
+{
+ struct mlx5hws_context *ctx = bwc_rule->bwc_matcher->matcher->tbl->ctx;
+ u32 expected_completions = 1;
+ int ret;
+
+ ret = hws_bwc_rule_create_async(bwc_rule, match_param,
+ at_idx, rule_actions,
+ rule_attr);
+ if (unlikely(ret))
+ return ret;
+
+ return mlx5hws_bwc_queue_poll(ctx, rule_attr->queue_id,
+ &expected_completions, true);
+}
+
+static int
+hws_bwc_rule_update_sync(struct mlx5hws_bwc_rule *bwc_rule,
+ u8 at_idx,
+ struct mlx5hws_rule_action rule_actions[],
+ struct mlx5hws_rule_attr *rule_attr)
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ u32 expected_completions = 1;
+ int ret;
+
+ ret = mlx5hws_rule_action_update(bwc_rule->rule,
+ at_idx,
+ rule_actions,
+ rule_attr);
+ if (unlikely(ret))
+ return ret;
+
+ ret = mlx5hws_bwc_queue_poll(ctx, rule_attr->queue_id,
+ &expected_completions, true);
+ if (unlikely(ret))
+ mlx5hws_err(ctx, "Failed updating BWC rule (%d)\n", ret);
+
+ return ret;
+}
+
+static bool
+hws_bwc_matcher_size_maxed_out(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_bwc_matcher_size *size)
+{
+ struct mlx5hws_cmd_query_caps *caps = bwc_matcher->matcher->tbl->ctx->caps;
+
+ /* check the match RTC size */
+ return (size->size_log + MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH +
+ MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP) >
+ (caps->ste_alloc_log_max - 1);
+}
+
+static bool
+hws_bwc_matcher_rehash_size_needed(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_bwc_matcher_size *size,
+ u32 num_of_rules)
+{
+ if (unlikely(hws_bwc_matcher_size_maxed_out(bwc_matcher, size)))
+ return false;
+
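+ /* Request a rehash once the matcher is more than
+ * MLX5HWS_BWC_MATCHER_REHASH_PERCENT_TH percent full: with the threshold
+ * of 70 and size_log of 10, for example, this triggers once num_of_rules
+ * reaches 717 of the 1024 entries.
+ */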
+ if (unlikely((num_of_rules * 100 / MLX5HWS_BWC_MATCHER_REHASH_PERCENT_TH) >=
+ (1UL << size->size_log)))
+ return true;
+
+ return false;
+}
+
+static void
+hws_bwc_rule_actions_to_action_types(struct mlx5hws_rule_action rule_actions[],
+ enum mlx5hws_action_type action_types[])
+{
+ int i = 0;
+
+ for (i = 0;
+ rule_actions[i].action && (rule_actions[i].action->type != MLX5HWS_ACTION_TYP_LAST);
+ i++) {
+ action_types[i] = (enum mlx5hws_action_type)rule_actions[i].action->type;
+ }
+
+ action_types[i] = MLX5HWS_ACTION_TYP_LAST;
+}
+
+static int
+hws_bwc_matcher_extend_at(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_rule_action rule_actions[])
+{
+ enum mlx5hws_action_type action_types[MLX5HWS_BWC_MAX_ACTS];
+ void *p;
+
+ if (unlikely(bwc_matcher->num_of_at >= bwc_matcher->size_of_at_array)) {
+ if (bwc_matcher->size_of_at_array >= MLX5HWS_MATCHER_MAX_AT)
+ return -ENOMEM;
+ bwc_matcher->size_of_at_array *= 2;
+ p = krealloc(bwc_matcher->at,
+ bwc_matcher->size_of_at_array *
+ sizeof(*bwc_matcher->at),
+ __GFP_ZERO | GFP_KERNEL);
+ if (!p) {
+ bwc_matcher->size_of_at_array /= 2;
+ return -ENOMEM;
+ }
+
+ bwc_matcher->at = p;
+ }
+
+ hws_bwc_rule_actions_to_action_types(rule_actions, action_types);
+
+ bwc_matcher->at[bwc_matcher->num_of_at] =
+ mlx5hws_action_template_create(action_types);
+
+ if (unlikely(!bwc_matcher->at[bwc_matcher->num_of_at]))
+ return -ENOMEM;
+
+ bwc_matcher->num_of_at++;
+ return 0;
+}
+
+static int
+hws_bwc_matcher_extend_size(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_bwc_matcher_size *size)
+{
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mlx5hws_cmd_query_caps *caps = ctx->caps;
+
+ if (unlikely(hws_bwc_matcher_size_maxed_out(bwc_matcher, size))) {
+ mlx5hws_err(ctx, "Can't resize matcher: depth exceeds limit %d\n",
+ caps->rtc_log_depth_max);
+ return -ENOMEM;
+ }
+
+ size->size_log = min(size->size_log + MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP,
+ caps->ste_alloc_log_max -
+ MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH);
+
+ return 0;
+}
+
+static int
+hws_bwc_matcher_find_at(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_rule_action rule_actions[])
+{
+ enum mlx5hws_action_type *action_type_arr;
+ int i, j;
+
+ /* start from index 1 - first action template is a dummy */
+ for (i = 1; i < bwc_matcher->num_of_at; i++) {
+ j = 0;
+ action_type_arr = bwc_matcher->at[i]->action_type_arr;
+
+ while (rule_actions[j].action &&
+ rule_actions[j].action->type != MLX5HWS_ACTION_TYP_LAST) {
+ if (action_type_arr[j] != rule_actions[j].action->type)
+ break;
+ j++;
+ }
+
+ if (action_type_arr[j] == MLX5HWS_ACTION_TYP_LAST &&
+ (!rule_actions[j].action ||
+ rule_actions[j].action->type == MLX5HWS_ACTION_TYP_LAST))
+ return i;
+ }
+
+ return -1;
+}
+
+static int
+hws_bwc_matcher_rehash_size(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ bool need_rx_rehash, need_tx_rehash;
+ int ret;
+
+ need_rx_rehash = atomic_read(&bwc_matcher->rx_size.rehash_required);
+ need_tx_rehash = atomic_read(&bwc_matcher->tx_size.rehash_required);
+
+ /* It is possible that another thread has already performed the rehash.
+ * Need to check again if we really need rehash.
+ */
+ if (!need_rx_rehash && !need_tx_rehash)
+ return 0;
+
+ /* If the current matcher RX/TX size is already at its max size,
+ * it can't be rehashed.
+ */
+ if (need_rx_rehash &&
+ hws_bwc_matcher_size_maxed_out(bwc_matcher,
+ &bwc_matcher->rx_size)) {
+ atomic_set(&bwc_matcher->rx_size.rehash_required, false);
+ need_rx_rehash = false;
+ }
+ if (need_tx_rehash &&
+ hws_bwc_matcher_size_maxed_out(bwc_matcher,
+ &bwc_matcher->tx_size)) {
+ atomic_set(&bwc_matcher->tx_size.rehash_required, false);
+ need_tx_rehash = false;
+ }
+
+ /* If both RX and TX rehash flags are now off, it means that whatever
+ * we wanted to rehash is now at its max size - no rehash can be done.
+ * Return and try adding the rule again - perhaps there was some change.
+ */
+ if (!need_rx_rehash && !need_tx_rehash)
+ return 0;
+
+ /* Now we've done all the checking - do the rehash:
+ * - extend match RTC size
+ * - create new matcher
+ * - move all the rules to the new matcher
+ * - destroy the old matcher
+ */
+ atomic_set(&bwc_matcher->rx_size.rehash_required, false);
+ atomic_set(&bwc_matcher->tx_size.rehash_required, false);
+
+ if (need_rx_rehash) {
+ ret = hws_bwc_matcher_extend_size(bwc_matcher,
+ &bwc_matcher->rx_size);
+ if (ret)
+ return ret;
+ }
+
+ if (need_tx_rehash) {
+ ret = hws_bwc_matcher_extend_size(bwc_matcher,
+ &bwc_matcher->tx_size);
+ if (ret)
+ return ret;
+ }
+
+ return hws_bwc_matcher_move(bwc_matcher);
+}
+
+static int hws_bwc_rule_get_at_idx(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_rule_action rule_actions[],
+ u16 bwc_queue_idx)
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mutex *queue_lock; /* Protect the queue */
+ int at_idx, ret;
+
+ /* check if a matching action template already exists; extend if not */
+ at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
+ if (likely(at_idx >= 0))
+ return at_idx;
+
+ /* we need to extend BWC matcher action templates array */
+ queue_lock = hws_bwc_get_queue_lock(ctx, bwc_queue_idx);
+ mutex_unlock(queue_lock);
+ hws_bwc_lock_all_queues(ctx);
+
+ /* check again - perhaps other thread already did extend_at */
+ at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
+ if (at_idx >= 0)
+ goto out;
+
+ ret = hws_bwc_matcher_extend_at(bwc_matcher, rule_actions);
+ if (unlikely(ret)) {
+ mlx5hws_err(ctx, "BWC rule: failed extending AT (%d)", ret);
+ at_idx = -EINVAL;
+ goto out;
+ }
+
+ /* action templates array was extended, we need the last idx */
+ at_idx = bwc_matcher->num_of_at - 1;
+ ret = mlx5hws_matcher_attach_at(bwc_matcher->matcher,
+ bwc_matcher->at[at_idx]);
+ if (unlikely(ret)) {
+ mlx5hws_err(ctx, "BWC rule: failed attaching new AT (%d)", ret);
+ at_idx = -EINVAL;
+ goto out;
+ }
+
+out:
+ hws_bwc_unlock_all_queues(ctx);
+ mutex_lock(queue_lock);
+ return at_idx;
+}
+
+static void hws_bwc_rule_cnt_inc_rxtx(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_bwc_matcher_size *size)
+{
+ u32 num_of_rules = atomic_inc_return(&size->num_of_rules);
+
+ if (unlikely(hws_bwc_matcher_rehash_size_needed(bwc_rule->bwc_matcher,
+ size, num_of_rules)))
+ atomic_set(&size->rehash_required, true);
+}
+
+static void hws_bwc_rule_cnt_inc(struct mlx5hws_bwc_rule *bwc_rule)
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+
+ if (!bwc_rule->skip_rx)
+ hws_bwc_rule_cnt_inc_rxtx(bwc_rule, &bwc_matcher->rx_size);
+ if (!bwc_rule->skip_tx)
+ hws_bwc_rule_cnt_inc_rxtx(bwc_rule, &bwc_matcher->tx_size);
+}
+
+static int hws_bwc_rule_cnt_inc_with_rehash(struct mlx5hws_bwc_rule *bwc_rule,
+ u16 bwc_queue_idx)
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mutex *queue_lock; /* Protect the queue */
+ int ret;
+
+ hws_bwc_rule_cnt_inc(bwc_rule);
+
+ if (!atomic_read(&bwc_matcher->rx_size.rehash_required) &&
+ !atomic_read(&bwc_matcher->tx_size.rehash_required))
+ return 0;
+
+ queue_lock = hws_bwc_get_queue_lock(ctx, bwc_queue_idx);
+ mutex_unlock(queue_lock);
+
+ hws_bwc_lock_all_queues(ctx);
+ ret = hws_bwc_matcher_rehash_size(bwc_matcher);
+ hws_bwc_unlock_all_queues(ctx);
+
+ mutex_lock(queue_lock);
+
+ if (likely(!ret))
+ return 0;
+
+ /* Failed to rehash. Print a diagnostic and roll back the counters. */
+ mlx5hws_err(ctx,
+ "BWC rule insertion: rehash to sizes [%d, %d] failed (%d)\n",
+ bwc_matcher->rx_size.size_log,
+ bwc_matcher->tx_size.size_log, ret);
+ hws_bwc_rule_cnt_dec(bwc_rule);
+
+ return ret;
+}
+
+int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
+ u32 *match_param,
+ struct mlx5hws_rule_action rule_actions[],
+ u32 flow_source,
+ u16 bwc_queue_idx)
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mlx5hws_rule_attr rule_attr;
+ struct mutex *queue_lock; /* Protect the queue */
+ int ret = 0;
+ int at_idx;
+
+ mlx5hws_bwc_rule_fill_attr(bwc_matcher, bwc_queue_idx, flow_source, &rule_attr);
+
+ queue_lock = hws_bwc_get_queue_lock(ctx, bwc_queue_idx);
+
+ mutex_lock(queue_lock);
+
+ at_idx = hws_bwc_rule_get_at_idx(bwc_rule, rule_actions, bwc_queue_idx);
+ if (unlikely(at_idx < 0)) {
+ mutex_unlock(queue_lock);
+ mlx5hws_err(ctx, "BWC rule create: failed getting AT (%d)",
+ ret);
+ return -EINVAL;
+ }
+
+ ret = hws_bwc_rule_cnt_inc_with_rehash(bwc_rule, bwc_queue_idx);
+ if (unlikely(ret)) {
+ mutex_unlock(queue_lock);
+ return ret;
+ }
+
+ ret = hws_bwc_rule_create_sync(bwc_rule,
+ match_param,
+ at_idx,
+ rule_actions,
+ &rule_attr);
+ if (likely(!ret)) {
+ hws_bwc_rule_list_add(bwc_rule, bwc_queue_idx);
+ mutex_unlock(queue_lock);
+ return 0; /* rule inserted successfully */
+ }
+
+ /* Rule insertion could fail due to queue being full, timeout, or
+ * matcher in resize. In such cases, no point in trying to rehash.
+ */
+ if (ret == -EBUSY || ret == -ETIMEDOUT || ret == -EAGAIN) {
+ mutex_unlock(queue_lock);
+ mlx5hws_err(ctx,
+ "BWC rule insertion failed - %s (%d)\n",
+ ret == -EBUSY ? "queue is full" :
+ ret == -ETIMEDOUT ? "timeout" :
+ ret == -EAGAIN ? "matcher in resize" : "N/A",
+ ret);
+ hws_bwc_rule_cnt_dec(bwc_rule);
+ return ret;
+ }
+
+ /* At this point the rule wasn't added.
+ * It could be because there was a collision, or some other problem.
+ * Try rehash by size and insert rule again - last chance.
+ */
+ if (!bwc_rule->skip_rx)
+ atomic_set(&bwc_matcher->rx_size.rehash_required, true);
+ if (!bwc_rule->skip_tx)
+ atomic_set(&bwc_matcher->tx_size.rehash_required, true);
+
+ mutex_unlock(queue_lock);
+
+ hws_bwc_lock_all_queues(ctx);
+ ret = hws_bwc_matcher_rehash_size(bwc_matcher);
+ hws_bwc_unlock_all_queues(ctx);
+
+ if (ret) {
+ mlx5hws_err(ctx, "BWC rule insertion: rehash failed (%d)\n", ret);
+ hws_bwc_rule_cnt_dec(bwc_rule);
+ return ret;
+ }
+
+ /* Rehash done, but we still have that pesky rule to add */
+ mutex_lock(queue_lock);
+
+ ret = hws_bwc_rule_create_sync(bwc_rule,
+ match_param,
+ at_idx,
+ rule_actions,
+ &rule_attr);
+
+ if (unlikely(ret)) {
+ mutex_unlock(queue_lock);
+ mlx5hws_err(ctx, "BWC rule insertion failed (%d)\n", ret);
+ hws_bwc_rule_cnt_dec(bwc_rule);
+ return ret;
+ }
+
+ hws_bwc_rule_list_add(bwc_rule, bwc_queue_idx);
+ mutex_unlock(queue_lock);
+
+ return 0;
+}
+
+struct mlx5hws_bwc_rule *
+mlx5hws_bwc_rule_create(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_match_parameters *params,
+ u32 flow_source,
+ struct mlx5hws_rule_action rule_actions[])
+{
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mlx5hws_bwc_rule *bwc_rule;
+ u16 bwc_queue_idx;
+ int ret;
+
+ if (unlikely(!mlx5hws_context_bwc_supported(ctx))) {
+ mlx5hws_err(ctx, "BWC rule: Context created w/o BWC API compatibility\n");
+ return NULL;
+ }
+
+ bwc_rule = mlx5hws_bwc_rule_alloc(bwc_matcher);
+ if (unlikely(!bwc_rule))
+ return NULL;
+
+ bwc_rule->flow_source = flow_source;
+ mlx5hws_rule_skip(bwc_matcher->matcher, flow_source,
+ &bwc_rule->skip_rx, &bwc_rule->skip_tx);
+
+ bwc_queue_idx = hws_bwc_gen_queue_idx(ctx);
+
+ if (bwc_matcher->matcher_type == MLX5HWS_BWC_MATCHER_COMPLEX_FIRST)
+ ret = mlx5hws_bwc_rule_create_complex(bwc_rule,
+ params,
+ flow_source,
+ rule_actions,
+ bwc_queue_idx);
+ else
+ ret = mlx5hws_bwc_rule_create_simple(bwc_rule,
+ params->match_buf,
+ rule_actions,
+ flow_source,
+ bwc_queue_idx);
+ if (unlikely(ret)) {
+ mlx5hws_bwc_rule_free(bwc_rule);
+ return NULL;
+ }
+
+ return bwc_rule;
+}
+
+static int
+hws_bwc_rule_action_update(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_rule_action rule_actions[])
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mlx5hws_rule_attr rule_attr;
+ struct mutex *queue_lock; /* Protect the queue */
+ int at_idx, ret;
+ u16 idx;
+
+ idx = bwc_rule->bwc_queue_idx;
+
+ mlx5hws_bwc_rule_fill_attr(bwc_matcher, idx, bwc_rule->flow_source,
+ &rule_attr);
+ queue_lock = hws_bwc_get_queue_lock(ctx, idx);
+
+ mutex_lock(queue_lock);
+
+ at_idx = hws_bwc_rule_get_at_idx(bwc_rule, rule_actions, idx);
+ if (unlikely(at_idx < 0)) {
+ mutex_unlock(queue_lock);
+ mlx5hws_err(ctx, "BWC rule update: failed getting AT\n");
+ return -EINVAL;
+ }
+
+ ret = hws_bwc_rule_update_sync(bwc_rule,
+ at_idx,
+ rule_actions,
+ &rule_attr);
+ mutex_unlock(queue_lock);
+
+ if (unlikely(ret))
+ mlx5hws_err(ctx, "BWC rule: update failed (%d)\n", ret);
+
+ return ret;
+}
+
+int mlx5hws_bwc_rule_action_update(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_rule_action rule_actions[])
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+
+ if (unlikely(!mlx5hws_context_bwc_supported(ctx))) {
+ mlx5hws_err(ctx, "BWC rule: Context created w/o BWC API compatibility\n");
+ return -EINVAL;
+ }
+
+ /* For complex rules, the update should happen on the last subrule. */
+ while (bwc_rule->next_subrule)
+ bwc_rule = bwc_rule->next_subrule;
+
+ return hws_bwc_rule_action_update(bwc_rule, rule_actions);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
new file mode 100644
index 000000000000..b905511f5c53
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef HWS_BWC_H_
+#define HWS_BWC_H_
+
+#define MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG 1
+#define MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP 1
+#define MLX5HWS_BWC_MATCHER_REHASH_PERCENT_TH 70
+#define MLX5HWS_BWC_MATCHER_REHASH_BURST_TH 32
+
+/* Max number of AT attach operations for the same matcher.
+ * When the limit is reached, a larger buffer is allocated for the ATs.
+ */
+#define MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM 8
+
+#define MLX5HWS_BWC_MAX_ACTS 16
+
+#define MLX5HWS_BWC_POLLING_TIMEOUT 60
+
+enum mlx5hws_bwc_matcher_type {
+ /* Standalone bwc matcher. */
+ MLX5HWS_BWC_MATCHER_SIMPLE,
+ /* The first matcher of a complex matcher. When rules are inserted into
+ * a matcher of this type, they are split into subrules and inserted
+ * into their corresponding submatchers.
+ */
+ MLX5HWS_BWC_MATCHER_COMPLEX_FIRST,
+ /* A submatcher that is part of a complex matcher. For most purposes
+ * these are treated as simple matchers, except when it comes to moving
+ * rules during resize.
+ */
+ MLX5HWS_BWC_MATCHER_COMPLEX_SUBMATCHER,
+};
+
+struct mlx5hws_bwc_matcher_complex_data;
+
+struct mlx5hws_bwc_matcher_size {
+ u8 size_log;
+ atomic_t num_of_rules;
+ atomic_t rehash_required;
+};
+
+struct mlx5hws_bwc_matcher {
+ struct mlx5hws_matcher *matcher;
+ struct mlx5hws_match_template *mt;
+ struct mlx5hws_action_template **at;
+ struct mlx5hws_bwc_matcher_complex_data *complex;
+ u8 num_of_at;
+ u8 size_of_at_array;
+ enum mlx5hws_bwc_matcher_type matcher_type;
+ u32 priority;
+ struct mlx5hws_bwc_matcher_size rx_size;
+ struct mlx5hws_bwc_matcher_size tx_size;
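+ /* Per-queue rule lists, one per BWC queue. */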
+ struct list_head *rules;
+};
+
+struct mlx5hws_bwc_rule {
+ struct mlx5hws_bwc_matcher *bwc_matcher;
+ struct mlx5hws_rule *rule;
+ struct mlx5hws_bwc_rule *next_subrule;
+ struct mlx5hws_bwc_complex_subrule_data *subrule_data;
+ u32 flow_source;
+ u16 bwc_queue_idx;
+ bool skip_rx;
+ bool skip_tx;
+ struct list_head list_node;
+};
+
+int
+mlx5hws_bwc_matcher_create_simple(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_table *table,
+ u32 priority,
+ u8 match_criteria_enable,
+ struct mlx5hws_match_parameters *mask,
+ enum mlx5hws_action_type action_types[]);
+
+int mlx5hws_bwc_matcher_destroy_simple(struct mlx5hws_bwc_matcher *bwc_matcher);
+
+struct mlx5hws_bwc_rule *mlx5hws_bwc_rule_alloc(struct mlx5hws_bwc_matcher *bwc_matcher);
+
+void mlx5hws_bwc_rule_free(struct mlx5hws_bwc_rule *bwc_rule);
+
+int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
+ u32 *match_param,
+ struct mlx5hws_rule_action rule_actions[],
+ u32 flow_source,
+ u16 bwc_queue_idx);
+
+int mlx5hws_bwc_rule_destroy_simple(struct mlx5hws_bwc_rule *bwc_rule);
+
+void mlx5hws_bwc_rule_fill_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
+ u16 bwc_queue_idx,
+ u32 flow_source,
+ struct mlx5hws_rule_attr *rule_attr);
+
+int mlx5hws_bwc_queue_poll(struct mlx5hws_context *ctx,
+ u16 queue_id,
+ u32 *pending_rules,
+ bool drain);
+
+static inline u16 mlx5hws_bwc_queues(struct mlx5hws_context *ctx)
+{
+ /* Besides the control queue, half of the queues are
+ * regular HWS queues, and the other half are BWC queues.
+ */
+ if (mlx5hws_context_bwc_supported(ctx))
+ return (ctx->queues - 1) / 2;
+ return 0;
+}
+
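+/* BWC queue indices are relative; add the offset of the first BWC queue
+ * to get the underlying HWS queue id.
+ */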
+static inline u16 mlx5hws_bwc_get_queue_id(struct mlx5hws_context *ctx, u16 idx)
+{
+ return idx + mlx5hws_bwc_queues(ctx);
+}
+
+#endif /* HWS_BWC_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c
new file mode 100644
index 000000000000..660630f18ce9
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c
@@ -0,0 +1,1101 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "internal.h"
+
+/* We chain submatchers by applying three rules on a subrule: modify header (to
+ * set register C6), jump to table (to the next submatcher) and the mandatory
+ * last rule.
+ */
+#define HWS_NUM_CHAIN_ACTIONS 3
+
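+/* Subrules of different complex rules may have identical match tags. Hash
+ * the subrules by their match tag so that such duplicates can share a
+ * single physical rule (see struct mlx5hws_bwc_complex_subrule_data).
+ */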
+static const struct rhashtable_params hws_rules_hash_params = {
+ .key_len = sizeof_field(struct mlx5hws_bwc_complex_subrule_data,
+ match_tag),
+ .key_offset =
+ offsetof(struct mlx5hws_bwc_complex_subrule_data, match_tag),
+ .head_offset =
+ offsetof(struct mlx5hws_bwc_complex_subrule_data, hash_node),
+ .automatic_shrinking = true,
+ .min_size = 1,
+};
+
+static bool
+hws_match_params_exceeds_definer(struct mlx5hws_context *ctx,
+ u8 match_criteria_enable,
+ struct mlx5hws_match_parameters *mask,
+ bool allow_jumbo)
+{
+ struct mlx5hws_definer match_layout = {0};
+ struct mlx5hws_match_template *mt;
+ bool is_complex = false;
+ int ret;
+
+ if (!match_criteria_enable)
+ return false; /* empty matcher */
+
+ mt = mlx5hws_match_template_create(ctx,
+ mask->match_buf,
+ mask->match_sz,
+ match_criteria_enable);
+ if (!mt) {
+ mlx5hws_err(ctx, "Complex matcher: failed creating match template\n");
+ return false;
+ }
+
+ ret = mlx5hws_definer_calc_layout(ctx, mt, &match_layout, allow_jumbo);
+ if (ret) {
+ /* The only case that we're interested in is E2BIG,
+ * which means that the match parameters need to be
+ * split across a complex matcher.
+ * For all other cases (good or bad) - just return false
+ * and let the usual matcher creation path handle it.
+ */
+ if (ret == -E2BIG) {
+ is_complex = true;
+ mlx5hws_dbg(ctx, "Matcher definer layout: need complex matcher\n");
+ } else {
+ mlx5hws_err(ctx, "Failed to calculate matcher definer layout\n");
+ }
+ } else {
+ kfree(mt->fc);
+ }
+
+ mlx5hws_match_template_destroy(mt);
+
+ return is_complex;
+}
+
+bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
+ u8 match_criteria_enable,
+ struct mlx5hws_match_parameters *mask)
+{
+ return hws_match_params_exceeds_definer(ctx, match_criteria_enable,
+ mask, true);
+}
+
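+/* Return the index of the last (highest) non-zero dword in the mask,
+ * or -1 if the mask is empty.
+ */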
+static int
+hws_get_last_set_dword_idx(const struct mlx5hws_match_parameters *mask)
+{
+ int i;
+
+ for (i = mask->match_sz / 4 - 1; i >= 0; i--)
+ if (mask->match_buf[i])
+ return i;
+
+ return -1;
+}
+
+static bool hws_match_mask_is_empty(const struct mlx5hws_match_parameters *mask)
+{
+ return hws_get_last_set_dword_idx(mask) == -1;
+}
+
+static bool hws_dword_is_inner_ipaddr_off(int dword_off)
+{
+ /* IPv4 and IPv6 addresses share the same entry via a union, and the
+ * source and dest addresses are contiguous in the fte_match_param. So
+ * we need to check 8 words.
+ */
+ static const int inner_ip_dword_off =
+ __mlx5_dw_off(fte_match_param, inner_headers.src_ipv4_src_ipv6);
+
+ return dword_off >= inner_ip_dword_off &&
+ dword_off < inner_ip_dword_off + 8;
+}
+
+static bool hws_dword_is_outer_ipaddr_off(int dword_off)
+{
+ static const int outer_ip_dword_off =
+ __mlx5_dw_off(fte_match_param, outer_headers.src_ipv4_src_ipv6);
+
+ return dword_off >= outer_ip_dword_off &&
+ dword_off < outer_ip_dword_off + 8;
+}
+
+static void hws_add_dword_to_mask(struct mlx5hws_match_parameters *mask,
+ const struct mlx5hws_match_parameters *orig,
+ int dword_idx, bool *added_inner_ipv,
+ bool *added_outer_ipv)
+{
+ mask->match_buf[dword_idx] |= orig->match_buf[dword_idx];
+
+ *added_inner_ipv = false;
+ *added_outer_ipv = false;
+
+ /* Any IP address fragment must be accompanied by a match on IP version.
+ * Use the `added_ipv` variables to keep track of whether we added IP versions
+ * specifically for this dword, so that we can roll them back if the
+ * match params become too large to fit into a definer.
+ */
+ if (hws_dword_is_inner_ipaddr_off(dword_idx) &&
+ !MLX5_GET(fte_match_param, mask->match_buf,
+ inner_headers.ip_version)) {
+ MLX5_SET(fte_match_param, mask->match_buf,
+ inner_headers.ip_version, 0xf);
+ *added_inner_ipv = true;
+ }
+ if (hws_dword_is_outer_ipaddr_off(dword_idx) &&
+ !MLX5_GET(fte_match_param, mask->match_buf,
+ outer_headers.ip_version)) {
+ MLX5_SET(fte_match_param, mask->match_buf,
+ outer_headers.ip_version, 0xf);
+ *added_outer_ipv = true;
+ }
+}
+
+static void hws_remove_dword_from_mask(struct mlx5hws_match_parameters *mask,
+ int dword_idx, bool added_inner_ipv,
+ bool added_outer_ipv)
+{
+ mask->match_buf[dword_idx] = 0;
+ if (added_inner_ipv)
+ MLX5_SET(fte_match_param, mask->match_buf,
+ inner_headers.ip_version, 0);
+ if (added_outer_ipv)
+ MLX5_SET(fte_match_param, mask->match_buf,
+ outer_headers.ip_version, 0);
+}
+
+/* Avoid leaving a single lower dword in `mask` if there are others present in
+ * `orig`. Splitting IPv6 addresses like this causes them to be interpreted as
+ * IPv4.
+ */
+static void hws_avoid_ipv6_split_of(struct mlx5hws_match_parameters *orig,
+ struct mlx5hws_match_parameters *mask,
+ int off)
+{
+ /* Masks are allocated to a full fte_match_param, but it can't hurt to
+ * double check.
+ */
+ if (orig->match_sz <= off + 3 || mask->match_sz <= off + 3)
+ return;
+
+ /* Lower dword is not set, nothing to do. */
+ if (!mask->match_buf[off + 3])
+ return;
+
+ /* Higher dwords also present in `mask`, no ambiguity. */
+ if (mask->match_buf[off] || mask->match_buf[off + 1] ||
+ mask->match_buf[off + 2])
+ return;
+
+ /* There are no higher dwords in `orig`, i.e. we match on IPv4. */
+ if (!orig->match_buf[off] && !orig->match_buf[off + 1] &&
+ !orig->match_buf[off + 2])
+ return;
+
+ /* Put the lower dword back in `orig`. It is always safe to do this, the
+ * dword will just be picked up in the next submask.
+ */
+ orig->match_buf[off + 3] = mask->match_buf[off + 3];
+ mask->match_buf[off + 3] = 0;
+}
+
+static void hws_avoid_ipv6_split(struct mlx5hws_match_parameters *orig,
+ struct mlx5hws_match_parameters *mask)
+{
+ hws_avoid_ipv6_split_of(orig, mask,
+ __mlx5_dw_off(fte_match_param,
+ outer_headers.src_ipv4_src_ipv6));
+ hws_avoid_ipv6_split_of(orig, mask,
+ __mlx5_dw_off(fte_match_param,
+ outer_headers.dst_ipv4_dst_ipv6));
+ hws_avoid_ipv6_split_of(orig, mask,
+ __mlx5_dw_off(fte_match_param,
+ inner_headers.src_ipv4_src_ipv6));
+ hws_avoid_ipv6_split_of(orig, mask,
+ __mlx5_dw_off(fte_match_param,
+ inner_headers.dst_ipv4_dst_ipv6));
+}
+
+/* Build a subset of the `orig` match parameters into `mask`. This subset is
+ * guaranteed to fit in a single definer and as such is a candidate for being a
+ * part of a complex matcher. Upon successful execution, the match params that
+ * go into `mask` are cleared from `orig`.
+ */
+static int hws_get_simple_params(struct mlx5hws_context *ctx, u8 match_criteria,
+ struct mlx5hws_match_parameters *orig,
+ struct mlx5hws_match_parameters *mask)
+{
+ bool added_inner_ipv, added_outer_ipv;
+ int dword_idx;
+ u32 *backup;
+ int ret;
+
+ dword_idx = hws_get_last_set_dword_idx(orig);
+ /* Nothing to do, we consumed all of the match params before. */
+ if (dword_idx == -1)
+ return 0;
+
+ backup = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
+ if (!backup)
+ return -ENOMEM;
+
+ while (1) {
+ dword_idx = hws_get_last_set_dword_idx(orig);
+ /* Nothing to do, we consumed all of the original match params
+ * into this subset, which still fits into a single matcher.
+ */
+ if (dword_idx == -1) {
+ ret = 0;
+ goto free_backup;
+ }
+
+ memcpy(backup, mask->match_buf, mask->match_sz);
+
+ /* Try to add this dword to the current subset. */
+ hws_add_dword_to_mask(mask, orig, dword_idx, &added_inner_ipv,
+ &added_outer_ipv);
+
+ if (hws_match_params_exceeds_definer(ctx, match_criteria, mask,
+ false)) {
+ /* We just added a match param that makes the definer
+ * too large. Revert and return what we had before.
+ * Note that we can't just zero out the affected fields,
+ * because it's possible that the dword we're looking at
+ * wasn't zero before (e.g. it included auto-added
+ * matches on IP version). This is why we employ the
+ * rather cumbersome memcpy for backing up.
+ */
+ memcpy(mask->match_buf, backup, mask->match_sz);
+ /* Possible future improvement: We can't add any more
+ * dwords, but it may be possible to squeeze in
+ * individual bytes, as definers have special slots for
+ * those.
+ *
+ * For now, keep the code simple. This results in an
+ * extra submatcher in some cases, but it's good enough.
+ */
+ ret = 0;
+ break;
+ }
+
+ /* The current subset of match params still fits in a single
+ * definer. Remove the dword from the original mask.
+ *
+ * Also remove any explicit match on IP version if we just
+ * included one here. We will still automatically add it to
+ * accompany any IP address fragment, but do not need to
+ * consider it by itself.
+ */
+ hws_remove_dword_from_mask(orig, dword_idx, added_inner_ipv,
+ added_outer_ipv);
+ }
+
+ /* Make sure we have not picked up a single lower dword of an IPv6
+ * address, as the firmware will erroneously treat it as an IPv4
+ * address.
+ */
+ hws_avoid_ipv6_split(orig, mask);
+
+free_backup:
+ kfree(backup);
+
+ return ret;
+}
+
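+/* Split the original mask into submasks, each of which fits into a single
+ * definer. The number of resulting submasks is returned in num_submasks.
+ */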
+static int
+hws_bwc_matcher_split_mask(struct mlx5hws_context *ctx, u8 match_criteria,
+ const struct mlx5hws_match_parameters *mask,
+ struct mlx5hws_match_parameters *submasks,
+ int *num_submasks)
+{
+ struct mlx5hws_match_parameters mask_copy;
+ int ret, i = 0;
+
+ mask_copy.match_sz = MLX5_ST_SZ_BYTES(fte_match_param);
+ mask_copy.match_buf = kzalloc(mask_copy.match_sz, GFP_KERNEL);
+ if (!mask_copy.match_buf)
+ return -ENOMEM;
+
+ memcpy(mask_copy.match_buf, mask->match_buf, mask->match_sz);
+
+ while (!hws_match_mask_is_empty(&mask_copy)) {
+ if (i >= MLX5HWS_BWC_COMPLEX_MAX_SUBMATCHERS) {
+ mlx5hws_err(ctx,
+ "Complex matcher: mask too large for %d matchers\n",
+ MLX5HWS_BWC_COMPLEX_MAX_SUBMATCHERS);
+ ret = -E2BIG;
+ goto free_copy;
+ }
+ /* All but the first matcher need to match on register C6 to
+ * connect pieces of the complex rule together.
+ */
+ if (i > 0) {
+ MLX5_SET(fte_match_param, submasks[i].match_buf,
+ misc_parameters_2.metadata_reg_c_6, -1);
+ match_criteria |= MLX5HWS_DEFINER_MATCH_CRITERIA_MISC2;
+ }
+ ret = hws_get_simple_params(ctx, match_criteria, &mask_copy,
+ &submasks[i]);
+ if (ret < 0)
+ goto free_copy;
+ i++;
+ }
+
+ *num_submasks = i;
+ ret = 0;
+
+free_copy:
+ kfree(mask_copy.match_buf);
+
+ return ret;
+}
+
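+/* Each submatcher beyond the first lives in its own isolated table. On miss,
+ * the isolated table falls through to the end anchor of the original matcher.
+ */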
+static struct mlx5hws_table *
+hws_isolated_table_create(const struct mlx5hws_bwc_matcher *cmatcher)
+{
+ struct mlx5hws_bwc_complex_submatcher *first_subm;
+ struct mlx5hws_cmd_ft_modify_attr ft_attr = {0};
+ struct mlx5hws_table_attr tbl_attr = {0};
+ struct mlx5hws_table *orig_tbl;
+ struct mlx5hws_context *ctx;
+ struct mlx5hws_table *tbl;
+ int ret;
+
+ first_subm = &cmatcher->complex->submatchers[0];
+ orig_tbl = first_subm->tbl;
+ ctx = orig_tbl->ctx;
+
+ tbl_attr.type = orig_tbl->type;
+ tbl_attr.level = orig_tbl->level;
+ tbl = mlx5hws_table_create(ctx, &tbl_attr);
+ if (!tbl)
+ return ERR_PTR(-EINVAL);
+
+ /* Set the default miss of the isolated table to point
+ * to the end anchor of the original matcher.
+ */
+ mlx5hws_cmd_set_attr_connect_miss_tbl(ctx, tbl->fw_ft_type,
+ tbl->type, &ft_attr);
+ ft_attr.table_miss_id = first_subm->bwc_matcher->matcher->end_ft_id;
+
+ ret = mlx5hws_cmd_flow_table_modify(ctx->mdev, &ft_attr, tbl->ft_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Complex matcher: failed to set isolated tbl default miss\n");
+ goto destroy_tbl;
+ }
+
+ return tbl;
+
+destroy_tbl:
+ mlx5hws_table_destroy(tbl);
+
+ return ERR_PTR(ret);
+}
+
+static int hws_submatcher_init_first(struct mlx5hws_bwc_matcher *cmatcher,
+ struct mlx5hws_table *table, u32 priority,
+ u8 match_criteria,
+ struct mlx5hws_match_parameters *mask)
+{
+ enum mlx5hws_action_type action_types[HWS_NUM_CHAIN_ACTIONS];
+ struct mlx5hws_bwc_complex_submatcher *subm;
+ int ret;
+
+ subm = &cmatcher->complex->submatchers[0];
+
+ /* The first submatcher lives in the original table and does not have an
+ * associated jump to table action. It also points to the outer complex
+ * matcher.
+ */
+ subm->tbl = table;
+ subm->action_tbl = NULL;
+ subm->bwc_matcher = cmatcher;
+
+ action_types[0] = MLX5HWS_ACTION_TYP_MODIFY_HDR;
+ action_types[1] = MLX5HWS_ACTION_TYP_TBL;
+ action_types[2] = MLX5HWS_ACTION_TYP_LAST;
+
+ ret = mlx5hws_bwc_matcher_create_simple(subm->bwc_matcher, subm->tbl,
+ priority, match_criteria, mask,
+ action_types);
+ if (ret)
+ return ret;
+
+ subm->bwc_matcher->matcher_type = MLX5HWS_BWC_MATCHER_COMPLEX_FIRST;
+
+ ret = rhashtable_init(&subm->rules_hash, &hws_rules_hash_params);
+ if (ret)
+ goto destroy_matcher;
+ mutex_init(&subm->hash_lock);
+ ida_init(&subm->chain_ida);
+
+ return 0;
+
+destroy_matcher:
+ mlx5hws_bwc_matcher_destroy_simple(subm->bwc_matcher);
+
+ return ret;
+}
+
+static int hws_submatcher_init(struct mlx5hws_bwc_matcher *cmatcher, int idx,
+ struct mlx5hws_table *table, u32 priority,
+ u8 match_criteria,
+ struct mlx5hws_match_parameters *mask)
+{
+ enum mlx5hws_action_type action_types[HWS_NUM_CHAIN_ACTIONS];
+ struct mlx5hws_bwc_complex_submatcher *subm;
+ bool is_last;
+ int ret;
+
+ if (!idx)
+ return hws_submatcher_init_first(cmatcher, table, priority,
+ match_criteria, mask);
+
+ subm = &cmatcher->complex->submatchers[idx];
+ is_last = idx == cmatcher->complex->num_submatchers - 1;
+
+ subm->tbl = hws_isolated_table_create(cmatcher);
+ if (IS_ERR(subm->tbl))
+ return PTR_ERR(subm->tbl);
+
+ subm->action_tbl =
+ mlx5hws_action_create_dest_table(subm->tbl->ctx, subm->tbl,
+ MLX5HWS_ACTION_FLAG_HWS_FDB);
+ if (!subm->action_tbl) {
+ ret = -EINVAL;
+ goto destroy_tbl;
+ }
+
+ subm->bwc_matcher = kzalloc(sizeof(*subm->bwc_matcher), GFP_KERNEL);
+ if (!subm->bwc_matcher) {
+ ret = -ENOMEM;
+ goto destroy_action;
+ }
+
+ /* Every matcher other than the first also matches on register C6 to
+ * bind subrules together in the complex rule using the chain ids.
+ */
+ match_criteria |= MLX5HWS_DEFINER_MATCH_CRITERIA_MISC2;
+
+ action_types[0] = MLX5HWS_ACTION_TYP_MODIFY_HDR;
+ action_types[1] = MLX5HWS_ACTION_TYP_TBL;
+ action_types[2] = MLX5HWS_ACTION_TYP_LAST;
+
+ /* Every matcher other than the last sets register C6 and jumps to the
+ * next submatcher's table. The final submatcher will use the
+ * user-supplied actions and will attach an action template at rule
+ * insertion time.
+ */
+ ret = mlx5hws_bwc_matcher_create_simple(subm->bwc_matcher, subm->tbl,
+ priority, match_criteria, mask,
+ is_last ? NULL : action_types);
+ if (ret)
+ goto free_matcher;
+
+ subm->bwc_matcher->matcher_type =
+ MLX5HWS_BWC_MATCHER_COMPLEX_SUBMATCHER;
+
+ ret = rhashtable_init(&subm->rules_hash, &hws_rules_hash_params);
+ if (ret)
+ goto destroy_matcher;
+ mutex_init(&subm->hash_lock);
+ ida_init(&subm->chain_ida);
+
+ return 0;
+
+destroy_matcher:
+ mlx5hws_bwc_matcher_destroy_simple(subm->bwc_matcher);
+free_matcher:
+ kfree(subm->bwc_matcher);
+destroy_action:
+ mlx5hws_action_destroy(subm->action_tbl);
+destroy_tbl:
+ mlx5hws_table_destroy(subm->tbl);
+
+ return ret;
+}
+
+static void hws_submatcher_destroy(struct mlx5hws_bwc_matcher *cmatcher,
+ int idx)
+{
+ struct mlx5hws_bwc_complex_submatcher *subm;
+
+ subm = &cmatcher->complex->submatchers[idx];
+
+ ida_destroy(&subm->chain_ida);
+ mutex_destroy(&subm->hash_lock);
+ rhashtable_destroy(&subm->rules_hash);
+
+ if (subm->bwc_matcher) {
+ mlx5hws_bwc_matcher_destroy_simple(subm->bwc_matcher);
+ if (idx)
+ kfree(subm->bwc_matcher);
+ }
+
+ /* We own all of the isolated tables, but not the original one. */
+ if (idx) {
+ mlx5hws_action_destroy(subm->action_tbl);
+ mlx5hws_table_destroy(subm->tbl);
+ }
+}
+
+static int
+hws_complex_data_actions_init(struct mlx5hws_bwc_matcher_complex_data *cdata)
+{
+ struct mlx5hws_context *ctx = cdata->submatchers[0].tbl->ctx;
+ u8 modify_hdr_action[MLX5_ST_SZ_BYTES(set_action_in)] = {0};
+ struct mlx5hws_action_mh_pattern ptrn;
+ int ret = 0;
+
+ /* Create modify header action to set REG_C_6 */
+ MLX5_SET(set_action_in, modify_hdr_action,
+ action_type, MLX5_MODIFICATION_TYPE_SET);
+ MLX5_SET(set_action_in, modify_hdr_action,
+ field, MLX5_MODI_META_REG_C_6);
+ MLX5_SET(set_action_in, modify_hdr_action,
+ length, 0); /* zero means length of 32 */
+ MLX5_SET(set_action_in, modify_hdr_action, offset, 0);
+ MLX5_SET(set_action_in, modify_hdr_action, data, 0);
+
+ ptrn.data = (void *)modify_hdr_action;
+ ptrn.sz = MLX5HWS_ACTION_DOUBLE_SIZE;
+
+ cdata->action_metadata =
+ mlx5hws_action_create_modify_header(ctx, 1, &ptrn, 0,
+ MLX5HWS_ACTION_FLAG_HWS_FDB);
+ if (!cdata->action_metadata) {
+ mlx5hws_err(ctx, "Complex matcher: failed to create set reg C6 action\n");
+ return -EINVAL;
+ }
+
+ /* Create last action */
+ cdata->action_last =
+ mlx5hws_action_create_last(ctx, MLX5HWS_ACTION_FLAG_HWS_FDB);
+ if (!cdata->action_last) {
+ mlx5hws_err(ctx, "Complex matcher: failed to create last action\n");
+ ret = -EINVAL;
+ goto destroy_action_metadata;
+ }
+
+ return 0;
+
+destroy_action_metadata:
+ mlx5hws_action_destroy(cdata->action_metadata);
+
+ return ret;
+}
+
+static void
+hws_complex_data_actions_destroy(struct mlx5hws_bwc_matcher_complex_data *cdata)
+{
+ mlx5hws_action_destroy(cdata->action_last);
+ mlx5hws_action_destroy(cdata->action_metadata);
+}
+
+int mlx5hws_bwc_matcher_create_complex(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_table *table,
+ u32 priority, u8 match_criteria_enable,
+ struct mlx5hws_match_parameters *mask)
+{
+ struct mlx5hws_match_parameters
+ submasks[MLX5HWS_BWC_COMPLEX_MAX_SUBMATCHERS] = {0};
+ struct mlx5hws_bwc_matcher_complex_data *cdata;
+ struct mlx5hws_context *ctx = table->ctx;
+ int num_submatchers;
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(submasks); i++) {
+ submasks[i].match_sz = MLX5_ST_SZ_BYTES(fte_match_param);
+ submasks[i].match_buf = kzalloc(submasks[i].match_sz,
+ GFP_KERNEL);
+ if (!submasks[i].match_buf) {
+ ret = -ENOMEM;
+ goto free_submasks;
+ }
+ }
+
+ ret = hws_bwc_matcher_split_mask(ctx, match_criteria_enable, mask,
+ submasks, &num_submatchers);
+ if (ret)
+ goto free_submasks;
+
+ cdata = kzalloc(sizeof(*cdata), GFP_KERNEL);
+ if (!cdata) {
+ ret = -ENOMEM;
+ goto free_submasks;
+ }
+
+ bwc_matcher->complex = cdata;
+ cdata->num_submatchers = num_submatchers;
+
+ for (i = 0; i < num_submatchers; i++) {
+ ret = hws_submatcher_init(bwc_matcher, i, table, priority,
+ match_criteria_enable, &submasks[i]);
+ if (ret)
+ goto destroy_submatchers;
+ }
+
+ ret = hws_complex_data_actions_init(cdata);
+ if (ret)
+ goto destroy_submatchers;
+
+ ret = 0;
+ goto free_submasks;
+
+destroy_submatchers:
+ while (i--)
+ hws_submatcher_destroy(bwc_matcher, i);
+ kfree(cdata);
+ bwc_matcher->complex = NULL;
+
+free_submasks:
+ for (i = 0; i < ARRAY_SIZE(submasks); i++)
+ kfree(submasks[i].match_buf);
+
+ return ret;
+}
+
+void
+mlx5hws_bwc_matcher_destroy_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ int i;
+
+ hws_complex_data_actions_destroy(bwc_matcher->complex);
+ for (i = 0; i < bwc_matcher->complex->num_submatchers; i++)
+ hws_submatcher_destroy(bwc_matcher, i);
+ kfree(bwc_matcher->complex);
+ bwc_matcher->complex = NULL;
+}
+
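+/* Look up the subrule's match tag in the submatcher's hash table. If an
+ * entry with an identical tag already exists, take a reference on it and
+ * reuse it; otherwise insert a new entry with a freshly allocated chain id.
+ */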
+static int
+hws_complex_get_subrule_data(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_bwc_complex_submatcher *subm,
+ u32 *match_params)
+__must_hold(&subm->hash_lock)
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = subm->bwc_matcher;
+ struct mlx5hws_bwc_complex_subrule_data *sr_data, *old_data;
+ struct mlx5hws_match_template *mt;
+ int ret;
+
+ sr_data = kzalloc(sizeof(*sr_data), GFP_KERNEL);
+ if (!sr_data)
+ return -ENOMEM;
+
+ ret = ida_alloc(&subm->chain_ida, GFP_KERNEL);
+ if (ret < 0)
+ goto free_sr_data;
+ sr_data->chain_id = ret;
+
+ refcount_set(&sr_data->refcount, 1);
+
+ mt = bwc_matcher->matcher->mt;
+ mlx5hws_definer_create_tag(match_params, mt->fc, mt->fc_sz,
+ (u8 *)&sr_data->match_tag);
+
+ old_data = rhashtable_lookup_get_insert_fast(&subm->rules_hash,
+ &sr_data->hash_node,
+ hws_rules_hash_params);
+ if (IS_ERR(old_data)) {
+ ret = PTR_ERR(old_data);
+ goto free_ida;
+ }
+
+ if (old_data) {
+ /* Rule with the same tag already exists - update refcount */
+ refcount_inc(&old_data->refcount);
+ /* Let the new rule use the same tag as the existing rule.
+ * Note that we don't have any indication for the rule creation
+ * process that a rule with similar matching params already
+ * exists - no harm done when this rule is overwritten by
+ * the same STE.
+ * There's some performance advantage in skipping such cases,
+ * so this is left for future optimizations.
+ */
+ bwc_rule->subrule_data = old_data;
+ ret = 0;
+ goto free_ida;
+ }
+
+ bwc_rule->subrule_data = sr_data;
+ return 0;
+
+free_ida:
+ ida_free(&subm->chain_ida, sr_data->chain_id);
+free_sr_data:
+ kfree(sr_data);
+
+ return ret;
+}
+
+static void
+hws_complex_put_subrule_data(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_bwc_complex_submatcher *subm,
+ bool *is_last_rule)
+__must_hold(&subm->hash_lock)
+{
+ struct mlx5hws_bwc_complex_subrule_data *sr_data;
+
+ if (is_last_rule)
+ *is_last_rule = false;
+
+ sr_data = bwc_rule->subrule_data;
+ if (refcount_dec_and_test(&sr_data->refcount)) {
+ rhashtable_remove_fast(&subm->rules_hash,
+ &sr_data->hash_node,
+ hws_rules_hash_params);
+ ida_free(&subm->chain_ida, sr_data->chain_id);
+ kfree(sr_data);
+ if (is_last_rule)
+ *is_last_rule = true;
+ }
+
+ bwc_rule->subrule_data = NULL;
+}
+
+static int hws_complex_subrule_create(struct mlx5hws_bwc_matcher *cmatcher,
+ struct mlx5hws_bwc_rule *subrule,
+ u32 *match_params, u32 flow_source,
+ int bwc_queue_idx, int subm_idx,
+ struct mlx5hws_rule_action *actions,
+ u32 *chain_id)
+{
+ struct mlx5hws_rule_action chain_actions[HWS_NUM_CHAIN_ACTIONS] = {0};
+ u8 modify_hdr_action[MLX5_ST_SZ_BYTES(set_action_in)] = {0};
+ struct mlx5hws_bwc_matcher_complex_data *cdata;
+ struct mlx5hws_bwc_complex_submatcher *subm;
+ int ret;
+
+ cdata = cmatcher->complex;
+ subm = &cdata->submatchers[subm_idx];
+
+ mutex_lock(&subm->hash_lock);
+
+ ret = hws_complex_get_subrule_data(subrule, subm, match_params);
+ if (ret)
+ goto unlock;
+
+ *chain_id = subrule->subrule_data->chain_id;
+
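+ /* No caller-supplied actions means this is a chaining subrule:
+ * set register C6 to the chain id and jump to the next
+ * submatcher's table.
+ */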
+ if (!actions) {
+ MLX5_SET(set_action_in, modify_hdr_action, data, *chain_id);
+ chain_actions[0].action = cdata->action_metadata;
+ chain_actions[0].modify_header.data = modify_hdr_action;
+ chain_actions[1].action =
+ cdata->submatchers[subm_idx + 1].action_tbl;
+ chain_actions[2].action = cdata->action_last;
+ actions = chain_actions;
+ }
+
+ ret = mlx5hws_bwc_rule_create_simple(subrule, match_params, actions,
+ flow_source, bwc_queue_idx);
+ if (ret)
+ goto put_subrule_data;
+
+ ret = 0;
+ goto unlock;
+
+put_subrule_data:
+ hws_complex_put_subrule_data(subrule, subm, NULL);
+unlock:
+ mutex_unlock(&subm->hash_lock);
+
+ return ret;
+}
+
+static int hws_complex_subrule_destroy(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_bwc_matcher *cmatcher,
+ int subm_idx)
+{
+ struct mlx5hws_bwc_matcher_complex_data *cdata;
+ struct mlx5hws_bwc_complex_submatcher *subm;
+ struct mlx5hws_context *ctx;
+ bool is_last_rule;
+ int ret = 0;
+
+ cdata = cmatcher->complex;
+ subm = &cdata->submatchers[subm_idx];
+ ctx = subm->tbl->ctx;
+
+ mutex_lock(&subm->hash_lock);
+
+ hws_complex_put_subrule_data(bwc_rule, subm, &is_last_rule);
+ bwc_rule->rule->skip_delete = !is_last_rule;
+ ret = mlx5hws_bwc_rule_destroy_simple(bwc_rule);
+ if (unlikely(ret))
+ mlx5hws_err(ctx,
+ "Complex rule: failed to delete subrule %d (%d)\n",
+ subm_idx, ret);
+
+ if (subm_idx)
+ mlx5hws_bwc_rule_free(bwc_rule);
+
+ mutex_unlock(&subm->hash_lock);
+
+ return ret;
+}
+
+int mlx5hws_bwc_rule_create_complex(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_match_parameters *params,
+ u32 flow_source,
+ struct mlx5hws_rule_action rule_actions[],
+ u16 bwc_queue_idx)
+{
+ struct mlx5hws_bwc_rule
+ *subrules[MLX5HWS_BWC_COMPLEX_MAX_SUBMATCHERS] = {0};
+ struct mlx5hws_bwc_matcher *cmatcher = bwc_rule->bwc_matcher;
+ struct mlx5hws_bwc_matcher_complex_data *cdata;
+ struct mlx5hws_rule_action *subrule_actions;
+ struct mlx5hws_bwc_complex_submatcher *subm;
+ struct mlx5hws_bwc_rule *subrule;
+ u32 *match_params;
+ u32 chain_id;
+ int i, ret;
+
+ cdata = cmatcher->complex;
+ if (!cdata)
+ return -EINVAL;
+
+ /* Duplicate user data because we will modify it to set register C6
+ * values. For the same reason, make sure that we allocate a full
+ * match_param even if the user gave us fewer bytes. We need to ensure
+ * there is space for the match on C6.
+ */
+ match_params = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
+ if (!match_params)
+ return -ENOMEM;
+
+ memcpy(match_params, params->match_buf, params->match_sz);
+
+ ret = hws_complex_subrule_create(cmatcher, bwc_rule, match_params,
+ flow_source, bwc_queue_idx, 0,
+ NULL, &chain_id);
+ if (ret)
+ goto free_match_params;
+ subrules[0] = bwc_rule;
+
+ for (i = 1; i < cdata->num_submatchers; i++) {
+ subm = &cdata->submatchers[i];
+ subrule = mlx5hws_bwc_rule_alloc(subm->bwc_matcher);
+ if (!subrule) {
+ ret = -ENOMEM;
+ goto destroy_subrules;
+ }
+
+ /* Match on the previous subrule's chain_id. This is how
+ * subrules are connected in steering.
+ */
+ MLX5_SET(fte_match_param, match_params,
+ misc_parameters_2.metadata_reg_c_6, chain_id);
+
+ /* The last subrule uses the complex rule's user-specified
+ * actions. Everything else uses the chaining rules based on the
+ * next table and chain_id.
+ */
+ subrule_actions =
+ i == cdata->num_submatchers - 1 ? rule_actions : NULL;
+
+ ret = hws_complex_subrule_create(cmatcher, subrule,
+ match_params, flow_source,
+ bwc_queue_idx, i,
+ subrule_actions, &chain_id);
+ if (ret) {
+ mlx5hws_bwc_rule_free(subrule);
+ goto destroy_subrules;
+ }
+
+ subrules[i] = subrule;
+ }
+
+ for (i = 0; i < cdata->num_submatchers - 1; i++)
+ subrules[i]->next_subrule = subrules[i + 1];
+
+ kfree(match_params);
+
+ return 0;
+
+destroy_subrules:
+ while (i--)
+ hws_complex_subrule_destroy(subrules[i], cmatcher, i);
+free_match_params:
+ kfree(match_params);
+
+ return ret;
+}
+
+int mlx5hws_bwc_rule_destroy_complex(struct mlx5hws_bwc_rule *bwc_rule)
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+ struct mlx5hws_bwc_rule
+ *subrules[MLX5HWS_BWC_COMPLEX_MAX_SUBMATCHERS] = {0};
+ struct mlx5hws_bwc_matcher_complex_data *cdata;
+ int i, err, ret_val;
+
+ cdata = bwc_matcher->complex;
+
+ /* Construct a list of all the subrules we need to destroy. */
+ subrules[0] = bwc_rule;
+ for (i = 1; i < cdata->num_submatchers; i++)
+ subrules[i] = subrules[i - 1]->next_subrule;
+
+ ret_val = 0;
+ for (i = 0; i < cdata->num_submatchers; i++) {
+ err = hws_complex_subrule_destroy(subrules[i], bwc_matcher, i);
+ /* If something goes wrong, plow along to destroy all of the
+ * subrules but return an error upstack.
+ */
+ if (unlikely(err))
+ ret_val = err;
+ }
+
+ return ret_val;
+}
+
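+/* Reset the was_moved flag on all subrules so that the upcoming move
+ * pass handles each shared physical rule exactly once.
+ */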
+static void
+hws_bwc_matcher_init_move(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ u16 bwc_queues = mlx5hws_bwc_queues(ctx);
+ struct mlx5hws_bwc_rule *bwc_rule;
+ struct list_head *rules_list;
+ int i;
+
+ for (i = 0; i < bwc_queues; i++) {
+ rules_list = &bwc_matcher->rules[i];
+ if (list_empty(rules_list))
+ continue;
+
+ list_for_each_entry(bwc_rule, rules_list, list_node) {
+ if (!bwc_rule->subrule_data)
+ continue;
+ bwc_rule->subrule_data->was_moved = false;
+ }
+ }
+}
+
+int mlx5hws_bwc_matcher_complex_move(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mlx5hws_matcher *matcher = bwc_matcher->matcher;
+ u16 bwc_queues = mlx5hws_bwc_queues(ctx);
+ struct mlx5hws_bwc_rule *tmp_bwc_rule;
+ struct mlx5hws_rule_attr rule_attr;
+ int move_error = 0, poll_error = 0;
+ struct mlx5hws_rule *tmp_rule;
+ struct list_head *rules_list;
+ u32 expected_completions = 1;
+ int i, ret = 0;
+
+ hws_bwc_matcher_init_move(bwc_matcher);
+
+ mlx5hws_bwc_rule_fill_attr(bwc_matcher, 0, 0, &rule_attr);
+
+ for (i = 0; i < bwc_queues; i++) {
+ rules_list = &bwc_matcher->rules[i];
+ if (list_empty(rules_list))
+ continue;
+
+ rule_attr.queue_id = mlx5hws_bwc_get_queue_id(ctx, i);
+
+ list_for_each_entry(tmp_bwc_rule, rules_list, list_node) {
+ /* Check if a rule with a similar tag has already
+ * been moved.
+ */
+ if (tmp_bwc_rule->subrule_data->was_moved) {
+ /* This rule is a duplicate of a rule with an
+ * identical tag that has already been moved
+ * earlier. Just update this rule's RTCs.
+ */
+ tmp_bwc_rule->rule->rtc_0 =
+ tmp_bwc_rule->subrule_data->rtc_0;
+ tmp_bwc_rule->rule->rtc_1 =
+ tmp_bwc_rule->subrule_data->rtc_1;
+ tmp_bwc_rule->rule->matcher =
+ tmp_bwc_rule->rule->matcher->resize_dst;
+ continue;
+ }
+
+ /* First time we're moving a rule with this tag.
+ * Move it for real.
+ */
+ tmp_rule = tmp_bwc_rule->rule;
+ tmp_rule->skip_delete = false;
+ ret = mlx5hws_matcher_resize_rule_move(matcher,
+ tmp_rule,
+ &rule_attr);
+ if (unlikely(ret)) {
+ if (!move_error) {
+ mlx5hws_err(ctx,
+ "Moving complex BWC rule: move failed (%d), attempting to move rest of the rules\n",
+ ret);
+ move_error = ret;
+ }
+ /* Rule wasn't queued, no need to poll */
+ continue;
+ }
+
+ expected_completions = 1;
+ ret = mlx5hws_bwc_queue_poll(ctx,
+ rule_attr.queue_id,
+ &expected_completions,
+ true);
+ if (unlikely(ret)) {
+ if (ret == -ETIMEDOUT) {
+ mlx5hws_err(ctx,
+ "Moving complex BWC rule: timeout polling for completions (%d), aborting rehash\n",
+ ret);
+ return ret;
+ }
+ if (!poll_error) {
+ mlx5hws_err(ctx,
+ "Moving complex BWC rule: polling for completions failed (%d), attempting to move rest of the rules\n",
+ ret);
+ poll_error = ret;
+ }
+ }
+
+ /* Done moving the rule to the new matcher,
+ * now update RTCs for all the duplicated rules.
+ */
+ tmp_bwc_rule->subrule_data->rtc_0 =
+ tmp_bwc_rule->rule->rtc_0;
+ tmp_bwc_rule->subrule_data->rtc_1 =
+ tmp_bwc_rule->rule->rtc_1;
+
+ tmp_bwc_rule->subrule_data->was_moved = true;
+ }
+ }
+
+ /* Return the first error that happened */
+ if (unlikely(move_error))
+ return move_error;
+ if (unlikely(poll_error))
+ return poll_error;
+
+ return ret;
+}
+
+int
+mlx5hws_bwc_matcher_complex_move_first(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mlx5hws_bwc_matcher_complex_data *cdata;
+ struct mlx5hws_table *isolated_tbl;
+ u32 end_ft_id;
+ int i, ret;
+
+ cdata = bwc_matcher->complex;
+
+ /* We are rehashing the first submatcher. We need to update the
+ * subsequent submatchers to point to the end_ft of this new matcher.
+ * This needs to be done before moving any rules to prevent possible
+ * steering loops.
+ */
+ end_ft_id = bwc_matcher->matcher->resize_dst->end_ft_id;
+ for (i = 1; i < cdata->num_submatchers; i++) {
+ isolated_tbl = cdata->submatchers[i].tbl;
+ ret = mlx5hws_matcher_update_end_ft_isolated(isolated_tbl,
+ end_ft_id);
+ if (ret) {
+ mlx5hws_err(ctx,
+ "Complex matcher: failed updating end_ft of isolated matcher (%d)\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return mlx5hws_bwc_matcher_complex_move(bwc_matcher);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h
new file mode 100644
index 000000000000..d07de631ce9f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef HWS_BWC_COMPLEX_H_
+#define HWS_BWC_COMPLEX_H_
+
+#define MLX5HWS_BWC_COMPLEX_MAX_SUBMATCHERS 4
+
+/* A matcher can't contain two rules with the same match tag, but it is possible
+ * that two different complex rules' subrules have the same match tag. In that
+ * case, those subrules correspond to a single rule, and we need to refcount.
+ */
+struct mlx5hws_bwc_complex_subrule_data {
+ struct mlx5hws_rule_match_tag match_tag;
+ refcount_t refcount;
+ /* The chain_id is what glues individual subrules into larger complex
+ * rules. It is the value that this subrule writes to register C6, and
+ * that the next subrule matches against.
+ */
+ u32 chain_id;
+ u32 rtc_0;
+ u32 rtc_1;
+ /* During rehash we iterate through all the subrules to move them. But
+ * two or more subrules can share the same physical rule in the
+ * submatcher, so we use `was_moved` to keep track of whether a given
+ * rule was already moved.
+ */
+ bool was_moved;
+ struct rhash_head hash_node;
+};
+
+struct mlx5hws_bwc_complex_submatcher {
+ /* Table that the submatcher lives in. For the first submatcher this is
+ * the original table; all other submatchers live in their own isolated
+ * tables.
+ */
+ struct mlx5hws_table *tbl;
+ /* Action that jumps to `tbl`. The previous submatcher's chaining rules
+ * use it to forward matching packets here. This is set in all
+ * submatchers but the first.
+ */
+ struct mlx5hws_action *action_tbl;
+ /* This submatcher's simple matcher. The first submatcher points to the
+ * outer (complex) matcher.
+ */
+ struct mlx5hws_bwc_matcher *bwc_matcher;
+ struct rhashtable rules_hash;
+ struct ida chain_ida;
+ struct mutex hash_lock; /* Protect the hash and ida. */
+};
+
+struct mlx5hws_bwc_matcher_complex_data {
+ struct mlx5hws_bwc_complex_submatcher
+ submatchers[MLX5HWS_BWC_COMPLEX_MAX_SUBMATCHERS];
+ int num_submatchers;
+ /* Actions used by all but the last submatcher to point to the next
+ * submatcher in the chain. The last submatcher uses the action template
+ * from the complex matcher to perform the actions that the user
+ * originally requested.
+ */
+ struct mlx5hws_action *action_metadata;
+ struct mlx5hws_action *action_last;
+};
+
+bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
+ u8 match_criteria_enable,
+ struct mlx5hws_match_parameters *mask);
+
+int mlx5hws_bwc_matcher_create_complex(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_table *table,
+ u32 priority,
+ u8 match_criteria_enable,
+ struct mlx5hws_match_parameters *mask);
+
+void mlx5hws_bwc_matcher_destroy_complex(struct mlx5hws_bwc_matcher *bwc_matcher);
+
+int mlx5hws_bwc_matcher_complex_move(struct mlx5hws_bwc_matcher *bwc_matcher);
+
+int
+mlx5hws_bwc_matcher_complex_move_first(struct mlx5hws_bwc_matcher *bwc_matcher);
+
+int mlx5hws_bwc_rule_create_complex(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_match_parameters *params,
+ u32 flow_source,
+ struct mlx5hws_rule_action rule_actions[],
+ u16 bwc_queue_idx);
+
+int mlx5hws_bwc_rule_destroy_complex(struct mlx5hws_bwc_rule *bwc_rule);
+
+#endif /* HWS_BWC_COMPLEX_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c
new file mode 100644
index 000000000000..f22eaf506d28
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c
@@ -0,0 +1,1219 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "internal.h"
+
+static enum mlx5_ifc_flow_destination_type
+hws_cmd_dest_type_to_ifc_dest_type(enum mlx5_flow_destination_type type)
+{
+ switch (type) {
+ case MLX5_FLOW_DESTINATION_TYPE_VPORT:
+ return MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT;
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
+ return MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ case MLX5_FLOW_DESTINATION_TYPE_TIR:
+ return MLX5_IFC_FLOW_DESTINATION_TYPE_TIR;
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
+ return MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
+ case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
+ return MLX5_IFC_FLOW_DESTINATION_TYPE_UPLINK;
+ case MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE:
+ return MLX5_IFC_FLOW_DESTINATION_TYPE_TABLE_TYPE;
+ case MLX5_FLOW_DESTINATION_TYPE_NONE:
+ case MLX5_FLOW_DESTINATION_TYPE_PORT:
+ case MLX5_FLOW_DESTINATION_TYPE_COUNTER:
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
+ case MLX5_FLOW_DESTINATION_TYPE_RANGE:
+ default:
+ pr_warn("HWS: unknown flow dest type %d\n", type);
+ return 0;
+ }
+}
+
+static int hws_cmd_general_obj_destroy(struct mlx5_core_dev *mdev,
+ u32 object_type,
+ u32 object_id)
+{
+ u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, object_type);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, object_id);
+
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5hws_cmd_flow_table_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_ft_create_attr *ft_attr,
+ u32 *table_id)
+{
+ u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0};
+ void *ft_ctx;
+ int ret;
+
+ MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE);
+ MLX5_SET(create_flow_table_in, in, table_type, ft_attr->type);
+ MLX5_SET(create_flow_table_in, in, uid, ft_attr->uid);
+
+ ft_ctx = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context);
+ MLX5_SET(flow_table_context, ft_ctx, level, ft_attr->level);
+ MLX5_SET(flow_table_context, ft_ctx, rtc_valid, ft_attr->rtc_valid);
+ MLX5_SET(flow_table_context, ft_ctx, reformat_en, ft_attr->reformat_en);
+ MLX5_SET(flow_table_context, ft_ctx, decap_en, ft_attr->decap_en);
+
+ ret = mlx5_cmd_exec_inout(mdev, create_flow_table, in, out);
+ if (ret)
+ return ret;
+
+ *table_id = MLX5_GET(create_flow_table_out, out, table_id);
+
+ return 0;
+}
+
+int mlx5hws_cmd_flow_table_modify(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_ft_modify_attr *ft_attr,
+ u32 table_id)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {0};
+ void *ft_ctx;
+
+ MLX5_SET(modify_flow_table_in, in, opcode, MLX5_CMD_OP_MODIFY_FLOW_TABLE);
+ MLX5_SET(modify_flow_table_in, in, table_type, ft_attr->type);
+ MLX5_SET(modify_flow_table_in, in, modify_field_select, ft_attr->modify_fs);
+ MLX5_SET(modify_flow_table_in, in, table_id, table_id);
+
+ ft_ctx = MLX5_ADDR_OF(modify_flow_table_in, in, flow_table_context);
+
+ MLX5_SET(flow_table_context, ft_ctx, table_miss_action, ft_attr->table_miss_action);
+ MLX5_SET(flow_table_context, ft_ctx, table_miss_id, ft_attr->table_miss_id);
+ MLX5_SET(flow_table_context, ft_ctx, hws.rtc_id_0, ft_attr->rtc_id_0);
+ MLX5_SET(flow_table_context, ft_ctx, hws.rtc_id_1, ft_attr->rtc_id_1);
+
+ return mlx5_cmd_exec_in(mdev, modify_flow_table, in);
+}
+
+int mlx5hws_cmd_flow_table_query(struct mlx5_core_dev *mdev,
+ u32 table_id,
+ struct mlx5hws_cmd_ft_query_attr *ft_attr,
+ u64 *icm_addr_0, u64 *icm_addr_1)
+{
+ u32 out[MLX5_ST_SZ_DW(query_flow_table_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(query_flow_table_in)] = {0};
+ void *ft_ctx;
+ int ret;
+
+ MLX5_SET(query_flow_table_in, in, opcode, MLX5_CMD_OP_QUERY_FLOW_TABLE);
+ MLX5_SET(query_flow_table_in, in, table_type, ft_attr->type);
+ MLX5_SET(query_flow_table_in, in, table_id, table_id);
+
+ ret = mlx5_cmd_exec_inout(mdev, query_flow_table, in, out);
+ if (ret)
+ return ret;
+
+ ft_ctx = MLX5_ADDR_OF(query_flow_table_out, out, flow_table_context);
+ *icm_addr_0 = MLX5_GET64(flow_table_context, ft_ctx, sws.sw_owner_icm_root_0);
+ *icm_addr_1 = MLX5_GET64(flow_table_context, ft_ctx, sws.sw_owner_icm_root_1);
+
+ return ret;
+}
+
+int mlx5hws_cmd_flow_table_destroy(struct mlx5_core_dev *mdev,
+ u8 fw_ft_type, u32 table_id)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {0};
+
+ MLX5_SET(destroy_flow_table_in, in, opcode, MLX5_CMD_OP_DESTROY_FLOW_TABLE);
+ MLX5_SET(destroy_flow_table_in, in, table_type, fw_ft_type);
+ MLX5_SET(destroy_flow_table_in, in, table_id, table_id);
+
+ return mlx5_cmd_exec_in(mdev, destroy_flow_table, in);
+}
+
+static int hws_cmd_flow_group_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_fg_attr *fg_attr,
+ u32 *group_id)
+{
+ u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0};
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ u32 *in;
+ int ret;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(create_flow_group_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP);
+ MLX5_SET(create_flow_group_in, in, table_type, fg_attr->table_type);
+ MLX5_SET(create_flow_group_in, in, table_id, fg_attr->table_id);
+
+ ret = mlx5_cmd_exec_inout(mdev, create_flow_group, in, out);
+ if (ret)
+ goto out;
+
+ *group_id = MLX5_GET(create_flow_group_out, out, group_id);
+
+out:
+ kvfree(in);
+ return ret;
+}
+
+static int hws_cmd_flow_group_destroy(struct mlx5_core_dev *mdev,
+ u32 ft_id, u32 fg_id, u8 ft_type)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {};
+
+ MLX5_SET(destroy_flow_group_in, in, opcode, MLX5_CMD_OP_DESTROY_FLOW_GROUP);
+ MLX5_SET(destroy_flow_group_in, in, table_type, ft_type);
+ MLX5_SET(destroy_flow_group_in, in, table_id, ft_id);
+ MLX5_SET(destroy_flow_group_in, in, group_id, fg_id);
+
+ return mlx5_cmd_exec_in(mdev, destroy_flow_group, in);
+}
+
+int mlx5hws_cmd_set_fte(struct mlx5_core_dev *mdev,
+ u32 table_type,
+ u32 table_id,
+ u32 group_id,
+ struct mlx5hws_cmd_set_fte_attr *fte_attr)
+{
+ u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
+ void *in_flow_context;
+ u32 dest_entry_sz;
+ u32 total_dest_sz;
+ u32 action_flags;
+ u8 *in_dests;
+ u32 inlen;
+ u32 *in;
+ int ret;
+ u32 i;
+
+ dest_entry_sz = fte_attr->extended_dest ?
+ MLX5_ST_SZ_BYTES(extended_dest_format) :
+ MLX5_ST_SZ_BYTES(dest_format);
+ total_dest_sz = dest_entry_sz * fte_attr->dests_num;
+ inlen = align((MLX5_ST_SZ_BYTES(set_fte_in) + total_dest_sz), DW_SIZE);
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
+ MLX5_SET(set_fte_in, in, table_type, table_type);
+ MLX5_SET(set_fte_in, in, table_id, table_id);
+
+ in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
+ MLX5_SET(flow_context, in_flow_context, group_id, group_id);
+ MLX5_SET(flow_context, in_flow_context, flow_source, fte_attr->flow_source);
+ MLX5_SET(flow_context, in_flow_context, extended_destination, fte_attr->extended_dest);
+ MLX5_SET(set_fte_in, in, ignore_flow_level, fte_attr->ignore_flow_level);
+
+ action_flags = fte_attr->action_flags;
+ MLX5_SET(flow_context, in_flow_context, action, action_flags);
+
+ if (action_flags & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
+ MLX5_SET(flow_context, in_flow_context,
+ packet_reformat_id, fte_attr->packet_reformat_id);
+ }
+
+ if (action_flags & (MLX5_FLOW_CONTEXT_ACTION_DECRYPT | MLX5_FLOW_CONTEXT_ACTION_ENCRYPT)) {
+ MLX5_SET(flow_context, in_flow_context,
+ encrypt_decrypt_type, fte_attr->encrypt_decrypt_type);
+ MLX5_SET(flow_context, in_flow_context,
+ encrypt_decrypt_obj_id, fte_attr->encrypt_decrypt_obj_id);
+ }
+
+ if (action_flags & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ in_dests = (u8 *)MLX5_ADDR_OF(flow_context, in_flow_context, destination);
+
+ for (i = 0; i < fte_attr->dests_num; i++) {
+ struct mlx5hws_cmd_set_fte_dest *dest = &fte_attr->dests[i];
+ enum mlx5_ifc_flow_destination_type ifc_dest_type =
+ hws_cmd_dest_type_to_ifc_dest_type(dest->destination_type);
+
+ switch (dest->destination_type) {
+ case MLX5_FLOW_DESTINATION_TYPE_VPORT:
+ if (dest->ext_flags & MLX5HWS_CMD_EXT_DEST_ESW_OWNER_VHCA_ID) {
+ MLX5_SET(dest_format, in_dests,
+ destination_eswitch_owner_vhca_id_valid, 1);
+ MLX5_SET(dest_format, in_dests,
+ destination_eswitch_owner_vhca_id,
+ dest->esw_owner_vhca_id);
+ }
+ fallthrough;
+ case MLX5_FLOW_DESTINATION_TYPE_TIR:
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
+ MLX5_SET(dest_format, in_dests, destination_type, ifc_dest_type);
+ MLX5_SET(dest_format, in_dests, destination_id,
+ dest->destination_id);
+ if (dest->ext_flags & MLX5HWS_CMD_EXT_DEST_REFORMAT) {
+ MLX5_SET(dest_format, in_dests, packet_reformat, 1);
+ MLX5_SET(extended_dest_format, in_dests, packet_reformat_id,
+ dest->ext_reformat_id);
+ }
+ break;
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
+ MLX5_SET(dest_format, in_dests,
+ destination_type, ifc_dest_type);
+ MLX5_SET(dest_format, in_dests, destination_id,
+ dest->destination_id);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ in_dests = in_dests + dest_entry_sz;
+ }
+ MLX5_SET(flow_context, in_flow_context, destination_list_size, fte_attr->dests_num);
+ }
+
+ ret = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
+ if (ret)
+ mlx5_core_err(mdev, "Failed creating FLOW_TABLE_ENTRY\n");
+
+out:
+ kfree(in);
+ return ret;
+}
+
+int mlx5hws_cmd_delete_fte(struct mlx5_core_dev *mdev,
+ u32 table_type,
+ u32 table_id)
+{
+ u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {};
+
+ MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
+ MLX5_SET(delete_fte_in, in, table_type, table_type);
+ MLX5_SET(delete_fte_in, in, table_id, table_id);
+
+ return mlx5_cmd_exec_in(mdev, delete_fte, in);
+}
+
+struct mlx5hws_cmd_forward_tbl *
+mlx5hws_cmd_forward_tbl_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_ft_create_attr *ft_attr,
+ struct mlx5hws_cmd_set_fte_attr *fte_attr)
+{
+ struct mlx5hws_cmd_fg_attr fg_attr = {0};
+ struct mlx5hws_cmd_forward_tbl *tbl;
+ int ret;
+
+ tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
+ if (!tbl)
+ return NULL;
+
+ ret = mlx5hws_cmd_flow_table_create(mdev, ft_attr, &tbl->ft_id);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create FT\n");
+ goto free_tbl;
+ }
+
+ fg_attr.table_id = tbl->ft_id;
+ fg_attr.table_type = ft_attr->type;
+
+ ret = hws_cmd_flow_group_create(mdev, &fg_attr, &tbl->fg_id);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create FG\n");
+ goto free_ft;
+ }
+
+ ret = mlx5hws_cmd_set_fte(mdev, ft_attr->type,
+ tbl->ft_id, tbl->fg_id, fte_attr);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create FTE\n");
+ goto free_fg;
+ }
+
+ tbl->type = ft_attr->type;
+ return tbl;
+
+free_fg:
+ hws_cmd_flow_group_destroy(mdev, tbl->ft_id, tbl->fg_id, ft_attr->type);
+free_ft:
+ mlx5hws_cmd_flow_table_destroy(mdev, ft_attr->type, tbl->ft_id);
+free_tbl:
+ kfree(tbl);
+ return NULL;
+}
+
+void mlx5hws_cmd_forward_tbl_destroy(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_forward_tbl *tbl)
+{
+ mlx5hws_cmd_delete_fte(mdev, tbl->type, tbl->ft_id);
+ hws_cmd_flow_group_destroy(mdev, tbl->ft_id, tbl->fg_id, tbl->type);
+ mlx5hws_cmd_flow_table_destroy(mdev, tbl->type, tbl->ft_id);
+ kfree(tbl);
+}
+
+void mlx5hws_cmd_set_attr_connect_miss_tbl(struct mlx5hws_context *ctx,
+ u32 fw_ft_type,
+ enum mlx5hws_table_type type,
+ struct mlx5hws_cmd_ft_modify_attr *ft_attr)
+{
+ u32 default_miss_tbl;
+
+ if (type != MLX5HWS_TABLE_TYPE_FDB)
+ return;
+
+ ft_attr->modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION;
+ ft_attr->type = fw_ft_type;
+ ft_attr->table_miss_action = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_GOTO_TBL;
+
+ default_miss_tbl = ctx->common_res.default_miss->ft_id;
+ if (!default_miss_tbl) {
+ pr_warn("HWS: no flow table ID for default miss\n");
+ return;
+ }
+
+ ft_attr->table_miss_id = default_miss_tbl;
+}
+
+int mlx5hws_cmd_rtc_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_rtc_create_attr *rtc_attr,
+ u32 *rtc_id)
+{
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_rtc_in)] = {0};
+ void *attr;
+ int ret;
+
+ attr = MLX5_ADDR_OF(create_rtc_in, in, hdr);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, obj_type, MLX5_OBJ_TYPE_RTC);
+
+ attr = MLX5_ADDR_OF(create_rtc_in, in, rtc);
+ MLX5_SET(rtc, attr, ste_format_0, rtc_attr->is_frst_jumbo ?
+ MLX5_IFC_RTC_STE_FORMAT_11DW :
+ MLX5_IFC_RTC_STE_FORMAT_8DW);
+
+ if (rtc_attr->is_scnd_range) {
+ MLX5_SET(rtc, attr, ste_format_1, MLX5_IFC_RTC_STE_FORMAT_RANGE);
+ MLX5_SET(rtc, attr, num_match_ste, 2);
+ }
+
+ MLX5_SET(rtc, attr, pd, rtc_attr->pd);
+ MLX5_SET(rtc, attr, update_method, rtc_attr->fw_gen_wqe);
+ MLX5_SET(rtc, attr, update_index_mode, rtc_attr->update_index_mode);
+ MLX5_SET(rtc, attr, access_index_mode, rtc_attr->access_index_mode);
+ MLX5_SET(rtc, attr, num_hash_definer, rtc_attr->num_hash_definer);
+ MLX5_SET(rtc, attr, log_depth, rtc_attr->log_depth);
+ MLX5_SET(rtc, attr, log_hash_size, rtc_attr->log_size);
+ MLX5_SET(rtc, attr, table_type, rtc_attr->table_type);
+ MLX5_SET(rtc, attr, num_hash_definer, rtc_attr->num_hash_definer);
+ MLX5_SET(rtc, attr, match_definer_0, rtc_attr->match_definer_0);
+ MLX5_SET(rtc, attr, match_definer_1, rtc_attr->match_definer_1);
+ MLX5_SET(rtc, attr, stc_id, rtc_attr->stc_base);
+ MLX5_SET(rtc, attr, ste_table_base_id, rtc_attr->ste_base);
+ MLX5_SET(rtc, attr, miss_flow_table_id, rtc_attr->miss_ft_id);
+ MLX5_SET(rtc, attr, reparse_mode, rtc_attr->reparse_mode);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create RTC\n");
+ goto out;
+ }
+
+ *rtc_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+out:
+ return ret;
+}
+
+void mlx5hws_cmd_rtc_destroy(struct mlx5_core_dev *mdev, u32 rtc_id)
+{
+ hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_RTC, rtc_id);
+}
+
+int mlx5hws_cmd_stc_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_stc_create_attr *stc_attr,
+ u32 *stc_id)
+{
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_stc_in)] = {0};
+ void *attr;
+ int ret;
+
+ attr = MLX5_ADDR_OF(create_stc_in, in, hdr);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, obj_type, MLX5_OBJ_TYPE_STC);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, op_param.create.log_obj_range, stc_attr->log_obj_range);
+
+ attr = MLX5_ADDR_OF(create_stc_in, in, stc);
+ MLX5_SET(stc, attr, table_type, stc_attr->table_type);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create STC\n");
+ goto out;
+ }
+
+ *stc_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+out:
+ return ret;
+}
+
+void mlx5hws_cmd_stc_destroy(struct mlx5_core_dev *mdev, u32 stc_id)
+{
+ hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_STC, stc_id);
+}
+
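+/* Fill in the STE parameters that are specific to the given STC action type. */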
+static int
+hws_cmd_stc_modify_set_stc_param(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_stc_modify_attr *stc_attr,
+ void *stc_param)
+{
+ switch (stc_attr->action_type) {
+ case MLX5_IFC_STC_ACTION_TYPE_COUNTER:
+ MLX5_SET(stc_ste_param_flow_counter, stc_param, flow_counter_id, stc_attr->id);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_TIR:
+ MLX5_SET(stc_ste_param_tir, stc_param, tirn, stc_attr->dest_tir_num);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT:
+ MLX5_SET(stc_ste_param_table, stc_param, table_id, stc_attr->dest_table_id);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_ACC_MODIFY_LIST:
+ MLX5_SET(stc_ste_param_header_modify_list, stc_param,
+ header_modify_pattern_id, stc_attr->modify_header.pattern_id);
+ MLX5_SET(stc_ste_param_header_modify_list, stc_param,
+ header_modify_argument_id, stc_attr->modify_header.arg_id);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE:
+ MLX5_SET(stc_ste_param_remove, stc_param, action_type,
+ MLX5_MODIFICATION_TYPE_REMOVE);
+ MLX5_SET(stc_ste_param_remove, stc_param, decap,
+ stc_attr->remove_header.decap);
+ MLX5_SET(stc_ste_param_remove, stc_param, remove_start_anchor,
+ stc_attr->remove_header.start_anchor);
+ MLX5_SET(stc_ste_param_remove, stc_param, remove_end_anchor,
+ stc_attr->remove_header.end_anchor);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT:
+ MLX5_SET(stc_ste_param_insert, stc_param, action_type,
+ MLX5_MODIFICATION_TYPE_INSERT);
+ MLX5_SET(stc_ste_param_insert, stc_param, encap,
+ stc_attr->insert_header.encap);
+ MLX5_SET(stc_ste_param_insert, stc_param, inline_data,
+ stc_attr->insert_header.is_inline);
+ MLX5_SET(stc_ste_param_insert, stc_param, insert_anchor,
+ stc_attr->insert_header.insert_anchor);
+ /* HW gets the next 2 sizes in words */
+ MLX5_SET(stc_ste_param_insert, stc_param, insert_size,
+ stc_attr->insert_header.header_size / W_SIZE);
+ MLX5_SET(stc_ste_param_insert, stc_param, insert_offset,
+ stc_attr->insert_header.insert_offset / W_SIZE);
+ MLX5_SET(stc_ste_param_insert, stc_param, insert_argument,
+ stc_attr->insert_header.arg_id);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_COPY:
+ case MLX5_IFC_STC_ACTION_TYPE_SET:
+ case MLX5_IFC_STC_ACTION_TYPE_ADD:
+ case MLX5_IFC_STC_ACTION_TYPE_ADD_FIELD:
+ *(__be64 *)stc_param = stc_attr->modify_action.data;
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT:
+ case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_UPLINK:
+ MLX5_SET(stc_ste_param_vport, stc_param, vport_number,
+ stc_attr->vport.vport_num);
+ MLX5_SET(stc_ste_param_vport, stc_param, eswitch_owner_vhca_id,
+ stc_attr->vport.esw_owner_vhca_id);
+ MLX5_SET(stc_ste_param_vport, stc_param, eswitch_owner_vhca_id_valid,
+ stc_attr->vport.eswitch_owner_vhca_id_valid);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_DROP:
+ case MLX5_IFC_STC_ACTION_TYPE_NOP:
+ case MLX5_IFC_STC_ACTION_TYPE_TAG:
+ case MLX5_IFC_STC_ACTION_TYPE_ALLOW:
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_ASO:
+ MLX5_SET(stc_ste_param_execute_aso, stc_param, aso_object_id,
+ stc_attr->aso.devx_obj_id);
+ MLX5_SET(stc_ste_param_execute_aso, stc_param, return_reg_id,
+ stc_attr->aso.return_reg_id);
+ MLX5_SET(stc_ste_param_execute_aso, stc_param, aso_type,
+ stc_attr->aso.aso_type);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE:
+ MLX5_SET(stc_ste_param_ste_table, stc_param, ste_obj_id,
+ stc_attr->ste_table.ste_obj_id);
+ MLX5_SET(stc_ste_param_ste_table, stc_param, match_definer_id,
+ stc_attr->ste_table.match_definer_id);
+ MLX5_SET(stc_ste_param_ste_table, stc_param, log_hash_size,
+ stc_attr->ste_table.log_hash_size);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS:
+ MLX5_SET(stc_ste_param_remove_words, stc_param, action_type,
+ MLX5_MODIFICATION_TYPE_REMOVE_WORDS);
+ MLX5_SET(stc_ste_param_remove_words, stc_param, remove_start_anchor,
+ stc_attr->remove_words.start_anchor);
+ MLX5_SET(stc_ste_param_remove_words, stc_param,
+ remove_size, stc_attr->remove_words.num_of_words);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_CRYPTO_IPSEC_ENCRYPTION:
+ MLX5_SET(stc_ste_param_ipsec_encrypt, stc_param, ipsec_object_id,
+ stc_attr->id);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_CRYPTO_IPSEC_DECRYPTION:
+ MLX5_SET(stc_ste_param_ipsec_decrypt, stc_param, ipsec_object_id,
+ stc_attr->id);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_TRAILER:
+ MLX5_SET(stc_ste_param_trailer, stc_param, command,
+ stc_attr->reformat_trailer.op);
+ MLX5_SET(stc_ste_param_trailer, stc_param, type,
+ stc_attr->reformat_trailer.type);
+ MLX5_SET(stc_ste_param_trailer, stc_param, length,
+ stc_attr->reformat_trailer.size);
+ break;
+ default:
+ mlx5_core_err(mdev, "Unsupported STC action type %d\n", stc_attr->action_type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int mlx5hws_cmd_stc_modify(struct mlx5_core_dev *mdev,
+ u32 stc_id,
+ struct mlx5hws_cmd_stc_modify_attr *stc_attr)
+{
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_stc_in)] = {0};
+ void *stc_param;
+ void *attr;
+ int ret;
+
+ attr = MLX5_ADDR_OF(create_stc_in, in, hdr);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, obj_type, MLX5_OBJ_TYPE_STC);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, stc_id);
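+ /* obj_offset selects which STC entry in the bulk-allocated range to modify */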
+ MLX5_SET(general_obj_in_cmd_hdr, in,
+ op_param.query.obj_offset, stc_attr->stc_offset);
+
+ attr = MLX5_ADDR_OF(create_stc_in, in, stc);
+ MLX5_SET(stc, attr, ste_action_offset, stc_attr->action_offset);
+ MLX5_SET(stc, attr, action_type, stc_attr->action_type);
+ MLX5_SET(stc, attr, reparse_mode, stc_attr->reparse_mode);
+ MLX5_SET64(stc, attr, modify_field_select,
+ MLX5_IFC_MODIFY_STC_FIELD_SELECT_NEW_STC);
+
+ /* Set destination TIRN, TAG, FT ID, STE ID */
+ stc_param = MLX5_ADDR_OF(stc, attr, stc_param);
+ ret = hws_cmd_stc_modify_set_stc_param(mdev, stc_attr, stc_param);
+ if (ret)
+ return ret;
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret)
+ mlx5_core_err(mdev, "Failed to modify STC FW action_type %d\n",
+ stc_attr->action_type);
+
+ return ret;
+}
+
+int mlx5hws_cmd_arg_create(struct mlx5_core_dev *mdev,
+ u16 log_obj_range,
+ u32 pd,
+ u32 *arg_id)
+{
+ u32 in[MLX5_ST_SZ_DW(create_modify_header_arg_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+ void *attr;
+ int ret;
+
+ attr = MLX5_ADDR_OF(create_modify_header_arg_in, in, hdr);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, obj_type, MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, op_param.create.log_obj_range, log_obj_range);
+
+ attr = MLX5_ADDR_OF(create_modify_header_arg_in, in, arg);
+ MLX5_SET(modify_header_arg, attr, access_pd, pd);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create ARG\n");
+ goto out;
+ }
+
+ *arg_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+out:
+ return ret;
+}
+
+void mlx5hws_cmd_arg_destroy(struct mlx5_core_dev *mdev,
+ u32 arg_id)
+{
+ hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT, arg_id);
+}
+
+int mlx5hws_cmd_header_modify_pattern_create(struct mlx5_core_dev *mdev,
+ u32 pattern_length,
+ u8 *actions,
+ u32 *ptrn_id)
+{
+ u32 in[MLX5_ST_SZ_DW(create_header_modify_pattern_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+ int num_of_actions;
+ u64 *pattern_data;
+ void *pattern;
+ void *attr;
+ int ret;
+ int i;
+
+ if (pattern_length > MLX5_MAX_ACTIONS_DATA_IN_HEADER_MODIFY) {
+ mlx5_core_err(mdev, "Pattern length %d exceeds limit %d\n",
+ pattern_length, MLX5_MAX_ACTIONS_DATA_IN_HEADER_MODIFY);
+ return -EINVAL;
+ }
+
+ attr = MLX5_ADDR_OF(create_header_modify_pattern_in, in, hdr);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, obj_type, MLX5_OBJ_TYPE_MODIFY_HDR_PATTERN);
+
+ pattern = MLX5_ADDR_OF(create_header_modify_pattern_in, in, pattern);
+ /* The FW pattern_length field is expressed in double-dwords (8 bytes) */
+ MLX5_SET(header_modify_pattern_in, pattern, pattern_length, pattern_length / (2 * DW_SIZE));
+
+ pattern_data = (u64 *)MLX5_ADDR_OF(header_modify_pattern_in, pattern, pattern_data);
+ memcpy(pattern_data, actions, pattern_length);
+
+ num_of_actions = pattern_length / MLX5HWS_MODIFY_ACTION_SIZE;
+ for (i = 0; i < num_of_actions; i++) {
+ int type;
+
+ type = MLX5_GET(set_action_in, &pattern_data[i], action_type);
+ if (type != MLX5_MODIFICATION_TYPE_COPY &&
+ type != MLX5_MODIFICATION_TYPE_ADD_FIELD)
+ /* Copy-type actions use all bytes for control; other actions carry per-rule data */
+ MLX5_SET(set_action_in, &pattern_data[i], data, 0);
+ }
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create header_modify_pattern\n");
+ goto out;
+ }
+
+ *ptrn_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+out:
+ return ret;
+}
+
+void mlx5hws_cmd_header_modify_pattern_destroy(struct mlx5_core_dev *mdev,
+ u32 ptrn_id)
+{
+ hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_MODIFY_HDR_PATTERN, ptrn_id);
+}
+
+int mlx5hws_cmd_ste_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_ste_create_attr *ste_attr,
+ u32 *ste_id)
+{
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_ste_in)] = {0};
+ void *attr;
+ int ret;
+
+ attr = MLX5_ADDR_OF(create_ste_in, in, hdr);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, obj_type, MLX5_OBJ_TYPE_STE);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, op_param.create.log_obj_range, ste_attr->log_obj_range);
+
+ attr = MLX5_ADDR_OF(create_ste_in, in, ste);
+ MLX5_SET(ste, attr, table_type, ste_attr->table_type);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create STE\n");
+ goto out;
+ }
+
+ *ste_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+out:
+ return ret;
+}
+
+void mlx5hws_cmd_ste_destroy(struct mlx5_core_dev *mdev, u32 ste_id)
+{
+ hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_STE, ste_id);
+}
+
+int mlx5hws_cmd_definer_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_definer_create_attr *def_attr,
+ u32 *definer_id)
+{
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_definer_in)] = {0};
+ void *ptr;
+ int ret;
+
+ MLX5_SET(general_obj_in_cmd_hdr,
+ in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ in, obj_type, MLX5_OBJ_TYPE_MATCH_DEFINER);
+
+ ptr = MLX5_ADDR_OF(create_definer_in, in, definer);
+ MLX5_SET(definer, ptr, format_id, MLX5_IFC_DEFINER_FORMAT_ID_SELECT);
+
+ MLX5_SET(definer, ptr, format_select_dw0, def_attr->dw_selector[0]);
+ MLX5_SET(definer, ptr, format_select_dw1, def_attr->dw_selector[1]);
+ MLX5_SET(definer, ptr, format_select_dw2, def_attr->dw_selector[2]);
+ MLX5_SET(definer, ptr, format_select_dw3, def_attr->dw_selector[3]);
+ MLX5_SET(definer, ptr, format_select_dw4, def_attr->dw_selector[4]);
+ MLX5_SET(definer, ptr, format_select_dw5, def_attr->dw_selector[5]);
+ MLX5_SET(definer, ptr, format_select_dw6, def_attr->dw_selector[6]);
+ MLX5_SET(definer, ptr, format_select_dw7, def_attr->dw_selector[7]);
+ MLX5_SET(definer, ptr, format_select_dw8, def_attr->dw_selector[8]);
+
+ MLX5_SET(definer, ptr, format_select_byte0, def_attr->byte_selector[0]);
+ MLX5_SET(definer, ptr, format_select_byte1, def_attr->byte_selector[1]);
+ MLX5_SET(definer, ptr, format_select_byte2, def_attr->byte_selector[2]);
+ MLX5_SET(definer, ptr, format_select_byte3, def_attr->byte_selector[3]);
+ MLX5_SET(definer, ptr, format_select_byte4, def_attr->byte_selector[4]);
+ MLX5_SET(definer, ptr, format_select_byte5, def_attr->byte_selector[5]);
+ MLX5_SET(definer, ptr, format_select_byte6, def_attr->byte_selector[6]);
+ MLX5_SET(definer, ptr, format_select_byte7, def_attr->byte_selector[7]);
+
+ ptr = MLX5_ADDR_OF(definer, ptr, match_mask);
+ memcpy(ptr, def_attr->match_mask, MLX5_FLD_SZ_BYTES(definer, match_mask));
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create Definer\n");
+ goto out;
+ }
+
+ *definer_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+out:
+ return ret;
+}
+
+void mlx5hws_cmd_definer_destroy(struct mlx5_core_dev *mdev,
+ u32 definer_id)
+{
+ hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_MATCH_DEFINER, definer_id);
+}
+
+int mlx5hws_cmd_packet_reformat_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_packet_reformat_create_attr *attr,
+ u32 *reformat_id)
+{
+ u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)] = {0};
+ size_t insz, cmd_data_sz, cmd_total_sz;
+ void *prctx;
+ void *pdata;
+ void *in;
+ int ret;
+
+ cmd_total_sz = MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in);
+ cmd_total_sz += MLX5_ST_SZ_BYTES(packet_reformat_context_in);
+ cmd_data_sz = MLX5_FLD_SZ_BYTES(packet_reformat_context_in, reformat_data);
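+ /* Input length is the base command plus the caller's reformat data,
+ * minus the placeholder reformat_data field already counted in the
+ * struct size, rounded up to a whole dword.
+ */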
+ insz = ALIGN(cmd_total_sz + attr->data_sz - cmd_data_sz, DW_SIZE);
+ in = kzalloc(insz, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
+ MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
+
+ prctx = MLX5_ADDR_OF(alloc_packet_reformat_context_in, in,
+ packet_reformat_context);
+ pdata = MLX5_ADDR_OF(packet_reformat_context_in, prctx, reformat_data);
+
+ MLX5_SET(packet_reformat_context_in, prctx, reformat_type, attr->type);
+ MLX5_SET(packet_reformat_context_in, prctx, reformat_param_0, attr->reformat_param_0);
+ MLX5_SET(packet_reformat_context_in, prctx, reformat_data_size, attr->data_sz);
+ memcpy(pdata, attr->data, attr->data_sz);
+
+ ret = mlx5_cmd_exec(mdev, in, insz, out, sizeof(out));
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create packet reformat\n");
+ goto out;
+ }
+
+ *reformat_id = MLX5_GET(alloc_packet_reformat_context_out, out, packet_reformat_id);
+out:
+ kfree(in);
+ return ret;
+}
+
+int mlx5hws_cmd_packet_reformat_destroy(struct mlx5_core_dev *mdev,
+ u32 reformat_id)
+{
+ u32 out[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)] = {0};
+ int ret;
+
+ MLX5_SET(dealloc_packet_reformat_context_in, in, opcode,
+ MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
+ MLX5_SET(dealloc_packet_reformat_context_in, in,
+ packet_reformat_id, reformat_id);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret)
+ mlx5_core_err(mdev, "Failed to destroy packet_reformat\n");
+
+ return ret;
+}
+
+int mlx5hws_cmd_sq_modify_rdy(struct mlx5_core_dev *mdev, u32 sqn)
+{
+ u32 out[MLX5_ST_SZ_DW(modify_sq_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(modify_sq_in)] = {0};
+ void *sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
+ int ret;
+
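+ /* Transition the SQ from RST to RDY so it can accept WQEs */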
+ MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ);
+ MLX5_SET(modify_sq_in, in, sqn, sqn);
+ MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST);
+ MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret)
+ mlx5_core_err(mdev, "Failed to modify SQ\n");
+
+ return ret;
+}
+
+int mlx5hws_cmd_generate_wqe(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_generate_wqe_attr *attr,
+ struct mlx5_cqe64 *ret_cqe)
+{
+ u32 out[MLX5_ST_SZ_DW(generate_wqe_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(generate_wqe_in)] = {0};
+ u8 status;
+ void *ptr;
+ int ret;
+
+ MLX5_SET(generate_wqe_in, in, opcode, MLX5_CMD_OP_GENERATE_WQE);
+ MLX5_SET(generate_wqe_in, in, pdn, attr->pdn);
+
+ ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_ctrl);
+ memcpy(ptr, attr->wqe_ctrl, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_ctrl));
+
+ ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_gta_ctrl);
+ memcpy(ptr, attr->gta_ctrl, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_gta_ctrl));
+
+ ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_gta_data_0);
+ memcpy(ptr, attr->gta_data_0, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_gta_data_0));
+
+ if (attr->gta_data_1) {
+ ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_gta_data_1);
+ memcpy(ptr, attr->gta_data_1, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_gta_data_1));
+ }
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to write GTA WQE using FW\n");
+ return ret;
+ }
+
+ status = MLX5_GET(generate_wqe_out, out, status);
+ if (status) {
+ mlx5_core_err(mdev, "Invalid FW CQE status %d\n", status);
+ return -EINVAL;
+ }
+
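+ /* The FW returns the resulting CQE inline in the command output */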
+ ptr = MLX5_ADDR_OF(generate_wqe_out, out, cqe_data);
+ memcpy(ret_cqe, ptr, sizeof(*ret_cqe));
+
+ return ret;
+}
+
+int mlx5hws_cmd_query_caps(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_query_caps *caps)
+{
+ u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
+ u32 out_size;
+ u32 *out;
+ int ret;
+
+ out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ out = kzalloc(out_size, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
+ MLX5_SET(query_hca_cap_in, in, op_mod,
+ MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE | HCA_CAP_OPMOD_GET_CUR);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to query device caps\n");
+ goto out;
+ }
+
+ caps->wqe_based_update =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.wqe_based_flow_table_update_cap);
+
+ caps->eswitch_manager = MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.eswitch_manager);
+
+ caps->flex_protocols = MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.flex_parser_protocols);
+
+ if (caps->flex_protocols & MLX5_FLEX_PARSER_GENEVE_TLV_OPTION_0_ENABLED)
+ caps->flex_parser_id_geneve_tlv_option_0 =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.flex_parser_id_geneve_tlv_option_0);
+
+ if (caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED)
+ caps->flex_parser_id_mpls_over_gre =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.flex_parser_id_outer_first_mpls_over_gre);
+
+ if (caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED)
+ caps->flex_parser_id_mpls_over_udp =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.flex_parser_id_outer_first_mpls_over_udp_label);
+
+ caps->log_header_modify_argument_granularity =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.log_header_modify_argument_granularity);
+
+ caps->log_header_modify_argument_granularity -=
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.log_header_modify_argument_granularity_offset);
+
+ caps->log_header_modify_argument_max_alloc =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.log_header_modify_argument_max_alloc);
+
+ caps->definer_format_sup =
+ MLX5_GET64(query_hca_cap_out, out,
+ capability.cmd_hca_cap.match_definer_format_supported);
+
+ caps->vhca_id = MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.vhca_id);
+
+ caps->sq_ts_format = MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.sq_ts_format);
+
+ caps->ipsec_offload = MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.ipsec_offload);
+
+ MLX5_SET(query_hca_cap_in, in, op_mod,
+ MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE_2 | HCA_CAP_OPMOD_GET_CUR);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to query device caps 2\n");
+ goto out;
+ }
+
+ caps->full_dw_jumbo_support =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap_2.format_select_dw_8_6_ext);
+
+ caps->format_select_gtpu_dw_0 =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap_2.format_select_dw_gtpu_dw_0);
+
+ caps->format_select_gtpu_dw_1 =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap_2.format_select_dw_gtpu_dw_1);
+
+ caps->format_select_gtpu_dw_2 =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap_2.format_select_dw_gtpu_dw_2);
+
+ caps->format_select_gtpu_ext_dw_0 =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap_2.format_select_dw_gtpu_first_ext_dw_0);
+
+ caps->supp_type_gen_wqe =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap_2.generate_wqe_type);
+
+ caps->flow_table_hash_type =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap_2.flow_table_hash_type);
+
+ MLX5_SET(query_hca_cap_in, in, op_mod,
+ MLX5_GET_HCA_CAP_OP_MOD_NIC_FLOW_TABLE | HCA_CAP_OPMOD_GET_CUR);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to query flow table caps\n");
+ goto out;
+ }
+
+ caps->nic_ft.max_level =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.flow_table_nic_cap.flow_table_properties_nic_receive.max_ft_level);
+
+ caps->nic_ft.reparse =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.flow_table_nic_cap.flow_table_properties_nic_receive.reparse);
+
+ caps->nic_ft.ignore_flow_level_rtc_valid =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.flow_table_nic_cap.flow_table_properties_nic_receive.ignore_flow_level_rtc_valid);
+
+ caps->flex_parser_ok_bits_supp =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.flow_table_nic_cap.flow_table_properties_nic_receive.ft_field_support.geneve_tlv_option_0_exist);
+
+ if (caps->wqe_based_update) {
+ MLX5_SET(query_hca_cap_in, in, op_mod,
+ MLX5_GET_HCA_CAP_OP_MOD_WQE_BASED_FLOW_TABLE | HCA_CAP_OPMOD_GET_CUR);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to query WQE based FT caps\n");
+ goto out;
+ }
+
+ caps->rtc_reparse_mode =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.rtc_reparse_mode);
+
+ caps->ste_format =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.ste_format);
+
+ caps->rtc_index_mode =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.rtc_index_mode);
+
+ caps->rtc_log_depth_max =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.rtc_log_depth_max);
+
+ caps->ste_alloc_log_max =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.ste_alloc_log_max);
+
+ caps->ste_alloc_log_gran =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.ste_alloc_log_granularity);
+
+ caps->trivial_match_definer =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.trivial_match_definer);
+
+ caps->stc_alloc_log_max =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.stc_alloc_log_max);
+
+ caps->stc_alloc_log_gran =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.stc_alloc_log_granularity);
+
+ caps->rtc_hash_split_table =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.rtc_hash_split_table);
+
+ caps->rtc_linear_lookup_table =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.rtc_linear_lookup_table);
+
+ caps->access_index_mode =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.access_index_mode);
+
+ caps->linear_match_definer =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.linear_match_definer_reg_c3);
+
+ caps->rtc_max_hash_def_gen_wqe =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.rtc_max_num_hash_definer_gen_wqe);
+
+ caps->supp_ste_format_gen_wqe =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.ste_format_gen_wqe);
+
+ caps->fdb_tir_stc =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.fdb_jump_to_tir_stc);
+ }
+
+ if (caps->eswitch_manager) {
+ MLX5_SET(query_hca_cap_in, in, op_mod,
+ MLX5_GET_HCA_CAP_OP_MOD_ESW_FLOW_TABLE | HCA_CAP_OPMOD_GET_CUR);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to query flow table esw caps\n");
+ goto out;
+ }
+
+ caps->fdb_ft.max_level =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.flow_table_nic_cap.flow_table_properties_nic_receive.max_ft_level);
+
+ caps->fdb_ft.reparse =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.flow_table_nic_cap.flow_table_properties_nic_receive.reparse);
+
+ MLX5_SET(query_hca_cap_in, in, op_mod,
+ MLX5_SET_HCA_CAP_OP_MOD_ESW | HCA_CAP_OPMOD_GET_CUR);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to query eswitch capabilities\n");
+ goto out;
+ }
+
+ if (MLX5_GET(query_hca_cap_out, out,
+ capability.esw_cap.esw_manager_vport_number_valid))
+ caps->eswitch_manager_vport_number =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.esw_cap.esw_manager_vport_number);
+
+ caps->merged_eswitch = MLX5_GET(query_hca_cap_out, out,
+ capability.esw_cap.merged_eswitch);
+ }
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to query device attributes\n");
+ goto out;
+ }
+
+ snprintf(caps->fw_ver, sizeof(caps->fw_ver), "%d.%d.%d",
+ fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev));
+
+ caps->is_ecpf = mlx5_core_is_ecpf_esw_manager(mdev);
+
+out:
+ kfree(out);
+ return ret;
+}
+
+int mlx5hws_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_function,
+ u16 vport_number, u16 *gvmi)
+{
+ int err;
+
+ if (!other_function) {
+ /* self vhca_id */
+ *gvmi = MLX5_CAP_GEN(mdev, vhca_id);
+ return 0;
+ }
+
+ err = mlx5_vport_get_vhca_id(mdev, vport_number, gvmi);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to get vport vhca id for vport %d\n",
+ vport_number);
+ return err;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h
new file mode 100644
index 000000000000..122ccc671628
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h
@@ -0,0 +1,347 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef HWS_CMD_H_
+#define HWS_CMD_H_
+
+#define WIRE_PORT 0xFFFF
+
+#define ACCESS_KEY_LEN 32
+
+enum mlx5hws_cmd_ext_dest_flags {
+ MLX5HWS_CMD_EXT_DEST_REFORMAT = 1 << 0,
+ MLX5HWS_CMD_EXT_DEST_ESW_OWNER_VHCA_ID = 1 << 1,
+};
+
+struct mlx5hws_cmd_set_fte_dest {
+ u8 destination_type;
+ u32 destination_id;
+ enum mlx5hws_cmd_ext_dest_flags ext_flags;
+ u32 ext_reformat_id;
+ u16 esw_owner_vhca_id;
+};
+
+struct mlx5hws_cmd_set_fte_attr {
+ u32 action_flags;
+ bool ignore_flow_level;
+ u8 flow_source;
+ u8 extended_dest;
+ u8 encrypt_decrypt_type;
+ u32 encrypt_decrypt_obj_id;
+ u32 packet_reformat_id;
+ u32 dests_num;
+ struct mlx5hws_cmd_set_fte_dest *dests;
+};
+
+struct mlx5hws_cmd_ft_create_attr {
+ u8 type;
+ u8 level;
+ u16 uid;
+ bool rtc_valid;
+ bool decap_en;
+ bool reformat_en;
+};
+
+struct mlx5hws_cmd_ft_modify_attr {
+ u8 type;
+ u32 rtc_id_0;
+ u32 rtc_id_1;
+ u32 table_miss_id;
+ u8 table_miss_action;
+ u64 modify_fs;
+};
+
+struct mlx5hws_cmd_ft_query_attr {
+ u8 type;
+};
+
+struct mlx5hws_cmd_fg_attr {
+ u32 table_id;
+ u32 table_type;
+};
+
+struct mlx5hws_cmd_forward_tbl {
+ u8 type;
+ u32 ft_id;
+ u32 fg_id;
+ u32 refcount; /* protected by context ctrl lock */
+};
+
+struct mlx5hws_cmd_rtc_create_attr {
+ u32 pd;
+ u32 stc_base;
+ u32 ste_base;
+ u32 miss_ft_id;
+ bool fw_gen_wqe;
+ u8 update_index_mode;
+ u8 access_index_mode;
+ u8 num_hash_definer;
+ u8 log_depth;
+ u8 log_size;
+ u8 table_type;
+ u8 match_definer_0;
+ u8 match_definer_1;
+ u8 reparse_mode;
+ bool is_frst_jumbo;
+ bool is_scnd_range;
+};
+
+struct mlx5hws_cmd_alias_obj_create_attr {
+ u32 obj_id;
+ u16 vhca_id;
+ u16 obj_type;
+ u8 access_key[ACCESS_KEY_LEN];
+};
+
+struct mlx5hws_cmd_stc_create_attr {
+ u8 log_obj_range;
+ u8 table_type;
+};
+
+struct mlx5hws_cmd_stc_modify_attr {
+ u32 stc_offset;
+ u8 action_offset;
+ u8 reparse_mode;
+ enum mlx5_ifc_stc_action_type action_type;
+ union {
+ u32 id; /* TIRN, TAG, FT ID, STE ID, CRYPTO */
+ struct {
+ u8 decap;
+ u16 start_anchor;
+ u16 end_anchor;
+ } remove_header;
+ struct {
+ u32 arg_id;
+ u32 pattern_id;
+ } modify_header;
+ struct {
+ __be64 data;
+ } modify_action;
+ struct {
+ u32 arg_id;
+ u32 header_size;
+ u8 is_inline;
+ u8 encap;
+ u16 insert_anchor;
+ u16 insert_offset;
+ } insert_header;
+ struct {
+ u8 aso_type;
+ u32 devx_obj_id;
+ u8 return_reg_id;
+ } aso;
+ struct {
+ u16 vport_num;
+ u16 esw_owner_vhca_id;
+ u8 eswitch_owner_vhca_id_valid;
+ } vport;
+ struct {
+ struct mlx5hws_pool_chunk ste;
+ struct mlx5hws_pool *ste_pool;
+ u32 ste_obj_id; /* Internal */
+ u32 match_definer_id;
+ u8 log_hash_size;
+ bool ignore_tx;
+ } ste_table;
+ struct {
+ u16 start_anchor;
+ u16 num_of_words;
+ } remove_words;
+ struct {
+ u8 type;
+ u8 op;
+ u8 size;
+ } reformat_trailer;
+
+ u32 dest_table_id;
+ u32 dest_tir_num;
+ };
+};
+
+struct mlx5hws_cmd_ste_create_attr {
+ u8 log_obj_range;
+ u8 table_type;
+};
+
+struct mlx5hws_cmd_definer_create_attr {
+ u8 *dw_selector;
+ u8 *byte_selector;
+ u8 *match_mask;
+};
+
+struct mlx5hws_cmd_allow_other_vhca_access_attr {
+ u16 obj_type;
+ u32 obj_id;
+ u8 access_key[ACCESS_KEY_LEN];
+};
+
+struct mlx5hws_cmd_packet_reformat_create_attr {
+ u8 type;
+ size_t data_sz;
+ void *data;
+ u8 reformat_param_0;
+};
+
+struct mlx5hws_cmd_query_ft_caps {
+ u8 max_level;
+ u8 reparse;
+ u8 ignore_flow_level_rtc_valid;
+};
+
+struct mlx5hws_cmd_generate_wqe_attr {
+ u8 *wqe_ctrl;
+ u8 *gta_ctrl;
+ u8 *gta_data_0;
+ u8 *gta_data_1;
+ u32 pdn;
+};
+
+struct mlx5hws_cmd_query_caps {
+ u32 flex_protocols;
+ u8 wqe_based_update;
+ u8 rtc_reparse_mode;
+ u16 ste_format;
+ u8 rtc_index_mode;
+ u8 ste_alloc_log_max;
+ u8 ste_alloc_log_gran;
+ u8 stc_alloc_log_max;
+ u8 stc_alloc_log_gran;
+ u8 rtc_log_depth_max;
+ u8 format_select_gtpu_dw_0;
+ u8 format_select_gtpu_dw_1;
+ u8 flow_table_hash_type;
+ u8 format_select_gtpu_dw_2;
+ u8 format_select_gtpu_ext_dw_0;
+ u8 access_index_mode;
+ u32 linear_match_definer;
+ bool full_dw_jumbo_support;
+ bool rtc_hash_split_table;
+ bool rtc_linear_lookup_table;
+ u32 supp_type_gen_wqe;
+ u8 rtc_max_hash_def_gen_wqe;
+ u16 supp_ste_format_gen_wqe;
+ struct mlx5hws_cmd_query_ft_caps nic_ft;
+ struct mlx5hws_cmd_query_ft_caps fdb_ft;
+ bool eswitch_manager;
+ bool merged_eswitch;
+ u32 eswitch_manager_vport_number;
+ u8 log_header_modify_argument_granularity;
+ u8 log_header_modify_argument_max_alloc;
+ u8 sq_ts_format;
+ u8 fdb_tir_stc;
+ u64 definer_format_sup;
+ u32 trivial_match_definer;
+ u32 vhca_id;
+ u32 shared_vhca_id;
+ char fw_ver[64];
+ bool ipsec_offload;
+ bool is_ecpf;
+ u8 flex_parser_ok_bits_supp;
+ u8 flex_parser_id_geneve_tlv_option_0;
+ u8 flex_parser_id_mpls_over_gre;
+ u8 flex_parser_id_mpls_over_udp;
+};
+
+int mlx5hws_cmd_flow_table_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_ft_create_attr *ft_attr,
+ u32 *table_id);
+
+int mlx5hws_cmd_flow_table_modify(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_ft_modify_attr *ft_attr,
+ u32 table_id);
+
+int mlx5hws_cmd_flow_table_query(struct mlx5_core_dev *mdev,
+ u32 obj_id,
+ struct mlx5hws_cmd_ft_query_attr *ft_attr,
+ u64 *icm_addr_0, u64 *icm_addr_1);
+
+int mlx5hws_cmd_flow_table_destroy(struct mlx5_core_dev *mdev,
+ u8 fw_ft_type, u32 table_id);
+
+int mlx5hws_cmd_rtc_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_rtc_create_attr *rtc_attr,
+ u32 *rtc_id);
+
+void mlx5hws_cmd_rtc_destroy(struct mlx5_core_dev *mdev, u32 rtc_id);
+
+int mlx5hws_cmd_stc_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_stc_create_attr *stc_attr,
+ u32 *stc_id);
+
+int mlx5hws_cmd_stc_modify(struct mlx5_core_dev *mdev,
+ u32 stc_id,
+ struct mlx5hws_cmd_stc_modify_attr *stc_attr);
+
+void mlx5hws_cmd_stc_destroy(struct mlx5_core_dev *mdev, u32 stc_id);
+
+int mlx5hws_cmd_generate_wqe(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_generate_wqe_attr *attr,
+ struct mlx5_cqe64 *ret_cqe);
+
+int mlx5hws_cmd_ste_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_ste_create_attr *ste_attr,
+ u32 *ste_id);
+
+void mlx5hws_cmd_ste_destroy(struct mlx5_core_dev *mdev, u32 ste_id);
+
+int mlx5hws_cmd_definer_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_definer_create_attr *def_attr,
+ u32 *definer_id);
+
+void mlx5hws_cmd_definer_destroy(struct mlx5_core_dev *mdev,
+ u32 definer_id);
+
+int mlx5hws_cmd_arg_create(struct mlx5_core_dev *mdev,
+ u16 log_obj_range,
+ u32 pd,
+ u32 *arg_id);
+
+void mlx5hws_cmd_arg_destroy(struct mlx5_core_dev *mdev,
+ u32 arg_id);
+
+int mlx5hws_cmd_header_modify_pattern_create(struct mlx5_core_dev *mdev,
+ u32 pattern_length,
+ u8 *actions,
+ u32 *ptrn_id);
+
+void mlx5hws_cmd_header_modify_pattern_destroy(struct mlx5_core_dev *mdev,
+ u32 ptrn_id);
+
+int mlx5hws_cmd_packet_reformat_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_packet_reformat_create_attr *attr,
+ u32 *reformat_id);
+
+int mlx5hws_cmd_packet_reformat_destroy(struct mlx5_core_dev *mdev,
+ u32 reformat_id);
+
+int mlx5hws_cmd_set_fte(struct mlx5_core_dev *mdev,
+ u32 table_type,
+ u32 table_id,
+ u32 group_id,
+ struct mlx5hws_cmd_set_fte_attr *fte_attr);
+
+int mlx5hws_cmd_delete_fte(struct mlx5_core_dev *mdev,
+ u32 table_type, u32 table_id);
+
+struct mlx5hws_cmd_forward_tbl *
+mlx5hws_cmd_forward_tbl_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_ft_create_attr *ft_attr,
+ struct mlx5hws_cmd_set_fte_attr *fte_attr);
+
+void mlx5hws_cmd_forward_tbl_destroy(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_forward_tbl *tbl);
+
+int mlx5hws_cmd_sq_modify_rdy(struct mlx5_core_dev *mdev, u32 sqn);
+
+int mlx5hws_cmd_query_caps(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_query_caps *caps);
+
+void mlx5hws_cmd_set_attr_connect_miss_tbl(struct mlx5hws_context *ctx,
+ u32 fw_ft_type,
+ enum mlx5hws_table_type type,
+ struct mlx5hws_cmd_ft_modify_attr *ft_attr);
+
+int mlx5hws_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_function,
+ u16 vport_number, u16 *gvmi);
+
+#endif /* HWS_CMD_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c
new file mode 100644
index 000000000000..428dae869706
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA CORPORATION. All rights reserved. */
+
+#include "internal.h"
+
+bool mlx5hws_context_cap_dynamic_reparse(struct mlx5hws_context *ctx)
+{
+ return IS_BIT_SET(ctx->caps->rtc_reparse_mode, MLX5_IFC_RTC_REPARSE_BY_STC);
+}
+
+u8 mlx5hws_context_get_reparse_mode(struct mlx5hws_context *ctx)
+{
+ /* Prefer to use dynamic reparse, reparse only specific actions */
+ if (mlx5hws_context_cap_dynamic_reparse(ctx))
+ return MLX5_IFC_RTC_REPARSE_NEVER;
+
+ /* Otherwise fall back to the less efficient static reparse mode */
+ return MLX5_IFC_RTC_REPARSE_ALWAYS;
+}
+
+static int hws_context_pools_init(struct mlx5hws_context *ctx)
+{
+ struct mlx5hws_pool_attr pool_attr = {0};
+ u8 max_log_sz;
+ int ret;
+
+ ret = mlx5hws_pat_init_pattern_cache(&ctx->pattern_cache);
+ if (ret)
+ return ret;
+
+ ret = mlx5hws_definer_init_cache(&ctx->definer_cache);
+ if (ret)
+ goto uninit_pat_cache;
+
+ /* Create an STC pool per FT type */
+ pool_attr.pool_type = MLX5HWS_POOL_TYPE_STC;
+ max_log_sz = min(MLX5HWS_POOL_STC_LOG_SZ, ctx->caps->stc_alloc_log_max);
+ pool_attr.alloc_log_sz = max(max_log_sz, ctx->caps->stc_alloc_log_gran);
+
+ pool_attr.table_type = MLX5HWS_TABLE_TYPE_FDB;
+ ctx->stc_pool = mlx5hws_pool_create(ctx, &pool_attr);
+ if (!ctx->stc_pool) {
+ mlx5hws_err(ctx, "Failed to allocate STC pool\n");
+ ret = -ENOMEM;
+ goto uninit_cache;
+ }
+
+ return 0;
+
+uninit_cache:
+ mlx5hws_definer_uninit_cache(ctx->definer_cache);
+uninit_pat_cache:
+ mlx5hws_pat_uninit_pattern_cache(ctx->pattern_cache);
+ return ret;
+}
+
+static void hws_context_pools_uninit(struct mlx5hws_context *ctx)
+{
+ if (ctx->stc_pool)
+ mlx5hws_pool_destroy(ctx->stc_pool);
+
+ mlx5hws_definer_uninit_cache(ctx->definer_cache);
+ mlx5hws_pat_uninit_pattern_cache(ctx->pattern_cache);
+}
+
+static int hws_context_init_pd(struct mlx5hws_context *ctx)
+{
+ int ret = 0;
+
+ ret = mlx5_core_alloc_pd(ctx->mdev, &ctx->pd_num);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate PD\n");
+ return ret;
+ }
+
+ ctx->flags |= MLX5HWS_CONTEXT_FLAG_PRIVATE_PD;
+
+ return 0;
+}
+
+static int hws_context_uninit_pd(struct mlx5hws_context *ctx)
+{
+ if (ctx->flags & MLX5HWS_CONTEXT_FLAG_PRIVATE_PD)
+ mlx5_core_dealloc_pd(ctx->mdev, ctx->pd_num);
+
+ return 0;
+}
+
+static void hws_context_check_hws_supp(struct mlx5hws_context *ctx)
+{
+ struct mlx5hws_cmd_query_caps *caps = ctx->caps;
+
+ /* HWS not supported on device / FW */
+ if (!caps->wqe_based_update) {
+ mlx5hws_err(ctx, "Required HWS WQE based insertion cap not supported\n");
+ return;
+ }
+
+ if (!caps->eswitch_manager) {
+ mlx5hws_err(ctx, "HWS is not supported for non eswitch manager port\n");
+ return;
+ }
+
+ /* Current solution requires all rules to set reparse bit */
+ if ((!caps->nic_ft.reparse ||
+ (!caps->fdb_ft.reparse && caps->eswitch_manager)) ||
+ !IS_BIT_SET(caps->rtc_reparse_mode, MLX5_IFC_RTC_REPARSE_ALWAYS)) {
+ mlx5hws_err(ctx, "Required HWS reparse cap not supported\n");
+ return;
+ }
+
+ /* FW/HW must support 8DW STE */
+ if (!IS_BIT_SET(caps->ste_format, MLX5_IFC_RTC_STE_FORMAT_8DW)) {
+ mlx5hws_err(ctx, "Required HWS STE format not supported\n");
+ return;
+ }
+
+ /* Adding rules both by hash and by offset must be supported */
+ if (!IS_BIT_SET(caps->rtc_index_mode, MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH) ||
+ !IS_BIT_SET(caps->rtc_index_mode, MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET)) {
+ mlx5hws_err(ctx, "Required HWS RTC update mode not supported\n");
+ return;
+ }
+
+ /* Support for SELECT definer ID is required */
+ if (!IS_BIT_SET(caps->definer_format_sup, MLX5_IFC_DEFINER_FORMAT_ID_SELECT)) {
+ mlx5hws_err(ctx, "Required HWS Dynamic definer not supported\n");
+ return;
+ }
+
+ ctx->flags |= MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT;
+}
+
+static int hws_context_init_hws(struct mlx5hws_context *ctx,
+ struct mlx5hws_context_attr *attr)
+{
+ int ret;
+
+ hws_context_check_hws_supp(ctx);
+
+ if (!(ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT))
+ return 0;
+
+ ret = hws_context_init_pd(ctx);
+ if (ret)
+ return ret;
+
+ ret = hws_context_pools_init(ctx);
+ if (ret)
+ goto uninit_pd;
+
+ /* The context exposes only the backward-compatible (BWC) API;
+ * the native HWS API is not supported.
+ */
+ ctx->flags |= MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT;
+
+ ret = mlx5hws_send_queues_open(ctx, attr->queues, attr->queue_size);
+ if (ret)
+ goto pools_uninit;
+
+ ret = mlx5hws_action_ste_pool_init(ctx);
+ if (ret)
+ goto close_queues;
+
+ INIT_LIST_HEAD(&ctx->tbl_list);
+
+ return 0;
+
+close_queues:
+ mlx5hws_send_queues_close(ctx);
+pools_uninit:
+ hws_context_pools_uninit(ctx);
+uninit_pd:
+ hws_context_uninit_pd(ctx);
+ return ret;
+}
+
+static void hws_context_uninit_hws(struct mlx5hws_context *ctx)
+{
+ if (!(ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT))
+ return;
+
+ mlx5hws_action_ste_pool_uninit(ctx);
+ mlx5hws_send_queues_close(ctx);
+ hws_context_pools_uninit(ctx);
+ hws_context_uninit_pd(ctx);
+}
+
+struct mlx5hws_context *mlx5hws_context_open(struct mlx5_core_dev *mdev,
+ struct mlx5hws_context_attr *attr)
+{
+ struct mlx5hws_context *ctx;
+ int ret;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return NULL;
+
+ ctx->mdev = mdev;
+
+ mutex_init(&ctx->ctrl_lock);
+ xa_init(&ctx->peer_ctx_xa);
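+ /* Maps a peer vhca_id to its peer context; see mlx5hws_context_set_peer() */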
+
+ ctx->caps = kzalloc(sizeof(*ctx->caps), GFP_KERNEL);
+ if (!ctx->caps)
+ goto free_ctx;
+
+ ret = mlx5hws_cmd_query_caps(mdev, ctx->caps);
+ if (ret)
+ goto free_caps;
+
+ ret = mlx5hws_vport_init_vports(ctx);
+ if (ret)
+ goto free_caps;
+
+ ret = hws_context_init_hws(ctx, attr);
+ if (ret)
+ goto uninit_vports;
+
+ mlx5hws_debug_init_dump(ctx);
+
+ return ctx;
+
+uninit_vports:
+ mlx5hws_vport_uninit_vports(ctx);
+free_caps:
+ kfree(ctx->caps);
+free_ctx:
+ xa_destroy(&ctx->peer_ctx_xa);
+ mutex_destroy(&ctx->ctrl_lock);
+ kfree(ctx);
+ return NULL;
+}
+
+int mlx5hws_context_close(struct mlx5hws_context *ctx)
+{
+ mlx5hws_debug_uninit_dump(ctx);
+ hws_context_uninit_hws(ctx);
+ mlx5hws_vport_uninit_vports(ctx);
+ kfree(ctx->caps);
+ xa_destroy(&ctx->peer_ctx_xa);
+ mutex_destroy(&ctx->ctrl_lock);
+ kfree(ctx);
+ return 0;
+}
+
+void mlx5hws_context_set_peer(struct mlx5hws_context *ctx,
+ struct mlx5hws_context *peer_ctx,
+ u16 peer_vhca_id)
+{
+ mutex_lock(&ctx->ctrl_lock);
+
+ if (xa_err(xa_store(&ctx->peer_ctx_xa, peer_vhca_id, peer_ctx, GFP_KERNEL)))
+ pr_warn("HWS: failed to store peer context in peer xarray\n");
+
+ mutex_unlock(&ctx->ctrl_lock);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h
new file mode 100644
index 000000000000..3f8938c73dc0
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef HWS_CONTEXT_H_
+#define HWS_CONTEXT_H_
+
+enum mlx5hws_context_flags {
+ MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT = 1 << 0,
+ MLX5HWS_CONTEXT_FLAG_PRIVATE_PD = 1 << 1,
+ MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT = 1 << 2,
+ MLX5HWS_CONTEXT_FLAG_NATIVE_SUPPORT = 1 << 3,
+};
+
+enum mlx5hws_context_shared_stc_type {
+ MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3 = 0,
+ MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP = 1,
+ MLX5HWS_CONTEXT_SHARED_STC_MAX = 2,
+};
+
+struct mlx5hws_context_common_res {
+ struct mlx5hws_action_default_stc *default_stc;
+ struct mlx5hws_action_shared_stc *shared_stc[MLX5HWS_CONTEXT_SHARED_STC_MAX];
+ struct mlx5hws_cmd_forward_tbl *default_miss;
+};
+
+struct mlx5hws_context_debug_info {
+ struct dentry *steering_debugfs;
+ struct dentry *fdb_debugfs;
+};
+
+struct mlx5hws_context_vports {
+ u16 esw_manager_gvmi;
+ u16 uplink_gvmi;
+ struct xarray vport_gvmi_xa;
+};
+
+struct mlx5hws_context {
+ struct mlx5_core_dev *mdev;
+ struct mlx5hws_cmd_query_caps *caps;
+ u32 pd_num;
+ struct mlx5hws_pool *stc_pool;
+ struct mlx5hws_action_ste_pool *action_ste_pool; /* One per queue */
+ struct delayed_work action_ste_cleanup;
+ struct mlx5hws_context_common_res common_res;
+ struct mlx5hws_pattern_cache *pattern_cache;
+ struct mlx5hws_definer_cache *definer_cache;
+ struct mutex ctrl_lock; /* control lock to protect the whole context */
+ enum mlx5hws_context_flags flags;
+ struct mlx5hws_send_engine *send_queue;
+ size_t queues;
+ struct mutex *bwc_send_queue_locks; /* protect BWC queues */
+ struct lock_class_key *bwc_lock_class_keys;
+ struct list_head tbl_list;
+ struct mlx5hws_context_debug_info debug_info;
+ struct xarray peer_ctx_xa;
+ struct mlx5hws_context_vports vports;
+};
+
+static inline bool mlx5hws_context_bwc_supported(struct mlx5hws_context *ctx)
+{
+ return ctx->flags & MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT;
+}
+
+static inline bool mlx5hws_context_native_supported(struct mlx5hws_context *ctx)
+{
+ return ctx->flags & MLX5HWS_CONTEXT_FLAG_NATIVE_SUPPORT;
+}
+
+bool mlx5hws_context_cap_dynamic_reparse(struct mlx5hws_context *ctx);
+
+u8 mlx5hws_context_get_reparse_mode(struct mlx5hws_context *ctx);
+
+#endif /* HWS_CONTEXT_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c
new file mode 100644
index 000000000000..2ec8cb10139a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c
@@ -0,0 +1,487 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/seq_file.h>
+#include <linux/version.h>
+#include "internal.h"
+
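+/* Each dump line is a comma-separated record: a numeric resource type
+ * (enum mlx5hws_debug_res_type) followed by object IDs and attributes.
+ */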
+static int
+hws_debug_dump_matcher_template_definer(struct seq_file *f,
+ void *parent_obj,
+ struct mlx5hws_definer *definer,
+ enum mlx5hws_debug_res_type type)
+{
+ int i;
+
+ if (!definer)
+ return 0;
+
+ seq_printf(f, "%d,0x%llx,0x%llx,%d,%d,",
+ type,
+ HWS_PTR_TO_ID(definer),
+ HWS_PTR_TO_ID(parent_obj),
+ definer->obj_id,
+ definer->type);
+
+ for (i = 0; i < DW_SELECTORS; i++)
+ seq_printf(f, "0x%x%s", definer->dw_selector[i],
+ (i == DW_SELECTORS - 1) ? "," : "-");
+
+ for (i = 0; i < BYTE_SELECTORS; i++)
+ seq_printf(f, "0x%x%s", definer->byte_selector[i],
+ (i == BYTE_SELECTORS - 1) ? "," : "-");
+
+ for (i = 0; i < MLX5HWS_JUMBO_TAG_SZ; i++)
+ seq_printf(f, "%02x", definer->mask.jumbo[i]);
+
+ seq_puts(f, "\n");
+
+ return 0;
+}
+
+static int
+hws_debug_dump_matcher_match_template(struct seq_file *f, struct mlx5hws_matcher *matcher)
+{
+ enum mlx5hws_debug_res_type type;
+ int i, ret;
+
+ for (i = 0; i < matcher->num_of_mt; i++) {
+ struct mlx5hws_match_template *mt = &matcher->mt[i];
+
+ seq_printf(f, "%d,0x%llx,0x%llx,%d,%d,%d\n",
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER_MATCH_TEMPLATE,
+ HWS_PTR_TO_ID(mt),
+ HWS_PTR_TO_ID(matcher),
+ mt->fc_sz,
+ 0, 0);
+
+ type = MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_MATCH_DEFINER;
+ ret = hws_debug_dump_matcher_template_definer(f, mt, mt->definer, type);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+hws_debug_dump_matcher_action_template(struct seq_file *f, struct mlx5hws_matcher *matcher)
+{
+ enum mlx5hws_action_type action_type;
+ int i, j;
+
+ for (i = 0; i < matcher->num_of_at; i++) {
+ struct mlx5hws_action_template *at = &matcher->at[i];
+
+ seq_printf(f, "%d,0x%llx,0x%llx,%d,%d,%d",
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER_ACTION_TEMPLATE,
+ HWS_PTR_TO_ID(at),
+ HWS_PTR_TO_ID(matcher),
+ at->only_term,
+ at->num_of_action_stes,
+ at->num_actions);
+
+ for (j = 0; j < at->num_actions; j++) {
+ action_type = at->action_type_arr[j];
+ seq_printf(f, ",%s", mlx5hws_action_type_to_str(action_type));
+ }
+
+ seq_puts(f, "\n");
+ }
+
+ return 0;
+}
+
+static int
+hws_debug_dump_matcher_attr(struct seq_file *f, struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_matcher_attr *attr = &matcher->attr;
+
+ seq_printf(f, "%d,0x%llx,%d,%d,%d,%d,%d,%d,%d,%d,-1,-1,%d,%d\n",
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER_ATTR,
+ HWS_PTR_TO_ID(matcher),
+ attr->priority,
+ attr->mode,
+ attr->size[MLX5HWS_MATCHER_SIZE_TYPE_RX].table.sz_row_log,
+ attr->size[MLX5HWS_MATCHER_SIZE_TYPE_RX].table.sz_col_log,
+ attr->optimize_using_rule_idx,
+ attr->optimize_flow_src,
+ attr->insert_mode,
+ attr->distribute_mode,
+ attr->size[MLX5HWS_MATCHER_SIZE_TYPE_TX].table.sz_row_log,
+ attr->size[MLX5HWS_MATCHER_SIZE_TYPE_TX].table.sz_col_log);
+
+ return 0;
+}
+
+static int hws_debug_dump_matcher(struct seq_file *f, struct mlx5hws_matcher *matcher)
+{
+ enum mlx5hws_table_type tbl_type = matcher->tbl->type;
+ struct mlx5hws_cmd_ft_query_attr ft_attr = {0};
+ u64 icm_addr_0 = 0;
+ u64 icm_addr_1 = 0;
+ u32 ste_0_id = -1;
+ u32 ste_1_id = -1;
+ int ret;
+
+ seq_printf(f, "%d,0x%llx,0x%llx,%d,%d,0x%llx",
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER,
+ HWS_PTR_TO_ID(matcher),
+ HWS_PTR_TO_ID(matcher->tbl),
+ matcher->num_of_mt,
+ matcher->end_ft_id,
+ matcher->col_matcher ? HWS_PTR_TO_ID(matcher->col_matcher) : 0);
+
+ ste_0_id = matcher->match_ste.ste_0_base;
+ if (tbl_type == MLX5HWS_TABLE_TYPE_FDB)
+ ste_1_id = matcher->match_ste.ste_1_base;
+
+ seq_printf(f, ",%d,%d,%d,%d",
+ matcher->match_ste.rtc_0_id,
+ (int)ste_0_id,
+ matcher->match_ste.rtc_1_id,
+ (int)ste_1_id);
+
+ ft_attr.type = matcher->tbl->fw_ft_type;
+ ret = mlx5hws_cmd_flow_table_query(matcher->tbl->ctx->mdev,
+ matcher->end_ft_id,
+ &ft_attr,
+ &icm_addr_0,
+ &icm_addr_1);
+ if (ret)
+ return ret;
+
+ seq_printf(f, ",-1,-1,-1,-1,0,0x%llx,0x%llx\n",
+ mlx5hws_debug_icm_to_idx(icm_addr_0),
+ mlx5hws_debug_icm_to_idx(icm_addr_1));
+
+ ret = hws_debug_dump_matcher_attr(f, matcher);
+ if (ret)
+ return ret;
+
+ ret = hws_debug_dump_matcher_match_template(f, matcher);
+ if (ret)
+ return ret;
+
+ ret = hws_debug_dump_matcher_action_template(f, matcher);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int hws_debug_dump_table(struct seq_file *f, struct mlx5hws_table *tbl)
+{
+ struct mlx5hws_cmd_ft_query_attr ft_attr = {0};
+ struct mlx5hws_matcher *matcher;
+ u64 local_icm_addr_0 = 0;
+ u64 local_icm_addr_1 = 0;
+ u64 icm_addr_0 = 0;
+ u64 icm_addr_1 = 0;
+ int ret;
+
+ seq_printf(f, "%d,0x%llx,0x%llx,%d,%d,%d,%d,%d",
+ MLX5HWS_DEBUG_RES_TYPE_TABLE,
+ HWS_PTR_TO_ID(tbl),
+ HWS_PTR_TO_ID(tbl->ctx),
+ tbl->ft_id,
+ MLX5HWS_TABLE_TYPE_BASE + tbl->type,
+ tbl->fw_ft_type,
+ tbl->level,
+ 0);
+
+ ft_attr.type = tbl->fw_ft_type;
+ ret = mlx5hws_cmd_flow_table_query(tbl->ctx->mdev,
+ tbl->ft_id,
+ &ft_attr,
+ &icm_addr_0,
+ &icm_addr_1);
+ if (ret)
+ return ret;
+
+ seq_printf(f, ",0x%llx,0x%llx,0x%llx,0x%llx,0x%llx\n",
+ mlx5hws_debug_icm_to_idx(icm_addr_0),
+ mlx5hws_debug_icm_to_idx(icm_addr_1),
+ mlx5hws_debug_icm_to_idx(local_icm_addr_0),
+ mlx5hws_debug_icm_to_idx(local_icm_addr_1),
+ HWS_PTR_TO_ID(tbl->default_miss.miss_tbl));
+
+ list_for_each_entry(matcher, &tbl->matchers_list, list_node) {
+ ret = hws_debug_dump_matcher(f, matcher);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+hws_debug_dump_context_send_engine(struct seq_file *f, struct mlx5hws_context *ctx)
+{
+ struct mlx5hws_send_engine *send_queue;
+ struct mlx5hws_send_ring *send_ring;
+ struct mlx5hws_send_ring_cq *cq;
+ struct mlx5hws_send_ring_sq *sq;
+ int i;
+
+ for (i = 0; i < (int)ctx->queues; i++) {
+ send_queue = &ctx->send_queue[i];
+ seq_printf(f, "%d,0x%llx,%d,%d,%d,%d,%d,%d,%d,%d,%d\n",
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT_SEND_ENGINE,
+ HWS_PTR_TO_ID(ctx),
+ i,
+ send_queue->used_entries,
+ send_queue->num_entries,
+ 1, /* one send ring per queue */
+ send_queue->num_entries,
+ send_queue->err,
+ send_queue->completed.ci,
+ send_queue->completed.pi,
+ send_queue->completed.mask);
+
+ send_ring = &send_queue->send_ring;
+ cq = &send_ring->send_cq;
+ sq = &send_ring->send_sq;
+
+ seq_printf(f, "%d,0x%llx,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d\n",
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT_SEND_RING,
+ HWS_PTR_TO_ID(ctx),
+ 0, /* one send ring per send queue */
+ i,
+ cq->mcq.cqn,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ cq->mcq.cqe_sz,
+ sq->sqn,
+ 0,
+ 0,
+ 0);
+ }
+
+ return 0;
+}
+
+static int hws_debug_dump_context_caps(struct seq_file *f, struct mlx5hws_context *ctx)
+{
+ struct mlx5hws_cmd_query_caps *caps = ctx->caps;
+
+ seq_printf(f, "%d,0x%llx,%s,%d,%d,%d,%d,",
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT_CAPS,
+ HWS_PTR_TO_ID(ctx),
+ caps->fw_ver,
+ caps->wqe_based_update,
+ caps->ste_format,
+ caps->ste_alloc_log_max,
+ caps->log_header_modify_argument_max_alloc);
+
+ seq_printf(f, "%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%s\n",
+ caps->flex_protocols,
+ caps->rtc_reparse_mode,
+ caps->rtc_index_mode,
+ caps->ste_alloc_log_gran,
+ caps->stc_alloc_log_max,
+ caps->stc_alloc_log_gran,
+ caps->rtc_log_depth_max,
+ caps->format_select_gtpu_dw_0,
+ caps->format_select_gtpu_dw_1,
+ caps->format_select_gtpu_dw_2,
+ caps->format_select_gtpu_ext_dw_0,
+ caps->nic_ft.max_level,
+ caps->nic_ft.reparse,
+ caps->fdb_ft.max_level,
+ caps->fdb_ft.reparse,
+ caps->log_header_modify_argument_granularity,
+ caps->linear_match_definer,
+ "regc_3");
+
+ return 0;
+}
+
+static int hws_debug_dump_context_attr(struct seq_file *f, struct mlx5hws_context *ctx)
+{
+ seq_printf(f, "%u,0x%llx,%d,%zu,%d,%s,%d,%d\n",
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT_ATTR,
+ HWS_PTR_TO_ID(ctx),
+ ctx->pd_num,
+ ctx->queues,
+ ctx->send_queue->num_entries,
+ "None", /* no shared gvmi */
+ ctx->caps->vhca_id,
+ 0xffff); /* no shared gvmi */
+
+ return 0;
+}
+
+static int hws_debug_dump_context_info(struct seq_file *f, struct mlx5hws_context *ctx)
+{
+ struct mlx5_core_dev *dev = ctx->mdev;
+ int ret;
+
+ seq_printf(f, "%d,0x%llx,%d,%s,%s.KERNEL_%u_%u_%u\n",
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT,
+ HWS_PTR_TO_ID(ctx),
+ ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT,
+ pci_name(dev->pdev),
+ HWS_DEBUG_FORMAT_VERSION,
+ LINUX_VERSION_MAJOR,
+ LINUX_VERSION_PATCHLEVEL,
+ LINUX_VERSION_SUBLEVEL);
+
+ ret = hws_debug_dump_context_attr(f, ctx);
+ if (ret)
+ return ret;
+
+ ret = hws_debug_dump_context_caps(f, ctx);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int hws_debug_dump_context_stc_resource(struct seq_file *f,
+ struct mlx5hws_context *ctx,
+ struct mlx5hws_pool_resource *resource)
+{
+ u32 tbl_type = MLX5HWS_TABLE_TYPE_BASE + MLX5HWS_TABLE_TYPE_FDB;
+
+ seq_printf(f, "%d,0x%llx,%u,%u\n",
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT_STC,
+ HWS_PTR_TO_ID(ctx),
+ tbl_type,
+ resource->base_id);
+
+ return 0;
+}
+
+static int hws_debug_dump_context_stc(struct seq_file *f, struct mlx5hws_context *ctx)
+{
+ struct mlx5hws_pool *stc_pool = ctx->stc_pool;
+ int ret;
+
+ if (!stc_pool)
+ return 0;
+
+ if (stc_pool->resource) {
+ ret = hws_debug_dump_context_stc_resource(f, ctx,
+ stc_pool->resource);
+ if (ret)
+ return ret;
+ }
+
+ if (stc_pool->mirror_resource) {
+ struct mlx5hws_pool_resource *res = stc_pool->mirror_resource;
+
+ ret = hws_debug_dump_context_stc_resource(f, ctx, res);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void
+hws_debug_dump_action_ste_table(struct seq_file *f,
+ struct mlx5hws_action_ste_table *action_tbl)
+{
+ int ste_0_id = mlx5hws_pool_get_base_id(action_tbl->pool);
+ int ste_1_id = mlx5hws_pool_get_base_mirror_id(action_tbl->pool);
+
+ seq_printf(f, "%d,0x%llx,%d,%d,%d,%d\n",
+ MLX5HWS_DEBUG_RES_TYPE_ACTION_STE_TABLE,
+ HWS_PTR_TO_ID(action_tbl),
+ action_tbl->rtc_0_id, ste_0_id,
+ action_tbl->rtc_1_id, ste_1_id);
+}
+
+static void hws_debug_dump_action_ste_pool(struct seq_file *f,
+ struct mlx5hws_action_ste_pool *pool)
+{
+ struct mlx5hws_action_ste_table *action_tbl;
+ enum mlx5hws_pool_optimize opt;
+
+ mutex_lock(&pool->lock);
+ for (opt = MLX5HWS_POOL_OPTIMIZE_NONE; opt < MLX5HWS_POOL_OPTIMIZE_MAX;
+ opt++) {
+ list_for_each_entry(action_tbl, &pool->elems[opt].available,
+ list_node) {
+ hws_debug_dump_action_ste_table(f, action_tbl);
+ }
+ }
+ mutex_unlock(&pool->lock);
+}
+
+static int hws_debug_dump_context(struct seq_file *f, struct mlx5hws_context *ctx)
+{
+ struct mlx5hws_table *tbl;
+ int ret, i;
+
+ ret = hws_debug_dump_context_info(f, ctx);
+ if (ret)
+ return ret;
+
+ ret = hws_debug_dump_context_send_engine(f, ctx);
+ if (ret)
+ return ret;
+
+ ret = hws_debug_dump_context_stc(f, ctx);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(tbl, &ctx->tbl_list, tbl_list_node) {
+ ret = hws_debug_dump_table(f, tbl);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < ctx->queues; i++)
+ hws_debug_dump_action_ste_pool(f, &ctx->action_ste_pool[i]);
+
+ return 0;
+}
+
+static int
+hws_debug_dump(struct seq_file *f, struct mlx5hws_context *ctx)
+{
+ int ret;
+
+ if (!f || !ctx)
+ return -EINVAL;
+
+ mutex_lock(&ctx->ctrl_lock);
+ ret = hws_debug_dump_context(f, ctx);
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return ret;
+}
+
+static int hws_dump_show(struct seq_file *file, void *priv)
+{
+ return hws_debug_dump(file, file->private);
+}
+DEFINE_SHOW_ATTRIBUTE(hws_dump);
+
+void mlx5hws_debug_init_dump(struct mlx5hws_context *ctx)
+{
+ struct mlx5_core_dev *dev = ctx->mdev;
+ char file_name[128];
+
+ ctx->debug_info.steering_debugfs =
+ debugfs_create_dir("steering", mlx5_debugfs_get_dev_root(dev));
+ ctx->debug_info.fdb_debugfs =
+ debugfs_create_dir("fdb", ctx->debug_info.steering_debugfs);
+
+ sprintf(file_name, "ctx_%p", ctx);
+ debugfs_create_file(file_name, 0444, ctx->debug_info.fdb_debugfs,
+ ctx, &hws_dump_fops);
+}
+
+void mlx5hws_debug_uninit_dump(struct mlx5hws_context *ctx)
+{
+ debugfs_remove_recursive(ctx->debug_info.steering_debugfs);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.h
new file mode 100644
index 000000000000..89c396f9f266
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef HWS_DEBUG_H_
+#define HWS_DEBUG_H_
+
+#define HWS_DEBUG_FORMAT_VERSION "1.0"
+
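+/* Low 32 bits of the kernel pointer, used as a stable object ID in dump records */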
+#define HWS_PTR_TO_ID(p) ((u64)(uintptr_t)(p) & 0xFFFFFFFFULL)
+
+enum mlx5hws_debug_res_type {
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT = 4000,
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT_ATTR = 4001,
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT_CAPS = 4002,
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT_SEND_ENGINE = 4003,
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT_SEND_RING = 4004,
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT_STC = 4005,
+
+ MLX5HWS_DEBUG_RES_TYPE_TABLE = 4100,
+
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER = 4200,
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER_ATTR = 4201,
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER_MATCH_TEMPLATE = 4202,
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_MATCH_DEFINER = 4203,
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER_ACTION_TEMPLATE = 4204,
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_HASH_DEFINER = 4205,
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_RANGE_DEFINER = 4206,
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_COMPARE_MATCH_DEFINER = 4207,
+
+ MLX5HWS_DEBUG_RES_TYPE_ACTION_STE_TABLE = 4300,
+};
+
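+/* Report ICM addresses as compact 32-bit indices (address / 64, truncated) */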
+static inline u64
+mlx5hws_debug_icm_to_idx(u64 icm_addr)
+{
+ return (icm_addr >> 6) & 0xffffffff;
+}
+
+void mlx5hws_debug_init_dump(struct mlx5hws_context *ctx);
+void mlx5hws_debug_uninit_dump(struct mlx5hws_context *ctx);
+
+#endif /* HWS_DEBUG_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
new file mode 100644
index 000000000000..82fd122d4284
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
@@ -0,0 +1,2329 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "internal.h"
+
+/* Pattern tunnel Layer bits. */
+#define MLX5_FLOW_LAYER_VXLAN BIT(12)
+#define MLX5_FLOW_LAYER_VXLAN_GPE BIT(13)
+#define MLX5_FLOW_LAYER_GRE BIT(14)
+#define MLX5_FLOW_LAYER_MPLS BIT(15)
+
+/* Pattern tunnel Layer bits (continued). */
+#define MLX5_FLOW_LAYER_IPIP BIT(23)
+#define MLX5_FLOW_LAYER_IPV6_ENCAP BIT(24)
+#define MLX5_FLOW_LAYER_NVGRE BIT(25)
+#define MLX5_FLOW_LAYER_GENEVE BIT(26)
+
+#define MLX5_FLOW_ITEM_FLEX_TUNNEL BIT_ULL(39)
+
+/* Tunnel Masks. */
+#define MLX5_FLOW_LAYER_TUNNEL \
+ (MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
+ MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_NVGRE | MLX5_FLOW_LAYER_MPLS | \
+ MLX5_FLOW_LAYER_IPIP | MLX5_FLOW_LAYER_IPV6_ENCAP | \
+ MLX5_FLOW_LAYER_GENEVE | MLX5_FLOW_LAYER_GTP | \
+ MLX5_FLOW_ITEM_FLEX_TUNNEL)
+
+#define GTP_PDU_SC 0x85
+#define BAD_PORT 0xBAD
+#define ETH_TYPE_IPV4_VXLAN 0x0800
+#define ETH_TYPE_IPV6_VXLAN 0x86DD
+#define UDP_GTPU_PORT 2152
+#define UDP_PORT_MPLS 6635
+#define UDP_GENEVE_PORT 6081
+#define UDP_ROCEV2_PORT 4791
+#define HWS_FLOW_LAYER_TUNNEL_NO_MPLS (MLX5_FLOW_LAYER_TUNNEL & ~MLX5_FLOW_LAYER_MPLS)
+
+#define STE_NO_VLAN 0x0
+#define STE_SVLAN 0x1
+#define STE_CVLAN 0x2
+#define STE_NO_L3 0x0
+#define STE_IPV4 0x1
+#define STE_IPV6 0x2
+#define STE_NO_L4 0x0
+#define STE_TCP 0x1
+#define STE_UDP 0x2
+#define STE_ICMP 0x3
+#define STE_ESP 0x3
+
+#define IPV4 0x4
+#define IPV6 0x6
+
+/* Setter function based on bit offset and mask, for 32bit DW */
+#define _HWS_SET32(p, v, byte_off, bit_off, mask) \
+ do { \
+ u32 _v = v; \
+ *((__be32 *)(p) + ((byte_off) / 4)) = \
+ cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + \
+ ((byte_off) / 4))) & \
+ (~((mask) << (bit_off)))) | \
+ (((_v) & (mask)) << \
+ (bit_off))); \
+ } while (0)
+
+/* Setter function based on bit offset and mask, for unaligned 32bit DW */
+#define HWS_SET32(p, v, byte_off, bit_off, mask) \
+ do { \
+ if (unlikely((bit_off) < 0)) { \
+ u32 _bit_off = -1 * (bit_off); \
+ u32 second_dw_mask = (mask) & ((1 << _bit_off) - 1); \
+ _HWS_SET32(p, (v) >> _bit_off, byte_off, 0, (mask) >> _bit_off); \
+ _HWS_SET32(p, (v) & second_dw_mask, (byte_off) + DW_SIZE, \
+ (bit_off + BITS_IN_DW) % BITS_IN_DW, second_dw_mask); \
+ } else { \
+ _HWS_SET32(p, v, byte_off, (bit_off), (mask)); \
+ } \
+ } while (0)
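+
+/* Illustrative note: a negative bit_off means the field straddles a DW
+ * boundary. HWS_SET32 then splits the write: the upper (mask >> -bit_off)
+ * bits of the value go to the low bits of the current DW, while the
+ * remaining low bits land at the top of the following DW
+ * (byte_off + DW_SIZE).
+ */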
+
+/* Getter for a field contained within a single aligned 32bit DW */
+#define HWS_GET32(p, byte_off, bit_off, mask) \
+ ((be32_to_cpu(*((__be32 *)(p) + ((byte_off) / 4))) >> (bit_off)) & (mask))
+
+#define HWS_CALC_FNAME(field, inner) \
+ ((inner) ? MLX5HWS_DEFINER_FNAME_##field##_I : \
+ MLX5HWS_DEFINER_FNAME_##field##_O)
+
+#define HWS_GET_MATCH_PARAM(match_param, hdr) \
+ MLX5_GET(fte_match_param, match_param, hdr)
+
+#define HWS_IS_FLD_SET(match_param, hdr) \
+ (!!(HWS_GET_MATCH_PARAM(match_param, hdr)))
+
+#define HWS_IS_FLD_SET_DW_ARR(match_param, hdr, sz_in_bits) ({ \
+ BUILD_BUG_ON((sz_in_bits) % 32); \
+ u32 sz = sz_in_bits; \
+ u32 res = 0; \
+ u32 dw_off = __mlx5_dw_off(fte_match_param, hdr); \
+ while (!res && sz >= 32) { \
+ res = *((match_param) + (dw_off++)); \
+ sz -= 32; \
+ } \
+ res; \
+ })
+
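+/* Check whether a match field is set; fields wider than one DW are scanned
+ * a DW at a time using HWS_IS_FLD_SET_DW_ARR() above.
+ */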
+#define HWS_IS_FLD_SET_SZ(match_param, hdr, sz_in_bits) \
+ (((sz_in_bits) > 32) ? HWS_IS_FLD_SET_DW_ARR(match_param, hdr, sz_in_bits) : \
+ !!(HWS_GET_MATCH_PARAM(match_param, hdr)))
+
+#define HWS_GET64_MATCH_PARAM(match_param, hdr) \
+ MLX5_GET64(fte_match_param, match_param, hdr)
+
+#define HWS_IS_FLD64_SET(match_param, hdr) \
+ (!!(HWS_GET64_MATCH_PARAM(match_param, hdr)))
+
+#define HWS_CALC_HDR_SRC(fc, s_hdr) \
+ do { \
+ (fc)->s_bit_mask = __mlx5_mask(fte_match_param, s_hdr); \
+ (fc)->s_bit_off = __mlx5_dw_bit_off(fte_match_param, s_hdr); \
+ (fc)->s_byte_off = MLX5_BYTE_OFF(fte_match_param, s_hdr); \
+ } while (0)
+
+#define HWS_CALC_HDR_DST(fc, d_hdr) \
+ do { \
+ (fc)->bit_mask = __mlx5_mask(definer_hl, d_hdr); \
+ (fc)->bit_off = __mlx5_dw_bit_off(definer_hl, d_hdr); \
+ (fc)->byte_off = MLX5_BYTE_OFF(definer_hl, d_hdr); \
+ } while (0)
+
+#define HWS_CALC_HDR(fc, s_hdr, d_hdr) \
+ do { \
+ HWS_CALC_HDR_SRC(fc, s_hdr); \
+ HWS_CALC_HDR_DST(fc, d_hdr); \
+ (fc)->tag_set = &hws_definer_generic_set; \
+ } while (0)
+
+#define HWS_SET_HDR(fc_arr, match_param, fname, s_hdr, d_hdr) \
+ do { \
+ if (HWS_IS_FLD_SET(match_param, s_hdr)) \
+ HWS_CALC_HDR(&(fc_arr)[MLX5HWS_DEFINER_FNAME_##fname], s_hdr, d_hdr); \
+ } while (0)
+
+struct mlx5hws_definer_sel_ctrl {
+ u8 allowed_full_dw; /* Full DW selectors cover all offsets */
+ u8 allowed_lim_dw; /* Limited DW selectors cover offset < 64 */
+ u8 allowed_bytes; /* Bytes selectors, up to offset 255 */
+ u8 used_full_dw;
+ u8 used_lim_dw;
+ u8 used_bytes;
+ u8 full_dw_selector[DW_SELECTORS];
+ u8 lim_dw_selector[DW_SELECTORS_LIMITED];
+ u8 byte_selector[BYTE_SELECTORS];
+};
+
+struct mlx5hws_definer_conv_data {
+ struct mlx5hws_context *ctx;
+ struct mlx5hws_definer_fc *fc;
+ /* enum mlx5hws_definer_match_flag */
+ u32 match_flags;
+};
+
+#define HWS_DEFINER_ENTRY(name) [MLX5HWS_DEFINER_FNAME_##name] = #name
+
+static const char * const hws_definer_fname_to_str[] = {
+ HWS_DEFINER_ENTRY(ETH_SMAC_47_16_O),
+ HWS_DEFINER_ENTRY(ETH_SMAC_47_16_I),
+ HWS_DEFINER_ENTRY(ETH_SMAC_15_0_O),
+ HWS_DEFINER_ENTRY(ETH_SMAC_15_0_I),
+ HWS_DEFINER_ENTRY(ETH_DMAC_47_16_O),
+ HWS_DEFINER_ENTRY(ETH_DMAC_47_16_I),
+ HWS_DEFINER_ENTRY(ETH_DMAC_15_0_O),
+ HWS_DEFINER_ENTRY(ETH_DMAC_15_0_I),
+ HWS_DEFINER_ENTRY(ETH_TYPE_O),
+ HWS_DEFINER_ENTRY(ETH_TYPE_I),
+ HWS_DEFINER_ENTRY(ETH_L3_TYPE_O),
+ HWS_DEFINER_ENTRY(ETH_L3_TYPE_I),
+ HWS_DEFINER_ENTRY(VLAN_TYPE_O),
+ HWS_DEFINER_ENTRY(VLAN_TYPE_I),
+ HWS_DEFINER_ENTRY(VLAN_FIRST_PRIO_O),
+ HWS_DEFINER_ENTRY(VLAN_FIRST_PRIO_I),
+ HWS_DEFINER_ENTRY(VLAN_CFI_O),
+ HWS_DEFINER_ENTRY(VLAN_CFI_I),
+ HWS_DEFINER_ENTRY(VLAN_ID_O),
+ HWS_DEFINER_ENTRY(VLAN_ID_I),
+ HWS_DEFINER_ENTRY(VLAN_SECOND_TYPE_O),
+ HWS_DEFINER_ENTRY(VLAN_SECOND_TYPE_I),
+ HWS_DEFINER_ENTRY(VLAN_SECOND_PRIO_O),
+ HWS_DEFINER_ENTRY(VLAN_SECOND_PRIO_I),
+ HWS_DEFINER_ENTRY(VLAN_SECOND_CFI_O),
+ HWS_DEFINER_ENTRY(VLAN_SECOND_CFI_I),
+ HWS_DEFINER_ENTRY(VLAN_SECOND_ID_O),
+ HWS_DEFINER_ENTRY(VLAN_SECOND_ID_I),
+ HWS_DEFINER_ENTRY(IPV4_IHL_O),
+ HWS_DEFINER_ENTRY(IPV4_IHL_I),
+ HWS_DEFINER_ENTRY(IP_DSCP_O),
+ HWS_DEFINER_ENTRY(IP_DSCP_I),
+ HWS_DEFINER_ENTRY(IP_ECN_O),
+ HWS_DEFINER_ENTRY(IP_ECN_I),
+ HWS_DEFINER_ENTRY(IP_TTL_O),
+ HWS_DEFINER_ENTRY(IP_TTL_I),
+ HWS_DEFINER_ENTRY(IPV4_DST_O),
+ HWS_DEFINER_ENTRY(IPV4_DST_I),
+ HWS_DEFINER_ENTRY(IPV4_SRC_O),
+ HWS_DEFINER_ENTRY(IPV4_SRC_I),
+ HWS_DEFINER_ENTRY(IP_VERSION_O),
+ HWS_DEFINER_ENTRY(IP_VERSION_I),
+ HWS_DEFINER_ENTRY(IP_FRAG_O),
+ HWS_DEFINER_ENTRY(IP_FRAG_I),
+ HWS_DEFINER_ENTRY(IP_LEN_O),
+ HWS_DEFINER_ENTRY(IP_LEN_I),
+ HWS_DEFINER_ENTRY(IP_TOS_O),
+ HWS_DEFINER_ENTRY(IP_TOS_I),
+ HWS_DEFINER_ENTRY(IPV6_FLOW_LABEL_O),
+ HWS_DEFINER_ENTRY(IPV6_FLOW_LABEL_I),
+ HWS_DEFINER_ENTRY(IPV6_DST_127_96_O),
+ HWS_DEFINER_ENTRY(IPV6_DST_95_64_O),
+ HWS_DEFINER_ENTRY(IPV6_DST_63_32_O),
+ HWS_DEFINER_ENTRY(IPV6_DST_31_0_O),
+ HWS_DEFINER_ENTRY(IPV6_DST_127_96_I),
+ HWS_DEFINER_ENTRY(IPV6_DST_95_64_I),
+ HWS_DEFINER_ENTRY(IPV6_DST_63_32_I),
+ HWS_DEFINER_ENTRY(IPV6_DST_31_0_I),
+ HWS_DEFINER_ENTRY(IPV6_SRC_127_96_O),
+ HWS_DEFINER_ENTRY(IPV6_SRC_95_64_O),
+ HWS_DEFINER_ENTRY(IPV6_SRC_63_32_O),
+ HWS_DEFINER_ENTRY(IPV6_SRC_31_0_O),
+ HWS_DEFINER_ENTRY(IPV6_SRC_127_96_I),
+ HWS_DEFINER_ENTRY(IPV6_SRC_95_64_I),
+ HWS_DEFINER_ENTRY(IPV6_SRC_63_32_I),
+ HWS_DEFINER_ENTRY(IPV6_SRC_31_0_I),
+ HWS_DEFINER_ENTRY(IP_PROTOCOL_O),
+ HWS_DEFINER_ENTRY(IP_PROTOCOL_I),
+ HWS_DEFINER_ENTRY(L4_SPORT_O),
+ HWS_DEFINER_ENTRY(L4_SPORT_I),
+ HWS_DEFINER_ENTRY(L4_DPORT_O),
+ HWS_DEFINER_ENTRY(L4_DPORT_I),
+ HWS_DEFINER_ENTRY(TCP_FLAGS_I),
+ HWS_DEFINER_ENTRY(TCP_FLAGS_O),
+ HWS_DEFINER_ENTRY(TCP_SEQ_NUM),
+ HWS_DEFINER_ENTRY(TCP_ACK_NUM),
+ HWS_DEFINER_ENTRY(GTP_TEID),
+ HWS_DEFINER_ENTRY(GTP_MSG_TYPE),
+ HWS_DEFINER_ENTRY(GTP_EXT_FLAG),
+ HWS_DEFINER_ENTRY(GTP_NEXT_EXT_HDR),
+ HWS_DEFINER_ENTRY(GTP_EXT_HDR_PDU),
+ HWS_DEFINER_ENTRY(GTP_EXT_HDR_QFI),
+ HWS_DEFINER_ENTRY(GTPU_DW0),
+ HWS_DEFINER_ENTRY(GTPU_FIRST_EXT_DW0),
+ HWS_DEFINER_ENTRY(GTPU_DW2),
+ HWS_DEFINER_ENTRY(FLEX_PARSER_0),
+ HWS_DEFINER_ENTRY(FLEX_PARSER_1),
+ HWS_DEFINER_ENTRY(FLEX_PARSER_2),
+ HWS_DEFINER_ENTRY(FLEX_PARSER_3),
+ HWS_DEFINER_ENTRY(FLEX_PARSER_4),
+ HWS_DEFINER_ENTRY(FLEX_PARSER_5),
+ HWS_DEFINER_ENTRY(FLEX_PARSER_6),
+ HWS_DEFINER_ENTRY(FLEX_PARSER_7),
+ HWS_DEFINER_ENTRY(VPORT_REG_C_0),
+ HWS_DEFINER_ENTRY(VXLAN_FLAGS),
+ HWS_DEFINER_ENTRY(VXLAN_VNI),
+ HWS_DEFINER_ENTRY(VXLAN_GPE_FLAGS),
+ HWS_DEFINER_ENTRY(VXLAN_GPE_RSVD0),
+ HWS_DEFINER_ENTRY(VXLAN_GPE_PROTO),
+ HWS_DEFINER_ENTRY(VXLAN_GPE_VNI),
+ HWS_DEFINER_ENTRY(VXLAN_GPE_RSVD1),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_LEN),
+ HWS_DEFINER_ENTRY(GENEVE_OAM),
+ HWS_DEFINER_ENTRY(GENEVE_PROTO),
+ HWS_DEFINER_ENTRY(GENEVE_VNI),
+ HWS_DEFINER_ENTRY(SOURCE_QP),
+ HWS_DEFINER_ENTRY(SOURCE_GVMI),
+ HWS_DEFINER_ENTRY(REG_0),
+ HWS_DEFINER_ENTRY(REG_1),
+ HWS_DEFINER_ENTRY(REG_2),
+ HWS_DEFINER_ENTRY(REG_3),
+ HWS_DEFINER_ENTRY(REG_4),
+ HWS_DEFINER_ENTRY(REG_5),
+ HWS_DEFINER_ENTRY(REG_6),
+ HWS_DEFINER_ENTRY(REG_7),
+ HWS_DEFINER_ENTRY(REG_8),
+ HWS_DEFINER_ENTRY(REG_9),
+ HWS_DEFINER_ENTRY(REG_10),
+ HWS_DEFINER_ENTRY(REG_11),
+ HWS_DEFINER_ENTRY(REG_A),
+ HWS_DEFINER_ENTRY(REG_B),
+ HWS_DEFINER_ENTRY(GRE_KEY_PRESENT),
+ HWS_DEFINER_ENTRY(GRE_C),
+ HWS_DEFINER_ENTRY(GRE_K),
+ HWS_DEFINER_ENTRY(GRE_S),
+ HWS_DEFINER_ENTRY(GRE_PROTOCOL),
+ HWS_DEFINER_ENTRY(GRE_OPT_KEY),
+ HWS_DEFINER_ENTRY(GRE_OPT_SEQ),
+ HWS_DEFINER_ENTRY(GRE_OPT_CHECKSUM),
+ HWS_DEFINER_ENTRY(INTEGRITY_O),
+ HWS_DEFINER_ENTRY(INTEGRITY_I),
+ HWS_DEFINER_ENTRY(ICMP_DW1),
+ HWS_DEFINER_ENTRY(ICMP_DW2),
+ HWS_DEFINER_ENTRY(ICMP_DW3),
+ HWS_DEFINER_ENTRY(IPSEC_SPI),
+ HWS_DEFINER_ENTRY(IPSEC_SEQUENCE_NUMBER),
+ HWS_DEFINER_ENTRY(IPSEC_SYNDROME),
+ HWS_DEFINER_ENTRY(MPLS0_O),
+ HWS_DEFINER_ENTRY(MPLS1_O),
+ HWS_DEFINER_ENTRY(MPLS2_O),
+ HWS_DEFINER_ENTRY(MPLS3_O),
+ HWS_DEFINER_ENTRY(MPLS4_O),
+ HWS_DEFINER_ENTRY(MPLS0_I),
+ HWS_DEFINER_ENTRY(MPLS1_I),
+ HWS_DEFINER_ENTRY(MPLS2_I),
+ HWS_DEFINER_ENTRY(MPLS3_I),
+ HWS_DEFINER_ENTRY(MPLS4_I),
+ HWS_DEFINER_ENTRY(FLEX_PARSER0_OK),
+ HWS_DEFINER_ENTRY(FLEX_PARSER1_OK),
+ HWS_DEFINER_ENTRY(FLEX_PARSER2_OK),
+ HWS_DEFINER_ENTRY(FLEX_PARSER3_OK),
+ HWS_DEFINER_ENTRY(FLEX_PARSER4_OK),
+ HWS_DEFINER_ENTRY(FLEX_PARSER5_OK),
+ HWS_DEFINER_ENTRY(FLEX_PARSER6_OK),
+ HWS_DEFINER_ENTRY(FLEX_PARSER7_OK),
+ HWS_DEFINER_ENTRY(OKS2_MPLS0_O),
+ HWS_DEFINER_ENTRY(OKS2_MPLS1_O),
+ HWS_DEFINER_ENTRY(OKS2_MPLS2_O),
+ HWS_DEFINER_ENTRY(OKS2_MPLS3_O),
+ HWS_DEFINER_ENTRY(OKS2_MPLS4_O),
+ HWS_DEFINER_ENTRY(OKS2_MPLS0_I),
+ HWS_DEFINER_ENTRY(OKS2_MPLS1_I),
+ HWS_DEFINER_ENTRY(OKS2_MPLS2_I),
+ HWS_DEFINER_ENTRY(OKS2_MPLS3_I),
+ HWS_DEFINER_ENTRY(OKS2_MPLS4_I),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_OK_0),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_OK_1),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_OK_2),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_OK_3),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_OK_4),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_OK_5),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_OK_6),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_OK_7),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_DW_0),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_DW_1),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_DW_2),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_DW_3),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_DW_4),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_DW_5),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_DW_6),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_DW_7),
+ HWS_DEFINER_ENTRY(IB_L4_OPCODE),
+ HWS_DEFINER_ENTRY(IB_L4_QPN),
+ HWS_DEFINER_ENTRY(IB_L4_A),
+ HWS_DEFINER_ENTRY(RANDOM_NUM),
+ HWS_DEFINER_ENTRY(PTYPE_L2_O),
+ HWS_DEFINER_ENTRY(PTYPE_L2_I),
+ HWS_DEFINER_ENTRY(PTYPE_L3_O),
+ HWS_DEFINER_ENTRY(PTYPE_L3_I),
+ HWS_DEFINER_ENTRY(PTYPE_L4_O),
+ HWS_DEFINER_ENTRY(PTYPE_L4_I),
+ HWS_DEFINER_ENTRY(PTYPE_L4_EXT_O),
+ HWS_DEFINER_ENTRY(PTYPE_L4_EXT_I),
+ HWS_DEFINER_ENTRY(PTYPE_FRAG_O),
+ HWS_DEFINER_ENTRY(PTYPE_FRAG_I),
+ HWS_DEFINER_ENTRY(TNL_HDR_0),
+ HWS_DEFINER_ENTRY(TNL_HDR_1),
+ HWS_DEFINER_ENTRY(TNL_HDR_2),
+ HWS_DEFINER_ENTRY(TNL_HDR_3),
+ [MLX5HWS_DEFINER_FNAME_MAX] = "DEFINER_FNAME_UNKNOWN",
+};
+
+const char *mlx5hws_definer_fname_to_str(enum mlx5hws_definer_fname fname)
+{
+ if (fname > MLX5HWS_DEFINER_FNAME_MAX)
+ fname = MLX5HWS_DEFINER_FNAME_MAX;
+ return hws_definer_fname_to_str[fname];
+}
+
+static void
+hws_definer_ones_set(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+{
+ HWS_SET32(tag, -1, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+hws_definer_generic_set(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+{
+ /* Can be optimized */
+ u32 val = HWS_GET32(match_param, fc->s_byte_off, fc->s_bit_off, fc->s_bit_mask);
+
+ HWS_SET32(tag, val, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+hws_definer_outer_vlan_type_set(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+{
+ if (HWS_GET_MATCH_PARAM(match_param, outer_headers.cvlan_tag))
+ HWS_SET32(tag, STE_CVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+ else if (HWS_GET_MATCH_PARAM(match_param, outer_headers.svlan_tag))
+ HWS_SET32(tag, STE_SVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+ else
+ HWS_SET32(tag, STE_NO_VLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+hws_definer_inner_vlan_type_set(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+{
+ if (HWS_GET_MATCH_PARAM(match_param, inner_headers.cvlan_tag))
+ HWS_SET32(tag, STE_CVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+ else if (HWS_GET_MATCH_PARAM(match_param, inner_headers.svlan_tag))
+ HWS_SET32(tag, STE_SVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+ else
+ HWS_SET32(tag, STE_NO_VLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+hws_definer_second_vlan_type_set(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag,
+ bool inner)
+{
+ u32 second_cvlan_tag = inner ?
+ HWS_GET_MATCH_PARAM(match_param, misc_parameters.inner_second_cvlan_tag) :
+ HWS_GET_MATCH_PARAM(match_param, misc_parameters.outer_second_cvlan_tag);
+ u32 second_svlan_tag = inner ?
+ HWS_GET_MATCH_PARAM(match_param, misc_parameters.inner_second_svlan_tag) :
+ HWS_GET_MATCH_PARAM(match_param, misc_parameters.outer_second_svlan_tag);
+
+ if (second_cvlan_tag)
+ HWS_SET32(tag, STE_CVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+ else if (second_svlan_tag)
+ HWS_SET32(tag, STE_SVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+ else
+ HWS_SET32(tag, STE_NO_VLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+hws_definer_inner_second_vlan_type_set(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+{
+ hws_definer_second_vlan_type_set(fc, match_param, tag, true);
+}
+
+static void
+hws_definer_outer_second_vlan_type_set(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+{
+ hws_definer_second_vlan_type_set(fc, match_param, tag, false);
+}
+
+static void hws_definer_icmp_dw1_set(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+{
+ u32 code = HWS_GET_MATCH_PARAM(match_param, misc_parameters_3.icmp_code);
+ u32 type = HWS_GET_MATCH_PARAM(match_param, misc_parameters_3.icmp_type);
+ u32 dw = (type << __mlx5_dw_bit_off(header_icmp, type)) |
+ (code << __mlx5_dw_bit_off(header_icmp, code));
+
+ HWS_SET32(tag, dw, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+hws_definer_icmpv6_dw1_set(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+{
+ u32 code = HWS_GET_MATCH_PARAM(match_param, misc_parameters_3.icmpv6_code);
+ u32 type = HWS_GET_MATCH_PARAM(match_param, misc_parameters_3.icmpv6_type);
+ u32 dw = (type << __mlx5_dw_bit_off(header_icmp, type)) |
+ (code << __mlx5_dw_bit_off(header_icmp, code));
+
+ HWS_SET32(tag, dw, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+hws_definer_l3_type_set(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+{
+ u32 val = HWS_GET32(match_param, fc->s_byte_off, fc->s_bit_off, fc->s_bit_mask);
+
+ if (val == IPV4)
+ HWS_SET32(tag, STE_IPV4, fc->byte_off, fc->bit_off, fc->bit_mask);
+ else if (val == IPV6)
+ HWS_SET32(tag, STE_IPV6, fc->byte_off, fc->bit_off, fc->bit_mask);
+ else
+ HWS_SET32(tag, STE_NO_L3, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+hws_definer_set_source_port_gvmi(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag,
+ struct mlx5hws_context *peer_ctx)
+{
+ u16 source_port = HWS_GET_MATCH_PARAM(match_param, misc_parameters.source_port);
+ u16 vport_gvmi = 0;
+ int ret;
+
+ ret = mlx5hws_vport_get_gvmi(peer_ctx, source_port, &vport_gvmi);
+ if (ret) {
+ HWS_SET32(tag, BAD_PORT, fc->byte_off, fc->bit_off, fc->bit_mask);
+ mlx5hws_err(fc->ctx, "Vport 0x%x is disabled or invalid\n", source_port);
+ return;
+ }
+
+ if (vport_gvmi)
+ HWS_SET32(tag, vport_gvmi, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+hws_definer_set_source_gvmi_vhca_id(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+__must_hold(&fc->ctx->ctrl_lock)
+{
+ int id = HWS_GET_MATCH_PARAM(match_param, misc_parameters.source_eswitch_owner_vhca_id);
+ struct mlx5hws_context *peer_ctx;
+
+ if (id == fc->ctx->caps->vhca_id)
+ peer_ctx = fc->ctx;
+ else
+ peer_ctx = xa_load(&fc->ctx->peer_ctx_xa, id);
+
+ if (!peer_ctx) {
+ HWS_SET32(tag, BAD_PORT, fc->byte_off, fc->bit_off, fc->bit_mask);
+ mlx5hws_err(fc->ctx, "Invalid vhca_id provided 0x%x\n", id);
+ return;
+ }
+
+ hws_definer_set_source_port_gvmi(fc, match_param, tag, peer_ctx);
+}
+
+static void
+hws_definer_set_source_gvmi(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+{
+ hws_definer_set_source_port_gvmi(fc, match_param, tag, fc->ctx);
+}
+
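+/* Map a flex parser index to its steering_ok bit in the definer
+ * header layout.
+ */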
+static struct mlx5hws_definer_fc *
+hws_definer_flex_parser_steering_ok_bits_handler(struct mlx5hws_definer_conv_data *cd,
+ u8 parser_id)
+{
+ struct mlx5hws_definer_fc *fc;
+
+ switch (parser_id) {
+ case 0:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER0_OK];
+ HWS_CALC_HDR_DST(fc, oks1.flex_parser0_steering_ok);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 1:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER1_OK];
+ HWS_CALC_HDR_DST(fc, oks1.flex_parser1_steering_ok);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 2:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER2_OK];
+ HWS_CALC_HDR_DST(fc, oks1.flex_parser2_steering_ok);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 3:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER3_OK];
+ HWS_CALC_HDR_DST(fc, oks1.flex_parser3_steering_ok);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 4:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER4_OK];
+ HWS_CALC_HDR_DST(fc, oks1.flex_parser4_steering_ok);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 5:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER5_OK];
+ HWS_CALC_HDR_DST(fc, oks1.flex_parser5_steering_ok);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 6:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER6_OK];
+ HWS_CALC_HDR_DST(fc, oks1.flex_parser6_steering_ok);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 7:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER7_OK];
+ HWS_CALC_HDR_DST(fc, oks1.flex_parser7_steering_ok);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ default:
+ mlx5hws_err(cd->ctx, "Unsupported flex parser steering ok index %u\n", parser_id);
+ return NULL;
+ }
+
+ return fc;
+}
+
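+/* Map a flex parser index to its flex_parser_N DW in the definer
+ * header layout.
+ */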
+static struct mlx5hws_definer_fc *
+hws_definer_flex_parser_handler(struct mlx5hws_definer_conv_data *cd,
+ u8 parser_id)
+{
+ struct mlx5hws_definer_fc *fc;
+
+ switch (parser_id) {
+ case 0:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_0];
+ HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_0);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 1:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_1];
+ HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_1);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 2:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_2];
+ HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_2);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 3:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_3];
+ HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_3);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 4:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_4];
+ HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_4);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 5:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_5];
+ HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_5);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 6:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_6];
+ HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_6);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 7:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_7];
+ HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_7);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ default:
+ mlx5hws_err(cd->ctx, "Unsupported flex parser %u\n", parser_id);
+ return NULL;
+ }
+
+ return fc;
+}
+
+static struct mlx5hws_definer_fc *
+hws_definer_misc4_fields_handler(struct mlx5hws_definer_conv_data *cd,
+ bool *parser_is_used,
+ u32 id,
+ u32 value)
+{
+ if (id || value) {
+ if (id >= HWS_NUM_OF_FLEX_PARSERS) {
+ mlx5hws_err(cd->ctx, "Unsupported parser id\n");
+ return NULL;
+ }
+
+ if (parser_is_used[id]) {
+			mlx5hws_err(cd->ctx, "Parser id has already been used\n");
+ return NULL;
+ }
+ }
+
+ parser_is_used[id] = true;
+
+ return hws_definer_flex_parser_handler(cd, id);
+}
+
+static int
+hws_definer_check_match_flags(struct mlx5hws_definer_conv_data *cd)
+{
+ u32 flags;
+
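+	/* At most one flag may be set in each group below;
+	 * "flags & (flags - 1)" is non-zero iff more than one bit is set.
+	 */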
+ flags = cd->match_flags & (MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE |
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE |
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU |
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE |
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN |
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_0_1);
+ if (flags & (flags - 1))
+ goto err_conflict;
+
+ flags = cd->match_flags & (MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE_OPT_KEY |
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_2);
+
+ if (flags & (flags - 1))
+ goto err_conflict;
+
+ flags = cd->match_flags & (MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_GRE |
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_UDP);
+ if (flags & (flags - 1))
+ goto err_conflict;
+
+ flags = cd->match_flags & (MLX5HWS_DEFINER_MATCH_FLAG_ICMPV4 |
+ MLX5HWS_DEFINER_MATCH_FLAG_ICMPV6 |
+ MLX5HWS_DEFINER_MATCH_FLAG_TCP_O |
+ MLX5HWS_DEFINER_MATCH_FLAG_TCP_I);
+ if (flags & (flags - 1))
+ goto err_conflict;
+
+ return 0;
+
+err_conflict:
+ mlx5hws_err(cd->ctx, "Invalid definer fields combination: match_flags = 0x%08x\n",
+ cd->match_flags);
+ return -EINVAL;
+}
+
+static int
+hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd,
+ u32 *match_param)
+{
+ bool is_ipv6, smac_set, dmac_set, ip_addr_set, ip_ver_set;
+ struct mlx5hws_definer_fc *fc = cd->fc;
+ struct mlx5hws_definer_fc *curr_fc;
+ u32 *s_ipv6, *d_ipv6;
+
+ if (HWS_IS_FLD_SET_SZ(match_param, outer_headers.l4_type, 0x2) ||
+ HWS_IS_FLD_SET_SZ(match_param, outer_headers.l4_type_ext, 0x4) ||
+ HWS_IS_FLD_SET_SZ(match_param, outer_headers.reserved_at_c6, 0xa) ||
+ HWS_IS_FLD_SET_SZ(match_param, outer_headers.reserved_at_d4, 0x4)) {
+ mlx5hws_err(cd->ctx, "Unsupported outer parameters set\n");
+ return -EINVAL;
+ }
+
+ ip_addr_set = HWS_IS_FLD_SET_SZ(match_param,
+ outer_headers.src_ipv4_src_ipv6,
+ 0x80) ||
+ HWS_IS_FLD_SET_SZ(match_param,
+ outer_headers.dst_ipv4_dst_ipv6, 0x80);
+ ip_ver_set = HWS_IS_FLD_SET(match_param, outer_headers.ip_version) ||
+ HWS_IS_FLD_SET(match_param, outer_headers.ethertype);
+
+ if (ip_addr_set && !ip_ver_set) {
+ mlx5hws_err(cd->ctx,
+ "Unsupported match on IP address without version or ethertype\n");
+ return -EINVAL;
+ }
+
+ /* L2 Check ethertype */
+ HWS_SET_HDR(fc, match_param, ETH_TYPE_O,
+ outer_headers.ethertype,
+ eth_l2_outer.l3_ethertype);
+ /* L2 Check SMAC 47_16 */
+ HWS_SET_HDR(fc, match_param, ETH_SMAC_47_16_O,
+ outer_headers.smac_47_16, eth_l2_src_outer.smac_47_16);
+ /* L2 Check SMAC 15_0 */
+ HWS_SET_HDR(fc, match_param, ETH_SMAC_15_0_O,
+ outer_headers.smac_15_0, eth_l2_src_outer.smac_15_0);
+ /* L2 Check DMAC 47_16 */
+ HWS_SET_HDR(fc, match_param, ETH_DMAC_47_16_O,
+ outer_headers.dmac_47_16, eth_l2_outer.dmac_47_16);
+ /* L2 Check DMAC 15_0 */
+ HWS_SET_HDR(fc, match_param, ETH_DMAC_15_0_O,
+ outer_headers.dmac_15_0, eth_l2_outer.dmac_15_0);
+
+ /* L2 VLAN */
+ HWS_SET_HDR(fc, match_param, VLAN_FIRST_PRIO_O,
+ outer_headers.first_prio, eth_l2_outer.first_priority);
+ HWS_SET_HDR(fc, match_param, VLAN_CFI_O,
+ outer_headers.first_cfi, eth_l2_outer.first_cfi);
+ HWS_SET_HDR(fc, match_param, VLAN_ID_O,
+ outer_headers.first_vid, eth_l2_outer.first_vlan_id);
+
+ /* L2 CVLAN and SVLAN */
+ if (HWS_GET_MATCH_PARAM(match_param, outer_headers.cvlan_tag) ||
+ HWS_GET_MATCH_PARAM(match_param, outer_headers.svlan_tag)) {
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VLAN_TYPE_O];
+ HWS_CALC_HDR_DST(curr_fc, eth_l2_outer.first_vlan_qualifier);
+ curr_fc->tag_set = &hws_definer_outer_vlan_type_set;
+ curr_fc->tag_mask_set = &hws_definer_ones_set;
+ }
+
+ /* L3 Check IP header */
+ HWS_SET_HDR(fc, match_param, IP_PROTOCOL_O,
+ outer_headers.ip_protocol,
+ eth_l3_outer.protocol_next_header);
+ HWS_SET_HDR(fc, match_param, IP_VERSION_O,
+ outer_headers.ip_version,
+ eth_l3_outer.ip_version);
+ HWS_SET_HDR(fc, match_param, IP_TTL_O,
+ outer_headers.ttl_hoplimit,
+ eth_l3_outer.time_to_live_hop_limit);
+
+ /* L3 Check IPv4/IPv6 addresses */
+ s_ipv6 = MLX5_ADDR_OF(fte_match_param, match_param,
+ outer_headers.src_ipv4_src_ipv6.ipv6_layout);
+ d_ipv6 = MLX5_ADDR_OF(fte_match_param, match_param,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_layout);
+
+	/* Assume IPv6 if any of the upper 96 address bits are set;
+	 * the low 32 bits of the layout are shared with IPv4.
+	 */
+ is_ipv6 = s_ipv6[0] || s_ipv6[1] || s_ipv6[2] ||
+ d_ipv6[0] || d_ipv6[1] || d_ipv6[2];
+
+ /* IHL is an IPv4-specific field. */
+ if (is_ipv6 && HWS_IS_FLD_SET(match_param, outer_headers.ipv4_ihl)) {
+ mlx5hws_err(cd->ctx, "Unsupported match on IPv6 address and IPv4 IHL\n");
+ return -EINVAL;
+ }
+
+ if (is_ipv6) {
+ /* Handle IPv6 source address */
+ HWS_SET_HDR(fc, match_param, IPV6_SRC_127_96_O,
+ outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_127_96,
+ ipv6_src_outer.ipv6_address_127_96);
+ HWS_SET_HDR(fc, match_param, IPV6_SRC_95_64_O,
+ outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_95_64,
+ ipv6_src_outer.ipv6_address_95_64);
+ HWS_SET_HDR(fc, match_param, IPV6_SRC_63_32_O,
+ outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_63_32,
+ ipv6_src_outer.ipv6_address_63_32);
+ HWS_SET_HDR(fc, match_param, IPV6_SRC_31_0_O,
+ outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
+ ipv6_src_outer.ipv6_address_31_0);
+ /* Handle IPv6 destination address */
+ HWS_SET_HDR(fc, match_param, IPV6_DST_127_96_O,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_127_96,
+ ipv6_dst_outer.ipv6_address_127_96);
+ HWS_SET_HDR(fc, match_param, IPV6_DST_95_64_O,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_95_64,
+ ipv6_dst_outer.ipv6_address_95_64);
+ HWS_SET_HDR(fc, match_param, IPV6_DST_63_32_O,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_63_32,
+ ipv6_dst_outer.ipv6_address_63_32);
+ HWS_SET_HDR(fc, match_param, IPV6_DST_31_0_O,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
+ ipv6_dst_outer.ipv6_address_31_0);
+ } else {
+ /* Handle IPv4 source address */
+ HWS_SET_HDR(fc, match_param, IPV4_SRC_O,
+ outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
+ ipv4_src_dest_outer.source_address);
+ /* Handle IPv4 destination address */
+ HWS_SET_HDR(fc, match_param, IPV4_DST_O,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
+ ipv4_src_dest_outer.destination_address);
+ }
+
+ /* L4 Handle TCP/UDP */
+ HWS_SET_HDR(fc, match_param, L4_SPORT_O,
+ outer_headers.tcp_sport, eth_l4_outer.source_port);
+ HWS_SET_HDR(fc, match_param, L4_DPORT_O,
+ outer_headers.tcp_dport, eth_l4_outer.destination_port);
+ HWS_SET_HDR(fc, match_param, L4_SPORT_O,
+ outer_headers.udp_sport, eth_l4_outer.source_port);
+ HWS_SET_HDR(fc, match_param, L4_DPORT_O,
+ outer_headers.udp_dport, eth_l4_outer.destination_port);
+ HWS_SET_HDR(fc, match_param, TCP_FLAGS_O,
+ outer_headers.tcp_flags, eth_l4_outer.tcp_flags);
+
+ /* L3 Handle DSCP, ECN and IHL */
+ HWS_SET_HDR(fc, match_param, IP_DSCP_O,
+ outer_headers.ip_dscp, eth_l3_outer.dscp);
+ HWS_SET_HDR(fc, match_param, IP_ECN_O,
+ outer_headers.ip_ecn, eth_l3_outer.ecn);
+ HWS_SET_HDR(fc, match_param, IPV4_IHL_O,
+ outer_headers.ipv4_ihl, eth_l3_outer.ihl);
+
+	/* Set IP fragmented bit. The bit exists in more than one header
+	 * layout; use the L4 layout when SMAC and DMAC are matched
+	 * symmetrically, otherwise the L2 source layout.
+	 */
+ if (HWS_IS_FLD_SET(match_param, outer_headers.frag)) {
+ smac_set = HWS_IS_FLD_SET(match_param, outer_headers.smac_15_0) ||
+ HWS_IS_FLD_SET(match_param, outer_headers.smac_47_16);
+ dmac_set = HWS_IS_FLD_SET(match_param, outer_headers.dmac_15_0) ||
+ HWS_IS_FLD_SET(match_param, outer_headers.dmac_47_16);
+ if (smac_set == dmac_set) {
+ HWS_SET_HDR(fc, match_param, IP_FRAG_O,
+ outer_headers.frag, eth_l4_outer.ip_fragmented);
+ } else {
+ HWS_SET_HDR(fc, match_param, IP_FRAG_O,
+ outer_headers.frag, eth_l2_src_outer.ip_fragmented);
+ }
+ }
+
+ /* L3_type set */
+ if (HWS_IS_FLD_SET(match_param, outer_headers.ip_version)) {
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_O];
+ HWS_CALC_HDR_DST(curr_fc, eth_l2_outer.l3_type);
+ curr_fc->tag_set = &hws_definer_l3_type_set;
+ curr_fc->tag_mask_set = &hws_definer_ones_set;
+ HWS_CALC_HDR_SRC(curr_fc, outer_headers.ip_version);
+ }
+
+ return 0;
+}
+
+static int
+hws_definer_conv_inner(struct mlx5hws_definer_conv_data *cd,
+ u32 *match_param)
+{
+ bool is_ipv6, smac_set, dmac_set, ip_addr_set, ip_ver_set;
+ struct mlx5hws_definer_fc *fc = cd->fc;
+ struct mlx5hws_definer_fc *curr_fc;
+ u32 *s_ipv6, *d_ipv6;
+
+ if (HWS_IS_FLD_SET_SZ(match_param, inner_headers.l4_type, 0x2) ||
+ HWS_IS_FLD_SET_SZ(match_param, inner_headers.l4_type_ext, 0x4) ||
+ HWS_IS_FLD_SET_SZ(match_param, inner_headers.reserved_at_c6, 0xa) ||
+ HWS_IS_FLD_SET_SZ(match_param, inner_headers.reserved_at_d4, 0x4)) {
+ mlx5hws_err(cd->ctx, "Unsupported inner parameters set\n");
+ return -EINVAL;
+ }
+
+ ip_addr_set = HWS_IS_FLD_SET_SZ(match_param,
+ inner_headers.src_ipv4_src_ipv6,
+ 0x80) ||
+ HWS_IS_FLD_SET_SZ(match_param,
+ inner_headers.dst_ipv4_dst_ipv6, 0x80);
+ ip_ver_set = HWS_IS_FLD_SET(match_param, inner_headers.ip_version) ||
+ HWS_IS_FLD_SET(match_param, inner_headers.ethertype);
+
+ if (ip_addr_set && !ip_ver_set) {
+ mlx5hws_err(cd->ctx,
+ "Unsupported match on IP address without version or ethertype\n");
+ return -EINVAL;
+ }
+
+ /* L2 Check ethertype */
+ HWS_SET_HDR(fc, match_param, ETH_TYPE_I,
+ inner_headers.ethertype,
+ eth_l2_inner.l3_ethertype);
+ /* L2 Check SMAC 47_16 */
+ HWS_SET_HDR(fc, match_param, ETH_SMAC_47_16_I,
+ inner_headers.smac_47_16, eth_l2_src_inner.smac_47_16);
+ /* L2 Check SMAC 15_0 */
+ HWS_SET_HDR(fc, match_param, ETH_SMAC_15_0_I,
+ inner_headers.smac_15_0, eth_l2_src_inner.smac_15_0);
+ /* L2 Check DMAC 47_16 */
+ HWS_SET_HDR(fc, match_param, ETH_DMAC_47_16_I,
+ inner_headers.dmac_47_16, eth_l2_inner.dmac_47_16);
+ /* L2 Check DMAC 15_0 */
+ HWS_SET_HDR(fc, match_param, ETH_DMAC_15_0_I,
+ inner_headers.dmac_15_0, eth_l2_inner.dmac_15_0);
+
+ /* L2 VLAN */
+ HWS_SET_HDR(fc, match_param, VLAN_FIRST_PRIO_I,
+ inner_headers.first_prio, eth_l2_inner.first_priority);
+ HWS_SET_HDR(fc, match_param, VLAN_CFI_I,
+ inner_headers.first_cfi, eth_l2_inner.first_cfi);
+ HWS_SET_HDR(fc, match_param, VLAN_ID_I,
+ inner_headers.first_vid, eth_l2_inner.first_vlan_id);
+
+ /* L2 CVLAN and SVLAN */
+ if (HWS_GET_MATCH_PARAM(match_param, inner_headers.cvlan_tag) ||
+ HWS_GET_MATCH_PARAM(match_param, inner_headers.svlan_tag)) {
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VLAN_TYPE_I];
+ HWS_CALC_HDR_DST(curr_fc, eth_l2_inner.first_vlan_qualifier);
+ curr_fc->tag_set = &hws_definer_inner_vlan_type_set;
+ curr_fc->tag_mask_set = &hws_definer_ones_set;
+ }
+ /* L3 Check IP header */
+ HWS_SET_HDR(fc, match_param, IP_PROTOCOL_I,
+ inner_headers.ip_protocol,
+ eth_l3_inner.protocol_next_header);
+ HWS_SET_HDR(fc, match_param, IP_VERSION_I,
+ inner_headers.ip_version,
+ eth_l3_inner.ip_version);
+ HWS_SET_HDR(fc, match_param, IP_TTL_I,
+ inner_headers.ttl_hoplimit,
+ eth_l3_inner.time_to_live_hop_limit);
+
+ /* L3 Check IPv4/IPv6 addresses */
+ s_ipv6 = MLX5_ADDR_OF(fte_match_param, match_param,
+ inner_headers.src_ipv4_src_ipv6.ipv6_layout);
+ d_ipv6 = MLX5_ADDR_OF(fte_match_param, match_param,
+ inner_headers.dst_ipv4_dst_ipv6.ipv6_layout);
+
+	/* Assume IPv6 if any of the upper 96 address bits are set;
+	 * the low 32 bits of the layout are shared with IPv4.
+	 */
+ is_ipv6 = s_ipv6[0] || s_ipv6[1] || s_ipv6[2] ||
+ d_ipv6[0] || d_ipv6[1] || d_ipv6[2];
+
+ /* IHL is an IPv4-specific field. */
+ if (is_ipv6 && HWS_IS_FLD_SET(match_param, inner_headers.ipv4_ihl)) {
+ mlx5hws_err(cd->ctx, "Unsupported match on IPv6 address and IPv4 IHL\n");
+ return -EINVAL;
+ }
+
+ if (is_ipv6) {
+ /* Handle IPv6 source address */
+ HWS_SET_HDR(fc, match_param, IPV6_SRC_127_96_I,
+ inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_127_96,
+ ipv6_src_inner.ipv6_address_127_96);
+ HWS_SET_HDR(fc, match_param, IPV6_SRC_95_64_I,
+ inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_95_64,
+ ipv6_src_inner.ipv6_address_95_64);
+ HWS_SET_HDR(fc, match_param, IPV6_SRC_63_32_I,
+ inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_63_32,
+ ipv6_src_inner.ipv6_address_63_32);
+ HWS_SET_HDR(fc, match_param, IPV6_SRC_31_0_I,
+ inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
+ ipv6_src_inner.ipv6_address_31_0);
+ /* Handle IPv6 destination address */
+ HWS_SET_HDR(fc, match_param, IPV6_DST_127_96_I,
+ inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_127_96,
+ ipv6_dst_inner.ipv6_address_127_96);
+ HWS_SET_HDR(fc, match_param, IPV6_DST_95_64_I,
+ inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_95_64,
+ ipv6_dst_inner.ipv6_address_95_64);
+ HWS_SET_HDR(fc, match_param, IPV6_DST_63_32_I,
+ inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_63_32,
+ ipv6_dst_inner.ipv6_address_63_32);
+ HWS_SET_HDR(fc, match_param, IPV6_DST_31_0_I,
+ inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
+ ipv6_dst_inner.ipv6_address_31_0);
+ } else {
+ /* Handle IPv4 source address */
+ HWS_SET_HDR(fc, match_param, IPV4_SRC_I,
+ inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
+ ipv4_src_dest_inner.source_address);
+ /* Handle IPv4 destination address */
+ HWS_SET_HDR(fc, match_param, IPV4_DST_I,
+ inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
+ ipv4_src_dest_inner.destination_address);
+ }
+
+ /* L4 Handle TCP/UDP */
+ HWS_SET_HDR(fc, match_param, L4_SPORT_I,
+ inner_headers.tcp_sport, eth_l4_inner.source_port);
+ HWS_SET_HDR(fc, match_param, L4_DPORT_I,
+ inner_headers.tcp_dport, eth_l4_inner.destination_port);
+ HWS_SET_HDR(fc, match_param, L4_SPORT_I,
+ inner_headers.udp_sport, eth_l4_inner.source_port);
+ HWS_SET_HDR(fc, match_param, L4_DPORT_I,
+ inner_headers.udp_dport, eth_l4_inner.destination_port);
+ HWS_SET_HDR(fc, match_param, TCP_FLAGS_I,
+ inner_headers.tcp_flags, eth_l4_inner.tcp_flags);
+
+ /* L3 Handle DSCP, ECN and IHL */
+ HWS_SET_HDR(fc, match_param, IP_DSCP_I,
+ inner_headers.ip_dscp, eth_l3_inner.dscp);
+ HWS_SET_HDR(fc, match_param, IP_ECN_I,
+ inner_headers.ip_ecn, eth_l3_inner.ecn);
+ HWS_SET_HDR(fc, match_param, IPV4_IHL_I,
+ inner_headers.ipv4_ihl, eth_l3_inner.ihl);
+
+ /* Set IP fragmented bit */
+ if (HWS_IS_FLD_SET(match_param, inner_headers.frag)) {
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.vxlan_vni)) {
+ HWS_SET_HDR(fc, match_param, IP_FRAG_I,
+ inner_headers.frag, eth_l2_inner.ip_fragmented);
+ } else {
+ smac_set = HWS_IS_FLD_SET(match_param, inner_headers.smac_15_0) ||
+ HWS_IS_FLD_SET(match_param, inner_headers.smac_47_16);
+ dmac_set = HWS_IS_FLD_SET(match_param, inner_headers.dmac_15_0) ||
+ HWS_IS_FLD_SET(match_param, inner_headers.dmac_47_16);
+ if (smac_set == dmac_set) {
+ HWS_SET_HDR(fc, match_param, IP_FRAG_I,
+ inner_headers.frag, eth_l4_inner.ip_fragmented);
+ } else {
+ HWS_SET_HDR(fc, match_param, IP_FRAG_I,
+ inner_headers.frag, eth_l2_src_inner.ip_fragmented);
+ }
+ }
+ }
+
+ /* L3_type set */
+ if (HWS_IS_FLD_SET(match_param, inner_headers.ip_version)) {
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_I];
+ HWS_CALC_HDR_DST(curr_fc, eth_l2_inner.l3_type);
+ curr_fc->tag_set = &hws_definer_l3_type_set;
+ curr_fc->tag_mask_set = &hws_definer_ones_set;
+ HWS_CALC_HDR_SRC(curr_fc, inner_headers.ip_version);
+ }
+
+ return 0;
+}
+
+static int
+hws_definer_conv_misc(struct mlx5hws_definer_conv_data *cd,
+ u32 *match_param)
+{
+ struct mlx5hws_cmd_query_caps *caps = cd->ctx->caps;
+ struct mlx5hws_definer_fc *fc = cd->fc;
+ struct mlx5hws_definer_fc *curr_fc;
+
+ if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_1, 0x1) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_64, 0xc) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_d8, 0x6) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_e0, 0xc) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_100, 0xc) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_120, 0xa) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_140, 0x8) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters.bth_dst_qp) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters.bth_opcode) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters.inner_esp_spi) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters.outer_esp_spi) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters.source_vhca_port) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_1a0, 0x60)) {
+ mlx5hws_err(cd->ctx, "Unsupported misc parameters set\n");
+ return -EINVAL;
+ }
+
+ /* Check GRE related fields */
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_c_present)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE;
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GRE_C];
+ HWS_CALC_HDR(curr_fc,
+ misc_parameters.gre_c_present,
+ tunnel_header.tunnel_header_0);
+ curr_fc->bit_mask = __mlx5_mask(header_gre, gre_c_present);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_c_present);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_k_present)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE;
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GRE_K];
+ HWS_CALC_HDR(curr_fc,
+ misc_parameters.gre_k_present,
+ tunnel_header.tunnel_header_0);
+ curr_fc->bit_mask = __mlx5_mask(header_gre, gre_k_present);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_k_present);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_s_present)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE;
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GRE_S];
+ HWS_CALC_HDR(curr_fc,
+ misc_parameters.gre_s_present,
+ tunnel_header.tunnel_header_0);
+ curr_fc->bit_mask = __mlx5_mask(header_gre, gre_s_present);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_s_present);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_protocol)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE;
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GRE_PROTOCOL];
+ HWS_CALC_HDR(curr_fc,
+ misc_parameters.gre_protocol,
+ tunnel_header.tunnel_header_0);
+ curr_fc->bit_mask = __mlx5_mask(header_gre, gre_protocol);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_protocol);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_key.key)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE |
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE_OPT_KEY;
+ HWS_SET_HDR(fc, match_param, GRE_OPT_KEY,
+ misc_parameters.gre_key.key, tunnel_header.tunnel_header_2);
+ }
+
+ /* Check GENEVE related fields */
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.geneve_vni)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GENEVE_VNI];
+ HWS_CALC_HDR(curr_fc,
+ misc_parameters.geneve_vni,
+ tunnel_header.tunnel_header_1);
+ curr_fc->bit_mask = __mlx5_mask(header_geneve, vni);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_geneve, vni);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.geneve_opt_len)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GENEVE_OPT_LEN];
+ HWS_CALC_HDR(curr_fc,
+ misc_parameters.geneve_opt_len,
+ tunnel_header.tunnel_header_0);
+ curr_fc->bit_mask = __mlx5_mask(header_geneve, opt_len);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_geneve, opt_len);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.geneve_protocol_type)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GENEVE_PROTO];
+ HWS_CALC_HDR(curr_fc,
+ misc_parameters.geneve_protocol_type,
+ tunnel_header.tunnel_header_0);
+ curr_fc->bit_mask = __mlx5_mask(header_geneve, protocol_type);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_geneve, protocol_type);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.geneve_oam)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GENEVE_OAM];
+ HWS_CALC_HDR(curr_fc,
+ misc_parameters.geneve_oam,
+ tunnel_header.tunnel_header_0);
+ curr_fc->bit_mask = __mlx5_mask(header_geneve, o_flag);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_geneve, o_flag);
+ }
+
+ HWS_SET_HDR(fc, match_param, SOURCE_QP,
+ misc_parameters.source_sqn, source_qp_gvmi.source_qp);
+ HWS_SET_HDR(fc, match_param, IPV6_FLOW_LABEL_O,
+ misc_parameters.outer_ipv6_flow_label, eth_l3_outer.flow_label);
+ HWS_SET_HDR(fc, match_param, IPV6_FLOW_LABEL_I,
+ misc_parameters.inner_ipv6_flow_label, eth_l3_inner.flow_label);
+
+ /* L2 Second VLAN */
+ HWS_SET_HDR(fc, match_param, VLAN_SECOND_PRIO_O,
+ misc_parameters.outer_second_prio, eth_l2_outer.second_priority);
+ HWS_SET_HDR(fc, match_param, VLAN_SECOND_PRIO_I,
+ misc_parameters.inner_second_prio, eth_l2_inner.second_priority);
+ HWS_SET_HDR(fc, match_param, VLAN_SECOND_CFI_O,
+ misc_parameters.outer_second_cfi, eth_l2_outer.second_cfi);
+ HWS_SET_HDR(fc, match_param, VLAN_SECOND_CFI_I,
+ misc_parameters.inner_second_cfi, eth_l2_inner.second_cfi);
+ HWS_SET_HDR(fc, match_param, VLAN_SECOND_ID_O,
+ misc_parameters.outer_second_vid, eth_l2_outer.second_vlan_id);
+ HWS_SET_HDR(fc, match_param, VLAN_SECOND_ID_I,
+ misc_parameters.inner_second_vid, eth_l2_inner.second_vlan_id);
+
+ /* L2 Second CVLAN and SVLAN */
+ if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.outer_second_cvlan_tag) ||
+ HWS_GET_MATCH_PARAM(match_param, misc_parameters.outer_second_svlan_tag)) {
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_O];
+ HWS_CALC_HDR_DST(curr_fc, eth_l2_outer.second_vlan_qualifier);
+ curr_fc->tag_set = &hws_definer_outer_second_vlan_type_set;
+ curr_fc->tag_mask_set = &hws_definer_ones_set;
+ }
+
+ if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.inner_second_cvlan_tag) ||
+ HWS_GET_MATCH_PARAM(match_param, misc_parameters.inner_second_svlan_tag)) {
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_I];
+ HWS_CALC_HDR_DST(curr_fc, eth_l2_inner.second_vlan_qualifier);
+ curr_fc->tag_set = &hws_definer_inner_second_vlan_type_set;
+ curr_fc->tag_mask_set = &hws_definer_ones_set;
+ }
+
+ /* VXLAN VNI */
+ if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.vxlan_vni)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN;
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VXLAN_VNI];
+ HWS_CALC_HDR(curr_fc, misc_parameters.vxlan_vni, tunnel_header.tunnel_header_1);
+ curr_fc->bit_mask = __mlx5_mask(header_vxlan, vni);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_vxlan, vni);
+ }
+
+ /* Flex protocol steering ok bits */
+ if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.geneve_tlv_option_0_exist)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
+
+ if (!caps->flex_parser_ok_bits_supp) {
+ mlx5hws_err(cd->ctx, "Unsupported flex_parser_ok_bits_supp capability\n");
+ return -EOPNOTSUPP;
+ }
+
+ curr_fc = hws_definer_flex_parser_steering_ok_bits_handler(
+ cd, caps->flex_parser_id_geneve_tlv_option_0);
+ if (!curr_fc)
+ return -EINVAL;
+
+ HWS_CALC_HDR_SRC(fc, misc_parameters.geneve_tlv_option_0_exist);
+ }
+
+ if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.source_port)) {
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_SOURCE_GVMI];
+ HWS_CALC_HDR_DST(curr_fc, source_qp_gvmi.source_gvmi);
+ curr_fc->tag_mask_set = &hws_definer_ones_set;
+ curr_fc->tag_set = HWS_IS_FLD_SET(match_param,
+ misc_parameters.source_eswitch_owner_vhca_id) ?
+ &hws_definer_set_source_gvmi_vhca_id :
+ &hws_definer_set_source_gvmi;
+ } else {
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.source_eswitch_owner_vhca_id)) {
+ mlx5hws_err(cd->ctx,
+ "Unsupported source_eswitch_owner_vhca_id field usage\n");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
+static int
+hws_definer_conv_misc2(struct mlx5hws_definer_conv_data *cd,
+ u32 *match_param)
+{
+ struct mlx5hws_cmd_query_caps *caps = cd->ctx->caps;
+ struct mlx5hws_definer_fc *fc = cd->fc;
+ struct mlx5hws_definer_fc *curr_fc;
+
+ if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.psp_syndrome, 0x8) ||
+ HWS_IS_FLD_SET_SZ(match_param,
+ misc_parameters_2.ipsec_next_header, 0x8) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.reserved_at_1c0, 0x40) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_2.macsec_syndrome) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_2.ipsec_syndrome)) {
+ mlx5hws_err(cd->ctx, "Unsupported misc2 parameters set\n");
+ return -EINVAL;
+ }
+
+ HWS_SET_HDR(fc, match_param, MPLS0_O,
+ misc_parameters_2.outer_first_mpls, mpls_outer.mpls0_label);
+ HWS_SET_HDR(fc, match_param, MPLS0_I,
+ misc_parameters_2.inner_first_mpls, mpls_inner.mpls0_label);
+ HWS_SET_HDR(fc, match_param, REG_0,
+ misc_parameters_2.metadata_reg_c_0, registers.register_c_0);
+ HWS_SET_HDR(fc, match_param, REG_1,
+ misc_parameters_2.metadata_reg_c_1, registers.register_c_1);
+ HWS_SET_HDR(fc, match_param, REG_2,
+ misc_parameters_2.metadata_reg_c_2, registers.register_c_2);
+ HWS_SET_HDR(fc, match_param, REG_3,
+ misc_parameters_2.metadata_reg_c_3, registers.register_c_3);
+ HWS_SET_HDR(fc, match_param, REG_4,
+ misc_parameters_2.metadata_reg_c_4, registers.register_c_4);
+ HWS_SET_HDR(fc, match_param, REG_5,
+ misc_parameters_2.metadata_reg_c_5, registers.register_c_5);
+ HWS_SET_HDR(fc, match_param, REG_6,
+ misc_parameters_2.metadata_reg_c_6, registers.register_c_6);
+ HWS_SET_HDR(fc, match_param, REG_7,
+ misc_parameters_2.metadata_reg_c_7, registers.register_c_7);
+ HWS_SET_HDR(fc, match_param, REG_A,
+ misc_parameters_2.metadata_reg_a, metadata.general_purpose);
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_2.outer_first_mpls_over_gre)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_GRE;
+
+ if (!(caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED)) {
+ mlx5hws_err(cd->ctx, "Unsupported misc2 first mpls over gre parameters set\n");
+ return -EOPNOTSUPP;
+ }
+
+ curr_fc = hws_definer_flex_parser_handler(cd, caps->flex_parser_id_mpls_over_gre);
+ if (!curr_fc)
+ return -EINVAL;
+
+ HWS_CALC_HDR_SRC(fc, misc_parameters_2.outer_first_mpls_over_gre);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_2.outer_first_mpls_over_udp)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_UDP;
+
+ if (!(caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED)) {
+ mlx5hws_err(cd->ctx, "Unsupported misc2 first mpls over udp parameters set\n");
+ return -EOPNOTSUPP;
+ }
+
+ curr_fc = hws_definer_flex_parser_handler(cd, caps->flex_parser_id_mpls_over_udp);
+ if (!curr_fc)
+ return -EINVAL;
+
+ HWS_CALC_HDR_SRC(fc, misc_parameters_2.outer_first_mpls_over_udp);
+ }
+
+ return 0;
+}
+
+static int
+hws_definer_conv_misc3(struct mlx5hws_definer_conv_data *cd, u32 *match_param)
+{
+ struct mlx5hws_cmd_query_caps *caps = cd->ctx->caps;
+ struct mlx5hws_definer_fc *fc = cd->fc;
+ struct mlx5hws_definer_fc *curr_fc;
+ bool vxlan_gpe_flex_parser_enabled;
+
+ /* Check reserved and unsupported fields */
+ if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters_3.reserved_at_80, 0x8) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters_3.reserved_at_b0, 0x10) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters_3.reserved_at_170, 0x10) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters_3.reserved_at_1e0, 0x20)) {
+ mlx5hws_err(cd->ctx, "Unsupported misc3 parameters set\n");
+ return -EINVAL;
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.inner_tcp_seq_num) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_3.inner_tcp_ack_num)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TCP_I;
+ HWS_SET_HDR(fc, match_param, TCP_SEQ_NUM,
+ misc_parameters_3.inner_tcp_seq_num, tcp_icmp.tcp_seq);
+ HWS_SET_HDR(fc, match_param, TCP_ACK_NUM,
+ misc_parameters_3.inner_tcp_ack_num, tcp_icmp.tcp_ack);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_tcp_seq_num) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_tcp_ack_num)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TCP_O;
+ HWS_SET_HDR(fc, match_param, TCP_SEQ_NUM,
+ misc_parameters_3.outer_tcp_seq_num, tcp_icmp.tcp_seq);
+ HWS_SET_HDR(fc, match_param, TCP_ACK_NUM,
+ misc_parameters_3.outer_tcp_ack_num, tcp_icmp.tcp_ack);
+ }
+
+ vxlan_gpe_flex_parser_enabled = caps->flex_protocols & MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED;
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_vxlan_gpe_vni)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE;
+
+ if (!vxlan_gpe_flex_parser_enabled) {
+ mlx5hws_err(cd->ctx, "Unsupported VXLAN GPE flex parser\n");
+ return -EOPNOTSUPP;
+ }
+
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VXLAN_GPE_VNI];
+ HWS_CALC_HDR(curr_fc, misc_parameters_3.outer_vxlan_gpe_vni,
+ tunnel_header.tunnel_header_1);
+ curr_fc->bit_mask = __mlx5_mask(header_vxlan_gpe, vni);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_vxlan_gpe, vni);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_vxlan_gpe_next_protocol)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE;
+
+ if (!vxlan_gpe_flex_parser_enabled) {
+ mlx5hws_err(cd->ctx, "Unsupported VXLAN GPE flex parser\n");
+ return -EOPNOTSUPP;
+ }
+
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VXLAN_GPE_PROTO];
+ HWS_CALC_HDR(curr_fc, misc_parameters_3.outer_vxlan_gpe_next_protocol,
+ tunnel_header.tunnel_header_0);
+ curr_fc->byte_off += MLX5_BYTE_OFF(header_vxlan_gpe, protocol);
+ curr_fc->bit_mask = __mlx5_mask(header_vxlan_gpe, protocol);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_vxlan_gpe, protocol);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_vxlan_gpe_flags)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE;
+
+ if (!vxlan_gpe_flex_parser_enabled) {
+ mlx5hws_err(cd->ctx, "Unsupported VXLAN GPE flex parser\n");
+ return -EOPNOTSUPP;
+ }
+
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VXLAN_GPE_FLAGS];
+ HWS_CALC_HDR(curr_fc, misc_parameters_3.outer_vxlan_gpe_flags,
+ tunnel_header.tunnel_header_0);
+ curr_fc->bit_mask = __mlx5_mask(header_vxlan_gpe, flags);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_vxlan_gpe, flags);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_header_data) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_type) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_code)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_ICMPV4;
+
+ if (!(caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED)) {
+ mlx5hws_err(cd->ctx, "Unsupported ICMPv4 flex parser\n");
+ return -EOPNOTSUPP;
+ }
+
+ HWS_SET_HDR(fc, match_param, ICMP_DW3,
+ misc_parameters_3.icmp_header_data, tcp_icmp.icmp_dw3);
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_type) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_code)) {
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_ICMP_DW1];
+ HWS_CALC_HDR_DST(curr_fc, tcp_icmp.icmp_dw1);
+ curr_fc->tag_set = &hws_definer_icmp_dw1_set;
+ }
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_header_data) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_type) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_code)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_ICMPV6;
+
+ if (!(caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED)) {
+			mlx5hws_err(cd->ctx, "Unsupported ICMPv6 flex parser\n");
+ return -EOPNOTSUPP;
+ }
+
+ HWS_SET_HDR(fc, match_param, ICMP_DW3,
+ misc_parameters_3.icmpv6_header_data, tcp_icmp.icmp_dw3);
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_type) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_code)) {
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_ICMP_DW1];
+ HWS_CALC_HDR_DST(curr_fc, tcp_icmp.icmp_dw1);
+ curr_fc->tag_set = &hws_definer_icmpv6_dw1_set;
+ }
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.geneve_tlv_option_0_data)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
+
+ curr_fc =
+ hws_definer_flex_parser_handler(cd,
+ caps->flex_parser_id_geneve_tlv_option_0);
+ if (!curr_fc)
+ return -EINVAL;
+
+ HWS_CALC_HDR_SRC(fc, misc_parameters_3.geneve_tlv_option_0_data);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_teid)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
+
+ if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_TEID_ENABLED)) {
+ mlx5hws_err(cd->ctx, "Unsupported GTPU TEID flex parser\n");
+ return -EOPNOTSUPP;
+ }
+
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_GTP_TEID];
+ fc->tag_set = &hws_definer_generic_set;
+ fc->bit_mask = __mlx5_mask(header_gtp, teid);
+ fc->byte_off = caps->format_select_gtpu_dw_1 * DW_SIZE;
+ HWS_CALC_HDR_SRC(fc, misc_parameters_3.gtpu_teid);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_msg_type)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
+
+ if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_ENABLED)) {
+ mlx5hws_err(cd->ctx, "Unsupported GTPU flex parser\n");
+ return -EOPNOTSUPP;
+ }
+
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_GTP_MSG_TYPE];
+ fc->tag_set = &hws_definer_generic_set;
+ fc->bit_mask = __mlx5_mask(header_gtp, msg_type);
+ fc->bit_off = __mlx5_dw_bit_off(header_gtp, msg_type);
+ fc->byte_off = caps->format_select_gtpu_dw_0 * DW_SIZE;
+ HWS_CALC_HDR_SRC(fc, misc_parameters_3.gtpu_msg_type);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_msg_flags)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
+
+ if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_ENABLED)) {
+ mlx5hws_err(cd->ctx, "Unsupported GTPU flex parser\n");
+ return -EOPNOTSUPP;
+ }
+
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_GTP_MSG_TYPE];
+ fc->tag_set = &hws_definer_generic_set;
+ fc->bit_mask = __mlx5_mask(header_gtp, msg_flags);
+ fc->bit_off = __mlx5_dw_bit_off(header_gtp, msg_flags);
+ fc->byte_off = caps->format_select_gtpu_dw_0 * DW_SIZE;
+ HWS_CALC_HDR_SRC(fc, misc_parameters_3.gtpu_msg_flags);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_dw_2)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
+
+ if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_2_ENABLED)) {
+ mlx5hws_err(cd->ctx, "Unsupported GTPU DW2 flex parser\n");
+ return -EOPNOTSUPP;
+ }
+
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GTPU_DW2];
+ curr_fc->tag_set = &hws_definer_generic_set;
+ curr_fc->bit_mask = -1;
+ curr_fc->byte_off = caps->format_select_gtpu_dw_2 * DW_SIZE;
+ HWS_CALC_HDR_SRC(fc, misc_parameters_3.gtpu_dw_2);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_first_ext_dw_0)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
+
+ if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_FIRST_EXT_DW_0_ENABLED)) {
+ mlx5hws_err(cd->ctx, "Unsupported GTPU first EXT DW0 flex parser\n");
+ return -EOPNOTSUPP;
+ }
+
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GTPU_FIRST_EXT_DW0];
+ curr_fc->tag_set = &hws_definer_generic_set;
+ curr_fc->bit_mask = -1;
+ curr_fc->byte_off = caps->format_select_gtpu_ext_dw_0 * DW_SIZE;
+ HWS_CALC_HDR_SRC(fc, misc_parameters_3.gtpu_first_ext_dw_0);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_dw_0)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
+
+ if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_0_ENABLED)) {
+ mlx5hws_err(cd->ctx, "Unsupported GTPU DW0 flex parser\n");
+ return -EOPNOTSUPP;
+ }
+
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GTPU_DW0];
+ curr_fc->tag_set = &hws_definer_generic_set;
+ curr_fc->bit_mask = -1;
+ curr_fc->byte_off = caps->format_select_gtpu_dw_0 * DW_SIZE;
+ HWS_CALC_HDR_SRC(fc, misc_parameters_3.gtpu_dw_0);
+ }
+
+ return 0;
+}
+
+static int
+hws_definer_conv_misc4(struct mlx5hws_definer_conv_data *cd,
+ u32 *match_param)
+{
+ bool parser_is_used[HWS_NUM_OF_FLEX_PARSERS] = {};
+ struct mlx5hws_definer_fc *fc;
+ u32 id, value;
+
+ if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters_4.reserved_at_100, 0x100)) {
+ mlx5hws_err(cd->ctx, "Unsupported misc4 parameters set\n");
+ return -EINVAL;
+ }
+
+ id = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_id_0);
+ value = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_value_0);
+ fc = hws_definer_misc4_fields_handler(cd, parser_is_used, id, value);
+ if (!fc)
+ return -EINVAL;
+
+ HWS_CALC_HDR_SRC(fc, misc_parameters_4.prog_sample_field_value_0);
+
+ id = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_id_1);
+ value = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_value_1);
+ fc = hws_definer_misc4_fields_handler(cd, parser_is_used, id, value);
+ if (!fc)
+ return -EINVAL;
+
+ HWS_CALC_HDR_SRC(fc, misc_parameters_4.prog_sample_field_value_1);
+
+ id = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_id_2);
+ value = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_value_2);
+ fc = hws_definer_misc4_fields_handler(cd, parser_is_used, id, value);
+ if (!fc)
+ return -EINVAL;
+
+ HWS_CALC_HDR_SRC(fc, misc_parameters_4.prog_sample_field_value_2);
+
+ id = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_id_3);
+ value = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_value_3);
+ fc = hws_definer_misc4_fields_handler(cd, parser_is_used, id, value);
+ if (!fc)
+ return -EINVAL;
+
+ HWS_CALC_HDR_SRC(fc, misc_parameters_4.prog_sample_field_value_3);
+
+ return 0;
+}
+
+static int
+hws_definer_conv_misc5(struct mlx5hws_definer_conv_data *cd,
+ u32 *match_param)
+{
+ struct mlx5hws_definer_fc *fc = cd->fc;
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_5.macsec_tag_0) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_5.macsec_tag_1) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_5.macsec_tag_2) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_5.macsec_tag_3) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters_5.reserved_at_100, 0x100)) {
+ mlx5hws_err(cd->ctx, "Unsupported misc5 parameters set\n");
+ return -EINVAL;
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_5.tunnel_header_0)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_0_1;
+ HWS_SET_HDR(fc, match_param, TNL_HDR_0,
+ misc_parameters_5.tunnel_header_0, tunnel_header.tunnel_header_0);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_5.tunnel_header_1)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_0_1;
+ HWS_SET_HDR(fc, match_param, TNL_HDR_1,
+ misc_parameters_5.tunnel_header_1, tunnel_header.tunnel_header_1);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_5.tunnel_header_2)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_2;
+ HWS_SET_HDR(fc, match_param, TNL_HDR_2,
+ misc_parameters_5.tunnel_header_2, tunnel_header.tunnel_header_2);
+ }
+
+ HWS_SET_HDR(fc, match_param, TNL_HDR_3,
+ misc_parameters_5.tunnel_header_3, tunnel_header.tunnel_header_3);
+
+ return 0;
+}
+
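+/* Count how many entries of the sparse fc array are in use, i.e. have a
+ * tag_set callback assigned.
+ */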
+static int hws_definer_get_fc_size(struct mlx5hws_definer_fc *fc)
+{
+ u32 fc_sz = 0;
+ int i;
+
+ /* For empty matcher, ZERO_SIZE_PTR is returned */
+ if (fc == ZERO_SIZE_PTR)
+ return 0;
+
+ for (i = 0; i < MLX5HWS_DEFINER_FNAME_MAX; i++)
+ if (fc[i].tag_set)
+ fc_sz++;
+ return fc_sz;
+}
+
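+/* Pack only the used entries of the sparse fc array into a newly allocated
+ * dense array, recording each entry's field name index along the way.
+ */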
+static struct mlx5hws_definer_fc *
+hws_definer_alloc_compressed_fc(struct mlx5hws_definer_fc *fc)
+{
+ struct mlx5hws_definer_fc *compressed_fc = NULL;
+ u32 definer_size = hws_definer_get_fc_size(fc);
+ u32 fc_sz = 0;
+ int i;
+
+ compressed_fc = kcalloc(definer_size, sizeof(*compressed_fc), GFP_KERNEL);
+ if (!compressed_fc)
+ return NULL;
+
+ /* For empty matcher, ZERO_SIZE_PTR is returned */
+ if (!definer_size)
+ return compressed_fc;
+
+ for (i = 0, fc_sz = 0; i < MLX5HWS_DEFINER_FNAME_MAX; i++) {
+ if (!fc[i].tag_set)
+ continue;
+
+ fc[i].fname = i;
+ memcpy(&compressed_fc[fc_sz++], &fc[i], sizeof(*compressed_fc));
+ }
+
+ return compressed_fc;
+}
+
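+/* Mark in the header layout (hl) every bit that some field copy matches on */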
+static void
+hws_definer_set_hl(u8 *hl, struct mlx5hws_definer_fc *fc)
+{
+ int i;
+
+ /* nothing to do for empty matcher */
+ if (fc == ZERO_SIZE_PTR)
+ return;
+
+ for (i = 0; i < MLX5HWS_DEFINER_FNAME_MAX; i++) {
+ if (!fc[i].tag_set)
+ continue;
+
+ HWS_SET32(hl, -1, fc[i].byte_off, fc[i].bit_off, fc[i].bit_mask);
+ }
+}
+
+static struct mlx5hws_definer_fc *
+hws_definer_alloc_fc(struct mlx5hws_context *ctx,
+ size_t len)
+{
+ struct mlx5hws_definer_fc *fc;
+ int i;
+
+ fc = kcalloc(len, sizeof(*fc), GFP_KERNEL);
+ if (!fc)
+ return NULL;
+
+ for (i = 0; i < len; i++)
+ fc[i].ctx = ctx;
+
+ return fc;
+}
+
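+/* Translate the match template into field copy (fc) entries and a header
+ * layout bitmap: convert each enabled match criteria group, verify that the
+ * resulting match flags do not conflict, store the compressed fc array on
+ * the match template and mark all matched bits in hl.
+ */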
+static int
+hws_definer_conv_match_params_to_hl(struct mlx5hws_context *ctx,
+ struct mlx5hws_match_template *mt,
+ u8 *hl)
+{
+ struct mlx5hws_definer_conv_data cd = {0};
+ struct mlx5hws_definer_fc *fc;
+ int ret;
+
+ fc = hws_definer_alloc_fc(ctx, MLX5HWS_DEFINER_FNAME_MAX);
+ if (!fc)
+ return -ENOMEM;
+
+ cd.fc = fc;
+ cd.ctx = ctx;
+
+ if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC6) {
+ mlx5hws_err(ctx, "Unsupported match_criteria_enable provided\n");
+ ret = -EOPNOTSUPP;
+ goto err_free_fc;
+ }
+
+ if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_OUTER) {
+ ret = hws_definer_conv_outer(&cd, mt->match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_INNER) {
+ ret = hws_definer_conv_inner(&cd, mt->match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC) {
+ ret = hws_definer_conv_misc(&cd, mt->match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC2) {
+ ret = hws_definer_conv_misc2(&cd, mt->match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC3) {
+ ret = hws_definer_conv_misc3(&cd, mt->match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC4) {
+ ret = hws_definer_conv_misc4(&cd, mt->match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC5) {
+ ret = hws_definer_conv_misc5(&cd, mt->match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+	/* Check that no conflicting fields are set together */
+ ret = hws_definer_check_match_flags(&cd);
+ if (ret)
+ goto err_free_fc;
+
+ /* Allocate fc array on mt */
+ mt->fc = hws_definer_alloc_compressed_fc(fc);
+ if (!mt->fc) {
+ mlx5hws_err(ctx,
+ "Convert match params: failed to set field copy to match template\n");
+ ret = -ENOMEM;
+ goto err_free_fc;
+ }
+ mt->fc_sz = hws_definer_get_fc_size(fc);
+
+ /* Fill in headers layout */
+ hws_definer_set_hl(hl, fc);
+
+ kfree(fc);
+ return 0;
+
+err_free_fc:
+ kfree(fc);
+ return ret;
+}
+
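+/* Map a byte offset in the header layout to the corresponding byte offset
+ * inside the definer tag, scanning the DW selectors first and the byte
+ * selectors afterwards.
+ */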
+static int
+hws_definer_find_byte_in_tag(struct mlx5hws_definer *definer,
+ u32 hl_byte_off,
+ u32 *tag_byte_off)
+{
+ int i, dw_to_scan;
+ u8 byte_offset;
+
+ /* Avoid accessing unused DW selectors */
+ dw_to_scan = mlx5hws_definer_is_jumbo(definer) ?
+ DW_SELECTORS : DW_SELECTORS_MATCH;
+
+	/* Add offset since each DW covers multiple bytes */
+ byte_offset = hl_byte_off % DW_SIZE;
+ for (i = 0; i < dw_to_scan; i++) {
+ if (definer->dw_selector[i] == hl_byte_off / DW_SIZE) {
+ *tag_byte_off = byte_offset + DW_SIZE * (DW_SELECTORS - i - 1);
+ return 0;
+ }
+ }
+
+ /* Add offset to skip DWs in definer */
+ byte_offset = DW_SIZE * DW_SELECTORS;
+ /* Iterate in reverse since the code uses bytes from 7 -> 0 */
+ for (i = BYTE_SELECTORS; i-- > 0 ;) {
+ if (definer->byte_selector[i] == hl_byte_off) {
+ *tag_byte_off = byte_offset + (BYTE_SELECTORS - i - 1);
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
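+/* Rebase each field copy destination offset from header layout coordinates
+ * onto the tag layout of the given definer.
+ */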
+static int
+hws_definer_fc_bind(struct mlx5hws_definer *definer,
+ struct mlx5hws_definer_fc *fc,
+ u32 fc_sz)
+{
+ u32 tag_offset = 0;
+ int ret, byte_diff;
+ u32 i;
+
+ for (i = 0; i < fc_sz; i++) {
+ /* Map header layout byte offset to byte offset in tag */
+ ret = hws_definer_find_byte_in_tag(definer, fc->byte_off, &tag_offset);
+ if (ret)
+ return ret;
+
+ /* Move setter based on the location in the definer */
+ byte_diff = fc->byte_off % DW_SIZE - tag_offset % DW_SIZE;
+ fc->bit_off = fc->bit_off + byte_diff * BITS_IN_BYTE;
+
+ /* Update offset in headers layout to offset in tag */
+ fc->byte_off = tag_offset;
+ fc++;
+ }
+
+ return 0;
+}
+
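+/* Recursive backtracking search: walk the header layout DW by DW and try to
+ * cover every non-zero DW with a limited DW selector, a full DW selector or
+ * individual byte selectors, within the allowed budget of each selector type.
+ */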
+static bool
+hws_definer_best_hl_fit_recu(struct mlx5hws_definer_sel_ctrl *ctrl,
+ u32 cur_dw,
+ u32 *data)
+{
+ u8 bytes_set;
+ int byte_idx;
+ bool ret;
+ int i;
+
+ /* Reached end, nothing left to do */
+ if (cur_dw == MLX5_ST_SZ_DW(definer_hl))
+ return true;
+
+ /* No data set, can skip to next DW */
+ while (!*data) {
+ cur_dw++;
+ data++;
+
+ /* Reached end, nothing left to do */
+ if (cur_dw == MLX5_ST_SZ_DW(definer_hl))
+ return true;
+ }
+
+ /* Used all DW selectors and Byte selectors, no possible solution */
+ if (ctrl->allowed_full_dw == ctrl->used_full_dw &&
+ ctrl->allowed_lim_dw == ctrl->used_lim_dw &&
+ ctrl->allowed_bytes == ctrl->used_bytes)
+ return false;
+
+ /* Try to use limited DW selectors */
+ if (ctrl->allowed_lim_dw > ctrl->used_lim_dw && cur_dw < 64) {
+ ctrl->lim_dw_selector[ctrl->used_lim_dw++] = cur_dw;
+
+ ret = hws_definer_best_hl_fit_recu(ctrl, cur_dw + 1, data + 1);
+ if (ret)
+ return ret;
+
+ ctrl->lim_dw_selector[--ctrl->used_lim_dw] = 0;
+ }
+
+ /* Try to use DW selectors */
+ if (ctrl->allowed_full_dw > ctrl->used_full_dw) {
+ ctrl->full_dw_selector[ctrl->used_full_dw++] = cur_dw;
+
+ ret = hws_definer_best_hl_fit_recu(ctrl, cur_dw + 1, data + 1);
+ if (ret)
+ return ret;
+
+ ctrl->full_dw_selector[--ctrl->used_full_dw] = 0;
+ }
+
+ /* No byte selector for offset bigger than 255 */
+ if (cur_dw * DW_SIZE > 255)
+ return false;
+
+ bytes_set = !!(0x000000ff & *data) +
+ !!(0x0000ff00 & *data) +
+ !!(0x00ff0000 & *data) +
+ !!(0xff000000 & *data);
+
+ /* Check if there are enough byte selectors left */
+ if (bytes_set + ctrl->used_bytes > ctrl->allowed_bytes)
+ return false;
+
+ /* Try to use Byte selectors */
+ for (i = 0; i < DW_SIZE; i++)
+ if ((0xff000000 >> (i * BITS_IN_BYTE)) & be32_to_cpu((__force __be32)*data)) {
+ /* Use byte selectors high to low */
+ byte_idx = ctrl->allowed_bytes - ctrl->used_bytes - 1;
+ ctrl->byte_selector[byte_idx] = cur_dw * DW_SIZE + i;
+ ctrl->used_bytes++;
+ }
+
+ ret = hws_definer_best_hl_fit_recu(ctrl, cur_dw + 1, data + 1);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < DW_SIZE; i++)
+ if ((0xff << (i * BITS_IN_BYTE)) & be32_to_cpu((__force __be32)*data)) {
+ ctrl->used_bytes--;
+ byte_idx = ctrl->allowed_bytes - ctrl->used_bytes - 1;
+ ctrl->byte_selector[byte_idx] = 0;
+ }
+
+ return false;
+}
+
+static void
+hws_definer_copy_sel_ctrl(struct mlx5hws_definer_sel_ctrl *ctrl,
+ struct mlx5hws_definer *definer)
+{
+ memcpy(definer->byte_selector, ctrl->byte_selector, ctrl->allowed_bytes);
+ memcpy(definer->dw_selector, ctrl->full_dw_selector, ctrl->allowed_full_dw);
+ memcpy(definer->dw_selector + ctrl->allowed_full_dw,
+ ctrl->lim_dw_selector, ctrl->allowed_lim_dw);
+}
+
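+/* Choose a definer layout for the header layout: first try a regular match
+ * definer, then optionally fall back to a full/limited jumbo definer.
+ */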
+static int
+hws_definer_find_best_match_fit(struct mlx5hws_context *ctx,
+ struct mlx5hws_definer *definer,
+ u8 *hl, bool allow_jumbo)
+{
+ struct mlx5hws_definer_sel_ctrl ctrl = {0};
+ bool found;
+
+ /* Try to create a match definer */
+ ctrl.allowed_full_dw = DW_SELECTORS_MATCH;
+ ctrl.allowed_lim_dw = 0;
+ ctrl.allowed_bytes = BYTE_SELECTORS;
+
+ found = hws_definer_best_hl_fit_recu(&ctrl, 0, (u32 *)hl);
+ if (found) {
+ hws_definer_copy_sel_ctrl(&ctrl, definer);
+ definer->type = MLX5HWS_DEFINER_TYPE_MATCH;
+ return 0;
+ }
+
+ if (!allow_jumbo)
+ return -E2BIG;
+
+ /* Try to create a full/limited jumbo definer */
+ ctrl.allowed_full_dw = ctx->caps->full_dw_jumbo_support ? DW_SELECTORS :
+ DW_SELECTORS_MATCH;
+ ctrl.allowed_lim_dw = ctx->caps->full_dw_jumbo_support ? 0 :
+ DW_SELECTORS_LIMITED;
+ ctrl.allowed_bytes = BYTE_SELECTORS;
+
+ found = hws_definer_best_hl_fit_recu(&ctrl, 0, (u32 *)hl);
+ if (found) {
+ hws_definer_copy_sel_ctrl(&ctrl, definer);
+ definer->type = MLX5HWS_DEFINER_TYPE_JUMBO;
+ return 0;
+ }
+
+ return -E2BIG;
+}
+
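+/* Build the definer mask tag; fields that provide a dedicated mask setter
+ * use it, the rest fall back to their regular tag setter.
+ */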
+static void
+hws_definer_create_tag_mask(u32 *match_param,
+ struct mlx5hws_definer_fc *fc,
+ u32 fc_sz,
+ u8 *tag)
+{
+ u32 i;
+
+ for (i = 0; i < fc_sz; i++) {
+ if (fc->tag_mask_set)
+ fc->tag_mask_set(fc, match_param, tag);
+ else
+ fc->tag_set(fc, match_param, tag);
+ fc++;
+ }
+}
+
+void mlx5hws_definer_create_tag(u32 *match_param,
+ struct mlx5hws_definer_fc *fc,
+ u32 fc_sz,
+ u8 *tag)
+{
+ u32 i;
+
+ for (i = 0; i < fc_sz; i++) {
+ fc->tag_set(fc, match_param, tag);
+ fc++;
+ }
+}
+
+int mlx5hws_definer_get_id(struct mlx5hws_definer *definer)
+{
+ return definer->obj_id;
+}
+
+int mlx5hws_definer_compare(struct mlx5hws_definer *definer_a,
+ struct mlx5hws_definer *definer_b)
+{
+ int i;
+
+ /* Future: Optimize by comparing selectors with valid mask only */
+ for (i = 0; i < BYTE_SELECTORS; i++)
+ if (definer_a->byte_selector[i] != definer_b->byte_selector[i])
+ return 1;
+
+ for (i = 0; i < DW_SELECTORS; i++)
+ if (definer_a->dw_selector[i] != definer_b->dw_selector[i])
+ return 1;
+
+ for (i = 0; i < MLX5HWS_JUMBO_TAG_SZ; i++)
+ if (definer_a->mask.jumbo[i] != definer_b->mask.jumbo[i])
+ return 1;
+
+ return 0;
+}
+
+int
+mlx5hws_definer_calc_layout(struct mlx5hws_context *ctx,
+ struct mlx5hws_match_template *mt,
+ struct mlx5hws_definer *match_definer,
+ bool allow_jumbo)
+{
+ u8 *match_hl;
+ int ret;
+
+ /* Union header-layout (hl) is used for creating a single definer
+ * field layout used with different bitmasks for hash and match.
+ */
+ match_hl = kzalloc(MLX5_ST_SZ_BYTES(definer_hl), GFP_KERNEL);
+ if (!match_hl)
+ return -ENOMEM;
+
+ /* Convert all mt items to header layout (hl)
+ * and allocate the match and range field copy array (fc & fcr).
+ */
+ ret = hws_definer_conv_match_params_to_hl(ctx, mt, match_hl);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to convert items to header layout\n");
+ goto free_match_hl;
+ }
+
+ /* Find the match definer layout for header layout match union */
+ ret = hws_definer_find_best_match_fit(ctx, match_definer, match_hl,
+ allow_jumbo);
+ if (ret) {
+ if (ret == -E2BIG)
+ mlx5hws_dbg(ctx,
+ "Failed to create match definer from header layout - E2BIG\n");
+ else
+ mlx5hws_err(ctx,
+ "Failed to create match definer from header layout (%d)\n",
+ ret);
+ goto free_fc;
+ }
+
+ kfree(match_hl);
+ return 0;
+
+free_fc:
+ kfree(mt->fc);
+free_match_hl:
+ kfree(match_hl);
+ return ret;
+}
+
+int mlx5hws_definer_init_cache(struct mlx5hws_definer_cache **cache)
+{
+ struct mlx5hws_definer_cache *new_cache;
+
+ new_cache = kzalloc(sizeof(*new_cache), GFP_KERNEL);
+ if (!new_cache)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&new_cache->list_head);
+ *cache = new_cache;
+
+ return 0;
+}
+
+void mlx5hws_definer_uninit_cache(struct mlx5hws_definer_cache *cache)
+{
+ kfree(cache);
+}
+
+int mlx5hws_definer_get_obj(struct mlx5hws_context *ctx,
+ struct mlx5hws_definer *definer)
+{
+ struct mlx5hws_definer_cache *cache = ctx->definer_cache;
+ struct mlx5hws_cmd_definer_create_attr def_attr = {0};
+ struct mlx5hws_definer_cache_item *cached_definer;
+ u32 obj_id;
+ int ret;
+
+ /* Search definer cache for requested definer */
+ list_for_each_entry(cached_definer, &cache->list_head, list_node) {
+ if (mlx5hws_definer_compare(&cached_definer->definer, definer))
+ continue;
+
+ /* Reuse definer and set LRU (move to be first in the list) */
+ list_move(&cached_definer->list_node, &cache->list_head);
+ cached_definer->refcount++;
+ return cached_definer->definer.obj_id;
+ }
+
+ /* Allocate and create definer based on the bitmask tag */
+ def_attr.match_mask = definer->mask.jumbo;
+ def_attr.dw_selector = definer->dw_selector;
+ def_attr.byte_selector = definer->byte_selector;
+
+ ret = mlx5hws_cmd_definer_create(ctx->mdev, &def_attr, &obj_id);
+ if (ret)
+ return -1;
+
+ cached_definer = kzalloc(sizeof(*cached_definer), GFP_KERNEL);
+ if (!cached_definer)
+ goto free_definer_obj;
+
+ memcpy(&cached_definer->definer, definer, sizeof(*definer));
+ cached_definer->definer.obj_id = obj_id;
+ cached_definer->refcount = 1;
+ list_add(&cached_definer->list_node, &cache->list_head);
+
+ return obj_id;
+
+free_definer_obj:
+ mlx5hws_cmd_definer_destroy(ctx->mdev, obj_id);
+ return -1;
+}
+
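+/* Drop a reference on a cached definer object and destroy it once the last
+ * reference is gone.
+ */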
+static void
+hws_definer_put_obj(struct mlx5hws_context *ctx, u32 obj_id)
+{
+ struct mlx5hws_definer_cache_item *cached_definer;
+
+ list_for_each_entry(cached_definer, &ctx->definer_cache->list_head, list_node) {
+ if (cached_definer->definer.obj_id != obj_id)
+ continue;
+
+ /* Object found */
+ if (--cached_definer->refcount)
+ return;
+
+ list_del_init(&cached_definer->list_node);
+ mlx5hws_cmd_definer_destroy(ctx->mdev, cached_definer->definer.obj_id);
+ kfree(cached_definer);
+ return;
+ }
+
+ /* Programming error, object must be part of cache */
+ pr_warn("HWS: failed putting definer object\n");
+}
+
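+/* Duplicate the selected layout, optionally bind the field copy array to it,
+ * build the mask tag and acquire a matching definer object (cached or new).
+ */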
+static struct mlx5hws_definer *
+hws_definer_alloc(struct mlx5hws_context *ctx,
+ struct mlx5hws_definer_fc *fc,
+ int fc_sz,
+ u32 *match_param,
+ struct mlx5hws_definer *layout,
+ bool bind_fc)
+{
+ struct mlx5hws_definer *definer;
+ int ret;
+
+ definer = kmemdup(layout, sizeof(*definer), GFP_KERNEL);
+ if (!definer)
+ return NULL;
+
+ /* Align field copy array based on given layout */
+ if (bind_fc) {
+ ret = hws_definer_fc_bind(definer, fc, fc_sz);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to bind field copy to definer\n");
+ goto free_definer;
+ }
+ }
+
+ /* Create the tag mask used for definer creation */
+ hws_definer_create_tag_mask(match_param, fc, fc_sz, definer->mask.jumbo);
+
+ ret = mlx5hws_definer_get_obj(ctx, definer);
+ if (ret < 0)
+ goto free_definer;
+
+ definer->obj_id = ret;
+ return definer;
+
+free_definer:
+ kfree(definer);
+ return NULL;
+}
+
+void mlx5hws_definer_free(struct mlx5hws_context *ctx,
+ struct mlx5hws_definer *definer)
+{
+ hws_definer_put_obj(ctx, definer->obj_id);
+ kfree(definer);
+}
+
+static int
+hws_definer_mt_match_init(struct mlx5hws_context *ctx,
+ struct mlx5hws_match_template *mt,
+ struct mlx5hws_definer *match_layout)
+{
+ /* Create mandatory match definer */
+ mt->definer = hws_definer_alloc(ctx,
+ mt->fc,
+ mt->fc_sz,
+ mt->match_param,
+ match_layout,
+ true);
+ if (!mt->definer) {
+ mlx5hws_err(ctx, "Failed to create match definer\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+hws_definer_mt_match_uninit(struct mlx5hws_context *ctx,
+ struct mlx5hws_match_template *mt)
+{
+ mlx5hws_definer_free(ctx, mt->definer);
+}
+
+int mlx5hws_definer_mt_init(struct mlx5hws_context *ctx,
+ struct mlx5hws_match_template *mt)
+{
+ struct mlx5hws_definer match_layout = {0};
+ int ret;
+
+ ret = mlx5hws_definer_calc_layout(ctx, mt, &match_layout, true);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to calculate matcher definer layout\n");
+ return ret;
+ }
+
+ /* Calculate definers needed for exact match */
+ ret = hws_definer_mt_match_init(ctx, mt, &match_layout);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to init match definers\n");
+ goto free_fc;
+ }
+
+ return 0;
+
+free_fc:
+ kfree(mt->fc);
+ return ret;
+}
+
+void mlx5hws_definer_mt_uninit(struct mlx5hws_context *ctx,
+ struct mlx5hws_match_template *mt)
+{
+ hws_definer_mt_match_uninit(ctx, mt);
+ kfree(mt->fc);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h
new file mode 100644
index 000000000000..141f3eb2e307
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h
@@ -0,0 +1,831 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef HWS_DEFINER_H_
+#define HWS_DEFINER_H_
+
+/* Max available selectors */
+#define DW_SELECTORS 9
+#define BYTE_SELECTORS 8
+
+/* Selectors based on match TAG */
+#define DW_SELECTORS_MATCH 6
+#define DW_SELECTORS_LIMITED 3
+
+/* Selectors based on range TAG */
+#define DW_SELECTORS_RANGE 2
+#define BYTE_SELECTORS_RANGE 8
+
+#define HWS_NUM_OF_FLEX_PARSERS 8
+
+enum mlx5hws_definer_fname {
+ MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_O,
+ MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_I,
+ MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_O,
+ MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_I,
+ MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_O,
+ MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_I,
+ MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_O,
+ MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_I,
+ MLX5HWS_DEFINER_FNAME_ETH_TYPE_O,
+ MLX5HWS_DEFINER_FNAME_ETH_TYPE_I,
+ MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_O,
+ MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_I,
+ MLX5HWS_DEFINER_FNAME_VLAN_TYPE_O,
+ MLX5HWS_DEFINER_FNAME_VLAN_TYPE_I,
+ MLX5HWS_DEFINER_FNAME_VLAN_FIRST_PRIO_O,
+ MLX5HWS_DEFINER_FNAME_VLAN_FIRST_PRIO_I,
+ MLX5HWS_DEFINER_FNAME_VLAN_CFI_O,
+ MLX5HWS_DEFINER_FNAME_VLAN_CFI_I,
+ MLX5HWS_DEFINER_FNAME_VLAN_ID_O,
+ MLX5HWS_DEFINER_FNAME_VLAN_ID_I,
+ MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_O,
+ MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_I,
+ MLX5HWS_DEFINER_FNAME_VLAN_SECOND_PRIO_O,
+ MLX5HWS_DEFINER_FNAME_VLAN_SECOND_PRIO_I,
+ MLX5HWS_DEFINER_FNAME_VLAN_SECOND_CFI_O,
+ MLX5HWS_DEFINER_FNAME_VLAN_SECOND_CFI_I,
+ MLX5HWS_DEFINER_FNAME_VLAN_SECOND_ID_O,
+ MLX5HWS_DEFINER_FNAME_VLAN_SECOND_ID_I,
+ MLX5HWS_DEFINER_FNAME_IPV4_IHL_O,
+ MLX5HWS_DEFINER_FNAME_IPV4_IHL_I,
+ MLX5HWS_DEFINER_FNAME_IP_DSCP_O,
+ MLX5HWS_DEFINER_FNAME_IP_DSCP_I,
+ MLX5HWS_DEFINER_FNAME_IP_ECN_O,
+ MLX5HWS_DEFINER_FNAME_IP_ECN_I,
+ MLX5HWS_DEFINER_FNAME_IP_TTL_O,
+ MLX5HWS_DEFINER_FNAME_IP_TTL_I,
+ MLX5HWS_DEFINER_FNAME_IPV4_DST_O,
+ MLX5HWS_DEFINER_FNAME_IPV4_DST_I,
+ MLX5HWS_DEFINER_FNAME_IPV4_SRC_O,
+ MLX5HWS_DEFINER_FNAME_IPV4_SRC_I,
+ MLX5HWS_DEFINER_FNAME_IP_VERSION_O,
+ MLX5HWS_DEFINER_FNAME_IP_VERSION_I,
+ MLX5HWS_DEFINER_FNAME_IP_FRAG_O,
+ MLX5HWS_DEFINER_FNAME_IP_FRAG_I,
+ MLX5HWS_DEFINER_FNAME_IP_LEN_O,
+ MLX5HWS_DEFINER_FNAME_IP_LEN_I,
+ MLX5HWS_DEFINER_FNAME_IP_TOS_O,
+ MLX5HWS_DEFINER_FNAME_IP_TOS_I,
+ MLX5HWS_DEFINER_FNAME_IPV6_FLOW_LABEL_O,
+ MLX5HWS_DEFINER_FNAME_IPV6_FLOW_LABEL_I,
+ MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_O,
+ MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_O,
+ MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_O,
+ MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_O,
+ MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_I,
+ MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_I,
+ MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_I,
+ MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_I,
+ MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_O,
+ MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_O,
+ MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_O,
+ MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_O,
+ MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_I,
+ MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_I,
+ MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_I,
+ MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_I,
+ MLX5HWS_DEFINER_FNAME_IP_PROTOCOL_O,
+ MLX5HWS_DEFINER_FNAME_IP_PROTOCOL_I,
+ MLX5HWS_DEFINER_FNAME_L4_SPORT_O,
+ MLX5HWS_DEFINER_FNAME_L4_SPORT_I,
+ MLX5HWS_DEFINER_FNAME_L4_DPORT_O,
+ MLX5HWS_DEFINER_FNAME_L4_DPORT_I,
+ MLX5HWS_DEFINER_FNAME_TCP_FLAGS_I,
+ MLX5HWS_DEFINER_FNAME_TCP_FLAGS_O,
+ MLX5HWS_DEFINER_FNAME_TCP_SEQ_NUM,
+ MLX5HWS_DEFINER_FNAME_TCP_ACK_NUM,
+ MLX5HWS_DEFINER_FNAME_GTP_TEID,
+ MLX5HWS_DEFINER_FNAME_GTP_MSG_TYPE,
+ MLX5HWS_DEFINER_FNAME_GTP_EXT_FLAG,
+ MLX5HWS_DEFINER_FNAME_GTP_NEXT_EXT_HDR,
+ MLX5HWS_DEFINER_FNAME_GTP_EXT_HDR_PDU,
+ MLX5HWS_DEFINER_FNAME_GTP_EXT_HDR_QFI,
+ MLX5HWS_DEFINER_FNAME_GTPU_DW0,
+ MLX5HWS_DEFINER_FNAME_GTPU_FIRST_EXT_DW0,
+ MLX5HWS_DEFINER_FNAME_GTPU_DW2,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER_0,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER_1,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER_2,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER_3,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER_4,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER_5,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER_6,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER_7,
+ MLX5HWS_DEFINER_FNAME_VPORT_REG_C_0,
+ MLX5HWS_DEFINER_FNAME_VXLAN_FLAGS,
+ MLX5HWS_DEFINER_FNAME_VXLAN_VNI,
+ MLX5HWS_DEFINER_FNAME_VXLAN_GPE_FLAGS,
+ MLX5HWS_DEFINER_FNAME_VXLAN_GPE_RSVD0,
+ MLX5HWS_DEFINER_FNAME_VXLAN_GPE_PROTO,
+ MLX5HWS_DEFINER_FNAME_VXLAN_GPE_VNI,
+ MLX5HWS_DEFINER_FNAME_VXLAN_GPE_RSVD1,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_LEN,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OAM,
+ MLX5HWS_DEFINER_FNAME_GENEVE_PROTO,
+ MLX5HWS_DEFINER_FNAME_GENEVE_VNI,
+ MLX5HWS_DEFINER_FNAME_SOURCE_QP,
+ MLX5HWS_DEFINER_FNAME_SOURCE_GVMI,
+ MLX5HWS_DEFINER_FNAME_REG_0,
+ MLX5HWS_DEFINER_FNAME_REG_1,
+ MLX5HWS_DEFINER_FNAME_REG_2,
+ MLX5HWS_DEFINER_FNAME_REG_3,
+ MLX5HWS_DEFINER_FNAME_REG_4,
+ MLX5HWS_DEFINER_FNAME_REG_5,
+ MLX5HWS_DEFINER_FNAME_REG_6,
+ MLX5HWS_DEFINER_FNAME_REG_7,
+ MLX5HWS_DEFINER_FNAME_REG_8,
+ MLX5HWS_DEFINER_FNAME_REG_9,
+ MLX5HWS_DEFINER_FNAME_REG_10,
+ MLX5HWS_DEFINER_FNAME_REG_11,
+ MLX5HWS_DEFINER_FNAME_REG_A,
+ MLX5HWS_DEFINER_FNAME_REG_B,
+ MLX5HWS_DEFINER_FNAME_GRE_KEY_PRESENT,
+ MLX5HWS_DEFINER_FNAME_GRE_C,
+ MLX5HWS_DEFINER_FNAME_GRE_K,
+ MLX5HWS_DEFINER_FNAME_GRE_S,
+ MLX5HWS_DEFINER_FNAME_GRE_PROTOCOL,
+ MLX5HWS_DEFINER_FNAME_GRE_OPT_KEY,
+ MLX5HWS_DEFINER_FNAME_GRE_OPT_SEQ,
+ MLX5HWS_DEFINER_FNAME_GRE_OPT_CHECKSUM,
+ MLX5HWS_DEFINER_FNAME_INTEGRITY_O,
+ MLX5HWS_DEFINER_FNAME_INTEGRITY_I,
+ MLX5HWS_DEFINER_FNAME_ICMP_DW1,
+ MLX5HWS_DEFINER_FNAME_ICMP_DW2,
+ MLX5HWS_DEFINER_FNAME_ICMP_DW3,
+ MLX5HWS_DEFINER_FNAME_IPSEC_SPI,
+ MLX5HWS_DEFINER_FNAME_IPSEC_SEQUENCE_NUMBER,
+ MLX5HWS_DEFINER_FNAME_IPSEC_SYNDROME,
+ MLX5HWS_DEFINER_FNAME_MPLS0_O,
+ MLX5HWS_DEFINER_FNAME_MPLS1_O,
+ MLX5HWS_DEFINER_FNAME_MPLS2_O,
+ MLX5HWS_DEFINER_FNAME_MPLS3_O,
+ MLX5HWS_DEFINER_FNAME_MPLS4_O,
+ MLX5HWS_DEFINER_FNAME_MPLS0_I,
+ MLX5HWS_DEFINER_FNAME_MPLS1_I,
+ MLX5HWS_DEFINER_FNAME_MPLS2_I,
+ MLX5HWS_DEFINER_FNAME_MPLS3_I,
+ MLX5HWS_DEFINER_FNAME_MPLS4_I,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER0_OK,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER1_OK,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER2_OK,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER3_OK,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER4_OK,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER5_OK,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER6_OK,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER7_OK,
+ MLX5HWS_DEFINER_FNAME_OKS2_MPLS0_O,
+ MLX5HWS_DEFINER_FNAME_OKS2_MPLS1_O,
+ MLX5HWS_DEFINER_FNAME_OKS2_MPLS2_O,
+ MLX5HWS_DEFINER_FNAME_OKS2_MPLS3_O,
+ MLX5HWS_DEFINER_FNAME_OKS2_MPLS4_O,
+ MLX5HWS_DEFINER_FNAME_OKS2_MPLS0_I,
+ MLX5HWS_DEFINER_FNAME_OKS2_MPLS1_I,
+ MLX5HWS_DEFINER_FNAME_OKS2_MPLS2_I,
+ MLX5HWS_DEFINER_FNAME_OKS2_MPLS3_I,
+ MLX5HWS_DEFINER_FNAME_OKS2_MPLS4_I,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_0,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_1,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_2,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_3,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_4,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_5,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_6,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_7,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_0,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_1,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_2,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_3,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_4,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_5,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_6,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_7,
+ MLX5HWS_DEFINER_FNAME_IB_L4_OPCODE,
+ MLX5HWS_DEFINER_FNAME_IB_L4_QPN,
+ MLX5HWS_DEFINER_FNAME_IB_L4_A,
+ MLX5HWS_DEFINER_FNAME_RANDOM_NUM,
+ MLX5HWS_DEFINER_FNAME_PTYPE_L2_O,
+ MLX5HWS_DEFINER_FNAME_PTYPE_L2_I,
+ MLX5HWS_DEFINER_FNAME_PTYPE_L3_O,
+ MLX5HWS_DEFINER_FNAME_PTYPE_L3_I,
+ MLX5HWS_DEFINER_FNAME_PTYPE_L4_O,
+ MLX5HWS_DEFINER_FNAME_PTYPE_L4_I,
+ MLX5HWS_DEFINER_FNAME_PTYPE_L4_EXT_O,
+ MLX5HWS_DEFINER_FNAME_PTYPE_L4_EXT_I,
+ MLX5HWS_DEFINER_FNAME_PTYPE_FRAG_O,
+ MLX5HWS_DEFINER_FNAME_PTYPE_FRAG_I,
+ MLX5HWS_DEFINER_FNAME_TNL_HDR_0,
+ MLX5HWS_DEFINER_FNAME_TNL_HDR_1,
+ MLX5HWS_DEFINER_FNAME_TNL_HDR_2,
+ MLX5HWS_DEFINER_FNAME_TNL_HDR_3,
+ MLX5HWS_DEFINER_FNAME_MAX,
+};
+
+enum mlx5hws_definer_match_criteria {
+ MLX5HWS_DEFINER_MATCH_CRITERIA_EMPTY = 0,
+ MLX5HWS_DEFINER_MATCH_CRITERIA_OUTER = 1 << 0,
+ MLX5HWS_DEFINER_MATCH_CRITERIA_MISC = 1 << 1,
+ MLX5HWS_DEFINER_MATCH_CRITERIA_INNER = 1 << 2,
+ MLX5HWS_DEFINER_MATCH_CRITERIA_MISC2 = 1 << 3,
+ MLX5HWS_DEFINER_MATCH_CRITERIA_MISC3 = 1 << 4,
+ MLX5HWS_DEFINER_MATCH_CRITERIA_MISC4 = 1 << 5,
+ MLX5HWS_DEFINER_MATCH_CRITERIA_MISC5 = 1 << 6,
+ MLX5HWS_DEFINER_MATCH_CRITERIA_MISC6 = 1 << 7,
+};
+
+enum mlx5hws_definer_type {
+ MLX5HWS_DEFINER_TYPE_MATCH,
+ MLX5HWS_DEFINER_TYPE_JUMBO,
+};
+
+enum mlx5hws_definer_match_flag {
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE = 1 << 0,
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE = 1 << 1,
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU = 1 << 2,
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE = 1 << 3,
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN = 1 << 4,
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_0_1 = 1 << 5,
+
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE_OPT_KEY = 1 << 6,
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_2 = 1 << 7,
+
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_GRE = 1 << 8,
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_UDP = 1 << 9,
+
+ MLX5HWS_DEFINER_MATCH_FLAG_ICMPV4 = 1 << 10,
+ MLX5HWS_DEFINER_MATCH_FLAG_ICMPV6 = 1 << 11,
+ MLX5HWS_DEFINER_MATCH_FLAG_TCP_O = 1 << 12,
+ MLX5HWS_DEFINER_MATCH_FLAG_TCP_I = 1 << 13,
+};
+
+struct mlx5hws_definer_fc {
+ struct mlx5hws_context *ctx;
+ /* Source */
+ u32 s_byte_off;
+ int s_bit_off;
+ u32 s_bit_mask;
+ /* Destination */
+ u32 byte_off;
+ int bit_off;
+ u32 bit_mask;
+ enum mlx5hws_definer_fname fname;
+ void (*tag_set)(struct mlx5hws_definer_fc *fc,
+			void *match_param,
+ u8 *tag);
+ void (*tag_mask_set)(struct mlx5hws_definer_fc *fc,
+			void *match_param,
+ u8 *tag);
+};
+
+struct mlx5_ifc_definer_hl_eth_l2_bits {
+ u8 dmac_47_16[0x20];
+ u8 dmac_15_0[0x10];
+ u8 l3_ethertype[0x10];
+ u8 reserved_at_40[0x1];
+ u8 sx_sniffer[0x1];
+ u8 functional_lb[0x1];
+ u8 ip_fragmented[0x1];
+ u8 qp_type[0x2];
+ u8 encap_type[0x2];
+ u8 port_number[0x2];
+ u8 l3_type[0x2];
+ u8 l4_type_bwc[0x2];
+ u8 first_vlan_qualifier[0x2];
+ u8 first_priority[0x3];
+ u8 first_cfi[0x1];
+ u8 first_vlan_id[0xc];
+ u8 l4_type[0x4];
+ u8 reserved_at_64[0x2];
+ u8 ipsec_layer[0x2];
+ u8 l2_type[0x2];
+ u8 force_lb[0x1];
+ u8 l2_ok[0x1];
+ u8 l3_ok[0x1];
+ u8 l4_ok[0x1];
+ u8 second_vlan_qualifier[0x2];
+ u8 second_priority[0x3];
+ u8 second_cfi[0x1];
+ u8 second_vlan_id[0xc];
+};
+
+struct mlx5_ifc_definer_hl_eth_l2_src_bits {
+ u8 smac_47_16[0x20];
+ u8 smac_15_0[0x10];
+ u8 loopback_syndrome[0x8];
+ u8 l3_type[0x2];
+ u8 l4_type_bwc[0x2];
+ u8 first_vlan_qualifier[0x2];
+ u8 ip_fragmented[0x1];
+ u8 functional_lb[0x1];
+};
+
+struct mlx5_ifc_definer_hl_ib_l2_bits {
+ u8 sx_sniffer[0x1];
+ u8 force_lb[0x1];
+ u8 functional_lb[0x1];
+ u8 reserved_at_3[0x3];
+ u8 port_number[0x2];
+ u8 sl[0x4];
+ u8 qp_type[0x2];
+ u8 lnh[0x2];
+ u8 dlid[0x10];
+ u8 vl[0x4];
+ u8 lrh_packet_length[0xc];
+ u8 slid[0x10];
+};
+
+struct mlx5_ifc_definer_hl_eth_l3_bits {
+ u8 ip_version[0x4];
+ u8 ihl[0x4];
+ union {
+ u8 tos[0x8];
+ struct {
+ u8 dscp[0x6];
+ u8 ecn[0x2];
+ };
+ };
+ u8 time_to_live_hop_limit[0x8];
+ u8 protocol_next_header[0x8];
+ u8 identification[0x10];
+ union {
+ u8 ipv4_frag[0x10];
+ struct {
+ u8 flags[0x3];
+ u8 fragment_offset[0xd];
+ };
+ };
+ u8 ipv4_total_length[0x10];
+ u8 checksum[0x10];
+ u8 reserved_at_60[0xc];
+ u8 flow_label[0x14];
+ u8 packet_length[0x10];
+ u8 ipv6_payload_length[0x10];
+};
+
+struct mlx5_ifc_definer_hl_eth_l4_bits {
+ u8 source_port[0x10];
+ u8 destination_port[0x10];
+ u8 data_offset[0x4];
+ u8 l4_ok[0x1];
+ u8 l3_ok[0x1];
+ u8 ip_fragmented[0x1];
+ u8 tcp_ns[0x1];
+ union {
+ u8 tcp_flags[0x8];
+ struct {
+ u8 tcp_cwr[0x1];
+ u8 tcp_ece[0x1];
+ u8 tcp_urg[0x1];
+ u8 tcp_ack[0x1];
+ u8 tcp_psh[0x1];
+ u8 tcp_rst[0x1];
+ u8 tcp_syn[0x1];
+ u8 tcp_fin[0x1];
+ };
+ };
+ u8 first_fragment[0x1];
+ u8 reserved_at_31[0xf];
+};
+
+struct mlx5_ifc_definer_hl_src_qp_gvmi_bits {
+ u8 loopback_syndrome[0x8];
+ u8 l3_type[0x2];
+ u8 l4_type_bwc[0x2];
+ u8 first_vlan_qualifier[0x2];
+ u8 reserved_at_e[0x1];
+ u8 functional_lb[0x1];
+ u8 source_gvmi[0x10];
+ u8 force_lb[0x1];
+ u8 ip_fragmented[0x1];
+ u8 source_is_requestor[0x1];
+ u8 reserved_at_23[0x5];
+ u8 source_qp[0x18];
+};
+
+struct mlx5_ifc_definer_hl_ib_l4_bits {
+ u8 opcode[0x8];
+ u8 qp[0x18];
+ u8 se[0x1];
+ u8 migreq[0x1];
+ u8 ackreq[0x1];
+ u8 fecn[0x1];
+ u8 becn[0x1];
+ u8 bth[0x1];
+ u8 deth[0x1];
+ u8 dcceth[0x1];
+ u8 reserved_at_28[0x2];
+ u8 pad_count[0x2];
+ u8 tver[0x4];
+ u8 p_key[0x10];
+ u8 reserved_at_40[0x8];
+ u8 deth_source_qp[0x18];
+};
+
+enum mlx5hws_integrity_ok1_bits {
+ MLX5HWS_DEFINER_OKS1_FIRST_L4_OK = 24,
+ MLX5HWS_DEFINER_OKS1_FIRST_L3_OK = 25,
+ MLX5HWS_DEFINER_OKS1_SECOND_L4_OK = 26,
+ MLX5HWS_DEFINER_OKS1_SECOND_L3_OK = 27,
+ MLX5HWS_DEFINER_OKS1_FIRST_L4_CSUM_OK = 28,
+ MLX5HWS_DEFINER_OKS1_FIRST_IPV4_CSUM_OK = 29,
+ MLX5HWS_DEFINER_OKS1_SECOND_L4_CSUM_OK = 30,
+ MLX5HWS_DEFINER_OKS1_SECOND_IPV4_CSUM_OK = 31,
+};
+
+struct mlx5_ifc_definer_hl_oks1_bits {
+ union {
+ u8 oks1_bits[0x20];
+ struct {
+ u8 second_ipv4_checksum_ok[0x1];
+ u8 second_l4_checksum_ok[0x1];
+ u8 first_ipv4_checksum_ok[0x1];
+ u8 first_l4_checksum_ok[0x1];
+ u8 second_l3_ok[0x1];
+ u8 second_l4_ok[0x1];
+ u8 first_l3_ok[0x1];
+ u8 first_l4_ok[0x1];
+ u8 flex_parser7_steering_ok[0x1];
+ u8 flex_parser6_steering_ok[0x1];
+ u8 flex_parser5_steering_ok[0x1];
+ u8 flex_parser4_steering_ok[0x1];
+ u8 flex_parser3_steering_ok[0x1];
+ u8 flex_parser2_steering_ok[0x1];
+ u8 flex_parser1_steering_ok[0x1];
+ u8 flex_parser0_steering_ok[0x1];
+ u8 second_ipv6_extension_header_vld[0x1];
+ u8 first_ipv6_extension_header_vld[0x1];
+ u8 l3_tunneling_ok[0x1];
+ u8 l2_tunneling_ok[0x1];
+ u8 second_tcp_ok[0x1];
+ u8 second_udp_ok[0x1];
+ u8 second_ipv4_ok[0x1];
+ u8 second_ipv6_ok[0x1];
+ u8 second_l2_ok[0x1];
+ u8 vxlan_ok[0x1];
+ u8 gre_ok[0x1];
+ u8 first_tcp_ok[0x1];
+ u8 first_udp_ok[0x1];
+ u8 first_ipv4_ok[0x1];
+ u8 first_ipv6_ok[0x1];
+ u8 first_l2_ok[0x1];
+ };
+ };
+};
+
+struct mlx5_ifc_definer_hl_oks2_bits {
+ u8 reserved_at_0[0xa];
+ u8 second_mpls_ok[0x1];
+ u8 second_mpls4_s_bit[0x1];
+ u8 second_mpls4_qualifier[0x1];
+ u8 second_mpls3_s_bit[0x1];
+ u8 second_mpls3_qualifier[0x1];
+ u8 second_mpls2_s_bit[0x1];
+ u8 second_mpls2_qualifier[0x1];
+ u8 second_mpls1_s_bit[0x1];
+ u8 second_mpls1_qualifier[0x1];
+ u8 second_mpls0_s_bit[0x1];
+ u8 second_mpls0_qualifier[0x1];
+ u8 first_mpls_ok[0x1];
+ u8 first_mpls4_s_bit[0x1];
+ u8 first_mpls4_qualifier[0x1];
+ u8 first_mpls3_s_bit[0x1];
+ u8 first_mpls3_qualifier[0x1];
+ u8 first_mpls2_s_bit[0x1];
+ u8 first_mpls2_qualifier[0x1];
+ u8 first_mpls1_s_bit[0x1];
+ u8 first_mpls1_qualifier[0x1];
+ u8 first_mpls0_s_bit[0x1];
+ u8 first_mpls0_qualifier[0x1];
+};
+
+struct mlx5_ifc_definer_hl_voq_bits {
+ u8 reserved_at_0[0x18];
+ u8 ecn_ok[0x1];
+ u8 congestion[0x1];
+ u8 profile[0x2];
+ u8 internal_prio[0x4];
+};
+
+struct mlx5_ifc_definer_hl_ipv4_src_dst_bits {
+ u8 source_address[0x20];
+ u8 destination_address[0x20];
+};
+
+struct mlx5_ifc_definer_hl_random_number_bits {
+ u8 random_number[0x10];
+ u8 reserved[0x10];
+};
+
+struct mlx5_ifc_definer_hl_ipv6_addr_bits {
+ u8 ipv6_address_127_96[0x20];
+ u8 ipv6_address_95_64[0x20];
+ u8 ipv6_address_63_32[0x20];
+ u8 ipv6_address_31_0[0x20];
+};
+
+struct mlx5_ifc_definer_tcp_icmp_header_bits {
+ union {
+ struct {
+ u8 icmp_dw1[0x20];
+ u8 icmp_dw2[0x20];
+ u8 icmp_dw3[0x20];
+ };
+ struct {
+ u8 tcp_seq[0x20];
+ u8 tcp_ack[0x20];
+ u8 tcp_win_urg[0x20];
+ };
+ };
+};
+
+struct mlx5_ifc_definer_hl_tunnel_header_bits {
+ u8 tunnel_header_0[0x20];
+ u8 tunnel_header_1[0x20];
+ u8 tunnel_header_2[0x20];
+ u8 tunnel_header_3[0x20];
+};
+
+struct mlx5_ifc_definer_hl_ipsec_bits {
+ u8 spi[0x20];
+ u8 sequence_number[0x20];
+ u8 reserved[0x10];
+ u8 ipsec_syndrome[0x8];
+ u8 next_header[0x8];
+};
+
+struct mlx5_ifc_definer_hl_metadata_bits {
+ u8 metadata_to_cqe[0x20];
+ u8 general_purpose[0x20];
+ u8 acomulated_hash[0x20];
+};
+
+struct mlx5_ifc_definer_hl_flex_parser_bits {
+ u8 flex_parser_7[0x20];
+ u8 flex_parser_6[0x20];
+ u8 flex_parser_5[0x20];
+ u8 flex_parser_4[0x20];
+ u8 flex_parser_3[0x20];
+ u8 flex_parser_2[0x20];
+ u8 flex_parser_1[0x20];
+ u8 flex_parser_0[0x20];
+};
+
+struct mlx5_ifc_definer_hl_registers_bits {
+ u8 register_c_10[0x20];
+ u8 register_c_11[0x20];
+ u8 register_c_8[0x20];
+ u8 register_c_9[0x20];
+ u8 register_c_6[0x20];
+ u8 register_c_7[0x20];
+ u8 register_c_4[0x20];
+ u8 register_c_5[0x20];
+ u8 register_c_2[0x20];
+ u8 register_c_3[0x20];
+ u8 register_c_0[0x20];
+ u8 register_c_1[0x20];
+};
+
+struct mlx5_ifc_definer_hl_mpls_bits {
+ u8 mpls0_label[0x20];
+ u8 mpls1_label[0x20];
+ u8 mpls2_label[0x20];
+ u8 mpls3_label[0x20];
+ u8 mpls4_label[0x20];
+};
+
+struct mlx5_ifc_definer_hl_bits {
+ struct mlx5_ifc_definer_hl_eth_l2_bits eth_l2_outer;
+ struct mlx5_ifc_definer_hl_eth_l2_bits eth_l2_inner;
+ struct mlx5_ifc_definer_hl_eth_l2_src_bits eth_l2_src_outer;
+ struct mlx5_ifc_definer_hl_eth_l2_src_bits eth_l2_src_inner;
+ struct mlx5_ifc_definer_hl_ib_l2_bits ib_l2;
+ struct mlx5_ifc_definer_hl_eth_l3_bits eth_l3_outer;
+ struct mlx5_ifc_definer_hl_eth_l3_bits eth_l3_inner;
+ struct mlx5_ifc_definer_hl_eth_l4_bits eth_l4_outer;
+ struct mlx5_ifc_definer_hl_eth_l4_bits eth_l4_inner;
+ struct mlx5_ifc_definer_hl_src_qp_gvmi_bits source_qp_gvmi;
+ struct mlx5_ifc_definer_hl_ib_l4_bits ib_l4;
+ struct mlx5_ifc_definer_hl_oks1_bits oks1;
+ struct mlx5_ifc_definer_hl_oks2_bits oks2;
+ struct mlx5_ifc_definer_hl_voq_bits voq;
+ u8 reserved_at_480[0x380];
+ struct mlx5_ifc_definer_hl_ipv4_src_dst_bits ipv4_src_dest_outer;
+ struct mlx5_ifc_definer_hl_ipv4_src_dst_bits ipv4_src_dest_inner;
+ struct mlx5_ifc_definer_hl_ipv6_addr_bits ipv6_dst_outer;
+ struct mlx5_ifc_definer_hl_ipv6_addr_bits ipv6_dst_inner;
+ struct mlx5_ifc_definer_hl_ipv6_addr_bits ipv6_src_outer;
+ struct mlx5_ifc_definer_hl_ipv6_addr_bits ipv6_src_inner;
+ u8 unsupported_dest_ib_l3[0x80];
+ u8 unsupported_source_ib_l3[0x80];
+ u8 unsupported_udp_misc_outer[0x20];
+ u8 unsupported_udp_misc_inner[0x20];
+ struct mlx5_ifc_definer_tcp_icmp_header_bits tcp_icmp;
+ struct mlx5_ifc_definer_hl_tunnel_header_bits tunnel_header;
+ struct mlx5_ifc_definer_hl_mpls_bits mpls_outer;
+ struct mlx5_ifc_definer_hl_mpls_bits mpls_inner;
+ u8 unsupported_config_headers_outer[0x80];
+ u8 unsupported_config_headers_inner[0x80];
+ struct mlx5_ifc_definer_hl_random_number_bits random_number;
+ struct mlx5_ifc_definer_hl_ipsec_bits ipsec;
+ struct mlx5_ifc_definer_hl_metadata_bits metadata;
+ u8 unsupported_utc_timestamp[0x40];
+ u8 unsupported_free_running_timestamp[0x40];
+ struct mlx5_ifc_definer_hl_flex_parser_bits flex_parser;
+ struct mlx5_ifc_definer_hl_registers_bits registers;
+	/* Reserved in case the header layout grows on future HW */
+ u8 unsupported_reserved[0xd40];
+};
+
+enum mlx5hws_definer_gtp {
+ MLX5HWS_DEFINER_GTP_EXT_HDR_BIT = 0x04,
+};
+
+struct mlx5_ifc_header_gtp_bits {
+ u8 version[0x3];
+ u8 proto_type[0x1];
+ u8 reserved1[0x1];
+ union {
+ u8 msg_flags[0x3];
+ struct {
+ u8 ext_hdr_flag[0x1];
+ u8 seq_num_flag[0x1];
+ u8 pdu_flag[0x1];
+ };
+ };
+ u8 msg_type[0x8];
+ u8 msg_len[0x8];
+ u8 teid[0x20];
+};
+
+struct mlx5_ifc_header_opt_gtp_bits {
+ u8 seq_num[0x10];
+ u8 pdu_num[0x8];
+ u8 next_ext_hdr_type[0x8];
+};
+
+struct mlx5_ifc_header_gtp_psc_bits {
+ u8 len[0x8];
+ u8 pdu_type[0x4];
+ u8 flags[0x4];
+ u8 qfi[0x8];
+ u8 reserved2[0x8];
+};
+
+struct mlx5_ifc_header_ipv6_vtc_bits {
+ u8 version[0x4];
+ union {
+ u8 tos[0x8];
+ struct {
+ u8 dscp[0x6];
+ u8 ecn[0x2];
+ };
+ };
+ u8 flow_label[0x14];
+};
+
+struct mlx5_ifc_header_ipv6_routing_ext_bits {
+ u8 next_hdr[0x8];
+ u8 hdr_len[0x8];
+ u8 type[0x8];
+ u8 segments_left[0x8];
+ union {
+ u8 flags[0x20];
+ struct {
+ u8 last_entry[0x8];
+ u8 flag[0x8];
+ u8 tag[0x10];
+ };
+ };
+};
+
+struct mlx5_ifc_header_vxlan_bits {
+ u8 flags[0x8];
+ u8 reserved1[0x18];
+ u8 vni[0x18];
+ u8 reserved2[0x8];
+};
+
+struct mlx5_ifc_header_vxlan_gpe_bits {
+ u8 flags[0x8];
+ u8 rsvd0[0x10];
+ u8 protocol[0x8];
+ u8 vni[0x18];
+ u8 rsvd1[0x8];
+};
+
+struct mlx5_ifc_header_gre_bits {
+ union {
+ u8 c_rsvd0_ver[0x10];
+ struct {
+ u8 gre_c_present[0x1];
+ u8 reserved_at_1[0x1];
+ u8 gre_k_present[0x1];
+ u8 gre_s_present[0x1];
+ u8 reserved_at_4[0x9];
+ u8 version[0x3];
+ };
+ };
+ u8 gre_protocol[0x10];
+ u8 checksum[0x10];
+ u8 reserved_at_30[0x10];
+};
+
+struct mlx5_ifc_header_geneve_bits {
+ union {
+ u8 ver_opt_len_o_c_rsvd[0x10];
+ struct {
+ u8 version[0x2];
+ u8 opt_len[0x6];
+ u8 o_flag[0x1];
+ u8 c_flag[0x1];
+ u8 reserved_at_a[0x6];
+ };
+ };
+ u8 protocol_type[0x10];
+ u8 vni[0x18];
+ u8 reserved_at_38[0x8];
+};
+
+struct mlx5_ifc_header_geneve_opt_bits {
+ u8 class[0x10];
+ u8 type[0x8];
+ u8 reserved[0x3];
+ u8 len[0x5];
+};
+
+struct mlx5_ifc_header_icmp_bits {
+ union {
+ u8 icmp_dw1[0x20];
+ struct {
+ u8 type[0x8];
+ u8 code[0x8];
+ u8 cksum[0x10];
+ };
+ };
+ union {
+ u8 icmp_dw2[0x20];
+ struct {
+ u8 ident[0x10];
+ u8 seq_nb[0x10];
+ };
+ };
+};
+
+struct mlx5hws_definer {
+ enum mlx5hws_definer_type type;
+ u8 dw_selector[DW_SELECTORS];
+ u8 byte_selector[BYTE_SELECTORS];
+ struct mlx5hws_rule_match_tag mask;
+ u32 obj_id;
+};
+
+struct mlx5hws_definer_cache {
+ struct list_head list_head;
+};
+
+struct mlx5hws_definer_cache_item {
+ struct mlx5hws_definer definer;
+ u32 refcount; /* protected by context ctrl lock */
+ struct list_head list_node;
+};
+
+static inline bool
+mlx5hws_definer_is_jumbo(struct mlx5hws_definer *definer)
+{
+ return (definer->type == MLX5HWS_DEFINER_TYPE_JUMBO);
+}
+
+void mlx5hws_definer_create_tag(u32 *match_param,
+ struct mlx5hws_definer_fc *fc,
+ u32 fc_sz,
+ u8 *tag);
+
+int mlx5hws_definer_get_id(struct mlx5hws_definer *definer);
+
+int mlx5hws_definer_mt_init(struct mlx5hws_context *ctx,
+ struct mlx5hws_match_template *mt);
+
+void mlx5hws_definer_mt_uninit(struct mlx5hws_context *ctx,
+ struct mlx5hws_match_template *mt);
+
+int mlx5hws_definer_init_cache(struct mlx5hws_definer_cache **cache);
+
+void mlx5hws_definer_uninit_cache(struct mlx5hws_definer_cache *cache);
+
+int mlx5hws_definer_compare(struct mlx5hws_definer *definer_a,
+ struct mlx5hws_definer *definer_b);
+
+int mlx5hws_definer_get_obj(struct mlx5hws_context *ctx,
+ struct mlx5hws_definer *definer);
+
+void mlx5hws_definer_free(struct mlx5hws_context *ctx,
+ struct mlx5hws_definer *definer);
+
+int mlx5hws_definer_calc_layout(struct mlx5hws_context *ctx,
+ struct mlx5hws_match_template *mt,
+ struct mlx5hws_definer *match_definer,
+ bool allow_jumbo);
+
+const char *mlx5hws_definer_fname_to_str(enum mlx5hws_definer_fname fname);
+
+#endif /* HWS_DEFINER_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
new file mode 100644
index 000000000000..6a4c4cccd643
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
@@ -0,0 +1,1641 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2025 NVIDIA Corporation & Affiliates */
+
+#include <linux/mlx5/vport.h>
+#include <mlx5_core.h>
+#include <fs_core.h>
+#include <fs_cmd.h>
+#include "fs_hws_pools.h"
+#include "mlx5hws.h"
+
+#define MLX5HWS_CTX_MAX_NUM_OF_QUEUES 16
+#define MLX5HWS_CTX_QUEUE_SIZE 256
+
+static struct mlx5hws_action *
+mlx5_fs_create_action_remove_header_vlan(struct mlx5hws_context *ctx);
+static void
+mlx5_fs_destroy_pr_pool(struct mlx5_fs_pool *pool, struct xarray *pr_pools,
+ unsigned long index);
+static void
+mlx5_fs_destroy_mh_pool(struct mlx5_fs_pool *pool, struct xarray *mh_pools,
+ unsigned long index);
+
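+/* Create the shared HWS actions used by all rules of this namespace and
+ * initialize the packet reformat (pr) pools and the destination/meter/sampler
+ * caches.
+ */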
+static int mlx5_fs_init_hws_actions_pool(struct mlx5_core_dev *dev,
+ struct mlx5_fs_hws_context *fs_ctx)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+ struct mlx5_fs_hws_actions_pool *hws_pool = &fs_ctx->hws_pool;
+ struct mlx5hws_action_reformat_header reformat_hdr = {};
+ struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
+ enum mlx5hws_action_type action_type;
+ int err = -ENOSPC;
+
+ hws_pool->tag_action = mlx5hws_action_create_tag(ctx, flags);
+ if (!hws_pool->tag_action)
+ return err;
+ hws_pool->pop_vlan_action = mlx5hws_action_create_pop_vlan(ctx, flags);
+ if (!hws_pool->pop_vlan_action)
+ goto destroy_tag;
+ hws_pool->push_vlan_action = mlx5hws_action_create_push_vlan(ctx, flags);
+ if (!hws_pool->push_vlan_action)
+ goto destroy_pop_vlan;
+ hws_pool->drop_action = mlx5hws_action_create_dest_drop(ctx, flags);
+ if (!hws_pool->drop_action)
+ goto destroy_push_vlan;
+ action_type = MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
+ hws_pool->decapl2_action =
+ mlx5hws_action_create_reformat(ctx, action_type, 1,
+ &reformat_hdr, 0, flags);
+ if (!hws_pool->decapl2_action)
+ goto destroy_drop;
+ hws_pool->remove_hdr_vlan_action =
+ mlx5_fs_create_action_remove_header_vlan(ctx);
+ if (!hws_pool->remove_hdr_vlan_action)
+ goto destroy_decapl2;
+ err = mlx5_fs_hws_pr_pool_init(&hws_pool->insert_hdr_pool, dev, 0,
+ MLX5HWS_ACTION_TYP_INSERT_HEADER);
+ if (err)
+ goto destroy_remove_hdr;
+ err = mlx5_fs_hws_pr_pool_init(&hws_pool->dl3tnltol2_pool, dev, 0,
+ MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2);
+ if (err)
+ goto cleanup_insert_hdr;
+ xa_init(&hws_pool->el2tol3tnl_pools);
+ xa_init(&hws_pool->el2tol2tnl_pools);
+ xa_init(&hws_pool->mh_pools);
+ xa_init(&hws_pool->table_dests);
+ xa_init(&hws_pool->vport_dests);
+ xa_init(&hws_pool->vport_vhca_dests);
+ xa_init(&hws_pool->aso_meters);
+ xa_init(&hws_pool->sample_dests);
+ return 0;
+
+cleanup_insert_hdr:
+ mlx5_fs_hws_pr_pool_cleanup(&hws_pool->insert_hdr_pool);
+destroy_remove_hdr:
+ mlx5hws_action_destroy(hws_pool->remove_hdr_vlan_action);
+destroy_decapl2:
+ mlx5hws_action_destroy(hws_pool->decapl2_action);
+destroy_drop:
+ mlx5hws_action_destroy(hws_pool->drop_action);
+destroy_push_vlan:
+ mlx5hws_action_destroy(hws_pool->push_vlan_action);
+destroy_pop_vlan:
+ mlx5hws_action_destroy(hws_pool->pop_vlan_action);
+destroy_tag:
+ mlx5hws_action_destroy(hws_pool->tag_action);
+ return err;
+}
+
+static void mlx5_fs_cleanup_hws_actions_pool(struct mlx5_fs_hws_context *fs_ctx)
+{
+ struct mlx5_fs_hws_actions_pool *hws_pool = &fs_ctx->hws_pool;
+ struct mlx5_fs_hws_data *fs_hws_data;
+ struct mlx5hws_action *action;
+ struct mlx5_fs_pool *pool;
+ unsigned long i;
+
+ xa_for_each(&hws_pool->sample_dests, i, fs_hws_data)
+ kfree(fs_hws_data);
+ xa_destroy(&hws_pool->sample_dests);
+ xa_for_each(&hws_pool->aso_meters, i, fs_hws_data)
+ kfree(fs_hws_data);
+ xa_destroy(&hws_pool->aso_meters);
+ xa_for_each(&hws_pool->vport_vhca_dests, i, action)
+ mlx5hws_action_destroy(action);
+ xa_destroy(&hws_pool->vport_vhca_dests);
+ xa_for_each(&hws_pool->vport_dests, i, action)
+ mlx5hws_action_destroy(action);
+ xa_destroy(&hws_pool->vport_dests);
+ xa_destroy(&hws_pool->table_dests);
+ xa_for_each(&hws_pool->mh_pools, i, pool)
+ mlx5_fs_destroy_mh_pool(pool, &hws_pool->mh_pools, i);
+ xa_destroy(&hws_pool->mh_pools);
+ xa_for_each(&hws_pool->el2tol2tnl_pools, i, pool)
+ mlx5_fs_destroy_pr_pool(pool, &hws_pool->el2tol2tnl_pools, i);
+ xa_destroy(&hws_pool->el2tol2tnl_pools);
+ xa_for_each(&hws_pool->el2tol3tnl_pools, i, pool)
+ mlx5_fs_destroy_pr_pool(pool, &hws_pool->el2tol3tnl_pools, i);
+ xa_destroy(&hws_pool->el2tol3tnl_pools);
+ mlx5_fs_hws_pr_pool_cleanup(&hws_pool->dl3tnltol2_pool);
+ mlx5_fs_hws_pr_pool_cleanup(&hws_pool->insert_hdr_pool);
+ mlx5hws_action_destroy(hws_pool->remove_hdr_vlan_action);
+ mlx5hws_action_destroy(hws_pool->decapl2_action);
+ mlx5hws_action_destroy(hws_pool->drop_action);
+ mlx5hws_action_destroy(hws_pool->push_vlan_action);
+ mlx5hws_action_destroy(hws_pool->pop_vlan_action);
+ mlx5hws_action_destroy(hws_pool->tag_action);
+}
+
+static int mlx5_cmd_hws_create_ns(struct mlx5_flow_root_namespace *ns)
+{
+ struct mlx5hws_context_attr hws_ctx_attr = {};
+ int err;
+
+ hws_ctx_attr.queues = min_t(int, num_online_cpus(),
+ MLX5HWS_CTX_MAX_NUM_OF_QUEUES);
+ hws_ctx_attr.queue_size = MLX5HWS_CTX_QUEUE_SIZE;
+
+ ns->fs_hws_context.hws_ctx =
+ mlx5hws_context_open(ns->dev, &hws_ctx_attr);
+ if (!ns->fs_hws_context.hws_ctx) {
+ mlx5_core_err(ns->dev, "Failed to create hws flow namespace\n");
+ return -EINVAL;
+ }
+ err = mlx5_fs_init_hws_actions_pool(ns->dev, &ns->fs_hws_context);
+ if (err) {
+ mlx5_core_err(ns->dev, "Failed to init hws actions pool\n");
+ mlx5hws_context_close(ns->fs_hws_context.hws_ctx);
+ return err;
+ }
+ return 0;
+}
+
+static int mlx5_cmd_hws_destroy_ns(struct mlx5_flow_root_namespace *ns)
+{
+ mlx5_fs_cleanup_hws_actions_pool(&ns->fs_hws_context);
+ return mlx5hws_context_close(ns->fs_hws_context.hws_ctx);
+}
+
+static int mlx5_cmd_hws_set_peer(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_root_namespace *peer_ns,
+ u16 peer_vhca_id)
+{
+ struct mlx5hws_context *peer_ctx = NULL;
+
+ if (peer_ns)
+ peer_ctx = peer_ns->fs_hws_context.hws_ctx;
+ mlx5hws_context_set_peer(ns->fs_hws_context.hws_ctx, peer_ctx,
+ peer_vhca_id);
+ return 0;
+}
+
+static int mlx5_fs_set_ft_default_miss(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_table *next_ft)
+{
+ struct mlx5hws_table *next_tbl;
+ int err;
+
+ if (!ns->fs_hws_context.hws_ctx)
+ return -EINVAL;
+
+ /* if no change required, return */
+ if (!next_ft && !ft->fs_hws_table.miss_ft_set)
+ return 0;
+
+ next_tbl = next_ft ? next_ft->fs_hws_table.hws_table : NULL;
+ err = mlx5hws_table_set_default_miss(ft->fs_hws_table.hws_table, next_tbl);
+ if (err) {
+ mlx5_core_err(ns->dev, "Failed setting FT default miss (%d)\n", err);
+ return err;
+ }
+ ft->fs_hws_table.miss_ft_set = !!next_tbl;
+ return 0;
+}
+
+static int mlx5_fs_add_flow_table_dest_action(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+ struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
+ struct mlx5hws_action *dest_ft_action;
+ struct xarray *dests_xa;
+ int err;
+
+ dest_ft_action = mlx5hws_action_create_dest_table_num(fs_ctx->hws_ctx,
+ ft->id, flags);
+ if (!dest_ft_action) {
+ mlx5_core_err(ns->dev, "Failed creating dest table action\n");
+ return -ENOMEM;
+ }
+
+ dests_xa = &fs_ctx->hws_pool.table_dests;
+ err = xa_insert(dests_xa, ft->id, dest_ft_action, GFP_KERNEL);
+ if (err)
+ mlx5hws_action_destroy(dest_ft_action);
+ return err;
+}
+
+static int mlx5_fs_del_flow_table_dest_action(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft)
+{
+ struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
+ struct mlx5hws_action *dest_ft_action;
+ struct xarray *dests_xa;
+ int err;
+
+ dests_xa = &fs_ctx->hws_pool.table_dests;
+ dest_ft_action = xa_erase(dests_xa, ft->id);
+ if (!dest_ft_action) {
+ mlx5_core_err(ns->dev, "Failed to erase dest ft action\n");
+ return -ENOENT;
+ }
+
+ err = mlx5hws_action_destroy(dest_ft_action);
+ if (err)
+ mlx5_core_err(ns->dev, "Failed to destroy dest ft action\n");
+ return err;
+}
+
+static int mlx5_cmd_hws_create_flow_table(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_table_attr *ft_attr,
+ struct mlx5_flow_table *next_ft)
+{
+ struct mlx5hws_context *ctx = ns->fs_hws_context.hws_ctx;
+ struct mlx5hws_table_attr tbl_attr = {};
+ struct mlx5hws_table *tbl;
+ int err;
+
+ if (mlx5_fs_cmd_is_fw_term_table(ft)) {
+ err = mlx5_fs_cmd_get_fw_cmds()->create_flow_table(ns, ft, ft_attr,
+ next_ft);
+ if (err)
+ return err;
+ err = mlx5_fs_add_flow_table_dest_action(ns, ft);
+ if (err)
+ mlx5_fs_cmd_get_fw_cmds()->destroy_flow_table(ns, ft);
+ return err;
+ }
+
+ if (ns->table_type != FS_FT_FDB) {
+ mlx5_core_err(ns->dev, "Table type %d not supported for HWS\n",
+ ns->table_type);
+ return -EOPNOTSUPP;
+ }
+
+ tbl_attr.type = MLX5HWS_TABLE_TYPE_FDB;
+ tbl_attr.level = ft_attr->level;
+ tbl_attr.uid = ft_attr->uid;
+ tbl = mlx5hws_table_create(ctx, &tbl_attr);
+ if (!tbl) {
+ mlx5_core_err(ns->dev, "Failed creating hws flow_table\n");
+ return -EINVAL;
+ }
+
+ ft->fs_hws_table.hws_table = tbl;
+ ft->id = mlx5hws_table_get_id(tbl);
+
+ if (next_ft) {
+ err = mlx5_fs_set_ft_default_miss(ns, ft, next_ft);
+ if (err)
+ goto destroy_table;
+ }
+
+ ft->max_fte = INT_MAX;
+
+ err = mlx5_fs_add_flow_table_dest_action(ns, ft);
+ if (err)
+ goto clear_ft_miss;
+ return 0;
+
+clear_ft_miss:
+ mlx5_fs_set_ft_default_miss(ns, ft, NULL);
+destroy_table:
+ mlx5hws_table_destroy(tbl);
+ ft->fs_hws_table.hws_table = NULL;
+ return err;
+}
+
+static int mlx5_cmd_hws_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft)
+{
+ int err;
+
+ err = mlx5_fs_del_flow_table_dest_action(ns, ft);
+ if (err)
+ mlx5_core_err(ns->dev, "Failed to remove dest action (%d)\n", err);
+
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
+ return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_table(ns, ft);
+
+ err = mlx5_fs_set_ft_default_miss(ns, ft, NULL);
+ if (err)
+ mlx5_core_err(ns->dev, "Failed to disconnect next table (%d)\n", err);
+
+ err = mlx5hws_table_destroy(ft->fs_hws_table.hws_table);
+ if (err)
+ mlx5_core_err(ns->dev, "Failed to destroy flow_table (%d)\n", err);
+
+ return err;
+}
+
+static int mlx5_cmd_hws_modify_flow_table(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_table *next_ft)
+{
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
+ return mlx5_fs_cmd_get_fw_cmds()->modify_flow_table(ns, ft, next_ft);
+
+ return mlx5_fs_set_ft_default_miss(ns, ft, next_ft);
+}
+
+static int mlx5_cmd_hws_update_root_ft(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ u32 underlay_qpn,
+ bool disconnect)
+{
+ return mlx5_fs_cmd_get_fw_cmds()->update_root_ft(ns, ft, underlay_qpn,
+ disconnect);
+}
+
+static int mlx5_cmd_hws_create_flow_group(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft, u32 *in,
+ struct mlx5_flow_group *fg)
+{
+ struct mlx5hws_match_parameters mask;
+ struct mlx5hws_bwc_matcher *matcher;
+ u8 match_criteria_enable;
+ u32 priority;
+
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
+ return mlx5_fs_cmd_get_fw_cmds()->create_flow_group(ns, ft, in, fg);
+
+ mask.match_buf = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+ mask.match_sz = sizeof(fg->mask.match_criteria);
+
+ match_criteria_enable = MLX5_GET(create_flow_group_in, in,
+ match_criteria_enable);
+ priority = MLX5_GET(create_flow_group_in, in, start_flow_index);
+ matcher = mlx5hws_bwc_matcher_create(ft->fs_hws_table.hws_table,
+ priority, match_criteria_enable,
+ &mask);
+ if (!matcher) {
+ mlx5_core_err(ns->dev, "Failed creating matcher\n");
+ return -EINVAL;
+ }
+
+ fg->fs_hws_matcher.matcher = matcher;
+ return 0;
+}
+
+static int mlx5_cmd_hws_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_group *fg)
+{
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
+ return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_group(ns, ft, fg);
+
+ return mlx5hws_bwc_matcher_destroy(fg->fs_hws_matcher.matcher);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_dest_action_ft(struct mlx5_fs_hws_context *fs_ctx,
+ struct mlx5_flow_rule *dst)
+{
+ return xa_load(&fs_ctx->hws_pool.table_dests, dst->dest_attr.ft->id);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_dest_action_table_num(struct mlx5_fs_hws_context *fs_ctx,
+ struct mlx5_flow_rule *dst)
+{
+ u32 table_num = dst->dest_attr.ft_num;
+
+ return xa_load(&fs_ctx->hws_pool.table_dests, table_num);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_create_dest_action_table_num(struct mlx5_fs_hws_context *fs_ctx,
+ struct mlx5_flow_rule *dst)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+ struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
+ u32 table_num = dst->dest_attr.ft_num;
+
+ return mlx5hws_action_create_dest_table_num(ctx, table_num, flags);
+}
+
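+/* Get or lazily create a vport destination action, cached in an xarray keyed
+ * by vport number (and vhca_id when it is valid). A racing insert is handled
+ * by retrying the lookup.
+ */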
+static struct mlx5hws_action *
+mlx5_fs_get_dest_action_vport(struct mlx5_fs_hws_context *fs_ctx,
+ struct mlx5_flow_rule *dst,
+ bool is_dest_type_uplink)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+ struct mlx5_flow_destination *dest_attr = &dst->dest_attr;
+ struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
+ struct mlx5hws_action *dest;
+ struct xarray *dests_xa;
+ bool vhca_id_valid;
+ unsigned long idx;
+ u16 vport_num;
+ int err;
+
+ vhca_id_valid = is_dest_type_uplink ||
+ (dest_attr->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID);
+ vport_num = is_dest_type_uplink ? MLX5_VPORT_UPLINK : dest_attr->vport.num;
+ if (vhca_id_valid) {
+ dests_xa = &fs_ctx->hws_pool.vport_vhca_dests;
+ idx = (unsigned long)dest_attr->vport.vhca_id << 16 | vport_num;
+ } else {
+ dests_xa = &fs_ctx->hws_pool.vport_dests;
+ idx = vport_num;
+ }
+dest_load:
+ dest = xa_load(dests_xa, idx);
+ if (dest)
+ return dest;
+
+ dest = mlx5hws_action_create_dest_vport(ctx, vport_num, vhca_id_valid,
+ dest_attr->vport.vhca_id, flags);
+
+ err = xa_insert(dests_xa, idx, dest, GFP_KERNEL);
+ if (err) {
+ mlx5hws_action_destroy(dest);
+ dest = NULL;
+
+ if (err == -EBUSY)
+ /* xarray entry was already stored by another thread */
+ goto dest_load;
+ }
+
+ return dest;
+}
+
+static struct mlx5hws_action *
+mlx5_fs_create_dest_action_range(struct mlx5hws_context *ctx,
+ struct mlx5_flow_rule *dst)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+ struct mlx5_flow_destination *dest_attr = &dst->dest_attr;
+
+ return mlx5hws_action_create_dest_match_range(ctx,
+ dest_attr->range.field,
+ dest_attr->range.hit_ft,
+ dest_attr->range.miss_ft,
+ dest_attr->range.min,
+ dest_attr->range.max,
+ flags);
+}
+
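+/* Look up the cached entry for the given index, allocating and inserting a
+ * new zero-refcount entry under the xarray lock if none exists yet.
+ */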
+static struct mlx5_fs_hws_data *
+mlx5_fs_get_cached_hws_data(struct xarray *cache_xa, unsigned long index)
+{
+ struct mlx5_fs_hws_data *fs_hws_data;
+ int err;
+
+ xa_lock(cache_xa);
+ fs_hws_data = xa_load(cache_xa, index);
+ if (!fs_hws_data) {
+ fs_hws_data = kzalloc(sizeof(*fs_hws_data), GFP_ATOMIC);
+ if (!fs_hws_data) {
+ xa_unlock(cache_xa);
+ return NULL;
+ }
+ refcount_set(&fs_hws_data->hws_action_refcount, 0);
+ mutex_init(&fs_hws_data->lock);
+ err = __xa_insert(cache_xa, index, fs_hws_data, GFP_ATOMIC);
+ if (err) {
+ kfree(fs_hws_data);
+ xa_unlock(cache_xa);
+ return NULL;
+ }
+ }
+ xa_unlock(cache_xa);
+
+ return fs_hws_data;
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_action_aso_meter(struct mlx5_fs_hws_context *fs_ctx,
+ struct mlx5_exe_aso *exe_aso)
+{
+ struct mlx5_fs_hws_create_action_ctx create_ctx;
+ struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
+ struct mlx5_fs_hws_data *meter_hws_data;
+ u32 id = exe_aso->base_id;
+ struct xarray *meters_xa;
+
+ meters_xa = &fs_ctx->hws_pool.aso_meters;
+ meter_hws_data = mlx5_fs_get_cached_hws_data(meters_xa, id);
+ if (!meter_hws_data)
+ return NULL;
+
+ create_ctx.hws_ctx = ctx;
+ create_ctx.actions_type = MLX5HWS_ACTION_TYP_ASO_METER;
+ create_ctx.id = id;
+ create_ctx.return_reg_id = exe_aso->return_reg_id;
+
+ return mlx5_fs_get_hws_action(meter_hws_data, &create_ctx);
+}
+
+static void mlx5_fs_put_action_aso_meter(struct mlx5_fs_hws_context *fs_ctx,
+ struct mlx5_exe_aso *exe_aso)
+{
+ struct mlx5_fs_hws_data *meter_hws_data;
+ struct xarray *meters_xa;
+
+ meters_xa = &fs_ctx->hws_pool.aso_meters;
+ meter_hws_data = xa_load(meters_xa, exe_aso->base_id);
+ if (!meter_hws_data)
+ return;
+ return mlx5_fs_put_hws_action(meter_hws_data);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_dest_action_sampler(struct mlx5_fs_hws_context *fs_ctx,
+ struct mlx5_flow_rule *dst)
+{
+ struct mlx5_fs_hws_create_action_ctx create_ctx;
+ struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
+ struct mlx5_fs_hws_data *sampler_hws_data;
+ u32 id = dst->dest_attr.sampler_id;
+ struct xarray *sampler_xa;
+
+ sampler_xa = &fs_ctx->hws_pool.sample_dests;
+ sampler_hws_data = mlx5_fs_get_cached_hws_data(sampler_xa, id);
+ if (!sampler_hws_data)
+ return NULL;
+
+ create_ctx.hws_ctx = ctx;
+ create_ctx.actions_type = MLX5HWS_ACTION_TYP_SAMPLER;
+ create_ctx.id = id;
+
+ return mlx5_fs_get_hws_action(sampler_hws_data, &create_ctx);
+}
+
+static void mlx5_fs_put_dest_action_sampler(struct mlx5_fs_hws_context *fs_ctx,
+ u32 sampler_id)
+{
+ struct mlx5_fs_hws_data *sampler_hws_data;
+ struct xarray *sampler_xa;
+
+ sampler_xa = &fs_ctx->hws_pool.sample_dests;
+ sampler_hws_data = xa_load(sampler_xa, sampler_id);
+ if (!sampler_hws_data)
+ return;
+
+ mlx5_fs_put_hws_action(sampler_hws_data);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_create_action_dest_array(struct mlx5hws_context *ctx,
+ struct mlx5hws_action_dest_attr *dests,
+ u32 num_of_dests)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+
+ return mlx5hws_action_create_dest_array(ctx, num_of_dests, dests,
+ flags);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_action_push_vlan(struct mlx5_fs_hws_context *fs_ctx)
+{
+ return fs_ctx->hws_pool.push_vlan_action;
+}
+
+static u32 mlx5_fs_calc_vlan_hdr(struct mlx5_fs_vlan *vlan)
+{
+ u16 n_ethtype = vlan->ethtype;
+ u8 prio = vlan->prio;
+ u16 vid = vlan->vid;
+
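+	/* Pack ethertype, priority and VID into the 32-bit VLAN header value
+	 * expected by the push VLAN action.
+	 */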
+ return (u32)n_ethtype << 16 | (u32)(prio) << 12 | (u32)vid;
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_action_pop_vlan(struct mlx5_fs_hws_context *fs_ctx)
+{
+ return fs_ctx->hws_pool.pop_vlan_action;
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_action_decap_tnl_l2_to_l2(struct mlx5_fs_hws_context *fs_ctx)
+{
+ return fs_ctx->hws_pool.decapl2_action;
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_dest_action_drop(struct mlx5_fs_hws_context *fs_ctx)
+{
+ return fs_ctx->hws_pool.drop_action;
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_action_tag(struct mlx5_fs_hws_context *fs_ctx)
+{
+ return fs_ctx->hws_pool.tag_action;
+}
+
+static struct mlx5hws_action *
+mlx5_fs_create_action_last(struct mlx5hws_context *ctx)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+
+ return mlx5hws_action_create_last(ctx, flags);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_create_hws_action(struct mlx5_fs_hws_create_action_ctx *create_ctx)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+
+ switch (create_ctx->actions_type) {
+ case MLX5HWS_ACTION_TYP_CTR:
+ return mlx5hws_action_create_counter(create_ctx->hws_ctx,
+ create_ctx->id, flags);
+ case MLX5HWS_ACTION_TYP_ASO_METER:
+ return mlx5hws_action_create_aso_meter(create_ctx->hws_ctx,
+ create_ctx->id,
+ create_ctx->return_reg_id,
+ flags);
+ case MLX5HWS_ACTION_TYP_SAMPLER:
+ return mlx5hws_action_create_flow_sampler(create_ctx->hws_ctx,
+ create_ctx->id, flags);
+ default:
+ return NULL;
+ }
+}
+
+struct mlx5hws_action *
+mlx5_fs_get_hws_action(struct mlx5_fs_hws_data *fs_hws_data,
+ struct mlx5_fs_hws_create_action_ctx *create_ctx)
+{
+	/* try to avoid locking if not necessary */
+ if (refcount_inc_not_zero(&fs_hws_data->hws_action_refcount))
+ return fs_hws_data->hws_action;
+
+ mutex_lock(&fs_hws_data->lock);
+ if (refcount_inc_not_zero(&fs_hws_data->hws_action_refcount)) {
+ mutex_unlock(&fs_hws_data->lock);
+ return fs_hws_data->hws_action;
+ }
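+	/* Slow path: create the shared action under the lock and publish it
+	 * with an initial refcount of one.
+	 */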
+ fs_hws_data->hws_action = mlx5_fs_create_hws_action(create_ctx);
+ if (!fs_hws_data->hws_action) {
+ mutex_unlock(&fs_hws_data->lock);
+ return NULL;
+ }
+ refcount_set(&fs_hws_data->hws_action_refcount, 1);
+ mutex_unlock(&fs_hws_data->lock);
+
+ return fs_hws_data->hws_action;
+}
+
+void mlx5_fs_put_hws_action(struct mlx5_fs_hws_data *fs_hws_data)
+{
+ if (!fs_hws_data)
+ return;
+
+	/* try to avoid locking if not necessary */
+ if (refcount_dec_not_one(&fs_hws_data->hws_action_refcount))
+ return;
+
+ mutex_lock(&fs_hws_data->lock);
+ if (!refcount_dec_and_test(&fs_hws_data->hws_action_refcount)) {
+ mutex_unlock(&fs_hws_data->lock);
+ return;
+ }
+ mlx5hws_action_destroy(fs_hws_data->hws_action);
+ fs_hws_data->hws_action = NULL;
+ mutex_unlock(&fs_hws_data->lock);
+}
+
+static void mlx5_fs_destroy_fs_action(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_fs_hws_rule_action *fs_action)
+{
+ struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
+
+ switch (mlx5hws_action_get_type(fs_action->action)) {
+ case MLX5HWS_ACTION_TYP_CTR:
+ mlx5_fc_put_hws_action(fs_action->counter);
+ break;
+ case MLX5HWS_ACTION_TYP_ASO_METER:
+ mlx5_fs_put_action_aso_meter(fs_ctx, fs_action->exe_aso);
+ break;
+ case MLX5HWS_ACTION_TYP_SAMPLER:
+ mlx5_fs_put_dest_action_sampler(fs_ctx, fs_action->sampler_id);
+ break;
+ default:
+ mlx5hws_action_destroy(fs_action->action);
+ }
+}
+
+static void
+mlx5_fs_destroy_fs_actions(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_fs_hws_rule_action **fs_actions,
+ int *num_fs_actions)
+{
+ int i;
+
+ /* Free in reverse order to handle action dependencies */
+ for (i = *num_fs_actions - 1; i >= 0; i--)
+ mlx5_fs_destroy_fs_action(ns, *fs_actions + i);
+ *num_fs_actions = 0;
+ kfree(*fs_actions);
+ *fs_actions = NULL;
+}
+
+/* Splits FTE's actions into cached, rule and destination actions.
+ * The cached and destination actions are saved on the fte hws rule.
+ * The rule actions are returned as a parameter, together with their count.
+ * We want to support a rule with 32 destinations, which means we need to
+ * account for 32 destinations plus usually a counter plus one more action
+ * for a multi-destination flow table.
+ * 32 is a SW limitation for the array size; keep it. The HWS limitation is 16M STEs per matcher.
+ */
+#define MLX5_FLOW_CONTEXT_ACTION_MAX 34
+static int mlx5_fs_fte_get_hws_actions(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_group *group,
+ struct fs_fte *fte,
+ struct mlx5hws_rule_action **ractions)
+{
+ struct mlx5_flow_act *fte_action = &fte->act_dests.action;
+ struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
+ struct mlx5hws_action_dest_attr *dest_actions;
+ struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
+ struct mlx5_fs_hws_rule_action *fs_actions;
+ struct mlx5_core_dev *dev = ns->dev;
+ struct mlx5hws_action *dest_action;
+ struct mlx5hws_action *tmp_action;
+ struct mlx5_fs_hws_pr *pr_data;
+ struct mlx5_fs_hws_mh *mh_data;
+ bool delay_encap_set = false;
+ struct mlx5_flow_rule *dst;
+ int num_dest_actions = 0;
+ int num_fs_actions = 0;
+ int num_actions = 0;
+ int err;
+
+ *ractions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX, sizeof(**ractions),
+ GFP_KERNEL);
+ if (!*ractions) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ fs_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX,
+ sizeof(*fs_actions), GFP_KERNEL);
+ if (!fs_actions) {
+ err = -ENOMEM;
+ goto free_actions_alloc;
+ }
+
+ dest_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX,
+ sizeof(*dest_actions), GFP_KERNEL);
+ if (!dest_actions) {
+ err = -ENOMEM;
+ goto free_fs_actions_alloc;
+ }
+
+	/* The order of the actions must be kept; only the following
+ * order is supported by HW steering:
+ * HWS: decap -> remove_hdr -> pop_vlan -> modify header -> push_vlan
+ * -> reformat (insert_hdr/encap) -> ctr -> tag -> aso
+ * -> drop -> FWD:tbl/vport/sampler/tbl_num/range -> dest_array -> last
+ */
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
+ tmp_action = mlx5_fs_get_action_decap_tnl_l2_to_l2(fs_ctx);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_dest_actions_alloc;
+ }
+ (*ractions)[num_actions++].action = tmp_action;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
+ int reformat_type = fte_action->pkt_reformat->reformat_type;
+
+ if (fte_action->pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_FW) {
+ mlx5_core_err(dev, "FW-owned reformat can't be used in HWS rule\n");
+ err = -EINVAL;
+ goto free_actions;
+ }
+
+ if (reformat_type == MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2) {
+ pr_data = fte_action->pkt_reformat->fs_hws_action.pr_data;
+ (*ractions)[num_actions].reformat.offset = pr_data->offset;
+ (*ractions)[num_actions].reformat.hdr_idx = pr_data->hdr_idx;
+ (*ractions)[num_actions].reformat.data = pr_data->data;
+ (*ractions)[num_actions++].action =
+ fte_action->pkt_reformat->fs_hws_action.hws_action;
+ } else if (reformat_type == MLX5_REFORMAT_TYPE_REMOVE_HDR) {
+ (*ractions)[num_actions++].action =
+ fte_action->pkt_reformat->fs_hws_action.hws_action;
+ } else {
+ delay_encap_set = true;
+ }
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
+ tmp_action = mlx5_fs_get_action_pop_vlan(fs_ctx);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ (*ractions)[num_actions++].action = tmp_action;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2) {
+ tmp_action = mlx5_fs_get_action_pop_vlan(fs_ctx);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ (*ractions)[num_actions++].action = tmp_action;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
+ mh_data = fte_action->modify_hdr->fs_hws_action.mh_data;
+ (*ractions)[num_actions].modify_header.offset = mh_data->offset;
+ (*ractions)[num_actions].modify_header.data = mh_data->data;
+ (*ractions)[num_actions++].action =
+ fte_action->modify_hdr->fs_hws_action.hws_action;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
+ tmp_action = mlx5_fs_get_action_push_vlan(fs_ctx);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ (*ractions)[num_actions].push_vlan.vlan_hdr =
+ htonl(mlx5_fs_calc_vlan_hdr(&fte_action->vlan[0]));
+ (*ractions)[num_actions++].action = tmp_action;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
+ tmp_action = mlx5_fs_get_action_push_vlan(fs_ctx);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ (*ractions)[num_actions].push_vlan.vlan_hdr =
+ htonl(mlx5_fs_calc_vlan_hdr(&fte_action->vlan[1]));
+ (*ractions)[num_actions++].action = tmp_action;
+ }
+
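+	/* Insert-header and encap reformats must follow the push VLAN actions,
+	 * so they were deferred above.
+	 */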
+ if (delay_encap_set) {
+ pr_data = fte_action->pkt_reformat->fs_hws_action.pr_data;
+ (*ractions)[num_actions].reformat.offset = pr_data->offset;
+ (*ractions)[num_actions].reformat.data = pr_data->data;
+ (*ractions)[num_actions++].action =
+ fte_action->pkt_reformat->fs_hws_action.hws_action;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ list_for_each_entry(dst, &fte->node.children, node.list) {
+ struct mlx5_fc *counter;
+
+ if (dst->dest_attr.type !=
+ MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+ continue;
+
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+
+ counter = dst->dest_attr.counter;
+ tmp_action = mlx5_fc_get_hws_action(ctx, counter);
+ if (!tmp_action) {
+ err = -EINVAL;
+ goto free_actions;
+ }
+
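+			/* HWS counter actions are created per bulk; pass the
+			 * counter's offset within its bulk.
+			 */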
+ (*ractions)[num_actions].counter.offset =
+ mlx5_fc_id(counter) - mlx5_fc_get_base_id(counter);
+ (*ractions)[num_actions++].action = tmp_action;
+ fs_actions[num_fs_actions].action = tmp_action;
+ fs_actions[num_fs_actions++].counter = counter;
+ }
+ }
+
+ if (fte->act_dests.flow_context.flow_tag) {
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+ tmp_action = mlx5_fs_get_action_tag(fs_ctx);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ (*ractions)[num_actions].tag.value = fte->act_dests.flow_context.flow_tag;
+ (*ractions)[num_actions++].action = tmp_action;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
+ if (fte_action->exe_aso.type != MLX5_EXE_ASO_FLOW_METER ||
+ num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+
+ tmp_action = mlx5_fs_get_action_aso_meter(fs_ctx,
+ &fte_action->exe_aso);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ (*ractions)[num_actions].aso_meter.offset =
+ fte_action->exe_aso.flow_meter.meter_idx;
+ (*ractions)[num_actions].aso_meter.init_color =
+ fte_action->exe_aso.flow_meter.init_color;
+ (*ractions)[num_actions++].action = tmp_action;
+ fs_actions[num_fs_actions].action = tmp_action;
+ fs_actions[num_fs_actions++].exe_aso = &fte_action->exe_aso;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
+ dest_action = mlx5_fs_get_dest_action_drop(fs_ctx);
+ if (!dest_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ dest_actions[num_dest_actions++].dest = dest_action;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ list_for_each_entry(dst, &fte->node.children, node.list) {
+ struct mlx5_flow_destination *attr = &dst->dest_attr;
+ bool type_uplink =
+ attr->type == MLX5_FLOW_DESTINATION_TYPE_UPLINK;
+
+ if (num_fs_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
+ num_dest_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+ if (attr->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+ continue;
+
+ switch (attr->type) {
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
+ dest_action = mlx5_fs_get_dest_action_ft(fs_ctx, dst);
+ if (dst->dest_attr.ft->flags &
+ MLX5_FLOW_TABLE_UPLINK_VPORT)
+ dest_actions[num_dest_actions].is_wire_ft = true;
+ break;
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
+ dest_action = mlx5_fs_get_dest_action_table_num(fs_ctx,
+ dst);
+ if (dest_action)
+ break;
+ dest_action = mlx5_fs_create_dest_action_table_num(fs_ctx,
+ dst);
+ fs_actions[num_fs_actions++].action = dest_action;
+ break;
+ case MLX5_FLOW_DESTINATION_TYPE_RANGE:
+ dest_action = mlx5_fs_create_dest_action_range(ctx, dst);
+ fs_actions[num_fs_actions++].action = dest_action;
+ break;
+ case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
+ case MLX5_FLOW_DESTINATION_TYPE_VPORT:
+ dest_action = mlx5_fs_get_dest_action_vport(fs_ctx, dst,
+ type_uplink);
+ break;
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
+ dest_action =
+ mlx5_fs_get_dest_action_sampler(fs_ctx,
+ dst);
+ fs_actions[num_fs_actions].action = dest_action;
+ fs_actions[num_fs_actions++].sampler_id =
+ dst->dest_attr.sampler_id;
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+ if (!dest_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ dest_actions[num_dest_actions++].dest = dest_action;
+ }
+ }
+
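+	/* A single destination is used directly; multiple destinations are
+	 * wrapped in a destination array action.
+	 */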
+ if (num_dest_actions == 1) {
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+ (*ractions)[num_actions++].action = dest_actions->dest;
+ } else if (num_dest_actions > 1) {
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
+ num_fs_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+ tmp_action =
+ mlx5_fs_create_action_dest_array(ctx, dest_actions,
+ num_dest_actions);
+ if (!tmp_action) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+ fs_actions[num_fs_actions++].action = tmp_action;
+ (*ractions)[num_actions++].action = tmp_action;
+ }
+
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
+ num_fs_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+
+ tmp_action = mlx5_fs_create_action_last(ctx);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ fs_actions[num_fs_actions++].action = tmp_action;
+ (*ractions)[num_actions++].action = tmp_action;
+
+ kfree(dest_actions);
+
+ /* Actions created specifically for this rule will be destroyed
+	 * once the rule is deleted.
+ */
+ fte->fs_hws_rule.num_fs_actions = num_fs_actions;
+ fte->fs_hws_rule.hws_fs_actions = fs_actions;
+
+ return 0;
+
+free_actions:
+ mlx5_fs_destroy_fs_actions(ns, &fs_actions, &num_fs_actions);
+free_dest_actions_alloc:
+ kfree(dest_actions);
+free_fs_actions_alloc:
+ kfree(fs_actions);
+free_actions_alloc:
+ kfree(*ractions);
+ *ractions = NULL;
+out_err:
+ return err;
+}
+
+static int mlx5_cmd_hws_create_fte(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_group *group,
+ struct fs_fte *fte)
+{
+ struct mlx5hws_match_parameters params;
+ struct mlx5hws_rule_action *ractions;
+ struct mlx5hws_bwc_rule *rule;
+ int err = 0;
+
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
+ return mlx5_fs_cmd_get_fw_cmds()->create_fte(ns, ft, group, fte);
+
+ err = mlx5_fs_fte_get_hws_actions(ns, ft, group, fte, &ractions);
+ if (err)
+ goto out_err;
+
+ params.match_sz = sizeof(fte->val);
+ params.match_buf = fte->val;
+
+ rule = mlx5hws_bwc_rule_create(group->fs_hws_matcher.matcher, &params,
+ fte->act_dests.flow_context.flow_source,
+ ractions);
+ kfree(ractions);
+ if (!rule) {
+ err = -EINVAL;
+ goto free_actions;
+ }
+
+ fte->fs_hws_rule.bwc_rule = rule;
+ return 0;
+
+free_actions:
+ mlx5_fs_destroy_fs_actions(ns, &fte->fs_hws_rule.hws_fs_actions,
+ &fte->fs_hws_rule.num_fs_actions);
+out_err:
+ mlx5_core_err(ns->dev, "Failed to create hws rule err(%d)\n", err);
+ return err;
+}
+
+static int mlx5_cmd_hws_delete_fte(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct fs_fte *fte)
+{
+ struct mlx5_fs_hws_rule *rule = &fte->fs_hws_rule;
+ int err;
+
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
+ return mlx5_fs_cmd_get_fw_cmds()->delete_fte(ns, ft, fte);
+
+ err = mlx5hws_bwc_rule_destroy(rule->bwc_rule);
+ rule->bwc_rule = NULL;
+
+ mlx5_fs_destroy_fs_actions(ns, &rule->hws_fs_actions,
+ &rule->num_fs_actions);
+
+ return err;
+}
+
+static int mlx5_cmd_hws_update_fte(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_group *group,
+ int modify_mask,
+ struct fs_fte *fte)
+{
+ int allowed_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
+ BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST) |
+ BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
+ struct mlx5_fs_hws_rule_action *saved_hws_fs_actions;
+ struct mlx5hws_rule_action *ractions;
+ int saved_num_fs_actions;
+ int ret;
+
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
+ return mlx5_fs_cmd_get_fw_cmds()->update_fte(ns, ft, group,
+ modify_mask, fte);
+
+ if ((modify_mask & ~allowed_mask) != 0)
+ return -EINVAL;
+
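+	/* Save the current rule actions so they can be restored if the
+	 * update fails.
+	 */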
+ saved_hws_fs_actions = fte->fs_hws_rule.hws_fs_actions;
+ saved_num_fs_actions = fte->fs_hws_rule.num_fs_actions;
+
+ ret = mlx5_fs_fte_get_hws_actions(ns, ft, group, fte, &ractions);
+ if (ret)
+ return ret;
+
+ ret = mlx5hws_bwc_rule_action_update(fte->fs_hws_rule.bwc_rule, ractions);
+ kfree(ractions);
+ if (ret)
+ goto restore_actions;
+
+ mlx5_fs_destroy_fs_actions(ns, &saved_hws_fs_actions,
+ &saved_num_fs_actions);
+ return ret;
+
+restore_actions:
+ mlx5_fs_destroy_fs_actions(ns, &fte->fs_hws_rule.hws_fs_actions,
+ &fte->fs_hws_rule.num_fs_actions);
+ fte->fs_hws_rule.hws_fs_actions = saved_hws_fs_actions;
+ fte->fs_hws_rule.num_fs_actions = saved_num_fs_actions;
+ return ret;
+}
+
+static struct mlx5hws_action *
+mlx5_fs_create_action_remove_header_vlan(struct mlx5hws_context *ctx)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+ struct mlx5hws_action_remove_header_attr remove_hdr_vlan = {};
+
+ /* MAC anchor not supported in HWS reformat, use VLAN anchor */
+ remove_hdr_vlan.anchor = MLX5_REFORMAT_CONTEXT_ANCHOR_VLAN_START;
+ remove_hdr_vlan.offset = 0;
+ remove_hdr_vlan.size = sizeof(struct vlan_hdr);
+ return mlx5hws_action_create_remove_header(ctx, &remove_hdr_vlan, flags);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_action_remove_header_vlan(struct mlx5_fs_hws_context *fs_ctx,
+ struct mlx5_pkt_reformat_params *params)
+{
+ if (!params ||
+ params->param_0 != MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START ||
+ params->param_1 != offsetof(struct vlan_ethhdr, h_vlan_proto) ||
+ params->size != sizeof(struct vlan_hdr))
+ return NULL;
+
+ return fs_ctx->hws_pool.remove_hdr_vlan_action;
+}
+
+static int
+mlx5_fs_verify_insert_header_params(struct mlx5_core_dev *mdev,
+ struct mlx5_pkt_reformat_params *params)
+{
+ if ((!params->data && params->size) || (params->data && !params->size) ||
+ MLX5_CAP_GEN_2(mdev, max_reformat_insert_size) < params->size ||
+ MLX5_CAP_GEN_2(mdev, max_reformat_insert_offset) < params->param_1) {
+ mlx5_core_err(mdev, "Invalid reformat params for INSERT_HDR\n");
+ return -EINVAL;
+ }
+ if (params->param_0 != MLX5_FS_INSERT_HDR_VLAN_ANCHOR ||
+ params->param_1 != MLX5_FS_INSERT_HDR_VLAN_OFFSET ||
+ params->size != MLX5_FS_INSERT_HDR_VLAN_SIZE) {
+ mlx5_core_err(mdev, "Only vlan insert header supported\n");
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static int
+mlx5_fs_verify_encap_decap_params(struct mlx5_core_dev *dev,
+ struct mlx5_pkt_reformat_params *params)
+{
+ if (params->param_0 || params->param_1) {
+ mlx5_core_err(dev, "Invalid reformat params\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static struct mlx5_fs_pool *
+mlx5_fs_get_pr_encap_pool(struct mlx5_core_dev *dev, struct xarray *pr_pools,
+ enum mlx5hws_action_type reformat_type, size_t size)
+{
+ struct mlx5_fs_pool *pr_pool;
+ unsigned long index = size;
+ int err;
+
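+	/* Encap packet reformat pools are keyed by the encap data size;
+	 * create a pool lazily the first time a size is seen.
+	 */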
+ pr_pool = xa_load(pr_pools, index);
+ if (pr_pool)
+ return pr_pool;
+
+ pr_pool = kzalloc(sizeof(*pr_pool), GFP_KERNEL);
+ if (!pr_pool)
+ return ERR_PTR(-ENOMEM);
+ err = mlx5_fs_hws_pr_pool_init(pr_pool, dev, size, reformat_type);
+ if (err)
+ goto free_pr_pool;
+ err = xa_insert(pr_pools, index, pr_pool, GFP_KERNEL);
+ if (err)
+ goto cleanup_pr_pool;
+ return pr_pool;
+
+cleanup_pr_pool:
+ mlx5_fs_hws_pr_pool_cleanup(pr_pool);
+free_pr_pool:
+ kfree(pr_pool);
+ return ERR_PTR(err);
+}
+
+static void
+mlx5_fs_destroy_pr_pool(struct mlx5_fs_pool *pool, struct xarray *pr_pools,
+ unsigned long index)
+{
+ xa_erase(pr_pools, index);
+ mlx5_fs_hws_pr_pool_cleanup(pool);
+ kfree(pool);
+}
+
+static int
+mlx5_cmd_hws_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_pkt_reformat_params *params,
+ enum mlx5_flow_namespace_type namespace,
+ struct mlx5_pkt_reformat *pkt_reformat)
+{
+ struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
+ struct mlx5_fs_hws_actions_pool *hws_pool;
+ struct mlx5hws_action *hws_action = NULL;
+ struct mlx5_fs_hws_pr *pr_data = NULL;
+ struct mlx5_fs_pool *pr_pool = NULL;
+ struct mlx5_core_dev *dev = ns->dev;
+ u8 hdr_idx = 0;
+ int err;
+
+ if (!params)
+ return -EINVAL;
+
+ hws_pool = &fs_ctx->hws_pool;
+
+ switch (params->type) {
+ case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
+ case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
+ case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
+ if (mlx5_fs_verify_encap_decap_params(dev, params))
+ return -EINVAL;
+ pr_pool = mlx5_fs_get_pr_encap_pool(dev, &hws_pool->el2tol2tnl_pools,
+ MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2,
+ params->size);
+ if (IS_ERR(pr_pool))
+ return PTR_ERR(pr_pool);
+ break;
+ case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
+ if (mlx5_fs_verify_encap_decap_params(dev, params))
+ return -EINVAL;
+ pr_pool = mlx5_fs_get_pr_encap_pool(dev, &hws_pool->el2tol3tnl_pools,
+ MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3,
+ params->size);
+ if (IS_ERR(pr_pool))
+ return PTR_ERR(pr_pool);
+ break;
+ case MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
+ if (mlx5_fs_verify_encap_decap_params(dev, params))
+ return -EINVAL;
+ pr_pool = &hws_pool->dl3tnltol2_pool;
+ hdr_idx = params->size == ETH_HLEN ?
+ MLX5_FS_DL3TNLTOL2_MAC_HDR_IDX :
+ MLX5_FS_DL3TNLTOL2_MAC_VLAN_HDR_IDX;
+ break;
+ case MLX5_REFORMAT_TYPE_INSERT_HDR:
+ err = mlx5_fs_verify_insert_header_params(dev, params);
+ if (err)
+ return err;
+ pr_pool = &hws_pool->insert_hdr_pool;
+ break;
+ case MLX5_REFORMAT_TYPE_REMOVE_HDR:
+ hws_action = mlx5_fs_get_action_remove_header_vlan(fs_ctx, params);
+ if (!hws_action)
+ mlx5_core_err(dev, "Only vlan remove header supported\n");
+ break;
+ default:
+ mlx5_core_err(ns->dev, "Packet-reformat not supported(%d)\n",
+ params->type);
+ return -EOPNOTSUPP;
+ }
+
+ if (pr_pool) {
+ pr_data = mlx5_fs_hws_pr_pool_acquire_pr(pr_pool);
+ if (IS_ERR_OR_NULL(pr_data))
+ return !pr_data ? -EINVAL : PTR_ERR(pr_data);
+ hws_action = pr_data->bulk->hws_action;
+ if (!hws_action) {
+ mlx5_core_err(dev,
+ "Failed allocating packet-reformat action\n");
+ err = -EINVAL;
+ goto release_pr;
+ }
+ pr_data->data = kmemdup(params->data, params->size, GFP_KERNEL);
+ if (!pr_data->data) {
+ err = -ENOMEM;
+ goto release_pr;
+ }
+ pr_data->hdr_idx = hdr_idx;
+ pr_data->data_size = params->size;
+ pkt_reformat->fs_hws_action.pr_data = pr_data;
+ }
+
+ mutex_init(&pkt_reformat->fs_hws_action.lock);
+ pkt_reformat->owner = MLX5_FLOW_RESOURCE_OWNER_HWS;
+ pkt_reformat->fs_hws_action.hws_action = hws_action;
+ return 0;
+
+release_pr:
+ if (pr_pool && pr_data)
+ mlx5_fs_hws_pr_pool_release_pr(pr_pool, pr_data);
+ return err;
+}
+
+static void mlx5_cmd_hws_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_pkt_reformat *pkt_reformat)
+{
+ struct mlx5_fs_hws_actions_pool *hws_pool = &ns->fs_hws_context.hws_pool;
+ struct mlx5_core_dev *dev = ns->dev;
+ struct mlx5_fs_hws_pr *pr_data;
+ struct mlx5_fs_pool *pr_pool;
+
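+	/* If a FW packet reformat object was allocated on demand for this HWS
+	 * reformat, release it first.
+	 */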
+ if (pkt_reformat->fs_hws_action.fw_reformat_id != 0) {
+ struct mlx5_pkt_reformat fw_pkt_reformat = { 0 };
+
+ fw_pkt_reformat.id = pkt_reformat->fs_hws_action.fw_reformat_id;
+ mlx5_fs_cmd_get_fw_cmds()->
+ packet_reformat_dealloc(ns, &fw_pkt_reformat);
+ pkt_reformat->fs_hws_action.fw_reformat_id = 0;
+ }
+
+ if (pkt_reformat->reformat_type == MLX5_REFORMAT_TYPE_REMOVE_HDR)
+ return;
+
+ if (!pkt_reformat->fs_hws_action.pr_data) {
+ mlx5_core_err(ns->dev, "Failed release packet-reformat\n");
+ return;
+ }
+ pr_data = pkt_reformat->fs_hws_action.pr_data;
+
+ switch (pkt_reformat->reformat_type) {
+ case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
+ case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
+ case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
+ pr_pool = mlx5_fs_get_pr_encap_pool(dev, &hws_pool->el2tol2tnl_pools,
+ MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2,
+ pr_data->data_size);
+ break;
+ case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
+		pr_pool = mlx5_fs_get_pr_encap_pool(dev, &hws_pool->el2tol3tnl_pools,
+						    MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3,
+ pr_data->data_size);
+ break;
+ case MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
+ pr_pool = &hws_pool->dl3tnltol2_pool;
+ break;
+ case MLX5_REFORMAT_TYPE_INSERT_HDR:
+ pr_pool = &hws_pool->insert_hdr_pool;
+ break;
+ default:
+ mlx5_core_err(ns->dev, "Unknown packet-reformat type\n");
+ return;
+ }
+ if (!pkt_reformat->fs_hws_action.pr_data || IS_ERR(pr_pool)) {
+ mlx5_core_err(ns->dev, "Failed release packet-reformat\n");
+ return;
+ }
+ kfree(pr_data->data);
+ mlx5_fs_hws_pr_pool_release_pr(pr_pool, pr_data);
+ pkt_reformat->fs_hws_action.pr_data = NULL;
+}
+
+static struct mlx5_fs_pool *
+mlx5_fs_create_mh_pool(struct mlx5_core_dev *dev,
+ struct mlx5hws_action_mh_pattern *pattern,
+ struct xarray *mh_pools, unsigned long index)
+{
+ struct mlx5_fs_pool *pool;
+ int err;
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return ERR_PTR(-ENOMEM);
+ err = mlx5_fs_hws_mh_pool_init(pool, dev, pattern);
+ if (err)
+ goto free_pool;
+ err = xa_insert(mh_pools, index, pool, GFP_KERNEL);
+ if (err)
+ goto cleanup_pool;
+ return pool;
+
+cleanup_pool:
+ mlx5_fs_hws_mh_pool_cleanup(pool);
+free_pool:
+ kfree(pool);
+ return ERR_PTR(err);
+}
+
+static void
+mlx5_fs_destroy_mh_pool(struct mlx5_fs_pool *pool, struct xarray *mh_pools,
+ unsigned long index)
+{
+ xa_erase(mh_pools, index);
+ mlx5_fs_hws_mh_pool_cleanup(pool);
+ kfree(pool);
+}
+
+static int mlx5_cmd_hws_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
+ u8 namespace, u8 num_actions,
+ void *modify_actions,
+ struct mlx5_modify_hdr *modify_hdr)
+{
+ struct mlx5_fs_hws_actions_pool *hws_pool = &ns->fs_hws_context.hws_pool;
+ struct mlx5hws_action_mh_pattern pattern = {};
+ struct mlx5_fs_hws_mh *mh_data = NULL;
+ struct mlx5hws_action *hws_action;
+ struct mlx5_fs_pool *pool;
+ unsigned long i, cnt = 0;
+ bool known_pattern;
+ int err;
+
+ pattern.sz = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) * num_actions;
+ pattern.data = modify_actions;
+
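+	/* Reuse an existing pool whose modify-header pattern matches;
+	 * otherwise create a new pool for this pattern.
+	 */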
+ known_pattern = false;
+ xa_for_each(&hws_pool->mh_pools, i, pool) {
+ if (mlx5_fs_hws_mh_pool_match(pool, &pattern)) {
+ known_pattern = true;
+ break;
+ }
+ cnt++;
+ }
+
+ if (!known_pattern) {
+ pool = mlx5_fs_create_mh_pool(ns->dev, &pattern,
+ &hws_pool->mh_pools, cnt);
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
+ }
+ mh_data = mlx5_fs_hws_mh_pool_acquire_mh(pool);
+ if (IS_ERR(mh_data)) {
+ err = PTR_ERR(mh_data);
+ goto destroy_pool;
+ }
+ hws_action = mh_data->bulk->hws_action;
+ mh_data->data = kmemdup(pattern.data, pattern.sz, GFP_KERNEL);
+ if (!mh_data->data) {
+ err = -ENOMEM;
+ goto release_mh;
+ }
+ modify_hdr->fs_hws_action.mh_data = mh_data;
+ modify_hdr->fs_hws_action.fs_pool = pool;
+ modify_hdr->owner = MLX5_FLOW_RESOURCE_OWNER_SW;
+ modify_hdr->fs_hws_action.hws_action = hws_action;
+
+ return 0;
+
+release_mh:
+ mlx5_fs_hws_mh_pool_release_mh(pool, mh_data);
+destroy_pool:
+ if (!known_pattern)
+ mlx5_fs_destroy_mh_pool(pool, &hws_pool->mh_pools, cnt);
+ return err;
+}
+
+static void mlx5_cmd_hws_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_modify_hdr *modify_hdr)
+{
+ struct mlx5_fs_hws_mh *mh_data;
+ struct mlx5_fs_pool *pool;
+
+ if (!modify_hdr->fs_hws_action.fs_pool || !modify_hdr->fs_hws_action.mh_data) {
+ mlx5_core_err(ns->dev, "Failed release modify-header\n");
+ return;
+ }
+
+ mh_data = modify_hdr->fs_hws_action.mh_data;
+ kfree(mh_data->data);
+ pool = modify_hdr->fs_hws_action.fs_pool;
+ mlx5_fs_hws_mh_pool_release_mh(pool, mh_data);
+ modify_hdr->fs_hws_action.mh_data = NULL;
+}
+
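+/* Lazily allocate a FW packet reformat object that mirrors this HWS-owned
+ * reformat and cache its id for callers that need a FW reformat id.
+ */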
+int
+mlx5_fs_hws_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat,
+ u32 *reformat_id)
+{
+ enum mlx5_flow_namespace_type ns_type = pkt_reformat->ns_type;
+ struct mutex *lock = &pkt_reformat->fs_hws_action.lock;
+ u32 *id = &pkt_reformat->fs_hws_action.fw_reformat_id;
+ struct mlx5_pkt_reformat fw_pkt_reformat = { 0 };
+ struct mlx5_pkt_reformat_params params = { 0 };
+ struct mlx5_flow_root_namespace *ns;
+ struct mlx5_core_dev *dev;
+ int ret;
+
+ mutex_lock(lock);
+
+ if (*id != 0) {
+ *reformat_id = *id;
+ ret = 0;
+ goto unlock;
+ }
+
+ dev = mlx5hws_action_get_dev(pkt_reformat->fs_hws_action.hws_action);
+ if (!dev) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ ns = mlx5_get_root_namespace(dev, ns_type);
+ if (!ns) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ params.type = pkt_reformat->reformat_type;
+ params.size = pkt_reformat->fs_hws_action.pr_data->data_size;
+ params.data = pkt_reformat->fs_hws_action.pr_data->data;
+
+ ret = mlx5_fs_cmd_get_fw_cmds()->
+ packet_reformat_alloc(ns, &params, ns_type, &fw_pkt_reformat);
+ if (ret)
+ goto unlock;
+
+ *id = fw_pkt_reformat.id;
+ *reformat_id = *id;
+ ret = 0;
+
+unlock:
+ mutex_unlock(lock);
+
+ return ret;
+}
+
+static int mlx5_cmd_hws_create_match_definer(struct mlx5_flow_root_namespace *ns,
+ u16 format_id, u32 *match_mask)
+{
+ return -EOPNOTSUPP;
+}
+
+static int mlx5_cmd_hws_destroy_match_definer(struct mlx5_flow_root_namespace *ns,
+ int definer_id)
+{
+ return -EOPNOTSUPP;
+}
+
+static u32 mlx5_cmd_hws_get_capabilities(struct mlx5_flow_root_namespace *ns,
+ enum fs_flow_table_type ft_type)
+{
+ if (ft_type != FS_FT_FDB)
+ return 0;
+
+ return MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX |
+ MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX |
+ MLX5_FLOW_STEERING_CAP_MATCH_RANGES;
+}
+
+bool mlx5_fs_hws_is_supported(struct mlx5_core_dev *dev)
+{
+ return mlx5hws_is_supported(dev);
+}
+
+static const struct mlx5_flow_cmds mlx5_flow_cmds_hws = {
+ .create_flow_table = mlx5_cmd_hws_create_flow_table,
+ .destroy_flow_table = mlx5_cmd_hws_destroy_flow_table,
+ .modify_flow_table = mlx5_cmd_hws_modify_flow_table,
+ .update_root_ft = mlx5_cmd_hws_update_root_ft,
+ .create_flow_group = mlx5_cmd_hws_create_flow_group,
+ .destroy_flow_group = mlx5_cmd_hws_destroy_flow_group,
+ .create_fte = mlx5_cmd_hws_create_fte,
+ .delete_fte = mlx5_cmd_hws_delete_fte,
+ .update_fte = mlx5_cmd_hws_update_fte,
+ .packet_reformat_alloc = mlx5_cmd_hws_packet_reformat_alloc,
+ .packet_reformat_dealloc = mlx5_cmd_hws_packet_reformat_dealloc,
+ .modify_header_alloc = mlx5_cmd_hws_modify_header_alloc,
+ .modify_header_dealloc = mlx5_cmd_hws_modify_header_dealloc,
+ .create_match_definer = mlx5_cmd_hws_create_match_definer,
+ .destroy_match_definer = mlx5_cmd_hws_destroy_match_definer,
+ .create_ns = mlx5_cmd_hws_create_ns,
+ .destroy_ns = mlx5_cmd_hws_destroy_ns,
+ .set_peer = mlx5_cmd_hws_set_peer,
+ .get_capabilities = mlx5_cmd_hws_get_capabilities,
+};
+
+const struct mlx5_flow_cmds *mlx5_fs_cmd_get_hws_cmds(void)
+{
+ return &mlx5_flow_cmds_hws;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h
new file mode 100644
index 000000000000..b92d55b2d147
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2025 NVIDIA Corporation & Affiliates */
+
+#ifndef _MLX5_FS_HWS_
+#define _MLX5_FS_HWS_
+
+#include "mlx5hws.h"
+#include "fs_hws_pools.h"
+
+struct mlx5_fs_hws_actions_pool {
+ struct mlx5hws_action *tag_action;
+ struct mlx5hws_action *pop_vlan_action;
+ struct mlx5hws_action *push_vlan_action;
+ struct mlx5hws_action *drop_action;
+ struct mlx5hws_action *decapl2_action;
+ struct mlx5hws_action *remove_hdr_vlan_action;
+ struct mlx5_fs_pool insert_hdr_pool;
+ struct mlx5_fs_pool dl3tnltol2_pool;
+ struct xarray el2tol3tnl_pools;
+ struct xarray el2tol2tnl_pools;
+ struct xarray mh_pools;
+ struct xarray table_dests;
+ struct xarray vport_vhca_dests;
+ struct xarray vport_dests;
+ struct xarray aso_meters;
+ struct xarray sample_dests;
+};
+
+struct mlx5_fs_hws_context {
+ struct mlx5hws_context *hws_ctx;
+ struct mlx5_fs_hws_actions_pool hws_pool;
+};
+
+struct mlx5_fs_hws_table {
+ struct mlx5hws_table *hws_table;
+ bool miss_ft_set;
+};
+
+struct mlx5_fs_hws_action {
+ struct mlx5hws_action *hws_action;
+ struct mlx5_fs_pool *fs_pool;
+ struct mlx5_fs_hws_pr *pr_data;
+ struct mlx5_fs_hws_mh *mh_data;
+ u32 fw_reformat_id;
+ /* Protect `fw_reformat_id` against being initialized from multiple
+ * threads.
+ */
+ struct mutex lock;
+};
+
+struct mlx5_fs_hws_matcher {
+ struct mlx5hws_bwc_matcher *matcher;
+};
+
+struct mlx5_fs_hws_rule_action {
+ struct mlx5hws_action *action;
+ union {
+ struct mlx5_fc *counter;
+ struct mlx5_exe_aso *exe_aso;
+ u32 sampler_id;
+ };
+};
+
+struct mlx5_fs_hws_rule {
+ struct mlx5hws_bwc_rule *bwc_rule;
+ struct mlx5_fs_hws_rule_action *hws_fs_actions;
+ int num_fs_actions;
+};
+
+struct mlx5_fs_hws_data {
+ struct mlx5hws_action *hws_action;
+ struct mutex lock; /* protects hws_action */
+ refcount_t hws_action_refcount;
+};
+
+struct mlx5_fs_hws_create_action_ctx {
+ enum mlx5hws_action_type actions_type;
+ struct mlx5hws_context *hws_ctx;
+ u32 id;
+ union {
+ u8 return_reg_id;
+ };
+};
+
+struct mlx5hws_action *
+mlx5_fs_get_hws_action(struct mlx5_fs_hws_data *fs_hws_data,
+ struct mlx5_fs_hws_create_action_ctx *create_ctx);
+void mlx5_fs_put_hws_action(struct mlx5_fs_hws_data *fs_hws_data);
+
+#ifdef CONFIG_MLX5_HW_STEERING
+
+int
+mlx5_fs_hws_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat,
+ u32 *reformat_id);
+
+bool mlx5_fs_hws_is_supported(struct mlx5_core_dev *dev);
+
+const struct mlx5_flow_cmds *mlx5_fs_cmd_get_hws_cmds(void);
+
+#else
+
+static inline int
+mlx5_fs_hws_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat,
+ u32 *reformat_id)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline bool mlx5_fs_hws_is_supported(struct mlx5_core_dev *dev)
+{
+ return false;
+}
+
+static inline const struct mlx5_flow_cmds *mlx5_fs_cmd_get_hws_cmds(void)
+{
+ return NULL;
+}
+
+#endif /* CONFIG_MLX5_HW_STEERING */
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c
new file mode 100644
index 000000000000..839d71bd4216
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c
@@ -0,0 +1,427 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2025 NVIDIA Corporation & Affiliates */
+
+#include <mlx5_core.h>
+#include "fs_hws_pools.h"
+
+#define MLX5_FS_HWS_DEFAULT_BULK_LEN 65536
+#define MLX5_FS_HWS_POOL_MAX_THRESHOLD BIT(18)
+#define MLX5_FS_HWS_POOL_USED_BUFF_RATIO 10
+
+static struct mlx5hws_action *
+mlx5_fs_dl3tnltol2_bulk_action_create(struct mlx5hws_context *ctx)
+{
+ struct mlx5hws_action_reformat_header reformat_hdr[2] = {};
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB;
+ enum mlx5hws_action_type reformat_type;
+ u32 log_bulk_size;
+
+ reformat_type = MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2;
+ reformat_hdr[MLX5_FS_DL3TNLTOL2_MAC_HDR_IDX].sz = ETH_HLEN;
+ reformat_hdr[MLX5_FS_DL3TNLTOL2_MAC_VLAN_HDR_IDX].sz = ETH_HLEN + VLAN_HLEN;
+
+ log_bulk_size = ilog2(MLX5_FS_HWS_DEFAULT_BULK_LEN);
+ return mlx5hws_action_create_reformat(ctx, reformat_type, 2,
+ reformat_hdr, log_bulk_size, flags);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_el2tol3tnl_bulk_action_create(struct mlx5hws_context *ctx, size_t data_size)
+{
+ struct mlx5hws_action_reformat_header reformat_hdr = {};
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB;
+ enum mlx5hws_action_type reformat_type;
+ u32 log_bulk_size;
+
+ reformat_type = MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3;
+ reformat_hdr.sz = data_size;
+
+ log_bulk_size = ilog2(MLX5_FS_HWS_DEFAULT_BULK_LEN);
+ return mlx5hws_action_create_reformat(ctx, reformat_type, 1,
+ &reformat_hdr, log_bulk_size, flags);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_el2tol2tnl_bulk_action_create(struct mlx5hws_context *ctx, size_t data_size)
+{
+ struct mlx5hws_action_reformat_header reformat_hdr = {};
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB;
+ enum mlx5hws_action_type reformat_type;
+ u32 log_bulk_size;
+
+ reformat_type = MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
+ reformat_hdr.sz = data_size;
+
+ log_bulk_size = ilog2(MLX5_FS_HWS_DEFAULT_BULK_LEN);
+ return mlx5hws_action_create_reformat(ctx, reformat_type, 1,
+ &reformat_hdr, log_bulk_size, flags);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_insert_hdr_bulk_action_create(struct mlx5hws_context *ctx)
+{
+ struct mlx5hws_action_insert_header insert_hdr = {};
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB;
+ u32 log_bulk_size;
+
+ log_bulk_size = ilog2(MLX5_FS_HWS_DEFAULT_BULK_LEN);
+ insert_hdr.hdr.sz = MLX5_FS_INSERT_HDR_VLAN_SIZE;
+ insert_hdr.anchor = MLX5_FS_INSERT_HDR_VLAN_ANCHOR;
+ insert_hdr.offset = MLX5_FS_INSERT_HDR_VLAN_OFFSET;
+
+ return mlx5hws_action_create_insert_header(ctx, 1, &insert_hdr,
+ log_bulk_size, flags);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_pr_bulk_action_create(struct mlx5_core_dev *dev,
+ struct mlx5_fs_hws_pr_pool_ctx *pr_pool_ctx)
+{
+ struct mlx5_flow_root_namespace *root_ns;
+ struct mlx5hws_context *ctx;
+ size_t encap_data_size;
+
+ root_ns = mlx5_get_root_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
+ if (!root_ns || root_ns->mode != MLX5_FLOW_STEERING_MODE_HMFS)
+ return NULL;
+
+ ctx = root_ns->fs_hws_context.hws_ctx;
+ if (!ctx)
+ return NULL;
+
+ encap_data_size = pr_pool_ctx->encap_data_size;
+ switch (pr_pool_ctx->reformat_type) {
+ case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
+ return mlx5_fs_dl3tnltol2_bulk_action_create(ctx);
+ case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
+ return mlx5_fs_el2tol3tnl_bulk_action_create(ctx, encap_data_size);
+ case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
+ return mlx5_fs_el2tol2tnl_bulk_action_create(ctx, encap_data_size);
+ case MLX5HWS_ACTION_TYP_INSERT_HEADER:
+ return mlx5_fs_insert_hdr_bulk_action_create(ctx);
+ default:
+ return NULL;
+ }
+ return NULL;
+}
+
+static struct mlx5_fs_bulk *
+mlx5_fs_hws_pr_bulk_create(struct mlx5_core_dev *dev, void *pool_ctx)
+{
+ struct mlx5_fs_hws_pr_pool_ctx *pr_pool_ctx;
+ struct mlx5_fs_hws_pr_bulk *pr_bulk;
+ int bulk_len;
+ int i;
+
+ if (!pool_ctx)
+ return NULL;
+ pr_pool_ctx = pool_ctx;
+ bulk_len = MLX5_FS_HWS_DEFAULT_BULK_LEN;
+ pr_bulk = kvzalloc(struct_size(pr_bulk, prs_data, bulk_len), GFP_KERNEL);
+ if (!pr_bulk)
+ return NULL;
+
+ if (mlx5_fs_bulk_init(dev, &pr_bulk->fs_bulk, bulk_len))
+ goto free_pr_bulk;
+
+ for (i = 0; i < bulk_len; i++) {
+ pr_bulk->prs_data[i].bulk = pr_bulk;
+ pr_bulk->prs_data[i].offset = i;
+ }
+
+ pr_bulk->hws_action = mlx5_fs_pr_bulk_action_create(dev, pr_pool_ctx);
+ if (!pr_bulk->hws_action)
+ goto cleanup_fs_bulk;
+
+ return &pr_bulk->fs_bulk;
+
+cleanup_fs_bulk:
+ mlx5_fs_bulk_cleanup(&pr_bulk->fs_bulk);
+free_pr_bulk:
+ kvfree(pr_bulk);
+ return NULL;
+}
+
+static int
+mlx5_fs_hws_pr_bulk_destroy(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *fs_bulk)
+{
+ struct mlx5_fs_hws_pr_bulk *pr_bulk;
+
+ pr_bulk = container_of(fs_bulk, struct mlx5_fs_hws_pr_bulk, fs_bulk);
+ if (mlx5_fs_bulk_get_free_amount(fs_bulk) < fs_bulk->bulk_len) {
+ mlx5_core_err(dev, "Freeing bulk before all reformats were released\n");
+ return -EBUSY;
+ }
+
+ mlx5hws_action_destroy(pr_bulk->hws_action);
+ mlx5_fs_bulk_cleanup(fs_bulk);
+ kvfree(pr_bulk);
+
+ return 0;
+}
+
+static void mlx5_hws_pool_update_threshold(struct mlx5_fs_pool *hws_pool)
+{
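+	/* Keep roughly a tenth of the used units as free headroom, capped at
+	 * the maximum pool threshold.
+	 */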
+ hws_pool->threshold = min_t(int, MLX5_FS_HWS_POOL_MAX_THRESHOLD,
+ hws_pool->used_units / MLX5_FS_HWS_POOL_USED_BUFF_RATIO);
+}
+
+static const struct mlx5_fs_pool_ops mlx5_fs_hws_pr_pool_ops = {
+ .bulk_create = mlx5_fs_hws_pr_bulk_create,
+ .bulk_destroy = mlx5_fs_hws_pr_bulk_destroy,
+ .update_threshold = mlx5_hws_pool_update_threshold,
+};
+
+int mlx5_fs_hws_pr_pool_init(struct mlx5_fs_pool *pr_pool,
+ struct mlx5_core_dev *dev, size_t encap_data_size,
+ enum mlx5hws_action_type reformat_type)
+{
+ struct mlx5_fs_hws_pr_pool_ctx *pr_pool_ctx;
+
+ if (reformat_type != MLX5HWS_ACTION_TYP_INSERT_HEADER &&
+ reformat_type != MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2 &&
+ reformat_type != MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3 &&
+ reformat_type != MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2)
+ return -EOPNOTSUPP;
+
+ pr_pool_ctx = kzalloc(sizeof(*pr_pool_ctx), GFP_KERNEL);
+ if (!pr_pool_ctx)
+ return -ENOMEM;
+ pr_pool_ctx->reformat_type = reformat_type;
+ pr_pool_ctx->encap_data_size = encap_data_size;
+ mlx5_fs_pool_init(pr_pool, dev, &mlx5_fs_hws_pr_pool_ops, pr_pool_ctx);
+ return 0;
+}
+
+void mlx5_fs_hws_pr_pool_cleanup(struct mlx5_fs_pool *pr_pool)
+{
+ struct mlx5_fs_hws_pr_pool_ctx *pr_pool_ctx;
+
+ mlx5_fs_pool_cleanup(pr_pool);
+ pr_pool_ctx = pr_pool->pool_ctx;
+ if (!pr_pool_ctx)
+ return;
+ kfree(pr_pool_ctx);
+}
+
+struct mlx5_fs_hws_pr *
+mlx5_fs_hws_pr_pool_acquire_pr(struct mlx5_fs_pool *pr_pool)
+{
+ struct mlx5_fs_pool_index pool_index = {};
+ struct mlx5_fs_hws_pr_bulk *pr_bulk;
+ int err;
+
+ err = mlx5_fs_pool_acquire_index(pr_pool, &pool_index);
+ if (err)
+ return ERR_PTR(err);
+ pr_bulk = container_of(pool_index.fs_bulk, struct mlx5_fs_hws_pr_bulk,
+ fs_bulk);
+ return &pr_bulk->prs_data[pool_index.index];
+}
+
+void mlx5_fs_hws_pr_pool_release_pr(struct mlx5_fs_pool *pr_pool,
+ struct mlx5_fs_hws_pr *pr_data)
+{
+ struct mlx5_fs_bulk *fs_bulk = &pr_data->bulk->fs_bulk;
+ struct mlx5_fs_pool_index pool_index = {};
+ struct mlx5_core_dev *dev = pr_pool->dev;
+
+ pool_index.fs_bulk = fs_bulk;
+ pool_index.index = pr_data->offset;
+ if (mlx5_fs_pool_release_index(pr_pool, &pool_index))
+ mlx5_core_warn(dev, "Attempted to release packet reformat which is not acquired\n");
+}
+
+struct mlx5hws_action *mlx5_fs_hws_pr_get_action(struct mlx5_fs_hws_pr *pr_data)
+{
+ return pr_data->bulk->hws_action;
+}
+
+static struct mlx5hws_action *
+mlx5_fs_mh_bulk_action_create(struct mlx5hws_context *ctx,
+ struct mlx5hws_action_mh_pattern *pattern)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB;
+ u32 log_bulk_size;
+
+ log_bulk_size = ilog2(MLX5_FS_HWS_DEFAULT_BULK_LEN);
+ return mlx5hws_action_create_modify_header(ctx, 1, pattern,
+ log_bulk_size, flags);
+}
+
+static struct mlx5_fs_bulk *
+mlx5_fs_hws_mh_bulk_create(struct mlx5_core_dev *dev, void *pool_ctx)
+{
+ struct mlx5hws_action_mh_pattern *pattern;
+ struct mlx5_flow_root_namespace *root_ns;
+ struct mlx5_fs_hws_mh_bulk *mh_bulk;
+ struct mlx5hws_context *ctx;
+ int bulk_len;
+
+ if (!pool_ctx)
+ return NULL;
+
+ root_ns = mlx5_get_root_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
+ if (!root_ns || root_ns->mode != MLX5_FLOW_STEERING_MODE_HMFS)
+ return NULL;
+
+ ctx = root_ns->fs_hws_context.hws_ctx;
+ if (!ctx)
+ return NULL;
+
+ pattern = pool_ctx;
+ bulk_len = MLX5_FS_HWS_DEFAULT_BULK_LEN;
+ mh_bulk = kvzalloc(struct_size(mh_bulk, mhs_data, bulk_len), GFP_KERNEL);
+ if (!mh_bulk)
+ return NULL;
+
+ if (mlx5_fs_bulk_init(dev, &mh_bulk->fs_bulk, bulk_len))
+ goto free_mh_bulk;
+
+ for (int i = 0; i < bulk_len; i++) {
+ mh_bulk->mhs_data[i].bulk = mh_bulk;
+ mh_bulk->mhs_data[i].offset = i;
+ }
+
+ mh_bulk->hws_action = mlx5_fs_mh_bulk_action_create(ctx, pattern);
+ if (!mh_bulk->hws_action)
+ goto cleanup_fs_bulk;
+
+ return &mh_bulk->fs_bulk;
+
+cleanup_fs_bulk:
+ mlx5_fs_bulk_cleanup(&mh_bulk->fs_bulk);
+free_mh_bulk:
+ kvfree(mh_bulk);
+ return NULL;
+}
+
+static int
+mlx5_fs_hws_mh_bulk_destroy(struct mlx5_core_dev *dev,
+ struct mlx5_fs_bulk *fs_bulk)
+{
+ struct mlx5_fs_hws_mh_bulk *mh_bulk;
+
+ mh_bulk = container_of(fs_bulk, struct mlx5_fs_hws_mh_bulk, fs_bulk);
+ if (mlx5_fs_bulk_get_free_amount(fs_bulk) < fs_bulk->bulk_len) {
+		mlx5_core_err(dev, "Freeing bulk before all modify headers were released\n");
+ return -EBUSY;
+ }
+
+ mlx5hws_action_destroy(mh_bulk->hws_action);
+ mlx5_fs_bulk_cleanup(fs_bulk);
+ kvfree(mh_bulk);
+
+ return 0;
+}
+
+static const struct mlx5_fs_pool_ops mlx5_fs_hws_mh_pool_ops = {
+ .bulk_create = mlx5_fs_hws_mh_bulk_create,
+ .bulk_destroy = mlx5_fs_hws_mh_bulk_destroy,
+ .update_threshold = mlx5_hws_pool_update_threshold,
+};
+
+int mlx5_fs_hws_mh_pool_init(struct mlx5_fs_pool *fs_hws_mh_pool,
+ struct mlx5_core_dev *dev,
+ struct mlx5hws_action_mh_pattern *pattern)
+{
+ struct mlx5hws_action_mh_pattern *pool_pattern;
+
+ pool_pattern = kzalloc(sizeof(*pool_pattern), GFP_KERNEL);
+ if (!pool_pattern)
+ return -ENOMEM;
+ pool_pattern->data = kmemdup(pattern->data, pattern->sz, GFP_KERNEL);
+ if (!pool_pattern->data) {
+ kfree(pool_pattern);
+ return -ENOMEM;
+ }
+ pool_pattern->sz = pattern->sz;
+ mlx5_fs_pool_init(fs_hws_mh_pool, dev, &mlx5_fs_hws_mh_pool_ops,
+ pool_pattern);
+ return 0;
+}
+
+void mlx5_fs_hws_mh_pool_cleanup(struct mlx5_fs_pool *fs_hws_mh_pool)
+{
+ struct mlx5hws_action_mh_pattern *pool_pattern;
+
+ mlx5_fs_pool_cleanup(fs_hws_mh_pool);
+ pool_pattern = fs_hws_mh_pool->pool_ctx;
+ if (!pool_pattern)
+ return;
+ kfree(pool_pattern->data);
+ kfree(pool_pattern);
+}
+
+struct mlx5_fs_hws_mh *
+mlx5_fs_hws_mh_pool_acquire_mh(struct mlx5_fs_pool *mh_pool)
+{
+ struct mlx5_fs_pool_index pool_index = {};
+ struct mlx5_fs_hws_mh_bulk *mh_bulk;
+ int err;
+
+ err = mlx5_fs_pool_acquire_index(mh_pool, &pool_index);
+ if (err)
+ return ERR_PTR(err);
+ mh_bulk = container_of(pool_index.fs_bulk, struct mlx5_fs_hws_mh_bulk,
+ fs_bulk);
+ return &mh_bulk->mhs_data[pool_index.index];
+}
+
+void mlx5_fs_hws_mh_pool_release_mh(struct mlx5_fs_pool *mh_pool,
+ struct mlx5_fs_hws_mh *mh_data)
+{
+ struct mlx5_fs_bulk *fs_bulk = &mh_data->bulk->fs_bulk;
+ struct mlx5_fs_pool_index pool_index = {};
+ struct mlx5_core_dev *dev = mh_pool->dev;
+
+ pool_index.fs_bulk = fs_bulk;
+ pool_index.index = mh_data->offset;
+ if (mlx5_fs_pool_release_index(mh_pool, &pool_index))
+ mlx5_core_warn(dev, "Attempted to release modify header which is not acquired\n");
+}
+
+bool mlx5_fs_hws_mh_pool_match(struct mlx5_fs_pool *mh_pool,
+ struct mlx5hws_action_mh_pattern *pattern)
+{
+ struct mlx5hws_action_mh_pattern *pool_pattern;
+ int num_actions, i;
+
+ pool_pattern = mh_pool->pool_ctx;
+ if (WARN_ON_ONCE(!pool_pattern))
+ return false;
+
+ if (pattern->sz != pool_pattern->sz)
+ return false;
+ num_actions = pattern->sz / MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
+ for (i = 0; i < num_actions; i++) {
+ if ((__force __be32)pattern->data[i] !=
+ (__force __be32)pool_pattern->data[i])
+ return false;
+ }
+ return true;
+}
+
+struct mlx5hws_action *mlx5_fc_get_hws_action(struct mlx5hws_context *ctx,
+ struct mlx5_fc *counter)
+{
+ struct mlx5_fs_hws_create_action_ctx create_ctx;
+ struct mlx5_fc_bulk *fc_bulk = counter->bulk;
+ struct mlx5hws_action *hws_action;
+
+ create_ctx.hws_ctx = ctx;
+ create_ctx.id = fc_bulk->base_id;
+ create_ctx.actions_type = MLX5HWS_ACTION_TYP_CTR;
+
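+	/* Hold a local reference on the counter while its bulk's HWS counter
+	 * action is in use; dropped again if getting the action fails.
+	 */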
+ mlx5_fc_local_get(counter);
+ hws_action = mlx5_fs_get_hws_action(&fc_bulk->hws_data, &create_ctx);
+ if (!hws_action)
+ mlx5_fc_local_put(counter);
+ return hws_action;
+}
+
+void mlx5_fc_put_hws_action(struct mlx5_fc *counter)
+{
+ mlx5_fs_put_hws_action(&counter->bulk->hws_data);
+ mlx5_fc_local_put(counter);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.h
new file mode 100644
index 000000000000..34072551dd21
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2025 NVIDIA Corporation & Affiliates */
+
+#ifndef __MLX5_FS_HWS_POOLS_H__
+#define __MLX5_FS_HWS_POOLS_H__
+
+#include <linux/if_vlan.h>
+#include "fs_pool.h"
+#include "fs_core.h"
+
+#define MLX5_FS_INSERT_HDR_VLAN_ANCHOR MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START
+#define MLX5_FS_INSERT_HDR_VLAN_OFFSET offsetof(struct vlan_ethhdr, h_vlan_proto)
+#define MLX5_FS_INSERT_HDR_VLAN_SIZE sizeof(struct vlan_hdr)
+
+enum {
+ MLX5_FS_DL3TNLTOL2_MAC_HDR_IDX = 0,
+ MLX5_FS_DL3TNLTOL2_MAC_VLAN_HDR_IDX,
+};
+
+struct mlx5_fs_hws_pr {
+ struct mlx5_fs_hws_pr_bulk *bulk;
+ u32 offset;
+ u8 hdr_idx;
+ u8 *data;
+ size_t data_size;
+};
+
+struct mlx5_fs_hws_pr_bulk {
+ struct mlx5_fs_bulk fs_bulk;
+ struct mlx5hws_action *hws_action;
+ struct mlx5_fs_hws_pr prs_data[];
+};
+
+struct mlx5_fs_hws_pr_pool_ctx {
+ enum mlx5hws_action_type reformat_type;
+ size_t encap_data_size;
+};
+
+struct mlx5_fs_hws_mh {
+ struct mlx5_fs_hws_mh_bulk *bulk;
+ u32 offset;
+ u8 *data;
+};
+
+struct mlx5_fs_hws_mh_bulk {
+ struct mlx5_fs_bulk fs_bulk;
+ struct mlx5_fs_pool *mh_pool;
+ struct mlx5hws_action *hws_action;
+ struct mlx5_fs_hws_mh mhs_data[];
+};
+
+int mlx5_fs_hws_pr_pool_init(struct mlx5_fs_pool *pr_pool,
+ struct mlx5_core_dev *dev, size_t encap_data_size,
+ enum mlx5hws_action_type reformat_type);
+void mlx5_fs_hws_pr_pool_cleanup(struct mlx5_fs_pool *pr_pool);
+
+struct mlx5_fs_hws_pr *mlx5_fs_hws_pr_pool_acquire_pr(struct mlx5_fs_pool *pr_pool);
+void mlx5_fs_hws_pr_pool_release_pr(struct mlx5_fs_pool *pr_pool,
+ struct mlx5_fs_hws_pr *pr_data);
+struct mlx5hws_action *mlx5_fs_hws_pr_get_action(struct mlx5_fs_hws_pr *pr_data);
+int mlx5_fs_hws_mh_pool_init(struct mlx5_fs_pool *fs_hws_mh_pool,
+ struct mlx5_core_dev *dev,
+ struct mlx5hws_action_mh_pattern *pattern);
+void mlx5_fs_hws_mh_pool_cleanup(struct mlx5_fs_pool *fs_hws_mh_pool);
+struct mlx5_fs_hws_mh *mlx5_fs_hws_mh_pool_acquire_mh(struct mlx5_fs_pool *mh_pool);
+void mlx5_fs_hws_mh_pool_release_mh(struct mlx5_fs_pool *mh_pool,
+ struct mlx5_fs_hws_mh *mh_data);
+bool mlx5_fs_hws_mh_pool_match(struct mlx5_fs_pool *mh_pool,
+ struct mlx5hws_action_mh_pattern *pattern);
+struct mlx5hws_action *mlx5_fc_get_hws_action(struct mlx5hws_context *ctx,
+ struct mlx5_fc *counter);
+void mlx5_fc_put_hws_action(struct mlx5_fc *counter);
+#endif /* __MLX5_FS_HWS_POOLS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h
new file mode 100644
index 000000000000..21279d503117
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef HWS_INTERNAL_H_
+#define HWS_INTERNAL_H_
+
+#include <linux/mlx5/transobj.h>
+#include <linux/mlx5/vport.h>
+#include "fs_core.h"
+#include "wq.h"
+#include "lib/mlx5.h"
+
+#include "prm.h"
+#include "mlx5hws.h"
+#include "pool.h"
+#include "vport.h"
+#include "context.h"
+#include "table.h"
+#include "send.h"
+#include "action_ste_pool.h"
+#include "rule.h"
+#include "cmd.h"
+#include "action.h"
+#include "definer.h"
+#include "matcher.h"
+#include "debug.h"
+#include "pat_arg.h"
+#include "bwc.h"
+#include "bwc_complex.h"
+
+#define W_SIZE 2
+#define DW_SIZE 4
+#define BITS_IN_BYTE 8
+#define BITS_IN_DW (BITS_IN_BYTE * DW_SIZE)
+
+#define IS_BIT_SET(_value, _bit) ((_value) & (1ULL << (_bit)))
+
+#define mlx5hws_err(ctx, arg...) mlx5_core_err((ctx)->mdev, ##arg)
+#define mlx5hws_info(ctx, arg...) mlx5_core_info((ctx)->mdev, ##arg)
+#define mlx5hws_dbg(ctx, arg...) mlx5_core_dbg((ctx)->mdev, ##arg)
+
+#define MLX5HWS_TABLE_TYPE_BASE 2
+
+static inline bool is_mem_zero(const u8 *mem, size_t size)
+{
+ if (unlikely(!size)) {
+ pr_warn("HWS: invalid buffer of size 0 in %s\n", __func__);
+ return true;
+ }
+
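+	/* If the first byte is zero and every byte equals the next one, the
+	 * whole buffer is zero.
+	 */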
+ return (*mem == 0) && memcmp(mem, mem + 1, size - 1) == 0;
+}
+
+static inline unsigned long align(unsigned long val, unsigned long align)
+{
+ return (val + align - 1) & ~(align - 1);
+}
+
+#endif /* HWS_INTERNAL_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
new file mode 100644
index 000000000000..32f87fdf3213
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
@@ -0,0 +1,1293 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "internal.h"
+
+static bool hws_matcher_requires_col_tbl(u8 log_num_of_rules)
+{
+ /* Collision table concatenation is done only for large rule tables */
+ return log_num_of_rules > MLX5HWS_MATCHER_ASSURED_RULES_TH;
+}
+
+static u8 hws_matcher_rules_to_tbl_depth(u8 log_num_of_rules)
+{
+ if (hws_matcher_requires_col_tbl(log_num_of_rules))
+ return MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH;
+
+ /* For small rule tables we use a single deep table to assure insertion */
+ return min(log_num_of_rules, MLX5HWS_MATCHER_ASSURED_COL_TBL_DEPTH);
+}
+
+static void hws_matcher_destroy_end_ft(struct mlx5hws_matcher *matcher)
+{
+ mlx5hws_table_destroy_default_ft(matcher->tbl, matcher->end_ft_id);
+}
+
+int mlx5hws_matcher_update_end_ft_isolated(struct mlx5hws_table *tbl,
+ u32 miss_ft_id)
+{
+ struct mlx5hws_matcher *tmp_matcher;
+
+ if (list_empty(&tbl->matchers_list))
+ return -EINVAL;
+
+ /* Update isolated_matcher_end_ft_id attribute for all
+	 * the matchers in the isolated table.
+ */
+ list_for_each_entry(tmp_matcher, &tbl->matchers_list, list_node)
+ tmp_matcher->attr.isolated_matcher_end_ft_id = miss_ft_id;
+
+ tmp_matcher = list_last_entry(&tbl->matchers_list,
+ struct mlx5hws_matcher,
+ list_node);
+
+ return mlx5hws_table_ft_set_next_ft(tbl->ctx,
+ tmp_matcher->end_ft_id,
+ tbl->fw_ft_type,
+ miss_ft_id);
+}
+
+static int hws_matcher_connect_end_ft_isolated(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_table *tbl = matcher->tbl;
+ u32 end_ft_id;
+ int ret;
+
+ /* Reset end_ft next RTCs */
+ ret = mlx5hws_table_ft_set_next_rtc(tbl->ctx,
+ matcher->end_ft_id,
+ matcher->tbl->fw_ft_type,
+ 0, 0);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Isolated matcher: failed to reset FT's next RTCs\n");
+ return ret;
+ }
+
+ /* Connect isolated matcher's end_ft to the complex matcher's end FT */
+ end_ft_id = matcher->attr.isolated_matcher_end_ft_id;
+ ret = mlx5hws_table_ft_set_next_ft(tbl->ctx,
+ matcher->end_ft_id,
+ matcher->tbl->fw_ft_type,
+ end_ft_id);
+
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Isolated matcher: failed to set FT's miss_ft_id\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hws_matcher_create_end_ft_isolated(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_table *tbl = matcher->tbl;
+ int ret;
+
+ ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev,
+ tbl,
+ 0,
+ &matcher->end_ft_id);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Isolated matcher: failed to create end flow table\n");
+ return ret;
+ }
+
+ ret = hws_matcher_connect_end_ft_isolated(matcher);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Isolated matcher: failed to connect end FT\n");
+ goto destroy_default_ft;
+ }
+
+ return 0;
+
+destroy_default_ft:
+ mlx5hws_table_destroy_default_ft(tbl, matcher->end_ft_id);
+ return ret;
+}
+
+static int hws_matcher_create_end_ft(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_table *tbl = matcher->tbl;
+ int ret;
+
+ if (mlx5hws_matcher_is_isolated(matcher))
+ ret = hws_matcher_create_end_ft_isolated(matcher);
+ else
+ ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev,
+ tbl,
+ 0,
+ &matcher->end_ft_id);
+
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Failed to create matcher end flow table\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hws_matcher_connect_isolated_first(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_table *tbl = matcher->tbl;
+ struct mlx5hws_context *ctx = tbl->ctx;
+ int ret;
+
+ /* Isolated matcher's end_ft is already pointing to the end_ft
+ * of the complex matcher - it was set at creation of end_ft,
+ * so no need to connect it.
+ * We still need to connect the isolated table's start FT to
+ * this matcher's RTC.
+ */
+ ret = mlx5hws_table_ft_set_next_rtc(ctx,
+ tbl->ft_id,
+ tbl->fw_ft_type,
+ matcher->match_ste.rtc_0_id,
+ matcher->match_ste.rtc_1_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Isolated matcher: failed to connect start FT to match RTC\n");
+ return ret;
+ }
+
+ /* Reset table's FT default miss (drop refcount) */
+ ret = mlx5hws_table_ft_set_default_next_ft(tbl, tbl->ft_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Isolated matcher: failed to reset table ft default miss\n");
+ return ret;
+ }
+
+ list_add(&matcher->list_node, &tbl->matchers_list);
+
+ return ret;
+}
+
+static int hws_matcher_connect_isolated_last(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_table *tbl = matcher->tbl;
+ struct mlx5hws_context *ctx = tbl->ctx;
+ struct mlx5hws_matcher *last;
+ int ret;
+
+ last = list_last_entry(&tbl->matchers_list,
+ struct mlx5hws_matcher,
+ list_node);
+
+ /* New matcher's end_ft is already pointing to the end_ft of
+ * the complex matcher.
+ * Connect previous matcher's end_ft to this new matcher RTC.
+ */
+ ret = mlx5hws_table_ft_set_next_rtc(ctx,
+ last->end_ft_id,
+ tbl->fw_ft_type,
+ matcher->match_ste.rtc_0_id,
+ matcher->match_ste.rtc_1_id);
+ if (ret) {
+ mlx5hws_err(ctx,
+ "Isolated matcher: failed to connect matcher end_ft to new match RTC\n");
+ return ret;
+ }
+
+ /* Reset prev matcher FT default miss (drop refcount) */
+ ret = mlx5hws_table_ft_set_default_next_ft(tbl, last->end_ft_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Isolated matcher: failed to reset matcher ft default miss\n");
+ return ret;
+ }
+
+ /* Insert after the last matcher */
+ list_add(&matcher->list_node, &last->list_node);
+
+ return 0;
+}
+
+static int hws_matcher_connect_isolated(struct mlx5hws_matcher *matcher)
+{
+ /* Isolated matcher is expected to be the only one in its table.
+ * However, it can have a collision matcher, and it can go through
+ * the rehash process, in which case we will temporarily have both old and
+ * new matchers in the isolated table.
+ * Check if this is the first matcher in the isolated table.
+ */
+ if (list_empty(&matcher->tbl->matchers_list))
+ return hws_matcher_connect_isolated_first(matcher);
+
+ /* If this wasn't the first matcher, then we have 3 possible cases:
+ * - this is a collision matcher for the first matcher
+ * - this is a new rehash dest matcher
+ * - this is a collision matcher for the new rehash dest matcher
+ * The logic to add a new matcher is the same for all these cases.
+ */
+ return hws_matcher_connect_isolated_last(matcher);
+}
+
+static int hws_matcher_connect(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_table *tbl = matcher->tbl;
+ struct mlx5hws_context *ctx = tbl->ctx;
+ struct mlx5hws_matcher *prev = NULL;
+ struct mlx5hws_matcher *next = NULL;
+ struct mlx5hws_matcher *tmp_matcher;
+ int ret;
+
+ if (mlx5hws_matcher_is_isolated(matcher))
+ return hws_matcher_connect_isolated(matcher);
+
+ /* Find location in matcher list */
+ if (list_empty(&tbl->matchers_list)) {
+ list_add(&matcher->list_node, &tbl->matchers_list);
+ goto connect;
+ }
+
+ list_for_each_entry(tmp_matcher, &tbl->matchers_list, list_node) {
+ if (tmp_matcher->attr.priority > matcher->attr.priority) {
+ next = tmp_matcher;
+ break;
+ }
+ prev = tmp_matcher;
+ }
+
+ if (next)
+ /* insert before next */
+ list_add_tail(&matcher->list_node, &next->list_node);
+ else
+ /* insert after prev */
+ list_add(&matcher->list_node, &prev->list_node);
+
+connect:
+ if (next) {
+ /* Connect to next RTC */
+ ret = mlx5hws_table_ft_set_next_rtc(ctx,
+ matcher->end_ft_id,
+ tbl->fw_ft_type,
+ next->match_ste.rtc_0_id,
+ next->match_ste.rtc_1_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to connect new matcher to next RTC\n");
+ goto remove_from_list;
+ }
+ } else {
+ /* Connect last matcher to next miss_tbl if exists */
+ ret = mlx5hws_table_connect_to_miss_table(tbl, tbl->default_miss.miss_tbl);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed connect new matcher to miss_tbl\n");
+ goto remove_from_list;
+ }
+ }
+
+ /* Connect to previous FT */
+ ret = mlx5hws_table_ft_set_next_rtc(ctx,
+ prev ? prev->end_ft_id : tbl->ft_id,
+ tbl->fw_ft_type,
+ matcher->match_ste.rtc_0_id,
+ matcher->match_ste.rtc_1_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to connect new matcher to previous FT\n");
+ goto remove_from_list;
+ }
+
+ /* Reset prev matcher FT default miss (drop refcount) */
+ ret = mlx5hws_table_ft_set_default_next_ft(tbl, prev ? prev->end_ft_id : tbl->ft_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to reset matcher ft default miss\n");
+ goto remove_from_list;
+ }
+
+ if (!prev) {
+ /* This is the new first matcher: update tables whose miss points to this table */
+ ret = mlx5hws_table_update_connected_miss_tables(tbl);
+ if (ret) {
+ mlx5hws_err(ctx, "Fatal error, failed to update connected miss table\n");
+ goto remove_from_list;
+ }
+ }
+
+ return 0;
+
+remove_from_list:
+ list_del_init(&matcher->list_node);
+ return ret;
+}
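+
+/* Illustration of the resulting chain for a non-isolated table with two
+ * matchers M0 (higher priority) and M1, based on the connect logic above:
+ *
+ *   table start FT -> M0 match RTC -> M0 end FT -> M1 match RTC ->
+ *   M1 end FT -> table default miss
+ */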
+
+static int hws_matcher_disconnect_isolated(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_matcher *first, *last, *prev, *next;
+ struct mlx5hws_table *tbl = matcher->tbl;
+ struct mlx5hws_context *ctx = tbl->ctx;
+ u32 end_ft_id;
+ int ret;
+
+ first = list_first_entry(&tbl->matchers_list,
+ struct mlx5hws_matcher,
+ list_node);
+ last = list_last_entry(&tbl->matchers_list,
+ struct mlx5hws_matcher,
+ list_node);
+ prev = list_prev_entry(matcher, list_node);
+ next = list_next_entry(matcher, list_node);
+
+ list_del_init(&matcher->list_node);
+
+ if (first == last) {
+ /* This was the only matcher in the list.
+ * Reset isolated table FT next RTCs and connect it
+ * to the whole complex matcher end FT instead.
+ */
+ ret = mlx5hws_table_ft_set_next_rtc(ctx,
+ tbl->ft_id,
+ tbl->fw_ft_type,
+ 0, 0);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Isolated matcher: failed to reset FT's next RTCs\n");
+ return ret;
+ }
+
+ end_ft_id = matcher->attr.isolated_matcher_end_ft_id;
+ ret = mlx5hws_table_ft_set_next_ft(tbl->ctx,
+ tbl->ft_id,
+ tbl->fw_ft_type,
+ end_ft_id);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Isolated matcher: failed to set FT's miss_ft_id\n");
+ return ret;
+ }
+
+ return 0;
+ }
+
+ /* At this point we know that there are more matchers in the list */
+
+ if (matcher == first) {
+ /* We've disconnected the first matcher.
+ * Now update isolated table default FT.
+ */
+ if (!next)
+ return -EINVAL;
+ return mlx5hws_table_ft_set_next_rtc(ctx,
+ tbl->ft_id,
+ tbl->fw_ft_type,
+ next->match_ste.rtc_0_id,
+ next->match_ste.rtc_1_id);
+ }
+
+ if (matcher == last) {
+ /* If we've disconnected the last matcher - update prev
+ * matcher's end_ft to point to the complex matcher end_ft.
+ */
+ if (!prev)
+ return -EINVAL;
+ return hws_matcher_connect_end_ft_isolated(prev);
+ }
+
+ /* This wasn't the first or the last matcher, which means that it has
+ * both prev and next matchers. Note that this only happens if we're
+ * disconnecting collision matcher of the old matcher during rehash.
+ */
+ if (!prev || !next ||
+ !(matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION))
+ return -EINVAL;
+
+ /* Update prev end FT to point to next match RTC */
+ return mlx5hws_table_ft_set_next_rtc(ctx,
+ prev->end_ft_id,
+ tbl->fw_ft_type,
+ next->match_ste.rtc_0_id,
+ next->match_ste.rtc_1_id);
+}
+
+static int hws_matcher_disconnect(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_matcher *next = NULL, *prev = NULL;
+ struct mlx5hws_table *tbl = matcher->tbl;
+ u32 prev_ft_id = tbl->ft_id;
+ int ret;
+
+ if (mlx5hws_matcher_is_isolated(matcher))
+ return hws_matcher_disconnect_isolated(matcher);
+
+ if (!list_is_first(&matcher->list_node, &tbl->matchers_list)) {
+ prev = list_prev_entry(matcher, list_node);
+ prev_ft_id = prev->end_ft_id;
+ }
+
+ if (!list_is_last(&matcher->list_node, &tbl->matchers_list))
+ next = list_next_entry(matcher, list_node);
+
+ list_del_init(&matcher->list_node);
+
+ if (next) {
+ /* Connect previous end FT to next RTC */
+ ret = mlx5hws_table_ft_set_next_rtc(tbl->ctx,
+ prev_ft_id,
+ tbl->fw_ft_type,
+ next->match_ste.rtc_0_id,
+ next->match_ste.rtc_1_id);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Fatal error, failed to disconnect matcher\n");
+ return ret;
+ }
+ } else {
+ ret = mlx5hws_table_connect_to_miss_table(tbl, tbl->default_miss.miss_tbl);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Fatal error, failed to disconnect last matcher\n");
+ return ret;
+ }
+ }
+
+ /* Removing the first matcher, update connected miss tables if any */
+ if (prev_ft_id == tbl->ft_id) {
+ ret = mlx5hws_table_update_connected_miss_tables(tbl);
+ if (ret) {
+ mlx5hws_err(tbl->ctx,
+ "Fatal error, failed to update connected miss table\n");
+ return ret;
+ }
+ }
+
+ ret = mlx5hws_table_ft_set_default_next_ft(tbl, prev_ft_id);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Fatal error, failed to restore matcher ft default miss\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hws_matcher_set_rtc_attr_sz(struct mlx5hws_matcher *matcher,
+ struct mlx5hws_cmd_rtc_create_attr *rtc_attr,
+ bool is_mirror)
+{
+ enum mlx5hws_matcher_flow_src flow_src = matcher->attr.optimize_flow_src;
+
+ if ((flow_src == MLX5HWS_MATCHER_FLOW_SRC_VPORT && !is_mirror) ||
+ (flow_src == MLX5HWS_MATCHER_FLOW_SRC_WIRE && is_mirror)) {
+ /* Optimize FDB RTC */
+ rtc_attr->log_size = 0;
+ rtc_attr->log_depth = 0;
+ }
+}
+
+static int hws_matcher_create_rtc(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_matcher_attr *attr = &matcher->attr;
+ struct mlx5hws_cmd_rtc_create_attr rtc_attr = {0};
+ struct mlx5hws_match_template *mt = matcher->mt;
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ union mlx5hws_matcher_size *size_rx, *size_tx;
+ struct mlx5hws_table *tbl = matcher->tbl;
+ u32 obj_id;
+ int ret;
+
+ size_rx = &attr->size[MLX5HWS_MATCHER_SIZE_TYPE_RX];
+ size_tx = &attr->size[MLX5HWS_MATCHER_SIZE_TYPE_TX];
+
+ rtc_attr.log_size = size_rx->table.sz_row_log;
+ rtc_attr.log_depth = size_rx->table.sz_col_log;
+ rtc_attr.is_frst_jumbo = mlx5hws_matcher_mt_is_jumbo(mt);
+ rtc_attr.is_scnd_range = 0;
+ rtc_attr.miss_ft_id = matcher->end_ft_id;
+
+ if (attr->insert_mode == MLX5HWS_MATCHER_INSERT_BY_HASH) {
+ /* The usual Hash Table */
+ rtc_attr.update_index_mode =
+ MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH;
+
+ /* The first mt is used since all share the same definer */
+ rtc_attr.match_definer_0 = mlx5hws_definer_get_id(mt->definer);
+ } else if (attr->insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX) {
+ rtc_attr.update_index_mode =
+ MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET;
+ rtc_attr.num_hash_definer = 1;
+
+ if (attr->distribute_mode ==
+ MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH) {
+ /* Hash Split Table */
+ rtc_attr.access_index_mode =
+ MLX5_IFC_RTC_STE_ACCESS_MODE_BY_HASH;
+ rtc_attr.match_definer_0 =
+ mlx5hws_definer_get_id(mt->definer);
+ } else if (attr->distribute_mode ==
+ MLX5HWS_MATCHER_DISTRIBUTE_BY_LINEAR) {
+ /* Linear Lookup Table */
+ rtc_attr.access_index_mode =
+ MLX5_IFC_RTC_STE_ACCESS_MODE_LINEAR;
+ rtc_attr.match_definer_0 =
+ ctx->caps->linear_match_definer;
+ }
+ }
+
+ rtc_attr.pd = ctx->pd_num;
+ rtc_attr.ste_base = matcher->match_ste.ste_0_base;
+ rtc_attr.reparse_mode = mlx5hws_context_get_reparse_mode(ctx);
+ rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(tbl->type, false);
+ hws_matcher_set_rtc_attr_sz(matcher, &rtc_attr, false);
+
+ /* STC is a single resource (obj_id), use any STC for the ID */
+ obj_id = mlx5hws_pool_get_base_id(ctx->stc_pool);
+ rtc_attr.stc_base = obj_id;
+
+ ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr,
+ &matcher->match_ste.rtc_0_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to create matcher RTC\n");
+ return ret;
+ }
+
+ if (tbl->type == MLX5HWS_TABLE_TYPE_FDB) {
+ rtc_attr.log_size = size_tx->table.sz_row_log;
+ rtc_attr.log_depth = size_tx->table.sz_col_log;
+ rtc_attr.ste_base = matcher->match_ste.ste_1_base;
+ rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(tbl->type, true);
+
+ obj_id = mlx5hws_pool_get_base_mirror_id(ctx->stc_pool);
+ rtc_attr.stc_base = obj_id;
+ hws_matcher_set_rtc_attr_sz(matcher, &rtc_attr, true);
+
+ ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr,
+ &matcher->match_ste.rtc_1_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to create mirror matcher RTC\n");
+ goto destroy_rtc_0;
+ }
+ }
+
+ return 0;
+
+destroy_rtc_0:
+ mlx5hws_cmd_rtc_destroy(ctx->mdev, matcher->match_ste.rtc_0_id);
+ return ret;
+}
+
+static void hws_matcher_destroy_rtc(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5_core_dev *mdev = matcher->tbl->ctx->mdev;
+
+ if (matcher->tbl->type == MLX5HWS_TABLE_TYPE_FDB)
+ mlx5hws_cmd_rtc_destroy(mdev, matcher->match_ste.rtc_1_id);
+
+ mlx5hws_cmd_rtc_destroy(mdev, matcher->match_ste.rtc_0_id);
+}
+
+static int
+hws_matcher_check_attr_sz(struct mlx5hws_cmd_query_caps *caps,
+ struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_matcher_attr *attr = &matcher->attr;
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ union mlx5hws_matcher_size *size;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ size = &attr->size[i];
+
+ if (size->table.sz_col_log > caps->rtc_log_depth_max) {
+ mlx5hws_err(ctx, "Matcher depth exceeds limit %d\n",
+ caps->rtc_log_depth_max);
+ return -EOPNOTSUPP;
+ }
+
+ if (size->table.sz_col_log + size->table.sz_row_log >
+ caps->ste_alloc_log_max) {
+ mlx5hws_err(ctx,
+ "Total matcher size exceeds limit %d\n",
+ caps->ste_alloc_log_max);
+ return -EOPNOTSUPP;
+ }
+
+ if (size->table.sz_col_log + size->table.sz_row_log <
+ caps->ste_alloc_log_gran) {
+ mlx5hws_err(ctx, "Total matcher size below limit %d\n",
+ caps->ste_alloc_log_gran);
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
+static int hws_matcher_check_and_process_at(struct mlx5hws_matcher *matcher,
+ struct mlx5hws_action_template *at)
+{
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ bool valid;
+ int ret;
+
+ valid = mlx5hws_action_check_combo(ctx, at->action_type_arr, matcher->tbl->type);
+ if (!valid) {
+ mlx5hws_err(ctx, "Invalid combination in action template\n");
+ return -EINVAL;
+ }
+
+ /* Process action template to setters */
+ ret = mlx5hws_action_template_process(at);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to process action template\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hws_matcher_bind_at(struct mlx5hws_matcher *matcher)
+{
+ bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(matcher->mt);
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ u8 required_stes, max_stes;
+ int i, ret;
+
+ if (matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION)
+ return 0;
+
+ max_stes = 0;
+ for (i = 0; i < matcher->num_of_at; i++) {
+ struct mlx5hws_action_template *at = &matcher->at[i];
+
+ ret = hws_matcher_check_and_process_at(matcher, at);
+ if (ret) {
+ mlx5hws_err(ctx, "Invalid at %d", i);
+ return ret;
+ }
+
+ required_stes = at->num_of_action_stes - (!is_jumbo || at->only_term);
+ max_stes = max(max_stes, required_stes);
+
+ /* Future: Optimize reparse */
+ }
+
+ matcher->num_of_action_stes = max_stes;
+
+ return 0;
+}
+
+static void hws_matcher_set_ip_version_match(struct mlx5hws_matcher *matcher)
+{
+ int i;
+
+ for (i = 0; i < matcher->mt->fc_sz; i++) {
+ switch (matcher->mt->fc[i].fname) {
+ case MLX5HWS_DEFINER_FNAME_ETH_TYPE_O:
+ matcher->matches_outer_ethertype = 1;
+ break;
+ case MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_O:
+ matcher->matches_outer_ip_version = 1;
+ break;
+ case MLX5HWS_DEFINER_FNAME_ETH_TYPE_I:
+ matcher->matches_inner_ethertype = 1;
+ break;
+ case MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_I:
+ matcher->matches_inner_ip_version = 1;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static int hws_matcher_bind_mt(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_cmd_ste_create_attr ste_attr = {};
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ union mlx5hws_matcher_size *size;
+ int ret;
+
+ /* Calculate match, range and hash definers */
+ if (!(matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION)) {
+ ret = mlx5hws_definer_mt_init(ctx, matcher->mt);
+ if (ret) {
+ if (ret == -E2BIG)
+ mlx5hws_err(ctx, "Failed to set matcher templates with match definers\n");
+ return ret;
+ }
+ }
+
+ hws_matcher_set_ip_version_match(matcher);
+
+ /* Create an STE range each for RX and TX. */
+ ste_attr.table_type = FS_FT_FDB_RX;
+ size = &matcher->attr.size[MLX5HWS_MATCHER_SIZE_TYPE_RX];
+ ste_attr.log_obj_range =
+ matcher->attr.optimize_flow_src ==
+ MLX5HWS_MATCHER_FLOW_SRC_VPORT ?
+ 0 : size->table.sz_col_log + size->table.sz_row_log;
+
+ ret = mlx5hws_cmd_ste_create(ctx->mdev, &ste_attr,
+ &matcher->match_ste.ste_0_base);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate RX STE range (%d)\n", ret);
+ goto uninit_match_definer;
+ }
+
+ ste_attr.table_type = FS_FT_FDB_TX;
+ size = &matcher->attr.size[MLX5HWS_MATCHER_SIZE_TYPE_TX];
+ ste_attr.log_obj_range =
+ matcher->attr.optimize_flow_src ==
+ MLX5HWS_MATCHER_FLOW_SRC_WIRE ?
+ 0 : size->table.sz_col_log + size->table.sz_row_log;
+
+ ret = mlx5hws_cmd_ste_create(ctx->mdev, &ste_attr,
+ &matcher->match_ste.ste_1_base);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate TX STE range (%d)\n", ret);
+ goto destroy_rx_ste_range;
+ }
+
+ return 0;
+
+destroy_rx_ste_range:
+ mlx5hws_cmd_ste_destroy(ctx->mdev, matcher->match_ste.ste_0_base);
+uninit_match_definer:
+ if (!(matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION))
+ mlx5hws_definer_mt_uninit(ctx, matcher->mt);
+ return ret;
+}
+
+static void hws_matcher_unbind_mt(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+
+ mlx5hws_cmd_ste_destroy(ctx->mdev, matcher->match_ste.ste_1_base);
+ mlx5hws_cmd_ste_destroy(ctx->mdev, matcher->match_ste.ste_0_base);
+ if (!(matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION))
+ mlx5hws_definer_mt_uninit(ctx, matcher->mt);
+}
+
+static int
+hws_matcher_validate_insert_mode(struct mlx5hws_cmd_query_caps *caps,
+ struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_matcher_attr *attr = &matcher->attr;
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ union mlx5hws_matcher_size *size_rx, *size_tx;
+
+ size_rx = &matcher->attr.size[MLX5HWS_MATCHER_SIZE_TYPE_RX];
+ size_tx = &matcher->attr.size[MLX5HWS_MATCHER_SIZE_TYPE_TX];
+
+ switch (attr->insert_mode) {
+ case MLX5HWS_MATCHER_INSERT_BY_HASH:
+ if (matcher->attr.distribute_mode != MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH) {
+ mlx5hws_err(ctx, "Invalid matcher distribute mode\n");
+ return -EOPNOTSUPP;
+ }
+ break;
+
+ case MLX5HWS_MATCHER_INSERT_BY_INDEX:
+ if (size_rx->table.sz_col_log || size_tx->table.sz_col_log) {
+ mlx5hws_err(ctx, "Matcher with INSERT_BY_INDEX supports only Nx1 table size\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (attr->distribute_mode == MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH) {
+ /* Hash Split Table */
+ if (!caps->rtc_hash_split_table) {
+ mlx5hws_err(ctx, "FW doesn't support insert by index and hash distribute\n");
+ return -EOPNOTSUPP;
+ }
+ } else if (attr->distribute_mode == MLX5HWS_MATCHER_DISTRIBUTE_BY_LINEAR) {
+ /* Linear Lookup Table */
+ if (!caps->rtc_linear_lookup_table ||
+ !IS_BIT_SET(caps->access_index_mode,
+ MLX5_IFC_RTC_STE_ACCESS_MODE_LINEAR)) {
+ mlx5hws_err(ctx, "FW doesn't support insert by index and linear distribute\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (size_rx->table.sz_row_log >
+ MLX5_IFC_RTC_LINEAR_LOOKUP_TBL_LOG_MAX ||
+ size_tx->table.sz_row_log >
+ MLX5_IFC_RTC_LINEAR_LOOKUP_TBL_LOG_MAX) {
+ mlx5hws_err(ctx, "Matcher with linear distribute: rows exceed limit %d",
+ MLX5_IFC_RTC_LINEAR_LOOKUP_TBL_LOG_MAX);
+ return -EOPNOTSUPP;
+ }
+ } else {
+ mlx5hws_err(ctx, "Matcher has unsupported distribute mode\n");
+ return -EOPNOTSUPP;
+ }
+ break;
+
+ default:
+ mlx5hws_err(ctx, "Matcher has unsupported insert mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
+hws_matcher_process_attr(struct mlx5hws_cmd_query_caps *caps,
+ struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_matcher_attr *attr = &matcher->attr;
+ union mlx5hws_matcher_size *size_rx, *size_tx;
+
+ size_rx = &attr->size[MLX5HWS_MATCHER_SIZE_TYPE_RX];
+ size_tx = &attr->size[MLX5HWS_MATCHER_SIZE_TYPE_TX];
+
+ if (hws_matcher_validate_insert_mode(caps, matcher))
+ return -EOPNOTSUPP;
+
+ if (matcher->tbl->type != MLX5HWS_TABLE_TYPE_FDB && attr->optimize_flow_src) {
+ mlx5hws_err(matcher->tbl->ctx, "NIC domain doesn't support flow_src\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* Convert number of rules to the required depth */
+ if (attr->mode == MLX5HWS_MATCHER_RESOURCE_MODE_RULE &&
+ attr->insert_mode == MLX5HWS_MATCHER_INSERT_BY_HASH) {
+ size_rx->table.sz_col_log =
+ hws_matcher_rules_to_tbl_depth(size_rx->rule.num_log);
+ size_tx->table.sz_col_log =
+ hws_matcher_rules_to_tbl_depth(size_tx->rule.num_log);
+ }
+
+ matcher->flags |= attr->resizable ? MLX5HWS_MATCHER_FLAGS_RESIZABLE : 0;
+ matcher->flags |= attr->isolated_matcher_end_ft_id ?
+ MLX5HWS_MATCHER_FLAGS_ISOLATED : 0;
+
+ return hws_matcher_check_attr_sz(caps, matcher);
+}
+
+static int hws_matcher_create_and_connect(struct mlx5hws_matcher *matcher)
+{
+ int ret;
+
+ /* Select and create the definers for the current matcher */
+ ret = hws_matcher_bind_mt(matcher);
+ if (ret)
+ return ret;
+
+ /* Calculate and verify action combination */
+ ret = hws_matcher_bind_at(matcher);
+ if (ret)
+ goto unbind_mt;
+
+ /* Create matcher end flow table anchor */
+ ret = hws_matcher_create_end_ft(matcher);
+ if (ret)
+ goto unbind_mt;
+
+ /* Allocate the RTC for the new matcher */
+ ret = hws_matcher_create_rtc(matcher);
+ if (ret)
+ goto destroy_end_ft;
+
+ /* Connect the matcher to the matcher list */
+ ret = hws_matcher_connect(matcher);
+ if (ret)
+ goto destroy_rtc;
+
+ return 0;
+
+destroy_rtc:
+ hws_matcher_destroy_rtc(matcher);
+destroy_end_ft:
+ hws_matcher_destroy_end_ft(matcher);
+unbind_mt:
+ hws_matcher_unbind_mt(matcher);
+ return ret;
+}
+
+static void hws_matcher_destroy_and_disconnect(struct mlx5hws_matcher *matcher)
+{
+ hws_matcher_disconnect(matcher);
+ hws_matcher_destroy_rtc(matcher);
+ hws_matcher_destroy_end_ft(matcher);
+ hws_matcher_unbind_mt(matcher);
+}
+
+static int
+hws_matcher_create_col_matcher(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ union mlx5hws_matcher_size *size_rx, *size_tx;
+ struct mlx5hws_matcher *col_matcher;
+ int i, ret;
+
+ size_rx = &matcher->attr.size[MLX5HWS_MATCHER_SIZE_TYPE_RX];
+ size_tx = &matcher->attr.size[MLX5HWS_MATCHER_SIZE_TYPE_TX];
+
+ if (matcher->attr.mode != MLX5HWS_MATCHER_RESOURCE_MODE_RULE ||
+ matcher->attr.insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX)
+ return 0;
+
+ if (!hws_matcher_requires_col_tbl(size_rx->rule.num_log) &&
+ !hws_matcher_requires_col_tbl(size_tx->rule.num_log))
+ return 0;
+
+ col_matcher = kzalloc(sizeof(*matcher), GFP_KERNEL);
+ if (!col_matcher)
+ return -ENOMEM;
+
+ col_matcher->tbl = matcher->tbl;
+ col_matcher->mt = matcher->mt;
+ col_matcher->at = matcher->at;
+ col_matcher->num_of_at = matcher->num_of_at;
+ col_matcher->num_of_mt = matcher->num_of_mt;
+ col_matcher->attr.priority = matcher->attr.priority;
+ col_matcher->flags = matcher->flags;
+ col_matcher->flags |= MLX5HWS_MATCHER_FLAGS_COLLISION;
+ col_matcher->attr.mode = MLX5HWS_MATCHER_RESOURCE_MODE_HTABLE;
+ col_matcher->attr.optimize_flow_src = matcher->attr.optimize_flow_src;
+ for (i = 0; i < 2; i++) {
+ union mlx5hws_matcher_size *dst = &col_matcher->attr.size[i];
+ union mlx5hws_matcher_size *src = &matcher->attr.size[i];
+
+ dst->table.sz_row_log = src->rule.num_log;
+ dst->table.sz_col_log = MLX5HWS_MATCHER_ASSURED_COL_TBL_DEPTH;
+ if (dst->table.sz_row_log > MLX5HWS_MATCHER_ASSURED_ROW_RATIO)
+ dst->table.sz_row_log -=
+ MLX5HWS_MATCHER_ASSURED_ROW_RATIO;
+ }
+
+ col_matcher->attr.max_num_of_at_attach = matcher->attr.max_num_of_at_attach;
+ col_matcher->attr.isolated_matcher_end_ft_id =
+ matcher->attr.isolated_matcher_end_ft_id;
+
+ ret = hws_matcher_process_attr(ctx->caps, col_matcher);
+ if (ret)
+ goto free_col_matcher;
+
+ ret = hws_matcher_create_and_connect(col_matcher);
+ if (ret)
+ goto free_col_matcher;
+
+ matcher->col_matcher = col_matcher;
+
+ return 0;
+
+free_col_matcher:
+ kfree(col_matcher);
+ mlx5hws_err(ctx, "Failed to create assured collision matcher\n");
+ return ret;
+}
+
+static void
+hws_matcher_destroy_col_matcher(struct mlx5hws_matcher *matcher)
+{
+ if (matcher->attr.mode != MLX5HWS_MATCHER_RESOURCE_MODE_RULE ||
+ matcher->attr.insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX)
+ return;
+
+ if (matcher->col_matcher) {
+ hws_matcher_destroy_and_disconnect(matcher->col_matcher);
+ kfree(matcher->col_matcher);
+ }
+}
+
+static int hws_matcher_init(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ int ret;
+
+ mutex_lock(&ctx->ctrl_lock);
+
+ /* Allocate matcher resource and connect to the packet pipe */
+ ret = hws_matcher_create_and_connect(matcher);
+ if (ret)
+ goto unlock_err;
+
+ /* Create additional matcher for collision handling */
+ ret = hws_matcher_create_col_matcher(matcher);
+ if (ret)
+ goto destroy_and_disconnect;
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return 0;
+
+destroy_and_disconnect:
+ hws_matcher_destroy_and_disconnect(matcher);
+unlock_err:
+ mutex_unlock(&ctx->ctrl_lock);
+ return ret;
+}
+
+static int hws_matcher_uninit(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+
+ mutex_lock(&ctx->ctrl_lock);
+ hws_matcher_destroy_col_matcher(matcher);
+ hws_matcher_destroy_and_disconnect(matcher);
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return 0;
+}
+
+static int hws_matcher_grow_at_array(struct mlx5hws_matcher *matcher)
+{
+ void *p;
+
+ if (matcher->size_of_at_array >= MLX5HWS_MATCHER_MAX_AT)
+ return -ENOMEM;
+
+ matcher->size_of_at_array *= 2;
+ p = krealloc(matcher->at,
+ matcher->size_of_at_array * sizeof(*matcher->at),
+ __GFP_ZERO | GFP_KERNEL);
+ if (!p) {
+ matcher->size_of_at_array /= 2;
+ return -ENOMEM;
+ }
+
+ matcher->at = p;
+
+ return 0;
+}
+
+int mlx5hws_matcher_attach_at(struct mlx5hws_matcher *matcher,
+ struct mlx5hws_action_template *at)
+{
+ bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(matcher->mt);
+ u32 required_stes;
+ int ret;
+
+ if (unlikely(matcher->num_of_at >= matcher->size_of_at_array)) {
+ ret = hws_matcher_grow_at_array(matcher);
+ if (ret)
+ return ret;
+
+ if (matcher->col_matcher) {
+ ret = hws_matcher_grow_at_array(matcher->col_matcher);
+ if (ret)
+ return ret;
+ }
+ }
+
+ ret = hws_matcher_check_and_process_at(matcher, at);
+ if (ret)
+ return ret;
+
+ required_stes = at->num_of_action_stes - (!is_jumbo || at->only_term);
+ if (matcher->num_of_action_stes < required_stes)
+ matcher->num_of_action_stes = required_stes;
+
+ matcher->at[matcher->num_of_at] = *at;
+ matcher->num_of_at += 1;
+
+ if (matcher->col_matcher)
+ matcher->col_matcher->num_of_at = matcher->num_of_at;
+
+ return 0;
+}
+
+static int
+hws_matcher_set_templates(struct mlx5hws_matcher *matcher,
+ struct mlx5hws_match_template *mt[],
+ u8 num_of_mt,
+ struct mlx5hws_action_template *at[],
+ u8 num_of_at)
+{
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ int ret = 0;
+ int i;
+
+ if (!num_of_mt || !num_of_at) {
+ mlx5hws_err(ctx, "Number of action/match template cannot be zero\n");
+ return -EOPNOTSUPP;
+ }
+
+ matcher->mt = kcalloc(num_of_mt, sizeof(*matcher->mt), GFP_KERNEL);
+ if (!matcher->mt)
+ return -ENOMEM;
+
+ matcher->size_of_at_array =
+ num_of_at + matcher->attr.max_num_of_at_attach;
+ matcher->at = kvcalloc(matcher->size_of_at_array, sizeof(*matcher->at),
+ GFP_KERNEL);
+ if (!matcher->at) {
+ mlx5hws_err(ctx, "Failed to allocate action template array\n");
+ ret = -ENOMEM;
+ goto free_mt;
+ }
+
+ for (i = 0; i < num_of_mt; i++)
+ matcher->mt[i] = *mt[i];
+
+ for (i = 0; i < num_of_at; i++)
+ matcher->at[i] = *at[i];
+
+ matcher->num_of_mt = num_of_mt;
+ matcher->num_of_at = num_of_at;
+
+ return 0;
+
+free_mt:
+ kfree(matcher->mt);
+ return ret;
+}
+
+static void
+hws_matcher_unset_templates(struct mlx5hws_matcher *matcher)
+{
+ kvfree(matcher->at);
+ kfree(matcher->mt);
+}
+
+struct mlx5hws_matcher *
+mlx5hws_matcher_create(struct mlx5hws_table *tbl,
+ struct mlx5hws_match_template *mt[],
+ u8 num_of_mt,
+ struct mlx5hws_action_template *at[],
+ u8 num_of_at,
+ struct mlx5hws_matcher_attr *attr)
+{
+ struct mlx5hws_context *ctx = tbl->ctx;
+ struct mlx5hws_matcher *matcher;
+ int ret;
+
+ matcher = kzalloc(sizeof(*matcher), GFP_KERNEL);
+ if (!matcher)
+ return NULL;
+
+ matcher->tbl = tbl;
+ matcher->attr = *attr;
+
+ ret = hws_matcher_process_attr(tbl->ctx->caps, matcher);
+ if (ret)
+ goto free_matcher;
+
+ ret = hws_matcher_set_templates(matcher, mt, num_of_mt, at, num_of_at);
+ if (ret)
+ goto free_matcher;
+
+ ret = hws_matcher_init(matcher);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to initialise matcher: %d\n", ret);
+ goto unset_templates;
+ }
+
+ return matcher;
+
+unset_templates:
+ hws_matcher_unset_templates(matcher);
+free_matcher:
+ kfree(matcher);
+ return NULL;
+}
+
+int mlx5hws_matcher_destroy(struct mlx5hws_matcher *matcher)
+{
+ hws_matcher_uninit(matcher);
+ hws_matcher_unset_templates(matcher);
+ kfree(matcher);
+ return 0;
+}
+
+struct mlx5hws_match_template *
+mlx5hws_match_template_create(struct mlx5hws_context *ctx,
+ u32 *match_param,
+ u32 match_param_sz,
+ u8 match_criteria_enable)
+{
+ struct mlx5hws_match_template *mt;
+
+ mt = kzalloc(sizeof(*mt), GFP_KERNEL);
+ if (!mt)
+ return NULL;
+
+ mt->match_param = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
+ if (!mt->match_param)
+ goto free_template;
+
+ memcpy(mt->match_param, match_param, match_param_sz);
+ mt->match_criteria_enable = match_criteria_enable;
+
+ return mt;
+
+free_template:
+ kfree(mt);
+ return NULL;
+}
+
+int mlx5hws_match_template_destroy(struct mlx5hws_match_template *mt)
+{
+ kfree(mt->match_param);
+ kfree(mt);
+ return 0;
+}
+
+static int hws_matcher_resize_precheck(struct mlx5hws_matcher *src_matcher,
+ struct mlx5hws_matcher *dst_matcher)
+{
+ struct mlx5hws_context *ctx = src_matcher->tbl->ctx;
+ int i;
+
+ if (src_matcher->tbl->type != dst_matcher->tbl->type) {
+ mlx5hws_err(ctx, "Table type mismatch for src/dst matchers\n");
+ return -EINVAL;
+ }
+
+ if (!mlx5hws_matcher_is_resizable(src_matcher) ||
+ !mlx5hws_matcher_is_resizable(dst_matcher)) {
+ mlx5hws_err(ctx, "Src/dst matcher is not resizable\n");
+ return -EINVAL;
+ }
+
+ if (mlx5hws_matcher_is_insert_by_idx(src_matcher) !=
+ mlx5hws_matcher_is_insert_by_idx(dst_matcher)) {
+ mlx5hws_err(ctx, "Src/dst matchers insert mode mismatch\n");
+ return -EINVAL;
+ }
+
+ if (mlx5hws_matcher_is_in_resize(src_matcher) ||
+ mlx5hws_matcher_is_in_resize(dst_matcher)) {
+ mlx5hws_err(ctx, "Src/dst matcher is already in resize\n");
+ return -EINVAL;
+ }
+
+ /* Compare match templates - make sure the definers are equivalent */
+ if (src_matcher->num_of_mt != dst_matcher->num_of_mt) {
+ mlx5hws_err(ctx, "Src/dst matcher match templates mismatch\n");
+ return -EINVAL;
+ }
+
+ if (src_matcher->num_of_action_stes > dst_matcher->num_of_action_stes) {
+ mlx5hws_err(ctx, "Src/dst matcher max STEs mismatch\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < src_matcher->num_of_mt; i++) {
+ if (mlx5hws_definer_compare(src_matcher->mt[i].definer,
+ dst_matcher->mt[i].definer)) {
+ mlx5hws_err(ctx, "Src/dst matcher definers mismatch\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+int mlx5hws_matcher_resize_set_target(struct mlx5hws_matcher *src_matcher,
+ struct mlx5hws_matcher *dst_matcher)
+{
+ int ret = 0;
+
+ mutex_lock(&src_matcher->tbl->ctx->ctrl_lock);
+
+ ret = hws_matcher_resize_precheck(src_matcher, dst_matcher);
+ if (ret)
+ goto out;
+
+ src_matcher->resize_dst = dst_matcher;
+
+out:
+ mutex_unlock(&src_matcher->tbl->ctx->ctrl_lock);
+ return ret;
+}
+
+int mlx5hws_matcher_resize_rule_move(struct mlx5hws_matcher *src_matcher,
+ struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ struct mlx5hws_context *ctx = src_matcher->tbl->ctx;
+
+ if (unlikely(!mlx5hws_matcher_is_in_resize(src_matcher))) {
+ mlx5hws_err(ctx, "Matcher is not resizable or not in resize\n");
+ return -EINVAL;
+ }
+
+ if (unlikely(src_matcher != rule->matcher)) {
+ mlx5hws_err(ctx, "Rule doesn't belong to src matcher\n");
+ return -EINVAL;
+ }
+
+ return mlx5hws_rule_move_hws_add(rule, attr);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h
new file mode 100644
index 000000000000..ae20bcebfdde
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef HWS_MATCHER_H_
+#define HWS_MATCHER_H_
+
+/* We calculated that concatenating a collision table to the main table with
+ * 3% of the main table rows will be enough resources for high insertion
+ * success probability.
+ *
+ * The calculation: log2(2^x * 3 / 100) = log2(2^x) + log2(3/100) = x - 5.05 ~ 5
+ */
+#define MLX5HWS_MATCHER_ASSURED_ROW_RATIO 5
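+/* For illustration: with this ratio, a main table of 2^20 rows gets a
+ * collision table of 2^(20 - 5) = 2^15 rows, i.e. 1/32 (~3%) of the main
+ * table, matching the calculation above.
+ */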
+/* Threshold to determine whether the number of rules requires a collision table */
+#define MLX5HWS_MATCHER_ASSURED_RULES_TH 10
+/* Required depth of an assured collision table */
+#define MLX5HWS_MATCHER_ASSURED_COL_TBL_DEPTH 4
+/* Required depth of the main large table */
+#define MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH 2
+
+/* Action RTC size multiplier that is required in order
+ * to support rule update for rules with action STEs.
+ */
+#define MLX5HWS_MATCHER_ACTION_RTC_UPDATE_MULT 1
+
+/* Maximum number of action templates that can be attached to a matcher. */
+#define MLX5HWS_MATCHER_MAX_AT 128
+
+enum mlx5hws_matcher_offset {
+ MLX5HWS_MATCHER_OFFSET_TAG_DW1 = 12,
+ MLX5HWS_MATCHER_OFFSET_TAG_DW0 = 13,
+};
+
+enum mlx5hws_matcher_flags {
+ MLX5HWS_MATCHER_FLAGS_COLLISION = 1 << 2,
+ MLX5HWS_MATCHER_FLAGS_RESIZABLE = 1 << 3,
+ MLX5HWS_MATCHER_FLAGS_ISOLATED = 1 << 4,
+};
+
+struct mlx5hws_match_template {
+ struct mlx5hws_definer *definer;
+ struct mlx5hws_definer_fc *fc;
+ u32 *match_param;
+ u8 match_criteria_enable;
+ u16 fc_sz;
+};
+
+struct mlx5hws_matcher_match_ste {
+ u32 rtc_0_id;
+ u32 rtc_1_id;
+ u32 ste_0_base;
+ u32 ste_1_base;
+};
+
+enum {
+ MLX5HWS_MATCHER_IPV_UNSET = 0,
+ MLX5HWS_MATCHER_IPV_4 = 1,
+ MLX5HWS_MATCHER_IPV_6 = 2,
+};
+
+struct mlx5hws_matcher {
+ struct mlx5hws_table *tbl;
+ struct mlx5hws_matcher_attr attr;
+ struct mlx5hws_match_template *mt;
+ struct mlx5hws_action_template *at;
+ u8 num_of_at;
+ u8 size_of_at_array;
+ u8 num_of_mt;
+ u8 num_of_action_stes;
+ /* enum mlx5hws_matcher_flags */
+ u8 flags;
+ u8 matches_outer_ethertype:1;
+ u8 matches_outer_ip_version:1;
+ u8 matches_inner_ethertype:1;
+ u8 matches_inner_ip_version:1;
+ u8 outer_ip_version:2;
+ u8 inner_ip_version:2;
+ u32 end_ft_id;
+ struct mlx5hws_matcher *col_matcher;
+ struct mlx5hws_matcher *resize_dst;
+ struct mlx5hws_matcher_match_ste match_ste;
+ struct list_head list_node;
+};
+
+static inline bool
+mlx5hws_matcher_mt_is_jumbo(struct mlx5hws_match_template *mt)
+{
+ return mlx5hws_definer_is_jumbo(mt->definer);
+}
+
+static inline bool mlx5hws_matcher_is_resizable(struct mlx5hws_matcher *matcher)
+{
+ return !!(matcher->flags & MLX5HWS_MATCHER_FLAGS_RESIZABLE);
+}
+
+static inline bool mlx5hws_matcher_is_in_resize(struct mlx5hws_matcher *matcher)
+{
+ return !!matcher->resize_dst;
+}
+
+static inline bool mlx5hws_matcher_is_isolated(struct mlx5hws_matcher *matcher)
+{
+ return !!(matcher->flags & MLX5HWS_MATCHER_FLAGS_ISOLATED);
+}
+
+static inline bool mlx5hws_matcher_is_insert_by_idx(struct mlx5hws_matcher *matcher)
+{
+ return matcher->attr.insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX;
+}
+
+int mlx5hws_matcher_update_end_ft_isolated(struct mlx5hws_table *tbl,
+ u32 miss_ft_id);
+
+#endif /* HWS_MATCHER_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
new file mode 100644
index 000000000000..1ad7a50d938b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
@@ -0,0 +1,941 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_H_
+#define MLX5HWS_H_
+
+struct mlx5hws_context;
+struct mlx5hws_table;
+struct mlx5hws_matcher;
+struct mlx5hws_rule;
+
+enum mlx5hws_table_type {
+ MLX5HWS_TABLE_TYPE_FDB,
+ MLX5HWS_TABLE_TYPE_MAX,
+};
+
+enum mlx5hws_matcher_resource_mode {
+ /* Allocate resources based on number of rules with minimal failure probability */
+ MLX5HWS_MATCHER_RESOURCE_MODE_RULE,
+ /* Allocate a fixed size hash table based on the given rows and columns */
+ MLX5HWS_MATCHER_RESOURCE_MODE_HTABLE,
+};
+
+enum mlx5hws_action_type {
+ MLX5HWS_ACTION_TYP_LAST,
+ MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2,
+ MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2,
+ MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2,
+ MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3,
+ MLX5HWS_ACTION_TYP_DROP,
+ MLX5HWS_ACTION_TYP_MISS,
+ MLX5HWS_ACTION_TYP_TBL,
+ MLX5HWS_ACTION_TYP_CTR,
+ MLX5HWS_ACTION_TYP_TAG,
+ MLX5HWS_ACTION_TYP_MODIFY_HDR,
+ MLX5HWS_ACTION_TYP_VPORT,
+ MLX5HWS_ACTION_TYP_POP_VLAN,
+ MLX5HWS_ACTION_TYP_PUSH_VLAN,
+ MLX5HWS_ACTION_TYP_ASO_METER,
+ MLX5HWS_ACTION_TYP_INSERT_HEADER,
+ MLX5HWS_ACTION_TYP_REMOVE_HEADER,
+ MLX5HWS_ACTION_TYP_RANGE,
+ MLX5HWS_ACTION_TYP_SAMPLER,
+ MLX5HWS_ACTION_TYP_DEST_ARRAY,
+ MLX5HWS_ACTION_TYP_MAX,
+};
+
+enum mlx5hws_action_flags {
+ MLX5HWS_ACTION_FLAG_HWS_FDB = 1 << 0,
+ /* Shared action can be used over a few threads, since the
+ * data is written only once at the creation of the action.
+ */
+ MLX5HWS_ACTION_FLAG_SHARED = 1 << 1,
+};
+
+enum mlx5hws_action_aso_meter_color {
+ MLX5HWS_ACTION_ASO_METER_COLOR_RED = 0x0,
+ MLX5HWS_ACTION_ASO_METER_COLOR_YELLOW = 0x1,
+ MLX5HWS_ACTION_ASO_METER_COLOR_GREEN = 0x2,
+ MLX5HWS_ACTION_ASO_METER_COLOR_UNDEFINED = 0x3,
+};
+
+enum mlx5hws_send_queue_actions {
+ /* Start executing all pending queued rules */
+ MLX5HWS_SEND_QUEUE_ACTION_DRAIN_ASYNC = 1 << 0,
+ /* Start executing all pending queued rules wait till completion */
+ MLX5HWS_SEND_QUEUE_ACTION_DRAIN_SYNC = 1 << 1,
+};
+
+struct mlx5hws_context_attr {
+ u16 queues;
+ u16 queue_size;
+};
+
+struct mlx5hws_table_attr {
+ enum mlx5hws_table_type type;
+ u32 level;
+ u16 uid;
+};
+
+enum mlx5hws_matcher_flow_src {
+ MLX5HWS_MATCHER_FLOW_SRC_ANY = 0x0,
+ MLX5HWS_MATCHER_FLOW_SRC_WIRE = 0x1,
+ MLX5HWS_MATCHER_FLOW_SRC_VPORT = 0x2,
+};
+
+enum mlx5hws_matcher_insert_mode {
+ MLX5HWS_MATCHER_INSERT_BY_HASH = 0x0,
+ MLX5HWS_MATCHER_INSERT_BY_INDEX = 0x1,
+};
+
+enum mlx5hws_matcher_distribute_mode {
+ MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH = 0x0,
+ MLX5HWS_MATCHER_DISTRIBUTE_BY_LINEAR = 0x1,
+};
+
+enum mlx5hws_matcher_size_type {
+ MLX5HWS_MATCHER_SIZE_TYPE_RX,
+ MLX5HWS_MATCHER_SIZE_TYPE_TX,
+ MLX5HWS_MATCHER_SIZE_TYPE_MAX,
+};
+
+union mlx5hws_matcher_size {
+ struct {
+ u8 sz_row_log;
+ u8 sz_col_log;
+ } table;
+
+ struct {
+ u8 num_log;
+ } rule;
+};
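+
+/* Which union member is filled depends on the matcher resource mode: with
+ * MLX5HWS_MATCHER_RESOURCE_MODE_RULE the caller provides rule.num_log and the
+ * table shape is derived from it, while with
+ * MLX5HWS_MATCHER_RESOURCE_MODE_HTABLE the caller provides table.sz_row_log
+ * and table.sz_col_log directly.
+ */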
+
+struct mlx5hws_matcher_attr {
+ /* Processing priority inside table */
+ u32 priority;
+ /* Provide all rules with unique rule_idx in num_log range to reduce locking */
+ bool optimize_using_rule_idx;
+ /* Resource mode and corresponding size */
+ enum mlx5hws_matcher_resource_mode mode;
+ /* Optimize insertion in case packet origin is the same for all rules */
+ enum mlx5hws_matcher_flow_src optimize_flow_src;
+ /* Define the insertion and distribution modes for this matcher */
+ enum mlx5hws_matcher_insert_mode insert_mode;
+ enum mlx5hws_matcher_distribute_mode distribute_mode;
+ /* Define whether the created matcher supports resizing into a bigger matcher */
+ bool resizable;
+ union mlx5hws_matcher_size size[MLX5HWS_MATCHER_SIZE_TYPE_MAX];
+ /* Optional AT attach configuration - Max number of additional AT */
+ u8 max_num_of_at_attach;
+ /* Optional end FT (miss FT ID) for match RTC (for isolated matcher) */
+ u32 isolated_matcher_end_ft_id;
+};
+
+struct mlx5hws_rule_attr {
+ void *user_data;
+ /* Valid if matcher optimize_using_rule_idx is set or
+ * if matcher is configured to insert rules by index.
+ */
+ u32 rule_idx;
+ u32 flow_source;
+ u16 queue_id;
+ u32 burst:1;
+};
+
+/* In actions that take an offset, the offset is unique and points to a single
+ * resource; the user should not reuse the same index because data updates
+ * are not atomic.
+ */
+struct mlx5hws_rule_action {
+ struct mlx5hws_action *action;
+ union {
+ struct {
+ u32 value;
+ } tag;
+
+ struct {
+ u32 offset;
+ } counter;
+
+ struct {
+ u32 offset;
+ u8 *data;
+ } modify_header;
+
+ struct {
+ u32 offset;
+ u8 hdr_idx;
+ u8 *data;
+ } reformat;
+
+ struct {
+ __be32 vlan_hdr;
+ } push_vlan;
+
+ struct {
+ u32 offset;
+ enum mlx5hws_action_aso_meter_color init_color;
+ } aso_meter;
+ };
+};
+
+struct mlx5hws_action_reformat_header {
+ size_t sz;
+ void *data;
+};
+
+struct mlx5hws_action_insert_header {
+ struct mlx5hws_action_reformat_header hdr;
+ /* PRM start anchor to which header will be inserted */
+ u8 anchor;
+ /* Header insertion offset in bytes, from the start
+ * anchor to the location where new header will be inserted.
+ */
+ u8 offset;
+ /* Indicates this header insertion adds encapsulation header to the packet,
+ * requiring device to update offloaded fields (for example IPv4 total length).
+ */
+ bool encap;
+};
+
+struct mlx5hws_action_remove_header_attr {
+ /* PRM start anchor from which header will be removed */
+ u8 anchor;
+ /* Header remove offset in bytes, from the start
+ * anchor to the location where remove header starts.
+ */
+ u8 offset;
+ /* Indicates the removed header size in bytes */
+ size_t size;
+};
+
+struct mlx5hws_action_mh_pattern {
+ /* Byte size of modify actions provided by "data" */
+ size_t sz;
+ /* PRM format modify actions pattern */
+ __be64 *data;
+};
+
+struct mlx5hws_action_dest_attr {
+ /* Required destination action to forward the packet */
+ struct mlx5hws_action *dest;
+ /* Optional reformat action */
+ struct mlx5hws_action *reformat;
+ bool is_wire_ft;
+};
+
+/**
+ * mlx5hws_is_supported - Check whether HWS is supported
+ *
+ * @mdev: The device to check.
+ *
+ * Return: true if supported, false otherwise.
+ */
+static inline bool mlx5hws_is_supported(struct mlx5_core_dev *mdev)
+{
+ u8 ignore_flow_level_rtc_valid;
+ u8 wqe_based_flow_table_update;
+
+ wqe_based_flow_table_update =
+ MLX5_CAP_GEN(mdev, wqe_based_flow_table_update_cap);
+ ignore_flow_level_rtc_valid =
+ MLX5_CAP_FLOWTABLE(mdev,
+ flow_table_properties_nic_receive.ignore_flow_level_rtc_valid);
+
+ return wqe_based_flow_table_update && ignore_flow_level_rtc_valid;
+}
+
+/**
+ * mlx5hws_context_open - Open a context used for direct rule insertion
+ * using hardware steering.
+ *
+ * @mdev: The device to be used for HWS.
+ * @attr: Attributes used for context open.
+ *
+ * Return: Pointer to mlx5hws_context on success, NULL otherwise.
+ */
+struct mlx5hws_context *
+mlx5hws_context_open(struct mlx5_core_dev *mdev,
+ struct mlx5hws_context_attr *attr);
+
+/**
+ * mlx5hws_context_close - Close a context used for direct hardware steering.
+ *
+ * @ctx: mlx5hws context to close.
+ *
+ * Return: Zero on success, non-zero otherwise.
+ */
+int mlx5hws_context_close(struct mlx5hws_context *ctx);
+
+/**
+ * mlx5hws_context_set_peer - Set a peer context.
+ * Each context can have multiple contexts as peers.
+ *
+ * @ctx: The context to which peer_ctx will be peered.
+ * @peer_ctx: The peer context.
+ * @peer_vhca_id: The peer context vhca id.
+ */
+void mlx5hws_context_set_peer(struct mlx5hws_context *ctx,
+ struct mlx5hws_context *peer_ctx,
+ u16 peer_vhca_id);
+
+/**
+ * mlx5hws_table_create - Create a new direct rule table.
+ * Each table can contain multiple matchers.
+ *
+ * @ctx: The context in which the new table will be opened.
+ * @attr: Attributes used for table creation.
+ *
+ * Return: Pointer to mlx5hws_table on success, NULL otherwise.
+ */
+struct mlx5hws_table *
+mlx5hws_table_create(struct mlx5hws_context *ctx,
+ struct mlx5hws_table_attr *attr);
+
+/**
+ * mlx5hws_table_destroy - Destroy direct rule table.
+ *
+ * @tbl: Table to destroy.
+ *
+ * Return: zero on success non zero otherwise.
+ */
+int mlx5hws_table_destroy(struct mlx5hws_table *tbl);
+
+/**
+ * mlx5hws_table_get_id() - Get ID of the flow table.
+ *
+ * @tbl: Table to get ID of.
+ *
+ * Return: ID of the table.
+ */
+u32 mlx5hws_table_get_id(struct mlx5hws_table *tbl);
+
+/**
+ * mlx5hws_table_set_default_miss - Set default miss table for mlx5hws_table
+ * by using another mlx5hws_table.
+ * Traffic that misses all the table's matchers will be forwarded to the miss table.
+ *
+ * @tbl: Source table
+ * @miss_tbl: Target (miss) table, or NULL to remove current miss table
+ *
+ * Return: Zero on success, non-zero otherwise.
+ */
+int mlx5hws_table_set_default_miss(struct mlx5hws_table *tbl,
+ struct mlx5hws_table *miss_tbl);
+
+/**
+ * mlx5hws_match_template_create - Create a new match template based on items mask.
+ * The match template will be used for matcher creation.
+ *
+ * @ctx: The context in which the new template will be created.
+ * @match_param: Describe the mask based on PRM match parameters.
+ * @match_param_sz: Size of match param buffer.
+ * @match_criteria_enable: Bitmap for each sub-set in match_criteria buffer.
+ *
+ * Return: Pointer to mlx5hws_match_template on success, NULL otherwise.
+ */
+struct mlx5hws_match_template *
+mlx5hws_match_template_create(struct mlx5hws_context *ctx,
+ u32 *match_param,
+ u32 match_param_sz,
+ u8 match_criteria_enable);
+
+/**
+ * mlx5hws_match_template_destroy - Destroy a match template.
+ *
+ * @mt: Match template to destroy.
+ *
+ * Return: Zero on success, non-zero otherwise.
+ */
+int mlx5hws_match_template_destroy(struct mlx5hws_match_template *mt);
+
+/**
+ * mlx5hws_action_template_create - Create a new action template based on an action_type array.
+ *
+ * @action_type: An array of actions based on the order of actions which will be provided
+ * with rule_actions to mlx5hws_rule_create. The last action is marked
+ * using MLX5HWS_ACTION_TYP_LAST.
+ *
+ * Return: Pointer to mlx5hws_action_template on success, NULL otherwise.
+ */
+struct mlx5hws_action_template *
+mlx5hws_action_template_create(enum mlx5hws_action_type action_type[]);
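+
+/*
+ * A minimal sketch (names are illustrative): an action template for rules
+ * that set a tag and then jump to another table could be created with
+ *
+ *     enum mlx5hws_action_type types[] = {
+ *             MLX5HWS_ACTION_TYP_TAG,
+ *             MLX5HWS_ACTION_TYP_TBL,
+ *             MLX5HWS_ACTION_TYP_LAST,
+ *     };
+ *     struct mlx5hws_action_template *at;
+ *
+ *     at = mlx5hws_action_template_create(types);
+ */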
+
+/**
+ * mlx5hws_action_template_destroy - Destroy action template.
+ *
+ * @at: Action template to destroy.
+ *
+ * Return: Zero on success, non-zero otherwise.
+ */
+int mlx5hws_action_template_destroy(struct mlx5hws_action_template *at);
+
+/**
+ * mlx5hws_matcher_create - Create a new direct rule matcher.
+ *
+ * Each matcher can contain multiple rules. Matchers on the table will be
+ * processed by priority. Matching fields and mask are described by the
+ * match template. In some cases, multiple match templates can be used on
+ * the same matcher.
+ *
+ * @table: The table in which the new matcher will be opened.
+ * @mt: Array of match templates to be used on matcher.
+ * @num_of_mt: Number of match templates in mt array.
+ * @at: Array of action templates to be used on matcher.
+ * @num_of_at: Number of action templates in at array.
+ * @attr: Attributes used for matcher creation.
+ *
+ * Return: Pointer to mlx5hws_matcher on success, NULL otherwise.
+ *
+ */
+struct mlx5hws_matcher *
+mlx5hws_matcher_create(struct mlx5hws_table *table,
+ struct mlx5hws_match_template *mt[],
+ u8 num_of_mt,
+ struct mlx5hws_action_template *at[],
+ u8 num_of_at,
+ struct mlx5hws_matcher_attr *attr);
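+
+/*
+ * A rough usage sketch (names are illustrative): a hash-inserted matcher sized
+ * for roughly 2^16 rules per direction could be created with
+ *
+ *     struct mlx5hws_matcher_attr attr = {
+ *             .priority = 0,
+ *             .mode = MLX5HWS_MATCHER_RESOURCE_MODE_RULE,
+ *             .insert_mode = MLX5HWS_MATCHER_INSERT_BY_HASH,
+ *             .distribute_mode = MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH,
+ *     };
+ *
+ *     attr.size[MLX5HWS_MATCHER_SIZE_TYPE_RX].rule.num_log = 16;
+ *     attr.size[MLX5HWS_MATCHER_SIZE_TYPE_TX].rule.num_log = 16;
+ *     matcher = mlx5hws_matcher_create(tbl, &mt, 1, &at, 1, &attr);
+ *
+ * where mt and at are pointers returned by mlx5hws_match_template_create()
+ * and mlx5hws_action_template_create() respectively.
+ */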
+
+/**
+ * mlx5hws_matcher_destroy - Destroy a direct rule matcher.
+ *
+ * @matcher: Matcher to destroy.
+ *
+ * Return: Zero on success, non-zero otherwise.
+ */
+int mlx5hws_matcher_destroy(struct mlx5hws_matcher *matcher);
+
+/**
+ * mlx5hws_matcher_attach_at - Attach a new action template to a direct rule matcher.
+ *
+ * @matcher: Matcher to attach the action template to.
+ * @at: Action template to be attached to the matcher.
+ *
+ * Return: Zero on success, non-zero otherwise.
+ */
+int mlx5hws_matcher_attach_at(struct mlx5hws_matcher *matcher,
+ struct mlx5hws_action_template *at);
+
+/**
+ * mlx5hws_matcher_resize_set_target - Link two matchers and enable moving rules.
+ *
+ * Both matchers must be in the same table type, must be created with the
+ * 'resizable' property, and should have the same characteristics (e.g., same
+ * match templates and action templates). It is the user's responsibility to
+ * ensure that the destination matcher is allocated with the appropriate size.
+ *
+ * Once the function is completed, the user is:
+ * - Allowed to move rules from the source into the destination matcher.
+ * - No longer allowed to insert rules into the source matcher.
+ *
+ * The user is always allowed to insert rules into the destination matcher and
+ * to delete rules from any matcher.
+ *
+ * @src_matcher: Source matcher for moving rules from.
+ * @dst_matcher: Destination matcher for moving rules to.
+ *
+ * Return: Zero on successful move, non-zero otherwise.
+ */
+int mlx5hws_matcher_resize_set_target(struct mlx5hws_matcher *src_matcher,
+ struct mlx5hws_matcher *dst_matcher);
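+
+/*
+ * A rough sketch of the resize flow (names are illustrative): both matchers
+ * are created with attr.resizable set and the destination is allocated with a
+ * larger size, then
+ *
+ *     ret = mlx5hws_matcher_resize_set_target(small, big);
+ *
+ * and every rule previously inserted in 'small' is moved with
+ * mlx5hws_matcher_resize_rule_move(). Once all moves have completed, the
+ * source matcher can be destroyed.
+ */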
+
+/**
+ * mlx5hws_matcher_resize_rule_move - Enqueue moving rule operation.
+ *
+ * This function enqueues the operation of moving a rule from the source
+ * matcher to the destination matcher.
+ *
+ * @src_matcher: Matcher that the rule belongs to.
+ * @rule: The rule to move.
+ * @attr: Rule attributes.
+ *
+ * Return: Zero on success, non-zero otherwise.
+ */
+int mlx5hws_matcher_resize_rule_move(struct mlx5hws_matcher *src_matcher,
+ struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr);
+
+/**
+ * mlx5hws_rule_create - Enqueue create rule operation.
+ *
+ * @matcher: The matcher in which the new rule will be created.
+ * @mt_idx: Match template index to create the match with.
+ * @match_param: The match parameter PRM buffer used for value matching.
+ * @at_idx: Action template index to apply the actions with.
+ * @rule_actions: Rule actions to be executed on match.
+ * @attr: Rule creation attributes.
+ * @rule_handle: A valid rule handle. The handle doesn't require any initialization.
+ *
+ * Return: Zero on successful enqueue, non-zero otherwise.
+ */
+int mlx5hws_rule_create(struct mlx5hws_matcher *matcher,
+ u8 mt_idx,
+ u32 *match_param,
+ u8 at_idx,
+ struct mlx5hws_rule_action rule_actions[],
+ struct mlx5hws_rule_attr *attr,
+ struct mlx5hws_rule *rule_handle);
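+
+/*
+ * A minimal sketch (names are illustrative), assuming the matcher was created
+ * with a single match template and a single action template:
+ *
+ *     struct mlx5hws_rule_attr rattr = { .queue_id = 0 };
+ *
+ *     ret = mlx5hws_rule_create(matcher, 0, match_param, 0, rule_actions,
+ *                               &rattr, rule);
+ *
+ * where match_param is a PRM fte_match_param buffer holding the values to
+ * match, rule_actions follows the layout of action template 0, and rule
+ * points to caller-allocated handle memory (no initialization required).
+ */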
+
+/**
+ * mlx5hws_rule_destroy - Enqueue destroy rule operation.
+ *
+ * @rule: The rule to enqueue for destruction.
+ * @attr: Rule destruction attributes.
+ *
+ * Return: Zero on successful enqueue, non-zero otherwise.
+ */
+int mlx5hws_rule_destroy(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr);
+
+/**
+ * mlx5hws_rule_action_update - Enqueue update actions on an existing rule.
+ *
+ * @rule: A valid rule handle to update.
+ * @at_idx: Action template index to update the actions with.
+ * @rule_actions: Rule actions to be executed on match.
+ * @attr: Rule update attributes.
+ *
+ * Return: Zero on successful enqueue, non-zero otherwise.
+ */
+int mlx5hws_rule_action_update(struct mlx5hws_rule *rule,
+ u8 at_idx,
+ struct mlx5hws_rule_action rule_actions[],
+ struct mlx5hws_rule_attr *attr);
+
+/**
+ * mlx5hws_action_get_type - Get action type.
+ *
+ * @action: The action to get the type of.
+ *
+ * Return: action type.
+ */
+enum mlx5hws_action_type
+mlx5hws_action_get_type(struct mlx5hws_action *action);
+
+/**
+ * mlx5hws_action_get_dev - Get mlx5 core device.
+ *
+ * @action: The action to get the device from.
+ *
+ * Return: mlx5 core device.
+ */
+struct mlx5_core_dev *mlx5hws_action_get_dev(struct mlx5hws_action *action);
+
+/**
+ * mlx5hws_action_create_dest_drop - Create a direct rule drop action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: Pointer to mlx5hws_action on success, NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_dest_drop(struct mlx5hws_context *ctx,
+ u32 flags);
+
+/**
+ * mlx5hws_action_create_default_miss - Create a direct rule default miss action.
+ * Defaults are RX: Drop, TX: Wire.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: Pointer to mlx5hws_action on success, NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_default_miss(struct mlx5hws_context *ctx,
+ u32 flags);
+
+/**
+ * mlx5hws_action_create_dest_table - Create direct rule goto table action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @tbl: Destination table.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: Pointer to mlx5hws_action on success, NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_dest_table(struct mlx5hws_context *ctx,
+ struct mlx5hws_table *tbl,
+ u32 flags);
+
+/**
+ * mlx5hws_action_create_dest_table_num - Create direct rule goto table number action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @tbl_num: Destination table number.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: Pointer to mlx5hws_action on success, NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_dest_table_num(struct mlx5hws_context *ctx,
+ u32 tbl_num, u32 flags);
+
+/**
+ * mlx5hws_action_create_dest_match_range - Create direct rule range match action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @field: Field to compare the value against.
+ * @hit_ft: Flow table to go to on hit.
+ * @miss_ft: Flow table to go to on miss.
+ * @min: Minimal value of the field to be considered as hit.
+ * @max: Maximal value of the field to be considered as hit.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: Pointer to mlx5hws_action on success, NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_dest_match_range(struct mlx5hws_context *ctx,
+ u32 field,
+ struct mlx5_flow_table *hit_ft,
+ struct mlx5_flow_table *miss_ft,
+ u32 min, u32 max, u32 flags);
+
+/**
+ * mlx5hws_action_create_flow_sampler - Create direct rule flow sampler action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @sampler_id: Flow sampler object ID.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: Pointer to mlx5hws_action on success, NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_flow_sampler(struct mlx5hws_context *ctx,
+ u32 sampler_id, u32 flags);
+
+/**
+ * mlx5hws_action_create_dest_vport - Create direct rule goto vport action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @vport_num: Destination vport number.
+ * @vhca_id_valid: Tells if the vhca_id parameter is valid.
+ * @vhca_id: VHCA ID of the destination vport.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: Pointer to mlx5hws_action on success, NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_dest_vport(struct mlx5hws_context *ctx,
+ u16 vport_num,
+ bool vhca_id_valid,
+ u16 vhca_id,
+ u32 flags);
+
+/**
+ * mlx5hws_action_create_tag - Create direct rule TAG action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: Pointer to mlx5hws_action on success, NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_tag(struct mlx5hws_context *ctx, u32 flags);
+
+/**
+ * mlx5hws_action_create_counter - Create direct rule counter action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @obj_id: Direct rule counter object ID.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: Pointer to mlx5hws_action on success, NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_counter(struct mlx5hws_context *ctx,
+ u32 obj_id,
+ u32 flags);
+
+/**
+ * mlx5hws_action_create_reformat - Create direct rule reformat action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @reformat_type: Type of reformat prefixed with MLX5HWS_ACTION_TYP_REFORMAT.
+ * @num_of_hdrs: Number of provided headers in "hdrs" array.
+ * @hdrs: Headers array containing header information.
+ * @log_bulk_size: Number of unique values used with this reformat.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: Pointer to mlx5hws_action on success, NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_reformat(struct mlx5hws_context *ctx,
+ enum mlx5hws_action_type reformat_type,
+ u8 num_of_hdrs,
+ struct mlx5hws_action_reformat_header *hdrs,
+ u32 log_bulk_size,
+ u32 flags);
+
+/**
+ * mlx5hws_action_create_modify_header - Create direct rule modify header action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @num_of_patterns: Number of provided patterns in "patterns" array.
+ * @patterns: Patterns array containing pattern information.
+ * @log_bulk_size: Number of unique values used with this pattern.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_modify_header(struct mlx5hws_context *ctx,
+ u8 num_of_patterns,
+ struct mlx5hws_action_mh_pattern *patterns,
+ u32 log_bulk_size,
+ u32 flags);
+
+/**
+ * mlx5hws_action_create_aso_meter - Create direct rule ASO flow meter action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @obj_id: ASO object ID.
+ * @return_reg_c: Copy the ASO object value into this reg_c,
+ * after a packet hits a rule with this ASO object.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_aso_meter(struct mlx5hws_context *ctx,
+ u32 obj_id,
+ u8 return_reg_c,
+ u32 flags);
+
+/**
+ * mlx5hws_action_create_pop_vlan - Create direct rule pop vlan action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_pop_vlan(struct mlx5hws_context *ctx, u32 flags);
+
+/**
+ * mlx5hws_action_create_push_vlan - Create direct rule push vlan action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_push_vlan(struct mlx5hws_context *ctx, u32 flags);
+
+/**
+ * mlx5hws_action_create_dest_array - Create a dest array action; this action can
+ * duplicate packets and forward them to multiple destinations in the destination list.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @num_dest: The number of destination attributes.
+ * @dests: The destination array. Each entry contains a destination action and can
+ * have additional actions.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_dest_array(struct mlx5hws_context *ctx, size_t num_dest,
+ struct mlx5hws_action_dest_attr *dests,
+ u32 flags);
+
+/**
+ * mlx5hws_action_create_insert_header - Create insert header action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @num_of_hdrs: Number of provided headers in "hdrs" array.
+ * @hdrs: Headers array containing header information.
+ * @log_bulk_size: Number of unique values used with this insert header.
+ * @flags: Action creation flags. (enum mlx5hws_action_flags)
+ *
+ * Return: pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_insert_header(struct mlx5hws_context *ctx,
+ u8 num_of_hdrs,
+ struct mlx5hws_action_insert_header *hdrs,
+ u32 log_bulk_size,
+ u32 flags);
+
+/**
+ * mlx5hws_action_create_remove_header - Create remove header action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @attr: Attributes that specify the remove header type, the PRM start anchor,
+ * and either the PRM end anchor or the remove size in bytes.
+ * @flags: Action creation flags. (enum mlx5hws_action_flags)
+ *
+ * Return: pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_remove_header(struct mlx5hws_context *ctx,
+ struct mlx5hws_action_remove_header_attr *attr,
+ u32 flags);
+
+/**
+ * mlx5hws_action_create_last - Create direct rule LAST action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @flags: Action creation flags. (enum mlx5hws_action_flags)
+ *
+ * Return: pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_last(struct mlx5hws_context *ctx, u32 flags);
+
+/**
+ * mlx5hws_action_destroy - Destroy direct rule action.
+ *
+ * @action: The action to destroy.
+ *
+ * Return: zero on success non zero otherwise.
+ */
+int mlx5hws_action_destroy(struct mlx5hws_action *action);
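
A rough usage sketch of the create/destroy lifecycle shared by the action handles above (illustration only, not part of the patch). It assumes an already-initialized struct mlx5hws_context and a pre-created counter object; the concrete enum mlx5hws_action_flags values are not shown in this header, so flags is passed through opaquely and the error code is arbitrary.

    /* Illustrative only: create a counter action, use it, destroy it. */
    static int example_counter_action(struct mlx5hws_context *ctx,
                                      u32 counter_obj_id, u32 flags)
    {
            struct mlx5hws_action *action;

            action = mlx5hws_action_create_counter(ctx, counter_obj_id, flags);
            if (!action)
                    return -ENOMEM;

            /* ... reference the action from struct mlx5hws_rule_action ... */

            return mlx5hws_action_destroy(action);
    }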
+
+enum mlx5hws_flow_op_status {
+ MLX5HWS_FLOW_OP_SUCCESS,
+ MLX5HWS_FLOW_OP_ERROR,
+};
+
+struct mlx5hws_flow_op_result {
+ enum mlx5hws_flow_op_status status;
+ void *user_data;
+};
+
+/**
+ * mlx5hws_send_queue_poll - Poll a queue for rule creation and deletion completions.
+ *
+ * @ctx: The context to which the queue belongs.
+ * @queue_id: The id of the queue to poll.
+ * @res: Completion array.
+ * @res_nb: Maximum number of results to return.
+ *
+ * Return: negative number on failure, the number of completions otherwise.
+ */
+int mlx5hws_send_queue_poll(struct mlx5hws_context *ctx,
+ u16 queue_id,
+ struct mlx5hws_flow_op_result res[],
+ u32 res_nb);
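
To make the asynchronous completion model concrete, here is a minimal polling sketch (illustration only, not part of the patch), assuming rules were posted on this queue with user_data cookies:

    /* Illustrative only: drain up to 32 completions from one queue. */
    static void example_poll_queue(struct mlx5hws_context *ctx, u16 queue_id)
    {
            struct mlx5hws_flow_op_result res[32];
            int i, n;

            n = mlx5hws_send_queue_poll(ctx, queue_id, res, ARRAY_SIZE(res));
            if (n < 0)
                    return; /* polling itself failed */

            for (i = 0; i < n; i++)
                    if (res[i].status == MLX5HWS_FLOW_OP_ERROR)
                            pr_debug("rule op failed, user_data %p\n",
                                     res[i].user_data);
    }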
+
+/**
+ * mlx5hws_send_queue_action - Perform an action on the queue
+ *
+ * @ctx: The context to which the queue belongs.
+ * @queue_id: The id of the queue to perform the action on.
+ * @actions: Actions to perform on the queue (enum mlx5hws_send_queue_actions)
+ *
+ * Return: zero on success non zero otherwise.
+ */
+int mlx5hws_send_queue_action(struct mlx5hws_context *ctx,
+ u16 queue_id,
+ u32 actions);
+
+/**
+ * mlx5hws_debug_dump - Dump HWS info
+ *
+ * @ctx: The context from which to dump the info.
+ *
+ * Return: zero on success non zero otherwise.
+ */
+int mlx5hws_debug_dump(struct mlx5hws_context *ctx);
+
+struct mlx5hws_bwc_matcher;
+struct mlx5hws_bwc_rule;
+
+struct mlx5hws_match_parameters {
+ size_t match_sz;
+ u32 *match_buf; /* Device spec format */
+};
+
+/**
+ * mlx5hws_bwc_matcher_create - Create a new BWC direct rule matcher.
+ *
+ * This function does the following:
+ * - creates match template based on flow items
+ * - creates an empty action template
+ * - creates a usual mlx5hws_matcher with these mt and at, setting
+ * its size to minimal
+ * Notes:
+ * - table->ctx must have BWC support
+ * - complex rules are not supported
+ *
+ * @table: The table in which the new matcher will be opened
+ * @priority: Priority for this BWC matcher
+ * @match_criteria_enable: Bitmask that defines matching criteria
+ * @mask: Match parameters
+ *
+ * Return: pointer to mlx5hws_bwc_matcher on success or NULL otherwise.
+ */
+struct mlx5hws_bwc_matcher *
+mlx5hws_bwc_matcher_create(struct mlx5hws_table *table,
+ u32 priority,
+ u8 match_criteria_enable,
+ struct mlx5hws_match_parameters *mask);
+
+/**
+ * mlx5hws_bwc_matcher_destroy - Destroy BWC direct rule matcher.
+ *
+ * @bwc_matcher: Matcher to destroy
+ *
+ * Return: zero on success, non zero otherwise
+ */
+int mlx5hws_bwc_matcher_destroy(struct mlx5hws_bwc_matcher *bwc_matcher);
+
+/**
+ * mlx5hws_bwc_rule_create - Create a new BWC rule.
+ *
+ * Unlike the usual rule creation function, this one is blocking: when the
+ * function returns, the rule is written to its place (no need to poll).
+ * This function does the following:
+ * - finds matching action template based on the provided rule_actions, or
+ * creates new action template if matching action template doesn't exist
+ * - updates corresponding BWC matcher stats
+ * - if needed, the function performs rehash:
+ * - creates a new matcher based on mt, at, new_sz
+ * - moves all the existing matcher rules to the new matcher
+ * - removes the old matcher
+ * - inserts new rule
+ * - polls till completion is received
+ * Notes:
+ * - matcher->tbl->ctx must have BWC support
+ * - separate BWC ctx queues are used
+ *
+ * @bwc_matcher: The BWC matcher in which the new rule will be created.
+ * @params: Match parameters
+ * @flow_source: Flow source for this rule
+ * @rule_actions: Rule action to be executed on match
+ *
+ * Return: valid BWC rule handle on success, NULL otherwise
+ */
+struct mlx5hws_bwc_rule *
+mlx5hws_bwc_rule_create(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_match_parameters *params,
+ u32 flow_source,
+ struct mlx5hws_rule_action rule_actions[]);
+
+/**
+ * mlx5hws_bwc_rule_destroy - Destroy BWC direct rule.
+ *
+ * @bwc_rule: Rule to destroy.
+ *
+ * Return: zero on success, non zero otherwise.
+ */
+int mlx5hws_bwc_rule_destroy(struct mlx5hws_bwc_rule *bwc_rule);
+
+/**
+ * mlx5hws_bwc_rule_action_update - Update actions on an existing BWC rule.
+ *
+ * @bwc_rule: Rule to update
+ * @rule_actions: Rule action to update with
+ *
+ * Return: zero on successful update, non zero otherwise.
+ */
+int mlx5hws_bwc_rule_action_update(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_rule_action rule_actions[]);
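
An end-to-end sketch of the blocking BWC API above (illustration only, not part of the patch). It assumes an existing table handle, a mask/params pair already in device-spec format and pre-built rule actions; priority, match criteria and flow source are placeholder values.

    static int example_bwc_flow(struct mlx5hws_table *table,
                                struct mlx5hws_match_parameters *mask,
                                struct mlx5hws_match_parameters *params,
                                struct mlx5hws_rule_action rule_actions[])
    {
            struct mlx5hws_bwc_matcher *matcher;
            struct mlx5hws_bwc_rule *rule;
            int ret;

            matcher = mlx5hws_bwc_matcher_create(table, 0 /* priority */,
                                                 0 /* match_criteria_enable */,
                                                 mask);
            if (!matcher)
                    return -EINVAL;

            /* Blocking: on return the rule is already written, no polling. */
            rule = mlx5hws_bwc_rule_create(matcher, params,
                                           0 /* flow_source: unspecified */,
                                           rule_actions);
            if (!rule) {
                    mlx5hws_bwc_matcher_destroy(matcher);
                    return -EINVAL;
            }

            ret = mlx5hws_bwc_rule_destroy(rule);
            return mlx5hws_bwc_matcher_destroy(matcher) ?: ret;
    }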
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c
new file mode 100644
index 000000000000..d56271a9e4f0
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c
@@ -0,0 +1,582 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "internal.h"
+
+enum mlx5hws_arg_chunk_size
+mlx5hws_arg_data_size_to_arg_log_size(u16 data_size)
+{
+ /* Return the roundup of log2(data_size) */
+ if (data_size <= MLX5HWS_ARG_DATA_SIZE)
+ return MLX5HWS_ARG_CHUNK_SIZE_1;
+ if (data_size <= MLX5HWS_ARG_DATA_SIZE * 2)
+ return MLX5HWS_ARG_CHUNK_SIZE_2;
+ if (data_size <= MLX5HWS_ARG_DATA_SIZE * 4)
+ return MLX5HWS_ARG_CHUNK_SIZE_3;
+ if (data_size <= MLX5HWS_ARG_DATA_SIZE * 8)
+ return MLX5HWS_ARG_CHUNK_SIZE_4;
+
+ return MLX5HWS_ARG_CHUNK_SIZE_MAX;
+}
+
+u32 mlx5hws_arg_data_size_to_arg_size(u16 data_size)
+{
+ return BIT(mlx5hws_arg_data_size_to_arg_log_size(data_size));
+}
+
+enum mlx5hws_arg_chunk_size
+mlx5hws_arg_get_arg_log_size(u16 num_of_actions)
+{
+ return mlx5hws_arg_data_size_to_arg_log_size(num_of_actions *
+ MLX5HWS_MODIFY_ACTION_SIZE);
+}
+
+u32 mlx5hws_arg_get_arg_size(u16 num_of_actions)
+{
+ return BIT(mlx5hws_arg_get_arg_log_size(num_of_actions));
+}
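
Worked numbers may help here: with MLX5HWS_MODIFY_ACTION_SIZE = 8 and MLX5HWS_ARG_DATA_SIZE = 64 (both defined in pat_arg.h later in this patch), the helpers map as follows (illustrative comment only):

    /* Worked example:
     *   mlx5hws_arg_get_arg_log_size(10)       -> 10 * 8 = 80B -> MLX5HWS_ARG_CHUNK_SIZE_2
     *   mlx5hws_arg_get_arg_size(10)           -> BIT(MLX5HWS_ARG_CHUNK_SIZE_2) = 2 arg objects
     *   mlx5hws_arg_data_size_to_arg_size(64)  -> 1 arg object
     *   mlx5hws_arg_data_size_to_arg_size(128) -> 2 arg objects
     */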
+
+bool mlx5hws_pat_require_reparse(__be64 *actions, u16 num_of_actions)
+{
+ u16 i, field;
+ u8 action_id;
+
+ for (i = 0; i < num_of_actions; i++) {
+ action_id = MLX5_GET(set_action_in, &actions[i], action_type);
+
+ switch (action_id) {
+ case MLX5_MODIFICATION_TYPE_NOP:
+ field = MLX5_MODI_OUT_NONE;
+ break;
+
+ case MLX5_MODIFICATION_TYPE_SET:
+ case MLX5_MODIFICATION_TYPE_ADD:
+ field = MLX5_GET(set_action_in, &actions[i], field);
+ break;
+
+ case MLX5_MODIFICATION_TYPE_COPY:
+ case MLX5_MODIFICATION_TYPE_ADD_FIELD:
+ field = MLX5_GET(copy_action_in, &actions[i], dst_field);
+ break;
+
+ default:
+ /* Insert/Remove/Unknown actions require reparse */
+ return true;
+ }
+
+ /* The fields below can change the packet structure and require a reparse */
+ if (field == MLX5_MODI_OUT_ETHERTYPE ||
+ field == MLX5_MODI_OUT_IPV6_NEXT_HDR)
+ return true;
+ }
+
+ return false;
+}
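
Two illustrative cases for the reparse decision above (comment only, not part of the patch):

    /* Worked examples:
     *   { SET MLX5_MODI_OUT_TCP_SPORT, SET MLX5_MODI_OUT_TCP_DPORT } -> false
     *   { SET MLX5_MODI_OUT_ETHERTYPE } or any INSERT/REMOVE action  -> true
     */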
+
+/* Cache and cache element handling */
+int mlx5hws_pat_init_pattern_cache(struct mlx5hws_pattern_cache **cache)
+{
+ struct mlx5hws_pattern_cache *new_cache;
+
+ new_cache = kzalloc(sizeof(*new_cache), GFP_KERNEL);
+ if (!new_cache)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&new_cache->ptrn_list);
+ mutex_init(&new_cache->lock);
+
+ *cache = new_cache;
+
+ return 0;
+}
+
+void mlx5hws_pat_uninit_pattern_cache(struct mlx5hws_pattern_cache *cache)
+{
+ mutex_destroy(&cache->lock);
+ kfree(cache);
+}
+
+static bool mlx5hws_pat_compare_pattern(int cur_num_of_actions,
+ __be64 cur_actions[],
+ int num_of_actions,
+ __be64 actions[])
+{
+ int i;
+
+ if (cur_num_of_actions != num_of_actions)
+ return false;
+
+ for (i = 0; i < num_of_actions; i++) {
+ u8 action_id =
+ MLX5_GET(set_action_in, &actions[i], action_type);
+
+ if (action_id == MLX5_MODIFICATION_TYPE_COPY ||
+ action_id == MLX5_MODIFICATION_TYPE_ADD_FIELD) {
+ if (actions[i] != cur_actions[i])
+ return false;
+ } else {
+ /* Compare just the control, not the values */
+ if ((__force __be32)actions[i] !=
+ (__force __be32)cur_actions[i])
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static struct mlx5hws_pattern_cache_item *
+mlx5hws_pat_find_cached_pattern(struct mlx5hws_pattern_cache *cache,
+ u16 num_of_actions,
+ __be64 *actions)
+{
+ struct mlx5hws_pattern_cache_item *cached_pat = NULL;
+
+ list_for_each_entry(cached_pat, &cache->ptrn_list, ptrn_list_node) {
+ if (mlx5hws_pat_compare_pattern(cached_pat->mh_data.num_of_actions,
+ (__be64 *)cached_pat->mh_data.data,
+ num_of_actions,
+ actions))
+ return cached_pat;
+ }
+
+ return NULL;
+}
+
+static struct mlx5hws_pattern_cache_item *
+mlx5hws_pat_get_existing_cached_pattern(struct mlx5hws_pattern_cache *cache,
+ u16 num_of_actions,
+ __be64 *actions)
+{
+ struct mlx5hws_pattern_cache_item *cached_pattern;
+
+ cached_pattern = mlx5hws_pat_find_cached_pattern(cache, num_of_actions, actions);
+ if (cached_pattern) {
+ /* LRU: move it to be first in the list */
+ list_move(&cached_pattern->ptrn_list_node, &cache->ptrn_list);
+ cached_pattern->refcount++;
+ }
+
+ return cached_pattern;
+}
+
+static struct mlx5hws_pattern_cache_item *
+mlx5hws_pat_add_pattern_to_cache(struct mlx5hws_pattern_cache *cache,
+ u32 pattern_id,
+ u16 num_of_actions,
+ __be64 *actions)
+{
+ struct mlx5hws_pattern_cache_item *cached_pattern;
+
+ cached_pattern = kzalloc(sizeof(*cached_pattern), GFP_KERNEL);
+ if (!cached_pattern)
+ return NULL;
+
+ cached_pattern->mh_data.num_of_actions = num_of_actions;
+ cached_pattern->mh_data.pattern_id = pattern_id;
+ cached_pattern->mh_data.data =
+ kmemdup(actions, num_of_actions * MLX5HWS_MODIFY_ACTION_SIZE, GFP_KERNEL);
+ if (!cached_pattern->mh_data.data)
+ goto free_cached_obj;
+
+ list_add(&cached_pattern->ptrn_list_node, &cache->ptrn_list);
+ cached_pattern->refcount = 1;
+
+ return cached_pattern;
+
+free_cached_obj:
+ kfree(cached_pattern);
+ return NULL;
+}
+
+static struct mlx5hws_pattern_cache_item *
+mlx5hws_pat_find_cached_pattern_by_id(struct mlx5hws_pattern_cache *cache,
+ u32 ptrn_id)
+{
+ struct mlx5hws_pattern_cache_item *cached_pattern = NULL;
+
+ list_for_each_entry(cached_pattern, &cache->ptrn_list, ptrn_list_node) {
+ if (cached_pattern->mh_data.pattern_id == ptrn_id)
+ return cached_pattern;
+ }
+
+ return NULL;
+}
+
+static void
+mlx5hws_pat_remove_pattern(struct mlx5hws_pattern_cache_item *cached_pattern)
+{
+ list_del_init(&cached_pattern->ptrn_list_node);
+
+ kfree(cached_pattern->mh_data.data);
+ kfree(cached_pattern);
+}
+
+void mlx5hws_pat_put_pattern(struct mlx5hws_context *ctx, u32 ptrn_id)
+{
+ struct mlx5hws_pattern_cache *cache = ctx->pattern_cache;
+ struct mlx5hws_pattern_cache_item *cached_pattern;
+
+ mutex_lock(&cache->lock);
+ cached_pattern = mlx5hws_pat_find_cached_pattern_by_id(cache, ptrn_id);
+ if (!cached_pattern) {
+ mlx5hws_err(ctx, "Failed to find cached pattern with provided ID\n");
+ pr_warn("HWS: pattern ID %d is not found\n", ptrn_id);
+ goto out;
+ }
+
+ if (--cached_pattern->refcount)
+ goto out;
+
+ mlx5hws_pat_remove_pattern(cached_pattern);
+ mlx5hws_cmd_header_modify_pattern_destroy(ctx->mdev, ptrn_id);
+
+out:
+ mutex_unlock(&cache->lock);
+}
+
+int mlx5hws_pat_get_pattern(struct mlx5hws_context *ctx,
+ __be64 *pattern, size_t pattern_sz,
+ u32 *pattern_id)
+{
+ u16 num_of_actions = pattern_sz / MLX5HWS_MODIFY_ACTION_SIZE;
+ struct mlx5hws_pattern_cache_item *cached_pattern;
+ u32 ptrn_id = 0;
+ int ret = 0;
+
+ mutex_lock(&ctx->pattern_cache->lock);
+
+ cached_pattern = mlx5hws_pat_get_existing_cached_pattern(ctx->pattern_cache,
+ num_of_actions,
+ pattern);
+ if (cached_pattern) {
+ *pattern_id = cached_pattern->mh_data.pattern_id;
+ goto out_unlock;
+ }
+
+ ret = mlx5hws_cmd_header_modify_pattern_create(ctx->mdev,
+ pattern_sz,
+ (u8 *)pattern,
+ &ptrn_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to create pattern FW object\n");
+ goto out_unlock;
+ }
+
+ cached_pattern = mlx5hws_pat_add_pattern_to_cache(ctx->pattern_cache,
+ ptrn_id,
+ num_of_actions,
+ pattern);
+ if (!cached_pattern) {
+ mlx5hws_err(ctx, "Failed to add pattern to cache\n");
+ ret = -EINVAL;
+ goto clean_pattern;
+ }
+
+ mutex_unlock(&ctx->pattern_cache->lock);
+ *pattern_id = ptrn_id;
+
+ return ret;
+
+clean_pattern:
+ mlx5hws_cmd_header_modify_pattern_destroy(ctx->mdev, ptrn_id);
+out_unlock:
+ mutex_unlock(&ctx->pattern_cache->lock);
+ return ret;
+}
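
A small usage sketch of the pattern cache (illustration only, not part of the patch): get either reuses an equivalent cached pattern and bumps its refcount, or creates a new FW pattern object; put drops the refcount and destroys the FW object on the last release.

    static int example_pattern_usage(struct mlx5hws_context *ctx,
                                     __be64 *actions, u16 num_of_actions)
    {
            size_t pattern_sz = num_of_actions * MLX5HWS_MODIFY_ACTION_SIZE;
            u32 pattern_id;
            int ret;

            ret = mlx5hws_pat_get_pattern(ctx, actions, pattern_sz, &pattern_id);
            if (ret)
                    return ret;

            /* ... use pattern_id, e.g. in a modify-header STC ... */

            mlx5hws_pat_put_pattern(ctx, pattern_id);
            return 0;
    }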
+
+static void
+mlx5d_arg_init_send_attr(struct mlx5hws_send_engine_post_attr *send_attr,
+ void *comp_data,
+ u32 arg_idx)
+{
+ send_attr->opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
+ send_attr->opmod = MLX5HWS_WQE_GTA_OPMOD_MOD_ARG;
+ send_attr->len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
+ send_attr->id = arg_idx;
+ send_attr->user_data = comp_data;
+}
+
+void mlx5hws_arg_decapl3_write(struct mlx5hws_send_engine *queue,
+ u32 arg_idx,
+ u8 *arg_data,
+ u16 num_of_actions)
+{
+ struct mlx5hws_send_engine_post_attr send_attr = {0};
+ struct mlx5hws_wqe_gta_data_seg_arg *wqe_arg = NULL;
+ struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl = NULL;
+ struct mlx5hws_send_engine_post_ctrl ctrl;
+ size_t wqe_len;
+
+ mlx5d_arg_init_send_attr(&send_attr, NULL, arg_idx);
+
+ ctrl = mlx5hws_send_engine_post_start(queue);
+ mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
+ memset(wqe_ctrl, 0, wqe_len);
+ mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_arg, &wqe_len);
+ mlx5hws_action_prepare_decap_l3_data(arg_data, (u8 *)wqe_arg,
+ num_of_actions);
+ mlx5hws_send_engine_post_end(&ctrl, &send_attr);
+}
+
+void mlx5hws_arg_write(struct mlx5hws_send_engine *queue,
+ void *comp_data,
+ u32 arg_idx,
+ u8 *arg_data,
+ size_t data_size)
+{
+ struct mlx5hws_send_engine_post_attr send_attr = {0};
+ struct mlx5hws_wqe_gta_data_seg_arg *wqe_arg;
+ struct mlx5hws_send_engine_post_ctrl ctrl;
+ struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl;
+ int i, full_iter, leftover;
+ size_t wqe_len;
+
+ mlx5d_arg_init_send_attr(&send_attr, comp_data, arg_idx);
+
+ /* Each WQE can hold 64B of data; writing may require multiple iterations */
+ full_iter = data_size / MLX5HWS_ARG_DATA_SIZE;
+ leftover = data_size & (MLX5HWS_ARG_DATA_SIZE - 1);
+
+ for (i = 0; i < full_iter; i++) {
+ ctrl = mlx5hws_send_engine_post_start(queue);
+ mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
+ memset(wqe_ctrl, 0, wqe_len);
+ mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_arg, &wqe_len);
+ memcpy(wqe_arg, arg_data, MLX5HWS_ARG_DATA_SIZE);
+ send_attr.id = arg_idx++;
+ mlx5hws_send_engine_post_end(&ctrl, &send_attr);
+
+ /* Move to next argument data */
+ arg_data += MLX5HWS_ARG_DATA_SIZE;
+ }
+
+ if (leftover) {
+ ctrl = mlx5hws_send_engine_post_start(queue);
+ mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
+ memset(wqe_ctrl, 0, wqe_len);
+ mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_arg, &wqe_len);
+ memcpy(wqe_arg, arg_data, leftover);
+ send_attr.id = arg_idx;
+ mlx5hws_send_engine_post_end(&ctrl, &send_attr);
+ }
+}
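
A worked example of the chunking above (illustrative comment): the payload is split into 64-byte WQEs targeting consecutive argument indices, with a final short WQE for the remainder.

    /* Worked example: data_size = 150, arg_idx = N
     *   full_iter = 150 / 64 = 2, leftover = 150 & 63 = 22
     *   WQE #1 writes bytes   0..63  to arg N
     *   WQE #2 writes bytes  64..127 to arg N + 1
     *   WQE #3 writes bytes 128..149 to arg N + 2
     */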
+
+int mlx5hws_arg_write_inline_arg_data(struct mlx5hws_context *ctx,
+ u32 arg_idx,
+ u8 *arg_data,
+ size_t data_size)
+{
+ struct mlx5hws_send_engine *queue;
+ int ret;
+
+ mutex_lock(&ctx->ctrl_lock);
+
+ /* Get the control queue */
+ queue = &ctx->send_queue[ctx->queues - 1];
+
+ mlx5hws_arg_write(queue, arg_data, arg_idx, arg_data, data_size);
+
+ mlx5hws_send_engine_flush_queue(queue);
+
+ /* Poll for completion */
+ ret = mlx5hws_send_queue_action(ctx, ctx->queues - 1,
+ MLX5HWS_SEND_QUEUE_ACTION_DRAIN_SYNC);
+
+ if (ret)
+ mlx5hws_err(ctx, "Failed to drain arg queue\n");
+
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return ret;
+}
+
+bool mlx5hws_arg_is_valid_arg_request_size(struct mlx5hws_context *ctx,
+ u32 arg_size)
+{
+ if (arg_size < ctx->caps->log_header_modify_argument_granularity ||
+ arg_size > ctx->caps->log_header_modify_argument_max_alloc) {
+ return false;
+ }
+ return true;
+}
+
+int mlx5hws_arg_create(struct mlx5hws_context *ctx,
+ u8 *data,
+ size_t data_sz,
+ u32 log_bulk_sz,
+ bool write_data,
+ u32 *arg_id)
+{
+ u16 single_arg_log_sz;
+ u16 multi_arg_log_sz;
+ int ret;
+ u32 id;
+
+ single_arg_log_sz = mlx5hws_arg_data_size_to_arg_log_size(data_sz);
+ multi_arg_log_sz = single_arg_log_sz + log_bulk_sz;
+
+ if (single_arg_log_sz >= MLX5HWS_ARG_CHUNK_SIZE_MAX) {
+ mlx5hws_err(ctx, "Requested single arg %u not supported\n", single_arg_log_sz);
+ return -EOPNOTSUPP;
+ }
+
+ if (!mlx5hws_arg_is_valid_arg_request_size(ctx, multi_arg_log_sz)) {
+ mlx5hws_err(ctx, "Argument log size %d not supported by FW\n", multi_arg_log_sz);
+ return -EOPNOTSUPP;
+ }
+
+ /* Alloc bulk of args */
+ ret = mlx5hws_cmd_arg_create(ctx->mdev, multi_arg_log_sz, ctx->pd_num, &id);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed allocating arg in order: %d\n", multi_arg_log_sz);
+ return ret;
+ }
+
+ if (write_data) {
+ ret = mlx5hws_arg_write_inline_arg_data(ctx, id,
+ data, data_sz);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed writing arg data\n");
+ mlx5hws_cmd_arg_destroy(ctx->mdev, id);
+ return ret;
+ }
+ }
+
+ *arg_id = id;
+ return ret;
+}
+
+void mlx5hws_arg_destroy(struct mlx5hws_context *ctx, u32 arg_id)
+{
+ mlx5hws_cmd_arg_destroy(ctx->mdev, arg_id);
+}
+
+int mlx5hws_arg_create_modify_header_arg(struct mlx5hws_context *ctx,
+ __be64 *data,
+ u8 num_of_actions,
+ u32 log_bulk_sz,
+ bool write_data,
+ u32 *arg_id)
+{
+ size_t data_sz = num_of_actions * MLX5HWS_MODIFY_ACTION_SIZE;
+ int ret;
+
+ ret = mlx5hws_arg_create(ctx,
+ (u8 *)data,
+ data_sz,
+ log_bulk_sz,
+ write_data,
+ arg_id);
+ if (ret)
+ mlx5hws_err(ctx, "Failed creating modify header arg\n");
+
+ return ret;
+}
+
+static int
+hws_action_modify_check_field_limitation(u8 action_type, __be64 *pattern)
+{
+ /* Need to check field limitation here, but for now - return OK */
+ return 0;
+}
+
+#define INVALID_FIELD 0xffff
+
+static void
+hws_action_modify_get_target_fields(u8 action_type, __be64 *pattern,
+ u16 *src_field, u16 *dst_field)
+{
+ switch (action_type) {
+ case MLX5_ACTION_TYPE_SET:
+ case MLX5_ACTION_TYPE_ADD:
+ *src_field = INVALID_FIELD;
+ *dst_field = MLX5_GET(set_action_in, pattern, field);
+ break;
+ case MLX5_ACTION_TYPE_COPY:
+ *src_field = MLX5_GET(copy_action_in, pattern, src_field);
+ *dst_field = MLX5_GET(copy_action_in, pattern, dst_field);
+ break;
+ default:
+ pr_warn("HWS: invalid modify header action type %d\n", action_type);
+ }
+}
+
+bool mlx5hws_pat_verify_actions(struct mlx5hws_context *ctx, __be64 pattern[], size_t sz)
+{
+ size_t i;
+
+ for (i = 0; i < sz / MLX5HWS_MODIFY_ACTION_SIZE; i++) {
+ u8 action_type =
+ MLX5_GET(set_action_in, &pattern[i], action_type);
+ if (action_type >= MLX5_MODIFICATION_TYPE_MAX) {
+ mlx5hws_err(ctx, "Unsupported action id %d\n", action_type);
+ return false;
+ }
+ if (hws_action_modify_check_field_limitation(action_type, &pattern[i])) {
+ mlx5hws_err(ctx, "Unsupported action number %zu\n", i);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+int mlx5hws_pat_calc_nop(__be64 *pattern, size_t num_actions,
+ size_t max_actions, size_t *new_size,
+ u32 *nop_locations, __be64 *new_pat)
+{
+ u16 prev_src_field = INVALID_FIELD, prev_dst_field = INVALID_FIELD;
+ u8 action_type;
+ bool dependent;
+ size_t i, j;
+
+ *new_size = num_actions;
+ *nop_locations = 0;
+
+ if (num_actions == 1)
+ return 0;
+
+ for (i = 0, j = 0; i < num_actions; i++, j++) {
+ u16 src_field = INVALID_FIELD;
+ u16 dst_field = INVALID_FIELD;
+
+ if (j >= max_actions)
+ return -EINVAL;
+
+ action_type = MLX5_GET(set_action_in, &pattern[i], action_type);
+ hws_action_modify_get_target_fields(action_type, &pattern[i],
+ &src_field, &dst_field);
+
+ /* For every action, look at it and the previous one. The two
+ * actions are dependent if:
+ */
+ dependent =
+ (i > 0) &&
+ /* At least one of the actions is a write and */
+ (dst_field != INVALID_FIELD ||
+ prev_dst_field != INVALID_FIELD) &&
+ /* One reads from the other's source */
+ (dst_field == prev_src_field ||
+ src_field == prev_dst_field ||
+ /* Or both write to the same destination */
+ dst_field == prev_dst_field);
+
+ if (dependent) {
+ *new_size += 1;
+ *nop_locations |= BIT(i);
+ memset(&new_pat[j], 0, MLX5HWS_MODIFY_ACTION_SIZE);
+ MLX5_SET(set_action_in, &new_pat[j], action_type,
+ MLX5_MODIFICATION_TYPE_NOP);
+ j++;
+ if (j >= max_actions)
+ return -EINVAL;
+ }
+
+ memcpy(&new_pat[j], &pattern[i], MLX5HWS_MODIFY_ACTION_SIZE);
+ prev_src_field = src_field;
+ prev_dst_field = dst_field;
+ }
+
+ return 0;
+}
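
A worked example of the dependency rule above (illustrative comment; the field is chosen arbitrarily):

    /* Worked example: two consecutive writes to the same field
     *   pattern[0]: SET MLX5_MODI_OUT_IPV4_TTL   (dst = TTL)
     *   pattern[1]: ADD MLX5_MODI_OUT_IPV4_TTL   (dst = prev dst -> dependent)
     * Result:
     *   new_pat = { SET TTL, NOP, ADD TTL }
     *   *new_size = 3, *nop_locations = BIT(1)
     */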
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h
new file mode 100644
index 000000000000..7fbd8dc7aa18
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_PAT_ARG_H_
+#define MLX5HWS_PAT_ARG_H_
+
+/* Modify-header arg pool */
+enum mlx5hws_arg_chunk_size {
+ MLX5HWS_ARG_CHUNK_SIZE_1,
+ /* Keep MIN updated when changing */
+ MLX5HWS_ARG_CHUNK_SIZE_MIN = MLX5HWS_ARG_CHUNK_SIZE_1,
+ MLX5HWS_ARG_CHUNK_SIZE_2,
+ MLX5HWS_ARG_CHUNK_SIZE_3,
+ MLX5HWS_ARG_CHUNK_SIZE_4,
+ MLX5HWS_ARG_CHUNK_SIZE_MAX,
+};
+
+enum {
+ MLX5HWS_MODIFY_ACTION_SIZE = 8,
+ MLX5HWS_ARG_DATA_SIZE = 64,
+};
+
+struct mlx5hws_pattern_cache {
+ struct mutex lock; /* Protect pattern list */
+ struct list_head ptrn_list;
+};
+
+struct mlx5hws_pattern_cache_item {
+ struct {
+ u32 pattern_id;
+ u8 *data;
+ u16 num_of_actions;
+ } mh_data;
+ u32 refcount; /* protected by pattern_cache lock */
+ struct list_head ptrn_list_node;
+};
+
+enum mlx5hws_arg_chunk_size
+mlx5hws_arg_get_arg_log_size(u16 num_of_actions);
+
+u32 mlx5hws_arg_get_arg_size(u16 num_of_actions);
+
+enum mlx5hws_arg_chunk_size
+mlx5hws_arg_data_size_to_arg_log_size(u16 data_size);
+
+u32 mlx5hws_arg_data_size_to_arg_size(u16 data_size);
+
+int mlx5hws_pat_init_pattern_cache(struct mlx5hws_pattern_cache **cache);
+
+void mlx5hws_pat_uninit_pattern_cache(struct mlx5hws_pattern_cache *cache);
+
+bool mlx5hws_pat_verify_actions(struct mlx5hws_context *ctx, __be64 pattern[], size_t sz);
+
+int mlx5hws_arg_create(struct mlx5hws_context *ctx,
+ u8 *data,
+ size_t data_sz,
+ u32 log_bulk_sz,
+ bool write_data,
+ u32 *arg_id);
+
+void mlx5hws_arg_destroy(struct mlx5hws_context *ctx, u32 arg_id);
+
+int mlx5hws_arg_create_modify_header_arg(struct mlx5hws_context *ctx,
+ __be64 *data,
+ u8 num_of_actions,
+ u32 log_bulk_sz,
+ bool write_data,
+ u32 *modify_hdr_arg_id);
+
+int mlx5hws_pat_get_pattern(struct mlx5hws_context *ctx,
+ __be64 *pattern,
+ size_t pattern_sz,
+ u32 *ptrn_id);
+
+void mlx5hws_pat_put_pattern(struct mlx5hws_context *ctx,
+ u32 ptrn_id);
+
+bool mlx5hws_arg_is_valid_arg_request_size(struct mlx5hws_context *ctx,
+ u32 arg_size);
+
+bool mlx5hws_pat_require_reparse(__be64 *actions, u16 num_of_actions);
+
+void mlx5hws_arg_write(struct mlx5hws_send_engine *queue,
+ void *comp_data,
+ u32 arg_idx,
+ u8 *arg_data,
+ size_t data_size);
+
+void mlx5hws_arg_decapl3_write(struct mlx5hws_send_engine *queue,
+ u32 arg_idx,
+ u8 *arg_data,
+ u16 num_of_actions);
+
+int mlx5hws_arg_write_inline_arg_data(struct mlx5hws_context *ctx,
+ u32 arg_idx,
+ u8 *arg_data,
+ size_t data_size);
+
+int mlx5hws_pat_calc_nop(__be64 *pattern, size_t num_actions,
+ size_t max_actions, size_t *new_size,
+ u32 *nop_locations, __be64 *new_pat);
+#endif /* MLX5HWS_PAT_ARG_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c
new file mode 100644
index 000000000000..7b5071c3df36
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c
@@ -0,0 +1,394 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "internal.h"
+#include "buddy.h"
+
+static void hws_pool_free_one_resource(struct mlx5hws_pool_resource *resource)
+{
+ switch (resource->pool->type) {
+ case MLX5HWS_POOL_TYPE_STE:
+ mlx5hws_cmd_ste_destroy(resource->pool->ctx->mdev, resource->base_id);
+ break;
+ case MLX5HWS_POOL_TYPE_STC:
+ mlx5hws_cmd_stc_destroy(resource->pool->ctx->mdev, resource->base_id);
+ break;
+ default:
+ break;
+ }
+
+ kfree(resource);
+}
+
+static void hws_pool_resource_free(struct mlx5hws_pool *pool)
+{
+ hws_pool_free_one_resource(pool->resource);
+ pool->resource = NULL;
+
+ if (pool->tbl_type == MLX5HWS_TABLE_TYPE_FDB) {
+ hws_pool_free_one_resource(pool->mirror_resource);
+ pool->mirror_resource = NULL;
+ }
+}
+
+static struct mlx5hws_pool_resource *
+hws_pool_create_one_resource(struct mlx5hws_pool *pool, u32 log_range,
+ u32 fw_ft_type)
+{
+ struct mlx5hws_cmd_ste_create_attr ste_attr;
+ struct mlx5hws_cmd_stc_create_attr stc_attr;
+ struct mlx5hws_pool_resource *resource;
+ u32 obj_id = 0;
+ int ret;
+
+ resource = kzalloc(sizeof(*resource), GFP_KERNEL);
+ if (!resource)
+ return NULL;
+
+ switch (pool->type) {
+ case MLX5HWS_POOL_TYPE_STE:
+ ste_attr.log_obj_range = log_range;
+ ste_attr.table_type = fw_ft_type;
+ ret = mlx5hws_cmd_ste_create(pool->ctx->mdev, &ste_attr, &obj_id);
+ break;
+ case MLX5HWS_POOL_TYPE_STC:
+ stc_attr.log_obj_range = log_range;
+ stc_attr.table_type = fw_ft_type;
+ ret = mlx5hws_cmd_stc_create(pool->ctx->mdev, &stc_attr, &obj_id);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ if (ret)
+ goto free_resource;
+
+ resource->pool = pool;
+ resource->range = 1 << log_range;
+ resource->base_id = obj_id;
+
+ return resource;
+
+free_resource:
+ kfree(resource);
+ return NULL;
+}
+
+static int hws_pool_resource_alloc(struct mlx5hws_pool *pool)
+{
+ struct mlx5hws_pool_resource *resource;
+ u32 fw_ft_type, opt_log_range;
+
+ fw_ft_type = mlx5hws_table_get_res_fw_ft_type(pool->tbl_type, false);
+ opt_log_range = pool->opt_type == MLX5HWS_POOL_OPTIMIZE_MIRROR ?
+ 0 : pool->alloc_log_sz;
+ resource = hws_pool_create_one_resource(pool, opt_log_range, fw_ft_type);
+ if (!resource) {
+ mlx5hws_err(pool->ctx, "Failed to allocate resource\n");
+ return -EINVAL;
+ }
+
+ pool->resource = resource;
+
+ if (pool->tbl_type == MLX5HWS_TABLE_TYPE_FDB) {
+ struct mlx5hws_pool_resource *mirror_resource;
+
+ fw_ft_type = mlx5hws_table_get_res_fw_ft_type(pool->tbl_type, true);
+ opt_log_range = pool->opt_type == MLX5HWS_POOL_OPTIMIZE_ORIG ?
+ 0 : pool->alloc_log_sz;
+ mirror_resource = hws_pool_create_one_resource(pool, opt_log_range, fw_ft_type);
+ if (!mirror_resource) {
+ mlx5hws_err(pool->ctx, "Failed to allocate mirrored resource\n");
+ hws_pool_free_one_resource(resource);
+ pool->resource = NULL;
+ return -EINVAL;
+ }
+ pool->mirror_resource = mirror_resource;
+ }
+
+ return 0;
+}
+
+static int hws_pool_buddy_init(struct mlx5hws_pool *pool)
+{
+ struct mlx5hws_buddy_mem *buddy;
+
+ buddy = mlx5hws_buddy_create(pool->alloc_log_sz);
+ if (!buddy) {
+ mlx5hws_err(pool->ctx, "Failed to create buddy order: %zu\n",
+ pool->alloc_log_sz);
+ return -ENOMEM;
+ }
+
+ if (hws_pool_resource_alloc(pool) != 0) {
+ mlx5hws_err(pool->ctx, "Failed to create resource type: %d size %zu\n",
+ pool->type, pool->alloc_log_sz);
+ mlx5hws_buddy_cleanup(buddy);
+ kfree(buddy);
+ return -ENOMEM;
+ }
+
+ pool->db.buddy = buddy;
+
+ return 0;
+}
+
+static int hws_pool_buddy_db_get_chunk(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ struct mlx5hws_buddy_mem *buddy = pool->db.buddy;
+
+ if (!buddy) {
+ mlx5hws_err(pool->ctx, "Bad buddy state\n");
+ return -EINVAL;
+ }
+
+ chunk->offset = mlx5hws_buddy_alloc_mem(buddy, chunk->order);
+ if (chunk->offset >= 0)
+ return 0;
+
+ return -ENOMEM;
+}
+
+static void hws_pool_buddy_db_put_chunk(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ struct mlx5hws_buddy_mem *buddy;
+
+ buddy = pool->db.buddy;
+ if (!buddy) {
+ mlx5hws_err(pool->ctx, "Bad buddy state\n");
+ return;
+ }
+
+ mlx5hws_buddy_free_mem(buddy, chunk->offset, chunk->order);
+}
+
+static void hws_pool_buddy_db_uninit(struct mlx5hws_pool *pool)
+{
+ struct mlx5hws_buddy_mem *buddy;
+
+ buddy = pool->db.buddy;
+ if (buddy) {
+ mlx5hws_buddy_cleanup(buddy);
+ kfree(buddy);
+ pool->db.buddy = NULL;
+ }
+}
+
+static int hws_pool_buddy_db_init(struct mlx5hws_pool *pool)
+{
+ int ret;
+
+ ret = hws_pool_buddy_init(pool);
+ if (ret)
+ return ret;
+
+ pool->p_db_uninit = &hws_pool_buddy_db_uninit;
+ pool->p_get_chunk = &hws_pool_buddy_db_get_chunk;
+ pool->p_put_chunk = &hws_pool_buddy_db_put_chunk;
+
+ return 0;
+}
+
+static unsigned long *hws_pool_create_and_init_bitmap(u32 log_range)
+{
+ unsigned long *bitmap;
+
+ bitmap = bitmap_zalloc(1 << log_range, GFP_KERNEL);
+ if (!bitmap)
+ return NULL;
+
+ bitmap_fill(bitmap, 1 << log_range);
+
+ return bitmap;
+}
+
+static int hws_pool_bitmap_init(struct mlx5hws_pool *pool)
+{
+ unsigned long *bitmap;
+
+ bitmap = hws_pool_create_and_init_bitmap(pool->alloc_log_sz);
+ if (!bitmap) {
+ mlx5hws_err(pool->ctx, "Failed to create bitmap order: %zu\n",
+ pool->alloc_log_sz);
+ return -ENOMEM;
+ }
+
+ if (hws_pool_resource_alloc(pool) != 0) {
+ mlx5hws_err(pool->ctx, "Failed to create resource type: %d: size %zu\n",
+ pool->type, pool->alloc_log_sz);
+ bitmap_free(bitmap);
+ return -ENOMEM;
+ }
+
+ pool->db.bitmap = bitmap;
+
+ return 0;
+}
+
+static int hws_pool_bitmap_db_get_chunk(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ unsigned long *bitmap, size;
+
+ if (chunk->order != 0) {
+ mlx5hws_err(pool->ctx, "Pool only supports order 0 allocs\n");
+ return -EINVAL;
+ }
+
+ bitmap = pool->db.bitmap;
+ if (!bitmap) {
+ mlx5hws_err(pool->ctx, "Bad bitmap state\n");
+ return -EINVAL;
+ }
+
+ size = 1 << pool->alloc_log_sz;
+
+ chunk->offset = find_first_bit(bitmap, size);
+ if (chunk->offset >= size)
+ return -ENOMEM;
+
+ bitmap_clear(bitmap, chunk->offset, 1);
+
+ return 0;
+}
+
+static void hws_pool_bitmap_db_put_chunk(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ unsigned long *bitmap;
+
+ bitmap = pool->db.bitmap;
+ if (!bitmap) {
+ mlx5hws_err(pool->ctx, "Bad bitmap state\n");
+ return;
+ }
+
+ bitmap_set(bitmap, chunk->offset, 1);
+}
+
+static void hws_pool_bitmap_db_uninit(struct mlx5hws_pool *pool)
+{
+ unsigned long *bitmap;
+
+ bitmap = pool->db.bitmap;
+ if (bitmap) {
+ bitmap_free(bitmap);
+ pool->db.bitmap = NULL;
+ }
+}
+
+static int hws_pool_bitmap_db_init(struct mlx5hws_pool *pool)
+{
+ int ret;
+
+ ret = hws_pool_bitmap_init(pool);
+ if (ret)
+ return ret;
+
+ pool->p_db_uninit = &hws_pool_bitmap_db_uninit;
+ pool->p_get_chunk = &hws_pool_bitmap_db_get_chunk;
+ pool->p_put_chunk = &hws_pool_bitmap_db_put_chunk;
+
+ return 0;
+}
+
+static int hws_pool_db_init(struct mlx5hws_pool *pool,
+ enum mlx5hws_db_type db_type)
+{
+ int ret;
+
+ if (db_type == MLX5HWS_POOL_DB_TYPE_BITMAP)
+ ret = hws_pool_bitmap_db_init(pool);
+ else
+ ret = hws_pool_buddy_db_init(pool);
+
+ if (ret) {
+ mlx5hws_err(pool->ctx, "Failed to init pool type: %d (ret: %d)\n",
+ db_type, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hws_pool_db_unint(struct mlx5hws_pool *pool)
+{
+ pool->p_db_uninit(pool);
+}
+
+int mlx5hws_pool_chunk_alloc(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ int ret;
+
+ mutex_lock(&pool->lock);
+ ret = pool->p_get_chunk(pool, chunk);
+ if (ret == 0)
+ pool->available_elems -= 1 << chunk->order;
+ mutex_unlock(&pool->lock);
+
+ return ret;
+}
+
+void mlx5hws_pool_chunk_free(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ mutex_lock(&pool->lock);
+ pool->p_put_chunk(pool, chunk);
+ pool->available_elems += 1 << chunk->order;
+ mutex_unlock(&pool->lock);
+}
+
+struct mlx5hws_pool *
+mlx5hws_pool_create(struct mlx5hws_context *ctx, struct mlx5hws_pool_attr *pool_attr)
+{
+ enum mlx5hws_db_type res_db_type;
+ struct mlx5hws_pool *pool;
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return NULL;
+
+ pool->ctx = ctx;
+ pool->type = pool_attr->pool_type;
+ pool->alloc_log_sz = pool_attr->alloc_log_sz;
+ pool->flags = pool_attr->flags;
+ pool->tbl_type = pool_attr->table_type;
+ pool->opt_type = pool_attr->opt_type;
+
+ if (pool->flags & MLX5HWS_POOL_FLAG_BUDDY)
+ res_db_type = MLX5HWS_POOL_DB_TYPE_BUDDY;
+ else
+ res_db_type = MLX5HWS_POOL_DB_TYPE_BITMAP;
+
+ pool->alloc_log_sz = pool_attr->alloc_log_sz;
+ pool->available_elems = 1 << pool_attr->alloc_log_sz;
+
+ if (hws_pool_db_init(pool, res_db_type))
+ goto free_pool;
+
+ mutex_init(&pool->lock);
+
+ return pool;
+
+free_pool:
+ kfree(pool);
+ return NULL;
+}
+
+void mlx5hws_pool_destroy(struct mlx5hws_pool *pool)
+{
+ mutex_destroy(&pool->lock);
+
+ if (pool->available_elems != 1 << pool->alloc_log_sz)
+ mlx5hws_err(pool->ctx, "Attempting to destroy non-empty pool\n");
+
+ if (pool->resource)
+ hws_pool_resource_free(pool);
+
+ hws_pool_db_unint(pool);
+
+ kfree(pool);
+}
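
A rough usage sketch of the pool API (illustration only, not part of the patch); struct mlx5hws_pool_attr and struct mlx5hws_pool_chunk are declared in pool.h below, and the context is assumed to be already initialized.

    /* Illustrative only: order-0 STE pool backed by the bitmap DB. */
    static int example_pool_usage(struct mlx5hws_context *ctx)
    {
            struct mlx5hws_pool_attr pool_attr = {
                    .pool_type = MLX5HWS_POOL_TYPE_STE,
                    .table_type = MLX5HWS_TABLE_TYPE_FDB,
                    .alloc_log_sz = 10,     /* 1024 elements */
            };
            struct mlx5hws_pool_chunk chunk = { .order = 0 };
            struct mlx5hws_pool *pool;
            int ret;

            /* No MLX5HWS_POOL_FLAG_BUDDY -> bitmap DB, order-0 chunks only. */
            pool = mlx5hws_pool_create(ctx, &pool_attr);
            if (!pool)
                    return -ENOMEM;

            ret = mlx5hws_pool_chunk_alloc(pool, &chunk);
            if (!ret) {
                    /* Object index = mlx5hws_pool_get_base_id(pool) + chunk.offset */
                    mlx5hws_pool_chunk_free(pool, &chunk);
            }

            mlx5hws_pool_destroy(pool);
            return ret;
    }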
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.h
new file mode 100644
index 000000000000..33e33d5f1fb3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_POOL_H_
+#define MLX5HWS_POOL_H_
+
+#define MLX5HWS_POOL_STC_LOG_SZ 15
+
+enum mlx5hws_pool_type {
+ MLX5HWS_POOL_TYPE_STE,
+ MLX5HWS_POOL_TYPE_STC,
+};
+
+struct mlx5hws_pool_chunk {
+ int offset;
+ int order;
+};
+
+struct mlx5hws_pool_resource {
+ struct mlx5hws_pool *pool;
+ u32 base_id;
+ u32 range;
+};
+
+enum mlx5hws_pool_flags {
+ /* Managed by a buddy allocator. If this is not set only allocations of
+ * order 0 are supported.
+ */
+ MLX5HWS_POOL_FLAG_BUDDY = BIT(0),
+};
+
+enum mlx5hws_pool_optimize {
+ MLX5HWS_POOL_OPTIMIZE_NONE = 0x0,
+ MLX5HWS_POOL_OPTIMIZE_ORIG = 0x1,
+ MLX5HWS_POOL_OPTIMIZE_MIRROR = 0x2,
+ MLX5HWS_POOL_OPTIMIZE_MAX = 0x3,
+};
+
+struct mlx5hws_pool_attr {
+ enum mlx5hws_pool_type pool_type;
+ enum mlx5hws_table_type table_type;
+ enum mlx5hws_pool_flags flags;
+ enum mlx5hws_pool_optimize opt_type;
+ /* Allocation size once memory is depleted */
+ size_t alloc_log_sz;
+};
+
+enum mlx5hws_db_type {
+ /* Uses a bitmap, supports only allocations of order 0. */
+ MLX5HWS_POOL_DB_TYPE_BITMAP,
+ /* Entries are managed using a buddy mechanism. */
+ MLX5HWS_POOL_DB_TYPE_BUDDY,
+};
+
+struct mlx5hws_pool_db {
+ enum mlx5hws_db_type type;
+ union {
+ unsigned long *bitmap;
+ struct mlx5hws_buddy_mem *buddy;
+ };
+};
+
+typedef int (*mlx5hws_pool_db_get_chunk)(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk);
+typedef void (*mlx5hws_pool_db_put_chunk)(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk);
+typedef void (*mlx5hws_pool_unint_db)(struct mlx5hws_pool *pool);
+
+struct mlx5hws_pool {
+ struct mlx5hws_context *ctx;
+ enum mlx5hws_pool_type type;
+ enum mlx5hws_pool_flags flags;
+ struct mutex lock; /* protect the pool */
+ size_t alloc_log_sz;
+ size_t available_elems;
+ enum mlx5hws_table_type tbl_type;
+ enum mlx5hws_pool_optimize opt_type;
+ struct mlx5hws_pool_resource *resource;
+ struct mlx5hws_pool_resource *mirror_resource;
+ struct mlx5hws_pool_db db;
+ /* Functions */
+ mlx5hws_pool_unint_db p_db_uninit;
+ mlx5hws_pool_db_get_chunk p_get_chunk;
+ mlx5hws_pool_db_put_chunk p_put_chunk;
+};
+
+struct mlx5hws_pool *
+mlx5hws_pool_create(struct mlx5hws_context *ctx,
+ struct mlx5hws_pool_attr *pool_attr);
+
+void mlx5hws_pool_destroy(struct mlx5hws_pool *pool);
+
+int mlx5hws_pool_chunk_alloc(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk);
+
+void mlx5hws_pool_chunk_free(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk);
+
+static inline u32 mlx5hws_pool_get_base_id(struct mlx5hws_pool *pool)
+{
+ return pool->resource->base_id;
+}
+
+static inline u32 mlx5hws_pool_get_base_mirror_id(struct mlx5hws_pool *pool)
+{
+ return pool->mirror_resource->base_id;
+}
+
+static inline bool
+mlx5hws_pool_empty(struct mlx5hws_pool *pool)
+{
+ bool ret;
+
+ mutex_lock(&pool->lock);
+ ret = pool->available_elems == 0;
+ mutex_unlock(&pool->lock);
+
+ return ret;
+}
+
+static inline bool
+mlx5hws_pool_full(struct mlx5hws_pool *pool)
+{
+ bool ret;
+
+ mutex_lock(&pool->lock);
+ ret = pool->available_elems == (1 << pool->alloc_log_sz);
+ mutex_unlock(&pool->lock);
+
+ return ret;
+}
+#endif /* MLX5HWS_POOL_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/prm.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/prm.h
new file mode 100644
index 000000000000..271490a51b96
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/prm.h
@@ -0,0 +1,472 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5_PRM_H_
+#define MLX5_PRM_H_
+
+#define MLX5_MAX_ACTIONS_DATA_IN_HEADER_MODIFY 512
+
+/* Action type of header modification. */
+enum {
+ MLX5_MODIFICATION_TYPE_SET = 0x1,
+ MLX5_MODIFICATION_TYPE_ADD = 0x2,
+ MLX5_MODIFICATION_TYPE_COPY = 0x3,
+ MLX5_MODIFICATION_TYPE_INSERT = 0x4,
+ MLX5_MODIFICATION_TYPE_REMOVE = 0x5,
+ MLX5_MODIFICATION_TYPE_NOP = 0x6,
+ MLX5_MODIFICATION_TYPE_REMOVE_WORDS = 0x7,
+ MLX5_MODIFICATION_TYPE_ADD_FIELD = 0x8,
+ MLX5_MODIFICATION_TYPE_MAX,
+};
+
+/* The field of packet to be modified. */
+enum mlx5_modification_field {
+ MLX5_MODI_OUT_NONE = -1,
+ MLX5_MODI_OUT_SMAC_47_16 = 1,
+ MLX5_MODI_OUT_SMAC_15_0,
+ MLX5_MODI_OUT_ETHERTYPE,
+ MLX5_MODI_OUT_DMAC_47_16,
+ MLX5_MODI_OUT_DMAC_15_0,
+ MLX5_MODI_OUT_IP_DSCP,
+ MLX5_MODI_OUT_TCP_FLAGS,
+ MLX5_MODI_OUT_TCP_SPORT,
+ MLX5_MODI_OUT_TCP_DPORT,
+ MLX5_MODI_OUT_IPV4_TTL,
+ MLX5_MODI_OUT_UDP_SPORT,
+ MLX5_MODI_OUT_UDP_DPORT,
+ MLX5_MODI_OUT_SIPV6_127_96,
+ MLX5_MODI_OUT_SIPV6_95_64,
+ MLX5_MODI_OUT_SIPV6_63_32,
+ MLX5_MODI_OUT_SIPV6_31_0,
+ MLX5_MODI_OUT_DIPV6_127_96,
+ MLX5_MODI_OUT_DIPV6_95_64,
+ MLX5_MODI_OUT_DIPV6_63_32,
+ MLX5_MODI_OUT_DIPV6_31_0,
+ MLX5_MODI_OUT_SIPV4,
+ MLX5_MODI_OUT_DIPV4,
+ MLX5_MODI_OUT_FIRST_VID,
+ MLX5_MODI_IN_SMAC_47_16 = 0x31,
+ MLX5_MODI_IN_SMAC_15_0,
+ MLX5_MODI_IN_ETHERTYPE,
+ MLX5_MODI_IN_DMAC_47_16,
+ MLX5_MODI_IN_DMAC_15_0,
+ MLX5_MODI_IN_IP_DSCP,
+ MLX5_MODI_IN_TCP_FLAGS,
+ MLX5_MODI_IN_TCP_SPORT,
+ MLX5_MODI_IN_TCP_DPORT,
+ MLX5_MODI_IN_IPV4_TTL,
+ MLX5_MODI_IN_UDP_SPORT,
+ MLX5_MODI_IN_UDP_DPORT,
+ MLX5_MODI_IN_SIPV6_127_96,
+ MLX5_MODI_IN_SIPV6_95_64,
+ MLX5_MODI_IN_SIPV6_63_32,
+ MLX5_MODI_IN_SIPV6_31_0,
+ MLX5_MODI_IN_DIPV6_127_96,
+ MLX5_MODI_IN_DIPV6_95_64,
+ MLX5_MODI_IN_DIPV6_63_32,
+ MLX5_MODI_IN_DIPV6_31_0,
+ MLX5_MODI_IN_SIPV4,
+ MLX5_MODI_IN_DIPV4,
+ MLX5_MODI_OUT_IPV6_HOPLIMIT,
+ MLX5_MODI_IN_IPV6_HOPLIMIT,
+ MLX5_MODI_META_DATA_REG_A,
+ MLX5_MODI_META_DATA_REG_B = 0x50,
+ MLX5_MODI_META_REG_C_0,
+ MLX5_MODI_META_REG_C_1,
+ MLX5_MODI_META_REG_C_2,
+ MLX5_MODI_META_REG_C_3,
+ MLX5_MODI_META_REG_C_4,
+ MLX5_MODI_META_REG_C_5,
+ MLX5_MODI_META_REG_C_6,
+ MLX5_MODI_META_REG_C_7,
+ MLX5_MODI_OUT_TCP_SEQ_NUM,
+ MLX5_MODI_IN_TCP_SEQ_NUM,
+ MLX5_MODI_OUT_TCP_ACK_NUM,
+ MLX5_MODI_IN_TCP_ACK_NUM = 0x5C,
+ MLX5_MODI_GTP_TEID = 0x6E,
+ MLX5_MODI_OUT_IP_ECN = 0x73,
+ MLX5_MODI_TUNNEL_HDR_DW_1 = 0x75,
+ MLX5_MODI_GTPU_FIRST_EXT_DW_0 = 0x76,
+ MLX5_MODI_HASH_RESULT = 0x81,
+ MLX5_MODI_IN_MPLS_LABEL_0 = 0x8a,
+ MLX5_MODI_IN_MPLS_LABEL_1,
+ MLX5_MODI_IN_MPLS_LABEL_2,
+ MLX5_MODI_IN_MPLS_LABEL_3,
+ MLX5_MODI_IN_MPLS_LABEL_4,
+ MLX5_MODI_OUT_IP_PROTOCOL = 0x4A,
+ MLX5_MODI_OUT_IPV6_NEXT_HDR = 0x4A,
+ MLX5_MODI_META_REG_C_8 = 0x8F,
+ MLX5_MODI_META_REG_C_9 = 0x90,
+ MLX5_MODI_META_REG_C_10 = 0x91,
+ MLX5_MODI_META_REG_C_11 = 0x92,
+ MLX5_MODI_META_REG_C_12 = 0x93,
+ MLX5_MODI_META_REG_C_13 = 0x94,
+ MLX5_MODI_META_REG_C_14 = 0x95,
+ MLX5_MODI_META_REG_C_15 = 0x96,
+ MLX5_MODI_OUT_IPV4_TOTAL_LEN = 0x11D,
+ MLX5_MODI_OUT_IPV6_PAYLOAD_LEN = 0x11E,
+ MLX5_MODI_OUT_IPV4_IHL = 0x11F,
+ MLX5_MODI_OUT_TCP_DATA_OFFSET = 0x120,
+ MLX5_MODI_OUT_ESP_SPI = 0x5E,
+ MLX5_MODI_OUT_ESP_SEQ_NUM = 0x82,
+ MLX5_MODI_OUT_IPSEC_NEXT_HDR = 0x126,
+ MLX5_MODI_INVALID = INT_MAX,
+};
+
+enum {
+ MLX5_GET_HCA_CAP_OP_MOD_NIC_FLOW_TABLE = 0x7 << 1,
+ MLX5_GET_HCA_CAP_OP_MOD_ESW_FLOW_TABLE = 0x8 << 1,
+ MLX5_SET_HCA_CAP_OP_MOD_ESW = 0x9 << 1,
+ MLX5_GET_HCA_CAP_OP_MOD_WQE_BASED_FLOW_TABLE = 0x1B << 1,
+ MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE_2 = 0x20 << 1,
+};
+
+enum mlx5_ifc_rtc_update_mode {
+ MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH = 0x0,
+ MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET = 0x1,
+};
+
+enum mlx5_ifc_rtc_access_mode {
+ MLX5_IFC_RTC_STE_ACCESS_MODE_BY_HASH = 0x0,
+ MLX5_IFC_RTC_STE_ACCESS_MODE_LINEAR = 0x1,
+};
+
+enum mlx5_ifc_rtc_ste_format {
+ MLX5_IFC_RTC_STE_FORMAT_8DW = 0x4,
+ MLX5_IFC_RTC_STE_FORMAT_11DW = 0x5,
+ MLX5_IFC_RTC_STE_FORMAT_RANGE = 0x7,
+};
+
+enum mlx5_ifc_rtc_reparse_mode {
+ MLX5_IFC_RTC_REPARSE_NEVER = 0x0,
+ MLX5_IFC_RTC_REPARSE_ALWAYS = 0x1,
+ MLX5_IFC_RTC_REPARSE_BY_STC = 0x2,
+};
+
+#define MLX5_IFC_RTC_LINEAR_LOOKUP_TBL_LOG_MAX 16
+
+struct mlx5_ifc_rtc_bits {
+ u8 modify_field_select[0x40];
+ u8 reserved_at_40[0x40];
+ u8 update_index_mode[0x2];
+ u8 reparse_mode[0x2];
+ u8 num_match_ste[0x4];
+ u8 pd[0x18];
+ u8 reserved_at_a0[0x9];
+ u8 access_index_mode[0x3];
+ u8 num_hash_definer[0x4];
+ u8 update_method[0x1];
+ u8 reserved_at_b1[0x2];
+ u8 log_depth[0x5];
+ u8 log_hash_size[0x8];
+ u8 ste_format_0[0x8];
+ u8 table_type[0x8];
+ u8 ste_format_1[0x8];
+ u8 reserved_at_d8[0x8];
+ u8 match_definer_0[0x20];
+ u8 stc_id[0x20];
+ u8 ste_table_base_id[0x20];
+ u8 ste_table_offset[0x20];
+ u8 reserved_at_160[0x8];
+ u8 miss_flow_table_id[0x18];
+ u8 match_definer_1[0x20];
+ u8 reserved_at_1a0[0x260];
+};
+
+enum mlx5_ifc_stc_action_type {
+ MLX5_IFC_STC_ACTION_TYPE_NOP = 0x00,
+ MLX5_IFC_STC_ACTION_TYPE_COPY = 0x05,
+ MLX5_IFC_STC_ACTION_TYPE_SET = 0x06,
+ MLX5_IFC_STC_ACTION_TYPE_ADD = 0x07,
+ MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS = 0x08,
+ MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE = 0x09,
+ MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT = 0x0b,
+ MLX5_IFC_STC_ACTION_TYPE_TAG = 0x0c,
+ MLX5_IFC_STC_ACTION_TYPE_ACC_MODIFY_LIST = 0x0e,
+ MLX5_IFC_STC_ACTION_TYPE_CRYPTO_IPSEC_ENCRYPTION = 0x10,
+ MLX5_IFC_STC_ACTION_TYPE_CRYPTO_IPSEC_DECRYPTION = 0x11,
+ MLX5_IFC_STC_ACTION_TYPE_ASO = 0x12,
+ MLX5_IFC_STC_ACTION_TYPE_TRAILER = 0x13,
+ MLX5_IFC_STC_ACTION_TYPE_COUNTER = 0x14,
+ MLX5_IFC_STC_ACTION_TYPE_ADD_FIELD = 0x1b,
+ MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE = 0x80,
+ MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_TIR = 0x81,
+ MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT = 0x82,
+ MLX5_IFC_STC_ACTION_TYPE_DROP = 0x83,
+ MLX5_IFC_STC_ACTION_TYPE_ALLOW = 0x84,
+ MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT = 0x85,
+ MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_UPLINK = 0x86,
+};
+
+enum mlx5_ifc_stc_reparse_mode {
+ MLX5_IFC_STC_REPARSE_IGNORE = 0x0,
+ MLX5_IFC_STC_REPARSE_NEVER = 0x1,
+ MLX5_IFC_STC_REPARSE_ALWAYS = 0x2,
+};
+
+struct mlx5_ifc_stc_ste_param_ste_table_bits {
+ u8 ste_obj_id[0x20];
+ u8 match_definer_id[0x20];
+ u8 reserved_at_40[0x3];
+ u8 log_hash_size[0x5];
+ u8 reserved_at_48[0x38];
+};
+
+struct mlx5_ifc_stc_ste_param_tir_bits {
+ u8 reserved_at_0[0x8];
+ u8 tirn[0x18];
+ u8 reserved_at_20[0x60];
+};
+
+struct mlx5_ifc_stc_ste_param_table_bits {
+ u8 reserved_at_0[0x8];
+ u8 table_id[0x18];
+ u8 reserved_at_20[0x60];
+};
+
+struct mlx5_ifc_stc_ste_param_flow_counter_bits {
+ u8 flow_counter_id[0x20];
+};
+
+enum {
+ MLX5_ASO_CT_NUM_PER_OBJ = 1,
+ MLX5_ASO_METER_NUM_PER_OBJ = 2,
+ MLX5_ASO_IPSEC_NUM_PER_OBJ = 1,
+ MLX5_ASO_FIRST_HIT_NUM_PER_OBJ = 512,
+};
+
+struct mlx5_ifc_stc_ste_param_execute_aso_bits {
+ u8 aso_object_id[0x20];
+ u8 return_reg_id[0x4];
+ u8 aso_type[0x4];
+ u8 reserved_at_28[0x18];
+};
+
+struct mlx5_ifc_stc_ste_param_ipsec_encrypt_bits {
+ u8 ipsec_object_id[0x20];
+};
+
+struct mlx5_ifc_stc_ste_param_ipsec_decrypt_bits {
+ u8 ipsec_object_id[0x20];
+};
+
+struct mlx5_ifc_stc_ste_param_trailer_bits {
+ u8 reserved_at_0[0x8];
+ u8 command[0x4];
+ u8 reserved_at_c[0x2];
+ u8 type[0x2];
+ u8 reserved_at_10[0xa];
+ u8 length[0x6];
+};
+
+struct mlx5_ifc_stc_ste_param_header_modify_list_bits {
+ u8 header_modify_pattern_id[0x20];
+ u8 header_modify_argument_id[0x20];
+};
+
+enum mlx5_ifc_header_anchors {
+ MLX5_HEADER_ANCHOR_PACKET_START = 0x0,
+ MLX5_HEADER_ANCHOR_MAC = 0x1,
+ MLX5_HEADER_ANCHOR_FIRST_VLAN_START = 0x2,
+ MLX5_HEADER_ANCHOR_IPV6_IPV4 = 0x07,
+ MLX5_HEADER_ANCHOR_ESP = 0x08,
+ MLX5_HEADER_ANCHOR_TCP_UDP = 0x09,
+ MLX5_HEADER_ANCHOR_TUNNEL_HEADER = 0x0a,
+ MLX5_HEADER_ANCHOR_INNER_MAC = 0x13,
+ MLX5_HEADER_ANCHOR_INNER_IPV6_IPV4 = 0x19,
+ MLX5_HEADER_ANCHOR_INNER_TCP_UDP = 0x1a,
+ MLX5_HEADER_ANCHOR_L4_PAYLOAD = 0x1b,
+ MLX5_HEADER_ANCHOR_INNER_L4_PAYLOAD = 0x1c
+};
+
+struct mlx5_ifc_stc_ste_param_remove_bits {
+ u8 action_type[0x4];
+ u8 decap[0x1];
+ u8 reserved_at_5[0x5];
+ u8 remove_start_anchor[0x6];
+ u8 reserved_at_10[0x2];
+ u8 remove_end_anchor[0x6];
+ u8 reserved_at_18[0x8];
+};
+
+struct mlx5_ifc_stc_ste_param_remove_words_bits {
+ u8 action_type[0x4];
+ u8 reserved_at_4[0x6];
+ u8 remove_start_anchor[0x6];
+ u8 reserved_at_10[0x1];
+ u8 remove_offset[0x7];
+ u8 reserved_at_18[0x2];
+ u8 remove_size[0x6];
+};
+
+struct mlx5_ifc_stc_ste_param_insert_bits {
+ u8 action_type[0x4];
+ u8 encap[0x1];
+ u8 inline_data[0x1];
+ u8 reserved_at_6[0x4];
+ u8 insert_anchor[0x6];
+ u8 reserved_at_10[0x1];
+ u8 insert_offset[0x7];
+ u8 reserved_at_18[0x1];
+ u8 insert_size[0x7];
+ u8 insert_argument[0x20];
+};
+
+struct mlx5_ifc_stc_ste_param_vport_bits {
+ u8 eswitch_owner_vhca_id[0x10];
+ u8 vport_number[0x10];
+ u8 eswitch_owner_vhca_id_valid[0x1];
+ u8 reserved_at_21[0x5f];
+};
+
+union mlx5_ifc_stc_param_bits {
+ struct mlx5_ifc_stc_ste_param_ste_table_bits ste_table;
+ struct mlx5_ifc_stc_ste_param_tir_bits tir;
+ struct mlx5_ifc_stc_ste_param_table_bits table;
+ struct mlx5_ifc_stc_ste_param_flow_counter_bits counter;
+ struct mlx5_ifc_stc_ste_param_header_modify_list_bits modify_header;
+ struct mlx5_ifc_stc_ste_param_execute_aso_bits aso;
+ struct mlx5_ifc_stc_ste_param_remove_bits remove_header;
+ struct mlx5_ifc_stc_ste_param_insert_bits insert_header;
+ struct mlx5_ifc_set_action_in_bits add;
+ struct mlx5_ifc_set_action_in_bits set;
+ struct mlx5_ifc_copy_action_in_bits copy;
+ struct mlx5_ifc_stc_ste_param_vport_bits vport;
+ struct mlx5_ifc_stc_ste_param_ipsec_encrypt_bits ipsec_encrypt;
+ struct mlx5_ifc_stc_ste_param_ipsec_decrypt_bits ipsec_decrypt;
+ struct mlx5_ifc_stc_ste_param_trailer_bits trailer;
+ u8 reserved_at_0[0x80];
+};
+
+enum {
+ MLX5_IFC_MODIFY_STC_FIELD_SELECT_NEW_STC = BIT(0),
+};
+
+struct mlx5_ifc_stc_bits {
+ u8 modify_field_select[0x40];
+ u8 reserved_at_40[0x46];
+ u8 reparse_mode[0x2];
+ u8 table_type[0x8];
+ u8 ste_action_offset[0x8];
+ u8 action_type[0x8];
+ u8 reserved_at_a0[0x60];
+ union mlx5_ifc_stc_param_bits stc_param;
+ u8 reserved_at_180[0x280];
+};
+
+struct mlx5_ifc_ste_bits {
+ u8 modify_field_select[0x40];
+ u8 reserved_at_40[0x48];
+ u8 table_type[0x8];
+ u8 reserved_at_90[0x370];
+};
+
+struct mlx5_ifc_definer_bits {
+ u8 modify_field_select[0x40];
+ u8 reserved_at_40[0x50];
+ u8 format_id[0x10];
+ u8 reserved_at_60[0x60];
+ u8 format_select_dw3[0x8];
+ u8 format_select_dw2[0x8];
+ u8 format_select_dw1[0x8];
+ u8 format_select_dw0[0x8];
+ u8 format_select_dw7[0x8];
+ u8 format_select_dw6[0x8];
+ u8 format_select_dw5[0x8];
+ u8 format_select_dw4[0x8];
+ u8 reserved_at_100[0x18];
+ u8 format_select_dw8[0x8];
+ u8 reserved_at_120[0x20];
+ u8 format_select_byte3[0x8];
+ u8 format_select_byte2[0x8];
+ u8 format_select_byte1[0x8];
+ u8 format_select_byte0[0x8];
+ u8 format_select_byte7[0x8];
+ u8 format_select_byte6[0x8];
+ u8 format_select_byte5[0x8];
+ u8 format_select_byte4[0x8];
+ u8 reserved_at_180[0x40];
+ u8 ctrl[0xa0];
+ u8 match_mask[0x160];
+};
+
+struct mlx5_ifc_header_modify_pattern_in_bits {
+ u8 modify_field_select[0x40];
+
+ u8 reserved_at_40[0x40];
+
+ u8 pattern_length[0x8];
+ u8 reserved_at_88[0x18];
+
+ u8 reserved_at_a0[0x60];
+
+ u8 pattern_data[MLX5_MAX_ACTIONS_DATA_IN_HEADER_MODIFY * 8];
+};
+
+struct mlx5_ifc_create_rtc_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_rtc_bits rtc;
+};
+
+struct mlx5_ifc_create_stc_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_stc_bits stc;
+};
+
+struct mlx5_ifc_create_ste_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_ste_bits ste;
+};
+
+struct mlx5_ifc_create_definer_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_definer_bits definer;
+};
+
+struct mlx5_ifc_create_header_modify_pattern_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_header_modify_pattern_in_bits pattern;
+};
+
+struct mlx5_ifc_generate_wqe_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mode[0x10];
+ u8 reserved_at_40[0x40];
+ u8 reserved_at_80[0x8];
+ u8 pdn[0x18];
+ u8 reserved_at_a0[0x160];
+ u8 wqe_ctrl[0x80];
+ u8 wqe_gta_ctrl[0x180];
+ u8 wqe_gta_data_0[0x200];
+ u8 wqe_gta_data_1[0x200];
+};
+
+struct mlx5_ifc_generate_wqe_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 reserved_at_40[0x1c0];
+ u8 cqe_data[0x200];
+};
+
+enum mlx5_access_aso_opc_mod {
+ ASO_OPC_MOD_IPSEC = 0x0,
+ ASO_OPC_MOD_CONNECTION_TRACKING = 0x1,
+ ASO_OPC_MOD_POLICER = 0x2,
+ ASO_OPC_MOD_RACE_AVOIDANCE = 0x3,
+ ASO_OPC_MOD_FLOW_HIT = 0x4,
+};
+
+enum {
+ MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION = BIT(0),
+ MLX5_IFC_MODIFY_FLOW_TABLE_RTC_ID = BIT(1),
+};
+
+enum {
+ MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_DEFAULT = 0,
+ MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_GOTO_TBL = 1,
+};
+
+#endif /* MLX5_PRM_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c
new file mode 100644
index 000000000000..a94f094e72ba
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c
@@ -0,0 +1,846 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "internal.h"
+
+void mlx5hws_rule_skip(struct mlx5hws_matcher *matcher, u32 flow_source,
+ bool *skip_rx, bool *skip_tx)
+{
+ /* By default FDB rules are added to both RX and TX */
+ *skip_rx = false;
+ *skip_tx = false;
+
+ if (flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT) {
+ *skip_rx = true;
+ return;
+ }
+
+ if (flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK) {
+ *skip_tx = true;
+ return;
+ }
+
+ /* If no flow source was set for the current rule,
+ * check for a flow source in the matcher attributes.
+ */
+ *skip_tx = matcher->attr.optimize_flow_src ==
+ MLX5HWS_MATCHER_FLOW_SRC_WIRE;
+ *skip_rx = matcher->attr.optimize_flow_src ==
+ MLX5HWS_MATCHER_FLOW_SRC_VPORT;
+}
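
The three possible outcomes for an FDB matcher, summarized (illustrative comment, not part of the patch):

    /* Worked example (matcher with no optimize_flow_src set):
     *   flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT -> skip RX only
     *   flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK      -> skip TX only
     *   any other flow_source                                    -> program both RX and TX
     */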
+
+static void
+hws_rule_update_copy_tag(struct mlx5hws_rule *rule,
+ struct mlx5hws_wqe_gta_data_seg_ste *wqe_data,
+ bool is_jumbo)
+{
+ struct mlx5hws_rule_match_tag *tag;
+
+ if (!mlx5hws_matcher_is_resizable(rule->matcher)) {
+ tag = &rule->tag;
+ } else {
+ struct mlx5hws_wqe_gta_data_seg_ste *data_seg =
+ (struct mlx5hws_wqe_gta_data_seg_ste *)(void *)rule->resize_info->data_seg;
+ tag = (struct mlx5hws_rule_match_tag *)(void *)data_seg->action;
+ }
+
+ if (is_jumbo)
+ memcpy(wqe_data->jumbo, tag->jumbo, MLX5HWS_JUMBO_TAG_SZ);
+ else
+ memcpy(wqe_data->tag, tag->match, MLX5HWS_MATCH_TAG_SZ);
+}
+
+static void hws_rule_init_dep_wqe(struct mlx5hws_send_ring_dep_wqe *dep_wqe,
+ struct mlx5hws_rule *rule,
+ struct mlx5hws_match_template *mt,
+ struct mlx5hws_rule_attr *attr)
+{
+ struct mlx5hws_matcher *matcher = rule->matcher;
+ struct mlx5hws_table *tbl = matcher->tbl;
+ bool skip_rx, skip_tx;
+
+ dep_wqe->rule = rule;
+ dep_wqe->user_data = attr->user_data;
+ dep_wqe->direct_index = mlx5hws_matcher_is_insert_by_idx(matcher) ?
+ attr->rule_idx : 0;
+
+ if (tbl->type == MLX5HWS_TABLE_TYPE_FDB) {
+ mlx5hws_rule_skip(matcher, attr->flow_source,
+ &skip_rx, &skip_tx);
+
+ if (!skip_rx) {
+ dep_wqe->rtc_0 = matcher->match_ste.rtc_0_id;
+ dep_wqe->retry_rtc_0 = matcher->col_matcher ?
+ matcher->col_matcher->match_ste.rtc_0_id : 0;
+ } else {
+ dep_wqe->rtc_0 = 0;
+ dep_wqe->retry_rtc_0 = 0;
+ }
+
+ if (!skip_tx) {
+ dep_wqe->rtc_1 = matcher->match_ste.rtc_1_id;
+ dep_wqe->retry_rtc_1 = matcher->col_matcher ?
+ matcher->col_matcher->match_ste.rtc_1_id : 0;
+ } else {
+ dep_wqe->rtc_1 = 0;
+ dep_wqe->retry_rtc_1 = 0;
+ }
+ } else {
+ pr_warn("HWS: invalid tbl->type: %d\n", tbl->type);
+ }
+}
+
+static void hws_rule_move_get_rtc(struct mlx5hws_rule *rule,
+ struct mlx5hws_send_ste_attr *ste_attr)
+{
+ struct mlx5hws_matcher *dst_matcher = rule->matcher->resize_dst;
+
+ if (rule->resize_info->rtc_0) {
+ ste_attr->rtc_0 = dst_matcher->match_ste.rtc_0_id;
+ ste_attr->retry_rtc_0 = dst_matcher->col_matcher ?
+ dst_matcher->col_matcher->match_ste.rtc_0_id : 0;
+ }
+ if (rule->resize_info->rtc_1) {
+ ste_attr->rtc_1 = dst_matcher->match_ste.rtc_1_id;
+ ste_attr->retry_rtc_1 = dst_matcher->col_matcher ?
+ dst_matcher->col_matcher->match_ste.rtc_1_id : 0;
+ }
+}
+
+static void hws_rule_gen_comp(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_rule *rule,
+ bool err,
+ void *user_data,
+ enum mlx5hws_rule_status rule_status_on_succ)
+{
+ enum mlx5hws_flow_op_status comp_status;
+
+ if (!err) {
+ comp_status = MLX5HWS_FLOW_OP_SUCCESS;
+ rule->status = rule_status_on_succ;
+ } else {
+ comp_status = MLX5HWS_FLOW_OP_ERROR;
+ rule->status = MLX5HWS_RULE_STATUS_FAILED;
+ }
+
+ mlx5hws_send_engine_inc_rule(queue);
+ mlx5hws_send_engine_gen_comp(queue, user_data, comp_status);
+}
+
+static void
+hws_rule_save_resize_info(struct mlx5hws_rule *rule,
+ struct mlx5hws_send_ste_attr *ste_attr)
+{
+ if (!mlx5hws_matcher_is_resizable(rule->matcher))
+ return;
+
+	/* resize_info might already exist (if we're in the update flow) */
+ if (likely(!rule->resize_info)) {
+ rule->resize_info = kzalloc(sizeof(*rule->resize_info), GFP_KERNEL);
+ if (unlikely(!rule->resize_info)) {
+ pr_warn("HWS: resize info isn't allocated for rule\n");
+ return;
+ }
+ }
+
+ memcpy(rule->resize_info->ctrl_seg, ste_attr->wqe_ctrl,
+ sizeof(rule->resize_info->ctrl_seg));
+ memcpy(rule->resize_info->data_seg, ste_attr->wqe_data,
+ sizeof(rule->resize_info->data_seg));
+}
+
+void mlx5hws_rule_clear_resize_info(struct mlx5hws_rule *rule)
+{
+ if (mlx5hws_matcher_is_resizable(rule->matcher) &&
+ rule->resize_info) {
+ kfree(rule->resize_info);
+ rule->resize_info = NULL;
+ }
+}
+
+static void
+hws_rule_save_delete_info(struct mlx5hws_rule *rule,
+ struct mlx5hws_send_ste_attr *ste_attr)
+{
+ struct mlx5hws_match_template *mt = rule->matcher->mt;
+ bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(mt);
+
+ if (mlx5hws_matcher_is_resizable(rule->matcher))
+ return;
+
+ if (is_jumbo)
+ memcpy(&rule->tag.jumbo, ste_attr->wqe_data->jumbo, MLX5HWS_JUMBO_TAG_SZ);
+ else
+ memcpy(&rule->tag.match, ste_attr->wqe_data->tag, MLX5HWS_MATCH_TAG_SZ);
+}
+
+static void
+hws_rule_clear_delete_info(struct mlx5hws_rule *rule)
+{
+ /* nothing to do here */
+}
+
+static void
+hws_rule_load_delete_info(struct mlx5hws_rule *rule,
+ struct mlx5hws_send_ste_attr *ste_attr)
+{
+ if (unlikely(!mlx5hws_matcher_is_resizable(rule->matcher))) {
+ ste_attr->wqe_tag = &rule->tag;
+ } else {
+ struct mlx5hws_wqe_gta_data_seg_ste *data_seg =
+ (struct mlx5hws_wqe_gta_data_seg_ste *)(void *)rule->resize_info->data_seg;
+ struct mlx5hws_rule_match_tag *tag =
+ (struct mlx5hws_rule_match_tag *)(void *)data_seg->action;
+ ste_attr->wqe_tag = tag;
+ }
+}
+
+static int mlx5hws_rule_alloc_action_ste(struct mlx5hws_rule *rule,
+ u16 queue_id, bool skip_rx,
+ bool skip_tx)
+{
+ struct mlx5hws_matcher *matcher = rule->matcher;
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+
+ rule->action_ste.ste.order =
+ ilog2(roundup_pow_of_two(matcher->num_of_action_stes));
+ return mlx5hws_action_ste_chunk_alloc(&ctx->action_ste_pool[queue_id],
+ skip_rx, skip_tx,
+ &rule->action_ste);
+}
+
+void mlx5hws_rule_free_action_ste(struct mlx5hws_action_ste_chunk *action_ste)
+{
+ if (!action_ste->action_tbl)
+ return;
+
+ /* This release is safe only when the rule match STE was deleted
+ * (when the rule is being deleted) or replaced with the new STE that
+ * isn't pointing to old action STEs (when the rule is being updated).
+ */
+ mlx5hws_action_ste_chunk_free(action_ste);
+}
+
+static void hws_rule_create_init(struct mlx5hws_rule *rule,
+ struct mlx5hws_send_ste_attr *ste_attr,
+ struct mlx5hws_actions_apply_data *apply,
+ bool is_update)
+{
+ struct mlx5hws_matcher *matcher = rule->matcher;
+ struct mlx5hws_table *tbl = matcher->tbl;
+ struct mlx5hws_context *ctx = tbl->ctx;
+
+ /* Init rule before reuse */
+ if (!is_update) {
+		/* In the update flow these RTCs are still in use,
+		 * so clear them only when creating a new rule.
+		 */
+ rule->rtc_0 = 0;
+ rule->rtc_1 = 0;
+
+ rule->status = MLX5HWS_RULE_STATUS_CREATING;
+ } else {
+ rule->status = MLX5HWS_RULE_STATUS_UPDATING;
+ /* Save the old action STE info so we can free it after writing
+ * new action STEs and a corresponding match STE.
+ */
+ rule->old_action_ste = rule->action_ste;
+ }
+
+ rule->pending_wqes = 0;
+
+ /* Init default send STE attributes */
+ ste_attr->gta_opcode = MLX5HWS_WQE_GTA_OP_ACTIVATE;
+ ste_attr->send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
+ ste_attr->send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
+ ste_attr->send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
+
+ /* Init default action apply */
+ apply->tbl_type = tbl->type;
+ apply->common_res = &ctx->common_res;
+ apply->require_dep = 0;
+}
+
+static void hws_rule_move_init(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ /* Save the old RTC IDs to be later used in match STE delete */
+ rule->resize_info->rtc_0 = rule->rtc_0;
+ rule->resize_info->rtc_1 = rule->rtc_1;
+ rule->resize_info->rule_idx = attr->rule_idx;
+
+ rule->rtc_0 = 0;
+ rule->rtc_1 = 0;
+
+ rule->pending_wqes = 0;
+ rule->status = MLX5HWS_RULE_STATUS_CREATING;
+ rule->resize_info->state = MLX5HWS_RULE_RESIZE_STATE_WRITING;
+}
+
+bool mlx5hws_rule_move_in_progress(struct mlx5hws_rule *rule)
+{
+ return mlx5hws_matcher_is_in_resize(rule->matcher) &&
+ rule->resize_info &&
+ rule->resize_info->state != MLX5HWS_RULE_RESIZE_STATE_IDLE;
+}
+
+static int hws_rule_create_hws(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr,
+ u8 mt_idx,
+ u32 *match_param,
+ u8 at_idx,
+ struct mlx5hws_rule_action rule_actions[])
+{
+ struct mlx5hws_action_template *at = &rule->matcher->at[at_idx];
+ struct mlx5hws_match_template *mt = &rule->matcher->mt[mt_idx];
+ bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(mt);
+ struct mlx5hws_matcher *matcher = rule->matcher;
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ struct mlx5hws_send_ste_attr ste_attr = {0};
+ struct mlx5hws_send_ring_dep_wqe *dep_wqe;
+ struct mlx5hws_actions_wqe_setter *setter;
+ struct mlx5hws_actions_apply_data apply;
+ struct mlx5hws_send_engine *queue;
+ u8 total_stes, action_stes;
+ bool is_update;
+ int i, ret;
+
+ is_update = !match_param;
+
+ setter = &at->setters[at->num_of_action_stes];
+ total_stes = at->num_of_action_stes + (is_jumbo && !at->only_term);
+ action_stes = total_stes - 1;
+
+ queue = &ctx->send_queue[attr->queue_id];
+ if (unlikely(mlx5hws_send_engine_err(queue)))
+ return -EIO;
+
+ hws_rule_create_init(rule, &ste_attr, &apply, is_update);
+
+ /* Allocate dependent match WQE since rule might have dependent writes.
+ * The queued dependent WQE can be later aborted or kept as a dependency.
+ * dep_wqe buffers (ctrl, data) are also reused for all STE writes.
+ */
+ dep_wqe = mlx5hws_send_add_new_dep_wqe(queue);
+ hws_rule_init_dep_wqe(dep_wqe, rule, mt, attr);
+
+ ste_attr.wqe_ctrl = &dep_wqe->wqe_ctrl;
+ ste_attr.wqe_data = &dep_wqe->wqe_data;
+ apply.wqe_ctrl = &dep_wqe->wqe_ctrl;
+ apply.wqe_data = (__force __be32 *)&dep_wqe->wqe_data;
+ apply.rule_action = rule_actions;
+ apply.queue = queue;
+
+ if (action_stes) {
+ /* Allocate action STEs for rules that need more than match STE */
+ ret = mlx5hws_rule_alloc_action_ste(rule, attr->queue_id,
+ !!ste_attr.rtc_0,
+ !!ste_attr.rtc_1);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate action memory %d", ret);
+ mlx5hws_send_abort_new_dep_wqe(queue);
+ return ret;
+ }
+ apply.jump_to_action_stc =
+ rule->action_ste.action_tbl->stc.offset;
+ /* Skip RX/TX based on the dep_wqe init */
+ ste_attr.rtc_0 = dep_wqe->rtc_0 ?
+ rule->action_ste.action_tbl->rtc_0_id : 0;
+ ste_attr.rtc_1 = dep_wqe->rtc_1 ?
+ rule->action_ste.action_tbl->rtc_1_id : 0;
+ /* Action STEs are written to a specific index last to first */
+ ste_attr.direct_index =
+ rule->action_ste.ste.offset + action_stes;
+ apply.next_direct_idx = ste_attr.direct_index;
+ } else {
+ apply.next_direct_idx = 0;
+ }
+
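+	/* Write STEs from the last one to the first: action STEs are posted
+	 * first, and the match STE (i == 0) is handled last, once its actions
+	 * are already in place.
+	 */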
+ for (i = total_stes; i-- > 0;) {
+ mlx5hws_action_apply_setter(&apply, setter--, !i && is_jumbo);
+
+ if (i == 0) {
+ /* Handle last match STE.
+ * For hash split / linear lookup RTCs, packets reaching any STE
+ * will always match and perform the specified actions, which
+ * makes the tag irrelevant.
+ */
+ if (likely(!mlx5hws_matcher_is_insert_by_idx(matcher) && !is_update))
+ mlx5hws_definer_create_tag(match_param, mt->fc, mt->fc_sz,
+ (u8 *)dep_wqe->wqe_data.action);
+ else if (is_update)
+ hws_rule_update_copy_tag(rule, &dep_wqe->wqe_data, is_jumbo);
+
+ /* Rule has dependent WQEs, match dep_wqe is queued */
+ if (action_stes || apply.require_dep)
+ break;
+
+ /* Rule has no dependencies, abort dep_wqe and send WQE now */
+ mlx5hws_send_abort_new_dep_wqe(queue);
+ ste_attr.wqe_tag_is_jumbo = is_jumbo;
+ ste_attr.send_attr.notify_hw = !attr->burst;
+ ste_attr.send_attr.user_data = dep_wqe->user_data;
+ ste_attr.send_attr.rule = dep_wqe->rule;
+ ste_attr.rtc_0 = dep_wqe->rtc_0;
+ ste_attr.rtc_1 = dep_wqe->rtc_1;
+ ste_attr.used_id_rtc_0 = &rule->rtc_0;
+ ste_attr.used_id_rtc_1 = &rule->rtc_1;
+ ste_attr.retry_rtc_0 = dep_wqe->retry_rtc_0;
+ ste_attr.retry_rtc_1 = dep_wqe->retry_rtc_1;
+ ste_attr.direct_index = dep_wqe->direct_index;
+ } else {
+ apply.next_direct_idx = --ste_attr.direct_index;
+ }
+
+ mlx5hws_send_ste(queue, &ste_attr);
+ }
+
+	/* Back up the TAG on the rule for deletion, and the resize info for
+	 * moving rules to a new matcher, only after insertion.
+	 */
+ if (!is_update)
+ hws_rule_save_delete_info(rule, &ste_attr);
+
+ hws_rule_save_resize_info(rule, &ste_attr);
+ mlx5hws_send_engine_inc_rule(queue);
+
+ if (!attr->burst)
+ mlx5hws_send_all_dep_wqe(queue);
+
+ return 0;
+}
+
+static void hws_rule_destroy_failed_hws(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ struct mlx5hws_context *ctx = rule->matcher->tbl->ctx;
+ struct mlx5hws_send_engine *queue;
+
+ queue = &ctx->send_queue[attr->queue_id];
+
+ hws_rule_gen_comp(queue, rule, false,
+ attr->user_data, MLX5HWS_RULE_STATUS_DELETED);
+
+	/* Rule failed, so now we can safely release action STEs */
+ mlx5hws_rule_free_action_ste(&rule->action_ste);
+
+ /* Perhaps the rule failed updating - release old action STEs as well */
+ mlx5hws_rule_free_action_ste(&rule->old_action_ste);
+
+ /* Clear complex tag */
+ hws_rule_clear_delete_info(rule);
+
+ /* Clear info that was saved for resizing */
+ mlx5hws_rule_clear_resize_info(rule);
+
+	/* If a rule that was indicated as burst (need to trigger HW) has failed
+	 * insertion, we won't ring the HW, as nothing is being written to the WQ.
+	 * In such a case, update the last WQE and ring the HW with that work.
+	 */
+ if (attr->burst)
+ return;
+
+ mlx5hws_send_all_dep_wqe(queue);
+ mlx5hws_send_engine_flush_queue(queue);
+}
+
+static int hws_rule_destroy_hws(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(rule->matcher->mt);
+ struct mlx5hws_context *ctx = rule->matcher->tbl->ctx;
+ struct mlx5hws_matcher *matcher = rule->matcher;
+ struct mlx5hws_wqe_gta_ctrl_seg wqe_ctrl = {0};
+ struct mlx5hws_send_ste_attr ste_attr = {0};
+ struct mlx5hws_send_engine *queue;
+
+ queue = &ctx->send_queue[attr->queue_id];
+
+ if (unlikely(mlx5hws_send_engine_err(queue))) {
+ hws_rule_destroy_failed_hws(rule, attr);
+ return 0;
+ }
+
+ /* Rule is not completed yet */
+ if (rule->status == MLX5HWS_RULE_STATUS_CREATING ||
+ rule->status == MLX5HWS_RULE_STATUS_UPDATING)
+ return -EBUSY;
+
+ /* Rule failed and doesn't require cleanup */
+ if (rule->status == MLX5HWS_RULE_STATUS_FAILED) {
+ hws_rule_destroy_failed_hws(rule, attr);
+ return 0;
+ }
+
+ if (rule->skip_delete) {
+ /* Rule shouldn't be deleted in HW.
+ * Generate completion as if write succeeded, and we can
+ * safely release action STEs and clear resize info.
+ */
+ hws_rule_gen_comp(queue, rule, false,
+ attr->user_data, MLX5HWS_RULE_STATUS_DELETED);
+
+ mlx5hws_rule_free_action_ste(&rule->action_ste);
+ mlx5hws_rule_clear_resize_info(rule);
+ return 0;
+ }
+
+ mlx5hws_send_engine_inc_rule(queue);
+
+ /* Send dependent WQE */
+ if (!attr->burst)
+ mlx5hws_send_all_dep_wqe(queue);
+
+ rule->status = MLX5HWS_RULE_STATUS_DELETING;
+
+ ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
+ ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
+ ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
+
+ ste_attr.send_attr.rule = rule;
+ ste_attr.send_attr.notify_hw = !attr->burst;
+ ste_attr.send_attr.user_data = attr->user_data;
+
+ ste_attr.rtc_0 = rule->rtc_0;
+ ste_attr.rtc_1 = rule->rtc_1;
+ ste_attr.used_id_rtc_0 = &rule->rtc_0;
+ ste_attr.used_id_rtc_1 = &rule->rtc_1;
+ ste_attr.wqe_ctrl = &wqe_ctrl;
+ ste_attr.wqe_tag_is_jumbo = is_jumbo;
+ ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_DEACTIVATE;
+ if (unlikely(mlx5hws_matcher_is_insert_by_idx(matcher)))
+ ste_attr.direct_index = attr->rule_idx;
+
+ hws_rule_load_delete_info(rule, &ste_attr);
+ mlx5hws_send_ste(queue, &ste_attr);
+ hws_rule_clear_delete_info(rule);
+
+ return 0;
+}
+
+static int hws_rule_enqueue_precheck(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ struct mlx5hws_context *ctx = rule->matcher->tbl->ctx;
+
+ if (unlikely(!attr->user_data))
+ return -EINVAL;
+
+ /* Check if there is room in queue */
+ if (unlikely(mlx5hws_send_engine_full(&ctx->send_queue[attr->queue_id])))
+ return -EBUSY;
+
+ return 0;
+}
+
+static int hws_rule_enqueue_precheck_move(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ if (unlikely(rule->status != MLX5HWS_RULE_STATUS_CREATED))
+ return -EINVAL;
+
+ return hws_rule_enqueue_precheck(rule, attr);
+}
+
+static int hws_rule_enqueue_precheck_create(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ if (unlikely(mlx5hws_matcher_is_in_resize(rule->matcher)))
+ /* Matcher in resize - new rules are not allowed */
+ return -EAGAIN;
+
+ return hws_rule_enqueue_precheck(rule, attr);
+}
+
+static int hws_rule_enqueue_precheck_update(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ struct mlx5hws_matcher *matcher = rule->matcher;
+
+ if (unlikely(!mlx5hws_matcher_is_resizable(rule->matcher) &&
+ !matcher->attr.optimize_using_rule_idx &&
+ !mlx5hws_matcher_is_insert_by_idx(matcher))) {
+ return -EOPNOTSUPP;
+ }
+
+ if (unlikely(rule->status != MLX5HWS_RULE_STATUS_CREATED))
+ return -EBUSY;
+
+ return hws_rule_enqueue_precheck_create(rule, attr);
+}
+
+int mlx5hws_rule_move_hws_remove(struct mlx5hws_rule *rule,
+ void *queue_ptr,
+ void *user_data)
+{
+ bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(rule->matcher->mt);
+ struct mlx5hws_wqe_gta_ctrl_seg empty_wqe_ctrl = {0};
+ struct mlx5hws_matcher *matcher = rule->matcher;
+ struct mlx5hws_send_engine *queue = queue_ptr;
+ struct mlx5hws_send_ste_attr ste_attr = {0};
+
+ mlx5hws_send_all_dep_wqe(queue);
+
+ rule->resize_info->state = MLX5HWS_RULE_RESIZE_STATE_DELETING;
+
+ ste_attr.send_attr.fence = 0;
+ ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
+ ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
+ ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
+ ste_attr.send_attr.rule = rule;
+ ste_attr.send_attr.notify_hw = 1;
+ ste_attr.send_attr.user_data = user_data;
+ ste_attr.rtc_0 = rule->resize_info->rtc_0;
+ ste_attr.rtc_1 = rule->resize_info->rtc_1;
+ ste_attr.used_id_rtc_0 = &rule->resize_info->rtc_0;
+ ste_attr.used_id_rtc_1 = &rule->resize_info->rtc_1;
+ ste_attr.wqe_ctrl = &empty_wqe_ctrl;
+ ste_attr.wqe_tag_is_jumbo = is_jumbo;
+ ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_DEACTIVATE;
+
+ if (unlikely(mlx5hws_matcher_is_insert_by_idx(matcher)))
+ ste_attr.direct_index = rule->resize_info->rule_idx;
+
+ hws_rule_load_delete_info(rule, &ste_attr);
+ mlx5hws_send_ste(queue, &ste_attr);
+
+ return 0;
+}
+
+int mlx5hws_rule_move_hws_add(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(rule->matcher->mt);
+ struct mlx5hws_context *ctx = rule->matcher->tbl->ctx;
+ struct mlx5hws_matcher *matcher = rule->matcher;
+ struct mlx5hws_send_ste_attr ste_attr = {0};
+ struct mlx5hws_send_engine *queue;
+ int ret;
+
+ ret = hws_rule_enqueue_precheck_move(rule, attr);
+ if (unlikely(ret))
+ return ret;
+
+ queue = &ctx->send_queue[attr->queue_id];
+
+ ret = mlx5hws_send_engine_err(queue);
+ if (ret)
+ return ret;
+
+ hws_rule_move_init(rule, attr);
+ hws_rule_move_get_rtc(rule, &ste_attr);
+
+ ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
+ ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
+ ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
+ ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_ACTIVATE;
+ ste_attr.wqe_tag_is_jumbo = is_jumbo;
+
+ ste_attr.send_attr.rule = rule;
+ ste_attr.send_attr.fence = 0;
+ ste_attr.send_attr.notify_hw = !attr->burst;
+ ste_attr.send_attr.user_data = attr->user_data;
+
+ ste_attr.used_id_rtc_0 = &rule->rtc_0;
+ ste_attr.used_id_rtc_1 = &rule->rtc_1;
+ ste_attr.wqe_ctrl = (struct mlx5hws_wqe_gta_ctrl_seg *)rule->resize_info->ctrl_seg;
+ ste_attr.wqe_data = (struct mlx5hws_wqe_gta_data_seg_ste *)rule->resize_info->data_seg;
+ ste_attr.direct_index = mlx5hws_matcher_is_insert_by_idx(matcher) ?
+ attr->rule_idx : 0;
+
+ mlx5hws_send_ste(queue, &ste_attr);
+ mlx5hws_send_engine_inc_rule(queue);
+
+ if (!attr->burst)
+ mlx5hws_send_all_dep_wqe(queue);
+
+ return 0;
+}
+
+static u8 hws_rule_ethertype_to_matcher_ipv(u32 ethertype)
+{
+ switch (ethertype) {
+ case ETH_P_IP:
+ return MLX5HWS_MATCHER_IPV_4;
+ case ETH_P_IPV6:
+ return MLX5HWS_MATCHER_IPV_6;
+ default:
+ return MLX5HWS_MATCHER_IPV_UNSET;
+ }
+}
+
+static u8 hws_rule_ip_version_to_matcher_ipv(u32 ip_version)
+{
+ switch (ip_version) {
+ case 4:
+ return MLX5HWS_MATCHER_IPV_4;
+ case 6:
+ return MLX5HWS_MATCHER_IPV_6;
+ default:
+ return MLX5HWS_MATCHER_IPV_UNSET;
+ }
+}
+
+static int hws_rule_check_outer_ip_version(struct mlx5hws_matcher *matcher,
+ u32 *match_param)
+{
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ u8 outer_ipv_ether = MLX5HWS_MATCHER_IPV_UNSET;
+ u8 outer_ipv_ip = MLX5HWS_MATCHER_IPV_UNSET;
+ u8 outer_ipv, ver;
+
+ if (matcher->matches_outer_ethertype) {
+ ver = MLX5_GET(fte_match_param, match_param,
+ outer_headers.ethertype);
+ outer_ipv_ether = hws_rule_ethertype_to_matcher_ipv(ver);
+ }
+ if (matcher->matches_outer_ip_version) {
+ ver = MLX5_GET(fte_match_param, match_param,
+ outer_headers.ip_version);
+ outer_ipv_ip = hws_rule_ip_version_to_matcher_ipv(ver);
+ }
+
+ if (outer_ipv_ether != MLX5HWS_MATCHER_IPV_UNSET &&
+ outer_ipv_ip != MLX5HWS_MATCHER_IPV_UNSET &&
+ outer_ipv_ether != outer_ipv_ip) {
+ mlx5hws_err(ctx, "Rule matches on inconsistent outer ethertype and ip version\n");
+ return -EINVAL;
+ }
+
+ outer_ipv = outer_ipv_ether != MLX5HWS_MATCHER_IPV_UNSET ?
+ outer_ipv_ether : outer_ipv_ip;
+ if (outer_ipv != MLX5HWS_MATCHER_IPV_UNSET &&
+ matcher->outer_ip_version != MLX5HWS_MATCHER_IPV_UNSET &&
+ outer_ipv != matcher->outer_ip_version) {
+ mlx5hws_err(ctx, "Matcher and rule disagree on outer IP version\n");
+ return -EINVAL;
+ }
+ matcher->outer_ip_version = outer_ipv;
+
+ return 0;
+}
+
+static int hws_rule_check_inner_ip_version(struct mlx5hws_matcher *matcher,
+ u32 *match_param)
+{
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ u8 inner_ipv_ether = MLX5HWS_MATCHER_IPV_UNSET;
+ u8 inner_ipv_ip = MLX5HWS_MATCHER_IPV_UNSET;
+ u8 inner_ipv, ver;
+
+ if (matcher->matches_inner_ethertype) {
+ ver = MLX5_GET(fte_match_param, match_param,
+ inner_headers.ethertype);
+ inner_ipv_ether = hws_rule_ethertype_to_matcher_ipv(ver);
+ }
+ if (matcher->matches_inner_ip_version) {
+ ver = MLX5_GET(fte_match_param, match_param,
+ inner_headers.ip_version);
+ inner_ipv_ip = hws_rule_ip_version_to_matcher_ipv(ver);
+ }
+
+ if (inner_ipv_ether != MLX5HWS_MATCHER_IPV_UNSET &&
+ inner_ipv_ip != MLX5HWS_MATCHER_IPV_UNSET &&
+ inner_ipv_ether != inner_ipv_ip) {
+ mlx5hws_err(ctx, "Rule matches on inconsistent inner ethertype and ip version\n");
+ return -EINVAL;
+ }
+
+ inner_ipv = inner_ipv_ether != MLX5HWS_MATCHER_IPV_UNSET ?
+ inner_ipv_ether : inner_ipv_ip;
+ if (inner_ipv != MLX5HWS_MATCHER_IPV_UNSET &&
+ matcher->inner_ip_version != MLX5HWS_MATCHER_IPV_UNSET &&
+ inner_ipv != matcher->inner_ip_version) {
+ mlx5hws_err(ctx, "Matcher and rule disagree on inner IP version\n");
+ return -EINVAL;
+ }
+ matcher->inner_ip_version = inner_ipv;
+
+ return 0;
+}
+
+static int hws_rule_check_ip_version(struct mlx5hws_matcher *matcher,
+ u32 *match_param)
+{
+ int ret;
+
+ ret = hws_rule_check_outer_ip_version(matcher, match_param);
+ if (unlikely(ret))
+ return ret;
+
+ ret = hws_rule_check_inner_ip_version(matcher, match_param);
+ if (unlikely(ret))
+ return ret;
+
+ return 0;
+}
+
+int mlx5hws_rule_create(struct mlx5hws_matcher *matcher,
+ u8 mt_idx,
+ u32 *match_param,
+ u8 at_idx,
+ struct mlx5hws_rule_action rule_actions[],
+ struct mlx5hws_rule_attr *attr,
+ struct mlx5hws_rule *rule_handle)
+{
+ int ret;
+
+ ret = hws_rule_check_ip_version(matcher, match_param);
+ if (unlikely(ret))
+ return ret;
+
+ rule_handle->matcher = matcher;
+
+ ret = hws_rule_enqueue_precheck_create(rule_handle, attr);
+ if (unlikely(ret))
+ return ret;
+
+ if (unlikely(!(matcher->num_of_mt >= mt_idx) ||
+ !(matcher->num_of_at >= at_idx) ||
+ !match_param)) {
+ pr_warn("HWS: Invalid rule creation parameters (MTs, ATs or match params)\n");
+ return -EINVAL;
+ }
+
+ ret = hws_rule_create_hws(rule_handle,
+ attr,
+ mt_idx,
+ match_param,
+ at_idx,
+ rule_actions);
+
+ return ret;
+}
+
+int mlx5hws_rule_destroy(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ int ret;
+
+ ret = hws_rule_enqueue_precheck(rule, attr);
+ if (unlikely(ret))
+ return ret;
+
+ ret = hws_rule_destroy_hws(rule, attr);
+
+ return ret;
+}
+
+int mlx5hws_rule_action_update(struct mlx5hws_rule *rule,
+ u8 at_idx,
+ struct mlx5hws_rule_action rule_actions[],
+ struct mlx5hws_rule_attr *attr)
+{
+ int ret;
+
+ ret = hws_rule_enqueue_precheck_update(rule, attr);
+ if (unlikely(ret))
+ return ret;
+
+ ret = hws_rule_create_hws(rule,
+ attr,
+ 0,
+ NULL,
+ at_idx,
+ rule_actions);
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h
new file mode 100644
index 000000000000..d0f082b8dbf5
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_RULE_H_
+#define MLX5HWS_RULE_H_
+
+enum {
+ MLX5HWS_STE_CTRL_SZ = 20,
+ MLX5HWS_ACTIONS_SZ = 12,
+ MLX5HWS_MATCH_TAG_SZ = 32,
+ MLX5HWS_JUMBO_TAG_SZ = 44,
+};
+
+enum mlx5hws_rule_status {
+ MLX5HWS_RULE_STATUS_UNKNOWN,
+ MLX5HWS_RULE_STATUS_CREATING,
+ MLX5HWS_RULE_STATUS_CREATED,
+ MLX5HWS_RULE_STATUS_UPDATING,
+ MLX5HWS_RULE_STATUS_UPDATED,
+ MLX5HWS_RULE_STATUS_DELETING,
+ MLX5HWS_RULE_STATUS_DELETED,
+ MLX5HWS_RULE_STATUS_FAILING,
+ MLX5HWS_RULE_STATUS_FAILED,
+};
+
+enum mlx5hws_rule_move_state {
+ MLX5HWS_RULE_RESIZE_STATE_IDLE,
+ MLX5HWS_RULE_RESIZE_STATE_WRITING,
+ MLX5HWS_RULE_RESIZE_STATE_DELETING,
+};
+
+enum mlx5hws_rule_jumbo_match_tag_offset {
+ MLX5HWS_RULE_JUMBO_MATCH_TAG_OFFSET_DW0 = 8,
+};
+
+struct mlx5hws_rule_match_tag {
+ union {
+ u8 jumbo[MLX5HWS_JUMBO_TAG_SZ];
+ struct {
+ u8 reserved[MLX5HWS_ACTIONS_SZ];
+ u8 match[MLX5HWS_MATCH_TAG_SZ];
+ };
+ };
+};
+
+struct mlx5hws_rule_resize_info {
+ u32 rtc_0;
+ u32 rtc_1;
+ u32 rule_idx;
+ u8 state;
+ u8 ctrl_seg[MLX5HWS_WQE_SZ_GTA_CTRL]; /* Ctrl segment of STE: 48 bytes */
+ u8 data_seg[MLX5HWS_WQE_SZ_GTA_DATA]; /* Data segment of STE: 64 bytes */
+};
+
+struct mlx5hws_rule {
+ struct mlx5hws_matcher *matcher;
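+	/* For non-resizable matchers the match tag is stored directly in 'tag';
+	 * resizable matchers keep it inside resize_info->data_seg instead.
+	 */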
+ union {
+ struct mlx5hws_rule_match_tag tag;
+ struct mlx5hws_rule_resize_info *resize_info;
+ };
+ struct mlx5hws_action_ste_chunk action_ste;
+ struct mlx5hws_action_ste_chunk old_action_ste;
+ u32 rtc_0; /* The RTC into which the STE was inserted */
+ u32 rtc_1; /* The RTC into which the STE was inserted */
+ u8 status; /* enum mlx5hws_rule_status */
+ u8 pending_wqes;
+ bool skip_delete; /* For complex rules - another rule with same tag
+ * still exists, so don't actually delete this rule.
+ */
+};
+
+void mlx5hws_rule_skip(struct mlx5hws_matcher *matcher, u32 flow_source,
+ bool *skip_rx, bool *skip_tx);
+
+void mlx5hws_rule_free_action_ste(struct mlx5hws_action_ste_chunk *action_ste);
+
+int mlx5hws_rule_move_hws_remove(struct mlx5hws_rule *rule,
+ void *queue, void *user_data);
+
+int mlx5hws_rule_move_hws_add(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr);
+
+bool mlx5hws_rule_move_in_progress(struct mlx5hws_rule *rule);
+
+void mlx5hws_rule_clear_resize_info(struct mlx5hws_rule *rule);
+
+#endif /* MLX5HWS_RULE_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c
new file mode 100644
index 000000000000..7510c46e58a5
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c
@@ -0,0 +1,1358 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "internal.h"
+#include "lib/clock.h"
+
+enum { CQ_OK = 0, CQ_EMPTY = -1, CQ_POLL_ERR = -2 };
+
+struct mlx5hws_send_ring_dep_wqe *
+mlx5hws_send_add_new_dep_wqe(struct mlx5hws_send_engine *queue)
+{
+ struct mlx5hws_send_ring_sq *send_sq = &queue->send_ring.send_sq;
+ unsigned int idx = send_sq->head_dep_idx++ & (queue->num_entries - 1);
+
+ memset(&send_sq->dep_wqe[idx].wqe_data.tag, 0, MLX5HWS_MATCH_TAG_SZ);
+
+ return &send_sq->dep_wqe[idx];
+}
+
+void mlx5hws_send_abort_new_dep_wqe(struct mlx5hws_send_engine *queue)
+{
+ queue->send_ring.send_sq.head_dep_idx--;
+}
+
+void mlx5hws_send_all_dep_wqe(struct mlx5hws_send_engine *queue)
+{
+ struct mlx5hws_send_ring_sq *send_sq = &queue->send_ring.send_sq;
+ struct mlx5hws_send_ste_attr ste_attr = {0};
+ struct mlx5hws_send_ring_dep_wqe *dep_wqe;
+
+ ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
+ ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
+ ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
+ ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_ACTIVATE;
+
+	/* Fence the first WQE from previous dependent WQEs */
+ ste_attr.send_attr.fence = 1;
+
+ while (send_sq->head_dep_idx != send_sq->tail_dep_idx) {
+ dep_wqe = &send_sq->dep_wqe[send_sq->tail_dep_idx++ & (queue->num_entries - 1)];
+
+ /* Notify HW on the last WQE */
+ ste_attr.send_attr.notify_hw = (send_sq->tail_dep_idx == send_sq->head_dep_idx);
+ ste_attr.send_attr.user_data = dep_wqe->user_data;
+ ste_attr.send_attr.rule = dep_wqe->rule;
+
+ ste_attr.rtc_0 = dep_wqe->rtc_0;
+ ste_attr.rtc_1 = dep_wqe->rtc_1;
+ ste_attr.retry_rtc_0 = dep_wqe->retry_rtc_0;
+ ste_attr.retry_rtc_1 = dep_wqe->retry_rtc_1;
+ ste_attr.used_id_rtc_0 = &dep_wqe->rule->rtc_0;
+ ste_attr.used_id_rtc_1 = &dep_wqe->rule->rtc_1;
+ ste_attr.wqe_ctrl = &dep_wqe->wqe_ctrl;
+ ste_attr.wqe_data = &dep_wqe->wqe_data;
+ ste_attr.direct_index = dep_wqe->direct_index;
+
+ mlx5hws_send_ste(queue, &ste_attr);
+
+ /* Fencing is done only on the first WQE */
+ ste_attr.send_attr.fence = 0;
+ }
+}
+
+struct mlx5hws_send_engine_post_ctrl
+mlx5hws_send_engine_post_start(struct mlx5hws_send_engine *queue)
+{
+ struct mlx5hws_send_engine_post_ctrl ctrl;
+
+ ctrl.queue = queue;
+ /* Currently only one send ring is supported */
+ ctrl.send_ring = &queue->send_ring;
+ ctrl.num_wqebbs = 0;
+
+ return ctrl;
+}
+
+void mlx5hws_send_engine_post_req_wqe(struct mlx5hws_send_engine_post_ctrl *ctrl,
+ char **buf, size_t *len)
+{
+ struct mlx5hws_send_ring_sq *send_sq = &ctrl->send_ring->send_sq;
+ unsigned int idx;
+
+ idx = (send_sq->cur_post + ctrl->num_wqebbs) & send_sq->buf_mask;
+
+	/* Note that *buf is a single MLX5_SEND_WQE_BB. It cannot be used
+	 * as a buffer for more than one WQE_BB, since two MLX5_SEND_WQE_BBs
+	 * may reside on two different kernel memory pages.
+	 */
+ *buf = mlx5_wq_cyc_get_wqe(&send_sq->wq, idx);
+ *len = MLX5_SEND_WQE_BB;
+
+ if (!ctrl->num_wqebbs) {
+ *buf += sizeof(struct mlx5hws_wqe_ctrl_seg);
+ *len -= sizeof(struct mlx5hws_wqe_ctrl_seg);
+ }
+
+ ctrl->num_wqebbs++;
+}
+
+static void hws_send_engine_post_ring(struct mlx5hws_send_ring_sq *sq,
+ struct mlx5hws_wqe_ctrl_seg *doorbell_cseg)
+{
+ /* ensure wqe is visible to device before updating doorbell record */
+ dma_wmb();
+
+ *sq->wq.db = cpu_to_be32(sq->cur_post);
+
+ /* ensure doorbell record is visible to device before ringing the
+ * doorbell
+ */
+ wmb();
+
+ mlx5_write64((__be32 *)doorbell_cseg, sq->uar_map);
+
+ /* Ensure doorbell is written on uar_page before poll_cq */
+ WRITE_ONCE(doorbell_cseg, NULL);
+}
+
+static void
+hws_send_wqe_set_tag(struct mlx5hws_wqe_gta_data_seg_ste *wqe_data,
+ struct mlx5hws_rule_match_tag *tag,
+ bool is_jumbo)
+{
+ if (is_jumbo) {
+ /* Clear previous possibly dirty control */
+ memset(wqe_data, 0, MLX5HWS_STE_CTRL_SZ);
+ memcpy(wqe_data->jumbo, tag->jumbo, MLX5HWS_JUMBO_TAG_SZ);
+ } else {
+ /* Clear previous possibly dirty control and actions */
+ memset(wqe_data, 0, MLX5HWS_STE_CTRL_SZ + MLX5HWS_ACTIONS_SZ);
+ memcpy(wqe_data->tag, tag->match, MLX5HWS_MATCH_TAG_SZ);
+ }
+}
+
+void mlx5hws_send_engine_post_end(struct mlx5hws_send_engine_post_ctrl *ctrl,
+ struct mlx5hws_send_engine_post_attr *attr)
+{
+ struct mlx5hws_wqe_ctrl_seg *wqe_ctrl;
+ struct mlx5hws_send_ring_sq *sq;
+ unsigned int idx;
+ u32 flags = 0;
+
+ sq = &ctrl->send_ring->send_sq;
+ idx = sq->cur_post & sq->buf_mask;
+ sq->last_idx = idx;
+
+ wqe_ctrl = mlx5_wq_cyc_get_wqe(&sq->wq, idx);
+
+ wqe_ctrl->opmod_idx_opcode =
+ cpu_to_be32((attr->opmod << 24) |
+ ((sq->cur_post & 0xffff) << 8) |
+ attr->opcode);
+ wqe_ctrl->qpn_ds =
+ cpu_to_be32((attr->len + sizeof(struct mlx5hws_wqe_ctrl_seg)) / 16 |
+ sq->sqn << 8);
+ wqe_ctrl->imm = cpu_to_be32(attr->id);
+
+ flags |= attr->notify_hw ? MLX5_WQE_CTRL_CQ_UPDATE : 0;
+ flags |= attr->fence ? MLX5_WQE_CTRL_INITIATOR_SMALL_FENCE : 0;
+ wqe_ctrl->flags = cpu_to_be32(flags);
+
+ sq->wr_priv[idx].id = attr->id;
+ sq->wr_priv[idx].retry_id = attr->retry_id;
+
+ sq->wr_priv[idx].rule = attr->rule;
+ sq->wr_priv[idx].user_data = attr->user_data;
+ sq->wr_priv[idx].num_wqebbs = ctrl->num_wqebbs;
+
+ if (attr->rule) {
+ sq->wr_priv[idx].rule->pending_wqes++;
+ sq->wr_priv[idx].used_id = attr->used_id;
+ }
+
+ sq->cur_post += ctrl->num_wqebbs;
+
+ if (attr->notify_hw)
+ hws_send_engine_post_ring(sq, wqe_ctrl);
+}
+
+static void hws_send_wqe(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_engine_post_attr *send_attr,
+ struct mlx5hws_wqe_gta_ctrl_seg *send_wqe_ctrl,
+ void *send_wqe_data,
+ void *send_wqe_tag,
+ bool is_jumbo,
+ u8 gta_opcode,
+ u32 direct_index)
+{
+ struct mlx5hws_wqe_gta_data_seg_ste *wqe_data;
+ struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl;
+ struct mlx5hws_send_engine_post_ctrl ctrl;
+ size_t wqe_len;
+
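+	/* An STE WQE spans two basic blocks: the first holds the generic ctrl
+	 * segment followed by the GTA ctrl segment, the second holds the GTA
+	 * data segment.
+	 */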
+ ctrl = mlx5hws_send_engine_post_start(queue);
+ mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
+ mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_data, &wqe_len);
+
+ wqe_ctrl->op_dirix = cpu_to_be32(gta_opcode << 28 | direct_index);
+ memcpy(wqe_ctrl->stc_ix, send_wqe_ctrl->stc_ix,
+ sizeof(send_wqe_ctrl->stc_ix));
+
+ if (send_wqe_data)
+ memcpy(wqe_data, send_wqe_data, sizeof(*wqe_data));
+ else
+ hws_send_wqe_set_tag(wqe_data, send_wqe_tag, is_jumbo);
+
+ mlx5hws_send_engine_post_end(&ctrl, send_attr);
+}
+
+void mlx5hws_send_ste(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ste_attr *ste_attr)
+{
+ struct mlx5hws_send_engine_post_attr *send_attr = &ste_attr->send_attr;
+ u8 notify_hw = send_attr->notify_hw;
+ u8 fence = send_attr->fence;
+
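+	/* When both RTCs are used, the fence is applied only to the first
+	 * WQE (RTC 1) and the doorbell is rung only for the last WQE (RTC 0).
+	 */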
+ if (ste_attr->rtc_1) {
+ send_attr->id = ste_attr->rtc_1;
+ send_attr->used_id = ste_attr->used_id_rtc_1;
+ send_attr->retry_id = ste_attr->retry_rtc_1;
+ send_attr->fence = fence;
+ send_attr->notify_hw = notify_hw && !ste_attr->rtc_0;
+ hws_send_wqe(queue, send_attr,
+ ste_attr->wqe_ctrl,
+ ste_attr->wqe_data,
+ ste_attr->wqe_tag,
+ ste_attr->wqe_tag_is_jumbo,
+ ste_attr->gta_opcode,
+ ste_attr->direct_index);
+ }
+
+ if (ste_attr->rtc_0) {
+ send_attr->id = ste_attr->rtc_0;
+ send_attr->used_id = ste_attr->used_id_rtc_0;
+ send_attr->retry_id = ste_attr->retry_rtc_0;
+ send_attr->fence = fence && !ste_attr->rtc_1;
+ send_attr->notify_hw = notify_hw;
+ hws_send_wqe(queue, send_attr,
+ ste_attr->wqe_ctrl,
+ ste_attr->wqe_data,
+ ste_attr->wqe_tag,
+ ste_attr->wqe_tag_is_jumbo,
+ ste_attr->gta_opcode,
+ ste_attr->direct_index);
+ }
+
+ /* Restore to original requested values */
+ send_attr->notify_hw = notify_hw;
+ send_attr->fence = fence;
+}
+
+static void hws_send_engine_retry_post_send(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ring_priv *priv,
+ u16 wqe_cnt)
+{
+ struct mlx5hws_send_engine_post_attr send_attr = {0};
+ struct mlx5hws_wqe_gta_data_seg_ste *wqe_data;
+ struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl;
+ struct mlx5hws_send_engine_post_ctrl ctrl;
+ struct mlx5hws_send_ring_sq *send_sq;
+ unsigned int idx;
+ size_t wqe_len;
+ char *p;
+
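+	/* Re-post the previously sent WQE to the retry (collision) RTC:
+	 * the original GTA ctrl and data segments are copied back from
+	 * the SQ buffer below.
+	 */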
+ send_attr.rule = priv->rule;
+ send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
+ send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
+ send_attr.len = MLX5_SEND_WQE_BB * 2 - sizeof(struct mlx5hws_wqe_ctrl_seg);
+ send_attr.notify_hw = 1;
+ send_attr.fence = 0;
+ send_attr.user_data = priv->user_data;
+ send_attr.id = priv->retry_id;
+ send_attr.used_id = priv->used_id;
+
+ ctrl = mlx5hws_send_engine_post_start(queue);
+ mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
+ mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_data, &wqe_len);
+
+ send_sq = &ctrl.send_ring->send_sq;
+ idx = wqe_cnt & send_sq->buf_mask;
+ p = mlx5_wq_cyc_get_wqe(&send_sq->wq, idx);
+
+ /* Copy old gta ctrl */
+ memcpy(wqe_ctrl, p + sizeof(struct mlx5hws_wqe_ctrl_seg),
+ MLX5_SEND_WQE_BB - sizeof(struct mlx5hws_wqe_ctrl_seg));
+
+ idx = (wqe_cnt + 1) & send_sq->buf_mask;
+ p = mlx5_wq_cyc_get_wqe(&send_sq->wq, idx);
+
+ /* Copy old gta data */
+ memcpy(wqe_data, p, MLX5_SEND_WQE_BB);
+
+ mlx5hws_send_engine_post_end(&ctrl, &send_attr);
+}
+
+void mlx5hws_send_engine_flush_queue(struct mlx5hws_send_engine *queue)
+{
+ struct mlx5hws_send_ring_sq *sq = &queue->send_ring.send_sq;
+ struct mlx5hws_wqe_ctrl_seg *wqe_ctrl;
+
+ wqe_ctrl = mlx5_wq_cyc_get_wqe(&sq->wq, sq->last_idx);
+ wqe_ctrl->flags |= cpu_to_be32(MLX5_WQE_CTRL_CQ_UPDATE);
+
+ hws_send_engine_post_ring(sq, wqe_ctrl);
+}
+
+static void
+hws_send_engine_update_rule_resize(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ring_priv *priv,
+ enum mlx5hws_flow_op_status *status)
+{
+ switch (priv->rule->resize_info->state) {
+ case MLX5HWS_RULE_RESIZE_STATE_WRITING:
+ if (priv->rule->status == MLX5HWS_RULE_STATUS_FAILING) {
+ /* Backup original RTCs */
+ u32 orig_rtc_0 = priv->rule->resize_info->rtc_0;
+ u32 orig_rtc_1 = priv->rule->resize_info->rtc_1;
+
+ /* Delete partially failed move rule using resize_info */
+ priv->rule->resize_info->rtc_0 = priv->rule->rtc_0;
+ priv->rule->resize_info->rtc_1 = priv->rule->rtc_1;
+
+ /* Move rule to original RTC for future delete */
+ priv->rule->rtc_0 = orig_rtc_0;
+ priv->rule->rtc_1 = orig_rtc_1;
+ }
+ /* Clean leftovers */
+ mlx5hws_rule_move_hws_remove(priv->rule, queue, priv->user_data);
+ break;
+
+ case MLX5HWS_RULE_RESIZE_STATE_DELETING:
+ if (priv->rule->status == MLX5HWS_RULE_STATUS_FAILING) {
+ *status = MLX5HWS_FLOW_OP_ERROR;
+ } else {
+ *status = MLX5HWS_FLOW_OP_SUCCESS;
+ priv->rule->matcher = priv->rule->matcher->resize_dst;
+ }
+ priv->rule->resize_info->state = MLX5HWS_RULE_RESIZE_STATE_IDLE;
+ priv->rule->status = MLX5HWS_RULE_STATUS_CREATED;
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void hws_send_engine_dump_error_cqe(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ring_priv *priv,
+ struct mlx5_cqe64 *cqe)
+{
+ u8 wqe_opcode = cqe ? be32_to_cpu(cqe->sop_drop_qpn) >> 24 : 0;
+ struct mlx5hws_context *ctx = priv->rule->matcher->tbl->ctx;
+ u32 opcode = cqe ? get_cqe_opcode(cqe) : 0;
+ struct mlx5hws_rule *rule = priv->rule;
+
+ /* If something bad happens and lots of rules are failing, we don't
+ * want to pollute dmesg. Print only the first bad cqe per engine,
+ * the one that started the avalanche.
+ */
+ if (queue->error_cqe_printed)
+ return;
+
+ queue->error_cqe_printed = true;
+
+ if (mlx5hws_rule_move_in_progress(rule))
+ mlx5hws_err(ctx,
+ "--- rule 0x%08llx: error completion moving rule: phase %s, wqes left %d\n",
+ HWS_PTR_TO_ID(rule),
+ rule->resize_info->state ==
+ MLX5HWS_RULE_RESIZE_STATE_WRITING ? "WRITING" :
+ rule->resize_info->state ==
+ MLX5HWS_RULE_RESIZE_STATE_DELETING ? "DELETING" :
+ "UNKNOWN",
+ rule->pending_wqes);
+ else
+ mlx5hws_err(ctx,
+ "--- rule 0x%08llx: error completion %s (%d), wqes left %d\n",
+ HWS_PTR_TO_ID(rule),
+ rule->status ==
+ MLX5HWS_RULE_STATUS_CREATING ? "CREATING" :
+ rule->status ==
+ MLX5HWS_RULE_STATUS_DELETING ? "DELETING" :
+ rule->status ==
+ MLX5HWS_RULE_STATUS_FAILING ? "FAILING" :
+ rule->status ==
+ MLX5HWS_RULE_STATUS_UPDATING ? "UPDATING" : "NA",
+ rule->status,
+ rule->pending_wqes);
+
+ mlx5hws_err(ctx, " rule 0x%08llx: matcher 0x%llx %s\n",
+ HWS_PTR_TO_ID(rule),
+ HWS_PTR_TO_ID(rule->matcher),
+ (rule->matcher->flags & MLX5HWS_MATCHER_FLAGS_ISOLATED) ?
+ "(isolated)" : "");
+
+ if (!cqe) {
+ mlx5hws_err(ctx, " rule 0x%08llx: no CQE\n",
+ HWS_PTR_TO_ID(rule));
+ return;
+ }
+
+ mlx5hws_err(ctx, " rule 0x%08llx: cqe->opcode = %d %s\n",
+ HWS_PTR_TO_ID(rule), opcode,
+ opcode == MLX5_CQE_REQ ? "(MLX5_CQE_REQ)" :
+ opcode == MLX5_CQE_REQ_ERR ? "(MLX5_CQE_REQ_ERR)" : " ");
+
+ if (opcode == MLX5_CQE_REQ_ERR) {
+ struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe;
+
+ mlx5hws_err(ctx,
+ " rule 0x%08llx: |--- hw_error_syndrome = 0x%x\n",
+ HWS_PTR_TO_ID(rule),
+ err_cqe->rsvd1[16]);
+ mlx5hws_err(ctx,
+ " rule 0x%08llx: |--- hw_syndrome_type = 0x%x\n",
+ HWS_PTR_TO_ID(rule),
+ err_cqe->rsvd1[17] >> 4);
+ mlx5hws_err(ctx,
+ " rule 0x%08llx: |--- vendor_err_synd = 0x%x\n",
+ HWS_PTR_TO_ID(rule),
+ err_cqe->vendor_err_synd);
+ mlx5hws_err(ctx,
+ " rule 0x%08llx: |--- syndrome = 0x%x\n",
+ HWS_PTR_TO_ID(rule),
+ err_cqe->syndrome);
+ }
+
+ mlx5hws_err(ctx,
+ " rule 0x%08llx: cqe->byte_cnt = 0x%08x\n",
+ HWS_PTR_TO_ID(rule), be32_to_cpu(cqe->byte_cnt));
+ mlx5hws_err(ctx,
+ " rule 0x%08llx: |-- UPDATE STATUS = %s\n",
+ HWS_PTR_TO_ID(rule),
+ (be32_to_cpu(cqe->byte_cnt) & 0x80000000) ?
+ "FAILURE" : "SUCCESS");
+ mlx5hws_err(ctx,
+ " rule 0x%08llx: |------- SYNDROME = %s\n",
+ HWS_PTR_TO_ID(rule),
+ ((be32_to_cpu(cqe->byte_cnt) & 0x00000003) == 1) ?
+ "SET_FLOW_FAIL" :
+ ((be32_to_cpu(cqe->byte_cnt) & 0x00000003) == 2) ?
+ "DISABLE_FLOW_FAIL" : "UNKNOWN");
+ mlx5hws_err(ctx,
+ " rule 0x%08llx: cqe->sop_drop_qpn = 0x%08x\n",
+ HWS_PTR_TO_ID(rule), be32_to_cpu(cqe->sop_drop_qpn));
+ mlx5hws_err(ctx,
+ " rule 0x%08llx: |-send wqe opcode = 0x%02x %s\n",
+ HWS_PTR_TO_ID(rule), wqe_opcode,
+ wqe_opcode == MLX5HWS_WQE_OPCODE_TBL_ACCESS ?
+ "(MLX5HWS_WQE_OPCODE_TBL_ACCESS)" : "(UNKNOWN)");
+ mlx5hws_err(ctx,
+ " rule 0x%08llx: |------------ qpn = 0x%06x\n",
+ HWS_PTR_TO_ID(rule),
+ be32_to_cpu(cqe->sop_drop_qpn) & 0xffffff);
+}
+
+static void hws_send_engine_update_rule(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ring_priv *priv,
+ u16 wqe_cnt,
+ enum mlx5hws_flow_op_status *status,
+ struct mlx5_cqe64 *cqe)
+{
+ priv->rule->pending_wqes--;
+
+ if (unlikely(*status == MLX5HWS_FLOW_OP_ERROR)) {
+ if (priv->retry_id) {
+ /* If there is a retry_id, then it's not an error yet,
+ * retry to insert this rule in the collision RTC.
+ */
+ hws_send_engine_retry_post_send(queue, priv, wqe_cnt);
+ return;
+ }
+ hws_send_engine_dump_error_cqe(queue, priv, cqe);
+ /* Some part of the rule failed */
+ priv->rule->status = MLX5HWS_RULE_STATUS_FAILING;
+ *priv->used_id = 0;
+ } else {
+ *priv->used_id = priv->id;
+ }
+
+ /* Update rule status for the last completion */
+ if (!priv->rule->pending_wqes) {
+ if (unlikely(mlx5hws_rule_move_in_progress(priv->rule))) {
+ hws_send_engine_update_rule_resize(queue, priv, status);
+ return;
+ }
+
+ if (unlikely(priv->rule->status == MLX5HWS_RULE_STATUS_FAILING)) {
+ /* Rule completely failed and doesn't require cleanup */
+ if (!priv->rule->rtc_0 && !priv->rule->rtc_1)
+ priv->rule->status = MLX5HWS_RULE_STATUS_FAILED;
+
+ *status = MLX5HWS_FLOW_OP_ERROR;
+ } else {
+			/* Increase the status; this only works on the good flow,
+			 * as the enum is arranged this way:
+			 *  - creating -> created
+			 *  - updating -> updated
+			 *  - deleting -> deleted
+			 */
+ priv->rule->status++;
+ *status = MLX5HWS_FLOW_OP_SUCCESS;
+ if (priv->rule->status == MLX5HWS_RULE_STATUS_DELETED) {
+ /* Rule was deleted, now we can safely release
+ * action STEs and clear resize info
+ */
+ mlx5hws_rule_free_action_ste(&priv->rule->action_ste);
+ mlx5hws_rule_clear_resize_info(priv->rule);
+ } else if (priv->rule->status == MLX5HWS_RULE_STATUS_UPDATED) {
+ /* Rule was updated, free the old action STEs */
+ mlx5hws_rule_free_action_ste(&priv->rule->old_action_ste);
+ /* Update completed - move the rule back to "created" */
+ priv->rule->status = MLX5HWS_RULE_STATUS_CREATED;
+ }
+ }
+ }
+}
+
+static void hws_send_engine_update(struct mlx5hws_send_engine *queue,
+ struct mlx5_cqe64 *cqe,
+ struct mlx5hws_send_ring_priv *priv,
+ struct mlx5hws_flow_op_result res[],
+ s64 *i,
+ u32 res_nb,
+ u16 wqe_cnt)
+{
+ enum mlx5hws_flow_op_status status;
+
+ if (!cqe || (likely(be32_to_cpu(cqe->byte_cnt) >> 31 == 0) &&
+ likely(get_cqe_opcode(cqe) == MLX5_CQE_REQ))) {
+ status = MLX5HWS_FLOW_OP_SUCCESS;
+ } else {
+ status = MLX5HWS_FLOW_OP_ERROR;
+ }
+
+ if (priv->user_data) {
+ if (priv->rule) {
+ hws_send_engine_update_rule(queue, priv, wqe_cnt,
+ &status, cqe);
+ /* Completion is provided on the last rule WQE */
+ if (priv->rule->pending_wqes)
+ return;
+ }
+
+ if (*i < res_nb) {
+ res[*i].user_data = priv->user_data;
+ res[*i].status = status;
+ (*i)++;
+ mlx5hws_send_engine_dec_rule(queue);
+ } else {
+ mlx5hws_send_engine_gen_comp(queue, priv->user_data, status);
+ }
+ }
+}
+
+static int mlx5hws_parse_cqe(struct mlx5hws_send_ring_cq *cq,
+ struct mlx5_cqe64 *cqe64)
+{
+ if (unlikely(get_cqe_opcode(cqe64) != MLX5_CQE_REQ)) {
+ struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe64;
+
+ mlx5_core_err(cq->mdev, "Bad OP in HWS SQ CQE: 0x%x\n", get_cqe_opcode(cqe64));
+ mlx5_core_err(cq->mdev, "vendor_err_synd=%x\n", err_cqe->vendor_err_synd);
+ mlx5_core_err(cq->mdev, "syndrome=%x\n", err_cqe->syndrome);
+ print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET,
+ 16, 1, err_cqe,
+ sizeof(*err_cqe), false);
+ return CQ_POLL_ERR;
+ }
+
+ return CQ_OK;
+}
+
+static int mlx5hws_cq_poll_one(struct mlx5hws_send_ring_cq *cq)
+{
+ struct mlx5_cqe64 *cqe64;
+ int err;
+
+ cqe64 = mlx5_cqwq_get_cqe(&cq->wq);
+ if (!cqe64) {
+ if (unlikely(cq->mdev->state ==
+ MLX5_DEVICE_STATE_INTERNAL_ERROR)) {
+ mlx5_core_dbg_once(cq->mdev,
+ "Polling CQ while device is shutting down\n");
+ return CQ_POLL_ERR;
+ }
+ return CQ_EMPTY;
+ }
+
+ mlx5_cqwq_pop(&cq->wq);
+ err = mlx5hws_parse_cqe(cq, cqe64);
+ mlx5_cqwq_update_db_record(&cq->wq);
+
+ return err;
+}
+
+static void hws_send_engine_poll_cq(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_flow_op_result res[],
+ s64 *polled,
+ u32 res_nb)
+{
+ struct mlx5hws_send_ring *send_ring = &queue->send_ring;
+ struct mlx5hws_send_ring_cq *cq = &send_ring->send_cq;
+ struct mlx5hws_send_ring_sq *sq = &send_ring->send_sq;
+ struct mlx5hws_send_ring_priv *priv;
+ struct mlx5_cqe64 *cqe;
+ u8 cqe_opcode;
+ u16 wqe_cnt;
+
+ cqe = mlx5_cqwq_get_cqe(&cq->wq);
+ if (!cqe)
+ return;
+
+ cqe_opcode = get_cqe_opcode(cqe);
+ if (cqe_opcode == MLX5_CQE_INVALID)
+ return;
+
+ if (unlikely(cqe_opcode != MLX5_CQE_REQ))
+ queue->err = true;
+
+ wqe_cnt = be16_to_cpu(cqe->wqe_counter) & sq->buf_mask;
+
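+	/* First handle WQEs that completed without generating their own CQE,
+	 * then handle the WQE reported by this CQE together with the CQE.
+	 */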
+ while (cq->poll_wqe != wqe_cnt) {
+ priv = &sq->wr_priv[cq->poll_wqe];
+ hws_send_engine_update(queue, NULL, priv, res, polled, res_nb, 0);
+ cq->poll_wqe = (cq->poll_wqe + priv->num_wqebbs) & sq->buf_mask;
+ }
+
+ priv = &sq->wr_priv[wqe_cnt];
+ cq->poll_wqe = (wqe_cnt + priv->num_wqebbs) & sq->buf_mask;
+ hws_send_engine_update(queue, cqe, priv, res, polled, res_nb, wqe_cnt);
+ mlx5hws_cq_poll_one(cq);
+}
+
+static void hws_send_engine_poll_list(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_flow_op_result res[],
+ s64 *polled,
+ u32 res_nb)
+{
+ struct mlx5hws_completed_poll *comp = &queue->completed;
+
+ while (comp->ci != comp->pi) {
+ if (*polled < res_nb) {
+ res[*polled].status =
+ comp->entries[comp->ci].status;
+ res[*polled].user_data =
+ comp->entries[comp->ci].user_data;
+ (*polled)++;
+ comp->ci = (comp->ci + 1) & comp->mask;
+ mlx5hws_send_engine_dec_rule(queue);
+ } else {
+ return;
+ }
+ }
+}
+
+static int hws_send_engine_poll(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_flow_op_result res[],
+ u32 res_nb)
+{
+ s64 polled = 0;
+
+ hws_send_engine_poll_list(queue, res, &polled, res_nb);
+
+ if (polled >= res_nb)
+ return polled;
+
+ hws_send_engine_poll_cq(queue, res, &polled, res_nb);
+
+ return polled;
+}
+
+int mlx5hws_send_queue_poll(struct mlx5hws_context *ctx,
+ u16 queue_id,
+ struct mlx5hws_flow_op_result res[],
+ u32 res_nb)
+{
+ return hws_send_engine_poll(&ctx->send_queue[queue_id], res, res_nb);
+}
+
+static int hws_send_ring_alloc_sq(struct mlx5_core_dev *mdev,
+ int numa_node,
+ struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ring_sq *sq,
+ void *sqc_data)
+{
+ void *sqc_wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ struct mlx5_wq_param param;
+ size_t buf_sz;
+ int err;
+
+ sq->uar_map = mdev->priv.bfreg.map;
+ sq->mdev = mdev;
+
+ param.db_numa_node = numa_node;
+ param.buf_numa_node = numa_node;
+ err = mlx5_wq_cyc_create(mdev, &param, sqc_wq, wq, &sq->wq_ctrl);
+ if (err)
+ return err;
+ wq->db = &wq->db[MLX5_SND_DBR];
+
+ buf_sz = queue->num_entries * MAX_WQES_PER_RULE;
+ sq->dep_wqe = kcalloc(queue->num_entries, sizeof(*sq->dep_wqe), GFP_KERNEL);
+ if (!sq->dep_wqe) {
+ err = -ENOMEM;
+ goto destroy_wq_cyc;
+ }
+
+ sq->wr_priv = kzalloc(sizeof(*sq->wr_priv) * buf_sz, GFP_KERNEL);
+ if (!sq->wr_priv) {
+ err = -ENOMEM;
+ goto free_dep_wqe;
+ }
+
+ sq->buf_mask = (queue->num_entries * MAX_WQES_PER_RULE) - 1;
+
+ return 0;
+
+free_dep_wqe:
+ kfree(sq->dep_wqe);
+destroy_wq_cyc:
+ mlx5_wq_destroy(&sq->wq_ctrl);
+ return err;
+}
+
+static void hws_send_ring_free_sq(struct mlx5hws_send_ring_sq *sq)
+{
+ if (!sq)
+ return;
+ kfree(sq->wr_priv);
+ kfree(sq->dep_wqe);
+ mlx5_wq_destroy(&sq->wq_ctrl);
+}
+
+static int hws_send_ring_create_sq(struct mlx5_core_dev *mdev, u32 pdn,
+ void *sqc_data,
+ struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ring_sq *sq,
+ struct mlx5hws_send_ring_cq *cq)
+{
+ void *in, *sqc, *wq;
+ int inlen, err;
+ u8 ts_format;
+
+ inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
+ sizeof(u64) * sq->wq_ctrl.buf.npages;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
+ wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ memcpy(sqc, sqc_data, MLX5_ST_SZ_BYTES(sqc));
+ MLX5_SET(sqc, sqc, cqn, cq->mcq.cqn);
+
+ MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
+ MLX5_SET(sqc, sqc, flush_in_error_en, 1);
+ MLX5_SET(sqc, sqc, non_wire, 1);
+
+ ts_format = mlx5_is_real_time_sq(mdev) ? MLX5_TIMESTAMP_FORMAT_REAL_TIME :
+ MLX5_TIMESTAMP_FORMAT_FREE_RUNNING;
+ MLX5_SET(sqc, sqc, ts_format, ts_format);
+
+ MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
+ MLX5_SET(wq, wq, uar_page, mdev->priv.bfreg.index);
+ MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);
+
+ mlx5_fill_page_frag_array(&sq->wq_ctrl.buf,
+ (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
+
+ err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);
+
+ kvfree(in);
+
+ return err;
+}
+
+static void hws_send_ring_destroy_sq(struct mlx5_core_dev *mdev,
+ struct mlx5hws_send_ring_sq *sq)
+{
+ mlx5_core_destroy_sq(mdev, sq->sqn);
+}
+
+static int hws_send_ring_set_sq_rdy(struct mlx5_core_dev *mdev, u32 sqn)
+{
+ void *in, *sqc;
+ int inlen, err;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST);
+ sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
+ MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY);
+
+ err = mlx5_core_modify_sq(mdev, sqn, in);
+
+ kvfree(in);
+
+ return err;
+}
+
+static void hws_send_ring_close_sq(struct mlx5hws_send_ring_sq *sq)
+{
+ mlx5_core_destroy_sq(sq->mdev, sq->sqn);
+ mlx5_wq_destroy(&sq->wq_ctrl);
+ kfree(sq->wr_priv);
+ kfree(sq->dep_wqe);
+}
+
+static int hws_send_ring_create_sq_rdy(struct mlx5_core_dev *mdev, u32 pdn,
+ void *sqc_data,
+ struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ring_sq *sq,
+ struct mlx5hws_send_ring_cq *cq)
+{
+ int err;
+
+ err = hws_send_ring_create_sq(mdev, pdn, sqc_data, queue, sq, cq);
+ if (err)
+ return err;
+
+ err = hws_send_ring_set_sq_rdy(mdev, sq->sqn);
+ if (err)
+ hws_send_ring_destroy_sq(mdev, sq);
+
+ return err;
+}
+
+static int hws_send_ring_open_sq(struct mlx5hws_context *ctx,
+ int numa_node,
+ struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ring_sq *sq,
+ struct mlx5hws_send_ring_cq *cq)
+{
+ size_t buf_sz, sq_log_buf_sz;
+ void *sqc_data, *wq;
+ int err;
+
+ sqc_data = kvzalloc(MLX5_ST_SZ_BYTES(sqc), GFP_KERNEL);
+ if (!sqc_data)
+ return -ENOMEM;
+
+ buf_sz = queue->num_entries * MAX_WQES_PER_RULE;
+ sq_log_buf_sz = ilog2(roundup_pow_of_two(buf_sz));
+
+ wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
+ MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
+ MLX5_SET(wq, wq, pd, ctx->pd_num);
+ MLX5_SET(wq, wq, log_wq_sz, sq_log_buf_sz);
+
+ err = hws_send_ring_alloc_sq(ctx->mdev, numa_node, queue, sq, sqc_data);
+ if (err)
+ goto err_free_sqc;
+
+ err = hws_send_ring_create_sq_rdy(ctx->mdev, ctx->pd_num, sqc_data,
+ queue, sq, cq);
+ if (err)
+ goto err_free_sq;
+
+ kvfree(sqc_data);
+
+ return 0;
+err_free_sq:
+ hws_send_ring_free_sq(sq);
+err_free_sqc:
+ kvfree(sqc_data);
+ return err;
+}
+
+static int hws_send_ring_alloc_cq(struct mlx5_core_dev *mdev,
+ int numa_node,
+ struct mlx5hws_send_engine *queue,
+ void *cqc_data,
+ struct mlx5hws_send_ring_cq *cq)
+{
+ struct mlx5_core_cq *mcq = &cq->mcq;
+ struct mlx5_wq_param param;
+ struct mlx5_cqe64 *cqe;
+ int err;
+ u32 i;
+
+ param.buf_numa_node = numa_node;
+ param.db_numa_node = numa_node;
+
+ err = mlx5_cqwq_create(mdev, &param, cqc_data, &cq->wq, &cq->wq_ctrl);
+ if (err)
+ return err;
+
+ mcq->cqe_sz = 64;
+ mcq->set_ci_db = cq->wq_ctrl.db.db;
+ mcq->arm_db = cq->wq_ctrl.db.db + 1;
+
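+	/* Mark all CQEs as invalid so that stale entries are not treated
+	 * as valid completions before HW writes them.
+	 */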
+ for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
+ cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
+ cqe->op_own = 0xf1;
+ }
+
+ cq->mdev = mdev;
+
+ return 0;
+}
+
+static int hws_send_ring_create_cq(struct mlx5_core_dev *mdev,
+ struct mlx5hws_send_engine *queue,
+ void *cqc_data,
+ struct mlx5hws_send_ring_cq *cq)
+{
+ u32 out[MLX5_ST_SZ_DW(create_cq_out)];
+ struct mlx5_core_cq *mcq = &cq->mcq;
+ void *in, *cqc;
+ int inlen, eqn;
+ int err;
+
+ err = mlx5_comp_eqn_get(mdev, 0, &eqn);
+ if (err)
+ return err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+ sizeof(u64) * cq->wq_ctrl.buf.npages;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
+ memcpy(cqc, cqc_data, MLX5_ST_SZ_BYTES(cqc));
+ mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
+ (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
+
+ MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
+ MLX5_SET(cqc, cqc, uar_page, mdev->priv.bfreg.up->index);
+ MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
+
+ err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));
+
+ kvfree(in);
+
+ return err;
+}
+
+static int hws_send_ring_open_cq(struct mlx5_core_dev *mdev,
+ struct mlx5hws_send_engine *queue,
+ int numa_node,
+ struct mlx5hws_send_ring_cq *cq)
+{
+ void *cqc_data;
+ int err;
+
+ cqc_data = kvzalloc(MLX5_ST_SZ_BYTES(cqc), GFP_KERNEL);
+ if (!cqc_data)
+ return -ENOMEM;
+
+ MLX5_SET(cqc, cqc_data, uar_page, mdev->priv.bfreg.up->index);
+ MLX5_SET(cqc, cqc_data, log_cq_size, ilog2(queue->num_entries));
+
+ err = hws_send_ring_alloc_cq(mdev, numa_node, queue, cqc_data, cq);
+ if (err)
+ goto err_out;
+
+ err = hws_send_ring_create_cq(mdev, queue, cqc_data, cq);
+ if (err)
+ goto err_free_cq;
+
+ kvfree(cqc_data);
+
+ return 0;
+
+err_free_cq:
+ mlx5_wq_destroy(&cq->wq_ctrl);
+err_out:
+ kvfree(cqc_data);
+ return err;
+}
+
+static void hws_send_ring_close_cq(struct mlx5hws_send_ring_cq *cq)
+{
+ mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
+ mlx5_wq_destroy(&cq->wq_ctrl);
+}
+
+static void hws_send_ring_close(struct mlx5hws_send_engine *queue)
+{
+ hws_send_ring_close_sq(&queue->send_ring.send_sq);
+ hws_send_ring_close_cq(&queue->send_ring.send_cq);
+}
+
+static int mlx5hws_send_ring_open(struct mlx5hws_context *ctx,
+ struct mlx5hws_send_engine *queue)
+{
+ int numa_node = dev_to_node(mlx5_core_dma_dev(ctx->mdev));
+ struct mlx5hws_send_ring *ring = &queue->send_ring;
+ int err;
+
+ err = hws_send_ring_open_cq(ctx->mdev, queue, numa_node, &ring->send_cq);
+ if (err)
+ return err;
+
+ err = hws_send_ring_open_sq(ctx, numa_node, queue, &ring->send_sq,
+ &ring->send_cq);
+ if (err)
+ goto close_cq;
+
+ return err;
+
+close_cq:
+ hws_send_ring_close_cq(&ring->send_cq);
+ return err;
+}
+
+static void mlx5hws_send_queue_close(struct mlx5hws_send_engine *queue)
+{
+ if (!queue->num_entries)
+ return; /* this queue wasn't initialized */
+
+ hws_send_ring_close(queue);
+ kfree(queue->completed.entries);
+}
+
+static int mlx5hws_send_queue_open(struct mlx5hws_context *ctx,
+ struct mlx5hws_send_engine *queue,
+ u16 queue_size)
+{
+ int err;
+
+ mutex_init(&queue->lock);
+
+ queue->num_entries = roundup_pow_of_two(queue_size);
+ queue->used_entries = 0;
+
+ queue->completed.entries = kcalloc(queue->num_entries,
+ sizeof(queue->completed.entries[0]),
+ GFP_KERNEL);
+ if (!queue->completed.entries)
+ return -ENOMEM;
+
+ queue->completed.pi = 0;
+ queue->completed.ci = 0;
+ queue->completed.mask = queue->num_entries - 1;
+ err = mlx5hws_send_ring_open(ctx, queue);
+ if (err)
+ goto free_completed_entries;
+
+ return 0;
+
+free_completed_entries:
+ kfree(queue->completed.entries);
+ return err;
+}
+
+static void __hws_send_queues_close(struct mlx5hws_context *ctx, u16 queues)
+{
+ while (queues--)
+ mlx5hws_send_queue_close(&ctx->send_queue[queues]);
+}
+
+static void hws_send_queues_bwc_locks_destroy(struct mlx5hws_context *ctx)
+{
+ int bwc_queues = mlx5hws_bwc_queues(ctx);
+ int i;
+
+ if (!mlx5hws_context_bwc_supported(ctx))
+ return;
+
+ for (i = 0; i < bwc_queues; i++) {
+ mutex_destroy(&ctx->bwc_send_queue_locks[i]);
+ lockdep_unregister_key(ctx->bwc_lock_class_keys + i);
+ }
+
+ kfree(ctx->bwc_lock_class_keys);
+ kfree(ctx->bwc_send_queue_locks);
+}
+
+void mlx5hws_send_queues_close(struct mlx5hws_context *ctx)
+{
+ hws_send_queues_bwc_locks_destroy(ctx);
+ __hws_send_queues_close(ctx, ctx->queues);
+ kfree(ctx->send_queue);
+}
+
+static int hws_bwc_send_queues_init(struct mlx5hws_context *ctx)
+{
+ /* Number of BWC queues is equal to number of the usual HWS queues */
+ int bwc_queues = ctx->queues - 1;
+ int i;
+
+ if (!mlx5hws_context_bwc_supported(ctx))
+ return 0;
+
+ ctx->queues += bwc_queues;
+
+ ctx->bwc_send_queue_locks = kcalloc(bwc_queues,
+ sizeof(*ctx->bwc_send_queue_locks),
+ GFP_KERNEL);
+
+ if (!ctx->bwc_send_queue_locks)
+ return -ENOMEM;
+
+ ctx->bwc_lock_class_keys = kcalloc(bwc_queues,
+ sizeof(*ctx->bwc_lock_class_keys),
+ GFP_KERNEL);
+ if (!ctx->bwc_lock_class_keys)
+ goto err_lock_class_keys;
+
+ for (i = 0; i < bwc_queues; i++) {
+ mutex_init(&ctx->bwc_send_queue_locks[i]);
+ lockdep_register_key(ctx->bwc_lock_class_keys + i);
+ lockdep_set_class(ctx->bwc_send_queue_locks + i, ctx->bwc_lock_class_keys + i);
+ }
+
+ return 0;
+
+err_lock_class_keys:
+ kfree(ctx->bwc_send_queue_locks);
+ return -ENOMEM;
+}
+
+int mlx5hws_send_queues_open(struct mlx5hws_context *ctx,
+ u16 queues,
+ u16 queue_size)
+{
+ int err = 0;
+ int i = 0;
+
+ /* Open one extra queue for control path */
+ ctx->queues = queues + 1;
+
+ /* open a separate set of queues and locks for bwc API */
+ err = hws_bwc_send_queues_init(ctx);
+ if (err)
+ return err;
+
+ ctx->send_queue = kcalloc(ctx->queues, sizeof(*ctx->send_queue), GFP_KERNEL);
+ if (!ctx->send_queue) {
+ err = -ENOMEM;
+ goto free_bwc_locks;
+ }
+
+ /* If native API isn't supported, skip the unused native queues:
+ * initialize BWC queues and control queue only.
+ */
+ if (!mlx5hws_context_native_supported(ctx))
+ i = mlx5hws_bwc_get_queue_id(ctx, 0);
+
+ for (; i < ctx->queues; i++) {
+ err = mlx5hws_send_queue_open(ctx, &ctx->send_queue[i], queue_size);
+ if (err)
+ goto close_send_queues;
+ }
+
+ return 0;
+
+close_send_queues:
+ __hws_send_queues_close(ctx, i);
+
+ kfree(ctx->send_queue);
+
+free_bwc_locks:
+ hws_send_queues_bwc_locks_destroy(ctx);
+
+ return err;
+}
+
+int mlx5hws_send_queue_action(struct mlx5hws_context *ctx,
+ u16 queue_id,
+ u32 actions)
+{
+ struct mlx5hws_send_ring_sq *send_sq;
+ struct mlx5hws_send_engine *queue;
+ bool wait_comp = false;
+ s64 polled = 0;
+
+ queue = &ctx->send_queue[queue_id];
+ send_sq = &queue->send_ring.send_sq;
+
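+	/* SYNC drains and busy-polls until the queue is empty; ASYNC only posts the drain */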
+ switch (actions) {
+ case MLX5HWS_SEND_QUEUE_ACTION_DRAIN_SYNC:
+ wait_comp = true;
+ fallthrough;
+ case MLX5HWS_SEND_QUEUE_ACTION_DRAIN_ASYNC:
+ if (send_sq->head_dep_idx != send_sq->tail_dep_idx)
+ /* Send dependent WQEs to drain the queue */
+ mlx5hws_send_all_dep_wqe(queue);
+ else
+ /* Signal on the last posted WQE */
+ mlx5hws_send_engine_flush_queue(queue);
+
+ /* Poll queue until empty */
+ while (wait_comp && !mlx5hws_send_engine_empty(queue))
+ hws_send_engine_poll_cq(queue, NULL, &polled, 0);
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
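+/* Write a single GTA WQE through a FW command (mlx5hws_cmd_generate_wqe)
+ * instead of posting it on the SQ. On failure, retry once with the retry
+ * RTC id if one was provided.
+ */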
+static int
+hws_send_wqe_fw(struct mlx5_core_dev *mdev,
+ u32 pd_num,
+ struct mlx5hws_send_engine_post_attr *send_attr,
+ struct mlx5hws_wqe_gta_ctrl_seg *send_wqe_ctrl,
+ void *send_wqe_match_data,
+ void *send_wqe_match_tag,
+ void *send_wqe_range_data,
+ void *send_wqe_range_tag,
+ bool is_jumbo,
+ u8 gta_opcode)
+{
+ bool has_range = send_wqe_range_data || send_wqe_range_tag;
+ bool has_match = send_wqe_match_data || send_wqe_match_tag;
+ struct mlx5hws_wqe_gta_data_seg_ste gta_wqe_data0 = {0};
+ struct mlx5hws_wqe_gta_data_seg_ste gta_wqe_data1 = {0};
+ struct mlx5hws_wqe_gta_ctrl_seg gta_wqe_ctrl = {0};
+ struct mlx5hws_cmd_generate_wqe_attr attr = {0};
+ struct mlx5hws_wqe_ctrl_seg wqe_ctrl = {0};
+ struct mlx5_cqe64 cqe;
+ u32 flags = 0;
+ int ret;
+
+ /* Set WQE control */
+ wqe_ctrl.opmod_idx_opcode = cpu_to_be32((send_attr->opmod << 24) | send_attr->opcode);
+ wqe_ctrl.qpn_ds = cpu_to_be32((send_attr->len + sizeof(struct mlx5hws_wqe_ctrl_seg)) / 16);
+ flags |= send_attr->notify_hw ? MLX5_WQE_CTRL_CQ_UPDATE : 0;
+ wqe_ctrl.flags = cpu_to_be32(flags);
+ wqe_ctrl.imm = cpu_to_be32(send_attr->id);
+
+ /* Set GTA WQE CTRL */
+ memcpy(gta_wqe_ctrl.stc_ix, send_wqe_ctrl->stc_ix, sizeof(send_wqe_ctrl->stc_ix));
+ gta_wqe_ctrl.op_dirix = cpu_to_be32(gta_opcode << 28);
+
+ /* Set GTA match WQE DATA */
+ if (has_match) {
+ if (send_wqe_match_data)
+ memcpy(&gta_wqe_data0, send_wqe_match_data, sizeof(gta_wqe_data0));
+ else
+ hws_send_wqe_set_tag(&gta_wqe_data0, send_wqe_match_tag, is_jumbo);
+
+ gta_wqe_data0.rsvd1_definer = cpu_to_be32(send_attr->match_definer_id << 8);
+ attr.gta_data_0 = (u8 *)&gta_wqe_data0;
+ }
+
+ /* Set GTA range WQE DATA */
+ if (has_range) {
+ if (send_wqe_range_data)
+ memcpy(&gta_wqe_data1, send_wqe_range_data, sizeof(gta_wqe_data1));
+ else
+ hws_send_wqe_set_tag(&gta_wqe_data1, send_wqe_range_tag, false);
+
+ gta_wqe_data1.rsvd1_definer = cpu_to_be32(send_attr->range_definer_id << 8);
+ attr.gta_data_1 = (u8 *)&gta_wqe_data1;
+ }
+
+ attr.pdn = pd_num;
+ attr.wqe_ctrl = (u8 *)&wqe_ctrl;
+ attr.gta_ctrl = (u8 *)&gta_wqe_ctrl;
+
+send_wqe:
+ ret = mlx5hws_cmd_generate_wqe(mdev, &attr, &cqe);
+ if (ret) {
+		mlx5_core_err(mdev, "Failed to write WQE using command\n");
+ return ret;
+ }
+
+ if ((get_cqe_opcode(&cqe) == MLX5_CQE_REQ) &&
+ (be32_to_cpu(cqe.byte_cnt) >> 31 == 0)) {
+ *send_attr->used_id = send_attr->id;
+ return 0;
+ }
+
+ /* Retry if rule failed */
+ if (send_attr->retry_id) {
+ wqe_ctrl.imm = cpu_to_be32(send_attr->retry_id);
+ send_attr->id = send_attr->retry_id;
+ send_attr->retry_id = 0;
+ goto send_wqe;
+ }
+
+ return -1;
+}
+
+void mlx5hws_send_stes_fw(struct mlx5hws_context *ctx,
+ struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ste_attr *ste_attr)
+{
+ struct mlx5hws_send_engine_post_attr *send_attr = &ste_attr->send_attr;
+ struct mlx5hws_rule *rule = send_attr->rule;
+ struct mlx5_core_dev *mdev;
+ u16 queue_id;
+ u32 pdn;
+ int ret;
+
+ queue_id = queue - ctx->send_queue;
+ mdev = ctx->mdev;
+ pdn = ctx->pd_num;
+
+	/* Writes through FW cannot be HW-fenced, so drain the queue instead */
+ if (send_attr->fence)
+ mlx5hws_send_queue_action(ctx,
+ queue_id,
+ MLX5HWS_SEND_QUEUE_ACTION_DRAIN_SYNC);
+
+ if (ste_attr->rtc_1) {
+ send_attr->id = ste_attr->rtc_1;
+ send_attr->used_id = ste_attr->used_id_rtc_1;
+ send_attr->retry_id = ste_attr->retry_rtc_1;
+ ret = hws_send_wqe_fw(mdev, pdn, send_attr,
+ ste_attr->wqe_ctrl,
+ ste_attr->wqe_data,
+ ste_attr->wqe_tag,
+ ste_attr->range_wqe_data,
+ ste_attr->range_wqe_tag,
+ ste_attr->wqe_tag_is_jumbo,
+ ste_attr->gta_opcode);
+ if (ret)
+ goto fail_rule;
+ }
+
+ if (ste_attr->rtc_0) {
+ send_attr->id = ste_attr->rtc_0;
+ send_attr->used_id = ste_attr->used_id_rtc_0;
+ send_attr->retry_id = ste_attr->retry_rtc_0;
+ ret = hws_send_wqe_fw(mdev, pdn, send_attr,
+ ste_attr->wqe_ctrl,
+ ste_attr->wqe_data,
+ ste_attr->wqe_tag,
+ ste_attr->range_wqe_data,
+ ste_attr->range_wqe_tag,
+ ste_attr->wqe_tag_is_jumbo,
+ ste_attr->gta_opcode);
+ if (ret)
+ goto fail_rule;
+ }
+
+	/* Advance the rule status. This only works on the good-flow path,
+	 * as the enum is ordered: creating -> created -> deleting -> deleted.
+	 */
+ if (likely(rule))
+ rule->status++;
+
+ mlx5hws_send_engine_gen_comp(queue, send_attr->user_data, MLX5HWS_FLOW_OP_SUCCESS);
+
+ return;
+
+fail_rule:
+ if (likely(rule))
+ rule->status = !rule->rtc_0 && !rule->rtc_1 ?
+ MLX5HWS_RULE_STATUS_FAILED : MLX5HWS_RULE_STATUS_FAILING;
+
+ mlx5hws_send_engine_gen_comp(queue, send_attr->user_data, MLX5HWS_FLOW_OP_ERROR);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h
new file mode 100644
index 000000000000..3fb8e99309b2
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h
@@ -0,0 +1,265 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_SEND_H_
+#define MLX5HWS_SEND_H_
+
+/* A single operation requires at least two WQEBBs,
+ * which means a maximum of 16 such operations per rule.
+ */
+#define MAX_WQES_PER_RULE 32
+
+enum mlx5hws_wqe_opcode {
+ MLX5HWS_WQE_OPCODE_TBL_ACCESS = 0x2c,
+};
+
+enum mlx5hws_wqe_opmod {
+ MLX5HWS_WQE_OPMOD_GTA_STE = 0,
+ MLX5HWS_WQE_OPMOD_GTA_MOD_ARG = 1,
+};
+
+enum mlx5hws_wqe_gta_opcode {
+ MLX5HWS_WQE_GTA_OP_ACTIVATE = 0,
+ MLX5HWS_WQE_GTA_OP_DEACTIVATE = 1,
+};
+
+enum mlx5hws_wqe_gta_opmod {
+ MLX5HWS_WQE_GTA_OPMOD_STE = 0,
+ MLX5HWS_WQE_GTA_OPMOD_MOD_ARG = 1,
+};
+
+enum mlx5hws_wqe_gta_sz {
+ MLX5HWS_WQE_SZ_GTA_CTRL = 48,
+ MLX5HWS_WQE_SZ_GTA_DATA = 64,
+};
+
+/* WQE Control segment. */
+struct mlx5hws_wqe_ctrl_seg {
+ __be32 opmod_idx_opcode;
+ __be32 qpn_ds;
+ __be32 flags;
+ __be32 imm;
+};
+
+struct mlx5hws_wqe_gta_ctrl_seg {
+ __be32 op_dirix;
+ __be32 stc_ix[5];
+ __be32 rsvd0[6];
+};
+
+struct mlx5hws_wqe_gta_data_seg_ste {
+ __be32 rsvd0_ctr_id;
+ __be32 rsvd1_definer;
+ __be32 rsvd2[3];
+ union {
+ struct {
+ __be32 action[3];
+ __be32 tag[8];
+ };
+ __be32 jumbo[11];
+ };
+};
+
+struct mlx5hws_wqe_gta_data_seg_arg {
+ __be32 action_args[8];
+};
+
+struct mlx5hws_wqe_gta {
+ struct mlx5hws_wqe_gta_ctrl_seg gta_ctrl;
+ union {
+ struct mlx5hws_wqe_gta_data_seg_ste seg_ste;
+ struct mlx5hws_wqe_gta_data_seg_arg seg_arg;
+ };
+};
+
+struct mlx5hws_send_ring_cq {
+ struct mlx5_core_dev *mdev;
+ struct mlx5_cqwq wq;
+ struct mlx5_wq_ctrl wq_ctrl;
+ struct mlx5_core_cq mcq;
+ u16 poll_wqe;
+};
+
+struct mlx5hws_send_ring_priv {
+ struct mlx5hws_rule *rule;
+ void *user_data;
+ u32 num_wqebbs;
+ u32 id;
+ u32 retry_id;
+ u32 *used_id;
+};
+
+struct mlx5hws_send_ring_dep_wqe {
+ struct mlx5hws_wqe_gta_ctrl_seg wqe_ctrl;
+ struct mlx5hws_wqe_gta_data_seg_ste wqe_data;
+ struct mlx5hws_rule *rule;
+ u32 rtc_0;
+ u32 rtc_1;
+ u32 retry_rtc_0;
+ u32 retry_rtc_1;
+ u32 direct_index;
+ void *user_data;
+};
+
+struct mlx5hws_send_ring_sq {
+ struct mlx5_core_dev *mdev;
+ u16 cur_post;
+ u16 buf_mask;
+ struct mlx5hws_send_ring_priv *wr_priv;
+ unsigned int last_idx;
+ struct mlx5hws_send_ring_dep_wqe *dep_wqe;
+ unsigned int head_dep_idx;
+ unsigned int tail_dep_idx;
+ u32 sqn;
+ struct mlx5_wq_cyc wq;
+ struct mlx5_wq_ctrl wq_ctrl;
+ void __iomem *uar_map;
+};
+
+struct mlx5hws_send_ring {
+ struct mlx5hws_send_ring_cq send_cq;
+ struct mlx5hws_send_ring_sq send_sq;
+};
+
+struct mlx5hws_completed_poll_entry {
+ void *user_data;
+ enum mlx5hws_flow_op_status status;
+};
+
+struct mlx5hws_completed_poll {
+ struct mlx5hws_completed_poll_entry *entries;
+ u16 ci;
+ u16 pi;
+ u16 mask;
+};
+
+struct mlx5hws_send_engine {
+ struct mlx5hws_send_ring send_ring;
+ struct mlx5_uars_page *uar; /* Uar is shared between rings of a queue */
+ struct mlx5hws_completed_poll completed;
+ u16 used_entries;
+ u16 num_entries;
+ bool err;
+ bool error_cqe_printed;
+ struct mutex lock; /* Protects the send engine */
+};
+
+struct mlx5hws_send_engine_post_ctrl {
+ struct mlx5hws_send_engine *queue;
+ struct mlx5hws_send_ring *send_ring;
+ size_t num_wqebbs;
+};
+
+struct mlx5hws_send_engine_post_attr {
+ u8 opcode;
+ u8 opmod;
+ u8 notify_hw;
+ u8 fence;
+ u8 match_definer_id;
+ u8 range_definer_id;
+ size_t len;
+ struct mlx5hws_rule *rule;
+ u32 id;
+ u32 retry_id;
+ u32 *used_id;
+ void *user_data;
+};
+
+struct mlx5hws_send_ste_attr {
+ u32 rtc_0;
+ u32 rtc_1;
+ u32 retry_rtc_0;
+ u32 retry_rtc_1;
+ u32 *used_id_rtc_0;
+ u32 *used_id_rtc_1;
+ bool wqe_tag_is_jumbo;
+ u8 gta_opcode;
+ u32 direct_index;
+ struct mlx5hws_send_engine_post_attr send_attr;
+ struct mlx5hws_rule_match_tag *wqe_tag;
+ struct mlx5hws_rule_match_tag *range_wqe_tag;
+ struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl;
+ struct mlx5hws_wqe_gta_data_seg_ste *wqe_data;
+ struct mlx5hws_wqe_gta_data_seg_ste *range_wqe_data;
+};
+
+struct mlx5hws_send_ring_dep_wqe *
+mlx5hws_send_add_new_dep_wqe(struct mlx5hws_send_engine *queue);
+
+void mlx5hws_send_abort_new_dep_wqe(struct mlx5hws_send_engine *queue);
+
+void mlx5hws_send_all_dep_wqe(struct mlx5hws_send_engine *queue);
+
+void mlx5hws_send_queues_close(struct mlx5hws_context *ctx);
+
+int mlx5hws_send_queues_open(struct mlx5hws_context *ctx,
+ u16 queues,
+ u16 queue_size);
+
+int mlx5hws_send_queue_action(struct mlx5hws_context *ctx,
+ u16 queue_id,
+ u32 actions);
+
+int mlx5hws_send_test(struct mlx5hws_context *ctx,
+ u16 queues,
+ u16 queue_size);
+
+struct mlx5hws_send_engine_post_ctrl
+mlx5hws_send_engine_post_start(struct mlx5hws_send_engine *queue);
+
+void mlx5hws_send_engine_post_req_wqe(struct mlx5hws_send_engine_post_ctrl *ctrl,
+ char **buf, size_t *len);
+
+void mlx5hws_send_engine_post_end(struct mlx5hws_send_engine_post_ctrl *ctrl,
+ struct mlx5hws_send_engine_post_attr *attr);
+
+void mlx5hws_send_ste(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ste_attr *ste_attr);
+
+void mlx5hws_send_stes_fw(struct mlx5hws_context *ctx,
+ struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ste_attr *ste_attr);
+
+void mlx5hws_send_engine_flush_queue(struct mlx5hws_send_engine *queue);
+
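+/* The engine is empty when the CQ's poll position has caught up with the
+ * SQ's post position.
+ */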
+static inline bool mlx5hws_send_engine_empty(struct mlx5hws_send_engine *queue)
+{
+ struct mlx5hws_send_ring_sq *send_sq = &queue->send_ring.send_sq;
+ struct mlx5hws_send_ring_cq *send_cq = &queue->send_ring.send_cq;
+
+ return ((send_sq->cur_post & send_sq->buf_mask) == send_cq->poll_wqe);
+}
+
+static inline bool mlx5hws_send_engine_full(struct mlx5hws_send_engine *queue)
+{
+ return queue->used_entries >= queue->num_entries;
+}
+
+static inline void mlx5hws_send_engine_inc_rule(struct mlx5hws_send_engine *queue)
+{
+ queue->used_entries++;
+}
+
+static inline void mlx5hws_send_engine_dec_rule(struct mlx5hws_send_engine *queue)
+{
+ queue->used_entries--;
+}
+
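+/* Record a SW-generated completion on the engine's completed ring */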
+static inline void mlx5hws_send_engine_gen_comp(struct mlx5hws_send_engine *queue,
+ void *user_data,
+ int comp_status)
+{
+ struct mlx5hws_completed_poll *comp = &queue->completed;
+
+ comp->entries[comp->pi].status = comp_status;
+ comp->entries[comp->pi].user_data = user_data;
+
+ comp->pi = (comp->pi + 1) & comp->mask;
+}
+
+static inline bool mlx5hws_send_engine_err(struct mlx5hws_send_engine *queue)
+{
+ return queue->err;
+}
+
+#endif /* MLX5HWS_SEND_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c
new file mode 100644
index 000000000000..6113383ae47b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c
@@ -0,0 +1,494 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "internal.h"
+
+u32 mlx5hws_table_get_id(struct mlx5hws_table *tbl)
+{
+ return tbl->ft_id;
+}
+
+static void hws_table_init_next_ft_attr(struct mlx5hws_table *tbl,
+ u16 uid,
+ struct mlx5hws_cmd_ft_create_attr *ft_attr)
+{
+ ft_attr->type = tbl->fw_ft_type;
+ if (tbl->type == MLX5HWS_TABLE_TYPE_FDB)
+ ft_attr->level = tbl->ctx->caps->fdb_ft.max_level - 1;
+ else
+ ft_attr->level = tbl->ctx->caps->nic_ft.max_level - 1;
+
+ ft_attr->rtc_valid = true;
+ ft_attr->uid = uid;
+}
+
+static void hws_table_set_cap_attr(struct mlx5hws_table *tbl,
+ struct mlx5hws_cmd_ft_create_attr *ft_attr)
+{
+ /* Enabling reformat_en or decap_en for the first flow table
+ * must be done when all VFs are down.
+ * However, HWS doesn't know when it is required to create the first FT.
+ * On the other hand, HWS doesn't use all these FT capabilities at all
+ * (the API doesn't even provide a way to specify these flags), so we'll
+ * just set these caps on all the flow tables.
+ * If HCA_CAP.fdb_dynamic_tunnel is set, this constraint is N/A.
+ */
+ if (!MLX5_CAP_ESW_FLOWTABLE(tbl->ctx->mdev, fdb_dynamic_tunnel)) {
+ ft_attr->reformat_en = true;
+ ft_attr->decap_en = true;
+ }
+}
+
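+/* FDB tables share a single default miss table per context: it is created
+ * on first use and reference-counted afterwards.
+ */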
+static int hws_table_up_default_fdb_miss_tbl(struct mlx5hws_table *tbl)
+__must_hold(&tbl->ctx->ctrl_lock)
+{
+ struct mlx5hws_cmd_ft_create_attr ft_attr = {0};
+ struct mlx5hws_cmd_set_fte_attr fte_attr = {0};
+ struct mlx5hws_cmd_forward_tbl *default_miss;
+ struct mlx5hws_cmd_set_fte_dest dest = {0};
+ struct mlx5hws_context *ctx = tbl->ctx;
+ u8 tbl_type = tbl->type;
+
+ if (tbl->type != MLX5HWS_TABLE_TYPE_FDB)
+ return 0;
+
+ if (ctx->common_res.default_miss) {
+ ctx->common_res.default_miss->refcount++;
+ return 0;
+ }
+
+ ft_attr.type = tbl->fw_ft_type;
+ ft_attr.level = tbl->ctx->caps->fdb_ft.max_level; /* The last level */
+ ft_attr.rtc_valid = false;
+
+ dest.destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest.destination_id = ctx->caps->eswitch_manager_vport_number;
+
+ fte_attr.action_flags = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ fte_attr.dests_num = 1;
+ fte_attr.dests = &dest;
+
+ default_miss = mlx5hws_cmd_forward_tbl_create(ctx->mdev, &ft_attr, &fte_attr);
+ if (!default_miss) {
+		mlx5hws_err(ctx, "Failed to create default miss table, type: 0x%x\n", tbl_type);
+ return -EINVAL;
+ }
+
+ ctx->common_res.default_miss = default_miss;
+ ctx->common_res.default_miss->refcount++;
+
+ return 0;
+}
+
+/* Called under ctx->ctrl_lock */
+static void hws_table_down_default_fdb_miss_tbl(struct mlx5hws_table *tbl)
+__must_hold(&tbl->ctx->ctrl_lock)
+{
+ struct mlx5hws_cmd_forward_tbl *default_miss;
+ struct mlx5hws_context *ctx = tbl->ctx;
+
+ if (tbl->type != MLX5HWS_TABLE_TYPE_FDB)
+ return;
+
+ default_miss = ctx->common_res.default_miss;
+ if (--default_miss->refcount)
+ return;
+
+ mlx5hws_cmd_forward_tbl_destroy(ctx->mdev, default_miss);
+ ctx->common_res.default_miss = NULL;
+}
+
+static int hws_table_connect_to_default_miss_tbl(struct mlx5hws_table *tbl, u32 ft_id)
+{
+ struct mlx5hws_cmd_ft_modify_attr ft_attr = {0};
+ int ret;
+
+ if (unlikely(tbl->type != MLX5HWS_TABLE_TYPE_FDB))
+ pr_warn("HWS: invalid table type %d\n", tbl->type);
+
+ mlx5hws_cmd_set_attr_connect_miss_tbl(tbl->ctx,
+ tbl->fw_ft_type,
+ tbl->type,
+ &ft_attr);
+
+ ret = mlx5hws_cmd_flow_table_modify(tbl->ctx->mdev, &ft_attr, ft_id);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Failed to connect FT to default FDB FT\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+int mlx5hws_table_create_default_ft(struct mlx5_core_dev *mdev,
+ struct mlx5hws_table *tbl,
+ u16 uid, u32 *ft_id)
+{
+ struct mlx5hws_cmd_ft_create_attr ft_attr = {0};
+ int ret;
+
+ hws_table_init_next_ft_attr(tbl, uid, &ft_attr);
+ hws_table_set_cap_attr(tbl, &ft_attr);
+
+ ret = mlx5hws_cmd_flow_table_create(mdev, &ft_attr, ft_id);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Failed creating default ft\n");
+ return ret;
+ }
+
+ if (tbl->type == MLX5HWS_TABLE_TYPE_FDB) {
+ /* Take/create ref over the default miss */
+ ret = hws_table_up_default_fdb_miss_tbl(tbl);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Failed to get default fdb miss\n");
+ goto free_ft_obj;
+ }
+ ret = hws_table_connect_to_default_miss_tbl(tbl, *ft_id);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Failed connecting to default miss tbl\n");
+ goto down_miss_tbl;
+ }
+ }
+
+ return 0;
+
+down_miss_tbl:
+ hws_table_down_default_fdb_miss_tbl(tbl);
+free_ft_obj:
+ mlx5hws_cmd_flow_table_destroy(mdev, ft_attr.type, *ft_id);
+ return ret;
+}
+
+void mlx5hws_table_destroy_default_ft(struct mlx5hws_table *tbl,
+ u32 ft_id)
+{
+ mlx5hws_cmd_flow_table_destroy(tbl->ctx->mdev, tbl->fw_ft_type, ft_id);
+ hws_table_down_default_fdb_miss_tbl(tbl);
+}
+
+static int hws_table_init_check_hws_support(struct mlx5hws_context *ctx,
+ struct mlx5hws_table *tbl)
+{
+ if (!(ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT)) {
+ mlx5hws_err(ctx, "HWS not supported, cannot create mlx5hws_table\n");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int hws_table_init(struct mlx5hws_table *tbl)
+{
+ struct mlx5hws_context *ctx = tbl->ctx;
+ int ret;
+
+ ret = hws_table_init_check_hws_support(ctx, tbl);
+ if (ret)
+ return ret;
+
+ if (mlx5hws_table_get_fw_ft_type(tbl->type, (u8 *)&tbl->fw_ft_type)) {
+ pr_warn("HWS: invalid table type %d\n", tbl->type);
+ return -EOPNOTSUPP;
+ }
+
+ mutex_lock(&ctx->ctrl_lock);
+ ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev,
+ tbl,
+ tbl->uid,
+ &tbl->ft_id);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Failed to create flow table object\n");
+ mutex_unlock(&ctx->ctrl_lock);
+ return ret;
+ }
+
+ ret = mlx5hws_action_get_default_stc(ctx, tbl->type);
+ if (ret)
+ goto tbl_destroy;
+
+ INIT_LIST_HEAD(&tbl->matchers_list);
+ INIT_LIST_HEAD(&tbl->default_miss.head);
+
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return 0;
+
+tbl_destroy:
+ mlx5hws_table_destroy_default_ft(tbl, tbl->ft_id);
+ mutex_unlock(&ctx->ctrl_lock);
+ return ret;
+}
+
+static void hws_table_uninit(struct mlx5hws_table *tbl)
+{
+ mutex_lock(&tbl->ctx->ctrl_lock);
+ mlx5hws_action_put_default_stc(tbl->ctx, tbl->type);
+ mlx5hws_table_destroy_default_ft(tbl, tbl->ft_id);
+ mutex_unlock(&tbl->ctx->ctrl_lock);
+}
+
+struct mlx5hws_table *mlx5hws_table_create(struct mlx5hws_context *ctx,
+ struct mlx5hws_table_attr *attr)
+{
+ struct mlx5hws_table *tbl;
+ int ret;
+
+ if (attr->type > MLX5HWS_TABLE_TYPE_FDB) {
+ mlx5hws_err(ctx, "Invalid table type %d\n", attr->type);
+ return NULL;
+ }
+
+ tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
+ if (!tbl)
+ return NULL;
+
+ tbl->ctx = ctx;
+ tbl->type = attr->type;
+ tbl->level = attr->level;
+ tbl->uid = attr->uid;
+
+ ret = hws_table_init(tbl);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to initialise table\n");
+ goto free_tbl;
+ }
+
+ mutex_lock(&ctx->ctrl_lock);
+ list_add(&tbl->tbl_list_node, &ctx->tbl_list);
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return tbl;
+
+free_tbl:
+ kfree(tbl);
+ return NULL;
+}
+
+int mlx5hws_table_destroy(struct mlx5hws_table *tbl)
+{
+ struct mlx5hws_context *ctx = tbl->ctx;
+ int ret;
+
+ mutex_lock(&ctx->ctrl_lock);
+ if (!list_empty(&tbl->matchers_list)) {
+ mlx5hws_err(tbl->ctx, "Cannot destroy table containing matchers\n");
+ ret = -EBUSY;
+ goto unlock_err;
+ }
+
+ if (!list_empty(&tbl->default_miss.head)) {
+ mlx5hws_err(tbl->ctx, "Cannot destroy table pointed by default miss\n");
+ ret = -EBUSY;
+ goto unlock_err;
+ }
+
+ list_del_init(&tbl->tbl_list_node);
+ mutex_unlock(&ctx->ctrl_lock);
+
+ hws_table_uninit(tbl);
+ kfree(tbl);
+
+ return 0;
+
+unlock_err:
+ mutex_unlock(&ctx->ctrl_lock);
+ return ret;
+}
+
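+/* Return the last flow table in this table's matcher chain, or the table's
+ * own default FT when no matchers are attached.
+ */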
+static u32 hws_table_get_last_ft(struct mlx5hws_table *tbl)
+{
+ struct mlx5hws_matcher *matcher;
+
+ if (list_empty(&tbl->matchers_list))
+ return tbl->ft_id;
+
+ matcher = list_last_entry(&tbl->matchers_list, struct mlx5hws_matcher, list_node);
+ return matcher->end_ft_id;
+}
+
+int mlx5hws_table_ft_set_default_next_ft(struct mlx5hws_table *tbl, u32 ft_id)
+{
+ struct mlx5hws_cmd_ft_modify_attr ft_attr = {0};
+ int ret;
+
+ /* Due to FW limitation, resetting the flow table to default action will
+ * disconnect RTC when ignore_flow_level_rtc_valid is not supported.
+ */
+ if (!tbl->ctx->caps->nic_ft.ignore_flow_level_rtc_valid)
+ return 0;
+
+ if (tbl->type == MLX5HWS_TABLE_TYPE_FDB)
+ return hws_table_connect_to_default_miss_tbl(tbl, ft_id);
+
+ ft_attr.type = tbl->fw_ft_type;
+ ft_attr.modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION;
+ ft_attr.table_miss_action = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_DEFAULT;
+
+ ret = mlx5hws_cmd_flow_table_modify(tbl->ctx->mdev, &ft_attr, ft_id);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Failed to set FT default miss action\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+int mlx5hws_table_ft_set_next_rtc(struct mlx5hws_context *ctx,
+ u32 ft_id,
+ u32 fw_ft_type,
+ u32 rtc_0_id,
+ u32 rtc_1_id)
+{
+ struct mlx5hws_cmd_ft_modify_attr ft_attr = {0};
+
+ ft_attr.modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_RTC_ID;
+ ft_attr.type = fw_ft_type;
+ ft_attr.rtc_id_0 = rtc_0_id;
+ ft_attr.rtc_id_1 = rtc_1_id;
+
+ return mlx5hws_cmd_flow_table_modify(ctx->mdev, &ft_attr, ft_id);
+}
+
+int mlx5hws_table_ft_set_next_ft(struct mlx5hws_context *ctx,
+ u32 ft_id,
+ u32 fw_ft_type,
+ u32 next_ft_id)
+{
+ struct mlx5hws_cmd_ft_modify_attr ft_attr = {0};
+
+ ft_attr.modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION;
+ ft_attr.table_miss_action = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_GOTO_TBL;
+ ft_attr.type = fw_ft_type;
+ ft_attr.table_miss_id = next_ft_id;
+
+ return mlx5hws_cmd_flow_table_modify(ctx->mdev, &ft_attr, ft_id);
+}
+
+int mlx5hws_table_update_connected_miss_tables(struct mlx5hws_table *dst_tbl)
+{
+ struct mlx5hws_table *src_tbl;
+ int ret;
+
+ if (list_empty(&dst_tbl->default_miss.head))
+ return 0;
+
+ list_for_each_entry(src_tbl, &dst_tbl->default_miss.head, default_miss.next) {
+ ret = mlx5hws_table_connect_to_miss_table(src_tbl, dst_tbl);
+ if (ret) {
+ mlx5hws_err(dst_tbl->ctx,
+ "Failed to update source miss table, unexpected behavior\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
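+/* Connect src_tbl's last flow table to dst_tbl (either its start anchor or
+ * its first matcher's RTC), or reset it to the default miss behavior when
+ * dst_tbl is NULL.
+ */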
+int mlx5hws_table_connect_to_miss_table(struct mlx5hws_table *src_tbl,
+ struct mlx5hws_table *dst_tbl)
+{
+ struct mlx5hws_matcher *matcher;
+ u32 last_ft_id;
+ int ret;
+
+ last_ft_id = hws_table_get_last_ft(src_tbl);
+
+ if (dst_tbl) {
+ if (list_empty(&dst_tbl->matchers_list)) {
+ /* Connect src_tbl last_ft to dst_tbl start anchor */
+ ret = mlx5hws_table_ft_set_next_ft(src_tbl->ctx,
+ last_ft_id,
+ src_tbl->fw_ft_type,
+ dst_tbl->ft_id);
+ if (ret)
+ return ret;
+
+ /* Reset last_ft RTC to default RTC */
+ ret = mlx5hws_table_ft_set_next_rtc(src_tbl->ctx,
+ last_ft_id,
+ src_tbl->fw_ft_type,
+ 0, 0);
+ if (ret)
+ return ret;
+ } else {
+ /* Connect src_tbl last_ft to first matcher RTC */
+ matcher = list_first_entry(&dst_tbl->matchers_list,
+ struct mlx5hws_matcher,
+ list_node);
+ ret = mlx5hws_table_ft_set_next_rtc(src_tbl->ctx,
+ last_ft_id,
+ src_tbl->fw_ft_type,
+ matcher->match_ste.rtc_0_id,
+ matcher->match_ste.rtc_1_id);
+ if (ret)
+ return ret;
+
+ /* Reset next miss FT to default */
+ ret = mlx5hws_table_ft_set_default_next_ft(src_tbl, last_ft_id);
+ if (ret)
+ return ret;
+ }
+ } else {
+ /* Reset next miss FT to default */
+ ret = mlx5hws_table_ft_set_default_next_ft(src_tbl, last_ft_id);
+ if (ret)
+ return ret;
+
+ /* Reset last_ft RTC to default RTC */
+ ret = mlx5hws_table_ft_set_next_rtc(src_tbl->ctx,
+ last_ft_id,
+ src_tbl->fw_ft_type,
+ 0, 0);
+ if (ret)
+ return ret;
+ }
+
+ src_tbl->default_miss.miss_tbl = dst_tbl;
+
+ return 0;
+}
+
+static int hws_table_set_default_miss_not_valid(struct mlx5hws_table *tbl,
+ struct mlx5hws_table *miss_tbl)
+{
+ if (!tbl->ctx->caps->nic_ft.ignore_flow_level_rtc_valid) {
+ mlx5hws_err(tbl->ctx, "Default miss table is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+	if (miss_tbl && miss_tbl->type != tbl->type) {
+ mlx5hws_err(tbl->ctx, "Invalid arguments\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int mlx5hws_table_set_default_miss(struct mlx5hws_table *tbl,
+ struct mlx5hws_table *miss_tbl)
+{
+ struct mlx5hws_context *ctx = tbl->ctx;
+ struct mlx5hws_table *old_miss_tbl;
+ int ret;
+
+ ret = hws_table_set_default_miss_not_valid(tbl, miss_tbl);
+ if (ret)
+ return ret;
+
+ mutex_lock(&ctx->ctrl_lock);
+
+ old_miss_tbl = tbl->default_miss.miss_tbl;
+ ret = mlx5hws_table_connect_to_miss_table(tbl, miss_tbl);
+ if (ret)
+ goto out;
+
+ if (old_miss_tbl)
+ list_del_init(&tbl->default_miss.next);
+
+ if (miss_tbl)
+ list_add(&tbl->default_miss.next, &miss_tbl->default_miss.head);
+
+out:
+ mutex_unlock(&ctx->ctrl_lock);
+ return ret;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h
new file mode 100644
index 000000000000..1246f9bd8422
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_TABLE_H_
+#define MLX5HWS_TABLE_H_
+
+struct mlx5hws_default_miss {
+	/* This table's default miss destination */
+ struct mlx5hws_table *miss_tbl;
+ struct list_head next;
+	/* Tables whose default miss points to this table */
+ struct list_head head;
+};
+
+struct mlx5hws_table {
+ struct mlx5hws_context *ctx;
+ u32 ft_id;
+ enum mlx5hws_table_type type;
+ u32 fw_ft_type;
+ u32 level;
+ u16 uid;
+ struct list_head matchers_list;
+ struct list_head tbl_list_node;
+ struct mlx5hws_default_miss default_miss;
+};
+
+static inline
+u32 mlx5hws_table_get_fw_ft_type(enum mlx5hws_table_type type,
+ u8 *ret_type)
+{
+ if (type != MLX5HWS_TABLE_TYPE_FDB)
+ return -EOPNOTSUPP;
+
+ *ret_type = FS_FT_FDB;
+
+ return 0;
+}
+
+static inline
+u32 mlx5hws_table_get_res_fw_ft_type(enum mlx5hws_table_type tbl_type,
+ bool is_mirror)
+{
+ if (tbl_type == MLX5HWS_TABLE_TYPE_FDB)
+ return is_mirror ? FS_FT_FDB_TX : FS_FT_FDB_RX;
+
+ return 0;
+}
+
+int mlx5hws_table_create_default_ft(struct mlx5_core_dev *mdev,
+ struct mlx5hws_table *tbl,
+ u16 uid, u32 *ft_id);
+
+void mlx5hws_table_destroy_default_ft(struct mlx5hws_table *tbl,
+ u32 ft_id);
+
+int mlx5hws_table_connect_to_miss_table(struct mlx5hws_table *src_tbl,
+ struct mlx5hws_table *dst_tbl);
+
+int mlx5hws_table_update_connected_miss_tables(struct mlx5hws_table *dst_tbl);
+
+int mlx5hws_table_ft_set_default_next_ft(struct mlx5hws_table *tbl, u32 ft_id);
+
+int mlx5hws_table_ft_set_next_rtc(struct mlx5hws_context *ctx,
+ u32 ft_id,
+ u32 fw_ft_type,
+ u32 rtc_0_id,
+ u32 rtc_1_id);
+
+int mlx5hws_table_ft_set_next_ft(struct mlx5hws_context *ctx,
+ u32 ft_id,
+ u32 fw_ft_type,
+ u32 next_ft_id);
+
+#endif /* MLX5HWS_TABLE_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/vport.c
new file mode 100644
index 000000000000..d8e382b9fa61
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/vport.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "internal.h"
+
+int mlx5hws_vport_init_vports(struct mlx5hws_context *ctx)
+{
+ int ret;
+
+ if (!ctx->caps->eswitch_manager)
+ return 0;
+
+ xa_init(&ctx->vports.vport_gvmi_xa);
+
+	/* Set gvmi for the eswitch manager and uplink vports only. The rest of the
+	 * vports (vport 0 of other functions, VFs and SFs) are queried dynamically.
+ */
+
+ ret = mlx5hws_cmd_query_gvmi(ctx->mdev, false, 0, &ctx->vports.esw_manager_gvmi);
+ if (ret)
+ return ret;
+
+ ctx->vports.uplink_gvmi = 0;
+ return 0;
+}
+
+void mlx5hws_vport_uninit_vports(struct mlx5hws_context *ctx)
+{
+ if (ctx->caps->eswitch_manager)
+ xa_destroy(&ctx->vports.vport_gvmi_xa);
+}
+
+static int hws_vport_add_gvmi(struct mlx5hws_context *ctx, u16 vport)
+{
+ u16 vport_gvmi;
+ int ret;
+
+ ret = mlx5hws_cmd_query_gvmi(ctx->mdev, true, vport, &vport_gvmi);
+ if (ret)
+ return -EINVAL;
+
+ ret = xa_insert(&ctx->vports.vport_gvmi_xa, vport,
+ xa_mk_value(vport_gvmi), GFP_KERNEL);
+ if (ret)
+ mlx5hws_dbg(ctx, "Couldn't insert new vport gvmi into xarray (%d)\n", ret);
+
+ return ret;
+}
+
+static bool hws_vport_is_esw_mgr_vport(struct mlx5hws_context *ctx, u16 vport)
+{
+ return ctx->caps->is_ecpf ? vport == MLX5_VPORT_ECPF :
+ vport == MLX5_VPORT_PF;
+}
+
+int mlx5hws_vport_get_gvmi(struct mlx5hws_context *ctx, u16 vport, u16 *vport_gvmi)
+{
+ void *entry;
+ int ret;
+
+ if (!ctx->caps->eswitch_manager)
+ return -EINVAL;
+
+ if (hws_vport_is_esw_mgr_vport(ctx, vport)) {
+ *vport_gvmi = ctx->vports.esw_manager_gvmi;
+ return 0;
+ }
+
+ if (vport == MLX5_VPORT_UPLINK) {
+ *vport_gvmi = ctx->vports.uplink_gvmi;
+ return 0;
+ }
+
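+	/* xa_insert() returns -EBUSY if another thread already added this
+	 * vport's gvmi; in that case simply reload the existing entry.
+	 */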
+load_entry:
+ entry = xa_load(&ctx->vports.vport_gvmi_xa, vport);
+
+ if (!xa_is_value(entry)) {
+ ret = hws_vport_add_gvmi(ctx, vport);
+ if (ret && ret != -EBUSY)
+ return ret;
+ goto load_entry;
+ }
+
+ *vport_gvmi = (u16)xa_to_value(entry);
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/vport.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/vport.h
new file mode 100644
index 000000000000..0912fc166b3a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/vport.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_VPORT_H_
+#define MLX5HWS_VPORT_H_
+
+int mlx5hws_vport_init_vports(struct mlx5hws_context *ctx);
+
+void mlx5hws_vport_uninit_vports(struct mlx5hws_context *ctx);
+
+int mlx5hws_vport_get_gvmi(struct mlx5hws_context *ctx, u16 vport, u16 *vport_gvmi);
+
+#endif /* MLX5HWS_VPORT_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_action.c
index 2ebb61ef3ea9..2ebb61ef3ea9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_action.c
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_arg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_arg.c
index 01ed6442095d..01ed6442095d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_arg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_arg.c
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_buddy.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_buddy.c
index fe228d948b47..fe228d948b47 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_buddy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_buddy.c
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_cmd.c
index 8c2a34a0d6be..1ebb2b15c080 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_cmd.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2019 Mellanox Technologies. */
#include "dr_types.h"
+#include "eswitch.h"
int mlx5dr_cmd_query_esw_vport_context(struct mlx5_core_dev *mdev,
bool other_vport,
@@ -34,34 +35,21 @@ int mlx5dr_cmd_query_esw_vport_context(struct mlx5_core_dev *mdev,
int mlx5dr_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_vport,
u16 vport_number, u16 *gvmi)
{
- bool ec_vf_func = other_vport ? mlx5_core_is_ec_vf_vport(mdev, vport_number) : false;
- u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
- int out_size;
- void *out;
int err;
- out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out);
- out = kzalloc(out_size, GFP_KERNEL);
- if (!out)
- return -ENOMEM;
-
- MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
- MLX5_SET(query_hca_cap_in, in, other_function, other_vport);
- MLX5_SET(query_hca_cap_in, in, function_id, mlx5_vport_to_func_id(mdev, vport_number, ec_vf_func));
- MLX5_SET(query_hca_cap_in, in, ec_vf_function, ec_vf_func);
- MLX5_SET(query_hca_cap_in, in, op_mod,
- MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1 |
- HCA_CAP_OPMOD_GET_CUR);
+ if (!other_vport) {
+ /* self vhca_id */
+ *gvmi = MLX5_CAP_GEN(mdev, vhca_id);
+ return 0;
+ }
- err = mlx5_cmd_exec_inout(mdev, query_hca_cap, in, out);
+ err = mlx5_vport_get_vhca_id(mdev, vport_number, gvmi);
if (err) {
- kfree(out);
+ mlx5_core_err(mdev, "Failed to get vport vhca id for vport %d\n",
+ vport_number);
return err;
}
- *gvmi = MLX5_GET(query_hca_cap_out, out, capability.cmd_hca_cap.vhca_id);
-
- kfree(out);
return 0;
}
@@ -251,9 +239,9 @@ int mlx5dr_cmd_query_flow_table(struct mlx5_core_dev *dev,
output->level = MLX5_GET(query_flow_table_out, out, flow_table_context.level);
output->sw_owner_icm_root_1 = MLX5_GET64(query_flow_table_out, out,
- flow_table_context.sw_owner_icm_root_1);
+ flow_table_context.sws.sw_owner_icm_root_1);
output->sw_owner_icm_root_0 = MLX5_GET64(query_flow_table_out, out,
- flow_table_context.sw_owner_icm_root_0);
+ flow_table_context.sws.sw_owner_icm_root_0);
return 0;
}
@@ -480,15 +468,15 @@ int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
*/
if (attr->table_type == MLX5_FLOW_TABLE_TYPE_NIC_RX) {
MLX5_SET64(flow_table_context, ft_mdev,
- sw_owner_icm_root_0, attr->icm_addr_rx);
+ sws.sw_owner_icm_root_0, attr->icm_addr_rx);
} else if (attr->table_type == MLX5_FLOW_TABLE_TYPE_NIC_TX) {
MLX5_SET64(flow_table_context, ft_mdev,
- sw_owner_icm_root_0, attr->icm_addr_tx);
+ sws.sw_owner_icm_root_0, attr->icm_addr_tx);
} else if (attr->table_type == MLX5_FLOW_TABLE_TYPE_FDB) {
MLX5_SET64(flow_table_context, ft_mdev,
- sw_owner_icm_root_0, attr->icm_addr_rx);
+ sws.sw_owner_icm_root_0, attr->icm_addr_rx);
MLX5_SET64(flow_table_context, ft_mdev,
- sw_owner_icm_root_1, attr->icm_addr_tx);
+ sws.sw_owner_icm_root_1, attr->icm_addr_tx);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.c
index 030a5776c937..030a5776c937 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.c
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.h
index 57c6b363b870..57c6b363b870 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.h
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_definer.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_definer.c
index d5ea97751945..d5ea97751945 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_definer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_definer.c
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c
index 3d74109f8230..e8c67ed9f748 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c
@@ -8,7 +8,7 @@
#define DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, dmn_type) \
((dmn)->info.caps.dmn_type##_sw_owner || \
((dmn)->info.caps.dmn_type##_sw_owner_v2 && \
- (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_7))
+ (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_8))
bool mlx5dr_domain_is_support_ptrn_arg(struct mlx5dr_domain *dmn)
{
@@ -297,7 +297,9 @@ dr_domain_add_vport_cap(struct mlx5dr_domain *dmn, u16 vport)
if (ret) {
mlx5dr_dbg(dmn, "Couldn't insert new vport into xarray (%d)\n", ret);
kvfree(vport_caps);
- return ERR_PTR(ret);
+ if (ret == -EBUSY)
+ return ERR_PTR(-EBUSY);
+ return NULL;
}
return vport_caps;
@@ -408,7 +410,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
switch (dmn->type) {
case MLX5DR_DOMAIN_TYPE_NIC_RX:
if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, rx))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
dmn->info.supp_sw_steering = true;
dmn->info.rx.type = DR_DOMAIN_NIC_TYPE_RX;
@@ -417,7 +419,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
break;
case MLX5DR_DOMAIN_TYPE_NIC_TX:
if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, tx))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
dmn->info.supp_sw_steering = true;
dmn->info.tx.type = DR_DOMAIN_NIC_TYPE_TX;
@@ -426,10 +428,10 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
break;
case MLX5DR_DOMAIN_TYPE_FDB:
if (!dmn->info.caps.eswitch_manager)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, fdb))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
dmn->info.rx.type = DR_DOMAIN_NIC_TYPE_RX;
dmn->info.tx.type = DR_DOMAIN_NIC_TYPE_TX;
@@ -514,30 +516,6 @@ def_xa_destroy:
return NULL;
}
-/* Assure synchronization of the device steering tables with updates made by SW
- * insertion.
- */
-int mlx5dr_domain_sync(struct mlx5dr_domain *dmn, u32 flags)
-{
- int ret = 0;
-
- if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_SW) {
- mlx5dr_domain_lock(dmn);
- ret = mlx5dr_send_ring_force_drain(dmn);
- mlx5dr_domain_unlock(dmn);
- if (ret) {
- mlx5dr_err(dmn, "Force drain failed flags: %d, ret: %d\n",
- flags, ret);
- return ret;
- }
- }
-
- if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_HW)
- ret = mlx5dr_cmd_sync_steering(dmn->mdev);
-
- return ret;
-}
-
int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
{
if (WARN_ON_ONCE(refcount_read(&dmn->refcount) > 1))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_fw.c
index f05ef0cd54ba..f05ef0cd54ba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_fw.c
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_icm_pool.c
index 0b5af9f3f605..0b5af9f3f605 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_icm_pool.c
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_matcher.c
index 0726848eb3ff..0726848eb3ff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_matcher.c
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ptrn.c
index 8ca534ef5d03..8ca534ef5d03 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ptrn.c
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_rule.c
index 042ca0349124..d1db04baa1fa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_rule.c
@@ -7,7 +7,7 @@
 /* don't try to optimize STE allocation if the stack is too constraining */
#define DR_RULE_MAX_STES_OPTIMIZED 0
#else
-#define DR_RULE_MAX_STES_OPTIMIZED 5
+#define DR_RULE_MAX_STES_OPTIMIZED 2
#endif
#define DR_RULE_MAX_STE_CHAIN_OPTIMIZED (DR_RULE_MAX_STES_OPTIMIZED + DR_ACTION_MAX_STES)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c
index 6fa06ba2d346..d034372fa047 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c
@@ -1049,12 +1049,6 @@ static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn)
return 0;
}
-static void dr_cq_complete(struct mlx5_core_cq *mcq,
- struct mlx5_eqe *eqe)
-{
- pr_err("CQ completion CQ: #%u\n", mcq->cqn);
-}
-
static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
struct mlx5_uars_page *uar,
size_t ncqe)
@@ -1067,7 +1061,6 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
int inlen, err, eqn;
void *cqc, *in;
__be64 *pas;
- int vector;
u32 i;
cq = kzalloc(sizeof(*cq), GFP_KERNEL);
@@ -1090,14 +1083,20 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
cqe->op_own = MLX5_CQE_INVALID << 4 | MLX5_CQE_OWNER_MASK;
}
+ cq->mcq.cqe_sz = 64;
+ cq->mcq.set_ci_db = cq->wq_ctrl.db.db;
+ cq->mcq.arm_db = cq->wq_ctrl.db.db + 1;
+ *cq->mcq.set_ci_db = 0;
+ cq->mcq.vector = 0;
+ cq->mdev = mdev;
+
inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
sizeof(u64) * cq->wq_ctrl.buf.npages;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
goto err_cqwq;
- vector = raw_smp_processor_id() % mlx5_comp_vectors_max(mdev);
- err = mlx5_comp_eqn_get(mdev, vector, &eqn);
+ err = mlx5_comp_eqn_get(mdev, 0, &eqn);
if (err) {
kvfree(in);
goto err_cqwq;
@@ -1114,28 +1113,12 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas);
mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, pas);
- cq->mcq.comp = dr_cq_complete;
-
err = mlx5_core_create_cq(mdev, &cq->mcq, in, inlen, out, sizeof(out));
kvfree(in);
if (err)
goto err_cqwq;
- cq->mcq.cqe_sz = 64;
- cq->mcq.set_ci_db = cq->wq_ctrl.db.db;
- cq->mcq.arm_db = cq->wq_ctrl.db.db + 1;
- *cq->mcq.set_ci_db = 0;
-
- /* set no-zero value, in order to avoid the HW to run db-recovery on
- * CQ that used in polling mode.
- */
- *cq->mcq.arm_db = cpu_to_be32(2 << 28);
-
- cq->mcq.vector = 0;
- cq->mcq.uar = uar;
- cq->mdev = mdev;
-
return cq;
err_cqwq:
@@ -1333,36 +1316,3 @@ void mlx5dr_send_ring_free(struct mlx5dr_domain *dmn,
kfree(send_ring->sync_buff);
kfree(send_ring);
}
-
-int mlx5dr_send_ring_force_drain(struct mlx5dr_domain *dmn)
-{
- struct mlx5dr_send_ring *send_ring = dmn->send_ring;
- struct postsend_info send_info = {};
- u8 data[DR_STE_SIZE];
- int num_of_sends_req;
- int ret;
- int i;
-
- /* Sending this amount of requests makes sure we will get drain */
- num_of_sends_req = send_ring->signal_th * TH_NUMS_TO_DRAIN / 2;
-
- /* Send fake requests forcing the last to be signaled */
- send_info.write.addr = (uintptr_t)data;
- send_info.write.length = DR_STE_SIZE;
- send_info.write.lkey = 0;
- /* Using the sync_mr in order to write/read */
- send_info.remote_addr = (uintptr_t)send_ring->sync_mr->addr;
- send_info.rkey = send_ring->sync_mr->mkey;
-
- for (i = 0; i < num_of_sends_req; i++) {
- ret = dr_postsend_icm_data(dmn, &send_info);
- if (ret)
- return ret;
- }
-
- spin_lock(&send_ring->lock);
- ret = dr_handle_pending_wc(dmn, send_ring);
- spin_unlock(&send_ring->lock);
-
- return ret;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.c
index e94fbb015efa..c8b8ff80c7c7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.c
@@ -555,7 +555,7 @@ void mlx5dr_ste_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_actions_attr *attr,
u32 *added_stes)
{
- ste_ctx->set_actions_tx(dmn, action_type_set, ste_ctx->actions_caps,
+ ste_ctx->set_actions_tx(ste_ctx, dmn, action_type_set, ste_ctx->actions_caps,
hw_ste_arr, attr, added_stes);
}
@@ -566,7 +566,7 @@ void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_actions_attr *attr,
u32 *added_stes)
{
- ste_ctx->set_actions_rx(dmn, action_type_set, ste_ctx->actions_caps,
+ ste_ctx->set_actions_rx(ste_ctx, dmn, action_type_set, ste_ctx->actions_caps,
hw_ste_arr, attr, added_stes);
}
@@ -1458,6 +1458,8 @@ struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version)
return mlx5dr_ste_get_ctx_v1();
else if (version == MLX5_STEERING_FORMAT_CONNECTX_7)
return mlx5dr_ste_get_ctx_v2();
+ else if (version == MLX5_STEERING_FORMAT_CONNECTX_8)
+ return mlx5dr_ste_get_ctx_v3();
return NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h
index 54a6619c3ecb..3d5afc832fa5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h
@@ -160,13 +160,15 @@ struct mlx5dr_ste_ctx {
/* Actions */
u32 actions_caps;
- void (*set_actions_rx)(struct mlx5dr_domain *dmn,
+ void (*set_actions_rx)(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
u8 *action_type_set,
u32 actions_caps,
u8 *hw_ste_arr,
struct mlx5dr_ste_actions_attr *attr,
u32 *added_stes);
- void (*set_actions_tx)(struct mlx5dr_domain *dmn,
+ void (*set_actions_tx)(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
u8 *action_type_set,
u32 actions_caps,
u8 *hw_ste_arr,
@@ -197,7 +199,21 @@ struct mlx5dr_ste_ctx {
u16 *used_hw_action_num);
int (*alloc_modify_hdr_chunk)(struct mlx5dr_action *action);
void (*dealloc_modify_hdr_chunk)(struct mlx5dr_action *action);
-
+ /* Actions bit set */
+ void (*set_encap)(u8 *hw_ste_p, u8 *d_action,
+ u32 reformat_id, int size);
+ void (*set_push_vlan)(u8 *ste, u8 *d_action,
+ u32 vlan_hdr);
+ void (*set_pop_vlan)(u8 *hw_ste_p, u8 *s_action,
+ u8 vlans_num);
+ void (*set_rx_decap)(u8 *hw_ste_p, u8 *s_action);
+ void (*set_encap_l3)(u8 *hw_ste_p, u8 *frst_s_action,
+ u8 *scnd_d_action, u32 reformat_id,
+ int size);
+ void (*set_insert_hdr)(u8 *hw_ste_p, u8 *d_action, u32 reformat_id,
+ u8 anchor, u8 offset, int size);
+ void (*set_remove_hdr)(u8 *hw_ste_p, u8 *s_action, u8 anchor,
+ u8 offset, int size);
/* Send */
void (*prepare_for_postsend)(u8 *hw_ste_p, u32 ste_size);
};
@@ -205,5 +221,6 @@ struct mlx5dr_ste_ctx {
struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v0(void);
struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v1(void);
struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v2(void);
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v3(void);
#endif /* _DR_STE_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v0.c
index f708b029425a..42536bee55e2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v0.c
@@ -406,7 +406,8 @@ static void dr_ste_v0_arr_init_next(u8 **last_ste,
}
static void
-dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,
+dr_ste_v0_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
u8 *action_type_set,
u32 actions_caps,
u8 *last_ste,
@@ -476,7 +477,8 @@ dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,
}
static void
-dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn,
+dr_ste_v0_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
u8 *action_type_set,
u32 actions_caps,
u8 *last_ste,
@@ -1883,7 +1885,7 @@ dr_ste_v0_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb,
static int dr_ste_v0_build_tnl_header_0_1_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- uint8_t *tag)
+ u8 *tag)
{
struct mlx5dr_match_misc5 *misc5 = &value->misc5;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c
index dd856cde188d..6447efbae00d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c
@@ -5,136 +5,6 @@
#include "mlx5_ifc_dr_ste_v1.h"
#include "dr_ste_v1.h"
-#define DR_STE_CALC_DFNR_TYPE(lookup_type, inner) \
- ((inner) ? DR_STE_V1_LU_TYPE_##lookup_type##_I : \
- DR_STE_V1_LU_TYPE_##lookup_type##_O)
-
-enum dr_ste_v1_entry_format {
- DR_STE_V1_TYPE_BWC_BYTE = 0x0,
- DR_STE_V1_TYPE_BWC_DW = 0x1,
- DR_STE_V1_TYPE_MATCH = 0x2,
- DR_STE_V1_TYPE_MATCH_RANGES = 0x7,
-};
-
-/* Lookup type is built from 2B: [ Definer mode 1B ][ Definer index 1B ] */
-enum {
- DR_STE_V1_LU_TYPE_NOP = 0x0000,
- DR_STE_V1_LU_TYPE_ETHL2_TNL = 0x0002,
- DR_STE_V1_LU_TYPE_IBL3_EXT = 0x0102,
- DR_STE_V1_LU_TYPE_ETHL2_O = 0x0003,
- DR_STE_V1_LU_TYPE_IBL4 = 0x0103,
- DR_STE_V1_LU_TYPE_ETHL2_I = 0x0004,
- DR_STE_V1_LU_TYPE_SRC_QP_GVMI = 0x0104,
- DR_STE_V1_LU_TYPE_ETHL2_SRC_O = 0x0005,
- DR_STE_V1_LU_TYPE_ETHL2_HEADERS_O = 0x0105,
- DR_STE_V1_LU_TYPE_ETHL2_SRC_I = 0x0006,
- DR_STE_V1_LU_TYPE_ETHL2_HEADERS_I = 0x0106,
- DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_O = 0x0007,
- DR_STE_V1_LU_TYPE_IPV6_DES_O = 0x0107,
- DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_I = 0x0008,
- DR_STE_V1_LU_TYPE_IPV6_DES_I = 0x0108,
- DR_STE_V1_LU_TYPE_ETHL4_O = 0x0009,
- DR_STE_V1_LU_TYPE_IPV6_SRC_O = 0x0109,
- DR_STE_V1_LU_TYPE_ETHL4_I = 0x000a,
- DR_STE_V1_LU_TYPE_IPV6_SRC_I = 0x010a,
- DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_O = 0x000b,
- DR_STE_V1_LU_TYPE_MPLS_O = 0x010b,
- DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_I = 0x000c,
- DR_STE_V1_LU_TYPE_MPLS_I = 0x010c,
- DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_O = 0x000d,
- DR_STE_V1_LU_TYPE_GRE = 0x010d,
- DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER = 0x000e,
- DR_STE_V1_LU_TYPE_GENERAL_PURPOSE = 0x010e,
- DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_I = 0x000f,
- DR_STE_V1_LU_TYPE_STEERING_REGISTERS_0 = 0x010f,
- DR_STE_V1_LU_TYPE_STEERING_REGISTERS_1 = 0x0110,
- DR_STE_V1_LU_TYPE_FLEX_PARSER_OK = 0x0011,
- DR_STE_V1_LU_TYPE_FLEX_PARSER_0 = 0x0111,
- DR_STE_V1_LU_TYPE_FLEX_PARSER_1 = 0x0112,
- DR_STE_V1_LU_TYPE_ETHL4_MISC_O = 0x0113,
- DR_STE_V1_LU_TYPE_ETHL4_MISC_I = 0x0114,
- DR_STE_V1_LU_TYPE_INVALID = 0x00ff,
- DR_STE_V1_LU_TYPE_DONT_CARE = MLX5DR_STE_LU_TYPE_DONT_CARE,
-};
-
-enum dr_ste_v1_header_anchors {
- DR_STE_HEADER_ANCHOR_START_OUTER = 0x00,
- DR_STE_HEADER_ANCHOR_1ST_VLAN = 0x02,
- DR_STE_HEADER_ANCHOR_IPV6_IPV4 = 0x07,
- DR_STE_HEADER_ANCHOR_INNER_MAC = 0x13,
- DR_STE_HEADER_ANCHOR_INNER_IPV6_IPV4 = 0x19,
-};
-
-enum dr_ste_v1_action_size {
- DR_STE_ACTION_SINGLE_SZ = 4,
- DR_STE_ACTION_DOUBLE_SZ = 8,
- DR_STE_ACTION_TRIPLE_SZ = 12,
-};
-
-enum dr_ste_v1_action_insert_ptr_attr {
- DR_STE_V1_ACTION_INSERT_PTR_ATTR_NONE = 0, /* Regular push header (e.g. push vlan) */
- DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP = 1, /* Encapsulation / Tunneling */
- DR_STE_V1_ACTION_INSERT_PTR_ATTR_ESP = 2, /* IPsec */
-};
-
-enum dr_ste_v1_action_id {
- DR_STE_V1_ACTION_ID_NOP = 0x00,
- DR_STE_V1_ACTION_ID_COPY = 0x05,
- DR_STE_V1_ACTION_ID_SET = 0x06,
- DR_STE_V1_ACTION_ID_ADD = 0x07,
- DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE = 0x08,
- DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER = 0x09,
- DR_STE_V1_ACTION_ID_INSERT_INLINE = 0x0a,
- DR_STE_V1_ACTION_ID_INSERT_POINTER = 0x0b,
- DR_STE_V1_ACTION_ID_FLOW_TAG = 0x0c,
- DR_STE_V1_ACTION_ID_QUEUE_ID_SEL = 0x0d,
- DR_STE_V1_ACTION_ID_ACCELERATED_LIST = 0x0e,
- DR_STE_V1_ACTION_ID_MODIFY_LIST = 0x0f,
- DR_STE_V1_ACTION_ID_ASO = 0x12,
- DR_STE_V1_ACTION_ID_TRAILER = 0x13,
- DR_STE_V1_ACTION_ID_COUNTER_ID = 0x14,
- DR_STE_V1_ACTION_ID_MAX = 0x21,
- /* use for special cases */
- DR_STE_V1_ACTION_ID_SPECIAL_ENCAP_L3 = 0x22,
-};
-
-enum {
- DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_0 = 0x00,
- DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1 = 0x01,
- DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_2 = 0x02,
- DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0 = 0x08,
- DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_1 = 0x09,
- DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0 = 0x0e,
- DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0 = 0x18,
- DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_1 = 0x19,
- DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_0 = 0x40,
- DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_1 = 0x41,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_0 = 0x44,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_1 = 0x45,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_2 = 0x46,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_3 = 0x47,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_0 = 0x4c,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_1 = 0x4d,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_2 = 0x4e,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_3 = 0x4f,
- DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0 = 0x5e,
- DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_1 = 0x5f,
- DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_0 = 0x6f,
- DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_1 = 0x70,
- DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE = 0x7b,
- DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE = 0x7c,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_0 = 0x8c,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_1 = 0x8d,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_0 = 0x8e,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_1 = 0x8f,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_0 = 0x90,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_1 = 0x91,
-};
-
-enum dr_ste_v1_aso_ctx_type {
- DR_STE_V1_ASO_CTX_TYPE_POLICERS = 0x2,
-};
-
static const struct mlx5dr_ste_action_modify_field dr_ste_v1_action_modify_field_arr[] = {
[MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 31,
@@ -379,13 +249,12 @@ static void dr_ste_v1_set_counter_id(u8 *hw_ste_p, u32 ctr_id)
MLX5_SET(ste_match_bwc_v1, hw_ste_p, counter_id, ctr_id);
}
-static void dr_ste_v1_set_reparse(u8 *hw_ste_p)
+void dr_ste_v1_set_reparse(u8 *hw_ste_p)
{
MLX5_SET(ste_match_bwc_v1, hw_ste_p, reparse, 1);
}
-static void dr_ste_v1_set_encap(u8 *hw_ste_p, u8 *d_action,
- u32 reformat_id, int size)
+void dr_ste_v1_set_encap(u8 *hw_ste_p, u8 *d_action, u32 reformat_id, int size)
{
MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, action_id,
DR_STE_V1_ACTION_ID_INSERT_POINTER);
@@ -397,10 +266,10 @@ static void dr_ste_v1_set_encap(u8 *hw_ste_p, u8 *d_action,
dr_ste_v1_set_reparse(hw_ste_p);
}
-static void dr_ste_v1_set_insert_hdr(u8 *hw_ste_p, u8 *d_action,
- u32 reformat_id,
- u8 anchor, u8 offset,
- int size)
+void dr_ste_v1_set_insert_hdr(u8 *hw_ste_p, u8 *d_action,
+ u32 reformat_id,
+ u8 anchor, u8 offset,
+ int size)
{
MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action,
action_id, DR_STE_V1_ACTION_ID_INSERT_POINTER);
@@ -417,9 +286,9 @@ static void dr_ste_v1_set_insert_hdr(u8 *hw_ste_p, u8 *d_action,
dr_ste_v1_set_reparse(hw_ste_p);
}
-static void dr_ste_v1_set_remove_hdr(u8 *hw_ste_p, u8 *s_action,
- u8 anchor, u8 offset,
- int size)
+void dr_ste_v1_set_remove_hdr(u8 *hw_ste_p, u8 *s_action,
+ u8 anchor, u8 offset,
+ int size)
{
MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
@@ -432,8 +301,7 @@ static void dr_ste_v1_set_remove_hdr(u8 *hw_ste_p, u8 *s_action,
dr_ste_v1_set_reparse(hw_ste_p);
}
-static void dr_ste_v1_set_push_vlan(u8 *hw_ste_p, u8 *d_action,
- u32 vlan_hdr)
+void dr_ste_v1_set_push_vlan(u8 *hw_ste_p, u8 *d_action, u32 vlan_hdr)
{
MLX5_SET(ste_double_action_insert_with_inline_v1, d_action,
action_id, DR_STE_V1_ACTION_ID_INSERT_INLINE);
@@ -446,7 +314,7 @@ static void dr_ste_v1_set_push_vlan(u8 *hw_ste_p, u8 *d_action,
dr_ste_v1_set_reparse(hw_ste_p);
}
-static void dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num)
+void dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num)
{
MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
@@ -459,11 +327,8 @@ static void dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num)
dr_ste_v1_set_reparse(hw_ste_p);
}
-static void dr_ste_v1_set_encap_l3(u8 *hw_ste_p,
- u8 *frst_s_action,
- u8 *scnd_d_action,
- u32 reformat_id,
- int size)
+void dr_ste_v1_set_encap_l3(u8 *hw_ste_p, u8 *frst_s_action, u8 *scnd_d_action,
+ u32 reformat_id, int size)
{
/* Remove L2 headers */
MLX5_SET(ste_single_action_remove_header_v1, frst_s_action, action_id,
@@ -483,7 +348,7 @@ static void dr_ste_v1_set_encap_l3(u8 *hw_ste_p,
dr_ste_v1_set_reparse(hw_ste_p);
}
-static void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action)
+void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action)
{
MLX5_SET(ste_single_action_remove_header_v1, s_action, action_id,
DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
@@ -620,7 +485,8 @@ static void dr_ste_v1_arr_init_next_match_range(u8 **last_ste,
dr_ste_v1_set_entry_type(*last_ste, DR_STE_V1_TYPE_MATCH_RANGES);
}
-void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
+void dr_ste_v1_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
u8 *action_type_set,
u32 actions_caps,
u8 *last_ste,
@@ -640,7 +506,7 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
- dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count);
+ ste_ctx->set_pop_vlan(last_ste, action, attr->vlans.count);
action_sz -= DR_STE_ACTION_SINGLE_SZ;
action += DR_STE_ACTION_SINGLE_SZ;
@@ -677,8 +543,8 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
action_sz = DR_STE_ACTION_TRIPLE_SZ;
allow_encap = true;
}
- dr_ste_v1_set_push_vlan(last_ste, action,
- attr->vlans.headers[i]);
+ ste_ctx->set_push_vlan(last_ste, action,
+ attr->vlans.headers[i]);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
}
@@ -691,9 +557,9 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
action_sz = DR_STE_ACTION_TRIPLE_SZ;
allow_encap = true;
}
- dr_ste_v1_set_encap(last_ste, action,
- attr->reformat.id,
- attr->reformat.size);
+ ste_ctx->set_encap(last_ste, action,
+ attr->reformat.id,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
} else if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]) {
@@ -706,10 +572,10 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
}
d_action = action + DR_STE_ACTION_SINGLE_SZ;
- dr_ste_v1_set_encap_l3(last_ste,
- action, d_action,
- attr->reformat.id,
- attr->reformat.size);
+ ste_ctx->set_encap_l3(last_ste,
+ action, d_action,
+ attr->reformat.id,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_TRIPLE_SZ;
action += DR_STE_ACTION_TRIPLE_SZ;
} else if (action_type_set[DR_ACTION_TYP_INSERT_HDR]) {
@@ -718,11 +584,11 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
- dr_ste_v1_set_insert_hdr(last_ste, action,
- attr->reformat.id,
- attr->reformat.param_0,
- attr->reformat.param_1,
- attr->reformat.size);
+ ste_ctx->set_insert_hdr(last_ste, action,
+ attr->reformat.id,
+ attr->reformat.param_0,
+ attr->reformat.param_1,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
} else if (action_type_set[DR_ACTION_TYP_REMOVE_HDR]) {
@@ -731,10 +597,10 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
- dr_ste_v1_set_remove_hdr(last_ste, action,
- attr->reformat.param_0,
- attr->reformat.param_1,
- attr->reformat.size);
+ ste_ctx->set_remove_hdr(last_ste, action,
+ attr->reformat.param_0,
+ attr->reformat.param_1,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_SINGLE_SZ;
action += DR_STE_ACTION_SINGLE_SZ;
}
@@ -776,7 +642,8 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
-void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
+void dr_ste_v1_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
u8 *action_type_set,
u32 actions_caps,
u8 *last_ste,
@@ -799,7 +666,7 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
allow_modify_hdr = false;
allow_ctr = false;
} else if (action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2]) {
- dr_ste_v1_set_rx_decap(last_ste, action);
+ ste_ctx->set_rx_decap(last_ste, action);
action_sz -= DR_STE_ACTION_SINGLE_SZ;
action += DR_STE_ACTION_SINGLE_SZ;
allow_modify_hdr = false;
@@ -827,7 +694,7 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
- dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count);
+ ste_ctx->set_pop_vlan(last_ste, action, attr->vlans.count);
action_sz -= DR_STE_ACTION_SINGLE_SZ;
action += DR_STE_ACTION_SINGLE_SZ;
allow_ctr = false;
@@ -868,8 +735,8 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
- dr_ste_v1_set_push_vlan(last_ste, action,
- attr->vlans.headers[i]);
+ ste_ctx->set_push_vlan(last_ste, action,
+ attr->vlans.headers[i]);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
}
@@ -895,9 +762,9 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
- dr_ste_v1_set_encap(last_ste, action,
- attr->reformat.id,
- attr->reformat.size);
+ ste_ctx->set_encap(last_ste, action,
+ attr->reformat.id,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
allow_modify_hdr = false;
@@ -912,10 +779,10 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
d_action = action + DR_STE_ACTION_SINGLE_SZ;
- dr_ste_v1_set_encap_l3(last_ste,
- action, d_action,
- attr->reformat.id,
- attr->reformat.size);
+ ste_ctx->set_encap_l3(last_ste,
+ action, d_action,
+ attr->reformat.id,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_TRIPLE_SZ;
allow_modify_hdr = false;
} else if (action_type_set[DR_ACTION_TYP_INSERT_HDR]) {
@@ -925,11 +792,11 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
- dr_ste_v1_set_insert_hdr(last_ste, action,
- attr->reformat.id,
- attr->reformat.param_0,
- attr->reformat.param_1,
- attr->reformat.size);
+ ste_ctx->set_insert_hdr(last_ste, action,
+ attr->reformat.id,
+ attr->reformat.param_0,
+ attr->reformat.param_1,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
allow_modify_hdr = false;
@@ -941,10 +808,10 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
allow_modify_hdr = true;
allow_ctr = true;
}
- dr_ste_v1_set_remove_hdr(last_ste, action,
- attr->reformat.param_0,
- attr->reformat.param_1,
- attr->reformat.size);
+ ste_ctx->set_remove_hdr(last_ste, action,
+ attr->reformat.param_0,
+ attr->reformat.param_1,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_SINGLE_SZ;
action += DR_STE_ACTION_SINGLE_SZ;
}
@@ -1027,9 +894,6 @@ void dr_ste_v1_set_action_copy(u8 *d_action,
MLX5_SET(ste_double_action_copy_v1, d_action, source_right_shifter, src_shifter);
}
-#define DR_STE_DECAP_L3_ACTION_NUM 8
-#define DR_STE_L2_HDR_MAX_SZ 20
-
int dr_ste_v1_set_action_decap_l3_list(void *data,
u32 data_sz,
u8 *hw_action,
@@ -1897,7 +1761,7 @@ void dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb,
static int dr_ste_v1_build_tnl_header_0_1_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- uint8_t *tag)
+ u8 *tag)
{
struct mlx5dr_match_misc5 *misc5 = &value->misc5;
@@ -2129,7 +1993,7 @@ dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb,
static int
dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- uint8_t *tag)
+ u8 *tag)
{
u8 parser_id = sb->caps->flex_parser_id_geneve_tlv_option_0;
struct mlx5dr_match_misc *misc = &value->misc;
@@ -2330,7 +2194,14 @@ static struct mlx5dr_ste_ctx ste_ctx_v1 = {
.set_action_decap_l3_list = &dr_ste_v1_set_action_decap_l3_list,
.alloc_modify_hdr_chunk = &dr_ste_v1_alloc_modify_hdr_ptrn_arg,
.dealloc_modify_hdr_chunk = &dr_ste_v1_free_modify_hdr_ptrn_arg,
-
+ /* Actions bit set */
+ .set_encap = &dr_ste_v1_set_encap,
+ .set_push_vlan = &dr_ste_v1_set_push_vlan,
+ .set_pop_vlan = &dr_ste_v1_set_pop_vlan,
+ .set_rx_decap = &dr_ste_v1_set_rx_decap,
+ .set_encap_l3 = &dr_ste_v1_set_encap_l3,
+ .set_insert_hdr = &dr_ste_v1_set_insert_hdr,
+ .set_remove_hdr = &dr_ste_v1_set_remove_hdr,
/* Send */
.prepare_for_postsend = &dr_ste_v1_prepare_for_postsend,
};
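Note: the hunks above stop calling the v1 action helpers directly and instead dispatch through the new per-format callbacks in struct mlx5dr_ste_ctx, so later STE formats only need to override the action encoders. A minimal sketch of that dispatch pattern, using simplified stand-in types rather than the kernel's real structures:

/* Simplified illustration (stand-in types, not the kernel structures):
 * the shared v1 control flow still decides *which* actions to emit,
 * while the per-context callbacks decide *how* each action is encoded,
 * so dr_ste_v3.c below can override only the encoders.
 */
#include <stdint.h>

struct ste_ctx {
	void (*set_pop_vlan)(uint8_t *ste, uint8_t *action, uint8_t vlans_num);
	void (*set_push_vlan)(uint8_t *ste, uint8_t *action, uint32_t vlan_hdr);
};

static void build_tx_actions(struct ste_ctx *ctx, uint8_t *ste, uint8_t *action,
			     uint8_t pop_count, uint32_t vlan_hdr)
{
	if (pop_count)
		ctx->set_pop_vlan(ste, action, pop_count);	/* was dr_ste_v1_set_pop_vlan() */
	if (vlan_hdr)
		ctx->set_push_vlan(ste, action, vlan_hdr);	/* was dr_ste_v1_set_push_vlan() */
}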
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h
new file mode 100644
index 000000000000..591c20c95a6a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h
@@ -0,0 +1,240 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef _DR_STE_V1_
+#define _DR_STE_V1_
+
+#include "dr_types.h"
+#include "dr_ste.h"
+
+#define DR_STE_DECAP_L3_ACTION_NUM 8
+#define DR_STE_L2_HDR_MAX_SZ 20
+#define DR_STE_CALC_DFNR_TYPE(lookup_type, inner) \
+ ((inner) ? DR_STE_V1_LU_TYPE_##lookup_type##_I : \
+ DR_STE_V1_LU_TYPE_##lookup_type##_O)
+
+enum dr_ste_v1_entry_format {
+ DR_STE_V1_TYPE_BWC_BYTE = 0x0,
+ DR_STE_V1_TYPE_BWC_DW = 0x1,
+ DR_STE_V1_TYPE_MATCH = 0x2,
+ DR_STE_V1_TYPE_MATCH_RANGES = 0x7,
+};
+
+/* Lookup type is built from 2B: [ Definer mode 1B ][ Definer index 1B ] */
+enum {
+ DR_STE_V1_LU_TYPE_NOP = 0x0000,
+ DR_STE_V1_LU_TYPE_ETHL2_TNL = 0x0002,
+ DR_STE_V1_LU_TYPE_IBL3_EXT = 0x0102,
+ DR_STE_V1_LU_TYPE_ETHL2_O = 0x0003,
+ DR_STE_V1_LU_TYPE_IBL4 = 0x0103,
+ DR_STE_V1_LU_TYPE_ETHL2_I = 0x0004,
+ DR_STE_V1_LU_TYPE_SRC_QP_GVMI = 0x0104,
+ DR_STE_V1_LU_TYPE_ETHL2_SRC_O = 0x0005,
+ DR_STE_V1_LU_TYPE_ETHL2_HEADERS_O = 0x0105,
+ DR_STE_V1_LU_TYPE_ETHL2_SRC_I = 0x0006,
+ DR_STE_V1_LU_TYPE_ETHL2_HEADERS_I = 0x0106,
+ DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_O = 0x0007,
+ DR_STE_V1_LU_TYPE_IPV6_DES_O = 0x0107,
+ DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_I = 0x0008,
+ DR_STE_V1_LU_TYPE_IPV6_DES_I = 0x0108,
+ DR_STE_V1_LU_TYPE_ETHL4_O = 0x0009,
+ DR_STE_V1_LU_TYPE_IPV6_SRC_O = 0x0109,
+ DR_STE_V1_LU_TYPE_ETHL4_I = 0x000a,
+ DR_STE_V1_LU_TYPE_IPV6_SRC_I = 0x010a,
+ DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_O = 0x000b,
+ DR_STE_V1_LU_TYPE_MPLS_O = 0x010b,
+ DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_I = 0x000c,
+ DR_STE_V1_LU_TYPE_MPLS_I = 0x010c,
+ DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_O = 0x000d,
+ DR_STE_V1_LU_TYPE_GRE = 0x010d,
+ DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER = 0x000e,
+ DR_STE_V1_LU_TYPE_GENERAL_PURPOSE = 0x010e,
+ DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_I = 0x000f,
+ DR_STE_V1_LU_TYPE_STEERING_REGISTERS_0 = 0x010f,
+ DR_STE_V1_LU_TYPE_STEERING_REGISTERS_1 = 0x0110,
+ DR_STE_V1_LU_TYPE_FLEX_PARSER_OK = 0x0011,
+ DR_STE_V1_LU_TYPE_FLEX_PARSER_0 = 0x0111,
+ DR_STE_V1_LU_TYPE_FLEX_PARSER_1 = 0x0112,
+ DR_STE_V1_LU_TYPE_ETHL4_MISC_O = 0x0113,
+ DR_STE_V1_LU_TYPE_ETHL4_MISC_I = 0x0114,
+ DR_STE_V1_LU_TYPE_INVALID = 0x00ff,
+ DR_STE_V1_LU_TYPE_DONT_CARE = MLX5DR_STE_LU_TYPE_DONT_CARE,
+};
+
+enum dr_ste_v1_header_anchors {
+ DR_STE_HEADER_ANCHOR_START_OUTER = 0x00,
+ DR_STE_HEADER_ANCHOR_1ST_VLAN = 0x02,
+ DR_STE_HEADER_ANCHOR_IPV6_IPV4 = 0x07,
+ DR_STE_HEADER_ANCHOR_INNER_MAC = 0x13,
+ DR_STE_HEADER_ANCHOR_INNER_IPV6_IPV4 = 0x19,
+};
+
+enum dr_ste_v1_action_size {
+ DR_STE_ACTION_SINGLE_SZ = 4,
+ DR_STE_ACTION_DOUBLE_SZ = 8,
+ DR_STE_ACTION_TRIPLE_SZ = 12,
+};
+
+enum dr_ste_v1_action_insert_ptr_attr {
+ DR_STE_V1_ACTION_INSERT_PTR_ATTR_NONE = 0, /* Regular push header (e.g. push vlan) */
+ DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP = 1, /* Encapsulation / Tunneling */
+ DR_STE_V1_ACTION_INSERT_PTR_ATTR_ESP = 2, /* IPsec */
+};
+
+enum dr_ste_v1_action_id {
+ DR_STE_V1_ACTION_ID_NOP = 0x00,
+ DR_STE_V1_ACTION_ID_COPY = 0x05,
+ DR_STE_V1_ACTION_ID_SET = 0x06,
+ DR_STE_V1_ACTION_ID_ADD = 0x07,
+ DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE = 0x08,
+ DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER = 0x09,
+ DR_STE_V1_ACTION_ID_INSERT_INLINE = 0x0a,
+ DR_STE_V1_ACTION_ID_INSERT_POINTER = 0x0b,
+ DR_STE_V1_ACTION_ID_FLOW_TAG = 0x0c,
+ DR_STE_V1_ACTION_ID_QUEUE_ID_SEL = 0x0d,
+ DR_STE_V1_ACTION_ID_ACCELERATED_LIST = 0x0e,
+ DR_STE_V1_ACTION_ID_MODIFY_LIST = 0x0f,
+ DR_STE_V1_ACTION_ID_ASO = 0x12,
+ DR_STE_V1_ACTION_ID_TRAILER = 0x13,
+ DR_STE_V1_ACTION_ID_COUNTER_ID = 0x14,
+ DR_STE_V1_ACTION_ID_MAX = 0x21,
+ /* use for special cases */
+ DR_STE_V1_ACTION_ID_SPECIAL_ENCAP_L3 = 0x22,
+};
+
+enum {
+ DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_0 = 0x00,
+ DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1 = 0x01,
+ DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_2 = 0x02,
+ DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0 = 0x08,
+ DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_1 = 0x09,
+ DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0 = 0x0e,
+ DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0 = 0x18,
+ DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_1 = 0x19,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_0 = 0x40,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_1 = 0x41,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_0 = 0x44,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_1 = 0x45,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_2 = 0x46,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_3 = 0x47,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_0 = 0x4c,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_1 = 0x4d,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_2 = 0x4e,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_3 = 0x4f,
+ DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0 = 0x5e,
+ DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_1 = 0x5f,
+ DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_0 = 0x6f,
+ DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_1 = 0x70,
+ DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE = 0x7b,
+ DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE = 0x7c,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_0 = 0x8c,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_1 = 0x8d,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_0 = 0x8e,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_1 = 0x8f,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_0 = 0x90,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_1 = 0x91,
+};
+
+enum dr_ste_v1_aso_ctx_type {
+ DR_STE_V1_ASO_CTX_TYPE_POLICERS = 0x2,
+};
+
+bool dr_ste_v1_is_miss_addr_set(u8 *hw_ste_p);
+void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr);
+u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p);
+void dr_ste_v1_set_byte_mask(u8 *hw_ste_p, u16 byte_mask);
+u16 dr_ste_v1_get_byte_mask(u8 *hw_ste_p);
+void dr_ste_v1_set_next_lu_type(u8 *hw_ste_p, u16 lu_type);
+u16 dr_ste_v1_get_next_lu_type(u8 *hw_ste_p);
+void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size);
+void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type, bool is_rx, u16 gvmi);
+void dr_ste_v1_prepare_for_postsend(u8 *hw_ste_p, u32 ste_size);
+void dr_ste_v1_set_reparse(u8 *hw_ste_p);
+void dr_ste_v1_set_encap(u8 *hw_ste_p, u8 *d_action, u32 reformat_id, int size);
+void dr_ste_v1_set_push_vlan(u8 *hw_ste_p, u8 *d_action, u32 vlan_hdr);
+void dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num);
+void dr_ste_v1_set_encap_l3(u8 *hw_ste_p, u8 *frst_s_action, u8 *scnd_d_action,
+ u32 reformat_id, int size);
+void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action);
+void dr_ste_v1_set_insert_hdr(u8 *hw_ste_p, u8 *d_action, u32 reformat_id,
+ u8 anchor, u8 offset, int size);
+void dr_ste_v1_set_remove_hdr(u8 *hw_ste_p, u8 *s_action, u8 anchor,
+ u8 offset, int size);
+void dr_ste_v1_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx, struct mlx5dr_domain *dmn,
+ u8 *action_type_set, u32 actions_caps, u8 *last_ste,
+ struct mlx5dr_ste_actions_attr *attr, u32 *added_stes);
+void dr_ste_v1_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx, struct mlx5dr_domain *dmn,
+ u8 *action_type_set, u32 actions_caps, u8 *last_ste,
+ struct mlx5dr_ste_actions_attr *attr, u32 *added_stes);
+void dr_ste_v1_set_action_set(u8 *d_action, u8 hw_field, u8 shifter,
+ u8 length, u32 data);
+void dr_ste_v1_set_action_add(u8 *d_action, u8 hw_field, u8 shifter,
+ u8 length, u32 data);
+void dr_ste_v1_set_action_copy(u8 *d_action, u8 dst_hw_field, u8 dst_shifter,
+ u8 dst_len, u8 src_hw_field, u8 src_shifter);
+int dr_ste_v1_set_action_decap_l3_list(void *data, u32 data_sz, u8 *hw_action,
+ u32 hw_action_sz, u16 *used_hw_action_num);
+int dr_ste_v1_alloc_modify_hdr_ptrn_arg(struct mlx5dr_action *action);
+void dr_ste_v1_free_modify_hdr_ptrn_arg(struct mlx5dr_action *action);
+void dr_ste_v1_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l2_src_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l2_dst_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l2_tnl_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_mpls_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_tnl_gre_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_tnl_mpls_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_general_purpose_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l4_misc_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_register_0_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_register_1_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_flex_parser_0_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_flex_parser_1_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+
+#endif /* _DR_STE_V1_ */
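Note: a small worked example of the lookup-type encoding documented in the header above; the high byte is the definer mode, the low byte the definer index, and DR_STE_CALC_DFNR_TYPE() only picks the inner (_I) or outer (_O) variant. The helper macros below are illustrative, not part of the patch:

/* Worked example (values taken from the enum above). */
#define LU_MODE(lu)	(((lu) >> 8) & 0xff)	/* definer mode byte */
#define LU_INDEX(lu)	((lu) & 0xff)		/* definer index byte */

/* DR_STE_CALC_DFNR_TYPE(ETHL2_SRC, 1) selects DR_STE_V1_LU_TYPE_ETHL2_SRC_I
 * (0x0006): LU_MODE(0x0006) == 0x00, LU_INDEX(0x0006) == 0x06.
 * DR_STE_V1_LU_TYPE_IBL4 (0x0103): LU_MODE == 0x01, LU_INDEX == 0x03.
 */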
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c
new file mode 100644
index 000000000000..d0ebaf820d42
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include "dr_ste_v1.h"
+#include "dr_ste_v2.h"
+
+static struct mlx5dr_ste_ctx ste_ctx_v2 = {
+ /* Builders */
+ .build_eth_l2_src_dst_init = &dr_ste_v1_build_eth_l2_src_dst_init,
+ .build_eth_l3_ipv6_src_init = &dr_ste_v1_build_eth_l3_ipv6_src_init,
+ .build_eth_l3_ipv6_dst_init = &dr_ste_v1_build_eth_l3_ipv6_dst_init,
+ .build_eth_l3_ipv4_5_tuple_init = &dr_ste_v1_build_eth_l3_ipv4_5_tuple_init,
+ .build_eth_l2_src_init = &dr_ste_v1_build_eth_l2_src_init,
+ .build_eth_l2_dst_init = &dr_ste_v1_build_eth_l2_dst_init,
+ .build_eth_l2_tnl_init = &dr_ste_v1_build_eth_l2_tnl_init,
+ .build_eth_l3_ipv4_misc_init = &dr_ste_v1_build_eth_l3_ipv4_misc_init,
+ .build_eth_ipv6_l3_l4_init = &dr_ste_v1_build_eth_ipv6_l3_l4_init,
+ .build_mpls_init = &dr_ste_v1_build_mpls_init,
+ .build_tnl_gre_init = &dr_ste_v1_build_tnl_gre_init,
+ .build_tnl_mpls_init = &dr_ste_v1_build_tnl_mpls_init,
+ .build_tnl_mpls_over_udp_init = &dr_ste_v1_build_tnl_mpls_over_udp_init,
+ .build_tnl_mpls_over_gre_init = &dr_ste_v1_build_tnl_mpls_over_gre_init,
+ .build_icmp_init = &dr_ste_v1_build_icmp_init,
+ .build_general_purpose_init = &dr_ste_v1_build_general_purpose_init,
+ .build_eth_l4_misc_init = &dr_ste_v1_build_eth_l4_misc_init,
+ .build_tnl_vxlan_gpe_init = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init,
+ .build_tnl_geneve_init = &dr_ste_v1_build_flex_parser_tnl_geneve_init,
+ .build_tnl_geneve_tlv_opt_init = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init,
+ .build_tnl_geneve_tlv_opt_exist_init =
+ &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init,
+ .build_register_0_init = &dr_ste_v1_build_register_0_init,
+ .build_register_1_init = &dr_ste_v1_build_register_1_init,
+ .build_src_gvmi_qpn_init = &dr_ste_v1_build_src_gvmi_qpn_init,
+ .build_flex_parser_0_init = &dr_ste_v1_build_flex_parser_0_init,
+ .build_flex_parser_1_init = &dr_ste_v1_build_flex_parser_1_init,
+ .build_tnl_gtpu_init = &dr_ste_v1_build_flex_parser_tnl_gtpu_init,
+ .build_tnl_header_0_1_init = &dr_ste_v1_build_tnl_header_0_1_init,
+ .build_tnl_gtpu_flex_parser_0_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_init,
+ .build_tnl_gtpu_flex_parser_1_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_init,
+
+ /* Getters and Setters */
+ .ste_init = &dr_ste_v1_init,
+ .set_next_lu_type = &dr_ste_v1_set_next_lu_type,
+ .get_next_lu_type = &dr_ste_v1_get_next_lu_type,
+ .is_miss_addr_set = &dr_ste_v1_is_miss_addr_set,
+ .set_miss_addr = &dr_ste_v1_set_miss_addr,
+ .get_miss_addr = &dr_ste_v1_get_miss_addr,
+ .set_hit_addr = &dr_ste_v1_set_hit_addr,
+ .set_byte_mask = &dr_ste_v1_set_byte_mask,
+ .get_byte_mask = &dr_ste_v1_get_byte_mask,
+
+ /* Actions */
+ .actions_caps = DR_STE_CTX_ACTION_CAP_TX_POP |
+ DR_STE_CTX_ACTION_CAP_RX_PUSH |
+ DR_STE_CTX_ACTION_CAP_RX_ENCAP,
+ .set_actions_rx = &dr_ste_v1_set_actions_rx,
+ .set_actions_tx = &dr_ste_v1_set_actions_tx,
+ .modify_field_arr_sz = ARRAY_SIZE(dr_ste_v2_action_modify_field_arr),
+ .modify_field_arr = dr_ste_v2_action_modify_field_arr,
+ .set_action_set = &dr_ste_v1_set_action_set,
+ .set_action_add = &dr_ste_v1_set_action_add,
+ .set_action_copy = &dr_ste_v1_set_action_copy,
+ .set_action_decap_l3_list = &dr_ste_v1_set_action_decap_l3_list,
+ .alloc_modify_hdr_chunk = &dr_ste_v1_alloc_modify_hdr_ptrn_arg,
+ .dealloc_modify_hdr_chunk = &dr_ste_v1_free_modify_hdr_ptrn_arg,
+ /* Actions bit set */
+ .set_encap = &dr_ste_v1_set_encap,
+ .set_push_vlan = &dr_ste_v1_set_push_vlan,
+ .set_pop_vlan = &dr_ste_v1_set_pop_vlan,
+ .set_rx_decap = &dr_ste_v1_set_rx_decap,
+ .set_encap_l3 = &dr_ste_v1_set_encap_l3,
+ .set_insert_hdr = &dr_ste_v1_set_insert_hdr,
+ .set_remove_hdr = &dr_ste_v1_set_remove_hdr,
+ /* Send */
+ .prepare_for_postsend = &dr_ste_v1_prepare_for_postsend,
+};
+
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v2(void)
+{
+ return &ste_ctx_v2;
+}
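Note: ste_ctx_v2 above reuses every v1 builder and action setter and differs only in its modify-field table. A hedged sketch of how a context is chosen by steering format version; the real selection helper (mlx5dr_ste_get_ctx() in dr_ste.c) is not part of this diff, so the dispatcher below is illustrative only:

/* Assumed dispatcher for illustration; the real one lives in dr_ste.c. */
static struct mlx5dr_ste_ctx *pick_ste_ctx(u8 steering_format_version)
{
	switch (steering_format_version) {
	case MLX5_STEERING_FORMAT_CONNECTX_6DX:
		return mlx5dr_ste_get_ctx_v1();
	case MLX5_STEERING_FORMAT_CONNECTX_7:
		return mlx5dr_ste_get_ctx_v2();
	case MLX5_STEERING_FORMAT_CONNECTX_8:
		return mlx5dr_ste_get_ctx_v3();
	default:
		return NULL;
	}
}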
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.h
index 808b013cf48c..d853fde49cfc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.h
@@ -1,7 +1,8 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
-#include "dr_ste_v1.h"
+#ifndef _DR_STE_V2_
+#define _DR_STE_V2_
enum {
DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_0 = 0x00,
@@ -164,71 +165,4 @@ static const struct mlx5dr_ste_action_modify_field dr_ste_v2_action_modify_field
},
};
-static struct mlx5dr_ste_ctx ste_ctx_v2 = {
- /* Builders */
- .build_eth_l2_src_dst_init = &dr_ste_v1_build_eth_l2_src_dst_init,
- .build_eth_l3_ipv6_src_init = &dr_ste_v1_build_eth_l3_ipv6_src_init,
- .build_eth_l3_ipv6_dst_init = &dr_ste_v1_build_eth_l3_ipv6_dst_init,
- .build_eth_l3_ipv4_5_tuple_init = &dr_ste_v1_build_eth_l3_ipv4_5_tuple_init,
- .build_eth_l2_src_init = &dr_ste_v1_build_eth_l2_src_init,
- .build_eth_l2_dst_init = &dr_ste_v1_build_eth_l2_dst_init,
- .build_eth_l2_tnl_init = &dr_ste_v1_build_eth_l2_tnl_init,
- .build_eth_l3_ipv4_misc_init = &dr_ste_v1_build_eth_l3_ipv4_misc_init,
- .build_eth_ipv6_l3_l4_init = &dr_ste_v1_build_eth_ipv6_l3_l4_init,
- .build_mpls_init = &dr_ste_v1_build_mpls_init,
- .build_tnl_gre_init = &dr_ste_v1_build_tnl_gre_init,
- .build_tnl_mpls_init = &dr_ste_v1_build_tnl_mpls_init,
- .build_tnl_mpls_over_udp_init = &dr_ste_v1_build_tnl_mpls_over_udp_init,
- .build_tnl_mpls_over_gre_init = &dr_ste_v1_build_tnl_mpls_over_gre_init,
- .build_icmp_init = &dr_ste_v1_build_icmp_init,
- .build_general_purpose_init = &dr_ste_v1_build_general_purpose_init,
- .build_eth_l4_misc_init = &dr_ste_v1_build_eth_l4_misc_init,
- .build_tnl_vxlan_gpe_init = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init,
- .build_tnl_geneve_init = &dr_ste_v1_build_flex_parser_tnl_geneve_init,
- .build_tnl_geneve_tlv_opt_init = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init,
- .build_tnl_geneve_tlv_opt_exist_init =
- &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init,
- .build_register_0_init = &dr_ste_v1_build_register_0_init,
- .build_register_1_init = &dr_ste_v1_build_register_1_init,
- .build_src_gvmi_qpn_init = &dr_ste_v1_build_src_gvmi_qpn_init,
- .build_flex_parser_0_init = &dr_ste_v1_build_flex_parser_0_init,
- .build_flex_parser_1_init = &dr_ste_v1_build_flex_parser_1_init,
- .build_tnl_gtpu_init = &dr_ste_v1_build_flex_parser_tnl_gtpu_init,
- .build_tnl_header_0_1_init = &dr_ste_v1_build_tnl_header_0_1_init,
- .build_tnl_gtpu_flex_parser_0_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_init,
- .build_tnl_gtpu_flex_parser_1_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_init,
-
- /* Getters and Setters */
- .ste_init = &dr_ste_v1_init,
- .set_next_lu_type = &dr_ste_v1_set_next_lu_type,
- .get_next_lu_type = &dr_ste_v1_get_next_lu_type,
- .is_miss_addr_set = &dr_ste_v1_is_miss_addr_set,
- .set_miss_addr = &dr_ste_v1_set_miss_addr,
- .get_miss_addr = &dr_ste_v1_get_miss_addr,
- .set_hit_addr = &dr_ste_v1_set_hit_addr,
- .set_byte_mask = &dr_ste_v1_set_byte_mask,
- .get_byte_mask = &dr_ste_v1_get_byte_mask,
-
- /* Actions */
- .actions_caps = DR_STE_CTX_ACTION_CAP_TX_POP |
- DR_STE_CTX_ACTION_CAP_RX_PUSH |
- DR_STE_CTX_ACTION_CAP_RX_ENCAP,
- .set_actions_rx = &dr_ste_v1_set_actions_rx,
- .set_actions_tx = &dr_ste_v1_set_actions_tx,
- .modify_field_arr_sz = ARRAY_SIZE(dr_ste_v2_action_modify_field_arr),
- .modify_field_arr = dr_ste_v2_action_modify_field_arr,
- .set_action_set = &dr_ste_v1_set_action_set,
- .set_action_add = &dr_ste_v1_set_action_add,
- .set_action_copy = &dr_ste_v1_set_action_copy,
- .set_action_decap_l3_list = &dr_ste_v1_set_action_decap_l3_list,
- .alloc_modify_hdr_chunk = &dr_ste_v1_alloc_modify_hdr_ptrn_arg,
- .dealloc_modify_hdr_chunk = &dr_ste_v1_free_modify_hdr_ptrn_arg,
-
- /* Send */
- .prepare_for_postsend = &dr_ste_v1_prepare_for_postsend,
-};
-
-struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v2(void)
-{
- return &ste_ctx_v2;
-}
+#endif /* _DR_STE_V2_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c
new file mode 100644
index 000000000000..e468a9ae44e8
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c
@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include "dr_ste_v1.h"
+#include "dr_ste_v2.h"
+
+static void dr_ste_v3_set_encap(u8 *hw_ste_p, u8 *d_action,
+ u32 reformat_id, int size)
+{
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action, action_id,
+ DR_STE_V1_ACTION_ID_INSERT_POINTER);
+ /* The hardware expects here size in words (2 byte) */
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action, size, size / 2);
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action, pointer, reformat_id);
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action, attributes,
+ DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP);
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static void dr_ste_v3_set_push_vlan(u8 *ste, u8 *d_action,
+ u32 vlan_hdr)
+{
+ MLX5_SET(ste_double_action_insert_with_inline_v3, d_action, action_id,
+ DR_STE_V1_ACTION_ID_INSERT_INLINE);
+ /* The hardware expects here offset to vlan header in words (2 byte) */
+ MLX5_SET(ste_double_action_insert_with_inline_v3, d_action, start_offset,
+ HDR_LEN_L2_MACS >> 1);
+ MLX5_SET(ste_double_action_insert_with_inline_v3, d_action, inline_data, vlan_hdr);
+ dr_ste_v1_set_reparse(ste);
+}
+
+static void dr_ste_v3_set_pop_vlan(u8 *hw_ste_p, u8 *s_action,
+ u8 vlans_num)
+{
+ MLX5_SET(ste_single_action_remove_header_size_v3, s_action,
+ action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
+ MLX5_SET(ste_single_action_remove_header_size_v3, s_action,
+ start_anchor, DR_STE_HEADER_ANCHOR_1ST_VLAN);
+ /* The hardware expects here size in words (2 byte) */
+ MLX5_SET(ste_single_action_remove_header_size_v3, s_action,
+ remove_size, (HDR_LEN_L2_VLAN >> 1) * vlans_num);
+
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static void dr_ste_v3_set_encap_l3(u8 *hw_ste_p,
+ u8 *frst_s_action,
+ u8 *scnd_d_action,
+ u32 reformat_id,
+ int size)
+{
+ /* Remove L2 headers */
+ MLX5_SET(ste_single_action_remove_header_v3, frst_s_action, action_id,
+ DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
+ MLX5_SET(ste_single_action_remove_header_v3, frst_s_action, end_anchor,
+ DR_STE_HEADER_ANCHOR_IPV6_IPV4);
+
+ /* Encapsulate with given reformat ID */
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, scnd_d_action, action_id,
+ DR_STE_V1_ACTION_ID_INSERT_POINTER);
+ /* The hardware expects here size in words (2 byte) */
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, scnd_d_action, size, size / 2);
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, scnd_d_action, pointer, reformat_id);
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, scnd_d_action, attributes,
+ DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP);
+
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static void dr_ste_v3_set_rx_decap(u8 *hw_ste_p, u8 *s_action)
+{
+ MLX5_SET(ste_single_action_remove_header_v3, s_action, action_id,
+ DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
+ MLX5_SET(ste_single_action_remove_header_v3, s_action, decap, 1);
+ MLX5_SET(ste_single_action_remove_header_v3, s_action, vni_to_cqe, 1);
+ MLX5_SET(ste_single_action_remove_header_v3, s_action, end_anchor,
+ DR_STE_HEADER_ANCHOR_INNER_MAC);
+
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static void dr_ste_v3_set_insert_hdr(u8 *hw_ste_p, u8 *d_action,
+ u32 reformat_id, u8 anchor,
+ u8 offset, int size)
+{
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action,
+ action_id, DR_STE_V1_ACTION_ID_INSERT_POINTER);
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action,
+ start_anchor, anchor);
+
+ /* The hardware expects here size and offset in words (2 byte) */
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action,
+ size, size / 2);
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action,
+ start_offset, offset / 2);
+
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action,
+ pointer, reformat_id);
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action,
+ attributes, DR_STE_V1_ACTION_INSERT_PTR_ATTR_NONE);
+
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static void dr_ste_v3_set_remove_hdr(u8 *hw_ste_p, u8 *s_action,
+ u8 anchor, u8 offset, int size)
+{
+ MLX5_SET(ste_single_action_remove_header_size_v3, s_action,
+ action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
+ MLX5_SET(ste_single_action_remove_header_size_v3, s_action,
+ start_anchor, anchor);
+
+ /* The hardware expects here size and offset in words (2 byte) */
+ MLX5_SET(ste_single_action_remove_header_size_v3, s_action,
+ remove_size, size / 2);
+ MLX5_SET(ste_single_action_remove_header_size_v3, s_action,
+ start_offset, offset / 2);
+
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static int
+dr_ste_v3_set_action_decap_l3_list(void *data, u32 data_sz,
+ u8 *hw_action, u32 hw_action_sz,
+ uint16_t *used_hw_action_num)
+{
+ u8 padded_data[DR_STE_L2_HDR_MAX_SZ] = {};
+ void *data_ptr = padded_data;
+ u16 used_actions = 0;
+ u32 inline_data_sz;
+ u32 i;
+
+ if (hw_action_sz / DR_STE_ACTION_DOUBLE_SZ < DR_STE_DECAP_L3_ACTION_NUM)
+ return -EINVAL;
+
+ inline_data_sz =
+ MLX5_FLD_SZ_BYTES(ste_double_action_insert_with_inline_v3, inline_data);
+
+ /* Add an alignment padding */
+ memcpy(padded_data + data_sz % inline_data_sz, data, data_sz);
+
+ /* Remove L2L3 outer headers */
+ MLX5_SET(ste_single_action_remove_header_v3, hw_action, action_id,
+ DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
+ MLX5_SET(ste_single_action_remove_header_v3, hw_action, decap, 1);
+ MLX5_SET(ste_single_action_remove_header_v3, hw_action, vni_to_cqe, 1);
+ MLX5_SET(ste_single_action_remove_header_v3, hw_action, end_anchor,
+ DR_STE_HEADER_ANCHOR_INNER_IPV6_IPV4);
+ hw_action += DR_STE_ACTION_DOUBLE_SZ;
+ used_actions++; /* Remove and NOP are a single double action */
+
+ /* Point to the last dword of the header */
+ data_ptr += (data_sz / inline_data_sz) * inline_data_sz;
+
+ /* Add the new header using inline action 4Byte at a time, the header
+ * is added in reversed order to the beginning of the packet to avoid
+ * incorrect parsing by the HW. Since header is 14B or 18B an extra
+ * two bytes are padded and later removed.
+ */
+ for (i = 0; i < data_sz / inline_data_sz + 1; i++) {
+ void *addr_inline;
+
+ MLX5_SET(ste_double_action_insert_with_inline_v3, hw_action, action_id,
+ DR_STE_V1_ACTION_ID_INSERT_INLINE);
+ /* The hardware expects here offset to words (2 bytes) */
+ MLX5_SET(ste_double_action_insert_with_inline_v3, hw_action, start_offset, 0);
+
+ /* Copy bytes one by one to avoid endianness problem */
+ addr_inline = MLX5_ADDR_OF(ste_double_action_insert_with_inline_v3,
+ hw_action, inline_data);
+ memcpy(addr_inline, data_ptr - i * inline_data_sz, inline_data_sz);
+ hw_action += DR_STE_ACTION_DOUBLE_SZ;
+ used_actions++;
+ }
+
+ /* Remove first 2 extra bytes */
+ MLX5_SET(ste_single_action_remove_header_size_v3, hw_action, action_id,
+ DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
+ MLX5_SET(ste_single_action_remove_header_size_v3, hw_action, start_offset, 0);
+ /* The hardware expects here size in words (2 bytes) */
+ MLX5_SET(ste_single_action_remove_header_size_v3, hw_action, remove_size, 1);
+ used_actions++;
+
+ *used_hw_action_num = used_actions;
+
+ return 0;
+}
+
+static struct mlx5dr_ste_ctx ste_ctx_v3 = {
+ /* Builders */
+ .build_eth_l2_src_dst_init = &dr_ste_v1_build_eth_l2_src_dst_init,
+ .build_eth_l3_ipv6_src_init = &dr_ste_v1_build_eth_l3_ipv6_src_init,
+ .build_eth_l3_ipv6_dst_init = &dr_ste_v1_build_eth_l3_ipv6_dst_init,
+ .build_eth_l3_ipv4_5_tuple_init = &dr_ste_v1_build_eth_l3_ipv4_5_tuple_init,
+ .build_eth_l2_src_init = &dr_ste_v1_build_eth_l2_src_init,
+ .build_eth_l2_dst_init = &dr_ste_v1_build_eth_l2_dst_init,
+ .build_eth_l2_tnl_init = &dr_ste_v1_build_eth_l2_tnl_init,
+ .build_eth_l3_ipv4_misc_init = &dr_ste_v1_build_eth_l3_ipv4_misc_init,
+ .build_eth_ipv6_l3_l4_init = &dr_ste_v1_build_eth_ipv6_l3_l4_init,
+ .build_mpls_init = &dr_ste_v1_build_mpls_init,
+ .build_tnl_gre_init = &dr_ste_v1_build_tnl_gre_init,
+ .build_tnl_mpls_init = &dr_ste_v1_build_tnl_mpls_init,
+ .build_tnl_mpls_over_udp_init = &dr_ste_v1_build_tnl_mpls_over_udp_init,
+ .build_tnl_mpls_over_gre_init = &dr_ste_v1_build_tnl_mpls_over_gre_init,
+ .build_icmp_init = &dr_ste_v1_build_icmp_init,
+ .build_general_purpose_init = &dr_ste_v1_build_general_purpose_init,
+ .build_eth_l4_misc_init = &dr_ste_v1_build_eth_l4_misc_init,
+ .build_tnl_vxlan_gpe_init = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init,
+ .build_tnl_geneve_init = &dr_ste_v1_build_flex_parser_tnl_geneve_init,
+ .build_tnl_geneve_tlv_opt_init = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init,
+ .build_tnl_geneve_tlv_opt_exist_init =
+ &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init,
+ .build_register_0_init = &dr_ste_v1_build_register_0_init,
+ .build_register_1_init = &dr_ste_v1_build_register_1_init,
+ .build_src_gvmi_qpn_init = &dr_ste_v1_build_src_gvmi_qpn_init,
+ .build_flex_parser_0_init = &dr_ste_v1_build_flex_parser_0_init,
+ .build_flex_parser_1_init = &dr_ste_v1_build_flex_parser_1_init,
+ .build_tnl_gtpu_init = &dr_ste_v1_build_flex_parser_tnl_gtpu_init,
+ .build_tnl_header_0_1_init = &dr_ste_v1_build_tnl_header_0_1_init,
+ .build_tnl_gtpu_flex_parser_0_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_init,
+ .build_tnl_gtpu_flex_parser_1_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_init,
+
+ /* Getters and Setters */
+ .ste_init = &dr_ste_v1_init,
+ .set_next_lu_type = &dr_ste_v1_set_next_lu_type,
+ .get_next_lu_type = &dr_ste_v1_get_next_lu_type,
+ .is_miss_addr_set = &dr_ste_v1_is_miss_addr_set,
+ .set_miss_addr = &dr_ste_v1_set_miss_addr,
+ .get_miss_addr = &dr_ste_v1_get_miss_addr,
+ .set_hit_addr = &dr_ste_v1_set_hit_addr,
+ .set_byte_mask = &dr_ste_v1_set_byte_mask,
+ .get_byte_mask = &dr_ste_v1_get_byte_mask,
+
+ /* Actions */
+ .actions_caps = DR_STE_CTX_ACTION_CAP_TX_POP |
+ DR_STE_CTX_ACTION_CAP_RX_PUSH |
+ DR_STE_CTX_ACTION_CAP_RX_ENCAP,
+ .set_actions_rx = &dr_ste_v1_set_actions_rx,
+ .set_actions_tx = &dr_ste_v1_set_actions_tx,
+ .modify_field_arr_sz = ARRAY_SIZE(dr_ste_v2_action_modify_field_arr),
+ .modify_field_arr = dr_ste_v2_action_modify_field_arr,
+ .set_action_set = &dr_ste_v1_set_action_set,
+ .set_action_add = &dr_ste_v1_set_action_add,
+ .set_action_copy = &dr_ste_v1_set_action_copy,
+ .set_action_decap_l3_list = &dr_ste_v3_set_action_decap_l3_list,
+ .alloc_modify_hdr_chunk = &dr_ste_v1_alloc_modify_hdr_ptrn_arg,
+ .dealloc_modify_hdr_chunk = &dr_ste_v1_free_modify_hdr_ptrn_arg,
+ /* Actions bit set */
+ .set_encap = &dr_ste_v3_set_encap,
+ .set_push_vlan = &dr_ste_v3_set_push_vlan,
+ .set_pop_vlan = &dr_ste_v3_set_pop_vlan,
+ .set_rx_decap = &dr_ste_v3_set_rx_decap,
+ .set_encap_l3 = &dr_ste_v3_set_encap_l3,
+ .set_insert_hdr = &dr_ste_v3_set_insert_hdr,
+ .set_remove_hdr = &dr_ste_v3_set_remove_hdr,
+ /* Send */
+ .prepare_for_postsend = &dr_ste_v1_prepare_for_postsend,
+};
+
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v3(void)
+{
+ return &ste_ctx_v3;
+}
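Note: the v3 encoders above share two conventions worth spelling out: sizes and offsets are programmed in 2-byte hardware words, and the decap-L3 rebuild pads the 14- or 18-byte L2 header to the 4-byte inline-action granularity before the final remove-by-size action strips the two extra bytes. A small standalone illustration (helper names are made up for the example):

#include <stdint.h>

static inline uint32_t bytes_to_hw_words(uint32_t bytes)
{
	return bytes / 2;		/* e.g. an 18-byte header -> 9 words */
}

static inline uint32_t decap_l3_lead_pad(uint32_t l2_hdr_sz, uint32_t inline_sz)
{
	/* 14 % 4 == 2 and 18 % 4 == 2: two pad bytes are prepended so the
	 * header can be re-inserted 4 bytes at a time, then removed by the
	 * trailing remove-by-size action (remove_size = 1 word = 2 bytes).
	 */
	return l2_hdr_sz % inline_sz;
}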
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_table.c
index 69294a66fd7f..69294a66fd7f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_table.c
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_types.h
index 81eff6c410ce..cc328292bf84 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_types.h
@@ -1379,6 +1379,11 @@ int mlx5dr_cmd_create_modify_header_arg(struct mlx5_core_dev *dev,
void mlx5dr_cmd_destroy_modify_header_arg(struct mlx5_core_dev *dev,
u32 obj_id);
+int mlx5dr_definer_get(struct mlx5dr_domain *dmn, u16 format_id,
+ u8 *dw_selectors, u8 *byte_selectors,
+ u8 *match_mask, u32 *definer_id);
+void mlx5dr_definer_put(struct mlx5dr_domain *dmn, u32 definer_id);
+
struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn,
enum mlx5dr_icm_type icm_type);
void mlx5dr_icm_pool_destroy(struct mlx5dr_icm_pool *pool);
@@ -1468,7 +1473,6 @@ struct mlx5dr_send_ring {
int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn);
void mlx5dr_send_ring_free(struct mlx5dr_domain *dmn,
struct mlx5dr_send_ring *send_ring);
-int mlx5dr_send_ring_force_drain(struct mlx5dr_domain *dmn);
int mlx5dr_send_postsend_ste(struct mlx5dr_domain *dmn,
struct mlx5dr_ste *ste,
u8 *data,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c
index 50c2554c9ccf..f367997ab61e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c
@@ -9,14 +9,6 @@
#include "fs_dr.h"
#include "dr_types.h"
-static bool dr_is_fw_term_table(struct mlx5_flow_table *ft)
-{
- if (ft->flags & MLX5_FLOW_TABLE_TERMINATION)
- return true;
-
- return false;
-}
-
static int mlx5_cmd_dr_update_root_ft(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
u32 underlay_qpn,
@@ -70,7 +62,7 @@ static int mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace *ns,
u32 flags;
int err;
- if (dr_is_fw_term_table(ft))
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->create_flow_table(ns, ft,
ft_attr,
next_ft);
@@ -110,7 +102,7 @@ static int mlx5_cmd_dr_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5dr_action *action = ft->fs_dr_table.miss_action;
int err;
- if (dr_is_fw_term_table(ft))
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_table(ns, ft);
err = mlx5dr_table_destroy(ft->fs_dr_table.dr_table);
@@ -135,7 +127,7 @@ static int mlx5_cmd_dr_modify_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_table *next_ft)
{
- if (dr_is_fw_term_table(ft))
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->modify_flow_table(ns, ft, next_ft);
return set_miss_action(ns, ft, next_ft);
@@ -154,7 +146,7 @@ static int mlx5_cmd_dr_create_flow_group(struct mlx5_flow_root_namespace *ns,
match_criteria_enable);
struct mlx5dr_match_parameters mask;
- if (dr_is_fw_term_table(ft))
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->create_flow_group(ns, ft, in,
fg);
@@ -179,7 +171,7 @@ static int mlx5_cmd_dr_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg)
{
- if (dr_is_fw_term_table(ft))
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_group(ns, ft, fg);
return mlx5dr_matcher_destroy(fg->fs_dr_matcher.dr_matcher);
@@ -264,6 +256,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
{
struct mlx5dr_domain *domain = ns->fs_dr_domain.dr_domain;
struct mlx5dr_action_dest *term_actions;
+ struct mlx5_pkt_reformat *pkt_reformat;
struct mlx5dr_match_parameters params;
struct mlx5_core_dev *dev = ns->dev;
struct mlx5dr_action **fs_dr_actions;
@@ -279,7 +272,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
int err = 0;
int i;
- if (dr_is_fw_term_table(ft))
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->create_fte(ns, ft, group, fte);
actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX, sizeof(*actions),
@@ -306,12 +299,12 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
match_sz = sizeof(fte->val);
/* Drop reformat action bit if destination vport set with reformat */
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
list_for_each_entry(dst, &fte->node.children, node.list) {
if (!contain_vport_reformat_action(dst))
continue;
- fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ fte->act_dests.action.action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
break;
}
}
@@ -321,7 +314,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
* TX: modify header -> push vlan -> encap
* RX: decap -> pop vlan -> modify header
*/
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
enum mlx5dr_action_reformat_type decap_type =
DR_ACTION_REFORMAT_TYP_TNL_L2_TO_L2;
@@ -337,26 +330,27 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
actions[num_actions++] = tmp_action;
}
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
bool is_decap;
- if (fte->action.pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_FW) {
+ pkt_reformat = fte->act_dests.action.pkt_reformat;
+ if (pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_FW) {
err = -EINVAL;
mlx5dr_err(domain, "FW-owned reformat can't be used in SW rule\n");
goto free_actions;
}
- is_decap = fte->action.pkt_reformat->reformat_type ==
+ is_decap = pkt_reformat->reformat_type ==
MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
if (is_decap)
actions[num_actions++] =
- fte->action.pkt_reformat->action.dr_action;
+ pkt_reformat->fs_dr_action.dr_action;
else
delay_encap_set = true;
}
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
tmp_action =
mlx5dr_action_create_pop_vlan();
if (!tmp_action) {
@@ -367,7 +361,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
actions[num_actions++] = tmp_action;
}
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2) {
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2) {
tmp_action =
mlx5dr_action_create_pop_vlan();
if (!tmp_action) {
@@ -378,12 +372,14 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
actions[num_actions++] = tmp_action;
}
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
- actions[num_actions++] =
- fte->action.modify_hdr->action.dr_action;
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
+ struct mlx5_modify_hdr *modify_hdr = fte->act_dests.action.modify_hdr;
+
+ actions[num_actions++] = modify_hdr->fs_dr_action.dr_action;
+ }
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
- tmp_action = create_action_push_vlan(domain, &fte->action.vlan[0]);
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
+ tmp_action = create_action_push_vlan(domain, &fte->act_dests.action.vlan[0]);
if (!tmp_action) {
err = -ENOMEM;
goto free_actions;
@@ -392,8 +388,8 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
actions[num_actions++] = tmp_action;
}
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
- tmp_action = create_action_push_vlan(domain, &fte->action.vlan[1]);
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
+ tmp_action = create_action_push_vlan(domain, &fte->act_dests.action.vlan[1]);
if (!tmp_action) {
err = -ENOMEM;
goto free_actions;
@@ -403,12 +399,11 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
}
if (delay_encap_set)
- actions[num_actions++] =
- fte->action.pkt_reformat->action.dr_action;
+ actions[num_actions++] = pkt_reformat->fs_dr_action.dr_action;
/* The order of the actions below is not important */
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
tmp_action = mlx5dr_action_create_drop();
if (!tmp_action) {
err = -ENOMEM;
@@ -418,9 +413,9 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
term_actions[num_term_actions++].dest = tmp_action;
}
- if (fte->flow_context.flow_tag) {
+ if (fte->act_dests.flow_context.flow_tag) {
tmp_action =
- mlx5dr_action_create_tag(fte->flow_context.flow_tag);
+ mlx5dr_action_create_tag(fte->act_dests.flow_context.flow_tag);
if (!tmp_action) {
err = -ENOMEM;
goto free_actions;
@@ -429,7 +424,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
actions[num_actions++] = tmp_action;
}
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
list_for_each_entry(dst, &fte->node.children, node.list) {
enum mlx5_flow_destination_type type = dst->dest_attr.type;
u32 id;
@@ -466,9 +461,11 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
term_actions[num_term_actions].dest = tmp_action;
if (dst->dest_attr.vport.flags &
- MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
+ MLX5_FLOW_DEST_VPORT_REFORMAT_ID) {
+ pkt_reformat = dst->dest_attr.vport.pkt_reformat;
term_actions[num_term_actions].reformat =
- dst->dest_attr.vport.pkt_reformat->action.dr_action;
+ pkt_reformat->fs_dr_action.dr_action;
+ }
num_term_actions++;
break;
@@ -510,7 +507,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
}
}
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
list_for_each_entry(dst, &fte->node.children, node.list) {
u32 id;
@@ -524,7 +521,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
goto free_actions;
}
- id = dst->dest_attr.counter_id;
+ id = mlx5_fc_id(dst->dest_attr.counter);
tmp_action =
mlx5dr_action_create_flow_counter(id);
if (!tmp_action) {
@@ -537,19 +534,21 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
}
}
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
- if (fte->action.exe_aso.type != MLX5_EXE_ASO_FLOW_METER) {
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
+ struct mlx5_flow_act *action = &fte->act_dests.action;
+
+ if (fte->act_dests.action.exe_aso.type != MLX5_EXE_ASO_FLOW_METER) {
err = -EOPNOTSUPP;
goto free_actions;
}
tmp_action =
mlx5dr_action_create_aso(domain,
- fte->action.exe_aso.object_id,
- fte->action.exe_aso.return_reg_id,
- fte->action.exe_aso.type,
- fte->action.exe_aso.flow_meter.init_color,
- fte->action.exe_aso.flow_meter.meter_idx);
+ action->exe_aso.object_id,
+ action->exe_aso.return_reg_id,
+ action->exe_aso.type,
+ action->exe_aso.flow_meter.init_color,
+ action->exe_aso.flow_meter.meter_idx);
if (!tmp_action) {
err = -ENOMEM;
goto free_actions;
@@ -576,8 +575,8 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
actions[num_actions++] = term_actions->dest;
} else if (num_term_actions > 1) {
bool ignore_flow_level =
- !!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL);
- u32 flow_source = fte->flow_context.flow_source;
+ !!(fte->act_dests.action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL);
+ u32 flow_source = fte->act_dests.flow_context.flow_source;
if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
@@ -601,7 +600,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
&params,
num_actions,
actions,
- fte->flow_context.flow_source);
+ fte->act_dests.flow_context.flow_source);
if (!rule) {
err = -EINVAL;
goto free_actions;
@@ -677,7 +676,7 @@ static int mlx5_cmd_dr_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns
}
pkt_reformat->owner = MLX5_FLOW_RESOURCE_OWNER_SW;
- pkt_reformat->action.dr_action = action;
+ pkt_reformat->fs_dr_action.dr_action = action;
return 0;
}
@@ -685,7 +684,7 @@ static int mlx5_cmd_dr_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns
static void mlx5_cmd_dr_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
struct mlx5_pkt_reformat *pkt_reformat)
{
- mlx5dr_action_destroy(pkt_reformat->action.dr_action);
+ mlx5dr_action_destroy(pkt_reformat->fs_dr_action.dr_action);
}
static int mlx5_cmd_dr_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
@@ -708,7 +707,7 @@ static int mlx5_cmd_dr_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
}
modify_hdr->owner = MLX5_FLOW_RESOURCE_OWNER_SW;
- modify_hdr->action.dr_action = action;
+ modify_hdr->fs_dr_action.dr_action = action;
return 0;
}
@@ -716,7 +715,7 @@ static int mlx5_cmd_dr_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
static void mlx5_cmd_dr_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
struct mlx5_modify_hdr *modify_hdr)
{
- mlx5dr_action_destroy(modify_hdr->action.dr_action);
+ mlx5dr_action_destroy(modify_hdr->fs_dr_action.dr_action);
}
static int
@@ -740,7 +739,7 @@ static int mlx5_cmd_dr_delete_fte(struct mlx5_flow_root_namespace *ns,
int err;
int i;
- if (dr_is_fw_term_table(ft))
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->delete_fte(ns, ft, fte);
err = mlx5dr_rule_destroy(rule->dr_rule);
@@ -765,7 +764,7 @@ static int mlx5_cmd_dr_update_fte(struct mlx5_flow_root_namespace *ns,
struct fs_fte fte_tmp = {};
int ret;
- if (dr_is_fw_term_table(ft))
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->update_fte(ns, ft, group, modify_mask, fte);
/* Backup current dr rule details */
@@ -819,11 +818,11 @@ static int mlx5_cmd_dr_destroy_ns(struct mlx5_flow_root_namespace *ns)
static u32 mlx5_cmd_dr_get_capabilities(struct mlx5_flow_root_namespace *ns,
enum fs_flow_table_type ft_type)
{
- u32 steering_caps = 0;
+ u32 steering_caps = MLX5_FLOW_STEERING_CAP_DUPLICATE_MATCH;
if (ft_type != FS_FT_FDB ||
MLX5_CAP_GEN(ns->dev, steering_format_version) == MLX5_STEERING_FORMAT_CONNECTX_5)
- return 0;
+ return steering_caps;
steering_caps |= MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX;
steering_caps |= MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX;
@@ -834,15 +833,21 @@ static u32 mlx5_cmd_dr_get_capabilities(struct mlx5_flow_root_namespace *ns,
return steering_caps;
}
-int mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat)
+int
+mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat,
+ u32 *reformat_id)
{
+ struct mlx5dr_action *dr_action;
+
switch (pkt_reformat->reformat_type) {
case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
case MLX5_REFORMAT_TYPE_INSERT_HDR:
- return mlx5dr_action_get_pkt_reformat_id(pkt_reformat->action.dr_action);
+ dr_action = pkt_reformat->fs_dr_action.dr_action;
+ *reformat_id = mlx5dr_action_get_pkt_reformat_id(dr_action);
+ return 0;
}
return -EOPNOTSUPP;
}
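Note: a brief usage sketch of the reworked helper above (the caller is not part of this hunk); the reformat id now comes back through an out-parameter, so a full u32 id is no longer conflated with a negative errno return value:

	u32 reformat_id;
	int err;

	err = mlx5_fs_dr_action_get_pkt_reformat_id(pkt_reformat, &reformat_id);
	if (err)
		return err;	/* -EOPNOTSUPP when the type has no SW-steering action */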
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.h
index 99a3b2eff6b8..f869f2daefbf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.h
@@ -38,7 +38,9 @@ struct mlx5_fs_dr_table {
bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev);
-int mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat);
+int
+mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat,
+ u32 *reformat_id);
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_dr_cmds(void);
@@ -49,9 +51,11 @@ static inline const struct mlx5_flow_cmds *mlx5_fs_cmd_get_dr_cmds(void)
return NULL;
}
-static inline u32 mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat)
+static inline int
+mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat,
+ u32 *reformat_id)
{
- return 0;
+ return -EOPNOTSUPP;
}
static inline bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr.h
index fb078fa0f0cc..898c3618ff26 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr.h
@@ -600,4 +600,44 @@ struct mlx5_ifc_ste_double_action_aso_v1_bits {
};
};
+struct mlx5_ifc_ste_single_action_remove_header_v3_bits {
+ u8 action_id[0x8];
+ u8 start_anchor[0x7];
+ u8 end_anchor[0x7];
+ u8 reserved_at_16[0x1];
+ u8 outer_l4_remove[0x1];
+ u8 reserved_at_18[0x4];
+ u8 decap[0x1];
+ u8 vni_to_cqe[0x1];
+ u8 qos_profile[0x2];
+};
+
+struct mlx5_ifc_ste_single_action_remove_header_size_v3_bits {
+ u8 action_id[0x8];
+ u8 start_anchor[0x7];
+ u8 start_offset[0x8];
+ u8 outer_l4_remove[0x1];
+ u8 reserved_at_18[0x2];
+ u8 remove_size[0x6];
+};
+
+struct mlx5_ifc_ste_double_action_insert_with_inline_v3_bits {
+ u8 action_id[0x8];
+ u8 start_anchor[0x7];
+ u8 start_offset[0x8];
+ u8 reserved_at_17[0x9];
+
+ u8 inline_data[0x20];
+};
+
+struct mlx5_ifc_ste_double_action_insert_with_ptr_v3_bits {
+ u8 action_id[0x8];
+ u8 start_anchor[0x7];
+ u8 start_offset[0x8];
+ u8 size[0x6];
+ u8 attributes[0x3];
+
+ u8 pointer[0x20];
+};
+
#endif /* MLX5_IFC_DR_H */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr_ste_v1.h
index ca3b0f1453a7..ca3b0f1453a7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr_ste_v1.h
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h
index 89fced86936f..fc8a2169d1a1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h
@@ -45,8 +45,6 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type);
int mlx5dr_domain_destroy(struct mlx5dr_domain *domain);
-int mlx5dr_domain_sync(struct mlx5dr_domain *domain, u32 flags);
-
void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
struct mlx5dr_domain *peer_dmn,
u16 peer_vhca_id);
@@ -153,11 +151,6 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action);
u32 mlx5dr_action_get_pkt_reformat_id(struct mlx5dr_action *action);
-int mlx5dr_definer_get(struct mlx5dr_domain *dmn, u16 format_id,
- u8 *dw_selectors, u8 *byte_selectors,
- u8 *match_mask, u32 *definer_id);
-void mlx5dr_definer_put(struct mlx5dr_domain *dmn, u32 definer_id);
-
static inline bool
mlx5dr_is_supported(struct mlx5_core_dev *dev)
{
@@ -165,7 +158,7 @@ mlx5dr_is_supported(struct mlx5_core_dev *dev)
(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner) ||
(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner_v2) &&
(MLX5_CAP_GEN(dev, steering_format_version) <=
- MLX5_STEERING_FORMAT_CONNECTX_7)));
+ MLX5_STEERING_FORMAT_CONNECTX_8)));
}
/* buddy functions & structure */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 1005bb6935b6..306affbcfd3b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -36,6 +36,7 @@
#include <linux/mlx5/vport.h>
#include <linux/mlx5/eswitch.h>
#include "mlx5_core.h"
+#include "eswitch.h"
#include "sf/sf.h"
/* Mutex to hold while enabling or disabling RoCE */
@@ -77,15 +78,14 @@ int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
}
static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
- u32 *out)
+ bool other_vport, u32 *out)
{
u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {};
MLX5_SET(query_nic_vport_context_in, in, opcode,
MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
- if (vport)
- MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
+ MLX5_SET(query_nic_vport_context_in, in, other_vport, other_vport);
return mlx5_cmd_exec_inout(mdev, query_nic_vport_context, in, out);
}
@@ -96,7 +96,7 @@ int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {};
int err;
- err = mlx5_query_nic_vport_context(mdev, vport, out);
+ err = mlx5_query_nic_vport_context(mdev, vport, vport > 0, out);
if (!err)
*min_inline = MLX5_GET(query_nic_vport_context_out, out,
nic_vport_context.min_wqe_inline_mode);
@@ -218,7 +218,7 @@ int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
if (!out)
return -ENOMEM;
- err = mlx5_query_nic_vport_context(mdev, 0, out);
+ err = mlx5_query_nic_vport_context(mdev, 0, false, out);
if (!err)
*mtu = MLX5_GET(query_nic_vport_context_out, out,
nic_vport_context.mtu);
@@ -428,7 +428,7 @@ int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
if (!out)
return -ENOMEM;
- err = mlx5_query_nic_vport_context(mdev, 0, out);
+ err = mlx5_query_nic_vport_context(mdev, 0, false, out);
if (err)
goto out;
@@ -450,7 +450,7 @@ int mlx5_query_nic_vport_sd_group(struct mlx5_core_dev *mdev, u8 *sd_group)
if (!out)
return -ENOMEM;
- err = mlx5_query_nic_vport_context(mdev, 0, out);
+ err = mlx5_query_nic_vport_context(mdev, 0, false, out);
if (err)
goto out;
@@ -461,23 +461,27 @@ out:
return err;
}
-int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
+int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev,
+ u16 vport, bool other_vport, u64 *node_guid)
{
u32 *out;
int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+ int err;
out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
- mlx5_query_nic_vport_context(mdev, 0, out);
+ err = mlx5_query_nic_vport_context(mdev, vport, other_vport, out);
+ if (err)
+ goto out;
*node_guid = MLX5_GET64(query_nic_vport_context_out, out,
nic_vport_context.node_guid);
-
+out:
kvfree(out);
- return 0;
+ return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);
@@ -519,19 +523,22 @@ int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
{
u32 *out;
int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+ int err;
out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
- mlx5_query_nic_vport_context(mdev, 0, out);
+ err = mlx5_query_nic_vport_context(mdev, 0, false, out);
+ if (err)
+ goto out;
*qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
nic_vport_context.qkey_violation_counter);
-
+out:
kvfree(out);
- return 0;
+ return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr);
@@ -737,6 +744,7 @@ int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
rep->grh_required = MLX5_GET_PR(hca_vport_context, ctx, grh_required);
rep->sys_image_guid = MLX5_GET64_PR(hca_vport_context, ctx,
system_image_guid);
+ rep->num_plane = MLX5_GET_PR(hca_vport_context, ctx, num_port_plane);
ex:
kvfree(out);
@@ -796,7 +804,7 @@ int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
if (!out)
return -ENOMEM;
- err = mlx5_query_nic_vport_context(mdev, vport, out);
+ err = mlx5_query_nic_vport_context(mdev, vport, vport > 0, out);
if (err)
goto out;
@@ -900,7 +908,7 @@ int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status)
if (!out)
return -ENOMEM;
- err = mlx5_query_nic_vport_context(mdev, 0, out);
+ err = mlx5_query_nic_vport_context(mdev, 0, false, out);
if (err)
goto out;
@@ -1182,27 +1190,101 @@ u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev)
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_system_image_guid);
+void mlx5_query_nic_sw_system_image_guid(struct mlx5_core_dev *mdev, u8 *buf,
+ u8 *len)
+{
+ u64 fw_system_image_guid;
+
+ *len = 0;
+
+ fw_system_image_guid = mlx5_query_nic_system_image_guid(mdev);
+ if (!fw_system_image_guid)
+ return;
+
+ memcpy(buf, &fw_system_image_guid, sizeof(fw_system_image_guid));
+ *len += sizeof(fw_system_image_guid);
+
+ if (MLX5_CAP_GEN_2(mdev, load_balance_id) &&
+ MLX5_CAP_GEN_2(mdev, lag_per_mp_group))
+ buf[(*len)++] = MLX5_CAP_GEN_2(mdev, load_balance_id);
+}
+
+static bool mlx5_vport_use_vhca_id_as_func_id(struct mlx5_core_dev *dev,
+ u16 vport_num, u16 *vhca_id)
+{
+ if (!MLX5_CAP_GEN_2(dev, function_id_type_vhca_id))
+ return false;
+
+ return mlx5_esw_vport_vhca_id(dev->priv.eswitch, vport_num, vhca_id);
+}
+
int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 vport, void *out,
u16 opmod)
{
- bool ec_vf_func = mlx5_core_is_ec_vf_vport(dev, vport);
u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)] = {};
+ u16 vhca_id = 0, function_id = 0;
+ bool ec_vf_func = false;
+
+ /* If this vport refers to a vport on the EC PF (embedded CPU),
+ * let the FW know which domain we are querying since vport numbers or
+ * function_ids are not unique across the different PF domains,
+ * unless we use vhca_id as the function_id below.
+ */
+ ec_vf_func = mlx5_core_is_ec_vf_vport(dev, vport);
+ function_id = mlx5_vport_to_func_id(dev, vport, ec_vf_func);
+
+ if (mlx5_vport_use_vhca_id_as_func_id(dev, vport, &vhca_id)) {
+ MLX5_SET(query_hca_cap_in, in, function_id_type, 1);
+ function_id = vhca_id;
+ ec_vf_func = false;
+ mlx5_core_dbg(dev, "%s using vhca_id as function_id for vport %d vhca_id 0x%x\n",
+ __func__, vport, vhca_id);
+ }
opmod = (opmod << 1) | (HCA_CAP_OPMOD_GET_MAX & 0x01);
MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
- MLX5_SET(query_hca_cap_in, in, function_id, mlx5_vport_to_func_id(dev, vport, ec_vf_func));
MLX5_SET(query_hca_cap_in, in, other_function, true);
MLX5_SET(query_hca_cap_in, in, ec_vf_function, ec_vf_func);
+ MLX5_SET(query_hca_cap_in, in, function_id, function_id);
return mlx5_cmd_exec_inout(dev, query_hca_cap, in, out);
}
EXPORT_SYMBOL_GPL(mlx5_vport_get_other_func_cap);
+int mlx5_vport_get_vhca_id(struct mlx5_core_dev *dev, u16 vport, u16 *vhca_id)
+{
+ int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ void *query_ctx;
+ void *hca_caps;
+ int err;
+
+ /* Try to get the vhca_id via the eswitch first */
+ if (mlx5_esw_vport_vhca_id(dev->priv.eswitch, vport, vhca_id))
+ return 0;
+
+ query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
+ if (!query_ctx)
+ return -ENOMEM;
+
+ err = mlx5_vport_get_other_func_general_cap(dev, vport, query_ctx);
+ if (err)
+ goto out_free;
+
+ hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
+ *vhca_id = MLX5_GET(cmd_hca_cap, hca_caps, vhca_id);
+
+out_free:
+ kfree(query_ctx);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_vport_get_vhca_id);
+
int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap,
u16 vport, u16 opmod)
{
- bool ec_vf_func = mlx5_core_is_ec_vf_vport(dev, vport);
int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+ u16 vhca_id = 0, function_id = 0;
+ bool ec_vf_func = false;
void *set_hca_cap;
void *set_ctx;
int ret;
@@ -1211,14 +1293,29 @@ int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap
if (!set_ctx)
return -ENOMEM;
+ /* If this vport refers to a vport on the EC PF (embedded CPU),
+ * let the FW know which domain we are modifying since vport numbers or
+ * function_ids are not unique across the different PF domains,
+ * unless we use vhca_id as the function_id below.
+ */
+ ec_vf_func = mlx5_core_is_ec_vf_vport(dev, vport);
+ function_id = mlx5_vport_to_func_id(dev, vport, ec_vf_func);
+
+ if (mlx5_vport_use_vhca_id_as_func_id(dev, vport, &vhca_id)) {
+ MLX5_SET(set_hca_cap_in, set_ctx, function_id_type, 1);
+ function_id = vhca_id;
+ ec_vf_func = false;
+ mlx5_core_dbg(dev, "%s using vhca_id as function_id for vport %d vhca_id 0x%x\n",
+ __func__, vport, vhca_id);
+ }
+
MLX5_SET(set_hca_cap_in, set_ctx, opcode, MLX5_CMD_OP_SET_HCA_CAP);
MLX5_SET(set_hca_cap_in, set_ctx, op_mod, opmod << 1);
set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
memcpy(set_hca_cap, hca_cap, MLX5_ST_SZ_BYTES(cmd_hca_cap));
- MLX5_SET(set_hca_cap_in, set_ctx, function_id,
- mlx5_vport_to_func_id(dev, vport, ec_vf_func));
MLX5_SET(set_hca_cap_in, set_ctx, other_function, true);
MLX5_SET(set_hca_cap_in, set_ctx, ec_vf_function, ec_vf_func);
+ MLX5_SET(set_hca_cap_in, set_ctx, function_id, function_id);
ret = mlx5_cmd_exec_in(dev, set_hca_cap, set_ctx);
kfree(set_ctx);
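As context for the newly exported mlx5_vport_get_vhca_id() above, a caller that needs the vhca_id backing a vport can use the helper directly instead of open-coding the eswitch lookup and HCA-cap query. A minimal sketch follows; the caller name and surrounding logic are illustrative only and not part of this patch.

/* Hypothetical caller: resolve and log the vhca_id of a given vport. */
static int example_log_vport_vhca_id(struct mlx5_core_dev *dev, u16 vport)
{
	u16 vhca_id;
	int err;

	err = mlx5_vport_get_vhca_id(dev, vport, &vhca_id);
	if (err)
		return err;	/* both the eswitch lookup and the HCA-cap query failed */

	mlx5_core_dbg(dev, "vport %u -> vhca_id 0x%x\n", vport, vhca_id);
	return 0;
}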
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wc.c b/drivers/net/ethernet/mellanox/mlx5/core/wc.c
new file mode 100644
index 000000000000..815a7c97d6b0
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wc.c
@@ -0,0 +1,464 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include <linux/io.h>
+#include <linux/mlx5/transobj.h>
+#include "lib/clock.h"
+#include "mlx5_core.h"
+#include "wq.h"
+
+#if IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && IS_ENABLED(CONFIG_ARM64)
+#include <asm/neon.h>
+#include <asm/simd.h>
+#endif
+
+#define TEST_WC_NUM_WQES 255
+#define TEST_WC_LOG_CQ_SZ (order_base_2(TEST_WC_NUM_WQES))
+#define TEST_WC_SQ_LOG_WQ_SZ TEST_WC_LOG_CQ_SZ
+#define TEST_WC_POLLING_MAX_TIME_JIFFIES msecs_to_jiffies(100)
+
+struct mlx5_wc_cq {
+ /* data path - accessed per cqe */
+ struct mlx5_cqwq wq;
+
+ /* data path - accessed per napi poll */
+ struct mlx5_core_cq mcq;
+
+ /* control */
+ struct mlx5_core_dev *mdev;
+ struct mlx5_wq_ctrl wq_ctrl;
+};
+
+struct mlx5_wc_sq {
+ /* data path */
+ u16 cc;
+ u16 pc;
+
+ /* read only */
+ struct mlx5_wq_cyc wq;
+ u32 sqn;
+
+ /* control path */
+ struct mlx5_wq_ctrl wq_ctrl;
+
+ struct mlx5_wc_cq cq;
+ struct mlx5_sq_bfreg bfreg;
+};
+
+static int mlx5_wc_create_cqwq(struct mlx5_core_dev *mdev, void *cqc,
+ struct mlx5_wc_cq *cq)
+{
+ struct mlx5_core_cq *mcq = &cq->mcq;
+ struct mlx5_wq_param param = {};
+ int err;
+ u32 i;
+
+ err = mlx5_cqwq_create(mdev, &param, cqc, &cq->wq, &cq->wq_ctrl);
+ if (err)
+ return err;
+
+ mcq->cqe_sz = 64;
+ mcq->set_ci_db = cq->wq_ctrl.db.db;
+ mcq->arm_db = cq->wq_ctrl.db.db + 1;
+
+ for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
+ struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
+
+ cqe->op_own = 0xf1;
+ }
+
+ cq->mdev = mdev;
+
+ return 0;
+}
+
+static int create_wc_cq(struct mlx5_wc_cq *cq, void *cqc_data)
+{
+ u32 out[MLX5_ST_SZ_DW(create_cq_out)];
+ struct mlx5_core_dev *mdev = cq->mdev;
+ struct mlx5_core_cq *mcq = &cq->mcq;
+ int err, inlen, eqn;
+ void *in, *cqc;
+
+ err = mlx5_comp_eqn_get(mdev, 0, &eqn);
+ if (err)
+ return err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+ sizeof(u64) * cq->wq_ctrl.buf.npages;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
+
+ memcpy(cqc, cqc_data, MLX5_ST_SZ_BYTES(cqc));
+
+ mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
+ (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
+
+ MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
+ MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
+ MLX5_SET(cqc, cqc, uar_page, mdev->priv.bfreg.up->index);
+ MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
+ MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
+
+ err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));
+
+ kvfree(in);
+
+ return err;
+}
+
+static int mlx5_wc_create_cq(struct mlx5_core_dev *mdev, struct mlx5_wc_cq *cq)
+{
+ void *cqc;
+ int err;
+
+ cqc = kvzalloc(MLX5_ST_SZ_BYTES(cqc), GFP_KERNEL);
+ if (!cqc)
+ return -ENOMEM;
+
+ MLX5_SET(cqc, cqc, log_cq_size, TEST_WC_LOG_CQ_SZ);
+ MLX5_SET(cqc, cqc, uar_page, mdev->priv.bfreg.up->index);
+ if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128)
+ MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);
+
+ err = mlx5_wc_create_cqwq(mdev, cqc, cq);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to create wc cq wq, err=%d\n", err);
+ goto err_create_cqwq;
+ }
+
+ err = create_wc_cq(cq, cqc);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to create wc cq, err=%d\n", err);
+ goto err_create_cq;
+ }
+
+ kvfree(cqc);
+ return 0;
+
+err_create_cq:
+ mlx5_wq_destroy(&cq->wq_ctrl);
+err_create_cqwq:
+ kvfree(cqc);
+ return err;
+}
+
+static void mlx5_wc_destroy_cq(struct mlx5_wc_cq *cq)
+{
+ mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
+ mlx5_wq_destroy(&cq->wq_ctrl);
+}
+
+static int create_wc_sq(struct mlx5_core_dev *mdev, void *sqc_data,
+ struct mlx5_wc_sq *sq)
+{
+ void *in, *sqc, *wq;
+ int inlen, err;
+ u8 ts_format;
+
+ inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
+ sizeof(u64) * sq->wq_ctrl.buf.npages;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
+ wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ memcpy(sqc, sqc_data, MLX5_ST_SZ_BYTES(sqc));
+ MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);
+
+ MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
+ MLX5_SET(sqc, sqc, flush_in_error_en, 1);
+
+ ts_format = mlx5_is_real_time_sq(mdev) ?
+ MLX5_TIMESTAMP_FORMAT_REAL_TIME :
+ MLX5_TIMESTAMP_FORMAT_FREE_RUNNING;
+ MLX5_SET(sqc, sqc, ts_format, ts_format);
+
+ MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
+ MLX5_SET(wq, wq, uar_page, sq->bfreg.index);
+ MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
+ MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);
+
+ mlx5_fill_page_frag_array(&sq->wq_ctrl.buf,
+ (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
+
+ err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to create wc sq, err=%d\n", err);
+ goto err_create_sq;
+ }
+
+ memset(in, 0, MLX5_ST_SZ_BYTES(modify_sq_in));
+ MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST);
+ sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
+ MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY);
+
+ err = mlx5_core_modify_sq(mdev, sq->sqn, in);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to set wc sq(sqn=0x%x) ready, err=%d\n",
+ sq->sqn, err);
+ goto err_modify_sq;
+ }
+
+ kvfree(in);
+ return 0;
+
+err_modify_sq:
+ mlx5_core_destroy_sq(mdev, sq->sqn);
+err_create_sq:
+ kvfree(in);
+ return err;
+}
+
+static int mlx5_wc_create_sq(struct mlx5_core_dev *mdev, struct mlx5_wc_sq *sq)
+{
+ struct mlx5_wq_param param = {};
+ void *sqc_data, *wq;
+ int err;
+
+ sqc_data = kvzalloc(MLX5_ST_SZ_BYTES(sqc), GFP_KERNEL);
+ if (!sqc_data)
+ return -ENOMEM;
+
+ wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
+ MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
+ MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn);
+ MLX5_SET(wq, wq, log_wq_sz, TEST_WC_SQ_LOG_WQ_SZ);
+
+ err = mlx5_wq_cyc_create(mdev, &param, wq, &sq->wq, &sq->wq_ctrl);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to create wc sq wq, err=%d\n", err);
+ goto err_create_wq_cyc;
+ }
+
+ err = create_wc_sq(mdev, sqc_data, sq);
+ if (err)
+ goto err_create_sq;
+
+ mlx5_core_dbg(mdev, "wc sq->sqn = 0x%x created\n", sq->sqn);
+
+ kvfree(sqc_data);
+ return 0;
+
+err_create_sq:
+ mlx5_wq_destroy(&sq->wq_ctrl);
+err_create_wq_cyc:
+ kvfree(sqc_data);
+ return err;
+}
+
+static void mlx5_wc_destroy_sq(struct mlx5_wc_sq *sq)
+{
+ mlx5_core_destroy_sq(sq->cq.mdev, sq->sqn);
+ mlx5_wq_destroy(&sq->wq_ctrl);
+}
+
+static void mlx5_iowrite64_copy(struct mlx5_wc_sq *sq, __be32 mmio_wqe[16],
+ size_t mmio_wqe_size, unsigned int offset)
+{
+#if IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && IS_ENABLED(CONFIG_ARM64)
+ if (cpu_has_neon()) {
+ scoped_ksimd() {
+ asm volatile(
+ ".arch_extension simd\n\t"
+ "ld1 {v0.16b, v1.16b, v2.16b, v3.16b}, [%0]\n\t"
+ "st1 {v0.16b, v1.16b, v2.16b, v3.16b}, [%1]"
+ :
+ : "r"(mmio_wqe), "r"(sq->bfreg.map + offset)
+ : "memory", "v0", "v1", "v2", "v3");
+ }
+ return;
+ }
+#endif
+ __iowrite64_copy(sq->bfreg.map + offset, mmio_wqe,
+ mmio_wqe_size / 8);
+}
+
+static void mlx5_wc_post_nop(struct mlx5_wc_sq *sq, unsigned int *offset,
+ bool signaled)
+{
+ int buf_size = (1 << MLX5_CAP_GEN(sq->cq.mdev, log_bf_reg_size)) / 2;
+ struct mlx5_wqe_ctrl_seg *ctrl;
+ __be32 mmio_wqe[16] = {};
+ u16 pi;
+
+ pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
+ ctrl = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
+ memset(ctrl, 0, sizeof(*ctrl));
+ ctrl->opmod_idx_opcode =
+ cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) | MLX5_OPCODE_NOP);
+ ctrl->qpn_ds =
+ cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
+ DIV_ROUND_UP(sizeof(struct mlx5_wqe_ctrl_seg), MLX5_SEND_WQE_DS));
+ if (signaled)
+ ctrl->fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE;
+
+ memcpy(mmio_wqe, ctrl, sizeof(*ctrl));
+ ((struct mlx5_wqe_ctrl_seg *)&mmio_wqe)->fm_ce_se |=
+ MLX5_WQE_CTRL_CQ_UPDATE;
+
+ /* ensure wqe is visible to device before updating doorbell record */
+ dma_wmb();
+
+ sq->pc++;
+ sq->wq.db[MLX5_SND_DBR] = cpu_to_be32(sq->pc);
+
+ /* ensure doorbell record is visible to device before ringing the
+ * doorbell
+ */
+ wmb();
+
+ mlx5_iowrite64_copy(sq, mmio_wqe, sizeof(mmio_wqe), *offset);
+
+ *offset ^= buf_size;
+}
+
+static int mlx5_wc_poll_cq(struct mlx5_wc_sq *sq)
+{
+ struct mlx5_wc_cq *cq = &sq->cq;
+ struct mlx5_cqe64 *cqe;
+
+ cqe = mlx5_cqwq_get_cqe(&cq->wq);
+ if (!cqe)
+ return -ETIMEDOUT;
+
+ /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
+ * otherwise a cq overrun may occur
+ */
+ mlx5_cqwq_pop(&cq->wq);
+
+ if (get_cqe_opcode(cqe) == MLX5_CQE_REQ) {
+ int wqe_counter = be16_to_cpu(cqe->wqe_counter);
+ struct mlx5_core_dev *mdev = cq->mdev;
+
+ if (wqe_counter == TEST_WC_NUM_WQES - 1)
+ mdev->wc_state = MLX5_WC_STATE_UNSUPPORTED;
+ else
+ mdev->wc_state = MLX5_WC_STATE_SUPPORTED;
+
+ mlx5_core_dbg(mdev, "wc wqe_counter = 0x%x\n", wqe_counter);
+ }
+
+ mlx5_cqwq_update_db_record(&cq->wq);
+
+ /* ensure cq space is freed before enabling more cqes */
+ wmb();
+
+ sq->cc++;
+
+ return 0;
+}
+
+static void mlx5_core_test_wc(struct mlx5_core_dev *mdev)
+{
+ unsigned int offset = 0;
+ unsigned long expires;
+ struct mlx5_wc_sq *sq;
+ int i, err;
+
+ if (mdev->wc_state != MLX5_WC_STATE_UNINITIALIZED)
+ return;
+
+ sq = kzalloc(sizeof(*sq), GFP_KERNEL);
+ if (!sq)
+ return;
+
+ err = mlx5_alloc_bfreg(mdev, &sq->bfreg, true, false);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to alloc bfreg for wc, err=%d\n", err);
+ goto err_alloc_bfreg;
+ }
+
+ err = mlx5_wc_create_cq(mdev, &sq->cq);
+ if (err)
+ goto err_create_cq;
+
+ err = mlx5_wc_create_sq(mdev, sq);
+ if (err)
+ goto err_create_sq;
+
+ for (i = 0; i < TEST_WC_NUM_WQES - 1; i++)
+ mlx5_wc_post_nop(sq, &offset, false);
+
+ mlx5_wc_post_nop(sq, &offset, true);
+
+ expires = jiffies + TEST_WC_POLLING_MAX_TIME_JIFFIES;
+ do {
+ err = mlx5_wc_poll_cq(sq);
+ if (err)
+ usleep_range(2, 10);
+ } while (mdev->wc_state == MLX5_WC_STATE_UNINITIALIZED &&
+ time_is_after_jiffies(expires));
+
+ mlx5_wc_destroy_sq(sq);
+
+err_create_sq:
+ mlx5_wc_destroy_cq(&sq->cq);
+err_create_cq:
+ mlx5_free_bfreg(mdev, &sq->bfreg);
+err_alloc_bfreg:
+ kfree(sq);
+
+ if (mdev->wc_state == MLX5_WC_STATE_UNSUPPORTED)
+ mlx5_core_warn(mdev, "Write combining is not supported\n");
+}
+
+bool mlx5_wc_support_get(struct mlx5_core_dev *mdev)
+{
+ struct mutex *wc_state_lock = &mdev->wc_state_lock;
+ struct mlx5_core_dev *parent = NULL;
+
+ if (!MLX5_CAP_GEN(mdev, bf)) {
+ mlx5_core_dbg(mdev, "BlueFlame not supported\n");
+ goto out;
+ }
+
+ if (!MLX5_CAP_GEN(mdev, log_max_sq)) {
+ mlx5_core_dbg(mdev, "SQ not supported\n");
+ goto out;
+ }
+
+ if (mdev->wc_state != MLX5_WC_STATE_UNINITIALIZED)
+ /* No need to take the lock: the WC test is performed only
+ * once for the whole device and has already been done.
+ */
+ goto out;
+
+#ifdef CONFIG_MLX5_SF
+ if (mlx5_core_is_sf(mdev)) {
+ parent = mdev->priv.parent_mdev;
+ wc_state_lock = &parent->wc_state_lock;
+ }
+#endif
+
+ mutex_lock(wc_state_lock);
+
+ if (mdev->wc_state != MLX5_WC_STATE_UNINITIALIZED)
+ goto unlock;
+
+ if (parent) {
+ mlx5_core_test_wc(parent);
+
+ mlx5_core_dbg(mdev, "parent set wc_state=%d\n",
+ parent->wc_state);
+ mdev->wc_state = parent->wc_state;
+
+ } else {
+ mlx5_core_test_wc(mdev);
+ }
+
+unlock:
+ mutex_unlock(wc_state_lock);
+out:
+ mlx5_core_dbg(mdev, "wc_state=%d\n", mdev->wc_state);
+
+ return mdev->wc_state == MLX5_WC_STATE_SUPPORTED;
+}
+EXPORT_SYMBOL(mlx5_wc_support_get);
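The exported mlx5_wc_support_get() above is meant to be consulted before a consumer relies on write-combining doorbells; the first call triggers the one-time NOP/CQE test implemented in this file. A minimal sketch of such gating is shown below, reusing the mlx5_alloc_bfreg() signature already used above; the caller is hypothetical and not part of this file.

/* Hypothetical consumer: request a write-combining blue-flame register
 * only when the one-time WC test reported support, otherwise fall back
 * to a regular (non-WC) mapping and the doorbell-record path.
 */
static int example_alloc_doorbell(struct mlx5_core_dev *mdev,
				  struct mlx5_sq_bfreg *bfreg)
{
	bool map_wc = mlx5_wc_support_get(mdev);

	return mlx5_alloc_bfreg(mdev, bfreg, map_wc, false);
}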
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
index e4ef1d24a3ad..6debb8fd33ff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -244,7 +244,7 @@ static inline struct mlx5_cqe64 *mlx5_cqwq_get_cqe(struct mlx5_cqwq *wq)
}
static inline
-struct mlx5_cqe64 *mlx5_cqwq_get_cqe_enahnced_comp(struct mlx5_cqwq *wq)
+struct mlx5_cqe64 *mlx5_cqwq_get_cqe_enhanced_comp(struct mlx5_cqwq *wq)
{
u8 sw_validity_iteration_count = mlx5_cqwq_get_wrap_cnt(wq) & 0xff;
u32 ci = mlx5_cqwq_get_ci(wq);
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
index bc94e75a7aeb..e7777700ee18 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
@@ -40,6 +40,7 @@
*/
#define MLXBF_GIGE_BCAST_MAC_FILTER_IDX 0
#define MLXBF_GIGE_LOCAL_MAC_FILTER_IDX 1
+#define MLXBF_GIGE_MAX_FILTER_IDX 3
/* Define for broadcast MAC literal */
#define BCAST_MAC_ADDR 0xFFFFFFFFFFFF
@@ -175,6 +176,13 @@ enum mlxbf_gige_res {
int mlxbf_gige_mdio_probe(struct platform_device *pdev,
struct mlxbf_gige *priv);
void mlxbf_gige_mdio_remove(struct mlxbf_gige *priv);
+
+void mlxbf_gige_enable_multicast_rx(struct mlxbf_gige *priv);
+void mlxbf_gige_disable_multicast_rx(struct mlxbf_gige *priv);
+void mlxbf_gige_enable_mac_rx_filter(struct mlxbf_gige *priv,
+ unsigned int index);
+void mlxbf_gige_disable_mac_rx_filter(struct mlxbf_gige *priv,
+ unsigned int index);
void mlxbf_gige_set_mac_rx_filter(struct mlxbf_gige *priv,
unsigned int index, u64 dmac);
void mlxbf_gige_get_mac_rx_filter(struct mlxbf_gige *priv,
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
index ba303868686a..d1f8a72cae53 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
@@ -142,8 +142,10 @@ static int mlxbf_gige_open(struct net_device *netdev)
mlxbf_gige_cache_stats(priv);
err = mlxbf_gige_clean_port(priv);
- if (err)
+ if (err) {
+ dev_err(priv->dev, "open: clean_port failed: %pe\n", ERR_PTR(err));
return err;
+ }
/* Clear driver's valid_polarity to match hardware,
* since the above call to clean_port() resets the
@@ -154,19 +156,29 @@ static int mlxbf_gige_open(struct net_device *netdev)
phy_start(phydev);
err = mlxbf_gige_tx_init(priv);
- if (err)
+ if (err) {
+ dev_err(priv->dev, "open: tx_init failed: %pe\n", ERR_PTR(err));
goto phy_deinit;
+ }
err = mlxbf_gige_rx_init(priv);
- if (err)
+ if (err) {
+ dev_err(priv->dev, "open: rx_init failed: %pe\n", ERR_PTR(err));
goto tx_deinit;
+ }
netif_napi_add(netdev, &priv->napi, mlxbf_gige_poll);
napi_enable(&priv->napi);
netif_start_queue(netdev);
err = mlxbf_gige_request_irqs(priv);
- if (err)
+ if (err) {
+ dev_err(priv->dev, "open: request_irqs failed: %pe\n", ERR_PTR(err));
goto napi_deinit;
+ }
+
+ mlxbf_gige_enable_mac_rx_filter(priv, MLXBF_GIGE_BCAST_MAC_FILTER_IDX);
+ mlxbf_gige_enable_mac_rx_filter(priv, MLXBF_GIGE_LOCAL_MAC_FILTER_IDX);
+ mlxbf_gige_enable_multicast_rx(priv);
/* Set bits in INT_EN that we care about */
int_en = MLXBF_GIGE_INT_EN_HW_ACCESS_ERROR |
@@ -379,6 +391,7 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
void __iomem *plu_base;
void __iomem *base;
int addr, phy_irq;
+ unsigned int i;
int err;
base = devm_platform_ioremap_resource(pdev, MLXBF_GIGE_RES_MAC);
@@ -413,8 +426,10 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
/* Attach MDIO device */
err = mlxbf_gige_mdio_probe(pdev, priv);
- if (err)
+ if (err) {
+ dev_err(priv->dev, "probe: mdio_probe failed: %pe\n", ERR_PTR(err));
return err;
+ }
priv->base = base;
priv->llu_base = llu_base;
@@ -423,12 +438,17 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
priv->rx_q_entries = MLXBF_GIGE_DEFAULT_RXQ_SZ;
priv->tx_q_entries = MLXBF_GIGE_DEFAULT_TXQ_SZ;
+ for (i = 0; i <= MLXBF_GIGE_MAX_FILTER_IDX; i++)
+ mlxbf_gige_disable_mac_rx_filter(priv, i);
+ mlxbf_gige_disable_multicast_rx(priv);
+ mlxbf_gige_disable_promisc(priv);
+
/* Write initial MAC address to hardware */
mlxbf_gige_initial_mac(priv);
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
- dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err);
+ dev_err(&pdev->dev, "DMA configuration failed: %pe\n", ERR_PTR(err));
goto out;
}
@@ -436,9 +456,11 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
priv->rx_irq = platform_get_irq(pdev, MLXBF_GIGE_RECEIVE_PKT_INTR_IDX);
priv->llu_plu_irq = platform_get_irq(pdev, MLXBF_GIGE_LLU_PLU_INTR_IDX);
- phy_irq = acpi_dev_gpio_irq_get_by(ACPI_COMPANION(&pdev->dev), "phy-gpios", 0);
- if (phy_irq < 0) {
- dev_err(&pdev->dev, "Error getting PHY irq. Use polling instead");
+ phy_irq = acpi_dev_gpio_irq_get_by(ACPI_COMPANION(&pdev->dev), "phy", 0);
+ if (phy_irq == -EPROBE_DEFER) {
+ err = -EPROBE_DEFER;
+ goto out;
+ } else if (phy_irq < 0) {
phy_irq = PHY_POLL;
}
@@ -456,7 +478,7 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
mlxbf_gige_link_cfgs[priv->hw_version].adjust_link,
mlxbf_gige_link_cfgs[priv->hw_version].phy_mode);
if (err) {
- dev_err(&pdev->dev, "Could not attach to PHY\n");
+ dev_err(&pdev->dev, "Could not attach to PHY: %pe\n", ERR_PTR(err));
goto out;
}
@@ -467,7 +489,7 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
err = register_netdev(netdev);
if (err) {
- dev_err(&pdev->dev, "Failed to register netdev\n");
+ dev_err(&pdev->dev, "Failed to register netdev: %pe\n", ERR_PTR(err));
phy_disconnect(phydev);
goto out;
}
@@ -510,7 +532,7 @@ MODULE_DEVICE_TABLE(acpi, mlxbf_gige_acpi_match);
static struct platform_driver mlxbf_gige_driver = {
.probe = mlxbf_gige_probe,
- .remove_new = mlxbf_gige_remove,
+ .remove = mlxbf_gige_remove,
.shutdown = mlxbf_gige_shutdown,
.driver = {
.name = KBUILD_MODNAME,
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
index 98a8681c21b9..4d14cb13fd64 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
@@ -62,6 +62,8 @@
#define MLXBF_GIGE_TX_STATUS_DATA_FIFO_FULL BIT(1)
#define MLXBF_GIGE_RX_MAC_FILTER_DMAC_RANGE_START 0x0520
#define MLXBF_GIGE_RX_MAC_FILTER_DMAC_RANGE_END 0x0528
+#define MLXBF_GIGE_RX_MAC_FILTER_GENERAL 0x0530
+#define MLXBF_GIGE_RX_MAC_FILTER_EN_MULTICAST BIT(1)
#define MLXBF_GIGE_RX_MAC_FILTER_COUNT_DISC 0x0540
#define MLXBF_GIGE_RX_MAC_FILTER_COUNT_DISC_EN BIT(0)
#define MLXBF_GIGE_RX_MAC_FILTER_COUNT_PASS 0x0548
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
index 699984358493..eb62620b63c7 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
@@ -11,15 +11,31 @@
#include "mlxbf_gige.h"
#include "mlxbf_gige_regs.h"
-void mlxbf_gige_set_mac_rx_filter(struct mlxbf_gige *priv,
- unsigned int index, u64 dmac)
+void mlxbf_gige_enable_multicast_rx(struct mlxbf_gige *priv)
{
void __iomem *base = priv->base;
- u64 control;
+ u64 data;
- /* Write destination MAC to specified MAC RX filter */
- writeq(dmac, base + MLXBF_GIGE_RX_MAC_FILTER +
- (index * MLXBF_GIGE_RX_MAC_FILTER_STRIDE));
+ data = readq(base + MLXBF_GIGE_RX_MAC_FILTER_GENERAL);
+ data |= MLXBF_GIGE_RX_MAC_FILTER_EN_MULTICAST;
+ writeq(data, base + MLXBF_GIGE_RX_MAC_FILTER_GENERAL);
+}
+
+void mlxbf_gige_disable_multicast_rx(struct mlxbf_gige *priv)
+{
+ void __iomem *base = priv->base;
+ u64 data;
+
+ data = readq(base + MLXBF_GIGE_RX_MAC_FILTER_GENERAL);
+ data &= ~MLXBF_GIGE_RX_MAC_FILTER_EN_MULTICAST;
+ writeq(data, base + MLXBF_GIGE_RX_MAC_FILTER_GENERAL);
+}
+
+void mlxbf_gige_enable_mac_rx_filter(struct mlxbf_gige *priv,
+ unsigned int index)
+{
+ void __iomem *base = priv->base;
+ u64 control;
/* Enable MAC receive filter mask for specified index */
control = readq(base + MLXBF_GIGE_CONTROL);
@@ -27,6 +43,28 @@ void mlxbf_gige_set_mac_rx_filter(struct mlxbf_gige *priv,
writeq(control, base + MLXBF_GIGE_CONTROL);
}
+void mlxbf_gige_disable_mac_rx_filter(struct mlxbf_gige *priv,
+ unsigned int index)
+{
+ void __iomem *base = priv->base;
+ u64 control;
+
+ /* Disable MAC receive filter mask for specified index */
+ control = readq(base + MLXBF_GIGE_CONTROL);
+ control &= ~(MLXBF_GIGE_CONTROL_EN_SPECIFIC_MAC << index);
+ writeq(control, base + MLXBF_GIGE_CONTROL);
+}
+
+void mlxbf_gige_set_mac_rx_filter(struct mlxbf_gige *priv,
+ unsigned int index, u64 dmac)
+{
+ void __iomem *base = priv->base;
+
+ /* Write destination MAC to specified MAC RX filter */
+ writeq(dmac, base + MLXBF_GIGE_RX_MAC_FILTER +
+ (index * MLXBF_GIGE_RX_MAC_FILTER_STRIDE));
+}
+
void mlxbf_gige_get_mac_rx_filter(struct mlxbf_gige *priv,
unsigned int index, u64 *dmac)
{
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
index 46245e0b2462..43c84900369a 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
@@ -14,7 +14,6 @@
#define MLXFW_FSM_STATE_WAIT_TIMEOUT_MS 30000
#define MLXFW_FSM_STATE_WAIT_ROUNDS \
(MLXFW_FSM_STATE_WAIT_TIMEOUT_MS / MLXFW_FSM_STATE_WAIT_CYCLE_MS)
-#define MLXFW_FSM_MAX_COMPONENT_SIZE (10 * (1 << 20))
static const int mlxfw_fsm_state_errno[] = {
[MLXFW_FSM_STATE_ERR_ERROR] = -EIO,
@@ -229,7 +228,6 @@ static int mlxfw_flash_component(struct mlxfw_dev *mlxfw_dev,
return err;
}
- comp_max_size = min_t(u32, comp_max_size, MLXFW_FSM_MAX_COMPONENT_SIZE);
if (comp->data_size > comp_max_size) {
MLXFW_ERR_MSG(mlxfw_dev, extack,
"Component size is bigger than limit", -EINVAL);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index a510bf2cff2f..74f7e27b490f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -33,6 +33,7 @@ config MLXSW_CORE_THERMAL
config MLXSW_PCI
tristate "PCI bus implementation for Mellanox Technologies Switch ASICs"
depends on PCI && HAS_IOMEM && MLXSW_CORE
+ select PAGE_POOL
default m
help
This is PCI bus implementation for Mellanox Technologies Switch ASICs.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index e4d7739bd7c8..83c7cf3bbea3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -35,6 +35,7 @@
#include "reg.h"
#include "resources.h"
#include "../mlxfw/mlxfw.h"
+#include "txheader.h"
static LIST_HEAD(mlxsw_core_driver_list);
static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);
@@ -677,7 +678,7 @@ struct mlxsw_reg_trans {
struct list_head bulk_list;
struct mlxsw_core *core;
struct sk_buff *tx_skb;
- struct mlxsw_tx_info tx_info;
+ struct mlxsw_txhdr_info txhdr_info;
struct delayed_work timeout_dw;
unsigned int retries;
u64 tid;
@@ -737,12 +738,11 @@ static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
if (!skb)
return -ENOMEM;
- trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), false, 0,
- skb->data + mlxsw_core->driver->txhdr_len,
- skb->len - mlxsw_core->driver->txhdr_len);
+ trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), false, 0, skb->data,
+ skb->len);
atomic_set(&trans->active, 1);
- err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->tx_info);
+ err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->txhdr_info);
if (err) {
dev_kfree_skb(skb);
return err;
@@ -849,7 +849,7 @@ free_skb:
static const struct mlxsw_listener mlxsw_emad_rx_listener =
MLXSW_RXL(mlxsw_emad_rx_listener_func, ETHEMAD, TRAP_TO_CPU, false,
- EMAD, DISCARD);
+ EMAD, FORWARD);
static int mlxsw_emad_tlv_enable(struct mlxsw_core *mlxsw_core)
{
@@ -886,7 +886,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
return 0;
- emad_wq = alloc_workqueue("mlxsw_core_emad", 0, 0);
+ emad_wq = alloc_workqueue("mlxsw_core_emad", WQ_PERCPU, 0);
if (!emad_wq)
return -ENOMEM;
mlxsw_core->emad_wq = emad_wq;
@@ -944,7 +944,7 @@ static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
(MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
- sizeof(u32) + mlxsw_core->driver->txhdr_len);
+ sizeof(u32) + MLXSW_TXHDR_LEN);
if (mlxsw_core->emad.enable_string_tlv)
emad_len += MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32);
if (mlxsw_core->emad.enable_latency_tlv)
@@ -984,8 +984,8 @@ static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
list_add_tail(&trans->bulk_list, bulk_list);
trans->core = mlxsw_core;
trans->tx_skb = skb;
- trans->tx_info.local_port = MLXSW_PORT_CPU_PORT;
- trans->tx_info.is_emad = true;
+ trans->txhdr_info.tx_info.local_port = MLXSW_PORT_CPU_PORT;
+ trans->txhdr_info.tx_info.is_emad = true;
INIT_DELAYED_WORK(&trans->timeout_dw, mlxsw_emad_trans_timeout_work);
trans->tid = tid;
init_completion(&trans->completion);
@@ -995,7 +995,6 @@ static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
trans->type = type;
mlxsw_emad_construct(mlxsw_core, skb, reg, payload, type, trans->tid);
- mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info);
spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
list_add_tail_rcu(&trans->list, &mlxsw_core->emad.trans_list);
@@ -2044,7 +2043,7 @@ static int mlxsw_core_health_init(struct mlxsw_core *mlxsw_core)
return 0;
fw_fatal = devl_health_reporter_create(devlink, &mlxsw_core_health_fw_fatal_ops,
- 0, mlxsw_core);
+ mlxsw_core);
if (IS_ERR(fw_fatal)) {
dev_err(mlxsw_core->bus_info->dev, "Failed to create fw fatal reporter");
return PTR_ERR(fw_fatal);
@@ -2330,10 +2329,10 @@ bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);
int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
+ const struct mlxsw_txhdr_info *txhdr_info)
{
return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
- tx_info);
+ txhdr_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit);
@@ -3382,7 +3381,7 @@ static int __init mlxsw_core_module_init(void)
if (err)
return err;
- mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, 0, 0);
+ mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, WQ_PERCPU, 0);
if (!mlxsw_wq) {
err = -ENOMEM;
goto err_alloc_workqueue;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 6d11225594dd..1a871397a6df 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -72,7 +72,14 @@ struct mlxsw_tx_info {
bool is_emad;
};
+struct mlxsw_txhdr_info {
+ struct mlxsw_tx_info tx_info;
+ bool data;
+ u16 max_fid; /* Used for PTP packets which are sent as data. */
+};
+
struct mlxsw_rx_md_info {
+ struct napi_struct *napi;
u32 cookie_index;
u32 latency;
u32 tx_congestion;
@@ -94,7 +101,7 @@ struct mlxsw_rx_md_info {
bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
const struct mlxsw_tx_info *tx_info);
int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info);
+ const struct mlxsw_txhdr_info *txhdr_info);
void mlxsw_core_ptp_transmitted(struct mlxsw_core *mlxsw_core,
struct sk_buff *skb, u16 local_port);
@@ -425,8 +432,6 @@ struct mlxsw_driver {
int (*trap_policer_counter_get)(struct mlxsw_core *mlxsw_core,
const struct devlink_trap_policer *policer,
u64 *p_drops);
- void (*txhdr_construct)(struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info);
int (*resources_register)(struct mlxsw_core *mlxsw_core);
int (*kvd_sizes_get)(struct mlxsw_core *mlxsw_core,
const struct mlxsw_config_profile *profile,
@@ -439,7 +444,6 @@ struct mlxsw_driver {
void (*ptp_transmitted)(struct mlxsw_core *mlxsw_core,
struct sk_buff *skb, u16 local_port);
- u8 txhdr_len;
const struct mlxsw_config_profile *profile;
bool sdq_supports_cqe_v2;
};
@@ -486,7 +490,7 @@ struct mlxsw_bus {
bool (*skb_transmit_busy)(void *bus_priv,
const struct mlxsw_tx_info *tx_info);
int (*skb_transmit)(void *bus_priv, struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info);
+ const struct mlxsw_txhdr_info *txhdr_info);
int (*cmd_exec)(void *bus_priv, u16 opcode, u8 opcode_mod,
u32 in_mod, bool out_mbox_direct,
char *in_mbox, size_t in_mbox_size,
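For orientation, with struct mlxsw_tx_info now embedded in struct mlxsw_txhdr_info, transmit paths fill the wrapper rather than the bare tx_info, as the EMAD code above already does. A minimal sketch of a control-packet transmit is shown below; the function is illustrative only, with field values chosen as an assumption rather than taken from this patch.

/* Hypothetical control-packet transmit from the CPU port (non-EMAD).
 * .data and .max_fid stay zero; they only matter for packets sent as
 * data, e.g. PTP.
 */
static int example_transmit_control(struct mlxsw_core *mlxsw_core,
				    struct sk_buff *skb)
{
	struct mlxsw_txhdr_info txhdr_info = {
		.tx_info.local_port = MLXSW_PORT_CPU_PORT,
		.tx_info.is_emad = false,
	};

	return mlxsw_core_skb_transmit(mlxsw_core, skb, &txhdr_info);
}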
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
index 947500f8ed71..7aa1a462a103 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
@@ -67,7 +67,7 @@ static bool mlxsw_afk_blocks_check(struct mlxsw_afk *mlxsw_afk)
for (j = 0; j < block->instances_count; j++) {
const struct mlxsw_afk_element_info *elinfo;
- struct mlxsw_afk_element_inst *elinst;
+ const struct mlxsw_afk_element_inst *elinst;
elinst = &block->instances[j];
elinfo = &mlxsw_afk_element_infos[elinst->element];
@@ -154,7 +154,7 @@ static void mlxsw_afk_picker_count_hits(struct mlxsw_afk *mlxsw_afk,
const struct mlxsw_afk_block *block = &mlxsw_afk->blocks[i];
for (j = 0; j < block->instances_count; j++) {
- struct mlxsw_afk_element_inst *elinst;
+ const struct mlxsw_afk_element_inst *elinst;
elinst = &block->instances[j];
if (elinst->element == element) {
@@ -386,7 +386,7 @@ mlxsw_afk_block_elinst_get(const struct mlxsw_afk_block *block,
int i;
for (i = 0; i < block->instances_count; i++) {
- struct mlxsw_afk_element_inst *elinst;
+ const struct mlxsw_afk_element_inst *elinst;
elinst = &block->instances[i];
if (elinst->element == element)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
index 98a05598178b..5aa1afb3f2ca 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
@@ -117,7 +117,7 @@ struct mlxsw_afk_element_inst { /* element instance in actual block */
struct mlxsw_afk_block {
u16 encoding; /* block ID */
- struct mlxsw_afk_element_inst *instances;
+ const struct mlxsw_afk_element_inst *instances;
unsigned int instances_count;
bool high_entropy;
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
index 53b150b7ae4e..294e758f1067 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
@@ -513,6 +513,63 @@ mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_env_get_module_eeprom_by_page);
+int
+mlxsw_env_set_module_eeprom_by_page(struct mlxsw_core *mlxsw_core,
+ u8 slot_index, u8 module,
+ const struct ethtool_module_eeprom *page,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ u32 bytes_written = 0;
+ u16 device_addr;
+ int err;
+
+ if (!mlxsw_env_linecard_is_active(mlxsw_env, slot_index)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot write to EEPROM of a module on an inactive line card");
+ return -EIO;
+ }
+
+ err = mlxsw_env_validate_module_type(mlxsw_core, slot_index, module);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "EEPROM is not equipped on port module type");
+ return err;
+ }
+
+ device_addr = page->offset;
+
+ while (bytes_written < page->length) {
+ char mcia_pl[MLXSW_REG_MCIA_LEN];
+ char eeprom_tmp[128] = {};
+ u8 size;
+
+ size = min_t(u8, page->length - bytes_written,
+ mlxsw_env->max_eeprom_len);
+
+ mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, page->page,
+ device_addr + bytes_written, size,
+ page->i2c_address);
+ mlxsw_reg_mcia_bank_number_set(mcia_pl, page->bank);
+ memcpy(eeprom_tmp, page->data + bytes_written, size);
+ mlxsw_reg_mcia_eeprom_memcpy_to(mcia_pl, eeprom_tmp);
+
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcia), mcia_pl);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to access module's EEPROM");
+ return err;
+ }
+
+ err = mlxsw_env_mcia_status_process(mcia_pl, extack);
+ if (err)
+ return err;
+
+ bytes_written += size;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_env_set_module_eeprom_by_page);
+
static int mlxsw_env_module_reset(struct mlxsw_core *mlxsw_core, u8 slot_index,
u8 module)
{
@@ -1357,24 +1414,20 @@ static struct mlxsw_linecards_event_ops mlxsw_env_event_ops = {
.got_inactive = mlxsw_env_got_inactive,
};
-static int mlxsw_env_max_module_eeprom_len_query(struct mlxsw_env *mlxsw_env)
+static void mlxsw_env_max_module_eeprom_len_query(struct mlxsw_env *mlxsw_env)
{
char mcam_pl[MLXSW_REG_MCAM_LEN];
- bool mcia_128b_supported;
+ bool mcia_128b_supported = false;
int err;
mlxsw_reg_mcam_pack(mcam_pl,
MLXSW_REG_MCAM_FEATURE_GROUP_ENHANCED_FEATURES);
err = mlxsw_reg_query(mlxsw_env->core, MLXSW_REG(mcam), mcam_pl);
- if (err)
- return err;
-
- mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_MCIA_128B,
- &mcia_128b_supported);
+ if (!err)
+ mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_MCIA_128B,
+ &mcia_128b_supported);
mlxsw_env->max_eeprom_len = mcia_128b_supported ? 128 : 48;
-
- return 0;
}
int mlxsw_env_init(struct mlxsw_core *mlxsw_core,
@@ -1445,15 +1498,11 @@ int mlxsw_env_init(struct mlxsw_core *mlxsw_core,
if (err)
goto err_type_set;
- err = mlxsw_env_max_module_eeprom_len_query(env);
- if (err)
- goto err_eeprom_len_query;
-
+ mlxsw_env_max_module_eeprom_len_query(env);
env->line_cards[0]->active = true;
return 0;
-err_eeprom_len_query:
err_type_set:
mlxsw_env_module_event_disable(env, 0);
err_mlxsw_env_module_event_enable:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.h b/drivers/net/ethernet/mellanox/mlxsw/core_env.h
index a197e3ae069c..e4ff17869400 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.h
@@ -28,6 +28,12 @@ mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core,
const struct ethtool_module_eeprom *page,
struct netlink_ext_ack *extack);
+int
+mlxsw_env_set_module_eeprom_by_page(struct mlxsw_core *mlxsw_core,
+ u8 slot_index, u8 module,
+ const struct ethtool_module_eeprom *page,
+ struct netlink_ext_ack *extack);
+
int mlxsw_env_reset_module(struct net_device *netdev,
struct mlxsw_core *mlxsw_core, u8 slot_index,
u8 module, u32 *flags);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
index 025e0db983fe..10f5bc4892fc 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
@@ -601,6 +601,8 @@ int mlxsw_linecard_devlink_info_get(struct mlxsw_linecard *linecard,
err = devlink_info_version_fixed_put(req,
DEVLINK_INFO_VERSION_GENERIC_FW_PSID,
info->psid);
+ if (err)
+ goto unlock;
sprintf(buf, "%u.%u.%u", info->fw_major, info->fw_minor,
info->fw_sub_minor);
@@ -1484,6 +1486,7 @@ err_type_file_file_validate:
vfree(types_info->data);
err_data_alloc:
kfree(types_info);
+ linecards->types_info = NULL;
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index 5c511e1a8efa..eac9a14a6058 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -100,6 +100,12 @@ static const struct mlxsw_cooling_states default_cooling_states[] = {
struct mlxsw_thermal;
+struct mlxsw_thermal_cooling_device {
+ struct mlxsw_thermal *thermal;
+ struct thermal_cooling_device *cdev;
+ unsigned int idx;
+};
+
struct mlxsw_thermal_module {
struct mlxsw_thermal *parent;
struct thermal_zone_device *tzdev;
@@ -123,7 +129,7 @@ struct mlxsw_thermal {
const struct mlxsw_bus_info *bus_info;
struct thermal_zone_device *tzdev;
int polling_delay;
- struct thermal_cooling_device *cdevs[MLXSW_MFCR_PWMS_MAX];
+ struct mlxsw_thermal_cooling_device cdevs[MLXSW_MFCR_PWMS_MAX];
struct thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS];
struct mlxsw_cooling_states cooling_states[MLXSW_THERMAL_NUM_TRIPS];
struct mlxsw_thermal_area line_cards[];
@@ -147,7 +153,7 @@ static int mlxsw_get_cooling_device_idx(struct mlxsw_thermal *thermal,
int i;
for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++)
- if (thermal->cdevs[i] == cdev)
+ if (thermal->cdevs[i].cdev == cdev)
return i;
/* Allow mlxsw thermal zone binding to an external cooling device */
@@ -159,52 +165,22 @@ static int mlxsw_get_cooling_device_idx(struct mlxsw_thermal *thermal,
return -ENODEV;
}
-static int mlxsw_thermal_bind(struct thermal_zone_device *tzdev,
- struct thermal_cooling_device *cdev)
+static bool mlxsw_thermal_should_bind(struct thermal_zone_device *tzdev,
+ const struct thermal_trip *trip,
+ struct thermal_cooling_device *cdev,
+ struct cooling_spec *c)
{
struct mlxsw_thermal *thermal = thermal_zone_device_priv(tzdev);
- struct device *dev = thermal->bus_info->dev;
- int i, err;
+ const struct mlxsw_cooling_states *state = trip->priv;
/* If the cooling device is one of ours bind it */
if (mlxsw_get_cooling_device_idx(thermal, cdev) < 0)
- return 0;
-
- for (i = 0; i < MLXSW_THERMAL_NUM_TRIPS; i++) {
- const struct mlxsw_cooling_states *state = &thermal->cooling_states[i];
-
- err = thermal_zone_bind_cooling_device(tzdev, i, cdev,
- state->max_state,
- state->min_state,
- THERMAL_WEIGHT_DEFAULT);
- if (err < 0) {
- dev_err(dev, "Failed to bind cooling device to trip %d\n", i);
- return err;
- }
- }
- return 0;
-}
-
-static int mlxsw_thermal_unbind(struct thermal_zone_device *tzdev,
- struct thermal_cooling_device *cdev)
-{
- struct mlxsw_thermal *thermal = thermal_zone_device_priv(tzdev);
- struct device *dev = thermal->bus_info->dev;
- int i;
- int err;
+ return false;
- /* If the cooling device is our one unbind it */
- if (mlxsw_get_cooling_device_idx(thermal, cdev) < 0)
- return 0;
+ c->upper = state->max_state;
+ c->lower = state->min_state;
- for (i = 0; i < MLXSW_THERMAL_NUM_TRIPS; i++) {
- err = thermal_zone_unbind_cooling_device(tzdev, i, cdev);
- if (err < 0) {
- dev_err(dev, "Failed to unbind cooling device\n");
- return err;
- }
- }
- return 0;
+ return true;
}
static int mlxsw_thermal_get_temp(struct thermal_zone_device *tzdev,
@@ -229,62 +205,32 @@ static int mlxsw_thermal_get_temp(struct thermal_zone_device *tzdev,
return 0;
}
-static struct thermal_zone_params mlxsw_thermal_params = {
+static const struct thermal_zone_params mlxsw_thermal_params = {
.no_hwmon = true,
};
-static struct thermal_zone_device_ops mlxsw_thermal_ops = {
- .bind = mlxsw_thermal_bind,
- .unbind = mlxsw_thermal_unbind,
+static const struct thermal_zone_device_ops mlxsw_thermal_ops = {
+ .should_bind = mlxsw_thermal_should_bind,
.get_temp = mlxsw_thermal_get_temp,
};
-static int mlxsw_thermal_module_bind(struct thermal_zone_device *tzdev,
- struct thermal_cooling_device *cdev)
+static bool mlxsw_thermal_module_should_bind(struct thermal_zone_device *tzdev,
+ const struct thermal_trip *trip,
+ struct thermal_cooling_device *cdev,
+ struct cooling_spec *c)
{
struct mlxsw_thermal_module *tz = thermal_zone_device_priv(tzdev);
+ const struct mlxsw_cooling_states *state = trip->priv;
struct mlxsw_thermal *thermal = tz->parent;
- int i, j, err;
/* If the cooling device is one of ours bind it */
if (mlxsw_get_cooling_device_idx(thermal, cdev) < 0)
- return 0;
-
- for (i = 0; i < MLXSW_THERMAL_NUM_TRIPS; i++) {
- const struct mlxsw_cooling_states *state = &tz->cooling_states[i];
-
- err = thermal_zone_bind_cooling_device(tzdev, i, cdev,
- state->max_state,
- state->min_state,
- THERMAL_WEIGHT_DEFAULT);
- if (err < 0)
- goto err_thermal_zone_bind_cooling_device;
- }
- return 0;
-
-err_thermal_zone_bind_cooling_device:
- for (j = i - 1; j >= 0; j--)
- thermal_zone_unbind_cooling_device(tzdev, j, cdev);
- return err;
-}
-
-static int mlxsw_thermal_module_unbind(struct thermal_zone_device *tzdev,
- struct thermal_cooling_device *cdev)
-{
- struct mlxsw_thermal_module *tz = thermal_zone_device_priv(tzdev);
- struct mlxsw_thermal *thermal = tz->parent;
- int i;
- int err;
+ return false;
- /* If the cooling device is one of ours unbind it */
- if (mlxsw_get_cooling_device_idx(thermal, cdev) < 0)
- return 0;
+ c->upper = state->max_state;
+ c->lower = state->min_state;
- for (i = 0; i < MLXSW_THERMAL_NUM_TRIPS; i++) {
- err = thermal_zone_unbind_cooling_device(tzdev, i, cdev);
- WARN_ON(err);
- }
- return err;
+ return true;
}
static int mlxsw_thermal_module_temp_get(struct thermal_zone_device *tzdev,
@@ -306,9 +252,8 @@ static int mlxsw_thermal_module_temp_get(struct thermal_zone_device *tzdev,
return 0;
}
-static struct thermal_zone_device_ops mlxsw_thermal_module_ops = {
- .bind = mlxsw_thermal_module_bind,
- .unbind = mlxsw_thermal_module_unbind,
+static const struct thermal_zone_device_ops mlxsw_thermal_module_ops = {
+ .should_bind = mlxsw_thermal_module_should_bind,
.get_temp = mlxsw_thermal_module_temp_get,
};
@@ -335,9 +280,8 @@ static int mlxsw_thermal_gearbox_temp_get(struct thermal_zone_device *tzdev,
return 0;
}
-static struct thermal_zone_device_ops mlxsw_thermal_gearbox_ops = {
- .bind = mlxsw_thermal_module_bind,
- .unbind = mlxsw_thermal_module_unbind,
+static const struct thermal_zone_device_ops mlxsw_thermal_gearbox_ops = {
+ .should_bind = mlxsw_thermal_module_should_bind,
.get_temp = mlxsw_thermal_gearbox_temp_get,
};
@@ -352,17 +296,14 @@ static int mlxsw_thermal_get_cur_state(struct thermal_cooling_device *cdev,
unsigned long *p_state)
{
- struct mlxsw_thermal *thermal = cdev->devdata;
+ struct mlxsw_thermal_cooling_device *mlxsw_cdev = cdev->devdata;
+ struct mlxsw_thermal *thermal = mlxsw_cdev->thermal;
struct device *dev = thermal->bus_info->dev;
char mfsc_pl[MLXSW_REG_MFSC_LEN];
- int err, idx;
u8 duty;
+ int err;
- idx = mlxsw_get_cooling_device_idx(thermal, cdev);
- if (idx < 0)
- return idx;
-
- mlxsw_reg_mfsc_pack(mfsc_pl, idx, 0);
+ mlxsw_reg_mfsc_pack(mfsc_pl, mlxsw_cdev->idx, 0);
err = mlxsw_reg_query(thermal->core, MLXSW_REG(mfsc), mfsc_pl);
if (err) {
dev_err(dev, "Failed to query PWM duty\n");
@@ -378,22 +319,19 @@ static int mlxsw_thermal_set_cur_state(struct thermal_cooling_device *cdev,
unsigned long state)
{
- struct mlxsw_thermal *thermal = cdev->devdata;
+ struct mlxsw_thermal_cooling_device *mlxsw_cdev = cdev->devdata;
+ struct mlxsw_thermal *thermal = mlxsw_cdev->thermal;
struct device *dev = thermal->bus_info->dev;
char mfsc_pl[MLXSW_REG_MFSC_LEN];
- int idx;
int err;
if (state > MLXSW_THERMAL_MAX_STATE)
return -EINVAL;
- idx = mlxsw_get_cooling_device_idx(thermal, cdev);
- if (idx < 0)
- return idx;
-
/* Normalize the state to the valid speed range. */
state = max_t(unsigned long, MLXSW_THERMAL_MIN_STATE, state);
- mlxsw_reg_mfsc_pack(mfsc_pl, idx, mlxsw_state_to_duty(state));
+ mlxsw_reg_mfsc_pack(mfsc_pl, mlxsw_cdev->idx,
+ mlxsw_state_to_duty(state));
err = mlxsw_reg_write(thermal->core, MLXSW_REG(mfsc), mfsc_pl);
if (err) {
dev_err(dev, "Failed to write PWM duty\n");
@@ -411,7 +349,7 @@ static const struct thermal_cooling_device_ops mlxsw_cooling_ops = {
static int
mlxsw_thermal_module_tz_init(struct mlxsw_thermal_module *module_tz)
{
- char tz_name[THERMAL_NAME_LENGTH];
+ char tz_name[40];
int err;
if (module_tz->slot_index)
@@ -445,17 +383,14 @@ static void mlxsw_thermal_module_tz_fini(struct thermal_zone_device *tzdev)
thermal_zone_device_unregister(tzdev);
}
-static void
-mlxsw_thermal_module_init(struct device *dev, struct mlxsw_core *core,
- struct mlxsw_thermal *thermal,
+static int
+mlxsw_thermal_module_init(struct mlxsw_thermal *thermal,
struct mlxsw_thermal_area *area, u8 module)
{
struct mlxsw_thermal_module *module_tz;
+ int i;
module_tz = &area->tz_module_arr[module];
- /* Skip if parent is already set (case of port split). */
- if (module_tz->parent)
- return;
module_tz->module = module;
module_tz->slot_index = area->slot_index;
module_tz->parent = thermal;
@@ -465,15 +400,15 @@ mlxsw_thermal_module_init(struct device *dev, struct mlxsw_core *core,
sizeof(thermal->trips));
memcpy(module_tz->cooling_states, default_cooling_states,
sizeof(thermal->cooling_states));
+ for (i = 0; i < MLXSW_THERMAL_NUM_TRIPS; i++)
+ module_tz->trips[i].priv = &module_tz->cooling_states[i];
+
+ return mlxsw_thermal_module_tz_init(module_tz);
}
static void mlxsw_thermal_module_fini(struct mlxsw_thermal_module *module_tz)
{
- if (module_tz && module_tz->tzdev) {
- mlxsw_thermal_module_tz_fini(module_tz->tzdev);
- module_tz->tzdev = NULL;
- module_tz->parent = NULL;
- }
+ mlxsw_thermal_module_tz_fini(module_tz->tzdev);
}
static int
@@ -481,7 +416,6 @@ mlxsw_thermal_modules_init(struct device *dev, struct mlxsw_core *core,
struct mlxsw_thermal *thermal,
struct mlxsw_thermal_area *area)
{
- struct mlxsw_thermal_module *module_tz;
char mgpir_pl[MLXSW_REG_MGPIR_LEN];
int i, err;
@@ -503,22 +437,16 @@ mlxsw_thermal_modules_init(struct device *dev, struct mlxsw_core *core,
if (!area->tz_module_arr)
return -ENOMEM;
- for (i = 0; i < area->tz_module_num; i++)
- mlxsw_thermal_module_init(dev, core, thermal, area, i);
-
for (i = 0; i < area->tz_module_num; i++) {
- module_tz = &area->tz_module_arr[i];
- if (!module_tz->parent)
- continue;
- err = mlxsw_thermal_module_tz_init(module_tz);
+ err = mlxsw_thermal_module_init(thermal, area, i);
if (err)
- goto err_thermal_module_tz_init;
+ goto err_thermal_module_init;
}
return 0;
-err_thermal_module_tz_init:
- for (i = area->tz_module_num - 1; i >= 0; i--)
+err_thermal_module_init:
+ for (i--; i >= 0; i--)
mlxsw_thermal_module_fini(&area->tz_module_arr[i]);
kfree(area->tz_module_arr);
return err;
@@ -579,7 +507,7 @@ mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core,
struct mlxsw_thermal_module *gearbox_tz;
char mgpir_pl[MLXSW_REG_MGPIR_LEN];
u8 gbox_num;
- int i;
+ int i, j;
int err;
mlxsw_reg_mgpir_pack(mgpir_pl, area->slot_index);
@@ -606,6 +534,9 @@ mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core,
sizeof(thermal->trips));
memcpy(gearbox_tz->cooling_states, default_cooling_states,
sizeof(thermal->cooling_states));
+ for (j = 0; j < MLXSW_THERMAL_NUM_TRIPS; j++)
+ gearbox_tz->trips[j].priv = &gearbox_tz->cooling_states[j];
+
gearbox_tz->module = i;
gearbox_tz->parent = thermal;
gearbox_tz->slot_index = area->slot_index;
@@ -722,6 +653,9 @@ int mlxsw_thermal_init(struct mlxsw_core *core,
thermal->bus_info = bus_info;
memcpy(thermal->trips, default_thermal_trips, sizeof(thermal->trips));
memcpy(thermal->cooling_states, default_cooling_states, sizeof(thermal->cooling_states));
+ for (i = 0; i < MLXSW_THERMAL_NUM_TRIPS; i++)
+ thermal->trips[i].priv = &thermal->cooling_states[i];
+
thermal->line_cards[0].slot_index = 0;
err = mlxsw_reg_query(thermal->core, MLXSW_REG(mfcr), mfcr_pl);
@@ -753,17 +687,21 @@ int mlxsw_thermal_init(struct mlxsw_core *core,
}
for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++) {
if (pwm_active & BIT(i)) {
+ struct mlxsw_thermal_cooling_device *mlxsw_cdev;
struct thermal_cooling_device *cdev;
+ mlxsw_cdev = &thermal->cdevs[i];
+ mlxsw_cdev->thermal = thermal;
+ mlxsw_cdev->idx = i;
cdev = thermal_cooling_device_register("mlxsw_fan",
- thermal,
+ mlxsw_cdev,
&mlxsw_cooling_ops);
if (IS_ERR(cdev)) {
err = PTR_ERR(cdev);
dev_err(dev, "Failed to register cooling device\n");
goto err_thermal_cooling_device_register;
}
- thermal->cdevs[i] = cdev;
+ mlxsw_cdev->cdev = cdev;
}
}
@@ -817,15 +755,11 @@ err_linecards_event_ops_register:
err_thermal_gearboxes_init:
mlxsw_thermal_modules_fini(thermal, &thermal->line_cards[0]);
err_thermal_modules_init:
- if (thermal->tzdev) {
- thermal_zone_device_unregister(thermal->tzdev);
- thermal->tzdev = NULL;
- }
+ thermal_zone_device_unregister(thermal->tzdev);
err_thermal_zone_device_register:
err_thermal_cooling_device_register:
for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++)
- if (thermal->cdevs[i])
- thermal_cooling_device_unregister(thermal->cdevs[i]);
+ thermal_cooling_device_unregister(thermal->cdevs[i].cdev);
err_reg_write:
err_reg_query:
kfree(thermal);
@@ -842,17 +776,10 @@ void mlxsw_thermal_fini(struct mlxsw_thermal *thermal)
thermal);
mlxsw_thermal_gearboxes_fini(thermal, &thermal->line_cards[0]);
mlxsw_thermal_modules_fini(thermal, &thermal->line_cards[0]);
- if (thermal->tzdev) {
- thermal_zone_device_unregister(thermal->tzdev);
- thermal->tzdev = NULL;
- }
+ thermal_zone_device_unregister(thermal->tzdev);
- for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++) {
- if (thermal->cdevs[i]) {
- thermal_cooling_device_unregister(thermal->cdevs[i]);
- thermal->cdevs[i] = NULL;
- }
- }
+ for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++)
+ thermal_cooling_device_unregister(thermal->cdevs[i].cdev);
kfree(thermal);
}
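
The thermal hunks above tie each trip to its own per-trip cooling state through the trip's priv pointer, and wrap each PWM cooling device in a small struct carrying the owning context and its index. A minimal sketch of the trip binding follows; the struct layout and trip count are illustrative, not the driver's exact definitions.

#include <linux/thermal.h>

#define DEMO_NUM_TRIPS	3	/* illustrative trip count */

struct demo_tz {
	struct thermal_trip trips[DEMO_NUM_TRIPS];
	unsigned int cooling_states[DEMO_NUM_TRIPS];
};

static void demo_bind_trips(struct demo_tz *tz)
{
	int i;

	/* Each trip carries a pointer to its own cooling state, so a trip
	 * callback can recover the matching state directly from trip->priv
	 * instead of re-deriving the trip index.
	 */
	for (i = 0; i < DEMO_NUM_TRIPS; i++)
		tz->trips[i].priv = &tz->cooling_states[i];
}
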
diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
index 1e150ce1c73a..f9f565c1036d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
@@ -516,7 +516,7 @@ static bool mlxsw_i2c_skb_transmit_busy(void *bus_priv,
}
static int mlxsw_i2c_skb_transmit(void *bus_priv, struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
+ const struct mlxsw_txhdr_info *txhdr_info)
{
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h
index cfafbeb42586..a619a0736bd1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/item.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/item.h
@@ -218,6 +218,10 @@ __mlxsw_item_bit_array_offset(const struct mlxsw_item *item,
}
max_index = (item->size.bytes << 3) / item->element_size - 1;
+ if (WARN_ONCE(index > max_index,
+ "name=%s,index=%u,max_index=%u\n", item->name, index,
+ max_index))
+ index = 0;
be_index = max_index - index;
offset = be_index * item->element_size >> 3;
in_byte_index = index % (BITS_PER_BYTE / item->element_size);
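
The item.h change adds a defensive bounds check before computing the big-endian element offset. A reduced sketch of the same clamp-and-warn pattern, with a placeholder helper name:

#include <linux/bug.h>

static unsigned int demo_clamp_index(unsigned int index,
				     unsigned int max_index,
				     const char *name)
{
	/* Warn once per boot on an out-of-range index and fall back to
	 * element 0 rather than reading past the item.
	 */
	if (WARN_ONCE(index > max_index, "name=%s,index=%u,max_index=%u\n",
		      name, index, max_index))
		index = 0;
	return index;
}
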
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
index f0ceb196a6ce..828c65036a4c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -140,6 +140,20 @@ mlxsw_m_get_module_eeprom_by_page(struct net_device *netdev,
page, extack);
}
+static int
+mlxsw_m_set_module_eeprom_by_page(struct net_device *netdev,
+ const struct ethtool_module_eeprom *page,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
+ struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
+
+ return mlxsw_env_set_module_eeprom_by_page(core,
+ mlxsw_m_port->slot_index,
+ mlxsw_m_port->module,
+ page, extack);
+}
+
static int mlxsw_m_reset(struct net_device *netdev, u32 *flags)
{
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
@@ -181,6 +195,7 @@ static const struct ethtool_ops mlxsw_m_port_ethtool_ops = {
.get_module_info = mlxsw_m_get_module_info,
.get_module_eeprom = mlxsw_m_get_module_eeprom,
.get_module_eeprom_by_page = mlxsw_m_get_module_eeprom_by_page,
+ .set_module_eeprom_by_page = mlxsw_m_set_module_eeprom_by_page,
.reset = mlxsw_m_reset,
.get_module_power_mode = mlxsw_m_get_module_power_mode,
.set_module_power_mode = mlxsw_m_set_module_power_mode,
@@ -702,8 +717,8 @@ static struct mlxsw_driver mlxsw_m_driver = {
};
static const struct i2c_device_id mlxsw_m_i2c_id[] = {
- { "mlxsw_minimal", 0},
- { },
+ { "mlxsw_minimal" },
+ { }
};
static struct i2c_driver mlxsw_m_i2c_driver = {
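
The minimal.c hunk hooks module EEPROM writes into ethtool. The general shape of such a callback is sketched below as an illustrative stub; in mlxsw the real work is done by mlxsw_env_set_module_eeprom_by_page(), and the validation here is only an example.

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static int demo_set_module_eeprom_by_page(struct net_device *netdev,
					  const struct ethtool_module_eeprom *page,
					  struct netlink_ext_ack *extack)
{
	/* page->page, page->bank, page->i2c_address, page->offset,
	 * page->length and page->data describe the write; a real driver
	 * forwards them to its transceiver access layer.
	 */
	if (!page->length)
		return -EINVAL;

	return 0;
}

static const struct ethtool_ops demo_ethtool_ops = {
	.set_module_eeprom_by_page = demo_set_module_eeprom_by_page,
};
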
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index af99bf17eb36..8769cba2c746 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -8,12 +8,12 @@
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
-#include <linux/wait.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/log2.h>
#include <linux/string.h>
+#include <net/page_pool/helpers.h>
#include "pci_hw.h"
#include "pci.h"
@@ -21,6 +21,7 @@
#include "cmd.h"
#include "port.h"
#include "resources.h"
+#include "txheader.h"
#define mlxsw_pci_write32(mlxsw_pci, reg, val) \
iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
@@ -36,6 +37,11 @@ enum mlxsw_pci_queue_type {
#define MLXSW_PCI_QUEUE_TYPE_COUNT 4
+enum mlxsw_pci_cq_type {
+ MLXSW_PCI_CQ_SDQ,
+ MLXSW_PCI_CQ_RDQ,
+};
+
static const u16 mlxsw_pci_doorbell_type_offset[] = {
MLXSW_PCI_DOORBELL_SDQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_SDQ */
MLXSW_PCI_DOORBELL_RDQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_RDQ */
@@ -57,15 +63,11 @@ struct mlxsw_pci_mem_item {
};
struct mlxsw_pci_queue_elem_info {
+ struct page *pages[MLXSW_PCI_WQE_SG_ENTRIES];
char *elem; /* pointer to actual dma mapped element mem chunk */
- union {
- struct {
- struct sk_buff *skb;
- } sdq;
- struct {
- struct sk_buff *skb;
- } rdq;
- } u;
+ struct {
+ struct sk_buff *skb;
+ } sdq;
};
struct mlxsw_pci_queue {
@@ -78,19 +80,20 @@ struct mlxsw_pci_queue {
u8 num; /* queue number */
u8 elem_size; /* size of one element */
enum mlxsw_pci_queue_type type;
- struct tasklet_struct tasklet; /* queue processing tasklet */
struct mlxsw_pci *pci;
union {
struct {
- u32 comp_sdq_count;
- u32 comp_rdq_count;
enum mlxsw_pci_cqe_v v;
+ struct mlxsw_pci_queue *dq;
+ struct napi_struct napi;
+ struct page_pool *page_pool;
} cq;
struct {
- u32 ev_cmd_count;
- u32 ev_comp_count;
- u32 ev_other_count;
+ struct tasklet_struct tasklet;
} eq;
+ struct {
+ struct mlxsw_pci_queue *cq;
+ } rdq;
} u;
};
@@ -109,6 +112,7 @@ struct mlxsw_pci {
bool cff_support;
enum mlxsw_cmd_mbox_config_profile_lag_mode lag_mode;
enum mlxsw_cmd_mbox_config_profile_flood_mode flood_mode;
+ u8 num_sg_entries; /* Number of scatter/gather entries for packets. */
struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
u32 doorbell_offset;
struct mlxsw_core *core;
@@ -120,9 +124,6 @@ struct mlxsw_pci {
struct mlxsw_pci_mem_item out_mbox;
struct mlxsw_pci_mem_item in_mbox;
struct mutex lock; /* Lock access to command registers */
- bool nopoll;
- wait_queue_head_t wait;
- bool wait_done;
struct {
u8 status;
u64 out_param;
@@ -131,13 +132,43 @@ struct mlxsw_pci {
struct mlxsw_bus_info bus_info;
const struct pci_device_id *id;
enum mlxsw_pci_cqe_v max_cqe_ver; /* Maximal supported CQE version */
- u8 num_sdq_cqs; /* Number of CQs used for SDQs */
+ u8 num_cqs; /* Number of CQs */
+ u8 num_sdqs; /* Number of SDQs */
bool skip_reset;
+ struct net_device *napi_dev_tx;
+ struct net_device *napi_dev_rx;
};
-static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
+static int mlxsw_pci_napi_devs_init(struct mlxsw_pci *mlxsw_pci)
{
- tasklet_schedule(&q->tasklet);
+ int err;
+
+ mlxsw_pci->napi_dev_tx = alloc_netdev_dummy(0);
+ if (!mlxsw_pci->napi_dev_tx)
+ return -ENOMEM;
+ strscpy(mlxsw_pci->napi_dev_tx->name, "mlxsw_tx",
+ sizeof(mlxsw_pci->napi_dev_tx->name));
+
+ mlxsw_pci->napi_dev_rx = alloc_netdev_dummy(0);
+ if (!mlxsw_pci->napi_dev_rx) {
+ err = -ENOMEM;
+ goto err_alloc_rx;
+ }
+ strscpy(mlxsw_pci->napi_dev_rx->name, "mlxsw_rx",
+ sizeof(mlxsw_pci->napi_dev_rx->name));
+ netif_threaded_enable(mlxsw_pci->napi_dev_rx);
+
+ return 0;
+
+err_alloc_rx:
+ free_netdev(mlxsw_pci->napi_dev_tx);
+ return err;
+}
+
+static void mlxsw_pci_napi_devs_fini(struct mlxsw_pci *mlxsw_pci)
+{
+ free_netdev(mlxsw_pci->napi_dev_rx);
+ free_netdev(mlxsw_pci->napi_dev_tx);
}
static char *__mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q,
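
NAPI instances need a backing net_device, so the hunk above allocates two unregistered dummy netdevs ("mlxsw_tx" and "mlxsw_rx") to host the TX and RX pollers. A minimal sketch of that pattern, with placeholder names and a trivial poll body:

#include <linux/netdevice.h>

static int demo_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	/* Completion processing would go here, bounded by 'budget'. */
	if (work_done < budget)
		napi_complete_done(napi, work_done);
	return work_done;
}

static int demo_napi_host_init(struct napi_struct *napi)
{
	struct net_device *dummy;

	/* An unregistered dummy netdev gives the NAPI instance a home
	 * without exposing a real network interface.
	 */
	dummy = alloc_netdev_dummy(0);
	if (!dummy)
		return -ENOMEM;

	strscpy(dummy->name, "demo_rx", sizeof(dummy->name));
	netif_napi_add(dummy, napi, demo_poll);
	napi_enable(napi);
	return 0;
}
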
@@ -187,25 +218,6 @@ mlxsw_pci_queue_type_group_get(struct mlxsw_pci *mlxsw_pci,
return &mlxsw_pci->queues[q_type];
}
-static u8 __mlxsw_pci_queue_count(struct mlxsw_pci *mlxsw_pci,
- enum mlxsw_pci_queue_type q_type)
-{
- struct mlxsw_pci_queue_type_group *queue_group;
-
- queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_type);
- return queue_group->count;
-}
-
-static u8 mlxsw_pci_sdq_count(struct mlxsw_pci *mlxsw_pci)
-{
- return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_SDQ);
-}
-
-static u8 mlxsw_pci_cq_count(struct mlxsw_pci *mlxsw_pci)
-{
- return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ);
-}
-
static struct mlxsw_pci_queue *
__mlxsw_pci_queue_get(struct mlxsw_pci *mlxsw_pci,
enum mlxsw_pci_queue_type q_type, u8 q_num)
@@ -220,23 +232,16 @@ static struct mlxsw_pci_queue *mlxsw_pci_sdq_get(struct mlxsw_pci *mlxsw_pci,
MLXSW_PCI_QUEUE_TYPE_SDQ, q_num);
}
-static struct mlxsw_pci_queue *mlxsw_pci_rdq_get(struct mlxsw_pci *mlxsw_pci,
- u8 q_num)
-{
- return __mlxsw_pci_queue_get(mlxsw_pci,
- MLXSW_PCI_QUEUE_TYPE_RDQ, q_num);
-}
-
static struct mlxsw_pci_queue *mlxsw_pci_cq_get(struct mlxsw_pci *mlxsw_pci,
u8 q_num)
{
return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ, q_num);
}
-static struct mlxsw_pci_queue *mlxsw_pci_eq_get(struct mlxsw_pci *mlxsw_pci,
- u8 q_num)
+static struct mlxsw_pci_queue *mlxsw_pci_eq_get(struct mlxsw_pci *mlxsw_pci)
{
- return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ, q_num);
+ /* There is only one EQ at index 0. */
+ return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ, 0);
}
static void __mlxsw_pci_queue_doorbell_set(struct mlxsw_pci *mlxsw_pci,
@@ -291,7 +296,9 @@ static dma_addr_t __mlxsw_pci_queue_page_get(struct mlxsw_pci_queue *q,
static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
struct mlxsw_pci_queue *q)
{
+ struct mlxsw_pci_queue *cq;
int tclass;
+ u8 cq_num;
int lp;
int i;
int err;
@@ -304,7 +311,8 @@ static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_WQE;
/* Set CQ of same number of this SDQ. */
- mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num);
+ cq_num = q->num;
+ mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, cq_num);
mlxsw_cmd_mbox_sw2hw_dq_sdq_lp_set(mbox, lp);
mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, tclass);
mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
@@ -317,6 +325,9 @@ static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
err = mlxsw_cmd_sw2hw_sdq(mlxsw_pci->core, mbox, q->num);
if (err)
return err;
+
+ cq = mlxsw_pci_cq_get(mlxsw_pci, cq_num);
+ cq->u.cq.dq = q;
mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
return 0;
}
@@ -327,6 +338,29 @@ static void mlxsw_pci_sdq_fini(struct mlxsw_pci *mlxsw_pci,
mlxsw_cmd_hw2sw_sdq(mlxsw_pci->core, q->num);
}
+#define MLXSW_PCI_SKB_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
+
+#define MLXSW_PCI_RX_BUF_SW_OVERHEAD \
+ (MLXSW_PCI_SKB_HEADROOM + \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
+static void
+mlxsw_pci_wqe_rx_frag_set(struct mlxsw_pci *mlxsw_pci, struct page *page,
+ char *wqe, int index, size_t frag_len)
+{
+ dma_addr_t mapaddr;
+
+ mapaddr = page_pool_get_dma_addr(page);
+
+ if (index == 0) {
+ mapaddr += MLXSW_PCI_SKB_HEADROOM;
+ frag_len = frag_len - MLXSW_PCI_RX_BUF_SW_OVERHEAD;
+ }
+
+ mlxsw_pci_wqe_address_set(wqe, index, mapaddr);
+ mlxsw_pci_wqe_byte_count_set(wqe, index, frag_len);
+}
+
static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
int index, char *frag_data, size_t frag_len,
int direction)
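
MLXSW_PCI_SKB_HEADROOM and MLXSW_PCI_RX_BUF_SW_OVERHEAD define how the first page of a received packet is carved up: headroom at the front, skb_shared_info space at the tail, payload in between. A small sketch of the resulting arithmetic, using placeholder macro names:

#include <linux/skbuff.h>

#define DEMO_HEADROOM	(NET_SKB_PAD + NET_IP_ALIGN)
#define DEMO_OVERHEAD	(DEMO_HEADROOM + \
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

/* Usable payload bytes in the first page of a received packet; pages
 * after the first carry up to PAGE_SIZE bytes each.
 */
static size_t demo_first_page_payload(void)
{
	return PAGE_SIZE - DEMO_OVERHEAD;
}
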
@@ -356,51 +390,158 @@ static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,
dma_unmap_single(&pdev->dev, mapaddr, frag_len, direction);
}
-static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci,
- struct mlxsw_pci_queue_elem_info *elem_info,
- gfp_t gfp)
+static struct sk_buff *mlxsw_pci_rdq_build_skb(struct mlxsw_pci_queue *q,
+ struct page *pages[],
+ u16 byte_count)
{
- size_t buf_len = MLXSW_PORT_MAX_MTU;
- char *wqe = elem_info->elem;
+ struct mlxsw_pci_queue *cq = q->u.rdq.cq;
+ unsigned int linear_data_size;
+ struct page_pool *page_pool;
struct sk_buff *skb;
- int err;
+ int page_index = 0;
+ bool linear_only;
+ void *data;
+
+ linear_only = byte_count + MLXSW_PCI_RX_BUF_SW_OVERHEAD <= PAGE_SIZE;
+ linear_data_size = linear_only ? byte_count :
+ PAGE_SIZE -
+ MLXSW_PCI_RX_BUF_SW_OVERHEAD;
+
+ page_pool = cq->u.cq.page_pool;
+ page_pool_dma_sync_for_cpu(page_pool, pages[page_index],
+ MLXSW_PCI_SKB_HEADROOM, linear_data_size);
+
+ data = page_address(pages[page_index]);
+ net_prefetch(data);
+
+ skb = napi_build_skb(data, PAGE_SIZE);
+ if (unlikely(!skb))
+ return ERR_PTR(-ENOMEM);
+
+ skb_reserve(skb, MLXSW_PCI_SKB_HEADROOM);
+ skb_put(skb, linear_data_size);
+
+ if (linear_only)
+ return skb;
- skb = __netdev_alloc_skb_ip_align(NULL, buf_len, gfp);
- if (!skb)
+ byte_count -= linear_data_size;
+ page_index++;
+
+ while (byte_count > 0) {
+ unsigned int frag_size;
+ struct page *page;
+
+ page = pages[page_index];
+ frag_size = min(byte_count, PAGE_SIZE);
+ page_pool_dma_sync_for_cpu(page_pool, page, 0, frag_size);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ page, 0, frag_size, PAGE_SIZE);
+ byte_count -= frag_size;
+ page_index++;
+ }
+
+ return skb;
+}
+
+static int mlxsw_pci_rdq_page_alloc(struct mlxsw_pci_queue *q,
+ struct mlxsw_pci_queue_elem_info *elem_info,
+ int index)
+{
+ struct mlxsw_pci_queue *cq = q->u.rdq.cq;
+ char *wqe = elem_info->elem;
+ struct page *page;
+
+ page = page_pool_dev_alloc_pages(cq->u.cq.page_pool);
+ if (unlikely(!page))
return -ENOMEM;
- err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
- buf_len, DMA_FROM_DEVICE);
- if (err)
- goto err_frag_map;
+ mlxsw_pci_wqe_rx_frag_set(q->pci, page, wqe, index, PAGE_SIZE);
+ elem_info->pages[index] = page;
+ return 0;
+}
+
+static void mlxsw_pci_rdq_page_free(struct mlxsw_pci_queue *q,
+ struct mlxsw_pci_queue_elem_info *elem_info,
+ int index)
+{
+ struct mlxsw_pci_queue *cq = q->u.rdq.cq;
+
+ page_pool_put_page(cq->u.cq.page_pool, elem_info->pages[index], -1,
+ false);
+}
+
+static u8 mlxsw_pci_num_sg_entries_get(u16 byte_count)
+{
+ return DIV_ROUND_UP(byte_count + MLXSW_PCI_RX_BUF_SW_OVERHEAD,
+ PAGE_SIZE);
+}
+
+static int
+mlxsw_pci_elem_info_pages_ref_store(const struct mlxsw_pci_queue *q,
+ const struct mlxsw_pci_queue_elem_info *el,
+ u16 byte_count, struct page *pages[],
+ u8 *p_num_sg_entries)
+{
+ u8 num_sg_entries;
+ int i;
+
+ num_sg_entries = mlxsw_pci_num_sg_entries_get(byte_count);
+ if (WARN_ON_ONCE(num_sg_entries > q->pci->num_sg_entries))
+ return -EINVAL;
+
+ for (i = 0; i < num_sg_entries; i++)
+ pages[i] = el->pages[i];
+
+ *p_num_sg_entries = num_sg_entries;
+ return 0;
+}
+
+static int
+mlxsw_pci_rdq_pages_alloc(struct mlxsw_pci_queue *q,
+ struct mlxsw_pci_queue_elem_info *elem_info,
+ u8 num_sg_entries)
+{
+ struct page *old_pages[MLXSW_PCI_WQE_SG_ENTRIES];
+ struct mlxsw_pci_queue *cq = q->u.rdq.cq;
+ int i, err;
+
+ for (i = 0; i < num_sg_entries; i++) {
+ old_pages[i] = elem_info->pages[i];
+ err = mlxsw_pci_rdq_page_alloc(q, elem_info, i);
+ if (err) {
+ dev_err_ratelimited(&q->pci->pdev->dev, "Failed to alloc page\n");
+ goto err_page_alloc;
+ }
+ }
- elem_info->u.rdq.skb = skb;
return 0;
-err_frag_map:
- dev_kfree_skb_any(skb);
+err_page_alloc:
+ for (i--; i >= 0; i--)
+ page_pool_recycle_direct(cq->u.cq.page_pool, old_pages[i]);
+
return err;
}
-static void mlxsw_pci_rdq_skb_free(struct mlxsw_pci *mlxsw_pci,
- struct mlxsw_pci_queue_elem_info *elem_info)
+static void
+mlxsw_pci_rdq_pages_recycle(struct mlxsw_pci_queue *q, struct page *pages[],
+ u8 num_sg_entries)
{
- struct sk_buff *skb;
- char *wqe;
-
- skb = elem_info->u.rdq.skb;
- wqe = elem_info->elem;
+ struct mlxsw_pci_queue *cq = q->u.rdq.cq;
+ int i;
- mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
- dev_kfree_skb_any(skb);
+ for (i = 0; i < num_sg_entries; i++)
+ page_pool_recycle_direct(cq->u.cq.page_pool, pages[i]);
}
static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
struct mlxsw_pci_queue *q)
{
struct mlxsw_pci_queue_elem_info *elem_info;
- u8 sdq_count = mlxsw_pci_sdq_count(mlxsw_pci);
- int i;
+ u8 sdq_count = mlxsw_pci->num_sdqs;
+ struct mlxsw_pci_queue *cq;
+ u8 cq_num;
+ int i, j;
int err;
q->producer_counter = 0;
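
The skb construction above builds the linear part from page 0 with napi_build_skb() and attaches any remaining pages as frags. A reduced sketch of that flow, with DMA syncs and error paths omitted and headroom/overhead passed in as parameters:

#include <linux/mm.h>
#include <linux/skbuff.h>

static struct sk_buff *demo_build_rx_skb(struct page *pages[], u16 byte_count,
					 unsigned int headroom,
					 unsigned int overhead)
{
	unsigned int linear = min_t(unsigned int, byte_count,
				    PAGE_SIZE - overhead);
	struct sk_buff *skb;
	int i = 1;

	/* Page 0 becomes the linear area (headroom in front, shared info
	 * at the tail); any remaining bytes are attached as page frags.
	 */
	skb = napi_build_skb(page_address(pages[0]), PAGE_SIZE);
	if (!skb)
		return NULL;

	skb_reserve(skb, headroom);
	skb_put(skb, linear);
	byte_count -= linear;

	while (byte_count) {
		unsigned int frag = min_t(unsigned int, byte_count, PAGE_SIZE);

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, pages[i],
				0, frag, PAGE_SIZE);
		byte_count -= frag;
		i++;
	}
	return skb;
}
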
@@ -409,7 +550,8 @@ static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
/* Set CQ of same number of this RDQ with base
* above SDQ count as the lower ones are assigned to SDQs.
*/
- mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, sdq_count + q->num);
+ cq_num = sdq_count + q->num;
+ mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, cq_num);
mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
@@ -421,14 +563,21 @@ static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
if (err)
return err;
+ cq = mlxsw_pci_cq_get(mlxsw_pci, cq_num);
+ cq->u.cq.dq = q;
+ q->u.rdq.cq = cq;
+
mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
for (i = 0; i < q->count; i++) {
elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
BUG_ON(!elem_info);
- err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info, GFP_KERNEL);
- if (err)
- goto rollback;
+
+ for (j = 0; j < mlxsw_pci->num_sg_entries; j++) {
+ err = mlxsw_pci_rdq_page_alloc(q, elem_info, j);
+ if (err)
+ goto rollback;
+ }
/* Everything is set up, ring doorbell to pass elem to HW */
q->producer_counter++;
mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
@@ -439,8 +588,12 @@ static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
rollback:
for (i--; i >= 0; i--) {
elem_info = mlxsw_pci_queue_elem_info_get(q, i);
- mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
+ for (j--; j >= 0; j--)
+ mlxsw_pci_rdq_page_free(q, elem_info, j);
+ j = mlxsw_pci->num_sg_entries;
}
+ q->u.rdq.cq = NULL;
+ cq->u.cq.dq = NULL;
mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
return err;
@@ -450,12 +603,13 @@ static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci,
struct mlxsw_pci_queue *q)
{
struct mlxsw_pci_queue_elem_info *elem_info;
- int i;
+ int i, j;
mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
for (i = 0; i < q->count; i++) {
elem_info = mlxsw_pci_queue_elem_info_get(q, i);
- mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
+ for (j = 0; j < mlxsw_pci->num_sg_entries; j++)
+ mlxsw_pci_rdq_page_free(q, elem_info, j);
}
}
@@ -465,54 +619,11 @@ static void mlxsw_pci_cq_pre_init(struct mlxsw_pci *mlxsw_pci,
q->u.cq.v = mlxsw_pci->max_cqe_ver;
if (q->u.cq.v == MLXSW_PCI_CQE_V2 &&
- q->num < mlxsw_pci->num_sdq_cqs &&
+ q->num < mlxsw_pci->num_sdqs &&
!mlxsw_core_sdq_supports_cqe_v2(mlxsw_pci->core))
q->u.cq.v = MLXSW_PCI_CQE_V1;
}
-static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
- struct mlxsw_pci_queue *q)
-{
- int i;
- int err;
-
- q->consumer_counter = 0;
-
- for (i = 0; i < q->count; i++) {
- char *elem = mlxsw_pci_queue_elem_get(q, i);
-
- mlxsw_pci_cqe_owner_set(q->u.cq.v, elem, 1);
- }
-
- if (q->u.cq.v == MLXSW_PCI_CQE_V1)
- mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
- MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_1);
- else if (q->u.cq.v == MLXSW_PCI_CQE_V2)
- mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
- MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_2);
-
- mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM);
- mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0);
- mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count));
- for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
- dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
-
- mlxsw_cmd_mbox_sw2hw_cq_pa_set(mbox, i, mapaddr);
- }
- err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num);
- if (err)
- return err;
- mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
- mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
- return 0;
-}
-
-static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci,
- struct mlxsw_pci_queue *q)
-{
- mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num);
-}
-
static unsigned int mlxsw_pci_read32_off(struct mlxsw_pci *mlxsw_pci,
ptrdiff_t off)
{
@@ -543,7 +654,7 @@ static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
struct mlxsw_pci_queue *q,
u16 consumer_counter_limit,
enum mlxsw_pci_cqe_v cqe_v,
- char *cqe)
+ char *cqe, int budget)
{
struct pci_dev *pdev = mlxsw_pci->pdev;
struct mlxsw_pci_queue_elem_info *elem_info;
@@ -554,8 +665,8 @@ static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
spin_lock(&q->lock);
elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
- tx_info = mlxsw_skb_cb(elem_info->u.sdq.skb)->tx_info;
- skb = elem_info->u.sdq.skb;
+ tx_info = mlxsw_skb_cb(elem_info->sdq.skb)->tx_info;
+ skb = elem_info->sdq.skb;
wqe = elem_info->elem;
for (i = 0; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
@@ -569,8 +680,8 @@ static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
}
if (skb)
- dev_kfree_skb_any(skb);
- elem_info->u.sdq.skb = NULL;
+ napi_consume_skb(skb, budget);
+ elem_info->sdq.skb = NULL;
if (q->consumer_counter++ != consumer_counter_limit)
dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in SDQ\n");
@@ -627,32 +738,46 @@ static void mlxsw_pci_cqe_rdq_md_init(struct sk_buff *skb, const char *cqe)
}
static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
+ struct napi_struct *napi,
struct mlxsw_pci_queue *q,
u16 consumer_counter_limit,
enum mlxsw_pci_cqe_v cqe_v, char *cqe)
{
struct pci_dev *pdev = mlxsw_pci->pdev;
+ struct page *pages[MLXSW_PCI_WQE_SG_ENTRIES];
struct mlxsw_pci_queue_elem_info *elem_info;
struct mlxsw_rx_info rx_info = {};
- char wqe[MLXSW_PCI_WQE_SIZE];
struct sk_buff *skb;
+ u8 num_sg_entries;
u16 byte_count;
int err;
elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
- skb = elem_info->u.rdq.skb;
- memcpy(wqe, elem_info->elem, MLXSW_PCI_WQE_SIZE);
if (q->consumer_counter++ != consumer_counter_limit)
dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");
- err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info, GFP_ATOMIC);
- if (err) {
- dev_err_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n");
+ byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
+ if (mlxsw_pci_cqe_crc_get(cqe_v, cqe))
+ byte_count -= ETH_FCS_LEN;
+
+ err = mlxsw_pci_elem_info_pages_ref_store(q, elem_info, byte_count,
+ pages, &num_sg_entries);
+ if (err)
+ goto out;
+
+ err = mlxsw_pci_rdq_pages_alloc(q, elem_info, num_sg_entries);
+ if (err)
+ goto out;
+
+ skb = mlxsw_pci_rdq_build_skb(q, pages, byte_count);
+ if (IS_ERR(skb)) {
+ dev_err_ratelimited(&pdev->dev, "Failed to build skb for RDQ\n");
+ mlxsw_pci_rdq_pages_recycle(q, pages, num_sg_entries);
goto out;
}
- mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
+ skb_mark_for_recycle(skb);
if (mlxsw_pci_cqe_lag_get(cqe_v, cqe)) {
rx_info.is_lag = true;
@@ -684,17 +809,12 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
}
mlxsw_pci_skb_cb_ts_set(mlxsw_pci, skb, cqe_v, cqe);
+ mlxsw_skb_cb(skb)->rx_md_info.napi = napi;
- byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
- if (mlxsw_pci_cqe_crc_get(cqe_v, cqe))
- byte_count -= ETH_FCS_LEN;
- skb_put(skb, byte_count);
mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);
out:
- /* Everything is set up, ring doorbell to pass elem to HW */
q->producer_counter++;
- mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
return;
}
@@ -714,13 +834,86 @@ static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
return elem;
}
-static void mlxsw_pci_cq_tasklet(struct tasklet_struct *t)
+static bool mlxsw_pci_cq_cqe_to_handle(struct mlxsw_pci_queue *q)
{
- struct mlxsw_pci_queue *q = from_tasklet(q, t, tasklet);
+ struct mlxsw_pci_queue_elem_info *elem_info;
+ bool owner_bit;
+
+ elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
+ owner_bit = mlxsw_pci_cqe_owner_get(q->u.cq.v, elem_info->elem);
+ return !mlxsw_pci_elem_hw_owned(q, owner_bit);
+}
+
+static int mlxsw_pci_napi_poll_cq_rx(struct napi_struct *napi, int budget)
+{
+ struct mlxsw_pci_queue *q = container_of(napi, struct mlxsw_pci_queue,
+ u.cq.napi);
+ struct mlxsw_pci_queue *rdq = q->u.cq.dq;
struct mlxsw_pci *mlxsw_pci = q->pci;
+ int work_done = 0;
+ char *cqe;
+
+ /* If the budget is 0, Rx processing should be skipped. */
+ if (unlikely(!budget))
+ return 0;
+
+ while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
+ u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
+ u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe);
+ u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe);
+
+ if (unlikely(sendq)) {
+ WARN_ON_ONCE(1);
+ continue;
+ }
+
+ if (unlikely(dqn != rdq->num)) {
+ WARN_ON_ONCE(1);
+ continue;
+ }
+
+ mlxsw_pci_cqe_rdq_handle(mlxsw_pci, napi, rdq,
+ wqe_counter, q->u.cq.v, cqe);
+
+ if (++work_done == budget)
+ break;
+ }
+
+ mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+ mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, rdq);
+
+ if (work_done < budget)
+ goto processing_completed;
+
+ /* The driver still has outstanding work to do, budget was exhausted.
+ * Return exactly budget. In that case, the NAPI instance will be polled
+ * again.
+ */
+ if (mlxsw_pci_cq_cqe_to_handle(q))
+ goto out;
+
+ /* The driver processed all the completions and handled exactly
+ * 'budget'. Return 'budget - 1' to distinguish from the case that
+ * driver still has completions to handle.
+ */
+ if (work_done == budget)
+ work_done--;
+
+processing_completed:
+ if (napi_complete_done(napi, work_done))
+ mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+out:
+ return work_done;
+}
+
+static int mlxsw_pci_napi_poll_cq_tx(struct napi_struct *napi, int budget)
+{
+ struct mlxsw_pci_queue *q = container_of(napi, struct mlxsw_pci_queue,
+ u.cq.napi);
+ struct mlxsw_pci_queue *sdq = q->u.cq.dq;
+ struct mlxsw_pci *mlxsw_pci = q->pci;
+ int work_done = 0;
char *cqe;
- int items = 0;
- int credits = q->count >> 1;
while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
@@ -728,46 +921,113 @@ static void mlxsw_pci_cq_tasklet(struct tasklet_struct *t)
u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe);
char ncqe[MLXSW_PCI_CQE_SIZE_MAX];
+ if (unlikely(!sendq)) {
+ WARN_ON_ONCE(1);
+ continue;
+ }
+
+ if (unlikely(dqn != sdq->num)) {
+ WARN_ON_ONCE(1);
+ continue;
+ }
+
memcpy(ncqe, cqe, q->elem_size);
mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
- if (sendq) {
- struct mlxsw_pci_queue *sdq;
+ mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
+ wqe_counter, q->u.cq.v, ncqe, budget);
- sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
- mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
- wqe_counter, q->u.cq.v, ncqe);
- q->u.cq.comp_sdq_count++;
- } else {
- struct mlxsw_pci_queue *rdq;
+ work_done++;
+ }
- rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
- mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
- wqe_counter, q->u.cq.v, ncqe);
- q->u.cq.comp_rdq_count++;
- }
- if (++items == credits)
- break;
+ /* If the budget is 0 napi_complete_done() should never be called. */
+ if (unlikely(!budget))
+ goto processing_completed;
+
+ work_done = min(work_done, budget - 1);
+ if (unlikely(!napi_complete_done(napi, work_done)))
+ goto out;
+
+processing_completed:
+ mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+out:
+ return work_done;
+}
+
+static enum mlxsw_pci_cq_type
+mlxsw_pci_cq_type(const struct mlxsw_pci *mlxsw_pci,
+ const struct mlxsw_pci_queue *q)
+{
+ /* Each CQ is mapped to one DQ. The first 'num_sdqs' queues are used
+ * for SDQs and the rest are used for RDQs.
+ */
+ if (q->num < mlxsw_pci->num_sdqs)
+ return MLXSW_PCI_CQ_SDQ;
+
+ return MLXSW_PCI_CQ_RDQ;
+}
+
+static void mlxsw_pci_cq_napi_setup(struct mlxsw_pci_queue *q,
+ enum mlxsw_pci_cq_type cq_type)
+{
+ struct mlxsw_pci *mlxsw_pci = q->pci;
+
+ switch (cq_type) {
+ case MLXSW_PCI_CQ_SDQ:
+ netif_napi_add(mlxsw_pci->napi_dev_tx, &q->u.cq.napi,
+ mlxsw_pci_napi_poll_cq_tx);
+ break;
+ case MLXSW_PCI_CQ_RDQ:
+ netif_napi_add(mlxsw_pci->napi_dev_rx, &q->u.cq.napi,
+ mlxsw_pci_napi_poll_cq_rx);
+ break;
}
- if (items)
- mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
}
-static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q)
+static void mlxsw_pci_cq_napi_teardown(struct mlxsw_pci_queue *q)
{
- return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_COUNT :
- MLXSW_PCI_CQE01_COUNT;
+ netif_napi_del(&q->u.cq.napi);
}
-static u8 mlxsw_pci_cq_elem_size(const struct mlxsw_pci_queue *q)
+static int mlxsw_pci_cq_page_pool_init(struct mlxsw_pci_queue *q,
+ enum mlxsw_pci_cq_type cq_type)
{
- return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_SIZE :
- MLXSW_PCI_CQE01_SIZE;
+ struct page_pool_params pp_params = {};
+ struct mlxsw_pci *mlxsw_pci = q->pci;
+ struct page_pool *page_pool;
+
+ if (cq_type != MLXSW_PCI_CQ_RDQ)
+ return 0;
+
+ pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pp_params.pool_size = MLXSW_PCI_WQE_COUNT * mlxsw_pci->num_sg_entries;
+ pp_params.nid = dev_to_node(&mlxsw_pci->pdev->dev);
+ pp_params.dev = &mlxsw_pci->pdev->dev;
+ pp_params.napi = &q->u.cq.napi;
+ pp_params.dma_dir = DMA_FROM_DEVICE;
+ pp_params.max_len = PAGE_SIZE;
+
+ page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(page_pool))
+ return PTR_ERR(page_pool);
+
+ q->u.cq.page_pool = page_pool;
+ return 0;
}
-static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+static void mlxsw_pci_cq_page_pool_fini(struct mlxsw_pci_queue *q,
+ enum mlxsw_pci_cq_type cq_type)
+{
+ if (cq_type != MLXSW_PCI_CQ_RDQ)
+ return;
+
+ page_pool_destroy(q->u.cq.page_pool);
+}
+
+static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
struct mlxsw_pci_queue *q)
{
+ enum mlxsw_pci_cq_type cq_type = mlxsw_pci_cq_type(mlxsw_pci, q);
int i;
int err;
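
Both pollers above follow the standard NAPI return-value convention: return exactly 'budget' while completions remain, and complete NAPI with at most 'budget - 1' once the queue is drained. A distilled, self-contained sketch of that bookkeeping, with the completion helpers stubbed out as placeholders:

#include <linux/netdevice.h>

static bool demo_process_one_completion(struct napi_struct *napi) { return false; }
static bool demo_more_completions_pending(struct napi_struct *napi) { return false; }
static void demo_rearm_completion_interrupt(struct napi_struct *napi) { }

static int demo_napi_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	/* A zero budget means no processing and no napi_complete_done(). */
	if (unlikely(!budget))
		return 0;

	while (work_done < budget && demo_process_one_completion(napi))
		work_done++;

	/* Budget exhausted with work left: return exactly 'budget' so the
	 * instance is polled again without re-arming the interrupt.
	 */
	if (work_done == budget && demo_more_completions_pending(napi))
		return budget;

	/* Fully drained: report at most 'budget - 1' and re-arm. */
	work_done = min(work_done, budget - 1);
	if (napi_complete_done(napi, work_done))
		demo_rearm_completion_interrupt(napi);
	return work_done;
}
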
@@ -776,39 +1036,64 @@ static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
for (i = 0; i < q->count; i++) {
char *elem = mlxsw_pci_queue_elem_get(q, i);
- mlxsw_pci_eqe_owner_set(elem, 1);
+ mlxsw_pci_cqe_owner_set(q->u.cq.v, elem, 1);
}
- mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */
- mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */
- mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count));
+ if (q->u.cq.v == MLXSW_PCI_CQE_V1)
+ mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
+ MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_1);
+ else if (q->u.cq.v == MLXSW_PCI_CQE_V2)
+ mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
+ MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_2);
+
+ mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM);
+ mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0);
+ mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count));
for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
- mlxsw_cmd_mbox_sw2hw_eq_pa_set(mbox, i, mapaddr);
+ mlxsw_cmd_mbox_sw2hw_cq_pa_set(mbox, i, mapaddr);
}
- err = mlxsw_cmd_sw2hw_eq(mlxsw_pci->core, mbox, q->num);
+ err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num);
if (err)
return err;
+ mlxsw_pci_cq_napi_setup(q, cq_type);
+
+ err = mlxsw_pci_cq_page_pool_init(q, cq_type);
+ if (err)
+ goto err_page_pool_init;
+
+ napi_enable(&q->u.cq.napi);
mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
return 0;
+
+err_page_pool_init:
+ mlxsw_pci_cq_napi_teardown(q);
+ return err;
}
-static void mlxsw_pci_eq_fini(struct mlxsw_pci *mlxsw_pci,
+static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci,
struct mlxsw_pci_queue *q)
{
- mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num);
+ enum mlxsw_pci_cq_type cq_type = mlxsw_pci_cq_type(mlxsw_pci, q);
+
+ napi_disable(&q->u.cq.napi);
+ mlxsw_pci_cq_page_pool_fini(q, cq_type);
+ mlxsw_pci_cq_napi_teardown(q);
+ mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num);
+}
+
+static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q)
+{
+ return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_COUNT :
+ MLXSW_PCI_CQE01_COUNT;
}
-static void mlxsw_pci_eq_cmd_event(struct mlxsw_pci *mlxsw_pci, char *eqe)
+static u8 mlxsw_pci_cq_elem_size(const struct mlxsw_pci_queue *q)
{
- mlxsw_pci->cmd.comp.status = mlxsw_pci_eqe_cmd_status_get(eqe);
- mlxsw_pci->cmd.comp.out_param =
- ((u64) mlxsw_pci_eqe_cmd_out_param_h_get(eqe)) << 32 |
- mlxsw_pci_eqe_cmd_out_param_l_get(eqe);
- mlxsw_pci->cmd.wait_done = true;
- wake_up(&mlxsw_pci->cmd.wait);
+ return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_SIZE :
+ MLXSW_PCI_CQE01_SIZE;
}
static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
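
Each receive CQ gets its own page pool, sized for the ring and bound to that CQ's NAPI instance so recycling stays local. A sketch of the pool setup with illustrative sizing parameters:

#include <linux/device.h>
#include <net/page_pool/helpers.h>

static struct page_pool *demo_rx_page_pool_create(struct device *dma_dev,
						  struct napi_struct *napi,
						  unsigned int ring_entries,
						  u8 frags_per_entry)
{
	struct page_pool_params pp_params = {
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size	= ring_entries * frags_per_entry,
		.nid		= dev_to_node(dma_dev),
		.dev		= dma_dev,
		.napi		= napi,
		.dma_dir	= DMA_FROM_DEVICE,
		.max_len	= PAGE_SIZE,
	};

	/* Returns an ERR_PTR() on failure, so callers should IS_ERR() it. */
	return page_pool_create(&pp_params);
}
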
@@ -829,54 +1114,81 @@ static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
static void mlxsw_pci_eq_tasklet(struct tasklet_struct *t)
{
- struct mlxsw_pci_queue *q = from_tasklet(q, t, tasklet);
- struct mlxsw_pci *mlxsw_pci = q->pci;
- u8 cq_count = mlxsw_pci_cq_count(mlxsw_pci);
unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_MAX)];
- char *eqe;
- u8 cqn;
- bool cq_handle = false;
- int items = 0;
+ struct mlxsw_pci_queue *q = from_tasklet(q, t, u.eq.tasklet);
+ struct mlxsw_pci *mlxsw_pci = q->pci;
int credits = q->count >> 1;
+ u8 cqn, cq_count;
+ int items = 0;
+ char *eqe;
memset(&active_cqns, 0, sizeof(active_cqns));
while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) {
+ cqn = mlxsw_pci_eqe_cqn_get(eqe);
+ set_bit(cqn, active_cqns);
- /* Command interface completion events are always received on
- * queue MLXSW_PCI_EQ_ASYNC_NUM (EQ0) and completion events
- * are mapped to queue MLXSW_PCI_EQ_COMP_NUM (EQ1).
- */
- switch (q->num) {
- case MLXSW_PCI_EQ_ASYNC_NUM:
- mlxsw_pci_eq_cmd_event(mlxsw_pci, eqe);
- q->u.eq.ev_cmd_count++;
- break;
- case MLXSW_PCI_EQ_COMP_NUM:
- cqn = mlxsw_pci_eqe_cqn_get(eqe);
- set_bit(cqn, active_cqns);
- cq_handle = true;
- q->u.eq.ev_comp_count++;
- break;
- default:
- q->u.eq.ev_other_count++;
- }
if (++items == credits)
break;
}
- if (items) {
- mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
- mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
- }
- if (!cq_handle)
+ if (!items)
return;
+
+ mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+ mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+
+ cq_count = mlxsw_pci->num_cqs;
for_each_set_bit(cqn, active_cqns, cq_count) {
q = mlxsw_pci_cq_get(mlxsw_pci, cqn);
- mlxsw_pci_queue_tasklet_schedule(q);
+ napi_schedule(&q->u.cq.napi);
}
}
+static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+ struct mlxsw_pci_queue *q)
+{
+ int i;
+ int err;
+
+ /* We expect to initialize only one EQ, which gets num=0 as it is
+ * located at index zero. We use the EQ as EQ1, so set the number for
+ * future use.
+ */
+ WARN_ON_ONCE(q->num);
+ q->num = MLXSW_PCI_EQ_COMP_NUM;
+
+ q->consumer_counter = 0;
+
+ for (i = 0; i < q->count; i++) {
+ char *elem = mlxsw_pci_queue_elem_get(q, i);
+
+ mlxsw_pci_eqe_owner_set(elem, 1);
+ }
+
+ mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */
+ mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */
+ mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count));
+ for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
+ dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
+
+ mlxsw_cmd_mbox_sw2hw_eq_pa_set(mbox, i, mapaddr);
+ }
+ err = mlxsw_cmd_sw2hw_eq(mlxsw_pci->core, mbox, q->num);
+ if (err)
+ return err;
+ tasklet_setup(&q->u.eq.tasklet, mlxsw_pci_eq_tasklet);
+ mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+ mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+ return 0;
+}
+
+static void mlxsw_pci_eq_fini(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q)
+{
+ mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num);
+}
+
struct mlxsw_pci_queue_ops {
const char *name;
enum mlxsw_pci_queue_type type;
@@ -886,7 +1198,6 @@ struct mlxsw_pci_queue_ops {
struct mlxsw_pci_queue *q);
void (*fini)(struct mlxsw_pci *mlxsw_pci,
struct mlxsw_pci_queue *q);
- void (*tasklet)(struct tasklet_struct *t);
u16 (*elem_count_f)(const struct mlxsw_pci_queue *q);
u8 (*elem_size_f)(const struct mlxsw_pci_queue *q);
u16 elem_count;
@@ -914,7 +1225,6 @@ static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = {
.pre_init = mlxsw_pci_cq_pre_init,
.init = mlxsw_pci_cq_init,
.fini = mlxsw_pci_cq_fini,
- .tasklet = mlxsw_pci_cq_tasklet,
.elem_count_f = mlxsw_pci_cq_elem_count,
.elem_size_f = mlxsw_pci_cq_elem_size
};
@@ -923,7 +1233,6 @@ static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = {
.type = MLXSW_PCI_QUEUE_TYPE_EQ,
.init = mlxsw_pci_eq_init,
.fini = mlxsw_pci_eq_fini,
- .tasklet = mlxsw_pci_eq_tasklet,
.elem_count = MLXSW_PCI_EQE_COUNT,
.elem_size = MLXSW_PCI_EQE_SIZE
};
@@ -948,9 +1257,6 @@ static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
q->type = q_ops->type;
q->pci = mlxsw_pci;
- if (q_ops->tasklet)
- tasklet_setup(&q->tasklet, q_ops->tasklet);
-
mem_item->size = MLXSW_PCI_AQ_SIZE;
mem_item->buf = dma_alloc_coherent(&mlxsw_pci->pdev->dev,
mem_item->size, &mem_item->mapaddr,
@@ -1074,7 +1380,7 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
if (num_sdqs + num_rdqs > num_cqs ||
num_sdqs < MLXSW_PCI_SDQS_MIN ||
- num_cqs > MLXSW_PCI_CQS_MAX || num_eqs != MLXSW_PCI_EQS_COUNT) {
+ num_cqs > MLXSW_PCI_CQS_MAX || num_eqs != MLXSW_PCI_EQS_MAX) {
dev_err(&pdev->dev, "Unsupported number of queues\n");
return -EINVAL;
}
@@ -1089,10 +1395,11 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
return -EINVAL;
}
- mlxsw_pci->num_sdq_cqs = num_sdqs;
+ mlxsw_pci->num_cqs = num_cqs;
+ mlxsw_pci->num_sdqs = num_sdqs;
err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_eq_ops,
- num_eqs);
+ MLXSW_PCI_EQS_COUNT);
if (err) {
dev_err(&pdev->dev, "Failed to initialize event queues\n");
return err;
@@ -1119,8 +1426,6 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
goto err_rdqs_init;
}
- /* We have to poll in command interface until queues are initialized */
- mlxsw_pci->cmd.nopoll = true;
return 0;
err_rdqs_init:
@@ -1134,7 +1439,6 @@ err_cqs_init:
static void mlxsw_pci_aqs_fini(struct mlxsw_pci *mlxsw_pci)
{
- mlxsw_pci->cmd.nopoll = false;
mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_rdq_ops);
mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
@@ -1432,12 +1736,9 @@ static irqreturn_t mlxsw_pci_eq_irq_handler(int irq, void *dev_id)
{
struct mlxsw_pci *mlxsw_pci = dev_id;
struct mlxsw_pci_queue *q;
- int i;
- for (i = 0; i < MLXSW_PCI_EQS_COUNT; i++) {
- q = mlxsw_pci_eq_get(mlxsw_pci, i);
- mlxsw_pci_queue_tasklet_schedule(q);
- }
+ q = mlxsw_pci_eq_get(mlxsw_pci);
+ tasklet_schedule(&q->u.eq.tasklet);
return IRQ_HANDLED;
}
@@ -1490,20 +1791,31 @@ static int mlxsw_pci_sys_ready_wait(struct mlxsw_pci *mlxsw_pci,
return -EBUSY;
}
-static int mlxsw_pci_reset_at_pci_disable(struct mlxsw_pci *mlxsw_pci)
+static int mlxsw_pci_reset_at_pci_disable(struct mlxsw_pci *mlxsw_pci,
+ bool pci_reset_sbr_supported)
{
struct pci_dev *pdev = mlxsw_pci->pdev;
char mrsr_pl[MLXSW_REG_MRSR_LEN];
+ struct pci_dev *bridge;
int err;
+ if (!pci_reset_sbr_supported) {
+ pci_dbg(pdev, "Performing PCI hot reset instead of \"all reset\"\n");
+ goto sbr;
+ }
+
mlxsw_reg_mrsr_pack(mrsr_pl,
MLXSW_REG_MRSR_COMMAND_RESET_AT_PCI_DISABLE);
err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl);
if (err)
return err;
+sbr:
device_lock_assert(&pdev->dev);
+ bridge = pci_upstream_bridge(pdev);
+ if (bridge)
+ pci_cfg_access_lock(bridge);
pci_cfg_access_lock(pdev);
pci_save_state(pdev);
@@ -1513,6 +1825,8 @@ static int mlxsw_pci_reset_at_pci_disable(struct mlxsw_pci *mlxsw_pci)
pci_restore_state(pdev);
pci_cfg_access_unlock(pdev);
+ if (bridge)
+ pci_cfg_access_unlock(bridge);
return err;
}
@@ -1529,8 +1843,9 @@ static int
mlxsw_pci_reset(struct mlxsw_pci *mlxsw_pci, const struct pci_device_id *id)
{
struct pci_dev *pdev = mlxsw_pci->pdev;
+ bool pci_reset_sbr_supported = false;
char mcam_pl[MLXSW_REG_MCAM_LEN];
- bool pci_reset_supported;
+ bool pci_reset_supported = false;
u32 sys_status;
int err;
@@ -1548,15 +1863,17 @@ mlxsw_pci_reset(struct mlxsw_pci *mlxsw_pci, const struct pci_device_id *id)
mlxsw_reg_mcam_pack(mcam_pl,
MLXSW_REG_MCAM_FEATURE_GROUP_ENHANCED_FEATURES);
err = mlxsw_reg_query(mlxsw_pci->core, MLXSW_REG(mcam), mcam_pl);
- if (err)
- return err;
-
- mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_PCI_RESET,
- &pci_reset_supported);
+ if (!err) {
+ mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_PCI_RESET,
+ &pci_reset_supported);
+ mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_PCI_RESET_SBR,
+ &pci_reset_sbr_supported);
+ }
if (pci_reset_supported) {
pci_dbg(pdev, "Starting PCI reset flow\n");
- err = mlxsw_pci_reset_at_pci_disable(mlxsw_pci);
+ err = mlxsw_pci_reset_at_pci_disable(mlxsw_pci,
+ pci_reset_sbr_supported);
} else {
pci_dbg(pdev, "Starting software reset flow\n");
err = mlxsw_pci_reset_sw(mlxsw_pci);
@@ -1589,6 +1906,17 @@ static void mlxsw_pci_free_irq_vectors(struct mlxsw_pci *mlxsw_pci)
pci_free_irq_vectors(mlxsw_pci->pdev);
}
+static void mlxsw_pci_num_sg_entries_set(struct mlxsw_pci *mlxsw_pci)
+{
+ u8 num_sg_entries;
+
+ num_sg_entries = mlxsw_pci_num_sg_entries_get(MLXSW_PORT_MAX_MTU);
+ mlxsw_pci->num_sg_entries = min(num_sg_entries,
+ MLXSW_PCI_WQE_SG_ENTRIES);
+
+ WARN_ON(num_sg_entries > MLXSW_PCI_WQE_SG_ENTRIES);
+}
+
static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
const struct mlxsw_config_profile *profile,
struct mlxsw_res *res)
@@ -1711,6 +2039,12 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
if (err)
goto err_requery_resources;
+ mlxsw_pci_num_sg_entries_set(mlxsw_pci);
+
+ err = mlxsw_pci_napi_devs_init(mlxsw_pci);
+ if (err)
+ goto err_napi_devs_init;
+
err = mlxsw_pci_aqs_init(mlxsw_pci, mbox);
if (err)
goto err_aqs_init;
@@ -1728,6 +2062,8 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
err_request_eq_irq:
mlxsw_pci_aqs_fini(mlxsw_pci);
err_aqs_init:
+ mlxsw_pci_napi_devs_fini(mlxsw_pci);
+err_napi_devs_init:
err_requery_resources:
err_config_profile:
err_cqe_v_check:
@@ -1755,15 +2091,49 @@ static void mlxsw_pci_fini(void *bus_priv)
free_irq(pci_irq_vector(mlxsw_pci->pdev, 0), mlxsw_pci);
mlxsw_pci_aqs_fini(mlxsw_pci);
+ mlxsw_pci_napi_devs_fini(mlxsw_pci);
mlxsw_pci_fw_area_fini(mlxsw_pci);
mlxsw_pci_free_irq_vectors(mlxsw_pci);
}
+static int mlxsw_pci_txhdr_construct(struct sk_buff *skb,
+ const struct mlxsw_txhdr_info *txhdr_info)
+{
+ const struct mlxsw_tx_info tx_info = txhdr_info->tx_info;
+ char *txhdr;
+
+ if (skb_cow_head(skb, MLXSW_TXHDR_LEN))
+ return -ENOMEM;
+
+ txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
+ memset(txhdr, 0, MLXSW_TXHDR_LEN);
+
+ mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
+ mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
+ mlxsw_tx_hdr_swid_set(txhdr, 0);
+
+ if (unlikely(txhdr_info->data)) {
+ u16 fid = txhdr_info->max_fid + tx_info.local_port - 1;
+
+ mlxsw_tx_hdr_rx_is_router_set(txhdr, true);
+ mlxsw_tx_hdr_fid_valid_set(txhdr, true);
+ mlxsw_tx_hdr_fid_set(txhdr, fid);
+ mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_DATA);
+ } else {
+ mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
+ mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
+ mlxsw_tx_hdr_port_mid_set(txhdr, tx_info.local_port);
+ mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
+ }
+
+ return 0;
+}
+
static struct mlxsw_pci_queue *
mlxsw_pci_sdq_pick(struct mlxsw_pci *mlxsw_pci,
const struct mlxsw_tx_info *tx_info)
{
- u8 ctl_sdq_count = mlxsw_pci_sdq_count(mlxsw_pci) - 1;
+ u8 ctl_sdq_count = mlxsw_pci->num_sdqs - 1;
u8 sdqn;
if (tx_info->is_emad) {
@@ -1786,7 +2156,7 @@ static bool mlxsw_pci_skb_transmit_busy(void *bus_priv,
}
static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
+ const struct mlxsw_txhdr_info *txhdr_info)
{
struct mlxsw_pci *mlxsw_pci = bus_priv;
struct mlxsw_pci_queue *q;
@@ -1795,13 +2165,17 @@ static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
int i;
int err;
+ err = mlxsw_pci_txhdr_construct(skb, txhdr_info);
+ if (err)
+ return err;
+
if (skb_shinfo(skb)->nr_frags > MLXSW_PCI_WQE_SG_ENTRIES - 1) {
err = skb_linearize(skb);
if (err)
return err;
}
- q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
+ q = mlxsw_pci_sdq_pick(mlxsw_pci, &txhdr_info->tx_info);
spin_lock_bh(&q->lock);
elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
if (!elem_info) {
@@ -1809,8 +2183,8 @@ static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
err = -EAGAIN;
goto unlock;
}
- mlxsw_skb_cb(skb)->tx_info = *tx_info;
- elem_info->u.sdq.skb = skb;
+ mlxsw_skb_cb(skb)->tx_info = txhdr_info->tx_info;
+ elem_info->sdq.skb = skb;
wqe = elem_info->elem;
mlxsw_pci_wqe_c_set(wqe, 1); /* always report completion */
@@ -1840,6 +2214,8 @@ static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
for (i++; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
mlxsw_pci_wqe_byte_count_set(wqe, i, 0);
+ mlxsw_pci_wqe_ipcs_set(wqe, skb->ip_summed == CHECKSUM_PARTIAL);
+
/* Everything is set up, ring producer doorbell to get HW going */
q->producer_counter++;
mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
@@ -1862,9 +2238,9 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
{
struct mlxsw_pci *mlxsw_pci = bus_priv;
dma_addr_t in_mapaddr = 0, out_mapaddr = 0;
- bool evreq = mlxsw_pci->cmd.nopoll;
unsigned long timeout = msecs_to_jiffies(MLXSW_PCI_CIR_TIMEOUT_MSECS);
- bool *p_wait_done = &mlxsw_pci->cmd.wait_done;
+ unsigned long end;
+ bool wait_done;
int err;
*p_status = MLXSW_CMD_STATUS_OK;
@@ -1888,36 +2264,28 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
mlxsw_pci_write32(mlxsw_pci, CIR_IN_MODIFIER, in_mod);
mlxsw_pci_write32(mlxsw_pci, CIR_TOKEN, 0);
- *p_wait_done = false;
+ wait_done = false;
wmb(); /* all needs to be written before we write control register */
mlxsw_pci_write32(mlxsw_pci, CIR_CTRL,
MLXSW_PCI_CIR_CTRL_GO_BIT |
- (evreq ? MLXSW_PCI_CIR_CTRL_EVREQ_BIT : 0) |
(opcode_mod << MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT) |
opcode);
- if (!evreq) {
- unsigned long end;
-
- end = jiffies + timeout;
- do {
- u32 ctrl = mlxsw_pci_read32(mlxsw_pci, CIR_CTRL);
+ end = jiffies + timeout;
+ do {
+ u32 ctrl = mlxsw_pci_read32(mlxsw_pci, CIR_CTRL);
- if (!(ctrl & MLXSW_PCI_CIR_CTRL_GO_BIT)) {
- *p_wait_done = true;
- *p_status = ctrl >> MLXSW_PCI_CIR_CTRL_STATUS_SHIFT;
- break;
- }
- cond_resched();
- } while (time_before(jiffies, end));
- } else {
- wait_event_timeout(mlxsw_pci->cmd.wait, *p_wait_done, timeout);
- *p_status = mlxsw_pci->cmd.comp.status;
- }
+ if (!(ctrl & MLXSW_PCI_CIR_CTRL_GO_BIT)) {
+ wait_done = true;
+ *p_status = ctrl >> MLXSW_PCI_CIR_CTRL_STATUS_SHIFT;
+ break;
+ }
+ cond_resched();
+ } while (time_before(jiffies, end));
err = 0;
- if (*p_wait_done) {
+ if (wait_done) {
if (*p_status)
err = -EIO;
} else {
@@ -1931,14 +2299,12 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
*/
__be32 tmp;
- if (!evreq) {
- tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
- CIR_OUT_PARAM_HI));
- memcpy(out_mbox, &tmp, sizeof(tmp));
- tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
- CIR_OUT_PARAM_LO));
- memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp));
- }
+ tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
+ CIR_OUT_PARAM_HI));
+ memcpy(out_mbox, &tmp, sizeof(tmp));
+ tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
+ CIR_OUT_PARAM_LO));
+ memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp));
} else if (!err && out_mbox) {
memcpy(out_mbox, mlxsw_pci->cmd.out_mbox.buf, out_mbox_size);
}
@@ -2017,7 +2383,6 @@ static int mlxsw_pci_cmd_init(struct mlxsw_pci *mlxsw_pci)
int err;
mutex_init(&mlxsw_pci->cmd.lock);
- init_waitqueue_head(&mlxsw_pci->cmd.wait);
err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
if (err)
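
With the async command EQ gone, the command interface always polls the control register for completion. A standalone sketch of that polling loop; the bit positions and register accessor are hypothetical placeholders, not the mlxsw layout.

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/sched.h>

static int demo_cmd_wait_go_clear(void __iomem *ctrl_reg,
				  unsigned long timeout_ms, u8 *status)
{
	unsigned long end = jiffies + msecs_to_jiffies(timeout_ms);

	do {
		u32 ctrl = ioread32be(ctrl_reg);

		if (!(ctrl & BIT(31))) {	/* hypothetical GO bit */
			*status = ctrl >> 24;	/* hypothetical status field */
			return 0;
		}
		cond_resched();
	} while (time_before(jiffies, end));

	return -ETIMEDOUT;
}
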
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index 7cdf0ce24f28..7fa94e5828de 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -42,8 +42,8 @@
((offset) + (type_offset) + (num) * 4)
#define MLXSW_PCI_CQS_MAX 96
-#define MLXSW_PCI_EQS_COUNT 2
-#define MLXSW_PCI_EQ_ASYNC_NUM 0
+#define MLXSW_PCI_EQS_MAX 2
+#define MLXSW_PCI_EQS_COUNT 1
#define MLXSW_PCI_EQ_COMP_NUM 1
#define MLXSW_PCI_SDQS_MIN 2 /* EMAD and control traffic */
@@ -90,6 +90,11 @@ MLXSW_ITEM32(pci, wqe, lp, 0x00, 30, 1);
*/
MLXSW_ITEM32(pci, wqe, type, 0x00, 23, 4);
+/* pci_wqe_ipcs
+ * Calculate IPv4 and TCP / UDP checksums.
+ */
+MLXSW_ITEM32(pci, wqe, ipcs, 0x00, 14, 1);
+
/* pci_wqe_byte_count
* Size of i-th scatter/gather entry, 0 if entry is unused.
*/
diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h
index ac4d4ea51597..0a73b1a4526e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/port.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/port.h
@@ -6,7 +6,8 @@
#include <linux/types.h>
-#define MLXSW_PORT_MAX_MTU 10000
+#define MLXSW_PORT_MAX_MTU (10 * 1024)
+#define MLXSW_PORT_ETH_FRAME_HDR (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
#define MLXSW_PORT_DEFAULT_VID 1
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 8892654c685f..3bb89045eaf5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -4786,8 +4786,11 @@ MLXSW_ITEM32(reg, ptys, an_status, 0x04, 28, 4);
#define MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR BIT(8)
#define MLXSW_REG_PTYS_EXT_ETH_SPEED_CAUI_4_100GBASE_CR4_KR4 BIT(9)
#define MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_2_100GBASE_CR2_KR2 BIT(10)
+#define MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_1_100GBASE_CR_KR BIT(11)
#define MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4 BIT(12)
+#define MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_2_200GBASE_CR2_KR2 BIT(13)
#define MLXSW_REG_PTYS_EXT_ETH_SPEED_400GAUI_8 BIT(15)
+#define MLXSW_REG_PTYS_EXT_ETH_SPEED_400GAUI_4_400GBASE_CR4_KR4 BIT(16)
#define MLXSW_REG_PTYS_EXT_ETH_SPEED_800GAUI_8 BIT(19)
/* reg_ptys_ext_eth_proto_cap
@@ -10668,6 +10671,8 @@ enum mlxsw_reg_mcam_mng_feature_cap_mask_bits {
MLXSW_REG_MCAM_MCIA_128B = 34,
/* If set, MRSR.command=6 is supported. */
MLXSW_REG_MCAM_PCI_RESET = 48,
+ /* If set, MRSR.command=6 is supported with Secondary Bus Reset. */
+ MLXSW_REG_MCAM_PCI_RESET_SBR = 67,
};
#define MLXSW_REG_BYTES_PER_DWORD 0x4
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index bb642e9bb6cf..9a2d64a0a858 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -107,74 +107,6 @@ static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
};
-/* tx_hdr_version
- * Tx header version.
- * Must be set to 1.
- */
-MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
-
-/* tx_hdr_ctl
- * Packet control type.
- * 0 - Ethernet control (e.g. EMADs, LACP)
- * 1 - Ethernet data
- */
-MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
-
-/* tx_hdr_proto
- * Packet protocol type. Must be set to 1 (Ethernet).
- */
-MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
-
-/* tx_hdr_rx_is_router
- * Packet is sent from the router. Valid for data packets only.
- */
-MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);
-
-/* tx_hdr_fid_valid
- * Indicates if the 'fid' field is valid and should be used for
- * forwarding lookup. Valid for data packets only.
- */
-MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);
-
-/* tx_hdr_swid
- * Switch partition ID. Must be set to 0.
- */
-MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
-
-/* tx_hdr_control_tclass
- * Indicates if the packet should use the control TClass and not one
- * of the data TClasses.
- */
-MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);
-
-/* tx_hdr_etclass
- * Egress TClass to be used on the egress device on the egress port.
- */
-MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);
-
-/* tx_hdr_port_mid
- * Destination local port for unicast packets.
- * Destination multicast ID for multicast packets.
- *
- * Control packets are directed to a specific egress port, while data
- * packets are transmitted through the CPU port (0) into the switch partition,
- * where forwarding rules are applied.
- */
-MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
-
-/* tx_hdr_fid
- * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
- * set, otherwise calculated based on the packet's VID using VID to FID mapping.
- * Valid for data packets only.
- */
-MLXSW_ITEM32(tx, hdr, fid, 0x08, 16, 16);
-
-/* tx_hdr_type
- * 0 - Data packets
- * 6 - Control packets
- */
-MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
-
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
unsigned int counter_index, bool clear,
u64 *packets, u64 *bytes)
@@ -233,61 +165,6 @@ void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
counter_index);
}
-void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
-{
- char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
-
- memset(txhdr, 0, MLXSW_TXHDR_LEN);
-
- mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
- mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
- mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
- mlxsw_tx_hdr_swid_set(txhdr, 0);
- mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
- mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
- mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
-}
-
-int
-mlxsw_sp_txhdr_ptp_data_construct(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
-{
- char *txhdr;
- u16 max_fid;
- int err;
-
- if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
- err = -ENOMEM;
- goto err_skb_cow_head;
- }
-
- if (!MLXSW_CORE_RES_VALID(mlxsw_core, FID)) {
- err = -EIO;
- goto err_res_valid;
- }
- max_fid = MLXSW_CORE_RES_GET(mlxsw_core, FID);
-
- txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
- memset(txhdr, 0, MLXSW_TXHDR_LEN);
-
- mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
- mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
- mlxsw_tx_hdr_rx_is_router_set(txhdr, true);
- mlxsw_tx_hdr_fid_valid_set(txhdr, true);
- mlxsw_tx_hdr_fid_set(txhdr, max_fid + tx_info->local_port - 1);
- mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_DATA);
- return 0;
-
-err_res_valid:
-err_skb_cow_head:
- this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
- dev_kfree_skb_any(skb);
- return err;
-}
-
static bool mlxsw_sp_skb_requires_ts(struct sk_buff *skb)
{
unsigned int type;
@@ -299,30 +176,49 @@ static bool mlxsw_sp_skb_requires_ts(struct sk_buff *skb)
return !!ptp_parse_header(skb, type);
}
-static int mlxsw_sp_txhdr_handle(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
+static void mlxsw_sp_txhdr_info_data_init(struct mlxsw_core *mlxsw_core,
+ struct sk_buff *skb,
+ struct mlxsw_txhdr_info *txhdr_info)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ /* Resource validation was done as part of PTP init. */
+ u16 max_fid = MLXSW_CORE_RES_GET(mlxsw_core, FID);
- /* In Spectrum-2 and Spectrum-3, PTP events that require a time stamp
- * need special handling and cannot be transmitted as regular control
- * packets.
+ txhdr_info->data = true;
+ txhdr_info->max_fid = max_fid;
+}
+
+static struct sk_buff *
+mlxsw_sp_vlan_tag_push(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb)
+{
+ /* In some Spectrum ASICs, in order for PTP event packets to have their
+ * correction field correctly set on the egress port they must be
+ * transmitted as data packets. Such packets ingress the ASIC via the
+ * CPU port and must have a VLAN tag, as the CPU port is not configured
+ * with a PVID. Push the default VLAN (4095), which is configured as
+ * egress untagged on all the ports.
*/
- if (unlikely(mlxsw_sp_skb_requires_ts(skb)))
- return mlxsw_sp->ptp_ops->txhdr_construct(mlxsw_core,
- mlxsw_sp_port, skb,
- tx_info);
+ if (skb_vlan_tagged(skb))
+ return skb;
- if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
- this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
- dev_kfree_skb_any(skb);
- return -ENOMEM;
- }
+ return vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
+ MLXSW_SP_DEFAULT_VID);
+}
- mlxsw_sp_txhdr_construct(skb, tx_info);
- return 0;
+static struct sk_buff *
+mlxsw_sp_txhdr_preparations(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
+ struct mlxsw_txhdr_info *txhdr_info)
+{
+ if (likely(!mlxsw_sp_skb_requires_ts(skb)))
+ return skb;
+
+ if (!mlxsw_sp->ptp_ops->tx_as_data)
+ return skb;
+
+ /* Special handling for PTP events that require a time stamp and cannot
+ * be transmitted as regular control packets.
+ */
+ mlxsw_sp_txhdr_info_data_init(mlxsw_sp->core, skb, txhdr_info);
+ return mlxsw_sp_vlan_tag_push(mlxsw_sp, skb);
}
enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
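
PTP packets sent as data must carry a VLAN tag to ingress through the CPU port, so the helper above pushes the default VID when the skb is untagged. A minimal sketch of that push, with the VID value shown only for illustration:

#include <linux/if_vlan.h>

#define DEMO_DEFAULT_VID	4095

static struct sk_buff *demo_vlan_tag_push(struct sk_buff *skb)
{
	if (skb_vlan_tagged(skb))
		return skb;

	/* vlan_insert_tag_set_proto() may free the skb and return NULL on
	 * failure, so callers must use the returned pointer.
	 */
	return vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
					 DEMO_DEFAULT_VID);
}
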
@@ -405,29 +301,12 @@ static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
mlxsw_sp_port->dev->dev_addr);
}
-static int mlxsw_sp_port_max_mtu_get(struct mlxsw_sp_port *mlxsw_sp_port, int *p_max_mtu)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char pmtu_pl[MLXSW_REG_PMTU_LEN];
- int err;
-
- mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
- if (err)
- return err;
-
- *p_max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
- return 0;
-}
-
static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char pmtu_pl[MLXSW_REG_PMTU_LEN];
- mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
- if (mtu > mlxsw_sp_port->max_mtu)
- return -EINVAL;
+ mtu += MLXSW_PORT_ETH_FRAME_HDR;
mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
@@ -738,16 +617,16 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
- const struct mlxsw_tx_info tx_info = {
- .local_port = mlxsw_sp_port->local_port,
- .is_emad = false,
+ struct mlxsw_txhdr_info txhdr_info = {
+ .tx_info.local_port = mlxsw_sp_port->local_port,
+ .tx_info.is_emad = false,
};
u64 len;
int err;
memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));
- if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
+ if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &txhdr_info.tx_info))
return NETDEV_TX_BUSY;
if (eth_skb_pad(skb)) {
@@ -755,10 +634,11 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
- err = mlxsw_sp_txhdr_handle(mlxsw_sp->core, mlxsw_sp_port, skb,
- &tx_info);
- if (err)
+ skb = mlxsw_sp_txhdr_preparations(mlxsw_sp, skb, &txhdr_info);
+ if (!skb) {
+ this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
return NETDEV_TX_OK;
+ }
/* TX header is consumed by HW on the way so we shouldn't count its
* bytes as being sent.
@@ -768,7 +648,7 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
/* Due to a race we might fail here because of a full queue. In that
* unlikely case we simply drop the packet.
*/
- err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);
+ err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &txhdr_info);
if (!err) {
pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
@@ -825,7 +705,7 @@ static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
if (err)
goto err_port_mtu_set;
- dev->mtu = mtu;
+ WRITE_ONCE(dev->mtu, mtu);
return 0;
err_port_mtu_set:
@@ -1279,63 +1159,31 @@ static int mlxsw_sp_set_features(struct net_device *dev,
return 0;
}
-static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct ifreq *ifr)
+static int mlxsw_sp_port_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config config;
- int err;
-
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
- &config);
- if (err)
- return err;
-
- if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
- return -EFAULT;
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- return 0;
+ return mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
+ config, extack);
}
-static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
- struct ifreq *ifr)
+static int mlxsw_sp_port_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *config)
{
- struct hwtstamp_config config;
- int err;
-
- err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
- &config);
- if (err)
- return err;
-
- if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
- return -EFAULT;
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- return 0;
+ return mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
+ config);
}
static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
{
- struct hwtstamp_config config = {0};
-
- mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
-}
-
-static int
-mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct kernel_hwtstamp_config config = {};
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
- case SIOCGHWTSTAMP:
- return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
- default:
- return -EOPNOTSUPP;
- }
+ mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config,
+ NULL);
}
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
@@ -1352,7 +1200,8 @@ static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
.ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid,
.ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
.ndo_set_features = mlxsw_sp_set_features,
- .ndo_eth_ioctl = mlxsw_sp_port_ioctl,
+ .ndo_hwtstamp_get = mlxsw_sp_port_hwtstamp_get,
+ .ndo_hwtstamp_set = mlxsw_sp_port_hwtstamp_set,
};
static int
@@ -1693,12 +1542,16 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port,
netif_carrier_off(dev);
- dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
- NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
- dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;
+ dev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_TC | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ dev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ dev->lltx = true;
+ dev->netns_immutable = true;
- dev->min_mtu = 0;
- dev->max_mtu = ETH_MAX_MTU;
+ dev->min_mtu = ETH_MIN_MTU;
+ dev->max_mtu = MLXSW_PORT_MAX_MTU - MLXSW_PORT_ETH_FRAME_HDR;
	/* Each packet needs to have a Tx header (metadata) on top of all other
* headers.
@@ -1727,13 +1580,6 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port,
goto err_max_speed_get;
}
- err = mlxsw_sp_port_max_mtu_get(mlxsw_sp_port, &mlxsw_sp_port->max_mtu);
- if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum MTU\n",
- mlxsw_sp_port->local_port);
- goto err_port_max_mtu_get;
- }
-
err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
@@ -1877,7 +1723,6 @@ err_port_ets_init:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
-err_port_max_mtu_get:
err_max_speed_get:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
@@ -2472,7 +2317,7 @@ void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
u64_stats_update_end(&pcpu_stats->syncp);
skb->protocol = eth_type_trans(skb, skb->dev);
- netif_receive_skb(skb);
+ napi_gro_receive(mlxsw_skb_cb(skb)->rx_md_info.napi, skb);
}
static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u16 local_port,
@@ -2530,11 +2375,11 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
ROUTER_EXP, false),
MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD,
ROUTER_EXP, false),
+ MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_LINK_LOCAL, FORWARD,
+ ROUTER_EXP, false),
/* Multicast Router Traps */
MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
- /* NVE traps */
- MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, NEIGH_DISCOVERY, false),
};
static const struct mlxsw_listener mlxsw_sp1_listener[] = {
@@ -2809,11 +2654,12 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
.hwtstamp_get = mlxsw_sp1_ptp_hwtstamp_get,
.hwtstamp_set = mlxsw_sp1_ptp_hwtstamp_set,
.shaper_work = mlxsw_sp1_ptp_shaper_work,
+#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
.get_ts_info = mlxsw_sp1_ptp_get_ts_info,
+#endif
.get_stats_count = mlxsw_sp1_get_stats_count,
.get_stats_strings = mlxsw_sp1_get_stats_strings,
.get_stats = mlxsw_sp1_get_stats,
- .txhdr_construct = mlxsw_sp_ptp_txhdr_construct,
};
static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
@@ -2826,11 +2672,13 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
.hwtstamp_get = mlxsw_sp2_ptp_hwtstamp_get,
.hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set,
.shaper_work = mlxsw_sp2_ptp_shaper_work,
+#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
.get_ts_info = mlxsw_sp2_ptp_get_ts_info,
+#endif
.get_stats_count = mlxsw_sp2_get_stats_count,
.get_stats_strings = mlxsw_sp2_get_stats_strings,
.get_stats = mlxsw_sp2_get_stats,
- .txhdr_construct = mlxsw_sp2_ptp_txhdr_construct,
+ .tx_as_data = true,
};
static const struct mlxsw_sp_ptp_ops mlxsw_sp4_ptp_ops = {
@@ -2843,11 +2691,12 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp4_ptp_ops = {
.hwtstamp_get = mlxsw_sp2_ptp_hwtstamp_get,
.hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set,
.shaper_work = mlxsw_sp2_ptp_shaper_work,
+#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
.get_ts_info = mlxsw_sp2_ptp_get_ts_info,
+#endif
.get_stats_count = mlxsw_sp2_get_stats_count,
.get_stats_strings = mlxsw_sp2_get_stats_strings,
.get_stats = mlxsw_sp2_get_stats,
- .txhdr_construct = mlxsw_sp_ptp_txhdr_construct,
};
struct mlxsw_sp_sample_trigger_node {
@@ -4009,11 +3858,9 @@ static struct mlxsw_driver mlxsw_sp1_driver = {
.trap_policer_fini = mlxsw_sp_trap_policer_fini,
.trap_policer_set = mlxsw_sp_trap_policer_set,
.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
- .txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp1_resources_register,
.kvd_sizes_get = mlxsw_sp_kvd_sizes_get,
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
- .txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp1_config_profile,
.sdq_supports_cqe_v2 = false,
};
@@ -4047,10 +3894,8 @@ static struct mlxsw_driver mlxsw_sp2_driver = {
.trap_policer_fini = mlxsw_sp_trap_policer_fini,
.trap_policer_set = mlxsw_sp_trap_policer_set,
.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
- .txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp2_resources_register,
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
- .txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp2_config_profile,
.sdq_supports_cqe_v2 = true,
};
@@ -4084,10 +3929,8 @@ static struct mlxsw_driver mlxsw_sp3_driver = {
.trap_policer_fini = mlxsw_sp_trap_policer_fini,
.trap_policer_set = mlxsw_sp_trap_policer_set,
.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
- .txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp2_resources_register,
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
- .txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp2_config_profile,
.sdq_supports_cqe_v2 = true,
};
@@ -4119,10 +3962,8 @@ static struct mlxsw_driver mlxsw_sp4_driver = {
.trap_policer_fini = mlxsw_sp_trap_policer_fini,
.trap_policer_set = mlxsw_sp_trap_policer_set,
.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
- .txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp2_resources_register,
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
- .txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp4_config_profile,
.sdq_supports_cqe_v2 = true,
};
@@ -5360,25 +5201,13 @@ static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
return 0;
if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
return -EOPNOTSUPP;
- if (cu_info->linking) {
- if (!netif_running(dev))
- return 0;
- /* When the bridge is VLAN-aware, the VNI of the VxLAN
- * device needs to be mapped to a VLAN, but at this
- * point no VLANs are configured on the VxLAN device
- */
- if (br_vlan_enabled(upper_dev))
- return 0;
+ if (!netif_running(dev))
+ return 0;
+ if (cu_info->linking)
return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
dev, 0, extack);
- } else {
- /* VLANs were already flushed, which triggered the
- * necessary cleanup
- */
- if (br_vlan_enabled(upper_dev))
- return 0;
+ else
mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
- }
break;
case NETDEV_PRE_UP:
upper_dev = netdev_master_upper_dev_get(dev);
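
For reference, the mlxsw_sp_vlan_tag_push() helper added above relies on the generic vlan_insert_tag_set_proto() to tag PTP event packets with the default VID 4095 (configured egress untagged, so the tag is stripped on the way out) before they enter the ASIC as data packets. A minimal standalone sketch of the same 802.1Q insertion follows; the TPID constant, buffer handling and function name here are illustrative assumptions, not the driver's code.

/* Illustrative sketch only, not part of the patch: insert an 802.1Q tag
 * (TPID 0x8100, VID 4095, PCP/DEI = 0) right after the destination and
 * source MACs, mirroring what vlan_insert_tag_set_proto() does for the
 * PTP data path. The caller must provide VLAN_HLEN spare bytes past 'len'.
 */
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

#define ETH_ALEN	6
#define VLAN_HLEN	4
#define DEFAULT_VID	4095	/* MLXSW_SP_DEFAULT_VID in the driver */

static size_t push_vlan_tag(uint8_t *frame, size_t len)
{
	uint16_t tpid = htons(0x8100);
	uint16_t tci = htons(DEFAULT_VID & 0x0fff);

	/* Make room for the tag between the MAC header and the EtherType. */
	memmove(frame + 2 * ETH_ALEN + VLAN_HLEN, frame + 2 * ETH_ALEN,
		len - 2 * ETH_ALEN);
	memcpy(frame + 2 * ETH_ALEN, &tpid, sizeof(tpid));
	memcpy(frame + 2 * ETH_ALEN + sizeof(tpid), &tci, sizeof(tci));
	return len + VLAN_HLEN;
}
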
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 3beb5d0847ab..b03ff9e044f9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -233,20 +233,18 @@ struct mlxsw_sp_ptp_ops {
u16 local_port);
int (*hwtstamp_get)(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config);
+ struct kernel_hwtstamp_config *config);
int (*hwtstamp_set)(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config);
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
void (*shaper_work)(struct work_struct *work);
int (*get_ts_info)(struct mlxsw_sp *mlxsw_sp,
- struct ethtool_ts_info *info);
+ struct kernel_ethtool_ts_info *info);
int (*get_stats_count)(void);
void (*get_stats_strings)(u8 **p);
void (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
u64 *data, int data_index);
- int (*txhdr_construct)(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info);
+ bool tx_as_data;
};
struct mlxsw_sp_fid_core_ops {
@@ -354,12 +352,11 @@ struct mlxsw_sp_port {
struct mlxsw_sp_flow_block *eg_flow_block;
struct {
struct delayed_work shaper_dw;
- struct hwtstamp_config hwtstamp_config;
+ struct kernel_hwtstamp_config hwtstamp_config;
u16 ing_types;
u16 egr_types;
struct mlxsw_sp_ptp_port_stats stats;
} ptp;
- int max_mtu;
u32 max_speed;
struct mlxsw_sp_hdroom *hdroom;
u64 module_overheat_initial_val;
@@ -665,10 +662,10 @@ bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
const struct net_device *br_dev);
int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
const struct net_device *br_dev,
- const struct net_device *vxlan_dev, u16 vid,
+ struct net_device *vxlan_dev, u16 vid,
struct netlink_ext_ack *extack);
void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
- const struct net_device *vxlan_dev);
+ struct net_device *vxlan_dev);
extern struct notifier_block mlxsw_sp_switchdev_notifier;
/* spectrum.c */
@@ -712,12 +709,6 @@ int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
unsigned int *p_counter_index);
void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
unsigned int counter_index);
-void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info);
-int mlxsw_sp_txhdr_ptp_data_construct(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info);
bool mlxsw_sp_port_dev_check(const struct net_device *dev);
struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev);
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev);
@@ -764,9 +755,6 @@ void
mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
struct net_device *dev);
-bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp,
- const struct net_device *dev);
-u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev);
u16 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
enum mlxsw_sp_l3proto ul_proto,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
index 4b713832fdd5..07cb1e26ca3e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
@@ -391,7 +391,8 @@ mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
- lkey_id = aregion->ops->lkey_id_get(aregion, aentry->enc_key, erp_id);
+ lkey_id = aregion->ops->lkey_id_get(aregion, aentry->ht_key.enc_key,
+ erp_id);
if (IS_ERR(lkey_id))
return PTR_ERR(lkey_id);
aentry->lkey_id = lkey_id;
@@ -399,7 +400,7 @@ mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
kvdl_index = mlxsw_afa_block_first_kvdl_index(rulei->act_block);
mlxsw_reg_ptce3_pack(ptce3_pl, true, MLXSW_REG_PTCE3_OP_WRITE_WRITE,
priority, region->tcam_region_info,
- aentry->enc_key, erp_id,
+ aentry->ht_key.enc_key, erp_id,
aentry->delta_info.start,
aentry->delta_info.mask,
aentry->delta_info.value,
@@ -428,7 +429,7 @@ mlxsw_sp_acl_atcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp,
mlxsw_reg_ptce3_pack(ptce3_pl, false, MLXSW_REG_PTCE3_OP_WRITE_WRITE, 0,
region->tcam_region_info,
- aentry->enc_key, erp_id,
+ aentry->ht_key.enc_key, erp_id,
aentry->delta_info.start,
aentry->delta_info.mask,
aentry->delta_info.value,
@@ -457,7 +458,7 @@ mlxsw_sp_acl_atcam_region_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
kvdl_index = mlxsw_afa_block_first_kvdl_index(rulei->act_block);
mlxsw_reg_ptce3_pack(ptce3_pl, true, MLXSW_REG_PTCE3_OP_WRITE_UPDATE,
priority, region->tcam_region_info,
- aentry->enc_key, erp_id,
+ aentry->ht_key.enc_key, erp_id,
aentry->delta_info.start,
aentry->delta_info.mask,
aentry->delta_info.value,
@@ -480,26 +481,23 @@ __mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp,
int err;
mlxsw_afk_encode(afk, region->key_info, &rulei->values,
- aentry->ht_key.full_enc_key, mask);
+ aentry->ht_key.enc_key, mask);
erp_mask = mlxsw_sp_acl_erp_mask_get(aregion, mask, false);
if (IS_ERR(erp_mask))
return PTR_ERR(erp_mask);
aentry->erp_mask = erp_mask;
aentry->ht_key.erp_id = mlxsw_sp_acl_erp_mask_erp_id(erp_mask);
- memcpy(aentry->enc_key, aentry->ht_key.full_enc_key,
- sizeof(aentry->enc_key));
/* Compute all needed delta information and clear the delta bits
- * from the encrypted key.
+ * from the encoded key.
*/
delta = mlxsw_sp_acl_erp_delta(aentry->erp_mask);
aentry->delta_info.start = mlxsw_sp_acl_erp_delta_start(delta);
aentry->delta_info.mask = mlxsw_sp_acl_erp_delta_mask(delta);
aentry->delta_info.value =
- mlxsw_sp_acl_erp_delta_value(delta,
- aentry->ht_key.full_enc_key);
- mlxsw_sp_acl_erp_delta_clear(delta, aentry->enc_key);
+ mlxsw_sp_acl_erp_delta_value(delta, aentry->ht_key.enc_key);
+ mlxsw_sp_acl_erp_delta_clear(delta, aentry->ht_key.enc_key);
/* Add rule to the list of A-TCAM rules, assuming this
	 * rule is intended for A-TCAM. In case this rule does
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
index 95f63fcf4ba1..067f0055a55a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
@@ -212,7 +212,22 @@ static const u8 mlxsw_sp4_acl_bf_crc6_tab[256] = {
* This array defines key offsets for easy access when copying key blocks from
* entry key to Bloom filter chunk.
*/
-static const u8 chunk_key_offsets[MLXSW_BLOOM_KEY_CHUNKS] = {2, 20, 38};
+static char *
+mlxsw_sp_acl_bf_enc_key_get(struct mlxsw_sp_acl_atcam_entry *aentry,
+ u8 chunk_index)
+{
+ switch (chunk_index) {
+ case 0:
+ return &aentry->ht_key.enc_key[2];
+ case 1:
+ return &aentry->ht_key.enc_key[20];
+ case 2:
+ return &aentry->ht_key.enc_key[38];
+ default:
+ WARN_ON_ONCE(1);
+ return &aentry->ht_key.enc_key[0];
+ }
+}
static u16 mlxsw_sp2_acl_bf_crc16_byte(u16 crc, u8 c)
{
@@ -235,9 +250,10 @@ __mlxsw_sp_acl_bf_key_encode(struct mlxsw_sp_acl_atcam_region *aregion,
u8 key_offset, u8 chunk_key_len, u8 chunk_len)
{
struct mlxsw_afk_key_info *key_info = aregion->region->key_info;
- u8 chunk_index, chunk_count, block_count;
+ u8 chunk_index, chunk_count;
char *chunk = output;
__be16 erp_region_id;
+ u32 block_count;
block_count = mlxsw_afk_key_info_blocks_count_get(key_info);
chunk_count = 1 + ((block_count - 1) >> 2);
@@ -245,12 +261,13 @@ __mlxsw_sp_acl_bf_key_encode(struct mlxsw_sp_acl_atcam_region *aregion,
(aregion->region->id << 4));
for (chunk_index = max_chunks - chunk_count; chunk_index < max_chunks;
chunk_index++) {
+ char *enc_key;
+
memset(chunk, 0, pad_bytes);
memcpy(chunk + pad_bytes, &erp_region_id,
sizeof(erp_region_id));
- memcpy(chunk + key_offset,
- &aentry->enc_key[chunk_key_offsets[chunk_index]],
- chunk_key_len);
+ enc_key = mlxsw_sp_acl_bf_enc_key_get(aentry, chunk_index);
+ memcpy(chunk + key_offset, enc_key, chunk_key_len);
chunk += chunk_len;
}
*len = chunk_count * chunk_len;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
index d231f4d2888b..9eee229303cc 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
@@ -1217,18 +1217,6 @@ static bool mlxsw_sp_acl_erp_delta_check(void *priv, const void *parent_obj,
return err ? false : true;
}
-static int mlxsw_sp_acl_erp_hints_obj_cmp(const void *obj1, const void *obj2)
-{
- const struct mlxsw_sp_acl_erp_key *key1 = obj1;
- const struct mlxsw_sp_acl_erp_key *key2 = obj2;
-
- /* For hints purposes, two objects are considered equal
- * in case the masks are the same. Does not matter what
- * the "ctcam" value is.
- */
- return memcmp(key1->mask, key2->mask, sizeof(key1->mask));
-}
-
static void *mlxsw_sp_acl_erp_delta_create(void *priv, void *parent_obj,
void *obj)
{
@@ -1308,7 +1296,6 @@ static void mlxsw_sp_acl_erp_root_destroy(void *priv, void *root_priv)
static const struct objagg_ops mlxsw_sp_acl_erp_objagg_ops = {
.obj_size = sizeof(struct mlxsw_sp_acl_erp_key),
.delta_check = mlxsw_sp_acl_erp_delta_check,
- .hints_obj_cmp = mlxsw_sp_acl_erp_hints_obj_cmp,
.delta_create = mlxsw_sp_acl_erp_delta_create,
.delta_destroy = mlxsw_sp_acl_erp_delta_destroy,
.root_create = mlxsw_sp_acl_erp_root_create,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
index eaad78605602..1850a975b380 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
@@ -7,7 +7,7 @@
#include "item.h"
#include "core_acl_flex_keys.h"
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_dmac[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_dmac[] = {
MLXSW_AFK_ELEMENT_INST_BUF(DMAC_32_47, 0x00, 2),
MLXSW_AFK_ELEMENT_INST_BUF(DMAC_0_31, 0x02, 4),
MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3),
@@ -15,7 +15,7 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_dmac[] = {
MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac[] = {
MLXSW_AFK_ELEMENT_INST_BUF(SMAC_32_47, 0x00, 2),
MLXSW_AFK_ELEMENT_INST_BUF(SMAC_0_31, 0x02, 4),
MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3),
@@ -23,27 +23,27 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac[] = {
MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac_ex[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac_ex[] = {
MLXSW_AFK_ELEMENT_INST_BUF(SMAC_32_47, 0x02, 2),
MLXSW_AFK_ELEMENT_INST_BUF(SMAC_0_31, 0x04, 4),
MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x0C, 0, 16),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_sip[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_sip[] = {
MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x00, 4),
MLXSW_AFK_ELEMENT_INST_U32(L4_PORT_RANGE, 0x04, 16, 16),
MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_dip[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_dip[] = {
MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_0_31, 0x00, 4),
MLXSW_AFK_ELEMENT_INST_U32(L4_PORT_RANGE, 0x04, 16, 16),
MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4[] = {
MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x00, 4),
MLXSW_AFK_ELEMENT_INST_U32(IP_ECN, 0x04, 4, 2),
MLXSW_AFK_ELEMENT_INST_U32(IP_TTL_, 0x04, 24, 8),
@@ -51,35 +51,35 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4[] = {
MLXSW_AFK_ELEMENT_INST_U32(TCP_FLAGS, 0x08, 8, 9), /* TCP_CONTROL+TCP_ECN */
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_ex[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_ex[] = {
MLXSW_AFK_ELEMENT_INST_U32(VID, 0x00, 0, 12),
MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 29, 3),
MLXSW_AFK_ELEMENT_INST_U32(SRC_L4_PORT, 0x08, 0, 16),
MLXSW_AFK_ELEMENT_INST_U32(DST_L4_PORT, 0x0C, 0, 16),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_dip[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_dip[] = {
MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_32_63, 0x00, 4),
MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_0_31, 0x04, 4),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_ex1[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_ex1[] = {
MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_96_127, 0x00, 4),
MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_64_95, 0x04, 4),
MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip[] = {
MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_32_63, 0x00, 4),
MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x04, 4),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip_ex[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip_ex[] = {
MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_96_127, 0x00, 4),
MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_64_95, 0x04, 4),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_packet_type[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_packet_type[] = {
MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x00, 0, 16),
};
@@ -124,90 +124,90 @@ const struct mlxsw_afk_ops mlxsw_sp1_afk_ops = {
.clear_block = mlxsw_sp1_afk_clear_block,
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_0[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_0[] = {
MLXSW_AFK_ELEMENT_INST_U32(FDB_MISS, 0x00, 3, 1),
MLXSW_AFK_ELEMENT_INST_BUF(DMAC_0_31, 0x04, 4),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_1[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_1[] = {
MLXSW_AFK_ELEMENT_INST_U32(FDB_MISS, 0x00, 3, 1),
MLXSW_AFK_ELEMENT_INST_BUF(SMAC_0_31, 0x04, 4),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_2[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_2[] = {
MLXSW_AFK_ELEMENT_INST_BUF(SMAC_32_47, 0x04, 2),
MLXSW_AFK_ELEMENT_INST_BUF(DMAC_32_47, 0x06, 2),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_3[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_3[] = {
MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x00, 0, 3),
MLXSW_AFK_ELEMENT_INST_U32(VID, 0x04, 16, 12),
MLXSW_AFK_ELEMENT_INST_BUF(DMAC_32_47, 0x06, 2),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_4[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_4[] = {
MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x00, 0, 3),
MLXSW_AFK_ELEMENT_INST_U32(VID, 0x04, 16, 12),
MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x04, 0, 16),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_5[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_5[] = {
MLXSW_AFK_ELEMENT_INST_U32(VID, 0x04, 16, 12),
MLXSW_AFK_ELEMENT_INST_EXT_U32(SRC_SYS_PORT, 0x04, 0, 8, -1, true), /* RX_ACL_SYSTEM_PORT */
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_0[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_0[] = {
MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_0_31, 0x04, 4),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_1[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_1[] = {
MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x04, 4),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_2[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_2[] = {
MLXSW_AFK_ELEMENT_INST_U32(IP_DSCP, 0x04, 0, 6),
MLXSW_AFK_ELEMENT_INST_U32(IP_ECN, 0x04, 6, 2),
MLXSW_AFK_ELEMENT_INST_U32(IP_TTL_, 0x04, 8, 8),
MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x04, 16, 8),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_5[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_5[] = {
MLXSW_AFK_ELEMENT_INST_EXT_U32(VIRT_ROUTER, 0x04, 20, 11, 0, true),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_0[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_0[] = {
MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_0_3, 0x00, 0, 4),
MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_32_63, 0x04, 4),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_1[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_1[] = {
MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_4_7, 0x00, 0, 4),
MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_64_95, 0x04, 4),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_2[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_2[] = {
MLXSW_AFK_ELEMENT_INST_EXT_U32(VIRT_ROUTER_MSB, 0x00, 0, 3, 0, true),
MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_96_127, 0x04, 4),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_3[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_3[] = {
MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_32_63, 0x04, 4),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_4[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_4[] = {
MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_64_95, 0x04, 4),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_5[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_5[] = {
MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_96_127, 0x04, 4),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l4_0[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l4_0[] = {
MLXSW_AFK_ELEMENT_INST_U32(SRC_L4_PORT, 0x04, 16, 16),
MLXSW_AFK_ELEMENT_INST_U32(DST_L4_PORT, 0x04, 0, 16),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l4_2[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l4_2[] = {
MLXSW_AFK_ELEMENT_INST_U32(TCP_FLAGS, 0x04, 16, 9), /* TCP_CONTROL + TCP_ECN */
MLXSW_AFK_ELEMENT_INST_U32(L4_PORT_RANGE, 0x04, 0, 16),
};
@@ -319,16 +319,20 @@ const struct mlxsw_afk_ops mlxsw_sp2_afk_ops = {
.clear_block = mlxsw_sp2_afk_clear_block,
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_5b[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_5b[] = {
MLXSW_AFK_ELEMENT_INST_U32(VID, 0x04, 18, 12),
MLXSW_AFK_ELEMENT_INST_EXT_U32(SRC_SYS_PORT, 0x04, 0, 9, -1, true), /* RX_ACL_SYSTEM_PORT */
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_5b[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_1b[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x04, 4),
+};
+
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_5b[] = {
MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER, 0x04, 20, 12),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_2b[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_2b[] = {
MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_MSB, 0x00, 0, 4),
MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_96_127, 0x04, 4),
};
@@ -341,7 +345,7 @@ static const struct mlxsw_afk_block mlxsw_sp4_afk_blocks[] = {
MLXSW_AFK_BLOCK(0x14, mlxsw_sp_afk_element_info_mac_4),
MLXSW_AFK_BLOCK_HIGH_ENTROPY(0x1A, mlxsw_sp_afk_element_info_mac_5b),
MLXSW_AFK_BLOCK_HIGH_ENTROPY(0x38, mlxsw_sp_afk_element_info_ipv4_0),
- MLXSW_AFK_BLOCK_HIGH_ENTROPY(0x39, mlxsw_sp_afk_element_info_ipv4_1),
+ MLXSW_AFK_BLOCK_HIGH_ENTROPY(0x3F, mlxsw_sp_afk_element_info_ipv4_1b),
MLXSW_AFK_BLOCK(0x3A, mlxsw_sp_afk_element_info_ipv4_2),
MLXSW_AFK_BLOCK(0x36, mlxsw_sp_afk_element_info_ipv4_5b),
MLXSW_AFK_BLOCK(0x40, mlxsw_sp_afk_element_info_ipv6_0),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index f20052776b3f..69f9da9fb305 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -10,6 +10,7 @@
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
+#include <linux/idr.h>
#include <net/devlink.h>
#include <trace/events/mlxsw.h>
@@ -58,41 +59,43 @@ int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam,
u16 *p_id)
{
- u16 id;
+ int id;
- id = find_first_zero_bit(tcam->used_regions, tcam->max_regions);
- if (id < tcam->max_regions) {
- __set_bit(id, tcam->used_regions);
- *p_id = id;
- return 0;
- }
- return -ENOBUFS;
+ id = ida_alloc_max(&tcam->used_regions, tcam->max_regions - 1,
+ GFP_KERNEL);
+ if (id < 0)
+ return id;
+
+ *p_id = id;
+
+ return 0;
}
static void mlxsw_sp_acl_tcam_region_id_put(struct mlxsw_sp_acl_tcam *tcam,
u16 id)
{
- __clear_bit(id, tcam->used_regions);
+ ida_free(&tcam->used_regions, id);
}
static int mlxsw_sp_acl_tcam_group_id_get(struct mlxsw_sp_acl_tcam *tcam,
u16 *p_id)
{
- u16 id;
+ int id;
- id = find_first_zero_bit(tcam->used_groups, tcam->max_groups);
- if (id < tcam->max_groups) {
- __set_bit(id, tcam->used_groups);
- *p_id = id;
- return 0;
- }
- return -ENOBUFS;
+ id = ida_alloc_max(&tcam->used_groups, tcam->max_groups - 1,
+ GFP_KERNEL);
+ if (id < 0)
+ return id;
+
+ *p_id = id;
+
+ return 0;
}
static void mlxsw_sp_acl_tcam_group_id_put(struct mlxsw_sp_acl_tcam *tcam,
u16 id)
{
- __clear_bit(id, tcam->used_groups);
+ ida_free(&tcam->used_groups, id);
}
struct mlxsw_sp_acl_tcam_pattern {
@@ -715,7 +718,9 @@ static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
rehash.dw.work);
int credits = MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS;
+ mutex_lock(&vregion->lock);
mlxsw_sp_acl_tcam_vregion_rehash(vregion->mlxsw_sp, vregion, &credits);
+ mutex_unlock(&vregion->lock);
if (credits < 0)
/* Rehash gone out of credits so it was interrupted.
* Schedule the work as soon as possible to continue.
@@ -726,6 +731,17 @@ static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
}
static void
+mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
+{
+ /* The entry markers are relative to the current chunk and therefore
+	 * need to be reset together with the chunk marker.
+ */
+ ctx->current_vchunk = NULL;
+ ctx->start_ventry = NULL;
+ ctx->stop_ventry = NULL;
+}
+
+static void
mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;
@@ -747,7 +763,7 @@ mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(struct mlxsw_sp_acl_tcam_vregion *v
* the current chunk pointer to make sure all chunks
* are properly migrated.
*/
- vregion->rehash.ctx.current_vchunk = NULL;
+ mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(&vregion->rehash.ctx);
}
static struct mlxsw_sp_acl_tcam_vregion *
@@ -820,10 +836,14 @@ mlxsw_sp_acl_tcam_vregion_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam = vregion->tcam;
if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
+ struct mlxsw_sp_acl_tcam_rehash_ctx *ctx = &vregion->rehash.ctx;
+
mutex_lock(&tcam->lock);
list_del(&vregion->tlist);
mutex_unlock(&tcam->lock);
- cancel_delayed_work_sync(&vregion->rehash.dw);
+ if (cancel_delayed_work_sync(&vregion->rehash.dw) &&
+ ctx->hints_priv)
+ ops->region_rehash_hints_put(ctx->hints_priv);
}
mlxsw_sp_acl_tcam_vgroup_vregion_detach(mlxsw_sp, vregion);
if (vregion->region2)
@@ -1154,8 +1174,14 @@ mlxsw_sp_acl_tcam_ventry_activity_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_ventry *ventry,
bool *activity)
{
- return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp,
- ventry->entry, activity);
+ struct mlxsw_sp_acl_tcam_vregion *vregion = ventry->vchunk->vregion;
+ int err;
+
+ mutex_lock(&vregion->lock);
+ err = mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp, ventry->entry,
+ activity);
+ mutex_unlock(&vregion->lock);
+ return err;
}
static int
@@ -1189,6 +1215,8 @@ mlxsw_sp_acl_tcam_vchunk_migrate_start(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_acl_tcam_chunk *new_chunk;
+ WARN_ON(vchunk->chunk2);
+
new_chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk, region);
if (IS_ERR(new_chunk))
return PTR_ERR(new_chunk);
@@ -1207,7 +1235,7 @@ mlxsw_sp_acl_tcam_vchunk_migrate_end(struct mlxsw_sp *mlxsw_sp,
{
mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
vchunk->chunk2 = NULL;
- ctx->current_vchunk = NULL;
+ mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
}
static int
@@ -1230,6 +1258,9 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
return 0;
}
+ if (list_empty(&vchunk->ventry_list))
+ goto out;
+
/* If the migration got interrupted, we have the ventry to start from
* stored in context.
*/
@@ -1239,6 +1270,8 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
ventry = list_first_entry(&vchunk->ventry_list,
typeof(*ventry), list);
+ WARN_ON(ventry->vchunk != vchunk);
+
list_for_each_entry_from(ventry, &vchunk->ventry_list, list) {
/* During rollback, once we reach the ventry that failed
* to migrate, we are done.
@@ -1279,6 +1312,7 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
}
}
+out:
mlxsw_sp_acl_tcam_vchunk_migrate_end(mlxsw_sp, vchunk, ctx);
return 0;
}
@@ -1292,6 +1326,9 @@ mlxsw_sp_acl_tcam_vchunk_migrate_all(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_vchunk *vchunk;
int err;
+ if (list_empty(&vregion->vchunk_list))
+ return 0;
+
/* If the migration got interrupted, we have the vchunk
* we are working on stored in context.
*/
@@ -1320,16 +1357,17 @@ mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
int err, err2;
trace_mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion);
- mutex_lock(&vregion->lock);
err = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
ctx, credits);
if (err) {
+ if (ctx->this_is_rollback)
+ return err;
/* In case migration was not successful, we need to swap
* so the original region pointer is assigned again
* to vregion->region.
*/
swap(vregion->region, vregion->region2);
- ctx->current_vchunk = NULL;
+ mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
ctx->this_is_rollback = true;
err2 = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
ctx, credits);
@@ -1340,7 +1378,6 @@ mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
	/* Let the rollback be continued later on. */
}
}
- mutex_unlock(&vregion->lock);
trace_mlxsw_sp_acl_tcam_vregion_migrate_end(mlxsw_sp, vregion);
return err;
}
@@ -1389,6 +1426,7 @@ mlxsw_sp_acl_tcam_vregion_rehash_start(struct mlxsw_sp *mlxsw_sp,
ctx->hints_priv = hints_priv;
ctx->this_is_rollback = false;
+ mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
return 0;
@@ -1441,7 +1479,8 @@ mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
err = mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion,
ctx, credits);
if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");
+ dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");
+ return;
}
if (*credits >= 0)
@@ -1450,7 +1489,8 @@ mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
static int
mlxsw_sp_acl_tcam_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
struct mlxsw_sp_acl_tcam *tcam;
@@ -1465,7 +1505,8 @@ mlxsw_sp_acl_tcam_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
static int
mlxsw_sp_acl_tcam_region_rehash_intrvl_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
struct mlxsw_sp_acl_tcam_vregion *vregion;
@@ -1549,19 +1590,11 @@ int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
if (max_tcam_regions < max_regions)
max_regions = max_tcam_regions;
- tcam->used_regions = bitmap_zalloc(max_regions, GFP_KERNEL);
- if (!tcam->used_regions) {
- err = -ENOMEM;
- goto err_alloc_used_regions;
- }
+ ida_init(&tcam->used_regions);
tcam->max_regions = max_regions;
max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
- tcam->used_groups = bitmap_zalloc(max_groups, GFP_KERNEL);
- if (!tcam->used_groups) {
- err = -ENOMEM;
- goto err_alloc_used_groups;
- }
+ ida_init(&tcam->used_groups);
tcam->max_groups = max_groups;
tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
ACL_MAX_GROUP_SIZE);
@@ -1575,10 +1608,8 @@ int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
return 0;
err_tcam_init:
- bitmap_free(tcam->used_groups);
-err_alloc_used_groups:
- bitmap_free(tcam->used_regions);
-err_alloc_used_regions:
+ ida_destroy(&tcam->used_groups);
+ ida_destroy(&tcam->used_regions);
mlxsw_sp_acl_tcam_rehash_params_unregister(mlxsw_sp);
err_rehash_params_register:
mutex_destroy(&tcam->lock);
@@ -1591,8 +1622,8 @@ void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
ops->fini(mlxsw_sp, tcam->priv);
- bitmap_free(tcam->used_groups);
- bitmap_free(tcam->used_regions);
+ ida_destroy(&tcam->used_groups);
+ ida_destroy(&tcam->used_regions);
mlxsw_sp_acl_tcam_rehash_params_unregister(mlxsw_sp);
mutex_destroy(&tcam->lock);
}
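
The ID management above moves from driver-private bitmaps to the kernel's IDA (ida_alloc_max()/ida_free()). As a rough, userspace-only analogue of that allocate-lowest-free-ID-up-to-a-maximum pattern (ID_SPACE and the array-backed bookkeeping are assumptions of this sketch, not how the kernel IDA works internally):

/* Sketch: hand out the lowest free ID not exceeding 'max', or -1 when the
 * space is exhausted; the kernel IDA provides the same semantics without a
 * fixed-size backing array.
 */
#include <stdbool.h>

#define ID_SPACE	1024	/* assumed upper bound for the sketch */

static bool used[ID_SPACE];

static int id_alloc_max(unsigned int max)
{
	unsigned int id;

	for (id = 0; id <= max && id < ID_SPACE; id++) {
		if (!used[id]) {
			used[id] = true;
			return id;
		}
	}
	return -1;
}

static void id_free(unsigned int id)
{
	if (id < ID_SPACE)
		used[id] = false;
}
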
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
index 462bf448497d..010204f73ea4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
@@ -6,15 +6,16 @@
#include <linux/list.h>
#include <linux/parman.h>
+#include <linux/idr.h>
#include "reg.h"
#include "spectrum.h"
#include "core_acl_flex_keys.h"
struct mlxsw_sp_acl_tcam {
- unsigned long *used_regions; /* bit array */
+ struct ida used_regions;
unsigned int max_regions;
- unsigned long *used_groups; /* bit array */
+ struct ida used_groups;
unsigned int max_groups;
unsigned int max_group_size;
struct mutex lock; /* guards vregion list */
@@ -166,9 +167,9 @@ struct mlxsw_sp_acl_atcam_region {
};
struct mlxsw_sp_acl_atcam_entry_ht_key {
- char full_enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded
- * key.
- */
+ char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key, minus
+ * delta bits.
+ */
u8 erp_id;
};
@@ -180,9 +181,6 @@ struct mlxsw_sp_acl_atcam_entry {
struct rhash_head ht_node;
struct list_head list; /* Member in entries_list */
struct mlxsw_sp_acl_atcam_entry_ht_key ht_key;
- char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key,
- * minus delta bits.
- */
struct {
u16 start;
u8 mask;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index c9f1c79f3f9d..2c0cfa79d138 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -399,11 +399,13 @@ void mlxsw_sp_hdroom_bufs_reset_sizes(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_hdroom *hdroom)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ unsigned int max_mtu = mlxsw_sp_port->dev->max_mtu;
u16 reserve_cells;
int i;
+ max_mtu += MLXSW_PORT_ETH_FRAME_HDR;
/* Internal buffer. */
- reserve_cells = mlxsw_sp_hdroom_int_buf_size_get(mlxsw_sp, mlxsw_sp_port->max_mtu,
+ reserve_cells = mlxsw_sp_hdroom_int_buf_size_get(mlxsw_sp, max_mtu,
mlxsw_sp_port->max_speed);
reserve_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, reserve_cells);
hdroom->int_buf.reserve_cells = reserve_cells;
@@ -613,7 +615,9 @@ static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
/* Buffer 9 is used for control traffic. */
- size9 = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, mlxsw_sp_port->max_mtu);
+ size9 = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port,
+ mlxsw_sp_port->dev->max_mtu +
+ MLXSW_PORT_ETH_FRAME_HDR);
hdroom.bufs.buf[9].size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, size9);
return __mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom, true);
@@ -1607,8 +1611,8 @@ static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
unsigned int sb_index)
{
+ u16 local_port, local_port_1, first_local_port, last_local_port;
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
- u16 local_port, local_port_1, last_local_port;
struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
u8 masked_count, current_page = 0;
unsigned long cb_priv = 0;
@@ -1628,6 +1632,7 @@ next_batch:
masked_count = 0;
mlxsw_reg_sbsr_pack(sbsr_pl, false);
mlxsw_reg_sbsr_port_page_set(sbsr_pl, current_page);
+ first_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE;
last_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE +
MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE - 1;
@@ -1645,9 +1650,12 @@ next_batch:
if (local_port != MLXSW_PORT_CPU_PORT) {
/* Ingress quotas are not supported for the CPU port */
mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl,
- local_port, 1);
+ local_port - first_local_port,
+ 1);
}
- mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
+ mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl,
+ local_port - first_local_port,
+ 1);
for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
&bulk_list);
@@ -1684,7 +1692,7 @@ int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
unsigned int sb_index)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
- u16 local_port, last_local_port;
+ u16 local_port, first_local_port, last_local_port;
LIST_HEAD(bulk_list);
unsigned int masked_count;
u8 current_page = 0;
@@ -1702,6 +1710,7 @@ next_batch:
masked_count = 0;
mlxsw_reg_sbsr_pack(sbsr_pl, true);
mlxsw_reg_sbsr_port_page_set(sbsr_pl, current_page);
+ first_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE;
last_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE +
MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE - 1;
@@ -1719,9 +1728,12 @@ next_batch:
if (local_port != MLXSW_PORT_CPU_PORT) {
/* Ingress quotas are not supported for the CPU port */
mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl,
- local_port, 1);
+ local_port - first_local_port,
+ 1);
}
- mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
+ mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl,
+ local_port - first_local_port,
+ 1);
for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
&bulk_list);
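
The spectrum_buffers.c hunks above adjust the SBSR port masks so they are indexed relative to the current page rather than by absolute local port. Shown standalone, the arithmetic is just an offset subtraction; the 256 ports-per-page figure below is an assumption of this sketch (it mirrors how MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE is used above), not a value taken from this patch.

/* Sketch of the per-page mask index computed in the hunks above. */
#define PORTS_IN_PAGE	256

static unsigned int sbsr_mask_index(unsigned int local_port,
				    unsigned int current_page)
{
	unsigned int first_local_port = current_page * PORTS_IN_PAGE;

	return local_port - first_local_port;	/* index within the page */
}
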
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
index 50e591420bd9..b1094aaffa5f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
@@ -170,8 +170,7 @@ void mlxsw_sp_counter_pool_fini(struct mlxsw_sp *mlxsw_sp)
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
mlxsw_sp_counter_sub_pools_fini(mlxsw_sp);
- WARN_ON(find_first_bit(pool->usage, pool->pool_size) !=
- pool->pool_size);
+ WARN_ON(!bitmap_empty(pool->usage, pool->pool_size));
WARN_ON(atomic_read(&pool->active_entries_count));
bitmap_free(pool->usage);
devl_resource_occ_get_unregister(devlink,
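
The counter-pool cleanup swaps an open-coded find_first_bit() comparison for bitmap_empty(); the two tests agree because find_first_bit() returns the bitmap size when no bit is set. A self-contained illustration of that equivalence (these helpers are simplified stand-ins, not the kernel implementations):

/* A bitmap is empty exactly when the first set bit is at or beyond nbits. */
#include <stdbool.h>
#include <stddef.h>

static size_t find_first_set(const unsigned long *map, size_t nbits)
{
	size_t i;
	size_t bits_per_word = 8 * sizeof(unsigned long);

	for (i = 0; i < nbits; i++)
		if (map[i / bits_per_word] & (1UL << (i % bits_per_word)))
			return i;
	return nbits;
}

static bool bitmap_is_empty(const unsigned long *map, size_t nbits)
{
	return find_first_set(map, nbits) == nbits;
}
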
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
index ca80af06465f..fa6eddd27ecf 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
@@ -283,7 +283,7 @@ static u64 mlxsw_sp_dpipe_table_erif_size_get(void *priv)
return MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
}
-static struct devlink_dpipe_table_ops mlxsw_sp_erif_ops = {
+static const struct devlink_dpipe_table_ops mlxsw_sp_erif_ops = {
.matches_dump = mlxsw_sp_dpipe_table_erif_matches_dump,
.actions_dump = mlxsw_sp_dpipe_table_erif_actions_dump,
.entries_dump = mlxsw_sp_dpipe_table_erif_entries_dump,
@@ -734,7 +734,7 @@ static u64 mlxsw_sp_dpipe_table_host4_size_get(void *priv)
return mlxsw_sp_dpipe_table_host_size_get(mlxsw_sp, AF_INET);
}
-static struct devlink_dpipe_table_ops mlxsw_sp_host4_ops = {
+static const struct devlink_dpipe_table_ops mlxsw_sp_host4_ops = {
.matches_dump = mlxsw_sp_dpipe_table_host4_matches_dump,
.actions_dump = mlxsw_sp_dpipe_table_host_actions_dump,
.entries_dump = mlxsw_sp_dpipe_table_host4_entries_dump,
@@ -811,7 +811,7 @@ static u64 mlxsw_sp_dpipe_table_host6_size_get(void *priv)
return mlxsw_sp_dpipe_table_host_size_get(mlxsw_sp, AF_INET6);
}
-static struct devlink_dpipe_table_ops mlxsw_sp_host6_ops = {
+static const struct devlink_dpipe_table_ops mlxsw_sp_host6_ops = {
.matches_dump = mlxsw_sp_dpipe_table_host6_matches_dump,
.actions_dump = mlxsw_sp_dpipe_table_host_actions_dump,
.entries_dump = mlxsw_sp_dpipe_table_host6_entries_dump,
@@ -1230,7 +1230,7 @@ mlxsw_sp_dpipe_table_adj_size_get(void *priv)
return size;
}
-static struct devlink_dpipe_table_ops mlxsw_sp_dpipe_table_adj_ops = {
+static const struct devlink_dpipe_table_ops mlxsw_sp_dpipe_table_adj_ops = {
.matches_dump = mlxsw_sp_dpipe_table_adj_matches_dump,
.actions_dump = mlxsw_sp_dpipe_table_adj_actions_dump,
.entries_dump = mlxsw_sp_dpipe_table_adj_entries_dump,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
index 0f29e9c19411..0a8fb9c842d3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
@@ -262,7 +262,7 @@ err_port_pause_configure:
}
struct mlxsw_sp_port_hw_stats {
- char str[ETH_GSTRING_LEN];
+ char str[ETH_GSTRING_LEN] __nonstring;
u64 (*getter)(const char *payload);
bool cells_bytes;
};
@@ -768,7 +768,9 @@ static void __mlxsw_sp_port_get_stats(struct net_device *dev,
err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
if (err)
return;
- mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
+ err = mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
+ if (err)
+ return;
for (i = 0; i < len; i++) {
data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
if (!hw_stats[i].cells_bytes)
@@ -1068,7 +1070,21 @@ mlxsw_sp_get_module_eeprom_by_page(struct net_device *dev,
}
static int
-mlxsw_sp_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *info)
+mlxsw_sp_set_module_eeprom_by_page(struct net_device *dev,
+ const struct ethtool_module_eeprom *page,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 slot_index = mlxsw_sp_port->mapping.slot_index;
+ u8 module = mlxsw_sp_port->mapping.module;
+
+ return mlxsw_env_set_module_eeprom_by_page(mlxsw_sp->core, slot_index,
+ module, page, extack);
+}
+
+static int
+mlxsw_sp_get_ts_info(struct net_device *netdev, struct kernel_ethtool_ts_info *info)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
@@ -1256,6 +1272,7 @@ const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
.get_module_info = mlxsw_sp_get_module_info,
.get_module_eeprom = mlxsw_sp_get_module_eeprom,
.get_module_eeprom_by_page = mlxsw_sp_get_module_eeprom_by_page,
+ .set_module_eeprom_by_page = mlxsw_sp_set_module_eeprom_by_page,
.get_ts_info = mlxsw_sp_get_ts_info,
.get_eth_phy_stats = mlxsw_sp_get_eth_phy_stats,
.get_eth_mac_stats = mlxsw_sp_get_eth_mac_stats,
@@ -1649,6 +1666,18 @@ mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2[] = {
ARRAY_SIZE(mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2)
static const enum ethtool_link_mode_bit_indices
+mlxsw_sp2_mask_ethtool_100gaui_1_100gbase_cr_kr[] = {
+ ETHTOOL_LINK_MODE_100000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseSR_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseCR_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseDR_Full_BIT,
+};
+
+#define MLXSW_SP2_MASK_ETHTOOL_100GAUI_1_100GBASE_CR_KR_LEN \
+ ARRAY_SIZE(mlxsw_sp2_mask_ethtool_100gaui_1_100gbase_cr_kr)
+
+static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4[] = {
ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
@@ -1661,6 +1690,18 @@ mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4[] = {
ARRAY_SIZE(mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4)
static const enum ethtool_link_mode_bit_indices
+mlxsw_sp2_mask_ethtool_200gaui_2_200gbase_cr2_kr2[] = {
+ ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseDR2_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT,
+};
+
+#define MLXSW_SP2_MASK_ETHTOOL_200GAUI_2_200GBASE_CR2_KR2_LEN \
+ ARRAY_SIZE(mlxsw_sp2_mask_ethtool_200gaui_2_200gbase_cr2_kr2)
+
+static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_400gaui_8[] = {
ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT,
ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT,
@@ -1673,6 +1714,18 @@ mlxsw_sp2_mask_ethtool_400gaui_8[] = {
ARRAY_SIZE(mlxsw_sp2_mask_ethtool_400gaui_8)
static const enum ethtool_link_mode_bit_indices
+mlxsw_sp2_mask_ethtool_400gaui_4_400gbase_cr4_kr4[] = {
+ ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseDR4_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT,
+};
+
+#define MLXSW_SP2_MASK_ETHTOOL_400GAUI_4_400GBASE_CR4_KR4_LEN \
+ ARRAY_SIZE(mlxsw_sp2_mask_ethtool_400gaui_4_400gbase_cr4_kr4)
+
+static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_800gaui_8[] = {
ETHTOOL_LINK_MODE_800000baseCR8_Full_BIT,
ETHTOOL_LINK_MODE_800000baseKR8_Full_BIT,
@@ -1817,6 +1870,14 @@ static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
.width = 2,
},
{
+ .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_1_100GBASE_CR_KR,
+ .mask_ethtool = mlxsw_sp2_mask_ethtool_100gaui_1_100gbase_cr_kr,
+ .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_100GAUI_1_100GBASE_CR_KR_LEN,
+ .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_1X,
+ .speed = SPEED_100000,
+ .width = 1,
+ },
+ {
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4,
.mask_ethtool = mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN,
@@ -1826,6 +1887,14 @@ static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
.width = 4,
},
{
+ .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_2_200GBASE_CR2_KR2,
+ .mask_ethtool = mlxsw_sp2_mask_ethtool_200gaui_2_200gbase_cr2_kr2,
+ .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_200GAUI_2_200GBASE_CR2_KR2_LEN,
+ .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_2X,
+ .speed = SPEED_200000,
+ .width = 2,
+ },
+ {
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_400GAUI_8,
.mask_ethtool = mlxsw_sp2_mask_ethtool_400gaui_8,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_400GAUI_8_LEN,
@@ -1834,6 +1903,14 @@ static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
.width = 8,
},
{
+ .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_400GAUI_4_400GBASE_CR4_KR4,
+ .mask_ethtool = mlxsw_sp2_mask_ethtool_400gaui_4_400gbase_cr4_kr4,
+ .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_400GAUI_4_400GBASE_CR4_KR4_LEN,
+ .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_4X,
+ .speed = SPEED_400000,
+ .width = 4,
+ },
+ {
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_800GAUI_8,
.mask_ethtool = mlxsw_sp2_mask_ethtool_800gaui_8,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_800GAUI_8_LEN,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index 9fd1ca079258..353fd9ca89a6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -192,6 +192,11 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
return -EOPNOTSUPP;
}
+ if (sample_act_count) {
+ NL_SET_ERR_MSG_MOD(extack, "Mirror action after sample action is not supported");
+ return -EOPNOTSUPP;
+ }
+
err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei,
block, out_dev,
extack);
@@ -265,6 +270,11 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
return -EOPNOTSUPP;
}
+ if (mirror_act_count) {
+ NL_SET_ERR_MSG_MOD(extack, "Sample action after mirror action is not supported");
+ return -EOPNOTSUPP;
+ }
+
err = mlxsw_sp_acl_rulei_act_sample(mlxsw_sp, rulei,
block,
act->sample.psample_group,
@@ -595,6 +605,10 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
flow_rule_match_control(rule, &match);
addr_type = match.key->addr_type;
+
+ if (flow_rule_has_control_flags(match.mask->flags,
+ f->common.extack))
+ return -EOPNOTSUPP;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
@@ -816,8 +830,10 @@ int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
return -EINVAL;
rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
- if (!rule)
- return -EINVAL;
+ if (!rule) {
+ err = -EINVAL;
+ goto err_rule_get_stats;
+ }
err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes,
&drops, &lastuse, &used_hw_stats);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
index 3340b4a694c3..7ea798a4949e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
@@ -8,7 +8,7 @@
#include "spectrum_ipip.h"
#include "reg.h"
-struct ip_tunnel_parm
+struct ip_tunnel_parm_kern
mlxsw_sp_ipip_netdev_parms4(const struct net_device *ol_dev)
{
struct ip_tunnel *tun = netdev_priv(ol_dev);
@@ -24,27 +24,29 @@ mlxsw_sp_ipip_netdev_parms6(const struct net_device *ol_dev)
return tun->parms;
}
-static bool mlxsw_sp_ipip_parms4_has_ikey(const struct ip_tunnel_parm *parms)
+static bool
+mlxsw_sp_ipip_parms4_has_ikey(const struct ip_tunnel_parm_kern *parms)
{
- return !!(parms->i_flags & TUNNEL_KEY);
+ return test_bit(IP_TUNNEL_KEY_BIT, parms->i_flags);
}
static bool mlxsw_sp_ipip_parms6_has_ikey(const struct __ip6_tnl_parm *parms)
{
- return !!(parms->i_flags & TUNNEL_KEY);
+ return test_bit(IP_TUNNEL_KEY_BIT, parms->i_flags);
}
-static bool mlxsw_sp_ipip_parms4_has_okey(const struct ip_tunnel_parm *parms)
+static bool
+mlxsw_sp_ipip_parms4_has_okey(const struct ip_tunnel_parm_kern *parms)
{
- return !!(parms->o_flags & TUNNEL_KEY);
+ return test_bit(IP_TUNNEL_KEY_BIT, parms->o_flags);
}
static bool mlxsw_sp_ipip_parms6_has_okey(const struct __ip6_tnl_parm *parms)
{
- return !!(parms->o_flags & TUNNEL_KEY);
+ return test_bit(IP_TUNNEL_KEY_BIT, parms->o_flags);
}
-static u32 mlxsw_sp_ipip_parms4_ikey(const struct ip_tunnel_parm *parms)
+static u32 mlxsw_sp_ipip_parms4_ikey(const struct ip_tunnel_parm_kern *parms)
{
return mlxsw_sp_ipip_parms4_has_ikey(parms) ?
be32_to_cpu(parms->i_key) : 0;
@@ -56,7 +58,7 @@ static u32 mlxsw_sp_ipip_parms6_ikey(const struct __ip6_tnl_parm *parms)
be32_to_cpu(parms->i_key) : 0;
}
-static u32 mlxsw_sp_ipip_parms4_okey(const struct ip_tunnel_parm *parms)
+static u32 mlxsw_sp_ipip_parms4_okey(const struct ip_tunnel_parm_kern *parms)
{
return mlxsw_sp_ipip_parms4_has_okey(parms) ?
be32_to_cpu(parms->o_key) : 0;
@@ -69,7 +71,7 @@ static u32 mlxsw_sp_ipip_parms6_okey(const struct __ip6_tnl_parm *parms)
}
static union mlxsw_sp_l3addr
-mlxsw_sp_ipip_parms4_saddr(const struct ip_tunnel_parm *parms)
+mlxsw_sp_ipip_parms4_saddr(const struct ip_tunnel_parm_kern *parms)
{
return (union mlxsw_sp_l3addr) { .addr4 = parms->iph.saddr };
}
@@ -81,7 +83,7 @@ mlxsw_sp_ipip_parms6_saddr(const struct __ip6_tnl_parm *parms)
}
static union mlxsw_sp_l3addr
-mlxsw_sp_ipip_parms4_daddr(const struct ip_tunnel_parm *parms)
+mlxsw_sp_ipip_parms4_daddr(const struct ip_tunnel_parm_kern *parms)
{
return (union mlxsw_sp_l3addr) { .addr4 = parms->iph.daddr };
}
@@ -96,7 +98,7 @@ union mlxsw_sp_l3addr
mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto,
const struct net_device *ol_dev)
{
- struct ip_tunnel_parm parms4;
+ struct ip_tunnel_parm_kern parms4;
struct __ip6_tnl_parm parms6;
switch (proto) {
@@ -115,7 +117,9 @@ mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto,
static __be32 mlxsw_sp_ipip_netdev_daddr4(const struct net_device *ol_dev)
{
- struct ip_tunnel_parm parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
+ struct ip_tunnel_parm_kern parms4;
+
+ parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
return mlxsw_sp_ipip_parms4_daddr(&parms4).addr4;
}
@@ -124,7 +128,7 @@ static union mlxsw_sp_l3addr
mlxsw_sp_ipip_netdev_daddr(enum mlxsw_sp_l3proto proto,
const struct net_device *ol_dev)
{
- struct ip_tunnel_parm parms4;
+ struct ip_tunnel_parm_kern parms4;
struct __ip6_tnl_parm parms6;
switch (proto) {
@@ -150,7 +154,7 @@ bool mlxsw_sp_l3addr_is_zero(union mlxsw_sp_l3addr addr)
static struct mlxsw_sp_ipip_parms
mlxsw_sp_ipip_netdev_parms_init_gre4(const struct net_device *ol_dev)
{
- struct ip_tunnel_parm parms = mlxsw_sp_ipip_netdev_parms4(ol_dev);
+ struct ip_tunnel_parm_kern parms = mlxsw_sp_ipip_netdev_parms4(ol_dev);
return (struct mlxsw_sp_ipip_parms) {
.proto = MLXSW_SP_L3_PROTO_IPV4,
@@ -187,8 +191,8 @@ mlxsw_sp_ipip_decap_config_gre4(struct mlxsw_sp *mlxsw_sp,
{
u16 rif_index = mlxsw_sp_ipip_lb_rif_index(ipip_entry->ol_lb);
u16 ul_rif_id = mlxsw_sp_ipip_lb_ul_rif_id(ipip_entry->ol_lb);
+ struct ip_tunnel_parm_kern parms;
char rtdp_pl[MLXSW_REG_RTDP_LEN];
- struct ip_tunnel_parm parms;
unsigned int type_check;
bool has_ikey;
u32 daddr4;
@@ -238,12 +242,15 @@ static bool mlxsw_sp_ipip_can_offload_gre4(const struct mlxsw_sp *mlxsw_sp,
const struct net_device *ol_dev)
{
struct ip_tunnel *tunnel = netdev_priv(ol_dev);
- __be16 okflags = TUNNEL_KEY; /* We can't offload any other features. */
bool inherit_ttl = tunnel->parms.iph.ttl == 0;
bool inherit_tos = tunnel->parms.iph.tos & 0x1;
+ IP_TUNNEL_DECLARE_FLAGS(okflags) = { };
+
+ /* We can't offload any other features. */
+ __set_bit(IP_TUNNEL_KEY_BIT, okflags);
- return (tunnel->parms.i_flags & ~okflags) == 0 &&
- (tunnel->parms.o_flags & ~okflags) == 0 &&
+ return ip_tunnel_flags_subset(tunnel->parms.i_flags, okflags) &&
+ ip_tunnel_flags_subset(tunnel->parms.o_flags, okflags) &&
inherit_ttl && inherit_tos &&
mlxsw_sp_ipip_tunnel_complete(MLXSW_SP_L3_PROTO_IPV4, ol_dev);
}
@@ -252,7 +259,7 @@ static struct mlxsw_sp_rif_ipip_lb_config
mlxsw_sp_ipip_ol_loopback_config_gre4(struct mlxsw_sp *mlxsw_sp,
const struct net_device *ol_dev)
{
- struct ip_tunnel_parm parms = mlxsw_sp_ipip_netdev_parms4(ol_dev);
+ struct ip_tunnel_parm_kern parms = mlxsw_sp_ipip_netdev_parms4(ol_dev);
enum mlxsw_reg_ritr_loopback_ipip_type lb_ipipt;
lb_ipipt = mlxsw_sp_ipip_parms4_has_okey(&parms) ?
@@ -439,10 +446,13 @@ static bool mlxsw_sp_ipip_can_offload_gre6(const struct mlxsw_sp *mlxsw_sp,
struct __ip6_tnl_parm tparm = mlxsw_sp_ipip_netdev_parms6(ol_dev);
bool inherit_tos = tparm.flags & IP6_TNL_F_USE_ORIG_TCLASS;
bool inherit_ttl = tparm.hop_limit == 0;
- __be16 okflags = TUNNEL_KEY; /* We can't offload any other features. */
+ IP_TUNNEL_DECLARE_FLAGS(okflags) = { };
- return (tparm.i_flags & ~okflags) == 0 &&
- (tparm.o_flags & ~okflags) == 0 &&
+ /* We can't offload any other features. */
+ __set_bit(IP_TUNNEL_KEY_BIT, okflags);
+
+ return ip_tunnel_flags_subset(tparm.i_flags, okflags) &&
+ ip_tunnel_flags_subset(tparm.o_flags, okflags) &&
inherit_ttl && inherit_tos &&
mlxsw_sp_ipip_tunnel_complete(MLXSW_SP_L3_PROTO_IPV6, ol_dev);
}
@@ -471,11 +481,33 @@ mlxsw_sp_ipip_ol_netdev_change_gre6(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_ipip_entry *ipip_entry,
struct netlink_ext_ack *extack)
{
+ u32 new_kvdl_index, old_kvdl_index = ipip_entry->dip_kvdl_index;
+ struct in6_addr old_addr6 = ipip_entry->parms.daddr.addr6;
struct mlxsw_sp_ipip_parms new_parms;
+ int err;
new_parms = mlxsw_sp_ipip_netdev_parms_init_gre6(ipip_entry->ol_dev);
- return mlxsw_sp_ipip_ol_netdev_change_gre(mlxsw_sp, ipip_entry,
- &new_parms, extack);
+
+ err = mlxsw_sp_ipv6_addr_kvdl_index_get(mlxsw_sp,
+ &new_parms.daddr.addr6,
+ &new_kvdl_index);
+ if (err)
+ return err;
+ ipip_entry->dip_kvdl_index = new_kvdl_index;
+
+ err = mlxsw_sp_ipip_ol_netdev_change_gre(mlxsw_sp, ipip_entry,
+ &new_parms, extack);
+ if (err)
+ goto err_change_gre;
+
+ mlxsw_sp_ipv6_addr_put(mlxsw_sp, &old_addr6);
+
+ return 0;
+
+err_change_gre:
+ ipip_entry->dip_kvdl_index = old_kvdl_index;
+ mlxsw_sp_ipv6_addr_put(mlxsw_sp, &new_parms.daddr.addr6);
+ return err;
}
static int
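Editor's note on the conversion in this file: struct ip_tunnel_parm_kern carries its i_flags/o_flags as bitmaps rather than the old __be16 masks, so individual flags are declared, set and tested with the bit helpers used in the hunks above instead of `parms->i_flags & TUNNEL_KEY`. A minimal sketch, assuming <net/ip_tunnels.h> for the types and helpers:

#include <net/ip_tunnels.h>

static bool example_has_key(const struct ip_tunnel_parm_kern *parms)
{
	return test_bit(IP_TUNNEL_KEY_BIT, parms->i_flags);
}

static bool example_only_key_set(const struct ip_tunnel_parm_kern *parms)
{
	IP_TUNNEL_DECLARE_FLAGS(allowed) = { };

	__set_bit(IP_TUNNEL_KEY_BIT, allowed);

	/* True when no flag other than KEY is present in either direction. */
	return ip_tunnel_flags_subset(parms->i_flags, allowed) &&
	       ip_tunnel_flags_subset(parms->o_flags, allowed);
}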
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
index a35f009da561..a66173779641 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
@@ -9,7 +9,7 @@
#include <linux/if_tunnel.h>
#include <net/ip6_tunnel.h>
-struct ip_tunnel_parm
+struct ip_tunnel_parm_kern
mlxsw_sp_ipip_netdev_parms4(const struct net_device *ol_dev);
struct __ip6_tnl_parm
mlxsw_sp_ipip_netdev_parms6(const struct net_device *ol_dev);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
index 69cd689dbc83..5afe6b155ef0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
@@ -1003,10 +1003,10 @@ static void mlxsw_sp_mr_route_stats_update(struct mlxsw_sp *mlxsw_sp,
mr->mr_ops->route_stats(mlxsw_sp, mr_route->route_priv, &packets,
&bytes);
- if (mr_route->mfc->mfc_un.res.pkt != packets)
- mr_route->mfc->mfc_un.res.lastuse = jiffies;
- mr_route->mfc->mfc_un.res.pkt = packets;
- mr_route->mfc->mfc_un.res.bytes = bytes;
+ if (atomic_long_read(&mr_route->mfc->mfc_un.res.pkt) != packets)
+ WRITE_ONCE(mr_route->mfc->mfc_un.res.lastuse, jiffies);
+ atomic_long_set(&mr_route->mfc->mfc_un.res.pkt, packets);
+ atomic_long_set(&mr_route->mfc->mfc_un.res.bytes, bytes);
}
static void mlxsw_sp_mr_stats_update(struct work_struct *work)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
index cbb6c75a6620..5b9f0844b8f6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
@@ -16,6 +16,7 @@
#include "spectrum.h"
#include "spectrum_ptp.h"
#include "core.h"
+#include "txheader.h"
#define MLXSW_SP1_PTP_CLOCK_CYCLES_SHIFT 29
#define MLXSW_SP1_PTP_CLOCK_FREQ_KHZ 156257 /* 6.4nSec */
@@ -45,7 +46,7 @@ struct mlxsw_sp2_ptp_state {
refcount_t ptp_port_enabled_ref; /* Number of ports with time stamping
* enabled.
*/
- struct hwtstamp_config config;
+ struct kernel_hwtstamp_config config;
struct mutex lock; /* Protects 'config' and HW configuration. */
};
@@ -130,7 +131,7 @@ static u64 __mlxsw_sp1_ptp_read_frc(struct mlxsw_sp1_ptp_clock *clock,
return (u64) frc_l | (u64) frc_h2 << 32;
}
-static u64 mlxsw_sp1_ptp_read_frc(const struct cyclecounter *cc)
+static u64 mlxsw_sp1_ptp_read_frc(struct cyclecounter *cc)
{
struct mlxsw_sp1_ptp_clock *clock =
container_of(cc, struct mlxsw_sp1_ptp_clock, cycles);
@@ -1082,14 +1083,14 @@ void mlxsw_sp1_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state_common)
}
int mlxsw_sp1_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
*config = mlxsw_sp_port->ptp.hwtstamp_config;
return 0;
}
static int
-mlxsw_sp1_ptp_get_message_types(const struct hwtstamp_config *config,
+mlxsw_sp1_ptp_get_message_types(const struct kernel_hwtstamp_config *config,
u16 *p_ing_types, u16 *p_egr_types,
enum hwtstamp_rx_filters *p_rx_filter)
{
@@ -1245,7 +1246,8 @@ void mlxsw_sp1_ptp_shaper_work(struct work_struct *work)
}
int mlxsw_sp1_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
enum hwtstamp_rx_filters rx_filter;
u16 ing_types;
@@ -1269,14 +1271,14 @@ int mlxsw_sp1_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
if (err)
return err;
- /* Notify the ioctl caller what we are actually timestamping. */
+ /* Notify the caller what we are actually timestamping. */
config->rx_filter = rx_filter;
return 0;
}
int mlxsw_sp1_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
info->phc_index = ptp_clock_index(mlxsw_sp->clock->ptp);
@@ -1352,6 +1354,10 @@ struct mlxsw_sp_ptp_state *mlxsw_sp2_ptp_init(struct mlxsw_sp *mlxsw_sp)
struct mlxsw_sp2_ptp_state *ptp_state;
int err;
+ /* Max FID will be used in data path, check validity as part of init. */
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, FID))
+ return ERR_PTR(-EIO);
+
ptp_state = kzalloc(sizeof(*ptp_state), GFP_KERNEL);
if (!ptp_state)
return ERR_PTR(-ENOMEM);
@@ -1446,7 +1452,7 @@ void mlxsw_sp2_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
}
int mlxsw_sp2_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
struct mlxsw_sp2_ptp_state *ptp_state;
@@ -1460,7 +1466,7 @@ int mlxsw_sp2_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int
-mlxsw_sp2_ptp_get_message_types(const struct hwtstamp_config *config,
+mlxsw_sp2_ptp_get_message_types(const struct kernel_hwtstamp_config *config,
u16 *p_ing_types, u16 *p_egr_types,
enum hwtstamp_rx_filters *p_rx_filter)
{
@@ -1537,7 +1543,7 @@ static int mlxsw_sp2_ptp_mtpcpc_set(struct mlxsw_sp *mlxsw_sp, bool ptp_trap_en,
static int mlxsw_sp2_ptp_enable(struct mlxsw_sp *mlxsw_sp, u16 ing_types,
u16 egr_types,
- struct hwtstamp_config new_config)
+ struct kernel_hwtstamp_config new_config)
{
struct mlxsw_sp2_ptp_state *ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp);
int err;
@@ -1551,7 +1557,7 @@ static int mlxsw_sp2_ptp_enable(struct mlxsw_sp *mlxsw_sp, u16 ing_types,
}
static int mlxsw_sp2_ptp_disable(struct mlxsw_sp *mlxsw_sp,
- struct hwtstamp_config new_config)
+ struct kernel_hwtstamp_config new_config)
{
struct mlxsw_sp2_ptp_state *ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp);
int err;
@@ -1566,7 +1572,7 @@ static int mlxsw_sp2_ptp_disable(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp2_ptp_configure_port(struct mlxsw_sp_port *mlxsw_sp_port,
u16 ing_types, u16 egr_types,
- struct hwtstamp_config new_config)
+ struct kernel_hwtstamp_config new_config)
{
struct mlxsw_sp2_ptp_state *ptp_state;
int err;
@@ -1587,7 +1593,7 @@ static int mlxsw_sp2_ptp_configure_port(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int mlxsw_sp2_ptp_deconfigure_port(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config new_config)
+ struct kernel_hwtstamp_config new_config)
{
struct mlxsw_sp2_ptp_state *ptp_state;
int err;
@@ -1609,11 +1615,12 @@ err_ptp_disable:
}
int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
+ struct kernel_hwtstamp_config new_config;
struct mlxsw_sp2_ptp_state *ptp_state;
enum hwtstamp_rx_filters rx_filter;
- struct hwtstamp_config new_config;
u16 new_ing_types, new_egr_types;
bool ptp_enabled;
int err;
@@ -1647,7 +1654,7 @@ int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_port->ptp.ing_types = new_ing_types;
mlxsw_sp_port->ptp.egr_types = new_egr_types;
- /* Notify the ioctl caller what we are actually timestamping. */
+ /* Notify the caller what we are actually timestamping. */
config->rx_filter = rx_filter;
mutex_unlock(&ptp_state->lock);
@@ -1661,7 +1668,7 @@ err_get_message_types:
}
int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
info->phc_index = ptp_clock_index(mlxsw_sp->clock->ptp);
@@ -1678,37 +1685,3 @@ int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
return 0;
}
-
-int mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
-{
- mlxsw_sp_txhdr_construct(skb, tx_info);
- return 0;
-}
-
-int mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
-{
- /* In Spectrum-2 and Spectrum-3, in order for PTP event packets to have
- * their correction field correctly set on the egress port they must be
- * transmitted as data packets. Such packets ingress the ASIC via the
- * CPU port and must have a VLAN tag, as the CPU port is not configured
- * with a PVID. Push the default VLAN (4095), which is configured as
- * egress untagged on all the ports.
- */
- if (!skb_vlan_tagged(skb)) {
- skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
- MLXSW_SP_DEFAULT_VID);
- if (!skb) {
- this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
- return -ENOMEM;
- }
- }
-
- return mlxsw_sp_txhdr_ptp_data_construct(mlxsw_core, mlxsw_sp_port, skb,
- tx_info);
-}
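Editor's note: the timestamping changes in this file track the kernel-wide move from the ioctl-oriented struct hwtstamp_config to struct kernel_hwtstamp_config plus a netlink extack, the types taken by the ndo_hwtstamp_get()/ndo_hwtstamp_set() callbacks. A minimal sketch of what such a set callback looks like with the new types (hypothetical example_* function; the real handlers are the mlxsw_sp*_ptp_hwtstamp_set() functions changed above):

#include <linux/net_tstamp.h>
#include <linux/netdevice.h>

static int example_hwtstamp_set(struct net_device *dev,
				struct kernel_hwtstamp_config *config,
				struct netlink_ext_ack *extack)
{
	if (config->tx_type != HWTSTAMP_TX_OFF &&
	    config->tx_type != HWTSTAMP_TX_ON) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported TX timestamping mode");
		return -ERANGE;
	}

	/* Report back what is actually timestamped, as the comments above note. */
	config->rx_filter = HWTSTAMP_FILTER_ALL;
	return 0;
}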
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
index a8b88230959a..df37f1470830 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
@@ -11,14 +11,6 @@ struct mlxsw_sp;
struct mlxsw_sp_port;
struct mlxsw_sp_ptp_clock;
-static inline int mlxsw_sp_ptp_get_ts_info_noptp(struct ethtool_ts_info *info)
-{
- info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
- info->phc_index = -1;
- return 0;
-}
-
#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
struct mlxsw_sp_ptp_clock *
@@ -42,26 +34,22 @@ void mlxsw_sp1_ptp_got_timestamp(struct mlxsw_sp *mlxsw_sp, bool ingress,
u64 timestamp);
int mlxsw_sp1_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config);
+ struct kernel_hwtstamp_config *config);
int mlxsw_sp1_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config);
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
void mlxsw_sp1_ptp_shaper_work(struct work_struct *work);
int mlxsw_sp1_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
- struct ethtool_ts_info *info);
+ struct kernel_ethtool_ts_info *info);
int mlxsw_sp1_get_stats_count(void);
void mlxsw_sp1_get_stats_strings(u8 **p);
void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
u64 *data, int data_index);
-int mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info);
-
struct mlxsw_sp_ptp_clock *
mlxsw_sp2_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev);
@@ -78,18 +66,14 @@ void mlxsw_sp2_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
struct sk_buff *skb, u16 local_port);
int mlxsw_sp2_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config);
+ struct kernel_hwtstamp_config *config);
int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config);
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
- struct ethtool_ts_info *info);
-
-int mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info);
+ struct kernel_ethtool_ts_info *info);
#else
@@ -135,14 +119,15 @@ mlxsw_sp1_ptp_got_timestamp(struct mlxsw_sp *mlxsw_sp, bool ingress,
static inline int
mlxsw_sp1_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
return -EOPNOTSUPP;
}
static inline int
mlxsw_sp1_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
}
@@ -151,12 +136,6 @@ static inline void mlxsw_sp1_ptp_shaper_work(struct work_struct *work)
{
}
-static inline int mlxsw_sp1_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
- struct ethtool_ts_info *info)
-{
- return mlxsw_sp_ptp_get_ts_info_noptp(info);
-}
-
static inline int mlxsw_sp1_get_stats_count(void)
{
return 0;
@@ -171,15 +150,6 @@ static inline void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
{
}
-static inline int
-mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
-{
- return -EOPNOTSUPP;
-}
-
static inline struct mlxsw_sp_ptp_clock *
mlxsw_sp2_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev)
{
@@ -214,29 +184,15 @@ static inline void mlxsw_sp2_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
static inline int
mlxsw_sp2_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
return -EOPNOTSUPP;
}
static inline int
mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
- struct ethtool_ts_info *info)
-{
- return mlxsw_sp_ptp_get_ts_info_noptp(info);
-}
-
-static inline int
-mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 40ba314fbc72..a2033837182e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -3014,6 +3014,9 @@ static int mlxsw_sp_neigh_rif_made_sync(struct mlxsw_sp *mlxsw_sp,
.rif = rif,
};
+ if (!mlxsw_sp_dev_lower_is_port(mlxsw_sp_rif_dev(rif)))
+ return 0;
+
neigh_for_each(&arp_tbl, mlxsw_sp_neigh_rif_made_sync_each, &rms);
if (rms.err)
goto err_arp;
@@ -3197,7 +3200,6 @@ mlxsw_sp_nexthop_sh_counter_get(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_nexthop_group *nh_grp = nh->nhgi->nh_grp;
struct mlxsw_sp_nexthop_counter *nhct;
- void *ptr;
int err;
nhct = xa_load(&nh_grp->nhgi->nexthop_counters, nh->id);
@@ -3210,12 +3212,10 @@ mlxsw_sp_nexthop_sh_counter_get(struct mlxsw_sp *mlxsw_sp,
if (IS_ERR(nhct))
return nhct;
- ptr = xa_store(&nh_grp->nhgi->nexthop_counters, nh->id, nhct,
- GFP_KERNEL);
- if (IS_ERR(ptr)) {
- err = PTR_ERR(ptr);
+ err = xa_err(xa_store(&nh_grp->nhgi->nexthop_counters, nh->id, nhct,
+ GFP_KERNEL));
+ if (err)
goto err_store;
- }
return nhct;
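/* Editor's sketch (not part of the patch): xa_store() returns the previous
 * entry on success or an xarray-encoded error pointer on failure, and xa_err()
 * folds that into 0 or a negative errno, which is why the temporary 'ptr'
 * removed above is no longer needed.  Assuming <linux/xarray.h>:
 */
static int example_xa_store_checked(struct xarray *xa, unsigned long id,
				    void *entry)
{
	return xa_err(xa_store(xa, id, entry, GFP_KERNEL));	/* 0 or -errno */
}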
@@ -8187,41 +8187,6 @@ mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
return NULL;
}
-bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp,
- const struct net_device *dev)
-{
- struct mlxsw_sp_rif *rif;
-
- mutex_lock(&mlxsw_sp->router->lock);
- rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
- mutex_unlock(&mlxsw_sp->router->lock);
-
- return rif;
-}
-
-u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev)
-{
- struct mlxsw_sp_rif *rif;
- u16 vid = 0;
-
- mutex_lock(&mlxsw_sp->router->lock);
- rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
- if (!rif)
- goto out;
-
- /* We only return the VID for VLAN RIFs. Otherwise we return an
- * invalid value (0).
- */
- if (rif->ops->type != MLXSW_SP_RIF_TYPE_VLAN)
- goto out;
-
- vid = mlxsw_sp_fid_8021q_vid(rif->fid);
-
-out:
- mutex_unlock(&mlxsw_sp->router->lock);
- return vid;
-}
-
static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
{
char ritr_pl[MLXSW_REG_RITR_LEN];
@@ -8420,19 +8385,6 @@ u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
return lb_rif->common.rif_index;
}
-u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
-{
- struct net_device *dev = mlxsw_sp_rif_dev(&lb_rif->common);
- u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(dev);
- struct mlxsw_sp_vr *ul_vr;
-
- ul_vr = mlxsw_sp_vr_get(lb_rif->common.mlxsw_sp, ul_tb_id, NULL);
- if (WARN_ON(IS_ERR(ul_vr)))
- return 0;
-
- return ul_vr->id;
-}
-
u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
{
return lb_rif->ul_rif_id;
@@ -11450,12 +11402,16 @@ static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
{
bool old_inc_parsing_depth, new_inc_parsing_depth;
struct mlxsw_sp_mp_hash_config config = {};
+ struct net *net = mlxsw_sp_net(mlxsw_sp);
char recr2_pl[MLXSW_REG_RECR2_LEN];
unsigned long bit;
u32 seed;
int err;
- seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0);
+ seed = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_seed).user_seed;
+ if (!seed)
+ seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0);
+
mlxsw_reg_recr2_pack(recr2_pl, seed);
mlxsw_sp_mp4_hash_init(mlxsw_sp, &config);
mlxsw_sp_mp6_hash_init(mlxsw_sp, &config);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index 0432c7cc6b07..313efab5c324 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -90,7 +90,6 @@ struct mlxsw_sp_ipip_entry;
struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
u16 rif_index);
u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *rif);
-u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *rif);
u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif);
u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev);
int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index af50ff9e5f26..32d2e61f2b82 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -413,8 +413,8 @@ mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
__be32 *saddrp, __be32 *daddrp)
{
struct ip_tunnel *tun = netdev_priv(to_dev);
+ struct ip_tunnel_parm_kern parms;
struct net_device *dev = NULL;
- struct ip_tunnel_parm parms;
struct rtable *rt = NULL;
struct flowi4 fl4;
@@ -423,8 +423,7 @@ mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp,
- 0, 0, dev_net(to_dev), parms.link, tun->fwmark, 0,
- 0);
+ 0, 0, tun->net, parms.link, tun->fwmark, 0, 0);
rt = ip_route_output_key(tun->net, &fl4);
if (IS_ERR(rt))
@@ -451,7 +450,7 @@ mlxsw_sp_span_entry_gretap4_parms(struct mlxsw_sp *mlxsw_sp,
const struct net_device *to_dev,
struct mlxsw_sp_span_parms *sparmsp)
{
- struct ip_tunnel_parm tparm = mlxsw_sp_ipip_netdev_parms4(to_dev);
+ struct ip_tunnel_parm_kern tparm = mlxsw_sp_ipip_netdev_parms4(to_dev);
union mlxsw_sp_l3addr saddr = { .addr4 = tparm.iph.saddr };
union mlxsw_sp_l3addr daddr = { .addr4 = tparm.iph.daddr };
bool inherit_tos = tparm.iph.tos & 0x1;
@@ -461,7 +460,8 @@ mlxsw_sp_span_entry_gretap4_parms(struct mlxsw_sp *mlxsw_sp,
if (!(to_dev->flags & IFF_UP) ||
/* Reject tunnels with GRE keys, checksums, etc. */
- tparm.i_flags || tparm.o_flags ||
+ !ip_tunnel_flags_empty(tparm.i_flags) ||
+ !ip_tunnel_flags_empty(tparm.o_flags) ||
/* Require a fixed TTL and a TOS copied from the mirrored packet. */
inherit_ttl || !inherit_tos ||
/* A destination address may not be "any". */
@@ -539,7 +539,7 @@ mlxsw_sp_span_gretap6_route(const struct net_device *to_dev,
if (!dst || dst->error)
goto out;
- rt6 = container_of(dst, struct rt6_info, dst);
+ rt6 = dst_rt6_info(dst);
dev = dst->dev;
*saddrp = fl6.saddr;
@@ -565,7 +565,8 @@ mlxsw_sp_span_entry_gretap6_parms(struct mlxsw_sp *mlxsw_sp,
if (!(to_dev->flags & IFF_UP) ||
/* Reject tunnels with GRE keys, checksums, etc. */
- tparm.i_flags || tparm.o_flags ||
+ !ip_tunnel_flags_empty(tparm.i_flags) ||
+ !ip_tunnel_flags_empty(tparm.o_flags) ||
/* Require a fixed TTL and a TOS copied from the mirrored packet. */
inherit_ttl || !inherit_tos ||
/* A destination address may not be "any". */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 6397ff0dc951..a48bf342084d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -2929,23 +2929,8 @@ void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
}
-int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
- const struct net_device *br_dev,
- const struct net_device *vxlan_dev, u16 vid,
- struct netlink_ext_ack *extack)
-{
- struct mlxsw_sp_bridge_device *bridge_device;
-
- bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
- if (WARN_ON(!bridge_device))
- return -EINVAL;
-
- return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid,
- extack);
-}
-
-void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
- const struct net_device *vxlan_dev)
+static void __mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *vxlan_dev)
{
struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
struct mlxsw_sp_fid *fid;
@@ -2963,6 +2948,47 @@ void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_fid_put(fid);
}
+int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *br_dev,
+ struct net_device *vxlan_dev, u16 vid,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ int err;
+
+ bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
+ if (WARN_ON(!bridge_device))
+ return -EINVAL;
+
+ mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(bridge_device->dev);
+ if (!mlxsw_sp_port)
+ return -EINVAL;
+
+ err = bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid,
+ extack);
+ if (err)
+ return err;
+
+ err = switchdev_bridge_port_offload(vxlan_dev, mlxsw_sp_port->dev,
+ NULL, NULL, NULL, false, extack);
+ if (err)
+ goto err_bridge_port_offload;
+
+ return 0;
+
+err_bridge_port_offload:
+ __mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
+ return err;
+}
+
+void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *vxlan_dev)
+{
+ switchdev_bridge_port_unoffload(vxlan_dev, NULL, NULL, NULL);
+ __mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
+}
+
static void
mlxsw_sp_switchdev_vxlan_addr_convert(const union vxlan_addr *vxlan_addr,
enum mlxsw_sp_l3proto *proto,
@@ -3867,7 +3893,7 @@ mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_fid_put(fid);
return -EINVAL;
}
- mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
+ __mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
mlxsw_sp_fid_put(fid);
return 0;
}
@@ -3883,7 +3909,7 @@ mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp,
/* Fourth case: The new VLAN is PVID, which means the VLAN currently
* mapped to the VNI should be unmapped
*/
- mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
+ __mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
mlxsw_sp_fid_put(fid);
/* Fifth case: The new VLAN is also egress untagged, which means the
@@ -3923,7 +3949,7 @@ mlxsw_sp_switchdev_vxlan_vlan_del(struct mlxsw_sp *mlxsw_sp,
if (mlxsw_sp_fid_8021q_vid(fid) != vid)
goto out;
- mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
+ __mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
out:
mlxsw_sp_fid_put(fid);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
index 899c954e0e5f..b5c3f789c685 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
@@ -173,7 +173,7 @@ static void mlxsw_sp_rx_no_mark_listener(struct sk_buff *skb, u16 local_port,
if (err)
return;
- netif_receive_skb(skb);
+ napi_gro_receive(mlxsw_skb_cb(skb)->rx_md_info.napi, skb);
}
static void mlxsw_sp_rx_mark_listener(struct sk_buff *skb, u16 local_port,
@@ -959,18 +959,18 @@ static const struct mlxsw_sp_trap_item mlxsw_sp_trap_items_arr[] = {
},
{
.trap = MLXSW_SP_TRAP_CONTROL(ARP_REQUEST, NEIGH_DISCOVERY,
- MIRROR),
+ TRAP),
.listeners_arr = {
- MLXSW_SP_RXL_MARK(ROUTER_ARPBC, NEIGH_DISCOVERY,
- TRAP_TO_CPU, false),
+ MLXSW_SP_RXL_NO_MARK(ARPBC, NEIGH_DISCOVERY,
+ TRAP_TO_CPU, false),
},
},
{
.trap = MLXSW_SP_TRAP_CONTROL(ARP_RESPONSE, NEIGH_DISCOVERY,
- MIRROR),
+ TRAP),
.listeners_arr = {
- MLXSW_SP_RXL_MARK(ROUTER_ARPUC, NEIGH_DISCOVERY,
- TRAP_TO_CPU, false),
+ MLXSW_SP_RXL_NO_MARK(ARPUC, NEIGH_DISCOVERY,
+ TRAP_TO_CPU, false),
},
},
{
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 83477c8e6971..9962dc157901 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -29,6 +29,8 @@ enum {
MLXSW_TRAP_ID_FDB_MISMATCH = 0x3B,
MLXSW_TRAP_ID_FID_MISS = 0x3D,
MLXSW_TRAP_ID_DECAP_ECN0 = 0x40,
+ MLXSW_TRAP_ID_ARPBC = 0x50,
+ MLXSW_TRAP_ID_ARPUC = 0x51,
MLXSW_TRAP_ID_MTUERROR = 0x52,
MLXSW_TRAP_ID_TTLERROR = 0x53,
MLXSW_TRAP_ID_LBERROR = 0x54,
@@ -66,13 +68,10 @@ enum {
MLXSW_TRAP_ID_HOST_MISS_IPV6 = 0x92,
MLXSW_TRAP_ID_IPIP_DECAP_ERROR = 0xB1,
MLXSW_TRAP_ID_NVE_DECAP_ARP = 0xB8,
- MLXSW_TRAP_ID_NVE_ENCAP_ARP = 0xBD,
MLXSW_TRAP_ID_IPV4_BFD = 0xD0,
MLXSW_TRAP_ID_IPV6_BFD = 0xD1,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV4 = 0xD6,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV6 = 0xD7,
- MLXSW_TRAP_ID_ROUTER_ARPBC = 0xE0,
- MLXSW_TRAP_ID_ROUTER_ARPUC = 0xE1,
MLXSW_TRAP_ID_DISCARD_NON_ROUTABLE = 0x11A,
MLXSW_TRAP_ID_DISCARD_ROUTER2 = 0x130,
MLXSW_TRAP_ID_DISCARD_ROUTER3 = 0x131,
@@ -95,6 +94,7 @@ enum {
MLXSW_TRAP_ID_DISCARD_ING_ROUTER_IPV4_SIP_BC = 0x16A,
MLXSW_TRAP_ID_DISCARD_ING_ROUTER_IPV4_DIP_LOCAL_NET = 0x16B,
MLXSW_TRAP_ID_DISCARD_ING_ROUTER_DIP_LINK_LOCAL = 0x16C,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_SIP_LINK_LOCAL = 0x16D,
MLXSW_TRAP_ID_DISCARD_ROUTER_IRIF_EN = 0x178,
MLXSW_TRAP_ID_DISCARD_ROUTER_ERIF_EN = 0x179,
MLXSW_TRAP_ID_DISCARD_ROUTER_LPM4 = 0x17B,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/txheader.h b/drivers/net/ethernet/mellanox/mlxsw/txheader.h
index da51dd9d5e44..e78cba5821b6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/txheader.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/txheader.h
@@ -4,6 +4,69 @@
#ifndef _MLXSW_TXHEADER_H
#define _MLXSW_TXHEADER_H
+/* tx_hdr_version
+ * Tx header version.
+ * Must be set to 1.
+ */
+MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
+
+/* tx_hdr_ctl
+ * Packet control type.
+ * 0 - Ethernet control (e.g. EMADs, LACP)
+ * 1 - Ethernet data
+ */
+MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
+
+/* tx_hdr_proto
+ * Packet protocol type. Must be set to 1 (Ethernet).
+ */
+MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
+
+/* tx_hdr_rx_is_router
+ * Packet is sent from the router. Valid for data packets only.
+ */
+MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);
+
+/* tx_hdr_fid_valid
+ * Indicates if the 'fid' field is valid and should be used for
+ * forwarding lookup. Valid for data packets only.
+ */
+MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);
+
+/* tx_hdr_swid
+ * Switch partition ID. Must be set to 0.
+ */
+MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
+
+/* tx_hdr_control_tclass
+ * Indicates if the packet should use the control TClass and not one
+ * of the data TClasses.
+ */
+MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);
+
+/* tx_hdr_port_mid
+ * Destination local port for unicast packets.
+ * Destination multicast ID for multicast packets.
+ *
+ * Control packets are directed to a specific egress port, while data
+ * packets are transmitted through the CPU port (0) into the switch partition,
+ * where forwarding rules are applied.
+ */
+MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
+
+/* tx_hdr_fid
+ * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
+ * set, otherwise calculated based on the packet's VID using VID to FID mapping.
+ * Valid for data packets only.
+ */
+MLXSW_ITEM32(tx, hdr, fid, 0x08, 16, 16);
+
+/* tx_hdr_type
+ * 0 - Data packets
+ * 6 - Control packets
+ */
+MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
+
#define MLXSW_TXHDR_LEN 0x10
#define MLXSW_TXHDR_VERSION_0 0
#define MLXSW_TXHDR_VERSION_1 1
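Editor's note: each MLXSW_ITEM32() above expands into inline mlxsw_tx_hdr_<field>_set()/_get() accessors, so callers build the 16-byte Tx header from this shared header by filling a zeroed buffer field by field. A rough sketch of constructing a control-packet header (field values taken from the comments above; the real construction lives in the per-ASIC txhdr helpers):

static void example_txhdr_construct(struct sk_buff *skb, u16 local_port)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);
	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, 0);			/* Ethernet control */
	mlxsw_tx_hdr_proto_set(txhdr, 1);		/* Ethernet */
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);	/* use the control TClass */
	mlxsw_tx_hdr_port_mid_set(txhdr, local_port);	/* egress local port */
	mlxsw_tx_hdr_type_set(txhdr, 6);		/* 6 - Control packets */
}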
diff --git a/drivers/net/ethernet/meta/Kconfig b/drivers/net/ethernet/meta/Kconfig
new file mode 100644
index 000000000000..ca5c7ac2a5bc
--- /dev/null
+++ b/drivers/net/ethernet/meta/Kconfig
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Meta Platforms network device configuration
+#
+
+config NET_VENDOR_META
+ bool "Meta Platforms devices"
+ default y
+ help
+ If you have a network (Ethernet) card designed by Meta, say Y.
+ That's Meta as in the parent company of Facebook.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Meta cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_META
+
+config FBNIC
+ tristate "Meta Platforms Host Network Interface"
+ depends on 64BIT || COMPILE_TEST
+ depends on !S390
+ depends on MAX_SKB_FRAGS < 22
+ depends on PCI_MSI
+ depends on PTP_1588_CLOCK_OPTIONAL
+ select NET_DEVLINK
+ select PAGE_POOL
+ select PCS_XPCS
+ select PHYLINK
+ select PLDMFW
+ help
+ This driver supports Meta Platforms Host Network Interface.
+
+ To compile this driver as a module, choose M here. The module
+ will be called fbnic. MSI-X interrupt support is required.
+
+endif # NET_VENDOR_META
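Editor's note: with the Kconfig entries above, a minimal configuration fragment to build the driver as a module would be (NET_VENDOR_META already defaults to y):

CONFIG_NET_VENDOR_META=y
CONFIG_FBNIC=m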
diff --git a/drivers/net/ethernet/meta/Makefile b/drivers/net/ethernet/meta/Makefile
new file mode 100644
index 000000000000..88804f3de963
--- /dev/null
+++ b/drivers/net/ethernet/meta/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Meta Platforms network device drivers.
+#
+
+obj-$(CONFIG_FBNIC) += fbnic/
diff --git a/drivers/net/ethernet/meta/fbnic/Makefile b/drivers/net/ethernet/meta/fbnic/Makefile
new file mode 100644
index 000000000000..72c41af65364
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/Makefile
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+
+#
+# Makefile for the Meta(R) Host Network Interface
+#
+
+obj-$(CONFIG_FBNIC) += fbnic.o
+
+fbnic-y := fbnic_csr.o \
+ fbnic_debugfs.o \
+ fbnic_devlink.o \
+ fbnic_ethtool.o \
+ fbnic_fw.o \
+ fbnic_fw_log.o \
+ fbnic_hw_stats.o \
+ fbnic_hwmon.o \
+ fbnic_irq.o \
+ fbnic_mac.o \
+ fbnic_netdev.o \
+ fbnic_pci.o \
+ fbnic_phylink.o \
+ fbnic_rpc.o \
+ fbnic_mdio.o \
+ fbnic_time.o \
+ fbnic_tlv.o \
+ fbnic_txrx.o \
+# End of objects
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic.h b/drivers/net/ethernet/meta/fbnic/fbnic.h
new file mode 100644
index 000000000000..779a083b9215
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic.h
@@ -0,0 +1,227 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#ifndef _FBNIC_H_
+#define _FBNIC_H_
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include "fbnic_csr.h"
+#include "fbnic_fw.h"
+#include "fbnic_fw_log.h"
+#include "fbnic_hw_stats.h"
+#include "fbnic_mac.h"
+#include "fbnic_rpc.h"
+
+struct fbnic_napi_vector;
+
+#define FBNIC_MAX_NAPI_VECTORS 128u
+#define FBNIC_MBX_CMPL_SLOTS 4
+
+struct fbnic_dev {
+ struct device *dev;
+ struct net_device *netdev;
+ struct dentry *dbg_fbd;
+ struct device *hwmon;
+ struct devlink_health_reporter *fw_reporter;
+ struct devlink_health_reporter *otp_reporter;
+
+ u32 __iomem *uc_addr0;
+ u32 __iomem *uc_addr4;
+ const struct fbnic_mac *mac;
+ unsigned int fw_msix_vector;
+ unsigned int mac_msix_vector;
+ unsigned short num_irqs;
+
+ struct {
+ u8 users;
+ char name[IFNAMSIZ + 9];
+ } napi_irq[FBNIC_MAX_NAPI_VECTORS];
+
+ struct delayed_work service_task;
+
+ struct fbnic_fw_mbx mbx[FBNIC_IPC_MBX_INDICES];
+ struct fbnic_fw_cap fw_cap;
+ struct fbnic_fw_completion *cmpl_data[FBNIC_MBX_CMPL_SLOTS];
+ /* Lock protecting Tx Mailbox queue to prevent possible races */
+ spinlock_t fw_tx_lock;
+
+ unsigned long last_heartbeat_request;
+ unsigned long last_heartbeat_response;
+ u8 fw_heartbeat_enabled;
+
+ u64 dsn;
+ u32 mps;
+ u32 readrq;
+
+ /* Local copy of the device's TCAM */
+ struct fbnic_act_tcam act_tcam[FBNIC_RPC_TCAM_ACT_NUM_ENTRIES];
+ struct fbnic_mac_addr mac_addr[FBNIC_RPC_TCAM_MACDA_NUM_ENTRIES];
+ u8 mac_addr_boundary;
+ u8 tce_tcam_last;
+
+ /* IP TCAM */
+ struct fbnic_ip_addr ip_src[FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES];
+ struct fbnic_ip_addr ip_dst[FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES];
+ struct fbnic_ip_addr ipo_src[FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES];
+ struct fbnic_ip_addr ipo_dst[FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES];
+
+ /* Number of TCQs/RCQs available on hardware */
+ u16 max_num_queues;
+
+ /* Lock protecting writes to @time_high, @time_offset of fbnic_netdev,
+ * and the HW time CSR machinery.
+ */
+ spinlock_t time_lock;
+ /* Externally accessible PTP clock, may be NULL */
+ struct ptp_clock *ptp;
+ struct ptp_clock_info ptp_info;
+ /* Last @time_high refresh time in jiffies (to catch stalls) */
+ unsigned long last_read;
+
+ /* PMD specific data */
+ unsigned long end_of_pmd_training;
+ u8 pmd_state;
+
+ /* Local copy of hardware statistics */
+ struct fbnic_hw_stats hw_stats;
+
+ /* Firmware time since boot in milliseconds */
+ u64 firmware_time;
+ u64 prev_firmware_time;
+
+ struct fbnic_fw_log fw_log;
+
+ /* MDIO bus for PHYs */
+ struct mii_bus *mdio_bus;
+};
+
+/* Reserve entry 0 in the MSI-X "others" array until we have filled all
+ * 32 of the possible interrupt slots. By doing this we can avoid any
+ * potential conflicts should we need to enable one of the debug interrupt
+ * causes later.
+ */
+enum {
+ FBNIC_FW_MSIX_ENTRY,
+ FBNIC_PCS_MSIX_ENTRY,
+ FBNIC_NON_NAPI_VECTORS
+};
+
+static inline bool fbnic_present(struct fbnic_dev *fbd)
+{
+ return !!READ_ONCE(fbd->uc_addr0);
+}
+
+static inline void fbnic_wr32(struct fbnic_dev *fbd, u32 reg, u32 val)
+{
+ u32 __iomem *csr = READ_ONCE(fbd->uc_addr0);
+
+ if (csr)
+ writel(val, csr + reg);
+}
+
+u32 fbnic_rd32(struct fbnic_dev *fbd, u32 reg);
+
+static inline void fbnic_wrfl(struct fbnic_dev *fbd)
+{
+ fbnic_rd32(fbd, FBNIC_MASTER_SPARE_0);
+}
+
+static inline void
+fbnic_rmw32(struct fbnic_dev *fbd, u32 reg, u32 mask, u32 val)
+{
+ u32 v;
+
+ v = fbnic_rd32(fbd, reg);
+ v &= ~mask;
+ v |= val;
+ fbnic_wr32(fbd, reg, v);
+}
+
+#define wr32(_f, _r, _v) fbnic_wr32(_f, _r, _v)
+#define rd32(_f, _r) fbnic_rd32(_f, _r)
+#define wrfl(_f) fbnic_wrfl(_f)
+
+bool fbnic_fw_present(struct fbnic_dev *fbd);
+u32 fbnic_fw_rd32(struct fbnic_dev *fbd, u32 reg);
+void fbnic_fw_wr32(struct fbnic_dev *fbd, u32 reg, u32 val);
+
+#define fw_rd32(_f, _r) fbnic_fw_rd32(_f, _r)
+#define fw_wr32(_f, _r, _v) fbnic_fw_wr32(_f, _r, _v)
+#define fw_wrfl(_f) fbnic_fw_rd32(_f, FBNIC_FW_ZERO_REG)
+
+static inline bool fbnic_bmc_present(struct fbnic_dev *fbd)
+{
+ return fbd->fw_cap.bmc_present;
+}
+
+static inline bool fbnic_init_failure(struct fbnic_dev *fbd)
+{
+ return !fbd->netdev;
+}
+
+extern char fbnic_driver_name[];
+
+void fbnic_devlink_free(struct fbnic_dev *fbd);
+struct fbnic_dev *fbnic_devlink_alloc(struct pci_dev *pdev);
+int fbnic_devlink_health_create(struct fbnic_dev *fbd);
+void fbnic_devlink_health_destroy(struct fbnic_dev *fbd);
+void fbnic_devlink_register(struct fbnic_dev *fbd);
+void fbnic_devlink_unregister(struct fbnic_dev *fbd);
+void __printf(2, 3)
+fbnic_devlink_fw_report(struct fbnic_dev *fbd, const char *format, ...);
+void fbnic_devlink_otp_check(struct fbnic_dev *fbd, const char *msg);
+
+int fbnic_fw_request_mbx(struct fbnic_dev *fbd);
+void fbnic_fw_free_mbx(struct fbnic_dev *fbd);
+
+void fbnic_hwmon_register(struct fbnic_dev *fbd);
+void fbnic_hwmon_unregister(struct fbnic_dev *fbd);
+
+int fbnic_mac_request_irq(struct fbnic_dev *fbd);
+void fbnic_mac_free_irq(struct fbnic_dev *fbd);
+
+void fbnic_napi_name_irqs(struct fbnic_dev *fbd);
+int fbnic_napi_request_irq(struct fbnic_dev *fbd,
+ struct fbnic_napi_vector *nv);
+void fbnic_napi_free_irq(struct fbnic_dev *fbd,
+ struct fbnic_napi_vector *nv);
+void fbnic_synchronize_irq(struct fbnic_dev *fbd, int nr);
+int fbnic_request_irq(struct fbnic_dev *dev, int nr, irq_handler_t handler,
+ unsigned long flags, const char *name, void *data);
+void fbnic_free_irq(struct fbnic_dev *dev, int nr, void *data);
+void fbnic_free_irqs(struct fbnic_dev *fbd);
+int fbnic_alloc_irqs(struct fbnic_dev *fbd);
+
+void fbnic_get_fw_ver_commit_str(struct fbnic_dev *fbd, char *fw_version,
+ const size_t str_sz);
+
+void fbnic_dbg_fbd_init(struct fbnic_dev *fbd);
+void fbnic_dbg_fbd_exit(struct fbnic_dev *fbd);
+void fbnic_dbg_init(void);
+void fbnic_dbg_exit(void);
+
+void fbnic_rpc_reset_valid_entries(struct fbnic_dev *fbd);
+
+int fbnic_mdiobus_create(struct fbnic_dev *fbd);
+
+void fbnic_csr_get_regs(struct fbnic_dev *fbd, u32 *data, u32 *regs_version);
+int fbnic_csr_regs_len(struct fbnic_dev *fbd);
+
+void fbnic_config_txrx_usecs(struct fbnic_napi_vector *nv, u32 arm);
+void fbnic_config_rx_frames(struct fbnic_napi_vector *nv);
+
+enum fbnic_boards {
+ fbnic_board_asic
+};
+
+struct fbnic_info {
+ unsigned int max_num_queues;
+ unsigned int bar_mask;
+};
+
+#endif /* _FBNIC_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_csr.c b/drivers/net/ethernet/meta/fbnic/fbnic_csr.c
new file mode 100644
index 000000000000..d9c0dc1c2af9
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_csr.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include "fbnic.h"
+
+#define FBNIC_BOUNDS(section) { \
+ .start = FBNIC_CSR_START_##section, \
+ .end = FBNIC_CSR_END_##section + 1, \
+}
+
+struct fbnic_csr_bounds {
+ u32 start;
+ u32 end;
+};
+
+static const struct fbnic_csr_bounds fbnic_csr_sects[] = {
+ FBNIC_BOUNDS(INTR),
+ FBNIC_BOUNDS(INTR_CQ),
+ FBNIC_BOUNDS(QM_TX),
+ FBNIC_BOUNDS(QM_RX),
+ FBNIC_BOUNDS(TCE),
+ FBNIC_BOUNDS(TCE_RAM),
+ FBNIC_BOUNDS(TMI),
+ FBNIC_BOUNDS(PTP),
+ FBNIC_BOUNDS(RXB),
+ FBNIC_BOUNDS(RPC),
+ FBNIC_BOUNDS(FAB),
+ FBNIC_BOUNDS(MASTER),
+ FBNIC_BOUNDS(PCS),
+ FBNIC_BOUNDS(RSFEC),
+ FBNIC_BOUNDS(MAC_MAC),
+ FBNIC_BOUNDS(SIG),
+ FBNIC_BOUNDS(PCIE_SS_COMPHY),
+ FBNIC_BOUNDS(PUL_USER),
+ FBNIC_BOUNDS(QUEUE),
+ FBNIC_BOUNDS(RPC_RAM),
+};
+
+#define FBNIC_RPC_TCAM_ACT_DW_PER_ENTRY 14
+#define FBNIC_RPC_TCAM_ACT_NUM_ENTRIES 64
+
+#define FBNIC_RPC_TCAM_MACDA_DW_PER_ENTRY 4
+#define FBNIC_RPC_TCAM_MACDA_NUM_ENTRIES 32
+
+#define FBNIC_RPC_TCAM_OUTER_IPSRC_DW_PER_ENTRY 9
+#define FBNIC_RPC_TCAM_OUTER_IPSRC_NUM_ENTRIES 8
+
+#define FBNIC_RPC_TCAM_OUTER_IPDST_DW_PER_ENTRY 9
+#define FBNIC_RPC_TCAM_OUTER_IPDST_NUM_ENTRIES 8
+
+#define FBNIC_RPC_TCAM_IPSRC_DW_PER_ENTRY 9
+#define FBNIC_RPC_TCAM_IPSRC_NUM_ENTRIES 8
+
+#define FBNIC_RPC_TCAM_IPDST_DW_PER_ENTRY 9
+#define FBNIC_RPC_TCAM_IPDST_NUM_ENTRIES 8
+
+#define FBNIC_RPC_RSS_TBL_DW_PER_ENTRY 2
+#define FBNIC_RPC_RSS_TBL_NUM_ENTRIES 256
+
+static void fbnic_csr_get_regs_rpc_ram(struct fbnic_dev *fbd, u32 **data_p)
+{
+ u32 start = FBNIC_CSR_START_RPC_RAM;
+ u32 end = FBNIC_CSR_END_RPC_RAM;
+ u32 *data = *data_p;
+ u32 i, j;
+
+ *(data++) = start;
+ *(data++) = end;
+
+ /* FBNIC_RPC_TCAM_ACT */
+ for (i = 0; i < FBNIC_RPC_TCAM_ACT_NUM_ENTRIES; i++) {
+ for (j = 0; j < FBNIC_RPC_TCAM_ACT_DW_PER_ENTRY; j++)
+ *(data++) = rd32(fbd, FBNIC_RPC_TCAM_ACT(i, j));
+ }
+
+ /* FBNIC_RPC_TCAM_MACDA */
+ for (i = 0; i < FBNIC_RPC_TCAM_MACDA_NUM_ENTRIES; i++) {
+ for (j = 0; j < FBNIC_RPC_TCAM_MACDA_DW_PER_ENTRY; j++)
+ *(data++) = rd32(fbd, FBNIC_RPC_TCAM_MACDA(i, j));
+ }
+
+ /* FBNIC_RPC_TCAM_OUTER_IPSRC */
+ for (i = 0; i < FBNIC_RPC_TCAM_OUTER_IPSRC_NUM_ENTRIES; i++) {
+ for (j = 0; j < FBNIC_RPC_TCAM_OUTER_IPSRC_DW_PER_ENTRY; j++)
+ *(data++) = rd32(fbd, FBNIC_RPC_TCAM_OUTER_IPSRC(i, j));
+ }
+
+ /* FBNIC_RPC_TCAM_OUTER_IPDST */
+ for (i = 0; i < FBNIC_RPC_TCAM_OUTER_IPDST_NUM_ENTRIES; i++) {
+ for (j = 0; j < FBNIC_RPC_TCAM_OUTER_IPDST_DW_PER_ENTRY; j++)
+ *(data++) = rd32(fbd, FBNIC_RPC_TCAM_OUTER_IPDST(i, j));
+ }
+
+ /* FBNIC_RPC_TCAM_IPSRC */
+ for (i = 0; i < FBNIC_RPC_TCAM_IPSRC_NUM_ENTRIES; i++) {
+ for (j = 0; j < FBNIC_RPC_TCAM_IPSRC_DW_PER_ENTRY; j++)
+ *(data++) = rd32(fbd, FBNIC_RPC_TCAM_IPSRC(i, j));
+ }
+
+ /* FBNIC_RPC_TCAM_IPDST */
+ for (i = 0; i < FBNIC_RPC_TCAM_IPDST_NUM_ENTRIES; i++) {
+ for (j = 0; j < FBNIC_RPC_TCAM_IPDST_DW_PER_ENTRY; j++)
+ *(data++) = rd32(fbd, FBNIC_RPC_TCAM_IPDST(i, j));
+ }
+
+ /* FBNIC_RPC_RSS_TBL */
+ for (i = 0; i < FBNIC_RPC_RSS_TBL_NUM_ENTRIES; i++) {
+ for (j = 0; j < FBNIC_RPC_RSS_TBL_DW_PER_ENTRY; j++)
+ *(data++) = rd32(fbd, FBNIC_RPC_RSS_TBL(i, j));
+ }
+
+ *data_p = data;
+}
+
+void fbnic_csr_get_regs(struct fbnic_dev *fbd, u32 *data, u32 *regs_version)
+{
+ const struct fbnic_csr_bounds *bound;
+ u32 *start = data;
+ int i, j;
+
+ *regs_version = 1u;
+
+ /* Skip RPC_RAM section which cannot be dumped linearly */
+ for (i = 0, bound = fbnic_csr_sects;
+ i < ARRAY_SIZE(fbnic_csr_sects) - 1; i++, ++bound) {
+ *(data++) = bound->start;
+ *(data++) = bound->end - 1;
+ for (j = bound->start; j < bound->end; j++)
+ *(data++) = rd32(fbd, j);
+ }
+
+ /* Dump the RPC_RAM as special case registers */
+ fbnic_csr_get_regs_rpc_ram(fbd, &data);
+
+ WARN_ON(data - start != fbnic_csr_regs_len(fbd));
+}
+
+int fbnic_csr_regs_len(struct fbnic_dev *fbd)
+{
+ int i, len = 0;
+
+ /* Dump includes start and end information of each section
+ * which results in an offset of 2
+ */
+ for (i = 0; i < ARRAY_SIZE(fbnic_csr_sects); i++)
+ len += fbnic_csr_sects[i].end - fbnic_csr_sects[i].start + 2;
+
+ return len;
+}
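Editor's note: the dump produced by fbnic_csr_get_regs() is, for each linear section, two header words (the first and last register index) followed by one word per register, with the RPC_RAM TCAM/RSS tables appended entry by entry at the end; the "+ 2" in fbnic_csr_regs_len() accounts for those two header words. A rough sketch of how a consumer could walk the linear part of such a buffer (illustrative only):

static void example_walk_csr_dump(const u32 *data, int len)
{
	int i = 0;

	while (i + 2 <= len) {
		int count = data[i + 1] - data[i] + 1;	/* last - first + 1 */

		if (count < 1 || i + 2 + count > len)
			break;
		/* data[i + 2] .. data[i + 1 + count] are the register values;
		 * the trailing RPC_RAM record needs section-specific decoding.
		 */
		i += 2 + count;
	}
}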
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
new file mode 100644
index 000000000000..422265dc7abd
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
@@ -0,0 +1,1201 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#ifndef _FBNIC_CSR_H_
+#define _FBNIC_CSR_H_
+
+#include <linux/bitops.h>
+
+#define CSR_BIT(nr) (1u << (nr))
+#define CSR_GENMASK(h, l) GENMASK(h, l)
+
+#define DESC_BIT(nr) BIT_ULL(nr)
+#define DESC_GENMASK(h, l) GENMASK_ULL(h, l)
+
+#define FW_VER_CODE(_major, _minor, _patch, _build) ( \
+ FIELD_PREP(FBNIC_FW_CAP_RESP_VERSION_MAJOR, _major) | \
+ FIELD_PREP(FBNIC_FW_CAP_RESP_VERSION_MINOR, _minor) | \
+ FIELD_PREP(FBNIC_FW_CAP_RESP_VERSION_PATCH, _patch) | \
+ FIELD_PREP(FBNIC_FW_CAP_RESP_VERSION_BUILD, _build))
+
+/* Defines the minimum firmware version required by the driver */
+#define MIN_FW_VER_CODE FW_VER_CODE(0, 10, 6, 0)
+
+/* Defines the minimum firmware version required for firmware logs */
+#define MIN_FW_VER_CODE_LOG FW_VER_CODE(0, 12, 9, 0)
+
+/* The driver can request that firmware send all cached logs in bulk. This
+ * feature was enabled on older firmware, but that firmware has a bug that
+ * tries to send 30 messages per mailbox message, overflowing and flooding
+ * the mailbox. The overflow shows up as kernel warnings about corrupt
+ * mailbox messages.
+ *
+ * If the firmware is new enough, only request historical logs when the log
+ * buffer is empty, to avoid duplicate logs.
+ */
+#define MIN_FW_VER_CODE_HIST FW_VER_CODE(25, 5, 7, 0)
+
+#define PCI_DEVICE_ID_META_FBNIC_ASIC 0x0013
+
+#define FBNIC_CLOCK_FREQ (600 * (1000 * 1000))
+
+/* Transmit Work Descriptor Format */
+/* Length, Type, Offset Masks and Shifts */
+#define FBNIC_TWD_L2_HLEN_MASK DESC_GENMASK(5, 0)
+
+#define FBNIC_TWD_L3_TYPE_MASK DESC_GENMASK(7, 6)
+enum {
+ FBNIC_TWD_L3_TYPE_OTHER = 0,
+ FBNIC_TWD_L3_TYPE_IPV4 = 1,
+ FBNIC_TWD_L3_TYPE_IPV6 = 2,
+ FBNIC_TWD_L3_TYPE_V6V6 = 3,
+};
+
+#define FBNIC_TWD_L3_OHLEN_MASK DESC_GENMASK(15, 8)
+#define FBNIC_TWD_L3_IHLEN_MASK DESC_GENMASK(23, 16)
+
+enum {
+ FBNIC_TWD_L4_TYPE_OTHER = 0,
+ FBNIC_TWD_L4_TYPE_TCP = 1,
+ FBNIC_TWD_L4_TYPE_UDP = 2,
+};
+
+#define FBNIC_TWD_CSUM_OFFSET_MASK DESC_GENMASK(27, 24)
+#define FBNIC_TWD_L4_HLEN_MASK DESC_GENMASK(31, 28)
+
+/* Flags and Type */
+#define FBNIC_TWD_L4_TYPE_MASK DESC_GENMASK(33, 32)
+#define FBNIC_TWD_FLAG_REQ_TS DESC_BIT(34)
+#define FBNIC_TWD_FLAG_REQ_LSO DESC_BIT(35)
+#define FBNIC_TWD_FLAG_REQ_CSO DESC_BIT(36)
+#define FBNIC_TWD_FLAG_REQ_COMPLETION DESC_BIT(37)
+#define FBNIC_TWD_FLAG_DEST_MAC DESC_BIT(43)
+#define FBNIC_TWD_FLAG_DEST_BMC DESC_BIT(44)
+#define FBNIC_TWD_FLAG_DEST_FW DESC_BIT(45)
+#define FBNIC_TWD_TYPE_MASK DESC_GENMASK(47, 46)
+enum {
+ FBNIC_TWD_TYPE_META = 0,
+ FBNIC_TWD_TYPE_OPT_META = 1,
+ FBNIC_TWD_TYPE_AL = 2,
+ FBNIC_TWD_TYPE_LAST_AL = 3,
+};
+
+/* MSS and Completion Req */
+#define FBNIC_TWD_MSS_MASK DESC_GENMASK(61, 48)
+
+#define FBNIC_TWD_TS_MASK DESC_GENMASK(39, 0)
+#define FBNIC_TWD_ADDR_MASK DESC_GENMASK(45, 0)
+#define FBNIC_TWD_LEN_MASK DESC_GENMASK(63, 48)
+
+/* Tx Completion Descriptor Format */
+#define FBNIC_TCD_TYPE0_HEAD0_MASK DESC_GENMASK(15, 0)
+#define FBNIC_TCD_TYPE0_HEAD1_MASK DESC_GENMASK(31, 16)
+
+#define FBNIC_TCD_TYPE1_TS_MASK DESC_GENMASK(39, 0)
+
+#define FBNIC_TCD_STATUS_MASK DESC_GENMASK(59, 48)
+#define FBNIC_TCD_STATUS_TS_INVALID DESC_BIT(48)
+#define FBNIC_TCD_STATUS_ILLEGAL_TS_REQ DESC_BIT(49)
+#define FBNIC_TCD_TWQ1 DESC_BIT(60)
+#define FBNIC_TCD_TYPE_MASK DESC_GENMASK(62, 61)
+enum {
+ FBNIC_TCD_TYPE_0 = 0,
+ FBNIC_TCD_TYPE_1 = 1,
+};
+
+#define FBNIC_TCD_DONE DESC_BIT(63)
+
+/* Rx Buffer Descriptor Format
+ *
+ * The layout of this can vary depending on the page size of the system.
+ *
+ * If the page size is 4K then the layout simply consists of the ID in the
+ * 16 most significant bits, and the lower 46 bits are essentially the page
+ * address, with the lowest 12 bits reserved as 0 because a page is always
+ * aligned.
+ *
+ * If the page size is larger than 4K then the lower n bits of the ID and
+ * page address will be reserved for the fragment ID. This fragment will
+ * be 4K in size and will be used to index both the DMA address and the ID
+ * by the same amount.
+ */
+#define FBNIC_BD_DESC_ADDR_MASK DESC_GENMASK(45, 12)
+#define FBNIC_BD_DESC_ID_MASK DESC_GENMASK(63, 48)
+#define FBNIC_BD_FRAG_SIZE \
+ (FBNIC_BD_DESC_ADDR_MASK & ~(FBNIC_BD_DESC_ADDR_MASK - 1))
+#define FBNIC_BD_FRAG_COUNT \
+ (PAGE_SIZE / FBNIC_BD_FRAG_SIZE)
+#define FBNIC_BD_FRAG_ADDR_MASK \
+ (FBNIC_BD_DESC_ADDR_MASK & \
+ ~(FBNIC_BD_DESC_ADDR_MASK * FBNIC_BD_FRAG_COUNT))
+#define FBNIC_BD_FRAG_ID_MASK \
+ (FBNIC_BD_DESC_ID_MASK & \
+ ~(FBNIC_BD_DESC_ID_MASK * FBNIC_BD_FRAG_COUNT))
+#define FBNIC_BD_PAGE_ADDR_MASK \
+ (FBNIC_BD_DESC_ADDR_MASK & ~FBNIC_BD_FRAG_ADDR_MASK)
+#define FBNIC_BD_PAGE_ID_MASK \
+ (FBNIC_BD_DESC_ID_MASK & ~FBNIC_BD_FRAG_ID_MASK)
+
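/* Editor's sketch (not part of the patch): with a 4K PAGE_SIZE the fragment
 * masks above collapse to one fragment per page, and a buffer descriptor can
 * be packed from a page-aligned DMA address plus a 16-bit buffer ID roughly
 * as follows (needs <linux/bitfield.h> for FIELD_PREP; the real ring code
 * lives in fbnic_txrx.c):
 */
static inline __le64 example_bd_pack(dma_addr_t dma, u16 id)
{
	return cpu_to_le64((dma & FBNIC_BD_DESC_ADDR_MASK) |
			   FIELD_PREP(FBNIC_BD_DESC_ID_MASK, id));
}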
+/* Rx Completion Queue Descriptors */
+#define FBNIC_RCD_TYPE_MASK DESC_GENMASK(62, 61)
+enum {
+ FBNIC_RCD_TYPE_HDR_AL = 0,
+ FBNIC_RCD_TYPE_PAY_AL = 1,
+ FBNIC_RCD_TYPE_OPT_META = 2,
+ FBNIC_RCD_TYPE_META = 3,
+};
+
+#define FBNIC_RCD_DONE DESC_BIT(63)
+
+/* Address/Length Completion Descriptors */
+#define FBNIC_RCD_AL_BUFF_ID_MASK DESC_GENMASK(15, 0)
+#define FBNIC_RCD_AL_BUFF_FRAG_MASK (FBNIC_BD_FRAG_COUNT - 1)
+#define FBNIC_RCD_AL_BUFF_PAGE_MASK \
+ (FBNIC_RCD_AL_BUFF_ID_MASK & ~FBNIC_RCD_AL_BUFF_FRAG_MASK)
+#define FBNIC_RCD_AL_BUFF_LEN_MASK DESC_GENMASK(28, 16)
+#define FBNIC_RCD_AL_BUFF_OFF_MASK DESC_GENMASK(43, 32)
+#define FBNIC_RCD_AL_PAGE_FIN DESC_BIT(60)
+
+/* Header AL specific values */
+#define FBNIC_RCD_HDR_AL_OVERFLOW DESC_BIT(53)
+#define FBNIC_RCD_HDR_AL_DMA_HINT_MASK DESC_GENMASK(59, 54)
+enum {
+ FBNIC_RCD_HDR_AL_DMA_HINT_NONE = 0,
+ FBNIC_RCD_HDR_AL_DMA_HINT_L2 = 1,
+ FBNIC_RCD_HDR_AL_DMA_HINT_L3 = 2,
+ FBNIC_RCD_HDR_AL_DMA_HINT_L4 = 4,
+};
+
+/* Optional Metadata Completion Descriptors */
+#define FBNIC_RCD_OPT_META_TS_MASK DESC_GENMASK(39, 0)
+#define FBNIC_RCD_OPT_META_ACTION_MASK DESC_GENMASK(45, 40)
+#define FBNIC_RCD_OPT_META_ACTION DESC_BIT(57)
+#define FBNIC_RCD_OPT_META_TS DESC_BIT(58)
+#define FBNIC_RCD_OPT_META_TYPE_MASK DESC_GENMASK(60, 59)
+
+/* Metadata Completion Descriptors */
+#define FBNIC_RCD_META_RSS_HASH_MASK DESC_GENMASK(31, 0)
+#define FBNIC_RCD_META_L2_CSUM_MASK DESC_GENMASK(47, 32)
+#define FBNIC_RCD_META_L3_TYPE_MASK DESC_GENMASK(49, 48)
+enum {
+ FBNIC_RCD_META_L3_TYPE_OTHER = 0,
+ FBNIC_RCD_META_L3_TYPE_IPV4 = 1,
+ FBNIC_RCD_META_L3_TYPE_IPV6 = 2,
+ FBNIC_RCD_META_L3_TYPE_V6V6 = 3,
+};
+
+#define FBNIC_RCD_META_L4_TYPE_MASK DESC_GENMASK(51, 50)
+enum {
+ FBNIC_RCD_META_L4_TYPE_OTHER = 0,
+ FBNIC_RCD_META_L4_TYPE_TCP = 1,
+ FBNIC_RCD_META_L4_TYPE_UDP = 2,
+};
+
+#define FBNIC_RCD_META_L4_CSUM_UNNECESSARY DESC_BIT(52)
+#define FBNIC_RCD_META_ERR_MAC_EOP DESC_BIT(53)
+#define FBNIC_RCD_META_ERR_TRUNCATED_FRAME DESC_BIT(54)
+#define FBNIC_RCD_META_ERR_PARSER DESC_BIT(55)
+#define FBNIC_RCD_META_UNCORRECTABLE_ERR_MASK \
+ (FBNIC_RCD_META_ERR_MAC_EOP | FBNIC_RCD_META_ERR_TRUNCATED_FRAME)
+#define FBNIC_RCD_META_ECN DESC_BIT(60)
+
+/* Register Definitions
+ *
+ * The registers are laid as indexes into an le32 array. As such the actual
+ * address is 4 times the index value. Below each register is defined as 3
+ * fields, name, index, and Address.
+ *
+ * Name Index Address
+ *************************************************************************/
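+/* For example (illustrative only): FBNIC_INTR_MASK(n) below lives at index
+ * 0x00008 + n, so its byte address is 4 * (0x00008 + n) = 0x00020 + 4*n,
+ * which is what the trailing comment on each define records.
+ */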
+/* Interrupt Registers */
+#define FBNIC_CSR_START_INTR 0x00000 /* CSR section delimiter */
+#define FBNIC_INTR_STATUS(n) (0x00000 + (n)) /* 0x00000 + 4*n */
+#define FBNIC_INTR_STATUS_CNT 8
+#define FBNIC_INTR_MASK(n) (0x00008 + (n)) /* 0x00020 + 4*n */
+#define FBNIC_INTR_MASK_CNT 8
+#define FBNIC_INTR_SET(n) (0x00010 + (n)) /* 0x00040 + 4*n */
+#define FBNIC_INTR_SET_CNT 8
+#define FBNIC_INTR_CLEAR(n) (0x00018 + (n)) /* 0x00060 + 4*n */
+#define FBNIC_INTR_CLEAR_CNT 8
+#define FBNIC_INTR_SW_STATUS(n) (0x00020 + (n)) /* 0x00080 + 4*n */
+#define FBNIC_INTR_SW_STATUS_CNT 8
+#define FBNIC_INTR_SW_AC_MODE(n) (0x00028 + (n)) /* 0x000a0 + 4*n */
+#define FBNIC_INTR_SW_AC_MODE_CNT 8
+#define FBNIC_INTR_MASK_SET(n) (0x00030 + (n)) /* 0x000c0 + 4*n */
+#define FBNIC_INTR_MASK_SET_CNT 8
+#define FBNIC_INTR_MASK_CLEAR(n) (0x00038 + (n)) /* 0x000e0 + 4*n */
+#define FBNIC_INTR_MASK_CLEAR_CNT 8
+#define FBNIC_MAX_MSIX_VECS 256U
+#define FBNIC_INTR_MSIX_CTRL(n) (0x00040 + (n)) /* 0x00100 + 4*n */
+#define FBNIC_INTR_MSIX_CTRL_VECTOR_MASK CSR_GENMASK(7, 0)
+#define FBNIC_INTR_MSIX_CTRL_ENABLE CSR_BIT(31)
+enum {
+ FBNIC_INTR_MSIX_CTRL_PCS_IDX = 34,
+};
+
+#define FBNIC_CSR_END_INTR 0x0005f /* CSR section delimiter */
+
+/* Interrupt MSIX Registers */
+#define FBNIC_CSR_START_INTR_CQ 0x00400 /* CSR section delimiter */
+#define FBNIC_INTR_CQ_REARM(n) \
+ (0x00400 + 4 * (n)) /* 0x01000 + 16*n */
+#define FBNIC_INTR_CQ_REARM_CNT 256
+#define FBNIC_INTR_CQ_REARM_RCQ_TIMEOUT CSR_GENMASK(13, 0)
+#define FBNIC_INTR_CQ_REARM_RCQ_TIMEOUT_UPD_EN CSR_BIT(14)
+#define FBNIC_INTR_CQ_REARM_TCQ_TIMEOUT CSR_GENMASK(28, 15)
+#define FBNIC_INTR_CQ_REARM_TCQ_TIMEOUT_UPD_EN CSR_BIT(29)
+#define FBNIC_INTR_CQ_REARM_INTR_RELOAD CSR_BIT(30)
+#define FBNIC_INTR_CQ_REARM_INTR_UNMASK CSR_BIT(31)
+
+#define FBNIC_INTR_RCQ_TIMEOUT(n) \
+ (0x00401 + 4 * (n)) /* 0x01004 + 16*n */
+#define FBNIC_INTR_RCQ_TIMEOUT_CNT 256
+#define FBNIC_INTR_TCQ_TIMEOUT(n) \
+ (0x00402 + 4 * (n)) /* 0x01008 + 16*n */
+#define FBNIC_INTR_TCQ_TIMEOUT_CNT 256
+#define FBNIC_CSR_END_INTR_CQ 0x007fe /* CSR section delimiter */
+
+/* Global QM Tx registers */
+#define FBNIC_CSR_START_QM_TX 0x00800 /* CSR section delimiter */
+#define FBNIC_QM_TWQ_IDLE(n) (0x00800 + (n)) /* 0x02000 + 4*n */
+#define FBNIC_QM_TWQ_IDLE_CNT 8
+#define FBNIC_QM_TWQ_DEFAULT_META_L 0x00818 /* 0x02060 */
+#define FBNIC_QM_TWQ_DEFAULT_META_H 0x00819 /* 0x02064 */
+
+#define FBNIC_QM_TQS_CTL0 0x0081b /* 0x0206c */
+#define FBNIC_QM_TQS_CTL0_LSO_TS_MASK CSR_BIT(0)
+enum {
+ FBNIC_QM_TQS_CTL0_LSO_TS_FIRST = 0,
+ FBNIC_QM_TQS_CTL0_LSO_TS_LAST = 1,
+};
+
+#define FBNIC_QM_TQS_CTL0_PREFETCH_THRESH CSR_GENMASK(7, 1)
+enum {
+ FBNIC_QM_TQS_CTL0_PREFETCH_THRESH_MIN = 16,
+};
+
+#define FBNIC_QM_TQS_CTL1 0x0081c /* 0x02070 */
+#define FBNIC_QM_TQS_CTL1_MC_MAX_CREDITS CSR_GENMASK(7, 0)
+#define FBNIC_QM_TQS_CTL1_BULK_MAX_CREDITS CSR_GENMASK(15, 8)
+#define FBNIC_QM_TQS_MTU_CTL0 0x0081d /* 0x02074 */
+#define FBNIC_QM_TQS_MTU_CTL1 0x0081e /* 0x02078 */
+#define FBNIC_QM_TQS_MTU_CTL1_BULK CSR_GENMASK(13, 0)
+#define FBNIC_QM_TCQ_IDLE(n) (0x00821 + (n)) /* 0x02084 + 4*n */
+#define FBNIC_QM_TCQ_IDLE_CNT 4
+#define FBNIC_QM_TCQ_CTL0 0x0082d /* 0x020b4 */
+#define FBNIC_QM_TCQ_CTL0_COAL_WAIT CSR_GENMASK(15, 0)
+#define FBNIC_QM_TCQ_CTL0_TICK_CYCLES CSR_GENMASK(26, 16)
+#define FBNIC_QM_TQS_IDLE(n) (0x00830 + (n)) /* 0x020c0 + 4*n */
+#define FBNIC_QM_TQS_IDLE_CNT 8
+#define FBNIC_QM_TQS_EDT_TS_RANGE 0x00849 /* 0x2124 */
+#define FBNIC_QM_TDE_IDLE(n) (0x00853 + (n)) /* 0x0214c + 4*n */
+#define FBNIC_QM_TDE_IDLE_CNT 8
+#define FBNIC_QM_TNI_TDF_CTL 0x0086c /* 0x021b0 */
+#define FBNIC_QM_TNI_TDF_CTL_MRRS CSR_GENMASK(1, 0)
+#define FBNIC_QM_TNI_TDF_CTL_CLS CSR_GENMASK(3, 2)
+#define FBNIC_QM_TNI_TDF_CTL_MAX_OT CSR_GENMASK(11, 4)
+#define FBNIC_QM_TNI_TDF_CTL_MAX_OB CSR_GENMASK(23, 12)
+#define FBNIC_QM_TNI_TDE_CTL 0x0086d /* 0x021b4 */
+#define FBNIC_QM_TNI_TDE_CTL_MRRS CSR_GENMASK(1, 0)
+#define FBNIC_QM_TNI_TDE_CTL_CLS CSR_GENMASK(3, 2)
+#define FBNIC_QM_TNI_TDE_CTL_MAX_OT CSR_GENMASK(11, 4)
+#define FBNIC_QM_TNI_TDE_CTL_MAX_OB CSR_GENMASK(24, 12)
+#define FBNIC_QM_TNI_TDE_CTL_MRRS_1K CSR_BIT(25)
+#define FBNIC_QM_TNI_TCM_CTL 0x0086e /* 0x021b8 */
+#define FBNIC_QM_TNI_TCM_CTL_MPS CSR_GENMASK(1, 0)
+#define FBNIC_QM_TNI_TCM_CTL_CLS CSR_GENMASK(3, 2)
+#define FBNIC_QM_TNI_TCM_CTL_MAX_OT CSR_GENMASK(11, 4)
+#define FBNIC_QM_TNI_TCM_CTL_MAX_OB CSR_GENMASK(23, 12)
+#define FBNIC_CSR_END_QM_TX 0x00873 /* CSR section delimiter */
+
+/* Global QM Rx registers */
+#define FBNIC_CSR_START_QM_RX 0x00c00 /* CSR section delimiter */
+#define FBNIC_QM_RCQ_IDLE(n) (0x00c00 + (n)) /* 0x03000 + 4*n */
+#define FBNIC_QM_RCQ_IDLE_CNT 4
+#define FBNIC_QM_RCQ_CTL0 0x00c0c /* 0x03030 */
+#define FBNIC_QM_RCQ_CTL0_COAL_WAIT CSR_GENMASK(15, 0)
+#define FBNIC_QM_RCQ_CTL0_TICK_CYCLES CSR_GENMASK(26, 16)
+#define FBNIC_QM_HPQ_IDLE(n) (0x00c0f + (n)) /* 0x0303c + 4*n */
+#define FBNIC_QM_HPQ_IDLE_CNT 4
+#define FBNIC_QM_PPQ_IDLE(n) (0x00c13 + (n)) /* 0x0304c + 4*n */
+#define FBNIC_QM_PPQ_IDLE_CNT 4
+#define FBNIC_QM_RNI_RBP_CTL 0x00c2d /* 0x030b4 */
+#define FBNIC_QM_RNI_RBP_CTL_MRRS CSR_GENMASK(1, 0)
+#define FBNIC_QM_RNI_RBP_CTL_CLS CSR_GENMASK(3, 2)
+#define FBNIC_QM_RNI_RBP_CTL_MAX_OT CSR_GENMASK(11, 4)
+#define FBNIC_QM_RNI_RBP_CTL_MAX_OB CSR_GENMASK(23, 12)
+#define FBNIC_QM_RNI_RDE_CTL 0x00c2e /* 0x030b8 */
+#define FBNIC_QM_RNI_RDE_CTL_MPS CSR_GENMASK(1, 0)
+#define FBNIC_QM_RNI_RDE_CTL_CLS CSR_GENMASK(3, 2)
+#define FBNIC_QM_RNI_RDE_CTL_MAX_OT CSR_GENMASK(11, 4)
+#define FBNIC_QM_RNI_RDE_CTL_MAX_OB CSR_GENMASK(23, 12)
+#define FBNIC_QM_RNI_RCM_CTL 0x00c2f /* 0x030bc */
+#define FBNIC_QM_RNI_RCM_CTL_MPS CSR_GENMASK(1, 0)
+#define FBNIC_QM_RNI_RCM_CTL_CLS CSR_GENMASK(3, 2)
+#define FBNIC_QM_RNI_RCM_CTL_MAX_OT CSR_GENMASK(11, 4)
+#define FBNIC_QM_RNI_RCM_CTL_MAX_OB CSR_GENMASK(23, 12)
+#define FBNIC_CSR_END_QM_RX 0x00c34 /* CSR section delimiter */
+
+/* TCE registers */
+#define FBNIC_CSR_START_TCE 0x04000 /* CSR section delimiter */
+#define FBNIC_TCE_REG_BASE 0x04000 /* 0x10000 */
+
+#define FBNIC_TCE_LSO_CTRL 0x04000 /* 0x10000 */
+#define FBNIC_TCE_LSO_CTRL_TCPF_CLR_1ST CSR_GENMASK(8, 0)
+#define FBNIC_TCE_LSO_CTRL_TCPF_CLR_MID CSR_GENMASK(17, 9)
+#define FBNIC_TCE_LSO_CTRL_TCPF_CLR_END CSR_GENMASK(26, 18)
+#define FBNIC_TCE_LSO_CTRL_IPID_MODE_INC CSR_BIT(27)
+
+#define FBNIC_TCE_CSO_CTRL 0x04001 /* 0x10004 */
+#define FBNIC_TCE_CSO_CTRL_TCP_ZERO_CSUM CSR_BIT(0)
+
+#define FBNIC_TCE_TXB_CTRL 0x04002 /* 0x10008 */
+#define FBNIC_TCE_TXB_CTRL_LOAD CSR_BIT(0)
+#define FBNIC_TCE_TXB_CTRL_TCAM_ENABLE CSR_BIT(1)
+#define FBNIC_TCE_TXB_CTRL_DISABLE CSR_BIT(2)
+
+#define FBNIC_TCE_TXB_ENQ_WRR_CTRL 0x04003 /* 0x1000c */
+#define FBNIC_TCE_TXB_ENQ_WRR_CTRL_WEIGHT0 CSR_GENMASK(7, 0)
+#define FBNIC_TCE_TXB_ENQ_WRR_CTRL_WEIGHT1 CSR_GENMASK(15, 8)
+#define FBNIC_TCE_TXB_ENQ_WRR_CTRL_WEIGHT2 CSR_GENMASK(23, 16)
+
+#define FBNIC_TCE_TXB_TEI_Q0_CTRL 0x04004 /* 0x10010 */
+#define FBNIC_TCE_TXB_TEI_Q1_CTRL 0x04005 /* 0x10014 */
+#define FBNIC_TCE_TXB_MC_Q_CTRL 0x04006 /* 0x10018 */
+#define FBNIC_TCE_TXB_RX_TEI_Q_CTRL 0x04007 /* 0x1001c */
+#define FBNIC_TCE_TXB_RX_BMC_Q_CTRL 0x04008 /* 0x10020 */
+#define FBNIC_TCE_TXB_Q_CTRL_START CSR_GENMASK(10, 0)
+#define FBNIC_TCE_TXB_Q_CTRL_SIZE CSR_GENMASK(22, 11)
+
+#define FBNIC_TCE_TXB_TEI_DWRR_CTRL 0x04009 /* 0x10024 */
+#define FBNIC_TCE_TXB_TEI_DWRR_CTRL_QUANTUM0 CSR_GENMASK(7, 0)
+#define FBNIC_TCE_TXB_TEI_DWRR_CTRL_QUANTUM1 CSR_GENMASK(15, 8)
+#define FBNIC_TCE_TXB_NTWRK_DWRR_CTRL 0x0400a /* 0x10028 */
+#define FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_QUANTUM0 CSR_GENMASK(7, 0)
+#define FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_QUANTUM1 CSR_GENMASK(15, 8)
+#define FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_QUANTUM2 CSR_GENMASK(23, 16)
+
+#define FBNIC_TCE_TXB_CLDR_CFG 0x0400b /* 0x1002c */
+#define FBNIC_TCE_TXB_CLDR_CFG_NUM_SLOT CSR_GENMASK(5, 0)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG(n) (0x0400c + (n)) /* 0x10030 + 4*n */
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_CNT 16
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_0_0 CSR_GENMASK(1, 0)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_0_1 CSR_GENMASK(3, 2)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_0_2 CSR_GENMASK(5, 4)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_0_3 CSR_GENMASK(7, 6)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_1_0 CSR_GENMASK(9, 8)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_1_1 CSR_GENMASK(11, 10)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_1_2 CSR_GENMASK(13, 12)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_1_3 CSR_GENMASK(15, 14)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_2_0 CSR_GENMASK(17, 16)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_2_1 CSR_GENMASK(19, 18)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_2_2 CSR_GENMASK(21, 20)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_2_3 CSR_GENMASK(23, 22)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_3_0 CSR_GENMASK(25, 24)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_3_1 CSR_GENMASK(27, 26)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_3_2 CSR_GENMASK(29, 28)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_3_3 CSR_GENMASK(31, 30)
+
+#define FBNIC_TCE_BMC_MAX_PKTSZ 0x0403a /* 0x100e8 */
+#define FBNIC_TCE_BMC_MAX_PKTSZ_TX CSR_GENMASK(13, 0)
+#define FBNIC_TCE_BMC_MAX_PKTSZ_RX CSR_GENMASK(27, 14)
+#define FBNIC_TCE_MC_MAX_PKTSZ 0x0403b /* 0x100ec */
+#define FBNIC_TCE_MC_MAX_PKTSZ_TMI CSR_GENMASK(13, 0)
+
+#define FBNIC_TCE_SOP_PROT_CTRL 0x0403c /* 0x100f0 */
+#define FBNIC_TCE_SOP_PROT_CTRL_TBI CSR_GENMASK(7, 0)
+#define FBNIC_TCE_SOP_PROT_CTRL_TTI_FRM CSR_GENMASK(14, 8)
+#define FBNIC_TCE_SOP_PROT_CTRL_TTI_CM CSR_GENMASK(18, 15)
+
+#define FBNIC_TCE_DROP_CTRL 0x0403d /* 0x100f4 */
+#define FBNIC_TCE_DROP_CTRL_TTI_CM_DROP_EN CSR_BIT(0)
+#define FBNIC_TCE_DROP_CTRL_TTI_FRM_DROP_EN CSR_BIT(1)
+#define FBNIC_TCE_DROP_CTRL_TTI_TBI_DROP_EN CSR_BIT(2)
+
+#define FBNIC_TCE_TTI_CM_DROP_PKTS 0x0403e /* 0x100f8 */
+#define FBNIC_TCE_TTI_CM_DROP_BYTE_L 0x0403f /* 0x100fc */
+#define FBNIC_TCE_TTI_CM_DROP_BYTE_H 0x04040 /* 0x10100 */
+#define FBNIC_TCE_TTI_FRAME_DROP_PKTS 0x04041 /* 0x10104 */
+#define FBNIC_TCE_TTI_FRAME_DROP_BYTE_L 0x04042 /* 0x10108 */
+#define FBNIC_TCE_TTI_FRAME_DROP_BYTE_H 0x04043 /* 0x1010c */
+#define FBNIC_TCE_TBI_DROP_PKTS 0x04044 /* 0x10110 */
+#define FBNIC_TCE_TBI_DROP_BYTE_L 0x04045 /* 0x10114 */
+
+#define FBNIC_TCE_TCAM_IDX2DEST_MAP 0x0404A /* 0x10128 */
+#define FBNIC_TCE_TCAM_IDX2DEST_MAP_DEST_ID_0 CSR_GENMASK(3, 0)
+enum {
+ FBNIC_TCE_TCAM_DEST_MAC = 1,
+ FBNIC_TCE_TCAM_DEST_BMC = 2,
+ FBNIC_TCE_TCAM_DEST_FW = 4,
+};
+
+#define FBNIC_TCE_TXB_TX_BMC_Q_CTRL 0x0404B /* 0x1012c */
+#define FBNIC_TCE_TXB_BMC_DWRR_CTRL 0x0404C /* 0x10130 */
+#define FBNIC_TCE_TXB_BMC_DWRR_CTRL_QUANTUM0 CSR_GENMASK(7, 0)
+#define FBNIC_TCE_TXB_BMC_DWRR_CTRL_QUANTUM1 CSR_GENMASK(15, 8)
+#define FBNIC_TCE_TXB_TEI_DWRR_CTRL_EXT 0x0404D /* 0x10134 */
+#define FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_EXT \
+ 0x0404E /* 0x10138 */
+#define FBNIC_TCE_TXB_BMC_DWRR_CTRL_EXT 0x0404F /* 0x1013c */
+#define FBNIC_CSR_END_TCE 0x04050 /* CSR section delimiter */
+
+/* TCE RAM registers */
+#define FBNIC_CSR_START_TCE_RAM 0x04200 /* CSR section delimiter */
+#define FBNIC_TCE_RAM_TCAM(m, n) \
+ (0x04200 + 0x8 * (n) + (m)) /* 0x10800 + 32*n + 4*m */
+#define FBNIC_TCE_RAM_TCAM_MASK CSR_GENMASK(15, 0)
+#define FBNIC_TCE_RAM_TCAM_VALUE CSR_GENMASK(31, 16)
+#define FBNIC_TCE_RAM_TCAM3(n) (0x04218 + (n)) /* 0x010860 + 4*n */
+#define FBNIC_TCE_RAM_TCAM3_DEST_MASK CSR_GENMASK(5, 3)
+#define FBNIC_TCE_RAM_TCAM3_MCQ_MASK CSR_BIT(7)
+#define FBNIC_TCE_RAM_TCAM3_VALIDATE CSR_BIT(31)
+#define FBNIC_CSR_END_TCE_RAM 0x0421F /* CSR section delimiter */
+
+/* TMI registers */
+#define FBNIC_CSR_START_TMI 0x04400 /* CSR section delimiter */
+#define FBNIC_TMI_SOP_PROT_CTRL 0x04400 /* 0x11000 */
+#define FBNIC_TMI_DROP_CTRL 0x04401 /* 0x11004 */
+#define FBNIC_TMI_DROP_CTRL_EN CSR_BIT(0)
+#define FBNIC_TMI_DROP_PKTS 0x04402 /* 0x11008 */
+#define FBNIC_TMI_DROP_BYTE_L 0x04403 /* 0x1100c */
+#define FBNIC_TMI_ILLEGAL_PTP_REQS 0x04409 /* 0x11024 */
+#define FBNIC_TMI_GOOD_PTP_TS 0x0440a /* 0x11028 */
+#define FBNIC_TMI_BAD_PTP_TS 0x0440b /* 0x1102c */
+#define FBNIC_TMI_STAT_TX_PACKET_1519_2047_BYTES_L \
+ 0x04433 /* 0x110cc */
+#define FBNIC_TMI_STAT_TX_PACKET_1519_2047_BYTES_H \
+ 0x04434 /* 0x110d0 */
+#define FBNIC_TMI_STAT_TX_PACKET_2048_4095_BYTES_L \
+ 0x04435 /* 0x110d4 */
+#define FBNIC_TMI_STAT_TX_PACKET_2048_4095_BYTES_H \
+ 0x04436 /* 0x110d8 */
+#define FBNIC_TMI_STAT_TX_PACKET_4096_8191_BYTES_L \
+ 0x04437 /* 0x110dc */
+#define FBNIC_TMI_STAT_TX_PACKET_4096_8191_BYTES_H \
+ 0x04438 /* 0x110e0 */
+#define FBNIC_TMI_STAT_TX_PACKET_8192_9216_BYTES_L \
+ 0x04439 /* 0x110e4 */
+#define FBNIC_TMI_STAT_TX_PACKET_8192_9216_BYTES_H \
+ 0x0443a /* 0x110e8 */
+#define FBNIC_TMI_STAT_TX_PACKET_9217_MAX_BYTES_L \
+ 0x0443b /* 0x110ec */
+#define FBNIC_TMI_STAT_TX_PACKET_9217_MAX_BYTES_H \
+ 0x0443c /* 0x110f0 */
+#define FBNIC_CSR_END_TMI 0x0443f /* CSR section delimiter */
+
+/* Precision Time Protocol Registers */
+#define FBNIC_CSR_START_PTP 0x04800 /* CSR section delimiter */
+#define FBNIC_PTP_REG_BASE 0x04800 /* 0x12000 */
+
+#define FBNIC_PTP_CTRL 0x04800 /* 0x12000 */
+#define FBNIC_PTP_CTRL_EN CSR_BIT(0)
+#define FBNIC_PTP_CTRL_MONO_EN CSR_BIT(4)
+#define FBNIC_PTP_CTRL_TQS_OUT_EN CSR_BIT(8)
+#define FBNIC_PTP_CTRL_MAC_OUT_IVAL CSR_GENMASK(16, 12)
+#define FBNIC_PTP_CTRL_TICK_IVAL CSR_GENMASK(23, 20)
+
+#define FBNIC_PTP_ADJUST 0x04801 /* 0x12004 */
+#define FBNIC_PTP_ADJUST_INIT CSR_BIT(0)
+#define FBNIC_PTP_ADJUST_SUB_NUDGE CSR_BIT(8)
+#define FBNIC_PTP_ADJUST_ADD_NUDGE CSR_BIT(16)
+#define FBNIC_PTP_ADJUST_ADDEND_SET CSR_BIT(24)
+
+#define FBNIC_PTP_INIT_HI 0x04802 /* 0x12008 */
+#define FBNIC_PTP_INIT_LO 0x04803 /* 0x1200c */
+
+#define FBNIC_PTP_NUDGE_NS 0x04804 /* 0x12010 */
+#define FBNIC_PTP_NUDGE_SUBNS 0x04805 /* 0x12014 */
+
+#define FBNIC_PTP_ADD_VAL_NS 0x04806 /* 0x12018 */
+#define FBNIC_PTP_ADD_VAL_NS_MASK CSR_GENMASK(15, 0)
+#define FBNIC_PTP_ADD_VAL_SUBNS 0x04807 /* 0x1201c */
+
+#define FBNIC_PTP_CTR_VAL_HI 0x04808 /* 0x12020 */
+#define FBNIC_PTP_CTR_VAL_LO 0x04809 /* 0x12024 */
+
+#define FBNIC_PTP_MONO_PTP_CTR_HI 0x0480a /* 0x12028 */
+#define FBNIC_PTP_MONO_PTP_CTR_LO 0x0480b /* 0x1202c */
+
+#define FBNIC_PTP_CDC_FIFO_STATUS 0x0480c /* 0x12030 */
+#define FBNIC_PTP_SPARE 0x0480d /* 0x12034 */
+#define FBNIC_CSR_END_PTP 0x0480d /* CSR section delimiter */
+
+/* Rx Buffer Registers */
+#define FBNIC_CSR_START_RXB 0x08000 /* CSR section delimiter */
+enum {
+ FBNIC_RXB_FIFO_MC = 0,
+ /* Unused */
+ /* Unused */
+ FBNIC_RXB_FIFO_NET_TO_BMC = 3,
+ FBNIC_RXB_FIFO_HOST = 4,
+ /* Unused */
+ FBNIC_RXB_FIFO_BMC_TO_HOST = 6,
+ /* Unused */
+ FBNIC_RXB_FIFO_INDICES = 8
+};
+
+enum {
+ FBNIC_RXB_INTF_NET = 0,
+ FBNIC_RXB_INTF_RBT = 1,
+ /* Unused */
+ /* Unused */
+ FBNIC_RXB_INTF_INDICES = 4
+};
+
+#define FBNIC_RXB_CT_SIZE(n) (0x08000 + (n)) /* 0x20000 + 4*n */
+#define FBNIC_RXB_CT_SIZE_CNT 8
+#define FBNIC_RXB_CT_SIZE_HEADER CSR_GENMASK(5, 0)
+#define FBNIC_RXB_CT_SIZE_PAYLOAD CSR_GENMASK(11, 6)
+#define FBNIC_RXB_CT_SIZE_ENABLE CSR_BIT(12)
+#define FBNIC_RXB_PAUSE_DROP_CTRL 0x08008 /* 0x20020 */
+#define FBNIC_RXB_PAUSE_DROP_CTRL_DROP_ENABLE CSR_GENMASK(7, 0)
+#define FBNIC_RXB_PAUSE_DROP_CTRL_PAUSE_ENABLE CSR_GENMASK(15, 8)
+#define FBNIC_RXB_PAUSE_DROP_CTRL_ECN_ENABLE CSR_GENMASK(23, 16)
+#define FBNIC_RXB_PAUSE_DROP_CTRL_PS_ENABLE CSR_GENMASK(27, 24)
+#define FBNIC_RXB_PAUSE_THLD(n) (0x08009 + (n)) /* 0x20024 + 4*n */
+#define FBNIC_RXB_PAUSE_THLD_CNT 8
+#define FBNIC_RXB_PAUSE_THLD_ON CSR_GENMASK(12, 0)
+#define FBNIC_RXB_PAUSE_THLD_OFF CSR_GENMASK(25, 13)
+#define FBNIC_RXB_DROP_THLD(n) (0x08011 + (n)) /* 0x20044 + 4*n */
+#define FBNIC_RXB_DROP_THLD_CNT 8
+#define FBNIC_RXB_DROP_THLD_ON CSR_GENMASK(12, 0)
+#define FBNIC_RXB_DROP_THLD_OFF CSR_GENMASK(25, 13)
+#define FBNIC_RXB_ECN_THLD(n) (0x0801e + (n)) /* 0x20078 + 4*n */
+#define FBNIC_RXB_ECN_THLD_CNT 8
+#define FBNIC_RXB_ECN_THLD_ON CSR_GENMASK(12, 0)
+#define FBNIC_RXB_ECN_THLD_OFF CSR_GENMASK(25, 13)
+#define FBNIC_RXB_PBUF_CFG(n) (0x08027 + (n)) /* 0x2009c + 4*n */
+#define FBNIC_RXB_PBUF_CFG_CNT 8
+#define FBNIC_RXB_PBUF_BASE_ADDR CSR_GENMASK(12, 0)
+#define FBNIC_RXB_PBUF_SIZE CSR_GENMASK(21, 13)
+#define FBNIC_RXB_DWRR_RDE_WEIGHT0 0x0802f /* 0x200bc */
+#define FBNIC_RXB_DWRR_RDE_WEIGHT0_QUANTUM0 CSR_GENMASK(7, 0)
+#define FBNIC_RXB_DWRR_RDE_WEIGHT0_QUANTUM1 CSR_GENMASK(15, 8)
+#define FBNIC_RXB_DWRR_RDE_WEIGHT0_QUANTUM2 CSR_GENMASK(23, 16)
+#define FBNIC_RXB_DWRR_RDE_WEIGHT0_QUANTUM3 CSR_GENMASK(31, 24)
+#define FBNIC_RXB_DWRR_RDE_WEIGHT1 0x08030 /* 0x200c0 */
+#define FBNIC_RXB_DWRR_RDE_WEIGHT1_QUANTUM4 CSR_GENMASK(7, 0)
+#define FBNIC_RXB_DWRR_BMC_WEIGHT 0x08031 /* 0x200c4 */
+#define FBNIC_RXB_CLDR_PRIO_CFG(n) (0x8034 + (n)) /* 0x200d0 + 4*n */
+#define FBNIC_RXB_CLDR_PRIO_CFG_CNT 16
+#define FBNIC_RXB_ENDIAN_FCS 0x08044 /* 0x20110 */
+enum {
+ /* Unused */
+ /* Unused */
+ FBNIC_RXB_DEQUEUE_BMC = 2,
+ FBNIC_RXB_DEQUEUE_HOST = 3,
+ FBNIC_RXB_DEQUEUE_INDICES = 4
+};
+
+#define FBNIC_RXB_PBUF_CREDIT(n) (0x08047 + (n)) /* 0x2011C + 4*n */
+#define FBNIC_RXB_PBUF_CREDIT_CNT 8
+#define FBNIC_RXB_PBUF_CREDIT_MASK CSR_GENMASK(13, 0)
+#define FBNIC_RXB_INTF_CREDIT 0x0804f /* 0x2013C */
+#define FBNIC_RXB_INTF_CREDIT_MASK0 CSR_GENMASK(3, 0)
+#define FBNIC_RXB_INTF_CREDIT_MASK1 CSR_GENMASK(7, 4)
+#define FBNIC_RXB_INTF_CREDIT_MASK2 CSR_GENMASK(11, 8)
+#define FBNIC_RXB_INTF_CREDIT_MASK3 CSR_GENMASK(15, 12)
+
+#define FBNIC_RXB_PAUSE_EVENT_CNT(n) (0x08053 + (n)) /* 0x2014c + 4*n */
+#define FBNIC_RXB_DROP_FRMS_STS(n) (0x08057 + (n)) /* 0x2015c + 4*n */
+#define FBNIC_RXB_DROP_BYTES_STS_L(n) \
+ (0x08080 + 2 * (n)) /* 0x20200 + 8*n */
+#define FBNIC_RXB_DROP_BYTES_STS_H(n) \
+ (0x08081 + 2 * (n)) /* 0x20204 + 8*n */
+#define FBNIC_RXB_TRUN_FRMS_STS(n) (0x08091 + (n)) /* 0x20244 + 4*n */
+#define FBNIC_RXB_TRUN_BYTES_STS_L(n) \
+ (0x080c0 + 2 * (n)) /* 0x20300 + 8*n */
+#define FBNIC_RXB_TRUN_BYTES_STS_H(n) \
+ (0x080c1 + 2 * (n)) /* 0x20304 + 8*n */
+#define FBNIC_RXB_TRANS_PAUSE_STS(n) (0x080d1 + (n)) /* 0x20344 + 4*n */
+#define FBNIC_RXB_TRANS_DROP_STS(n) (0x080d9 + (n)) /* 0x20364 + 4*n */
+#define FBNIC_RXB_TRANS_ECN_STS(n) (0x080e1 + (n)) /* 0x20384 + 4*n */
+enum {
+ FBNIC_RXB_ENQUEUE_NET = 0,
+ FBNIC_RXB_ENQUEUE_BMC = 1,
+ /* Unused */
+ /* Unused */
+ FBNIC_RXB_ENQUEUE_INDICES = 4
+};
+
+#define FBNIC_RXB_DRBO_FRM_CNT_SRC(n) (0x080f9 + (n)) /* 0x203e4 + 4*n */
+#define FBNIC_RXB_DRBO_BYTE_CNT_SRC_L(n) \
+ (0x080fd + (n)) /* 0x203f4 + 4*n */
+#define FBNIC_RXB_DRBO_BYTE_CNT_SRC_H(n) \
+ (0x08101 + (n)) /* 0x20404 + 4*n */
+#define FBNIC_RXB_INTF_FRM_CNT_DST(n) (0x08105 + (n)) /* 0x20414 + 4*n */
+#define FBNIC_RXB_INTF_BYTE_CNT_DST_L(n) \
+ (0x08109 + (n)) /* 0x20424 + 4*n */
+#define FBNIC_RXB_INTF_BYTE_CNT_DST_H(n) \
+ (0x0810d + (n)) /* 0x20434 + 4*n */
+#define FBNIC_RXB_PBUF_FRM_CNT_DST(n) (0x08111 + (n)) /* 0x20444 + 4*n */
+#define FBNIC_RXB_PBUF_BYTE_CNT_DST_L(n) \
+ (0x08115 + (n)) /* 0x20454 + 4*n */
+#define FBNIC_RXB_PBUF_BYTE_CNT_DST_H(n) \
+ (0x08119 + (n)) /* 0x20464 + 4*n */
+
+#define FBNIC_RXB_PBUF_FIFO_LEVEL(n) (0x0811d + (n)) /* 0x20474 + 4*n */
+
+#define FBNIC_RXB_INTEGRITY_ERR(n) (0x0812f + (n)) /* 0x204bc + 4*n */
+#define FBNIC_RXB_MAC_ERR(n) (0x08133 + (n)) /* 0x204cc + 4*n */
+#define FBNIC_RXB_PARSER_ERR(n) (0x08137 + (n)) /* 0x204dc + 4*n */
+#define FBNIC_RXB_FRM_ERR(n) (0x0813b + (n)) /* 0x204ec + 4*n */
+
+#define FBNIC_RXB_DWRR_RDE_WEIGHT0_EXT 0x08143 /* 0x2050c */
+#define FBNIC_RXB_DWRR_RDE_WEIGHT1_EXT 0x08144 /* 0x20510 */
+#define FBNIC_CSR_END_RXB 0x081b1 /* CSR section delimiter */
+
+/* Rx Parser and Classifier Registers */
+#define FBNIC_CSR_START_RPC 0x08400 /* CSR section delimiter */
+#define FBNIC_RPC_RMI_CONFIG 0x08400 /* 0x21000 */
+#define FBNIC_RPC_RMI_CONFIG_OH_BYTES CSR_GENMASK(4, 0)
+#define FBNIC_RPC_RMI_CONFIG_FCS_PRESENT CSR_BIT(8)
+#define FBNIC_RPC_RMI_CONFIG_ENABLE CSR_BIT(12)
+#define FBNIC_RPC_RMI_CONFIG_MTU CSR_GENMASK(31, 16)
+
+#define FBNIC_RPC_ACT_TBL0_DEFAULT 0x0840a /* 0x21028 */
+#define FBNIC_RPC_ACT_TBL0_DROP CSR_BIT(0)
+#define FBNIC_RPC_ACT_TBL0_DEST_MASK CSR_GENMASK(3, 1)
+enum {
+ FBNIC_RPC_ACT_TBL0_DEST_HOST = 1,
+ FBNIC_RPC_ACT_TBL0_DEST_BMC = 2,
+ FBNIC_RPC_ACT_TBL0_DEST_EI = 4,
+};
+
+#define FBNIC_RPC_ACT_TBL0_Q_SEL CSR_BIT(4)
+#define FBNIC_RPC_ACT_TBL0_Q_ID CSR_GENMASK(15, 8)
+#define FBNIC_RPC_ACT_TBL0_DMA_HINT CSR_GENMASK(24, 16)
+#define FBNIC_RPC_ACT_TBL0_TS_ENA CSR_BIT(28)
+#define FBNIC_RPC_ACT_TBL0_ACT_TBL_IDX CSR_BIT(29)
+#define FBNIC_RPC_ACT_TBL0_RSS_CTXT_ID CSR_BIT(30)
+
+#define FBNIC_RPC_ACT_TBL1_DEFAULT 0x0840b /* 0x2102c */
+#define FBNIC_RPC_ACT_TBL1_RSS_ENA_MASK CSR_GENMASK(15, 0)
+enum {
+ FBNIC_RPC_ACT_TBL1_RSS_ENA_IP_SRC = 1,
+ FBNIC_RPC_ACT_TBL1_RSS_ENA_IP_DST = 2,
+ FBNIC_RPC_ACT_TBL1_RSS_ENA_L4_SRC = 4,
+ FBNIC_RPC_ACT_TBL1_RSS_ENA_L4_DST = 8,
+ FBNIC_RPC_ACT_TBL1_RSS_ENA_L2_DA = 16,
+ FBNIC_RPC_ACT_TBL1_RSS_ENA_L4_RSS_BYTE = 32,
+ FBNIC_RPC_ACT_TBL1_RSS_ENA_IV6_FL_LBL = 64,
+ FBNIC_RPC_ACT_TBL1_RSS_ENA_OV6_FL_LBL = 128,
+ FBNIC_RPC_ACT_TBL1_RSS_ENA_DSCP = 256,
+ FBNIC_RPC_ACT_TBL1_RSS_ENA_L3_PROT = 512,
+ FBNIC_RPC_ACT_TBL1_RSS_ENA_L4_PROT = 1024,
+};
+
+#define FBNIC_RPC_RSS_KEY(n) (0x0840c + (n)) /* 0x21030 + 4*n */
+#define FBNIC_RPC_RSS_KEY_BIT_LEN 425
+#define FBNIC_RPC_RSS_KEY_BYTE_LEN \
+ DIV_ROUND_UP(FBNIC_RPC_RSS_KEY_BIT_LEN, 8)
+#define FBNIC_RPC_RSS_KEY_DWORD_LEN \
+ DIV_ROUND_UP(FBNIC_RPC_RSS_KEY_BIT_LEN, 32)
+#define FBNIC_RPC_RSS_KEY_LAST_IDX \
+ (FBNIC_RPC_RSS_KEY_DWORD_LEN - 1)
+#define FBNIC_RPC_RSS_KEY_LAST_MASK \
+ CSR_GENMASK(31, \
+ FBNIC_RPC_RSS_KEY_DWORD_LEN * 32 - \
+ FBNIC_RPC_RSS_KEY_BIT_LEN)
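+
+/* Worked example (illustrative only): with the 425 bit key above,
+ * BYTE_LEN = DIV_ROUND_UP(425, 8) = 54, DWORD_LEN = DIV_ROUND_UP(425, 32) =
+ * 14, LAST_IDX = 13, and LAST_MASK = CSR_GENMASK(31, 23), i.e. only the top
+ * 9 bits of the final dword carry key material (13 * 32 + 9 = 425).
+ */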
+
+#define FBNIC_RPC_CNTR_TCP_OPT_ERR 0x0849e /* 0x21278 */
+#define FBNIC_RPC_CNTR_UNKN_ETYPE 0x0849f /* 0x2127c */
+#define FBNIC_RPC_CNTR_IPV4_FRAG 0x084a0 /* 0x21280 */
+#define FBNIC_RPC_CNTR_IPV6_FRAG 0x084a1 /* 0x21284 */
+#define FBNIC_RPC_CNTR_IPV4_ESP 0x084a2 /* 0x21288 */
+#define FBNIC_RPC_CNTR_IPV6_ESP 0x084a3 /* 0x2128c */
+#define FBNIC_RPC_CNTR_UNKN_EXT_HDR 0x084a4 /* 0x21290 */
+#define FBNIC_RPC_CNTR_OUT_OF_HDR_ERR 0x084a5 /* 0x21294 */
+#define FBNIC_RPC_CNTR_OVR_SIZE_ERR 0x084a6 /* 0x21298 */
+
+#define FBNIC_RPC_TCAM_MACDA_VALIDATE 0x0852d /* 0x214b4 */
+#define FBNIC_RPC_STAT_RX_PACKET_1519_2047_BYTES_L \
+ 0x0855f /* 0x2157c */
+#define FBNIC_RPC_STAT_RX_PACKET_1519_2047_BYTES_H \
+ 0x08560 /* 0x21580 */
+#define FBNIC_RPC_STAT_RX_PACKET_2048_4095_BYTES_L \
+ 0x08561 /* 0x21584 */
+#define FBNIC_RPC_STAT_RX_PACKET_2048_4095_BYTES_H \
+ 0x08562 /* 0x21588 */
+#define FBNIC_RPC_STAT_RX_PACKET_4096_8191_BYTES_L \
+ 0x08563 /* 0x2158c */
+#define FBNIC_RPC_STAT_RX_PACKET_4096_8191_BYTES_H \
+ 0x08564 /* 0x21590 */
+#define FBNIC_RPC_STAT_RX_PACKET_8192_9216_BYTES_L \
+ 0x08565 /* 0x21594 */
+#define FBNIC_RPC_STAT_RX_PACKET_8192_9216_BYTES_H \
+ 0x08566 /* 0x21598 */
+#define FBNIC_RPC_STAT_RX_PACKET_9217_MAX_BYTES_L \
+ 0x08567 /* 0x2159c */
+#define FBNIC_RPC_STAT_RX_PACKET_9217_MAX_BYTES_H \
+ 0x08568 /* 0x215a0 */
+#define FBNIC_CSR_END_RPC 0x0856b /* CSR section delimiter */
+
+/* RPC RAM Registers */
+
+#define FBNIC_CSR_START_RPC_RAM 0x08800 /* CSR section delimiter */
+#define FBNIC_RPC_ACT_TBL0(n) (0x08800 + (n)) /* 0x22000 + 4*n */
+#define FBNIC_RPC_ACT_TBL1(n) (0x08840 + (n)) /* 0x22100 + 4*n */
+#define FBNIC_RPC_ACT_TBL_NUM_ENTRIES 64
+
+/* TCAM Tables */
+#define FBNIC_RPC_TCAM_VALIDATE CSR_BIT(31)
+
+/* 64 Action TCAM Entries, 12 registers
+ * 3 mixed, src port, dst port, 6 L4 words, and Validate
+ */
+#define FBNIC_RPC_TCAM_ACT(m, n) \
+ (0x08880 + 0x40 * (n) + (m)) /* 0x22200 + 256*n + 4*m */
+
+#define FBNIC_RPC_TCAM_ACT_VALUE CSR_GENMASK(15, 0)
+#define FBNIC_RPC_TCAM_ACT_MASK CSR_GENMASK(31, 16)
+
+#define FBNIC_RPC_TCAM_MACDA(m, n) \
+ (0x08b80 + 0x20 * (n) + (m)) /* 0x022e00 + 128*n + 4*m */
+#define FBNIC_RPC_TCAM_MACDA_VALUE CSR_GENMASK(15, 0)
+#define FBNIC_RPC_TCAM_MACDA_MASK CSR_GENMASK(31, 16)
+
+#define FBNIC_RPC_TCAM_OUTER_IPSRC(m, n)\
+ (0x08c00 + 0x08 * (n) + (m)) /* 0x023000 + 32*n + 4*m */
+#define FBNIC_RPC_TCAM_IP_ADDR_VALUE CSR_GENMASK(15, 0)
+#define FBNIC_RPC_TCAM_IP_ADDR_MASK CSR_GENMASK(31, 16)
+
+#define FBNIC_RPC_TCAM_OUTER_IPDST(m, n)\
+ (0x08c48 + 0x08 * (n) + (m)) /* 0x023120 + 32*n + 4*m */
+#define FBNIC_RPC_TCAM_IPSRC(m, n)\
+ (0x08c90 + 0x08 * (n) + (m)) /* 0x023240 + 32*n + 4*m */
+#define FBNIC_RPC_TCAM_IPDST(m, n)\
+ (0x08cd8 + 0x08 * (n) + (m)) /* 0x023360 + 32*n + 4*m */
+
+#define FBNIC_RPC_RSS_TBL(n, m) \
+ (0x08d20 + 0x100 * (n) + (m)) /* 0x023480 + 1024*n + 4*m */
+#define FBNIC_RPC_RSS_TBL_COUNT 2
+#define FBNIC_RPC_RSS_TBL_SIZE 256
+#define FBNIC_CSR_END_RPC_RAM 0x08f1f /* CSR section delimiter */
+
+/* Fab Registers */
+#define FBNIC_CSR_START_FAB 0x0C000 /* CSR section delimiter */
+#define FBNIC_FAB_AXI4_AR_SPACER_2_CFG 0x0C005 /* 0x30014 */
+#define FBNIC_FAB_AXI4_AR_SPACER_MASK CSR_BIT(16)
+#define FBNIC_FAB_AXI4_AR_SPACER_THREADSHOLD CSR_GENMASK(15, 0)
+#define FBNIC_CSR_END_FAB 0x0C020 /* CSR section delimiter */
+
+/* Master Registers */
+#define FBNIC_CSR_START_MASTER 0x0C400 /* CSR section delimiter */
+#define FBNIC_MASTER_SPARE_0 0x0C41B /* 0x3106c */
+#define FBNIC_CSR_END_MASTER 0x0C452 /* CSR section delimiter */
+
+/* MAC PCS registers */
+#define FBNIC_CSR_START_PCS 0x10000 /* CSR section delimiter */
+#define FBNIC_PCS_PAGE(n) (0x10000 + 0x400 * (n)) /* 0x40000 + 1024*n */
+#define FBNIC_PCS(reg, n) ((reg) + FBNIC_PCS_PAGE(n))
+#define FBNIC_CSR_END_PCS 0x10668 /* CSR section delimiter */
+
+#define FBNIC_CSR_START_RSFEC 0x10800 /* CSR section delimiter */
+
+/* We have 4 RSFEC engines present in our part; however, we only use 1.
+ * As such, only CCW(0) and NCCW(0) will ever be non-zero and the other
+ * registers can be ignored.
+ */
+#define FBNIC_RSFEC_CCW_LO(n) (0x10802 + 8 * (n)) /* 0x42008 + 32*n */
+#define FBNIC_RSFEC_CCW_HI(n) (0x10803 + 8 * (n)) /* 0x4200c + 32*n */
+#define FBNIC_RSFEC_NCCW_LO(n) (0x10804 + 8 * (n)) /* 0x42010 + 32*n */
+#define FBNIC_RSFEC_NCCW_HI(n) (0x10805 + 8 * (n)) /* 0x42014 + 32*n */
+
+#define FBNIC_PCS_MAX_LANES 4
+#define FBNIC_PCS_SYMBLERR_LO(n) \
+ (0x10880 + 2 * (n)) /* 0x42200 + 8*n */
+#define FBNIC_PCS_SYMBLERR_HI(n) \
+ (0x10881 + 2 * (n)) /* 0x42204 + 8*n */
+#define FBNIC_CSR_END_RSFEC 0x108c8 /* CSR section delimiter */
+
+/* MAC MAC registers (ASIC only) */
+#define FBNIC_CSR_START_MAC_MAC 0x11000 /* CSR section delimiter */
+#define FBNIC_MAC_COMMAND_CONFIG 0x11002 /* 0x44008 */
+#define FBNIC_MAC_COMMAND_CONFIG_RX_PAUSE_DIS CSR_BIT(29)
+#define FBNIC_MAC_COMMAND_CONFIG_TX_PAUSE_DIS CSR_BIT(28)
+#define FBNIC_MAC_COMMAND_CONFIG_FLT_HDL_DIS CSR_BIT(27)
+#define FBNIC_MAC_COMMAND_CONFIG_TX_PAD_EN CSR_BIT(11)
+#define FBNIC_MAC_COMMAND_CONFIG_LOOPBACK_EN CSR_BIT(10)
+#define FBNIC_MAC_COMMAND_CONFIG_PROMISC_EN CSR_BIT(4)
+#define FBNIC_MAC_COMMAND_CONFIG_RX_ENA CSR_BIT(1)
+#define FBNIC_MAC_COMMAND_CONFIG_TX_ENA CSR_BIT(0)
+#define FBNIC_MAC_CL01_PAUSE_QUANTA 0x11015 /* 0x44054 */
+#define FBNIC_MAC_CL01_QUANTA_THRESH 0x11019 /* 0x44064 */
+#define FBNIC_CSR_END_MAC_MAC 0x11028 /* CSR section delimiter */
+
+/* Signals from MAC, AN, PCS, and LED CSR registers (ASIC only) */
+#define FBNIC_CSR_START_SIG 0x11800 /* CSR section delimiter */
+#define FBNIC_SIG_MAC_IN0 0x11800 /* 0x46000 */
+#define FBNIC_SIG_MAC_IN0_RESET_FF_TX_CLK CSR_BIT(14)
+#define FBNIC_SIG_MAC_IN0_RESET_FF_RX_CLK CSR_BIT(13)
+#define FBNIC_SIG_MAC_IN0_RESET_TX_CLK CSR_BIT(12)
+#define FBNIC_SIG_MAC_IN0_RESET_RX_CLK CSR_BIT(11)
+#define FBNIC_SIG_MAC_IN0_TX_CRC CSR_BIT(8)
+#define FBNIC_SIG_MAC_IN0_CFG_MODE128 CSR_BIT(10)
+#define FBNIC_SIG_PCS_OUT0 0x11808 /* 0x46020 */
+#define FBNIC_SIG_PCS_OUT0_LINK CSR_BIT(27)
+#define FBNIC_SIG_PCS_OUT0_BLOCK_LOCK CSR_GENMASK(24, 5)
+#define FBNIC_SIG_PCS_OUT0_AMPS_LOCK CSR_GENMASK(4, 1)
+#define FBNIC_SIG_PCS_OUT1 0x11809 /* 0x46024 */
+#define FBNIC_SIG_PCS_OUT1_FCFEC_LOCK CSR_GENMASK(11, 8)
+#define FBNIC_SIG_PCS_INTR_STS 0x11814 /* 0x46050 */
+#define FBNIC_SIG_PCS_INTR_LINK_DOWN CSR_BIT(1)
+#define FBNIC_SIG_PCS_INTR_LINK_UP CSR_BIT(0)
+#define FBNIC_SIG_PCS_INTR_MASK 0x11816 /* 0x46058 */
+#define FBNIC_CSR_END_SIG 0x1184e /* CSR section delimiter */
+
+#define FBNIC_CSR_START_MAC_STAT 0x11a00
+#define FBNIC_MAC_STAT_RX_XOFF_STB_L 0x11a00 /* 0x46800 */
+#define FBNIC_MAC_STAT_RX_XOFF_STB_H 0x11a01 /* 0x46804 */
+#define FBNIC_MAC_STAT_TX_XOFF_STB_L 0x11a04 /* 0x46810 */
+#define FBNIC_MAC_STAT_TX_XOFF_STB_H 0x11a05 /* 0x46814 */
+#define FBNIC_MAC_STAT_RX_BYTE_COUNT_L 0x11a08 /* 0x46820 */
+#define FBNIC_MAC_STAT_RX_BYTE_COUNT_H 0x11a09 /* 0x46824 */
+#define FBNIC_MAC_STAT_RX_ALIGN_ERROR_L 0x11a0a /* 0x46828 */
+#define FBNIC_MAC_STAT_RX_ALIGN_ERROR_H 0x11a0b /* 0x4682c */
+#define FBNIC_MAC_STAT_RX_TOOLONG_L 0x11a0e /* 0x46838 */
+#define FBNIC_MAC_STAT_RX_TOOLONG_H 0x11a0f /* 0x4683c */
+#define FBNIC_MAC_STAT_RX_RECEIVED_OK_L 0x11a12 /* 0x46848 */
+#define FBNIC_MAC_STAT_RX_RECEIVED_OK_H 0x11a13 /* 0x4684c */
+#define FBNIC_MAC_STAT_RX_PACKET_BAD_FCS_L \
+ 0x11a14 /* 0x46850 */
+#define FBNIC_MAC_STAT_RX_PACKET_BAD_FCS_H \
+ 0x11a15 /* 0x46854 */
+#define FBNIC_MAC_STAT_RX_IFINERRORS_L 0x11a18 /* 0x46860 */
+#define FBNIC_MAC_STAT_RX_IFINERRORS_H 0x11a19 /* 0x46864 */
+#define FBNIC_MAC_STAT_RX_MULTICAST_L 0x11a1c /* 0x46870 */
+#define FBNIC_MAC_STAT_RX_MULTICAST_H 0x11a1d /* 0x46874 */
+#define FBNIC_MAC_STAT_RX_BROADCAST_L 0x11a1e /* 0x46878 */
+#define FBNIC_MAC_STAT_RX_BROADCAST_H 0x11a1f /* 0x4687c */
+#define FBNIC_MAC_STAT_RX_UNDERSIZE_L 0x11a24 /* 0x46890 */
+#define FBNIC_MAC_STAT_RX_UNDERSIZE_H 0x11a25 /* 0x46894 */
+#define FBNIC_MAC_STAT_RX_PACKET_64_BYTES_L \
+ 0x11a26 /* 0x46898 */
+#define FBNIC_MAC_STAT_RX_PACKET_64_BYTES_H \
+ 0x11a27 /* 0x4689c */
+#define FBNIC_MAC_STAT_RX_PACKET_65_127_BYTES_L \
+ 0x11a28 /* 0x468a0 */
+#define FBNIC_MAC_STAT_RX_PACKET_65_127_BYTES_H \
+ 0x11a29 /* 0x468a4 */
+#define FBNIC_MAC_STAT_RX_PACKET_128_255_BYTES_L \
+ 0x11a2a /* 0x468a8 */
+#define FBNIC_MAC_STAT_RX_PACKET_128_255_BYTES_H \
+ 0x11a2b /* 0x468ac */
+#define FBNIC_MAC_STAT_RX_PACKET_256_511_BYTES_L \
+ 0x11a2c /* 0x468b0 */
+#define FBNIC_MAC_STAT_RX_PACKET_256_511_BYTES_H \
+ 0x11a2d /* 0x468b4 */
+#define FBNIC_MAC_STAT_RX_PACKET_512_1023_BYTES_L \
+ 0x11a2e /* 0x468b8 */
+#define FBNIC_MAC_STAT_RX_PACKET_512_1023_BYTES_H \
+ 0x11a2f /* 0x468bc */
+#define FBNIC_MAC_STAT_RX_PACKET_1024_1518_BYTES_L \
+ 0x11a30 /* 0x468c0 */
+#define FBNIC_MAC_STAT_RX_PACKET_1024_1518_BYTES_H \
+ 0x11a31 /* 0x468c4 */
+#define FBNIC_MAC_STAT_RX_PACKET_1519_MAX_BYTES_L \
+ 0x11a32 /* 0x468c8 */
+#define FBNIC_MAC_STAT_RX_PACKET_1519_MAX_BYTES_H \
+ 0x11a33 /* 0x468cc */
+#define FBNIC_MAC_STAT_RX_OVERSIZE_L 0x11a34 /* 0x468d0 */
+#define FBNIC_MAC_STAT_RX_OVERSSIZE_H 0x11a35 /* 0x468d4 */
+#define FBNIC_MAC_STAT_RX_JABBER_L 0x11a36 /* 0x468d8 */
+#define FBNIC_MAC_STAT_RX_JABBER_H 0x11a37 /* 0x468dc */
+#define FBNIC_MAC_STAT_RX_FRAGMENT_L 0x11a38 /* 0x468e0 */
+#define FBNIC_MAC_STAT_RX_FRAGMENT_H 0x11a39 /* 0x468e4 */
+#define FBNIC_MAC_STAT_RX_CONTROL_FRAMES_L \
+ 0x11a3c /* 0x468f0 */
+#define FBNIC_MAC_STAT_RX_CONTROL_FRAMES_H \
+ 0x11a3d /* 0x468f4 */
+#define FBNIC_MAC_STAT_TX_BYTE_COUNT_L 0x11a3e /* 0x468f8 */
+#define FBNIC_MAC_STAT_TX_BYTE_COUNT_H 0x11a3f /* 0x468fc */
+#define FBNIC_MAC_STAT_TX_TRANSMITTED_OK_L \
+ 0x11a42 /* 0x46908 */
+#define FBNIC_MAC_STAT_TX_TRANSMITTED_OK_H \
+ 0x11a43 /* 0x4690c */
+#define FBNIC_MAC_STAT_TX_IFOUTERRORS_L 0x11a46 /* 0x46918 */
+#define FBNIC_MAC_STAT_TX_IFOUTERRORS_H 0x11a47 /* 0x4691c */
+#define FBNIC_MAC_STAT_TX_MULTICAST_L 0x11a4a /* 0x46928 */
+#define FBNIC_MAC_STAT_TX_MULTICAST_H 0x11a4b /* 0x4692c */
+#define FBNIC_MAC_STAT_TX_BROADCAST_L 0x11a4c /* 0x46930 */
+#define FBNIC_MAC_STAT_TX_BROADCAST_H 0x11a4d /* 0x46934 */
+#define FBNIC_MAC_STAT_TX_PACKET_64_BYTES_L \
+ 0x11a4e /* 0x46938 */
+#define FBNIC_MAC_STAT_TX_PACKET_64_BYTES_H \
+ 0x11a4f /* 0x4693c */
+#define FBNIC_MAC_STAT_TX_PACKET_65_127_BYTES_L \
+ 0x11a50 /* 0x46940 */
+#define FBNIC_MAC_STAT_TX_PACKET_65_127_BYTES_H \
+ 0x11a51 /* 0x46944 */
+#define FBNIC_MAC_STAT_TX_PACKET_128_255_BYTES_L \
+ 0x11a52 /* 0x46948 */
+#define FBNIC_MAC_STAT_TX_PACKET_128_255_BYTES_H \
+ 0x11a53 /* 0x4694c */
+#define FBNIC_MAC_STAT_TX_PACKET_256_511_BYTES_L \
+ 0x11a54 /* 0x46950 */
+#define FBNIC_MAC_STAT_TX_PACKET_256_511_BYTES_H \
+ 0x11a55 /* 0x46954 */
+#define FBNIC_MAC_STAT_TX_PACKET_512_1023_BYTES_L \
+ 0x11a56 /* 0x46958 */
+#define FBNIC_MAC_STAT_TX_PACKET_512_1023_BYTES_H \
+ 0x11a57 /* 0x4695c */
+#define FBNIC_MAC_STAT_TX_PACKET_1024_1518_BYTES_L \
+ 0x11a58 /* 0x46960 */
+#define FBNIC_MAC_STAT_TX_PACKET_1024_1518_BYTES_H \
+ 0x11a59 /* 0x46964 */
+#define FBNIC_MAC_STAT_TX_PACKET_1519_MAX_BYTES_L \
+ 0x11a5a /* 0x46968 */
+#define FBNIC_MAC_STAT_TX_PACKET_1519_MAX_BYTES_H \
+ 0x11a5b /* 0x4696c */
+#define FBNIC_MAC_STAT_TX_CONTROL_FRAMES_L \
+ 0x11a5e /* 0x46978 */
+#define FBNIC_MAC_STAT_TX_CONTROL_FRAMES_H \
+ 0x11a5f /* 0x4697c */
+
+/* PCIE Comphy Registers */
+#define FBNIC_CSR_START_PCIE_SS_COMPHY 0x2442e /* CSR section delimiter */
+#define FBNIC_CSR_END_PCIE_SS_COMPHY 0x279d7 /* CSR section delimiter */
+
+/* PUL User Registers */
+#define FBNIC_CSR_START_PUL_USER 0x31000 /* CSR section delimiter */
+#define FBNIC_PUL_OB_TLP_HDR_AW_CFG 0x3103d /* 0xc40f4 */
+#define FBNIC_PUL_OB_TLP_HDR_AW_CFG_FLUSH CSR_BIT(19)
+#define FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME CSR_BIT(18)
+#define FBNIC_PUL_OB_TLP_HDR_AR_CFG 0x3103e /* 0xc40f8 */
+#define FBNIC_PUL_OB_TLP_HDR_AR_CFG_FLUSH CSR_BIT(19)
+#define FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME CSR_BIT(18)
+#define FBNIC_PUL_USER_OB_RD_TLP_CNT_31_0 \
+ 0x3106e /* 0xc41b8 */
+#define FBNIC_PUL_USER_OB_RD_DWORD_CNT_31_0 \
+ 0x31070 /* 0xc41c0 */
+#define FBNIC_PUL_USER_OB_RD_DWORD_CNT_63_32 \
+ 0x31071 /* 0xc41c4 */
+#define FBNIC_PUL_USER_OB_WR_TLP_CNT_31_0 \
+ 0x31072 /* 0xc41c8 */
+#define FBNIC_PUL_USER_OB_WR_TLP_CNT_63_32 \
+ 0x31073 /* 0xc41cc */
+#define FBNIC_PUL_USER_OB_WR_DWORD_CNT_31_0 \
+ 0x31074 /* 0xc41d0 */
+#define FBNIC_PUL_USER_OB_WR_DWORD_CNT_63_32 \
+ 0x31075 /* 0xc41d4 */
+#define FBNIC_PUL_USER_OB_CPL_TLP_CNT_31_0 \
+ 0x31076 /* 0xc41d8 */
+#define FBNIC_PUL_USER_OB_CPL_TLP_CNT_63_32 \
+ 0x31077 /* 0xc41dc */
+#define FBNIC_PUL_USER_OB_CPL_DWORD_CNT_31_0 \
+ 0x31078 /* 0xc41e0 */
+#define FBNIC_PUL_USER_OB_CPL_DWORD_CNT_63_32 \
+ 0x31079 /* 0xc41e4 */
+#define FBNIC_PUL_USER_OB_RD_DBG_CNT_CPL_CRED_31_0 \
+ 0x3107a /* 0xc41e8 */
+#define FBNIC_PUL_USER_OB_RD_DBG_CNT_CPL_CRED_63_32 \
+ 0x3107b /* 0xc41ec */
+#define FBNIC_PUL_USER_OB_RD_DBG_CNT_TAG_31_0 \
+ 0x3107c /* 0xc41f0 */
+#define FBNIC_PUL_USER_OB_RD_DBG_CNT_TAG_63_32 \
+ 0x3107d /* 0xc41f4 */
+#define FBNIC_PUL_USER_OB_RD_DBG_CNT_NP_CRED_31_0 \
+ 0x3107e /* 0xc41f8 */
+#define FBNIC_PUL_USER_OB_RD_DBG_CNT_NP_CRED_63_32 \
+ 0x3107f /* 0xc41fc */
+#define FBNIC_CSR_END_PUL_USER 0x310ea /* CSR section delimiter */
+
+/* Queue Registers
+ *
+ * The queue register offsets are specific for a given queue grouping. So to
+ * find the actual register offset it is necessary to combine FBNIC_QUEUE(n)
+ * with the register to get the actual register offset like so:
+ * FBNIC_QUEUE_TWQ0_CTL(n) == FBNIC_QUEUE(n) + FBNIC_QUEUE_TWQ0_CTL
+ */
+#define FBNIC_CSR_START_QUEUE 0x40000 /* CSR section delimiter */
+#define FBNIC_QUEUE_STRIDE 0x400 /* 0x1000 */
+#define FBNIC_QUEUE(n)\
+ (0x40000 + FBNIC_QUEUE_STRIDE * (n)) /* 0x100000 + 4096*n */
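+
+/* Illustrative sketch only (fbnic_queue_csr_example is hypothetical and not
+ * used by the driver): combining the per-queue base with one of the relative
+ * offsets below, e.g. queue 2's TWQ0 control register would be
+ * FBNIC_QUEUE(2) + FBNIC_QUEUE_TWQ0_CTL = 0x40800 + 0x000 = 0x40800.
+ */
+static inline u32 fbnic_queue_csr_example(unsigned int qid, u32 reg)
+{
+	return FBNIC_QUEUE(qid) + reg;
+}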
+
+#define FBNIC_QUEUE_TWQ0_CTL 0x000 /* 0x000 */
+#define FBNIC_QUEUE_TWQ1_CTL 0x001 /* 0x004 */
+#define FBNIC_QUEUE_TWQ_CTL_RESET CSR_BIT(0)
+#define FBNIC_QUEUE_TWQ_CTL_ENABLE CSR_BIT(1)
+#define FBNIC_QUEUE_TWQ0_TAIL 0x002 /* 0x008 */
+#define FBNIC_QUEUE_TWQ1_TAIL 0x003 /* 0x00c */
+
+#define FBNIC_QUEUE_TWQ0_SIZE 0x00a /* 0x028 */
+#define FBNIC_QUEUE_TWQ1_SIZE 0x00b /* 0x02c */
+#define FBNIC_QUEUE_TWQ_SIZE_MASK CSR_GENMASK(3, 0)
+
+#define FBNIC_QUEUE_TWQ0_BAL 0x020 /* 0x080 */
+#define FBNIC_QUEUE_BAL_MASK CSR_GENMASK(31, 7)
+#define FBNIC_QUEUE_TWQ0_BAH 0x021 /* 0x084 */
+#define FBNIC_QUEUE_TWQ1_BAL 0x022 /* 0x088 */
+#define FBNIC_QUEUE_TWQ1_BAH 0x023 /* 0x08c */
+
+/* Tx Work Queue Statistics Registers */
+#define FBNIC_QUEUE_TWQ0_PKT_CNT 0x062 /* 0x188 */
+#define FBNIC_QUEUE_TWQ0_ERR_CNT 0x063 /* 0x18c */
+#define FBNIC_QUEUE_TWQ1_PKT_CNT 0x072 /* 0x1c8 */
+#define FBNIC_QUEUE_TWQ1_ERR_CNT 0x073 /* 0x1cc */
+
+/* Tx Completion Queue Registers */
+#define FBNIC_QUEUE_TCQ_CTL 0x080 /* 0x200 */
+#define FBNIC_QUEUE_TCQ_CTL_RESET CSR_BIT(0)
+#define FBNIC_QUEUE_TCQ_CTL_ENABLE CSR_BIT(1)
+
+#define FBNIC_QUEUE_TCQ_HEAD 0x081 /* 0x204 */
+
+#define FBNIC_QUEUE_TCQ_SIZE 0x084 /* 0x210 */
+#define FBNIC_QUEUE_TCQ_SIZE_MASK CSR_GENMASK(3, 0)
+
+#define FBNIC_QUEUE_TCQ_BAL 0x0a0 /* 0x280 */
+#define FBNIC_QUEUE_TCQ_BAH 0x0a1 /* 0x284 */
+
+/* Tx Interrupt Manager Registers */
+#define FBNIC_QUEUE_TIM_CTL 0x0c0 /* 0x300 */
+#define FBNIC_QUEUE_TIM_CTL_MSIX_MASK CSR_GENMASK(7, 0)
+
+#define FBNIC_QUEUE_TIM_THRESHOLD 0x0c1 /* 0x304 */
+#define FBNIC_QUEUE_TIM_THRESHOLD_TWD_MASK CSR_GENMASK(14, 0)
+
+#define FBNIC_QUEUE_TIM_CLEAR 0x0c2 /* 0x308 */
+#define FBNIC_QUEUE_TIM_CLEAR_MASK CSR_BIT(0)
+#define FBNIC_QUEUE_TIM_SET 0x0c3 /* 0x30c */
+#define FBNIC_QUEUE_TIM_SET_MASK CSR_BIT(0)
+#define FBNIC_QUEUE_TIM_MASK 0x0c4 /* 0x310 */
+#define FBNIC_QUEUE_TIM_MASK_MASK CSR_BIT(0)
+
+#define FBNIC_QUEUE_TIM_TIMER 0x0c5 /* 0x314 */
+
+#define FBNIC_QUEUE_TIM_COUNTS 0x0c6 /* 0x318 */
+#define FBNIC_QUEUE_TIM_COUNTS_CNT1_MASK CSR_GENMASK(30, 16)
+#define FBNIC_QUEUE_TIM_COUNTS_CNT0_MASK CSR_GENMASK(14, 0)
+
+/* Rx Completion Queue Registers */
+#define FBNIC_QUEUE_RCQ_CTL 0x200 /* 0x800 */
+#define FBNIC_QUEUE_RCQ_CTL_RESET CSR_BIT(0)
+#define FBNIC_QUEUE_RCQ_CTL_ENABLE CSR_BIT(1)
+
+#define FBNIC_QUEUE_RCQ_HEAD 0x201 /* 0x804 */
+
+#define FBNIC_QUEUE_RCQ_SIZE 0x204 /* 0x810 */
+#define FBNIC_QUEUE_RCQ_SIZE_MASK CSR_GENMASK(3, 0)
+
+#define FBNIC_QUEUE_RCQ_BAL 0x220 /* 0x880 */
+#define FBNIC_QUEUE_RCQ_BAH 0x221 /* 0x884 */
+
+/* Rx Buffer Descriptor Queue Registers */
+#define FBNIC_QUEUE_BDQ_CTL 0x240 /* 0x900 */
+#define FBNIC_QUEUE_BDQ_CTL_RESET CSR_BIT(0)
+#define FBNIC_QUEUE_BDQ_CTL_ENABLE CSR_BIT(1)
+#define FBNIC_QUEUE_BDQ_CTL_PPQ_ENABLE CSR_BIT(30)
+
+#define FBNIC_QUEUE_BDQ_HPQ_TAIL 0x241 /* 0x904 */
+#define FBNIC_QUEUE_BDQ_PPQ_TAIL 0x242 /* 0x908 */
+
+#define FBNIC_QUEUE_BDQ_HPQ_SIZE 0x247 /* 0x91c */
+#define FBNIC_QUEUE_BDQ_PPQ_SIZE 0x248 /* 0x920 */
+#define FBNIC_QUEUE_BDQ_SIZE_MASK CSR_GENMASK(3, 0)
+
+#define FBNIC_QUEUE_BDQ_HPQ_BAL 0x260 /* 0x980 */
+#define FBNIC_QUEUE_BDQ_HPQ_BAH 0x261 /* 0x984 */
+#define FBNIC_QUEUE_BDQ_PPQ_BAL 0x262 /* 0x988 */
+#define FBNIC_QUEUE_BDQ_PPQ_BAH 0x263 /* 0x98c */
+
+/* Rx DMA Engine Configuration */
+#define FBNIC_QUEUE_RDE_CTL0 0x2a0 /* 0xa80 */
+#define FBNIC_QUEUE_RDE_CTL0_EN_HDR_SPLIT CSR_BIT(31)
+#define FBNIC_QUEUE_RDE_CTL0_DROP_MODE_MASK CSR_GENMASK(30, 29)
+enum {
+ FBNIC_QUEUE_RDE_CTL0_DROP_IMMEDIATE = 0,
+ FBNIC_QUEUE_RDE_CTL0_DROP_WAIT = 1,
+ FBNIC_QUEUE_RDE_CTL0_DROP_NEVER = 2,
+};
+
+#define FBNIC_QUEUE_RDE_CTL0_MIN_HROOM_MASK CSR_GENMASK(28, 20)
+#define FBNIC_QUEUE_RDE_CTL0_MIN_TROOM_MASK CSR_GENMASK(19, 11)
+
+#define FBNIC_QUEUE_RDE_CTL1 0x2a1 /* 0xa84 */
+#define FBNIC_QUEUE_RDE_CTL1_MAX_HDR_MASK CSR_GENMASK(24, 12)
+#define FBNIC_QUEUE_RDE_CTL1_PAYLD_OFF_MASK CSR_GENMASK(11, 9)
+#define FBNIC_QUEUE_RDE_CTL1_PAYLD_PG_CL_MASK CSR_GENMASK(8, 6)
+#define FBNIC_QUEUE_RDE_CTL1_PADLEN_MASK CSR_GENMASK(5, 2)
+#define FBNIC_QUEUE_RDE_CTL1_PAYLD_PACK_MASK CSR_GENMASK(1, 0)
+enum {
+ FBNIC_QUEUE_RDE_CTL1_PAYLD_PACK_NONE = 0,
+ FBNIC_QUEUE_RDE_CTL1_PAYLD_PACK_ALL = 1,
+ FBNIC_QUEUE_RDE_CTL1_PAYLD_PACK_RSS = 2,
+};
+
+/* Rx Per CQ Statistics Counters */
+#define FBNIC_QUEUE_RDE_PKT_CNT 0x2a2 /* 0xa88 */
+#define FBNIC_QUEUE_RDE_PKT_ERR_CNT 0x2a3 /* 0xa8c */
+#define FBNIC_QUEUE_RDE_CQ_DROP_CNT 0x2a4 /* 0xa90 */
+#define FBNIC_QUEUE_RDE_BDQ_DROP_CNT 0x2a5 /* 0xa94 */
+
+/* Rx Interrupt Manager Registers */
+#define FBNIC_QUEUE_RIM_CTL 0x2c0 /* 0xb00 */
+#define FBNIC_QUEUE_RIM_CTL_MSIX_MASK CSR_GENMASK(7, 0)
+
+#define FBNIC_QUEUE_RIM_THRESHOLD 0x2c1 /* 0xb04 */
+#define FBNIC_QUEUE_RIM_THRESHOLD_RCD_MASK CSR_GENMASK(14, 0)
+
+#define FBNIC_QUEUE_RIM_CLEAR 0x2c2 /* 0xb08 */
+#define FBNIC_QUEUE_RIM_CLEAR_MASK CSR_BIT(0)
+#define FBNIC_QUEUE_RIM_SET 0x2c3 /* 0xb0c */
+#define FBNIC_QUEUE_RIM_SET_MASK CSR_BIT(0)
+#define FBNIC_QUEUE_RIM_MASK 0x2c4 /* 0xb10 */
+#define FBNIC_QUEUE_RIM_MASK_MASK CSR_BIT(0)
+
+#define FBNIC_QUEUE_RIM_COAL_STATUS 0x2c5 /* 0xb14 */
+#define FBNIC_QUEUE_RIM_RCD_COUNT_MASK CSR_GENMASK(30, 16)
+#define FBNIC_QUEUE_RIM_TIMER_MASK CSR_GENMASK(13, 0)
+#define FBNIC_MAX_QUEUES 128
+#define FBNIC_CSR_END_QUEUE (0x40000 + 0x400 * FBNIC_MAX_QUEUES - 1)
+
+/* BAR 4 CSRs */
+
+/* The IPC mailbox consists of 32 mailboxes, each made up of 32 4-byte
+ * registers. We use 2 registers per descriptor, so the effective length
+ * of each mailbox is reduced to 16 descriptors.
+ *
+ * Currently the mailbox starts at offset 0x6000 on BAR4, so the register
+ * offset is simply computed from the mailbox direction and the descriptor
+ * index within that mailbox.
+ */
+#define FBNIC_IPC_MBX_DESC_LEN 16
+#define FBNIC_IPC_MBX(mbx_idx, desc_idx) \
+ ((((mbx_idx) * FBNIC_IPC_MBX_DESC_LEN + (desc_idx)) * 2) + 0x6000)
+
+/* Use first register in mailbox to flush writes */
+#define FBNIC_FW_ZERO_REG FBNIC_IPC_MBX(0, 0)
+
+enum {
+ FBNIC_IPC_MBX_RX_IDX,
+ FBNIC_IPC_MBX_TX_IDX,
+ FBNIC_IPC_MBX_INDICES,
+};
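+
+/* Worked example (illustrative only): the first descriptor of the Tx mailbox
+ * sits at FBNIC_IPC_MBX(FBNIC_IPC_MBX_TX_IDX, 0) =
+ * ((1 * 16 + 0) * 2) + 0x6000 = 0x6020, with its second register at 0x6021
+ * and the next descriptor starting at 0x6022.
+ */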
+
+#define FBNIC_IPC_MBX_DESC_LEN_MASK DESC_GENMASK(63, 48)
+#define FBNIC_IPC_MBX_DESC_EOM DESC_BIT(46)
+#define FBNIC_IPC_MBX_DESC_ADDR_MASK DESC_GENMASK(45, 3)
+#define FBNIC_IPC_MBX_DESC_FW_CMPL DESC_BIT(1)
+#define FBNIC_IPC_MBX_DESC_HOST_CMPL DESC_BIT(0)
+
+/* OTP Registers
+ * These registers are accessible via a BAR4 offset and are written by CMRT
+ * at boot. The write status register is split in half, with the OTP Write
+ * Data Status occupying the top 16 bits and the ECC status occupying the
+ * bottom 16 bits.
+ */
+#define FBNIC_NS_OTP_STATUS 0x0021d
+#define FBNIC_NS_OTP_WRITE_STATUS 0x0021e
+
+#define FBNIC_NS_OTP_WRITE_DATA_STATUS_MASK CSR_GENMASK(31, 16)
+#define FBNIC_NS_OTP_WRITE_ECC_STATUS_MASK CSR_GENMASK(15, 0)
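+
+/* Illustrative sketch only (not driver code): given the 32 bit value of
+ * FBNIC_NS_OTP_WRITE_STATUS in "sts", the two halves would be extracted as
+ *
+ *	u16 wr_sts = FIELD_GET(FBNIC_NS_OTP_WRITE_DATA_STATUS_MASK, sts);
+ *	u16 ecc_sts = FIELD_GET(FBNIC_NS_OTP_WRITE_ECC_STATUS_MASK, sts);
+ *
+ * with FIELD_GET() assumed to come from <linux/bitfield.h>.
+ */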
+
+#define FBNIC_REGS_VERSION CSR_GENMASK(31, 16)
+#define FBNIC_REGS_HW_TYPE CSR_GENMASK(15, 8)
+enum {
+ FBNIC_CSR_VERSION_V1_0_ASIC = 1,
+};
+
+#endif /* _FBNIC_CSR_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c b/drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c
new file mode 100644
index 000000000000..b7238dd967fe
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c
@@ -0,0 +1,271 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/debugfs.h>
+#include <linux/pci.h>
+#include <linux/rtnetlink.h>
+#include <linux/seq_file.h>
+
+#include "fbnic.h"
+
+static struct dentry *fbnic_dbg_root;
+
+static void fbnic_dbg_desc_break(struct seq_file *s, int i)
+{
+ while (i--)
+ seq_putc(s, '-');
+
+ seq_putc(s, '\n');
+}
+
+static int fbnic_dbg_mac_addr_show(struct seq_file *s, void *v)
+{
+ struct fbnic_dev *fbd = s->private;
+ char hdr[80];
+ int i;
+
+ /* Generate Header */
+ snprintf(hdr, sizeof(hdr), "%3s %s %-17s %s\n",
+ "Idx", "S", "TCAM Bitmap", "Addr/Mask");
+ seq_puts(s, hdr);
+ fbnic_dbg_desc_break(s, strnlen(hdr, sizeof(hdr)));
+
+ for (i = 0; i < FBNIC_RPC_TCAM_MACDA_NUM_ENTRIES; i++) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];
+
+ seq_printf(s, "%02d %d %64pb %pm\n",
+ i, mac_addr->state, mac_addr->act_tcam,
+ mac_addr->value.addr8);
+ seq_printf(s, " %pm\n",
+ mac_addr->mask.addr8);
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_mac_addr);
+
+static int fbnic_dbg_tce_tcam_show(struct seq_file *s, void *v)
+{
+ struct fbnic_dev *fbd = s->private;
+ int i, tcam_idx = 0;
+ char hdr[80];
+
+ /* Generate Header */
+ snprintf(hdr, sizeof(hdr), "%3s %s %-17s %s\n",
+ "Idx", "S", "TCAM Bitmap", "Addr/Mask");
+ seq_puts(s, hdr);
+ fbnic_dbg_desc_break(s, strnlen(hdr, sizeof(hdr)));
+
+ for (i = 0; i < ARRAY_SIZE(fbd->mac_addr); i++) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];
+
+ /* Verify BMC bit is set */
+ if (!test_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam))
+ continue;
+
+ if (tcam_idx == FBNIC_TCE_TCAM_NUM_ENTRIES)
+ break;
+
+ seq_printf(s, "%02d %d %64pb %pm\n",
+ tcam_idx, mac_addr->state, mac_addr->act_tcam,
+ mac_addr->value.addr8);
+ seq_printf(s, " %pm\n",
+ mac_addr->mask.addr8);
+ tcam_idx++;
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_tce_tcam);
+
+static int fbnic_dbg_act_tcam_show(struct seq_file *s, void *v)
+{
+ struct fbnic_dev *fbd = s->private;
+ char hdr[80];
+ int i;
+
+ /* Generate Header */
+ snprintf(hdr, sizeof(hdr), "%3s %s %-55s %-4s %s\n",
+ "Idx", "S", "Value/Mask", "RSS", "Dest");
+ seq_puts(s, hdr);
+ fbnic_dbg_desc_break(s, strnlen(hdr, sizeof(hdr)));
+
+ for (i = 0; i < FBNIC_RPC_TCAM_ACT_NUM_ENTRIES; i++) {
+ struct fbnic_act_tcam *act_tcam = &fbd->act_tcam[i];
+
+ seq_printf(s, "%02d %d %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %08x\n",
+ i, act_tcam->state,
+ act_tcam->value.tcam[10], act_tcam->value.tcam[9],
+ act_tcam->value.tcam[8], act_tcam->value.tcam[7],
+ act_tcam->value.tcam[6], act_tcam->value.tcam[5],
+ act_tcam->value.tcam[4], act_tcam->value.tcam[3],
+ act_tcam->value.tcam[2], act_tcam->value.tcam[1],
+ act_tcam->value.tcam[0], act_tcam->rss_en_mask,
+ act_tcam->dest);
+ seq_printf(s, " %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n",
+ act_tcam->mask.tcam[10], act_tcam->mask.tcam[9],
+ act_tcam->mask.tcam[8], act_tcam->mask.tcam[7],
+ act_tcam->mask.tcam[6], act_tcam->mask.tcam[5],
+ act_tcam->mask.tcam[4], act_tcam->mask.tcam[3],
+ act_tcam->mask.tcam[2], act_tcam->mask.tcam[1],
+ act_tcam->mask.tcam[0]);
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_act_tcam);
+
+static int fbnic_dbg_ip_addr_show(struct seq_file *s,
+ struct fbnic_ip_addr *ip_addr)
+{
+ char hdr[80];
+ int i;
+
+ /* Generate Header */
+ snprintf(hdr, sizeof(hdr), "%3s %s %-17s %s %s\n",
+ "Idx", "S", "TCAM Bitmap", "V", "Addr/Mask");
+ seq_puts(s, hdr);
+ fbnic_dbg_desc_break(s, strnlen(hdr, sizeof(hdr)));
+
+ for (i = 0; i < FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES; i++, ip_addr++) {
+ seq_printf(s, "%02d %d %64pb %d %pi6\n",
+ i, ip_addr->state, ip_addr->act_tcam,
+ ip_addr->version, &ip_addr->value);
+ seq_printf(s, " %pi6\n",
+ &ip_addr->mask);
+ }
+
+ return 0;
+}
+
+static int fbnic_dbg_ip_src_show(struct seq_file *s, void *v)
+{
+ struct fbnic_dev *fbd = s->private;
+
+ return fbnic_dbg_ip_addr_show(s, fbd->ip_src);
+}
+DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_ip_src);
+
+static int fbnic_dbg_ip_dst_show(struct seq_file *s, void *v)
+{
+ struct fbnic_dev *fbd = s->private;
+
+ return fbnic_dbg_ip_addr_show(s, fbd->ip_dst);
+}
+DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_ip_dst);
+
+static int fbnic_dbg_ipo_src_show(struct seq_file *s, void *v)
+{
+ struct fbnic_dev *fbd = s->private;
+
+ return fbnic_dbg_ip_addr_show(s, fbd->ipo_src);
+}
+DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_ipo_src);
+
+static int fbnic_dbg_ipo_dst_show(struct seq_file *s, void *v)
+{
+ struct fbnic_dev *fbd = s->private;
+
+ return fbnic_dbg_ip_addr_show(s, fbd->ipo_dst);
+}
+DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_ipo_dst);
+
+static int fbnic_dbg_fw_log_show(struct seq_file *s, void *v)
+{
+ struct fbnic_dev *fbd = s->private;
+ struct fbnic_fw_log_entry *entry;
+ unsigned long flags;
+
+ if (!fbnic_fw_log_ready(fbd))
+ return -ENXIO;
+
+ spin_lock_irqsave(&fbd->fw_log.lock, flags);
+
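+	/* Walk the log in reverse list order, breaking the millisecond
+	 * timestamp into days/hours/minutes/seconds/ms for FBNIC_FW_LOG_FMT.
+	 */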
+ list_for_each_entry_reverse(entry, &fbd->fw_log.entries, list) {
+ seq_printf(s, FBNIC_FW_LOG_FMT, entry->index,
+ (entry->timestamp / (MSEC_PER_SEC * 60 * 60 * 24)),
+ (entry->timestamp / (MSEC_PER_SEC * 60 * 60)) % 24,
+ ((entry->timestamp / (MSEC_PER_SEC * 60) % 60)),
+ ((entry->timestamp / MSEC_PER_SEC) % 60),
+ (entry->timestamp % MSEC_PER_SEC),
+ entry->msg);
+ }
+
+ spin_unlock_irqrestore(&fbd->fw_log.lock, flags);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_fw_log);
+
+static int fbnic_dbg_pcie_stats_show(struct seq_file *s, void *v)
+{
+ struct fbnic_dev *fbd = s->private;
+
+ rtnl_lock();
+ fbnic_get_hw_stats(fbd);
+
+ seq_printf(s, "ob_rd_tlp: %llu\n", fbd->hw_stats.pcie.ob_rd_tlp.value);
+ seq_printf(s, "ob_rd_dword: %llu\n",
+ fbd->hw_stats.pcie.ob_rd_dword.value);
+ seq_printf(s, "ob_wr_tlp: %llu\n", fbd->hw_stats.pcie.ob_wr_tlp.value);
+ seq_printf(s, "ob_wr_dword: %llu\n",
+ fbd->hw_stats.pcie.ob_wr_dword.value);
+ seq_printf(s, "ob_cpl_tlp: %llu\n",
+ fbd->hw_stats.pcie.ob_cpl_tlp.value);
+ seq_printf(s, "ob_cpl_dword: %llu\n",
+ fbd->hw_stats.pcie.ob_cpl_dword.value);
+ seq_printf(s, "ob_rd_no_tag: %llu\n",
+ fbd->hw_stats.pcie.ob_rd_no_tag.value);
+ seq_printf(s, "ob_rd_no_cpl_cred: %llu\n",
+ fbd->hw_stats.pcie.ob_rd_no_cpl_cred.value);
+ seq_printf(s, "ob_rd_no_np_cred: %llu\n",
+ fbd->hw_stats.pcie.ob_rd_no_np_cred.value);
+ rtnl_unlock();
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_pcie_stats);
+
+void fbnic_dbg_fbd_init(struct fbnic_dev *fbd)
+{
+ struct pci_dev *pdev = to_pci_dev(fbd->dev);
+ const char *name = pci_name(pdev);
+
+ fbd->dbg_fbd = debugfs_create_dir(name, fbnic_dbg_root);
+ debugfs_create_file("pcie_stats", 0400, fbd->dbg_fbd, fbd,
+ &fbnic_dbg_pcie_stats_fops);
+ debugfs_create_file("mac_addr", 0400, fbd->dbg_fbd, fbd,
+ &fbnic_dbg_mac_addr_fops);
+ debugfs_create_file("tce_tcam", 0400, fbd->dbg_fbd, fbd,
+ &fbnic_dbg_tce_tcam_fops);
+ debugfs_create_file("act_tcam", 0400, fbd->dbg_fbd, fbd,
+ &fbnic_dbg_act_tcam_fops);
+ debugfs_create_file("ip_src", 0400, fbd->dbg_fbd, fbd,
+ &fbnic_dbg_ip_src_fops);
+ debugfs_create_file("ip_dst", 0400, fbd->dbg_fbd, fbd,
+ &fbnic_dbg_ip_dst_fops);
+ debugfs_create_file("ipo_src", 0400, fbd->dbg_fbd, fbd,
+ &fbnic_dbg_ipo_src_fops);
+ debugfs_create_file("ipo_dst", 0400, fbd->dbg_fbd, fbd,
+ &fbnic_dbg_ipo_dst_fops);
+ debugfs_create_file("fw_log", 0400, fbd->dbg_fbd, fbd,
+ &fbnic_dbg_fw_log_fops);
+}
+
+void fbnic_dbg_fbd_exit(struct fbnic_dev *fbd)
+{
+ debugfs_remove_recursive(fbd->dbg_fbd);
+ fbd->dbg_fbd = NULL;
+}
+
+void fbnic_dbg_init(void)
+{
+ fbnic_dbg_root = debugfs_create_dir(fbnic_driver_name, NULL);
+}
+
+void fbnic_dbg_exit(void)
+{
+ debugfs_remove_recursive(fbnic_dbg_root);
+ fbnic_dbg_root = NULL;
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c b/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c
new file mode 100644
index 000000000000..b62b1d5b1453
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c
@@ -0,0 +1,668 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/unaligned.h>
+#include <linux/pci.h>
+#include <linux/pldmfw.h>
+#include <linux/types.h>
+#include <net/devlink.h>
+
+#include "fbnic.h"
+#include "fbnic_fw.h"
+#include "fbnic_tlv.h"
+
+#define FBNIC_SN_STR_LEN 24
+
+static int fbnic_version_running_put(struct devlink_info_req *req,
+ struct fbnic_fw_ver *fw_ver,
+ char *ver_name)
+{
+ char running_ver[FBNIC_FW_VER_MAX_SIZE];
+ int err;
+
+ fbnic_mk_fw_ver_str(fw_ver->version, running_ver);
+ err = devlink_info_version_running_put(req, ver_name, running_ver);
+ if (err)
+ return err;
+
+ if (strlen(fw_ver->commit) > 0) {
+ char commit_name[FBNIC_SN_STR_LEN];
+
+ snprintf(commit_name, FBNIC_SN_STR_LEN, "%s.commit", ver_name);
+ err = devlink_info_version_running_put(req, commit_name,
+ fw_ver->commit);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int fbnic_version_stored_put(struct devlink_info_req *req,
+ struct fbnic_fw_ver *fw_ver,
+ char *ver_name)
+{
+ char stored_ver[FBNIC_FW_VER_MAX_SIZE];
+ int err;
+
+ fbnic_mk_fw_ver_str(fw_ver->version, stored_ver);
+ err = devlink_info_version_stored_put(req, ver_name, stored_ver);
+ if (err)
+ return err;
+
+ if (strlen(fw_ver->commit) > 0) {
+ char commit_name[FBNIC_SN_STR_LEN];
+
+ snprintf(commit_name, FBNIC_SN_STR_LEN, "%s.commit", ver_name);
+ err = devlink_info_version_stored_put(req, commit_name,
+ fw_ver->commit);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int fbnic_devlink_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_dev *fbd = devlink_priv(devlink);
+ int err;
+
+ err = fbnic_version_running_put(req, &fbd->fw_cap.running.mgmt,
+ DEVLINK_INFO_VERSION_GENERIC_FW);
+ if (err)
+ return err;
+
+ err = fbnic_version_running_put(req, &fbd->fw_cap.running.bootloader,
+ DEVLINK_INFO_VERSION_GENERIC_FW_BOOTLOADER);
+ if (err)
+ return err;
+
+ err = fbnic_version_stored_put(req, &fbd->fw_cap.stored.mgmt,
+ DEVLINK_INFO_VERSION_GENERIC_FW);
+ if (err)
+ return err;
+
+ err = fbnic_version_stored_put(req, &fbd->fw_cap.stored.bootloader,
+ DEVLINK_INFO_VERSION_GENERIC_FW_BOOTLOADER);
+ if (err)
+ return err;
+
+ err = fbnic_version_stored_put(req, &fbd->fw_cap.stored.undi,
+ DEVLINK_INFO_VERSION_GENERIC_FW_UNDI);
+ if (err)
+ return err;
+
+ if (fbd->dsn) {
+ unsigned char serial[FBNIC_SN_STR_LEN];
+ u8 dsn[8];
+
+ put_unaligned_be64(fbd->dsn, dsn);
+ err = snprintf(serial, FBNIC_SN_STR_LEN, "%8phD", dsn);
+ if (err < 0)
+ return err;
+
+ err = devlink_info_serial_number_put(req, serial);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static bool
+fbnic_pldm_match_record(struct pldmfw *context, struct pldmfw_record *record)
+{
+ struct pldmfw_desc_tlv *desc;
+ u32 anti_rollback_ver = 0;
+ struct devlink *devlink;
+ struct fbnic_dev *fbd;
+ struct pci_dev *pdev;
+
+ /* First, use the standard PCI matching function */
+ if (!pldmfw_op_pci_match_record(context, record))
+ return false;
+
+ pdev = to_pci_dev(context->dev);
+ fbd = pci_get_drvdata(pdev);
+ devlink = priv_to_devlink(fbd);
+
+ /* If PCI match is successful, check for vendor-specific descriptors */
+ list_for_each_entry(desc, &record->descs, entry) {
+ if (desc->type != PLDM_DESC_ID_VENDOR_DEFINED)
+ continue;
+
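+		/* Expect a 1 byte title string type, a 15 byte
+		 * "AntiRollbackVer" title and a 4 byte little-endian version,
+		 * i.e. at least 2 + 15 + 4 = 21 bytes of descriptor data.
+		 */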
+ if (desc->size < 21 || desc->data[0] != 1 ||
+ desc->data[1] != 15)
+ continue;
+
+ if (memcmp(desc->data + 2, "AntiRollbackVer", 15) != 0)
+ continue;
+
+ anti_rollback_ver = get_unaligned_le32(desc->data + 17);
+ break;
+ }
+
+	/* Reject the image if its anti-rollback version is older than the
+	 * device's current version.
+	 */
+ if (anti_rollback_ver < fbd->fw_cap.anti_rollback_version) {
+ char buf[128];
+
+ snprintf(buf, sizeof(buf),
+ "New firmware anti-rollback version (0x%x) is older than device version (0x%x)!",
+ anti_rollback_ver, fbd->fw_cap.anti_rollback_version);
+ devlink_flash_update_status_notify(devlink, buf,
+ "Anti-Rollback", 0, 0);
+
+ return false;
+ }
+
+ return true;
+}
+
+static int
+fbnic_flash_start(struct fbnic_dev *fbd, struct pldmfw_component *component)
+{
+ struct fbnic_fw_completion *cmpl;
+ int err;
+
+ cmpl = fbnic_fw_alloc_cmpl(FBNIC_TLV_MSG_ID_FW_START_UPGRADE_REQ);
+ if (!cmpl)
+ return -ENOMEM;
+
+ err = fbnic_fw_xmit_fw_start_upgrade(fbd, cmpl,
+ component->identifier,
+ component->component_size);
+ if (err)
+ goto cmpl_free;
+
+ /* Wait for firmware to ack firmware upgrade start */
+ if (wait_for_completion_timeout(&cmpl->done, 10 * HZ))
+ err = cmpl->result;
+ else
+ err = -ETIMEDOUT;
+
+ fbnic_mbx_clear_cmpl(fbd, cmpl);
+cmpl_free:
+ fbnic_fw_put_cmpl(cmpl);
+
+ return err;
+}
+
+static int
+fbnic_flash_component(struct pldmfw *context,
+ struct pldmfw_component *component)
+{
+ const u8 *data = component->component_data;
+ const u32 size = component->component_size;
+ struct fbnic_fw_completion *cmpl;
+ const char *component_name;
+ struct devlink *devlink;
+ struct fbnic_dev *fbd;
+ struct pci_dev *pdev;
+ u32 offset = 0;
+ u32 length = 0;
+ char buf[32];
+ int err;
+
+ pdev = to_pci_dev(context->dev);
+ fbd = pci_get_drvdata(pdev);
+ devlink = priv_to_devlink(fbd);
+
+ switch (component->identifier) {
+ case QSPI_SECTION_CMRT:
+ component_name = "boot1";
+ break;
+ case QSPI_SECTION_CONTROL_FW:
+ component_name = "boot2";
+ break;
+ case QSPI_SECTION_OPTION_ROM:
+ component_name = "option-rom";
+ break;
+ default:
+ snprintf(buf, sizeof(buf), "Unknown component ID %u!",
+ component->identifier);
+ devlink_flash_update_status_notify(devlink, buf, NULL, 0,
+ size);
+ return -EINVAL;
+ }
+
+ /* Once firmware receives the request to start upgrading it responds
+ * with two messages:
+	 * 1. An ACK that it received the message, with a possible error code
+	 *    indicating that an upgrade is not currently possible.
+	 * 2. A request for the first chunk of data.
+	 *
+	 * Set up the write completion before issuing the start message so
+	 * the driver can catch both messages.
+ */
+ cmpl = fbnic_fw_alloc_cmpl(FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_REQ);
+ if (!cmpl)
+ return -ENOMEM;
+
+ err = fbnic_mbx_set_cmpl(fbd, cmpl);
+ if (err)
+ goto cmpl_free;
+
+ devlink_flash_update_timeout_notify(devlink, "Initializing",
+ component_name, 15);
+ err = fbnic_flash_start(fbd, component);
+ if (err)
+ goto err_no_msg;
+
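+	/* The transfer is firmware driven: each completion carries the
+	 * offset and length of the next chunk to send, and a zero-length
+	 * request indicates the firmware has finished with the component.
+	 */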
+ while (offset < size) {
+ if (!wait_for_completion_timeout(&cmpl->done, 15 * HZ)) {
+ err = -ETIMEDOUT;
+ break;
+ }
+
+ err = cmpl->result;
+ if (err)
+ break;
+
+ /* Verify firmware is requesting the next chunk in the seq. */
+ if (cmpl->u.fw_update.offset != offset + length) {
+ err = -EFAULT;
+ break;
+ }
+
+ offset = cmpl->u.fw_update.offset;
+ length = cmpl->u.fw_update.length;
+
+ if (length > TLV_MAX_DATA || offset + length > size) {
+ err = -EFAULT;
+ break;
+ }
+
+ devlink_flash_update_status_notify(devlink, "Flashing",
+ component_name,
+ offset, size);
+
+ /* Mailbox will set length to 0 once it receives the finish
+ * message.
+ */
+ if (!length)
+ continue;
+
+ reinit_completion(&cmpl->done);
+ err = fbnic_fw_xmit_fw_write_chunk(fbd, data, offset, length,
+ 0);
+ if (err)
+ break;
+ }
+
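+	/* On failure, send a zero-length chunk carrying the error code so
+	 * the firmware knows the transfer has been aborted.
+	 */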
+ if (err) {
+ fbnic_fw_xmit_fw_write_chunk(fbd, NULL, 0, 0, err);
+err_no_msg:
+ snprintf(buf, sizeof(buf), "Mailbox encountered error %d!",
+ err);
+ devlink_flash_update_status_notify(devlink, buf,
+ component_name, 0, 0);
+ }
+
+ fbnic_mbx_clear_cmpl(fbd, cmpl);
+cmpl_free:
+ fbnic_fw_put_cmpl(cmpl);
+
+ return err;
+}
+
+static const struct pldmfw_ops fbnic_pldmfw_ops = {
+ .match_record = fbnic_pldm_match_record,
+ .flash_component = fbnic_flash_component,
+};
+
+static int
+fbnic_devlink_flash_update(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_dev *fbd = devlink_priv(devlink);
+ const struct firmware *fw = params->fw;
+ struct device *dev = fbd->dev;
+ struct pldmfw context;
+ char *err_msg;
+ int err;
+
+ context.ops = &fbnic_pldmfw_ops;
+ context.dev = dev;
+
+ err = pldmfw_flash_image(&context, fw);
+ if (err) {
+ switch (err) {
+ case -EINVAL:
+ err_msg = "Invalid image";
+ break;
+ case -EOPNOTSUPP:
+ err_msg = "Unsupported image";
+ break;
+ case -ENOMEM:
+ err_msg = "Out of memory";
+ break;
+ case -EFAULT:
+ err_msg = "Invalid header";
+ break;
+ case -ENOENT:
+ err_msg = "No matching record";
+ break;
+ case -ENODEV:
+ err_msg = "No matching device";
+ break;
+ case -ETIMEDOUT:
+ err_msg = "Timed out waiting for reply";
+ break;
+ default:
+ err_msg = "Unknown error";
+ break;
+ }
+
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Failed to flash PLDM Image: %s (error: %d)",
+ err_msg, err);
+ }
+
+ return err;
+}
+
+static const struct devlink_ops fbnic_devlink_ops = {
+ .info_get = fbnic_devlink_info_get,
+ .flash_update = fbnic_devlink_flash_update,
+};
+
+static int fbnic_fw_reporter_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_dev *fbd = devlink_health_reporter_priv(reporter);
+ u32 offset, index, index_count, length, size;
+ struct fbnic_fw_completion *fw_cmpl;
+ u8 *dump_data, **data;
+ int err;
+
+ fw_cmpl = fbnic_fw_alloc_cmpl(FBNIC_TLV_MSG_ID_COREDUMP_GET_INFO_RESP);
+ if (!fw_cmpl)
+ return -ENOMEM;
+
+ err = fbnic_fw_xmit_coredump_info_msg(fbd, fw_cmpl, true);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to transmit core dump info msg");
+ goto cmpl_free;
+ }
+ if (!wait_for_completion_timeout(&fw_cmpl->done, 2 * HZ)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Timed out waiting on core dump info");
+ err = -ETIMEDOUT;
+ goto cmpl_cleanup;
+ }
+
+ size = fw_cmpl->u.coredump_info.size;
+ err = fw_cmpl->result;
+
+ fbnic_mbx_clear_cmpl(fbd, fw_cmpl);
+ fbnic_fw_put_cmpl(fw_cmpl);
+
+ /* Handle error returned by firmware */
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware core dump returned error");
+ return err;
+ }
+ if (!size) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Firmware core dump returned size 0");
+ return -EIO;
+ }
+
+ /* Read the dump, we can only transfer TLV_MAX_DATA at a time */
+ index_count = DIV_ROUND_UP(size, TLV_MAX_DATA);
+
+ fw_cmpl = __fbnic_fw_alloc_cmpl(FBNIC_TLV_MSG_ID_COREDUMP_READ_RESP,
+ sizeof(void *) * index_count + size);
+ if (!fw_cmpl)
+ return -ENOMEM;
+
+	/* Point each pointer table entry at its chunk's destination within
+	 * the dump buffer.
+	 */
+ dump_data = (void *)&fw_cmpl->u.coredump.data[index_count];
+ data = fw_cmpl->u.coredump.data;
+ fw_cmpl->u.coredump.size = size;
+ fw_cmpl->u.coredump.stride = TLV_MAX_DATA;
+
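+	/* Request each chunk in turn.  The mailbox handler copies the chunk
+	 * into place and clears the corresponding pointer, which is how a
+	 * missing reply is detected below.
+	 */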
+ for (index = 0; index < index_count; index++) {
+ /* First iteration installs completion */
+ struct fbnic_fw_completion *cmpl_arg = index ? NULL : fw_cmpl;
+
+ offset = index * TLV_MAX_DATA;
+ length = min(size - offset, TLV_MAX_DATA);
+
+ data[index] = dump_data + offset;
+ err = fbnic_fw_xmit_coredump_read_msg(fbd, cmpl_arg,
+ offset, length);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to transmit core dump msg");
+ if (cmpl_arg)
+ goto cmpl_free;
+ else
+ goto cmpl_cleanup;
+ }
+
+ if (wait_for_completion_timeout(&fw_cmpl->done, 2 * HZ)) {
+ reinit_completion(&fw_cmpl->done);
+ } else {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Timed out waiting on core dump (%d/%d)",
+ index + 1, index_count);
+ err = -ETIMEDOUT;
+ goto cmpl_cleanup;
+ }
+
+		/* If the data pointer was not cleared, the reply never arrived */
+ if (fw_cmpl->u.coredump.data[index]) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "No data for core dump chunk (%d/%d)",
+ index + 1, index_count);
+ err = -EIO;
+ goto cmpl_cleanup;
+ }
+ }
+
+ devlink_fmsg_binary_pair_nest_start(fmsg, "FW coredump");
+
+ for (offset = 0; offset < size; offset += length) {
+ length = min_t(u32, size - offset, TLV_MAX_DATA);
+
+ devlink_fmsg_binary_put(fmsg, dump_data + offset, length);
+ }
+
+ devlink_fmsg_binary_pair_nest_end(fmsg);
+
+cmpl_cleanup:
+ fbnic_mbx_clear_cmpl(fbd, fw_cmpl);
+cmpl_free:
+ fbnic_fw_put_cmpl(fw_cmpl);
+
+ return err;
+}
+
+static int
+fbnic_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_dev *fbd = devlink_health_reporter_priv(reporter);
+ u32 sec, msec;
+
+ /* Device is most likely down, we're not exchanging heartbeats */
+ if (!fbd->prev_firmware_time)
+ return 0;
+
+ sec = div_u64_rem(fbd->firmware_time, MSEC_PER_SEC, &msec);
+
+ devlink_fmsg_pair_nest_start(fmsg, "last_heartbeat");
+ devlink_fmsg_obj_nest_start(fmsg);
+ devlink_fmsg_pair_nest_start(fmsg, "fw_uptime");
+ devlink_fmsg_obj_nest_start(fmsg);
+ devlink_fmsg_u32_pair_put(fmsg, "sec", sec);
+ devlink_fmsg_u32_pair_put(fmsg, "msec", msec);
+ devlink_fmsg_obj_nest_end(fmsg);
+ devlink_fmsg_pair_nest_end(fmsg);
+ devlink_fmsg_obj_nest_end(fmsg);
+ devlink_fmsg_pair_nest_end(fmsg);
+
+ return 0;
+}
+
+void __printf(2, 3)
+fbnic_devlink_fw_report(struct fbnic_dev *fbd, const char *format, ...)
+{
+ char msg[FBNIC_FW_LOG_MAX_SIZE];
+ va_list args;
+
+ va_start(args, format);
+ vsnprintf(msg, FBNIC_FW_LOG_MAX_SIZE, format, args);
+ va_end(args);
+
+ devlink_health_report(fbd->fw_reporter, msg, fbd);
+ if (fbnic_fw_log_ready(fbd))
+ fbnic_fw_log_write(fbd, 0, fbd->firmware_time, msg);
+}
+
+static const struct devlink_health_reporter_ops fbnic_fw_ops = {
+ .name = "fw",
+ .dump = fbnic_fw_reporter_dump,
+ .diagnose = fbnic_fw_reporter_diagnose,
+};
+
+static u32 fbnic_read_otp_status(struct fbnic_dev *fbd)
+{
+ return fbnic_fw_rd32(fbd, FBNIC_NS_OTP_STATUS);
+}
+
+static int
+fbnic_otp_reporter_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_dev *fbd = devlink_health_reporter_priv(reporter);
+ u32 otp_status, otp_write_status, m;
+
+ otp_status = fbnic_read_otp_status(fbd);
+ otp_write_status = fbnic_fw_rd32(fbd, FBNIC_NS_OTP_WRITE_STATUS);
+
+ /* Dump OTP status */
+ devlink_fmsg_pair_nest_start(fmsg, "OTP");
+ devlink_fmsg_obj_nest_start(fmsg);
+
+ devlink_fmsg_u32_pair_put(fmsg, "Status", otp_status);
+
+ /* Extract OTP Write Data status */
+ m = FBNIC_NS_OTP_WRITE_DATA_STATUS_MASK;
+ devlink_fmsg_u32_pair_put(fmsg, "Data",
+ FIELD_GET(m, otp_write_status));
+
+ /* Extract OTP Write ECC status */
+ m = FBNIC_NS_OTP_WRITE_ECC_STATUS_MASK;
+ devlink_fmsg_u32_pair_put(fmsg, "ECC",
+ FIELD_GET(m, otp_write_status));
+
+ devlink_fmsg_obj_nest_end(fmsg);
+ devlink_fmsg_pair_nest_end(fmsg);
+
+ return 0;
+}
+
+void fbnic_devlink_otp_check(struct fbnic_dev *fbd, const char *msg)
+{
+ /* Check if there is anything to report */
+ if (!fbnic_read_otp_status(fbd))
+ return;
+
+ devlink_health_report(fbd->otp_reporter, msg, fbd);
+ if (fbnic_fw_log_ready(fbd))
+ fbnic_fw_log_write(fbd, 0, fbd->firmware_time, msg);
+}
+
+static const struct devlink_health_reporter_ops fbnic_otp_ops = {
+ .name = "otp",
+ .dump = fbnic_otp_reporter_dump,
+};
+
+int fbnic_devlink_health_create(struct fbnic_dev *fbd)
+{
+ fbd->fw_reporter = devlink_health_reporter_create(priv_to_devlink(fbd),
+ &fbnic_fw_ops, fbd);
+ if (IS_ERR(fbd->fw_reporter)) {
+ dev_warn(fbd->dev,
+ "Failed to create FW fault reporter: %pe\n",
+ fbd->fw_reporter);
+ return PTR_ERR(fbd->fw_reporter);
+ }
+
+ fbd->otp_reporter = devlink_health_reporter_create(priv_to_devlink(fbd),
+ &fbnic_otp_ops, fbd);
+ if (IS_ERR(fbd->otp_reporter)) {
+ devlink_health_reporter_destroy(fbd->fw_reporter);
+ dev_warn(fbd->dev,
+ "Failed to create OTP fault reporter: %pe\n",
+ fbd->otp_reporter);
+ return PTR_ERR(fbd->otp_reporter);
+ }
+
+ return 0;
+}
+
+void fbnic_devlink_health_destroy(struct fbnic_dev *fbd)
+{
+ devlink_health_reporter_destroy(fbd->otp_reporter);
+ devlink_health_reporter_destroy(fbd->fw_reporter);
+}
+
+void fbnic_devlink_free(struct fbnic_dev *fbd)
+{
+ struct devlink *devlink = priv_to_devlink(fbd);
+
+ devlink_free(devlink);
+}
+
+struct fbnic_dev *fbnic_devlink_alloc(struct pci_dev *pdev)
+{
+ void __iomem * const *iomap_table;
+ struct devlink *devlink;
+ struct fbnic_dev *fbd;
+
+ devlink = devlink_alloc(&fbnic_devlink_ops, sizeof(struct fbnic_dev),
+ &pdev->dev);
+ if (!devlink)
+ return NULL;
+
+ fbd = devlink_priv(devlink);
+ pci_set_drvdata(pdev, fbd);
+ fbd->dev = &pdev->dev;
+
+ iomap_table = pcim_iomap_table(pdev);
+ fbd->uc_addr0 = iomap_table[0];
+ fbd->uc_addr4 = iomap_table[4];
+
+ fbd->dsn = pci_get_dsn(pdev);
+ fbd->mps = pcie_get_mps(pdev);
+ fbd->readrq = pcie_get_readrq(pdev);
+
+ fbd->mac_addr_boundary = FBNIC_RPC_TCAM_MACDA_DEFAULT_BOUNDARY;
+
+ return fbd;
+}
+
+void fbnic_devlink_register(struct fbnic_dev *fbd)
+{
+ struct devlink *devlink = priv_to_devlink(fbd);
+
+ devlink_register(devlink);
+}
+
+void fbnic_devlink_unregister(struct fbnic_dev *fbd)
+{
+ struct devlink *devlink = priv_to_devlink(fbd);
+
+ devlink_unregister(devlink);
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_drvinfo.h b/drivers/net/ethernet/meta/fbnic/fbnic_drvinfo.h
new file mode 100644
index 000000000000..809ba6729442
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_drvinfo.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#define DRV_NAME "fbnic"
+#define DRV_SUMMARY "Meta(R) Host Network Interface Driver"
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
new file mode 100644
index 000000000000..693ebdf38705
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
@@ -0,0 +1,1924 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/ethtool.h>
+#include <linux/ethtool_netlink.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <net/ipv6.h>
+
+#include "fbnic.h"
+#include "fbnic_netdev.h"
+#include "fbnic_tlv.h"
+
+struct fbnic_stat {
+ u8 string[ETH_GSTRING_LEN];
+ unsigned int size;
+ unsigned int offset;
+};
+
+#define FBNIC_STAT_FIELDS(type, name, stat) { \
+ .string = name, \
+ .size = sizeof_field(struct type, stat), \
+ .offset = offsetof(struct type, stat), \
+}
+
+/* Hardware statistics not captured in rtnl_link_stats */
+#define FBNIC_HW_STAT(name, stat) \
+ FBNIC_STAT_FIELDS(fbnic_hw_stats, name, stat)
+
+static const struct fbnic_stat fbnic_gstrings_hw_stats[] = {
+ /* TTI */
+ FBNIC_HW_STAT("tti_cm_drop_frames", tti.cm_drop.frames),
+ FBNIC_HW_STAT("tti_cm_drop_bytes", tti.cm_drop.bytes),
+ FBNIC_HW_STAT("tti_frame_drop_frames", tti.frame_drop.frames),
+ FBNIC_HW_STAT("tti_frame_drop_bytes", tti.frame_drop.bytes),
+ FBNIC_HW_STAT("tti_tbi_drop_frames", tti.tbi_drop.frames),
+ FBNIC_HW_STAT("tti_tbi_drop_bytes", tti.tbi_drop.bytes),
+
+ /* TMI */
+ FBNIC_HW_STAT("ptp_illegal_req", tmi.ptp_illegal_req),
+ FBNIC_HW_STAT("ptp_good_ts", tmi.ptp_good_ts),
+ FBNIC_HW_STAT("ptp_bad_ts", tmi.ptp_bad_ts),
+
+ /* RPC */
+ FBNIC_HW_STAT("rpc_unkn_etype", rpc.unkn_etype),
+ FBNIC_HW_STAT("rpc_unkn_ext_hdr", rpc.unkn_ext_hdr),
+ FBNIC_HW_STAT("rpc_ipv4_frag", rpc.ipv4_frag),
+ FBNIC_HW_STAT("rpc_ipv6_frag", rpc.ipv6_frag),
+ FBNIC_HW_STAT("rpc_ipv4_esp", rpc.ipv4_esp),
+ FBNIC_HW_STAT("rpc_ipv6_esp", rpc.ipv6_esp),
+ FBNIC_HW_STAT("rpc_tcp_opt_err", rpc.tcp_opt_err),
+ FBNIC_HW_STAT("rpc_out_of_hdr_err", rpc.out_of_hdr_err),
+};
+
+#define FBNIC_HW_FIXED_STATS_LEN ARRAY_SIZE(fbnic_gstrings_hw_stats)
+
+#define FBNIC_RXB_ENQUEUE_STAT(name, stat) \
+ FBNIC_STAT_FIELDS(fbnic_rxb_enqueue_stats, name, stat)
+
+static const struct fbnic_stat fbnic_gstrings_rxb_enqueue_stats[] = {
+ FBNIC_RXB_ENQUEUE_STAT("rxb_integrity_err%u", integrity_err),
+ FBNIC_RXB_ENQUEUE_STAT("rxb_mac_err%u", mac_err),
+ FBNIC_RXB_ENQUEUE_STAT("rxb_parser_err%u", parser_err),
+ FBNIC_RXB_ENQUEUE_STAT("rxb_frm_err%u", frm_err),
+
+ FBNIC_RXB_ENQUEUE_STAT("rxb_drbo%u_frames", drbo.frames),
+ FBNIC_RXB_ENQUEUE_STAT("rxb_drbo%u_bytes", drbo.bytes),
+};
+
+#define FBNIC_HW_RXB_ENQUEUE_STATS_LEN \
+ ARRAY_SIZE(fbnic_gstrings_rxb_enqueue_stats)
+
+#define FBNIC_RXB_FIFO_STAT(name, stat) \
+ FBNIC_STAT_FIELDS(fbnic_rxb_fifo_stats, name, stat)
+
+static const struct fbnic_stat fbnic_gstrings_rxb_fifo_stats[] = {
+ FBNIC_RXB_FIFO_STAT("rxb_fifo%u_drop", trans_drop),
+ FBNIC_RXB_FIFO_STAT("rxb_fifo%u_dropped_frames", drop.frames),
+ FBNIC_RXB_FIFO_STAT("rxb_fifo%u_ecn", trans_ecn),
+ FBNIC_RXB_FIFO_STAT("rxb_fifo%u_level", level),
+};
+
+#define FBNIC_HW_RXB_FIFO_STATS_LEN ARRAY_SIZE(fbnic_gstrings_rxb_fifo_stats)
+
+#define FBNIC_RXB_DEQUEUE_STAT(name, stat) \
+ FBNIC_STAT_FIELDS(fbnic_rxb_dequeue_stats, name, stat)
+
+static const struct fbnic_stat fbnic_gstrings_rxb_dequeue_stats[] = {
+ FBNIC_RXB_DEQUEUE_STAT("rxb_intf%u_frames", intf.frames),
+ FBNIC_RXB_DEQUEUE_STAT("rxb_intf%u_bytes", intf.bytes),
+ FBNIC_RXB_DEQUEUE_STAT("rxb_pbuf%u_frames", pbuf.frames),
+ FBNIC_RXB_DEQUEUE_STAT("rxb_pbuf%u_bytes", pbuf.bytes),
+};
+
+#define FBNIC_HW_RXB_DEQUEUE_STATS_LEN \
+ ARRAY_SIZE(fbnic_gstrings_rxb_dequeue_stats)
+
+#define FBNIC_HW_Q_STAT(name, stat) \
+ FBNIC_STAT_FIELDS(fbnic_hw_q_stats, name, stat.value)
+
+static const struct fbnic_stat fbnic_gstrings_hw_q_stats[] = {
+ FBNIC_HW_Q_STAT("rde_%u_pkt_err", rde_pkt_err),
+ FBNIC_HW_Q_STAT("rde_%u_pkt_cq_drop", rde_pkt_cq_drop),
+ FBNIC_HW_Q_STAT("rde_%u_pkt_bdq_drop", rde_pkt_bdq_drop),
+};
+
+#define FBNIC_HW_Q_STATS_LEN ARRAY_SIZE(fbnic_gstrings_hw_q_stats)
+#define FBNIC_HW_STATS_LEN \
+ (FBNIC_HW_FIXED_STATS_LEN + \
+ FBNIC_HW_RXB_ENQUEUE_STATS_LEN * FBNIC_RXB_ENQUEUE_INDICES + \
+ FBNIC_HW_RXB_FIFO_STATS_LEN * FBNIC_RXB_FIFO_INDICES + \
+ FBNIC_HW_RXB_DEQUEUE_STATS_LEN * FBNIC_RXB_DEQUEUE_INDICES + \
+ FBNIC_HW_Q_STATS_LEN * FBNIC_MAX_QUEUES)
+
+#define FBNIC_QUEUE_STAT(name, stat) \
+ FBNIC_STAT_FIELDS(fbnic_ring, name, stat)
+
+static const struct fbnic_stat fbnic_gstrings_xdp_stats[] = {
+ FBNIC_QUEUE_STAT("xdp_tx_queue_%u_packets", stats.packets),
+ FBNIC_QUEUE_STAT("xdp_tx_queue_%u_bytes", stats.bytes),
+ FBNIC_QUEUE_STAT("xdp_tx_queue_%u_dropped", stats.dropped),
+};
+
+#define FBNIC_XDP_STATS_LEN ARRAY_SIZE(fbnic_gstrings_xdp_stats)
+
+#define FBNIC_STATS_LEN \
+ (FBNIC_HW_STATS_LEN + FBNIC_XDP_STATS_LEN * FBNIC_MAX_XDPQS)
+
+static void
+fbnic_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ fbnic_get_fw_ver_commit_str(fbd, drvinfo->fw_version,
+ sizeof(drvinfo->fw_version));
+}
+
+static int fbnic_get_regs_len(struct net_device *netdev)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ return fbnic_csr_regs_len(fbn->fbd) * sizeof(u32);
+}
+
+static void fbnic_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs, void *data)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ fbnic_csr_get_regs(fbn->fbd, data, &regs->version);
+}
+
+static struct fbnic_net *fbnic_clone_create(struct fbnic_net *orig)
+{
+ struct fbnic_net *clone;
+
+ clone = kmemdup(orig, sizeof(*orig), GFP_KERNEL);
+ if (!clone)
+ return NULL;
+
+ memset(clone->tx, 0, sizeof(clone->tx));
+ memset(clone->rx, 0, sizeof(clone->rx));
+ memset(clone->napi, 0, sizeof(clone->napi));
+ return clone;
+}
+
+static void fbnic_clone_swap_cfg(struct fbnic_net *orig,
+ struct fbnic_net *clone)
+{
+ swap(clone->rcq_size, orig->rcq_size);
+ swap(clone->hpq_size, orig->hpq_size);
+ swap(clone->ppq_size, orig->ppq_size);
+ swap(clone->txq_size, orig->txq_size);
+ swap(clone->num_rx_queues, orig->num_rx_queues);
+ swap(clone->num_tx_queues, orig->num_tx_queues);
+ swap(clone->num_napi, orig->num_napi);
+ swap(clone->hds_thresh, orig->hds_thresh);
+}
+
+static void fbnic_aggregate_vector_counters(struct fbnic_net *fbn,
+ struct fbnic_napi_vector *nv)
+{
+ int i, j;
+
+ for (i = 0; i < nv->txt_count; i++) {
+ fbnic_aggregate_ring_tx_counters(fbn, &nv->qt[i].sub0);
+ fbnic_aggregate_ring_xdp_counters(fbn, &nv->qt[i].sub1);
+ fbnic_aggregate_ring_tx_counters(fbn, &nv->qt[i].cmpl);
+ }
+
+ for (j = 0; j < nv->rxt_count; j++, i++) {
+ fbnic_aggregate_ring_bdq_counters(fbn, &nv->qt[i].sub0);
+ fbnic_aggregate_ring_bdq_counters(fbn, &nv->qt[i].sub1);
+ fbnic_aggregate_ring_rx_counters(fbn, &nv->qt[i].cmpl);
+ }
+}
+
+static void fbnic_clone_swap(struct fbnic_net *orig,
+ struct fbnic_net *clone)
+{
+ struct fbnic_dev *fbd = orig->fbd;
+ unsigned int i;
+
+ for (i = 0; i < max(clone->num_napi, orig->num_napi); i++)
+ fbnic_synchronize_irq(fbd, FBNIC_NON_NAPI_VECTORS + i);
+ for (i = 0; i < orig->num_napi; i++)
+ fbnic_aggregate_vector_counters(orig, orig->napi[i]);
+
+ fbnic_clone_swap_cfg(orig, clone);
+
+ for (i = 0; i < ARRAY_SIZE(orig->napi); i++)
+ swap(clone->napi[i], orig->napi[i]);
+ for (i = 0; i < ARRAY_SIZE(orig->tx); i++)
+ swap(clone->tx[i], orig->tx[i]);
+ for (i = 0; i < ARRAY_SIZE(orig->rx); i++)
+ swap(clone->rx[i], orig->rx[i]);
+}
+
+static void fbnic_clone_free(struct fbnic_net *clone)
+{
+ kfree(clone);
+}
+
+static int fbnic_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ ec->tx_coalesce_usecs = fbn->tx_usecs;
+ ec->rx_coalesce_usecs = fbn->rx_usecs;
+ ec->rx_max_coalesced_frames = fbn->rx_max_frames;
+
+ return 0;
+}
+
+static int fbnic_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ /* Verify against hardware limits */
+ if (ec->rx_coalesce_usecs > FIELD_MAX(FBNIC_INTR_CQ_REARM_RCQ_TIMEOUT)) {
+ NL_SET_ERR_MSG_MOD(extack, "rx_usecs is above device max");
+ return -EINVAL;
+ }
+ if (ec->tx_coalesce_usecs > FIELD_MAX(FBNIC_INTR_CQ_REARM_TCQ_TIMEOUT)) {
+ NL_SET_ERR_MSG_MOD(extack, "tx_usecs is above device max");
+ return -EINVAL;
+ }
+ if (ec->rx_max_coalesced_frames >
+ FIELD_MAX(FBNIC_QUEUE_RIM_THRESHOLD_RCD_MASK) /
+ FBNIC_MIN_RXD_PER_FRAME) {
+ NL_SET_ERR_MSG_MOD(extack, "rx_frames is above device max");
+ return -EINVAL;
+ }
+
+ fbn->tx_usecs = ec->tx_coalesce_usecs;
+ fbn->rx_usecs = ec->rx_coalesce_usecs;
+ fbn->rx_max_frames = ec->rx_max_coalesced_frames;
+
+ if (netif_running(netdev)) {
+ int i;
+
+ for (i = 0; i < fbn->num_napi; i++) {
+ struct fbnic_napi_vector *nv = fbn->napi[i];
+
+ fbnic_config_txrx_usecs(nv, 0);
+ fbnic_config_rx_frames(nv);
+ }
+ }
+
+ return 0;
+}
+
+static void
+fbnic_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ ring->rx_max_pending = FBNIC_QUEUE_SIZE_MAX;
+ ring->rx_mini_max_pending = FBNIC_QUEUE_SIZE_MAX;
+ ring->rx_jumbo_max_pending = FBNIC_QUEUE_SIZE_MAX;
+ ring->tx_max_pending = FBNIC_QUEUE_SIZE_MAX;
+
+ ring->rx_pending = fbn->rcq_size;
+ ring->rx_mini_pending = fbn->hpq_size;
+ ring->rx_jumbo_pending = fbn->ppq_size;
+ ring->tx_pending = fbn->txq_size;
+
+ kernel_ring->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_ENABLED;
+ kernel_ring->hds_thresh_max = FBNIC_HDS_THRESH_MAX;
+ kernel_ring->hds_thresh = fbn->hds_thresh;
+}
+
+static void fbnic_set_rings(struct fbnic_net *fbn,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring)
+{
+ fbn->rcq_size = ring->rx_pending;
+ fbn->hpq_size = ring->rx_mini_pending;
+ fbn->ppq_size = ring->rx_jumbo_pending;
+ fbn->txq_size = ring->tx_pending;
+ fbn->hds_thresh = kernel_ring->hds_thresh;
+}
+
+static int
+fbnic_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_net *clone;
+ int err;
+
+ ring->rx_pending = roundup_pow_of_two(ring->rx_pending);
+ ring->rx_mini_pending = roundup_pow_of_two(ring->rx_mini_pending);
+ ring->rx_jumbo_pending = roundup_pow_of_two(ring->rx_jumbo_pending);
+ ring->tx_pending = roundup_pow_of_two(ring->tx_pending);
+
+	/* These are absolute minimums that allow the device and driver to
+	 * operate, but they do not necessarily guarantee reasonable
+	 * performance.  An Rx queue size below 128 or BDQs smaller than 64
+	 * are likely suboptimal at best.
+	 */
+ if (ring->rx_pending < max(FBNIC_QUEUE_SIZE_MIN, FBNIC_RX_DESC_MIN) ||
+ ring->rx_mini_pending < FBNIC_QUEUE_SIZE_MIN ||
+ ring->rx_jumbo_pending < FBNIC_QUEUE_SIZE_MIN ||
+ ring->tx_pending < max(FBNIC_QUEUE_SIZE_MIN, FBNIC_TX_DESC_MIN)) {
+ NL_SET_ERR_MSG_MOD(extack, "requested ring size too small");
+ return -EINVAL;
+ }
+
+ if (kernel_ring->tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_DISABLED) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot disable TCP data split");
+ return -EINVAL;
+ }
+
+ /* If an XDP program is attached, we should check for potential frame
+ * splitting. If the new HDS threshold can cause splitting, we should
+ * only allow if the attached XDP program can handle frags.
+ */
+ if (fbnic_check_split_frames(fbn->xdp_prog, netdev->mtu,
+ kernel_ring->hds_thresh)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Use higher HDS threshold or multi-buf capable program");
+ return -EINVAL;
+ }
+
+ if (!netif_running(netdev)) {
+ fbnic_set_rings(fbn, ring, kernel_ring);
+ return 0;
+ }
+
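+	/* Reconfigure a running device by building a clone with the new
+	 * ring sizes, allocating its resources up front, and swapping it in
+	 * only once the existing queues have been quiesced.
+	 */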
+ clone = fbnic_clone_create(fbn);
+ if (!clone)
+ return -ENOMEM;
+
+ fbnic_set_rings(clone, ring, kernel_ring);
+
+ err = fbnic_alloc_napi_vectors(clone);
+ if (err)
+ goto err_free_clone;
+
+ err = fbnic_alloc_resources(clone);
+ if (err)
+ goto err_free_napis;
+
+ fbnic_down_noidle(fbn);
+ err = fbnic_wait_all_queues_idle(fbn->fbd, true);
+ if (err)
+ goto err_start_stack;
+
+ err = fbnic_set_netif_queues(clone);
+ if (err)
+ goto err_start_stack;
+
+ /* Nothing can fail past this point */
+ fbnic_flush(fbn);
+
+ fbnic_clone_swap(fbn, clone);
+
+ fbnic_up(fbn);
+
+ fbnic_free_resources(clone);
+ fbnic_free_napi_vectors(clone);
+ fbnic_clone_free(clone);
+
+ return 0;
+
+err_start_stack:
+ fbnic_flush(fbn);
+ fbnic_up(fbn);
+ fbnic_free_resources(clone);
+err_free_napis:
+ fbnic_free_napi_vectors(clone);
+err_free_clone:
+ fbnic_clone_free(clone);
+ return err;
+}
+
+static void fbnic_get_rxb_enqueue_strings(u8 **data, unsigned int idx)
+{
+ const struct fbnic_stat *stat;
+ int i;
+
+ stat = fbnic_gstrings_rxb_enqueue_stats;
+ for (i = 0; i < FBNIC_HW_RXB_ENQUEUE_STATS_LEN; i++, stat++)
+ ethtool_sprintf(data, stat->string, idx);
+}
+
+static void fbnic_get_rxb_fifo_strings(u8 **data, unsigned int idx)
+{
+ const struct fbnic_stat *stat;
+ int i;
+
+ stat = fbnic_gstrings_rxb_fifo_stats;
+ for (i = 0; i < FBNIC_HW_RXB_FIFO_STATS_LEN; i++, stat++)
+ ethtool_sprintf(data, stat->string, idx);
+}
+
+static void fbnic_get_rxb_dequeue_strings(u8 **data, unsigned int idx)
+{
+ const struct fbnic_stat *stat;
+ int i;
+
+ stat = fbnic_gstrings_rxb_dequeue_stats;
+ for (i = 0; i < FBNIC_HW_RXB_DEQUEUE_STATS_LEN; i++, stat++)
+ ethtool_sprintf(data, stat->string, idx);
+}
+
+static void fbnic_get_xdp_queue_strings(u8 **data, unsigned int idx)
+{
+ const struct fbnic_stat *stat;
+ int i;
+
+ stat = fbnic_gstrings_xdp_stats;
+ for (i = 0; i < FBNIC_XDP_STATS_LEN; i++, stat++)
+ ethtool_sprintf(data, stat->string, idx);
+}
+
+static void fbnic_get_strings(struct net_device *dev, u32 sset, u8 *data)
+{
+ const struct fbnic_stat *stat;
+ int i, idx;
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < FBNIC_HW_FIXED_STATS_LEN; i++)
+ ethtool_puts(&data, fbnic_gstrings_hw_stats[i].string);
+
+ for (i = 0; i < FBNIC_RXB_ENQUEUE_INDICES; i++)
+ fbnic_get_rxb_enqueue_strings(&data, i);
+
+ for (i = 0; i < FBNIC_RXB_FIFO_INDICES; i++)
+ fbnic_get_rxb_fifo_strings(&data, i);
+
+ for (i = 0; i < FBNIC_RXB_DEQUEUE_INDICES; i++)
+ fbnic_get_rxb_dequeue_strings(&data, i);
+
+ for (idx = 0; idx < FBNIC_MAX_QUEUES; idx++) {
+ stat = fbnic_gstrings_hw_q_stats;
+
+ for (i = 0; i < FBNIC_HW_Q_STATS_LEN; i++, stat++)
+ ethtool_sprintf(&data, stat->string, idx);
+ }
+
+ for (i = 0; i < FBNIC_MAX_XDPQS; i++)
+ fbnic_get_xdp_queue_strings(&data, i);
+ break;
+ }
+}
+
+static void fbnic_report_hw_stats(const struct fbnic_stat *stat,
+ const void *base, int len, u64 **data)
+{
+ while (len--) {
+ u8 *curr = (u8 *)base + stat->offset;
+
+ **data = *(u64 *)curr;
+
+ stat++;
+ (*data)++;
+ }
+}
+
+static void fbnic_get_xdp_queue_stats(struct fbnic_ring *ring, u64 **data)
+{
+ const struct fbnic_stat *stat;
+ int i;
+
+ if (!ring) {
+ *data += FBNIC_XDP_STATS_LEN;
+ return;
+ }
+
+ stat = fbnic_gstrings_xdp_stats;
+ for (i = 0; i < FBNIC_XDP_STATS_LEN; i++, stat++, (*data)++) {
+ u8 *p = (u8 *)ring + stat->offset;
+
+ **data = *(u64 *)p;
+ }
+}
+
+static void fbnic_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct fbnic_net *fbn = netdev_priv(dev);
+ struct fbnic_dev *fbd = fbn->fbd;
+ int i;
+
+ fbnic_get_hw_stats(fbn->fbd);
+
+ spin_lock(&fbd->hw_stats.lock);
+ fbnic_report_hw_stats(fbnic_gstrings_hw_stats, &fbd->hw_stats,
+ FBNIC_HW_FIXED_STATS_LEN, &data);
+
+ for (i = 0; i < FBNIC_RXB_ENQUEUE_INDICES; i++) {
+ const struct fbnic_rxb_enqueue_stats *enq;
+
+ enq = &fbd->hw_stats.rxb.enq[i];
+ fbnic_report_hw_stats(fbnic_gstrings_rxb_enqueue_stats,
+ enq, FBNIC_HW_RXB_ENQUEUE_STATS_LEN,
+ &data);
+ }
+
+ for (i = 0; i < FBNIC_RXB_FIFO_INDICES; i++) {
+ const struct fbnic_rxb_fifo_stats *fifo;
+
+ fifo = &fbd->hw_stats.rxb.fifo[i];
+ fbnic_report_hw_stats(fbnic_gstrings_rxb_fifo_stats,
+ fifo, FBNIC_HW_RXB_FIFO_STATS_LEN,
+ &data);
+ }
+
+ for (i = 0; i < FBNIC_RXB_DEQUEUE_INDICES; i++) {
+ const struct fbnic_rxb_dequeue_stats *deq;
+
+ deq = &fbd->hw_stats.rxb.deq[i];
+ fbnic_report_hw_stats(fbnic_gstrings_rxb_dequeue_stats,
+ deq, FBNIC_HW_RXB_DEQUEUE_STATS_LEN,
+ &data);
+ }
+
+ for (i = 0; i < FBNIC_MAX_QUEUES; i++) {
+ const struct fbnic_hw_q_stats *hw_q = &fbd->hw_stats.hw_q[i];
+
+ fbnic_report_hw_stats(fbnic_gstrings_hw_q_stats, hw_q,
+ FBNIC_HW_Q_STATS_LEN, &data);
+ }
+ spin_unlock(&fbd->hw_stats.lock);
+
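+	/* XDP Tx rings are stored in the tx[] array after the regular Tx
+	 * rings, starting at index FBNIC_MAX_TXQS.
+	 */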
+ for (i = 0; i < FBNIC_MAX_XDPQS; i++)
+ fbnic_get_xdp_queue_stats(fbn->tx[i + FBNIC_MAX_TXQS], &data);
+}
+
+static int fbnic_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return FBNIC_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int fbnic_get_rss_hash_idx(u32 flow_type)
+{
+ switch (flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS)) {
+ case TCP_V4_FLOW:
+ return FBNIC_TCP4_HASH_OPT;
+ case TCP_V6_FLOW:
+ return FBNIC_TCP6_HASH_OPT;
+ case UDP_V4_FLOW:
+ return FBNIC_UDP4_HASH_OPT;
+ case UDP_V6_FLOW:
+ return FBNIC_UDP6_HASH_OPT;
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case IPV4_FLOW:
+ case IPV4_USER_FLOW:
+ return FBNIC_IPV4_HASH_OPT;
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case IPV6_FLOW:
+ case IPV6_USER_FLOW:
+ return FBNIC_IPV6_HASH_OPT;
+ case ETHER_FLOW:
+ return FBNIC_ETHER_HASH_OPT;
+ }
+
+ return -1;
+}
+
+static int fbnic_get_cls_rule_all(struct fbnic_net *fbn,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct fbnic_dev *fbd = fbn->fbd;
+ int i, cnt = 0;
+
+ /* Report maximum rule count */
+ cmd->data = FBNIC_RPC_ACT_TBL_NFC_ENTRIES;
+
+ for (i = 0; i < FBNIC_RPC_ACT_TBL_NFC_ENTRIES; i++) {
+ int idx = i + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
+ struct fbnic_act_tcam *act_tcam;
+
+ act_tcam = &fbd->act_tcam[idx];
+ if (act_tcam->state != FBNIC_TCAM_S_VALID)
+ continue;
+
+ if (rule_locs) {
+ if (cnt == cmd->rule_cnt)
+ return -EMSGSIZE;
+
+ rule_locs[cnt] = i;
+ }
+
+ cnt++;
+ }
+
+ return cnt;
+}
+
+static int fbnic_get_cls_rule(struct fbnic_net *fbn, struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp;
+ struct fbnic_dev *fbd = fbn->fbd;
+ struct fbnic_act_tcam *act_tcam;
+ int idx;
+
+ fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ if (fsp->location >= FBNIC_RPC_ACT_TBL_NFC_ENTRIES)
+ return -EINVAL;
+
+ idx = fsp->location + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
+ act_tcam = &fbd->act_tcam[idx];
+
+ if (act_tcam->state != FBNIC_TCAM_S_VALID)
+ return -EINVAL;
+
+ /* Report maximum rule count */
+ cmd->data = FBNIC_RPC_ACT_TBL_NFC_ENTRIES;
+
+ /* Set flow type field */
+ if (!(act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_IP_VALID)) {
+ fsp->flow_type = ETHER_FLOW;
+ if (!FIELD_GET(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX,
+ act_tcam->mask.tcam[1])) {
+ struct fbnic_mac_addr *mac_addr;
+
+ idx = FIELD_GET(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX,
+ act_tcam->value.tcam[1]);
+ mac_addr = &fbd->mac_addr[idx];
+
+ ether_addr_copy(fsp->h_u.ether_spec.h_dest,
+ mac_addr->value.addr8);
+ eth_broadcast_addr(fsp->m_u.ether_spec.h_dest);
+ }
+ } else if (act_tcam->value.tcam[1] &
+ FBNIC_RPC_TCAM_ACT1_OUTER_IP_VALID) {
+ fsp->flow_type = IPV6_USER_FLOW;
+ fsp->h_u.usr_ip6_spec.l4_proto = IPPROTO_IPV6;
+ fsp->m_u.usr_ip6_spec.l4_proto = 0xff;
+
+ if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX,
+ act_tcam->mask.tcam[0])) {
+ struct fbnic_ip_addr *ip_addr;
+ int i;
+
+ idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX,
+ act_tcam->value.tcam[0]);
+ ip_addr = &fbd->ipo_src[idx];
+
+ for (i = 0; i < 4; i++) {
+ fsp->h_u.usr_ip6_spec.ip6src[i] =
+ ip_addr->value.s6_addr32[i];
+ fsp->m_u.usr_ip6_spec.ip6src[i] =
+ ~ip_addr->mask.s6_addr32[i];
+ }
+ }
+
+ if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX,
+ act_tcam->mask.tcam[0])) {
+ struct fbnic_ip_addr *ip_addr;
+ int i;
+
+ idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX,
+ act_tcam->value.tcam[0]);
+ ip_addr = &fbd->ipo_dst[idx];
+
+ for (i = 0; i < 4; i++) {
+ fsp->h_u.usr_ip6_spec.ip6dst[i] =
+ ip_addr->value.s6_addr32[i];
+ fsp->m_u.usr_ip6_spec.ip6dst[i] =
+ ~ip_addr->mask.s6_addr32[i];
+ }
+ }
+ } else if ((act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_IP_IS_V6)) {
+ if (act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_L4_VALID) {
+ if (act_tcam->value.tcam[1] &
+ FBNIC_RPC_TCAM_ACT1_L4_IS_UDP)
+ fsp->flow_type = UDP_V6_FLOW;
+ else
+ fsp->flow_type = TCP_V6_FLOW;
+ fsp->h_u.tcp_ip6_spec.psrc =
+ cpu_to_be16(act_tcam->value.tcam[3]);
+ fsp->m_u.tcp_ip6_spec.psrc =
+ cpu_to_be16(~act_tcam->mask.tcam[3]);
+ fsp->h_u.tcp_ip6_spec.pdst =
+ cpu_to_be16(act_tcam->value.tcam[4]);
+ fsp->m_u.tcp_ip6_spec.pdst =
+ cpu_to_be16(~act_tcam->mask.tcam[4]);
+ } else {
+ fsp->flow_type = IPV6_USER_FLOW;
+ }
+
+ if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
+ act_tcam->mask.tcam[0])) {
+ struct fbnic_ip_addr *ip_addr;
+ int i;
+
+ idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
+ act_tcam->value.tcam[0]);
+ ip_addr = &fbd->ip_src[idx];
+
+ for (i = 0; i < 4; i++) {
+ fsp->h_u.usr_ip6_spec.ip6src[i] =
+ ip_addr->value.s6_addr32[i];
+ fsp->m_u.usr_ip6_spec.ip6src[i] =
+ ~ip_addr->mask.s6_addr32[i];
+ }
+ }
+
+ if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
+ act_tcam->mask.tcam[0])) {
+ struct fbnic_ip_addr *ip_addr;
+ int i;
+
+ idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
+ act_tcam->value.tcam[0]);
+ ip_addr = &fbd->ip_dst[idx];
+
+ for (i = 0; i < 4; i++) {
+ fsp->h_u.usr_ip6_spec.ip6dst[i] =
+ ip_addr->value.s6_addr32[i];
+ fsp->m_u.usr_ip6_spec.ip6dst[i] =
+ ~ip_addr->mask.s6_addr32[i];
+ }
+ }
+ } else {
+ if (act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_L4_VALID) {
+ if (act_tcam->value.tcam[1] &
+ FBNIC_RPC_TCAM_ACT1_L4_IS_UDP)
+ fsp->flow_type = UDP_V4_FLOW;
+ else
+ fsp->flow_type = TCP_V4_FLOW;
+ fsp->h_u.tcp_ip4_spec.psrc =
+ cpu_to_be16(act_tcam->value.tcam[3]);
+ fsp->m_u.tcp_ip4_spec.psrc =
+ cpu_to_be16(~act_tcam->mask.tcam[3]);
+ fsp->h_u.tcp_ip4_spec.pdst =
+ cpu_to_be16(act_tcam->value.tcam[4]);
+ fsp->m_u.tcp_ip4_spec.pdst =
+ cpu_to_be16(~act_tcam->mask.tcam[4]);
+ } else {
+ fsp->flow_type = IPV4_USER_FLOW;
+ fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+ }
+
+ if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
+ act_tcam->mask.tcam[0])) {
+ struct fbnic_ip_addr *ip_addr;
+
+ idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
+ act_tcam->value.tcam[0]);
+ ip_addr = &fbd->ip_src[idx];
+
+ fsp->h_u.usr_ip4_spec.ip4src =
+ ip_addr->value.s6_addr32[3];
+ fsp->m_u.usr_ip4_spec.ip4src =
+ ~ip_addr->mask.s6_addr32[3];
+ }
+
+ if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
+ act_tcam->mask.tcam[0])) {
+ struct fbnic_ip_addr *ip_addr;
+
+ idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
+ act_tcam->value.tcam[0]);
+ ip_addr = &fbd->ip_dst[idx];
+
+ fsp->h_u.usr_ip4_spec.ip4dst =
+ ip_addr->value.s6_addr32[3];
+ fsp->m_u.usr_ip4_spec.ip4dst =
+ ~ip_addr->mask.s6_addr32[3];
+ }
+ }
+
+ /* Record action */
+ if (act_tcam->dest & FBNIC_RPC_ACT_TBL0_DROP)
+ fsp->ring_cookie = RX_CLS_FLOW_DISC;
+ else if (act_tcam->dest & FBNIC_RPC_ACT_TBL0_Q_SEL)
+ fsp->ring_cookie = FIELD_GET(FBNIC_RPC_ACT_TBL0_Q_ID,
+ act_tcam->dest);
+ else
+ fsp->flow_type |= FLOW_RSS;
+
+ cmd->rss_context = FIELD_GET(FBNIC_RPC_ACT_TBL0_RSS_CTXT_ID,
+ act_tcam->dest);
+
+ return 0;
+}
+
+static int fbnic_get_rxnfc(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd, u32 *rule_locs)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ int ret = -EOPNOTSUPP;
+ u32 special = 0;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = fbn->num_rx_queues;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ ret = fbnic_get_cls_rule(fbn, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ rule_locs = NULL;
+ special = RX_CLS_LOC_SPECIAL;
+ fallthrough;
+ case ETHTOOL_GRXCLSRLALL:
+ ret = fbnic_get_cls_rule_all(fbn, cmd, rule_locs);
+ if (ret < 0)
+ break;
+
+ cmd->data |= special;
+ cmd->rule_cnt = ret;
+ ret = 0;
+ break;
+ }
+
+ return ret;
+}
+
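+/* Return the highest-numbered unused NFC rule location, or -ENOSPC if the
+ * table is full.
+ */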
+static int fbnic_cls_rule_any_loc(struct fbnic_dev *fbd)
+{
+ int i;
+
+ for (i = FBNIC_RPC_ACT_TBL_NFC_ENTRIES; i--;) {
+ int idx = i + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
+
+ if (fbd->act_tcam[idx].state != FBNIC_TCAM_S_VALID)
+ return i;
+ }
+
+ return -ENOSPC;
+}
+
+static int fbnic_set_cls_rule_ins(struct fbnic_net *fbn,
+ const struct ethtool_rxnfc *cmd)
+{
+ u16 flow_value = 0, flow_mask = 0xffff, ip_value = 0, ip_mask = 0xffff;
+ u16 sport = 0, sport_mask = ~0, dport = 0, dport_mask = ~0;
+ u16 misc = 0, misc_mask = ~0;
+ u32 dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
+ FBNIC_RPC_ACT_TBL0_DEST_HOST);
+ struct fbnic_ip_addr *ip_src = NULL, *ip_dst = NULL;
+ struct fbnic_mac_addr *mac_addr = NULL;
+ struct ethtool_rx_flow_spec *fsp;
+ struct fbnic_dev *fbd = fbn->fbd;
+ struct fbnic_act_tcam *act_tcam;
+ struct in6_addr *addr6, *mask6;
+ struct in_addr *addr4, *mask4;
+ int hash_idx, location;
+ u32 flow_type;
+ int idx, j;
+
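+	/* Build the action TCAM value/mask words for the rule.  Any MAC or
+	 * IP addresses it matches on are synced into the shared MACDA/IP
+	 * TCAM tables and referenced here by index.
+	 */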
+ fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ if (fsp->location != RX_CLS_LOC_ANY)
+ return -EINVAL;
+ location = fbnic_cls_rule_any_loc(fbd);
+ if (location < 0)
+ return location;
+
+ if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
+ dest = FBNIC_RPC_ACT_TBL0_DROP;
+ } else if (fsp->flow_type & FLOW_RSS) {
+ if (cmd->rss_context == 1)
+ dest |= FBNIC_RPC_ACT_TBL0_RSS_CTXT_ID;
+ } else {
+ u32 ring_idx = ethtool_get_flow_spec_ring(fsp->ring_cookie);
+
+ if (ring_idx >= fbn->num_rx_queues)
+ return -EINVAL;
+
+ dest |= FBNIC_RPC_ACT_TBL0_Q_SEL |
+ FIELD_PREP(FBNIC_RPC_ACT_TBL0_Q_ID, ring_idx);
+ }
+
+ idx = location + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
+ act_tcam = &fbd->act_tcam[idx];
+
+ /* Do not allow overwriting for now.
+ * To support overwriting rules we will need to add logic to free
+ * any IP or MACDA TCAMs that may be associated with the old rule.
+ */
+ if (act_tcam->state != FBNIC_TCAM_S_DISABLED)
+ return -EBUSY;
+
+ flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_RSS);
+ hash_idx = fbnic_get_rss_hash_idx(flow_type);
+
+ switch (flow_type) {
+ case UDP_V4_FLOW:
+udp4_flow:
+ flow_value |= FBNIC_RPC_TCAM_ACT1_L4_IS_UDP;
+ fallthrough;
+ case TCP_V4_FLOW:
+tcp4_flow:
+ flow_value |= FBNIC_RPC_TCAM_ACT1_L4_VALID;
+ flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_L4_IS_UDP |
+ FBNIC_RPC_TCAM_ACT1_L4_VALID);
+
+ sport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.psrc);
+ sport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip4_spec.psrc);
+ dport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.pdst);
+ dport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip4_spec.pdst);
+ goto ip4_flow;
+ case IP_USER_FLOW:
+ if (!fsp->m_u.usr_ip4_spec.proto)
+ goto ip4_flow;
+ if (fsp->m_u.usr_ip4_spec.proto != 0xff)
+ return -EINVAL;
+ if (fsp->h_u.usr_ip4_spec.proto == IPPROTO_UDP)
+ goto udp4_flow;
+ if (fsp->h_u.usr_ip4_spec.proto == IPPROTO_TCP)
+ goto tcp4_flow;
+ return -EINVAL;
+ip4_flow:
+ addr4 = (struct in_addr *)&fsp->h_u.usr_ip4_spec.ip4src;
+ mask4 = (struct in_addr *)&fsp->m_u.usr_ip4_spec.ip4src;
+ if (mask4->s_addr) {
+ ip_src = __fbnic_ip4_sync(fbd, fbd->ip_src,
+ addr4, mask4);
+ if (!ip_src)
+ return -ENOSPC;
+
+ set_bit(idx, ip_src->act_tcam);
+ ip_value |= FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
+ ip_src - fbd->ip_src);
+ ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
+ FBNIC_RPC_TCAM_ACT0_IPSRC_IDX);
+ }
+
+ addr4 = (struct in_addr *)&fsp->h_u.usr_ip4_spec.ip4dst;
+ mask4 = (struct in_addr *)&fsp->m_u.usr_ip4_spec.ip4dst;
+ if (mask4->s_addr) {
+ ip_dst = __fbnic_ip4_sync(fbd, fbd->ip_dst,
+ addr4, mask4);
+ if (!ip_dst) {
+ if (ip_src && ip_src->state == FBNIC_TCAM_S_ADD)
+ memset(ip_src, 0, sizeof(*ip_src));
+ return -ENOSPC;
+ }
+
+ set_bit(idx, ip_dst->act_tcam);
+ ip_value |= FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
+ ip_dst - fbd->ip_dst);
+ ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
+ FBNIC_RPC_TCAM_ACT0_IPDST_IDX);
+ }
+ flow_value |= FBNIC_RPC_TCAM_ACT1_IP_VALID |
+ FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
+ flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_IP_IS_V6 |
+ FBNIC_RPC_TCAM_ACT1_IP_VALID |
+ FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID);
+ break;
+ case UDP_V6_FLOW:
+udp6_flow:
+ flow_value |= FBNIC_RPC_TCAM_ACT1_L4_IS_UDP;
+ fallthrough;
+ case TCP_V6_FLOW:
+tcp6_flow:
+ flow_value |= FBNIC_RPC_TCAM_ACT1_L4_VALID;
+ flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_L4_IS_UDP |
+ FBNIC_RPC_TCAM_ACT1_L4_VALID);
+
+ sport = be16_to_cpu(fsp->h_u.tcp_ip6_spec.psrc);
+ sport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip6_spec.psrc);
+ dport = be16_to_cpu(fsp->h_u.tcp_ip6_spec.pdst);
+ dport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip6_spec.pdst);
+ goto ipv6_flow;
+ case IPV6_USER_FLOW:
+ if (!fsp->m_u.usr_ip6_spec.l4_proto)
+ goto ipv6_flow;
+
+ if (fsp->m_u.usr_ip6_spec.l4_proto != 0xff)
+ return -EINVAL;
+ if (fsp->h_u.usr_ip6_spec.l4_proto == IPPROTO_UDP)
+ goto udp6_flow;
+ if (fsp->h_u.usr_ip6_spec.l4_proto == IPPROTO_TCP)
+ goto tcp6_flow;
+ if (fsp->h_u.usr_ip6_spec.l4_proto != IPPROTO_IPV6)
+ return -EINVAL;
+
+ addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6src;
+ mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6src;
+ if (!ipv6_addr_any(mask6)) {
+ ip_src = __fbnic_ip6_sync(fbd, fbd->ipo_src,
+ addr6, mask6);
+ if (!ip_src)
+ return -ENOSPC;
+
+ set_bit(idx, ip_src->act_tcam);
+ ip_value |=
+ FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_VALID |
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX,
+ ip_src - fbd->ipo_src);
+ ip_mask &=
+ ~(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_VALID |
+ FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX);
+ }
+
+ addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6dst;
+ mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6dst;
+ if (!ipv6_addr_any(mask6)) {
+ ip_dst = __fbnic_ip6_sync(fbd, fbd->ipo_dst,
+ addr6, mask6);
+ if (!ip_dst) {
+ if (ip_src && ip_src->state == FBNIC_TCAM_S_ADD)
+ memset(ip_src, 0, sizeof(*ip_src));
+ return -ENOSPC;
+ }
+
+ set_bit(idx, ip_dst->act_tcam);
+ ip_value |=
+ FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_VALID |
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX,
+ ip_dst - fbd->ipo_dst);
+ ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_VALID |
+ FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX);
+ }
+
+ flow_value |= FBNIC_RPC_TCAM_ACT1_OUTER_IP_VALID;
+ flow_mask &= FBNIC_RPC_TCAM_ACT1_OUTER_IP_VALID;
+ipv6_flow:
+ addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6src;
+ mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6src;
+ if (!ip_src && !ipv6_addr_any(mask6)) {
+ ip_src = __fbnic_ip6_sync(fbd, fbd->ip_src,
+ addr6, mask6);
+ if (!ip_src)
+ return -ENOSPC;
+
+ set_bit(idx, ip_src->act_tcam);
+ ip_value |= FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
+ ip_src - fbd->ip_src);
+ ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
+ FBNIC_RPC_TCAM_ACT0_IPSRC_IDX);
+ }
+
+ addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6dst;
+ mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6dst;
+ if (!ip_dst && !ipv6_addr_any(mask6)) {
+ ip_dst = __fbnic_ip6_sync(fbd, fbd->ip_dst,
+ addr6, mask6);
+ if (!ip_dst) {
+ if (ip_src && ip_src->state == FBNIC_TCAM_S_ADD)
+ memset(ip_src, 0, sizeof(*ip_src));
+ return -ENOSPC;
+ }
+
+ set_bit(idx, ip_dst->act_tcam);
+ ip_value |= FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
+ ip_dst - fbd->ip_dst);
+ ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
+ FBNIC_RPC_TCAM_ACT0_IPDST_IDX);
+ }
+
+ flow_value |= FBNIC_RPC_TCAM_ACT1_IP_IS_V6 |
+ FBNIC_RPC_TCAM_ACT1_IP_VALID |
+ FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
+ flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_IP_IS_V6 |
+ FBNIC_RPC_TCAM_ACT1_IP_VALID |
+ FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID);
+ break;
+ case ETHER_FLOW:
+ if (!is_zero_ether_addr(fsp->m_u.ether_spec.h_dest)) {
+ u8 *addr = fsp->h_u.ether_spec.h_dest;
+ u8 *mask = fsp->m_u.ether_spec.h_dest;
+
+ /* Do not allow MAC addr of 0 */
+ if (is_zero_ether_addr(addr))
+ return -EINVAL;
+
+ /* Only support full MAC address to avoid
+ * conflicts with other MAC addresses.
+ */
+ if (!is_broadcast_ether_addr(mask))
+ return -EINVAL;
+
+ if (is_multicast_ether_addr(addr))
+ mac_addr = __fbnic_mc_sync(fbd, addr);
+ else
+ mac_addr = __fbnic_uc_sync(fbd, addr);
+
+ if (!mac_addr)
+ return -ENOSPC;
+
+ set_bit(idx, mac_addr->act_tcam);
+ flow_value |=
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX,
+ mac_addr - fbd->mac_addr);
+ flow_mask &= ~FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX;
+ }
+
+ flow_value |= FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
+ flow_mask &= ~FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Write action table values */
+ act_tcam->dest = dest;
+ act_tcam->rss_en_mask = fbnic_flow_hash_2_rss_en_mask(fbn, hash_idx);
+
+ /* Write IP Match value/mask to action_tcam[0] */
+ act_tcam->value.tcam[0] = ip_value;
+ act_tcam->mask.tcam[0] = ip_mask;
+
+ /* Write flow type value/mask to action_tcam[1] */
+ act_tcam->value.tcam[1] = flow_value;
+ act_tcam->mask.tcam[1] = flow_mask;
+
+ /* Write error, DSCP, extra L4 matches to action_tcam[2] */
+ act_tcam->value.tcam[2] = misc;
+ act_tcam->mask.tcam[2] = misc_mask;
+
+ /* Write source/destination port values */
+ act_tcam->value.tcam[3] = sport;
+ act_tcam->mask.tcam[3] = sport_mask;
+ act_tcam->value.tcam[4] = dport;
+ act_tcam->mask.tcam[4] = dport_mask;
+
+ for (j = 5; j < FBNIC_RPC_TCAM_ACT_WORD_LEN; j++)
+ act_tcam->mask.tcam[j] = 0xffff;
+
+ act_tcam->state = FBNIC_TCAM_S_UPDATE;
+ fsp->location = location;
+
+ if (netif_running(fbn->netdev)) {
+ fbnic_write_rules(fbd);
+ if (ip_src || ip_dst)
+ fbnic_write_ip_addr(fbd);
+ if (mac_addr)
+ fbnic_write_macda(fbd);
+ }
+
+ return 0;
+}
+
+static void fbnic_clear_nfc_macda(struct fbnic_net *fbn,
+ unsigned int tcam_idx)
+{
+ struct fbnic_dev *fbd = fbn->fbd;
+ int idx;
+
+ for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;)
+ __fbnic_xc_unsync(&fbd->mac_addr[idx], tcam_idx);
+
+ /* Write updates to hardware */
+ if (netif_running(fbn->netdev))
+ fbnic_write_macda(fbd);
+}
+
+static void fbnic_clear_nfc_ip_addr(struct fbnic_net *fbn,
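+/* Quiesce the interrupt vectors, fold the outgoing vectors' counters into
+ * the netdev totals, then exchange the configuration and the NAPI/Tx/Rx
+ * ring pointers between the original and the clone.
+ */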
+ unsigned int tcam_idx)
+{
+ struct fbnic_dev *fbd = fbn->fbd;
+ int idx;
+
+ for (idx = ARRAY_SIZE(fbd->ip_src); idx--;)
+ __fbnic_ip_unsync(&fbd->ip_src[idx], tcam_idx);
+ for (idx = ARRAY_SIZE(fbd->ip_dst); idx--;)
+ __fbnic_ip_unsync(&fbd->ip_dst[idx], tcam_idx);
+ for (idx = ARRAY_SIZE(fbd->ipo_src); idx--;)
+ __fbnic_ip_unsync(&fbd->ipo_src[idx], tcam_idx);
+ for (idx = ARRAY_SIZE(fbd->ipo_dst); idx--;)
+ __fbnic_ip_unsync(&fbd->ipo_dst[idx], tcam_idx);
+
+ /* Write updates to hardware */
+ if (netif_running(fbn->netdev))
+ fbnic_write_ip_addr(fbd);
+}
+
+static int fbnic_set_cls_rule_del(struct fbnic_net *fbn,
+ const struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp;
+ struct fbnic_dev *fbd = fbn->fbd;
+ struct fbnic_act_tcam *act_tcam;
+ int idx;
+
+ fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ if (fsp->location >= FBNIC_RPC_ACT_TBL_NFC_ENTRIES)
+ return -EINVAL;
+
+ idx = fsp->location + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
+ act_tcam = &fbd->act_tcam[idx];
+
+ if (act_tcam->state != FBNIC_TCAM_S_VALID)
+ return -EINVAL;
+
+ act_tcam->state = FBNIC_TCAM_S_DELETE;
+
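+	/* Drop any MACDA or IP address TCAM references held by this rule so
+	 * the shared entries can be reclaimed.
+	 */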
+ if ((act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID) &&
+ (~act_tcam->mask.tcam[1] & FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX))
+ fbnic_clear_nfc_macda(fbn, idx);
+
+ if ((act_tcam->value.tcam[0] &
+ (FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
+ FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
+ FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_VALID |
+ FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_VALID)) &&
+ (~act_tcam->mask.tcam[0] &
+ (FBNIC_RPC_TCAM_ACT0_IPSRC_IDX |
+ FBNIC_RPC_TCAM_ACT0_IPDST_IDX |
+ FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX |
+ FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX)))
+ fbnic_clear_nfc_ip_addr(fbn, idx);
+
+ if (netif_running(fbn->netdev))
+ fbnic_write_rules(fbd);
+
+ return 0;
+}
+
+static int fbnic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ ret = fbnic_set_cls_rule_ins(fbn, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = fbnic_set_cls_rule_del(fbn, cmd);
+ break;
+ }
+
+ return ret;
+}
+
+static u32 fbnic_get_rxfh_key_size(struct net_device *netdev)
+{
+ return FBNIC_RPC_RSS_KEY_BYTE_LEN;
+}
+
+static u32 fbnic_get_rxfh_indir_size(struct net_device *netdev)
+{
+ return FBNIC_RPC_RSS_TBL_SIZE;
+}
+
+static int
+fbnic_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ unsigned int i;
+
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+
+ if (rxfh->key) {
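+		/* Key words are stored most-significant byte first; shift
+		 * byte i up to the top of its word and extract it.
+		 */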
+ for (i = 0; i < FBNIC_RPC_RSS_KEY_BYTE_LEN; i++) {
+ u32 rss_key = fbn->rss_key[i / 4] << ((i % 4) * 8);
+
+ rxfh->key[i] = rss_key >> 24;
+ }
+ }
+
+ if (rxfh->indir) {
+ for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++)
+ rxfh->indir[i] = fbn->indir_tbl[0][i];
+ }
+
+ return 0;
+}
+
+static unsigned int
+fbnic_set_indir(struct fbnic_net *fbn, unsigned int idx, const u32 *indir)
+{
+ unsigned int i, changes = 0;
+
+ for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++) {
+ if (fbn->indir_tbl[idx][i] == indir[i])
+ continue;
+
+ fbn->indir_tbl[idx][i] = indir[i];
+ changes++;
+ }
+
+ return changes;
+}
+
+static int
+fbnic_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ unsigned int i, changes = 0;
+
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
+ return -EINVAL;
+
+ if (rxfh->key) {
+ u32 rss_key = 0;
+
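+		/* Pack the key bytes most-significant first into u32 words,
+		 * committing each word once its first byte (i % 4 == 0) has
+		 * been folded in.
+		 */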
+ for (i = FBNIC_RPC_RSS_KEY_BYTE_LEN; i--;) {
+ rss_key >>= 8;
+ rss_key |= (u32)(rxfh->key[i]) << 24;
+
+ if (i % 4)
+ continue;
+
+ if (fbn->rss_key[i / 4] == rss_key)
+ continue;
+
+ fbn->rss_key[i / 4] = rss_key;
+ changes++;
+ }
+ }
+
+ if (rxfh->indir)
+ changes += fbnic_set_indir(fbn, 0, rxfh->indir);
+
+ if (changes && netif_running(netdev))
+ fbnic_rss_reinit_hw(fbn->fbd, fbn);
+
+ return 0;
+}
+
+static int
+fbnic_get_rss_hash_opts(struct net_device *netdev,
+ struct ethtool_rxfh_fields *cmd)
+{
+ int hash_opt_idx = fbnic_get_rss_hash_idx(cmd->flow_type);
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ if (hash_opt_idx < 0)
+ return -EINVAL;
+
+ /* Report options from rss_en table in fbn */
+ cmd->data = fbn->rss_flow_hash[hash_opt_idx];
+
+ return 0;
+}
+
+#define FBNIC_L2_HASH_OPTIONS \
+ (RXH_L2DA | RXH_DISCARD)
+#define FBNIC_L3_HASH_OPTIONS \
+ (FBNIC_L2_HASH_OPTIONS | RXH_IP_SRC | RXH_IP_DST | RXH_IP6_FL)
+#define FBNIC_L4_HASH_OPTIONS \
+ (FBNIC_L3_HASH_OPTIONS | RXH_L4_B_0_1 | RXH_L4_B_2_3)
+
+static int
+fbnic_set_rss_hash_opts(struct net_device *netdev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ int hash_opt_idx;
+
+ /* Verify the type requested is correct */
+ hash_opt_idx = fbnic_get_rss_hash_idx(cmd->flow_type);
+ if (hash_opt_idx < 0)
+ return -EINVAL;
+
+ /* Verify the fields asked for can actually be assigned based on type */
+ if (cmd->data & ~FBNIC_L4_HASH_OPTIONS ||
+ (hash_opt_idx > FBNIC_L4_HASH_OPT &&
+ cmd->data & ~FBNIC_L3_HASH_OPTIONS) ||
+ (hash_opt_idx > FBNIC_IP_HASH_OPT &&
+ cmd->data & ~FBNIC_L2_HASH_OPTIONS))
+ return -EINVAL;
+
+ fbn->rss_flow_hash[hash_opt_idx] = cmd->data;
+
+ if (netif_running(fbn->netdev)) {
+ fbnic_rss_reinit(fbn->fbd, fbn);
+ fbnic_write_rules(fbn->fbd);
+ }
+
+ return 0;
+}
+
+static int
+fbnic_modify_rxfh_context(struct net_device *netdev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ const u32 *indir = rxfh->indir;
+ unsigned int changes;
+
+ if (!indir)
+ indir = ethtool_rxfh_context_indir(ctx);
+
+ changes = fbnic_set_indir(fbn, rxfh->rss_context, indir);
+ if (changes && netif_running(netdev))
+ fbnic_rss_reinit_hw(fbn->fbd, fbn);
+
+ return 0;
+}
+
+static int
+fbnic_create_rxfh_context(struct net_device *netdev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP) {
+ NL_SET_ERR_MSG_MOD(extack, "RSS hash function not supported");
+ return -EOPNOTSUPP;
+ }
+ ctx->hfunc = ETH_RSS_HASH_TOP;
+
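+	/* If no indirection table was supplied, seed the context with the
+	 * default distribution across the Rx queues.
+	 */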
+ if (!rxfh->indir) {
+ u32 *indir = ethtool_rxfh_context_indir(ctx);
+ unsigned int num_rx = fbn->num_rx_queues;
+ unsigned int i;
+
+ for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++)
+ indir[i] = ethtool_rxfh_indir_default(i, num_rx);
+ }
+
+ return fbnic_modify_rxfh_context(netdev, ctx, rxfh, extack);
+}
+
+static int
+fbnic_remove_rxfh_context(struct net_device *netdev,
+ struct ethtool_rxfh_context *ctx, u32 rss_context,
+ struct netlink_ext_ack *extack)
+{
+ /* Nothing to do, contexts are allocated statically */
+ return 0;
+}
+
+static void fbnic_get_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ ch->max_rx = fbd->max_num_queues;
+ ch->max_tx = fbd->max_num_queues;
+ ch->max_combined = min(ch->max_rx, ch->max_tx);
+ ch->max_other = FBNIC_NON_NAPI_VECTORS;
+
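+	/* When either queue count exceeds the number of NAPI vectors the
+	 * smaller queue count is reported as fully combined; otherwise the
+	 * overlap between Rx and Tx queues is combined and the remainder
+	 * are standalone.
+	 */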
+ if (fbn->num_rx_queues > fbn->num_napi ||
+ fbn->num_tx_queues > fbn->num_napi)
+ ch->combined_count = min(fbn->num_rx_queues,
+ fbn->num_tx_queues);
+ else
+ ch->combined_count =
+ fbn->num_rx_queues + fbn->num_tx_queues - fbn->num_napi;
+ ch->rx_count = fbn->num_rx_queues - ch->combined_count;
+ ch->tx_count = fbn->num_tx_queues - ch->combined_count;
+ ch->other_count = FBNIC_NON_NAPI_VECTORS;
+}
+
+static void fbnic_set_queues(struct fbnic_net *fbn, struct ethtool_channels *ch,
+ unsigned int max_napis)
+{
+ fbn->num_rx_queues = ch->rx_count + ch->combined_count;
+ fbn->num_tx_queues = ch->tx_count + ch->combined_count;
+ fbn->num_napi = min(ch->rx_count + ch->tx_count + ch->combined_count,
+ max_napis);
+}
+
+static int fbnic_set_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ unsigned int max_napis, standalone;
+ struct fbnic_dev *fbd = fbn->fbd;
+ struct fbnic_net *clone;
+ int err;
+
+ max_napis = fbd->num_irqs - FBNIC_NON_NAPI_VECTORS;
+ standalone = ch->rx_count + ch->tx_count;
+
+	/* Validate the requested configuration:
+	 * - dedicated Rx and Tx queues cannot both be mixed with combined
+	 *   queues
+	 * - each standalone queue needs its own NAPI vector
+	 *   (num_napi >= rx + tx + combined)
+	 * - per-direction queue counts cannot exceed the device maximum
+	 * - the number of non-NAPI (other) vectors cannot be changed
+	 */
+ if ((ch->rx_count && ch->tx_count && ch->combined_count) ||
+ (standalone && standalone + ch->combined_count > max_napis) ||
+ ch->rx_count + ch->combined_count > fbd->max_num_queues ||
+ ch->tx_count + ch->combined_count > fbd->max_num_queues ||
+ ch->other_count != FBNIC_NON_NAPI_VECTORS)
+ return -EINVAL;
+
+ if (!netif_running(netdev)) {
+ fbnic_set_queues(fbn, ch, max_napis);
+ fbnic_reset_indir_tbl(fbn);
+ return 0;
+ }
+
+ clone = fbnic_clone_create(fbn);
+ if (!clone)
+ return -ENOMEM;
+
+ fbnic_set_queues(clone, ch, max_napis);
+
+ err = fbnic_alloc_napi_vectors(clone);
+ if (err)
+ goto err_free_clone;
+
+ err = fbnic_alloc_resources(clone);
+ if (err)
+ goto err_free_napis;
+
+ fbnic_down_noidle(fbn);
+ err = fbnic_wait_all_queues_idle(fbn->fbd, true);
+ if (err)
+ goto err_start_stack;
+
+ err = fbnic_set_netif_queues(clone);
+ if (err)
+ goto err_start_stack;
+
+ /* Nothing can fail past this point */
+ fbnic_flush(fbn);
+
+ fbnic_clone_swap(fbn, clone);
+
+ /* Reset RSS indirection table */
+ fbnic_reset_indir_tbl(fbn);
+
+ fbnic_up(fbn);
+
+ fbnic_free_resources(clone);
+ fbnic_free_napi_vectors(clone);
+ fbnic_clone_free(clone);
+
+ return 0;
+
+err_start_stack:
+ fbnic_flush(fbn);
+ fbnic_up(fbn);
+ fbnic_free_resources(clone);
+err_free_napis:
+ fbnic_free_napi_vectors(clone);
+err_free_clone:
+ fbnic_clone_free(clone);
+ return err;
+}
+
+static int
+fbnic_get_ts_info(struct net_device *netdev,
+ struct kernel_ethtool_ts_info *tsinfo)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ tsinfo->phc_index = ptp_clock_index(fbn->fbd->ptp);
+
+ tsinfo->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ tsinfo->tx_types =
+ BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON);
+
+ tsinfo->rx_filters =
+ BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
+static void fbnic_get_ts_stats(struct net_device *netdev,
+ struct ethtool_ts_stats *ts_stats)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ u64 ts_packets, ts_lost;
+ struct fbnic_ring *ring;
+ unsigned int start;
+ int i;
+
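+	/* Start from the counters already folded into the netdev totals and
+	 * add the live per-ring counters under their u64_stats sequence
+	 * counters.
+	 */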
+ ts_stats->pkts = fbn->tx_stats.twq.ts_packets;
+ ts_stats->lost = fbn->tx_stats.twq.ts_lost;
+ for (i = 0; i < fbn->num_tx_queues; i++) {
+ ring = fbn->tx[i];
+ do {
+ start = u64_stats_fetch_begin(&ring->stats.syncp);
+ ts_packets = ring->stats.twq.ts_packets;
+ ts_lost = ring->stats.twq.ts_lost;
+ } while (u64_stats_fetch_retry(&ring->stats.syncp, start));
+ ts_stats->pkts += ts_packets;
+ ts_stats->lost += ts_lost;
+ }
+}
+
+static int
+fbnic_get_module_eeprom_by_page(struct net_device *netdev,
+ const struct ethtool_module_eeprom *page_data,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_fw_completion *fw_cmpl;
+ struct fbnic_dev *fbd = fbn->fbd;
+ int err;
+
+ if (page_data->i2c_address != 0x50) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Invalid i2c address. Only 0x50 is supported");
+ return -EINVAL;
+ }
+
+ fw_cmpl = __fbnic_fw_alloc_cmpl(FBNIC_TLV_MSG_ID_QSFP_READ_RESP,
+ page_data->length);
+ if (!fw_cmpl)
+ return -ENOMEM;
+
+ /* Initialize completion and queue it for FW to process */
+ fw_cmpl->u.qsfp.length = page_data->length;
+ fw_cmpl->u.qsfp.offset = page_data->offset;
+ fw_cmpl->u.qsfp.page = page_data->page;
+ fw_cmpl->u.qsfp.bank = page_data->bank;
+
+ err = fbnic_fw_xmit_qsfp_read_msg(fbd, fw_cmpl, page_data->page,
+ page_data->bank, page_data->offset,
+ page_data->length);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to transmit EEPROM read request");
+ goto exit_free;
+ }
+
+ if (!wait_for_completion_timeout(&fw_cmpl->done, 2 * HZ)) {
+ err = -ETIMEDOUT;
+ NL_SET_ERR_MSG_MOD(extack,
+ "Timed out waiting for firmware response");
+ goto exit_cleanup;
+ }
+
+ if (fw_cmpl->result) {
+ err = fw_cmpl->result;
+ NL_SET_ERR_MSG_MOD(extack, "Failed to read EEPROM");
+ goto exit_cleanup;
+ }
+
+ memcpy(page_data->data, fw_cmpl->u.qsfp.data, page_data->length);
+
+exit_cleanup:
+ fbnic_mbx_clear_cmpl(fbd, fw_cmpl);
+exit_free:
+ fbnic_fw_put_cmpl(fw_cmpl);
+
+ return err ? : page_data->length;
+}
+
+static void fbnic_set_counter(u64 *stat, struct fbnic_stat_counter *counter)
+{
+ if (counter->reported)
+ *stat = counter->value;
+}
+
+static void
+fbnic_get_pause_stats(struct net_device *netdev,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_mac_stats *mac_stats;
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ mac_stats = &fbd->hw_stats.mac;
+
+ fbd->mac->get_pause_stats(fbd, false, &mac_stats->pause);
+
+ pause_stats->tx_pause_frames = mac_stats->pause.tx_pause_frames.value;
+ pause_stats->rx_pause_frames = mac_stats->pause.rx_pause_frames.value;
+}
+
+static void
+fbnic_get_fec_stats(struct net_device *netdev,
+ struct ethtool_fec_stats *fec_stats,
+ struct ethtool_fec_hist *hist)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_phy_stats *phy_stats;
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ fbnic_get_hw_stats32(fbd);
+ phy_stats = &fbd->hw_stats.phy;
+
+ spin_lock(&fbd->hw_stats.lock);
+ fec_stats->corrected_blocks.total =
+ phy_stats->fec.corrected_blocks.value;
+ fec_stats->uncorrectable_blocks.total =
+ phy_stats->fec.uncorrectable_blocks.value;
+ spin_unlock(&fbd->hw_stats.lock);
+}
+
+static void
+fbnic_get_eth_phy_stats(struct net_device *netdev,
+ struct ethtool_eth_phy_stats *eth_phy_stats)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_phy_stats *phy_stats;
+ struct fbnic_dev *fbd = fbn->fbd;
+ u64 total = 0;
+ int i;
+
+ fbnic_get_hw_stats32(fbd);
+ phy_stats = &fbd->hw_stats.phy;
+
+ spin_lock(&fbd->hw_stats.lock);
+ for (i = 0; i < FBNIC_PCS_MAX_LANES; i++)
+ total += phy_stats->pcs.SymbolErrorDuringCarrier.lanes[i].value;
+
+ eth_phy_stats->SymbolErrorDuringCarrier = total;
+ spin_unlock(&fbd->hw_stats.lock);
+}
+
+static void
+fbnic_get_eth_mac_stats(struct net_device *netdev,
+ struct ethtool_eth_mac_stats *eth_mac_stats)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_mac_stats *mac_stats;
+ struct fbnic_dev *fbd = fbn->fbd;
+ const struct fbnic_mac *mac;
+
+ mac_stats = &fbd->hw_stats.mac;
+ mac = fbd->mac;
+
+ mac->get_eth_mac_stats(fbd, false, &mac_stats->eth_mac);
+
+ fbnic_set_counter(&eth_mac_stats->FramesTransmittedOK,
+ &mac_stats->eth_mac.FramesTransmittedOK);
+ fbnic_set_counter(&eth_mac_stats->FramesReceivedOK,
+ &mac_stats->eth_mac.FramesReceivedOK);
+ fbnic_set_counter(&eth_mac_stats->FrameCheckSequenceErrors,
+ &mac_stats->eth_mac.FrameCheckSequenceErrors);
+ fbnic_set_counter(&eth_mac_stats->AlignmentErrors,
+ &mac_stats->eth_mac.AlignmentErrors);
+ fbnic_set_counter(&eth_mac_stats->OctetsTransmittedOK,
+ &mac_stats->eth_mac.OctetsTransmittedOK);
+ fbnic_set_counter(&eth_mac_stats->FramesLostDueToIntMACXmitError,
+ &mac_stats->eth_mac.FramesLostDueToIntMACXmitError);
+ fbnic_set_counter(&eth_mac_stats->OctetsReceivedOK,
+ &mac_stats->eth_mac.OctetsReceivedOK);
+ fbnic_set_counter(&eth_mac_stats->FramesLostDueToIntMACRcvError,
+ &mac_stats->eth_mac.FramesLostDueToIntMACRcvError);
+ fbnic_set_counter(&eth_mac_stats->MulticastFramesXmittedOK,
+ &mac_stats->eth_mac.MulticastFramesXmittedOK);
+ fbnic_set_counter(&eth_mac_stats->BroadcastFramesXmittedOK,
+ &mac_stats->eth_mac.BroadcastFramesXmittedOK);
+ fbnic_set_counter(&eth_mac_stats->MulticastFramesReceivedOK,
+ &mac_stats->eth_mac.MulticastFramesReceivedOK);
+ fbnic_set_counter(&eth_mac_stats->BroadcastFramesReceivedOK,
+ &mac_stats->eth_mac.BroadcastFramesReceivedOK);
+ fbnic_set_counter(&eth_mac_stats->FrameTooLongErrors,
+ &mac_stats->eth_mac.FrameTooLongErrors);
+}
+
+static void
+fbnic_get_eth_ctrl_stats(struct net_device *netdev,
+ struct ethtool_eth_ctrl_stats *eth_ctrl_stats)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_mac_stats *mac_stats;
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ mac_stats = &fbd->hw_stats.mac;
+
+ fbd->mac->get_eth_ctrl_stats(fbd, false, &mac_stats->eth_ctrl);
+
+ eth_ctrl_stats->MACControlFramesReceived =
+ mac_stats->eth_ctrl.MACControlFramesReceived.value;
+ eth_ctrl_stats->MACControlFramesTransmitted =
+ mac_stats->eth_ctrl.MACControlFramesTransmitted.value;
+}
+
+static const struct ethtool_rmon_hist_range fbnic_rmon_ranges[] = {
+ { 0, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, 2047 },
+ { 2048, 4095 },
+ { 4096, 8191 },
+ { 8192, 9216 },
+ { 9217, FBNIC_MAX_JUMBO_FRAME_SIZE },
+ {}
+};
+
+static void
+fbnic_get_rmon_stats(struct net_device *netdev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_mac_stats *mac_stats;
+ struct fbnic_dev *fbd = fbn->fbd;
+ int i;
+
+ mac_stats = &fbd->hw_stats.mac;
+
+ fbd->mac->get_rmon_stats(fbd, false, &mac_stats->rmon);
+
+ rmon_stats->undersize_pkts =
+ mac_stats->rmon.undersize_pkts.value;
+ rmon_stats->oversize_pkts =
+ mac_stats->rmon.oversize_pkts.value;
+ rmon_stats->fragments =
+ mac_stats->rmon.fragments.value;
+ rmon_stats->jabbers =
+ mac_stats->rmon.jabbers.value;
+
+ for (i = 0; fbnic_rmon_ranges[i].high; i++) {
+ rmon_stats->hist[i] = mac_stats->rmon.hist[i].value;
+ rmon_stats->hist_tx[i] = mac_stats->rmon.hist_tx[i].value;
+ }
+
+ *ranges = fbnic_rmon_ranges;
+}
+
+static void fbnic_get_link_ext_stats(struct net_device *netdev,
+ struct ethtool_link_ext_stats *stats)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ stats->link_down_events = fbn->link_down_events;
+}
+
+static const struct ethtool_ops fbnic_ethtool_ops = {
+ .cap_link_lanes_supported = true,
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_RX_MAX_FRAMES,
+ .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT |
+ ETHTOOL_RING_USE_HDS_THRS,
+ .rxfh_max_num_contexts = FBNIC_RPC_RSS_TBL_COUNT,
+ .get_drvinfo = fbnic_get_drvinfo,
+ .get_regs_len = fbnic_get_regs_len,
+ .get_regs = fbnic_get_regs,
+ .get_link = ethtool_op_get_link,
+ .get_link_ext_stats = fbnic_get_link_ext_stats,
+ .get_coalesce = fbnic_get_coalesce,
+ .set_coalesce = fbnic_set_coalesce,
+ .get_ringparam = fbnic_get_ringparam,
+ .set_ringparam = fbnic_set_ringparam,
+ .get_pause_stats = fbnic_get_pause_stats,
+ .get_pauseparam = fbnic_phylink_get_pauseparam,
+ .set_pauseparam = fbnic_phylink_set_pauseparam,
+ .get_strings = fbnic_get_strings,
+ .get_ethtool_stats = fbnic_get_ethtool_stats,
+ .get_sset_count = fbnic_get_sset_count,
+ .get_rxnfc = fbnic_get_rxnfc,
+ .set_rxnfc = fbnic_set_rxnfc,
+ .get_rxfh_key_size = fbnic_get_rxfh_key_size,
+ .get_rxfh_indir_size = fbnic_get_rxfh_indir_size,
+ .get_rxfh = fbnic_get_rxfh,
+ .set_rxfh = fbnic_set_rxfh,
+ .get_rxfh_fields = fbnic_get_rss_hash_opts,
+ .set_rxfh_fields = fbnic_set_rss_hash_opts,
+ .create_rxfh_context = fbnic_create_rxfh_context,
+ .modify_rxfh_context = fbnic_modify_rxfh_context,
+ .remove_rxfh_context = fbnic_remove_rxfh_context,
+ .get_channels = fbnic_get_channels,
+ .set_channels = fbnic_set_channels,
+ .get_ts_info = fbnic_get_ts_info,
+ .get_ts_stats = fbnic_get_ts_stats,
+ .get_link_ksettings = fbnic_phylink_ethtool_ksettings_get,
+ .get_fec_stats = fbnic_get_fec_stats,
+ .get_fecparam = fbnic_phylink_get_fecparam,
+ .get_module_eeprom_by_page = fbnic_get_module_eeprom_by_page,
+ .get_eth_phy_stats = fbnic_get_eth_phy_stats,
+ .get_eth_mac_stats = fbnic_get_eth_mac_stats,
+ .get_eth_ctrl_stats = fbnic_get_eth_ctrl_stats,
+ .get_rmon_stats = fbnic_get_rmon_stats,
+};
+
+void fbnic_set_ethtool_ops(struct net_device *dev)
+{
+ dev->ethtool_ops = &fbnic_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
new file mode 100644
index 000000000000..d8d9b6cfde82
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
@@ -0,0 +1,1920 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/bitfield.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/dev_printk.h>
+#include <linux/dma-mapping.h>
+#include <linux/gfp.h>
+#include <linux/types.h>
+
+#include "fbnic.h"
+#include "fbnic_tlv.h"
+
+static void __fbnic_mbx_wr_desc(struct fbnic_dev *fbd, int mbx_idx,
+ int desc_idx, u64 desc)
+{
+ u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);
+
+	/* Write the upper 32b and then the lower 32b. This way the FW
+	 * can read lower, upper, lower to verify that the state of the
+	 * descriptor wasn't changed mid-transaction.
+ */
+ fw_wr32(fbd, desc_offset + 1, upper_32_bits(desc));
+ fw_wrfl(fbd);
+ fw_wr32(fbd, desc_offset, lower_32_bits(desc));
+}
+
+static void __fbnic_mbx_invalidate_desc(struct fbnic_dev *fbd, int mbx_idx,
+ int desc_idx, u32 desc)
+{
+ u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);
+
+ /* For initialization we write the lower 32b of the descriptor first.
+ * This way we can set the state to mark it invalid before we clear the
+ * upper 32b.
+ */
+ fw_wr32(fbd, desc_offset, desc);
+ fw_wrfl(fbd);
+ fw_wr32(fbd, desc_offset + 1, 0);
+}
+
+static u64 __fbnic_mbx_rd_desc(struct fbnic_dev *fbd, int mbx_idx, int desc_idx)
+{
+ u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);
+ u64 desc;
+
+ desc = fw_rd32(fbd, desc_offset);
+ desc |= (u64)fw_rd32(fbd, desc_offset + 1) << 32;
+
+ return desc;
+}
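+
+/* Illustrative sketch only, not part of the driver: the lower/upper/lower
+ * read sequence described in __fbnic_mbx_wr_desc() could be used by a
+ * reader roughly as below to detect a descriptor that changed mid-read.
+ * The helper name, the stable flag and the __maybe_unused annotation are
+ * assumptions added purely for illustration.
+ */
+static u64 __maybe_unused
+__fbnic_mbx_rd_desc_stable(struct fbnic_dev *fbd, int mbx_idx, int desc_idx,
+			   bool *stable)
+{
+	u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);
+	u32 lo, hi, lo2;
+
+	/* Read lower, upper, lower. The writer updates the upper word first,
+	 * so if both lower reads match, the state carried in the lower word
+	 * did not change while the upper word was being read.
+	 */
+	lo = fw_rd32(fbd, desc_offset);
+	hi = fw_rd32(fbd, desc_offset + 1);
+	lo2 = fw_rd32(fbd, desc_offset);
+
+	*stable = (lo == lo2);
+
+	return ((u64)hi << 32) | lo2;
+}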
+
+static void fbnic_mbx_reset_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
+{
+ int desc_idx;
+
+ /* Disable DMA transactions from the device,
+ * and flush any transactions triggered during cleaning
+ */
+ switch (mbx_idx) {
+ case FBNIC_IPC_MBX_RX_IDX:
+ wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AW_CFG,
+ FBNIC_PUL_OB_TLP_HDR_AW_CFG_FLUSH);
+ break;
+ case FBNIC_IPC_MBX_TX_IDX:
+ wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG,
+ FBNIC_PUL_OB_TLP_HDR_AR_CFG_FLUSH);
+ break;
+ }
+
+ wrfl(fbd);
+
+ /* Initialize first descriptor to all 0s. Doing this gives us a
+ * solid stop for the firmware to hit when it is done looping
+ * through the ring.
+ */
+ __fbnic_mbx_invalidate_desc(fbd, mbx_idx, 0, 0);
+
+	/* We then fill the rest of the ring, starting at the end and moving
+	 * back toward descriptor 0, with skip descriptors that have no
+	 * length or address. This tells the firmware it can skip past them
+	 * to the one we initialized to 0.
+ */
+ for (desc_idx = FBNIC_IPC_MBX_DESC_LEN; --desc_idx;)
+ __fbnic_mbx_invalidate_desc(fbd, mbx_idx, desc_idx,
+ FBNIC_IPC_MBX_DESC_FW_CMPL |
+ FBNIC_IPC_MBX_DESC_HOST_CMPL);
+}
+
+void fbnic_mbx_init(struct fbnic_dev *fbd)
+{
+ int i;
+
+ /* Initialize lock to protect Tx ring */
+ spin_lock_init(&fbd->fw_tx_lock);
+
+ /* Reset FW Capabilities */
+ memset(&fbd->fw_cap, 0, sizeof(fbd->fw_cap));
+
+ /* Reinitialize mailbox memory */
+ for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
+ memset(&fbd->mbx[i], 0, sizeof(struct fbnic_fw_mbx));
+
+ /* Do not auto-clear the FW mailbox interrupt, let SW clear it */
+ wr32(fbd, FBNIC_INTR_SW_AC_MODE(0), ~(1u << FBNIC_FW_MSIX_ENTRY));
+
+ /* Clear any stale causes in vector 0 as that is used for doorbell */
+ wr32(fbd, FBNIC_INTR_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);
+
+ for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
+ fbnic_mbx_reset_desc_ring(fbd, i);
+}
+
+static int fbnic_mbx_map_msg(struct fbnic_dev *fbd, int mbx_idx,
+ struct fbnic_tlv_msg *msg, u16 length, u8 eom)
+{
+ struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];
+ u8 tail = mbx->tail;
+ dma_addr_t addr;
+ int direction;
+
+ if (!mbx->ready || !fbnic_fw_present(fbd))
+ return -ENODEV;
+
+ direction = (mbx_idx == FBNIC_IPC_MBX_RX_IDX) ? DMA_FROM_DEVICE :
+ DMA_TO_DEVICE;
+
+ if (mbx->head == ((tail + 1) % FBNIC_IPC_MBX_DESC_LEN))
+ return -EBUSY;
+
+ addr = dma_map_single(fbd->dev, msg, PAGE_SIZE, direction);
+ if (dma_mapping_error(fbd->dev, addr))
+ return -ENOSPC;
+
+ mbx->buf_info[tail].msg = msg;
+ mbx->buf_info[tail].addr = addr;
+
+ mbx->tail = (tail + 1) % FBNIC_IPC_MBX_DESC_LEN;
+
+ fw_wr32(fbd, FBNIC_IPC_MBX(mbx_idx, mbx->tail), 0);
+
+ __fbnic_mbx_wr_desc(fbd, mbx_idx, tail,
+ FIELD_PREP(FBNIC_IPC_MBX_DESC_LEN_MASK, length) |
+ (addr & FBNIC_IPC_MBX_DESC_ADDR_MASK) |
+ (eom ? FBNIC_IPC_MBX_DESC_EOM : 0) |
+ FBNIC_IPC_MBX_DESC_HOST_CMPL);
+
+ return 0;
+}
+
+static void fbnic_mbx_unmap_and_free_msg(struct fbnic_dev *fbd, int mbx_idx,
+ int desc_idx)
+{
+ struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];
+ int direction;
+
+ if (!mbx->buf_info[desc_idx].msg)
+ return;
+
+ direction = (mbx_idx == FBNIC_IPC_MBX_RX_IDX) ? DMA_FROM_DEVICE :
+ DMA_TO_DEVICE;
+ dma_unmap_single(fbd->dev, mbx->buf_info[desc_idx].addr,
+ PAGE_SIZE, direction);
+
+ free_page((unsigned long)mbx->buf_info[desc_idx].msg);
+ mbx->buf_info[desc_idx].msg = NULL;
+}
+
+static void fbnic_mbx_clean_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
+{
+ int i;
+
+ fbnic_mbx_reset_desc_ring(fbd, mbx_idx);
+
+ for (i = FBNIC_IPC_MBX_DESC_LEN; i--;)
+ fbnic_mbx_unmap_and_free_msg(fbd, mbx_idx, i);
+}
+
+void fbnic_mbx_clean(struct fbnic_dev *fbd)
+{
+ int i;
+
+ for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
+ fbnic_mbx_clean_desc_ring(fbd, i);
+}
+
+#define FBNIC_MBX_MAX_PAGE_SIZE FIELD_MAX(FBNIC_IPC_MBX_DESC_LEN_MASK)
+#define FBNIC_RX_PAGE_SIZE min_t(int, PAGE_SIZE, FBNIC_MBX_MAX_PAGE_SIZE)
+
+static int fbnic_mbx_alloc_rx_msgs(struct fbnic_dev *fbd)
+{
+ struct fbnic_fw_mbx *rx_mbx = &fbd->mbx[FBNIC_IPC_MBX_RX_IDX];
+ u8 tail = rx_mbx->tail, head = rx_mbx->head, count;
+ int err = 0;
+
+ /* Do nothing if mailbox is not ready, or we already have pages on
+ * the ring that can be used by the firmware
+ */
+ if (!rx_mbx->ready)
+ return -ENODEV;
+
+	/* Fill all but one of the unused descriptors in the Rx queue. */
+ count = (head - tail - 1) & (FBNIC_IPC_MBX_DESC_LEN - 1);
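+	/* Worked example (illustrative values): with FBNIC_IPC_MBX_DESC_LEN
+	 * equal to 8 (the mask arithmetic assumes a power of two), head = 2
+	 * and tail = 5 give count = (2 - 5 - 1) & 7 = 4, so four buffers can
+	 * be posted. One slot is always left unused so that a full ring
+	 * (tail one behind head) is distinguishable from an empty one
+	 * (head == tail).
+	 */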
+ while (!err && count--) {
+ struct fbnic_tlv_msg *msg;
+
+ msg = (struct fbnic_tlv_msg *)__get_free_page(GFP_ATOMIC |
+ __GFP_NOWARN);
+ if (!msg) {
+ err = -ENOMEM;
+ break;
+ }
+
+ err = fbnic_mbx_map_msg(fbd, FBNIC_IPC_MBX_RX_IDX, msg,
+ FBNIC_RX_PAGE_SIZE, 0);
+ if (err)
+ free_page((unsigned long)msg);
+ }
+
+ return err;
+}
+
+static int fbnic_mbx_map_tlv_msg(struct fbnic_dev *fbd,
+ struct fbnic_tlv_msg *msg)
+{
+ unsigned long flags;
+ int err;
+
+ spin_lock_irqsave(&fbd->fw_tx_lock, flags);
+
+ err = fbnic_mbx_map_msg(fbd, FBNIC_IPC_MBX_TX_IDX, msg,
+ le16_to_cpu(msg->hdr.len) * sizeof(u32), 1);
+
+ spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
+
+ return err;
+}
+
+static int fbnic_mbx_set_cmpl_slot(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data)
+{
+ struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
+ int free = -EXFULL;
+ int i;
+
+ if (!tx_mbx->ready)
+ return -ENODEV;
+
+ for (i = 0; i < FBNIC_MBX_CMPL_SLOTS; i++) {
+ if (!fbd->cmpl_data[i])
+ free = i;
+ else if (fbd->cmpl_data[i]->msg_type == cmpl_data->msg_type)
+ return -EEXIST;
+ }
+
+ if (free == -EXFULL)
+ return -EXFULL;
+
+ fbd->cmpl_data[free] = cmpl_data;
+
+ return 0;
+}
+
+static void fbnic_mbx_clear_cmpl_slot(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data)
+{
+ int i;
+
+ for (i = 0; i < FBNIC_MBX_CMPL_SLOTS; i++) {
+ if (fbd->cmpl_data[i] == cmpl_data) {
+ fbd->cmpl_data[i] = NULL;
+ break;
+ }
+ }
+}
+
+static void fbnic_mbx_process_tx_msgs(struct fbnic_dev *fbd)
+{
+ struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
+ u8 head = tx_mbx->head;
+ u64 desc;
+
+ while (head != tx_mbx->tail) {
+ desc = __fbnic_mbx_rd_desc(fbd, FBNIC_IPC_MBX_TX_IDX, head);
+ if (!(desc & FBNIC_IPC_MBX_DESC_FW_CMPL))
+ break;
+
+ fbnic_mbx_unmap_and_free_msg(fbd, FBNIC_IPC_MBX_TX_IDX, head);
+
+ head++;
+ head %= FBNIC_IPC_MBX_DESC_LEN;
+ }
+
+ /* Record head for next interrupt */
+ tx_mbx->head = head;
+}
+
+int fbnic_mbx_set_cmpl(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data)
+{
+ unsigned long flags;
+ int err;
+
+ spin_lock_irqsave(&fbd->fw_tx_lock, flags);
+ err = fbnic_mbx_set_cmpl_slot(fbd, cmpl_data);
+ spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
+
+ return err;
+}
+
+static int fbnic_mbx_map_req_w_cmpl(struct fbnic_dev *fbd,
+ struct fbnic_tlv_msg *msg,
+ struct fbnic_fw_completion *cmpl_data)
+{
+ unsigned long flags;
+ int err;
+
+ spin_lock_irqsave(&fbd->fw_tx_lock, flags);
+ if (cmpl_data) {
+ err = fbnic_mbx_set_cmpl_slot(fbd, cmpl_data);
+ if (err)
+ goto unlock_mbx;
+ }
+
+ err = fbnic_mbx_map_msg(fbd, FBNIC_IPC_MBX_TX_IDX, msg,
+ le16_to_cpu(msg->hdr.len) * sizeof(u32), 1);
+
+	/* If we successfully reserved a completion slot but mapping the
+	 * message failed, clear the completion data for the next caller
+ */
+ if (err && cmpl_data)
+ fbnic_mbx_clear_cmpl_slot(fbd, cmpl_data);
+
+unlock_mbx:
+ spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
+
+ return err;
+}
+
+void fbnic_mbx_clear_cmpl(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *fw_cmpl)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&fbd->fw_tx_lock, flags);
+ fbnic_mbx_clear_cmpl_slot(fbd, fw_cmpl);
+ spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
+}
+
+static void fbnic_fw_release_cmpl_data(struct kref *kref)
+{
+ struct fbnic_fw_completion *cmpl_data;
+
+ cmpl_data = container_of(kref, struct fbnic_fw_completion,
+ ref_count);
+ kfree(cmpl_data);
+}
+
+static struct fbnic_fw_completion *
+fbnic_fw_get_cmpl_by_type(struct fbnic_dev *fbd, u32 msg_type)
+{
+ struct fbnic_fw_completion *cmpl_data = NULL;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&fbd->fw_tx_lock, flags);
+ for (i = 0; i < FBNIC_MBX_CMPL_SLOTS; i++) {
+ if (fbd->cmpl_data[i] &&
+ fbd->cmpl_data[i]->msg_type == msg_type) {
+ cmpl_data = fbd->cmpl_data[i];
+ kref_get(&cmpl_data->ref_count);
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
+
+ return cmpl_data;
+}
+
+/**
+ * fbnic_fw_xmit_simple_msg - Transmit a simple single TLV message w/o data
+ * @fbd: FBNIC device structure
+ * @msg_type: ENUM value indicating message type to send
+ *
+ * Return:
+ * One of the following values:
+ * -EOPNOTSUPP: Not an ASIC, so the mailbox is not supported
+ * -ENODEV: Device I/O error
+ * -ENOMEM: Failed to allocate message
+ * -EBUSY: No space in mailbox
+ * -ENOSPC: DMA mapping failed
+ *
+ * This function sends a single TLV header indicating the host wants to take
+ * some action. It carries no other data, so if the action is expected to
+ * kick off a resultant action, any response will need to be caught via a
+ * completion.
+ */
+static int fbnic_fw_xmit_simple_msg(struct fbnic_dev *fbd, u32 msg_type)
+{
+ struct fbnic_tlv_msg *msg;
+ int err = 0;
+
+ if (!fbnic_fw_present(fbd))
+ return -ENODEV;
+
+ msg = fbnic_tlv_msg_alloc(msg_type);
+ if (!msg)
+ return -ENOMEM;
+
+ err = fbnic_mbx_map_tlv_msg(fbd, msg);
+ if (err)
+ free_page((unsigned long)msg);
+
+ return err;
+}
+
+static void fbnic_mbx_init_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
+{
+ struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];
+
+ mbx->ready = true;
+
+ switch (mbx_idx) {
+ case FBNIC_IPC_MBX_RX_IDX:
+ /* Enable DMA writes from the device */
+ wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AW_CFG,
+ FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME);
+
+ /* Make sure we have a page for the FW to write to */
+ fbnic_mbx_alloc_rx_msgs(fbd);
+ break;
+ case FBNIC_IPC_MBX_TX_IDX:
+ /* Enable DMA reads from the device */
+ wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG,
+ FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME);
+ break;
+ }
+}
+
+static bool fbnic_mbx_event(struct fbnic_dev *fbd)
+{
+ /* We only need to do this on the first interrupt following reset.
+	 * This primes the mailbox so that we will have cleared all the
+ * skip descriptors.
+ */
+ if (!(rd32(fbd, FBNIC_INTR_STATUS(0)) & (1u << FBNIC_FW_MSIX_ENTRY)))
+ return false;
+
+ wr32(fbd, FBNIC_INTR_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);
+
+ return true;
+}
+
+/**
+ * fbnic_fw_xmit_ownership_msg - Create and transmit a host ownership message
+ * to FW mailbox
+ *
+ * @fbd: FBNIC device structure
+ * @take_ownership: take/release the ownership
+ *
+ * Return: zero on success, negative value on failure
+ *
+ * Notifies the firmware that the driver either takes ownership of the NIC
+ * (when @take_ownership is true) or releases it.
+ */
+int fbnic_fw_xmit_ownership_msg(struct fbnic_dev *fbd, bool take_ownership)
+{
+ unsigned long req_time = jiffies;
+ struct fbnic_tlv_msg *msg;
+ int err = 0;
+
+ if (!fbnic_fw_present(fbd))
+ return -ENODEV;
+
+ msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_OWNERSHIP_REQ);
+ if (!msg)
+ return -ENOMEM;
+
+ if (take_ownership) {
+ err = fbnic_tlv_attr_put_flag(msg, FBNIC_FW_OWNERSHIP_FLAG);
+ if (err)
+ goto free_message;
+ }
+
+ err = fbnic_mbx_map_tlv_msg(fbd, msg);
+ if (err)
+ goto free_message;
+
+ /* Initialize heartbeat, set last response to 1 second in the past
+ * so that we will trigger a timeout if the firmware doesn't respond
+ */
+ fbd->last_heartbeat_response = req_time - HZ;
+
+ fbd->last_heartbeat_request = req_time;
+
+ /* Set prev_firmware_time to 0 to avoid triggering firmware crash
+ * detection until we receive the second uptime in a heartbeat resp.
+ */
+ fbd->prev_firmware_time = 0;
+
+ /* Set heartbeat detection based on if we are taking ownership */
+ fbd->fw_heartbeat_enabled = take_ownership;
+
+ return err;
+
+free_message:
+ free_page((unsigned long)msg);
+ return err;
+}
+
+static const struct fbnic_tlv_index fbnic_fw_cap_resp_index[] = {
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_VERSION),
+ FBNIC_TLV_ATTR_FLAG(FBNIC_FW_CAP_RESP_BMC_PRESENT),
+ FBNIC_TLV_ATTR_MAC_ADDR(FBNIC_FW_CAP_RESP_BMC_MAC_ADDR),
+ FBNIC_TLV_ATTR_ARRAY(FBNIC_FW_CAP_RESP_BMC_MAC_ARRAY),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_STORED_VERSION),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_ACTIVE_FW_SLOT),
+ FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_VERSION_COMMIT_STR,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_BMC_ALL_MULTI),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_FW_LINK_SPEED),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_FW_LINK_FEC),
+ FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_STORED_COMMIT_STR,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_CMRT_VERSION),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_STORED_CMRT_VERSION),
+ FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_CMRT_COMMIT_STR,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
+ FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_UEFI_VERSION),
+ FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_ANTI_ROLLBACK_VERSION),
+ FBNIC_TLV_ATTR_LAST
+};
+
+static int fbnic_fw_parse_bmc_addrs(u8 bmc_mac_addr[][ETH_ALEN],
+ struct fbnic_tlv_msg *attr, int len)
+{
+ int attr_len = le16_to_cpu(attr->hdr.len) / sizeof(u32) - 1;
+ struct fbnic_tlv_msg *mac_results[8];
+ int err, i = 0;
+
+ /* Make sure we have enough room to process all the MAC addresses */
+ if (len > 8)
+ return -ENOSPC;
+
+ /* Parse the array */
+ err = fbnic_tlv_attr_parse_array(&attr[1], attr_len, mac_results,
+ fbnic_fw_cap_resp_index,
+ FBNIC_FW_CAP_RESP_BMC_MAC_ADDR, len);
+ if (err)
+ return err;
+
+ /* Copy results into MAC addr array */
+ for (i = 0; i < len && mac_results[i]; i++)
+ fbnic_tlv_attr_addr_copy(bmc_mac_addr[i], mac_results[i]);
+
+ /* Zero remaining unused addresses */
+ while (i < len)
+ eth_zero_addr(bmc_mac_addr[i++]);
+
+ return 0;
+}
+
+static int fbnic_fw_parse_cap_resp(void *opaque, struct fbnic_tlv_msg **results)
+{
+ u32 all_multi = 0, version = 0;
+ struct fbnic_dev *fbd = opaque;
+ bool bmc_present;
+ int err;
+
+ version = fta_get_uint(results, FBNIC_FW_CAP_RESP_VERSION);
+ fbd->fw_cap.running.mgmt.version = version;
+ if (!fbd->fw_cap.running.mgmt.version)
+ return -EINVAL;
+
+ if (fbd->fw_cap.running.mgmt.version < MIN_FW_VER_CODE) {
+ char required_ver[FBNIC_FW_VER_MAX_SIZE];
+ char running_ver[FBNIC_FW_VER_MAX_SIZE];
+
+ fbnic_mk_fw_ver_str(fbd->fw_cap.running.mgmt.version,
+ running_ver);
+ fbnic_mk_fw_ver_str(MIN_FW_VER_CODE, required_ver);
+ dev_err(fbd->dev, "Device firmware version(%s) is older than minimum required version(%s)\n",
+ running_ver, required_ver);
+ /* Disable TX mailbox to prevent card use until firmware is
+ * updated.
+ */
+ fbd->mbx[FBNIC_IPC_MBX_TX_IDX].ready = false;
+ return -EINVAL;
+ }
+
+ if (fta_get_str(results, FBNIC_FW_CAP_RESP_VERSION_COMMIT_STR,
+ fbd->fw_cap.running.mgmt.commit,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE) <= 0)
+ dev_warn(fbd->dev, "Firmware did not send mgmt commit!\n");
+
+ version = fta_get_uint(results, FBNIC_FW_CAP_RESP_STORED_VERSION);
+ fbd->fw_cap.stored.mgmt.version = version;
+ fta_get_str(results, FBNIC_FW_CAP_RESP_STORED_COMMIT_STR,
+ fbd->fw_cap.stored.mgmt.commit,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
+
+ version = fta_get_uint(results, FBNIC_FW_CAP_RESP_CMRT_VERSION);
+ fbd->fw_cap.running.bootloader.version = version;
+ fta_get_str(results, FBNIC_FW_CAP_RESP_CMRT_COMMIT_STR,
+ fbd->fw_cap.running.bootloader.commit,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
+
+ version = fta_get_uint(results, FBNIC_FW_CAP_RESP_STORED_CMRT_VERSION);
+ fbd->fw_cap.stored.bootloader.version = version;
+ fta_get_str(results, FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR,
+ fbd->fw_cap.stored.bootloader.commit,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
+
+ version = fta_get_uint(results, FBNIC_FW_CAP_RESP_UEFI_VERSION);
+ fbd->fw_cap.stored.undi.version = version;
+ fta_get_str(results, FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR,
+ fbd->fw_cap.stored.undi.commit,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
+
+ fbd->fw_cap.active_slot =
+ fta_get_uint(results, FBNIC_FW_CAP_RESP_ACTIVE_FW_SLOT);
+ fbd->fw_cap.link_speed =
+ fta_get_uint(results, FBNIC_FW_CAP_RESP_FW_LINK_SPEED);
+ fbd->fw_cap.link_fec =
+ fta_get_uint(results, FBNIC_FW_CAP_RESP_FW_LINK_FEC);
+
+ bmc_present = !!results[FBNIC_FW_CAP_RESP_BMC_PRESENT];
+ if (bmc_present) {
+ struct fbnic_tlv_msg *attr;
+
+ attr = results[FBNIC_FW_CAP_RESP_BMC_MAC_ARRAY];
+ if (!attr)
+ return -EINVAL;
+
+ err = fbnic_fw_parse_bmc_addrs(fbd->fw_cap.bmc_mac_addr,
+ attr, 4);
+ if (err)
+ return err;
+
+ all_multi =
+ fta_get_uint(results, FBNIC_FW_CAP_RESP_BMC_ALL_MULTI);
+ } else {
+ memset(fbd->fw_cap.bmc_mac_addr, 0,
+ sizeof(fbd->fw_cap.bmc_mac_addr));
+ }
+
+ fbd->fw_cap.bmc_present = bmc_present;
+
+ if (results[FBNIC_FW_CAP_RESP_BMC_ALL_MULTI] || !bmc_present)
+ fbd->fw_cap.all_multi = all_multi;
+
+ fbd->fw_cap.anti_rollback_version =
+ fta_get_uint(results, FBNIC_FW_CAP_RESP_ANTI_ROLLBACK_VERSION);
+
+ /* Always assume we need a BMC reinit */
+ fbd->fw_cap.need_bmc_tcam_reinit = true;
+
+ return 0;
+}
+
+static const struct fbnic_tlv_index fbnic_ownership_resp_index[] = {
+ FBNIC_TLV_ATTR_U64(FBNIC_FW_OWNERSHIP_TIME),
+ FBNIC_TLV_ATTR_LAST
+};
+
+static int fbnic_fw_parse_ownership_resp(void *opaque,
+ struct fbnic_tlv_msg **results)
+{
+ struct fbnic_dev *fbd = (struct fbnic_dev *)opaque;
+
+ /* Count the ownership response as a heartbeat reply */
+ fbd->last_heartbeat_response = jiffies;
+
+ /* Capture firmware time for logging and firmware crash check */
+ fbd->firmware_time = fta_get_uint(results, FBNIC_FW_OWNERSHIP_TIME);
+
+ return 0;
+}
+
+static const struct fbnic_tlv_index fbnic_heartbeat_resp_index[] = {
+ FBNIC_TLV_ATTR_U64(FBNIC_FW_HEARTBEAT_UPTIME),
+ FBNIC_TLV_ATTR_LAST
+};
+
+static int fbnic_fw_parse_heartbeat_resp(void *opaque,
+ struct fbnic_tlv_msg **results)
+{
+ struct fbnic_dev *fbd = (struct fbnic_dev *)opaque;
+
+ fbd->last_heartbeat_response = jiffies;
+
+ /* Capture firmware time for logging and firmware crash check */
+ fbd->firmware_time = fta_get_uint(results, FBNIC_FW_HEARTBEAT_UPTIME);
+
+ return 0;
+}
+
+static int fbnic_fw_xmit_heartbeat_message(struct fbnic_dev *fbd)
+{
+ unsigned long req_time = jiffies;
+ struct fbnic_tlv_msg *msg;
+ int err = 0;
+
+ if (!fbnic_fw_present(fbd))
+ return -ENODEV;
+
+ msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_HEARTBEAT_REQ);
+ if (!msg)
+ return -ENOMEM;
+
+ err = fbnic_mbx_map_tlv_msg(fbd, msg);
+ if (err)
+ goto free_message;
+
+ fbd->last_heartbeat_request = req_time;
+ fbd->prev_firmware_time = fbd->firmware_time;
+
+ return err;
+
+free_message:
+ free_page((unsigned long)msg);
+ return err;
+}
+
+static bool fbnic_fw_heartbeat_current(struct fbnic_dev *fbd)
+{
+ unsigned long last_response = fbd->last_heartbeat_response;
+ unsigned long last_request = fbd->last_heartbeat_request;
+
+ return !time_before(last_response, last_request);
+}
+
+int fbnic_fw_init_heartbeat(struct fbnic_dev *fbd, bool poll)
+{
+ int err = -ETIMEDOUT;
+ int attempts = 50;
+
+ if (!fbnic_fw_present(fbd))
+ return -ENODEV;
+
+ while (attempts--) {
+ msleep(200);
+ if (poll)
+ fbnic_mbx_poll(fbd);
+
+ if (!fbnic_fw_heartbeat_current(fbd))
+ continue;
+
+ /* Place new message on mailbox to elicit a response */
+ err = fbnic_fw_xmit_heartbeat_message(fbd);
+ if (err)
+ dev_warn(fbd->dev,
+ "Failed to send heartbeat message: %d\n",
+ err);
+ break;
+ }
+
+ return err;
+}
+
+void fbnic_fw_check_heartbeat(struct fbnic_dev *fbd)
+{
+ unsigned long last_request = fbd->last_heartbeat_request;
+ int err;
+
+ /* Do not check heartbeat or send another request until current
+ * period has expired. Otherwise we might start spamming requests.
+ */
+ if (time_is_after_jiffies(last_request + FW_HEARTBEAT_PERIOD))
+ return;
+
+ /* We already reported no mailbox. Wait for it to come back */
+ if (!fbd->fw_heartbeat_enabled)
+ return;
+
+	/* Did the firmware miss the last heartbeat, or did its reported
+	 * uptime go backwards (suggesting a firmware restart)?
+	 */
+ if (!fbnic_fw_heartbeat_current(fbd) ||
+ fbd->firmware_time < fbd->prev_firmware_time) {
+ dev_warn(fbd->dev,
+ "Firmware did not respond to heartbeat message\n");
+ fbd->fw_heartbeat_enabled = false;
+ }
+
+ /* Place new message on mailbox to elicit a response */
+ err = fbnic_fw_xmit_heartbeat_message(fbd);
+ if (err)
+ dev_warn(fbd->dev, "Failed to send heartbeat message\n");
+}
+
+/**
+ * fbnic_fw_xmit_coredump_info_msg - Create and transmit a coredump info message
+ * @fbd: FBNIC device structure
+ * @cmpl_data: Structure to store info in
+ * @force: Force coredump event if one hasn't already occurred
+ *
+ * Return: zero on success, negative errno on failure
+ *
+ * Asks the FW for info about the coredump. If a coredump doesn't exist, one
+ * can optionally be forced by setting @force to true.
+ */
+int fbnic_fw_xmit_coredump_info_msg(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data,
+ bool force)
+{
+ struct fbnic_tlv_msg *msg;
+ int err = 0;
+
+ msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_COREDUMP_GET_INFO_REQ);
+ if (!msg)
+ return -ENOMEM;
+
+ if (force) {
+ err = fbnic_tlv_attr_put_flag(msg, FBNIC_FW_COREDUMP_REQ_INFO_CREATE);
+ if (err)
+ goto free_msg;
+ }
+
+ err = fbnic_mbx_map_req_w_cmpl(fbd, msg, cmpl_data);
+ if (err)
+ goto free_msg;
+
+ return 0;
+
+free_msg:
+ free_page((unsigned long)msg);
+ return err;
+}
+
+static const struct fbnic_tlv_index fbnic_coredump_info_resp_index[] = {
+ FBNIC_TLV_ATTR_FLAG(FBNIC_FW_COREDUMP_INFO_AVAILABLE),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_COREDUMP_INFO_SIZE),
+ FBNIC_TLV_ATTR_S32(FBNIC_FW_COREDUMP_INFO_ERROR),
+ FBNIC_TLV_ATTR_LAST
+};
+
+static int
+fbnic_fw_parse_coredump_info_resp(void *opaque, struct fbnic_tlv_msg **results)
+{
+ struct fbnic_fw_completion *cmpl_data;
+ struct fbnic_dev *fbd = opaque;
+ u32 msg_type;
+ s32 err;
+
+ /* Verify we have a completion pointer to provide with data */
+ msg_type = FBNIC_TLV_MSG_ID_COREDUMP_GET_INFO_RESP;
+ cmpl_data = fbnic_fw_get_cmpl_by_type(fbd, msg_type);
+ if (!cmpl_data)
+ return -ENOSPC;
+
+ err = fta_get_sint(results, FBNIC_FW_COREDUMP_INFO_ERROR);
+ if (err)
+ goto msg_err;
+
+ if (!results[FBNIC_FW_COREDUMP_INFO_AVAILABLE]) {
+ err = -ENOENT;
+ goto msg_err;
+ }
+
+ cmpl_data->u.coredump_info.size =
+ fta_get_uint(results, FBNIC_FW_COREDUMP_INFO_SIZE);
+
+msg_err:
+ cmpl_data->result = err;
+ complete(&cmpl_data->done);
+ fbnic_fw_put_cmpl(cmpl_data);
+
+ return err;
+}
+
+/**
+ * fbnic_fw_xmit_coredump_read_msg - Create and transmit a coredump read request
+ * @fbd: FBNIC device structure
+ * @cmpl_data: Completion struct to store coredump
+ * @offset: Offset into coredump requested
+ * @length: Length of section of coredump to fetch
+ *
+ * Return: zero on success, negative errno on failure
+ *
+ * Asks the firmware to provide a section of the coredump back in a message.
+ * The response will have an offset and size matching the values provided.
+ */
+int fbnic_fw_xmit_coredump_read_msg(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data,
+ u32 offset, u32 length)
+{
+ struct fbnic_tlv_msg *msg;
+ int err = 0;
+
+ msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_COREDUMP_READ_REQ);
+ if (!msg)
+ return -ENOMEM;
+
+ if (offset) {
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_COREDUMP_READ_OFFSET,
+ offset);
+ if (err)
+ goto free_message;
+ }
+
+ if (length) {
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_COREDUMP_READ_LENGTH,
+ length);
+ if (err)
+ goto free_message;
+ }
+
+ err = fbnic_mbx_map_req_w_cmpl(fbd, msg, cmpl_data);
+ if (err)
+ goto free_message;
+
+ return 0;
+
+free_message:
+ free_page((unsigned long)msg);
+ return err;
+}
+
+static const struct fbnic_tlv_index fbnic_coredump_resp_index[] = {
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_COREDUMP_READ_OFFSET),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_COREDUMP_READ_LENGTH),
+ FBNIC_TLV_ATTR_RAW_DATA(FBNIC_FW_COREDUMP_READ_DATA),
+ FBNIC_TLV_ATTR_S32(FBNIC_FW_COREDUMP_READ_ERROR),
+ FBNIC_TLV_ATTR_LAST
+};
+
+static int fbnic_fw_parse_coredump_resp(void *opaque,
+ struct fbnic_tlv_msg **results)
+{
+ struct fbnic_fw_completion *cmpl_data;
+ u32 index, last_offset, last_length;
+ struct fbnic_dev *fbd = opaque;
+ struct fbnic_tlv_msg *data_hdr;
+ u32 length, offset;
+ u32 msg_type;
+ s32 err;
+
+ /* Verify we have a completion pointer to provide with data */
+ msg_type = FBNIC_TLV_MSG_ID_COREDUMP_READ_RESP;
+ cmpl_data = fbnic_fw_get_cmpl_by_type(fbd, msg_type);
+ if (!cmpl_data)
+ return -ENOSPC;
+
+ err = fta_get_sint(results, FBNIC_FW_COREDUMP_READ_ERROR);
+ if (err)
+ goto msg_err;
+
+ data_hdr = results[FBNIC_FW_COREDUMP_READ_DATA];
+ if (!data_hdr) {
+ err = -ENODATA;
+ goto msg_err;
+ }
+
+ offset = fta_get_uint(results, FBNIC_FW_COREDUMP_READ_OFFSET);
+ length = fta_get_uint(results, FBNIC_FW_COREDUMP_READ_LENGTH);
+
+ if (length > le16_to_cpu(data_hdr->hdr.len) - sizeof(u32)) {
+ dev_err(fbd->dev, "length greater than size of message\n");
+ err = -EINVAL;
+ goto msg_err;
+ }
+
+ /* Only the last offset can have a length != stride */
+ last_length =
+ (cmpl_data->u.coredump.size % cmpl_data->u.coredump.stride) ? :
+ cmpl_data->u.coredump.stride;
+ last_offset = cmpl_data->u.coredump.size - last_length;
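+	/* Worked example (illustrative values): with size = 10000 and
+	 * stride = 4096, last_length = 10000 % 4096 = 1808 and
+	 * last_offset = 8192, i.e. chunks are expected at offsets 0, 4096
+	 * and 8192, with only the final chunk allowed to be 1808 bytes.
+	 */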
+
+ /* Verify offset and length */
+ if (offset % cmpl_data->u.coredump.stride || offset > last_offset) {
+ dev_err(fbd->dev, "offset %d out of range\n", offset);
+ err = -EINVAL;
+ } else if (length != ((offset == last_offset) ?
+ last_length : cmpl_data->u.coredump.stride)) {
+ dev_err(fbd->dev, "length %d out of range for offset %d\n",
+ length, offset);
+ err = -EINVAL;
+ }
+ if (err)
+ goto msg_err;
+
+ /* If data pointer is NULL it is already filled, just skip the copy */
+ index = offset / cmpl_data->u.coredump.stride;
+ if (!cmpl_data->u.coredump.data[index])
+ goto msg_err;
+
+ /* Copy data and mark index filled by setting pointer to NULL */
+ memcpy(cmpl_data->u.coredump.data[index],
+ fbnic_tlv_attr_get_value_ptr(data_hdr), length);
+ cmpl_data->u.coredump.data[index] = NULL;
+
+msg_err:
+ cmpl_data->result = err;
+ complete(&cmpl_data->done);
+ fbnic_fw_put_cmpl(cmpl_data);
+
+ return err;
+}
+
+int fbnic_fw_xmit_fw_start_upgrade(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data,
+ unsigned int id, unsigned int len)
+{
+ struct fbnic_tlv_msg *msg;
+ int err;
+
+ if (!fbnic_fw_present(fbd))
+ return -ENODEV;
+
+ if (!len)
+ return -EINVAL;
+
+ msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_FW_START_UPGRADE_REQ);
+ if (!msg)
+ return -ENOMEM;
+
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_START_UPGRADE_SECTION, id);
+ if (err)
+ goto free_message;
+
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_START_UPGRADE_IMAGE_LENGTH,
+ len);
+ if (err)
+ goto free_message;
+
+ err = fbnic_mbx_map_req_w_cmpl(fbd, msg, cmpl_data);
+ if (err)
+ goto free_message;
+
+ return 0;
+
+free_message:
+ free_page((unsigned long)msg);
+ return err;
+}
+
+static const struct fbnic_tlv_index fbnic_fw_start_upgrade_resp_index[] = {
+ FBNIC_TLV_ATTR_S32(FBNIC_FW_START_UPGRADE_ERROR),
+ FBNIC_TLV_ATTR_LAST
+};
+
+static int fbnic_fw_parse_fw_start_upgrade_resp(void *opaque,
+ struct fbnic_tlv_msg **results)
+{
+ struct fbnic_fw_completion *cmpl_data;
+ struct fbnic_dev *fbd = opaque;
+ u32 msg_type;
+ s32 err;
+
+ /* Verify we have a completion pointer */
+ msg_type = FBNIC_TLV_MSG_ID_FW_START_UPGRADE_REQ;
+ cmpl_data = fbnic_fw_get_cmpl_by_type(fbd, msg_type);
+ if (!cmpl_data)
+ return -ENOSPC;
+
+ /* Check for errors */
+ err = fta_get_sint(results, FBNIC_FW_START_UPGRADE_ERROR);
+
+ cmpl_data->result = err;
+ complete(&cmpl_data->done);
+ fbnic_fw_put_cmpl(cmpl_data);
+
+ return 0;
+}
+
+int fbnic_fw_xmit_fw_write_chunk(struct fbnic_dev *fbd,
+ const u8 *data, u32 offset, u16 length,
+ int cancel_error)
+{
+ struct fbnic_tlv_msg *msg;
+ int err;
+
+ msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_RESP);
+ if (!msg)
+ return -ENOMEM;
+
+ /* Report error to FW to cancel upgrade */
+ if (cancel_error) {
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_WRITE_CHUNK_ERROR,
+ cancel_error);
+ if (err)
+ goto free_message;
+ }
+
+ if (data) {
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_WRITE_CHUNK_OFFSET,
+ offset);
+ if (err)
+ goto free_message;
+
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_WRITE_CHUNK_LENGTH,
+ length);
+ if (err)
+ goto free_message;
+
+ err = fbnic_tlv_attr_put_value(msg, FBNIC_FW_WRITE_CHUNK_DATA,
+ data + offset, length);
+ if (err)
+ goto free_message;
+ }
+
+ err = fbnic_mbx_map_tlv_msg(fbd, msg);
+ if (err)
+ goto free_message;
+
+ return 0;
+
+free_message:
+ free_page((unsigned long)msg);
+ return err;
+}
+
+static const struct fbnic_tlv_index fbnic_fw_write_chunk_req_index[] = {
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_WRITE_CHUNK_OFFSET),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_WRITE_CHUNK_LENGTH),
+ FBNIC_TLV_ATTR_LAST
+};
+
+static int fbnic_fw_parse_fw_write_chunk_req(void *opaque,
+ struct fbnic_tlv_msg **results)
+{
+ struct fbnic_fw_completion *cmpl_data;
+ struct fbnic_dev *fbd = opaque;
+ u32 msg_type;
+ u32 offset;
+ u32 length;
+
+ /* Verify we have a completion pointer */
+ msg_type = FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_REQ;
+ cmpl_data = fbnic_fw_get_cmpl_by_type(fbd, msg_type);
+ if (!cmpl_data)
+ return -ENOSPC;
+
+ /* Pull length/offset pair and mark it as complete */
+ offset = fta_get_uint(results, FBNIC_FW_WRITE_CHUNK_OFFSET);
+ length = fta_get_uint(results, FBNIC_FW_WRITE_CHUNK_LENGTH);
+ cmpl_data->u.fw_update.offset = offset;
+ cmpl_data->u.fw_update.length = length;
+
+ complete(&cmpl_data->done);
+ fbnic_fw_put_cmpl(cmpl_data);
+
+ return 0;
+}
+
+static const struct fbnic_tlv_index fbnic_fw_finish_upgrade_req_index[] = {
+ FBNIC_TLV_ATTR_S32(FBNIC_FW_FINISH_UPGRADE_ERROR),
+ FBNIC_TLV_ATTR_LAST
+};
+
+static int fbnic_fw_parse_fw_finish_upgrade_req(void *opaque,
+ struct fbnic_tlv_msg **results)
+{
+ struct fbnic_fw_completion *cmpl_data;
+ struct fbnic_dev *fbd = opaque;
+ u32 msg_type;
+ s32 err;
+
+ /* Verify we have a completion pointer */
+ msg_type = FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_REQ;
+ cmpl_data = fbnic_fw_get_cmpl_by_type(fbd, msg_type);
+ if (!cmpl_data)
+ return -ENOSPC;
+
+ /* Check for errors */
+ err = fta_get_sint(results, FBNIC_FW_FINISH_UPGRADE_ERROR);
+
+ /* Close out update by incrementing offset by length which should
+ * match the total size of the component. Set length to 0 since no
+ * new chunks will be requested.
+ */
+ cmpl_data->u.fw_update.offset += cmpl_data->u.fw_update.length;
+ cmpl_data->u.fw_update.length = 0;
+
+ cmpl_data->result = err;
+ complete(&cmpl_data->done);
+ fbnic_fw_put_cmpl(cmpl_data);
+
+ return 0;
+}
+
+/**
+ * fbnic_fw_xmit_qsfp_read_msg - Transmit a QSFP read request
+ * @fbd: FBNIC device structure
+ * @cmpl_data: Structure to store EEPROM response in
+ * @page: Refers to page number on page enabled QSFP modules
+ * @bank: Refers to a collection of pages
+ * @offset: Offset into QSFP EEPROM requested
+ * @length: Length of section of QSFP EEPROM to fetch
+ *
+ * Return: zero on success, negative value on failure
+ *
+ * Asks the firmware to provide a section of the QSFP EEPROM back in a
+ * message. The response will have an offset and size matching the values
+ * provided.
+ */
+int fbnic_fw_xmit_qsfp_read_msg(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data,
+ u32 page, u32 bank, u32 offset, u32 length)
+{
+ struct fbnic_tlv_msg *msg;
+ int err = 0;
+
+ if (!length || length > TLV_MAX_DATA)
+ return -EINVAL;
+
+ msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_QSFP_READ_REQ);
+ if (!msg)
+ return -ENOMEM;
+
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_QSFP_BANK, bank);
+ if (err)
+ goto free_message;
+
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_QSFP_PAGE, page);
+ if (err)
+ goto free_message;
+
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_QSFP_OFFSET, offset);
+ if (err)
+ goto free_message;
+
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_QSFP_LENGTH, length);
+ if (err)
+ goto free_message;
+
+ err = fbnic_mbx_map_req_w_cmpl(fbd, msg, cmpl_data);
+ if (err)
+ goto free_message;
+
+ return 0;
+
+free_message:
+ free_page((unsigned long)msg);
+ return err;
+}
+
+static const struct fbnic_tlv_index fbnic_qsfp_read_resp_index[] = {
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_QSFP_BANK),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_QSFP_PAGE),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_QSFP_OFFSET),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_QSFP_LENGTH),
+ FBNIC_TLV_ATTR_RAW_DATA(FBNIC_FW_QSFP_DATA),
+ FBNIC_TLV_ATTR_S32(FBNIC_FW_QSFP_ERROR),
+ FBNIC_TLV_ATTR_LAST
+};
+
+static int fbnic_fw_parse_qsfp_read_resp(void *opaque,
+ struct fbnic_tlv_msg **results)
+{
+ struct fbnic_fw_completion *cmpl_data;
+ struct fbnic_dev *fbd = opaque;
+ struct fbnic_tlv_msg *data_hdr;
+ u32 length, offset, page, bank;
+ u8 *data;
+ s32 err;
+
+ /* Verify we have a completion pointer to provide with data */
+ cmpl_data = fbnic_fw_get_cmpl_by_type(fbd,
+ FBNIC_TLV_MSG_ID_QSFP_READ_RESP);
+ if (!cmpl_data)
+ return -ENOSPC;
+
+ bank = fta_get_uint(results, FBNIC_FW_QSFP_BANK);
+ if (bank != cmpl_data->u.qsfp.bank) {
+ dev_warn(fbd->dev, "bank not equal to bank requested: %d vs %d\n",
+ bank, cmpl_data->u.qsfp.bank);
+ err = -EINVAL;
+ goto msg_err;
+ }
+
+ page = fta_get_uint(results, FBNIC_FW_QSFP_PAGE);
+ if (page != cmpl_data->u.qsfp.page) {
+ dev_warn(fbd->dev, "page not equal to page requested: %d vs %d\n",
+ page, cmpl_data->u.qsfp.page);
+ err = -EINVAL;
+ goto msg_err;
+ }
+
+ offset = fta_get_uint(results, FBNIC_FW_QSFP_OFFSET);
+ length = fta_get_uint(results, FBNIC_FW_QSFP_LENGTH);
+
+ if (length != cmpl_data->u.qsfp.length ||
+ offset != cmpl_data->u.qsfp.offset) {
+ dev_warn(fbd->dev,
+ "offset/length not equal to size requested: %d/%d vs %d/%d\n",
+ offset, length,
+ cmpl_data->u.qsfp.offset, cmpl_data->u.qsfp.length);
+ err = -EINVAL;
+ goto msg_err;
+ }
+
+ err = fta_get_sint(results, FBNIC_FW_QSFP_ERROR);
+ if (err)
+ goto msg_err;
+
+ data_hdr = results[FBNIC_FW_QSFP_DATA];
+ if (!data_hdr) {
+ err = -ENODATA;
+ goto msg_err;
+ }
+
+ /* Copy data */
+ data = fbnic_tlv_attr_get_value_ptr(data_hdr);
+ memcpy(cmpl_data->u.qsfp.data, data, length);
+msg_err:
+ cmpl_data->result = err;
+ complete(&cmpl_data->done);
+ fbnic_fw_put_cmpl(cmpl_data);
+
+ return err;
+}
+
+/**
+ * fbnic_fw_xmit_tsene_read_msg - Create and transmit a sensor read request
+ * @fbd: FBNIC device structure
+ * @cmpl_data: Completion data structure to store sensor response
+ *
+ * Asks the firmware to provide an update with the latest sensor data.
+ * The response will contain temperature and voltage readings.
+ *
+ * Return: 0 on success, negative error value on failure
+ */
+int fbnic_fw_xmit_tsene_read_msg(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data)
+{
+ struct fbnic_tlv_msg *msg;
+ int err;
+
+ if (!fbnic_fw_present(fbd))
+ return -ENODEV;
+
+ msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_TSENE_READ_REQ);
+ if (!msg)
+ return -ENOMEM;
+
+ err = fbnic_mbx_map_req_w_cmpl(fbd, msg, cmpl_data);
+ if (err)
+ goto free_message;
+
+ return 0;
+
+free_message:
+ free_page((unsigned long)msg);
+ return err;
+}
+
+static const struct fbnic_tlv_index fbnic_tsene_read_resp_index[] = {
+ FBNIC_TLV_ATTR_S32(FBNIC_FW_TSENE_THERM),
+ FBNIC_TLV_ATTR_S32(FBNIC_FW_TSENE_VOLT),
+ FBNIC_TLV_ATTR_S32(FBNIC_FW_TSENE_ERROR),
+ FBNIC_TLV_ATTR_LAST
+};
+
+static int fbnic_fw_parse_tsene_read_resp(void *opaque,
+ struct fbnic_tlv_msg **results)
+{
+ struct fbnic_fw_completion *cmpl_data;
+ struct fbnic_dev *fbd = opaque;
+ s32 err_resp;
+ int err = 0;
+
+ /* Verify we have a completion pointer to provide with data */
+ cmpl_data = fbnic_fw_get_cmpl_by_type(fbd,
+ FBNIC_TLV_MSG_ID_TSENE_READ_RESP);
+ if (!cmpl_data)
+ return -ENOSPC;
+
+ err_resp = fta_get_sint(results, FBNIC_FW_TSENE_ERROR);
+ if (err_resp)
+ goto msg_err;
+
+ if (!results[FBNIC_FW_TSENE_THERM] || !results[FBNIC_FW_TSENE_VOLT]) {
+ err = -EINVAL;
+ goto msg_err;
+ }
+
+ cmpl_data->u.tsene.millidegrees =
+ fta_get_sint(results, FBNIC_FW_TSENE_THERM);
+ cmpl_data->u.tsene.millivolts =
+ fta_get_sint(results, FBNIC_FW_TSENE_VOLT);
+
+msg_err:
+ cmpl_data->result = err_resp ? : err;
+ complete(&cmpl_data->done);
+ fbnic_fw_put_cmpl(cmpl_data);
+
+ return err;
+}
+
+static const struct fbnic_tlv_index fbnic_fw_log_req_index[] = {
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_LOG_MSEC),
+ FBNIC_TLV_ATTR_U64(FBNIC_FW_LOG_INDEX),
+ FBNIC_TLV_ATTR_STRING(FBNIC_FW_LOG_MSG, FBNIC_FW_LOG_MAX_SIZE),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_LOG_LENGTH),
+ FBNIC_TLV_ATTR_ARRAY(FBNIC_FW_LOG_MSEC_ARRAY),
+ FBNIC_TLV_ATTR_ARRAY(FBNIC_FW_LOG_INDEX_ARRAY),
+ FBNIC_TLV_ATTR_ARRAY(FBNIC_FW_LOG_MSG_ARRAY),
+ FBNIC_TLV_ATTR_LAST
+};
+
+static int fbnic_fw_process_log_array(struct fbnic_tlv_msg **results,
+ u16 length, u16 arr_type_idx,
+ u16 attr_type_idx,
+ struct fbnic_tlv_msg **tlv_array_out)
+{
+ struct fbnic_tlv_msg *attr;
+ int attr_len;
+ int err;
+
+ if (!results[attr_type_idx])
+ return -EINVAL;
+
+ tlv_array_out[0] = results[attr_type_idx];
+
+ if (!length)
+ return 0;
+
+ if (!results[arr_type_idx])
+ return -EINVAL;
+
+ attr = results[arr_type_idx];
+ attr_len = le16_to_cpu(attr->hdr.len) / sizeof(u32) - 1;
+ err = fbnic_tlv_attr_parse_array(&attr[1], attr_len, &tlv_array_out[1],
+ fbnic_fw_log_req_index,
+ attr_type_idx,
+ length);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int fbnic_fw_parse_logs(struct fbnic_dev *fbd,
+ struct fbnic_tlv_msg **msec_tlv,
+ struct fbnic_tlv_msg **index_tlv,
+ struct fbnic_tlv_msg **log_tlv,
+ int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ char log[FBNIC_FW_LOG_MAX_SIZE];
+ ssize_t len;
+ u64 index;
+ u32 msec;
+ int err;
+
+ if (!msec_tlv[i] || !index_tlv[i] || !log_tlv[i]) {
+ dev_warn(fbd->dev, "Received log message with missing attributes!\n");
+ return -EINVAL;
+ }
+
+ index = fbnic_tlv_attr_get_signed(index_tlv[i], 0);
+ msec = fbnic_tlv_attr_get_signed(msec_tlv[i], 0);
+ len = fbnic_tlv_attr_get_string(log_tlv[i], log,
+ FBNIC_FW_LOG_MAX_SIZE);
+ if (len < 0)
+ return len;
+
+ err = fbnic_fw_log_write(fbd, index, msec, log);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int fbnic_fw_parse_log_req(void *opaque,
+ struct fbnic_tlv_msg **results)
+{
+ struct fbnic_tlv_msg *index_tlv[FBNIC_FW_MAX_LOG_HISTORY];
+ struct fbnic_tlv_msg *msec_tlv[FBNIC_FW_MAX_LOG_HISTORY];
+ struct fbnic_tlv_msg *log_tlv[FBNIC_FW_MAX_LOG_HISTORY];
+ struct fbnic_dev *fbd = opaque;
+ u16 length;
+ int err;
+
+ length = fta_get_uint(results, FBNIC_FW_LOG_LENGTH);
+ if (length >= FBNIC_FW_MAX_LOG_HISTORY)
+ return -E2BIG;
+
+ err = fbnic_fw_process_log_array(results, length,
+ FBNIC_FW_LOG_MSEC_ARRAY,
+ FBNIC_FW_LOG_MSEC, msec_tlv);
+ if (err)
+ return err;
+
+ err = fbnic_fw_process_log_array(results, length,
+ FBNIC_FW_LOG_INDEX_ARRAY,
+ FBNIC_FW_LOG_INDEX, index_tlv);
+ if (err)
+ return err;
+
+ err = fbnic_fw_process_log_array(results, length,
+ FBNIC_FW_LOG_MSG_ARRAY,
+ FBNIC_FW_LOG_MSG, log_tlv);
+ if (err)
+ return err;
+
+ err = fbnic_fw_parse_logs(fbd, msec_tlv, index_tlv, log_tlv,
+ length + 1);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int fbnic_fw_xmit_send_logs(struct fbnic_dev *fbd, bool enable,
+ bool send_log_history)
+{
+ struct fbnic_tlv_msg *msg;
+ int err;
+
+ if (fbd->fw_cap.running.mgmt.version < MIN_FW_VER_CODE_LOG) {
+ dev_warn(fbd->dev, "Firmware version is too old to support firmware logs!\n");
+ return -EOPNOTSUPP;
+ }
+
+ msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_LOG_SEND_LOGS_REQ);
+ if (!msg)
+ return -ENOMEM;
+
+ if (enable) {
+ err = fbnic_tlv_attr_put_flag(msg, FBNIC_SEND_LOGS);
+ if (err)
+ goto free_message;
+
+ /* Report request for version 1 of logs */
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_SEND_LOGS_VERSION,
+ FBNIC_FW_LOG_VERSION);
+ if (err)
+ goto free_message;
+
+ if (send_log_history) {
+ err = fbnic_tlv_attr_put_flag(msg,
+ FBNIC_SEND_LOGS_HISTORY);
+ if (err)
+ goto free_message;
+ }
+ }
+
+ err = fbnic_mbx_map_tlv_msg(fbd, msg);
+ if (err)
+ goto free_message;
+
+ return 0;
+
+free_message:
+ free_page((unsigned long)msg);
+ return err;
+}
+
+static const struct fbnic_tlv_parser fbnic_fw_tlv_parser[] = {
+ FBNIC_TLV_PARSER(FW_CAP_RESP, fbnic_fw_cap_resp_index,
+ fbnic_fw_parse_cap_resp),
+ FBNIC_TLV_PARSER(OWNERSHIP_RESP, fbnic_ownership_resp_index,
+ fbnic_fw_parse_ownership_resp),
+ FBNIC_TLV_PARSER(HEARTBEAT_RESP, fbnic_heartbeat_resp_index,
+ fbnic_fw_parse_heartbeat_resp),
+ FBNIC_TLV_PARSER(COREDUMP_GET_INFO_RESP,
+ fbnic_coredump_info_resp_index,
+ fbnic_fw_parse_coredump_info_resp),
+ FBNIC_TLV_PARSER(COREDUMP_READ_RESP, fbnic_coredump_resp_index,
+ fbnic_fw_parse_coredump_resp),
+ FBNIC_TLV_PARSER(FW_START_UPGRADE_RESP,
+ fbnic_fw_start_upgrade_resp_index,
+ fbnic_fw_parse_fw_start_upgrade_resp),
+ FBNIC_TLV_PARSER(FW_WRITE_CHUNK_REQ,
+ fbnic_fw_write_chunk_req_index,
+ fbnic_fw_parse_fw_write_chunk_req),
+ FBNIC_TLV_PARSER(FW_FINISH_UPGRADE_REQ,
+ fbnic_fw_finish_upgrade_req_index,
+ fbnic_fw_parse_fw_finish_upgrade_req),
+ FBNIC_TLV_PARSER(QSFP_READ_RESP,
+ fbnic_qsfp_read_resp_index,
+ fbnic_fw_parse_qsfp_read_resp),
+ FBNIC_TLV_PARSER(TSENE_READ_RESP,
+ fbnic_tsene_read_resp_index,
+ fbnic_fw_parse_tsene_read_resp),
+ FBNIC_TLV_PARSER(LOG_MSG_REQ,
+ fbnic_fw_log_req_index,
+ fbnic_fw_parse_log_req),
+ FBNIC_TLV_MSG_ERROR
+};
+
+static void fbnic_mbx_process_rx_msgs(struct fbnic_dev *fbd)
+{
+ struct fbnic_fw_mbx *rx_mbx = &fbd->mbx[FBNIC_IPC_MBX_RX_IDX];
+ u8 head = rx_mbx->head;
+ u64 desc, length;
+
+ while (head != rx_mbx->tail) {
+ struct fbnic_tlv_msg *msg;
+ int err;
+
+ desc = __fbnic_mbx_rd_desc(fbd, FBNIC_IPC_MBX_RX_IDX, head);
+ if (!(desc & FBNIC_IPC_MBX_DESC_FW_CMPL))
+ break;
+
+ dma_unmap_single(fbd->dev, rx_mbx->buf_info[head].addr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+
+ msg = rx_mbx->buf_info[head].msg;
+
+ length = FIELD_GET(FBNIC_IPC_MBX_DESC_LEN_MASK, desc);
+
+ /* Ignore NULL mailbox descriptors */
+ if (!length)
+ goto next_page;
+
+ /* Report descriptors with length greater than page size */
+ if (length > PAGE_SIZE) {
+ dev_warn(fbd->dev,
+ "Invalid mailbox descriptor length: %lld\n",
+ length);
+ goto next_page;
+ }
+
+ if (le16_to_cpu(msg->hdr.len) * sizeof(u32) > length)
+ dev_warn(fbd->dev, "Mailbox message length mismatch\n");
+
+ /* If parsing fails dump contents of message to dmesg */
+ err = fbnic_tlv_msg_parse(fbd, msg, fbnic_fw_tlv_parser);
+ if (err) {
+ dev_warn(fbd->dev, "Unable to process message: %d\n",
+ err);
+ print_hex_dump(KERN_WARNING, "fbnic:",
+ DUMP_PREFIX_OFFSET, 16, 2,
+ msg, length, true);
+ }
+
+ dev_dbg(fbd->dev, "Parsed msg type %d\n", msg->hdr.type);
+next_page:
+
+ free_page((unsigned long)rx_mbx->buf_info[head].msg);
+ rx_mbx->buf_info[head].msg = NULL;
+
+ head++;
+ head %= FBNIC_IPC_MBX_DESC_LEN;
+ }
+
+ /* Record head for next interrupt */
+ rx_mbx->head = head;
+
+ /* Make sure we have at least one page for the FW to write to */
+ fbnic_mbx_alloc_rx_msgs(fbd);
+}
+
+void fbnic_mbx_poll(struct fbnic_dev *fbd)
+{
+ fbnic_mbx_event(fbd);
+
+ fbnic_mbx_process_tx_msgs(fbd);
+ fbnic_mbx_process_rx_msgs(fbd);
+}
+
+int fbnic_mbx_poll_tx_ready(struct fbnic_dev *fbd)
+{
+ struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
+ unsigned long timeout = jiffies + 10 * HZ + 1;
+ int err, i;
+
+ do {
+ if (!time_is_after_jiffies(timeout))
+ return -ETIMEDOUT;
+
+ /* Force the firmware to trigger an interrupt response to
+ * avoid the mailbox getting stuck closed if the interrupt
+ * is reset.
+ */
+ fbnic_mbx_reset_desc_ring(fbd, FBNIC_IPC_MBX_TX_IDX);
+
+ /* Immediate fail if BAR4 went away */
+ if (!fbnic_fw_present(fbd))
+ return -ENODEV;
+
+ msleep(20);
+ } while (!fbnic_mbx_event(fbd));
+
+ /* FW has shown signs of life. Enable DMA and start Tx/Rx */
+ for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
+ fbnic_mbx_init_desc_ring(fbd, i);
+
+	/* Request an update from the firmware. This should overwrite
+	 * mgmt.version once we get the actual version from the firmware
+	 * in the capability response message.
+	 */
+ err = fbnic_fw_xmit_simple_msg(fbd, FBNIC_TLV_MSG_ID_HOST_CAP_REQ);
+ if (err)
+ goto clean_mbx;
+
+ /* Poll until we get a current management firmware version, use "1"
+ * to indicate we entered the polling state waiting for a response
+ */
+ for (fbd->fw_cap.running.mgmt.version = 1;
+ fbd->fw_cap.running.mgmt.version < MIN_FW_VER_CODE;) {
+ if (!tx_mbx->ready)
+ err = -ENODEV;
+ if (err)
+ goto clean_mbx;
+
+ msleep(20);
+ fbnic_mbx_poll(fbd);
+
+ /* set err, but wait till mgmt.version check to report it */
+ if (!time_is_after_jiffies(timeout))
+ err = -ETIMEDOUT;
+ }
+
+ return 0;
+clean_mbx:
+ /* Cleanup Rx buffers and disable mailbox */
+ fbnic_mbx_clean(fbd);
+ return err;
+}
+
+static void __fbnic_fw_evict_cmpl(struct fbnic_fw_completion *cmpl_data)
+{
+ cmpl_data->result = -EPIPE;
+ complete(&cmpl_data->done);
+}
+
+static void fbnic_mbx_evict_all_cmpl(struct fbnic_dev *fbd)
+{
+ int i;
+
+ for (i = 0; i < FBNIC_MBX_CMPL_SLOTS; i++) {
+ struct fbnic_fw_completion *cmpl_data = fbd->cmpl_data[i];
+
+ if (cmpl_data)
+ __fbnic_fw_evict_cmpl(cmpl_data);
+ }
+
+ memset(fbd->cmpl_data, 0, sizeof(fbd->cmpl_data));
+}
+
+void fbnic_mbx_flush_tx(struct fbnic_dev *fbd)
+{
+ unsigned long timeout = jiffies + 10 * HZ + 1;
+ struct fbnic_fw_mbx *tx_mbx;
+ u8 tail;
+
+	/* Grab a pointer to the Tx mailbox */
+	tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
+
+ spin_lock_irq(&fbd->fw_tx_lock);
+
+ /* Clear ready to prevent any further attempts to transmit */
+ tx_mbx->ready = false;
+
+ /* Read tail to determine the last tail state for the ring */
+ tail = tx_mbx->tail;
+
+ /* Flush any completions as we are no longer processing Rx */
+ fbnic_mbx_evict_all_cmpl(fbd);
+
+ spin_unlock_irq(&fbd->fw_tx_lock);
+
+	/* Give the firmware time to process the outstanding messages;
+	 * we will wait up to 10 seconds, which is 500 waits of 20ms.
+	 */
+ do {
+ u8 head = tx_mbx->head;
+
+ /* Tx ring is empty once head == tail */
+ if (head == tail)
+ break;
+
+ msleep(20);
+ fbnic_mbx_process_tx_msgs(fbd);
+ } while (time_is_after_jiffies(timeout));
+}
+
+int fbnic_fw_xmit_rpc_macda_sync(struct fbnic_dev *fbd)
+{
+ struct fbnic_tlv_msg *mac_array;
+ int i, addr_count = 0, err;
+ struct fbnic_tlv_msg *msg;
+ u32 rx_flags = 0;
+
+ /* Nothing to do if there is no FW to sync with */
+ if (!fbd->mbx[FBNIC_IPC_MBX_TX_IDX].ready)
+ return 0;
+
+ msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_RPC_MAC_SYNC_REQ);
+ if (!msg)
+ return -ENOMEM;
+
+ mac_array = fbnic_tlv_attr_nest_start(msg,
+ FBNIC_FW_RPC_MAC_SYNC_UC_ARRAY);
+ if (!mac_array)
+ goto free_message_nospc;
+
+ /* Populate the unicast MAC addrs and capture PROMISC/ALLMULTI flags */
+ for (addr_count = 0, i = FBNIC_RPC_TCAM_MACDA_PROMISC_IDX;
+ i >= fbd->mac_addr_boundary; i--) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];
+
+ if (mac_addr->state != FBNIC_TCAM_S_VALID)
+ continue;
+ if (test_bit(FBNIC_MAC_ADDR_T_ALLMULTI, mac_addr->act_tcam))
+ rx_flags |= FW_RPC_MAC_SYNC_RX_FLAGS_ALLMULTI;
+ if (test_bit(FBNIC_MAC_ADDR_T_PROMISC, mac_addr->act_tcam))
+ rx_flags |= FW_RPC_MAC_SYNC_RX_FLAGS_PROMISC;
+ if (!test_bit(FBNIC_MAC_ADDR_T_UNICAST, mac_addr->act_tcam))
+ continue;
+ if (addr_count == FW_RPC_MAC_SYNC_UC_ARRAY_SIZE) {
+ rx_flags |= FW_RPC_MAC_SYNC_RX_FLAGS_PROMISC;
+ continue;
+ }
+
+ err = fbnic_tlv_attr_put_value(mac_array,
+ FBNIC_FW_RPC_MAC_SYNC_MAC_ADDR,
+ mac_addr->value.addr8,
+ ETH_ALEN);
+ if (err)
+ goto free_message;
+ addr_count++;
+ }
+
+ /* Close array */
+ fbnic_tlv_attr_nest_stop(msg);
+
+ mac_array = fbnic_tlv_attr_nest_start(msg,
+ FBNIC_FW_RPC_MAC_SYNC_MC_ARRAY);
+ if (!mac_array)
+ goto free_message_nospc;
+
+ /* Repeat for multicast addrs, record BROADCAST/ALLMULTI flags */
+ for (addr_count = 0, i = FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX;
+ i < fbd->mac_addr_boundary; i++) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];
+
+ if (mac_addr->state != FBNIC_TCAM_S_VALID)
+ continue;
+ if (test_bit(FBNIC_MAC_ADDR_T_BROADCAST, mac_addr->act_tcam))
+ rx_flags |= FW_RPC_MAC_SYNC_RX_FLAGS_BROADCAST;
+ if (test_bit(FBNIC_MAC_ADDR_T_ALLMULTI, mac_addr->act_tcam))
+ rx_flags |= FW_RPC_MAC_SYNC_RX_FLAGS_ALLMULTI;
+ if (!test_bit(FBNIC_MAC_ADDR_T_MULTICAST, mac_addr->act_tcam))
+ continue;
+ if (addr_count == FW_RPC_MAC_SYNC_MC_ARRAY_SIZE) {
+ rx_flags |= FW_RPC_MAC_SYNC_RX_FLAGS_ALLMULTI;
+ continue;
+ }
+
+ err = fbnic_tlv_attr_put_value(mac_array,
+ FBNIC_FW_RPC_MAC_SYNC_MAC_ADDR,
+ mac_addr->value.addr8,
+ ETH_ALEN);
+ if (err)
+ goto free_message;
+ addr_count++;
+ }
+
+ /* Close array */
+ fbnic_tlv_attr_nest_stop(msg);
+
+ /* Report flags at end of list */
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_RPC_MAC_SYNC_RX_FLAGS,
+ rx_flags);
+ if (err)
+ goto free_message;
+
+ /* Send message off to FW notifying it of current RPC config */
+ err = fbnic_mbx_map_tlv_msg(fbd, msg);
+ if (err)
+ goto free_message;
+ return 0;
+free_message_nospc:
+ err = -ENOSPC;
+free_message:
+ free_page((unsigned long)msg);
+ return err;
+}
+
+void fbnic_get_fw_ver_commit_str(struct fbnic_dev *fbd, char *fw_version,
+ const size_t str_sz)
+{
+ struct fbnic_fw_ver *mgmt = &fbd->fw_cap.running.mgmt;
+ const char *delim = "";
+
+ if (mgmt->commit[0])
+ delim = "_";
+
+ fbnic_mk_full_fw_ver_str(mgmt->version, delim, mgmt->commit,
+ fw_version, str_sz);
+}
+
+struct fbnic_fw_completion *__fbnic_fw_alloc_cmpl(u32 msg_type,
+ size_t priv_size)
+{
+ struct fbnic_fw_completion *cmpl;
+
+ cmpl = kzalloc(sizeof(*cmpl) + priv_size, GFP_KERNEL);
+ if (!cmpl)
+ return NULL;
+
+ cmpl->msg_type = msg_type;
+ init_completion(&cmpl->done);
+ kref_init(&cmpl->ref_count);
+
+ return cmpl;
+}
+
+struct fbnic_fw_completion *fbnic_fw_alloc_cmpl(u32 msg_type)
+{
+ return __fbnic_fw_alloc_cmpl(msg_type, 0);
+}
+
+void fbnic_fw_put_cmpl(struct fbnic_fw_completion *fw_cmpl)
+{
+ kref_put(&fw_cmpl->ref_count, fbnic_fw_release_cmpl_data);
+}
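For context, a hedged sketch of how a caller typically drives these completion helpers (allocate, register, send, wait, release). This is kernel-context code that assumes the driver's own headers; the 2 * HZ wait and the error handling are illustrative and not taken from this patch:

/* Hedged sketch: register a completion for the expected response, send the
 * request, wait for the handler to complete it, then drop the reference.
 */
static int fbnic_fw_wait_for_resp_example(struct fbnic_dev *fbd, u32 msg_type)
{
	struct fbnic_fw_completion *cmpl;
	int err;

	cmpl = fbnic_fw_alloc_cmpl(msg_type);
	if (!cmpl)
		return -ENOMEM;

	err = fbnic_mbx_set_cmpl(fbd, cmpl);	/* claim a completion slot */
	if (err)
		goto put_cmpl;

	/* ... build and transmit the request message here ... */

	if (!wait_for_completion_timeout(&cmpl->done, 2 * HZ))
		err = -ETIMEDOUT;
	else
		err = cmpl->result;		/* filled in by the response handler */

	fbnic_mbx_clear_cmpl(fbd, cmpl);
put_cmpl:
	fbnic_fw_put_cmpl(cmpl);
	return err;
}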
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
new file mode 100644
index 000000000000..1ecd777aaada
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
@@ -0,0 +1,326 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#ifndef _FBNIC_FW_H_
+#define _FBNIC_FW_H_
+
+#include <linux/if_ether.h>
+#include <linux/types.h>
+
+struct fbnic_dev;
+struct fbnic_tlv_msg;
+
+struct fbnic_fw_mbx {
+ u8 ready, head, tail;
+ struct {
+ struct fbnic_tlv_msg *msg;
+ dma_addr_t addr;
+ } buf_info[FBNIC_IPC_MBX_DESC_LEN];
+};
+
+// FW_VER_MAX_SIZE must match ETHTOOL_FWVERS_LEN
+#define FBNIC_FW_VER_MAX_SIZE 32
+// Formatted version is in the format XX.YY.ZZ_RRR_COMMIT
+#define FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE (FBNIC_FW_VER_MAX_SIZE - 13)
+#define FBNIC_FW_LOG_VERSION 1
+#define FBNIC_FW_LOG_MAX_SIZE 256
+/*
+ * The maximum number of log entries which can fit in a single mailbox
+ * message. Firmware assumes each mailbox message is 4096B. The number of
+ * entries supported is the message size minus the headers for the message,
+ * the arrays, and the length field, divided by the per-entry cost of the
+ * array headers, the maximum LOG size, and the MSEC and INDEX sizes:
+ *
+ * MAX_LOG_HISTORY = ((4096 - TLV_HDR_SZ * 5 - LENGTH_SZ)
+ * / (FBNIC_FW_LOG_MAX_SIZE + TLV_HDR_SZ * 3 + MSEC_SZ
+ * + INDEX_SZ))
+ */
+#define FBNIC_FW_MAX_LOG_HISTORY 14
+
+struct fbnic_fw_ver {
+ u32 version;
+ char commit[FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE];
+};
+
+struct fbnic_fw_cap {
+ struct {
+ struct fbnic_fw_ver mgmt, bootloader;
+ } running;
+ struct {
+ struct fbnic_fw_ver mgmt, bootloader, undi;
+ } stored;
+ u8 active_slot;
+ u8 bmc_mac_addr[4][ETH_ALEN];
+ u8 bmc_present : 1;
+ u8 need_bmc_tcam_reinit : 1;
+ u8 need_bmc_macda_sync : 1;
+ u8 all_multi : 1;
+ u8 link_speed;
+ u8 link_fec;
+ u32 anti_rollback_version;
+};
+
+struct fbnic_fw_completion {
+ u32 msg_type;
+ struct completion done;
+ struct kref ref_count;
+ int result;
+ union {
+ struct {
+ u32 size;
+ } coredump_info;
+ struct {
+ u32 size;
+ u16 stride;
+ u8 *data[];
+ } coredump;
+ struct {
+ u32 offset;
+ u32 length;
+ } fw_update;
+ struct {
+ u16 length;
+ u8 offset;
+ u8 page;
+ u8 bank;
+ u8 data[] __aligned(sizeof(u32)) __counted_by(length);
+ } qsfp;
+ struct {
+ s32 millivolts;
+ s32 millidegrees;
+ } tsene;
+ } u;
+};
+
+void fbnic_mbx_init(struct fbnic_dev *fbd);
+void fbnic_mbx_clean(struct fbnic_dev *fbd);
+int fbnic_mbx_set_cmpl(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data);
+void fbnic_mbx_clear_cmpl(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data);
+void fbnic_mbx_poll(struct fbnic_dev *fbd);
+int fbnic_mbx_poll_tx_ready(struct fbnic_dev *fbd);
+void fbnic_mbx_flush_tx(struct fbnic_dev *fbd);
+int fbnic_fw_xmit_ownership_msg(struct fbnic_dev *fbd, bool take_ownership);
+int fbnic_fw_init_heartbeat(struct fbnic_dev *fbd, bool poll);
+void fbnic_fw_check_heartbeat(struct fbnic_dev *fbd);
+int fbnic_fw_xmit_coredump_info_msg(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data,
+ bool force);
+int fbnic_fw_xmit_coredump_read_msg(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data,
+ u32 offset, u32 length);
+int fbnic_fw_xmit_fw_start_upgrade(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data,
+ unsigned int id, unsigned int len);
+int fbnic_fw_xmit_fw_write_chunk(struct fbnic_dev *fbd,
+ const u8 *data, u32 offset, u16 length,
+ int cancel_error);
+int fbnic_fw_xmit_qsfp_read_msg(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data,
+ u32 page, u32 bank, u32 offset, u32 length);
+int fbnic_fw_xmit_tsene_read_msg(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data);
+int fbnic_fw_xmit_send_logs(struct fbnic_dev *fbd, bool enable,
+ bool send_log_history);
+int fbnic_fw_xmit_rpc_macda_sync(struct fbnic_dev *fbd);
+struct fbnic_fw_completion *__fbnic_fw_alloc_cmpl(u32 msg_type,
+ size_t priv_size);
+struct fbnic_fw_completion *fbnic_fw_alloc_cmpl(u32 msg_type);
+void fbnic_fw_put_cmpl(struct fbnic_fw_completion *cmpl_data);
+
+#define fbnic_mk_full_fw_ver_str(_rev_id, _delim, _commit, _str, _str_sz) \
+do { \
+ const u32 __rev_id = _rev_id; \
+ snprintf(_str, _str_sz, "%02lu.%02lu.%02lu-%03lu%s%s", \
+ FIELD_GET(FBNIC_FW_CAP_RESP_VERSION_MAJOR, __rev_id), \
+ FIELD_GET(FBNIC_FW_CAP_RESP_VERSION_MINOR, __rev_id), \
+ FIELD_GET(FBNIC_FW_CAP_RESP_VERSION_PATCH, __rev_id), \
+ FIELD_GET(FBNIC_FW_CAP_RESP_VERSION_BUILD, __rev_id), \
+ _delim, _commit); \
+} while (0)
+
+#define fbnic_mk_fw_ver_str(_rev_id, _str) \
+ fbnic_mk_full_fw_ver_str(_rev_id, "", "", _str, sizeof(_str))
+
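The packed version word decoded by fbnic_mk_full_fw_ver_str() carries the major number in bits 31:24, minor in 23:16, patch in 15:8 and build in 7:0. A standalone sketch of the same decode, using plain shifts in place of FIELD_GET (illustrative only, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Decode of the packed FW version word: major 31:24, minor 23:16,
 * patch 15:8, build 7:0, formatted as XX.YY.ZZ-RRR.
 */
static void fw_ver_to_str(uint32_t rev_id, char *buf, size_t len)
{
	snprintf(buf, len, "%02u.%02u.%02u-%03u",
		 (unsigned int)(rev_id >> 24) & 0xff,	/* MAJOR */
		 (unsigned int)(rev_id >> 16) & 0xff,	/* MINOR */
		 (unsigned int)(rev_id >> 8) & 0xff,	/* PATCH */
		 (unsigned int)(rev_id & 0xff));	/* BUILD */
}

int main(void)
{
	char buf[16];

	fw_ver_to_str(0x01020330, buf, sizeof(buf));
	puts(buf);	/* prints "01.02.03-048" */
	return 0;
}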
+enum {
+ QSPI_SECTION_CMRT = 0,
+ QSPI_SECTION_CONTROL_FW = 1,
+ QSPI_SECTION_UCODE = 2,
+ QSPI_SECTION_OPTION_ROM = 3,
+ QSPI_SECTION_USER = 4,
+ QSPI_SECTION_INVALID,
+};
+
+#define FW_HEARTBEAT_PERIOD (10 * HZ)
+
+enum {
+ FBNIC_TLV_MSG_ID_HOST_CAP_REQ = 0x10,
+ FBNIC_TLV_MSG_ID_FW_CAP_RESP = 0x11,
+ FBNIC_TLV_MSG_ID_OWNERSHIP_REQ = 0x12,
+ FBNIC_TLV_MSG_ID_OWNERSHIP_RESP = 0x13,
+ FBNIC_TLV_MSG_ID_HEARTBEAT_REQ = 0x14,
+ FBNIC_TLV_MSG_ID_HEARTBEAT_RESP = 0x15,
+ FBNIC_TLV_MSG_ID_COREDUMP_GET_INFO_REQ = 0x18,
+ FBNIC_TLV_MSG_ID_COREDUMP_GET_INFO_RESP = 0x19,
+ FBNIC_TLV_MSG_ID_COREDUMP_READ_REQ = 0x20,
+ FBNIC_TLV_MSG_ID_COREDUMP_READ_RESP = 0x21,
+ FBNIC_TLV_MSG_ID_FW_START_UPGRADE_REQ = 0x22,
+ FBNIC_TLV_MSG_ID_FW_START_UPGRADE_RESP = 0x23,
+ FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_REQ = 0x24,
+ FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_RESP = 0x25,
+ FBNIC_TLV_MSG_ID_FW_FINISH_UPGRADE_REQ = 0x28,
+ FBNIC_TLV_MSG_ID_FW_FINISH_UPGRADE_RESP = 0x29,
+ FBNIC_TLV_MSG_ID_QSFP_READ_REQ = 0x38,
+ FBNIC_TLV_MSG_ID_QSFP_READ_RESP = 0x39,
+ FBNIC_TLV_MSG_ID_TSENE_READ_REQ = 0x3C,
+ FBNIC_TLV_MSG_ID_TSENE_READ_RESP = 0x3D,
+ FBNIC_TLV_MSG_ID_LOG_SEND_LOGS_REQ = 0x43,
+ FBNIC_TLV_MSG_ID_LOG_MSG_REQ = 0x44,
+ FBNIC_TLV_MSG_ID_LOG_MSG_RESP = 0x45,
+ FBNIC_TLV_MSG_ID_RPC_MAC_SYNC_REQ = 0x46,
+};
+
+#define FBNIC_FW_CAP_RESP_VERSION_MAJOR CSR_GENMASK(31, 24)
+#define FBNIC_FW_CAP_RESP_VERSION_MINOR CSR_GENMASK(23, 16)
+#define FBNIC_FW_CAP_RESP_VERSION_PATCH CSR_GENMASK(15, 8)
+#define FBNIC_FW_CAP_RESP_VERSION_BUILD CSR_GENMASK(7, 0)
+enum {
+ FBNIC_FW_CAP_RESP_VERSION = 0x0,
+ FBNIC_FW_CAP_RESP_BMC_PRESENT = 0x1,
+ FBNIC_FW_CAP_RESP_BMC_MAC_ADDR = 0x2,
+ FBNIC_FW_CAP_RESP_BMC_MAC_ARRAY = 0x3,
+ FBNIC_FW_CAP_RESP_STORED_VERSION = 0x4,
+ FBNIC_FW_CAP_RESP_ACTIVE_FW_SLOT = 0x5,
+ FBNIC_FW_CAP_RESP_VERSION_COMMIT_STR = 0x6,
+ FBNIC_FW_CAP_RESP_BMC_ALL_MULTI = 0x8,
+ FBNIC_FW_CAP_RESP_FW_STATE = 0x9,
+ FBNIC_FW_CAP_RESP_FW_LINK_SPEED = 0xa,
+ FBNIC_FW_CAP_RESP_FW_LINK_FEC = 0xb,
+ FBNIC_FW_CAP_RESP_STORED_COMMIT_STR = 0xc,
+ FBNIC_FW_CAP_RESP_CMRT_VERSION = 0xd,
+ FBNIC_FW_CAP_RESP_STORED_CMRT_VERSION = 0xe,
+ FBNIC_FW_CAP_RESP_CMRT_COMMIT_STR = 0xf,
+ FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR = 0x10,
+ FBNIC_FW_CAP_RESP_UEFI_VERSION = 0x11,
+ FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR = 0x12,
+ FBNIC_FW_CAP_RESP_ANTI_ROLLBACK_VERSION = 0x15,
+ FBNIC_FW_CAP_RESP_MSG_MAX
+};
+
+enum {
+ FBNIC_FW_LINK_MODE_25CR = 1,
+ FBNIC_FW_LINK_MODE_50CR2 = 2,
+ FBNIC_FW_LINK_MODE_50CR = 3,
+ FBNIC_FW_LINK_MODE_100CR2 = 4,
+};
+
+enum {
+ FBNIC_FW_LINK_FEC_NONE = 1,
+ FBNIC_FW_LINK_FEC_RS = 2,
+ FBNIC_FW_LINK_FEC_BASER = 3,
+};
+
+enum {
+ FBNIC_FW_QSFP_BANK = 0x0,
+ FBNIC_FW_QSFP_PAGE = 0x1,
+ FBNIC_FW_QSFP_OFFSET = 0x2,
+ FBNIC_FW_QSFP_LENGTH = 0x3,
+ FBNIC_FW_QSFP_ERROR = 0x4,
+ FBNIC_FW_QSFP_DATA = 0x5,
+ FBNIC_FW_QSFP_MSG_MAX
+};
+
+enum {
+ FBNIC_FW_TSENE_THERM = 0x0,
+ FBNIC_FW_TSENE_VOLT = 0x1,
+ FBNIC_FW_TSENE_ERROR = 0x2,
+ FBNIC_FW_TSENE_MSG_MAX
+};
+
+enum {
+ FBNIC_FW_OWNERSHIP_FLAG = 0x0,
+ FBNIC_FW_OWNERSHIP_TIME = 0x1,
+ FBNIC_FW_OWNERSHIP_MSG_MAX
+};
+
+enum {
+ FBNIC_FW_HEARTBEAT_UPTIME = 0x0,
+ FBNIC_FW_HEARTBEAT_NUMBER_OF_MESSAGES = 0x1,
+ FBNIC_FW_HEARTBEAT_MSG_MAX
+};
+
+enum {
+ FBNIC_FW_COREDUMP_REQ_INFO_CREATE = 0x0,
+ FBNIC_FW_COREDUMP_REQ_INFO_MSG_MAX
+};
+
+enum {
+ FBNIC_FW_COREDUMP_INFO_AVAILABLE = 0x0,
+ FBNIC_FW_COREDUMP_INFO_SIZE = 0x1,
+ FBNIC_FW_COREDUMP_INFO_ERROR = 0x2,
+ FBNIC_FW_COREDUMP_INFO_MSG_MAX
+};
+
+enum {
+ FBNIC_FW_COREDUMP_READ_OFFSET = 0x0,
+ FBNIC_FW_COREDUMP_READ_LENGTH = 0x1,
+ FBNIC_FW_COREDUMP_READ_DATA = 0x2,
+ FBNIC_FW_COREDUMP_READ_ERROR = 0x3,
+ FBNIC_FW_COREDUMP_READ_MSG_MAX
+};
+
+enum {
+ FBNIC_FW_START_UPGRADE_ERROR = 0x0,
+ FBNIC_FW_START_UPGRADE_SECTION = 0x1,
+ FBNIC_FW_START_UPGRADE_IMAGE_LENGTH = 0x2,
+ FBNIC_FW_START_UPGRADE_MSG_MAX
+};
+
+enum {
+ FBNIC_FW_WRITE_CHUNK_OFFSET = 0x0,
+ FBNIC_FW_WRITE_CHUNK_LENGTH = 0x1,
+ FBNIC_FW_WRITE_CHUNK_DATA = 0x2,
+ FBNIC_FW_WRITE_CHUNK_ERROR = 0x3,
+ FBNIC_FW_WRITE_CHUNK_MSG_MAX
+};
+
+enum {
+ FBNIC_FW_FINISH_UPGRADE_ERROR = 0x0,
+ FBNIC_FW_FINISH_UPGRADE_MSG_MAX
+};
+
+enum {
+ FBNIC_SEND_LOGS = 0x0,
+ FBNIC_SEND_LOGS_VERSION = 0x1,
+ FBNIC_SEND_LOGS_HISTORY = 0x2,
+ FBNIC_SEND_LOGS_MSG_MAX
+};
+
+enum {
+ FBNIC_FW_LOG_MSEC = 0x0,
+ FBNIC_FW_LOG_INDEX = 0x1,
+ FBNIC_FW_LOG_MSG = 0x2,
+ FBNIC_FW_LOG_LENGTH = 0x3,
+ FBNIC_FW_LOG_MSEC_ARRAY = 0x4,
+ FBNIC_FW_LOG_INDEX_ARRAY = 0x5,
+ FBNIC_FW_LOG_MSG_ARRAY = 0x6,
+ FBNIC_FW_LOG_MSG_MAX
+};
+
+enum {
+ FBNIC_FW_RPC_MAC_SYNC_RX_FLAGS = 0x0,
+ FBNIC_FW_RPC_MAC_SYNC_UC_ARRAY = 0x1,
+ FBNIC_FW_RPC_MAC_SYNC_MC_ARRAY = 0x2,
+ FBNIC_FW_RPC_MAC_SYNC_MAC_ADDR = 0x3,
+ FBNIC_FW_RPC_MAC_SYNC_MSG_MAX
+};
+
+#define FW_RPC_MAC_SYNC_RX_FLAGS_PROMISC 1
+#define FW_RPC_MAC_SYNC_RX_FLAGS_ALLMULTI 2
+#define FW_RPC_MAC_SYNC_RX_FLAGS_BROADCAST 4
+
+#define FW_RPC_MAC_SYNC_UC_ARRAY_SIZE 8
+#define FW_RPC_MAC_SYNC_MC_ARRAY_SIZE 8
+
+#endif /* _FBNIC_FW_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw_log.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw_log.c
new file mode 100644
index 000000000000..85a883dba385
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw_log.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/spinlock.h>
+#include <linux/vmalloc.h>
+
+#include "fbnic.h"
+#include "fbnic_fw.h"
+#include "fbnic_fw_log.h"
+
+void fbnic_fw_log_enable(struct fbnic_dev *fbd, bool send_hist)
+{
+ int err;
+
+ if (!fbnic_fw_log_ready(fbd))
+ return;
+
+ if (fbd->fw_cap.running.mgmt.version < MIN_FW_VER_CODE_HIST)
+ send_hist = false;
+
+ err = fbnic_fw_xmit_send_logs(fbd, true, send_hist);
+ if (err && err != -EOPNOTSUPP)
+ dev_warn(fbd->dev, "Unable to enable firmware logs: %d\n", err);
+}
+
+void fbnic_fw_log_disable(struct fbnic_dev *fbd)
+{
+ int err;
+
+ err = fbnic_fw_xmit_send_logs(fbd, false, false);
+ if (err && err != -EOPNOTSUPP)
+ dev_warn(fbd->dev, "Unable to disable firmware logs: %d\n",
+ err);
+}
+
+int fbnic_fw_log_init(struct fbnic_dev *fbd)
+{
+ struct fbnic_fw_log *log = &fbd->fw_log;
+ void *data;
+
+ if (WARN_ON_ONCE(fbnic_fw_log_ready(fbd)))
+ return -EEXIST;
+
+ data = vmalloc(FBNIC_FW_LOG_SIZE);
+ if (!data)
+ return -ENOMEM;
+
+ spin_lock_init(&fbd->fw_log.lock);
+ INIT_LIST_HEAD(&log->entries);
+ log->size = FBNIC_FW_LOG_SIZE;
+ log->data_start = data;
+ log->data_end = data + FBNIC_FW_LOG_SIZE;
+
+ fbnic_fw_log_enable(fbd, true);
+
+ return 0;
+}
+
+void fbnic_fw_log_free(struct fbnic_dev *fbd)
+{
+ struct fbnic_fw_log *log = &fbd->fw_log;
+
+ if (!fbnic_fw_log_ready(fbd))
+ return;
+
+ fbnic_fw_log_disable(fbd);
+ INIT_LIST_HEAD(&log->entries);
+ log->size = 0;
+ vfree(log->data_start);
+ log->data_start = NULL;
+ log->data_end = NULL;
+}
+
+int fbnic_fw_log_write(struct fbnic_dev *fbd, u64 index, u32 timestamp,
+ const char *msg)
+{
+ struct fbnic_fw_log_entry *entry, *head, *tail, *next;
+ struct fbnic_fw_log *log = &fbd->fw_log;
+ size_t msg_len = strlen(msg) + 1;
+ unsigned long flags;
+ void *entry_end;
+
+ if (!fbnic_fw_log_ready(fbd)) {
+ dev_err(fbd->dev, "Firmware sent log entry without being requested!\n");
+ return -ENOSPC;
+ }
+
+ spin_lock_irqsave(&log->lock, flags);
+
+ if (list_empty(&log->entries)) {
+ entry = log->data_start;
+ } else {
+ head = list_first_entry(&log->entries, typeof(*head), list);
+ entry_end = head->msg + head->len + 1;
+ entry = PTR_ALIGN(entry_end, 8);
+ }
+
+ entry_end = entry->msg + msg_len + 1;
+
+ /* We've reached the end of the buffer, wrap around */
+ if (entry_end > log->data_end) {
+ entry = log->data_start;
+ entry_end = entry->msg + msg_len + 1;
+ }
+
+ /* Make room for entry by removing from tail. */
+ list_for_each_entry_safe_reverse(tail, next, &log->entries, list) {
+ if (entry <= tail && entry_end > (void *)tail)
+ list_del(&tail->list);
+ else
+ break;
+ }
+
+ entry->index = index;
+ entry->timestamp = timestamp;
+ entry->len = msg_len;
+ strscpy(entry->msg, msg, entry->len);
+ list_add(&entry->list, &log->entries);
+
+ spin_unlock_irqrestore(&log->lock, flags);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw_log.h b/drivers/net/ethernet/meta/fbnic/fbnic_fw_log.h
new file mode 100644
index 000000000000..50ec79003108
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw_log.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#ifndef _FBNIC_FW_LOG_H_
+#define _FBNIC_FW_LOG_H_
+
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+/* A 512K log buffer was chosen fairly arbitrarily */
+#define FBNIC_FW_LOG_SIZE (512 * 1024) /* bytes */
+
+/* Firmware log output is prepended with log index followed by a timestamp.
+ * The timestamp is similar to Zephyr's format DD:HH:MM:SS.MMM
+ */
+#define FBNIC_FW_LOG_FMT "[%5lld] [%02ld:%02ld:%02ld:%02ld.%03ld] %s\n"
+
+struct fbnic_dev;
+
+struct fbnic_fw_log_entry {
+ struct list_head list;
+ u64 index;
+ u32 timestamp;
+ u16 len;
+ char msg[] __counted_by(len);
+};
+
+struct fbnic_fw_log {
+ void *data_start;
+ void *data_end;
+ size_t size;
+ struct list_head entries;
+ /* Spin lock for accessing or modifying entries */
+ spinlock_t lock;
+};
+
+#define fbnic_fw_log_ready(_fbd) (!!(_fbd)->fw_log.data_start)
+
+void fbnic_fw_log_enable(struct fbnic_dev *fbd, bool send_hist);
+void fbnic_fw_log_disable(struct fbnic_dev *fbd);
+int fbnic_fw_log_init(struct fbnic_dev *fbd);
+void fbnic_fw_log_free(struct fbnic_dev *fbd);
+int fbnic_fw_log_write(struct fbnic_dev *fbd, u64 index, u32 timestamp,
+ const char *msg);
+#endif /* _FBNIC_FW_LOG_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c
new file mode 100644
index 000000000000..8b9b2076beec
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c
@@ -0,0 +1,601 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/rtnetlink.h>
+
+#include "fbnic.h"
+
+static void fbnic_hw_stat_rst32(struct fbnic_dev *fbd, u32 reg,
+ struct fbnic_stat_counter *stat)
+{
+ /* We do not touch the "value" field here.
+ * It gets zeroed out on fbd structure allocation.
+ * After that we want it to grow continuously
+ * through device resets and power state changes.
+ */
+ stat->u.old_reg_value_32 = rd32(fbd, reg);
+}
+
+static void fbnic_hw_stat_rd32(struct fbnic_dev *fbd, u32 reg,
+ struct fbnic_stat_counter *stat)
+{
+ u32 new_reg_value;
+
+ new_reg_value = rd32(fbd, reg);
+ stat->value += new_reg_value - stat->u.old_reg_value_32;
+ stat->u.old_reg_value_32 = new_reg_value;
+}
+
+u64 fbnic_stat_rd64(struct fbnic_dev *fbd, u32 reg, u32 offset)
+{
+ u32 prev_upper, upper, lower, diff;
+
+ prev_upper = rd32(fbd, reg + offset);
+ lower = rd32(fbd, reg);
+ upper = rd32(fbd, reg + offset);
+
+ diff = upper - prev_upper;
+ if (!diff)
+ return ((u64)upper << 32) | lower;
+
+ if (diff > 1)
+ dev_warn_once(fbd->dev,
+ "Stats inconsistent, upper 32b of %#010x updating too quickly\n",
+ reg * 4);
+
+ /* Return only the upper bits as we cannot guarantee
+ * the accuracy of the lower bits. We will add them in
+ * when the counter slows down enough that we can get
+ * a snapshot with both upper values being the same
+ * between reads.
+ */
+ return ((u64)upper << 32);
+}
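fbnic_stat_rd64() uses the classic upper/lower/upper read order for a 64-bit counter exposed as two 32-bit registers. A standalone sketch of the same sequence (rd_reg() is a hypothetical stand-in for the MMIO read):

#include <stdint.h>

/* Upper/lower/upper read of a 64-bit counter split across two 32-bit
 * registers; only trust the lower bits when the upper half did not move.
 */
static uint64_t read_split_counter(uint32_t (*rd_reg)(int reg), int lo, int hi)
{
	uint32_t prev_upper = rd_reg(hi);
	uint32_t lower = rd_reg(lo);
	uint32_t upper = rd_reg(hi);

	if (upper == prev_upper)
		return ((uint64_t)upper << 32) | lower;

	/* Upper half moved between reads: the lower bits are unreliable,
	 * so report only the upper half, exactly as the driver does.
	 */
	return (uint64_t)upper << 32;
}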
+
+static void fbnic_hw_stat_rst64(struct fbnic_dev *fbd, u32 reg, s32 offset,
+ struct fbnic_stat_counter *stat)
+{
+ /* Record initial counter values and compute deltas from there to ensure
+ * stats start at 0 after reboot/reset. This avoids exposing absolute
+ * hardware counter values to userspace.
+ */
+ stat->u.old_reg_value_64 = fbnic_stat_rd64(fbd, reg, offset);
+}
+
+static void fbnic_hw_stat_rd64(struct fbnic_dev *fbd, u32 reg, s32 offset,
+ struct fbnic_stat_counter *stat)
+{
+ u64 new_reg_value;
+
+ new_reg_value = fbnic_stat_rd64(fbd, reg, offset);
+ stat->value += new_reg_value - stat->u.old_reg_value_64;
+ stat->u.old_reg_value_64 = new_reg_value;
+}
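The _rd32()/_rd64() helpers accumulate deltas rather than raw register values, so unsigned subtraction absorbs counter wrap between reads. A minimal sketch of the 32-bit case (names are illustrative):

#include <stdint.h>

/* Delta accumulation for a wrapping 32-bit hardware counter; unsigned
 * subtraction gives the correct increment modulo 2^32.
 */
struct stat32 {
	uint64_t value;		/* accumulated, monotonically growing */
	uint32_t old_reg;	/* previous raw register snapshot */
};

static void stat32_update(struct stat32 *s, uint32_t new_reg)
{
	s->value += (uint32_t)(new_reg - s->old_reg);
	s->old_reg = new_reg;
}

A snapshot pair of 0xfffffff0 followed by 0x00000010 therefore adds 0x20 to value despite the wrap.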
+
+static void fbnic_reset_tmi_stats(struct fbnic_dev *fbd,
+ struct fbnic_tmi_stats *tmi)
+{
+ fbnic_hw_stat_rst32(fbd, FBNIC_TMI_DROP_PKTS, &tmi->drop.frames);
+ fbnic_hw_stat_rst64(fbd, FBNIC_TMI_DROP_BYTE_L, 1, &tmi->drop.bytes);
+
+ fbnic_hw_stat_rst32(fbd,
+ FBNIC_TMI_ILLEGAL_PTP_REQS,
+ &tmi->ptp_illegal_req);
+ fbnic_hw_stat_rst32(fbd, FBNIC_TMI_GOOD_PTP_TS, &tmi->ptp_good_ts);
+ fbnic_hw_stat_rst32(fbd, FBNIC_TMI_BAD_PTP_TS, &tmi->ptp_bad_ts);
+}
+
+static void fbnic_get_tmi_stats32(struct fbnic_dev *fbd,
+ struct fbnic_tmi_stats *tmi)
+{
+ fbnic_hw_stat_rd32(fbd, FBNIC_TMI_DROP_PKTS, &tmi->drop.frames);
+
+ fbnic_hw_stat_rd32(fbd,
+ FBNIC_TMI_ILLEGAL_PTP_REQS,
+ &tmi->ptp_illegal_req);
+ fbnic_hw_stat_rd32(fbd, FBNIC_TMI_GOOD_PTP_TS, &tmi->ptp_good_ts);
+ fbnic_hw_stat_rd32(fbd, FBNIC_TMI_BAD_PTP_TS, &tmi->ptp_bad_ts);
+}
+
+static void fbnic_get_tmi_stats(struct fbnic_dev *fbd,
+ struct fbnic_tmi_stats *tmi)
+{
+ fbnic_hw_stat_rd64(fbd, FBNIC_TMI_DROP_BYTE_L, 1, &tmi->drop.bytes);
+}
+
+static void fbnic_reset_tti_stats(struct fbnic_dev *fbd,
+ struct fbnic_tti_stats *tti)
+{
+ fbnic_hw_stat_rst32(fbd,
+ FBNIC_TCE_TTI_CM_DROP_PKTS,
+ &tti->cm_drop.frames);
+ fbnic_hw_stat_rst64(fbd,
+ FBNIC_TCE_TTI_CM_DROP_BYTE_L,
+ 1,
+ &tti->cm_drop.bytes);
+
+ fbnic_hw_stat_rst32(fbd,
+ FBNIC_TCE_TTI_FRAME_DROP_PKTS,
+ &tti->frame_drop.frames);
+ fbnic_hw_stat_rst64(fbd,
+ FBNIC_TCE_TTI_FRAME_DROP_BYTE_L,
+ 1,
+ &tti->frame_drop.bytes);
+
+ fbnic_hw_stat_rst32(fbd,
+ FBNIC_TCE_TBI_DROP_PKTS,
+ &tti->tbi_drop.frames);
+ fbnic_hw_stat_rst64(fbd,
+ FBNIC_TCE_TBI_DROP_BYTE_L,
+ 1,
+ &tti->tbi_drop.bytes);
+}
+
+static void fbnic_get_tti_stats32(struct fbnic_dev *fbd,
+ struct fbnic_tti_stats *tti)
+{
+ fbnic_hw_stat_rd32(fbd,
+ FBNIC_TCE_TTI_CM_DROP_PKTS,
+ &tti->cm_drop.frames);
+
+ fbnic_hw_stat_rd32(fbd,
+ FBNIC_TCE_TTI_FRAME_DROP_PKTS,
+ &tti->frame_drop.frames);
+
+ fbnic_hw_stat_rd32(fbd,
+ FBNIC_TCE_TBI_DROP_PKTS,
+ &tti->tbi_drop.frames);
+}
+
+static void fbnic_get_tti_stats(struct fbnic_dev *fbd,
+ struct fbnic_tti_stats *tti)
+{
+ fbnic_hw_stat_rd64(fbd,
+ FBNIC_TCE_TTI_CM_DROP_BYTE_L,
+ 1,
+ &tti->cm_drop.bytes);
+
+ fbnic_hw_stat_rd64(fbd,
+ FBNIC_TCE_TTI_FRAME_DROP_BYTE_L,
+ 1,
+ &tti->frame_drop.bytes);
+
+ fbnic_hw_stat_rd64(fbd,
+ FBNIC_TCE_TBI_DROP_BYTE_L,
+ 1,
+ &tti->tbi_drop.bytes);
+}
+
+static void fbnic_reset_rpc_stats(struct fbnic_dev *fbd,
+ struct fbnic_rpc_stats *rpc)
+{
+ fbnic_hw_stat_rst32(fbd,
+ FBNIC_RPC_CNTR_UNKN_ETYPE,
+ &rpc->unkn_etype);
+ fbnic_hw_stat_rst32(fbd,
+ FBNIC_RPC_CNTR_UNKN_EXT_HDR,
+ &rpc->unkn_ext_hdr);
+ fbnic_hw_stat_rst32(fbd, FBNIC_RPC_CNTR_IPV4_FRAG, &rpc->ipv4_frag);
+ fbnic_hw_stat_rst32(fbd, FBNIC_RPC_CNTR_IPV6_FRAG, &rpc->ipv6_frag);
+ fbnic_hw_stat_rst32(fbd, FBNIC_RPC_CNTR_IPV4_ESP, &rpc->ipv4_esp);
+ fbnic_hw_stat_rst32(fbd, FBNIC_RPC_CNTR_IPV6_ESP, &rpc->ipv6_esp);
+ fbnic_hw_stat_rst32(fbd, FBNIC_RPC_CNTR_TCP_OPT_ERR, &rpc->tcp_opt_err);
+ fbnic_hw_stat_rst32(fbd,
+ FBNIC_RPC_CNTR_OUT_OF_HDR_ERR,
+ &rpc->out_of_hdr_err);
+ fbnic_hw_stat_rst32(fbd,
+ FBNIC_RPC_CNTR_OVR_SIZE_ERR,
+ &rpc->ovr_size_err);
+}
+
+static void fbnic_get_rpc_stats32(struct fbnic_dev *fbd,
+ struct fbnic_rpc_stats *rpc)
+{
+ fbnic_hw_stat_rd32(fbd,
+ FBNIC_RPC_CNTR_UNKN_ETYPE,
+ &rpc->unkn_etype);
+ fbnic_hw_stat_rd32(fbd,
+ FBNIC_RPC_CNTR_UNKN_EXT_HDR,
+ &rpc->unkn_ext_hdr);
+
+ fbnic_hw_stat_rd32(fbd, FBNIC_RPC_CNTR_IPV4_FRAG, &rpc->ipv4_frag);
+ fbnic_hw_stat_rd32(fbd, FBNIC_RPC_CNTR_IPV6_FRAG, &rpc->ipv6_frag);
+
+ fbnic_hw_stat_rd32(fbd, FBNIC_RPC_CNTR_IPV4_ESP, &rpc->ipv4_esp);
+ fbnic_hw_stat_rd32(fbd, FBNIC_RPC_CNTR_IPV6_ESP, &rpc->ipv6_esp);
+
+ fbnic_hw_stat_rd32(fbd, FBNIC_RPC_CNTR_TCP_OPT_ERR, &rpc->tcp_opt_err);
+ fbnic_hw_stat_rd32(fbd,
+ FBNIC_RPC_CNTR_OUT_OF_HDR_ERR,
+ &rpc->out_of_hdr_err);
+ fbnic_hw_stat_rd32(fbd,
+ FBNIC_RPC_CNTR_OVR_SIZE_ERR,
+ &rpc->ovr_size_err);
+}
+
+static void fbnic_reset_rxb_fifo_stats(struct fbnic_dev *fbd, int i,
+ struct fbnic_rxb_fifo_stats *fifo)
+{
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_DROP_FRMS_STS(i),
+ &fifo->drop.frames);
+ fbnic_hw_stat_rst64(fbd, FBNIC_RXB_DROP_BYTES_STS_L(i), 1,
+ &fifo->drop.bytes);
+
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_TRUN_FRMS_STS(i),
+ &fifo->trunc.frames);
+ fbnic_hw_stat_rst64(fbd, FBNIC_RXB_TRUN_BYTES_STS_L(i), 1,
+ &fifo->trunc.bytes);
+
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_TRANS_DROP_STS(i),
+ &fifo->trans_drop);
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_TRANS_ECN_STS(i),
+ &fifo->trans_ecn);
+
+ fifo->level.u.old_reg_value_32 = 0;
+}
+
+static void fbnic_reset_rxb_enq_stats(struct fbnic_dev *fbd, int i,
+ struct fbnic_rxb_enqueue_stats *enq)
+{
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_DRBO_FRM_CNT_SRC(i),
+ &enq->drbo.frames);
+ fbnic_hw_stat_rst64(fbd, FBNIC_RXB_DRBO_BYTE_CNT_SRC_L(i), 4,
+ &enq->drbo.bytes);
+
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_INTEGRITY_ERR(i),
+ &enq->integrity_err);
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_MAC_ERR(i),
+ &enq->mac_err);
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_PARSER_ERR(i),
+ &enq->parser_err);
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_FRM_ERR(i),
+ &enq->frm_err);
+}
+
+static void fbnic_reset_rxb_deq_stats(struct fbnic_dev *fbd, int i,
+ struct fbnic_rxb_dequeue_stats *deq)
+{
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_INTF_FRM_CNT_DST(i),
+ &deq->intf.frames);
+ fbnic_hw_stat_rst64(fbd, FBNIC_RXB_INTF_BYTE_CNT_DST_L(i), 4,
+ &deq->intf.bytes);
+
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_PBUF_FRM_CNT_DST(i),
+ &deq->pbuf.frames);
+ fbnic_hw_stat_rst64(fbd, FBNIC_RXB_PBUF_BYTE_CNT_DST_L(i), 4,
+ &deq->pbuf.bytes);
+}
+
+static void fbnic_reset_rxb_stats(struct fbnic_dev *fbd,
+ struct fbnic_rxb_stats *rxb)
+{
+ int i;
+
+ for (i = 0; i < FBNIC_RXB_FIFO_INDICES; i++)
+ fbnic_reset_rxb_fifo_stats(fbd, i, &rxb->fifo[i]);
+
+ for (i = 0; i < FBNIC_RXB_INTF_INDICES; i++) {
+ fbnic_reset_rxb_enq_stats(fbd, i, &rxb->enq[i]);
+ fbnic_reset_rxb_deq_stats(fbd, i, &rxb->deq[i]);
+ }
+}
+
+static void fbnic_get_rxb_fifo_stats32(struct fbnic_dev *fbd, int i,
+ struct fbnic_rxb_fifo_stats *fifo)
+{
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_DROP_FRMS_STS(i),
+ &fifo->drop.frames);
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_TRUN_FRMS_STS(i),
+ &fifo->trunc.frames);
+
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_TRANS_DROP_STS(i),
+ &fifo->trans_drop);
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_TRANS_ECN_STS(i),
+ &fifo->trans_ecn);
+
+ fifo->level.value = rd32(fbd, FBNIC_RXB_PBUF_FIFO_LEVEL(i));
+}
+
+static void fbnic_get_rxb_fifo_stats(struct fbnic_dev *fbd, int i,
+ struct fbnic_rxb_fifo_stats *fifo)
+{
+ fbnic_hw_stat_rd64(fbd, FBNIC_RXB_DROP_BYTES_STS_L(i), 1,
+ &fifo->drop.bytes);
+ fbnic_hw_stat_rd64(fbd, FBNIC_RXB_TRUN_BYTES_STS_L(i), 1,
+ &fifo->trunc.bytes);
+
+ fbnic_get_rxb_fifo_stats32(fbd, i, fifo);
+}
+
+static void fbnic_get_rxb_enq_stats32(struct fbnic_dev *fbd, int i,
+ struct fbnic_rxb_enqueue_stats *enq)
+{
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_DRBO_FRM_CNT_SRC(i),
+ &enq->drbo.frames);
+
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_INTEGRITY_ERR(i),
+ &enq->integrity_err);
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_MAC_ERR(i),
+ &enq->mac_err);
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_PARSER_ERR(i),
+ &enq->parser_err);
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_FRM_ERR(i),
+ &enq->frm_err);
+}
+
+static void fbnic_get_rxb_enq_stats(struct fbnic_dev *fbd, int i,
+ struct fbnic_rxb_enqueue_stats *enq)
+{
+ fbnic_hw_stat_rd64(fbd, FBNIC_RXB_DRBO_BYTE_CNT_SRC_L(i), 4,
+ &enq->drbo.bytes);
+
+ fbnic_get_rxb_enq_stats32(fbd, i, enq);
+}
+
+static void fbnic_get_rxb_deq_stats32(struct fbnic_dev *fbd, int i,
+ struct fbnic_rxb_dequeue_stats *deq)
+{
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_INTF_FRM_CNT_DST(i),
+ &deq->intf.frames);
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_PBUF_FRM_CNT_DST(i),
+ &deq->pbuf.frames);
+}
+
+static void fbnic_get_rxb_deq_stats(struct fbnic_dev *fbd, int i,
+ struct fbnic_rxb_dequeue_stats *deq)
+{
+ fbnic_hw_stat_rd64(fbd, FBNIC_RXB_INTF_BYTE_CNT_DST_L(i), 4,
+ &deq->intf.bytes);
+ fbnic_hw_stat_rd64(fbd, FBNIC_RXB_PBUF_BYTE_CNT_DST_L(i), 4,
+ &deq->pbuf.bytes);
+
+ fbnic_get_rxb_deq_stats32(fbd, i, deq);
+}
+
+static void fbnic_get_rxb_stats32(struct fbnic_dev *fbd,
+ struct fbnic_rxb_stats *rxb)
+{
+ int i;
+
+ for (i = 0; i < FBNIC_RXB_FIFO_INDICES; i++)
+ fbnic_get_rxb_fifo_stats32(fbd, i, &rxb->fifo[i]);
+
+ for (i = 0; i < FBNIC_RXB_INTF_INDICES; i++) {
+ fbnic_get_rxb_enq_stats32(fbd, i, &rxb->enq[i]);
+ fbnic_get_rxb_deq_stats32(fbd, i, &rxb->deq[i]);
+ }
+}
+
+static void fbnic_get_rxb_stats(struct fbnic_dev *fbd,
+ struct fbnic_rxb_stats *rxb)
+{
+ int i;
+
+ for (i = 0; i < FBNIC_RXB_FIFO_INDICES; i++)
+ fbnic_get_rxb_fifo_stats(fbd, i, &rxb->fifo[i]);
+
+ for (i = 0; i < FBNIC_RXB_INTF_INDICES; i++) {
+ fbnic_get_rxb_enq_stats(fbd, i, &rxb->enq[i]);
+ fbnic_get_rxb_deq_stats(fbd, i, &rxb->deq[i]);
+ }
+}
+
+static void fbnic_reset_hw_rxq_stats(struct fbnic_dev *fbd,
+ struct fbnic_hw_q_stats *hw_q)
+{
+ int i;
+
+ for (i = 0; i < fbd->max_num_queues; i++, hw_q++) {
+ u32 base = FBNIC_QUEUE(i);
+
+ fbnic_hw_stat_rst32(fbd,
+ base + FBNIC_QUEUE_RDE_PKT_ERR_CNT,
+ &hw_q->rde_pkt_err);
+ fbnic_hw_stat_rst32(fbd,
+ base + FBNIC_QUEUE_RDE_CQ_DROP_CNT,
+ &hw_q->rde_pkt_cq_drop);
+ fbnic_hw_stat_rst32(fbd,
+ base + FBNIC_QUEUE_RDE_BDQ_DROP_CNT,
+ &hw_q->rde_pkt_bdq_drop);
+ }
+}
+
+static void fbnic_get_hw_rxq_stats32(struct fbnic_dev *fbd,
+ struct fbnic_hw_q_stats *hw_q)
+{
+ int i;
+
+ for (i = 0; i < fbd->max_num_queues; i++, hw_q++) {
+ u32 base = FBNIC_QUEUE(i);
+
+ fbnic_hw_stat_rd32(fbd,
+ base + FBNIC_QUEUE_RDE_PKT_ERR_CNT,
+ &hw_q->rde_pkt_err);
+ fbnic_hw_stat_rd32(fbd,
+ base + FBNIC_QUEUE_RDE_CQ_DROP_CNT,
+ &hw_q->rde_pkt_cq_drop);
+ fbnic_hw_stat_rd32(fbd,
+ base + FBNIC_QUEUE_RDE_BDQ_DROP_CNT,
+ &hw_q->rde_pkt_bdq_drop);
+ }
+}
+
+void fbnic_get_hw_q_stats(struct fbnic_dev *fbd,
+ struct fbnic_hw_q_stats *hw_q)
+{
+ spin_lock(&fbd->hw_stats.lock);
+ fbnic_get_hw_rxq_stats32(fbd, hw_q);
+ spin_unlock(&fbd->hw_stats.lock);
+}
+
+static void fbnic_reset_pcie_stats_asic(struct fbnic_dev *fbd,
+ struct fbnic_pcie_stats *pcie)
+{
+ fbnic_hw_stat_rst64(fbd,
+ FBNIC_PUL_USER_OB_RD_TLP_CNT_31_0,
+ 1,
+ &pcie->ob_rd_tlp);
+ fbnic_hw_stat_rst64(fbd,
+ FBNIC_PUL_USER_OB_RD_DWORD_CNT_31_0,
+ 1,
+ &pcie->ob_rd_dword);
+ fbnic_hw_stat_rst64(fbd,
+ FBNIC_PUL_USER_OB_CPL_TLP_CNT_31_0,
+ 1,
+ &pcie->ob_cpl_tlp);
+ fbnic_hw_stat_rst64(fbd,
+ FBNIC_PUL_USER_OB_CPL_DWORD_CNT_31_0,
+ 1,
+ &pcie->ob_cpl_dword);
+ fbnic_hw_stat_rst64(fbd,
+ FBNIC_PUL_USER_OB_WR_TLP_CNT_31_0,
+ 1,
+ &pcie->ob_wr_tlp);
+ fbnic_hw_stat_rst64(fbd,
+ FBNIC_PUL_USER_OB_WR_DWORD_CNT_31_0,
+ 1,
+ &pcie->ob_wr_dword);
+
+ fbnic_hw_stat_rst64(fbd,
+ FBNIC_PUL_USER_OB_RD_DBG_CNT_TAG_31_0,
+ 1,
+ &pcie->ob_rd_no_tag);
+ fbnic_hw_stat_rst64(fbd,
+ FBNIC_PUL_USER_OB_RD_DBG_CNT_CPL_CRED_31_0,
+ 1,
+ &pcie->ob_rd_no_cpl_cred);
+ fbnic_hw_stat_rst64(fbd,
+ FBNIC_PUL_USER_OB_RD_DBG_CNT_NP_CRED_31_0,
+ 1,
+ &pcie->ob_rd_no_np_cred);
+}
+
+static void fbnic_get_pcie_stats_asic64(struct fbnic_dev *fbd,
+ struct fbnic_pcie_stats *pcie)
+{
+ fbnic_hw_stat_rd64(fbd,
+ FBNIC_PUL_USER_OB_RD_TLP_CNT_31_0,
+ 1,
+ &pcie->ob_rd_tlp);
+ fbnic_hw_stat_rd64(fbd,
+ FBNIC_PUL_USER_OB_RD_DWORD_CNT_31_0,
+ 1,
+ &pcie->ob_rd_dword);
+ fbnic_hw_stat_rd64(fbd,
+ FBNIC_PUL_USER_OB_WR_TLP_CNT_31_0,
+ 1,
+ &pcie->ob_wr_tlp);
+ fbnic_hw_stat_rd64(fbd,
+ FBNIC_PUL_USER_OB_WR_DWORD_CNT_31_0,
+ 1,
+ &pcie->ob_wr_dword);
+ fbnic_hw_stat_rd64(fbd,
+ FBNIC_PUL_USER_OB_CPL_TLP_CNT_31_0,
+ 1,
+ &pcie->ob_cpl_tlp);
+ fbnic_hw_stat_rd64(fbd,
+ FBNIC_PUL_USER_OB_CPL_DWORD_CNT_31_0,
+ 1,
+ &pcie->ob_cpl_dword);
+
+ fbnic_hw_stat_rd64(fbd,
+ FBNIC_PUL_USER_OB_RD_DBG_CNT_TAG_31_0,
+ 1,
+ &pcie->ob_rd_no_tag);
+ fbnic_hw_stat_rd64(fbd,
+ FBNIC_PUL_USER_OB_RD_DBG_CNT_CPL_CRED_31_0,
+ 1,
+ &pcie->ob_rd_no_cpl_cred);
+ fbnic_hw_stat_rd64(fbd,
+ FBNIC_PUL_USER_OB_RD_DBG_CNT_NP_CRED_31_0,
+ 1,
+ &pcie->ob_rd_no_np_cred);
+}
+
+static void fbnic_reset_phy_stats(struct fbnic_dev *fbd,
+ struct fbnic_phy_stats *phy_stats)
+{
+ const struct fbnic_mac *mac = fbd->mac;
+
+ mac->get_fec_stats(fbd, true, &phy_stats->fec);
+ mac->get_pcs_stats(fbd, true, &phy_stats->pcs);
+}
+
+static void fbnic_get_phy_stats32(struct fbnic_dev *fbd,
+ struct fbnic_phy_stats *phy_stats)
+{
+ const struct fbnic_mac *mac = fbd->mac;
+
+ mac->get_fec_stats(fbd, false, &phy_stats->fec);
+ mac->get_pcs_stats(fbd, false, &phy_stats->pcs);
+}
+
+static void fbnic_reset_hw_mac_stats(struct fbnic_dev *fbd,
+ struct fbnic_mac_stats *mac_stats)
+{
+ const struct fbnic_mac *mac = fbd->mac;
+
+ mac->get_eth_mac_stats(fbd, true, &mac_stats->eth_mac);
+ mac->get_pause_stats(fbd, true, &mac_stats->pause);
+ mac->get_eth_ctrl_stats(fbd, true, &mac_stats->eth_ctrl);
+ mac->get_rmon_stats(fbd, true, &mac_stats->rmon);
+}
+
+void fbnic_reset_hw_stats(struct fbnic_dev *fbd)
+{
+ spin_lock(&fbd->hw_stats.lock);
+ fbnic_reset_phy_stats(fbd, &fbd->hw_stats.phy);
+ fbnic_reset_tmi_stats(fbd, &fbd->hw_stats.tmi);
+ fbnic_reset_tti_stats(fbd, &fbd->hw_stats.tti);
+ fbnic_reset_rpc_stats(fbd, &fbd->hw_stats.rpc);
+ fbnic_reset_rxb_stats(fbd, &fbd->hw_stats.rxb);
+ fbnic_reset_hw_rxq_stats(fbd, fbd->hw_stats.hw_q);
+ fbnic_reset_pcie_stats_asic(fbd, &fbd->hw_stats.pcie);
+ spin_unlock(&fbd->hw_stats.lock);
+
+ /* Once registered, the only other access to MAC stats is via the
+	 * ethtool API, which is protected by the rtnl_lock. The call to
+	 * fbnic_reset_hw_stats() during PCI recovery is also protected
+	 * by the rtnl_lock; hence we don't need the spinlock to access
+ * the MAC stats.
+ */
+ if (fbd->netdev)
+ ASSERT_RTNL();
+ fbnic_reset_hw_mac_stats(fbd, &fbd->hw_stats.mac);
+}
+
+void fbnic_init_hw_stats(struct fbnic_dev *fbd)
+{
+ spin_lock_init(&fbd->hw_stats.lock);
+
+ fbnic_reset_hw_stats(fbd);
+}
+
+static void __fbnic_get_hw_stats32(struct fbnic_dev *fbd)
+{
+ fbnic_get_phy_stats32(fbd, &fbd->hw_stats.phy);
+ fbnic_get_tmi_stats32(fbd, &fbd->hw_stats.tmi);
+ fbnic_get_tti_stats32(fbd, &fbd->hw_stats.tti);
+ fbnic_get_rpc_stats32(fbd, &fbd->hw_stats.rpc);
+ fbnic_get_rxb_stats32(fbd, &fbd->hw_stats.rxb);
+ fbnic_get_hw_rxq_stats32(fbd, fbd->hw_stats.hw_q);
+}
+
+void fbnic_get_hw_stats32(struct fbnic_dev *fbd)
+{
+ spin_lock(&fbd->hw_stats.lock);
+ __fbnic_get_hw_stats32(fbd);
+ spin_unlock(&fbd->hw_stats.lock);
+}
+
+void fbnic_get_hw_stats(struct fbnic_dev *fbd)
+{
+ spin_lock(&fbd->hw_stats.lock);
+ __fbnic_get_hw_stats32(fbd);
+
+ fbnic_get_tmi_stats(fbd, &fbd->hw_stats.tmi);
+ fbnic_get_tti_stats(fbd, &fbd->hw_stats.tti);
+ fbnic_get_rxb_stats(fbd, &fbd->hw_stats.rxb);
+ fbnic_get_pcie_stats_asic64(fbd, &fbd->hw_stats.pcie);
+ spin_unlock(&fbd->hw_stats.lock);
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h
new file mode 100644
index 000000000000..aa3f429a9aed
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#ifndef _FBNIC_HW_STATS_H_
+#define _FBNIC_HW_STATS_H_
+
+#include <linux/ethtool.h>
+#include <linux/spinlock.h>
+
+#include "fbnic_csr.h"
+
+struct fbnic_stat_counter {
+ u64 value;
+ union {
+ u32 old_reg_value_32;
+ u64 old_reg_value_64;
+ } u;
+ bool reported;
+};
+
+struct fbnic_hw_stat {
+ struct fbnic_stat_counter frames;
+ struct fbnic_stat_counter bytes;
+};
+
+struct fbnic_fec_stats {
+ struct fbnic_stat_counter corrected_blocks, uncorrectable_blocks;
+};
+
+struct fbnic_pcs_stats {
+ struct {
+ struct fbnic_stat_counter lanes[FBNIC_PCS_MAX_LANES];
+ } SymbolErrorDuringCarrier;
+};
+
+/* Note: not updated by fbnic_get_hw_stats() */
+struct fbnic_eth_ctrl_stats {
+ struct fbnic_stat_counter MACControlFramesTransmitted;
+ struct fbnic_stat_counter MACControlFramesReceived;
+};
+
+/* Note: not updated by fbnic_get_hw_stats() */
+struct fbnic_rmon_stats {
+ struct fbnic_stat_counter undersize_pkts;
+ struct fbnic_stat_counter oversize_pkts;
+ struct fbnic_stat_counter fragments;
+ struct fbnic_stat_counter jabbers;
+
+ struct fbnic_stat_counter hist[ETHTOOL_RMON_HIST_MAX];
+ struct fbnic_stat_counter hist_tx[ETHTOOL_RMON_HIST_MAX];
+};
+
+/* Note: not updated by fbnic_get_hw_stats() */
+struct fbnic_pause_stats {
+ struct fbnic_stat_counter tx_pause_frames;
+ struct fbnic_stat_counter rx_pause_frames;
+};
+
+struct fbnic_eth_mac_stats {
+ struct fbnic_stat_counter FramesTransmittedOK;
+ struct fbnic_stat_counter FramesReceivedOK;
+ struct fbnic_stat_counter FrameCheckSequenceErrors;
+ struct fbnic_stat_counter AlignmentErrors;
+ struct fbnic_stat_counter OctetsTransmittedOK;
+ struct fbnic_stat_counter FramesLostDueToIntMACXmitError;
+ struct fbnic_stat_counter OctetsReceivedOK;
+ struct fbnic_stat_counter FramesLostDueToIntMACRcvError;
+ struct fbnic_stat_counter MulticastFramesXmittedOK;
+ struct fbnic_stat_counter BroadcastFramesXmittedOK;
+ struct fbnic_stat_counter MulticastFramesReceivedOK;
+ struct fbnic_stat_counter BroadcastFramesReceivedOK;
+ struct fbnic_stat_counter FrameTooLongErrors;
+};
+
+struct fbnic_phy_stats {
+ struct fbnic_fec_stats fec;
+ struct fbnic_pcs_stats pcs;
+};
+
+struct fbnic_mac_stats {
+ struct fbnic_eth_mac_stats eth_mac;
+ struct fbnic_pause_stats pause;
+ struct fbnic_eth_ctrl_stats eth_ctrl;
+ struct fbnic_rmon_stats rmon;
+};
+
+struct fbnic_tmi_stats {
+ struct fbnic_hw_stat drop;
+ struct fbnic_stat_counter ptp_illegal_req, ptp_good_ts, ptp_bad_ts;
+};
+
+struct fbnic_tti_stats {
+ struct fbnic_hw_stat cm_drop, frame_drop, tbi_drop;
+};
+
+struct fbnic_rpc_stats {
+ struct fbnic_stat_counter unkn_etype, unkn_ext_hdr;
+ struct fbnic_stat_counter ipv4_frag, ipv6_frag, ipv4_esp, ipv6_esp;
+ struct fbnic_stat_counter tcp_opt_err, out_of_hdr_err, ovr_size_err;
+};
+
+struct fbnic_rxb_enqueue_stats {
+ struct fbnic_hw_stat drbo;
+ struct fbnic_stat_counter integrity_err, mac_err;
+ struct fbnic_stat_counter parser_err, frm_err;
+};
+
+struct fbnic_rxb_fifo_stats {
+ struct fbnic_hw_stat drop, trunc;
+ struct fbnic_stat_counter trans_drop, trans_ecn;
+ struct fbnic_stat_counter level;
+};
+
+struct fbnic_rxb_dequeue_stats {
+ struct fbnic_hw_stat intf, pbuf;
+};
+
+struct fbnic_rxb_stats {
+ struct fbnic_rxb_enqueue_stats enq[FBNIC_RXB_ENQUEUE_INDICES];
+ struct fbnic_rxb_fifo_stats fifo[FBNIC_RXB_FIFO_INDICES];
+ struct fbnic_rxb_dequeue_stats deq[FBNIC_RXB_DEQUEUE_INDICES];
+};
+
+struct fbnic_hw_q_stats {
+ struct fbnic_stat_counter rde_pkt_err;
+ struct fbnic_stat_counter rde_pkt_cq_drop;
+ struct fbnic_stat_counter rde_pkt_bdq_drop;
+};
+
+struct fbnic_pcie_stats {
+ struct fbnic_stat_counter ob_rd_tlp, ob_rd_dword;
+ struct fbnic_stat_counter ob_wr_tlp, ob_wr_dword;
+ struct fbnic_stat_counter ob_cpl_tlp, ob_cpl_dword;
+
+ struct fbnic_stat_counter ob_rd_no_tag;
+ struct fbnic_stat_counter ob_rd_no_cpl_cred;
+ struct fbnic_stat_counter ob_rd_no_np_cred;
+};
+
+struct fbnic_hw_stats {
+ struct fbnic_phy_stats phy;
+ struct fbnic_mac_stats mac;
+ struct fbnic_tmi_stats tmi;
+ struct fbnic_tti_stats tti;
+ struct fbnic_rpc_stats rpc;
+ struct fbnic_rxb_stats rxb;
+ struct fbnic_hw_q_stats hw_q[FBNIC_MAX_QUEUES];
+ struct fbnic_pcie_stats pcie;
+
+ /* Lock protecting the access to hw stats */
+ spinlock_t lock;
+};
+
+u64 fbnic_stat_rd64(struct fbnic_dev *fbd, u32 reg, u32 offset);
+
+void fbnic_reset_hw_stats(struct fbnic_dev *fbd);
+void fbnic_init_hw_stats(struct fbnic_dev *fbd);
+void fbnic_get_hw_q_stats(struct fbnic_dev *fbd,
+ struct fbnic_hw_q_stats *hw_q);
+void fbnic_get_hw_stats32(struct fbnic_dev *fbd);
+void fbnic_get_hw_stats(struct fbnic_dev *fbd);
+
+#endif /* _FBNIC_HW_STATS_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_hwmon.c b/drivers/net/ethernet/meta/fbnic/fbnic_hwmon.c
new file mode 100644
index 000000000000..def8598aceec
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_hwmon.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/hwmon.h>
+
+#include "fbnic.h"
+#include "fbnic_mac.h"
+
+static int fbnic_hwmon_sensor_id(enum hwmon_sensor_types type)
+{
+ if (type == hwmon_temp)
+ return FBNIC_SENSOR_TEMP;
+ if (type == hwmon_in)
+ return FBNIC_SENSOR_VOLTAGE;
+
+ return -EOPNOTSUPP;
+}
+
+static umode_t fbnic_hwmon_is_visible(const void *drvdata,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ if (type == hwmon_temp && attr == hwmon_temp_input)
+ return 0444;
+ if (type == hwmon_in && attr == hwmon_in_input)
+ return 0444;
+
+ return 0;
+}
+
+static int fbnic_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct fbnic_dev *fbd = dev_get_drvdata(dev);
+ const struct fbnic_mac *mac = fbd->mac;
+ int id;
+
+ id = fbnic_hwmon_sensor_id(type);
+ return id < 0 ? id : mac->get_sensor(fbd, id, val);
+}
+
+static const struct hwmon_ops fbnic_hwmon_ops = {
+ .is_visible = fbnic_hwmon_is_visible,
+ .read = fbnic_hwmon_read,
+};
+
+static const struct hwmon_channel_info *fbnic_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
+ HWMON_CHANNEL_INFO(in, HWMON_I_INPUT),
+ NULL
+};
+
+static const struct hwmon_chip_info fbnic_chip_info = {
+ .ops = &fbnic_hwmon_ops,
+ .info = fbnic_hwmon_info,
+};
+
+void fbnic_hwmon_register(struct fbnic_dev *fbd)
+{
+ if (!IS_REACHABLE(CONFIG_HWMON))
+ return;
+
+ fbd->hwmon = hwmon_device_register_with_info(fbd->dev, "fbnic",
+ fbd, &fbnic_chip_info,
+ NULL);
+ if (IS_ERR(fbd->hwmon)) {
+ dev_notice(fbd->dev,
+ "Failed to register hwmon device %pe\n",
+ fbd->hwmon);
+ fbd->hwmon = NULL;
+ }
+}
+
+void fbnic_hwmon_unregister(struct fbnic_dev *fbd)
+{
+ if (!IS_REACHABLE(CONFIG_HWMON) || !fbd->hwmon)
+ return;
+
+ hwmon_device_unregister(fbd->hwmon);
+ fbd->hwmon = NULL;
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_irq.c b/drivers/net/ethernet/meta/fbnic/fbnic_irq.c
new file mode 100644
index 000000000000..02e8b0b257fe
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_irq.c
@@ -0,0 +1,313 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/pci.h>
+#include <linux/types.h>
+
+#include "fbnic.h"
+#include "fbnic_netdev.h"
+#include "fbnic_txrx.h"
+
+static irqreturn_t fbnic_fw_msix_intr(int __always_unused irq, void *data)
+{
+ struct fbnic_dev *fbd = (struct fbnic_dev *)data;
+
+ fbnic_mbx_poll(fbd);
+
+ fbnic_wr32(fbd, FBNIC_INTR_MASK_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);
+
+ return IRQ_HANDLED;
+}
+
+static int __fbnic_fw_enable_mbx(struct fbnic_dev *fbd, int vector)
+{
+ int err;
+
+ /* Initialize mailbox and attempt to poll it into ready state */
+ fbnic_mbx_init(fbd);
+ err = fbnic_mbx_poll_tx_ready(fbd);
+ if (err) {
+ dev_warn(fbd->dev, "FW mailbox did not enter ready state\n");
+ return err;
+ }
+
+ /* Enable interrupt and unmask the vector */
+ enable_irq(vector);
+ fbnic_wr32(fbd, FBNIC_INTR_MASK_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);
+
+ return 0;
+}
+
+/**
+ * fbnic_fw_request_mbx - Configure and initialize Firmware Mailbox
+ * @fbd: Pointer to device to initialize
+ *
+ * This function will allocate the IRQ and then reinitialize the mailbox,
+ * starting communication between the host and firmware.
+ *
+ * Return: non-zero on failure.
+ **/
+int fbnic_fw_request_mbx(struct fbnic_dev *fbd)
+{
+ struct pci_dev *pdev = to_pci_dev(fbd->dev);
+ int vector, err;
+
+ WARN_ON(fbd->fw_msix_vector);
+
+ vector = pci_irq_vector(pdev, FBNIC_FW_MSIX_ENTRY);
+ if (vector < 0)
+ return vector;
+
+ /* Request the IRQ for FW Mailbox vector. */
+ err = request_threaded_irq(vector, NULL, &fbnic_fw_msix_intr,
+ IRQF_ONESHOT | IRQF_NO_AUTOEN,
+ dev_name(fbd->dev), fbd);
+ if (err)
+ return err;
+
+ /* Initialize mailbox and attempt to poll it into ready state */
+ err = __fbnic_fw_enable_mbx(fbd, vector);
+ if (err)
+ free_irq(vector, fbd);
+
+ fbd->fw_msix_vector = vector;
+
+ return err;
+}
+
+/**
+ * fbnic_fw_disable_mbx - Temporarily place mailbox in standby state
+ * @fbd: Pointer to device
+ *
+ * Shutdown the mailbox by notifying the firmware to stop sending us logs, mask
+ * and synchronize the IRQ, and then clean up the rings.
+ **/
+static void fbnic_fw_disable_mbx(struct fbnic_dev *fbd)
+{
+ /* Disable interrupt and synchronize the IRQ */
+ disable_irq(fbd->fw_msix_vector);
+
+ /* Mask the vector */
+ fbnic_wr32(fbd, FBNIC_INTR_MASK_SET(0), 1u << FBNIC_FW_MSIX_ENTRY);
+
+ /* Make sure disabling logs message is sent, must be done here to
+ * avoid risk of completing without a running interrupt.
+ */
+ fbnic_mbx_flush_tx(fbd);
+ fbnic_mbx_clean(fbd);
+}
+
+/**
+ * fbnic_fw_free_mbx - Disable mailbox and place it in standby state
+ * @fbd: Pointer to device to disable
+ *
+ * This function will disable the mailbox interrupt, free any messages still
+ * in the mailbox and place it into a disabled state. The firmware is
+ * expected to see the update and assume that the host is in the reset state.
+ **/
+void fbnic_fw_free_mbx(struct fbnic_dev *fbd)
+{
+ /* Vector has already been freed */
+ if (!fbd->fw_msix_vector)
+ return;
+
+ fbnic_fw_disable_mbx(fbd);
+
+ /* Free the vector */
+ free_irq(fbd->fw_msix_vector, fbd);
+ fbd->fw_msix_vector = 0;
+}
+
+static irqreturn_t fbnic_mac_msix_intr(int __always_unused irq, void *data)
+{
+ struct fbnic_dev *fbd = data;
+ struct fbnic_net *fbn;
+
+ if (fbd->mac->get_link_event(fbd) == FBNIC_LINK_EVENT_NONE) {
+ fbnic_wr32(fbd, FBNIC_INTR_MASK_CLEAR(0),
+ 1u << FBNIC_PCS_MSIX_ENTRY);
+ return IRQ_HANDLED;
+ }
+
+ fbn = netdev_priv(fbd->netdev);
+
+ /* Record link down events */
+ if (!fbd->mac->get_link(fbd, fbn->aui, fbn->fec))
+ phylink_pcs_change(fbn->pcs, false);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * fbnic_mac_request_irq - Configure the MAC to enable it to advertise link
+ * @fbd: Pointer to device to initialize
+ *
+ * This function provides basic bringup for the MAC/PHY IRQ. For now the IRQ
+ * will remain disabled until we start the MAC/PCS/PHY logic via phylink.
+ *
+ * Return: non-zero on failure.
+ **/
+int fbnic_mac_request_irq(struct fbnic_dev *fbd)
+{
+ struct pci_dev *pdev = to_pci_dev(fbd->dev);
+ int vector, err;
+
+ WARN_ON(fbd->mac_msix_vector);
+
+ vector = pci_irq_vector(pdev, FBNIC_PCS_MSIX_ENTRY);
+ if (vector < 0)
+ return vector;
+
+ /* Request the IRQ for PCS link vector.
+ * Map PCS cause to it, and unmask it
+ */
+ err = request_irq(vector, &fbnic_mac_msix_intr, 0,
+ fbd->netdev->name, fbd);
+ if (err)
+ return err;
+
+ /* Map and enable interrupt, unmask vector after link is configured */
+ fbnic_wr32(fbd, FBNIC_INTR_MSIX_CTRL(FBNIC_INTR_MSIX_CTRL_PCS_IDX),
+ FBNIC_PCS_MSIX_ENTRY | FBNIC_INTR_MSIX_CTRL_ENABLE);
+
+ fbd->mac_msix_vector = vector;
+
+ return 0;
+}
+
+/**
+ * fbnic_mac_free_irq - Teardown the MAC IRQ to prepare for stopping
+ * @fbd: Pointer to device that is stopping
+ *
+ * This function undoes the work done in fbnic_mac_request_irq and prepares
+ * the device to no longer receive traffic on the host interface.
+ **/
+void fbnic_mac_free_irq(struct fbnic_dev *fbd)
+{
+ /* Vector has already been freed */
+ if (!fbd->mac_msix_vector)
+ return;
+
+ /* Disable interrupt */
+ fbnic_wr32(fbd, FBNIC_INTR_MSIX_CTRL(FBNIC_INTR_MSIX_CTRL_PCS_IDX),
+ FBNIC_PCS_MSIX_ENTRY);
+ fbnic_wrfl(fbd);
+
+ /* Synchronize IRQ to prevent race that would unmask vector */
+ synchronize_irq(fbd->mac_msix_vector);
+
+ /* Mask the vector */
+ fbnic_wr32(fbd, FBNIC_INTR_MASK_SET(0), 1u << FBNIC_PCS_MSIX_ENTRY);
+
+ /* Free the vector */
+ free_irq(fbd->mac_msix_vector, fbd);
+ fbd->mac_msix_vector = 0;
+}
+
+void fbnic_synchronize_irq(struct fbnic_dev *fbd, int nr)
+{
+ struct pci_dev *pdev = to_pci_dev(fbd->dev);
+ int irq = pci_irq_vector(pdev, nr);
+
+ if (irq < 0)
+ return;
+
+ synchronize_irq(irq);
+}
+
+int fbnic_request_irq(struct fbnic_dev *fbd, int nr, irq_handler_t handler,
+ unsigned long flags, const char *name, void *data)
+{
+ struct pci_dev *pdev = to_pci_dev(fbd->dev);
+ int irq = pci_irq_vector(pdev, nr);
+
+ if (irq < 0)
+ return irq;
+
+ return request_irq(irq, handler, flags, name, data);
+}
+
+void fbnic_free_irq(struct fbnic_dev *fbd, int nr, void *data)
+{
+ struct pci_dev *pdev = to_pci_dev(fbd->dev);
+ int irq = pci_irq_vector(pdev, nr);
+
+ if (irq < 0)
+ return;
+
+ free_irq(irq, data);
+}
+
+void fbnic_napi_name_irqs(struct fbnic_dev *fbd)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(fbd->napi_irq); i++)
+ snprintf(fbd->napi_irq[i].name,
+ sizeof(fbd->napi_irq[i].name),
+ "%s-TxRx-%u", fbd->netdev->name, i);
+}
+
+int fbnic_napi_request_irq(struct fbnic_dev *fbd,
+ struct fbnic_napi_vector *nv)
+{
+ struct fbnic_net *fbn = netdev_priv(fbd->netdev);
+ int i = fbnic_napi_idx(nv);
+ int err;
+
+ if (!fbd->napi_irq[i].users) {
+ err = fbnic_request_irq(fbd, nv->v_idx,
+ fbnic_msix_clean_rings, 0,
+ fbd->napi_irq[i].name,
+ &fbn->napi[i]);
+ if (err)
+ return err;
+ }
+
+ fbd->napi_irq[i].users++;
+ return 0;
+}
+
+void fbnic_napi_free_irq(struct fbnic_dev *fbd,
+ struct fbnic_napi_vector *nv)
+{
+ struct fbnic_net *fbn = netdev_priv(fbd->netdev);
+ int i = fbnic_napi_idx(nv);
+
+ if (--fbd->napi_irq[i].users)
+ return;
+
+ fbnic_free_irq(fbd, nv->v_idx, &fbn->napi[i]);
+}
+
+void fbnic_free_irqs(struct fbnic_dev *fbd)
+{
+ struct pci_dev *pdev = to_pci_dev(fbd->dev);
+
+ fbd->num_irqs = 0;
+
+ pci_free_irq_vectors(pdev);
+}
+
+int fbnic_alloc_irqs(struct fbnic_dev *fbd)
+{
+ unsigned int wanted_irqs = FBNIC_NON_NAPI_VECTORS;
+ struct pci_dev *pdev = to_pci_dev(fbd->dev);
+ int num_irqs;
+
+ wanted_irqs += min_t(unsigned int, num_online_cpus(), FBNIC_MAX_RXQS);
+ num_irqs = pci_alloc_irq_vectors(pdev, FBNIC_NON_NAPI_VECTORS + 1,
+ wanted_irqs, PCI_IRQ_MSIX);
+ if (num_irqs < 0) {
+ dev_err(fbd->dev, "Failed to allocate MSI-X entries\n");
+ return num_irqs;
+ }
+
+ if (num_irqs < wanted_irqs)
+ dev_warn(fbd->dev, "Allocated %d IRQs, expected %d\n",
+ num_irqs, wanted_irqs);
+
+ fbd->num_irqs = num_irqs;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
new file mode 100644
index 000000000000..fc7abea4ef5b
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
@@ -0,0 +1,934 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/bitfield.h>
+#include <net/tcp.h>
+
+#include "fbnic.h"
+#include "fbnic_mac.h"
+#include "fbnic_netdev.h"
+
+static void fbnic_init_readrq(struct fbnic_dev *fbd, unsigned int offset,
+ unsigned int cls, unsigned int readrq)
+{
+ u32 val = rd32(fbd, offset);
+
+ /* The TDF_CTL masks are a superset of the RNI_RBP ones. So we can
+	 * use them when setting either the TDF_CTL or RNI_RBP registers.
+ */
+ val &= FBNIC_QM_TNI_TDF_CTL_MAX_OT | FBNIC_QM_TNI_TDF_CTL_MAX_OB;
+
+ val |= FIELD_PREP(FBNIC_QM_TNI_TDF_CTL_MRRS, readrq) |
+ FIELD_PREP(FBNIC_QM_TNI_TDF_CTL_CLS, cls);
+
+ wr32(fbd, offset, val);
+}
+
+static void fbnic_init_mps(struct fbnic_dev *fbd, unsigned int offset,
+ unsigned int cls, unsigned int mps)
+{
+ u32 val = rd32(fbd, offset);
+
+ /* Currently all MPS masks are identical so just use the first one */
+ val &= ~(FBNIC_QM_TNI_TCM_CTL_MPS | FBNIC_QM_TNI_TCM_CTL_CLS);
+
+ val |= FIELD_PREP(FBNIC_QM_TNI_TCM_CTL_MPS, mps) |
+ FIELD_PREP(FBNIC_QM_TNI_TCM_CTL_CLS, cls);
+
+ wr32(fbd, offset, val);
+}
+
+static void fbnic_mac_init_axi(struct fbnic_dev *fbd)
+{
+ bool override_1k = false;
+ int readrq, mps, cls;
+
+ /* All of the values are based on being a power of 2 starting
+ * with 64 == 0. Therefore we can either divide by 64 in the
+ * case of constants, or just subtract 6 from the log2 of the value
+ * in order to get the value we will be programming into the
+ * registers.
+ */
+ readrq = ilog2(fbd->readrq) - 6;
+ if (readrq > 3)
+ override_1k = true;
+ readrq = clamp(readrq, 0, 3);
+
+ mps = ilog2(fbd->mps) - 6;
+ mps = clamp(mps, 0, 3);
+
+ cls = ilog2(L1_CACHE_BYTES) - 6;
+ cls = clamp(cls, 0, 3);
+
+ /* Configure Tx/Rx AXI Paths w/ Read Request and Max Payload sizes */
+ fbnic_init_readrq(fbd, FBNIC_QM_TNI_TDF_CTL, cls, readrq);
+ fbnic_init_mps(fbd, FBNIC_QM_TNI_TCM_CTL, cls, mps);
+
+ /* Configure QM TNI TDE:
+	 * - Max outstanding AXI beats to 704 (768 - 64) - guarantees 8% of
+ * buffer capacity to descriptors.
+ * - Max outstanding transactions to 128
+ */
+ wr32(fbd, FBNIC_QM_TNI_TDE_CTL,
+ FIELD_PREP(FBNIC_QM_TNI_TDE_CTL_MRRS_1K, override_1k ? 1 : 0) |
+ FIELD_PREP(FBNIC_QM_TNI_TDE_CTL_MAX_OB, 704) |
+ FIELD_PREP(FBNIC_QM_TNI_TDE_CTL_MAX_OT, 128) |
+ FIELD_PREP(FBNIC_QM_TNI_TDE_CTL_MRRS, readrq) |
+ FIELD_PREP(FBNIC_QM_TNI_TDE_CTL_CLS, cls));
+
+ fbnic_init_readrq(fbd, FBNIC_QM_RNI_RBP_CTL, cls, readrq);
+ fbnic_init_mps(fbd, FBNIC_QM_RNI_RDE_CTL, cls, mps);
+ fbnic_init_mps(fbd, FBNIC_QM_RNI_RCM_CTL, cls, mps);
+}
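The readrq/mps/cls values above use the "power of two starting at 64 == 0" encoding described in the comment, clamped to the 0..3 range the registers accept (with the 1K override for larger read requests). A standalone sketch of that encoding (illustrative helper, not part of the patch):

/* "Power of two starting at 64 == 0" encoding, clamped to the 0..3
 * range the registers accept (the 1K override handles larger values).
 */
static unsigned int encode_size(unsigned int bytes)
{
	unsigned int code = 0;

	while (bytes > 64 && code < 3) {
		bytes >>= 1;	/* equivalent to ilog2(bytes) - 6, clamped */
		code++;
	}
	return code;	/* 64 -> 0, 128 -> 1, 256 -> 2, 512+ -> 3 */
}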
+
+static void fbnic_mac_init_qm(struct fbnic_dev *fbd)
+{
+ u64 default_meta = FIELD_PREP(FBNIC_TWD_L2_HLEN_MASK, ETH_HLEN) |
+ FBNIC_TWD_FLAG_REQ_COMPLETION;
+ u32 clock_freq;
+
+ /* Configure default TWQ Metadata descriptor */
+ wr32(fbd, FBNIC_QM_TWQ_DEFAULT_META_L,
+ lower_32_bits(default_meta));
+ wr32(fbd, FBNIC_QM_TWQ_DEFAULT_META_H,
+ upper_32_bits(default_meta));
+
+ /* Configure TSO behavior */
+ wr32(fbd, FBNIC_QM_TQS_CTL0,
+ FIELD_PREP(FBNIC_QM_TQS_CTL0_LSO_TS_MASK,
+ FBNIC_QM_TQS_CTL0_LSO_TS_LAST) |
+ FIELD_PREP(FBNIC_QM_TQS_CTL0_PREFETCH_THRESH,
+ FBNIC_QM_TQS_CTL0_PREFETCH_THRESH_MIN));
+
+ /* Limit EDT to INT_MAX as this is the limit of the EDT Qdisc */
+ wr32(fbd, FBNIC_QM_TQS_EDT_TS_RANGE, INT_MAX);
+
+ /* Configure MTU
+	 * Due to a known HW issue we cannot set the MTU to within 16 octets
+ * of a 64 octet aligned boundary. So we will set the TQS_MTU(s) to
+ * MTU + 1.
+ */
+ wr32(fbd, FBNIC_QM_TQS_MTU_CTL0, FBNIC_MAX_JUMBO_FRAME_SIZE + 1);
+ wr32(fbd, FBNIC_QM_TQS_MTU_CTL1,
+ FIELD_PREP(FBNIC_QM_TQS_MTU_CTL1_BULK,
+ FBNIC_MAX_JUMBO_FRAME_SIZE + 1));
+
+ clock_freq = FBNIC_CLOCK_FREQ;
+
+ /* Be aggressive on the timings. We will have the interrupt
+ * threshold timer tick once every 1 usec and coalesce writes for
+ * up to 80 usecs.
+ */
+ wr32(fbd, FBNIC_QM_TCQ_CTL0,
+ FIELD_PREP(FBNIC_QM_TCQ_CTL0_TICK_CYCLES,
+ clock_freq / 1000000) |
+ FIELD_PREP(FBNIC_QM_TCQ_CTL0_COAL_WAIT,
+ clock_freq / 12500));
+
+ /* We will have the interrupt threshold timer tick once every
+ * 1 usec and coalesce writes for up to 2 usecs.
+ */
+ wr32(fbd, FBNIC_QM_RCQ_CTL0,
+ FIELD_PREP(FBNIC_QM_RCQ_CTL0_TICK_CYCLES,
+ clock_freq / 1000000) |
+ FIELD_PREP(FBNIC_QM_RCQ_CTL0_COAL_WAIT,
+ clock_freq / 500000));
+
+ /* Configure spacer control to 64 beats. */
+ wr32(fbd, FBNIC_FAB_AXI4_AR_SPACER_2_CFG,
+ FBNIC_FAB_AXI4_AR_SPACER_MASK |
+ FIELD_PREP(FBNIC_FAB_AXI4_AR_SPACER_THREADSHOLD, 2));
+}
+
+#define FBNIC_DROP_EN_MASK 0x7d
+#define FBNIC_PAUSE_EN_MASK 0x14
+#define FBNIC_ECN_EN_MASK 0x10
+
+struct fbnic_fifo_config {
+ unsigned int addr;
+ unsigned int size;
+};
+
+/* Rx FIFO Configuration
+ * The table consists of 8 entries, of which only 4 are currently used
+ * The starting addr is in units of 64B and the size is in 2KB units
+ * Below is the human readable version of the table defined below:
+ * Function Addr Size
+ * ----------------------------------
+ * Network to Host/BMC 384K 64K
+ * Unused
+ * Unused
+ * Network to BMC 448K 32K
+ * Network to Host 0 384K
+ * Unused
+ * BMC to Host 480K 32K
+ * Unused
+ */
+static const struct fbnic_fifo_config fifo_config[] = {
+ { .addr = 0x1800, .size = 0x20 }, /* Network to Host/BMC */
+ { }, /* Unused */
+ { }, /* Unused */
+ { .addr = 0x1c00, .size = 0x10 }, /* Network to BMC */
+ { .addr = 0x0000, .size = 0xc0 }, /* Network to Host */
+ { }, /* Unused */
+ { .addr = 0x1e00, .size = 0x10 }, /* BMC to Host */
+ { } /* Unused */
+};
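A quick arithmetic check that the raw fifo_config values match the human-readable table above, given 64 B address units and 2 KiB size units (standalone sketch, not part of the patch):

#include <stdio.h>

/* Convert the raw fifo_config values (addr in 64 B units, size in 2 KiB
 * units) back to the KB figures quoted in the table above.
 */
int main(void)
{
	static const struct { unsigned int addr, size; const char *name; } cfg[] = {
		{ 0x1800, 0x20, "Network to Host/BMC" },
		{ 0x1c00, 0x10, "Network to BMC" },
		{ 0x0000, 0xc0, "Network to Host" },
		{ 0x1e00, 0x10, "BMC to Host" },
	};

	for (unsigned int i = 0; i < sizeof(cfg) / sizeof(cfg[0]); i++)
		printf("%-20s addr %3uK size %3uK\n", cfg[i].name,
		       cfg[i].addr * 64 / 1024, cfg[i].size * 2);
	/* prints 384K/64K, 448K/32K, 0K/384K, 480K/32K - matching the table */
	return 0;
}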
+
+static void fbnic_mac_init_rxb(struct fbnic_dev *fbd)
+{
+ bool rx_enable;
+ int i;
+
+ rx_enable = !!(rd32(fbd, FBNIC_RPC_RMI_CONFIG) &
+ FBNIC_RPC_RMI_CONFIG_ENABLE);
+
+ for (i = 0; i < 8; i++) {
+ unsigned int size = fifo_config[i].size;
+
+ /* If we are coming up on a system that already has the
+ * Rx data path enabled we don't need to reconfigure the
+ * FIFOs. Instead we can check to verify the values are
+ * large enough to meet our needs, and use the values to
+ * populate the flow control, ECN, and drop thresholds.
+ */
+ if (rx_enable) {
+ size = FIELD_GET(FBNIC_RXB_PBUF_SIZE,
+ rd32(fbd, FBNIC_RXB_PBUF_CFG(i)));
+ if (size < fifo_config[i].size)
+ dev_warn(fbd->dev,
+ "fifo%d size of %d smaller than expected value of %d\n",
+ i, size << 11,
+ fifo_config[i].size << 11);
+ } else {
+ /* Program RXB Cuthrough */
+ wr32(fbd, FBNIC_RXB_CT_SIZE(i),
+ FIELD_PREP(FBNIC_RXB_CT_SIZE_HEADER, 4) |
+ FIELD_PREP(FBNIC_RXB_CT_SIZE_PAYLOAD, 2));
+
+			/* The packet buffer size has 2KB granularity while
+			 * the packet buffer base address has only 64B
+			 * granularity.
+ */
+ wr32(fbd, FBNIC_RXB_PBUF_CFG(i),
+ FIELD_PREP(FBNIC_RXB_PBUF_BASE_ADDR,
+ fifo_config[i].addr) |
+ FIELD_PREP(FBNIC_RXB_PBUF_SIZE, size));
+
+ /* The granularity for the credits is 64B. This is
+ * based on RXB_PBUF_SIZE * 32 + 4.
+ */
+ wr32(fbd, FBNIC_RXB_PBUF_CREDIT(i),
+ FIELD_PREP(FBNIC_RXB_PBUF_CREDIT_MASK,
+ size ? size * 32 + 4 : 0));
+ }
+
+ if (!size)
+ continue;
+
+ /* Pause is size of FIFO with 56KB skid to start/stop */
+ wr32(fbd, FBNIC_RXB_PAUSE_THLD(i),
+ !(FBNIC_PAUSE_EN_MASK & (1u << i)) ? 0x1fff :
+ FIELD_PREP(FBNIC_RXB_PAUSE_THLD_ON,
+ size * 32 - 0x380) |
+ FIELD_PREP(FBNIC_RXB_PAUSE_THLD_OFF, 0x380));
+
+ /* Enable Drop when only one packet is left in the FIFO */
+ wr32(fbd, FBNIC_RXB_DROP_THLD(i),
+ !(FBNIC_DROP_EN_MASK & (1u << i)) ? 0x1fff :
+ FIELD_PREP(FBNIC_RXB_DROP_THLD_ON,
+ size * 32 -
+ FBNIC_MAX_JUMBO_FRAME_SIZE / 64) |
+ FIELD_PREP(FBNIC_RXB_DROP_THLD_OFF,
+ size * 32 -
+ FBNIC_MAX_JUMBO_FRAME_SIZE / 64));
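+ /* FBNIC_MAX_JUMBO_FRAME_SIZE (9742) / 64 rounds down to 152
+ * credits, so the thresholds above leave roughly one jumbo frame
+ * of headroom in the FIFO.
+ */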
+
+ /* Assert the ECN bit once 1/4 of the RXB FIFO is filled, but no
+ * sooner than when one full jumbo frame's worth of data is buffered
+ */
+ wr32(fbd, FBNIC_RXB_ECN_THLD(i),
+ !(FBNIC_ECN_EN_MASK & (1u << i)) ? 0x1fff :
+ FIELD_PREP(FBNIC_RXB_ECN_THLD_ON,
+ max_t(unsigned int,
+ size * 32 / 4,
+ FBNIC_MAX_JUMBO_FRAME_SIZE / 64)) |
+ FIELD_PREP(FBNIC_RXB_ECN_THLD_OFF,
+ max_t(unsigned int,
+ size * 32 / 4,
+ FBNIC_MAX_JUMBO_FRAME_SIZE / 64)));
+ }
+
+ /* For now only enable drop and ECN. We need to add driver/kernel
+ * interfaces for configuring pause.
+ */
+ wr32(fbd, FBNIC_RXB_PAUSE_DROP_CTRL,
+ FIELD_PREP(FBNIC_RXB_PAUSE_DROP_CTRL_DROP_ENABLE,
+ FBNIC_DROP_EN_MASK) |
+ FIELD_PREP(FBNIC_RXB_PAUSE_DROP_CTRL_ECN_ENABLE,
+ FBNIC_ECN_EN_MASK));
+
+ /* Program INTF credits */
+ wr32(fbd, FBNIC_RXB_INTF_CREDIT,
+ FBNIC_RXB_INTF_CREDIT_MASK0 |
+ FBNIC_RXB_INTF_CREDIT_MASK1 |
+ FBNIC_RXB_INTF_CREDIT_MASK2 |
+ FIELD_PREP(FBNIC_RXB_INTF_CREDIT_MASK3, 8));
+
+ /* Configure calendar slots.
+ * Rx: 0 - 62 RDE 1st, BMC 2nd
+ * 63 BMC 1st, RDE 2nd
+ */
+ for (i = 0; i < 16; i++) {
+ u32 calendar_val = (i == 15) ? 0x1e1b1b1b : 0x1b1b1b1b;
+
+ wr32(fbd, FBNIC_RXB_CLDR_PRIO_CFG(i), calendar_val);
+ }
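+
+ /* Each 32b CLDR register packs four 8b slot entries, so the 16
+ * writes above cover calendar slots 0 - 63; only the top byte of
+ * the final register (slot 63) differs, which presumably is what
+ * gives the BMC first priority in that slot as noted above.
+ */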
+
+ /* Split the credits for the DRR up as follows:
+ * Quantum0: 8000 Network to Host
+ * Quantum1: 0 Not used
+ * Quantum2: 80 BMC to Host
+ * Quantum3: 0 Not used
+ * Quantum4: 8000 Multicast to Host and BMC
+ */
+ wr32(fbd, FBNIC_RXB_DWRR_RDE_WEIGHT0,
+ FIELD_PREP(FBNIC_RXB_DWRR_RDE_WEIGHT0_QUANTUM0, 0x40) |
+ FIELD_PREP(FBNIC_RXB_DWRR_RDE_WEIGHT0_QUANTUM2, 0x50));
+ wr32(fbd, FBNIC_RXB_DWRR_RDE_WEIGHT0_EXT,
+ FIELD_PREP(FBNIC_RXB_DWRR_RDE_WEIGHT0_QUANTUM0, 0x1f));
+ wr32(fbd, FBNIC_RXB_DWRR_RDE_WEIGHT1,
+ FIELD_PREP(FBNIC_RXB_DWRR_RDE_WEIGHT1_QUANTUM4, 0x40));
+ wr32(fbd, FBNIC_RXB_DWRR_RDE_WEIGHT1_EXT,
+ FIELD_PREP(FBNIC_RXB_DWRR_RDE_WEIGHT1_QUANTUM4, 0x1f));
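+ /* The _EXT registers appear to hold the upper bits of each quantum:
+ * EXT 0x1f combined with base 0x40 gives 0x1f40 = 8000, matching
+ * the Quantum0/Quantum4 values listed above, while Quantum2 is
+ * simply 0x50 = 80.
+ */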
+
+ /* Program RXB FCS Endian register */
+ wr32(fbd, FBNIC_RXB_ENDIAN_FCS, 0x0aaaaaa0);
+}
+
+static void fbnic_mac_init_txb(struct fbnic_dev *fbd)
+{
+ int i;
+
+ wr32(fbd, FBNIC_TCE_TXB_CTRL, 0);
+
+ /* Configure Tx QM Credits */
+ wr32(fbd, FBNIC_QM_TQS_CTL1,
+ FIELD_PREP(FBNIC_QM_TQS_CTL1_MC_MAX_CREDITS, 0x40) |
+ FIELD_PREP(FBNIC_QM_TQS_CTL1_BULK_MAX_CREDITS, 0x20));
+
+ /* Initialize internal Tx queues */
+ wr32(fbd, FBNIC_TCE_TXB_TEI_Q0_CTRL, 0);
+ wr32(fbd, FBNIC_TCE_TXB_TEI_Q1_CTRL, 0);
+ wr32(fbd, FBNIC_TCE_TXB_MC_Q_CTRL,
+ FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_SIZE, 0x400) |
+ FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_START, 0x000));
+ wr32(fbd, FBNIC_TCE_TXB_RX_TEI_Q_CTRL, 0);
+ wr32(fbd, FBNIC_TCE_TXB_TX_BMC_Q_CTRL,
+ FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_SIZE, 0x200) |
+ FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_START, 0x400));
+ wr32(fbd, FBNIC_TCE_TXB_RX_BMC_Q_CTRL,
+ FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_SIZE, 0x200) |
+ FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_START, 0x600));
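+ /* Taken together, the three enabled queues above tile a contiguous
+ * region: MC at 0x000 - 0x3ff, Tx BMC at 0x400 - 0x5ff and Rx BMC
+ * at 0x600 - 0x7ff (the units of SIZE/START are not spelled out
+ * here).
+ */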
+
+ wr32(fbd, FBNIC_TCE_LSO_CTRL,
+ FBNIC_TCE_LSO_CTRL_IPID_MODE_INC |
+ FIELD_PREP(FBNIC_TCE_LSO_CTRL_TCPF_CLR_1ST, TCPHDR_PSH |
+ TCPHDR_FIN) |
+ FIELD_PREP(FBNIC_TCE_LSO_CTRL_TCPF_CLR_MID, TCPHDR_PSH |
+ TCPHDR_CWR |
+ TCPHDR_FIN) |
+ FIELD_PREP(FBNIC_TCE_LSO_CTRL_TCPF_CLR_END, TCPHDR_CWR));
+ wr32(fbd, FBNIC_TCE_CSO_CTRL, 0);
+
+ wr32(fbd, FBNIC_TCE_BMC_MAX_PKTSZ,
+ FIELD_PREP(FBNIC_TCE_BMC_MAX_PKTSZ_TX,
+ FBNIC_MAX_JUMBO_FRAME_SIZE) |
+ FIELD_PREP(FBNIC_TCE_BMC_MAX_PKTSZ_RX,
+ FBNIC_MAX_JUMBO_FRAME_SIZE));
+ wr32(fbd, FBNIC_TCE_MC_MAX_PKTSZ,
+ FIELD_PREP(FBNIC_TCE_MC_MAX_PKTSZ_TMI,
+ FBNIC_MAX_JUMBO_FRAME_SIZE));
+
+ /* Configure calendar slots.
+ * Tx: 0 - 62 TMI 1st, BMC 2nd
+ * 63 BMC 1st, TMI 2nd
+ */
+ for (i = 0; i < 16; i++) {
+ u32 calendar_val = (i == 15) ? 0x1e1b1b1b : 0x1b1b1b1b;
+
+ wr32(fbd, FBNIC_TCE_TXB_CLDR_SLOT_CFG(i), calendar_val);
+ }
+
+ /* Configure DWRR */
+ wr32(fbd, FBNIC_TCE_TXB_ENQ_WRR_CTRL,
+ FIELD_PREP(FBNIC_TCE_TXB_ENQ_WRR_CTRL_WEIGHT0, 0x64) |
+ FIELD_PREP(FBNIC_TCE_TXB_ENQ_WRR_CTRL_WEIGHT2, 0x04));
+ wr32(fbd, FBNIC_TCE_TXB_TEI_DWRR_CTRL, 0);
+ wr32(fbd, FBNIC_TCE_TXB_TEI_DWRR_CTRL_EXT, 0);
+ wr32(fbd, FBNIC_TCE_TXB_BMC_DWRR_CTRL,
+ FIELD_PREP(FBNIC_TCE_TXB_BMC_DWRR_CTRL_QUANTUM0, 0x50) |
+ FIELD_PREP(FBNIC_TCE_TXB_BMC_DWRR_CTRL_QUANTUM1, 0x82));
+ wr32(fbd, FBNIC_TCE_TXB_BMC_DWRR_CTRL_EXT, 0);
+ wr32(fbd, FBNIC_TCE_TXB_NTWRK_DWRR_CTRL,
+ FIELD_PREP(FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_QUANTUM1, 0x50) |
+ FIELD_PREP(FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_QUANTUM2, 0x20));
+ wr32(fbd, FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_EXT,
+ FIELD_PREP(FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_QUANTUM2, 0x03));
+
+ /* Configure SOP protocol protection */
+ wr32(fbd, FBNIC_TCE_SOP_PROT_CTRL,
+ FIELD_PREP(FBNIC_TCE_SOP_PROT_CTRL_TBI, 0x78) |
+ FIELD_PREP(FBNIC_TCE_SOP_PROT_CTRL_TTI_FRM, 0x40) |
+ FIELD_PREP(FBNIC_TCE_SOP_PROT_CTRL_TTI_CM, 0x0c));
+
+ /* Conservative configuration on MAC interface Start of Packet
+ * protection FIFO. This sets the minimum depth, measured in 64B units,
+ * that the FIFO must reach before we start sending packets to the MAC;
+ * the FIFO is up to 160 entries deep.
+ *
+ * For the ASIC the clock is fast enough that we will likely fill
+ * the SOP FIFO before the MAC can drain it. So just use a minimum
+ * value of 8.
+ */
+ wr32(fbd, FBNIC_TMI_SOP_PROT_CTRL, 8);
+
+ wrfl(fbd);
+ wr32(fbd, FBNIC_TCE_TXB_CTRL, FBNIC_TCE_TXB_CTRL_TCAM_ENABLE |
+ FBNIC_TCE_TXB_CTRL_LOAD);
+}
+
+static void fbnic_mac_init_regs(struct fbnic_dev *fbd)
+{
+ fbnic_mac_init_axi(fbd);
+ fbnic_mac_init_qm(fbd);
+ fbnic_mac_init_rxb(fbd);
+ fbnic_mac_init_txb(fbd);
+}
+
+static void __fbnic_mac_stat_rd64(struct fbnic_dev *fbd, bool reset, u32 reg,
+ struct fbnic_stat_counter *stat)
+{
+ u64 new_reg_value;
+
+ new_reg_value = fbnic_stat_rd64(fbd, reg, 1);
+ if (!reset)
+ stat->value += new_reg_value - stat->u.old_reg_value_64;
+ stat->u.old_reg_value_64 = new_reg_value;
+ stat->reported = true;
+}
+
+#define fbnic_mac_stat_rd64(fbd, reset, __stat, __CSR) \
+ __fbnic_mac_stat_rd64(fbd, reset, FBNIC_##__CSR##_L, &(__stat))
+
+static void fbnic_mac_tx_pause_config(struct fbnic_dev *fbd, bool tx_pause)
+{
+ u32 rxb_pause_ctrl;
+
+ /* Enable generation of pause frames if enabled */
+ rxb_pause_ctrl = rd32(fbd, FBNIC_RXB_PAUSE_DROP_CTRL);
+ rxb_pause_ctrl &= ~FBNIC_RXB_PAUSE_DROP_CTRL_PAUSE_ENABLE;
+ if (tx_pause)
+ rxb_pause_ctrl |=
+ FIELD_PREP(FBNIC_RXB_PAUSE_DROP_CTRL_PAUSE_ENABLE,
+ FBNIC_PAUSE_EN_MASK);
+ wr32(fbd, FBNIC_RXB_PAUSE_DROP_CTRL, rxb_pause_ctrl);
+}
+
+static int fbnic_mac_get_link_event(struct fbnic_dev *fbd)
+{
+ u32 intr_mask = rd32(fbd, FBNIC_SIG_PCS_INTR_STS);
+
+ if (intr_mask & FBNIC_SIG_PCS_INTR_LINK_DOWN)
+ return FBNIC_LINK_EVENT_DOWN;
+
+ return (intr_mask & FBNIC_SIG_PCS_INTR_LINK_UP) ?
+ FBNIC_LINK_EVENT_UP : FBNIC_LINK_EVENT_NONE;
+}
+
+static u32 __fbnic_mac_cmd_config_asic(struct fbnic_dev *fbd,
+ bool tx_pause, bool rx_pause)
+{
+ /* Enable MAC Promiscuous mode and Tx padding */
+ u32 command_config = FBNIC_MAC_COMMAND_CONFIG_TX_PAD_EN |
+ FBNIC_MAC_COMMAND_CONFIG_PROMISC_EN;
+ struct fbnic_net *fbn = netdev_priv(fbd->netdev);
+
+ /* Disable pause frames if not enabled */
+ if (!tx_pause)
+ command_config |= FBNIC_MAC_COMMAND_CONFIG_TX_PAUSE_DIS;
+ if (!rx_pause)
+ command_config |= FBNIC_MAC_COMMAND_CONFIG_RX_PAUSE_DIS;
+
+ /* Disable fault handling if no FEC is requested */
+ if (fbn->fec == FBNIC_FEC_OFF)
+ command_config |= FBNIC_MAC_COMMAND_CONFIG_FLT_HDL_DIS;
+
+ return command_config;
+}
+
+static bool fbnic_mac_get_link_status(struct fbnic_dev *fbd, u8 aui, u8 fec)
+{
+ u32 pcs_status, lane_mask = ~0;
+
+ pcs_status = rd32(fbd, FBNIC_SIG_PCS_OUT0);
+ if (!(pcs_status & FBNIC_SIG_PCS_OUT0_LINK))
+ return false;
+
+ /* Define the expected lane mask for the status bits we need to check */
+ switch (aui) {
+ case FBNIC_AUI_100GAUI2:
+ lane_mask = 0xf;
+ break;
+ case FBNIC_AUI_50GAUI1:
+ lane_mask = 3;
+ break;
+ case FBNIC_AUI_LAUI2:
+ switch (fec) {
+ case FBNIC_FEC_OFF:
+ lane_mask = 0x63;
+ break;
+ case FBNIC_FEC_RS:
+ lane_mask = 5;
+ break;
+ case FBNIC_FEC_BASER:
+ lane_mask = 0xf;
+ break;
+ }
+ break;
+ case FBNIC_AUI_25GAUI:
+ lane_mask = 1;
+ break;
+ }
+
+ /* Use an XOR to remove the bits we expect to see set */
+ switch (fec) {
+ case FBNIC_FEC_OFF:
+ lane_mask ^= FIELD_GET(FBNIC_SIG_PCS_OUT0_BLOCK_LOCK,
+ pcs_status);
+ break;
+ case FBNIC_FEC_RS:
+ lane_mask ^= FIELD_GET(FBNIC_SIG_PCS_OUT0_AMPS_LOCK,
+ pcs_status);
+ break;
+ case FBNIC_FEC_BASER:
+ lane_mask ^= FIELD_GET(FBNIC_SIG_PCS_OUT1_FCFEC_LOCK,
+ rd32(fbd, FBNIC_SIG_PCS_OUT1));
+ break;
+ }
+
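+ /* As an example, with FBNIC_AUI_100GAUI2 and RS FEC the expected
+ * lane_mask is 0xf; if AMPS_LOCK reports all four lanes locked the
+ * XOR above clears every bit and the check below reports lock.
+ */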
+ /* If all expected lane bits cancelled out then we have a lock on all lanes */
+ return !lane_mask;
+}
+
+static bool fbnic_pmd_update_state(struct fbnic_dev *fbd, bool signal_detect)
+{
+ /* Delay link up for 4 seconds to allow for link training.
+ * The state transitions for this are as follows:
+ *
+ * All states have the following two transitions in common:
+ * Loss of signal -> FBNIC_PMD_INITIALIZE
+ * The condition handled below (!signal)
+ * Reconfiguration -> FBNIC_PMD_INITIALIZE
+ * Occurs when mac_prepare starts a PHY reconfig
+ * FBNIC_PMD_TRAINING:
+ * signal still detected && 4s have passed -> Report link up
+ * When link is brought up in link_up -> FBNIC_PMD_SEND_DATA
+ * FBNIC_PMD_INITIALIZE:
+ * signal detected -> FBNIC_PMD_TRAINING
+ */
+ if (!signal_detect) {
+ fbd->pmd_state = FBNIC_PMD_INITIALIZE;
+ return false;
+ }
+
+ switch (fbd->pmd_state) {
+ case FBNIC_PMD_TRAINING:
+ return time_before(fbd->end_of_pmd_training, jiffies);
+ case FBNIC_PMD_LINK_READY:
+ case FBNIC_PMD_SEND_DATA:
+ return true;
+ }
+
+ fbd->end_of_pmd_training = jiffies + 4 * HZ;
+
+ /* Ensure end_of_pmd_training is visible before the state change */
+ smp_wmb();
+
+ fbd->pmd_state = FBNIC_PMD_TRAINING;
+
+ return false;
+}
+
+static bool fbnic_mac_get_link(struct fbnic_dev *fbd, u8 aui, u8 fec)
+{
+ bool link;
+
+ /* Flush status bits to clear possible stale data,
+ * bits should reset themselves back to 1 if link is truly up
+ */
+ wr32(fbd, FBNIC_SIG_PCS_OUT0, FBNIC_SIG_PCS_OUT0_LINK |
+ FBNIC_SIG_PCS_OUT0_BLOCK_LOCK |
+ FBNIC_SIG_PCS_OUT0_AMPS_LOCK);
+ wr32(fbd, FBNIC_SIG_PCS_OUT1, FBNIC_SIG_PCS_OUT1_FCFEC_LOCK);
+ wrfl(fbd);
+
+ /* Clear interrupt state due to recent changes. */
+ wr32(fbd, FBNIC_SIG_PCS_INTR_STS,
+ FBNIC_SIG_PCS_INTR_LINK_DOWN | FBNIC_SIG_PCS_INTR_LINK_UP);
+
+ link = fbnic_mac_get_link_status(fbd, aui, fec);
+ link = fbnic_pmd_update_state(fbd, link);
+
+ /* Enable interrupt to only capture changes in link state */
+ wr32(fbd, FBNIC_SIG_PCS_INTR_MASK,
+ ~FBNIC_SIG_PCS_INTR_LINK_DOWN & ~FBNIC_SIG_PCS_INTR_LINK_UP);
+ wr32(fbd, FBNIC_INTR_MASK_CLEAR(0), 1u << FBNIC_PCS_MSIX_ENTRY);
+
+ return link;
+}
+
+void fbnic_mac_get_fw_settings(struct fbnic_dev *fbd, u8 *aui, u8 *fec)
+{
+ /* Retrieve default speed from FW */
+ switch (fbd->fw_cap.link_speed) {
+ case FBNIC_FW_LINK_MODE_25CR:
+ *aui = FBNIC_AUI_25GAUI;
+ break;
+ case FBNIC_FW_LINK_MODE_50CR2:
+ *aui = FBNIC_AUI_LAUI2;
+ break;
+ case FBNIC_FW_LINK_MODE_50CR:
+ *aui = FBNIC_AUI_50GAUI1;
+ *fec = FBNIC_FEC_RS;
+ return;
+ case FBNIC_FW_LINK_MODE_100CR2:
+ *aui = FBNIC_AUI_100GAUI2;
+ *fec = FBNIC_FEC_RS;
+ return;
+ default:
+ *aui = FBNIC_AUI_UNKNOWN;
+ return;
+ }
+
+ /* Update FEC to reflect the FW's current mode */
+ switch (fbd->fw_cap.link_fec) {
+ case FBNIC_FW_LINK_FEC_NONE:
+ *fec = FBNIC_FEC_OFF;
+ break;
+ case FBNIC_FW_LINK_FEC_RS:
+ default:
+ *fec = FBNIC_FEC_RS;
+ break;
+ case FBNIC_FW_LINK_FEC_BASER:
+ *fec = FBNIC_FEC_BASER;
+ break;
+ }
+}
+
+static void fbnic_mac_prepare(struct fbnic_dev *fbd, u8 aui, u8 fec)
+{
+ /* Mask and clear the PCS interrupt, will be enabled by link handler */
+ wr32(fbd, FBNIC_SIG_PCS_INTR_MASK, ~0);
+ wr32(fbd, FBNIC_SIG_PCS_INTR_STS, ~0);
+
+ /* If we don't have link tear it all down and start over */
+ if (!fbnic_mac_get_link_status(fbd, aui, fec))
+ fbd->pmd_state = FBNIC_PMD_INITIALIZE;
+}
+
+static void fbnic_mac_link_down_asic(struct fbnic_dev *fbd)
+{
+ u32 cmd_cfg, mac_ctrl;
+
+ cmd_cfg = __fbnic_mac_cmd_config_asic(fbd, false, false);
+ mac_ctrl = rd32(fbd, FBNIC_SIG_MAC_IN0);
+
+ mac_ctrl |= FBNIC_SIG_MAC_IN0_RESET_FF_TX_CLK |
+ FBNIC_SIG_MAC_IN0_RESET_TX_CLK |
+ FBNIC_SIG_MAC_IN0_RESET_FF_RX_CLK |
+ FBNIC_SIG_MAC_IN0_RESET_RX_CLK;
+
+ wr32(fbd, FBNIC_SIG_MAC_IN0, mac_ctrl);
+ wr32(fbd, FBNIC_MAC_COMMAND_CONFIG, cmd_cfg);
+}
+
+static void fbnic_mac_link_up_asic(struct fbnic_dev *fbd,
+ bool tx_pause, bool rx_pause)
+{
+ u32 cmd_cfg, mac_ctrl;
+
+ fbnic_mac_tx_pause_config(fbd, tx_pause);
+
+ cmd_cfg = __fbnic_mac_cmd_config_asic(fbd, tx_pause, rx_pause);
+ mac_ctrl = rd32(fbd, FBNIC_SIG_MAC_IN0);
+
+ mac_ctrl &= ~(FBNIC_SIG_MAC_IN0_RESET_FF_TX_CLK |
+ FBNIC_SIG_MAC_IN0_RESET_TX_CLK |
+ FBNIC_SIG_MAC_IN0_RESET_FF_RX_CLK |
+ FBNIC_SIG_MAC_IN0_RESET_RX_CLK);
+ cmd_cfg |= FBNIC_MAC_COMMAND_CONFIG_RX_ENA |
+ FBNIC_MAC_COMMAND_CONFIG_TX_ENA;
+
+ wr32(fbd, FBNIC_SIG_MAC_IN0, mac_ctrl);
+ wr32(fbd, FBNIC_MAC_COMMAND_CONFIG, cmd_cfg);
+}
+
+static void
+fbnic_pcs_rsfec_stat_rd32(struct fbnic_dev *fbd, u32 reg, bool reset,
+ struct fbnic_stat_counter *stat)
+{
+ u32 pcs_rsfec_stat;
+
+ /* The PCS/RSFEC registers are only 16b wide each, so reading the
+ * Lo/Hi register pair gives us two values of the form 0x0000xxxx.
+ * To make them usable as a full stat we shift the upper register's
+ * bits into the zeros above the lower value, keeping the math at 32b.
+ *
+ * Read ordering must be lower reg followed by upper reg.
+ */
+ pcs_rsfec_stat = rd32(fbd, reg) & 0xffff;
+ pcs_rsfec_stat |= rd32(fbd, reg + 1) << 16;
+
+ /* RSFEC registers clear themselves upon being read so there is no
+ * need to store the old_reg_value.
+ */
+ if (!reset)
+ stat->value += pcs_rsfec_stat;
+}
+
+static void
+fbnic_mac_get_fec_stats(struct fbnic_dev *fbd, bool reset,
+ struct fbnic_fec_stats *s)
+{
+ fbnic_pcs_rsfec_stat_rd32(fbd, FBNIC_RSFEC_CCW_LO(0), reset,
+ &s->corrected_blocks);
+ fbnic_pcs_rsfec_stat_rd32(fbd, FBNIC_RSFEC_NCCW_LO(0), reset,
+ &s->uncorrectable_blocks);
+}
+
+static void
+fbnic_mac_get_pcs_stats(struct fbnic_dev *fbd, bool reset,
+ struct fbnic_pcs_stats *s)
+{
+ int i;
+
+ for (i = 0; i < FBNIC_PCS_MAX_LANES; i++)
+ fbnic_pcs_rsfec_stat_rd32(fbd, FBNIC_PCS_SYMBLERR_LO(i), reset,
+ &s->SymbolErrorDuringCarrier.lanes[i]);
+}
+
+static void
+fbnic_mac_get_eth_mac_stats(struct fbnic_dev *fbd, bool reset,
+ struct fbnic_eth_mac_stats *mac_stats)
+{
+ fbnic_mac_stat_rd64(fbd, reset, mac_stats->OctetsReceivedOK,
+ MAC_STAT_RX_BYTE_COUNT);
+ fbnic_mac_stat_rd64(fbd, reset, mac_stats->AlignmentErrors,
+ MAC_STAT_RX_ALIGN_ERROR);
+ fbnic_mac_stat_rd64(fbd, reset, mac_stats->FrameTooLongErrors,
+ MAC_STAT_RX_TOOLONG);
+ fbnic_mac_stat_rd64(fbd, reset, mac_stats->FramesReceivedOK,
+ MAC_STAT_RX_RECEIVED_OK);
+ fbnic_mac_stat_rd64(fbd, reset, mac_stats->FrameCheckSequenceErrors,
+ MAC_STAT_RX_PACKET_BAD_FCS);
+ fbnic_mac_stat_rd64(fbd, reset,
+ mac_stats->FramesLostDueToIntMACRcvError,
+ MAC_STAT_RX_IFINERRORS);
+ fbnic_mac_stat_rd64(fbd, reset, mac_stats->MulticastFramesReceivedOK,
+ MAC_STAT_RX_MULTICAST);
+ fbnic_mac_stat_rd64(fbd, reset, mac_stats->BroadcastFramesReceivedOK,
+ MAC_STAT_RX_BROADCAST);
+ fbnic_mac_stat_rd64(fbd, reset, mac_stats->OctetsTransmittedOK,
+ MAC_STAT_TX_BYTE_COUNT);
+ fbnic_mac_stat_rd64(fbd, reset, mac_stats->FramesTransmittedOK,
+ MAC_STAT_TX_TRANSMITTED_OK);
+ fbnic_mac_stat_rd64(fbd, reset,
+ mac_stats->FramesLostDueToIntMACXmitError,
+ MAC_STAT_TX_IFOUTERRORS);
+ fbnic_mac_stat_rd64(fbd, reset, mac_stats->MulticastFramesXmittedOK,
+ MAC_STAT_TX_MULTICAST);
+ fbnic_mac_stat_rd64(fbd, reset, mac_stats->BroadcastFramesXmittedOK,
+ MAC_STAT_TX_BROADCAST);
+}
+
+static void
+fbnic_mac_get_pause_stats(struct fbnic_dev *fbd, bool reset,
+ struct fbnic_pause_stats *pause_stats)
+{
+ fbnic_mac_stat_rd64(fbd, reset, pause_stats->tx_pause_frames,
+ MAC_STAT_TX_XOFF_STB);
+ fbnic_mac_stat_rd64(fbd, reset, pause_stats->rx_pause_frames,
+ MAC_STAT_RX_XOFF_STB);
+}
+
+static void
+fbnic_mac_get_eth_ctrl_stats(struct fbnic_dev *fbd, bool reset,
+ struct fbnic_eth_ctrl_stats *ctrl_stats)
+{
+ fbnic_mac_stat_rd64(fbd, reset, ctrl_stats->MACControlFramesReceived,
+ MAC_STAT_RX_CONTROL_FRAMES);
+ fbnic_mac_stat_rd64(fbd, reset, ctrl_stats->MACControlFramesTransmitted,
+ MAC_STAT_TX_CONTROL_FRAMES);
+}
+
+static void
+fbnic_mac_get_rmon_stats(struct fbnic_dev *fbd, bool reset,
+ struct fbnic_rmon_stats *rmon_stats)
+{
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->undersize_pkts,
+ MAC_STAT_RX_UNDERSIZE);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->oversize_pkts,
+ MAC_STAT_RX_OVERSIZE);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->fragments,
+ MAC_STAT_RX_FRAGMENT);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->jabbers,
+ MAC_STAT_RX_JABBER);
+
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[0],
+ MAC_STAT_RX_PACKET_64_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[1],
+ MAC_STAT_RX_PACKET_65_127_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[2],
+ MAC_STAT_RX_PACKET_128_255_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[3],
+ MAC_STAT_RX_PACKET_256_511_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[4],
+ MAC_STAT_RX_PACKET_512_1023_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[5],
+ MAC_STAT_RX_PACKET_1024_1518_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[6],
+ RPC_STAT_RX_PACKET_1519_2047_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[7],
+ RPC_STAT_RX_PACKET_2048_4095_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[8],
+ RPC_STAT_RX_PACKET_4096_8191_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[9],
+ RPC_STAT_RX_PACKET_8192_9216_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[10],
+ RPC_STAT_RX_PACKET_9217_MAX_BYTES);
+
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[0],
+ MAC_STAT_TX_PACKET_64_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[1],
+ MAC_STAT_TX_PACKET_65_127_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[2],
+ MAC_STAT_TX_PACKET_128_255_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[3],
+ MAC_STAT_TX_PACKET_256_511_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[4],
+ MAC_STAT_TX_PACKET_512_1023_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[5],
+ MAC_STAT_TX_PACKET_1024_1518_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[6],
+ TMI_STAT_TX_PACKET_1519_2047_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[7],
+ TMI_STAT_TX_PACKET_2048_4095_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[8],
+ TMI_STAT_TX_PACKET_4096_8191_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[9],
+ TMI_STAT_TX_PACKET_8192_9216_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[10],
+ TMI_STAT_TX_PACKET_9217_MAX_BYTES);
+}
+
+static int fbnic_mac_get_sensor_asic(struct fbnic_dev *fbd, int id,
+ long *val)
+{
+ struct fbnic_fw_completion *fw_cmpl;
+ int err = 0, retries = 5;
+ s32 *sensor;
+
+ fw_cmpl = fbnic_fw_alloc_cmpl(FBNIC_TLV_MSG_ID_TSENE_READ_RESP);
+ if (!fw_cmpl)
+ return -ENOMEM;
+
+ switch (id) {
+ case FBNIC_SENSOR_TEMP:
+ sensor = &fw_cmpl->u.tsene.millidegrees;
+ break;
+ case FBNIC_SENSOR_VOLTAGE:
+ sensor = &fw_cmpl->u.tsene.millivolts;
+ break;
+ default:
+ err = -EINVAL;
+ goto exit_free;
+ }
+
+ err = fbnic_fw_xmit_tsene_read_msg(fbd, fw_cmpl);
+ if (err) {
+ dev_err(fbd->dev,
+ "Failed to transmit TSENE read msg, err %d\n",
+ err);
+ goto exit_free;
+ }
+
+ /* Allow 2 seconds for reply, resend and try up to 5 times */
+ while (!wait_for_completion_timeout(&fw_cmpl->done, 2 * HZ)) {
+ retries--;
+
+ if (retries == 0) {
+ dev_err(fbd->dev,
+ "Timed out waiting for TSENE read\n");
+ err = -ETIMEDOUT;
+ goto exit_cleanup;
+ }
+
+ err = fbnic_fw_xmit_tsene_read_msg(fbd, NULL);
+ if (err) {
+ dev_err(fbd->dev,
+ "Failed to transmit TSENE read msg, err %d\n",
+ err);
+ goto exit_cleanup;
+ }
+ }
+
+ /* Handle error returned by firmware */
+ if (fw_cmpl->result) {
+ err = fw_cmpl->result;
+ dev_err(fbd->dev, "%s: Firmware returned error %d\n",
+ __func__, err);
+ goto exit_cleanup;
+ }
+
+ *val = *sensor;
+exit_cleanup:
+ fbnic_mbx_clear_cmpl(fbd, fw_cmpl);
+exit_free:
+ fbnic_fw_put_cmpl(fw_cmpl);
+
+ return err;
+}
+
+static const struct fbnic_mac fbnic_mac_asic = {
+ .init_regs = fbnic_mac_init_regs,
+ .get_link = fbnic_mac_get_link,
+ .get_link_event = fbnic_mac_get_link_event,
+ .prepare = fbnic_mac_prepare,
+ .get_fec_stats = fbnic_mac_get_fec_stats,
+ .get_pcs_stats = fbnic_mac_get_pcs_stats,
+ .get_eth_mac_stats = fbnic_mac_get_eth_mac_stats,
+ .get_pause_stats = fbnic_mac_get_pause_stats,
+ .get_eth_ctrl_stats = fbnic_mac_get_eth_ctrl_stats,
+ .get_rmon_stats = fbnic_mac_get_rmon_stats,
+ .link_down = fbnic_mac_link_down_asic,
+ .link_up = fbnic_mac_link_up_asic,
+ .get_sensor = fbnic_mac_get_sensor_asic,
+};
+
+/**
+ * fbnic_mac_init - Assign a MAC type and initialize the fbnic device
+ * @fbd: Device pointer to device to initialize
+ *
+ * Return: zero on success, negative on failure
+ *
+ * Initialize the MAC function pointers and initialize the MAC of
+ * the device.
+ **/
+int fbnic_mac_init(struct fbnic_dev *fbd)
+{
+ fbd->mac = &fbnic_mac_asic;
+
+ fbd->mac->init_regs(fbd);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.h b/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
new file mode 100644
index 000000000000..f08fe8b7c497
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#ifndef _FBNIC_MAC_H_
+#define _FBNIC_MAC_H_
+
+#include <linux/types.h>
+
+struct fbnic_dev;
+
+#define FBNIC_MAX_JUMBO_FRAME_SIZE 9742
+
+/* States loosely based on section 136.8.11.7.5 of IEEE 802.3-2022 Ethernet
+ * Standard. These are needed to track the state of the PHY as there is a
+ * delay of several seconds from the time link comes up until training has
+ * completed, and we need to wait for that before reporting the link.
+ *
+ * Currently we treat training as a single block as this is managed by the
+ * firmware.
+ *
+ * We have FBNIC_PMD_SEND_DATA set to 0 as the expected default at driver load
+ * and we initialize the structure containing it to zero at allocation.
+ */
+enum {
+ FBNIC_PMD_SEND_DATA = 0x0,
+ FBNIC_PMD_INITIALIZE = 0x1,
+ FBNIC_PMD_TRAINING = 0x2,
+ FBNIC_PMD_LINK_READY = 0x3,
+};
+
+enum {
+ FBNIC_LINK_EVENT_NONE = 0,
+ FBNIC_LINK_EVENT_UP = 1,
+ FBNIC_LINK_EVENT_DOWN = 2,
+};
+
+/* Treat the FEC bits as a bitmask laid out as follows:
+ * Bit 0: RS Enabled
+ * Bit 1: BASER(Firecode) Enabled
+ * Bit 2: Retrieve FEC from FW
+ */
+enum {
+ FBNIC_FEC_OFF = 0,
+ FBNIC_FEC_RS = 1,
+ FBNIC_FEC_BASER = 2,
+};
+
+/* Treat the AUI modes as a modulation/lanes bitmask:
+ * Bit 0: Lane Count, 0 = R1, 1 = R2
+ * Bit 1: Modulation, 0 = NRZ, 1 = PAM4
+ * Bit 2: Unknown Modulation/Lane Configuration
+ */
+enum {
+ FBNIC_AUI_25GAUI = 0, /* 25.7812GBd 25.78125 * 1 */
+ FBNIC_AUI_LAUI2 = 1, /* 51.5625GBd 25.78125 * 2 */
+ FBNIC_AUI_50GAUI1 = 2, /* 53.125GBd 53.125 * 1 */
+ FBNIC_AUI_100GAUI2 = 3, /* 106.25GBd 53.125 * 2 */
+ FBNIC_AUI_UNKNOWN = 4,
+ __FBNIC_AUI_MAX__
+};
+
+#define FBNIC_AUI_MODE_R2 (FBNIC_AUI_LAUI2)
+#define FBNIC_AUI_MODE_PAM4 (FBNIC_AUI_50GAUI1)
+
+enum fbnic_sensor_id {
+ FBNIC_SENSOR_TEMP, /* Temp in millidegrees Centigrade */
+ FBNIC_SENSOR_VOLTAGE, /* Voltage in millivolts */
+};
+
+/* This structure defines the interface hooks for the MAC. The MAC hooks
+ * will be configured as a const struct provided with a set of function
+ * pointers.
+ *
+ * void (*init_regs)(struct fbnic_dev *fbd);
+ * Initialize MAC registers to enable Tx/Rx paths and FIFOs.
+ *
+ * int (*get_link_event)(struct fbnic_dev *fbd);
+ * Get the current link event status; returns FBNIC_LINK_EVENT_UP or
+ * FBNIC_LINK_EVENT_DOWN if the link state has changed, otherwise
+ * FBNIC_LINK_EVENT_NONE
+ * bool (*get_link)(struct fbnic_dev *fbd, u8 aui, u8 fec);
+ * Check link status
+ *
+ * void (*prepare)(struct fbnic_dev *fbd, u8 aui, u8 fec);
+ * Prepare PHY for init by fetching settings, disabling interrupts,
+ * and sending an updated PHY config to FW if needed.
+ *
+ * void (*link_down)(struct fbnic_dev *fbd);
+ * Configure MAC for link down event
+ * void (*link_up)(struct fbnic_dev *fbd, bool tx_pause, bool rx_pause);
+ * Configure MAC for link up event;
+ *
+ */
+struct fbnic_mac {
+ void (*init_regs)(struct fbnic_dev *fbd);
+
+ int (*get_link_event)(struct fbnic_dev *fbd);
+ bool (*get_link)(struct fbnic_dev *fbd, u8 aui, u8 fec);
+
+ void (*prepare)(struct fbnic_dev *fbd, u8 aui, u8 fec);
+
+ void (*get_fec_stats)(struct fbnic_dev *fbd, bool reset,
+ struct fbnic_fec_stats *fec_stats);
+ void (*get_pcs_stats)(struct fbnic_dev *fbd, bool reset,
+ struct fbnic_pcs_stats *pcs_stats);
+ void (*get_eth_mac_stats)(struct fbnic_dev *fbd, bool reset,
+ struct fbnic_eth_mac_stats *mac_stats);
+ void (*get_pause_stats)(struct fbnic_dev *fbd, bool reset,
+ struct fbnic_pause_stats *pause_stats);
+ void (*get_eth_ctrl_stats)(struct fbnic_dev *fbd, bool reset,
+ struct fbnic_eth_ctrl_stats *ctrl_stats);
+ void (*get_rmon_stats)(struct fbnic_dev *fbd, bool reset,
+ struct fbnic_rmon_stats *rmon_stats);
+
+ void (*link_down)(struct fbnic_dev *fbd);
+ void (*link_up)(struct fbnic_dev *fbd, bool tx_pause, bool rx_pause);
+
+ int (*get_sensor)(struct fbnic_dev *fbd, int id, long *val);
+};
+
+int fbnic_mac_init(struct fbnic_dev *fbd);
+void fbnic_mac_get_fw_settings(struct fbnic_dev *fbd, u8 *aui, u8 *fec);
+#endif /* _FBNIC_MAC_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mdio.c b/drivers/net/ethernet/meta/fbnic/fbnic_mdio.c
new file mode 100644
index 000000000000..709041f7fc43
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_mdio.c
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/mdio.h>
+#include <linux/pcs/pcs-xpcs.h>
+
+#include "fbnic.h"
+#include "fbnic_netdev.h"
+
+#define DW_VENDOR BIT(15)
+#define FBNIC_PCS_VENDOR BIT(9)
+#define FBNIC_PCS_ZERO_MASK (DW_VENDOR - FBNIC_PCS_VENDOR)
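+/* Any register number with one of bits 9-14 set (the 0x7e00 span between
+ * the FBNIC vendor page bit and the DesignWare vendor page bit) is
+ * treated as reserved by the helpers below and reads back as zero.
+ */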
+
+static int
+fbnic_mdio_read_pmd(struct fbnic_dev *fbd, int addr, int regnum)
+{
+ u8 aui = FBNIC_AUI_UNKNOWN;
+ struct fbnic_net *fbn;
+ int ret = 0;
+
+ /* We don't need a second PMD, just one can handle both lanes */
+ if (addr)
+ return 0;
+
+ if (fbd->netdev) {
+ fbn = netdev_priv(fbd->netdev);
+ if (fbn->aui < FBNIC_AUI_UNKNOWN)
+ aui = fbn->aui;
+ }
+
+ switch (regnum) {
+ case MDIO_DEVID1:
+ ret = MP_FBNIC_XPCS_PMA_100G_ID >> 16;
+ break;
+ case MDIO_DEVID2:
+ ret = MP_FBNIC_XPCS_PMA_100G_ID & 0xffff;
+ break;
+ case MDIO_DEVS1:
+ ret = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS;
+ break;
+ case MDIO_STAT2:
+ ret = MDIO_STAT2_DEVPRST_VAL;
+ break;
+ case MDIO_PMA_RXDET:
+ /* If training isn't complete default to 0 */
+ if (fbd->pmd_state != FBNIC_PMD_SEND_DATA)
+ break;
+ /* Report either 1 or 2 lanes detected depending on config */
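+ /* FBNIC_AUI_MODE_R2 is the lane-count bit (bit 0) of the AUI
+ * encoding, so the expression below ORs in MDIO_PMD_RXDET_1 only
+ * for the two-lane (R2) configurations.
+ */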
+ ret = (MDIO_PMD_RXDET_GLOBAL | MDIO_PMD_RXDET_0) |
+ ((aui & FBNIC_AUI_MODE_R2) *
+ (MDIO_PMD_RXDET_1 / FBNIC_AUI_MODE_R2));
+ break;
+ default:
+ break;
+ }
+
+ dev_dbg(fbd->dev,
+ "SWMII PMD Rd: Addr: %d RegNum: %d Value: 0x%04x\n",
+ addr, regnum, ret);
+
+ return ret;
+}
+
+static int
+fbnic_mdio_read_pcs(struct fbnic_dev *fbd, int addr, int regnum)
+{
+ int ret, offset = 0;
+
+ /* We will need access to both PCS instances to get config info */
+ if (addr >= 2)
+ return 0;
+
+ /* Report 0 for reserved registers */
+ if (regnum & FBNIC_PCS_ZERO_MASK)
+ return 0;
+
+ /* Intercept and return correct ID for PCS */
+ if (regnum == MDIO_DEVID1)
+ return DW_XPCS_ID >> 16;
+ if (regnum == MDIO_DEVID2)
+ return DW_XPCS_ID & 0xffff;
+ if (regnum == MDIO_DEVS1)
+ return MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS;
+
+ /* Swap vendor page bit for FBNIC PCS vendor page bit */
+ if (regnum & DW_VENDOR)
+ offset ^= DW_VENDOR | FBNIC_PCS_VENDOR;
+
+ ret = fbnic_rd32(fbd, FBNIC_PCS_PAGE(addr) + (regnum ^ offset));
+
+ dev_dbg(fbd->dev,
+ "SWMII PCS Rd: Addr: %d RegNum: %d Value: 0x%04x\n",
+ addr, regnum, ret);
+
+ return ret;
+}
+
+static int
+fbnic_mdio_read_c45(struct mii_bus *bus, int addr, int devnum, int regnum)
+{
+ struct fbnic_dev *fbd = bus->priv;
+
+ if (devnum == MDIO_MMD_PMAPMD)
+ return fbnic_mdio_read_pmd(fbd, addr, regnum);
+
+ if (devnum == MDIO_MMD_PCS)
+ return fbnic_mdio_read_pcs(fbd, addr, regnum);
+
+ return 0;
+}
+
+static void
+fbnic_mdio_write_pmd(struct fbnic_dev *fbd, int addr, int regnum, u16 val)
+{
+ dev_dbg(fbd->dev,
+ "SWMII PMD Wr: Addr: %d RegNum: %d Value: 0x%04x\n",
+ addr, regnum, val);
+}
+
+static void
+fbnic_mdio_write_pcs(struct fbnic_dev *fbd, int addr, int regnum, u16 val)
+{
+ dev_dbg(fbd->dev,
+ "SWMII PCS Wr: Addr: %d RegNum: %d Value: 0x%04x\n",
+ addr, regnum, val);
+
+ /* Allow access to both halves of PCS for 50R2 config */
+ if (addr > 2)
+ return;
+
+ /* Skip write for reserved registers */
+ if (regnum & FBNIC_PCS_ZERO_MASK)
+ return;
+
+ /* Swap vendor page bit for FBNIC PCS vendor page bit */
+ if (regnum & DW_VENDOR)
+ regnum ^= DW_VENDOR | FBNIC_PCS_VENDOR;
+
+ fbnic_wr32(fbd, FBNIC_PCS_PAGE(addr) + regnum, val);
+}
+
+static int
+fbnic_mdio_write_c45(struct mii_bus *bus, int addr, int devnum,
+ int regnum, u16 val)
+{
+ struct fbnic_dev *fbd = bus->priv;
+
+ if (devnum == MDIO_MMD_PMAPMD)
+ fbnic_mdio_write_pmd(fbd, addr, regnum, val);
+
+ if (devnum == MDIO_MMD_PCS)
+ fbnic_mdio_write_pcs(fbd, addr, regnum, val);
+
+ return 0;
+}
+
+/**
+ * fbnic_mdiobus_create - Create an MDIO bus to allow interfacing w/ PHYs
+ * @fbd: Pointer to FBNIC device structure to populate bus on
+ *
+ * Initialize an MDIO bus and place a pointer to it on the fbd struct. This bus
+ * will be used to interface with the PMA/PMD and PCS.
+ *
+ * Return: 0 on success, negative on failure
+ **/
+int fbnic_mdiobus_create(struct fbnic_dev *fbd)
+{
+ struct mii_bus *bus;
+ int err;
+
+ bus = devm_mdiobus_alloc(fbd->dev);
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "fbnic_mii_bus";
+ bus->read_c45 = &fbnic_mdio_read_c45;
+ bus->write_c45 = &fbnic_mdio_write_c45;
+
+ /* Disable PHY auto probing. We will add PCS manually */
+ bus->phy_mask = ~0;
+
+ bus->parent = fbd->dev;
+ bus->priv = fbd;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(fbd->dev));
+
+ err = devm_mdiobus_register(fbd->dev, bus);
+ if (err) {
+ dev_err(fbd->dev, "Failed to create MDIO bus: %d\n", err);
+ return err;
+ }
+
+ fbd->mdio_bus = bus;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
new file mode 100644
index 000000000000..81c9d5c9a4b2
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
@@ -0,0 +1,857 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/etherdevice.h>
+#include <linux/ipv6.h>
+#include <linux/types.h>
+#include <net/netdev_queues.h>
+
+#include "fbnic.h"
+#include "fbnic_netdev.h"
+#include "fbnic_txrx.h"
+
+int __fbnic_open(struct fbnic_net *fbn)
+{
+ struct fbnic_dev *fbd = fbn->fbd;
+ int err;
+
+ err = fbnic_alloc_napi_vectors(fbn);
+ if (err)
+ return err;
+
+ err = fbnic_alloc_resources(fbn);
+ if (err)
+ goto free_napi_vectors;
+
+ err = fbnic_set_netif_queues(fbn);
+ if (err)
+ goto free_resources;
+
+ /* Send ownership message and flush to verify FW has seen it */
+ err = fbnic_fw_xmit_ownership_msg(fbd, true);
+ if (err) {
+ dev_warn(fbd->dev,
+ "Error %d sending host ownership message to the firmware\n",
+ err);
+ goto err_reset_queues;
+ }
+
+ err = fbnic_time_start(fbn);
+ if (err)
+ goto release_ownership;
+
+ err = fbnic_fw_init_heartbeat(fbd, false);
+ if (err)
+ goto time_stop;
+
+ err = fbnic_mac_request_irq(fbd);
+ if (err)
+ goto time_stop;
+
+ /* Pull the BMC config and initialize the RPC */
+ fbnic_bmc_rpc_init(fbd);
+ fbnic_rss_reinit(fbd, fbn);
+
+ phylink_resume(fbn->phylink);
+
+ return 0;
+time_stop:
+ fbnic_time_stop(fbn);
+release_ownership:
+ fbnic_fw_xmit_ownership_msg(fbn->fbd, false);
+err_reset_queues:
+ fbnic_reset_netif_queues(fbn);
+free_resources:
+ fbnic_free_resources(fbn);
+free_napi_vectors:
+ fbnic_free_napi_vectors(fbn);
+ return err;
+}
+
+static int fbnic_open(struct net_device *netdev)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ int err;
+
+ fbnic_napi_name_irqs(fbn->fbd);
+
+ err = __fbnic_open(fbn);
+ if (!err)
+ fbnic_up(fbn);
+
+ return err;
+}
+
+static int fbnic_stop(struct net_device *netdev)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ fbnic_mac_free_irq(fbn->fbd);
+ phylink_suspend(fbn->phylink, fbnic_bmc_present(fbn->fbd));
+
+ fbnic_down(fbn);
+
+ fbnic_time_stop(fbn);
+ fbnic_fw_xmit_ownership_msg(fbn->fbd, false);
+
+ fbnic_reset_netif_queues(fbn);
+ fbnic_free_resources(fbn);
+ fbnic_free_napi_vectors(fbn);
+
+ return 0;
+}
+
+static int fbnic_uc_sync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_mac_addr *avail_addr;
+
+ if (WARN_ON(!is_valid_ether_addr(addr)))
+ return -EADDRNOTAVAIL;
+
+ avail_addr = __fbnic_uc_sync(fbn->fbd, addr);
+ if (!avail_addr)
+ return -ENOSPC;
+
+ /* Add type flag indicating this address is in use by the host */
+ set_bit(FBNIC_MAC_ADDR_T_UNICAST, avail_addr->act_tcam);
+
+ return 0;
+}
+
+static int fbnic_uc_unsync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
+ int i, ret;
+
+ /* Scan from middle of list to bottom, filling bottom up.
+ * Skip the first entry which is reserved for dev_addr and
+ * leave the last entry to use for promiscuous filtering.
+ */
+ for (i = fbd->mac_addr_boundary, ret = -ENOENT;
+ i < FBNIC_RPC_TCAM_MACDA_HOST_ADDR_IDX && ret; i++) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];
+
+ if (!ether_addr_equal(mac_addr->value.addr8, addr))
+ continue;
+
+ ret = __fbnic_uc_unsync(mac_addr);
+ }
+
+ return ret;
+}
+
+static int fbnic_mc_sync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_mac_addr *avail_addr;
+
+ if (WARN_ON(!is_multicast_ether_addr(addr)))
+ return -EADDRNOTAVAIL;
+
+ avail_addr = __fbnic_mc_sync(fbn->fbd, addr);
+ if (!avail_addr)
+ return -ENOSPC;
+
+ /* Add type flag indicating this address is in use by the host */
+ set_bit(FBNIC_MAC_ADDR_T_MULTICAST, avail_addr->act_tcam);
+
+ return 0;
+}
+
+static int fbnic_mc_unsync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
+ int i, ret;
+
+ /* Scan from middle of list to top, filling top down.
+ * Skip over the address reserved for the BMC MAC and
+ * exclude index 0 as that belongs to the broadcast address
+ */
+ for (i = fbd->mac_addr_boundary, ret = -ENOENT;
+ --i > FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX && ret;) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];
+
+ if (!ether_addr_equal(mac_addr->value.addr8, addr))
+ continue;
+
+ ret = __fbnic_mc_unsync(mac_addr);
+ }
+
+ return ret;
+}
+
+void __fbnic_set_rx_mode(struct fbnic_dev *fbd)
+{
+ bool uc_promisc = false, mc_promisc = false;
+ struct net_device *netdev = fbd->netdev;
+ struct fbnic_mac_addr *mac_addr;
+ int err;
+
+ /* Populate host address from dev_addr */
+ mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_HOST_ADDR_IDX];
+ if (!ether_addr_equal(mac_addr->value.addr8, netdev->dev_addr) ||
+ mac_addr->state != FBNIC_TCAM_S_VALID) {
+ ether_addr_copy(mac_addr->value.addr8, netdev->dev_addr);
+ mac_addr->state = FBNIC_TCAM_S_UPDATE;
+ set_bit(FBNIC_MAC_ADDR_T_UNICAST, mac_addr->act_tcam);
+ }
+
+ /* Populate broadcast address if broadcast is enabled */
+ mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX];
+ if (netdev->flags & IFF_BROADCAST) {
+ if (!is_broadcast_ether_addr(mac_addr->value.addr8) ||
+ mac_addr->state != FBNIC_TCAM_S_VALID) {
+ eth_broadcast_addr(mac_addr->value.addr8);
+ mac_addr->state = FBNIC_TCAM_S_ADD;
+ }
+ set_bit(FBNIC_MAC_ADDR_T_BROADCAST, mac_addr->act_tcam);
+ } else if (mac_addr->state == FBNIC_TCAM_S_VALID) {
+ __fbnic_xc_unsync(mac_addr, FBNIC_MAC_ADDR_T_BROADCAST);
+ }
+
+ /* Synchronize unicast and multicast address lists */
+ err = __dev_uc_sync(netdev, fbnic_uc_sync, fbnic_uc_unsync);
+ if (err == -ENOSPC)
+ uc_promisc = true;
+ err = __dev_mc_sync(netdev, fbnic_mc_sync, fbnic_mc_unsync);
+ if (err == -ENOSPC)
+ mc_promisc = true;
+
+ uc_promisc |= !!(netdev->flags & IFF_PROMISC);
+ mc_promisc |= !!(netdev->flags & IFF_ALLMULTI) || uc_promisc;
+
+ /* Update the promiscuous rules */
+ fbnic_promisc_sync(fbd, uc_promisc, mc_promisc);
+
+ /* Add rules for BMC all multicast if it is enabled */
+ fbnic_bmc_rpc_all_multi_config(fbd, mc_promisc);
+
+ /* Sift out any unshared BMC rules and place them in BMC only section */
+ fbnic_sift_macda(fbd);
+
+ /* Write updates to hardware */
+ fbnic_write_rules(fbd);
+ fbnic_write_macda(fbd);
+ fbnic_write_tce_tcam(fbd);
+}
+
+static void fbnic_set_rx_mode(struct net_device *netdev)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ /* No need to update the hardware if we are not running */
+ if (netif_running(netdev))
+ __fbnic_set_rx_mode(fbd);
+}
+
+static int fbnic_set_mac(struct net_device *netdev, void *p)
+{
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ eth_hw_addr_set(netdev, addr->sa_data);
+
+ fbnic_set_rx_mode(netdev);
+
+ return 0;
+}
+
+void fbnic_clear_rx_mode(struct fbnic_dev *fbd)
+{
+ struct net_device *netdev = fbd->netdev;
+ int idx;
+
+ for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[idx];
+
+ if (mac_addr->state != FBNIC_TCAM_S_VALID)
+ continue;
+
+ bitmap_clear(mac_addr->act_tcam,
+ FBNIC_MAC_ADDR_T_HOST_START,
+ FBNIC_MAC_ADDR_T_HOST_LEN);
+
+ if (bitmap_empty(mac_addr->act_tcam,
+ FBNIC_RPC_TCAM_ACT_NUM_ENTRIES))
+ mac_addr->state = FBNIC_TCAM_S_DELETE;
+ }
+
+ /* Write updates to hardware */
+ fbnic_write_macda(fbd);
+
+ __dev_uc_unsync(netdev, NULL);
+ __dev_mc_unsync(netdev, NULL);
+}
+
+static int fbnic_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ *config = fbn->hwtstamp_config;
+
+ return 0;
+}
+
+static int fbnic_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ int old_rx_filter;
+
+ if (config->source != HWTSTAMP_SOURCE_NETDEV)
+ return -EOPNOTSUPP;
+
+ if (!kernel_hwtstamp_config_changed(config, &fbn->hwtstamp_config))
+ return 0;
+
+ /* Upscale the filters */
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ break;
+ case HWTSTAMP_FILTER_NTP_ALL:
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ /* Configure */
+ old_rx_filter = fbn->hwtstamp_config.rx_filter;
+ memcpy(&fbn->hwtstamp_config, config, sizeof(*config));
+
+ if (old_rx_filter != config->rx_filter && netif_running(fbn->netdev)) {
+ fbnic_rss_reinit(fbn->fbd, fbn);
+ fbnic_write_rules(fbn->fbd);
+ }
+
+ /* Save / report back filter configuration
+ * Note that our filter configuration is inexact. Instead of
+ * filtering for a specific UDP port or L2 Ethertype we are
+ * filtering in all UDP or all non-IP packets for timestamping. So
+ * if anything other than FILTER_ALL is requested we report
+ * FILTER_SOME indicating that we will be timestamping a few
+ * additional packets.
+ */
+ if (config->rx_filter > HWTSTAMP_FILTER_ALL)
+ config->rx_filter = HWTSTAMP_FILTER_SOME;
+
+ return 0;
+}
+
+static void fbnic_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats64)
+{
+ u64 rx_bytes, rx_packets, rx_dropped = 0, rx_errors = 0;
+ u64 rx_over = 0, rx_missed = 0, rx_length = 0;
+ u64 tx_bytes, tx_packets, tx_dropped = 0;
+ struct fbnic_net *fbn = netdev_priv(dev);
+ struct fbnic_dev *fbd = fbn->fbd;
+ struct fbnic_queue_stats *stats;
+
+ unsigned int start, i;
+
+ fbnic_get_hw_stats(fbd);
+
+ stats = &fbn->tx_stats;
+
+ tx_bytes = stats->bytes;
+ tx_packets = stats->packets;
+ tx_dropped = stats->dropped;
+
+ /* Record drops from Tx HW Datapath */
+ spin_lock(&fbd->hw_stats.lock);
+ tx_dropped += fbd->hw_stats.tmi.drop.frames.value +
+ fbd->hw_stats.tti.cm_drop.frames.value +
+ fbd->hw_stats.tti.frame_drop.frames.value +
+ fbd->hw_stats.tti.tbi_drop.frames.value;
+ spin_unlock(&fbd->hw_stats.lock);
+
+ stats64->tx_bytes = tx_bytes;
+ stats64->tx_packets = tx_packets;
+ stats64->tx_dropped = tx_dropped;
+
+ for (i = 0; i < fbn->num_tx_queues; i++) {
+ struct fbnic_ring *txr = fbn->tx[i];
+
+ if (!txr)
+ continue;
+
+ stats = &txr->stats;
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ tx_bytes = stats->bytes;
+ tx_packets = stats->packets;
+ tx_dropped = stats->dropped;
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+
+ stats64->tx_bytes += tx_bytes;
+ stats64->tx_packets += tx_packets;
+ stats64->tx_dropped += tx_dropped;
+ }
+
+ stats = &fbn->rx_stats;
+
+ rx_bytes = stats->bytes;
+ rx_packets = stats->packets;
+ rx_dropped = stats->dropped;
+
+ spin_lock(&fbd->hw_stats.lock);
+ /* Record drops for the host FIFOs.
+ * 4: network to Host, 6: BMC to Host
+ * Exclude the BMC and MC FIFOs as those stats may contain drops
+ * due to unrelated items such as TCAM misses. They are still
+ * accessible through the ethtool stats.
+ */
+ i = FBNIC_RXB_FIFO_HOST;
+ rx_missed += fbd->hw_stats.rxb.fifo[i].drop.frames.value;
+ i = FBNIC_RXB_FIFO_BMC_TO_HOST;
+ rx_missed += fbd->hw_stats.rxb.fifo[i].drop.frames.value;
+
+ for (i = 0; i < fbd->max_num_queues; i++) {
+ /* Report packets dropped due to CQ/BDQ being full/empty */
+ rx_over += fbd->hw_stats.hw_q[i].rde_pkt_cq_drop.value;
+ rx_over += fbd->hw_stats.hw_q[i].rde_pkt_bdq_drop.value;
+
+ /* Report packets with errors */
+ rx_errors += fbd->hw_stats.hw_q[i].rde_pkt_err.value;
+ }
+ spin_unlock(&fbd->hw_stats.lock);
+
+ stats64->rx_bytes = rx_bytes;
+ stats64->rx_packets = rx_packets;
+ stats64->rx_dropped = rx_dropped;
+ stats64->rx_over_errors = rx_over;
+ stats64->rx_errors = rx_errors;
+ stats64->rx_missed_errors = rx_missed;
+
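+ /* Fold in per-queue Rx stats; the XDP Tx rings live in the upper
+ * half of the tx[] array (offset by FBNIC_MAX_TXQS) and their
+ * counts are added to the Tx totals below.
+ */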
+ for (i = 0; i < fbn->num_rx_queues; i++) {
+ struct fbnic_ring *xdpr = fbn->tx[FBNIC_MAX_TXQS + i];
+ struct fbnic_ring *rxr = fbn->rx[i];
+
+ if (!rxr)
+ continue;
+
+ stats = &rxr->stats;
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ rx_bytes = stats->bytes;
+ rx_packets = stats->packets;
+ rx_dropped = stats->dropped;
+ rx_length = stats->rx.length_errors;
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+
+ stats64->rx_bytes += rx_bytes;
+ stats64->rx_packets += rx_packets;
+ stats64->rx_dropped += rx_dropped;
+ stats64->rx_errors += rx_length;
+ stats64->rx_length_errors += rx_length;
+
+ if (!xdpr)
+ continue;
+
+ stats = &xdpr->stats;
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ tx_bytes = stats->bytes;
+ tx_packets = stats->packets;
+ tx_dropped = stats->dropped;
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+
+ stats64->tx_bytes += tx_bytes;
+ stats64->tx_packets += tx_packets;
+ stats64->tx_dropped += tx_dropped;
+ }
+}
+
+bool fbnic_check_split_frames(struct bpf_prog *prog, unsigned int mtu,
+ u32 hds_thresh)
+{
+ if (!prog)
+ return false;
+
+ if (prog->aux->xdp_has_frags)
+ return false;
+
+ return mtu + ETH_HLEN > hds_thresh;
+}
+
+static int fbnic_bpf(struct net_device *netdev, struct netdev_bpf *bpf)
+{
+ struct bpf_prog *prog = bpf->prog, *prev_prog;
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ if (bpf->command != XDP_SETUP_PROG)
+ return -EINVAL;
+
+ if (fbnic_check_split_frames(prog, netdev->mtu,
+ fbn->hds_thresh)) {
+ NL_SET_ERR_MSG_MOD(bpf->extack,
+ "MTU too high, or HDS threshold is too low for single buffer XDP");
+ return -EOPNOTSUPP;
+ }
+
+ prev_prog = xchg(&fbn->xdp_prog, prog);
+ if (prev_prog)
+ bpf_prog_put(prev_prog);
+
+ return 0;
+}
+
+static const struct net_device_ops fbnic_netdev_ops = {
+ .ndo_open = fbnic_open,
+ .ndo_stop = fbnic_stop,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_start_xmit = fbnic_xmit_frame,
+ .ndo_features_check = fbnic_features_check,
+ .ndo_set_mac_address = fbnic_set_mac,
+ .ndo_set_rx_mode = fbnic_set_rx_mode,
+ .ndo_get_stats64 = fbnic_get_stats64,
+ .ndo_bpf = fbnic_bpf,
+ .ndo_hwtstamp_get = fbnic_hwtstamp_get,
+ .ndo_hwtstamp_set = fbnic_hwtstamp_set,
+};
+
+static void fbnic_get_queue_stats_rx(struct net_device *dev, int idx,
+ struct netdev_queue_stats_rx *rx)
+{
+ u64 bytes, packets, alloc_fail, alloc_fail_bdq;
+ struct fbnic_net *fbn = netdev_priv(dev);
+ struct fbnic_ring *rxr = fbn->rx[idx];
+ struct fbnic_dev *fbd = fbn->fbd;
+ struct fbnic_queue_stats *stats;
+ u64 csum_complete, csum_none;
+ struct fbnic_q_triad *qt;
+ unsigned int start;
+
+ if (!rxr)
+ return;
+
+ /* fbn->rx points to completion queues */
+ qt = container_of(rxr, struct fbnic_q_triad, cmpl);
+
+ stats = &rxr->stats;
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ bytes = stats->bytes;
+ packets = stats->packets;
+ alloc_fail = stats->rx.alloc_failed;
+ csum_complete = stats->rx.csum_complete;
+ csum_none = stats->rx.csum_none;
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+
+ stats = &qt->sub0.stats;
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ alloc_fail_bdq = stats->bdq.alloc_failed;
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+ alloc_fail += alloc_fail_bdq;
+
+ stats = &qt->sub1.stats;
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ alloc_fail_bdq = stats->bdq.alloc_failed;
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+ alloc_fail += alloc_fail_bdq;
+
+ rx->bytes = bytes;
+ rx->packets = packets;
+ rx->alloc_fail = alloc_fail;
+ rx->csum_complete = csum_complete;
+ rx->csum_none = csum_none;
+
+ fbnic_get_hw_q_stats(fbd, fbd->hw_stats.hw_q);
+
+ spin_lock(&fbd->hw_stats.lock);
+ rx->hw_drop_overruns = fbd->hw_stats.hw_q[idx].rde_pkt_cq_drop.value +
+ fbd->hw_stats.hw_q[idx].rde_pkt_bdq_drop.value;
+ rx->hw_drops = fbd->hw_stats.hw_q[idx].rde_pkt_err.value +
+ rx->hw_drop_overruns;
+ spin_unlock(&fbd->hw_stats.lock);
+}
+
+static void fbnic_get_queue_stats_tx(struct net_device *dev, int idx,
+ struct netdev_queue_stats_tx *tx)
+{
+ struct fbnic_net *fbn = netdev_priv(dev);
+ struct fbnic_ring *txr = fbn->tx[idx];
+ struct fbnic_queue_stats *stats;
+ u64 stop, wake, csum, lso;
+ struct fbnic_ring *xdpr;
+ unsigned int start;
+ u64 bytes, packets;
+
+ if (!txr)
+ return;
+
+ stats = &txr->stats;
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ bytes = stats->bytes;
+ packets = stats->packets;
+ csum = stats->twq.csum_partial;
+ lso = stats->twq.lso;
+ stop = stats->twq.stop;
+ wake = stats->twq.wake;
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+
+ tx->bytes = bytes;
+ tx->packets = packets;
+ tx->needs_csum = csum + lso;
+ tx->hw_gso_wire_packets = lso;
+ tx->stop = stop;
+ tx->wake = wake;
+
+ xdpr = fbn->tx[FBNIC_MAX_TXQS + idx];
+ if (xdpr) {
+ stats = &xdpr->stats;
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ bytes = stats->bytes;
+ packets = stats->packets;
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+
+ tx->bytes += bytes;
+ tx->packets += packets;
+ }
+}
+
+static void fbnic_get_base_stats(struct net_device *dev,
+ struct netdev_queue_stats_rx *rx,
+ struct netdev_queue_stats_tx *tx)
+{
+ struct fbnic_net *fbn = netdev_priv(dev);
+
+ tx->bytes = fbn->tx_stats.bytes;
+ tx->packets = fbn->tx_stats.packets;
+ tx->needs_csum = fbn->tx_stats.twq.csum_partial + fbn->tx_stats.twq.lso;
+ tx->hw_gso_wire_packets = fbn->tx_stats.twq.lso;
+ tx->stop = fbn->tx_stats.twq.stop;
+ tx->wake = fbn->tx_stats.twq.wake;
+
+ rx->bytes = fbn->rx_stats.bytes;
+ rx->packets = fbn->rx_stats.packets;
+ rx->alloc_fail = fbn->rx_stats.rx.alloc_failed +
+ fbn->bdq_stats.bdq.alloc_failed;
+ rx->csum_complete = fbn->rx_stats.rx.csum_complete;
+ rx->csum_none = fbn->rx_stats.rx.csum_none;
+}
+
+static const struct netdev_stat_ops fbnic_stat_ops = {
+ .get_queue_stats_rx = fbnic_get_queue_stats_rx,
+ .get_queue_stats_tx = fbnic_get_queue_stats_tx,
+ .get_base_stats = fbnic_get_base_stats,
+};
+
+void fbnic_reset_queues(struct fbnic_net *fbn,
+ unsigned int tx, unsigned int rx)
+{
+ struct fbnic_dev *fbd = fbn->fbd;
+ unsigned int max_napis;
+
+ max_napis = fbd->num_irqs - FBNIC_NON_NAPI_VECTORS;
+
+ tx = min(tx, max_napis);
+ fbn->num_tx_queues = tx;
+
+ rx = min(rx, max_napis);
+ fbn->num_rx_queues = rx;
+
+ fbn->num_napi = max(tx, rx);
+}
+
+/**
+ * fbnic_netdev_free - Free the netdev associated with fbnic
+ * @fbd: Driver specific structure to free netdev from
+ *
+ * Destroy the phylink instance and free the netdev and netdev private
+ * structure, clearing the netdev pointer on the device structure.
+ **/
+void fbnic_netdev_free(struct fbnic_dev *fbd)
+{
+ fbnic_phylink_destroy(fbd->netdev);
+
+ free_netdev(fbd->netdev);
+ fbd->netdev = NULL;
+}
+
+/**
+ * fbnic_netdev_alloc - Allocate a netdev and associate with fbnic
+ * @fbd: Driver specific structure to associate netdev with
+ *
+ * Allocate and initialize the netdev and netdev private structure. Bind
+ * together the hardware, netdev, and pci data structures.
+ *
+ * Return: Pointer to net_device on success, NULL on failure
+ **/
+struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd)
+{
+ struct net_device *netdev;
+ struct fbnic_net *fbn;
+ int default_queues;
+
+ netdev = alloc_etherdev_mq(sizeof(*fbn), FBNIC_MAX_RXQS);
+ if (!netdev)
+ return NULL;
+
+ SET_NETDEV_DEV(netdev, fbd->dev);
+ fbd->netdev = netdev;
+
+ netdev->netdev_ops = &fbnic_netdev_ops;
+ netdev->stat_ops = &fbnic_stat_ops;
+ netdev->queue_mgmt_ops = &fbnic_queue_mgmt_ops;
+ netdev->netmem_tx = true;
+
+ fbnic_set_ethtool_ops(netdev);
+
+ fbn = netdev_priv(netdev);
+
+ fbn->netdev = netdev;
+ fbn->fbd = fbd;
+
+ fbn->txq_size = FBNIC_TXQ_SIZE_DEFAULT;
+ fbn->hpq_size = FBNIC_HPQ_SIZE_DEFAULT;
+ fbn->ppq_size = FBNIC_PPQ_SIZE_DEFAULT;
+ fbn->rcq_size = FBNIC_RCQ_SIZE_DEFAULT;
+
+ fbn->tx_usecs = FBNIC_TX_USECS_DEFAULT;
+ fbn->rx_usecs = FBNIC_RX_USECS_DEFAULT;
+ fbn->rx_max_frames = FBNIC_RX_FRAMES_DEFAULT;
+
+ /* Initialize the hds_thresh */
+ netdev->cfg->hds_thresh = FBNIC_HDS_THRESH_DEFAULT;
+ fbn->hds_thresh = FBNIC_HDS_THRESH_DEFAULT;
+
+ default_queues = netif_get_num_default_rss_queues();
+ if (default_queues > fbd->max_num_queues)
+ default_queues = fbd->max_num_queues;
+
+ fbnic_reset_queues(fbn, default_queues, default_queues);
+
+ fbnic_reset_indir_tbl(fbn);
+ fbnic_rss_key_fill(fbn->rss_key);
+ fbnic_rss_init_en_mask(fbn);
+
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ netdev->gso_partial_features =
+ NETIF_F_GSO_GRE |
+ NETIF_F_GSO_GRE_CSUM |
+ NETIF_F_GSO_IPXIP4 |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
+ netdev->features |=
+ netdev->gso_partial_features |
+ FBNIC_TUN_GSO_FEATURES |
+ NETIF_F_RXHASH |
+ NETIF_F_SG |
+ NETIF_F_HW_CSUM |
+ NETIF_F_RXCSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO_ECN |
+ NETIF_F_TSO6 |
+ NETIF_F_GSO_PARTIAL |
+ NETIF_F_GSO_UDP_L4;
+
+ netdev->hw_features |= netdev->features;
+ netdev->vlan_features |= netdev->features;
+ netdev->hw_enc_features |= netdev->features;
+ netdev->features |= NETIF_F_NTUPLE;
+
+ netdev->min_mtu = IPV6_MIN_MTU;
+ netdev->max_mtu = FBNIC_MAX_JUMBO_FRAME_SIZE - ETH_HLEN;
+
+ /* TBD: This is a workaround for the BMC as phylink doesn't have
+ * support for leaving the link enabled if a BMC is present.
+ */
+ netdev->ethtool->wol_enabled = true;
+
+ netif_carrier_off(netdev);
+
+ netif_tx_stop_all_queues(netdev);
+
+ if (fbnic_phylink_create(netdev)) {
+ fbnic_netdev_free(fbd);
+ return NULL;
+ }
+
+ return netdev;
+}
+
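+/* Assuming the PCIe device serial number follows the usual EUI-64 style
+ * layout, this keeps the top three bytes (the OUI) and the bottom three
+ * bytes of the DSN and drops the two filler bytes in the middle to form
+ * an EUI-48 MAC address.
+ */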
+static int fbnic_dsn_to_mac_addr(u64 dsn, char *addr)
+{
+ addr[0] = (dsn >> 56) & 0xFF;
+ addr[1] = (dsn >> 48) & 0xFF;
+ addr[2] = (dsn >> 40) & 0xFF;
+ addr[3] = (dsn >> 16) & 0xFF;
+ addr[4] = (dsn >> 8) & 0xFF;
+ addr[5] = dsn & 0xFF;
+
+ return is_valid_ether_addr(addr) ? 0 : -EINVAL;
+}
+
+/**
+ * fbnic_netdev_register - Initialize general software structures
+ * @netdev: Netdev containing structure to initialize and register
+ *
+ * Initialize the MAC address for the netdev and register it.
+ *
+ * Return: 0 on success, negative on failure
+ **/
+int fbnic_netdev_register(struct net_device *netdev)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
+ u64 dsn = fbd->dsn;
+ u8 addr[ETH_ALEN];
+ int err;
+
+ err = fbnic_dsn_to_mac_addr(dsn, addr);
+ if (!err) {
+ ether_addr_copy(netdev->perm_addr, addr);
+ eth_hw_addr_set(netdev, addr);
+ } else {
+ /* A randomly assigned MAC address will cause provisioning
+ * issues so instead just fail to spawn the netdev and
+ * avoid any confusion.
+ */
+ dev_err(fbd->dev, "MAC addr %pM invalid\n", addr);
+ return err;
+ }
+
+ return register_netdev(netdev);
+}
+
+void fbnic_netdev_unregister(struct net_device *netdev)
+{
+ unregister_netdev(netdev);
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
new file mode 100644
index 000000000000..9129a658f8fa
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#ifndef _FBNIC_NETDEV_H_
+#define _FBNIC_NETDEV_H_
+
+#include <linux/phylink.h>
+#include <linux/types.h>
+
+#include "fbnic_csr.h"
+#include "fbnic_rpc.h"
+#include "fbnic_txrx.h"
+
+#define FBNIC_MAX_NAPI_VECTORS 128u
+#define FBNIC_MIN_RXD_PER_FRAME 2
+
+/* Natively supported tunnel GSO features (not thru GSO_PARTIAL) */
+#define FBNIC_TUN_GSO_FEATURES NETIF_F_GSO_IPXIP6
+
+struct fbnic_net {
+ struct bpf_prog *xdp_prog;
+
+ struct fbnic_ring *tx[FBNIC_MAX_TXQS + FBNIC_MAX_XDPQS];
+ struct fbnic_ring *rx[FBNIC_MAX_RXQS];
+
+ struct fbnic_napi_vector *napi[FBNIC_MAX_NAPI_VECTORS];
+
+ struct net_device *netdev;
+ struct fbnic_dev *fbd;
+
+ u32 txq_size;
+ u32 hpq_size;
+ u32 ppq_size;
+ u32 rcq_size;
+
+ u32 hds_thresh;
+
+ u16 rx_usecs;
+ u16 tx_usecs;
+
+ u32 rx_max_frames;
+
+ u16 num_napi;
+
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
+ struct phylink_pcs *pcs;
+
+ u8 aui;
+ u8 fec;
+
+ /* Cached top bits of the HW time counter for 40b -> 64b conversion */
+ u32 time_high;
+ /* Protect readers of @time_offset, writers take @time_lock. */
+ struct u64_stats_sync time_seq;
+ /* Offset in ns between free running NIC PHC and time set via PTP
+ * clock callbacks
+ */
+ s64 time_offset;
+
+ u16 num_tx_queues;
+ u16 num_rx_queues;
+
+ u8 indir_tbl[FBNIC_RPC_RSS_TBL_COUNT][FBNIC_RPC_RSS_TBL_SIZE];
+ u32 rss_key[FBNIC_RPC_RSS_KEY_DWORD_LEN];
+ u32 rss_flow_hash[FBNIC_NUM_HASH_OPT];
+
+ /* Storage for stats after ring destruction */
+ struct fbnic_queue_stats tx_stats;
+ struct fbnic_queue_stats rx_stats;
+ struct fbnic_queue_stats bdq_stats;
+ u64 link_down_events;
+
+ /* Time stamping filter config */
+ struct kernel_hwtstamp_config hwtstamp_config;
+
+ bool tx_pause;
+};
+
+int __fbnic_open(struct fbnic_net *fbn);
+void fbnic_up(struct fbnic_net *fbn);
+void fbnic_down(struct fbnic_net *fbn);
+void fbnic_down_noidle(struct fbnic_net *fbn);
+
+struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd);
+void fbnic_netdev_free(struct fbnic_dev *fbd);
+int fbnic_netdev_register(struct net_device *netdev);
+void fbnic_netdev_unregister(struct net_device *netdev);
+void fbnic_reset_queues(struct fbnic_net *fbn,
+ unsigned int tx, unsigned int rx);
+
+void fbnic_set_ethtool_ops(struct net_device *dev);
+
+int fbnic_ptp_setup(struct fbnic_dev *fbd);
+void fbnic_ptp_destroy(struct fbnic_dev *fbd);
+void fbnic_time_init(struct fbnic_net *fbn);
+int fbnic_time_start(struct fbnic_net *fbn);
+void fbnic_time_stop(struct fbnic_net *fbn);
+
+void __fbnic_set_rx_mode(struct fbnic_dev *fbd);
+void fbnic_clear_rx_mode(struct fbnic_dev *fbd);
+
+void fbnic_phylink_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause);
+int fbnic_phylink_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause);
+int fbnic_phylink_ethtool_ksettings_get(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd);
+int fbnic_phylink_get_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fecparam);
+int fbnic_phylink_create(struct net_device *netdev);
+void fbnic_phylink_destroy(struct net_device *netdev);
+int fbnic_phylink_init(struct net_device *netdev);
+void fbnic_phylink_pmd_training_complete_notify(struct net_device *netdev);
+bool fbnic_check_split_frames(struct bpf_prog *prog,
+ unsigned int mtu, u32 hds_threshold);
+#endif /* _FBNIC_NETDEV_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
new file mode 100644
index 000000000000..9240673c7533
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
@@ -0,0 +1,655 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/rtnetlink.h>
+#include <linux/types.h>
+#include <net/devlink.h>
+
+#include "fbnic.h"
+#include "fbnic_drvinfo.h"
+#include "fbnic_hw_stats.h"
+#include "fbnic_netdev.h"
+
+char fbnic_driver_name[] = DRV_NAME;
+
+MODULE_DESCRIPTION(DRV_SUMMARY);
+MODULE_LICENSE("GPL");
+
+static const struct fbnic_info fbnic_asic_info = {
+ .max_num_queues = FBNIC_MAX_QUEUES,
+ .bar_mask = BIT(0) | BIT(4)
+};
+
+static const struct fbnic_info *fbnic_info_tbl[] = {
+ [fbnic_board_asic] = &fbnic_asic_info,
+};
+
+static const struct pci_device_id fbnic_pci_tbl[] = {
+ { PCI_DEVICE_DATA(META, FBNIC_ASIC, fbnic_board_asic) },
+ /* Required last entry */
+ {0, }
+};
+MODULE_DEVICE_TABLE(pci, fbnic_pci_tbl);
+
+u32 fbnic_rd32(struct fbnic_dev *fbd, u32 reg)
+{
+ u32 __iomem *csr = READ_ONCE(fbd->uc_addr0);
+ u32 value;
+
+ if (!csr)
+ return ~0U;
+
+ value = readl(csr + reg);
+
+ /* If any bits are 0 the value should be valid */
+ if (~value)
+ return value;
+
+ /* All 1's may be valid if ZEROs register still works */
+ if (reg != FBNIC_MASTER_SPARE_0 && ~readl(csr + FBNIC_MASTER_SPARE_0))
+ return value;
+
+ /* Hardware is giving us all 1's reads, assume it is gone */
+ WRITE_ONCE(fbd->uc_addr0, NULL);
+ WRITE_ONCE(fbd->uc_addr4, NULL);
+
+ dev_err(fbd->dev,
+ "Failed read (idx 0x%x AKA addr 0x%x), disabled CSR access, awaiting reset\n",
+ reg, reg << 2);
+
+ /* Notify stack that device has lost (PCIe) link */
+ if (!fbnic_init_failure(fbd))
+ netif_device_detach(fbd->netdev);
+
+ return ~0U;
+}
+
+bool fbnic_fw_present(struct fbnic_dev *fbd)
+{
+ return !!READ_ONCE(fbd->uc_addr4);
+}
+
+void fbnic_fw_wr32(struct fbnic_dev *fbd, u32 reg, u32 val)
+{
+ u32 __iomem *csr = READ_ONCE(fbd->uc_addr4);
+
+ if (csr)
+ writel(val, csr + reg);
+}
+
+u32 fbnic_fw_rd32(struct fbnic_dev *fbd, u32 reg)
+{
+ u32 __iomem *csr = READ_ONCE(fbd->uc_addr4);
+ u32 value;
+
+ if (!csr)
+ return ~0U;
+
+ value = readl(csr + reg);
+
+ /* If any bits are 0 the value should be valid */
+ if (~value)
+ return value;
+
+ /* All 1's may be valid if ZEROs register still works */
+ if (reg != FBNIC_FW_ZERO_REG && ~readl(csr + FBNIC_FW_ZERO_REG))
+ return value;
+
+ /* Hardware is giving us all 1's reads, assume it is gone */
+ WRITE_ONCE(fbd->uc_addr0, NULL);
+ WRITE_ONCE(fbd->uc_addr4, NULL);
+
+ dev_err(fbd->dev,
+ "Failed read (idx 0x%x AKA addr 0x%x), disabled CSR access, awaiting reset\n",
+ reg, reg << 2);
+
+ /* Notify stack that device has lost (PCIe) link */
+ if (!fbnic_init_failure(fbd))
+ netif_device_detach(fbd->netdev);
+
+ return ~0U;
+}
+
+static void fbnic_service_task_start(struct fbnic_net *fbn)
+{
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ schedule_delayed_work(&fbd->service_task, HZ);
+}
+
+static void fbnic_service_task_stop(struct fbnic_net *fbn)
+{
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ cancel_delayed_work(&fbd->service_task);
+}
+
+void fbnic_up(struct fbnic_net *fbn)
+{
+ fbnic_enable(fbn);
+
+ fbnic_fill(fbn);
+
+ fbnic_rss_reinit_hw(fbn->fbd, fbn);
+
+ __fbnic_set_rx_mode(fbn->fbd);
+
+ /* Enable Tx/Rx processing */
+ fbnic_napi_enable(fbn);
+ netif_tx_start_all_queues(fbn->netdev);
+
+ fbnic_service_task_start(fbn);
+}
+
+void fbnic_down_noidle(struct fbnic_net *fbn)
+{
+ fbnic_service_task_stop(fbn);
+
+ /* Disable Tx/Rx Processing */
+ fbnic_napi_disable(fbn);
+ netif_tx_disable(fbn->netdev);
+
+ fbnic_clear_rx_mode(fbn->fbd);
+ fbnic_clear_rules(fbn->fbd);
+ fbnic_rss_disable_hw(fbn->fbd);
+ fbnic_disable(fbn);
+}
+
+void fbnic_down(struct fbnic_net *fbn)
+{
+ fbnic_down_noidle(fbn);
+
+ fbnic_wait_all_queues_idle(fbn->fbd, false);
+
+ fbnic_flush(fbn);
+}
+
+static int fbnic_fw_config_after_crash(struct fbnic_dev *fbd)
+{
+ if (fbnic_fw_xmit_ownership_msg(fbd, true)) {
+ dev_err(fbd->dev, "NIC failed to take ownership\n");
+
+ return -1;
+ }
+
+ fbnic_rpc_reset_valid_entries(fbd);
+ __fbnic_set_rx_mode(fbd);
+
+ return 0;
+}
+
+static void fbnic_health_check(struct fbnic_dev *fbd)
+{
+ struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
+
+ /* As long as the heart is beating the FW is healthy */
+ if (fbd->fw_heartbeat_enabled)
+ return;
+
+ /* If the Tx mailbox still has messages sitting in it then there likely
+ * isn't anything we can do. We will wait until the mailbox is empty to
+ * report the fault so we can collect the crashlog.
+ */
+ if (tx_mbx->head != tx_mbx->tail)
+ return;
+
+ fbnic_devlink_fw_report(fbd, "Firmware crash detected!");
+ fbnic_devlink_otp_check(fbd, "error detected after firmware recovery");
+
+ if (fbnic_fw_config_after_crash(fbd))
+ dev_err(fbd->dev, "Firmware recovery failed after crash\n");
+}
+
+static void fbnic_service_task(struct work_struct *work)
+{
+ struct fbnic_dev *fbd = container_of(to_delayed_work(work),
+ struct fbnic_dev, service_task);
+ struct net_device *netdev = fbd->netdev;
+
+ if (netif_running(netdev))
+ fbnic_phylink_pmd_training_complete_notify(netdev);
+
+ rtnl_lock();
+
+ fbnic_get_hw_stats32(fbd);
+
+ fbnic_fw_check_heartbeat(fbd);
+
+ fbnic_health_check(fbd);
+
+ fbnic_bmc_rpc_check(fbd);
+
+ if (netif_carrier_ok(fbd->netdev)) {
+ netdev_lock(fbd->netdev);
+ fbnic_napi_depletion_check(fbd->netdev);
+ netdev_unlock(fbd->netdev);
+ }
+
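+ /* Reschedule the service task once per second while the interface
+ * is running.
+ */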
+ if (netif_running(netdev))
+ schedule_delayed_work(&fbd->service_task, HZ);
+
+ rtnl_unlock();
+}
+
+/**
+ * fbnic_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in fbnic_pci_tbl
+ *
+ * Initializes a PCI device identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ *
+ * Return: 0 on success, negative on failure
+ **/
+static int fbnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ const struct fbnic_info *info = fbnic_info_tbl[ent->driver_data];
+ struct net_device *netdev;
+ struct fbnic_dev *fbd;
+ int err;
+
+ if (pdev->error_state != pci_channel_io_normal) {
+ dev_err(&pdev->dev,
+ "PCI device still in an error state. Unable to load...\n");
+ return -EIO;
+ }
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "PCI enable device failed: %d\n", err);
+ return err;
+ }
+
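+ /* Request a 46-bit DMA mask first and fall back to a 32-bit mask if
+ * the platform cannot satisfy it.
+ */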
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(46));
+ if (err)
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev, "DMA configuration failed: %d\n", err);
+ return err;
+ }
+
+ err = pcim_iomap_regions(pdev, info->bar_mask, fbnic_driver_name);
+ if (err) {
+ dev_err(&pdev->dev,
+ "pci_request_selected_regions failed: %d\n", err);
+ return err;
+ }
+
+ fbd = fbnic_devlink_alloc(pdev);
+ if (!fbd) {
+ dev_err(&pdev->dev, "Devlink allocation failed\n");
+ return -ENOMEM;
+ }
+
+ err = fbnic_devlink_health_create(fbd);
+ if (err)
+ goto free_fbd;
+
+ /* Populate driver with hardware-specific info and handlers */
+ fbd->max_num_queues = info->max_num_queues;
+
+ pci_set_master(pdev);
+ pci_save_state(pdev);
+
+ INIT_DELAYED_WORK(&fbd->service_task, fbnic_service_task);
+
+ err = fbnic_alloc_irqs(fbd);
+ if (err)
+ goto err_destroy_health;
+
+ err = fbnic_mac_init(fbd);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize MAC: %d\n", err);
+ goto free_irqs;
+ }
+
+ err = fbnic_fw_request_mbx(fbd);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Firmware mailbox initialization failure\n");
+ goto free_irqs;
+ }
+
+ /* Send the request to enable FW logging to the host. Note that if
+ * this fails we ignore the error and just display a message, as it is
+ * possible the FW is simply too old to support logging and needs to
+ * be updated.
+ */
+ err = fbnic_fw_log_init(fbd);
+ if (err)
+ dev_warn(fbd->dev,
+ "Unable to initialize firmware log buffer: %d\n",
+ err);
+
+ fbnic_devlink_register(fbd);
+ fbnic_devlink_otp_check(fbd, "error detected during probe");
+ fbnic_dbg_fbd_init(fbd);
+
+ /* Capture snapshot of hardware stats so netdev can calculate delta */
+ fbnic_init_hw_stats(fbd);
+
+ fbnic_hwmon_register(fbd);
+
+ if (!fbd->dsn) {
+ dev_warn(&pdev->dev, "Reading serial number failed\n");
+ goto init_failure_mode;
+ }
+
+ if (fbnic_mdiobus_create(fbd))
+ goto init_failure_mode;
+
+ netdev = fbnic_netdev_alloc(fbd);
+ if (!netdev) {
+ dev_err(&pdev->dev, "Netdev allocation failed\n");
+ goto init_failure_mode;
+ }
+
+ err = fbnic_ptp_setup(fbd);
+ if (err)
+ goto ifm_free_netdev;
+
+ err = fbnic_netdev_register(netdev);
+ if (err) {
+ dev_err(&pdev->dev, "Netdev registration failed: %d\n", err);
+ goto ifm_destroy_ptp;
+ }
+
+ return 0;
+
+ifm_destroy_ptp:
+ fbnic_ptp_destroy(fbd);
+ifm_free_netdev:
+ fbnic_netdev_free(fbd);
+init_failure_mode:
+ dev_warn(&pdev->dev, "Probe error encountered, entering init failure mode. Normal networking functionality will not be available.\n");
+ /* Always return 0 even on error so devlink is registered to allow
+ * firmware updates for fixes.
+ */
+ return 0;
+free_irqs:
+ fbnic_free_irqs(fbd);
+err_destroy_health:
+ fbnic_devlink_health_destroy(fbd);
+free_fbd:
+ fbnic_devlink_free(fbd);
+
+ return err;
+}
+
+/**
+ * fbnic_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * Called by the PCI subsystem to alert the driver that it should release
+ * a PCI device. This could be caused by a Hot-Plug event, or because the
+ * driver is going to be removed from memory.
+ **/
+static void fbnic_remove(struct pci_dev *pdev)
+{
+ struct fbnic_dev *fbd = pci_get_drvdata(pdev);
+
+ if (!fbnic_init_failure(fbd)) {
+ struct net_device *netdev = fbd->netdev;
+
+ fbnic_netdev_unregister(netdev);
+ cancel_delayed_work_sync(&fbd->service_task);
+ fbnic_ptp_destroy(fbd);
+ fbnic_netdev_free(fbd);
+ }
+
+ fbnic_hwmon_unregister(fbd);
+ fbnic_dbg_fbd_exit(fbd);
+ fbnic_devlink_unregister(fbd);
+ fbnic_fw_log_free(fbd);
+ fbnic_fw_free_mbx(fbd);
+ fbnic_free_irqs(fbd);
+
+ fbnic_devlink_health_destroy(fbd);
+ fbnic_devlink_free(fbd);
+}
+
+static int fbnic_pm_suspend(struct device *dev)
+{
+ struct fbnic_dev *fbd = dev_get_drvdata(dev);
+ struct net_device *netdev = fbd->netdev;
+
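+ /* If probe ended in init failure mode there is no netdev to stop */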
+ if (fbnic_init_failure(fbd))
+ goto null_uc_addr;
+
+ rtnl_lock();
+ netdev_lock(netdev);
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev))
+ netdev->netdev_ops->ndo_stop(netdev);
+
+ netdev_unlock(netdev);
+ rtnl_unlock();
+
+null_uc_addr:
+ fbnic_fw_log_disable(fbd);
+
+ devl_lock(priv_to_devlink(fbd));
+
+ fbnic_fw_free_mbx(fbd);
+
+ devl_unlock(priv_to_devlink(fbd));
+
+ /* Free the IRQs so they aren't trying to occupy sleeping CPUs */
+ fbnic_free_irqs(fbd);
+
+ /* Hardware is about to go away, so switch off MMIO access internally */
+ WRITE_ONCE(fbd->uc_addr0, NULL);
+ WRITE_ONCE(fbd->uc_addr4, NULL);
+
+ return 0;
+}
+
+static int __fbnic_pm_resume(struct device *dev)
+{
+ struct fbnic_dev *fbd = dev_get_drvdata(dev);
+ struct net_device *netdev = fbd->netdev;
+ void __iomem * const *iomap_table;
+ struct fbnic_net *fbn;
+ int err;
+
+ /* Restore MMIO access */
+ iomap_table = pcim_iomap_table(to_pci_dev(dev));
+ fbd->uc_addr0 = iomap_table[0];
+ fbd->uc_addr4 = iomap_table[4];
+
+ /* Rerequest the IRQs */
+ err = fbnic_alloc_irqs(fbd);
+ if (err)
+ goto err_invalidate_uc_addr;
+
+ fbd->mac->init_regs(fbd);
+
+ devl_lock(priv_to_devlink(fbd));
+
+ /* Re-enable mailbox */
+ err = fbnic_fw_request_mbx(fbd);
+ devl_unlock(priv_to_devlink(fbd));
+ if (err)
+ goto err_free_irqs;
+
+ /* Only send log history if log buffer is empty to prevent duplicate
+ * log entries.
+ */
+ fbnic_fw_log_enable(fbd, list_empty(&fbd->fw_log.entries));
+
+ /* Since the FW should be up, check if it reported OTP errors */
+ fbnic_devlink_otp_check(fbd, "error detected after PM resume");
+
+ /* No netdev means there isn't a network interface to bring up */
+ if (fbnic_init_failure(fbd))
+ return 0;
+
+ fbn = netdev_priv(netdev);
+
+ /* Reset the queues if needed */
+ fbnic_reset_queues(fbn, fbn->num_tx_queues, fbn->num_rx_queues);
+
+ rtnl_lock();
+ netdev_lock(netdev);
+
+ if (netif_running(netdev))
+ err = __fbnic_open(fbn);
+
+ netdev_unlock(netdev);
+ rtnl_unlock();
+ if (err)
+ goto err_free_mbx;
+
+ return 0;
+err_free_mbx:
+ fbnic_fw_log_disable(fbd);
+
+ devl_lock(priv_to_devlink(fbd));
+ fbnic_fw_free_mbx(fbd);
+ devl_unlock(priv_to_devlink(fbd));
+err_free_irqs:
+ fbnic_free_irqs(fbd);
+err_invalidate_uc_addr:
+ WRITE_ONCE(fbd->uc_addr0, NULL);
+ WRITE_ONCE(fbd->uc_addr4, NULL);
+ return err;
+}
+
+static void __fbnic_pm_attach(struct device *dev)
+{
+ struct fbnic_dev *fbd = dev_get_drvdata(dev);
+ struct net_device *netdev = fbd->netdev;
+ struct fbnic_net *fbn;
+
+ rtnl_lock();
+ fbnic_reset_hw_stats(fbd);
+ rtnl_unlock();
+
+ if (fbnic_init_failure(fbd))
+ return;
+
+ fbn = netdev_priv(netdev);
+
+ if (netif_running(netdev))
+ fbnic_up(fbn);
+
+ netif_device_attach(netdev);
+}
+
+static int __maybe_unused fbnic_pm_resume(struct device *dev)
+{
+ int err;
+
+ err = __fbnic_pm_resume(dev);
+ if (!err)
+ __fbnic_pm_attach(dev);
+
+ return err;
+}
+
+static const struct dev_pm_ops fbnic_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(fbnic_pm_suspend, fbnic_pm_resume)
+};
+
+static void fbnic_shutdown(struct pci_dev *pdev)
+{
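+ /* Quiesce the device for shutdown by reusing the suspend path */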
+ fbnic_pm_suspend(&pdev->dev);
+}
+
+static pci_ers_result_t fbnic_err_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ /* Disconnect device if failure is not recoverable via reset */
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ fbnic_pm_suspend(&pdev->dev);
+
+ /* Request a slot reset */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t fbnic_err_slot_reset(struct pci_dev *pdev)
+{
+ int err;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ if (pci_enable_device_mem(pdev)) {
+ dev_err(&pdev->dev,
+ "Cannot re-enable PCI device after reset.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ /* Restore device to previous state */
+ err = __fbnic_pm_resume(&pdev->dev);
+
+ return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
+}
+
+static void fbnic_err_resume(struct pci_dev *pdev)
+{
+ __fbnic_pm_attach(&pdev->dev);
+}
+
+static const struct pci_error_handlers fbnic_err_handler = {
+ .error_detected = fbnic_err_error_detected,
+ .slot_reset = fbnic_err_slot_reset,
+ .resume = fbnic_err_resume,
+};
+
+static struct pci_driver fbnic_driver = {
+ .name = fbnic_driver_name,
+ .id_table = fbnic_pci_tbl,
+ .probe = fbnic_probe,
+ .remove = fbnic_remove,
+ .driver.pm = &fbnic_pm_ops,
+ .shutdown = fbnic_shutdown,
+ .err_handler = &fbnic_err_handler,
+};
+
+/**
+ * fbnic_init_module - Driver Registration Routine
+ *
+ * The first routine called when the driver is loaded. All it does is
+ * register with the PCI subsystem.
+ *
+ * Return: 0 on success, negative on failure
+ **/
+static int __init fbnic_init_module(void)
+{
+ int err;
+
+ fbnic_dbg_init();
+
+ err = pci_register_driver(&fbnic_driver);
+ if (err) {
+ fbnic_dbg_exit();
+ goto out;
+ }
+
+ pr_info(DRV_SUMMARY " (%s)", fbnic_driver.name);
+out:
+ return err;
+}
+module_init(fbnic_init_module);
+
+/**
+ * fbnic_exit_module - Driver Exit Cleanup Routine
+ *
+ * Called just before the driver is removed from memory.
+ **/
+static void __exit fbnic_exit_module(void)
+{
+ pci_unregister_driver(&fbnic_driver);
+
+ fbnic_dbg_exit();
+}
+module_exit(fbnic_exit_module);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c b/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c
new file mode 100644
index 000000000000..09c5225111be
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c
@@ -0,0 +1,311 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/pcs/pcs-xpcs.h>
+#include <linux/phy.h>
+#include <linux/phylink.h>
+
+#include "fbnic.h"
+#include "fbnic_mac.h"
+#include "fbnic_netdev.h"
+
+static phy_interface_t fbnic_phylink_select_interface(u8 aui)
+{
+ switch (aui) {
+ case FBNIC_AUI_100GAUI2:
+ return PHY_INTERFACE_MODE_100GBASEP;
+ case FBNIC_AUI_50GAUI1:
+ return PHY_INTERFACE_MODE_50GBASER;
+ case FBNIC_AUI_LAUI2:
+ return PHY_INTERFACE_MODE_LAUI;
+ case FBNIC_AUI_25GAUI:
+ return PHY_INTERFACE_MODE_25GBASER;
+ }
+
+ return PHY_INTERFACE_MODE_NA;
+}
+
+void fbnic_phylink_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ phylink_ethtool_get_pauseparam(fbn->phylink, pause);
+}
+
+int fbnic_phylink_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ return phylink_ethtool_set_pauseparam(fbn->phylink, pause);
+}
+
+static void
+fbnic_phylink_get_supported_fec_modes(unsigned long *supported)
+{
+ /* The NIC can support up to 8 possible combinations:
+ * either 50G-CR or 100G-CR2, with RS FEC mode only, or
+ * either 25G-CR or 50G-CR2, with No FEC, RS, or Base-R.
+ */
+ if (phylink_test(supported, 100000baseCR2_Full) ||
+ phylink_test(supported, 50000baseCR_Full))
+ phylink_set(supported, FEC_RS);
+ if (phylink_test(supported, 50000baseCR2_Full) ||
+ phylink_test(supported, 25000baseCR_Full)) {
+ phylink_set(supported, FEC_BASER);
+ phylink_set(supported, FEC_NONE);
+ phylink_set(supported, FEC_RS);
+ }
+}
+
+int fbnic_phylink_ethtool_ksettings_get(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ int err;
+
+ err = phylink_ethtool_ksettings_get(fbn->phylink, cmd);
+ if (!err) {
+ unsigned long *supp = cmd->link_modes.supported;
+
+ cmd->base.port = PORT_DA;
+ cmd->lanes = (fbn->aui & FBNIC_AUI_MODE_R2) ? 2 : 1;
+
+ fbnic_phylink_get_supported_fec_modes(supp);
+ }
+
+ return err;
+}
+
+int fbnic_phylink_get_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fecparam)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ if (fbn->fec & FBNIC_FEC_RS) {
+ fecparam->active_fec = ETHTOOL_FEC_RS;
+ fecparam->fec = ETHTOOL_FEC_RS;
+ } else if (fbn->fec & FBNIC_FEC_BASER) {
+ fecparam->active_fec = ETHTOOL_FEC_BASER;
+ fecparam->fec = ETHTOOL_FEC_BASER;
+ } else {
+ fecparam->active_fec = ETHTOOL_FEC_OFF;
+ fecparam->fec = ETHTOOL_FEC_OFF;
+ }
+
+ if (fbn->aui & FBNIC_AUI_MODE_PAM4)
+ fecparam->fec |= ETHTOOL_FEC_AUTO;
+
+ return 0;
+}
+
+static struct phylink_pcs *
+fbnic_phylink_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct net_device *netdev = to_net_dev(config->dev);
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ return fbn->pcs;
+}
+
+static int
+fbnic_phylink_mac_prepare(struct phylink_config *config, unsigned int mode,
+ phy_interface_t iface)
+{
+ struct net_device *netdev = to_net_dev(config->dev);
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ fbd->mac->prepare(fbd, fbn->aui, fbn->fec);
+
+ return 0;
+}
+
+static void
+fbnic_phylink_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
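+ /* Nothing to do here; the MAC is programmed in mac_prepare() and the
+ * link state is retested in mac_finish().
+ */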
+}
+
+static int
+fbnic_phylink_mac_finish(struct phylink_config *config, unsigned int mode,
+ phy_interface_t iface)
+{
+ struct net_device *netdev = to_net_dev(config->dev);
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ /* Retest the link state and restart interrupts */
+ fbd->mac->get_link(fbd, fbn->aui, fbn->fec);
+
+ return 0;
+}
+
+static void
+fbnic_phylink_mac_link_down(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct net_device *netdev = to_net_dev(config->dev);
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ fbd->mac->link_down(fbd);
+
+ fbn->link_down_events++;
+}
+
+static void
+fbnic_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy, unsigned int mode,
+ phy_interface_t interface, int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct net_device *netdev = to_net_dev(config->dev);
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ fbn->tx_pause = tx_pause;
+ fbnic_config_drop_mode(fbn, tx_pause);
+
+ fbd->mac->link_up(fbd, tx_pause, rx_pause);
+}
+
+static const struct phylink_mac_ops fbnic_phylink_mac_ops = {
+ .mac_select_pcs = fbnic_phylink_mac_select_pcs,
+ .mac_prepare = fbnic_phylink_mac_prepare,
+ .mac_config = fbnic_phylink_mac_config,
+ .mac_finish = fbnic_phylink_mac_finish,
+ .mac_link_down = fbnic_phylink_mac_link_down,
+ .mac_link_up = fbnic_phylink_mac_link_up,
+};
+
+/**
+ * fbnic_phylink_create - Phylink device creation
+ * @netdev: Network Device struct to attach phylink device
+ *
+ * Initialize and attach a phylink instance to the device. The phylink
+ * device will make use of the netdev struct to track carrier and will
+ * eventually be used to expose the current state of the MAC and PCS
+ * setup.
+ *
+ * Return: 0 on success, negative on failure
+ **/
+int fbnic_phylink_create(struct net_device *netdev)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
+ struct phylink_pcs *pcs;
+ struct phylink *phylink;
+ int err;
+
+ pcs = xpcs_create_pcs_mdiodev(fbd->mdio_bus, 0);
+ if (IS_ERR(pcs)) {
+ err = PTR_ERR(pcs);
+ dev_err(fbd->dev, "Failed to create PCS device: %d\n", err);
+ return err;
+ }
+
+ fbn->pcs = pcs;
+
+ fbn->phylink_config.dev = &netdev->dev;
+ fbn->phylink_config.type = PHYLINK_NETDEV;
+ fbn->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
+ MAC_25000FD | MAC_50000FD |
+ MAC_100000FD;
+ fbn->phylink_config.default_an_inband = true;
+
+ __set_bit(PHY_INTERFACE_MODE_100GBASEP,
+ fbn->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_50GBASER,
+ fbn->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_LAUI,
+ fbn->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_25GBASER,
+ fbn->phylink_config.supported_interfaces);
+
+ fbnic_mac_get_fw_settings(fbd, &fbn->aui, &fbn->fec);
+
+ phylink = phylink_create(&fbn->phylink_config, NULL,
+ fbnic_phylink_select_interface(fbn->aui),
+ &fbnic_phylink_mac_ops);
+ if (IS_ERR(phylink)) {
+ err = PTR_ERR(phylink);
+ dev_err(netdev->dev.parent,
+ "Failed to create Phylink interface, err: %d\n", err);
+ xpcs_destroy_pcs(pcs);
+ return err;
+ }
+
+ fbn->phylink = phylink;
+
+ return 0;
+}
+
+/**
+ * fbnic_phylink_destroy - Teardown phylink related interfaces
+ * @netdev: Network Device struct containing phylink device
+ *
+ * Detach and free resources related to phylink interface.
+ **/
+void fbnic_phylink_destroy(struct net_device *netdev)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ if (fbn->phylink)
+ phylink_destroy(fbn->phylink);
+ if (fbn->pcs)
+ xpcs_destroy_pcs(fbn->pcs);
+}
+
+/**
+ * fbnic_phylink_pmd_training_complete_notify - PMD training complete notifier
+ * @netdev: Netdev struct phylink device attached to
+ *
+ * When the link first comes up the PMD will have a period of 2 to 3 seconds
+ * where the link will flutter due to link training. To avoid spamming the
+ * kernel log with messages about this we add a delay of 4 seconds from the
+ * time of the last PCS report of link so that we are unlikely to see any
+ * further link loss events due to link training.
+ **/
+void fbnic_phylink_pmd_training_complete_notify(struct net_device *netdev)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ if (fbd->pmd_state != FBNIC_PMD_TRAINING)
+ return;
+
+ /* Prevent reading end_of_pmd_training until we have verified the state */
+ smp_rmb();
+
+ if (!time_before(READ_ONCE(fbd->end_of_pmd_training), jiffies))
+ return;
+
+ /* At this point we have verified that the link has been up for
+ * the full training duration. As a first step we will try
+ * transitioning to link ready.
+ */
+ if (cmpxchg(&fbd->pmd_state, FBNIC_PMD_TRAINING,
+ FBNIC_PMD_LINK_READY) != FBNIC_PMD_TRAINING)
+ return;
+
+ /* Perform a follow-up check to verify that the link didn't flap
+ * just before our transition by rechecking the training timer.
+ */
+ if (!time_before(READ_ONCE(fbd->end_of_pmd_training), jiffies))
+ return;
+
+ /* The training timeout has been completed. We are good to swap out
+ * link_ready for send_data assuming no other events have occurred
+ * that would have pulled us back into initialization or training.
+ */
+ if (cmpxchg(&fbd->pmd_state, FBNIC_PMD_LINK_READY,
+ FBNIC_PMD_SEND_DATA) != FBNIC_PMD_LINK_READY)
+ return;
+
+ phylink_pcs_change(fbn->pcs, false);
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c
new file mode 100644
index 000000000000..7f31e890031c
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c
@@ -0,0 +1,1246 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <net/ipv6.h>
+
+#include "fbnic.h"
+#include "fbnic_fw.h"
+#include "fbnic_netdev.h"
+#include "fbnic_rpc.h"
+
+void fbnic_reset_indir_tbl(struct fbnic_net *fbn)
+{
+ unsigned int num_rx = fbn->num_rx_queues;
+ unsigned int i;
+
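+ /* Leave the table untouched if the user configured it via ethtool */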
+ if (netif_is_rxfh_configured(fbn->netdev))
+ return;
+
+ for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++)
+ fbn->indir_tbl[0][i] = ethtool_rxfh_indir_default(i, num_rx);
+}
+
+void fbnic_rss_key_fill(u32 *buffer)
+{
+ static u32 rss_key[FBNIC_RPC_RSS_KEY_DWORD_LEN];
+
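+ /* Generate the key once so every caller sees the same key; the last
+ * dword is masked down to FBNIC_RPC_RSS_KEY_LAST_MASK.
+ */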
+ net_get_random_once(rss_key, sizeof(rss_key));
+ rss_key[FBNIC_RPC_RSS_KEY_LAST_IDX] &= FBNIC_RPC_RSS_KEY_LAST_MASK;
+
+ memcpy(buffer, rss_key, sizeof(rss_key));
+}
+
+#define RX_HASH_OPT_L4 \
+ (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
+#define RX_HASH_OPT_L3 \
+ (RXH_IP_SRC | RXH_IP_DST)
+#define RX_HASH_OPT_L2 RXH_L2DA
+
+void fbnic_rss_init_en_mask(struct fbnic_net *fbn)
+{
+ fbn->rss_flow_hash[FBNIC_TCP4_HASH_OPT] = RX_HASH_OPT_L4;
+ fbn->rss_flow_hash[FBNIC_TCP6_HASH_OPT] = RX_HASH_OPT_L4;
+
+ fbn->rss_flow_hash[FBNIC_UDP4_HASH_OPT] = RX_HASH_OPT_L3;
+ fbn->rss_flow_hash[FBNIC_UDP6_HASH_OPT] = RX_HASH_OPT_L3;
+ fbn->rss_flow_hash[FBNIC_IPV4_HASH_OPT] = RX_HASH_OPT_L3;
+ fbn->rss_flow_hash[FBNIC_IPV6_HASH_OPT] = RX_HASH_OPT_L3;
+
+ fbn->rss_flow_hash[FBNIC_ETHER_HASH_OPT] = RX_HASH_OPT_L2;
+}
+
+void fbnic_rss_disable_hw(struct fbnic_dev *fbd)
+{
+ /* Disable RPC by clearing enable bit and configuration */
+ if (!fbnic_bmc_present(fbd))
+ wr32(fbd, FBNIC_RPC_RMI_CONFIG,
+ FIELD_PREP(FBNIC_RPC_RMI_CONFIG_OH_BYTES, 20));
+}
+
+#define FBNIC_FH_2_RSSEM_BIT(_fh, _rssem, _val) \
+ FIELD_PREP(FBNIC_RPC_ACT_TBL1_RSS_ENA_##_rssem, \
+ FIELD_GET(RXH_##_fh, _val))
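+ /* Translate the ethtool RXH_* flow hash bits into the RSS enable bits
+ * used by action table 1.
+ */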
+u16 fbnic_flow_hash_2_rss_en_mask(struct fbnic_net *fbn, int flow_type)
+{
+ u32 flow_hash = fbn->rss_flow_hash[flow_type];
+ u32 rss_en_mask = 0;
+
+ rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(L2DA, L2_DA, flow_hash);
+ rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(IP_SRC, IP_SRC, flow_hash);
+ rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(IP_DST, IP_DST, flow_hash);
+ rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(L4_B_0_1, L4_SRC, flow_hash);
+ rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(L4_B_2_3, L4_DST, flow_hash);
+ rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(IP6_FL, OV6_FL_LBL, flow_hash);
+ rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(IP6_FL, IV6_FL_LBL, flow_hash);
+
+ return rss_en_mask;
+}
+
+void fbnic_rss_reinit_hw(struct fbnic_dev *fbd, struct fbnic_net *fbn)
+{
+ unsigned int i;
+
+ for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++) {
+ wr32(fbd, FBNIC_RPC_RSS_TBL(0, i), fbn->indir_tbl[0][i]);
+ wr32(fbd, FBNIC_RPC_RSS_TBL(1, i), fbn->indir_tbl[1][i]);
+ }
+
+ for (i = 0; i < FBNIC_RPC_RSS_KEY_DWORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_RSS_KEY(i), fbn->rss_key[i]);
+
+ /* Default action for this is to drop w/ no destination */
+ wr32(fbd, FBNIC_RPC_ACT_TBL0_DEFAULT, FBNIC_RPC_ACT_TBL0_DROP);
+ wrfl(fbd);
+
+ wr32(fbd, FBNIC_RPC_ACT_TBL1_DEFAULT, 0);
+
+ /* If it isn't already enabled, set the RMI Config value to enable RPC */
+ wr32(fbd, FBNIC_RPC_RMI_CONFIG,
+ FIELD_PREP(FBNIC_RPC_RMI_CONFIG_MTU, FBNIC_MAX_JUMBO_FRAME_SIZE) |
+ FIELD_PREP(FBNIC_RPC_RMI_CONFIG_OH_BYTES, 20) |
+ FBNIC_RPC_RMI_CONFIG_ENABLE);
+}
+
+void fbnic_bmc_rpc_all_multi_config(struct fbnic_dev *fbd,
+ bool enable_host)
+{
+ struct fbnic_act_tcam *act_tcam;
+ struct fbnic_mac_addr *mac_addr;
+ int j;
+
+ /* We need to add the all multicast filter at the end of the
+ * multicast address list. This way if there are any that are
+ * shared between the host and the BMC they can be directed to
+ * both. Otherwise the remainder just get sent directly to the
+ * BMC.
+ */
+ mac_addr = &fbd->mac_addr[fbd->mac_addr_boundary - 1];
+ if (fbnic_bmc_present(fbd) && fbd->fw_cap.all_multi) {
+ if (mac_addr->state != FBNIC_TCAM_S_VALID) {
+ eth_zero_addr(mac_addr->value.addr8);
+ eth_broadcast_addr(mac_addr->mask.addr8);
+ mac_addr->value.addr8[0] ^= 1;
+ mac_addr->mask.addr8[0] ^= 1;
+ set_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam);
+ mac_addr->state = FBNIC_TCAM_S_ADD;
+ }
+ if (enable_host)
+ set_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
+ mac_addr->act_tcam);
+ else
+ clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
+ mac_addr->act_tcam);
+ } else {
+ __fbnic_xc_unsync(mac_addr, FBNIC_MAC_ADDR_T_BMC);
+ __fbnic_xc_unsync(mac_addr, FBNIC_MAC_ADDR_T_ALLMULTI);
+ }
+
+ /* We have to add a special handler for multicast as the
+ * BMC may have an all-multi rule already in place. As such
+ * adding a rule ourselves won't do any good so we will have
+ * to modify the rules for the ALL MULTI below if the BMC
+ * already has the rule in place.
+ */
+ act_tcam = &fbd->act_tcam[FBNIC_RPC_ACT_TBL_BMC_ALL_MULTI_OFFSET];
+
+ /* If we are not enabling the rule just delete it. We will fall
+ * back to the RSS rules that support the multicast addresses.
+ */
+ if (!fbnic_bmc_present(fbd) || !fbd->fw_cap.all_multi || enable_host) {
+ if (act_tcam->state == FBNIC_TCAM_S_VALID)
+ act_tcam->state = FBNIC_TCAM_S_DELETE;
+ return;
+ }
+
+ /* Rewrite TCAM rule 23 to handle BMC all-multi traffic */
+ act_tcam->dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
+ FBNIC_RPC_ACT_TBL0_DEST_BMC);
+ act_tcam->mask.tcam[0] = 0xffff;
+
+ /* MACDA 0 - 3 are reserved for the BMC MAC addresses */
+ act_tcam->value.tcam[1] =
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX,
+ fbd->mac_addr_boundary - 1) |
+ FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
+ act_tcam->mask.tcam[1] = 0xffff &
+ ~FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX &
+ ~FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
+
+ for (j = 2; j < FBNIC_RPC_TCAM_ACT_WORD_LEN; j++)
+ act_tcam->mask.tcam[j] = 0xffff;
+
+ act_tcam->state = FBNIC_TCAM_S_UPDATE;
+}
+
+void fbnic_bmc_rpc_init(struct fbnic_dev *fbd)
+{
+ int i = FBNIC_RPC_TCAM_MACDA_BMC_ADDR_IDX;
+ struct fbnic_act_tcam *act_tcam;
+ struct fbnic_mac_addr *mac_addr;
+ int j;
+
+ /* Check if BMC is present */
+ if (!fbnic_bmc_present(fbd))
+ return;
+
+ /* Fetch BMC MAC addresses from firmware capabilities */
+ for (j = 0; j < 4; j++) {
+ u8 *bmc_mac = fbd->fw_cap.bmc_mac_addr[j];
+
+ /* Validate BMC MAC addresses */
+ if (is_zero_ether_addr(bmc_mac))
+ continue;
+
+ if (is_multicast_ether_addr(bmc_mac))
+ mac_addr = __fbnic_mc_sync(fbd, bmc_mac);
+ else
+ mac_addr = &fbd->mac_addr[i++];
+
+ if (!mac_addr) {
+ netdev_err(fbd->netdev,
+ "No slot for BMC MAC address[%d]\n", j);
+ continue;
+ }
+
+ ether_addr_copy(mac_addr->value.addr8, bmc_mac);
+ eth_zero_addr(mac_addr->mask.addr8);
+
+ set_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam);
+ mac_addr->state = FBNIC_TCAM_S_ADD;
+ }
+
+ /* Validate Broadcast is also present, record it and tag it */
+ mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX];
+ eth_broadcast_addr(mac_addr->value.addr8);
+ set_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam);
+ mac_addr->state = FBNIC_TCAM_S_ADD;
+
+ /* Rewrite TCAM rule 0 if it isn't present to relocate BMC rules */
+ act_tcam = &fbd->act_tcam[FBNIC_RPC_ACT_TBL_BMC_OFFSET];
+ act_tcam->dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
+ FBNIC_RPC_ACT_TBL0_DEST_BMC);
+ act_tcam->mask.tcam[0] = 0xffff;
+
+ /* MACDA 0 - 3 are reserved for the BMC MAC addresses. To account
+ * for that we have to mask out the lower 2 bits of the MACDA index
+ * by performing an &= with 0x1c.
+ */
+ act_tcam->value.tcam[1] = FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
+ act_tcam->mask.tcam[1] = 0xffff &
+ ~FIELD_PREP(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX, 0x1c) &
+ ~FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
+
+ for (j = 2; j < FBNIC_RPC_TCAM_ACT_WORD_LEN; j++)
+ act_tcam->mask.tcam[j] = 0xffff;
+
+ act_tcam->state = FBNIC_TCAM_S_UPDATE;
+}
+
+void fbnic_bmc_rpc_check(struct fbnic_dev *fbd)
+{
+ int err;
+
+ if (fbd->fw_cap.need_bmc_tcam_reinit) {
+ fbnic_bmc_rpc_init(fbd);
+ __fbnic_set_rx_mode(fbd);
+ fbd->fw_cap.need_bmc_tcam_reinit = false;
+ }
+
+ if (fbd->fw_cap.need_bmc_macda_sync) {
+ err = fbnic_fw_xmit_rpc_macda_sync(fbd);
+ if (err)
+ dev_warn(fbd->dev,
+ "Writing MACDA table to FW failed, err: %d\n", err);
+ fbd->fw_cap.need_bmc_macda_sync = false;
+ }
+}
+
+#define FBNIC_ACT1_INIT(_l4, _udp, _ip, _v6) \
+ (((_l4) ? FBNIC_RPC_TCAM_ACT1_L4_VALID : 0) | \
+ ((_udp) ? FBNIC_RPC_TCAM_ACT1_L4_IS_UDP : 0) | \
+ ((_ip) ? FBNIC_RPC_TCAM_ACT1_IP_VALID : 0) | \
+ ((_v6) ? FBNIC_RPC_TCAM_ACT1_IP_IS_V6 : 0))
+
+#define FBNIC_TSTAMP_MASK(_all, _udp, _ether) \
+ (((_all) ? ((1u << FBNIC_NUM_HASH_OPT) - 1) : 0) | \
+ ((_udp) ? (1u << FBNIC_UDP6_HASH_OPT) | \
+ (1u << FBNIC_UDP4_HASH_OPT) : 0) | \
+ ((_ether) ? (1u << FBNIC_ETHER_HASH_OPT) : 0))
+
+void fbnic_rss_reinit(struct fbnic_dev *fbd, struct fbnic_net *fbn)
+{
+ static const u32 act1_value[FBNIC_NUM_HASH_OPT] = {
+ FBNIC_ACT1_INIT(1, 1, 1, 1), /* UDP6 */
+ FBNIC_ACT1_INIT(1, 1, 1, 0), /* UDP4 */
+ FBNIC_ACT1_INIT(1, 0, 1, 1), /* TCP6 */
+ FBNIC_ACT1_INIT(1, 0, 1, 0), /* TCP4 */
+ FBNIC_ACT1_INIT(0, 0, 1, 1), /* IP6 */
+ FBNIC_ACT1_INIT(0, 0, 1, 0), /* IP4 */
+ 0 /* Ether */
+ };
+ u32 tstamp_mask = 0;
+ unsigned int i;
+
+ /* To support scenarios where a BMC is present we must write the
+ * rules twice, once for the unicast cases, and once again for
+ * the broadcast/multicast cases as we have to support 2 destinations.
+ */
+ BUILD_BUG_ON(FBNIC_RSS_EN_NUM_UNICAST * 2 != FBNIC_RSS_EN_NUM_ENTRIES);
+ BUILD_BUG_ON(ARRAY_SIZE(act1_value) != FBNIC_NUM_HASH_OPT);
+
+ /* Set timestamp mask with 1b per flow type */
+ if (fbn->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) {
+ switch (fbn->hwtstamp_config.rx_filter) {
+ case HWTSTAMP_FILTER_ALL:
+ tstamp_mask = FBNIC_TSTAMP_MASK(1, 1, 1);
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ tstamp_mask = FBNIC_TSTAMP_MASK(0, 1, 1);
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ tstamp_mask = FBNIC_TSTAMP_MASK(0, 1, 0);
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ tstamp_mask = FBNIC_TSTAMP_MASK(0, 0, 1);
+ break;
+ default:
+ netdev_warn(fbn->netdev, "Unsupported hwtstamp_rx_filter\n");
+ break;
+ }
+ }
+
+ /* Program RSS hash enable mask for host in action TCAM/table. */
+ for (i = fbnic_bmc_present(fbd) ? 0 : FBNIC_RSS_EN_NUM_UNICAST;
+ i < FBNIC_RSS_EN_NUM_ENTRIES; i++) {
+ unsigned int idx = i + FBNIC_RPC_ACT_TBL_RSS_OFFSET;
+ struct fbnic_act_tcam *act_tcam = &fbd->act_tcam[idx];
+ u32 flow_hash, dest, rss_en_mask;
+ int flow_type, j;
+ u16 value = 0;
+
+ flow_type = i % FBNIC_RSS_EN_NUM_UNICAST;
+ flow_hash = fbn->rss_flow_hash[flow_type];
+
+ /* Set DEST_HOST based on absence of RXH_DISCARD */
+ dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
+ !(RXH_DISCARD & flow_hash) ?
+ FBNIC_RPC_ACT_TBL0_DEST_HOST : 0);
+
+ if (i >= FBNIC_RSS_EN_NUM_UNICAST && fbnic_bmc_present(fbd))
+ dest |= FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
+ FBNIC_RPC_ACT_TBL0_DEST_BMC);
+
+ if (!dest)
+ dest = FBNIC_RPC_ACT_TBL0_DROP;
+ else if (tstamp_mask & (1u << flow_type))
+ dest |= FBNIC_RPC_ACT_TBL0_TS_ENA;
+
+ if (act1_value[flow_type] & FBNIC_RPC_TCAM_ACT1_L4_VALID)
+ dest |= FIELD_PREP(FBNIC_RPC_ACT_TBL0_DMA_HINT,
+ FBNIC_RCD_HDR_AL_DMA_HINT_L4);
+
+ rss_en_mask = fbnic_flow_hash_2_rss_en_mask(fbn, flow_type);
+
+ act_tcam->dest = dest;
+ act_tcam->rss_en_mask = rss_en_mask;
+ act_tcam->state = FBNIC_TCAM_S_UPDATE;
+
+ act_tcam->mask.tcam[0] = 0xffff;
+
+ /* We reserve the upper 8 MACDA TCAM entries for host
+ * unicast. So we set the value to 24 and mask the
+ * lower bits so that the lower entries can be used as
+ * multicast or BMC addresses.
+ */
+ if (i < FBNIC_RSS_EN_NUM_UNICAST)
+ value = FIELD_PREP(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX,
+ fbd->mac_addr_boundary);
+ value |= FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
+
+ flow_type = i % FBNIC_RSS_EN_NUM_UNICAST;
+ value |= act1_value[flow_type];
+
+ act_tcam->value.tcam[1] = value;
+ act_tcam->mask.tcam[1] = ~value;
+
+ for (j = 2; j < FBNIC_RPC_TCAM_ACT_WORD_LEN; j++)
+ act_tcam->mask.tcam[j] = 0xffff;
+
+ act_tcam->state = FBNIC_TCAM_S_UPDATE;
+ }
+}
+
+struct fbnic_mac_addr *__fbnic_uc_sync(struct fbnic_dev *fbd,
+ const unsigned char *addr)
+{
+ struct fbnic_mac_addr *avail_addr = NULL;
+ unsigned int i;
+
+ /* Scan from middle of list to bottom, filling bottom up.
+ * Skip the first entry which is reserved for dev_addr and
+ * leave the last entry to use for promiscuous filtering.
+ */
+ for (i = fbd->mac_addr_boundary - 1;
+ i < FBNIC_RPC_TCAM_MACDA_HOST_ADDR_IDX; i++) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];
+
+ if (mac_addr->state == FBNIC_TCAM_S_DISABLED) {
+ avail_addr = mac_addr;
+ } else if (ether_addr_equal(mac_addr->value.addr8, addr)) {
+ avail_addr = mac_addr;
+ break;
+ }
+ }
+
+ if (avail_addr && avail_addr->state == FBNIC_TCAM_S_DISABLED) {
+ ether_addr_copy(avail_addr->value.addr8, addr);
+ eth_zero_addr(avail_addr->mask.addr8);
+ avail_addr->state = FBNIC_TCAM_S_ADD;
+ }
+
+ return avail_addr;
+}
+
+struct fbnic_mac_addr *__fbnic_mc_sync(struct fbnic_dev *fbd,
+ const unsigned char *addr)
+{
+ struct fbnic_mac_addr *avail_addr = NULL;
+ unsigned int i;
+
+ /* Scan from middle of list to top, filling top down.
+ * Skip over the address reserved for the BMC MAC and
+ * exclude index 0 as that belongs to the broadcast address
+ */
+ for (i = fbd->mac_addr_boundary;
+ --i > FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX;) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];
+
+ if (mac_addr->state == FBNIC_TCAM_S_DISABLED) {
+ avail_addr = mac_addr;
+ } else if (ether_addr_equal(mac_addr->value.addr8, addr)) {
+ avail_addr = mac_addr;
+ break;
+ }
+ }
+
+ /* Scan the BMC addresses to see if the BMC may have already
+ * reserved the address.
+ */
+ while (--i) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];
+
+ if (!is_zero_ether_addr(mac_addr->mask.addr8))
+ continue;
+
+ /* Only move on if we find a match */
+ if (!ether_addr_equal(mac_addr->value.addr8, addr))
+ continue;
+
+ /* We need to pull this address to the shared area */
+ if (avail_addr) {
+ memcpy(avail_addr, mac_addr, sizeof(*mac_addr));
+ mac_addr->state = FBNIC_TCAM_S_DELETE;
+ avail_addr->state = FBNIC_TCAM_S_ADD;
+ }
+
+ break;
+ }
+
+ if (avail_addr && avail_addr->state == FBNIC_TCAM_S_DISABLED) {
+ ether_addr_copy(avail_addr->value.addr8, addr);
+ eth_zero_addr(avail_addr->mask.addr8);
+ avail_addr->state = FBNIC_TCAM_S_ADD;
+ }
+
+ return avail_addr;
+}
+
+int __fbnic_xc_unsync(struct fbnic_mac_addr *mac_addr, unsigned int tcam_idx)
+{
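+ /* Clear the ownership bit for this TCAM rule; once no rules reference
+ * the address mark the entry for deletion.
+ */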
+ if (!test_and_clear_bit(tcam_idx, mac_addr->act_tcam))
+ return -ENOENT;
+
+ if (bitmap_empty(mac_addr->act_tcam, FBNIC_RPC_TCAM_ACT_NUM_ENTRIES))
+ mac_addr->state = FBNIC_TCAM_S_DELETE;
+
+ return 0;
+}
+
+void fbnic_promisc_sync(struct fbnic_dev *fbd,
+ bool uc_promisc, bool mc_promisc)
+{
+ struct fbnic_mac_addr *mac_addr;
+
+ /* Populate last TCAM entry with promiscuous entry and 0/1 bit mask */
+ mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_PROMISC_IDX];
+ if (uc_promisc) {
+ if (!is_zero_ether_addr(mac_addr->value.addr8) ||
+ mac_addr->state != FBNIC_TCAM_S_VALID) {
+ eth_zero_addr(mac_addr->value.addr8);
+ eth_broadcast_addr(mac_addr->mask.addr8);
+ clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
+ mac_addr->act_tcam);
+ set_bit(FBNIC_MAC_ADDR_T_PROMISC,
+ mac_addr->act_tcam);
+ mac_addr->state = FBNIC_TCAM_S_ADD;
+ }
+ } else if (mc_promisc &&
+ (!fbnic_bmc_present(fbd) || !fbd->fw_cap.all_multi)) {
+ /* We have to add a special handler for multicast as the
+ * BMC may have an all-multi rule already in place. As such
+ * adding a rule ourselves won't do any good so we will have
+ * to modify the rules for the ALL MULTI below if the BMC
+ * already has the rule in place.
+ */
+ if (!is_multicast_ether_addr(mac_addr->value.addr8) ||
+ mac_addr->state != FBNIC_TCAM_S_VALID) {
+ eth_zero_addr(mac_addr->value.addr8);
+ eth_broadcast_addr(mac_addr->mask.addr8);
+ mac_addr->value.addr8[0] ^= 1;
+ mac_addr->mask.addr8[0] ^= 1;
+ set_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
+ mac_addr->act_tcam);
+ clear_bit(FBNIC_MAC_ADDR_T_PROMISC,
+ mac_addr->act_tcam);
+ mac_addr->state = FBNIC_TCAM_S_ADD;
+ }
+ } else if (mac_addr->state == FBNIC_TCAM_S_VALID) {
+ __fbnic_xc_unsync(mac_addr, FBNIC_MAC_ADDR_T_ALLMULTI);
+ __fbnic_xc_unsync(mac_addr, FBNIC_MAC_ADDR_T_PROMISC);
+ }
+}
+
+void fbnic_sift_macda(struct fbnic_dev *fbd)
+{
+ int dest, src;
+
+ /* Move BMC-only addresses back into the BMC region */
+ for (dest = FBNIC_RPC_TCAM_MACDA_BMC_ADDR_IDX,
+ src = FBNIC_RPC_TCAM_MACDA_MULTICAST_IDX;
+ ++dest < FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX &&
+ src < fbd->mac_addr_boundary;) {
+ struct fbnic_mac_addr *dest_addr = &fbd->mac_addr[dest];
+
+ if (dest_addr->state != FBNIC_TCAM_S_DISABLED)
+ continue;
+
+ while (src < fbd->mac_addr_boundary) {
+ struct fbnic_mac_addr *src_addr = &fbd->mac_addr[src++];
+
+ /* Verify BMC bit is set */
+ if (!test_bit(FBNIC_MAC_ADDR_T_BMC, src_addr->act_tcam))
+ continue;
+
+ /* Verify filter isn't already disabled */
+ if (src_addr->state == FBNIC_TCAM_S_DISABLED ||
+ src_addr->state == FBNIC_TCAM_S_DELETE)
+ continue;
+
+ /* Verify only BMC bit is set */
+ if (bitmap_weight(src_addr->act_tcam,
+ FBNIC_RPC_TCAM_ACT_NUM_ENTRIES) != 1)
+ continue;
+
+ /* Verify we are not moving a wildcard address */
+ if (!is_zero_ether_addr(src_addr->mask.addr8))
+ continue;
+
+ memcpy(dest_addr, src_addr, sizeof(*src_addr));
+ src_addr->state = FBNIC_TCAM_S_DELETE;
+ dest_addr->state = FBNIC_TCAM_S_ADD;
+ }
+ }
+}
+
+static void fbnic_clear_macda_entry(struct fbnic_dev *fbd, unsigned int idx)
+{
+ int i;
+
+ /* Invalidate entry and clear addr state info */
+ for (i = 0; i <= FBNIC_RPC_TCAM_MACDA_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_MACDA(idx, i), 0);
+}
+
+static void fbnic_clear_macda(struct fbnic_dev *fbd)
+{
+ int idx;
+
+ for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[idx];
+
+ if (mac_addr->state == FBNIC_TCAM_S_DISABLED)
+ continue;
+
+ if (test_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam)) {
+ if (fbnic_bmc_present(fbd))
+ continue;
+ dev_warn_once(fbd->dev,
+ "Found BMC MAC address w/ BMC not present\n");
+ }
+
+ fbnic_clear_macda_entry(fbd, idx);
+
+ /* If rule was already destined for deletion just wipe it now */
+ if (mac_addr->state == FBNIC_TCAM_S_DELETE) {
+ memset(mac_addr, 0, sizeof(*mac_addr));
+ continue;
+ }
+
+ /* Change state to update so that we will rewrite
+ * this tcam the next time fbnic_write_macda is called.
+ */
+ mac_addr->state = FBNIC_TCAM_S_UPDATE;
+ }
+}
+
+static void fbnic_clear_valid_macda(struct fbnic_dev *fbd)
+{
+ int idx;
+
+ for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[idx];
+
+ if (mac_addr->state == FBNIC_TCAM_S_VALID) {
+ fbnic_clear_macda_entry(fbd, idx);
+
+ mac_addr->state = FBNIC_TCAM_S_UPDATE;
+ }
+ }
+}
+
+static void fbnic_write_macda_entry(struct fbnic_dev *fbd, unsigned int idx,
+ struct fbnic_mac_addr *mac_addr)
+{
+ __be16 *mask, *value;
+ int i;
+
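+ /* Write the mask/value words most-significant word first, then set
+ * the validate bit in the final word to activate the entry.
+ */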
+ mask = &mac_addr->mask.addr16[FBNIC_RPC_TCAM_MACDA_WORD_LEN - 1];
+ value = &mac_addr->value.addr16[FBNIC_RPC_TCAM_MACDA_WORD_LEN - 1];
+
+ for (i = 0; i < FBNIC_RPC_TCAM_MACDA_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_MACDA(idx, i),
+ FIELD_PREP(FBNIC_RPC_TCAM_MACDA_MASK, ntohs(*mask--)) |
+ FIELD_PREP(FBNIC_RPC_TCAM_MACDA_VALUE, ntohs(*value--)));
+
+ wrfl(fbd);
+
+ wr32(fbd, FBNIC_RPC_TCAM_MACDA(idx, i), FBNIC_RPC_TCAM_VALIDATE);
+}
+
+void fbnic_write_macda(struct fbnic_dev *fbd)
+{
+ int idx, updates = 0;
+
+ for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[idx];
+
+ /* Check if update flag is set else skip. */
+ if (!(mac_addr->state & FBNIC_TCAM_S_UPDATE))
+ continue;
+
+ /* Record update count */
+ updates++;
+
+ /* Clear by writing 0s. */
+ if (mac_addr->state == FBNIC_TCAM_S_DELETE) {
+ /* Invalidate entry and clear addr state info */
+ fbnic_clear_macda_entry(fbd, idx);
+ memset(mac_addr, 0, sizeof(*mac_addr));
+
+ continue;
+ }
+
+ fbnic_write_macda_entry(fbd, idx, mac_addr);
+
+ mac_addr->state = FBNIC_TCAM_S_VALID;
+ }
+
+ /* If reinitializing the BMC TCAM we are doing an initial update */
+ if (fbd->fw_cap.need_bmc_tcam_reinit)
+ updates++;
+
+ /* If needed notify firmware of changes to MACDA TCAM */
+ if (updates != 0 && fbnic_bmc_present(fbd))
+ fbd->fw_cap.need_bmc_macda_sync = true;
+}
+
+static void fbnic_clear_act_tcam(struct fbnic_dev *fbd, unsigned int idx)
+{
+ int i;
+
+ /* Invalidate entry and clear addr state info */
+ for (i = 0; i <= FBNIC_RPC_TCAM_ACT_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_ACT(idx, i), 0);
+}
+
+static void fbnic_clear_tce_tcam_entry(struct fbnic_dev *fbd, unsigned int idx)
+{
+ int i;
+
+ /* Invalidate entry and clear addr state info */
+ for (i = 0; i <= FBNIC_TCE_TCAM_WORD_LEN; i++)
+ wr32(fbd, FBNIC_TCE_RAM_TCAM(idx, i), 0);
+}
+
+static void fbnic_write_tce_tcam_dest(struct fbnic_dev *fbd, unsigned int idx,
+ struct fbnic_mac_addr *mac_addr)
+{
+ u32 dest = FBNIC_TCE_TCAM_DEST_BMC;
+ u32 idx2dest_map;
+
+ if (is_multicast_ether_addr(mac_addr->value.addr8))
+ dest |= FBNIC_TCE_TCAM_DEST_MAC;
+
+ idx2dest_map = rd32(fbd, FBNIC_TCE_TCAM_IDX2DEST_MAP);
+ idx2dest_map &= ~(FBNIC_TCE_TCAM_IDX2DEST_MAP_DEST_ID_0 << (4 * idx));
+ idx2dest_map |= dest << (4 * idx);
+
+ wr32(fbd, FBNIC_TCE_TCAM_IDX2DEST_MAP, idx2dest_map);
+}
+
+static void fbnic_write_tce_tcam_entry(struct fbnic_dev *fbd, unsigned int idx,
+ struct fbnic_mac_addr *mac_addr)
+{
+ __be16 *mask, *value;
+ int i;
+
+ mask = &mac_addr->mask.addr16[FBNIC_TCE_TCAM_WORD_LEN - 1];
+ value = &mac_addr->value.addr16[FBNIC_TCE_TCAM_WORD_LEN - 1];
+
+ for (i = 0; i < FBNIC_TCE_TCAM_WORD_LEN; i++)
+ wr32(fbd, FBNIC_TCE_RAM_TCAM(idx, i),
+ FIELD_PREP(FBNIC_TCE_RAM_TCAM_MASK, ntohs(*mask--)) |
+ FIELD_PREP(FBNIC_TCE_RAM_TCAM_VALUE, ntohs(*value--)));
+
+ wrfl(fbd);
+
+ wr32(fbd, FBNIC_TCE_RAM_TCAM3(idx), FBNIC_TCE_RAM_TCAM3_MCQ_MASK |
+ FBNIC_TCE_RAM_TCAM3_DEST_MASK |
+ FBNIC_TCE_RAM_TCAM3_VALIDATE);
+}
+
+static void __fbnic_write_tce_tcam_rev(struct fbnic_dev *fbd)
+{
+ int tcam_idx = FBNIC_TCE_TCAM_NUM_ENTRIES;
+ int mac_idx;
+
+ for (mac_idx = ARRAY_SIZE(fbd->mac_addr); mac_idx--;) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[mac_idx];
+
+ /* Verify BMC bit is set */
+ if (!test_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam))
+ continue;
+
+ if (!tcam_idx) {
+ dev_err(fbd->dev, "TCE TCAM overflow\n");
+ return;
+ }
+
+ tcam_idx--;
+ fbnic_write_tce_tcam_dest(fbd, tcam_idx, mac_addr);
+ fbnic_write_tce_tcam_entry(fbd, tcam_idx, mac_addr);
+ }
+
+ while (tcam_idx)
+ fbnic_clear_tce_tcam_entry(fbd, --tcam_idx);
+
+ fbd->tce_tcam_last = tcam_idx;
+}
+
+static void __fbnic_write_tce_tcam(struct fbnic_dev *fbd)
+{
+ int tcam_idx = 0;
+ int mac_idx;
+
+ for (mac_idx = 0; mac_idx < ARRAY_SIZE(fbd->mac_addr); mac_idx++) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[mac_idx];
+
+ /* Verify BMC bit is set */
+ if (!test_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam))
+ continue;
+
+ if (tcam_idx == FBNIC_TCE_TCAM_NUM_ENTRIES) {
+ dev_err(fbd->dev, "TCE TCAM overflow\n");
+ return;
+ }
+
+ fbnic_write_tce_tcam_dest(fbd, tcam_idx, mac_addr);
+ fbnic_write_tce_tcam_entry(fbd, tcam_idx, mac_addr);
+ tcam_idx++;
+ }
+
+ while (tcam_idx < FBNIC_TCE_TCAM_NUM_ENTRIES)
+ fbnic_clear_tce_tcam_entry(fbd, tcam_idx++);
+
+ fbd->tce_tcam_last = tcam_idx;
+}
+
+void fbnic_write_tce_tcam(struct fbnic_dev *fbd)
+{
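+ /* Alternate between filling the TCE TCAM from the top down and from
+ * the bottom up on successive writes.
+ */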
+ if (fbd->tce_tcam_last)
+ __fbnic_write_tce_tcam_rev(fbd);
+ else
+ __fbnic_write_tce_tcam(fbd);
+}
+
+struct fbnic_ip_addr *__fbnic_ip4_sync(struct fbnic_dev *fbd,
+ struct fbnic_ip_addr *ip_addr,
+ const struct in_addr *addr,
+ const struct in_addr *mask)
+{
+ struct fbnic_ip_addr *avail_addr = NULL;
+ unsigned int i;
+
+ /* Scan from top of list to bottom, filling bottom up. */
+ for (i = 0; i < FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES; i++, ip_addr++) {
+ struct in6_addr *m = &ip_addr->mask;
+
+ if (ip_addr->state == FBNIC_TCAM_S_DISABLED) {
+ avail_addr = ip_addr;
+ continue;
+ }
+
+ if (ip_addr->version != 4)
+ continue;
+
+ /* Drop avail_addr if mask is a subset of our current mask.
+ * This prevents us from inserting a longer prefix behind a
+ * shorter one.
+ *
+ * The mask is stored as an inverted value so as an example:
+ * m ffff ffff ffff ffff ffff ffff ffff 0000 0000
+ * mask 0000 0000 0000 0000 0000 0000 0000 ffff ffff
+ *
+ * "m" and "mask" represent typical IPv4 mask stored in
+ * the TCAM and those provided by the stack. The code below
+ * should return a non-zero result if there is a 0 stored
+ * anywhere in "m" where "mask" has a 0.
+ */
+ if (~m->s6_addr32[3] & ~mask->s_addr) {
+ avail_addr = NULL;
+ continue;
+ }
+
+ /* Check to see if the mask actually contains fewer bits than
+ * our new mask "m". The XOR below should only result in 0 if
+ * "m" is masking a bit that we are looking for in our new
+ * "mask", we eliminated the 0^0 case with the check above.
+ *
+ * If it contains fewer bits we need to stop here, otherwise
+ * we might be adding an unreachable rule.
+ */
+ if (~(m->s6_addr32[3] ^ mask->s_addr))
+ break;
+
+ if (ip_addr->value.s6_addr32[3] == addr->s_addr) {
+ avail_addr = ip_addr;
+ break;
+ }
+ }
+
+ if (avail_addr && avail_addr->state == FBNIC_TCAM_S_DISABLED) {
+ ipv6_addr_set(&avail_addr->value, 0, 0, 0, addr->s_addr);
+ ipv6_addr_set(&avail_addr->mask, htonl(~0), htonl(~0),
+ htonl(~0), ~mask->s_addr);
+ avail_addr->version = 4;
+
+ avail_addr->state = FBNIC_TCAM_S_ADD;
+ }
+
+ return avail_addr;
+}
+
+struct fbnic_ip_addr *__fbnic_ip6_sync(struct fbnic_dev *fbd,
+ struct fbnic_ip_addr *ip_addr,
+ const struct in6_addr *addr,
+ const struct in6_addr *mask)
+{
+ struct fbnic_ip_addr *avail_addr = NULL;
+ unsigned int i;
+
+ ip_addr = &ip_addr[FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES - 1];
+
+ /* Scan from bottom of list to top, filling top down. */
+ for (i = FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES; i--; ip_addr--) {
+ struct in6_addr *m = &ip_addr->mask;
+
+ if (ip_addr->state == FBNIC_TCAM_S_DISABLED) {
+ avail_addr = ip_addr;
+ continue;
+ }
+
+ if (ip_addr->version != 6)
+ continue;
+
+ /* Drop avail_addr if mask is a superset of our current mask.
+ * This prevents us from inserting a longer prefix behind a
+ * shorter one.
+ *
+ * The mask is stored as an inverted value so as an example:
+ * m 0000 0000 0000 0000 0000 0000 0000 0000 0000
+ * mask ffff ffff ffff ffff ffff ffff ffff ffff ffff
+ *
+ * "m" and "mask" represent typical IPv6 mask stored in
+ * the TCAM and those provided by the stack. The code below
+ * should return a non-zero result which will cause us
+ * to drop the avail_addr value that might be cached
+ * to prevent us from dropping a v6 address behind it.
+ */
+ if ((m->s6_addr32[0] & mask->s6_addr32[0]) |
+ (m->s6_addr32[1] & mask->s6_addr32[1]) |
+ (m->s6_addr32[2] & mask->s6_addr32[2]) |
+ (m->s6_addr32[3] & mask->s6_addr32[3])) {
+ avail_addr = NULL;
+ continue;
+ }
+
+ /* The previous test eliminated any overlap between the
+ * two values so now we need to check for gaps.
+ *
+ * If the mask is equal to our current mask then it should
+ * result with m ^ mask = ffff ffff; if however the value
+ * stored in m is bigger, then we should see a 0 appear
+ * somewhere in the mask.
+ */
+ if (~(m->s6_addr32[0] ^ mask->s6_addr32[0]) |
+ ~(m->s6_addr32[1] ^ mask->s6_addr32[1]) |
+ ~(m->s6_addr32[2] ^ mask->s6_addr32[2]) |
+ ~(m->s6_addr32[3] ^ mask->s6_addr32[3]))
+ break;
+
+ if (ipv6_addr_cmp(&ip_addr->value, addr))
+ continue;
+
+ avail_addr = ip_addr;
+ break;
+ }
+
+ if (avail_addr && avail_addr->state == FBNIC_TCAM_S_DISABLED) {
+ memcpy(&avail_addr->value, addr, sizeof(*addr));
+ ipv6_addr_set(&avail_addr->mask,
+ ~mask->s6_addr32[0], ~mask->s6_addr32[1],
+ ~mask->s6_addr32[2], ~mask->s6_addr32[3]);
+ avail_addr->version = 6;
+
+ avail_addr->state = FBNIC_TCAM_S_ADD;
+ }
+
+ return avail_addr;
+}
+
+int __fbnic_ip_unsync(struct fbnic_ip_addr *ip_addr, unsigned int tcam_idx)
+{
+ if (!test_and_clear_bit(tcam_idx, ip_addr->act_tcam))
+ return -ENOENT;
+
+ if (bitmap_empty(ip_addr->act_tcam, FBNIC_RPC_TCAM_ACT_NUM_ENTRIES))
+ ip_addr->state = FBNIC_TCAM_S_DELETE;
+
+ return 0;
+}
+
+static void fbnic_clear_ip_src_entry(struct fbnic_dev *fbd, unsigned int idx)
+{
+ int i;
+
+ /* Invalidate entry and clear addr state info */
+ for (i = 0; i <= FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_IPSRC(idx, i), 0);
+}
+
+static void fbnic_clear_ip_dst_entry(struct fbnic_dev *fbd, unsigned int idx)
+{
+ int i;
+
+ /* Invalidate entry and clear addr state info */
+ for (i = 0; i <= FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_IPDST(idx, i), 0);
+}
+
+static void fbnic_clear_ip_outer_src_entry(struct fbnic_dev *fbd,
+ unsigned int idx)
+{
+ int i;
+
+ /* Invalidate entry and clear addr state info */
+ for (i = 0; i <= FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPSRC(idx, i), 0);
+}
+
+static void fbnic_clear_ip_outer_dst_entry(struct fbnic_dev *fbd,
+ unsigned int idx)
+{
+ int i;
+
+ /* Invalidate entry and clear addr state info */
+ for (i = 0; i <= FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPDST(idx, i), 0);
+}
+
+static void fbnic_write_ip_src_entry(struct fbnic_dev *fbd, unsigned int idx,
+ struct fbnic_ip_addr *ip_addr)
+{
+ __be16 *mask, *value;
+ int i;
+
+ mask = &ip_addr->mask.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
+ value = &ip_addr->value.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
+
+ for (i = 0; i < FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_IPSRC(idx, i),
+ FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_MASK, ntohs(*mask--)) |
+ FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_VALUE, ntohs(*value--)));
+ wrfl(fbd);
+
+ /* Bit 129 is used to flag for v4/v6 */
+ wr32(fbd, FBNIC_RPC_TCAM_IPSRC(idx, i),
+ (ip_addr->version == 6) | FBNIC_RPC_TCAM_VALIDATE);
+}
+
+static void fbnic_write_ip_dst_entry(struct fbnic_dev *fbd, unsigned int idx,
+ struct fbnic_ip_addr *ip_addr)
+{
+ __be16 *mask, *value;
+ int i;
+
+ mask = &ip_addr->mask.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
+ value = &ip_addr->value.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
+
+ for (i = 0; i < FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_IPDST(idx, i),
+ FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_MASK, ntohs(*mask--)) |
+ FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_VALUE, ntohs(*value--)));
+ wrfl(fbd);
+
+ /* Bit 129 is used to flag for v4/v6 */
+ wr32(fbd, FBNIC_RPC_TCAM_IPDST(idx, i),
+ (ip_addr->version == 6) | FBNIC_RPC_TCAM_VALIDATE);
+}
+
+static void fbnic_write_ip_outer_src_entry(struct fbnic_dev *fbd,
+ unsigned int idx,
+ struct fbnic_ip_addr *ip_addr)
+{
+ __be16 *mask, *value;
+ int i;
+
+ mask = &ip_addr->mask.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
+ value = &ip_addr->value.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
+
+ for (i = 0; i < FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPSRC(idx, i),
+ FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_MASK, ntohs(*mask--)) |
+ FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_VALUE, ntohs(*value--)));
+ wrfl(fbd);
+
+ wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPSRC(idx, i), FBNIC_RPC_TCAM_VALIDATE);
+}
+
+static void fbnic_write_ip_outer_dst_entry(struct fbnic_dev *fbd,
+ unsigned int idx,
+ struct fbnic_ip_addr *ip_addr)
+{
+ __be16 *mask, *value;
+ int i;
+
+ mask = &ip_addr->mask.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
+ value = &ip_addr->value.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
+
+ for (i = 0; i < FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPDST(idx, i),
+ FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_MASK, ntohs(*mask--)) |
+ FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_VALUE, ntohs(*value--)));
+ wrfl(fbd);
+
+ wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPDST(idx, i), FBNIC_RPC_TCAM_VALIDATE);
+}
+
+void fbnic_write_ip_addr(struct fbnic_dev *fbd)
+{
+ int idx;
+
+ for (idx = ARRAY_SIZE(fbd->ip_src); idx--;) {
+ struct fbnic_ip_addr *ip_addr = &fbd->ip_src[idx];
+
+ /* Check if update flag is set else skip. */
+ if (!(ip_addr->state & FBNIC_TCAM_S_UPDATE))
+ continue;
+
+ /* Clear by writing 0s. */
+ if (ip_addr->state == FBNIC_TCAM_S_DELETE) {
+ /* Invalidate entry and clear addr state info */
+ fbnic_clear_ip_src_entry(fbd, idx);
+ memset(ip_addr, 0, sizeof(*ip_addr));
+
+ continue;
+ }
+
+ fbnic_write_ip_src_entry(fbd, idx, ip_addr);
+
+ ip_addr->state = FBNIC_TCAM_S_VALID;
+ }
+
+ /* Repeat process for other IP TCAMs */
+ for (idx = ARRAY_SIZE(fbd->ip_dst); idx--;) {
+ struct fbnic_ip_addr *ip_addr = &fbd->ip_dst[idx];
+
+ if (!(ip_addr->state & FBNIC_TCAM_S_UPDATE))
+ continue;
+
+ if (ip_addr->state == FBNIC_TCAM_S_DELETE) {
+ fbnic_clear_ip_dst_entry(fbd, idx);
+ memset(ip_addr, 0, sizeof(*ip_addr));
+
+ continue;
+ }
+
+ fbnic_write_ip_dst_entry(fbd, idx, ip_addr);
+
+ ip_addr->state = FBNIC_TCAM_S_VALID;
+ }
+
+ for (idx = ARRAY_SIZE(fbd->ipo_src); idx--;) {
+ struct fbnic_ip_addr *ip_addr = &fbd->ipo_src[idx];
+
+ if (!(ip_addr->state & FBNIC_TCAM_S_UPDATE))
+ continue;
+
+ if (ip_addr->state == FBNIC_TCAM_S_DELETE) {
+ fbnic_clear_ip_outer_src_entry(fbd, idx);
+ memset(ip_addr, 0, sizeof(*ip_addr));
+
+ continue;
+ }
+
+ fbnic_write_ip_outer_src_entry(fbd, idx, ip_addr);
+
+ ip_addr->state = FBNIC_TCAM_S_VALID;
+ }
+
+ for (idx = ARRAY_SIZE(fbd->ipo_dst); idx--;) {
+ struct fbnic_ip_addr *ip_addr = &fbd->ipo_dst[idx];
+
+ if (!(ip_addr->state & FBNIC_TCAM_S_UPDATE))
+ continue;
+
+ if (ip_addr->state == FBNIC_TCAM_S_DELETE) {
+ fbnic_clear_ip_outer_dst_entry(fbd, idx);
+ memset(ip_addr, 0, sizeof(*ip_addr));
+
+ continue;
+ }
+
+ fbnic_write_ip_outer_dst_entry(fbd, idx, ip_addr);
+
+ ip_addr->state = FBNIC_TCAM_S_VALID;
+ }
+}
+
+static void fbnic_clear_valid_act_tcam(struct fbnic_dev *fbd)
+{
+ int i = FBNIC_RPC_TCAM_ACT_NUM_ENTRIES - 1;
+ struct fbnic_act_tcam *act_tcam;
+
+ /* Work from the bottom up deleting all other rules from hardware */
+ do {
+ act_tcam = &fbd->act_tcam[i];
+
+ if (act_tcam->state != FBNIC_TCAM_S_VALID)
+ continue;
+
+ fbnic_clear_act_tcam(fbd, i);
+ act_tcam->state = FBNIC_TCAM_S_UPDATE;
+ } while (i--);
+}
+
+void fbnic_clear_rules(struct fbnic_dev *fbd)
+{
+ /* Clear MAC rules */
+ fbnic_clear_macda(fbd);
+
+ /* If BMC is present we need to preserve the last rule which
+ * will be used to route traffic to the BMC if it is received.
+ *
+ * At this point it should be the only MAC address in the MACDA
+ * so any unicast or multicast traffic received should be routed
+ * to it. So leave the last rule in place.
+ *
+ * It will be rewritten to add the host again when we bring
+ * the interface back up.
+ */
+ if (fbnic_bmc_present(fbd)) {
+ u32 dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
+ FBNIC_RPC_ACT_TBL0_DEST_BMC);
+ int i = FBNIC_RPC_TCAM_ACT_NUM_ENTRIES - 1;
+ struct fbnic_act_tcam *act_tcam;
+
+ act_tcam = &fbd->act_tcam[i];
+
+ if (act_tcam->state == FBNIC_TCAM_S_VALID &&
+ (act_tcam->dest & dest)) {
+ wr32(fbd, FBNIC_RPC_ACT_TBL0(i), dest);
+ wr32(fbd, FBNIC_RPC_ACT_TBL1(i), 0);
+
+ act_tcam->state = FBNIC_TCAM_S_UPDATE;
+ }
+ }
+
+ fbnic_clear_valid_act_tcam(fbd);
+}
+
+static void fbnic_delete_act_tcam(struct fbnic_dev *fbd, unsigned int idx)
+{
+ fbnic_clear_act_tcam(fbd, idx);
+ memset(&fbd->act_tcam[idx], 0, sizeof(struct fbnic_act_tcam));
+}
+
+static void fbnic_update_act_tcam(struct fbnic_dev *fbd, unsigned int idx)
+{
+ struct fbnic_act_tcam *act_tcam = &fbd->act_tcam[idx];
+ int i;
+
+ /* Update entry by writing the destination and RSS mask */
+ wr32(fbd, FBNIC_RPC_ACT_TBL0(idx), act_tcam->dest);
+ wr32(fbd, FBNIC_RPC_ACT_TBL1(idx), act_tcam->rss_en_mask);
+
+ /* Write new TCAM rule to hardware */
+ for (i = 0; i < FBNIC_RPC_TCAM_ACT_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_ACT(idx, i),
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT_MASK,
+ act_tcam->mask.tcam[i]) |
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT_VALUE,
+ act_tcam->value.tcam[i]));
+
+ wrfl(fbd);
+
+ wr32(fbd, FBNIC_RPC_TCAM_ACT(idx, i), FBNIC_RPC_TCAM_VALIDATE);
+ act_tcam->state = FBNIC_TCAM_S_VALID;
+}
+
+void fbnic_write_rules(struct fbnic_dev *fbd)
+{
+ int i;
+
+ /* Flush any pending action table rules */
+ for (i = 0; i < FBNIC_RPC_ACT_TBL_NUM_ENTRIES; i++) {
+ struct fbnic_act_tcam *act_tcam = &fbd->act_tcam[i];
+
+		/* Check if update flag is set else skip. */
+ if (!(act_tcam->state & FBNIC_TCAM_S_UPDATE))
+ continue;
+
+ if (act_tcam->state == FBNIC_TCAM_S_DELETE)
+ fbnic_delete_act_tcam(fbd, i);
+ else
+ fbnic_update_act_tcam(fbd, i);
+ }
+}
+
+void fbnic_rpc_reset_valid_entries(struct fbnic_dev *fbd)
+{
+ fbnic_clear_valid_act_tcam(fbd);
+ fbnic_clear_valid_macda(fbd);
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h
new file mode 100644
index 000000000000..3d4925b2ac75
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h
@@ -0,0 +1,232 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#ifndef _FBNIC_RPC_H_
+#define _FBNIC_RPC_H_
+
+#include <uapi/linux/in6.h>
+#include <linux/bitfield.h>
+
+struct in_addr;
+
+/* The TCAM state definitions follow an expected ordering.
+ * They start out disabled, then move through the following states:
+ * Disabled 0 -> Add 2
+ * Add 2 -> Valid 1
+ *
+ * Valid 1 -> Add/Update 2
+ * Add 2 -> Valid 1
+ *
+ * Valid 1 -> Delete 3
+ * Delete 3 -> Disabled 0
+ */
+enum {
+ FBNIC_TCAM_S_DISABLED = 0,
+ FBNIC_TCAM_S_VALID = 1,
+ FBNIC_TCAM_S_ADD = 2,
+ FBNIC_TCAM_S_UPDATE = FBNIC_TCAM_S_ADD,
+ FBNIC_TCAM_S_DELETE = 3,
+};
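To make this ordering concrete with the code earlier in this patch: fbnic_write_ip_addr() in fbnic_rpc.c picks up any entry whose state has the update bit set (both ADD and DELETE encode it), programs ADD/UPDATE entries and moves them to FBNIC_TCAM_S_VALID, and for DELETE entries zeroes the hardware row and memset()s the software entry back to FBNIC_TCAM_S_DISABLED; __fbnic_ip_unsync() is what flips a VALID entry to DELETE once no action-TCAM rule references it.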
+
+/* 32 MAC Destination Address TCAM Entries
+ * 4 registers DA[1:0], DA[3:2], DA[5:4], Validate
+ */
+#define FBNIC_RPC_TCAM_MACDA_WORD_LEN 3
+#define FBNIC_RPC_TCAM_MACDA_NUM_ENTRIES 32
+
+/* 8 IPSRC and IPDST TCAM Entries each
+ * 8 registers, Validate each
+ */
+#define FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN 8
+#define FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES 8
+
+#define FBNIC_RPC_TCAM_ACT_WORD_LEN 11
+#define FBNIC_RPC_TCAM_ACT_NUM_ENTRIES 64
+
+#define FBNIC_TCE_TCAM_WORD_LEN 3
+#define FBNIC_TCE_TCAM_NUM_ENTRIES 8
+
+struct fbnic_mac_addr {
+ union {
+ unsigned char addr8[ETH_ALEN];
+ __be16 addr16[FBNIC_RPC_TCAM_MACDA_WORD_LEN];
+ } mask, value;
+ unsigned char state;
+ DECLARE_BITMAP(act_tcam, FBNIC_RPC_TCAM_ACT_NUM_ENTRIES);
+};
+
+struct fbnic_ip_addr {
+ struct in6_addr mask, value;
+ unsigned char version;
+ unsigned char state;
+ DECLARE_BITMAP(act_tcam, FBNIC_RPC_TCAM_ACT_NUM_ENTRIES);
+};
+
+struct fbnic_act_tcam {
+ struct {
+ u16 tcam[FBNIC_RPC_TCAM_ACT_WORD_LEN];
+ } mask, value;
+ unsigned char state;
+ u16 rss_en_mask;
+ u32 dest;
+};
+
+enum {
+ FBNIC_RSS_EN_HOST_UDP6,
+ FBNIC_RSS_EN_HOST_UDP4,
+ FBNIC_RSS_EN_HOST_TCP6,
+ FBNIC_RSS_EN_HOST_TCP4,
+ FBNIC_RSS_EN_HOST_IP6,
+ FBNIC_RSS_EN_HOST_IP4,
+ FBNIC_RSS_EN_HOST_ETHER,
+ FBNIC_RSS_EN_XCAST_UDP6,
+#define FBNIC_RSS_EN_NUM_UNICAST FBNIC_RSS_EN_XCAST_UDP6
+ FBNIC_RSS_EN_XCAST_UDP4,
+ FBNIC_RSS_EN_XCAST_TCP6,
+ FBNIC_RSS_EN_XCAST_TCP4,
+ FBNIC_RSS_EN_XCAST_IP6,
+ FBNIC_RSS_EN_XCAST_IP4,
+ FBNIC_RSS_EN_XCAST_ETHER,
+ FBNIC_RSS_EN_NUM_ENTRIES
+};
+
+/* Reserve the first 2 entries for use by the BMC so that we can
+ * avoid allowing rules to get in the way of BMC unicast traffic.
+ */
+#define FBNIC_RPC_ACT_TBL_BMC_OFFSET 0
+#define FBNIC_RPC_ACT_TBL_BMC_ALL_MULTI_OFFSET 1
+
+/* This should leave us with 48 total entries in the TCAM that can be used
+ * for NFC after also deducting the 14 needed for RSS table programming.
+ */
+#define FBNIC_RPC_ACT_TBL_NFC_OFFSET 2
+
+/* We reserve the last 14 entries for RSS rules on the host. The BMC
+ * unicast rule will need to be populated above these and is expected to
+ * use MACDA TCAM entry 23 to store the BMC MAC address.
+ */
+#define FBNIC_RPC_ACT_TBL_RSS_OFFSET \
+ (FBNIC_RPC_ACT_TBL_NUM_ENTRIES - FBNIC_RSS_EN_NUM_ENTRIES)
+
+#define FBNIC_RPC_ACT_TBL_NFC_ENTRIES \
+ (FBNIC_RPC_ACT_TBL_RSS_OFFSET - FBNIC_RPC_ACT_TBL_NFC_OFFSET)
+
+/* Flags used to identify the owner of this MAC filter. Note that any
+ * flags set for Broadcast through Promisc indicate that the rule belongs
+ * to the RSS filters for the host.
+ */
+enum {
+ FBNIC_MAC_ADDR_T_BMC = 0,
+ FBNIC_MAC_ADDR_T_BROADCAST = FBNIC_RPC_ACT_TBL_RSS_OFFSET,
+#define FBNIC_MAC_ADDR_T_HOST_START FBNIC_MAC_ADDR_T_BROADCAST
+ FBNIC_MAC_ADDR_T_MULTICAST,
+ FBNIC_MAC_ADDR_T_UNICAST,
+	FBNIC_MAC_ADDR_T_ALLMULTI,	/* BROADCAST ... MULTICAST */
+ FBNIC_MAC_ADDR_T_PROMISC, /* BROADCAST ... UNICAST */
+ FBNIC_MAC_ADDR_T_HOST_LAST
+};
+
+#define FBNIC_MAC_ADDR_T_HOST_LEN \
+ (FBNIC_MAC_ADDR_T_HOST_LAST - FBNIC_MAC_ADDR_T_HOST_START)
+
+#define FBNIC_RPC_TCAM_ACT0_IPSRC_IDX CSR_GENMASK(2, 0)
+#define FBNIC_RPC_TCAM_ACT0_IPSRC_VALID CSR_BIT(3)
+#define FBNIC_RPC_TCAM_ACT0_IPDST_IDX CSR_GENMASK(6, 4)
+#define FBNIC_RPC_TCAM_ACT0_IPDST_VALID CSR_BIT(7)
+#define FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX CSR_GENMASK(10, 8)
+#define FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_VALID CSR_BIT(11)
+#define FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX CSR_GENMASK(14, 12)
+#define FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_VALID CSR_BIT(15)
+
+#define FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX CSR_GENMASK(9, 5)
+#define FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID CSR_BIT(10)
+#define FBNIC_RPC_TCAM_ACT1_IP_IS_V6 CSR_BIT(11)
+#define FBNIC_RPC_TCAM_ACT1_IP_VALID CSR_BIT(12)
+#define FBNIC_RPC_TCAM_ACT1_OUTER_IP_VALID CSR_BIT(13)
+#define FBNIC_RPC_TCAM_ACT1_L4_IS_UDP CSR_BIT(14)
+#define FBNIC_RPC_TCAM_ACT1_L4_VALID CSR_BIT(15)
+
+/* TCAM 0 - 3 reserved for BMC MAC addresses */
+#define FBNIC_RPC_TCAM_MACDA_BMC_ADDR_IDX 0
+/* TCAM 4 reserved for broadcast MAC address */
+#define FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX 4
+/* TCAMs 5 - 30 will be used for multicast and unicast addresses. The
+ * boundary between the two is variable; it is currently set to 24,
+ * which is where the unicast addresses start. The general idea is that we
+ * will always go top-down with unicast, and bottom-up with multicast so
+ * that there should be free space in the middle between the two.
+ *
+ * The entry at MACDA_DEFAULT_BOUNDARY is a special case as it can be used
+ * for the ALL MULTI address if the list is full, or the BMC has requested
+ * it.
+ */
+#define FBNIC_RPC_TCAM_MACDA_MULTICAST_IDX 5
+#define FBNIC_RPC_TCAM_MACDA_DEFAULT_BOUNDARY 24
+#define FBNIC_RPC_TCAM_MACDA_HOST_ADDR_IDX 30
+/* Reserved for use to record Multicast promisc, or Promiscuous */
+#define FBNIC_RPC_TCAM_MACDA_PROMISC_IDX 31
+
+enum {
+ FBNIC_UDP6_HASH_OPT,
+ FBNIC_UDP4_HASH_OPT,
+ FBNIC_TCP6_HASH_OPT,
+ FBNIC_TCP4_HASH_OPT,
+#define FBNIC_L4_HASH_OPT FBNIC_TCP4_HASH_OPT
+ FBNIC_IPV6_HASH_OPT,
+ FBNIC_IPV4_HASH_OPT,
+#define FBNIC_IP_HASH_OPT FBNIC_IPV4_HASH_OPT
+ FBNIC_ETHER_HASH_OPT,
+ FBNIC_NUM_HASH_OPT,
+};
+
+struct fbnic_dev;
+struct fbnic_net;
+
+void fbnic_bmc_rpc_init(struct fbnic_dev *fbd);
+void fbnic_bmc_rpc_all_multi_config(struct fbnic_dev *fbd, bool enable_host);
+void fbnic_bmc_rpc_check(struct fbnic_dev *fbd);
+
+void fbnic_reset_indir_tbl(struct fbnic_net *fbn);
+void fbnic_rss_key_fill(u32 *buffer);
+void fbnic_rss_init_en_mask(struct fbnic_net *fbn);
+void fbnic_rss_disable_hw(struct fbnic_dev *fbd);
+void fbnic_rss_reinit_hw(struct fbnic_dev *fbd, struct fbnic_net *fbn);
+void fbnic_rss_reinit(struct fbnic_dev *fbd, struct fbnic_net *fbn);
+u16 fbnic_flow_hash_2_rss_en_mask(struct fbnic_net *fbn, int flow_type);
+
+int __fbnic_xc_unsync(struct fbnic_mac_addr *mac_addr, unsigned int tcam_idx);
+struct fbnic_mac_addr *__fbnic_uc_sync(struct fbnic_dev *fbd,
+ const unsigned char *addr);
+struct fbnic_mac_addr *__fbnic_mc_sync(struct fbnic_dev *fbd,
+ const unsigned char *addr);
+void fbnic_sift_macda(struct fbnic_dev *fbd);
+void fbnic_write_macda(struct fbnic_dev *fbd);
+
+void fbnic_promisc_sync(struct fbnic_dev *fbd,
+ bool uc_promisc, bool mc_promisc);
+
+struct fbnic_ip_addr *__fbnic_ip4_sync(struct fbnic_dev *fbd,
+ struct fbnic_ip_addr *ip_addr,
+ const struct in_addr *addr,
+ const struct in_addr *mask);
+struct fbnic_ip_addr *__fbnic_ip6_sync(struct fbnic_dev *fbd,
+ struct fbnic_ip_addr *ip_addr,
+ const struct in6_addr *addr,
+ const struct in6_addr *mask);
+int __fbnic_ip_unsync(struct fbnic_ip_addr *ip_addr, unsigned int tcam_idx);
+void fbnic_write_ip_addr(struct fbnic_dev *fbd);
+
+static inline int __fbnic_uc_unsync(struct fbnic_mac_addr *mac_addr)
+{
+ return __fbnic_xc_unsync(mac_addr, FBNIC_MAC_ADDR_T_UNICAST);
+}
+
+static inline int __fbnic_mc_unsync(struct fbnic_mac_addr *mac_addr)
+{
+ return __fbnic_xc_unsync(mac_addr, FBNIC_MAC_ADDR_T_MULTICAST);
+}
+
+void fbnic_clear_rules(struct fbnic_dev *fbd);
+void fbnic_write_rules(struct fbnic_dev *fbd);
+void fbnic_write_tce_tcam(struct fbnic_dev *fbd);
+#endif /* _FBNIC_RPC_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_time.c b/drivers/net/ethernet/meta/fbnic/fbnic_time.c
new file mode 100644
index 000000000000..db7748189f45
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_time.c
@@ -0,0 +1,303 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/bitfield.h>
+#include <linux/jiffies.h>
+#include <linux/limits.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/timer.h>
+
+#include "fbnic.h"
+#include "fbnic_csr.h"
+#include "fbnic_netdev.h"
+
+/* FBNIC timing & PTP implementation
+ * Datapath uses truncated 40b timestamps for scheduling and event reporting.
+ * We need to promote those to full 64b, hence we periodically cache the top
+ * 32bit of the HW time counter. Since this makes our time reporting non-atomic
+ * we leave the HW clock free running and adjust time offsets in SW as needed.
+ * Time offset is 64bit - we need a seq counter for 32bit machines.
+ * Time offset and the cache of top bits are independent so we don't need
+ * a coherent snapshot of both - READ_ONCE()/WRITE_ONCE() + writer side lock
+ * are enough.
+ */
+
+/* Period of refresh of top bits of timestamp, give ourselves an 8x margin.
+ * This should translate to once a minute.
+ * The use of nsecs_to_jiffies() should be safe for a <=40b nsec value.
+ */
+#define FBNIC_TS_HIGH_REFRESH_JIF nsecs_to_jiffies((1ULL << 40) / 16)
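For reference, the arithmetic behind this period: 2^40 ns is roughly 1099.5 seconds (about 18.3 minutes), so (1ULL << 40) / 16 ns works out to about 68.7 seconds, i.e. the cached high bits are refreshed roughly once a minute. That keeps the periodic read comfortably inside the ~9 minute window (U8_MAX / 2 counts of 2^32 ns) that the wrap check in fbnic_ts40_to_ns() tolerates, which is presumably where the 8x margin figure comes from.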
+
+static struct fbnic_dev *fbnic_from_ptp_info(struct ptp_clock_info *ptp)
+{
+ return container_of(ptp, struct fbnic_dev, ptp_info);
+}
+
+/* This function is "slow" because we could try guessing which high part
+ * is correct based on low instead of re-reading, and skip reading @hi
+ * twice altogether if @lo is far enough from 0.
+ */
+static u64 __fbnic_time_get_slow(struct fbnic_dev *fbd)
+{
+ u32 hi, lo;
+
+ lockdep_assert_held(&fbd->time_lock);
+
+ do {
+ hi = fbnic_rd32(fbd, FBNIC_PTP_CTR_VAL_HI);
+ lo = fbnic_rd32(fbd, FBNIC_PTP_CTR_VAL_LO);
+ } while (hi != fbnic_rd32(fbd, FBNIC_PTP_CTR_VAL_HI));
+
+ return (u64)hi << 32 | lo;
+}
+
+static void __fbnic_time_set_addend(struct fbnic_dev *fbd, u64 addend)
+{
+ lockdep_assert_held(&fbd->time_lock);
+
+ fbnic_wr32(fbd, FBNIC_PTP_ADD_VAL_NS,
+ FIELD_PREP(FBNIC_PTP_ADD_VAL_NS_MASK, addend >> 32));
+ fbnic_wr32(fbd, FBNIC_PTP_ADD_VAL_SUBNS, (u32)addend);
+}
+
+static void fbnic_ptp_fresh_check(struct fbnic_dev *fbd)
+{
+ if (time_is_after_jiffies(fbd->last_read +
+ FBNIC_TS_HIGH_REFRESH_JIF * 3 / 2))
+ return;
+
+ dev_warn(fbd->dev, "NIC timestamp refresh stall, delayed by %lu sec\n",
+ (jiffies - fbd->last_read - FBNIC_TS_HIGH_REFRESH_JIF) / HZ);
+}
+
+static void fbnic_ptp_refresh_time(struct fbnic_dev *fbd, struct fbnic_net *fbn)
+{
+ unsigned long flags;
+ u32 hi;
+
+ spin_lock_irqsave(&fbd->time_lock, flags);
+ hi = fbnic_rd32(fbn->fbd, FBNIC_PTP_CTR_VAL_HI);
+ if (!fbnic_present(fbd))
+ goto out; /* Don't bother handling, reset is pending */
+	/* Let's keep the cached high value a bit lower to avoid racing with
+	 * incoming timestamps. The logic in fbnic_ts40_to_ns() will
+	 * take care of overflow in this case. It makes the cached time
+	 * ~1 minute lower, so incoming timestamps will always be later
+	 * than the cached time.
+	 */
+ WRITE_ONCE(fbn->time_high, hi - 16);
+ fbd->last_read = jiffies;
+ out:
+ spin_unlock_irqrestore(&fbd->time_lock, flags);
+}
+
+static long fbnic_ptp_do_aux_work(struct ptp_clock_info *ptp)
+{
+ struct fbnic_dev *fbd = fbnic_from_ptp_info(ptp);
+ struct fbnic_net *fbn;
+
+ fbn = netdev_priv(fbd->netdev);
+
+ fbnic_ptp_fresh_check(fbd);
+ fbnic_ptp_refresh_time(fbd, fbn);
+
+ return FBNIC_TS_HIGH_REFRESH_JIF;
+}
+
+static int fbnic_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct fbnic_dev *fbd = fbnic_from_ptp_info(ptp);
+ u64 addend, dclk_period;
+ unsigned long flags;
+
+ /* d_clock is 600 MHz; which in Q16.32 fixed point ns is: */
+ dclk_period = (((u64)1000000000) << 32) / FBNIC_CLOCK_FREQ;
+ addend = adjust_by_scaled_ppm(dclk_period, scaled_ppm);
+
+ spin_lock_irqsave(&fbd->time_lock, flags);
+ __fbnic_time_set_addend(fbd, addend);
+ fbnic_wr32(fbd, FBNIC_PTP_ADJUST, FBNIC_PTP_ADJUST_ADDEND_SET);
+
+ /* Flush, make sure FBNIC_PTP_ADD_VAL_* is stable for at least 4 clks */
+ fbnic_rd32(fbd, FBNIC_PTP_SPARE);
+ spin_unlock_irqrestore(&fbd->time_lock, flags);
+
+ return fbnic_present(fbd) ? 0 : -EIO;
+}
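Putting illustrative numbers on the fixed-point math above: with the 600 MHz d_clock, dclk_period = (10^9 << 32) / 600000000 ≈ 0x1AAAAAAAA, i.e. 1 ns in the upper 32 bits plus roughly 0.667 ns of Q32 fraction, for a nominal 1.667 ns per tick. adjust_by_scaled_ppm() scales that nominal addend by the requested scaled_ppm, and __fbnic_time_set_addend() then writes the upper half to FBNIC_PTP_ADD_VAL_NS and the lower half to FBNIC_PTP_ADD_VAL_SUBNS.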
+
+static int fbnic_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct fbnic_dev *fbd = fbnic_from_ptp_info(ptp);
+ struct fbnic_net *fbn;
+ unsigned long flags;
+
+ fbn = netdev_priv(fbd->netdev);
+
+ spin_lock_irqsave(&fbd->time_lock, flags);
+ u64_stats_update_begin(&fbn->time_seq);
+ WRITE_ONCE(fbn->time_offset, READ_ONCE(fbn->time_offset) + delta);
+ u64_stats_update_end(&fbn->time_seq);
+ spin_unlock_irqrestore(&fbd->time_lock, flags);
+
+ return 0;
+}
+
+static int
+fbnic_ptp_gettimex64(struct ptp_clock_info *ptp, struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct fbnic_dev *fbd = fbnic_from_ptp_info(ptp);
+ struct fbnic_net *fbn;
+ unsigned long flags;
+ u64 time_ns;
+ u32 hi, lo;
+
+ fbn = netdev_priv(fbd->netdev);
+
+ spin_lock_irqsave(&fbd->time_lock, flags);
+
+ do {
+ hi = fbnic_rd32(fbd, FBNIC_PTP_CTR_VAL_HI);
+ ptp_read_system_prets(sts);
+ lo = fbnic_rd32(fbd, FBNIC_PTP_CTR_VAL_LO);
+ ptp_read_system_postts(sts);
+ /* Similarly to comment above __fbnic_time_get_slow()
+ * - this can be optimized if needed.
+ */
+ } while (hi != fbnic_rd32(fbd, FBNIC_PTP_CTR_VAL_HI));
+
+ time_ns = ((u64)hi << 32 | lo) + fbn->time_offset;
+ spin_unlock_irqrestore(&fbd->time_lock, flags);
+
+ if (!fbnic_present(fbd))
+ return -EIO;
+
+ *ts = ns_to_timespec64(time_ns);
+
+ return 0;
+}
+
+static int
+fbnic_ptp_settime64(struct ptp_clock_info *ptp, const struct timespec64 *ts)
+{
+ struct fbnic_dev *fbd = fbnic_from_ptp_info(ptp);
+ struct fbnic_net *fbn;
+ unsigned long flags;
+ u64 dev_ns, host_ns;
+ int ret;
+
+ fbn = netdev_priv(fbd->netdev);
+
+ host_ns = timespec64_to_ns(ts);
+
+ spin_lock_irqsave(&fbd->time_lock, flags);
+
+ dev_ns = __fbnic_time_get_slow(fbd);
+
+ if (fbnic_present(fbd)) {
+ u64_stats_update_begin(&fbn->time_seq);
+ WRITE_ONCE(fbn->time_offset, host_ns - dev_ns);
+ u64_stats_update_end(&fbn->time_seq);
+ ret = 0;
+ } else {
+ ret = -EIO;
+ }
+ spin_unlock_irqrestore(&fbd->time_lock, flags);
+
+ return ret;
+}
+
+static const struct ptp_clock_info fbnic_ptp_info = {
+ .owner = THIS_MODULE,
+ /* 1,000,000,000 - 1 PPB to ensure increment is positive
+ * after max negative adjustment.
+ */
+ .max_adj = 999999999,
+ .do_aux_work = fbnic_ptp_do_aux_work,
+ .adjfine = fbnic_ptp_adjfine,
+ .adjtime = fbnic_ptp_adjtime,
+ .gettimex64 = fbnic_ptp_gettimex64,
+ .settime64 = fbnic_ptp_settime64,
+};
+
+static void fbnic_ptp_reset(struct fbnic_dev *fbd)
+{
+ struct fbnic_net *fbn = netdev_priv(fbd->netdev);
+ u64 dclk_period;
+
+ fbnic_wr32(fbd, FBNIC_PTP_CTRL,
+ FBNIC_PTP_CTRL_EN |
+ FIELD_PREP(FBNIC_PTP_CTRL_TICK_IVAL, 1));
+
+ /* d_clock is 600 MHz; which in Q16.32 fixed point ns is: */
+ dclk_period = (((u64)1000000000) << 32) / FBNIC_CLOCK_FREQ;
+
+ __fbnic_time_set_addend(fbd, dclk_period);
+
+ fbnic_wr32(fbd, FBNIC_PTP_INIT_HI, 0);
+ fbnic_wr32(fbd, FBNIC_PTP_INIT_LO, 0);
+
+ fbnic_wr32(fbd, FBNIC_PTP_ADJUST, FBNIC_PTP_ADJUST_INIT);
+
+ fbnic_wr32(fbd, FBNIC_PTP_CTRL,
+ FBNIC_PTP_CTRL_EN |
+ FBNIC_PTP_CTRL_TQS_OUT_EN |
+ FIELD_PREP(FBNIC_PTP_CTRL_MAC_OUT_IVAL, 3) |
+ FIELD_PREP(FBNIC_PTP_CTRL_TICK_IVAL, 1));
+
+ fbnic_rd32(fbd, FBNIC_PTP_SPARE);
+
+ fbn->time_offset = 0;
+ fbn->time_high = 0;
+}
+
+void fbnic_time_init(struct fbnic_net *fbn)
+{
+	/* This is not really a statistic, but the locking primitive fits
+	 * our use case perfectly; we need atomic 8-byte READ_ONCE() /
+	 * WRITE_ONCE() behavior.
+	 */
+ u64_stats_init(&fbn->time_seq);
+}
+
+int fbnic_time_start(struct fbnic_net *fbn)
+{
+ fbnic_ptp_refresh_time(fbn->fbd, fbn);
+ /* Assume that fbnic_ptp_do_aux_work() will never be called if not
+ * scheduled here
+ */
+ return ptp_schedule_worker(fbn->fbd->ptp, FBNIC_TS_HIGH_REFRESH_JIF);
+}
+
+void fbnic_time_stop(struct fbnic_net *fbn)
+{
+ ptp_cancel_worker_sync(fbn->fbd->ptp);
+ fbnic_ptp_fresh_check(fbn->fbd);
+}
+
+int fbnic_ptp_setup(struct fbnic_dev *fbd)
+{
+ struct device *dev = fbd->dev;
+ unsigned long flags;
+
+ spin_lock_init(&fbd->time_lock);
+
+ spin_lock_irqsave(&fbd->time_lock, flags); /* Appease lockdep */
+ fbnic_ptp_reset(fbd);
+ spin_unlock_irqrestore(&fbd->time_lock, flags);
+
+ memcpy(&fbd->ptp_info, &fbnic_ptp_info, sizeof(fbnic_ptp_info));
+
+ fbd->ptp = ptp_clock_register(&fbd->ptp_info, dev);
+ if (IS_ERR(fbd->ptp))
+ dev_err(dev, "Failed to register PTP: %pe\n", fbd->ptp);
+
+ return PTR_ERR_OR_ZERO(fbd->ptp);
+}
+
+void fbnic_ptp_destroy(struct fbnic_dev *fbd)
+{
+ if (!fbd->ptp)
+ return;
+ ptp_clock_unregister(fbd->ptp);
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_tlv.c b/drivers/net/ethernet/meta/fbnic/fbnic_tlv.c
new file mode 100644
index 000000000000..517ed8b2f1cb
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_tlv.c
@@ -0,0 +1,560 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/gfp.h>
+#include <linux/mm.h>
+#include <linux/once.h>
+#include <linux/random.h>
+#include <linux/string.h>
+#include <uapi/linux/if_ether.h>
+
+#include "fbnic_tlv.h"
+
+/**
+ * fbnic_tlv_msg_alloc - Allocate page and initialize FW message header
+ * @msg_id: Identifier for new message we are starting
+ *
+ * Return: pointer to start of message, or NULL on failure.
+ *
+ * Allocates a page and initializes message header at start of page.
+ * Initial message size is 1 DWORD which is just the header.
+ **/
+struct fbnic_tlv_msg *fbnic_tlv_msg_alloc(u16 msg_id)
+{
+ struct fbnic_tlv_hdr hdr = { 0 };
+ struct fbnic_tlv_msg *msg;
+
+ msg = (struct fbnic_tlv_msg *)__get_free_page(GFP_KERNEL);
+ if (!msg)
+ return NULL;
+
+ /* Start with zero filled header and then back fill with data */
+ hdr.type = msg_id;
+ hdr.is_msg = 1;
+ hdr.len = cpu_to_le16(1);
+
+ /* Copy header into start of message */
+ msg->hdr = hdr;
+
+ return msg;
+}
+
+/**
+ * fbnic_tlv_attr_put_flag - Add flag value to message
+ * @msg: Message header we are adding flag attribute to
+ * @attr_id: ID of flag attribute we are adding to message
+ *
+ * Return: -ENOSPC if there is no room for the attribute. Otherwise 0.
+ *
+ * Adds a 1 DWORD flag attribute to the message. The presence of this
+ * attribute can be used as a boolean value indicating true, otherwise the
+ * value is considered false.
+ **/
+int fbnic_tlv_attr_put_flag(struct fbnic_tlv_msg *msg, const u16 attr_id)
+{
+ int attr_max_len = PAGE_SIZE - offset_in_page(msg) - sizeof(*msg);
+ struct fbnic_tlv_hdr hdr = { 0 };
+ struct fbnic_tlv_msg *attr;
+
+ attr_max_len -= le16_to_cpu(msg->hdr.len) * sizeof(u32);
+ if (attr_max_len < sizeof(*attr))
+ return -ENOSPC;
+
+ /* Get header pointer and bump attr to start of data */
+ attr = &msg[le16_to_cpu(msg->hdr.len)];
+
+ /* Record attribute type and size */
+ hdr.type = attr_id;
+ hdr.len = cpu_to_le16(sizeof(hdr));
+
+ attr->hdr = hdr;
+ le16_add_cpu(&msg->hdr.len,
+ FBNIC_TLV_MSG_SIZE(le16_to_cpu(hdr.len)));
+
+ return 0;
+}
+
+/**
+ * fbnic_tlv_attr_put_value - Add data to message
+ * @msg: Message header we are adding flag attribute to
+ * @attr_id: ID of flag attribute we are adding to message
+ * @value: Pointer to data to be stored
+ * @len: Size of data to be stored.
+ *
+ * Return: -ENOSPC if there is no room for the attribute. Otherwise 0.
+ *
+ * Adds header and copies data pointed to by value into the message. The
+ * result is rounded up to the nearest DWORD for sizing so that the
+ * headers remain aligned.
+ *
+ * The assumption is that the value field is in a format where byte
+ * ordering can be guaranteed such as a byte array or a little endian
+ * format.
+ **/
+int fbnic_tlv_attr_put_value(struct fbnic_tlv_msg *msg, const u16 attr_id,
+ const void *value, const int len)
+{
+ int attr_max_len = PAGE_SIZE - offset_in_page(msg) - sizeof(*msg);
+ struct fbnic_tlv_hdr hdr = { 0 };
+ struct fbnic_tlv_msg *attr;
+
+ attr_max_len -= le16_to_cpu(msg->hdr.len) * sizeof(u32);
+ if (attr_max_len < sizeof(*attr) + len)
+ return -ENOSPC;
+
+ /* Get header pointer and bump attr to start of data */
+ attr = &msg[le16_to_cpu(msg->hdr.len)];
+
+ /* Record attribute type and size */
+ hdr.type = attr_id;
+ hdr.len = cpu_to_le16(sizeof(hdr) + len);
+
+ /* Zero pad end of region to be written if we aren't aligned */
+ if (len % sizeof(hdr))
+ attr->value[len / sizeof(hdr)] = 0;
+
+ /* Copy data over */
+ memcpy(attr->value, value, len);
+
+ attr->hdr = hdr;
+ le16_add_cpu(&msg->hdr.len,
+ FBNIC_TLV_MSG_SIZE(le16_to_cpu(hdr.len)));
+
+ return 0;
+}
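As a concrete sizing example (illustrative numbers only): storing a 5-byte value gives hdr.len = 9 (the 4-byte header plus 5 bytes of data), the zero-pad write above clears value[1] so the three trailing bytes of the final DWORD stay zero after the memcpy(), and the message length advances by FBNIC_TLV_MSG_SIZE(9) = 3 DWORDs.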
+
+/**
+ * __fbnic_tlv_attr_put_int - Add integer to message
+ * @msg: Message header we are adding flag attribute to
+ * @attr_id: ID of flag attribute we are adding to message
+ * @value: Data to be stored
+ * @len: Size of data to be stored, either 4 or 8 bytes.
+ *
+ * Return: -ENOSPC if there is no room for the attribute. Otherwise 0.
+ *
+ * Adds header and copies data pointed to by value into the message. Will
+ * format the data as little endian.
+ **/
+int __fbnic_tlv_attr_put_int(struct fbnic_tlv_msg *msg, const u16 attr_id,
+ s64 value, const int len)
+{
+ __le64 le64_value = cpu_to_le64(value);
+
+ return fbnic_tlv_attr_put_value(msg, attr_id, &le64_value, len);
+}
+
+/**
+ * fbnic_tlv_attr_put_mac_addr - Add mac_addr to message
+ * @msg: Message header we are adding flag attribute to
+ * @attr_id: ID of flag attribute we are adding to message
+ * @mac_addr: Byte pointer to MAC address to be stored
+ *
+ * Return: -ENOSPC if there is no room for the attribute. Otherwise 0.
+ *
+ * Adds header and copies data pointed to by mac_addr into the message. Will
+ * copy the address raw so it will be in big endian with start of MAC
+ * address at start of attribute.
+ **/
+int fbnic_tlv_attr_put_mac_addr(struct fbnic_tlv_msg *msg, const u16 attr_id,
+ const u8 *mac_addr)
+{
+ return fbnic_tlv_attr_put_value(msg, attr_id, mac_addr, ETH_ALEN);
+}
+
+/**
+ * fbnic_tlv_attr_put_string - Add string to message
+ * @msg: Message header we are adding flag attribute to
+ * @attr_id: ID of flag attribute we are adding to message
+ * @string: Byte pointer to null terminated string to be stored
+ *
+ * Return: -ENOSPC if there is no room for the attribute. Otherwise 0.
+ *
+ * Adds header and copies data pointed to by string into the message. Will
+ * copy the string raw so it will be in byte order.
+ **/
+int fbnic_tlv_attr_put_string(struct fbnic_tlv_msg *msg, u16 attr_id,
+ const char *string)
+{
+ int attr_max_len = PAGE_SIZE - sizeof(*msg);
+ int str_len = 1;
+
+	/* The max length will be the page size minus the existing message and
+	 * the new attribute header. Since the message is measured in DWORDs we
+	 * have to multiply the size by 4.
+ *
+ * The string length doesn't include the \0 so we have to add one to
+ * the final value, so start with that as our initial value.
+ *
+ * We will verify if the string will fit in fbnic_tlv_attr_put_value()
+ */
+ attr_max_len -= le16_to_cpu(msg->hdr.len) * sizeof(u32);
+ str_len += strnlen(string, attr_max_len);
+
+ return fbnic_tlv_attr_put_value(msg, attr_id, string, str_len);
+}
+
+/**
+ * fbnic_tlv_attr_get_unsigned - Retrieve unsigned value from result
+ * @attr: Attribute to retrieve data from
+ * @def: The default value if attr is NULL
+ *
+ * Return: unsigned 64b value containing integer value
+ **/
+u64 fbnic_tlv_attr_get_unsigned(struct fbnic_tlv_msg *attr, u64 def)
+{
+ __le64 le64_value = 0;
+
+ if (!attr)
+ return def;
+
+ memcpy(&le64_value, &attr->value[0],
+ le16_to_cpu(attr->hdr.len) - sizeof(*attr));
+
+ return le64_to_cpu(le64_value);
+}
+
+/**
+ * fbnic_tlv_attr_get_signed - Retrieve signed value from result
+ * @attr: Attribute to retrieve data from
+ * @def: The default value if attr is NULL
+ *
+ * Return: signed 64b value containing integer value
+ **/
+s64 fbnic_tlv_attr_get_signed(struct fbnic_tlv_msg *attr, s64 def)
+{
+ __le64 le64_value = 0;
+ int shift;
+ s64 value;
+
+ if (!attr)
+ return def;
+
+ shift = (8 + sizeof(*attr) - le16_to_cpu(attr->hdr.len)) * 8;
+
+ /* Copy the value and adjust for byte ordering */
+ memcpy(&le64_value, &attr->value[0],
+ le16_to_cpu(attr->hdr.len) - sizeof(*attr));
+ value = le64_to_cpu(le64_value);
+
+ /* Sign extend the return value by using a pair of shifts */
+ return (value << shift) >> shift;
+}
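For example (illustrative values): a 4-byte signed attribute carrying -5 arrives with hdr.len = 8 (4-byte header plus 4-byte value), so shift = (8 + 4 - 8) * 8 = 32; after the copy le64_value holds 0x00000000FFFFFFFB, and (value << 32) >> 32 sign-extends the low 32 bits so the function returns -5.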
+
+/**
+ * fbnic_tlv_attr_get_string - Retrieve string value from result
+ * @attr: Attribute to retrieve data from
+ * @dst: Pointer to an allocated string to store the data
+ * @dstsize: The maximum size which can be in dst
+ *
+ * Return: the size of the string read from firmware or negative error.
+ **/
+ssize_t fbnic_tlv_attr_get_string(struct fbnic_tlv_msg *attr, char *dst,
+ size_t dstsize)
+{
+ size_t srclen, len;
+ ssize_t ret;
+
+ if (!attr)
+ return -EINVAL;
+
+ if (dstsize == 0)
+ return -E2BIG;
+
+ srclen = le16_to_cpu(attr->hdr.len) - sizeof(*attr);
+ if (srclen > 0 && ((char *)attr->value)[srclen - 1] == '\0')
+ srclen--;
+
+ if (srclen >= dstsize) {
+ len = dstsize - 1;
+ ret = -E2BIG;
+ } else {
+ len = srclen;
+ ret = len;
+ }
+
+ memcpy(dst, &attr->value, len);
+ /* Zero pad end of dst. */
+ memset(dst + len, 0, dstsize - len);
+
+ return ret;
+}
+
+/**
+ * fbnic_tlv_attr_nest_start - Add nested attribute header to message
+ * @msg: Message header we are adding flag attribute to
+ * @attr_id: ID of flag attribute we are adding to message
+ *
+ * Return: NULL if there is no room for the attribute. Otherwise a pointer
+ * to the new attribute header.
+ *
+ * New header length is stored initially in DWORDs.
+ **/
+struct fbnic_tlv_msg *fbnic_tlv_attr_nest_start(struct fbnic_tlv_msg *msg,
+ u16 attr_id)
+{
+ int attr_max_len = PAGE_SIZE - offset_in_page(msg) - sizeof(*msg);
+ struct fbnic_tlv_msg *attr = &msg[le16_to_cpu(msg->hdr.len)];
+ struct fbnic_tlv_hdr hdr = { 0 };
+
+ /* Make sure we have space for at least the nest header plus one more */
+ attr_max_len -= le16_to_cpu(msg->hdr.len) * sizeof(u32);
+ if (attr_max_len < sizeof(*attr) * 2)
+ return NULL;
+
+ /* Record attribute type and size */
+ hdr.type = attr_id;
+
+ /* Add current message length to account for consumption within the
+	 * page and leave it as a multiple of DWORDs; we will shift to
+ * bytes when we close it out.
+ */
+ hdr.len = cpu_to_le16(1);
+
+ attr->hdr = hdr;
+
+ return attr;
+}
+
+/**
+ * fbnic_tlv_attr_nest_stop - Close out nested attribute and add it to message
+ * @msg: Message header we are adding flag attribute to
+ *
+ * Closes out nested attribute, adds length to message, and then bumps
+ * length from DWORDs to bytes to match other attributes.
+ **/
+void fbnic_tlv_attr_nest_stop(struct fbnic_tlv_msg *msg)
+{
+ struct fbnic_tlv_msg *attr = &msg[le16_to_cpu(msg->hdr.len)];
+ u16 len = le16_to_cpu(attr->hdr.len);
+
+ /* Add attribute to message if there is more than just a header */
+ if (len <= 1)
+ return;
+
+ le16_add_cpu(&msg->hdr.len, len);
+
+ /* Convert from DWORDs to bytes */
+ attr->hdr.len = cpu_to_le16(len * sizeof(u32));
+}
+
+static int
+fbnic_tlv_attr_validate(struct fbnic_tlv_msg *attr,
+ const struct fbnic_tlv_index *tlv_index)
+{
+ u16 len = le16_to_cpu(attr->hdr.len) - sizeof(*attr);
+ u16 attr_id = attr->hdr.type;
+ __le32 *value = &attr->value[0];
+
+ if (attr->hdr.is_msg)
+ return -EINVAL;
+
+ if (attr_id >= FBNIC_TLV_RESULTS_MAX)
+ return -EINVAL;
+
+ while (tlv_index->id != attr_id) {
+ if (tlv_index->id == FBNIC_TLV_ATTR_ID_UNKNOWN) {
+ if (attr->hdr.cannot_ignore)
+ return -ENOENT;
+ return le16_to_cpu(attr->hdr.len);
+ }
+
+ tlv_index++;
+ }
+
+ if (offset_in_page(attr) + len > PAGE_SIZE - sizeof(*attr))
+ return -E2BIG;
+
+ switch (tlv_index->type) {
+ case FBNIC_TLV_STRING:
+ if (!len || len > tlv_index->len)
+ return -EINVAL;
+ if (((char *)value)[len - 1])
+ return -EINVAL;
+ break;
+ case FBNIC_TLV_FLAG:
+ if (len)
+ return -EINVAL;
+ break;
+ case FBNIC_TLV_UNSIGNED:
+ case FBNIC_TLV_SIGNED:
+ if (tlv_index->len > sizeof(__le64))
+ return -EINVAL;
+ fallthrough;
+ case FBNIC_TLV_BINARY:
+ if (!len || len > tlv_index->len)
+ return -EINVAL;
+ break;
+ case FBNIC_TLV_NESTED:
+ case FBNIC_TLV_ARRAY:
+ if (len % 4)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * fbnic_tlv_attr_parse_array - Parse array of attributes into results array
+ * @attr: Start of attributes in the message
+ * @len: Length of attributes in the message
+ * @results: Array of pointers to store the results of parsing
+ * @tlv_index: List of TLV attributes to be parsed from message
+ * @tlv_attr_id: Specific ID that is repeated in array
+ * @array_len: Number of results to store in results array
+ *
+ * Return: zero on success, or negative value on error.
+ *
+ * Will take a list of attributes and a parser definition and will capture
+ * the results in the results array to have the data extracted later.
+ **/
+int fbnic_tlv_attr_parse_array(struct fbnic_tlv_msg *attr, int len,
+ struct fbnic_tlv_msg **results,
+ const struct fbnic_tlv_index *tlv_index,
+ u16 tlv_attr_id, size_t array_len)
+{
+ int i = 0;
+
+ /* Initialize results table to NULL. */
+ memset(results, 0, array_len * sizeof(results[0]));
+
+ /* Nothing to parse if header was only thing there */
+ if (!len)
+ return 0;
+
+ /* Work through list of attributes, parsing them as necessary */
+ while (len > 0) {
+ u16 attr_id = attr->hdr.type;
+ u16 attr_len;
+ int err;
+
+ if (tlv_attr_id != attr_id)
+ return -EINVAL;
+
+ /* Stop parsing on full error */
+ err = fbnic_tlv_attr_validate(attr, tlv_index);
+ if (err < 0)
+ return err;
+
+ if (i >= array_len)
+ return -ENOSPC;
+
+ results[i++] = attr;
+
+ attr_len = FBNIC_TLV_MSG_SIZE(le16_to_cpu(attr->hdr.len));
+ len -= attr_len;
+ attr += attr_len;
+ }
+
+ return len == 0 ? 0 : -EINVAL;
+}
+
+/**
+ * fbnic_tlv_attr_parse - Parse attributes into a list of attribute results
+ * @attr: Start of attributes in the message
+ * @len: Length of attributes in the message
+ * @results: Array of pointers to store the results of parsing
+ * @tlv_index: List of TLV attributes to be parsed from message
+ *
+ * Return: zero on success, or negative value on error.
+ *
+ * Will take a list of attributes and a parser definition and will capture
+ * the results in the results array to have the data extracted later.
+ **/
+int fbnic_tlv_attr_parse(struct fbnic_tlv_msg *attr, int len,
+ struct fbnic_tlv_msg **results,
+ const struct fbnic_tlv_index *tlv_index)
+{
+ /* Initialize results table to NULL. */
+ memset(results, 0, sizeof(results[0]) * FBNIC_TLV_RESULTS_MAX);
+
+ /* Nothing to parse if header was only thing there */
+ if (!len)
+ return 0;
+
+ /* Work through list of attributes, parsing them as necessary */
+ while (len > 0) {
+ int err = fbnic_tlv_attr_validate(attr, tlv_index);
+ u16 attr_id = attr->hdr.type;
+ u16 attr_len;
+
+ /* Stop parsing on full error */
+ if (err < 0)
+ return err;
+
+ /* Ignore results for unsupported values */
+ if (!err) {
+ /* Do not overwrite existing entries */
+ if (results[attr_id])
+ return -EADDRINUSE;
+
+ results[attr_id] = attr;
+ }
+
+ attr_len = FBNIC_TLV_MSG_SIZE(le16_to_cpu(attr->hdr.len));
+ len -= attr_len;
+ attr += attr_len;
+ }
+
+ return len == 0 ? 0 : -EINVAL;
+}
+
+/**
+ * fbnic_tlv_msg_parse - Parse message and process via predetermined functions
+ * @opaque: Value passed to parser function to enable driver access
+ * @msg: Message to be parsed.
+ * @parser: TLV message parser definition.
+ *
+ * Return: zero on success, or negative value on error.
+ *
+ * Will match the message against the set of known message types and will
+ * process it via the attribute parsing definitions and handler function
+ * provided in the parser array.
+ **/
+int fbnic_tlv_msg_parse(void *opaque, struct fbnic_tlv_msg *msg,
+ const struct fbnic_tlv_parser *parser)
+{
+ struct fbnic_tlv_msg *results[FBNIC_TLV_RESULTS_MAX];
+ u16 msg_id = msg->hdr.type;
+ int err;
+
+ if (!msg->hdr.is_msg)
+ return -EINVAL;
+
+ if (le16_to_cpu(msg->hdr.len) > PAGE_SIZE / sizeof(u32))
+ return -E2BIG;
+
+ while (parser->id != msg_id) {
+ if (parser->id == FBNIC_TLV_MSG_ID_UNKNOWN)
+ return -ENOENT;
+ parser++;
+ }
+
+ err = fbnic_tlv_attr_parse(&msg[1], le16_to_cpu(msg->hdr.len) - 1,
+ results, parser->attr);
+ if (err)
+ return err;
+
+ return parser->func(opaque, results);
+}
+
+/**
+ * fbnic_tlv_parser_error - called if message doesn't match known type
+ * @opaque: (unused)
+ * @results: (unused)
+ *
+ * Return: -EBADMSG to indicate the message is an unsupported type
+ **/
+int fbnic_tlv_parser_error(void *opaque, struct fbnic_tlv_msg **results)
+{
+ return -EBADMSG;
+}
+
+void fbnic_tlv_attr_addr_copy(u8 *dest, struct fbnic_tlv_msg *src)
+{
+ u8 *mac_addr;
+
+ mac_addr = fbnic_tlv_attr_get_value_ptr(src);
+ memcpy(dest, mac_addr, ETH_ALEN);
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_tlv.h b/drivers/net/ethernet/meta/fbnic/fbnic_tlv.h
new file mode 100644
index 000000000000..3508b46ebdd0
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_tlv.h
@@ -0,0 +1,158 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#ifndef _FBNIC_TLV_H_
+#define _FBNIC_TLV_H_
+
+#include <asm/byteorder.h>
+#include <linux/bits.h>
+#include <linux/const.h>
+#include <linux/types.h>
+
+#define FBNIC_TLV_MSG_ALIGN(len) ALIGN(len, sizeof(u32))
+#define FBNIC_TLV_MSG_SIZE(len) \
+ (FBNIC_TLV_MSG_ALIGN(len) / sizeof(u32))
+
+/* TLV Header Format
+ * 3 2 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Length |M|I|RSV| Type / ID |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * The TLV header format described above will be used for transferring
+ * messages between the host and the firmware. To ensure byte ordering
+ * we have defined all fields as being little endian.
+ * Type/ID: Identifier for message and/or attribute
+ * RSV: Reserved field for future use, likely as additional flags
+ * I: cannot_ignore flag, identifies if unrecognized attribute can be ignored
+ * M: is_msg, indicates that this is the start of a new message
+ * Length: Total length of message in dwords including header
+ * or
+ * Total length of attribute in bytes including header
+ */
+struct fbnic_tlv_hdr {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u16 type : 12; /* 0 .. 11 Type / ID */
+ u16 rsvd : 2; /* 12 .. 13 Reserved for future use */
+	u16 cannot_ignore	: 1;	/* 14	  Attribute cannot be ignored */
+ u16 is_msg : 1; /* 15 Header belongs to message */
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u16 is_msg : 1; /* 15 Header belongs to message */
+	u16 cannot_ignore	: 1;	/* 14	  Attribute cannot be ignored */
+ u16 rsvd : 2; /* 13 .. 12 Reserved for future use */
+ u16 type : 12; /* 11 .. 0 Type / ID */
+#else
+#error "Missing defines from byteorder.h"
+#endif
+	__le16	len;		/* 16 .. 31	length including TLV header */
+};
+
+#define FBNIC_TLV_RESULTS_MAX 32
+
+struct fbnic_tlv_msg {
+ struct fbnic_tlv_hdr hdr;
+ __le32 value[];
+};
+
+#define FBNIC_TLV_MSG_ID_UNKNOWN USHRT_MAX
+
+enum fbnic_tlv_type {
+ FBNIC_TLV_STRING,
+ FBNIC_TLV_FLAG,
+ FBNIC_TLV_UNSIGNED,
+ FBNIC_TLV_SIGNED,
+ FBNIC_TLV_BINARY,
+ FBNIC_TLV_NESTED,
+ FBNIC_TLV_ARRAY,
+ __FBNIC_TLV_MAX_TYPE
+};
+
+/* TLV Index
+ * Defines the relationship between the attribute IDs and their types.
+ * For each entry in the index there will be a size and type associated
+ * with it so that we can use this to parse the data and verify it matches
+ * the expected layout.
+ */
+struct fbnic_tlv_index {
+ u16 id;
+ u16 len;
+ enum fbnic_tlv_type type;
+};
+
+#define TLV_MAX_DATA ((PAGE_SIZE - 512) & 0xFFFF)
+#define FBNIC_TLV_ATTR_ID_UNKNOWN USHRT_MAX
+#define FBNIC_TLV_ATTR_STRING(id, len) { id, len, FBNIC_TLV_STRING }
+#define FBNIC_TLV_ATTR_FLAG(id) { id, 0, FBNIC_TLV_FLAG }
+#define FBNIC_TLV_ATTR_U32(id) { id, sizeof(u32), FBNIC_TLV_UNSIGNED }
+#define FBNIC_TLV_ATTR_U64(id) { id, sizeof(u64), FBNIC_TLV_UNSIGNED }
+#define FBNIC_TLV_ATTR_S32(id) { id, sizeof(s32), FBNIC_TLV_SIGNED }
+#define FBNIC_TLV_ATTR_S64(id) { id, sizeof(s64), FBNIC_TLV_SIGNED }
+#define FBNIC_TLV_ATTR_MAC_ADDR(id) { id, ETH_ALEN, FBNIC_TLV_BINARY }
+#define FBNIC_TLV_ATTR_NESTED(id) { id, 0, FBNIC_TLV_NESTED }
+#define FBNIC_TLV_ATTR_ARRAY(id) { id, 0, FBNIC_TLV_ARRAY }
+#define FBNIC_TLV_ATTR_RAW_DATA(id) { id, TLV_MAX_DATA, FBNIC_TLV_BINARY }
+#define FBNIC_TLV_ATTR_LAST { FBNIC_TLV_ATTR_ID_UNKNOWN, 0, 0 }
+
+struct fbnic_tlv_parser {
+ u16 id;
+ const struct fbnic_tlv_index *attr;
+ int (*func)(void *opaque,
+ struct fbnic_tlv_msg **results);
+};
+
+#define FBNIC_TLV_PARSER(id, attr, func) { FBNIC_TLV_MSG_ID_##id, attr, func }
+
+static inline void *
+fbnic_tlv_attr_get_value_ptr(struct fbnic_tlv_msg *attr)
+{
+ return (void *)&attr->value[0];
+}
+
+static inline bool fbnic_tlv_attr_get_bool(struct fbnic_tlv_msg *attr)
+{
+ return !!attr;
+}
+
+u64 fbnic_tlv_attr_get_unsigned(struct fbnic_tlv_msg *attr, u64 def);
+s64 fbnic_tlv_attr_get_signed(struct fbnic_tlv_msg *attr, s64 def);
+ssize_t fbnic_tlv_attr_get_string(struct fbnic_tlv_msg *attr, char *dst,
+ size_t dstsize);
+struct fbnic_tlv_msg *fbnic_tlv_msg_alloc(u16 msg_id);
+int fbnic_tlv_attr_put_flag(struct fbnic_tlv_msg *msg, const u16 attr_id);
+int fbnic_tlv_attr_put_value(struct fbnic_tlv_msg *msg, const u16 attr_id,
+ const void *value, const int len);
+int __fbnic_tlv_attr_put_int(struct fbnic_tlv_msg *msg, const u16 attr_id,
+ s64 value, const int len);
+#define fbnic_tlv_attr_put_int(msg, attr_id, value) \
+ __fbnic_tlv_attr_put_int(msg, attr_id, value, \
+ FBNIC_TLV_MSG_ALIGN(sizeof(value)))
+int fbnic_tlv_attr_put_mac_addr(struct fbnic_tlv_msg *msg, const u16 attr_id,
+ const u8 *mac_addr);
+int fbnic_tlv_attr_put_string(struct fbnic_tlv_msg *msg, u16 attr_id,
+ const char *string);
+struct fbnic_tlv_msg *fbnic_tlv_attr_nest_start(struct fbnic_tlv_msg *msg,
+ u16 attr_id);
+void fbnic_tlv_attr_nest_stop(struct fbnic_tlv_msg *msg);
+void fbnic_tlv_attr_addr_copy(u8 *dest, struct fbnic_tlv_msg *src);
+int fbnic_tlv_attr_parse_array(struct fbnic_tlv_msg *attr, int len,
+ struct fbnic_tlv_msg **results,
+ const struct fbnic_tlv_index *tlv_index,
+ u16 tlv_attr_id, size_t array_len);
+int fbnic_tlv_attr_parse(struct fbnic_tlv_msg *attr, int len,
+ struct fbnic_tlv_msg **results,
+ const struct fbnic_tlv_index *tlv_index);
+int fbnic_tlv_msg_parse(void *opaque, struct fbnic_tlv_msg *msg,
+ const struct fbnic_tlv_parser *parser);
+int fbnic_tlv_parser_error(void *opaque, struct fbnic_tlv_msg **results);
+
+#define fta_get_uint(_results, _id) \
+ fbnic_tlv_attr_get_unsigned(_results[_id], 0)
+#define fta_get_sint(_results, _id) \
+ fbnic_tlv_attr_get_signed(_results[_id], 0)
+#define fta_get_str(_results, _id, _dst, _dstsize) \
+ fbnic_tlv_attr_get_string(_results[_id], _dst, _dstsize)
+
+#define FBNIC_TLV_MSG_ERROR \
+ FBNIC_TLV_PARSER(UNKNOWN, NULL, fbnic_tlv_parser_error)
+#endif /* _FBNIC_TLV_H_ */
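To tie the helpers above together, here is a minimal, hypothetical sketch of building a request message with this API. The message and attribute IDs below are invented purely for illustration (the real IDs live elsewhere in the driver's firmware interface), and error handling is reduced to the bare minimum.

/* Hypothetical IDs -- for illustration only, not part of this patch */
enum { EXAMPLE_MSG_REQ = 0x10 };
enum { EXAMPLE_ATTR_MTU = 0x0, EXAMPLE_ATTR_MAC = 0x1 };

static int example_build_req(void)
{
	u8 mac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
	struct fbnic_tlv_msg *msg;
	int err;

	/* One page; the message header starts out as a single DWORD */
	msg = fbnic_tlv_msg_alloc(EXAMPLE_MSG_REQ);
	if (!msg)
		return -ENOMEM;

	/* Each put grows msg->hdr.len by the attribute size in DWORDs */
	err = fbnic_tlv_attr_put_int(msg, EXAMPLE_ATTR_MTU, 1500);
	if (!err)
		err = fbnic_tlv_attr_put_mac_addr(msg, EXAMPLE_ATTR_MAC, mac);

	/* On success the page would be handed to the mailbox transport */

	free_page((unsigned long)msg);
	return err;
}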
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
new file mode 100644
index 000000000000..13d508ce637f
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
@@ -0,0 +1,2930 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/bitfield.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
+#include <linux/iopoll.h>
+#include <linux/pci.h>
+#include <net/netdev_queues.h>
+#include <net/page_pool/helpers.h>
+#include <net/tcp.h>
+#include <net/xdp.h>
+
+#include "fbnic.h"
+#include "fbnic_csr.h"
+#include "fbnic_netdev.h"
+#include "fbnic_txrx.h"
+
+enum {
+ FBNIC_XDP_PASS = 0,
+ FBNIC_XDP_CONSUME,
+ FBNIC_XDP_TX,
+ FBNIC_XDP_LEN_ERR,
+};
+
+enum {
+ FBNIC_XMIT_CB_TS = 0x01,
+};
+
+struct fbnic_xmit_cb {
+ u32 bytecount;
+ u16 gso_segs;
+ u8 desc_count;
+ u8 flags;
+ int hw_head;
+};
+
+#define FBNIC_XMIT_CB(__skb) ((struct fbnic_xmit_cb *)((__skb)->cb))
+
+#define FBNIC_XMIT_NOUNMAP ((void *)1)
+
+static u32 __iomem *fbnic_ring_csr_base(const struct fbnic_ring *ring)
+{
+ unsigned long csr_base = (unsigned long)ring->doorbell;
+
+ csr_base &= ~(FBNIC_QUEUE_STRIDE * sizeof(u32) - 1);
+
+ return (u32 __iomem *)csr_base;
+}
+
+static u32 fbnic_ring_rd32(struct fbnic_ring *ring, unsigned int csr)
+{
+ u32 __iomem *csr_base = fbnic_ring_csr_base(ring);
+
+ return readl(csr_base + csr);
+}
+
+static void fbnic_ring_wr32(struct fbnic_ring *ring, unsigned int csr, u32 val)
+{
+ u32 __iomem *csr_base = fbnic_ring_csr_base(ring);
+
+ writel(val, csr_base + csr);
+}
+
+/**
+ * fbnic_ts40_to_ns() - convert descriptor timestamp to PHC time
+ * @fbn: netdev priv of the FB NIC
+ * @ts40: timestamp read from a descriptor
+ *
+ * Return: u64 value of PHC time in nanoseconds
+ *
+ * Convert truncated 40 bit device timestamp as read from a descriptor
+ * to the full PHC time in nanoseconds.
+ */
+static __maybe_unused u64 fbnic_ts40_to_ns(struct fbnic_net *fbn, u64 ts40)
+{
+ unsigned int s;
+ u64 time_ns;
+ s64 offset;
+ u8 ts_top;
+ u32 high;
+
+ do {
+ s = u64_stats_fetch_begin(&fbn->time_seq);
+ offset = READ_ONCE(fbn->time_offset);
+ } while (u64_stats_fetch_retry(&fbn->time_seq, s));
+
+ high = READ_ONCE(fbn->time_high);
+
+ /* Bits 63..40 from periodic clock reads, 39..0 from ts40 */
+ time_ns = (u64)(high >> 8) << 40 | ts40;
+
+ /* Compare bits 32-39 between periodic reads and ts40,
+ * see if HW clock may have wrapped since last read. We are sure
+ * that periodic reads are always at least ~1 minute behind, so
+ * this logic works perfectly fine.
+ */
+ ts_top = ts40 >> 32;
+ if (ts_top < (u8)high && (u8)high - ts_top > U8_MAX / 2)
+ time_ns += 1ULL << 40;
+
+ return time_ns + offset;
+}
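A worked example of the promotion (illustrative numbers): if the cached time_high is 0x00001210, bits 63..40 of PHC time are 0x12 and the cached copy of bits 39..32 is 0x10; an incoming ts40 whose top byte is 0x15 simply becomes time_ns = ((u64)0x12 << 40 | ts40) + offset. If instead the cached low byte were 0xF8 and the incoming top byte 0x02, then 0xF8 - 0x02 exceeds U8_MAX / 2, meaning the 40-bit counter wrapped after the cache was taken, and 1ULL << 40 is added before the offset is applied.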
+
+static unsigned int fbnic_desc_unused(struct fbnic_ring *ring)
+{
+ return (ring->head - ring->tail - 1) & ring->size_mask;
+}
+
+static unsigned int fbnic_desc_used(struct fbnic_ring *ring)
+{
+ return (ring->tail - ring->head) & ring->size_mask;
+}
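For instance, with a 256-entry ring (size_mask = 255), head = 250 and tail = 10 give fbnic_desc_used() = (10 - 250) & 255 = 16 and fbnic_desc_unused() = (250 - 10 - 1) & 255 = 239; the extra "- 1" keeps one descriptor permanently unused so the tail can never catch up to the head.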
+
+static struct netdev_queue *txring_txq(const struct net_device *dev,
+ const struct fbnic_ring *ring)
+{
+ return netdev_get_tx_queue(dev, ring->q_idx);
+}
+
+static int fbnic_maybe_stop_tx(const struct net_device *dev,
+ struct fbnic_ring *ring,
+ const unsigned int size)
+{
+ struct netdev_queue *txq = txring_txq(dev, ring);
+ int res;
+
+ res = netif_txq_maybe_stop(txq, fbnic_desc_unused(ring), size,
+ FBNIC_TX_DESC_WAKEUP);
+ if (!res) {
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.twq.stop++;
+ u64_stats_update_end(&ring->stats.syncp);
+ }
+
+ return !res;
+}
+
+static bool fbnic_tx_sent_queue(struct sk_buff *skb, struct fbnic_ring *ring)
+{
+ struct netdev_queue *dev_queue = txring_txq(skb->dev, ring);
+ unsigned int bytecount = FBNIC_XMIT_CB(skb)->bytecount;
+ bool xmit_more = netdev_xmit_more();
+
+ /* TBD: Request completion more often if xmit_more becomes large */
+
+ return __netdev_tx_sent_queue(dev_queue, bytecount, xmit_more);
+}
+
+static void fbnic_unmap_single_twd(struct device *dev, __le64 *twd)
+{
+ u64 raw_twd = le64_to_cpu(*twd);
+ unsigned int len;
+ dma_addr_t dma;
+
+ dma = FIELD_GET(FBNIC_TWD_ADDR_MASK, raw_twd);
+ len = FIELD_GET(FBNIC_TWD_LEN_MASK, raw_twd);
+
+ dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
+}
+
+static void fbnic_unmap_page_twd(struct device *dev, __le64 *twd)
+{
+ u64 raw_twd = le64_to_cpu(*twd);
+ unsigned int len;
+ dma_addr_t dma;
+
+ dma = FIELD_GET(FBNIC_TWD_ADDR_MASK, raw_twd);
+ len = FIELD_GET(FBNIC_TWD_LEN_MASK, raw_twd);
+
+ dma_unmap_page(dev, dma, len, DMA_TO_DEVICE);
+}
+
+#define FBNIC_TWD_TYPE(_type) \
+ cpu_to_le64(FIELD_PREP(FBNIC_TWD_TYPE_MASK, FBNIC_TWD_TYPE_##_type))
+
+static bool fbnic_tx_tstamp(struct sk_buff *skb)
+{
+ struct fbnic_net *fbn;
+
+ if (!unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+ return false;
+
+ fbn = netdev_priv(skb->dev);
+ if (fbn->hwtstamp_config.tx_type == HWTSTAMP_TX_OFF)
+ return false;
+
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ FBNIC_XMIT_CB(skb)->flags |= FBNIC_XMIT_CB_TS;
+ FBNIC_XMIT_CB(skb)->hw_head = -1;
+
+ return true;
+}
+
+static bool
+fbnic_tx_lso(struct fbnic_ring *ring, struct sk_buff *skb,
+ struct skb_shared_info *shinfo, __le64 *meta,
+ unsigned int *l2len, unsigned int *i3len)
+{
+ unsigned int l3_type, l4_type, l4len, hdrlen;
+ unsigned char *l4hdr;
+ __be16 payload_len;
+
+ if (unlikely(skb_cow_head(skb, 0)))
+ return true;
+
+ if (shinfo->gso_type & SKB_GSO_PARTIAL) {
+ l3_type = FBNIC_TWD_L3_TYPE_OTHER;
+ } else if (!skb->encapsulation) {
+ if (ip_hdr(skb)->version == 4)
+ l3_type = FBNIC_TWD_L3_TYPE_IPV4;
+ else
+ l3_type = FBNIC_TWD_L3_TYPE_IPV6;
+ } else {
+ unsigned int o3len;
+
+ o3len = skb_inner_network_header(skb) - skb_network_header(skb);
+ *i3len -= o3len;
+ *meta |= cpu_to_le64(FIELD_PREP(FBNIC_TWD_L3_OHLEN_MASK,
+ o3len / 2));
+ l3_type = FBNIC_TWD_L3_TYPE_V6V6;
+ }
+
+ l4hdr = skb_checksum_start(skb);
+ payload_len = cpu_to_be16(skb->len - (l4hdr - skb->data));
+
+ if (shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
+ struct tcphdr *tcph = (struct tcphdr *)l4hdr;
+
+ l4_type = FBNIC_TWD_L4_TYPE_TCP;
+ l4len = __tcp_hdrlen((struct tcphdr *)l4hdr);
+ csum_replace_by_diff(&tcph->check, (__force __wsum)payload_len);
+ } else {
+ struct udphdr *udph = (struct udphdr *)l4hdr;
+
+ l4_type = FBNIC_TWD_L4_TYPE_UDP;
+ l4len = sizeof(struct udphdr);
+ csum_replace_by_diff(&udph->check, (__force __wsum)payload_len);
+ }
+
+ hdrlen = (l4hdr - skb->data) + l4len;
+ *meta |= cpu_to_le64(FIELD_PREP(FBNIC_TWD_L3_TYPE_MASK, l3_type) |
+ FIELD_PREP(FBNIC_TWD_L4_TYPE_MASK, l4_type) |
+ FIELD_PREP(FBNIC_TWD_L4_HLEN_MASK, l4len / 4) |
+ FIELD_PREP(FBNIC_TWD_MSS_MASK, shinfo->gso_size) |
+ FBNIC_TWD_FLAG_REQ_LSO);
+
+ FBNIC_XMIT_CB(skb)->bytecount += (shinfo->gso_segs - 1) * hdrlen;
+ FBNIC_XMIT_CB(skb)->gso_segs = shinfo->gso_segs;
+
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.twq.lso += shinfo->gso_segs;
+ u64_stats_update_end(&ring->stats.syncp);
+
+ return false;
+}
+
+static bool
+fbnic_tx_offloads(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta)
+{
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+ unsigned int l2len, i3len;
+
+ if (fbnic_tx_tstamp(skb))
+ *meta |= cpu_to_le64(FBNIC_TWD_FLAG_REQ_TS);
+
+ if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL))
+ return false;
+
+ l2len = skb_mac_header_len(skb);
+ i3len = skb_checksum_start(skb) - skb_network_header(skb);
+
+ *meta |= cpu_to_le64(FIELD_PREP(FBNIC_TWD_CSUM_OFFSET_MASK,
+ skb->csum_offset / 2));
+
+ if (shinfo->gso_size) {
+ if (fbnic_tx_lso(ring, skb, shinfo, meta, &l2len, &i3len))
+ return true;
+ } else {
+ *meta |= cpu_to_le64(FBNIC_TWD_FLAG_REQ_CSO);
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.twq.csum_partial++;
+ u64_stats_update_end(&ring->stats.syncp);
+ }
+
+ *meta |= cpu_to_le64(FIELD_PREP(FBNIC_TWD_L2_HLEN_MASK, l2len / 2) |
+ FIELD_PREP(FBNIC_TWD_L3_IHLEN_MASK, i3len / 2));
+ return false;
+}
+
+static void
+fbnic_rx_csum(u64 rcd, struct sk_buff *skb, struct fbnic_ring *rcq,
+ u64 *csum_cmpl, u64 *csum_none)
+{
+ skb_checksum_none_assert(skb);
+
+ if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) {
+ (*csum_none)++;
+ return;
+ }
+
+ if (FIELD_GET(FBNIC_RCD_META_L4_CSUM_UNNECESSARY, rcd)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ u16 csum = FIELD_GET(FBNIC_RCD_META_L2_CSUM_MASK, rcd);
+
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ skb->csum = (__force __wsum)csum;
+ (*csum_cmpl)++;
+ }
+}
+
+static bool
+fbnic_tx_map(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta)
+{
+ struct device *dev = skb->dev->dev.parent;
+ unsigned int tail = ring->tail, first;
+ unsigned int size, data_len;
+ skb_frag_t *frag;
+ bool is_net_iov;
+ dma_addr_t dma;
+ __le64 *twd;
+
+ ring->tx_buf[tail] = skb;
+
+ tail++;
+ tail &= ring->size_mask;
+ first = tail;
+
+ size = skb_headlen(skb);
+ data_len = skb->data_len;
+
+ if (size > FIELD_MAX(FBNIC_TWD_LEN_MASK))
+ goto dma_error;
+
+ is_net_iov = false;
+ dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
+
+ for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+ twd = &ring->desc[tail];
+
+ if (dma_mapping_error(dev, dma))
+ goto dma_error;
+
+ *twd = cpu_to_le64(FIELD_PREP(FBNIC_TWD_ADDR_MASK, dma) |
+ FIELD_PREP(FBNIC_TWD_LEN_MASK, size) |
+ FIELD_PREP(FBNIC_TWD_TYPE_MASK,
+ FBNIC_TWD_TYPE_AL));
+ if (is_net_iov)
+ ring->tx_buf[tail] = FBNIC_XMIT_NOUNMAP;
+
+ tail++;
+ tail &= ring->size_mask;
+
+ if (!data_len)
+ break;
+
+ size = skb_frag_size(frag);
+ data_len -= size;
+
+ if (size > FIELD_MAX(FBNIC_TWD_LEN_MASK))
+ goto dma_error;
+
+ is_net_iov = skb_frag_is_net_iov(frag);
+ dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
+ }
+
+ *twd |= FBNIC_TWD_TYPE(LAST_AL);
+
+ FBNIC_XMIT_CB(skb)->desc_count = ((twd - meta) + 1) & ring->size_mask;
+
+ ring->tail = tail;
+
+ /* Record SW timestamp */
+ skb_tx_timestamp(skb);
+
+ /* Verify there is room for another packet */
+ fbnic_maybe_stop_tx(skb->dev, ring, FBNIC_MAX_SKB_DESC);
+
+ if (fbnic_tx_sent_queue(skb, ring)) {
+ *meta |= cpu_to_le64(FBNIC_TWD_FLAG_REQ_COMPLETION);
+
+ /* Force DMA writes to flush before writing to tail */
+ dma_wmb();
+
+ writel(tail, ring->doorbell);
+ }
+
+ return false;
+dma_error:
+ if (net_ratelimit())
+ netdev_err(skb->dev, "TX DMA map failed\n");
+
+ while (tail != first) {
+ tail--;
+ tail &= ring->size_mask;
+ twd = &ring->desc[tail];
+ if (tail == first)
+ fbnic_unmap_single_twd(dev, twd);
+ else if (ring->tx_buf[tail] == FBNIC_XMIT_NOUNMAP)
+ ring->tx_buf[tail] = NULL;
+ else
+ fbnic_unmap_page_twd(dev, twd);
+ }
+
+ return true;
+}
+
+#define FBNIC_MIN_FRAME_LEN 60
+
+static netdev_tx_t
+fbnic_xmit_frame_ring(struct sk_buff *skb, struct fbnic_ring *ring)
+{
+ __le64 *meta = &ring->desc[ring->tail];
+ u16 desc_needed;
+
+ if (skb_put_padto(skb, FBNIC_MIN_FRAME_LEN))
+ goto err_count;
+
+ /* Need: 1 descriptor per page,
+ * + 1 desc for skb_head,
+ * + 2 desc for metadata and timestamp metadata
+ * + 7 desc gap to keep tail from touching head
+ * otherwise try next time
+ */
+ desc_needed = skb_shinfo(skb)->nr_frags + 10;
+ if (fbnic_maybe_stop_tx(skb->dev, ring, desc_needed))
+ return NETDEV_TX_BUSY;
+
+ *meta = cpu_to_le64(FBNIC_TWD_FLAG_DEST_MAC);
+
+ /* Write all members within DWORD to condense this into 2 4B writes */
+ FBNIC_XMIT_CB(skb)->bytecount = skb->len;
+ FBNIC_XMIT_CB(skb)->gso_segs = 1;
+ FBNIC_XMIT_CB(skb)->desc_count = 0;
+ FBNIC_XMIT_CB(skb)->flags = 0;
+
+ if (fbnic_tx_offloads(ring, skb, meta))
+ goto err_free;
+
+ if (fbnic_tx_map(ring, skb, meta))
+ goto err_free;
+
+ return NETDEV_TX_OK;
+
+err_free:
+ dev_kfree_skb_any(skb);
+err_count:
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.dropped++;
+ u64_stats_update_end(&ring->stats.syncp);
+ return NETDEV_TX_OK;
+}
+
+netdev_tx_t fbnic_xmit_frame(struct sk_buff *skb, struct net_device *dev)
+{
+ struct fbnic_net *fbn = netdev_priv(dev);
+ unsigned int q_map = skb->queue_mapping;
+
+ return fbnic_xmit_frame_ring(skb, fbn->tx[q_map]);
+}
+
+static netdev_features_t
+fbnic_features_check_encap_gso(struct sk_buff *skb, struct net_device *dev,
+ netdev_features_t features, unsigned int l3len)
+{
+ netdev_features_t skb_gso_features;
+ struct ipv6hdr *ip6_hdr;
+ unsigned char l4_hdr;
+ unsigned int start;
+ __be16 frag_off;
+
+ /* Require MANGLEID for GSO_PARTIAL of IPv4.
+ * In theory we could support TSO with single, innermost v4 header
+ * by pretending everything before it is L2, but that needs to be
+ * parsed case by case, so leaving it for when the need arises.
+ */
+ if (!(features & NETIF_F_TSO_MANGLEID))
+ features &= ~NETIF_F_TSO;
+
+ skb_gso_features = skb_shinfo(skb)->gso_type;
+ skb_gso_features <<= NETIF_F_GSO_SHIFT;
+
+ /* We'd only clear the native GSO features, so don't bother validating
+	 * if the match can only be on those supported through GSO_PARTIAL.
+ */
+ if (!(skb_gso_features & FBNIC_TUN_GSO_FEATURES))
+ return features;
+
+ /* We can only do IPv6-in-IPv6, not v4-in-v6. It'd be nice
+ * to fall back to partial for this, or any failure below.
+ * This is just an optimization, UDPv4 will be caught later on.
+ */
+ if (skb_gso_features & NETIF_F_TSO)
+ return features & ~FBNIC_TUN_GSO_FEATURES;
+
+ /* Inner headers multiple of 2 */
+ if ((skb_inner_network_header(skb) - skb_network_header(skb)) % 2)
+ return features & ~FBNIC_TUN_GSO_FEATURES;
+
+ /* Encapsulated GSO packet, make 100% sure it's IPv6-in-IPv6. */
+ ip6_hdr = ipv6_hdr(skb);
+ if (ip6_hdr->version != 6)
+ return features & ~FBNIC_TUN_GSO_FEATURES;
+
+ l4_hdr = ip6_hdr->nexthdr;
+ start = (unsigned char *)ip6_hdr - skb->data + sizeof(struct ipv6hdr);
+ start = ipv6_skip_exthdr(skb, start, &l4_hdr, &frag_off);
+ if (frag_off || l4_hdr != IPPROTO_IPV6 ||
+ skb->data + start != skb_inner_network_header(skb))
+ return features & ~FBNIC_TUN_GSO_FEATURES;
+
+ return features;
+}
+
+netdev_features_t
+fbnic_features_check(struct sk_buff *skb, struct net_device *dev,
+ netdev_features_t features)
+{
+ unsigned int l2len, l3len;
+
+ if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL))
+ return features;
+
+ l2len = skb_mac_header_len(skb);
+ l3len = skb_checksum_start(skb) - skb_network_header(skb);
+
+	/* Check that header lengths are a multiple of 2.
+	 * In the case of 6in6 we support longer headers (IHLEN + OHLEN)
+ * but keep things simple for now, 512B is plenty.
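+	 * The TWD header length and csum offset fields hold values in 2-byte
+	 * units, which is why everything below is divided by 2.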
+ */
+ if ((l2len | l3len | skb->csum_offset) % 2 ||
+ !FIELD_FIT(FBNIC_TWD_L2_HLEN_MASK, l2len / 2) ||
+ !FIELD_FIT(FBNIC_TWD_L3_IHLEN_MASK, l3len / 2) ||
+ !FIELD_FIT(FBNIC_TWD_CSUM_OFFSET_MASK, skb->csum_offset / 2))
+ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+
+ if (likely(!skb->encapsulation) || !skb_is_gso(skb))
+ return features;
+
+ return fbnic_features_check_encap_gso(skb, dev, features, l3len);
+}
+
+static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget,
+ struct fbnic_ring *ring, bool discard,
+ unsigned int hw_head)
+{
+ u64 total_bytes = 0, total_packets = 0, ts_lost = 0;
+ unsigned int head = ring->head;
+ struct netdev_queue *txq;
+ unsigned int clean_desc;
+
+ clean_desc = (hw_head - head) & ring->size_mask;
+
+ while (clean_desc) {
+ struct sk_buff *skb = ring->tx_buf[head];
+ unsigned int desc_cnt;
+
+ desc_cnt = FBNIC_XMIT_CB(skb)->desc_count;
+ if (desc_cnt > clean_desc)
+ break;
+
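+		/* A packet still waiting on its Tx timestamp completion stays
+		 * on the ring (just note how far the HW head got) unless we
+		 * are discarding, in which case the timestamp is counted as
+		 * lost.
+		 */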
+ if (unlikely(FBNIC_XMIT_CB(skb)->flags & FBNIC_XMIT_CB_TS)) {
+ FBNIC_XMIT_CB(skb)->hw_head = hw_head;
+ if (likely(!discard))
+ break;
+ ts_lost++;
+ }
+
+ ring->tx_buf[head] = NULL;
+
+ clean_desc -= desc_cnt;
+
+ while (!(ring->desc[head] & FBNIC_TWD_TYPE(AL))) {
+ head++;
+ head &= ring->size_mask;
+ desc_cnt--;
+ }
+
+ fbnic_unmap_single_twd(nv->dev, &ring->desc[head]);
+ head++;
+ head &= ring->size_mask;
+ desc_cnt--;
+
+ while (desc_cnt--) {
+ if (ring->tx_buf[head] != FBNIC_XMIT_NOUNMAP)
+ fbnic_unmap_page_twd(nv->dev,
+ &ring->desc[head]);
+ else
+ ring->tx_buf[head] = NULL;
+ head++;
+ head &= ring->size_mask;
+ }
+
+ total_bytes += FBNIC_XMIT_CB(skb)->bytecount;
+ total_packets += FBNIC_XMIT_CB(skb)->gso_segs;
+
+ napi_consume_skb(skb, napi_budget);
+ }
+
+ if (!total_bytes)
+ return;
+
+ ring->head = head;
+
+ txq = txring_txq(nv->napi.dev, ring);
+
+ if (unlikely(discard)) {
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.dropped += total_packets;
+ ring->stats.twq.ts_lost += ts_lost;
+ u64_stats_update_end(&ring->stats.syncp);
+
+ netdev_tx_completed_queue(txq, total_packets, total_bytes);
+ return;
+ }
+
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.bytes += total_bytes;
+ ring->stats.packets += total_packets;
+ u64_stats_update_end(&ring->stats.syncp);
+
+ if (!netif_txq_completed_wake(txq, total_packets, total_bytes,
+ fbnic_desc_unused(ring),
+ FBNIC_TX_DESC_WAKEUP)) {
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.twq.wake++;
+ u64_stats_update_end(&ring->stats.syncp);
+ }
+}
+
+static void fbnic_clean_twq1(struct fbnic_napi_vector *nv, bool pp_allow_direct,
+ struct fbnic_ring *ring, bool discard,
+ unsigned int hw_head)
+{
+ u64 total_bytes = 0, total_packets = 0;
+ unsigned int head = ring->head;
+
+ while (hw_head != head) {
+ struct page *page;
+ u64 twd;
+
+ if (unlikely(!(ring->desc[head] & FBNIC_TWD_TYPE(AL))))
+ goto next_desc;
+
+ twd = le64_to_cpu(ring->desc[head]);
+ page = ring->tx_buf[head];
+
+ /* TYPE_AL is 2, TYPE_LAST_AL is 3. So this trick gives
+ * us one increment per packet, with no branches.
+ */
+ total_packets += FIELD_GET(FBNIC_TWD_TYPE_MASK, twd) -
+ FBNIC_TWD_TYPE_AL;
+ total_bytes += FIELD_GET(FBNIC_TWD_LEN_MASK, twd);
+
+ page_pool_put_page(pp_page_to_nmdesc(page)->pp, page, -1,
+ pp_allow_direct);
+next_desc:
+ head++;
+ head &= ring->size_mask;
+ }
+
+ if (!total_bytes)
+ return;
+
+ ring->head = head;
+
+ if (discard) {
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.dropped += total_packets;
+ u64_stats_update_end(&ring->stats.syncp);
+ return;
+ }
+
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.bytes += total_bytes;
+ ring->stats.packets += total_packets;
+ u64_stats_update_end(&ring->stats.syncp);
+}
+
+static void fbnic_clean_tsq(struct fbnic_napi_vector *nv,
+ struct fbnic_ring *ring,
+ u64 tcd, int *ts_head, int *head0)
+{
+ struct skb_shared_hwtstamps hwtstamp;
+ struct fbnic_net *fbn;
+ struct sk_buff *skb;
+ int head;
+ u64 ns;
+
+ head = (*ts_head < 0) ? ring->head : *ts_head;
+
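+	/* Walk forward from the last timestamp position (or the ring head)
+	 * to find the next packet flagged for a Tx timestamp.
+	 */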
+ do {
+ unsigned int desc_cnt;
+
+ if (head == ring->tail) {
+ if (unlikely(net_ratelimit()))
+ netdev_err(nv->napi.dev,
+ "Tx timestamp without matching packet\n");
+ return;
+ }
+
+ skb = ring->tx_buf[head];
+ desc_cnt = FBNIC_XMIT_CB(skb)->desc_count;
+
+ head += desc_cnt;
+ head &= ring->size_mask;
+ } while (!(FBNIC_XMIT_CB(skb)->flags & FBNIC_XMIT_CB_TS));
+
+ fbn = netdev_priv(nv->napi.dev);
+ ns = fbnic_ts40_to_ns(fbn, FIELD_GET(FBNIC_TCD_TYPE1_TS_MASK, tcd));
+
+ memset(&hwtstamp, 0, sizeof(hwtstamp));
+ hwtstamp.hwtstamp = ns_to_ktime(ns);
+
+ *ts_head = head;
+
+ FBNIC_XMIT_CB(skb)->flags &= ~FBNIC_XMIT_CB_TS;
+ if (*head0 < 0) {
+ head = FBNIC_XMIT_CB(skb)->hw_head;
+ if (head >= 0)
+ *head0 = head;
+ }
+
+ skb_tstamp_tx(skb, &hwtstamp);
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.twq.ts_packets++;
+ u64_stats_update_end(&ring->stats.syncp);
+}
+
+static void fbnic_page_pool_init(struct fbnic_ring *ring, unsigned int idx,
+ netmem_ref netmem)
+{
+ struct fbnic_rx_buf *rx_buf = &ring->rx_buf[idx];
+
+ page_pool_fragment_netmem(netmem, FBNIC_PAGECNT_BIAS_MAX);
+ rx_buf->pagecnt_bias = FBNIC_PAGECNT_BIAS_MAX;
+ rx_buf->netmem = netmem;
+}
+
+static struct page *
+fbnic_page_pool_get_head(struct fbnic_q_triad *qt, unsigned int idx)
+{
+ struct fbnic_rx_buf *rx_buf = &qt->sub0.rx_buf[idx];
+
+ rx_buf->pagecnt_bias--;
+
+ /* sub0 is always fed system pages, from the NAPI-level page_pool */
+ return netmem_to_page(rx_buf->netmem);
+}
+
+static netmem_ref
+fbnic_page_pool_get_data(struct fbnic_q_triad *qt, unsigned int idx)
+{
+ struct fbnic_rx_buf *rx_buf = &qt->sub1.rx_buf[idx];
+
+ rx_buf->pagecnt_bias--;
+
+ return rx_buf->netmem;
+}
+
+static void fbnic_page_pool_drain(struct fbnic_ring *ring, unsigned int idx,
+ int budget)
+{
+ struct fbnic_rx_buf *rx_buf = &ring->rx_buf[idx];
+ netmem_ref netmem = rx_buf->netmem;
+
+ if (!page_pool_unref_netmem(netmem, rx_buf->pagecnt_bias))
+ page_pool_put_unrefed_netmem(ring->page_pool, netmem, -1,
+ !!budget);
+
+ rx_buf->netmem = 0;
+}
+
+static void fbnic_clean_twq(struct fbnic_napi_vector *nv, int napi_budget,
+ struct fbnic_q_triad *qt, s32 ts_head, s32 head0,
+ s32 head1)
+{
+ if (head0 >= 0)
+ fbnic_clean_twq0(nv, napi_budget, &qt->sub0, false, head0);
+ else if (ts_head >= 0)
+ fbnic_clean_twq0(nv, napi_budget, &qt->sub0, false, ts_head);
+
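+	/* Only clean the XDP Tx queue when we have a NAPI budget; otherwise
+	 * remember the head so it can be cleaned on the next poll.
+	 */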
+ if (head1 >= 0) {
+ qt->cmpl.deferred_head = -1;
+ if (napi_budget)
+ fbnic_clean_twq1(nv, true, &qt->sub1, false, head1);
+ else
+ qt->cmpl.deferred_head = head1;
+ }
+}
+
+static void
+fbnic_clean_tcq(struct fbnic_napi_vector *nv, struct fbnic_q_triad *qt,
+ int napi_budget)
+{
+ struct fbnic_ring *cmpl = &qt->cmpl;
+ s32 head1 = cmpl->deferred_head;
+ s32 head0 = -1, ts_head = -1;
+ __le64 *raw_tcd, done;
+ u32 head = cmpl->head;
+
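+	/* The polarity of the DONE bit that marks a fresh completion flips
+	 * on every wrap of the ring; the wrap bit of the head counter
+	 * (size_mask + 1) selects which polarity to look for on this pass.
+	 */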
+ done = (head & (cmpl->size_mask + 1)) ? 0 : cpu_to_le64(FBNIC_TCD_DONE);
+ raw_tcd = &cmpl->desc[head & cmpl->size_mask];
+
+ /* Walk the completion queue collecting the heads reported by NIC */
+ while ((*raw_tcd & cpu_to_le64(FBNIC_TCD_DONE)) == done) {
+ u64 tcd;
+
+ dma_rmb();
+
+ tcd = le64_to_cpu(*raw_tcd);
+
+ switch (FIELD_GET(FBNIC_TCD_TYPE_MASK, tcd)) {
+ case FBNIC_TCD_TYPE_0:
+ if (tcd & FBNIC_TCD_TWQ1)
+ head1 = FIELD_GET(FBNIC_TCD_TYPE0_HEAD1_MASK,
+ tcd);
+ else
+ head0 = FIELD_GET(FBNIC_TCD_TYPE0_HEAD0_MASK,
+ tcd);
+ /* Currently all err status bits are related to
+ * timestamps and as those have yet to be added
+ * they are skipped for now.
+ */
+ break;
+ case FBNIC_TCD_TYPE_1:
+ if (WARN_ON_ONCE(tcd & FBNIC_TCD_TWQ1))
+ break;
+
+ fbnic_clean_tsq(nv, &qt->sub0, tcd, &ts_head, &head0);
+ break;
+ default:
+ break;
+ }
+
+ raw_tcd++;
+ head++;
+ if (!(head & cmpl->size_mask)) {
+ done ^= cpu_to_le64(FBNIC_TCD_DONE);
+ raw_tcd = &cmpl->desc[0];
+ }
+ }
+
+ /* Record the current head/tail of the queue */
+ if (cmpl->head != head) {
+ cmpl->head = head;
+ writel(head & cmpl->size_mask, cmpl->doorbell);
+ }
+
+ /* Unmap and free processed buffers */
+ fbnic_clean_twq(nv, napi_budget, qt, ts_head, head0, head1);
+}
+
+static void fbnic_clean_bdq(struct fbnic_ring *ring, unsigned int hw_head,
+ int napi_budget)
+{
+ unsigned int head = ring->head;
+
+ if (head == hw_head)
+ return;
+
+ do {
+ fbnic_page_pool_drain(ring, head, napi_budget);
+
+ head++;
+ head &= ring->size_mask;
+ } while (head != hw_head);
+
+ ring->head = head;
+}
+
+static void fbnic_bd_prep(struct fbnic_ring *bdq, u16 id, netmem_ref netmem)
+{
+ __le64 *bdq_desc = &bdq->desc[id * FBNIC_BD_FRAG_COUNT];
+ dma_addr_t dma = page_pool_get_dma_addr_netmem(netmem);
+ u64 bd, i = FBNIC_BD_FRAG_COUNT;
+
+ bd = (FBNIC_BD_PAGE_ADDR_MASK & dma) |
+ FIELD_PREP(FBNIC_BD_PAGE_ID_MASK, id);
+
+	/* In the case that the page size is larger than 4K we will map a
+	 * single page to multiple fragments. The fragments will be
+	 * FBNIC_BD_FRAG_COUNT in size and the lower n bits will be used
+	 * to indicate the individual fragment IDs.
+ */
+ do {
+ *bdq_desc = cpu_to_le64(bd);
+ bd += FIELD_PREP(FBNIC_BD_DESC_ADDR_MASK, 1) |
+ FIELD_PREP(FBNIC_BD_DESC_ID_MASK, 1);
+ bdq_desc++;
+ } while (--i);
+}
+
+static void fbnic_fill_bdq(struct fbnic_ring *bdq)
+{
+ unsigned int count = fbnic_desc_unused(bdq);
+ unsigned int i = bdq->tail;
+
+ if (!count)
+ return;
+
+ do {
+ netmem_ref netmem;
+
+ netmem = page_pool_dev_alloc_netmems(bdq->page_pool);
+ if (!netmem) {
+ u64_stats_update_begin(&bdq->stats.syncp);
+ bdq->stats.bdq.alloc_failed++;
+ u64_stats_update_end(&bdq->stats.syncp);
+
+ break;
+ }
+
+ fbnic_page_pool_init(bdq, i, netmem);
+ fbnic_bd_prep(bdq, i, netmem);
+
+ i++;
+ i &= bdq->size_mask;
+
+ count--;
+ } while (count);
+
+ if (bdq->tail != i) {
+ bdq->tail = i;
+
+ /* Force DMA writes to flush before writing to tail */
+ dma_wmb();
+
+ writel(i, bdq->doorbell);
+ }
+}
+
+static unsigned int fbnic_hdr_pg_start(unsigned int pg_off)
+{
+ /* The headroom of the first header may be larger than FBNIC_RX_HROOM
+ * due to alignment. So account for that by just making the page
+ * offset 0 if we are starting at the first header.
+ */
+ if (ALIGN(FBNIC_RX_HROOM, 128) > FBNIC_RX_HROOM &&
+ pg_off == ALIGN(FBNIC_RX_HROOM, 128))
+ return 0;
+
+ return pg_off - FBNIC_RX_HROOM;
+}
+
+static unsigned int fbnic_hdr_pg_end(unsigned int pg_off, unsigned int len)
+{
+ /* Determine the end of the buffer by finding the start of the next
+ * and then subtracting the headroom from that frame.
+ */
+ pg_off += len + FBNIC_RX_TROOM + FBNIC_RX_HROOM;
+
+ return ALIGN(pg_off, 128) - FBNIC_RX_HROOM;
+}
+
+static void fbnic_pkt_prepare(struct fbnic_napi_vector *nv, u64 rcd,
+ struct fbnic_pkt_buff *pkt,
+ struct fbnic_q_triad *qt)
+{
+ unsigned int hdr_pg_idx = FIELD_GET(FBNIC_RCD_AL_BUFF_PAGE_MASK, rcd);
+ unsigned int hdr_pg_off = FIELD_GET(FBNIC_RCD_AL_BUFF_OFF_MASK, rcd);
+ struct page *page = fbnic_page_pool_get_head(qt, hdr_pg_idx);
+ unsigned int len = FIELD_GET(FBNIC_RCD_AL_BUFF_LEN_MASK, rcd);
+ unsigned int frame_sz, hdr_pg_start, hdr_pg_end, headroom;
+ unsigned char *hdr_start;
+
+ /* data_hard_start should always be NULL when this is called */
+ WARN_ON_ONCE(pkt->buff.data_hard_start);
+
+ /* Short-cut the end calculation if we know page is fully consumed */
+ hdr_pg_end = FIELD_GET(FBNIC_RCD_AL_PAGE_FIN, rcd) ?
+ FBNIC_BD_FRAG_SIZE : fbnic_hdr_pg_end(hdr_pg_off, len);
+ hdr_pg_start = fbnic_hdr_pg_start(hdr_pg_off);
+
+ headroom = hdr_pg_off - hdr_pg_start + FBNIC_RX_PAD;
+ frame_sz = hdr_pg_end - hdr_pg_start;
+ xdp_init_buff(&pkt->buff, frame_sz, &qt->xdp_rxq);
+ hdr_pg_start += (FBNIC_RCD_AL_BUFF_FRAG_MASK & rcd) *
+ FBNIC_BD_FRAG_SIZE;
+
+ /* Sync DMA buffer */
+ dma_sync_single_range_for_cpu(nv->dev, page_pool_get_dma_addr(page),
+ hdr_pg_start, frame_sz,
+ DMA_BIDIRECTIONAL);
+
+ /* Build frame around buffer */
+ hdr_start = page_address(page) + hdr_pg_start;
+ net_prefetch(pkt->buff.data);
+ xdp_prepare_buff(&pkt->buff, hdr_start, headroom,
+ len - FBNIC_RX_PAD, true);
+
+ pkt->hwtstamp = 0;
+ pkt->add_frag_failed = false;
+}
+
+static void fbnic_add_rx_frag(struct fbnic_napi_vector *nv, u64 rcd,
+ struct fbnic_pkt_buff *pkt,
+ struct fbnic_q_triad *qt)
+{
+ unsigned int pg_idx = FIELD_GET(FBNIC_RCD_AL_BUFF_PAGE_MASK, rcd);
+ unsigned int pg_off = FIELD_GET(FBNIC_RCD_AL_BUFF_OFF_MASK, rcd);
+ unsigned int len = FIELD_GET(FBNIC_RCD_AL_BUFF_LEN_MASK, rcd);
+ netmem_ref netmem = fbnic_page_pool_get_data(qt, pg_idx);
+ unsigned int truesize;
+ bool added;
+
+ truesize = FIELD_GET(FBNIC_RCD_AL_PAGE_FIN, rcd) ?
+ FBNIC_BD_FRAG_SIZE - pg_off : ALIGN(len, 128);
+
+ pg_off += (FBNIC_RCD_AL_BUFF_FRAG_MASK & rcd) *
+ FBNIC_BD_FRAG_SIZE;
+
+ /* Sync DMA buffer */
+ page_pool_dma_sync_netmem_for_cpu(qt->sub1.page_pool, netmem,
+ pg_off, truesize);
+
+ added = xdp_buff_add_frag(&pkt->buff, netmem, pg_off, len, truesize);
+ if (unlikely(!added)) {
+ pkt->add_frag_failed = true;
+ netdev_err_once(nv->napi.dev,
+ "Failed to add fragment to xdp_buff\n");
+ }
+}
+
+static void fbnic_put_pkt_buff(struct fbnic_q_triad *qt,
+ struct fbnic_pkt_buff *pkt, int budget)
+{
+ struct page *page;
+
+ if (!pkt->buff.data_hard_start)
+ return;
+
+ if (xdp_buff_has_frags(&pkt->buff)) {
+ struct skb_shared_info *shinfo;
+ netmem_ref netmem;
+ int nr_frags;
+
+ shinfo = xdp_get_shared_info_from_buff(&pkt->buff);
+ nr_frags = shinfo->nr_frags;
+
+ while (nr_frags--) {
+ netmem = skb_frag_netmem(&shinfo->frags[nr_frags]);
+ page_pool_put_full_netmem(qt->sub1.page_pool, netmem,
+ !!budget);
+ }
+ }
+
+ page = virt_to_page(pkt->buff.data_hard_start);
+ page_pool_put_full_page(qt->sub0.page_pool, page, !!budget);
+}
+
+static struct sk_buff *fbnic_build_skb(struct fbnic_napi_vector *nv,
+ struct fbnic_pkt_buff *pkt)
+{
+ struct sk_buff *skb;
+
+ skb = xdp_build_skb_from_buff(&pkt->buff);
+ if (!skb)
+ return NULL;
+
+ /* Add timestamp if present */
+ if (pkt->hwtstamp)
+ skb_hwtstamps(skb)->hwtstamp = pkt->hwtstamp;
+
+ return skb;
+}
+
+static long fbnic_pkt_tx(struct fbnic_napi_vector *nv,
+ struct fbnic_pkt_buff *pkt)
+{
+ struct fbnic_ring *ring = &nv->qt[0].sub1;
+ int size, offset, nsegs = 1, data_len = 0;
+ unsigned int tail = ring->tail;
+ struct skb_shared_info *shinfo;
+ skb_frag_t *frag = NULL;
+ struct page *page;
+ dma_addr_t dma;
+ __le64 *twd;
+
+ if (unlikely(xdp_buff_has_frags(&pkt->buff))) {
+ shinfo = xdp_get_shared_info_from_buff(&pkt->buff);
+ nsegs += shinfo->nr_frags;
+ data_len = shinfo->xdp_frags_size;
+ frag = &shinfo->frags[0];
+ }
+
+ if (fbnic_desc_unused(ring) < nsegs) {
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.dropped++;
+ u64_stats_update_end(&ring->stats.syncp);
+ return -FBNIC_XDP_CONSUME;
+ }
+
+ page = virt_to_page(pkt->buff.data_hard_start);
+ offset = offset_in_page(pkt->buff.data);
+ dma = page_pool_get_dma_addr(page);
+
+ size = pkt->buff.data_end - pkt->buff.data;
+
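+	/* The first pass handles the linear portion of the xdp_buff; any
+	 * remaining passes walk the frags, syncing each buffer for device
+	 * access before posting it to the XDP Tx work queue.
+	 */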
+ while (nsegs--) {
+ dma_sync_single_range_for_device(nv->dev, dma, offset, size,
+ DMA_BIDIRECTIONAL);
+ dma += offset;
+
+ ring->tx_buf[tail] = page;
+
+ twd = &ring->desc[tail];
+ *twd = cpu_to_le64(FIELD_PREP(FBNIC_TWD_ADDR_MASK, dma) |
+ FIELD_PREP(FBNIC_TWD_LEN_MASK, size) |
+ FIELD_PREP(FBNIC_TWD_TYPE_MASK,
+ FBNIC_TWD_TYPE_AL));
+
+ tail++;
+ tail &= ring->size_mask;
+
+ if (!data_len)
+ break;
+
+ offset = skb_frag_off(frag);
+ page = skb_frag_page(frag);
+ dma = page_pool_get_dma_addr(page);
+
+ size = skb_frag_size(frag);
+ data_len -= size;
+ frag++;
+ }
+
+ *twd |= FBNIC_TWD_TYPE(LAST_AL);
+
+ ring->tail = tail;
+
+ return -FBNIC_XDP_TX;
+}
+
+static void fbnic_pkt_commit_tail(struct fbnic_napi_vector *nv,
+ unsigned int pkt_tail)
+{
+ struct fbnic_ring *ring = &nv->qt[0].sub1;
+
+ /* Force DMA writes to flush before writing to tail */
+ dma_wmb();
+
+ writel(pkt_tail, ring->doorbell);
+}
+
+static struct sk_buff *fbnic_run_xdp(struct fbnic_napi_vector *nv,
+ struct fbnic_pkt_buff *pkt)
+{
+ struct fbnic_net *fbn = netdev_priv(nv->napi.dev);
+ struct bpf_prog *xdp_prog;
+ int act;
+
+ xdp_prog = READ_ONCE(fbn->xdp_prog);
+ if (!xdp_prog)
+ goto xdp_pass;
+
+ /* Should never happen, config paths enforce HDS threshold > MTU */
+ if (xdp_buff_has_frags(&pkt->buff) && !xdp_prog->aux->xdp_has_frags)
+ return ERR_PTR(-FBNIC_XDP_LEN_ERR);
+
+ act = bpf_prog_run_xdp(xdp_prog, &pkt->buff);
+ switch (act) {
+ case XDP_PASS:
+xdp_pass:
+ return fbnic_build_skb(nv, pkt);
+ case XDP_TX:
+ return ERR_PTR(fbnic_pkt_tx(nv, pkt));
+ default:
+ bpf_warn_invalid_xdp_action(nv->napi.dev, xdp_prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(nv->napi.dev, xdp_prog, act);
+ fallthrough;
+ case XDP_DROP:
+ break;
+ }
+
+ return ERR_PTR(-FBNIC_XDP_CONSUME);
+}
+
+static enum pkt_hash_types fbnic_skb_hash_type(u64 rcd)
+{
+ return (FBNIC_RCD_META_L4_TYPE_MASK & rcd) ? PKT_HASH_TYPE_L4 :
+ (FBNIC_RCD_META_L3_TYPE_MASK & rcd) ? PKT_HASH_TYPE_L3 :
+ PKT_HASH_TYPE_L2;
+}
+
+static void fbnic_rx_tstamp(struct fbnic_napi_vector *nv, u64 rcd,
+ struct fbnic_pkt_buff *pkt)
+{
+ struct fbnic_net *fbn;
+ u64 ns, ts;
+
+ if (!FIELD_GET(FBNIC_RCD_OPT_META_TS, rcd))
+ return;
+
+ fbn = netdev_priv(nv->napi.dev);
+ ts = FIELD_GET(FBNIC_RCD_OPT_META_TS_MASK, rcd);
+ ns = fbnic_ts40_to_ns(fbn, ts);
+
+ /* Add timestamp to shared info */
+ pkt->hwtstamp = ns_to_ktime(ns);
+}
+
+static void fbnic_populate_skb_fields(struct fbnic_napi_vector *nv,
+ u64 rcd, struct sk_buff *skb,
+ struct fbnic_q_triad *qt,
+ u64 *csum_cmpl, u64 *csum_none)
+{
+ struct net_device *netdev = nv->napi.dev;
+ struct fbnic_ring *rcq = &qt->cmpl;
+
+ fbnic_rx_csum(rcd, skb, rcq, csum_cmpl, csum_none);
+
+ if (netdev->features & NETIF_F_RXHASH)
+ skb_set_hash(skb,
+ FIELD_GET(FBNIC_RCD_META_RSS_HASH_MASK, rcd),
+ fbnic_skb_hash_type(rcd));
+
+ skb_record_rx_queue(skb, rcq->q_idx);
+}
+
+static bool fbnic_rcd_metadata_err(u64 rcd)
+{
+ return !!(FBNIC_RCD_META_UNCORRECTABLE_ERR_MASK & rcd);
+}
+
+static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
+ struct fbnic_q_triad *qt, int budget)
+{
+ unsigned int packets = 0, bytes = 0, dropped = 0, alloc_failed = 0;
+ u64 csum_complete = 0, csum_none = 0, length_errors = 0;
+ s32 head0 = -1, head1 = -1, pkt_tail = -1;
+ struct fbnic_ring *rcq = &qt->cmpl;
+ struct fbnic_pkt_buff *pkt;
+ __le64 *raw_rcd, done;
+ u32 head = rcq->head;
+
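+	/* As with the Tx completion queue, the DONE bit polarity marking a
+	 * fresh descriptor flips on every wrap of the ring.
+	 */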
+ done = (head & (rcq->size_mask + 1)) ? cpu_to_le64(FBNIC_RCD_DONE) : 0;
+ raw_rcd = &rcq->desc[head & rcq->size_mask];
+ pkt = rcq->pkt;
+
+ /* Walk the completion queue collecting the heads reported by NIC */
+ while (likely(packets < budget)) {
+ struct sk_buff *skb = ERR_PTR(-EINVAL);
+ u32 pkt_bytes;
+ u64 rcd;
+
+ if ((*raw_rcd & cpu_to_le64(FBNIC_RCD_DONE)) == done)
+ break;
+
+ dma_rmb();
+
+ rcd = le64_to_cpu(*raw_rcd);
+
+ switch (FIELD_GET(FBNIC_RCD_TYPE_MASK, rcd)) {
+ case FBNIC_RCD_TYPE_HDR_AL:
+ head0 = FIELD_GET(FBNIC_RCD_AL_BUFF_PAGE_MASK, rcd);
+ fbnic_pkt_prepare(nv, rcd, pkt, qt);
+
+ break;
+ case FBNIC_RCD_TYPE_PAY_AL:
+ head1 = FIELD_GET(FBNIC_RCD_AL_BUFF_PAGE_MASK, rcd);
+ fbnic_add_rx_frag(nv, rcd, pkt, qt);
+
+ break;
+ case FBNIC_RCD_TYPE_OPT_META:
+ /* Only type 0 is currently supported */
+ if (FIELD_GET(FBNIC_RCD_OPT_META_TYPE_MASK, rcd))
+ break;
+
+ fbnic_rx_tstamp(nv, rcd, pkt);
+
+ /* We currently ignore the action table index */
+ break;
+ case FBNIC_RCD_TYPE_META:
+ if (likely(!fbnic_rcd_metadata_err(rcd) &&
+ !pkt->add_frag_failed)) {
+ pkt_bytes = xdp_get_buff_len(&pkt->buff);
+ skb = fbnic_run_xdp(nv, pkt);
+ }
+
+ /* Populate skb and invalidate XDP */
+ if (!IS_ERR_OR_NULL(skb)) {
+ fbnic_populate_skb_fields(nv, rcd, skb, qt,
+ &csum_complete,
+ &csum_none);
+ napi_gro_receive(&nv->napi, skb);
+ } else if (skb == ERR_PTR(-FBNIC_XDP_TX)) {
+ pkt_tail = nv->qt[0].sub1.tail;
+ } else if (PTR_ERR(skb) == -FBNIC_XDP_CONSUME) {
+ fbnic_put_pkt_buff(qt, pkt, 1);
+ } else {
+ if (!skb)
+ alloc_failed++;
+
+ if (skb == ERR_PTR(-FBNIC_XDP_LEN_ERR))
+ length_errors++;
+ else
+ dropped++;
+
+ fbnic_put_pkt_buff(qt, pkt, 1);
+ goto next_dont_count;
+ }
+
+ packets++;
+ bytes += pkt_bytes;
+next_dont_count:
+ pkt->buff.data_hard_start = NULL;
+
+ break;
+ }
+
+ raw_rcd++;
+ head++;
+ if (!(head & rcq->size_mask)) {
+ done ^= cpu_to_le64(FBNIC_RCD_DONE);
+ raw_rcd = &rcq->desc[0];
+ }
+ }
+
+ u64_stats_update_begin(&rcq->stats.syncp);
+ rcq->stats.packets += packets;
+ rcq->stats.bytes += bytes;
+ rcq->stats.dropped += dropped;
+ rcq->stats.rx.alloc_failed += alloc_failed;
+ rcq->stats.rx.csum_complete += csum_complete;
+ rcq->stats.rx.csum_none += csum_none;
+ rcq->stats.rx.length_errors += length_errors;
+ u64_stats_update_end(&rcq->stats.syncp);
+
+ if (pkt_tail >= 0)
+ fbnic_pkt_commit_tail(nv, pkt_tail);
+
+ /* Unmap and free processed buffers */
+ if (head0 >= 0)
+ fbnic_clean_bdq(&qt->sub0, head0, budget);
+ fbnic_fill_bdq(&qt->sub0);
+
+ if (head1 >= 0)
+ fbnic_clean_bdq(&qt->sub1, head1, budget);
+ fbnic_fill_bdq(&qt->sub1);
+
+ /* Record the current head/tail of the queue */
+ if (rcq->head != head) {
+ rcq->head = head;
+ writel(head & rcq->size_mask, rcq->doorbell);
+ }
+
+ return packets;
+}
+
+static void fbnic_nv_irq_disable(struct fbnic_napi_vector *nv)
+{
+ struct fbnic_dev *fbd = nv->fbd;
+ u32 v_idx = nv->v_idx;
+
+ fbnic_wr32(fbd, FBNIC_INTR_MASK_SET(v_idx / 32), 1 << (v_idx % 32));
+}
+
+static void fbnic_nv_irq_rearm(struct fbnic_napi_vector *nv)
+{
+ struct fbnic_dev *fbd = nv->fbd;
+ u32 v_idx = nv->v_idx;
+
+ fbnic_wr32(fbd, FBNIC_INTR_CQ_REARM(v_idx),
+ FBNIC_INTR_CQ_REARM_INTR_UNMASK);
+}
+
+static int fbnic_poll(struct napi_struct *napi, int budget)
+{
+ struct fbnic_napi_vector *nv = container_of(napi,
+ struct fbnic_napi_vector,
+ napi);
+ int i, j, work_done = 0;
+
+ for (i = 0; i < nv->txt_count; i++)
+ fbnic_clean_tcq(nv, &nv->qt[i], budget);
+
+ for (j = 0; j < nv->rxt_count; j++, i++)
+ work_done += fbnic_clean_rcq(nv, &nv->qt[i], budget);
+
+ if (work_done >= budget)
+ return budget;
+
+ if (likely(napi_complete_done(napi, work_done)))
+ fbnic_nv_irq_rearm(nv);
+
+ return work_done;
+}
+
+irqreturn_t fbnic_msix_clean_rings(int __always_unused irq, void *data)
+{
+ struct fbnic_napi_vector *nv = *(void **)data;
+
+ napi_schedule_irqoff(&nv->napi);
+
+ return IRQ_HANDLED;
+}
+
+void fbnic_aggregate_ring_rx_counters(struct fbnic_net *fbn,
+ struct fbnic_ring *rxr)
+{
+ struct fbnic_queue_stats *stats = &rxr->stats;
+
+	/* Capture stats from queues before disassociating them */
+ fbn->rx_stats.bytes += stats->bytes;
+ fbn->rx_stats.packets += stats->packets;
+ fbn->rx_stats.dropped += stats->dropped;
+ fbn->rx_stats.rx.alloc_failed += stats->rx.alloc_failed;
+ fbn->rx_stats.rx.csum_complete += stats->rx.csum_complete;
+ fbn->rx_stats.rx.csum_none += stats->rx.csum_none;
+ fbn->rx_stats.rx.length_errors += stats->rx.length_errors;
+ /* Remember to add new stats here */
+ BUILD_BUG_ON(sizeof(fbn->rx_stats.rx) / 8 != 4);
+}
+
+void fbnic_aggregate_ring_bdq_counters(struct fbnic_net *fbn,
+ struct fbnic_ring *bdq)
+{
+ struct fbnic_queue_stats *stats = &bdq->stats;
+
+	/* Capture stats from queues before disassociating them */
+ fbn->bdq_stats.bdq.alloc_failed += stats->bdq.alloc_failed;
+ /* Remember to add new stats here */
+ BUILD_BUG_ON(sizeof(fbn->rx_stats.bdq) / 8 != 1);
+}
+
+void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn,
+ struct fbnic_ring *txr)
+{
+ struct fbnic_queue_stats *stats = &txr->stats;
+
+	/* Capture stats from queues before disassociating them */
+ fbn->tx_stats.bytes += stats->bytes;
+ fbn->tx_stats.packets += stats->packets;
+ fbn->tx_stats.dropped += stats->dropped;
+ fbn->tx_stats.twq.csum_partial += stats->twq.csum_partial;
+ fbn->tx_stats.twq.lso += stats->twq.lso;
+ fbn->tx_stats.twq.ts_lost += stats->twq.ts_lost;
+ fbn->tx_stats.twq.ts_packets += stats->twq.ts_packets;
+ fbn->tx_stats.twq.stop += stats->twq.stop;
+ fbn->tx_stats.twq.wake += stats->twq.wake;
+ /* Remember to add new stats here */
+ BUILD_BUG_ON(sizeof(fbn->tx_stats.twq) / 8 != 6);
+}
+
+void fbnic_aggregate_ring_xdp_counters(struct fbnic_net *fbn,
+ struct fbnic_ring *xdpr)
+{
+ struct fbnic_queue_stats *stats = &xdpr->stats;
+
+ if (!(xdpr->flags & FBNIC_RING_F_STATS))
+ return;
+
+	/* Capture stats from queues before disassociating them */
+ fbn->tx_stats.dropped += stats->dropped;
+ fbn->tx_stats.bytes += stats->bytes;
+ fbn->tx_stats.packets += stats->packets;
+}
+
+static void fbnic_remove_tx_ring(struct fbnic_net *fbn,
+ struct fbnic_ring *txr)
+{
+ if (!(txr->flags & FBNIC_RING_F_STATS))
+ return;
+
+ fbnic_aggregate_ring_tx_counters(fbn, txr);
+
+ /* Remove pointer to the Tx ring */
+ WARN_ON(fbn->tx[txr->q_idx] && fbn->tx[txr->q_idx] != txr);
+ fbn->tx[txr->q_idx] = NULL;
+}
+
+static void fbnic_remove_xdp_ring(struct fbnic_net *fbn,
+ struct fbnic_ring *xdpr)
+{
+ if (!(xdpr->flags & FBNIC_RING_F_STATS))
+ return;
+
+ fbnic_aggregate_ring_xdp_counters(fbn, xdpr);
+
+	/* Remove pointer to the XDP Tx ring */
+ WARN_ON(fbn->tx[xdpr->q_idx] && fbn->tx[xdpr->q_idx] != xdpr);
+ fbn->tx[xdpr->q_idx] = NULL;
+}
+
+static void fbnic_remove_rx_ring(struct fbnic_net *fbn,
+ struct fbnic_ring *rxr)
+{
+ if (!(rxr->flags & FBNIC_RING_F_STATS))
+ return;
+
+ fbnic_aggregate_ring_rx_counters(fbn, rxr);
+
+ /* Remove pointer to the Rx ring */
+ WARN_ON(fbn->rx[rxr->q_idx] && fbn->rx[rxr->q_idx] != rxr);
+ fbn->rx[rxr->q_idx] = NULL;
+}
+
+static void fbnic_remove_bdq_ring(struct fbnic_net *fbn,
+ struct fbnic_ring *bdq)
+{
+ if (!(bdq->flags & FBNIC_RING_F_STATS))
+ return;
+
+ fbnic_aggregate_ring_bdq_counters(fbn, bdq);
+}
+
+static void fbnic_free_qt_page_pools(struct fbnic_q_triad *qt)
+{
+ page_pool_destroy(qt->sub0.page_pool);
+ page_pool_destroy(qt->sub1.page_pool);
+}
+
+static void fbnic_free_napi_vector(struct fbnic_net *fbn,
+ struct fbnic_napi_vector *nv)
+{
+ struct fbnic_dev *fbd = nv->fbd;
+ int i, j;
+
+ for (i = 0; i < nv->txt_count; i++) {
+ fbnic_remove_tx_ring(fbn, &nv->qt[i].sub0);
+ fbnic_remove_xdp_ring(fbn, &nv->qt[i].sub1);
+ fbnic_remove_tx_ring(fbn, &nv->qt[i].cmpl);
+ }
+
+ for (j = 0; j < nv->rxt_count; j++, i++) {
+ fbnic_remove_bdq_ring(fbn, &nv->qt[i].sub0);
+ fbnic_remove_bdq_ring(fbn, &nv->qt[i].sub1);
+ fbnic_remove_rx_ring(fbn, &nv->qt[i].cmpl);
+ }
+
+ fbnic_napi_free_irq(fbd, nv);
+ netif_napi_del_locked(&nv->napi);
+ fbn->napi[fbnic_napi_idx(nv)] = NULL;
+ kfree(nv);
+}
+
+void fbnic_free_napi_vectors(struct fbnic_net *fbn)
+{
+ int i;
+
+ for (i = 0; i < fbn->num_napi; i++)
+ if (fbn->napi[i])
+ fbnic_free_napi_vector(fbn, fbn->napi[i]);
+}
+
+static int
+fbnic_alloc_qt_page_pools(struct fbnic_net *fbn, struct fbnic_q_triad *qt,
+ unsigned int rxq_idx)
+{
+ struct page_pool_params pp_params = {
+ .order = 0,
+ .flags = PP_FLAG_DMA_MAP |
+ PP_FLAG_DMA_SYNC_DEV,
+ .pool_size = fbn->hpq_size + fbn->ppq_size,
+ .nid = NUMA_NO_NODE,
+ .dev = fbn->netdev->dev.parent,
+ .dma_dir = DMA_BIDIRECTIONAL,
+ .offset = 0,
+ .max_len = PAGE_SIZE,
+ .netdev = fbn->netdev,
+ .queue_idx = rxq_idx,
+ };
+ struct page_pool *pp;
+
+ /* Page pool cannot exceed a size of 32768. This doesn't limit the
+ * pages on the ring but the number we can have cached waiting on
+ * the next use.
+ *
+ * TBD: Can this be reduced further? Would a multiple of
+ * NAPI_POLL_WEIGHT possibly make more sense? The question is how
+	 * many pages we need to hold in reserve to get the best return
+ * without hogging too much system memory.
+ */
+ if (pp_params.pool_size > 32768)
+ pp_params.pool_size = 32768;
+
+ pp = page_pool_create(&pp_params);
+ if (IS_ERR(pp))
+ return PTR_ERR(pp);
+
+ qt->sub0.page_pool = pp;
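+	/* The payload queue (sub1) only needs its own page pool when the
+	 * queue is bound to an unreadable memory provider; otherwise it
+	 * shares sub0's pool and we just take an extra reference on it.
+	 */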
+ if (netif_rxq_has_unreadable_mp(fbn->netdev, rxq_idx)) {
+ pp_params.flags |= PP_FLAG_ALLOW_UNREADABLE_NETMEM;
+ pp_params.dma_dir = DMA_FROM_DEVICE;
+
+ pp = page_pool_create(&pp_params);
+ if (IS_ERR(pp))
+ goto err_destroy_sub0;
+ } else {
+ page_pool_get(pp);
+ }
+ qt->sub1.page_pool = pp;
+
+ return 0;
+
+err_destroy_sub0:
+ page_pool_destroy(pp);
+ return PTR_ERR(pp);
+}
+
+static void fbnic_ring_init(struct fbnic_ring *ring, u32 __iomem *doorbell,
+ int q_idx, u8 flags)
+{
+ u64_stats_init(&ring->stats.syncp);
+ ring->doorbell = doorbell;
+ ring->q_idx = q_idx;
+ ring->flags = flags;
+ ring->deferred_head = -1;
+}
+
+static int fbnic_alloc_napi_vector(struct fbnic_dev *fbd, struct fbnic_net *fbn,
+ unsigned int v_count, unsigned int v_idx,
+ unsigned int txq_count, unsigned int txq_idx,
+ unsigned int rxq_count, unsigned int rxq_idx)
+{
+ int txt_count = txq_count, rxt_count = rxq_count;
+ u32 __iomem *uc_addr = fbd->uc_addr0;
+ int xdp_count = 0, qt_count, err;
+ struct fbnic_napi_vector *nv;
+ struct fbnic_q_triad *qt;
+ u32 __iomem *db;
+
+ /* We need to reserve at least one Tx Queue Triad for an XDP ring */
+ if (rxq_count) {
+ xdp_count = 1;
+ if (!txt_count)
+ txt_count = 1;
+ }
+
+ qt_count = txt_count + rxq_count;
+ if (!qt_count)
+ return -EINVAL;
+
+ /* If MMIO has already failed there are no rings to initialize */
+ if (!uc_addr)
+ return -EIO;
+
+ /* Allocate NAPI vector and queue triads */
+ nv = kzalloc(struct_size(nv, qt, qt_count), GFP_KERNEL);
+ if (!nv)
+ return -ENOMEM;
+
+ /* Record queue triad counts */
+ nv->txt_count = txt_count;
+ nv->rxt_count = rxt_count;
+
+ /* Provide pointer back to fbnic and MSI-X vectors */
+ nv->fbd = fbd;
+ nv->v_idx = v_idx;
+
+ /* Tie napi to netdev */
+ fbn->napi[fbnic_napi_idx(nv)] = nv;
+ netif_napi_add_config_locked(fbn->netdev, &nv->napi, fbnic_poll,
+ fbnic_napi_idx(nv));
+
+ /* Record IRQ to NAPI struct */
+ netif_napi_set_irq_locked(&nv->napi,
+ pci_irq_vector(to_pci_dev(fbd->dev),
+ nv->v_idx));
+
+ /* Tie nv back to PCIe dev */
+ nv->dev = fbd->dev;
+
+ /* Request the IRQ for napi vector */
+ err = fbnic_napi_request_irq(fbd, nv);
+ if (err)
+ goto napi_del;
+
+ /* Initialize queue triads */
+ qt = nv->qt;
+
+ while (txt_count) {
+ u8 flags = FBNIC_RING_F_CTX | FBNIC_RING_F_STATS;
+
+ /* Configure Tx queue */
+ db = &uc_addr[FBNIC_QUEUE(txq_idx) + FBNIC_QUEUE_TWQ0_TAIL];
+
+ /* Assign Tx queue to netdev if applicable */
+		if (txq_count > 0) {
+			fbnic_ring_init(&qt->sub0, db, txq_idx, flags);
+ fbn->tx[txq_idx] = &qt->sub0;
+ txq_count--;
+ } else {
+ fbnic_ring_init(&qt->sub0, db, 0,
+ FBNIC_RING_F_DISABLED);
+ }
+
+ /* Configure XDP queue */
+ db = &uc_addr[FBNIC_QUEUE(txq_idx) + FBNIC_QUEUE_TWQ1_TAIL];
+
+ /* Assign XDP queue to netdev if applicable
+ *
+ * The setup for this is in itself a bit different.
+ * 1. We only need one XDP Tx queue per NAPI vector.
+ * 2. We associate it to the first Rx queue index.
+ * 3. The hardware side is associated based on the Tx Queue.
+ * 4. The netdev queue is offset by FBNIC_MAX_TXQs.
+ */
+ if (xdp_count > 0) {
+ unsigned int xdp_idx = FBNIC_MAX_TXQS + rxq_idx;
+
+ fbnic_ring_init(&qt->sub1, db, xdp_idx, flags);
+ fbn->tx[xdp_idx] = &qt->sub1;
+ xdp_count--;
+ } else {
+ fbnic_ring_init(&qt->sub1, db, 0,
+ FBNIC_RING_F_DISABLED);
+ }
+
+ /* Configure Tx completion queue */
+ db = &uc_addr[FBNIC_QUEUE(txq_idx) + FBNIC_QUEUE_TCQ_HEAD];
+ fbnic_ring_init(&qt->cmpl, db, 0, 0);
+
+ /* Update Tx queue index */
+ txt_count--;
+ txq_idx += v_count;
+
+ /* Move to next queue triad */
+ qt++;
+ }
+
+ while (rxt_count) {
+ /* Configure header queue */
+ db = &uc_addr[FBNIC_QUEUE(rxq_idx) + FBNIC_QUEUE_BDQ_HPQ_TAIL];
+ fbnic_ring_init(&qt->sub0, db, 0,
+ FBNIC_RING_F_CTX | FBNIC_RING_F_STATS);
+
+ /* Configure payload queue */
+ db = &uc_addr[FBNIC_QUEUE(rxq_idx) + FBNIC_QUEUE_BDQ_PPQ_TAIL];
+ fbnic_ring_init(&qt->sub1, db, 0,
+ FBNIC_RING_F_CTX | FBNIC_RING_F_STATS);
+
+ /* Configure Rx completion queue */
+ db = &uc_addr[FBNIC_QUEUE(rxq_idx) + FBNIC_QUEUE_RCQ_HEAD];
+ fbnic_ring_init(&qt->cmpl, db, rxq_idx, FBNIC_RING_F_STATS);
+ fbn->rx[rxq_idx] = &qt->cmpl;
+
+ /* Update Rx queue index */
+ rxt_count--;
+ rxq_idx += v_count;
+
+ /* Move to next queue triad */
+ qt++;
+ }
+
+ return 0;
+
+napi_del:
+ netif_napi_del_locked(&nv->napi);
+ fbn->napi[fbnic_napi_idx(nv)] = NULL;
+ kfree(nv);
+ return err;
+}
+
+int fbnic_alloc_napi_vectors(struct fbnic_net *fbn)
+{
+ unsigned int txq_idx = 0, rxq_idx = 0, v_idx = FBNIC_NON_NAPI_VECTORS;
+ unsigned int num_tx = fbn->num_tx_queues;
+ unsigned int num_rx = fbn->num_rx_queues;
+ unsigned int num_napi = fbn->num_napi;
+ struct fbnic_dev *fbd = fbn->fbd;
+ int err;
+
+ /* Allocate 1 Tx queue per napi vector */
+ if (num_napi < FBNIC_MAX_TXQS && num_napi == num_tx + num_rx) {
+ while (num_tx) {
+ err = fbnic_alloc_napi_vector(fbd, fbn,
+ num_napi, v_idx,
+ 1, txq_idx, 0, 0);
+ if (err)
+ goto free_vectors;
+
+ /* Update counts and index */
+ num_tx--;
+ txq_idx++;
+
+ v_idx++;
+ }
+ }
+
+ /* Allocate Tx/Rx queue pairs per vector, or allocate remaining Rx */
+ while (num_rx | num_tx) {
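+		/* Spread the remaining Tx/Rx queues as evenly as possible
+		 * across the remaining NAPI vectors.
+		 */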
+ int tqpv = DIV_ROUND_UP(num_tx, num_napi - txq_idx);
+ int rqpv = DIV_ROUND_UP(num_rx, num_napi - rxq_idx);
+
+ err = fbnic_alloc_napi_vector(fbd, fbn, num_napi, v_idx,
+ tqpv, txq_idx, rqpv, rxq_idx);
+ if (err)
+ goto free_vectors;
+
+ /* Update counts and index */
+ num_tx -= tqpv;
+ txq_idx++;
+
+ num_rx -= rqpv;
+ rxq_idx++;
+
+ v_idx++;
+ }
+
+ return 0;
+
+free_vectors:
+ fbnic_free_napi_vectors(fbn);
+
+ return err;
+}
+
+static void fbnic_free_ring_resources(struct device *dev,
+ struct fbnic_ring *ring)
+{
+ kvfree(ring->buffer);
+ ring->buffer = NULL;
+
+ /* If size is not set there are no descriptors present */
+ if (!ring->size)
+ return;
+
+ dma_free_coherent(dev, ring->size, ring->desc, ring->dma);
+ ring->size_mask = 0;
+ ring->size = 0;
+}
+
+static int fbnic_alloc_tx_ring_desc(struct fbnic_net *fbn,
+ struct fbnic_ring *txr)
+{
+ struct device *dev = fbn->netdev->dev.parent;
+ size_t size;
+
+ /* Round size up to nearest 4K */
+ size = ALIGN(array_size(sizeof(*txr->desc), fbn->txq_size), 4096);
+
+ txr->desc = dma_alloc_coherent(dev, size, &txr->dma,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!txr->desc)
+ return -ENOMEM;
+
+ /* txq_size should be a power of 2, so mask is just that -1 */
+ txr->size_mask = fbn->txq_size - 1;
+ txr->size = size;
+
+ return 0;
+}
+
+static int fbnic_alloc_tx_ring_buffer(struct fbnic_ring *txr)
+{
+ size_t size = array_size(sizeof(*txr->tx_buf), txr->size_mask + 1);
+
+ txr->tx_buf = kvzalloc(size, GFP_KERNEL | __GFP_NOWARN);
+
+ return txr->tx_buf ? 0 : -ENOMEM;
+}
+
+static int fbnic_alloc_tx_ring_resources(struct fbnic_net *fbn,
+ struct fbnic_ring *txr)
+{
+ struct device *dev = fbn->netdev->dev.parent;
+ int err;
+
+ if (txr->flags & FBNIC_RING_F_DISABLED)
+ return 0;
+
+ err = fbnic_alloc_tx_ring_desc(fbn, txr);
+ if (err)
+ return err;
+
+ if (!(txr->flags & FBNIC_RING_F_CTX))
+ return 0;
+
+ err = fbnic_alloc_tx_ring_buffer(txr);
+ if (err)
+ goto free_desc;
+
+ return 0;
+
+free_desc:
+ fbnic_free_ring_resources(dev, txr);
+ return err;
+}
+
+static int fbnic_alloc_rx_ring_desc(struct fbnic_net *fbn,
+ struct fbnic_ring *rxr)
+{
+ struct device *dev = fbn->netdev->dev.parent;
+ size_t desc_size = sizeof(*rxr->desc);
+ u32 rxq_size;
+ size_t size;
+
+ switch (rxr->doorbell - fbnic_ring_csr_base(rxr)) {
+ case FBNIC_QUEUE_BDQ_HPQ_TAIL:
+ rxq_size = fbn->hpq_size / FBNIC_BD_FRAG_COUNT;
+ desc_size *= FBNIC_BD_FRAG_COUNT;
+ break;
+ case FBNIC_QUEUE_BDQ_PPQ_TAIL:
+ rxq_size = fbn->ppq_size / FBNIC_BD_FRAG_COUNT;
+ desc_size *= FBNIC_BD_FRAG_COUNT;
+ break;
+ case FBNIC_QUEUE_RCQ_HEAD:
+ rxq_size = fbn->rcq_size;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Round size up to nearest 4K */
+ size = ALIGN(array_size(desc_size, rxq_size), 4096);
+
+ rxr->desc = dma_alloc_coherent(dev, size, &rxr->dma,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!rxr->desc)
+ return -ENOMEM;
+
+ /* rxq_size should be a power of 2, so mask is just that -1 */
+ rxr->size_mask = rxq_size - 1;
+ rxr->size = size;
+
+ return 0;
+}
+
+static int fbnic_alloc_rx_ring_buffer(struct fbnic_ring *rxr)
+{
+	size_t size;
+
+	if (rxr->flags & FBNIC_RING_F_CTX)
+		size = array_size(sizeof(*rxr->rx_buf), rxr->size_mask + 1);
+	else
+		size = sizeof(*rxr->pkt);
+
+ rxr->rx_buf = kvzalloc(size, GFP_KERNEL | __GFP_NOWARN);
+
+ return rxr->rx_buf ? 0 : -ENOMEM;
+}
+
+static int fbnic_alloc_rx_ring_resources(struct fbnic_net *fbn,
+ struct fbnic_ring *rxr)
+{
+ struct device *dev = fbn->netdev->dev.parent;
+ int err;
+
+ err = fbnic_alloc_rx_ring_desc(fbn, rxr);
+ if (err)
+ return err;
+
+ err = fbnic_alloc_rx_ring_buffer(rxr);
+ if (err)
+ goto free_desc;
+
+ return 0;
+
+free_desc:
+ fbnic_free_ring_resources(dev, rxr);
+ return err;
+}
+
+static void fbnic_free_qt_resources(struct fbnic_net *fbn,
+ struct fbnic_q_triad *qt)
+{
+ struct device *dev = fbn->netdev->dev.parent;
+
+ fbnic_free_ring_resources(dev, &qt->cmpl);
+ fbnic_free_ring_resources(dev, &qt->sub1);
+ fbnic_free_ring_resources(dev, &qt->sub0);
+
+ if (xdp_rxq_info_is_reg(&qt->xdp_rxq)) {
+ xdp_rxq_info_unreg_mem_model(&qt->xdp_rxq);
+ xdp_rxq_info_unreg(&qt->xdp_rxq);
+ fbnic_free_qt_page_pools(qt);
+ }
+}
+
+static int fbnic_alloc_tx_qt_resources(struct fbnic_net *fbn,
+ struct fbnic_q_triad *qt)
+{
+ struct device *dev = fbn->netdev->dev.parent;
+ int err;
+
+ err = fbnic_alloc_tx_ring_resources(fbn, &qt->sub0);
+ if (err)
+ return err;
+
+ err = fbnic_alloc_tx_ring_resources(fbn, &qt->sub1);
+ if (err)
+ goto free_sub0;
+
+ err = fbnic_alloc_tx_ring_resources(fbn, &qt->cmpl);
+ if (err)
+ goto free_sub1;
+
+ return 0;
+
+free_sub1:
+ fbnic_free_ring_resources(dev, &qt->sub1);
+free_sub0:
+ fbnic_free_ring_resources(dev, &qt->sub0);
+ return err;
+}
+
+static int fbnic_alloc_rx_qt_resources(struct fbnic_net *fbn,
+ struct fbnic_napi_vector *nv,
+ struct fbnic_q_triad *qt)
+{
+ struct device *dev = fbn->netdev->dev.parent;
+ int err;
+
+ err = fbnic_alloc_qt_page_pools(fbn, qt, qt->cmpl.q_idx);
+ if (err)
+ return err;
+
+ err = xdp_rxq_info_reg(&qt->xdp_rxq, fbn->netdev, qt->sub0.q_idx,
+ nv->napi.napi_id);
+ if (err)
+ goto free_page_pools;
+
+ err = xdp_rxq_info_reg_mem_model(&qt->xdp_rxq, MEM_TYPE_PAGE_POOL,
+ qt->sub0.page_pool);
+ if (err)
+ goto unreg_rxq;
+
+ err = fbnic_alloc_rx_ring_resources(fbn, &qt->sub0);
+ if (err)
+ goto unreg_mm;
+
+ err = fbnic_alloc_rx_ring_resources(fbn, &qt->sub1);
+ if (err)
+ goto free_sub0;
+
+ err = fbnic_alloc_rx_ring_resources(fbn, &qt->cmpl);
+ if (err)
+ goto free_sub1;
+
+ return 0;
+
+free_sub1:
+ fbnic_free_ring_resources(dev, &qt->sub1);
+free_sub0:
+ fbnic_free_ring_resources(dev, &qt->sub0);
+unreg_mm:
+ xdp_rxq_info_unreg_mem_model(&qt->xdp_rxq);
+unreg_rxq:
+ xdp_rxq_info_unreg(&qt->xdp_rxq);
+free_page_pools:
+ fbnic_free_qt_page_pools(qt);
+ return err;
+}
+
+static void fbnic_free_nv_resources(struct fbnic_net *fbn,
+ struct fbnic_napi_vector *nv)
+{
+ int i;
+
+ for (i = 0; i < nv->txt_count + nv->rxt_count; i++)
+ fbnic_free_qt_resources(fbn, &nv->qt[i]);
+}
+
+static int fbnic_alloc_nv_resources(struct fbnic_net *fbn,
+ struct fbnic_napi_vector *nv)
+{
+ int i, j, err;
+
+ /* Allocate Tx Resources */
+ for (i = 0; i < nv->txt_count; i++) {
+ err = fbnic_alloc_tx_qt_resources(fbn, &nv->qt[i]);
+ if (err)
+ goto free_qt_resources;
+ }
+
+ /* Allocate Rx Resources */
+ for (j = 0; j < nv->rxt_count; j++, i++) {
+ err = fbnic_alloc_rx_qt_resources(fbn, nv, &nv->qt[i]);
+ if (err)
+ goto free_qt_resources;
+ }
+
+ return 0;
+
+free_qt_resources:
+ while (i--)
+ fbnic_free_qt_resources(fbn, &nv->qt[i]);
+ return err;
+}
+
+void fbnic_free_resources(struct fbnic_net *fbn)
+{
+ int i;
+
+ for (i = 0; i < fbn->num_napi; i++)
+ fbnic_free_nv_resources(fbn, fbn->napi[i]);
+}
+
+int fbnic_alloc_resources(struct fbnic_net *fbn)
+{
+ int i, err = -ENODEV;
+
+ for (i = 0; i < fbn->num_napi; i++) {
+ err = fbnic_alloc_nv_resources(fbn, fbn->napi[i]);
+ if (err)
+ goto free_resources;
+ }
+
+ return 0;
+
+free_resources:
+ while (i--)
+ fbnic_free_nv_resources(fbn, fbn->napi[i]);
+
+ return err;
+}
+
+static void fbnic_set_netif_napi(struct fbnic_napi_vector *nv)
+{
+ int i, j;
+
+ /* Associate Tx queue with NAPI */
+ for (i = 0; i < nv->txt_count; i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+
+ netif_queue_set_napi(nv->napi.dev, qt->sub0.q_idx,
+ NETDEV_QUEUE_TYPE_TX, &nv->napi);
+ }
+
+ /* Associate Rx queue with NAPI */
+ for (j = 0; j < nv->rxt_count; j++, i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+
+ netif_queue_set_napi(nv->napi.dev, qt->cmpl.q_idx,
+ NETDEV_QUEUE_TYPE_RX, &nv->napi);
+ }
+}
+
+static void fbnic_reset_netif_napi(struct fbnic_napi_vector *nv)
+{
+ int i, j;
+
+ /* Disassociate Tx queue from NAPI */
+ for (i = 0; i < nv->txt_count; i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+
+ netif_queue_set_napi(nv->napi.dev, qt->sub0.q_idx,
+ NETDEV_QUEUE_TYPE_TX, NULL);
+ }
+
+ /* Disassociate Rx queue from NAPI */
+ for (j = 0; j < nv->rxt_count; j++, i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+
+ netif_queue_set_napi(nv->napi.dev, qt->cmpl.q_idx,
+ NETDEV_QUEUE_TYPE_RX, NULL);
+ }
+}
+
+int fbnic_set_netif_queues(struct fbnic_net *fbn)
+{
+ int i, err;
+
+ err = netif_set_real_num_queues(fbn->netdev, fbn->num_tx_queues,
+ fbn->num_rx_queues);
+ if (err)
+ return err;
+
+ for (i = 0; i < fbn->num_napi; i++)
+ fbnic_set_netif_napi(fbn->napi[i]);
+
+ return 0;
+}
+
+void fbnic_reset_netif_queues(struct fbnic_net *fbn)
+{
+ int i;
+
+ for (i = 0; i < fbn->num_napi; i++)
+ fbnic_reset_netif_napi(fbn->napi[i]);
+}
+
+static void fbnic_disable_twq0(struct fbnic_ring *txr)
+{
+ u32 twq_ctl = fbnic_ring_rd32(txr, FBNIC_QUEUE_TWQ0_CTL);
+
+ twq_ctl &= ~FBNIC_QUEUE_TWQ_CTL_ENABLE;
+
+ fbnic_ring_wr32(txr, FBNIC_QUEUE_TWQ0_CTL, twq_ctl);
+}
+
+static void fbnic_disable_twq1(struct fbnic_ring *txr)
+{
+ u32 twq_ctl = fbnic_ring_rd32(txr, FBNIC_QUEUE_TWQ1_CTL);
+
+ twq_ctl &= ~FBNIC_QUEUE_TWQ_CTL_ENABLE;
+
+ fbnic_ring_wr32(txr, FBNIC_QUEUE_TWQ1_CTL, twq_ctl);
+}
+
+static void fbnic_disable_tcq(struct fbnic_ring *txr)
+{
+ fbnic_ring_wr32(txr, FBNIC_QUEUE_TCQ_CTL, 0);
+ fbnic_ring_wr32(txr, FBNIC_QUEUE_TIM_MASK, FBNIC_QUEUE_TIM_MASK_MASK);
+}
+
+static void fbnic_disable_bdq(struct fbnic_ring *hpq, struct fbnic_ring *ppq)
+{
+ u32 bdq_ctl = fbnic_ring_rd32(hpq, FBNIC_QUEUE_BDQ_CTL);
+
+ bdq_ctl &= ~FBNIC_QUEUE_BDQ_CTL_ENABLE;
+
+ fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_CTL, bdq_ctl);
+}
+
+static void fbnic_disable_rcq(struct fbnic_ring *rxr)
+{
+ fbnic_ring_wr32(rxr, FBNIC_QUEUE_RCQ_CTL, 0);
+ fbnic_ring_wr32(rxr, FBNIC_QUEUE_RIM_MASK, FBNIC_QUEUE_RIM_MASK_MASK);
+}
+
+void fbnic_napi_disable(struct fbnic_net *fbn)
+{
+ int i;
+
+ for (i = 0; i < fbn->num_napi; i++) {
+ napi_disable_locked(&fbn->napi[i]->napi);
+
+ fbnic_nv_irq_disable(fbn->napi[i]);
+ }
+}
+
+static void __fbnic_nv_disable(struct fbnic_napi_vector *nv)
+{
+ int i, t;
+
+ /* Disable Tx queue triads */
+ for (t = 0; t < nv->txt_count; t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
+
+ fbnic_disable_twq0(&qt->sub0);
+ fbnic_disable_twq1(&qt->sub1);
+ fbnic_disable_tcq(&qt->cmpl);
+ }
+
+ /* Disable Rx queue triads */
+ for (i = 0; i < nv->rxt_count; i++, t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
+
+ fbnic_disable_bdq(&qt->sub0, &qt->sub1);
+ fbnic_disable_rcq(&qt->cmpl);
+ }
+}
+
+static void
+fbnic_nv_disable(struct fbnic_net *fbn, struct fbnic_napi_vector *nv)
+{
+ __fbnic_nv_disable(nv);
+ fbnic_wrfl(fbn->fbd);
+}
+
+void fbnic_disable(struct fbnic_net *fbn)
+{
+ struct fbnic_dev *fbd = fbn->fbd;
+ int i;
+
+ for (i = 0; i < fbn->num_napi; i++)
+ __fbnic_nv_disable(fbn->napi[i]);
+
+ fbnic_wrfl(fbd);
+}
+
+static void fbnic_tx_flush(struct fbnic_dev *fbd)
+{
+ netdev_warn(fbd->netdev, "triggering Tx flush\n");
+
+ fbnic_rmw32(fbd, FBNIC_TMI_DROP_CTRL, FBNIC_TMI_DROP_CTRL_EN,
+ FBNIC_TMI_DROP_CTRL_EN);
+}
+
+static void fbnic_tx_flush_off(struct fbnic_dev *fbd)
+{
+ fbnic_rmw32(fbd, FBNIC_TMI_DROP_CTRL, FBNIC_TMI_DROP_CTRL_EN, 0);
+}
+
+struct fbnic_idle_regs {
+ u32 reg_base;
+ u8 reg_cnt;
+};
+
+static bool fbnic_all_idle(struct fbnic_dev *fbd,
+ const struct fbnic_idle_regs *regs,
+ unsigned int nregs)
+{
+ unsigned int i, j;
+
+ for (i = 0; i < nregs; i++) {
+ for (j = 0; j < regs[i].reg_cnt; j++) {
+ if (fbnic_rd32(fbd, regs[i].reg_base + j) != ~0U)
+ return false;
+ }
+ }
+ return true;
+}
+
+static void fbnic_idle_dump(struct fbnic_dev *fbd,
+ const struct fbnic_idle_regs *regs,
+ unsigned int nregs, const char *dir, int err)
+{
+ unsigned int i, j;
+
+ netdev_err(fbd->netdev, "error waiting for %s idle %d\n", dir, err);
+ for (i = 0; i < nregs; i++)
+ for (j = 0; j < regs[i].reg_cnt; j++)
+ netdev_err(fbd->netdev, "0x%04x: %08x\n",
+ regs[i].reg_base + j,
+ fbnic_rd32(fbd, regs[i].reg_base + j));
+}
+
+int fbnic_wait_all_queues_idle(struct fbnic_dev *fbd, bool may_fail)
+{
+ static const struct fbnic_idle_regs tx[] = {
+ { FBNIC_QM_TWQ_IDLE(0), FBNIC_QM_TWQ_IDLE_CNT, },
+ { FBNIC_QM_TQS_IDLE(0), FBNIC_QM_TQS_IDLE_CNT, },
+ { FBNIC_QM_TDE_IDLE(0), FBNIC_QM_TDE_IDLE_CNT, },
+ { FBNIC_QM_TCQ_IDLE(0), FBNIC_QM_TCQ_IDLE_CNT, },
+ }, rx[] = {
+ { FBNIC_QM_HPQ_IDLE(0), FBNIC_QM_HPQ_IDLE_CNT, },
+ { FBNIC_QM_PPQ_IDLE(0), FBNIC_QM_PPQ_IDLE_CNT, },
+ { FBNIC_QM_RCQ_IDLE(0), FBNIC_QM_RCQ_IDLE_CNT, },
+ };
+ bool idle;
+ int err;
+
+ err = read_poll_timeout_atomic(fbnic_all_idle, idle, idle, 2, 500000,
+ false, fbd, tx, ARRAY_SIZE(tx));
+ if (err == -ETIMEDOUT) {
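+		/* Tx didn't go idle in time. Turn on the Tx drop control to
+		 * flush out anything still queued, poll again, then turn it
+		 * back off.
+		 */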
+ fbnic_tx_flush(fbd);
+ err = read_poll_timeout_atomic(fbnic_all_idle, idle, idle,
+ 2, 500000, false,
+ fbd, tx, ARRAY_SIZE(tx));
+ fbnic_tx_flush_off(fbd);
+ }
+ if (err) {
+ fbnic_idle_dump(fbd, tx, ARRAY_SIZE(tx), "Tx", err);
+ if (may_fail)
+ return err;
+ }
+
+ err = read_poll_timeout_atomic(fbnic_all_idle, idle, idle, 2, 500000,
+ false, fbd, rx, ARRAY_SIZE(rx));
+ if (err)
+ fbnic_idle_dump(fbd, rx, ARRAY_SIZE(rx), "Rx", err);
+ return err;
+}
+
+static int
+fbnic_wait_queue_idle(struct fbnic_net *fbn, bool rx, unsigned int idx)
+{
+ static const unsigned int tx_regs[] = {
+ FBNIC_QM_TWQ_IDLE(0), FBNIC_QM_TQS_IDLE(0),
+ FBNIC_QM_TDE_IDLE(0), FBNIC_QM_TCQ_IDLE(0),
+ }, rx_regs[] = {
+ FBNIC_QM_HPQ_IDLE(0), FBNIC_QM_PPQ_IDLE(0),
+ FBNIC_QM_RCQ_IDLE(0),
+ };
+ struct fbnic_dev *fbd = fbn->fbd;
+ unsigned int val, mask, off;
+ const unsigned int *regs;
+ unsigned int reg_cnt;
+ int i, err;
+
+ regs = rx ? rx_regs : tx_regs;
+ reg_cnt = rx ? ARRAY_SIZE(rx_regs) : ARRAY_SIZE(tx_regs);
+
+ off = idx / 32;
+ mask = BIT(idx % 32);
+
+ for (i = 0; i < reg_cnt; i++) {
+ err = read_poll_timeout_atomic(fbnic_rd32, val, val & mask,
+ 2, 500000, false,
+ fbd, regs[i] + off);
+ if (err) {
+ netdev_err(fbd->netdev,
+ "wait for queue %s%d idle failed 0x%04x(%d): %08x (mask: %08x)\n",
+ rx ? "Rx" : "Tx", idx, regs[i] + off, i,
+ val, mask);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void fbnic_nv_flush(struct fbnic_napi_vector *nv)
+{
+ int j, t;
+
+ /* Flush any processed Tx Queue Triads and drop the rest */
+ for (t = 0; t < nv->txt_count; t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
+ struct netdev_queue *tx_queue;
+
+ /* Clean the work queues of unprocessed work */
+ fbnic_clean_twq0(nv, 0, &qt->sub0, true, qt->sub0.tail);
+ fbnic_clean_twq1(nv, false, &qt->sub1, true,
+ qt->sub1.tail);
+
+ /* Reset completion queue descriptor ring */
+ memset(qt->cmpl.desc, 0, qt->cmpl.size);
+
+ /* Nothing else to do if Tx queue is disabled */
+ if (qt->sub0.flags & FBNIC_RING_F_DISABLED)
+ continue;
+
+ /* Reset BQL associated with Tx queue */
+ tx_queue = netdev_get_tx_queue(nv->napi.dev,
+ qt->sub0.q_idx);
+ netdev_tx_reset_queue(tx_queue);
+ }
+
+ /* Flush any processed Rx Queue Triads and drop the rest */
+ for (j = 0; j < nv->rxt_count; j++, t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
+
+ /* Clean the work queues of unprocessed work */
+ fbnic_clean_bdq(&qt->sub0, qt->sub0.tail, 0);
+ fbnic_clean_bdq(&qt->sub1, qt->sub1.tail, 0);
+
+ /* Reset completion queue descriptor ring */
+ memset(qt->cmpl.desc, 0, qt->cmpl.size);
+
+ fbnic_put_pkt_buff(qt, qt->cmpl.pkt, 0);
+ memset(qt->cmpl.pkt, 0, sizeof(struct fbnic_pkt_buff));
+ }
+}
+
+void fbnic_flush(struct fbnic_net *fbn)
+{
+ int i;
+
+ for (i = 0; i < fbn->num_napi; i++)
+ fbnic_nv_flush(fbn->napi[i]);
+}
+
+static void fbnic_nv_fill(struct fbnic_napi_vector *nv)
+{
+ int j, t;
+
+	/* Populate pages in the BDQ rings to use for Rx */
+ for (j = 0, t = nv->txt_count; j < nv->rxt_count; j++, t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
+
+ /* Populate the header and payload BDQs */
+ fbnic_fill_bdq(&qt->sub0);
+ fbnic_fill_bdq(&qt->sub1);
+ }
+}
+
+void fbnic_fill(struct fbnic_net *fbn)
+{
+ int i;
+
+ for (i = 0; i < fbn->num_napi; i++)
+ fbnic_nv_fill(fbn->napi[i]);
+}
+
+static void fbnic_enable_twq0(struct fbnic_ring *twq)
+{
+ u32 log_size = fls(twq->size_mask);
+
+ if (!twq->size_mask)
+ return;
+
+ /* Reset head/tail */
+ fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ0_CTL, FBNIC_QUEUE_TWQ_CTL_RESET);
+ twq->tail = 0;
+ twq->head = 0;
+
+ /* Store descriptor ring address and size */
+ fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ0_BAL, lower_32_bits(twq->dma));
+ fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ0_BAH, upper_32_bits(twq->dma));
+
+ /* Write lower 4 bits of log size as 64K ring size is 0 */
+ fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ0_SIZE, log_size & 0xf);
+
+ fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ0_CTL, FBNIC_QUEUE_TWQ_CTL_ENABLE);
+}
+
+static void fbnic_enable_twq1(struct fbnic_ring *twq)
+{
+ u32 log_size = fls(twq->size_mask);
+
+ if (!twq->size_mask)
+ return;
+
+ /* Reset head/tail */
+ fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ1_CTL, FBNIC_QUEUE_TWQ_CTL_RESET);
+ twq->tail = 0;
+ twq->head = 0;
+
+ /* Store descriptor ring address and size */
+ fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ1_BAL, lower_32_bits(twq->dma));
+ fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ1_BAH, upper_32_bits(twq->dma));
+
+ /* Write lower 4 bits of log size as 64K ring size is 0 */
+ fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ1_SIZE, log_size & 0xf);
+
+ fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ1_CTL, FBNIC_QUEUE_TWQ_CTL_ENABLE);
+}
+
+static void fbnic_enable_tcq(struct fbnic_napi_vector *nv,
+ struct fbnic_ring *tcq)
+{
+ u32 log_size = fls(tcq->size_mask);
+
+ if (!tcq->size_mask)
+ return;
+
+ /* Reset head/tail */
+ fbnic_ring_wr32(tcq, FBNIC_QUEUE_TCQ_CTL, FBNIC_QUEUE_TCQ_CTL_RESET);
+ tcq->tail = 0;
+ tcq->head = 0;
+
+ /* Store descriptor ring address and size */
+ fbnic_ring_wr32(tcq, FBNIC_QUEUE_TCQ_BAL, lower_32_bits(tcq->dma));
+ fbnic_ring_wr32(tcq, FBNIC_QUEUE_TCQ_BAH, upper_32_bits(tcq->dma));
+
+ /* Write lower 4 bits of log size as 64K ring size is 0 */
+ fbnic_ring_wr32(tcq, FBNIC_QUEUE_TCQ_SIZE, log_size & 0xf);
+
+ /* Store interrupt information for the completion queue */
+ fbnic_ring_wr32(tcq, FBNIC_QUEUE_TIM_CTL, nv->v_idx);
+ fbnic_ring_wr32(tcq, FBNIC_QUEUE_TIM_THRESHOLD, tcq->size_mask / 2);
+ fbnic_ring_wr32(tcq, FBNIC_QUEUE_TIM_MASK, 0);
+
+ /* Enable queue */
+ fbnic_ring_wr32(tcq, FBNIC_QUEUE_TCQ_CTL, FBNIC_QUEUE_TCQ_CTL_ENABLE);
+}
+
+static void fbnic_enable_bdq(struct fbnic_ring *hpq, struct fbnic_ring *ppq)
+{
+ u32 bdq_ctl = FBNIC_QUEUE_BDQ_CTL_ENABLE;
+ u32 log_size;
+
+ /* Reset head/tail */
+ fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_CTL, FBNIC_QUEUE_BDQ_CTL_RESET);
+ ppq->tail = 0;
+ ppq->head = 0;
+ hpq->tail = 0;
+ hpq->head = 0;
+
+ log_size = fls(hpq->size_mask);
+
+ /* Store descriptor ring address and size */
+ fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_HPQ_BAL, lower_32_bits(hpq->dma));
+ fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_HPQ_BAH, upper_32_bits(hpq->dma));
+
+ /* Write lower 4 bits of log size as 64K ring size is 0 */
+ fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_HPQ_SIZE, log_size & 0xf);
+
+ if (!ppq->size_mask)
+ goto write_ctl;
+
+ log_size = fls(ppq->size_mask);
+
+ /* Add enabling of PPQ to BDQ control */
+ bdq_ctl |= FBNIC_QUEUE_BDQ_CTL_PPQ_ENABLE;
+
+ /* Store descriptor ring address and size */
+ fbnic_ring_wr32(ppq, FBNIC_QUEUE_BDQ_PPQ_BAL, lower_32_bits(ppq->dma));
+ fbnic_ring_wr32(ppq, FBNIC_QUEUE_BDQ_PPQ_BAH, upper_32_bits(ppq->dma));
+ fbnic_ring_wr32(ppq, FBNIC_QUEUE_BDQ_PPQ_SIZE, log_size & 0xf);
+
+write_ctl:
+ fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_CTL, bdq_ctl);
+}
+
+static void fbnic_config_drop_mode_rcq(struct fbnic_napi_vector *nv,
+ struct fbnic_ring *rcq, bool tx_pause)
+{
+ struct fbnic_net *fbn = netdev_priv(nv->napi.dev);
+ u32 drop_mode, rcq_ctl;
+
+ if (!tx_pause && fbn->num_rx_queues > 1)
+ drop_mode = FBNIC_QUEUE_RDE_CTL0_DROP_IMMEDIATE;
+ else
+ drop_mode = FBNIC_QUEUE_RDE_CTL0_DROP_NEVER;
+
+ /* Specify packet layout */
+ rcq_ctl = FIELD_PREP(FBNIC_QUEUE_RDE_CTL0_DROP_MODE_MASK, drop_mode) |
+ FIELD_PREP(FBNIC_QUEUE_RDE_CTL0_MIN_HROOM_MASK, FBNIC_RX_HROOM) |
+ FIELD_PREP(FBNIC_QUEUE_RDE_CTL0_MIN_TROOM_MASK, FBNIC_RX_TROOM);
+
+ fbnic_ring_wr32(rcq, FBNIC_QUEUE_RDE_CTL0, rcq_ctl);
+}
+
+void fbnic_config_drop_mode(struct fbnic_net *fbn, bool tx_pause)
+{
+ int i, t;
+
+ for (i = 0; i < fbn->num_napi; i++) {
+ struct fbnic_napi_vector *nv = fbn->napi[i];
+
+ for (t = 0; t < nv->rxt_count; t++) {
+ struct fbnic_q_triad *qt = &nv->qt[nv->txt_count + t];
+
+ fbnic_config_drop_mode_rcq(nv, &qt->cmpl, tx_pause);
+ }
+ }
+}
+
+static void fbnic_config_rim_threshold(struct fbnic_ring *rcq, u16 nv_idx, u32 rx_desc)
+{
+ u32 threshold;
+
+	/* Set the threshold to half the ring size if rx_frames
+ * is not configured
+ */
+ threshold = rx_desc ? : rcq->size_mask / 2;
+
+ fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_CTL, nv_idx);
+ fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_THRESHOLD, threshold);
+}
+
+void fbnic_config_txrx_usecs(struct fbnic_napi_vector *nv, u32 arm)
+{
+ struct fbnic_net *fbn = netdev_priv(nv->napi.dev);
+ struct fbnic_dev *fbd = nv->fbd;
+ u32 val = arm;
+
+ val |= FIELD_PREP(FBNIC_INTR_CQ_REARM_RCQ_TIMEOUT, fbn->rx_usecs) |
+ FBNIC_INTR_CQ_REARM_RCQ_TIMEOUT_UPD_EN;
+ val |= FIELD_PREP(FBNIC_INTR_CQ_REARM_TCQ_TIMEOUT, fbn->tx_usecs) |
+ FBNIC_INTR_CQ_REARM_TCQ_TIMEOUT_UPD_EN;
+
+ fbnic_wr32(fbd, FBNIC_INTR_CQ_REARM(nv->v_idx), val);
+}
+
+void fbnic_config_rx_frames(struct fbnic_napi_vector *nv)
+{
+ struct fbnic_net *fbn = netdev_priv(nv->napi.dev);
+ int i;
+
+ for (i = nv->txt_count; i < nv->rxt_count + nv->txt_count; i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+
+ fbnic_config_rim_threshold(&qt->cmpl, nv->v_idx,
+ fbn->rx_max_frames *
+ FBNIC_MIN_RXD_PER_FRAME);
+ }
+}
+
+static void fbnic_enable_rcq(struct fbnic_napi_vector *nv,
+ struct fbnic_ring *rcq)
+{
+ struct fbnic_net *fbn = netdev_priv(nv->napi.dev);
+ u32 log_size = fls(rcq->size_mask);
+ u32 hds_thresh = fbn->hds_thresh;
+ u32 rcq_ctl = 0;
+
+ fbnic_config_drop_mode_rcq(nv, rcq, fbn->tx_pause);
+
+ /* Force lower bound on MAX_HEADER_BYTES. Below this, all frames should
+ * be split at L4. It would also result in the frames being split at
+ * L2/L3 depending on the frame size.
+ */
+ if (fbn->hds_thresh < FBNIC_HDR_BYTES_MIN) {
+ rcq_ctl = FBNIC_QUEUE_RDE_CTL0_EN_HDR_SPLIT;
+ hds_thresh = FBNIC_HDR_BYTES_MIN;
+ }
+
+ rcq_ctl |= FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_PADLEN_MASK, FBNIC_RX_PAD) |
+ FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_MAX_HDR_MASK, hds_thresh) |
+ FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_PAYLD_OFF_MASK,
+ FBNIC_RX_PAYLD_OFFSET) |
+ FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_PAYLD_PG_CL_MASK,
+ FBNIC_RX_PAYLD_PG_CL);
+ fbnic_ring_wr32(rcq, FBNIC_QUEUE_RDE_CTL1, rcq_ctl);
+
+ /* Reset head/tail */
+ fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_CTL, FBNIC_QUEUE_RCQ_CTL_RESET);
+ rcq->head = 0;
+ rcq->tail = 0;
+
+ /* Store descriptor ring address and size */
+ fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_BAL, lower_32_bits(rcq->dma));
+ fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_BAH, upper_32_bits(rcq->dma));
+
+ /* Write lower 4 bits of log size as 64K ring size is 0 */
+ fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_SIZE, log_size & 0xf);
+
+ /* Store interrupt information for the completion queue */
+ fbnic_config_rim_threshold(rcq, nv->v_idx, fbn->rx_max_frames *
+ FBNIC_MIN_RXD_PER_FRAME);
+ fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_MASK, 0);
+
+ /* Enable queue */
+ fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_CTL, FBNIC_QUEUE_RCQ_CTL_ENABLE);
+}
+
+static void __fbnic_nv_enable(struct fbnic_napi_vector *nv)
+{
+ int j, t;
+
+ /* Setup Tx Queue Triads */
+ for (t = 0; t < nv->txt_count; t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
+
+ fbnic_enable_twq0(&qt->sub0);
+ fbnic_enable_twq1(&qt->sub1);
+ fbnic_enable_tcq(nv, &qt->cmpl);
+ }
+
+ /* Setup Rx Queue Triads */
+ for (j = 0; j < nv->rxt_count; j++, t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
+
+ page_pool_enable_direct_recycling(qt->sub0.page_pool,
+ &nv->napi);
+ page_pool_enable_direct_recycling(qt->sub1.page_pool,
+ &nv->napi);
+
+ fbnic_enable_bdq(&qt->sub0, &qt->sub1);
+ fbnic_enable_rcq(nv, &qt->cmpl);
+ }
+}
+
+static void fbnic_nv_enable(struct fbnic_net *fbn, struct fbnic_napi_vector *nv)
+{
+ __fbnic_nv_enable(nv);
+ fbnic_wrfl(fbn->fbd);
+}
+
+void fbnic_enable(struct fbnic_net *fbn)
+{
+ struct fbnic_dev *fbd = fbn->fbd;
+ int i;
+
+ for (i = 0; i < fbn->num_napi; i++)
+ __fbnic_nv_enable(fbn->napi[i]);
+
+ fbnic_wrfl(fbd);
+}
+
+static void fbnic_nv_irq_enable(struct fbnic_napi_vector *nv)
+{
+ fbnic_config_txrx_usecs(nv, FBNIC_INTR_CQ_REARM_INTR_UNMASK);
+}
+
+void fbnic_napi_enable(struct fbnic_net *fbn)
+{
+ u32 irqs[FBNIC_MAX_MSIX_VECS / 32] = {};
+ struct fbnic_dev *fbd = fbn->fbd;
+ int i;
+
+ for (i = 0; i < fbn->num_napi; i++) {
+ struct fbnic_napi_vector *nv = fbn->napi[i];
+
+ napi_enable_locked(&nv->napi);
+
+ fbnic_nv_irq_enable(nv);
+
+ /* Record bit used for NAPI IRQs so we can
+ * set the mask appropriately
+ */
+ irqs[nv->v_idx / 32] |= BIT(nv->v_idx % 32);
+ }
+
+ /* Force the first interrupt on the device to guarantee
+ * that any packets that may have been enqueued during the
+ * bringup are processed.
+ */
+ for (i = 0; i < ARRAY_SIZE(irqs); i++) {
+ if (!irqs[i])
+ continue;
+ fbnic_wr32(fbd, FBNIC_INTR_SET(i), irqs[i]);
+ }
+
+ fbnic_wrfl(fbd);
+}
+
+void fbnic_napi_depletion_check(struct net_device *netdev)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ u32 irqs[FBNIC_MAX_MSIX_VECS / 32] = {};
+ struct fbnic_dev *fbd = fbn->fbd;
+ int i, j, t;
+
+ for (i = 0; i < fbn->num_napi; i++) {
+ struct fbnic_napi_vector *nv = fbn->napi[i];
+
+ /* Find RQs which are completely out of pages */
+ for (t = nv->txt_count, j = 0; j < nv->rxt_count; j++, t++) {
+ /* Assume 4 pages is always enough to fit a packet
+ * and therefore generate a completion and an IRQ.
+ */
+ if (fbnic_desc_used(&nv->qt[t].sub0) < 4 ||
+ fbnic_desc_used(&nv->qt[t].sub1) < 4)
+ irqs[nv->v_idx / 32] |= BIT(nv->v_idx % 32);
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(irqs); i++) {
+ if (!irqs[i])
+ continue;
+ fbnic_wr32(fbd, FBNIC_INTR_MASK_CLEAR(i), irqs[i]);
+ fbnic_wr32(fbd, FBNIC_INTR_SET(i), irqs[i]);
+ }
+
+ fbnic_wrfl(fbd);
+}
+
+static int fbnic_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
+{
+ struct fbnic_net *fbn = netdev_priv(dev);
+ const struct fbnic_q_triad *real;
+ struct fbnic_q_triad *qt = qmem;
+ struct fbnic_napi_vector *nv;
+
+ if (!netif_running(dev))
+ return fbnic_alloc_qt_page_pools(fbn, qt, idx);
+
+ real = container_of(fbn->rx[idx], struct fbnic_q_triad, cmpl);
+ nv = fbn->napi[idx % fbn->num_napi];
+
+ fbnic_ring_init(&qt->sub0, real->sub0.doorbell, real->sub0.q_idx,
+ real->sub0.flags);
+ fbnic_ring_init(&qt->sub1, real->sub1.doorbell, real->sub1.q_idx,
+ real->sub1.flags);
+ fbnic_ring_init(&qt->cmpl, real->cmpl.doorbell, real->cmpl.q_idx,
+ real->cmpl.flags);
+
+ return fbnic_alloc_rx_qt_resources(fbn, nv, qt);
+}
+
+static void fbnic_queue_mem_free(struct net_device *dev, void *qmem)
+{
+ struct fbnic_net *fbn = netdev_priv(dev);
+ struct fbnic_q_triad *qt = qmem;
+
+ if (!netif_running(dev))
+ fbnic_free_qt_page_pools(qt);
+ else
+ fbnic_free_qt_resources(fbn, qt);
+}
+
+static void __fbnic_nv_restart(struct fbnic_net *fbn,
+ struct fbnic_napi_vector *nv)
+{
+ struct fbnic_dev *fbd = fbn->fbd;
+ int i;
+
+ fbnic_nv_enable(fbn, nv);
+ fbnic_nv_fill(nv);
+
+ napi_enable_locked(&nv->napi);
+ fbnic_nv_irq_enable(nv);
+ fbnic_wr32(fbd, FBNIC_INTR_SET(nv->v_idx / 32), BIT(nv->v_idx % 32));
+ fbnic_wrfl(fbd);
+
+ for (i = 0; i < nv->txt_count; i++)
+ netif_wake_subqueue(fbn->netdev, nv->qt[i].sub0.q_idx);
+}
+
+static int fbnic_queue_start(struct net_device *dev, void *qmem, int idx)
+{
+ struct fbnic_net *fbn = netdev_priv(dev);
+ struct fbnic_napi_vector *nv;
+ struct fbnic_q_triad *real;
+
+ real = container_of(fbn->rx[idx], struct fbnic_q_triad, cmpl);
+ nv = fbn->napi[idx % fbn->num_napi];
+
+ fbnic_aggregate_ring_bdq_counters(fbn, &real->sub0);
+ fbnic_aggregate_ring_bdq_counters(fbn, &real->sub1);
+ fbnic_aggregate_ring_rx_counters(fbn, &real->cmpl);
+
+ memcpy(real, qmem, sizeof(*real));
+
+ __fbnic_nv_restart(fbn, nv);
+
+ return 0;
+}
+
+static int fbnic_queue_stop(struct net_device *dev, void *qmem, int idx)
+{
+ struct fbnic_net *fbn = netdev_priv(dev);
+ const struct fbnic_q_triad *real;
+ struct fbnic_napi_vector *nv;
+ int i, t;
+ int err;
+
+ real = container_of(fbn->rx[idx], struct fbnic_q_triad, cmpl);
+ nv = fbn->napi[idx % fbn->num_napi];
+
+ napi_disable_locked(&nv->napi);
+ fbnic_nv_irq_disable(nv);
+
+ for (i = 0; i < nv->txt_count; i++)
+ netif_stop_subqueue(dev, nv->qt[i].sub0.q_idx);
+ fbnic_nv_disable(fbn, nv);
+
+ for (t = 0; t < nv->txt_count + nv->rxt_count; t++) {
+ err = fbnic_wait_queue_idle(fbn, t >= nv->txt_count,
+ nv->qt[t].sub0.q_idx);
+ if (err)
+ goto err_restart;
+ }
+
+ fbnic_synchronize_irq(fbn->fbd, nv->v_idx);
+ fbnic_nv_flush(nv);
+
+ page_pool_disable_direct_recycling(real->sub0.page_pool);
+ page_pool_disable_direct_recycling(real->sub1.page_pool);
+
+ memcpy(qmem, real, sizeof(*real));
+
+ return 0;
+
+err_restart:
+ __fbnic_nv_restart(fbn, nv);
+ return err;
+}
+
+const struct netdev_queue_mgmt_ops fbnic_queue_mgmt_ops = {
+ .ndo_queue_mem_size = sizeof(struct fbnic_q_triad),
+ .ndo_queue_mem_alloc = fbnic_queue_mem_alloc,
+ .ndo_queue_mem_free = fbnic_queue_mem_free,
+ .ndo_queue_start = fbnic_queue_start,
+ .ndo_queue_stop = fbnic_queue_stop,
+};
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
new file mode 100644
index 000000000000..27776e844e29
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
@@ -0,0 +1,201 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#ifndef _FBNIC_TXRX_H_
+#define _FBNIC_TXRX_H_
+
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <linux/u64_stats_sync.h>
+#include <net/xdp.h>
+
+struct fbnic_net;
+
+/* Guarantee we have space needed for storing the buffer
+ * To store the buffer we need:
+ * 1 descriptor per page
+ * + 1 descriptor for skb head
+ * + 2 descriptors for metadata and optional metadata
+ * + 7 descriptors to keep tail out of the same cacheline as head
+ * If we cannot guarantee that then we should return TX_BUSY
+ */
+#define FBNIC_MAX_SKB_DESC (MAX_SKB_FRAGS + 10)
+#define FBNIC_TX_DESC_WAKEUP (FBNIC_MAX_SKB_DESC * 2)
+#define FBNIC_TX_DESC_MIN roundup_pow_of_two(FBNIC_TX_DESC_WAKEUP)
+
+/* To receive the worst case packet we need:
+ * 1 descriptor for primary metadata
+ * + 1 descriptor for optional metadata
+ * + 1 descriptor for headers
+ * + 4 descriptors for payload
+ */
+#define FBNIC_MAX_RX_PKT_DESC 7
+#define FBNIC_RX_DESC_MIN roundup_pow_of_two(FBNIC_MAX_RX_PKT_DESC * 2)
+
+#define FBNIC_MAX_TXQS 128u
+#define FBNIC_MAX_RXQS 128u
+#define FBNIC_MAX_XDPQS 128u
+
+/* These apply to TWQs, TCQ, RCQ */
+#define FBNIC_QUEUE_SIZE_MIN 16u
+#define FBNIC_QUEUE_SIZE_MAX SZ_64K
+
+#define FBNIC_TXQ_SIZE_DEFAULT 1024
+#define FBNIC_HPQ_SIZE_DEFAULT 256
+#define FBNIC_PPQ_SIZE_DEFAULT 256
+#define FBNIC_RCQ_SIZE_DEFAULT 1024
+#define FBNIC_TX_USECS_DEFAULT 35
+#define FBNIC_RX_USECS_DEFAULT 30
+#define FBNIC_RX_FRAMES_DEFAULT 0
+
+#define FBNIC_RX_TROOM \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
+#define FBNIC_RX_HROOM_PAD 128
+#define FBNIC_RX_HROOM \
+ (ALIGN(FBNIC_RX_TROOM + FBNIC_RX_HROOM_PAD, 128) - FBNIC_RX_TROOM)
+#define FBNIC_RX_PAD 0
+#define FBNIC_RX_PAYLD_OFFSET 0
+#define FBNIC_RX_PAYLD_PG_CL 0
+
+#define FBNIC_RING_F_DISABLED BIT(0)
+#define FBNIC_RING_F_CTX BIT(1)
+#define FBNIC_RING_F_STATS BIT(2) /* Ring's stats may be used */
+
+#define FBNIC_HDS_THRESH_MAX \
+ (4096 - FBNIC_RX_HROOM - FBNIC_RX_TROOM - FBNIC_RX_PAD)
+#define FBNIC_HDS_THRESH_DEFAULT \
+ (1536 - FBNIC_RX_PAD)
+#define FBNIC_HDR_BYTES_MIN 128
+
+struct fbnic_pkt_buff {
+ struct xdp_buff buff;
+ ktime_t hwtstamp;
+ bool add_frag_failed;
+};
+
+struct fbnic_queue_stats {
+ u64 packets;
+ u64 bytes;
+ union {
+ struct {
+ u64 csum_partial;
+ u64 lso;
+ u64 ts_packets;
+ u64 ts_lost;
+ u64 stop;
+ u64 wake;
+ } twq;
+ struct {
+ u64 alloc_failed;
+ u64 csum_complete;
+ u64 csum_none;
+ u64 length_errors;
+ } rx;
+ struct {
+ u64 alloc_failed;
+ } bdq;
+ };
+ u64 dropped;
+ struct u64_stats_sync syncp;
+};
+
+#define FBNIC_PAGECNT_BIAS_MAX PAGE_SIZE
+
+struct fbnic_rx_buf {
+ netmem_ref netmem;
+ long pagecnt_bias;
+};
+
+struct fbnic_ring {
+ /* Pointer to buffer specific info */
+ union {
+ struct fbnic_pkt_buff *pkt; /* RCQ */
+ struct fbnic_rx_buf *rx_buf; /* BDQ */
+ void **tx_buf; /* TWQ */
+ void *buffer; /* Generic pointer */
+ };
+
+ u32 __iomem *doorbell; /* Pointer to CSR space for ring */
+ __le64 *desc; /* Descriptor ring memory */
+ u16 size_mask; /* Size of ring in descriptors - 1 */
+ u8 q_idx; /* Logical netdev ring index */
+ u8 flags; /* Ring flags (FBNIC_RING_F_*) */
+
+ u32 head, tail; /* Head/Tail of ring */
+
+ union {
+ /* Rx BDQs only */
+ struct page_pool *page_pool;
+
+ /* Deferred_head is used to cache the head for TWQ1 if
+ * an attempt is made to clean TWQ1 with zero napi_budget.
+ * We do not use it for any other ring.
+ */
+ s32 deferred_head;
+ };
+
+ struct fbnic_queue_stats stats;
+
+ /* Slow path fields follow */
+ dma_addr_t dma; /* Phys addr of descriptor memory */
+ size_t size; /* Size of descriptor ring in memory */
+};
+
+struct fbnic_q_triad {
+ struct fbnic_ring sub0, sub1, cmpl;
+ struct xdp_rxq_info xdp_rxq;
+};
+
+struct fbnic_napi_vector {
+ struct napi_struct napi;
+ struct device *dev; /* Device for DMA unmapping */
+ struct fbnic_dev *fbd;
+
+ u16 v_idx;
+ u8 txt_count;
+ u8 rxt_count;
+
+ struct fbnic_q_triad qt[];
+};
+
+extern const struct netdev_queue_mgmt_ops fbnic_queue_mgmt_ops;
+
+netdev_tx_t fbnic_xmit_frame(struct sk_buff *skb, struct net_device *dev);
+netdev_features_t
+fbnic_features_check(struct sk_buff *skb, struct net_device *dev,
+ netdev_features_t features);
+
+void fbnic_aggregate_ring_rx_counters(struct fbnic_net *fbn,
+ struct fbnic_ring *rxr);
+void fbnic_aggregate_ring_bdq_counters(struct fbnic_net *fbn,
+ struct fbnic_ring *rxr);
+void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn,
+ struct fbnic_ring *txr);
+void fbnic_aggregate_ring_xdp_counters(struct fbnic_net *fbn,
+ struct fbnic_ring *xdpr);
+
+int fbnic_alloc_napi_vectors(struct fbnic_net *fbn);
+void fbnic_free_napi_vectors(struct fbnic_net *fbn);
+int fbnic_alloc_resources(struct fbnic_net *fbn);
+void fbnic_free_resources(struct fbnic_net *fbn);
+int fbnic_set_netif_queues(struct fbnic_net *fbn);
+void fbnic_reset_netif_queues(struct fbnic_net *fbn);
+irqreturn_t fbnic_msix_clean_rings(int irq, void *data);
+void fbnic_napi_enable(struct fbnic_net *fbn);
+void fbnic_napi_disable(struct fbnic_net *fbn);
+void fbnic_config_drop_mode(struct fbnic_net *fbn, bool tx_pause);
+void fbnic_enable(struct fbnic_net *fbn);
+void fbnic_disable(struct fbnic_net *fbn);
+void fbnic_flush(struct fbnic_net *fbn);
+void fbnic_fill(struct fbnic_net *fbn);
+
+void fbnic_napi_depletion_check(struct net_device *netdev);
+int fbnic_wait_all_queues_idle(struct fbnic_dev *fbd, bool may_fail);
+
+static inline int fbnic_napi_idx(const struct fbnic_napi_vector *nv)
+{
+ return nv->v_idx - FBNIC_NON_NAPI_VECTORS;
+}
+
+#endif /* _FBNIC_TXRX_H_ */
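The BDQ/RCQ size writes above keep only the low four bits of fls(size_mask), so the maximum 64K ring encodes as 0, and the Tx descriptor budget in fbnic_txrx.h reserves MAX_SKB_FRAGS + 10 slots per skb. Here is a minimal standalone sketch of both calculations, assuming MAX_SKB_FRAGS is 17 and using a local stand-in for fls():

#include <stdio.h>

/* Local stand-in for the kernel's fls(): index of the highest set bit. */
static unsigned int fls_u32(unsigned int x)
{
	unsigned int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned int sizes[] = { 256, 1024, 65536 };
	unsigned int max_skb_frags = 17;	/* assumed MAX_SKB_FRAGS */
	unsigned int i;

	/* Low 4 bits of log2(ring size); a 64K ring wraps around to 0. */
	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("ring %6u -> SIZE field 0x%x\n",
		       sizes[i], fls_u32(sizes[i] - 1) & 0xf);

	/* 1 descriptor per fragment + 1 skb head + 2 metadata
	 * + 7 to keep the tail out of the head's cacheline.
	 */
	printf("FBNIC_MAX_SKB_DESC = %u\n", max_skb_frags + 10);

	return 0;
}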
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index ddd87ef71caf..541c41a9077a 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -335,7 +335,7 @@ static void ks8842_reset_hw(struct ks8842_adapter *adapter)
/* When running in DMA Mode the RX interrupt is not enabled in
timberdale because RX data is received by DMA callbacks
it must still be enabled in the KS8842 because it indicates
- to timberdale when there is RX data for it's DMA FIFOs */
+ to timberdale when there is RX data for its DMA FIFOs */
iowrite16(ENABLED_IRQS_DMA_IP, adapter->hw_addr + REG_TIMB_IER);
ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER);
} else {
@@ -1247,7 +1247,7 @@ static struct platform_driver ks8842_platform_driver = {
.name = DRV_NAME,
},
.probe = ks8842_probe,
- .remove_new = ks8842_remove,
+ .remove = ks8842_remove,
};
module_platform_driver(ks8842_platform_driver);
diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c
index d4cdf3d4f552..bb5138806c3f 100644
--- a/drivers/net/ethernet/micrel/ks8851_common.c
+++ b/drivers/net/ethernet/micrel/ks8851_common.c
@@ -216,30 +216,15 @@ static void ks8851_init_mac(struct ks8851_net *ks, struct device_node *np)
}
/**
- * ks8851_dbg_dumpkkt - dump initial packet contents to debug
- * @ks: The device state
- * @rxpkt: The data for the received packet
- *
- * Dump the initial data from the packet to dev_dbg().
- */
-static void ks8851_dbg_dumpkkt(struct ks8851_net *ks, u8 *rxpkt)
-{
- netdev_dbg(ks->netdev,
- "pkt %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n",
- rxpkt[4], rxpkt[5], rxpkt[6], rxpkt[7],
- rxpkt[8], rxpkt[9], rxpkt[10], rxpkt[11],
- rxpkt[12], rxpkt[13], rxpkt[14], rxpkt[15]);
-}
-
-/**
* ks8851_rx_pkts - receive packets from the host
* @ks: The device information.
+ * @rxq: Queue of packets received in this function.
*
* This is called from the IRQ work queue when the system detects that there
* are packets in the receive queue. Find out how many packets there are and
* read them from the FIFO.
*/
-static void ks8851_rx_pkts(struct ks8851_net *ks)
+static void ks8851_rx_pkts(struct ks8851_net *ks, struct sk_buff_head *rxq)
{
struct sk_buff *skb;
unsigned rxfc;
@@ -295,11 +280,11 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
ks->rdfifo(ks, rxpkt, rxalign + 8);
- if (netif_msg_pktdata(ks))
- ks8851_dbg_dumpkkt(ks, rxpkt);
+ netif_dbg(ks, pktdata, ks->netdev,
+ "pkt %12ph\n", &rxpkt[4]);
skb->protocol = eth_type_trans(skb, ks->netdev);
- __netif_rx(skb);
+ __skb_queue_tail(rxq, skb);
ks->netdev->stats.rx_packets++;
ks->netdev->stats.rx_bytes += rxlen;
@@ -326,65 +311,50 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
static irqreturn_t ks8851_irq(int irq, void *_ks)
{
struct ks8851_net *ks = _ks;
- unsigned handled = 0;
+ struct sk_buff_head rxq;
unsigned long flags;
unsigned int status;
-
- local_bh_disable();
+ struct sk_buff *skb;
ks8851_lock(ks, &flags);
status = ks8851_rdreg16(ks, KS_ISR);
+ ks8851_wrreg16(ks, KS_ISR, status);
netif_dbg(ks, intr, ks->netdev,
"%s: status 0x%04x\n", __func__, status);
- if (status & IRQ_LCI)
- handled |= IRQ_LCI;
-
if (status & IRQ_LDI) {
u16 pmecr = ks8851_rdreg16(ks, KS_PMECR);
pmecr &= ~PMECR_WKEVT_MASK;
ks8851_wrreg16(ks, KS_PMECR, pmecr | PMECR_WKEVT_LINK);
-
- handled |= IRQ_LDI;
}
- if (status & IRQ_RXPSI)
- handled |= IRQ_RXPSI;
-
if (status & IRQ_TXI) {
unsigned short tx_space = ks8851_rdreg16(ks, KS_TXMIR);
netif_dbg(ks, intr, ks->netdev,
"%s: txspace %d\n", __func__, tx_space);
- spin_lock(&ks->statelock);
+ spin_lock_bh(&ks->statelock);
ks->tx_space = tx_space;
if (netif_queue_stopped(ks->netdev))
netif_wake_queue(ks->netdev);
- spin_unlock(&ks->statelock);
-
- handled |= IRQ_TXI;
+ spin_unlock_bh(&ks->statelock);
}
- if (status & IRQ_RXI)
- handled |= IRQ_RXI;
-
if (status & IRQ_SPIBEI) {
netdev_err(ks->netdev, "%s: spi bus error\n", __func__);
- handled |= IRQ_SPIBEI;
}
- ks8851_wrreg16(ks, KS_ISR, handled);
-
if (status & IRQ_RXI) {
/* the datasheet says to disable the rx interrupt during
* packet read-out, however we're masking the interrupt
* from the device so do not bother masking just the RX
* from the device. */
- ks8851_rx_pkts(ks);
+ __skb_queue_head_init(&rxq);
+ ks8851_rx_pkts(ks, &rxq);
}
/* if something stopped the rx process, probably due to wanting
@@ -408,7 +378,9 @@ static irqreturn_t ks8851_irq(int irq, void *_ks)
if (status & IRQ_LCI)
mii_check_link(&ks->mii);
- local_bh_enable();
+ if (status & IRQ_RXI)
+ while ((skb = __skb_dequeue(&rxq)))
+ netif_rx(skb);
return IRQ_HANDLED;
}
@@ -494,6 +466,7 @@ static int ks8851_net_open(struct net_device *dev)
ks8851_wrreg16(ks, KS_IER, ks->rc_ier);
ks->queued_len = 0;
+ ks->tx_space = ks8851_rdreg16(ks, KS_TXMIR);
netif_start_queue(ks->netdev);
netif_dbg(ks, ifup, ks->netdev, "network device up\n");
@@ -647,14 +620,14 @@ static void ks8851_set_rx_mode(struct net_device *dev)
/* schedule work to do the actual set of the data if needed */
- spin_lock(&ks->statelock);
+ spin_lock_bh(&ks->statelock);
if (memcmp(&rxctrl, &ks->rxctrl, sizeof(rxctrl)) != 0) {
memcpy(&ks->rxctrl, &rxctrl, sizeof(ks->rxctrl));
schedule_work(&ks->rxctrl_work);
}
- spin_unlock(&ks->statelock);
+ spin_unlock_bh(&ks->statelock);
}
static int ks8851_set_mac_address(struct net_device *dev, void *addr)
@@ -1113,7 +1086,6 @@ int ks8851_probe_common(struct net_device *netdev, struct device *dev,
int ret;
ks->netdev = netdev;
- ks->tx_space = 6144;
ks->gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
ret = PTR_ERR_OR_ZERO(ks->gpio);
diff --git a/drivers/net/ethernet/micrel/ks8851_par.c b/drivers/net/ethernet/micrel/ks8851_par.c
index 381b9cd285eb..78695be2570b 100644
--- a/drivers/net/ethernet/micrel/ks8851_par.c
+++ b/drivers/net/ethernet/micrel/ks8851_par.c
@@ -334,7 +334,7 @@ static struct platform_driver ks8851_driver = {
.pm = &ks8851_pm_ops,
},
.probe = ks8851_probe_par,
- .remove_new = ks8851_remove_par,
+ .remove = ks8851_remove_par,
};
module_platform_driver(ks8851_driver);
diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c
index 670c1de966db..c862b13b447a 100644
--- a/drivers/net/ethernet/micrel/ks8851_spi.c
+++ b/drivers/net/ethernet/micrel/ks8851_spi.c
@@ -20,8 +20,6 @@
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
#include <linux/of_net.h>
#include "ks8851.h"
@@ -340,10 +338,10 @@ static void ks8851_tx_work(struct work_struct *work)
tx_space = ks8851_rdreg16_spi(ks, KS_TXMIR);
- spin_lock(&ks->statelock);
+ spin_lock_bh(&ks->statelock);
ks->queued_len -= dequeued_len;
ks->tx_space = tx_space;
- spin_unlock(&ks->statelock);
+ spin_unlock_bh(&ks->statelock);
ks8851_unlock_spi(ks, &flags);
}
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index c5aeeb964c17..cdde19b8edc4 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -3951,7 +3951,7 @@ static void ksz_stop_timer(struct ksz_timer_info *info)
{
if (info->max) {
info->max = 0;
- del_timer_sync(&info->timer);
+ timer_delete_sync(&info->timer);
}
}
@@ -5427,7 +5427,7 @@ static int netdev_change_mtu(struct net_device *dev, int new_mtu)
}
hw_mtu = (hw_mtu + 3) & ~3;
hw_priv->mtu = hw_mtu;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
@@ -6304,7 +6304,8 @@ static void mib_read_work(struct work_struct *work)
static void mib_monitor(struct timer_list *t)
{
- struct dev_info *hw_priv = from_timer(hw_priv, t, mib_timer_info.timer);
+ struct dev_info *hw_priv = timer_container_of(hw_priv, t,
+ mib_timer_info.timer);
mib_read_work(&hw_priv->mib_read);
@@ -6331,7 +6332,8 @@ static void mib_monitor(struct timer_list *t)
*/
static void dev_monitor(struct timer_list *t)
{
- struct dev_priv *priv = from_timer(priv, t, monitor_timer_info.timer);
+ struct dev_priv *priv = timer_container_of(priv, t,
+ monitor_timer_info.timer);
struct net_device *dev = priv->mii_if.dev;
struct dev_info *hw_priv = priv->adapter;
struct ksz_hw *hw = &hw_priv->hw;
diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig
index 43ba71e82260..ee046468652c 100644
--- a/drivers/net/ethernet/microchip/Kconfig
+++ b/drivers/net/ethernet/microchip/Kconfig
@@ -46,18 +46,21 @@ config LAN743X
tristate "LAN743x support"
depends on PCI
depends on PTP_1588_CLOCK_OPTIONAL
- select PHYLIB
select FIXED_PHY
select CRC16
select CRC32
+ select PHYLINK
help
- Support for the Microchip LAN743x PCI Express Gigabit Ethernet chip
+ Support for the Microchip LAN743x and PCI11x1x families of PCI
+ Express Ethernet devices
To compile this driver as a module, choose M here. The module will be
called lan743x.
+source "drivers/net/ethernet/microchip/lan865x/Kconfig"
source "drivers/net/ethernet/microchip/lan966x/Kconfig"
source "drivers/net/ethernet/microchip/sparx5/Kconfig"
source "drivers/net/ethernet/microchip/vcap/Kconfig"
+source "drivers/net/ethernet/microchip/fdma/Kconfig"
endif # NET_VENDOR_MICROCHIP
diff --git a/drivers/net/ethernet/microchip/Makefile b/drivers/net/ethernet/microchip/Makefile
index bbd349264e6f..3c65baed9fd8 100644
--- a/drivers/net/ethernet/microchip/Makefile
+++ b/drivers/net/ethernet/microchip/Makefile
@@ -9,6 +9,8 @@ obj-$(CONFIG_LAN743X) += lan743x.o
lan743x-objs := lan743x_main.o lan743x_ethtool.o lan743x_ptp.o
+obj-$(CONFIG_LAN865X) += lan865x/
obj-$(CONFIG_LAN966X_SWITCH) += lan966x/
obj-$(CONFIG_SPARX5_SWITCH) += sparx5/
obj-$(CONFIG_VCAP) += vcap/
+obj-$(CONFIG_FDMA) += fdma/
diff --git a/drivers/net/ethernet/microchip/encx24j600-regmap.c b/drivers/net/ethernet/microchip/encx24j600-regmap.c
index 443128adbcb6..26b00e66d912 100644
--- a/drivers/net/ethernet/microchip/encx24j600-regmap.c
+++ b/drivers/net/ethernet/microchip/encx24j600-regmap.c
@@ -75,7 +75,7 @@ static int regmap_encx24j600_sfr_read(void *context, u8 reg, u8 *val,
if (unlikely(ret))
return ret;
} else {
- /* Translate registers that are more effecient using
+ /* Translate registers that are more efficient using
* 3-byte SPI commands
*/
switch (reg) {
@@ -129,7 +129,7 @@ static int regmap_encx24j600_sfr_update(struct encx24j600_context *ctx,
if (unlikely(ret))
return ret;
} else {
- /* Translate registers that are more effecient using
+ /* Translate registers that are more efficient using
* 3-byte SPI commands
*/
switch (reg) {
@@ -474,13 +474,13 @@ static struct regmap_config regcfg = {
.unlock = regmap_unlock_mutex,
};
-static struct regmap_bus regmap_encx24j600 = {
+static const struct regmap_bus regmap_encx24j600 = {
.write = regmap_encx24j600_write,
.read = regmap_encx24j600_read,
.reg_update_bits = regmap_encx24j600_reg_update_bits,
};
-static struct regmap_config phycfg = {
+static const struct regmap_config phycfg = {
.name = "phy",
.reg_bits = 8,
.val_bits = 16,
@@ -492,7 +492,7 @@ static struct regmap_config phycfg = {
.volatile_reg = encx24j600_phymap_volatile,
};
-static struct regmap_bus phymap_encx24j600 = {
+static const struct regmap_bus phymap_encx24j600 = {
.reg_write = regmap_encx24j600_phy_reg_write,
.reg_read = regmap_encx24j600_phy_reg_read,
};
diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c
index d7c8aa77ec75..b011bf5c2305 100644
--- a/drivers/net/ethernet/microchip/encx24j600.c
+++ b/drivers/net/ethernet/microchip/encx24j600.c
@@ -569,7 +569,7 @@ static void encx24j600_dump_config(struct encx24j600_priv *priv,
pr_info(DRV_NAME " MABBIPG: %04X\n", encx24j600_read_reg(priv,
MABBIPG));
- /* PHY configuation */
+ /* PHY configuration */
pr_info(DRV_NAME " PHCON1: %04X\n", encx24j600_read_phy(priv, PHCON1));
pr_info(DRV_NAME " PHCON2: %04X\n", encx24j600_read_phy(priv, PHCON2));
pr_info(DRV_NAME " PHANA: %04X\n", encx24j600_read_phy(priv, PHANA));
@@ -837,7 +837,9 @@ static void encx24j600_hw_tx(struct encx24j600_priv *priv)
dump_packet("TX", priv->tx_skb->len, priv->tx_skb->data);
if (encx24j600_read_reg(priv, EIR) & TXABTIF)
- /* Last transmition aborted due to error. Reset TX interface */
+ /* Last transmission aborted due to error.
+ * Reset TX interface
+ */
encx24j600_reset_hw_tx(priv);
/* Clear the TXIF flag if were previously set */
@@ -1112,7 +1114,6 @@ MODULE_DEVICE_TABLE(spi, encx24j600_spi_id_table);
static struct spi_driver encx24j600_spi_net_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.bus = &spi_bus_type,
},
.probe = encx24j600_spi_probe,
diff --git a/drivers/net/ethernet/microchip/encx24j600_hw.h b/drivers/net/ethernet/microchip/encx24j600_hw.h
index 34c5a289898c..2522f4f48b67 100644
--- a/drivers/net/ethernet/microchip/encx24j600_hw.h
+++ b/drivers/net/ethernet/microchip/encx24j600_hw.h
@@ -243,7 +243,7 @@ int devm_regmap_init_encx24j600(struct device *dev,
/* MAIPG */
/* value of the high byte is given by the reserved bits,
- * value of the low byte is recomended setting of the
+ * value of the low byte is recommended setting of the
* IPG parameter.
*/
#define MAIPGH_VAL 0x0C
diff --git a/drivers/net/ethernet/microchip/fdma/Kconfig b/drivers/net/ethernet/microchip/fdma/Kconfig
new file mode 100644
index 000000000000..ec228c061351
--- /dev/null
+++ b/drivers/net/ethernet/microchip/fdma/Kconfig
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Microchip FDMA API configuration
+#
+
+if NET_VENDOR_MICROCHIP
+
+config FDMA
+ bool "FDMA API" if COMPILE_TEST
+ help
+ Provides the basic FDMA functionality for multiple Microchip
+ switchcores.
+
+ Say Y here if you want to build the FDMA API that provides a common
+ set of functions and data structures for interacting with the Frame
+ DMA engine in multiple Microchip switchcores.
+
+endif # NET_VENDOR_MICROCHIP
diff --git a/drivers/net/ethernet/microchip/fdma/Makefile b/drivers/net/ethernet/microchip/fdma/Makefile
new file mode 100644
index 000000000000..cc9a736be357
--- /dev/null
+++ b/drivers/net/ethernet/microchip/fdma/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for Microchip FDMA
+#
+
+obj-$(CONFIG_FDMA) += fdma.o
+fdma-y += fdma_api.o
diff --git a/drivers/net/ethernet/microchip/fdma/fdma_api.c b/drivers/net/ethernet/microchip/fdma/fdma_api.c
new file mode 100644
index 000000000000..e78c3590da9e
--- /dev/null
+++ b/drivers/net/ethernet/microchip/fdma/fdma_api.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "fdma_api.h"
+
+#include <linux/bits.h>
+#include <linux/etherdevice.h>
+#include <linux/types.h>
+
+/* Add a DB to a DCB, providing a callback for getting the DB dataptr. */
+static int __fdma_db_add(struct fdma *fdma, int dcb_idx, int db_idx, u64 status,
+ int (*cb)(struct fdma *fdma, int dcb_idx,
+ int db_idx, u64 *dataptr))
+{
+ struct fdma_db *db = fdma_db_get(fdma, dcb_idx, db_idx);
+
+ db->status = status;
+
+ return cb(fdma, dcb_idx, db_idx, &db->dataptr);
+}
+
+/* Add a DB to a DCB, using the callback set in the fdma_ops struct. */
+int fdma_db_add(struct fdma *fdma, int dcb_idx, int db_idx, u64 status)
+{
+ return __fdma_db_add(fdma,
+ dcb_idx,
+ db_idx,
+ status,
+ fdma->ops.dataptr_cb);
+}
+
+/* Add a DCB with callbacks for getting the DB dataptr and the DCB nextptr. */
+int __fdma_dcb_add(struct fdma *fdma, int dcb_idx, u64 info, u64 status,
+ int (*dcb_cb)(struct fdma *fdma, int dcb_idx, u64 *nextptr),
+ int (*db_cb)(struct fdma *fdma, int dcb_idx, int db_idx,
+ u64 *dataptr))
+{
+ struct fdma_dcb *dcb = fdma_dcb_get(fdma, dcb_idx);
+ int i, err;
+
+ for (i = 0; i < fdma->n_dbs; i++) {
+ err = __fdma_db_add(fdma, dcb_idx, i, status, db_cb);
+ if (unlikely(err))
+ return err;
+ }
+
+ err = dcb_cb(fdma, dcb_idx, &fdma->last_dcb->nextptr);
+ if (unlikely(err))
+ return err;
+
+ fdma->last_dcb = dcb;
+
+ dcb->nextptr = FDMA_DCB_INVALID_DATA;
+ dcb->info = info;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__fdma_dcb_add);
+
+/* Add a DCB, using the preset callbacks in the fdma_ops struct. */
+int fdma_dcb_add(struct fdma *fdma, int dcb_idx, u64 info, u64 status)
+{
+ return __fdma_dcb_add(fdma,
+ dcb_idx,
+ info, status,
+ fdma->ops.nextptr_cb,
+ fdma->ops.dataptr_cb);
+}
+EXPORT_SYMBOL_GPL(fdma_dcb_add);
+
+/* Initialize the DCBs and DBs. */
+int fdma_dcbs_init(struct fdma *fdma, u64 info, u64 status)
+{
+ int i, err;
+
+ fdma->last_dcb = fdma->dcbs;
+ fdma->db_index = 0;
+ fdma->dcb_index = 0;
+
+ for (i = 0; i < fdma->n_dcbs; i++) {
+ err = fdma_dcb_add(fdma, i, info, status);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fdma_dcbs_init);
+
+/* Allocate coherent DMA memory for FDMA. */
+int fdma_alloc_coherent(struct device *dev, struct fdma *fdma)
+{
+ fdma->dcbs = dma_alloc_coherent(dev,
+ fdma->size,
+ &fdma->dma,
+ GFP_KERNEL);
+ if (!fdma->dcbs)
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fdma_alloc_coherent);
+
+/* Allocate physical memory for FDMA. */
+int fdma_alloc_phys(struct fdma *fdma)
+{
+ fdma->dcbs = kzalloc(fdma->size, GFP_KERNEL);
+ if (!fdma->dcbs)
+ return -ENOMEM;
+
+ fdma->dma = virt_to_phys(fdma->dcbs);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fdma_alloc_phys);
+
+/* Free coherent DMA memory. */
+void fdma_free_coherent(struct device *dev, struct fdma *fdma)
+{
+ dma_free_coherent(dev, fdma->size, fdma->dcbs, fdma->dma);
+}
+EXPORT_SYMBOL_GPL(fdma_free_coherent);
+
+/* Free virtual memory. */
+void fdma_free_phys(struct fdma *fdma)
+{
+ kfree(fdma->dcbs);
+}
+EXPORT_SYMBOL_GPL(fdma_free_phys);
+
+/* Get the size of the FDMA memory */
+u32 fdma_get_size(struct fdma *fdma)
+{
+ return ALIGN(sizeof(struct fdma_dcb) * fdma->n_dcbs, PAGE_SIZE);
+}
+EXPORT_SYMBOL_GPL(fdma_get_size);
+
+/* Get the size of the FDMA memory. This function is only applicable if the
+ * dataptr addresses and DCBs are in contiguous memory.
+ */
+u32 fdma_get_size_contiguous(struct fdma *fdma)
+{
+ return ALIGN(fdma->n_dcbs * sizeof(struct fdma_dcb) +
+ fdma->n_dcbs * fdma->n_dbs * fdma->db_size,
+ PAGE_SIZE);
+}
+EXPORT_SYMBOL_GPL(fdma_get_size_contiguous);
diff --git a/drivers/net/ethernet/microchip/fdma/fdma_api.h b/drivers/net/ethernet/microchip/fdma/fdma_api.h
new file mode 100644
index 000000000000..d91affe8bd98
--- /dev/null
+++ b/drivers/net/ethernet/microchip/fdma/fdma_api.h
@@ -0,0 +1,243 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _FDMA_API_H_
+#define _FDMA_API_H_
+
+#include <linux/bits.h>
+#include <linux/etherdevice.h>
+#include <linux/types.h>
+
+/* This provides a common set of functions and data structures for interacting
+ * with the Frame DMA engine on multiple Microchip switchcores.
+ *
+ * Frame DMA DCB format:
+ *
+ * +---------------------------+
+ * | Next Ptr |
+ * +---------------------------+
+ * | Reserved | Info |
+ * +---------------------------+
+ * | Data0 Ptr |
+ * +---------------------------+
+ * | Reserved | Status0 |
+ * +---------------------------+
+ * | Data1 Ptr |
+ * +---------------------------+
+ * | Reserved | Status1 |
+ * +---------------------------+
+ * | Data2 Ptr |
+ * +---------------------------+
+ * | Reserved | Status2 |
+ * |-------------|-------------|
+ * | |
+ * | |
+ * | |
+ * | |
+ * | |
+ * |---------------------------|
+ * | Data14 Ptr |
+ * +-------------|-------------+
+ * | Reserved | Status14 |
+ * +-------------|-------------+
+ *
+ * The data pointers point to the actual frame data to be received or sent. The
+ * addresses of the data pointers can, as of writing, be either a DMA address,
+ * a physical address or a mapped address.
+ *
+ */
+
+#define FDMA_DCB_INFO_DATAL(x) ((x) & GENMASK(15, 0))
+#define FDMA_DCB_INFO_TOKEN BIT(17)
+#define FDMA_DCB_INFO_INTR BIT(18)
+#define FDMA_DCB_INFO_SW(x) (((x) << 24) & GENMASK(31, 24))
+
+#define FDMA_DCB_STATUS_BLOCKL(x) ((x) & GENMASK(15, 0))
+#define FDMA_DCB_STATUS_SOF BIT(16)
+#define FDMA_DCB_STATUS_EOF BIT(17)
+#define FDMA_DCB_STATUS_INTR BIT(18)
+#define FDMA_DCB_STATUS_DONE BIT(19)
+#define FDMA_DCB_STATUS_BLOCKO(x) (((x) << 20) & GENMASK(31, 20))
+#define FDMA_DCB_INVALID_DATA 0x1
+
+#define FDMA_DB_MAX 15 /* Max number of DBs on Sparx5 */
+
+struct fdma;
+
+struct fdma_db {
+ u64 dataptr;
+ u64 status;
+};
+
+struct fdma_dcb {
+ u64 nextptr;
+ u64 info;
+ struct fdma_db db[FDMA_DB_MAX];
+};
+
+struct fdma_ops {
+ /* User-provided callback to set the dataptr */
+ int (*dataptr_cb)(struct fdma *fdma, int dcb_idx, int db_idx, u64 *ptr);
+ /* User-provided callback to set the nextptr */
+ int (*nextptr_cb)(struct fdma *fdma, int dcb_idx, u64 *ptr);
+};
+
+struct fdma {
+ void *priv;
+
+ /* Virtual addresses */
+ struct fdma_dcb *dcbs;
+ struct fdma_dcb *last_dcb;
+
+ /* DMA address */
+ dma_addr_t dma;
+
+ /* Size of DCB + DB memory */
+ int size;
+
+ /* Indexes used to access the next-to-be-used DCB or DB */
+ int db_index;
+ int dcb_index;
+
+ /* Number of DCBs and DBs */
+ u32 n_dcbs;
+ u32 n_dbs;
+
+ /* Size of DBs */
+ u32 db_size;
+
+ /* Channel id this FDMA object operates on */
+ u32 channel_id;
+
+ struct fdma_ops ops;
+};
+
+/* Advance the DCB index and wrap if required. */
+static inline void fdma_dcb_advance(struct fdma *fdma)
+{
+ fdma->dcb_index++;
+ if (fdma->dcb_index >= fdma->n_dcbs)
+ fdma->dcb_index = 0;
+}
+
+/* Advance the DB index. */
+static inline void fdma_db_advance(struct fdma *fdma)
+{
+ fdma->db_index++;
+}
+
+/* Reset the db index to zero. */
+static inline void fdma_db_reset(struct fdma *fdma)
+{
+ fdma->db_index = 0;
+}
+
+/* Check if a DCB can be reused in case of multiple DBs per DCB. */
+static inline bool fdma_dcb_is_reusable(struct fdma *fdma)
+{
+ return fdma->db_index != fdma->n_dbs;
+}
+
+/* Check if the FDMA has marked this DB as done. */
+static inline bool fdma_db_is_done(struct fdma_db *db)
+{
+ return db->status & FDMA_DCB_STATUS_DONE;
+}
+
+/* Get the length of a DB. */
+static inline int fdma_db_len_get(struct fdma_db *db)
+{
+ return FDMA_DCB_STATUS_BLOCKL(db->status);
+}
+
+/* Set the length of a DB. */
+static inline void fdma_dcb_len_set(struct fdma_dcb *dcb, u32 len)
+{
+ dcb->info = FDMA_DCB_INFO_DATAL(len);
+}
+
+/* Get a DB by index. */
+static inline struct fdma_db *fdma_db_get(struct fdma *fdma, int dcb_idx,
+ int db_idx)
+{
+ return &fdma->dcbs[dcb_idx].db[db_idx];
+}
+
+/* Get the next DB. */
+static inline struct fdma_db *fdma_db_next_get(struct fdma *fdma)
+{
+ return fdma_db_get(fdma, fdma->dcb_index, fdma->db_index);
+}
+
+/* Get a DCB by index. */
+static inline struct fdma_dcb *fdma_dcb_get(struct fdma *fdma, int dcb_idx)
+{
+ return &fdma->dcbs[dcb_idx];
+}
+
+/* Get the next DCB. */
+static inline struct fdma_dcb *fdma_dcb_next_get(struct fdma *fdma)
+{
+ return fdma_dcb_get(fdma, fdma->dcb_index);
+}
+
+/* Check if the FDMA has frames ready for extraction. */
+static inline bool fdma_has_frames(struct fdma *fdma)
+{
+ return fdma_db_is_done(fdma_db_next_get(fdma));
+}
+
+/* Get a nextptr by index */
+static inline int fdma_nextptr_cb(struct fdma *fdma, int dcb_idx, u64 *nextptr)
+{
+ *nextptr = fdma->dma + (sizeof(struct fdma_dcb) * dcb_idx);
+ return 0;
+}
+
+/* Get the DMA address of a dataptr, by index. This function is only applicable
+ * if the dataptr addresses and DCBs are in contiguous memory and the driver
+ * supports XDP.
+ */
+static inline u64 fdma_dataptr_get_contiguous(struct fdma *fdma, int dcb_idx,
+ int db_idx)
+{
+ return fdma->dma + (sizeof(struct fdma_dcb) * fdma->n_dcbs) +
+ (dcb_idx * fdma->n_dbs + db_idx) * fdma->db_size +
+ XDP_PACKET_HEADROOM;
+}
+
+/* Get the virtual address of a dataptr, by index. This function is only
+ * applicable if the dataptr addresses and DCBs are in contiguous memory and
+ * the driver supports XDP.
+ */
+static inline void *fdma_dataptr_virt_get_contiguous(struct fdma *fdma,
+ int dcb_idx, int db_idx)
+{
+ return (u8 *)fdma->dcbs + (sizeof(struct fdma_dcb) * fdma->n_dcbs) +
+ (dcb_idx * fdma->n_dbs + db_idx) * fdma->db_size +
+ XDP_PACKET_HEADROOM;
+}
+
+/* Check if this DCB is the last used DCB. */
+static inline bool fdma_is_last(struct fdma *fdma, struct fdma_dcb *dcb)
+{
+ return dcb == fdma->last_dcb;
+}
+
+int fdma_dcbs_init(struct fdma *fdma, u64 info, u64 status);
+int fdma_db_add(struct fdma *fdma, int dcb_idx, int db_idx, u64 status);
+int fdma_dcb_add(struct fdma *fdma, int dcb_idx, u64 info, u64 status);
+int __fdma_dcb_add(struct fdma *fdma, int dcb_idx, u64 info, u64 status,
+ int (*dcb_cb)(struct fdma *fdma, int dcb_idx, u64 *nextptr),
+ int (*db_cb)(struct fdma *fdma, int dcb_idx, int db_idx,
+ u64 *dataptr));
+
+int fdma_alloc_coherent(struct device *dev, struct fdma *fdma);
+int fdma_alloc_phys(struct fdma *fdma);
+
+void fdma_free_coherent(struct device *dev, struct fdma *fdma);
+void fdma_free_phys(struct fdma *fdma);
+
+u32 fdma_get_size(struct fdma *fdma);
+u32 fdma_get_size_contiguous(struct fdma *fdma);
+
+#endif
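For the contiguous layout assumed by fdma_get_size_contiguous() and fdma_dataptr_get_contiguous(), the n_dcbs DCBs sit first in the buffer and every (dcb, db) pair owns one db_size block after them, with the data pointer advanced by the XDP headroom. Below is a small standalone sketch of that address arithmetic only; the base address is hypothetical, the 256-byte DCB size follows from the struct above (two u64 fields plus 15 two-u64 DBs), and XDP_PACKET_HEADROOM is assumed to be 256.

#include <inttypes.h>
#include <stdio.h>

#define N_DCBS		4
#define N_DBS		1
#define DB_SIZE		2048
#define DCB_SIZE	256	/* nextptr + info + 15 DBs of 16 bytes each */
#define XDP_HEADROOM	256	/* assumed XDP_PACKET_HEADROOM */

int main(void)
{
	uint64_t dma = 0x10000000;	/* hypothetical DMA base address */
	int dcb, db;

	for (dcb = 0; dcb < N_DCBS; dcb++) {
		/* What fdma_nextptr_cb() would return for this DCB index. */
		uint64_t nextptr = dma + (uint64_t)DCB_SIZE * dcb;

		for (db = 0; db < N_DBS; db++) {
			/* What fdma_dataptr_get_contiguous() would return. */
			uint64_t dataptr = dma + (uint64_t)DCB_SIZE * N_DCBS +
					   ((uint64_t)dcb * N_DBS + db) * DB_SIZE +
					   XDP_HEADROOM;

			printf("dcb %d db %d: nextptr 0x%" PRIx64
			       " dataptr 0x%" PRIx64 "\n",
			       dcb, db, nextptr, dataptr);
		}
	}

	return 0;
}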
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
index 8a6ae171e375..40002d9fe274 100644
--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
@@ -18,6 +18,8 @@
#define EEPROM_MAC_OFFSET (0x01)
#define MAX_EEPROM_SIZE (512)
#define MAX_OTP_SIZE (1024)
+#define MAX_HS_OTP_SIZE (8 * 1024)
+#define MAX_HS_EEPROM_SIZE (64 * 1024)
#define OTP_INDICATOR_1 (0xF3)
#define OTP_INDICATOR_2 (0xF7)
@@ -272,6 +274,9 @@ static int lan743x_hs_otp_read(struct lan743x_adapter *adapter, u32 offset,
int ret;
int i;
+ if (offset + length > MAX_HS_OTP_SIZE)
+ return -EINVAL;
+
ret = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
if (ret < 0)
return ret;
@@ -320,6 +325,9 @@ static int lan743x_hs_otp_write(struct lan743x_adapter *adapter, u32 offset,
int ret;
int i;
+ if (offset + length > MAX_HS_OTP_SIZE)
+ return -EINVAL;
+
ret = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
if (ret < 0)
return ret;
@@ -497,6 +505,9 @@ static int lan743x_hs_eeprom_read(struct lan743x_adapter *adapter,
u32 val;
int i;
+ if (offset + length > MAX_HS_EEPROM_SIZE)
+ return -EINVAL;
+
retval = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
if (retval < 0)
return retval;
@@ -539,6 +550,9 @@ static int lan743x_hs_eeprom_write(struct lan743x_adapter *adapter,
u32 val;
int i;
+ if (offset + length > MAX_HS_EEPROM_SIZE)
+ return -EINVAL;
+
retval = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
if (retval < 0)
return retval;
@@ -604,9 +618,9 @@ static int lan743x_ethtool_get_eeprom_len(struct net_device *netdev)
struct lan743x_adapter *adapter = netdev_priv(netdev);
if (adapter->flags & LAN743X_ADAPTER_FLAG_OTP)
- return MAX_OTP_SIZE;
+ return adapter->is_pci11x1x ? MAX_HS_OTP_SIZE : MAX_OTP_SIZE;
- return MAX_EEPROM_SIZE;
+ return adapter->is_pci11x1x ? MAX_HS_EEPROM_SIZE : MAX_EEPROM_SIZE;
}
static int lan743x_ethtool_get_eeprom(struct net_device *netdev,
@@ -899,23 +913,29 @@ static int lan743x_ethtool_get_sset_count(struct net_device *netdev, int sset)
}
}
+static int lan743x_ethtool_get_rxfh_fields(struct net_device *netdev,
+ struct ethtool_rxfh_fields *fields)
+{
+ fields->data = 0;
+
+ switch (fields->flow_type) {
+ case TCP_V4_FLOW:case UDP_V4_FLOW:
+ case TCP_V6_FLOW:case UDP_V6_FLOW:
+ fields->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ fallthrough;
+ case IPV4_FLOW: case IPV6_FLOW:
+ fields->data |= RXH_IP_SRC | RXH_IP_DST;
+ return 0;
+ }
+
+ return 0;
+}
+
static int lan743x_ethtool_get_rxnfc(struct net_device *netdev,
struct ethtool_rxnfc *rxnfc,
u32 *rule_locs)
{
switch (rxnfc->cmd) {
- case ETHTOOL_GRXFH:
- rxnfc->data = 0;
- switch (rxnfc->flow_type) {
- case TCP_V4_FLOW:case UDP_V4_FLOW:
- case TCP_V6_FLOW:case UDP_V6_FLOW:
- rxnfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- fallthrough;
- case IPV4_FLOW: case IPV6_FLOW:
- rxnfc->data |= RXH_IP_SRC | RXH_IP_DST;
- return 0;
- }
- break;
case ETHTOOL_GRXRINGS:
rxnfc->data = LAN743X_USED_RX_CHANNELS;
return 0;
@@ -1029,21 +1049,17 @@ static int lan743x_ethtool_set_rxfh(struct net_device *netdev,
}
static int lan743x_ethtool_get_ts_info(struct net_device *netdev,
- struct ethtool_ts_info *ts_info)
+ struct kernel_ethtool_ts_info *ts_info)
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
if (adapter->ptp.ptp_clock)
ts_info->phc_index = ptp_clock_index(adapter->ptp.ptp_clock);
- else
- ts_info->phc_index = -1;
ts_info->tx_types = BIT(HWTSTAMP_TX_OFF) |
BIT(HWTSTAMP_TX_ON) |
@@ -1058,82 +1074,34 @@ static int lan743x_ethtool_get_eee(struct net_device *netdev,
struct ethtool_keee *eee)
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
- struct phy_device *phydev = netdev->phydev;
- u32 buf;
- int ret;
-
- if (!phydev)
- return -EIO;
- if (!phydev->drv) {
- netif_err(adapter, drv, adapter->netdev,
- "Missing PHY Driver\n");
- return -EIO;
- }
-
- ret = phy_ethtool_get_eee(phydev, eee);
- if (ret < 0)
- return ret;
- buf = lan743x_csr_read(adapter, MAC_CR);
- if (buf & MAC_CR_EEE_EN_) {
- eee->eee_enabled = true;
- eee->tx_lpi_enabled = true;
- /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
- buf = lan743x_csr_read(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT);
- eee->tx_lpi_timer = buf;
- } else {
- eee->eee_enabled = false;
- eee->eee_active = false;
- eee->tx_lpi_enabled = false;
- eee->tx_lpi_timer = 0;
- }
-
- return 0;
+ return phylink_ethtool_get_eee(adapter->phylink, eee);
}
static int lan743x_ethtool_set_eee(struct net_device *netdev,
struct ethtool_keee *eee)
{
- struct lan743x_adapter *adapter;
- struct phy_device *phydev;
- u32 buf = 0;
- int ret = 0;
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
- if (!netdev)
- return -EINVAL;
- adapter = netdev_priv(netdev);
- if (!adapter)
- return -EINVAL;
- phydev = netdev->phydev;
- if (!phydev)
- return -EIO;
- if (!phydev->drv) {
- netif_err(adapter, drv, adapter->netdev,
- "Missing PHY Driver\n");
- return -EIO;
- }
+ return phylink_ethtool_set_eee(adapter->phylink, eee);
+}
- if (eee->eee_enabled) {
- ret = phy_init_eee(phydev, false);
- if (ret) {
- netif_err(adapter, drv, adapter->netdev,
- "EEE initialization failed\n");
- return ret;
- }
+static int
+lan743x_ethtool_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
- buf = (u32)eee->tx_lpi_timer;
- lan743x_csr_write(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT, buf);
+ return phylink_ethtool_ksettings_set(adapter->phylink, cmd);
+}
- buf = lan743x_csr_read(adapter, MAC_CR);
- buf |= MAC_CR_EEE_EN_;
- lan743x_csr_write(adapter, MAC_CR, buf);
- } else {
- buf = lan743x_csr_read(adapter, MAC_CR);
- buf &= ~MAC_CR_EEE_EN_;
- lan743x_csr_write(adapter, MAC_CR, buf);
- }
+static int
+lan743x_ethtool_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
- return phy_ethtool_set_eee(phydev, eee);
+ return phylink_ethtool_ksettings_get(adapter->phylink, cmd);
}
#ifdef CONFIG_PM
@@ -1145,11 +1113,14 @@ static void lan743x_ethtool_get_wol(struct net_device *netdev,
wol->supported = 0;
wol->wolopts = 0;
- if (netdev->phydev)
- phy_ethtool_get_wol(netdev->phydev, wol);
+ phylink_ethtool_get_wol(adapter->phylink, wol);
+
+ if (wol->supported != adapter->phy_wol_supported)
+ netif_warn(adapter, drv, adapter->netdev,
+ "PHY changed its supported WOL! old=%x, new=%x\n",
+ adapter->phy_wol_supported, wol->supported);
- wol->supported |= WAKE_BCAST | WAKE_UCAST | WAKE_MCAST |
- WAKE_MAGIC | WAKE_PHY | WAKE_ARP;
+ wol->supported |= MAC_SUPPORTED_WAKES;
if (adapter->is_pci11x1x)
wol->supported |= WAKE_MAGICSECURE;
@@ -1164,7 +1135,39 @@ static int lan743x_ethtool_set_wol(struct net_device *netdev,
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
+ /* WAKE_MAGICSECURE is a modifier of and only valid together with
+ * WAKE_MAGIC
+ */
+ if ((wol->wolopts & WAKE_MAGICSECURE) && !(wol->wolopts & WAKE_MAGIC))
+ return -EINVAL;
+
+ if (netdev->phydev) {
+ struct ethtool_wolinfo phy_wol;
+ int ret;
+
+ phy_wol.wolopts = wol->wolopts & adapter->phy_wol_supported;
+
+ /* If WAKE_MAGICSECURE was requested, filter out WAKE_MAGIC
+ * for PHYs that do not support WAKE_MAGICSECURE
+ */
+ if (wol->wolopts & WAKE_MAGICSECURE &&
+ !(adapter->phy_wol_supported & WAKE_MAGICSECURE))
+ phy_wol.wolopts &= ~WAKE_MAGIC;
+
+ ret = phylink_ethtool_set_wol(adapter->phylink, wol);
+ if (ret && (ret != -EOPNOTSUPP))
+ return ret;
+
+ if (ret == -EOPNOTSUPP)
+ adapter->phy_wolopts = 0;
+ else
+ adapter->phy_wolopts = phy_wol.wolopts;
+ } else {
+ adapter->phy_wolopts = 0;
+ }
+
adapter->wolopts = 0;
+ wol->wolopts &= ~adapter->phy_wolopts;
if (wol->wolopts & WAKE_UCAST)
adapter->wolopts |= WAKE_UCAST;
if (wol->wolopts & WAKE_MCAST)
@@ -1185,10 +1188,10 @@ static int lan743x_ethtool_set_wol(struct net_device *netdev,
memset(adapter->sopass, 0, sizeof(u8) * SOPASS_MAX);
}
+ wol->wolopts = adapter->wolopts | adapter->phy_wolopts;
device_set_wakeup_enable(&adapter->pdev->dev, (bool)wol->wolopts);
- return netdev->phydev ? phy_ethtool_set_wol(netdev->phydev, wol)
- : -ENETDOWN;
+ return 0;
}
#endif /* CONFIG_PM */
@@ -1340,44 +1343,16 @@ static void lan743x_get_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *pause)
{
struct lan743x_adapter *adapter = netdev_priv(dev);
- struct lan743x_phy *phy = &adapter->phy;
- if (phy->fc_request_control & FLOW_CTRL_TX)
- pause->tx_pause = 1;
- if (phy->fc_request_control & FLOW_CTRL_RX)
- pause->rx_pause = 1;
- pause->autoneg = phy->fc_autoneg;
+ phylink_ethtool_get_pauseparam(adapter->phylink, pause);
}
static int lan743x_set_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *pause)
{
struct lan743x_adapter *adapter = netdev_priv(dev);
- struct phy_device *phydev = dev->phydev;
- struct lan743x_phy *phy = &adapter->phy;
-
- if (!phydev)
- return -ENODEV;
-
- if (!phy_validate_pause(phydev, pause))
- return -EINVAL;
-
- phy->fc_request_control = 0;
- if (pause->rx_pause)
- phy->fc_request_control |= FLOW_CTRL_RX;
- if (pause->tx_pause)
- phy->fc_request_control |= FLOW_CTRL_TX;
-
- phy->fc_autoneg = pause->autoneg;
-
- if (pause->autoneg == AUTONEG_DISABLE)
- lan743x_mac_flow_ctrl_set_enables(adapter, pause->tx_pause,
- pause->rx_pause);
- else
- phy_set_asym_pause(phydev, pause->rx_pause, pause->tx_pause);
-
- return 0;
+ return phylink_ethtool_set_pauseparam(adapter->phylink, pause);
}
const struct ethtool_ops lan743x_ethtool_ops = {
@@ -1399,11 +1374,12 @@ const struct ethtool_ops lan743x_ethtool_ops = {
.get_rxfh_indir_size = lan743x_ethtool_get_rxfh_indir_size,
.get_rxfh = lan743x_ethtool_get_rxfh,
.set_rxfh = lan743x_ethtool_set_rxfh,
+ .get_rxfh_fields = lan743x_ethtool_get_rxfh_fields,
.get_ts_info = lan743x_ethtool_get_ts_info,
.get_eee = lan743x_ethtool_get_eee,
.set_eee = lan743x_ethtool_set_eee,
- .get_link_ksettings = phy_ethtool_get_link_ksettings,
- .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_link_ksettings = lan743x_ethtool_get_link_ksettings,
+ .set_link_ksettings = lan743x_ethtool_set_link_ksettings,
.get_regs_len = lan743x_get_regs_len,
.get_regs = lan743x_get_regs,
.get_pauseparam = lan743x_get_pauseparam,
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 75a988c0bd79..e4c542fc6c2b 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -15,6 +15,7 @@
#include <linux/rtnetlink.h>
#include <linux/iopoll.h>
#include <linux/crc16.h>
+#include <linux/phylink.h>
#include "lan743x_main.h"
#include "lan743x_ethtool.h"
@@ -803,7 +804,7 @@ static int lan743x_mdiobus_read_c22(struct mii_bus *bus, int phy_id, int index)
u32 val, mii_access;
int ret;
- /* comfirm MII not busy */
+ /* confirm MII not busy */
ret = lan743x_mac_mii_wait_till_not_busy(adapter);
if (ret < 0)
return ret;
@@ -868,7 +869,7 @@ static int lan743x_mdiobus_read_c45(struct mii_bus *bus, int phy_id,
u32 mmd_access;
int ret;
- /* comfirm MII not busy */
+ /* confirm MII not busy */
ret = lan743x_mac_mii_wait_till_not_busy(adapter);
if (ret < 0)
return ret;
@@ -992,6 +993,42 @@ static int lan743x_sgmii_write(struct lan743x_adapter *adapter,
return ret;
}
+static int lan743x_get_lsd(int speed, int duplex, u8 mss)
+{
+ int lsd;
+
+ switch (speed) {
+ case SPEED_2500:
+ if (mss == MASTER_SLAVE_STATE_SLAVE)
+ lsd = LINK_2500_SLAVE;
+ else
+ lsd = LINK_2500_MASTER;
+ break;
+ case SPEED_1000:
+ if (mss == MASTER_SLAVE_STATE_SLAVE)
+ lsd = LINK_1000_SLAVE;
+ else
+ lsd = LINK_1000_MASTER;
+ break;
+ case SPEED_100:
+ if (duplex == DUPLEX_FULL)
+ lsd = LINK_100FD;
+ else
+ lsd = LINK_100HD;
+ break;
+ case SPEED_10:
+ if (duplex == DUPLEX_FULL)
+ lsd = LINK_10FD;
+ else
+ lsd = LINK_10HD;
+ break;
+ default:
+ lsd = -EINVAL;
+ }
+
+ return lsd;
+}
+
static int lan743x_sgmii_mpll_set(struct lan743x_adapter *adapter,
u16 baud)
{
@@ -1041,26 +1078,7 @@ static int lan743x_sgmii_2_5G_mode_set(struct lan743x_adapter *adapter,
VR_MII_BAUD_RATE_1P25GBPS);
}
-static int lan743x_is_sgmii_2_5G_mode(struct lan743x_adapter *adapter,
- bool *status)
-{
- int ret;
-
- ret = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2,
- VR_MII_GEN2_4_MPLL_CTRL1);
- if (ret < 0)
- return ret;
-
- if (ret == VR_MII_MPLL_MULTIPLIER_125 ||
- ret == VR_MII_MPLL_MULTIPLIER_50)
- *status = true;
- else
- *status = false;
-
- return 0;
-}
-
-static int lan743x_sgmii_aneg_update(struct lan743x_adapter *adapter)
+static int lan743x_serdes_clock_and_aneg_update(struct lan743x_adapter *adapter)
{
enum lan743x_sgmii_lsd lsd = adapter->sgmii_lsd;
int mii_ctrl;
@@ -1147,68 +1165,11 @@ static int lan743x_pcs_seq_state(struct lan743x_adapter *adapter, u8 state)
return 0;
}
-static int lan743x_sgmii_config(struct lan743x_adapter *adapter)
+static int lan743x_pcs_power_reset(struct lan743x_adapter *adapter)
{
- struct net_device *netdev = adapter->netdev;
- struct phy_device *phydev = netdev->phydev;
- enum lan743x_sgmii_lsd lsd = POWER_DOWN;
int mii_ctl;
- bool status;
int ret;
- switch (phydev->speed) {
- case SPEED_2500:
- if (phydev->master_slave_state == MASTER_SLAVE_STATE_MASTER)
- lsd = LINK_2500_MASTER;
- else
- lsd = LINK_2500_SLAVE;
- break;
- case SPEED_1000:
- if (phydev->master_slave_state == MASTER_SLAVE_STATE_MASTER)
- lsd = LINK_1000_MASTER;
- else
- lsd = LINK_1000_SLAVE;
- break;
- case SPEED_100:
- if (phydev->duplex)
- lsd = LINK_100FD;
- else
- lsd = LINK_100HD;
- break;
- case SPEED_10:
- if (phydev->duplex)
- lsd = LINK_10FD;
- else
- lsd = LINK_10HD;
- break;
- default:
- netif_err(adapter, drv, adapter->netdev,
- "Invalid speed %d\n", phydev->speed);
- return -EINVAL;
- }
-
- adapter->sgmii_lsd = lsd;
- ret = lan743x_sgmii_aneg_update(adapter);
- if (ret < 0) {
- netif_err(adapter, drv, adapter->netdev,
- "error %d SGMII cfg failed\n", ret);
- return ret;
- }
-
- ret = lan743x_is_sgmii_2_5G_mode(adapter, &status);
- if (ret < 0) {
- netif_err(adapter, drv, adapter->netdev,
- "error %d SGMII get mode failed\n", ret);
- return ret;
- }
-
- if (status)
- netif_dbg(adapter, drv, adapter->netdev,
- "SGMII 2.5G mode enable\n");
- else
- netif_dbg(adapter, drv, adapter->netdev,
- "SGMII 1G mode enable\n");
-
/* SGMII/1000/2500BASE-X PCS power down */
mii_ctl = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2, MII_BMCR);
if (mii_ctl < 0)
@@ -1229,11 +1190,7 @@ static int lan743x_sgmii_config(struct lan743x_adapter *adapter)
if (ret < 0)
return ret;
- ret = lan743x_pcs_seq_state(adapter, PCS_POWER_STATE_UP);
- if (ret < 0)
- return ret;
-
- return 0;
+ return lan743x_pcs_seq_state(adapter, PCS_POWER_STATE_UP);
}
static void lan743x_mac_set_address(struct lan743x_adapter *adapter,
@@ -1373,7 +1330,7 @@ static int lan743x_mac_set_mtu(struct lan743x_adapter *adapter, int new_mtu)
}
/* PHY */
-static int lan743x_phy_reset(struct lan743x_adapter *adapter)
+static int lan743x_hw_reset_phy(struct lan743x_adapter *adapter)
{
u32 data;
@@ -1389,96 +1346,6 @@ static int lan743x_phy_reset(struct lan743x_adapter *adapter)
50000, 1000000);
}
-static void lan743x_phy_update_flowcontrol(struct lan743x_adapter *adapter,
- u16 local_adv, u16 remote_adv)
-{
- struct lan743x_phy *phy = &adapter->phy;
- u8 cap;
-
- if (phy->fc_autoneg)
- cap = mii_resolve_flowctrl_fdx(local_adv, remote_adv);
- else
- cap = phy->fc_request_control;
-
- lan743x_mac_flow_ctrl_set_enables(adapter,
- cap & FLOW_CTRL_TX,
- cap & FLOW_CTRL_RX);
-}
-
-static int lan743x_phy_init(struct lan743x_adapter *adapter)
-{
- return lan743x_phy_reset(adapter);
-}
-
-static void lan743x_phy_link_status_change(struct net_device *netdev)
-{
- struct lan743x_adapter *adapter = netdev_priv(netdev);
- struct phy_device *phydev = netdev->phydev;
- u32 data;
-
- phy_print_status(phydev);
- if (phydev->state == PHY_RUNNING) {
- int remote_advertisement = 0;
- int local_advertisement = 0;
-
- data = lan743x_csr_read(adapter, MAC_CR);
-
- /* set duplex mode */
- if (phydev->duplex)
- data |= MAC_CR_DPX_;
- else
- data &= ~MAC_CR_DPX_;
-
- /* set bus speed */
- switch (phydev->speed) {
- case SPEED_10:
- data &= ~MAC_CR_CFG_H_;
- data &= ~MAC_CR_CFG_L_;
- break;
- case SPEED_100:
- data &= ~MAC_CR_CFG_H_;
- data |= MAC_CR_CFG_L_;
- break;
- case SPEED_1000:
- data |= MAC_CR_CFG_H_;
- data &= ~MAC_CR_CFG_L_;
- break;
- case SPEED_2500:
- data |= MAC_CR_CFG_H_;
- data |= MAC_CR_CFG_L_;
- break;
- }
- lan743x_csr_write(adapter, MAC_CR, data);
-
- local_advertisement =
- linkmode_adv_to_mii_adv_t(phydev->advertising);
- remote_advertisement =
- linkmode_adv_to_mii_adv_t(phydev->lp_advertising);
-
- lan743x_phy_update_flowcontrol(adapter, local_advertisement,
- remote_advertisement);
- lan743x_ptp_update_latency(adapter, phydev->speed);
- if (phydev->interface == PHY_INTERFACE_MODE_SGMII ||
- phydev->interface == PHY_INTERFACE_MODE_1000BASEX ||
- phydev->interface == PHY_INTERFACE_MODE_2500BASEX)
- lan743x_sgmii_config(adapter);
- }
-}
-
-static void lan743x_phy_close(struct lan743x_adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
- struct phy_device *phydev = netdev->phydev;
-
- phy_stop(netdev->phydev);
- phy_disconnect(netdev->phydev);
-
- /* using phydev here as phy_disconnect NULLs netdev->phydev */
- if (phy_is_pseudo_fixed_link(phydev))
- fixed_phy_unregister(phydev);
-
-}
-
static void lan743x_phy_interface_select(struct lan743x_adapter *adapter)
{
u32 id_rev;
@@ -1495,65 +1362,9 @@ static void lan743x_phy_interface_select(struct lan743x_adapter *adapter)
adapter->phy_interface = PHY_INTERFACE_MODE_MII;
else
adapter->phy_interface = PHY_INTERFACE_MODE_RGMII;
-}
-
-static int lan743x_phy_open(struct lan743x_adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
- struct lan743x_phy *phy = &adapter->phy;
- struct fixed_phy_status fphy_status = {
- .link = 1,
- .speed = SPEED_1000,
- .duplex = DUPLEX_FULL,
- };
- struct phy_device *phydev;
- int ret = -EIO;
-
- /* try devicetree phy, or fixed link */
- phydev = of_phy_get_and_connect(netdev, adapter->pdev->dev.of_node,
- lan743x_phy_link_status_change);
-
- if (!phydev) {
- /* try internal phy */
- phydev = phy_find_first(adapter->mdiobus);
- if (!phydev) {
- if ((adapter->csr.id_rev & ID_REV_ID_MASK_) ==
- ID_REV_ID_LAN7431_) {
- phydev = fixed_phy_register(PHY_POLL,
- &fphy_status, NULL);
- if (IS_ERR(phydev)) {
- netdev_err(netdev, "No PHY/fixed_PHY found\n");
- return PTR_ERR(phydev);
- }
- } else {
- goto return_error;
- }
- }
-
- lan743x_phy_interface_select(adapter);
-
- ret = phy_connect_direct(netdev, phydev,
- lan743x_phy_link_status_change,
- adapter->phy_interface);
- if (ret)
- goto return_error;
- }
- /* MAC doesn't support 1000T Half */
- phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
-
- /* support both flow controls */
- phy_support_asym_pause(phydev);
- phy->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
- phy->fc_autoneg = phydev->autoneg;
-
- phy_start(phydev);
- phy_start_aneg(phydev);
- phy_attached_info(phydev);
- return 0;
-
-return_error:
- return ret;
+ netif_dbg(adapter, drv, adapter->netdev,
+ "selected phy interface: 0x%X\n", adapter->phy_interface);
}
static void lan743x_rfe_open(struct lan743x_adapter *adapter)
@@ -1913,6 +1724,7 @@ int lan743x_rx_set_tstamp_mode(struct lan743x_adapter *adapter,
default:
return -ERANGE;
}
+ adapter->rx_tstamp_filter = rx_filter;
return 0;
}
@@ -1999,6 +1811,7 @@ static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
if (nr_frags <= 0) {
tx->frame_data0 |= TX_DESC_DATA0_LS_;
tx->frame_data0 |= TX_DESC_DATA0_IOC_;
+ tx->frame_last = tx->frame_first;
}
tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
@@ -2068,6 +1881,7 @@ static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx,
tx->frame_first = 0;
tx->frame_data0 = 0;
tx->frame_tail = 0;
+ tx->frame_last = 0;
return -ENOMEM;
}
@@ -2108,16 +1922,18 @@ static void lan743x_tx_frame_end(struct lan743x_tx *tx,
TX_DESC_DATA0_DTYPE_DATA_) {
tx->frame_data0 |= TX_DESC_DATA0_LS_;
tx->frame_data0 |= TX_DESC_DATA0_IOC_;
+ tx->frame_last = tx->frame_tail;
}
- tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
- buffer_info = &tx->buffer_info[tx->frame_tail];
+ tx_descriptor = &tx->ring_cpu_ptr[tx->frame_last];
+ buffer_info = &tx->buffer_info[tx->frame_last];
buffer_info->skb = skb;
if (time_stamp)
buffer_info->flags |= TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED;
if (ignore_sync)
buffer_info->flags |= TX_BUFFER_INFO_FLAG_IGNORE_SYNC;
+ tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
tx->last_tail = tx->frame_tail;
@@ -2679,8 +2495,7 @@ static int lan743x_rx_process_buffer(struct lan743x_rx *rx)
/* save existing skb, allocate new skb and map to dma */
skb = buffer_info->skb;
- if (lan743x_rx_init_ring_element(rx, rx->last_head,
- GFP_ATOMIC | GFP_DMA)) {
+ if (lan743x_rx_init_ring_element(rx, rx->last_head, GFP_ATOMIC)) {
/* failed to allocate next skb.
* Memory is very low.
* Drop this packet and reuse buffer.
@@ -3054,6 +2869,365 @@ return_error:
return ret;
}
+static int lan743x_phylink_sgmii_config(struct lan743x_adapter *adapter)
+{
+ u32 sgmii_ctl;
+ int ret;
+
+ ret = lan743x_get_lsd(SPEED_1000, DUPLEX_FULL,
+ MASTER_SLAVE_STATE_MASTER);
+ if (ret < 0) {
+ netif_err(adapter, drv, adapter->netdev,
+ "error %d link-speed-duplex(LSD) invalid\n", ret);
+ return ret;
+ }
+
+ adapter->sgmii_lsd = ret;
+ netif_dbg(adapter, drv, adapter->netdev,
+ "Link Speed Duplex (lsd) : 0x%X\n", adapter->sgmii_lsd);
+
+ /* LINK_STATUS_SOURCE from the External PHY via SGMII */
+ sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
+ sgmii_ctl &= ~SGMII_CTL_LINK_STATUS_SOURCE_;
+ lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
+
+ ret = lan743x_serdes_clock_and_aneg_update(adapter);
+ if (ret < 0) {
+ netif_err(adapter, drv, adapter->netdev,
+ "error %d sgmii aneg update failed\n", ret);
+ return ret;
+ }
+
+ return lan743x_pcs_power_reset(adapter);
+}
+
+static int lan743x_phylink_1000basex_config(struct lan743x_adapter *adapter)
+{
+ u32 sgmii_ctl;
+ int ret;
+
+ ret = lan743x_get_lsd(SPEED_1000, DUPLEX_FULL,
+ MASTER_SLAVE_STATE_MASTER);
+ if (ret < 0) {
+ netif_err(adapter, drv, adapter->netdev,
+ "error %d link-speed-duplex(LSD) invalid\n", ret);
+ return ret;
+ }
+
+ adapter->sgmii_lsd = ret;
+ netif_dbg(adapter, drv, adapter->netdev,
+ "Link Speed Duplex (lsd) : 0x%X\n", adapter->sgmii_lsd);
+
+ /* LINK_STATUS_SOURCE from 1000BASE-X PCS link status */
+ sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
+ sgmii_ctl |= SGMII_CTL_LINK_STATUS_SOURCE_;
+ lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
+
+ ret = lan743x_serdes_clock_and_aneg_update(adapter);
+ if (ret < 0) {
+ netif_err(adapter, drv, adapter->netdev,
+ "error %d 1000basex aneg update failed\n", ret);
+ return ret;
+ }
+
+ return lan743x_pcs_power_reset(adapter);
+}
+
+static int lan743x_phylink_2500basex_config(struct lan743x_adapter *adapter)
+{
+ u32 sgmii_ctl;
+ int ret;
+
+ ret = lan743x_get_lsd(SPEED_2500, DUPLEX_FULL,
+ MASTER_SLAVE_STATE_MASTER);
+ if (ret < 0) {
+ netif_err(adapter, drv, adapter->netdev,
+ "error %d link-speed-duplex(LSD) invalid\n", ret);
+ return ret;
+ }
+
+ adapter->sgmii_lsd = ret;
+ netif_dbg(adapter, drv, adapter->netdev,
+ "Link Speed Duplex (lsd) : 0x%X\n", adapter->sgmii_lsd);
+
+ /* LINK_STATUS_SOURCE from 2500BASE-X PCS link status */
+ sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
+ sgmii_ctl |= SGMII_CTL_LINK_STATUS_SOURCE_;
+ lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
+
+ ret = lan743x_serdes_clock_and_aneg_update(adapter);
+ if (ret < 0) {
+ netif_err(adapter, drv, adapter->netdev,
+ "error %d 2500basex aneg update failed\n", ret);
+ return ret;
+ }
+
+ return lan743x_pcs_power_reset(adapter);
+}
+
+static void lan743x_mac_eee_enable(struct lan743x_adapter *adapter, bool enable)
+{
+ u32 mac_cr;
+
+ mac_cr = lan743x_csr_read(adapter, MAC_CR);
+ if (enable)
+ mac_cr |= MAC_CR_EEE_EN_;
+ else
+ mac_cr &= ~MAC_CR_EEE_EN_;
+ lan743x_csr_write(adapter, MAC_CR, mac_cr);
+}
+
+static void lan743x_phylink_mac_config(struct phylink_config *config,
+ unsigned int link_an_mode,
+ const struct phylink_link_state *state)
+{
+ struct net_device *netdev = to_net_dev(config->dev);
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+ int ret;
+
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_2500BASEX:
+ ret = lan743x_phylink_2500basex_config(adapter);
+ if (ret < 0)
+ netif_err(adapter, drv, adapter->netdev,
+ "2500BASEX config failed. Error %d\n", ret);
+ else
+ netif_dbg(adapter, drv, adapter->netdev,
+ "2500BASEX mode selected and configured\n");
+ break;
+ case PHY_INTERFACE_MODE_1000BASEX:
+ ret = lan743x_phylink_1000basex_config(adapter);
+ if (ret < 0)
+ netif_err(adapter, drv, adapter->netdev,
+ "1000BASEX config failed. Error %d\n", ret);
+ else
+ netif_dbg(adapter, drv, adapter->netdev,
+ "1000BASEX mode selected and configured\n");
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ ret = lan743x_phylink_sgmii_config(adapter);
+ if (ret < 0)
+ netif_err(adapter, drv, adapter->netdev,
+ "SGMII config failed. Error %d\n", ret);
+ else
+ netif_dbg(adapter, drv, adapter->netdev,
+ "SGMII mode selected and configured\n");
+ break;
+ default:
+ netif_dbg(adapter, drv, adapter->netdev,
+ "RGMII/GMII/MII(0x%X) mode enable\n",
+ state->interface);
+ break;
+ }
+}
+
+static void lan743x_phylink_mac_link_down(struct phylink_config *config,
+ unsigned int link_an_mode,
+ phy_interface_t interface)
+{
+ struct net_device *netdev = to_net_dev(config->dev);
+
+ netif_tx_stop_all_queues(netdev);
+}
+
+static void lan743x_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
+ unsigned int link_an_mode,
+ phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct net_device *netdev = to_net_dev(config->dev);
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+ int mac_cr;
+ u8 cap;
+
+ mac_cr = lan743x_csr_read(adapter, MAC_CR);
+ /* Pre-initialize register bits.
+ * Resulting value corresponds to SPEED_10
+ */
+ mac_cr &= ~(MAC_CR_CFG_H_ | MAC_CR_CFG_L_);
+ if (speed == SPEED_2500)
+ mac_cr |= MAC_CR_CFG_H_ | MAC_CR_CFG_L_;
+ else if (speed == SPEED_1000)
+ mac_cr |= MAC_CR_CFG_H_;
+ else if (speed == SPEED_100)
+ mac_cr |= MAC_CR_CFG_L_;
+
+ lan743x_csr_write(adapter, MAC_CR, mac_cr);
+
+ lan743x_ptp_update_latency(adapter, speed);
+
+ /* Flow Control operation */
+ cap = 0;
+ if (tx_pause)
+ cap |= FLOW_CTRL_TX;
+ if (rx_pause)
+ cap |= FLOW_CTRL_RX;
+
+ lan743x_mac_flow_ctrl_set_enables(adapter,
+ cap & FLOW_CTRL_TX,
+ cap & FLOW_CTRL_RX);
+
+ netif_tx_wake_all_queues(netdev);
+}
+
+static void lan743x_mac_disable_tx_lpi(struct phylink_config *config)
+{
+ struct net_device *netdev = to_net_dev(config->dev);
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ lan743x_mac_eee_enable(adapter, false);
+}
+
+static int lan743x_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
+ bool tx_clk_stop)
+{
+ struct net_device *netdev = to_net_dev(config->dev);
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ /* Software should only change this field when Energy Efficient
+ * Ethernet Enable (EEEEN) is cleared. We ensure that by clearing
+ * EEEEN during probe, and phylink itself guarantees that
+ * mac_disable_tx_lpi() will have been previously called.
+ */
+ lan743x_csr_write(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT, timer);
+ lan743x_mac_eee_enable(adapter, true);
+
+ return 0;
+}
+
+static const struct phylink_mac_ops lan743x_phylink_mac_ops = {
+ .mac_config = lan743x_phylink_mac_config,
+ .mac_link_down = lan743x_phylink_mac_link_down,
+ .mac_link_up = lan743x_phylink_mac_link_up,
+ .mac_disable_tx_lpi = lan743x_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = lan743x_mac_enable_tx_lpi,
+};
+
+static int lan743x_phylink_create(struct lan743x_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct phylink *pl;
+
+ adapter->phylink_config.dev = &netdev->dev;
+ adapter->phylink_config.type = PHYLINK_NETDEV;
+ adapter->phylink_config.mac_managed_pm = false;
+
+ adapter->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
+ MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD;
+ adapter->phylink_config.lpi_capabilities = MAC_100FD | MAC_1000FD;
+ adapter->phylink_config.lpi_timer_default =
+ lan743x_csr_read(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT);
+
+ lan743x_phy_interface_select(adapter);
+
+ switch (adapter->phy_interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ adapter->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ adapter->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ adapter->phylink_config.supported_interfaces);
+ adapter->phylink_config.mac_capabilities |= MAC_2500FD;
+ break;
+ case PHY_INTERFACE_MODE_GMII:
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ adapter->phylink_config.supported_interfaces);
+ break;
+ case PHY_INTERFACE_MODE_MII:
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ adapter->phylink_config.supported_interfaces);
+ break;
+ default:
+ phy_interface_set_rgmii(adapter->phylink_config.supported_interfaces);
+ }
+
+ memcpy(adapter->phylink_config.lpi_interfaces,
+ adapter->phylink_config.supported_interfaces,
+ sizeof(adapter->phylink_config.lpi_interfaces));
+
+ pl = phylink_create(&adapter->phylink_config, NULL,
+ adapter->phy_interface, &lan743x_phylink_mac_ops);
+
+ if (IS_ERR(pl)) {
+ netdev_err(netdev, "Could not create phylink (%pe)\n", pl);
+ return PTR_ERR(pl);
+ }
+
+ adapter->phylink = pl;
+ netdev_dbg(netdev, "lan743x phylink created");
+
+ return 0;
+}
+
+static bool lan743x_phy_handle_exists(struct device_node *dn)
+{
+ dn = of_parse_phandle(dn, "phy-handle", 0);
+ of_node_put(dn);
+ return dn != NULL;
+}
+
+static int lan743x_phylink_connect(struct lan743x_adapter *adapter)
+{
+ struct device_node *dn = adapter->pdev->dev.of_node;
+ struct net_device *dev = adapter->netdev;
+ struct phy_device *phydev;
+ int ret;
+
+ if (dn)
+ ret = phylink_of_phy_connect(adapter->phylink, dn, 0);
+
+ if (!dn || (ret && !lan743x_phy_handle_exists(dn))) {
+ phydev = phy_find_first(adapter->mdiobus);
+ if (phydev) {
+ /* attach the mac to the phy */
+ ret = phylink_connect_phy(adapter->phylink, phydev);
+ } else if (((adapter->csr.id_rev & ID_REV_ID_MASK_) ==
+ ID_REV_ID_LAN7431_) || adapter->is_pci11x1x) {
+ struct phylink_link_state state;
+ unsigned long caps;
+
+ caps = adapter->phylink_config.mac_capabilities;
+ if (caps & MAC_2500FD) {
+ state.speed = SPEED_2500;
+ state.duplex = DUPLEX_FULL;
+ } else if (caps & MAC_1000FD) {
+ state.speed = SPEED_1000;
+ state.duplex = DUPLEX_FULL;
+ } else {
+ state.speed = SPEED_UNKNOWN;
+ state.duplex = DUPLEX_UNKNOWN;
+ }
+
+ ret = phylink_set_fixed_link(adapter->phylink, &state);
+ if (ret) {
+ netdev_err(dev, "Could not set fixed link\n");
+ return ret;
+ }
+ } else {
+ netdev_err(dev, "no PHY found\n");
+ return -ENXIO;
+ }
+ }
+
+ if (ret) {
+ netdev_err(dev, "Could not attach PHY (%d)\n", ret);
+ return ret;
+ }
+
+ phylink_start(adapter->phylink);
+
+ return 0;
+}
+
+static void lan743x_phylink_disconnect(struct lan743x_adapter *adapter)
+{
+ phylink_stop(adapter->phylink);
+ phylink_disconnect_phy(adapter->phylink);
+}
+
static int lan743x_netdev_close(struct net_device *netdev)
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
@@ -3067,7 +3241,7 @@ static int lan743x_netdev_close(struct net_device *netdev)
lan743x_ptp_close(adapter);
- lan743x_phy_close(adapter);
+ lan743x_phylink_disconnect(adapter);
lan743x_mac_close(adapter);
@@ -3090,13 +3264,13 @@ static int lan743x_netdev_open(struct net_device *netdev)
if (ret)
goto close_intr;
- ret = lan743x_phy_open(adapter);
+ ret = lan743x_phylink_connect(adapter);
if (ret)
goto close_mac;
ret = lan743x_ptp_open(adapter);
if (ret)
- goto close_phy;
+ goto close_mac;
lan743x_rfe_open(adapter);
@@ -3111,6 +3285,20 @@ static int lan743x_netdev_open(struct net_device *netdev)
if (ret)
goto close_tx;
}
+
+ if (netdev->phydev)
+ phy_support_eee(netdev->phydev);
+
+#ifdef CONFIG_PM
+ if (adapter->netdev->phydev) {
+ struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+
+ phy_ethtool_get_wol(netdev->phydev, &wol);
+ adapter->phy_wol_supported = wol.supported;
+ adapter->phy_wolopts = wol.wolopts;
+ }
+#endif
+
return 0;
close_tx:
@@ -3125,9 +3313,8 @@ close_rx:
lan743x_rx_close(&adapter->rx[index]);
}
lan743x_ptp_close(adapter);
-
-close_phy:
- lan743x_phy_close(adapter);
+ if (adapter->phylink)
+ lan743x_phylink_disconnect(adapter);
close_mac:
lan743x_mac_close(adapter);
@@ -3156,11 +3343,12 @@ static netdev_tx_t lan743x_netdev_xmit_frame(struct sk_buff *skb,
static int lan743x_netdev_ioctl(struct net_device *netdev,
struct ifreq *ifr, int cmd)
{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
if (!netif_running(netdev))
return -EINVAL;
- if (cmd == SIOCSHWTSTAMP)
- return lan743x_ptp_ioctl(netdev, ifr, cmd);
- return phy_mii_ioctl(netdev->phydev, ifr, cmd);
+
+ return phylink_mii_ioctl(adapter->phylink, ifr, cmd);
}
static void lan743x_netdev_set_multicast(struct net_device *netdev)
@@ -3177,7 +3365,7 @@ static int lan743x_netdev_change_mtu(struct net_device *netdev, int new_mtu)
ret = lan743x_mac_set_mtu(adapter, new_mtu);
if (!ret)
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
return ret;
}
@@ -3253,6 +3441,8 @@ static const struct net_device_ops lan743x_netdev_ops = {
.ndo_change_mtu = lan743x_netdev_change_mtu,
.ndo_get_stats64 = lan743x_netdev_get_stats64,
.ndo_set_mac_address = lan743x_netdev_set_mac_address,
+ .ndo_hwtstamp_get = lan743x_ptp_hwtstamp_get,
+ .ndo_hwtstamp_set = lan743x_ptp_hwtstamp_set,
};
static void lan743x_hardware_cleanup(struct lan743x_adapter *adapter)
@@ -3265,10 +3455,17 @@ static void lan743x_mdiobus_cleanup(struct lan743x_adapter *adapter)
mdiobus_unregister(adapter->mdiobus);
}
+static void lan743x_destroy_phylink(struct lan743x_adapter *adapter)
+{
+ phylink_destroy(adapter->phylink);
+ adapter->phylink = NULL;
+}
+
static void lan743x_full_cleanup(struct lan743x_adapter *adapter)
{
unregister_netdev(adapter->netdev);
+ lan743x_destroy_phylink(adapter);
lan743x_mdiobus_cleanup(adapter);
lan743x_hardware_cleanup(adapter);
lan743x_pci_cleanup(adapter);
@@ -3293,6 +3490,7 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
struct pci_dev *pdev)
{
struct lan743x_tx *tx;
+ u32 sgmii_ctl;
int index;
int ret;
@@ -3305,6 +3503,15 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
spin_lock_init(&adapter->eth_syslock_spinlock);
mutex_init(&adapter->sgmii_rw_lock);
pci11x1x_set_rfe_rd_fifo_threshold(adapter);
+ sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
+ if (adapter->is_sgmii_en) {
+ sgmii_ctl |= SGMII_CTL_SGMII_ENABLE_;
+ sgmii_ctl &= ~SGMII_CTL_SGMII_POWER_DN_;
+ } else {
+ sgmii_ctl &= ~SGMII_CTL_SGMII_ENABLE_;
+ sgmii_ctl |= SGMII_CTL_SGMII_POWER_DN_;
+ }
+ lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
} else {
adapter->max_tx_channels = LAN743X_MAX_TX_CHANNELS;
adapter->used_tx_channels = LAN743X_USED_TX_CHANNELS;
@@ -3322,10 +3529,6 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
if (ret)
return ret;
- ret = lan743x_phy_init(adapter);
- if (ret)
- return ret;
-
ret = lan743x_ptp_init(adapter);
if (ret)
return ret;
@@ -3348,12 +3551,14 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
spin_lock_init(&tx->ring_lock);
}
+ /* Ensure EEEEN is clear */
+ lan743x_mac_eee_enable(adapter, false);
+
return 0;
}
static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
{
- u32 sgmii_ctl;
int ret;
adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev);
@@ -3365,10 +3570,6 @@ static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
adapter->mdiobus->priv = (void *)adapter;
if (adapter->is_pci11x1x) {
if (adapter->is_sgmii_en) {
- sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
- sgmii_ctl |= SGMII_CTL_SGMII_ENABLE_;
- sgmii_ctl &= ~SGMII_CTL_SGMII_POWER_DN_;
- lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
netif_dbg(adapter, drv, adapter->netdev,
"SGMII operation\n");
adapter->mdiobus->read = lan743x_mdiobus_read_c22;
@@ -3379,10 +3580,6 @@ static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
netif_dbg(adapter, drv, adapter->netdev,
"lan743x-mdiobus-c45\n");
} else {
- sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
- sgmii_ctl &= ~SGMII_CTL_SGMII_ENABLE_;
- sgmii_ctl |= SGMII_CTL_SGMII_POWER_DN_;
- lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
netif_dbg(adapter, drv, adapter->netdev,
"RGMII operation\n");
// Only C22 support when RGMII I/F
@@ -3468,6 +3665,10 @@ static int lan743x_pcidev_probe(struct pci_dev *pdev,
if (ret)
goto cleanup_pci;
+ ret = lan743x_hw_reset_phy(adapter);
+ if (ret)
+ goto cleanup_pci;
+
ret = lan743x_hardware_init(adapter, pdev);
if (ret)
goto cleanup_pci;
@@ -3482,14 +3683,21 @@ static int lan743x_pcidev_probe(struct pci_dev *pdev,
NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
adapter->netdev->hw_features = adapter->netdev->features;
- /* carrier off reporting is important to ethtool even BEFORE open */
- netif_carrier_off(netdev);
+ ret = lan743x_phylink_create(adapter);
+ if (ret < 0) {
+ netif_err(adapter, probe, netdev,
+ "failed to setup phylink (%d)\n", ret);
+ goto cleanup_mdiobus;
+ }
ret = register_netdev(adapter->netdev);
if (ret < 0)
- goto cleanup_mdiobus;
+ goto cleanup_phylink;
return 0;
+cleanup_phylink:
+ lan743x_destroy_phylink(adapter);
+
cleanup_mdiobus:
lan743x_mdiobus_cleanup(adapter);
@@ -3568,7 +3776,7 @@ static void lan743x_pm_set_wol(struct lan743x_adapter *adapter)
/* clear wake settings */
pmtctl = lan743x_csr_read(adapter, PMT_CTL);
- pmtctl |= PMT_CTL_WUPS_MASK_;
+ pmtctl |= PMT_CTL_WUPS_MASK_ | PMT_CTL_RES_CLR_WKP_MASK_;
pmtctl &= ~(PMT_CTL_GPIO_WAKEUP_EN_ | PMT_CTL_EEE_WAKEUP_EN_ |
PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_ |
PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_ | PMT_CTL_ETH_PHY_WAKE_EN_);
@@ -3580,10 +3788,9 @@ static void lan743x_pm_set_wol(struct lan743x_adapter *adapter)
pmtctl |= PMT_CTL_ETH_PHY_D3_COLD_OVR_ | PMT_CTL_ETH_PHY_D3_OVR_;
- if (adapter->wolopts & WAKE_PHY) {
- pmtctl |= PMT_CTL_ETH_PHY_EDPD_PLL_CTL_;
+ if (adapter->phy_wolopts)
pmtctl |= PMT_CTL_ETH_PHY_WAKE_EN_;
- }
+
if (adapter->wolopts & WAKE_MAGIC) {
wucsr |= MAC_WUCSR_MPEN_;
macrx |= MAC_RX_RXEN_;
@@ -3679,7 +3886,7 @@ static int lan743x_pm_suspend(struct device *dev)
lan743x_csr_write(adapter, MAC_WUCSR2, 0);
lan743x_csr_write(adapter, MAC_WK_SRC, 0xFFFFFFFF);
- if (adapter->wolopts)
+ if (adapter->wolopts || adapter->phy_wolopts)
lan743x_pm_set_wol(adapter);
if (adapter->is_pci11x1x) {
@@ -3703,11 +3910,11 @@ static int lan743x_pm_resume(struct device *dev)
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
struct lan743x_adapter *adapter = netdev_priv(netdev);
+ u32 data;
int ret;
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
- pci_save_state(pdev);
/* Restore HW_CFG that was saved during pm suspend */
if (adapter->is_pci11x1x)
@@ -3721,6 +3928,31 @@ static int lan743x_pm_resume(struct device *dev)
return ret;
}
+ ret = lan743x_csr_read(adapter, MAC_WK_SRC);
+ netif_dbg(adapter, drv, adapter->netdev,
+ "Wakeup source : 0x%08X\n", ret);
+
+ /* Clear the wol configuration and status bits. Note that
+ * the status bits are "Write One to Clear (W1C)"
+ */
+ data = MAC_WUCSR_EEE_TX_WAKE_ | MAC_WUCSR_EEE_RX_WAKE_ |
+ MAC_WUCSR_RFE_WAKE_FR_ | MAC_WUCSR_PFDA_FR_ | MAC_WUCSR_WUFR_ |
+ MAC_WUCSR_MPR_ | MAC_WUCSR_BCAST_FR_;
+ lan743x_csr_write(adapter, MAC_WUCSR, data);
+
+ data = MAC_WUCSR2_NS_RCD_ | MAC_WUCSR2_ARP_RCD_ |
+ MAC_WUCSR2_IPV6_TCPSYN_RCD_ | MAC_WUCSR2_IPV4_TCPSYN_RCD_;
+ lan743x_csr_write(adapter, MAC_WUCSR2, data);
+
+ data = MAC_WK_SRC_ETH_PHY_WK_ | MAC_WK_SRC_IPV6_TCPSYN_RCD_WK_ |
+ MAC_WK_SRC_IPV4_TCPSYN_RCD_WK_ | MAC_WK_SRC_EEE_TX_WK_ |
+ MAC_WK_SRC_EEE_RX_WK_ | MAC_WK_SRC_RFE_FR_WK_ |
+ MAC_WK_SRC_PFDA_FR_WK_ | MAC_WK_SRC_MP_FR_WK_ |
+ MAC_WK_SRC_BCAST_FR_WK_ | MAC_WK_SRC_WU_FR_WK_ |
+ MAC_WK_SRC_WK_FR_SAVED_;
+ lan743x_csr_write(adapter, MAC_WK_SRC, data);
+
+ rtnl_lock();
/* open netdev when netdev is at running state while resume.
 * For instance, it is true when system wakes up after pm-suspend
* However, it is false when system wakes up after suspend GUI menu
@@ -3729,9 +3961,7 @@ static int lan743x_pm_resume(struct device *dev)
lan743x_netdev_open(netdev);
netif_device_attach(netdev);
- ret = lan743x_csr_read(adapter, MAC_WK_SRC);
- netif_info(adapter, drv, adapter->netdev,
- "Wakeup source : 0x%08X\n", ret);
+ rtnl_unlock();
return 0;
}
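
For reference, the new phylink mac_link_up handler above folds the negotiated speed into the two MAC_CR configuration bits. The following standalone restatement of that mapping is only an illustration; the helper name is not part of the driver:

/* Illustrative sketch: speed encoding written to MAC_CR by
 * lan743x_phylink_mac_link_up() in the diff above.
 *
 *   speed       CFG_H  CFG_L
 *   10 Mbps       0      0
 *   100 Mbps      0      1
 *   1000 Mbps     1      0
 *   2500 Mbps     1      1
 */
static u32 speed_to_mac_cr_cfg(int speed, u32 mac_cr)	/* hypothetical helper */
{
	mac_cr &= ~(MAC_CR_CFG_H_ | MAC_CR_CFG_L_);	/* defaults to SPEED_10 */
	if (speed == SPEED_2500)
		mac_cr |= MAC_CR_CFG_H_ | MAC_CR_CFG_L_;
	else if (speed == SPEED_1000)
		mac_cr |= MAC_CR_CFG_H_;
	else if (speed == SPEED_100)
		mac_cr |= MAC_CR_CFG_L_;
	return mac_cr;
}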
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index 645bc048e52e..02a28b709163 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -5,6 +5,7 @@
#define _LAN743X_H
#include <linux/phy.h>
+#include <linux/phylink.h>
#include "lan743x_ptp.h"
#define DRIVER_AUTHOR "Bryan Whitehead <Bryan.Whitehead@microchip.com>"
@@ -61,6 +62,7 @@
#define PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_ BIT(18)
#define PMT_CTL_GPIO_WAKEUP_EN_ BIT(15)
#define PMT_CTL_EEE_WAKEUP_EN_ BIT(13)
+#define PMT_CTL_RES_CLR_WKP_MASK_ GENMASK(9, 8)
#define PMT_CTL_READY_ BIT(7)
#define PMT_CTL_ETH_PHY_RST_ BIT(4)
#define PMT_CTL_WOL_EN_ BIT(3)
@@ -227,12 +229,31 @@
#define MAC_WUCSR (0x140)
#define MAC_MP_SO_EN_ BIT(21)
#define MAC_WUCSR_RFE_WAKE_EN_ BIT(14)
+#define MAC_WUCSR_EEE_TX_WAKE_ BIT(13)
+#define MAC_WUCSR_EEE_RX_WAKE_ BIT(11)
+#define MAC_WUCSR_RFE_WAKE_FR_ BIT(9)
+#define MAC_WUCSR_PFDA_FR_ BIT(7)
+#define MAC_WUCSR_WUFR_ BIT(6)
+#define MAC_WUCSR_MPR_ BIT(5)
+#define MAC_WUCSR_BCAST_FR_ BIT(4)
#define MAC_WUCSR_PFDA_EN_ BIT(3)
#define MAC_WUCSR_WAKE_EN_ BIT(2)
#define MAC_WUCSR_MPEN_ BIT(1)
#define MAC_WUCSR_BCST_EN_ BIT(0)
#define MAC_WK_SRC (0x144)
+#define MAC_WK_SRC_ETH_PHY_WK_ BIT(17)
+#define MAC_WK_SRC_IPV6_TCPSYN_RCD_WK_ BIT(16)
+#define MAC_WK_SRC_IPV4_TCPSYN_RCD_WK_ BIT(15)
+#define MAC_WK_SRC_EEE_TX_WK_ BIT(14)
+#define MAC_WK_SRC_EEE_RX_WK_ BIT(13)
+#define MAC_WK_SRC_RFE_FR_WK_ BIT(12)
+#define MAC_WK_SRC_PFDA_FR_WK_ BIT(11)
+#define MAC_WK_SRC_MP_FR_WK_ BIT(10)
+#define MAC_WK_SRC_BCAST_FR_WK_ BIT(9)
+#define MAC_WK_SRC_WU_FR_WK_ BIT(8)
+#define MAC_WK_SRC_WK_FR_SAVED_ BIT(7)
+
#define MAC_MP_SO_HI (0x148)
#define MAC_MP_SO_LO (0x14C)
@@ -295,6 +316,10 @@
#define RFE_INDX(index) (0x580 + (index << 2))
#define MAC_WUCSR2 (0x600)
+#define MAC_WUCSR2_NS_RCD_ BIT(7)
+#define MAC_WUCSR2_ARP_RCD_ BIT(6)
+#define MAC_WUCSR2_IPV6_TCPSYN_RCD_ BIT(5)
+#define MAC_WUCSR2_IPV4_TCPSYN_RCD_ BIT(4)
#define SGMII_ACC (0x720)
#define SGMII_ACC_SGMII_BZY_ BIT(31)
@@ -955,6 +980,7 @@ struct lan743x_tx {
u32 frame_first;
u32 frame_data0;
u32 frame_tail;
+ u32 frame_last;
struct lan743x_tx_buffer_info *buffer_info;
@@ -1018,6 +1044,8 @@ enum lan743x_sgmii_lsd {
LINK_2500_SLAVE
};
+#define MAC_SUPPORTED_WAKES (WAKE_BCAST | WAKE_UCAST | WAKE_MCAST | \
+ WAKE_MAGIC | WAKE_ARP)
struct lan743x_adapter {
struct net_device *netdev;
struct mii_bus *mdiobus;
@@ -1025,6 +1053,8 @@ struct lan743x_adapter {
#ifdef CONFIG_PM
u32 wolopts;
u8 sopass[SOPASS_MAX];
+ u32 phy_wolopts;
+ u32 phy_wol_supported;
#endif
struct pci_dev *pdev;
struct lan743x_csr csr;
@@ -1055,6 +1085,9 @@ struct lan743x_adapter {
u32 flags;
u32 hw_cfg;
phy_interface_t phy_interface;
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
+ int rx_tstamp_filter;
};
#define LAN743X_COMPONENT_FLAG_RX(channel) BIT(20 + (channel))
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c
index 2801f08bf1c9..a3b48388b3fd 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.c
@@ -58,7 +58,7 @@ int lan743x_gpio_init(struct lan743x_adapter *adapter)
static void lan743x_ptp_wait_till_cmd_done(struct lan743x_adapter *adapter,
u32 bit_mask)
{
- int timeout = 1000;
+ int timeout = PTP_CMD_CTL_TIMEOUT_CNT;
u32 data = 0;
while (timeout &&
@@ -401,28 +401,21 @@ static int lan743x_ptpci_settime64(struct ptp_clock_info *ptpci,
u32 nano_seconds = 0;
u32 seconds = 0;
- if (ts) {
- if (ts->tv_sec > 0xFFFFFFFFLL ||
- ts->tv_sec < 0) {
- netif_warn(adapter, drv, adapter->netdev,
- "ts->tv_sec out of range, %lld\n",
- ts->tv_sec);
- return -ERANGE;
- }
- if (ts->tv_nsec >= 1000000000L ||
- ts->tv_nsec < 0) {
- netif_warn(adapter, drv, adapter->netdev,
- "ts->tv_nsec out of range, %ld\n",
- ts->tv_nsec);
- return -ERANGE;
- }
- seconds = ts->tv_sec;
- nano_seconds = ts->tv_nsec;
- lan743x_ptp_clock_set(adapter, seconds, nano_seconds, 0);
- } else {
- netif_warn(adapter, drv, adapter->netdev, "ts == NULL\n");
- return -EINVAL;
+ if (ts->tv_sec > 0xFFFFFFFFLL) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "ts->tv_sec out of range, %lld\n",
+ ts->tv_sec);
+ return -ERANGE;
}
+ if (ts->tv_nsec < 0) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "ts->tv_nsec out of range, %ld\n",
+ ts->tv_nsec);
+ return -ERANGE;
+ }
+ seconds = ts->tv_sec;
+ nano_seconds = ts->tv_nsec;
+ lan743x_ptp_clock_set(adapter, seconds, nano_seconds, 0);
return 0;
}
@@ -470,10 +463,6 @@ static int lan743x_ptp_perout(struct lan743x_adapter *adapter, int on,
struct lan743x_ptp_perout *perout = &ptp->perout[index];
int ret = 0;
- /* Reject requests with unsupported flags */
- if (perout_request->flags & ~PTP_PEROUT_DUTY_CYCLE)
- return -EOPNOTSUPP;
-
if (on) {
perout_pin = ptp_find_pin(ptp->ptp_clock, PTP_PF_PEROUT,
perout_request->index);
@@ -555,7 +544,7 @@ static int lan743x_ptp_perout(struct lan743x_adapter *adapter, int on,
if (half == wf_high) {
/* It's 50% match. Use the toggle option */
pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_TOGGLE_;
- /* In this case, devide period value by 2 */
+ /* In this case, divide period value by 2 */
ts_period = ns_to_timespec64(div_s64(period64, 2));
period_sec = ts_period.tv_sec;
period_nsec = ts_period.tv_nsec;
@@ -1544,6 +1533,10 @@ int lan743x_ptp_open(struct lan743x_adapter *adapter)
ptp->ptp_clock_info.n_per_out = LAN743X_PTP_N_EVENT_CHAN;
ptp->ptp_clock_info.n_pins = n_pins;
ptp->ptp_clock_info.pps = LAN743X_PTP_N_PPS;
+ ptp->ptp_clock_info.supported_extts_flags = PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS;
+ ptp->ptp_clock_info.supported_perout_flags = PTP_PEROUT_DUTY_CYCLE;
ptp->ptp_clock_info.pin_config = ptp->pin_config;
ptp->ptp_clock_info.adjfine = lan743x_ptpci_adjfine;
ptp->ptp_clock_info.adjtime = lan743x_ptpci_adjtime;
@@ -1743,23 +1736,32 @@ void lan743x_ptp_tx_timestamp_skb(struct lan743x_adapter *adapter,
lan743x_ptp_tx_ts_complete(adapter);
}
-int lan743x_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+int lan743x_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
- struct hwtstamp_config config;
- int ret = 0;
- int index;
+ struct lan743x_tx *tx = &adapter->tx[0];
- if (!ifr) {
- netif_err(adapter, drv, adapter->netdev,
- "SIOCSHWTSTAMP, ifr == NULL\n");
- return -EINVAL;
- }
+ if (tx->ts_flags & TX_TS_FLAG_ONE_STEP_SYNC)
+ config->tx_type = HWTSTAMP_TX_ONESTEP_SYNC;
+ else if (tx->ts_flags & TX_TS_FLAG_TIMESTAMPING_ENABLED)
+ config->tx_type = HWTSTAMP_TX_ON;
+ else
+ config->tx_type = HWTSTAMP_TX_OFF;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
+ config->rx_filter = adapter->rx_tstamp_filter;
+
+ return 0;
+}
+
+int lan743x_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+ int index;
- switch (config.tx_type) {
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
for (index = 0; index < adapter->used_tx_channels;
index++)
@@ -1783,19 +1785,12 @@ int lan743x_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
lan743x_ptp_set_sync_ts_insert(adapter, true);
break;
case HWTSTAMP_TX_ONESTEP_P2P:
- ret = -ERANGE;
- break;
+ return -ERANGE;
default:
netif_warn(adapter, drv, adapter->netdev,
- " tx_type = %d, UNKNOWN\n", config.tx_type);
- ret = -EINVAL;
- break;
+ " tx_type = %d, UNKNOWN\n", config->tx_type);
+ return -EINVAL;
}
- ret = lan743x_rx_set_tstamp_mode(adapter, config.rx_filter);
-
- if (!ret)
- return copy_to_user(ifr->ifr_data, &config,
- sizeof(config)) ? -EFAULT : 0;
- return ret;
+ return lan743x_rx_set_tstamp_mode(adapter, config->rx_filter);
}
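
From user space, the ndo_hwtstamp_get/set conversion above is transparent: a SIOCSHWTSTAMP ioctl still selects tx_type and rx_filter, with the networking core translating it into a kernel_hwtstamp_config before calling the driver. A minimal user-space sketch follows; the interface name and filter choice are assumptions, not taken from the patch:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

int main(void)
{
	struct hwtstamp_config cfg = { 0 };
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	cfg.tx_type = HWTSTAMP_TX_ON;			/* example settings */
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* interface name is an assumption */
	ifr.ifr_data = (char *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr))
		perror("SIOCSHWTSTAMP");
	else
		printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);

	close(fd);
	return 0;
}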
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.h b/drivers/net/ethernet/microchip/lan743x_ptp.h
index e26d4eff7133..f33dc83c5700 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.h
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.h
@@ -18,9 +18,10 @@
*/
#define LAN743X_PTP_N_EVENT_CHAN 2
#define LAN743X_PTP_N_PEROUT LAN743X_PTP_N_EVENT_CHAN
-#define LAN743X_PTP_N_EXTTS 4
-#define LAN743X_PTP_N_PPS 0
#define PCI11X1X_PTP_IO_MAX_CHANNELS 8
+#define LAN743X_PTP_N_EXTTS PCI11X1X_PTP_IO_MAX_CHANNELS
+#define LAN743X_PTP_N_PPS 0
+#define PTP_CMD_CTL_TIMEOUT_CNT 50
struct lan743x_adapter;
@@ -50,8 +51,11 @@ int lan743x_ptp_open(struct lan743x_adapter *adapter);
void lan743x_ptp_close(struct lan743x_adapter *adapter);
void lan743x_ptp_update_latency(struct lan743x_adapter *adapter,
u32 link_speed);
-
-int lan743x_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
+int lan743x_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config);
+int lan743x_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
#define LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS (4)
diff --git a/drivers/net/ethernet/microchip/lan865x/Kconfig b/drivers/net/ethernet/microchip/lan865x/Kconfig
new file mode 100644
index 000000000000..7f2a4e7e1915
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan865x/Kconfig
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Microchip LAN865x Driver Support
+#
+
+if NET_VENDOR_MICROCHIP
+
+config LAN865X
+ tristate "LAN865x support"
+ depends on SPI
+ select OA_TC6
+ help
+ Support for the Microchip LAN8650/1 Rev.B0/B1 MACPHY Ethernet chip. It
+ uses OPEN Alliance 10BASE-T1x Serial Interface specification.
+
+ To compile this driver as a module, choose M here. The module will be
+ called lan865x.
+
+endif # NET_VENDOR_MICROCHIP
diff --git a/drivers/net/ethernet/microchip/lan865x/Makefile b/drivers/net/ethernet/microchip/lan865x/Makefile
new file mode 100644
index 000000000000..9f5dd89c1eb8
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan865x/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the Microchip LAN865x Driver
+#
+
+obj-$(CONFIG_LAN865X) += lan865x.o
diff --git a/drivers/net/ethernet/microchip/lan865x/lan865x.c b/drivers/net/ethernet/microchip/lan865x/lan865x.c
new file mode 100644
index 000000000000..0277d9737369
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan865x/lan865x.c
@@ -0,0 +1,455 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Microchip's LAN865x 10BASE-T1S MAC-PHY driver
+ *
+ * Author: Parthiban Veerasooran <parthiban.veerasooran@microchip.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/phy.h>
+#include <linux/oa_tc6.h>
+
+#define DRV_NAME "lan8650"
+
+/* MAC Network Control Register */
+#define LAN865X_REG_MAC_NET_CTL 0x00010000
+#define MAC_NET_CTL_TXEN BIT(3) /* Transmit Enable */
+#define MAC_NET_CTL_RXEN BIT(2) /* Receive Enable */
+
+/* MAC Network Configuration Reg */
+#define LAN865X_REG_MAC_NET_CFG 0x00010001
+#define MAC_NET_CFG_PROMISCUOUS_MODE BIT(4)
+#define MAC_NET_CFG_MULTICAST_MODE BIT(6)
+#define MAC_NET_CFG_UNICAST_MODE BIT(7)
+
+/* MAC Hash Register Bottom */
+#define LAN865X_REG_MAC_L_HASH 0x00010020
+/* MAC Hash Register Top */
+#define LAN865X_REG_MAC_H_HASH 0x00010021
+/* MAC Specific Addr 1 Bottom Reg */
+#define LAN865X_REG_MAC_L_SADDR1 0x00010022
+/* MAC Specific Addr 1 Top Reg */
+#define LAN865X_REG_MAC_H_SADDR1 0x00010023
+
+/* MAC TSU Timer Increment Register */
+#define LAN865X_REG_MAC_TSU_TIMER_INCR 0x00010077
+#define MAC_TSU_TIMER_INCR_COUNT_NANOSECONDS 0x0028
+
+struct lan865x_priv {
+ struct work_struct multicast_work;
+ struct net_device *netdev;
+ struct spi_device *spi;
+ struct oa_tc6 *tc6;
+};
+
+static int lan865x_set_hw_macaddr_low_bytes(struct oa_tc6 *tc6, const u8 *mac)
+{
+ u32 regval;
+
+ regval = (mac[3] << 24) | (mac[2] << 16) | (mac[1] << 8) | mac[0];
+
+ return oa_tc6_write_register(tc6, LAN865X_REG_MAC_L_SADDR1, regval);
+}
+
+static int lan865x_set_hw_macaddr(struct lan865x_priv *priv, const u8 *mac)
+{
+ int restore_ret;
+ u32 regval;
+ int ret;
+
+ /* Configure MAC address low bytes */
+ ret = lan865x_set_hw_macaddr_low_bytes(priv->tc6, mac);
+ if (ret)
+ return ret;
+
+ /* Prepare and configure MAC address high bytes */
+ regval = (mac[5] << 8) | mac[4];
+ ret = oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_H_SADDR1,
+ regval);
+ if (!ret)
+ return 0;
+
+ /* Restore the old MAC address low bytes from netdev if the new MAC
+ * address high bytes setting failed.
+ */
+ restore_ret = lan865x_set_hw_macaddr_low_bytes(priv->tc6,
+ priv->netdev->dev_addr);
+ if (restore_ret)
+ return restore_ret;
+
+ return ret;
+}
+
+static const struct ethtool_ops lan865x_ethtool_ops = {
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+};
+
+static int lan865x_set_mac_address(struct net_device *netdev, void *addr)
+{
+ struct lan865x_priv *priv = netdev_priv(netdev);
+ struct sockaddr *address = addr;
+ int ret;
+
+ ret = eth_prepare_mac_addr_change(netdev, addr);
+ if (ret < 0)
+ return ret;
+
+ if (ether_addr_equal(address->sa_data, netdev->dev_addr))
+ return 0;
+
+ ret = lan865x_set_hw_macaddr(priv, address->sa_data);
+ if (ret)
+ return ret;
+
+ eth_commit_mac_addr_change(netdev, addr);
+
+ return 0;
+}
+
+static u32 get_address_bit(u8 addr[ETH_ALEN], u32 bit)
+{
+ return ((addr[bit / 8]) >> (bit % 8)) & 1;
+}
+
+static u32 lan865x_hash(u8 addr[ETH_ALEN])
+{
+ u32 hash_index = 0;
+
+ for (int i = 0; i < 6; i++) {
+ u32 hash = 0;
+
+ for (int j = 0; j < 8; j++)
+ hash ^= get_address_bit(addr, (j * 6) + i);
+
+ hash_index |= (hash << i);
+ }
+
+ return hash_index;
+}
+
+static int lan865x_set_specific_multicast_addr(struct lan865x_priv *priv)
+{
+ struct netdev_hw_addr *ha;
+ u32 hash_lo = 0;
+ u32 hash_hi = 0;
+ int ret;
+
+ netdev_for_each_mc_addr(ha, priv->netdev) {
+ u32 bit_num = lan865x_hash(ha->addr);
+
+ if (bit_num >= BIT(5))
+ hash_hi |= (1 << (bit_num - BIT(5)));
+ else
+ hash_lo |= (1 << bit_num);
+ }
+
+ /* Enabling specific multicast addresses */
+ ret = oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_H_HASH, hash_hi);
+ if (ret) {
+ netdev_err(priv->netdev, "Failed to write reg_hashh: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_L_HASH, hash_lo);
+ if (ret)
+ netdev_err(priv->netdev, "Failed to write reg_hashl: %d\n",
+ ret);
+
+ return ret;
+}
+
+static int lan865x_set_all_multicast_addr(struct lan865x_priv *priv)
+{
+ int ret;
+
+ /* Enabling all multicast addresses */
+ ret = oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_H_HASH,
+ 0xffffffff);
+ if (ret) {
+ netdev_err(priv->netdev, "Failed to write reg_hashh: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_L_HASH,
+ 0xffffffff);
+ if (ret)
+ netdev_err(priv->netdev, "Failed to write reg_hashl: %d\n",
+ ret);
+
+ return ret;
+}
+
+static int lan865x_clear_all_multicast_addr(struct lan865x_priv *priv)
+{
+ int ret;
+
+ ret = oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_H_HASH, 0);
+ if (ret) {
+ netdev_err(priv->netdev, "Failed to write reg_hashh: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_L_HASH, 0);
+ if (ret)
+ netdev_err(priv->netdev, "Failed to write reg_hashl: %d\n",
+ ret);
+
+ return ret;
+}
+
+static void lan865x_multicast_work_handler(struct work_struct *work)
+{
+ struct lan865x_priv *priv = container_of(work, struct lan865x_priv,
+ multicast_work);
+ u32 regval = 0;
+ int ret;
+
+ if (priv->netdev->flags & IFF_PROMISC) {
+ /* Enabling promiscuous mode */
+ regval |= MAC_NET_CFG_PROMISCUOUS_MODE;
+ regval &= (~MAC_NET_CFG_MULTICAST_MODE);
+ regval &= (~MAC_NET_CFG_UNICAST_MODE);
+ } else if (priv->netdev->flags & IFF_ALLMULTI) {
+ /* Enabling all multicast mode */
+ if (lan865x_set_all_multicast_addr(priv))
+ return;
+
+ regval &= (~MAC_NET_CFG_PROMISCUOUS_MODE);
+ regval |= MAC_NET_CFG_MULTICAST_MODE;
+ regval &= (~MAC_NET_CFG_UNICAST_MODE);
+ } else if (!netdev_mc_empty(priv->netdev)) {
+ /* Enabling specific multicast mode */
+ if (lan865x_set_specific_multicast_addr(priv))
+ return;
+
+ regval &= (~MAC_NET_CFG_PROMISCUOUS_MODE);
+ regval |= MAC_NET_CFG_MULTICAST_MODE;
+ regval &= (~MAC_NET_CFG_UNICAST_MODE);
+ } else {
+ /* Enabling local mac address only */
+ if (lan865x_clear_all_multicast_addr(priv))
+ return;
+ }
+ ret = oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_NET_CFG, regval);
+ if (ret)
+ netdev_err(priv->netdev, "Failed to enable promiscuous/multicast/normal mode: %d\n",
+ ret);
+}
+
+static void lan865x_set_multicast_list(struct net_device *netdev)
+{
+ struct lan865x_priv *priv = netdev_priv(netdev);
+
+ schedule_work(&priv->multicast_work);
+}
+
+static netdev_tx_t lan865x_send_packet(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct lan865x_priv *priv = netdev_priv(netdev);
+
+ return oa_tc6_start_xmit(priv->tc6, skb);
+}
+
+static int lan865x_hw_disable(struct lan865x_priv *priv)
+{
+ u32 regval;
+
+ if (oa_tc6_read_register(priv->tc6, LAN865X_REG_MAC_NET_CTL, &regval))
+ return -ENODEV;
+
+ regval &= ~(MAC_NET_CTL_TXEN | MAC_NET_CTL_RXEN);
+
+ if (oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_NET_CTL, regval))
+ return -ENODEV;
+
+ return 0;
+}
+
+static int lan865x_net_close(struct net_device *netdev)
+{
+ struct lan865x_priv *priv = netdev_priv(netdev);
+ int ret;
+
+ netif_stop_queue(netdev);
+ phy_stop(netdev->phydev);
+ ret = lan865x_hw_disable(priv);
+ if (ret) {
+ netdev_err(netdev, "Failed to disable the hardware: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int lan865x_hw_enable(struct lan865x_priv *priv)
+{
+ u32 regval;
+
+ if (oa_tc6_read_register(priv->tc6, LAN865X_REG_MAC_NET_CTL, &regval))
+ return -ENODEV;
+
+ regval |= MAC_NET_CTL_TXEN | MAC_NET_CTL_RXEN;
+
+ if (oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_NET_CTL, regval))
+ return -ENODEV;
+
+ return 0;
+}
+
+static int lan865x_net_open(struct net_device *netdev)
+{
+ struct lan865x_priv *priv = netdev_priv(netdev);
+ int ret;
+
+ ret = lan865x_hw_enable(priv);
+ if (ret) {
+ netdev_err(netdev, "Failed to enable hardware: %d\n", ret);
+ return ret;
+ }
+
+ phy_start(netdev->phydev);
+
+ netif_start_queue(netdev);
+
+ return 0;
+}
+
+static const struct net_device_ops lan865x_netdev_ops = {
+ .ndo_open = lan865x_net_open,
+ .ndo_stop = lan865x_net_close,
+ .ndo_start_xmit = lan865x_send_packet,
+ .ndo_set_rx_mode = lan865x_set_multicast_list,
+ .ndo_set_mac_address = lan865x_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
+};
+
+static int lan865x_probe(struct spi_device *spi)
+{
+ struct net_device *netdev;
+ struct lan865x_priv *priv;
+ int ret;
+
+ netdev = alloc_etherdev(sizeof(struct lan865x_priv));
+ if (!netdev)
+ return -ENOMEM;
+
+ priv = netdev_priv(netdev);
+ priv->netdev = netdev;
+ priv->spi = spi;
+ spi_set_drvdata(spi, priv);
+ INIT_WORK(&priv->multicast_work, lan865x_multicast_work_handler);
+
+ priv->tc6 = oa_tc6_init(spi, netdev);
+ if (!priv->tc6) {
+ ret = -ENODEV;
+ goto free_netdev;
+ }
+
+ /* LAN865x Rev.B0/B1 configuration parameters from AN1760
+ * As per the Configuration Application Note AN1760 published in the
+ * link, https://www.microchip.com/en-us/application-notes/an1760
+ * Revision F (DS60001760G - June 2024), configure the MAC to set time
+ * stamping at the end of the Start of Frame Delimiter (SFD) and set the
+ * Timer Increment reg to 40 ns to be used as a 25 MHz internal clock.
+ */
+ ret = oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_TSU_TIMER_INCR,
+ MAC_TSU_TIMER_INCR_COUNT_NANOSECONDS);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to config TSU Timer Incr reg: %d\n",
+ ret);
+ goto oa_tc6_exit;
+ }
+
+ /* As per the point s3 in the below errata, SPI receive Ethernet frame
+ * transfer may halt when starting the next frame in the same data block
+ * (chunk) as the end of a previous frame. The RFA field should be
+ * configured to 01b or 10b for proper operation. In these modes, only
+ * one receive Ethernet frame will be placed in a single data block.
+ * When the RFA field is written to 01b, received frames will be forced
+ * to only start in the first word of the data block payload (SWO=0). As
+ * recommended, enable zero align receive frame feature for proper
+ * operation.
+ *
+ * https://ww1.microchip.com/downloads/aemDocuments/documents/AIS/ProductDocuments/Errata/LAN8650-1-Errata-80001075.pdf
+ */
+ ret = oa_tc6_zero_align_receive_frame_enable(priv->tc6);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to set ZARFE: %d\n", ret);
+ goto oa_tc6_exit;
+ }
+
+ /* Get the MAC address from the SPI device tree node */
+ if (device_get_ethdev_address(&spi->dev, netdev))
+ eth_hw_addr_random(netdev);
+
+ ret = lan865x_set_hw_macaddr(priv, netdev->dev_addr);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to configure MAC: %d\n", ret);
+ goto oa_tc6_exit;
+ }
+
+ netdev->if_port = IF_PORT_10BASET;
+ netdev->irq = spi->irq;
+ netdev->netdev_ops = &lan865x_netdev_ops;
+ netdev->ethtool_ops = &lan865x_ethtool_ops;
+
+ ret = register_netdev(netdev);
+ if (ret) {
+ dev_err(&spi->dev, "Register netdev failed (ret = %d)", ret);
+ goto oa_tc6_exit;
+ }
+
+ return 0;
+
+oa_tc6_exit:
+ oa_tc6_exit(priv->tc6);
+free_netdev:
+ free_netdev(priv->netdev);
+ return ret;
+}
+
+static void lan865x_remove(struct spi_device *spi)
+{
+ struct lan865x_priv *priv = spi_get_drvdata(spi);
+
+ cancel_work_sync(&priv->multicast_work);
+ unregister_netdev(priv->netdev);
+ oa_tc6_exit(priv->tc6);
+ free_netdev(priv->netdev);
+}
+
+static const struct spi_device_id lan865x_ids[] = {
+ { .name = "lan8650" },
+ { .name = "lan8651" },
+ {},
+};
+MODULE_DEVICE_TABLE(spi, lan865x_ids);
+
+static const struct of_device_id lan865x_dt_ids[] = {
+ { .compatible = "microchip,lan8650" },
+ { .compatible = "microchip,lan8651" },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, lan865x_dt_ids);
+
+static struct spi_driver lan865x_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = lan865x_dt_ids,
+ },
+ .probe = lan865x_probe,
+ .remove = lan865x_remove,
+ .id_table = lan865x_ids,
+};
+module_spi_driver(lan865x_driver);
+
+MODULE_DESCRIPTION(DRV_NAME " 10Base-T1S MACPHY Ethernet Driver");
+MODULE_AUTHOR("Parthiban Veerasooran <parthiban.veerasooran@microchip.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/microchip/lan966x/Kconfig b/drivers/net/ethernet/microchip/lan966x/Kconfig
index f9ebffc04eb8..f663b6e12466 100644
--- a/drivers/net/ethernet/microchip/lan966x/Kconfig
+++ b/drivers/net/ethernet/microchip/lan966x/Kconfig
@@ -8,6 +8,7 @@ config LAN966X_SWITCH
select PHYLINK
select PAGE_POOL
select VCAP
+ select FDMA
help
This driver supports the Lan966x network switch device.
diff --git a/drivers/net/ethernet/microchip/lan966x/Makefile b/drivers/net/ethernet/microchip/lan966x/Makefile
index 3b6ac331691d..4cdbe263502c 100644
--- a/drivers/net/ethernet/microchip/lan966x/Makefile
+++ b/drivers/net/ethernet/microchip/lan966x/Makefile
@@ -20,3 +20,4 @@ lan966x-switch-$(CONFIG_DEBUG_FS) += lan966x_vcap_debugfs.o
# Provide include files
ccflags-y += -I$(srctree)/drivers/net/ethernet/microchip/vcap
+ccflags-y += -I$(srctree)/drivers/net/ethernet/microchip/fdma
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
index 06811c60d598..fe4e61405284 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
@@ -294,7 +294,7 @@ static void lan966x_stats_update(struct lan966x *lan966x)
{
int i, j;
- mutex_lock(&lan966x->stats_lock);
+ spin_lock(&lan966x->stats_lock);
for (i = 0; i < lan966x->num_phys_ports; i++) {
uint idx = i * lan966x->num_stats;
@@ -310,7 +310,7 @@ static void lan966x_stats_update(struct lan966x *lan966x)
}
}
- mutex_unlock(&lan966x->stats_lock);
+ spin_unlock(&lan966x->stats_lock);
}
static int lan966x_get_sset_count(struct net_device *dev, int sset)
@@ -365,7 +365,7 @@ static void lan966x_get_eth_mac_stats(struct net_device *dev,
idx = port->chip_port * lan966x->num_stats;
- mutex_lock(&lan966x->stats_lock);
+ spin_lock(&lan966x->stats_lock);
mac_stats->FramesTransmittedOK =
lan966x->stats[idx + SYS_COUNT_TX_UC] +
@@ -376,7 +376,6 @@ static void lan966x_get_eth_mac_stats(struct net_device *dev,
lan966x->stats[idx + SYS_COUNT_TX_PMAC_BC];
mac_stats->SingleCollisionFrames =
lan966x->stats[idx + SYS_COUNT_TX_COL];
- mac_stats->MultipleCollisionFrames = 0;
mac_stats->FramesReceivedOK =
lan966x->stats[idx + SYS_COUNT_RX_UC] +
lan966x->stats[idx + SYS_COUNT_RX_MC] +
@@ -384,26 +383,19 @@ static void lan966x_get_eth_mac_stats(struct net_device *dev,
mac_stats->FrameCheckSequenceErrors =
lan966x->stats[idx + SYS_COUNT_RX_CRC] +
lan966x->stats[idx + SYS_COUNT_RX_CRC];
- mac_stats->AlignmentErrors = 0;
mac_stats->OctetsTransmittedOK =
lan966x->stats[idx + SYS_COUNT_TX_OCT] +
lan966x->stats[idx + SYS_COUNT_TX_PMAC_OCT];
mac_stats->FramesWithDeferredXmissions =
lan966x->stats[idx + SYS_COUNT_TX_MM_HOLD];
- mac_stats->LateCollisions = 0;
- mac_stats->FramesAbortedDueToXSColls = 0;
- mac_stats->FramesLostDueToIntMACXmitError = 0;
- mac_stats->CarrierSenseErrors = 0;
mac_stats->OctetsReceivedOK =
lan966x->stats[idx + SYS_COUNT_RX_OCT];
- mac_stats->FramesLostDueToIntMACRcvError = 0;
mac_stats->MulticastFramesXmittedOK =
lan966x->stats[idx + SYS_COUNT_TX_MC] +
lan966x->stats[idx + SYS_COUNT_TX_PMAC_MC];
mac_stats->BroadcastFramesXmittedOK =
lan966x->stats[idx + SYS_COUNT_TX_BC] +
lan966x->stats[idx + SYS_COUNT_TX_PMAC_BC];
- mac_stats->FramesWithExcessiveDeferral = 0;
mac_stats->MulticastFramesReceivedOK =
lan966x->stats[idx + SYS_COUNT_RX_MC];
mac_stats->BroadcastFramesReceivedOK =
@@ -424,7 +416,7 @@ static void lan966x_get_eth_mac_stats(struct net_device *dev,
lan966x->stats[idx + SYS_COUNT_RX_LONG] +
lan966x->stats[idx + SYS_COUNT_RX_PMAC_LONG];
- mutex_unlock(&lan966x->stats_lock);
+ spin_unlock(&lan966x->stats_lock);
}
static const struct ethtool_rmon_hist_range lan966x_rmon_ranges[] = {
@@ -450,7 +442,7 @@ static void lan966x_get_eth_rmon_stats(struct net_device *dev,
idx = port->chip_port * lan966x->num_stats;
- mutex_lock(&lan966x->stats_lock);
+ spin_lock(&lan966x->stats_lock);
rmon_stats->undersize_pkts =
lan966x->stats[idx + SYS_COUNT_RX_SHORT] +
@@ -508,7 +500,7 @@ static void lan966x_get_eth_rmon_stats(struct net_device *dev,
lan966x->stats[idx + SYS_COUNT_TX_SZ_1024_1526] +
lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_1024_1526];
- mutex_unlock(&lan966x->stats_lock);
+ spin_unlock(&lan966x->stats_lock);
*ranges = lan966x_rmon_ranges;
}
@@ -546,7 +538,7 @@ static int lan966x_set_pauseparam(struct net_device *dev,
}
static int lan966x_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct lan966x_port *port = netdev_priv(dev);
struct lan966x *lan966x = port->lan966x;
@@ -557,16 +549,13 @@ static int lan966x_get_ts_info(struct net_device *dev,
phc = &lan966x->phc[LAN966X_PHC_PORT];
- info->phc_index = phc->clock ? ptp_clock_index(phc->clock) : -1;
- if (info->phc_index == -1) {
- info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
+ if (phc->clock) {
+ info->phc_index = ptp_clock_index(phc->clock);
+ } else {
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE;
return 0;
}
info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
@@ -614,7 +603,7 @@ void lan966x_stats_get(struct net_device *dev,
idx = port->chip_port * lan966x->num_stats;
- mutex_lock(&lan966x->stats_lock);
+ spin_lock(&lan966x->stats_lock);
stats->rx_bytes = lan966x->stats[idx + SYS_COUNT_RX_OCT] +
lan966x->stats[idx + SYS_COUNT_RX_PMAC_OCT];
@@ -696,7 +685,7 @@ void lan966x_stats_get(struct net_device *dev,
stats->collisions = lan966x->stats[idx + SYS_COUNT_TX_COL];
- mutex_unlock(&lan966x->stats_lock);
+ spin_unlock(&lan966x->stats_lock);
}
int lan966x_stats_init(struct lan966x *lan966x)
@@ -712,7 +701,7 @@ int lan966x_stats_init(struct lan966x *lan966x)
return -ENOMEM;
/* Init stats worker */
- mutex_init(&lan966x->stats_lock);
+ spin_lock_init(&lan966x->stats_lock);
snprintf(queue_name, sizeof(queue_name), "%s-stats",
dev_name(lan966x->dev));
lan966x->stats_queue = create_singlethread_workqueue(queue_name);
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
index 3960534ac2ad..502670718104 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
@@ -6,31 +6,55 @@
#include "lan966x_main.h"
-static int lan966x_fdma_channel_active(struct lan966x *lan966x)
-{
- return lan_rd(lan966x, FDMA_CH_ACTIVE);
-}
-
-static struct page *lan966x_fdma_rx_alloc_page(struct lan966x_rx *rx,
- struct lan966x_db *db)
+static int lan966x_fdma_rx_dataptr_cb(struct fdma *fdma, int dcb, int db,
+ u64 *dataptr)
{
+ struct lan966x *lan966x = (struct lan966x *)fdma->priv;
+ struct lan966x_rx *rx = &lan966x->rx;
struct page *page;
page = page_pool_dev_alloc_pages(rx->page_pool);
if (unlikely(!page))
- return NULL;
+ return -ENOMEM;
+
+ rx->page[dcb][db] = page;
+ *dataptr = page_pool_get_dma_addr(page) + XDP_PACKET_HEADROOM;
+
+ return 0;
+}
- db->dataptr = page_pool_get_dma_addr(page) + XDP_PACKET_HEADROOM;
+static int lan966x_fdma_tx_dataptr_cb(struct fdma *fdma, int dcb, int db,
+ u64 *dataptr)
+{
+ struct lan966x *lan966x = (struct lan966x *)fdma->priv;
+
+ *dataptr = lan966x->tx.dcbs_buf[dcb].dma_addr;
- return page;
+ return 0;
+}
+
+static int lan966x_fdma_xdp_tx_dataptr_cb(struct fdma *fdma, int dcb, int db,
+ u64 *dataptr)
+{
+ struct lan966x *lan966x = (struct lan966x *)fdma->priv;
+
+ *dataptr = lan966x->tx.dcbs_buf[dcb].dma_addr + XDP_PACKET_HEADROOM;
+
+ return 0;
+}
+
+static int lan966x_fdma_channel_active(struct lan966x *lan966x)
+{
+ return lan_rd(lan966x, FDMA_CH_ACTIVE);
}
static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx)
{
+ struct fdma *fdma = &rx->fdma;
int i, j;
- for (i = 0; i < FDMA_DCB_MAX; ++i) {
- for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j)
+ for (i = 0; i < fdma->n_dcbs; ++i) {
+ for (j = 0; j < fdma->n_dbs; ++j)
page_pool_put_full_page(rx->page_pool,
rx->page[i][j], false);
}
@@ -38,41 +62,23 @@ static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx)
static void lan966x_fdma_rx_free_page(struct lan966x_rx *rx)
{
+ struct fdma *fdma = &rx->fdma;
struct page *page;
- page = rx->page[rx->dcb_index][rx->db_index];
+ page = rx->page[fdma->dcb_index][fdma->db_index];
if (unlikely(!page))
return;
page_pool_recycle_direct(rx->page_pool, page);
}
-static void lan966x_fdma_rx_add_dcb(struct lan966x_rx *rx,
- struct lan966x_rx_dcb *dcb,
- u64 nextptr)
-{
- struct lan966x_db *db;
- int i;
-
- for (i = 0; i < FDMA_RX_DCB_MAX_DBS; ++i) {
- db = &dcb->db[i];
- db->status = FDMA_DCB_STATUS_INTR;
- }
-
- dcb->nextptr = FDMA_DCB_INVALID_DATA;
- dcb->info = FDMA_DCB_INFO_DATAL(PAGE_SIZE << rx->page_order);
-
- rx->last_entry->nextptr = nextptr;
- rx->last_entry = dcb;
-}
-
static int lan966x_fdma_rx_alloc_page_pool(struct lan966x_rx *rx)
{
struct lan966x *lan966x = rx->lan966x;
struct page_pool_params pp_params = {
.order = rx->page_order,
.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
- .pool_size = FDMA_DCB_MAX,
+ .pool_size = rx->fdma.n_dcbs,
.nid = NUMA_NO_NODE,
.dev = lan966x->dev,
.dma_dir = DMA_FROM_DEVICE,
@@ -104,84 +110,41 @@ static int lan966x_fdma_rx_alloc_page_pool(struct lan966x_rx *rx)
static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx)
{
struct lan966x *lan966x = rx->lan966x;
- struct lan966x_rx_dcb *dcb;
- struct lan966x_db *db;
- struct page *page;
- int i, j;
- int size;
+ struct fdma *fdma = &rx->fdma;
+ int err;
if (lan966x_fdma_rx_alloc_page_pool(rx))
return PTR_ERR(rx->page_pool);
- /* calculate how many pages are needed to allocate the dcbs */
- size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
- size = ALIGN(size, PAGE_SIZE);
-
- rx->dcbs = dma_alloc_coherent(lan966x->dev, size, &rx->dma, GFP_KERNEL);
- if (!rx->dcbs)
- return -ENOMEM;
-
- rx->last_entry = rx->dcbs;
- rx->db_index = 0;
- rx->dcb_index = 0;
-
- /* Now for each dcb allocate the dbs */
- for (i = 0; i < FDMA_DCB_MAX; ++i) {
- dcb = &rx->dcbs[i];
- dcb->info = 0;
-
- /* For each db allocate a page and map it to the DB dataptr. */
- for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j) {
- db = &dcb->db[j];
- page = lan966x_fdma_rx_alloc_page(rx, db);
- if (!page)
- return -ENOMEM;
-
- db->status = 0;
- rx->page[i][j] = page;
- }
+ err = fdma_alloc_coherent(lan966x->dev, fdma);
+ if (err)
+ return err;
- lan966x_fdma_rx_add_dcb(rx, dcb, rx->dma + sizeof(*dcb) * i);
- }
+ fdma_dcbs_init(fdma, FDMA_DCB_INFO_DATAL(fdma->db_size),
+ FDMA_DCB_STATUS_INTR);
return 0;
}
-static void lan966x_fdma_rx_advance_dcb(struct lan966x_rx *rx)
-{
- rx->dcb_index++;
- rx->dcb_index &= FDMA_DCB_MAX - 1;
-}
-
-static void lan966x_fdma_rx_free(struct lan966x_rx *rx)
-{
- struct lan966x *lan966x = rx->lan966x;
- u32 size;
-
- /* Now it is possible to do the cleanup of dcb */
- size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
- size = ALIGN(size, PAGE_SIZE);
- dma_free_coherent(lan966x->dev, size, rx->dcbs, rx->dma);
-}
-
static void lan966x_fdma_rx_start(struct lan966x_rx *rx)
{
struct lan966x *lan966x = rx->lan966x;
+ struct fdma *fdma = &rx->fdma;
u32 mask;
/* When activating a channel, first is required to write the first DCB
* address and then to activate it
*/
- lan_wr(lower_32_bits((u64)rx->dma), lan966x,
- FDMA_DCB_LLP(rx->channel_id));
- lan_wr(upper_32_bits((u64)rx->dma), lan966x,
- FDMA_DCB_LLP1(rx->channel_id));
+ lan_wr(lower_32_bits((u64)fdma->dma), lan966x,
+ FDMA_DCB_LLP(fdma->channel_id));
+ lan_wr(upper_32_bits((u64)fdma->dma), lan966x,
+ FDMA_DCB_LLP1(fdma->channel_id));
- lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_RX_DCB_MAX_DBS) |
+ lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) |
FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
FDMA_CH_CFG_CH_MEM_SET(1),
- lan966x, FDMA_CH_CFG(rx->channel_id));
+ lan966x, FDMA_CH_CFG(fdma->channel_id));
/* Start fdma */
lan_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0),
@@ -191,13 +154,13 @@ static void lan966x_fdma_rx_start(struct lan966x_rx *rx)
/* Enable interrupts */
mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
- mask |= BIT(rx->channel_id);
+ mask |= BIT(fdma->channel_id);
lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
FDMA_INTR_DB_ENA_INTR_DB_ENA,
lan966x, FDMA_INTR_DB_ENA);
/* Activate the channel */
- lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(rx->channel_id)),
+ lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(fdma->channel_id)),
FDMA_CH_ACTIVATE_CH_ACTIVATE,
lan966x, FDMA_CH_ACTIVATE);
}
@@ -205,18 +168,19 @@ static void lan966x_fdma_rx_start(struct lan966x_rx *rx)
static void lan966x_fdma_rx_disable(struct lan966x_rx *rx)
{
struct lan966x *lan966x = rx->lan966x;
+ struct fdma *fdma = &rx->fdma;
u32 val;
/* Disable the channel */
- lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(rx->channel_id)),
+ lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(fdma->channel_id)),
FDMA_CH_DISABLE_CH_DISABLE,
lan966x, FDMA_CH_DISABLE);
readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
- val, !(val & BIT(rx->channel_id)),
+ val, !(val & BIT(fdma->channel_id)),
READL_SLEEP_US, READL_TIMEOUT_US);
- lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(rx->channel_id)),
+ lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(fdma->channel_id)),
FDMA_CH_DB_DISCARD_DB_DISCARD,
lan966x, FDMA_CH_DB_DISCARD);
}
@@ -225,50 +189,27 @@ static void lan966x_fdma_rx_reload(struct lan966x_rx *rx)
{
struct lan966x *lan966x = rx->lan966x;
- lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(rx->channel_id)),
+ lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(rx->fdma.channel_id)),
FDMA_CH_RELOAD_CH_RELOAD,
lan966x, FDMA_CH_RELOAD);
}
-static void lan966x_fdma_tx_add_dcb(struct lan966x_tx *tx,
- struct lan966x_tx_dcb *dcb)
-{
- dcb->nextptr = FDMA_DCB_INVALID_DATA;
- dcb->info = 0;
-}
-
static int lan966x_fdma_tx_alloc(struct lan966x_tx *tx)
{
struct lan966x *lan966x = tx->lan966x;
- struct lan966x_tx_dcb *dcb;
- struct lan966x_db *db;
- int size;
- int i, j;
+ struct fdma *fdma = &tx->fdma;
+ int err;
- tx->dcbs_buf = kcalloc(FDMA_DCB_MAX, sizeof(struct lan966x_tx_dcb_buf),
+ tx->dcbs_buf = kcalloc(fdma->n_dcbs, sizeof(struct lan966x_tx_dcb_buf),
GFP_KERNEL);
if (!tx->dcbs_buf)
return -ENOMEM;
- /* calculate how many pages are needed to allocate the dcbs */
- size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
- size = ALIGN(size, PAGE_SIZE);
- tx->dcbs = dma_alloc_coherent(lan966x->dev, size, &tx->dma, GFP_KERNEL);
- if (!tx->dcbs)
+ err = fdma_alloc_coherent(lan966x->dev, fdma);
+ if (err)
goto out;
- /* Now for each dcb allocate the db */
- for (i = 0; i < FDMA_DCB_MAX; ++i) {
- dcb = &tx->dcbs[i];
-
- for (j = 0; j < FDMA_TX_DCB_MAX_DBS; ++j) {
- db = &dcb->db[j];
- db->dataptr = 0;
- db->status = 0;
- }
-
- lan966x_fdma_tx_add_dcb(tx, dcb);
- }
+ fdma_dcbs_init(fdma, 0, 0);
return 0;
@@ -280,33 +221,30 @@ out:
static void lan966x_fdma_tx_free(struct lan966x_tx *tx)
{
struct lan966x *lan966x = tx->lan966x;
- int size;
kfree(tx->dcbs_buf);
-
- size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
- size = ALIGN(size, PAGE_SIZE);
- dma_free_coherent(lan966x->dev, size, tx->dcbs, tx->dma);
+ fdma_free_coherent(lan966x->dev, &tx->fdma);
}
static void lan966x_fdma_tx_activate(struct lan966x_tx *tx)
{
struct lan966x *lan966x = tx->lan966x;
+ struct fdma *fdma = &tx->fdma;
u32 mask;
/* When activating a channel, first is required to write the first DCB
* address and then to activate it
*/
- lan_wr(lower_32_bits((u64)tx->dma), lan966x,
- FDMA_DCB_LLP(tx->channel_id));
- lan_wr(upper_32_bits((u64)tx->dma), lan966x,
- FDMA_DCB_LLP1(tx->channel_id));
+ lan_wr(lower_32_bits((u64)fdma->dma), lan966x,
+ FDMA_DCB_LLP(fdma->channel_id));
+ lan_wr(upper_32_bits((u64)fdma->dma), lan966x,
+ FDMA_DCB_LLP1(fdma->channel_id));
- lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_TX_DCB_MAX_DBS) |
+ lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) |
FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
FDMA_CH_CFG_CH_MEM_SET(1),
- lan966x, FDMA_CH_CFG(tx->channel_id));
+ lan966x, FDMA_CH_CFG(fdma->channel_id));
/* Start fdma */
lan_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0),
@@ -316,13 +254,13 @@ static void lan966x_fdma_tx_activate(struct lan966x_tx *tx)
/* Enable interrupts */
mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
- mask |= BIT(tx->channel_id);
+ mask |= BIT(fdma->channel_id);
lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
FDMA_INTR_DB_ENA_INTR_DB_ENA,
lan966x, FDMA_INTR_DB_ENA);
/* Activate the channel */
- lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(tx->channel_id)),
+ lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(fdma->channel_id)),
FDMA_CH_ACTIVATE_CH_ACTIVATE,
lan966x, FDMA_CH_ACTIVATE);
}
@@ -330,23 +268,23 @@ static void lan966x_fdma_tx_activate(struct lan966x_tx *tx)
static void lan966x_fdma_tx_disable(struct lan966x_tx *tx)
{
struct lan966x *lan966x = tx->lan966x;
+ struct fdma *fdma = &tx->fdma;
u32 val;
/* Disable the channel */
- lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(tx->channel_id)),
+ lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(fdma->channel_id)),
FDMA_CH_DISABLE_CH_DISABLE,
lan966x, FDMA_CH_DISABLE);
readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
- val, !(val & BIT(tx->channel_id)),
+ val, !(val & BIT(fdma->channel_id)),
READL_SLEEP_US, READL_TIMEOUT_US);
- lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(tx->channel_id)),
+ lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(fdma->channel_id)),
FDMA_CH_DB_DISCARD_DB_DISCARD,
lan966x, FDMA_CH_DB_DISCARD);
tx->activated = false;
- tx->last_in_use = -1;
}
static void lan966x_fdma_tx_reload(struct lan966x_tx *tx)
@@ -354,7 +292,7 @@ static void lan966x_fdma_tx_reload(struct lan966x_tx *tx)
struct lan966x *lan966x = tx->lan966x;
/* Write the registers to reload the channel */
- lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(tx->channel_id)),
+ lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(tx->fdma.channel_id)),
FDMA_CH_RELOAD_CH_RELOAD,
lan966x, FDMA_CH_RELOAD);
}
@@ -393,23 +331,24 @@ static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
struct lan966x_tx *tx = &lan966x->tx;
struct lan966x_rx *rx = &lan966x->rx;
struct lan966x_tx_dcb_buf *dcb_buf;
+ struct fdma *fdma = &tx->fdma;
struct xdp_frame_bulk bq;
- struct lan966x_db *db;
unsigned long flags;
bool clear = false;
+ struct fdma_db *db;
int i;
xdp_frame_bulk_init(&bq);
spin_lock_irqsave(&lan966x->tx_lock, flags);
- for (i = 0; i < FDMA_DCB_MAX; ++i) {
+ for (i = 0; i < fdma->n_dcbs; ++i) {
dcb_buf = &tx->dcbs_buf[i];
if (!dcb_buf->used)
continue;
- db = &tx->dcbs[i].db[0];
- if (!(db->status & FDMA_DCB_STATUS_DONE))
+ db = fdma_db_get(fdma, i, 0);
+ if (!fdma_db_is_done(db))
continue;
dcb_buf->dev->stats.tx_packets++;
@@ -449,27 +388,16 @@ static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
spin_unlock_irqrestore(&lan966x->tx_lock, flags);
}
-static bool lan966x_fdma_rx_more_frames(struct lan966x_rx *rx)
-{
- struct lan966x_db *db;
-
- /* Check if there is any data */
- db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
- if (unlikely(!(db->status & FDMA_DCB_STATUS_DONE)))
- return false;
-
- return true;
-}
-
static int lan966x_fdma_rx_check_frame(struct lan966x_rx *rx, u64 *src_port)
{
struct lan966x *lan966x = rx->lan966x;
+ struct fdma *fdma = &rx->fdma;
struct lan966x_port *port;
- struct lan966x_db *db;
+ struct fdma_db *db;
struct page *page;
- db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
- page = rx->page[rx->dcb_index][rx->db_index];
+ db = fdma_db_next_get(fdma);
+ page = rx->page[fdma->dcb_index][fdma->db_index];
if (unlikely(!page))
return FDMA_ERROR;
@@ -494,16 +422,17 @@ static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx,
u64 src_port)
{
struct lan966x *lan966x = rx->lan966x;
- struct lan966x_db *db;
+ struct fdma *fdma = &rx->fdma;
struct sk_buff *skb;
+ struct fdma_db *db;
struct page *page;
u64 timestamp;
/* Get the received frame and unmap it */
- db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
- page = rx->page[rx->dcb_index][rx->db_index];
+ db = fdma_db_next_get(fdma);
+ page = rx->page[fdma->dcb_index][fdma->db_index];
- skb = build_skb(page_address(page), PAGE_SIZE << rx->page_order);
+ skb = build_skb(page_address(page), fdma->db_size);
if (unlikely(!skb))
goto free_page;
@@ -546,21 +475,19 @@ static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
{
struct lan966x *lan966x = container_of(napi, struct lan966x, napi);
struct lan966x_rx *rx = &lan966x->rx;
- int dcb_reload = rx->dcb_index;
- struct lan966x_rx_dcb *old_dcb;
- struct lan966x_db *db;
+ int old_dcb, dcb_reload, counter = 0;
+ struct fdma *fdma = &rx->fdma;
bool redirect = false;
struct sk_buff *skb;
- struct page *page;
- int counter = 0;
u64 src_port;
- u64 nextptr;
+
+ dcb_reload = fdma->dcb_index;
lan966x_fdma_tx_clear_buf(lan966x, weight);
/* Get all received skb */
while (counter < weight) {
- if (!lan966x_fdma_rx_more_frames(rx))
+ if (!fdma_has_frames(fdma))
break;
counter++;
@@ -570,22 +497,22 @@ static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
break;
case FDMA_ERROR:
lan966x_fdma_rx_free_page(rx);
- lan966x_fdma_rx_advance_dcb(rx);
+ fdma_dcb_advance(fdma);
goto allocate_new;
case FDMA_REDIRECT:
redirect = true;
fallthrough;
case FDMA_TX:
- lan966x_fdma_rx_advance_dcb(rx);
+ fdma_dcb_advance(fdma);
continue;
case FDMA_DROP:
lan966x_fdma_rx_free_page(rx);
- lan966x_fdma_rx_advance_dcb(rx);
+ fdma_dcb_advance(fdma);
continue;
}
skb = lan966x_fdma_rx_get_frame(rx, src_port);
- lan966x_fdma_rx_advance_dcb(rx);
+ fdma_dcb_advance(fdma);
if (!skb)
goto allocate_new;
@@ -594,20 +521,14 @@ static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
allocate_new:
/* Allocate new pages and map them */
- while (dcb_reload != rx->dcb_index) {
- db = &rx->dcbs[dcb_reload].db[rx->db_index];
- page = lan966x_fdma_rx_alloc_page(rx, db);
- if (unlikely(!page))
- break;
- rx->page[dcb_reload][rx->db_index] = page;
-
- old_dcb = &rx->dcbs[dcb_reload];
+ while (dcb_reload != fdma->dcb_index) {
+ old_dcb = dcb_reload;
dcb_reload++;
- dcb_reload &= FDMA_DCB_MAX - 1;
+ dcb_reload &= fdma->n_dcbs - 1;
+
+ fdma_dcb_add(fdma, old_dcb, FDMA_DCB_INFO_DATAL(fdma->db_size),
+ FDMA_DCB_STATUS_INTR);
- nextptr = rx->dma + ((unsigned long)old_dcb -
- (unsigned long)rx->dcbs);
- lan966x_fdma_rx_add_dcb(rx, old_dcb, nextptr);
lan966x_fdma_rx_reload(rx);
}
@@ -650,56 +571,30 @@ irqreturn_t lan966x_fdma_irq_handler(int irq, void *args)
static int lan966x_fdma_get_next_dcb(struct lan966x_tx *tx)
{
struct lan966x_tx_dcb_buf *dcb_buf;
+ struct fdma *fdma = &tx->fdma;
int i;
- for (i = 0; i < FDMA_DCB_MAX; ++i) {
+ for (i = 0; i < fdma->n_dcbs; ++i) {
dcb_buf = &tx->dcbs_buf[i];
- if (!dcb_buf->used && i != tx->last_in_use)
+ if (!dcb_buf->used &&
+ !fdma_is_last(&tx->fdma, &tx->fdma.dcbs[i]))
return i;
}
return -1;
}
-static void lan966x_fdma_tx_setup_dcb(struct lan966x_tx *tx,
- int next_to_use, int len,
- dma_addr_t dma_addr)
-{
- struct lan966x_tx_dcb *next_dcb;
- struct lan966x_db *next_db;
-
- next_dcb = &tx->dcbs[next_to_use];
- next_dcb->nextptr = FDMA_DCB_INVALID_DATA;
-
- next_db = &next_dcb->db[0];
- next_db->dataptr = dma_addr;
- next_db->status = FDMA_DCB_STATUS_SOF |
- FDMA_DCB_STATUS_EOF |
- FDMA_DCB_STATUS_INTR |
- FDMA_DCB_STATUS_BLOCKO(0) |
- FDMA_DCB_STATUS_BLOCKL(len);
-}
-
-static void lan966x_fdma_tx_start(struct lan966x_tx *tx, int next_to_use)
+static void lan966x_fdma_tx_start(struct lan966x_tx *tx)
{
struct lan966x *lan966x = tx->lan966x;
- struct lan966x_tx_dcb *dcb;
if (likely(lan966x->tx.activated)) {
- /* Connect current dcb to the next db */
- dcb = &tx->dcbs[tx->last_in_use];
- dcb->nextptr = tx->dma + (next_to_use *
- sizeof(struct lan966x_tx_dcb));
-
lan966x_fdma_tx_reload(tx);
} else {
/* Because it is first time, then just activate */
lan966x->tx.activated = true;
lan966x_fdma_tx_activate(tx);
}
-
- /* Move to next dcb because this last in use */
- tx->last_in_use = next_to_use;
}
int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len)
@@ -752,11 +647,6 @@ int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len)
next_dcb_buf->data.xdpf = xdpf;
next_dcb_buf->len = xdpf->len + IFH_LEN_BYTES;
-
- /* Setup next dcb */
- lan966x_fdma_tx_setup_dcb(tx, next_to_use,
- xdpf->len + IFH_LEN_BYTES,
- dma_addr);
} else {
page = ptr;
@@ -773,11 +663,6 @@ int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len)
next_dcb_buf->data.page = page;
next_dcb_buf->len = len + IFH_LEN_BYTES;
-
- /* Setup next dcb */
- lan966x_fdma_tx_setup_dcb(tx, next_to_use,
- len + IFH_LEN_BYTES,
- dma_addr + XDP_PACKET_HEADROOM);
}
/* Fill up the buffer */
@@ -788,8 +673,19 @@ int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len)
next_dcb_buf->ptp = false;
next_dcb_buf->dev = port->dev;
+ __fdma_dcb_add(&tx->fdma,
+ next_to_use,
+ 0,
+ FDMA_DCB_STATUS_INTR |
+ FDMA_DCB_STATUS_SOF |
+ FDMA_DCB_STATUS_EOF |
+ FDMA_DCB_STATUS_BLOCKO(0) |
+ FDMA_DCB_STATUS_BLOCKL(next_dcb_buf->len),
+ &fdma_nextptr_cb,
+ &lan966x_fdma_xdp_tx_dataptr_cb);
+
/* Start the transmission */
- lan966x_fdma_tx_start(tx, next_to_use);
+ lan966x_fdma_tx_start(tx);
out:
spin_unlock(&lan966x->tx_lock);
@@ -847,9 +743,6 @@ int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
goto release;
}
- /* Setup next dcb */
- lan966x_fdma_tx_setup_dcb(tx, next_to_use, skb->len, dma_addr);
-
/* Fill up the buffer */
next_dcb_buf = &tx->dcbs_buf[next_to_use];
next_dcb_buf->use_skb = true;
@@ -861,12 +754,21 @@ int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
next_dcb_buf->ptp = false;
next_dcb_buf->dev = dev;
+ fdma_dcb_add(&tx->fdma,
+ next_to_use,
+ 0,
+ FDMA_DCB_STATUS_INTR |
+ FDMA_DCB_STATUS_SOF |
+ FDMA_DCB_STATUS_EOF |
+ FDMA_DCB_STATUS_BLOCKO(0) |
+ FDMA_DCB_STATUS_BLOCKL(skb->len));
+
if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
next_dcb_buf->ptp = true;
/* Start the transmission */
- lan966x_fdma_tx_start(tx, next_to_use);
+ lan966x_fdma_tx_start(tx);
return NETDEV_TX_OK;
@@ -908,14 +810,11 @@ static int lan966x_qsys_sw_status(struct lan966x *lan966x)
static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
{
struct page_pool *page_pool;
- dma_addr_t rx_dma;
- void *rx_dcbs;
- u32 size;
+ struct fdma fdma_rx_old;
int err;
/* Store these for later to free them */
- rx_dma = lan966x->rx.dma;
- rx_dcbs = lan966x->rx.dcbs;
+ memcpy(&fdma_rx_old, &lan966x->rx.fdma, sizeof(struct fdma));
page_pool = lan966x->rx.page_pool;
napi_synchronize(&lan966x->napi);
@@ -931,9 +830,7 @@ static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
goto restore;
lan966x_fdma_rx_start(&lan966x->rx);
- size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
- size = ALIGN(size, PAGE_SIZE);
- dma_free_coherent(lan966x->dev, size, rx_dcbs, rx_dma);
+ fdma_free_coherent(lan966x->dev, &fdma_rx_old);
page_pool_destroy(page_pool);
@@ -943,8 +840,7 @@ static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
return err;
restore:
lan966x->rx.page_pool = page_pool;
- lan966x->rx.dma = rx_dma;
- lan966x->rx.dcbs = rx_dcbs;
+ memcpy(&lan966x->rx.fdma, &fdma_rx_old, sizeof(struct fdma));
lan966x_fdma_rx_start(&lan966x->rx);
return err;
@@ -1034,11 +930,24 @@ int lan966x_fdma_init(struct lan966x *lan966x)
return 0;
lan966x->rx.lan966x = lan966x;
- lan966x->rx.channel_id = FDMA_XTR_CHANNEL;
+ lan966x->rx.fdma.channel_id = FDMA_XTR_CHANNEL;
+ lan966x->rx.fdma.n_dcbs = FDMA_DCB_MAX;
+ lan966x->rx.fdma.n_dbs = FDMA_RX_DCB_MAX_DBS;
+ lan966x->rx.fdma.priv = lan966x;
+ lan966x->rx.fdma.size = fdma_get_size(&lan966x->rx.fdma);
+ lan966x->rx.fdma.db_size = PAGE_SIZE << lan966x->rx.page_order;
+ lan966x->rx.fdma.ops.nextptr_cb = &fdma_nextptr_cb;
+ lan966x->rx.fdma.ops.dataptr_cb = &lan966x_fdma_rx_dataptr_cb;
lan966x->rx.max_mtu = lan966x_fdma_get_max_frame(lan966x);
lan966x->tx.lan966x = lan966x;
- lan966x->tx.channel_id = FDMA_INJ_CHANNEL;
- lan966x->tx.last_in_use = -1;
+ lan966x->tx.fdma.channel_id = FDMA_INJ_CHANNEL;
+ lan966x->tx.fdma.n_dcbs = FDMA_DCB_MAX;
+ lan966x->tx.fdma.n_dbs = FDMA_TX_DCB_MAX_DBS;
+ lan966x->tx.fdma.priv = lan966x;
+ lan966x->tx.fdma.size = fdma_get_size(&lan966x->tx.fdma);
+ lan966x->tx.fdma.db_size = PAGE_SIZE << lan966x->rx.page_order;
+ lan966x->tx.fdma.ops.nextptr_cb = &fdma_nextptr_cb;
+ lan966x->tx.fdma.ops.dataptr_cb = &lan966x_fdma_tx_dataptr_cb;
err = lan966x_fdma_rx_alloc(&lan966x->rx);
if (err)
@@ -1046,7 +955,7 @@ int lan966x_fdma_init(struct lan966x *lan966x)
err = lan966x_fdma_tx_alloc(&lan966x->tx);
if (err) {
- lan966x_fdma_rx_free(&lan966x->rx);
+ fdma_free_coherent(lan966x->dev, &lan966x->rx.fdma);
return err;
}
@@ -1067,7 +976,7 @@ void lan966x_fdma_deinit(struct lan966x *lan966x)
napi_disable(&lan966x->napi);
lan966x_fdma_rx_free_pages(&lan966x->rx);
- lan966x_fdma_rx_free(&lan966x->rx);
+ fdma_free_coherent(lan966x->dev, &lan966x->rx.fdma);
page_pool_destroy(lan966x->rx.page_pool);
lan966x_fdma_tx_free(&lan966x->tx);
}
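
The net effect of the lan966x_fdma.c changes above is that the driver no longer hand-rolls its DCB/DB ring: it fills in struct fdma (channel id, n_dcbs, n_dbs, db_size and the nextptr/dataptr callbacks) and lets the shared library allocate the ring, test completion with fdma_db_is_done() and move the consumer index with fdma_dcb_advance(). The following is a stand-alone sketch of that ring pattern for readers unfamiliar with it; the names and the simplified layout are illustrative only, not the fdma library API.

/* Standalone sketch of the ring pattern used by the fdma library above:
 * a power-of-two array of descriptors, a DONE bit set by "hardware" and a
 * consumer index that wraps with a mask. Names here are illustrative only.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define N_DCBS 8                      /* must be a power of two, like FDMA_DCB_MAX */
#define STATUS_DONE (1u << 19)        /* mirrors FDMA_DCB_STATUS_DONE, BIT(19) */

struct sketch_db { uint64_t dataptr; uint64_t status; };

static struct sketch_db ring[N_DCBS];
static unsigned int dcb_index;

static bool sketch_db_is_done(const struct sketch_db *db)
{
	return db->status & STATUS_DONE;
}

static void sketch_dcb_advance(void)
{
	dcb_index = (dcb_index + 1) & (N_DCBS - 1);   /* wrap like dcb_index &= n_dcbs - 1 */
}

int main(void)
{
	ring[0].status = STATUS_DONE;                 /* pretend DMA completed one buffer */

	while (sketch_db_is_done(&ring[dcb_index])) {
		printf("consume dcb %u\n", dcb_index);
		ring[dcb_index].status = 0;           /* hand the descriptor back */
		sketch_dcb_advance();
	}
	return 0;
}
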
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h b/drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h
index f3b1e0d31826..e706163ce9cc 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h
@@ -78,7 +78,7 @@
/* Classified internal priority for queuing */
#define IFH_POS_QOS_CLASS 100
-/* Bit mask with eight cpu copy classses */
+/* Bit mask with eight cpu copy classes */
#define IFH_POS_CPUQ 92
/* Relearn + learn flags (*) */
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index 2635ef8958c8..47752d3fde0b 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -276,7 +276,7 @@ static int lan966x_port_ifh_xmit(struct sk_buff *skb,
++i;
}
- /* Inidcate EOF and valid bytes in the last word */
+ /* Indicate EOF and valid bytes in the last word */
lan_wr(QS_INJ_CTRL_GAP_SIZE_SET(1) |
QS_INJ_CTRL_VLD_BYTES_SET(skb->len < LAN966X_BUFFER_MIN_SZ ?
0 : last) |
@@ -353,6 +353,11 @@ static void lan966x_ifh_set_rew_op(void *ifh, u64 rew_op)
lan966x_ifh_set(ifh, rew_op, IFH_POS_REW_CMD, IFH_WID_REW_CMD);
}
+static void lan966x_ifh_set_oam_type(void *ifh, u64 oam_type)
+{
+ lan966x_ifh_set(ifh, oam_type, IFH_POS_PDU_TYPE, IFH_WID_PDU_TYPE);
+}
+
static void lan966x_ifh_set_timestamp(void *ifh, u64 timestamp)
{
lan966x_ifh_set(ifh, timestamp, IFH_POS_TIMESTAMP, IFH_WID_TIMESTAMP);
@@ -380,6 +385,7 @@ static netdev_tx_t lan966x_port_xmit(struct sk_buff *skb,
return err;
lan966x_ifh_set_rew_op(ifh, LAN966X_SKB_CB(skb)->rew_op);
+ lan966x_ifh_set_oam_type(ifh, LAN966X_SKB_CB(skb)->pdu_type);
lan966x_ifh_set_timestamp(ifh, LAN966X_SKB_CB(skb)->ts_id);
}
@@ -402,7 +408,7 @@ static int lan966x_port_change_mtu(struct net_device *dev, int new_mtu)
lan_wr(DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(LAN966X_HW_MTU(new_mtu)),
lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
if (!lan966x->fdma)
return 0;
@@ -474,14 +480,14 @@ static int lan966x_port_hwtstamp_set(struct net_device *dev,
cfg->source != HWTSTAMP_SOURCE_PHYLIB)
return -EOPNOTSUPP;
+ if (cfg->source == HWTSTAMP_SOURCE_NETDEV && !port->lan966x->ptp)
+ return -EOPNOTSUPP;
+
err = lan966x_ptp_setup_traps(port, cfg);
if (err)
return err;
if (cfg->source == HWTSTAMP_SOURCE_NETDEV) {
- if (!port->lan966x->ptp)
- return -EOPNOTSUPP;
-
err = lan966x_ptp_hwtstamp_set(port, cfg, extack);
if (err) {
lan966x_ptp_del_traps(port);
@@ -520,7 +526,7 @@ bool lan966x_hw_offload(struct lan966x *lan966x, u32 port, struct sk_buff *skb)
u32 val;
/* The IGMP and MLD frames are not forward by the HW if
- * multicast snooping is enabled, therefor don't mark as
+ * multicast snooping is enabled, therefore don't mark as
* offload to allow the SW to forward the frames accordingly.
*/
val = lan_rd(lan966x, ANA_CPU_FWD_CFG(port));
@@ -816,7 +822,7 @@ static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
NETIF_F_HW_VLAN_STAG_TX |
NETIF_F_HW_TC;
dev->hw_features |= NETIF_F_HW_TC;
- dev->priv_flags |= IFF_SEE_ALL_HWTSTAMP_REQUESTS;
+ dev->see_all_hwtstamp_requests = true;
dev->needed_headroom = IFH_LEN_BYTES;
eth_hw_addr_gen(dev, lan966x->base_mac, p + 1);
@@ -828,7 +834,6 @@ static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
port->phylink_config.type = PHYLINK_NETDEV;
port->phylink_pcs.poll = true;
port->phylink_pcs.ops = &lan966x_phylink_pcs_ops;
- port->phylink_pcs.neg_mode = true;
port->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100 | MAC_1000FD | MAC_2500FD;
@@ -874,6 +879,7 @@ static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
lan966x_vlan_port_set_vlan_aware(port, 0);
lan966x_vlan_port_set_vid(port, HOST_PVID, false, false);
lan966x_vlan_port_apply(port);
+ lan966x_vlan_port_rew_host(port);
return 0;
}
@@ -1087,8 +1093,6 @@ static int lan966x_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, lan966x);
lan966x->dev = &pdev->dev;
- lan966x->debugfs_root = debugfs_create_dir("lan966x", NULL);
-
if (!device_get_mac_address(&pdev->dev, mac_addr)) {
ether_addr_copy(lan966x->base_mac, mac_addr);
} else {
@@ -1179,6 +1183,8 @@ static int lan966x_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, -ENODEV,
"no ethernet-ports child found\n");
+ lan966x->debugfs_root = debugfs_create_dir("lan966x", NULL);
+
/* init switch */
lan966x_init(lan966x);
lan966x_stats_init(lan966x);
@@ -1255,7 +1261,8 @@ cleanup_ports:
cancel_delayed_work_sync(&lan966x->stats_work);
destroy_workqueue(lan966x->stats_queue);
- mutex_destroy(&lan966x->stats_lock);
+
+ debugfs_remove_recursive(lan966x->debugfs_root);
return err;
}
@@ -1271,7 +1278,6 @@ static void lan966x_remove(struct platform_device *pdev)
cancel_delayed_work_sync(&lan966x->stats_work);
destroy_workqueue(lan966x->stats_queue);
- mutex_destroy(&lan966x->stats_lock);
lan966x_mac_purge_entries(lan966x);
lan966x_mdb_deinit(lan966x);
@@ -1283,7 +1289,7 @@ static void lan966x_remove(struct platform_device *pdev)
static struct platform_driver lan966x_driver = {
.probe = lan966x_probe,
- .remove_new = lan966x_remove,
+ .remove = lan966x_remove,
.driver = {
.name = "lan966x-switch",
.of_match_table = lan966x_match,
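
The lan966x_ifh_set_oam_type() helper added above follows the same pattern as the other IFH setters: lan966x_ifh_set() packs a value into the injection frame header at a fixed bit position and width (IFH_POS_PDU_TYPE / IFH_WID_PDU_TYPE). As a rough, self-contained illustration of position/width packing into a single 64-bit word (the real IFH spans several words, so this is not the driver's implementation, and the offset used below is hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: pack "val" into "word" at bit offset "pos", "wid" bits wide. */
static uint64_t field_set(uint64_t word, uint64_t val, unsigned int pos, unsigned int wid)
{
	uint64_t mask = ((wid < 64 ? (1ULL << wid) : 0) - 1) << pos;

	return (word & ~mask) | ((val << pos) & mask);
}

int main(void)
{
	uint64_t ifh_word = 0;

	/* e.g. a 4-bit PDU type field at a hypothetical offset of 8 bits */
	ifh_word = field_set(ifh_word, 7 /* IPV4 */, 8, 4);
	printf("0x%llx\n", (unsigned long long)ifh_word);   /* prints 0x700 */
	return 0;
}
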
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
index caa9e0533c96..eea286c29474 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
@@ -16,6 +16,7 @@
#include <net/switchdev.h>
#include <net/xdp.h>
+#include <fdma_api.h>
#include <vcap_api.h>
#include <vcap_api_client.h>
@@ -74,17 +75,12 @@
#define IFH_REW_OP_ONE_STEP_PTP 0x3
#define IFH_REW_OP_TWO_STEP_PTP 0x4
+#define IFH_PDU_TYPE_NONE 0
+#define IFH_PDU_TYPE_IPV4 7
+#define IFH_PDU_TYPE_IPV6 8
+
#define FDMA_RX_DCB_MAX_DBS 1
#define FDMA_TX_DCB_MAX_DBS 1
-#define FDMA_DCB_INFO_DATAL(x) ((x) & GENMASK(15, 0))
-
-#define FDMA_DCB_STATUS_BLOCKL(x) ((x) & GENMASK(15, 0))
-#define FDMA_DCB_STATUS_SOF BIT(16)
-#define FDMA_DCB_STATUS_EOF BIT(17)
-#define FDMA_DCB_STATUS_INTR BIT(18)
-#define FDMA_DCB_STATUS_DONE BIT(19)
-#define FDMA_DCB_STATUS_BLOCKO(x) (((x) << 20) & GENMASK(31, 20))
-#define FDMA_DCB_INVALID_DATA 0x1
#define FDMA_XTR_CHANNEL 6
#define FDMA_INJ_CHANNEL 0
@@ -199,49 +195,14 @@ enum vcap_is1_port_sel_rt {
struct lan966x_port;
-struct lan966x_db {
- u64 dataptr;
- u64 status;
-};
-
-struct lan966x_rx_dcb {
- u64 nextptr;
- u64 info;
- struct lan966x_db db[FDMA_RX_DCB_MAX_DBS];
-};
-
-struct lan966x_tx_dcb {
- u64 nextptr;
- u64 info;
- struct lan966x_db db[FDMA_TX_DCB_MAX_DBS];
-};
-
struct lan966x_rx {
struct lan966x *lan966x;
- /* Pointer to the array of hardware dcbs. */
- struct lan966x_rx_dcb *dcbs;
-
- /* Pointer to the last address in the dcbs. */
- struct lan966x_rx_dcb *last_entry;
+ struct fdma fdma;
/* For each DB, there is a page */
struct page *page[FDMA_DCB_MAX][FDMA_RX_DCB_MAX_DBS];
- /* Represents the db_index, it can have a value between 0 and
- * FDMA_RX_DCB_MAX_DBS, once it reaches the value of FDMA_RX_DCB_MAX_DBS
- * it means that the DCB can be reused.
- */
- int db_index;
-
- /* Represents the index in the dcbs. It has a value between 0 and
- * FDMA_DCB_MAX
- */
- int dcb_index;
-
- /* Represents the dma address to the dcbs array */
- dma_addr_t dma;
-
/* Represents the page order that is used to allocate the pages for the
* RX buffers. This value is calculated based on max MTU of the devices.
*/
@@ -252,8 +213,6 @@ struct lan966x_rx {
*/
u32 max_mtu;
- u8 channel_id;
-
struct page_pool *page_pool;
};
@@ -275,18 +234,11 @@ struct lan966x_tx_dcb_buf {
struct lan966x_tx {
struct lan966x *lan966x;
- /* Pointer to the dcb list */
- struct lan966x_tx_dcb *dcbs;
- u16 last_in_use;
-
- /* Represents the DMA address to the first entry of the dcb entries. */
- dma_addr_t dma;
+ struct fdma fdma;
/* Array of dcbs that are given to the HW */
struct lan966x_tx_dcb_buf *dcbs_buf;
- u8 channel_id;
-
bool activated;
};
@@ -306,6 +258,7 @@ struct lan966x_phc {
struct lan966x_skb_cb {
u8 rew_op;
+ u8 pdu_type;
u16 ts_id;
unsigned long jiffies;
};
@@ -326,7 +279,7 @@ struct lan966x {
u8 base_mac[ETH_ALEN];
- spinlock_t tx_lock; /* lock for frame transmition */
+ spinlock_t tx_lock; /* lock for frame transmission */
struct net_device *bridge;
u16 bridge_mask;
@@ -342,8 +295,8 @@ struct lan966x {
const struct lan966x_stat_layout *stats_layout;
u32 num_stats;
- /* workqueue for reading stats */
- struct mutex stats_lock;
+ /* lock for reading stats */
+ spinlock_t stats_lock;
u64 *stats;
struct delayed_work stats_work;
struct workqueue_struct *stats_queue;
@@ -495,7 +448,7 @@ int lan966x_stats_init(struct lan966x *lan966x);
void lan966x_port_config_down(struct lan966x_port *port);
void lan966x_port_config_up(struct lan966x_port *port);
-void lan966x_port_status_get(struct lan966x_port *port,
+void lan966x_port_status_get(struct lan966x_port *port, unsigned int neg_mode,
struct phylink_link_state *state);
int lan966x_port_pcs_set(struct lan966x_port *port,
struct lan966x_port_config *config);
@@ -544,6 +497,7 @@ void lan966x_vlan_port_apply(struct lan966x_port *port);
bool lan966x_vlan_cpu_member_cpu_vlan_mask(struct lan966x *lan966x, u16 vid);
void lan966x_vlan_port_set_vlan_aware(struct lan966x_port *port,
bool vlan_aware);
+void lan966x_vlan_port_rew_host(struct lan966x_port *port);
int lan966x_vlan_port_set_vid(struct lan966x_port *port,
u16 vid,
bool pvid,
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c b/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c
index 1d63903f9006..75188b99e4e7 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c
@@ -88,11 +88,12 @@ static struct lan966x_port *lan966x_pcs_to_port(struct phylink_pcs *pcs)
}
static void lan966x_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct lan966x_port *port = lan966x_pcs_to_port(pcs);
- lan966x_port_status_get(port, state);
+ lan966x_port_status_get(port, neg_mode, state);
}
static int lan966x_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
index 2e83bbb9477e..cf7de0267c32 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
@@ -88,7 +88,7 @@ static void lan966x_port_link_down(struct lan966x_port *port)
SYS_FRONT_PORT_MODE_HDX_MODE,
lan966x, SYS_FRONT_PORT_MODE(port->chip_port));
- /* 8: Flush the queues accociated with the port */
+ /* 8: Flush the queues associated with the port */
lan_rmw(QSYS_SW_PORT_MODE_AGING_MODE_SET(3),
QSYS_SW_PORT_MODE_AGING_MODE,
lan966x, QSYS_SW_PORT_MODE(port->chip_port));
@@ -284,7 +284,7 @@ void lan966x_port_config_up(struct lan966x_port *port)
lan966x_port_link_up(port);
}
-void lan966x_port_status_get(struct lan966x_port *port,
+void lan966x_port_status_get(struct lan966x_port *port, unsigned int neg_mode,
struct phylink_link_state *state)
{
struct lan966x *lan966x = port->lan966x;
@@ -314,7 +314,7 @@ void lan966x_port_status_get(struct lan966x_port *port,
bmsr |= BMSR_ANEGCOMPLETE;
lp_adv = DEV_PCS1G_ANEG_STATUS_LP_ADV_GET(val);
- phylink_mii_c22_pcs_decode_state(state, bmsr, lp_adv);
+ phylink_mii_c22_pcs_decode_state(state, neg_mode, bmsr, lp_adv);
} else {
if (!state->link)
return;
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
index 63905bb5a63a..8c40db90ee8f 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
@@ -1,11 +1,14 @@
// SPDX-License-Identifier: GPL-2.0+
#include <linux/ptp_classify.h>
+#include <linux/units.h>
#include "lan966x_main.h"
#include "vcap_api.h"
#include "vcap_api_client.h"
+#define LAN9X66_CLOCK_RATE 165617754
+
#define LAN966X_MAX_PTP_ID 512
/* Represents 1ppm adjustment in 2^59 format with 6.037735849ns as reference
@@ -322,34 +325,55 @@ void lan966x_ptp_hwtstamp_get(struct lan966x_port *port,
*cfg = phc->hwtstamp_config;
}
-static int lan966x_ptp_classify(struct lan966x_port *port, struct sk_buff *skb)
+static void lan966x_ptp_classify(struct lan966x_port *port, struct sk_buff *skb,
+ u8 *rew_op, u8 *pdu_type)
{
struct ptp_header *header;
u8 msgtype;
int type;
- if (port->ptp_tx_cmd == IFH_REW_OP_NOOP)
- return IFH_REW_OP_NOOP;
+ if (port->ptp_tx_cmd == IFH_REW_OP_NOOP) {
+ *rew_op = IFH_REW_OP_NOOP;
+ *pdu_type = IFH_PDU_TYPE_NONE;
+ return;
+ }
type = ptp_classify_raw(skb);
- if (type == PTP_CLASS_NONE)
- return IFH_REW_OP_NOOP;
+ if (type == PTP_CLASS_NONE) {
+ *rew_op = IFH_REW_OP_NOOP;
+ *pdu_type = IFH_PDU_TYPE_NONE;
+ return;
+ }
header = ptp_parse_header(skb, type);
- if (!header)
- return IFH_REW_OP_NOOP;
+ if (!header) {
+ *rew_op = IFH_REW_OP_NOOP;
+ *pdu_type = IFH_PDU_TYPE_NONE;
+ return;
+ }
+
+ if (type & PTP_CLASS_L2)
+ *pdu_type = IFH_PDU_TYPE_NONE;
+ if (type & PTP_CLASS_IPV4)
+ *pdu_type = IFH_PDU_TYPE_IPV4;
+ if (type & PTP_CLASS_IPV6)
+ *pdu_type = IFH_PDU_TYPE_IPV6;
- if (port->ptp_tx_cmd == IFH_REW_OP_TWO_STEP_PTP)
- return IFH_REW_OP_TWO_STEP_PTP;
+ if (port->ptp_tx_cmd == IFH_REW_OP_TWO_STEP_PTP) {
+ *rew_op = IFH_REW_OP_TWO_STEP_PTP;
+ return;
+ }
/* If it is sync and run 1 step then set the correct operation,
* otherwise run as 2 step
*/
msgtype = ptp_get_msgtype(header, type);
- if ((msgtype & 0xf) == 0)
- return IFH_REW_OP_ONE_STEP_PTP;
+ if ((msgtype & 0xf) == 0) {
+ *rew_op = IFH_REW_OP_ONE_STEP_PTP;
+ return;
+ }
- return IFH_REW_OP_TWO_STEP_PTP;
+ *rew_op = IFH_REW_OP_TWO_STEP_PTP;
}
static void lan966x_ptp_txtstamp_old_release(struct lan966x_port *port)
@@ -374,10 +398,12 @@ int lan966x_ptp_txtstamp_request(struct lan966x_port *port,
{
struct lan966x *lan966x = port->lan966x;
unsigned long flags;
+ u8 pdu_type;
u8 rew_op;
- rew_op = lan966x_ptp_classify(port, skb);
+ lan966x_ptp_classify(port, skb, &rew_op, &pdu_type);
LAN966X_SKB_CB(skb)->rew_op = rew_op;
+ LAN966X_SKB_CB(skb)->pdu_type = pdu_type;
if (rew_op != IFH_REW_OP_TWO_STEP_PTP)
return 0;
@@ -815,10 +841,6 @@ static int lan966x_ptp_perout(struct ptp_clock_info *ptp,
bool pps = false;
int pin;
- if (rq->perout.flags & ~(PTP_PEROUT_DUTY_CYCLE |
- PTP_PEROUT_PHASE))
- return -EOPNOTSUPP;
-
pin = ptp_find_pin(phc->clock, PTP_PF_PEROUT, rq->perout.index);
if (pin == -1 || pin >= LAN966X_PHC_PINS_NUM)
return -EINVAL;
@@ -917,12 +939,6 @@ static int lan966x_ptp_extts(struct ptp_clock_info *ptp,
if (lan966x->ptp_ext_irq <= 0)
return -EOPNOTSUPP;
- /* Reject requests with unsupported flags */
- if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
pin = ptp_find_pin(phc->clock, PTP_PF_EXTTS, rq->extts.index);
if (pin == -1 || pin >= LAN966X_PHC_PINS_NUM)
return -EINVAL;
@@ -978,6 +994,10 @@ static struct ptp_clock_info lan966x_ptp_clock_info = {
.n_per_out = LAN966X_PHC_PINS_NUM,
.n_ext_ts = LAN966X_PHC_PINS_NUM,
.n_pins = LAN966X_PHC_PINS_NUM,
+ .supported_extts_flags = PTP_RISING_EDGE |
+ PTP_STRICT_FLAGS,
+ .supported_perout_flags = PTP_PEROUT_DUTY_CYCLE |
+ PTP_PEROUT_PHASE,
};
static int lan966x_ptp_phc_init(struct lan966x *lan966x,
@@ -1109,5 +1129,5 @@ void lan966x_ptp_rxtstamp(struct lan966x *lan966x, struct sk_buff *skb,
u32 lan966x_ptp_get_period_ps(void)
{
/* This represents the system clock period in picoseconds */
- return 15125;
+ return PICO / LAN9X66_CLOCK_RATE;
}
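
For reference, the last hunk above replaces the hard-coded period with PICO / LAN9X66_CLOCK_RATE, i.e. 10^12 ps / 165617754 Hz, which integer-divides to 6038 ps and matches the 6.037735849 ns reference period quoted earlier in this file. A trivial check, with the constants copied from the diff:

#include <stdio.h>

#define PICO 1000000000000ULL          /* picoseconds per second, as in <linux/units.h> */
#define CLOCK_RATE 165617754ULL        /* value taken from the hunk above */

int main(void)
{
	printf("period = %llu ps\n", PICO / CLOCK_RATE);   /* prints 6038 */
	return 0;
}
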
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
index 1c88120eb291..bcb4db76b75c 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
@@ -297,6 +297,7 @@ static void lan966x_port_bridge_leave(struct lan966x_port *port,
lan966x_vlan_port_set_vlan_aware(port, false);
lan966x_vlan_port_set_vid(port, HOST_PVID, false, false);
lan966x_vlan_port_apply(port);
+ lan966x_vlan_port_rew_host(port);
}
int lan966x_port_changeupper(struct net_device *dev,
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c
index d696cf9dbd19..43913d6204e1 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c
@@ -45,6 +45,7 @@ static bool lan966x_tc_is_known_etype(struct vcap_tc_flower_parse_usage *st,
static int
lan966x_tc_flower_handler_control_usage(struct vcap_tc_flower_parse_usage *st)
{
+ struct netlink_ext_ack *extack = st->fco->common.extack;
struct flow_match_control match;
int err = 0;
@@ -59,7 +60,7 @@ lan966x_tc_flower_handler_control_usage(struct vcap_tc_flower_parse_usage *st)
VCAP_KF_L3_FRAGMENT,
VCAP_BIT_0);
if (err)
- goto out;
+ goto bad_frag_out;
}
if (match.mask->flags & FLOW_DIS_FIRST_FRAG) {
@@ -72,15 +73,20 @@ lan966x_tc_flower_handler_control_usage(struct vcap_tc_flower_parse_usage *st)
VCAP_KF_L3_FRAG_OFS_GT0,
VCAP_BIT_1);
if (err)
- goto out;
+ goto bad_frag_out;
}
+ if (!flow_rule_is_supp_control_flags(FLOW_DIS_IS_FRAGMENT |
+ FLOW_DIS_FIRST_FRAG,
+ match.mask->flags, extack))
+ return -EOPNOTSUPP;
+
st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL);
return err;
-out:
- NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_frag parse error");
+bad_frag_out:
+ NL_SET_ERR_MSG_MOD(extack, "ip_frag parse error");
return err;
}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c
index a4414f63c9b1..2a37fc1ba4bc 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c
@@ -403,11 +403,11 @@ static void lan966x_es0_read_esdx_counter(struct lan966x *lan966x,
u32 counter;
id = id & 0xff; /* counter limit */
- mutex_lock(&lan966x->stats_lock);
+ spin_lock(&lan966x->stats_lock);
lan_wr(SYS_STAT_CFG_STAT_VIEW_SET(id), lan966x, SYS_STAT_CFG);
counter = lan_rd(lan966x, SYS_CNT(LAN966X_STAT_ESDX_GRN_PKTS)) +
lan_rd(lan966x, SYS_CNT(LAN966X_STAT_ESDX_YEL_PKTS));
- mutex_unlock(&lan966x->stats_lock);
+ spin_unlock(&lan966x->stats_lock);
if (counter)
admin->cache.counter = counter;
}
@@ -417,14 +417,14 @@ static void lan966x_es0_write_esdx_counter(struct lan966x *lan966x,
{
id = id & 0xff; /* counter limit */
- mutex_lock(&lan966x->stats_lock);
+ spin_lock(&lan966x->stats_lock);
lan_wr(SYS_STAT_CFG_STAT_VIEW_SET(id), lan966x, SYS_STAT_CFG);
lan_wr(0, lan966x, SYS_CNT(LAN966X_STAT_ESDX_GRN_BYTES));
lan_wr(admin->cache.counter, lan966x,
SYS_CNT(LAN966X_STAT_ESDX_GRN_PKTS));
lan_wr(0, lan966x, SYS_CNT(LAN966X_STAT_ESDX_YEL_BYTES));
lan_wr(0, lan966x, SYS_CNT(LAN966X_STAT_ESDX_YEL_PKTS));
- mutex_unlock(&lan966x->stats_lock);
+ spin_unlock(&lan966x->stats_lock);
}
static void lan966x_vcap_cache_write(struct net_device *dev,
@@ -581,7 +581,7 @@ static void lan966x_vcap_move(struct net_device *dev,
lan966x_vcap_wait_update(lan966x, admin->tgt_inst);
}
-static struct vcap_operations lan966x_vcap_ops = {
+static const struct vcap_operations lan966x_vcap_ops = {
.validate_keyset = lan966x_vcap_validate_keyset,
.add_default_fields = lan966x_vcap_add_default_fields,
.cache_erase = lan966x_vcap_cache_erase,
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c
index 3c44660128da..7da22520724c 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c
@@ -149,6 +149,27 @@ void lan966x_vlan_port_set_vlan_aware(struct lan966x_port *port,
port->vlan_aware = vlan_aware;
}
+/* When the interface is in host mode, the interface should not be vlan aware
+ * but it should still insert the tags it gets from the network stack.
+ * The tag is not in the frame data but in the skb, and the ifh is already
+ * configured to carry it. So what needs to be done is to update the
+ * rewriter to insert the vlan tag for all frames which have a vlan tag
+ * other than 0.
+ */
+void lan966x_vlan_port_rew_host(struct lan966x_port *port)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u32 val;
+
+	/* Tag all frames except when VID=0 */
+ val = REW_TAG_CFG_TAG_CFG_SET(2);
+
+ /* Update only some bits in the register */
+ lan_rmw(val,
+ REW_TAG_CFG_TAG_CFG,
+ lan966x, REW_TAG_CFG(port->chip_port));
+}
+
void lan966x_vlan_port_apply(struct lan966x_port *port)
{
struct lan966x *lan966x = port->lan966x;
@@ -157,7 +178,7 @@ void lan966x_vlan_port_apply(struct lan966x_port *port)
pvid = lan966x_vlan_port_get_pvid(port);
- /* Ingress clasification (ANA_PORT_VLAN_CFG) */
+ /* Ingress classification (ANA_PORT_VLAN_CFG) */
/* Default vlan to classify for untagged frames (may be zero) */
val = ANA_VLAN_CFG_VLAN_VID_SET(pvid);
if (port->vlan_aware)
diff --git a/drivers/net/ethernet/microchip/sparx5/Kconfig b/drivers/net/ethernet/microchip/sparx5/Kconfig
index f58c506bda22..a4d6706590d2 100644
--- a/drivers/net/ethernet/microchip/sparx5/Kconfig
+++ b/drivers/net/ethernet/microchip/sparx5/Kconfig
@@ -3,13 +3,14 @@ config SPARX5_SWITCH
depends on NET_SWITCHDEV
depends on HAS_IOMEM
depends on OF
- depends on ARCH_SPARX5 || COMPILE_TEST
+ depends on ARCH_SPARX5 || ARCH_LAN969X || COMPILE_TEST
depends on PTP_1588_CLOCK_OPTIONAL
depends on BRIDGE || BRIDGE=n
select PHYLINK
select PHY_SPARX5_SERDES
select RESET_CONTROLLER
select VCAP
+ select FDMA
help
This driver supports the Sparx5 network switch device.
@@ -23,3 +24,10 @@ config SPARX5_DCB
DSCP and PCP.
If unsure, set to Y.
+
+config LAN969X_SWITCH
+ bool "Lan969x switch driver"
+ depends on SPARX5_SWITCH
+ select PAGE_POOL
+ help
+ This driver supports the lan969x family of network switch devices.
diff --git a/drivers/net/ethernet/microchip/sparx5/Makefile b/drivers/net/ethernet/microchip/sparx5/Makefile
index 1cb1cc3f1a85..d447f9e84d92 100644
--- a/drivers/net/ethernet/microchip/sparx5/Makefile
+++ b/drivers/net/ethernet/microchip/sparx5/Makefile
@@ -10,10 +10,20 @@ sparx5-switch-y := sparx5_main.o sparx5_packet.o \
sparx5_switchdev.o sparx5_calendar.o sparx5_ethtool.o sparx5_fdma.o \
sparx5_ptp.o sparx5_pgid.o sparx5_tc.o sparx5_qos.o \
sparx5_vcap_impl.o sparx5_vcap_ag_api.o sparx5_tc_flower.o \
- sparx5_tc_matchall.o sparx5_pool.o sparx5_sdlb.o sparx5_police.o sparx5_psfp.o
+ sparx5_tc_matchall.o sparx5_pool.o sparx5_sdlb.o sparx5_police.o \
+ sparx5_psfp.o sparx5_mirror.o sparx5_regs.o
sparx5-switch-$(CONFIG_SPARX5_DCB) += sparx5_dcb.o
sparx5-switch-$(CONFIG_DEBUG_FS) += sparx5_vcap_debugfs.o
+sparx5-switch-$(CONFIG_LAN969X_SWITCH) += lan969x/lan969x_regs.o \
+ lan969x/lan969x.o \
+ lan969x/lan969x_calendar.o \
+ lan969x/lan969x_vcap_ag_api.o \
+ lan969x/lan969x_vcap_impl.o \
+ lan969x/lan969x_rgmii.o \
+ lan969x/lan969x_fdma.o
+
# Provide include files
ccflags-y += -I$(srctree)/drivers/net/ethernet/microchip/vcap
+ccflags-y += -I$(srctree)/drivers/net/ethernet/microchip/fdma
diff --git a/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.c b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.c
new file mode 100644
index 000000000000..f3a9c71bea36
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.c
@@ -0,0 +1,357 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip lan969x Switch driver
+ *
+ * Copyright (c) 2024 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include "lan969x.h"
+
+#define LAN969X_SDLB_GRP_CNT 5
+#define LAN969X_HSCH_LEAK_GRP_CNT 4
+
+static const struct sparx5_main_io_resource lan969x_main_iomap[] = {
+ { TARGET_CPU, 0xc0000, 0 }, /* 0xe00c0000 */
+ { TARGET_FDMA, 0xc0400, 0 }, /* 0xe00c0400 */
+ { TARGET_GCB, 0x2010000, 1 }, /* 0xe2010000 */
+ { TARGET_QS, 0x2030000, 1 }, /* 0xe2030000 */
+ { TARGET_PTP, 0x2040000, 1 }, /* 0xe2040000 */
+ { TARGET_ANA_ACL, 0x2050000, 1 }, /* 0xe2050000 */
+ { TARGET_LRN, 0x2060000, 1 }, /* 0xe2060000 */
+ { TARGET_VCAP_SUPER, 0x2080000, 1 }, /* 0xe2080000 */
+ { TARGET_QSYS, 0x20a0000, 1 }, /* 0xe20a0000 */
+ { TARGET_QFWD, 0x20b0000, 1 }, /* 0xe20b0000 */
+ { TARGET_XQS, 0x20c0000, 1 }, /* 0xe20c0000 */
+ { TARGET_VCAP_ES2, 0x20d0000, 1 }, /* 0xe20d0000 */
+ { TARGET_VCAP_ES0, 0x20e0000, 1 }, /* 0xe20e0000 */
+ { TARGET_ANA_AC_POL, 0x2200000, 1 }, /* 0xe2200000 */
+ { TARGET_QRES, 0x2280000, 1 }, /* 0xe2280000 */
+ { TARGET_EACL, 0x22c0000, 1 }, /* 0xe22c0000 */
+ { TARGET_ANA_CL, 0x2400000, 1 }, /* 0xe2400000 */
+ { TARGET_ANA_L3, 0x2480000, 1 }, /* 0xe2480000 */
+ { TARGET_ANA_AC_SDLB, 0x2500000, 1 }, /* 0xe2500000 */
+ { TARGET_HSCH, 0x2580000, 1 }, /* 0xe2580000 */
+ { TARGET_REW, 0x2600000, 1 }, /* 0xe2600000 */
+ { TARGET_ANA_L2, 0x2800000, 1 }, /* 0xe2800000 */
+ { TARGET_ANA_AC, 0x2900000, 1 }, /* 0xe2900000 */
+ { TARGET_VOP, 0x2a00000, 1 }, /* 0xe2a00000 */
+ { TARGET_DEV2G5, 0x3004000, 1 }, /* 0xe3004000 */
+ { TARGET_DEV10G, 0x3008000, 1 }, /* 0xe3008000 */
+ { TARGET_PCS10G_BR, 0x300c000, 1 }, /* 0xe300c000 */
+ { TARGET_DEV2G5 + 1, 0x3010000, 1 }, /* 0xe3010000 */
+ { TARGET_DEV2G5 + 2, 0x3014000, 1 }, /* 0xe3014000 */
+ { TARGET_DEV2G5 + 3, 0x3018000, 1 }, /* 0xe3018000 */
+ { TARGET_DEV2G5 + 4, 0x301c000, 1 }, /* 0xe301c000 */
+ { TARGET_DEV10G + 1, 0x3020000, 1 }, /* 0xe3020000 */
+ { TARGET_PCS10G_BR + 1, 0x3024000, 1 }, /* 0xe3024000 */
+ { TARGET_DEV2G5 + 5, 0x3028000, 1 }, /* 0xe3028000 */
+ { TARGET_DEV2G5 + 6, 0x302c000, 1 }, /* 0xe302c000 */
+ { TARGET_DEV2G5 + 7, 0x3030000, 1 }, /* 0xe3030000 */
+ { TARGET_DEV2G5 + 8, 0x3034000, 1 }, /* 0xe3034000 */
+ { TARGET_DEV10G + 2, 0x3038000, 1 }, /* 0xe3038000 */
+ { TARGET_PCS10G_BR + 2, 0x303c000, 1 }, /* 0xe303c000 */
+ { TARGET_DEV2G5 + 9, 0x3040000, 1 }, /* 0xe3040000 */
+ { TARGET_DEV5G, 0x3044000, 1 }, /* 0xe3044000 */
+ { TARGET_PCS5G_BR, 0x3048000, 1 }, /* 0xe3048000 */
+ { TARGET_DEV2G5 + 10, 0x304c000, 1 }, /* 0xe304c000 */
+ { TARGET_DEV2G5 + 11, 0x3050000, 1 }, /* 0xe3050000 */
+ { TARGET_DEV2G5 + 12, 0x3054000, 1 }, /* 0xe3054000 */
+ { TARGET_DEV10G + 3, 0x3058000, 1 }, /* 0xe3058000 */
+ { TARGET_PCS10G_BR + 3, 0x305c000, 1 }, /* 0xe305c000 */
+ { TARGET_DEV2G5 + 13, 0x3060000, 1 }, /* 0xe3060000 */
+ { TARGET_DEV5G + 1, 0x3064000, 1 }, /* 0xe3064000 */
+ { TARGET_PCS5G_BR + 1, 0x3068000, 1 }, /* 0xe3068000 */
+ { TARGET_DEV2G5 + 14, 0x306c000, 1 }, /* 0xe306c000 */
+ { TARGET_DEV2G5 + 15, 0x3070000, 1 }, /* 0xe3070000 */
+ { TARGET_DEV2G5 + 16, 0x3074000, 1 }, /* 0xe3074000 */
+ { TARGET_DEV10G + 4, 0x3078000, 1 }, /* 0xe3078000 */
+ { TARGET_PCS10G_BR + 4, 0x307c000, 1 }, /* 0xe307c000 */
+ { TARGET_DEV2G5 + 17, 0x3080000, 1 }, /* 0xe3080000 */
+ { TARGET_DEV5G + 2, 0x3084000, 1 }, /* 0xe3084000 */
+ { TARGET_PCS5G_BR + 2, 0x3088000, 1 }, /* 0xe3088000 */
+ { TARGET_DEV2G5 + 18, 0x308c000, 1 }, /* 0xe308c000 */
+ { TARGET_DEV2G5 + 19, 0x3090000, 1 }, /* 0xe3090000 */
+ { TARGET_DEV2G5 + 20, 0x3094000, 1 }, /* 0xe3094000 */
+ { TARGET_DEV10G + 5, 0x3098000, 1 }, /* 0xe3098000 */
+ { TARGET_PCS10G_BR + 5, 0x309c000, 1 }, /* 0xe309c000 */
+ { TARGET_DEV2G5 + 21, 0x30a0000, 1 }, /* 0xe30a0000 */
+ { TARGET_DEV5G + 3, 0x30a4000, 1 }, /* 0xe30a4000 */
+ { TARGET_PCS5G_BR + 3, 0x30a8000, 1 }, /* 0xe30a8000 */
+ { TARGET_DEV2G5 + 22, 0x30ac000, 1 }, /* 0xe30ac000 */
+ { TARGET_DEV2G5 + 23, 0x30b0000, 1 }, /* 0xe30b0000 */
+ { TARGET_DEV2G5 + 24, 0x30b4000, 1 }, /* 0xe30b4000 */
+ { TARGET_DEV10G + 6, 0x30b8000, 1 }, /* 0xe30b8000 */
+ { TARGET_PCS10G_BR + 6, 0x30bc000, 1 }, /* 0xe30bc000 */
+ { TARGET_DEV2G5 + 25, 0x30c0000, 1 }, /* 0xe30c0000 */
+ { TARGET_DEV10G + 7, 0x30c4000, 1 }, /* 0xe30c4000 */
+ { TARGET_PCS10G_BR + 7, 0x30c8000, 1 }, /* 0xe30c8000 */
+ { TARGET_DEV2G5 + 26, 0x30cc000, 1 }, /* 0xe30cc000 */
+ { TARGET_DEV10G + 8, 0x30d0000, 1 }, /* 0xe30d0000 */
+ { TARGET_PCS10G_BR + 8, 0x30d4000, 1 }, /* 0xe30d4000 */
+ { TARGET_DEV2G5 + 27, 0x30d8000, 1 }, /* 0xe30d8000 */
+ { TARGET_DEV10G + 9, 0x30dc000, 1 }, /* 0xe30dc000 */
+ { TARGET_PCS10G_BR + 9, 0x30e0000, 1 }, /* 0xe30e0000 */
+ { TARGET_DEVRGMII, 0x30e4000, 1 }, /* 0xe30e4000 */
+ { TARGET_DEVRGMII + 1, 0x30e8000, 1 }, /* 0xe30e8000 */
+ { TARGET_DSM, 0x30ec000, 1 }, /* 0xe30ec000 */
+ { TARGET_PORT_CONF, 0x30f0000, 1 }, /* 0xe30f0000 */
+ { TARGET_ASM, 0x3200000, 1 }, /* 0xe3200000 */
+ { TARGET_HSIO_WRAP, 0x3408000, 1 }, /* 0xe3408000 */
+};
+
+static struct sparx5_sdlb_group lan969x_sdlb_groups[LAN969X_SDLB_GRP_CNT] = {
+ { 1000000000, 8192 / 2, 64 }, /* 1 G */
+ { 500000000, 8192 / 2, 64 }, /* 500 M */
+ { 100000000, 8192 / 4, 64 }, /* 100 M */
+ { 50000000, 8192 / 4, 64 }, /* 50 M */
+	{ 5000000, 8192 / 8, 64 }, /* 5 M */
+};
+
+static u32 lan969x_hsch_max_group_rate[LAN969X_HSCH_LEAK_GRP_CNT] = {
+ 655355, 1048568, 6553550, 10485680
+};
+
+static struct sparx5_sdlb_group *lan969x_get_sdlb_group(int idx)
+{
+ return &lan969x_sdlb_groups[idx];
+}
+
+static u32 lan969x_get_hsch_max_group_rate(int grp)
+{
+ return lan969x_hsch_max_group_rate[grp];
+}
+
+static u32 lan969x_get_dev_mode_bit(struct sparx5 *sparx5, int port)
+{
+ if (lan969x_port_is_2g5(port) || lan969x_port_is_5g(port))
+ return port;
+
+ /* 10G */
+ switch (port) {
+ case 0:
+ return 12;
+ case 4:
+ return 13;
+ case 8:
+ return 14;
+ case 12:
+ return 0;
+ default:
+ return port;
+ }
+}
+
+static u32 lan969x_port_dev_mapping(struct sparx5 *sparx5, int port)
+{
+ if (lan969x_port_is_5g(port)) {
+ switch (port) {
+ case 9:
+ return 0;
+ case 13:
+ return 1;
+ case 17:
+ return 2;
+ case 21:
+ return 3;
+ }
+ }
+
+ if (lan969x_port_is_10g(port)) {
+ switch (port) {
+ case 0:
+ return 0;
+ case 4:
+ return 1;
+ case 8:
+ return 2;
+ case 12:
+ return 3;
+ case 16:
+ return 4;
+ case 20:
+ return 5;
+ case 24:
+ return 6;
+ case 25:
+ return 7;
+ case 26:
+ return 8;
+ case 27:
+ return 9;
+ }
+ }
+
+ /* 2g5 port */
+ return port;
+}
+
+static int lan969x_port_mux_set(struct sparx5 *sparx5, struct sparx5_port *port,
+ struct sparx5_port_config *conf)
+{
+ u32 portno = port->portno;
+ u32 inst;
+
+ if (port->conf.portmode == conf->portmode)
+ return 0; /* Nothing to do */
+
+ switch (conf->portmode) {
+ case PHY_INTERFACE_MODE_QSGMII: /* QSGMII: 4x2G5 devices. Mode Q' */
+ inst = (portno - portno % 4) / 4;
+ spx5_rmw(BIT(inst), BIT(inst), sparx5, PORT_CONF_QSGMII_ENA);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static irqreturn_t lan969x_ptp_irq_handler(int irq, void *args)
+{
+ int budget = SPARX5_MAX_PTP_ID;
+ struct sparx5 *sparx5 = args;
+
+ while (budget--) {
+ struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct sparx5_port *port;
+ struct timespec64 ts;
+ unsigned long flags;
+ u32 val, id, txport;
+ u32 delay;
+
+ val = spx5_rd(sparx5, PTP_TWOSTEP_CTRL);
+
+ /* Check if a timestamp can be retrieved */
+ if (!(val & PTP_TWOSTEP_CTRL_PTP_VLD))
+ break;
+
+ WARN_ON(val & PTP_TWOSTEP_CTRL_PTP_OVFL);
+
+ if (!(val & PTP_TWOSTEP_CTRL_STAMP_TX))
+ continue;
+
+ /* Retrieve the ts Tx port */
+ txport = PTP_TWOSTEP_CTRL_STAMP_PORT_GET(val);
+
+ /* Retrieve its associated skb */
+ port = sparx5->ports[txport];
+
+ /* Retrieve the delay */
+ delay = spx5_rd(sparx5, PTP_TWOSTEP_STAMP_NSEC);
+ delay = PTP_TWOSTEP_STAMP_NSEC_NS_GET(delay);
+
+		/* Get the next timestamp from the fifo, which needs to be
+		 * the rx timestamp that represents the id of the frame
+ */
+ spx5_rmw(PTP_TWOSTEP_CTRL_PTP_NXT_SET(1),
+ PTP_TWOSTEP_CTRL_PTP_NXT,
+ sparx5, PTP_TWOSTEP_CTRL);
+
+ val = spx5_rd(sparx5, PTP_TWOSTEP_CTRL);
+
+ /* Check if a timestamp can be retrieved */
+ if (!(val & PTP_TWOSTEP_CTRL_PTP_VLD))
+ break;
+
+ /* Read RX timestamping to get the ID */
+ id = spx5_rd(sparx5, PTP_TWOSTEP_STAMP_NSEC);
+ id <<= 8;
+ id |= spx5_rd(sparx5, PTP_TWOSTEP_STAMP_SUBNS);
+
+ spin_lock_irqsave(&port->tx_skbs.lock, flags);
+ skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
+ if (SPARX5_SKB_CB(skb)->ts_id != id)
+ continue;
+
+ __skb_unlink(skb, &port->tx_skbs);
+ skb_match = skb;
+ break;
+ }
+ spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
+
+ /* Next ts */
+ spx5_rmw(PTP_TWOSTEP_CTRL_PTP_NXT_SET(1),
+ PTP_TWOSTEP_CTRL_PTP_NXT,
+ sparx5, PTP_TWOSTEP_CTRL);
+
+ if (WARN_ON(!skb_match))
+ continue;
+
+ spin_lock_irqsave(&sparx5->ptp_ts_id_lock, flags);
+ sparx5->ptp_skbs--;
+ spin_unlock_irqrestore(&sparx5->ptp_ts_id_lock, flags);
+
+ /* Get the h/w timestamp */
+ sparx5_get_hwtimestamp(sparx5, &ts, delay);
+
+ /* Set the timestamp in the skb */
+ shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
+ skb_tstamp_tx(skb_match, &shhwtstamps);
+
+ dev_kfree_skb_any(skb_match);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static const struct sparx5_regs lan969x_regs = {
+ .tsize = lan969x_tsize,
+ .gaddr = lan969x_gaddr,
+ .gcnt = lan969x_gcnt,
+ .gsize = lan969x_gsize,
+ .raddr = lan969x_raddr,
+ .rcnt = lan969x_rcnt,
+ .fpos = lan969x_fpos,
+ .fsize = lan969x_fsize,
+};
+
+static const struct sparx5_consts lan969x_consts = {
+ .n_ports = 30,
+ .n_ports_all = 35,
+ .n_hsch_l1_elems = 32,
+ .n_hsch_queues = 4,
+ .n_lb_groups = 5,
+ .n_pgids = 1054, /* (1024 + n_ports) */
+ .n_sio_clks = 1,
+ .n_own_upsids = 1,
+ .n_auto_cals = 4,
+ .n_filters = 256,
+ .n_gates = 256,
+ .n_sdlbs = 496,
+ .n_dsm_cal_taxis = 5,
+ .buf_size = 1572864,
+ .qres_max_prio_idx = 315,
+ .qres_max_colour_idx = 323,
+ .tod_pin = 4,
+ .vcaps = lan969x_vcaps,
+ .vcap_stats = &lan969x_vcap_stats,
+ .vcaps_cfg = lan969x_vcap_inst_cfg,
+};
+
+static const struct sparx5_ops lan969x_ops = {
+ .is_port_2g5 = &lan969x_port_is_2g5,
+ .is_port_5g = &lan969x_port_is_5g,
+ .is_port_10g = &lan969x_port_is_10g,
+ .is_port_25g = &lan969x_port_is_25g,
+ .is_port_rgmii = &lan969x_port_is_rgmii,
+ .get_port_dev_index = &lan969x_port_dev_mapping,
+ .get_port_dev_bit = &lan969x_get_dev_mode_bit,
+ .get_hsch_max_group_rate = &lan969x_get_hsch_max_group_rate,
+ .get_sdlb_group = &lan969x_get_sdlb_group,
+ .set_port_mux = &lan969x_port_mux_set,
+ .ptp_irq_handler = &lan969x_ptp_irq_handler,
+ .dsm_calendar_calc = &lan969x_dsm_calendar_calc,
+ .port_config_rgmii = &lan969x_port_config_rgmii,
+ .fdma_init = &lan969x_fdma_init,
+ .fdma_deinit = &lan969x_fdma_deinit,
+ .fdma_poll = &lan969x_fdma_napi_poll,
+ .fdma_xmit = &lan969x_fdma_xmit,
+};
+
+const struct sparx5_match_data lan969x_desc = {
+ .iomap = lan969x_main_iomap,
+ .iomap_size = ARRAY_SIZE(lan969x_main_iomap),
+ .ioranges = 2,
+ .regs = &lan969x_regs,
+ .consts = &lan969x_consts,
+ .ops = &lan969x_ops,
+};
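
The new lan969x.c above ties into the common sparx5 core through a per-chip descriptor: register layout (lan969x_regs), constants (lan969x_consts) and function pointers (lan969x_ops), all bundled in lan969x_desc. The core is then expected to dispatch through sparx5->data instead of compile-time constants (the fdma diff further down already uses sparx5->data->consts->n_ports_all). Below is a stand-alone sketch of that descriptor/ops pattern; the struct and member names are simplified stand-ins, not the driver's real definitions.

/* Standalone sketch of the per-chip descriptor/ops dispatch used above. */
#include <stdbool.h>
#include <stdio.h>

struct sketch_ops {
	bool (*is_port_10g)(int portno);
};

struct sketch_match_data {
	const struct sketch_ops *ops;
	int n_ports;
};

static bool sketch_lan969x_is_10g(int portno)
{
	return portno == 0 || portno == 4;	/* abbreviated from lan969x_port_is_10g() */
}

static const struct sketch_ops sketch_lan969x_ops = {
	.is_port_10g = sketch_lan969x_is_10g,
};

static const struct sketch_match_data sketch_lan969x_desc = {
	.ops = &sketch_lan969x_ops,
	.n_ports = 30,
};

int main(void)
{
	const struct sketch_match_data *data = &sketch_lan969x_desc;

	for (int p = 0; p < 5; p++)
		printf("port %d 10g: %d\n", p, data->ops->is_port_10g(p));
	return 0;
}
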
diff --git a/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.h b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.h
new file mode 100644
index 000000000000..529fde3d4deb
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Microchip lan969x Switch driver
+ *
+ * Copyright (c) 2024 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#ifndef __LAN969X_H__
+#define __LAN969X_H__
+
+#include "../sparx5/sparx5_main.h"
+#include "../sparx5/sparx5_regs.h"
+#include "../sparx5/sparx5_vcap_impl.h"
+
+/* lan969x.c */
+extern const struct sparx5_match_data lan969x_desc;
+
+/* lan969x_vcap_ag_api.c */
+extern const struct vcap_statistics lan969x_vcap_stats;
+extern const struct vcap_info lan969x_vcaps[];
+
+/* lan969x_vcap_impl.c */
+extern const struct sparx5_vcap_inst lan969x_vcap_inst_cfg[];
+
+/* lan969x_regs.c */
+extern const unsigned int lan969x_tsize[TSIZE_LAST];
+extern const unsigned int lan969x_raddr[RADDR_LAST];
+extern const unsigned int lan969x_rcnt[RCNT_LAST];
+extern const unsigned int lan969x_gaddr[GADDR_LAST];
+extern const unsigned int lan969x_gcnt[GCNT_LAST];
+extern const unsigned int lan969x_gsize[GSIZE_LAST];
+extern const unsigned int lan969x_fpos[FPOS_LAST];
+extern const unsigned int lan969x_fsize[FSIZE_LAST];
+
+static inline bool lan969x_port_is_2g5(int portno)
+{
+ return portno == 1 || portno == 2 || portno == 3 ||
+ portno == 5 || portno == 6 || portno == 7 ||
+ portno == 10 || portno == 11 || portno == 14 ||
+ portno == 15 || portno == 18 || portno == 19 ||
+ portno == 22 || portno == 23;
+}
+
+static inline bool lan969x_port_is_5g(int portno)
+{
+ return portno == 9 || portno == 13 || portno == 17 ||
+ portno == 21;
+}
+
+static inline bool lan969x_port_is_10g(int portno)
+{
+ return portno == 0 || portno == 4 || portno == 8 ||
+ portno == 12 || portno == 16 || portno == 20 ||
+ portno == 24 || portno == 25 || portno == 26 ||
+ portno == 27;
+}
+
+static inline bool lan969x_port_is_25g(int portno)
+{
+ return false;
+}
+
+static inline bool lan969x_port_is_rgmii(int portno)
+{
+ return portno == 28 || portno == 29;
+}
+
+/* lan969x_calendar.c */
+int lan969x_dsm_calendar_calc(struct sparx5 *sparx5, u32 taxi,
+ struct sparx5_calendar_data *data);
+
+/* lan969x_rgmii.c */
+int lan969x_port_config_rgmii(struct sparx5_port *port,
+ struct sparx5_port_config *conf);
+
+/* lan969x_fdma.c */
+int lan969x_fdma_init(struct sparx5 *sparx5);
+int lan969x_fdma_deinit(struct sparx5 *sparx5);
+int lan969x_fdma_napi_poll(struct napi_struct *napi, int weight);
+int lan969x_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb,
+ struct net_device *dev);
+
+#endif
diff --git a/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_calendar.c b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_calendar.c
new file mode 100644
index 000000000000..e857640df185
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_calendar.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip lan969x Switch driver
+ *
+ * Copyright (c) 2024 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include "lan969x.h"
+
+#define LAN969X_DSM_CAL_DEVS_PER_TAXI 10
+#define LAN969X_DSM_CAL_TAXIS 5
+
+enum lan969x_dsm_cal_dev {
+ DSM_CAL_DEV_2G5,
+ DSM_CAL_DEV_5G,
+ DSM_CAL_DEV_10G,
+ DSM_CAL_DEV_OTHER, /* 1G or less */
+ DSM_CAL_DEV_MAX
+};
+
+/* Each entry in the following struct defines properties for a given speed
+ * (10G, 5G, 2.5G, or 1G or less).
+ */
+struct lan969x_dsm_cal_dev_speed {
+	/* Number of devices that require this speed. */
+ u32 n_devs;
+
+	/* Array of devices that require this speed. */
+ u32 devs[LAN969X_DSM_CAL_DEVS_PER_TAXI];
+
+ /* Number of slots required for one device running this speed. */
+ u32 n_slots;
+
+ /* Gap between two slots for one device running this speed. */
+ u32 gap;
+};
+
+static u32
+lan969x_taxi_ports[LAN969X_DSM_CAL_TAXIS][LAN969X_DSM_CAL_DEVS_PER_TAXI] = {
+ { 0, 4, 1, 2, 3, 5, 6, 7, 28, 29 },
+ { 8, 12, 9, 13, 10, 11, 14, 15, 99, 99 },
+ { 16, 20, 17, 21, 18, 19, 22, 23, 99, 99 },
+ { 24, 25, 99, 99, 99, 99, 99, 99, 99, 99 },
+ { 26, 27, 99, 99, 99, 99, 99, 99, 99, 99 }
+};
+
+static int lan969x_dsm_cal_idx_get(u32 *calendar, u32 cal_len, u32 *cal_idx)
+{
+ if (*cal_idx >= cal_len)
+ return -EINVAL;
+
+ do {
+ if (calendar[*cal_idx] == SPX5_DSM_CAL_EMPTY)
+ return 0;
+
+ (*cal_idx)++;
+ } while (*cal_idx < cal_len);
+
+ return -ENOENT;
+}
+
+static enum lan969x_dsm_cal_dev lan969x_dsm_cal_get_dev(int speed)
+{
+ return (speed == 10000 ? DSM_CAL_DEV_10G :
+ speed == 5000 ? DSM_CAL_DEV_5G :
+ speed == 2500 ? DSM_CAL_DEV_2G5 :
+ DSM_CAL_DEV_OTHER);
+}
+
+static int lan969x_dsm_cal_get_speed(enum lan969x_dsm_cal_dev dev)
+{
+ return (dev == DSM_CAL_DEV_10G ? 10000 :
+ dev == DSM_CAL_DEV_5G ? 5000 :
+ dev == DSM_CAL_DEV_2G5 ? 2500 :
+ 1000);
+}
+
+int lan969x_dsm_calendar_calc(struct sparx5 *sparx5, u32 taxi,
+ struct sparx5_calendar_data *data)
+{
+ struct lan969x_dsm_cal_dev_speed dev_speeds[DSM_CAL_DEV_MAX] = {};
+ u32 cal_len, n_slots, taxi_bw, n_devs = 0, required_bw = 0;
+ struct lan969x_dsm_cal_dev_speed *speed;
+ int err;
+
+ /* Maximum bandwidth for this taxi */
+ taxi_bw = (128 * 1000000) / sparx5_clk_period(sparx5->coreclock);
+
+ memcpy(data->taxi_ports, &lan969x_taxi_ports[taxi],
+ LAN969X_DSM_CAL_DEVS_PER_TAXI * sizeof(u32));
+
+ for (int i = 0; i < LAN969X_DSM_CAL_DEVS_PER_TAXI; i++) {
+ u32 portno = data->taxi_ports[i];
+ enum sparx5_cal_bw bw;
+
+ bw = sparx5_get_port_cal_speed(sparx5, portno);
+
+ if (portno < sparx5->data->consts->n_ports_all)
+ data->taxi_speeds[i] = sparx5_cal_speed_to_value(bw);
+ else
+ data->taxi_speeds[i] = 0;
+ }
+
+	/* Determine the different port types (10G, 5G, 2.5G, <= 1G) in
+	 * this taxi map.
+ */
+ for (int i = 0; i < LAN969X_DSM_CAL_DEVS_PER_TAXI; i++) {
+ u32 taxi_speed = data->taxi_speeds[i];
+ enum lan969x_dsm_cal_dev dev;
+
+ if (taxi_speed == 0)
+ continue;
+
+ required_bw += taxi_speed;
+
+ dev = lan969x_dsm_cal_get_dev(taxi_speed);
+ speed = &dev_speeds[dev];
+ speed->devs[speed->n_devs++] = i;
+ n_devs++;
+ }
+
+ if (required_bw > taxi_bw) {
+ pr_err("Required bandwidth: %u is higher than total taxi bandwidth: %u",
+ required_bw, taxi_bw);
+ return -EINVAL;
+ }
+
+ if (n_devs == 0) {
+ data->schedule[0] = SPX5_DSM_CAL_EMPTY;
+ return 0;
+ }
+
+ cal_len = n_devs;
+
+ /* Search for a calendar length that fits all active devices. */
+ while (cal_len < SPX5_DSM_CAL_LEN) {
+ u32 bw_per_slot = taxi_bw / cal_len;
+
+ n_slots = 0;
+
+ for (int i = 0; i < DSM_CAL_DEV_MAX; i++) {
+ speed = &dev_speeds[i];
+
+ if (speed->n_devs == 0)
+ continue;
+
+ required_bw = lan969x_dsm_cal_get_speed(i);
+ speed->n_slots = DIV_ROUND_UP(required_bw, bw_per_slot);
+
+ if (speed->n_slots)
+ speed->gap = DIV_ROUND_UP(cal_len,
+ speed->n_slots);
+ else
+ speed->gap = 0;
+
+ n_slots += speed->n_slots * speed->n_devs;
+ }
+
+ if (n_slots <= cal_len)
+ break; /* Found a suitable calendar length. */
+
+ /* Not enough slots; retry with a longer calendar. */
+ cal_len = n_slots;
+ }
+
+ if (cal_len > SPX5_DSM_CAL_LEN) {
+ pr_err("Invalid length: %u for taxi: %u", cal_len, taxi);
+ return -EINVAL;
+ }
+
+ for (u32 i = 0; i < SPX5_DSM_CAL_LEN; i++)
+ data->schedule[i] = SPX5_DSM_CAL_EMPTY;
+
+ /* Place each device's slots in the calendar, spaced by its gap. */
+ for (u32 i = 0; i < DSM_CAL_DEV_MAX; i++) {
+ speed = &dev_speeds[i];
+ for (u32 dev = 0; dev < speed->n_devs; dev++) {
+ u32 idx = 0;
+
+ for (n_slots = 0; n_slots < speed->n_slots; n_slots++) {
+ err = lan969x_dsm_cal_idx_get(data->schedule,
+ cal_len, &idx);
+ if (err)
+ return err;
+ data->schedule[idx] = speed->devs[dev];
+ idx += speed->gap;
+ }
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_fdma.c b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_fdma.c
new file mode 100644
index 000000000000..1282f5c3ee6d
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_fdma.c
@@ -0,0 +1,406 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip lan969x Switch driver
+ *
+ * Copyright (c) 2025 Microchip Technology Inc. and its subsidiaries.
+ */
+#include <net/page_pool/helpers.h>
+
+#include "../sparx5_main.h"
+#include "../sparx5_main_regs.h"
+#include "../sparx5_port.h"
+
+#include "fdma_api.h"
+#include "lan969x.h"
+
+#define FDMA_PRIV(fdma) ((struct sparx5 *)((fdma)->priv))
+
+static int lan969x_fdma_tx_dataptr_cb(struct fdma *fdma, int dcb, int db,
+ u64 *dataptr)
+{
+ *dataptr = FDMA_PRIV(fdma)->tx.dbs[dcb].dma_addr;
+
+ return 0;
+}
+
+static int lan969x_fdma_rx_dataptr_cb(struct fdma *fdma, int dcb, int db,
+ u64 *dataptr)
+{
+ struct sparx5_rx *rx = &FDMA_PRIV(fdma)->rx;
+ struct page *page;
+
+ page = page_pool_dev_alloc_pages(rx->page_pool);
+ if (unlikely(!page))
+ return -ENOMEM;
+
+ rx->page[dcb][db] = page;
+
+ *dataptr = page_pool_get_dma_addr(page);
+
+ return 0;
+}
+
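+/* Find a free TX DCB that is not the last one in the DCB chain. */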
+static int lan969x_fdma_get_next_dcb(struct sparx5_tx *tx)
+{
+ struct fdma *fdma = &tx->fdma;
+
+ for (int i = 0; i < fdma->n_dcbs; ++i)
+ if (!tx->dbs[i].used && !fdma_is_last(fdma, &fdma->dcbs[i]))
+ return i;
+
+ return -ENOSPC;
+}
+
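+/* Reclaim completed TX buffers: update stats, unmap the DMA buffer and free
+ * the skb (unless it is held back for a PTP timestamp).
+ */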
+static void lan969x_fdma_tx_clear_buf(struct sparx5 *sparx5, int weight)
+{
+ struct fdma *fdma = &sparx5->tx.fdma;
+ struct sparx5_tx_buf *db;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&sparx5->tx_lock, flags);
+
+ for (i = 0; i < fdma->n_dcbs; ++i) {
+ db = &sparx5->tx.dbs[i];
+
+ if (!db->used)
+ continue;
+
+ if (!fdma_db_is_done(fdma_db_get(fdma, i, 0)))
+ continue;
+
+ db->dev->stats.tx_bytes += db->skb->len;
+ db->dev->stats.tx_packets++;
+ sparx5->tx.packets++;
+
+ dma_unmap_single(sparx5->dev,
+ db->dma_addr,
+ db->skb->len,
+ DMA_TO_DEVICE);
+
+ if (!db->ptp)
+ napi_consume_skb(db->skb, weight);
+
+ db->used = false;
+ }
+
+ spin_unlock_irqrestore(&sparx5->tx_lock, flags);
+}
+
+static void lan969x_fdma_free_pages(struct sparx5_rx *rx)
+{
+ struct fdma *fdma = &rx->fdma;
+
+ for (int i = 0; i < fdma->n_dcbs; ++i) {
+ for (int j = 0; j < fdma->n_dbs; ++j)
+ page_pool_put_full_page(rx->page_pool,
+ rx->page[i][j], false);
+ }
+}
+
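+/* Build an skb from the current RX page: parse the IFH to find the source
+ * port, strip the IFH and FCS, and attach timestamp and protocol info.
+ */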
+static struct sk_buff *lan969x_fdma_rx_get_frame(struct sparx5 *sparx5,
+ struct sparx5_rx *rx)
+{
+ const struct sparx5_consts *consts = sparx5->data->consts;
+ struct fdma *fdma = &rx->fdma;
+ struct sparx5_port *port;
+ struct frame_info fi;
+ struct sk_buff *skb;
+ struct fdma_db *db;
+ struct page *page;
+
+ db = &fdma->dcbs[fdma->dcb_index].db[fdma->db_index];
+ page = rx->page[fdma->dcb_index][fdma->db_index];
+
+ sparx5_ifh_parse(sparx5, page_address(page), &fi);
+ port = fi.src_port < consts->n_ports ? sparx5->ports[fi.src_port] :
+ NULL;
+ if (WARN_ON(!port))
+ goto free_page;
+
+ skb = build_skb(page_address(page), fdma->db_size);
+ if (unlikely(!skb))
+ goto free_page;
+
+ skb_mark_for_recycle(skb);
+ skb_put(skb, fdma_db_len_get(db));
+ skb_pull(skb, IFH_LEN * sizeof(u32));
+
+ skb->dev = port->ndev;
+
+ if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
+ skb_trim(skb, skb->len - ETH_FCS_LEN);
+
+ sparx5_ptp_rxtstamp(sparx5, skb, fi.timestamp);
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ if (test_bit(port->portno, sparx5->bridge_mask))
+ skb->offload_fwd_mark = 1;
+
+ skb->dev->stats.rx_bytes += skb->len;
+ skb->dev->stats.rx_packets++;
+
+ return skb;
+
+free_page:
+ page_pool_recycle_direct(rx->page_pool, page);
+
+ return NULL;
+}
+
+static int lan969x_fdma_rx_alloc(struct sparx5 *sparx5)
+{
+ struct sparx5_rx *rx = &sparx5->rx;
+ struct fdma *fdma = &rx->fdma;
+ int err;
+
+ struct page_pool_params pp_params = {
+ .order = 0,
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .pool_size = fdma->n_dcbs * fdma->n_dbs,
+ .nid = NUMA_NO_NODE,
+ .dev = sparx5->dev,
+ .dma_dir = DMA_FROM_DEVICE,
+ .offset = 0,
+ .max_len = fdma->db_size -
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
+ };
+
+ rx->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(rx->page_pool))
+ return PTR_ERR(rx->page_pool);
+
+ err = fdma_alloc_coherent(sparx5->dev, fdma);
+ if (err)
+ return err;
+
+ fdma_dcbs_init(fdma,
+ FDMA_DCB_INFO_DATAL(fdma->db_size),
+ FDMA_DCB_STATUS_INTR);
+
+ return 0;
+}
+
+static int lan969x_fdma_tx_alloc(struct sparx5 *sparx5)
+{
+ struct sparx5_tx *tx = &sparx5->tx;
+ struct fdma *fdma = &tx->fdma;
+ int err;
+
+ tx->dbs = kcalloc(fdma->n_dcbs,
+ sizeof(struct sparx5_tx_buf),
+ GFP_KERNEL);
+ if (!tx->dbs)
+ return -ENOMEM;
+
+ err = fdma_alloc_coherent(sparx5->dev, fdma);
+ if (err) {
+ kfree(tx->dbs);
+ return err;
+ }
+
+ fdma_dcbs_init(fdma,
+ FDMA_DCB_INFO_DATAL(fdma->db_size),
+ FDMA_DCB_STATUS_DONE);
+
+ return 0;
+}
+
+static void lan969x_fdma_rx_init(struct sparx5 *sparx5)
+{
+ struct fdma *fdma = &sparx5->rx.fdma;
+
+ fdma->channel_id = FDMA_XTR_CHANNEL;
+ fdma->n_dcbs = FDMA_DCB_MAX;
+ fdma->n_dbs = 1;
+ fdma->priv = sparx5;
+ fdma->size = fdma_get_size(fdma);
+ fdma->db_size = PAGE_SIZE;
+ fdma->ops.dataptr_cb = &lan969x_fdma_rx_dataptr_cb;
+ fdma->ops.nextptr_cb = &fdma_nextptr_cb;
+
+ /* Fetch a netdev for SKB and NAPI use, any will do */
+ for (int idx = 0; idx < sparx5->data->consts->n_ports; ++idx) {
+ struct sparx5_port *port = sparx5->ports[idx];
+
+ if (port && port->ndev) {
+ sparx5->rx.ndev = port->ndev;
+ break;
+ }
+ }
+}
+
+static void lan969x_fdma_tx_init(struct sparx5 *sparx5)
+{
+ struct fdma *fdma = &sparx5->tx.fdma;
+
+ fdma->channel_id = FDMA_INJ_CHANNEL;
+ fdma->n_dcbs = FDMA_DCB_MAX;
+ fdma->n_dbs = 1;
+ fdma->priv = sparx5;
+ fdma->size = fdma_get_size(fdma);
+ fdma->db_size = PAGE_SIZE;
+ fdma->ops.dataptr_cb = &lan969x_fdma_tx_dataptr_cb;
+ fdma->ops.nextptr_cb = &fdma_nextptr_cb;
+}
+
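+/* NAPI poll: reclaim completed TX buffers, receive up to @weight frames and
+ * refill the consumed RX DCBs with fresh pages.
+ */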
+int lan969x_fdma_napi_poll(struct napi_struct *napi, int weight)
+{
+ struct sparx5_rx *rx = container_of(napi, struct sparx5_rx, napi);
+ struct sparx5 *sparx5 = container_of(rx, struct sparx5, rx);
+ int old_dcb, dcb_reload, counter = 0;
+ struct fdma *fdma = &rx->fdma;
+ struct sk_buff *skb;
+
+ dcb_reload = fdma->dcb_index;
+
+ lan969x_fdma_tx_clear_buf(sparx5, weight);
+
+ /* Process RX data */
+ while (counter < weight) {
+ if (!fdma_has_frames(fdma))
+ break;
+
+ skb = lan969x_fdma_rx_get_frame(sparx5, rx);
+ if (!skb)
+ break;
+
+ napi_gro_receive(&rx->napi, skb);
+
+ fdma_db_advance(fdma);
+ counter++;
+ /* Check if the DCB can be reused */
+ if (fdma_dcb_is_reusable(fdma))
+ continue;
+
+ fdma_db_reset(fdma);
+ fdma_dcb_advance(fdma);
+ }
+
+ /* Allocate new pages and map them */
+ while (dcb_reload != fdma->dcb_index) {
+ old_dcb = dcb_reload;
+ dcb_reload++;
+ /* n_dcbs must be a power of 2 */
+ dcb_reload &= fdma->n_dcbs - 1;
+
+ fdma_dcb_add(fdma,
+ old_dcb,
+ FDMA_DCB_INFO_DATAL(fdma->db_size),
+ FDMA_DCB_STATUS_INTR);
+
+ sparx5_fdma_reload(sparx5, fdma);
+ }
+
+ if (counter < weight && napi_complete_done(napi, counter))
+ spx5_wr(0xff, sparx5, FDMA_INTR_DB_ENA);
+
+ return counter;
+}
+
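+/* Inject a frame over FDMA: prepend the IFH, make room for the FCS, map the
+ * buffer and hand a DCB to the hardware. Returns NETDEV_TX_OK or a negative
+ * error.
+ */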
+int lan969x_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb,
+ struct net_device *dev)
+{
+ int next_dcb, needed_headroom, needed_tailroom, err;
+ struct sparx5_tx *tx = &sparx5->tx;
+ struct fdma *fdma = &tx->fdma;
+ struct sparx5_tx_buf *db_buf;
+ u64 status;
+
+ next_dcb = lan969x_fdma_get_next_dcb(tx);
+ if (next_dcb < 0)
+ return -EBUSY;
+
+ needed_headroom = max_t(int, IFH_LEN * 4 - skb_headroom(skb), 0);
+ needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0);
+ if (needed_headroom || needed_tailroom || skb_header_cloned(skb)) {
+ err = pskb_expand_head(skb, needed_headroom, needed_tailroom,
+ GFP_ATOMIC);
+ if (unlikely(err))
+ return err;
+ }
+
+ skb_push(skb, IFH_LEN * 4);
+ memcpy(skb->data, ifh, IFH_LEN * 4);
+ skb_put(skb, ETH_FCS_LEN);
+
+ db_buf = &tx->dbs[next_dcb];
+ db_buf->dma_addr = dma_map_single(sparx5->dev,
+ skb->data,
+ skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(sparx5->dev, db_buf->dma_addr))
+ return -ENOMEM;
+
+ db_buf->dev = dev;
+ db_buf->skb = skb;
+ db_buf->ptp = false;
+ db_buf->used = true;
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ SPARX5_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
+ db_buf->ptp = true;
+
+ status = FDMA_DCB_STATUS_SOF |
+ FDMA_DCB_STATUS_EOF |
+ FDMA_DCB_STATUS_BLOCKO(0) |
+ FDMA_DCB_STATUS_BLOCKL(skb->len) |
+ FDMA_DCB_STATUS_INTR;
+
+ fdma_dcb_advance(fdma);
+ fdma_dcb_add(fdma, next_dcb, 0, status);
+
+ sparx5_fdma_reload(sparx5, fdma);
+
+ return NETDEV_TX_OK;
+}
+
+int lan969x_fdma_init(struct sparx5 *sparx5)
+{
+ struct sparx5_rx *rx = &sparx5->rx;
+ int err;
+
+ lan969x_fdma_rx_init(sparx5);
+ lan969x_fdma_tx_init(sparx5);
+ sparx5_fdma_injection_mode(sparx5);
+
+ err = dma_set_mask_and_coherent(sparx5->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(sparx5->dev, "Failed to set 64-bit FDMA mask");
+ return err;
+ }
+
+ err = lan969x_fdma_rx_alloc(sparx5);
+ if (err) {
+ dev_err(sparx5->dev, "Failed to allocate RX buffers: %d\n",
+ err);
+ return err;
+ }
+
+ err = lan969x_fdma_tx_alloc(sparx5);
+ if (err) {
+ fdma_free_coherent(sparx5->dev, &rx->fdma);
+ dev_err(sparx5->dev, "Failed to allocate TX buffers: %d\n",
+ err);
+ return err;
+ }
+
+ /* Reset FDMA state */
+ spx5_wr(FDMA_CTRL_NRESET_SET(0), sparx5, FDMA_CTRL);
+ spx5_wr(FDMA_CTRL_NRESET_SET(1), sparx5, FDMA_CTRL);
+
+ return err;
+}
+
+int lan969x_fdma_deinit(struct sparx5 *sparx5)
+{
+ struct sparx5_rx *rx = &sparx5->rx;
+ struct sparx5_tx *tx = &sparx5->tx;
+
+ sparx5_fdma_stop(sparx5);
+ fdma_free_coherent(sparx5->dev, &tx->fdma);
+ fdma_free_coherent(sparx5->dev, &rx->fdma);
+ lan969x_fdma_free_pages(rx);
+ page_pool_destroy(rx->page_pool);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_regs.c b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_regs.c
new file mode 100644
index 000000000000..ace4ba21eec4
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_regs.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip lan969x Switch driver
+ *
+ * Copyright (c) 2024 Microchip Technology Inc.
+ */
+
+/* This file is autogenerated by cml-utils 2024-09-30 11:48:29 +0200.
+ * Commit ID: 9d07b8d19363f3cd3590ddb3f7a2e2768e16524b
+ */
+
+#include "lan969x.h"
+
+const unsigned int lan969x_tsize[TSIZE_LAST] = {
+ [TC_DEV10G] = 10,
+ [TC_DEV2G5] = 28,
+ [TC_DEV5G] = 4,
+ [TC_PCS10G_BR] = 10,
+ [TC_PCS5G_BR] = 4,
+};
+
+const unsigned int lan969x_raddr[RADDR_LAST] = {
+ [RA_CPU_PROC_CTRL] = 160,
+ [RA_GCB_SOFT_RST] = 12,
+ [RA_GCB_HW_SGPIO_TO_SD_MAP_CFG] = 20,
+};
+
+const unsigned int lan969x_rcnt[RCNT_LAST] = {
+ [RC_ANA_AC_OWN_UPSID] = 1,
+ [RC_ANA_ACL_VCAP_S2_CFG] = 35,
+ [RC_ANA_ACL_OWN_UPSID] = 1,
+ [RC_ANA_CL_OWN_UPSID] = 1,
+ [RC_ANA_L2_OWN_UPSID] = 1,
+ [RC_ASM_PORT_CFG] = 32,
+ [RC_DSM_BUF_CFG] = 32,
+ [RC_DSM_DEV_TX_STOP_WM_CFG] = 32,
+ [RC_DSM_RX_PAUSE_CFG] = 32,
+ [RC_DSM_MAC_CFG] = 32,
+ [RC_DSM_MAC_ADDR_BASE_HIGH_CFG] = 30,
+ [RC_DSM_MAC_ADDR_BASE_LOW_CFG] = 30,
+ [RC_DSM_TAXI_CAL_CFG] = 6,
+ [RC_GCB_HW_SGPIO_TO_SD_MAP_CFG] = 30,
+ [RC_HSCH_PORT_MODE] = 35,
+ [RC_QFWD_SWITCH_PORT_MODE] = 35,
+ [RC_QSYS_PAUSE_CFG] = 35,
+ [RC_QSYS_ATOP] = 35,
+ [RC_QSYS_FWD_PRESSURE] = 35,
+ [RC_QSYS_CAL_AUTO] = 4,
+ [RC_REW_OWN_UPSID] = 1,
+ [RC_REW_RTAG_ETAG_CTRL] = 35,
+};
+
+const unsigned int lan969x_gaddr[GADDR_LAST] = {
+ [GA_ANA_AC_RAM_CTRL] = 202000,
+ [GA_ANA_AC_PS_COMMON] = 202880,
+ [GA_ANA_AC_MIRROR_PROBE] = 203232,
+ [GA_ANA_AC_SRC] = 201728,
+ [GA_ANA_AC_PGID] = 131072,
+ [GA_ANA_AC_TSN_SF] = 202028,
+ [GA_ANA_AC_TSN_SF_CFG] = 148480,
+ [GA_ANA_AC_TSN_SF_STATUS] = 147936,
+ [GA_ANA_AC_SG_ACCESS] = 202032,
+ [GA_ANA_AC_SG_CONFIG] = 202752,
+ [GA_ANA_AC_SG_STATUS] = 147952,
+ [GA_ANA_AC_SG_STATUS_STICKY] = 202044,
+ [GA_ANA_AC_STAT_GLOBAL_CFG_PORT] = 202048,
+ [GA_ANA_AC_STAT_CNT_CFG_PORT] = 204800,
+ [GA_ANA_AC_STAT_GLOBAL_CFG_ACL] = 202068,
+ [GA_ANA_ACL_COMMON] = 8192,
+ [GA_ANA_ACL_KEY_SEL] = 9204,
+ [GA_ANA_ACL_CNT_B] = 4096,
+ [GA_ANA_ACL_STICKY] = 10852,
+ [GA_ANA_AC_POL_POL_ALL_CFG] = 17504,
+ [GA_ANA_AC_POL_COMMON_BDLB] = 19464,
+ [GA_ANA_AC_POL_COMMON_BUM_SLB] = 19472,
+ [GA_ANA_AC_SDLB_LBGRP_TBL] = 31788,
+ [GA_ANA_CL_PORT] = 65536,
+ [GA_ANA_CL_COMMON] = 87040,
+ [GA_ANA_L2_COMMON] = 561928,
+ [GA_ANA_L3_COMMON] = 370752,
+ [GA_ANA_L3_VLAN_ARP_L3MC_STICKY] = 368580,
+ [GA_ASM_CFG] = 18304,
+ [GA_ASM_PFC_TIMER_CFG] = 15568,
+ [GA_ASM_LBK_WM_CFG] = 15596,
+ [GA_ASM_LBK_MISC_CFG] = 15608,
+ [GA_ASM_RAM_CTRL] = 15684,
+ [GA_EACL_ES2_KEY_SELECT_PROFILE] = 36864,
+ [GA_EACL_CNT_TBL] = 30720,
+ [GA_EACL_POL_CFG] = 38400,
+ [GA_EACL_ES2_STICKY] = 29072,
+ [GA_EACL_RAM_CTRL] = 29112,
+ [GA_GCB_SIO_CTRL] = 560,
+ [GA_HSCH_HSCH_DWRR] = 36480,
+ [GA_HSCH_HSCH_MISC] = 36608,
+ [GA_HSCH_HSCH_LEAK_LISTS] = 37256,
+ [GA_HSCH_SYSTEM] = 37384,
+ [GA_HSCH_MMGT] = 36260,
+ [GA_HSCH_TAS_CONFIG] = 37696,
+ [GA_PTP_PTP_CFG] = 512,
+ [GA_PTP_PTP_TOD_DOMAINS] = 528,
+ [GA_PTP_PHASE_DETECTOR_CTRL] = 628,
+ [GA_QSYS_CALCFG] = 2164,
+ [GA_QSYS_RAM_CTRL] = 2204,
+ [GA_REW_COMMON] = 98304,
+ [GA_REW_PORT] = 49152,
+ [GA_REW_VOE_PORT_LM_CNT] = 90112,
+ [GA_REW_RAM_CTRL] = 93992,
+ [GA_VOP_RAM_CTRL] = 16368,
+ [GA_XQS_SYSTEM] = 5744,
+ [GA_XQS_QLIMIT_SHR] = 6912,
+};
+
+const unsigned int lan969x_gcnt[GCNT_LAST] = {
+ [GC_ANA_AC_SRC] = 67,
+ [GC_ANA_AC_PGID] = 1054,
+ [GC_ANA_AC_TSN_SF_CFG] = 256,
+ [GC_ANA_AC_STAT_CNT_CFG_PORT] = 35,
+ [GC_ANA_ACL_KEY_SEL] = 99,
+ [GC_ANA_ACL_CNT_A] = 1024,
+ [GC_ANA_ACL_CNT_B] = 1024,
+ [GC_ANA_AC_SDLB_LBGRP_TBL] = 5,
+ [GC_ANA_AC_SDLB_LBSET_TBL] = 496,
+ [GC_ANA_CL_PORT] = 35,
+ [GC_ANA_L2_ISDX_LIMIT] = 256,
+ [GC_ANA_L2_ISDX] = 1024,
+ [GC_ANA_L3_VLAN] = 4608,
+ [GC_ASM_DEV_STATISTICS] = 30,
+ [GC_EACL_ES2_KEY_SELECT_PROFILE] = 68,
+ [GC_EACL_CNT_TBL] = 512,
+ [GC_GCB_SIO_CTRL] = 1,
+ [GC_HSCH_HSCH_CFG] = 1120,
+ [GC_HSCH_HSCH_DWRR] = 32,
+ [GC_PTP_PTP_PINS] = 8,
+ [GC_PTP_PHASE_DETECTOR_CTRL] = 8,
+ [GC_REW_PORT] = 35,
+ [GC_REW_VOE_PORT_LM_CNT] = 240,
+};
+
+const unsigned int lan969x_gsize[GSIZE_LAST] = {
+ [GW_ANA_AC_SRC] = 4,
+ [GW_ANA_L2_COMMON] = 712,
+ [GW_ASM_CFG] = 1092,
+ [GW_CPU_CPU_REGS] = 180,
+ [GW_DEV2G5_PHASE_DETECTOR_CTRL] = 12,
+ [GW_FDMA_FDMA] = 448,
+ [GW_GCB_CHIP_REGS] = 180,
+ [GW_HSCH_TAS_CONFIG] = 16,
+ [GW_PTP_PHASE_DETECTOR_CTRL] = 12,
+ [GW_QSYS_PAUSE_CFG] = 988,
+};
+
+const unsigned int lan969x_fpos[FPOS_LAST] = {
+ [FP_CPU_PROC_CTRL_AARCH64_MODE_ENA] = 7,
+ [FP_CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS] = 6,
+ [FP_CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS] = 5,
+ [FP_CPU_PROC_CTRL_BE_EXCEP_MODE] = 4,
+ [FP_CPU_PROC_CTRL_VINITHI] = 3,
+ [FP_CPU_PROC_CTRL_CFGTE] = 2,
+ [FP_CPU_PROC_CTRL_CP15S_DISABLE] = 1,
+ [FP_CPU_PROC_CTRL_PROC_CRYPTO_DISABLE] = 0,
+ [FP_CPU_PROC_CTRL_L2_FLUSH_REQ] = 8,
+ [FP_DEV2G5_PHAD_CTRL_PHAD_ENA] = 5,
+ [FP_DEV2G5_PHAD_CTRL_PHAD_FAILED] = 3,
+ [FP_FDMA_CH_CFG_CH_XTR_STATUS_MODE] = 5,
+ [FP_FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY] = 4,
+ [FP_FDMA_CH_CFG_CH_INJ_PORT] = 3,
+ [FP_PTP_PTP_PIN_CFG_PTP_PIN_ACTION] = 27,
+ [FP_PTP_PTP_PIN_CFG_PTP_PIN_SYNC] = 25,
+ [FP_PTP_PTP_PIN_CFG_PTP_PIN_INV_POL] = 24,
+ [FP_PTP_PHAD_CTRL_PHAD_ENA] = 5,
+ [FP_PTP_PHAD_CTRL_PHAD_FAILED] = 3,
+};
+
+const unsigned int lan969x_fsize[FSIZE_LAST] = {
+ [FW_ANA_AC_PROBE_PORT_CFG_PROBE_PORT_MASK] = 30,
+ [FW_ANA_AC_SRC_CFG_PORT_MASK] = 30,
+ [FW_ANA_AC_PGID_CFG_PORT_MASK] = 30,
+ [FW_ANA_AC_TSN_SF_PORT_NUM] = 7,
+ [FW_ANA_AC_TSN_SF_CFG_TSN_SGID] = 8,
+ [FW_ANA_AC_TSN_SF_STATUS_TSN_SFID] = 8,
+ [FW_ANA_AC_SG_ACCESS_CTRL_SGID] = 8,
+ [FW_ANA_AC_PORT_SGE_CFG_MASK] = 17,
+ [FW_ANA_AC_SDLB_XLB_START_LBSET_START] = 9,
+ [FW_ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT] = 3,
+ [FW_ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT] = 9,
+ [FW_ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT] = 9,
+ [FW_ANA_AC_SDLB_XLB_NEXT_LBGRP] = 3,
+ [FW_ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR] = 9,
+ [FW_ANA_L2_AUTO_LRN_CFG_AUTO_LRN_ENA] = 30,
+ [FW_ANA_L2_DLB_CFG_DLB_IDX] = 9,
+ [FW_ANA_L2_TSN_CFG_TSN_SFID] = 8,
+ [FW_ANA_L3_VLAN_MASK_CFG_VLAN_PORT_MASK] = 30,
+ [FW_FDMA_CH_CFG_CH_DCB_DB_CNT] = 2,
+ [FW_GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL] = 7,
+ [FW_HSCH_SE_CFG_SE_DWRR_CNT] = 5,
+ [FW_HSCH_SE_CONNECT_SE_LEAK_LINK] = 14,
+ [FW_HSCH_SE_DLB_SENSE_SE_DLB_DPORT] = 6,
+ [FW_HSCH_HSCH_CFG_CFG_CFG_SE_IDX] = 11,
+ [FW_HSCH_HSCH_LEAK_CFG_LEAK_FIRST] = 14,
+ [FW_HSCH_FLUSH_CTRL_FLUSH_PORT] = 6,
+ [FW_HSCH_FLUSH_CTRL_FLUSH_HIER] = 14,
+ [FW_LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW] = 13,
+ [FW_LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX] = 8,
+ [FW_LRN_AUTOAGE_CFG_2_NEXT_ROW] = 13,
+ [FW_PTP_PTP_PIN_INTR_INTR_PTP] = 8,
+ [FW_PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA] = 8,
+ [FW_PTP_PTP_INTR_IDENT_INTR_PTP_IDENT] = 8,
+ [FW_PTP_PTP_PIN_CFG_PTP_PIN_SELECT] = 3,
+ [FW_QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL] = 6,
+ [FW_QRES_RES_CFG_WM_HIGH] = 11,
+ [FW_QRES_RES_STAT_MAXUSE] = 19,
+ [FW_QRES_RES_STAT_CUR_INUSE] = 19,
+ [FW_QSYS_PAUSE_CFG_PAUSE_START] = 11,
+ [FW_QSYS_PAUSE_CFG_PAUSE_STOP] = 11,
+ [FW_QSYS_ATOP_ATOP] = 11,
+ [FW_QSYS_ATOP_TOT_CFG_ATOP_TOT] = 11,
+ [FW_REW_RTAG_ETAG_CTRL_IPE_TBL] = 6,
+ [FW_XQS_STAT_CFG_STAT_VIEW] = 10,
+ [FW_XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP] = 14,
+ [FW_XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP] = 14,
+ [FW_XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP] = 14,
+ [FW_XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM] = 14,
+};
diff --git a/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_rgmii.c b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_rgmii.c
new file mode 100644
index 000000000000..4e422ca50828
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_rgmii.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip lan969x Switch driver
+ *
+ * Copyright (c) 2024 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include "lan969x.h"
+
+/* Tx clock selectors */
+#define LAN969X_RGMII_TX_CLK_SEL_125MHZ 1 /* 1000Mbps */
+#define LAN969X_RGMII_TX_CLK_SEL_25MHZ 2 /* 100Mbps */
+#define LAN969X_RGMII_TX_CLK_SEL_2M5MHZ 3 /* 10Mbps */
+
+/* Port speed selectors */
+#define LAN969X_RGMII_SPEED_SEL_10 0 /* Select 10Mbps speed */
+#define LAN969X_RGMII_SPEED_SEL_100 1 /* Select 100Mbps speed */
+#define LAN969X_RGMII_SPEED_SEL_1000 2 /* Select 1000Mbps speed */
+
+/* Clock delay selectors */
+#define LAN969X_RGMII_CLK_DELAY_SEL_1_0_NS 2 /* Phase shift 45deg */
+#define LAN969X_RGMII_CLK_DELAY_SEL_1_7_NS 3 /* Phase shift 77deg */
+#define LAN969X_RGMII_CLK_DELAY_SEL_2_0_NS 4 /* Phase shift 90deg */
+#define LAN969X_RGMII_CLK_DELAY_SEL_2_5_NS 5 /* Phase shift 112deg */
+#define LAN969X_RGMII_CLK_DELAY_SEL_3_0_NS 6 /* Phase shift 135deg */
+#define LAN969X_RGMII_CLK_DELAY_SEL_3_3_NS 7 /* Phase shift 147deg */
+
+#define LAN969X_RGMII_PORT_START_IDX 28 /* Index of the first RGMII port */
+#define LAN969X_RGMII_IFG_TX 4 /* TX Inter Frame Gap value */
+#define LAN969X_RGMII_IFG_RX1 5 /* RX1 Inter Frame Gap value */
+#define LAN969X_RGMII_IFG_RX2 1 /* RX2 Inter Frame Gap value */
+
+#define RGMII_PORT_IDX(port) ((port)->portno - LAN969X_RGMII_PORT_START_IDX)
+
+/* Get the tx clock selector based on the port speed. */
+static int lan969x_rgmii_get_clk_sel(int speed)
+{
+ return (speed == SPEED_10 ? LAN969X_RGMII_TX_CLK_SEL_2M5MHZ :
+ speed == SPEED_100 ? LAN969X_RGMII_TX_CLK_SEL_25MHZ :
+ LAN969X_RGMII_TX_CLK_SEL_125MHZ);
+}
+
+/* Get the port speed selector based on the port speed. */
+static int lan969x_rgmii_get_speed_sel(int speed)
+{
+ return (speed == SPEED_10 ? LAN969X_RGMII_SPEED_SEL_10 :
+ speed == SPEED_100 ? LAN969X_RGMII_SPEED_SEL_100 :
+ LAN969X_RGMII_SPEED_SEL_1000);
+}
+
+/* Get the clock delay selector based on the clock delay in picoseconds. */
+static int lan969x_rgmii_get_clk_delay_sel(struct sparx5_port *port,
+ u32 delay_ps, u32 *clk_delay_sel)
+{
+ switch (delay_ps) {
+ case 0:
+ /* Hardware default selector. */
+ *clk_delay_sel = LAN969X_RGMII_CLK_DELAY_SEL_2_5_NS;
+ break;
+ case 1000:
+ *clk_delay_sel = LAN969X_RGMII_CLK_DELAY_SEL_1_0_NS;
+ break;
+ case 1700:
+ *clk_delay_sel = LAN969X_RGMII_CLK_DELAY_SEL_1_7_NS;
+ break;
+ case 2000:
+ *clk_delay_sel = LAN969X_RGMII_CLK_DELAY_SEL_2_0_NS;
+ break;
+ case 2500:
+ *clk_delay_sel = LAN969X_RGMII_CLK_DELAY_SEL_2_5_NS;
+ break;
+ case 3000:
+ *clk_delay_sel = LAN969X_RGMII_CLK_DELAY_SEL_3_0_NS;
+ break;
+ case 3300:
+ *clk_delay_sel = LAN969X_RGMII_CLK_DELAY_SEL_3_3_NS;
+ break;
+ default:
+ dev_err(port->sparx5->dev, "Invalid RGMII delay: %u", delay_ps);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Configure the RGMII tx clock frequency. */
+static void lan969x_rgmii_tx_clk_config(struct sparx5_port *port,
+ struct sparx5_port_config *conf)
+{
+ u32 clk_sel = lan969x_rgmii_get_clk_sel(conf->speed);
+ u32 idx = RGMII_PORT_IDX(port);
+
+ /* Take the RGMII clock domain out of reset and set tx clock
+ * frequency.
+ */
+ spx5_rmw(HSIO_WRAP_RGMII_CFG_TX_CLK_CFG_SET(clk_sel) |
+ HSIO_WRAP_RGMII_CFG_RGMII_TX_RST_SET(0) |
+ HSIO_WRAP_RGMII_CFG_RGMII_RX_RST_SET(0),
+ HSIO_WRAP_RGMII_CFG_TX_CLK_CFG |
+ HSIO_WRAP_RGMII_CFG_RGMII_TX_RST |
+ HSIO_WRAP_RGMII_CFG_RGMII_RX_RST,
+ port->sparx5, HSIO_WRAP_RGMII_CFG(idx));
+}
+
+/* Configure the RGMII port device. */
+static void lan969x_rgmii_port_device_config(struct sparx5_port *port,
+ struct sparx5_port_config *conf)
+{
+ u32 dtag, dotag, etype, speed_sel, idx = RGMII_PORT_IDX(port);
+
+ speed_sel = lan969x_rgmii_get_speed_sel(conf->speed);
+
+ etype = (port->vlan_type == SPX5_VLAN_PORT_TYPE_S_CUSTOM ?
+ port->custom_etype :
+ port->vlan_type == SPX5_VLAN_PORT_TYPE_C ?
+ ETH_P_8021Q : ETH_P_8021AD);
+
+ dtag = port->max_vlan_tags == SPX5_PORT_MAX_TAGS_TWO;
+ dotag = port->max_vlan_tags != SPX5_PORT_MAX_TAGS_NONE;
+
+ /* Enable the MAC. */
+ spx5_wr(DEVRGMII_MAC_ENA_CFG_RX_ENA_SET(1) |
+ DEVRGMII_MAC_ENA_CFG_TX_ENA_SET(1),
+ port->sparx5, DEVRGMII_MAC_ENA_CFG(idx));
+
+ /* Configure the Inter Frame Gap. */
+ spx5_wr(DEVRGMII_MAC_IFG_CFG_TX_IFG_SET(LAN969X_RGMII_IFG_TX) |
+ DEVRGMII_MAC_IFG_CFG_RX_IFG1_SET(LAN969X_RGMII_IFG_RX1) |
+ DEVRGMII_MAC_IFG_CFG_RX_IFG2_SET(LAN969X_RGMII_IFG_RX2),
+ port->sparx5, DEVRGMII_MAC_IFG_CFG(idx));
+
+ /* Configure port data rate. */
+ spx5_wr(DEVRGMII_DEV_RST_CTRL_SPEED_SEL_SET(speed_sel),
+ port->sparx5, DEVRGMII_DEV_RST_CTRL(idx));
+
+ /* Configure VLAN awareness. */
+ spx5_wr(DEVRGMII_MAC_TAGS_CFG_TAG_ID_SET(etype) |
+ DEVRGMII_MAC_TAGS_CFG_PB_ENA_SET(dtag) |
+ DEVRGMII_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(dotag) |
+ DEVRGMII_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA_SET(dotag),
+ port->sparx5,
+ DEVRGMII_MAC_TAGS_CFG(idx));
+}
+
+/* Configure the RGMII delay lines in the MAC.
+ *
+ * We use the "rx-internal-delay-ps" and "tx-internal-delay-ps" properties to
+ * configure the rx and tx delays for the MAC. If these properties are missing
+ * or set to zero, the MAC will not apply any delay.
+ *
+ * The PHY-side delays are determined by the PHY mode
+ * (e.g. PHY_INTERFACE_MODE_RGMII_{ID, RXID, TXID}) and are ignored entirely
+ * by the MAC side.
+ */
+static int lan969x_rgmii_delay_config(struct sparx5_port *port,
+ struct sparx5_port_config *conf)
+{
+ u32 tx_clk_sel, rx_clk_sel, tx_delay_ps = 0, rx_delay_ps = 0;
+ u32 idx = RGMII_PORT_IDX(port);
+ int err;
+
+ of_property_read_u32(port->of_node, "rx-internal-delay-ps",
+ &rx_delay_ps);
+
+ of_property_read_u32(port->of_node, "tx-internal-delay-ps",
+ &tx_delay_ps);
+
+ err = lan969x_rgmii_get_clk_delay_sel(port, rx_delay_ps, &rx_clk_sel);
+ if (err)
+ return err;
+
+ err = lan969x_rgmii_get_clk_delay_sel(port, tx_delay_ps, &tx_clk_sel);
+ if (err)
+ return err;
+
+ /* Configure rx delay. */
+ spx5_rmw(HSIO_WRAP_DLL_CFG_DLL_RST_SET(0) |
+ HSIO_WRAP_DLL_CFG_DLL_ENA_SET(1) |
+ HSIO_WRAP_DLL_CFG_DLL_CLK_ENA_SET(!!rx_delay_ps) |
+ HSIO_WRAP_DLL_CFG_DLL_CLK_SEL_SET(rx_clk_sel),
+ HSIO_WRAP_DLL_CFG_DLL_RST |
+ HSIO_WRAP_DLL_CFG_DLL_ENA |
+ HSIO_WRAP_DLL_CFG_DLL_CLK_ENA |
+ HSIO_WRAP_DLL_CFG_DLL_CLK_SEL,
+ port->sparx5, HSIO_WRAP_DLL_CFG(idx, 0));
+
+ /* Configure tx delay. */
+ spx5_rmw(HSIO_WRAP_DLL_CFG_DLL_RST_SET(0) |
+ HSIO_WRAP_DLL_CFG_DLL_ENA_SET(1) |
+ HSIO_WRAP_DLL_CFG_DLL_CLK_ENA_SET(!!tx_delay_ps) |
+ HSIO_WRAP_DLL_CFG_DLL_CLK_SEL_SET(tx_clk_sel),
+ HSIO_WRAP_DLL_CFG_DLL_RST |
+ HSIO_WRAP_DLL_CFG_DLL_ENA |
+ HSIO_WRAP_DLL_CFG_DLL_CLK_ENA |
+ HSIO_WRAP_DLL_CFG_DLL_CLK_SEL,
+ port->sparx5, HSIO_WRAP_DLL_CFG(idx, 1));
+
+ return 0;
+}
+
+/* Configure the GPIOs to be used as an RGMII interface. */
+static void lan969x_rgmii_gpio_config(struct sparx5_port *port)
+{
+ u32 idx = RGMII_PORT_IDX(port);
+
+ /* Enable the RGMII on the GPIOs. */
+ spx5_wr(HSIO_WRAP_XMII_CFG_GPIO_XMII_CFG_SET(1), port->sparx5,
+ HSIO_WRAP_XMII_CFG(!idx));
+}
+
+int lan969x_port_config_rgmii(struct sparx5_port *port,
+ struct sparx5_port_config *conf)
+{
+ int err;
+
+ err = lan969x_rgmii_delay_config(port, conf);
+ if (err)
+ return err;
+
+ lan969x_rgmii_tx_clk_config(port, conf);
+ lan969x_rgmii_gpio_config(port);
+ lan969x_rgmii_port_device_config(port, conf);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_vcap_ag_api.c b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_vcap_ag_api.c
new file mode 100644
index 000000000000..7acc5bcf337a
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_vcap_ag_api.c
@@ -0,0 +1,3843 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/* Copyright (C) 2024 Microchip Technology Inc. and its subsidiaries.
+ * Microchip VCAP API
+ */
+
+/* This file is autogenerated by cml-utils 2024-10-07 11:10:56 +0200.
+ * Commit ID: b5ddc8e244eb2481a9524f1ddc630a8b41e7c391
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+#include "lan969x.h"
+
+/* keyfields */
+static const struct vcap_field is0_normal_7tuple_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 1,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_GEN_IDX_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_GEN_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 4,
+ .width = 10,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U72,
+ .offset = 16,
+ .width = 65,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 81,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 82,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 83,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_TPID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 86,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 89,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 92,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 93,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_TPID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 105,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 108,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI1] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 111,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 112,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_TPID2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 124,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 127,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI2] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 130,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 131,
+ .width = 12,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 144,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 192,
+ .width = 48,
+ },
+ [VCAP_KF_IP_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 240,
+ .width = 1,
+ },
+ [VCAP_KF_ETYPE_LEN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 241,
+ .width = 1,
+ },
+ [VCAP_KF_ETYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 242,
+ .width = 16,
+ },
+ [VCAP_KF_IP_SNAP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 258,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 259,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 260,
+ .width = 2,
+ },
+ [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 262,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 263,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DSCP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 264,
+ .width = 6,
+ },
+ [VCAP_KF_L3_IP6_DIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 270,
+ .width = 128,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 398,
+ .width = 128,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 526,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 527,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 528,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 544,
+ .width = 8,
+ },
+};
+
+static const struct vcap_field is0_normal_5tuple_ip4_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 2,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_GEN_IDX_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 3,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_GEN_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 10,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 15,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U72,
+ .offset = 17,
+ .width = 65,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 82,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 83,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 84,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_TPID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 87,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 90,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 93,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 94,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_TPID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 106,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 109,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI1] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 112,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 113,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_TPID2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 125,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 128,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI2] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 131,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 132,
+ .width = 12,
+ },
+ [VCAP_KF_IP_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 145,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 146,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 147,
+ .width = 2,
+ },
+ [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 149,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 150,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DSCP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 151,
+ .width = 6,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 157,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 189,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 221,
+ .width = 8,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 229,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 230,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 231,
+ .width = 8,
+ },
+ [VCAP_KF_IP_PAYLOAD_5TUPLE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 239,
+ .width = 32,
+ },
+};
+
+static const struct vcap_field is2_mac_etype_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_L3] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 4,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 18,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 32,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 52,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 53,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 54,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 56,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 57,
+ .width = 10,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 67,
+ .width = 13,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 80,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 81,
+ .width = 3,
+ },
+ [VCAP_KF_L2_FWD_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 84,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 87,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 88,
+ .width = 1,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 89,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 137,
+ .width = 48,
+ },
+ [VCAP_KF_ETYPE_LEN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 185,
+ .width = 1,
+ },
+ [VCAP_KF_ETYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 186,
+ .width = 16,
+ },
+ [VCAP_KF_L2_PAYLOAD_ETYPE] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 202,
+ .width = 64,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 266,
+ .width = 16,
+ },
+ [VCAP_KF_OAM_CCM_CNTS_EQ0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 282,
+ .width = 1,
+ },
+ [VCAP_KF_OAM_Y1731_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 283,
+ .width = 1,
+ },
+};
+
+static const struct vcap_field is2_arp_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_L3] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 4,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 18,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 32,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 52,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 53,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 54,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 56,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 57,
+ .width = 10,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 67,
+ .width = 13,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 80,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 81,
+ .width = 3,
+ },
+ [VCAP_KF_L2_FWD_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 84,
+ .width = 1,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 85,
+ .width = 48,
+ },
+ [VCAP_KF_ARP_ADDR_SPACE_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 133,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_PROTO_SPACE_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 134,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_LEN_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 135,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_TGT_MATCH_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 136,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_SENDER_MATCH_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 137,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_OPCODE_UNKNOWN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 138,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_OPCODE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 139,
+ .width = 2,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 141,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 173,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 205,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 206,
+ .width = 16,
+ },
+};
+
+static const struct vcap_field is2_ip4_tcp_udp_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_L3] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 4,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 18,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 32,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 52,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 53,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 54,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 56,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 57,
+ .width = 10,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 67,
+ .width = 13,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 80,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 81,
+ .width = 3,
+ },
+ [VCAP_KF_L2_FWD_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 84,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 87,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 88,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 89,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 90,
+ .width = 2,
+ },
+ [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 92,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 93,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 95,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 103,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 135,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 167,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 168,
+ .width = 1,
+ },
+ [VCAP_KF_L4_DPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 169,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 185,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 201,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 217,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SEQUENCE_EQ0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 218,
+ .width = 1,
+ },
+ [VCAP_KF_L4_FIN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 219,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SYN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 220,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RST] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 221,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PSH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 222,
+ .width = 1,
+ },
+ [VCAP_KF_L4_ACK] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 223,
+ .width = 1,
+ },
+ [VCAP_KF_L4_URG] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 224,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PAYLOAD] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 225,
+ .width = 64,
+ },
+};
+
+static const struct vcap_field is2_ip4_other_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_L3] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 4,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 18,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 32,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 52,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 53,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 54,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 56,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 57,
+ .width = 10,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 67,
+ .width = 13,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 80,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 81,
+ .width = 3,
+ },
+ [VCAP_KF_L2_FWD_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 84,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 87,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 88,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 89,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 90,
+ .width = 2,
+ },
+ [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 92,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 93,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 95,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 103,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 135,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 167,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 168,
+ .width = 8,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 176,
+ .width = 16,
+ },
+ [VCAP_KF_L3_PAYLOAD] = {
+ .type = VCAP_FIELD_U112,
+ .offset = 192,
+ .width = 96,
+ },
+};
+
+static const struct vcap_field is2_ip6_std_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_L3] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 4,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 18,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 32,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 52,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 53,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 54,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 56,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 57,
+ .width = 10,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 67,
+ .width = 13,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 80,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 81,
+ .width = 3,
+ },
+ [VCAP_KF_L2_FWD_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 84,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 87,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 89,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 90,
+ .width = 128,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 218,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 219,
+ .width = 8,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 227,
+ .width = 16,
+ },
+ [VCAP_KF_L3_PAYLOAD] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 243,
+ .width = 40,
+ },
+};
+
+static const struct vcap_field is2_ip_7tuple_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 2,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 3,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_L3] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 11,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 12,
+ .width = 4,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U72,
+ .offset = 18,
+ .width = 65,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 83,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 84,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 85,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 87,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 88,
+ .width = 10,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 98,
+ .width = 13,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 111,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 112,
+ .width = 3,
+ },
+ [VCAP_KF_L2_FWD_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 115,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 118,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 119,
+ .width = 1,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 120,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 168,
+ .width = 48,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 218,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 219,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 220,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP6_DIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 228,
+ .width = 128,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 356,
+ .width = 128,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 484,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 485,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 486,
+ .width = 1,
+ },
+ [VCAP_KF_L4_DPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 487,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 503,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 519,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 535,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SEQUENCE_EQ0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 536,
+ .width = 1,
+ },
+ [VCAP_KF_L4_FIN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 537,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SYN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 538,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RST] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 539,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PSH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 540,
+ .width = 1,
+ },
+ [VCAP_KF_L4_ACK] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 541,
+ .width = 1,
+ },
+ [VCAP_KF_L4_URG] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 542,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PAYLOAD] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 543,
+ .width = 64,
+ },
+};
+
+static const struct vcap_field es0_isdx_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_KF_IF_EGR_PORT_NO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 1,
+ .width = 6,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 7,
+ .width = 13,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_TPID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 23,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 26,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 27,
+ .width = 1,
+ },
+ [VCAP_KF_PROT_ACTIVE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 28,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 38,
+ .width = 10,
+ },
+};
+
+static const struct vcap_field es2_mac_etype_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 3,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 3,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 10,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 26,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 28,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 41,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 44,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 76,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 77,
+ .width = 7,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 84,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 87,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 88,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 91,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 92,
+ .width = 1,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 96,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 144,
+ .width = 48,
+ },
+ [VCAP_KF_ETYPE_LEN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 192,
+ .width = 1,
+ },
+ [VCAP_KF_ETYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 193,
+ .width = 16,
+ },
+ [VCAP_KF_L2_PAYLOAD_ETYPE] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 209,
+ .width = 64,
+ },
+ [VCAP_KF_OAM_CCM_CNTS_EQ0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 273,
+ .width = 1,
+ },
+ [VCAP_KF_OAM_Y1731_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 274,
+ .width = 1,
+ },
+};
+
+static const struct vcap_field es2_arp_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 3,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 3,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 10,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 26,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 28,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 41,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 44,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 76,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 77,
+ .width = 7,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 84,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 87,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 88,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 91,
+ .width = 1,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 95,
+ .width = 48,
+ },
+ [VCAP_KF_ARP_ADDR_SPACE_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 143,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_PROTO_SPACE_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 144,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_LEN_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 145,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_TGT_MATCH_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 146,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_SENDER_MATCH_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 147,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_OPCODE_UNKNOWN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 148,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_OPCODE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 149,
+ .width = 2,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 151,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 183,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 215,
+ .width = 1,
+ },
+};
+
+static const struct vcap_field es2_ip4_tcp_udp_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 3,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 3,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 10,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 26,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 28,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 41,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 44,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 76,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 77,
+ .width = 7,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 84,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 87,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 88,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 91,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 92,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 96,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 97,
+ .width = 2,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 99,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 100,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 101,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 109,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 141,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 173,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 174,
+ .width = 1,
+ },
+ [VCAP_KF_L4_DPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 175,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 191,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 207,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 223,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SEQUENCE_EQ0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 224,
+ .width = 1,
+ },
+ [VCAP_KF_L4_FIN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 225,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SYN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 226,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RST] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 227,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PSH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 228,
+ .width = 1,
+ },
+ [VCAP_KF_L4_ACK] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 229,
+ .width = 1,
+ },
+ [VCAP_KF_L4_URG] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 230,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PAYLOAD] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 231,
+ .width = 64,
+ },
+};
+
+static const struct vcap_field es2_ip4_other_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 3,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 3,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 10,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 26,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 28,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 41,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 44,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 76,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 77,
+ .width = 7,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 84,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 87,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 88,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 91,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 92,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 96,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 97,
+ .width = 2,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 99,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 100,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 101,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 109,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 141,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 173,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 174,
+ .width = 8,
+ },
+ [VCAP_KF_L3_PAYLOAD] = {
+ .type = VCAP_FIELD_U112,
+ .offset = 182,
+ .width = 96,
+ },
+};
+
+static const struct vcap_field es2_ip_7tuple_keyfield[] = {
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 10,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 11,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 12,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 13,
+ .width = 10,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 23,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 25,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 38,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 41,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 73,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 74,
+ .width = 7,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 81,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 84,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 85,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 88,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 89,
+ .width = 1,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 93,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 141,
+ .width = 48,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 191,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 192,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 193,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP6_DIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 201,
+ .width = 128,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 329,
+ .width = 128,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 457,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 458,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 459,
+ .width = 1,
+ },
+ [VCAP_KF_L4_DPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 460,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 476,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 492,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 508,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SEQUENCE_EQ0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 509,
+ .width = 1,
+ },
+ [VCAP_KF_L4_FIN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 510,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SYN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 511,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RST] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 512,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PSH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 513,
+ .width = 1,
+ },
+ [VCAP_KF_L4_ACK] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 514,
+ .width = 1,
+ },
+ [VCAP_KF_L4_URG] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 515,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PAYLOAD] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 516,
+ .width = 64,
+ },
+};
+
+static const struct vcap_field es2_ip6_std_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 3,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 3,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 10,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 26,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 28,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 41,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 44,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 76,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 77,
+ .width = 7,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 84,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 87,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 88,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 91,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 92,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 96,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 97,
+ .width = 128,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 225,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 226,
+ .width = 8,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 234,
+ .width = 16,
+ },
+ [VCAP_KF_L3_PAYLOAD] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 250,
+ .width = 40,
+ },
+};
+
+/* keyfield_set */
+static const struct vcap_set is0_keyfield_set[] = {
+ [VCAP_KFS_NORMAL_7TUPLE] = {
+ .type_id = 0,
+ .sw_per_item = 12,
+ .sw_cnt = 1,
+ },
+ [VCAP_KFS_NORMAL_5TUPLE_IP4] = {
+ .type_id = 2,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+};
+
+static const struct vcap_set is2_keyfield_set[] = {
+ [VCAP_KFS_MAC_ETYPE] = {
+ .type_id = 0,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_ARP] = {
+ .type_id = 3,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP4_TCP_UDP] = {
+ .type_id = 4,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP4_OTHER] = {
+ .type_id = 5,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP6_STD] = {
+ .type_id = 6,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP_7TUPLE] = {
+ .type_id = 1,
+ .sw_per_item = 12,
+ .sw_cnt = 1,
+ },
+};
+
+static const struct vcap_set es0_keyfield_set[] = {
+ [VCAP_KFS_ISDX] = {
+ .type_id = 0,
+ .sw_per_item = 1,
+ .sw_cnt = 1,
+ },
+};
+
+static const struct vcap_set es2_keyfield_set[] = {
+ [VCAP_KFS_MAC_ETYPE] = {
+ .type_id = 0,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_ARP] = {
+ .type_id = 1,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP4_TCP_UDP] = {
+ .type_id = 2,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP4_OTHER] = {
+ .type_id = 3,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP_7TUPLE] = {
+ .type_id = -1,
+ .sw_per_item = 12,
+ .sw_cnt = 1,
+ },
+ [VCAP_KFS_IP6_STD] = {
+ .type_id = 4,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+};
+
+/* keyfield_set map */
+static const struct vcap_field *is0_keyfield_set_map[] = {
+ [VCAP_KFS_NORMAL_7TUPLE] = is0_normal_7tuple_keyfield,
+ [VCAP_KFS_NORMAL_5TUPLE_IP4] = is0_normal_5tuple_ip4_keyfield,
+};
+
+static const struct vcap_field *is2_keyfield_set_map[] = {
+ [VCAP_KFS_MAC_ETYPE] = is2_mac_etype_keyfield,
+ [VCAP_KFS_ARP] = is2_arp_keyfield,
+ [VCAP_KFS_IP4_TCP_UDP] = is2_ip4_tcp_udp_keyfield,
+ [VCAP_KFS_IP4_OTHER] = is2_ip4_other_keyfield,
+ [VCAP_KFS_IP6_STD] = is2_ip6_std_keyfield,
+ [VCAP_KFS_IP_7TUPLE] = is2_ip_7tuple_keyfield,
+};
+
+static const struct vcap_field *es0_keyfield_set_map[] = {
+ [VCAP_KFS_ISDX] = es0_isdx_keyfield,
+};
+
+static const struct vcap_field *es2_keyfield_set_map[] = {
+ [VCAP_KFS_MAC_ETYPE] = es2_mac_etype_keyfield,
+ [VCAP_KFS_ARP] = es2_arp_keyfield,
+ [VCAP_KFS_IP4_TCP_UDP] = es2_ip4_tcp_udp_keyfield,
+ [VCAP_KFS_IP4_OTHER] = es2_ip4_other_keyfield,
+ [VCAP_KFS_IP_7TUPLE] = es2_ip_7tuple_keyfield,
+ [VCAP_KFS_IP6_STD] = es2_ip6_std_keyfield,
+};
+
+/* keyfield_set map sizes */
+static int is0_keyfield_set_map_size[] = {
+ [VCAP_KFS_NORMAL_7TUPLE] = ARRAY_SIZE(is0_normal_7tuple_keyfield),
+ [VCAP_KFS_NORMAL_5TUPLE_IP4] = ARRAY_SIZE(is0_normal_5tuple_ip4_keyfield),
+};
+
+static int is2_keyfield_set_map_size[] = {
+ [VCAP_KFS_MAC_ETYPE] = ARRAY_SIZE(is2_mac_etype_keyfield),
+ [VCAP_KFS_ARP] = ARRAY_SIZE(is2_arp_keyfield),
+ [VCAP_KFS_IP4_TCP_UDP] = ARRAY_SIZE(is2_ip4_tcp_udp_keyfield),
+ [VCAP_KFS_IP4_OTHER] = ARRAY_SIZE(is2_ip4_other_keyfield),
+ [VCAP_KFS_IP6_STD] = ARRAY_SIZE(is2_ip6_std_keyfield),
+ [VCAP_KFS_IP_7TUPLE] = ARRAY_SIZE(is2_ip_7tuple_keyfield),
+};
+
+static int es0_keyfield_set_map_size[] = {
+ [VCAP_KFS_ISDX] = ARRAY_SIZE(es0_isdx_keyfield),
+};
+
+static int es2_keyfield_set_map_size[] = {
+ [VCAP_KFS_MAC_ETYPE] = ARRAY_SIZE(es2_mac_etype_keyfield),
+ [VCAP_KFS_ARP] = ARRAY_SIZE(es2_arp_keyfield),
+ [VCAP_KFS_IP4_TCP_UDP] = ARRAY_SIZE(es2_ip4_tcp_udp_keyfield),
+ [VCAP_KFS_IP4_OTHER] = ARRAY_SIZE(es2_ip4_other_keyfield),
+ [VCAP_KFS_IP_7TUPLE] = ARRAY_SIZE(es2_ip_7tuple_keyfield),
+ [VCAP_KFS_IP6_STD] = ARRAY_SIZE(es2_ip6_std_keyfield),
+};
+
+/* actionfields */
+static const struct vcap_field is0_classification_actionfield[] = {
+ [VCAP_AF_TYPE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_AF_DSCP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 1,
+ .width = 1,
+ },
+ [VCAP_AF_DSCP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 6,
+ },
+ [VCAP_AF_QOS_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 12,
+ .width = 1,
+ },
+ [VCAP_AF_QOS_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 13,
+ .width = 3,
+ },
+ [VCAP_AF_DP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 16,
+ .width = 1,
+ },
+ [VCAP_AF_DP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 17,
+ .width = 2,
+ },
+ [VCAP_AF_DEI_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 19,
+ .width = 1,
+ },
+ [VCAP_AF_DEI_VAL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 20,
+ .width = 1,
+ },
+ [VCAP_AF_PCP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 21,
+ .width = 1,
+ },
+ [VCAP_AF_PCP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 22,
+ .width = 3,
+ },
+ [VCAP_AF_MAP_LOOKUP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 25,
+ .width = 2,
+ },
+ [VCAP_AF_MAP_KEY] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 27,
+ .width = 3,
+ },
+ [VCAP_AF_MAP_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 30,
+ .width = 7,
+ },
+ [VCAP_AF_CLS_VID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 37,
+ .width = 3,
+ },
+ [VCAP_AF_VID_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 43,
+ .width = 13,
+ },
+ [VCAP_AF_ISDX_ADD_REPLACE_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 66,
+ .width = 1,
+ },
+ [VCAP_AF_ISDX_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 67,
+ .width = 10,
+ },
+ [VCAP_AF_PAG_OVERRIDE_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 107,
+ .width = 8,
+ },
+ [VCAP_AF_PAG_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 115,
+ .width = 8,
+ },
+ [VCAP_AF_NXT_IDX_CTRL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 167,
+ .width = 3,
+ },
+ [VCAP_AF_NXT_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 170,
+ .width = 10,
+ },
+};
+
+static const struct vcap_field is0_full_actionfield[] = {
+ [VCAP_AF_DSCP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_AF_DSCP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 1,
+ .width = 6,
+ },
+ [VCAP_AF_QOS_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 11,
+ .width = 1,
+ },
+ [VCAP_AF_QOS_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 12,
+ .width = 3,
+ },
+ [VCAP_AF_DP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_AF_DP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 2,
+ },
+ [VCAP_AF_DEI_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 18,
+ .width = 1,
+ },
+ [VCAP_AF_DEI_VAL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 19,
+ .width = 1,
+ },
+ [VCAP_AF_PCP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 20,
+ .width = 1,
+ },
+ [VCAP_AF_PCP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 21,
+ .width = 3,
+ },
+ [VCAP_AF_MAP_LOOKUP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 24,
+ .width = 2,
+ },
+ [VCAP_AF_MAP_KEY] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 26,
+ .width = 3,
+ },
+ [VCAP_AF_MAP_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 29,
+ .width = 7,
+ },
+ [VCAP_AF_CLS_VID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 36,
+ .width = 3,
+ },
+ [VCAP_AF_VID_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 42,
+ .width = 13,
+ },
+ [VCAP_AF_ISDX_ADD_REPLACE_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 65,
+ .width = 1,
+ },
+ [VCAP_AF_ISDX_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 66,
+ .width = 10,
+ },
+ [VCAP_AF_MASK_MODE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 76,
+ .width = 3,
+ },
+ [VCAP_AF_PORT_MASK] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 79,
+ .width = 37,
+ },
+ [VCAP_AF_PAG_OVERRIDE_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 174,
+ .width = 8,
+ },
+ [VCAP_AF_PAG_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 182,
+ .width = 8,
+ },
+ [VCAP_AF_NXT_IDX_CTRL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 266,
+ .width = 3,
+ },
+ [VCAP_AF_NXT_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 269,
+ .width = 10,
+ },
+};
+
+static const struct vcap_field is0_class_reduced_actionfield[] = {
+ [VCAP_AF_TYPE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_AF_QOS_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 5,
+ .width = 1,
+ },
+ [VCAP_AF_QOS_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 6,
+ .width = 3,
+ },
+ [VCAP_AF_DP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 9,
+ .width = 1,
+ },
+ [VCAP_AF_DP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 10,
+ .width = 2,
+ },
+ [VCAP_AF_MAP_LOOKUP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 12,
+ .width = 2,
+ },
+ [VCAP_AF_MAP_KEY] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 3,
+ },
+ [VCAP_AF_CLS_VID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 17,
+ .width = 3,
+ },
+ [VCAP_AF_VID_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 23,
+ .width = 13,
+ },
+ [VCAP_AF_ISDX_ADD_REPLACE_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 46,
+ .width = 1,
+ },
+ [VCAP_AF_ISDX_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 47,
+ .width = 10,
+ },
+ [VCAP_AF_NXT_IDX_CTRL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 89,
+ .width = 3,
+ },
+ [VCAP_AF_NXT_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 92,
+ .width = 10,
+ },
+};
+
+static const struct vcap_field is2_base_type_actionfield[] = {
+ [VCAP_AF_PIPELINE_FORCE_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 1,
+ .width = 1,
+ },
+ [VCAP_AF_PIPELINE_PT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 5,
+ },
+ [VCAP_AF_HIT_ME_ONCE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 7,
+ .width = 1,
+ },
+ [VCAP_AF_INTR_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 8,
+ .width = 1,
+ },
+ [VCAP_AF_CPU_COPY_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 9,
+ .width = 1,
+ },
+ [VCAP_AF_CPU_QUEUE_NUM] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 10,
+ .width = 3,
+ },
+ [VCAP_AF_LRN_DIS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_AF_RT_DIS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 16,
+ .width = 1,
+ },
+ [VCAP_AF_POLICE_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 17,
+ .width = 1,
+ },
+ [VCAP_AF_POLICE_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 18,
+ .width = 5,
+ },
+ [VCAP_AF_IGNORE_PIPELINE_CTRL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 23,
+ .width = 1,
+ },
+ [VCAP_AF_MASK_MODE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 27,
+ .width = 3,
+ },
+ [VCAP_AF_PORT_MASK] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 30,
+ .width = 37,
+ },
+ [VCAP_AF_MIRROR_PROBE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 78,
+ .width = 2,
+ },
+ [VCAP_AF_MATCH_ID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 131,
+ .width = 16,
+ },
+ [VCAP_AF_MATCH_ID_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 147,
+ .width = 16,
+ },
+ [VCAP_AF_CNT_ID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 163,
+ .width = 10,
+ },
+};
+
+static const struct vcap_field es0_es0_actionfield[] = {
+ [VCAP_AF_PUSH_OUTER_TAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_AF_PUSH_INNER_TAG] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 2,
+ .width = 1,
+ },
+ [VCAP_AF_TAG_A_TPID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 3,
+ .width = 3,
+ },
+ [VCAP_AF_TAG_A_VID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 6,
+ .width = 2,
+ },
+ [VCAP_AF_TAG_A_PCP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 8,
+ .width = 3,
+ },
+ [VCAP_AF_TAG_A_DEI_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 11,
+ .width = 3,
+ },
+ [VCAP_AF_TAG_B_TPID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 3,
+ },
+ [VCAP_AF_TAG_B_VID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 17,
+ .width = 2,
+ },
+ [VCAP_AF_TAG_B_PCP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 19,
+ .width = 3,
+ },
+ [VCAP_AF_TAG_B_DEI_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 22,
+ .width = 3,
+ },
+ [VCAP_AF_TAG_C_TPID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 25,
+ .width = 3,
+ },
+ [VCAP_AF_TAG_C_PCP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 28,
+ .width = 3,
+ },
+ [VCAP_AF_TAG_C_DEI_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 31,
+ .width = 3,
+ },
+ [VCAP_AF_VID_A_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 34,
+ .width = 12,
+ },
+ [VCAP_AF_PCP_A_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 46,
+ .width = 3,
+ },
+ [VCAP_AF_DEI_A_VAL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 49,
+ .width = 1,
+ },
+ [VCAP_AF_VID_B_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 50,
+ .width = 12,
+ },
+ [VCAP_AF_PCP_B_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 62,
+ .width = 3,
+ },
+ [VCAP_AF_DEI_B_VAL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 65,
+ .width = 1,
+ },
+ [VCAP_AF_VID_C_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 66,
+ .width = 12,
+ },
+ [VCAP_AF_PCP_C_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 78,
+ .width = 3,
+ },
+ [VCAP_AF_DEI_C_VAL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 81,
+ .width = 1,
+ },
+ [VCAP_AF_POP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 82,
+ .width = 2,
+ },
+ [VCAP_AF_UNTAG_VID_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 84,
+ .width = 1,
+ },
+ [VCAP_AF_PUSH_CUSTOMER_TAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 85,
+ .width = 2,
+ },
+ [VCAP_AF_TAG_C_VID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 87,
+ .width = 2,
+ },
+ [VCAP_AF_DSCP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 127,
+ .width = 3,
+ },
+ [VCAP_AF_DSCP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 130,
+ .width = 6,
+ },
+ [VCAP_AF_ESDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 319,
+ .width = 10,
+ },
+ [VCAP_AF_FWD_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 438,
+ .width = 2,
+ },
+ [VCAP_AF_CPU_QU] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 440,
+ .width = 3,
+ },
+ [VCAP_AF_PIPELINE_PT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 443,
+ .width = 2,
+ },
+ [VCAP_AF_PIPELINE_ACT] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 445,
+ .width = 1,
+ },
+ [VCAP_AF_SWAP_MACS_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 454,
+ .width = 1,
+ },
+ [VCAP_AF_LOOP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 455,
+ .width = 1,
+ },
+};
+
+static const struct vcap_field es2_base_type_actionfield[] = {
+ [VCAP_AF_HIT_ME_ONCE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_AF_INTR_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 1,
+ .width = 1,
+ },
+ [VCAP_AF_FWD_MODE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 2,
+ },
+ [VCAP_AF_COPY_QUEUE_NUM] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 4,
+ .width = 14,
+ },
+ [VCAP_AF_COPY_PORT_NUM] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 18,
+ .width = 6,
+ },
+ [VCAP_AF_MIRROR_PROBE_ID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 24,
+ .width = 2,
+ },
+ [VCAP_AF_CPU_COPY_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 26,
+ .width = 1,
+ },
+ [VCAP_AF_CPU_QUEUE_NUM] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 27,
+ .width = 3,
+ },
+ [VCAP_AF_POLICE_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 30,
+ .width = 1,
+ },
+ [VCAP_AF_POLICE_REMARK] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 31,
+ .width = 1,
+ },
+ [VCAP_AF_POLICE_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 32,
+ .width = 5,
+ },
+ [VCAP_AF_ES2_REW_CMD] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 37,
+ .width = 3,
+ },
+ [VCAP_AF_CNT_ID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 40,
+ .width = 9,
+ },
+ [VCAP_AF_IGNORE_PIPELINE_CTRL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 49,
+ .width = 1,
+ },
+};
+
+/* actionfield_set */
+static const struct vcap_set is0_actionfield_set[] = {
+ [VCAP_AFS_CLASSIFICATION] = {
+ .type_id = 1,
+ .sw_per_item = 2,
+ .sw_cnt = 6,
+ },
+ [VCAP_AFS_FULL] = {
+ .type_id = -1,
+ .sw_per_item = 3,
+ .sw_cnt = 4,
+ },
+ [VCAP_AFS_CLASS_REDUCED] = {
+ .type_id = 1,
+ .sw_per_item = 1,
+ .sw_cnt = 12,
+ },
+};
+
+static const struct vcap_set is2_actionfield_set[] = {
+ [VCAP_AFS_BASE_TYPE] = {
+ .type_id = -1,
+ .sw_per_item = 3,
+ .sw_cnt = 4,
+ },
+};
+
+static const struct vcap_set es0_actionfield_set[] = {
+ [VCAP_AFS_ES0] = {
+ .type_id = -1,
+ .sw_per_item = 1,
+ .sw_cnt = 1,
+ },
+};
+
+static const struct vcap_set es2_actionfield_set[] = {
+ [VCAP_AFS_BASE_TYPE] = {
+ .type_id = -1,
+ .sw_per_item = 3,
+ .sw_cnt = 4,
+ },
+};
+
+/* actionfield_set map */
+static const struct vcap_field *is0_actionfield_set_map[] = {
+ [VCAP_AFS_CLASSIFICATION] = is0_classification_actionfield,
+ [VCAP_AFS_FULL] = is0_full_actionfield,
+ [VCAP_AFS_CLASS_REDUCED] = is0_class_reduced_actionfield,
+};
+
+static const struct vcap_field *is2_actionfield_set_map[] = {
+ [VCAP_AFS_BASE_TYPE] = is2_base_type_actionfield,
+};
+
+static const struct vcap_field *es0_actionfield_set_map[] = {
+ [VCAP_AFS_ES0] = es0_es0_actionfield,
+};
+
+static const struct vcap_field *es2_actionfield_set_map[] = {
+ [VCAP_AFS_BASE_TYPE] = es2_base_type_actionfield,
+};
+
+/* actionfield_set map size */
+static int is0_actionfield_set_map_size[] = {
+ [VCAP_AFS_CLASSIFICATION] = ARRAY_SIZE(is0_classification_actionfield),
+ [VCAP_AFS_FULL] = ARRAY_SIZE(is0_full_actionfield),
+ [VCAP_AFS_CLASS_REDUCED] = ARRAY_SIZE(is0_class_reduced_actionfield),
+};
+
+static int is2_actionfield_set_map_size[] = {
+ [VCAP_AFS_BASE_TYPE] = ARRAY_SIZE(is2_base_type_actionfield),
+};
+
+static int es0_actionfield_set_map_size[] = {
+ [VCAP_AFS_ES0] = ARRAY_SIZE(es0_es0_actionfield),
+};
+
+static int es2_actionfield_set_map_size[] = {
+ [VCAP_AFS_BASE_TYPE] = ARRAY_SIZE(es2_base_type_actionfield),
+};
+
+/* Type Groups */
+static const struct vcap_typegroup is0_x12_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 5,
+ .value = 16,
+ },
+ {
+ .offset = 52,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 104,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 156,
+ .width = 3,
+ .value = 0,
+ },
+ {
+ .offset = 208,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 260,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 312,
+ .width = 4,
+ .value = 0,
+ },
+ {
+ .offset = 364,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 416,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 468,
+ .width = 3,
+ .value = 0,
+ },
+ {
+ .offset = 520,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 572,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is0_x6_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 4,
+ .value = 8,
+ },
+ {
+ .offset = 52,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 104,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 156,
+ .width = 3,
+ .value = 0,
+ },
+ {
+ .offset = 208,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 260,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is0_x3_keyfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup is0_x2_keyfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup is0_x1_keyfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup is2_x12_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 3,
+ .value = 4,
+ },
+ {
+ .offset = 156,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 312,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 468,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is2_x6_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 156,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is2_x3_keyfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup is2_x1_keyfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup es0_x1_keyfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup es2_x12_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 3,
+ .value = 4,
+ },
+ {
+ .offset = 156,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 312,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 468,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup es2_x6_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 156,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup es2_x3_keyfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup es2_x1_keyfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup *is0_keyfield_set_typegroups[] = {
+ [12] = is0_x12_keyfield_set_typegroups,
+ [6] = is0_x6_keyfield_set_typegroups,
+ [3] = is0_x3_keyfield_set_typegroups,
+ [2] = is0_x2_keyfield_set_typegroups,
+ [1] = is0_x1_keyfield_set_typegroups,
+ [13] = NULL,
+};
+
+static const struct vcap_typegroup *is2_keyfield_set_typegroups[] = {
+ [12] = is2_x12_keyfield_set_typegroups,
+ [6] = is2_x6_keyfield_set_typegroups,
+ [3] = is2_x3_keyfield_set_typegroups,
+ [1] = is2_x1_keyfield_set_typegroups,
+ [13] = NULL,
+};
+
+static const struct vcap_typegroup *es0_keyfield_set_typegroups[] = {
+ [1] = es0_x1_keyfield_set_typegroups,
+ [2] = NULL,
+};
+
+static const struct vcap_typegroup *es2_keyfield_set_typegroups[] = {
+ [12] = es2_x12_keyfield_set_typegroups,
+ [6] = es2_x6_keyfield_set_typegroups,
+ [3] = es2_x3_keyfield_set_typegroups,
+ [1] = es2_x1_keyfield_set_typegroups,
+ [13] = NULL,
+};
+
+static const struct vcap_typegroup is0_x3_actionfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 3,
+ .value = 4,
+ },
+ {
+ .offset = 103,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 206,
+ .width = 2,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is0_x2_actionfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 103,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is0_x1_actionfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 1,
+ .value = 1,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is2_x3_actionfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 95,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 190,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is2_x1_actionfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup es0_x1_actionfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup es2_x3_actionfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 19,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 38,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup es2_x1_actionfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup *is0_actionfield_set_typegroups[] = {
+ [3] = is0_x3_actionfield_set_typegroups,
+ [2] = is0_x2_actionfield_set_typegroups,
+ [1] = is0_x1_actionfield_set_typegroups,
+ [13] = NULL,
+};
+
+static const struct vcap_typegroup *is2_actionfield_set_typegroups[] = {
+ [3] = is2_x3_actionfield_set_typegroups,
+ [1] = is2_x1_actionfield_set_typegroups,
+ [13] = NULL,
+};
+
+static const struct vcap_typegroup *es0_actionfield_set_typegroups[] = {
+ [1] = es0_x1_actionfield_set_typegroups,
+ [2] = NULL,
+};
+
+static const struct vcap_typegroup *es2_actionfield_set_typegroups[] = {
+ [3] = es2_x3_actionfield_set_typegroups,
+ [1] = es2_x1_actionfield_set_typegroups,
+ [13] = NULL,
+};
+
+/* Keyfieldset names */
+static const char * const vcap_keyfield_set_names[] = {
+ [VCAP_KFS_NO_VALUE] = "(None)",
+ [VCAP_KFS_ARP] = "VCAP_KFS_ARP",
+ [VCAP_KFS_ETAG] = "VCAP_KFS_ETAG",
+ [VCAP_KFS_IP4_OTHER] = "VCAP_KFS_IP4_OTHER",
+ [VCAP_KFS_IP4_TCP_UDP] = "VCAP_KFS_IP4_TCP_UDP",
+ [VCAP_KFS_IP4_VID] = "VCAP_KFS_IP4_VID",
+ [VCAP_KFS_IP6_OTHER] = "VCAP_KFS_IP6_OTHER",
+ [VCAP_KFS_IP6_STD] = "VCAP_KFS_IP6_STD",
+ [VCAP_KFS_IP6_TCP_UDP] = "VCAP_KFS_IP6_TCP_UDP",
+ [VCAP_KFS_IP6_VID] = "VCAP_KFS_IP6_VID",
+ [VCAP_KFS_IP_7TUPLE] = "VCAP_KFS_IP_7TUPLE",
+ [VCAP_KFS_ISDX] = "VCAP_KFS_ISDX",
+ [VCAP_KFS_LL_FULL] = "VCAP_KFS_LL_FULL",
+ [VCAP_KFS_MAC_ETYPE] = "VCAP_KFS_MAC_ETYPE",
+ [VCAP_KFS_MAC_LLC] = "VCAP_KFS_MAC_LLC",
+ [VCAP_KFS_MAC_SNAP] = "VCAP_KFS_MAC_SNAP",
+ [VCAP_KFS_NORMAL_5TUPLE_IP4] = "VCAP_KFS_NORMAL_5TUPLE_IP4",
+ [VCAP_KFS_NORMAL_7TUPLE] = "VCAP_KFS_NORMAL_7TUPLE",
+ [VCAP_KFS_OAM] = "VCAP_KFS_OAM",
+ [VCAP_KFS_PURE_5TUPLE_IP4] = "VCAP_KFS_PURE_5TUPLE_IP4",
+ [VCAP_KFS_SMAC_SIP4] = "VCAP_KFS_SMAC_SIP4",
+ [VCAP_KFS_SMAC_SIP6] = "VCAP_KFS_SMAC_SIP6",
+};
+
+/* Actionfieldset names */
+static const char * const vcap_actionfield_set_names[] = {
+ [VCAP_AFS_NO_VALUE] = "(None)",
+ [VCAP_AFS_BASE_TYPE] = "VCAP_AFS_BASE_TYPE",
+ [VCAP_AFS_CLASSIFICATION] = "VCAP_AFS_CLASSIFICATION",
+ [VCAP_AFS_CLASS_REDUCED] = "VCAP_AFS_CLASS_REDUCED",
+ [VCAP_AFS_ES0] = "VCAP_AFS_ES0",
+ [VCAP_AFS_FULL] = "VCAP_AFS_FULL",
+ [VCAP_AFS_SMAC_SIP] = "VCAP_AFS_SMAC_SIP",
+};
+
+/* Keyfield names */
+static const char * const vcap_keyfield_names[] = {
+ [VCAP_KF_NO_VALUE] = "(None)",
+ [VCAP_KF_8021BR_ECID_BASE] = "8021BR_ECID_BASE",
+ [VCAP_KF_8021BR_ECID_EXT] = "8021BR_ECID_EXT",
+ [VCAP_KF_8021BR_E_TAGGED] = "8021BR_E_TAGGED",
+ [VCAP_KF_8021BR_GRP] = "8021BR_GRP",
+ [VCAP_KF_8021BR_IGR_ECID_BASE] = "8021BR_IGR_ECID_BASE",
+ [VCAP_KF_8021BR_IGR_ECID_EXT] = "8021BR_IGR_ECID_EXT",
+ [VCAP_KF_8021Q_DEI0] = "8021Q_DEI0",
+ [VCAP_KF_8021Q_DEI1] = "8021Q_DEI1",
+ [VCAP_KF_8021Q_DEI2] = "8021Q_DEI2",
+ [VCAP_KF_8021Q_DEI_CLS] = "8021Q_DEI_CLS",
+ [VCAP_KF_8021Q_PCP0] = "8021Q_PCP0",
+ [VCAP_KF_8021Q_PCP1] = "8021Q_PCP1",
+ [VCAP_KF_8021Q_PCP2] = "8021Q_PCP2",
+ [VCAP_KF_8021Q_PCP_CLS] = "8021Q_PCP_CLS",
+ [VCAP_KF_8021Q_TPID] = "8021Q_TPID",
+ [VCAP_KF_8021Q_TPID0] = "8021Q_TPID0",
+ [VCAP_KF_8021Q_TPID1] = "8021Q_TPID1",
+ [VCAP_KF_8021Q_TPID2] = "8021Q_TPID2",
+ [VCAP_KF_8021Q_VID0] = "8021Q_VID0",
+ [VCAP_KF_8021Q_VID1] = "8021Q_VID1",
+ [VCAP_KF_8021Q_VID2] = "8021Q_VID2",
+ [VCAP_KF_8021Q_VID_CLS] = "8021Q_VID_CLS",
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = "8021Q_VLAN_TAGGED_IS",
+ [VCAP_KF_8021Q_VLAN_TAGS] = "8021Q_VLAN_TAGS",
+ [VCAP_KF_ACL_GRP_ID] = "ACL_GRP_ID",
+ [VCAP_KF_ARP_ADDR_SPACE_OK_IS] = "ARP_ADDR_SPACE_OK_IS",
+ [VCAP_KF_ARP_LEN_OK_IS] = "ARP_LEN_OK_IS",
+ [VCAP_KF_ARP_OPCODE] = "ARP_OPCODE",
+ [VCAP_KF_ARP_OPCODE_UNKNOWN_IS] = "ARP_OPCODE_UNKNOWN_IS",
+ [VCAP_KF_ARP_PROTO_SPACE_OK_IS] = "ARP_PROTO_SPACE_OK_IS",
+ [VCAP_KF_ARP_SENDER_MATCH_IS] = "ARP_SENDER_MATCH_IS",
+ [VCAP_KF_ARP_TGT_MATCH_IS] = "ARP_TGT_MATCH_IS",
+ [VCAP_KF_COSID_CLS] = "COSID_CLS",
+ [VCAP_KF_ES0_ISDX_KEY_ENA] = "ES0_ISDX_KEY_ENA",
+ [VCAP_KF_ETYPE] = "ETYPE",
+ [VCAP_KF_ETYPE_LEN_IS] = "ETYPE_LEN_IS",
+ [VCAP_KF_HOST_MATCH] = "HOST_MATCH",
+ [VCAP_KF_IF_EGR_PORT_MASK] = "IF_EGR_PORT_MASK",
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = "IF_EGR_PORT_MASK_RNG",
+ [VCAP_KF_IF_EGR_PORT_NO] = "IF_EGR_PORT_NO",
+ [VCAP_KF_IF_IGR_PORT] = "IF_IGR_PORT",
+ [VCAP_KF_IF_IGR_PORT_MASK] = "IF_IGR_PORT_MASK",
+ [VCAP_KF_IF_IGR_PORT_MASK_L3] = "IF_IGR_PORT_MASK_L3",
+ [VCAP_KF_IF_IGR_PORT_MASK_RNG] = "IF_IGR_PORT_MASK_RNG",
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = "IF_IGR_PORT_MASK_SEL",
+ [VCAP_KF_IF_IGR_PORT_SEL] = "IF_IGR_PORT_SEL",
+ [VCAP_KF_IP4_IS] = "IP4_IS",
+ [VCAP_KF_IP_MC_IS] = "IP_MC_IS",
+ [VCAP_KF_IP_PAYLOAD_5TUPLE] = "IP_PAYLOAD_5TUPLE",
+ [VCAP_KF_IP_SNAP_IS] = "IP_SNAP_IS",
+ [VCAP_KF_ISDX_CLS] = "ISDX_CLS",
+ [VCAP_KF_ISDX_GT0_IS] = "ISDX_GT0_IS",
+ [VCAP_KF_L2_BC_IS] = "L2_BC_IS",
+ [VCAP_KF_L2_DMAC] = "L2_DMAC",
+ [VCAP_KF_L2_FRM_TYPE] = "L2_FRM_TYPE",
+ [VCAP_KF_L2_FWD_IS] = "L2_FWD_IS",
+ [VCAP_KF_L2_LLC] = "L2_LLC",
+ [VCAP_KF_L2_MC_IS] = "L2_MC_IS",
+ [VCAP_KF_L2_PAYLOAD0] = "L2_PAYLOAD0",
+ [VCAP_KF_L2_PAYLOAD1] = "L2_PAYLOAD1",
+ [VCAP_KF_L2_PAYLOAD2] = "L2_PAYLOAD2",
+ [VCAP_KF_L2_PAYLOAD_ETYPE] = "L2_PAYLOAD_ETYPE",
+ [VCAP_KF_L2_SMAC] = "L2_SMAC",
+ [VCAP_KF_L2_SNAP] = "L2_SNAP",
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = "L3_DIP_EQ_SIP_IS",
+ [VCAP_KF_L3_DPL_CLS] = "L3_DPL_CLS",
+ [VCAP_KF_L3_DSCP] = "L3_DSCP",
+ [VCAP_KF_L3_DST_IS] = "L3_DST_IS",
+ [VCAP_KF_L3_FRAGMENT] = "L3_FRAGMENT",
+ [VCAP_KF_L3_FRAGMENT_TYPE] = "L3_FRAGMENT_TYPE",
+ [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = "L3_FRAG_INVLD_L4_LEN",
+ [VCAP_KF_L3_FRAG_OFS_GT0] = "L3_FRAG_OFS_GT0",
+ [VCAP_KF_L3_IP4_DIP] = "L3_IP4_DIP",
+ [VCAP_KF_L3_IP4_SIP] = "L3_IP4_SIP",
+ [VCAP_KF_L3_IP6_DIP] = "L3_IP6_DIP",
+ [VCAP_KF_L3_IP6_SIP] = "L3_IP6_SIP",
+ [VCAP_KF_L3_IP_PROTO] = "L3_IP_PROTO",
+ [VCAP_KF_L3_OPTIONS_IS] = "L3_OPTIONS_IS",
+ [VCAP_KF_L3_PAYLOAD] = "L3_PAYLOAD",
+ [VCAP_KF_L3_RT_IS] = "L3_RT_IS",
+ [VCAP_KF_L3_TOS] = "L3_TOS",
+ [VCAP_KF_L3_TTL_GT0] = "L3_TTL_GT0",
+ [VCAP_KF_L4_1588_DOM] = "L4_1588_DOM",
+ [VCAP_KF_L4_1588_VER] = "L4_1588_VER",
+ [VCAP_KF_L4_ACK] = "L4_ACK",
+ [VCAP_KF_L4_DPORT] = "L4_DPORT",
+ [VCAP_KF_L4_FIN] = "L4_FIN",
+ [VCAP_KF_L4_PAYLOAD] = "L4_PAYLOAD",
+ [VCAP_KF_L4_PSH] = "L4_PSH",
+ [VCAP_KF_L4_RNG] = "L4_RNG",
+ [VCAP_KF_L4_RST] = "L4_RST",
+ [VCAP_KF_L4_SEQUENCE_EQ0_IS] = "L4_SEQUENCE_EQ0_IS",
+ [VCAP_KF_L4_SPORT] = "L4_SPORT",
+ [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = "L4_SPORT_EQ_DPORT_IS",
+ [VCAP_KF_L4_SYN] = "L4_SYN",
+ [VCAP_KF_L4_URG] = "L4_URG",
+ [VCAP_KF_LOOKUP_FIRST_IS] = "LOOKUP_FIRST_IS",
+ [VCAP_KF_LOOKUP_GEN_IDX] = "LOOKUP_GEN_IDX",
+ [VCAP_KF_LOOKUP_GEN_IDX_SEL] = "LOOKUP_GEN_IDX_SEL",
+ [VCAP_KF_LOOKUP_PAG] = "LOOKUP_PAG",
+ [VCAP_KF_MIRROR_PROBE] = "MIRROR_PROBE",
+ [VCAP_KF_OAM_CCM_CNTS_EQ0] = "OAM_CCM_CNTS_EQ0",
+ [VCAP_KF_OAM_DETECTED] = "OAM_DETECTED",
+ [VCAP_KF_OAM_FLAGS] = "OAM_FLAGS",
+ [VCAP_KF_OAM_MEL_FLAGS] = "OAM_MEL_FLAGS",
+ [VCAP_KF_OAM_MEPID] = "OAM_MEPID",
+ [VCAP_KF_OAM_OPCODE] = "OAM_OPCODE",
+ [VCAP_KF_OAM_VER] = "OAM_VER",
+ [VCAP_KF_OAM_Y1731_IS] = "OAM_Y1731_IS",
+ [VCAP_KF_PROT_ACTIVE] = "PROT_ACTIVE",
+ [VCAP_KF_TCP_IS] = "TCP_IS",
+ [VCAP_KF_TCP_UDP_IS] = "TCP_UDP_IS",
+ [VCAP_KF_TYPE] = "TYPE",
+};
+
+/* Actionfield names */
+static const char * const vcap_actionfield_names[] = {
+ [VCAP_AF_NO_VALUE] = "(None)",
+ [VCAP_AF_ACL_ID] = "ACL_ID",
+ [VCAP_AF_CLS_VID_SEL] = "CLS_VID_SEL",
+ [VCAP_AF_CNT_ID] = "CNT_ID",
+ [VCAP_AF_COPY_PORT_NUM] = "COPY_PORT_NUM",
+ [VCAP_AF_COPY_QUEUE_NUM] = "COPY_QUEUE_NUM",
+ [VCAP_AF_CPU_COPY_ENA] = "CPU_COPY_ENA",
+ [VCAP_AF_CPU_QU] = "CPU_QU",
+ [VCAP_AF_CPU_QUEUE_NUM] = "CPU_QUEUE_NUM",
+ [VCAP_AF_DEI_A_VAL] = "DEI_A_VAL",
+ [VCAP_AF_DEI_B_VAL] = "DEI_B_VAL",
+ [VCAP_AF_DEI_C_VAL] = "DEI_C_VAL",
+ [VCAP_AF_DEI_ENA] = "DEI_ENA",
+ [VCAP_AF_DEI_VAL] = "DEI_VAL",
+ [VCAP_AF_DP_ENA] = "DP_ENA",
+ [VCAP_AF_DP_VAL] = "DP_VAL",
+ [VCAP_AF_DSCP_ENA] = "DSCP_ENA",
+ [VCAP_AF_DSCP_SEL] = "DSCP_SEL",
+ [VCAP_AF_DSCP_VAL] = "DSCP_VAL",
+ [VCAP_AF_ES2_REW_CMD] = "ES2_REW_CMD",
+ [VCAP_AF_ESDX] = "ESDX",
+ [VCAP_AF_FWD_KILL_ENA] = "FWD_KILL_ENA",
+ [VCAP_AF_FWD_MODE] = "FWD_MODE",
+ [VCAP_AF_FWD_SEL] = "FWD_SEL",
+ [VCAP_AF_HIT_ME_ONCE] = "HIT_ME_ONCE",
+ [VCAP_AF_HOST_MATCH] = "HOST_MATCH",
+ [VCAP_AF_IGNORE_PIPELINE_CTRL] = "IGNORE_PIPELINE_CTRL",
+ [VCAP_AF_INTR_ENA] = "INTR_ENA",
+ [VCAP_AF_ISDX_ADD_REPLACE_SEL] = "ISDX_ADD_REPLACE_SEL",
+ [VCAP_AF_ISDX_ENA] = "ISDX_ENA",
+ [VCAP_AF_ISDX_VAL] = "ISDX_VAL",
+ [VCAP_AF_LOOP_ENA] = "LOOP_ENA",
+ [VCAP_AF_LRN_DIS] = "LRN_DIS",
+ [VCAP_AF_MAP_IDX] = "MAP_IDX",
+ [VCAP_AF_MAP_KEY] = "MAP_KEY",
+ [VCAP_AF_MAP_LOOKUP_SEL] = "MAP_LOOKUP_SEL",
+ [VCAP_AF_MASK_MODE] = "MASK_MODE",
+ [VCAP_AF_MATCH_ID] = "MATCH_ID",
+ [VCAP_AF_MATCH_ID_MASK] = "MATCH_ID_MASK",
+ [VCAP_AF_MIRROR_ENA] = "MIRROR_ENA",
+ [VCAP_AF_MIRROR_PROBE] = "MIRROR_PROBE",
+ [VCAP_AF_MIRROR_PROBE_ID] = "MIRROR_PROBE_ID",
+ [VCAP_AF_NXT_IDX] = "NXT_IDX",
+ [VCAP_AF_NXT_IDX_CTRL] = "NXT_IDX_CTRL",
+ [VCAP_AF_PAG_OVERRIDE_MASK] = "PAG_OVERRIDE_MASK",
+ [VCAP_AF_PAG_VAL] = "PAG_VAL",
+ [VCAP_AF_PCP_A_VAL] = "PCP_A_VAL",
+ [VCAP_AF_PCP_B_VAL] = "PCP_B_VAL",
+ [VCAP_AF_PCP_C_VAL] = "PCP_C_VAL",
+ [VCAP_AF_PCP_ENA] = "PCP_ENA",
+ [VCAP_AF_PCP_VAL] = "PCP_VAL",
+ [VCAP_AF_PIPELINE_ACT] = "PIPELINE_ACT",
+ [VCAP_AF_PIPELINE_FORCE_ENA] = "PIPELINE_FORCE_ENA",
+ [VCAP_AF_PIPELINE_PT] = "PIPELINE_PT",
+ [VCAP_AF_POLICE_ENA] = "POLICE_ENA",
+ [VCAP_AF_POLICE_IDX] = "POLICE_IDX",
+ [VCAP_AF_POLICE_REMARK] = "POLICE_REMARK",
+ [VCAP_AF_POLICE_VCAP_ONLY] = "POLICE_VCAP_ONLY",
+ [VCAP_AF_POP_VAL] = "POP_VAL",
+ [VCAP_AF_PORT_MASK] = "PORT_MASK",
+ [VCAP_AF_PUSH_CUSTOMER_TAG] = "PUSH_CUSTOMER_TAG",
+ [VCAP_AF_PUSH_INNER_TAG] = "PUSH_INNER_TAG",
+ [VCAP_AF_PUSH_OUTER_TAG] = "PUSH_OUTER_TAG",
+ [VCAP_AF_QOS_ENA] = "QOS_ENA",
+ [VCAP_AF_QOS_VAL] = "QOS_VAL",
+ [VCAP_AF_REW_OP] = "REW_OP",
+ [VCAP_AF_RT_DIS] = "RT_DIS",
+ [VCAP_AF_SWAP_MACS_ENA] = "SWAP_MACS_ENA",
+ [VCAP_AF_TAG_A_DEI_SEL] = "TAG_A_DEI_SEL",
+ [VCAP_AF_TAG_A_PCP_SEL] = "TAG_A_PCP_SEL",
+ [VCAP_AF_TAG_A_TPID_SEL] = "TAG_A_TPID_SEL",
+ [VCAP_AF_TAG_A_VID_SEL] = "TAG_A_VID_SEL",
+ [VCAP_AF_TAG_B_DEI_SEL] = "TAG_B_DEI_SEL",
+ [VCAP_AF_TAG_B_PCP_SEL] = "TAG_B_PCP_SEL",
+ [VCAP_AF_TAG_B_TPID_SEL] = "TAG_B_TPID_SEL",
+ [VCAP_AF_TAG_B_VID_SEL] = "TAG_B_VID_SEL",
+ [VCAP_AF_TAG_C_DEI_SEL] = "TAG_C_DEI_SEL",
+ [VCAP_AF_TAG_C_PCP_SEL] = "TAG_C_PCP_SEL",
+ [VCAP_AF_TAG_C_TPID_SEL] = "TAG_C_TPID_SEL",
+ [VCAP_AF_TAG_C_VID_SEL] = "TAG_C_VID_SEL",
+ [VCAP_AF_TYPE] = "TYPE",
+ [VCAP_AF_UNTAG_VID_ENA] = "UNTAG_VID_ENA",
+ [VCAP_AF_VID_A_VAL] = "VID_A_VAL",
+ [VCAP_AF_VID_B_VAL] = "VID_B_VAL",
+ [VCAP_AF_VID_C_VAL] = "VID_C_VAL",
+ [VCAP_AF_VID_VAL] = "VID_VAL",
+};
+
+/* VCAPs */
+const struct vcap_info lan969x_vcaps[] = {
+ [VCAP_TYPE_IS0] = {
+ .name = "is0",
+ .rows = 256,
+ .sw_count = 12,
+ .sw_width = 52,
+ .sticky_width = 1,
+ .act_width = 103,
+ .default_cnt = 70,
+ .require_cnt_dis = 0,
+ .version = 1,
+ .keyfield_set = is0_keyfield_set,
+ .keyfield_set_size = ARRAY_SIZE(is0_keyfield_set),
+ .actionfield_set = is0_actionfield_set,
+ .actionfield_set_size = ARRAY_SIZE(is0_actionfield_set),
+ .keyfield_set_map = is0_keyfield_set_map,
+ .keyfield_set_map_size = is0_keyfield_set_map_size,
+ .actionfield_set_map = is0_actionfield_set_map,
+ .actionfield_set_map_size = is0_actionfield_set_map_size,
+ .keyfield_set_typegroups = is0_keyfield_set_typegroups,
+ .actionfield_set_typegroups = is0_actionfield_set_typegroups,
+ },
+ [VCAP_TYPE_IS2] = {
+ .name = "is2",
+ .rows = 256,
+ .sw_count = 12,
+ .sw_width = 52,
+ .sticky_width = 1,
+ .act_width = 103,
+ .default_cnt = 38,
+ .require_cnt_dis = 0,
+ .version = 1,
+ .keyfield_set = is2_keyfield_set,
+ .keyfield_set_size = ARRAY_SIZE(is2_keyfield_set),
+ .actionfield_set = is2_actionfield_set,
+ .actionfield_set_size = ARRAY_SIZE(is2_actionfield_set),
+ .keyfield_set_map = is2_keyfield_set_map,
+ .keyfield_set_map_size = is2_keyfield_set_map_size,
+ .actionfield_set_map = is2_actionfield_set_map,
+ .actionfield_set_map_size = is2_actionfield_set_map_size,
+ .keyfield_set_typegroups = is2_keyfield_set_typegroups,
+ .actionfield_set_typegroups = is2_actionfield_set_typegroups,
+ },
+ [VCAP_TYPE_ES0] = {
+ .name = "es0",
+ .rows = 1536,
+ .sw_count = 1,
+ .sw_width = 51,
+ .sticky_width = 1,
+ .act_width = 469,
+ .default_cnt = 35,
+ .require_cnt_dis = 0,
+ .version = 1,
+ .keyfield_set = es0_keyfield_set,
+ .keyfield_set_size = ARRAY_SIZE(es0_keyfield_set),
+ .actionfield_set = es0_actionfield_set,
+ .actionfield_set_size = ARRAY_SIZE(es0_actionfield_set),
+ .keyfield_set_map = es0_keyfield_set_map,
+ .keyfield_set_map_size = es0_keyfield_set_map_size,
+ .actionfield_set_map = es0_actionfield_set_map,
+ .actionfield_set_map_size = es0_actionfield_set_map_size,
+ .keyfield_set_typegroups = es0_keyfield_set_typegroups,
+ .actionfield_set_typegroups = es0_actionfield_set_typegroups,
+ },
+ [VCAP_TYPE_ES2] = {
+ .name = "es2",
+ .rows = 256,
+ .sw_count = 12,
+ .sw_width = 52,
+ .sticky_width = 1,
+ .act_width = 19,
+ .default_cnt = 39,
+ .require_cnt_dis = 0,
+ .version = 1,
+ .keyfield_set = es2_keyfield_set,
+ .keyfield_set_size = ARRAY_SIZE(es2_keyfield_set),
+ .actionfield_set = es2_actionfield_set,
+ .actionfield_set_size = ARRAY_SIZE(es2_actionfield_set),
+ .keyfield_set_map = es2_keyfield_set_map,
+ .keyfield_set_map_size = es2_keyfield_set_map_size,
+ .actionfield_set_map = es2_actionfield_set_map,
+ .actionfield_set_map_size = es2_actionfield_set_map_size,
+ .keyfield_set_typegroups = es2_keyfield_set_typegroups,
+ .actionfield_set_typegroups = es2_actionfield_set_typegroups,
+ },
+};
+
+const struct vcap_statistics lan969x_vcap_stats = {
+ .name = "lan969x",
+ .count = 4,
+ .keyfield_set_names = vcap_keyfield_set_names,
+ .actionfield_set_names = vcap_actionfield_set_names,
+ .keyfield_names = vcap_keyfield_names,
+ .actionfield_names = vcap_actionfield_names,
+};
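
The tables above are consumed through the shared VCAP API, which resolves a key field to its bit position within a chosen key set. As a minimal sketch (not part of the patch; the helper name and the width-based presence check are assumptions), a lookup against the generated ES2 tables could look like:

static const struct vcap_field *
es2_find_keyfield(enum vcap_keyfield_set kfs, enum vcap_key_field kf)
{
	const struct vcap_field *fields;
	int count;

	/* Key sets outside the generated ES2 map are not supported */
	if (kfs >= ARRAY_SIZE(es2_keyfield_set_map))
		return NULL;

	fields = es2_keyfield_set_map[kfs];
	count = es2_keyfield_set_map_size[kfs];

	/* Entries left zero-initialized (width == 0) are not part of the set */
	if (!fields || kf >= count || fields[kf].width == 0)
		return NULL;
	return &fields[kf];
}

The same pattern applies to the IS0, IS2 and ES0 maps; the driver itself is expected to go through the generic vcap_api layer rather than indexing these arrays directly.
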
diff --git a/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_vcap_impl.c b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_vcap_impl.c
new file mode 100644
index 000000000000..543a1f2bf6bd
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_vcap_impl.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "vcap_api.h"
+#include "lan969x.h"
+
+const struct sparx5_vcap_inst lan969x_vcap_inst_cfg[] = {
+ {
+ .vtype = VCAP_TYPE_IS0, /* CLM-0 */
+ .vinst = 0,
+ .map_id = 1,
+ .lookups = SPARX5_IS0_LOOKUPS,
+ .lookups_per_instance = SPARX5_IS0_LOOKUPS / 3,
+ .first_cid = SPARX5_VCAP_CID_IS0_L0,
+ .last_cid = SPARX5_VCAP_CID_IS0_L2 - 1,
+ .blockno = 2,
+ .blocks = 1,
+ .ingress = true,
+ },
+ {
+ .vtype = VCAP_TYPE_IS0, /* CLM-1 */
+ .vinst = 1,
+ .map_id = 2,
+ .lookups = SPARX5_IS0_LOOKUPS,
+ .lookups_per_instance = SPARX5_IS0_LOOKUPS / 3,
+ .first_cid = SPARX5_VCAP_CID_IS0_L2,
+ .last_cid = SPARX5_VCAP_CID_IS0_L4 - 1,
+ .blockno = 3,
+ .blocks = 1,
+ .ingress = true,
+ },
+ {
+ .vtype = VCAP_TYPE_IS0, /* CLM-2 */
+ .vinst = 2,
+ .map_id = 3,
+ .lookups = SPARX5_IS0_LOOKUPS,
+ .lookups_per_instance = SPARX5_IS0_LOOKUPS / 3,
+ .first_cid = SPARX5_VCAP_CID_IS0_L4,
+ .last_cid = SPARX5_VCAP_CID_IS0_MAX,
+ .blockno = 4,
+ .blocks = 1,
+ .ingress = true,
+ },
+ {
+ .vtype = VCAP_TYPE_IS2, /* IS2-0 */
+ .vinst = 0,
+ .map_id = 4,
+ .lookups = SPARX5_IS2_LOOKUPS,
+ .lookups_per_instance = SPARX5_IS2_LOOKUPS / 2,
+ .first_cid = SPARX5_VCAP_CID_IS2_L0,
+ .last_cid = SPARX5_VCAP_CID_IS2_L2 - 1,
+ .blockno = 0,
+ .blocks = 1,
+ .ingress = true,
+ },
+ {
+ .vtype = VCAP_TYPE_IS2, /* IS2-1 */
+ .vinst = 1,
+ .map_id = 5,
+ .lookups = SPARX5_IS2_LOOKUPS,
+ .lookups_per_instance = SPARX5_IS2_LOOKUPS / 2,
+ .first_cid = SPARX5_VCAP_CID_IS2_L2,
+ .last_cid = SPARX5_VCAP_CID_IS2_MAX,
+ .blockno = 1,
+ .blocks = 1,
+ .ingress = true,
+ },
+ {
+ .vtype = VCAP_TYPE_ES0,
+ .lookups = SPARX5_ES0_LOOKUPS,
+ .lookups_per_instance = SPARX5_ES0_LOOKUPS,
+ .first_cid = SPARX5_VCAP_CID_ES0_L0,
+ .last_cid = SPARX5_VCAP_CID_ES0_MAX,
+ .count = 1536,
+ .ingress = false,
+ },
+ {
+ .vtype = VCAP_TYPE_ES2,
+ .lookups = SPARX5_ES2_LOOKUPS,
+ .lookups_per_instance = SPARX5_ES2_LOOKUPS,
+ .first_cid = SPARX5_VCAP_CID_ES2_L0,
+ .last_cid = SPARX5_VCAP_CID_ES2_MAX,
+ .count = 1024,
+ .ingress = false,
+ },
+};
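
Each entry above binds one hardware VCAP instance to a slice of the lookup chain (first_cid..last_cid) and states how many of the type's lookups it serves. A hypothetical sanity check (illustration only, not part of the patch) could sum the per-instance lookups for a given type:

static int lan969x_vcap_count_lookups(enum vcap_type vtype)
{
	int i, n = 0;

	/* Sum lookups_per_instance over all instances of the given type */
	for (i = 0; i < ARRAY_SIZE(lan969x_vcap_inst_cfg); i++)
		if (lan969x_vcap_inst_cfg[i].vtype == vtype)
			n += lan969x_vcap_inst_cfg[i].lookups_per_instance;
	return n;
}

For VCAP_TYPE_IS0 this should add up across the three CLM instances to SPARX5_IS0_LOOKUPS, matching the .lookups field of each entry.
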
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c b/drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c
index 76a8bb596aec..5c46d81de530 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c
@@ -15,9 +15,6 @@
#define SPX5_CALBITS_PER_PORT 3 /* Bit per port in calendar register */
/* DSM calendar information */
-#define SPX5_DSM_CAL_LEN 64
-#define SPX5_DSM_CAL_EMPTY 0xFFFF
-#define SPX5_DSM_CAL_MAX_DEVS_PER_TAXI 13
#define SPX5_DSM_CAL_TAXIS 8
#define SPX5_DSM_CAL_BW_LOSS 553
@@ -37,19 +34,6 @@ static u32 sparx5_taxi_ports[SPX5_DSM_CAL_TAXIS][SPX5_DSM_CAL_MAX_DEVS_PER_TAXI]
{64, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99},
};
-struct sparx5_calendar_data {
- u32 schedule[SPX5_DSM_CAL_LEN];
- u32 avg_dist[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
- u32 taxi_ports[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
- u32 taxi_speeds[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
- u32 dev_slots[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
- u32 new_slots[SPX5_DSM_CAL_LEN];
- u32 temp_sched[SPX5_DSM_CAL_LEN];
- u32 indices[SPX5_DSM_CAL_LEN];
- u32 short_list[SPX5_DSM_CAL_LEN];
- u32 long_list[SPX5_DSM_CAL_LEN];
-};
-
static u32 sparx5_target_bandwidth(struct sparx5 *sparx5)
{
switch (sparx5->target_ct) {
@@ -68,27 +52,32 @@ static u32 sparx5_target_bandwidth(struct sparx5 *sparx5)
case SPX5_TARGET_CT_7558:
case SPX5_TARGET_CT_7558TSN:
return 201000;
+ case SPX5_TARGET_CT_LAN9691VAO:
+ return 46000;
+ case SPX5_TARGET_CT_LAN9694RED:
+ case SPX5_TARGET_CT_LAN9694TSN:
+ case SPX5_TARGET_CT_LAN9694:
+ return 68000;
+ case SPX5_TARGET_CT_LAN9696RED:
+ case SPX5_TARGET_CT_LAN9696TSN:
+ case SPX5_TARGET_CT_LAN9692VAO:
+ case SPX5_TARGET_CT_LAN9696:
+ return 88000;
+ case SPX5_TARGET_CT_LAN9698RED:
+ case SPX5_TARGET_CT_LAN9698TSN:
+ case SPX5_TARGET_CT_LAN9693VAO:
+ case SPX5_TARGET_CT_LAN9698:
+ return 101000;
default:
return 0;
}
}
-/* This is used in calendar configuration */
-enum sparx5_cal_bw {
- SPX5_CAL_SPEED_NONE = 0,
- SPX5_CAL_SPEED_1G = 1,
- SPX5_CAL_SPEED_2G5 = 2,
- SPX5_CAL_SPEED_5G = 3,
- SPX5_CAL_SPEED_10G = 4,
- SPX5_CAL_SPEED_25G = 5,
- SPX5_CAL_SPEED_0G5 = 6,
- SPX5_CAL_SPEED_12G5 = 7
-};
-
static u32 sparx5_clk_to_bandwidth(enum sparx5_core_clockfreq cclock)
{
switch (cclock) {
case SPX5_CORE_CLOCK_250MHZ: return 83000; /* 250000 / 3 */
+ case SPX5_CORE_CLOCK_328MHZ: return 109375; /* 328000 / 3 */
case SPX5_CORE_CLOCK_500MHZ: return 166000; /* 500000 / 3 */
case SPX5_CORE_CLOCK_625MHZ: return 208000; /* 625000 / 3 */
default: return 0;
@@ -96,7 +85,7 @@ static u32 sparx5_clk_to_bandwidth(enum sparx5_core_clockfreq cclock)
return 0;
}
-static u32 sparx5_cal_speed_to_value(enum sparx5_cal_bw speed)
+u32 sparx5_cal_speed_to_value(enum sparx5_cal_bw speed)
{
switch (speed) {
case SPX5_CAL_SPEED_1G: return 1000;
@@ -126,23 +115,28 @@ static u32 sparx5_bandwidth_to_calendar(u32 bw)
}
}
-static enum sparx5_cal_bw sparx5_get_port_cal_speed(struct sparx5 *sparx5,
- u32 portno)
+enum sparx5_cal_bw sparx5_get_port_cal_speed(struct sparx5 *sparx5, u32 portno)
{
struct sparx5_port *port;
- if (portno >= SPX5_PORTS) {
+ if (portno >= sparx5->data->consts->n_ports) {
/* Internal ports */
- if (portno == SPX5_PORT_CPU_0 || portno == SPX5_PORT_CPU_1) {
+ if (portno ==
+ sparx5_get_internal_port(sparx5, SPX5_PORT_CPU_0) ||
+ portno ==
+ sparx5_get_internal_port(sparx5, SPX5_PORT_CPU_1)) {
/* Equals 1.25G */
return SPX5_CAL_SPEED_2G5;
- } else if (portno == SPX5_PORT_VD0) {
+ } else if (portno ==
+ sparx5_get_internal_port(sparx5, SPX5_PORT_VD0)) {
/* IPMC only idle BW */
return SPX5_CAL_SPEED_NONE;
- } else if (portno == SPX5_PORT_VD1) {
+ } else if (portno ==
+ sparx5_get_internal_port(sparx5, SPX5_PORT_VD1)) {
/* OAM only idle BW */
return SPX5_CAL_SPEED_NONE;
- } else if (portno == SPX5_PORT_VD2) {
+ } else if (portno ==
+ sparx5_get_internal_port(sparx5, SPX5_PORT_VD2)) {
/* IPinIP gets only idle BW */
return SPX5_CAL_SPEED_NONE;
}
@@ -159,6 +153,7 @@ static enum sparx5_cal_bw sparx5_get_port_cal_speed(struct sparx5 *sparx5,
/* Auto configure the QSYS calendar based on port configuration */
int sparx5_config_auto_calendar(struct sparx5 *sparx5)
{
+ const struct sparx5_consts *consts = sparx5->data->consts;
u32 cal[7], value, idx, portno;
u32 max_core_bw;
u32 total_bw = 0, used_port_bw = 0;
@@ -174,7 +169,7 @@ int sparx5_config_auto_calendar(struct sparx5 *sparx5)
}
/* Setup the calendar with the bandwidth to each port */
- for (portno = 0; portno < SPX5_PORTS_ALL; portno++) {
+ for (portno = 0; portno < consts->n_ports_all; portno++) {
u64 reg, offset, this_bw;
spd = sparx5_get_port_cal_speed(sparx5, portno);
@@ -182,7 +177,7 @@ int sparx5_config_auto_calendar(struct sparx5 *sparx5)
continue;
this_bw = sparx5_cal_speed_to_value(spd);
- if (portno < SPX5_PORTS)
+ if (portno < consts->n_ports)
used_port_bw += this_bw;
else
/* Internal ports are granted half the value */
@@ -208,12 +203,13 @@ int sparx5_config_auto_calendar(struct sparx5 *sparx5)
}
/* Halt the calendar while changing it */
- spx5_rmw(QSYS_CAL_CTRL_CAL_MODE_SET(10),
- QSYS_CAL_CTRL_CAL_MODE,
- sparx5, QSYS_CAL_CTRL);
+ if (is_sparx5(sparx5))
+ spx5_rmw(QSYS_CAL_CTRL_CAL_MODE_SET(10),
+ QSYS_CAL_CTRL_CAL_MODE,
+ sparx5, QSYS_CAL_CTRL);
/* Assign port bandwidth to auto calendar */
- for (idx = 0; idx < ARRAY_SIZE(cal); idx++)
+ for (idx = 0; idx < consts->n_auto_cals; idx++)
spx5_wr(cal[idx], sparx5, QSYS_CAL_AUTO(idx));
/* Increase grant rate of all ports to account for
@@ -278,8 +274,8 @@ static u32 sparx5_dsm_cp_cal(u32 *sched)
return SPX5_DSM_CAL_EMPTY;
}
-static int sparx5_dsm_calendar_calc(struct sparx5 *sparx5, u32 taxi,
- struct sparx5_calendar_data *data)
+int sparx5_dsm_calendar_calc(struct sparx5 *sparx5, u32 taxi,
+ struct sparx5_calendar_data *data)
{
bool slow_mode;
u32 gcd, idx, sum, min, factor;
@@ -304,7 +300,7 @@ static int sparx5_dsm_calendar_calc(struct sparx5 *sparx5, u32 taxi,
for (idx = 0; idx < SPX5_DSM_CAL_MAX_DEVS_PER_TAXI; idx++) {
u32 portno = data->taxi_ports[idx];
- if (portno < SPX5_TAXI_PORT_MAX) {
+ if (portno < sparx5->data->consts->n_ports_all) {
data->taxi_speeds[idx] = sparx5_cal_speed_to_value
(sparx5_get_port_cal_speed(sparx5, portno));
} else {
@@ -533,12 +529,23 @@ check_err:
static int sparx5_dsm_calendar_update(struct sparx5 *sparx5, u32 taxi,
struct sparx5_calendar_data *data)
{
- u32 idx;
- u32 cal_len = sparx5_dsm_cal_len(data->schedule), len;
+ u32 cal_len = sparx5_dsm_cal_len(data->schedule), len, idx;
+
+ if (!is_sparx5(sparx5)) {
+ u32 val, act;
+
+ val = spx5_rd(sparx5, DSM_TAXI_CAL_CFG(taxi));
+ act = DSM_TAXI_CAL_CFG_CAL_SEL_STAT_GET(val);
- spx5_wr(DSM_TAXI_CAL_CFG_CAL_PGM_ENA_SET(1),
- sparx5,
- DSM_TAXI_CAL_CFG(taxi));
+ spx5_rmw(DSM_TAXI_CAL_CFG_CAL_PGM_SEL_SET(!act),
+ DSM_TAXI_CAL_CFG_CAL_PGM_SEL,
+ sparx5, DSM_TAXI_CAL_CFG(taxi));
+ }
+
+ spx5_rmw(DSM_TAXI_CAL_CFG_CAL_PGM_ENA_SET(1),
+ DSM_TAXI_CAL_CFG_CAL_PGM_ENA,
+ sparx5,
+ DSM_TAXI_CAL_CFG(taxi));
for (idx = 0; idx < cal_len; idx++) {
spx5_rmw(DSM_TAXI_CAL_CFG_CAL_IDX_SET(idx),
DSM_TAXI_CAL_CFG_CAL_IDX,
@@ -549,13 +556,21 @@ static int sparx5_dsm_calendar_update(struct sparx5 *sparx5, u32 taxi,
sparx5,
DSM_TAXI_CAL_CFG(taxi));
}
- spx5_wr(DSM_TAXI_CAL_CFG_CAL_PGM_ENA_SET(0),
- sparx5,
- DSM_TAXI_CAL_CFG(taxi));
+ spx5_rmw(DSM_TAXI_CAL_CFG_CAL_PGM_ENA_SET(0),
+ DSM_TAXI_CAL_CFG_CAL_PGM_ENA,
+ sparx5,
+ DSM_TAXI_CAL_CFG(taxi));
len = DSM_TAXI_CAL_CFG_CAL_CUR_LEN_GET(spx5_rd(sparx5,
DSM_TAXI_CAL_CFG(taxi)));
if (len != cal_len - 1)
goto update_err;
+
+ if (!is_sparx5(sparx5)) {
+ spx5_rmw(DSM_TAXI_CAL_CFG_CAL_SWITCH_SET(1),
+ DSM_TAXI_CAL_CFG_CAL_SWITCH,
+ sparx5, DSM_TAXI_CAL_CFG(taxi));
+ }
+
return 0;
update_err:
dev_err(sparx5->dev, "Incorrect calendar length: %u\n", len);
@@ -565,6 +580,7 @@ update_err:
/* Configure the DSM calendar based on port configuration */
int sparx5_config_dsm_calendar(struct sparx5 *sparx5)
{
+ const struct sparx5_ops *ops = sparx5->data->ops;
int taxi;
struct sparx5_calendar_data *data;
int err = 0;
@@ -573,8 +589,8 @@ int sparx5_config_dsm_calendar(struct sparx5 *sparx5)
if (!data)
return -ENOMEM;
- for (taxi = 0; taxi < SPX5_DSM_CAL_TAXIS; ++taxi) {
- err = sparx5_dsm_calendar_calc(sparx5, taxi, data);
+ for (taxi = 0; taxi < sparx5->data->consts->n_dsm_cal_taxis; ++taxi) {
+ err = ops->dsm_calendar_calc(sparx5, taxi, data);
if (err) {
dev_err(sparx5->dev, "DSM calendar calculation failed\n");
goto cal_out;
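The hunks above give lan969x a double-buffered DSM calendar: the driver reads CAL_SEL_STAT to learn which bank the hardware is running, programs the other bank (CAL_PGM_SEL = !active), and only requests CAL_SWITCH once the new schedule has been written and its length verified. Below is a register-free sketch of that sequence; the struct and function are illustrative models, not the driver's API.

	#include <stdbool.h>
	#include <stdint.h>

	/* Models only the DSM_TAXI_CAL_CFG fields touched by the hunk above. */
	struct taxi_cal_model {
		bool     sel_stat;   /* bank the hardware is currently using */
		bool     pgm_sel;    /* bank that programming writes target */
		bool     pgm_ena;    /* programming window open */
		bool     do_switch;  /* request switch to the freshly written bank */
		uint32_t cur_len;    /* length the hardware reports back */
	};

	static int update_calendar_model(struct taxi_cal_model *c, bool dual_bank,
					 const uint32_t *sched, uint32_t len)
	{
		uint32_t i;

		if (dual_bank)                  /* lan969x only: target the inactive bank */
			c->pgm_sel = !c->sel_stat;

		c->pgm_ena = true;              /* open the programming window */
		for (i = 0; i < len; i++)
			(void)sched[i];         /* stands in for the per-index calendar writes */
		c->pgm_ena = false;

		if (c->cur_len != len - 1)      /* hardware reports length - 1 */
			return -1;

		if (dual_bank)                  /* lan969x only: flip to the new bank */
			c->do_switch = true;
		return 0;
	}

On Sparx5 itself (dual_bank false) the sequence degenerates to the pre-existing enable/program/disable flow.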
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c b/drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c
index 2d763664dcda..10224ad63a78 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c
@@ -234,10 +234,11 @@ static int sparx5_dcb_ieee_dscp_setdel(struct net_device *dev,
struct dcb_app *))
{
struct sparx5_port *port = netdev_priv(dev);
+ struct sparx5 *sparx5 = port->sparx5;
struct sparx5_port *port_itr;
int err, i;
- for (i = 0; i < SPX5_PORTS; i++) {
+ for (i = 0; i < sparx5->data->consts->n_ports; i++) {
port_itr = port->sparx5->ports[i];
if (!port_itr)
continue;
@@ -386,7 +387,7 @@ int sparx5_dcb_init(struct sparx5 *sparx5)
struct sparx5_port *port;
int i;
- for (i = 0; i < SPX5_PORTS; i++) {
+ for (i = 0; i < sparx5->data->consts->n_ports; i++) {
port = sparx5->ports[i];
if (!port)
continue;
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
index a06dc5a9b355..049541eeaae0 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
@@ -505,8 +505,8 @@ static void sparx5_get_dev_misc_stats(u64 *portstats, void __iomem *inst, u32
static void sparx5_get_device_stats(struct sparx5 *sparx5, int portno)
{
u64 *portstats = &sparx5->stats[portno * sparx5->num_stats];
- u32 tinst = sparx5_port_dev_index(portno);
- u32 dev = sparx5_to_high_dev(portno);
+ u32 tinst = sparx5_port_dev_index(sparx5, portno);
+ u32 dev = sparx5_to_high_dev(sparx5, portno);
void __iomem *inst;
inst = spx5_inst_get(sparx5, dev, tinst);
@@ -819,8 +819,8 @@ static void sparx5_get_eth_phy_stats(struct net_device *ndev,
portstats = &sparx5->stats[portno * sparx5->num_stats];
if (sparx5_is_baser(port->conf.portmode)) {
- u32 tinst = sparx5_port_dev_index(portno);
- u32 dev = sparx5_to_high_dev(portno);
+ u32 tinst = sparx5_port_dev_index(sparx5, portno);
+ u32 dev = sparx5_to_high_dev(sparx5, portno);
inst = spx5_inst_get(sparx5, dev, tinst);
sparx5_get_dev_phy_stats(portstats, inst, tinst);
@@ -844,8 +844,8 @@ static void sparx5_get_eth_mac_stats(struct net_device *ndev,
portstats = &sparx5->stats[portno * sparx5->num_stats];
if (sparx5_is_baser(port->conf.portmode)) {
- u32 tinst = sparx5_port_dev_index(portno);
- u32 dev = sparx5_to_high_dev(portno);
+ u32 tinst = sparx5_port_dev_index(sparx5, portno);
+ u32 dev = sparx5_to_high_dev(sparx5, portno);
inst = spx5_inst_get(sparx5, dev, tinst);
sparx5_get_dev_mac_stats(portstats, inst, tinst);
@@ -912,8 +912,8 @@ static void sparx5_get_eth_mac_ctrl_stats(struct net_device *ndev,
portstats = &sparx5->stats[portno * sparx5->num_stats];
if (sparx5_is_baser(port->conf.portmode)) {
- u32 tinst = sparx5_port_dev_index(portno);
- u32 dev = sparx5_to_high_dev(portno);
+ u32 tinst = sparx5_port_dev_index(sparx5, portno);
+ u32 dev = sparx5_to_high_dev(sparx5, portno);
inst = spx5_inst_get(sparx5, dev, tinst);
sparx5_get_dev_mac_ctrl_stats(portstats, inst, tinst);
@@ -944,8 +944,8 @@ static void sparx5_get_eth_rmon_stats(struct net_device *ndev,
portstats = &sparx5->stats[portno * sparx5->num_stats];
if (sparx5_is_baser(port->conf.portmode)) {
- u32 tinst = sparx5_port_dev_index(portno);
- u32 dev = sparx5_to_high_dev(portno);
+ u32 tinst = sparx5_port_dev_index(sparx5, portno);
+ u32 dev = sparx5_to_high_dev(sparx5, portno);
inst = spx5_inst_get(sparx5, dev, tinst);
sparx5_get_dev_rmon_stats(portstats, inst, tinst);
@@ -1027,8 +1027,8 @@ static void sparx5_get_sset_data(struct net_device *ndev,
portstats = &sparx5->stats[portno * sparx5->num_stats];
if (sparx5_is_baser(port->conf.portmode)) {
- u32 tinst = sparx5_port_dev_index(portno);
- u32 dev = sparx5_to_high_dev(portno);
+ u32 tinst = sparx5_port_dev_index(sparx5, portno);
+ u32 dev = sparx5_to_high_dev(sparx5, portno);
inst = spx5_inst_get(sparx5, dev, tinst);
sparx5_get_dev_misc_stats(portstats, inst, tinst);
@@ -1122,7 +1122,7 @@ static void sparx5_update_stats(struct sparx5 *sparx5)
{
int idx;
- for (idx = 0; idx < SPX5_PORTS; idx++)
+ for (idx = 0; idx < sparx5->data->consts->n_ports; idx++)
if (sparx5->ports[idx])
sparx5_update_port_stats(sparx5, idx);
}
@@ -1183,27 +1183,24 @@ static void sparx5_config_port_stats(struct sparx5 *sparx5, int portno)
}
static int sparx5_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct sparx5_port *port = netdev_priv(dev);
struct sparx5 *sparx5 = port->sparx5;
struct sparx5_phc *phc;
- if (!sparx5->ptp)
+ if (!sparx5->ptp && is_sparx5(sparx5))
return ethtool_op_get_ts_info(dev, info);
phc = &sparx5->phc[SPARX5_PHC_PORT];
- info->phc_index = phc->clock ? ptp_clock_index(phc->clock) : -1;
- if (info->phc_index == -1) {
- info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
+ if (phc->clock) {
+ info->phc_index = ptp_clock_index(phc->clock);
+ } else {
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE;
return 0;
}
info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
@@ -1215,6 +1212,22 @@ static int sparx5_get_ts_info(struct net_device *dev,
return 0;
}
+static void sparx5_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct sparx5_port *port = netdev_priv(dev);
+
+ phylink_ethtool_get_pauseparam(port->phylink, pause);
+}
+
+static int sparx5_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct sparx5_port *port = netdev_priv(dev);
+
+ return phylink_ethtool_set_pauseparam(port->phylink, pause);
+}
+
const struct ethtool_ops sparx5_ethtool_ops = {
.get_sset_count = sparx5_get_sset_count,
.get_strings = sparx5_get_sset_strings,
@@ -1227,10 +1240,13 @@ const struct ethtool_ops sparx5_ethtool_ops = {
.get_eth_ctrl_stats = sparx5_get_eth_mac_ctrl_stats,
.get_rmon_stats = sparx5_get_eth_rmon_stats,
.get_ts_info = sparx5_get_ts_info,
+ .get_pauseparam = sparx5_get_pauseparam,
+ .set_pauseparam = sparx5_set_pauseparam,
};
int sparx_stats_init(struct sparx5 *sparx5)
{
+ const struct sparx5_consts *consts = sparx5->data->consts;
char queue_name[32];
int portno;
@@ -1238,14 +1254,15 @@ int sparx_stats_init(struct sparx5 *sparx5)
sparx5->num_stats = spx5_stats_count;
sparx5->num_ethtool_stats = ARRAY_SIZE(sparx5_stats_layout);
sparx5->stats = devm_kcalloc(sparx5->dev,
- SPX5_PORTS_ALL * sparx5->num_stats,
+ consts->n_ports_all *
+ sparx5->num_stats,
sizeof(u64), GFP_KERNEL);
if (!sparx5->stats)
return -ENOMEM;
mutex_init(&sparx5->queue_stats_lock);
sparx5_config_stats(sparx5);
- for (portno = 0; portno < SPX5_PORTS; portno++)
+ for (portno = 0; portno < consts->n_ports; portno++)
if (sparx5->ports[portno])
sparx5_config_port_stats(sparx5, portno);
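Throughout the ethtool hunks the per-port counters stay in one flat array: the allocation above now sizes it as n_ports_all * num_stats u64 entries, and every reader indexes it as &sparx5->stats[portno * sparx5->num_stats]. A minimal sketch of that indexing, with types simplified:

	#include <stdint.h>
	#include <stddef.h>

	/* Return the first counter belonging to portno in a flat stats array
	 * holding num_stats u64 counters per port (mirrors the indexing above). */
	static uint64_t *port_stats(uint64_t *stats, size_t num_stats, size_t portno)
	{
		return &stats[portno * num_stats];
	}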
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
index 141897dfe388..dbe86f937b21 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
@@ -18,110 +18,51 @@
#include "sparx5_main.h"
#include "sparx5_port.h"
-#define FDMA_XTR_CHANNEL 6
-#define FDMA_INJ_CHANNEL 0
-
-#define FDMA_DCB_INFO_DATAL(x) ((x) & GENMASK(15, 0))
-#define FDMA_DCB_INFO_TOKEN BIT(17)
-#define FDMA_DCB_INFO_INTR BIT(18)
-#define FDMA_DCB_INFO_SW(x) (((x) << 24) & GENMASK(31, 24))
-
-#define FDMA_DCB_STATUS_BLOCKL(x) ((x) & GENMASK(15, 0))
-#define FDMA_DCB_STATUS_SOF BIT(16)
-#define FDMA_DCB_STATUS_EOF BIT(17)
-#define FDMA_DCB_STATUS_INTR BIT(18)
-#define FDMA_DCB_STATUS_DONE BIT(19)
-#define FDMA_DCB_STATUS_BLOCKO(x) (((x) << 20) & GENMASK(31, 20))
-#define FDMA_DCB_INVALID_DATA 0x1
-
#define FDMA_XTR_BUFFER_SIZE 2048
#define FDMA_WEIGHT 4
-/* Frame DMA DCB format
- *
- * +---------------------------+
- * | Next Ptr |
- * +---------------------------+
- * | Reserved | Info |
- * +---------------------------+
- * | Data0 Ptr |
- * +---------------------------+
- * | Reserved | Status0 |
- * +---------------------------+
- * | Data1 Ptr |
- * +---------------------------+
- * | Reserved | Status1 |
- * +---------------------------+
- * | Data2 Ptr |
- * +---------------------------+
- * | Reserved | Status2 |
- * |-------------|-------------|
- * | |
- * | |
- * | |
- * | |
- * | |
- * |---------------------------|
- * | Data14 Ptr |
- * +-------------|-------------+
- * | Reserved | Status14 |
- * +-------------|-------------+
- */
-
-/* For each hardware DB there is an entry in this list and when the HW DB
- * entry is used, this SW DB entry is moved to the back of the list
- */
-struct sparx5_db {
- struct list_head list;
- void *cpu_addr;
-};
-
-static void sparx5_fdma_rx_add_dcb(struct sparx5_rx *rx,
- struct sparx5_rx_dcb_hw *dcb,
- u64 nextptr)
+static int sparx5_fdma_tx_dataptr_cb(struct fdma *fdma, int dcb, int db,
+ u64 *dataptr)
{
- int idx = 0;
-
- /* Reset the status of the DB */
- for (idx = 0; idx < FDMA_RX_DCB_MAX_DBS; ++idx) {
- struct sparx5_db_hw *db = &dcb->db[idx];
+ *dataptr = fdma->dma + (sizeof(struct fdma_dcb) * fdma->n_dcbs) +
+ ((dcb * fdma->n_dbs + db) * fdma->db_size);
- db->status = FDMA_DCB_STATUS_INTR;
- }
- dcb->nextptr = FDMA_DCB_INVALID_DATA;
- dcb->info = FDMA_DCB_INFO_DATAL(FDMA_XTR_BUFFER_SIZE);
- rx->last_entry->nextptr = nextptr;
- rx->last_entry = dcb;
+ return 0;
}
-static void sparx5_fdma_tx_add_dcb(struct sparx5_tx *tx,
- struct sparx5_tx_dcb_hw *dcb,
- u64 nextptr)
+static int sparx5_fdma_rx_dataptr_cb(struct fdma *fdma, int dcb, int db,
+ u64 *dataptr)
{
- int idx = 0;
+ struct sparx5 *sparx5 = fdma->priv;
+ struct sparx5_rx *rx = &sparx5->rx;
+ struct sk_buff *skb;
- /* Reset the status of the DB */
- for (idx = 0; idx < FDMA_TX_DCB_MAX_DBS; ++idx) {
- struct sparx5_db_hw *db = &dcb->db[idx];
+ skb = __netdev_alloc_skb(rx->ndev, fdma->db_size, GFP_ATOMIC);
+ if (unlikely(!skb))
+ return -ENOMEM;
- db->status = FDMA_DCB_STATUS_DONE;
- }
- dcb->nextptr = FDMA_DCB_INVALID_DATA;
- dcb->info = FDMA_DCB_INFO_DATAL(FDMA_XTR_BUFFER_SIZE);
+ *dataptr = virt_to_phys(skb->data);
+
+ rx->skb[dcb][db] = skb;
+
+ return 0;
}
static void sparx5_fdma_rx_activate(struct sparx5 *sparx5, struct sparx5_rx *rx)
{
+ struct fdma *fdma = &rx->fdma;
+
/* Write the buffer address in the LLP and LLP1 regs */
- spx5_wr(((u64)rx->dma) & GENMASK(31, 0), sparx5,
- FDMA_DCB_LLP(rx->channel_id));
- spx5_wr(((u64)rx->dma) >> 32, sparx5, FDMA_DCB_LLP1(rx->channel_id));
+ spx5_wr(((u64)fdma->dma) & GENMASK(31, 0), sparx5,
+ FDMA_DCB_LLP(fdma->channel_id));
+ spx5_wr(((u64)fdma->dma) >> 32, sparx5,
+ FDMA_DCB_LLP1(fdma->channel_id));
/* Set the number of RX DBs to be used, and DB end-of-frame interrupt */
- spx5_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_RX_DCB_MAX_DBS) |
+ spx5_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) |
FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
FDMA_CH_CFG_CH_INJ_PORT_SET(XTR_QUEUE),
- sparx5, FDMA_CH_CFG(rx->channel_id));
+ sparx5, FDMA_CH_CFG(fdma->channel_id));
/* Set the RX Watermark to max */
spx5_rmw(FDMA_XTR_CFG_XTR_FIFO_WM_SET(31), FDMA_XTR_CFG_XTR_FIFO_WM,
@@ -133,22 +74,24 @@ static void sparx5_fdma_rx_activate(struct sparx5 *sparx5, struct sparx5_rx *rx)
sparx5, FDMA_PORT_CTRL(0));
/* Enable RX channel DB interrupt */
- spx5_rmw(BIT(rx->channel_id),
- BIT(rx->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA,
+ spx5_rmw(BIT(fdma->channel_id),
+ BIT(fdma->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA,
sparx5, FDMA_INTR_DB_ENA);
/* Activate the RX channel */
- spx5_wr(BIT(rx->channel_id), sparx5, FDMA_CH_ACTIVATE);
+ spx5_wr(BIT(fdma->channel_id), sparx5, FDMA_CH_ACTIVATE);
}
static void sparx5_fdma_rx_deactivate(struct sparx5 *sparx5, struct sparx5_rx *rx)
{
- /* Dectivate the RX channel */
- spx5_rmw(0, BIT(rx->channel_id) & FDMA_CH_ACTIVATE_CH_ACTIVATE,
+ struct fdma *fdma = &rx->fdma;
+
+ /* Deactivate the RX channel */
+ spx5_rmw(0, BIT(fdma->channel_id) & FDMA_CH_ACTIVATE_CH_ACTIVATE,
sparx5, FDMA_CH_ACTIVATE);
/* Disable RX channel DB interrupt */
- spx5_rmw(0, BIT(rx->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA,
+ spx5_rmw(0, BIT(fdma->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA,
sparx5, FDMA_INTR_DB_ENA);
/* Stop RX fdma */
@@ -158,79 +101,61 @@ static void sparx5_fdma_rx_deactivate(struct sparx5 *sparx5, struct sparx5_rx *r
static void sparx5_fdma_tx_activate(struct sparx5 *sparx5, struct sparx5_tx *tx)
{
+ struct fdma *fdma = &tx->fdma;
+
/* Write the buffer address in the LLP and LLP1 regs */
- spx5_wr(((u64)tx->dma) & GENMASK(31, 0), sparx5,
- FDMA_DCB_LLP(tx->channel_id));
- spx5_wr(((u64)tx->dma) >> 32, sparx5, FDMA_DCB_LLP1(tx->channel_id));
+ spx5_wr(((u64)fdma->dma) & GENMASK(31, 0), sparx5,
+ FDMA_DCB_LLP(fdma->channel_id));
+ spx5_wr(((u64)fdma->dma) >> 32, sparx5,
+ FDMA_DCB_LLP1(fdma->channel_id));
/* Set the number of TX DBs to be used, and DB end-of-frame interrupt */
- spx5_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_TX_DCB_MAX_DBS) |
+ spx5_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) |
FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
FDMA_CH_CFG_CH_INJ_PORT_SET(INJ_QUEUE),
- sparx5, FDMA_CH_CFG(tx->channel_id));
+ sparx5, FDMA_CH_CFG(fdma->channel_id));
/* Start TX fdma */
spx5_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0), FDMA_PORT_CTRL_INJ_STOP,
sparx5, FDMA_PORT_CTRL(0));
/* Activate the channel */
- spx5_wr(BIT(tx->channel_id), sparx5, FDMA_CH_ACTIVATE);
+ spx5_wr(BIT(fdma->channel_id), sparx5, FDMA_CH_ACTIVATE);
}
static void sparx5_fdma_tx_deactivate(struct sparx5 *sparx5, struct sparx5_tx *tx)
{
/* Disable the channel */
- spx5_rmw(0, BIT(tx->channel_id) & FDMA_CH_ACTIVATE_CH_ACTIVATE,
+ spx5_rmw(0, BIT(tx->fdma.channel_id) & FDMA_CH_ACTIVATE_CH_ACTIVATE,
sparx5, FDMA_CH_ACTIVATE);
}
-static void sparx5_fdma_rx_reload(struct sparx5 *sparx5, struct sparx5_rx *rx)
+void sparx5_fdma_reload(struct sparx5 *sparx5, struct fdma *fdma)
{
/* Reload the RX channel */
- spx5_wr(BIT(rx->channel_id), sparx5, FDMA_CH_RELOAD);
-}
-
-static void sparx5_fdma_tx_reload(struct sparx5 *sparx5, struct sparx5_tx *tx)
-{
- /* Reload the TX channel */
- spx5_wr(BIT(tx->channel_id), sparx5, FDMA_CH_RELOAD);
-}
-
-static struct sk_buff *sparx5_fdma_rx_alloc_skb(struct sparx5_rx *rx)
-{
- return __netdev_alloc_skb(rx->ndev, FDMA_XTR_BUFFER_SIZE,
- GFP_ATOMIC);
+ spx5_wr(BIT(fdma->channel_id), sparx5, FDMA_CH_RELOAD);
}
static bool sparx5_fdma_rx_get_frame(struct sparx5 *sparx5, struct sparx5_rx *rx)
{
- struct sparx5_db_hw *db_hw;
- unsigned int packet_size;
+ struct fdma *fdma = &rx->fdma;
struct sparx5_port *port;
- struct sk_buff *new_skb;
+ struct fdma_db *db_hw;
struct frame_info fi;
struct sk_buff *skb;
- dma_addr_t dma_addr;
/* Check if the DCB is done */
- db_hw = &rx->dcb_entries[rx->dcb_index].db[rx->db_index];
- if (unlikely(!(db_hw->status & FDMA_DCB_STATUS_DONE)))
+ db_hw = fdma_db_next_get(fdma);
+ if (unlikely(!fdma_db_is_done(db_hw)))
return false;
- skb = rx->skb[rx->dcb_index][rx->db_index];
- /* Replace the DB entry with a new SKB */
- new_skb = sparx5_fdma_rx_alloc_skb(rx);
- if (unlikely(!new_skb))
- return false;
- /* Map the new skb data and set the new skb */
- dma_addr = virt_to_phys(new_skb->data);
- rx->skb[rx->dcb_index][rx->db_index] = new_skb;
- db_hw->dataptr = dma_addr;
- packet_size = FDMA_DCB_STATUS_BLOCKL(db_hw->status);
- skb_put(skb, packet_size);
+ skb = rx->skb[fdma->dcb_index][fdma->db_index];
+ skb_put(skb, fdma_db_len_get(db_hw));
/* Now do the normal processing of the skb */
- sparx5_ifh_parse((u32 *)skb->data, &fi);
+ sparx5_ifh_parse(sparx5, (u32 *)skb->data, &fi);
/* Map to port netdev */
- port = fi.src_port < SPX5_PORTS ? sparx5->ports[fi.src_port] : NULL;
+ port = fi.src_port < sparx5->data->consts->n_ports ?
+ sparx5->ports[fi.src_port] :
+ NULL;
if (!port || !port->ndev) {
dev_err(sparx5->dev, "Data on inactive port %d\n", fi.src_port);
sparx5_xtr_flush(sparx5, XTR_QUEUE);
@@ -255,195 +180,114 @@ static bool sparx5_fdma_rx_get_frame(struct sparx5 *sparx5, struct sparx5_rx *rx
return true;
}
-static int sparx5_fdma_napi_callback(struct napi_struct *napi, int weight)
+int sparx5_fdma_napi_callback(struct napi_struct *napi, int weight)
{
struct sparx5_rx *rx = container_of(napi, struct sparx5_rx, napi);
struct sparx5 *sparx5 = container_of(rx, struct sparx5, rx);
+ struct fdma *fdma = &rx->fdma;
int counter = 0;
while (counter < weight && sparx5_fdma_rx_get_frame(sparx5, rx)) {
- struct sparx5_rx_dcb_hw *old_dcb;
-
- rx->db_index++;
+ fdma_db_advance(fdma);
counter++;
/* Check if the DCB can be reused */
- if (rx->db_index != FDMA_RX_DCB_MAX_DBS)
+ if (fdma_dcb_is_reusable(fdma))
continue;
- /* As the DCB can be reused, just advance the dcb_index
- * pointer and set the nextptr in the DCB
- */
- rx->db_index = 0;
- old_dcb = &rx->dcb_entries[rx->dcb_index];
- rx->dcb_index++;
- rx->dcb_index &= FDMA_DCB_MAX - 1;
- sparx5_fdma_rx_add_dcb(rx, old_dcb,
- rx->dma +
- ((unsigned long)old_dcb -
- (unsigned long)rx->dcb_entries));
+ fdma_dcb_add(fdma, fdma->dcb_index,
+ FDMA_DCB_INFO_DATAL(fdma->db_size),
+ FDMA_DCB_STATUS_INTR);
+ fdma_db_reset(fdma);
+ fdma_dcb_advance(fdma);
}
if (counter < weight) {
napi_complete_done(&rx->napi, counter);
- spx5_rmw(BIT(rx->channel_id),
- BIT(rx->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA,
+ spx5_rmw(BIT(fdma->channel_id),
+ BIT(fdma->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA,
sparx5, FDMA_INTR_DB_ENA);
}
if (counter)
- sparx5_fdma_rx_reload(sparx5, rx);
+ sparx5_fdma_reload(sparx5, fdma);
return counter;
}
-static struct sparx5_tx_dcb_hw *sparx5_fdma_next_dcb(struct sparx5_tx *tx,
- struct sparx5_tx_dcb_hw *dcb)
+int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb,
+ struct net_device *dev)
{
- struct sparx5_tx_dcb_hw *next_dcb;
-
- next_dcb = dcb;
- next_dcb++;
- /* Handle wrap-around */
- if ((unsigned long)next_dcb >=
- ((unsigned long)tx->first_entry + FDMA_DCB_MAX * sizeof(*dcb)))
- next_dcb = tx->first_entry;
- return next_dcb;
-}
-
-int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb)
-{
- struct sparx5_tx_dcb_hw *next_dcb_hw;
struct sparx5_tx *tx = &sparx5->tx;
- static bool first_time = true;
- struct sparx5_db_hw *db_hw;
- struct sparx5_db *db;
+ struct fdma *fdma = &tx->fdma;
+ void *virt_addr;
- next_dcb_hw = sparx5_fdma_next_dcb(tx, tx->curr_entry);
- db_hw = &next_dcb_hw->db[0];
- if (!(db_hw->status & FDMA_DCB_STATUS_DONE))
+ fdma_dcb_advance(fdma);
+ if (!fdma_db_is_done(fdma_db_get(fdma, fdma->dcb_index, 0)))
return -EINVAL;
- db = list_first_entry(&tx->db_list, struct sparx5_db, list);
- list_move_tail(&db->list, &tx->db_list);
- next_dcb_hw->nextptr = FDMA_DCB_INVALID_DATA;
- tx->curr_entry->nextptr = tx->dma +
- ((unsigned long)next_dcb_hw -
- (unsigned long)tx->first_entry);
- tx->curr_entry = next_dcb_hw;
- memset(db->cpu_addr, 0, FDMA_XTR_BUFFER_SIZE);
- memcpy(db->cpu_addr, ifh, IFH_LEN * 4);
- memcpy(db->cpu_addr + IFH_LEN * 4, skb->data, skb->len);
- db_hw->status = FDMA_DCB_STATUS_SOF |
- FDMA_DCB_STATUS_EOF |
- FDMA_DCB_STATUS_BLOCKO(0) |
- FDMA_DCB_STATUS_BLOCKL(skb->len + IFH_LEN * 4 + 4);
- if (first_time) {
- sparx5_fdma_tx_activate(sparx5, tx);
- first_time = false;
- } else {
- sparx5_fdma_tx_reload(sparx5, tx);
- }
+
+ /* Get the virtual address of the dataptr for the next DB */
+ virt_addr = ((u8 *)fdma->dcbs +
+ (sizeof(struct fdma_dcb) * fdma->n_dcbs) +
+ ((fdma->dcb_index * fdma->n_dbs) * fdma->db_size));
+
+ memcpy(virt_addr, ifh, IFH_LEN * 4);
+ memcpy(virt_addr + IFH_LEN * 4, skb->data, skb->len);
+
+ fdma_dcb_add(fdma, fdma->dcb_index, 0,
+ FDMA_DCB_STATUS_SOF |
+ FDMA_DCB_STATUS_EOF |
+ FDMA_DCB_STATUS_BLOCKO(0) |
+ FDMA_DCB_STATUS_BLOCKL(skb->len + IFH_LEN * 4 + 4));
+
+ sparx5_fdma_reload(sparx5, fdma);
+
return NETDEV_TX_OK;
}
static int sparx5_fdma_rx_alloc(struct sparx5 *sparx5)
{
struct sparx5_rx *rx = &sparx5->rx;
- struct sparx5_rx_dcb_hw *dcb;
- int idx, jdx;
- int size;
-
- size = sizeof(struct sparx5_rx_dcb_hw) * FDMA_DCB_MAX;
- size = ALIGN(size, PAGE_SIZE);
- rx->dcb_entries = devm_kzalloc(sparx5->dev, size, GFP_KERNEL);
- if (!rx->dcb_entries)
- return -ENOMEM;
- rx->dma = virt_to_phys(rx->dcb_entries);
- rx->last_entry = rx->dcb_entries;
- rx->db_index = 0;
- rx->dcb_index = 0;
- /* Now for each dcb allocate the db */
- for (idx = 0; idx < FDMA_DCB_MAX; ++idx) {
- dcb = &rx->dcb_entries[idx];
- dcb->info = 0;
- /* For each db allocate an skb and map skb data pointer to the DB
- * dataptr. In this way when the frame is received the skb->data
- * will contain the frame, so no memcpy is needed
- */
- for (jdx = 0; jdx < FDMA_RX_DCB_MAX_DBS; ++jdx) {
- struct sparx5_db_hw *db_hw = &dcb->db[jdx];
- dma_addr_t dma_addr;
- struct sk_buff *skb;
-
- skb = sparx5_fdma_rx_alloc_skb(rx);
- if (!skb)
- return -ENOMEM;
-
- dma_addr = virt_to_phys(skb->data);
- db_hw->dataptr = dma_addr;
- db_hw->status = 0;
- rx->skb[idx][jdx] = skb;
- }
- sparx5_fdma_rx_add_dcb(rx, dcb, rx->dma + sizeof(*dcb) * idx);
- }
- netif_napi_add_weight(rx->ndev, &rx->napi, sparx5_fdma_napi_callback,
- FDMA_WEIGHT);
- napi_enable(&rx->napi);
- sparx5_fdma_rx_activate(sparx5, rx);
+ struct fdma *fdma = &rx->fdma;
+ int err;
+
+ err = fdma_alloc_phys(fdma);
+ if (err)
+ return err;
+
+ fdma_dcbs_init(fdma, FDMA_DCB_INFO_DATAL(fdma->db_size),
+ FDMA_DCB_STATUS_INTR);
+
return 0;
}
static int sparx5_fdma_tx_alloc(struct sparx5 *sparx5)
{
struct sparx5_tx *tx = &sparx5->tx;
- struct sparx5_tx_dcb_hw *dcb;
- int idx, jdx;
- int size;
-
- size = sizeof(struct sparx5_tx_dcb_hw) * FDMA_DCB_MAX;
- size = ALIGN(size, PAGE_SIZE);
- tx->curr_entry = devm_kzalloc(sparx5->dev, size, GFP_KERNEL);
- if (!tx->curr_entry)
- return -ENOMEM;
- tx->dma = virt_to_phys(tx->curr_entry);
- tx->first_entry = tx->curr_entry;
- INIT_LIST_HEAD(&tx->db_list);
- /* Now for each dcb allocate the db */
- for (idx = 0; idx < FDMA_DCB_MAX; ++idx) {
- dcb = &tx->curr_entry[idx];
- dcb->info = 0;
- /* TX databuffers must be 16byte aligned */
- for (jdx = 0; jdx < FDMA_TX_DCB_MAX_DBS; ++jdx) {
- struct sparx5_db_hw *db_hw = &dcb->db[jdx];
- struct sparx5_db *db;
- dma_addr_t phys;
- void *cpu_addr;
-
- cpu_addr = devm_kzalloc(sparx5->dev,
- FDMA_XTR_BUFFER_SIZE,
- GFP_KERNEL);
- if (!cpu_addr)
- return -ENOMEM;
- phys = virt_to_phys(cpu_addr);
- db_hw->dataptr = phys;
- db_hw->status = 0;
- db = devm_kzalloc(sparx5->dev, sizeof(*db), GFP_KERNEL);
- if (!db)
- return -ENOMEM;
- db->cpu_addr = cpu_addr;
- list_add_tail(&db->list, &tx->db_list);
- }
- sparx5_fdma_tx_add_dcb(tx, dcb, tx->dma + sizeof(*dcb) * idx);
- /* Let the curr_entry to point to the last allocated entry */
- if (idx == FDMA_DCB_MAX - 1)
- tx->curr_entry = dcb;
- }
+ struct fdma *fdma = &tx->fdma;
+ int err;
+
+ err = fdma_alloc_phys(fdma);
+ if (err)
+ return err;
+
+ fdma_dcbs_init(fdma, FDMA_DCB_INFO_DATAL(fdma->db_size),
+ FDMA_DCB_STATUS_DONE);
+
return 0;
}
static void sparx5_fdma_rx_init(struct sparx5 *sparx5,
struct sparx5_rx *rx, int channel)
{
+ struct fdma *fdma = &rx->fdma;
int idx;
- rx->channel_id = channel;
+ fdma->channel_id = channel;
+ fdma->n_dcbs = FDMA_DCB_MAX;
+ fdma->n_dbs = FDMA_RX_DCB_MAX_DBS;
+ fdma->priv = sparx5;
+ fdma->db_size = ALIGN(FDMA_XTR_BUFFER_SIZE, PAGE_SIZE);
+ fdma->size = fdma_get_size(&sparx5->rx.fdma);
+ fdma->ops.dataptr_cb = &sparx5_fdma_rx_dataptr_cb;
+ fdma->ops.nextptr_cb = &fdma_nextptr_cb;
/* Fetch a netdev for SKB and NAPI use, any will do */
- for (idx = 0; idx < SPX5_PORTS; ++idx) {
+ for (idx = 0; idx < sparx5->data->consts->n_ports; ++idx) {
struct sparx5_port *port = sparx5->ports[idx];
if (port && port->ndev) {
@@ -456,7 +300,16 @@ static void sparx5_fdma_rx_init(struct sparx5 *sparx5,
static void sparx5_fdma_tx_init(struct sparx5 *sparx5,
struct sparx5_tx *tx, int channel)
{
- tx->channel_id = channel;
+ struct fdma *fdma = &tx->fdma;
+
+ fdma->channel_id = channel;
+ fdma->n_dcbs = FDMA_DCB_MAX;
+ fdma->n_dbs = FDMA_TX_DCB_MAX_DBS;
+ fdma->priv = sparx5;
+ fdma->db_size = ALIGN(FDMA_XTR_BUFFER_SIZE, PAGE_SIZE);
+ fdma->size = fdma_get_size_contiguous(&sparx5->tx.fdma);
+ fdma->ops.dataptr_cb = &sparx5_fdma_tx_dataptr_cb;
+ fdma->ops.nextptr_cb = &fdma_nextptr_cb;
}
irqreturn_t sparx5_fdma_handler(int irq, void *args)
@@ -484,7 +337,7 @@ irqreturn_t sparx5_fdma_handler(int irq, void *args)
return IRQ_HANDLED;
}
-static void sparx5_fdma_injection_mode(struct sparx5 *sparx5)
+void sparx5_fdma_injection_mode(struct sparx5 *sparx5)
{
const int byte_swap = 1;
int portno;
@@ -500,7 +353,9 @@ static void sparx5_fdma_injection_mode(struct sparx5 *sparx5)
sparx5, QS_INJ_GRP_CFG(INJ_QUEUE));
/* CPU ports capture setup */
- for (portno = SPX5_PORT_CPU_0; portno <= SPX5_PORT_CPU_1; portno++) {
+ for (portno = sparx5_get_internal_port(sparx5, SPX5_PORT_CPU_0);
+ portno <= sparx5_get_internal_port(sparx5, SPX5_PORT_CPU_1);
+ portno++) {
/* ASM CPU port: No preamble, IFH, enable padding */
spx5_wr(ASM_PORT_CFG_PAD_ENA_SET(1) |
ASM_PORT_CFG_NO_PREAMBLE_ENA_SET(1) |
@@ -544,7 +399,7 @@ static void sparx5_fdma_injection_mode(struct sparx5 *sparx5)
}
}
-int sparx5_fdma_start(struct sparx5 *sparx5)
+int sparx5_fdma_init(struct sparx5 *sparx5)
{
int err;
@@ -577,22 +432,55 @@ int sparx5_fdma_start(struct sparx5 *sparx5)
return err;
}
+int sparx5_fdma_deinit(struct sparx5 *sparx5)
+{
+ sparx5_fdma_stop(sparx5);
+ fdma_free_phys(&sparx5->rx.fdma);
+ fdma_free_phys(&sparx5->tx.fdma);
+
+ return 0;
+}
+
static u32 sparx5_fdma_port_ctrl(struct sparx5 *sparx5)
{
return spx5_rd(sparx5, FDMA_PORT_CTRL(0));
}
+int sparx5_fdma_start(struct sparx5 *sparx5)
+{
+ const struct sparx5_ops *ops = sparx5->data->ops;
+ struct sparx5_rx *rx = &sparx5->rx;
+ struct sparx5_tx *tx = &sparx5->tx;
+
+ netif_napi_add_weight(rx->ndev,
+ &rx->napi,
+ ops->fdma_poll,
+ FDMA_WEIGHT);
+
+ napi_enable(&rx->napi);
+
+ sparx5_fdma_rx_activate(sparx5, rx);
+ sparx5_fdma_tx_activate(sparx5, tx);
+
+ return 0;
+}
+
int sparx5_fdma_stop(struct sparx5 *sparx5)
{
+ struct sparx5_rx *rx = &sparx5->rx;
+ struct sparx5_tx *tx = &sparx5->tx;
u32 val;
- napi_disable(&sparx5->rx.napi);
+ napi_disable(&rx->napi);
+
/* Stop the fdma and channel interrupts */
- sparx5_fdma_rx_deactivate(sparx5, &sparx5->rx);
- sparx5_fdma_tx_deactivate(sparx5, &sparx5->tx);
+ sparx5_fdma_rx_deactivate(sparx5, rx);
+ sparx5_fdma_tx_deactivate(sparx5, tx);
+
/* Wait for the RX channel to stop */
read_poll_timeout(sparx5_fdma_port_ctrl, val,
FDMA_PORT_CTRL_XTR_BUF_IS_EMPTY_GET(val) == 0,
500, 10000, 0, sparx5);
+
return 0;
}
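The FDMA rework above relies on one contiguous allocation per direction: the fdma_dcb ring comes first and the data buffers follow, so sparx5_fdma_tx_dataptr_cb() and the virt_addr computation in sparx5_fdma_xmit() both derive a buffer's position from its (dcb, db) pair. A small sketch of that offset arithmetic, using plain size_t values rather than the fdma structures:

	#include <stddef.h>

	/* Offset of data buffer (dcb, db) from the start of the combined
	 * DCB-ring-plus-buffers allocation, as implied by the callbacks above. */
	static size_t fdma_data_offset(size_t dcb_size, size_t n_dcbs, size_t n_dbs,
				       size_t db_size, size_t dcb, size_t db)
	{
		return dcb_size * n_dcbs + (dcb * n_dbs + db) * db_size;
	}

Added to fdma->dma this yields the DMA-visible dataptr; added to fdma->dcbs it yields the CPU address that sparx5_fdma_xmit() copies the IFH and frame into (with db == 0 on TX).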
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c b/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
index 75868b3f548e..f5584244612c 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
@@ -80,15 +80,16 @@ static void sparx5_mact_select(struct sparx5 *sparx5,
int sparx5_mact_learn(struct sparx5 *sparx5, int pgid,
const unsigned char mac[ETH_ALEN], u16 vid)
{
+ const struct sparx5_consts *consts = sparx5->data->consts;
int addr, type, ret;
- if (pgid < SPX5_PORTS) {
+ if (pgid < consts->n_ports) {
type = MAC_ENTRY_ADDR_TYPE_UPSID_PN;
addr = pgid % 32;
addr += (pgid / 32) << 5; /* Add upsid */
} else {
type = MAC_ENTRY_ADDR_TYPE_MC_IDX;
- addr = pgid - SPX5_PORTS;
+ addr = pgid - consts->n_ports;
}
mutex_lock(&sparx5->lock);
@@ -128,7 +129,8 @@ int sparx5_mc_sync(struct net_device *dev, const unsigned char *addr)
struct sparx5_port *port = netdev_priv(dev);
struct sparx5 *sparx5 = port->sparx5;
- return sparx5_mact_learn(sparx5, PGID_CPU, addr, port->pvid);
+ return sparx5_mact_learn(sparx5, sparx5_get_pgid(sparx5, PGID_CPU),
+ addr, port->pvid);
}
static int sparx5_mact_get(struct sparx5 *sparx5,
@@ -371,7 +373,7 @@ static void sparx5_mact_handle_entry(struct sparx5 *sparx5,
return;
port = LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_GET(cfg2);
- if (port >= SPX5_PORTS)
+ if (port >= sparx5->data->consts->n_ports)
return;
if (!test_bit(port, sparx5->bridge_mask))
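sparx5_mact_learn() above keeps the historic split between front-port and multicast destinations, only with the boundary taken from consts->n_ports. A worked sketch of the address encoding:

	#include <stdint.h>

	/* Front-port PGIDs are encoded as a 5-bit port number plus the UPSID in
	 * the upper bits; anything at or beyond n_ports becomes a multicast index
	 * relative to the end of the front ports (mirrors the branch in the diff). */
	static uint32_t mact_addr(uint32_t n_ports, uint32_t pgid)
	{
		if (pgid < n_ports)
			return (pgid % 32) | ((pgid / 32) << 5); /* UPSID_PN */
		return pgid - n_ports;                           /* MC_IDX */
	}

With the Sparx5 value n_ports = 65, pgid 37 encodes as UPSID 1, port 5, while pgid 70 becomes multicast index 5.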
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
index 3c066b62e689..582145713cfd 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
@@ -24,13 +24,17 @@
#include <linux/types.h>
#include <linux/reset.h>
+#include "lan969x/lan969x.h" /* for lan969x match data */
+
#include "sparx5_main_regs.h"
#include "sparx5_main.h"
#include "sparx5_port.h"
#include "sparx5_qos.h"
+#include "sparx5_vcap_ag_api.h"
+#include "sparx5_vcap_impl.h"
+
+const struct sparx5_regs *regs;
-#define QLIM_WM(fraction) \
- ((SPX5_BUFFER_MEMORY / SPX5_BUFFER_CELL_SZ - 100) * (fraction) / 100)
#define IO_RANGES 3
struct initial_port_config {
@@ -45,12 +49,6 @@ struct sparx5_ram_config {
u32 init_val;
};
-struct sparx5_main_io_resource {
- enum sparx5_target id;
- phys_addr_t offset;
- int range;
-};
-
static const struct sparx5_main_io_resource sparx5_main_iomap[] = {
{ TARGET_CPU, 0, 0 }, /* 0x600000000 */
{ TARGET_FDMA, 0x80000, 0 }, /* 0x600080000 */
@@ -214,23 +212,79 @@ static const struct sparx5_main_io_resource sparx5_main_iomap[] = {
{ TARGET_VOP, 0x11a00000, 2 }, /* 0x611a00000 */
};
+bool is_sparx5(struct sparx5 *sparx5)
+{
+ switch (sparx5->target_ct) {
+ case SPX5_TARGET_CT_7546:
+ case SPX5_TARGET_CT_7549:
+ case SPX5_TARGET_CT_7552:
+ case SPX5_TARGET_CT_7556:
+ case SPX5_TARGET_CT_7558:
+ case SPX5_TARGET_CT_7546TSN:
+ case SPX5_TARGET_CT_7549TSN:
+ case SPX5_TARGET_CT_7552TSN:
+ case SPX5_TARGET_CT_7556TSN:
+ case SPX5_TARGET_CT_7558TSN:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void sparx5_init_features(struct sparx5 *sparx5)
+{
+ switch (sparx5->target_ct) {
+ case SPX5_TARGET_CT_7546:
+ case SPX5_TARGET_CT_7549:
+ case SPX5_TARGET_CT_7552:
+ case SPX5_TARGET_CT_7556:
+ case SPX5_TARGET_CT_7558:
+ case SPX5_TARGET_CT_7546TSN:
+ case SPX5_TARGET_CT_7549TSN:
+ case SPX5_TARGET_CT_7552TSN:
+ case SPX5_TARGET_CT_7556TSN:
+ case SPX5_TARGET_CT_7558TSN:
+ case SPX5_TARGET_CT_LAN9691VAO:
+ case SPX5_TARGET_CT_LAN9694TSN:
+ case SPX5_TARGET_CT_LAN9694RED:
+ case SPX5_TARGET_CT_LAN9692VAO:
+ case SPX5_TARGET_CT_LAN9696TSN:
+ case SPX5_TARGET_CT_LAN9696RED:
+ case SPX5_TARGET_CT_LAN9693VAO:
+ case SPX5_TARGET_CT_LAN9698TSN:
+ case SPX5_TARGET_CT_LAN9698RED:
+ sparx5->features = (SPX5_FEATURE_PSFP | SPX5_FEATURE_PTP);
+ break;
+ default:
+ break;
+ }
+}
+
+bool sparx5_has_feature(struct sparx5 *sparx5, enum sparx5_feature feature)
+{
+ return sparx5->features & feature;
+}
+
static int sparx5_create_targets(struct sparx5 *sparx5)
{
+ const struct sparx5_main_io_resource *iomap = sparx5->data->iomap;
+ int iomap_size = sparx5->data->iomap_size;
+ int ioranges = sparx5->data->ioranges;
struct resource *iores[IO_RANGES];
void __iomem *iomem[IO_RANGES];
void __iomem *begin[IO_RANGES];
int range_id[IO_RANGES];
int idx, jdx;
- for (idx = 0, jdx = 0; jdx < ARRAY_SIZE(sparx5_main_iomap); jdx++) {
- const struct sparx5_main_io_resource *iomap = &sparx5_main_iomap[jdx];
+ for (idx = 0, jdx = 0; jdx < iomap_size; jdx++) {
+ const struct sparx5_main_io_resource *io = &iomap[jdx];
- if (idx == iomap->range) {
+ if (idx == io->range) {
range_id[idx] = jdx;
idx++;
}
}
- for (idx = 0; idx < IO_RANGES; idx++) {
+ for (idx = 0; idx < ioranges; idx++) {
iores[idx] = platform_get_resource(sparx5->pdev, IORESOURCE_MEM,
idx);
if (!iores[idx]) {
@@ -245,12 +299,12 @@ static int sparx5_create_targets(struct sparx5 *sparx5)
iores[idx]->name);
return -ENOMEM;
}
- begin[idx] = iomem[idx] - sparx5_main_iomap[range_id[idx]].offset;
+ begin[idx] = iomem[idx] - iomap[range_id[idx]].offset;
}
- for (jdx = 0; jdx < ARRAY_SIZE(sparx5_main_iomap); jdx++) {
- const struct sparx5_main_io_resource *iomap = &sparx5_main_iomap[jdx];
+ for (jdx = 0; jdx < iomap_size; jdx++) {
+ const struct sparx5_main_io_resource *io = &iomap[jdx];
- sparx5->regs[iomap->id] = begin[iomap->range] + iomap->offset;
+ sparx5->regs[io->id] = begin[io->range] + io->offset;
}
return 0;
}
@@ -259,10 +313,13 @@ static int sparx5_create_port(struct sparx5 *sparx5,
struct initial_port_config *config)
{
struct sparx5_port *spx5_port;
+ const struct sparx5_ops *ops;
struct net_device *ndev;
struct phylink *phylink;
int err;
+ ops = sparx5->data->ops;
+
ndev = sparx5_create_netdev(sparx5, config->portno);
if (IS_ERR(ndev)) {
dev_err(sparx5->dev, "Could not create net device: %02u\n",
@@ -281,7 +338,6 @@ static int sparx5_create_port(struct sparx5 *sparx5,
spx5_port->custom_etype = 0x8880; /* Vitesse */
spx5_port->phylink_pcs.poll = true;
spx5_port->phylink_pcs.ops = &sparx5_phylink_pcs_ops;
- spx5_port->phylink_pcs.neg_mode = true;
spx5_port->is_mrouter = false;
INIT_LIST_HEAD(&spx5_port->tc_templates);
sparx5->ports[config->portno] = spx5_port;
@@ -303,6 +359,9 @@ static int sparx5_create_port(struct sparx5 *sparx5,
MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD |
MAC_2500FD | MAC_5000FD | MAC_10000FD | MAC_25000FD;
+ if (ops->is_port_rgmii(spx5_port->portno))
+ phy_interface_set_rgmii(spx5_port->phylink_config.supported_interfaces);
+
__set_bit(PHY_INTERFACE_MODE_SGMII,
spx5_port->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_QSGMII,
@@ -336,6 +395,8 @@ static int sparx5_create_port(struct sparx5 *sparx5,
spx5_port->phylink = phylink;
+ spx5_port->ndev->dev.of_node = spx5_port->of_node;
+
return 0;
}
@@ -459,56 +520,74 @@ static int sparx5_init_coreclock(struct sparx5 *sparx5)
else if (sparx5->coreclock == SPX5_CORE_CLOCK_250MHZ)
freq = 0; /* Not supported */
break;
+ case SPX5_TARGET_CT_LAN9694:
+ case SPX5_TARGET_CT_LAN9691VAO:
+ case SPX5_TARGET_CT_LAN9694TSN:
+ case SPX5_TARGET_CT_LAN9694RED:
+ case SPX5_TARGET_CT_LAN9696:
+ case SPX5_TARGET_CT_LAN9692VAO:
+ case SPX5_TARGET_CT_LAN9696TSN:
+ case SPX5_TARGET_CT_LAN9696RED:
+ case SPX5_TARGET_CT_LAN9698:
+ case SPX5_TARGET_CT_LAN9693VAO:
+ case SPX5_TARGET_CT_LAN9698TSN:
+ case SPX5_TARGET_CT_LAN9698RED:
+ freq = SPX5_CORE_CLOCK_328MHZ;
+ break;
default:
dev_err(sparx5->dev, "Target (%#04x) not supported\n",
sparx5->target_ct);
return -ENODEV;
}
- switch (freq) {
- case SPX5_CORE_CLOCK_250MHZ:
- clk_div = 10;
- pol_upd_int = 312;
- break;
- case SPX5_CORE_CLOCK_500MHZ:
- clk_div = 5;
- pol_upd_int = 624;
- break;
- case SPX5_CORE_CLOCK_625MHZ:
- clk_div = 4;
- pol_upd_int = 780;
- break;
- default:
- dev_err(sparx5->dev, "%d coreclock not supported on (%#04x)\n",
- sparx5->coreclock, sparx5->target_ct);
- return -EINVAL;
+ if (is_sparx5(sparx5)) {
+ switch (freq) {
+ case SPX5_CORE_CLOCK_250MHZ:
+ clk_div = 10;
+ pol_upd_int = 312;
+ break;
+ case SPX5_CORE_CLOCK_500MHZ:
+ clk_div = 5;
+ pol_upd_int = 624;
+ break;
+ case SPX5_CORE_CLOCK_625MHZ:
+ clk_div = 4;
+ pol_upd_int = 780;
+ break;
+ default:
+ dev_err(sparx5->dev,
+ "%d coreclock not supported on (%#04x)\n",
+ sparx5->coreclock, sparx5->target_ct);
+ return -EINVAL;
+ }
+
+ /* Configure the LCPLL */
+ spx5_rmw(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV_SET(clk_div) |
+ CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV_SET(0) |
+ CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR_SET(0) |
+ CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL_SET(0) |
+ CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA_SET(0) |
+ CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA_SET(1),
+ CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV |
+ CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV |
+ CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR |
+ CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL |
+ CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA |
+ CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA,
+ sparx5, CLKGEN_LCPLL1_CORE_CLK_CFG);
+ } else {
+ pol_upd_int = 820; // SPX5_CORE_CLOCK_328MHZ
}
/* Update state with chosen frequency */
sparx5->coreclock = freq;
-
- /* Configure the LCPLL */
- spx5_rmw(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV_SET(clk_div) |
- CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV_SET(0) |
- CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR_SET(0) |
- CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL_SET(0) |
- CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA_SET(0) |
- CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA_SET(1),
- CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV |
- CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV |
- CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR |
- CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL |
- CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA |
- CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA,
- sparx5,
- CLKGEN_LCPLL1_CORE_CLK_CFG);
-
clk_period = sparx5_clk_period(freq);
- spx5_rmw(HSCH_SYS_CLK_PER_100PS_SET(clk_period / 100),
- HSCH_SYS_CLK_PER_100PS,
- sparx5,
- HSCH_SYS_CLK_PER);
+ if (is_sparx5(sparx5))
+ spx5_rmw(HSCH_SYS_CLK_PER_100PS_SET(clk_period / 100),
+ HSCH_SYS_CLK_PER_100PS,
+ sparx5,
+ HSCH_SYS_CLK_PER);
spx5_rmw(ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS_SET(clk_period / 100),
ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS,
@@ -525,7 +604,7 @@ static int sparx5_init_coreclock(struct sparx5 *sparx5)
sparx5,
LRN_AUTOAGE_CFG_1);
- for (idx = 0; idx < 3; idx++)
+ for (idx = 0; idx < sparx5->data->consts->n_sio_clks; idx++)
spx5_rmw(GCB_SIO_CLOCK_SYS_CLK_PERIOD_SET(clk_period / 100),
GCB_SIO_CLOCK_SYS_CLK_PERIOD,
sparx5,
@@ -545,25 +624,36 @@ static int sparx5_init_coreclock(struct sparx5 *sparx5)
return 0;
}
+static u32 qlim_wm(struct sparx5 *sparx5, int fraction)
+{
+ return (sparx5->data->consts->buf_size / SPX5_BUFFER_CELL_SZ - 100) *
+ fraction / 100;
+}
+
static int sparx5_qlim_set(struct sparx5 *sparx5)
{
+ const struct sparx5_consts *consts = sparx5->data->consts;
u32 res, dp, prio;
for (res = 0; res < 2; res++) {
for (prio = 0; prio < 8; prio++)
spx5_wr(0xFFF, sparx5,
- QRES_RES_CFG(prio + 630 + res * 1024));
+ QRES_RES_CFG(prio +
+ consts->qres_max_prio_idx +
+ res * 1024));
for (dp = 0; dp < 4; dp++)
spx5_wr(0xFFF, sparx5,
- QRES_RES_CFG(dp + 638 + res * 1024));
+ QRES_RES_CFG(dp +
+ consts->qres_max_colour_idx +
+ res * 1024));
}
/* Set 80,90,95,100% of memory size for top watermarks */
- spx5_wr(QLIM_WM(80), sparx5, XQS_QLIMIT_SHR_QLIM_CFG(0));
- spx5_wr(QLIM_WM(90), sparx5, XQS_QLIMIT_SHR_CTOP_CFG(0));
- spx5_wr(QLIM_WM(95), sparx5, XQS_QLIMIT_SHR_ATOP_CFG(0));
- spx5_wr(QLIM_WM(100), sparx5, XQS_QLIMIT_SHR_TOP_CFG(0));
+ spx5_wr(qlim_wm(sparx5, 80), sparx5, XQS_QLIMIT_SHR_QLIM_CFG(0));
+ spx5_wr(qlim_wm(sparx5, 90), sparx5, XQS_QLIMIT_SHR_CTOP_CFG(0));
+ spx5_wr(qlim_wm(sparx5, 95), sparx5, XQS_QLIMIT_SHR_ATOP_CFG(0));
+ spx5_wr(qlim_wm(sparx5, 100), sparx5, XQS_QLIMIT_SHR_TOP_CFG(0));
return 0;
}
@@ -585,7 +675,7 @@ static void sparx5_board_init(struct sparx5 *sparx5)
GCB_HW_SGPIO_SD_CFG);
/* Refer to LOS SGPIO */
- for (idx = 0; idx < SPX5_PORTS; idx++)
+ for (idx = 0; idx < sparx5->data->consts->n_ports; idx++)
if (sparx5->ports[idx])
if (sparx5->ports[idx]->conf.sd_sgpio != ~0)
spx5_wr(sparx5->ports[idx]->conf.sd_sgpio,
@@ -596,12 +686,14 @@ static void sparx5_board_init(struct sparx5 *sparx5)
static int sparx5_start(struct sparx5 *sparx5)
{
u8 broadcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ const struct sparx5_consts *consts = sparx5->data->consts;
+ const struct sparx5_ops *ops = sparx5->data->ops;
char queue_name[32];
u32 idx;
int err;
/* Setup own UPSIDs */
- for (idx = 0; idx < 3; idx++) {
+ for (idx = 0; idx < consts->n_own_upsids; idx++) {
spx5_wr(idx, sparx5, ANA_AC_OWN_UPSID(idx));
spx5_wr(idx, sparx5, ANA_CL_OWN_UPSID(idx));
spx5_wr(idx, sparx5, ANA_L2_OWN_UPSID(idx));
@@ -609,7 +701,7 @@ static int sparx5_start(struct sparx5 *sparx5)
}
/* Enable CPU ports */
- for (idx = SPX5_PORTS; idx < SPX5_PORTS_ALL; idx++)
+ for (idx = consts->n_ports; idx < consts->n_ports_all; idx++)
spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(1),
QFWD_SWITCH_PORT_MODE_PORT_ENA,
sparx5,
@@ -618,14 +710,20 @@ static int sparx5_start(struct sparx5 *sparx5)
/* Init masks */
sparx5_update_fwd(sparx5);
+ /* Init flood masks */
+ for (int pgid = sparx5_get_pgid(sparx5, PGID_UC_FLOOD);
+ pgid <= sparx5_get_pgid(sparx5, PGID_BCAST); pgid++)
+ sparx5_pgid_clear(sparx5, pgid);
+
/* CPU copy CPU pgids */
- spx5_wr(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(1),
- sparx5, ANA_AC_PGID_MISC_CFG(PGID_CPU));
- spx5_wr(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(1),
- sparx5, ANA_AC_PGID_MISC_CFG(PGID_BCAST));
+ spx5_wr(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(1), sparx5,
+ ANA_AC_PGID_MISC_CFG(sparx5_get_pgid(sparx5, PGID_CPU)));
+ spx5_wr(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(1), sparx5,
+ ANA_AC_PGID_MISC_CFG(sparx5_get_pgid(sparx5, PGID_BCAST)));
/* Recalc injected frame FCS */
- for (idx = SPX5_PORT_CPU_0; idx <= SPX5_PORT_CPU_1; idx++)
+ for (idx = sparx5_get_internal_port(sparx5, SPX5_PORT_CPU_0);
+ idx <= sparx5_get_internal_port(sparx5, SPX5_PORT_CPU_1); idx++)
spx5_rmw(ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA_SET(1),
ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA,
sparx5, ANA_CL_FILTER_CTRL(idx));
@@ -640,7 +738,8 @@ static int sparx5_start(struct sparx5 *sparx5)
sparx5_vlan_init(sparx5);
/* Add host mode BC address (points only to CPU) */
- sparx5_mact_learn(sparx5, PGID_CPU, broadcast, NULL_VID);
+ sparx5_mact_learn(sparx5, sparx5_get_pgid(sparx5, PGID_CPU), broadcast,
+ NULL_VID);
/* Enable queue limitation watermarks */
sparx5_qlim_set(sparx5);
@@ -692,15 +791,18 @@ static int sparx5_start(struct sparx5 *sparx5)
/* Start Frame DMA with fallback to register based INJ/XTR */
err = -ENXIO;
if (sparx5->fdma_irq >= 0) {
- if (GCB_CHIP_ID_REV_ID_GET(sparx5->chip_id) > 0)
- err = devm_request_threaded_irq(sparx5->dev,
- sparx5->fdma_irq,
- NULL,
- sparx5_fdma_handler,
- IRQF_ONESHOT,
- "sparx5-fdma", sparx5);
- if (!err)
- err = sparx5_fdma_start(sparx5);
+ if (GCB_CHIP_ID_REV_ID_GET(sparx5->chip_id) > 0 ||
+ !is_sparx5(sparx5))
+ err = devm_request_irq(sparx5->dev,
+ sparx5->fdma_irq,
+ sparx5_fdma_handler,
+ 0,
+ "sparx5-fdma", sparx5);
+ if (!err) {
+ err = ops->fdma_init(sparx5);
+ if (!err)
+ sparx5_fdma_start(sparx5);
+ }
if (err)
sparx5->fdma_irq = -ENXIO;
} else {
@@ -718,9 +820,10 @@ static int sparx5_start(struct sparx5 *sparx5)
sparx5->xtr_irq = -ENXIO;
}
- if (sparx5->ptp_irq >= 0) {
+ if (sparx5->ptp_irq >= 0 &&
+ sparx5_has_feature(sparx5, SPX5_FEATURE_PTP)) {
err = devm_request_threaded_irq(sparx5->dev, sparx5->ptp_irq,
- NULL, sparx5_ptp_irq_handler,
+ NULL, ops->ptp_irq_handler,
IRQF_ONESHOT, "sparx5-ptp",
sparx5);
if (err)
@@ -743,6 +846,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
struct initial_port_config *configs, *config;
struct device_node *np = pdev->dev.of_node;
struct device_node *ports, *portnp;
+ const struct sparx5_ops *ops;
struct reset_control *reset;
struct sparx5 *sparx5;
int idx = 0, err = 0;
@@ -759,6 +863,13 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
sparx5->dev = &pdev->dev;
spin_lock_init(&sparx5->tx_lock);
+ sparx5->data = device_get_match_data(sparx5->dev);
+ if (!sparx5->data)
+ return -EINVAL;
+
+ regs = sparx5->data->regs;
+ ops = sparx5->data->ops;
+
/* Do switch core reset if available */
reset = devm_reset_control_get_optional_shared(&pdev->dev, "switch");
if (IS_ERR(reset))
@@ -787,7 +898,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
for_each_available_child_of_node(ports, portnp) {
struct sparx5_port_config *conf;
- struct phy *serdes;
+ struct phy *serdes = NULL;
u32 portno;
err = of_property_read_u32(portnp, "reg", &portno);
@@ -817,13 +928,17 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
conf->sd_sgpio = ~0;
else
sparx5->sd_sgpio_remapping = true;
- serdes = devm_of_phy_get(sparx5->dev, portnp, NULL);
- if (IS_ERR(serdes)) {
- err = dev_err_probe(sparx5->dev, PTR_ERR(serdes),
- "port %u: missing serdes\n",
- portno);
- of_node_put(portnp);
- goto cleanup_config;
+ /* There is no SerDes node for RGMII ports. */
+ if (!ops->is_port_rgmii(portno)) {
+ serdes = devm_of_phy_get(sparx5->dev, portnp, NULL);
+ if (IS_ERR(serdes)) {
+ err = dev_err_probe(sparx5->dev,
+ PTR_ERR(serdes),
+ "port %u: missing serdes\n",
+ portno);
+ of_node_put(portnp);
+ goto cleanup_config;
+ }
}
config->portno = portno;
config->node = portnp;
@@ -856,6 +971,9 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
sparx5->target_ct = (enum spx5_target_chiptype)
GCB_CHIP_ID_PART_ID_GET(sparx5->chip_id);
+ /* Initialize the features based on the target */
+ sparx5_init_features(sparx5);
+
/* Initialize Switchcore and internal RAMs */
err = sparx5_init_switchcore(sparx5);
if (err) {
@@ -899,6 +1017,9 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
dev_err(sparx5->dev, "PTP failed\n");
goto cleanup_ports;
}
+
+ INIT_LIST_HEAD(&sparx5->mall_entries);
+
goto cleanup_config;
cleanup_ports:
@@ -915,6 +1036,7 @@ cleanup_pnode:
static void mchp_sparx5_remove(struct platform_device *pdev)
{
struct sparx5 *sparx5 = platform_get_drvdata(pdev);
+ const struct sparx5_ops *ops = sparx5->data->ops;
debugfs_remove_recursive(sparx5->debugfs_root);
if (sparx5->xtr_irq) {
@@ -926,7 +1048,7 @@ static void mchp_sparx5_remove(struct platform_device *pdev)
sparx5->fdma_irq = -ENXIO;
}
sparx5_ptp_deinit(sparx5);
- sparx5_fdma_stop(sparx5);
+ ops->fdma_deinit(sparx5);
sparx5_cleanup_ports(sparx5);
sparx5_vcap_destroy(sparx5);
/* Unregister netdevs */
@@ -934,15 +1056,80 @@ static void mchp_sparx5_remove(struct platform_device *pdev)
destroy_workqueue(sparx5->mact_queue);
}
+static const struct sparx5_regs sparx5_regs = {
+ .tsize = sparx5_tsize,
+ .gaddr = sparx5_gaddr,
+ .gcnt = sparx5_gcnt,
+ .gsize = sparx5_gsize,
+ .raddr = sparx5_raddr,
+ .rcnt = sparx5_rcnt,
+ .fpos = sparx5_fpos,
+ .fsize = sparx5_fsize,
+};
+
+static const struct sparx5_consts sparx5_consts = {
+ .n_ports = 65,
+ .n_ports_all = 70,
+ .n_hsch_l1_elems = 64,
+ .n_hsch_queues = 8,
+ .n_lb_groups = 10,
+ .n_pgids = 2113, /* (2048 + n_ports) */
+ .n_sio_clks = 3,
+ .n_own_upsids = 3,
+ .n_auto_cals = 7,
+ .n_filters = 1024,
+ .n_gates = 1024,
+ .n_sdlbs = 4096,
+ .n_dsm_cal_taxis = 8,
+ .buf_size = 4194280,
+ .qres_max_prio_idx = 630,
+ .qres_max_colour_idx = 638,
+ .tod_pin = 4,
+ .vcaps = sparx5_vcaps,
+ .vcaps_cfg = sparx5_vcap_inst_cfg,
+ .vcap_stats = &sparx5_vcap_stats,
+};
+
+static const struct sparx5_ops sparx5_ops = {
+ .is_port_2g5 = &sparx5_port_is_2g5,
+ .is_port_5g = &sparx5_port_is_5g,
+ .is_port_10g = &sparx5_port_is_10g,
+ .is_port_25g = &sparx5_port_is_25g,
+ .is_port_rgmii = &sparx5_port_is_rgmii,
+ .get_port_dev_index = &sparx5_port_dev_mapping,
+ .get_port_dev_bit = &sparx5_port_dev_mapping,
+ .get_hsch_max_group_rate = &sparx5_get_hsch_max_group_rate,
+ .get_sdlb_group = &sparx5_get_sdlb_group,
+ .set_port_mux = &sparx5_port_mux_set,
+ .ptp_irq_handler = &sparx5_ptp_irq_handler,
+ .dsm_calendar_calc = &sparx5_dsm_calendar_calc,
+ .fdma_init = &sparx5_fdma_init,
+ .fdma_deinit = &sparx5_fdma_deinit,
+ .fdma_poll = &sparx5_fdma_napi_callback,
+ .fdma_xmit = &sparx5_fdma_xmit,
+};
+
+static const struct sparx5_match_data sparx5_desc = {
+ .iomap = sparx5_main_iomap,
+ .iomap_size = ARRAY_SIZE(sparx5_main_iomap),
+ .ioranges = 3,
+ .regs = &sparx5_regs,
+ .consts = &sparx5_consts,
+ .ops = &sparx5_ops,
+};
+
static const struct of_device_id mchp_sparx5_match[] = {
- { .compatible = "microchip,sparx5-switch" },
+ { .compatible = "microchip,sparx5-switch", .data = &sparx5_desc },
+#ifdef CONFIG_LAN969X_SWITCH
+ { .compatible = "microchip,lan9691-switch", .data = &lan969x_desc },
+#endif
{ }
};
MODULE_DEVICE_TABLE(of, mchp_sparx5_match);
static struct platform_driver mchp_sparx5_driver = {
.probe = mchp_sparx5_probe,
- .remove_new = mchp_sparx5_remove,
+ .remove = mchp_sparx5_remove,
.driver = {
.name = "sparx5-switch",
.of_match_table = mchp_sparx5_match,
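The QLIM watermarks that used to come from the compile-time QLIM_WM() macro are now computed from the per-chip buffer size via qlim_wm() above. A worked sketch of the same arithmetic (SPX5_BUFFER_CELL_SZ is left as a parameter here, since its value is not part of this diff):

	#include <stdint.h>

	/* Watermark in buffer cells for a given percentage of the shared memory,
	 * mirroring qlim_wm() in the hunk above. */
	static uint32_t qlim_wm_sketch(uint32_t buf_size, uint32_t cell_size,
				       uint32_t fraction)
	{
		return (buf_size / cell_size - 100) * fraction / 100;
	}

sparx5_qlim_set() then writes the 80, 90, 95 and 100 percent results to the QLIM, CTOP, ATOP and TOP shared watermarks, with buf_size supplied by the match data (4194280 for Sparx5).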
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
index 316fed5f2735..fe7d8bcc0cd9 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
@@ -18,21 +18,36 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/hrtimer.h>
#include <linux/debugfs.h>
+#include <net/flow_offload.h>
+
+#include <fdma_api.h>
#include "sparx5_main_regs.h"
/* Target chip type */
enum spx5_target_chiptype {
- SPX5_TARGET_CT_7546 = 0x7546, /* SparX-5-64 Enterprise */
- SPX5_TARGET_CT_7549 = 0x7549, /* SparX-5-90 Enterprise */
- SPX5_TARGET_CT_7552 = 0x7552, /* SparX-5-128 Enterprise */
- SPX5_TARGET_CT_7556 = 0x7556, /* SparX-5-160 Enterprise */
- SPX5_TARGET_CT_7558 = 0x7558, /* SparX-5-200 Enterprise */
- SPX5_TARGET_CT_7546TSN = 0x47546, /* SparX-5-64i Industrial */
- SPX5_TARGET_CT_7549TSN = 0x47549, /* SparX-5-90i Industrial */
- SPX5_TARGET_CT_7552TSN = 0x47552, /* SparX-5-128i Industrial */
- SPX5_TARGET_CT_7556TSN = 0x47556, /* SparX-5-160i Industrial */
- SPX5_TARGET_CT_7558TSN = 0x47558, /* SparX-5-200i Industrial */
+ SPX5_TARGET_CT_7546 = 0x7546, /* SparX-5-64 Enterprise */
+ SPX5_TARGET_CT_7549 = 0x7549, /* SparX-5-90 Enterprise */
+ SPX5_TARGET_CT_7552 = 0x7552, /* SparX-5-128 Enterprise */
+ SPX5_TARGET_CT_7556 = 0x7556, /* SparX-5-160 Enterprise */
+ SPX5_TARGET_CT_7558 = 0x7558, /* SparX-5-200 Enterprise */
+ SPX5_TARGET_CT_7546TSN = 0x47546, /* SparX-5-64i Industrial */
+ SPX5_TARGET_CT_7549TSN = 0x47549, /* SparX-5-90i Industrial */
+ SPX5_TARGET_CT_7552TSN = 0x47552, /* SparX-5-128i Industrial */
+ SPX5_TARGET_CT_7556TSN = 0x47556, /* SparX-5-160i Industrial */
+ SPX5_TARGET_CT_7558TSN = 0x47558, /* SparX-5-200i Industrial */
+ SPX5_TARGET_CT_LAN9694 = 0x9694, /* lan969x-40 */
+ SPX5_TARGET_CT_LAN9691VAO = 0x9691, /* lan969x-40-VAO */
+ SPX5_TARGET_CT_LAN9694TSN = 0x9695, /* lan969x-40-TSN */
+ SPX5_TARGET_CT_LAN9694RED = 0x969A, /* lan969x-40-RED */
+ SPX5_TARGET_CT_LAN9696 = 0x9696, /* lan969x-60 */
+ SPX5_TARGET_CT_LAN9692VAO = 0x9692, /* lan969x-65-VAO */
+ SPX5_TARGET_CT_LAN9696TSN = 0x9697, /* lan969x-60-TSN */
+ SPX5_TARGET_CT_LAN9696RED = 0x969B, /* lan969x-60-RED */
+ SPX5_TARGET_CT_LAN9698 = 0x9698, /* lan969x-100 */
+ SPX5_TARGET_CT_LAN9693VAO = 0x9693, /* lan969x-100-VAO */
+ SPX5_TARGET_CT_LAN9698TSN = 0x9699, /* lan969x-100-TSN */
+ SPX5_TARGET_CT_LAN9698RED = 0x969C, /* lan969x-100-RED */
};
enum sparx5_port_max_tags {
@@ -48,25 +63,41 @@ enum sparx5_vlan_port_type {
SPX5_VLAN_PORT_TYPE_S_CUSTOM /* S-port using custom type */
};
+/* This is used in calendar configuration */
+enum sparx5_cal_bw {
+ SPX5_CAL_SPEED_NONE = 0,
+ SPX5_CAL_SPEED_1G = 1,
+ SPX5_CAL_SPEED_2G5 = 2,
+ SPX5_CAL_SPEED_5G = 3,
+ SPX5_CAL_SPEED_10G = 4,
+ SPX5_CAL_SPEED_25G = 5,
+ SPX5_CAL_SPEED_0G5 = 6,
+ SPX5_CAL_SPEED_12G5 = 7
+};
+
+enum sparx5_feature {
+ SPX5_FEATURE_PSFP = BIT(0),
+ SPX5_FEATURE_PTP = BIT(1),
+};
+
#define SPX5_PORTS 65
-#define SPX5_PORT_CPU (SPX5_PORTS) /* Next port is CPU port */
-#define SPX5_PORT_CPU_0 (SPX5_PORT_CPU + 0) /* CPU Port 65 */
-#define SPX5_PORT_CPU_1 (SPX5_PORT_CPU + 1) /* CPU Port 66 */
-#define SPX5_PORT_VD0 (SPX5_PORT_CPU + 2) /* VD0/Port 67 used for IPMC */
-#define SPX5_PORT_VD1 (SPX5_PORT_CPU + 3) /* VD1/Port 68 used for AFI/OAM */
-#define SPX5_PORT_VD2 (SPX5_PORT_CPU + 4) /* VD2/Port 69 used for IPinIP*/
-#define SPX5_PORTS_ALL (SPX5_PORT_CPU + 5) /* Total number of ports */
-
-#define PGID_BASE SPX5_PORTS /* Starts after port PGIDs */
-#define PGID_UC_FLOOD (PGID_BASE + 0)
-#define PGID_MC_FLOOD (PGID_BASE + 1)
-#define PGID_IPV4_MC_DATA (PGID_BASE + 2)
-#define PGID_IPV4_MC_CTRL (PGID_BASE + 3)
-#define PGID_IPV6_MC_DATA (PGID_BASE + 4)
-#define PGID_IPV6_MC_CTRL (PGID_BASE + 5)
-#define PGID_BCAST (PGID_BASE + 6)
-#define PGID_CPU (PGID_BASE + 7)
-#define PGID_MCAST_START (PGID_BASE + 8)
+#define SPX5_PORTS_ALL 70 /* Total number of ports */
+
+#define SPX5_PORT_CPU_0 0 /* CPU Port 0 */
+#define SPX5_PORT_CPU_1 1 /* CPU Port 1 */
+#define SPX5_PORT_VD0 2 /* VD0/Port used for IPMC */
+#define SPX5_PORT_VD1 3 /* VD1/Port used for AFI/OAM */
+#define SPX5_PORT_VD2 4 /* VD2/Port used for IPinIP*/
+
+#define PGID_UC_FLOOD 0
+#define PGID_MC_FLOOD 1
+#define PGID_IPV4_MC_DATA 2
+#define PGID_IPV4_MC_CTRL 3
+#define PGID_IPV6_MC_DATA 4
+#define PGID_IPV6_MC_CTRL 5
+#define PGID_BCAST 6
+#define PGID_CPU 7
+#define PGID_MCAST_START 8
#define PGID_TABLE_SIZE 3290
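The PGID defines above are now relative indices; the call sites in the .c hunks resolve them through sparx5_get_pgid(), whose body is not part of this diff. A plausible translation, shown purely as an assumption, is a fixed offset by the per-chip front-port count (the old absolute defines were PGID_BASE + n with PGID_BASE = SPX5_PORTS):

	#include <stdint.h>

	/* Assumed mapping only: absolute PGID = first PGID after the front ports
	 * plus the relative index defined above. Not taken from this diff. */
	static uint32_t get_pgid_sketch(uint32_t n_ports, uint32_t rel_pgid)
	{
		return n_ports + rel_pgid;
	}

Under that assumption, PGID_CPU (7) on Sparx5 (n_ports = 65) still resolves to 72, matching the old absolute define.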
@@ -81,6 +112,8 @@ enum sparx5_vlan_port_type {
#define XTR_QUEUE 0
#define INJ_QUEUE 0
+#define FDMA_XTR_CHANNEL 6
+#define FDMA_INJ_CHANNEL 0
#define FDMA_DCB_MAX 64
#define FDMA_RX_DCB_MAX_DBS 15
#define FDMA_TX_DCB_MAX_DBS 1
@@ -97,23 +130,25 @@ enum sparx5_vlan_port_type {
#define IFH_PDU_TYPE_IPV4_UDP_PTP 0x6
#define IFH_PDU_TYPE_IPV6_UDP_PTP 0x7
-struct sparx5;
+#define SPX5_DSM_CAL_LEN 64
+#define SPX5_DSM_CAL_MAX_DEVS_PER_TAXI 13
+#define SPX5_DSM_CAL_EMPTY 0xFFFF
-struct sparx5_db_hw {
- u64 dataptr;
- u64 status;
-};
+#define SPARX5_MAX_PTP_ID 512
-struct sparx5_rx_dcb_hw {
- u64 nextptr;
- u64 info;
- struct sparx5_db_hw db[FDMA_RX_DCB_MAX_DBS];
-};
+struct sparx5;
-struct sparx5_tx_dcb_hw {
- u64 nextptr;
- u64 info;
- struct sparx5_db_hw db[FDMA_TX_DCB_MAX_DBS];
+struct sparx5_calendar_data {
+ u32 schedule[SPX5_DSM_CAL_LEN];
+ u32 avg_dist[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
+ u32 taxi_ports[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
+ u32 taxi_speeds[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
+ u32 dev_slots[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
+ u32 new_slots[SPX5_DSM_CAL_LEN];
+ u32 temp_sched[SPX5_DSM_CAL_LEN];
+ u32 indices[SPX5_DSM_CAL_LEN];
+ u32 short_list[SPX5_DSM_CAL_LEN];
+ u32 long_list[SPX5_DSM_CAL_LEN];
};
/* Frame DMA receive state:
@@ -123,27 +158,34 @@ struct sparx5_tx_dcb_hw {
* When the db_index reached FDMA_RX_DCB_MAX_DBS the DB is reused.
*/
struct sparx5_rx {
- struct sparx5_rx_dcb_hw *dcb_entries;
- struct sparx5_rx_dcb_hw *last_entry;
- struct sk_buff *skb[FDMA_DCB_MAX][FDMA_RX_DCB_MAX_DBS];
- int db_index;
- int dcb_index;
+ struct fdma fdma;
+ struct page_pool *page_pool;
+ union {
+ struct sk_buff *skb[FDMA_DCB_MAX][FDMA_RX_DCB_MAX_DBS];
+ struct page *page[FDMA_DCB_MAX][FDMA_RX_DCB_MAX_DBS];
+ };
dma_addr_t dma;
struct napi_struct napi;
- u32 channel_id;
struct net_device *ndev;
u64 packets;
+ u8 page_order;
+};
+
+/* Used to store information about TX buffers. */
+struct sparx5_tx_buf {
+ struct net_device *dev;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ bool used;
+ bool ptp;
};
/* Frame DMA transmit state:
* DCBs are chained using the DCBs nextptr field.
*/
struct sparx5_tx {
- struct sparx5_tx_dcb_hw *curr_entry;
- struct sparx5_tx_dcb_hw *first_entry;
- struct list_head db_list;
- dma_addr_t dma;
- u32 channel_id;
+ struct fdma fdma;
+ struct sparx5_tx_buf *dbs;
u64 packets;
u64 dropped;
};
@@ -173,6 +215,7 @@ struct sparx5_port {
struct phylink_config phylink_config;
struct phylink *phylink;
struct phylink_pcs phylink_pcs;
+ struct flow_stats mirror_stats;
u16 portno;
/* Ingress default VLAN (pvid) */
u16 pvid;
@@ -198,6 +241,7 @@ struct sparx5_port {
enum sparx5_core_clockfreq {
SPX5_CORE_CLOCK_DEFAULT, /* Defaults to the highest supported frequency */
SPX5_CORE_CLOCK_250MHZ, /* 250MHZ core clock frequency */
+ SPX5_CORE_CLOCK_328MHZ, /* 328MHZ core clock frequency */
SPX5_CORE_CLOCK_500MHZ, /* 500MHZ core clock frequency */
SPX5_CORE_CLOCK_625MHZ, /* 625MHZ core clock frequency */
};
@@ -227,15 +271,106 @@ struct sparx5_mdb_entry {
u16 pgid_idx;
};
+struct sparx5_mall_mirror_entry {
+ u32 idx;
+ struct sparx5_port *port;
+};
+
+struct sparx5_mall_entry {
+ struct list_head list;
+ struct sparx5_port *port;
+ unsigned long cookie;
+ enum flow_action_id type;
+ bool ingress;
+ union {
+ struct sparx5_mall_mirror_entry mirror;
+ };
+};
+
#define SPARX5_PTP_TIMEOUT msecs_to_jiffies(10)
#define SPARX5_SKB_CB(skb) \
((struct sparx5_skb_cb *)((skb)->cb))
+struct sparx5_regs {
+ const unsigned int *tsize;
+ const unsigned int *gaddr;
+ const unsigned int *gcnt;
+ const unsigned int *gsize;
+ const unsigned int *raddr;
+ const unsigned int *rcnt;
+ const unsigned int *fpos;
+ const unsigned int *fsize;
+};
+
+struct sparx5_consts {
+ u32 n_ports; /* Number of front ports */
+ u32 n_ports_all; /* Number of front ports + internal ports */
+ u32 n_hsch_l1_elems; /* Number of HSCH layer 1 elements */
+ u32 n_hsch_queues; /* Number of HSCH queues */
+ u32 n_lb_groups; /* Number of leaky bucket groups */
+ u32 n_pgids; /* Number of PGID's */
+ u32 n_sio_clks; /* Number of serial IO clocks */
+ u32 n_own_upsids; /* Number of own UPSID's */
+ u32 n_auto_cals; /* Number of auto calendars */
+ u32 n_filters; /* Number of PSFP filters */
+ u32 n_gates; /* Number of PSFP gates */
+ u32 n_sdlbs; /* Number of service dual leaky buckets */
+ u32 n_dsm_cal_taxis; /* Number of DSM calendar taxis */
+ u32 buf_size; /* Amount of QLIM watermark memory */
+ u32 qres_max_prio_idx; /* Maximum QRES prio index */
+ u32 qres_max_colour_idx; /* Maximum QRES colour index */
+ u32 tod_pin; /* PTP TOD pin */
+ const struct sparx5_vcap_inst *vcaps_cfg;
+ const struct vcap_info *vcaps;
+ const struct vcap_statistics *vcap_stats;
+};
+
+struct sparx5_ops {
+ bool (*is_port_2g5)(int portno);
+ bool (*is_port_5g)(int portno);
+ bool (*is_port_10g)(int portno);
+ bool (*is_port_25g)(int portno);
+ bool (*is_port_rgmii)(int portno);
+ u32 (*get_port_dev_index)(struct sparx5 *sparx5, int port);
+ u32 (*get_port_dev_bit)(struct sparx5 *sparx5, int port);
+ u32 (*get_hsch_max_group_rate)(int grp);
+ struct sparx5_sdlb_group *(*get_sdlb_group)(int idx);
+ int (*set_port_mux)(struct sparx5 *sparx5, struct sparx5_port *port,
+ struct sparx5_port_config *conf);
+
+ irqreturn_t (*ptp_irq_handler)(int irq, void *args);
+ int (*dsm_calendar_calc)(struct sparx5 *sparx5, u32 taxi,
+ struct sparx5_calendar_data *data);
+ int (*port_config_rgmii)(struct sparx5_port *port,
+ struct sparx5_port_config *conf);
+ int (*fdma_init)(struct sparx5 *sparx5);
+ int (*fdma_deinit)(struct sparx5 *sparx5);
+ int (*fdma_poll)(struct napi_struct *napi, int weight);
+ int (*fdma_xmit)(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb,
+ struct net_device *dev);
+};
+
+struct sparx5_main_io_resource {
+ enum sparx5_target id;
+ phys_addr_t offset;
+ int range;
+};
+
+struct sparx5_match_data {
+ const struct sparx5_regs *regs;
+ const struct sparx5_consts *consts;
+ const struct sparx5_ops *ops;
+ const struct sparx5_main_io_resource *iomap;
+ int ioranges;
+ int iomap_size;
+};
+
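/*
 * Hypothetical usage sketch (editor's illustration, not part of the patch):
 * a per-SKU match data instance is typically tied to a compatible string
 * and fetched at probe time, roughly like:
 *
 *   static const struct sparx5_match_data sparx5_desc = {
 *           .regs       = &sparx5_regs,
 *           .consts     = &sparx5_consts,
 *           .ops        = &sparx5_ops,
 *           .iomap      = sparx5_main_iomap,
 *           .iomap_size = ARRAY_SIZE(sparx5_main_iomap),
 *           .ioranges   = 3,
 *   };
 *
 *   sparx5->data = device_get_match_data(sparx5->dev);
 *
 * All symbol names above are illustrative; only the struct layout comes
 * from this patch.
 */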
struct sparx5 {
struct platform_device *pdev;
struct device *dev;
u32 chip_id;
enum spx5_target_chiptype target_ct;
+ u32 features;
void __iomem *regs[NUM_TARGETS];
int port_count;
struct mutex lock; /* MAC reg lock */
@@ -295,10 +430,16 @@ struct sparx5 {
struct vcap_control *vcap_ctrl;
/* PGID allocation map */
u8 pgid_map[PGID_TABLE_SIZE];
+ struct list_head mall_entries;
/* Common root for debugfs */
struct dentry *debugfs_root;
+ const struct sparx5_match_data *data;
};
+/* sparx5_main.c */
+bool is_sparx5(struct sparx5 *sparx5);
+bool sparx5_has_feature(struct sparx5 *sparx5, enum sparx5_feature feature);
+
/* sparx5_switchdev.c */
int sparx5_register_notifier_blocks(struct sparx5 *sparx5);
void sparx5_unregister_notifier_blocks(struct sparx5 *sparx5);
@@ -310,17 +451,23 @@ struct frame_info {
};
void sparx5_xtr_flush(struct sparx5 *sparx5, u8 grp);
-void sparx5_ifh_parse(u32 *ifh, struct frame_info *info);
+void sparx5_ifh_parse(struct sparx5 *sparx5, u32 *ifh, struct frame_info *info);
irqreturn_t sparx5_xtr_handler(int irq, void *_priv);
netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev);
int sparx5_manual_injection_mode(struct sparx5 *sparx5);
void sparx5_port_inj_timer_setup(struct sparx5_port *port);
/* sparx5_fdma.c */
+int sparx5_fdma_init(struct sparx5 *sparx5);
+int sparx5_fdma_deinit(struct sparx5 *sparx5);
int sparx5_fdma_start(struct sparx5 *sparx5);
int sparx5_fdma_stop(struct sparx5 *sparx5);
-int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb);
+int sparx5_fdma_napi_callback(struct napi_struct *napi, int weight);
+int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb,
+ struct net_device *dev);
irqreturn_t sparx5_fdma_handler(int irq, void *args);
+void sparx5_fdma_reload(struct sparx5 *sparx5, struct fdma *fdma);
+void sparx5_fdma_injection_mode(struct sparx5 *sparx5);
/* sparx5_mactable.c */
void sparx5_mact_pull_work(struct work_struct *work);
@@ -359,6 +506,11 @@ void sparx5_vlan_port_apply(struct sparx5 *sparx5, struct sparx5_port *port);
/* sparx5_calendar.c */
int sparx5_config_auto_calendar(struct sparx5 *sparx5);
int sparx5_config_dsm_calendar(struct sparx5 *sparx5);
+int sparx5_dsm_calendar_calc(struct sparx5 *sparx5, u32 taxi,
+ struct sparx5_calendar_data *data);
+u32 sparx5_cal_speed_to_value(enum sparx5_cal_bw speed);
+enum sparx5_cal_bw sparx5_get_port_cal_speed(struct sparx5 *sparx5, u32 portno);
+
/* sparx5_ethtool.c */
void sparx5_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *stats);
@@ -375,11 +527,14 @@ static inline int sparx5_dcb_init(struct sparx5 *sparx5)
#endif
/* sparx5_netdev.c */
-void sparx5_set_port_ifh_timestamp(void *ifh_hdr, u64 timestamp);
+void sparx5_set_port_ifh_timestamp(struct sparx5 *sparx5, void *ifh_hdr,
+ u64 timestamp);
void sparx5_set_port_ifh_rew_op(void *ifh_hdr, u32 rew_op);
-void sparx5_set_port_ifh_pdu_type(void *ifh_hdr, u32 pdu_type);
-void sparx5_set_port_ifh_pdu_w16_offset(void *ifh_hdr, u32 pdu_w16_offset);
-void sparx5_set_port_ifh(void *ifh_hdr, u16 portno);
+void sparx5_set_port_ifh_pdu_type(struct sparx5 *sparx5, void *ifh_hdr,
+ u32 pdu_type);
+void sparx5_set_port_ifh_pdu_w16_offset(struct sparx5 *sparx5, void *ifh_hdr,
+ u32 pdu_w16_offset);
+void sparx5_set_port_ifh(struct sparx5 *sparx5, void *ifh_hdr, u16 portno);
bool sparx5_netdevice_check(const struct net_device *dev);
struct net_device *sparx5_create_netdev(struct sparx5 *sparx5, u32 portno);
int sparx5_register_netdevs(struct sparx5 *sparx5);
@@ -402,6 +557,9 @@ void sparx5_ptp_txtstamp_release(struct sparx5_port *port,
struct sk_buff *skb);
irqreturn_t sparx5_ptp_irq_handler(int irq, void *args);
int sparx5_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts);
+void sparx5_get_hwtimestamp(struct sparx5 *sparx5,
+ struct timespec64 *ts,
+ u32 nsec);
/* sparx5_vcap_impl.c */
int sparx5_vcap_init(struct sparx5 *sparx5);
@@ -417,6 +575,7 @@ enum sparx5_pgid_type {
void sparx5_pgid_init(struct sparx5 *spx5);
int sparx5_pgid_alloc_mcast(struct sparx5 *spx5, u16 *idx);
int sparx5_pgid_free(struct sparx5 *spx5, u16 idx);
+int sparx5_get_pgid(struct sparx5 *sparx5, int pgid);
/* sparx5_pool.c */
struct sparx5_pool_entry {
@@ -430,6 +589,11 @@ int sparx5_pool_get(struct sparx5_pool_entry *pool, int size, u32 *id);
int sparx5_pool_get_with_idx(struct sparx5_pool_entry *pool, int size, u32 idx,
u32 *id);
+/* sparx5_port.c */
+int sparx5_port_mux_set(struct sparx5 *sparx5, struct sparx5_port *port,
+ struct sparx5_port_config *conf);
+int sparx5_get_internal_port(struct sparx5 *sparx5, int port);
+
/* sparx5_sdlb.c */
#define SPX5_SDLB_PUP_TOKEN_DISABLE 0x1FFF
#define SPX5_SDLB_PUP_TOKEN_MAX (SPX5_SDLB_PUP_TOKEN_DISABLE - 1)
@@ -448,10 +612,11 @@ struct sparx5_sdlb_group {
};
extern struct sparx5_sdlb_group sdlb_groups[SPX5_SDLB_GROUP_CNT];
+struct sparx5_sdlb_group *sparx5_get_sdlb_group(int idx);
int sparx5_sdlb_pup_token_get(struct sparx5 *sparx5, u32 pup_interval,
u64 rate);
-int sparx5_sdlb_clk_hz_get(struct sparx5 *sparx5);
+u64 sparx5_sdlb_clk_hz_get(struct sparx5 *sparx5);
int sparx5_sdlb_group_get_by_rate(struct sparx5 *sparx5, u32 rate, u32 burst);
int sparx5_sdlb_group_get_by_index(struct sparx5 *sparx5, u32 idx, u32 *group);
@@ -541,12 +706,20 @@ void sparx5_psfp_init(struct sparx5 *sparx5);
void sparx5_new_base_time(struct sparx5 *sparx5, const u32 cycle_time,
const ktime_t org_base_time, ktime_t *new_base_time);
+/* sparx5_mirror.c */
+int sparx5_mirror_add(struct sparx5_mall_entry *entry);
+void sparx5_mirror_del(struct sparx5_mall_entry *entry);
+void sparx5_mirror_stats(struct sparx5_mall_entry *entry,
+ struct flow_stats *fstats);
+
/* Clock period in picoseconds */
static inline u32 sparx5_clk_period(enum sparx5_core_clockfreq cclock)
{
switch (cclock) {
case SPX5_CORE_CLOCK_250MHZ:
return 4000;
+ case SPX5_CORE_CLOCK_328MHZ:
+ return 3048;
case SPX5_CORE_CLOCK_500MHZ:
return 2000;
case SPX5_CORE_CLOCK_625MHZ:
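A side note on the values in sparx5_clk_period() above: each case returns the nominal core clock period in picoseconds, i.e. 10^12 / f, so the new 328 MHz entry comes out at roughly 3048 ps, alongside 4000 ps for 250 MHz and 2000 ps for 500 MHz. A minimal, hypothetical caller is sketched below; the helper name and the div_u64() based conversion are illustrative only and not part of this patch.

/* Illustrative sketch only: convert a nanosecond interval into core clock
 * ticks using sparx5_clk_period(). Needs <linux/math64.h> for div_u64().
 */
static u64 example_ns_to_clks(enum sparx5_core_clockfreq freq, u64 ns)
{
	/* sparx5_clk_period() returns picoseconds, so scale ns up by 1000 */
	return div_u64(ns * 1000, sparx5_clk_period(freq));
}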
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
index bd03a0a3c1da..d9ef4ef137b8 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
@@ -1,11 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0+
* Microchip Sparx5 Switch driver
*
- * Copyright (c) 2021 Microchip Technology Inc.
+ * Copyright (c) 2024 Microchip Technology Inc.
*/
-/* This file is autogenerated by cml-utils 2023-02-10 11:18:53 +0100.
- * Commit ID: c30fb4bf0281cd4a7133bdab6682f9e43c872ada
+/* This file is autogenerated by cml-utils 2024-10-04 10:40:40 +0200.
+ * Commit ID: 9d07b8d19363f3cd3590ddb3f7a2e2768e16524b
*/
#ifndef _SPARX5_MAIN_REGS_H_
@@ -15,6 +15,8 @@
#include <linux/types.h>
#include <linux/bug.h>
+#include "sparx5_regs.h"
+
enum sparx5_target {
TARGET_ANA_AC = 1,
TARGET_ANA_ACL = 2,
@@ -35,6 +37,7 @@ enum sparx5_target {
TARGET_FDMA = 117,
TARGET_GCB = 118,
TARGET_HSCH = 119,
+ TARGET_HSIO_WRAP = 120,
TARGET_LRN = 122,
TARGET_PCEP = 129,
TARGET_PCS10G_BR = 132,
@@ -52,14 +55,28 @@ enum sparx5_target {
TARGET_VCAP_SUPER = 326,
TARGET_VOP = 327,
TARGET_XQS = 331,
- NUM_TARGETS = 332
+ TARGET_DEVRGMII = 392,
+ NUM_TARGETS = 517
};
+/* sparx5_main.c
+ *
+ * This is used by the register macros to access chip differences (if any) in:
+ * target size, register address, register count, group address, group count,
+ * group size, field position and field size.
+ */
+extern const struct sparx5_regs *regs;
+
+/* Non-constant mask variant of FIELD_GET() and FIELD_PREP() */
+#define spx5_field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
+#define spx5_field_prep(_mask, _val) (((_val) << (ffs(_mask) - 1)) & (_mask))
+
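/*
 * Worked example (editor's illustration, not part of the patch): with a
 * run-time mask such as GENMASK(12, 8) == 0x1f00, ffs(0x1f00) - 1 == 8, so
 *   spx5_field_prep(0x1f00, 0x3)   == (0x3 << 8) & 0x1f00   == 0x300
 *   spx5_field_get(0x1f00, 0x1f00) == (0x1f00 & 0x1f00) >> 8 == 0x1f
 * i.e. the same results as FIELD_PREP()/FIELD_GET(), but usable when the
 * mask is built from regs->fsize[] at run time and is therefore not a
 * compile-time constant.
 */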
#define __REG(...) __VA_ARGS__
-/* ANA_AC:RAM_CTRL:RAM_INIT */
-#define ANA_AC_RAM_INIT __REG(TARGET_ANA_AC,\
- 0, 1, 839108, 0, 1, 4, 0, 0, 1, 4)
+/* ANA_AC:RAM_CTRL:RAM_INIT */
+#define ANA_AC_RAM_INIT \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_RAM_CTRL], 0, 1, 4, 0,\
+ 0, 1, 4)
#define ANA_AC_RAM_INIT_RAM_INIT BIT(1)
#define ANA_AC_RAM_INIT_RAM_INIT_SET(x)\
@@ -73,9 +90,10 @@ enum sparx5_target {
#define ANA_AC_RAM_INIT_RAM_CFG_HOOK_GET(x)\
FIELD_GET(ANA_AC_RAM_INIT_RAM_CFG_HOOK, x)
-/* ANA_AC:PS_COMMON:OWN_UPSID */
-#define ANA_AC_OWN_UPSID(r) __REG(TARGET_ANA_AC,\
- 0, 1, 894472, 0, 1, 352, 52, r, 3, 4)
+/* ANA_AC:PS_COMMON:OWN_UPSID */
+#define ANA_AC_OWN_UPSID(r) \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_PS_COMMON], 0, 1, 352,\
+ 52, r, regs->rcnt[RC_ANA_AC_OWN_UPSID], 4)
#define ANA_AC_OWN_UPSID_OWN_UPSID GENMASK(4, 0)
#define ANA_AC_OWN_UPSID_OWN_UPSID_SET(x)\
@@ -83,17 +101,86 @@ enum sparx5_target {
#define ANA_AC_OWN_UPSID_OWN_UPSID_GET(x)\
FIELD_GET(ANA_AC_OWN_UPSID_OWN_UPSID, x)
-/* ANA_AC:SRC:SRC_CFG */
-#define ANA_AC_SRC_CFG(g) __REG(TARGET_ANA_AC,\
- 0, 1, 849920, g, 102, 16, 0, 0, 1, 4)
-
-/* ANA_AC:SRC:SRC_CFG1 */
-#define ANA_AC_SRC_CFG1(g) __REG(TARGET_ANA_AC,\
- 0, 1, 849920, g, 102, 16, 4, 0, 1, 4)
-
-/* ANA_AC:SRC:SRC_CFG2 */
-#define ANA_AC_SRC_CFG2(g) __REG(TARGET_ANA_AC,\
- 0, 1, 849920, g, 102, 16, 8, 0, 1, 4)
+/* ANA_AC:MIRROR_PROBE:PROBE_CFG */
+#define ANA_AC_PROBE_CFG(g) \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_MIRROR_PROBE], g, 3, \
+ 32, 0, 0, 1, 4)
+
+#define ANA_AC_PROBE_CFG_PROBE_RX_CPU_AND_VD GENMASK(31, 27)
+#define ANA_AC_PROBE_CFG_PROBE_RX_CPU_AND_VD_SET(x)\
+ FIELD_PREP(ANA_AC_PROBE_CFG_PROBE_RX_CPU_AND_VD, x)
+#define ANA_AC_PROBE_CFG_PROBE_RX_CPU_AND_VD_GET(x)\
+ FIELD_GET(ANA_AC_PROBE_CFG_PROBE_RX_CPU_AND_VD, x)
+
+#define ANA_AC_PROBE_CFG_PROBE_CPU_SET GENMASK(26, 19)
+#define ANA_AC_PROBE_CFG_PROBE_CPU_SET_SET(x)\
+ FIELD_PREP(ANA_AC_PROBE_CFG_PROBE_CPU_SET, x)
+#define ANA_AC_PROBE_CFG_PROBE_CPU_SET_GET(x)\
+ FIELD_GET(ANA_AC_PROBE_CFG_PROBE_CPU_SET, x)
+
+#define ANA_AC_PROBE_CFG_PROBE_VID GENMASK(18, 6)
+#define ANA_AC_PROBE_CFG_PROBE_VID_SET(x)\
+ FIELD_PREP(ANA_AC_PROBE_CFG_PROBE_VID, x)
+#define ANA_AC_PROBE_CFG_PROBE_VID_GET(x)\
+ FIELD_GET(ANA_AC_PROBE_CFG_PROBE_VID, x)
+
+#define ANA_AC_PROBE_CFG_PROBE_VLAN_MODE GENMASK(5, 4)
+#define ANA_AC_PROBE_CFG_PROBE_VLAN_MODE_SET(x)\
+ FIELD_PREP(ANA_AC_PROBE_CFG_PROBE_VLAN_MODE, x)
+#define ANA_AC_PROBE_CFG_PROBE_VLAN_MODE_GET(x)\
+ FIELD_GET(ANA_AC_PROBE_CFG_PROBE_VLAN_MODE, x)
+
+#define ANA_AC_PROBE_CFG_PROBE_MAC_MODE GENMASK(3, 2)
+#define ANA_AC_PROBE_CFG_PROBE_MAC_MODE_SET(x)\
+ FIELD_PREP(ANA_AC_PROBE_CFG_PROBE_MAC_MODE, x)
+#define ANA_AC_PROBE_CFG_PROBE_MAC_MODE_GET(x)\
+ FIELD_GET(ANA_AC_PROBE_CFG_PROBE_MAC_MODE, x)
+
+#define ANA_AC_PROBE_CFG_PROBE_DIRECTION GENMASK(1, 0)
+#define ANA_AC_PROBE_CFG_PROBE_DIRECTION_SET(x)\
+ FIELD_PREP(ANA_AC_PROBE_CFG_PROBE_DIRECTION, x)
+#define ANA_AC_PROBE_CFG_PROBE_DIRECTION_GET(x)\
+ FIELD_GET(ANA_AC_PROBE_CFG_PROBE_DIRECTION, x)
+
+/* ANA_AC:MIRROR_PROBE:PROBE_PORT_CFG */
+#define ANA_AC_PROBE_PORT_CFG(g) \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_MIRROR_PROBE], g, 3, \
+ 32, 8, 0, 1, 4)
+
+/* SPARX5 ONLY */
+/* ANA_AC:MIRROR_PROBE:PROBE_PORT_CFG1 */
+#define ANA_AC_PROBE_PORT_CFG1(g) \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_MIRROR_PROBE], g, 3, \
+ 32, 12, 0, 1, 4)
+
+/* SPARX5 ONLY */
+/* ANA_AC:MIRROR_PROBE:PROBE_PORT_CFG2 */
+#define ANA_AC_PROBE_PORT_CFG2(g) \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_MIRROR_PROBE], g, 3, \
+ 32, 16, 0, 1, 4)
+
+#define ANA_AC_PROBE_PORT_CFG2_PROBE_PORT_MASK2 BIT(0)
+#define ANA_AC_PROBE_PORT_CFG2_PROBE_PORT_MASK2_SET(x)\
+ FIELD_PREP(ANA_AC_PROBE_PORT_CFG2_PROBE_PORT_MASK2, x)
+#define ANA_AC_PROBE_PORT_CFG2_PROBE_PORT_MASK2_GET(x)\
+ FIELD_GET(ANA_AC_PROBE_PORT_CFG2_PROBE_PORT_MASK2, x)
+
+/* ANA_AC:SRC:SRC_CFG */
+#define ANA_AC_SRC_CFG(g) \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_SRC], g, \
+ regs->gcnt[GC_ANA_AC_SRC], regs->gsize[GW_ANA_AC_SRC], 0, 0, 1, 4)
+
+/* SPARX5 ONLY */
+/* ANA_AC:SRC:SRC_CFG1 */
+#define ANA_AC_SRC_CFG1(g) \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_SRC], g, \
+ regs->gcnt[GC_ANA_AC_SRC], regs->gsize[GW_ANA_AC_SRC], 4, 0, 1, 4)
+
+/* SPARX5 ONLY */
+/* ANA_AC:SRC:SRC_CFG2 */
+#define ANA_AC_SRC_CFG2(g) \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_SRC], g, \
+ regs->gcnt[GC_ANA_AC_SRC], regs->gsize[GW_ANA_AC_SRC], 8, 0, 1, 4)
#define ANA_AC_SRC_CFG2_PORT_MASK2 BIT(0)
#define ANA_AC_SRC_CFG2_PORT_MASK2_SET(x)\
@@ -101,17 +188,22 @@ enum sparx5_target {
#define ANA_AC_SRC_CFG2_PORT_MASK2_GET(x)\
FIELD_GET(ANA_AC_SRC_CFG2_PORT_MASK2, x)
-/* ANA_AC:PGID:PGID_CFG */
-#define ANA_AC_PGID_CFG(g) __REG(TARGET_ANA_AC,\
- 0, 1, 786432, g, 3290, 16, 0, 0, 1, 4)
+/* ANA_AC:PGID:PGID_CFG */
+#define ANA_AC_PGID_CFG(g) \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_PGID], g, \
+ regs->gcnt[GC_ANA_AC_PGID], 16, 0, 0, 1, 4)
-/* ANA_AC:PGID:PGID_CFG1 */
-#define ANA_AC_PGID_CFG1(g) __REG(TARGET_ANA_AC,\
- 0, 1, 786432, g, 3290, 16, 4, 0, 1, 4)
+/* SPARX5 ONLY */
+/* ANA_AC:PGID:PGID_CFG1 */
+#define ANA_AC_PGID_CFG1(g) \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_PGID], g, \
+ regs->gcnt[GC_ANA_AC_PGID], 16, 4, 0, 1, 4)
-/* ANA_AC:PGID:PGID_CFG2 */
-#define ANA_AC_PGID_CFG2(g) __REG(TARGET_ANA_AC,\
- 0, 1, 786432, g, 3290, 16, 8, 0, 1, 4)
+/* SPARX5 ONLY */
+/* ANA_AC:PGID:PGID_CFG2 */
+#define ANA_AC_PGID_CFG2(g) \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_PGID], g, \
+ regs->gcnt[GC_ANA_AC_PGID], 16, 8, 0, 1, 4)
#define ANA_AC_PGID_CFG2_PORT_MASK2 BIT(0)
#define ANA_AC_PGID_CFG2_PORT_MASK2_SET(x)\
@@ -119,9 +211,10 @@ enum sparx5_target {
#define ANA_AC_PGID_CFG2_PORT_MASK2_GET(x)\
FIELD_GET(ANA_AC_PGID_CFG2_PORT_MASK2, x)
-/* ANA_AC:PGID:PGID_MISC_CFG */
-#define ANA_AC_PGID_MISC_CFG(g) __REG(TARGET_ANA_AC,\
- 0, 1, 786432, g, 3290, 16, 12, 0, 1, 4)
+/* ANA_AC:PGID:PGID_MISC_CFG */
+#define ANA_AC_PGID_MISC_CFG(g) \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_PGID], g, \
+ regs->gcnt[GC_ANA_AC_PGID], 16, 12, 0, 1, 4)
#define ANA_AC_PGID_MISC_CFG_PGID_CPU_QU GENMASK(6, 4)
#define ANA_AC_PGID_MISC_CFG_PGID_CPU_QU_SET(x)\
@@ -141,9 +234,10 @@ enum sparx5_target {
#define ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_GET(x)\
FIELD_GET(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, x)
-/* ANA_AC:TSN_SF:TSN_SF */
-#define ANA_AC_TSN_SF __REG(TARGET_ANA_AC,\
- 0, 1, 839136, 0, 1, 4, 0, 0, 1, 4)
+/* ANA_AC:TSN_SF:TSN_SF */
+#define ANA_AC_TSN_SF \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_TSN_SF], 0, 1, 4, 0, \
+ 0, 1, 4)
#define ANA_AC_TSN_SF_TSN_STREAM_BLOCK_OVERSIZE_STICKY BIT(9)
#define ANA_AC_TSN_SF_TSN_STREAM_BLOCK_OVERSIZE_STICKY_SET(x)\
@@ -151,21 +245,24 @@ enum sparx5_target {
#define ANA_AC_TSN_SF_TSN_STREAM_BLOCK_OVERSIZE_STICKY_GET(x)\
FIELD_GET(ANA_AC_TSN_SF_TSN_STREAM_BLOCK_OVERSIZE_STICKY, x)
-#define ANA_AC_TSN_SF_PORT_NUM GENMASK(8, 0)
+#define ANA_AC_TSN_SF_PORT_NUM\
+ GENMASK(regs->fsize[FW_ANA_AC_TSN_SF_PORT_NUM] + 0 - 1, 0)
#define ANA_AC_TSN_SF_PORT_NUM_SET(x)\
- FIELD_PREP(ANA_AC_TSN_SF_PORT_NUM, x)
+ spx5_field_prep(ANA_AC_TSN_SF_PORT_NUM, x)
#define ANA_AC_TSN_SF_PORT_NUM_GET(x)\
- FIELD_GET(ANA_AC_TSN_SF_PORT_NUM, x)
+ spx5_field_get(ANA_AC_TSN_SF_PORT_NUM, x)
-/* ANA_AC:TSN_SF_CFG:TSN_SF_CFG */
-#define ANA_AC_TSN_SF_CFG(g) __REG(TARGET_ANA_AC,\
- 0, 1, 839680, g, 1024, 4, 0, 0, 1, 4)
+/* ANA_AC:TSN_SF_CFG:TSN_SF_CFG */
+#define ANA_AC_TSN_SF_CFG(g) \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_TSN_SF_CFG], g, \
+ regs->gcnt[GC_ANA_AC_TSN_SF_CFG], 4, 0, 0, 1, 4)
-#define ANA_AC_TSN_SF_CFG_TSN_SGID GENMASK(25, 16)
+#define ANA_AC_TSN_SF_CFG_TSN_SGID\
+ GENMASK(regs->fsize[FW_ANA_AC_TSN_SF_CFG_TSN_SGID] + 16 - 1, 16)
#define ANA_AC_TSN_SF_CFG_TSN_SGID_SET(x)\
- FIELD_PREP(ANA_AC_TSN_SF_CFG_TSN_SGID, x)
+ spx5_field_prep(ANA_AC_TSN_SF_CFG_TSN_SGID, x)
#define ANA_AC_TSN_SF_CFG_TSN_SGID_GET(x)\
- FIELD_GET(ANA_AC_TSN_SF_CFG_TSN_SGID, x)
+ spx5_field_get(ANA_AC_TSN_SF_CFG_TSN_SGID, x)
#define ANA_AC_TSN_SF_CFG_TSN_MAX_SDU GENMASK(15, 2)
#define ANA_AC_TSN_SF_CFG_TSN_MAX_SDU_SET(x)\
@@ -185,9 +282,10 @@ enum sparx5_target {
#define ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_STATE_GET(x)\
FIELD_GET(ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_STATE, x)
-/* ANA_AC:TSN_SF_STATUS:TSN_SF_STATUS */
-#define ANA_AC_TSN_SF_STATUS __REG(TARGET_ANA_AC,\
- 0, 1, 839072, 0, 1, 16, 0, 0, 1, 4)
+/* ANA_AC:TSN_SF_STATUS:TSN_SF_STATUS */
+#define ANA_AC_TSN_SF_STATUS \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_TSN_SF_STATUS], 0, 1, \
+ 16, 0, 0, 1, 4)
#define ANA_AC_TSN_SF_STATUS_FRM_LEN GENMASK(25, 12)
#define ANA_AC_TSN_SF_STATUS_FRM_LEN_SET(x)\
@@ -201,11 +299,12 @@ enum sparx5_target {
#define ANA_AC_TSN_SF_STATUS_DLB_DROP_GET(x)\
FIELD_GET(ANA_AC_TSN_SF_STATUS_DLB_DROP, x)
-#define ANA_AC_TSN_SF_STATUS_TSN_SFID GENMASK(10, 1)
+#define ANA_AC_TSN_SF_STATUS_TSN_SFID\
+ GENMASK(regs->fsize[FW_ANA_AC_TSN_SF_STATUS_TSN_SFID] + 1 - 1, 1)
#define ANA_AC_TSN_SF_STATUS_TSN_SFID_SET(x)\
- FIELD_PREP(ANA_AC_TSN_SF_STATUS_TSN_SFID, x)
+ spx5_field_prep(ANA_AC_TSN_SF_STATUS_TSN_SFID, x)
#define ANA_AC_TSN_SF_STATUS_TSN_SFID_GET(x)\
- FIELD_GET(ANA_AC_TSN_SF_STATUS_TSN_SFID, x)
+ spx5_field_get(ANA_AC_TSN_SF_STATUS_TSN_SFID, x)
#define ANA_AC_TSN_SF_STATUS_TSTAMP_VLD BIT(0)
#define ANA_AC_TSN_SF_STATUS_TSTAMP_VLD_SET(x)\
@@ -213,15 +312,17 @@ enum sparx5_target {
#define ANA_AC_TSN_SF_STATUS_TSTAMP_VLD_GET(x)\
FIELD_GET(ANA_AC_TSN_SF_STATUS_TSTAMP_VLD, x)
-/* ANA_AC:SG_ACCESS:SG_ACCESS_CTRL */
-#define ANA_AC_SG_ACCESS_CTRL __REG(TARGET_ANA_AC,\
- 0, 1, 839140, 0, 1, 12, 0, 0, 1, 4)
+/* ANA_AC:SG_ACCESS:SG_ACCESS_CTRL */
+#define ANA_AC_SG_ACCESS_CTRL \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_SG_ACCESS], 0, 1, 12, \
+ 0, 0, 1, 4)
-#define ANA_AC_SG_ACCESS_CTRL_SGID GENMASK(9, 0)
+#define ANA_AC_SG_ACCESS_CTRL_SGID\
+ GENMASK(regs->fsize[FW_ANA_AC_SG_ACCESS_CTRL_SGID] + 0 - 1, 0)
#define ANA_AC_SG_ACCESS_CTRL_SGID_SET(x)\
- FIELD_PREP(ANA_AC_SG_ACCESS_CTRL_SGID, x)
+ spx5_field_prep(ANA_AC_SG_ACCESS_CTRL_SGID, x)
#define ANA_AC_SG_ACCESS_CTRL_SGID_GET(x)\
- FIELD_GET(ANA_AC_SG_ACCESS_CTRL_SGID, x)
+ spx5_field_get(ANA_AC_SG_ACCESS_CTRL_SGID, x)
#define ANA_AC_SG_ACCESS_CTRL_CONFIG_CHANGE BIT(28)
#define ANA_AC_SG_ACCESS_CTRL_CONFIG_CHANGE_SET(x)\
@@ -229,9 +330,10 @@ enum sparx5_target {
#define ANA_AC_SG_ACCESS_CTRL_CONFIG_CHANGE_GET(x)\
FIELD_GET(ANA_AC_SG_ACCESS_CTRL_CONFIG_CHANGE, x)
-/* ANA_AC:SG_ACCESS:SG_CYCLETIME_UPDATE_PERIOD */
-#define ANA_AC_SG_CYCLETIME_UPDATE_PERIOD __REG(TARGET_ANA_AC,\
- 0, 1, 839140, 0, 1, 12, 8, 0, 1, 4)
+/* ANA_AC:SG_ACCESS:SG_CYCLETIME_UPDATE_PERIOD */
+#define ANA_AC_SG_CYCLETIME_UPDATE_PERIOD \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_SG_ACCESS], 0, 1, 12, \
+ 8, 0, 1, 4)
#define ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_CLKS GENMASK(15, 0)
#define ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_CLKS_SET(x)\
@@ -245,17 +347,20 @@ enum sparx5_target {
#define ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_UPDATE_ENA_GET(x)\
FIELD_GET(ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_UPDATE_ENA, x)
-/* ANA_AC:SG_CONFIG:SG_CONFIG_REG_1 */
-#define ANA_AC_SG_CONFIG_REG_1 __REG(TARGET_ANA_AC,\
- 0, 1, 851584, 0, 1, 128, 48, 0, 1, 4)
+/* ANA_AC:SG_CONFIG:SG_CONFIG_REG_1 */
+#define ANA_AC_SG_CONFIG_REG_1 \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_SG_CONFIG], 0, 1, 128,\
+ 48, 0, 1, 4)
-/* ANA_AC:SG_CONFIG:SG_CONFIG_REG_2 */
-#define ANA_AC_SG_CONFIG_REG_2 __REG(TARGET_ANA_AC,\
- 0, 1, 851584, 0, 1, 128, 52, 0, 1, 4)
+/* ANA_AC:SG_CONFIG:SG_CONFIG_REG_2 */
+#define ANA_AC_SG_CONFIG_REG_2 \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_SG_CONFIG], 0, 1, 128,\
+ 52, 0, 1, 4)
-/* ANA_AC:SG_CONFIG:SG_CONFIG_REG_3 */
-#define ANA_AC_SG_CONFIG_REG_3 __REG(TARGET_ANA_AC,\
- 0, 1, 851584, 0, 1, 128, 56, 0, 1, 4)
+/* ANA_AC:SG_CONFIG:SG_CONFIG_REG_3 */
+#define ANA_AC_SG_CONFIG_REG_3 \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_SG_CONFIG], 0, 1, 128,\
+ 56, 0, 1, 4)
#define ANA_AC_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB GENMASK(15, 0)
#define ANA_AC_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB_SET(x)\
@@ -311,17 +416,20 @@ enum sparx5_target {
#define ANA_AC_SG_CONFIG_REG_3_OCTETS_EXCEEDED_GET(x)\
FIELD_GET(ANA_AC_SG_CONFIG_REG_3_OCTETS_EXCEEDED, x)
-/* ANA_AC:SG_CONFIG:SG_CONFIG_REG_4 */
-#define ANA_AC_SG_CONFIG_REG_4 __REG(TARGET_ANA_AC,\
- 0, 1, 851584, 0, 1, 128, 60, 0, 1, 4)
+/* ANA_AC:SG_CONFIG:SG_CONFIG_REG_4 */
+#define ANA_AC_SG_CONFIG_REG_4 \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_SG_CONFIG], 0, 1, 128,\
+ 60, 0, 1, 4)
-/* ANA_AC:SG_CONFIG:SG_CONFIG_REG_5 */
-#define ANA_AC_SG_CONFIG_REG_5 __REG(TARGET_ANA_AC,\
- 0, 1, 851584, 0, 1, 128, 64, 0, 1, 4)
+/* ANA_AC:SG_CONFIG:SG_CONFIG_REG_5 */
+#define ANA_AC_SG_CONFIG_REG_5 \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_SG_CONFIG], 0, 1, 128,\
+ 64, 0, 1, 4)
-/* ANA_AC:SG_CONFIG:SG_GCL_GS_CONFIG */
-#define ANA_AC_SG_GCL_GS_CONFIG(r) __REG(TARGET_ANA_AC,\
- 0, 1, 851584, 0, 1, 128, 0, r, 4, 4)
+/* ANA_AC:SG_CONFIG:SG_GCL_GS_CONFIG */
+#define ANA_AC_SG_GCL_GS_CONFIG(r) \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_SG_CONFIG], 0, 1, 128,\
+ 0, r, 4, 4)
#define ANA_AC_SG_GCL_GS_CONFIG_IPS GENMASK(3, 0)
#define ANA_AC_SG_GCL_GS_CONFIG_IPS_SET(x)\
@@ -335,25 +443,30 @@ enum sparx5_target {
#define ANA_AC_SG_GCL_GS_CONFIG_GATE_STATE_GET(x)\
FIELD_GET(ANA_AC_SG_GCL_GS_CONFIG_GATE_STATE, x)
-/* ANA_AC:SG_CONFIG:SG_GCL_TI_CONFIG */
-#define ANA_AC_SG_GCL_TI_CONFIG(r) __REG(TARGET_ANA_AC,\
- 0, 1, 851584, 0, 1, 128, 16, r, 4, 4)
+/* ANA_AC:SG_CONFIG:SG_GCL_TI_CONFIG */
+#define ANA_AC_SG_GCL_TI_CONFIG(r) \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_SG_CONFIG], 0, 1, 128,\
+ 16, r, 4, 4)
-/* ANA_AC:SG_CONFIG:SG_GCL_OCT_CONFIG */
-#define ANA_AC_SG_GCL_OCT_CONFIG(r) __REG(TARGET_ANA_AC,\
- 0, 1, 851584, 0, 1, 128, 32, r, 4, 4)
+/* ANA_AC:SG_CONFIG:SG_GCL_OCT_CONFIG */
+#define ANA_AC_SG_GCL_OCT_CONFIG(r) \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_SG_CONFIG], 0, 1, 128,\
+ 32, r, 4, 4)
-/* ANA_AC:SG_STATUS:SG_STATUS_REG_1 */
-#define ANA_AC_SG_STATUS_REG_1 __REG(TARGET_ANA_AC,\
- 0, 1, 839088, 0, 1, 16, 0, 0, 1, 4)
+/* ANA_AC:SG_STATUS:SG_STATUS_REG_1 */
+#define ANA_AC_SG_STATUS_REG_1 \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_SG_STATUS], 0, 1, 16, \
+ 0, 0, 1, 4)
-/* ANA_AC:SG_STATUS:SG_STATUS_REG_2 */
-#define ANA_AC_SG_STATUS_REG_2 __REG(TARGET_ANA_AC,\
- 0, 1, 839088, 0, 1, 16, 4, 0, 1, 4)
+/* ANA_AC:SG_STATUS:SG_STATUS_REG_2 */
+#define ANA_AC_SG_STATUS_REG_2 \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_SG_STATUS], 0, 1, 16, \
+ 4, 0, 1, 4)
-/* ANA_AC:SG_STATUS:SG_STATUS_REG_3 */
-#define ANA_AC_SG_STATUS_REG_3 __REG(TARGET_ANA_AC,\
- 0, 1, 839088, 0, 1, 16, 8, 0, 1, 4)
+/* ANA_AC:SG_STATUS:SG_STATUS_REG_3 */
+#define ANA_AC_SG_STATUS_REG_3 \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_SG_STATUS], 0, 1, 16, \
+ 8, 0, 1, 4)
#define ANA_AC_SG_STATUS_REG_3_CFG_CHG_TIME_SEC_MSB GENMASK(15, 0)
#define ANA_AC_SG_STATUS_REG_3_CFG_CHG_TIME_SEC_MSB_SET(x)\
@@ -385,23 +498,27 @@ enum sparx5_target {
#define ANA_AC_SG_STATUS_REG_3_GCL_OCTET_INDEX_GET(x)\
FIELD_GET(ANA_AC_SG_STATUS_REG_3_GCL_OCTET_INDEX, x)
-/* ANA_AC:SG_STATUS:SG_STATUS_REG_4 */
-#define ANA_AC_SG_STATUS_REG_4 __REG(TARGET_ANA_AC,\
- 0, 1, 839088, 0, 1, 16, 12, 0, 1, 4)
+/* ANA_AC:SG_STATUS:SG_STATUS_REG_4 */
+#define ANA_AC_SG_STATUS_REG_4 \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_SG_STATUS], 0, 1, 16, \
+ 12, 0, 1, 4)
-/* ANA_AC:STAT_GLOBAL_CFG_PORT:STAT_GLOBAL_EVENT_MASK */
-#define ANA_AC_PORT_SGE_CFG(r) __REG(TARGET_ANA_AC,\
- 0, 1, 851552, 0, 1, 20, 0, r, 4, 4)
+/* ANA_AC:STAT_GLOBAL_CFG_PORT:STAT_GLOBAL_EVENT_MASK */
+#define ANA_AC_PORT_SGE_CFG(r) \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_STAT_GLOBAL_CFG_PORT],\
+ 0, 1, 20, 0, r, 4, 4)
-#define ANA_AC_PORT_SGE_CFG_MASK GENMASK(15, 0)
+#define ANA_AC_PORT_SGE_CFG_MASK\
+ GENMASK(regs->fsize[FW_ANA_AC_PORT_SGE_CFG_MASK] + 0 - 1, 0)
#define ANA_AC_PORT_SGE_CFG_MASK_SET(x)\
- FIELD_PREP(ANA_AC_PORT_SGE_CFG_MASK, x)
+ spx5_field_prep(ANA_AC_PORT_SGE_CFG_MASK, x)
#define ANA_AC_PORT_SGE_CFG_MASK_GET(x)\
- FIELD_GET(ANA_AC_PORT_SGE_CFG_MASK, x)
+ spx5_field_get(ANA_AC_PORT_SGE_CFG_MASK, x)
-/* ANA_AC:STAT_GLOBAL_CFG_PORT:STAT_RESET */
-#define ANA_AC_STAT_RESET __REG(TARGET_ANA_AC,\
- 0, 1, 851552, 0, 1, 20, 16, 0, 1, 4)
+/* ANA_AC:STAT_GLOBAL_CFG_PORT:STAT_RESET */
+#define ANA_AC_STAT_RESET \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_STAT_GLOBAL_CFG_PORT],\
+ 0, 1, 20, 16, 0, 1, 4)
#define ANA_AC_STAT_RESET_RESET BIT(0)
#define ANA_AC_STAT_RESET_RESET_SET(x)\
@@ -409,9 +526,10 @@ enum sparx5_target {
#define ANA_AC_STAT_RESET_RESET_GET(x)\
FIELD_GET(ANA_AC_STAT_RESET_RESET, x)
-/* ANA_AC:STAT_CNT_CFG_PORT:STAT_CFG */
-#define ANA_AC_PORT_STAT_CFG(g, r) __REG(TARGET_ANA_AC,\
- 0, 1, 843776, g, 70, 64, 4, r, 4, 4)
+/* ANA_AC:STAT_CNT_CFG_PORT:STAT_CFG */
+#define ANA_AC_PORT_STAT_CFG(g, r) \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_STAT_CNT_CFG_PORT], g,\
+ regs->gcnt[GC_ANA_AC_STAT_CNT_CFG_PORT], 64, 4, r, 4, 4)
#define ANA_AC_PORT_STAT_CFG_CFG_PRIO_MASK GENMASK(11, 4)
#define ANA_AC_PORT_STAT_CFG_CFG_PRIO_MASK_SET(x)\
@@ -431,13 +549,15 @@ enum sparx5_target {
#define ANA_AC_PORT_STAT_CFG_CFG_CNT_BYTE_GET(x)\
FIELD_GET(ANA_AC_PORT_STAT_CFG_CFG_CNT_BYTE, x)
-/* ANA_AC:STAT_CNT_CFG_PORT:STAT_LSB_CNT */
-#define ANA_AC_PORT_STAT_LSB_CNT(g, r) __REG(TARGET_ANA_AC,\
- 0, 1, 843776, g, 70, 64, 20, r, 4, 4)
+/* ANA_AC:STAT_CNT_CFG_PORT:STAT_LSB_CNT */
+#define ANA_AC_PORT_STAT_LSB_CNT(g, r) \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_STAT_CNT_CFG_PORT], g,\
+ regs->gcnt[GC_ANA_AC_STAT_CNT_CFG_PORT], 64, 20, r, 4, 4)
-/* ANA_AC:STAT_GLOBAL_CFG_ACL:GLOBAL_CNT_FRM_TYPE_CFG */
-#define ANA_AC_ACL_GLOBAL_CNT_FRM_TYPE_CFG(r) __REG(TARGET_ANA_AC,\
- 0, 1, 893792, 0, 1, 24, 0, r, 2, 4)
+/* ANA_AC:STAT_GLOBAL_CFG_ACL:GLOBAL_CNT_FRM_TYPE_CFG */
+#define ANA_AC_ACL_GLOBAL_CNT_FRM_TYPE_CFG(r) \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_STAT_GLOBAL_CFG_ACL], \
+ 0, 1, 24, 0, r, 2, 4)
#define ANA_AC_ACL_GLOBAL_CNT_FRM_TYPE_CFG_GLOBAL_CFG_CNT_FRM_TYPE GENMASK(2, 0)
#define ANA_AC_ACL_GLOBAL_CNT_FRM_TYPE_CFG_GLOBAL_CFG_CNT_FRM_TYPE_SET(x)\
@@ -445,9 +565,10 @@ enum sparx5_target {
#define ANA_AC_ACL_GLOBAL_CNT_FRM_TYPE_CFG_GLOBAL_CFG_CNT_FRM_TYPE_GET(x)\
FIELD_GET(ANA_AC_ACL_GLOBAL_CNT_FRM_TYPE_CFG_GLOBAL_CFG_CNT_FRM_TYPE, x)
-/* ANA_AC:STAT_GLOBAL_CFG_ACL:STAT_GLOBAL_CFG */
-#define ANA_AC_ACL_STAT_GLOBAL_CFG(r) __REG(TARGET_ANA_AC,\
- 0, 1, 893792, 0, 1, 24, 8, r, 2, 4)
+/* ANA_AC:STAT_GLOBAL_CFG_ACL:STAT_GLOBAL_CFG */
+#define ANA_AC_ACL_STAT_GLOBAL_CFG(r) \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_STAT_GLOBAL_CFG_ACL], \
+ 0, 1, 24, 8, r, 2, 4)
#define ANA_AC_ACL_STAT_GLOBAL_CFG_GLOBAL_CFG_CNT_BYTE BIT(0)
#define ANA_AC_ACL_STAT_GLOBAL_CFG_GLOBAL_CFG_CNT_BYTE_SET(x)\
@@ -455,9 +576,10 @@ enum sparx5_target {
#define ANA_AC_ACL_STAT_GLOBAL_CFG_GLOBAL_CFG_CNT_BYTE_GET(x)\
FIELD_GET(ANA_AC_ACL_STAT_GLOBAL_CFG_GLOBAL_CFG_CNT_BYTE, x)
-/* ANA_AC:STAT_GLOBAL_CFG_ACL:STAT_GLOBAL_EVENT_MASK */
-#define ANA_AC_ACL_STAT_GLOBAL_EVENT_MASK(r) __REG(TARGET_ANA_AC,\
- 0, 1, 893792, 0, 1, 24, 16, r, 2, 4)
+/* ANA_AC:STAT_GLOBAL_CFG_ACL:STAT_GLOBAL_EVENT_MASK */
+#define ANA_AC_ACL_STAT_GLOBAL_EVENT_MASK(r) \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_STAT_GLOBAL_CFG_ACL], \
+ 0, 1, 24, 16, r, 2, 4)
#define ANA_AC_ACL_STAT_GLOBAL_EVENT_MASK_GLOBAL_EVENT_MASK GENMASK(3, 0)
#define ANA_AC_ACL_STAT_GLOBAL_EVENT_MASK_GLOBAL_EVENT_MASK_SET(x)\
@@ -465,9 +587,10 @@ enum sparx5_target {
#define ANA_AC_ACL_STAT_GLOBAL_EVENT_MASK_GLOBAL_EVENT_MASK_GET(x)\
FIELD_GET(ANA_AC_ACL_STAT_GLOBAL_EVENT_MASK_GLOBAL_EVENT_MASK, x)
-/* ANA_ACL:COMMON:VCAP_S2_CFG */
-#define ANA_ACL_VCAP_S2_CFG(r) __REG(TARGET_ANA_ACL,\
- 0, 1, 32768, 0, 1, 592, 0, r, 70, 4)
+/* ANA_ACL:COMMON:VCAP_S2_CFG */
+#define ANA_ACL_VCAP_S2_CFG(r) \
+ __REG(TARGET_ANA_ACL, 0, 1, regs->gaddr[GA_ANA_ACL_COMMON], 0, 1, 592, \
+ 0, r, regs->rcnt[RC_ANA_ACL_VCAP_S2_CFG], 4)
#define ANA_ACL_VCAP_S2_CFG_SEC_ROUTE_HANDLING_ENA BIT(28)
#define ANA_ACL_VCAP_S2_CFG_SEC_ROUTE_HANDLING_ENA_SET(x)\
@@ -553,9 +676,10 @@ enum sparx5_target {
#define ANA_ACL_VCAP_S2_CFG_SEC_ENA_GET(x)\
FIELD_GET(ANA_ACL_VCAP_S2_CFG_SEC_ENA, x)
-/* ANA_ACL:COMMON:SWAP_IP_CTRL */
-#define ANA_ACL_SWAP_IP_CTRL __REG(TARGET_ANA_ACL,\
- 0, 1, 32768, 0, 1, 592, 412, 0, 1, 4)
+/* ANA_ACL:COMMON:SWAP_IP_CTRL */
+#define ANA_ACL_SWAP_IP_CTRL \
+ __REG(TARGET_ANA_ACL, 0, 1, regs->gaddr[GA_ANA_ACL_COMMON], 0, 1, 592, \
+ 412, 0, 1, 4)
#define ANA_ACL_SWAP_IP_CTRL_DMAC_REPL_OFFSET_VAL GENMASK(23, 18)
#define ANA_ACL_SWAP_IP_CTRL_DMAC_REPL_OFFSET_VAL_SET(x)\
@@ -587,9 +711,10 @@ enum sparx5_target {
#define ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP4_TTL_ENA_GET(x)\
FIELD_GET(ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP4_TTL_ENA, x)
-/* ANA_ACL:COMMON:VCAP_S2_RLEG_STAT */
-#define ANA_ACL_VCAP_S2_RLEG_STAT(r) __REG(TARGET_ANA_ACL,\
- 0, 1, 32768, 0, 1, 592, 424, r, 4, 4)
+/* ANA_ACL:COMMON:VCAP_S2_RLEG_STAT */
+#define ANA_ACL_VCAP_S2_RLEG_STAT(r) \
+ __REG(TARGET_ANA_ACL, 0, 1, regs->gaddr[GA_ANA_ACL_COMMON], 0, 1, 592, \
+ 424, r, 4, 4)
#define ANA_ACL_VCAP_S2_RLEG_STAT_IRLEG_STAT_MASK GENMASK(12, 6)
#define ANA_ACL_VCAP_S2_RLEG_STAT_IRLEG_STAT_MASK_SET(x)\
@@ -603,9 +728,10 @@ enum sparx5_target {
#define ANA_ACL_VCAP_S2_RLEG_STAT_ERLEG_STAT_MASK_GET(x)\
FIELD_GET(ANA_ACL_VCAP_S2_RLEG_STAT_ERLEG_STAT_MASK, x)
-/* ANA_ACL:COMMON:VCAP_S2_FRAGMENT_CFG */
-#define ANA_ACL_VCAP_S2_FRAGMENT_CFG __REG(TARGET_ANA_ACL,\
- 0, 1, 32768, 0, 1, 592, 440, 0, 1, 4)
+/* ANA_ACL:COMMON:VCAP_S2_FRAGMENT_CFG */
+#define ANA_ACL_VCAP_S2_FRAGMENT_CFG \
+ __REG(TARGET_ANA_ACL, 0, 1, regs->gaddr[GA_ANA_ACL_COMMON], 0, 1, 592, \
+ 440, 0, 1, 4)
#define ANA_ACL_VCAP_S2_FRAGMENT_CFG_L4_MIN_LEN GENMASK(9, 5)
#define ANA_ACL_VCAP_S2_FRAGMENT_CFG_L4_MIN_LEN_SET(x)\
@@ -625,9 +751,10 @@ enum sparx5_target {
#define ANA_ACL_VCAP_S2_FRAGMENT_CFG_FRAGMENT_OFFSET_THRES_GET(x)\
FIELD_GET(ANA_ACL_VCAP_S2_FRAGMENT_CFG_FRAGMENT_OFFSET_THRES, x)
-/* ANA_ACL:COMMON:OWN_UPSID */
-#define ANA_ACL_OWN_UPSID(r) __REG(TARGET_ANA_ACL,\
- 0, 1, 32768, 0, 1, 592, 580, r, 3, 4)
+/* ANA_ACL:COMMON:OWN_UPSID */
+#define ANA_ACL_OWN_UPSID(r) \
+ __REG(TARGET_ANA_ACL, 0, 1, regs->gaddr[GA_ANA_ACL_COMMON], 0, 1, 592, \
+ 580, r, regs->rcnt[RC_ANA_ACL_OWN_UPSID], 4)
#define ANA_ACL_OWN_UPSID_OWN_UPSID GENMASK(4, 0)
#define ANA_ACL_OWN_UPSID_OWN_UPSID_SET(x)\
@@ -635,9 +762,10 @@ enum sparx5_target {
#define ANA_ACL_OWN_UPSID_OWN_UPSID_GET(x)\
FIELD_GET(ANA_ACL_OWN_UPSID_OWN_UPSID, x)
-/* ANA_ACL:KEY_SEL:VCAP_S2_KEY_SEL */
-#define ANA_ACL_VCAP_S2_KEY_SEL(g, r) __REG(TARGET_ANA_ACL,\
- 0, 1, 34200, g, 134, 16, 0, r, 4, 4)
+/* ANA_ACL:KEY_SEL:VCAP_S2_KEY_SEL */
+#define ANA_ACL_VCAP_S2_KEY_SEL(g, r) \
+ __REG(TARGET_ANA_ACL, 0, 1, regs->gaddr[GA_ANA_ACL_KEY_SEL], g, \
+ regs->gcnt[GC_ANA_ACL_KEY_SEL], 16, 0, r, 4, 4)
#define ANA_ACL_VCAP_S2_KEY_SEL_KEY_SEL_ENA BIT(13)
#define ANA_ACL_VCAP_S2_KEY_SEL_KEY_SEL_ENA_SET(x)\
@@ -687,17 +815,20 @@ enum sparx5_target {
#define ANA_ACL_VCAP_S2_KEY_SEL_ARP_KEY_SEL_GET(x)\
FIELD_GET(ANA_ACL_VCAP_S2_KEY_SEL_ARP_KEY_SEL, x)
-/* ANA_ACL:CNT_A:CNT_A */
-#define ANA_ACL_CNT_A(g) __REG(TARGET_ANA_ACL,\
- 0, 1, 0, g, 4096, 4, 0, 0, 1, 4)
+/* ANA_ACL:CNT_A:CNT_A */
+#define ANA_ACL_CNT_A(g) \
+ __REG(TARGET_ANA_ACL, 0, 1, 0, g, regs->gcnt[GC_ANA_ACL_CNT_A], 4, 0, \
+ 0, 1, 4)
-/* ANA_ACL:CNT_B:CNT_B */
-#define ANA_ACL_CNT_B(g) __REG(TARGET_ANA_ACL,\
- 0, 1, 16384, g, 4096, 4, 0, 0, 1, 4)
+/* ANA_ACL:CNT_B:CNT_B */
+#define ANA_ACL_CNT_B(g) \
+ __REG(TARGET_ANA_ACL, 0, 1, regs->gaddr[GA_ANA_ACL_CNT_B], g, \
+ regs->gcnt[GC_ANA_ACL_CNT_B], 4, 0, 0, 1, 4)
-/* ANA_ACL:STICKY:SEC_LOOKUP_STICKY */
-#define ANA_ACL_SEC_LOOKUP_STICKY(r) __REG(TARGET_ANA_ACL,\
- 0, 1, 36408, 0, 1, 16, 0, r, 4, 4)
+/* ANA_ACL:STICKY:SEC_LOOKUP_STICKY */
+#define ANA_ACL_SEC_LOOKUP_STICKY(r) \
+ __REG(TARGET_ANA_ACL, 0, 1, regs->gaddr[GA_ANA_ACL_STICKY], 0, 1, 16, \
+ 0, r, 4, 4)
#define ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_CLM_STICKY BIT(17)
#define ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_CLM_STICKY_SET(x)\
@@ -807,9 +938,10 @@ enum sparx5_target {
#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY_GET(x)\
FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY, x)
-/* ANA_AC_POL:POL_ALL_CFG:POL_UPD_INT_CFG */
-#define ANA_AC_POL_POL_UPD_INT_CFG __REG(TARGET_ANA_AC_POL,\
- 0, 1, 75968, 0, 1, 1160, 1148, 0, 1, 4)
+/* ANA_AC_POL:POL_ALL_CFG:POL_UPD_INT_CFG */
+#define ANA_AC_POL_POL_UPD_INT_CFG \
+ __REG(TARGET_ANA_AC_POL, 0, 1, regs->gaddr[GA_ANA_AC_POL_POL_ALL_CFG], \
+ 0, 1, 1160, 1148, 0, 1, 4)
#define ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT GENMASK(9, 0)
#define ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT_SET(x)\
@@ -817,9 +949,10 @@ enum sparx5_target {
#define ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT_GET(x)\
FIELD_GET(ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT, x)
-/* ANA_AC_POL:COMMON_BDLB:DLB_CTRL */
-#define ANA_AC_POL_BDLB_DLB_CTRL __REG(TARGET_ANA_AC_POL,\
- 0, 1, 79048, 0, 1, 8, 0, 0, 1, 4)
+/* ANA_AC_POL:COMMON_BDLB:DLB_CTRL */
+#define ANA_AC_POL_BDLB_DLB_CTRL \
+ __REG(TARGET_ANA_AC_POL, 0, 1, regs->gaddr[GA_ANA_AC_POL_COMMON_BDLB], \
+ 0, 1, 8, 0, 0, 1, 4)
#define ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS GENMASK(26, 19)
#define ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS_SET(x)\
@@ -845,9 +978,10 @@ enum sparx5_target {
#define ANA_AC_POL_BDLB_DLB_CTRL_DLB_ADD_ENA_GET(x)\
FIELD_GET(ANA_AC_POL_BDLB_DLB_CTRL_DLB_ADD_ENA, x)
-/* ANA_AC_POL:COMMON_BUM_SLB:DLB_CTRL */
-#define ANA_AC_POL_SLB_DLB_CTRL __REG(TARGET_ANA_AC_POL,\
- 0, 1, 79056, 0, 1, 20, 0, 0, 1, 4)
+/* ANA_AC_POL:COMMON_BUM_SLB:DLB_CTRL */
+#define ANA_AC_POL_SLB_DLB_CTRL \
+ __REG(TARGET_ANA_AC_POL, 0, 1, \
+ regs->gaddr[GA_ANA_AC_POL_COMMON_BUM_SLB], 0, 1, 20, 0, 0, 1, 4)
#define ANA_AC_POL_SLB_DLB_CTRL_CLK_PERIOD_01NS GENMASK(26, 19)
#define ANA_AC_POL_SLB_DLB_CTRL_CLK_PERIOD_01NS_SET(x)\
@@ -873,19 +1007,22 @@ enum sparx5_target {
#define ANA_AC_POL_SLB_DLB_CTRL_DLB_ADD_ENA_GET(x)\
FIELD_GET(ANA_AC_POL_SLB_DLB_CTRL_DLB_ADD_ENA, x)
-/* ANA_AC_SDLB:LBGRP_TBL:XLB_START */
-#define ANA_AC_SDLB_XLB_START(g) __REG(TARGET_ANA_AC_SDLB,\
- 0, 1, 295468, g, 10, 24, 0, 0, 1, 4)
+/* ANA_AC_SDLB:LBGRP_TBL:XLB_START */
+#define ANA_AC_SDLB_XLB_START(g) \
+ __REG(TARGET_ANA_AC_SDLB, 0, 1, regs->gaddr[GA_ANA_AC_SDLB_LBGRP_TBL], \
+ g, regs->gcnt[GC_ANA_AC_SDLB_LBGRP_TBL], 24, 0, 0, 1, 4)
-#define ANA_AC_SDLB_XLB_START_LBSET_START GENMASK(12, 0)
+#define ANA_AC_SDLB_XLB_START_LBSET_START\
+ GENMASK(regs->fsize[FW_ANA_AC_SDLB_XLB_START_LBSET_START] + 0 - 1, 0)
#define ANA_AC_SDLB_XLB_START_LBSET_START_SET(x)\
- FIELD_PREP(ANA_AC_SDLB_XLB_START_LBSET_START, x)
+ spx5_field_prep(ANA_AC_SDLB_XLB_START_LBSET_START, x)
#define ANA_AC_SDLB_XLB_START_LBSET_START_GET(x)\
- FIELD_GET(ANA_AC_SDLB_XLB_START_LBSET_START, x)
+ spx5_field_get(ANA_AC_SDLB_XLB_START_LBSET_START, x)
-/* ANA_AC_SDLB:LBGRP_TBL:PUP_INTERVAL */
-#define ANA_AC_SDLB_PUP_INTERVAL(g) __REG(TARGET_ANA_AC_SDLB,\
- 0, 1, 295468, g, 10, 24, 4, 0, 1, 4)
+/* ANA_AC_SDLB:LBGRP_TBL:PUP_INTERVAL */
+#define ANA_AC_SDLB_PUP_INTERVAL(g) \
+ __REG(TARGET_ANA_AC_SDLB, 0, 1, regs->gaddr[GA_ANA_AC_SDLB_LBGRP_TBL], \
+ g, regs->gcnt[GC_ANA_AC_SDLB_LBGRP_TBL], 24, 4, 0, 1, 4)
#define ANA_AC_SDLB_PUP_INTERVAL_PUP_INTERVAL GENMASK(19, 0)
#define ANA_AC_SDLB_PUP_INTERVAL_PUP_INTERVAL_SET(x)\
@@ -893,9 +1030,10 @@ enum sparx5_target {
#define ANA_AC_SDLB_PUP_INTERVAL_PUP_INTERVAL_GET(x)\
FIELD_GET(ANA_AC_SDLB_PUP_INTERVAL_PUP_INTERVAL, x)
-/* ANA_AC_SDLB:LBGRP_TBL:PUP_CTRL */
-#define ANA_AC_SDLB_PUP_CTRL(g) __REG(TARGET_ANA_AC_SDLB,\
- 0, 1, 295468, g, 10, 24, 8, 0, 1, 4)
+/* ANA_AC_SDLB:LBGRP_TBL:PUP_CTRL */
+#define ANA_AC_SDLB_PUP_CTRL(g) \
+ __REG(TARGET_ANA_AC_SDLB, 0, 1, regs->gaddr[GA_ANA_AC_SDLB_LBGRP_TBL], \
+ g, regs->gcnt[GC_ANA_AC_SDLB_LBGRP_TBL], 24, 8, 0, 1, 4)
#define ANA_AC_SDLB_PUP_CTRL_PUP_LB_DT GENMASK(18, 0)
#define ANA_AC_SDLB_PUP_CTRL_PUP_LB_DT_SET(x)\
@@ -909,19 +1047,22 @@ enum sparx5_target {
#define ANA_AC_SDLB_PUP_CTRL_PUP_ENA_GET(x)\
FIELD_GET(ANA_AC_SDLB_PUP_CTRL_PUP_ENA, x)
-/* ANA_AC_SDLB:LBGRP_TBL:LBGRP_MISC */
-#define ANA_AC_SDLB_LBGRP_MISC(g) __REG(TARGET_ANA_AC_SDLB,\
- 0, 1, 295468, g, 10, 24, 12, 0, 1, 4)
+/* ANA_AC_SDLB:LBGRP_TBL:LBGRP_MISC */
+#define ANA_AC_SDLB_LBGRP_MISC(g) \
+ __REG(TARGET_ANA_AC_SDLB, 0, 1, regs->gaddr[GA_ANA_AC_SDLB_LBGRP_TBL], \
+ g, regs->gcnt[GC_ANA_AC_SDLB_LBGRP_TBL], 24, 12, 0, 1, 4)
-#define ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT GENMASK(12, 8)
+#define ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT\
+ GENMASK(regs->fsize[FW_ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT] + 8 - 1, 8)
#define ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT_SET(x)\
- FIELD_PREP(ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT, x)
+ spx5_field_prep(ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT, x)
#define ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT_GET(x)\
- FIELD_GET(ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT, x)
+ spx5_field_get(ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT, x)
-/* ANA_AC_SDLB:LBGRP_TBL:FRM_RATE_TOKENS */
-#define ANA_AC_SDLB_FRM_RATE_TOKENS(g) __REG(TARGET_ANA_AC_SDLB,\
- 0, 1, 295468, g, 10, 24, 16, 0, 1, 4)
+/* ANA_AC_SDLB:LBGRP_TBL:FRM_RATE_TOKENS */
+#define ANA_AC_SDLB_FRM_RATE_TOKENS(g) \
+ __REG(TARGET_ANA_AC_SDLB, 0, 1, regs->gaddr[GA_ANA_AC_SDLB_LBGRP_TBL], \
+ g, regs->gcnt[GC_ANA_AC_SDLB_LBGRP_TBL], 24, 16, 0, 1, 4)
#define ANA_AC_SDLB_FRM_RATE_TOKENS_FRM_RATE_TOKENS GENMASK(12, 0)
#define ANA_AC_SDLB_FRM_RATE_TOKENS_FRM_RATE_TOKENS_SET(x)\
@@ -929,9 +1070,10 @@ enum sparx5_target {
#define ANA_AC_SDLB_FRM_RATE_TOKENS_FRM_RATE_TOKENS_GET(x)\
FIELD_GET(ANA_AC_SDLB_FRM_RATE_TOKENS_FRM_RATE_TOKENS, x)
-/* ANA_AC_SDLB:LBGRP_TBL:LBGRP_STATE_TBL */
-#define ANA_AC_SDLB_LBGRP_STATE_TBL(g) __REG(TARGET_ANA_AC_SDLB,\
- 0, 1, 295468, g, 10, 24, 20, 0, 1, 4)
+/* ANA_AC_SDLB:LBGRP_TBL:LBGRP_STATE_TBL */
+#define ANA_AC_SDLB_LBGRP_STATE_TBL(g) \
+ __REG(TARGET_ANA_AC_SDLB, 0, 1, regs->gaddr[GA_ANA_AC_SDLB_LBGRP_TBL], \
+ g, regs->gcnt[GC_ANA_AC_SDLB_LBGRP_TBL], 24, 20, 0, 1, 4)
#define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_ONGOING BIT(0)
#define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_ONGOING_SET(x)\
@@ -945,15 +1087,17 @@ enum sparx5_target {
#define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_WAIT_ACK_GET(x)\
FIELD_GET(ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_WAIT_ACK, x)
-#define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT GENMASK(28, 16)
+#define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT\
+ GENMASK(regs->fsize[FW_ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT] + 16 - 1, 16)
#define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT_SET(x)\
- FIELD_PREP(ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT, x)
+ spx5_field_prep(ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT, x)
#define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT_GET(x)\
- FIELD_GET(ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT, x)
+ spx5_field_get(ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT, x)
-/* ANA_AC_SDLB:LBSET_TBL:PUP_TOKENS */
-#define ANA_AC_SDLB_PUP_TOKENS(g, r) __REG(TARGET_ANA_AC_SDLB,\
- 0, 1, 0, g, 4616, 64, 0, r, 2, 4)
+/* ANA_AC_SDLB:LBSET_TBL:PUP_TOKENS */
+#define ANA_AC_SDLB_PUP_TOKENS(g, r) \
+ __REG(TARGET_ANA_AC_SDLB, 0, 1, 0, g, \
+ regs->gcnt[GC_ANA_AC_SDLB_LBSET_TBL], 64, 0, r, 2, 4)
#define ANA_AC_SDLB_PUP_TOKENS_PUP_TOKENS GENMASK(12, 0)
#define ANA_AC_SDLB_PUP_TOKENS_PUP_TOKENS_SET(x)\
@@ -961,9 +1105,10 @@ enum sparx5_target {
#define ANA_AC_SDLB_PUP_TOKENS_PUP_TOKENS_GET(x)\
FIELD_GET(ANA_AC_SDLB_PUP_TOKENS_PUP_TOKENS, x)
-/* ANA_AC_SDLB:LBSET_TBL:THRES */
-#define ANA_AC_SDLB_THRES(g, r) __REG(TARGET_ANA_AC_SDLB,\
- 0, 1, 0, g, 4616, 64, 8, r, 2, 4)
+/* ANA_AC_SDLB:LBSET_TBL:THRES */
+#define ANA_AC_SDLB_THRES(g, r) \
+ __REG(TARGET_ANA_AC_SDLB, 0, 1, 0, g, \
+ regs->gcnt[GC_ANA_AC_SDLB_LBSET_TBL], 64, 8, r, 2, 4)
#define ANA_AC_SDLB_THRES_THRES GENMASK(9, 0)
#define ANA_AC_SDLB_THRES_THRES_SET(x)\
@@ -977,25 +1122,29 @@ enum sparx5_target {
#define ANA_AC_SDLB_THRES_THRES_HYS_GET(x)\
FIELD_GET(ANA_AC_SDLB_THRES_THRES_HYS, x)
-/* ANA_AC_SDLB:LBSET_TBL:XLB_NEXT */
-#define ANA_AC_SDLB_XLB_NEXT(g) __REG(TARGET_ANA_AC_SDLB,\
- 0, 1, 0, g, 4616, 64, 16, 0, 1, 4)
+/* ANA_AC_SDLB:LBSET_TBL:XLB_NEXT */
+#define ANA_AC_SDLB_XLB_NEXT(g) \
+ __REG(TARGET_ANA_AC_SDLB, 0, 1, 0, g, \
+ regs->gcnt[GC_ANA_AC_SDLB_LBSET_TBL], 64, 16, 0, 1, 4)
-#define ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT GENMASK(12, 0)
+#define ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT\
+ GENMASK(regs->fsize[FW_ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT] + 0 - 1, 0)
#define ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT_SET(x)\
- FIELD_PREP(ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT, x)
+ spx5_field_prep(ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT, x)
#define ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT_GET(x)\
- FIELD_GET(ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT, x)
+ spx5_field_get(ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT, x)
-#define ANA_AC_SDLB_XLB_NEXT_LBGRP GENMASK(27, 24)
+#define ANA_AC_SDLB_XLB_NEXT_LBGRP\
+ GENMASK(regs->fsize[FW_ANA_AC_SDLB_XLB_NEXT_LBGRP] + 24 - 1, 24)
#define ANA_AC_SDLB_XLB_NEXT_LBGRP_SET(x)\
- FIELD_PREP(ANA_AC_SDLB_XLB_NEXT_LBGRP, x)
+ spx5_field_prep(ANA_AC_SDLB_XLB_NEXT_LBGRP, x)
#define ANA_AC_SDLB_XLB_NEXT_LBGRP_GET(x)\
- FIELD_GET(ANA_AC_SDLB_XLB_NEXT_LBGRP, x)
+ spx5_field_get(ANA_AC_SDLB_XLB_NEXT_LBGRP, x)
-/* ANA_AC_SDLB:LBSET_TBL:INH_CTRL */
-#define ANA_AC_SDLB_INH_CTRL(g, r) __REG(TARGET_ANA_AC_SDLB,\
- 0, 1, 0, g, 4616, 64, 20, r, 2, 4)
+/* ANA_AC_SDLB:LBSET_TBL:INH_CTRL */
+#define ANA_AC_SDLB_INH_CTRL(g, r) \
+ __REG(TARGET_ANA_AC_SDLB, 0, 1, 0, g, \
+ regs->gcnt[GC_ANA_AC_SDLB_LBSET_TBL], 64, 20, r, 2, 4)
#define ANA_AC_SDLB_INH_CTRL_PUP_TOKENS_MAX GENMASK(12, 0)
#define ANA_AC_SDLB_INH_CTRL_PUP_TOKENS_MAX_SET(x)\
@@ -1015,19 +1164,22 @@ enum sparx5_target {
#define ANA_AC_SDLB_INH_CTRL_INH_LB_GET(x)\
FIELD_GET(ANA_AC_SDLB_INH_CTRL_INH_LB, x)
-/* ANA_AC_SDLB:LBSET_TBL:INH_LBSET_ADDR */
-#define ANA_AC_SDLB_INH_LBSET_ADDR(g) __REG(TARGET_ANA_AC_SDLB,\
- 0, 1, 0, g, 4616, 64, 28, 0, 1, 4)
+/* ANA_AC_SDLB:LBSET_TBL:INH_LBSET_ADDR */
+#define ANA_AC_SDLB_INH_LBSET_ADDR(g) \
+ __REG(TARGET_ANA_AC_SDLB, 0, 1, 0, g, \
+ regs->gcnt[GC_ANA_AC_SDLB_LBSET_TBL], 64, 28, 0, 1, 4)
-#define ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR GENMASK(12, 0)
+#define ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR\
+ GENMASK(regs->fsize[FW_ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR] + 0 - 1, 0)
#define ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR_SET(x)\
- FIELD_PREP(ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR, x)
+ spx5_field_prep(ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR, x)
#define ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR_GET(x)\
- FIELD_GET(ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR, x)
+ spx5_field_get(ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR, x)
-/* ANA_AC_SDLB:LBSET_TBL:DLB_MISC */
-#define ANA_AC_SDLB_DLB_MISC(g) __REG(TARGET_ANA_AC_SDLB,\
- 0, 1, 0, g, 4616, 64, 32, 0, 1, 4)
+/* ANA_AC_SDLB:LBSET_TBL:DLB_MISC */
+#define ANA_AC_SDLB_DLB_MISC(g) \
+ __REG(TARGET_ANA_AC_SDLB, 0, 1, 0, g, \
+ regs->gcnt[GC_ANA_AC_SDLB_LBSET_TBL], 64, 32, 0, 1, 4)
#define ANA_AC_SDLB_DLB_MISC_DLB_FRM_RATE_ENA BIT(0)
#define ANA_AC_SDLB_DLB_MISC_DLB_FRM_RATE_ENA_SET(x)\
@@ -1047,9 +1199,10 @@ enum sparx5_target {
#define ANA_AC_SDLB_DLB_MISC_DLB_FRM_ADJ_GET(x)\
FIELD_GET(ANA_AC_SDLB_DLB_MISC_DLB_FRM_ADJ, x)
-/* ANA_AC_SDLB:LBSET_TBL:DLB_CFG */
-#define ANA_AC_SDLB_DLB_CFG(g) __REG(TARGET_ANA_AC_SDLB,\
- 0, 1, 0, g, 4616, 64, 36, 0, 1, 4)
+/* ANA_AC_SDLB:LBSET_TBL:DLB_CFG */
+#define ANA_AC_SDLB_DLB_CFG(g) \
+ __REG(TARGET_ANA_AC_SDLB, 0, 1, 0, g, \
+ regs->gcnt[GC_ANA_AC_SDLB_LBSET_TBL], 64, 36, 0, 1, 4)
#define ANA_AC_SDLB_DLB_CFG_DROP_ON_YELLOW_ENA BIT(11)
#define ANA_AC_SDLB_DLB_CFG_DROP_ON_YELLOW_ENA_SET(x)\
@@ -1099,9 +1252,10 @@ enum sparx5_target {
#define ANA_AC_SDLB_DLB_CFG_TRAFFIC_TYPE_MASK_GET(x)\
FIELD_GET(ANA_AC_SDLB_DLB_CFG_TRAFFIC_TYPE_MASK, x)
-/* ANA_CL:PORT:FILTER_CTRL */
-#define ANA_CL_FILTER_CTRL(g) __REG(TARGET_ANA_CL,\
- 0, 1, 131072, g, 70, 512, 4, 0, 1, 4)
+/* ANA_CL:PORT:FILTER_CTRL */
+#define ANA_CL_FILTER_CTRL(g) \
+ __REG(TARGET_ANA_CL, 0, 1, regs->gaddr[GA_ANA_CL_PORT], g, \
+ regs->gcnt[GC_ANA_CL_PORT], 512, 4, 0, 1, 4)
#define ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS BIT(2)
#define ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS_SET(x)\
@@ -1121,9 +1275,10 @@ enum sparx5_target {
#define ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA_GET(x)\
FIELD_GET(ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA, x)
-/* ANA_CL:PORT:VLAN_FILTER_CTRL */
-#define ANA_CL_VLAN_FILTER_CTRL(g, r) __REG(TARGET_ANA_CL,\
- 0, 1, 131072, g, 70, 512, 8, r, 3, 4)
+/* ANA_CL:PORT:VLAN_FILTER_CTRL */
+#define ANA_CL_VLAN_FILTER_CTRL(g, r) \
+ __REG(TARGET_ANA_CL, 0, 1, regs->gaddr[GA_ANA_CL_PORT], g, \
+ regs->gcnt[GC_ANA_CL_PORT], 512, 8, r, 3, 4)
#define ANA_CL_VLAN_FILTER_CTRL_TAG_REQUIRED_ENA BIT(10)
#define ANA_CL_VLAN_FILTER_CTRL_TAG_REQUIRED_ENA_SET(x)\
@@ -1191,9 +1346,10 @@ enum sparx5_target {
#define ANA_CL_VLAN_FILTER_CTRL_CUST3_STAG_DIS_GET(x)\
FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_CUST3_STAG_DIS, x)
-/* ANA_CL:PORT:ETAG_FILTER_CTRL */
-#define ANA_CL_ETAG_FILTER_CTRL(g) __REG(TARGET_ANA_CL,\
- 0, 1, 131072, g, 70, 512, 20, 0, 1, 4)
+/* ANA_CL:PORT:ETAG_FILTER_CTRL */
+#define ANA_CL_ETAG_FILTER_CTRL(g) \
+ __REG(TARGET_ANA_CL, 0, 1, regs->gaddr[GA_ANA_CL_PORT], g, \
+ regs->gcnt[GC_ANA_CL_PORT], 512, 20, 0, 1, 4)
#define ANA_CL_ETAG_FILTER_CTRL_ETAG_REQUIRED_ENA BIT(1)
#define ANA_CL_ETAG_FILTER_CTRL_ETAG_REQUIRED_ENA_SET(x)\
@@ -1207,9 +1363,10 @@ enum sparx5_target {
#define ANA_CL_ETAG_FILTER_CTRL_ETAG_DIS_GET(x)\
FIELD_GET(ANA_CL_ETAG_FILTER_CTRL_ETAG_DIS, x)
-/* ANA_CL:PORT:VLAN_CTRL */
-#define ANA_CL_VLAN_CTRL(g) __REG(TARGET_ANA_CL,\
- 0, 1, 131072, g, 70, 512, 32, 0, 1, 4)
+/* ANA_CL:PORT:VLAN_CTRL */
+#define ANA_CL_VLAN_CTRL(g) \
+ __REG(TARGET_ANA_CL, 0, 1, regs->gaddr[GA_ANA_CL_PORT], g, \
+ regs->gcnt[GC_ANA_CL_PORT], 512, 32, 0, 1, 4)
#define ANA_CL_VLAN_CTRL_PORT_VOE_TPID_AWARE_DIS GENMASK(30, 26)
#define ANA_CL_VLAN_CTRL_PORT_VOE_TPID_AWARE_DIS_SET(x)\
@@ -1277,9 +1434,10 @@ enum sparx5_target {
#define ANA_CL_VLAN_CTRL_PORT_VID_GET(x)\
FIELD_GET(ANA_CL_VLAN_CTRL_PORT_VID, x)
-/* ANA_CL:PORT:VLAN_CTRL_2 */
-#define ANA_CL_VLAN_CTRL_2(g) __REG(TARGET_ANA_CL,\
- 0, 1, 131072, g, 70, 512, 36, 0, 1, 4)
+/* ANA_CL:PORT:VLAN_CTRL_2 */
+#define ANA_CL_VLAN_CTRL_2(g) \
+ __REG(TARGET_ANA_CL, 0, 1, regs->gaddr[GA_ANA_CL_PORT], g, \
+ regs->gcnt[GC_ANA_CL_PORT], 512, 36, 0, 1, 4)
#define ANA_CL_VLAN_CTRL_2_VLAN_PUSH_CNT GENMASK(1, 0)
#define ANA_CL_VLAN_CTRL_2_VLAN_PUSH_CNT_SET(x)\
@@ -1287,9 +1445,10 @@ enum sparx5_target {
#define ANA_CL_VLAN_CTRL_2_VLAN_PUSH_CNT_GET(x)\
FIELD_GET(ANA_CL_VLAN_CTRL_2_VLAN_PUSH_CNT, x)
-/* ANA_CL:PORT:PCP_DEI_MAP_CFG */
-#define ANA_CL_PCP_DEI_MAP_CFG(g, r) __REG(TARGET_ANA_CL,\
- 0, 1, 131072, g, 70, 512, 108, r, 16, 4)
+/* ANA_CL:PORT:PCP_DEI_MAP_CFG */
+#define ANA_CL_PCP_DEI_MAP_CFG(g, r) \
+ __REG(TARGET_ANA_CL, 0, 1, regs->gaddr[GA_ANA_CL_PORT], g, \
+ regs->gcnt[GC_ANA_CL_PORT], 512, 108, r, 16, 4)
#define ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_DP_VAL GENMASK(4, 3)
#define ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_DP_VAL_SET(x)\
@@ -1303,9 +1462,10 @@ enum sparx5_target {
#define ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_QOS_VAL_GET(x)\
FIELD_GET(ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_QOS_VAL, x)
-/* ANA_CL:PORT:QOS_CFG */
-#define ANA_CL_QOS_CFG(g) __REG(TARGET_ANA_CL,\
- 0, 1, 131072, g, 70, 512, 172, 0, 1, 4)
+/* ANA_CL:PORT:QOS_CFG */
+#define ANA_CL_QOS_CFG(g) \
+ __REG(TARGET_ANA_CL, 0, 1, regs->gaddr[GA_ANA_CL_PORT], g, \
+ regs->gcnt[GC_ANA_CL_PORT], 512, 172, 0, 1, 4)
#define ANA_CL_QOS_CFG_DEFAULT_COSID_ENA BIT(17)
#define ANA_CL_QOS_CFG_DEFAULT_COSID_ENA_SET(x)\
@@ -1379,13 +1539,15 @@ enum sparx5_target {
#define ANA_CL_QOS_CFG_DEFAULT_QOS_VAL_GET(x)\
FIELD_GET(ANA_CL_QOS_CFG_DEFAULT_QOS_VAL, x)
-/* ANA_CL:PORT:CAPTURE_BPDU_CFG */
-#define ANA_CL_CAPTURE_BPDU_CFG(g) __REG(TARGET_ANA_CL,\
- 0, 1, 131072, g, 70, 512, 196, 0, 1, 4)
+/* ANA_CL:PORT:CAPTURE_BPDU_CFG */
+#define ANA_CL_CAPTURE_BPDU_CFG(g) \
+ __REG(TARGET_ANA_CL, 0, 1, regs->gaddr[GA_ANA_CL_PORT], g, \
+ regs->gcnt[GC_ANA_CL_PORT], 512, 196, 0, 1, 4)
-/* ANA_CL:PORT:ADV_CL_CFG_2 */
-#define ANA_CL_ADV_CL_CFG_2(g, r) __REG(TARGET_ANA_CL,\
- 0, 1, 131072, g, 70, 512, 200, r, 6, 4)
+/* ANA_CL:PORT:ADV_CL_CFG_2 */
+#define ANA_CL_ADV_CL_CFG_2(g, r) \
+ __REG(TARGET_ANA_CL, 0, 1, regs->gaddr[GA_ANA_CL_PORT], g, \
+ regs->gcnt[GC_ANA_CL_PORT], 512, 200, r, 6, 4)
#define ANA_CL_ADV_CL_CFG_2_USE_CL_TCI0_ENA BIT(1)
#define ANA_CL_ADV_CL_CFG_2_USE_CL_TCI0_ENA_SET(x)\
@@ -1399,9 +1561,10 @@ enum sparx5_target {
#define ANA_CL_ADV_CL_CFG_2_USE_CL_DSCP_ENA_GET(x)\
FIELD_GET(ANA_CL_ADV_CL_CFG_2_USE_CL_DSCP_ENA, x)
-/* ANA_CL:PORT:ADV_CL_CFG */
-#define ANA_CL_ADV_CL_CFG(g, r) __REG(TARGET_ANA_CL,\
- 0, 1, 131072, g, 70, 512, 224, r, 6, 4)
+/* ANA_CL:PORT:ADV_CL_CFG */
+#define ANA_CL_ADV_CL_CFG(g, r) \
+ __REG(TARGET_ANA_CL, 0, 1, regs->gaddr[GA_ANA_CL_PORT], g, \
+ regs->gcnt[GC_ANA_CL_PORT], 512, 224, r, 6, 4)
#define ANA_CL_ADV_CL_CFG_IP4_CLM_KEY_SEL GENMASK(30, 26)
#define ANA_CL_ADV_CL_CFG_IP4_CLM_KEY_SEL_SET(x)\
@@ -1445,9 +1608,10 @@ enum sparx5_target {
#define ANA_CL_ADV_CL_CFG_LOOKUP_ENA_GET(x)\
FIELD_GET(ANA_CL_ADV_CL_CFG_LOOKUP_ENA, x)
-/* ANA_CL:COMMON:OWN_UPSID */
-#define ANA_CL_OWN_UPSID(r) __REG(TARGET_ANA_CL,\
- 0, 1, 166912, 0, 1, 756, 0, r, 3, 4)
+/* ANA_CL:COMMON:OWN_UPSID */
+#define ANA_CL_OWN_UPSID(r) \
+ __REG(TARGET_ANA_CL, 0, 1, regs->gaddr[GA_ANA_CL_COMMON], 0, 1, 756, 0,\
+ r, regs->rcnt[RC_ANA_CL_OWN_UPSID], 4)
#define ANA_CL_OWN_UPSID_OWN_UPSID GENMASK(4, 0)
#define ANA_CL_OWN_UPSID_OWN_UPSID_SET(x)\
@@ -1455,9 +1619,10 @@ enum sparx5_target {
#define ANA_CL_OWN_UPSID_OWN_UPSID_GET(x)\
FIELD_GET(ANA_CL_OWN_UPSID_OWN_UPSID, x)
-/* ANA_CL:COMMON:DSCP_CFG */
-#define ANA_CL_DSCP_CFG(r) __REG(TARGET_ANA_CL,\
- 0, 1, 166912, 0, 1, 756, 256, r, 64, 4)
+/* ANA_CL:COMMON:DSCP_CFG */
+#define ANA_CL_DSCP_CFG(r) \
+ __REG(TARGET_ANA_CL, 0, 1, regs->gaddr[GA_ANA_CL_COMMON], 0, 1, 756, \
+ 256, r, 64, 4)
#define ANA_CL_DSCP_CFG_DSCP_TRANSLATE_VAL GENMASK(12, 7)
#define ANA_CL_DSCP_CFG_DSCP_TRANSLATE_VAL_SET(x)\
@@ -1489,9 +1654,10 @@ enum sparx5_target {
#define ANA_CL_DSCP_CFG_DSCP_TRUST_ENA_GET(x)\
FIELD_GET(ANA_CL_DSCP_CFG_DSCP_TRUST_ENA, x)
-/* ANA_CL:COMMON:QOS_MAP_CFG */
-#define ANA_CL_QOS_MAP_CFG(r) __REG(TARGET_ANA_CL,\
- 0, 1, 166912, 0, 1, 756, 512, r, 32, 4)
+/* ANA_CL:COMMON:QOS_MAP_CFG */
+#define ANA_CL_QOS_MAP_CFG(r) \
+ __REG(TARGET_ANA_CL, 0, 1, regs->gaddr[GA_ANA_CL_COMMON], 0, 1, 756, \
+ 512, r, 32, 4)
#define ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL GENMASK(9, 4)
#define ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL_SET(x)\
@@ -1499,9 +1665,10 @@ enum sparx5_target {
#define ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL_GET(x)\
FIELD_GET(ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL, x)
-/* ANA_L2:COMMON:FWD_CFG */
-#define ANA_L2_FWD_CFG __REG(TARGET_ANA_L2,\
- 0, 1, 566024, 0, 1, 700, 0, 0, 1, 4)
+/* ANA_L2:COMMON:FWD_CFG */
+#define ANA_L2_FWD_CFG \
+ __REG(TARGET_ANA_L2, 0, 1, regs->gaddr[GA_ANA_L2_COMMON], 0, 1, \
+ regs->gsize[GW_ANA_L2_COMMON], 0, 0, 1, 4)
#define ANA_L2_FWD_CFG_MAC_TBL_SPLIT_SEL GENMASK(21, 20)
#define ANA_L2_FWD_CFG_MAC_TBL_SPLIT_SEL_SET(x)\
@@ -1575,17 +1742,22 @@ enum sparx5_target {
#define ANA_L2_FWD_CFG_FWD_ENA_GET(x)\
FIELD_GET(ANA_L2_FWD_CFG_FWD_ENA, x)
-/* ANA_L2:COMMON:AUTO_LRN_CFG */
-#define ANA_L2_AUTO_LRN_CFG __REG(TARGET_ANA_L2,\
- 0, 1, 566024, 0, 1, 700, 24, 0, 1, 4)
+/* ANA_L2:COMMON:AUTO_LRN_CFG */
+#define ANA_L2_AUTO_LRN_CFG \
+ __REG(TARGET_ANA_L2, 0, 1, regs->gaddr[GA_ANA_L2_COMMON], 0, 1, \
+ regs->gsize[GW_ANA_L2_COMMON], 24, 0, 1, 4)
-/* ANA_L2:COMMON:AUTO_LRN_CFG1 */
-#define ANA_L2_AUTO_LRN_CFG1 __REG(TARGET_ANA_L2,\
- 0, 1, 566024, 0, 1, 700, 28, 0, 1, 4)
+/* SPARX5 ONLY */
+/* ANA_L2:COMMON:AUTO_LRN_CFG1 */
+#define ANA_L2_AUTO_LRN_CFG1 \
+ __REG(TARGET_ANA_L2, 0, 1, regs->gaddr[GA_ANA_L2_COMMON], 0, 1, \
+ regs->gsize[GW_ANA_L2_COMMON], 28, 0, 1, 4)
-/* ANA_L2:COMMON:AUTO_LRN_CFG2 */
-#define ANA_L2_AUTO_LRN_CFG2 __REG(TARGET_ANA_L2,\
- 0, 1, 566024, 0, 1, 700, 32, 0, 1, 4)
+/* SPARX5 ONLY */
+/* ANA_L2:COMMON:AUTO_LRN_CFG2 */
+#define ANA_L2_AUTO_LRN_CFG2 \
+ __REG(TARGET_ANA_L2, 0, 1, regs->gaddr[GA_ANA_L2_COMMON], 0, 1, \
+ regs->gsize[GW_ANA_L2_COMMON], 32, 0, 1, 4)
#define ANA_L2_AUTO_LRN_CFG2_AUTO_LRN_ENA2 BIT(0)
#define ANA_L2_AUTO_LRN_CFG2_AUTO_LRN_ENA2_SET(x)\
@@ -1593,9 +1765,11 @@ enum sparx5_target {
#define ANA_L2_AUTO_LRN_CFG2_AUTO_LRN_ENA2_GET(x)\
FIELD_GET(ANA_L2_AUTO_LRN_CFG2_AUTO_LRN_ENA2, x)
-/* ANA_L2:COMMON:OWN_UPSID */
-#define ANA_L2_OWN_UPSID(r) __REG(TARGET_ANA_L2,\
- 0, 1, 566024, 0, 1, 700, 672, r, 3, 4)
+/* ANA_L2:COMMON:OWN_UPSID */
+#define ANA_L2_OWN_UPSID(r) \
+ __REG(TARGET_ANA_L2, 0, 1, regs->gaddr[GA_ANA_L2_COMMON], 0, 1, \
+ regs->gsize[GW_ANA_L2_COMMON], 672, r, \
+ regs->rcnt[RC_ANA_L2_OWN_UPSID], 4)
#define ANA_L2_OWN_UPSID_OWN_UPSID GENMASK(4, 0)
#define ANA_L2_OWN_UPSID_OWN_UPSID_SET(x)\
@@ -1603,29 +1777,34 @@ enum sparx5_target {
#define ANA_L2_OWN_UPSID_OWN_UPSID_GET(x)\
FIELD_GET(ANA_L2_OWN_UPSID_OWN_UPSID, x)
-/* ANA_L2:ISDX:DLB_CFG */
-#define ANA_L2_DLB_CFG(g) __REG(TARGET_ANA_L2,\
- 0, 1, 0, g, 4096, 128, 56, 0, 1, 4)
+/* ANA_L2:ISDX:DLB_CFG */
+#define ANA_L2_DLB_CFG(g) \
+ __REG(TARGET_ANA_L2, 0, 1, 0, g, regs->gcnt[GC_ANA_L2_ISDX], 128, 56, \
+ 0, 1, 4)
-#define ANA_L2_DLB_CFG_DLB_IDX GENMASK(12, 0)
+#define ANA_L2_DLB_CFG_DLB_IDX\
+ GENMASK(regs->fsize[FW_ANA_L2_DLB_CFG_DLB_IDX] + 0 - 1, 0)
#define ANA_L2_DLB_CFG_DLB_IDX_SET(x)\
- FIELD_PREP(ANA_L2_DLB_CFG_DLB_IDX, x)
+ spx5_field_prep(ANA_L2_DLB_CFG_DLB_IDX, x)
#define ANA_L2_DLB_CFG_DLB_IDX_GET(x)\
- FIELD_GET(ANA_L2_DLB_CFG_DLB_IDX, x)
+ spx5_field_get(ANA_L2_DLB_CFG_DLB_IDX, x)
-/* ANA_L2:ISDX:TSN_CFG */
-#define ANA_L2_TSN_CFG(g) __REG(TARGET_ANA_L2,\
- 0, 1, 0, g, 4096, 128, 100, 0, 1, 4)
+/* ANA_L2:ISDX:TSN_CFG */
+#define ANA_L2_TSN_CFG(g) \
+ __REG(TARGET_ANA_L2, 0, 1, 0, g, regs->gcnt[GC_ANA_L2_ISDX], 128, 100, \
+ 0, 1, 4)
-#define ANA_L2_TSN_CFG_TSN_SFID GENMASK(9, 0)
+#define ANA_L2_TSN_CFG_TSN_SFID\
+ GENMASK(regs->fsize[FW_ANA_L2_TSN_CFG_TSN_SFID] + 0 - 1, 0)
#define ANA_L2_TSN_CFG_TSN_SFID_SET(x)\
- FIELD_PREP(ANA_L2_TSN_CFG_TSN_SFID, x)
+ spx5_field_prep(ANA_L2_TSN_CFG_TSN_SFID, x)
#define ANA_L2_TSN_CFG_TSN_SFID_GET(x)\
- FIELD_GET(ANA_L2_TSN_CFG_TSN_SFID, x)
+ spx5_field_get(ANA_L2_TSN_CFG_TSN_SFID, x)
-/* ANA_L3:COMMON:VLAN_CTRL */
-#define ANA_L3_VLAN_CTRL __REG(TARGET_ANA_L3,\
- 0, 1, 493632, 0, 1, 184, 4, 0, 1, 4)
+/* ANA_L3:COMMON:VLAN_CTRL */
+#define ANA_L3_VLAN_CTRL \
+ __REG(TARGET_ANA_L3, 0, 1, regs->gaddr[GA_ANA_L3_COMMON], 0, 1, 184, 4,\
+ 0, 1, 4)
#define ANA_L3_VLAN_CTRL_VLAN_ENA BIT(0)
#define ANA_L3_VLAN_CTRL_VLAN_ENA_SET(x)\
@@ -1633,9 +1812,10 @@ enum sparx5_target {
#define ANA_L3_VLAN_CTRL_VLAN_ENA_GET(x)\
FIELD_GET(ANA_L3_VLAN_CTRL_VLAN_ENA, x)
-/* ANA_L3:VLAN:VLAN_CFG */
-#define ANA_L3_VLAN_CFG(g) __REG(TARGET_ANA_L3,\
- 0, 1, 0, g, 5120, 64, 8, 0, 1, 4)
+/* ANA_L3:VLAN:VLAN_CFG */
+#define ANA_L3_VLAN_CFG(g) \
+ __REG(TARGET_ANA_L3, 0, 1, 0, g, regs->gcnt[GC_ANA_L3_VLAN], 64, 8, 0, \
+ 1, 4)
#define ANA_L3_VLAN_CFG_VLAN_MSTP_PTR GENMASK(30, 24)
#define ANA_L3_VLAN_CFG_VLAN_MSTP_PTR_SET(x)\
@@ -1691,17 +1871,22 @@ enum sparx5_target {
#define ANA_L3_VLAN_CFG_VLAN_MIRROR_ENA_GET(x)\
FIELD_GET(ANA_L3_VLAN_CFG_VLAN_MIRROR_ENA, x)
-/* ANA_L3:VLAN:VLAN_MASK_CFG */
-#define ANA_L3_VLAN_MASK_CFG(g) __REG(TARGET_ANA_L3,\
- 0, 1, 0, g, 5120, 64, 16, 0, 1, 4)
+/* ANA_L3:VLAN:VLAN_MASK_CFG */
+#define ANA_L3_VLAN_MASK_CFG(g) \
+ __REG(TARGET_ANA_L3, 0, 1, 0, g, regs->gcnt[GC_ANA_L3_VLAN], 64, 16, 0,\
+ 1, 4)
-/* ANA_L3:VLAN:VLAN_MASK_CFG1 */
-#define ANA_L3_VLAN_MASK_CFG1(g) __REG(TARGET_ANA_L3,\
- 0, 1, 0, g, 5120, 64, 20, 0, 1, 4)
+/* SPARX5 ONLY */
+/* ANA_L3:VLAN:VLAN_MASK_CFG1 */
+#define ANA_L3_VLAN_MASK_CFG1(g) \
+ __REG(TARGET_ANA_L3, 0, 1, 0, g, regs->gcnt[GC_ANA_L3_VLAN], 64, 20, 0,\
+ 1, 4)
-/* ANA_L3:VLAN:VLAN_MASK_CFG2 */
-#define ANA_L3_VLAN_MASK_CFG2(g) __REG(TARGET_ANA_L3,\
- 0, 1, 0, g, 5120, 64, 24, 0, 1, 4)
+/* SPARX5 ONLY */
+/* ANA_L3:VLAN:VLAN_MASK_CFG2 */
+#define ANA_L3_VLAN_MASK_CFG2(g) \
+ __REG(TARGET_ANA_L3, 0, 1, 0, g, regs->gcnt[GC_ANA_L3_VLAN], 64, 24, 0,\
+ 1, 4)
#define ANA_L3_VLAN_MASK_CFG2_VLAN_PORT_MASK2 BIT(0)
#define ANA_L3_VLAN_MASK_CFG2_VLAN_PORT_MASK2_SET(x)\
@@ -1709,365 +1894,455 @@ enum sparx5_target {
#define ANA_L3_VLAN_MASK_CFG2_VLAN_PORT_MASK2_GET(x)\
FIELD_GET(ANA_L3_VLAN_MASK_CFG2_VLAN_PORT_MASK2, x)
-/* ASM:DEV_STATISTICS:RX_IN_BYTES_CNT */
-#define ASM_RX_IN_BYTES_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 0, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_SYMBOL_ERR_CNT */
-#define ASM_RX_SYMBOL_ERR_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 4, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_PAUSE_CNT */
-#define ASM_RX_PAUSE_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 8, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_UNSUP_OPCODE_CNT */
-#define ASM_RX_UNSUP_OPCODE_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 12, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_OK_BYTES_CNT */
-#define ASM_RX_OK_BYTES_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 16, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_BAD_BYTES_CNT */
-#define ASM_RX_BAD_BYTES_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 20, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_UC_CNT */
-#define ASM_RX_UC_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 24, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_MC_CNT */
-#define ASM_RX_MC_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 28, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_BC_CNT */
-#define ASM_RX_BC_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 32, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_CRC_ERR_CNT */
-#define ASM_RX_CRC_ERR_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 36, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_UNDERSIZE_CNT */
-#define ASM_RX_UNDERSIZE_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 40, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_FRAGMENTS_CNT */
-#define ASM_RX_FRAGMENTS_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 44, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_IN_RANGE_LEN_ERR_CNT */
-#define ASM_RX_IN_RANGE_LEN_ERR_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 48, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_OUT_OF_RANGE_LEN_ERR_CNT */
-#define ASM_RX_OUT_OF_RANGE_LEN_ERR_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 52, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_OVERSIZE_CNT */
-#define ASM_RX_OVERSIZE_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 56, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_JABBERS_CNT */
-#define ASM_RX_JABBERS_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 60, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_SIZE64_CNT */
-#define ASM_RX_SIZE64_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 64, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_SIZE65TO127_CNT */
-#define ASM_RX_SIZE65TO127_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 68, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_SIZE128TO255_CNT */
-#define ASM_RX_SIZE128TO255_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 72, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_SIZE256TO511_CNT */
-#define ASM_RX_SIZE256TO511_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 76, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_SIZE512TO1023_CNT */
-#define ASM_RX_SIZE512TO1023_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 80, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_SIZE1024TO1518_CNT */
-#define ASM_RX_SIZE1024TO1518_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 84, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_SIZE1519TOMAX_CNT */
-#define ASM_RX_SIZE1519TOMAX_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 88, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_IPG_SHRINK_CNT */
-#define ASM_RX_IPG_SHRINK_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 92, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:TX_OUT_BYTES_CNT */
-#define ASM_TX_OUT_BYTES_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 96, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:TX_PAUSE_CNT */
-#define ASM_TX_PAUSE_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 100, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:TX_OK_BYTES_CNT */
-#define ASM_TX_OK_BYTES_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 104, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:TX_UC_CNT */
-#define ASM_TX_UC_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 108, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:TX_MC_CNT */
-#define ASM_TX_MC_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 112, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:TX_BC_CNT */
-#define ASM_TX_BC_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 116, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:TX_SIZE64_CNT */
-#define ASM_TX_SIZE64_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 120, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:TX_SIZE65TO127_CNT */
-#define ASM_TX_SIZE65TO127_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 124, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:TX_SIZE128TO255_CNT */
-#define ASM_TX_SIZE128TO255_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 128, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:TX_SIZE256TO511_CNT */
-#define ASM_TX_SIZE256TO511_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 132, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:TX_SIZE512TO1023_CNT */
-#define ASM_TX_SIZE512TO1023_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 136, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:TX_SIZE1024TO1518_CNT */
-#define ASM_TX_SIZE1024TO1518_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 140, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:TX_SIZE1519TOMAX_CNT */
-#define ASM_TX_SIZE1519TOMAX_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 144, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_ALIGNMENT_LOST_CNT */
-#define ASM_RX_ALIGNMENT_LOST_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 148, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_TAGGED_FRMS_CNT */
-#define ASM_RX_TAGGED_FRMS_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 152, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_UNTAGGED_FRMS_CNT */
-#define ASM_RX_UNTAGGED_FRMS_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 156, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:TX_TAGGED_FRMS_CNT */
-#define ASM_TX_TAGGED_FRMS_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 160, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:TX_UNTAGGED_FRMS_CNT */
-#define ASM_TX_UNTAGGED_FRMS_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 164, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_RX_SYMBOL_ERR_CNT */
-#define ASM_PMAC_RX_SYMBOL_ERR_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 168, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_RX_PAUSE_CNT */
-#define ASM_PMAC_RX_PAUSE_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 172, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_RX_UNSUP_OPCODE_CNT */
-#define ASM_PMAC_RX_UNSUP_OPCODE_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 176, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_RX_OK_BYTES_CNT */
-#define ASM_PMAC_RX_OK_BYTES_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 180, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_RX_BAD_BYTES_CNT */
-#define ASM_PMAC_RX_BAD_BYTES_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 184, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_RX_UC_CNT */
-#define ASM_PMAC_RX_UC_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 188, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_RX_MC_CNT */
-#define ASM_PMAC_RX_MC_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 192, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_RX_BC_CNT */
-#define ASM_PMAC_RX_BC_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 196, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_RX_CRC_ERR_CNT */
-#define ASM_PMAC_RX_CRC_ERR_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 200, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_RX_UNDERSIZE_CNT */
-#define ASM_PMAC_RX_UNDERSIZE_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 204, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_RX_FRAGMENTS_CNT */
-#define ASM_PMAC_RX_FRAGMENTS_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 208, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_RX_IN_RANGE_LEN_ERR_CNT */
-#define ASM_PMAC_RX_IN_RANGE_LEN_ERR_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 212, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT */
-#define ASM_PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 216, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_RX_OVERSIZE_CNT */
-#define ASM_PMAC_RX_OVERSIZE_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 220, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_RX_JABBERS_CNT */
-#define ASM_PMAC_RX_JABBERS_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 224, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_RX_SIZE64_CNT */
-#define ASM_PMAC_RX_SIZE64_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 228, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_RX_SIZE65TO127_CNT */
-#define ASM_PMAC_RX_SIZE65TO127_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 232, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_RX_SIZE128TO255_CNT */
-#define ASM_PMAC_RX_SIZE128TO255_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 236, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_RX_SIZE256TO511_CNT */
-#define ASM_PMAC_RX_SIZE256TO511_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 240, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_RX_SIZE512TO1023_CNT */
-#define ASM_PMAC_RX_SIZE512TO1023_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 244, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_RX_SIZE1024TO1518_CNT */
-#define ASM_PMAC_RX_SIZE1024TO1518_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 248, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_RX_SIZE1519TOMAX_CNT */
-#define ASM_PMAC_RX_SIZE1519TOMAX_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 252, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_TX_PAUSE_CNT */
-#define ASM_PMAC_TX_PAUSE_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 256, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_TX_OK_BYTES_CNT */
-#define ASM_PMAC_TX_OK_BYTES_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 260, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_TX_UC_CNT */
-#define ASM_PMAC_TX_UC_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 264, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_TX_MC_CNT */
-#define ASM_PMAC_TX_MC_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 268, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_TX_BC_CNT */
-#define ASM_PMAC_TX_BC_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 272, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_TX_SIZE64_CNT */
-#define ASM_PMAC_TX_SIZE64_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 276, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_TX_SIZE65TO127_CNT */
-#define ASM_PMAC_TX_SIZE65TO127_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 280, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_TX_SIZE128TO255_CNT */
-#define ASM_PMAC_TX_SIZE128TO255_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 284, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_TX_SIZE256TO511_CNT */
-#define ASM_PMAC_TX_SIZE256TO511_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 288, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_TX_SIZE512TO1023_CNT */
-#define ASM_PMAC_TX_SIZE512TO1023_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 292, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_TX_SIZE1024TO1518_CNT */
-#define ASM_PMAC_TX_SIZE1024TO1518_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 296, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_TX_SIZE1519TOMAX_CNT */
-#define ASM_PMAC_TX_SIZE1519TOMAX_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 300, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_RX_ALIGNMENT_LOST_CNT */
-#define ASM_PMAC_RX_ALIGNMENT_LOST_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 304, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:MM_RX_ASSEMBLY_ERR_CNT */
-#define ASM_MM_RX_ASSEMBLY_ERR_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 308, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:MM_RX_SMD_ERR_CNT */
-#define ASM_MM_RX_SMD_ERR_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 312, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:MM_RX_ASSEMBLY_OK_CNT */
-#define ASM_MM_RX_ASSEMBLY_OK_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 316, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:MM_RX_MERGE_FRAG_CNT */
-#define ASM_MM_RX_MERGE_FRAG_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 320, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:MM_TX_PFRAGMENT_CNT */
-#define ASM_MM_TX_PFRAGMENT_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 324, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:TX_MULTI_COLL_CNT */
-#define ASM_TX_MULTI_COLL_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 328, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:TX_LATE_COLL_CNT */
-#define ASM_TX_LATE_COLL_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 332, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:TX_XCOLL_CNT */
-#define ASM_TX_XCOLL_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 336, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:TX_DEFER_CNT */
-#define ASM_TX_DEFER_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 340, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:TX_XDEFER_CNT */
-#define ASM_TX_XDEFER_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 344, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:TX_BACKOFF1_CNT */
-#define ASM_TX_BACKOFF1_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 348, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:TX_CSENSE_CNT */
-#define ASM_TX_CSENSE_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 352, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_IN_BYTES_MSB_CNT */
-#define ASM_RX_IN_BYTES_MSB_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 356, 0, 1, 4)
+/* ASM:DEV_STATISTICS:RX_IN_BYTES_CNT */
+#define ASM_RX_IN_BYTES_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 0, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_SYMBOL_ERR_CNT */
+#define ASM_RX_SYMBOL_ERR_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 4, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_PAUSE_CNT */
+#define ASM_RX_PAUSE_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 8, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_UNSUP_OPCODE_CNT */
+#define ASM_RX_UNSUP_OPCODE_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 12, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_OK_BYTES_CNT */
+#define ASM_RX_OK_BYTES_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 16, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_BAD_BYTES_CNT */
+#define ASM_RX_BAD_BYTES_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 20, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_UC_CNT */
+#define ASM_RX_UC_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 24, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_MC_CNT */
+#define ASM_RX_MC_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 28, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_BC_CNT */
+#define ASM_RX_BC_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 32, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_CRC_ERR_CNT */
+#define ASM_RX_CRC_ERR_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 36, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_UNDERSIZE_CNT */
+#define ASM_RX_UNDERSIZE_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 40, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_FRAGMENTS_CNT */
+#define ASM_RX_FRAGMENTS_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 44, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_IN_RANGE_LEN_ERR_CNT */
+#define ASM_RX_IN_RANGE_LEN_ERR_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 48, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_OUT_OF_RANGE_LEN_ERR_CNT */
+#define ASM_RX_OUT_OF_RANGE_LEN_ERR_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 52, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_OVERSIZE_CNT */
+#define ASM_RX_OVERSIZE_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 56, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_JABBERS_CNT */
+#define ASM_RX_JABBERS_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 60, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_SIZE64_CNT */
+#define ASM_RX_SIZE64_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 64, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_SIZE65TO127_CNT */
+#define ASM_RX_SIZE65TO127_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 68, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_SIZE128TO255_CNT */
+#define ASM_RX_SIZE128TO255_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 72, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_SIZE256TO511_CNT */
+#define ASM_RX_SIZE256TO511_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 76, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_SIZE512TO1023_CNT */
+#define ASM_RX_SIZE512TO1023_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 80, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_SIZE1024TO1518_CNT */
+#define ASM_RX_SIZE1024TO1518_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 84, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_SIZE1519TOMAX_CNT */
+#define ASM_RX_SIZE1519TOMAX_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 88, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_IPG_SHRINK_CNT */
+#define ASM_RX_IPG_SHRINK_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 92, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_OUT_BYTES_CNT */
+#define ASM_TX_OUT_BYTES_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 96, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_PAUSE_CNT */
+#define ASM_TX_PAUSE_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 100, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_OK_BYTES_CNT */
+#define ASM_TX_OK_BYTES_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 104, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_UC_CNT */
+#define ASM_TX_UC_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 108, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_MC_CNT */
+#define ASM_TX_MC_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 112, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_BC_CNT */
+#define ASM_TX_BC_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 116, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_SIZE64_CNT */
+#define ASM_TX_SIZE64_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 120, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_SIZE65TO127_CNT */
+#define ASM_TX_SIZE65TO127_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 124, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_SIZE128TO255_CNT */
+#define ASM_TX_SIZE128TO255_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 128, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_SIZE256TO511_CNT */
+#define ASM_TX_SIZE256TO511_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 132, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_SIZE512TO1023_CNT */
+#define ASM_TX_SIZE512TO1023_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 136, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_SIZE1024TO1518_CNT */
+#define ASM_TX_SIZE1024TO1518_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 140, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_SIZE1519TOMAX_CNT */
+#define ASM_TX_SIZE1519TOMAX_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 144, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_ALIGNMENT_LOST_CNT */
+#define ASM_RX_ALIGNMENT_LOST_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 148, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_TAGGED_FRMS_CNT */
+#define ASM_RX_TAGGED_FRMS_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 152, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_UNTAGGED_FRMS_CNT */
+#define ASM_RX_UNTAGGED_FRMS_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 156, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_TAGGED_FRMS_CNT */
+#define ASM_TX_TAGGED_FRMS_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 160, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_UNTAGGED_FRMS_CNT */
+#define ASM_TX_UNTAGGED_FRMS_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 164, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_SYMBOL_ERR_CNT */
+#define ASM_PMAC_RX_SYMBOL_ERR_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 168, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_PAUSE_CNT */
+#define ASM_PMAC_RX_PAUSE_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 172, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_UNSUP_OPCODE_CNT */
+#define ASM_PMAC_RX_UNSUP_OPCODE_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 176, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_OK_BYTES_CNT */
+#define ASM_PMAC_RX_OK_BYTES_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 180, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_BAD_BYTES_CNT */
+#define ASM_PMAC_RX_BAD_BYTES_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 184, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_UC_CNT */
+#define ASM_PMAC_RX_UC_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 188, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_MC_CNT */
+#define ASM_PMAC_RX_MC_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 192, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_BC_CNT */
+#define ASM_PMAC_RX_BC_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 196, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_CRC_ERR_CNT */
+#define ASM_PMAC_RX_CRC_ERR_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 200, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_UNDERSIZE_CNT */
+#define ASM_PMAC_RX_UNDERSIZE_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 204, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_FRAGMENTS_CNT */
+#define ASM_PMAC_RX_FRAGMENTS_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 208, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_IN_RANGE_LEN_ERR_CNT */
+#define ASM_PMAC_RX_IN_RANGE_LEN_ERR_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 212, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT */
+#define ASM_PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 216, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_OVERSIZE_CNT */
+#define ASM_PMAC_RX_OVERSIZE_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 220, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_JABBERS_CNT */
+#define ASM_PMAC_RX_JABBERS_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 224, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_SIZE64_CNT */
+#define ASM_PMAC_RX_SIZE64_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 228, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_SIZE65TO127_CNT */
+#define ASM_PMAC_RX_SIZE65TO127_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 232, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_SIZE128TO255_CNT */
+#define ASM_PMAC_RX_SIZE128TO255_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 236, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_SIZE256TO511_CNT */
+#define ASM_PMAC_RX_SIZE256TO511_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 240, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_SIZE512TO1023_CNT */
+#define ASM_PMAC_RX_SIZE512TO1023_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 244, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_SIZE1024TO1518_CNT */
+#define ASM_PMAC_RX_SIZE1024TO1518_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 248, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_SIZE1519TOMAX_CNT */
+#define ASM_PMAC_RX_SIZE1519TOMAX_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 252, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_TX_PAUSE_CNT */
+#define ASM_PMAC_TX_PAUSE_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 256, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_TX_OK_BYTES_CNT */
+#define ASM_PMAC_TX_OK_BYTES_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 260, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_TX_UC_CNT */
+#define ASM_PMAC_TX_UC_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 264, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_TX_MC_CNT */
+#define ASM_PMAC_TX_MC_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 268, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_TX_BC_CNT */
+#define ASM_PMAC_TX_BC_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 272, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_TX_SIZE64_CNT */
+#define ASM_PMAC_TX_SIZE64_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 276, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_TX_SIZE65TO127_CNT */
+#define ASM_PMAC_TX_SIZE65TO127_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 280, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_TX_SIZE128TO255_CNT */
+#define ASM_PMAC_TX_SIZE128TO255_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 284, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_TX_SIZE256TO511_CNT */
+#define ASM_PMAC_TX_SIZE256TO511_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 288, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_TX_SIZE512TO1023_CNT */
+#define ASM_PMAC_TX_SIZE512TO1023_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 292, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_TX_SIZE1024TO1518_CNT */
+#define ASM_PMAC_TX_SIZE1024TO1518_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 296, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_TX_SIZE1519TOMAX_CNT */
+#define ASM_PMAC_TX_SIZE1519TOMAX_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 300, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_ALIGNMENT_LOST_CNT */
+#define ASM_PMAC_RX_ALIGNMENT_LOST_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 304, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:MM_RX_ASSEMBLY_ERR_CNT */
+#define ASM_MM_RX_ASSEMBLY_ERR_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 308, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:MM_RX_SMD_ERR_CNT */
+#define ASM_MM_RX_SMD_ERR_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 312, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:MM_RX_ASSEMBLY_OK_CNT */
+#define ASM_MM_RX_ASSEMBLY_OK_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 316, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:MM_RX_MERGE_FRAG_CNT */
+#define ASM_MM_RX_MERGE_FRAG_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 320, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:MM_TX_PFRAGMENT_CNT */
+#define ASM_MM_TX_PFRAGMENT_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 324, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_MULTI_COLL_CNT */
+#define ASM_TX_MULTI_COLL_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 328, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_LATE_COLL_CNT */
+#define ASM_TX_LATE_COLL_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 332, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_XCOLL_CNT */
+#define ASM_TX_XCOLL_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 336, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_DEFER_CNT */
+#define ASM_TX_DEFER_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 340, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_XDEFER_CNT */
+#define ASM_TX_XDEFER_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 344, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_BACKOFF1_CNT */
+#define ASM_TX_BACKOFF1_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 348, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_CSENSE_CNT */
+#define ASM_TX_CSENSE_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 352, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_IN_BYTES_MSB_CNT */
+#define ASM_RX_IN_BYTES_MSB_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 356, 0, 1, 4)
#define ASM_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT GENMASK(3, 0)
#define ASM_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT_SET(x)\
@@ -2075,9 +2350,10 @@ enum sparx5_target {
#define ASM_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT_GET(x)\
FIELD_GET(ASM_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT, x)
-/* ASM:DEV_STATISTICS:RX_OK_BYTES_MSB_CNT */
-#define ASM_RX_OK_BYTES_MSB_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 360, 0, 1, 4)
+/* ASM:DEV_STATISTICS:RX_OK_BYTES_MSB_CNT */
+#define ASM_RX_OK_BYTES_MSB_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 360, 0, 1, 4)
#define ASM_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT GENMASK(3, 0)
#define ASM_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT_SET(x)\
@@ -2085,9 +2361,10 @@ enum sparx5_target {
#define ASM_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT_GET(x)\
FIELD_GET(ASM_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT, x)
-/* ASM:DEV_STATISTICS:PMAC_RX_OK_BYTES_MSB_CNT */
-#define ASM_PMAC_RX_OK_BYTES_MSB_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 364, 0, 1, 4)
+/* ASM:DEV_STATISTICS:PMAC_RX_OK_BYTES_MSB_CNT */
+#define ASM_PMAC_RX_OK_BYTES_MSB_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 364, 0, 1, 4)
#define ASM_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT GENMASK(3, 0)
#define ASM_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT_SET(x)\
@@ -2095,9 +2372,10 @@ enum sparx5_target {
#define ASM_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT_GET(x)\
FIELD_GET(ASM_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT, x)
-/* ASM:DEV_STATISTICS:RX_BAD_BYTES_MSB_CNT */
-#define ASM_RX_BAD_BYTES_MSB_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 368, 0, 1, 4)
+/* ASM:DEV_STATISTICS:RX_BAD_BYTES_MSB_CNT */
+#define ASM_RX_BAD_BYTES_MSB_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 368, 0, 1, 4)
#define ASM_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT GENMASK(3, 0)
#define ASM_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT_SET(x)\
@@ -2105,9 +2383,10 @@ enum sparx5_target {
#define ASM_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT_GET(x)\
FIELD_GET(ASM_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT, x)
-/* ASM:DEV_STATISTICS:PMAC_RX_BAD_BYTES_MSB_CNT */
-#define ASM_PMAC_RX_BAD_BYTES_MSB_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 372, 0, 1, 4)
+/* ASM:DEV_STATISTICS:PMAC_RX_BAD_BYTES_MSB_CNT */
+#define ASM_PMAC_RX_BAD_BYTES_MSB_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 372, 0, 1, 4)
#define ASM_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT GENMASK(3, 0)
#define ASM_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT_SET(x)\
@@ -2115,9 +2394,10 @@ enum sparx5_target {
#define ASM_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT_GET(x)\
FIELD_GET(ASM_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT, x)
-/* ASM:DEV_STATISTICS:TX_OUT_BYTES_MSB_CNT */
-#define ASM_TX_OUT_BYTES_MSB_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 376, 0, 1, 4)
+/* ASM:DEV_STATISTICS:TX_OUT_BYTES_MSB_CNT */
+#define ASM_TX_OUT_BYTES_MSB_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 376, 0, 1, 4)
#define ASM_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT GENMASK(3, 0)
#define ASM_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT_SET(x)\
@@ -2125,9 +2405,10 @@ enum sparx5_target {
#define ASM_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT_GET(x)\
FIELD_GET(ASM_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT, x)
-/* ASM:DEV_STATISTICS:TX_OK_BYTES_MSB_CNT */
-#define ASM_TX_OK_BYTES_MSB_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 380, 0, 1, 4)
+/* ASM:DEV_STATISTICS:TX_OK_BYTES_MSB_CNT */
+#define ASM_TX_OK_BYTES_MSB_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 380, 0, 1, 4)
#define ASM_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT GENMASK(3, 0)
#define ASM_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT_SET(x)\
@@ -2135,9 +2416,10 @@ enum sparx5_target {
#define ASM_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT_GET(x)\
FIELD_GET(ASM_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT, x)
-/* ASM:DEV_STATISTICS:PMAC_TX_OK_BYTES_MSB_CNT */
-#define ASM_PMAC_TX_OK_BYTES_MSB_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 384, 0, 1, 4)
+/* ASM:DEV_STATISTICS:PMAC_TX_OK_BYTES_MSB_CNT */
+#define ASM_PMAC_TX_OK_BYTES_MSB_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 384, 0, 1, 4)
#define ASM_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT GENMASK(3, 0)
#define ASM_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT_SET(x)\
@@ -2145,13 +2427,15 @@ enum sparx5_target {
#define ASM_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT_GET(x)\
FIELD_GET(ASM_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT, x)
-/* ASM:DEV_STATISTICS:RX_SYNC_LOST_ERR_CNT */
-#define ASM_RX_SYNC_LOST_ERR_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 388, 0, 1, 4)
+/* ASM:DEV_STATISTICS:RX_SYNC_LOST_ERR_CNT */
+#define ASM_RX_SYNC_LOST_ERR_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 388, 0, 1, 4)
-/* ASM:CFG:STAT_CFG */
-#define ASM_STAT_CFG __REG(TARGET_ASM,\
- 0, 1, 33280, 0, 1, 1088, 0, 0, 1, 4)
+/* ASM:CFG:STAT_CFG */
+#define ASM_STAT_CFG \
+ __REG(TARGET_ASM, 0, 1, regs->gaddr[GA_ASM_CFG], 0, 1, \
+ regs->gsize[GW_ASM_CFG], 0, 0, 1, 4)
#define ASM_STAT_CFG_STAT_CNT_CLR_SHOT BIT(0)
#define ASM_STAT_CFG_STAT_CNT_CLR_SHOT_SET(x)\
@@ -2159,9 +2443,10 @@ enum sparx5_target {
#define ASM_STAT_CFG_STAT_CNT_CLR_SHOT_GET(x)\
FIELD_GET(ASM_STAT_CFG_STAT_CNT_CLR_SHOT, x)
-/* ASM:CFG:PORT_CFG */
-#define ASM_PORT_CFG(r) __REG(TARGET_ASM,\
- 0, 1, 33280, 0, 1, 1088, 540, r, 67, 4)
+/* ASM:CFG:PORT_CFG */
+#define ASM_PORT_CFG(r) \
+ __REG(TARGET_ASM, 0, 1, regs->gaddr[GA_ASM_CFG], 0, 1, \
+ regs->gsize[GW_ASM_CFG], 540, r, regs->rcnt[RC_ASM_PORT_CFG], 4)
#define ASM_PORT_CFG_CSC_STAT_DIS BIT(12)
#define ASM_PORT_CFG_CSC_STAT_DIS_SET(x)\
@@ -2229,9 +2514,10 @@ enum sparx5_target {
#define ASM_PORT_CFG_PFRM_FLUSH_GET(x)\
FIELD_GET(ASM_PORT_CFG_PFRM_FLUSH, x)
-/* ASM:RAM_CTRL:RAM_INIT */
-#define ASM_RAM_INIT __REG(TARGET_ASM,\
- 0, 1, 34832, 0, 1, 4, 0, 0, 1, 4)
+/* ASM:RAM_CTRL:RAM_INIT */
+#define ASM_RAM_INIT \
+ __REG(TARGET_ASM, 0, 1, regs->gaddr[GA_ASM_RAM_CTRL], 0, 1, 4, 0, 0, 1,\
+ 4)
#define ASM_RAM_INIT_RAM_INIT BIT(1)
#define ASM_RAM_INIT_RAM_INIT_SET(x)\
@@ -2245,9 +2531,10 @@ enum sparx5_target {
#define ASM_RAM_INIT_RAM_CFG_HOOK_GET(x)\
FIELD_GET(ASM_RAM_INIT_RAM_CFG_HOOK, x)
-/* CLKGEN:LCPLL1:LCPLL1_CORE_CLK_CFG */
-#define CLKGEN_LCPLL1_CORE_CLK_CFG __REG(TARGET_CLKGEN,\
- 0, 1, 12, 0, 1, 36, 0, 0, 1, 4)
+/* SPARX5 ONLY */
+/* CLKGEN:LCPLL1:LCPLL1_CORE_CLK_CFG */
+#define CLKGEN_LCPLL1_CORE_CLK_CFG \
+ __REG(TARGET_CLKGEN, 0, 1, 12, 0, 1, 36, 0, 0, 1, 4)
#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV GENMASK(7, 0)
#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV_SET(x)\
@@ -2285,91 +2572,144 @@ enum sparx5_target {
#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA_GET(x)\
FIELD_GET(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA, x)
-/* CPU:CPU_REGS:PROC_CTRL */
-#define CPU_PROC_CTRL __REG(TARGET_CPU,\
- 0, 1, 0, 0, 1, 204, 176, 0, 1, 4)
+/* CPU:CPU_REGS:PROC_CTRL */
+#define CPU_PROC_CTRL \
+ __REG(TARGET_CPU, 0, 1, 0, 0, 1, regs->gsize[GW_CPU_CPU_REGS], \
+ regs->raddr[RA_CPU_PROC_CTRL], 0, 1, 4)
-#define CPU_PROC_CTRL_AARCH64_MODE_ENA BIT(12)
+#define CPU_PROC_CTRL_AARCH64_MODE_ENA\
+ BIT(regs->fpos[FP_CPU_PROC_CTRL_AARCH64_MODE_ENA])
#define CPU_PROC_CTRL_AARCH64_MODE_ENA_SET(x)\
- FIELD_PREP(CPU_PROC_CTRL_AARCH64_MODE_ENA, x)
+ spx5_field_prep(CPU_PROC_CTRL_AARCH64_MODE_ENA, x)
#define CPU_PROC_CTRL_AARCH64_MODE_ENA_GET(x)\
- FIELD_GET(CPU_PROC_CTRL_AARCH64_MODE_ENA, x)
+ spx5_field_get(CPU_PROC_CTRL_AARCH64_MODE_ENA, x)
-#define CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS BIT(11)
+#define CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS\
+ BIT(regs->fpos[FP_CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS])
#define CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS_SET(x)\
- FIELD_PREP(CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS, x)
+ spx5_field_prep(CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS, x)
#define CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS_GET(x)\
- FIELD_GET(CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS, x)
+ spx5_field_get(CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS, x)
-#define CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS BIT(10)
+#define CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS\
+ BIT(regs->fpos[FP_CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS])
#define CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS_SET(x)\
- FIELD_PREP(CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS, x)
+ spx5_field_prep(CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS, x)
#define CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS_GET(x)\
- FIELD_GET(CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS, x)
+ spx5_field_get(CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS, x)
-#define CPU_PROC_CTRL_BE_EXCEP_MODE BIT(9)
+#define CPU_PROC_CTRL_BE_EXCEP_MODE\
+ BIT(regs->fpos[FP_CPU_PROC_CTRL_BE_EXCEP_MODE])
#define CPU_PROC_CTRL_BE_EXCEP_MODE_SET(x)\
- FIELD_PREP(CPU_PROC_CTRL_BE_EXCEP_MODE, x)
+ spx5_field_prep(CPU_PROC_CTRL_BE_EXCEP_MODE, x)
#define CPU_PROC_CTRL_BE_EXCEP_MODE_GET(x)\
- FIELD_GET(CPU_PROC_CTRL_BE_EXCEP_MODE, x)
+ spx5_field_get(CPU_PROC_CTRL_BE_EXCEP_MODE, x)
-#define CPU_PROC_CTRL_VINITHI BIT(8)
+#define CPU_PROC_CTRL_VINITHI\
+ BIT(regs->fpos[FP_CPU_PROC_CTRL_VINITHI])
#define CPU_PROC_CTRL_VINITHI_SET(x)\
- FIELD_PREP(CPU_PROC_CTRL_VINITHI, x)
+ spx5_field_prep(CPU_PROC_CTRL_VINITHI, x)
#define CPU_PROC_CTRL_VINITHI_GET(x)\
- FIELD_GET(CPU_PROC_CTRL_VINITHI, x)
+ spx5_field_get(CPU_PROC_CTRL_VINITHI, x)
-#define CPU_PROC_CTRL_CFGTE BIT(7)
+#define CPU_PROC_CTRL_CFGTE\
+ BIT(regs->fpos[FP_CPU_PROC_CTRL_CFGTE])
#define CPU_PROC_CTRL_CFGTE_SET(x)\
- FIELD_PREP(CPU_PROC_CTRL_CFGTE, x)
+ spx5_field_prep(CPU_PROC_CTRL_CFGTE, x)
#define CPU_PROC_CTRL_CFGTE_GET(x)\
- FIELD_GET(CPU_PROC_CTRL_CFGTE, x)
+ spx5_field_get(CPU_PROC_CTRL_CFGTE, x)
-#define CPU_PROC_CTRL_CP15S_DISABLE BIT(6)
+#define CPU_PROC_CTRL_CP15S_DISABLE\
+ BIT(regs->fpos[FP_CPU_PROC_CTRL_CP15S_DISABLE])
#define CPU_PROC_CTRL_CP15S_DISABLE_SET(x)\
- FIELD_PREP(CPU_PROC_CTRL_CP15S_DISABLE, x)
+ spx5_field_prep(CPU_PROC_CTRL_CP15S_DISABLE, x)
#define CPU_PROC_CTRL_CP15S_DISABLE_GET(x)\
- FIELD_GET(CPU_PROC_CTRL_CP15S_DISABLE, x)
+ spx5_field_get(CPU_PROC_CTRL_CP15S_DISABLE, x)
-#define CPU_PROC_CTRL_PROC_CRYPTO_DISABLE BIT(5)
+#define CPU_PROC_CTRL_PROC_CRYPTO_DISABLE\
+ BIT(regs->fpos[FP_CPU_PROC_CTRL_PROC_CRYPTO_DISABLE])
#define CPU_PROC_CTRL_PROC_CRYPTO_DISABLE_SET(x)\
- FIELD_PREP(CPU_PROC_CTRL_PROC_CRYPTO_DISABLE, x)
+ spx5_field_prep(CPU_PROC_CTRL_PROC_CRYPTO_DISABLE, x)
#define CPU_PROC_CTRL_PROC_CRYPTO_DISABLE_GET(x)\
- FIELD_GET(CPU_PROC_CTRL_PROC_CRYPTO_DISABLE, x)
+ spx5_field_get(CPU_PROC_CTRL_PROC_CRYPTO_DISABLE, x)
+/* SPARX5 ONLY */
#define CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA BIT(4)
#define CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA_SET(x)\
FIELD_PREP(CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA, x)
#define CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA_GET(x)\
FIELD_GET(CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA, x)
+/* SPARX5 ONLY */
#define CPU_PROC_CTRL_ACP_AWCACHE BIT(3)
#define CPU_PROC_CTRL_ACP_AWCACHE_SET(x)\
FIELD_PREP(CPU_PROC_CTRL_ACP_AWCACHE, x)
#define CPU_PROC_CTRL_ACP_AWCACHE_GET(x)\
FIELD_GET(CPU_PROC_CTRL_ACP_AWCACHE, x)
+/* SPARX5 ONLY */
#define CPU_PROC_CTRL_ACP_ARCACHE BIT(2)
#define CPU_PROC_CTRL_ACP_ARCACHE_SET(x)\
FIELD_PREP(CPU_PROC_CTRL_ACP_ARCACHE, x)
#define CPU_PROC_CTRL_ACP_ARCACHE_GET(x)\
FIELD_GET(CPU_PROC_CTRL_ACP_ARCACHE, x)
-#define CPU_PROC_CTRL_L2_FLUSH_REQ BIT(1)
+#define CPU_PROC_CTRL_L2_FLUSH_REQ\
+ BIT(regs->fpos[FP_CPU_PROC_CTRL_L2_FLUSH_REQ])
#define CPU_PROC_CTRL_L2_FLUSH_REQ_SET(x)\
- FIELD_PREP(CPU_PROC_CTRL_L2_FLUSH_REQ, x)
+ spx5_field_prep(CPU_PROC_CTRL_L2_FLUSH_REQ, x)
#define CPU_PROC_CTRL_L2_FLUSH_REQ_GET(x)\
- FIELD_GET(CPU_PROC_CTRL_L2_FLUSH_REQ, x)
+ spx5_field_get(CPU_PROC_CTRL_L2_FLUSH_REQ, x)
+/* SPARX5 ONLY */
#define CPU_PROC_CTRL_ACP_DISABLE BIT(0)
#define CPU_PROC_CTRL_ACP_DISABLE_SET(x)\
FIELD_PREP(CPU_PROC_CTRL_ACP_DISABLE, x)
#define CPU_PROC_CTRL_ACP_DISABLE_GET(x)\
FIELD_GET(CPU_PROC_CTRL_ACP_DISABLE, x)
-/* DEV10G:MAC_CFG_STATUS:MAC_ENA_CFG */
-#define DEV10G_MAC_ENA_CFG(t) __REG(TARGET_DEV10G,\
- t, 12, 0, 0, 1, 60, 0, 0, 1, 4)
+/* DEV1G:PHASE_DETECTOR_CTRL:PHAD_CTRL */
+#define DEV2G5_PHAD_CTRL(t, g) \
+ __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 200, g, 2, \
+ regs->gsize[GW_DEV2G5_PHASE_DETECTOR_CTRL], 0, 0, 1, 4)
+
+#define DEV2G5_PHAD_CTRL_PHAD_ENA\
+ BIT(regs->fpos[FP_DEV2G5_PHAD_CTRL_PHAD_ENA])
+#define DEV2G5_PHAD_CTRL_PHAD_ENA_SET(x)\
+ spx5_field_prep(DEV2G5_PHAD_CTRL_PHAD_ENA, x)
+#define DEV2G5_PHAD_CTRL_PHAD_ENA_GET(x)\
+ spx5_field_get(DEV2G5_PHAD_CTRL_PHAD_ENA, x)
+
+/* LAN969X ONLY */
+#define DEV2G5_PHAD_CTRL_DIV_CFG GENMASK(11, 9)
+#define DEV2G5_PHAD_CTRL_DIV_CFG_SET(x)\
+ FIELD_PREP(DEV2G5_PHAD_CTRL_DIV_CFG, x)
+#define DEV2G5_PHAD_CTRL_DIV_CFG_GET(x)\
+ FIELD_GET(DEV2G5_PHAD_CTRL_DIV_CFG, x)
+
+/* DEV10G:MAC_CFG_STATUS:MAC_ENA_CFG */
+#define DEV10G_MAC_ENA_CFG(t) \
+ __REG(TARGET_DEV10G, t, regs->tsize[TC_DEV10G], 0, 0, 1, 60, 0, 0, 1, \
+ 4)
#define DEV10G_MAC_ENA_CFG_RX_ENA BIT(4)
#define DEV10G_MAC_ENA_CFG_RX_ENA_SET(x)\
@@ -2383,9 +2723,10 @@ enum sparx5_target {
#define DEV10G_MAC_ENA_CFG_TX_ENA_GET(x)\
FIELD_GET(DEV10G_MAC_ENA_CFG_TX_ENA, x)
-/* DEV10G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */
-#define DEV10G_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV10G,\
- t, 12, 0, 0, 1, 60, 8, 0, 1, 4)
+/* DEV10G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */
+#define DEV10G_MAC_MAXLEN_CFG(t) \
+ __REG(TARGET_DEV10G, t, regs->tsize[TC_DEV10G], 0, 0, 1, 60, 8, 0, 1, \
+ 4)
#define DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK BIT(16)
#define DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_SET(x)\
@@ -2399,9 +2740,10 @@ enum sparx5_target {
#define DEV10G_MAC_MAXLEN_CFG_MAX_LEN_GET(x)\
FIELD_GET(DEV10G_MAC_MAXLEN_CFG_MAX_LEN, x)
-/* DEV10G:MAC_CFG_STATUS:MAC_NUM_TAGS_CFG */
-#define DEV10G_MAC_NUM_TAGS_CFG(t) __REG(TARGET_DEV10G,\
- t, 12, 0, 0, 1, 60, 12, 0, 1, 4)
+/* DEV10G:MAC_CFG_STATUS:MAC_NUM_TAGS_CFG */
+#define DEV10G_MAC_NUM_TAGS_CFG(t) \
+ __REG(TARGET_DEV10G, t, regs->tsize[TC_DEV10G], 0, 0, 1, 60, 12, 0, 1, \
+ 4)
#define DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS GENMASK(1, 0)
#define DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS_SET(x)\
@@ -2409,9 +2751,10 @@ enum sparx5_target {
#define DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS_GET(x)\
FIELD_GET(DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS, x)
-/* DEV10G:MAC_CFG_STATUS:MAC_TAGS_CFG */
-#define DEV10G_MAC_TAGS_CFG(t, r) __REG(TARGET_DEV10G,\
- t, 12, 0, 0, 1, 60, 16, r, 3, 4)
+/* DEV10G:MAC_CFG_STATUS:MAC_TAGS_CFG */
+#define DEV10G_MAC_TAGS_CFG(t, r) \
+ __REG(TARGET_DEV10G, t, regs->tsize[TC_DEV10G], 0, 0, 1, 60, 16, r, 3, \
+ 4)
#define DEV10G_MAC_TAGS_CFG_TAG_ID GENMASK(31, 16)
#define DEV10G_MAC_TAGS_CFG_TAG_ID_SET(x)\
@@ -2425,9 +2768,10 @@ enum sparx5_target {
#define DEV10G_MAC_TAGS_CFG_TAG_ENA_GET(x)\
FIELD_GET(DEV10G_MAC_TAGS_CFG_TAG_ENA, x)
-/* DEV10G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */
-#define DEV10G_MAC_ADV_CHK_CFG(t) __REG(TARGET_DEV10G,\
- t, 12, 0, 0, 1, 60, 28, 0, 1, 4)
+/* DEV10G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */
+#define DEV10G_MAC_ADV_CHK_CFG(t) \
+ __REG(TARGET_DEV10G, t, regs->tsize[TC_DEV10G], 0, 0, 1, 60, 28, 0, 1, \
+ 4)
#define DEV10G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA BIT(24)
#define DEV10G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA_SET(x)\
@@ -2471,9 +2815,10 @@ enum sparx5_target {
#define DEV10G_MAC_ADV_CHK_CFG_INR_ERR_ENA_GET(x)\
FIELD_GET(DEV10G_MAC_ADV_CHK_CFG_INR_ERR_ENA, x)
-/* DEV10G:MAC_CFG_STATUS:MAC_TX_MONITOR_STICKY */
-#define DEV10G_MAC_TX_MONITOR_STICKY(t) __REG(TARGET_DEV10G,\
- t, 12, 0, 0, 1, 60, 48, 0, 1, 4)
+/* DEV10G:MAC_CFG_STATUS:MAC_TX_MONITOR_STICKY */
+#define DEV10G_MAC_TX_MONITOR_STICKY(t) \
+ __REG(TARGET_DEV10G, t, regs->tsize[TC_DEV10G], 0, 0, 1, 60, 48, 0, 1, \
+ 4)
#define DEV10G_MAC_TX_MONITOR_STICKY_LOCAL_ERR_STATE_STICKY BIT(4)
#define DEV10G_MAC_TX_MONITOR_STICKY_LOCAL_ERR_STATE_STICKY_SET(x)\
@@ -2505,9 +2850,10 @@ enum sparx5_target {
#define DEV10G_MAC_TX_MONITOR_STICKY_DIS_STATE_STICKY_GET(x)\
FIELD_GET(DEV10G_MAC_TX_MONITOR_STICKY_DIS_STATE_STICKY, x)
-/* DEV10G:DEV_CFG_STATUS:DEV_RST_CTRL */
-#define DEV10G_DEV_RST_CTRL(t) __REG(TARGET_DEV10G,\
- t, 12, 436, 0, 1, 52, 0, 0, 1, 4)
+/* DEV10G:DEV_CFG_STATUS:DEV_RST_CTRL */
+#define DEV10G_DEV_RST_CTRL(t) \
+ __REG(TARGET_DEV10G, t, regs->tsize[TC_DEV10G], 436, 0, 1, 52, 0, 0, 1,\
+ 4)
#define DEV10G_DEV_RST_CTRL_PARDET_MODE_ENA BIT(28)
#define DEV10G_DEV_RST_CTRL_PARDET_MODE_ENA_SET(x)\
@@ -2563,9 +2909,15 @@ enum sparx5_target {
#define DEV10G_DEV_RST_CTRL_MAC_RX_RST_GET(x)\
FIELD_GET(DEV10G_DEV_RST_CTRL_MAC_RX_RST, x)
-/* DEV10G:PCS25G_CFG_STATUS:PCS25G_CFG */
-#define DEV10G_PCS25G_CFG(t) __REG(TARGET_DEV10G,\
- t, 12, 488, 0, 1, 32, 0, 0, 1, 4)
+/* DEV10G:DEV_CFG_STATUS:PTP_STAMPER_CFG */
+#define DEV10G_PTP_STAMPER_CFG(t) \
+ __REG(TARGET_DEV10G, t, regs->tsize[TC_DEV10G], 436, 0, 1, 52, 20, 0, \
+ 1, 4)
+
+/* DEV10G:PCS25G_CFG_STATUS:PCS25G_CFG */
+#define DEV10G_PCS25G_CFG(t) \
+ __REG(TARGET_DEV10G, t, regs->tsize[TC_DEV10G], 488, 0, 1, 32, 0, 0, 1,\
+ 4)
#define DEV10G_PCS25G_CFG_PCS25G_ENA BIT(0)
#define DEV10G_PCS25G_CFG_PCS25G_ENA_SET(x)\
@@ -2573,9 +2925,10 @@ enum sparx5_target {
#define DEV10G_PCS25G_CFG_PCS25G_ENA_GET(x)\
FIELD_GET(DEV10G_PCS25G_CFG_PCS25G_ENA, x)
-/* DEV10G:MAC_CFG_STATUS:MAC_ENA_CFG */
-#define DEV25G_MAC_ENA_CFG(t) __REG(TARGET_DEV25G,\
- t, 8, 0, 0, 1, 60, 0, 0, 1, 4)
+/* SPARX5 ONLY */
+/* DEV10G:MAC_CFG_STATUS:MAC_ENA_CFG */
+#define DEV25G_MAC_ENA_CFG(t) \
+ __REG(TARGET_DEV25G, t, 8, 0, 0, 1, 60, 0, 0, 1, 4)
#define DEV25G_MAC_ENA_CFG_RX_ENA BIT(4)
#define DEV25G_MAC_ENA_CFG_RX_ENA_SET(x)\
@@ -2589,9 +2942,10 @@ enum sparx5_target {
#define DEV25G_MAC_ENA_CFG_TX_ENA_GET(x)\
FIELD_GET(DEV25G_MAC_ENA_CFG_TX_ENA, x)
-/* DEV10G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */
-#define DEV25G_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV25G,\
- t, 8, 0, 0, 1, 60, 8, 0, 1, 4)
+/* SPARX5 ONLY */
+/* DEV10G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */
+#define DEV25G_MAC_MAXLEN_CFG(t) \
+ __REG(TARGET_DEV25G, t, 8, 0, 0, 1, 60, 8, 0, 1, 4)
#define DEV25G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK BIT(16)
#define DEV25G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_SET(x)\
@@ -2605,9 +2959,10 @@ enum sparx5_target {
#define DEV25G_MAC_MAXLEN_CFG_MAX_LEN_GET(x)\
FIELD_GET(DEV25G_MAC_MAXLEN_CFG_MAX_LEN, x)
-/* DEV10G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */
-#define DEV25G_MAC_ADV_CHK_CFG(t) __REG(TARGET_DEV25G,\
- t, 8, 0, 0, 1, 60, 28, 0, 1, 4)
+/* SPARX5 ONLY */
+/* DEV10G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */
+#define DEV25G_MAC_ADV_CHK_CFG(t) \
+ __REG(TARGET_DEV25G, t, 8, 0, 0, 1, 60, 28, 0, 1, 4)
#define DEV25G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA BIT(24)
#define DEV25G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA_SET(x)\
@@ -2651,9 +3006,10 @@ enum sparx5_target {
#define DEV25G_MAC_ADV_CHK_CFG_INR_ERR_ENA_GET(x)\
FIELD_GET(DEV25G_MAC_ADV_CHK_CFG_INR_ERR_ENA, x)
-/* DEV10G:DEV_CFG_STATUS:DEV_RST_CTRL */
-#define DEV25G_DEV_RST_CTRL(t) __REG(TARGET_DEV25G,\
- t, 8, 436, 0, 1, 52, 0, 0, 1, 4)
+/* SPARX5 ONLY */
+/* DEV10G:DEV_CFG_STATUS:DEV_RST_CTRL */
+#define DEV25G_DEV_RST_CTRL(t) \
+ __REG(TARGET_DEV25G, t, 8, 436, 0, 1, 52, 0, 0, 1, 4)
#define DEV25G_DEV_RST_CTRL_PARDET_MODE_ENA BIT(28)
#define DEV25G_DEV_RST_CTRL_PARDET_MODE_ENA_SET(x)\
@@ -2709,9 +3065,10 @@ enum sparx5_target {
#define DEV25G_DEV_RST_CTRL_MAC_RX_RST_GET(x)\
FIELD_GET(DEV25G_DEV_RST_CTRL_MAC_RX_RST, x)
-/* DEV10G:PCS25G_CFG_STATUS:PCS25G_CFG */
-#define DEV25G_PCS25G_CFG(t) __REG(TARGET_DEV25G,\
- t, 8, 488, 0, 1, 32, 0, 0, 1, 4)
+/* SPARX5 ONLY */
+/* DEV10G:PCS25G_CFG_STATUS:PCS25G_CFG */
+#define DEV25G_PCS25G_CFG(t) \
+ __REG(TARGET_DEV25G, t, 8, 488, 0, 1, 32, 0, 0, 1, 4)
#define DEV25G_PCS25G_CFG_PCS25G_ENA BIT(0)
#define DEV25G_PCS25G_CFG_PCS25G_ENA_SET(x)\
@@ -2719,9 +3076,10 @@ enum sparx5_target {
#define DEV25G_PCS25G_CFG_PCS25G_ENA_GET(x)\
FIELD_GET(DEV25G_PCS25G_CFG_PCS25G_ENA, x)
-/* DEV10G:PCS25G_CFG_STATUS:PCS25G_SD_CFG */
-#define DEV25G_PCS25G_SD_CFG(t) __REG(TARGET_DEV25G,\
- t, 8, 488, 0, 1, 32, 4, 0, 1, 4)
+/* SPARX5 ONLY */
+/* DEV10G:PCS25G_CFG_STATUS:PCS25G_SD_CFG */
+#define DEV25G_PCS25G_SD_CFG(t) \
+ __REG(TARGET_DEV25G, t, 8, 488, 0, 1, 32, 4, 0, 1, 4)
#define DEV25G_PCS25G_SD_CFG_SD_SEL BIT(8)
#define DEV25G_PCS25G_SD_CFG_SD_SEL_SET(x)\
@@ -2741,9 +3099,10 @@ enum sparx5_target {
#define DEV25G_PCS25G_SD_CFG_SD_ENA_GET(x)\
FIELD_GET(DEV25G_PCS25G_SD_CFG_SD_ENA, x)
-/* DEV1G:DEV_CFG_STATUS:DEV_RST_CTRL */
-#define DEV2G5_DEV_RST_CTRL(t) __REG(TARGET_DEV2G5,\
- t, 65, 0, 0, 1, 36, 0, 0, 1, 4)
+/* DEV1G:DEV_CFG_STATUS:DEV_RST_CTRL */
+#define DEV2G5_DEV_RST_CTRL(t) \
+ __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 0, 0, 1, 36, 0, 0, 1, \
+ 4)
#define DEV2G5_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS BIT(23)
#define DEV2G5_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_SET(x)\
@@ -2793,9 +3152,10 @@ enum sparx5_target {
#define DEV2G5_DEV_RST_CTRL_MAC_RX_RST_GET(x)\
FIELD_GET(DEV2G5_DEV_RST_CTRL_MAC_RX_RST, x)
-/* DEV1G:MAC_CFG_STATUS:MAC_ENA_CFG */
-#define DEV2G5_MAC_ENA_CFG(t) __REG(TARGET_DEV2G5,\
- t, 65, 52, 0, 1, 36, 0, 0, 1, 4)
+/* DEV1G:MAC_CFG_STATUS:MAC_ENA_CFG */
+#define DEV2G5_MAC_ENA_CFG(t) \
+ __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 52, 0, 1, 36, 0, 0, 1, \
+ 4)
#define DEV2G5_MAC_ENA_CFG_RX_ENA BIT(4)
#define DEV2G5_MAC_ENA_CFG_RX_ENA_SET(x)\
@@ -2809,9 +3169,10 @@ enum sparx5_target {
#define DEV2G5_MAC_ENA_CFG_TX_ENA_GET(x)\
FIELD_GET(DEV2G5_MAC_ENA_CFG_TX_ENA, x)
-/* DEV1G:MAC_CFG_STATUS:MAC_MODE_CFG */
-#define DEV2G5_MAC_MODE_CFG(t) __REG(TARGET_DEV2G5,\
- t, 65, 52, 0, 1, 36, 4, 0, 1, 4)
+/* DEV1G:MAC_CFG_STATUS:MAC_MODE_CFG */
+#define DEV2G5_MAC_MODE_CFG(t) \
+ __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 52, 0, 1, 36, 4, 0, 1, \
+ 4)
#define DEV2G5_MAC_MODE_CFG_FC_WORD_SYNC_ENA BIT(8)
#define DEV2G5_MAC_MODE_CFG_FC_WORD_SYNC_ENA_SET(x)\
@@ -2831,9 +3192,10 @@ enum sparx5_target {
#define DEV2G5_MAC_MODE_CFG_FDX_ENA_GET(x)\
FIELD_GET(DEV2G5_MAC_MODE_CFG_FDX_ENA, x)
-/* DEV1G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */
-#define DEV2G5_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV2G5,\
- t, 65, 52, 0, 1, 36, 8, 0, 1, 4)
+/* DEV1G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */
+#define DEV2G5_MAC_MAXLEN_CFG(t) \
+ __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 52, 0, 1, 36, 8, 0, 1, \
+ 4)
#define DEV2G5_MAC_MAXLEN_CFG_MAX_LEN GENMASK(15, 0)
#define DEV2G5_MAC_MAXLEN_CFG_MAX_LEN_SET(x)\
@@ -2841,9 +3203,10 @@ enum sparx5_target {
#define DEV2G5_MAC_MAXLEN_CFG_MAX_LEN_GET(x)\
FIELD_GET(DEV2G5_MAC_MAXLEN_CFG_MAX_LEN, x)
-/* DEV1G:MAC_CFG_STATUS:MAC_TAGS_CFG */
-#define DEV2G5_MAC_TAGS_CFG(t) __REG(TARGET_DEV2G5,\
- t, 65, 52, 0, 1, 36, 12, 0, 1, 4)
+/* DEV1G:MAC_CFG_STATUS:MAC_TAGS_CFG */
+#define DEV2G5_MAC_TAGS_CFG(t) \
+ __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 52, 0, 1, 36, 12, 0, 1,\
+ 4)
#define DEV2G5_MAC_TAGS_CFG_TAG_ID GENMASK(31, 16)
#define DEV2G5_MAC_TAGS_CFG_TAG_ID_SET(x)\
@@ -2869,9 +3232,10 @@ enum sparx5_target {
#define DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA_GET(x)\
FIELD_GET(DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA, x)
-/* DEV1G:MAC_CFG_STATUS:MAC_TAGS_CFG2 */
-#define DEV2G5_MAC_TAGS_CFG2(t) __REG(TARGET_DEV2G5,\
- t, 65, 52, 0, 1, 36, 16, 0, 1, 4)
+/* DEV1G:MAC_CFG_STATUS:MAC_TAGS_CFG2 */
+#define DEV2G5_MAC_TAGS_CFG2(t) \
+ __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 52, 0, 1, 36, 16, 0, 1,\
+ 4)
#define DEV2G5_MAC_TAGS_CFG2_TAG_ID3 GENMASK(31, 16)
#define DEV2G5_MAC_TAGS_CFG2_TAG_ID3_SET(x)\
@@ -2885,9 +3249,10 @@ enum sparx5_target {
#define DEV2G5_MAC_TAGS_CFG2_TAG_ID2_GET(x)\
FIELD_GET(DEV2G5_MAC_TAGS_CFG2_TAG_ID2, x)
-/* DEV1G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */
-#define DEV2G5_MAC_ADV_CHK_CFG(t) __REG(TARGET_DEV2G5,\
- t, 65, 52, 0, 1, 36, 20, 0, 1, 4)
+/* DEV1G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */
+#define DEV2G5_MAC_ADV_CHK_CFG(t) \
+ __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 52, 0, 1, 36, 20, 0, 1,\
+ 4)
#define DEV2G5_MAC_ADV_CHK_CFG_LEN_DROP_ENA BIT(0)
#define DEV2G5_MAC_ADV_CHK_CFG_LEN_DROP_ENA_SET(x)\
@@ -2895,9 +3260,10 @@ enum sparx5_target {
#define DEV2G5_MAC_ADV_CHK_CFG_LEN_DROP_ENA_GET(x)\
FIELD_GET(DEV2G5_MAC_ADV_CHK_CFG_LEN_DROP_ENA, x)
-/* DEV1G:MAC_CFG_STATUS:MAC_IFG_CFG */
-#define DEV2G5_MAC_IFG_CFG(t) __REG(TARGET_DEV2G5,\
- t, 65, 52, 0, 1, 36, 24, 0, 1, 4)
+/* DEV1G:MAC_CFG_STATUS:MAC_IFG_CFG */
+#define DEV2G5_MAC_IFG_CFG(t) \
+ __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 52, 0, 1, 36, 24, 0, 1,\
+ 4)
#define DEV2G5_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK BIT(17)
#define DEV2G5_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK_SET(x)\
@@ -2923,9 +3289,10 @@ enum sparx5_target {
#define DEV2G5_MAC_IFG_CFG_RX_IFG1_GET(x)\
FIELD_GET(DEV2G5_MAC_IFG_CFG_RX_IFG1, x)
-/* DEV1G:MAC_CFG_STATUS:MAC_HDX_CFG */
-#define DEV2G5_MAC_HDX_CFG(t) __REG(TARGET_DEV2G5,\
- t, 65, 52, 0, 1, 36, 28, 0, 1, 4)
+/* DEV1G:MAC_CFG_STATUS:MAC_HDX_CFG */
+#define DEV2G5_MAC_HDX_CFG(t) \
+ __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 52, 0, 1, 36, 28, 0, 1,\
+ 4)
#define DEV2G5_MAC_HDX_CFG_BYPASS_COL_SYNC BIT(26)
#define DEV2G5_MAC_HDX_CFG_BYPASS_COL_SYNC_SET(x)\
@@ -2957,9 +3324,10 @@ enum sparx5_target {
#define DEV2G5_MAC_HDX_CFG_LATE_COL_POS_GET(x)\
FIELD_GET(DEV2G5_MAC_HDX_CFG_LATE_COL_POS, x)
-/* DEV1G:PCS1G_CFG_STATUS:PCS1G_CFG */
-#define DEV2G5_PCS1G_CFG(t) __REG(TARGET_DEV2G5,\
- t, 65, 88, 0, 1, 68, 0, 0, 1, 4)
+/* DEV1G:PCS1G_CFG_STATUS:PCS1G_CFG */
+#define DEV2G5_PCS1G_CFG(t) \
+ __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 88, 0, 1, 68, 0, 0, 1, \
+ 4)
#define DEV2G5_PCS1G_CFG_LINK_STATUS_TYPE BIT(4)
#define DEV2G5_PCS1G_CFG_LINK_STATUS_TYPE_SET(x)\
@@ -2979,9 +3347,10 @@ enum sparx5_target {
#define DEV2G5_PCS1G_CFG_PCS_ENA_GET(x)\
FIELD_GET(DEV2G5_PCS1G_CFG_PCS_ENA, x)
-/* DEV1G:PCS1G_CFG_STATUS:PCS1G_MODE_CFG */
-#define DEV2G5_PCS1G_MODE_CFG(t) __REG(TARGET_DEV2G5,\
- t, 65, 88, 0, 1, 68, 4, 0, 1, 4)
+/* DEV1G:PCS1G_CFG_STATUS:PCS1G_MODE_CFG */
+#define DEV2G5_PCS1G_MODE_CFG(t) \
+ __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 88, 0, 1, 68, 4, 0, 1, \
+ 4)
#define DEV2G5_PCS1G_MODE_CFG_UNIDIR_MODE_ENA BIT(4)
#define DEV2G5_PCS1G_MODE_CFG_UNIDIR_MODE_ENA_SET(x)\
@@ -3001,9 +3370,10 @@ enum sparx5_target {
#define DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA_GET(x)\
FIELD_GET(DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA, x)
-/* DEV1G:PCS1G_CFG_STATUS:PCS1G_SD_CFG */
-#define DEV2G5_PCS1G_SD_CFG(t) __REG(TARGET_DEV2G5,\
- t, 65, 88, 0, 1, 68, 8, 0, 1, 4)
+/* DEV1G:PCS1G_CFG_STATUS:PCS1G_SD_CFG */
+#define DEV2G5_PCS1G_SD_CFG(t) \
+ __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 88, 0, 1, 68, 8, 0, 1, \
+ 4)
#define DEV2G5_PCS1G_SD_CFG_SD_SEL BIT(8)
#define DEV2G5_PCS1G_SD_CFG_SD_SEL_SET(x)\
@@ -3023,9 +3393,10 @@ enum sparx5_target {
#define DEV2G5_PCS1G_SD_CFG_SD_ENA_GET(x)\
FIELD_GET(DEV2G5_PCS1G_SD_CFG_SD_ENA, x)
-/* DEV1G:PCS1G_CFG_STATUS:PCS1G_ANEG_CFG */
-#define DEV2G5_PCS1G_ANEG_CFG(t) __REG(TARGET_DEV2G5,\
- t, 65, 88, 0, 1, 68, 12, 0, 1, 4)
+/* DEV1G:PCS1G_CFG_STATUS:PCS1G_ANEG_CFG */
+#define DEV2G5_PCS1G_ANEG_CFG(t) \
+ __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 88, 0, 1, 68, 12, 0, 1,\
+ 4)
#define DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY GENMASK(31, 16)
#define DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_SET(x)\
@@ -3051,9 +3422,10 @@ enum sparx5_target {
#define DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA_GET(x)\
FIELD_GET(DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA, x)
-/* DEV1G:PCS1G_CFG_STATUS:PCS1G_LB_CFG */
-#define DEV2G5_PCS1G_LB_CFG(t) __REG(TARGET_DEV2G5,\
- t, 65, 88, 0, 1, 68, 20, 0, 1, 4)
+/* DEV1G:PCS1G_CFG_STATUS:PCS1G_LB_CFG */
+#define DEV2G5_PCS1G_LB_CFG(t) \
+ __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 88, 0, 1, 68, 20, 0, 1,\
+ 4)
#define DEV2G5_PCS1G_LB_CFG_RA_ENA BIT(4)
#define DEV2G5_PCS1G_LB_CFG_RA_ENA_SET(x)\
@@ -3073,9 +3445,10 @@ enum sparx5_target {
#define DEV2G5_PCS1G_LB_CFG_TBI_HOST_LB_ENA_GET(x)\
FIELD_GET(DEV2G5_PCS1G_LB_CFG_TBI_HOST_LB_ENA, x)
-/* DEV1G:PCS1G_CFG_STATUS:PCS1G_ANEG_STATUS */
-#define DEV2G5_PCS1G_ANEG_STATUS(t) __REG(TARGET_DEV2G5,\
- t, 65, 88, 0, 1, 68, 32, 0, 1, 4)
+/* DEV1G:PCS1G_CFG_STATUS:PCS1G_ANEG_STATUS */
+#define DEV2G5_PCS1G_ANEG_STATUS(t) \
+ __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 88, 0, 1, 68, 32, 0, 1,\
+ 4)
#define DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY GENMASK(31, 16)
#define DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY_SET(x)\
@@ -3101,9 +3474,10 @@ enum sparx5_target {
#define DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE_GET(x)\
FIELD_GET(DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE, x)
-/* DEV1G:PCS1G_CFG_STATUS:PCS1G_LINK_STATUS */
-#define DEV2G5_PCS1G_LINK_STATUS(t) __REG(TARGET_DEV2G5,\
- t, 65, 88, 0, 1, 68, 40, 0, 1, 4)
+/* DEV1G:PCS1G_CFG_STATUS:PCS1G_LINK_STATUS */
+#define DEV2G5_PCS1G_LINK_STATUS(t) \
+ __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 88, 0, 1, 68, 40, 0, 1,\
+ 4)
#define DEV2G5_PCS1G_LINK_STATUS_DELAY_VAR GENMASK(15, 12)
#define DEV2G5_PCS1G_LINK_STATUS_DELAY_VAR_SET(x)\
@@ -3129,9 +3503,10 @@ enum sparx5_target {
#define DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS_GET(x)\
FIELD_GET(DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS, x)
-/* DEV1G:PCS1G_CFG_STATUS:PCS1G_STICKY */
-#define DEV2G5_PCS1G_STICKY(t) __REG(TARGET_DEV2G5,\
- t, 65, 88, 0, 1, 68, 48, 0, 1, 4)
+/* DEV1G:PCS1G_CFG_STATUS:PCS1G_STICKY */
+#define DEV2G5_PCS1G_STICKY(t) \
+ __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 88, 0, 1, 68, 48, 0, 1,\
+ 4)
#define DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY BIT(4)
#define DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY_SET(x)\
@@ -3145,9 +3520,10 @@ enum sparx5_target {
#define DEV2G5_PCS1G_STICKY_OUT_OF_SYNC_STICKY_GET(x)\
FIELD_GET(DEV2G5_PCS1G_STICKY_OUT_OF_SYNC_STICKY, x)
-/* DEV1G:PCS_FX100_CONFIGURATION:PCS_FX100_CFG */
-#define DEV2G5_PCS_FX100_CFG(t) __REG(TARGET_DEV2G5,\
- t, 65, 164, 0, 1, 4, 0, 0, 1, 4)
+/* DEV1G:PCS_FX100_CONFIGURATION:PCS_FX100_CFG */
+#define DEV2G5_PCS_FX100_CFG(t) \
+ __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 164, 0, 1, 4, 0, 0, 1, \
+ 4)
#define DEV2G5_PCS_FX100_CFG_SD_SEL BIT(26)
#define DEV2G5_PCS_FX100_CFG_SD_SEL_SET(x)\
@@ -3227,9 +3603,10 @@ enum sparx5_target {
#define DEV2G5_PCS_FX100_CFG_PCS_ENA_GET(x)\
FIELD_GET(DEV2G5_PCS_FX100_CFG_PCS_ENA, x)
-/* DEV1G:PCS_FX100_STATUS:PCS_FX100_STATUS */
-#define DEV2G5_PCS_FX100_STATUS(t) __REG(TARGET_DEV2G5,\
- t, 65, 168, 0, 1, 4, 0, 0, 1, 4)
+/* DEV1G:PCS_FX100_STATUS:PCS_FX100_STATUS */
+#define DEV2G5_PCS_FX100_STATUS(t) \
+ __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 168, 0, 1, 4, 0, 0, 1, \
+ 4)
#define DEV2G5_PCS_FX100_STATUS_EDGE_POS_PTP GENMASK(11, 8)
#define DEV2G5_PCS_FX100_STATUS_EDGE_POS_PTP_SET(x)\
@@ -3279,9 +3656,9 @@ enum sparx5_target {
#define DEV2G5_PCS_FX100_STATUS_SYNC_STATUS_GET(x)\
FIELD_GET(DEV2G5_PCS_FX100_STATUS_SYNC_STATUS, x)
-/* DEV10G:MAC_CFG_STATUS:MAC_ENA_CFG */
-#define DEV5G_MAC_ENA_CFG(t) __REG(TARGET_DEV5G,\
- t, 13, 0, 0, 1, 60, 0, 0, 1, 4)
+/* DEV10G:MAC_CFG_STATUS:MAC_ENA_CFG */
+#define DEV5G_MAC_ENA_CFG(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 0, 0, 1, 60, 0, 0, 1, 4)
#define DEV5G_MAC_ENA_CFG_RX_ENA BIT(4)
#define DEV5G_MAC_ENA_CFG_RX_ENA_SET(x)\
@@ -3295,9 +3672,9 @@ enum sparx5_target {
#define DEV5G_MAC_ENA_CFG_TX_ENA_GET(x)\
FIELD_GET(DEV5G_MAC_ENA_CFG_TX_ENA, x)
-/* DEV10G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */
-#define DEV5G_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV5G,\
- t, 13, 0, 0, 1, 60, 8, 0, 1, 4)
+/* DEV10G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */
+#define DEV5G_MAC_MAXLEN_CFG(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 0, 0, 1, 60, 8, 0, 1, 4)
#define DEV5G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK BIT(16)
#define DEV5G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_SET(x)\
@@ -3311,9 +3688,10 @@ enum sparx5_target {
#define DEV5G_MAC_MAXLEN_CFG_MAX_LEN_GET(x)\
FIELD_GET(DEV5G_MAC_MAXLEN_CFG_MAX_LEN, x)
-/* DEV10G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */
-#define DEV5G_MAC_ADV_CHK_CFG(t) __REG(TARGET_DEV5G,\
- t, 13, 0, 0, 1, 60, 28, 0, 1, 4)
+/* DEV10G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */
+#define DEV5G_MAC_ADV_CHK_CFG(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 0, 0, 1, 60, 28, 0, 1, \
+ 4)
#define DEV5G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA BIT(24)
#define DEV5G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA_SET(x)\
@@ -3357,325 +3735,405 @@ enum sparx5_target {
#define DEV5G_MAC_ADV_CHK_CFG_INR_ERR_ENA_GET(x)\
FIELD_GET(DEV5G_MAC_ADV_CHK_CFG_INR_ERR_ENA, x)
-/* DEV10G:DEV_STATISTICS_32BIT:RX_SYMBOL_ERR_CNT */
-#define DEV5G_RX_SYMBOL_ERR_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 0, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_PAUSE_CNT */
-#define DEV5G_RX_PAUSE_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 4, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_UNSUP_OPCODE_CNT */
-#define DEV5G_RX_UNSUP_OPCODE_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 8, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_UC_CNT */
-#define DEV5G_RX_UC_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 12, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_MC_CNT */
-#define DEV5G_RX_MC_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 16, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_BC_CNT */
-#define DEV5G_RX_BC_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 20, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_CRC_ERR_CNT */
-#define DEV5G_RX_CRC_ERR_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 24, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_UNDERSIZE_CNT */
-#define DEV5G_RX_UNDERSIZE_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 28, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_FRAGMENTS_CNT */
-#define DEV5G_RX_FRAGMENTS_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 32, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_IN_RANGE_LEN_ERR_CNT */
-#define DEV5G_RX_IN_RANGE_LEN_ERR_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 36, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_OUT_OF_RANGE_LEN_ERR_CNT */
-#define DEV5G_RX_OUT_OF_RANGE_LEN_ERR_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 40, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_OVERSIZE_CNT */
-#define DEV5G_RX_OVERSIZE_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 44, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_JABBERS_CNT */
-#define DEV5G_RX_JABBERS_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 48, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE64_CNT */
-#define DEV5G_RX_SIZE64_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 52, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE65TO127_CNT */
-#define DEV5G_RX_SIZE65TO127_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 56, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE128TO255_CNT */
-#define DEV5G_RX_SIZE128TO255_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 60, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE256TO511_CNT */
-#define DEV5G_RX_SIZE256TO511_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 64, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE512TO1023_CNT */
-#define DEV5G_RX_SIZE512TO1023_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 68, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE1024TO1518_CNT */
-#define DEV5G_RX_SIZE1024TO1518_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 72, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE1519TOMAX_CNT */
-#define DEV5G_RX_SIZE1519TOMAX_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 76, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_IPG_SHRINK_CNT */
-#define DEV5G_RX_IPG_SHRINK_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 80, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:TX_PAUSE_CNT */
-#define DEV5G_TX_PAUSE_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 84, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:TX_UC_CNT */
-#define DEV5G_TX_UC_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 88, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:TX_MC_CNT */
-#define DEV5G_TX_MC_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 92, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:TX_BC_CNT */
-#define DEV5G_TX_BC_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 96, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE64_CNT */
-#define DEV5G_TX_SIZE64_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 100, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE65TO127_CNT */
-#define DEV5G_TX_SIZE65TO127_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 104, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE128TO255_CNT */
-#define DEV5G_TX_SIZE128TO255_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 108, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE256TO511_CNT */
-#define DEV5G_TX_SIZE256TO511_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 112, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE512TO1023_CNT */
-#define DEV5G_TX_SIZE512TO1023_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 116, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE1024TO1518_CNT */
-#define DEV5G_TX_SIZE1024TO1518_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 120, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE1519TOMAX_CNT */
-#define DEV5G_TX_SIZE1519TOMAX_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 124, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_ALIGNMENT_LOST_CNT */
-#define DEV5G_RX_ALIGNMENT_LOST_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 128, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_TAGGED_FRMS_CNT */
-#define DEV5G_RX_TAGGED_FRMS_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 132, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_UNTAGGED_FRMS_CNT */
-#define DEV5G_RX_UNTAGGED_FRMS_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 136, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:TX_TAGGED_FRMS_CNT */
-#define DEV5G_TX_TAGGED_FRMS_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 140, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:TX_UNTAGGED_FRMS_CNT */
-#define DEV5G_TX_UNTAGGED_FRMS_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 144, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SYMBOL_ERR_CNT */
-#define DEV5G_PMAC_RX_SYMBOL_ERR_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 148, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_PAUSE_CNT */
-#define DEV5G_PMAC_RX_PAUSE_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 152, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_UNSUP_OPCODE_CNT */
-#define DEV5G_PMAC_RX_UNSUP_OPCODE_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 156, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_UC_CNT */
-#define DEV5G_PMAC_RX_UC_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 160, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_MC_CNT */
-#define DEV5G_PMAC_RX_MC_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 164, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_BC_CNT */
-#define DEV5G_PMAC_RX_BC_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 168, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_CRC_ERR_CNT */
-#define DEV5G_PMAC_RX_CRC_ERR_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 172, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_UNDERSIZE_CNT */
-#define DEV5G_PMAC_RX_UNDERSIZE_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 176, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_FRAGMENTS_CNT */
-#define DEV5G_PMAC_RX_FRAGMENTS_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 180, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_IN_RANGE_LEN_ERR_CNT */
-#define DEV5G_PMAC_RX_IN_RANGE_LEN_ERR_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 184, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT */
-#define DEV5G_PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 188, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_OVERSIZE_CNT */
-#define DEV5G_PMAC_RX_OVERSIZE_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 192, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_JABBERS_CNT */
-#define DEV5G_PMAC_RX_JABBERS_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 196, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE64_CNT */
-#define DEV5G_PMAC_RX_SIZE64_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 200, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE65TO127_CNT */
-#define DEV5G_PMAC_RX_SIZE65TO127_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 204, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE128TO255_CNT */
-#define DEV5G_PMAC_RX_SIZE128TO255_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 208, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE256TO511_CNT */
-#define DEV5G_PMAC_RX_SIZE256TO511_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 212, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE512TO1023_CNT */
-#define DEV5G_PMAC_RX_SIZE512TO1023_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 216, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE1024TO1518_CNT */
-#define DEV5G_PMAC_RX_SIZE1024TO1518_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 220, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE1519TOMAX_CNT */
-#define DEV5G_PMAC_RX_SIZE1519TOMAX_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 224, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_PAUSE_CNT */
-#define DEV5G_PMAC_TX_PAUSE_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 228, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_UC_CNT */
-#define DEV5G_PMAC_TX_UC_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 232, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_MC_CNT */
-#define DEV5G_PMAC_TX_MC_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 236, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_BC_CNT */
-#define DEV5G_PMAC_TX_BC_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 240, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE64_CNT */
-#define DEV5G_PMAC_TX_SIZE64_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 244, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE65TO127_CNT */
-#define DEV5G_PMAC_TX_SIZE65TO127_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 248, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE128TO255_CNT */
-#define DEV5G_PMAC_TX_SIZE128TO255_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 252, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE256TO511_CNT */
-#define DEV5G_PMAC_TX_SIZE256TO511_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 256, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE512TO1023_CNT */
-#define DEV5G_PMAC_TX_SIZE512TO1023_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 260, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE1024TO1518_CNT */
-#define DEV5G_PMAC_TX_SIZE1024TO1518_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 264, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE1519TOMAX_CNT */
-#define DEV5G_PMAC_TX_SIZE1519TOMAX_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 268, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_ALIGNMENT_LOST_CNT */
-#define DEV5G_PMAC_RX_ALIGNMENT_LOST_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 272, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:MM_RX_ASSEMBLY_ERR_CNT */
-#define DEV5G_MM_RX_ASSEMBLY_ERR_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 276, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:MM_RX_SMD_ERR_CNT */
-#define DEV5G_MM_RX_SMD_ERR_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 280, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:MM_RX_ASSEMBLY_OK_CNT */
-#define DEV5G_MM_RX_ASSEMBLY_OK_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 284, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:MM_RX_MERGE_FRAG_CNT */
-#define DEV5G_MM_RX_MERGE_FRAG_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 288, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:MM_TX_PFRAGMENT_CNT */
-#define DEV5G_MM_TX_PFRAGMENT_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 292, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_HIH_CKSM_ERR_CNT */
-#define DEV5G_RX_HIH_CKSM_ERR_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 296, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_XGMII_PROT_ERR_CNT */
-#define DEV5G_RX_XGMII_PROT_ERR_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 300, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_HIH_CKSM_ERR_CNT */
-#define DEV5G_PMAC_RX_HIH_CKSM_ERR_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 304, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_XGMII_PROT_ERR_CNT */
-#define DEV5G_PMAC_RX_XGMII_PROT_ERR_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 308, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_40BIT:RX_IN_BYTES_CNT */
-#define DEV5G_RX_IN_BYTES_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 372, 0, 1, 64, 0, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_40BIT:RX_IN_BYTES_MSB_CNT */
-#define DEV5G_RX_IN_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 372, 0, 1, 64, 4, 0, 1, 4)
+/* DEV10G:DEV_STATISTICS_32BIT:RX_SYMBOL_ERR_CNT */
+#define DEV5G_RX_SYMBOL_ERR_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 0, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_PAUSE_CNT */
+#define DEV5G_RX_PAUSE_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 4, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_UNSUP_OPCODE_CNT */
+#define DEV5G_RX_UNSUP_OPCODE_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 8, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_UC_CNT */
+#define DEV5G_RX_UC_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 12, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_MC_CNT */
+#define DEV5G_RX_MC_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 16, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_BC_CNT */
+#define DEV5G_RX_BC_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 20, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_CRC_ERR_CNT */
+#define DEV5G_RX_CRC_ERR_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 24, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_UNDERSIZE_CNT */
+#define DEV5G_RX_UNDERSIZE_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 28, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_FRAGMENTS_CNT */
+#define DEV5G_RX_FRAGMENTS_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 32, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_IN_RANGE_LEN_ERR_CNT */
+#define DEV5G_RX_IN_RANGE_LEN_ERR_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 36, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_OUT_OF_RANGE_LEN_ERR_CNT */
+#define DEV5G_RX_OUT_OF_RANGE_LEN_ERR_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 40, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_OVERSIZE_CNT */
+#define DEV5G_RX_OVERSIZE_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 44, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_JABBERS_CNT */
+#define DEV5G_RX_JABBERS_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 48, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE64_CNT */
+#define DEV5G_RX_SIZE64_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 52, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE65TO127_CNT */
+#define DEV5G_RX_SIZE65TO127_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 56, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE128TO255_CNT */
+#define DEV5G_RX_SIZE128TO255_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 60, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE256TO511_CNT */
+#define DEV5G_RX_SIZE256TO511_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 64, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE512TO1023_CNT */
+#define DEV5G_RX_SIZE512TO1023_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 68, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE1024TO1518_CNT */
+#define DEV5G_RX_SIZE1024TO1518_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 72, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE1519TOMAX_CNT */
+#define DEV5G_RX_SIZE1519TOMAX_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 76, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_IPG_SHRINK_CNT */
+#define DEV5G_RX_IPG_SHRINK_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 80, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_PAUSE_CNT */
+#define DEV5G_TX_PAUSE_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 84, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_UC_CNT */
+#define DEV5G_TX_UC_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 88, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_MC_CNT */
+#define DEV5G_TX_MC_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 92, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_BC_CNT */
+#define DEV5G_TX_BC_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 96, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE64_CNT */
+#define DEV5G_TX_SIZE64_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 100, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE65TO127_CNT */
+#define DEV5G_TX_SIZE65TO127_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 104, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE128TO255_CNT */
+#define DEV5G_TX_SIZE128TO255_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 108, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE256TO511_CNT */
+#define DEV5G_TX_SIZE256TO511_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 112, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE512TO1023_CNT */
+#define DEV5G_TX_SIZE512TO1023_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 116, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE1024TO1518_CNT */
+#define DEV5G_TX_SIZE1024TO1518_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 120, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE1519TOMAX_CNT */
+#define DEV5G_TX_SIZE1519TOMAX_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 124, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_ALIGNMENT_LOST_CNT */
+#define DEV5G_RX_ALIGNMENT_LOST_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 128, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_TAGGED_FRMS_CNT */
+#define DEV5G_RX_TAGGED_FRMS_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 132, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_UNTAGGED_FRMS_CNT */
+#define DEV5G_RX_UNTAGGED_FRMS_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 136, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_TAGGED_FRMS_CNT */
+#define DEV5G_TX_TAGGED_FRMS_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 140, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_UNTAGGED_FRMS_CNT */
+#define DEV5G_TX_UNTAGGED_FRMS_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 144, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SYMBOL_ERR_CNT */
+#define DEV5G_PMAC_RX_SYMBOL_ERR_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 148, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_PAUSE_CNT */
+#define DEV5G_PMAC_RX_PAUSE_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 152, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_UNSUP_OPCODE_CNT */
+#define DEV5G_PMAC_RX_UNSUP_OPCODE_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 156, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_UC_CNT */
+#define DEV5G_PMAC_RX_UC_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 160, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_MC_CNT */
+#define DEV5G_PMAC_RX_MC_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 164, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_BC_CNT */
+#define DEV5G_PMAC_RX_BC_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 168, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_CRC_ERR_CNT */
+#define DEV5G_PMAC_RX_CRC_ERR_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 172, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_UNDERSIZE_CNT */
+#define DEV5G_PMAC_RX_UNDERSIZE_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 176, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_FRAGMENTS_CNT */
+#define DEV5G_PMAC_RX_FRAGMENTS_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 180, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_IN_RANGE_LEN_ERR_CNT */
+#define DEV5G_PMAC_RX_IN_RANGE_LEN_ERR_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 184, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT */
+#define DEV5G_PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 188, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_OVERSIZE_CNT */
+#define DEV5G_PMAC_RX_OVERSIZE_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 192, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_JABBERS_CNT */
+#define DEV5G_PMAC_RX_JABBERS_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 196, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE64_CNT */
+#define DEV5G_PMAC_RX_SIZE64_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 200, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE65TO127_CNT */
+#define DEV5G_PMAC_RX_SIZE65TO127_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 204, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE128TO255_CNT */
+#define DEV5G_PMAC_RX_SIZE128TO255_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 208, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE256TO511_CNT */
+#define DEV5G_PMAC_RX_SIZE256TO511_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 212, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE512TO1023_CNT */
+#define DEV5G_PMAC_RX_SIZE512TO1023_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 216, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE1024TO1518_CNT */
+#define DEV5G_PMAC_RX_SIZE1024TO1518_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 220, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE1519TOMAX_CNT */
+#define DEV5G_PMAC_RX_SIZE1519TOMAX_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 224, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_PAUSE_CNT */
+#define DEV5G_PMAC_TX_PAUSE_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 228, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_UC_CNT */
+#define DEV5G_PMAC_TX_UC_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 232, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_MC_CNT */
+#define DEV5G_PMAC_TX_MC_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 236, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_BC_CNT */
+#define DEV5G_PMAC_TX_BC_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 240, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE64_CNT */
+#define DEV5G_PMAC_TX_SIZE64_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 244, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE65TO127_CNT */
+#define DEV5G_PMAC_TX_SIZE65TO127_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 248, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE128TO255_CNT */
+#define DEV5G_PMAC_TX_SIZE128TO255_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 252, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE256TO511_CNT */
+#define DEV5G_PMAC_TX_SIZE256TO511_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 256, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE512TO1023_CNT */
+#define DEV5G_PMAC_TX_SIZE512TO1023_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 260, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE1024TO1518_CNT */
+#define DEV5G_PMAC_TX_SIZE1024TO1518_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 264, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE1519TOMAX_CNT */
+#define DEV5G_PMAC_TX_SIZE1519TOMAX_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 268, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_ALIGNMENT_LOST_CNT */
+#define DEV5G_PMAC_RX_ALIGNMENT_LOST_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 272, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:MM_RX_ASSEMBLY_ERR_CNT */
+#define DEV5G_MM_RX_ASSEMBLY_ERR_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 276, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:MM_RX_SMD_ERR_CNT */
+#define DEV5G_MM_RX_SMD_ERR_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 280, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:MM_RX_ASSEMBLY_OK_CNT */
+#define DEV5G_MM_RX_ASSEMBLY_OK_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 284, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:MM_RX_MERGE_FRAG_CNT */
+#define DEV5G_MM_RX_MERGE_FRAG_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 288, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:MM_TX_PFRAGMENT_CNT */
+#define DEV5G_MM_TX_PFRAGMENT_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 292, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_HIH_CKSM_ERR_CNT */
+#define DEV5G_RX_HIH_CKSM_ERR_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 296, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_XGMII_PROT_ERR_CNT */
+#define DEV5G_RX_XGMII_PROT_ERR_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 300, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_HIH_CKSM_ERR_CNT */
+#define DEV5G_PMAC_RX_HIH_CKSM_ERR_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 304, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_XGMII_PROT_ERR_CNT */
+#define DEV5G_PMAC_RX_XGMII_PROT_ERR_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 308, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_40BIT:RX_IN_BYTES_CNT */
+#define DEV5G_RX_IN_BYTES_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 372, 0, 1, 64, 0, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_40BIT:RX_IN_BYTES_MSB_CNT */
+#define DEV5G_RX_IN_BYTES_MSB_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 372, 0, 1, 64, 4, 0, 1, \
+ 4)
#define DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT GENMASK(7, 0)
#define DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT_SET(x)\
@@ -3683,13 +4141,15 @@ enum sparx5_target {
#define DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT_GET(x)\
FIELD_GET(DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT, x)
-/* DEV10G:DEV_STATISTICS_40BIT:RX_OK_BYTES_CNT */
-#define DEV5G_RX_OK_BYTES_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 372, 0, 1, 64, 8, 0, 1, 4)
+/* DEV10G:DEV_STATISTICS_40BIT:RX_OK_BYTES_CNT */
+#define DEV5G_RX_OK_BYTES_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 372, 0, 1, 64, 8, 0, 1, \
+ 4)
-/* DEV10G:DEV_STATISTICS_40BIT:RX_OK_BYTES_MSB_CNT */
-#define DEV5G_RX_OK_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 372, 0, 1, 64, 12, 0, 1, 4)
+/* DEV10G:DEV_STATISTICS_40BIT:RX_OK_BYTES_MSB_CNT */
+#define DEV5G_RX_OK_BYTES_MSB_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 372, 0, 1, 64, 12, 0, 1, \
+ 4)
#define DEV5G_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT GENMASK(7, 0)
#define DEV5G_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT_SET(x)\
@@ -3697,13 +4157,15 @@ enum sparx5_target {
#define DEV5G_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT_GET(x)\
FIELD_GET(DEV5G_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT, x)
-/* DEV10G:DEV_STATISTICS_40BIT:RX_BAD_BYTES_CNT */
-#define DEV5G_RX_BAD_BYTES_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 372, 0, 1, 64, 16, 0, 1, 4)
+/* DEV10G:DEV_STATISTICS_40BIT:RX_BAD_BYTES_CNT */
+#define DEV5G_RX_BAD_BYTES_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 372, 0, 1, 64, 16, 0, 1, \
+ 4)
-/* DEV10G:DEV_STATISTICS_40BIT:RX_BAD_BYTES_MSB_CNT */
-#define DEV5G_RX_BAD_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 372, 0, 1, 64, 20, 0, 1, 4)
+/* DEV10G:DEV_STATISTICS_40BIT:RX_BAD_BYTES_MSB_CNT */
+#define DEV5G_RX_BAD_BYTES_MSB_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 372, 0, 1, 64, 20, 0, 1, \
+ 4)
#define DEV5G_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT GENMASK(7, 0)
#define DEV5G_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT_SET(x)\
@@ -3711,13 +4173,15 @@ enum sparx5_target {
#define DEV5G_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT_GET(x)\
FIELD_GET(DEV5G_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT, x)
-/* DEV10G:DEV_STATISTICS_40BIT:TX_OUT_BYTES_CNT */
-#define DEV5G_TX_OUT_BYTES_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 372, 0, 1, 64, 24, 0, 1, 4)
+/* DEV10G:DEV_STATISTICS_40BIT:TX_OUT_BYTES_CNT */
+#define DEV5G_TX_OUT_BYTES_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 372, 0, 1, 64, 24, 0, 1, \
+ 4)
-/* DEV10G:DEV_STATISTICS_40BIT:TX_OUT_BYTES_MSB_CNT */
-#define DEV5G_TX_OUT_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 372, 0, 1, 64, 28, 0, 1, 4)
+/* DEV10G:DEV_STATISTICS_40BIT:TX_OUT_BYTES_MSB_CNT */
+#define DEV5G_TX_OUT_BYTES_MSB_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 372, 0, 1, 64, 28, 0, 1, \
+ 4)
#define DEV5G_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT GENMASK(7, 0)
#define DEV5G_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT_SET(x)\
@@ -3725,13 +4189,15 @@ enum sparx5_target {
#define DEV5G_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT_GET(x)\
FIELD_GET(DEV5G_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT, x)
-/* DEV10G:DEV_STATISTICS_40BIT:TX_OK_BYTES_CNT */
-#define DEV5G_TX_OK_BYTES_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 372, 0, 1, 64, 32, 0, 1, 4)
+/* DEV10G:DEV_STATISTICS_40BIT:TX_OK_BYTES_CNT */
+#define DEV5G_TX_OK_BYTES_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 372, 0, 1, 64, 32, 0, 1, \
+ 4)
-/* DEV10G:DEV_STATISTICS_40BIT:TX_OK_BYTES_MSB_CNT */
-#define DEV5G_TX_OK_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 372, 0, 1, 64, 36, 0, 1, 4)
+/* DEV10G:DEV_STATISTICS_40BIT:TX_OK_BYTES_MSB_CNT */
+#define DEV5G_TX_OK_BYTES_MSB_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 372, 0, 1, 64, 36, 0, 1, \
+ 4)
#define DEV5G_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT GENMASK(7, 0)
#define DEV5G_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT_SET(x)\
@@ -3739,13 +4205,15 @@ enum sparx5_target {
#define DEV5G_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT_GET(x)\
FIELD_GET(DEV5G_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT, x)
-/* DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_OK_BYTES_CNT */
-#define DEV5G_PMAC_RX_OK_BYTES_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 372, 0, 1, 64, 40, 0, 1, 4)
+/* DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_OK_BYTES_CNT */
+#define DEV5G_PMAC_RX_OK_BYTES_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 372, 0, 1, 64, 40, 0, 1, \
+ 4)
-/* DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_OK_BYTES_MSB_CNT */
-#define DEV5G_PMAC_RX_OK_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 372, 0, 1, 64, 44, 0, 1, 4)
+/* DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_OK_BYTES_MSB_CNT */
+#define DEV5G_PMAC_RX_OK_BYTES_MSB_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 372, 0, 1, 64, 44, 0, 1, \
+ 4)
#define DEV5G_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT GENMASK(7, 0)
#define DEV5G_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT_SET(x)\
@@ -3753,13 +4221,15 @@ enum sparx5_target {
#define DEV5G_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT_GET(x)\
FIELD_GET(DEV5G_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT, x)
-/* DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_BAD_BYTES_CNT */
-#define DEV5G_PMAC_RX_BAD_BYTES_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 372, 0, 1, 64, 48, 0, 1, 4)
+/* DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_BAD_BYTES_CNT */
+#define DEV5G_PMAC_RX_BAD_BYTES_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 372, 0, 1, 64, 48, 0, 1, \
+ 4)
-/* DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_BAD_BYTES_MSB_CNT */
-#define DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 372, 0, 1, 64, 52, 0, 1, 4)
+/* DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_BAD_BYTES_MSB_CNT */
+#define DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 372, 0, 1, 64, 52, 0, 1, \
+ 4)
#define DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT GENMASK(7, 0)
#define DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT_SET(x)\
@@ -3767,13 +4237,15 @@ enum sparx5_target {
#define DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT_GET(x)\
FIELD_GET(DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT, x)
-/* DEV10G:DEV_STATISTICS_40BIT:PMAC_TX_OK_BYTES_CNT */
-#define DEV5G_PMAC_TX_OK_BYTES_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 372, 0, 1, 64, 56, 0, 1, 4)
+/* DEV10G:DEV_STATISTICS_40BIT:PMAC_TX_OK_BYTES_CNT */
+#define DEV5G_PMAC_TX_OK_BYTES_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 372, 0, 1, 64, 56, 0, 1, \
+ 4)
-/* DEV10G:DEV_STATISTICS_40BIT:PMAC_TX_OK_BYTES_MSB_CNT */
-#define DEV5G_PMAC_TX_OK_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 372, 0, 1, 64, 60, 0, 1, 4)
+/* DEV10G:DEV_STATISTICS_40BIT:PMAC_TX_OK_BYTES_MSB_CNT */
+#define DEV5G_PMAC_TX_OK_BYTES_MSB_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 372, 0, 1, 64, 60, 0, 1, \
+ 4)
#define DEV5G_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT GENMASK(7, 0)
#define DEV5G_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT_SET(x)\
@@ -3781,9 +4253,10 @@ enum sparx5_target {
#define DEV5G_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT_GET(x)\
FIELD_GET(DEV5G_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT, x)
-/* DEV10G:DEV_CFG_STATUS:DEV_RST_CTRL */
-#define DEV5G_DEV_RST_CTRL(t) __REG(TARGET_DEV5G,\
- t, 13, 436, 0, 1, 52, 0, 0, 1, 4)
+/* DEV10G:DEV_CFG_STATUS:DEV_RST_CTRL */
+#define DEV5G_DEV_RST_CTRL(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 436, 0, 1, 52, 0, 0, 1, \
+ 4)
#define DEV5G_DEV_RST_CTRL_PARDET_MODE_ENA BIT(28)
#define DEV5G_DEV_RST_CTRL_PARDET_MODE_ENA_SET(x)\
@@ -3839,9 +4312,14 @@ enum sparx5_target {
#define DEV5G_DEV_RST_CTRL_MAC_RX_RST_GET(x)\
FIELD_GET(DEV5G_DEV_RST_CTRL_MAC_RX_RST, x)
-/* DSM:RAM_CTRL:RAM_INIT */
-#define DSM_RAM_INIT __REG(TARGET_DSM,\
- 0, 1, 0, 0, 1, 4, 0, 0, 1, 4)
+/* DEV10G:DEV_CFG_STATUS:PTP_STAMPER_CFG */
+#define DEV5G_PTP_STAMPER_CFG(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 436, 0, 1, 52, 20, 0, 1, \
+ 4)
+
+/* DSM:RAM_CTRL:RAM_INIT */
+#define DSM_RAM_INIT \
+ __REG(TARGET_DSM, 0, 1, 0, 0, 1, 4, 0, 0, 1, 4)
#define DSM_RAM_INIT_RAM_INIT BIT(1)
#define DSM_RAM_INIT_RAM_INIT_SET(x)\
@@ -3855,9 +4333,10 @@ enum sparx5_target {
#define DSM_RAM_INIT_RAM_CFG_HOOK_GET(x)\
FIELD_GET(DSM_RAM_INIT_RAM_CFG_HOOK, x)
-/* DSM:CFG:BUF_CFG */
-#define DSM_BUF_CFG(r) __REG(TARGET_DSM,\
- 0, 1, 20, 0, 1, 3528, 0, r, 67, 4)
+/* DSM:CFG:BUF_CFG */
+#define DSM_BUF_CFG(r) \
+ __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 0, r, \
+ regs->rcnt[RC_DSM_BUF_CFG], 4)
#define DSM_BUF_CFG_CSC_STAT_DIS BIT(13)
#define DSM_BUF_CFG_CSC_STAT_DIS_SET(x)\
@@ -3883,9 +4362,10 @@ enum sparx5_target {
#define DSM_BUF_CFG_UNDERFLOW_WATCHDOG_TIMEOUT_GET(x)\
FIELD_GET(DSM_BUF_CFG_UNDERFLOW_WATCHDOG_TIMEOUT, x)
-/* DSM:CFG:DEV_TX_STOP_WM_CFG */
-#define DSM_DEV_TX_STOP_WM_CFG(r) __REG(TARGET_DSM,\
- 0, 1, 20, 0, 1, 3528, 1360, r, 67, 4)
+/* DSM:CFG:DEV_TX_STOP_WM_CFG */
+#define DSM_DEV_TX_STOP_WM_CFG(r) \
+ __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 1360, r, \
+ regs->rcnt[RC_DSM_DEV_TX_STOP_WM_CFG], 4)
#define DSM_DEV_TX_STOP_WM_CFG_FAST_STARTUP_ENA BIT(9)
#define DSM_DEV_TX_STOP_WM_CFG_FAST_STARTUP_ENA_SET(x)\
@@ -3911,9 +4391,10 @@ enum sparx5_target {
#define DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR_GET(x)\
FIELD_GET(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR, x)
-/* DSM:CFG:RX_PAUSE_CFG */
-#define DSM_RX_PAUSE_CFG(r) __REG(TARGET_DSM,\
- 0, 1, 20, 0, 1, 3528, 1628, r, 67, 4)
+/* DSM:CFG:RX_PAUSE_CFG */
+#define DSM_RX_PAUSE_CFG(r) \
+ __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 1628, r, \
+ regs->rcnt[RC_DSM_RX_PAUSE_CFG], 4)
#define DSM_RX_PAUSE_CFG_RX_PAUSE_EN BIT(1)
#define DSM_RX_PAUSE_CFG_RX_PAUSE_EN_SET(x)\
@@ -3927,9 +4408,10 @@ enum sparx5_target {
#define DSM_RX_PAUSE_CFG_FC_OBEY_LOCAL_GET(x)\
FIELD_GET(DSM_RX_PAUSE_CFG_FC_OBEY_LOCAL, x)
-/* DSM:CFG:MAC_CFG */
-#define DSM_MAC_CFG(r) __REG(TARGET_DSM,\
- 0, 1, 20, 0, 1, 3528, 2432, r, 67, 4)
+/* DSM:CFG:MAC_CFG */
+#define DSM_MAC_CFG(r) \
+ __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 2432, r, \
+ regs->rcnt[RC_DSM_MAC_CFG], 4)
#define DSM_MAC_CFG_TX_PAUSE_VAL GENMASK(31, 16)
#define DSM_MAC_CFG_TX_PAUSE_VAL_SET(x)\
@@ -3955,9 +4437,10 @@ enum sparx5_target {
#define DSM_MAC_CFG_TX_PAUSE_XON_XOFF_GET(x)\
FIELD_GET(DSM_MAC_CFG_TX_PAUSE_XON_XOFF, x)
-/* DSM:CFG:MAC_ADDR_BASE_HIGH_CFG */
-#define DSM_MAC_ADDR_BASE_HIGH_CFG(r) __REG(TARGET_DSM,\
- 0, 1, 20, 0, 1, 3528, 2700, r, 65, 4)
+/* DSM:CFG:MAC_ADDR_BASE_HIGH_CFG */
+#define DSM_MAC_ADDR_BASE_HIGH_CFG(r) \
+ __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 2700, r, \
+ regs->rcnt[RC_DSM_MAC_ADDR_BASE_HIGH_CFG], 4)
#define DSM_MAC_ADDR_BASE_HIGH_CFG_MAC_ADDR_HIGH GENMASK(23, 0)
#define DSM_MAC_ADDR_BASE_HIGH_CFG_MAC_ADDR_HIGH_SET(x)\
@@ -3965,9 +4448,10 @@ enum sparx5_target {
#define DSM_MAC_ADDR_BASE_HIGH_CFG_MAC_ADDR_HIGH_GET(x)\
FIELD_GET(DSM_MAC_ADDR_BASE_HIGH_CFG_MAC_ADDR_HIGH, x)
-/* DSM:CFG:MAC_ADDR_BASE_LOW_CFG */
-#define DSM_MAC_ADDR_BASE_LOW_CFG(r) __REG(TARGET_DSM,\
- 0, 1, 20, 0, 1, 3528, 2960, r, 65, 4)
+/* DSM:CFG:MAC_ADDR_BASE_LOW_CFG */
+#define DSM_MAC_ADDR_BASE_LOW_CFG(r) \
+ __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 2960, r, \
+ regs->rcnt[RC_DSM_MAC_ADDR_BASE_LOW_CFG], 4)
#define DSM_MAC_ADDR_BASE_LOW_CFG_MAC_ADDR_LOW GENMASK(23, 0)
#define DSM_MAC_ADDR_BASE_LOW_CFG_MAC_ADDR_LOW_SET(x)\
@@ -3975,9 +4459,10 @@ enum sparx5_target {
#define DSM_MAC_ADDR_BASE_LOW_CFG_MAC_ADDR_LOW_GET(x)\
FIELD_GET(DSM_MAC_ADDR_BASE_LOW_CFG_MAC_ADDR_LOW, x)
-/* DSM:CFG:TAXI_CAL_CFG */
-#define DSM_TAXI_CAL_CFG(r) __REG(TARGET_DSM,\
- 0, 1, 20, 0, 1, 3528, 3224, r, 9, 4)
+/* DSM:CFG:TAXI_CAL_CFG */
+#define DSM_TAXI_CAL_CFG(r) \
+ __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 3224, r, \
+ regs->rcnt[RC_DSM_TAXI_CAL_CFG], 4)
#define DSM_TAXI_CAL_CFG_CAL_IDX GENMASK(20, 15)
#define DSM_TAXI_CAL_CFG_CAL_IDX_SET(x)\
@@ -4009,9 +4494,31 @@ enum sparx5_target {
#define DSM_TAXI_CAL_CFG_CAL_PGM_ENA_GET(x)\
FIELD_GET(DSM_TAXI_CAL_CFG_CAL_PGM_ENA, x)
-/* EACL:ES2_KEY_SELECT_PROFILE:VCAP_ES2_KEY_SEL */
-#define EACL_VCAP_ES2_KEY_SEL(g, r) __REG(TARGET_EACL,\
- 0, 1, 149504, g, 138, 8, 0, r, 2, 4)
+/* LAN969X ONLY */
+#define DSM_TAXI_CAL_CFG_CAL_SEL_STAT BIT(23)
+#define DSM_TAXI_CAL_CFG_CAL_SEL_STAT_SET(x)\
+ FIELD_PREP(DSM_TAXI_CAL_CFG_CAL_SEL_STAT, x)
+#define DSM_TAXI_CAL_CFG_CAL_SEL_STAT_GET(x)\
+ FIELD_GET(DSM_TAXI_CAL_CFG_CAL_SEL_STAT, x)
+
+/* LAN969X ONLY */
+#define DSM_TAXI_CAL_CFG_CAL_SWITCH BIT(22)
+#define DSM_TAXI_CAL_CFG_CAL_SWITCH_SET(x)\
+ FIELD_PREP(DSM_TAXI_CAL_CFG_CAL_SWITCH, x)
+#define DSM_TAXI_CAL_CFG_CAL_SWITCH_GET(x)\
+ FIELD_GET(DSM_TAXI_CAL_CFG_CAL_SWITCH, x)
+
+/* LAN969X ONLY */
+#define DSM_TAXI_CAL_CFG_CAL_PGM_SEL BIT(21)
+#define DSM_TAXI_CAL_CFG_CAL_PGM_SEL_SET(x)\
+ FIELD_PREP(DSM_TAXI_CAL_CFG_CAL_PGM_SEL, x)
+#define DSM_TAXI_CAL_CFG_CAL_PGM_SEL_GET(x)\
+ FIELD_GET(DSM_TAXI_CAL_CFG_CAL_PGM_SEL, x)
+
+/* EACL:ES2_KEY_SELECT_PROFILE:VCAP_ES2_KEY_SEL */
+#define EACL_VCAP_ES2_KEY_SEL(g, r) \
+ __REG(TARGET_EACL, 0, 1, regs->gaddr[GA_EACL_ES2_KEY_SELECT_PROFILE], \
+ g, regs->gcnt[GC_EACL_ES2_KEY_SELECT_PROFILE], 8, 0, r, 2, 4)
#define EACL_VCAP_ES2_KEY_SEL_IP6_KEY_SEL GENMASK(7, 5)
#define EACL_VCAP_ES2_KEY_SEL_IP6_KEY_SEL_SET(x)\
@@ -4037,13 +4544,15 @@ enum sparx5_target {
#define EACL_VCAP_ES2_KEY_SEL_KEY_ENA_GET(x)\
FIELD_GET(EACL_VCAP_ES2_KEY_SEL_KEY_ENA, x)
-/* EACL:CNT_TBL:ES2_CNT */
-#define EACL_ES2_CNT(g) __REG(TARGET_EACL,\
- 0, 1, 122880, g, 2048, 4, 0, 0, 1, 4)
+/* EACL:CNT_TBL:ES2_CNT */
+#define EACL_ES2_CNT(g) \
+ __REG(TARGET_EACL, 0, 1, regs->gaddr[GA_EACL_CNT_TBL], g, \
+ regs->gcnt[GC_EACL_CNT_TBL], 4, 0, 0, 1, 4)
-/* EACL:POL_CFG:POL_EACL_CFG */
-#define EACL_POL_EACL_CFG __REG(TARGET_EACL,\
- 0, 1, 150608, 0, 1, 780, 768, 0, 1, 4)
+/* EACL:POL_CFG:POL_EACL_CFG */
+#define EACL_POL_EACL_CFG \
+ __REG(TARGET_EACL, 0, 1, regs->gaddr[GA_EACL_POL_CFG], 0, 1, 780, 768, \
+ 0, 1, 4)
#define EACL_POL_EACL_CFG_EACL_CNT_MARKED_AS_DROPPED BIT(5)
#define EACL_POL_EACL_CFG_EACL_CNT_MARKED_AS_DROPPED_SET(x)\
@@ -4081,9 +4590,10 @@ enum sparx5_target {
#define EACL_POL_EACL_CFG_EACL_FORCE_INIT_GET(x)\
FIELD_GET(EACL_POL_EACL_CFG_EACL_FORCE_INIT, x)
-/* EACL:ES2_STICKY:SEC_LOOKUP_STICKY */
-#define EACL_SEC_LOOKUP_STICKY(r) __REG(TARGET_EACL,\
- 0, 1, 118696, 0, 1, 8, 0, r, 2, 4)
+/* EACL:ES2_STICKY:SEC_LOOKUP_STICKY */
+#define EACL_SEC_LOOKUP_STICKY(r) \
+ __REG(TARGET_EACL, 0, 1, regs->gaddr[GA_EACL_ES2_STICKY], 0, 1, 8, 0, \
+ r, 2, 4)
#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP_7TUPLE_STICKY BIT(7)
#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP_7TUPLE_STICKY_SET(x)\
@@ -4133,9 +4643,10 @@ enum sparx5_target {
#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY_GET(x)\
FIELD_GET(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY, x)
-/* EACL:RAM_CTRL:RAM_INIT */
-#define EACL_RAM_INIT __REG(TARGET_EACL,\
- 0, 1, 118736, 0, 1, 4, 0, 0, 1, 4)
+/* EACL:RAM_CTRL:RAM_INIT */
+#define EACL_RAM_INIT \
+ __REG(TARGET_EACL, 0, 1, regs->gaddr[GA_EACL_RAM_CTRL], 0, 1, 4, 0, 0, \
+ 1, 4)
#define EACL_RAM_INIT_RAM_INIT BIT(1)
#define EACL_RAM_INIT_RAM_INIT_SET(x)\
@@ -4149,9 +4660,10 @@ enum sparx5_target {
#define EACL_RAM_INIT_RAM_CFG_HOOK_GET(x)\
FIELD_GET(EACL_RAM_INIT_RAM_CFG_HOOK, x)
-/* FDMA:FDMA:FDMA_CH_ACTIVATE */
-#define FDMA_CH_ACTIVATE __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 0, 0, 1, 4)
+/* FDMA:FDMA:FDMA_CH_ACTIVATE */
+#define FDMA_CH_ACTIVATE \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 0, 0, 1, \
+ 4)
#define FDMA_CH_ACTIVATE_CH_ACTIVATE GENMASK(7, 0)
#define FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(x)\
@@ -4159,9 +4671,10 @@ enum sparx5_target {
#define FDMA_CH_ACTIVATE_CH_ACTIVATE_GET(x)\
FIELD_GET(FDMA_CH_ACTIVATE_CH_ACTIVATE, x)
-/* FDMA:FDMA:FDMA_CH_RELOAD */
-#define FDMA_CH_RELOAD __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 4, 0, 1, 4)
+/* FDMA:FDMA:FDMA_CH_RELOAD */
+#define FDMA_CH_RELOAD \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 4, 0, 1, \
+ 4)
#define FDMA_CH_RELOAD_CH_RELOAD GENMASK(7, 0)
#define FDMA_CH_RELOAD_CH_RELOAD_SET(x)\
@@ -4169,9 +4682,10 @@ enum sparx5_target {
#define FDMA_CH_RELOAD_CH_RELOAD_GET(x)\
FIELD_GET(FDMA_CH_RELOAD_CH_RELOAD, x)
-/* FDMA:FDMA:FDMA_CH_DISABLE */
-#define FDMA_CH_DISABLE __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 8, 0, 1, 4)
+/* FDMA:FDMA:FDMA_CH_DISABLE */
+#define FDMA_CH_DISABLE \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 8, 0, 1, \
+ 4)
#define FDMA_CH_DISABLE_CH_DISABLE GENMASK(7, 0)
#define FDMA_CH_DISABLE_CH_DISABLE_SET(x)\
@@ -4179,49 +4693,58 @@ enum sparx5_target {
#define FDMA_CH_DISABLE_CH_DISABLE_GET(x)\
FIELD_GET(FDMA_CH_DISABLE_CH_DISABLE, x)
-/* FDMA:FDMA:FDMA_DCB_LLP */
-#define FDMA_DCB_LLP(r) __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 52, r, 8, 4)
-
-/* FDMA:FDMA:FDMA_DCB_LLP1 */
-#define FDMA_DCB_LLP1(r) __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 84, r, 8, 4)
-
-/* FDMA:FDMA:FDMA_DCB_LLP_PREV */
-#define FDMA_DCB_LLP_PREV(r) __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 116, r, 8, 4)
-
-/* FDMA:FDMA:FDMA_DCB_LLP_PREV1 */
-#define FDMA_DCB_LLP_PREV1(r) __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 148, r, 8, 4)
-
-/* FDMA:FDMA:FDMA_CH_CFG */
-#define FDMA_CH_CFG(r) __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 224, r, 8, 4)
-
-#define FDMA_CH_CFG_CH_XTR_STATUS_MODE BIT(7)
+/* FDMA:FDMA:FDMA_DCB_LLP */
+#define FDMA_DCB_LLP(r) \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 52, r, 8, \
+ 4)
+
+/* FDMA:FDMA:FDMA_DCB_LLP1 */
+#define FDMA_DCB_LLP1(r) \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 84, r, 8, \
+ 4)
+
+/* FDMA:FDMA:FDMA_DCB_LLP_PREV */
+#define FDMA_DCB_LLP_PREV(r) \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 116, r, 8,\
+ 4)
+
+/* FDMA:FDMA:FDMA_DCB_LLP_PREV1 */
+#define FDMA_DCB_LLP_PREV1(r) \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 148, r, 8,\
+ 4)
+
+/* FDMA:FDMA:FDMA_CH_CFG */
+#define FDMA_CH_CFG(r) \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 224, r, 8,\
+ 4)
+
+#define FDMA_CH_CFG_CH_XTR_STATUS_MODE\
+ BIT(regs->fpos[FP_FDMA_CH_CFG_CH_XTR_STATUS_MODE])
#define FDMA_CH_CFG_CH_XTR_STATUS_MODE_SET(x)\
- FIELD_PREP(FDMA_CH_CFG_CH_XTR_STATUS_MODE, x)
+ spx5_field_prep(FDMA_CH_CFG_CH_XTR_STATUS_MODE, x)
#define FDMA_CH_CFG_CH_XTR_STATUS_MODE_GET(x)\
- FIELD_GET(FDMA_CH_CFG_CH_XTR_STATUS_MODE, x)
+ spx5_field_get(FDMA_CH_CFG_CH_XTR_STATUS_MODE, x)
-#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY BIT(6)
+#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY\
+ BIT(regs->fpos[FP_FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY])
#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(x)\
- FIELD_PREP(FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY, x)
+ spx5_field_prep(FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY, x)
#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_GET(x)\
- FIELD_GET(FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY, x)
+ spx5_field_get(FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY, x)
-#define FDMA_CH_CFG_CH_INJ_PORT BIT(5)
+#define FDMA_CH_CFG_CH_INJ_PORT\
+ BIT(regs->fpos[FP_FDMA_CH_CFG_CH_INJ_PORT])
#define FDMA_CH_CFG_CH_INJ_PORT_SET(x)\
- FIELD_PREP(FDMA_CH_CFG_CH_INJ_PORT, x)
+ spx5_field_prep(FDMA_CH_CFG_CH_INJ_PORT, x)
#define FDMA_CH_CFG_CH_INJ_PORT_GET(x)\
- FIELD_GET(FDMA_CH_CFG_CH_INJ_PORT, x)
+ spx5_field_get(FDMA_CH_CFG_CH_INJ_PORT, x)
-#define FDMA_CH_CFG_CH_DCB_DB_CNT GENMASK(4, 1)
+#define FDMA_CH_CFG_CH_DCB_DB_CNT\
+ GENMASK(regs->fsize[FW_FDMA_CH_CFG_CH_DCB_DB_CNT] + 1 - 1, 1)
#define FDMA_CH_CFG_CH_DCB_DB_CNT_SET(x)\
- FIELD_PREP(FDMA_CH_CFG_CH_DCB_DB_CNT, x)
+ spx5_field_prep(FDMA_CH_CFG_CH_DCB_DB_CNT, x)
#define FDMA_CH_CFG_CH_DCB_DB_CNT_GET(x)\
- FIELD_GET(FDMA_CH_CFG_CH_DCB_DB_CNT, x)
+ spx5_field_get(FDMA_CH_CFG_CH_DCB_DB_CNT, x)
#define FDMA_CH_CFG_CH_MEM BIT(0)
#define FDMA_CH_CFG_CH_MEM_SET(x)\
@@ -4229,9 +4752,10 @@ enum sparx5_target {
#define FDMA_CH_CFG_CH_MEM_GET(x)\
FIELD_GET(FDMA_CH_CFG_CH_MEM, x)
-/* FDMA:FDMA:FDMA_CH_TRANSLATE */
-#define FDMA_CH_TRANSLATE(r) __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 256, r, 8, 4)
+/* FDMA:FDMA:FDMA_CH_TRANSLATE */
+#define FDMA_CH_TRANSLATE(r) \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 256, r, 8,\
+ 4)
#define FDMA_CH_TRANSLATE_OFFSET GENMASK(15, 0)
#define FDMA_CH_TRANSLATE_OFFSET_SET(x)\
@@ -4239,9 +4763,10 @@ enum sparx5_target {
#define FDMA_CH_TRANSLATE_OFFSET_GET(x)\
FIELD_GET(FDMA_CH_TRANSLATE_OFFSET, x)
-/* FDMA:FDMA:FDMA_XTR_CFG */
-#define FDMA_XTR_CFG __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 364, 0, 1, 4)
+/* FDMA:FDMA:FDMA_XTR_CFG */
+#define FDMA_XTR_CFG \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 364, 0, 1,\
+ 4)
#define FDMA_XTR_CFG_XTR_FIFO_WM GENMASK(15, 11)
#define FDMA_XTR_CFG_XTR_FIFO_WM_SET(x)\
@@ -4255,9 +4780,10 @@ enum sparx5_target {
#define FDMA_XTR_CFG_XTR_ARB_SAT_GET(x)\
FIELD_GET(FDMA_XTR_CFG_XTR_ARB_SAT, x)
-/* FDMA:FDMA:FDMA_PORT_CTRL */
-#define FDMA_PORT_CTRL(r) __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 376, r, 2, 4)
+/* FDMA:FDMA:FDMA_PORT_CTRL */
+#define FDMA_PORT_CTRL(r) \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 376, r, 2,\
+ 4)
#define FDMA_PORT_CTRL_INJ_STOP BIT(4)
#define FDMA_PORT_CTRL_INJ_STOP_SET(x)\
@@ -4289,9 +4815,10 @@ enum sparx5_target {
#define FDMA_PORT_CTRL_XTR_BUF_RST_GET(x)\
FIELD_GET(FDMA_PORT_CTRL_XTR_BUF_RST, x)
-/* FDMA:FDMA:FDMA_INTR_DCB */
-#define FDMA_INTR_DCB __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 384, 0, 1, 4)
+/* FDMA:FDMA:FDMA_INTR_DCB */
+#define FDMA_INTR_DCB \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 384, 0, 1,\
+ 4)
#define FDMA_INTR_DCB_INTR_DCB GENMASK(7, 0)
#define FDMA_INTR_DCB_INTR_DCB_SET(x)\
@@ -4299,9 +4826,10 @@ enum sparx5_target {
#define FDMA_INTR_DCB_INTR_DCB_GET(x)\
FIELD_GET(FDMA_INTR_DCB_INTR_DCB, x)
-/* FDMA:FDMA:FDMA_INTR_DCB_ENA */
-#define FDMA_INTR_DCB_ENA __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 388, 0, 1, 4)
+/* FDMA:FDMA:FDMA_INTR_DCB_ENA */
+#define FDMA_INTR_DCB_ENA \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 388, 0, 1,\
+ 4)
#define FDMA_INTR_DCB_ENA_INTR_DCB_ENA GENMASK(7, 0)
#define FDMA_INTR_DCB_ENA_INTR_DCB_ENA_SET(x)\
@@ -4309,9 +4837,10 @@ enum sparx5_target {
#define FDMA_INTR_DCB_ENA_INTR_DCB_ENA_GET(x)\
FIELD_GET(FDMA_INTR_DCB_ENA_INTR_DCB_ENA, x)
-/* FDMA:FDMA:FDMA_INTR_DB */
-#define FDMA_INTR_DB __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 392, 0, 1, 4)
+/* FDMA:FDMA:FDMA_INTR_DB */
+#define FDMA_INTR_DB \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 392, 0, 1,\
+ 4)
#define FDMA_INTR_DB_INTR_DB GENMASK(7, 0)
#define FDMA_INTR_DB_INTR_DB_SET(x)\
@@ -4319,9 +4848,10 @@ enum sparx5_target {
#define FDMA_INTR_DB_INTR_DB_GET(x)\
FIELD_GET(FDMA_INTR_DB_INTR_DB, x)
-/* FDMA:FDMA:FDMA_INTR_DB_ENA */
-#define FDMA_INTR_DB_ENA __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 396, 0, 1, 4)
+/* FDMA:FDMA:FDMA_INTR_DB_ENA */
+#define FDMA_INTR_DB_ENA \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 396, 0, 1,\
+ 4)
#define FDMA_INTR_DB_ENA_INTR_DB_ENA GENMASK(7, 0)
#define FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(x)\
@@ -4329,9 +4859,10 @@ enum sparx5_target {
#define FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(x)\
FIELD_GET(FDMA_INTR_DB_ENA_INTR_DB_ENA, x)
-/* FDMA:FDMA:FDMA_INTR_ERR */
-#define FDMA_INTR_ERR __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 400, 0, 1, 4)
+/* FDMA:FDMA:FDMA_INTR_ERR */
+#define FDMA_INTR_ERR \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 400, 0, 1,\
+ 4)
#define FDMA_INTR_ERR_INTR_PORT_ERR GENMASK(9, 8)
#define FDMA_INTR_ERR_INTR_PORT_ERR_SET(x)\
@@ -4345,9 +4876,10 @@ enum sparx5_target {
#define FDMA_INTR_ERR_INTR_CH_ERR_GET(x)\
FIELD_GET(FDMA_INTR_ERR_INTR_CH_ERR, x)
-/* FDMA:FDMA:FDMA_ERRORS */
-#define FDMA_ERRORS __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 412, 0, 1, 4)
+/* FDMA:FDMA:FDMA_ERRORS */
+#define FDMA_ERRORS \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 412, 0, 1,\
+ 4)
#define FDMA_ERRORS_ERR_XTR_WR GENMASK(31, 30)
#define FDMA_ERRORS_ERR_XTR_WR_SET(x)\
@@ -4397,9 +4929,10 @@ enum sparx5_target {
#define FDMA_ERRORS_ERR_CH_WR_GET(x)\
FIELD_GET(FDMA_ERRORS_ERR_CH_WR, x)
-/* FDMA:FDMA:FDMA_ERRORS_2 */
-#define FDMA_ERRORS_2 __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 416, 0, 1, 4)
+/* FDMA:FDMA:FDMA_ERRORS_2 */
+#define FDMA_ERRORS_2 \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 416, 0, 1,\
+ 4)
#define FDMA_ERRORS_2_ERR_XTR_FRAG GENMASK(1, 0)
#define FDMA_ERRORS_2_ERR_XTR_FRAG_SET(x)\
@@ -4407,9 +4940,10 @@ enum sparx5_target {
#define FDMA_ERRORS_2_ERR_XTR_FRAG_GET(x)\
FIELD_GET(FDMA_ERRORS_2_ERR_XTR_FRAG, x)
-/* FDMA:FDMA:FDMA_CTRL */
-#define FDMA_CTRL __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 424, 0, 1, 4)
+/* FDMA:FDMA:FDMA_CTRL */
+#define FDMA_CTRL \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 424, 0, 1,\
+ 4)
#define FDMA_CTRL_NRESET BIT(0)
#define FDMA_CTRL_NRESET_SET(x)\
@@ -4417,9 +4951,10 @@ enum sparx5_target {
#define FDMA_CTRL_NRESET_GET(x)\
FIELD_GET(FDMA_CTRL_NRESET, x)
-/* DEVCPU_GCB:CHIP_REGS:CHIP_ID */
-#define GCB_CHIP_ID __REG(TARGET_GCB,\
- 0, 1, 0, 0, 1, 424, 0, 0, 1, 4)
+/* DEVCPU_GCB:CHIP_REGS:CHIP_ID */
+#define GCB_CHIP_ID \
+ __REG(TARGET_GCB, 0, 1, 0, 0, 1, regs->gsize[GW_GCB_CHIP_REGS], 0, 0, \
+ 1, 4)
#define GCB_CHIP_ID_REV_ID GENMASK(31, 28)
#define GCB_CHIP_ID_REV_ID_SET(x)\
@@ -4445,10 +4980,12 @@ enum sparx5_target {
#define GCB_CHIP_ID_ONE_GET(x)\
FIELD_GET(GCB_CHIP_ID_ONE, x)
-/* DEVCPU_GCB:CHIP_REGS:SOFT_RST */
-#define GCB_SOFT_RST __REG(TARGET_GCB,\
- 0, 1, 0, 0, 1, 424, 8, 0, 1, 4)
+/* DEVCPU_GCB:CHIP_REGS:SOFT_RST */
+#define GCB_SOFT_RST \
+ __REG(TARGET_GCB, 0, 1, 0, 0, 1, regs->gsize[GW_GCB_CHIP_REGS], \
+ regs->raddr[RA_GCB_SOFT_RST], 0, 1, 4)
+/* SPARX5 ONLY */
#define GCB_SOFT_RST_SOFT_NON_CFG_RST BIT(2)
#define GCB_SOFT_RST_SOFT_NON_CFG_RST_SET(x)\
FIELD_PREP(GCB_SOFT_RST_SOFT_NON_CFG_RST, x)
@@ -4467,9 +5004,11 @@ enum sparx5_target {
#define GCB_SOFT_RST_SOFT_CHIP_RST_GET(x)\
FIELD_GET(GCB_SOFT_RST_SOFT_CHIP_RST, x)
-/* DEVCPU_GCB:CHIP_REGS:HW_SGPIO_SD_CFG */
-#define GCB_HW_SGPIO_SD_CFG __REG(TARGET_GCB,\
- 0, 1, 0, 0, 1, 424, 20, 0, 1, 4)
+/* SPARX5 ONLY */
+/* DEVCPU_GCB:CHIP_REGS:HW_SGPIO_SD_CFG */
+#define GCB_HW_SGPIO_SD_CFG \
+ __REG(TARGET_GCB, 0, 1, 0, 0, 1, regs->gsize[GW_GCB_CHIP_REGS], 20, 0, \
+ 1, 4)
#define GCB_HW_SGPIO_SD_CFG_SD_HIGH_ENA BIT(1)
#define GCB_HW_SGPIO_SD_CFG_SD_HIGH_ENA_SET(x)\
@@ -4483,19 +5022,23 @@ enum sparx5_target {
#define GCB_HW_SGPIO_SD_CFG_SD_MAP_SEL_GET(x)\
FIELD_GET(GCB_HW_SGPIO_SD_CFG_SD_MAP_SEL, x)
-/* DEVCPU_GCB:CHIP_REGS:HW_SGPIO_TO_SD_MAP_CFG */
-#define GCB_HW_SGPIO_TO_SD_MAP_CFG(r) __REG(TARGET_GCB,\
- 0, 1, 0, 0, 1, 424, 24, r, 65, 4)
+/* DEVCPU_GCB:CHIP_REGS:HW_SGPIO_TO_SD_MAP_CFG */
+#define GCB_HW_SGPIO_TO_SD_MAP_CFG(r) \
+ __REG(TARGET_GCB, 0, 1, 0, 0, 1, regs->gsize[GW_GCB_CHIP_REGS], \
+ regs->raddr[RA_GCB_HW_SGPIO_TO_SD_MAP_CFG], r, \
+ regs->rcnt[RC_GCB_HW_SGPIO_TO_SD_MAP_CFG], 4)
-#define GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL GENMASK(8, 0)
+#define GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL\
+ GENMASK(regs->fsize[FW_GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL] + 0 - 1, 0)
#define GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL_SET(x)\
- FIELD_PREP(GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL, x)
+ spx5_field_prep(GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL, x)
#define GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL_GET(x)\
- FIELD_GET(GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL, x)
+ spx5_field_get(GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL, x)
-/* DEVCPU_GCB:SIO_CTRL:SIO_CLOCK */
-#define GCB_SIO_CLOCK(g) __REG(TARGET_GCB,\
- 0, 1, 876, g, 3, 280, 20, 0, 1, 4)
+/* DEVCPU_GCB:SIO_CTRL:SIO_CLOCK */
+#define GCB_SIO_CLOCK(g) \
+ __REG(TARGET_GCB, 0, 1, regs->gaddr[GA_GCB_SIO_CTRL], g, \
+ regs->gcnt[GC_GCB_SIO_CTRL], 280, 20, 0, 1, 4)
#define GCB_SIO_CLOCK_SIO_CLK_FREQ GENMASK(19, 8)
#define GCB_SIO_CLOCK_SIO_CLK_FREQ_SET(x)\
@@ -4509,9 +5052,10 @@ enum sparx5_target {
#define GCB_SIO_CLOCK_SYS_CLK_PERIOD_GET(x)\
FIELD_GET(GCB_SIO_CLOCK_SYS_CLK_PERIOD, x)
-/* HSCH:HSCH_CFG:CIR_CFG */
-#define HSCH_CIR_CFG(g) __REG(TARGET_HSCH,\
- 0, 1, 0, g, 5040, 32, 0, 0, 1, 4)
+/* HSCH:HSCH_CFG:CIR_CFG */
+#define HSCH_CIR_CFG(g) \
+ __REG(TARGET_HSCH, 0, 1, 0, g, regs->gcnt[GC_HSCH_HSCH_CFG], 32, 0, 0, \
+ 1, 4)
#define HSCH_CIR_CFG_CIR_RATE GENMASK(22, 6)
#define HSCH_CIR_CFG_CIR_RATE_SET(x)\
@@ -4525,9 +5069,10 @@ enum sparx5_target {
#define HSCH_CIR_CFG_CIR_BURST_GET(x)\
FIELD_GET(HSCH_CIR_CFG_CIR_BURST, x)
-/* HSCH:HSCH_CFG:EIR_CFG */
-#define HSCH_EIR_CFG(g) __REG(TARGET_HSCH,\
- 0, 1, 0, g, 5040, 32, 4, 0, 1, 4)
+/* HSCH:HSCH_CFG:EIR_CFG */
+#define HSCH_EIR_CFG(g) \
+ __REG(TARGET_HSCH, 0, 1, 0, g, regs->gcnt[GC_HSCH_HSCH_CFG], 32, 4, 0, \
+ 1, 4)
#define HSCH_EIR_CFG_EIR_RATE GENMASK(22, 6)
#define HSCH_EIR_CFG_EIR_RATE_SET(x)\
@@ -4541,15 +5086,17 @@ enum sparx5_target {
#define HSCH_EIR_CFG_EIR_BURST_GET(x)\
FIELD_GET(HSCH_EIR_CFG_EIR_BURST, x)
-/* HSCH:HSCH_CFG:SE_CFG */
-#define HSCH_SE_CFG(g) __REG(TARGET_HSCH,\
- 0, 1, 0, g, 5040, 32, 8, 0, 1, 4)
+/* HSCH:HSCH_CFG:SE_CFG */
+#define HSCH_SE_CFG(g) \
+ __REG(TARGET_HSCH, 0, 1, 0, g, regs->gcnt[GC_HSCH_HSCH_CFG], 32, 8, 0, \
+ 1, 4)
-#define HSCH_SE_CFG_SE_DWRR_CNT GENMASK(12, 6)
+#define HSCH_SE_CFG_SE_DWRR_CNT\
+ GENMASK(regs->fsize[FW_HSCH_SE_CFG_SE_DWRR_CNT] + 6 - 1, 6)
#define HSCH_SE_CFG_SE_DWRR_CNT_SET(x)\
- FIELD_PREP(HSCH_SE_CFG_SE_DWRR_CNT, x)
+ spx5_field_prep(HSCH_SE_CFG_SE_DWRR_CNT, x)
#define HSCH_SE_CFG_SE_DWRR_CNT_GET(x)\
- FIELD_GET(HSCH_SE_CFG_SE_DWRR_CNT, x)
+ spx5_field_get(HSCH_SE_CFG_SE_DWRR_CNT, x)
#define HSCH_SE_CFG_SE_AVB_ENA BIT(5)
#define HSCH_SE_CFG_SE_AVB_ENA_SET(x)\
@@ -4575,19 +5122,22 @@ enum sparx5_target {
#define HSCH_SE_CFG_SE_STOP_GET(x)\
FIELD_GET(HSCH_SE_CFG_SE_STOP, x)
-/* HSCH:HSCH_CFG:SE_CONNECT */
-#define HSCH_SE_CONNECT(g) __REG(TARGET_HSCH,\
- 0, 1, 0, g, 5040, 32, 12, 0, 1, 4)
+/* HSCH:HSCH_CFG:SE_CONNECT */
+#define HSCH_SE_CONNECT(g) \
+ __REG(TARGET_HSCH, 0, 1, 0, g, regs->gcnt[GC_HSCH_HSCH_CFG], 32, 12, 0,\
+ 1, 4)
-#define HSCH_SE_CONNECT_SE_LEAK_LINK GENMASK(15, 0)
+#define HSCH_SE_CONNECT_SE_LEAK_LINK\
+ GENMASK(regs->fsize[FW_HSCH_SE_CONNECT_SE_LEAK_LINK] + 0 - 1, 0)
#define HSCH_SE_CONNECT_SE_LEAK_LINK_SET(x)\
- FIELD_PREP(HSCH_SE_CONNECT_SE_LEAK_LINK, x)
+ spx5_field_prep(HSCH_SE_CONNECT_SE_LEAK_LINK, x)
#define HSCH_SE_CONNECT_SE_LEAK_LINK_GET(x)\
- FIELD_GET(HSCH_SE_CONNECT_SE_LEAK_LINK, x)
+ spx5_field_get(HSCH_SE_CONNECT_SE_LEAK_LINK, x)
-/* HSCH:HSCH_CFG:SE_DLB_SENSE */
-#define HSCH_SE_DLB_SENSE(g) __REG(TARGET_HSCH,\
- 0, 1, 0, g, 5040, 32, 16, 0, 1, 4)
+/* HSCH:HSCH_CFG:SE_DLB_SENSE */
+#define HSCH_SE_DLB_SENSE(g) \
+ __REG(TARGET_HSCH, 0, 1, 0, g, regs->gcnt[GC_HSCH_HSCH_CFG], 32, 16, 0,\
+ 1, 4)
#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO GENMASK(12, 10)
#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_SET(x)\
@@ -4595,11 +5145,12 @@ enum sparx5_target {
#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_GET(x)\
FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_PRIO, x)
-#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT GENMASK(9, 3)
+#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT\
+ GENMASK(regs->fsize[FW_HSCH_SE_DLB_SENSE_SE_DLB_DPORT] + 3 - 1, 3)
#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_SET(x)\
- FIELD_PREP(HSCH_SE_DLB_SENSE_SE_DLB_DPORT, x)
+ spx5_field_prep(HSCH_SE_DLB_SENSE_SE_DLB_DPORT, x)
#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_GET(x)\
- FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_DPORT, x)
+ spx5_field_get(HSCH_SE_DLB_SENSE_SE_DLB_DPORT, x)
#define HSCH_SE_DLB_SENSE_SE_DLB_SE_ENA BIT(2)
#define HSCH_SE_DLB_SENSE_SE_DLB_SE_ENA_SET(x)\
@@ -4619,9 +5170,10 @@ enum sparx5_target {
#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA_GET(x)\
FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA, x)
-/* HSCH:HSCH_DWRR:DWRR_ENTRY */
-#define HSCH_DWRR_ENTRY(g) __REG(TARGET_HSCH,\
- 0, 1, 162816, g, 72, 4, 0, 0, 1, 4)
+/* HSCH:HSCH_DWRR:DWRR_ENTRY */
+#define HSCH_DWRR_ENTRY(g) \
+ __REG(TARGET_HSCH, 0, 1, regs->gaddr[GA_HSCH_HSCH_DWRR], g, \
+ regs->gcnt[GC_HSCH_HSCH_DWRR], 4, 0, 0, 1, 4)
#define HSCH_DWRR_ENTRY_DWRR_COST GENMASK(24, 20)
#define HSCH_DWRR_ENTRY_DWRR_COST_SET(x)\
@@ -4635,15 +5187,17 @@ enum sparx5_target {
#define HSCH_DWRR_ENTRY_DWRR_BALANCE_GET(x)\
FIELD_GET(HSCH_DWRR_ENTRY_DWRR_BALANCE, x)
-/* HSCH:HSCH_MISC:HSCH_CFG_CFG */
-#define HSCH_HSCH_CFG_CFG __REG(TARGET_HSCH,\
- 0, 1, 163104, 0, 1, 648, 284, 0, 1, 4)
+/* HSCH:HSCH_MISC:HSCH_CFG_CFG */
+#define HSCH_HSCH_CFG_CFG \
+ __REG(TARGET_HSCH, 0, 1, regs->gaddr[GA_HSCH_HSCH_MISC], 0, 1, 648, \
+ 284, 0, 1, 4)
-#define HSCH_HSCH_CFG_CFG_CFG_SE_IDX GENMASK(26, 14)
+#define HSCH_HSCH_CFG_CFG_CFG_SE_IDX\
+ GENMASK(regs->fsize[FW_HSCH_HSCH_CFG_CFG_CFG_SE_IDX] + 14 - 1, 14)
#define HSCH_HSCH_CFG_CFG_CFG_SE_IDX_SET(x)\
- FIELD_PREP(HSCH_HSCH_CFG_CFG_CFG_SE_IDX, x)
+ spx5_field_prep(HSCH_HSCH_CFG_CFG_CFG_SE_IDX, x)
#define HSCH_HSCH_CFG_CFG_CFG_SE_IDX_GET(x)\
- FIELD_GET(HSCH_HSCH_CFG_CFG_CFG_SE_IDX, x)
+ spx5_field_get(HSCH_HSCH_CFG_CFG_CFG_SE_IDX, x)
#define HSCH_HSCH_CFG_CFG_HSCH_LAYER GENMASK(13, 12)
#define HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(x)\
@@ -4657,9 +5211,11 @@ enum sparx5_target {
#define HSCH_HSCH_CFG_CFG_CSR_GRANT_GET(x)\
FIELD_GET(HSCH_HSCH_CFG_CFG_CSR_GRANT, x)
-/* HSCH:HSCH_MISC:SYS_CLK_PER */
-#define HSCH_SYS_CLK_PER __REG(TARGET_HSCH,\
- 0, 1, 163104, 0, 1, 648, 640, 0, 1, 4)
+/* SPARX5 ONLY */
+/* HSCH:HSCH_MISC:SYS_CLK_PER */
+#define HSCH_SYS_CLK_PER \
+ __REG(TARGET_HSCH, 0, 1, regs->gaddr[GA_HSCH_HSCH_MISC], 0, 1, 648, \
+ 640, 0, 1, 4)
#define HSCH_SYS_CLK_PER_100PS GENMASK(7, 0)
#define HSCH_SYS_CLK_PER_100PS_SET(x)\
@@ -4667,9 +5223,10 @@ enum sparx5_target {
#define HSCH_SYS_CLK_PER_100PS_GET(x)\
FIELD_GET(HSCH_SYS_CLK_PER_100PS, x)
-/* HSCH:HSCH_LEAK_LISTS:HSCH_TIMER_CFG */
-#define HSCH_HSCH_TIMER_CFG(g, r) __REG(TARGET_HSCH,\
- 0, 1, 161664, g, 4, 32, 0, r, 4, 4)
+/* HSCH:HSCH_LEAK_LISTS:HSCH_TIMER_CFG */
+#define HSCH_HSCH_TIMER_CFG(g, r) \
+ __REG(TARGET_HSCH, 0, 1, regs->gaddr[GA_HSCH_HSCH_LEAK_LISTS], g, 4, \
+ 32, 0, r, 4, 4)
#define HSCH_HSCH_TIMER_CFG_LEAK_TIME GENMASK(17, 0)
#define HSCH_HSCH_TIMER_CFG_LEAK_TIME_SET(x)\
@@ -4677,15 +5234,17 @@ enum sparx5_target {
#define HSCH_HSCH_TIMER_CFG_LEAK_TIME_GET(x)\
FIELD_GET(HSCH_HSCH_TIMER_CFG_LEAK_TIME, x)
-/* HSCH:HSCH_LEAK_LISTS:HSCH_LEAK_CFG */
-#define HSCH_HSCH_LEAK_CFG(g, r) __REG(TARGET_HSCH,\
- 0, 1, 161664, g, 4, 32, 16, r, 4, 4)
+/* HSCH:HSCH_LEAK_LISTS:HSCH_LEAK_CFG */
+#define HSCH_HSCH_LEAK_CFG(g, r) \
+ __REG(TARGET_HSCH, 0, 1, regs->gaddr[GA_HSCH_HSCH_LEAK_LISTS], g, 4, \
+ 32, 16, r, 4, 4)
-#define HSCH_HSCH_LEAK_CFG_LEAK_FIRST GENMASK(16, 1)
+#define HSCH_HSCH_LEAK_CFG_LEAK_FIRST\
+ GENMASK(regs->fsize[FW_HSCH_HSCH_LEAK_CFG_LEAK_FIRST] + 1 - 1, 1)
#define HSCH_HSCH_LEAK_CFG_LEAK_FIRST_SET(x)\
- FIELD_PREP(HSCH_HSCH_LEAK_CFG_LEAK_FIRST, x)
+ spx5_field_prep(HSCH_HSCH_LEAK_CFG_LEAK_FIRST, x)
#define HSCH_HSCH_LEAK_CFG_LEAK_FIRST_GET(x)\
- FIELD_GET(HSCH_HSCH_LEAK_CFG_LEAK_FIRST, x)
+ spx5_field_get(HSCH_HSCH_LEAK_CFG_LEAK_FIRST, x)
#define HSCH_HSCH_LEAK_CFG_LEAK_ERR BIT(0)
#define HSCH_HSCH_LEAK_CFG_LEAK_ERR_SET(x)\
@@ -4693,9 +5252,10 @@ enum sparx5_target {
#define HSCH_HSCH_LEAK_CFG_LEAK_ERR_GET(x)\
FIELD_GET(HSCH_HSCH_LEAK_CFG_LEAK_ERR, x)
-/* HSCH:SYSTEM:FLUSH_CTRL */
-#define HSCH_FLUSH_CTRL __REG(TARGET_HSCH,\
- 0, 1, 184000, 0, 1, 312, 4, 0, 1, 4)
+/* HSCH:SYSTEM:FLUSH_CTRL */
+#define HSCH_FLUSH_CTRL \
+ __REG(TARGET_HSCH, 0, 1, regs->gaddr[GA_HSCH_SYSTEM], 0, 1, 312, 4, 0, \
+ 1, 4)
#define HSCH_FLUSH_CTRL_FLUSH_ENA BIT(27)
#define HSCH_FLUSH_CTRL_FLUSH_ENA_SET(x)\
@@ -4715,11 +5275,12 @@ enum sparx5_target {
#define HSCH_FLUSH_CTRL_FLUSH_DST_GET(x)\
FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_DST, x)
-#define HSCH_FLUSH_CTRL_FLUSH_PORT GENMASK(24, 18)
+#define HSCH_FLUSH_CTRL_FLUSH_PORT\
+ GENMASK(regs->fsize[FW_HSCH_FLUSH_CTRL_FLUSH_PORT] + 18 - 1, 18)
#define HSCH_FLUSH_CTRL_FLUSH_PORT_SET(x)\
- FIELD_PREP(HSCH_FLUSH_CTRL_FLUSH_PORT, x)
+ spx5_field_prep(HSCH_FLUSH_CTRL_FLUSH_PORT, x)
#define HSCH_FLUSH_CTRL_FLUSH_PORT_GET(x)\
- FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_PORT, x)
+ spx5_field_get(HSCH_FLUSH_CTRL_FLUSH_PORT, x)
#define HSCH_FLUSH_CTRL_FLUSH_QUEUE BIT(17)
#define HSCH_FLUSH_CTRL_FLUSH_QUEUE_SET(x)\
@@ -4733,15 +5294,17 @@ enum sparx5_target {
#define HSCH_FLUSH_CTRL_FLUSH_SE_GET(x)\
FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_SE, x)
-#define HSCH_FLUSH_CTRL_FLUSH_HIER GENMASK(15, 0)
+#define HSCH_FLUSH_CTRL_FLUSH_HIER\
+ GENMASK(regs->fsize[FW_HSCH_FLUSH_CTRL_FLUSH_HIER] + 0 - 1, 0)
#define HSCH_FLUSH_CTRL_FLUSH_HIER_SET(x)\
- FIELD_PREP(HSCH_FLUSH_CTRL_FLUSH_HIER, x)
+ spx5_field_prep(HSCH_FLUSH_CTRL_FLUSH_HIER, x)
#define HSCH_FLUSH_CTRL_FLUSH_HIER_GET(x)\
- FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_HIER, x)
+ spx5_field_get(HSCH_FLUSH_CTRL_FLUSH_HIER, x)
-/* HSCH:SYSTEM:PORT_MODE */
-#define HSCH_PORT_MODE(r) __REG(TARGET_HSCH,\
- 0, 1, 184000, 0, 1, 312, 8, r, 70, 4)
+/* HSCH:SYSTEM:PORT_MODE */
+#define HSCH_PORT_MODE(r) \
+ __REG(TARGET_HSCH, 0, 1, regs->gaddr[GA_HSCH_SYSTEM], 0, 1, 312, 8, r, \
+ regs->rcnt[RC_HSCH_PORT_MODE], 4)
#define HSCH_PORT_MODE_DEQUEUE_DIS BIT(4)
#define HSCH_PORT_MODE_DEQUEUE_DIS_SET(x)\
@@ -4773,9 +5336,10 @@ enum sparx5_target {
#define HSCH_PORT_MODE_CPU_PRIO_MODE_GET(x)\
FIELD_GET(HSCH_PORT_MODE_CPU_PRIO_MODE, x)
-/* HSCH:SYSTEM:OUTB_SHARE_ENA */
-#define HSCH_OUTB_SHARE_ENA(r) __REG(TARGET_HSCH,\
- 0, 1, 184000, 0, 1, 312, 288, r, 5, 4)
+/* HSCH:SYSTEM:OUTB_SHARE_ENA */
+#define HSCH_OUTB_SHARE_ENA(r) \
+ __REG(TARGET_HSCH, 0, 1, regs->gaddr[GA_HSCH_SYSTEM], 0, 1, 312, 288, \
+ r, 5, 4)
#define HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA GENMASK(7, 0)
#define HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA_SET(x)\
@@ -4783,9 +5347,10 @@ enum sparx5_target {
#define HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA_GET(x)\
FIELD_GET(HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA, x)
-/* HSCH:MMGT:RESET_CFG */
-#define HSCH_RESET_CFG __REG(TARGET_HSCH,\
- 0, 1, 162368, 0, 1, 16, 8, 0, 1, 4)
+/* HSCH:MMGT:RESET_CFG */
+#define HSCH_RESET_CFG \
+ __REG(TARGET_HSCH, 0, 1, regs->gaddr[GA_HSCH_MMGT], 0, 1, 16, 8, 0, 1, \
+ 4)
#define HSCH_RESET_CFG_CORE_ENA BIT(0)
#define HSCH_RESET_CFG_CORE_ENA_SET(x)\
@@ -4793,9 +5358,10 @@ enum sparx5_target {
#define HSCH_RESET_CFG_CORE_ENA_GET(x)\
FIELD_GET(HSCH_RESET_CFG_CORE_ENA, x)
-/* HSCH:TAS_CONFIG:TAS_STATEMACHINE_CFG */
-#define HSCH_TAS_STATEMACHINE_CFG __REG(TARGET_HSCH,\
- 0, 1, 162384, 0, 1, 12, 8, 0, 1, 4)
+/* HSCH:TAS_CONFIG:TAS_STATEMACHINE_CFG */
+#define HSCH_TAS_STATEMACHINE_CFG \
+ __REG(TARGET_HSCH, 0, 1, regs->gaddr[GA_HSCH_TAS_CONFIG], 0, 1, \
+ regs->gsize[GW_HSCH_TAS_CONFIG], 8, 0, 1, 4)
#define HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY GENMASK(7, 0)
#define HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY_SET(x)\
@@ -4803,9 +5369,72 @@ enum sparx5_target {
#define HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY_GET(x)\
FIELD_GET(HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY, x)
-/* LRN:COMMON:COMMON_ACCESS_CTRL */
-#define LRN_COMMON_ACCESS_CTRL __REG(TARGET_LRN,\
- 0, 1, 0, 0, 1, 72, 0, 0, 1, 4)
+/* LAN969X ONLY */
+/* HSIOWRAP:XMII_CFG:XMII_CFG */
+#define HSIO_WRAP_XMII_CFG(g) \
+ __REG(TARGET_HSIO_WRAP, 0, 1, 116, g, 2, 20, 0, 0, 1, 4)
+
+#define HSIO_WRAP_XMII_CFG_GPIO_XMII_CFG GENMASK(2, 1)
+#define HSIO_WRAP_XMII_CFG_GPIO_XMII_CFG_SET(x)\
+ FIELD_PREP(HSIO_WRAP_XMII_CFG_GPIO_XMII_CFG, x)
+#define HSIO_WRAP_XMII_CFG_GPIO_XMII_CFG_GET(x)\
+ FIELD_GET(HSIO_WRAP_XMII_CFG_GPIO_XMII_CFG, x)
+
+/* LAN969X ONLY */
+/* HSIOWRAP:XMII_CFG:RGMII_CFG */
+#define HSIO_WRAP_RGMII_CFG(g) \
+ __REG(TARGET_HSIO_WRAP, 0, 1, 116, g, 2, 20, 4, 0, 1, 4)
+
+#define HSIO_WRAP_RGMII_CFG_TX_CLK_CFG GENMASK(4, 2)
+#define HSIO_WRAP_RGMII_CFG_TX_CLK_CFG_SET(x)\
+ FIELD_PREP(HSIO_WRAP_RGMII_CFG_TX_CLK_CFG, x)
+#define HSIO_WRAP_RGMII_CFG_TX_CLK_CFG_GET(x)\
+ FIELD_GET(HSIO_WRAP_RGMII_CFG_TX_CLK_CFG, x)
+
+#define HSIO_WRAP_RGMII_CFG_RGMII_TX_RST BIT(1)
+#define HSIO_WRAP_RGMII_CFG_RGMII_TX_RST_SET(x)\
+ FIELD_PREP(HSIO_WRAP_RGMII_CFG_RGMII_TX_RST, x)
+#define HSIO_WRAP_RGMII_CFG_RGMII_TX_RST_GET(x)\
+ FIELD_GET(HSIO_WRAP_RGMII_CFG_RGMII_TX_RST, x)
+
+#define HSIO_WRAP_RGMII_CFG_RGMII_RX_RST BIT(0)
+#define HSIO_WRAP_RGMII_CFG_RGMII_RX_RST_SET(x)\
+ FIELD_PREP(HSIO_WRAP_RGMII_CFG_RGMII_RX_RST, x)
+#define HSIO_WRAP_RGMII_CFG_RGMII_RX_RST_GET(x)\
+ FIELD_GET(HSIO_WRAP_RGMII_CFG_RGMII_RX_RST, x)
+
+/* LAN969X ONLY */
+/* HSIOWRAP:XMII_CFG:DLL_CFG */
+#define HSIO_WRAP_DLL_CFG(g, r) \
+ __REG(TARGET_HSIO_WRAP, 0, 1, 116, g, 2, 20, 12, r, 2, 4)
+
+#define HSIO_WRAP_DLL_CFG_DLL_ENA BIT(19)
+#define HSIO_WRAP_DLL_CFG_DLL_ENA_SET(x)\
+ FIELD_PREP(HSIO_WRAP_DLL_CFG_DLL_ENA, x)
+#define HSIO_WRAP_DLL_CFG_DLL_ENA_GET(x)\
+ FIELD_GET(HSIO_WRAP_DLL_CFG_DLL_ENA, x)
+
+#define HSIO_WRAP_DLL_CFG_DLL_CLK_ENA BIT(18)
+#define HSIO_WRAP_DLL_CFG_DLL_CLK_ENA_SET(x)\
+ FIELD_PREP(HSIO_WRAP_DLL_CFG_DLL_CLK_ENA, x)
+#define HSIO_WRAP_DLL_CFG_DLL_CLK_ENA_GET(x)\
+ FIELD_GET(HSIO_WRAP_DLL_CFG_DLL_CLK_ENA, x)
+
+#define HSIO_WRAP_DLL_CFG_DLL_CLK_SEL GENMASK(17, 15)
+#define HSIO_WRAP_DLL_CFG_DLL_CLK_SEL_SET(x)\
+ FIELD_PREP(HSIO_WRAP_DLL_CFG_DLL_CLK_SEL, x)
+#define HSIO_WRAP_DLL_CFG_DLL_CLK_SEL_GET(x)\
+ FIELD_GET(HSIO_WRAP_DLL_CFG_DLL_CLK_SEL, x)
+
+#define HSIO_WRAP_DLL_CFG_DLL_RST BIT(0)
+#define HSIO_WRAP_DLL_CFG_DLL_RST_SET(x)\
+ FIELD_PREP(HSIO_WRAP_DLL_CFG_DLL_RST, x)
+#define HSIO_WRAP_DLL_CFG_DLL_RST_GET(x)\
+ FIELD_GET(HSIO_WRAP_DLL_CFG_DLL_RST, x)
+
+/* LRN:COMMON:COMMON_ACCESS_CTRL */
+#define LRN_COMMON_ACCESS_CTRL \
+ __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 0, 0, 1, 4)
#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_COL GENMASK(21, 20)
#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_COL_SET(x)\
@@ -4819,11 +5448,12 @@ enum sparx5_target {
#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_TYPE_GET(x)\
FIELD_GET(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_TYPE, x)
-#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW GENMASK(18, 5)
+#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW\
+ GENMASK(regs->fsize[FW_LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW] + 5 - 1, 5)
#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW_SET(x)\
- FIELD_PREP(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW, x)
+ spx5_field_prep(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW, x)
#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW_GET(x)\
- FIELD_GET(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW, x)
+ spx5_field_get(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW, x)
#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD GENMASK(4, 1)
#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(x)\
@@ -4837,9 +5467,9 @@ enum sparx5_target {
#define LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_GET(x)\
FIELD_GET(LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT, x)
-/* LRN:COMMON:MAC_ACCESS_CFG_0 */
-#define LRN_MAC_ACCESS_CFG_0 __REG(TARGET_LRN,\
- 0, 1, 0, 0, 1, 72, 4, 0, 1, 4)
+/* LRN:COMMON:MAC_ACCESS_CFG_0 */
+#define LRN_MAC_ACCESS_CFG_0 \
+ __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 4, 0, 1, 4)
#define LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_FID GENMASK(28, 16)
#define LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_FID_SET(x)\
@@ -4853,13 +5483,13 @@ enum sparx5_target {
#define LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_MAC_MSB_GET(x)\
FIELD_GET(LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_MAC_MSB, x)
-/* LRN:COMMON:MAC_ACCESS_CFG_1 */
-#define LRN_MAC_ACCESS_CFG_1 __REG(TARGET_LRN,\
- 0, 1, 0, 0, 1, 72, 8, 0, 1, 4)
+/* LRN:COMMON:MAC_ACCESS_CFG_1 */
+#define LRN_MAC_ACCESS_CFG_1 \
+ __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 8, 0, 1, 4)
-/* LRN:COMMON:MAC_ACCESS_CFG_2 */
-#define LRN_MAC_ACCESS_CFG_2 __REG(TARGET_LRN,\
- 0, 1, 0, 0, 1, 72, 12, 0, 1, 4)
+/* LRN:COMMON:MAC_ACCESS_CFG_2 */
+#define LRN_MAC_ACCESS_CFG_2 \
+ __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 12, 0, 1, 4)
#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_SRC_KILL_FWD BIT(28)
#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_SRC_KILL_FWD_SET(x)\
@@ -4933,19 +5563,20 @@ enum sparx5_target {
#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_GET(x)\
FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR, x)
-/* LRN:COMMON:MAC_ACCESS_CFG_3 */
-#define LRN_MAC_ACCESS_CFG_3 __REG(TARGET_LRN,\
- 0, 1, 0, 0, 1, 72, 16, 0, 1, 4)
+/* LRN:COMMON:MAC_ACCESS_CFG_3 */
+#define LRN_MAC_ACCESS_CFG_3 \
+ __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 16, 0, 1, 4)
-#define LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX GENMASK(10, 0)
+#define LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX\
+ GENMASK(regs->fsize[FW_LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX] + 0 - 1, 0)
#define LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX_SET(x)\
- FIELD_PREP(LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX, x)
+ spx5_field_prep(LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX, x)
#define LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX_GET(x)\
- FIELD_GET(LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX, x)
+ spx5_field_get(LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX, x)
-/* LRN:COMMON:SCAN_NEXT_CFG */
-#define LRN_SCAN_NEXT_CFG __REG(TARGET_LRN,\
- 0, 1, 0, 0, 1, 72, 20, 0, 1, 4)
+/* LRN:COMMON:SCAN_NEXT_CFG */
+#define LRN_SCAN_NEXT_CFG \
+ __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 20, 0, 1, 4)
#define LRN_SCAN_NEXT_CFG_SCAN_AGE_FLAG_UPDATE_SEL GENMASK(21, 19)
#define LRN_SCAN_NEXT_CFG_SCAN_AGE_FLAG_UPDATE_SEL_SET(x)\
@@ -5037,9 +5668,9 @@ enum sparx5_target {
#define LRN_SCAN_NEXT_CFG_ADDR_FILTER_ENA_GET(x)\
FIELD_GET(LRN_SCAN_NEXT_CFG_ADDR_FILTER_ENA, x)
-/* LRN:COMMON:SCAN_NEXT_CFG_1 */
-#define LRN_SCAN_NEXT_CFG_1 __REG(TARGET_LRN,\
- 0, 1, 0, 0, 1, 72, 24, 0, 1, 4)
+/* LRN:COMMON:SCAN_NEXT_CFG_1 */
+#define LRN_SCAN_NEXT_CFG_1 \
+ __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 24, 0, 1, 4)
#define LRN_SCAN_NEXT_CFG_1_PORT_MOVE_NEW_ADDR GENMASK(30, 16)
#define LRN_SCAN_NEXT_CFG_1_PORT_MOVE_NEW_ADDR_SET(x)\
@@ -5053,9 +5684,9 @@ enum sparx5_target {
#define LRN_SCAN_NEXT_CFG_1_SCAN_ENTRY_ADDR_MASK_GET(x)\
FIELD_GET(LRN_SCAN_NEXT_CFG_1_SCAN_ENTRY_ADDR_MASK, x)
-/* LRN:COMMON:AUTOAGE_CFG */
-#define LRN_AUTOAGE_CFG(r) __REG(TARGET_LRN,\
- 0, 1, 0, 0, 1, 72, 36, r, 4, 4)
+/* LRN:COMMON:AUTOAGE_CFG */
+#define LRN_AUTOAGE_CFG(r) \
+ __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 36, r, 4, 4)
#define LRN_AUTOAGE_CFG_UNIT_SIZE GENMASK(29, 28)
#define LRN_AUTOAGE_CFG_UNIT_SIZE_SET(x)\
@@ -5069,9 +5700,9 @@ enum sparx5_target {
#define LRN_AUTOAGE_CFG_PERIOD_VAL_GET(x)\
FIELD_GET(LRN_AUTOAGE_CFG_PERIOD_VAL, x)
-/* LRN:COMMON:AUTOAGE_CFG_1 */
-#define LRN_AUTOAGE_CFG_1 __REG(TARGET_LRN,\
- 0, 1, 0, 0, 1, 72, 52, 0, 1, 4)
+/* LRN:COMMON:AUTOAGE_CFG_1 */
+#define LRN_AUTOAGE_CFG_1 \
+ __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 52, 0, 1, 4)
#define LRN_AUTOAGE_CFG_1_PAUSE_AUTO_AGE_ENA BIT(25)
#define LRN_AUTOAGE_CFG_1_PAUSE_AUTO_AGE_ENA_SET(x)\
@@ -5115,15 +5746,16 @@ enum sparx5_target {
#define LRN_AUTOAGE_CFG_1_FORCE_IDLE_ENA_GET(x)\
FIELD_GET(LRN_AUTOAGE_CFG_1_FORCE_IDLE_ENA, x)
-/* LRN:COMMON:AUTOAGE_CFG_2 */
-#define LRN_AUTOAGE_CFG_2 __REG(TARGET_LRN,\
- 0, 1, 0, 0, 1, 72, 56, 0, 1, 4)
+/* LRN:COMMON:AUTOAGE_CFG_2 */
+#define LRN_AUTOAGE_CFG_2 \
+ __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 56, 0, 1, 4)
-#define LRN_AUTOAGE_CFG_2_NEXT_ROW GENMASK(17, 4)
+#define LRN_AUTOAGE_CFG_2_NEXT_ROW\
+ GENMASK(regs->fsize[FW_LRN_AUTOAGE_CFG_2_NEXT_ROW] + 4 - 1, 4)
#define LRN_AUTOAGE_CFG_2_NEXT_ROW_SET(x)\
- FIELD_PREP(LRN_AUTOAGE_CFG_2_NEXT_ROW, x)
+ spx5_field_prep(LRN_AUTOAGE_CFG_2_NEXT_ROW, x)
#define LRN_AUTOAGE_CFG_2_NEXT_ROW_GET(x)\
- FIELD_GET(LRN_AUTOAGE_CFG_2_NEXT_ROW, x)
+ spx5_field_get(LRN_AUTOAGE_CFG_2_NEXT_ROW, x)
#define LRN_AUTOAGE_CFG_2_SCAN_ONGOING_STATUS GENMASK(3, 0)
#define LRN_AUTOAGE_CFG_2_SCAN_ONGOING_STATUS_SET(x)\
@@ -5131,9 +5763,10 @@ enum sparx5_target {
#define LRN_AUTOAGE_CFG_2_SCAN_ONGOING_STATUS_GET(x)\
FIELD_GET(LRN_AUTOAGE_CFG_2_SCAN_ONGOING_STATUS, x)
-/* PCIE_DM_EP:PF0_ATU_CAP:IATU_REGION_CTRL_2_OFF_OUTBOUND_0 */
-#define PCEP_RCTRL_2_OUT_0 __REG(TARGET_PCEP,\
- 0, 1, 3145728, 0, 1, 130852, 4, 0, 1, 4)
+/* SPARX5 ONLY */
+/* PCIE_DM_EP:PF0_ATU_CAP:IATU_REGION_CTRL_2_OFF_OUTBOUND_0 */
+#define PCEP_RCTRL_2_OUT_0 \
+ __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 4, 0, 1, 4)
#define PCEP_RCTRL_2_OUT_0_MSG_CODE GENMASK(7, 0)
#define PCEP_RCTRL_2_OUT_0_MSG_CODE_SET(x)\
@@ -5195,9 +5828,10 @@ enum sparx5_target {
#define PCEP_RCTRL_2_OUT_0_REGION_EN_GET(x)\
FIELD_GET(PCEP_RCTRL_2_OUT_0_REGION_EN, x)
-/* PCIE_DM_EP:PF0_ATU_CAP:IATU_LWR_BASE_ADDR_OFF_OUTBOUND_0 */
-#define PCEP_ADDR_LWR_OUT_0 __REG(TARGET_PCEP,\
- 0, 1, 3145728, 0, 1, 130852, 8, 0, 1, 4)
+/* SPARX5 ONLY */
+/* PCIE_DM_EP:PF0_ATU_CAP:IATU_LWR_BASE_ADDR_OFF_OUTBOUND_0 */
+#define PCEP_ADDR_LWR_OUT_0 \
+ __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 8, 0, 1, 4)
#define PCEP_ADDR_LWR_OUT_0_LWR_BASE_HW GENMASK(15, 0)
#define PCEP_ADDR_LWR_OUT_0_LWR_BASE_HW_SET(x)\
@@ -5211,13 +5845,15 @@ enum sparx5_target {
#define PCEP_ADDR_LWR_OUT_0_LWR_BASE_RW_GET(x)\
FIELD_GET(PCEP_ADDR_LWR_OUT_0_LWR_BASE_RW, x)
-/* PCIE_DM_EP:PF0_ATU_CAP:IATU_UPPER_BASE_ADDR_OFF_OUTBOUND_0 */
-#define PCEP_ADDR_UPR_OUT_0 __REG(TARGET_PCEP,\
- 0, 1, 3145728, 0, 1, 130852, 12, 0, 1, 4)
+/* SPARX5 ONLY */
+/* PCIE_DM_EP:PF0_ATU_CAP:IATU_UPPER_BASE_ADDR_OFF_OUTBOUND_0 */
+#define PCEP_ADDR_UPR_OUT_0 \
+ __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 12, 0, 1, 4)
-/* PCIE_DM_EP:PF0_ATU_CAP:IATU_LIMIT_ADDR_OFF_OUTBOUND_0 */
-#define PCEP_ADDR_LIM_OUT_0 __REG(TARGET_PCEP,\
- 0, 1, 3145728, 0, 1, 130852, 16, 0, 1, 4)
+/* SPARX5 ONLY */
+/* PCIE_DM_EP:PF0_ATU_CAP:IATU_LIMIT_ADDR_OFF_OUTBOUND_0 */
+#define PCEP_ADDR_LIM_OUT_0 \
+ __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 16, 0, 1, 4)
#define PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_HW GENMASK(15, 0)
#define PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_HW_SET(x)\
@@ -5231,17 +5867,20 @@ enum sparx5_target {
#define PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_RW_GET(x)\
FIELD_GET(PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_RW, x)
-/* PCIE_DM_EP:PF0_ATU_CAP:IATU_LWR_TARGET_ADDR_OFF_OUTBOUND_0 */
-#define PCEP_ADDR_LWR_TGT_OUT_0 __REG(TARGET_PCEP,\
- 0, 1, 3145728, 0, 1, 130852, 20, 0, 1, 4)
+/* SPARX5 ONLY */
+/* PCIE_DM_EP:PF0_ATU_CAP:IATU_LWR_TARGET_ADDR_OFF_OUTBOUND_0 */
+#define PCEP_ADDR_LWR_TGT_OUT_0 \
+ __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 20, 0, 1, 4)
-/* PCIE_DM_EP:PF0_ATU_CAP:IATU_UPPER_TARGET_ADDR_OFF_OUTBOUND_0 */
-#define PCEP_ADDR_UPR_TGT_OUT_0 __REG(TARGET_PCEP,\
- 0, 1, 3145728, 0, 1, 130852, 24, 0, 1, 4)
+/* SPARX5 ONLY */
+/* PCIE_DM_EP:PF0_ATU_CAP:IATU_UPPER_TARGET_ADDR_OFF_OUTBOUND_0 */
+#define PCEP_ADDR_UPR_TGT_OUT_0 \
+ __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 24, 0, 1, 4)
-/* PCIE_DM_EP:PF0_ATU_CAP:IATU_UPPR_LIMIT_ADDR_OFF_OUTBOUND_0 */
-#define PCEP_ADDR_UPR_LIM_OUT_0 __REG(TARGET_PCEP,\
- 0, 1, 3145728, 0, 1, 130852, 32, 0, 1, 4)
+/* SPARX5 ONLY */
+/* PCIE_DM_EP:PF0_ATU_CAP:IATU_UPPR_LIMIT_ADDR_OFF_OUTBOUND_0 */
+#define PCEP_ADDR_UPR_LIM_OUT_0 \
+ __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 32, 0, 1, 4)
#define PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_RW GENMASK(1, 0)
#define PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_RW_SET(x)\
@@ -5255,9 +5894,10 @@ enum sparx5_target {
#define PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_HW_GET(x)\
FIELD_GET(PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_HW, x)
-/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_CFG */
-#define PCS10G_BR_PCS_CFG(t) __REG(TARGET_PCS10G_BR,\
- t, 12, 0, 0, 1, 56, 0, 0, 1, 4)
+/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_CFG */
+#define PCS10G_BR_PCS_CFG(t) \
+ __REG(TARGET_PCS10G_BR, t, regs->tsize[TC_PCS10G_BR], 0, 0, 1, 56, 0, \
+ 0, 1, 4)
#define PCS10G_BR_PCS_CFG_PCS_ENA BIT(31)
#define PCS10G_BR_PCS_CFG_PCS_ENA_SET(x)\
@@ -5331,9 +5971,10 @@ enum sparx5_target {
#define PCS10G_BR_PCS_CFG_TX_SCR_DISABLE_GET(x)\
FIELD_GET(PCS10G_BR_PCS_CFG_TX_SCR_DISABLE, x)
-/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_SD_CFG */
-#define PCS10G_BR_PCS_SD_CFG(t) __REG(TARGET_PCS10G_BR,\
- t, 12, 0, 0, 1, 56, 4, 0, 1, 4)
+/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_SD_CFG */
+#define PCS10G_BR_PCS_SD_CFG(t) \
+ __REG(TARGET_PCS10G_BR, t, regs->tsize[TC_PCS10G_BR], 0, 0, 1, 56, 4, \
+ 0, 1, 4)
#define PCS10G_BR_PCS_SD_CFG_SD_SEL BIT(8)
#define PCS10G_BR_PCS_SD_CFG_SD_SEL_SET(x)\
@@ -5353,9 +5994,10 @@ enum sparx5_target {
#define PCS10G_BR_PCS_SD_CFG_SD_ENA_GET(x)\
FIELD_GET(PCS10G_BR_PCS_SD_CFG_SD_ENA, x)
-/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_CFG */
-#define PCS25G_BR_PCS_CFG(t) __REG(TARGET_PCS25G_BR,\
- t, 8, 0, 0, 1, 56, 0, 0, 1, 4)
+/* SPARX5 ONLY */
+/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_CFG */
+#define PCS25G_BR_PCS_CFG(t) \
+ __REG(TARGET_PCS25G_BR, t, 8, 0, 0, 1, 56, 0, 0, 1, 4)
#define PCS25G_BR_PCS_CFG_PCS_ENA BIT(31)
#define PCS25G_BR_PCS_CFG_PCS_ENA_SET(x)\
@@ -5429,9 +6071,10 @@ enum sparx5_target {
#define PCS25G_BR_PCS_CFG_TX_SCR_DISABLE_GET(x)\
FIELD_GET(PCS25G_BR_PCS_CFG_TX_SCR_DISABLE, x)
-/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_SD_CFG */
-#define PCS25G_BR_PCS_SD_CFG(t) __REG(TARGET_PCS25G_BR,\
- t, 8, 0, 0, 1, 56, 4, 0, 1, 4)
+/* SPARX5 ONLY */
+/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_SD_CFG */
+#define PCS25G_BR_PCS_SD_CFG(t) \
+ __REG(TARGET_PCS25G_BR, t, 8, 0, 0, 1, 56, 4, 0, 1, 4)
#define PCS25G_BR_PCS_SD_CFG_SD_SEL BIT(8)
#define PCS25G_BR_PCS_SD_CFG_SD_SEL_SET(x)\
@@ -5451,9 +6094,10 @@ enum sparx5_target {
#define PCS25G_BR_PCS_SD_CFG_SD_ENA_GET(x)\
FIELD_GET(PCS25G_BR_PCS_SD_CFG_SD_ENA, x)
-/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_CFG */
-#define PCS5G_BR_PCS_CFG(t) __REG(TARGET_PCS5G_BR,\
- t, 13, 0, 0, 1, 56, 0, 0, 1, 4)
+/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_CFG */
+#define PCS5G_BR_PCS_CFG(t) \
+ __REG(TARGET_PCS5G_BR, t, regs->tsize[TC_PCS5G_BR], 0, 0, 1, 56, 0, 0, \
+ 1, 4)
#define PCS5G_BR_PCS_CFG_PCS_ENA BIT(31)
#define PCS5G_BR_PCS_CFG_PCS_ENA_SET(x)\
@@ -5527,9 +6171,10 @@ enum sparx5_target {
#define PCS5G_BR_PCS_CFG_TX_SCR_DISABLE_GET(x)\
FIELD_GET(PCS5G_BR_PCS_CFG_TX_SCR_DISABLE, x)
-/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_SD_CFG */
-#define PCS5G_BR_PCS_SD_CFG(t) __REG(TARGET_PCS5G_BR,\
- t, 13, 0, 0, 1, 56, 4, 0, 1, 4)
+/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_SD_CFG */
+#define PCS5G_BR_PCS_SD_CFG(t) \
+ __REG(TARGET_PCS5G_BR, t, regs->tsize[TC_PCS5G_BR], 0, 0, 1, 56, 4, 0, \
+ 1, 4)
#define PCS5G_BR_PCS_SD_CFG_SD_SEL BIT(8)
#define PCS5G_BR_PCS_SD_CFG_SD_SEL_SET(x)\
@@ -5549,58 +6194,67 @@ enum sparx5_target {
#define PCS5G_BR_PCS_SD_CFG_SD_ENA_GET(x)\
FIELD_GET(PCS5G_BR_PCS_SD_CFG_SD_ENA, x)
-/* PORT_CONF:HW_CFG:DEV5G_MODES */
-#define PORT_CONF_DEV5G_MODES __REG(TARGET_PORT_CONF,\
- 0, 1, 0, 0, 1, 24, 0, 0, 1, 4)
+/* PORT_CONF:HW_CFG:DEV5G_MODES */
+#define PORT_CONF_DEV5G_MODES \
+ __REG(TARGET_PORT_CONF, 0, 1, 0, 0, 1, 24, 0, 0, 1, 4)
+/* SPARX5 ONLY */
#define PORT_CONF_DEV5G_MODES_DEV5G_D0_MODE BIT(0)
#define PORT_CONF_DEV5G_MODES_DEV5G_D0_MODE_SET(x)\
FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D0_MODE, x)
#define PORT_CONF_DEV5G_MODES_DEV5G_D0_MODE_GET(x)\
FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D0_MODE, x)
+/* SPARX5 ONLY */
#define PORT_CONF_DEV5G_MODES_DEV5G_D1_MODE BIT(1)
#define PORT_CONF_DEV5G_MODES_DEV5G_D1_MODE_SET(x)\
FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D1_MODE, x)
#define PORT_CONF_DEV5G_MODES_DEV5G_D1_MODE_GET(x)\
FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D1_MODE, x)
+/* SPARX5 ONLY */
#define PORT_CONF_DEV5G_MODES_DEV5G_D2_MODE BIT(2)
#define PORT_CONF_DEV5G_MODES_DEV5G_D2_MODE_SET(x)\
FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D2_MODE, x)
#define PORT_CONF_DEV5G_MODES_DEV5G_D2_MODE_GET(x)\
FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D2_MODE, x)
+/* SPARX5 ONLY */
#define PORT_CONF_DEV5G_MODES_DEV5G_D3_MODE BIT(3)
#define PORT_CONF_DEV5G_MODES_DEV5G_D3_MODE_SET(x)\
FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D3_MODE, x)
#define PORT_CONF_DEV5G_MODES_DEV5G_D3_MODE_GET(x)\
FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D3_MODE, x)
+/* SPARX5 ONLY */
#define PORT_CONF_DEV5G_MODES_DEV5G_D4_MODE BIT(4)
#define PORT_CONF_DEV5G_MODES_DEV5G_D4_MODE_SET(x)\
FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D4_MODE, x)
#define PORT_CONF_DEV5G_MODES_DEV5G_D4_MODE_GET(x)\
FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D4_MODE, x)
+/* SPARX5 ONLY */
#define PORT_CONF_DEV5G_MODES_DEV5G_D5_MODE BIT(5)
#define PORT_CONF_DEV5G_MODES_DEV5G_D5_MODE_SET(x)\
FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D5_MODE, x)
#define PORT_CONF_DEV5G_MODES_DEV5G_D5_MODE_GET(x)\
FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D5_MODE, x)
+/* SPARX5 ONLY */
#define PORT_CONF_DEV5G_MODES_DEV5G_D6_MODE BIT(6)
#define PORT_CONF_DEV5G_MODES_DEV5G_D6_MODE_SET(x)\
FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D6_MODE, x)
#define PORT_CONF_DEV5G_MODES_DEV5G_D6_MODE_GET(x)\
FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D6_MODE, x)
+/* SPARX5 ONLY */
#define PORT_CONF_DEV5G_MODES_DEV5G_D7_MODE BIT(7)
#define PORT_CONF_DEV5G_MODES_DEV5G_D7_MODE_SET(x)\
FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D7_MODE, x)
#define PORT_CONF_DEV5G_MODES_DEV5G_D7_MODE_GET(x)\
FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D7_MODE, x)
+/* SPARX5 ONLY */
#define PORT_CONF_DEV5G_MODES_DEV5G_D8_MODE BIT(8)
#define PORT_CONF_DEV5G_MODES_DEV5G_D8_MODE_SET(x)\
FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D8_MODE, x)
@@ -5613,27 +6267,30 @@ enum sparx5_target {
#define PORT_CONF_DEV5G_MODES_DEV5G_D9_MODE_GET(x)\
FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D9_MODE, x)
+/* SPARX5 ONLY */
#define PORT_CONF_DEV5G_MODES_DEV5G_D10_MODE BIT(10)
#define PORT_CONF_DEV5G_MODES_DEV5G_D10_MODE_SET(x)\
FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D10_MODE, x)
#define PORT_CONF_DEV5G_MODES_DEV5G_D10_MODE_GET(x)\
FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D10_MODE, x)
+/* SPARX5 ONLY */
#define PORT_CONF_DEV5G_MODES_DEV5G_D11_MODE BIT(11)
#define PORT_CONF_DEV5G_MODES_DEV5G_D11_MODE_SET(x)\
FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D11_MODE, x)
#define PORT_CONF_DEV5G_MODES_DEV5G_D11_MODE_GET(x)\
FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D11_MODE, x)
+/* SPARX5 ONLY */
#define PORT_CONF_DEV5G_MODES_DEV5G_D64_MODE BIT(12)
#define PORT_CONF_DEV5G_MODES_DEV5G_D64_MODE_SET(x)\
FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D64_MODE, x)
#define PORT_CONF_DEV5G_MODES_DEV5G_D64_MODE_GET(x)\
FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D64_MODE, x)
-/* PORT_CONF:HW_CFG:DEV10G_MODES */
-#define PORT_CONF_DEV10G_MODES __REG(TARGET_PORT_CONF,\
- 0, 1, 0, 0, 1, 24, 4, 0, 1, 4)
+/* PORT_CONF:HW_CFG:DEV10G_MODES */
+#define PORT_CONF_DEV10G_MODES \
+ __REG(TARGET_PORT_CONF, 0, 1, 0, 0, 1, 24, 4, 0, 1, 4)
#define PORT_CONF_DEV10G_MODES_DEV10G_D12_MODE BIT(0)
#define PORT_CONF_DEV10G_MODES_DEV10G_D12_MODE_SET(x)\
@@ -5641,75 +6298,87 @@ enum sparx5_target {
#define PORT_CONF_DEV10G_MODES_DEV10G_D12_MODE_GET(x)\
FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D12_MODE, x)
+/* SPARX5 ONLY */
#define PORT_CONF_DEV10G_MODES_DEV10G_D13_MODE BIT(1)
#define PORT_CONF_DEV10G_MODES_DEV10G_D13_MODE_SET(x)\
FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D13_MODE, x)
#define PORT_CONF_DEV10G_MODES_DEV10G_D13_MODE_GET(x)\
FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D13_MODE, x)
+/* SPARX5 ONLY */
#define PORT_CONF_DEV10G_MODES_DEV10G_D14_MODE BIT(2)
#define PORT_CONF_DEV10G_MODES_DEV10G_D14_MODE_SET(x)\
FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D14_MODE, x)
#define PORT_CONF_DEV10G_MODES_DEV10G_D14_MODE_GET(x)\
FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D14_MODE, x)
+/* SPARX5 ONLY */
#define PORT_CONF_DEV10G_MODES_DEV10G_D15_MODE BIT(3)
#define PORT_CONF_DEV10G_MODES_DEV10G_D15_MODE_SET(x)\
FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D15_MODE, x)
#define PORT_CONF_DEV10G_MODES_DEV10G_D15_MODE_GET(x)\
FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D15_MODE, x)
+/* SPARX5 ONLY */
#define PORT_CONF_DEV10G_MODES_DEV10G_D48_MODE BIT(4)
#define PORT_CONF_DEV10G_MODES_DEV10G_D48_MODE_SET(x)\
FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D48_MODE, x)
#define PORT_CONF_DEV10G_MODES_DEV10G_D48_MODE_GET(x)\
FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D48_MODE, x)
+/* SPARX5 ONLY */
#define PORT_CONF_DEV10G_MODES_DEV10G_D49_MODE BIT(5)
#define PORT_CONF_DEV10G_MODES_DEV10G_D49_MODE_SET(x)\
FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D49_MODE, x)
#define PORT_CONF_DEV10G_MODES_DEV10G_D49_MODE_GET(x)\
FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D49_MODE, x)
+/* SPARX5 ONLY */
#define PORT_CONF_DEV10G_MODES_DEV10G_D50_MODE BIT(6)
#define PORT_CONF_DEV10G_MODES_DEV10G_D50_MODE_SET(x)\
FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D50_MODE, x)
#define PORT_CONF_DEV10G_MODES_DEV10G_D50_MODE_GET(x)\
FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D50_MODE, x)
+/* SPARX5 ONLY */
#define PORT_CONF_DEV10G_MODES_DEV10G_D51_MODE BIT(7)
#define PORT_CONF_DEV10G_MODES_DEV10G_D51_MODE_SET(x)\
FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D51_MODE, x)
#define PORT_CONF_DEV10G_MODES_DEV10G_D51_MODE_GET(x)\
FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D51_MODE, x)
+/* SPARX5 ONLY */
#define PORT_CONF_DEV10G_MODES_DEV10G_D52_MODE BIT(8)
#define PORT_CONF_DEV10G_MODES_DEV10G_D52_MODE_SET(x)\
FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D52_MODE, x)
#define PORT_CONF_DEV10G_MODES_DEV10G_D52_MODE_GET(x)\
FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D52_MODE, x)
+/* SPARX5 ONLY */
#define PORT_CONF_DEV10G_MODES_DEV10G_D53_MODE BIT(9)
#define PORT_CONF_DEV10G_MODES_DEV10G_D53_MODE_SET(x)\
FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D53_MODE, x)
#define PORT_CONF_DEV10G_MODES_DEV10G_D53_MODE_GET(x)\
FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D53_MODE, x)
+/* SPARX5 ONLY */
#define PORT_CONF_DEV10G_MODES_DEV10G_D54_MODE BIT(10)
#define PORT_CONF_DEV10G_MODES_DEV10G_D54_MODE_SET(x)\
FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D54_MODE, x)
#define PORT_CONF_DEV10G_MODES_DEV10G_D54_MODE_GET(x)\
FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D54_MODE, x)
+/* SPARX5 ONLY */
#define PORT_CONF_DEV10G_MODES_DEV10G_D55_MODE BIT(11)
#define PORT_CONF_DEV10G_MODES_DEV10G_D55_MODE_SET(x)\
FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D55_MODE, x)
#define PORT_CONF_DEV10G_MODES_DEV10G_D55_MODE_GET(x)\
FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D55_MODE, x)
-/* PORT_CONF:HW_CFG:DEV25G_MODES */
-#define PORT_CONF_DEV25G_MODES __REG(TARGET_PORT_CONF,\
- 0, 1, 0, 0, 1, 24, 8, 0, 1, 4)
+/* SPARX5 ONLY */
+/* PORT_CONF:HW_CFG:DEV25G_MODES */
+#define PORT_CONF_DEV25G_MODES \
+ __REG(TARGET_PORT_CONF, 0, 1, 0, 0, 1, 24, 8, 0, 1, 4)
#define PORT_CONF_DEV25G_MODES_DEV25G_D56_MODE BIT(0)
#define PORT_CONF_DEV25G_MODES_DEV25G_D56_MODE_SET(x)\
@@ -5759,9 +6428,9 @@ enum sparx5_target {
#define PORT_CONF_DEV25G_MODES_DEV25G_D63_MODE_GET(x)\
FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D63_MODE, x)
-/* PORT_CONF:HW_CFG:QSGMII_ENA */
-#define PORT_CONF_QSGMII_ENA __REG(TARGET_PORT_CONF,\
- 0, 1, 0, 0, 1, 24, 12, 0, 1, 4)
+/* PORT_CONF:HW_CFG:QSGMII_ENA */
+#define PORT_CONF_QSGMII_ENA \
+ __REG(TARGET_PORT_CONF, 0, 1, 0, 0, 1, 24, 12, 0, 1, 4)
#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_0 BIT(0)
#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_0_SET(x)\
@@ -5799,45 +6468,52 @@ enum sparx5_target {
#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_5_GET(x)\
FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_5, x)
+/* SPARX5 ONLY */
#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_6 BIT(6)
#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_6_SET(x)\
FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_6, x)
#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_6_GET(x)\
FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_6, x)
+/* SPARX5 ONLY */
#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_7 BIT(7)
#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_7_SET(x)\
FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_7, x)
#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_7_GET(x)\
FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_7, x)
+/* SPARX5 ONLY */
#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_8 BIT(8)
#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_8_SET(x)\
FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_8, x)
#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_8_GET(x)\
FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_8, x)
+/* SPARX5 ONLY */
#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_9 BIT(9)
#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_9_SET(x)\
FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_9, x)
#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_9_GET(x)\
FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_9, x)
+/* SPARX5 ONLY */
#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_10 BIT(10)
#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_10_SET(x)\
FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_10, x)
#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_10_GET(x)\
FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_10, x)
+/* SPARX5 ONLY */
#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_11 BIT(11)
#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_11_SET(x)\
FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_11, x)
#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_11_GET(x)\
FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_11, x)
-/* PORT_CONF:USGMII_CFG_STAT:USGMII_CFG */
-#define PORT_CONF_USGMII_CFG(g) __REG(TARGET_PORT_CONF,\
- 0, 1, 72, g, 6, 8, 0, 0, 1, 4)
+/* SPARX5 ONLY */
+/* PORT_CONF:USGMII_CFG_STAT:USGMII_CFG */
+#define PORT_CONF_USGMII_CFG(g) \
+ __REG(TARGET_PORT_CONF, 0, 1, 72, g, 6, 8, 0, 0, 1, 4)
#define PORT_CONF_USGMII_CFG_BYPASS_SCRAM BIT(9)
#define PORT_CONF_USGMII_CFG_BYPASS_SCRAM_SET(x)\
@@ -5881,39 +6557,46 @@ enum sparx5_target {
#define PORT_CONF_USGMII_CFG_QUAD_MODE_GET(x)\
FIELD_GET(PORT_CONF_USGMII_CFG_QUAD_MODE, x)
-/* DEVCPU_PTP:PTP_CFG:PTP_PIN_INTR */
-#define PTP_PTP_PIN_INTR __REG(TARGET_PTP,\
- 0, 1, 320, 0, 1, 16, 0, 0, 1, 4)
+/* DEVCPU_PTP:PTP_CFG:PTP_PIN_INTR */
+#define PTP_PTP_PIN_INTR \
+ __REG(TARGET_PTP, 0, 1, regs->gaddr[GA_PTP_PTP_CFG], 0, 1, 16, 0, 0, 1,\
+ 4)
-#define PTP_PTP_PIN_INTR_INTR_PTP GENMASK(4, 0)
+#define PTP_PTP_PIN_INTR_INTR_PTP\
+ GENMASK(regs->fsize[FW_PTP_PTP_PIN_INTR_INTR_PTP] + 0 - 1, 0)
#define PTP_PTP_PIN_INTR_INTR_PTP_SET(x)\
- FIELD_PREP(PTP_PTP_PIN_INTR_INTR_PTP, x)
+ spx5_field_prep(PTP_PTP_PIN_INTR_INTR_PTP, x)
#define PTP_PTP_PIN_INTR_INTR_PTP_GET(x)\
- FIELD_GET(PTP_PTP_PIN_INTR_INTR_PTP, x)
+ spx5_field_get(PTP_PTP_PIN_INTR_INTR_PTP, x)
-/* DEVCPU_PTP:PTP_CFG:PTP_PIN_INTR_ENA */
-#define PTP_PTP_PIN_INTR_ENA __REG(TARGET_PTP,\
- 0, 1, 320, 0, 1, 16, 4, 0, 1, 4)
+/* DEVCPU_PTP:PTP_CFG:PTP_PIN_INTR_ENA */
+#define PTP_PTP_PIN_INTR_ENA \
+ __REG(TARGET_PTP, 0, 1, regs->gaddr[GA_PTP_PTP_CFG], 0, 1, 16, 4, 0, 1,\
+ 4)
-#define PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA GENMASK(4, 0)
+#define PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA\
+ GENMASK(regs->fsize[FW_PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA] + 0 - 1, 0)
#define PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA_SET(x)\
- FIELD_PREP(PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA, x)
+ spx5_field_prep(PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA, x)
#define PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA_GET(x)\
- FIELD_GET(PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA, x)
+ spx5_field_get(PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA, x)
-/* DEVCPU_PTP:PTP_CFG:PTP_INTR_IDENT */
-#define PTP_PTP_INTR_IDENT __REG(TARGET_PTP,\
- 0, 1, 320, 0, 1, 16, 8, 0, 1, 4)
+/* DEVCPU_PTP:PTP_CFG:PTP_INTR_IDENT */
+#define PTP_PTP_INTR_IDENT \
+ __REG(TARGET_PTP, 0, 1, regs->gaddr[GA_PTP_PTP_CFG], 0, 1, 16, 8, 0, 1,\
+ 4)
-#define PTP_PTP_INTR_IDENT_INTR_PTP_IDENT GENMASK(4, 0)
+#define PTP_PTP_INTR_IDENT_INTR_PTP_IDENT\
+ GENMASK(regs->fsize[FW_PTP_PTP_INTR_IDENT_INTR_PTP_IDENT] + 0 - 1, 0)
#define PTP_PTP_INTR_IDENT_INTR_PTP_IDENT_SET(x)\
- FIELD_PREP(PTP_PTP_INTR_IDENT_INTR_PTP_IDENT, x)
+ spx5_field_prep(PTP_PTP_INTR_IDENT_INTR_PTP_IDENT, x)
#define PTP_PTP_INTR_IDENT_INTR_PTP_IDENT_GET(x)\
- FIELD_GET(PTP_PTP_INTR_IDENT_INTR_PTP_IDENT, x)
+ spx5_field_get(PTP_PTP_INTR_IDENT_INTR_PTP_IDENT, x)
-/* DEVCPU_PTP:PTP_CFG:PTP_DOM_CFG */
-#define PTP_PTP_DOM_CFG __REG(TARGET_PTP,\
- 0, 1, 320, 0, 1, 16, 12, 0, 1, 4)
+/* DEVCPU_PTP:PTP_CFG:PTP_DOM_CFG */
+#define PTP_PTP_DOM_CFG \
+ __REG(TARGET_PTP, 0, 1, regs->gaddr[GA_PTP_PTP_CFG], 0, 1, 16, 12, 0, \
+ 1, 4)
#define PTP_PTP_DOM_CFG_PTP_ENA GENMASK(11, 9)
#define PTP_PTP_DOM_CFG_PTP_ENA_SET(x)\
@@ -5939,13 +6622,15 @@ enum sparx5_target {
#define PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_GET(x)\
FIELD_GET(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS, x)
-/* DEVCPU_PTP:PTP_TOD_DOMAINS:CLK_PER_CFG */
-#define PTP_CLK_PER_CFG(g, r) __REG(TARGET_PTP,\
- 0, 1, 336, g, 3, 28, 0, r, 2, 4)
+/* DEVCPU_PTP:PTP_TOD_DOMAINS:CLK_PER_CFG */
+#define PTP_CLK_PER_CFG(g, r) \
+ __REG(TARGET_PTP, 0, 1, regs->gaddr[GA_PTP_PTP_TOD_DOMAINS], g, 3, 28, \
+ 0, r, 2, 4)
-/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_NSEC */
-#define PTP_PTP_CUR_NSEC(g) __REG(TARGET_PTP,\
- 0, 1, 336, g, 3, 28, 8, 0, 1, 4)
+/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_NSEC */
+#define PTP_PTP_CUR_NSEC(g) \
+ __REG(TARGET_PTP, 0, 1, regs->gaddr[GA_PTP_PTP_TOD_DOMAINS], g, 3, 28, \
+ 8, 0, 1, 4)
#define PTP_PTP_CUR_NSEC_PTP_CUR_NSEC GENMASK(29, 0)
#define PTP_PTP_CUR_NSEC_PTP_CUR_NSEC_SET(x)\
@@ -5953,9 +6638,10 @@ enum sparx5_target {
#define PTP_PTP_CUR_NSEC_PTP_CUR_NSEC_GET(x)\
FIELD_GET(PTP_PTP_CUR_NSEC_PTP_CUR_NSEC, x)
-/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_NSEC_FRAC */
-#define PTP_PTP_CUR_NSEC_FRAC(g) __REG(TARGET_PTP,\
- 0, 1, 336, g, 3, 28, 12, 0, 1, 4)
+/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_NSEC_FRAC */
+#define PTP_PTP_CUR_NSEC_FRAC(g) \
+ __REG(TARGET_PTP, 0, 1, regs->gaddr[GA_PTP_PTP_TOD_DOMAINS], g, 3, 28, \
+ 12, 0, 1, 4)
#define PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC GENMASK(7, 0)
#define PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC_SET(x)\
@@ -5963,13 +6649,15 @@ enum sparx5_target {
#define PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC_GET(x)\
FIELD_GET(PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC, x)
-/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_SEC_LSB */
-#define PTP_PTP_CUR_SEC_LSB(g) __REG(TARGET_PTP,\
- 0, 1, 336, g, 3, 28, 16, 0, 1, 4)
+/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_SEC_LSB */
+#define PTP_PTP_CUR_SEC_LSB(g) \
+ __REG(TARGET_PTP, 0, 1, regs->gaddr[GA_PTP_PTP_TOD_DOMAINS], g, 3, 28, \
+ 16, 0, 1, 4)
-/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_SEC_MSB */
-#define PTP_PTP_CUR_SEC_MSB(g) __REG(TARGET_PTP,\
- 0, 1, 336, g, 3, 28, 20, 0, 1, 4)
+/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_SEC_MSB */
+#define PTP_PTP_CUR_SEC_MSB(g) \
+ __REG(TARGET_PTP, 0, 1, regs->gaddr[GA_PTP_PTP_TOD_DOMAINS], g, 3, 28, \
+ 20, 0, 1, 4)
#define PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB GENMASK(15, 0)
#define PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB_SET(x)\
@@ -5977,37 +6665,43 @@ enum sparx5_target {
#define PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB_GET(x)\
FIELD_GET(PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB, x)
-/* DEVCPU_PTP:PTP_TOD_DOMAINS:NTP_CUR_NSEC */
-#define PTP_NTP_CUR_NSEC(g) __REG(TARGET_PTP,\
- 0, 1, 336, g, 3, 28, 24, 0, 1, 4)
+/* DEVCPU_PTP:PTP_TOD_DOMAINS:NTP_CUR_NSEC */
+#define PTP_NTP_CUR_NSEC(g) \
+ __REG(TARGET_PTP, 0, 1, regs->gaddr[GA_PTP_PTP_TOD_DOMAINS], g, 3, 28, \
+ 24, 0, 1, 4)
-/* DEVCPU_PTP:PTP_PINS:PTP_PIN_CFG */
-#define PTP_PTP_PIN_CFG(g) __REG(TARGET_PTP,\
- 0, 1, 0, g, 5, 64, 0, 0, 1, 4)
+/* DEVCPU_PTP:PTP_PINS:PTP_PIN_CFG */
+#define PTP_PTP_PIN_CFG(g) \
+ __REG(TARGET_PTP, 0, 1, 0, g, regs->gcnt[GC_PTP_PTP_PINS], 64, 0, 0, 1,\
+ 4)
-#define PTP_PTP_PIN_CFG_PTP_PIN_ACTION GENMASK(28, 26)
+#define PTP_PTP_PIN_CFG_PTP_PIN_ACTION\
+ GENMASK(regs->fpos[FP_PTP_PTP_PIN_CFG_PTP_PIN_ACTION] + 2, regs->fpos[FP_PTP_PTP_PIN_CFG_PTP_PIN_ACTION])
#define PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(x)\
- FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_ACTION, x)
+ spx5_field_prep(PTP_PTP_PIN_CFG_PTP_PIN_ACTION, x)
#define PTP_PTP_PIN_CFG_PTP_PIN_ACTION_GET(x)\
- FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_ACTION, x)
+ spx5_field_get(PTP_PTP_PIN_CFG_PTP_PIN_ACTION, x)
-#define PTP_PTP_PIN_CFG_PTP_PIN_SYNC GENMASK(25, 24)
+#define PTP_PTP_PIN_CFG_PTP_PIN_SYNC\
+ GENMASK(regs->fpos[FP_PTP_PTP_PIN_CFG_PTP_PIN_SYNC] + 1, regs->fpos[FP_PTP_PTP_PIN_CFG_PTP_PIN_SYNC])
#define PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(x)\
- FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_SYNC, x)
+ spx5_field_prep(PTP_PTP_PIN_CFG_PTP_PIN_SYNC, x)
#define PTP_PTP_PIN_CFG_PTP_PIN_SYNC_GET(x)\
- FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_SYNC, x)
+ spx5_field_get(PTP_PTP_PIN_CFG_PTP_PIN_SYNC, x)
-#define PTP_PTP_PIN_CFG_PTP_PIN_INV_POL BIT(23)
+#define PTP_PTP_PIN_CFG_PTP_PIN_INV_POL\
+ BIT(regs->fpos[FP_PTP_PTP_PIN_CFG_PTP_PIN_INV_POL])
#define PTP_PTP_PIN_CFG_PTP_PIN_INV_POL_SET(x)\
- FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_INV_POL, x)
+ spx5_field_prep(PTP_PTP_PIN_CFG_PTP_PIN_INV_POL, x)
#define PTP_PTP_PIN_CFG_PTP_PIN_INV_POL_GET(x)\
- FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_INV_POL, x)
+ spx5_field_get(PTP_PTP_PIN_CFG_PTP_PIN_INV_POL, x)
-#define PTP_PTP_PIN_CFG_PTP_PIN_SELECT GENMASK(22, 21)
+#define PTP_PTP_PIN_CFG_PTP_PIN_SELECT\
+ GENMASK(regs->fsize[FW_PTP_PTP_PIN_CFG_PTP_PIN_SELECT] + 21 - 1, 21)
#define PTP_PTP_PIN_CFG_PTP_PIN_SELECT_SET(x)\
- FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_SELECT, x)
+ spx5_field_prep(PTP_PTP_PIN_CFG_PTP_PIN_SELECT, x)
#define PTP_PTP_PIN_CFG_PTP_PIN_SELECT_GET(x)\
- FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_SELECT, x)
+ spx5_field_get(PTP_PTP_PIN_CFG_PTP_PIN_SELECT, x)
#define PTP_PTP_PIN_CFG_PTP_CLK_SELECT GENMASK(20, 18)
#define PTP_PTP_PIN_CFG_PTP_CLK_SELECT_SET(x)\
@@ -6039,9 +6733,10 @@ enum sparx5_target {
#define PTP_PTP_PIN_CFG_PTP_PIN_OUTP_OFS_GET(x)\
FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_OUTP_OFS, x)
-/* DEVCPU_PTP:PTP_PINS:PTP_TOD_SEC_MSB */
-#define PTP_PTP_TOD_SEC_MSB(g) __REG(TARGET_PTP,\
- 0, 1, 0, g, 5, 64, 4, 0, 1, 4)
+/* DEVCPU_PTP:PTP_PINS:PTP_TOD_SEC_MSB */
+#define PTP_PTP_TOD_SEC_MSB(g) \
+ __REG(TARGET_PTP, 0, 1, 0, g, regs->gcnt[GC_PTP_PTP_PINS], 64, 4, 0, 1,\
+ 4)
#define PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB GENMASK(15, 0)
#define PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB_SET(x)\
@@ -6049,13 +6744,15 @@ enum sparx5_target {
#define PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB_GET(x)\
FIELD_GET(PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB, x)
-/* DEVCPU_PTP:PTP_PINS:PTP_TOD_SEC_LSB */
-#define PTP_PTP_TOD_SEC_LSB(g) __REG(TARGET_PTP,\
- 0, 1, 0, g, 5, 64, 8, 0, 1, 4)
+/* DEVCPU_PTP:PTP_PINS:PTP_TOD_SEC_LSB */
+#define PTP_PTP_TOD_SEC_LSB(g) \
+ __REG(TARGET_PTP, 0, 1, 0, g, regs->gcnt[GC_PTP_PTP_PINS], 64, 8, 0, 1,\
+ 4)
-/* DEVCPU_PTP:PTP_PINS:PTP_TOD_NSEC */
-#define PTP_PTP_TOD_NSEC(g) __REG(TARGET_PTP,\
- 0, 1, 0, g, 5, 64, 12, 0, 1, 4)
+/* DEVCPU_PTP:PTP_PINS:PTP_TOD_NSEC */
+#define PTP_PTP_TOD_NSEC(g) \
+ __REG(TARGET_PTP, 0, 1, 0, g, regs->gcnt[GC_PTP_PTP_PINS], 64, 12, 0, \
+ 1, 4)
#define PTP_PTP_TOD_NSEC_PTP_TOD_NSEC GENMASK(29, 0)
#define PTP_PTP_TOD_NSEC_PTP_TOD_NSEC_SET(x)\
@@ -6063,9 +6760,10 @@ enum sparx5_target {
#define PTP_PTP_TOD_NSEC_PTP_TOD_NSEC_GET(x)\
FIELD_GET(PTP_PTP_TOD_NSEC_PTP_TOD_NSEC, x)
-/* DEVCPU_PTP:PTP_PINS:PTP_TOD_NSEC_FRAC */
-#define PTP_PTP_TOD_NSEC_FRAC(g) __REG(TARGET_PTP,\
- 0, 1, 0, g, 5, 64, 16, 0, 1, 4)
+/* DEVCPU_PTP:PTP_PINS:PTP_TOD_NSEC_FRAC */
+#define PTP_PTP_TOD_NSEC_FRAC(g) \
+ __REG(TARGET_PTP, 0, 1, 0, g, regs->gcnt[GC_PTP_PTP_PINS], 64, 16, 0, \
+ 1, 4)
#define PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC GENMASK(7, 0)
#define PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC_SET(x)\
@@ -6073,13 +6771,15 @@ enum sparx5_target {
#define PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC_GET(x)\
FIELD_GET(PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC, x)
-/* DEVCPU_PTP:PTP_PINS:NTP_NSEC */
-#define PTP_NTP_NSEC(g) __REG(TARGET_PTP,\
- 0, 1, 0, g, 5, 64, 20, 0, 1, 4)
+/* DEVCPU_PTP:PTP_PINS:NTP_NSEC */
+#define PTP_NTP_NSEC(g) \
+ __REG(TARGET_PTP, 0, 1, 0, g, regs->gcnt[GC_PTP_PTP_PINS], 64, 20, 0, \
+ 1, 4)
-/* DEVCPU_PTP:PTP_PINS:PIN_WF_HIGH_PERIOD */
-#define PTP_PIN_WF_HIGH_PERIOD(g) __REG(TARGET_PTP,\
- 0, 1, 0, g, 5, 64, 24, 0, 1, 4)
+/* DEVCPU_PTP:PTP_PINS:PIN_WF_HIGH_PERIOD */
+#define PTP_PIN_WF_HIGH_PERIOD(g) \
+ __REG(TARGET_PTP, 0, 1, 0, g, regs->gcnt[GC_PTP_PTP_PINS], 64, 24, 0, \
+ 1, 4)
#define PTP_PIN_WF_HIGH_PERIOD_PIN_WFH GENMASK(29, 0)
#define PTP_PIN_WF_HIGH_PERIOD_PIN_WFH_SET(x)\
@@ -6087,9 +6787,10 @@ enum sparx5_target {
#define PTP_PIN_WF_HIGH_PERIOD_PIN_WFH_GET(x)\
FIELD_GET(PTP_PIN_WF_HIGH_PERIOD_PIN_WFH, x)
-/* DEVCPU_PTP:PTP_PINS:PIN_WF_LOW_PERIOD */
-#define PTP_PIN_WF_LOW_PERIOD(g) __REG(TARGET_PTP,\
- 0, 1, 0, g, 5, 64, 28, 0, 1, 4)
+/* DEVCPU_PTP:PTP_PINS:PIN_WF_LOW_PERIOD */
+#define PTP_PIN_WF_LOW_PERIOD(g) \
+ __REG(TARGET_PTP, 0, 1, 0, g, regs->gcnt[GC_PTP_PTP_PINS], 64, 28, 0, \
+ 1, 4)
#define PTP_PIN_WF_LOW_PERIOD_PIN_WFL GENMASK(29, 0)
#define PTP_PIN_WF_LOW_PERIOD_PIN_WFL_SET(x)\
@@ -6097,9 +6798,10 @@ enum sparx5_target {
#define PTP_PIN_WF_LOW_PERIOD_PIN_WFL_GET(x)\
FIELD_GET(PTP_PIN_WF_LOW_PERIOD_PIN_WFL, x)
-/* DEVCPU_PTP:PTP_PINS:PIN_IOBOUNCH_DELAY */
-#define PTP_PIN_IOBOUNCH_DELAY(g) __REG(TARGET_PTP,\
- 0, 1, 0, g, 5, 64, 32, 0, 1, 4)
+/* DEVCPU_PTP:PTP_PINS:PIN_IOBOUNCH_DELAY */
+#define PTP_PIN_IOBOUNCH_DELAY(g) \
+ __REG(TARGET_PTP, 0, 1, 0, g, regs->gcnt[GC_PTP_PTP_PINS], 64, 32, 0, \
+ 1, 4)
#define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_VAL GENMASK(18, 3)
#define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_VAL_SET(x)\
@@ -6113,22 +6815,27 @@ enum sparx5_target {
#define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_CFG_GET(x)\
FIELD_GET(PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_CFG, x)
-/* DEVCPU_PTP:PHASE_DETECTOR_CTRL:PHAD_CTRL */
-#define PTP_PHAD_CTRL(g) __REG(TARGET_PTP,\
- 0, 1, 420, g, 5, 8, 0, 0, 1, 4)
+/* DEVCPU_PTP:PHASE_DETECTOR_CTRL:PHAD_CTRL */
+#define PTP_PHAD_CTRL(g) \
+ __REG(TARGET_PTP, 0, 1, regs->gaddr[GA_PTP_PHASE_DETECTOR_CTRL], g, \
+ regs->gcnt[GC_PTP_PHASE_DETECTOR_CTRL], \
+ regs->gsize[GW_PTP_PHASE_DETECTOR_CTRL], 0, 0, 1, 4)
-#define PTP_PHAD_CTRL_PHAD_ENA BIT(7)
+#define PTP_PHAD_CTRL_PHAD_ENA\
+ BIT(regs->fpos[FP_PTP_PHAD_CTRL_PHAD_ENA])
#define PTP_PHAD_CTRL_PHAD_ENA_SET(x)\
- FIELD_PREP(PTP_PHAD_CTRL_PHAD_ENA, x)
+ spx5_field_prep(PTP_PHAD_CTRL_PHAD_ENA, x)
#define PTP_PHAD_CTRL_PHAD_ENA_GET(x)\
- FIELD_GET(PTP_PHAD_CTRL_PHAD_ENA, x)
+ spx5_field_get(PTP_PHAD_CTRL_PHAD_ENA, x)
-#define PTP_PHAD_CTRL_PHAD_FAILED BIT(6)
+#define PTP_PHAD_CTRL_PHAD_FAILED\
+ BIT(regs->fpos[FP_PTP_PHAD_CTRL_PHAD_FAILED])
#define PTP_PHAD_CTRL_PHAD_FAILED_SET(x)\
- FIELD_PREP(PTP_PHAD_CTRL_PHAD_FAILED, x)
+ spx5_field_prep(PTP_PHAD_CTRL_PHAD_FAILED, x)
#define PTP_PHAD_CTRL_PHAD_FAILED_GET(x)\
- FIELD_GET(PTP_PHAD_CTRL_PHAD_FAILED, x)
+ spx5_field_get(PTP_PHAD_CTRL_PHAD_FAILED, x)
+/* SPARX5 ONLY */
#define PTP_PHAD_CTRL_REDUCED_RES GENMASK(5, 3)
#define PTP_PHAD_CTRL_REDUCED_RES_SET(x)\
FIELD_PREP(PTP_PHAD_CTRL_REDUCED_RES, x)
@@ -6141,13 +6848,79 @@ enum sparx5_target {
#define PTP_PHAD_CTRL_LOCK_ACC_GET(x)\
FIELD_GET(PTP_PHAD_CTRL_LOCK_ACC, x)
-/* DEVCPU_PTP:PHASE_DETECTOR_CTRL:PHAD_CYC_STAT */
-#define PTP_PHAD_CYC_STAT(g) __REG(TARGET_PTP,\
- 0, 1, 420, g, 5, 8, 4, 0, 1, 4)
-
-/* QFWD:SYSTEM:SWITCH_PORT_MODE */
-#define QFWD_SWITCH_PORT_MODE(r) __REG(TARGET_QFWD,\
- 0, 1, 0, 0, 1, 340, 0, r, 70, 4)
+/* DEVCPU_PTP:PHASE_DETECTOR_CTRL:PHAD_CYC_STAT */
+#define PTP_PHAD_CYC_STAT(g) \
+ __REG(TARGET_PTP, 0, 1, regs->gaddr[GA_PTP_PHASE_DETECTOR_CTRL], g, \
+ regs->gcnt[GC_PTP_PHASE_DETECTOR_CTRL], \
+ regs->gsize[GW_PTP_PHASE_DETECTOR_CTRL], 4, 0, 1, 4)
+
+/* LAN969X ONLY */
+/* DEVCPU_PTP:PTP_TS_FIFO:PTP_TWOSTEP_CTRL */
+#define PTP_TWOSTEP_CTRL \
+ __REG(TARGET_PTP, 0, 1, 612, 0, 1, 16, 0, 0, 1, 4)
+
+#define PTP_TWOSTEP_CTRL_PTP_OVWR_ENA BIT(12)
+#define PTP_TWOSTEP_CTRL_PTP_OVWR_ENA_SET(x)\
+ FIELD_PREP(PTP_TWOSTEP_CTRL_PTP_OVWR_ENA, x)
+#define PTP_TWOSTEP_CTRL_PTP_OVWR_ENA_GET(x)\
+ FIELD_GET(PTP_TWOSTEP_CTRL_PTP_OVWR_ENA, x)
+
+#define PTP_TWOSTEP_CTRL_PTP_NXT BIT(11)
+#define PTP_TWOSTEP_CTRL_PTP_NXT_SET(x)\
+ FIELD_PREP(PTP_TWOSTEP_CTRL_PTP_NXT, x)
+#define PTP_TWOSTEP_CTRL_PTP_NXT_GET(x)\
+ FIELD_GET(PTP_TWOSTEP_CTRL_PTP_NXT, x)
+
+#define PTP_TWOSTEP_CTRL_PTP_VLD BIT(10)
+#define PTP_TWOSTEP_CTRL_PTP_VLD_SET(x)\
+ FIELD_PREP(PTP_TWOSTEP_CTRL_PTP_VLD, x)
+#define PTP_TWOSTEP_CTRL_PTP_VLD_GET(x)\
+ FIELD_GET(PTP_TWOSTEP_CTRL_PTP_VLD, x)
+
+#define PTP_TWOSTEP_CTRL_STAMP_TX BIT(9)
+#define PTP_TWOSTEP_CTRL_STAMP_TX_SET(x)\
+ FIELD_PREP(PTP_TWOSTEP_CTRL_STAMP_TX, x)
+#define PTP_TWOSTEP_CTRL_STAMP_TX_GET(x)\
+ FIELD_GET(PTP_TWOSTEP_CTRL_STAMP_TX, x)
+
+#define PTP_TWOSTEP_CTRL_STAMP_PORT GENMASK(8, 1)
+#define PTP_TWOSTEP_CTRL_STAMP_PORT_SET(x)\
+ FIELD_PREP(PTP_TWOSTEP_CTRL_STAMP_PORT, x)
+#define PTP_TWOSTEP_CTRL_STAMP_PORT_GET(x)\
+ FIELD_GET(PTP_TWOSTEP_CTRL_STAMP_PORT, x)
+
+#define PTP_TWOSTEP_CTRL_PTP_OVFL BIT(0)
+#define PTP_TWOSTEP_CTRL_PTP_OVFL_SET(x)\
+ FIELD_PREP(PTP_TWOSTEP_CTRL_PTP_OVFL, x)
+#define PTP_TWOSTEP_CTRL_PTP_OVFL_GET(x)\
+ FIELD_GET(PTP_TWOSTEP_CTRL_PTP_OVFL, x)
+
+/* LAN969X ONLY */
+/* DEVCPU_PTP:PTP_TS_FIFO:PTP_TWOSTEP_STAMP_NSEC */
+#define PTP_TWOSTEP_STAMP_NSEC \
+ __REG(TARGET_PTP, 0, 1, 612, 0, 1, 16, 4, 0, 1, 4)
+
+#define PTP_TWOSTEP_STAMP_NSEC_NS GENMASK(29, 0)
+#define PTP_TWOSTEP_STAMP_NSEC_NS_SET(x)\
+ FIELD_PREP(PTP_TWOSTEP_STAMP_NSEC_NS, x)
+#define PTP_TWOSTEP_STAMP_NSEC_NS_GET(x)\
+ FIELD_GET(PTP_TWOSTEP_STAMP_NSEC_NS, x)
+
+/* LAN969X ONLY */
+/* DEVCPU_PTP:PTP_TS_FIFO:PTP_TWOSTEP_STAMP_SUBNS */
+#define PTP_TWOSTEP_STAMP_SUBNS \
+ __REG(TARGET_PTP, 0, 1, 612, 0, 1, 16, 8, 0, 1, 4)
+
+#define PTP_TWOSTEP_STAMP_SUBNS_NS GENMASK(7, 0)
+#define PTP_TWOSTEP_STAMP_SUBNS_NS_SET(x)\
+ FIELD_PREP(PTP_TWOSTEP_STAMP_SUBNS_NS, x)
+#define PTP_TWOSTEP_STAMP_SUBNS_NS_GET(x)\
+ FIELD_GET(PTP_TWOSTEP_STAMP_SUBNS_NS, x)
+
+/* QFWD:SYSTEM:SWITCH_PORT_MODE */
+#define QFWD_SWITCH_PORT_MODE(r) \
+ __REG(TARGET_QFWD, 0, 1, 0, 0, 1, 340, 0, r, \
+ regs->rcnt[RC_QFWD_SWITCH_PORT_MODE], 4)
#define QFWD_SWITCH_PORT_MODE_PORT_ENA BIT(19)
#define QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(x)\
@@ -6203,39 +6976,53 @@ enum sparx5_target {
#define QFWD_SWITCH_PORT_MODE_LEARNALL_MORE_GET(x)\
FIELD_GET(QFWD_SWITCH_PORT_MODE_LEARNALL_MORE, x)
-/* QRES:RES_CTRL:RES_CFG */
-#define QRES_RES_CFG(g) __REG(TARGET_QRES,\
- 0, 1, 0, g, 5120, 16, 0, 0, 1, 4)
+/* QFWD:SYSTEM:FRAME_COPY_CFG */
+#define QFWD_FRAME_COPY_CFG(r) \
+ __REG(TARGET_QFWD, 0, 1, 0, 0, 1, 340, 284, r, 12, 4)
+
+#define QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL\
+ GENMASK(regs->fsize[FW_QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL] + 6 - 1, 6)
+#define QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL_SET(x)\
+ spx5_field_prep(QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL, x)
+#define QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL_GET(x)\
+ spx5_field_get(QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL, x)
-#define QRES_RES_CFG_WM_HIGH GENMASK(11, 0)
+/* QRES:RES_CTRL:RES_CFG */
+#define QRES_RES_CFG(g) \
+ __REG(TARGET_QRES, 0, 1, 0, g, 5120, 16, 0, 0, 1, 4)
+
+#define QRES_RES_CFG_WM_HIGH\
+ GENMASK(regs->fsize[FW_QRES_RES_CFG_WM_HIGH] + 0 - 1, 0)
#define QRES_RES_CFG_WM_HIGH_SET(x)\
- FIELD_PREP(QRES_RES_CFG_WM_HIGH, x)
+ spx5_field_prep(QRES_RES_CFG_WM_HIGH, x)
#define QRES_RES_CFG_WM_HIGH_GET(x)\
- FIELD_GET(QRES_RES_CFG_WM_HIGH, x)
+ spx5_field_get(QRES_RES_CFG_WM_HIGH, x)
-/* QRES:RES_CTRL:RES_STAT */
-#define QRES_RES_STAT(g) __REG(TARGET_QRES,\
- 0, 1, 0, g, 5120, 16, 4, 0, 1, 4)
+/* QRES:RES_CTRL:RES_STAT */
+#define QRES_RES_STAT(g) \
+ __REG(TARGET_QRES, 0, 1, 0, g, 5120, 16, 4, 0, 1, 4)
-#define QRES_RES_STAT_MAXUSE GENMASK(20, 0)
+#define QRES_RES_STAT_MAXUSE\
+ GENMASK(regs->fsize[FW_QRES_RES_STAT_MAXUSE] + 0 - 1, 0)
#define QRES_RES_STAT_MAXUSE_SET(x)\
- FIELD_PREP(QRES_RES_STAT_MAXUSE, x)
+ spx5_field_prep(QRES_RES_STAT_MAXUSE, x)
#define QRES_RES_STAT_MAXUSE_GET(x)\
- FIELD_GET(QRES_RES_STAT_MAXUSE, x)
+ spx5_field_get(QRES_RES_STAT_MAXUSE, x)
-/* QRES:RES_CTRL:RES_STAT_CUR */
-#define QRES_RES_STAT_CUR(g) __REG(TARGET_QRES,\
- 0, 1, 0, g, 5120, 16, 8, 0, 1, 4)
+/* QRES:RES_CTRL:RES_STAT_CUR */
+#define QRES_RES_STAT_CUR(g) \
+ __REG(TARGET_QRES, 0, 1, 0, g, 5120, 16, 8, 0, 1, 4)
-#define QRES_RES_STAT_CUR_INUSE GENMASK(20, 0)
+#define QRES_RES_STAT_CUR_INUSE\
+ GENMASK(regs->fsize[FW_QRES_RES_STAT_CUR_INUSE] + 0 - 1, 0)
#define QRES_RES_STAT_CUR_INUSE_SET(x)\
- FIELD_PREP(QRES_RES_STAT_CUR_INUSE, x)
+ spx5_field_prep(QRES_RES_STAT_CUR_INUSE, x)
#define QRES_RES_STAT_CUR_INUSE_GET(x)\
- FIELD_GET(QRES_RES_STAT_CUR_INUSE, x)
+ spx5_field_get(QRES_RES_STAT_CUR_INUSE, x)
-/* DEVCPU_QS:XTR:XTR_GRP_CFG */
-#define QS_XTR_GRP_CFG(r) __REG(TARGET_QS,\
- 0, 1, 0, 0, 1, 36, 0, r, 2, 4)
+/* DEVCPU_QS:XTR:XTR_GRP_CFG */
+#define QS_XTR_GRP_CFG(r) \
+ __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 0, r, 2, 4)
#define QS_XTR_GRP_CFG_MODE GENMASK(3, 2)
#define QS_XTR_GRP_CFG_MODE_SET(x)\
@@ -6255,13 +7042,13 @@ enum sparx5_target {
#define QS_XTR_GRP_CFG_BYTE_SWAP_GET(x)\
FIELD_GET(QS_XTR_GRP_CFG_BYTE_SWAP, x)
-/* DEVCPU_QS:XTR:XTR_RD */
-#define QS_XTR_RD(r) __REG(TARGET_QS,\
- 0, 1, 0, 0, 1, 36, 8, r, 2, 4)
+/* DEVCPU_QS:XTR:XTR_RD */
+#define QS_XTR_RD(r) \
+ __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 8, r, 2, 4)
-/* DEVCPU_QS:XTR:XTR_FLUSH */
-#define QS_XTR_FLUSH __REG(TARGET_QS,\
- 0, 1, 0, 0, 1, 36, 24, 0, 1, 4)
+/* DEVCPU_QS:XTR:XTR_FLUSH */
+#define QS_XTR_FLUSH \
+ __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 24, 0, 1, 4)
#define QS_XTR_FLUSH_FLUSH GENMASK(1, 0)
#define QS_XTR_FLUSH_FLUSH_SET(x)\
@@ -6269,9 +7056,9 @@ enum sparx5_target {
#define QS_XTR_FLUSH_FLUSH_GET(x)\
FIELD_GET(QS_XTR_FLUSH_FLUSH, x)
-/* DEVCPU_QS:XTR:XTR_DATA_PRESENT */
-#define QS_XTR_DATA_PRESENT __REG(TARGET_QS,\
- 0, 1, 0, 0, 1, 36, 28, 0, 1, 4)
+/* DEVCPU_QS:XTR:XTR_DATA_PRESENT */
+#define QS_XTR_DATA_PRESENT \
+ __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 28, 0, 1, 4)
#define QS_XTR_DATA_PRESENT_DATA_PRESENT GENMASK(1, 0)
#define QS_XTR_DATA_PRESENT_DATA_PRESENT_SET(x)\
@@ -6279,9 +7066,9 @@ enum sparx5_target {
#define QS_XTR_DATA_PRESENT_DATA_PRESENT_GET(x)\
FIELD_GET(QS_XTR_DATA_PRESENT_DATA_PRESENT, x)
-/* DEVCPU_QS:INJ:INJ_GRP_CFG */
-#define QS_INJ_GRP_CFG(r) __REG(TARGET_QS,\
- 0, 1, 36, 0, 1, 40, 0, r, 2, 4)
+/* DEVCPU_QS:INJ:INJ_GRP_CFG */
+#define QS_INJ_GRP_CFG(r) \
+ __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 0, r, 2, 4)
#define QS_INJ_GRP_CFG_MODE GENMASK(3, 2)
#define QS_INJ_GRP_CFG_MODE_SET(x)\
@@ -6295,13 +7082,13 @@ enum sparx5_target {
#define QS_INJ_GRP_CFG_BYTE_SWAP_GET(x)\
FIELD_GET(QS_INJ_GRP_CFG_BYTE_SWAP, x)
-/* DEVCPU_QS:INJ:INJ_WR */
-#define QS_INJ_WR(r) __REG(TARGET_QS,\
- 0, 1, 36, 0, 1, 40, 8, r, 2, 4)
+/* DEVCPU_QS:INJ:INJ_WR */
+#define QS_INJ_WR(r) \
+ __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 8, r, 2, 4)
-/* DEVCPU_QS:INJ:INJ_CTRL */
-#define QS_INJ_CTRL(r) __REG(TARGET_QS,\
- 0, 1, 36, 0, 1, 40, 16, r, 2, 4)
+/* DEVCPU_QS:INJ:INJ_CTRL */
+#define QS_INJ_CTRL(r) \
+ __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 16, r, 2, 4)
#define QS_INJ_CTRL_GAP_SIZE GENMASK(24, 21)
#define QS_INJ_CTRL_GAP_SIZE_SET(x)\
@@ -6333,9 +7120,9 @@ enum sparx5_target {
#define QS_INJ_CTRL_VLD_BYTES_GET(x)\
FIELD_GET(QS_INJ_CTRL_VLD_BYTES, x)
-/* DEVCPU_QS:INJ:INJ_STATUS */
-#define QS_INJ_STATUS __REG(TARGET_QS,\
- 0, 1, 36, 0, 1, 40, 24, 0, 1, 4)
+/* DEVCPU_QS:INJ:INJ_STATUS */
+#define QS_INJ_STATUS \
+ __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 24, 0, 1, 4)
#define QS_INJ_STATUS_WMARK_REACHED GENMASK(5, 4)
#define QS_INJ_STATUS_WMARK_REACHED_SET(x)\
@@ -6355,21 +7142,24 @@ enum sparx5_target {
#define QS_INJ_STATUS_INJ_IN_PROGRESS_GET(x)\
FIELD_GET(QS_INJ_STATUS_INJ_IN_PROGRESS, x)
-/* QSYS:PAUSE_CFG:PAUSE_CFG */
-#define QSYS_PAUSE_CFG(r) __REG(TARGET_QSYS,\
- 0, 1, 544, 0, 1, 1128, 0, r, 70, 4)
+/* QSYS:PAUSE_CFG:PAUSE_CFG */
+#define QSYS_PAUSE_CFG(r) \
+ __REG(TARGET_QSYS, 0, 1, 544, 0, 1, regs->gsize[GW_QSYS_PAUSE_CFG], 0, \
+ r, regs->rcnt[RC_QSYS_PAUSE_CFG], 4)
-#define QSYS_PAUSE_CFG_PAUSE_START GENMASK(25, 14)
+#define QSYS_PAUSE_CFG_PAUSE_START\
+ GENMASK(regs->fsize[FW_QSYS_PAUSE_CFG_PAUSE_START] + 14 - 1, 14)
#define QSYS_PAUSE_CFG_PAUSE_START_SET(x)\
- FIELD_PREP(QSYS_PAUSE_CFG_PAUSE_START, x)
+ spx5_field_prep(QSYS_PAUSE_CFG_PAUSE_START, x)
#define QSYS_PAUSE_CFG_PAUSE_START_GET(x)\
- FIELD_GET(QSYS_PAUSE_CFG_PAUSE_START, x)
+ spx5_field_get(QSYS_PAUSE_CFG_PAUSE_START, x)
-#define QSYS_PAUSE_CFG_PAUSE_STOP GENMASK(13, 2)
+#define QSYS_PAUSE_CFG_PAUSE_STOP\
+ GENMASK(regs->fsize[FW_QSYS_PAUSE_CFG_PAUSE_STOP] + 2 - 1, 2)
#define QSYS_PAUSE_CFG_PAUSE_STOP_SET(x)\
- FIELD_PREP(QSYS_PAUSE_CFG_PAUSE_STOP, x)
+ spx5_field_prep(QSYS_PAUSE_CFG_PAUSE_STOP, x)
#define QSYS_PAUSE_CFG_PAUSE_STOP_GET(x)\
- FIELD_GET(QSYS_PAUSE_CFG_PAUSE_STOP, x)
+ spx5_field_get(QSYS_PAUSE_CFG_PAUSE_STOP, x)
#define QSYS_PAUSE_CFG_PAUSE_ENA BIT(1)
#define QSYS_PAUSE_CFG_PAUSE_ENA_SET(x)\
@@ -6383,19 +7173,22 @@ enum sparx5_target {
#define QSYS_PAUSE_CFG_AGGRESSIVE_TAILDROP_ENA_GET(x)\
FIELD_GET(QSYS_PAUSE_CFG_AGGRESSIVE_TAILDROP_ENA, x)
-/* QSYS:PAUSE_CFG:ATOP */
-#define QSYS_ATOP(r) __REG(TARGET_QSYS,\
- 0, 1, 544, 0, 1, 1128, 284, r, 70, 4)
+/* QSYS:PAUSE_CFG:ATOP */
+#define QSYS_ATOP(r) \
+ __REG(TARGET_QSYS, 0, 1, 544, 0, 1, regs->gsize[GW_QSYS_PAUSE_CFG], \
+ 284, r, regs->rcnt[RC_QSYS_ATOP], 4)
-#define QSYS_ATOP_ATOP GENMASK(11, 0)
+#define QSYS_ATOP_ATOP\
+ GENMASK(regs->fsize[FW_QSYS_ATOP_ATOP] + 0 - 1, 0)
#define QSYS_ATOP_ATOP_SET(x)\
- FIELD_PREP(QSYS_ATOP_ATOP, x)
+ spx5_field_prep(QSYS_ATOP_ATOP, x)
#define QSYS_ATOP_ATOP_GET(x)\
- FIELD_GET(QSYS_ATOP_ATOP, x)
+ spx5_field_get(QSYS_ATOP_ATOP, x)
-/* QSYS:PAUSE_CFG:FWD_PRESSURE */
-#define QSYS_FWD_PRESSURE(r) __REG(TARGET_QSYS,\
- 0, 1, 544, 0, 1, 1128, 564, r, 70, 4)
+/* QSYS:PAUSE_CFG:FWD_PRESSURE */
+#define QSYS_FWD_PRESSURE(r) \
+ __REG(TARGET_QSYS, 0, 1, 544, 0, 1, regs->gsize[GW_QSYS_PAUSE_CFG], \
+ 564, r, regs->rcnt[RC_QSYS_FWD_PRESSURE], 4)
#define QSYS_FWD_PRESSURE_FWD_PRESSURE GENMASK(11, 1)
#define QSYS_FWD_PRESSURE_FWD_PRESSURE_SET(x)\
@@ -6409,19 +7202,22 @@ enum sparx5_target {
#define QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS_GET(x)\
FIELD_GET(QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS, x)
-/* QSYS:PAUSE_CFG:ATOP_TOT_CFG */
-#define QSYS_ATOP_TOT_CFG __REG(TARGET_QSYS,\
- 0, 1, 544, 0, 1, 1128, 844, 0, 1, 4)
+/* QSYS:PAUSE_CFG:ATOP_TOT_CFG */
+#define QSYS_ATOP_TOT_CFG \
+ __REG(TARGET_QSYS, 0, 1, 544, 0, 1, regs->gsize[GW_QSYS_PAUSE_CFG], \
+ 844, 0, 1, 4)
-#define QSYS_ATOP_TOT_CFG_ATOP_TOT GENMASK(11, 0)
+#define QSYS_ATOP_TOT_CFG_ATOP_TOT\
+ GENMASK(regs->fsize[FW_QSYS_ATOP_TOT_CFG_ATOP_TOT] + 0 - 1, 0)
#define QSYS_ATOP_TOT_CFG_ATOP_TOT_SET(x)\
- FIELD_PREP(QSYS_ATOP_TOT_CFG_ATOP_TOT, x)
+ spx5_field_prep(QSYS_ATOP_TOT_CFG_ATOP_TOT, x)
#define QSYS_ATOP_TOT_CFG_ATOP_TOT_GET(x)\
- FIELD_GET(QSYS_ATOP_TOT_CFG_ATOP_TOT, x)
+ spx5_field_get(QSYS_ATOP_TOT_CFG_ATOP_TOT, x)
-/* QSYS:CALCFG:CAL_AUTO */
-#define QSYS_CAL_AUTO(r) __REG(TARGET_QSYS,\
- 0, 1, 2304, 0, 1, 40, 0, r, 7, 4)
+/* QSYS:CALCFG:CAL_AUTO */
+#define QSYS_CAL_AUTO(r) \
+ __REG(TARGET_QSYS, 0, 1, regs->gaddr[GA_QSYS_CALCFG], 0, 1, 40, 0, r, \
+ regs->rcnt[RC_QSYS_CAL_AUTO], 4)
#define QSYS_CAL_AUTO_CAL_AUTO GENMASK(29, 0)
#define QSYS_CAL_AUTO_CAL_AUTO_SET(x)\
@@ -6429,9 +7225,10 @@ enum sparx5_target {
#define QSYS_CAL_AUTO_CAL_AUTO_GET(x)\
FIELD_GET(QSYS_CAL_AUTO_CAL_AUTO, x)
-/* QSYS:CALCFG:CAL_CTRL */
-#define QSYS_CAL_CTRL __REG(TARGET_QSYS,\
- 0, 1, 2304, 0, 1, 40, 36, 0, 1, 4)
+/* QSYS:CALCFG:CAL_CTRL */
+#define QSYS_CAL_CTRL \
+ __REG(TARGET_QSYS, 0, 1, regs->gaddr[GA_QSYS_CALCFG], 0, 1, 40, 36, 0, \
+ 1, 4)
#define QSYS_CAL_CTRL_CAL_MODE GENMASK(14, 11)
#define QSYS_CAL_CTRL_CAL_MODE_SET(x)\
@@ -6451,9 +7248,10 @@ enum sparx5_target {
#define QSYS_CAL_CTRL_CAL_AUTO_ERROR_GET(x)\
FIELD_GET(QSYS_CAL_CTRL_CAL_AUTO_ERROR, x)
-/* QSYS:RAM_CTRL:RAM_INIT */
-#define QSYS_RAM_INIT __REG(TARGET_QSYS,\
- 0, 1, 2344, 0, 1, 4, 0, 0, 1, 4)
+/* QSYS:RAM_CTRL:RAM_INIT */
+#define QSYS_RAM_INIT \
+ __REG(TARGET_QSYS, 0, 1, regs->gaddr[GA_QSYS_RAM_CTRL], 0, 1, 4, 0, 0, \
+ 1, 4)
#define QSYS_RAM_INIT_RAM_INIT BIT(1)
#define QSYS_RAM_INIT_RAM_INIT_SET(x)\
@@ -6467,9 +7265,10 @@ enum sparx5_target {
#define QSYS_RAM_INIT_RAM_CFG_HOOK_GET(x)\
FIELD_GET(QSYS_RAM_INIT_RAM_CFG_HOOK, x)
-/* REW:COMMON:OWN_UPSID */
-#define REW_OWN_UPSID(r) __REG(TARGET_REW,\
- 0, 1, 387264, 0, 1, 1232, 0, r, 3, 4)
+/* REW:COMMON:OWN_UPSID */
+#define REW_OWN_UPSID(r) \
+ __REG(TARGET_REW, 0, 1, regs->gaddr[GA_REW_COMMON], 0, 1, 1232, 0, r, \
+ regs->rcnt[RC_REW_OWN_UPSID], 4)
#define REW_OWN_UPSID_OWN_UPSID GENMASK(4, 0)
#define REW_OWN_UPSID_OWN_UPSID_SET(x)\
@@ -6477,15 +7276,17 @@ enum sparx5_target {
#define REW_OWN_UPSID_OWN_UPSID_GET(x)\
FIELD_GET(REW_OWN_UPSID_OWN_UPSID, x)
-/* REW:COMMON:RTAG_ETAG_CTRL */
-#define REW_RTAG_ETAG_CTRL(r) __REG(TARGET_REW,\
- 0, 1, 387264, 0, 1, 1232, 560, r, 70, 4)
+/* REW:COMMON:RTAG_ETAG_CTRL */
+#define REW_RTAG_ETAG_CTRL(r) \
+ __REG(TARGET_REW, 0, 1, regs->gaddr[GA_REW_COMMON], 0, 1, 1232, 560, r,\
+ regs->rcnt[RC_REW_RTAG_ETAG_CTRL], 4)
-#define REW_RTAG_ETAG_CTRL_IPE_TBL GENMASK(9, 3)
+#define REW_RTAG_ETAG_CTRL_IPE_TBL\
+ GENMASK(regs->fsize[FW_REW_RTAG_ETAG_CTRL_IPE_TBL] + 3 - 1, 3)
#define REW_RTAG_ETAG_CTRL_IPE_TBL_SET(x)\
- FIELD_PREP(REW_RTAG_ETAG_CTRL_IPE_TBL, x)
+ spx5_field_prep(REW_RTAG_ETAG_CTRL_IPE_TBL, x)
#define REW_RTAG_ETAG_CTRL_IPE_TBL_GET(x)\
- FIELD_GET(REW_RTAG_ETAG_CTRL_IPE_TBL, x)
+ spx5_field_get(REW_RTAG_ETAG_CTRL_IPE_TBL, x)
#define REW_RTAG_ETAG_CTRL_ES0_ISDX_KEY_ENA GENMASK(2, 1)
#define REW_RTAG_ETAG_CTRL_ES0_ISDX_KEY_ENA_SET(x)\
@@ -6499,9 +7300,10 @@ enum sparx5_target {
#define REW_RTAG_ETAG_CTRL_KEEP_ETAG_GET(x)\
FIELD_GET(REW_RTAG_ETAG_CTRL_KEEP_ETAG, x)
-/* REW:COMMON:ES0_CTRL */
-#define REW_ES0_CTRL __REG(TARGET_REW,\
- 0, 1, 387264, 0, 1, 1232, 852, 0, 1, 4)
+/* REW:COMMON:ES0_CTRL */
+#define REW_ES0_CTRL \
+ __REG(TARGET_REW, 0, 1, regs->gaddr[GA_REW_COMMON], 0, 1, 1232, 852, 0,\
+ 1, 4)
#define REW_ES0_CTRL_ES0_BY_RT_FWD BIT(5)
#define REW_ES0_CTRL_ES0_BY_RT_FWD_SET(x)\
@@ -6539,9 +7341,10 @@ enum sparx5_target {
#define REW_ES0_CTRL_ES0_LU_ENA_GET(x)\
FIELD_GET(REW_ES0_CTRL_ES0_LU_ENA, x)
-/* REW:PORT:PORT_VLAN_CFG */
-#define REW_PORT_VLAN_CFG(g) __REG(TARGET_REW,\
- 0, 1, 360448, g, 70, 256, 0, 0, 1, 4)
+/* REW:PORT:PORT_VLAN_CFG */
+#define REW_PORT_VLAN_CFG(g) \
+ __REG(TARGET_REW, 0, 1, regs->gaddr[GA_REW_PORT], g, \
+ regs->gcnt[GC_REW_PORT], 256, 0, 0, 1, 4)
#define REW_PORT_VLAN_CFG_PORT_PCP GENMASK(15, 13)
#define REW_PORT_VLAN_CFG_PORT_PCP_SET(x)\
@@ -6561,9 +7364,10 @@ enum sparx5_target {
#define REW_PORT_VLAN_CFG_PORT_VID_GET(x)\
FIELD_GET(REW_PORT_VLAN_CFG_PORT_VID, x)
-/* REW:PORT:PCP_MAP_DE0 */
-#define REW_PCP_MAP_DE0(g, r) __REG(TARGET_REW,\
- 0, 1, 360448, g, 70, 256, 4, r, 8, 4)
+/* REW:PORT:PCP_MAP_DE0 */
+#define REW_PCP_MAP_DE0(g, r) \
+ __REG(TARGET_REW, 0, 1, regs->gaddr[GA_REW_PORT], g, \
+ regs->gcnt[GC_REW_PORT], 256, 4, r, 8, 4)
#define REW_PCP_MAP_DE0_PCP_DE0 GENMASK(2, 0)
#define REW_PCP_MAP_DE0_PCP_DE0_SET(x)\
@@ -6571,9 +7375,10 @@ enum sparx5_target {
#define REW_PCP_MAP_DE0_PCP_DE0_GET(x)\
FIELD_GET(REW_PCP_MAP_DE0_PCP_DE0, x)
-/* REW:PORT:PCP_MAP_DE1 */
-#define REW_PCP_MAP_DE1(g, r) __REG(TARGET_REW,\
- 0, 1, 360448, g, 70, 256, 36, r, 8, 4)
+/* REW:PORT:PCP_MAP_DE1 */
+#define REW_PCP_MAP_DE1(g, r) \
+ __REG(TARGET_REW, 0, 1, regs->gaddr[GA_REW_PORT], g, \
+ regs->gcnt[GC_REW_PORT], 256, 36, r, 8, 4)
#define REW_PCP_MAP_DE1_PCP_DE1 GENMASK(2, 0)
#define REW_PCP_MAP_DE1_PCP_DE1_SET(x)\
@@ -6581,9 +7386,10 @@ enum sparx5_target {
#define REW_PCP_MAP_DE1_PCP_DE1_GET(x)\
FIELD_GET(REW_PCP_MAP_DE1_PCP_DE1, x)
-/* REW:PORT:DEI_MAP_DE0 */
-#define REW_DEI_MAP_DE0(g, r) __REG(TARGET_REW,\
- 0, 1, 360448, g, 70, 256, 68, r, 8, 4)
+/* REW:PORT:DEI_MAP_DE0 */
+#define REW_DEI_MAP_DE0(g, r) \
+ __REG(TARGET_REW, 0, 1, regs->gaddr[GA_REW_PORT], g, \
+ regs->gcnt[GC_REW_PORT], 256, 68, r, 8, 4)
#define REW_DEI_MAP_DE0_DEI_DE0 BIT(0)
#define REW_DEI_MAP_DE0_DEI_DE0_SET(x)\
@@ -6591,9 +7397,10 @@ enum sparx5_target {
#define REW_DEI_MAP_DE0_DEI_DE0_GET(x)\
FIELD_GET(REW_DEI_MAP_DE0_DEI_DE0, x)
-/* REW:PORT:DEI_MAP_DE1 */
-#define REW_DEI_MAP_DE1(g, r) __REG(TARGET_REW,\
- 0, 1, 360448, g, 70, 256, 100, r, 8, 4)
+/* REW:PORT:DEI_MAP_DE1 */
+#define REW_DEI_MAP_DE1(g, r) \
+ __REG(TARGET_REW, 0, 1, regs->gaddr[GA_REW_PORT], g, \
+ regs->gcnt[GC_REW_PORT], 256, 100, r, 8, 4)
#define REW_DEI_MAP_DE1_DEI_DE1 BIT(0)
#define REW_DEI_MAP_DE1_DEI_DE1_SET(x)\
@@ -6601,9 +7408,10 @@ enum sparx5_target {
#define REW_DEI_MAP_DE1_DEI_DE1_GET(x)\
FIELD_GET(REW_DEI_MAP_DE1_DEI_DE1, x)
-/* REW:PORT:TAG_CTRL */
-#define REW_TAG_CTRL(g) __REG(TARGET_REW,\
- 0, 1, 360448, g, 70, 256, 132, 0, 1, 4)
+/* REW:PORT:TAG_CTRL */
+#define REW_TAG_CTRL(g) \
+ __REG(TARGET_REW, 0, 1, regs->gaddr[GA_REW_PORT], g, \
+ regs->gcnt[GC_REW_PORT], 256, 132, 0, 1, 4)
#define REW_TAG_CTRL_TAG_CFG_OBEY_WAS_TAGGED BIT(13)
#define REW_TAG_CTRL_TAG_CFG_OBEY_WAS_TAGGED_SET(x)\
@@ -6641,9 +7449,10 @@ enum sparx5_target {
#define REW_TAG_CTRL_TAG_DEI_CFG_GET(x)\
FIELD_GET(REW_TAG_CTRL_TAG_DEI_CFG, x)
-/* REW:PORT:DSCP_MAP */
-#define REW_DSCP_MAP(g) __REG(TARGET_REW,\
- 0, 1, 360448, g, 70, 256, 136, 0, 1, 4)
+/* REW:PORT:DSCP_MAP */
+#define REW_DSCP_MAP(g) \
+ __REG(TARGET_REW, 0, 1, regs->gaddr[GA_REW_PORT], g, \
+ regs->gcnt[GC_REW_PORT], 256, 136, 0, 1, 4)
#define REW_DSCP_MAP_DSCP_UPDATE_ENA BIT(1)
#define REW_DSCP_MAP_DSCP_UPDATE_ENA_SET(x)\
@@ -6657,9 +7466,10 @@ enum sparx5_target {
#define REW_DSCP_MAP_DSCP_REMAP_ENA_GET(x)\
FIELD_GET(REW_DSCP_MAP_DSCP_REMAP_ENA, x)
-/* REW:PTP_CTRL:PTP_TWOSTEP_CTRL */
-#define REW_PTP_TWOSTEP_CTRL __REG(TARGET_REW,\
- 0, 1, 378368, 0, 1, 40, 0, 0, 1, 4)
+/* SPARX5 ONLY */
+/* REW:PTP_CTRL:PTP_TWOSTEP_CTRL */
+#define REW_PTP_TWOSTEP_CTRL \
+ __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 0, 0, 1, 4)
#define REW_PTP_TWOSTEP_CTRL_PTP_OVWR_ENA BIT(12)
#define REW_PTP_TWOSTEP_CTRL_PTP_OVWR_ENA_SET(x)\
@@ -6697,9 +7507,10 @@ enum sparx5_target {
#define REW_PTP_TWOSTEP_CTRL_PTP_OVFL_GET(x)\
FIELD_GET(REW_PTP_TWOSTEP_CTRL_PTP_OVFL, x)
-/* REW:PTP_CTRL:PTP_TWOSTEP_STAMP */
-#define REW_PTP_TWOSTEP_STAMP __REG(TARGET_REW,\
- 0, 1, 378368, 0, 1, 40, 4, 0, 1, 4)
+/* SPARX5 ONLY */
+/* REW:PTP_CTRL:PTP_TWOSTEP_STAMP */
+#define REW_PTP_TWOSTEP_STAMP \
+ __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 4, 0, 1, 4)
#define REW_PTP_TWOSTEP_STAMP_STAMP_NSEC GENMASK(29, 0)
#define REW_PTP_TWOSTEP_STAMP_STAMP_NSEC_SET(x)\
@@ -6707,9 +7518,10 @@ enum sparx5_target {
#define REW_PTP_TWOSTEP_STAMP_STAMP_NSEC_GET(x)\
FIELD_GET(REW_PTP_TWOSTEP_STAMP_STAMP_NSEC, x)
-/* REW:PTP_CTRL:PTP_TWOSTEP_STAMP_SUBNS */
-#define REW_PTP_TWOSTEP_STAMP_SUBNS __REG(TARGET_REW,\
- 0, 1, 378368, 0, 1, 40, 8, 0, 1, 4)
+/* SPARX5 ONLY */
+/* REW:PTP_CTRL:PTP_TWOSTEP_STAMP_SUBNS */
+#define REW_PTP_TWOSTEP_STAMP_SUBNS \
+ __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 8, 0, 1, 4)
#define REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC GENMASK(7, 0)
#define REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC_SET(x)\
@@ -6717,17 +7529,20 @@ enum sparx5_target {
#define REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC_GET(x)\
FIELD_GET(REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC, x)
-/* REW:PTP_CTRL:PTP_RSRV_NOT_ZERO */
-#define REW_PTP_RSRV_NOT_ZERO __REG(TARGET_REW,\
- 0, 1, 378368, 0, 1, 40, 12, 0, 1, 4)
+/* SPARX5 ONLY */
+/* REW:PTP_CTRL:PTP_RSRV_NOT_ZERO */
+#define REW_PTP_RSRV_NOT_ZERO \
+ __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 12, 0, 1, 4)
-/* REW:PTP_CTRL:PTP_RSRV_NOT_ZERO1 */
-#define REW_PTP_RSRV_NOT_ZERO1 __REG(TARGET_REW,\
- 0, 1, 378368, 0, 1, 40, 16, 0, 1, 4)
+/* SPARX5 ONLY */
+/* REW:PTP_CTRL:PTP_RSRV_NOT_ZERO1 */
+#define REW_PTP_RSRV_NOT_ZERO1 \
+ __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 16, 0, 1, 4)
-/* REW:PTP_CTRL:PTP_RSRV_NOT_ZERO2 */
-#define REW_PTP_RSRV_NOT_ZERO2 __REG(TARGET_REW,\
- 0, 1, 378368, 0, 1, 40, 20, 0, 1, 4)
+/* SPARX5 ONLY */
+/* REW:PTP_CTRL:PTP_RSRV_NOT_ZERO2 */
+#define REW_PTP_RSRV_NOT_ZERO2 \
+ __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 20, 0, 1, 4)
#define REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2 GENMASK(5, 0)
#define REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2_SET(x)\
@@ -6735,9 +7550,10 @@ enum sparx5_target {
#define REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2_GET(x)\
FIELD_GET(REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2, x)
-/* REW:PTP_CTRL:PTP_GEN_STAMP_FMT */
-#define REW_PTP_GEN_STAMP_FMT(r) __REG(TARGET_REW,\
- 0, 1, 378368, 0, 1, 40, 24, r, 4, 4)
+/* SPARX5 ONLY */
+/* REW:PTP_CTRL:PTP_GEN_STAMP_FMT */
+#define REW_PTP_GEN_STAMP_FMT(r) \
+ __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 24, r, 4, 4)
#define REW_PTP_GEN_STAMP_FMT_RT_OFS GENMASK(6, 2)
#define REW_PTP_GEN_STAMP_FMT_RT_OFS_SET(x)\
@@ -6751,9 +7567,10 @@ enum sparx5_target {
#define REW_PTP_GEN_STAMP_FMT_RT_FMT_GET(x)\
FIELD_GET(REW_PTP_GEN_STAMP_FMT_RT_FMT, x)
-/* REW:RAM_CTRL:RAM_INIT */
-#define REW_RAM_INIT __REG(TARGET_REW,\
- 0, 1, 378696, 0, 1, 4, 0, 0, 1, 4)
+/* REW:RAM_CTRL:RAM_INIT */
+#define REW_RAM_INIT \
+ __REG(TARGET_REW, 0, 1, regs->gaddr[GA_REW_RAM_CTRL], 0, 1, 4, 0, 0, 1,\
+ 4)
#define REW_RAM_INIT_RAM_INIT BIT(1)
#define REW_RAM_INIT_RAM_INIT_SET(x)\
@@ -6767,9 +7584,9 @@ enum sparx5_target {
#define REW_RAM_INIT_RAM_CFG_HOOK_GET(x)\
FIELD_GET(REW_RAM_INIT_RAM_CFG_HOOK, x)
-/* VCAP_ES0:VCAP_CORE_CFG:VCAP_UPDATE_CTRL */
-#define VCAP_ES0_CTRL __REG(TARGET_VCAP_ES0,\
- 0, 1, 0, 0, 1, 8, 0, 0, 1, 4)
+/* VCAP_ES0:VCAP_CORE_CFG:VCAP_UPDATE_CTRL */
+#define VCAP_ES0_CTRL \
+ __REG(TARGET_VCAP_ES0, 0, 1, 0, 0, 1, 8, 0, 0, 1, 4)
#define VCAP_ES0_CTRL_UPDATE_CMD GENMASK(24, 22)
#define VCAP_ES0_CTRL_UPDATE_CMD_SET(x)\
@@ -6819,9 +7636,9 @@ enum sparx5_target {
#define VCAP_ES0_CTRL_MV_TRAFFIC_IGN_GET(x)\
FIELD_GET(VCAP_ES0_CTRL_MV_TRAFFIC_IGN, x)
-/* VCAP_ES0:VCAP_CORE_CFG:VCAP_MV_CFG */
-#define VCAP_ES0_CFG __REG(TARGET_VCAP_ES0,\
- 0, 1, 0, 0, 1, 8, 4, 0, 1, 4)
+/* VCAP_ES0:VCAP_CORE_CFG:VCAP_MV_CFG */
+#define VCAP_ES0_CFG \
+ __REG(TARGET_VCAP_ES0, 0, 1, 0, 0, 1, 8, 4, 0, 1, 4)
#define VCAP_ES0_CFG_MV_NUM_POS GENMASK(31, 16)
#define VCAP_ES0_CFG_MV_NUM_POS_SET(x)\
@@ -6835,33 +7652,33 @@ enum sparx5_target {
#define VCAP_ES0_CFG_MV_SIZE_GET(x)\
FIELD_GET(VCAP_ES0_CFG_MV_SIZE, x)
-/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_ENTRY_DAT */
-#define VCAP_ES0_VCAP_ENTRY_DAT(r) __REG(TARGET_VCAP_ES0,\
- 0, 1, 8, 0, 1, 904, 0, r, 64, 4)
+/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_ENTRY_DAT */
+#define VCAP_ES0_VCAP_ENTRY_DAT(r) \
+ __REG(TARGET_VCAP_ES0, 0, 1, 8, 0, 1, 904, 0, r, 64, 4)
-/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_MASK_DAT */
-#define VCAP_ES0_VCAP_MASK_DAT(r) __REG(TARGET_VCAP_ES0,\
- 0, 1, 8, 0, 1, 904, 256, r, 64, 4)
+/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_MASK_DAT */
+#define VCAP_ES0_VCAP_MASK_DAT(r) \
+ __REG(TARGET_VCAP_ES0, 0, 1, 8, 0, 1, 904, 256, r, 64, 4)
-/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_ACTION_DAT */
-#define VCAP_ES0_VCAP_ACTION_DAT(r) __REG(TARGET_VCAP_ES0,\
- 0, 1, 8, 0, 1, 904, 512, r, 64, 4)
+/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_ACTION_DAT */
+#define VCAP_ES0_VCAP_ACTION_DAT(r) \
+ __REG(TARGET_VCAP_ES0, 0, 1, 8, 0, 1, 904, 512, r, 64, 4)
-/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_CNT_DAT */
-#define VCAP_ES0_VCAP_CNT_DAT(r) __REG(TARGET_VCAP_ES0,\
- 0, 1, 8, 0, 1, 904, 768, r, 32, 4)
+/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_CNT_DAT */
+#define VCAP_ES0_VCAP_CNT_DAT(r) \
+ __REG(TARGET_VCAP_ES0, 0, 1, 8, 0, 1, 904, 768, r, 32, 4)
-/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_CNT_FW_DAT */
-#define VCAP_ES0_VCAP_CNT_FW_DAT __REG(TARGET_VCAP_ES0,\
- 0, 1, 8, 0, 1, 904, 896, 0, 1, 4)
+/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_CNT_FW_DAT */
+#define VCAP_ES0_VCAP_CNT_FW_DAT \
+ __REG(TARGET_VCAP_ES0, 0, 1, 8, 0, 1, 904, 896, 0, 1, 4)
-/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_TG_DAT */
-#define VCAP_ES0_VCAP_TG_DAT __REG(TARGET_VCAP_ES0,\
- 0, 1, 8, 0, 1, 904, 900, 0, 1, 4)
+/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_TG_DAT */
+#define VCAP_ES0_VCAP_TG_DAT \
+ __REG(TARGET_VCAP_ES0, 0, 1, 8, 0, 1, 904, 900, 0, 1, 4)
-/* VCAP_ES0:VCAP_CORE_MAP:VCAP_CORE_IDX */
-#define VCAP_ES0_IDX __REG(TARGET_VCAP_ES0,\
- 0, 1, 912, 0, 1, 8, 0, 0, 1, 4)
+/* VCAP_ES0:VCAP_CORE_MAP:VCAP_CORE_IDX */
+#define VCAP_ES0_IDX \
+ __REG(TARGET_VCAP_ES0, 0, 1, 912, 0, 1, 8, 0, 0, 1, 4)
#define VCAP_ES0_IDX_CORE_IDX GENMASK(3, 0)
#define VCAP_ES0_IDX_CORE_IDX_SET(x)\
@@ -6869,9 +7686,9 @@ enum sparx5_target {
#define VCAP_ES0_IDX_CORE_IDX_GET(x)\
FIELD_GET(VCAP_ES0_IDX_CORE_IDX, x)
-/* VCAP_ES0:VCAP_CORE_MAP:VCAP_CORE_MAP */
-#define VCAP_ES0_MAP __REG(TARGET_VCAP_ES0,\
- 0, 1, 912, 0, 1, 8, 4, 0, 1, 4)
+/* VCAP_ES0:VCAP_CORE_MAP:VCAP_CORE_MAP */
+#define VCAP_ES0_MAP \
+ __REG(TARGET_VCAP_ES0, 0, 1, 912, 0, 1, 8, 4, 0, 1, 4)
#define VCAP_ES0_MAP_CORE_MAP GENMASK(2, 0)
#define VCAP_ES0_MAP_CORE_MAP_SET(x)\
@@ -6879,9 +7696,9 @@ enum sparx5_target {
#define VCAP_ES0_MAP_CORE_MAP_GET(x)\
FIELD_GET(VCAP_ES0_MAP_CORE_MAP, x)
-/* VCAP_ES0:VCAP_CORE_STICKY:VCAP_STICKY */
-#define VCAP_ES0_VCAP_STICKY __REG(TARGET_VCAP_ES0,\
- 0, 1, 920, 0, 1, 4, 0, 0, 1, 4)
+/* VCAP_ES0:VCAP_CORE_STICKY:VCAP_STICKY */
+#define VCAP_ES0_VCAP_STICKY \
+ __REG(TARGET_VCAP_ES0, 0, 1, 920, 0, 1, 4, 0, 0, 1, 4)
#define VCAP_ES0_VCAP_STICKY_VCAP_ROW_DELETED_STICKY BIT(0)
#define VCAP_ES0_VCAP_STICKY_VCAP_ROW_DELETED_STICKY_SET(x)\
@@ -6889,49 +7706,49 @@ enum sparx5_target {
#define VCAP_ES0_VCAP_STICKY_VCAP_ROW_DELETED_STICKY_GET(x)\
FIELD_GET(VCAP_ES0_VCAP_STICKY_VCAP_ROW_DELETED_STICKY, x)
-/* VCAP_ES0:VCAP_CONST:VCAP_VER */
-#define VCAP_ES0_VCAP_VER __REG(TARGET_VCAP_ES0,\
- 0, 1, 924, 0, 1, 40, 0, 0, 1, 4)
+/* VCAP_ES0:VCAP_CONST:VCAP_VER */
+#define VCAP_ES0_VCAP_VER \
+ __REG(TARGET_VCAP_ES0, 0, 1, 924, 0, 1, 40, 0, 0, 1, 4)
-/* VCAP_ES0:VCAP_CONST:ENTRY_WIDTH */
-#define VCAP_ES0_ENTRY_WIDTH __REG(TARGET_VCAP_ES0,\
- 0, 1, 924, 0, 1, 40, 4, 0, 1, 4)
+/* VCAP_ES0:VCAP_CONST:ENTRY_WIDTH */
+#define VCAP_ES0_ENTRY_WIDTH \
+ __REG(TARGET_VCAP_ES0, 0, 1, 924, 0, 1, 40, 4, 0, 1, 4)
-/* VCAP_ES0:VCAP_CONST:ENTRY_CNT */
-#define VCAP_ES0_ENTRY_CNT __REG(TARGET_VCAP_ES0,\
- 0, 1, 924, 0, 1, 40, 8, 0, 1, 4)
+/* VCAP_ES0:VCAP_CONST:ENTRY_CNT */
+#define VCAP_ES0_ENTRY_CNT \
+ __REG(TARGET_VCAP_ES0, 0, 1, 924, 0, 1, 40, 8, 0, 1, 4)
-/* VCAP_ES0:VCAP_CONST:ENTRY_SWCNT */
-#define VCAP_ES0_ENTRY_SWCNT __REG(TARGET_VCAP_ES0,\
- 0, 1, 924, 0, 1, 40, 12, 0, 1, 4)
+/* VCAP_ES0:VCAP_CONST:ENTRY_SWCNT */
+#define VCAP_ES0_ENTRY_SWCNT \
+ __REG(TARGET_VCAP_ES0, 0, 1, 924, 0, 1, 40, 12, 0, 1, 4)
-/* VCAP_ES0:VCAP_CONST:ENTRY_TG_WIDTH */
-#define VCAP_ES0_ENTRY_TG_WIDTH __REG(TARGET_VCAP_ES0,\
- 0, 1, 924, 0, 1, 40, 16, 0, 1, 4)
+/* VCAP_ES0:VCAP_CONST:ENTRY_TG_WIDTH */
+#define VCAP_ES0_ENTRY_TG_WIDTH \
+ __REG(TARGET_VCAP_ES0, 0, 1, 924, 0, 1, 40, 16, 0, 1, 4)
-/* VCAP_ES0:VCAP_CONST:ACTION_DEF_CNT */
-#define VCAP_ES0_ACTION_DEF_CNT __REG(TARGET_VCAP_ES0,\
- 0, 1, 924, 0, 1, 40, 20, 0, 1, 4)
+/* VCAP_ES0:VCAP_CONST:ACTION_DEF_CNT */
+#define VCAP_ES0_ACTION_DEF_CNT \
+ __REG(TARGET_VCAP_ES0, 0, 1, 924, 0, 1, 40, 20, 0, 1, 4)
-/* VCAP_ES0:VCAP_CONST:ACTION_WIDTH */
-#define VCAP_ES0_ACTION_WIDTH __REG(TARGET_VCAP_ES0,\
- 0, 1, 924, 0, 1, 40, 24, 0, 1, 4)
+/* VCAP_ES0:VCAP_CONST:ACTION_WIDTH */
+#define VCAP_ES0_ACTION_WIDTH \
+ __REG(TARGET_VCAP_ES0, 0, 1, 924, 0, 1, 40, 24, 0, 1, 4)
-/* VCAP_ES0:VCAP_CONST:CNT_WIDTH */
-#define VCAP_ES0_CNT_WIDTH __REG(TARGET_VCAP_ES0,\
- 0, 1, 924, 0, 1, 40, 28, 0, 1, 4)
+/* VCAP_ES0:VCAP_CONST:CNT_WIDTH */
+#define VCAP_ES0_CNT_WIDTH \
+ __REG(TARGET_VCAP_ES0, 0, 1, 924, 0, 1, 40, 28, 0, 1, 4)
-/* VCAP_ES0:VCAP_CONST:CORE_CNT */
-#define VCAP_ES0_CORE_CNT __REG(TARGET_VCAP_ES0,\
- 0, 1, 924, 0, 1, 40, 32, 0, 1, 4)
+/* VCAP_ES0:VCAP_CONST:CORE_CNT */
+#define VCAP_ES0_CORE_CNT \
+ __REG(TARGET_VCAP_ES0, 0, 1, 924, 0, 1, 40, 32, 0, 1, 4)
-/* VCAP_ES0:VCAP_CONST:IF_CNT */
-#define VCAP_ES0_IF_CNT __REG(TARGET_VCAP_ES0,\
- 0, 1, 924, 0, 1, 40, 36, 0, 1, 4)
+/* VCAP_ES0:VCAP_CONST:IF_CNT */
+#define VCAP_ES0_IF_CNT \
+ __REG(TARGET_VCAP_ES0, 0, 1, 924, 0, 1, 40, 36, 0, 1, 4)
-/* VCAP_ES2:VCAP_CORE_CFG:VCAP_UPDATE_CTRL */
-#define VCAP_ES2_CTRL __REG(TARGET_VCAP_ES2,\
- 0, 1, 0, 0, 1, 8, 0, 0, 1, 4)
+/* VCAP_ES2:VCAP_CORE_CFG:VCAP_UPDATE_CTRL */
+#define VCAP_ES2_CTRL \
+ __REG(TARGET_VCAP_ES2, 0, 1, 0, 0, 1, 8, 0, 0, 1, 4)
#define VCAP_ES2_CTRL_UPDATE_CMD GENMASK(24, 22)
#define VCAP_ES2_CTRL_UPDATE_CMD_SET(x)\
@@ -6981,9 +7798,9 @@ enum sparx5_target {
#define VCAP_ES2_CTRL_MV_TRAFFIC_IGN_GET(x)\
FIELD_GET(VCAP_ES2_CTRL_MV_TRAFFIC_IGN, x)
-/* VCAP_ES2:VCAP_CORE_CFG:VCAP_MV_CFG */
-#define VCAP_ES2_CFG __REG(TARGET_VCAP_ES2,\
- 0, 1, 0, 0, 1, 8, 4, 0, 1, 4)
+/* VCAP_ES2:VCAP_CORE_CFG:VCAP_MV_CFG */
+#define VCAP_ES2_CFG \
+ __REG(TARGET_VCAP_ES2, 0, 1, 0, 0, 1, 8, 4, 0, 1, 4)
#define VCAP_ES2_CFG_MV_NUM_POS GENMASK(31, 16)
#define VCAP_ES2_CFG_MV_NUM_POS_SET(x)\
@@ -6997,33 +7814,33 @@ enum sparx5_target {
#define VCAP_ES2_CFG_MV_SIZE_GET(x)\
FIELD_GET(VCAP_ES2_CFG_MV_SIZE, x)
-/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_ENTRY_DAT */
-#define VCAP_ES2_VCAP_ENTRY_DAT(r) __REG(TARGET_VCAP_ES2,\
- 0, 1, 8, 0, 1, 904, 0, r, 64, 4)
+/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_ENTRY_DAT */
+#define VCAP_ES2_VCAP_ENTRY_DAT(r) \
+ __REG(TARGET_VCAP_ES2, 0, 1, 8, 0, 1, 904, 0, r, 64, 4)
-/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_MASK_DAT */
-#define VCAP_ES2_VCAP_MASK_DAT(r) __REG(TARGET_VCAP_ES2,\
- 0, 1, 8, 0, 1, 904, 256, r, 64, 4)
+/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_MASK_DAT */
+#define VCAP_ES2_VCAP_MASK_DAT(r) \
+ __REG(TARGET_VCAP_ES2, 0, 1, 8, 0, 1, 904, 256, r, 64, 4)
-/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_ACTION_DAT */
-#define VCAP_ES2_VCAP_ACTION_DAT(r) __REG(TARGET_VCAP_ES2,\
- 0, 1, 8, 0, 1, 904, 512, r, 64, 4)
+/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_ACTION_DAT */
+#define VCAP_ES2_VCAP_ACTION_DAT(r) \
+ __REG(TARGET_VCAP_ES2, 0, 1, 8, 0, 1, 904, 512, r, 64, 4)
-/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_CNT_DAT */
-#define VCAP_ES2_VCAP_CNT_DAT(r) __REG(TARGET_VCAP_ES2,\
- 0, 1, 8, 0, 1, 904, 768, r, 32, 4)
+/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_CNT_DAT */
+#define VCAP_ES2_VCAP_CNT_DAT(r) \
+ __REG(TARGET_VCAP_ES2, 0, 1, 8, 0, 1, 904, 768, r, 32, 4)
-/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_CNT_FW_DAT */
-#define VCAP_ES2_VCAP_CNT_FW_DAT __REG(TARGET_VCAP_ES2,\
- 0, 1, 8, 0, 1, 904, 896, 0, 1, 4)
+/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_CNT_FW_DAT */
+#define VCAP_ES2_VCAP_CNT_FW_DAT \
+ __REG(TARGET_VCAP_ES2, 0, 1, 8, 0, 1, 904, 896, 0, 1, 4)
-/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_TG_DAT */
-#define VCAP_ES2_VCAP_TG_DAT __REG(TARGET_VCAP_ES2,\
- 0, 1, 8, 0, 1, 904, 900, 0, 1, 4)
+/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_TG_DAT */
+#define VCAP_ES2_VCAP_TG_DAT \
+ __REG(TARGET_VCAP_ES2, 0, 1, 8, 0, 1, 904, 900, 0, 1, 4)
-/* VCAP_ES2:VCAP_CORE_MAP:VCAP_CORE_IDX */
-#define VCAP_ES2_IDX __REG(TARGET_VCAP_ES2,\
- 0, 1, 912, 0, 1, 8, 0, 0, 1, 4)
+/* VCAP_ES2:VCAP_CORE_MAP:VCAP_CORE_IDX */
+#define VCAP_ES2_IDX \
+ __REG(TARGET_VCAP_ES2, 0, 1, 912, 0, 1, 8, 0, 0, 1, 4)
#define VCAP_ES2_IDX_CORE_IDX GENMASK(3, 0)
#define VCAP_ES2_IDX_CORE_IDX_SET(x)\
@@ -7031,9 +7848,9 @@ enum sparx5_target {
#define VCAP_ES2_IDX_CORE_IDX_GET(x)\
FIELD_GET(VCAP_ES2_IDX_CORE_IDX, x)
-/* VCAP_ES2:VCAP_CORE_MAP:VCAP_CORE_MAP */
-#define VCAP_ES2_MAP __REG(TARGET_VCAP_ES2,\
- 0, 1, 912, 0, 1, 8, 4, 0, 1, 4)
+/* VCAP_ES2:VCAP_CORE_MAP:VCAP_CORE_MAP */
+#define VCAP_ES2_MAP \
+ __REG(TARGET_VCAP_ES2, 0, 1, 912, 0, 1, 8, 4, 0, 1, 4)
#define VCAP_ES2_MAP_CORE_MAP GENMASK(2, 0)
#define VCAP_ES2_MAP_CORE_MAP_SET(x)\
@@ -7041,9 +7858,9 @@ enum sparx5_target {
#define VCAP_ES2_MAP_CORE_MAP_GET(x)\
FIELD_GET(VCAP_ES2_MAP_CORE_MAP, x)
-/* VCAP_ES2:VCAP_CORE_STICKY:VCAP_STICKY */
-#define VCAP_ES2_VCAP_STICKY __REG(TARGET_VCAP_ES2,\
- 0, 1, 920, 0, 1, 4, 0, 0, 1, 4)
+/* VCAP_ES2:VCAP_CORE_STICKY:VCAP_STICKY */
+#define VCAP_ES2_VCAP_STICKY \
+ __REG(TARGET_VCAP_ES2, 0, 1, 920, 0, 1, 4, 0, 0, 1, 4)
#define VCAP_ES2_VCAP_STICKY_VCAP_ROW_DELETED_STICKY BIT(0)
#define VCAP_ES2_VCAP_STICKY_VCAP_ROW_DELETED_STICKY_SET(x)\
@@ -7051,49 +7868,49 @@ enum sparx5_target {
#define VCAP_ES2_VCAP_STICKY_VCAP_ROW_DELETED_STICKY_GET(x)\
FIELD_GET(VCAP_ES2_VCAP_STICKY_VCAP_ROW_DELETED_STICKY, x)
-/* VCAP_ES2:VCAP_CONST:VCAP_VER */
-#define VCAP_ES2_VCAP_VER __REG(TARGET_VCAP_ES2,\
- 0, 1, 924, 0, 1, 40, 0, 0, 1, 4)
+/* VCAP_ES2:VCAP_CONST:VCAP_VER */
+#define VCAP_ES2_VCAP_VER \
+ __REG(TARGET_VCAP_ES2, 0, 1, 924, 0, 1, 40, 0, 0, 1, 4)
-/* VCAP_ES2:VCAP_CONST:ENTRY_WIDTH */
-#define VCAP_ES2_ENTRY_WIDTH __REG(TARGET_VCAP_ES2,\
- 0, 1, 924, 0, 1, 40, 4, 0, 1, 4)
+/* VCAP_ES2:VCAP_CONST:ENTRY_WIDTH */
+#define VCAP_ES2_ENTRY_WIDTH \
+ __REG(TARGET_VCAP_ES2, 0, 1, 924, 0, 1, 40, 4, 0, 1, 4)
-/* VCAP_ES2:VCAP_CONST:ENTRY_CNT */
-#define VCAP_ES2_ENTRY_CNT __REG(TARGET_VCAP_ES2,\
- 0, 1, 924, 0, 1, 40, 8, 0, 1, 4)
+/* VCAP_ES2:VCAP_CONST:ENTRY_CNT */
+#define VCAP_ES2_ENTRY_CNT \
+ __REG(TARGET_VCAP_ES2, 0, 1, 924, 0, 1, 40, 8, 0, 1, 4)
-/* VCAP_ES2:VCAP_CONST:ENTRY_SWCNT */
-#define VCAP_ES2_ENTRY_SWCNT __REG(TARGET_VCAP_ES2,\
- 0, 1, 924, 0, 1, 40, 12, 0, 1, 4)
+/* VCAP_ES2:VCAP_CONST:ENTRY_SWCNT */
+#define VCAP_ES2_ENTRY_SWCNT \
+ __REG(TARGET_VCAP_ES2, 0, 1, 924, 0, 1, 40, 12, 0, 1, 4)
-/* VCAP_ES2:VCAP_CONST:ENTRY_TG_WIDTH */
-#define VCAP_ES2_ENTRY_TG_WIDTH __REG(TARGET_VCAP_ES2,\
- 0, 1, 924, 0, 1, 40, 16, 0, 1, 4)
+/* VCAP_ES2:VCAP_CONST:ENTRY_TG_WIDTH */
+#define VCAP_ES2_ENTRY_TG_WIDTH \
+ __REG(TARGET_VCAP_ES2, 0, 1, 924, 0, 1, 40, 16, 0, 1, 4)
-/* VCAP_ES2:VCAP_CONST:ACTION_DEF_CNT */
-#define VCAP_ES2_ACTION_DEF_CNT __REG(TARGET_VCAP_ES2,\
- 0, 1, 924, 0, 1, 40, 20, 0, 1, 4)
+/* VCAP_ES2:VCAP_CONST:ACTION_DEF_CNT */
+#define VCAP_ES2_ACTION_DEF_CNT \
+ __REG(TARGET_VCAP_ES2, 0, 1, 924, 0, 1, 40, 20, 0, 1, 4)
-/* VCAP_ES2:VCAP_CONST:ACTION_WIDTH */
-#define VCAP_ES2_ACTION_WIDTH __REG(TARGET_VCAP_ES2,\
- 0, 1, 924, 0, 1, 40, 24, 0, 1, 4)
+/* VCAP_ES2:VCAP_CONST:ACTION_WIDTH */
+#define VCAP_ES2_ACTION_WIDTH \
+ __REG(TARGET_VCAP_ES2, 0, 1, 924, 0, 1, 40, 24, 0, 1, 4)
-/* VCAP_ES2:VCAP_CONST:CNT_WIDTH */
-#define VCAP_ES2_CNT_WIDTH __REG(TARGET_VCAP_ES2,\
- 0, 1, 924, 0, 1, 40, 28, 0, 1, 4)
+/* VCAP_ES2:VCAP_CONST:CNT_WIDTH */
+#define VCAP_ES2_CNT_WIDTH \
+ __REG(TARGET_VCAP_ES2, 0, 1, 924, 0, 1, 40, 28, 0, 1, 4)
-/* VCAP_ES2:VCAP_CONST:CORE_CNT */
-#define VCAP_ES2_CORE_CNT __REG(TARGET_VCAP_ES2,\
- 0, 1, 924, 0, 1, 40, 32, 0, 1, 4)
+/* VCAP_ES2:VCAP_CONST:CORE_CNT */
+#define VCAP_ES2_CORE_CNT \
+ __REG(TARGET_VCAP_ES2, 0, 1, 924, 0, 1, 40, 32, 0, 1, 4)
-/* VCAP_ES2:VCAP_CONST:IF_CNT */
-#define VCAP_ES2_IF_CNT __REG(TARGET_VCAP_ES2,\
- 0, 1, 924, 0, 1, 40, 36, 0, 1, 4)
+/* VCAP_ES2:VCAP_CONST:IF_CNT */
+#define VCAP_ES2_IF_CNT \
+ __REG(TARGET_VCAP_ES2, 0, 1, 924, 0, 1, 40, 36, 0, 1, 4)
-/* VCAP_SUPER:VCAP_CORE_CFG:VCAP_UPDATE_CTRL */
-#define VCAP_SUPER_CTRL __REG(TARGET_VCAP_SUPER,\
- 0, 1, 0, 0, 1, 8, 0, 0, 1, 4)
+/* VCAP_SUPER:VCAP_CORE_CFG:VCAP_UPDATE_CTRL */
+#define VCAP_SUPER_CTRL \
+ __REG(TARGET_VCAP_SUPER, 0, 1, 0, 0, 1, 8, 0, 0, 1, 4)
#define VCAP_SUPER_CTRL_UPDATE_CMD GENMASK(24, 22)
#define VCAP_SUPER_CTRL_UPDATE_CMD_SET(x)\
@@ -7143,9 +7960,9 @@ enum sparx5_target {
#define VCAP_SUPER_CTRL_MV_TRAFFIC_IGN_GET(x)\
FIELD_GET(VCAP_SUPER_CTRL_MV_TRAFFIC_IGN, x)
-/* VCAP_SUPER:VCAP_CORE_CFG:VCAP_MV_CFG */
-#define VCAP_SUPER_CFG __REG(TARGET_VCAP_SUPER,\
- 0, 1, 0, 0, 1, 8, 4, 0, 1, 4)
+/* VCAP_SUPER:VCAP_CORE_CFG:VCAP_MV_CFG */
+#define VCAP_SUPER_CFG \
+ __REG(TARGET_VCAP_SUPER, 0, 1, 0, 0, 1, 8, 4, 0, 1, 4)
#define VCAP_SUPER_CFG_MV_NUM_POS GENMASK(31, 16)
#define VCAP_SUPER_CFG_MV_NUM_POS_SET(x)\
@@ -7159,33 +7976,33 @@ enum sparx5_target {
#define VCAP_SUPER_CFG_MV_SIZE_GET(x)\
FIELD_GET(VCAP_SUPER_CFG_MV_SIZE, x)
-/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_ENTRY_DAT */
-#define VCAP_SUPER_VCAP_ENTRY_DAT(r) __REG(TARGET_VCAP_SUPER,\
- 0, 1, 8, 0, 1, 904, 0, r, 64, 4)
+/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_ENTRY_DAT */
+#define VCAP_SUPER_VCAP_ENTRY_DAT(r) \
+ __REG(TARGET_VCAP_SUPER, 0, 1, 8, 0, 1, 904, 0, r, 64, 4)
-/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_MASK_DAT */
-#define VCAP_SUPER_VCAP_MASK_DAT(r) __REG(TARGET_VCAP_SUPER,\
- 0, 1, 8, 0, 1, 904, 256, r, 64, 4)
+/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_MASK_DAT */
+#define VCAP_SUPER_VCAP_MASK_DAT(r) \
+ __REG(TARGET_VCAP_SUPER, 0, 1, 8, 0, 1, 904, 256, r, 64, 4)
-/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_ACTION_DAT */
-#define VCAP_SUPER_VCAP_ACTION_DAT(r) __REG(TARGET_VCAP_SUPER,\
- 0, 1, 8, 0, 1, 904, 512, r, 64, 4)
+/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_ACTION_DAT */
+#define VCAP_SUPER_VCAP_ACTION_DAT(r) \
+ __REG(TARGET_VCAP_SUPER, 0, 1, 8, 0, 1, 904, 512, r, 64, 4)
-/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_CNT_DAT */
-#define VCAP_SUPER_VCAP_CNT_DAT(r) __REG(TARGET_VCAP_SUPER,\
- 0, 1, 8, 0, 1, 904, 768, r, 32, 4)
+/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_CNT_DAT */
+#define VCAP_SUPER_VCAP_CNT_DAT(r) \
+ __REG(TARGET_VCAP_SUPER, 0, 1, 8, 0, 1, 904, 768, r, 32, 4)
-/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_CNT_FW_DAT */
-#define VCAP_SUPER_VCAP_CNT_FW_DAT __REG(TARGET_VCAP_SUPER,\
- 0, 1, 8, 0, 1, 904, 896, 0, 1, 4)
+/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_CNT_FW_DAT */
+#define VCAP_SUPER_VCAP_CNT_FW_DAT \
+ __REG(TARGET_VCAP_SUPER, 0, 1, 8, 0, 1, 904, 896, 0, 1, 4)
-/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_TG_DAT */
-#define VCAP_SUPER_VCAP_TG_DAT __REG(TARGET_VCAP_SUPER,\
- 0, 1, 8, 0, 1, 904, 900, 0, 1, 4)
+/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_TG_DAT */
+#define VCAP_SUPER_VCAP_TG_DAT \
+ __REG(TARGET_VCAP_SUPER, 0, 1, 8, 0, 1, 904, 900, 0, 1, 4)
-/* VCAP_SUPER:VCAP_CORE_MAP:VCAP_CORE_IDX */
-#define VCAP_SUPER_IDX __REG(TARGET_VCAP_SUPER,\
- 0, 1, 912, 0, 1, 8, 0, 0, 1, 4)
+/* VCAP_SUPER:VCAP_CORE_MAP:VCAP_CORE_IDX */
+#define VCAP_SUPER_IDX \
+ __REG(TARGET_VCAP_SUPER, 0, 1, 912, 0, 1, 8, 0, 0, 1, 4)
#define VCAP_SUPER_IDX_CORE_IDX GENMASK(3, 0)
#define VCAP_SUPER_IDX_CORE_IDX_SET(x)\
@@ -7193,9 +8010,9 @@ enum sparx5_target {
#define VCAP_SUPER_IDX_CORE_IDX_GET(x)\
FIELD_GET(VCAP_SUPER_IDX_CORE_IDX, x)
-/* VCAP_SUPER:VCAP_CORE_MAP:VCAP_CORE_MAP */
-#define VCAP_SUPER_MAP __REG(TARGET_VCAP_SUPER,\
- 0, 1, 912, 0, 1, 8, 4, 0, 1, 4)
+/* VCAP_SUPER:VCAP_CORE_MAP:VCAP_CORE_MAP */
+#define VCAP_SUPER_MAP \
+ __REG(TARGET_VCAP_SUPER, 0, 1, 912, 0, 1, 8, 4, 0, 1, 4)
#define VCAP_SUPER_MAP_CORE_MAP GENMASK(2, 0)
#define VCAP_SUPER_MAP_CORE_MAP_SET(x)\
@@ -7203,49 +8020,49 @@ enum sparx5_target {
#define VCAP_SUPER_MAP_CORE_MAP_GET(x)\
FIELD_GET(VCAP_SUPER_MAP_CORE_MAP, x)
-/* VCAP_SUPER:VCAP_CONST:VCAP_VER */
-#define VCAP_SUPER_VCAP_VER __REG(TARGET_VCAP_SUPER,\
- 0, 1, 924, 0, 1, 40, 0, 0, 1, 4)
+/* VCAP_SUPER:VCAP_CONST:VCAP_VER */
+#define VCAP_SUPER_VCAP_VER \
+ __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 0, 0, 1, 4)
-/* VCAP_SUPER:VCAP_CONST:ENTRY_WIDTH */
-#define VCAP_SUPER_ENTRY_WIDTH __REG(TARGET_VCAP_SUPER,\
- 0, 1, 924, 0, 1, 40, 4, 0, 1, 4)
+/* VCAP_SUPER:VCAP_CONST:ENTRY_WIDTH */
+#define VCAP_SUPER_ENTRY_WIDTH \
+ __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 4, 0, 1, 4)
-/* VCAP_SUPER:VCAP_CONST:ENTRY_CNT */
-#define VCAP_SUPER_ENTRY_CNT __REG(TARGET_VCAP_SUPER,\
- 0, 1, 924, 0, 1, 40, 8, 0, 1, 4)
+/* VCAP_SUPER:VCAP_CONST:ENTRY_CNT */
+#define VCAP_SUPER_ENTRY_CNT \
+ __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 8, 0, 1, 4)
-/* VCAP_SUPER:VCAP_CONST:ENTRY_SWCNT */
-#define VCAP_SUPER_ENTRY_SWCNT __REG(TARGET_VCAP_SUPER,\
- 0, 1, 924, 0, 1, 40, 12, 0, 1, 4)
+/* VCAP_SUPER:VCAP_CONST:ENTRY_SWCNT */
+#define VCAP_SUPER_ENTRY_SWCNT \
+ __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 12, 0, 1, 4)
-/* VCAP_SUPER:VCAP_CONST:ENTRY_TG_WIDTH */
-#define VCAP_SUPER_ENTRY_TG_WIDTH __REG(TARGET_VCAP_SUPER,\
- 0, 1, 924, 0, 1, 40, 16, 0, 1, 4)
+/* VCAP_SUPER:VCAP_CONST:ENTRY_TG_WIDTH */
+#define VCAP_SUPER_ENTRY_TG_WIDTH \
+ __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 16, 0, 1, 4)
-/* VCAP_SUPER:VCAP_CONST:ACTION_DEF_CNT */
-#define VCAP_SUPER_ACTION_DEF_CNT __REG(TARGET_VCAP_SUPER,\
- 0, 1, 924, 0, 1, 40, 20, 0, 1, 4)
+/* VCAP_SUPER:VCAP_CONST:ACTION_DEF_CNT */
+#define VCAP_SUPER_ACTION_DEF_CNT \
+ __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 20, 0, 1, 4)
-/* VCAP_SUPER:VCAP_CONST:ACTION_WIDTH */
-#define VCAP_SUPER_ACTION_WIDTH __REG(TARGET_VCAP_SUPER,\
- 0, 1, 924, 0, 1, 40, 24, 0, 1, 4)
+/* VCAP_SUPER:VCAP_CONST:ACTION_WIDTH */
+#define VCAP_SUPER_ACTION_WIDTH \
+ __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 24, 0, 1, 4)
-/* VCAP_SUPER:VCAP_CONST:CNT_WIDTH */
-#define VCAP_SUPER_CNT_WIDTH __REG(TARGET_VCAP_SUPER,\
- 0, 1, 924, 0, 1, 40, 28, 0, 1, 4)
+/* VCAP_SUPER:VCAP_CONST:CNT_WIDTH */
+#define VCAP_SUPER_CNT_WIDTH \
+ __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 28, 0, 1, 4)
-/* VCAP_SUPER:VCAP_CONST:CORE_CNT */
-#define VCAP_SUPER_CORE_CNT __REG(TARGET_VCAP_SUPER,\
- 0, 1, 924, 0, 1, 40, 32, 0, 1, 4)
+/* VCAP_SUPER:VCAP_CONST:CORE_CNT */
+#define VCAP_SUPER_CORE_CNT \
+ __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 32, 0, 1, 4)
-/* VCAP_SUPER:VCAP_CONST:IF_CNT */
-#define VCAP_SUPER_IF_CNT __REG(TARGET_VCAP_SUPER,\
- 0, 1, 924, 0, 1, 40, 36, 0, 1, 4)
+/* VCAP_SUPER:VCAP_CONST:IF_CNT */
+#define VCAP_SUPER_IF_CNT \
+ __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 36, 0, 1, 4)
-/* VCAP_SUPER:RAM_CTRL:RAM_INIT */
-#define VCAP_SUPER_RAM_INIT __REG(TARGET_VCAP_SUPER,\
- 0, 1, 1120, 0, 1, 4, 0, 0, 1, 4)
+/* VCAP_SUPER:RAM_CTRL:RAM_INIT */
+#define VCAP_SUPER_RAM_INIT \
+ __REG(TARGET_VCAP_SUPER, 0, 1, 1120, 0, 1, 4, 0, 0, 1, 4)
#define VCAP_SUPER_RAM_INIT_RAM_INIT BIT(1)
#define VCAP_SUPER_RAM_INIT_RAM_INIT_SET(x)\
@@ -7259,9 +8076,10 @@ enum sparx5_target {
#define VCAP_SUPER_RAM_INIT_RAM_CFG_HOOK_GET(x)\
FIELD_GET(VCAP_SUPER_RAM_INIT_RAM_CFG_HOOK, x)
-/* VOP:RAM_CTRL:RAM_INIT */
-#define VOP_RAM_INIT __REG(TARGET_VOP,\
- 0, 1, 279176, 0, 1, 4, 0, 0, 1, 4)
+/* VOP:RAM_CTRL:RAM_INIT */
+#define VOP_RAM_INIT \
+ __REG(TARGET_VOP, 0, 1, regs->gaddr[GA_VOP_RAM_CTRL], 0, 1, 4, 0, 0, 1,\
+ 4)
#define VOP_RAM_INIT_RAM_INIT BIT(1)
#define VOP_RAM_INIT_RAM_INIT_SET(x)\
@@ -7275,9 +8093,10 @@ enum sparx5_target {
#define VOP_RAM_INIT_RAM_CFG_HOOK_GET(x)\
FIELD_GET(VOP_RAM_INIT_RAM_CFG_HOOK, x)
-/* XQS:SYSTEM:STAT_CFG */
-#define XQS_STAT_CFG __REG(TARGET_XQS,\
- 0, 1, 6768, 0, 1, 872, 860, 0, 1, 4)
+/* XQS:SYSTEM:STAT_CFG */
+#define XQS_STAT_CFG \
+ __REG(TARGET_XQS, 0, 1, regs->gaddr[GA_XQS_SYSTEM], 0, 1, 872, 860, 0, \
+ 1, 4)
#define XQS_STAT_CFG_STAT_CLEAR_SHOT GENMASK(21, 18)
#define XQS_STAT_CFG_STAT_CLEAR_SHOT_SET(x)\
@@ -7285,11 +8104,12 @@ enum sparx5_target {
#define XQS_STAT_CFG_STAT_CLEAR_SHOT_GET(x)\
FIELD_GET(XQS_STAT_CFG_STAT_CLEAR_SHOT, x)
-#define XQS_STAT_CFG_STAT_VIEW GENMASK(17, 5)
+#define XQS_STAT_CFG_STAT_VIEW\
+ GENMASK(regs->fsize[FW_XQS_STAT_CFG_STAT_VIEW] + 5 - 1, 5)
#define XQS_STAT_CFG_STAT_VIEW_SET(x)\
- FIELD_PREP(XQS_STAT_CFG_STAT_VIEW, x)
+ spx5_field_prep(XQS_STAT_CFG_STAT_VIEW, x)
#define XQS_STAT_CFG_STAT_VIEW_GET(x)\
- FIELD_GET(XQS_STAT_CFG_STAT_VIEW, x)
+ spx5_field_get(XQS_STAT_CFG_STAT_VIEW, x)
#define XQS_STAT_CFG_STAT_SRV_PKT_ONLY BIT(4)
#define XQS_STAT_CFG_STAT_SRV_PKT_ONLY_SET(x)\
@@ -7303,48 +8123,136 @@ enum sparx5_target {
#define XQS_STAT_CFG_STAT_WRAP_DIS_GET(x)\
FIELD_GET(XQS_STAT_CFG_STAT_WRAP_DIS, x)
-/* XQS:QLIMIT_SHR:QLIMIT_SHR_TOP_CFG */
-#define XQS_QLIMIT_SHR_TOP_CFG(g) __REG(TARGET_XQS,\
- 0, 1, 7936, g, 4, 48, 0, 0, 1, 4)
+/* XQS:QLIMIT_SHR:QLIMIT_SHR_TOP_CFG */
+#define XQS_QLIMIT_SHR_TOP_CFG(g) \
+ __REG(TARGET_XQS, 0, 1, regs->gaddr[GA_XQS_QLIMIT_SHR], g, 4, 48, 0, 0,\
+ 1, 4)
-#define XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP GENMASK(14, 0)
+#define XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP\
+ GENMASK(regs->fsize[FW_XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP] + 0 - 1, 0)
#define XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP_SET(x)\
- FIELD_PREP(XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP, x)
+ spx5_field_prep(XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP, x)
#define XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP_GET(x)\
- FIELD_GET(XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP, x)
+ spx5_field_get(XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP, x)
-/* XQS:QLIMIT_SHR:QLIMIT_SHR_ATOP_CFG */
-#define XQS_QLIMIT_SHR_ATOP_CFG(g) __REG(TARGET_XQS,\
- 0, 1, 7936, g, 4, 48, 4, 0, 1, 4)
+/* XQS:QLIMIT_SHR:QLIMIT_SHR_ATOP_CFG */
+#define XQS_QLIMIT_SHR_ATOP_CFG(g) \
+ __REG(TARGET_XQS, 0, 1, regs->gaddr[GA_XQS_QLIMIT_SHR], g, 4, 48, 4, 0,\
+ 1, 4)
-#define XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP GENMASK(14, 0)
+#define XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP\
+ GENMASK(regs->fsize[FW_XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP] + 0 - 1, 0)
#define XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP_SET(x)\
- FIELD_PREP(XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP, x)
+ spx5_field_prep(XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP, x)
#define XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP_GET(x)\
- FIELD_GET(XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP, x)
+ spx5_field_get(XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP, x)
-/* XQS:QLIMIT_SHR:QLIMIT_SHR_CTOP_CFG */
-#define XQS_QLIMIT_SHR_CTOP_CFG(g) __REG(TARGET_XQS,\
- 0, 1, 7936, g, 4, 48, 8, 0, 1, 4)
+/* XQS:QLIMIT_SHR:QLIMIT_SHR_CTOP_CFG */
+#define XQS_QLIMIT_SHR_CTOP_CFG(g) \
+ __REG(TARGET_XQS, 0, 1, regs->gaddr[GA_XQS_QLIMIT_SHR], g, 4, 48, 8, 0,\
+ 1, 4)
-#define XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP GENMASK(14, 0)
+#define XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP\
+ GENMASK(regs->fsize[FW_XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP] + 0 - 1, 0)
#define XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP_SET(x)\
- FIELD_PREP(XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP, x)
+ spx5_field_prep(XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP, x)
#define XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP_GET(x)\
- FIELD_GET(XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP, x)
+ spx5_field_get(XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP, x)
-/* XQS:QLIMIT_SHR:QLIMIT_SHR_QLIM_CFG */
-#define XQS_QLIMIT_SHR_QLIM_CFG(g) __REG(TARGET_XQS,\
- 0, 1, 7936, g, 4, 48, 12, 0, 1, 4)
+/* XQS:QLIMIT_SHR:QLIMIT_SHR_QLIM_CFG */
+#define XQS_QLIMIT_SHR_QLIM_CFG(g) \
+ __REG(TARGET_XQS, 0, 1, regs->gaddr[GA_XQS_QLIMIT_SHR], g, 4, 48, 12, \
+ 0, 1, 4)
-#define XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM GENMASK(14, 0)
+#define XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM\
+ GENMASK(regs->fsize[FW_XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM] + 0 - 1, 0)
#define XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM_SET(x)\
- FIELD_PREP(XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM, x)
+ spx5_field_prep(XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM, x)
#define XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM_GET(x)\
- FIELD_GET(XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM, x)
-
-/* XQS:STAT:CNT */
-#define XQS_CNT(g) __REG(TARGET_XQS,\
- 0, 1, 0, g, 1024, 4, 0, 0, 1, 4)
+ spx5_field_get(XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM, x)
+
+/* XQS:STAT:CNT */
+#define XQS_CNT(g) \
+ __REG(TARGET_XQS, 0, 1, 0, g, 1024, 4, 0, 0, 1, 4)
+
+/* LAN969X ONLY */
+/* DEV1G:DEV_CFG_STATUS:DEV_RST_CTRL */
+#define DEVRGMII_DEV_RST_CTRL(t) \
+ __REG(TARGET_DEVRGMII, t, 2, 0, 0, 1, 36, 0, 0, 1, 4)
+
+#define DEVRGMII_DEV_RST_CTRL_SPEED_SEL GENMASK(22, 20)
+#define DEVRGMII_DEV_RST_CTRL_SPEED_SEL_SET(x)\
+ FIELD_PREP(DEVRGMII_DEV_RST_CTRL_SPEED_SEL, x)
+#define DEVRGMII_DEV_RST_CTRL_SPEED_SEL_GET(x)\
+ FIELD_GET(DEVRGMII_DEV_RST_CTRL_SPEED_SEL, x)
+
+/* LAN969X ONLY */
+/* DEV1G:MAC_CFG_STATUS:MAC_ENA_CFG */
+#define DEVRGMII_MAC_ENA_CFG(t) \
+ __REG(TARGET_DEVRGMII, t, 2, 36, 0, 1, 36, 0, 0, 1, 4)
+
+#define DEVRGMII_MAC_ENA_CFG_RX_ENA BIT(4)
+#define DEVRGMII_MAC_ENA_CFG_RX_ENA_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_ENA_CFG_RX_ENA, x)
+#define DEVRGMII_MAC_ENA_CFG_RX_ENA_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_ENA_CFG_RX_ENA, x)
+
+#define DEVRGMII_MAC_ENA_CFG_TX_ENA BIT(0)
+#define DEVRGMII_MAC_ENA_CFG_TX_ENA_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_ENA_CFG_TX_ENA, x)
+#define DEVRGMII_MAC_ENA_CFG_TX_ENA_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_ENA_CFG_TX_ENA, x)
+
+/* LAN969X ONLY */
+/* DEV1G:MAC_CFG_STATUS:MAC_TAGS_CFG */
+#define DEVRGMII_MAC_TAGS_CFG(t) \
+ __REG(TARGET_DEVRGMII, t, 2, 36, 0, 1, 36, 12, 0, 1, 4)
+
+#define DEVRGMII_MAC_TAGS_CFG_TAG_ID GENMASK(31, 16)
+#define DEVRGMII_MAC_TAGS_CFG_TAG_ID_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_TAGS_CFG_TAG_ID, x)
+#define DEVRGMII_MAC_TAGS_CFG_TAG_ID_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_TAGS_CFG_TAG_ID, x)
+
+#define DEVRGMII_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA BIT(3)
+#define DEVRGMII_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA, x)
+#define DEVRGMII_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA, x)
+
+#define DEVRGMII_MAC_TAGS_CFG_PB_ENA GENMASK(2, 1)
+#define DEVRGMII_MAC_TAGS_CFG_PB_ENA_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_TAGS_CFG_PB_ENA, x)
+#define DEVRGMII_MAC_TAGS_CFG_PB_ENA_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_TAGS_CFG_PB_ENA, x)
+
+#define DEVRGMII_MAC_TAGS_CFG_VLAN_AWR_ENA BIT(0)
+#define DEVRGMII_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_TAGS_CFG_VLAN_AWR_ENA, x)
+#define DEVRGMII_MAC_TAGS_CFG_VLAN_AWR_ENA_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_TAGS_CFG_VLAN_AWR_ENA, x)
+
+/* LAN969X ONLY */
+/* DEV1G:MAC_CFG_STATUS:MAC_IFG_CFG */
+#define DEVRGMII_MAC_IFG_CFG(t) \
+ __REG(TARGET_DEVRGMII, t, 2, 36, 0, 1, 36, 24, 0, 1, 4)
+
+#define DEVRGMII_MAC_IFG_CFG_TX_IFG GENMASK(12, 8)
+#define DEVRGMII_MAC_IFG_CFG_TX_IFG_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_IFG_CFG_TX_IFG, x)
+#define DEVRGMII_MAC_IFG_CFG_TX_IFG_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_IFG_CFG_TX_IFG, x)
+
+#define DEVRGMII_MAC_IFG_CFG_RX_IFG2 GENMASK(7, 4)
+#define DEVRGMII_MAC_IFG_CFG_RX_IFG2_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_IFG_CFG_RX_IFG2, x)
+#define DEVRGMII_MAC_IFG_CFG_RX_IFG2_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_IFG_CFG_RX_IFG2, x)
+
+#define DEVRGMII_MAC_IFG_CFG_RX_IFG1 GENMASK(3, 0)
+#define DEVRGMII_MAC_IFG_CFG_RX_IFG1_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_IFG_CFG_RX_IFG1, x)
+#define DEVRGMII_MAC_IFG_CFG_RX_IFG1_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_IFG_CFG_RX_IFG1, x)
#endif /* _SPARX5_MAIN_REGS_H_ */
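The register macros above trade fixed GENMASK()/FIELD_PREP()/FIELD_GET() constants for masks whose width is read from regs->fsize[] at run time, so a single header can describe both the Sparx5 and lan969x register layouts. Because such masks are no longer compile-time constants, the constant-only FIELD_PREP()/FIELD_GET() helpers cannot be applied to them, which is why the accessors switch to spx5_field_prep()/spx5_field_get(). As a rough sketch only (the in-tree definitions may differ), runtime-mask helpers of that kind can be written as below, deriving the shift from the mask's lowest set bit; the example_* names are invented for illustration.

	#include <linux/bits.h>
	#include <linux/bitops.h>
	#include <linux/types.h>

	/* Sketch of runtime-mask field helpers; these only illustrate the
	 * behaviour assumed of spx5_field_prep()/spx5_field_get(). The shift
	 * is taken from __ffs(mask) because the mask is computed at run time
	 * (e.g. from regs->fsize[]).
	 */
	static inline u32 example_field_prep(u32 mask, u32 val)
	{
		return (val << __ffs(mask)) & mask;
	}

	static inline u32 example_field_get(u32 mask, u32 reg)
	{
		return (reg & mask) >> __ffs(mask);
	}

With helpers like these, a setter such as QRES_RES_CFG_WM_HIGH_SET(x) behaves the same way the old FIELD_PREP()-based macro did, just with a mask sized per platform.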
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c b/drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c
new file mode 100644
index 000000000000..76097761fa97
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2024 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include "sparx5_main.h"
+#include "sparx5_main_regs.h"
+#include "sparx5_tc.h"
+
+#define SPX5_MIRROR_PROBE_MAX 3
+#define SPX5_MIRROR_DISABLED 0
+#define SPX5_MIRROR_EGRESS 1
+#define SPX5_MIRROR_INGRESS 2
+#define SPX5_QFWD_MP_OFFSET 9 /* Mirror port offset in the QFWD register */
+
+/* Convert from bool ingress/egress to mirror direction */
+static u32 sparx5_mirror_to_dir(bool ingress)
+{
+ return ingress ? SPX5_MIRROR_INGRESS : SPX5_MIRROR_EGRESS;
+}
+
+/* Get ports belonging to this mirror */
+static u64 sparx5_mirror_port_get(struct sparx5 *sparx5, u32 idx)
+{
+ u64 val;
+
+ val = spx5_rd(sparx5, ANA_AC_PROBE_PORT_CFG(idx));
+
+ if (is_sparx5(sparx5))
+ val |= (u64)spx5_rd(sparx5, ANA_AC_PROBE_PORT_CFG1(idx)) << 32;
+
+ return val;
+}
+
+/* Add port to mirror (only front ports) */
+static void sparx5_mirror_port_add(struct sparx5 *sparx5, u32 idx, u32 portno)
+{
+ u64 reg = portno;
+ u32 val;
+
+ val = BIT(do_div(reg, 32));
+
+ if (reg == 0)
+ return spx5_rmw(val, val, sparx5, ANA_AC_PROBE_PORT_CFG(idx));
+ else
+ return spx5_rmw(val, val, sparx5, ANA_AC_PROBE_PORT_CFG1(idx));
+}
+
+/* Delete port from mirror (only front ports) */
+static void sparx5_mirror_port_del(struct sparx5 *sparx5, u32 idx, u32 portno)
+{
+ u64 reg = portno;
+ u32 val;
+
+ val = BIT(do_div(reg, 32));
+
+ if (reg == 0)
+ return spx5_rmw(0, val, sparx5, ANA_AC_PROBE_PORT_CFG(idx));
+ else
+ return spx5_rmw(0, val, sparx5, ANA_AC_PROBE_PORT_CFG1(idx));
+}
+
+/* Check if mirror contains port */
+static bool sparx5_mirror_contains(struct sparx5 *sparx5, u32 idx, u32 portno)
+{
+ return (sparx5_mirror_port_get(sparx5, idx) & BIT_ULL(portno)) != 0;
+}
+
+/* Check if mirror is empty */
+static bool sparx5_mirror_is_empty(struct sparx5 *sparx5, u32 idx)
+{
+ return sparx5_mirror_port_get(sparx5, idx) == 0;
+}
+
+/* Get direction of mirror */
+static u32 sparx5_mirror_dir_get(struct sparx5 *sparx5, u32 idx)
+{
+ u32 val = spx5_rd(sparx5, ANA_AC_PROBE_CFG(idx));
+
+ return ANA_AC_PROBE_CFG_PROBE_DIRECTION_GET(val);
+}
+
+/* Set direction of mirror */
+static void sparx5_mirror_dir_set(struct sparx5 *sparx5, u32 idx, u32 dir)
+{
+ spx5_rmw(ANA_AC_PROBE_CFG_PROBE_DIRECTION_SET(dir),
+ ANA_AC_PROBE_CFG_PROBE_DIRECTION, sparx5,
+ ANA_AC_PROBE_CFG(idx));
+}
+
+/* Set the monitor port for this mirror */
+static void sparx5_mirror_monitor_set(struct sparx5 *sparx5, u32 idx,
+ u32 portno)
+{
+ spx5_rmw(QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL_SET(portno),
+ QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL, sparx5,
+ QFWD_FRAME_COPY_CFG(idx + SPX5_QFWD_MP_OFFSET));
+}
+
+/* Get the monitor port of this mirror */
+static u32 sparx5_mirror_monitor_get(struct sparx5 *sparx5, u32 idx)
+{
+ u32 val = spx5_rd(sparx5,
+ QFWD_FRAME_COPY_CFG(idx + SPX5_QFWD_MP_OFFSET));
+
+ return QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL_GET(val);
+}
+
+/* Check if port is the monitor port of this mirror */
+static bool sparx5_mirror_has_monitor(struct sparx5 *sparx5, u32 idx,
+ u32 portno)
+{
+ return sparx5_mirror_monitor_get(sparx5, idx) == portno;
+}
+
+/* Get a suitable mirror for this port */
+static int sparx5_mirror_get(struct sparx5_port *sport,
+ struct sparx5_port *mport, u32 dir, u32 *idx)
+{
+ struct sparx5 *sparx5 = sport->sparx5;
+ u32 i;
+
+ /* Check if this port is already used as a monitor port */
+ for (i = 0; i < SPX5_MIRROR_PROBE_MAX; i++)
+ if (sparx5_mirror_has_monitor(sparx5, i, sport->portno))
+ return -EINVAL;
+
+ /* Check if existing mirror can be reused
+ * (same direction and monitor port).
+ */
+ for (i = 0; i < SPX5_MIRROR_PROBE_MAX; i++) {
+ if (sparx5_mirror_dir_get(sparx5, i) == dir &&
+ sparx5_mirror_has_monitor(sparx5, i, mport->portno)) {
+ *idx = i;
+ return 0;
+ }
+ }
+
+ /* Return free mirror */
+ for (i = 0; i < SPX5_MIRROR_PROBE_MAX; i++) {
+ if (sparx5_mirror_is_empty(sparx5, i)) {
+ *idx = i;
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
+int sparx5_mirror_add(struct sparx5_mall_entry *entry)
+{
+ u32 mirror_idx, dir = sparx5_mirror_to_dir(entry->ingress);
+ struct sparx5_port *sport, *mport;
+ struct sparx5 *sparx5;
+ int err;
+
+ /* Source port */
+ sport = entry->port;
+ /* monitor port */
+ mport = entry->mirror.port;
+ sparx5 = sport->sparx5;
+
+ if (sport->portno == mport->portno)
+ return -EINVAL;
+
+ err = sparx5_mirror_get(sport, mport, dir, &mirror_idx);
+ if (err)
+ return err;
+
+ if (sparx5_mirror_contains(sparx5, mirror_idx, sport->portno))
+ return -EEXIST;
+
+ /* Add port to mirror */
+ sparx5_mirror_port_add(sparx5, mirror_idx, sport->portno);
+
+ /* Set direction of mirror */
+ sparx5_mirror_dir_set(sparx5, mirror_idx, dir);
+
+ /* Set monitor port for mirror */
+ sparx5_mirror_monitor_set(sparx5, mirror_idx, mport->portno);
+
+ entry->mirror.idx = mirror_idx;
+
+ return 0;
+}
+
+void sparx5_mirror_del(struct sparx5_mall_entry *entry)
+{
+ struct sparx5_port *port = entry->port;
+ struct sparx5 *sparx5 = port->sparx5;
+ u32 mirror_idx = entry->mirror.idx;
+
+ sparx5_mirror_port_del(sparx5, mirror_idx, port->portno);
+ if (!sparx5_mirror_is_empty(sparx5, mirror_idx))
+ return;
+
+ sparx5_mirror_dir_set(sparx5, mirror_idx, SPX5_MIRROR_DISABLED);
+
+ sparx5_mirror_monitor_set(sparx5,
+ mirror_idx,
+ sparx5->data->consts->n_ports);
+}
+
+void sparx5_mirror_stats(struct sparx5_mall_entry *entry,
+ struct flow_stats *fstats)
+{
+ struct sparx5_port *port = entry->port;
+ struct rtnl_link_stats64 new_stats;
+ struct flow_stats *old_stats;
+
+ old_stats = &entry->port->mirror_stats;
+ sparx5_get_stats64(port->ndev, &new_stats);
+
+ if (entry->ingress) {
+ flow_stats_update(fstats,
+ new_stats.rx_bytes - old_stats->bytes,
+ new_stats.rx_packets - old_stats->pkts,
+ new_stats.rx_dropped - old_stats->drops,
+ old_stats->lastused,
+ FLOW_ACTION_HW_STATS_IMMEDIATE);
+
+ old_stats->bytes = new_stats.rx_bytes;
+ old_stats->pkts = new_stats.rx_packets;
+ old_stats->drops = new_stats.rx_dropped;
+ old_stats->lastused = jiffies;
+ } else {
+ flow_stats_update(fstats,
+ new_stats.tx_bytes - old_stats->bytes,
+ new_stats.tx_packets - old_stats->pkts,
+ new_stats.tx_dropped - old_stats->drops,
+ old_stats->lastused,
+ FLOW_ACTION_HW_STATS_IMMEDIATE);
+
+ old_stats->bytes = new_stats.tx_bytes;
+ old_stats->pkts = new_stats.tx_packets;
+ old_stats->drops = new_stats.tx_dropped;
+ old_stats->lastused = jiffies;
+ }
+}
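In sparx5_mirror_port_add() and sparx5_mirror_port_del() above, do_div(reg, 32) splits the front-port number in two: the returned remainder selects the bit inside a 32-bit probe word, and the quotient left in reg selects whether ANA_AC_PROBE_PORT_CFG or ANA_AC_PROBE_PORT_CFG1 is written. A purely illustrative restatement of that mapping (the helper name is invented for the example):

	/* Illustrative only: the word/bit split performed by do_div() in the
	 * mirror port add/del helpers. Ports 0-31 land in PROBE_PORT_CFG,
	 * ports 32 and up in PROBE_PORT_CFG1.
	 */
	static void example_probe_word_and_bit(u32 portno, u32 *word, u32 *bit)
	{
		*word = portno / 32;	/* 0 -> PROBE_PORT_CFG, 1 -> PROBE_PORT_CFG1 */
		*bit = portno % 32;	/* bit position within the selected word */
	}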
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
index 705a004b324f..1d34af78166a 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
@@ -55,7 +55,7 @@ static void __ifh_encode_bitfield(void *ifh, u64 value, u32 pos, u32 width)
ifh_hdr[byte - 5] |= (u8)((encode & 0xFF0000000000) >> 40);
}
-void sparx5_set_port_ifh(void *ifh_hdr, u16 portno)
+void sparx5_set_port_ifh(struct sparx5 *sparx5, void *ifh_hdr, u16 portno)
{
/* VSTAX.RSV = 1. MSBit must be 1 */
ifh_encode_bitfield(ifh_hdr, 1, VSTAX + 79, 1);
@@ -64,15 +64,16 @@ void sparx5_set_port_ifh(void *ifh_hdr, u16 portno)
/* MISC.CPU_MASK/DPORT = Destination port */
ifh_encode_bitfield(ifh_hdr, portno, 29, 8);
/* MISC.PIPELINE_PT */
- ifh_encode_bitfield(ifh_hdr, 16, 37, 5);
+ ifh_encode_bitfield(ifh_hdr, is_sparx5(sparx5) ? 16 : 17, 37, 5);
/* MISC.PIPELINE_ACT */
ifh_encode_bitfield(ifh_hdr, 1, 42, 3);
/* FWD.SRC_PORT = CPU */
- ifh_encode_bitfield(ifh_hdr, SPX5_PORT_CPU, 46, 7);
+ ifh_encode_bitfield(ifh_hdr, sparx5_get_pgid(sparx5, SPX5_PORT_CPU_0),
+ 46, is_sparx5(sparx5) ? 7 : 6);
/* FWD.SFLOW_ID (disable SFlow sampling) */
- ifh_encode_bitfield(ifh_hdr, 124, 57, 7);
+ ifh_encode_bitfield(ifh_hdr, 124, is_sparx5(sparx5) ? 57 : 56, 7);
/* FWD.UPDATE_FCS = Enable. Enforce update of FCS. */
- ifh_encode_bitfield(ifh_hdr, 1, 67, 1);
+ ifh_encode_bitfield(ifh_hdr, 1, is_sparx5(sparx5) ? 67 : 66, 1);
}
void sparx5_set_port_ifh_rew_op(void *ifh_hdr, u32 rew_op)
@@ -80,19 +81,25 @@ void sparx5_set_port_ifh_rew_op(void *ifh_hdr, u32 rew_op)
ifh_encode_bitfield(ifh_hdr, rew_op, VSTAX + 32, 10);
}
-void sparx5_set_port_ifh_pdu_type(void *ifh_hdr, u32 pdu_type)
+void sparx5_set_port_ifh_pdu_type(struct sparx5 *sparx5, void *ifh_hdr,
+ u32 pdu_type)
{
- ifh_encode_bitfield(ifh_hdr, pdu_type, 191, 4);
+ ifh_encode_bitfield(ifh_hdr, pdu_type, is_sparx5(sparx5) ? 191 : 190,
+ 4);
}
-void sparx5_set_port_ifh_pdu_w16_offset(void *ifh_hdr, u32 pdu_w16_offset)
+void sparx5_set_port_ifh_pdu_w16_offset(struct sparx5 *sparx5, void *ifh_hdr,
+ u32 pdu_w16_offset)
{
- ifh_encode_bitfield(ifh_hdr, pdu_w16_offset, 195, 6);
+ ifh_encode_bitfield(ifh_hdr, pdu_w16_offset,
+ is_sparx5(sparx5) ? 195 : 194, 6);
}
-void sparx5_set_port_ifh_timestamp(void *ifh_hdr, u64 timestamp)
+void sparx5_set_port_ifh_timestamp(struct sparx5 *sparx5, void *ifh_hdr,
+ u64 timestamp)
{
- ifh_encode_bitfield(ifh_hdr, timestamp, 232, 40);
+ ifh_encode_bitfield(ifh_hdr, timestamp, 232,
+ is_sparx5(sparx5) ? 40 : 38);
}
static int sparx5_port_open(struct net_device *ndev)
@@ -190,7 +197,8 @@ static int sparx5_set_mac_address(struct net_device *dev, void *p)
sparx5_mact_forget(sparx5, dev->dev_addr, port->pvid);
/* Add new */
- sparx5_mact_learn(sparx5, PGID_CPU, addr->sa_data, port->pvid);
+ sparx5_mact_learn(sparx5, sparx5_get_pgid(sparx5, PGID_CPU),
+ addr->sa_data, port->pvid);
/* Record the address */
eth_hw_addr_set(dev, addr->sa_data);
@@ -290,7 +298,7 @@ int sparx5_register_netdevs(struct sparx5 *sparx5)
int portno;
int err;
- for (portno = 0; portno < SPX5_PORTS; portno++)
+ for (portno = 0; portno < sparx5->data->consts->n_ports; portno++)
if (sparx5->ports[portno]) {
err = register_netdev(sparx5->ports[portno]->ndev);
if (err) {
@@ -309,7 +317,7 @@ void sparx5_destroy_netdevs(struct sparx5 *sparx5)
struct sparx5_port *port;
int portno;
- for (portno = 0; portno < SPX5_PORTS; portno++) {
+ for (portno = 0; portno < sparx5->data->consts->n_ports; portno++) {
port = sparx5->ports[portno];
if (port && port->phylink) {
/* Disconnect the phy */
@@ -327,8 +335,7 @@ void sparx5_unregister_netdevs(struct sparx5 *sparx5)
{
int portno;
- for (portno = 0; portno < SPX5_PORTS; portno++)
+ for (portno = 0; portno < sparx5->data->consts->n_ports; portno++)
if (sparx5->ports[portno])
unregister_netdev(sparx5->ports[portno]->ndev);
}
-
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
index ac7e1cffbcec..f713656f1fae 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
@@ -32,7 +32,7 @@ void sparx5_xtr_flush(struct sparx5 *sparx5, u8 grp)
spx5_wr(0, sparx5, QS_XTR_FLUSH);
}
-void sparx5_ifh_parse(u32 *ifh, struct frame_info *info)
+void sparx5_ifh_parse(struct sparx5 *sparx5, u32 *ifh, struct frame_info *info)
{
u8 *xtr_hdr = (u8 *)ifh;
@@ -43,10 +43,15 @@ void sparx5_ifh_parse(u32 *ifh, struct frame_info *info)
((u32)xtr_hdr[29] << 8) |
((u32)xtr_hdr[30] << 0);
fwd = (fwd >> 5);
- info->src_port = FIELD_GET(GENMASK(7, 1), fwd);
+ info->src_port = spx5_field_get(GENMASK(is_sparx5(sparx5) ? 7 : 6, 1),
+ fwd);
+	/*
+	 * Bits 270-271 are occasionally set unexpectedly by the hardware;
+	 * clear them before extracting the timestamp.
+	 */
info->timestamp =
- ((u64)xtr_hdr[2] << 24) |
+ ((u64)(xtr_hdr[2] & GENMASK(5, 0)) << 24) |
((u64)xtr_hdr[3] << 16) |
((u64)xtr_hdr[4] << 8) |
((u64)xtr_hdr[5] << 0);
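The src_port decode above now uses a chip-dependent mask: the FWD word carries a 7-bit source port on Sparx5 and a 6-bit one on lan969x, starting at bit 1 in both cases. A minimal, illustrative restatement of that extraction (the helper name is invented for the example):

	/* Illustrative only: variable-width source-port extraction matching
	 * the spx5_field_get() call in sparx5_ifh_parse(). The field starts
	 * at bit 1 and is 7 bits wide on Sparx5, 6 bits on lan969x.
	 */
	static u32 example_ifh_src_port(u32 fwd, bool on_sparx5)
	{
		u32 mask = GENMASK(on_sparx5 ? 7 : 6, 1);

		return (fwd & mask) >> 1;
	}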
@@ -67,11 +72,11 @@ static void sparx5_xtr_grp(struct sparx5 *sparx5, u8 grp, bool byte_swap)
for (i = 0; i < IFH_LEN; i++)
ifh[i] = spx5_rd(sparx5, QS_XTR_RD(grp));
- /* Decode IFH (whats needed) */
- sparx5_ifh_parse(ifh, &fi);
+ /* Decode IFH (what's needed) */
+ sparx5_ifh_parse(sparx5, ifh, &fi);
/* Map to port netdev */
- port = fi.src_port < SPX5_PORTS ?
+ port = fi.src_port < sparx5->data->consts->n_ports ?
sparx5->ports[fi.src_port] : NULL;
if (!port || !port->ndev) {
dev_err(sparx5->dev, "Data on inactive port %d\n", fi.src_port);
@@ -227,26 +232,32 @@ netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
struct net_device_stats *stats = &dev->stats;
struct sparx5_port *port = netdev_priv(dev);
struct sparx5 *sparx5 = port->sparx5;
+ const struct sparx5_ops *ops;
u32 ifh[IFH_LEN];
netdev_tx_t ret;
+ ops = sparx5->data->ops;
+
memset(ifh, 0, IFH_LEN * 4);
- sparx5_set_port_ifh(ifh, port->portno);
+ sparx5_set_port_ifh(sparx5, ifh, port->portno);
if (sparx5->ptp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
if (sparx5_ptp_txtstamp_request(port, skb) < 0)
return NETDEV_TX_BUSY;
sparx5_set_port_ifh_rew_op(ifh, SPARX5_SKB_CB(skb)->rew_op);
- sparx5_set_port_ifh_pdu_type(ifh, SPARX5_SKB_CB(skb)->pdu_type);
- sparx5_set_port_ifh_pdu_w16_offset(ifh, SPARX5_SKB_CB(skb)->pdu_w16_offset);
- sparx5_set_port_ifh_timestamp(ifh, SPARX5_SKB_CB(skb)->ts_id);
+ sparx5_set_port_ifh_pdu_type(sparx5, ifh,
+ SPARX5_SKB_CB(skb)->pdu_type);
+ sparx5_set_port_ifh_pdu_w16_offset(sparx5, ifh,
+ SPARX5_SKB_CB(skb)->pdu_w16_offset);
+ sparx5_set_port_ifh_timestamp(sparx5, ifh,
+ SPARX5_SKB_CB(skb)->ts_id);
}
skb_tx_timestamp(skb);
spin_lock(&sparx5->tx_lock);
if (sparx5->fdma_irq > 0)
- ret = sparx5_fdma_xmit(sparx5, ifh, skb);
+ ret = ops->fdma_xmit(sparx5, ifh, skb, dev);
else
ret = sparx5_inject(sparx5, ifh, skb, dev);
spin_unlock(&sparx5->tx_lock);
@@ -256,6 +267,12 @@ netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
if (ret < 0)
goto drop;
+ if (!is_sparx5(sparx5))
+ /* On lan969x, when TX_OK is returned, stats and SKB consumption are
+ * handled in the TX completion loop, so don't go any further.
+ */
+ return NETDEV_TX_OK;
+
stats->tx_bytes += skb->len;
stats->tx_packets++;
sparx5->tx.packets++;
@@ -313,7 +330,9 @@ int sparx5_manual_injection_mode(struct sparx5 *sparx5)
sparx5, QS_INJ_GRP_CFG(INJ_QUEUE));
/* CPU ports capture setup */
- for (portno = SPX5_PORT_CPU_0; portno <= SPX5_PORT_CPU_1; portno++) {
+ for (portno = sparx5_get_internal_port(sparx5, SPX5_PORT_CPU_0);
+ portno <= sparx5_get_internal_port(sparx5, SPX5_PORT_CPU_1);
+ portno++) {
/* ASM CPU port: No preamble, IFH, enable padding */
spx5_wr(ASM_PORT_CFG_PAD_ENA_SET(1) |
ASM_PORT_CFG_NO_PREAMBLE_ENA_SET(1) |
@@ -356,6 +375,6 @@ irqreturn_t sparx5_xtr_handler(int irq, void *_sparx5)
void sparx5_port_inj_timer_setup(struct sparx5_port *port)
{
- hrtimer_init(&port->inj_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- port->inj_timer.function = sparx5_injection_timeout;
+ hrtimer_setup(&port->inj_timer, sparx5_injection_timeout, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
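The src_port extraction above moves from FIELD_GET() to spx5_field_get() because the field width now depends on the chip at run time, and FIELD_GET() only accepts a compile-time constant mask. Below is a minimal sketch of the same idea using the kernel's non-constant field_get() helper; sketch_src_port() is a hypothetical name, and the 7-bit/6-bit widths are taken from the GENMASK() bounds in the hunk above.

#include <linux/bitfield.h>
#include <linux/bits.h>

/* Sketch only: the mask is chosen at run time, so the non-constant
 * field_get() (or the driver's spx5_field_get() wrapper) is needed;
 * FIELD_GET() would not build with a non-constant mask.
 */
static u32 sketch_src_port(bool on_sparx5, u32 fwd)
{
	u32 mask = on_sparx5 ? GENMASK(7, 1) : GENMASK(6, 1);

	return field_get(mask, fwd);
}

The same file also converts the injection timer to hrtimer_setup(), which folds hrtimer_init() and the callback assignment into a single call.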
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_pgid.c b/drivers/net/ethernet/microchip/sparx5/sparx5_pgid.c
index af8b435009f4..eae819fa9486 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_pgid.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_pgid.c
@@ -5,13 +5,13 @@ void sparx5_pgid_init(struct sparx5 *spx5)
{
int i;
- for (i = 0; i < PGID_TABLE_SIZE; i++)
+ for (i = 0; i < spx5->data->consts->n_pgids; i++)
spx5->pgid_map[i] = SPX5_PGID_FREE;
/* Reserved for unicast, flood control, broadcast, and CPU.
* These cannot be freed.
*/
- for (i = 0; i <= PGID_CPU; i++)
+ for (i = 0; i <= sparx5_get_pgid(spx5, PGID_CPU); i++)
spx5->pgid_map[i] = SPX5_PGID_RESERVED;
}
@@ -22,7 +22,8 @@ int sparx5_pgid_alloc_mcast(struct sparx5 *spx5, u16 *idx)
/* The multicast area starts at index 65, but the first 7
* are reserved for flood masks and CPU. Start alloc after that.
*/
- for (i = PGID_MCAST_START; i < PGID_TABLE_SIZE; i++) {
+ for (i = sparx5_get_pgid(spx5, PGID_MCAST_START);
+ i < spx5->data->consts->n_pgids; i++) {
if (spx5->pgid_map[i] == SPX5_PGID_FREE) {
spx5->pgid_map[i] = SPX5_PGID_MULTICAST;
*idx = i;
@@ -35,7 +36,8 @@ int sparx5_pgid_alloc_mcast(struct sparx5 *spx5, u16 *idx)
int sparx5_pgid_free(struct sparx5 *spx5, u16 idx)
{
- if (idx <= PGID_CPU || idx >= PGID_TABLE_SIZE)
+ if (idx <= sparx5_get_pgid(spx5, PGID_CPU) ||
+ idx >= spx5->data->consts->n_pgids)
return -EINVAL;
if (spx5->pgid_map[idx] == SPX5_PGID_FREE)
@@ -44,3 +46,8 @@ int sparx5_pgid_free(struct sparx5 *spx5, u16 idx)
spx5->pgid_map[idx] = SPX5_PGID_FREE;
return 0;
}
+
+int sparx5_get_pgid(struct sparx5 *sparx5, int pgid)
+{
+ return sparx5->data->consts->n_ports + pgid;
+}
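With sparx5_get_pgid(), the PGID constants are treated as offsets relative to the end of the front-port range, so the absolute table index adapts to the per-chip port count. Illustration only, assuming PGID_CPU is such a relative offset and using the 65 front ports the driver previously hard-coded as SPX5_PORTS:

/* Illustration only: relative PGID id -> absolute PGID table index. */
int cpu_pgid = sparx5_get_pgid(sparx5, PGID_CPU); /* 65 + PGID_CPU on Sparx5 */

/* A chip with fewer front ports maps the same relative id to a lower
 * absolute index, keeping the PGID area contiguous after the ports.
 */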
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c b/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c
index f8562c1a894d..cfb4b2e17ace 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c
@@ -32,7 +32,19 @@ sparx5_phylink_mac_select_pcs(struct phylink_config *config,
{
struct sparx5_port *port = netdev_priv(to_net_dev(config->dev));
- return &port->phylink_pcs;
+ /* Return the PCS for all the modes that require it. */
+ switch (interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ case PHY_INTERFACE_MODE_5GBASER:
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_25GBASER:
+ return &port->phylink_pcs;
+ default:
+ return NULL;
+ }
}
static void sparx5_phylink_mac_config(struct phylink_config *config,
@@ -77,7 +89,7 @@ static struct sparx5_port *sparx5_pcs_to_port(struct phylink_pcs *pcs)
return container_of(pcs, struct sparx5_port, phylink_pcs);
}
-static void sparx5_pcs_get_state(struct phylink_pcs *pcs,
+static void sparx5_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state)
{
struct sparx5_port *port = sparx5_pcs_to_port(pcs);
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_police.c b/drivers/net/ethernet/microchip/sparx5/sparx5_police.c
index 8ada5cee1342..c88820e83812 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_police.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_police.c
@@ -11,10 +11,11 @@ static int sparx5_policer_service_conf_set(struct sparx5 *sparx5,
struct sparx5_policer *pol)
{
u32 idx, pup_tokens, max_pup_tokens, burst, thres;
+ const struct sparx5_ops *ops = sparx5->data->ops;
struct sparx5_sdlb_group *g;
u64 rate;
- g = &sdlb_groups[pol->group];
+ g = ops->get_sdlb_group(pol->group);
idx = pol->idx;
rate = pol->rate * 1000;
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
index 60dd2fd603a8..04bc8fffaf96 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
@@ -132,8 +132,8 @@ static int sparx5_get_sfi_status(struct sparx5 *sparx5,
return -EINVAL;
}
- dev = sparx5_to_high_dev(portno);
- tinst = sparx5_port_dev_index(portno);
+ dev = sparx5_to_high_dev(sparx5, portno);
+ tinst = sparx5_port_dev_index(sparx5, portno);
inst = spx5_inst_get(sparx5, dev, tinst);
value = spx5_inst_rd(inst, DEV10G_MAC_TX_MONITOR_STICKY(0));
@@ -213,11 +213,13 @@ static int sparx5_port_verify_speed(struct sparx5 *sparx5,
struct sparx5_port *port,
struct sparx5_port_config *conf)
{
- if ((sparx5_port_is_2g5(port->portno) &&
+ const struct sparx5_ops *ops = sparx5->data->ops;
+
+ if ((ops->is_port_2g5(port->portno) &&
conf->speed > SPEED_2500) ||
- (sparx5_port_is_5g(port->portno) &&
+ (ops->is_port_5g(port->portno) &&
conf->speed > SPEED_5000) ||
- (sparx5_port_is_10g(port->portno) &&
+ (ops->is_port_10g(port->portno) &&
conf->speed > SPEED_10000))
return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
@@ -226,14 +228,14 @@ static int sparx5_port_verify_speed(struct sparx5 *sparx5,
return -EINVAL;
case PHY_INTERFACE_MODE_1000BASEX:
if (conf->speed != SPEED_1000 ||
- sparx5_port_is_2g5(port->portno))
+ ops->is_port_2g5(port->portno))
return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
- if (sparx5_port_is_2g5(port->portno))
+ if (ops->is_port_2g5(port->portno))
return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
break;
case PHY_INTERFACE_MODE_2500BASEX:
if (conf->speed != SPEED_2500 ||
- sparx5_port_is_2g5(port->portno))
+ ops->is_port_2g5(port->portno))
return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
break;
case PHY_INTERFACE_MODE_QSGMII:
@@ -255,6 +257,15 @@ static int sparx5_port_verify_speed(struct sparx5 *sparx5,
conf->speed != SPEED_25000))
return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ if (conf->speed != SPEED_1000 &&
+ conf->speed != SPEED_100 &&
+ conf->speed != SPEED_10)
+ return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
+ break;
default:
return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
}
@@ -316,10 +327,11 @@ static int sparx5_port_flush_poll(struct sparx5 *sparx5, u32 portno)
static int sparx5_port_disable(struct sparx5 *sparx5, struct sparx5_port *port, bool high_spd_dev)
{
u32 tinst = high_spd_dev ?
- sparx5_port_dev_index(port->portno) : port->portno;
+ sparx5_port_dev_index(sparx5, port->portno) : port->portno;
u32 dev = high_spd_dev ?
- sparx5_to_high_dev(port->portno) : TARGET_DEV2G5;
+ sparx5_to_high_dev(sparx5, port->portno) : TARGET_DEV2G5;
void __iomem *devinst = spx5_inst_get(sparx5, dev, tinst);
+ const struct sparx5_ops *ops = sparx5->data->ops;
u32 spd = port->conf.speed;
u32 spd_prm;
int err;
@@ -370,7 +382,7 @@ static int sparx5_port_disable(struct sparx5 *sparx5, struct sparx5_port *port,
/* 6: Wait while the last frame is exiting the queues */
usleep_range(8 * spd_prm, 10 * spd_prm);
- /* 7: Flush the queues accociated with the port->portno */
+ /* 7: Flush the queues associated with the port->portno */
spx5_rmw(HSCH_FLUSH_CTRL_FLUSH_PORT_SET(port->portno) |
HSCH_FLUSH_CTRL_FLUSH_DST_SET(1) |
HSCH_FLUSH_CTRL_FLUSH_SRC_SET(1) |
@@ -427,7 +439,7 @@ static int sparx5_port_disable(struct sparx5 *sparx5, struct sparx5_port *port,
HSCH_FLUSH_CTRL);
if (high_spd_dev) {
- u32 pcs = sparx5_to_pcs_dev(port->portno);
+ u32 pcs = sparx5_to_pcs_dev(sparx5, port->portno);
void __iomem *pcsinst = spx5_inst_get(sparx5, pcs, tinst);
/* 12: Disable 5G/10G/25 BaseR PCS */
@@ -436,7 +448,7 @@ static int sparx5_port_disable(struct sparx5 *sparx5, struct sparx5_port *port,
pcsinst,
PCS10G_BR_PCS_CFG(0));
- if (sparx5_port_is_25g(port->portno))
+ if (ops->is_port_25g(port->portno))
/* Disable 25G PCS */
spx5_rmw(DEV25G_PCS25G_CFG_PCS25G_ENA_SET(0),
DEV25G_PCS25G_CFG_PCS25G_ENA,
@@ -473,6 +485,9 @@ static int sparx5_port_fifo_sz(struct sparx5 *sparx5,
u32 mac_width = 8;
u32 addition = 0;
+ if (!is_sparx5(sparx5))
+ return 0;
+
switch (speed) {
case SPEED_25000:
return 0;
@@ -513,9 +528,8 @@ static int sparx5_port_fifo_sz(struct sparx5 *sparx5,
/* Configure port muxing:
* QSGMII: 4x2G5 devices
*/
-static int sparx5_port_mux_set(struct sparx5 *sparx5,
- struct sparx5_port *port,
- struct sparx5_port_config *conf)
+int sparx5_port_mux_set(struct sparx5 *sparx5, struct sparx5_port *port,
+ struct sparx5_port_config *conf)
{
u32 portno = port->portno;
u32 inst;
@@ -558,9 +572,10 @@ static int sparx5_port_max_tags_set(struct sparx5 *sparx5,
bool dtag = max_tags == SPX5_PORT_MAX_TAGS_TWO;
enum sparx5_vlan_port_type vlan_type = port->vlan_type;
bool dotag = max_tags != SPX5_PORT_MAX_TAGS_NONE;
- u32 dev = sparx5_to_high_dev(port->portno);
- u32 tinst = sparx5_port_dev_index(port->portno);
+ u32 dev = sparx5_to_high_dev(sparx5, port->portno);
+ u32 tinst = sparx5_port_dev_index(sparx5, port->portno);
void __iomem *inst = spx5_inst_get(sparx5, dev, tinst);
+ const struct sparx5_ops *ops = sparx5->data->ops;
u32 etype;
etype = (vlan_type == SPX5_VLAN_PORT_TYPE_S_CUSTOM ?
@@ -575,7 +590,7 @@ static int sparx5_port_max_tags_set(struct sparx5 *sparx5,
sparx5,
DEV2G5_MAC_TAGS_CFG(port->portno));
- if (sparx5_port_is_2g5(port->portno))
+ if (ops->is_port_2g5(port->portno))
return 0;
spx5_inst_rmw(DEV10G_MAC_TAGS_CFG_TAG_ID_SET(etype) |
@@ -789,9 +804,9 @@ static int sparx5_port_pcs_high_set(struct sparx5 *sparx5,
struct sparx5_port_config *conf)
{
u32 clk_spd = conf->portmode == PHY_INTERFACE_MODE_5GBASER ? 1 : 0;
- u32 pix = sparx5_port_dev_index(port->portno);
- u32 dev = sparx5_to_high_dev(port->portno);
- u32 pcs = sparx5_to_pcs_dev(port->portno);
+ u32 pix = sparx5_port_dev_index(sparx5, port->portno);
+ u32 dev = sparx5_to_high_dev(sparx5, port->portno);
+ u32 pcs = sparx5_to_pcs_dev(sparx5, port->portno);
void __iomem *devinst;
void __iomem *pcsinst;
int err;
@@ -843,19 +858,22 @@ static int sparx5_port_pcs_high_set(struct sparx5 *sparx5,
/* Switch between 1G/2500 and 5G/10G/25G devices */
static void sparx5_dev_switch(struct sparx5 *sparx5, int port, bool hsd)
{
- int bt_indx = BIT(sparx5_port_dev_index(port));
+ const struct sparx5_ops *ops = sparx5->data->ops;
+ int bt_indx;
+
+ bt_indx = BIT(ops->get_port_dev_bit(sparx5, port));
- if (sparx5_port_is_5g(port)) {
+ if (ops->is_port_5g(port)) {
spx5_rmw(hsd ? 0 : bt_indx,
bt_indx,
sparx5,
PORT_CONF_DEV5G_MODES);
- } else if (sparx5_port_is_10g(port)) {
+ } else if (ops->is_port_10g(port)) {
spx5_rmw(hsd ? 0 : bt_indx,
bt_indx,
sparx5,
PORT_CONF_DEV10G_MODES);
- } else if (sparx5_port_is_25g(port)) {
+ } else if (ops->is_port_25g(port)) {
spx5_rmw(hsd ? 0 : bt_indx,
bt_indx,
sparx5,
@@ -915,6 +933,20 @@ static int sparx5_port_config_low_set(struct sparx5 *sparx5,
sparx5,
DEV2G5_DEV_RST_CTRL(port->portno));
+ /* Enable PHAD_CTRL for better timestamping */
+ if (!is_sparx5(sparx5)) {
+ for (int i = 0; i < 2; ++i) {
+ /* Divide the port clock by three for the two
+ * phase detection registers.
+ */
+ spx5_rmw(DEV2G5_PHAD_CTRL_DIV_CFG_SET(3) |
+ DEV2G5_PHAD_CTRL_PHAD_ENA_SET(1),
+ DEV2G5_PHAD_CTRL_DIV_CFG |
+ DEV2G5_PHAD_CTRL_PHAD_ENA,
+ sparx5, DEV2G5_PHAD_CTRL(port->portno, i));
+ }
+ }
+
return 0;
}
@@ -971,15 +1003,23 @@ int sparx5_port_config(struct sparx5 *sparx5,
struct sparx5_port *port,
struct sparx5_port_config *conf)
{
+ bool rgmii = phy_interface_mode_is_rgmii(conf->phy_mode);
bool high_speed_dev = sparx5_is_baser(conf->portmode);
+ const struct sparx5_ops *ops = sparx5->data->ops;
int err, urgency, stop_wm;
err = sparx5_port_verify_speed(sparx5, port, conf);
if (err)
return err;
+ if (rgmii) {
+ err = ops->port_config_rgmii(port, conf);
+ if (err)
+ return err;
+ }
+
/* high speed device is already configured */
- if (!high_speed_dev)
+ if (!rgmii && !high_speed_dev)
sparx5_port_config_low_set(sparx5, port, conf);
/* Configure flow control */
@@ -987,6 +1027,13 @@ int sparx5_port_config(struct sparx5 *sparx5,
if (err)
return err;
+ if (!is_sparx5(sparx5) && ops->is_port_10g(port->portno) &&
+ conf->speed < SPEED_10000)
+ spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA_SET(1),
+ DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA,
+ sparx5,
+ DSM_DEV_TX_STOP_WM_CFG(port->portno));
+
/* Set the DSM stop watermark */
stop_wm = sparx5_port_fifo_sz(sparx5, port->portno, conf->speed);
spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_SET(stop_wm),
@@ -1016,9 +1063,10 @@ int sparx5_port_init(struct sparx5 *sparx5,
{
u32 pause_start = sparx5_wm_enc(6 * (ETH_MAXLEN / SPX5_BUFFER_CELL_SZ));
u32 atop = sparx5_wm_enc(20 * (ETH_MAXLEN / SPX5_BUFFER_CELL_SZ));
- u32 devhigh = sparx5_to_high_dev(port->portno);
- u32 pix = sparx5_port_dev_index(port->portno);
- u32 pcs = sparx5_to_pcs_dev(port->portno);
+ const struct sparx5_ops *ops = sparx5->data->ops;
+ u32 devhigh = sparx5_to_high_dev(sparx5, port->portno);
+ u32 pix = sparx5_port_dev_index(sparx5, port->portno);
+ u32 pcs = sparx5_to_pcs_dev(sparx5, port->portno);
bool sd_pol = port->signd_active_high;
bool sd_sel = !port->signd_internal;
bool sd_ena = port->signd_enable;
@@ -1031,28 +1079,10 @@ int sparx5_port_init(struct sparx5 *sparx5,
pcsinst = spx5_inst_get(sparx5, pcs, pix);
/* Set the mux port mode */
- err = sparx5_port_mux_set(sparx5, port, conf);
- if (err)
- return err;
-
- /* Configure MAC vlan awareness */
- err = sparx5_port_max_tags_set(sparx5, port);
+ err = ops->set_port_mux(sparx5, port, conf);
if (err)
return err;
- /* Set Max Length */
- spx5_rmw(DEV2G5_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN),
- DEV2G5_MAC_MAXLEN_CFG_MAX_LEN,
- sparx5,
- DEV2G5_MAC_MAXLEN_CFG(port->portno));
-
- /* 1G/2G5: Signal Detect configuration */
- spx5_wr(DEV2G5_PCS1G_SD_CFG_SD_POL_SET(sd_pol) |
- DEV2G5_PCS1G_SD_CFG_SD_SEL_SET(sd_sel) |
- DEV2G5_PCS1G_SD_CFG_SD_ENA_SET(sd_ena),
- sparx5,
- DEV2G5_PCS1G_SD_CFG(port->portno));
-
/* Set Pause WM hysteresis */
spx5_rmw(QSYS_PAUSE_CFG_PAUSE_START_SET(pause_start) |
QSYS_PAUSE_CFG_PAUSE_STOP_SET(pause_stop) |
@@ -1076,13 +1106,34 @@ int sparx5_port_init(struct sparx5 *sparx5,
ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS,
sparx5, ANA_CL_FILTER_CTRL(port->portno));
+ if (ops->is_port_rgmii(port->portno))
+ return 0; /* RGMII device - nothing more to configure */
+
+ /* Configure MAC vlan awareness */
+ err = sparx5_port_max_tags_set(sparx5, port);
+ if (err)
+ return err;
+
+ /* Set Max Length */
+ spx5_rmw(DEV2G5_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN),
+ DEV2G5_MAC_MAXLEN_CFG_MAX_LEN,
+ sparx5,
+ DEV2G5_MAC_MAXLEN_CFG(port->portno));
+
+ /* 1G/2G5: Signal Detect configuration */
+ spx5_wr(DEV2G5_PCS1G_SD_CFG_SD_POL_SET(sd_pol) |
+ DEV2G5_PCS1G_SD_CFG_SD_SEL_SET(sd_sel) |
+ DEV2G5_PCS1G_SD_CFG_SD_ENA_SET(sd_ena),
+ sparx5,
+ DEV2G5_PCS1G_SD_CFG(port->portno));
+
if (conf->portmode == PHY_INTERFACE_MODE_QSGMII ||
conf->portmode == PHY_INTERFACE_MODE_SGMII) {
err = sparx5_serdes_set(sparx5, port, conf);
if (err)
return err;
- if (!sparx5_port_is_2g5(port->portno))
+ if (!ops->is_port_2g5(port->portno))
/* Enable shadow device */
spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA_SET(1),
DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA,
@@ -1105,7 +1156,7 @@ int sparx5_port_init(struct sparx5 *sparx5,
sparx5,
DEV2G5_MAC_IFG_CFG(port->portno));
- if (sparx5_port_is_2g5(port->portno))
+ if (ops->is_port_2g5(port->portno))
return 0; /* Low speed device only - return */
/* Now setup the high speed device */
@@ -1119,7 +1170,7 @@ int sparx5_port_init(struct sparx5 *sparx5,
spx5_inst_rmw(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN),
DEV10G_MAC_MAXLEN_CFG_MAX_LEN,
devinst,
- DEV10G_MAC_ENA_CFG(0));
+ DEV10G_MAC_MAXLEN_CFG(0));
/* Handle Signal Detect in 10G PCS */
spx5_inst_wr(PCS10G_BR_PCS_SD_CFG_SD_POL_SET(sd_pol) |
@@ -1128,7 +1179,7 @@ int sparx5_port_init(struct sparx5 *sparx5,
pcsinst,
PCS10G_BR_PCS_SD_CFG(0));
- if (sparx5_port_is_25g(port->portno)) {
+ if (ops->is_port_25g(port->portno)) {
/* Handle Signal Detect in 25G PCS */
spx5_wr(DEV25G_PCS25G_SD_CFG_SD_POL_SET(sd_pol) |
DEV25G_PCS25G_SD_CFG_SD_SEL_SET(sd_sel) |
@@ -1137,6 +1188,27 @@ int sparx5_port_init(struct sparx5 *sparx5,
DEV25G_PCS25G_SD_CFG(pix));
}
+ if (!is_sparx5(sparx5)) {
+ void __iomem *inst;
+ u32 dev, tinst;
+
+ if (ops->is_port_10g(port->portno)) {
+ dev = sparx5_to_high_dev(sparx5, port->portno);
+ tinst = sparx5_port_dev_index(sparx5, port->portno);
+ inst = spx5_inst_get(sparx5, dev, tinst);
+
+ spx5_inst_wr(5, inst,
+ DEV10G_PTP_STAMPER_CFG(port->portno));
+ } else if (ops->is_port_5g(port->portno)) {
+ dev = sparx5_to_high_dev(sparx5, port->portno);
+ tinst = sparx5_port_dev_index(sparx5, port->portno);
+ inst = spx5_inst_get(sparx5, dev, tinst);
+
+ spx5_inst_wr(5, inst,
+ DEV5G_PTP_STAMPER_CFG(port->portno));
+ }
+ }
+
return 0;
}
@@ -1345,3 +1417,8 @@ int sparx5_port_qos_default_set(const struct sparx5_port *port,
return 0;
}
+
+int sparx5_get_internal_port(struct sparx5 *sparx5, int port)
+{
+ return sparx5->data->consts->n_ports + port;
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.h b/drivers/net/ethernet/microchip/sparx5/sparx5_port.h
index 607c4ff1df6b..c8a37468a3d1 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_port.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.h
@@ -40,25 +40,34 @@ static inline bool sparx5_port_is_25g(int portno)
return portno >= 56 && portno <= 63;
}
-static inline u32 sparx5_to_high_dev(int port)
+static inline bool sparx5_port_is_rgmii(int portno)
{
- if (sparx5_port_is_5g(port))
+ return false;
+}
+
+static inline u32 sparx5_to_high_dev(struct sparx5 *sparx5, int port)
+{
+ const struct sparx5_ops *ops = sparx5->data->ops;
+
+ if (ops->is_port_5g(port))
return TARGET_DEV5G;
- if (sparx5_port_is_10g(port))
+ if (ops->is_port_10g(port))
return TARGET_DEV10G;
return TARGET_DEV25G;
}
-static inline u32 sparx5_to_pcs_dev(int port)
+static inline u32 sparx5_to_pcs_dev(struct sparx5 *sparx5, int port)
{
- if (sparx5_port_is_5g(port))
+ const struct sparx5_ops *ops = sparx5->data->ops;
+
+ if (ops->is_port_5g(port))
return TARGET_PCS5G_BR;
- if (sparx5_port_is_10g(port))
+ if (ops->is_port_10g(port))
return TARGET_PCS10G_BR;
return TARGET_PCS25G_BR;
}
-static inline int sparx5_port_dev_index(int port)
+static inline u32 sparx5_port_dev_mapping(struct sparx5 *sparx5, int port)
{
if (sparx5_port_is_2g5(port))
return port;
@@ -70,6 +79,11 @@ static inline int sparx5_port_dev_index(int port)
return (port - 56);
}
+static inline u32 sparx5_port_dev_index(struct sparx5 *sparx5, int port)
+{
+ return sparx5->data->ops->get_port_dev_index(sparx5, port);
+}
+
int sparx5_port_init(struct sparx5 *sparx5,
struct sparx5_port *spx5_port,
struct sparx5_port_config *conf);
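The per-port capability checks now dispatch through sparx5->data->ops, letting lan969x provide its own port map while Sparx5 keeps the inline helpers above. A minimal sketch of how the Sparx5 side of that table could be wired, assuming the hook names used in this diff; sparx5_ops_sketch is a hypothetical name, and the real table lives in the per-chip match data, which is not part of these hunks:

/* Sketch only: per-chip ops filled with the Sparx5 helpers above. */
static const struct sparx5_ops sparx5_ops_sketch = {
	.is_port_2g5        = sparx5_port_is_2g5,
	.is_port_5g         = sparx5_port_is_5g,
	.is_port_10g        = sparx5_port_is_10g,
	.is_port_25g        = sparx5_port_is_25g,
	.is_port_rgmii      = sparx5_port_is_rgmii,
	.get_port_dev_index = sparx5_port_dev_mapping,
	.get_sdlb_group     = sparx5_get_sdlb_group,
	/* ... set_port_mux, port_config_rgmii, fdma_xmit, etc. */
};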
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_psfp.c b/drivers/net/ethernet/microchip/sparx5/sparx5_psfp.c
index 8dee1ab1fa75..cd4f42c3f7eb 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_psfp.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_psfp.c
@@ -20,36 +20,40 @@ static struct sparx5_pool_entry sparx5_psfp_sg_pool[SPX5_PSFP_SG_CNT];
/* Pool of available stream filters */
static struct sparx5_pool_entry sparx5_psfp_sf_pool[SPX5_PSFP_SF_CNT];
-static int sparx5_psfp_sf_get(u32 *id)
+static int sparx5_psfp_sf_get(struct sparx5 *sparx5, u32 *id)
{
- return sparx5_pool_get(sparx5_psfp_sf_pool, SPX5_PSFP_SF_CNT, id);
+ return sparx5_pool_get(sparx5_psfp_sf_pool,
+ sparx5->data->consts->n_filters, id);
}
-static int sparx5_psfp_sf_put(u32 id)
+static int sparx5_psfp_sf_put(struct sparx5 *sparx5, u32 id)
{
- return sparx5_pool_put(sparx5_psfp_sf_pool, SPX5_PSFP_SF_CNT, id);
+ return sparx5_pool_put(sparx5_psfp_sf_pool,
+ sparx5->data->consts->n_filters, id);
}
-static int sparx5_psfp_sg_get(u32 idx, u32 *id)
+static int sparx5_psfp_sg_get(struct sparx5 *sparx5, u32 idx, u32 *id)
{
- return sparx5_pool_get_with_idx(sparx5_psfp_sg_pool, SPX5_PSFP_SG_CNT,
- idx, id);
+ return sparx5_pool_get_with_idx(sparx5_psfp_sg_pool,
+ sparx5->data->consts->n_gates, idx, id);
}
-static int sparx5_psfp_sg_put(u32 id)
+static int sparx5_psfp_sg_put(struct sparx5 *sparx5, u32 id)
{
- return sparx5_pool_put(sparx5_psfp_sg_pool, SPX5_PSFP_SG_CNT, id);
+ return sparx5_pool_put(sparx5_psfp_sg_pool,
+ sparx5->data->consts->n_gates, id);
}
-static int sparx5_psfp_fm_get(u32 idx, u32 *id)
+static int sparx5_psfp_fm_get(struct sparx5 *sparx5, u32 idx, u32 *id)
{
- return sparx5_pool_get_with_idx(sparx5_psfp_fm_pool, SPX5_SDLB_CNT, idx,
- id);
+ return sparx5_pool_get_with_idx(sparx5_psfp_fm_pool,
+ sparx5->data->consts->n_sdlbs, idx, id);
}
-static int sparx5_psfp_fm_put(u32 id)
+static int sparx5_psfp_fm_put(struct sparx5 *sparx5, u32 id)
{
- return sparx5_pool_put(sparx5_psfp_fm_pool, SPX5_SDLB_CNT, id);
+ return sparx5_pool_put(sparx5_psfp_fm_pool,
+ sparx5->data->consts->n_sdlbs, id);
}
u32 sparx5_psfp_isdx_get_sf(struct sparx5 *sparx5, u32 isdx)
@@ -205,7 +209,7 @@ int sparx5_psfp_sf_add(struct sparx5 *sparx5, const struct sparx5_psfp_sf *sf,
{
int ret;
- ret = sparx5_psfp_sf_get(id);
+ ret = sparx5_psfp_sf_get(sparx5, id);
if (ret < 0)
return ret;
@@ -220,7 +224,7 @@ int sparx5_psfp_sf_del(struct sparx5 *sparx5, u32 id)
sparx5_psfp_sf_set(sparx5, id, &sf);
- return sparx5_psfp_sf_put(id);
+ return sparx5_psfp_sf_put(sparx5, id);
}
int sparx5_psfp_sg_add(struct sparx5 *sparx5, u32 uidx,
@@ -229,7 +233,7 @@ int sparx5_psfp_sg_add(struct sparx5 *sparx5, u32 uidx,
ktime_t basetime;
int ret;
- ret = sparx5_psfp_sg_get(uidx, id);
+ ret = sparx5_psfp_sg_get(sparx5, uidx, id);
if (ret < 0)
return ret;
/* Was already in use, no need to reconfigure */
@@ -253,7 +257,7 @@ int sparx5_psfp_sg_del(struct sparx5 *sparx5, u32 id)
const struct sparx5_psfp_sg sg = { 0 };
int ret;
- ret = sparx5_psfp_sg_put(id);
+ ret = sparx5_psfp_sg_put(sparx5, id);
if (ret < 0)
return ret;
/* Stream gate still in use ? */
@@ -270,7 +274,7 @@ int sparx5_psfp_fm_add(struct sparx5 *sparx5, u32 uidx,
int ret;
/* Get flow meter */
- ret = sparx5_psfp_fm_get(uidx, &fm->pol.idx);
+ ret = sparx5_psfp_fm_get(sparx5, uidx, &fm->pol.idx);
if (ret < 0)
return ret;
/* Was already in use, no need to reconfigure */
@@ -303,7 +307,7 @@ int sparx5_psfp_fm_del(struct sparx5 *sparx5, u32 id)
if (ret < 0)
return ret;
- ret = sparx5_psfp_fm_put(id);
+ ret = sparx5_psfp_fm_put(sparx5, id);
if (ret < 0)
return ret;
/* Do not reset flow-meter if still in use. */
@@ -315,11 +319,12 @@ int sparx5_psfp_fm_del(struct sparx5 *sparx5, u32 id)
void sparx5_psfp_init(struct sparx5 *sparx5)
{
+ const struct sparx5_ops *ops = sparx5->data->ops;
const struct sparx5_sdlb_group *group;
int i;
- for (i = 0; i < SPX5_SDLB_GROUP_CNT; i++) {
- group = &sdlb_groups[i];
+ for (i = 0; i < sparx5->data->consts->n_lb_groups; i++) {
+ group = ops->get_sdlb_group(i);
sparx5_sdlb_group_init(sparx5, group->max_rate,
group->min_burst, group->frame_size, i);
}
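The PSFP pools keep their statically sized Sparx5 arrays while allocation is now bounded by the per-chip constants, which implicitly assumes the per-chip counts never exceed the array sizes. A defensive sketch of that invariant, not part of the patch; sparx5_psfp_sanity_sketch() is a hypothetical helper:

/* Sketch only: per-chip counts must fit the statically sized pools. */
static void sparx5_psfp_sanity_sketch(struct sparx5 *sparx5)
{
	const struct sparx5_consts *consts = sparx5->data->consts;

	WARN_ON(consts->n_filters > SPX5_PSFP_SF_CNT);
	WARN_ON(consts->n_gates > SPX5_PSFP_SG_CNT);
	WARN_ON(consts->n_sdlbs > SPX5_SDLB_CNT);
}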
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c
index 5a932460db58..2f168700f63c 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c
@@ -11,8 +11,6 @@
#include "sparx5_main_regs.h"
#include "sparx5_main.h"
-#define SPARX5_MAX_PTP_ID 512
-
#define TOD_ACC_PIN 0x4
enum {
@@ -38,6 +36,9 @@ static u64 sparx5_ptp_get_1ppm(struct sparx5 *sparx5)
case SPX5_CORE_CLOCK_250MHZ:
res = 2301339409586;
break;
+ case SPX5_CORE_CLOCK_328MHZ:
+ res = 1756832768924;
+ break;
case SPX5_CORE_CLOCK_500MHZ:
res = 1150669704793;
break;
@@ -60,6 +61,9 @@ static u64 sparx5_ptp_get_nominal_value(struct sparx5 *sparx5)
case SPX5_CORE_CLOCK_250MHZ:
res = 0x1FF0000000000000;
break;
+ case SPX5_CORE_CLOCK_328MHZ:
+ res = 0x18604697DD0F9B5B;
+ break;
case SPX5_CORE_CLOCK_500MHZ:
res = 0x0FF8000000000000;
break;
@@ -269,11 +273,12 @@ void sparx5_ptp_txtstamp_release(struct sparx5_port *port,
spin_unlock_irqrestore(&sparx5->ptp_ts_id_lock, flags);
}
-static void sparx5_get_hwtimestamp(struct sparx5 *sparx5,
- struct timespec64 *ts,
- u32 nsec)
+void sparx5_get_hwtimestamp(struct sparx5 *sparx5,
+ struct timespec64 *ts,
+ u32 nsec)
{
/* Read current PTP time to get seconds */
+ const struct sparx5_consts *consts = sparx5->data->consts;
unsigned long flags;
u32 curr_nsec;
@@ -285,10 +290,10 @@ static void sparx5_get_hwtimestamp(struct sparx5 *sparx5,
PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
PTP_PTP_PIN_CFG_PTP_PIN_DOM |
PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
- sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));
+ sparx5, PTP_PTP_PIN_CFG(consts->tod_pin));
- ts->tv_sec = spx5_rd(sparx5, PTP_PTP_TOD_SEC_LSB(TOD_ACC_PIN));
- curr_nsec = spx5_rd(sparx5, PTP_PTP_TOD_NSEC(TOD_ACC_PIN));
+ ts->tv_sec = spx5_rd(sparx5, PTP_PTP_TOD_SEC_LSB(consts->tod_pin));
+ curr_nsec = spx5_rd(sparx5, PTP_PTP_TOD_NSEC(consts->tod_pin));
ts->tv_nsec = nsec;
@@ -440,8 +445,11 @@ static int sparx5_ptp_settime64(struct ptp_clock_info *ptp,
{
struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info);
struct sparx5 *sparx5 = phc->sparx5;
+ const struct sparx5_consts *consts;
unsigned long flags;
+ consts = sparx5->data->consts;
+
spin_lock_irqsave(&sparx5->ptp_clock_lock, flags);
/* Must be in IDLE mode before the time can be loaded */
@@ -451,14 +459,14 @@ static int sparx5_ptp_settime64(struct ptp_clock_info *ptp,
PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
PTP_PTP_PIN_CFG_PTP_PIN_DOM |
PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
- sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));
+ sparx5, PTP_PTP_PIN_CFG(consts->tod_pin));
/* Set new value */
spx5_wr(PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB_SET(upper_32_bits(ts->tv_sec)),
- sparx5, PTP_PTP_TOD_SEC_MSB(TOD_ACC_PIN));
+ sparx5, PTP_PTP_TOD_SEC_MSB(consts->tod_pin));
spx5_wr(lower_32_bits(ts->tv_sec),
- sparx5, PTP_PTP_TOD_SEC_LSB(TOD_ACC_PIN));
- spx5_wr(ts->tv_nsec, sparx5, PTP_PTP_TOD_NSEC(TOD_ACC_PIN));
+ sparx5, PTP_PTP_TOD_SEC_LSB(consts->tod_pin));
+ spx5_wr(ts->tv_nsec, sparx5, PTP_PTP_TOD_NSEC(consts->tod_pin));
/* Apply new values */
spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_LOAD) |
@@ -467,7 +475,7 @@ static int sparx5_ptp_settime64(struct ptp_clock_info *ptp,
PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
PTP_PTP_PIN_CFG_PTP_PIN_DOM |
PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
- sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));
+ sparx5, PTP_PTP_PIN_CFG(consts->tod_pin));
spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags);
@@ -478,10 +486,13 @@ int sparx5_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info);
struct sparx5 *sparx5 = phc->sparx5;
+ const struct sparx5_consts *consts;
unsigned long flags;
time64_t s;
s64 ns;
+ consts = sparx5->data->consts;
+
spin_lock_irqsave(&sparx5->ptp_clock_lock, flags);
spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) |
@@ -490,12 +501,12 @@ int sparx5_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
PTP_PTP_PIN_CFG_PTP_PIN_DOM |
PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
- sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));
+ sparx5, PTP_PTP_PIN_CFG(consts->tod_pin));
- s = spx5_rd(sparx5, PTP_PTP_TOD_SEC_MSB(TOD_ACC_PIN));
+ s = spx5_rd(sparx5, PTP_PTP_TOD_SEC_MSB(consts->tod_pin));
s <<= 32;
- s |= spx5_rd(sparx5, PTP_PTP_TOD_SEC_LSB(TOD_ACC_PIN));
- ns = spx5_rd(sparx5, PTP_PTP_TOD_NSEC(TOD_ACC_PIN));
+ s |= spx5_rd(sparx5, PTP_PTP_TOD_SEC_LSB(consts->tod_pin));
+ ns = spx5_rd(sparx5, PTP_PTP_TOD_NSEC(consts->tod_pin));
ns &= PTP_PTP_TOD_NSEC_PTP_TOD_NSEC;
spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags);
@@ -515,6 +526,9 @@ static int sparx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info);
struct sparx5 *sparx5 = phc->sparx5;
+ const struct sparx5_consts *consts;
+
+ consts = sparx5->data->consts;
if (delta > -(NSEC_PER_SEC / 2) && delta < (NSEC_PER_SEC / 2)) {
unsigned long flags;
@@ -528,10 +542,10 @@ static int sparx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
PTP_PTP_PIN_CFG_PTP_PIN_DOM |
PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
- sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));
+ sparx5, PTP_PTP_PIN_CFG(consts->tod_pin));
spx5_wr(PTP_PTP_TOD_NSEC_PTP_TOD_NSEC_SET(delta),
- sparx5, PTP_PTP_TOD_NSEC(TOD_ACC_PIN));
+ sparx5, PTP_PTP_TOD_NSEC(consts->tod_pin));
/* Adjust time with the value of PTP_TOD_NSEC */
spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_DELTA) |
@@ -540,7 +554,7 @@ static int sparx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
PTP_PTP_PIN_CFG_PTP_PIN_DOM |
PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
- sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));
+ sparx5, PTP_PTP_PIN_CFG(consts->tod_pin));
spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags);
} else {
@@ -630,7 +644,7 @@ int sparx5_ptp_init(struct sparx5 *sparx5)
/* Enable master counters */
spx5_wr(PTP_PTP_DOM_CFG_PTP_ENA_SET(0x7), sparx5, PTP_PTP_DOM_CFG);
- for (i = 0; i < SPX5_PORTS; i++) {
+ for (i = 0; i < sparx5->data->consts->n_ports; i++) {
port = sparx5->ports[i];
if (!port)
continue;
@@ -646,7 +660,7 @@ void sparx5_ptp_deinit(struct sparx5 *sparx5)
struct sparx5_port *port;
int i;
- for (i = 0; i < SPX5_PORTS; i++) {
+ for (i = 0; i < sparx5->data->consts->n_ports; i++) {
port = sparx5->ports[i];
if (!port)
continue;
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_qos.c b/drivers/net/ethernet/microchip/sparx5/sparx5_qos.c
index 5f34febaee6b..e580670f3992 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_qos.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_qos.c
@@ -74,6 +74,11 @@ static const u32 spx5_hsch_max_group_rate[SPX5_HSCH_LEAK_GRP_CNT] = {
26214200 /* 26.214 Gbps */
};
+u32 sparx5_get_hsch_max_group_rate(int grp)
+{
+ return spx5_hsch_max_group_rate[grp];
+}
+
static struct sparx5_layer layers[SPX5_HSCH_LAYER_CNT];
static u32 sparx5_lg_get_leak_time(struct sparx5 *sparx5, u32 layer, u32 group)
@@ -362,9 +367,10 @@ static u32 sparx5_weight_to_hw_cost(u32 weight_min, u32 weight)
static int sparx5_dwrr_conf_set(struct sparx5_port *port,
struct sparx5_dwrr *dwrr)
{
+ u32 layer = is_sparx5(port->sparx5) ? 2 : 1;
int i;
- spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(2) |
+ spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(layer) |
HSCH_HSCH_CFG_CFG_CFG_SE_IDX_SET(port->portno),
HSCH_HSCH_CFG_CFG_HSCH_LAYER | HSCH_HSCH_CFG_CFG_CFG_SE_IDX,
port->sparx5, HSCH_HSCH_CFG_CFG);
@@ -385,6 +391,7 @@ static int sparx5_dwrr_conf_set(struct sparx5_port *port,
static int sparx5_leak_groups_init(struct sparx5 *sparx5)
{
+ const struct sparx5_ops *ops = sparx5->data->ops;
struct sparx5_layer *layer;
u32 sys_clk_per_100ps;
struct sparx5_lg *lg;
@@ -397,7 +404,7 @@ static int sparx5_leak_groups_init(struct sparx5 *sparx5)
layer = &layers[i];
for (ii = 0; ii < SPX5_HSCH_LEAK_GRP_CNT; ii++) {
lg = &layer->leak_groups[ii];
- lg->max_rate = spx5_hsch_max_group_rate[ii];
+ lg->max_rate = ops->get_hsch_max_group_rate(i);
/* Calculate the leak time in us, to serve a maximum
* rate of 'max_rate' for this group
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_qos.h b/drivers/net/ethernet/microchip/sparx5/sparx5_qos.h
index ced35033a6c5..1231a80335d7 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_qos.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_qos.h
@@ -79,4 +79,6 @@ int sparx5_tc_ets_add(struct sparx5_port *port,
int sparx5_tc_ets_del(struct sparx5_port *port);
+u32 sparx5_get_hsch_max_group_rate(int grp);
+
#endif /* __SPARX5_QOS_H__ */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_regs.c b/drivers/net/ethernet/microchip/sparx5/sparx5_regs.c
new file mode 100644
index 000000000000..220e81b714d4
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_regs.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2024 Microchip Technology Inc.
+ */
+
+/* This file is autogenerated by cml-utils 2024-09-30 11:48:29 +0200.
+ * Commit ID: 9d07b8d19363f3cd3590ddb3f7a2e2768e16524b
+ */
+
+#include "sparx5_regs.h"
+
+const unsigned int sparx5_tsize[TSIZE_LAST] = {
+ [TC_DEV10G] = 12,
+ [TC_DEV2G5] = 65,
+ [TC_DEV5G] = 13,
+ [TC_PCS10G_BR] = 12,
+ [TC_PCS5G_BR] = 13,
+};
+
+const unsigned int sparx5_raddr[RADDR_LAST] = {
+ [RA_CPU_PROC_CTRL] = 176,
+ [RA_GCB_SOFT_RST] = 8,
+ [RA_GCB_HW_SGPIO_TO_SD_MAP_CFG] = 24,
+};
+
+const unsigned int sparx5_rcnt[RCNT_LAST] = {
+ [RC_ANA_AC_OWN_UPSID] = 3,
+ [RC_ANA_ACL_VCAP_S2_CFG] = 70,
+ [RC_ANA_ACL_OWN_UPSID] = 3,
+ [RC_ANA_CL_OWN_UPSID] = 3,
+ [RC_ANA_L2_OWN_UPSID] = 3,
+ [RC_ASM_PORT_CFG] = 67,
+ [RC_DSM_BUF_CFG] = 67,
+ [RC_DSM_DEV_TX_STOP_WM_CFG] = 67,
+ [RC_DSM_RX_PAUSE_CFG] = 67,
+ [RC_DSM_MAC_CFG] = 67,
+ [RC_DSM_MAC_ADDR_BASE_HIGH_CFG] = 65,
+ [RC_DSM_MAC_ADDR_BASE_LOW_CFG] = 65,
+ [RC_DSM_TAXI_CAL_CFG] = 9,
+ [RC_GCB_HW_SGPIO_TO_SD_MAP_CFG] = 65,
+ [RC_HSCH_PORT_MODE] = 70,
+ [RC_QFWD_SWITCH_PORT_MODE] = 70,
+ [RC_QSYS_PAUSE_CFG] = 70,
+ [RC_QSYS_ATOP] = 70,
+ [RC_QSYS_FWD_PRESSURE] = 70,
+ [RC_QSYS_CAL_AUTO] = 7,
+ [RC_REW_OWN_UPSID] = 3,
+ [RC_REW_RTAG_ETAG_CTRL] = 70,
+};
+
+const unsigned int sparx5_gaddr[GADDR_LAST] = {
+ [GA_ANA_AC_RAM_CTRL] = 839108,
+ [GA_ANA_AC_PS_COMMON] = 894472,
+ [GA_ANA_AC_MIRROR_PROBE] = 893696,
+ [GA_ANA_AC_SRC] = 849920,
+ [GA_ANA_AC_PGID] = 786432,
+ [GA_ANA_AC_TSN_SF] = 839136,
+ [GA_ANA_AC_TSN_SF_CFG] = 839680,
+ [GA_ANA_AC_TSN_SF_STATUS] = 839072,
+ [GA_ANA_AC_SG_ACCESS] = 839140,
+ [GA_ANA_AC_SG_CONFIG] = 851584,
+ [GA_ANA_AC_SG_STATUS] = 839088,
+ [GA_ANA_AC_SG_STATUS_STICKY] = 839152,
+ [GA_ANA_AC_STAT_GLOBAL_CFG_PORT] = 851552,
+ [GA_ANA_AC_STAT_CNT_CFG_PORT] = 843776,
+ [GA_ANA_AC_STAT_GLOBAL_CFG_ACL] = 893792,
+ [GA_ANA_ACL_COMMON] = 32768,
+ [GA_ANA_ACL_KEY_SEL] = 34200,
+ [GA_ANA_ACL_CNT_B] = 16384,
+ [GA_ANA_ACL_STICKY] = 36408,
+ [GA_ANA_AC_POL_POL_ALL_CFG] = 75968,
+ [GA_ANA_AC_POL_COMMON_BDLB] = 79048,
+ [GA_ANA_AC_POL_COMMON_BUM_SLB] = 79056,
+ [GA_ANA_AC_SDLB_LBGRP_TBL] = 295468,
+ [GA_ANA_CL_PORT] = 131072,
+ [GA_ANA_CL_COMMON] = 166912,
+ [GA_ANA_L2_COMMON] = 566024,
+ [GA_ANA_L3_COMMON] = 493632,
+ [GA_ANA_L3_VLAN_ARP_L3MC_STICKY] = 491460,
+ [GA_ASM_CFG] = 33280,
+ [GA_ASM_PFC_TIMER_CFG] = 34716,
+ [GA_ASM_LBK_WM_CFG] = 34744,
+ [GA_ASM_LBK_MISC_CFG] = 34756,
+ [GA_ASM_RAM_CTRL] = 34832,
+ [GA_EACL_ES2_KEY_SELECT_PROFILE] = 149504,
+ [GA_EACL_CNT_TBL] = 122880,
+ [GA_EACL_POL_CFG] = 150608,
+ [GA_EACL_ES2_STICKY] = 118696,
+ [GA_EACL_RAM_CTRL] = 118736,
+ [GA_GCB_SIO_CTRL] = 876,
+ [GA_HSCH_HSCH_DWRR] = 162816,
+ [GA_HSCH_HSCH_MISC] = 163104,
+ [GA_HSCH_HSCH_LEAK_LISTS] = 161664,
+ [GA_HSCH_SYSTEM] = 184000,
+ [GA_HSCH_MMGT] = 162368,
+ [GA_HSCH_TAS_CONFIG] = 162384,
+ [GA_PTP_PTP_CFG] = 320,
+ [GA_PTP_PTP_TOD_DOMAINS] = 336,
+ [GA_PTP_PHASE_DETECTOR_CTRL] = 420,
+ [GA_QSYS_CALCFG] = 2304,
+ [GA_QSYS_RAM_CTRL] = 2344,
+ [GA_REW_COMMON] = 387264,
+ [GA_REW_PORT] = 360448,
+ [GA_REW_VOE_PORT_LM_CNT] = 393216,
+ [GA_REW_RAM_CTRL] = 378696,
+ [GA_VOP_RAM_CTRL] = 279176,
+ [GA_XQS_SYSTEM] = 6768,
+ [GA_XQS_QLIMIT_SHR] = 7936,
+};
+
+const unsigned int sparx5_gcnt[GCNT_LAST] = {
+ [GC_ANA_AC_SRC] = 102,
+ [GC_ANA_AC_PGID] = 3290,
+ [GC_ANA_AC_TSN_SF_CFG] = 1024,
+ [GC_ANA_AC_STAT_CNT_CFG_PORT] = 70,
+ [GC_ANA_ACL_KEY_SEL] = 134,
+ [GC_ANA_ACL_CNT_A] = 4096,
+ [GC_ANA_ACL_CNT_B] = 4096,
+ [GC_ANA_AC_SDLB_LBGRP_TBL] = 10,
+ [GC_ANA_AC_SDLB_LBSET_TBL] = 4616,
+ [GC_ANA_CL_PORT] = 70,
+ [GC_ANA_L2_ISDX_LIMIT] = 1536,
+ [GC_ANA_L2_ISDX] = 4096,
+ [GC_ANA_L3_VLAN] = 5120,
+ [GC_ASM_DEV_STATISTICS] = 65,
+ [GC_EACL_ES2_KEY_SELECT_PROFILE] = 138,
+ [GC_EACL_CNT_TBL] = 2048,
+ [GC_GCB_SIO_CTRL] = 3,
+ [GC_HSCH_HSCH_CFG] = 5040,
+ [GC_HSCH_HSCH_DWRR] = 72,
+ [GC_PTP_PTP_PINS] = 5,
+ [GC_PTP_PHASE_DETECTOR_CTRL] = 5,
+ [GC_REW_PORT] = 70,
+ [GC_REW_VOE_PORT_LM_CNT] = 520,
+};
+
+const unsigned int sparx5_gsize[GSIZE_LAST] = {
+ [GW_ANA_AC_SRC] = 16,
+ [GW_ANA_L2_COMMON] = 700,
+ [GW_ASM_CFG] = 1088,
+ [GW_CPU_CPU_REGS] = 204,
+ [GW_DEV2G5_PHASE_DETECTOR_CTRL] = 8,
+ [GW_FDMA_FDMA] = 428,
+ [GW_GCB_CHIP_REGS] = 424,
+ [GW_HSCH_TAS_CONFIG] = 12,
+ [GW_PTP_PHASE_DETECTOR_CTRL] = 8,
+ [GW_QSYS_PAUSE_CFG] = 1128,
+};
+
+const unsigned int sparx5_fpos[FPOS_LAST] = {
+ [FP_CPU_PROC_CTRL_AARCH64_MODE_ENA] = 12,
+ [FP_CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS] = 11,
+ [FP_CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS] = 10,
+ [FP_CPU_PROC_CTRL_BE_EXCEP_MODE] = 9,
+ [FP_CPU_PROC_CTRL_VINITHI] = 8,
+ [FP_CPU_PROC_CTRL_CFGTE] = 7,
+ [FP_CPU_PROC_CTRL_CP15S_DISABLE] = 6,
+ [FP_CPU_PROC_CTRL_PROC_CRYPTO_DISABLE] = 5,
+ [FP_CPU_PROC_CTRL_L2_FLUSH_REQ] = 1,
+ [FP_DEV2G5_PHAD_CTRL_PHAD_ENA] = 7,
+ [FP_DEV2G5_PHAD_CTRL_PHAD_FAILED] = 6,
+ [FP_FDMA_CH_CFG_CH_XTR_STATUS_MODE] = 7,
+ [FP_FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY] = 6,
+ [FP_FDMA_CH_CFG_CH_INJ_PORT] = 5,
+ [FP_PTP_PTP_PIN_CFG_PTP_PIN_ACTION] = 26,
+ [FP_PTP_PTP_PIN_CFG_PTP_PIN_SYNC] = 24,
+ [FP_PTP_PTP_PIN_CFG_PTP_PIN_INV_POL] = 23,
+ [FP_PTP_PHAD_CTRL_PHAD_ENA] = 7,
+ [FP_PTP_PHAD_CTRL_PHAD_FAILED] = 6,
+};
+
+const unsigned int sparx5_fsize[FSIZE_LAST] = {
+ [FW_ANA_AC_PROBE_PORT_CFG_PROBE_PORT_MASK] = 32,
+ [FW_ANA_AC_SRC_CFG_PORT_MASK] = 32,
+ [FW_ANA_AC_PGID_CFG_PORT_MASK] = 32,
+ [FW_ANA_AC_TSN_SF_PORT_NUM] = 9,
+ [FW_ANA_AC_TSN_SF_CFG_TSN_SGID] = 10,
+ [FW_ANA_AC_TSN_SF_STATUS_TSN_SFID] = 10,
+ [FW_ANA_AC_SG_ACCESS_CTRL_SGID] = 10,
+ [FW_ANA_AC_PORT_SGE_CFG_MASK] = 16,
+ [FW_ANA_AC_SDLB_XLB_START_LBSET_START] = 13,
+ [FW_ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT] = 5,
+ [FW_ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT] = 13,
+ [FW_ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT] = 13,
+ [FW_ANA_AC_SDLB_XLB_NEXT_LBGRP] = 4,
+ [FW_ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR] = 13,
+ [FW_ANA_L2_AUTO_LRN_CFG_AUTO_LRN_ENA] = 32,
+ [FW_ANA_L2_DLB_CFG_DLB_IDX] = 13,
+ [FW_ANA_L2_TSN_CFG_TSN_SFID] = 10,
+ [FW_ANA_L3_VLAN_MASK_CFG_VLAN_PORT_MASK] = 32,
+ [FW_FDMA_CH_CFG_CH_DCB_DB_CNT] = 4,
+ [FW_GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL] = 9,
+ [FW_HSCH_SE_CFG_SE_DWRR_CNT] = 7,
+ [FW_HSCH_SE_CONNECT_SE_LEAK_LINK] = 16,
+ [FW_HSCH_SE_DLB_SENSE_SE_DLB_DPORT] = 7,
+ [FW_HSCH_HSCH_CFG_CFG_CFG_SE_IDX] = 13,
+ [FW_HSCH_HSCH_LEAK_CFG_LEAK_FIRST] = 16,
+ [FW_HSCH_FLUSH_CTRL_FLUSH_PORT] = 7,
+ [FW_HSCH_FLUSH_CTRL_FLUSH_HIER] = 16,
+ [FW_LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW] = 14,
+ [FW_LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX] = 11,
+ [FW_LRN_AUTOAGE_CFG_2_NEXT_ROW] = 14,
+ [FW_PTP_PTP_PIN_INTR_INTR_PTP] = 5,
+ [FW_PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA] = 5,
+ [FW_PTP_PTP_INTR_IDENT_INTR_PTP_IDENT] = 5,
+ [FW_PTP_PTP_PIN_CFG_PTP_PIN_SELECT] = 2,
+ [FW_QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL] = 7,
+ [FW_QRES_RES_CFG_WM_HIGH] = 12,
+ [FW_QRES_RES_STAT_MAXUSE] = 21,
+ [FW_QRES_RES_STAT_CUR_INUSE] = 21,
+ [FW_QSYS_PAUSE_CFG_PAUSE_START] = 12,
+ [FW_QSYS_PAUSE_CFG_PAUSE_STOP] = 12,
+ [FW_QSYS_ATOP_ATOP] = 12,
+ [FW_QSYS_ATOP_TOT_CFG_ATOP_TOT] = 12,
+ [FW_REW_RTAG_ETAG_CTRL_IPE_TBL] = 7,
+ [FW_XQS_STAT_CFG_STAT_VIEW] = 13,
+ [FW_XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP] = 15,
+ [FW_XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP] = 15,
+ [FW_XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP] = 15,
+ [FW_XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM] = 15,
+};
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_regs.h b/drivers/net/ethernet/microchip/sparx5/sparx5_regs.h
new file mode 100644
index 000000000000..ea28130c2341
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_regs.h
@@ -0,0 +1,247 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2024 Microchip Technology Inc.
+ */
+
+/* This file is autogenerated by cml-utils 2024-09-30 11:48:29 +0200.
+ * Commit ID: 9d07b8d19363f3cd3590ddb3f7a2e2768e16524b
+ */
+
+#ifndef _SPARX5_REGS_H_
+#define _SPARX5_REGS_H_
+
+/* These enumerated values are used to index the platform specific structs
+ * containing the addresses, counts, size and positions, of register groups,
+ * registers and fields.
+ */
+
+enum sparx5_tsize_enum {
+ TC_DEV10G,
+ TC_DEV2G5,
+ TC_DEV5G,
+ TC_PCS10G_BR,
+ TC_PCS5G_BR,
+ TSIZE_LAST,
+};
+
+enum sparx5_raddr_enum {
+ RA_CPU_PROC_CTRL,
+ RA_GCB_SOFT_RST,
+ RA_GCB_HW_SGPIO_TO_SD_MAP_CFG,
+ RADDR_LAST,
+};
+
+enum sparx5_rcnt_enum {
+ RC_ANA_AC_OWN_UPSID,
+ RC_ANA_ACL_VCAP_S2_CFG,
+ RC_ANA_ACL_OWN_UPSID,
+ RC_ANA_CL_OWN_UPSID,
+ RC_ANA_L2_OWN_UPSID,
+ RC_ASM_PORT_CFG,
+ RC_DSM_BUF_CFG,
+ RC_DSM_DEV_TX_STOP_WM_CFG,
+ RC_DSM_RX_PAUSE_CFG,
+ RC_DSM_MAC_CFG,
+ RC_DSM_MAC_ADDR_BASE_HIGH_CFG,
+ RC_DSM_MAC_ADDR_BASE_LOW_CFG,
+ RC_DSM_TAXI_CAL_CFG,
+ RC_GCB_HW_SGPIO_TO_SD_MAP_CFG,
+ RC_HSCH_PORT_MODE,
+ RC_QFWD_SWITCH_PORT_MODE,
+ RC_QSYS_PAUSE_CFG,
+ RC_QSYS_ATOP,
+ RC_QSYS_FWD_PRESSURE,
+ RC_QSYS_CAL_AUTO,
+ RC_REW_OWN_UPSID,
+ RC_REW_RTAG_ETAG_CTRL,
+ RCNT_LAST,
+};
+
+enum sparx5_gaddr_enum {
+ GA_ANA_AC_RAM_CTRL,
+ GA_ANA_AC_PS_COMMON,
+ GA_ANA_AC_MIRROR_PROBE,
+ GA_ANA_AC_SRC,
+ GA_ANA_AC_PGID,
+ GA_ANA_AC_TSN_SF,
+ GA_ANA_AC_TSN_SF_CFG,
+ GA_ANA_AC_TSN_SF_STATUS,
+ GA_ANA_AC_SG_ACCESS,
+ GA_ANA_AC_SG_CONFIG,
+ GA_ANA_AC_SG_STATUS,
+ GA_ANA_AC_SG_STATUS_STICKY,
+ GA_ANA_AC_STAT_GLOBAL_CFG_PORT,
+ GA_ANA_AC_STAT_CNT_CFG_PORT,
+ GA_ANA_AC_STAT_GLOBAL_CFG_ACL,
+ GA_ANA_ACL_COMMON,
+ GA_ANA_ACL_KEY_SEL,
+ GA_ANA_ACL_CNT_B,
+ GA_ANA_ACL_STICKY,
+ GA_ANA_AC_POL_POL_ALL_CFG,
+ GA_ANA_AC_POL_COMMON_BDLB,
+ GA_ANA_AC_POL_COMMON_BUM_SLB,
+ GA_ANA_AC_SDLB_LBGRP_TBL,
+ GA_ANA_CL_PORT,
+ GA_ANA_CL_COMMON,
+ GA_ANA_L2_COMMON,
+ GA_ANA_L3_COMMON,
+ GA_ANA_L3_VLAN_ARP_L3MC_STICKY,
+ GA_ASM_CFG,
+ GA_ASM_PFC_TIMER_CFG,
+ GA_ASM_LBK_WM_CFG,
+ GA_ASM_LBK_MISC_CFG,
+ GA_ASM_RAM_CTRL,
+ GA_EACL_ES2_KEY_SELECT_PROFILE,
+ GA_EACL_CNT_TBL,
+ GA_EACL_POL_CFG,
+ GA_EACL_ES2_STICKY,
+ GA_EACL_RAM_CTRL,
+ GA_GCB_SIO_CTRL,
+ GA_HSCH_HSCH_DWRR,
+ GA_HSCH_HSCH_MISC,
+ GA_HSCH_HSCH_LEAK_LISTS,
+ GA_HSCH_SYSTEM,
+ GA_HSCH_MMGT,
+ GA_HSCH_TAS_CONFIG,
+ GA_PTP_PTP_CFG,
+ GA_PTP_PTP_TOD_DOMAINS,
+ GA_PTP_PHASE_DETECTOR_CTRL,
+ GA_QSYS_CALCFG,
+ GA_QSYS_RAM_CTRL,
+ GA_REW_COMMON,
+ GA_REW_PORT,
+ GA_REW_VOE_PORT_LM_CNT,
+ GA_REW_RAM_CTRL,
+ GA_VOP_RAM_CTRL,
+ GA_XQS_SYSTEM,
+ GA_XQS_QLIMIT_SHR,
+ GADDR_LAST,
+};
+
+enum sparx5_gcnt_enum {
+ GC_ANA_AC_SRC,
+ GC_ANA_AC_PGID,
+ GC_ANA_AC_TSN_SF_CFG,
+ GC_ANA_AC_STAT_CNT_CFG_PORT,
+ GC_ANA_ACL_KEY_SEL,
+ GC_ANA_ACL_CNT_A,
+ GC_ANA_ACL_CNT_B,
+ GC_ANA_AC_SDLB_LBGRP_TBL,
+ GC_ANA_AC_SDLB_LBSET_TBL,
+ GC_ANA_CL_PORT,
+ GC_ANA_L2_ISDX_LIMIT,
+ GC_ANA_L2_ISDX,
+ GC_ANA_L3_VLAN,
+ GC_ASM_DEV_STATISTICS,
+ GC_EACL_ES2_KEY_SELECT_PROFILE,
+ GC_EACL_CNT_TBL,
+ GC_GCB_SIO_CTRL,
+ GC_HSCH_HSCH_CFG,
+ GC_HSCH_HSCH_DWRR,
+ GC_PTP_PTP_PINS,
+ GC_PTP_PHASE_DETECTOR_CTRL,
+ GC_REW_PORT,
+ GC_REW_VOE_PORT_LM_CNT,
+ GCNT_LAST,
+};
+
+enum sparx5_gsize_enum {
+ GW_ANA_AC_SRC,
+ GW_ANA_L2_COMMON,
+ GW_ASM_CFG,
+ GW_CPU_CPU_REGS,
+ GW_DEV2G5_PHASE_DETECTOR_CTRL,
+ GW_FDMA_FDMA,
+ GW_GCB_CHIP_REGS,
+ GW_HSCH_TAS_CONFIG,
+ GW_PTP_PHASE_DETECTOR_CTRL,
+ GW_QSYS_PAUSE_CFG,
+ GSIZE_LAST,
+};
+
+enum sparx5_fpos_enum {
+ FP_CPU_PROC_CTRL_AARCH64_MODE_ENA,
+ FP_CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS,
+ FP_CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS,
+ FP_CPU_PROC_CTRL_BE_EXCEP_MODE,
+ FP_CPU_PROC_CTRL_VINITHI,
+ FP_CPU_PROC_CTRL_CFGTE,
+ FP_CPU_PROC_CTRL_CP15S_DISABLE,
+ FP_CPU_PROC_CTRL_PROC_CRYPTO_DISABLE,
+ FP_CPU_PROC_CTRL_L2_FLUSH_REQ,
+ FP_DEV2G5_PHAD_CTRL_PHAD_ENA,
+ FP_DEV2G5_PHAD_CTRL_PHAD_FAILED,
+ FP_FDMA_CH_CFG_CH_XTR_STATUS_MODE,
+ FP_FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY,
+ FP_FDMA_CH_CFG_CH_INJ_PORT,
+ FP_PTP_PTP_PIN_CFG_PTP_PIN_ACTION,
+ FP_PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
+ FP_PTP_PTP_PIN_CFG_PTP_PIN_INV_POL,
+ FP_PTP_PHAD_CTRL_PHAD_ENA,
+ FP_PTP_PHAD_CTRL_PHAD_FAILED,
+ FPOS_LAST,
+};
+
+enum sparx5_fsize_enum {
+ FW_ANA_AC_PROBE_PORT_CFG_PROBE_PORT_MASK,
+ FW_ANA_AC_SRC_CFG_PORT_MASK,
+ FW_ANA_AC_PGID_CFG_PORT_MASK,
+ FW_ANA_AC_TSN_SF_PORT_NUM,
+ FW_ANA_AC_TSN_SF_CFG_TSN_SGID,
+ FW_ANA_AC_TSN_SF_STATUS_TSN_SFID,
+ FW_ANA_AC_SG_ACCESS_CTRL_SGID,
+ FW_ANA_AC_PORT_SGE_CFG_MASK,
+ FW_ANA_AC_SDLB_XLB_START_LBSET_START,
+ FW_ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT,
+ FW_ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT,
+ FW_ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT,
+ FW_ANA_AC_SDLB_XLB_NEXT_LBGRP,
+ FW_ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR,
+ FW_ANA_L2_AUTO_LRN_CFG_AUTO_LRN_ENA,
+ FW_ANA_L2_DLB_CFG_DLB_IDX,
+ FW_ANA_L2_TSN_CFG_TSN_SFID,
+ FW_ANA_L3_VLAN_MASK_CFG_VLAN_PORT_MASK,
+ FW_FDMA_CH_CFG_CH_DCB_DB_CNT,
+ FW_GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL,
+ FW_HSCH_SE_CFG_SE_DWRR_CNT,
+ FW_HSCH_SE_CONNECT_SE_LEAK_LINK,
+ FW_HSCH_SE_DLB_SENSE_SE_DLB_DPORT,
+ FW_HSCH_HSCH_CFG_CFG_CFG_SE_IDX,
+ FW_HSCH_HSCH_LEAK_CFG_LEAK_FIRST,
+ FW_HSCH_FLUSH_CTRL_FLUSH_PORT,
+ FW_HSCH_FLUSH_CTRL_FLUSH_HIER,
+ FW_LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW,
+ FW_LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX,
+ FW_LRN_AUTOAGE_CFG_2_NEXT_ROW,
+ FW_PTP_PTP_PIN_INTR_INTR_PTP,
+ FW_PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA,
+ FW_PTP_PTP_INTR_IDENT_INTR_PTP_IDENT,
+ FW_PTP_PTP_PIN_CFG_PTP_PIN_SELECT,
+ FW_QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL,
+ FW_QRES_RES_CFG_WM_HIGH,
+ FW_QRES_RES_STAT_MAXUSE,
+ FW_QRES_RES_STAT_CUR_INUSE,
+ FW_QSYS_PAUSE_CFG_PAUSE_START,
+ FW_QSYS_PAUSE_CFG_PAUSE_STOP,
+ FW_QSYS_ATOP_ATOP,
+ FW_QSYS_ATOP_TOT_CFG_ATOP_TOT,
+ FW_REW_RTAG_ETAG_CTRL_IPE_TBL,
+ FW_XQS_STAT_CFG_STAT_VIEW,
+ FW_XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP,
+ FW_XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP,
+ FW_XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP,
+ FW_XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM,
+ FSIZE_LAST,
+};
+
+extern const unsigned int sparx5_tsize[TSIZE_LAST];
+extern const unsigned int sparx5_raddr[RADDR_LAST];
+extern const unsigned int sparx5_rcnt[RCNT_LAST];
+extern const unsigned int sparx5_gaddr[GADDR_LAST];
+extern const unsigned int sparx5_gcnt[GCNT_LAST];
+extern const unsigned int sparx5_gsize[GSIZE_LAST];
+extern const unsigned int sparx5_fpos[FPOS_LAST];
+extern const unsigned int sparx5_fsize[FSIZE_LAST];
+
+#endif /* _SPARX5_REGS_H_ */
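These generated tables replace hard-coded register geometry: code indexes them with the enums above to obtain per-chip group addresses, replication counts and field widths. A small illustration using values taken directly from the Sparx5 tables in this patch:

#include "sparx5_regs.h"

/* Illustration only: PGID register group geometry on Sparx5. */
unsigned int pgid_base  = sparx5_gaddr[GA_ANA_AC_PGID];               /* 786432 */
unsigned int pgid_count = sparx5_gcnt[GC_ANA_AC_PGID];                /* 3290 entries */
unsigned int mask_width = sparx5_fsize[FW_ANA_AC_PGID_CFG_PORT_MASK]; /* 32 bits */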
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_sdlb.c b/drivers/net/ethernet/microchip/sparx5/sparx5_sdlb.c
index f5267218caeb..98a3f44c569c 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_sdlb.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_sdlb.c
@@ -20,17 +20,18 @@ struct sparx5_sdlb_group sdlb_groups[SPX5_SDLB_GROUP_CNT] = {
{ 5000000ULL, 8192 / 8, 64 } /* 5 M */
};
-int sparx5_sdlb_clk_hz_get(struct sparx5 *sparx5)
+struct sparx5_sdlb_group *sparx5_get_sdlb_group(int idx)
+{
+ return &sdlb_groups[idx];
+}
+
+u64 sparx5_sdlb_clk_hz_get(struct sparx5 *sparx5)
{
- u32 clk_per_100ps;
u64 clk_hz;
- clk_per_100ps = HSCH_SYS_CLK_PER_100PS_GET(spx5_rd(sparx5,
- HSCH_SYS_CLK_PER));
- if (!clk_per_100ps)
- clk_per_100ps = SPX5_CLK_PER_100PS_DEFAULT;
+ clk_hz = (10 * 1000 * 1000) /
+ (sparx5_clk_period(sparx5->coreclock) / 100);
- clk_hz = (10 * 1000 * 1000) / clk_per_100ps;
return clk_hz *= 1000;
}
@@ -178,14 +179,15 @@ static int sparx5_sdlb_group_get_count(struct sparx5 *sparx5, u32 group)
int sparx5_sdlb_group_get_by_rate(struct sparx5 *sparx5, u32 rate, u32 burst)
{
+ const struct sparx5_ops *ops = sparx5->data->ops;
const struct sparx5_sdlb_group *group;
u64 rate_bps;
int i, count;
rate_bps = rate * 1000;
- for (i = SPX5_SDLB_GROUP_CNT - 1; i >= 0; i--) {
- group = &sdlb_groups[i];
+ for (i = sparx5->data->consts->n_lb_groups - 1; i >= 0; i--) {
+ group = ops->get_sdlb_group(i);
count = sparx5_sdlb_group_get_count(sparx5, i);
@@ -208,7 +210,7 @@ int sparx5_sdlb_group_get_by_index(struct sparx5 *sparx5, u32 idx, u32 *group)
u32 itr, next;
int i;
- for (i = 0; i < SPX5_SDLB_GROUP_CNT; i++) {
+ for (i = 0; i < sparx5->data->consts->n_lb_groups; i++) {
if (sparx5_sdlb_group_is_empty(sparx5, i))
continue;
@@ -303,11 +305,12 @@ int sparx5_sdlb_group_del(struct sparx5 *sparx5, u32 group, u32 idx)
void sparx5_sdlb_group_init(struct sparx5 *sparx5, u64 max_rate, u32 min_burst,
u32 frame_size, u32 idx)
{
+ const struct sparx5_ops *ops = sparx5->data->ops;
u32 thres_shift, mask = 0x01, power = 0;
struct sparx5_sdlb_group *group;
u64 max_token;
- group = &sdlb_groups[idx];
+ group = ops->get_sdlb_group(idx);
/* Number of positions to right-shift LB's threshold value. */
while ((min_burst & mask) == 0) {
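The reworked sparx5_sdlb_clk_hz_get() derives the clock rate from the configured core clock instead of reading HSCH_SYS_CLK_PER. A worked example of the arithmetic, assuming sparx5_clk_period() returns the period in picoseconds (as the PTP code treats it):

/* Worked example (assumes sparx5_clk_period() returns picoseconds):
 *   500 MHz core clock -> period = 2000 ps
 *   (10 * 1000 * 1000) / (2000 / 100) = 500000
 *   500000 * 1000                     = 500000000 Hz
 */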
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
index 4af85d108a06..0a71abbd3da5 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
@@ -32,24 +32,34 @@ static int sparx5_port_attr_pre_bridge_flags(struct sparx5_port *port,
static void sparx5_port_update_mcast_ip_flood(struct sparx5_port *port, bool flood_flag)
{
bool should_flood = flood_flag || port->is_mrouter;
+ struct sparx5 *sparx5 = port->sparx5;
int pgid;
- for (pgid = PGID_IPV4_MC_DATA; pgid <= PGID_IPV6_MC_CTRL; pgid++)
+ for (pgid = sparx5_get_pgid(sparx5, PGID_IPV4_MC_DATA);
+ pgid <= sparx5_get_pgid(sparx5, PGID_IPV6_MC_CTRL); pgid++)
sparx5_pgid_update_mask(port, pgid, should_flood);
}
static void sparx5_port_attr_bridge_flags(struct sparx5_port *port,
struct switchdev_brport_flags flags)
{
+ struct sparx5 *sparx5 = port->sparx5;
+
if (flags.mask & BR_MCAST_FLOOD) {
- sparx5_pgid_update_mask(port, PGID_MC_FLOOD, !!(flags.val & BR_MCAST_FLOOD));
+ sparx5_pgid_update_mask(port,
+ sparx5_get_pgid(sparx5, PGID_MC_FLOOD),
+ !!(flags.val & BR_MCAST_FLOOD));
sparx5_port_update_mcast_ip_flood(port, !!(flags.val & BR_MCAST_FLOOD));
}
if (flags.mask & BR_FLOOD)
- sparx5_pgid_update_mask(port, PGID_UC_FLOOD, !!(flags.val & BR_FLOOD));
+ sparx5_pgid_update_mask(port,
+ sparx5_get_pgid(sparx5, PGID_UC_FLOOD),
+ !!(flags.val & BR_FLOOD));
if (flags.mask & BR_BCAST_FLOOD)
- sparx5_pgid_update_mask(port, PGID_BCAST, !!(flags.val & BR_BCAST_FLOOD));
+ sparx5_pgid_update_mask(port,
+ sparx5_get_pgid(sparx5, PGID_BCAST),
+ !!(flags.val & BR_BCAST_FLOOD));
}
static void sparx5_attr_stp_state_set(struct sparx5_port *port,
@@ -166,6 +176,7 @@ static int sparx5_port_bridge_join(struct sparx5_port *port,
struct net_device *bridge,
struct netlink_ext_ack *extack)
{
+ struct switchdev_brport_flags flags = {0};
struct sparx5 *sparx5 = port->sparx5;
struct net_device *ndev = port->ndev;
int err;
@@ -190,11 +201,16 @@ static int sparx5_port_bridge_join(struct sparx5_port *port,
/* Remove standalone port entry */
sparx5_mact_forget(sparx5, ndev->dev_addr, 0);
- /* Port enters in bridge mode therefor don't need to copy to CPU
+ /* Port enters bridge mode, therefore we don't need to copy to CPU
* frames for multicast in case the bridge is not requesting them
*/
__dev_mc_unsync(ndev, sparx5_mc_unsync);
+ /* Enable uc/mc/bc flooding */
+ flags.mask = BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
+ flags.val = flags.mask;
+ sparx5_port_attr_bridge_flags(port, flags);
+
return 0;
err_switchdev_offload:
@@ -205,6 +221,7 @@ err_switchdev_offload:
static void sparx5_port_bridge_leave(struct sparx5_port *port,
struct net_device *bridge)
{
+ struct switchdev_brport_flags flags = {0};
struct sparx5 *sparx5 = port->sparx5;
switchdev_bridge_port_unoffload(port->ndev, NULL, NULL, NULL);
@@ -219,10 +236,16 @@ static void sparx5_port_bridge_leave(struct sparx5_port *port,
port->vid = NULL_VID;
/* Forward frames to CPU */
- sparx5_mact_learn(sparx5, PGID_CPU, port->ndev->dev_addr, 0);
+ sparx5_mact_learn(sparx5, sparx5_get_pgid(sparx5, PGID_CPU),
+ port->ndev->dev_addr, 0);
/* Port enters host mode, therefore restore the mc list */
__dev_mc_sync(port->ndev, sparx5_mc_sync, sparx5_mc_unsync);
+
+ /* Disable uc/mc/bc flooding */
+ flags.mask = BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
+ flags.val = 0;
+ sparx5_port_attr_bridge_flags(port, flags);
}
static int sparx5_port_changeupper(struct net_device *dev,
@@ -254,7 +277,8 @@ static int sparx5_port_add_addr(struct net_device *dev, bool up)
u16 vid = port->pvid;
if (up)
- sparx5_mact_learn(sparx5, PGID_CPU, port->ndev->dev_addr, vid);
+ sparx5_mact_learn(sparx5, sparx5_get_pgid(sparx5, PGID_CPU),
+ port->ndev->dev_addr, vid);
else
sparx5_mact_forget(sparx5, port->ndev->dev_addr, vid);
@@ -330,7 +354,8 @@ static void sparx5_switchdev_bridge_fdb_event_work(struct work_struct *work)
switch (switchdev_work->event) {
case SWITCHDEV_FDB_ADD_TO_DEVICE:
if (host_addr)
- sparx5_add_mact_entry(sparx5, dev, PGID_CPU,
+ sparx5_add_mact_entry(sparx5, dev,
+ sparx5_get_pgid(sparx5, PGID_CPU),
fdb_info->addr, vid);
else
sparx5_add_mact_entry(sparx5, port->ndev, port->portno,
@@ -418,8 +443,8 @@ static int sparx5_handle_port_vlan_add(struct net_device *dev,
switchdev_blocking_nb);
/* Flood broadcast to CPU */
- sparx5_mact_learn(sparx5, PGID_BCAST, dev->broadcast,
- v->vid);
+ sparx5_mact_learn(sparx5, sparx5_get_pgid(sparx5, PGID_BCAST),
+ dev->broadcast, v->vid);
return 0;
}
@@ -547,7 +572,7 @@ static int sparx5_handle_port_mdb_add(struct net_device *dev,
/* Add any mrouter ports to the new entry */
if (is_new && ether_addr_is_ip_mcast(v->addr))
- for (i = 0; i < SPX5_PORTS; i++)
+ for (i = 0; i < spx5->data->consts->n_ports; i++)
if (spx5->ports[i] && spx5->ports[i]->is_mrouter)
sparx5_pgid_update_mask(spx5->ports[i],
entry->pgid_idx,
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c b/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c
index e80f3166db7d..28b2514c8330 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c
@@ -60,8 +60,8 @@ static int sparx5_tc_setup_block(struct net_device *ndev,
cb, ndev, ndev, false);
}
-static void sparx5_tc_get_layer_and_idx(u32 parent, u32 portno, u32 *layer,
- u32 *idx)
+static void sparx5_tc_get_layer_and_idx(struct sparx5 *sparx5, u32 parent,
+ u32 portno, u32 *layer, u32 *idx)
{
if (parent == TC_H_ROOT) {
*layer = 2;
@@ -90,8 +90,8 @@ static int sparx5_tc_setup_qdisc_tbf(struct net_device *ndev,
struct sparx5_port *port = netdev_priv(ndev);
u32 layer, se_idx;
- sparx5_tc_get_layer_and_idx(qopt->parent, port->portno, &layer,
- &se_idx);
+ sparx5_tc_get_layer_and_idx(port->sparx5, qopt->parent, port->portno,
+ &layer, &se_idx);
switch (qopt->command) {
case TC_TBF_REPLACE:
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c
index 55f255a3c9db..4dc1ebd5d510 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c
@@ -159,13 +159,14 @@ out:
static int
sparx5_tc_flower_handler_control_usage(struct vcap_tc_flower_parse_usage *st)
{
+ struct netlink_ext_ack *extack = st->fco->common.extack;
struct flow_match_control mt;
u32 value, mask;
int err = 0;
flow_rule_match_control(st->frule, &mt);
- if (mt.mask->flags) {
+ if (mt.mask->flags & (FLOW_DIS_IS_FRAGMENT | FLOW_DIS_FIRST_FRAG)) {
u8 is_frag_key = !!(mt.key->flags & FLOW_DIS_IS_FRAGMENT);
u8 is_frag_mask = !!(mt.mask->flags & FLOW_DIS_IS_FRAGMENT);
u8 is_frag_idx = (is_frag_key << 1) | is_frag_mask;
@@ -178,7 +179,7 @@ sparx5_tc_flower_handler_control_usage(struct vcap_tc_flower_parse_usage *st)
u8 vdt = sparx5_vcap_frag_map[is_frag_idx][first_frag_idx];
if (vdt == FRAG_INVAL) {
- NL_SET_ERR_MSG_MOD(st->fco->common.extack,
+ NL_SET_ERR_MSG_MOD(extack,
"Match on invalid fragment flag combination");
return -EINVAL;
}
@@ -190,16 +191,19 @@ sparx5_tc_flower_handler_control_usage(struct vcap_tc_flower_parse_usage *st)
err = vcap_rule_add_key_u32(st->vrule,
VCAP_KF_L3_FRAGMENT_TYPE,
value, mask);
- if (err)
- goto out;
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "ip_frag parse error");
+ return err;
+ }
}
- st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL);
+ if (!flow_rule_is_supp_control_flags(FLOW_DIS_IS_FRAGMENT |
+ FLOW_DIS_FIRST_FRAG,
+ mt.mask->flags, extack))
+ return -EOPNOTSUPP;
- return err;
+ st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL);
-out:
- NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_frag parse error");
return err;
}
@@ -781,7 +785,9 @@ static int sparx5_tc_flower_psfp_setup(struct sparx5 *sparx5,
* allocate a stream gate that is always open.
*/
if (sg_idx < 0) {
- sg_idx = sparx5_pool_idx_to_id(SPX5_PSFP_SG_OPEN);
+ /* Always-open stream gate is always the last */
+ sg_idx = sparx5_pool_idx_to_id(sparx5->data->consts->n_gates -
+ 1);
sg->ipv = 0; /* Disabled */
sg->cycletime = SPX5_PSFP_SG_CYCLE_TIME_DEFAULT;
sg->num_entries = 1;
@@ -1023,6 +1029,64 @@ static int sparx5_tc_action_vlan_push(struct vcap_admin *admin,
return err;
}
+static void sparx5_tc_flower_set_port_mask(struct vcap_u72_action *ports,
+ struct net_device *ndev)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ int byidx = port->portno / BITS_PER_BYTE;
+ int biidx = port->portno % BITS_PER_BYTE;
+
+ ports->value[byidx] |= BIT(biidx);
+}
+
+static int sparx5_tc_action_mirred(struct vcap_admin *admin,
+ struct vcap_rule *vrule,
+ struct flow_cls_offload *fco,
+ struct flow_action_entry *act)
+{
+ struct vcap_u72_action ports = {0};
+ int err;
+
+ if (admin->vtype != VCAP_TYPE_IS0 && admin->vtype != VCAP_TYPE_IS2) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Mirror action not supported in this VCAP");
+ return -EOPNOTSUPP;
+ }
+
+ err = vcap_rule_add_action_u32(vrule, VCAP_AF_MASK_MODE,
+ SPX5_PMM_OR_DSTMASK);
+ if (err)
+ return err;
+
+ sparx5_tc_flower_set_port_mask(&ports, act->dev);
+
+ return vcap_rule_add_action_u72(vrule, VCAP_AF_PORT_MASK, &ports);
+}
+
+static int sparx5_tc_action_redirect(struct vcap_admin *admin,
+ struct vcap_rule *vrule,
+ struct flow_cls_offload *fco,
+ struct flow_action_entry *act)
+{
+ struct vcap_u72_action ports = {0};
+ int err;
+
+ if (admin->vtype != VCAP_TYPE_IS0 && admin->vtype != VCAP_TYPE_IS2) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Redirect action not supported in this VCAP");
+ return -EOPNOTSUPP;
+ }
+
+ err = vcap_rule_add_action_u32(vrule, VCAP_AF_MASK_MODE,
+ SPX5_PMM_REPLACE_ALL);
+ if (err)
+ return err;
+
+ sparx5_tc_flower_set_port_mask(&ports, act->dev);
+
+ return vcap_rule_add_action_u72(vrule, VCAP_AF_PORT_MASK, &ports);
+}
+
/* Remove rule keys that may prevent templates from matching a keyset */
static void sparx5_tc_flower_simplify_rule(struct vcap_admin *admin,
struct vcap_rule *vrule,
@@ -1169,6 +1233,16 @@ static int sparx5_tc_flower_replace(struct net_device *ndev,
if (err)
goto out;
break;
+ case FLOW_ACTION_MIRRED:
+ err = sparx5_tc_action_mirred(admin, vrule, fco, act);
+ if (err)
+ goto out;
+ break;
+ case FLOW_ACTION_REDIRECT:
+ err = sparx5_tc_action_redirect(admin, vrule, fco, act);
+ if (err)
+ goto out;
+ break;
case FLOW_ACTION_ACCEPT:
err = sparx5_tc_set_actionset(admin, vrule);
if (err)
@@ -1210,6 +1284,11 @@ static int sparx5_tc_flower_replace(struct net_device *ndev,
/* Setup PSFP */
if (tc_sg_idx >= 0 || tc_pol_idx >= 0) {
+ if (!sparx5_has_feature(sparx5, SPX5_FEATURE_PSFP)) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
err = sparx5_tc_flower_psfp_setup(sparx5, vrule, tc_sg_idx,
tc_pol_idx, &sg, &fm, &sf);
if (err)
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_matchall.c b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_matchall.c
index d88a93f22606..6b4d1d7b9730 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_matchall.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_matchall.c
@@ -11,11 +11,44 @@
#include "sparx5_main.h"
#include "sparx5_vcap_impl.h"
+static struct sparx5_mall_entry *
+sparx5_tc_matchall_entry_find(struct list_head *entries, unsigned long cookie)
+{
+ struct sparx5_mall_entry *entry;
+
+ list_for_each_entry(entry, entries, list) {
+ if (entry->cookie == cookie)
+ return entry;
+ }
+
+ return NULL;
+}
+
+static void sparx5_tc_matchall_parse_action(struct sparx5_port *port,
+ struct sparx5_mall_entry *entry,
+ struct flow_action_entry *action,
+ bool ingress,
+ unsigned long cookie)
+{
+ entry->port = port;
+ entry->type = action->id;
+ entry->ingress = ingress;
+ entry->cookie = cookie;
+}
+
+static void
+sparx5_tc_matchall_parse_mirror_action(struct sparx5_mall_entry *entry,
+ struct flow_action_entry *action)
+{
+ entry->mirror.port = netdev_priv(action->dev);
+}
+
static int sparx5_tc_matchall_replace(struct net_device *ndev,
struct tc_cls_matchall_offload *tmo,
bool ingress)
{
struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5_mall_entry *mall_entry;
struct flow_action_entry *action;
struct sparx5 *sparx5;
int err;
@@ -27,8 +60,45 @@ static int sparx5_tc_matchall_replace(struct net_device *ndev,
}
action = &tmo->rule->action.entries[0];
+ mall_entry = kzalloc(sizeof(*mall_entry), GFP_KERNEL);
+ if (!mall_entry)
+ return -ENOMEM;
+
+ sparx5_tc_matchall_parse_action(port,
+ mall_entry,
+ action,
+ ingress,
+ tmo->cookie);
+
sparx5 = port->sparx5;
switch (action->id) {
+ case FLOW_ACTION_MIRRED:
+ sparx5_tc_matchall_parse_mirror_action(mall_entry, action);
+ err = sparx5_mirror_add(mall_entry);
+ if (err) {
+ switch (err) {
+ case -EEXIST:
+ NL_SET_ERR_MSG_MOD(tmo->common.extack,
+ "Mirroring already exists");
+ break;
+ case -EINVAL:
+ NL_SET_ERR_MSG_MOD(tmo->common.extack,
+ "Cannot mirror a monitor port");
+ break;
+ case -ENOENT:
+ NL_SET_ERR_MSG_MOD(tmo->common.extack,
+ "No more mirror probes available");
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(tmo->common.extack,
+ "Unknown error");
+ break;
+ }
+ return err;
+ }
+ /* Get baseline stats for this port */
+ sparx5_mirror_stats(mall_entry, &tmo->stats);
+ break;
case FLOW_ACTION_GOTO:
err = vcap_enable_lookups(sparx5->vcap_ctrl, ndev,
tmo->common.chain_index,
@@ -59,6 +129,9 @@ static int sparx5_tc_matchall_replace(struct net_device *ndev,
NL_SET_ERR_MSG_MOD(tmo->common.extack, "Unsupported action");
return -EOPNOTSUPP;
}
+
+ list_add_tail(&mall_entry->list, &sparx5->mall_entries);
+
return 0;
}
@@ -67,19 +140,51 @@ static int sparx5_tc_matchall_destroy(struct net_device *ndev,
bool ingress)
{
struct sparx5_port *port = netdev_priv(ndev);
- struct sparx5 *sparx5;
- int err;
+ struct sparx5 *sparx5 = port->sparx5;
+ struct sparx5_mall_entry *entry;
+ int err = 0;
- sparx5 = port->sparx5;
- if (!tmo->rule && tmo->cookie) {
+ entry = sparx5_tc_matchall_entry_find(&sparx5->mall_entries,
+ tmo->cookie);
+ if (!entry)
+ return -ENOENT;
+
+ if (entry->type == FLOW_ACTION_MIRRED) {
+ sparx5_mirror_del(entry);
+ } else if (entry->type == FLOW_ACTION_GOTO) {
err = vcap_enable_lookups(sparx5->vcap_ctrl, ndev,
0, 0, tmo->cookie, false);
- if (err)
- return err;
- return 0;
+ } else {
+ NL_SET_ERR_MSG_MOD(tmo->common.extack, "Unsupported action");
+ err = -EOPNOTSUPP;
}
- NL_SET_ERR_MSG_MOD(tmo->common.extack, "Unsupported action");
- return -EOPNOTSUPP;
+
+ list_del(&entry->list);
+
+ return err;
+}
+
+static int sparx5_tc_matchall_stats(struct net_device *ndev,
+ struct tc_cls_matchall_offload *tmo,
+ bool ingress)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+ struct sparx5_mall_entry *entry;
+
+ entry = sparx5_tc_matchall_entry_find(&sparx5->mall_entries,
+ tmo->cookie);
+ if (!entry)
+ return -ENOENT;
+
+ if (entry->type == FLOW_ACTION_MIRRED) {
+ sparx5_mirror_stats(entry, &tmo->stats);
+ } else {
+ NL_SET_ERR_MSG_MOD(tmo->common.extack, "Unsupported action");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
}
int sparx5_tc_matchall(struct net_device *ndev,
@@ -91,6 +196,8 @@ int sparx5_tc_matchall(struct net_device *ndev,
return sparx5_tc_matchall_replace(ndev, tmo, ingress);
case TC_CLSMATCHALL_DESTROY:
return sparx5_tc_matchall_destroy(ndev, tmo, ingress);
+ case TC_CLSMATCHALL_STATS:
+ return sparx5_tc_matchall_stats(ndev, tmo, ingress);
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.h b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.h
index 7d106f1276fe..e68f5639a40a 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.h
@@ -10,6 +10,8 @@
#ifndef __SPARX5_VCAP_AG_API_H__
#define __SPARX5_VCAP_AG_API_H__
+#include "vcap_api.h"
+
/* VCAPs */
extern const struct vcap_info sparx5_vcaps[];
extern const struct vcap_statistics sparx5_vcap_stats;
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c
index 187efa1fc904..25066ddb8d4d 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c
@@ -17,7 +17,6 @@
#define SUPER_VCAP_BLK_SIZE 3072 /* addresses per Super VCAP block */
#define STREAMSIZE (64 * 4) /* bytes in the VCAP cache area */
-#define SPARX5_IS2_LOOKUPS 4
#define VCAP_IS2_KEYSEL(_ena, _noneth, _v4_mc, _v4_uc, _v6_mc, _v6_uc, _arp) \
(ANA_ACL_VCAP_S2_KEY_SEL_KEY_SEL_ENA_SET(_ena) | \
ANA_ACL_VCAP_S2_KEY_SEL_NON_ETH_KEY_SEL_SET(_noneth) | \
@@ -27,7 +26,6 @@
ANA_ACL_VCAP_S2_KEY_SEL_IP6_UC_KEY_SEL_SET(_v6_uc) | \
ANA_ACL_VCAP_S2_KEY_SEL_ARP_KEY_SEL_SET(_arp))
-#define SPARX5_IS0_LOOKUPS 6
#define VCAP_IS0_KEYSEL(_ena, _etype, _ipv4, _ipv6, _mpls_uc, _mpls_mc, _mlbs) \
(ANA_CL_ADV_CL_CFG_LOOKUP_ENA_SET(_ena) | \
ANA_CL_ADV_CL_CFG_ETYPE_CLM_KEY_SEL_SET(_etype) | \
@@ -37,31 +35,17 @@
ANA_CL_ADV_CL_CFG_MPLS_MC_CLM_KEY_SEL_SET(_mpls_mc) | \
ANA_CL_ADV_CL_CFG_MLBS_CLM_KEY_SEL_SET(_mlbs))
-#define SPARX5_ES0_LOOKUPS 1
#define VCAP_ES0_KEYSEL(_key) (REW_RTAG_ETAG_CTRL_ES0_ISDX_KEY_ENA_SET(_key))
#define SPARX5_STAT_ESDX_GRN_PKTS 0x300
#define SPARX5_STAT_ESDX_YEL_PKTS 0x301
-#define SPARX5_ES2_LOOKUPS 2
#define VCAP_ES2_KEYSEL(_ena, _arp, _ipv4, _ipv6) \
(EACL_VCAP_ES2_KEY_SEL_KEY_ENA_SET(_ena) | \
EACL_VCAP_ES2_KEY_SEL_ARP_KEY_SEL_SET(_arp) | \
EACL_VCAP_ES2_KEY_SEL_IP4_KEY_SEL_SET(_ipv4) | \
EACL_VCAP_ES2_KEY_SEL_IP6_KEY_SEL_SET(_ipv6))
-static struct sparx5_vcap_inst {
- enum vcap_type vtype; /* type of vcap */
- int vinst; /* instance number within the same type */
- int lookups; /* number of lookups in this vcap type */
- int lookups_per_instance; /* number of lookups in this instance */
- int first_cid; /* first chain id in this vcap */
- int last_cid; /* last chain id in this vcap */
- int count; /* number of available addresses, not in super vcap */
- int map_id; /* id in the super vcap block mapping (if applicable) */
- int blockno; /* starting block in super vcap (if applicable) */
- int blocks; /* number of blocks in super vcap (if applicable) */
- bool ingress; /* is vcap in the ingress path */
-} sparx5_vcap_inst_cfg[] = {
+const struct sparx5_vcap_inst sparx5_vcap_inst_cfg[] = {
{
.vtype = VCAP_TYPE_IS0, /* CLM-0 */
.vinst = 0,
@@ -1507,7 +1491,7 @@ static void sparx5_vcap_move(struct net_device *ndev, struct vcap_admin *admin,
}
}
-static struct vcap_operations sparx5_vcap_ops = {
+static const struct vcap_operations sparx5_vcap_ops = {
.validate_keyset = sparx5_vcap_validate_keyset,
.add_default_fields = sparx5_vcap_add_default_fields,
.cache_erase = sparx5_vcap_cache_erase,
@@ -1793,6 +1777,7 @@ void sparx5_vcap_set_port_keyset(struct net_device *ndev,
static void sparx5_vcap_is0_port_key_selection(struct sparx5 *sparx5,
struct vcap_admin *admin)
{
+ const struct sparx5_consts *consts = sparx5->data->consts;
int portno, lookup;
u32 keysel;
@@ -1804,7 +1789,7 @@ static void sparx5_vcap_is0_port_key_selection(struct sparx5 *sparx5,
VCAP_IS0_PS_MPLS_FOLLOW_ETYPE,
VCAP_IS0_PS_MLBS_FOLLOW_ETYPE);
for (lookup = 0; lookup < admin->lookups; ++lookup) {
- for (portno = 0; portno < SPX5_PORTS; ++portno) {
+ for (portno = 0; portno < consts->n_ports; ++portno) {
spx5_wr(keysel, sparx5,
ANA_CL_ADV_CL_CFG(portno, lookup));
spx5_rmw(ANA_CL_ADV_CL_CFG_LOOKUP_ENA,
@@ -1819,6 +1804,7 @@ static void sparx5_vcap_is0_port_key_selection(struct sparx5 *sparx5,
static void sparx5_vcap_is2_port_key_selection(struct sparx5 *sparx5,
struct vcap_admin *admin)
{
+ const struct sparx5_consts *consts = sparx5->data->consts;
int portno, lookup;
u32 keysel;
@@ -1829,13 +1815,13 @@ static void sparx5_vcap_is2_port_key_selection(struct sparx5 *sparx5,
VCAP_IS2_PS_IPV6_UC_IP_7TUPLE,
VCAP_IS2_PS_ARP_ARP);
for (lookup = 0; lookup < admin->lookups; ++lookup) {
- for (portno = 0; portno < SPX5_PORTS; ++portno) {
+ for (portno = 0; portno < consts->n_ports; ++portno) {
spx5_wr(keysel, sparx5,
ANA_ACL_VCAP_S2_KEY_SEL(portno, lookup));
}
}
/* IS2 lookups are in bit 0:3 */
- for (portno = 0; portno < SPX5_PORTS; ++portno)
+ for (portno = 0; portno < consts->n_ports; ++portno)
spx5_rmw(ANA_ACL_VCAP_S2_CFG_SEC_ENA_SET(0xf),
ANA_ACL_VCAP_S2_CFG_SEC_ENA,
sparx5,
@@ -1846,11 +1832,12 @@ static void sparx5_vcap_is2_port_key_selection(struct sparx5 *sparx5,
static void sparx5_vcap_es0_port_key_selection(struct sparx5 *sparx5,
struct vcap_admin *admin)
{
+ const struct sparx5_consts *consts = sparx5->data->consts;
int portno;
u32 keysel;
keysel = VCAP_ES0_KEYSEL(VCAP_ES0_PS_FORCE_ISDX_LOOKUPS);
- for (portno = 0; portno < SPX5_PORTS; ++portno)
+ for (portno = 0; portno < consts->n_ports; ++portno)
spx5_rmw(keysel, REW_RTAG_ETAG_CTRL_ES0_ISDX_KEY_ENA,
sparx5, REW_RTAG_ETAG_CTRL(portno));
@@ -1862,6 +1849,7 @@ static void sparx5_vcap_es0_port_key_selection(struct sparx5 *sparx5,
static void sparx5_vcap_es2_port_key_selection(struct sparx5 *sparx5,
struct vcap_admin *admin)
{
+ const struct sparx5_consts *consts = sparx5->data->consts;
int portno, lookup;
u32 keysel;
@@ -1869,7 +1857,7 @@ static void sparx5_vcap_es2_port_key_selection(struct sparx5 *sparx5,
VCAP_ES2_PS_IPV4_IP4_TCP_UDP_OTHER,
VCAP_ES2_PS_IPV6_IP_7TUPLE);
for (lookup = 0; lookup < admin->lookups; ++lookup)
- for (portno = 0; portno < SPX5_PORTS; ++portno)
+ for (portno = 0; portno < consts->n_ports; ++portno)
spx5_wr(keysel, sparx5,
EACL_VCAP_ES2_KEY_SEL(portno, lookup));
}
@@ -1901,19 +1889,20 @@ static void sparx5_vcap_port_key_selection(struct sparx5 *sparx5,
static void sparx5_vcap_port_key_deselection(struct sparx5 *sparx5,
struct vcap_admin *admin)
{
+ const struct sparx5_consts *consts = sparx5->data->consts;
int portno, lookup;
switch (admin->vtype) {
case VCAP_TYPE_IS0:
for (lookup = 0; lookup < admin->lookups; ++lookup)
- for (portno = 0; portno < SPX5_PORTS; ++portno)
+ for (portno = 0; portno < consts->n_ports; ++portno)
spx5_rmw(ANA_CL_ADV_CL_CFG_LOOKUP_ENA_SET(0),
ANA_CL_ADV_CL_CFG_LOOKUP_ENA,
sparx5,
ANA_CL_ADV_CL_CFG(portno, lookup));
break;
case VCAP_TYPE_IS2:
- for (portno = 0; portno < SPX5_PORTS; ++portno)
+ for (portno = 0; portno < consts->n_ports; ++portno)
spx5_rmw(ANA_ACL_VCAP_S2_CFG_SEC_ENA_SET(0),
ANA_ACL_VCAP_S2_CFG_SEC_ENA,
sparx5,
@@ -1925,7 +1914,7 @@ static void sparx5_vcap_port_key_deselection(struct sparx5 *sparx5,
break;
case VCAP_TYPE_ES2:
for (lookup = 0; lookup < admin->lookups; ++lookup)
- for (portno = 0; portno < SPX5_PORTS; ++portno)
+ for (portno = 0; portno < consts->n_ports; ++portno)
spx5_rmw(EACL_VCAP_ES2_KEY_SEL_KEY_ENA_SET(0),
EACL_VCAP_ES2_KEY_SEL_KEY_ENA,
sparx5,
@@ -2042,6 +2031,7 @@ static void sparx5_vcap_block_alloc(struct sparx5 *sparx5,
/* Allocate a vcap control and vcap instances and configure the system */
int sparx5_vcap_init(struct sparx5 *sparx5)
{
+ const struct sparx5_consts *consts = sparx5->data->consts;
const struct sparx5_vcap_inst *cfg;
struct vcap_control *ctrl;
struct vcap_admin *admin;
@@ -2063,14 +2053,14 @@ int sparx5_vcap_init(struct sparx5 *sparx5)
sparx5->vcap_ctrl = ctrl;
/* select the sparx5 VCAP model */
- ctrl->vcaps = sparx5_vcaps;
- ctrl->stats = &sparx5_vcap_stats;
+ ctrl->vcaps = consts->vcaps;
+ ctrl->stats = consts->vcap_stats;
/* Setup callbacks to allow the API to use the VCAP HW */
ctrl->ops = &sparx5_vcap_ops;
INIT_LIST_HEAD(&ctrl->list);
for (idx = 0; idx < ARRAY_SIZE(sparx5_vcap_inst_cfg); ++idx) {
- cfg = &sparx5_vcap_inst_cfg[idx];
+ cfg = &consts->vcaps_cfg[idx];
admin = sparx5_vcap_admin_alloc(sparx5, ctrl, cfg);
if (IS_ERR(admin)) {
err = PTR_ERR(admin);
@@ -2085,7 +2075,7 @@ int sparx5_vcap_init(struct sparx5 *sparx5)
list_add_tail(&admin->list, &ctrl->list);
}
dir = vcap_debugfs(sparx5->dev, sparx5->debugfs_root, ctrl);
- for (idx = 0; idx < SPX5_PORTS; ++idx)
+ for (idx = 0; idx < consts->n_ports; ++idx)
if (sparx5->ports[idx])
vcap_port_debugfs(sparx5->dev, dir, ctrl,
sparx5->ports[idx]->ndev);
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h
index 2684d9199b05..d0a42406bf26 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h
@@ -16,6 +16,11 @@
#include "vcap_api.h"
#include "vcap_api_client.h"
+#define SPARX5_IS2_LOOKUPS 4
+#define SPARX5_IS0_LOOKUPS 6
+#define SPARX5_ES0_LOOKUPS 1
+#define SPARX5_ES2_LOOKUPS 2
+
#define SPARX5_VCAP_CID_IS0_L0 VCAP_CID_INGRESS_L0 /* IS0/CLM lookup 0 */
#define SPARX5_VCAP_CID_IS0_L1 VCAP_CID_INGRESS_L1 /* IS0/CLM lookup 1 */
#define SPARX5_VCAP_CID_IS0_L2 VCAP_CID_INGRESS_L2 /* IS0/CLM lookup 2 */
@@ -40,6 +45,22 @@
#define SPARX5_VCAP_CID_ES2_MAX \
(VCAP_CID_EGRESS_STAGE2_L1 + VCAP_CID_LOOKUP_SIZE - 1) /* ES2 Max */
+struct sparx5_vcap_inst {
+ enum vcap_type vtype; /* type of vcap */
+ int vinst; /* instance number within the same type */
+ int lookups; /* number of lookups in this vcap type */
+ int lookups_per_instance; /* number of lookups in this instance */
+ int first_cid; /* first chain id in this vcap */
+ int last_cid; /* last chain id in this vcap */
+ int count; /* number of available addresses, not in super vcap */
+ int map_id; /* id in the super vcap block mapping (if applicable) */
+ int blockno; /* starting block in super vcap (if applicable) */
+ int blocks; /* number of blocks in super vcap (if applicable) */
+ bool ingress; /* is vcap in the ingress path */
+};
+
+extern const struct sparx5_vcap_inst sparx5_vcap_inst_cfg[];
+
/* IS0 port keyset selection control */
/* IS0 ethernet, IPv4, IPv6 traffic type keyset generation */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c
index ac001ae59a38..494782871903 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c
@@ -16,8 +16,10 @@ static int sparx5_vlant_set_mask(struct sparx5 *sparx5, u16 vid)
/* Output mask to respective registers */
spx5_wr(mask[0], sparx5, ANA_L3_VLAN_MASK_CFG(vid));
- spx5_wr(mask[1], sparx5, ANA_L3_VLAN_MASK_CFG1(vid));
- spx5_wr(mask[2], sparx5, ANA_L3_VLAN_MASK_CFG2(vid));
+ if (is_sparx5(sparx5)) {
+ spx5_wr(mask[1], sparx5, ANA_L3_VLAN_MASK_CFG1(vid));
+ spx5_wr(mask[2], sparx5, ANA_L3_VLAN_MASK_CFG2(vid));
+ }
return 0;
}
@@ -141,15 +143,19 @@ void sparx5_pgid_update_mask(struct sparx5_port *port, int pgid, bool enable)
void sparx5_pgid_clear(struct sparx5 *spx5, int pgid)
{
spx5_wr(0, spx5, ANA_AC_PGID_CFG(pgid));
- spx5_wr(0, spx5, ANA_AC_PGID_CFG1(pgid));
- spx5_wr(0, spx5, ANA_AC_PGID_CFG2(pgid));
+ if (is_sparx5(spx5)) {
+ spx5_wr(0, spx5, ANA_AC_PGID_CFG1(pgid));
+ spx5_wr(0, spx5, ANA_AC_PGID_CFG2(pgid));
+ }
}
void sparx5_pgid_read_mask(struct sparx5 *spx5, int pgid, u32 portmask[3])
{
portmask[0] = spx5_rd(spx5, ANA_AC_PGID_CFG(pgid));
- portmask[1] = spx5_rd(spx5, ANA_AC_PGID_CFG1(pgid));
- portmask[2] = spx5_rd(spx5, ANA_AC_PGID_CFG2(pgid));
+ if (is_sparx5(spx5)) {
+ portmask[1] = spx5_rd(spx5, ANA_AC_PGID_CFG1(pgid));
+ portmask[2] = spx5_rd(spx5, ANA_AC_PGID_CFG2(pgid));
+ }
}
void sparx5_update_fwd(struct sparx5 *sparx5)
@@ -161,27 +167,24 @@ void sparx5_update_fwd(struct sparx5 *sparx5)
/* Divide up fwd mask in 32 bit words */
bitmap_to_arr32(mask, sparx5->bridge_fwd_mask, SPX5_PORTS);
- /* Update flood masks */
- for (port = PGID_UC_FLOOD; port <= PGID_BCAST; port++) {
- spx5_wr(mask[0], sparx5, ANA_AC_PGID_CFG(port));
- spx5_wr(mask[1], sparx5, ANA_AC_PGID_CFG1(port));
- spx5_wr(mask[2], sparx5, ANA_AC_PGID_CFG2(port));
- }
-
/* Update SRC masks */
- for (port = 0; port < SPX5_PORTS; port++) {
+ for (port = 0; port < sparx5->data->consts->n_ports; port++) {
if (test_bit(port, sparx5->bridge_fwd_mask)) {
/* Allow to send to all bridged but self */
bitmap_copy(workmask, sparx5->bridge_fwd_mask, SPX5_PORTS);
clear_bit(port, workmask);
bitmap_to_arr32(mask, workmask, SPX5_PORTS);
spx5_wr(mask[0], sparx5, ANA_AC_SRC_CFG(port));
- spx5_wr(mask[1], sparx5, ANA_AC_SRC_CFG1(port));
- spx5_wr(mask[2], sparx5, ANA_AC_SRC_CFG2(port));
+ if (is_sparx5(sparx5)) {
+ spx5_wr(mask[1], sparx5, ANA_AC_SRC_CFG1(port));
+ spx5_wr(mask[2], sparx5, ANA_AC_SRC_CFG2(port));
+ }
} else {
spx5_wr(0, sparx5, ANA_AC_SRC_CFG(port));
- spx5_wr(0, sparx5, ANA_AC_SRC_CFG1(port));
- spx5_wr(0, sparx5, ANA_AC_SRC_CFG2(port));
+ if (is_sparx5(sparx5)) {
+ spx5_wr(0, sparx5, ANA_AC_SRC_CFG1(port));
+ spx5_wr(0, sparx5, ANA_AC_SRC_CFG2(port));
+ }
}
}
@@ -192,8 +195,10 @@ void sparx5_update_fwd(struct sparx5 *sparx5)
/* Apply learning mask */
spx5_wr(mask[0], sparx5, ANA_L2_AUTO_LRN_CFG);
- spx5_wr(mask[1], sparx5, ANA_L2_AUTO_LRN_CFG1);
- spx5_wr(mask[2], sparx5, ANA_L2_AUTO_LRN_CFG2);
+ if (is_sparx5(sparx5)) {
+ spx5_wr(mask[1], sparx5, ANA_L2_AUTO_LRN_CFG1);
+ spx5_wr(mask[2], sparx5, ANA_L2_AUTO_LRN_CFG2);
+ }
}
void sparx5_vlan_port_apply(struct sparx5 *sparx5,
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_ag_api.h b/drivers/net/ethernet/microchip/vcap/vcap_ag_api.h
index c3569a4c7b69..4735fad05708 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_ag_api.h
+++ b/drivers/net/ethernet/microchip/vcap/vcap_ag_api.h
@@ -290,7 +290,7 @@ enum vcap_keyfield_set {
* Sparx5: TCP flag RST , LAN966x: TCP: TCP flag RST. PTP over UDP: messageType
* bit 3
* VCAP_KF_L4_SEQUENCE_EQ0_IS: W1, sparx5: is2/es2, lan966x: is2
- * Set if TCP sequence number is 0, LAN966x: Overlayed with PTP over UDP:
+ * Set if TCP sequence number is 0, LAN966x: Overlaid with PTP over UDP:
* messageType bit 0
* VCAP_KF_L4_SPORT: W16, sparx5: is0/is2/es2, lan966x: is1/is2
* TCP/UDP source port
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api.c b/drivers/net/ethernet/microchip/vcap/vcap_api.c
index ef980e4e5bc2..2687765abe52 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api.c
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api.c
@@ -327,7 +327,7 @@ static int vcap_find_keystream_typegroup_sw(struct vcap_control *vctrl,
}
/* Verify that the typegroup information, subword count, keyset and type id
- * are in sync and correct, return the list of matchin keysets
+ * are in sync and correct, return the list of matching keysets
*/
int
vcap_find_keystream_keysets(struct vcap_control *vctrl,
@@ -2907,6 +2907,18 @@ int vcap_rule_add_action_u32(struct vcap_rule *rule,
}
EXPORT_SYMBOL_GPL(vcap_rule_add_action_u32);
+/* Add a 72 bit action field with value to the rule */
+int vcap_rule_add_action_u72(struct vcap_rule *rule,
+ enum vcap_action_field action,
+ struct vcap_u72_action *fieldval)
+{
+ struct vcap_client_actionfield_data data;
+
+ memcpy(&data.u72, fieldval, sizeof(data.u72));
+ return vcap_rule_add_action(rule, action, VCAP_FIELD_U72, &data);
+}
+EXPORT_SYMBOL_GPL(vcap_rule_add_action_u72);
+
static int vcap_read_counter(struct vcap_rule_internal *ri,
struct vcap_counter *ctr)
{
@@ -2931,7 +2943,7 @@ void vcap_netbytes_copy(u8 *dst, u8 *src, int count)
}
EXPORT_SYMBOL_GPL(vcap_netbytes_copy);
-/* Convert validation error code into tc extact error message */
+/* Convert validation error code into tc extack error message */
void vcap_set_tc_exterr(struct flow_cls_offload *fco, struct vcap_rule *vrule)
{
switch (vrule->exterr) {
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api.h b/drivers/net/ethernet/microchip/vcap/vcap_api.h
index 9eccfa633c1a..6069ad95c27e 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api.h
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api.h
@@ -271,7 +271,7 @@ struct vcap_operations {
/* VCAP API Client control interface */
struct vcap_control {
- struct vcap_operations *ops; /* client supplied operations */
+ const struct vcap_operations *ops; /* client supplied operations */
const struct vcap_info *vcaps; /* client supplied vcap models */
const struct vcap_statistics *stats; /* client supplied vcap stats */
struct list_head list; /* list of vcap instances */
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_client.h b/drivers/net/ethernet/microchip/vcap/vcap_api_client.h
index 88641508f885..cdf79e17ca54 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api_client.h
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_client.h
@@ -200,6 +200,8 @@ int vcap_rule_add_action_bit(struct vcap_rule *rule,
enum vcap_action_field action, enum vcap_bit val);
int vcap_rule_add_action_u32(struct vcap_rule *rule,
enum vcap_action_field action, u32 value);
+int vcap_rule_add_action_u72(struct vcap_rule *rule, enum vcap_action_field action,
+ struct vcap_u72_action *fieldval);
/* Get number of rules in a vcap instance lookup chain id range */
int vcap_admin_rule_count(struct vcap_admin *admin, int cid);
@@ -236,7 +238,7 @@ const struct vcap_set *vcap_keyfieldset(struct vcap_control *vctrl,
/* Copy to host byte order */
void vcap_netbytes_copy(u8 *dst, u8 *src, int count);
-/* Convert validation error code into tc extact error message */
+/* Convert validation error code into tc extack error message */
void vcap_set_tc_exterr(struct flow_cls_offload *fco, struct vcap_rule *vrule);
/* Cleanup a VCAP instance */
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c
index b23c11b0647c..9c9d38042125 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c
@@ -221,7 +221,7 @@ static int vcap_test_port_info(struct net_device *ndev,
return 0;
}
-static struct vcap_operations test_callbacks = {
+static const struct vcap_operations test_callbacks = {
.validate_keyset = test_val_keyset,
.add_default_fields = test_add_def_fields,
.cache_erase = test_cache_erase,
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
index fe4e166de8a0..16eb3de60eb6 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
@@ -211,7 +211,7 @@ static int vcap_test_port_info(struct net_device *ndev,
return 0;
}
-static struct vcap_operations test_callbacks = {
+static const struct vcap_operations test_callbacks = {
.validate_keyset = test_val_keyset,
.add_default_fields = test_add_def_fields,
.cache_erase = test_cache_erase,
@@ -366,12 +366,13 @@ static void vcap_api_iterator_init_test(struct kunit *test)
struct vcap_typegroup typegroups[] = {
{ .offset = 0, .width = 2, .value = 2, },
{ .offset = 156, .width = 1, .value = 0, },
- { .offset = 0, .width = 0, .value = 0, },
+ { }
};
struct vcap_typegroup typegroups2[] = {
{ .offset = 0, .width = 3, .value = 4, },
{ .offset = 49, .width = 2, .value = 0, },
{ .offset = 98, .width = 2, .value = 0, },
+ { }
};
vcap_iter_init(&iter, 52, typegroups, 86);
@@ -399,6 +400,7 @@ static void vcap_api_iterator_next_test(struct kunit *test)
{ .offset = 147, .width = 3, .value = 0, },
{ .offset = 196, .width = 2, .value = 0, },
{ .offset = 245, .width = 1, .value = 0, },
+ { }
};
int idx;
@@ -433,7 +435,7 @@ static void vcap_api_encode_typegroups_test(struct kunit *test)
{ .offset = 147, .width = 3, .value = 5, },
{ .offset = 196, .width = 2, .value = 2, },
{ .offset = 245, .width = 5, .value = 27, },
- { .offset = 0, .width = 0, .value = 0, },
+ { }
};
vcap_encode_typegroups(stream, 49, typegroups, false);
@@ -463,6 +465,7 @@ static void vcap_api_encode_bit_test(struct kunit *test)
{ .offset = 147, .width = 3, .value = 5, },
{ .offset = 196, .width = 2, .value = 2, },
{ .offset = 245, .width = 1, .value = 0, },
+ { }
};
vcap_iter_init(&iter, 49, typegroups, 44);
@@ -489,7 +492,7 @@ static void vcap_api_encode_field_test(struct kunit *test)
{ .offset = 147, .width = 3, .value = 5, },
{ .offset = 196, .width = 2, .value = 2, },
{ .offset = 245, .width = 5, .value = 27, },
- { .offset = 0, .width = 0, .value = 0, },
+ { }
};
struct vcap_field rf = {
.type = VCAP_FIELD_U32,
@@ -538,7 +541,7 @@ static void vcap_api_encode_short_field_test(struct kunit *test)
{ .offset = 0, .width = 3, .value = 7, },
{ .offset = 21, .width = 2, .value = 3, },
{ .offset = 42, .width = 1, .value = 1, },
- { .offset = 0, .width = 0, .value = 0, },
+ { }
};
struct vcap_field rf = {
.type = VCAP_FIELD_U32,
@@ -608,7 +611,7 @@ static void vcap_api_encode_keyfield_test(struct kunit *test)
struct vcap_typegroup tgt[] = {
{ .offset = 0, .width = 2, .value = 2, },
{ .offset = 156, .width = 1, .value = 1, },
- { .offset = 0, .width = 0, .value = 0, },
+ { }
};
vcap_test_api_init(&admin);
@@ -671,7 +674,7 @@ static void vcap_api_encode_max_keyfield_test(struct kunit *test)
struct vcap_typegroup tgt[] = {
{ .offset = 0, .width = 2, .value = 2, },
{ .offset = 156, .width = 1, .value = 1, },
- { .offset = 0, .width = 0, .value = 0, },
+ { }
};
u32 keyres[] = {
0x928e8a84,
@@ -732,7 +735,7 @@ static void vcap_api_encode_actionfield_test(struct kunit *test)
{ .offset = 0, .width = 2, .value = 2, },
{ .offset = 21, .width = 1, .value = 1, },
{ .offset = 42, .width = 1, .value = 0, },
- { .offset = 0, .width = 0, .value = 0, },
+ { }
};
vcap_encode_actionfield(&rule, &caf, &rf, tgt);
@@ -1442,18 +1445,10 @@ static void vcap_api_encode_rule_test(struct kunit *test)
vcap_enable_lookups(&test_vctrl, &test_netdev, 0, 0,
rule->cookie, false);
- vcap_free_rule(rule);
-
- /* Check that the rule has been freed: tricky to access since this
- * memory should not be accessible anymore
- */
- KUNIT_EXPECT_PTR_NE(test, NULL, rule);
- ret = list_empty(&rule->keyfields);
- KUNIT_EXPECT_EQ(test, true, ret);
- ret = list_empty(&rule->actionfields);
- KUNIT_EXPECT_EQ(test, true, ret);
+ ret = vcap_del_rule(&test_vctrl, &test_netdev, id);
+ KUNIT_EXPECT_EQ(test, 0, ret);
- vcap_del_rule(&test_vctrl, &test_netdev, id);
+ vcap_free_rule(rule);
}
static void vcap_api_set_rule_counter_test(struct kunit *test)
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_private.h b/drivers/net/ethernet/microchip/vcap/vcap_api_private.h
index df81d9ff502b..844bdf6b5f45 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api_private.h
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_private.h
@@ -109,7 +109,7 @@ int vcap_addr_keysets(struct vcap_control *vctrl, struct net_device *ndev,
struct vcap_keyset_list *kslist);
/* Verify that the typegroup information, subword count, keyset and type id
- * are in sync and correct, return the list of matchin keysets
+ * are in sync and correct, return the list of matching keysets
*/
int vcap_find_keystream_keysets(struct vcap_control *vctrl, enum vcap_type vt,
u32 *keystream, u32 *mskstream, bool mask,
diff --git a/drivers/net/ethernet/microsoft/Kconfig b/drivers/net/ethernet/microsoft/Kconfig
index 01eb7445ead9..3f36ee6a8ece 100644
--- a/drivers/net/ethernet/microsoft/Kconfig
+++ b/drivers/net/ethernet/microsoft/Kconfig
@@ -17,10 +17,12 @@ if NET_VENDOR_MICROSOFT
config MICROSOFT_MANA
tristate "Microsoft Azure Network Adapter (MANA) support"
- depends on PCI_MSI && X86_64
+ depends on PCI_MSI
+ depends on X86_64 || (ARM64 && !CPU_BIG_ENDIAN)
depends on PCI_HYPERV
select AUXILIARY_BUS
select PAGE_POOL
+ select NET_SHAPER
help
This driver supports Microsoft Azure Network Adapter (MANA).
So far, the driver is only supported on X86_64.
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index 1332db9a08eb..efb4e412ec7e 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -1,12 +1,33 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2021, Microsoft Corporation. */
+#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/version.h>
+#include <linux/msi.h>
+#include <linux/irqdomain.h>
+#include <linux/export.h>
#include <net/mana/mana.h>
+#include <net/mana/hw_channel.h>
+
+struct dentry *mana_debugfs_root;
+
+struct mana_dev_recovery {
+ struct list_head list;
+ struct pci_dev *pdev;
+ enum gdma_eqe_type type;
+};
+
+static struct mana_dev_recovery_work {
+ struct list_head dev_list;
+ struct delayed_work work;
+
+ /* Lock for dev_list above */
+ spinlock_t lock;
+} mana_dev_recovery_work;
static u32 mana_gd_r32(struct gdma_context *g, u64 offset)
{
@@ -28,6 +49,9 @@ static void mana_gd_init_pf_regs(struct pci_dev *pdev)
gc->db_page_base = gc->bar0_va +
mana_gd_r64(gc, GDMA_PF_REG_DB_PAGE_OFF);
+ gc->phys_db_page_base = gc->bar0_pa +
+ mana_gd_r64(gc, GDMA_PF_REG_DB_PAGE_OFF);
+
sriov_base_off = mana_gd_r64(gc, GDMA_SRIOV_REG_CFG_BASE_OFF);
sriov_base_va = gc->bar0_va + sriov_base_off;
@@ -60,6 +84,24 @@ static void mana_gd_init_registers(struct pci_dev *pdev)
mana_gd_init_vf_regs(pdev);
}
+/* Suppress logging when we set timeout to zero */
+bool mana_need_log(struct gdma_context *gc, int err)
+{
+ struct hw_channel_context *hwc;
+
+ if (err != -ETIMEDOUT)
+ return true;
+
+ if (!gc)
+ return true;
+
+ hwc = gc->hwc.driver_data;
+ if (hwc && hwc->hwc_timeout == 0)
+ return false;
+
+ return true;
+}
+
static int mana_gd_query_max_resources(struct pci_dev *pdev)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
@@ -77,8 +119,15 @@ static int mana_gd_query_max_resources(struct pci_dev *pdev)
return err ? err : -EPROTO;
}
- if (gc->num_msix_usable > resp.max_msix)
- gc->num_msix_usable = resp.max_msix;
+ if (!pci_msix_can_alloc_dyn(pdev)) {
+ if (gc->num_msix_usable > resp.max_msix)
+ gc->num_msix_usable = resp.max_msix;
+ } else {
+ /* If dynamic allocation is enabled, we have already allocated
+ * the HWC MSI vector
+ */
+ gc->num_msix_usable = min(resp.max_msix, num_online_cpus() + 1);
+ }
if (gc->num_msix_usable <= 1)
return -ENOSPC;
@@ -131,9 +180,10 @@ static int mana_gd_detect_devices(struct pci_dev *pdev)
struct gdma_list_devices_resp resp = {};
struct gdma_general_req req = {};
struct gdma_dev_id dev;
- u32 i, max_num_devs;
+ int found_dev = 0;
u16 dev_type;
int err;
+ u32 i;
mana_gd_init_req_hdr(&req.hdr, GDMA_LIST_DEVICES, sizeof(req),
sizeof(resp));
@@ -145,12 +195,17 @@ static int mana_gd_detect_devices(struct pci_dev *pdev)
return err ? err : -EPROTO;
}
- max_num_devs = min_t(u32, MAX_NUM_GDMA_DEVICES, resp.num_of_devs);
-
- for (i = 0; i < max_num_devs; i++) {
+ for (i = 0; i < GDMA_DEV_LIST_SIZE &&
+ found_dev < resp.num_of_devs; i++) {
dev = resp.devs[i];
dev_type = dev.type;
+ /* Skip empty devices */
+ if (dev.as_uint32 == 0)
+ continue;
+
+ found_dev++;
+
/* HWC is already detected in mana_hwc_create_channel(). */
if (dev_type == GDMA_DEVICE_HWC)
continue;
@@ -174,7 +229,7 @@ int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
return mana_hwc_send_request(hwc, req_len, req, resp_len, resp);
}
-EXPORT_SYMBOL_NS(mana_gd_send_request, NET_MANA);
+EXPORT_SYMBOL_NS(mana_gd_send_request, "NET_MANA");
int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
struct gdma_mem_info *gmi)
@@ -182,7 +237,7 @@ int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
dma_addr_t dma_handle;
void *buf;
- if (length < PAGE_SIZE || !is_power_of_2(length))
+ if (length < MANA_PAGE_SIZE || !is_power_of_2(length))
return -EINVAL;
gmi->dev = gc->dev;
@@ -257,8 +312,9 @@ static int mana_gd_disable_queue(struct gdma_queue *queue)
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
if (err || resp.hdr.status) {
- dev_err(gc->dev, "Failed to disable queue: %d, 0x%x\n", err,
- resp.hdr.status);
+ if (mana_need_log(gc, err))
+ dev_err(gc->dev, "Failed to disable queue: %d, 0x%x\n", err,
+ resp.hdr.status);
return err ? err : -EPROTO;
}
@@ -328,6 +384,7 @@ void mana_gd_wq_ring_doorbell(struct gdma_context *gc, struct gdma_queue *queue)
mana_gd_ring_doorbell(gc, queue->gdma_dev->doorbell, queue->type,
queue->id, queue->head * GDMA_WQE_BU_SIZE, 0);
}
+EXPORT_SYMBOL_NS(mana_gd_wq_ring_doorbell, "NET_MANA");
void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit)
{
@@ -340,12 +397,173 @@ void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit)
mana_gd_ring_doorbell(gc, cq->gdma_dev->doorbell, cq->type, cq->id,
head, arm_bit);
}
+EXPORT_SYMBOL_NS(mana_gd_ring_cq, "NET_MANA");
+
+#define MANA_SERVICE_PERIOD 10
+
+static void mana_serv_rescan(struct pci_dev *pdev)
+{
+ struct pci_bus *parent;
+
+ pci_lock_rescan_remove();
+
+ parent = pdev->bus;
+ if (!parent) {
+ dev_err(&pdev->dev, "MANA service: no parent bus\n");
+ goto out;
+ }
+
+ pci_stop_and_remove_bus_device(pdev);
+ pci_rescan_bus(parent);
+
+out:
+ pci_unlock_rescan_remove();
+}
+
+static void mana_serv_fpga(struct pci_dev *pdev)
+{
+ struct pci_bus *bus, *parent;
+
+ pci_lock_rescan_remove();
+
+ bus = pdev->bus;
+ if (!bus) {
+ dev_err(&pdev->dev, "MANA service: no bus\n");
+ goto out;
+ }
+
+ parent = bus->parent;
+ if (!parent) {
+ dev_err(&pdev->dev, "MANA service: no parent bus\n");
+ goto out;
+ }
+
+ pci_stop_and_remove_bus_device(bus->self);
+
+ msleep(MANA_SERVICE_PERIOD * 1000);
+
+ pci_rescan_bus(parent);
+
+out:
+ pci_unlock_rescan_remove();
+}
+
+static void mana_serv_reset(struct pci_dev *pdev)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+ struct hw_channel_context *hwc;
+ int ret;
+
+ if (!gc) {
+ /* Perform PCI rescan on device if GC is not set up */
+ dev_err(&pdev->dev, "MANA service: GC not setup, rescanning\n");
+ mana_serv_rescan(pdev);
+ return;
+ }
+
+ hwc = gc->hwc.driver_data;
+ if (!hwc) {
+ dev_err(&pdev->dev, "MANA service: no HWC\n");
+ goto out;
+ }
+
+ /* HWC is not responding in this case, so don't wait */
+ hwc->hwc_timeout = 0;
+
+ dev_info(&pdev->dev, "MANA reset cycle start\n");
+
+ mana_gd_suspend(pdev, PMSG_SUSPEND);
+
+ msleep(MANA_SERVICE_PERIOD * 1000);
+
+ ret = mana_gd_resume(pdev);
+ if (ret == -ETIMEDOUT || ret == -EPROTO) {
+ /* Perform PCI rescan on device if we failed on HWC */
+ dev_err(&pdev->dev, "MANA service: resume failed, rescanning\n");
+ mana_serv_rescan(pdev);
+ goto out;
+ }
+
+ if (ret)
+ dev_info(&pdev->dev, "MANA reset cycle failed err %d\n", ret);
+ else
+ dev_info(&pdev->dev, "MANA reset cycle completed\n");
+
+out:
+ gc->in_service = false;
+}
+
+struct mana_serv_work {
+ struct work_struct serv_work;
+ struct pci_dev *pdev;
+ enum gdma_eqe_type type;
+};
+
+static void mana_do_service(enum gdma_eqe_type type, struct pci_dev *pdev)
+{
+ switch (type) {
+ case GDMA_EQE_HWC_FPGA_RECONFIG:
+ mana_serv_fpga(pdev);
+ break;
+
+ case GDMA_EQE_HWC_RESET_REQUEST:
+ mana_serv_reset(pdev);
+ break;
+
+ default:
+ dev_err(&pdev->dev, "MANA service: unknown type %d\n", type);
+ break;
+ }
+}
+
+static void mana_recovery_delayed_func(struct work_struct *w)
+{
+ struct mana_dev_recovery_work *work;
+ struct mana_dev_recovery *dev;
+ unsigned long flags;
+
+ work = container_of(w, struct mana_dev_recovery_work, work.work);
+
+ spin_lock_irqsave(&work->lock, flags);
+
+ while (!list_empty(&work->dev_list)) {
+ dev = list_first_entry(&work->dev_list,
+ struct mana_dev_recovery, list);
+ list_del(&dev->list);
+ spin_unlock_irqrestore(&work->lock, flags);
+
+ mana_do_service(dev->type, dev->pdev);
+ pci_dev_put(dev->pdev);
+ kfree(dev);
+
+ spin_lock_irqsave(&work->lock, flags);
+ }
+
+ spin_unlock_irqrestore(&work->lock, flags);
+}
+
+static void mana_serv_func(struct work_struct *w)
+{
+ struct mana_serv_work *mns_wk;
+ struct pci_dev *pdev;
+
+ mns_wk = container_of(w, struct mana_serv_work, serv_work);
+ pdev = mns_wk->pdev;
+
+ if (pdev)
+ mana_do_service(mns_wk->type, pdev);
+
+ pci_dev_put(pdev);
+ kfree(mns_wk);
+ module_put(THIS_MODULE);
+}
static void mana_gd_process_eqe(struct gdma_queue *eq)
{
u32 head = eq->head % (eq->queue_size / GDMA_EQE_SIZE);
struct gdma_context *gc = eq->gdma_dev->gdma_context;
struct gdma_eqe *eq_eqe_ptr = eq->queue_mem_ptr;
+ struct mana_serv_work *mns_wk;
union gdma_eqe_info eqe_info;
enum gdma_eqe_type type;
struct gdma_event event;
@@ -380,6 +598,9 @@ static void mana_gd_process_eqe(struct gdma_queue *eq)
case GDMA_EQE_HWC_INIT_EQ_ID_DB:
case GDMA_EQE_HWC_INIT_DATA:
case GDMA_EQE_HWC_INIT_DONE:
+ case GDMA_EQE_HWC_SOC_SERVICE:
+ case GDMA_EQE_RNIC_QP_FATAL:
+ case GDMA_EQE_HWC_SOC_RECONFIG_DATA:
if (!eq->eq.callback)
break;
@@ -388,6 +609,46 @@ static void mana_gd_process_eqe(struct gdma_queue *eq)
eq->eq.callback(eq->eq.context, eq, &event);
break;
+ case GDMA_EQE_HWC_FPGA_RECONFIG:
+ case GDMA_EQE_HWC_RESET_REQUEST:
+ dev_info(gc->dev, "Recv MANA service type:%d\n", type);
+
+ if (!test_and_set_bit(GC_PROBE_SUCCEEDED, &gc->flags)) {
+ /*
+ * The device is still probing and we received a hardware reset
+ * event; the probe function will detect that the flag
+ * has changed and perform the service procedure.
+ */
+ dev_info(gc->dev,
+ "Service is to be processed in probe\n");
+ break;
+ }
+
+ if (gc->in_service) {
+ dev_info(gc->dev, "Already in service\n");
+ break;
+ }
+
+ if (!try_module_get(THIS_MODULE)) {
+ dev_info(gc->dev, "Module is unloading\n");
+ break;
+ }
+
+ mns_wk = kzalloc(sizeof(*mns_wk), GFP_ATOMIC);
+ if (!mns_wk) {
+ module_put(THIS_MODULE);
+ break;
+ }
+
+ dev_info(gc->dev, "Start MANA service type:%d\n", type);
+ gc->in_service = true;
+ mns_wk->pdev = to_pci_dev(gc->dev);
+ mns_wk->type = type;
+ pci_dev_get(mns_wk->pdev);
+ INIT_WORK(&mns_wk->serv_work, mana_serv_func);
+ schedule_work(&mns_wk->serv_work);
+ break;
+
default:
break;
}
@@ -470,7 +731,9 @@ static int mana_gd_register_irq(struct gdma_queue *queue,
}
queue->eq.msix_index = msi_index;
- gic = &gc->irq_contexts[msi_index];
+ gic = xa_load(&gc->irq_contexts, msi_index);
+ if (WARN_ON(!gic))
+ return -EINVAL;
spin_lock_irqsave(&gic->lock, flags);
list_add_rcu(&queue->entry, &gic->eq_list);
@@ -479,7 +742,7 @@ static int mana_gd_register_irq(struct gdma_queue *queue,
return 0;
}
-static void mana_gd_deregiser_irq(struct gdma_queue *queue)
+static void mana_gd_deregister_irq(struct gdma_queue *queue)
{
struct gdma_dev *gd = queue->gdma_dev;
struct gdma_irq_context *gic;
@@ -495,7 +758,10 @@ static void mana_gd_deregiser_irq(struct gdma_queue *queue)
if (WARN_ON(msix_index >= gc->num_msix_usable))
return;
- gic = &gc->irq_contexts[msix_index];
+ gic = xa_load(&gc->irq_contexts, msix_index);
+ if (WARN_ON(!gic))
+ return;
+
spin_lock_irqsave(&gic->lock, flags);
list_for_each_entry_rcu(eq, &gic->eq_list, entry) {
if (queue == eq) {
@@ -529,7 +795,8 @@ int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq)
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
if (err) {
- dev_err(dev, "test_eq failed: %d\n", err);
+ if (mana_need_log(gc, err))
+ dev_err(dev, "test_eq failed: %d\n", err);
goto out;
}
@@ -564,11 +831,11 @@ static void mana_gd_destroy_eq(struct gdma_context *gc, bool flush_evenets,
if (flush_evenets) {
err = mana_gd_test_eq(gc, queue);
- if (err)
+ if (err && mana_need_log(gc, err))
dev_warn(gc->dev, "Failed to flush EQ: %d\n", err);
}
- mana_gd_deregiser_irq(queue);
+ mana_gd_deregister_irq(queue);
if (queue->eq.disable_needed)
mana_gd_disable_queue(queue);
@@ -662,8 +929,11 @@ int mana_gd_create_hwc_queue(struct gdma_dev *gd,
gmi = &queue->mem_info;
err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
- if (err)
+ if (err) {
+ dev_err(gc->dev, "GDMA queue type: %d, size: %u, gdma memory allocation err: %d\n",
+ spec->type, spec->queue_size, err);
goto free_q;
+ }
queue->head = 0;
queue->tail = 0;
@@ -684,6 +954,8 @@ int mana_gd_create_hwc_queue(struct gdma_dev *gd,
*queue_ptr = queue;
return 0;
out:
+ dev_err(gc->dev, "Failed to create queue type %d of size %u, err: %d\n",
+ spec->type, spec->queue_size, err);
mana_gd_free_memory(gmi);
free_q:
kfree(queue);
@@ -705,19 +977,20 @@ int mana_gd_destroy_dma_region(struct gdma_context *gc, u64 dma_region_handle)
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
if (err || resp.hdr.status) {
- dev_err(gc->dev, "Failed to destroy DMA region: %d, 0x%x\n",
- err, resp.hdr.status);
+ if (mana_need_log(gc, err))
+ dev_err(gc->dev, "Failed to destroy DMA region: %d, 0x%x\n",
+ err, resp.hdr.status);
return -EPROTO;
}
return 0;
}
-EXPORT_SYMBOL_NS(mana_gd_destroy_dma_region, NET_MANA);
+EXPORT_SYMBOL_NS(mana_gd_destroy_dma_region, "NET_MANA");
static int mana_gd_create_dma_region(struct gdma_dev *gd,
struct gdma_mem_info *gmi)
{
- unsigned int num_page = gmi->length / PAGE_SIZE;
+ unsigned int num_page = gmi->length / MANA_PAGE_SIZE;
struct gdma_create_dma_region_req *req = NULL;
struct gdma_create_dma_region_resp resp = {};
struct gdma_context *gc = gd->gdma_context;
@@ -727,10 +1000,10 @@ static int mana_gd_create_dma_region(struct gdma_dev *gd,
int err;
int i;
- if (length < PAGE_SIZE || !is_power_of_2(length))
+ if (length < MANA_PAGE_SIZE || !is_power_of_2(length))
return -EINVAL;
- if (offset_in_page(gmi->virt_addr) != 0)
+ if (!MANA_PAGE_ALIGNED(gmi->virt_addr))
return -EINVAL;
hwc = gc->hwc.driver_data;
@@ -751,7 +1024,7 @@ static int mana_gd_create_dma_region(struct gdma_dev *gd,
req->page_addr_list_len = num_page;
for (i = 0; i < num_page; i++)
- req->page_addr_list[i] = gmi->dma_handle + i * PAGE_SIZE;
+ req->page_addr_list[i] = gmi->dma_handle + i * MANA_PAGE_SIZE;
err = mana_gd_send_request(gc, req_msg_size, req, sizeof(resp), &resp);
if (err)
@@ -766,7 +1039,13 @@ static int mana_gd_create_dma_region(struct gdma_dev *gd,
}
gmi->dma_region_handle = resp.dma_region_handle;
+ dev_dbg(gc->dev, "Created DMA region handle 0x%llx\n",
+ gmi->dma_region_handle);
out:
+ if (err)
+ dev_dbg(gc->dev,
+ "Failed to create DMA region of length: %u, page_type: %d, status: 0x%x, err: %d\n",
+ length, req->gdma_page_type, resp.hdr.status, err);
kfree(req);
return err;
}
@@ -789,8 +1068,11 @@ int mana_gd_create_mana_eq(struct gdma_dev *gd,
gmi = &queue->mem_info;
err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
- if (err)
+ if (err) {
+ dev_err(gc->dev, "GDMA queue type: %d, size: %u, gdma memory allocation err: %d\n",
+ spec->type, spec->queue_size, err);
goto free_q;
+ }
err = mana_gd_create_dma_region(gd, gmi);
if (err)
@@ -811,12 +1093,14 @@ int mana_gd_create_mana_eq(struct gdma_dev *gd,
*queue_ptr = queue;
return 0;
out:
+ dev_err(gc->dev, "Failed to create queue type %d of size: %u, err: %d\n",
+ spec->type, spec->queue_size, err);
mana_gd_free_memory(gmi);
free_q:
kfree(queue);
return err;
}
-EXPORT_SYMBOL_NS(mana_gd_create_mana_eq, NET_MANA);
+EXPORT_SYMBOL_NS(mana_gd_create_mana_eq, "NET_MANA");
int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
const struct gdma_queue_spec *spec,
@@ -837,8 +1121,11 @@ int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
gmi = &queue->mem_info;
err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
- if (err)
+ if (err) {
+ dev_err(gc->dev, "GDMA queue type: %d, size: %u, memory allocation err: %d\n",
+ spec->type, spec->queue_size, err);
goto free_q;
+ }
err = mana_gd_create_dma_region(gd, gmi);
if (err)
@@ -858,11 +1145,14 @@ int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
*queue_ptr = queue;
return 0;
out:
+ dev_err(gc->dev, "Failed to create queue type %d of size: %u, err: %d\n",
+ spec->type, spec->queue_size, err);
mana_gd_free_memory(gmi);
free_q:
kfree(queue);
return err;
}
+EXPORT_SYMBOL_NS(mana_gd_create_mana_wq_cq, "NET_MANA");
void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue)
{
@@ -893,7 +1183,7 @@ void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue)
mana_gd_free_memory(gmi);
kfree(queue);
}
-EXPORT_SYMBOL_NS(mana_gd_destroy_queue, NET_MANA);
+EXPORT_SYMBOL_NS(mana_gd_destroy_queue, "NET_MANA");
int mana_gd_verify_vf_version(struct pci_dev *pdev)
{
@@ -930,6 +1220,7 @@ int mana_gd_verify_vf_version(struct pci_dev *pdev)
err, resp.hdr.status);
return err ? err : -EPROTO;
}
+ gc->pf_cap_flags1 = resp.pf_cap_flags1;
if (resp.pf_cap_flags1 & GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG) {
err = mana_gd_query_hwc_timeout(pdev, &hwc->hwc_timeout);
if (err) {
@@ -970,7 +1261,6 @@ int mana_gd_register_device(struct gdma_dev *gd)
return 0;
}
-EXPORT_SYMBOL_NS(mana_gd_register_device, NET_MANA);
int mana_gd_deregister_device(struct gdma_dev *gd)
{
@@ -989,8 +1279,9 @@ int mana_gd_deregister_device(struct gdma_dev *gd)
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
if (err || resp.hdr.status) {
- dev_err(gc->dev, "Failed to deregister device: %d, 0x%x\n",
- err, resp.hdr.status);
+ if (mana_need_log(gc, err))
+ dev_err(gc->dev, "Failed to deregister device: %d, 0x%x\n",
+ err, resp.hdr.status);
if (!err)
err = -EPROTO;
}
@@ -1001,7 +1292,6 @@ int mana_gd_deregister_device(struct gdma_dev *gd)
return err;
}
-EXPORT_SYMBOL_NS(mana_gd_deregister_device, NET_MANA);
u32 mana_gd_wq_avail_space(struct gdma_queue *wq)
{
@@ -1037,7 +1327,7 @@ static u32 mana_gd_write_client_oob(const struct gdma_wqe_request *wqe_req,
header->inline_oob_size_div4 = client_oob_size / sizeof(u32);
if (oob_in_sgl) {
- WARN_ON_ONCE(!pad_data || wqe_req->num_sge < 2);
+ WARN_ON_ONCE(wqe_req->num_sge < 2);
header->client_oob_in_sgl = 1;
@@ -1093,7 +1383,6 @@ int mana_gd_post_work_request(struct gdma_queue *wq,
struct gdma_posted_wqe_info *wqe_info)
{
u32 client_oob_size = wqe_req->inline_oob_size;
- struct gdma_context *gc;
u32 sgl_data_size;
u32 max_wqe_size;
u32 wqe_size;
@@ -1123,11 +1412,8 @@ int mana_gd_post_work_request(struct gdma_queue *wq,
if (wqe_size > max_wqe_size)
return -EINVAL;
- if (wq->monitor_avl_buf && wqe_size > mana_gd_wq_avail_space(wq)) {
- gc = wq->gdma_dev->gdma_context;
- dev_err(gc->dev, "unsuccessful flow control!\n");
+ if (wq->monitor_avl_buf && wqe_size > mana_gd_wq_avail_space(wq))
return -ENOSPC;
- }
if (wqe_info)
wqe_info->wqe_size_in_bu = wqe_size / GDMA_WQE_BU_SIZE;
@@ -1144,6 +1430,7 @@ int mana_gd_post_work_request(struct gdma_queue *wq,
return 0;
}
+EXPORT_SYMBOL_NS(mana_gd_post_work_request, "NET_MANA");
int mana_gd_post_and_ring(struct gdma_queue *queue,
const struct gdma_wqe_request *wqe_req,
@@ -1153,8 +1440,11 @@ int mana_gd_post_and_ring(struct gdma_queue *queue,
int err;
err = mana_gd_post_work_request(queue, wqe_req, wqe_info);
- if (err)
+ if (err) {
+ dev_err(gc->dev, "Failed to post work req from queue type %d of size %u (err=%d)\n",
+ queue->type, queue->queue_size, err);
return err;
+ }
mana_gd_wq_ring_doorbell(gc, queue);
@@ -1214,6 +1504,7 @@ int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe)
return cqe_idx;
}
+EXPORT_SYMBOL_NS(mana_gd_poll_cq, "NET_MANA");
static irqreturn_t mana_gd_intr(int irq, void *arg)
{
@@ -1249,7 +1540,49 @@ void mana_gd_free_res_map(struct gdma_resource *r)
r->size = 0;
}
-static int irq_setup(unsigned int *irqs, unsigned int len, int node)
+/*
+ * Spread on CPUs with the following heuristics:
+ *
+ * 1. No more than one IRQ per CPU, if possible;
+ * 2. NUMA locality is the second priority;
+ * 3. Sibling dislocality is the last priority.
+ *
+ * Let's consider this topology:
+ *
+ * Node            0               1
+ * Core        0       1       2       3
+ * CPU       0   1   2   3   4   5   6   7
+ *
+ * The most performant IRQ distribution based on the above topology
+ * and heuristics may look like this:
+ *
+ * IRQ     Nodes   Cores   CPUs
+ * 0       1       0       0-1
+ * 1       1       1       2-3
+ * 2       1       0       0-1
+ * 3       1       1       2-3
+ * 4       2       2       4-5
+ * 5       2       3       6-7
+ * 6       2       2       4-5
+ * 7       2       3       6-7
+ *
+ * The heuristics are implemented as follows.
+ *
+ * The outer for_each() loop resets the 'weight' to the actual number
+ * of CPUs in the hop. Then the inner for_each() loop decrements it by
+ * the number of sibling groups (cores) while assigning the first set of IRQs
+ * to each group. IRQs 0 and 1 above are distributed this way.
+ *
+ * Now, because NUMA locality is more important, we should walk the
+ * same set of siblings and assign 2nd set of IRQs (2 and 3), and it's
+ * implemented by the middle while() loop. We keep doing this until the
+ * number of IRQs assigned on this hop equals the number of
+ * CPUs in the hop (weight == 0). Then we switch to the next hop and
+ * do the same thing.
+ */
+
+static int irq_setup(unsigned int *irqs, unsigned int len, int node,
+ bool skip_first_cpu)
{
const struct cpumask *next, *prev = cpu_none_mask;
cpumask_var_t cpus __free(free_cpumask_var);
@@ -1264,11 +1597,18 @@ static int irq_setup(unsigned int *irqs, unsigned int len, int node)
while (weight > 0) {
cpumask_andnot(cpus, next, prev);
for_each_cpu(cpu, cpus) {
+ cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu));
+ --weight;
+
+ if (unlikely(skip_first_cpu)) {
+ skip_first_cpu = false;
+ continue;
+ }
+
if (len-- == 0)
goto done;
+
irq_set_affinity_and_hint(*irqs++, topology_sibling_cpumask(cpu));
- cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu));
- --weight;
}
}
prev = next;
@@ -1278,47 +1618,108 @@ done:
return 0;
}
-static int mana_gd_setup_irqs(struct pci_dev *pdev)
+static int mana_gd_setup_dyn_irqs(struct pci_dev *pdev, int nvec)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
- unsigned int max_queues_per_port;
struct gdma_irq_context *gic;
- unsigned int max_irqs, cpu;
- int start_irq_index = 1;
- int nvec, *irqs, irq;
- int err, i = 0, j;
+ bool skip_first_cpu = false;
+ int *irqs, irq, err, i;
- cpus_read_lock();
- max_queues_per_port = num_online_cpus();
- if (max_queues_per_port > MANA_MAX_NUM_QUEUES)
- max_queues_per_port = MANA_MAX_NUM_QUEUES;
+ irqs = kmalloc_array(nvec, sizeof(int), GFP_KERNEL);
+ if (!irqs)
+ return -ENOMEM;
- /* Need 1 interrupt for the Hardware communication Channel (HWC) */
- max_irqs = max_queues_per_port + 1;
+ /*
+ * While processing PCI IRQ vectors here, we start at index 1,
+ * as the IRQ vector at index 0 has already been set up for the HWC.
+ * The irqs[] array, however, is populated from index 0, so it can be
+ * passed directly to irq_setup().
+ */
+ for (i = 1; i <= nvec; i++) {
+ gic = kzalloc(sizeof(*gic), GFP_KERNEL);
+ if (!gic) {
+ err = -ENOMEM;
+ goto free_irq;
+ }
+ gic->handler = mana_gd_process_eq_events;
+ INIT_LIST_HEAD(&gic->eq_list);
+ spin_lock_init(&gic->lock);
- nvec = pci_alloc_irq_vectors(pdev, 2, max_irqs, PCI_IRQ_MSIX);
- if (nvec < 0) {
- cpus_read_unlock();
- return nvec;
+ snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_q%d@pci:%s",
+ i - 1, pci_name(pdev));
+
+ /* one pci vector is already allocated for HWC */
+ irqs[i - 1] = pci_irq_vector(pdev, i);
+ if (irqs[i - 1] < 0) {
+ err = irqs[i - 1];
+ goto free_current_gic;
+ }
+
+ err = request_irq(irqs[i - 1], mana_gd_intr, 0, gic->name, gic);
+ if (err)
+ goto free_current_gic;
+
+ xa_store(&gc->irq_contexts, i, gic, GFP_KERNEL);
}
- if (nvec <= num_online_cpus())
- start_irq_index = 0;
- irqs = kmalloc_array((nvec - start_irq_index), sizeof(int), GFP_KERNEL);
- if (!irqs) {
- err = -ENOMEM;
- goto free_irq_vector;
+ /*
+ * When calling irq_setup() for dynamically added IRQs, if the number
+ * of CPUs is greater than or equal to the number of allocated MSI-X
+ * vectors, skip the first CPU sibling group: it is already affinitized
+ * to the HWC IRQ.
+ */
+ cpus_read_lock();
+ if (gc->num_msix_usable <= num_online_cpus())
+ skip_first_cpu = true;
+
+ err = irq_setup(irqs, nvec, gc->numa_node, skip_first_cpu);
+ if (err) {
+ cpus_read_unlock();
+ goto free_irq;
}
- gc->irq_contexts = kcalloc(nvec, sizeof(struct gdma_irq_context),
- GFP_KERNEL);
- if (!gc->irq_contexts) {
- err = -ENOMEM;
- goto free_irq_vector;
+ cpus_read_unlock();
+ kfree(irqs);
+ return 0;
+
+free_current_gic:
+ kfree(gic);
+free_irq:
+ for (i -= 1; i > 0; i--) {
+ irq = pci_irq_vector(pdev, i);
+ gic = xa_load(&gc->irq_contexts, i);
+ if (WARN_ON(!gic))
+ continue;
+
+ irq_update_affinity_hint(irq, NULL);
+ free_irq(irq, gic);
+ xa_erase(&gc->irq_contexts, i);
+ kfree(gic);
}
+ kfree(irqs);
+ return err;
+}
+
+static int mana_gd_setup_irqs(struct pci_dev *pdev, int nvec)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+ struct gdma_irq_context *gic;
+ int *irqs, *start_irqs, irq;
+ unsigned int cpu;
+ int err, i;
+
+ irqs = kmalloc_array(nvec, sizeof(int), GFP_KERNEL);
+ if (!irqs)
+ return -ENOMEM;
+
+ start_irqs = irqs;
for (i = 0; i < nvec; i++) {
- gic = &gc->irq_contexts[i];
+ gic = kzalloc(sizeof(*gic), GFP_KERNEL);
+ if (!gic) {
+ err = -ENOMEM;
+ goto free_irq;
+ }
+
gic->handler = mana_gd_process_eq_events;
INIT_LIST_HEAD(&gic->eq_list);
spin_lock_init(&gic->lock);
@@ -1330,67 +1731,128 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_q%d@pci:%s",
i - 1, pci_name(pdev));
- irq = pci_irq_vector(pdev, i);
- if (irq < 0) {
- err = irq;
- goto free_irq;
+ irqs[i] = pci_irq_vector(pdev, i);
+ if (irqs[i] < 0) {
+ err = irqs[i];
+ goto free_current_gic;
}
- if (!i) {
- err = request_irq(irq, mana_gd_intr, 0, gic->name, gic);
- if (err)
- goto free_irq;
-
- /* If number of IRQ is one extra than number of online CPUs,
- * then we need to assign IRQ0 (hwc irq) and IRQ1 to
- * same CPU.
- * Else we will use different CPUs for IRQ0 and IRQ1.
- * Also we are using cpumask_local_spread instead of
- * cpumask_first for the node, because the node can be
- * mem only.
- */
- if (start_irq_index) {
- cpu = cpumask_local_spread(i, gc->numa_node);
- irq_set_affinity_and_hint(irq, cpumask_of(cpu));
- } else {
- irqs[start_irq_index] = irq;
- }
- } else {
- irqs[i - start_irq_index] = irq;
- err = request_irq(irqs[i - start_irq_index], mana_gd_intr, 0,
- gic->name, gic);
- if (err)
- goto free_irq;
- }
+ err = request_irq(irqs[i], mana_gd_intr, 0, gic->name, gic);
+ if (err)
+ goto free_current_gic;
+
+ xa_store(&gc->irq_contexts, i, gic, GFP_KERNEL);
}
- err = irq_setup(irqs, (nvec - start_irq_index), gc->numa_node);
- if (err)
+	/* If the number of IRQs is one more than the number of online CPUs,
+	 * assign IRQ0 (the HWC IRQ) and IRQ1 to the same CPU; otherwise use
+	 * different CPUs for IRQ0 and IRQ1. Also use cpumask_local_spread()
+	 * instead of cpumask_first() for the node, because the node can be
+	 * memory-only.
+	 */
+ cpus_read_lock();
+ if (nvec > num_online_cpus()) {
+ cpu = cpumask_local_spread(0, gc->numa_node);
+ irq_set_affinity_and_hint(irqs[0], cpumask_of(cpu));
+ irqs++;
+ nvec -= 1;
+ }
+
+ err = irq_setup(irqs, nvec, gc->numa_node, false);
+ if (err) {
+ cpus_read_unlock();
goto free_irq;
+ }
- gc->max_num_msix = nvec;
- gc->num_msix_usable = nvec;
cpus_read_unlock();
+ kfree(start_irqs);
return 0;
+free_current_gic:
+ kfree(gic);
free_irq:
- for (j = i - 1; j >= 0; j--) {
- irq = pci_irq_vector(pdev, j);
- gic = &gc->irq_contexts[j];
+ for (i -= 1; i >= 0; i--) {
+ irq = pci_irq_vector(pdev, i);
+ gic = xa_load(&gc->irq_contexts, i);
+ if (WARN_ON(!gic))
+ continue;
irq_update_affinity_hint(irq, NULL);
free_irq(irq, gic);
+ xa_erase(&gc->irq_contexts, i);
+ kfree(gic);
}
- kfree(gc->irq_contexts);
- kfree(irqs);
- gc->irq_contexts = NULL;
-free_irq_vector:
- cpus_read_unlock();
- pci_free_irq_vectors(pdev);
+ kfree(start_irqs);
return err;
}
+static int mana_gd_setup_hwc_irqs(struct pci_dev *pdev)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+ unsigned int max_irqs, min_irqs;
+ int nvec, err;
+
+ if (pci_msix_can_alloc_dyn(pdev)) {
+ max_irqs = 1;
+ min_irqs = 1;
+ } else {
+ /* Need 1 interrupt for HWC */
+ max_irqs = min(num_online_cpus(), MANA_MAX_NUM_QUEUES) + 1;
+ min_irqs = 2;
+ }
+
+ nvec = pci_alloc_irq_vectors(pdev, min_irqs, max_irqs, PCI_IRQ_MSIX);
+ if (nvec < 0)
+ return nvec;
+
+ err = mana_gd_setup_irqs(pdev, nvec);
+ if (err) {
+ pci_free_irq_vectors(pdev);
+ return err;
+ }
+
+ gc->num_msix_usable = nvec;
+ gc->max_num_msix = nvec;
+
+ return 0;
+}
+
+static int mana_gd_setup_remaining_irqs(struct pci_dev *pdev)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+ struct msi_map irq_map;
+ int max_irqs, i, err;
+
+ if (!pci_msix_can_alloc_dyn(pdev))
+		/* The remaining IRQs were already allocated along with the HWC IRQ */
+ return 0;
+
+	/* Allocate only the remaining IRQs */
+ max_irqs = gc->num_msix_usable - 1;
+
+ for (i = 1; i <= max_irqs; i++) {
+ irq_map = pci_msix_alloc_irq_at(pdev, i, NULL);
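+		/* On failure, msi_map.virq is 0 and msi_map.index carries the negative errno */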
+ if (!irq_map.virq) {
+ err = irq_map.index;
+			/* The caller will clean up all allocated IRQs
+			 * after the HWC is destroyed.
+			 */
+ return err;
+ }
+ }
+
+ err = mana_gd_setup_dyn_irqs(pdev, max_irqs);
+ if (err)
+ return err;
+
+ gc->max_num_msix = gc->max_num_msix + max_irqs;
+
+ return 0;
+}
+
static void mana_gd_remove_irqs(struct pci_dev *pdev)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
@@ -1405,19 +1867,21 @@ static void mana_gd_remove_irqs(struct pci_dev *pdev)
if (irq < 0)
continue;
- gic = &gc->irq_contexts[i];
+ gic = xa_load(&gc->irq_contexts, i);
+ if (WARN_ON(!gic))
+ continue;
/* Need to clear the hint before free_irq */
irq_update_affinity_hint(irq, NULL);
free_irq(irq, gic);
+ xa_erase(&gc->irq_contexts, i);
+ kfree(gic);
}
pci_free_irq_vectors(pdev);
gc->max_num_msix = 0;
gc->num_msix_usable = 0;
- kfree(gc->irq_contexts);
- gc->irq_contexts = NULL;
}
static int mana_gd_setup(struct pci_dev *pdev)
@@ -1428,9 +1892,16 @@ static int mana_gd_setup(struct pci_dev *pdev)
mana_gd_init_registers(pdev);
mana_smc_init(&gc->shm_channel, gc->dev, gc->shm_base);
- err = mana_gd_setup_irqs(pdev);
- if (err)
- return err;
+ gc->service_wq = alloc_ordered_workqueue("gdma_service_wq", 0);
+ if (!gc->service_wq)
+ return -ENOMEM;
+
+ err = mana_gd_setup_hwc_irqs(pdev);
+ if (err) {
+ dev_err(gc->dev, "Failed to setup IRQs for HWC creation: %d\n",
+ err);
+ goto free_workqueue;
+ }
err = mana_hwc_create_channel(gc);
if (err)
@@ -1444,16 +1915,26 @@ static int mana_gd_setup(struct pci_dev *pdev)
if (err)
goto destroy_hwc;
+ err = mana_gd_setup_remaining_irqs(pdev);
+ if (err) {
+ dev_err(gc->dev, "Failed to setup remaining IRQs: %d", err);
+ goto destroy_hwc;
+ }
+
err = mana_gd_detect_devices(pdev);
if (err)
goto destroy_hwc;
+ dev_dbg(&pdev->dev, "mana gdma setup successful\n");
return 0;
destroy_hwc:
mana_hwc_destroy_channel(gc);
remove_irq:
mana_gd_remove_irqs(pdev);
+free_workqueue:
+ destroy_workqueue(gc->service_wq);
+ dev_err(&pdev->dev, "%s failed (error %d)\n", __func__, err);
return err;
}
@@ -1464,6 +1945,9 @@ static void mana_gd_cleanup(struct pci_dev *pdev)
mana_hwc_destroy_channel(gc);
mana_gd_remove_irqs(pdev);
+
+ destroy_workqueue(gc->service_wq);
+ dev_dbg(&pdev->dev, "mana gdma cleanup successful\n");
}
static bool mana_is_pf(unsigned short dev_id)
@@ -1482,8 +1966,10 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
BUILD_BUG_ON(2 * MAX_PORTS_IN_MANA_DEV * GDMA_EQE_SIZE > EQ_SIZE);
err = pci_enable_device(pdev);
- if (err)
+ if (err) {
+ dev_err(&pdev->dev, "Failed to enable pci device (err=%d)\n", err);
return -ENXIO;
+ }
pci_set_master(pdev);
@@ -1492,14 +1978,11 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto disable_dev;
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (err)
- goto release_region;
-
- err = dma_set_max_seg_size(&pdev->dev, UINT_MAX);
if (err) {
- dev_err(&pdev->dev, "Failed to set dma device segment size\n");
+ dev_err(&pdev->dev, "DMA set mask failed: %d\n", err);
goto release_region;
}
+ dma_set_max_seg_size(&pdev->dev, UINT_MAX);
err = -ENOMEM;
gc = vzalloc(sizeof(*gc));
@@ -1518,6 +2001,13 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
gc->is_pf = mana_is_pf(pdev->device);
gc->bar0_va = bar0_va;
gc->dev = &pdev->dev;
+ xa_init(&gc->irq_contexts);
+
+ if (gc->is_pf)
+ gc->mana_pci_debugfs = debugfs_create_dir("0", mana_debugfs_root);
+ else
+ gc->mana_pci_debugfs = debugfs_create_dir(pci_slot_name(pdev->slot),
+ mana_debugfs_root);
err = mana_gd_setup(pdev);
if (err)
@@ -1527,11 +2017,37 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto cleanup_gd;
+ err = mana_rdma_probe(&gc->mana_ib);
+ if (err)
+ goto cleanup_mana;
+
+	/*
+	 * If a hardware reset event has occurred over the HWC during probe,
+	 * roll back and perform the hardware reset procedure.
+	 */
+ if (test_and_set_bit(GC_PROBE_SUCCEEDED, &gc->flags)) {
+ err = -EPROTO;
+ goto cleanup_mana_rdma;
+ }
+
return 0;
+cleanup_mana_rdma:
+ mana_rdma_remove(&gc->mana_ib);
+cleanup_mana:
+ mana_remove(&gc->mana, false);
cleanup_gd:
mana_gd_cleanup(pdev);
unmap_bar:
+	/*
+	 * At this point the other debugfs child dirs/files are either not yet
+	 * created or have already been cleaned up, so removing the PCI debugfs
+	 * folder here only cleans up the adapter-MTU file and the
+	 * mana_pci_debugfs directory itself.
+	 */
+ debugfs_remove_recursive(gc->mana_pci_debugfs);
+ gc->mana_pci_debugfs = NULL;
+ xa_destroy(&gc->irq_contexts);
pci_iounmap(pdev, bar0_va);
free_gc:
pci_set_drvdata(pdev, NULL);
@@ -1541,6 +2057,35 @@ release_region:
disable_dev:
pci_disable_device(pdev);
dev_err(&pdev->dev, "gdma probe failed: err = %d\n", err);
+
+ /*
+ * Hardware could be in recovery mode and the HWC returns TIMEDOUT or
+ * EPROTO from mana_gd_setup(), mana_probe() or mana_rdma_probe(), or
+ * we received a hardware reset event over HWC interrupt. In this case,
+ * perform the device recovery procedure after MANA_SERVICE_PERIOD
+ * seconds.
+ */
+ if (err == -ETIMEDOUT || err == -EPROTO) {
+ struct mana_dev_recovery *dev;
+ unsigned long flags;
+
+ dev_info(&pdev->dev, "Start MANA recovery mode\n");
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return err;
+
+ dev->pdev = pci_dev_get(pdev);
+ dev->type = GDMA_EQE_HWC_RESET_REQUEST;
+
+ spin_lock_irqsave(&mana_dev_recovery_work.lock, flags);
+ list_add_tail(&dev->list, &mana_dev_recovery_work.dev_list);
+ spin_unlock_irqrestore(&mana_dev_recovery_work.lock, flags);
+
+ schedule_delayed_work(&mana_dev_recovery_work.work,
+ secs_to_jiffies(MANA_SERVICE_PERIOD));
+ }
+
return err;
}
@@ -1548,23 +2093,33 @@ static void mana_gd_remove(struct pci_dev *pdev)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
+ mana_rdma_remove(&gc->mana_ib);
mana_remove(&gc->mana, false);
mana_gd_cleanup(pdev);
+ debugfs_remove_recursive(gc->mana_pci_debugfs);
+
+ gc->mana_pci_debugfs = NULL;
+
+ xa_destroy(&gc->irq_contexts);
+
pci_iounmap(pdev, gc->bar0_va);
vfree(gc);
pci_release_regions(pdev);
pci_disable_device(pdev);
+
+ dev_dbg(&pdev->dev, "mana gdma remove successful\n");
}
/* The 'state' parameter is not used. */
-static int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state)
+int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
+ mana_rdma_remove(&gc->mana_ib);
mana_remove(&gc->mana, true);
mana_gd_cleanup(pdev);
@@ -1576,7 +2131,7 @@ static int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state)
* fail -- if this happens, it's safer to just report an error than try to undo
* what has been done.
*/
-static int mana_gd_resume(struct pci_dev *pdev)
+int mana_gd_resume(struct pci_dev *pdev)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
int err;
@@ -1589,6 +2144,10 @@ static int mana_gd_resume(struct pci_dev *pdev)
if (err)
return err;
+ err = mana_rdma_probe(&gc->mana_ib);
+ if (err)
+ return err;
+
return 0;
}
@@ -1599,10 +2158,15 @@ static void mana_gd_shutdown(struct pci_dev *pdev)
dev_info(&pdev->dev, "Shutdown was called\n");
+ mana_rdma_remove(&gc->mana_ib);
mana_remove(&gc->mana, true);
mana_gd_cleanup(pdev);
+ debugfs_remove_recursive(gc->mana_pci_debugfs);
+
+ gc->mana_pci_debugfs = NULL;
+
pci_disable_device(pdev);
}
@@ -1622,7 +2186,51 @@ static struct pci_driver mana_driver = {
.shutdown = mana_gd_shutdown,
};
-module_pci_driver(mana_driver);
+static int __init mana_driver_init(void)
+{
+ int err;
+
+ INIT_LIST_HEAD(&mana_dev_recovery_work.dev_list);
+ spin_lock_init(&mana_dev_recovery_work.lock);
+ INIT_DELAYED_WORK(&mana_dev_recovery_work.work, mana_recovery_delayed_func);
+
+ mana_debugfs_root = debugfs_create_dir("mana", NULL);
+
+ err = pci_register_driver(&mana_driver);
+ if (err) {
+ debugfs_remove(mana_debugfs_root);
+ mana_debugfs_root = NULL;
+ }
+
+ return err;
+}
+
+static void __exit mana_driver_exit(void)
+{
+ struct mana_dev_recovery *dev;
+ unsigned long flags;
+
+ disable_delayed_work_sync(&mana_dev_recovery_work.work);
+
+ spin_lock_irqsave(&mana_dev_recovery_work.lock, flags);
+ while (!list_empty(&mana_dev_recovery_work.dev_list)) {
+ dev = list_first_entry(&mana_dev_recovery_work.dev_list,
+ struct mana_dev_recovery, list);
+ list_del(&dev->list);
+ pci_dev_put(dev->pdev);
+ kfree(dev);
+ }
+ spin_unlock_irqrestore(&mana_dev_recovery_work.lock, flags);
+
+ pci_unregister_driver(&mana_driver);
+
+ debugfs_remove(mana_debugfs_root);
+
+ mana_debugfs_root = NULL;
+}
+
+module_init(mana_driver_init);
+module_exit(mana_driver_exit);
MODULE_DEVICE_TABLE(pci, mana_id_table);
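As a side note on the structure above: the irq_contexts xarray, keyed by MSI-X index, is what lets mana_gd_setup_remaining_irqs() grow the vector set after probe instead of relying on a fixed kcalloc()'d array. A minimal sketch of that store/teardown pattern, using hypothetical demo_* helpers that are not part of the patch:

#include <linux/xarray.h>
#include <linux/slab.h>

/* Stand-in for struct gdma_irq_context; only the lifetime matters here. */
struct demo_irq_ctx {
	int msi_index;
};

/* Store a context at the given MSI-X index; xa_store() returns the old
 * entry or an xa_err()-encoded pointer on allocation failure.
 */
static int demo_store_ctx(struct xarray *xa, unsigned long index,
			  struct demo_irq_ctx *ctx)
{
	return xa_err(xa_store(xa, index, ctx, GFP_KERNEL));
}

/* Erase and free every context, then release the xarray itself. */
static void demo_release_all(struct xarray *xa)
{
	struct demo_irq_ctx *ctx;
	unsigned long index;

	xa_for_each(xa, index, ctx) {
		xa_erase(xa, index);
		kfree(ctx);
	}
	xa_destroy(xa);
}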
diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c
index 2729a2c5acf9..aa4e2731e2ba 100644
--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
@@ -2,7 +2,9 @@
/* Copyright (c) 2021, Microsoft Corporation. */
#include <net/mana/gdma.h>
+#include <net/mana/mana.h>
#include <net/mana/hw_channel.h>
+#include <linux/vmalloc.h>
static int mana_hwc_get_msg_index(struct hw_channel_context *hwc, u16 *msg_id)
{
@@ -51,9 +53,33 @@ static int mana_hwc_verify_resp_msg(const struct hwc_caller_ctx *caller_ctx,
return 0;
}
+static int mana_hwc_post_rx_wqe(const struct hwc_wq *hwc_rxq,
+ struct hwc_work_request *req)
+{
+ struct device *dev = hwc_rxq->hwc->dev;
+ struct gdma_sge *sge;
+ int err;
+
+ sge = &req->sge;
+ sge->address = (u64)req->buf_sge_addr;
+ sge->mem_key = hwc_rxq->msg_buf->gpa_mkey;
+ sge->size = req->buf_len;
+
+ memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
+ req->wqe_req.sgl = sge;
+ req->wqe_req.num_sge = 1;
+ req->wqe_req.client_data_unit = 0;
+
+ err = mana_gd_post_and_ring(hwc_rxq->gdma_wq, &req->wqe_req, NULL);
+ if (err)
+ dev_err(dev, "Failed to post WQE on HWC RQ: %d\n", err);
+ return err;
+}
+
static void mana_hwc_handle_resp(struct hw_channel_context *hwc, u32 resp_len,
- const struct gdma_resp_hdr *resp_msg)
+ struct hwc_work_request *rx_req)
{
+ const struct gdma_resp_hdr *resp_msg = rx_req->buf_va;
struct hwc_caller_ctx *ctx;
int err;
@@ -61,6 +87,7 @@ static void mana_hwc_handle_resp(struct hw_channel_context *hwc, u32 resp_len,
hwc->inflight_msg_res.map)) {
dev_err(hwc->dev, "hwc_rx: invalid msg_id = %u\n",
resp_msg->response.hwc_msg_id);
+ mana_hwc_post_rx_wqe(hwc->rxq, rx_req);
return;
}
@@ -74,40 +101,26 @@ static void mana_hwc_handle_resp(struct hw_channel_context *hwc, u32 resp_len,
memcpy(ctx->output_buf, resp_msg, resp_len);
out:
ctx->error = err;
- complete(&ctx->comp_event);
-}
-
-static int mana_hwc_post_rx_wqe(const struct hwc_wq *hwc_rxq,
- struct hwc_work_request *req)
-{
- struct device *dev = hwc_rxq->hwc->dev;
- struct gdma_sge *sge;
- int err;
- sge = &req->sge;
- sge->address = (u64)req->buf_sge_addr;
- sge->mem_key = hwc_rxq->msg_buf->gpa_mkey;
- sge->size = req->buf_len;
-
- memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
- req->wqe_req.sgl = sge;
- req->wqe_req.num_sge = 1;
- req->wqe_req.client_data_unit = 0;
+	/* Must post the RX WQE before calling complete(); otherwise the next
+	 * RX may hit a no_wqe error.
+	 */
+ mana_hwc_post_rx_wqe(hwc->rxq, rx_req);
- err = mana_gd_post_and_ring(hwc_rxq->gdma_wq, &req->wqe_req, NULL);
- if (err)
- dev_err(dev, "Failed to post WQE on HWC RQ: %d\n", err);
- return err;
+ complete(&ctx->comp_event);
}
static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
struct gdma_event *event)
{
+ union hwc_init_soc_service_type service_data;
struct hw_channel_context *hwc = ctx;
struct gdma_dev *gd = hwc->gdma_dev;
union hwc_init_type_data type_data;
union hwc_init_eq_id_db eq_db;
+ struct mana_context *ac;
u32 type, val;
+ int ret;
switch (event->type) {
case GDMA_EQE_HWC_INIT_EQ_ID_DB:
@@ -184,13 +197,41 @@ static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
hwc->hwc_timeout = val;
break;
+ case HWC_DATA_HW_LINK_CONNECT:
+ case HWC_DATA_HW_LINK_DISCONNECT:
+ ac = gd->gdma_context->mana.driver_data;
+ if (!ac)
+ break;
+
+ WRITE_ONCE(ac->link_event, type);
+ schedule_work(&ac->link_change_work);
+
+ break;
+
default:
dev_warn(hwc->dev, "Received unknown reconfig type %u\n", type);
break;
}
break;
+ case GDMA_EQE_HWC_SOC_SERVICE:
+ service_data.as_uint32 = event->details[0];
+ type = service_data.type;
+ switch (type) {
+ case GDMA_SERVICE_TYPE_RDMA_SUSPEND:
+ case GDMA_SERVICE_TYPE_RDMA_RESUME:
+ ret = mana_rdma_service_event(gd->gdma_context, type);
+ if (ret)
+ dev_err(hwc->dev, "Failed to schedule adev service event: %d\n",
+ ret);
+ break;
+ default:
+ dev_warn(hwc->dev, "Received unknown SOC service type %u\n", type);
+ break;
+ }
+
+ break;
default:
dev_warn(hwc->dev, "Received unknown gdma event %u\n", event->type);
/* Ignore unknown events, which should never happen. */
@@ -234,14 +275,12 @@ static void mana_hwc_rx_event_handler(void *ctx, u32 gdma_rxq_id,
return;
}
- mana_hwc_handle_resp(hwc, rx_oob->tx_oob_data_size, resp);
+ mana_hwc_handle_resp(hwc, rx_oob->tx_oob_data_size, rx_req);
- /* Do no longer use 'resp', because the buffer is posted to the HW
- * in the below mana_hwc_post_rx_wqe().
+ /* Can no longer use 'resp', because the buffer is posted to the HW
+ * in mana_hwc_handle_resp() above.
*/
resp = NULL;
-
- mana_hwc_post_rx_wqe(hwc_rxq, rx_req);
}
static void mana_hwc_tx_event_handler(void *ctx, u32 gdma_txq_id,
@@ -361,12 +400,12 @@ static int mana_hwc_create_cq(struct hw_channel_context *hwc, u16 q_depth,
int err;
eq_size = roundup_pow_of_two(GDMA_EQE_SIZE * q_depth);
- if (eq_size < MINIMUM_SUPPORTED_PAGE_SIZE)
- eq_size = MINIMUM_SUPPORTED_PAGE_SIZE;
+ if (eq_size < MANA_MIN_QSIZE)
+ eq_size = MANA_MIN_QSIZE;
cq_size = roundup_pow_of_two(GDMA_CQE_SIZE * q_depth);
- if (cq_size < MINIMUM_SUPPORTED_PAGE_SIZE)
- cq_size = MINIMUM_SUPPORTED_PAGE_SIZE;
+ if (cq_size < MANA_MIN_QSIZE)
+ cq_size = MANA_MIN_QSIZE;
hwc_cq = kzalloc(sizeof(*hwc_cq), GFP_KERNEL);
if (!hwc_cq)
@@ -428,12 +467,13 @@ static int mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, u16 q_depth,
dma_buf->num_reqs = q_depth;
- buf_size = PAGE_ALIGN(q_depth * max_msg_size);
+ buf_size = MANA_PAGE_ALIGN(q_depth * max_msg_size);
gmi = &dma_buf->mem_info;
err = mana_gd_alloc_memory(gc, buf_size, gmi);
if (err) {
- dev_err(hwc->dev, "Failed to allocate DMA buffer: %d\n", err);
+ dev_err(hwc->dev, "Failed to allocate DMA buffer size: %u, err %d\n",
+ buf_size, err);
goto out;
}
@@ -496,8 +536,8 @@ static int mana_hwc_create_wq(struct hw_channel_context *hwc,
else
queue_size = roundup_pow_of_two(GDMA_MAX_SQE_SIZE * q_depth);
- if (queue_size < MINIMUM_SUPPORTED_PAGE_SIZE)
- queue_size = MINIMUM_SUPPORTED_PAGE_SIZE;
+ if (queue_size < MANA_MIN_QSIZE)
+ queue_size = MANA_MIN_QSIZE;
hwc_wq = kzalloc(sizeof(*hwc_wq), GFP_KERNEL);
if (!hwc_wq)
@@ -522,6 +562,9 @@ static int mana_hwc_create_wq(struct hw_channel_context *hwc,
out:
if (err)
mana_hwc_destroy_wq(hwc, hwc_wq);
+
+ dev_err(hwc->dev, "Failed to create HWC queue size= %u type= %d err= %d\n",
+ queue_size, q_type, err);
return err;
}
@@ -848,8 +891,15 @@ int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
}
if (!wait_for_completion_timeout(&ctx->comp_event,
- (msecs_to_jiffies(hwc->hwc_timeout) * HZ))) {
- dev_err(hwc->dev, "HWC: Request timed out!\n");
+ (msecs_to_jiffies(hwc->hwc_timeout)))) {
+ if (hwc->hwc_timeout != 0)
+ dev_err(hwc->dev, "HWC: Request timed out: %u ms\n",
+ hwc->hwc_timeout);
+
+		/* Reduce further waiting if the HWC is not responding */
+ if (hwc->hwc_timeout > 1)
+ hwc->hwc_timeout = 1;
+
err = -ETIMEDOUT;
goto out;
}
@@ -860,8 +910,13 @@ int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
}
if (ctx->status_code && ctx->status_code != GDMA_STATUS_MORE_ENTRIES) {
- dev_err(hwc->dev, "HWC: Failed hw_channel req: 0x%x\n",
- ctx->status_code);
+ if (ctx->status_code == GDMA_STATUS_CMD_UNSUPPORTED) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+ if (req_msg->req.msg_type != MANA_QUERY_PHY_STAT)
+ dev_err(hwc->dev, "HWC: Failed hw_channel req: 0x%x\n",
+ ctx->status_code);
err = -EPROTO;
goto out;
}
diff --git a/drivers/net/ethernet/microsoft/mana/mana_bpf.c b/drivers/net/ethernet/microsoft/mana/mana_bpf.c
index 23b1521c0df9..7697c9b52ed3 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_bpf.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_bpf.c
@@ -91,7 +91,7 @@ u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
goto out;
xdp_init_buff(xdp, PAGE_SIZE, &rxq->xdp_rxq);
- xdp_prepare_buff(xdp, buf_va, XDP_PACKET_HEADROOM, pkt_len, false);
+ xdp_prepare_buff(xdp, buf_va, XDP_PACKET_HEADROOM, pkt_len, true);
act = bpf_prog_run_xdp(prog, xdp);
@@ -174,6 +174,7 @@ static int mana_xdp_set(struct net_device *ndev, struct bpf_prog *prog,
struct mana_port_context *apc = netdev_priv(ndev);
struct bpf_prog *old_prog;
struct gdma_context *gc;
+ int err;
gc = apc->ac->gdma_dev->gdma_context;
@@ -195,11 +196,45 @@ static int mana_xdp_set(struct net_device *ndev, struct bpf_prog *prog,
*/
apc->bpf_prog = prog;
- if (old_prog)
- bpf_prog_put(old_prog);
+ if (apc->port_is_up) {
+		/* Re-create the rxqs after an XDP prog is loaded or unloaded,
+		 * e.g. to switch from full pages to smaller page fragments
+		 * when the prog is unloaded, and vice versa.
+		 */
+
+ /* Pre-allocate buffers to prevent failure in mana_attach */
+ err = mana_pre_alloc_rxbufs(apc, ndev->mtu, apc->num_queues);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "XDP: Insufficient memory for tx/rx re-config");
+ return err;
+ }
+
+ err = mana_detach(ndev, false);
+ if (err) {
+ netdev_err(ndev,
+ "mana_detach failed at xdp set: %d\n", err);
+ NL_SET_ERR_MSG_MOD(extack,
+ "XDP: Re-config failed at detach");
+ goto err_dealloc_rxbuffs;
+ }
+
+ err = mana_attach(ndev);
+ if (err) {
+ netdev_err(ndev,
+ "mana_attach failed at xdp set: %d\n", err);
+ NL_SET_ERR_MSG_MOD(extack,
+ "XDP: Re-config failed at attach");
+ goto err_dealloc_rxbuffs;
+ }
- if (apc->port_is_up)
mana_chn_setxdp(apc, prog);
+ mana_pre_dealloc_rxbufs(apc);
+ }
+
+ if (old_prog)
+ bpf_prog_put(old_prog);
if (prog)
ndev->max_mtu = MANA_XDP_MTU_MAX;
@@ -207,6 +242,11 @@ static int mana_xdp_set(struct net_device *ndev, struct bpf_prog *prog,
ndev->max_mtu = gc->adapter_mtu - ETH_HLEN;
return 0;
+
+err_dealloc_rxbuffs:
+ apc->bpf_prog = old_prog;
+ mana_pre_dealloc_rxbufs(apc);
+ return err;
}
int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index d8af5e7e15b4..1ad154f9db1a 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -3,20 +3,25 @@
#include <uapi/linux/bpf.h>
+#include <linux/debugfs.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/filter.h>
#include <linux/mm.h>
#include <linux/pci.h>
+#include <linux/export.h>
+#include <linux/skbuff.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
+#include <net/netdev_lock.h>
#include <net/page_pool/helpers.h>
#include <net/xdp.h>
#include <net/mana/mana.h>
#include <net/mana/mana_auxiliary.h>
+#include <net/mana/hw_channel.h>
static DEFINE_IDA(mana_adev_ida);
@@ -30,25 +35,59 @@ static void mana_adev_idx_free(int idx)
ida_free(&mana_adev_ida, idx);
}
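+/* debugfs read op: dump the raw ring memory of a GDMA queue to user space */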
+static ssize_t mana_dbg_q_read(struct file *filp, char __user *buf, size_t count,
+ loff_t *pos)
+{
+ struct gdma_queue *gdma_q = filp->private_data;
+
+ return simple_read_from_buffer(buf, count, pos, gdma_q->queue_mem_ptr,
+ gdma_q->queue_size);
+}
+
+static const struct file_operations mana_dbg_q_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = mana_dbg_q_read,
+};
+
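+/* Defer to mana_need_log() when the GDMA context is reachable; otherwise default to logging */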
+static bool mana_en_need_log(struct mana_port_context *apc, int err)
+{
+ if (apc && apc->ac && apc->ac->gdma_dev &&
+ apc->ac->gdma_dev->gdma_context)
+ return mana_need_log(apc->ac->gdma_dev->gdma_context, err);
+ else
+ return true;
+}
+
+static void mana_put_rx_page(struct mana_rxq *rxq, struct page *page,
+ bool from_pool)
+{
+ if (from_pool)
+ page_pool_put_full_page(rxq->page_pool, page, false);
+ else
+ put_page(page);
+}
+
/* Microsoft Azure Network Adapter (MANA) functions */
static int mana_open(struct net_device *ndev)
{
struct mana_port_context *apc = netdev_priv(ndev);
int err;
-
err = mana_alloc_queues(ndev);
- if (err)
+
+ if (err) {
+ netdev_err(ndev, "%s failed to allocate queues: %d\n", __func__, err);
return err;
+ }
apc->port_is_up = true;
/* Ensure port state updated before txq state */
smp_wmb();
- netif_carrier_on(ndev);
netif_tx_wake_all_queues(ndev);
-
+ netdev_dbg(ndev, "%s successful\n", __func__);
return 0;
}
@@ -62,6 +101,46 @@ static int mana_close(struct net_device *ndev)
return mana_detach(ndev, true);
}
+static void mana_link_state_handle(struct work_struct *w)
+{
+ struct mana_context *ac;
+ struct net_device *ndev;
+ u32 link_event;
+ bool link_up;
+ int i;
+
+ ac = container_of(w, struct mana_context, link_change_work);
+
+ rtnl_lock();
+
+ link_event = READ_ONCE(ac->link_event);
+
+ if (link_event == HWC_DATA_HW_LINK_CONNECT)
+ link_up = true;
+ else if (link_event == HWC_DATA_HW_LINK_DISCONNECT)
+ link_up = false;
+ else
+ goto out;
+
+ /* Process all ports */
+ for (i = 0; i < ac->num_ports; i++) {
+ ndev = ac->ports[i];
+ if (!ndev)
+ continue;
+
+ if (link_up) {
+ netif_carrier_on(ndev);
+
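+			/* rtnl_lock is already held here, as required by __netdev_notify_peers() */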
+ __netdev_notify_peers(ndev);
+ } else {
+ netif_carrier_off(ndev);
+ }
+ }
+
+out:
+ rtnl_unlock();
+}
+
static bool mana_can_tx(struct gdma_queue *wq)
{
return mana_gd_wq_avail_space(wq) >= MAX_TX_WQE_SIZE;
@@ -160,6 +239,9 @@ static int mana_map_skb(struct sk_buff *skb, struct mana_port_context *apc,
return 0;
frag_err:
+ if (net_ratelimit())
+ netdev_err(apc->ndev, "Failed to map skb of size %u to DMA\n",
+ skb->len);
for (i = sg_i - 1; i >= hsg; i--)
dma_unmap_page(dev, ash->dma_handle[i], ash->size[i],
DMA_TO_DEVICE);
@@ -229,10 +311,10 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
struct netdev_queue *net_txq;
struct mana_stats_tx *tx_stats;
struct gdma_queue *gdma_sq;
+ int err, len, num_gso_seg;
unsigned int csum_type;
struct mana_txq *txq;
struct mana_cq *cq;
- int err, len;
if (unlikely(!apc->port_is_up))
goto tx_drop;
@@ -240,11 +322,29 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (skb_cow_head(skb, MANA_HEADROOM))
goto tx_drop_count;
+ if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
+ goto tx_drop_count;
+
txq = &apc->tx_qp[txq_idx].txq;
gdma_sq = txq->gdma_sq;
cq = &apc->tx_qp[txq_idx].tx_cq;
tx_stats = &txq->stats;
+ BUILD_BUG_ON(MAX_TX_WQE_SGL_ENTRIES != MANA_MAX_TX_WQE_SGL_ENTRIES);
+ if (MAX_SKB_FRAGS + 2 > MAX_TX_WQE_SGL_ENTRIES &&
+ skb_shinfo(skb)->nr_frags + 2 > MAX_TX_WQE_SGL_ENTRIES) {
+		/* GSO skbs that exceed the hardware SGE limit are not expected
+		 * here, as they are handled in the mana_features_check() callback.
+		 */
+ if (skb_linearize(skb)) {
+ netdev_warn_once(ndev, "Failed to linearize skb with nr_frags=%d and is_gso=%d\n",
+ skb_shinfo(skb)->nr_frags,
+ skb_is_gso(skb));
+ goto tx_drop_count;
+ }
+ apc->eth_stats.tx_linear_pkt_cnt++;
+ }
+
pkg.tx_oob.s_oob.vcq_num = cq->gdma_id;
pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame;
@@ -358,8 +458,6 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
}
- WARN_ON_ONCE(pkg.wqe_req.num_sge > MAX_TX_WQE_SGL_ENTRIES);
-
if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) {
pkg.wqe_req.sgl = pkg.sgl_array;
} else {
@@ -382,6 +480,7 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb_queue_tail(&txq->pending_skbs, skb);
len = skb->len;
+ num_gso_seg = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
net_txq = netdev_get_tx_queue(ndev, txq_idx);
err = mana_gd_post_work_request(gdma_sq, &pkg.wqe_req,
@@ -393,9 +492,9 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (err) {
(void)skb_dequeue_tail(&txq->pending_skbs);
+ mana_unmap_skb(skb, apc);
netdev_warn(ndev, "Failed to post TX OOB: %d\n", err);
- err = NETDEV_TX_BUSY;
- goto tx_busy;
+ goto free_sgl_ptr;
}
err = NETDEV_TX_OK;
@@ -406,13 +505,15 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* skb may be freed after mana_gd_post_work_request. Do not use it. */
skb = NULL;
+	/* Populate the packet and byte counters based on post-GSO packet
+	 * calculations.
+	 */
tx_stats = &txq->stats;
u64_stats_update_begin(&tx_stats->syncp);
- tx_stats->packets++;
- tx_stats->bytes += len;
+ tx_stats->packets += num_gso_seg;
+ tx_stats->bytes += len + ((num_gso_seg - 1) * gso_hs);
u64_stats_update_end(&tx_stats->syncp);
-tx_busy:
if (netif_tx_queue_stopped(net_txq) && mana_can_tx(gdma_sq)) {
netif_tx_wake_queue(net_txq);
apc->eth_stats.wake_queue++;
@@ -430,6 +531,25 @@ tx_drop:
return NETDEV_TX_OK;
}
+#if (MAX_SKB_FRAGS + 2 > MANA_MAX_TX_WQE_SGL_ENTRIES)
+static netdev_features_t mana_features_check(struct sk_buff *skb,
+ struct net_device *ndev,
+ netdev_features_t features)
+{
+ if (skb_shinfo(skb)->nr_frags + 2 > MAX_TX_WQE_SGL_ENTRIES) {
+ /* Exceeds HW SGE limit.
+ * GSO case:
+ * Disable GSO so the stack will software-segment the skb
+ * into smaller skbs that fit the SGE budget.
+ * Non-GSO case:
+ * The xmit path will attempt skb_linearize() as a fallback.
+ */
+ features &= ~NETIF_F_GSO_MASK;
+ }
+ return features;
+}
+#endif
+
static void mana_get_stats64(struct net_device *ndev,
struct rtnl_link_stats64 *st)
{
@@ -446,6 +566,11 @@ static void mana_get_stats64(struct net_device *ndev,
netdev_stats_to_stats64(st, &ndev->stats);
+ if (apc->ac->hwc_timeout_occurred)
+ netdev_warn_once(ndev, "HWC timeout occurred\n");
+
+ st->rx_missed_errors = apc->ac->hc_stats.hc_rx_discards_no_wqe;
+
for (q = 0; q < num_queues; q++) {
rx_stats = &apc->rxqs[q]->stats;
@@ -481,7 +606,7 @@ static int mana_get_tx_queue(struct net_device *ndev, struct sk_buff *skb,
struct sock *sk = skb->sk;
int txq;
- txq = apc->indir_table[hash & MANA_INDIRECT_TABLE_MASK];
+ txq = apc->indir_table[hash & (apc->indir_table_sz - 1)];
if (txq != old_q && sk && sk_fullsock(sk) &&
rcu_access_pointer(sk->sk_dst_cache))
@@ -511,7 +636,7 @@ static u16 mana_select_queue(struct net_device *ndev, struct sk_buff *skb,
}
/* Release pre-allocated RX buffers */
-static void mana_pre_dealloc_rxbufs(struct mana_port_context *mpc)
+void mana_pre_dealloc_rxbufs(struct mana_port_context *mpc)
{
struct device *dev;
int i;
@@ -591,20 +716,43 @@ static void *mana_get_rxbuf_pre(struct mana_rxq *rxq, dma_addr_t *da)
}
/* Get RX buffer's data size, alloc size, XDP headroom based on MTU */
-static void mana_get_rxbuf_cfg(int mtu, u32 *datasize, u32 *alloc_size,
- u32 *headroom)
+static void mana_get_rxbuf_cfg(struct mana_port_context *apc,
+ int mtu, u32 *datasize, u32 *alloc_size,
+ u32 *headroom, u32 *frag_count)
{
- if (mtu > MANA_XDP_MTU_MAX)
- *headroom = 0; /* no support for XDP */
- else
- *headroom = XDP_PACKET_HEADROOM;
-
- *alloc_size = mtu + MANA_RXBUF_PAD + *headroom;
+ u32 len, buf_size;
+ /* Calculate datasize first (consistent across all cases) */
*datasize = mtu + ETH_HLEN;
+
+	/* For XDP and jumbo frames, make sure only one packet fits per page */
+ if (mtu + MANA_RXBUF_PAD > PAGE_SIZE / 2 || mana_xdp_get(apc)) {
+ if (mana_xdp_get(apc)) {
+ *headroom = XDP_PACKET_HEADROOM;
+ *alloc_size = PAGE_SIZE;
+ } else {
+ *headroom = 0; /* no support for XDP */
+ *alloc_size = SKB_DATA_ALIGN(mtu + MANA_RXBUF_PAD +
+ *headroom);
+ }
+
+ *frag_count = 1;
+ return;
+ }
+
+ /* Standard MTU case - optimize for multiple packets per page */
+ *headroom = 0;
+
+ /* Calculate base buffer size needed */
+ len = SKB_DATA_ALIGN(mtu + MANA_RXBUF_PAD + *headroom);
+ buf_size = ALIGN(len, MANA_RX_FRAG_ALIGNMENT);
+
+ /* Calculate how many packets can fit in a page */
+ *frag_count = PAGE_SIZE / buf_size;
+ *alloc_size = buf_size;
}
-static int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu)
+int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu, int num_queues)
{
struct device *dev;
struct page *page;
@@ -613,12 +761,13 @@ static int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu)
void *va;
int i;
- mana_get_rxbuf_cfg(new_mtu, &mpc->rxbpre_datasize,
- &mpc->rxbpre_alloc_size, &mpc->rxbpre_headroom);
+ mana_get_rxbuf_cfg(mpc, new_mtu, &mpc->rxbpre_datasize,
+ &mpc->rxbpre_alloc_size, &mpc->rxbpre_headroom,
+ &mpc->rxbpre_frag_count);
dev = mpc->ac->gdma_dev->gdma_context->dev;
- num_rxb = mpc->num_queues * RX_BUFFERS_PER_QUEUE;
+ num_rxb = num_queues * mpc->rx_queue_size;
WARN(mpc->rxbufs_pre, "mana rxbufs_pre exists\n");
mpc->rxbufs_pre = kmalloc_array(num_rxb, sizeof(void *), GFP_KERNEL);
@@ -632,30 +781,16 @@ static int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu)
mpc->rxbpre_total = 0;
for (i = 0; i < num_rxb; i++) {
- if (mpc->rxbpre_alloc_size > PAGE_SIZE) {
- va = netdev_alloc_frag(mpc->rxbpre_alloc_size);
- if (!va)
- goto error;
-
- page = virt_to_head_page(va);
- /* Check if the frag falls back to single page */
- if (compound_order(page) <
- get_order(mpc->rxbpre_alloc_size)) {
- put_page(page);
- goto error;
- }
- } else {
- page = dev_alloc_page();
- if (!page)
- goto error;
+ page = dev_alloc_pages(get_order(mpc->rxbpre_alloc_size));
+ if (!page)
+ goto error;
- va = page_to_virt(page);
- }
+ va = page_to_virt(page);
da = dma_map_single(dev, va + mpc->rxbpre_headroom,
mpc->rxbpre_datasize, DMA_FROM_DEVICE);
if (dma_mapping_error(dev, da)) {
- put_page(virt_to_head_page(va));
+ put_page(page);
goto error;
}
@@ -667,6 +802,7 @@ static int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu)
return 0;
error:
+ netdev_err(mpc->ndev, "Failed to pre-allocate RX buffers for %d queues\n", num_queues);
mana_pre_dealloc_rxbufs(mpc);
return -ENOMEM;
}
@@ -678,7 +814,7 @@ static int mana_change_mtu(struct net_device *ndev, int new_mtu)
int err;
/* Pre-allocate buffers to prevent failure in mana_attach later */
- err = mana_pre_alloc_rxbufs(mpc, new_mtu);
+ err = mana_pre_alloc_rxbufs(mpc, new_mtu, mpc->num_queues);
if (err) {
netdev_err(ndev, "Insufficient memory for new MTU\n");
return err;
@@ -690,12 +826,12 @@ static int mana_change_mtu(struct net_device *ndev, int new_mtu)
goto out;
}
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
err = mana_attach(ndev);
if (err) {
netdev_err(ndev, "mana_attach failed: %d\n", err);
- ndev->mtu = old_mtu;
+ WRITE_ONCE(ndev->mtu, old_mtu);
}
out:
@@ -703,24 +839,113 @@ out:
return err;
}
+static int mana_shaper_set(struct net_shaper_binding *binding,
+ const struct net_shaper *shaper,
+ struct netlink_ext_ack *extack)
+{
+ struct mana_port_context *apc = netdev_priv(binding->netdev);
+ u32 old_speed, rate;
+ int err;
+
+ if (shaper->handle.scope != NET_SHAPER_SCOPE_NETDEV) {
+ NL_SET_ERR_MSG_MOD(extack, "net shaper scope should be netdev");
+ return -EINVAL;
+ }
+
+ if (apc->handle.id && shaper->handle.id != apc->handle.id) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot create multiple shapers");
+ return -EOPNOTSUPP;
+ }
+
+ if (!shaper->bw_max || (shaper->bw_max % 100000000)) {
+ NL_SET_ERR_MSG_MOD(extack, "Please use multiples of 100Mbps for bandwidth");
+ return -EINVAL;
+ }
+
+ rate = div_u64(shaper->bw_max, 1000); /* Convert bps to Kbps */
+ rate = div_u64(rate, 1000); /* Convert Kbps to Mbps */
+
+ /* Get current speed */
+ err = mana_query_link_cfg(apc);
+ old_speed = (err) ? SPEED_UNKNOWN : apc->speed;
+
+ if (!err) {
+ err = mana_set_bw_clamp(apc, rate, TRI_STATE_TRUE);
+ apc->speed = (err) ? old_speed : rate;
+ apc->handle = (err) ? apc->handle : shaper->handle;
+ }
+
+ return err;
+}
+
+static int mana_shaper_del(struct net_shaper_binding *binding,
+ const struct net_shaper_handle *handle,
+ struct netlink_ext_ack *extack)
+{
+ struct mana_port_context *apc = netdev_priv(binding->netdev);
+ int err;
+
+ err = mana_set_bw_clamp(apc, 0, TRI_STATE_FALSE);
+
+ if (!err) {
+ /* Reset mana port context parameters */
+ apc->handle.id = 0;
+ apc->handle.scope = NET_SHAPER_SCOPE_UNSPEC;
+ apc->speed = apc->max_speed;
+ }
+
+ return err;
+}
+
+static void mana_shaper_cap(struct net_shaper_binding *binding,
+ enum net_shaper_scope scope,
+ unsigned long *flags)
+{
+ *flags = BIT(NET_SHAPER_A_CAPS_SUPPORT_BW_MAX) |
+ BIT(NET_SHAPER_A_CAPS_SUPPORT_METRIC_BPS);
+}
+
+static const struct net_shaper_ops mana_shaper_ops = {
+ .set = mana_shaper_set,
+ .delete = mana_shaper_del,
+ .capabilities = mana_shaper_cap,
+};
+
static const struct net_device_ops mana_devops = {
.ndo_open = mana_open,
.ndo_stop = mana_close,
.ndo_select_queue = mana_select_queue,
+#if (MAX_SKB_FRAGS + 2 > MANA_MAX_TX_WQE_SGL_ENTRIES)
+ .ndo_features_check = mana_features_check,
+#endif
.ndo_start_xmit = mana_start_xmit,
.ndo_validate_addr = eth_validate_addr,
.ndo_get_stats64 = mana_get_stats64,
.ndo_bpf = mana_bpf,
.ndo_xdp_xmit = mana_xdp_xmit,
.ndo_change_mtu = mana_change_mtu,
+ .net_shaper_ops = &mana_shaper_ops,
};
static void mana_cleanup_port_context(struct mana_port_context *apc)
{
+	/*
+	 * Make sure subsequent cleanup attempts don't end up removing an
+	 * already-removed dentry pointer.
+	 */
+ debugfs_remove(apc->mana_port_debugfs);
+ apc->mana_port_debugfs = NULL;
kfree(apc->rxqs);
apc->rxqs = NULL;
}
+static void mana_cleanup_indir_table(struct mana_port_context *apc)
+{
+ apc->indir_table_sz = 0;
+ kfree(apc->indir_table);
+ kfree(apc->rxobj_table);
+}
+
static int mana_init_port_context(struct mana_port_context *apc)
{
apc->rxqs = kcalloc(apc->num_queues, sizeof(struct mana_rxq *),
@@ -745,8 +970,13 @@ static int mana_send_request(struct mana_context *ac, void *in_buf,
err = mana_gd_send_request(gc, in_len, in_buf, out_len,
out_buf);
if (err || resp->status) {
- dev_err(dev, "Failed to send mana message: %d, 0x%x\n",
- err, resp->status);
+ if (err == -EOPNOTSUPP)
+ return err;
+
+ if (req->req.msg_type != MANA_QUERY_PHY_STAT &&
+ mana_need_log(gc, err))
+ dev_err(dev, "Failed to send mana message: %d, 0x%x\n",
+ err, resp->status);
return err ? err : -EPROTO;
}
@@ -821,8 +1051,10 @@ static void mana_pf_deregister_hw_vport(struct mana_port_context *apc)
err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
sizeof(resp));
if (err) {
- netdev_err(apc->ndev, "Failed to unregister hw vPort: %d\n",
- err);
+ if (mana_en_need_log(apc, err))
+ netdev_err(apc->ndev, "Failed to unregister hw vPort: %d\n",
+ err);
+
return;
}
@@ -877,8 +1109,10 @@ static void mana_pf_deregister_filter(struct mana_port_context *apc)
err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
sizeof(resp));
if (err) {
- netdev_err(apc->ndev, "Failed to unregister filter: %d\n",
- err);
+ if (mana_en_need_log(apc, err))
+ netdev_err(apc->ndev, "Failed to unregister filter: %d\n",
+ err);
+
return;
}
@@ -892,7 +1126,7 @@ static void mana_pf_deregister_filter(struct mana_port_context *apc)
static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
u32 proto_minor_ver, u32 proto_micro_ver,
- u16 *max_num_vports)
+ u16 *max_num_vports, u8 *bm_hostmode)
{
struct gdma_context *gc = ac->gdma_dev->gdma_context;
struct mana_query_device_cfg_resp resp = {};
@@ -903,7 +1137,7 @@ static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
sizeof(req), sizeof(resp));
- req.hdr.resp.msg_version = GDMA_MESSAGE_V2;
+ req.hdr.resp.msg_version = GDMA_MESSAGE_V3;
req.proto_major_ver = proto_major_ver;
req.proto_minor_ver = proto_minor_ver;
@@ -927,11 +1161,18 @@ static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
*max_num_vports = resp.max_num_vports;
- if (resp.hdr.response.msg_version == GDMA_MESSAGE_V2)
+ if (resp.hdr.response.msg_version >= GDMA_MESSAGE_V2)
gc->adapter_mtu = resp.adapter_mtu;
else
gc->adapter_mtu = ETH_FRAME_LEN;
+ if (resp.hdr.response.msg_version >= GDMA_MESSAGE_V3)
+ *bm_hostmode = resp.bm_hostmode;
+ else
+ *bm_hostmode = 0;
+
+ debugfs_create_u16("adapter-MTU", 0400, gc->mana_pci_debugfs, &gc->adapter_mtu);
+
return 0;
}
@@ -962,7 +1203,16 @@ static int mana_query_vport_cfg(struct mana_port_context *apc, u32 vport_index,
*max_sq = resp.max_num_sq;
*max_rq = resp.max_num_rq;
- *num_indir_entry = resp.num_indirection_ent;
+ if (resp.num_indirection_ent > 0 &&
+ resp.num_indirection_ent <= MANA_INDIRECT_TABLE_MAX_SIZE &&
+ is_power_of_2(resp.num_indirection_ent)) {
+ *num_indir_entry = resp.num_indirection_ent;
+ } else {
+ netdev_warn(apc->ndev,
+ "Setting indirection table size to default %d for vPort %d\n",
+ MANA_INDIRECT_TABLE_DEF_SIZE, apc->port_idx);
+ *num_indir_entry = MANA_INDIRECT_TABLE_DEF_SIZE;
+ }
apc->port_handle = resp.vport;
ether_addr_copy(apc->mac_addr, resp.mac_addr);
@@ -977,7 +1227,7 @@ void mana_uncfg_vport(struct mana_port_context *apc)
WARN_ON(apc->vport_use_count < 0);
mutex_unlock(&apc->vport_mutex);
}
-EXPORT_SYMBOL_NS(mana_uncfg_vport, NET_MANA);
+EXPORT_SYMBOL_NS(mana_uncfg_vport, "NET_MANA");
int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
u32 doorbell_pg_id)
@@ -1047,22 +1297,20 @@ out:
return err;
}
-EXPORT_SYMBOL_NS(mana_cfg_vport, NET_MANA);
+EXPORT_SYMBOL_NS(mana_cfg_vport, "NET_MANA");
static int mana_cfg_vport_steering(struct mana_port_context *apc,
enum TRI_STATE rx,
bool update_default_rxobj, bool update_key,
bool update_tab)
{
- u16 num_entries = MANA_INDIRECT_TABLE_SIZE;
struct mana_cfg_rx_steer_req_v2 *req;
struct mana_cfg_rx_steer_resp resp = {};
struct net_device *ndev = apc->ndev;
- mana_handle_t *req_indir_tab;
u32 req_buf_size;
int err;
- req_buf_size = sizeof(*req) + sizeof(mana_handle_t) * num_entries;
+ req_buf_size = struct_size(req, indir_tab, apc->indir_table_sz);
req = kzalloc(req_buf_size, GFP_KERNEL);
if (!req)
return -ENOMEM;
@@ -1073,8 +1321,9 @@ static int mana_cfg_vport_steering(struct mana_port_context *apc,
req->hdr.req.msg_version = GDMA_MESSAGE_V2;
req->vport = apc->port_handle;
- req->num_indir_entries = num_entries;
- req->indir_tab_offset = sizeof(*req);
+ req->num_indir_entries = apc->indir_table_sz;
+ req->indir_tab_offset = offsetof(struct mana_cfg_rx_steer_req_v2,
+ indir_tab);
req->rx_enable = rx;
req->rss_enable = apc->rss_state;
req->update_default_rxobj = update_default_rxobj;
@@ -1086,16 +1335,16 @@ static int mana_cfg_vport_steering(struct mana_port_context *apc,
if (update_key)
memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE);
- if (update_tab) {
- req_indir_tab = (mana_handle_t *)(req + 1);
- memcpy(req_indir_tab, apc->rxobj_table,
- req->num_indir_entries * sizeof(mana_handle_t));
- }
+ if (update_tab)
+ memcpy(req->indir_tab, apc->rxobj_table,
+ flex_array_size(req, indir_tab, req->num_indir_entries));
err = mana_send_request(apc->ac, req, req_buf_size, &resp,
sizeof(resp));
if (err) {
- netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
+ if (mana_en_need_log(apc, err))
+ netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
+
goto out;
}
@@ -1113,12 +1362,101 @@ static int mana_cfg_vport_steering(struct mana_port_context *apc,
}
netdev_info(ndev, "Configured steering vPort %llu entries %u\n",
- apc->port_handle, num_entries);
+ apc->port_handle, apc->indir_table_sz);
out:
kfree(req);
return err;
}
+int mana_query_link_cfg(struct mana_port_context *apc)
+{
+ struct net_device *ndev = apc->ndev;
+ struct mana_query_link_config_resp resp = {};
+ struct mana_query_link_config_req req = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_LINK_CONFIG,
+ sizeof(req), sizeof(resp));
+
+ req.vport = apc->port_handle;
+ req.hdr.resp.msg_version = GDMA_MESSAGE_V2;
+
+ err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+ sizeof(resp));
+
+ if (err) {
+ if (err == -EOPNOTSUPP) {
+ netdev_info_once(ndev, "MANA_QUERY_LINK_CONFIG not supported\n");
+ return err;
+ }
+ netdev_err(ndev, "Failed to query link config: %d\n", err);
+ return err;
+ }
+
+ err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_LINK_CONFIG,
+ sizeof(resp));
+
+ if (err || resp.hdr.status) {
+ netdev_err(ndev, "Failed to query link config: %d, 0x%x\n", err,
+ resp.hdr.status);
+ if (!err)
+ err = -EOPNOTSUPP;
+ return err;
+ }
+
+ if (resp.qos_unconfigured) {
+ err = -EINVAL;
+ return err;
+ }
+ apc->speed = resp.link_speed_mbps;
+ apc->max_speed = resp.qos_speed_mbps;
+ return 0;
+}
+
+int mana_set_bw_clamp(struct mana_port_context *apc, u32 speed,
+ int enable_clamping)
+{
+ struct mana_set_bw_clamp_resp resp = {};
+ struct mana_set_bw_clamp_req req = {};
+ struct net_device *ndev = apc->ndev;
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_SET_BW_CLAMP,
+ sizeof(req), sizeof(resp));
+ req.vport = apc->port_handle;
+ req.link_speed_mbps = speed;
+ req.enable_clamping = enable_clamping;
+
+ err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+ sizeof(resp));
+
+ if (err) {
+ if (err == -EOPNOTSUPP) {
+ netdev_info_once(ndev, "MANA_SET_BW_CLAMP not supported\n");
+ return err;
+ }
+ netdev_err(ndev, "Failed to set bandwidth clamp for speed %u, err = %d",
+ speed, err);
+ return err;
+ }
+
+ err = mana_verify_resp_hdr(&resp.hdr, MANA_SET_BW_CLAMP,
+ sizeof(resp));
+
+ if (err || resp.hdr.status) {
+ netdev_err(ndev, "Failed to set bandwidth clamp: %d, 0x%x\n", err,
+ resp.hdr.status);
+ if (!err)
+ err = -EOPNOTSUPP;
+ return err;
+ }
+
+ if (resp.qos_unconfigured)
+ netdev_info(ndev, "QoS is unconfigured\n");
+
+ return 0;
+}
+
int mana_create_wq_obj(struct mana_port_context *apc,
mana_handle_t vport,
u32 wq_type, struct mana_obj_spec *wq_spec,
@@ -1172,7 +1510,7 @@ int mana_create_wq_obj(struct mana_port_context *apc,
out:
return err;
}
-EXPORT_SYMBOL_NS(mana_create_wq_obj, NET_MANA);
+EXPORT_SYMBOL_NS(mana_create_wq_obj, "NET_MANA");
void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
mana_handle_t wq_obj)
@@ -1190,7 +1528,9 @@ void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
sizeof(resp));
if (err) {
- netdev_err(ndev, "Failed to destroy WQ object: %d\n", err);
+ if (mana_en_need_log(apc, err))
+ netdev_err(ndev, "Failed to destroy WQ object: %d\n", err);
+
return;
}
@@ -1200,7 +1540,7 @@ void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
netdev_err(ndev, "Failed to destroy WQ object: %d, 0x%x\n", err,
resp.hdr.status);
}
-EXPORT_SYMBOL_NS(mana_destroy_wq_obj, NET_MANA);
+EXPORT_SYMBOL_NS(mana_destroy_wq_obj, "NET_MANA");
static void mana_destroy_eq(struct mana_context *ac)
{
@@ -1211,6 +1551,9 @@ static void mana_destroy_eq(struct mana_context *ac)
if (!ac->eqs)
return;
+ debugfs_remove_recursive(ac->mana_eqs_debugfs);
+ ac->mana_eqs_debugfs = NULL;
+
for (i = 0; i < gc->max_num_queues; i++) {
eq = ac->eqs[i].eq;
if (!eq)
@@ -1223,6 +1566,18 @@ static void mana_destroy_eq(struct mana_context *ac)
ac->eqs = NULL;
}
+static void mana_create_eq_debugfs(struct mana_context *ac, int i)
+{
+ struct mana_eq eq = ac->eqs[i];
+ char eqnum[32];
+
+ sprintf(eqnum, "eq%d", i);
+ eq.mana_eq_debugfs = debugfs_create_dir(eqnum, ac->mana_eqs_debugfs);
+ debugfs_create_u32("head", 0400, eq.mana_eq_debugfs, &eq.eq->head);
+ debugfs_create_u32("tail", 0400, eq.mana_eq_debugfs, &eq.eq->tail);
+ debugfs_create_file("eq_dump", 0400, eq.mana_eq_debugfs, eq.eq, &mana_dbg_q_fops);
+}
+
static int mana_create_eq(struct mana_context *ac)
{
struct gdma_dev *gd = ac->gdma_dev;
@@ -1243,11 +1598,16 @@ static int mana_create_eq(struct mana_context *ac)
spec.eq.context = ac->eqs;
spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
+ ac->mana_eqs_debugfs = debugfs_create_dir("EQs", gc->mana_pci_debugfs);
+
for (i = 0; i < gc->max_num_queues; i++) {
spec.eq.msix_index = (i + 1) % gc->num_msix_usable;
err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq);
- if (err)
+ if (err) {
+ dev_err(gc->dev, "Failed to create EQ %d : %d\n", i, err);
goto out;
+ }
+ mana_create_eq_debugfs(ac, i);
}
return 0;
@@ -1326,7 +1686,7 @@ static int mana_move_wq_tail(struct gdma_queue *wq, u32 num_units)
return 0;
}
-static void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc)
+void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc)
{
struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
@@ -1488,8 +1848,12 @@ static struct sk_buff *mana_build_skb(struct mana_rxq *rxq, void *buf_va,
return NULL;
if (xdp->data_hard_start) {
+ u32 metasize = xdp->data - xdp->data_meta;
+
skb_reserve(skb, xdp->data - xdp->data_hard_start);
skb_put(skb, xdp->data_end - xdp->data);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
return skb;
}
@@ -1587,8 +1951,11 @@ drop_xdp:
drop:
if (from_pool) {
- page_pool_recycle_direct(rxq->page_pool,
- virt_to_head_page(buf_va));
+ if (rxq->frag_count == 1)
+ page_pool_recycle_direct(rxq->page_pool,
+ virt_to_head_page(buf_va));
+ else
+ page_pool_free_va(rxq->page_pool, buf_va, true);
} else {
WARN_ON_ONCE(rxq->xdp_save_va);
/* Save for reuse */
@@ -1601,51 +1968,49 @@ drop:
}
static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
- dma_addr_t *da, bool *from_pool, bool is_napi)
+ dma_addr_t *da, bool *from_pool)
{
struct page *page;
+ u32 offset;
void *va;
-
*from_pool = false;
- /* Reuse XDP dropped page if available */
- if (rxq->xdp_save_va) {
- va = rxq->xdp_save_va;
- rxq->xdp_save_va = NULL;
- } else if (rxq->alloc_size > PAGE_SIZE) {
- if (is_napi)
- va = napi_alloc_frag(rxq->alloc_size);
- else
- va = netdev_alloc_frag(rxq->alloc_size);
+ /* Don't use fragments for jumbo frames or XDP where it's 1 fragment
+ * per page.
+ */
+ if (rxq->frag_count == 1) {
+ /* Reuse XDP dropped page if available */
+ if (rxq->xdp_save_va) {
+ va = rxq->xdp_save_va;
+ page = virt_to_head_page(va);
+ rxq->xdp_save_va = NULL;
+ } else {
+ page = page_pool_dev_alloc_pages(rxq->page_pool);
+ if (!page)
+ return NULL;
- if (!va)
- return NULL;
+ *from_pool = true;
+ va = page_to_virt(page);
+ }
- page = virt_to_head_page(va);
- /* Check if the frag falls back to single page */
- if (compound_order(page) < get_order(rxq->alloc_size)) {
- put_page(page);
+ *da = dma_map_single(dev, va + rxq->headroom, rxq->datasize,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, *da)) {
+ mana_put_rx_page(rxq, page, *from_pool);
return NULL;
}
- } else {
- page = page_pool_dev_alloc_pages(rxq->page_pool);
- if (!page)
- return NULL;
- *from_pool = true;
- va = page_to_virt(page);
+ return va;
}
- *da = dma_map_single(dev, va + rxq->headroom, rxq->datasize,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(dev, *da)) {
- if (*from_pool)
- page_pool_put_full_page(rxq->page_pool, page, false);
- else
- put_page(virt_to_head_page(va));
-
+ page = page_pool_dev_alloc_frag(rxq->page_pool, &offset,
+ rxq->alloc_size);
+ if (!page)
return NULL;
- }
+
+ va = page_to_virt(page) + offset;
+ *da = page_pool_get_dma_addr(page) + offset + rxq->headroom;
+ *from_pool = true;
return va;
}
@@ -1659,12 +2024,12 @@ static void mana_refill_rx_oob(struct device *dev, struct mana_rxq *rxq,
dma_addr_t da;
void *va;
- va = mana_get_rxfrag(rxq, dev, &da, &from_pool, true);
+ va = mana_get_rxfrag(rxq, dev, &da, &from_pool);
if (!va)
return;
-
- dma_unmap_single(dev, rxoob->sgl[0].address, rxq->datasize,
- DMA_FROM_DEVICE);
+ if (!rxoob->from_pool || rxq->frag_count == 1)
+ dma_unmap_single(dev, rxoob->sgl[0].address, rxq->datasize,
+ DMA_FROM_DEVICE);
*old_buf = rxoob->buf_va;
*old_fp = rxoob->from_pool;
@@ -1775,7 +2140,6 @@ static void mana_poll_rx_cq(struct mana_cq *cq)
static int mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
{
struct mana_cq *cq = context;
- u8 arm_bit;
int w;
WARN_ON_ONCE(cq->gdma_cq != gdma_queue);
@@ -1786,16 +2150,23 @@ static int mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
mana_poll_tx_cq(cq);
w = cq->work_done;
-
- if (w < cq->budget &&
- napi_complete_done(&cq->napi, w)) {
- arm_bit = SET_ARM_BIT;
- } else {
- arm_bit = 0;
+ cq->work_done_since_doorbell += w;
+
+ if (w < cq->budget) {
+ mana_gd_ring_cq(gdma_queue, SET_ARM_BIT);
+ cq->work_done_since_doorbell = 0;
+ napi_complete_done(&cq->napi, w);
+ } else if (cq->work_done_since_doorbell >
+ cq->gdma_cq->queue_size / COMP_ENTRY_SIZE * 4) {
+ /* MANA hardware requires at least one doorbell ring every 8
+ * wraparounds of CQ even if there is no need to arm the CQ.
+ * This driver rings the doorbell as soon as we have exceeded
+ * 4 wraparounds.
+ */
+ mana_gd_ring_cq(gdma_queue, 0);
+ cq->work_done_since_doorbell = 0;
}
- mana_gd_ring_cq(gdma_queue, arm_bit);
-
return w;
}
@@ -1848,11 +2219,16 @@ static void mana_destroy_txq(struct mana_port_context *apc)
return;
for (i = 0; i < apc->num_queues; i++) {
- napi = &apc->tx_qp[i].tx_cq.napi;
- napi_synchronize(napi);
- napi_disable(napi);
- netif_napi_del(napi);
+ debugfs_remove_recursive(apc->tx_qp[i].mana_tx_debugfs);
+ apc->tx_qp[i].mana_tx_debugfs = NULL;
+ napi = &apc->tx_qp[i].tx_cq.napi;
+ if (apc->tx_qp[i].txq.napi_initialized) {
+ napi_synchronize(napi);
+ napi_disable_locked(napi);
+ netif_napi_del_locked(napi);
+ apc->tx_qp[i].txq.napi_initialized = false;
+ }
mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object);
mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq);
@@ -1864,6 +2240,31 @@ static void mana_destroy_txq(struct mana_port_context *apc)
apc->tx_qp = NULL;
}
+static void mana_create_txq_debugfs(struct mana_port_context *apc, int idx)
+{
+ struct mana_tx_qp *tx_qp = &apc->tx_qp[idx];
+ char qnum[32];
+
+ sprintf(qnum, "TX-%d", idx);
+ tx_qp->mana_tx_debugfs = debugfs_create_dir(qnum, apc->mana_port_debugfs);
+ debugfs_create_u32("sq_head", 0400, tx_qp->mana_tx_debugfs,
+ &tx_qp->txq.gdma_sq->head);
+ debugfs_create_u32("sq_tail", 0400, tx_qp->mana_tx_debugfs,
+ &tx_qp->txq.gdma_sq->tail);
+ debugfs_create_u32("sq_pend_skb_qlen", 0400, tx_qp->mana_tx_debugfs,
+ &tx_qp->txq.pending_skbs.qlen);
+ debugfs_create_u32("cq_head", 0400, tx_qp->mana_tx_debugfs,
+ &tx_qp->tx_cq.gdma_cq->head);
+ debugfs_create_u32("cq_tail", 0400, tx_qp->mana_tx_debugfs,
+ &tx_qp->tx_cq.gdma_cq->tail);
+ debugfs_create_u32("cq_budget", 0400, tx_qp->mana_tx_debugfs,
+ &tx_qp->tx_cq.budget);
+ debugfs_create_file("txq_dump", 0400, tx_qp->mana_tx_debugfs,
+ tx_qp->txq.gdma_sq, &mana_dbg_q_fops);
+ debugfs_create_file("cq_dump", 0400, tx_qp->mana_tx_debugfs,
+ tx_qp->tx_cq.gdma_cq, &mana_dbg_q_fops);
+}
+
static int mana_create_txq(struct mana_port_context *apc,
struct net_device *net)
{
@@ -1886,15 +2287,17 @@ static int mana_create_txq(struct mana_port_context *apc,
return -ENOMEM;
/* The minimum size of the WQE is 32 bytes, hence
- * MAX_SEND_BUFFERS_PER_QUEUE represents the maximum number of WQEs
+ * apc->tx_queue_size represents the maximum number of WQEs
* the SQ can store. This value is then used to size other queues
* to prevent overflow.
+	 * Also note that txq_size is always MANA_PAGE_ALIGNED: the minimum
+	 * value of apc->tx_queue_size is 128, which makes txq_size
+	 * 128 * 32 = 4096, and all larger values of apc->tx_queue_size are
+	 * powers of two.
*/
- txq_size = MAX_SEND_BUFFERS_PER_QUEUE * 32;
- BUILD_BUG_ON(!PAGE_ALIGNED(txq_size));
+ txq_size = apc->tx_queue_size * 32;
- cq_size = MAX_SEND_BUFFERS_PER_QUEUE * COMP_ENTRY_SIZE;
- cq_size = PAGE_ALIGN(cq_size);
+ cq_size = apc->tx_queue_size * COMP_ENTRY_SIZE;
gc = gd->gdma_context;
@@ -1908,6 +2311,7 @@ static int mana_create_txq(struct mana_port_context *apc,
txq->ndev = net;
txq->net_txq = netdev_get_tx_queue(net, i);
txq->vp_offset = apc->tx_vp_offset;
+ txq->napi_initialized = false;
skb_queue_head_init(&txq->pending_skbs);
memset(&spec, 0, sizeof(spec));
@@ -1972,20 +2376,26 @@ static int mana_create_txq(struct mana_port_context *apc,
gc->cq_table[cq->gdma_id] = cq->gdma_cq;
- netif_napi_add_tx(net, &cq->napi, mana_poll);
- napi_enable(&cq->napi);
+ mana_create_txq_debugfs(apc, i);
+
+ set_bit(NAPI_STATE_NO_BUSY_POLL, &cq->napi.state);
+ netif_napi_add_locked(net, &cq->napi, mana_poll);
+ napi_enable_locked(&cq->napi);
+ txq->napi_initialized = true;
mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
}
return 0;
out:
+ netdev_err(net, "Failed to create %d TX queues, %d\n",
+ apc->num_queues, err);
mana_destroy_txq(apc);
return err;
}
static void mana_destroy_rxq(struct mana_port_context *apc,
- struct mana_rxq *rxq, bool validate_state)
+ struct mana_rxq *rxq, bool napi_initialized)
{
struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
@@ -1998,17 +2408,19 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
if (!rxq)
return;
+ debugfs_remove_recursive(rxq->mana_rx_debugfs);
+ rxq->mana_rx_debugfs = NULL;
+
napi = &rxq->rx_cq.napi;
- if (validate_state)
+ if (napi_initialized) {
napi_synchronize(napi);
- napi_disable(napi);
-
+ napi_disable_locked(napi);
+ netif_napi_del_locked(napi);
+ }
xdp_rxq_info_unreg(&rxq->xdp_rxq);
- netif_napi_del(napi);
-
mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);
mana_deinit_cq(apc, &rxq->rx_cq);
@@ -2022,15 +2434,15 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
if (!rx_oob->buf_va)
continue;
- dma_unmap_single(dev, rx_oob->sgl[0].address,
- rx_oob->sgl[0].size, DMA_FROM_DEVICE);
-
page = virt_to_head_page(rx_oob->buf_va);
- if (rx_oob->from_pool)
- page_pool_put_full_page(rxq->page_pool, page, false);
- else
- put_page(page);
+ if (rxq->frag_count == 1 || !rx_oob->from_pool) {
+ dma_unmap_single(dev, rx_oob->sgl[0].address,
+ rx_oob->sgl[0].size, DMA_FROM_DEVICE);
+ mana_put_rx_page(rxq, page, rx_oob->from_pool);
+ } else {
+ page_pool_free_va(rxq->page_pool, rx_oob->buf_va, true);
+ }
rx_oob->buf_va = NULL;
}
@@ -2054,7 +2466,7 @@ static int mana_fill_rx_oob(struct mana_recv_buf_oob *rx_oob, u32 mem_key,
if (mpc->rxbufs_pre)
va = mana_get_rxbuf_pre(rxq, &da);
else
- va = mana_get_rxfrag(rxq, dev, &da, &from_pool, false);
+ va = mana_get_rxfrag(rxq, dev, &da, &from_pool);
if (!va)
return -ENOMEM;
@@ -2132,13 +2544,26 @@ static int mana_push_wqe(struct mana_rxq *rxq)
static int mana_create_page_pool(struct mana_rxq *rxq, struct gdma_context *gc)
{
+ struct mana_port_context *mpc = netdev_priv(rxq->ndev);
struct page_pool_params pprm = {};
int ret;
- pprm.pool_size = RX_BUFFERS_PER_QUEUE;
+ pprm.pool_size = mpc->rx_queue_size / rxq->frag_count + 1;
pprm.nid = gc->numa_node;
pprm.napi = &rxq->rx_cq.napi;
pprm.netdev = rxq->ndev;
+ pprm.order = get_order(rxq->alloc_size);
+ pprm.queue_idx = rxq->rxq_idx;
+ pprm.dev = gc->dev;
+
+	/* Let the page pool do the DMA mapping when page sharing with multiple
+	 * fragments is enabled for RX buffers.
+ */
+ if (rxq->frag_count > 1) {
+ pprm.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pprm.max_len = PAGE_SIZE;
+ pprm.dma_dir = DMA_FROM_DEVICE;
+ }
rxq->page_pool = page_pool_create(&pprm);
@@ -2167,19 +2592,18 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
gc = gd->gdma_context;
- rxq = kzalloc(struct_size(rxq, rx_oobs, RX_BUFFERS_PER_QUEUE),
+ rxq = kzalloc(struct_size(rxq, rx_oobs, apc->rx_queue_size),
GFP_KERNEL);
if (!rxq)
return NULL;
rxq->ndev = ndev;
- rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE;
+ rxq->num_rx_buf = apc->rx_queue_size;
rxq->rxq_idx = rxq_idx;
rxq->rxobj = INVALID_MANA_HANDLE;
- mana_get_rxbuf_cfg(ndev->mtu, &rxq->datasize, &rxq->alloc_size,
- &rxq->headroom);
-
+ mana_get_rxbuf_cfg(apc, ndev->mtu, &rxq->datasize, &rxq->alloc_size,
+ &rxq->headroom, &rxq->frag_count);
/* Create page pool for RX queue */
err = mana_create_page_pool(rxq, gc);
if (err) {
@@ -2191,8 +2615,8 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
if (err)
goto out;
- rq_size = PAGE_ALIGN(rq_size);
- cq_size = PAGE_ALIGN(cq_size);
+ rq_size = MANA_PAGE_ALIGN(rq_size);
+ cq_size = MANA_PAGE_ALIGN(cq_size);
/* Create RQ */
memset(&spec, 0, sizeof(spec));
@@ -2254,14 +2678,14 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
gc->cq_table[cq->gdma_id] = cq->gdma_cq;
- netif_napi_add_weight(ndev, &cq->napi, mana_poll, 1);
+ netif_napi_add_weight_locked(ndev, &cq->napi, mana_poll, 1);
WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx,
cq->napi.napi_id));
WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
rxq->page_pool));
- napi_enable(&cq->napi);
+ napi_enable_locked(&cq->napi);
mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
out:
@@ -2278,6 +2702,28 @@ out:
return NULL;
}
+static void mana_create_rxq_debugfs(struct mana_port_context *apc, int idx)
+{
+ struct mana_rxq *rxq;
+ char qnum[32];
+
+ rxq = apc->rxqs[idx];
+
+ sprintf(qnum, "RX-%d", idx);
+ rxq->mana_rx_debugfs = debugfs_create_dir(qnum, apc->mana_port_debugfs);
+ debugfs_create_u32("rq_head", 0400, rxq->mana_rx_debugfs, &rxq->gdma_rq->head);
+ debugfs_create_u32("rq_tail", 0400, rxq->mana_rx_debugfs, &rxq->gdma_rq->tail);
+ debugfs_create_u32("rq_nbuf", 0400, rxq->mana_rx_debugfs, &rxq->num_rx_buf);
+ debugfs_create_u32("cq_head", 0400, rxq->mana_rx_debugfs,
+ &rxq->rx_cq.gdma_cq->head);
+ debugfs_create_u32("cq_tail", 0400, rxq->mana_rx_debugfs,
+ &rxq->rx_cq.gdma_cq->tail);
+ debugfs_create_u32("cq_budget", 0400, rxq->mana_rx_debugfs, &rxq->rx_cq.budget);
+ debugfs_create_file("rxq_dump", 0400, rxq->mana_rx_debugfs, rxq->gdma_rq, &mana_dbg_q_fops);
+ debugfs_create_file("cq_dump", 0400, rxq->mana_rx_debugfs, rxq->rx_cq.gdma_cq,
+ &mana_dbg_q_fops);
+}
+
static int mana_add_rx_queues(struct mana_port_context *apc,
struct net_device *ndev)
{
@@ -2290,12 +2736,15 @@ static int mana_add_rx_queues(struct mana_port_context *apc,
rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev);
if (!rxq) {
err = -ENOMEM;
+ netdev_err(ndev, "Failed to create rxq %d : %d\n", i, err);
goto out;
}
u64_stats_init(&rxq->stats.syncp);
apc->rxqs[i] = rxq;
+
+ mana_create_rxq_debugfs(apc, i);
}
apc->default_rxobj = apc->rxqs[0]->rxobj;
@@ -2321,7 +2770,7 @@ static void mana_destroy_vport(struct mana_port_context *apc)
mana_destroy_txq(apc);
mana_uncfg_vport(apc);
- if (gd->gdma_context->is_pf)
+ if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode)
mana_pf_deregister_hw_vport(apc);
}
@@ -2333,7 +2782,7 @@ static int mana_create_vport(struct mana_port_context *apc,
apc->default_rxobj = INVALID_MANA_HANDLE;
- if (gd->gdma_context->is_pf) {
+ if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode) {
err = mana_pf_register_hw_vport(apc);
if (err)
return err;
@@ -2346,11 +2795,33 @@ static int mana_create_vport(struct mana_port_context *apc,
return mana_create_txq(apc, net);
}
+static int mana_rss_table_alloc(struct mana_port_context *apc)
+{
+ if (!apc->indir_table_sz) {
+ netdev_err(apc->ndev,
+ "Indirection table size not set for vPort %d\n",
+ apc->port_idx);
+ return -EINVAL;
+ }
+
+ apc->indir_table = kcalloc(apc->indir_table_sz, sizeof(u32), GFP_KERNEL);
+ if (!apc->indir_table)
+ return -ENOMEM;
+
+ apc->rxobj_table = kcalloc(apc->indir_table_sz, sizeof(mana_handle_t), GFP_KERNEL);
+ if (!apc->rxobj_table) {
+ kfree(apc->indir_table);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
static void mana_rss_table_init(struct mana_port_context *apc)
{
int i;
- for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
+ for (i = 0; i < apc->indir_table_sz; i++)
apc->indir_table[i] =
ethtool_rxfh_indir_default(i, apc->num_queues);
}
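
For context, ethtool_rxfh_indir_default() returns the entry index modulo the number of RX queues, so the default indirection table spreads hash buckets round-robin across the configured queues. A minimal illustration (the table size and queue count below are example values only):

	#include <stdio.h>

	int main(void)
	{
		unsigned int indir_table_sz = 8, num_queues = 3;

		/* mirrors ethtool_rxfh_indir_default(i, num_queues) == i % num_queues */
		for (unsigned int i = 0; i < indir_table_sz; i++)
			printf("%u ", i % num_queues);	/* prints: 0 1 2 0 1 2 0 1 */
		printf("\n");
		return 0;
	}
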
@@ -2363,7 +2834,7 @@ int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
int i;
if (update_tab) {
- for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
+ for (i = 0; i < apc->indir_table_sz; i++) {
queue_idx = apc->indir_table[i];
apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj;
}
@@ -2378,15 +2849,17 @@ int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
return 0;
}
-void mana_query_gf_stats(struct mana_port_context *apc)
+int mana_query_gf_stats(struct mana_context *ac)
{
+ struct gdma_context *gc = ac->gdma_dev->gdma_context;
struct mana_query_gf_stat_resp resp = {};
struct mana_query_gf_stat_req req = {};
- struct net_device *ndev = apc->ndev;
+ struct device *dev = gc->dev;
int err;
mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_GF_STAT,
sizeof(req), sizeof(resp));
+ req.hdr.resp.msg_version = GDMA_MESSAGE_V2;
req.req_stats = STATISTICS_FLAGS_RX_DISCARDS_NO_WQE |
STATISTICS_FLAGS_RX_ERRORS_VPORT_DISABLED |
STATISTICS_FLAGS_HC_RX_BYTES |
@@ -2415,68 +2888,156 @@ void mana_query_gf_stats(struct mana_port_context *apc)
STATISTICS_FLAGS_HC_TX_BCAST_BYTES |
STATISTICS_FLAGS_TX_ERRORS_GDMA_ERROR;
- err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+ err = mana_send_request(ac, &req, sizeof(req), &resp,
sizeof(resp));
if (err) {
- netdev_err(ndev, "Failed to query GF stats: %d\n", err);
- return;
+ dev_err(dev, "Failed to query GF stats: %d\n", err);
+ return err;
}
err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_GF_STAT,
sizeof(resp));
if (err || resp.hdr.status) {
- netdev_err(ndev, "Failed to query GF stats: %d, 0x%x\n", err,
- resp.hdr.status);
- return;
+ dev_err(dev, "Failed to query GF stats: %d, 0x%x\n", err,
+ resp.hdr.status);
+ return err;
}
- apc->eth_stats.hc_rx_discards_no_wqe = resp.rx_discards_nowqe;
- apc->eth_stats.hc_rx_err_vport_disabled = resp.rx_err_vport_disabled;
- apc->eth_stats.hc_rx_bytes = resp.hc_rx_bytes;
- apc->eth_stats.hc_rx_ucast_pkts = resp.hc_rx_ucast_pkts;
- apc->eth_stats.hc_rx_ucast_bytes = resp.hc_rx_ucast_bytes;
- apc->eth_stats.hc_rx_bcast_pkts = resp.hc_rx_bcast_pkts;
- apc->eth_stats.hc_rx_bcast_bytes = resp.hc_rx_bcast_bytes;
- apc->eth_stats.hc_rx_mcast_pkts = resp.hc_rx_mcast_pkts;
- apc->eth_stats.hc_rx_mcast_bytes = resp.hc_rx_mcast_bytes;
- apc->eth_stats.hc_tx_err_gf_disabled = resp.tx_err_gf_disabled;
- apc->eth_stats.hc_tx_err_vport_disabled = resp.tx_err_vport_disabled;
- apc->eth_stats.hc_tx_err_inval_vportoffset_pkt =
+ ac->hc_stats.hc_rx_discards_no_wqe = resp.rx_discards_nowqe;
+ ac->hc_stats.hc_rx_err_vport_disabled = resp.rx_err_vport_disabled;
+ ac->hc_stats.hc_rx_bytes = resp.hc_rx_bytes;
+ ac->hc_stats.hc_rx_ucast_pkts = resp.hc_rx_ucast_pkts;
+ ac->hc_stats.hc_rx_ucast_bytes = resp.hc_rx_ucast_bytes;
+ ac->hc_stats.hc_rx_bcast_pkts = resp.hc_rx_bcast_pkts;
+ ac->hc_stats.hc_rx_bcast_bytes = resp.hc_rx_bcast_bytes;
+ ac->hc_stats.hc_rx_mcast_pkts = resp.hc_rx_mcast_pkts;
+ ac->hc_stats.hc_rx_mcast_bytes = resp.hc_rx_mcast_bytes;
+ ac->hc_stats.hc_tx_err_gf_disabled = resp.tx_err_gf_disabled;
+ ac->hc_stats.hc_tx_err_vport_disabled = resp.tx_err_vport_disabled;
+ ac->hc_stats.hc_tx_err_inval_vportoffset_pkt =
resp.tx_err_inval_vport_offset_pkt;
- apc->eth_stats.hc_tx_err_vlan_enforcement =
+ ac->hc_stats.hc_tx_err_vlan_enforcement =
resp.tx_err_vlan_enforcement;
- apc->eth_stats.hc_tx_err_eth_type_enforcement =
+ ac->hc_stats.hc_tx_err_eth_type_enforcement =
resp.tx_err_ethtype_enforcement;
- apc->eth_stats.hc_tx_err_sa_enforcement = resp.tx_err_SA_enforcement;
- apc->eth_stats.hc_tx_err_sqpdid_enforcement =
+ ac->hc_stats.hc_tx_err_sa_enforcement = resp.tx_err_SA_enforcement;
+ ac->hc_stats.hc_tx_err_sqpdid_enforcement =
resp.tx_err_SQPDID_enforcement;
- apc->eth_stats.hc_tx_err_cqpdid_enforcement =
+ ac->hc_stats.hc_tx_err_cqpdid_enforcement =
resp.tx_err_CQPDID_enforcement;
- apc->eth_stats.hc_tx_err_mtu_violation = resp.tx_err_mtu_violation;
- apc->eth_stats.hc_tx_err_inval_oob = resp.tx_err_inval_oob;
- apc->eth_stats.hc_tx_bytes = resp.hc_tx_bytes;
- apc->eth_stats.hc_tx_ucast_pkts = resp.hc_tx_ucast_pkts;
- apc->eth_stats.hc_tx_ucast_bytes = resp.hc_tx_ucast_bytes;
- apc->eth_stats.hc_tx_bcast_pkts = resp.hc_tx_bcast_pkts;
- apc->eth_stats.hc_tx_bcast_bytes = resp.hc_tx_bcast_bytes;
- apc->eth_stats.hc_tx_mcast_pkts = resp.hc_tx_mcast_pkts;
- apc->eth_stats.hc_tx_mcast_bytes = resp.hc_tx_mcast_bytes;
- apc->eth_stats.hc_tx_err_gdma = resp.tx_err_gdma;
+ ac->hc_stats.hc_tx_err_mtu_violation = resp.tx_err_mtu_violation;
+ ac->hc_stats.hc_tx_err_inval_oob = resp.tx_err_inval_oob;
+ ac->hc_stats.hc_tx_bytes = resp.hc_tx_bytes;
+ ac->hc_stats.hc_tx_ucast_pkts = resp.hc_tx_ucast_pkts;
+ ac->hc_stats.hc_tx_ucast_bytes = resp.hc_tx_ucast_bytes;
+ ac->hc_stats.hc_tx_bcast_pkts = resp.hc_tx_bcast_pkts;
+ ac->hc_stats.hc_tx_bcast_bytes = resp.hc_tx_bcast_bytes;
+ ac->hc_stats.hc_tx_mcast_pkts = resp.hc_tx_mcast_pkts;
+ ac->hc_stats.hc_tx_mcast_bytes = resp.hc_tx_mcast_bytes;
+ ac->hc_stats.hc_tx_err_gdma = resp.tx_err_gdma;
+
+ return 0;
+}
+
+void mana_query_phy_stats(struct mana_port_context *apc)
+{
+ struct mana_query_phy_stat_resp resp = {};
+ struct mana_query_phy_stat_req req = {};
+ struct net_device *ndev = apc->ndev;
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_PHY_STAT,
+ sizeof(req), sizeof(resp));
+ err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+ sizeof(resp));
+ if (err)
+ return;
+
+ err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_PHY_STAT,
+ sizeof(resp));
+ if (err || resp.hdr.status) {
+ netdev_err(ndev,
+ "Failed to query PHY stats: %d, resp:0x%x\n",
+ err, resp.hdr.status);
+ return;
+ }
+
+ /* Aggregate drop counters */
+ apc->phy_stats.rx_pkt_drop_phy = resp.rx_pkt_drop_phy;
+ apc->phy_stats.tx_pkt_drop_phy = resp.tx_pkt_drop_phy;
+
+ /* Per TC traffic Counters */
+ apc->phy_stats.rx_pkt_tc0_phy = resp.rx_pkt_tc0_phy;
+ apc->phy_stats.tx_pkt_tc0_phy = resp.tx_pkt_tc0_phy;
+ apc->phy_stats.rx_pkt_tc1_phy = resp.rx_pkt_tc1_phy;
+ apc->phy_stats.tx_pkt_tc1_phy = resp.tx_pkt_tc1_phy;
+ apc->phy_stats.rx_pkt_tc2_phy = resp.rx_pkt_tc2_phy;
+ apc->phy_stats.tx_pkt_tc2_phy = resp.tx_pkt_tc2_phy;
+ apc->phy_stats.rx_pkt_tc3_phy = resp.rx_pkt_tc3_phy;
+ apc->phy_stats.tx_pkt_tc3_phy = resp.tx_pkt_tc3_phy;
+ apc->phy_stats.rx_pkt_tc4_phy = resp.rx_pkt_tc4_phy;
+ apc->phy_stats.tx_pkt_tc4_phy = resp.tx_pkt_tc4_phy;
+ apc->phy_stats.rx_pkt_tc5_phy = resp.rx_pkt_tc5_phy;
+ apc->phy_stats.tx_pkt_tc5_phy = resp.tx_pkt_tc5_phy;
+ apc->phy_stats.rx_pkt_tc6_phy = resp.rx_pkt_tc6_phy;
+ apc->phy_stats.tx_pkt_tc6_phy = resp.tx_pkt_tc6_phy;
+ apc->phy_stats.rx_pkt_tc7_phy = resp.rx_pkt_tc7_phy;
+ apc->phy_stats.tx_pkt_tc7_phy = resp.tx_pkt_tc7_phy;
+
+ /* Per TC byte Counters */
+ apc->phy_stats.rx_byte_tc0_phy = resp.rx_byte_tc0_phy;
+ apc->phy_stats.tx_byte_tc0_phy = resp.tx_byte_tc0_phy;
+ apc->phy_stats.rx_byte_tc1_phy = resp.rx_byte_tc1_phy;
+ apc->phy_stats.tx_byte_tc1_phy = resp.tx_byte_tc1_phy;
+ apc->phy_stats.rx_byte_tc2_phy = resp.rx_byte_tc2_phy;
+ apc->phy_stats.tx_byte_tc2_phy = resp.tx_byte_tc2_phy;
+ apc->phy_stats.rx_byte_tc3_phy = resp.rx_byte_tc3_phy;
+ apc->phy_stats.tx_byte_tc3_phy = resp.tx_byte_tc3_phy;
+ apc->phy_stats.rx_byte_tc4_phy = resp.rx_byte_tc4_phy;
+ apc->phy_stats.tx_byte_tc4_phy = resp.tx_byte_tc4_phy;
+ apc->phy_stats.rx_byte_tc5_phy = resp.rx_byte_tc5_phy;
+ apc->phy_stats.tx_byte_tc5_phy = resp.tx_byte_tc5_phy;
+ apc->phy_stats.rx_byte_tc6_phy = resp.rx_byte_tc6_phy;
+ apc->phy_stats.tx_byte_tc6_phy = resp.tx_byte_tc6_phy;
+ apc->phy_stats.rx_byte_tc7_phy = resp.rx_byte_tc7_phy;
+ apc->phy_stats.tx_byte_tc7_phy = resp.tx_byte_tc7_phy;
+
+ /* Per TC pause Counters */
+ apc->phy_stats.rx_pause_tc0_phy = resp.rx_pause_tc0_phy;
+ apc->phy_stats.tx_pause_tc0_phy = resp.tx_pause_tc0_phy;
+ apc->phy_stats.rx_pause_tc1_phy = resp.rx_pause_tc1_phy;
+ apc->phy_stats.tx_pause_tc1_phy = resp.tx_pause_tc1_phy;
+ apc->phy_stats.rx_pause_tc2_phy = resp.rx_pause_tc2_phy;
+ apc->phy_stats.tx_pause_tc2_phy = resp.tx_pause_tc2_phy;
+ apc->phy_stats.rx_pause_tc3_phy = resp.rx_pause_tc3_phy;
+ apc->phy_stats.tx_pause_tc3_phy = resp.tx_pause_tc3_phy;
+ apc->phy_stats.rx_pause_tc4_phy = resp.rx_pause_tc4_phy;
+ apc->phy_stats.tx_pause_tc4_phy = resp.tx_pause_tc4_phy;
+ apc->phy_stats.rx_pause_tc5_phy = resp.rx_pause_tc5_phy;
+ apc->phy_stats.tx_pause_tc5_phy = resp.tx_pause_tc5_phy;
+ apc->phy_stats.rx_pause_tc6_phy = resp.rx_pause_tc6_phy;
+ apc->phy_stats.tx_pause_tc6_phy = resp.tx_pause_tc6_phy;
+ apc->phy_stats.rx_pause_tc7_phy = resp.rx_pause_tc7_phy;
+ apc->phy_stats.tx_pause_tc7_phy = resp.tx_pause_tc7_phy;
}
static int mana_init_port(struct net_device *ndev)
{
struct mana_port_context *apc = netdev_priv(ndev);
+ struct gdma_dev *gd = apc->ac->gdma_dev;
u32 max_txq, max_rxq, max_queues;
int port_idx = apc->port_idx;
- u32 num_indirect_entries;
+ struct gdma_context *gc;
+ char vport[32];
int err;
err = mana_init_port_context(apc);
if (err)
return err;
+ gc = gd->gdma_context;
+
err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq,
- &num_indirect_entries);
+ &apc->indir_table_sz);
if (err) {
netdev_err(ndev, "Failed to query info for vPort %d\n",
port_idx);
@@ -2491,12 +3052,12 @@ static int mana_init_port(struct net_device *ndev)
apc->num_queues = apc->max_queues;
eth_hw_addr_set(ndev, apc->mac_addr);
-
+ sprintf(vport, "vport%d", port_idx);
+ apc->mana_port_debugfs = debugfs_create_dir(vport, gc->mana_pci_debugfs);
return 0;
reset_apc:
- kfree(apc->rxqs);
- apc->rxqs = NULL;
+ mana_cleanup_port_context(apc);
return err;
}
@@ -2507,12 +3068,18 @@ int mana_alloc_queues(struct net_device *ndev)
int err;
err = mana_create_vport(apc, ndev);
- if (err)
+ if (err) {
+ netdev_err(ndev, "Failed to create vPort %u : %d\n", apc->port_idx, err);
return err;
+ }
err = netif_set_real_num_tx_queues(ndev, apc->num_queues);
- if (err)
+ if (err) {
+ netdev_err(ndev,
+ "netif_set_real_num_tx_queues () failed for ndev with num_queues %u : %d\n",
+ apc->num_queues, err);
goto destroy_vport;
+ }
err = mana_add_rx_queues(apc, ndev);
if (err)
@@ -2521,16 +3088,22 @@ int mana_alloc_queues(struct net_device *ndev)
apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE;
err = netif_set_real_num_rx_queues(ndev, apc->num_queues);
- if (err)
+ if (err) {
+ netdev_err(ndev,
+ "netif_set_real_num_rx_queues () failed for ndev with num_queues %u : %d\n",
+ apc->num_queues, err);
goto destroy_vport;
+ }
mana_rss_table_init(apc);
err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
- if (err)
+ if (err) {
+ netdev_err(ndev, "Failed to configure RSS table: %d\n", err);
goto destroy_vport;
+ }
- if (gd->gdma_context->is_pf) {
+ if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode) {
err = mana_pf_register_filter(apc);
if (err)
goto destroy_vport;
@@ -2569,9 +3142,6 @@ int mana_attach(struct net_device *ndev)
/* Ensure port state updated before txq state */
smp_wmb();
- if (apc->port_is_up)
- netif_carrier_on(ndev);
-
netif_device_attach(ndev);
return 0;
@@ -2592,7 +3162,7 @@ static int mana_dealloc_queues(struct net_device *ndev)
mana_chn_setxdp(apc, NULL);
- if (gd->gdma_context->is_pf)
+ if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode)
mana_pf_deregister_filter(apc);
/* No packet can be transmitted now since apc->port_is_up is false.
@@ -2641,11 +3211,10 @@ static int mana_dealloc_queues(struct net_device *ndev)
apc->rss_state = TRI_STATE_FALSE;
err = mana_config_rss(apc, TRI_STATE_FALSE, false, false);
- if (err) {
+ if (err && mana_en_need_log(apc, err))
netdev_err(ndev, "Failed to disable vPort: %d\n", err);
- return err;
- }
+	/* Even in the error case, we still need to clean up the vPort */
mana_destroy_vport(apc);
return 0;
@@ -2665,12 +3234,13 @@ int mana_detach(struct net_device *ndev, bool from_close)
smp_wmb();
netif_tx_disable(ndev);
- netif_carrier_off(ndev);
if (apc->port_st_save) {
err = mana_dealloc_queues(ndev);
- if (err)
+ if (err) {
+ netdev_err(ndev, "%s failed to deallocate queues: %d\n", __func__, err);
return err;
+ }
}
if (!from_close) {
@@ -2701,6 +3271,8 @@ static int mana_probe_port(struct mana_context *ac, int port_idx,
apc->ndev = ndev;
apc->max_queues = gc->max_num_queues;
apc->num_queues = gc->max_num_queues;
+ apc->tx_queue_size = DEF_TX_BUFFERS_PER_QUEUE;
+ apc->rx_queue_size = DEF_RX_BUFFERS_PER_QUEUE;
apc->port_handle = INVALID_MANA_HANDLE;
apc->pf_filter_handle = INVALID_MANA_HANDLE;
apc->port_idx = port_idx;
@@ -2717,6 +3289,8 @@ static int mana_probe_port(struct mana_context *ac, int port_idx,
ndev->dev_port = port_idx;
SET_NETDEV_DEV(ndev, gc->dev);
+ netif_set_tso_max_size(ndev, GSO_MAX_SIZE);
+
netif_carrier_off(ndev);
netdev_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE);
@@ -2725,6 +3299,10 @@ static int mana_probe_port(struct mana_context *ac, int port_idx,
if (err)
goto free_net;
+ err = mana_rss_table_alloc(apc);
+ if (err)
+ goto reset_apc;
+
netdev_lockdep_set_classes(ndev);
ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
@@ -2741,14 +3319,19 @@ static int mana_probe_port(struct mana_context *ac, int port_idx,
err = register_netdev(ndev);
if (err) {
netdev_err(ndev, "Unable to register netdev.\n");
- goto reset_apc;
+ goto free_indir;
}
+ netif_carrier_on(ndev);
+
+ debugfs_create_u32("current_speed", 0400, apc->mana_port_debugfs, &apc->speed);
+
return 0;
+free_indir:
+ mana_cleanup_indir_table(apc);
reset_apc:
- kfree(apc->rxqs);
- apc->rxqs = NULL;
+ mana_cleanup_port_context(apc);
free_net:
*ndev_storage = NULL;
netdev_err(ndev, "Failed to probe vPort %d: %d\n", port_idx, err);
@@ -2775,7 +3358,7 @@ static void remove_adev(struct gdma_dev *gd)
gd->adev = NULL;
}
-static int add_adev(struct gdma_dev *gd)
+static int add_adev(struct gdma_dev *gd, const char *name)
{
struct auxiliary_device *adev;
struct mana_adev *madev;
@@ -2791,7 +3374,7 @@ static int add_adev(struct gdma_dev *gd)
goto idx_fail;
adev->id = ret;
- adev->name = "rdma";
+ adev->name = name;
adev->dev.parent = gd->gdma_context->dev;
adev->dev.release = adev_release;
madev->mdev = gd;
@@ -2800,11 +3383,15 @@ static int add_adev(struct gdma_dev *gd)
if (ret)
goto init_fail;
+ /* madev is owned by the auxiliary device */
+ madev = NULL;
ret = auxiliary_device_add(adev);
if (ret)
goto add_fail;
gd->adev = adev;
+ dev_dbg(gd->gdma_context->dev,
+ "Auxiliary device added successfully\n");
return 0;
add_fail:
@@ -2819,11 +3406,94 @@ idx_fail:
return ret;
}
+static void mana_rdma_service_handle(struct work_struct *work)
+{
+ struct mana_service_work *serv_work =
+ container_of(work, struct mana_service_work, work);
+ struct gdma_dev *gd = serv_work->gdma_dev;
+ struct device *dev = gd->gdma_context->dev;
+ int ret;
+
+ if (READ_ONCE(gd->rdma_teardown))
+ goto out;
+
+ switch (serv_work->event) {
+ case GDMA_SERVICE_TYPE_RDMA_SUSPEND:
+ if (!gd->adev || gd->is_suspended)
+ break;
+
+ remove_adev(gd);
+ gd->is_suspended = true;
+ break;
+
+ case GDMA_SERVICE_TYPE_RDMA_RESUME:
+ if (!gd->is_suspended)
+ break;
+
+ ret = add_adev(gd, "rdma");
+ if (ret)
+ dev_err(dev, "Failed to add adev on resume: %d\n", ret);
+ else
+ gd->is_suspended = false;
+ break;
+
+ default:
+ dev_warn(dev, "unknown adev service event %u\n",
+ serv_work->event);
+ break;
+ }
+
+out:
+ kfree(serv_work);
+}
+
+int mana_rdma_service_event(struct gdma_context *gc, enum gdma_service_type event)
+{
+ struct gdma_dev *gd = &gc->mana_ib;
+ struct mana_service_work *serv_work;
+
+ if (gd->dev_id.type != GDMA_DEVICE_MANA_IB) {
+ /* RDMA device is not detected on pci */
+ return 0;
+ }
+
+ serv_work = kzalloc(sizeof(*serv_work), GFP_ATOMIC);
+ if (!serv_work)
+ return -ENOMEM;
+
+ serv_work->event = event;
+ serv_work->gdma_dev = gd;
+
+ INIT_WORK(&serv_work->work, mana_rdma_service_handle);
+ queue_work(gc->service_wq, &serv_work->work);
+
+ return 0;
+}
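
mana_rdma_service_event() is meant to be callable from event context, which is why the work item is allocated with GFP_ATOMIC and the actual add/remove of the auxiliary device is deferred to the service workqueue. A hypothetical caller, for illustration only (the wrapper function below is an assumption, not part of this patch):

	/* Hypothetical: forward a device-requested RDMA suspend/resume to the
	 * deferred service handler above.
	 */
	static void example_forward_rdma_event(struct gdma_context *gc, bool suspend)
	{
		enum gdma_service_type ev = suspend ? GDMA_SERVICE_TYPE_RDMA_SUSPEND :
						      GDMA_SERVICE_TYPE_RDMA_RESUME;

		if (mana_rdma_service_event(gc, ev))
			dev_warn(gc->dev, "Failed to queue RDMA service work\n");
	}
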
+
+#define MANA_GF_STATS_PERIOD (2 * HZ)
+
+static void mana_gf_stats_work_handler(struct work_struct *work)
+{
+ struct mana_context *ac =
+ container_of(to_delayed_work(work), struct mana_context, gf_stats_work);
+ int err;
+
+ err = mana_query_gf_stats(ac);
+ if (err == -ETIMEDOUT) {
+ /* HWC timeout detected - reset stats and stop rescheduling */
+ ac->hwc_timeout_occurred = true;
+ memset(&ac->hc_stats, 0, sizeof(ac->hc_stats));
+ return;
+ }
+ schedule_delayed_work(&ac->gf_stats_work, MANA_GF_STATS_PERIOD);
+}
+
int mana_probe(struct gdma_dev *gd, bool resuming)
{
struct gdma_context *gc = gd->gdma_context;
struct mana_context *ac = gd->driver_data;
struct device *dev = gc->dev;
+ u8 bm_hostmode = 0;
u16 num_ports = 0;
int err;
int i;
@@ -2846,16 +3516,22 @@ int mana_probe(struct gdma_dev *gd, bool resuming)
}
err = mana_create_eq(ac);
- if (err)
+ if (err) {
+ dev_err(dev, "Failed to create EQs: %d\n", err);
goto out;
+ }
err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
- MANA_MICRO_VERSION, &num_ports);
+ MANA_MICRO_VERSION, &num_ports, &bm_hostmode);
if (err)
goto out;
+ ac->bm_hostmode = bm_hostmode;
+
if (!resuming) {
ac->num_ports = num_ports;
+
+ INIT_WORK(&ac->link_change_work, mana_link_state_handle);
} else {
if (ac->num_ports != num_ports) {
dev_err(dev, "The number of vPorts changed: %d->%d\n",
@@ -2863,6 +3539,8 @@ int mana_probe(struct gdma_dev *gd, bool resuming)
err = -EPROTO;
goto out;
}
+
+ enable_work(&ac->link_change_work);
}
if (ac->num_ports == 0)
@@ -2874,23 +3552,47 @@ int mana_probe(struct gdma_dev *gd, bool resuming)
if (!resuming) {
for (i = 0; i < ac->num_ports; i++) {
err = mana_probe_port(ac, i, &ac->ports[i]);
- if (err)
+			/* Log the port for which the probe failed and stop
+			 * probing subsequent ports. Ports that probed
+			 * successfully keep running, unless add_adev() fails
+			 * too.
+			 */
+ if (err) {
+ dev_err(dev, "Probe Failed for port %d\n", i);
break;
+ }
}
} else {
for (i = 0; i < ac->num_ports; i++) {
rtnl_lock();
err = mana_attach(ac->ports[i]);
rtnl_unlock();
- if (err)
+			/* Log the port for which the attach failed and stop
+			 * attaching subsequent ports. Ports that attached
+			 * successfully keep running, unless add_adev() fails
+			 * too.
+			 */
+ if (err) {
+ dev_err(dev, "Attach Failed for port %d\n", i);
break;
+ }
}
}
- err = add_adev(gd);
+ err = add_adev(gd, "eth");
+
+ INIT_DELAYED_WORK(&ac->gf_stats_work, mana_gf_stats_work_handler);
+ schedule_delayed_work(&ac->gf_stats_work, MANA_GF_STATS_PERIOD);
+
out:
- if (err)
+ if (err) {
mana_remove(gd, false);
+ } else {
+ dev_dbg(dev, "gd=%p, id=%u, num_ports=%d, type=%u, instance=%u\n",
+ gd, gd->dev_id.as_uint32, ac->num_ports,
+ gd->dev_id.type, gd->dev_id.instance);
+ dev_dbg(dev, "%s succeeded\n", __func__);
+ }
return err;
}
@@ -2899,17 +3601,22 @@ void mana_remove(struct gdma_dev *gd, bool suspending)
{
struct gdma_context *gc = gd->gdma_context;
struct mana_context *ac = gd->driver_data;
+ struct mana_port_context *apc;
struct device *dev = gc->dev;
struct net_device *ndev;
int err;
int i;
+ disable_work_sync(&ac->link_change_work);
+ cancel_delayed_work_sync(&ac->gf_stats_work);
+
/* adev currently doesn't support suspending, always remove it */
if (gd->adev)
remove_adev(gd);
for (i = 0; i < ac->num_ports; i++) {
ndev = ac->ports[i];
+ apc = netdev_priv(ndev);
if (!ndev) {
if (i == 0)
dev_err(dev, "No net device to remove\n");
@@ -2933,6 +3640,7 @@ void mana_remove(struct gdma_dev *gd, bool suspending)
}
unregister_netdevice(ndev);
+ mana_cleanup_indir_table(apc);
rtnl_unlock();
@@ -2949,4 +3657,68 @@ out:
gd->driver_data = NULL;
gd->gdma_context = NULL;
kfree(ac);
+ dev_dbg(dev, "%s succeeded\n", __func__);
+}
+
+int mana_rdma_probe(struct gdma_dev *gd)
+{
+ int err = 0;
+
+ if (gd->dev_id.type != GDMA_DEVICE_MANA_IB) {
+ /* RDMA device is not detected on pci */
+ return err;
+ }
+
+ err = mana_gd_register_device(gd);
+ if (err)
+ return err;
+
+ err = add_adev(gd, "rdma");
+ if (err)
+ mana_gd_deregister_device(gd);
+
+ return err;
+}
+
+void mana_rdma_remove(struct gdma_dev *gd)
+{
+ struct gdma_context *gc = gd->gdma_context;
+
+ if (gd->dev_id.type != GDMA_DEVICE_MANA_IB) {
+ /* RDMA device is not detected on pci */
+ return;
+ }
+
+ WRITE_ONCE(gd->rdma_teardown, true);
+ flush_workqueue(gc->service_wq);
+
+ if (gd->adev)
+ remove_adev(gd);
+
+ mana_gd_deregister_device(gd);
+}
+
+struct net_device *mana_get_primary_netdev(struct mana_context *ac,
+ u32 port_index,
+ netdevice_tracker *tracker)
+{
+ struct net_device *ndev;
+
+ if (port_index >= ac->num_ports)
+ return NULL;
+
+ rcu_read_lock();
+
+ /* If mana is used in netvsc, the upper netdevice should be returned. */
+ ndev = netdev_master_upper_dev_get_rcu(ac->ports[port_index]);
+
+ /* If there is no upper device, use the parent Ethernet device */
+ if (!ndev)
+ ndev = ac->ports[port_index];
+
+ netdev_hold(ndev, tracker, GFP_ATOMIC);
+ rcu_read_unlock();
+
+ return ndev;
}
+EXPORT_SYMBOL_NS(mana_get_primary_netdev, "NET_MANA");
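
mana_get_primary_netdev() takes a tracked reference on the returned device, so callers are expected to release it with netdev_put() using the same tracker. A hedged usage sketch (the consumer function below is hypothetical):

	/* Hypothetical consumer of the exported helper */
	static int example_use_primary_netdev(struct mana_context *ac)
	{
		netdevice_tracker tracker;
		struct net_device *ndev;

		ndev = mana_get_primary_netdev(ac, 0, &tracker);
		if (!ndev)
			return -ENODEV;

		/* ... use ndev, e.g. read ndev->mtu ... */

		netdev_put(ndev, &tracker);
		return 0;
	}
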
diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
index ab2413d71f6c..0e2f4343ac67 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
@@ -7,72 +7,132 @@
#include <net/mana/mana.h>
-static const struct {
+struct mana_stats_desc {
char name[ETH_GSTRING_LEN];
u16 offset;
-} mana_eth_stats[] = {
+};
+
+static const struct mana_stats_desc mana_eth_stats[] = {
{"stop_queue", offsetof(struct mana_ethtool_stats, stop_queue)},
{"wake_queue", offsetof(struct mana_ethtool_stats, wake_queue)},
- {"hc_rx_discards_no_wqe", offsetof(struct mana_ethtool_stats,
+ {"tx_cq_err", offsetof(struct mana_ethtool_stats, tx_cqe_err)},
+ {"tx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
+ tx_cqe_unknown_type)},
+ {"tx_linear_pkt_cnt", offsetof(struct mana_ethtool_stats,
+ tx_linear_pkt_cnt)},
+ {"rx_coalesced_err", offsetof(struct mana_ethtool_stats,
+ rx_coalesced_err)},
+ {"rx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
+ rx_cqe_unknown_type)},
+};
+
+static const struct mana_stats_desc mana_hc_stats[] = {
+ {"hc_rx_discards_no_wqe", offsetof(struct mana_ethtool_hc_stats,
hc_rx_discards_no_wqe)},
- {"hc_rx_err_vport_disabled", offsetof(struct mana_ethtool_stats,
+ {"hc_rx_err_vport_disabled", offsetof(struct mana_ethtool_hc_stats,
hc_rx_err_vport_disabled)},
- {"hc_rx_bytes", offsetof(struct mana_ethtool_stats, hc_rx_bytes)},
- {"hc_rx_ucast_pkts", offsetof(struct mana_ethtool_stats,
+ {"hc_rx_bytes", offsetof(struct mana_ethtool_hc_stats, hc_rx_bytes)},
+ {"hc_rx_ucast_pkts", offsetof(struct mana_ethtool_hc_stats,
hc_rx_ucast_pkts)},
- {"hc_rx_ucast_bytes", offsetof(struct mana_ethtool_stats,
+ {"hc_rx_ucast_bytes", offsetof(struct mana_ethtool_hc_stats,
hc_rx_ucast_bytes)},
- {"hc_rx_bcast_pkts", offsetof(struct mana_ethtool_stats,
+ {"hc_rx_bcast_pkts", offsetof(struct mana_ethtool_hc_stats,
hc_rx_bcast_pkts)},
- {"hc_rx_bcast_bytes", offsetof(struct mana_ethtool_stats,
+ {"hc_rx_bcast_bytes", offsetof(struct mana_ethtool_hc_stats,
hc_rx_bcast_bytes)},
- {"hc_rx_mcast_pkts", offsetof(struct mana_ethtool_stats,
- hc_rx_mcast_pkts)},
- {"hc_rx_mcast_bytes", offsetof(struct mana_ethtool_stats,
+ {"hc_rx_mcast_pkts", offsetof(struct mana_ethtool_hc_stats,
+ hc_rx_mcast_pkts)},
+ {"hc_rx_mcast_bytes", offsetof(struct mana_ethtool_hc_stats,
hc_rx_mcast_bytes)},
- {"hc_tx_err_gf_disabled", offsetof(struct mana_ethtool_stats,
+ {"hc_tx_err_gf_disabled", offsetof(struct mana_ethtool_hc_stats,
hc_tx_err_gf_disabled)},
- {"hc_tx_err_vport_disabled", offsetof(struct mana_ethtool_stats,
+ {"hc_tx_err_vport_disabled", offsetof(struct mana_ethtool_hc_stats,
hc_tx_err_vport_disabled)},
{"hc_tx_err_inval_vportoffset_pkt",
- offsetof(struct mana_ethtool_stats,
+ offsetof(struct mana_ethtool_hc_stats,
hc_tx_err_inval_vportoffset_pkt)},
- {"hc_tx_err_vlan_enforcement", offsetof(struct mana_ethtool_stats,
+ {"hc_tx_err_vlan_enforcement", offsetof(struct mana_ethtool_hc_stats,
hc_tx_err_vlan_enforcement)},
{"hc_tx_err_eth_type_enforcement",
- offsetof(struct mana_ethtool_stats, hc_tx_err_eth_type_enforcement)},
- {"hc_tx_err_sa_enforcement", offsetof(struct mana_ethtool_stats,
+ offsetof(struct mana_ethtool_hc_stats, hc_tx_err_eth_type_enforcement)},
+ {"hc_tx_err_sa_enforcement", offsetof(struct mana_ethtool_hc_stats,
hc_tx_err_sa_enforcement)},
{"hc_tx_err_sqpdid_enforcement",
- offsetof(struct mana_ethtool_stats, hc_tx_err_sqpdid_enforcement)},
+ offsetof(struct mana_ethtool_hc_stats, hc_tx_err_sqpdid_enforcement)},
{"hc_tx_err_cqpdid_enforcement",
- offsetof(struct mana_ethtool_stats, hc_tx_err_cqpdid_enforcement)},
- {"hc_tx_err_mtu_violation", offsetof(struct mana_ethtool_stats,
+ offsetof(struct mana_ethtool_hc_stats, hc_tx_err_cqpdid_enforcement)},
+ {"hc_tx_err_mtu_violation", offsetof(struct mana_ethtool_hc_stats,
hc_tx_err_mtu_violation)},
- {"hc_tx_err_inval_oob", offsetof(struct mana_ethtool_stats,
+ {"hc_tx_err_inval_oob", offsetof(struct mana_ethtool_hc_stats,
hc_tx_err_inval_oob)},
- {"hc_tx_err_gdma", offsetof(struct mana_ethtool_stats,
+ {"hc_tx_err_gdma", offsetof(struct mana_ethtool_hc_stats,
hc_tx_err_gdma)},
- {"hc_tx_bytes", offsetof(struct mana_ethtool_stats, hc_tx_bytes)},
- {"hc_tx_ucast_pkts", offsetof(struct mana_ethtool_stats,
+ {"hc_tx_bytes", offsetof(struct mana_ethtool_hc_stats, hc_tx_bytes)},
+ {"hc_tx_ucast_pkts", offsetof(struct mana_ethtool_hc_stats,
hc_tx_ucast_pkts)},
- {"hc_tx_ucast_bytes", offsetof(struct mana_ethtool_stats,
+ {"hc_tx_ucast_bytes", offsetof(struct mana_ethtool_hc_stats,
hc_tx_ucast_bytes)},
- {"hc_tx_bcast_pkts", offsetof(struct mana_ethtool_stats,
+ {"hc_tx_bcast_pkts", offsetof(struct mana_ethtool_hc_stats,
hc_tx_bcast_pkts)},
- {"hc_tx_bcast_bytes", offsetof(struct mana_ethtool_stats,
+ {"hc_tx_bcast_bytes", offsetof(struct mana_ethtool_hc_stats,
hc_tx_bcast_bytes)},
- {"hc_tx_mcast_pkts", offsetof(struct mana_ethtool_stats,
+ {"hc_tx_mcast_pkts", offsetof(struct mana_ethtool_hc_stats,
hc_tx_mcast_pkts)},
- {"hc_tx_mcast_bytes", offsetof(struct mana_ethtool_stats,
+ {"hc_tx_mcast_bytes", offsetof(struct mana_ethtool_hc_stats,
hc_tx_mcast_bytes)},
- {"tx_cq_err", offsetof(struct mana_ethtool_stats, tx_cqe_err)},
- {"tx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
- tx_cqe_unknown_type)},
- {"rx_coalesced_err", offsetof(struct mana_ethtool_stats,
- rx_coalesced_err)},
- {"rx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
- rx_cqe_unknown_type)},
+};
+
+static const struct mana_stats_desc mana_phy_stats[] = {
+ { "hc_rx_pkt_drop_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_drop_phy) },
+ { "hc_tx_pkt_drop_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_drop_phy) },
+ { "hc_tc0_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc0_phy) },
+ { "hc_tc0_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc0_phy) },
+ { "hc_tc0_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc0_phy) },
+ { "hc_tc0_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc0_phy) },
+ { "hc_tc1_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc1_phy) },
+ { "hc_tc1_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc1_phy) },
+ { "hc_tc1_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc1_phy) },
+ { "hc_tc1_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc1_phy) },
+ { "hc_tc2_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc2_phy) },
+ { "hc_tc2_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc2_phy) },
+ { "hc_tc2_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc2_phy) },
+ { "hc_tc2_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc2_phy) },
+ { "hc_tc3_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc3_phy) },
+ { "hc_tc3_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc3_phy) },
+ { "hc_tc3_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc3_phy) },
+ { "hc_tc3_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc3_phy) },
+ { "hc_tc4_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc4_phy) },
+ { "hc_tc4_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc4_phy) },
+ { "hc_tc4_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc4_phy) },
+ { "hc_tc4_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc4_phy) },
+ { "hc_tc5_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc5_phy) },
+ { "hc_tc5_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc5_phy) },
+ { "hc_tc5_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc5_phy) },
+ { "hc_tc5_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc5_phy) },
+ { "hc_tc6_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc6_phy) },
+ { "hc_tc6_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc6_phy) },
+ { "hc_tc6_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc6_phy) },
+ { "hc_tc6_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc6_phy) },
+ { "hc_tc7_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc7_phy) },
+ { "hc_tc7_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc7_phy) },
+ { "hc_tc7_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc7_phy) },
+ { "hc_tc7_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc7_phy) },
+ { "hc_tc0_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc0_phy) },
+ { "hc_tc0_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc0_phy) },
+ { "hc_tc1_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc1_phy) },
+ { "hc_tc1_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc1_phy) },
+ { "hc_tc2_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc2_phy) },
+ { "hc_tc2_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc2_phy) },
+ { "hc_tc3_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc3_phy) },
+ { "hc_tc3_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc3_phy) },
+ { "hc_tc4_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc4_phy) },
+ { "hc_tc4_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc4_phy) },
+ { "hc_tc5_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc5_phy) },
+ { "hc_tc5_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc5_phy) },
+ { "hc_tc6_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc6_phy) },
+ { "hc_tc6_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc6_phy) },
+ { "hc_tc7_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc7_phy) },
+ { "hc_tc7_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc7_phy) },
};
static int mana_get_sset_count(struct net_device *ndev, int stringset)
@@ -83,61 +143,47 @@ static int mana_get_sset_count(struct net_device *ndev, int stringset)
if (stringset != ETH_SS_STATS)
return -EINVAL;
- return ARRAY_SIZE(mana_eth_stats) + num_queues *
- (MANA_STATS_RX_COUNT + MANA_STATS_TX_COUNT);
+ return ARRAY_SIZE(mana_eth_stats) + ARRAY_SIZE(mana_phy_stats) + ARRAY_SIZE(mana_hc_stats) +
+ num_queues * (MANA_STATS_RX_COUNT + MANA_STATS_TX_COUNT);
}
static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
struct mana_port_context *apc = netdev_priv(ndev);
unsigned int num_queues = apc->num_queues;
- u8 *p = data;
int i;
if (stringset != ETH_SS_STATS)
return;
+ for (i = 0; i < ARRAY_SIZE(mana_eth_stats); i++)
+ ethtool_puts(&data, mana_eth_stats[i].name);
- for (i = 0; i < ARRAY_SIZE(mana_eth_stats); i++) {
- memcpy(p, mana_eth_stats[i].name, ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < ARRAY_SIZE(mana_hc_stats); i++)
+ ethtool_puts(&data, mana_hc_stats[i].name);
+
+ for (i = 0; i < ARRAY_SIZE(mana_phy_stats); i++)
+ ethtool_puts(&data, mana_phy_stats[i].name);
for (i = 0; i < num_queues; i++) {
- sprintf(p, "rx_%d_packets", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_%d_bytes", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_%d_xdp_drop", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_%d_xdp_tx", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_%d_xdp_redirect", i);
- p += ETH_GSTRING_LEN;
+ ethtool_sprintf(&data, "rx_%d_packets", i);
+ ethtool_sprintf(&data, "rx_%d_bytes", i);
+ ethtool_sprintf(&data, "rx_%d_xdp_drop", i);
+ ethtool_sprintf(&data, "rx_%d_xdp_tx", i);
+ ethtool_sprintf(&data, "rx_%d_xdp_redirect", i);
}
for (i = 0; i < num_queues; i++) {
- sprintf(p, "tx_%d_packets", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_%d_bytes", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_%d_xdp_xmit", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_%d_tso_packets", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_%d_tso_bytes", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_%d_tso_inner_packets", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_%d_tso_inner_bytes", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_%d_long_pkt_fmt", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_%d_short_pkt_fmt", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_%d_csum_partial", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_%d_mana_map_err", i);
- p += ETH_GSTRING_LEN;
+ ethtool_sprintf(&data, "tx_%d_packets", i);
+ ethtool_sprintf(&data, "tx_%d_bytes", i);
+ ethtool_sprintf(&data, "tx_%d_xdp_xmit", i);
+ ethtool_sprintf(&data, "tx_%d_tso_packets", i);
+ ethtool_sprintf(&data, "tx_%d_tso_bytes", i);
+ ethtool_sprintf(&data, "tx_%d_tso_inner_packets", i);
+ ethtool_sprintf(&data, "tx_%d_tso_inner_bytes", i);
+ ethtool_sprintf(&data, "tx_%d_long_pkt_fmt", i);
+ ethtool_sprintf(&data, "tx_%d_short_pkt_fmt", i);
+ ethtool_sprintf(&data, "tx_%d_csum_partial", i);
+ ethtool_sprintf(&data, "tx_%d_mana_map_err", i);
}
}
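
ethtool_puts() and ethtool_sprintf() take the string-buffer cursor by reference and advance it by ETH_GSTRING_LEN per string, which is what makes the manual `p += ETH_GSTRING_LEN` bookkeeping above unnecessary. Roughly, ethtool_puts() behaves like the sketch below (a simplification for illustration, not the actual implementation):

	/* Rough, simplified equivalent of ethtool_puts(&data, name) */
	static void example_puts(u8 **data, const char *name)
	{
		strscpy((char *)*data, name, ETH_GSTRING_LEN);	/* copy into the fixed-size slot */
		*data += ETH_GSTRING_LEN;			/* advance to the next slot */
	}
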
@@ -147,6 +193,8 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
struct mana_port_context *apc = netdev_priv(ndev);
unsigned int num_queues = apc->num_queues;
void *eth_stats = &apc->eth_stats;
+ void *hc_stats = &apc->ac->hc_stats;
+ void *phy_stats = &apc->phy_stats;
struct mana_stats_rx *rx_stats;
struct mana_stats_tx *tx_stats;
unsigned int start;
@@ -167,12 +215,22 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
if (!apc->port_is_up)
return;
- /* we call mana function to update stats from GDMA */
- mana_query_gf_stats(apc);
+
+	/* Query the PHY stats from GDMA; these include the aggregate tx/rx
+	 * drop counters and the per-TC (Traffic Channel) tx/rx and pause
+	 * counters.
+	 */
+ mana_query_phy_stats(apc);
for (q = 0; q < ARRAY_SIZE(mana_eth_stats); q++)
data[i++] = *(u64 *)(eth_stats + mana_eth_stats[q].offset);
+ for (q = 0; q < ARRAY_SIZE(mana_hc_stats); q++)
+ data[i++] = *(u64 *)(hc_stats + mana_hc_stats[q].offset);
+
+ for (q = 0; q < ARRAY_SIZE(mana_phy_stats); q++)
+ data[i++] = *(u64 *)(phy_stats + mana_phy_stats[q].offset);
+
for (q = 0; q < num_queues; q++) {
rx_stats = &apc->rxqs[q]->stats;
@@ -245,7 +303,9 @@ static u32 mana_get_rxfh_key_size(struct net_device *ndev)
static u32 mana_rss_indir_size(struct net_device *ndev)
{
- return MANA_INDIRECT_TABLE_SIZE;
+ struct mana_port_context *apc = netdev_priv(ndev);
+
+ return apc->indir_table_sz;
}
static int mana_get_rxfh(struct net_device *ndev,
@@ -257,7 +317,7 @@ static int mana_get_rxfh(struct net_device *ndev,
rxfh->hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
if (rxfh->indir) {
- for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
+ for (i = 0; i < apc->indir_table_sz; i++)
rxfh->indir[i] = apc->indir_table[i];
}
@@ -273,8 +333,8 @@ static int mana_set_rxfh(struct net_device *ndev,
{
struct mana_port_context *apc = netdev_priv(ndev);
bool update_hash = false, update_table = false;
- u32 save_table[MANA_INDIRECT_TABLE_SIZE];
u8 save_key[MANA_HASH_KEY_SIZE];
+ u32 *save_table;
int i, err;
if (!apc->port_is_up)
@@ -284,13 +344,19 @@ static int mana_set_rxfh(struct net_device *ndev,
rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
+ save_table = kcalloc(apc->indir_table_sz, sizeof(u32), GFP_KERNEL);
+ if (!save_table)
+ return -ENOMEM;
+
if (rxfh->indir) {
- for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
- if (rxfh->indir[i] >= apc->num_queues)
- return -EINVAL;
+ for (i = 0; i < apc->indir_table_sz; i++)
+ if (rxfh->indir[i] >= apc->num_queues) {
+ err = -EINVAL;
+ goto cleanup;
+ }
update_table = true;
- for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
+ for (i = 0; i < apc->indir_table_sz; i++) {
save_table[i] = apc->indir_table[i];
apc->indir_table[i] = rxfh->indir[i];
}
@@ -306,7 +372,7 @@ static int mana_set_rxfh(struct net_device *ndev,
if (err) { /* recover to original values */
if (update_table) {
- for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
+ for (i = 0; i < apc->indir_table_sz; i++)
apc->indir_table[i] = save_table[i];
}
@@ -316,6 +382,9 @@ static int mana_set_rxfh(struct net_device *ndev,
mana_config_rss(apc, TRI_STATE_TRUE, update_hash, update_table);
}
+cleanup:
+ kfree(save_table);
+
return err;
}
@@ -334,30 +403,119 @@ static int mana_set_channels(struct net_device *ndev,
struct mana_port_context *apc = netdev_priv(ndev);
unsigned int new_count = channels->combined_count;
unsigned int old_count = apc->num_queues;
- int err, err2;
+ int err;
+
+ err = mana_pre_alloc_rxbufs(apc, ndev->mtu, new_count);
+ if (err) {
+ netdev_err(ndev, "Insufficient memory for new allocations");
+ return err;
+ }
err = mana_detach(ndev, false);
if (err) {
netdev_err(ndev, "mana_detach failed: %d\n", err);
- return err;
+ goto out;
}
apc->num_queues = new_count;
err = mana_attach(ndev);
- if (!err)
- return 0;
+ if (err) {
+ apc->num_queues = old_count;
+ netdev_err(ndev, "mana_attach failed: %d\n", err);
+ }
- netdev_err(ndev, "mana_attach failed: %d\n", err);
+out:
+ mana_pre_dealloc_rxbufs(apc);
+ return err;
+}
+
+static void mana_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct mana_port_context *apc = netdev_priv(ndev);
+
+ ring->rx_pending = apc->rx_queue_size;
+ ring->tx_pending = apc->tx_queue_size;
+ ring->rx_max_pending = MAX_RX_BUFFERS_PER_QUEUE;
+ ring->tx_max_pending = MAX_TX_BUFFERS_PER_QUEUE;
+}
- /* Try to roll it back to the old configuration. */
- apc->num_queues = old_count;
- err2 = mana_attach(ndev);
- if (err2)
- netdev_err(ndev, "mana re-attach failed: %d\n", err2);
+static int mana_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct mana_port_context *apc = netdev_priv(ndev);
+ u32 new_tx, new_rx;
+ u32 old_tx, old_rx;
+ int err;
+ old_tx = apc->tx_queue_size;
+ old_rx = apc->rx_queue_size;
+
+ if (ring->tx_pending < MIN_TX_BUFFERS_PER_QUEUE) {
+ NL_SET_ERR_MSG_FMT(extack, "tx:%d less than the min:%d", ring->tx_pending,
+ MIN_TX_BUFFERS_PER_QUEUE);
+ return -EINVAL;
+ }
+
+ if (ring->rx_pending < MIN_RX_BUFFERS_PER_QUEUE) {
+ NL_SET_ERR_MSG_FMT(extack, "rx:%d less than the min:%d", ring->rx_pending,
+ MIN_RX_BUFFERS_PER_QUEUE);
+ return -EINVAL;
+ }
+
+ new_rx = roundup_pow_of_two(ring->rx_pending);
+ new_tx = roundup_pow_of_two(ring->tx_pending);
+ netdev_info(ndev, "Using nearest power of 2 values for Txq:%d Rxq:%d\n",
+ new_tx, new_rx);
+
+ /* pre-allocating new buffers to prevent failures in mana_attach() later */
+ apc->rx_queue_size = new_rx;
+ err = mana_pre_alloc_rxbufs(apc, ndev->mtu, apc->num_queues);
+ apc->rx_queue_size = old_rx;
+ if (err) {
+ netdev_err(ndev, "Insufficient memory for new allocations\n");
+ return err;
+ }
+
+ err = mana_detach(ndev, false);
+ if (err) {
+ netdev_err(ndev, "mana_detach failed: %d\n", err);
+ goto out;
+ }
+
+ apc->tx_queue_size = new_tx;
+ apc->rx_queue_size = new_rx;
+
+ err = mana_attach(ndev);
+ if (err) {
+ netdev_err(ndev, "mana_attach failed: %d\n", err);
+ apc->tx_queue_size = old_tx;
+ apc->rx_queue_size = old_rx;
+ }
+out:
+ mana_pre_dealloc_rxbufs(apc);
return err;
}
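
The ring sizes committed above are the power-of-two round-ups of the requested values; for example a request of rx_pending=300 and tx_pending=600 is applied as 512 and 1024, still subject to the MIN/MAX bounds checked earlier. A tiny self-contained check of that rounding (example values only; roundup_p2() is a user-space stand-in for the kernel's roundup_pow_of_two()):

	#include <assert.h>

	/* next power of two >= n, for n >= 1 */
	static unsigned int roundup_p2(unsigned int n)
	{
		unsigned int p = 1;

		while (p < n)
			p <<= 1;
		return p;
	}

	int main(void)
	{
		assert(roundup_p2(300) == 512);
		assert(roundup_p2(600) == 1024);
		return 0;
	}
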
+static int mana_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct mana_port_context *apc = netdev_priv(ndev);
+ int err;
+
+ err = mana_query_link_cfg(apc);
+ cmd->base.speed = (err) ? SPEED_UNKNOWN : apc->max_speed;
+
+ cmd->base.duplex = DUPLEX_FULL;
+ cmd->base.port = PORT_OTHER;
+
+ return 0;
+}
+
const struct ethtool_ops mana_ethtool_ops = {
.get_ethtool_stats = mana_get_ethtool_stats,
.get_sset_count = mana_get_sset_count,
@@ -369,4 +527,8 @@ const struct ethtool_ops mana_ethtool_ops = {
.set_rxfh = mana_set_rxfh,
.get_channels = mana_get_channels,
.set_channels = mana_set_channels,
+ .get_ringparam = mana_get_ringparam,
+ .set_ringparam = mana_set_ringparam,
+ .get_link_ksettings = mana_get_link_ksettings,
+ .get_link = ethtool_op_get_link,
};
diff --git a/drivers/net/ethernet/microsoft/mana/shm_channel.c b/drivers/net/ethernet/microsoft/mana/shm_channel.c
index 5553af9c8085..0f1679ebad96 100644
--- a/drivers/net/ethernet/microsoft/mana/shm_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/shm_channel.c
@@ -6,6 +6,7 @@
#include <linux/io.h>
#include <linux/mm.h>
+#include <net/mana/gdma.h>
#include <net/mana/shm_channel.h>
#define PAGE_FRAME_L48_WIDTH_BYTES 6
@@ -155,8 +156,8 @@ int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr,
return err;
}
- if (!PAGE_ALIGNED(eq_addr) || !PAGE_ALIGNED(cq_addr) ||
- !PAGE_ALIGNED(rq_addr) || !PAGE_ALIGNED(sq_addr))
+ if (!MANA_PAGE_ALIGNED(eq_addr) || !MANA_PAGE_ALIGNED(cq_addr) ||
+ !MANA_PAGE_ALIGNED(rq_addr) || !MANA_PAGE_ALIGNED(sq_addr))
return -EINVAL;
if ((eq_msix_index & VECTOR_MASK) != eq_msix_index)
@@ -183,7 +184,7 @@ int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr,
/* EQ addr: low 48 bits of frame address */
shmem = (u64 *)ptr;
- frame_addr = PHYS_PFN(eq_addr);
+ frame_addr = MANA_PFN(eq_addr);
*shmem = frame_addr & PAGE_FRAME_L48_MASK;
all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
(frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
@@ -191,7 +192,7 @@ int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr,
/* CQ addr: low 48 bits of frame address */
shmem = (u64 *)ptr;
- frame_addr = PHYS_PFN(cq_addr);
+ frame_addr = MANA_PFN(cq_addr);
*shmem = frame_addr & PAGE_FRAME_L48_MASK;
all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
(frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
@@ -199,7 +200,7 @@ int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr,
/* RQ addr: low 48 bits of frame address */
shmem = (u64 *)ptr;
- frame_addr = PHYS_PFN(rq_addr);
+ frame_addr = MANA_PFN(rq_addr);
*shmem = frame_addr & PAGE_FRAME_L48_MASK;
all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
(frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
@@ -207,7 +208,7 @@ int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr,
/* SQ addr: low 48 bits of frame address */
shmem = (u64 *)ptr;
- frame_addr = PHYS_PFN(sq_addr);
+ frame_addr = MANA_PFN(sq_addr);
*shmem = frame_addr & PAGE_FRAME_L48_MASK;
all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
(frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 96dc69e7141f..8bd60168624a 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -576,7 +576,7 @@ MODULE_DEVICE_TABLE(of, moxart_mac_match);
static struct platform_driver moxart_mac_driver = {
.probe = moxart_mac_probe,
- .remove_new = moxart_remove,
+ .remove = moxart_remove,
.driver = {
.name = "moxart-ethernet",
.of_match_table = moxart_mac_match,
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index ed2fb44500b0..08bee56aea35 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -453,9 +453,158 @@ static u16 ocelot_vlan_unaware_pvid(struct ocelot *ocelot,
return VLAN_N_VID - bridge_num - 1;
}
+/**
+ * ocelot_update_vlan_reclassify_rule() - Make switch aware only to bridge VLAN TPID
+ *
+ * @ocelot: Switch private data structure
+ * @port: Index of ingress port
+ *
+ * IEEE 802.1Q-2018 clauses "5.5 C-VLAN component conformance" and "5.6 S-VLAN
+ * component conformance" suggest that a C-VLAN component should only recognize
+ * and filter on C-Tags, and an S-VLAN component should only recognize and
+ * process based on S-Tags.
+ *
+ * In Linux, as per commit 1a0b20b25732 ("Merge branch 'bridge-next'"), C-VLAN
+ * components are largely represented by a bridge with vlan_protocol 802.1Q,
+ * and S-VLAN components by a bridge with vlan_protocol 802.1ad.
+ *
+ * Currently the driver only offloads vlan_protocol 802.1Q, but the hardware
+ * design is non-conformant, because the switch assigns each frame to a VLAN
+ * based on an entirely different question, as detailed in figure "Basic VLAN
+ * Classification Flow" from its manual and reproduced below.
+ *
+ * Set TAG_TYPE, PCP, DEI, VID to port-default values in VLAN_CFG register
+ * if VLAN_AWARE_ENA[port] and frame has outer tag then:
+ * if VLAN_INNER_TAG_ENA[port] and frame has inner tag then:
+ * TAG_TYPE = (Frame.InnerTPID <> 0x8100)
+ * Set PCP, DEI, VID to values from inner VLAN header
+ * else:
+ * TAG_TYPE = (Frame.OuterTPID <> 0x8100)
+ * Set PCP, DEI, VID to values from outer VLAN header
+ * if VID == 0 then:
+ * VID = VLAN_CFG.VLAN_VID
+ *
+ * Summarized, the switch will recognize both 802.1Q and 802.1ad TPIDs as VLAN
+ * "with equal rights", and just set the TAG_TYPE bit to 0 (if 802.1Q) or to 1
+ * (if 802.1ad). It will classify based on whichever of the tags is "outer", no
+ * matter what TPID that may have (or "inner", if VLAN_INNER_TAG_ENA[port]).
+ *
+ * In the VLAN Table, the TAG_TYPE information is not accessible - just the
+ * classified VID is - so it is as if each VLAN Table entry is for 2 VLANs:
+ * C-VLAN X, and S-VLAN X.
+ *
+ * Whereas the Linux bridge behavior is to only filter on frames with a TPID
+ * equal to the vlan_protocol, and treat everything else as VLAN-untagged.
+ *
+ * Consider an ingress packet tagged with 802.1ad VID=3 and 802.1Q VID=5,
+ * received on a bridge vlan_filtering=1 vlan_protocol=802.1Q port. This frame
+ * should be treated as 802.1Q-untagged, and classified to the PVID of that
+ * bridge port. Not to VID=3, and not to VID=5.
+ *
+ * The VCAP IS1 TCAM has everything we need to overwrite the choices made in
+ * the basic VLAN classification pipeline: it can match on TAG_TYPE in the key,
+ * and it can modify the classified VID in the action. Thus, for each port
+ * under a vlan_filtering bridge, we can insert a rule in VCAP IS1 lookup 0 to
+ * match on 802.1ad tagged frames and modify their classified VID to the 802.1Q
+ * PVID of the port. This effectively makes it appear to the outside world as
+ * if those packets were processed as VLAN-untagged.
+ *
+ * The rule needs to be updated each time the bridge PVID changes, and needs
+ * to be deleted if the bridge PVID is deleted, or if the port becomes
+ * VLAN-unaware.
+ */
+static int ocelot_update_vlan_reclassify_rule(struct ocelot *ocelot, int port)
+{
+ unsigned long cookie = OCELOT_VCAP_IS1_VLAN_RECLASSIFY(ocelot, port);
+ struct ocelot_vcap_block *block_vcap_is1 = &ocelot->block[VCAP_IS1];
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ const struct ocelot_bridge_vlan *pvid_vlan;
+ struct ocelot_vcap_filter *filter;
+ int err, val, pcp, dei;
+ bool vid_replace_ena;
+ u16 vid;
+
+ pvid_vlan = ocelot_port->pvid_vlan;
+ vid_replace_ena = ocelot_port->vlan_aware && pvid_vlan;
+
+ filter = ocelot_vcap_block_find_filter_by_id(block_vcap_is1, cookie,
+ false);
+ if (!vid_replace_ena) {
+ /* If the reclassification filter doesn't need to exist, delete
+ * it if it was previously installed, and exit doing nothing
+ * otherwise.
+ */
+ if (filter)
+ return ocelot_vcap_filter_del(ocelot, filter);
+
+ return 0;
+ }
+
+ /* The reclassification rule must apply. See if it already exists
+ * or if it must be created.
+ */
+
+ /* Treating as VLAN-untagged means using as classified VID equal to
+ * the bridge PVID, and PCP/DEI set to the port default QoS values.
+ */
+ vid = pvid_vlan->vid;
+ val = ocelot_read_gix(ocelot, ANA_PORT_QOS_CFG, port);
+ pcp = ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL_X(val);
+ dei = !!(val & ANA_PORT_QOS_CFG_DP_DEFAULT_VAL);
+
+ if (filter) {
+ bool changed = false;
+
+ /* Filter exists, just update it */
+ if (filter->action.vid != vid) {
+ filter->action.vid = vid;
+ changed = true;
+ }
+ if (filter->action.pcp != pcp) {
+ filter->action.pcp = pcp;
+ changed = true;
+ }
+ if (filter->action.dei != dei) {
+ filter->action.dei = dei;
+ changed = true;
+ }
+
+ if (!changed)
+ return 0;
+
+ return ocelot_vcap_filter_replace(ocelot, filter);
+ }
+
+ /* Filter doesn't exist, create it */
+ filter = kzalloc(sizeof(*filter), GFP_KERNEL);
+ if (!filter)
+ return -ENOMEM;
+
+ filter->key_type = OCELOT_VCAP_KEY_ANY;
+ filter->ingress_port_mask = BIT(port);
+ filter->vlan.tpid = OCELOT_VCAP_BIT_1;
+ filter->prio = 1;
+ filter->id.cookie = cookie;
+ filter->id.tc_offload = false;
+ filter->block_id = VCAP_IS1;
+ filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
+ filter->lookup = 0;
+ filter->action.vid_replace_ena = true;
+ filter->action.pcp_dei_ena = true;
+ filter->action.vid = vid;
+ filter->action.pcp = pcp;
+ filter->action.dei = dei;
+
+ err = ocelot_vcap_filter_add(ocelot, filter, NULL);
+ if (err)
+ kfree(filter);
+
+ return err;
+}
+
/* Default vlan to clasify for untagged frames (may be zero) */
-static void ocelot_port_set_pvid(struct ocelot *ocelot, int port,
- const struct ocelot_bridge_vlan *pvid_vlan)
+static int ocelot_port_set_pvid(struct ocelot *ocelot, int port,
+ const struct ocelot_bridge_vlan *pvid_vlan)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
u16 pvid = ocelot_vlan_unaware_pvid(ocelot, ocelot_port->bridge);
@@ -475,15 +624,23 @@ static void ocelot_port_set_pvid(struct ocelot *ocelot, int port,
* happens automatically), but also 802.1p traffic which gets
* classified to VLAN 0, but that is always in our RX filter, so it
* would get accepted were it not for this setting.
+ *
+ * Also, we only support the bridge 802.1Q VLAN protocol, so
+ * 802.1ad-tagged frames (carrying S-Tags) should be considered
+ * 802.1Q-untagged, and also dropped.
*/
if (!pvid_vlan && ocelot_port->vlan_aware)
val = ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
- ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA;
+ ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA |
+ ANA_PORT_DROP_CFG_DROP_S_TAGGED_ENA;
ocelot_rmw_gix(ocelot, val,
ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
- ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA,
+ ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA |
+ ANA_PORT_DROP_CFG_DROP_S_TAGGED_ENA,
ANA_PORT_DROP_CFG, port);
+
+ return ocelot_update_vlan_reclassify_rule(ocelot, port);
}
static struct ocelot_bridge_vlan *ocelot_bridge_vlan_find(struct ocelot *ocelot,
@@ -631,7 +788,10 @@ int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
ANA_PORT_VLAN_CFG_VLAN_POP_CNT_M,
ANA_PORT_VLAN_CFG, port);
- ocelot_port_set_pvid(ocelot, port, ocelot_port->pvid_vlan);
+ err = ocelot_port_set_pvid(ocelot, port, ocelot_port->pvid_vlan);
+ if (err)
+ return err;
+
ocelot_port_manage_port_tag(ocelot, port);
return 0;
@@ -670,6 +830,7 @@ EXPORT_SYMBOL(ocelot_vlan_prepare);
int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
bool untagged)
{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
int err;
/* Ignore VID 0 added to our RX filter by the 8021q module, since
@@ -684,9 +845,17 @@ int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
return err;
/* Default ingress vlan classification */
- if (pvid)
- ocelot_port_set_pvid(ocelot, port,
- ocelot_bridge_vlan_find(ocelot, vid));
+ if (pvid) {
+ err = ocelot_port_set_pvid(ocelot, port,
+ ocelot_bridge_vlan_find(ocelot, vid));
+ if (err)
+ return err;
+ } else if (ocelot_port->pvid_vlan &&
+ ocelot_bridge_vlan_find(ocelot, vid) == ocelot_port->pvid_vlan) {
+ err = ocelot_port_set_pvid(ocelot, port, NULL);
+ if (err)
+ return err;
+ }
 /* Untagged egress vlan classification */
ocelot_port_manage_port_tag(ocelot, port);
@@ -712,8 +881,11 @@ int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid)
return err;
/* Ingress */
- if (del_pvid)
- ocelot_port_set_pvid(ocelot, port, NULL);
+ if (del_pvid) {
+ err = ocelot_port_set_pvid(ocelot, port, NULL);
+ if (err)
+ return err;
+ }
/* Egress */
ocelot_port_manage_port_tag(ocelot, port);
@@ -1099,6 +1271,48 @@ void ocelot_ptp_rx_timestamp(struct ocelot *ocelot, struct sk_buff *skb,
}
EXPORT_SYMBOL(ocelot_ptp_rx_timestamp);
+void ocelot_lock_inj_grp(struct ocelot *ocelot, int grp)
+ __acquires(&ocelot->inj_lock)
+{
+ spin_lock(&ocelot->inj_lock);
+}
+EXPORT_SYMBOL_GPL(ocelot_lock_inj_grp);
+
+void ocelot_unlock_inj_grp(struct ocelot *ocelot, int grp)
+ __releases(&ocelot->inj_lock)
+{
+ spin_unlock(&ocelot->inj_lock);
+}
+EXPORT_SYMBOL_GPL(ocelot_unlock_inj_grp);
+
+void ocelot_lock_xtr_grp(struct ocelot *ocelot, int grp)
+ __acquires(&ocelot->inj_lock)
+{
+ spin_lock(&ocelot->inj_lock);
+}
+EXPORT_SYMBOL_GPL(ocelot_lock_xtr_grp);
+
+void ocelot_unlock_xtr_grp(struct ocelot *ocelot, int grp)
+ __releases(&ocelot->inj_lock)
+{
+ spin_unlock(&ocelot->inj_lock);
+}
+EXPORT_SYMBOL_GPL(ocelot_unlock_xtr_grp);
+
+void ocelot_lock_xtr_grp_bh(struct ocelot *ocelot, int grp)
+ __acquires(&ocelot->xtr_lock)
+{
+ spin_lock_bh(&ocelot->xtr_lock);
+}
+EXPORT_SYMBOL_GPL(ocelot_lock_xtr_grp_bh);
+
+void ocelot_unlock_xtr_grp_bh(struct ocelot *ocelot, int grp)
+ __releases(&ocelot->xtr_lock)
+{
+ spin_unlock_bh(&ocelot->xtr_lock);
+}
+EXPORT_SYMBOL_GPL(ocelot_unlock_xtr_grp_bh);
+
int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **nskb)
{
u64 timestamp, src_port, len;
@@ -1109,6 +1323,8 @@ int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **nskb)
u32 val, *buf;
int err;
+ lockdep_assert_held(&ocelot->xtr_lock);
+
err = ocelot_xtr_poll_xfh(ocelot, grp, xfh);
if (err)
return err;
@@ -1184,6 +1400,8 @@ bool ocelot_can_inject(struct ocelot *ocelot, int grp)
{
u32 val = ocelot_read(ocelot, QS_INJ_STATUS);
+ lockdep_assert_held(&ocelot->inj_lock);
+
if (!(val & QS_INJ_STATUS_FIFO_RDY(BIT(grp))))
return false;
if (val & QS_INJ_STATUS_WMARK_REACHED(BIT(grp)))
@@ -1193,28 +1411,55 @@ bool ocelot_can_inject(struct ocelot *ocelot, int grp)
}
EXPORT_SYMBOL(ocelot_can_inject);
-void ocelot_ifh_port_set(void *ifh, int port, u32 rew_op, u32 vlan_tag)
+/**
+ * ocelot_ifh_set_basic - Set basic information in Injection Frame Header
+ * @ifh: Pointer to Injection Frame Header memory
+ * @ocelot: Switch private data structure
+ * @port: Egress port number
+ * @rew_op: Egress rewriter operation for PTP
+ * @skb: Pointer to socket buffer (packet)
+ *
+ * Populate the Injection Frame Header with basic information for this skb: the
+ * analyzer bypass bit, destination port, VLAN info, egress rewriter info.
+ */
+void ocelot_ifh_set_basic(void *ifh, struct ocelot *ocelot, int port,
+ u32 rew_op, struct sk_buff *skb)
{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct net_device *dev = skb->dev;
+ u64 vlan_tci, tag_type;
+ int qos_class;
+
+ ocelot_xmit_get_vlan_info(skb, ocelot_port->bridge, &vlan_tci,
+ &tag_type);
+
+ qos_class = netdev_get_num_tc(dev) ?
+ netdev_get_prio_tc_map(dev, skb->priority) : skb->priority;
+
+ memset(ifh, 0, OCELOT_TAG_LEN);
ocelot_ifh_set_bypass(ifh, 1);
+ ocelot_ifh_set_src(ifh, ocelot->num_phys_ports);
ocelot_ifh_set_dest(ifh, BIT_ULL(port));
- ocelot_ifh_set_tag_type(ifh, IFH_TAG_TYPE_C);
- if (vlan_tag)
- ocelot_ifh_set_vlan_tci(ifh, vlan_tag);
+ ocelot_ifh_set_qos_class(ifh, qos_class);
+ ocelot_ifh_set_tag_type(ifh, tag_type);
+ ocelot_ifh_set_vlan_tci(ifh, vlan_tci);
if (rew_op)
ocelot_ifh_set_rew_op(ifh, rew_op);
}
-EXPORT_SYMBOL(ocelot_ifh_port_set);
+EXPORT_SYMBOL(ocelot_ifh_set_basic);
void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp,
u32 rew_op, struct sk_buff *skb)
{
- u32 ifh[OCELOT_TAG_LEN / 4] = {0};
+ u32 ifh[OCELOT_TAG_LEN / 4];
unsigned int i, count, last;
+ lockdep_assert_held(&ocelot->inj_lock);
+
ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) |
QS_INJ_CTRL_SOF, QS_INJ_CTRL, grp);
- ocelot_ifh_port_set(ifh, port, rew_op, skb_vlan_tag_get(skb));
+ ocelot_ifh_set_basic(ifh, ocelot, port, rew_op, skb);
for (i = 0; i < OCELOT_TAG_LEN / 4; i++)
ocelot_write_rix(ocelot, ifh[i], QS_INJ_WR, grp);
@@ -1247,6 +1492,8 @@ EXPORT_SYMBOL(ocelot_port_inject_frame);
void ocelot_drain_cpu_queue(struct ocelot *ocelot, int grp)
{
+ lockdep_assert_held(&ocelot->xtr_lock);
+
while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp))
ocelot_read_rix(ocelot, QS_XTR_RD, grp);
}
@@ -2532,7 +2779,7 @@ int ocelot_port_set_default_prio(struct ocelot *ocelot, int port, u8 prio)
ANA_PORT_QOS_CFG,
port);
- return 0;
+ return ocelot_update_vlan_reclassify_rule(ocelot, port);
}
EXPORT_SYMBOL_GPL(ocelot_port_set_default_prio);
@@ -2929,6 +3176,8 @@ int ocelot_init(struct ocelot *ocelot)
mutex_init(&ocelot->fwd_domain_lock);
spin_lock_init(&ocelot->ptp_clock_lock);
spin_lock_init(&ocelot->ts_id_lock);
+ spin_lock_init(&ocelot->inj_lock);
+ spin_lock_init(&ocelot->xtr_lock);
ocelot->owq = alloc_ordered_workqueue("ocelot-owq", 0);
if (!ocelot->owq)
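
The lockdep_assert_held() annotations added above make the calling convention explicit: ocelot_can_inject(), ocelot_port_inject_frame() and ocelot_drain_cpu_queue() must now run with the corresponding injection/extraction group lock held. Below is a minimal sketch of how a caller (for example a tag-protocol xmit path) might use the new helpers; everything except the ocelot_* calls is illustrative and not part of the patch.

#include <linux/skbuff.h>
#include <soc/mscc/ocelot.h>

/* Hypothetical register-based injection path: serialize access to
 * injection group 0 with the helpers introduced above.
 */
static int example_inject(struct ocelot *ocelot, int port, u32 rew_op,
			  struct sk_buff *skb)
{
	int grp = 0;	/* CPU injection group used for register-based I/O */
	int ret = 0;

	ocelot_lock_inj_grp(ocelot, grp);

	if (ocelot_can_inject(ocelot, grp))
		ocelot_port_inject_frame(ocelot, port, grp, rew_op, skb);
	else
		ret = -EBUSY;	/* FIFO not ready; let the caller retry */

	ocelot_unlock_inj_grp(ocelot, grp);

	return ret;
}
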
diff --git a/drivers/net/ethernet/mscc/ocelot_fdma.c b/drivers/net/ethernet/mscc/ocelot_fdma.c
index 312a46832154..00326ae8c708 100644
--- a/drivers/net/ethernet/mscc/ocelot_fdma.c
+++ b/drivers/net/ethernet/mscc/ocelot_fdma.c
@@ -665,8 +665,7 @@ static int ocelot_fdma_prepare_skb(struct ocelot *ocelot, int port, u32 rew_op,
ifh = skb_push(skb, OCELOT_TAG_LEN);
skb_put(skb, ETH_FCS_LEN);
- memset(ifh, 0, OCELOT_TAG_LEN);
- ocelot_ifh_port_set(ifh, port, rew_op, skb_vlan_tag_get(skb));
+ ocelot_ifh_set_basic(ifh, ocelot, port, rew_op, skb);
return 0;
}
diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c
index 33b438c6aec5..986b1f150e3b 100644
--- a/drivers/net/ethernet/mscc/ocelot_flower.c
+++ b/drivers/net/ethernet/mscc/ocelot_flower.c
@@ -228,6 +228,32 @@ ocelot_flower_parse_egress_vlan_modify(struct ocelot_vcap_filter *filter,
return 0;
}
+static int
+ocelot_flower_parse_egress_port(struct ocelot *ocelot, struct flow_cls_offload *f,
+ const struct flow_action_entry *a, bool mirror,
+ struct netlink_ext_ack *extack)
+{
+ const char *act_string = mirror ? "mirror" : "redirect";
+ int egress_port = ocelot->ops->netdev_to_port(a->dev);
+ enum flow_action_id offloadable_act_id;
+
+ offloadable_act_id = mirror ? FLOW_ACTION_MIRRED : FLOW_ACTION_REDIRECT;
+
+ /* Mirroring towards foreign interfaces is handled in software */
+ if (egress_port < 0 || a->id != offloadable_act_id) {
+ if (f->common.skip_sw) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "Can only %s to %s if filter also runs in software",
+ act_string, egress_port < 0 ?
+ "CPU" : "ingress of ocelot port");
+ return -EOPNOTSUPP;
+ }
+ egress_port = ocelot->num_phys_ports;
+ }
+
+ return egress_port;
+}
+
static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
bool ingress, struct flow_cls_offload *f,
struct ocelot_vcap_filter *filter)
@@ -356,6 +382,7 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
break;
case FLOW_ACTION_REDIRECT:
+ case FLOW_ACTION_REDIRECT_INGRESS:
if (filter->block_id != VCAP_IS2) {
NL_SET_ERR_MSG_MOD(extack,
"Redirect action can only be offloaded to VCAP IS2");
@@ -366,17 +393,19 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
"Last action must be GOTO");
return -EOPNOTSUPP;
}
- egress_port = ocelot->ops->netdev_to_port(a->dev);
- if (egress_port < 0) {
- NL_SET_ERR_MSG_MOD(extack,
- "Destination not an ocelot port");
- return -EOPNOTSUPP;
- }
+
+ egress_port = ocelot_flower_parse_egress_port(ocelot, f,
+ a, false,
+ extack);
+ if (egress_port < 0)
+ return egress_port;
+
filter->action.mask_mode = OCELOT_MASK_MODE_REDIRECT;
filter->action.port_mask = BIT(egress_port);
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
break;
case FLOW_ACTION_MIRRED:
+ case FLOW_ACTION_MIRRED_INGRESS:
if (filter->block_id != VCAP_IS2) {
NL_SET_ERR_MSG_MOD(extack,
"Mirror action can only be offloaded to VCAP IS2");
@@ -387,12 +416,13 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
"Last action must be GOTO");
return -EOPNOTSUPP;
}
- egress_port = ocelot->ops->netdev_to_port(a->dev);
- if (egress_port < 0) {
- NL_SET_ERR_MSG_MOD(extack,
- "Destination not an ocelot port");
- return -EOPNOTSUPP;
- }
+
+ egress_port = ocelot_flower_parse_egress_port(ocelot, f,
+ a, true,
+ extack);
+ if (egress_port < 0)
+ return egress_port;
+
filter->egress_port.value = egress_port;
filter->action.mirror_ena = true;
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
@@ -609,11 +639,8 @@ ocelot_flower_parse_key(struct ocelot *ocelot, int port, bool ingress,
return ret;
}
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
- struct flow_match_control match;
-
- flow_rule_match_control(rule, &match);
- }
+ if (flow_rule_match_has_control_flags(rule, extack))
+ return -EOPNOTSUPP;
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
struct flow_match_vlan match;
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index 21a87a3fc556..469784d3a1a6 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -730,7 +730,7 @@ static void ocelot_get_stats64(struct net_device *dev,
static int ocelot_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr,
- u16 vid, u16 flags,
+ u16 vid, u16 flags, bool *notified,
struct netlink_ext_ack *extack)
{
struct ocelot_port_private *priv = netdev_priv(dev);
@@ -744,7 +744,7 @@ static int ocelot_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
static int ocelot_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr, u16 vid,
- struct netlink_ext_ack *extack)
+ bool *notified, struct netlink_ext_ack *extack)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot_port *ocelot_port = &priv->port;
@@ -758,12 +758,13 @@ static int ocelot_port_fdb_do_dump(const unsigned char *addr, u16 vid,
bool is_static, void *data)
{
struct ocelot_dump_ctx *dump = data;
+ struct ndo_fdb_dump_context *ctx = (void *)dump->cb->ctx;
u32 portid = NETLINK_CB(dump->cb->skb).portid;
u32 seq = dump->cb->nlh->nlmsg_seq;
struct nlmsghdr *nlh;
struct ndmsg *ndm;
- if (dump->idx < dump->cb->args[2])
+ if (dump->idx < ctx->fdb_idx)
goto skip;
nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
@@ -869,23 +870,30 @@ static int ocelot_set_features(struct net_device *dev,
static int ocelot_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
+ return phy_mii_ioctl(dev->phydev, ifr, cmd);
+}
+
+static int ocelot_port_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg)
+{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot *ocelot = priv->port.ocelot;
int port = priv->port.index;
- /* If the attached PHY device isn't capable of timestamping operations,
- * use our own (when possible).
- */
- if (!phy_has_hwtstamp(dev->phydev) && ocelot->ptp) {
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return ocelot_hwstamp_set(ocelot, port, ifr);
- case SIOCGHWTSTAMP:
- return ocelot_hwstamp_get(ocelot, port, ifr);
- }
- }
+ ocelot_hwstamp_get(ocelot, port, cfg);
- return phy_mii_ioctl(dev->phydev, ifr, cmd);
+ return 0;
+}
+
+static int ocelot_port_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->port.index;
+
+ return ocelot_hwstamp_set(ocelot, port, cfg, extack);
}
static int ocelot_change_mtu(struct net_device *dev, int new_mtu)
@@ -916,6 +924,8 @@ static const struct net_device_ops ocelot_port_netdev_ops = {
.ndo_set_features = ocelot_set_features,
.ndo_setup_tc = ocelot_setup_tc,
.ndo_eth_ioctl = ocelot_ioctl,
+ .ndo_hwtstamp_get = ocelot_port_hwtstamp_get,
+ .ndo_hwtstamp_set = ocelot_port_hwtstamp_set,
};
struct net_device *ocelot_port_to_netdev(struct ocelot *ocelot, int port)
@@ -980,7 +990,7 @@ static int ocelot_port_get_sset_count(struct net_device *dev, int sset)
}
static int ocelot_port_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot *ocelot = priv->port.ocelot;
@@ -992,6 +1002,16 @@ static int ocelot_port_get_ts_info(struct net_device *dev,
return ocelot_get_ts_info(ocelot, port, info);
}
+static void ocelot_port_ts_stats(struct net_device *dev,
+ struct ethtool_ts_stats *ts_stats)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->port.index;
+
+ ocelot_port_get_ts_stats(ocelot, port, ts_stats);
+}
+
static const struct ethtool_ops ocelot_ethtool_ops = {
.get_strings = ocelot_port_get_strings,
.get_ethtool_stats = ocelot_port_get_ethtool_stats,
@@ -999,6 +1019,7 @@ static const struct ethtool_ops ocelot_ethtool_ops = {
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
.get_ts_info = ocelot_port_get_ts_info,
+ .get_ts_stats = ocelot_port_ts_stats,
};
static void ocelot_port_attr_stp_state_set(struct ocelot *ocelot, int port,
diff --git a/drivers/net/ethernet/mscc/ocelot_ptp.c b/drivers/net/ethernet/mscc/ocelot_ptp.c
index cb32234a5bf1..88b5422cc2a0 100644
--- a/drivers/net/ethernet/mscc/ocelot_ptp.c
+++ b/drivers/net/ethernet/mscc/ocelot_ptp.c
@@ -14,6 +14,8 @@
#include <soc/mscc/ocelot.h>
#include "ocelot.h"
+#define OCELOT_PTP_TX_TSTAMP_TIMEOUT (5 * HZ)
+
int ocelot_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
@@ -209,11 +211,6 @@ int ocelot_ptp_enable(struct ptp_clock_info *ptp,
switch (rq->type) {
case PTP_CLK_REQ_PEROUT:
- /* Reject requests with unsupported flags */
- if (rq->perout.flags & ~(PTP_PEROUT_DUTY_CYCLE |
- PTP_PEROUT_PHASE))
- return -EOPNOTSUPP;
-
pin = ptp_find_pin(ocelot->ptp_clock, PTP_PF_PEROUT,
rq->perout.index);
if (pin == 0)
@@ -495,58 +492,64 @@ static int ocelot_traps_to_ptp_rx_filter(unsigned int proto)
return HWTSTAMP_FILTER_NONE;
}
-int ocelot_hwstamp_get(struct ocelot *ocelot, int port, struct ifreq *ifr)
+static int ocelot_ptp_tx_type_to_cmd(int tx_type, int *ptp_cmd)
+{
+ switch (tx_type) {
+ case HWTSTAMP_TX_ON:
+ *ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
+ break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+		/* IFH_REW_OP_ONE_STEP_PTP updates the correctionField,
+		 * whereas what we need to update is the originTimestamp.
+ */
+ *ptp_cmd = IFH_REW_OP_ORIGIN_PTP;
+ break;
+ case HWTSTAMP_TX_OFF:
+ *ptp_cmd = 0;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+void ocelot_hwstamp_get(struct ocelot *ocelot, int port,
+ struct kernel_hwtstamp_config *cfg)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
- struct hwtstamp_config cfg = {};
switch (ocelot_port->ptp_cmd) {
case IFH_REW_OP_TWO_STEP_PTP:
- cfg.tx_type = HWTSTAMP_TX_ON;
+ cfg->tx_type = HWTSTAMP_TX_ON;
break;
case IFH_REW_OP_ORIGIN_PTP:
- cfg.tx_type = HWTSTAMP_TX_ONESTEP_SYNC;
+ cfg->tx_type = HWTSTAMP_TX_ONESTEP_SYNC;
break;
default:
- cfg.tx_type = HWTSTAMP_TX_OFF;
+ cfg->tx_type = HWTSTAMP_TX_OFF;
break;
}
- cfg.rx_filter = ocelot_traps_to_ptp_rx_filter(ocelot_port->trap_proto);
-
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+ cfg->rx_filter = ocelot_traps_to_ptp_rx_filter(ocelot_port->trap_proto);
}
EXPORT_SYMBOL(ocelot_hwstamp_get);
-int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr)
+int ocelot_hwstamp_set(struct ocelot *ocelot, int port,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
bool l2 = false, l4 = false;
- struct hwtstamp_config cfg;
+ int ptp_cmd;
int err;
- if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
- return -EFAULT;
-
/* Tx type sanity check */
- switch (cfg.tx_type) {
- case HWTSTAMP_TX_ON:
- ocelot_port->ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
- break;
- case HWTSTAMP_TX_ONESTEP_SYNC:
- /* IFH_REW_OP_ONE_STEP_PTP updates the correctional field, we
- * need to update the origin time.
- */
- ocelot_port->ptp_cmd = IFH_REW_OP_ORIGIN_PTP;
- break;
- case HWTSTAMP_TX_OFF:
- ocelot_port->ptp_cmd = 0;
- break;
- default:
- return -ERANGE;
- }
+ err = ocelot_ptp_tx_type_to_cmd(cfg->tx_type, &ptp_cmd);
+ if (err)
+ return err;
- switch (cfg.rx_filter) {
+ switch (cfg->rx_filter) {
case HWTSTAMP_FILTER_NONE:
break;
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
@@ -573,26 +576,24 @@ int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr)
if (err)
return err;
- cfg.rx_filter = ocelot_traps_to_ptp_rx_filter(ocelot_port->trap_proto);
+ ocelot_port->ptp_cmd = ptp_cmd;
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+ cfg->rx_filter = ocelot_traps_to_ptp_rx_filter(ocelot_port->trap_proto);
+
+ return 0;
}
EXPORT_SYMBOL(ocelot_hwstamp_set);
int ocelot_get_ts_info(struct ocelot *ocelot, int port,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
- info->phc_index = ocelot->ptp_clock ?
- ptp_clock_index(ocelot->ptp_clock) : -1;
- if (info->phc_index == -1) {
- info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
+ if (ocelot->ptp_clock) {
+ info->phc_index = ptp_clock_index(ocelot->ptp_clock);
+ } else {
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE;
return 0;
}
info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
@@ -607,34 +608,92 @@ int ocelot_get_ts_info(struct ocelot *ocelot, int port,
}
EXPORT_SYMBOL(ocelot_get_ts_info);
-static int ocelot_port_add_txtstamp_skb(struct ocelot *ocelot, int port,
- struct sk_buff *clone)
+static struct sk_buff *ocelot_port_dequeue_ptp_tx_skb(struct ocelot *ocelot,
+ int port, u8 ts_id,
+ u32 seqid)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
- unsigned long flags;
+ struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
+ struct ptp_header *hdr;
- spin_lock_irqsave(&ocelot->ts_id_lock, flags);
+ spin_lock(&ocelot->ts_id_lock);
- if (ocelot_port->ptp_skbs_in_flight == OCELOT_MAX_PTP_ID ||
- ocelot->ptp_skbs_in_flight == OCELOT_PTP_FIFO_SIZE) {
- spin_unlock_irqrestore(&ocelot->ts_id_lock, flags);
- return -EBUSY;
+ skb_queue_walk_safe(&ocelot_port->tx_skbs, skb, skb_tmp) {
+ if (OCELOT_SKB_CB(skb)->ts_id != ts_id)
+ continue;
+
+ /* Check that the timestamp ID is for the expected PTP
+ * sequenceId. We don't have to test ptp_parse_header() against
+ * NULL, because we've pre-validated the packet's ptp_class.
+ */
+ hdr = ptp_parse_header(skb, OCELOT_SKB_CB(skb)->ptp_class);
+ if (seqid != ntohs(hdr->sequence_id))
+ continue;
+
+ __skb_unlink(skb, &ocelot_port->tx_skbs);
+ ocelot->ptp_skbs_in_flight--;
+ skb_match = skb;
+ break;
+ }
+
+ spin_unlock(&ocelot->ts_id_lock);
+
+ return skb_match;
+}
+
+static int ocelot_port_queue_ptp_tx_skb(struct ocelot *ocelot, int port,
+ struct sk_buff *clone)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ DECLARE_BITMAP(ts_id_in_flight, OCELOT_MAX_PTP_ID);
+ struct sk_buff *skb, *skb_tmp;
+ unsigned long n;
+
+ spin_lock(&ocelot->ts_id_lock);
+
+ /* To get a better chance of acquiring a timestamp ID, first flush the
+ * stale packets still waiting in the TX timestamping queue. They are
+ * probably lost.
+ */
+ skb_queue_walk_safe(&ocelot_port->tx_skbs, skb, skb_tmp) {
+ if (time_before(OCELOT_SKB_CB(skb)->ptp_tx_time +
+ OCELOT_PTP_TX_TSTAMP_TIMEOUT, jiffies)) {
+ u64_stats_update_begin(&ocelot_port->ts_stats->syncp);
+ ocelot_port->ts_stats->lost++;
+ u64_stats_update_end(&ocelot_port->ts_stats->syncp);
+
+ dev_dbg_ratelimited(ocelot->dev,
+ "port %d invalidating stale timestamp ID %u which seems lost\n",
+ port, OCELOT_SKB_CB(skb)->ts_id);
+
+ __skb_unlink(skb, &ocelot_port->tx_skbs);
+ kfree_skb(skb);
+ ocelot->ptp_skbs_in_flight--;
+ } else {
+ __set_bit(OCELOT_SKB_CB(skb)->ts_id, ts_id_in_flight);
+ }
}
- skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
- /* Store timestamp ID in OCELOT_SKB_CB(clone)->ts_id */
- OCELOT_SKB_CB(clone)->ts_id = ocelot_port->ts_id;
+ if (ocelot->ptp_skbs_in_flight == OCELOT_PTP_FIFO_SIZE) {
+ spin_unlock(&ocelot->ts_id_lock);
+ return -EBUSY;
+ }
- ocelot_port->ts_id++;
- if (ocelot_port->ts_id == OCELOT_MAX_PTP_ID)
- ocelot_port->ts_id = 0;
+ n = find_first_zero_bit(ts_id_in_flight, OCELOT_MAX_PTP_ID);
+ if (n == OCELOT_MAX_PTP_ID) {
+ spin_unlock(&ocelot->ts_id_lock);
+ return -EBUSY;
+ }
- ocelot_port->ptp_skbs_in_flight++;
+ /* Found an available timestamp ID, use it */
+ OCELOT_SKB_CB(clone)->ts_id = n;
+ OCELOT_SKB_CB(clone)->ptp_tx_time = jiffies;
ocelot->ptp_skbs_in_flight++;
+ __skb_queue_tail(&ocelot_port->tx_skbs, clone);
- skb_queue_tail(&ocelot_port->tx_skbs, clone);
+ spin_unlock(&ocelot->ts_id_lock);
- spin_unlock_irqrestore(&ocelot->ts_id_lock, flags);
+ dev_dbg_ratelimited(ocelot->dev, "port %d timestamp id %lu\n", port, n);
return 0;
}
@@ -672,13 +731,20 @@ int ocelot_port_txtstamp_request(struct ocelot *ocelot, int port,
return 0;
ptp_class = ptp_classify_raw(skb);
- if (ptp_class == PTP_CLASS_NONE)
- return -EINVAL;
+ if (ptp_class == PTP_CLASS_NONE) {
+ err = -EINVAL;
+ goto error;
+ }
/* Store ptp_cmd in OCELOT_SKB_CB(skb)->ptp_cmd */
if (ptp_cmd == IFH_REW_OP_ORIGIN_PTP) {
if (ocelot_ptp_is_onestep_sync(skb, ptp_class)) {
OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd;
+
+ u64_stats_update_begin(&ocelot_port->ts_stats->syncp);
+ ocelot_port->ts_stats->onestep_pkts_unconfirmed++;
+ u64_stats_update_end(&ocelot_port->ts_stats->syncp);
+
return 0;
}
@@ -688,18 +754,30 @@ int ocelot_port_txtstamp_request(struct ocelot *ocelot, int port,
if (ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
*clone = skb_clone_sk(skb);
- if (!(*clone))
- return -ENOMEM;
+ if (!(*clone)) {
+ err = -ENOMEM;
+ goto error;
+ }
- err = ocelot_port_add_txtstamp_skb(ocelot, port, *clone);
- if (err)
- return err;
+ /* Store timestamp ID in OCELOT_SKB_CB(clone)->ts_id */
+ err = ocelot_port_queue_ptp_tx_skb(ocelot, port, *clone);
+ if (err) {
+ kfree_skb(*clone);
+ goto error;
+ }
+ skb_shinfo(*clone)->tx_flags |= SKBTX_IN_PROGRESS;
OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd;
OCELOT_SKB_CB(*clone)->ptp_class = ptp_class;
}
return 0;
+
+error:
+ u64_stats_update_begin(&ocelot_port->ts_stats->syncp);
+ ocelot_port->ts_stats->err++;
+ u64_stats_update_end(&ocelot_port->ts_stats->syncp);
+ return err;
}
EXPORT_SYMBOL(ocelot_port_txtstamp_request);
@@ -730,28 +808,16 @@ static void ocelot_get_hwtimestamp(struct ocelot *ocelot,
spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
}
-static bool ocelot_validate_ptp_skb(struct sk_buff *clone, u16 seqid)
-{
- struct ptp_header *hdr;
-
- hdr = ptp_parse_header(clone, OCELOT_SKB_CB(clone)->ptp_class);
- if (WARN_ON(!hdr))
- return false;
-
- return seqid == ntohs(hdr->sequence_id);
-}
-
void ocelot_get_txtstamp(struct ocelot *ocelot)
{
int budget = OCELOT_PTP_QUEUE_SZ;
while (budget--) {
- struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
struct skb_shared_hwtstamps shhwtstamps;
+ struct ocelot_port *ocelot_port;
u32 val, id, seqid, txport;
- struct ocelot_port *port;
+ struct sk_buff *skb_match;
struct timespec64 ts;
- unsigned long flags;
val = ocelot_read(ocelot, SYS_PTP_STATUS);
@@ -765,38 +831,26 @@ void ocelot_get_txtstamp(struct ocelot *ocelot)
id = SYS_PTP_STATUS_PTP_MESS_ID_X(val);
txport = SYS_PTP_STATUS_PTP_MESS_TXPORT_X(val);
seqid = SYS_PTP_STATUS_PTP_MESS_SEQ_ID(val);
-
- port = ocelot->ports[txport];
-
- spin_lock(&ocelot->ts_id_lock);
- port->ptp_skbs_in_flight--;
- ocelot->ptp_skbs_in_flight--;
- spin_unlock(&ocelot->ts_id_lock);
+ ocelot_port = ocelot->ports[txport];
/* Retrieve its associated skb */
-try_again:
- spin_lock_irqsave(&port->tx_skbs.lock, flags);
-
- skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
- if (OCELOT_SKB_CB(skb)->ts_id != id)
- continue;
- __skb_unlink(skb, &port->tx_skbs);
- skb_match = skb;
- break;
+ skb_match = ocelot_port_dequeue_ptp_tx_skb(ocelot, txport, id,
+ seqid);
+ if (!skb_match) {
+ u64_stats_update_begin(&ocelot_port->ts_stats->syncp);
+ ocelot_port->ts_stats->err++;
+ u64_stats_update_end(&ocelot_port->ts_stats->syncp);
+
+ dev_dbg_ratelimited(ocelot->dev,
+ "port %d received TX timestamp (seqid %d, ts id %u) for packet previously declared stale\n",
+ txport, seqid, id);
+
+ goto next_ts;
}
- spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
-
- if (WARN_ON(!skb_match))
- continue;
-
- if (!ocelot_validate_ptp_skb(skb_match, seqid)) {
- dev_err_ratelimited(ocelot->dev,
- "port %d received stale TX timestamp for seqid %d, discarding\n",
- txport, seqid);
- dev_kfree_skb_any(skb);
- goto try_again;
- }
+ u64_stats_update_begin(&ocelot_port->ts_stats->syncp);
+ ocelot_port->ts_stats->pkts++;
+ u64_stats_update_end(&ocelot_port->ts_stats->syncp);
/* Get the h/w timestamp */
ocelot_get_hwtimestamp(ocelot, &ts);
@@ -806,7 +860,7 @@ try_again:
shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
skb_complete_tx_timestamp(skb_match, &shhwtstamps);
- /* Next ts */
+next_ts:
ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT);
}
}
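
The reworked two-step timestamping above stops using a per-port rolling ts_id counter and instead derives the set of timestamp IDs still in flight from the tx_skbs queue, then picks a free one with find_first_zero_bit(). Below is a minimal, self-contained sketch of that allocation idea using only the generic bitmap API; the names and the bound value are illustrative stand-ins, not the driver's actual definitions.

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/errno.h>

#define EXAMPLE_MAX_PTP_ID	64	/* illustrative bound; the driver uses OCELOT_MAX_PTP_ID */

/* Pick the lowest timestamp ID not currently in flight and mark it used.
 * The caller owns a zero-initialized DECLARE_BITMAP(in_flight, EXAMPLE_MAX_PTP_ID).
 * Returns the ID, or -EBUSY if every ID is still waiting for a timestamp.
 */
static int example_alloc_ts_id(unsigned long *in_flight)
{
	unsigned long n = find_first_zero_bit(in_flight, EXAMPLE_MAX_PTP_ID);

	if (n == EXAMPLE_MAX_PTP_ID)
		return -EBUSY;

	__set_bit(n, in_flight);
	return n;
}
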
diff --git a/drivers/net/ethernet/mscc/ocelot_stats.c b/drivers/net/ethernet/mscc/ocelot_stats.c
index c018783757fb..d2be1be37716 100644
--- a/drivers/net/ethernet/mscc/ocelot_stats.c
+++ b/drivers/net/ethernet/mscc/ocelot_stats.c
@@ -821,6 +821,26 @@ void ocelot_port_get_eth_phy_stats(struct ocelot *ocelot, int port,
}
EXPORT_SYMBOL_GPL(ocelot_port_get_eth_phy_stats);
+void ocelot_port_get_ts_stats(struct ocelot *ocelot, int port,
+ struct ethtool_ts_stats *ts_stats)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct ocelot_ts_stats *stats = ocelot_port->ts_stats;
+ unsigned int start;
+
+ if (!ocelot->ptp)
+ return;
+
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ ts_stats->pkts = stats->pkts;
+ ts_stats->onestep_pkts_unconfirmed = stats->onestep_pkts_unconfirmed;
+ ts_stats->lost = stats->lost;
+ ts_stats->err = stats->err;
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+}
+EXPORT_SYMBOL_GPL(ocelot_port_get_ts_stats);
+
void ocelot_port_get_stats64(struct ocelot *ocelot, int port,
struct rtnl_link_stats64 *stats)
{
@@ -960,6 +980,23 @@ int ocelot_stats_init(struct ocelot *ocelot)
if (!ocelot->stats)
return -ENOMEM;
+ if (ocelot->ptp) {
+ for (int port = 0; port < ocelot->num_phys_ports; port++) {
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+ if (!ocelot_port)
+ continue;
+
+ ocelot_port->ts_stats = devm_kzalloc(ocelot->dev,
+ sizeof(*ocelot_port->ts_stats),
+ GFP_KERNEL);
+ if (!ocelot_port->ts_stats)
+ return -ENOMEM;
+
+ u64_stats_init(&ocelot_port->ts_stats->syncp);
+ }
+ }
+
snprintf(queue_name, sizeof(queue_name), "%s-stats",
dev_name(ocelot->dev));
ocelot->stats_queue = create_singlethread_workqueue(queue_name);
@@ -984,6 +1021,6 @@ int ocelot_stats_init(struct ocelot *ocelot)
void ocelot_stats_deinit(struct ocelot *ocelot)
{
- cancel_delayed_work(&ocelot->stats_work);
+ disable_delayed_work_sync(&ocelot->stats_work);
destroy_workqueue(ocelot->stats_queue);
}
diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.c b/drivers/net/ethernet/mscc/ocelot_vcap.c
index 73cdec5ca6a3..5734b86aed5b 100644
--- a/drivers/net/ethernet/mscc/ocelot_vcap.c
+++ b/drivers/net/ethernet/mscc/ocelot_vcap.c
@@ -695,6 +695,7 @@ static void is1_entry_set(struct ocelot *ocelot, int ix,
vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_L2_MC, filter->dmac_mc);
vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_L2_BC, filter->dmac_bc);
vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_VLAN_TAGGED, tag->tagged);
+ vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_TPID, tag->tpid);
vcap_key_set(vcap, &data, VCAP_IS1_HK_VID,
tag->vid.value, tag->vid.mask);
vcap_key_set(vcap, &data, VCAP_IS1_HK_PCP,
diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
index 993212c3a7da..498eec8ae61d 100644
--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
@@ -51,6 +51,8 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
struct ocelot *ocelot = arg;
int grp = 0, err;
+ ocelot_lock_xtr_grp(ocelot, grp);
+
while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) {
struct sk_buff *skb;
@@ -69,6 +71,8 @@ out:
if (err < 0)
ocelot_drain_cpu_queue(ocelot, 0);
+ ocelot_unlock_xtr_grp(ocelot, grp);
+
return IRQ_HANDLED;
}
@@ -104,6 +108,8 @@ static struct ptp_clock_info ocelot_ptp_clock_info = {
.n_ext_ts = 0,
.n_per_out = OCELOT_PTP_PINS_NUM,
.n_pins = OCELOT_PTP_PINS_NUM,
+ .supported_perout_flags = PTP_PEROUT_DUTY_CYCLE |
+ PTP_PEROUT_PHASE,
.pps = 0,
.gettime64 = ocelot_ptp_gettime64,
.settime64 = ocelot_ptp_settime64,
@@ -412,7 +418,7 @@ static void mscc_ocelot_remove(struct platform_device *pdev)
static struct platform_driver mscc_ocelot_driver = {
.probe = mscc_ocelot_probe,
- .remove_new = mscc_ocelot_remove,
+ .remove = mscc_ocelot_remove,
.driver = {
.name = "ocelot-switch",
.of_match_table = mscc_ocelot_match,
diff --git a/drivers/net/ethernet/mucse/Kconfig b/drivers/net/ethernet/mucse/Kconfig
new file mode 100644
index 000000000000..0b3e853d625f
--- /dev/null
+++ b/drivers/net/ethernet/mucse/Kconfig
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Mucse network device configuration
+#
+
+config NET_VENDOR_MUCSE
+ bool "Mucse devices"
+ default y
+ help
+ If you have a network (Ethernet) card from Mucse(R), say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Mucse(R) cards. If you say Y, you will
+ be asked for your specific card in the following questions.
+
+if NET_VENDOR_MUCSE
+
+config MGBE
+ tristate "Mucse(R) 1GbE PCI Express adapters support"
+ depends on PCI
+ help
+	  This driver supports the Mucse(R) 1GbE PCI Express family of
+	  adapters.
+
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/device_drivers/ethernet/mucse/rnpgbe.rst>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called rnpgbe.
+
+endif # NET_VENDOR_MUCSE
+
diff --git a/drivers/net/ethernet/mucse/Makefile b/drivers/net/ethernet/mucse/Makefile
new file mode 100644
index 000000000000..675173fa05f7
--- /dev/null
+++ b/drivers/net/ethernet/mucse/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright(c) 2020 - 2025 MUCSE Corporation.
+#
+# Makefile for the MUCSE(R) network device drivers
+#
+
+obj-$(CONFIG_MGBE) += rnpgbe/
diff --git a/drivers/net/ethernet/mucse/rnpgbe/Makefile b/drivers/net/ethernet/mucse/rnpgbe/Makefile
new file mode 100644
index 000000000000..de8bcb7772ab
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright(c) 2020 - 2025 MUCSE Corporation.
+#
+# Makefile for the MUCSE(R) 1GbE PCI Express ethernet driver
+#
+
+obj-$(CONFIG_MGBE) += rnpgbe.o
+rnpgbe-objs := rnpgbe_main.o\
+ rnpgbe_chip.o\
+ rnpgbe_mbx.o\
+ rnpgbe_mbx_fw.o
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h
new file mode 100644
index 000000000000..5b024f9f7e17
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2020 - 2025 Mucse Corporation. */
+
+#ifndef _RNPGBE_H
+#define _RNPGBE_H
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+
+enum rnpgbe_boards {
+ board_n500,
+ board_n210
+};
+
+struct mucse_mbx_info {
+ u32 timeout_us;
+ u32 delay_us;
+ u16 fw_req;
+ u16 fw_ack;
+	/* lock to serialize mailbox access */
+ struct mutex lock;
+ /* fw <--> pf mbx */
+ u32 fwpf_shm_base;
+ u32 pf2fw_mbx_ctrl;
+ u32 fwpf_mbx_mask;
+ u32 fwpf_ctrl_base;
+};
+
+/* Enum for firmware notification modes.
+ * More modes (e.g., portup, link_report) will be added in the future.
+ */
+enum {
+ mucse_fw_powerup,
+};
+
+struct mucse_hw {
+ void __iomem *hw_addr;
+ struct pci_dev *pdev;
+ struct mucse_mbx_info mbx;
+ int port;
+ u8 pfvfnum;
+};
+
+struct mucse_stats {
+ u64 tx_dropped;
+};
+
+struct mucse {
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ struct mucse_hw hw;
+ struct mucse_stats stats;
+};
+
+int rnpgbe_get_permanent_mac(struct mucse_hw *hw, u8 *perm_addr);
+int rnpgbe_reset_hw(struct mucse_hw *hw);
+int rnpgbe_send_notify(struct mucse_hw *hw,
+ bool enable,
+ int mode);
+int rnpgbe_init_hw(struct mucse_hw *hw, int board_type);
+
+/* Device IDs */
+#define PCI_VENDOR_ID_MUCSE 0x8848
+#define RNPGBE_DEVICE_ID_N500_QUAD_PORT 0x8308
+#define RNPGBE_DEVICE_ID_N500_DUAL_PORT 0x8318
+#define RNPGBE_DEVICE_ID_N210 0x8208
+#define RNPGBE_DEVICE_ID_N210L 0x820a
+
+#define mucse_hw_wr32(hw, reg, val) \
+ writel((val), (hw)->hw_addr + (reg))
+#endif /* _RNPGBE_H */
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c
new file mode 100644
index 000000000000..ebc7b3750157
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2020 - 2025 Mucse Corporation. */
+
+#include <linux/pci.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+
+#include "rnpgbe.h"
+#include "rnpgbe_hw.h"
+#include "rnpgbe_mbx.h"
+#include "rnpgbe_mbx_fw.h"
+
+/**
+ * rnpgbe_get_permanent_mac - Get the permanent MAC address
+ * @hw: hw information structure
+ * @perm_addr: pointer to store perm_addr
+ *
+ * rnpgbe_get_permanent_mac tries to get the MAC address from firmware
+ *
+ * Return: 0 on success, negative errno on failure
+ **/
+int rnpgbe_get_permanent_mac(struct mucse_hw *hw, u8 *perm_addr)
+{
+ struct device *dev = &hw->pdev->dev;
+ int err;
+
+ err = mucse_mbx_get_macaddr(hw, hw->pfvfnum, perm_addr, hw->port);
+ if (err) {
+ dev_err(dev, "Failed to get MAC from FW %d\n", err);
+ return err;
+ }
+
+ if (!is_valid_ether_addr(perm_addr)) {
+ dev_err(dev, "Failed to get valid MAC from FW\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * rnpgbe_reset_hw - Do a hardware reset
+ * @hw: hw information structure
+ *
+ * rnpgbe_reset_hw asks the fw to do a hardware
+ * reset, and restores some registers to their defaults.
+ *
+ * Return: 0 on success, negative errno on failure
+ **/
+int rnpgbe_reset_hw(struct mucse_hw *hw)
+{
+ mucse_hw_wr32(hw, RNPGBE_DMA_AXI_EN, 0);
+ return mucse_mbx_reset_hw(hw);
+}
+
+/**
+ * rnpgbe_send_notify - Notify firmware of a status change
+ * @hw: hw information structure
+ * @enable: enable (true) or disable (false) the given mode
+ * @mode: status mode
+ *
+ * Return: 0 on success, negative errno on failure
+ **/
+int rnpgbe_send_notify(struct mucse_hw *hw,
+ bool enable,
+ int mode)
+{
+ int err;
+	/* Keep the switch statement to support more modes in the future */
+ switch (mode) {
+ case mucse_fw_powerup:
+ err = mucse_mbx_powerup(hw, enable);
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+/**
+ * rnpgbe_init_n500 - Setup n500 hw info
+ * @hw: hw information structure
+ *
+ * rnpgbe_init_n500 initializes all private
+ * structures for the n500
+ **/
+static void rnpgbe_init_n500(struct mucse_hw *hw)
+{
+ struct mucse_mbx_info *mbx = &hw->mbx;
+
+ mbx->fwpf_ctrl_base = MUCSE_N500_FWPF_CTRL_BASE;
+ mbx->fwpf_shm_base = MUCSE_N500_FWPF_SHM_BASE;
+}
+
+/**
+ * rnpgbe_init_n210 - Setup n210 hw info
+ * @hw: hw information structure
+ *
+ * rnpgbe_init_n210 initializes all private
+ * structures for the n210
+ **/
+static void rnpgbe_init_n210(struct mucse_hw *hw)
+{
+ struct mucse_mbx_info *mbx = &hw->mbx;
+
+ mbx->fwpf_ctrl_base = MUCSE_N210_FWPF_CTRL_BASE;
+ mbx->fwpf_shm_base = MUCSE_N210_FWPF_SHM_BASE;
+}
+
+/**
+ * rnpgbe_init_hw - Setup hw info according to board_type
+ * @hw: hw information structure
+ * @board_type: board type
+ *
+ * rnpgbe_init_hw initializes all hw data
+ *
+ * Return: 0 on success, -EINVAL on failure
+ **/
+int rnpgbe_init_hw(struct mucse_hw *hw, int board_type)
+{
+ struct mucse_mbx_info *mbx = &hw->mbx;
+
+ hw->port = 0;
+
+ mbx->pf2fw_mbx_ctrl = MUCSE_GBE_PFFW_MBX_CTRL_OFFSET;
+ mbx->fwpf_mbx_mask = MUCSE_GBE_FWPF_MBX_MASK_OFFSET;
+
+ switch (board_type) {
+ case board_n500:
+ rnpgbe_init_n500(hw);
+ break;
+ case board_n210:
+ rnpgbe_init_n210(hw);
+ break;
+ default:
+ return -EINVAL;
+ }
+	/* Initialize mailbox parameters now that the base addresses are set */
+ mucse_init_mbx_params_pf(hw);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h
new file mode 100644
index 000000000000..e77e6bc3d3e3
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2020 - 2025 Mucse Corporation. */
+
+#ifndef _RNPGBE_HW_H
+#define _RNPGBE_HW_H
+
+#define MUCSE_N500_FWPF_CTRL_BASE 0x28b00
+#define MUCSE_N500_FWPF_SHM_BASE 0x2d000
+#define MUCSE_GBE_PFFW_MBX_CTRL_OFFSET 0x5500
+#define MUCSE_GBE_FWPF_MBX_MASK_OFFSET 0x5700
+#define MUCSE_N210_FWPF_CTRL_BASE 0x29400
+#define MUCSE_N210_FWPF_SHM_BASE 0x2d900
+
+#define RNPGBE_DMA_AXI_EN 0x0010
+
+#define RNPGBE_MAX_QUEUES 8
+#endif /* _RNPGBE_HW_H */
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c
new file mode 100644
index 000000000000..316f941629d4
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2020 - 2025 Mucse Corporation. */
+
+#include <linux/pci.h>
+#include <net/rtnetlink.h>
+#include <linux/etherdevice.h>
+
+#include "rnpgbe.h"
+#include "rnpgbe_hw.h"
+#include "rnpgbe_mbx_fw.h"
+
+static const char rnpgbe_driver_name[] = "rnpgbe";
+
+/* rnpgbe_pci_tbl - PCI Device ID Table
+ *
+ * { PCI_VDEVICE(Vendor ID, Device ID),
+ * private_data (used for different hw chip) }
+ */
+static const struct pci_device_id rnpgbe_pci_tbl[] = {
+ { PCI_VDEVICE(MUCSE, RNPGBE_DEVICE_ID_N210), board_n210 },
+ { PCI_VDEVICE(MUCSE, RNPGBE_DEVICE_ID_N210L), board_n210 },
+ { PCI_VDEVICE(MUCSE, RNPGBE_DEVICE_ID_N500_DUAL_PORT), board_n500 },
+ { PCI_VDEVICE(MUCSE, RNPGBE_DEVICE_ID_N500_QUAD_PORT), board_n500 },
+ /* required last entry */
+ {0, },
+};
+
+/**
+ * rnpgbe_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP).
+ *
+ * Return: 0
+ **/
+static int rnpgbe_open(struct net_device *netdev)
+{
+ return 0;
+}
+
+/**
+ * rnpgbe_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS.
+ *
+ * Return: 0, this is not allowed to fail
+ **/
+static int rnpgbe_close(struct net_device *netdev)
+{
+ return 0;
+}
+
+/**
+ * rnpgbe_xmit_frame - Send a skb to driver
+ * @skb: skb structure to be sent
+ * @netdev: network interface device structure
+ *
+ * Return: NETDEV_TX_OK
+ **/
+static netdev_tx_t rnpgbe_xmit_frame(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct mucse *mucse = netdev_priv(netdev);
+
+ dev_kfree_skb_any(skb);
+ mucse->stats.tx_dropped++;
+
+ return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops rnpgbe_netdev_ops = {
+ .ndo_open = rnpgbe_open,
+ .ndo_stop = rnpgbe_close,
+ .ndo_start_xmit = rnpgbe_xmit_frame,
+};
+
+/**
+ * rnpgbe_add_adapter - Add netdev for this pci_dev
+ * @pdev: PCI device information structure
+ * @board_type: board type
+ *
+ * rnpgbe_add_adapter initializes a netdev for this pci_dev
+ * structure: it maps the BAR, initializes the private structure,
+ * and performs a hardware reset.
+ *
+ * Return: 0 on success, negative errno on failure
+ **/
+static int rnpgbe_add_adapter(struct pci_dev *pdev,
+ int board_type)
+{
+ struct net_device *netdev;
+ u8 perm_addr[ETH_ALEN];
+ void __iomem *hw_addr;
+ struct mucse *mucse;
+ struct mucse_hw *hw;
+ int err, err_notify;
+
+ netdev = alloc_etherdev_mq(sizeof(struct mucse), RNPGBE_MAX_QUEUES);
+ if (!netdev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ mucse = netdev_priv(netdev);
+ mucse->netdev = netdev;
+ mucse->pdev = pdev;
+ pci_set_drvdata(pdev, mucse);
+
+ hw = &mucse->hw;
+ hw_addr = devm_ioremap(&pdev->dev,
+ pci_resource_start(pdev, 2),
+ pci_resource_len(pdev, 2));
+ if (!hw_addr) {
+ err = -EIO;
+ goto err_free_net;
+ }
+
+ hw->hw_addr = hw_addr;
+ hw->pdev = pdev;
+
+ err = rnpgbe_init_hw(hw, board_type);
+ if (err) {
+ dev_err(&pdev->dev, "Init hw err %d\n", err);
+ goto err_free_net;
+ }
+	/* Step 1: Send the power-up notification to firmware (no response
+	 * expected). This tells firmware to initialize its hardware power
+	 * state, but firmware only acknowledges receipt without returning
+	 * data. It must be done before synchronization, as firmware may
+	 * initially be in a low-power idle state.
+	 */
+ err_notify = rnpgbe_send_notify(hw, true, mucse_fw_powerup);
+ if (err_notify) {
+ dev_warn(&pdev->dev, "Send powerup to hw failed %d\n",
+ err_notify);
+ dev_warn(&pdev->dev, "Maybe low performance\n");
+ }
+	/* Step 2: Synchronize mailbox communication with firmware (requires a
+	 * response). After power-up, confirm that firmware is ready to process
+	 * requests and return responses. This ensures subsequent
+	 * request/response interactions work reliably.
+	 */
+ err = mucse_mbx_sync_fw(hw);
+ if (err) {
+ dev_err(&pdev->dev, "Sync fw failed! %d\n", err);
+ goto err_powerdown;
+ }
+
+ netdev->netdev_ops = &rnpgbe_netdev_ops;
+ err = rnpgbe_reset_hw(hw);
+ if (err) {
+ dev_err(&pdev->dev, "Hw reset failed %d\n", err);
+ goto err_powerdown;
+ }
+
+ err = rnpgbe_get_permanent_mac(hw, perm_addr);
+ if (!err) {
+ eth_hw_addr_set(netdev, perm_addr);
+ } else if (err == -EINVAL) {
+ dev_warn(&pdev->dev, "Using random MAC\n");
+ eth_hw_addr_random(netdev);
+ } else if (err) {
+ dev_err(&pdev->dev, "get perm_addr failed %d\n", err);
+ goto err_powerdown;
+ }
+
+ err = register_netdev(netdev);
+ if (err)
+ goto err_powerdown;
+
+ return 0;
+err_powerdown:
+	/* notify powerdown only if powerup succeeded */
+ if (!err_notify) {
+ err_notify = rnpgbe_send_notify(hw, false, mucse_fw_powerup);
+ if (err_notify)
+ dev_warn(&pdev->dev, "Send powerdown to hw failed %d\n",
+ err_notify);
+ }
+err_free_net:
+ free_netdev(netdev);
+ return err;
+}
+
+/**
+ * rnpgbe_probe - Device initialization routine
+ * @pdev: PCI device information struct
+ * @id: entry in rnpgbe_pci_tbl
+ *
+ * rnpgbe_probe initializes a PF adapter identified by a pci_dev
+ * structure.
+ *
+ * Return: 0 on success, negative errno on failure
+ **/
+static int rnpgbe_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ int board_type = id->driver_data;
+ int err;
+
+ err = pci_enable_device_mem(pdev);
+ if (err)
+ return err;
+
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(56));
+ if (err) {
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting %d\n", err);
+ goto err_disable_dev;
+ }
+
+ err = pci_request_mem_regions(pdev, rnpgbe_driver_name);
+ if (err) {
+ dev_err(&pdev->dev,
+ "pci_request_selected_regions failed %d\n", err);
+ goto err_disable_dev;
+ }
+
+ pci_set_master(pdev);
+ err = pci_save_state(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "pci_save_state failed %d\n", err);
+ goto err_free_regions;
+ }
+
+ err = rnpgbe_add_adapter(pdev, board_type);
+ if (err)
+ goto err_free_regions;
+
+ return 0;
+err_free_regions:
+ pci_release_mem_regions(pdev);
+err_disable_dev:
+ pci_disable_device(pdev);
+ return err;
+}
+
+/**
+ * rnpgbe_rm_adapter - Remove netdev for this mucse structure
+ * @pdev: PCI device information struct
+ *
+ * rnpgbe_rm_adapter removes the netdev for this mucse structure
+ **/
+static void rnpgbe_rm_adapter(struct pci_dev *pdev)
+{
+ struct mucse *mucse = pci_get_drvdata(pdev);
+ struct mucse_hw *hw = &mucse->hw;
+ struct net_device *netdev;
+ int err;
+
+ if (!mucse)
+ return;
+ netdev = mucse->netdev;
+ unregister_netdev(netdev);
+ err = rnpgbe_send_notify(hw, false, mucse_fw_powerup);
+ if (err)
+ dev_warn(&pdev->dev, "Send powerdown to hw failed %d\n", err);
+ free_netdev(netdev);
+}
+
+/**
+ * rnpgbe_remove - Device removal routine
+ * @pdev: PCI device information struct
+ *
+ * rnpgbe_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void rnpgbe_remove(struct pci_dev *pdev)
+{
+ rnpgbe_rm_adapter(pdev);
+ pci_release_mem_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+/**
+ * rnpgbe_dev_shutdown - Device shutdown routine
+ * @pdev: PCI device information struct
+ **/
+static void rnpgbe_dev_shutdown(struct pci_dev *pdev)
+{
+ struct mucse *mucse = pci_get_drvdata(pdev);
+ struct net_device *netdev = mucse->netdev;
+
+ rtnl_lock();
+ netif_device_detach(netdev);
+ if (netif_running(netdev))
+ rnpgbe_close(netdev);
+ rtnl_unlock();
+ pci_disable_device(pdev);
+}
+
+/**
+ * rnpgbe_shutdown - Device shutdown routine
+ * @pdev: PCI device information struct
+ *
+ * rnpgbe_shutdown is called by the PCI subsystem to alert the driver
+ * that the OS is shutting down. The device should set up its wakeup
+ * state here.
+ **/
+static void rnpgbe_shutdown(struct pci_dev *pdev)
+{
+ rnpgbe_dev_shutdown(pdev);
+}
+
+static struct pci_driver rnpgbe_driver = {
+ .name = rnpgbe_driver_name,
+ .id_table = rnpgbe_pci_tbl,
+ .probe = rnpgbe_probe,
+ .remove = rnpgbe_remove,
+ .shutdown = rnpgbe_shutdown,
+};
+
+module_pci_driver(rnpgbe_driver);
+
+MODULE_DEVICE_TABLE(pci, rnpgbe_pci_tbl);
+MODULE_AUTHOR("Yibo Dong, <dong100@mucse.com>");
+MODULE_DESCRIPTION("Mucse(R) 1 Gigabit PCI Express Network Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.c
new file mode 100644
index 000000000000..de5e29230b3c
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.c
@@ -0,0 +1,406 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 - 2025 Mucse Corporation. */
+
+#include <linux/errno.h>
+#include <linux/bitfield.h>
+#include <linux/iopoll.h>
+
+#include "rnpgbe_mbx.h"
+
+/**
+ * mbx_data_rd32 - Reads reg with base mbx->fwpf_shm_base
+ * @mbx: pointer to the MBX structure
+ * @reg: register offset
+ *
+ * Return: register value
+ **/
+static u32 mbx_data_rd32(struct mucse_mbx_info *mbx, u32 reg)
+{
+ struct mucse_hw *hw = container_of(mbx, struct mucse_hw, mbx);
+
+ return readl(hw->hw_addr + mbx->fwpf_shm_base + reg);
+}
+
+/**
+ * mbx_data_wr32 - Writes value to reg with base mbx->fwpf_shm_base
+ * @mbx: pointer to the MBX structure
+ * @reg: register offset
+ * @value: value to be written
+ *
+ **/
+static void mbx_data_wr32(struct mucse_mbx_info *mbx, u32 reg, u32 value)
+{
+ struct mucse_hw *hw = container_of(mbx, struct mucse_hw, mbx);
+
+ writel(value, hw->hw_addr + mbx->fwpf_shm_base + reg);
+}
+
+/**
+ * mbx_ctrl_rd32 - Reads reg with base mbx->fwpf_ctrl_base
+ * @mbx: pointer to the MBX structure
+ * @reg: register offset
+ *
+ * Return: register value
+ **/
+static u32 mbx_ctrl_rd32(struct mucse_mbx_info *mbx, u32 reg)
+{
+ struct mucse_hw *hw = container_of(mbx, struct mucse_hw, mbx);
+
+ return readl(hw->hw_addr + mbx->fwpf_ctrl_base + reg);
+}
+
+/**
+ * mbx_ctrl_wr32 - Writes value to reg with base mbx->fwpf_ctrl_base
+ * @mbx: pointer to the MBX structure
+ * @reg: register offset
+ * @value: value to be written
+ *
+ **/
+static void mbx_ctrl_wr32(struct mucse_mbx_info *mbx, u32 reg, u32 value)
+{
+ struct mucse_hw *hw = container_of(mbx, struct mucse_hw, mbx);
+
+ writel(value, hw->hw_addr + mbx->fwpf_ctrl_base + reg);
+}
+
+/**
+ * mucse_mbx_get_lock_pf - Write ctrl and read back lock status
+ * @hw: pointer to the HW structure
+ *
+ * Return: register value after write
+ **/
+static u32 mucse_mbx_get_lock_pf(struct mucse_hw *hw)
+{
+ struct mucse_mbx_info *mbx = &hw->mbx;
+ u32 reg = MUCSE_MBX_PF2FW_CTRL(mbx);
+
+ mbx_ctrl_wr32(mbx, reg, MUCSE_MBX_PFU);
+
+ return mbx_ctrl_rd32(mbx, reg);
+}
+
+/**
+ * mucse_obtain_mbx_lock_pf - Obtain mailbox lock
+ * @hw: pointer to the HW structure
+ *
+ * Pairs with mucse_release_mbx_lock_pf().
+ * This function may be used in an IRQ handler.
+ *
+ * Return: 0 on success, negative errno on failure
+ **/
+static int mucse_obtain_mbx_lock_pf(struct mucse_hw *hw)
+{
+ struct mucse_mbx_info *mbx = &hw->mbx;
+ u32 val;
+
+ return read_poll_timeout_atomic(mucse_mbx_get_lock_pf,
+ val, val & MUCSE_MBX_PFU,
+ mbx->delay_us,
+ mbx->timeout_us,
+ false, hw);
+}
+
+/**
+ * mucse_release_mbx_lock_pf - Release mailbox lock
+ * @hw: pointer to the HW structure
+ * @req: send a request or not
+ *
+ * Pair with mucse_obtain_mbx_lock_pf():
+ * - Releases the mailbox lock by clearing MUCSE_MBX_PFU bit
+ * - Simultaneously sends the request by setting MUCSE_MBX_REQ bit
+ * if req is true
+ * (Both bits are in the same mailbox control register,
+ * so operations are combined)
+ **/
+static void mucse_release_mbx_lock_pf(struct mucse_hw *hw, bool req)
+{
+ struct mucse_mbx_info *mbx = &hw->mbx;
+ u32 reg = MUCSE_MBX_PF2FW_CTRL(mbx);
+
+ mbx_ctrl_wr32(mbx, reg, req ? MUCSE_MBX_REQ : 0);
+}
+
+/**
+ * mucse_mbx_get_fwreq - Read fw req from reg
+ * @mbx: pointer to the mbx structure
+ *
+ * Return: the fwreq value
+ **/
+static u16 mucse_mbx_get_fwreq(struct mucse_mbx_info *mbx)
+{
+ u32 val = mbx_data_rd32(mbx, MUCSE_MBX_FW2PF_CNT);
+
+ return FIELD_GET(GENMASK_U32(15, 0), val);
+}
+
+/**
+ * mucse_mbx_inc_pf_ack - Increase ack
+ * @hw: pointer to the HW structure
+ *
+ * mucse_mbx_inc_pf_ack reads pf_ack from hw, increments it,
+ * then writes the new value back
+ **/
+static void mucse_mbx_inc_pf_ack(struct mucse_hw *hw)
+{
+ struct mucse_mbx_info *mbx = &hw->mbx;
+ u16 ack;
+ u32 val;
+
+ val = mbx_data_rd32(mbx, MUCSE_MBX_PF2FW_CNT);
+ ack = FIELD_GET(GENMASK_U32(31, 16), val);
+ ack++;
+ val &= ~GENMASK_U32(31, 16);
+ val |= FIELD_PREP(GENMASK_U32(31, 16), ack);
+ mbx_data_wr32(mbx, MUCSE_MBX_PF2FW_CNT, val);
+}
+
+/**
+ * mucse_read_mbx_pf - Read a message from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: the message buffer
+ * @size: length of buffer
+ *
+ * mucse_read_mbx_pf copies a message from the mbx buffer to the caller's
+ * memory buffer. The presumption is that the caller knows there was
+ * a message due to a fw request, so no polling for a message is needed.
+ *
+ * Return: 0 on success, negative errno on failure
+ **/
+static int mucse_read_mbx_pf(struct mucse_hw *hw, u32 *msg, u16 size)
+{
+ const int size_in_words = size / sizeof(u32);
+ struct mucse_mbx_info *mbx = &hw->mbx;
+ int err;
+
+ err = mucse_obtain_mbx_lock_pf(hw);
+ if (err)
+ return err;
+
+ for (int i = 0; i < size_in_words; i++)
+ msg[i] = mbx_data_rd32(mbx, MUCSE_MBX_FWPF_SHM + 4 * i);
+	/* Hw requires the data register to be written last */
+ mbx_data_wr32(mbx, MUCSE_MBX_FWPF_SHM, 0);
+ /* flush reqs as we have read this request data */
+ hw->mbx.fw_req = mucse_mbx_get_fwreq(mbx);
+ mucse_mbx_inc_pf_ack(hw);
+ mucse_release_mbx_lock_pf(hw, false);
+
+ return 0;
+}
+
+/**
+ * mucse_check_for_msg_pf - Check to see if the fw has sent mail
+ * @hw: pointer to the HW structure
+ *
+ * Return: 0 if the fw has a new message pending, else -EIO
+ **/
+static int mucse_check_for_msg_pf(struct mucse_hw *hw)
+{
+ struct mucse_mbx_info *mbx = &hw->mbx;
+ u16 fw_req;
+
+ fw_req = mucse_mbx_get_fwreq(mbx);
+	/* The chip's register is reset to 0 when the RC sends a reset
+	 * mbx command. Return -EIO in that state; otherwise
+	 * fw_req == hw->mbx.fw_req means there is no new message.
+	 */
+ if (fw_req == 0 || fw_req == hw->mbx.fw_req)
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * mucse_poll_for_msg - Wait for message notification
+ * @hw: pointer to the HW structure
+ *
+ * Return: 0 on success, negative errno on failure
+ **/
+static int mucse_poll_for_msg(struct mucse_hw *hw)
+{
+ struct mucse_mbx_info *mbx = &hw->mbx;
+ int val;
+
+ return read_poll_timeout(mucse_check_for_msg_pf,
+ val, !val, mbx->delay_us,
+ mbx->timeout_us,
+ false, hw);
+}
+
+/**
+ * mucse_poll_and_read_mbx - Wait for message notification and receive message
+ * @hw: pointer to the HW structure
+ * @msg: the message buffer
+ * @size: length of buffer
+ *
+ * Return: 0 if it successfully received a message notification and
+ * copied it into the receive buffer, negative errno on failure
+ **/
+int mucse_poll_and_read_mbx(struct mucse_hw *hw, u32 *msg, u16 size)
+{
+ int err;
+
+ err = mucse_poll_for_msg(hw);
+ if (err)
+ return err;
+
+ return mucse_read_mbx_pf(hw, msg, size);
+}
+
+/**
+ * mucse_mbx_get_fwack - Read fw ack from reg
+ * @mbx: pointer to the MBX structure
+ *
+ * Return: the fwack value
+ **/
+static u16 mucse_mbx_get_fwack(struct mucse_mbx_info *mbx)
+{
+ u32 val = mbx_data_rd32(mbx, MUCSE_MBX_FW2PF_CNT);
+
+ return FIELD_GET(GENMASK_U32(31, 16), val);
+}
+
+/**
+ * mucse_mbx_inc_pf_req - Increase req
+ * @hw: pointer to the HW structure
+ *
+ * mucse_mbx_inc_pf_req reads pf_req from hw, increments it,
+ * then writes the new value back
+ **/
+static void mucse_mbx_inc_pf_req(struct mucse_hw *hw)
+{
+ struct mucse_mbx_info *mbx = &hw->mbx;
+ u16 req;
+ u32 val;
+
+ val = mbx_data_rd32(mbx, MUCSE_MBX_PF2FW_CNT);
+ req = FIELD_GET(GENMASK_U32(15, 0), val);
+ req++;
+ val &= ~GENMASK_U32(15, 0);
+ val |= FIELD_PREP(GENMASK_U32(15, 0), req);
+ mbx_data_wr32(mbx, MUCSE_MBX_PF2FW_CNT, val);
+}
+
+/**
+ * mucse_write_mbx_pf - Place a message in the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: the message buffer
+ * @size: length of buffer
+ *
+ * Return: 0 if it successfully copied message into the buffer,
+ * negative errno on failure
+ **/
+static int mucse_write_mbx_pf(struct mucse_hw *hw, u32 *msg, u16 size)
+{
+ const int size_in_words = size / sizeof(u32);
+ struct mucse_mbx_info *mbx = &hw->mbx;
+ int err;
+
+ err = mucse_obtain_mbx_lock_pf(hw);
+ if (err)
+ return err;
+
+ for (int i = 0; i < size_in_words; i++)
+ mbx_data_wr32(mbx, MUCSE_MBX_FWPF_SHM + i * 4, msg[i]);
+
+ /* flush acks as we are overwriting the message buffer */
+ hw->mbx.fw_ack = mucse_mbx_get_fwack(mbx);
+ mucse_mbx_inc_pf_req(hw);
+ mucse_release_mbx_lock_pf(hw, true);
+
+ return 0;
+}
+
+/**
+ * mucse_check_for_ack_pf - Check to see if the fw has ACKed
+ * @hw: pointer to the HW structure
+ *
+ * Return: 0 if the fw has acknowledged the last message, else -EIO
+ **/
+static int mucse_check_for_ack_pf(struct mucse_hw *hw)
+{
+ struct mucse_mbx_info *mbx = &hw->mbx;
+ u16 fw_ack;
+
+ fw_ack = mucse_mbx_get_fwack(mbx);
+	/* The chip's register is reset to 0 when the RC sends a reset
+	 * mbx command. Return -EIO in that state; otherwise
+	 * fw_ack == hw->mbx.fw_ack means there is no new ack.
+	 */
+ if (fw_ack == 0 || fw_ack == hw->mbx.fw_ack)
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * mucse_poll_for_ack - Wait for message acknowledgment
+ * @hw: pointer to the HW structure
+ *
+ * Return: 0 if it successfully received a message acknowledgment,
+ * else negative errno
+ **/
+static int mucse_poll_for_ack(struct mucse_hw *hw)
+{
+ struct mucse_mbx_info *mbx = &hw->mbx;
+ int val;
+
+ return read_poll_timeout(mucse_check_for_ack_pf,
+ val, !val, mbx->delay_us,
+ mbx->timeout_us,
+ false, hw);
+}
+
+/**
+ * mucse_write_and_wait_ack_mbx - Write a message to the mailbox, wait for ack
+ * @hw: pointer to the HW structure
+ * @msg: the message buffer
+ * @size: length of buffer
+ *
+ * Return: 0 if it successfully copied the message into the buffer and
+ * received an ack to that message within the configured timeout,
+ * negative errno on failure
+ **/
+int mucse_write_and_wait_ack_mbx(struct mucse_hw *hw, u32 *msg, u16 size)
+{
+ int err;
+
+ err = mucse_write_mbx_pf(hw, msg, size);
+ if (err)
+ return err;
+
+ return mucse_poll_for_ack(hw);
+}
+
+/**
+ * mucse_mbx_reset - Reset mbx info, sync info from regs
+ * @hw: pointer to the HW structure
+ *
+ * mucse_mbx_reset resets all mbx variables to default.
+ **/
+static void mucse_mbx_reset(struct mucse_hw *hw)
+{
+ struct mucse_mbx_info *mbx = &hw->mbx;
+ u32 val;
+
+ val = mbx_data_rd32(mbx, MUCSE_MBX_FW2PF_CNT);
+ hw->mbx.fw_req = FIELD_GET(GENMASK_U32(15, 0), val);
+ hw->mbx.fw_ack = FIELD_GET(GENMASK_U32(31, 16), val);
+ mbx_ctrl_wr32(mbx, MUCSE_MBX_PF2FW_CTRL(mbx), 0);
+ mbx_ctrl_wr32(mbx, MUCSE_MBX_FWPF_MASK(mbx), GENMASK_U32(31, 16));
+}
+
+/**
+ * mucse_init_mbx_params_pf - Set initial values for pf mailbox
+ * @hw: pointer to the HW structure
+ *
+ * Initializes the hw->mbx struct to correct values for pf mailbox
+ */
+void mucse_init_mbx_params_pf(struct mucse_hw *hw)
+{
+ struct mucse_mbx_info *mbx = &hw->mbx;
+
+ mbx->delay_us = 100;
+ mbx->timeout_us = 4 * USEC_PER_SEC;
+ mutex_init(&mbx->lock);
+ mucse_mbx_reset(hw);
+}
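
The flow this file implements is: set up the mailbox timing with mucse_init_mbx_params_pf(), post a request with mucse_write_and_wait_ack_mbx(), then fetch the firmware reply with mucse_poll_and_read_mbx(). A minimal caller sketch under that assumption — the wrapper function name, buffer layout and sizes are illustrative only and not part of this patch (the real callers live in rnpgbe_mbx_fw.c):

/* Hypothetical caller sketch, assumes "rnpgbe.h" and "rnpgbe_mbx.h". */
static int example_mbx_roundtrip(struct mucse_hw *hw)
{
	u32 req[4] = {};	/* request payload; real layout is cmd-specific */
	u32 reply[8];		/* reply buffer sized for the expected cmd */
	int err;

	/* One-time setup: delay_us/timeout_us and the mailbox mutex */
	mucse_init_mbx_params_pf(hw);

	mutex_lock(&hw->mbx.lock);
	/* Copy req into shared memory and wait for the fw ack counter to move */
	err = mucse_write_and_wait_ack_mbx(hw, req, sizeof(req));
	if (!err)
		/* Wait for the fw req counter, then copy the reply out of SHM */
		err = mucse_poll_and_read_mbx(hw, reply, sizeof(reply));
	mutex_unlock(&hw->mbx.lock);

	return err;
}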
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.h
new file mode 100644
index 000000000000..e6fcc8d1d3ca
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2020 - 2025 Mucse Corporation. */
+
+#ifndef _RNPGBE_MBX_H
+#define _RNPGBE_MBX_H
+
+#include "rnpgbe.h"
+
+#define MUCSE_MBX_FW2PF_CNT 0
+#define MUCSE_MBX_PF2FW_CNT 4
+#define MUCSE_MBX_FWPF_SHM 8
+#define MUCSE_MBX_PF2FW_CTRL(mbx) ((mbx)->pf2fw_mbx_ctrl)
+#define MUCSE_MBX_FWPF_MASK(mbx) ((mbx)->fwpf_mbx_mask)
+#define MUCSE_MBX_REQ BIT(0) /* Request a req to mailbox */
+#define MUCSE_MBX_PFU BIT(3) /* PF owns the mailbox buffer */
+
+int mucse_write_and_wait_ack_mbx(struct mucse_hw *hw, u32 *msg, u16 size);
+void mucse_init_mbx_params_pf(struct mucse_hw *hw);
+int mucse_poll_and_read_mbx(struct mucse_hw *hw, u32 *msg, u16 size);
+#endif /* _RNPGBE_MBX_H */
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c
new file mode 100644
index 000000000000..8c8bd5e8e1db
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2020 - 2025 Mucse Corporation. */
+
+#include <linux/if_ether.h>
+#include <linux/bitfield.h>
+
+#include "rnpgbe.h"
+#include "rnpgbe_mbx.h"
+#include "rnpgbe_mbx_fw.h"
+
+/**
+ * mucse_fw_send_cmd_wait_resp - Send cmd req and wait for response
+ * @hw: pointer to the HW structure
+ * @req: pointer to the cmd req structure
+ * @reply: pointer to the fw reply structure
+ *
+ * mucse_fw_send_cmd_wait_resp sends req to the pf-fw mailbox and
+ * waits for a reply from fw.
+ *
+ * Return: 0 on success, negative errno on failure
+ **/
+static int mucse_fw_send_cmd_wait_resp(struct mucse_hw *hw,
+ struct mbx_fw_cmd_req *req,
+ struct mbx_fw_cmd_reply *reply)
+{
+ int len = le16_to_cpu(req->datalen);
+ int retry_cnt = 3;
+ int err;
+
+ mutex_lock(&hw->mbx.lock);
+ err = mucse_write_and_wait_ack_mbx(hw, (u32 *)req, len);
+ if (err)
+ goto out;
+ do {
+ err = mucse_poll_and_read_mbx(hw, (u32 *)reply,
+ sizeof(*reply));
+ if (err)
+ goto out;
+ /* mucse_write_and_wait_ack_mbx returning 0 means fw has
+ * received the request; wait for a reply with the expected
+ * opcode for up to 'retry_cnt' attempts.
+ */
+ } while (--retry_cnt >= 0 && reply->opcode != req->opcode);
+out:
+ mutex_unlock(&hw->mbx.lock);
+ if (!err && retry_cnt < 0)
+ return -ETIMEDOUT;
+ if (!err && reply->error_code)
+ return -EIO;
+
+ return err;
+}
+
+/**
+ * mucse_mbx_get_info - Get hw info from fw
+ * @hw: pointer to the HW structure
+ *
+ * mucse_mbx_get_info queries the fw for hw info.
+ *
+ * Return: 0 on success, negative errno on failure
+ **/
+static int mucse_mbx_get_info(struct mucse_hw *hw)
+{
+ struct mbx_fw_cmd_req req = {
+ .datalen = cpu_to_le16(MUCSE_MBX_REQ_HDR_LEN),
+ .opcode = cpu_to_le16(GET_HW_INFO),
+ };
+ struct mbx_fw_cmd_reply reply = {};
+ int err;
+
+ err = mucse_fw_send_cmd_wait_resp(hw, &req, &reply);
+ if (!err)
+ hw->pfvfnum = FIELD_GET(GENMASK_U16(7, 0),
+ le16_to_cpu(reply.hw_info.pfnum));
+
+ return err;
+}
+
+/**
+ * mucse_mbx_sync_fw - Try to sync with fw
+ * @hw: pointer to the HW structure
+ *
+ * mucse_mbx_sync_fw tries to sync with fw. It is only called during
+ * probe; if it fails, the network device is not registered.
+ * The sync is retried several times.
+ *
+ * Return: 0 on success, negative errno on failure
+ **/
+int mucse_mbx_sync_fw(struct mucse_hw *hw)
+{
+ int try_cnt = 3;
+ int err;
+
+ do {
+ err = mucse_mbx_get_info(hw);
+ } while (err == -ETIMEDOUT && try_cnt--);
+
+ return err;
+}
+
+/**
+ * mucse_mbx_powerup - Notify fw of the powerup state
+ * @hw: pointer to the HW structure
+ * @is_powerup: true for powerup, false for powerdown
+ *
+ * mucse_mbx_powerup tells fw to switch the working frequency back
+ * to normal when is_powerup is true, and to reduce the working
+ * frequency when it is false.
+ *
+ * Return: 0 on success, negative errno on failure
+ **/
+int mucse_mbx_powerup(struct mucse_hw *hw, bool is_powerup)
+{
+ struct mbx_fw_cmd_req req = {
+ .datalen = cpu_to_le16(sizeof(req.powerup) +
+ MUCSE_MBX_REQ_HDR_LEN),
+ .opcode = cpu_to_le16(POWER_UP),
+ .powerup = {
+ /* fw needs this to reply with the correct cmd */
+ .version = cpu_to_le32(GENMASK_U32(31, 0)),
+ .status = cpu_to_le32(is_powerup ? 1 : 0),
+ },
+ };
+ int len, err;
+
+ len = le16_to_cpu(req.datalen);
+ mutex_lock(&hw->mbx.lock);
+ err = mucse_write_and_wait_ack_mbx(hw, (u32 *)&req, len);
+ mutex_unlock(&hw->mbx.lock);
+
+ return err;
+}
+
+/**
+ * mucse_mbx_reset_hw - Posts a mbx req to reset hw
+ * @hw: pointer to the HW structure
+ *
+ * mucse_mbx_reset_hw posts a mbx req to firmware to reset hw.
+ * mucse_fw_send_cmd_wait_resp is used to wait until the hw reset completes.
+ *
+ * Return: 0 on success, negative errno on failure
+ **/
+int mucse_mbx_reset_hw(struct mucse_hw *hw)
+{
+ struct mbx_fw_cmd_req req = {
+ .datalen = cpu_to_le16(MUCSE_MBX_REQ_HDR_LEN),
+ .opcode = cpu_to_le16(RESET_HW),
+ };
+ struct mbx_fw_cmd_reply reply = {};
+
+ return mucse_fw_send_cmd_wait_resp(hw, &req, &reply);
+}
+
+/**
+ * mucse_mbx_get_macaddr - Posts a mbx req to request macaddr
+ * @hw: pointer to the HW structure
+ * @pfvfnum: index of pf/vf num
+ * @mac_addr: pointer to store mac_addr
+ * @port: port index
+ *
+ * mucse_mbx_get_macaddr posts a mbx req to firmware to get mac_addr.
+ *
+ * Return: 0 on success, negative errno on failure
+ **/
+int mucse_mbx_get_macaddr(struct mucse_hw *hw, int pfvfnum,
+ u8 *mac_addr,
+ int port)
+{
+ struct mbx_fw_cmd_req req = {
+ .datalen = cpu_to_le16(sizeof(req.get_mac_addr) +
+ MUCSE_MBX_REQ_HDR_LEN),
+ .opcode = cpu_to_le16(GET_MAC_ADDRESS),
+ .get_mac_addr = {
+ .port_mask = cpu_to_le32(BIT(port)),
+ .pfvf_num = cpu_to_le32(pfvfnum),
+ },
+ };
+ struct mbx_fw_cmd_reply reply = {};
+ int err;
+
+ err = mucse_fw_send_cmd_wait_resp(hw, &req, &reply);
+ if (err)
+ return err;
+
+ if (le32_to_cpu(reply.mac_addr.ports) & BIT(port))
+ memcpy(mac_addr, reply.mac_addr.addrs[port].mac, ETH_ALEN);
+ else
+ return -ENODATA;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h
new file mode 100644
index 000000000000..fb24fc12b613
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2020 - 2025 Mucse Corporation. */
+
+#ifndef _RNPGBE_MBX_FW_H
+#define _RNPGBE_MBX_FW_H
+
+#include <linux/types.h>
+
+#include "rnpgbe.h"
+
+#define MUCSE_MBX_REQ_HDR_LEN 24
+
+enum MUCSE_FW_CMD {
+ GET_HW_INFO = 0x0601,
+ GET_MAC_ADDRESS = 0x0602,
+ RESET_HW = 0x0603,
+ POWER_UP = 0x0803,
+};
+
+struct mucse_hw_info {
+ u8 link_stat;
+ u8 port_mask;
+ __le32 speed;
+ __le16 phy_type;
+ __le16 nic_mode;
+ __le16 pfnum;
+ __le32 fw_version;
+ __le32 axi_mhz;
+ union {
+ u8 port_id[4];
+ __le32 port_ids;
+ };
+ __le32 bd_uid;
+ __le32 phy_id;
+ __le32 wol_status;
+ __le32 ext_info;
+} __packed;
+
+struct mbx_fw_cmd_req {
+ __le16 flags;
+ __le16 opcode;
+ __le16 datalen;
+ __le16 ret_value;
+ __le32 cookie_lo;
+ __le32 cookie_hi;
+ __le32 reply_lo;
+ __le32 reply_hi;
+ union {
+ u8 data[32];
+ struct {
+ __le32 version;
+ __le32 status;
+ } powerup;
+ struct {
+ __le32 port_mask;
+ __le32 pfvf_num;
+ } get_mac_addr;
+ };
+} __packed;
+
+struct mbx_fw_cmd_reply {
+ __le16 flags;
+ __le16 opcode;
+ __le16 error_code;
+ __le16 datalen;
+ __le32 cookie_lo;
+ __le32 cookie_hi;
+ union {
+ u8 data[40];
+ struct mac_addr {
+ __le32 ports;
+ struct _addr {
+ /* for macaddr:01:02:03:04:05:06
+ * mac-hi=0x01020304 mac-lo=0x05060000
+ */
+ u8 mac[8];
+ } addrs[4];
+ } mac_addr;
+ struct mucse_hw_info hw_info;
+ };
+} __packed;
+
+int mucse_mbx_sync_fw(struct mucse_hw *hw);
+int mucse_mbx_powerup(struct mucse_hw *hw, bool is_powerup);
+int mucse_mbx_reset_hw(struct mucse_hw *hw);
+int mucse_mbx_get_macaddr(struct mucse_hw *hw, int pfvfnum,
+ u8 *mac_addr, int port);
+#endif /* _RNPGBE_MBX_FW_H */
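
These four exported helpers map onto the firmware commands in enum MUCSE_FW_CMD, and a probe path would typically chain them in order: sync (GET_HW_INFO), reset, power-up, then MAC address retrieval. A hedged sketch of such a sequence — the wrapper function, its arguments and its placement in probe are assumptions for illustration, not part of this patch:

/* Hypothetical probe-time bring-up, assumes "rnpgbe.h" and "rnpgbe_mbx_fw.h". */
static int example_fw_bringup(struct mucse_hw *hw, u8 *mac, int port)
{
	int err;

	/* GET_HW_INFO; retried on -ETIMEDOUT, also fills hw->pfvfnum */
	err = mucse_mbx_sync_fw(hw);
	if (err)
		return err;

	/* RESET_HW; waits for the firmware's reply before returning */
	err = mucse_mbx_reset_hw(hw);
	if (err)
		return err;

	/* POWER_UP with status=1: ask fw for the normal working frequency */
	err = mucse_mbx_powerup(hw, true);
	if (err)
		return err;

	/* GET_MAC_ADDRESS for this pf/port pair */
	return mucse_mbx_get_macaddr(hw, hw->pfvfnum, mac, port);
}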
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 7b7e1c5b00f4..7be30a8df268 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -2482,7 +2482,7 @@ static int myri10ge_close(struct net_device *dev)
if (mgp->ss[0].tx.req_bytes == NULL)
return 0;
- del_timer_sync(&mgp->watchdog_timer);
+ timer_delete_sync(&mgp->watchdog_timer);
mgp->running = MYRI10GE_ETH_STOPPING;
for (i = 0; i < mgp->num_slices; i++)
napi_disable(&mgp->ss[i].napi);
@@ -3036,11 +3036,11 @@ static int myri10ge_change_mtu(struct net_device *dev, int new_mtu)
/* if we change the mtu on an active device, we must
* reset the device so the firmware sees the change */
myri10ge_close(dev);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
myri10ge_open(dev);
- } else
- dev->mtu = new_mtu;
-
+ } else {
+ WRITE_ONCE(dev->mtu, new_mtu);
+ }
return 0;
}
@@ -3416,10 +3416,6 @@ static void myri10ge_watchdog(struct work_struct *work)
* nic was resumed from power saving mode.
*/
pci_restore_state(mgp->pdev);
-
- /* save state again for accounting reasons */
- pci_save_state(mgp->pdev);
-
} else {
/* if we get back -1's from our slot, perhaps somebody
* powered off our card. Don't try to reset it in
@@ -3478,7 +3474,7 @@ static void myri10ge_watchdog_timer(struct timer_list *t)
u32 rx_pause_cnt;
u16 cmd;
- mgp = from_timer(mgp, t, watchdog_timer);
+ mgp = timer_container_of(mgp, t, watchdog_timer);
rx_pause_cnt = ntohl(mgp->ss[0].fw_stats->dropped_pause);
busy_slice_cnt = 0;
diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c
index 2b6e097df28f..6d29d2e1fa7c 100644
--- a/drivers/net/ethernet/natsemi/jazzsonic.c
+++ b/drivers/net/ethernet/natsemi/jazzsonic.c
@@ -241,7 +241,7 @@ static void jazz_sonic_device_remove(struct platform_device *pdev)
static struct platform_driver jazz_sonic_driver = {
.probe = jazz_sonic_probe,
- .remove_new = jazz_sonic_device_remove,
+ .remove = jazz_sonic_device_remove,
.driver = {
.name = jazz_sonic_string,
},
diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c
index 2fc63860dbdb..a740e24a9759 100644
--- a/drivers/net/ethernet/natsemi/macsonic.c
+++ b/drivers/net/ethernet/natsemi/macsonic.c
@@ -545,7 +545,7 @@ static void mac_sonic_platform_remove(struct platform_device *pdev)
static struct platform_driver mac_sonic_platform_driver = {
.probe = mac_sonic_platform_probe,
- .remove_new = mac_sonic_platform_remove,
+ .remove = mac_sonic_platform_remove,
.driver = {
.name = "macsonic",
},
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 650a5a166070..b253734dbc80 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -846,7 +846,7 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENOMEM;
SET_NETDEV_DEV(dev, &pdev->dev);
- i = pci_request_regions(pdev, DRV_NAME);
+ i = pcim_request_all_regions(pdev, DRV_NAME);
if (i)
goto err_pci_request_regions;
@@ -1786,7 +1786,7 @@ static void init_registers(struct net_device *dev)
*/
static void netdev_timer(struct timer_list *t)
{
- struct netdev_private *np = from_timer(np, t, timer);
+ struct netdev_private *np = timer_container_of(np, t, timer);
struct net_device *dev = np->dev;
void __iomem * ioaddr = ns_ioaddr(dev);
int next_tick = NATSEMI_TIMER_FREQ;
@@ -2526,7 +2526,7 @@ static void __set_rx_mode(struct net_device *dev)
static int natsemi_change_mtu(struct net_device *dev, int new_mtu)
{
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
/* synchronized against open : rtnl_lock() held by caller */
if (netif_running(dev)) {
@@ -3179,7 +3179,7 @@ static int netdev_close(struct net_device *dev)
* the final WOL settings?
*/
- del_timer_sync(&np->timer);
+ timer_delete_sync(&np->timer);
disable_irq(irq);
spin_lock_irq(&np->lock);
natsemi_irq_disable(dev);
@@ -3278,7 +3278,7 @@ static int __maybe_unused natsemi_suspend(struct device *dev_d)
if (netif_running (dev)) {
const int irq = np->pci_dev->irq;
- del_timer_sync(&np->timer);
+ timer_delete_sync(&np->timer);
disable_irq(irq);
spin_lock_irq(&np->lock);
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index 998586872599..cdbf82affa7b 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -820,7 +820,7 @@ static void rx_irq(struct net_device *ndev)
struct ns83820 *dev = PRIV(ndev);
struct rx_info *info = &dev->rx_info;
unsigned next_rx;
- int rx_rc, len;
+ int len;
u32 cmdsts;
__le32 *desc;
unsigned long flags;
@@ -881,8 +881,10 @@ static void rx_irq(struct net_device *ndev)
if (likely(CMDSTS_OK & cmdsts)) {
#endif
skb_put(skb, len);
- if (unlikely(!skb))
+ if (unlikely(!skb)) {
+ ndev->stats.rx_dropped++;
goto netdev_mangle_me_harder_failed;
+ }
if (cmdsts & CMDSTS_DEST_MULTI)
ndev->stats.multicast++;
ndev->stats.rx_packets++;
@@ -901,15 +903,12 @@ static void rx_irq(struct net_device *ndev)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_IPV6), tag);
}
#endif
- rx_rc = netif_rx(skb);
- if (NET_RX_DROP == rx_rc) {
-netdev_mangle_me_harder_failed:
- ndev->stats.rx_dropped++;
- }
+ netif_rx(skb);
} else {
dev_kfree_skb_irq(skb);
}
+netdev_mangle_me_harder_failed:
nr++;
next_rx = info->next_rx;
desc = info->descs + (DESC_SIZE * next_rx);
@@ -1527,7 +1526,7 @@ static int ns83820_stop(struct net_device *ndev)
struct ns83820 *dev = PRIV(ndev);
/* FIXME: protect against interrupt handler? */
- del_timer_sync(&dev->tx_watchdog);
+ timer_delete_sync(&dev->tx_watchdog);
ns83820_disable_interrupts(dev);
@@ -1587,7 +1586,7 @@ static void ns83820_tx_timeout(struct net_device *ndev, unsigned int txqueue)
static void ns83820_tx_watch(struct timer_list *t)
{
- struct ns83820 *dev = from_timer(dev, t, tx_watchdog);
+ struct ns83820 *dev = timer_container_of(dev, t, tx_watchdog);
struct net_device *ndev = dev->ndev;
#if defined(DEBUG)
@@ -2090,7 +2089,7 @@ static int ns83820_init_one(struct pci_dev *pci_dev,
*/
/* Ramit : 1024 DMA is not a good idea, it ends up banging
* some DELL and COMPAQ SMP systems
- * Turn on ALP, only we are accpeting Jumbo Packets */
+ * Turn on ALP, only we are accepting Jumbo Packets */
writel(RXCFG_AEP | RXCFG_ARP | RXCFG_AIRL | RXCFG_RX_FD
| RXCFG_STRIPCRC
//| RXCFG_ALP
diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
index 8943e7244310..c01a4cb5dc0f 100644
--- a/drivers/net/ethernet/natsemi/xtsonic.c
+++ b/drivers/net/ethernet/natsemi/xtsonic.c
@@ -264,7 +264,7 @@ static void xtsonic_device_remove(struct platform_device *pdev)
static struct platform_driver xtsonic_driver = {
.probe = xtsonic_probe,
- .remove_new = xtsonic_device_remove,
+ .remove = xtsonic_device_remove,
.driver = {
.name = xtsonic_string,
},
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 55408f16fbbc..1e55ccb4822b 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -3425,7 +3425,6 @@ static void s2io_reset(struct s2io_nic *sp)
/* Restore the PCI state saved during initialization. */
pci_restore_state(sp->pdev);
- pci_save_state(sp->pdev);
pci_read_config_word(sp->pdev, 0x2, &val16);
if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
break;
@@ -4195,7 +4194,7 @@ pci_map_failed:
static void
s2io_alarm_handle(struct timer_list *t)
{
- struct s2io_nic *sp = from_timer(sp, t, alarm_timer);
+ struct s2io_nic *sp = timer_container_of(sp, t, alarm_timer);
struct net_device *dev = sp->dev;
s2io_handle_errors(dev);
@@ -4707,7 +4706,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id)
/*
* rx_traffic_int reg is an R1 register, writing all 1's
* will ensure that the actual interrupt causing bit
- * get's cleared and hence a read can be avoided.
+ * gets cleared and hence a read can be avoided.
*/
if (reason & GEN_INTR_RXTRAFFIC)
writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
@@ -4721,7 +4720,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id)
/*
* tx_traffic_int reg is an R1 register, writing all 1's
- * will ensure that the actual interrupt causing bit get's
+ * will ensure that the actual interrupt causing bit gets
* cleared and hence a read can be avoided.
*/
if (reason & GEN_INTR_TXTRAFFIC)
@@ -6637,7 +6636,7 @@ static int s2io_change_mtu(struct net_device *dev, int new_mtu)
struct s2io_nic *sp = netdev_priv(dev);
int ret = 0;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
if (netif_running(dev)) {
s2io_stop_all_tx_queue(sp);
s2io_card_down(sp);
@@ -7019,7 +7018,7 @@ static void do_s2io_card_down(struct s2io_nic *sp, int do_io)
if (!is_s2io_card_up(sp))
return;
- del_timer_sync(&sp->alarm_timer);
+ timer_delete_sync(&sp->alarm_timer);
/* If s2io_set_link task is executing, wait till it completes. */
while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state)))
msleep(50);
@@ -8523,7 +8522,7 @@ static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
* @pdev: Pointer to PCI device
*
* Restart the card from scratch, as if from a cold-boot.
- * At this point, the card has exprienced a hard reset,
+ * At this point, the card has experienced a hard reset,
* followed by fixups by BIOS, and has its config space
* set up identically to what it was at cold boot.
*/
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
index 2ec62c8d86e1..59486fe2ad18 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
@@ -20,6 +20,8 @@ nfp_bpf_cmsg_alloc(struct nfp_app_bpf *bpf, unsigned int size)
struct sk_buff *skb;
skb = nfp_app_ctrl_msg_alloc(bpf->app, size, GFP_KERNEL);
+ if (!skb)
+ return NULL;
skb_put(skb, size);
return skb;
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index df2ab5cbd49b..3a02eef58cc6 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -4537,8 +4537,8 @@ void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv)
u64 *prog;
int err;
- prog = kmemdup(nfp_prog->prog, nfp_prog->prog_len * sizeof(u64),
- GFP_KERNEL);
+ prog = kmemdup_array(nfp_prog->prog, nfp_prog->prog_len, sizeof(u64),
+ GFP_KERNEL);
if (!prog)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index 9d97cd281f18..c03558adda91 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -458,7 +458,8 @@ int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
map_id_full = be64_to_cpu(cbe->map_ptr);
map_id = map_id_full;
- if (len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size)
+ if (size_add(pkt_size, data_size) > INT_MAX ||
+ len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size)
return -EINVAL;
if (cbe->hdr.ver != NFP_CCM_ABI_VERSION)
return -EINVAL;
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
index cc54faca2283..9e7c285eaa6b 100644
--- a/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
+++ b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
@@ -6,7 +6,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/netdevice.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/ktime.h>
#include <net/xfrm.h>
@@ -266,17 +266,17 @@ static void set_sha2_512hmac(struct nfp_ipsec_cfg_add_sa *cfg, int *trunc_len)
}
}
-static int nfp_net_xfrm_add_state(struct xfrm_state *x,
+static int nfp_net_xfrm_add_state(struct net_device *dev,
+ struct xfrm_state *x,
struct netlink_ext_ack *extack)
{
- struct net_device *netdev = x->xso.real_dev;
struct nfp_ipsec_cfg_mssg msg = {};
int i, key_len, trunc_len, err = 0;
struct nfp_ipsec_cfg_add_sa *cfg;
struct nfp_net *nn;
unsigned int saidx;
- nn = netdev_priv(netdev);
+ nn = netdev_priv(dev);
cfg = &msg.cfg_add_sa;
/* General */
@@ -546,17 +546,16 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x,
return 0;
}
-static void nfp_net_xfrm_del_state(struct xfrm_state *x)
+static void nfp_net_xfrm_del_state(struct net_device *dev, struct xfrm_state *x)
{
struct nfp_ipsec_cfg_mssg msg = {
.cmd = NFP_IPSEC_CFG_MSSG_INV_SA,
.sa_idx = x->xso.offload_handle - 1,
};
- struct net_device *netdev = x->xso.real_dev;
struct nfp_net *nn;
int err;
- nn = netdev_priv(netdev);
+ nn = netdev_priv(dev);
err = nfp_net_sched_mbox_amsg_work(nn, NFP_NET_CFG_MBOX_CMD_IPSEC, &msg,
sizeof(msg), nfp_net_ipsec_cfg);
if (err)
@@ -565,20 +564,9 @@ static void nfp_net_xfrm_del_state(struct xfrm_state *x)
xa_erase(&nn->xa_ipsec, x->xso.offload_handle - 1);
}
-static bool nfp_net_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
-{
- if (x->props.family == AF_INET)
- /* Offload with IPv4 options is not supported yet */
- return ip_hdr(skb)->ihl == 5;
-
- /* Offload with IPv6 extension headers is not support yet */
- return !(ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr));
-}
-
static const struct xfrmdev_ops nfp_net_ipsec_xfrmdev_ops = {
.xdo_dev_state_add = nfp_net_xfrm_add_state,
.xdo_dev_state_delete = nfp_net_xfrm_del_state,
- .xdo_dev_offload_ok = nfp_net_ipsec_offload_ok,
};
void nfp_net_ipsec_init(struct nfp_net *nn)
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/tls.c b/drivers/net/ethernet/netronome/nfp/crypto/tls.c
index f80f1a6953fa..f252ecdcd2cd 100644
--- a/drivers/net/ethernet/netronome/nfp/crypto/tls.c
+++ b/drivers/net/ethernet/netronome/nfp/crypto/tls.c
@@ -495,14 +495,13 @@ int nfp_net_tls_rx_resync_req(struct net_device *netdev,
switch (ipv6h->version) {
case 4:
- sk = inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
- iph->saddr, th->source, iph->daddr,
- th->dest, netdev->ifindex);
+ sk = inet_lookup_established(net, iph->saddr, th->source,
+ iph->daddr, th->dest,
+ netdev->ifindex);
break;
#if IS_ENABLED(CONFIG_IPV6)
case 6:
- sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
- &ipv6h->saddr, th->source,
+ sk = __inet6_lookup_established(net, &ipv6h->saddr, th->source,
&ipv6h->daddr, ntohs(th->dest),
netdev->ifindex, 0);
break;
diff --git a/drivers/net/ethernet/netronome/nfp/devlink_param.c b/drivers/net/ethernet/netronome/nfp/devlink_param.c
index a655f9e69a7b..85e3b19e6165 100644
--- a/drivers/net/ethernet/netronome/nfp/devlink_param.c
+++ b/drivers/net/ethernet/netronome/nfp/devlink_param.c
@@ -81,7 +81,8 @@ static const struct nfp_devlink_param_u8_arg nfp_devlink_u8_args[] = {
static int
nfp_devlink_param_u8_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
const struct nfp_devlink_param_u8_arg *arg;
struct nfp_pf *pf = devlink_priv(devlink);
@@ -132,7 +133,8 @@ exit_close_nsp:
static int
nfp_devlink_param_u8_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
const struct nfp_devlink_param_u8_arg *arg;
struct nfp_pf *pf = devlink_priv(devlink);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 2c3f62907958..aca2a7417af3 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -396,6 +396,17 @@ nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len,
return 0;
}
+#define NFP_FL_CHECK(flag) ({ \
+ IP_TUNNEL_DECLARE_FLAGS(__check) = { }; \
+ __be16 __res; \
+ \
+ __set_bit(IP_TUNNEL_##flag##_BIT, __check); \
+ __res = ip_tunnel_flags_to_be16(__check); \
+ \
+ BUILD_BUG_ON(__builtin_constant_p(__res) && \
+ NFP_FL_TUNNEL_##flag != __res); \
+})
+
static int
nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun,
const struct flow_action_entry *act,
@@ -410,6 +421,7 @@ nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun,
u32 tmp_set_ip_tun_type_index = 0;
/* Currently support one pre-tunnel so index is always 0. */
int pretun_idx = 0;
+ __be16 tun_flags;
if (!IS_ENABLED(CONFIG_IPV6) && ipv6)
return -EOPNOTSUPP;
@@ -417,9 +429,10 @@ nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun,
if (ipv6 && !(priv->flower_ext_feats & NFP_FL_FEATS_IPV6_TUN))
return -EOPNOTSUPP;
- BUILD_BUG_ON(NFP_FL_TUNNEL_CSUM != TUNNEL_CSUM ||
- NFP_FL_TUNNEL_KEY != TUNNEL_KEY ||
- NFP_FL_TUNNEL_GENEVE_OPT != TUNNEL_GENEVE_OPT);
+ NFP_FL_CHECK(CSUM);
+ NFP_FL_CHECK(KEY);
+ NFP_FL_CHECK(GENEVE_OPT);
+
if (ip_tun->options_len &&
(tun_type != NFP_FL_TUNNEL_GENEVE ||
!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT))) {
@@ -427,7 +440,9 @@ nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun,
return -EOPNOTSUPP;
}
- if (ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_UDP_TUN_FLAGS) {
+ tun_flags = ip_tunnel_flags_to_be16(ip_tun->key.tun_flags);
+ if (!ip_tunnel_flags_is_be16_compat(ip_tun->key.tun_flags) ||
+ (tun_flags & ~NFP_FL_SUPPORTED_UDP_TUN_FLAGS)) {
NL_SET_ERR_MSG_MOD(extack,
"unsupported offload: loaded firmware does not support tunnel flag offload");
return -EOPNOTSUPP;
@@ -442,7 +457,7 @@ nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun,
FIELD_PREP(NFP_FL_PRE_TUN_INDEX, pretun_idx);
set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
- if (ip_tun->key.tun_flags & NFP_FL_TUNNEL_KEY)
+ if (tun_flags & NFP_FL_TUNNEL_KEY)
set_tun->tun_id = ip_tun->key.tun_id;
if (ip_tun->key.ttl) {
@@ -486,7 +501,7 @@ nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun,
}
set_tun->tos = ip_tun->key.tos;
- set_tun->tun_flags = ip_tun->key.tun_flags;
+ set_tun->tun_flags = tun_flags;
if (tun_type == NFP_FL_TUNNEL_GENEVE) {
set_tun->tun_proto = htons(ETH_P_TEB);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
index 80e4675582bf..dde60c4572fa 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -564,8 +564,8 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
/* Init ring buffer and unallocated stats_ids. */
priv->stats_ids.free_list.buf =
- vmalloc(array_size(NFP_FL_STATS_ELEM_RS,
- priv->stats_ring_size));
+ vmalloc_array(priv->stats_ring_size,
+ NFP_FL_STATS_ELEM_RS);
if (!priv->stats_ids.free_list.buf)
goto err_free_last_used;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 0aceef9fe582..46ffc2c20893 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -321,6 +321,10 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
flow_rule_match_enc_control(rule, &enc_ctl);
+ if (flow_rule_has_enc_control_flags(enc_ctl.mask->flags,
+ extack))
+ return -EOPNOTSUPP;
+
if (enc_ctl.mask->addr_type != 0xffff) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: wildcarded protocols on tunnels are not supported");
return -EOPNOTSUPP;
@@ -527,10 +531,10 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
struct flow_match_control ctl;
flow_rule_match_control(rule, &ctl);
- if (ctl.key->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS) {
- NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on unknown control flag");
+
+ if (!flow_rule_is_supp_control_flags(NFP_FLOWER_SUPPORTED_CTLFLAGS,
+ ctl.mask->flags, extack))
return -EOPNOTSUPP;
- }
}
ret_key_ls->key_layer = key_layer;
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
index d215efc6cad0..91a227929a5f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
@@ -779,7 +779,7 @@ nfp_nfd3_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
case NFP_NET_META_CSUM:
meta->csum_type = CHECKSUM_COMPLETE;
meta->csum =
- (__force __wsum)__get_unaligned_cpu32(data);
+ (__force __wsum)get_unaligned((u32 *)data);
data += 4;
break;
case NFP_NET_META_RESYNC_INFO:
@@ -1169,32 +1169,24 @@ int nfp_nfd3_poll(struct napi_struct *napi, int budget)
if (r_vec->nfp_net->rx_coalesce_adapt_on && r_vec->rx_ring) {
struct dim_sample dim_sample = {};
- unsigned int start;
u64 pkts, bytes;
- do {
- start = u64_stats_fetch_begin(&r_vec->rx_sync);
- pkts = r_vec->rx_pkts;
- bytes = r_vec->rx_bytes;
- } while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
+ pkts = r_vec->rx_pkts;
+ bytes = r_vec->rx_bytes;
dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
- net_dim(&r_vec->rx_dim, dim_sample);
+ net_dim(&r_vec->rx_dim, &dim_sample);
}
if (r_vec->nfp_net->tx_coalesce_adapt_on && r_vec->tx_ring) {
struct dim_sample dim_sample = {};
- unsigned int start;
u64 pkts, bytes;
- do {
- start = u64_stats_fetch_begin(&r_vec->tx_sync);
- pkts = r_vec->tx_pkts;
- bytes = r_vec->tx_bytes;
- } while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
+ pkts = r_vec->tx_pkts;
+ bytes = r_vec->tx_bytes;
dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
- net_dim(&r_vec->tx_dim, dim_sample);
+ net_dim(&r_vec->tx_dim, &dim_sample);
}
return pkts_polled;
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c b/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c
index 45be6954d5aa..01cfa9cc1b5e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c
@@ -184,7 +184,7 @@ nfp_nfd3_xsk_rx(struct nfp_net_rx_ring *rx_ring, int budget,
xrxbuf->xdp->data += meta_len;
xrxbuf->xdp->data_end = xrxbuf->xdp->data + pkt_len;
xdp_set_data_meta_invalid(xrxbuf->xdp);
- xsk_buff_dma_sync_for_cpu(xrxbuf->xdp, r_vec->xsk_pool);
+ xsk_buff_dma_sync_for_cpu(xrxbuf->xdp);
net_prefetch(xrxbuf->xdp->data);
if (meta_len) {
diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
index dae5af7d1845..ee0db3d5fd66 100644
--- a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
@@ -779,7 +779,7 @@ nfp_nfdk_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
case NFP_NET_META_CSUM:
meta->csum_type = CHECKSUM_COMPLETE;
meta->csum =
- (__force __wsum)__get_unaligned_cpu32(data);
+ (__force __wsum)get_unaligned((u32 *)data);
data += 4;
break;
case NFP_NET_META_RESYNC_INFO:
@@ -1279,32 +1279,24 @@ int nfp_nfdk_poll(struct napi_struct *napi, int budget)
if (r_vec->nfp_net->rx_coalesce_adapt_on && r_vec->rx_ring) {
struct dim_sample dim_sample = {};
- unsigned int start;
u64 pkts, bytes;
- do {
- start = u64_stats_fetch_begin(&r_vec->rx_sync);
- pkts = r_vec->rx_pkts;
- bytes = r_vec->rx_bytes;
- } while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
+ pkts = r_vec->rx_pkts;
+ bytes = r_vec->rx_bytes;
dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
- net_dim(&r_vec->rx_dim, dim_sample);
+ net_dim(&r_vec->rx_dim, &dim_sample);
}
if (r_vec->nfp_net->tx_coalesce_adapt_on && r_vec->tx_ring) {
struct dim_sample dim_sample = {};
- unsigned int start;
u64 pkts, bytes;
- do {
- start = u64_stats_fetch_begin(&r_vec->tx_sync);
- pkts = r_vec->tx_pkts;
- bytes = r_vec->tx_bytes;
- } while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
+ pkts = r_vec->tx_pkts;
+ bytes = r_vec->tx_bytes;
dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
- net_dim(&r_vec->tx_dim, dim_sample);
+ net_dim(&r_vec->tx_dim, &dim_sample);
}
return pkts_polled;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
index 635d33c0d6d3..ea75b9a06313 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
@@ -160,6 +160,7 @@ static const struct nfp_devlink_versions_simple {
{ DEVLINK_INFO_VERSION_GENERIC_BOARD_REV, "assembly.revision", },
{ DEVLINK_INFO_VERSION_GENERIC_BOARD_MANUFACTURE, "assembly.vendor", },
{ "board.model", /* code name */ "assembly.model", },
+ { DEVLINK_INFO_VERSION_GENERIC_BOARD_PART_NUMBER, "pn", },
};
static int
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c b/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c
index 0d6c59d6d4ae..ea6a288c0d5e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c
@@ -83,42 +83,12 @@ nfp_hwmon_is_visible(const void *data, enum hwmon_sensor_types type, u32 attr,
return 0;
}
-static u32 nfp_chip_config[] = {
- HWMON_C_REGISTER_TZ,
- 0
-};
-
-static const struct hwmon_channel_info nfp_chip = {
- .type = hwmon_chip,
- .config = nfp_chip_config,
-};
-
-static u32 nfp_temp_config[] = {
- HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT,
- 0
-};
-
-static const struct hwmon_channel_info nfp_temp = {
- .type = hwmon_temp,
- .config = nfp_temp_config,
-};
-
-static u32 nfp_power_config[] = {
- HWMON_P_INPUT | HWMON_P_MAX,
- HWMON_P_INPUT,
- HWMON_P_INPUT,
- 0
-};
-
-static const struct hwmon_channel_info nfp_power = {
- .type = hwmon_power,
- .config = nfp_power_config,
-};
-
static const struct hwmon_channel_info * const nfp_hwmon_info[] = {
- &nfp_chip,
- &nfp_temp,
- &nfp_power,
+ HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT),
+ HWMON_CHANNEL_INFO(power, HWMON_P_INPUT | HWMON_P_MAX,
+ HWMON_P_INPUT,
+ HWMON_P_INPUT),
NULL
};
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index 71301dbd8fb5..48390b2fd44d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -797,7 +797,7 @@ static int nfp_pci_probe(struct pci_dev *pdev,
pf->pdev = pdev;
pf->dev_info = dev_info;
- pf->wq = alloc_workqueue("nfp-%s", 0, 2, pci_name(pdev));
+ pf->wq = alloc_workqueue("nfp-%s", WQ_PERCPU, 2, pci_name(pdev));
if (!pf->wq) {
err = -ENOMEM;
goto err_pci_priv_unset;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index f28e769e6fda..9ef72f294117 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -159,7 +159,7 @@ static int nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
static void nfp_net_reconfig_timer(struct timer_list *t)
{
- struct nfp_net *nn = from_timer(nn, t, reconfig_timer);
+ struct nfp_net *nn = timer_container_of(nn, t, reconfig_timer);
spin_lock_bh(&nn->reconfig_lock);
@@ -227,7 +227,7 @@ static void nfp_net_reconfig_sync_enter(struct nfp_net *nn)
spin_unlock_bh(&nn->reconfig_lock);
if (cancelled_timer) {
- del_timer_sync(&nn->reconfig_timer);
+ timer_delete_sync(&nn->reconfig_timer);
nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires);
}
@@ -821,16 +821,15 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
snprintf(r_vec->name, sizeof(r_vec->name),
"%s-rxtx-%d", nfp_net_name(nn), idx);
- err = request_irq(r_vec->irq_vector, r_vec->handler, 0, r_vec->name,
- r_vec);
+ err = request_irq(r_vec->irq_vector, r_vec->handler, IRQF_NO_AUTOEN,
+ r_vec->name, r_vec);
if (err) {
nfp_net_napi_del(&nn->dp, r_vec);
nn_err(nn, "Error requesting IRQ %d\n", r_vec->irq_vector);
return err;
}
- disable_irq(r_vec->irq_vector);
- irq_set_affinity_hint(r_vec->irq_vector, &r_vec->affinity_mask);
+ irq_update_affinity_hint(r_vec->irq_vector, &r_vec->affinity_mask);
nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", idx, r_vec->irq_vector,
r_vec->irq_entry);
@@ -841,7 +840,7 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
static void
nfp_net_cleanup_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec)
{
- irq_set_affinity_hint(r_vec->irq_vector, NULL);
+ irq_update_affinity_hint(r_vec->irq_vector, NULL);
nfp_net_napi_del(&nn->dp, r_vec);
free_irq(r_vec->irq_vector, r_vec);
}
@@ -1526,7 +1525,7 @@ static void nfp_net_dp_swap(struct nfp_net *nn, struct nfp_net_dp *dp)
*dp = nn->dp;
nn->dp = new_dp;
- nn->dp.netdev->mtu = new_dp.mtu;
+ WRITE_ONCE(nn->dp.netdev->mtu, new_dp.mtu);
if (!netif_is_rxfh_configured(nn->dp.netdev))
nfp_net_rss_init_itbl(nn);
@@ -2289,10 +2288,7 @@ static int nfp_net_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
if (!br_spec)
return -EINVAL;
- nla_for_each_nested(attr, br_spec, rem) {
- if (nla_type(attr) != IFLA_BRIDGE_MODE)
- continue;
-
+ nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
new_ctrl = nn->dp.ctrl;
mode = nla_get_u16(attr);
if (mode == BRIDGE_MODE_VEPA)
@@ -2398,8 +2394,7 @@ static int nfp_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
static const struct udp_tunnel_nic_info nfp_udp_tunnels = {
.sync_table = nfp_udp_tunnel_sync,
- .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
- UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+ .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
.tables = {
{
.n_entries = NFP_NET_N_VXLAN_PORTS,
@@ -2542,7 +2537,7 @@ nfp_net_alloc(struct pci_dev *pdev, const struct nfp_dev_info *dev_info,
nn->dp.num_r_vecs, num_online_cpus());
nn->max_r_vecs = nn->dp.num_r_vecs;
- nn->dp.xsk_pools = kcalloc(nn->max_r_vecs, sizeof(nn->dp.xsk_pools),
+ nn->dp.xsk_pools = kcalloc(nn->max_r_vecs, sizeof(*nn->dp.xsk_pools),
GFP_KERNEL);
if (!nn->dp.xsk_pools) {
err = -ENOMEM;
@@ -2562,14 +2557,16 @@ nfp_net_alloc(struct pci_dev *pdev, const struct nfp_dev_info *dev_info,
err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar,
&nn->tlv_caps);
if (err)
- goto err_free_nn;
+ goto err_free_xsk_pools;
err = nfp_ccm_mbox_alloc(nn);
if (err)
- goto err_free_nn;
+ goto err_free_xsk_pools;
return nn;
+err_free_xsk_pools:
+ kfree(nn->dp.xsk_pools);
err_free_nn:
if (nn->dp.netdev)
free_netdev(nn->dp.netdev);
@@ -2783,7 +2780,7 @@ static void nfp_net_netdev_init(struct nfp_net *nn)
break;
}
- netdev->watchdog_timeo = msecs_to_jiffies(5 * 1000);
+ netdev->watchdog_timeo = secs_to_jiffies(5);
/* MTU range: 68 - hw-specific max */
netdev->min_mtu = ETH_MIN_MTU;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c
index a614df095b08..7276e44a21d0 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c
@@ -34,10 +34,15 @@ enum nfp_dumpspec_type {
/* generic type plus length */
struct nfp_dump_tl {
- __be32 type;
- __be32 length; /* chunk length to follow, aligned to 8 bytes */
+ /* New members must be added within the struct_group() macro below. */
+ struct_group_tagged(nfp_dump_tl_hdr, hdr,
+ __be32 type;
+ __be32 length; /* chunk length to follow, aligned to 8 bytes */
+ );
char data[];
};
+static_assert(offsetof(struct nfp_dump_tl, data) == sizeof(struct nfp_dump_tl_hdr),
+ "struct member likely outside of struct_group_tagged()");
/* NFP CPP parameters */
struct nfp_dumpspec_cpp_isl_id {
@@ -55,19 +60,19 @@ struct nfp_dump_common_cpp {
/* CSR dumpables */
struct nfp_dumpspec_csr {
- struct nfp_dump_tl tl;
+ struct nfp_dump_tl_hdr tl;
struct nfp_dump_common_cpp cpp;
__be32 register_width; /* in bits */
};
struct nfp_dumpspec_rtsym {
- struct nfp_dump_tl tl;
+ struct nfp_dump_tl_hdr tl;
char rtsym[];
};
/* header for register dumpable */
struct nfp_dump_csr {
- struct nfp_dump_tl tl;
+ struct nfp_dump_tl_hdr tl;
struct nfp_dump_common_cpp cpp;
__be32 register_width; /* in bits */
__be32 error; /* error code encountered while reading */
@@ -75,7 +80,7 @@ struct nfp_dump_csr {
};
struct nfp_dump_rtsym {
- struct nfp_dump_tl tl;
+ struct nfp_dump_tl_hdr tl;
struct nfp_dump_common_cpp cpp;
__be32 error; /* error code encountered while reading */
u8 padded_name_length; /* pad so data starts at 8 byte boundary */
@@ -84,12 +89,12 @@ struct nfp_dump_rtsym {
};
struct nfp_dump_prolog {
- struct nfp_dump_tl tl;
+ struct nfp_dump_tl_hdr tl;
__be32 dump_level;
};
struct nfp_dump_error {
- struct nfp_dump_tl tl;
+ struct nfp_dump_tl_hdr tl;
__be32 error;
char padding[4];
char spec[];
@@ -449,6 +454,8 @@ static int
nfp_dump_csr_range(struct nfp_pf *pf, struct nfp_dumpspec_csr *spec_csr,
struct nfp_dump_state *dump)
{
+ struct nfp_dump_tl *spec_csr_tl =
+ container_of(&spec_csr->tl, struct nfp_dump_tl, hdr);
struct nfp_dump_csr *dump_header = dump->p;
u32 reg_sz, header_size, total_size;
u32 cpp_rd_addr, max_rd_addr;
@@ -458,7 +465,7 @@ nfp_dump_csr_range(struct nfp_pf *pf, struct nfp_dumpspec_csr *spec_csr,
int err;
if (!nfp_csr_spec_valid(spec_csr))
- return nfp_dump_error_tlv(&spec_csr->tl, -EINVAL, dump);
+ return nfp_dump_error_tlv(spec_csr_tl, -EINVAL, dump);
reg_sz = be32_to_cpu(spec_csr->register_width) / BITS_PER_BYTE;
header_size = ALIGN8(sizeof(*dump_header));
@@ -466,7 +473,7 @@ nfp_dump_csr_range(struct nfp_pf *pf, struct nfp_dumpspec_csr *spec_csr,
ALIGN8(be32_to_cpu(spec_csr->cpp.dump_length));
dest = dump->p + header_size;
- err = nfp_add_tlv(be32_to_cpu(spec_csr->tl.type), total_size, dump);
+ err = nfp_add_tlv(be32_to_cpu(spec_csr_tl->type), total_size, dump);
if (err)
return err;
@@ -552,6 +559,8 @@ nfp_dump_indirect_csr_range(struct nfp_pf *pf,
struct nfp_dumpspec_csr *spec_csr,
struct nfp_dump_state *dump)
{
+ struct nfp_dump_tl *spec_csr_tl =
+ container_of(&spec_csr->tl, struct nfp_dump_tl, hdr);
struct nfp_dump_csr *dump_header = dump->p;
u32 reg_sz, header_size, total_size;
u32 cpp_rd_addr, max_rd_addr;
@@ -560,7 +569,7 @@ nfp_dump_indirect_csr_range(struct nfp_pf *pf,
int err;
if (!nfp_csr_spec_valid(spec_csr))
- return nfp_dump_error_tlv(&spec_csr->tl, -EINVAL, dump);
+ return nfp_dump_error_tlv(spec_csr_tl, -EINVAL, dump);
reg_sz = be32_to_cpu(spec_csr->register_width) / BITS_PER_BYTE;
header_size = ALIGN8(sizeof(*dump_header));
@@ -569,7 +578,7 @@ nfp_dump_indirect_csr_range(struct nfp_pf *pf,
total_size = header_size + ALIGN8(reg_data_length);
dest = dump->p + header_size;
- err = nfp_add_tlv(be32_to_cpu(spec_csr->tl.type), total_size, dump);
+ err = nfp_add_tlv(be32_to_cpu(spec_csr_tl->type), total_size, dump);
if (err)
return err;
@@ -597,6 +606,8 @@ static int
nfp_dump_single_rtsym(struct nfp_pf *pf, struct nfp_dumpspec_rtsym *spec,
struct nfp_dump_state *dump)
{
+ struct nfp_dump_tl *spec_tl =
+ container_of(&spec->tl, struct nfp_dump_tl, hdr);
struct nfp_dump_rtsym *dump_header = dump->p;
struct nfp_dumpspec_cpp_isl_id cpp_params;
struct nfp_rtsym_table *rtbl = pf->rtbl;
@@ -607,14 +618,14 @@ nfp_dump_single_rtsym(struct nfp_pf *pf, struct nfp_dumpspec_rtsym *spec,
void *dest;
int err;
- tl_len = be32_to_cpu(spec->tl.length);
+ tl_len = be32_to_cpu(spec_tl->length);
key_len = strnlen(spec->rtsym, tl_len);
if (key_len == tl_len)
- return nfp_dump_error_tlv(&spec->tl, -EINVAL, dump);
+ return nfp_dump_error_tlv(spec_tl, -EINVAL, dump);
sym = nfp_rtsym_lookup(rtbl, spec->rtsym);
if (!sym)
- return nfp_dump_error_tlv(&spec->tl, -ENOENT, dump);
+ return nfp_dump_error_tlv(spec_tl, -ENOENT, dump);
sym_size = nfp_rtsym_size(sym);
header_size =
@@ -622,7 +633,7 @@ nfp_dump_single_rtsym(struct nfp_pf *pf, struct nfp_dumpspec_rtsym *spec,
total_size = header_size + ALIGN8(sym_size);
dest = dump->p + header_size;
- err = nfp_add_tlv(be32_to_cpu(spec->tl.type), total_size, dump);
+ err = nfp_add_tlv(be32_to_cpu(spec_tl->type), total_size, dump);
if (err)
return err;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
index d8b735ccf899..d843d1e19715 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
@@ -77,7 +77,7 @@ DEFINE_SHOW_ATTRIBUTE(nfp_rx_q);
static int nfp_tx_q_show(struct seq_file *file, void *data);
DEFINE_SHOW_ATTRIBUTE(nfp_tx_q);
-static int nfp_tx_q_show(struct seq_file *file, void *data)
+static int __nfp_tx_q_show(struct seq_file *file, void *data, bool is_xdp)
{
struct nfp_net_r_vector *r_vec = file->private;
struct nfp_net_tx_ring *tx_ring;
@@ -86,10 +86,10 @@ static int nfp_tx_q_show(struct seq_file *file, void *data)
rtnl_lock();
- if (debugfs_real_fops(file->file) == &nfp_tx_q_fops)
- tx_ring = r_vec->tx_ring;
- else
+ if (is_xdp)
tx_ring = r_vec->xdp_ring;
+ else
+ tx_ring = r_vec->tx_ring;
if (!r_vec->nfp_net || !tx_ring)
goto out;
nn = r_vec->nfp_net;
@@ -115,9 +115,14 @@ out:
return 0;
}
+static int nfp_tx_q_show(struct seq_file *file, void *data)
+{
+ return __nfp_tx_q_show(file, data, false);
+}
+
static int nfp_xdp_q_show(struct seq_file *file, void *data)
{
- return nfp_tx_q_show(file, data);
+ return __nfp_tx_q_show(file, data, true);
}
DEFINE_SHOW_ATTRIBUTE(nfp_xdp_q);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index fbca8d0efd85..16c828dd5c1a 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -1303,9 +1303,10 @@ static u32 ethtool_flow_to_nfp_flag(u32 flow_type)
return xlate_ethtool_to_nfp[flow_type];
}
-static int nfp_net_get_rss_hash_opts(struct nfp_net *nn,
- struct ethtool_rxnfc *cmd)
+static int nfp_net_get_rxfh_fields(struct net_device *netdev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct nfp_net *nn = netdev_priv(netdev);
u32 nfp_rss_flag;
cmd->data = 0;
@@ -1451,16 +1452,16 @@ static int nfp_net_get_rxnfc(struct net_device *netdev,
case ETHTOOL_GRXCLSRLALL:
cmd->data = NFP_FS_MAX_ENTRY;
return nfp_net_get_fs_loc(nn, rule_locs);
- case ETHTOOL_GRXFH:
- return nfp_net_get_rss_hash_opts(nn, cmd);
default:
return -EOPNOTSUPP;
}
}
-static int nfp_net_set_rss_hash_opt(struct nfp_net *nn,
- struct ethtool_rxnfc *nfc)
+static int nfp_net_set_rxfh_fields(struct net_device *netdev,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
{
+ struct nfp_net *nn = netdev_priv(netdev);
u32 new_rss_cfg = nn->rss_cfg;
u32 nfp_rss_flag;
int err;
@@ -1763,8 +1764,6 @@ static int nfp_net_set_rxnfc(struct net_device *netdev,
struct nfp_net *nn = netdev_priv(netdev);
switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- return nfp_net_set_rss_hash_opt(nn, cmd);
case ETHTOOL_SRXCLSRLINS:
return nfp_net_fs_add(nn, cmd);
case ETHTOOL_SRXCLSRLDEL:
@@ -1789,7 +1788,7 @@ static u32 nfp_net_get_rxfh_key_size(struct net_device *netdev)
struct nfp_net *nn = netdev_priv(netdev);
if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
- return -EOPNOTSUPP;
+ return 0;
return nfp_net_rss_key_sz(nn);
}
@@ -2506,6 +2505,8 @@ static const struct ethtool_ops nfp_net_ethtool_ops = {
.get_rxfh_key_size = nfp_net_get_rxfh_key_size,
.get_rxfh = nfp_net_get_rxfh,
.set_rxfh = nfp_net_set_rxfh,
+ .get_rxfh_fields = nfp_net_get_rxfh_fields,
+ .set_rxfh_fields = nfp_net_set_rxfh_fields,
.get_regs_len = nfp_net_get_regs_len,
.get_regs = nfp_net_get_regs,
.set_dump = nfp_app_set_dump,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index 3af1229a3f08..227e7a5d712e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -177,7 +177,7 @@ static int nfp_repr_change_mtu(struct net_device *netdev, int new_mtu)
if (err)
return err;
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
return 0;
}
@@ -248,7 +248,6 @@ nfp_repr_fix_features(struct net_device *netdev, netdev_features_t features)
features = netdev_intersect_features(features, lower_features);
features |= old_features & (NETIF_F_SOFT_FEATURES | NETIF_F_HW_TC);
- features |= NETIF_F_LLTX;
return features;
}
@@ -386,7 +385,7 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
netif_set_tso_max_segs(netdev, NFP_NET_LSO_MAX_SEGS);
netdev->priv_flags |= IFF_NO_QUEUE | IFF_DISABLE_NETPOLL;
- netdev->features |= NETIF_F_LLTX;
+ netdev->lltx = true;
if (nfp_app_has_tc(app)) {
netdev->features |= NETIF_F_HW_TC;
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
index 3f10c5365c80..7c2200b49ce4 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
@@ -15,7 +15,7 @@
* abstraction builds upon this BAR interface.
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kref.h>
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
index a8286d0032d1..669f9f8fb507 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
@@ -9,7 +9,7 @@
* Rolf Neugebauer <rolf.neugebauer@netronome.com>
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/ioport.h>
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c
index 508ae6b571ca..addf02c63b1a 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c
@@ -9,7 +9,7 @@
* Rolf Neugebauer <rolf.neugebauer@netronome.com>
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/kernel.h>
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c
index f05dd34ab89f..cfa4db5d3f85 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c
@@ -15,7 +15,7 @@
*/
#include <asm/byteorder.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/kernel.h>
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
index 7136bc48530b..0bd6477292a6 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
@@ -7,7 +7,7 @@
* Jason McMullan <jason.mcmullan@netronome.com>
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/firmware.h>
@@ -278,7 +278,7 @@ struct nfp_nsp *nfp_nsp_open(struct nfp_cpp *cpp)
res = nfp_resource_acquire(cpp, NFP_RESOURCE_NSP);
if (IS_ERR(res))
- return (void *)res;
+ return ERR_CAST(res);
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state) {
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c
index 2260c2403a83..68862ac062d2 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c
@@ -10,7 +10,7 @@
* Francois H. Theron <francois.theron@netronome.com>
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index fa1f78b03cb2..230d5ff99dd7 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -946,7 +946,7 @@ static int nixge_change_mtu(struct net_device *ndev, int new_mtu)
NIXGE_MAX_JUMBO_FRAME_SIZE)
return -EINVAL;
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
return 0;
}
@@ -1415,7 +1415,7 @@ static void nixge_remove(struct platform_device *pdev)
static struct platform_driver nixge_driver = {
.probe = nixge_probe,
- .remove_new = nixge_remove,
+ .remove = nixge_remove,
.driver = {
.name = "nixge",
.of_match_table = nixge_dt_ids,
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 31f896c4aa26..19aa1f1538aa 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -1120,20 +1120,6 @@ static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
}
}
-static void nv_napi_enable(struct net_device *dev)
-{
- struct fe_priv *np = get_nvpriv(dev);
-
- napi_enable(&np->napi);
-}
-
-static void nv_napi_disable(struct net_device *dev)
-{
- struct fe_priv *np = get_nvpriv(dev);
-
- napi_disable(&np->napi);
-}
-
#define MII_READ (-1)
/* mii_rw: read/write a register on the PHY.
*
@@ -1905,7 +1891,7 @@ packet_dropped:
/* If rx bufs are exhausted called after 50ms to attempt to refresh */
static void nv_do_rx_refill(struct timer_list *t)
{
- struct fe_priv *np = from_timer(np, t, oom_kick);
+ struct fe_priv *np = timer_container_of(np, t, oom_kick);
/* Just reschedule NAPI rx processing */
napi_schedule(&np->napi);
@@ -3098,7 +3084,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
int old_mtu;
old_mtu = dev->mtu;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
/* return early if the buffer sizes will not change */
if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
@@ -3114,7 +3100,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
* Changing the MTU is a rare event, it shouldn't matter.
*/
nv_disable_irq(dev);
- nv_napi_disable(dev);
+ napi_disable(&np->napi);
netif_tx_lock_bh(dev);
netif_addr_lock(dev);
spin_lock(&np->lock);
@@ -3143,7 +3129,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
spin_unlock(&np->lock);
netif_addr_unlock(dev);
netif_tx_unlock_bh(dev);
- nv_napi_enable(dev);
+ napi_enable(&np->napi);
nv_enable_irq(dev);
}
return 0;
@@ -4154,7 +4140,7 @@ static void nv_free_irq(struct net_device *dev)
static void nv_do_nic_poll(struct timer_list *t)
{
- struct fe_priv *np = from_timer(np, t, nic_poll);
+ struct fe_priv *np = timer_container_of(np, t, nic_poll);
struct net_device *dev = np->dev;
u8 __iomem *base = get_hwbase(dev);
u32 mask = 0;
@@ -4273,7 +4259,7 @@ static void nv_do_stats_poll(struct timer_list *t)
__acquires(&netdev_priv(dev)->hwstats_lock)
__releases(&netdev_priv(dev)->hwstats_lock)
{
- struct fe_priv *np = from_timer(np, t, stats_poll);
+ struct fe_priv *np = timer_container_of(np, t, stats_poll);
struct net_device *dev = np->dev;
/* If lock is currently taken, the stats are being refreshed
@@ -4731,7 +4717,7 @@ static int nv_set_ringparam(struct net_device *dev,
if (netif_running(dev)) {
nv_disable_irq(dev);
- nv_napi_disable(dev);
+ napi_disable(&np->napi);
netif_tx_lock_bh(dev);
netif_addr_lock(dev);
spin_lock(&np->lock);
@@ -4784,7 +4770,7 @@ static int nv_set_ringparam(struct net_device *dev,
spin_unlock(&np->lock);
netif_addr_unlock(dev);
netif_tx_unlock_bh(dev);
- nv_napi_enable(dev);
+ napi_enable(&np->napi);
nv_enable_irq(dev);
}
return 0;
@@ -5277,7 +5263,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
if (test->flags & ETH_TEST_FL_OFFLINE) {
if (netif_running(dev)) {
netif_stop_queue(dev);
- nv_napi_disable(dev);
+ napi_disable(&np->napi);
netif_tx_lock_bh(dev);
netif_addr_lock(dev);
spin_lock_irq(&np->lock);
@@ -5334,7 +5320,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
/* restart rx engine */
nv_start_rxtx(dev);
netif_start_queue(dev);
- nv_napi_enable(dev);
+ napi_enable(&np->napi);
nv_enable_hw_interrupts(dev, np->irqmask);
}
}
@@ -5576,6 +5562,7 @@ static int nv_open(struct net_device *dev)
/* ask for interrupts */
nv_enable_hw_interrupts(dev, np->irqmask);
+ netdev_lock(dev);
spin_lock_irq(&np->lock);
writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
writel(0, base + NvRegMulticastAddrB);
@@ -5594,7 +5581,7 @@ static int nv_open(struct net_device *dev)
ret = nv_update_linkspeed(dev);
nv_start_rxtx(dev);
netif_start_queue(dev);
- nv_napi_enable(dev);
+ napi_enable_locked(&np->napi);
if (ret) {
netif_carrier_on(dev);
@@ -5611,6 +5598,7 @@ static int nv_open(struct net_device *dev)
round_jiffies(jiffies + STATS_INTERVAL));
spin_unlock_irq(&np->lock);
+ netdev_unlock(dev);
/* If the loopback feature was set while the device was down, make sure
* that it's set correctly now.
@@ -5632,12 +5620,12 @@ static int nv_close(struct net_device *dev)
spin_lock_irq(&np->lock);
np->in_shutdown = 1;
spin_unlock_irq(&np->lock);
- nv_napi_disable(dev);
+ napi_disable(&np->napi);
synchronize_irq(np->pci_dev->irq);
- del_timer_sync(&np->oom_kick);
- del_timer_sync(&np->nic_poll);
- del_timer_sync(&np->stats_poll);
+ timer_delete_sync(&np->oom_kick);
+ timer_delete_sync(&np->nic_poll);
+ timer_delete_sync(&np->stats_poll);
netif_stop_queue(dev);
spin_lock_irq(&np->lock);
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index dd3e58a1319c..8b9a3e3bba30 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1503,7 +1503,7 @@ MODULE_DEVICE_TABLE(of, lpc_eth_match);
static struct platform_driver lpc_eth_driver = {
.probe = lpc_eth_drv_probe,
- .remove_new = lpc_eth_drv_remove,
+ .remove = lpc_eth_drv_remove,
#ifdef CONFIG_PM
.suspend = lpc_eth_drv_suspend,
.resume = lpc_eth_drv_resume,
diff --git a/drivers/net/ethernet/oa_tc6.c b/drivers/net/ethernet/oa_tc6.c
new file mode 100644
index 000000000000..91a906a7918a
--- /dev/null
+++ b/drivers/net/ethernet/oa_tc6.c
@@ -0,0 +1,1369 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * OPEN Alliance 10BASE-T1x MAC-PHY Serial Interface framework
+ *
+ * Author: Parthiban Veerasooran <parthiban.veerasooran@microchip.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/iopoll.h>
+#include <linux/mdio.h>
+#include <linux/phy.h>
+#include <linux/oa_tc6.h>
+
+/* OPEN Alliance TC6 registers */
+/* Standard Capabilities Register */
+#define OA_TC6_REG_STDCAP 0x0002
+#define STDCAP_DIRECT_PHY_REG_ACCESS BIT(8)
+
+/* Reset Control and Status Register */
+#define OA_TC6_REG_RESET 0x0003
+#define RESET_SWRESET BIT(0) /* Software Reset */
+
+/* Configuration Register #0 */
+#define OA_TC6_REG_CONFIG0 0x0004
+#define CONFIG0_SYNC BIT(15)
+#define CONFIG0_ZARFE_ENABLE BIT(12)
+
+/* Status Register #0 */
+#define OA_TC6_REG_STATUS0 0x0008
+#define STATUS0_RESETC BIT(6) /* Reset Complete */
+#define STATUS0_HEADER_ERROR BIT(5)
+#define STATUS0_LOSS_OF_FRAME_ERROR BIT(4)
+#define STATUS0_RX_BUFFER_OVERFLOW_ERROR BIT(3)
+#define STATUS0_TX_PROTOCOL_ERROR BIT(0)
+
+/* Buffer Status Register */
+#define OA_TC6_REG_BUFFER_STATUS 0x000B
+#define BUFFER_STATUS_TX_CREDITS_AVAILABLE GENMASK(15, 8)
+#define BUFFER_STATUS_RX_CHUNKS_AVAILABLE GENMASK(7, 0)
+
+/* Interrupt Mask Register #0 */
+#define OA_TC6_REG_INT_MASK0 0x000C
+#define INT_MASK0_HEADER_ERR_MASK BIT(5)
+#define INT_MASK0_LOSS_OF_FRAME_ERR_MASK BIT(4)
+#define INT_MASK0_RX_BUFFER_OVERFLOW_ERR_MASK BIT(3)
+#define INT_MASK0_TX_PROTOCOL_ERR_MASK BIT(0)
+
+/* PHY Clause 22 registers base address and mask */
+#define OA_TC6_PHY_STD_REG_ADDR_BASE 0xFF00
+#define OA_TC6_PHY_STD_REG_ADDR_MASK 0x1F
+
+/* Control command header */
+#define OA_TC6_CTRL_HEADER_DATA_NOT_CTRL BIT(31)
+#define OA_TC6_CTRL_HEADER_WRITE_NOT_READ BIT(29)
+#define OA_TC6_CTRL_HEADER_MEM_MAP_SELECTOR GENMASK(27, 24)
+#define OA_TC6_CTRL_HEADER_ADDR GENMASK(23, 8)
+#define OA_TC6_CTRL_HEADER_LENGTH GENMASK(7, 1)
+#define OA_TC6_CTRL_HEADER_PARITY BIT(0)
+
+/* Data header */
+#define OA_TC6_DATA_HEADER_DATA_NOT_CTRL BIT(31)
+#define OA_TC6_DATA_HEADER_DATA_VALID BIT(21)
+#define OA_TC6_DATA_HEADER_START_VALID BIT(20)
+#define OA_TC6_DATA_HEADER_START_WORD_OFFSET GENMASK(19, 16)
+#define OA_TC6_DATA_HEADER_END_VALID BIT(14)
+#define OA_TC6_DATA_HEADER_END_BYTE_OFFSET GENMASK(13, 8)
+#define OA_TC6_DATA_HEADER_PARITY BIT(0)
+
+/* Data footer */
+#define OA_TC6_DATA_FOOTER_EXTENDED_STS BIT(31)
+#define OA_TC6_DATA_FOOTER_RXD_HEADER_BAD BIT(30)
+#define OA_TC6_DATA_FOOTER_CONFIG_SYNC BIT(29)
+#define OA_TC6_DATA_FOOTER_RX_CHUNKS GENMASK(28, 24)
+#define OA_TC6_DATA_FOOTER_DATA_VALID BIT(21)
+#define OA_TC6_DATA_FOOTER_START_VALID BIT(20)
+#define OA_TC6_DATA_FOOTER_START_WORD_OFFSET GENMASK(19, 16)
+#define OA_TC6_DATA_FOOTER_END_VALID BIT(14)
+#define OA_TC6_DATA_FOOTER_END_BYTE_OFFSET GENMASK(13, 8)
+#define OA_TC6_DATA_FOOTER_TX_CREDITS GENMASK(5, 1)
+
+/* PHY - Clause 45 registers memory map selector (MMS) as per table 6 in the
+ * OPEN Alliance specification.
+ */
+#define OA_TC6_PHY_C45_PCS_MMS2 2 /* MMD 3 */
+#define OA_TC6_PHY_C45_PMA_PMD_MMS3 3 /* MMD 1 */
+#define OA_TC6_PHY_C45_VS_PLCA_MMS4 4 /* MMD 31 */
+#define OA_TC6_PHY_C45_AUTO_NEG_MMS5 5 /* MMD 7 */
+#define OA_TC6_PHY_C45_POWER_UNIT_MMS6 6 /* MMD 13 */
+
+#define OA_TC6_CTRL_HEADER_SIZE 4
+#define OA_TC6_CTRL_REG_VALUE_SIZE 4
+#define OA_TC6_CTRL_IGNORED_SIZE 4
+#define OA_TC6_CTRL_MAX_REGISTERS 128
+#define OA_TC6_CTRL_SPI_BUF_SIZE (OA_TC6_CTRL_HEADER_SIZE +\
+ (OA_TC6_CTRL_MAX_REGISTERS *\
+ OA_TC6_CTRL_REG_VALUE_SIZE) +\
+ OA_TC6_CTRL_IGNORED_SIZE)
+#define OA_TC6_CHUNK_PAYLOAD_SIZE 64
+#define OA_TC6_DATA_HEADER_SIZE 4
+#define OA_TC6_CHUNK_SIZE (OA_TC6_DATA_HEADER_SIZE +\
+ OA_TC6_CHUNK_PAYLOAD_SIZE)
+#define OA_TC6_MAX_TX_CHUNKS 48
+#define OA_TC6_SPI_DATA_BUF_SIZE (OA_TC6_MAX_TX_CHUNKS *\
+ OA_TC6_CHUNK_SIZE)
+#define STATUS0_RESETC_POLL_DELAY 1000
+#define STATUS0_RESETC_POLL_TIMEOUT 1000000
+
+/* Internal structure for MAC-PHY drivers */
+struct oa_tc6 {
+ struct device *dev;
+ struct net_device *netdev;
+ struct phy_device *phydev;
+ struct mii_bus *mdiobus;
+ struct spi_device *spi;
+ struct mutex spi_ctrl_lock; /* Protects spi control transfer */
+ spinlock_t tx_skb_lock; /* Protects tx skb handling */
+ void *spi_ctrl_tx_buf;
+ void *spi_ctrl_rx_buf;
+ void *spi_data_tx_buf;
+ void *spi_data_rx_buf;
+ struct sk_buff *ongoing_tx_skb;
+ struct sk_buff *waiting_tx_skb;
+ struct sk_buff *rx_skb;
+ struct task_struct *spi_thread;
+ wait_queue_head_t spi_wq;
+ u16 tx_skb_offset;
+ u16 spi_data_tx_buf_offset;
+ u16 tx_credits;
+ u8 rx_chunks_available;
+ bool rx_buf_overflow;
+ bool int_flag;
+};
+
+enum oa_tc6_header_type {
+ OA_TC6_CTRL_HEADER,
+ OA_TC6_DATA_HEADER,
+};
+
+enum oa_tc6_register_op {
+ OA_TC6_CTRL_REG_READ = 0,
+ OA_TC6_CTRL_REG_WRITE = 1,
+};
+
+enum oa_tc6_data_valid_info {
+ OA_TC6_DATA_INVALID,
+ OA_TC6_DATA_VALID,
+};
+
+enum oa_tc6_data_start_valid_info {
+ OA_TC6_DATA_START_INVALID,
+ OA_TC6_DATA_START_VALID,
+};
+
+enum oa_tc6_data_end_valid_info {
+ OA_TC6_DATA_END_INVALID,
+ OA_TC6_DATA_END_VALID,
+};
+
+static int oa_tc6_spi_transfer(struct oa_tc6 *tc6,
+ enum oa_tc6_header_type header_type, u16 length)
+{
+ struct spi_transfer xfer = { 0 };
+ struct spi_message msg;
+
+ if (header_type == OA_TC6_DATA_HEADER) {
+ xfer.tx_buf = tc6->spi_data_tx_buf;
+ xfer.rx_buf = tc6->spi_data_rx_buf;
+ } else {
+ xfer.tx_buf = tc6->spi_ctrl_tx_buf;
+ xfer.rx_buf = tc6->spi_ctrl_rx_buf;
+ }
+ xfer.len = length;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ return spi_sync(tc6->spi, &msg);
+}
+
+static int oa_tc6_get_parity(u32 p)
+{
+ /* Public domain code snippet, lifted from
+ * http://www-graphics.stanford.edu/~seander/bithacks.html
+ */
+ p ^= p >> 1;
+ p ^= p >> 2;
+ p = (p & 0x11111111U) * 0x11111111U;
+
+ /* Odd parity is used here */
+ return !((p >> 28) & 1);
+}
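
The parity fold above is easy to verify in isolation. A minimal host-side sketch (plain C, not driver code) reproducing the same fold and checking that appending the returned bit always yields odd overall parity:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Same fold as oa_tc6_get_parity(): returns 1 when the word has an even
     * number of set bits, so placing the result in bit 0 makes the total
     * number of ones odd (odd parity).
     */
    static int get_parity(uint32_t p)
    {
        p ^= p >> 1;
        p ^= p >> 2;
        p = (p & 0x11111111U) * 0x11111111U;
        return !((p >> 28) & 1);
    }

    int main(void)
    {
        for (uint32_t v = 0; v < 100000; v++) {
            int bit = get_parity(v);

            /* With the parity bit appended, the popcount must be odd */
            assert((__builtin_popcount(v) + bit) % 2 == 1);
        }
        printf("odd parity fold verified\n");
        return 0;
    }
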
+
+static __be32 oa_tc6_prepare_ctrl_header(u32 addr, u8 length,
+ enum oa_tc6_register_op reg_op)
+{
+ u32 header;
+
+ header = FIELD_PREP(OA_TC6_CTRL_HEADER_DATA_NOT_CTRL,
+ OA_TC6_CTRL_HEADER) |
+ FIELD_PREP(OA_TC6_CTRL_HEADER_WRITE_NOT_READ, reg_op) |
+ FIELD_PREP(OA_TC6_CTRL_HEADER_MEM_MAP_SELECTOR, addr >> 16) |
+ FIELD_PREP(OA_TC6_CTRL_HEADER_ADDR, addr) |
+ FIELD_PREP(OA_TC6_CTRL_HEADER_LENGTH, length - 1);
+ header |= FIELD_PREP(OA_TC6_CTRL_HEADER_PARITY,
+ oa_tc6_get_parity(header));
+
+ return cpu_to_be32(header);
+}
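
For a concrete picture of the header produced above, here is a small sketch using plain shifts instead of FIELD_PREP; the field positions are taken from the OA_TC6_CTRL_HEADER_* defines earlier in this file, and the example packs a single-register read of address 0x0004 (CONFIG0, MMS 0):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t addr = 0x0004;          /* OA_TC6_REG_CONFIG0, MMS 0 */
        uint32_t header = 0;

        header |= 0u << 31;              /* DNC = 0: control transaction   */
        header |= 0u << 29;              /* WNR = 0: read                  */
        header |= (addr >> 16) << 24;    /* MMS from the upper address bits */
        header |= (addr & 0xffff) << 8;  /* 16-bit register address        */
        header |= (1u - 1u) << 1;        /* LEN = number of registers - 1  */
        /* bit 0 (parity) is then set so the 32-bit word has odd parity */

        printf("ctrl read header before parity: 0x%08x\n", header);
        return 0;
    }
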
+
+static void oa_tc6_update_ctrl_write_data(struct oa_tc6 *tc6, u32 value[],
+ u8 length)
+{
+ __be32 *tx_buf = tc6->spi_ctrl_tx_buf + OA_TC6_CTRL_HEADER_SIZE;
+
+ for (int i = 0; i < length; i++)
+ *tx_buf++ = cpu_to_be32(value[i]);
+}
+
+static u16 oa_tc6_calculate_ctrl_buf_size(u8 length)
+{
+ /* A control command consists of a 4 byte header + a 4 byte register value
+ * for each register + 4 ignored bytes.
+ */
+ return OA_TC6_CTRL_HEADER_SIZE + OA_TC6_CTRL_REG_VALUE_SIZE * length +
+ OA_TC6_CTRL_IGNORED_SIZE;
+}
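
A quick sanity check of the sizing arithmetic, as a host-side sketch using the constants defined above: a transfer of two registers occupies 4 + 2 * 4 + 4 = 16 bytes on the wire, and the worst case of 128 registers exactly fills the 520-byte control buffer.

    #include <assert.h>
    #include <stdio.h>

    #define CTRL_HEADER_SIZE    4
    #define CTRL_REG_VALUE_SIZE 4
    #define CTRL_IGNORED_SIZE   4
    #define CTRL_MAX_REGISTERS  128

    static unsigned int ctrl_buf_size(unsigned int nregs)
    {
        return CTRL_HEADER_SIZE + CTRL_REG_VALUE_SIZE * nregs +
               CTRL_IGNORED_SIZE;
    }

    int main(void)
    {
        assert(ctrl_buf_size(1) == 12);
        assert(ctrl_buf_size(2) == 16);
        /* Worst case equals OA_TC6_CTRL_SPI_BUF_SIZE (520 bytes) */
        assert(ctrl_buf_size(CTRL_MAX_REGISTERS) == 520);
        printf("control buffer sizing ok\n");
        return 0;
    }
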
+
+static void oa_tc6_prepare_ctrl_spi_buf(struct oa_tc6 *tc6, u32 address,
+ u32 value[], u8 length,
+ enum oa_tc6_register_op reg_op)
+{
+ __be32 *tx_buf = tc6->spi_ctrl_tx_buf;
+
+ *tx_buf = oa_tc6_prepare_ctrl_header(address, length, reg_op);
+
+ if (reg_op == OA_TC6_CTRL_REG_WRITE)
+ oa_tc6_update_ctrl_write_data(tc6, value, length);
+}
+
+static int oa_tc6_check_ctrl_write_reply(struct oa_tc6 *tc6, u8 size)
+{
+ u8 *tx_buf = tc6->spi_ctrl_tx_buf;
+ u8 *rx_buf = tc6->spi_ctrl_rx_buf;
+
+ rx_buf += OA_TC6_CTRL_IGNORED_SIZE;
+
+ /* The echoed control write must match with the one that was
+ * transmitted.
+ */
+ if (memcmp(tx_buf, rx_buf, size - OA_TC6_CTRL_IGNORED_SIZE))
+ return -EPROTO;
+
+ return 0;
+}
+
+static int oa_tc6_check_ctrl_read_reply(struct oa_tc6 *tc6, u8 size)
+{
+ u32 *rx_buf = tc6->spi_ctrl_rx_buf + OA_TC6_CTRL_IGNORED_SIZE;
+ u32 *tx_buf = tc6->spi_ctrl_tx_buf;
+
+ /* The echoed control read header must match with the one that was
+ * transmitted.
+ */
+ if (*tx_buf != *rx_buf)
+ return -EPROTO;
+
+ return 0;
+}
+
+static void oa_tc6_copy_ctrl_read_data(struct oa_tc6 *tc6, u32 value[],
+ u8 length)
+{
+ __be32 *rx_buf = tc6->spi_ctrl_rx_buf + OA_TC6_CTRL_IGNORED_SIZE +
+ OA_TC6_CTRL_HEADER_SIZE;
+
+ for (int i = 0; i < length; i++)
+ value[i] = be32_to_cpu(*rx_buf++);
+}
+
+static int oa_tc6_perform_ctrl(struct oa_tc6 *tc6, u32 address, u32 value[],
+ u8 length, enum oa_tc6_register_op reg_op)
+{
+ u16 size;
+ int ret;
+
+ /* Prepare control command and copy to SPI control buffer */
+ oa_tc6_prepare_ctrl_spi_buf(tc6, address, value, length, reg_op);
+
+ size = oa_tc6_calculate_ctrl_buf_size(length);
+
+ /* Perform SPI transfer */
+ ret = oa_tc6_spi_transfer(tc6, OA_TC6_CTRL_HEADER, size);
+ if (ret) {
+ dev_err(&tc6->spi->dev, "SPI transfer failed for control: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* Check echoed/received control write command reply for errors */
+ if (reg_op == OA_TC6_CTRL_REG_WRITE)
+ return oa_tc6_check_ctrl_write_reply(tc6, size);
+
+ /* Check echoed/received control read command reply for errors */
+ ret = oa_tc6_check_ctrl_read_reply(tc6, size);
+ if (ret)
+ return ret;
+
+ oa_tc6_copy_ctrl_read_data(tc6, value, length);
+
+ return 0;
+}
+
+/**
+ * oa_tc6_read_registers - function for reading multiple consecutive registers.
+ * @tc6: oa_tc6 struct.
+ * @address: address of the first register to be read in the MAC-PHY.
+ * @value: values to be read from the starting register address @address.
+ * @length: number of consecutive registers to be read from @address.
+ *
+ * Maximum of 128 consecutive registers can be read starting at @address.
+ *
+ * Return: 0 on success, otherwise a negative error code.
+ */
+int oa_tc6_read_registers(struct oa_tc6 *tc6, u32 address, u32 value[],
+ u8 length)
+{
+ int ret;
+
+ if (!length || length > OA_TC6_CTRL_MAX_REGISTERS) {
+ dev_err(&tc6->spi->dev, "Invalid register length parameter\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&tc6->spi_ctrl_lock);
+ ret = oa_tc6_perform_ctrl(tc6, address, value, length,
+ OA_TC6_CTRL_REG_READ);
+ mutex_unlock(&tc6->spi_ctrl_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(oa_tc6_read_registers);
+
+/**
+ * oa_tc6_read_register - function for reading a MAC-PHY register.
+ * @tc6: oa_tc6 struct.
+ * @address: register address of the MAC-PHY to be read.
+ * @value: value read from the @address register address of the MAC-PHY.
+ *
+ * Return: 0 on success, otherwise a negative error code.
+ */
+int oa_tc6_read_register(struct oa_tc6 *tc6, u32 address, u32 *value)
+{
+ return oa_tc6_read_registers(tc6, address, value, 1);
+}
+EXPORT_SYMBOL_GPL(oa_tc6_read_register);
+
+/**
+ * oa_tc6_write_registers - function for writing multiple consecutive registers.
+ * @tc6: oa_tc6 struct.
+ * @address: address of the first register to be written in the MAC-PHY.
+ * @value: values to be written from the starting register address @address.
+ * @length: number of consecutive registers to be written from @address.
+ *
+ * Maximum of 128 consecutive registers can be written starting at @address.
+ *
+ * Return: 0 on success, otherwise a negative error code.
+ */
+int oa_tc6_write_registers(struct oa_tc6 *tc6, u32 address, u32 value[],
+ u8 length)
+{
+ int ret;
+
+ if (!length || length > OA_TC6_CTRL_MAX_REGISTERS) {
+ dev_err(&tc6->spi->dev, "Invalid register length parameter\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&tc6->spi_ctrl_lock);
+ ret = oa_tc6_perform_ctrl(tc6, address, value, length,
+ OA_TC6_CTRL_REG_WRITE);
+ mutex_unlock(&tc6->spi_ctrl_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(oa_tc6_write_registers);
+
+/**
+ * oa_tc6_write_register - function for writing a MAC-PHY register.
+ * @tc6: oa_tc6 struct.
+ * @address: register address of the MAC-PHY to be written.
+ * @value: value to be written in the @address register address of the MAC-PHY.
+ *
+ * Return: 0 on success, otherwise a negative error code.
+ */
+int oa_tc6_write_register(struct oa_tc6 *tc6, u32 address, u32 value)
+{
+ return oa_tc6_write_registers(tc6, address, &value, 1);
+}
+EXPORT_SYMBOL_GPL(oa_tc6_write_register);
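
For context, a hedged sketch of how a MAC driver built on this framework could use the two exported accessors. The helper name example_set_config0_bit() is hypothetical and the open-coded 0x0004 address only mirrors OA_TC6_REG_CONFIG0 above; the read-modify-write pattern is the same one used by oa_tc6_zero_align_receive_frame_enable() later in this file.

    #include <linux/oa_tc6.h>

    /* Hypothetical helper in a MAC driver: set a bit in CONFIG0 using the
     * exported register accessors.
     */
    static int example_set_config0_bit(struct oa_tc6 *tc6, u32 bit)
    {
        u32 regval;
        int ret;

        ret = oa_tc6_read_register(tc6, 0x0004 /* CONFIG0 */, &regval);
        if (ret)
            return ret;

        regval |= bit;

        return oa_tc6_write_register(tc6, 0x0004 /* CONFIG0 */, regval);
    }
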
+
+static int oa_tc6_check_phy_reg_direct_access_capability(struct oa_tc6 *tc6)
+{
+ u32 regval;
+ int ret;
+
+ ret = oa_tc6_read_register(tc6, OA_TC6_REG_STDCAP, &regval);
+ if (ret)
+ return ret;
+
+ if (!(regval & STDCAP_DIRECT_PHY_REG_ACCESS))
+ return -ENODEV;
+
+ return 0;
+}
+
+static void oa_tc6_handle_link_change(struct net_device *netdev)
+{
+ phy_print_status(netdev->phydev);
+}
+
+static int oa_tc6_mdiobus_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct oa_tc6 *tc6 = bus->priv;
+ u32 regval;
+ int ret;
+
+ ret = oa_tc6_read_register(tc6, OA_TC6_PHY_STD_REG_ADDR_BASE |
+ (regnum & OA_TC6_PHY_STD_REG_ADDR_MASK),
+ &regval);
+ if (ret)
+ return ret;
+
+ return regval;
+}
+
+static int oa_tc6_mdiobus_write(struct mii_bus *bus, int addr, int regnum,
+ u16 val)
+{
+ struct oa_tc6 *tc6 = bus->priv;
+
+ return oa_tc6_write_register(tc6, OA_TC6_PHY_STD_REG_ADDR_BASE |
+ (regnum & OA_TC6_PHY_STD_REG_ADDR_MASK),
+ val);
+}
+
+static int oa_tc6_get_phy_c45_mms(int devnum)
+{
+ switch (devnum) {
+ case MDIO_MMD_PCS:
+ return OA_TC6_PHY_C45_PCS_MMS2;
+ case MDIO_MMD_PMAPMD:
+ return OA_TC6_PHY_C45_PMA_PMD_MMS3;
+ case MDIO_MMD_VEND2:
+ return OA_TC6_PHY_C45_VS_PLCA_MMS4;
+ case MDIO_MMD_AN:
+ return OA_TC6_PHY_C45_AUTO_NEG_MMS5;
+ case MDIO_MMD_POWER_UNIT:
+ return OA_TC6_PHY_C45_POWER_UNIT_MMS6;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int oa_tc6_mdiobus_read_c45(struct mii_bus *bus, int addr, int devnum,
+ int regnum)
+{
+ struct oa_tc6 *tc6 = bus->priv;
+ u32 regval;
+ int ret;
+
+ ret = oa_tc6_get_phy_c45_mms(devnum);
+ if (ret < 0)
+ return ret;
+
+ ret = oa_tc6_read_register(tc6, (ret << 16) | regnum, &regval);
+ if (ret)
+ return ret;
+
+ return regval;
+}
+
+static int oa_tc6_mdiobus_write_c45(struct mii_bus *bus, int addr, int devnum,
+ int regnum, u16 val)
+{
+ struct oa_tc6 *tc6 = bus->priv;
+ int ret;
+
+ ret = oa_tc6_get_phy_c45_mms(devnum);
+ if (ret < 0)
+ return ret;
+
+ return oa_tc6_write_register(tc6, (ret << 16) | regnum, val);
+}
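
To make the (mms << 16) | regnum composition above concrete, a standalone sketch with the MMS value taken from the table-6 mapping defined earlier in this file: a Clause 45 access to MMD 7 (auto-negotiation) register 0x0201 lands at control address 0x50201.

    #include <assert.h>
    #include <stdio.h>

    #define PHY_C45_AUTO_NEG_MMS5 5  /* MMD 7 maps to memory map selector 5 */

    int main(void)
    {
        unsigned int regnum = 0x0201;
        unsigned int addr = (PHY_C45_AUTO_NEG_MMS5 << 16) | regnum;

        /* Upper bits select the MMS, the lower 16 bits the register */
        assert(addr == 0x50201);
        printf("C45 MMD 7, reg 0x%04x -> control address 0x%05x\n",
               regnum, addr);
        return 0;
    }
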
+
+static int oa_tc6_mdiobus_register(struct oa_tc6 *tc6)
+{
+ int ret;
+
+ tc6->mdiobus = mdiobus_alloc();
+ if (!tc6->mdiobus) {
+ netdev_err(tc6->netdev, "MDIO bus alloc failed\n");
+ return -ENOMEM;
+ }
+
+ tc6->mdiobus->priv = tc6;
+ tc6->mdiobus->read = oa_tc6_mdiobus_read;
+ tc6->mdiobus->write = oa_tc6_mdiobus_write;
+ /* OPEN Alliance 10BASE-T1x compliant MAC-PHYs have both C22 and C45
+ * register spaces. If the PHY is discovered via the C22 bus protocol,
+ * phylib assumes the C22 protocol and always uses C22 indirect access to
+ * reach the C45 registers, because there is no clean separation between
+ * the C22/C45 register spaces and the C22/C45 MDIO bus protocols. As a
+ * result, direct C45 register access, which would save multiple SPI bus
+ * accesses, cannot be used. To support this feature, PHY drivers can set
+ * .read_mmd/.write_mmd in the PHY driver to call .read_c45/.write_c45.
+ * Ex: drivers/net/phy/microchip_t1s.c
+ */
+ tc6->mdiobus->read_c45 = oa_tc6_mdiobus_read_c45;
+ tc6->mdiobus->write_c45 = oa_tc6_mdiobus_write_c45;
+ tc6->mdiobus->name = "oa-tc6-mdiobus";
+ tc6->mdiobus->parent = tc6->dev;
+
+ snprintf(tc6->mdiobus->id, ARRAY_SIZE(tc6->mdiobus->id), "%s",
+ dev_name(&tc6->spi->dev));
+
+ ret = mdiobus_register(tc6->mdiobus);
+ if (ret) {
+ netdev_err(tc6->netdev, "Could not register MDIO bus\n");
+ mdiobus_free(tc6->mdiobus);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void oa_tc6_mdiobus_unregister(struct oa_tc6 *tc6)
+{
+ mdiobus_unregister(tc6->mdiobus);
+ mdiobus_free(tc6->mdiobus);
+}
+
+static int oa_tc6_phy_init(struct oa_tc6 *tc6)
+{
+ int ret;
+
+ ret = oa_tc6_check_phy_reg_direct_access_capability(tc6);
+ if (ret) {
+ netdev_err(tc6->netdev,
+ "Direct PHY register access is not supported by the MAC-PHY\n");
+ return ret;
+ }
+
+ ret = oa_tc6_mdiobus_register(tc6);
+ if (ret)
+ return ret;
+
+ tc6->phydev = phy_find_first(tc6->mdiobus);
+ if (!tc6->phydev) {
+ netdev_err(tc6->netdev, "No PHY found\n");
+ oa_tc6_mdiobus_unregister(tc6);
+ return -ENODEV;
+ }
+
+ tc6->phydev->is_internal = true;
+ ret = phy_connect_direct(tc6->netdev, tc6->phydev,
+ &oa_tc6_handle_link_change,
+ PHY_INTERFACE_MODE_INTERNAL);
+ if (ret) {
+ netdev_err(tc6->netdev, "Can't attach PHY to %s\n",
+ tc6->mdiobus->id);
+ oa_tc6_mdiobus_unregister(tc6);
+ return ret;
+ }
+
+ phy_attached_info(tc6->netdev->phydev);
+
+ return 0;
+}
+
+static void oa_tc6_phy_exit(struct oa_tc6 *tc6)
+{
+ phy_disconnect(tc6->phydev);
+ oa_tc6_mdiobus_unregister(tc6);
+}
+
+static int oa_tc6_read_status0(struct oa_tc6 *tc6)
+{
+ u32 regval;
+ int ret;
+
+ ret = oa_tc6_read_register(tc6, OA_TC6_REG_STATUS0, &regval);
+ if (ret) {
+ dev_err(&tc6->spi->dev, "STATUS0 register read failed: %d\n",
+ ret);
+ return 0;
+ }
+
+ return regval;
+}
+
+static int oa_tc6_sw_reset_macphy(struct oa_tc6 *tc6)
+{
+ u32 regval = RESET_SWRESET;
+ int ret;
+
+ ret = oa_tc6_write_register(tc6, OA_TC6_REG_RESET, regval);
+ if (ret)
+ return ret;
+
+ /* Poll for soft reset complete for every 1ms until 1s timeout */
+ ret = readx_poll_timeout(oa_tc6_read_status0, tc6, regval,
+ regval & STATUS0_RESETC,
+ STATUS0_RESETC_POLL_DELAY,
+ STATUS0_RESETC_POLL_TIMEOUT);
+ if (ret)
+ return -ENODEV;
+
+ /* Clear the reset complete status */
+ return oa_tc6_write_register(tc6, OA_TC6_REG_STATUS0, regval);
+}
+
+static int oa_tc6_unmask_macphy_error_interrupts(struct oa_tc6 *tc6)
+{
+ u32 regval;
+ int ret;
+
+ ret = oa_tc6_read_register(tc6, OA_TC6_REG_INT_MASK0, &regval);
+ if (ret)
+ return ret;
+
+ regval &= ~(INT_MASK0_TX_PROTOCOL_ERR_MASK |
+ INT_MASK0_RX_BUFFER_OVERFLOW_ERR_MASK |
+ INT_MASK0_LOSS_OF_FRAME_ERR_MASK |
+ INT_MASK0_HEADER_ERR_MASK);
+
+ return oa_tc6_write_register(tc6, OA_TC6_REG_INT_MASK0, regval);
+}
+
+static int oa_tc6_enable_data_transfer(struct oa_tc6 *tc6)
+{
+ u32 value;
+ int ret;
+
+ ret = oa_tc6_read_register(tc6, OA_TC6_REG_CONFIG0, &value);
+ if (ret)
+ return ret;
+
+ /* Enable configuration synchronization for data transfer */
+ value |= CONFIG0_SYNC;
+
+ return oa_tc6_write_register(tc6, OA_TC6_REG_CONFIG0, value);
+}
+
+static void oa_tc6_cleanup_ongoing_rx_skb(struct oa_tc6 *tc6)
+{
+ if (tc6->rx_skb) {
+ tc6->netdev->stats.rx_dropped++;
+ kfree_skb(tc6->rx_skb);
+ tc6->rx_skb = NULL;
+ }
+}
+
+static void oa_tc6_cleanup_ongoing_tx_skb(struct oa_tc6 *tc6)
+{
+ if (tc6->ongoing_tx_skb) {
+ tc6->netdev->stats.tx_dropped++;
+ kfree_skb(tc6->ongoing_tx_skb);
+ tc6->ongoing_tx_skb = NULL;
+ }
+}
+
+static int oa_tc6_process_extended_status(struct oa_tc6 *tc6)
+{
+ u32 value;
+ int ret;
+
+ ret = oa_tc6_read_register(tc6, OA_TC6_REG_STATUS0, &value);
+ if (ret) {
+ netdev_err(tc6->netdev, "STATUS0 register read failed: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* Clear the error interrupts status */
+ ret = oa_tc6_write_register(tc6, OA_TC6_REG_STATUS0, value);
+ if (ret) {
+ netdev_err(tc6->netdev, "STATUS0 register write failed: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (FIELD_GET(STATUS0_RX_BUFFER_OVERFLOW_ERROR, value)) {
+ tc6->rx_buf_overflow = true;
+ oa_tc6_cleanup_ongoing_rx_skb(tc6);
+ net_err_ratelimited("%s: Receive buffer overflow error\n",
+ tc6->netdev->name);
+ return -EAGAIN;
+ }
+ if (FIELD_GET(STATUS0_TX_PROTOCOL_ERROR, value)) {
+ netdev_err(tc6->netdev, "Transmit protocol error\n");
+ return -ENODEV;
+ }
+ /* TODO: Currently loss of frame and header errors are treated as
+ * non-recoverable errors. They will be handled in the next version.
+ */
+ if (FIELD_GET(STATUS0_LOSS_OF_FRAME_ERROR, value)) {
+ netdev_err(tc6->netdev, "Loss of frame error\n");
+ return -ENODEV;
+ }
+ if (FIELD_GET(STATUS0_HEADER_ERROR, value)) {
+ netdev_err(tc6->netdev, "Header error\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int oa_tc6_process_rx_chunk_footer(struct oa_tc6 *tc6, u32 footer)
+{
+ /* Process rx chunk footer for the following,
+ * 1. tx credits
+ * 2. errors if any from MAC-PHY
+ * 3. receive chunks available
+ */
+ tc6->tx_credits = FIELD_GET(OA_TC6_DATA_FOOTER_TX_CREDITS, footer);
+ tc6->rx_chunks_available = FIELD_GET(OA_TC6_DATA_FOOTER_RX_CHUNKS,
+ footer);
+
+ if (FIELD_GET(OA_TC6_DATA_FOOTER_EXTENDED_STS, footer)) {
+ int ret = oa_tc6_process_extended_status(tc6);
+
+ if (ret)
+ return ret;
+ }
+
+ /* TODO: Currently received header bad and configuration unsync errors
+ * are treated as non-recoverable errors. They will be handled in the
+ * next version.
+ */
+ if (FIELD_GET(OA_TC6_DATA_FOOTER_RXD_HEADER_BAD, footer)) {
+ netdev_err(tc6->netdev, "Rxd header bad error\n");
+ return -ENODEV;
+ }
+
+ if (!FIELD_GET(OA_TC6_DATA_FOOTER_CONFIG_SYNC, footer)) {
+ netdev_err(tc6->netdev, "Config unsync error\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
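
The footer bookkeeping above can be illustrated with a standalone sketch; the shift/mask values are taken from the OA_TC6_DATA_FOOTER_* defines earlier in this file, and the example footer word is made up.

    #include <stdint.h>
    #include <stdio.h>

    /* Field positions from the data footer layout defined above */
    #define FOOTER_RX_CHUNKS(f)   (((f) >> 24) & 0x1f)  /* bits 28:24 */
    #define FOOTER_DATA_VALID(f)  (((f) >> 21) & 0x1)   /* bit 21     */
    #define FOOTER_TX_CREDITS(f)  (((f) >> 1) & 0x1f)   /* bits 5:1   */

    int main(void)
    {
        /* Example footer: DV=1, 3 rx chunks pending, 12 tx credits */
        uint32_t footer = (3u << 24) | (1u << 21) | (12u << 1);

        printf("data valid: %u, rx chunks: %u, tx credits: %u\n",
               FOOTER_DATA_VALID(footer), FOOTER_RX_CHUNKS(footer),
               FOOTER_TX_CREDITS(footer));
        return 0;
    }
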
+
+static void oa_tc6_submit_rx_skb(struct oa_tc6 *tc6)
+{
+ tc6->rx_skb->protocol = eth_type_trans(tc6->rx_skb, tc6->netdev);
+ tc6->netdev->stats.rx_packets++;
+ tc6->netdev->stats.rx_bytes += tc6->rx_skb->len;
+
+ netif_rx(tc6->rx_skb);
+
+ tc6->rx_skb = NULL;
+}
+
+static void oa_tc6_update_rx_skb(struct oa_tc6 *tc6, u8 *payload, u8 length)
+{
+ memcpy(skb_put(tc6->rx_skb, length), payload, length);
+}
+
+static int oa_tc6_allocate_rx_skb(struct oa_tc6 *tc6)
+{
+ tc6->rx_skb = netdev_alloc_skb_ip_align(tc6->netdev, tc6->netdev->mtu +
+ ETH_HLEN + ETH_FCS_LEN);
+ if (!tc6->rx_skb) {
+ tc6->netdev->stats.rx_dropped++;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int oa_tc6_prcs_complete_rx_frame(struct oa_tc6 *tc6, u8 *payload,
+ u16 size)
+{
+ int ret;
+
+ ret = oa_tc6_allocate_rx_skb(tc6);
+ if (ret)
+ return ret;
+
+ oa_tc6_update_rx_skb(tc6, payload, size);
+
+ oa_tc6_submit_rx_skb(tc6);
+
+ return 0;
+}
+
+static int oa_tc6_prcs_rx_frame_start(struct oa_tc6 *tc6, u8 *payload, u16 size)
+{
+ int ret;
+
+ ret = oa_tc6_allocate_rx_skb(tc6);
+ if (ret)
+ return ret;
+
+ oa_tc6_update_rx_skb(tc6, payload, size);
+
+ return 0;
+}
+
+static void oa_tc6_prcs_rx_frame_end(struct oa_tc6 *tc6, u8 *payload, u16 size)
+{
+ oa_tc6_update_rx_skb(tc6, payload, size);
+
+ oa_tc6_submit_rx_skb(tc6);
+}
+
+static void oa_tc6_prcs_ongoing_rx_frame(struct oa_tc6 *tc6, u8 *payload,
+ u32 footer)
+{
+ oa_tc6_update_rx_skb(tc6, payload, OA_TC6_CHUNK_PAYLOAD_SIZE);
+}
+
+static int oa_tc6_prcs_rx_chunk_payload(struct oa_tc6 *tc6, u8 *data,
+ u32 footer)
+{
+ u8 start_byte_offset = FIELD_GET(OA_TC6_DATA_FOOTER_START_WORD_OFFSET,
+ footer) * sizeof(u32);
+ u8 end_byte_offset = FIELD_GET(OA_TC6_DATA_FOOTER_END_BYTE_OFFSET,
+ footer);
+ bool start_valid = FIELD_GET(OA_TC6_DATA_FOOTER_START_VALID, footer);
+ bool end_valid = FIELD_GET(OA_TC6_DATA_FOOTER_END_VALID, footer);
+ u16 size;
+
+ /* Restart the new rx frame after receiving rx buffer overflow error */
+ if (start_valid && tc6->rx_buf_overflow)
+ tc6->rx_buf_overflow = false;
+
+ if (tc6->rx_buf_overflow)
+ return 0;
+
+ /* Process the chunk with complete rx frame */
+ if (start_valid && end_valid && start_byte_offset < end_byte_offset) {
+ size = end_byte_offset + 1 - start_byte_offset;
+ return oa_tc6_prcs_complete_rx_frame(tc6,
+ &data[start_byte_offset],
+ size);
+ }
+
+ /* Process the chunk with only rx frame start */
+ if (start_valid && !end_valid) {
+ size = OA_TC6_CHUNK_PAYLOAD_SIZE - start_byte_offset;
+ return oa_tc6_prcs_rx_frame_start(tc6,
+ &data[start_byte_offset],
+ size);
+ }
+
+ /* Process the chunk with only rx frame end */
+ if (end_valid && !start_valid) {
+ size = end_byte_offset + 1;
+ oa_tc6_prcs_rx_frame_end(tc6, data, size);
+ return 0;
+ }
+
+ /* Process the chunk with previous rx frame end and next rx frame
+ * start.
+ */
+ if (start_valid && end_valid && start_byte_offset > end_byte_offset) {
+ /* After an rx buffer overflow error has been received, it is
+ * possible to get the end of a previously incomplete rx frame
+ * together with the start of a new rx frame.
+ */
+ if (tc6->rx_skb) {
+ size = end_byte_offset + 1;
+ oa_tc6_prcs_rx_frame_end(tc6, data, size);
+ }
+ size = OA_TC6_CHUNK_PAYLOAD_SIZE - start_byte_offset;
+ return oa_tc6_prcs_rx_frame_start(tc6,
+ &data[start_byte_offset],
+ size);
+ }
+
+ /* Process the chunk with ongoing rx frame data */
+ oa_tc6_prcs_ongoing_rx_frame(tc6, data, footer);
+
+ return 0;
+}
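
The size arithmetic in the four cases above is compact but easy to mis-read. A small host-side sketch with assumed offsets (64-byte chunk payload, word-granular start offset, byte-granular inclusive end offset):

    #include <assert.h>
    #include <stdio.h>

    #define CHUNK_PAYLOAD_SIZE 64

    int main(void)
    {
        /* Complete frame within one chunk: starts at word 1 (byte 4) and
         * ends at byte offset 45 -> 45 + 1 - 4 = 42 bytes of frame data.
         */
        unsigned int start_byte_offset = 1 * 4; /* start word offset * 4 */
        unsigned int end_byte_offset = 45;

        assert(end_byte_offset + 1 - start_byte_offset == 42);

        /* Frame start only: the rest of the chunk belongs to the frame */
        assert(CHUNK_PAYLOAD_SIZE - start_byte_offset == 60);

        /* Frame end only: bytes 0..end_byte_offset close out the frame */
        assert(end_byte_offset + 1 == 46);

        printf("rx chunk offset arithmetic ok\n");
        return 0;
    }
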
+
+static u32 oa_tc6_get_rx_chunk_footer(struct oa_tc6 *tc6, u16 footer_offset)
+{
+ u8 *rx_buf = tc6->spi_data_rx_buf;
+ __be32 footer;
+
+ footer = *((__be32 *)&rx_buf[footer_offset]);
+
+ return be32_to_cpu(footer);
+}
+
+static int oa_tc6_process_spi_data_rx_buf(struct oa_tc6 *tc6, u16 length)
+{
+ u16 no_of_rx_chunks = length / OA_TC6_CHUNK_SIZE;
+ u32 footer;
+ int ret;
+
+ /* All the rx chunks in the receive SPI data buffer are examined here */
+ for (int i = 0; i < no_of_rx_chunks; i++) {
+ /* The last 4 bytes of each received chunk contain the footer info */
+ footer = oa_tc6_get_rx_chunk_footer(tc6, i * OA_TC6_CHUNK_SIZE +
+ OA_TC6_CHUNK_PAYLOAD_SIZE);
+
+ ret = oa_tc6_process_rx_chunk_footer(tc6, footer);
+ if (ret)
+ return ret;
+
+ /* If the chunk contains valid data, process it for the
+ * information needed to determine the validity and the
+ * location of the receive frame data.
+ */
+ if (FIELD_GET(OA_TC6_DATA_FOOTER_DATA_VALID, footer)) {
+ u8 *payload = tc6->spi_data_rx_buf + i *
+ OA_TC6_CHUNK_SIZE;
+
+ ret = oa_tc6_prcs_rx_chunk_payload(tc6, payload,
+ footer);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static __be32 oa_tc6_prepare_data_header(bool data_valid, bool start_valid,
+ bool end_valid, u8 end_byte_offset)
+{
+ u32 header = FIELD_PREP(OA_TC6_DATA_HEADER_DATA_NOT_CTRL,
+ OA_TC6_DATA_HEADER) |
+ FIELD_PREP(OA_TC6_DATA_HEADER_DATA_VALID, data_valid) |
+ FIELD_PREP(OA_TC6_DATA_HEADER_START_VALID, start_valid) |
+ FIELD_PREP(OA_TC6_DATA_HEADER_END_VALID, end_valid) |
+ FIELD_PREP(OA_TC6_DATA_HEADER_END_BYTE_OFFSET,
+ end_byte_offset);
+
+ header |= FIELD_PREP(OA_TC6_DATA_HEADER_PARITY,
+ oa_tc6_get_parity(header));
+
+ return cpu_to_be32(header);
+}
+
+static void oa_tc6_add_tx_skb_to_spi_buf(struct oa_tc6 *tc6)
+{
+ enum oa_tc6_data_end_valid_info end_valid = OA_TC6_DATA_END_INVALID;
+ __be32 *tx_buf = tc6->spi_data_tx_buf + tc6->spi_data_tx_buf_offset;
+ u16 remaining_len = tc6->ongoing_tx_skb->len - tc6->tx_skb_offset;
+ u8 *tx_skb_data = tc6->ongoing_tx_skb->data + tc6->tx_skb_offset;
+ enum oa_tc6_data_start_valid_info start_valid;
+ u8 end_byte_offset = 0;
+ u16 length_to_copy;
+
+ /* The initial value is assigned here to keep the declaration line
+ * within 80 characters.
+ */
+ start_valid = OA_TC6_DATA_START_INVALID;
+
+ /* Set start valid if the current tx chunk contains the start of the tx
+ * ethernet frame.
+ */
+ if (!tc6->tx_skb_offset)
+ start_valid = OA_TC6_DATA_START_VALID;
+
+ /* If the remaining tx skb length is more than the chunk payload size of
+ * 64 bytes then copy only 64 bytes and leave the ongoing tx skb for
+ * next tx chunk.
+ */
+ length_to_copy = min_t(u16, remaining_len, OA_TC6_CHUNK_PAYLOAD_SIZE);
+
+ /* Copy the tx skb data to the tx chunk payload buffer */
+ memcpy(tx_buf + 1, tx_skb_data, length_to_copy);
+ tc6->tx_skb_offset += length_to_copy;
+
+ /* Set end valid if the current tx chunk contains the end of the tx
+ * ethernet frame.
+ */
+ if (tc6->ongoing_tx_skb->len == tc6->tx_skb_offset) {
+ end_valid = OA_TC6_DATA_END_VALID;
+ end_byte_offset = length_to_copy - 1;
+ tc6->tx_skb_offset = 0;
+ tc6->netdev->stats.tx_bytes += tc6->ongoing_tx_skb->len;
+ tc6->netdev->stats.tx_packets++;
+ kfree_skb(tc6->ongoing_tx_skb);
+ tc6->ongoing_tx_skb = NULL;
+ }
+
+ *tx_buf = oa_tc6_prepare_data_header(OA_TC6_DATA_VALID, start_valid,
+ end_valid, end_byte_offset);
+ tc6->spi_data_tx_buf_offset += OA_TC6_CHUNK_SIZE;
+}
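
As a worked example of the chunking above (assuming the 64-byte chunk payload and 4-byte data header defined in this file), a 150-byte frame is split into 64 + 64 + 22 payload bytes, i.e. three 68-byte chunks on the SPI bus:

    #include <stdio.h>

    #define CHUNK_PAYLOAD_SIZE 64
    #define DATA_HEADER_SIZE   4
    #define CHUNK_SIZE         (DATA_HEADER_SIZE + CHUNK_PAYLOAD_SIZE)

    int main(void)
    {
        unsigned int skb_len = 150, offset = 0, chunks = 0;

        while (offset < skb_len) {
            unsigned int copy = skb_len - offset;

            if (copy > CHUNK_PAYLOAD_SIZE)
                copy = CHUNK_PAYLOAD_SIZE;
            chunks++;
            /* start_valid on the first chunk, end_valid on the last one;
             * end_byte_offset of the last chunk would be copy - 1 = 21.
             */
            printf("chunk %u: %u payload bytes, start=%d end=%d\n",
                   chunks, copy, offset == 0, offset + copy == skb_len);
            offset += copy;
        }
        printf("%u chunks, %u bytes on the SPI bus\n",
               chunks, chunks * CHUNK_SIZE);
        return 0;
    }
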
+
+static u16 oa_tc6_prepare_spi_tx_buf_for_tx_skbs(struct oa_tc6 *tc6)
+{
+ u16 used_tx_credits;
+
+ /* Get tx skbs and convert them into tx chunks based on the tx credits
+ * available.
+ */
+ for (used_tx_credits = 0; used_tx_credits < tc6->tx_credits;
+ used_tx_credits++) {
+ if (!tc6->ongoing_tx_skb) {
+ spin_lock_bh(&tc6->tx_skb_lock);
+ tc6->ongoing_tx_skb = tc6->waiting_tx_skb;
+ tc6->waiting_tx_skb = NULL;
+ spin_unlock_bh(&tc6->tx_skb_lock);
+ }
+ if (!tc6->ongoing_tx_skb)
+ break;
+ oa_tc6_add_tx_skb_to_spi_buf(tc6);
+ }
+
+ return used_tx_credits * OA_TC6_CHUNK_SIZE;
+}
+
+static void oa_tc6_add_empty_chunks_to_spi_buf(struct oa_tc6 *tc6,
+ u16 needed_empty_chunks)
+{
+ __be32 header;
+
+ header = oa_tc6_prepare_data_header(OA_TC6_DATA_INVALID,
+ OA_TC6_DATA_START_INVALID,
+ OA_TC6_DATA_END_INVALID, 0);
+
+ while (needed_empty_chunks--) {
+ __be32 *tx_buf = tc6->spi_data_tx_buf +
+ tc6->spi_data_tx_buf_offset;
+
+ *tx_buf = header;
+ tc6->spi_data_tx_buf_offset += OA_TC6_CHUNK_SIZE;
+ }
+}
+
+static u16 oa_tc6_prepare_spi_tx_buf_for_rx_chunks(struct oa_tc6 *tc6, u16 len)
+{
+ u16 tx_chunks = len / OA_TC6_CHUNK_SIZE;
+ u16 needed_empty_chunks;
+
+ /* If there are more chunks to receive than to transmit, we need to add
+ * enough empty tx chunks to allow the reception of the excess rx
+ * chunks.
+ */
+ if (tx_chunks >= tc6->rx_chunks_available)
+ return len;
+
+ needed_empty_chunks = tc6->rx_chunks_available - tx_chunks;
+
+ oa_tc6_add_empty_chunks_to_spi_buf(tc6, needed_empty_chunks);
+
+ return needed_empty_chunks * OA_TC6_CHUNK_SIZE + len;
+}
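
A quick numeric check of the padding logic above: with 2 tx chunks prepared but 5 rx chunks reported available, 3 empty chunks are appended so the full-duplex SPI transfer is long enough to clock in everything the MAC-PHY has queued.

    #include <assert.h>
    #include <stdio.h>

    #define CHUNK_SIZE 68  /* 4-byte header + 64-byte payload */

    static unsigned int pad_for_rx(unsigned int tx_len, unsigned int rx_chunks)
    {
        unsigned int tx_chunks = tx_len / CHUNK_SIZE;

        if (tx_chunks >= rx_chunks)
            return tx_len;
        return (rx_chunks - tx_chunks) * CHUNK_SIZE + tx_len;
    }

    int main(void)
    {
        /* 2 tx chunks prepared, 5 rx chunks pending -> 5 chunks on the wire */
        assert(pad_for_rx(2 * CHUNK_SIZE, 5) == 5 * CHUNK_SIZE);
        /* Enough tx chunks already -> nothing is added */
        assert(pad_for_rx(6 * CHUNK_SIZE, 5) == 6 * CHUNK_SIZE);
        printf("empty chunk padding ok\n");
        return 0;
    }
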
+
+static int oa_tc6_try_spi_transfer(struct oa_tc6 *tc6)
+{
+ int ret;
+
+ while (true) {
+ u16 spi_len = 0;
+
+ tc6->spi_data_tx_buf_offset = 0;
+
+ if (tc6->ongoing_tx_skb || tc6->waiting_tx_skb)
+ spi_len = oa_tc6_prepare_spi_tx_buf_for_tx_skbs(tc6);
+
+ spi_len = oa_tc6_prepare_spi_tx_buf_for_rx_chunks(tc6, spi_len);
+
+ if (tc6->int_flag) {
+ tc6->int_flag = false;
+ if (spi_len == 0) {
+ oa_tc6_add_empty_chunks_to_spi_buf(tc6, 1);
+ spi_len = OA_TC6_CHUNK_SIZE;
+ }
+ }
+
+ if (spi_len == 0)
+ break;
+
+ ret = oa_tc6_spi_transfer(tc6, OA_TC6_DATA_HEADER, spi_len);
+ if (ret) {
+ netdev_err(tc6->netdev, "SPI data transfer failed: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = oa_tc6_process_spi_data_rx_buf(tc6, spi_len);
+ if (ret) {
+ if (ret == -EAGAIN)
+ continue;
+
+ oa_tc6_cleanup_ongoing_tx_skb(tc6);
+ oa_tc6_cleanup_ongoing_rx_skb(tc6);
+ netdev_err(tc6->netdev, "Device error: %d\n", ret);
+ return ret;
+ }
+
+ if (!tc6->waiting_tx_skb && netif_queue_stopped(tc6->netdev))
+ netif_wake_queue(tc6->netdev);
+ }
+
+ return 0;
+}
+
+static int oa_tc6_spi_thread_handler(void *data)
+{
+ struct oa_tc6 *tc6 = data;
+ int ret;
+
+ while (likely(!kthread_should_stop())) {
+ /* This kthread will be woken up if there is a tx skb or a MAC-PHY
+ * interrupt, to perform the SPI transfer of tx chunks.
+ */
+ wait_event_interruptible(tc6->spi_wq, tc6->int_flag ||
+ (tc6->waiting_tx_skb &&
+ tc6->tx_credits) ||
+ kthread_should_stop());
+
+ if (kthread_should_stop())
+ break;
+
+ ret = oa_tc6_try_spi_transfer(tc6);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int oa_tc6_update_buffer_status_from_register(struct oa_tc6 *tc6)
+{
+ u32 value;
+ int ret;
+
+ /* Initially the tx credits and available rx chunks are read from the
+ * register, as no data transfer has been performed yet. Later they are
+ * updated from the rx footer.
+ */
+ ret = oa_tc6_read_register(tc6, OA_TC6_REG_BUFFER_STATUS, &value);
+ if (ret)
+ return ret;
+
+ tc6->tx_credits = FIELD_GET(BUFFER_STATUS_TX_CREDITS_AVAILABLE, value);
+ tc6->rx_chunks_available = FIELD_GET(BUFFER_STATUS_RX_CHUNKS_AVAILABLE,
+ value);
+
+ return 0;
+}
+
+static irqreturn_t oa_tc6_macphy_isr(int irq, void *data)
+{
+ struct oa_tc6 *tc6 = data;
+
+ /* MAC-PHY interrupt can occur for the following reasons.
+ * - availability of tx credits if it was 0 before and not reported in
+ * the previous rx footer.
+ * - availability of rx chunks if it was 0 before and not reported in
+ * the previous rx footer.
+ * - extended status event not reported in the previous rx footer.
+ */
+ tc6->int_flag = true;
+ /* Wake spi kthread to perform spi transfer */
+ wake_up_interruptible(&tc6->spi_wq);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * oa_tc6_zero_align_receive_frame_enable - function to enable zero align
+ * receive frame feature.
+ * @tc6: oa_tc6 struct.
+ *
+ * Return: 0 on success, otherwise a negative error code.
+ */
+int oa_tc6_zero_align_receive_frame_enable(struct oa_tc6 *tc6)
+{
+ u32 regval;
+ int ret;
+
+ ret = oa_tc6_read_register(tc6, OA_TC6_REG_CONFIG0, &regval);
+ if (ret)
+ return ret;
+
+ /* Set Zero-Align Receive Frame Enable */
+ regval |= CONFIG0_ZARFE_ENABLE;
+
+ return oa_tc6_write_register(tc6, OA_TC6_REG_CONFIG0, regval);
+}
+EXPORT_SYMBOL_GPL(oa_tc6_zero_align_receive_frame_enable);
+
+/**
+ * oa_tc6_start_xmit - function for sending the tx skb which contains the
+ * ethernet frame.
+ * @tc6: oa_tc6 struct.
+ * @skb: socket buffer in which the ethernet frame is stored.
+ *
+ * Return: NETDEV_TX_OK if the transmit skb was accepted for transmission,
+ * otherwise NETDEV_TX_BUSY.
+ */
+netdev_tx_t oa_tc6_start_xmit(struct oa_tc6 *tc6, struct sk_buff *skb)
+{
+ if (tc6->waiting_tx_skb) {
+ netif_stop_queue(tc6->netdev);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (skb_linearize(skb)) {
+ dev_kfree_skb_any(skb);
+ tc6->netdev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ spin_lock_bh(&tc6->tx_skb_lock);
+ tc6->waiting_tx_skb = skb;
+ spin_unlock_bh(&tc6->tx_skb_lock);
+
+ /* Wake spi kthread to perform spi transfer */
+ wake_up_interruptible(&tc6->spi_wq);
+
+ return NETDEV_TX_OK;
+}
+EXPORT_SYMBOL_GPL(oa_tc6_start_xmit);
+
+/**
+ * oa_tc6_init - allocates and initializes oa_tc6 structure.
+ * @spi: device with which data will be exchanged.
+ * @netdev: network device interface structure.
+ *
+ * Return: pointer to the oa_tc6 structure if the MAC-PHY initialization
+ * succeeds, otherwise NULL.
+ */
+struct oa_tc6 *oa_tc6_init(struct spi_device *spi, struct net_device *netdev)
+{
+ struct oa_tc6 *tc6;
+ int ret;
+
+ tc6 = devm_kzalloc(&spi->dev, sizeof(*tc6), GFP_KERNEL);
+ if (!tc6)
+ return NULL;
+
+ tc6->spi = spi;
+ tc6->netdev = netdev;
+ SET_NETDEV_DEV(netdev, &spi->dev);
+ mutex_init(&tc6->spi_ctrl_lock);
+ spin_lock_init(&tc6->tx_skb_lock);
+
+ /* Set the SPI controller to pump at realtime priority */
+ tc6->spi->rt = true;
+ if (spi_setup(tc6->spi) < 0)
+ return NULL;
+
+ tc6->spi_ctrl_tx_buf = devm_kzalloc(&tc6->spi->dev,
+ OA_TC6_CTRL_SPI_BUF_SIZE,
+ GFP_KERNEL);
+ if (!tc6->spi_ctrl_tx_buf)
+ return NULL;
+
+ tc6->spi_ctrl_rx_buf = devm_kzalloc(&tc6->spi->dev,
+ OA_TC6_CTRL_SPI_BUF_SIZE,
+ GFP_KERNEL);
+ if (!tc6->spi_ctrl_rx_buf)
+ return NULL;
+
+ tc6->spi_data_tx_buf = devm_kzalloc(&tc6->spi->dev,
+ OA_TC6_SPI_DATA_BUF_SIZE,
+ GFP_KERNEL);
+ if (!tc6->spi_data_tx_buf)
+ return NULL;
+
+ tc6->spi_data_rx_buf = devm_kzalloc(&tc6->spi->dev,
+ OA_TC6_SPI_DATA_BUF_SIZE,
+ GFP_KERNEL);
+ if (!tc6->spi_data_rx_buf)
+ return NULL;
+
+ ret = oa_tc6_sw_reset_macphy(tc6);
+ if (ret) {
+ dev_err(&tc6->spi->dev,
+ "MAC-PHY software reset failed: %d\n", ret);
+ return NULL;
+ }
+
+ ret = oa_tc6_unmask_macphy_error_interrupts(tc6);
+ if (ret) {
+ dev_err(&tc6->spi->dev,
+ "MAC-PHY error interrupts unmask failed: %d\n", ret);
+ return NULL;
+ }
+
+ ret = oa_tc6_phy_init(tc6);
+ if (ret) {
+ dev_err(&tc6->spi->dev,
+ "MAC internal PHY initialization failed: %d\n", ret);
+ return NULL;
+ }
+
+ ret = oa_tc6_enable_data_transfer(tc6);
+ if (ret) {
+ dev_err(&tc6->spi->dev, "Failed to enable data transfer: %d\n",
+ ret);
+ goto phy_exit;
+ }
+
+ ret = oa_tc6_update_buffer_status_from_register(tc6);
+ if (ret) {
+ dev_err(&tc6->spi->dev,
+ "Failed to update buffer status: %d\n", ret);
+ goto phy_exit;
+ }
+
+ init_waitqueue_head(&tc6->spi_wq);
+
+ tc6->spi_thread = kthread_run(oa_tc6_spi_thread_handler, tc6,
+ "oa-tc6-spi-thread");
+ if (IS_ERR(tc6->spi_thread)) {
+ dev_err(&tc6->spi->dev, "Failed to create SPI thread\n");
+ goto phy_exit;
+ }
+
+ sched_set_fifo(tc6->spi_thread);
+
+ ret = devm_request_irq(&tc6->spi->dev, tc6->spi->irq, oa_tc6_macphy_isr,
+ IRQF_TRIGGER_FALLING, dev_name(&tc6->spi->dev),
+ tc6);
+ if (ret) {
+ dev_err(&tc6->spi->dev, "Failed to request macphy isr %d\n",
+ ret);
+ goto kthread_stop;
+ }
+
+ /* oa_tc6_sw_reset_macphy() resets the MAC-PHY and clears the reset
+ * complete status. The IRQ is also asserted on reset completion and
+ * remains asserted until the MAC-PHY receives a data chunk, so
+ * transmitting an empty data chunk deasserts the IRQ. Refer to
+ * sections 7.7 and 9.2.8.8 in the OPEN Alliance specification.
+ */
+ tc6->int_flag = true;
+ wake_up_interruptible(&tc6->spi_wq);
+
+ return tc6;
+
+kthread_stop:
+ kthread_stop(tc6->spi_thread);
+phy_exit:
+ oa_tc6_phy_exit(tc6);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(oa_tc6_init);
+
+/**
+ * oa_tc6_exit - exit function.
+ * @tc6: oa_tc6 struct.
+ */
+void oa_tc6_exit(struct oa_tc6 *tc6)
+{
+ oa_tc6_phy_exit(tc6);
+ kthread_stop(tc6->spi_thread);
+ dev_kfree_skb_any(tc6->ongoing_tx_skb);
+ dev_kfree_skb_any(tc6->waiting_tx_skb);
+ dev_kfree_skb_any(tc6->rx_skb);
+}
+EXPORT_SYMBOL_GPL(oa_tc6_exit);
+
+MODULE_DESCRIPTION("OPEN Alliance 10BASE‑T1x MAC‑PHY Serial Interface Lib");
+MODULE_AUTHOR("Parthiban Veerasooran <parthiban.veerasooran@microchip.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 28b7cec485ef..62f05f4569b1 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -198,23 +198,21 @@ pch_tx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb)
pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED);
}
-static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+static int pch_gbe_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config cfg;
struct pch_gbe_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev;
u8 station[20];
- if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
- return -EFAULT;
-
/* Get ieee1588's dev information */
pdev = adapter->ptp_pdev;
- if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
+ if (cfg->tx_type != HWTSTAMP_TX_OFF && cfg->tx_type != HWTSTAMP_TX_ON)
return -ERANGE;
- switch (cfg.rx_filter) {
+ switch (cfg->rx_filter) {
case HWTSTAMP_FILTER_NONE:
adapter->hwts_rx_en = 0;
break;
@@ -223,17 +221,17 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
pch_ch_control_write(pdev, SLAVE_MODE | CAP_MODE0);
break;
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- adapter->hwts_rx_en = 1;
+ adapter->hwts_rx_en = cfg->rx_filter;
pch_ch_control_write(pdev, MASTER_MODE | CAP_MODE0);
break;
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- adapter->hwts_rx_en = 1;
+ adapter->hwts_rx_en = cfg->rx_filter;
pch_ch_control_write(pdev, V2_MODE | CAP_MODE2);
strcpy(station, PTP_L4_MULTICAST_SA);
pch_set_station_address(station, pdev);
break;
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- adapter->hwts_rx_en = 1;
+ adapter->hwts_rx_en = cfg->rx_filter;
pch_ch_control_write(pdev, V2_MODE | CAP_MODE2);
strcpy(station, PTP_L2_MULTICAST_SA);
pch_set_station_address(station, pdev);
@@ -242,12 +240,23 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
return -ERANGE;
}
- adapter->hwts_tx_en = cfg.tx_type == HWTSTAMP_TX_ON;
+ adapter->hwts_tx_en = cfg->tx_type == HWTSTAMP_TX_ON;
/* Clear out any old time stamps. */
pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED);
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+ return 0;
+}
+
+static int pch_gbe_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *cfg)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+
+ cfg->tx_type = adapter->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ cfg->rx_filter = adapter->hwts_rx_en;
+
+ return 0;
}
static inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
@@ -1014,8 +1023,8 @@ static void pch_gbe_set_mode(struct pch_gbe_adapter *adapter, u16 speed,
*/
static void pch_gbe_watchdog(struct timer_list *t)
{
- struct pch_gbe_adapter *adapter = from_timer(adapter, t,
- watchdog_timer);
+ struct pch_gbe_adapter *adapter = timer_container_of(adapter, t,
+ watchdog_timer);
struct net_device *netdev = adapter->netdev;
struct pch_gbe_hw *hw = &adapter->hw;
@@ -1916,7 +1925,7 @@ void pch_gbe_down(struct pch_gbe_adapter *adapter)
pch_gbe_irq_disable(adapter);
pch_gbe_free_irq(adapter);
- del_timer_sync(&adapter->watchdog_timer);
+ timer_delete_sync(&adapter->watchdog_timer);
netdev->tx_queue_len = adapter->tx_queue_len;
netif_carrier_off(netdev);
@@ -2184,7 +2193,7 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
}
} else {
pch_gbe_reset(adapter);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
adapter->hw.mac.max_frame_size = max_frame;
}
@@ -2234,9 +2243,6 @@ static int pch_gbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
netdev_dbg(netdev, "cmd : 0x%04x\n", cmd);
- if (cmd == SIOCSHWTSTAMP)
- return hwtstamp_ioctl(netdev, ifr, cmd);
-
return generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
}
@@ -2328,6 +2334,8 @@ static const struct net_device_ops pch_gbe_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = pch_gbe_netpoll,
#endif
+ .ndo_hwtstamp_get = pch_gbe_hwtstamp_get,
+ .ndo_hwtstamp_set = pch_gbe_hwtstamp_set,
};
static pci_ers_result_t pch_gbe_io_error_detected(struct pci_dev *pdev,
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index 1cc001087193..b0de7e9f12a5 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -163,7 +163,7 @@ static int tx_params[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
#include <linux/uaccess.h>
#include <asm/processor.h> /* Processor type for cache alignment. */
#include <asm/io.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <asm/cache.h>
static const char version[] =
@@ -1025,7 +1025,7 @@ static inline int hamachi_tx(struct net_device *dev)
static void hamachi_timer(struct timer_list *t)
{
- struct hamachi_private *hmp = from_timer(hmp, t, timer);
+ struct hamachi_private *hmp = timer_container_of(hmp, t, timer);
struct net_device *dev = hmp->mii_if.dev;
void __iomem *ioaddr = hmp->base;
int next_tick = 10*HZ;
@@ -1712,7 +1712,7 @@ static int hamachi_close(struct net_device *dev)
free_irq(hmp->pci_dev->irq, dev);
- del_timer_sync(&hmp->timer);
+ timer_delete_sync(&hmp->timer);
/* Free all the skbuffs in the Rx queue. */
for (i = 0; i < RX_RING_SIZE; i++) {
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index 640ac01689fb..1e25ac13a7d8 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -102,7 +102,7 @@ static int gx_fix;
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <asm/processor.h> /* Processor type for cache alignment. */
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <asm/io.h>
/* These identify the driver base version and may not be removed. */
@@ -652,7 +652,7 @@ err_free_irq:
static void yellowfin_timer(struct timer_list *t)
{
- struct yellowfin_private *yp = from_timer(yp, t, timer);
+ struct yellowfin_private *yp = timer_container_of(yp, t, timer);
struct net_device *dev = pci_get_drvdata(yp->pci_dev);
void __iomem *ioaddr = yp->base;
int next_tick = 60*HZ;
@@ -1222,7 +1222,7 @@ static int yellowfin_close(struct net_device *dev)
iowrite32(0x80000000, ioaddr + RxCtrl);
iowrite32(0x80000000, ioaddr + TxCtrl);
- del_timer(&yp->timer);
+ timer_delete(&yp->timer);
#if defined(__i386__)
if (yellowfin_debug > 2) {
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index ed7dd0a04235..fe58024b5901 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -934,7 +934,8 @@ static irqreturn_t pasemi_mac_rx_intr(int irq, void *data)
static void pasemi_mac_tx_timer(struct timer_list *t)
{
- struct pasemi_mac_txring *txring = from_timer(txring, t, clean_timer);
+ struct pasemi_mac_txring *txring = timer_container_of(txring, t,
+ clean_timer);
struct pasemi_mac *mac = txring->mac;
pasemi_mac_clean_tx(txring);
@@ -1288,7 +1289,7 @@ static int pasemi_mac_close(struct net_device *dev)
phy_disconnect(dev->phydev);
}
- del_timer_sync(&mac->tx->clean_timer);
+ timer_delete_sync(&mac->tx->clean_timer);
netif_stop_queue(dev);
napi_disable(&mac->napi);
@@ -1639,7 +1640,7 @@ static int pasemi_mac_change_mtu(struct net_device *dev, int new_mtu)
reg |= PAS_MAC_CFG_MACCFG_MAXF(new_mtu + ETH_HLEN + 4);
write_mac_reg(mac, PAS_MAC_CFG_MACCFG, reg);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
/* MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */
mac->bufsz = new_mtu + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 128;
@@ -1699,8 +1700,9 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netif_napi_add(dev, &mac->napi, pasemi_mac_poll);
- dev->features = NETIF_F_IP_CSUM | NETIF_F_LLTX | NETIF_F_SG |
- NETIF_F_HIGHDMA | NETIF_F_GSO;
+ dev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA |
+ NETIF_F_GSO;
+ dev->lltx = true;
mac->dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL);
if (!mac->dma_pdev) {
diff --git a/drivers/net/ethernet/pensando/Kconfig b/drivers/net/ethernet/pensando/Kconfig
index 3f7519e435b8..c99758adf3ad 100644
--- a/drivers/net/ethernet/pensando/Kconfig
+++ b/drivers/net/ethernet/pensando/Kconfig
@@ -23,6 +23,8 @@ config IONIC
depends on PTP_1588_CLOCK_OPTIONAL
select NET_DEVLINK
select DIMLIB
+ select PAGE_POOL
+ select AUXILIARY_BUS
help
This enables the support for the Pensando family of Ethernet
adapters. More specific information on this driver can be
diff --git a/drivers/net/ethernet/pensando/ionic/Makefile b/drivers/net/ethernet/pensando/ionic/Makefile
index 4e7642a2d25f..a598972fef41 100644
--- a/drivers/net/ethernet/pensando/ionic/Makefile
+++ b/drivers/net/ethernet/pensando/ionic/Makefile
@@ -5,5 +5,5 @@ obj-$(CONFIG_IONIC) := ionic.o
ionic-y := ionic_main.o ionic_bus_pci.o ionic_devlink.o ionic_dev.o \
ionic_debugfs.o ionic_lif.o ionic_rx_filter.o ionic_ethtool.o \
- ionic_txrx.o ionic_stats.o ionic_fw.o
+ ionic_txrx.o ionic_stats.o ionic_fw.o ionic_aux.o
ionic-$(CONFIG_PTP_1588_CLOCK) += ionic_phc.o
diff --git a/drivers/net/ethernet/pensando/ionic/ionic.h b/drivers/net/ethernet/pensando/ionic/ionic.h
index 2ccc2c2a06e3..85198e6a806e 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic.h
@@ -47,6 +47,7 @@ struct ionic {
struct ionic_dev_bar bars[IONIC_BARS_MAX];
unsigned int num_bars;
struct ionic_identity ident;
+ struct workqueue_struct *wq;
struct ionic_lif *lif;
unsigned int nnqs_per_lif;
unsigned int neqs_per_lif;
@@ -54,7 +55,8 @@ struct ionic {
unsigned int nrxqs_per_lif;
unsigned int nintrs;
DECLARE_BITMAP(intrs, IONIC_INTR_CTRL_REGS_MAX);
- struct work_struct nb_work;
+ cpumask_var_t *affinity_masks;
+ struct delayed_work doorbell_check_dwork;
struct notifier_block nb;
struct rw_semaphore vf_op_lock; /* lock for VF operations */
struct ionic_vf *vfs;
@@ -63,16 +65,9 @@ struct ionic {
int watchdog_period;
};
-struct ionic_admin_ctx {
- struct completion work;
- union ionic_adminq_cmd cmd;
- union ionic_adminq_comp comp;
-};
-
int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx);
int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx,
const int err, const bool do_msg);
-int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx);
int ionic_adminq_post_wait_nomsg(struct ionic_lif *lif, struct ionic_admin_ctx *ctx);
void ionic_adminq_netdev_err_print(struct ionic_lif *lif, u8 opcode,
u8 status, int err);
@@ -93,4 +88,6 @@ int ionic_port_identify(struct ionic *ionic);
int ionic_port_init(struct ionic *ionic);
int ionic_port_reset(struct ionic *ionic);
+bool ionic_doorbell_wa(struct ionic *ionic);
+
#endif /* _IONIC_H_ */
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_api.h b/drivers/net/ethernet/pensando/ionic/ionic_api.h
new file mode 100644
index 000000000000..bd88666836b8
--- /dev/null
+++ b/drivers/net/ethernet/pensando/ionic/ionic_api.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#ifndef _IONIC_API_H_
+#define _IONIC_API_H_
+
+#include <linux/auxiliary_bus.h>
+#include "ionic_if.h"
+#include "ionic_regs.h"
+
+/**
+ * struct ionic_aux_dev - Auxiliary device information
+ * @lif: Logical interface
+ * @idx: Index identifier
+ * @adev: Auxiliary device
+ */
+struct ionic_aux_dev {
+ struct ionic_lif *lif;
+ int idx;
+ struct auxiliary_device adev;
+};
+
+/**
+ * struct ionic_admin_ctx - Admin command context
+ * @work: Work completion wait queue element
+ * @cmd: Admin command (64B) to be copied to the queue
+ * @comp: Admin completion (16B) copied from the queue
+ */
+struct ionic_admin_ctx {
+ struct completion work;
+ union ionic_adminq_cmd cmd;
+ union ionic_adminq_comp comp;
+};
+
+#define IONIC_INTR_INDEX_NOT_ASSIGNED -1
+#define IONIC_INTR_NAME_MAX_SZ 32
+
+/**
+ * struct ionic_intr_info - Interrupt information
+ * @name: Name identifier
+ * @rearm_count: Interrupt rearm count
+ * @index: Interrupt index position
+ * @vector: Interrupt number
+ * @dim_coal_hw: Interrupt coalesce value in hardware units
+ * @affinity_mask: CPU affinity mask
+ * @aff_notify: context for notification of IRQ affinity changes
+ */
+struct ionic_intr_info {
+ char name[IONIC_INTR_NAME_MAX_SZ];
+ u64 rearm_count;
+ unsigned int index;
+ unsigned int vector;
+ u32 dim_coal_hw;
+ cpumask_var_t *affinity_mask;
+ struct irq_affinity_notify aff_notify;
+};
+
+/**
+ * ionic_adminq_post_wait - Post an admin command and wait for response
+ * @lif: Logical interface
+ * @ctx: API admin command context
+ *
+ * Post the command to an admin queue in the ethernet driver. If this command
+ * succeeds, then the command has been posted, but that does not indicate a
+ * completion. If this command returns success, then the completion callback
+ * will eventually be called.
+ *
+ * Return: zero or negative error status
+ */
+int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx);
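
A hedged usage sketch for the call above, following the on-stack completion pattern implied by struct ionic_admin_ctx; the helper name is hypothetical and the command-specific fields of ctx.cmd are left as a placeholder rather than taken from ionic_if.h.

    #include <linux/completion.h>
    #include "ionic_api.h"

    /* Hypothetical auxiliary-driver caller: post one admin command and wait
     * for the completion to be copied back into ctx.comp.
     */
    static int example_post_admin_cmd(struct ionic_lif *lif)
    {
        struct ionic_admin_ctx ctx = {
            .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
        };

        /* fill the opcode-specific union member of ctx.cmd here */

        return ionic_adminq_post_wait(lif, &ctx);
    }
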
+
+/**
+ * ionic_error_to_errno - Transform ionic_if errors to os errno
+ * @code: Ionic error number
+ *
+ * Return: Negative OS error number or zero
+ */
+int ionic_error_to_errno(enum ionic_status_code code);
+
+/**
+ * ionic_request_rdma_reset - request reset or disable the device or lif
+ * @lif: Logical interface
+ *
+ * The reset is triggered asynchronously. The call waits until the reset
+ * request completes or times out.
+ */
+void ionic_request_rdma_reset(struct ionic_lif *lif);
+
+/**
+ * ionic_intr_alloc - Reserve a device interrupt
+ * @lif: Logical interface
+ * @intr: Reserved ionic interrupt structure
+ *
+ * Reserve an interrupt index and get irq number for that index.
+ *
+ * Return: zero or negative error status
+ */
+int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr);
+
+/**
+ * ionic_intr_free - Release a device interrupt index
+ * @lif: Logical interface
+ * @intr: Interrupt index
+ *
+ * Mark the interrupt index unused so that it can be reserved again.
+ */
+void ionic_intr_free(struct ionic_lif *lif, int intr);
+
+/**
+ * ionic_get_cmb - Reserve cmb pages
+ * @lif: Logical interface
+ * @pgid: First page index
+ * @pgaddr: First page bus addr (contiguous)
+ * @order: Log base two number of pages (PAGE_SIZE)
+ * @stride_log2: Size of stride to determine CMB pool
+ * @expdb: Will be set to true if this CMB region has expdb enabled
+ *
+ * Return: zero or negative error status
+ */
+int ionic_get_cmb(struct ionic_lif *lif, u32 *pgid, phys_addr_t *pgaddr,
+ int order, u8 stride_log2, bool *expdb);
+
+/**
+ * ionic_put_cmb - Release cmb pages
+ * @lif: Logical interface
+ * @pgid: First page index
+ * @order: Log base two number of pages (PAGE_SIZE)
+ */
+void ionic_put_cmb(struct ionic_lif *lif, u32 pgid, int order);
+
+#endif /* _IONIC_API_H_ */
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_aux.c b/drivers/net/ethernet/pensando/ionic/ionic_aux.c
new file mode 100644
index 000000000000..a2be338eb3e5
--- /dev/null
+++ b/drivers/net/ethernet/pensando/ionic/ionic_aux.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#include <linux/kernel.h>
+#include "ionic.h"
+#include "ionic_lif.h"
+#include "ionic_aux.h"
+
+static DEFINE_IDA(aux_ida);
+
+static void ionic_auxbus_release(struct device *dev)
+{
+ struct ionic_aux_dev *ionic_adev;
+
+ ionic_adev = container_of(dev, struct ionic_aux_dev, adev.dev);
+ ida_free(&aux_ida, ionic_adev->adev.id);
+ kfree(ionic_adev);
+}
+
+int ionic_auxbus_register(struct ionic_lif *lif)
+{
+ struct ionic_aux_dev *ionic_adev;
+ struct auxiliary_device *aux_dev;
+ int err, id;
+
+ if (!(le64_to_cpu(lif->ionic->ident.lif.capabilities) & IONIC_LIF_CAP_RDMA))
+ return 0;
+
+ ionic_adev = kzalloc(sizeof(*ionic_adev), GFP_KERNEL);
+ if (!ionic_adev)
+ return -ENOMEM;
+
+ aux_dev = &ionic_adev->adev;
+
+ id = ida_alloc(&aux_ida, GFP_KERNEL);
+ if (id < 0) {
+ dev_err(lif->ionic->dev, "Failed to allocate aux id: %d\n", id);
+ kfree(ionic_adev);
+ return id;
+ }
+
+ aux_dev->id = id;
+ aux_dev->name = "rdma";
+ aux_dev->dev.parent = &lif->ionic->pdev->dev;
+ aux_dev->dev.release = ionic_auxbus_release;
+ ionic_adev->lif = lif;
+ err = auxiliary_device_init(aux_dev);
+ if (err) {
+ dev_err(lif->ionic->dev, "Failed to initialize %s aux device: %d\n",
+ aux_dev->name, err);
+ ida_free(&aux_ida, id);
+ kfree(ionic_adev);
+ return err;
+ }
+
+ err = auxiliary_device_add(aux_dev);
+ if (err) {
+ dev_err(lif->ionic->dev, "Failed to add %s aux device: %d\n",
+ aux_dev->name, err);
+ auxiliary_device_uninit(aux_dev);
+ return err;
+ }
+
+ lif->ionic_adev = ionic_adev;
+ return 0;
+}
+
+void ionic_auxbus_unregister(struct ionic_lif *lif)
+{
+ mutex_lock(&lif->adev_lock);
+ if (!lif->ionic_adev)
+ goto out;
+
+ auxiliary_device_delete(&lif->ionic_adev->adev);
+ auxiliary_device_uninit(&lif->ionic_adev->adev);
+
+ lif->ionic_adev = NULL;
+out:
+ mutex_unlock(&lif->adev_lock);
+}
+
+void ionic_request_rdma_reset(struct ionic_lif *lif)
+{
+ struct ionic *ionic = lif->ionic;
+ int err;
+
+ union ionic_dev_cmd cmd = {
+ .cmd.opcode = IONIC_CMD_RDMA_RESET_LIF,
+ .cmd.lif_index = cpu_to_le16(lif->index),
+ };
+
+ mutex_lock(&ionic->dev_cmd_lock);
+
+ ionic_dev_cmd_go(&ionic->idev, &cmd);
+ err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+
+ mutex_unlock(&ionic->dev_cmd_lock);
+
+ if (err)
+ pr_warn("%s request_reset: error %d\n", __func__, err);
+}
+EXPORT_SYMBOL_NS(ionic_request_rdma_reset, "NET_IONIC");
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_aux.h b/drivers/net/ethernet/pensando/ionic/ionic_aux.h
new file mode 100644
index 000000000000..f5528a9f187d
--- /dev/null
+++ b/drivers/net/ethernet/pensando/ionic/ionic_aux.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#ifndef _IONIC_AUX_H_
+#define _IONIC_AUX_H_
+
+int ionic_auxbus_register(struct ionic_lif *lif);
+void ionic_auxbus_unregister(struct ionic_lif *lif);
+
+#endif /* _IONIC_AUX_H_ */
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
index 6ba8d4aca0a0..70d86c5f52fb 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
@@ -9,6 +9,7 @@
#include "ionic.h"
#include "ionic_bus.h"
#include "ionic_lif.h"
+#include "ionic_aux.h"
#include "ionic_debugfs.h"
/* Supported devices */
@@ -271,6 +272,8 @@ static int ionic_setup_one(struct ionic *ionic)
}
ionic_debugfs_add_ident(ionic);
+ ionic_map_cmb(ionic);
+
err = ionic_init(ionic);
if (err) {
dev_err(dev, "Cannot init device: %d, aborting\n", err);
@@ -326,6 +329,11 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out;
}
+#ifdef CONFIG_PPC64
+ /* Ensure MSI/MSI-X interrupts lie within addressable physical memory */
+ pdev->no_64bit_msi = 1;
+#endif
+
err = ionic_setup_one(ionic);
if (err)
goto err_out;
@@ -370,8 +378,11 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_deregister_devlink;
}
+ ionic_auxbus_register(ionic->lif);
+
mod_timer(&ionic->watchdog_timer,
round_jiffies(jiffies + ionic->watchdog_period));
+ ionic_queue_doorbell_check(ionic, IONIC_NAPI_DEADLINE);
return 0;
@@ -388,6 +399,7 @@ err_out_free_irqs:
err_out_pci:
ionic_dev_teardown(ionic);
ionic_clear_pci(ionic);
+ ionic_debugfs_del_dev(ionic);
err_out:
mutex_destroy(&ionic->dev_cmd_lock);
ionic_devlink_free(ionic);
@@ -402,10 +414,14 @@ static void ionic_remove(struct pci_dev *pdev)
timer_shutdown_sync(&ionic->watchdog_timer);
if (ionic->lif) {
+ cancel_work_sync(&ionic->lif->deferred.work);
/* prevent adminq cmds if already known as down */
if (test_and_clear_bit(IONIC_LIF_F_FW_RESET, ionic->lif->state))
set_bit(IONIC_LIF_F_FW_STOPPING, ionic->lif->state);
+ if (ionic->lif->doorbell_wa)
+ cancel_delayed_work_sync(&ionic->doorbell_check_dwork);
+ ionic_auxbus_unregister(ionic->lif);
ionic_lif_unregister(ionic->lif);
ionic_devlink_unregister(ionic);
ionic_lif_deinit(ionic->lif);
@@ -432,9 +448,10 @@ static void ionic_reset_prepare(struct pci_dev *pdev)
set_bit(IONIC_LIF_F_FW_RESET, lif->state);
- del_timer_sync(&ionic->watchdog_timer);
+ timer_delete_sync(&ionic->watchdog_timer);
cancel_work_sync(&lif->deferred.work);
+ ionic_auxbus_unregister(ionic->lif);
mutex_lock(&lif->queue_lock);
ionic_stop_queues_reconfig(lif);
ionic_txrx_free(lif);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
index c3ae11a48024..c98b4e75e288 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
@@ -123,7 +123,7 @@ void ionic_debugfs_add_qcq(struct ionic_lif *lif, struct ionic_qcq *qcq)
struct ionic_cq *cq = &qcq->cq;
qcq_dentry = debugfs_create_dir(q->name, lif->dentry);
- if (IS_ERR_OR_NULL(qcq_dentry))
+ if (IS_ERR(qcq_dentry))
return;
qcq->dentry = qcq_dentry;
@@ -220,7 +220,7 @@ static int netdev_show(struct seq_file *seq, void *v)
{
struct net_device *netdev = seq->private;
- seq_printf(seq, "%s\n", netdev->name);
+ seq_printf(seq, "%s\n", netdev_name(netdev));
return 0;
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index 874499337132..ab27e9225c1e 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -13,7 +13,7 @@
static void ionic_watchdog_cb(struct timer_list *t)
{
- struct ionic *ionic = from_timer(ionic, t, watchdog_timer);
+ struct ionic *ionic = timer_container_of(ionic, t, watchdog_timer);
struct ionic_lif *lif = ionic->lif;
struct ionic_deferred_work *work;
int hb;
@@ -43,11 +43,99 @@ static void ionic_watchdog_cb(struct timer_list *t)
work->type = IONIC_DW_TYPE_RX_MODE;
netdev_dbg(lif->netdev, "deferred: rx_mode\n");
- ionic_lif_deferred_enqueue(&lif->deferred, work);
+ ionic_lif_deferred_enqueue(lif, work);
}
}
-static void ionic_watchdog_init(struct ionic *ionic)
+static void ionic_napi_schedule_do_softirq(struct napi_struct *napi)
+{
+ local_bh_disable();
+ napi_schedule(napi);
+ local_bh_enable();
+}
+
+void ionic_doorbell_napi_work(struct work_struct *work)
+{
+ struct ionic_qcq *qcq = container_of(work, struct ionic_qcq,
+ doorbell_napi_work);
+ unsigned long now, then, dif;
+
+ now = READ_ONCE(jiffies);
+ then = qcq->q.dbell_jiffies;
+ dif = now - then;
+
+ if (dif > qcq->q.dbell_deadline)
+ ionic_napi_schedule_do_softirq(&qcq->napi);
+}
+
+static int ionic_get_preferred_cpu(struct ionic *ionic,
+ struct ionic_intr_info *intr)
+{
+ int cpu;
+
+ cpu = cpumask_first_and(*intr->affinity_mask, cpu_online_mask);
+ if (cpu >= nr_cpu_ids)
+ cpu = cpumask_local_spread(0, dev_to_node(ionic->dev));
+
+ return cpu;
+}
+
+static void ionic_queue_dbell_napi_work(struct ionic *ionic,
+ struct ionic_qcq *qcq)
+{
+ int cpu;
+
+ if (!(qcq->flags & IONIC_QCQ_F_INTR))
+ return;
+
+ cpu = ionic_get_preferred_cpu(ionic, &qcq->intr);
+ queue_work_on(cpu, ionic->wq, &qcq->doorbell_napi_work);
+}
+
+static void ionic_doorbell_check_dwork(struct work_struct *work)
+{
+ struct ionic *ionic = container_of(work, struct ionic,
+ doorbell_check_dwork.work);
+ struct ionic_lif *lif = ionic->lif;
+
+ mutex_lock(&lif->queue_lock);
+
+ if (test_bit(IONIC_LIF_F_FW_STOPPING, lif->state) ||
+ test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
+ mutex_unlock(&lif->queue_lock);
+ return;
+ }
+
+ ionic_napi_schedule_do_softirq(&lif->adminqcq->napi);
+
+ if (test_bit(IONIC_LIF_F_UP, lif->state)) {
+ int i;
+
+ for (i = 0; i < lif->nxqs; i++) {
+ ionic_queue_dbell_napi_work(ionic, lif->txqcqs[i]);
+ ionic_queue_dbell_napi_work(ionic, lif->rxqcqs[i]);
+ }
+
+ if (lif->hwstamp_txq &&
+ lif->hwstamp_txq->flags & IONIC_QCQ_F_INTR)
+ ionic_napi_schedule_do_softirq(&lif->hwstamp_txq->napi);
+ if (lif->hwstamp_rxq &&
+ lif->hwstamp_rxq->flags & IONIC_QCQ_F_INTR)
+ ionic_napi_schedule_do_softirq(&lif->hwstamp_rxq->napi);
+ }
+ mutex_unlock(&lif->queue_lock);
+
+ ionic_queue_doorbell_check(ionic, IONIC_NAPI_DEADLINE);
+}
+
+bool ionic_doorbell_wa(struct ionic *ionic)
+{
+ u8 asic_type = ionic->idev.dev_info.asic_type;
+
+ return !asic_type || asic_type == IONIC_ASIC_TYPE_ELBA;
+}
+
+static int ionic_watchdog_init(struct ionic *ionic)
{
struct ionic_dev *idev = &ionic->idev;
@@ -63,6 +151,31 @@ static void ionic_watchdog_init(struct ionic *ionic)
idev->fw_status_ready = true;
idev->fw_generation = IONIC_FW_STS_F_GENERATION &
ioread8(&idev->dev_info_regs->fw_status);
+
+ ionic->wq = alloc_workqueue("%s-wq", WQ_UNBOUND, 0,
+ dev_name(ionic->dev));
+ if (!ionic->wq) {
+ dev_err(ionic->dev, "alloc_workqueue failed\n");
+ return -ENOMEM;
+ }
+
+ if (ionic_doorbell_wa(ionic))
+ INIT_DELAYED_WORK(&ionic->doorbell_check_dwork,
+ ionic_doorbell_check_dwork);
+
+ return 0;
+}
+
+void ionic_queue_doorbell_check(struct ionic *ionic, int delay)
+{
+ int cpu;
+
+ if (!ionic->lif->doorbell_wa)
+ return;
+
+ cpu = ionic_get_preferred_cpu(ionic, &ionic->lif->adminqcq->intr);
+ queue_delayed_work_on(cpu, ionic->wq, &ionic->doorbell_check_dwork,
+ delay);
}
void ionic_init_devinfo(struct ionic *ionic)
@@ -86,14 +199,203 @@ void ionic_init_devinfo(struct ionic *ionic)
dev_dbg(ionic->dev, "fw_version %s\n", idev->dev_info.fw_version);
}
+static void ionic_map_disc_cmb(struct ionic *ionic)
+{
+ struct ionic_identity *ident = &ionic->ident;
+ u32 length_reg0, length, offset, num_regions;
+ struct ionic_dev_bar *bar = ionic->bars;
+ struct ionic_dev *idev = &ionic->idev;
+ struct device *dev = ionic->dev;
+ int err, sz, i;
+ u64 end;
+
+ mutex_lock(&ionic->dev_cmd_lock);
+
+ ionic_dev_cmd_discover_cmb(idev);
+ err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+ if (!err) {
+ sz = min(sizeof(ident->cmb_layout),
+ sizeof(idev->dev_cmd_regs->data));
+ memcpy_fromio(&ident->cmb_layout,
+ &idev->dev_cmd_regs->data, sz);
+ }
+ mutex_unlock(&ionic->dev_cmd_lock);
+
+ if (err) {
+ dev_warn(dev, "Cannot discover CMB layout, disabling CMB\n");
+ return;
+ }
+
+ bar += 2;
+
+ num_regions = le32_to_cpu(ident->cmb_layout.num_regions);
+ if (!num_regions || num_regions > IONIC_MAX_CMB_REGIONS) {
+ dev_warn(dev, "Invalid number of CMB entries (%d)\n",
+ num_regions);
+ return;
+ }
+
+ dev_dbg(dev, "ionic_cmb_layout_identity num_regions %d flags %x:\n",
+ num_regions, ident->cmb_layout.flags);
+
+ for (i = 0; i < num_regions; i++) {
+ offset = le32_to_cpu(ident->cmb_layout.region[i].offset);
+ length = le32_to_cpu(ident->cmb_layout.region[i].length);
+ end = offset + length;
+
+ dev_dbg(dev, "CMB entry %d: bar_num %u cmb_type %u offset %x length %u\n",
+ i, ident->cmb_layout.region[i].bar_num,
+ ident->cmb_layout.region[i].cmb_type,
+ offset, length);
+
+ if (end > (bar->len >> IONIC_CMB_SHIFT_64K)) {
+ dev_warn(dev, "Out of bounds CMB region %d offset %x length %u\n",
+ i, offset, length);
+ return;
+ }
+ }
+
+ /* if first entry matches PCI config, expdb is not supported */
+ if (ident->cmb_layout.region[0].bar_num == bar->res_index &&
+ le32_to_cpu(ident->cmb_layout.region[0].length) == bar->len &&
+ !ident->cmb_layout.region[0].offset) {
+ dev_warn(dev, "No CMB mapping discovered\n");
+ return;
+ }
+
+ /* process first entry for regular mapping */
+ length_reg0 = le32_to_cpu(ident->cmb_layout.region[0].length);
+ if (!length_reg0) {
+ dev_warn(dev, "region len = 0. No CMB mapping discovered\n");
+ return;
+ }
+
+ /* Verify first entry size matches expected 8MB size (in 64KB pages) */
+ if (length_reg0 != IONIC_BAR2_CMB_ENTRY_SIZE >> IONIC_CMB_SHIFT_64K) {
+ dev_warn(dev, "Unexpected CMB size in entry 0: %u pages\n",
+ length_reg0);
+ return;
+ }
+
+ sz = BITS_TO_LONGS((length_reg0 << IONIC_CMB_SHIFT_64K) /
+ PAGE_SIZE) * sizeof(long);
+ idev->cmb_inuse = kzalloc(sz, GFP_KERNEL);
+ if (!idev->cmb_inuse) {
+ dev_warn(dev, "No memory for CMB, disabling\n");
+ idev->phy_cmb_pages = 0;
+ idev->phy_cmb_expdb64_pages = 0;
+ idev->phy_cmb_expdb128_pages = 0;
+ idev->phy_cmb_expdb256_pages = 0;
+ idev->phy_cmb_expdb512_pages = 0;
+ idev->cmb_npages = 0;
+ return;
+ }
+
+ for (i = 0; i < num_regions; i++) {
+ /* only handle regions whose length matches the first
+ * region's, to keep the implementation simple
+ */
+ if (le32_to_cpu(ident->cmb_layout.region[i].length) !=
+ length_reg0)
+ continue;
+
+ offset = le32_to_cpu(ident->cmb_layout.region[i].offset);
+
+ switch (ident->cmb_layout.region[i].cmb_type) {
+ case IONIC_CMB_TYPE_DEVMEM:
+ idev->phy_cmb_pages = bar->bus_addr + offset;
+ idev->cmb_npages =
+ (length_reg0 << IONIC_CMB_SHIFT_64K) / PAGE_SIZE;
+ dev_dbg(dev, "regular cmb mapping: bar->bus_addr %pa region[%d].length %u\n",
+ &bar->bus_addr, i, length);
+ dev_dbg(dev, "idev->phy_cmb_pages %pad, idev->cmb_npages %u\n",
+ &idev->phy_cmb_pages, idev->cmb_npages);
+ break;
+
+ case IONIC_CMB_TYPE_EXPDB64:
+ idev->phy_cmb_expdb64_pages =
+ bar->bus_addr + (offset << IONIC_CMB_SHIFT_64K);
+ dev_dbg(dev, "idev->phy_cmb_expdb64_pages %pad\n",
+ &idev->phy_cmb_expdb64_pages);
+ break;
+
+ case IONIC_CMB_TYPE_EXPDB128:
+ idev->phy_cmb_expdb128_pages =
+ bar->bus_addr + (offset << IONIC_CMB_SHIFT_64K);
+ dev_dbg(dev, "idev->phy_cmb_expdb128_pages %pad\n",
+ &idev->phy_cmb_expdb128_pages);
+ break;
+
+ case IONIC_CMB_TYPE_EXPDB256:
+ idev->phy_cmb_expdb256_pages =
+ bar->bus_addr + (offset << IONIC_CMB_SHIFT_64K);
+ dev_dbg(dev, "idev->phy_cmb_expdb256_pages %pad\n",
+ &idev->phy_cmb_expdb256_pages);
+ break;
+
+ case IONIC_CMB_TYPE_EXPDB512:
+ idev->phy_cmb_expdb512_pages =
+ bar->bus_addr + (offset << IONIC_CMB_SHIFT_64K);
+ dev_dbg(dev, "idev->phy_cmb_expdb512_pages %pad\n",
+ &idev->phy_cmb_expdb512_pages);
+ break;
+
+ default:
+ dev_warn(dev, "[%d] Invalid cmb_type (%d)\n",
+ i, ident->cmb_layout.region[i].cmb_type);
+ break;
+ }
+ }
+}
+
+static void ionic_map_classic_cmb(struct ionic *ionic)
+{
+ struct ionic_dev_bar *bar = ionic->bars;
+ struct ionic_dev *idev = &ionic->idev;
+ struct device *dev = ionic->dev;
+ int sz;
+
+ bar += 2;
+ /* classic CMB mapping */
+ idev->phy_cmb_pages = bar->bus_addr;
+ idev->cmb_npages = bar->len / PAGE_SIZE;
+ dev_dbg(dev, "classic cmb mapping: bar->bus_addr %pa bar->len %lu\n",
+ &bar->bus_addr, bar->len);
+ dev_dbg(dev, "idev->phy_cmb_pages %pad, idev->cmb_npages %u\n",
+ &idev->phy_cmb_pages, idev->cmb_npages);
+
+ sz = BITS_TO_LONGS(idev->cmb_npages) * sizeof(long);
+ idev->cmb_inuse = kzalloc(sz, GFP_KERNEL);
+ if (!idev->cmb_inuse) {
+ idev->phy_cmb_pages = 0;
+ idev->cmb_npages = 0;
+ }
+}
+
+void ionic_map_cmb(struct ionic *ionic)
+{
+ struct pci_dev *pdev = ionic->pdev;
+ struct device *dev = ionic->dev;
+
+ if (!(pci_resource_flags(pdev, 4) & IORESOURCE_MEM)) {
+ dev_dbg(dev, "No CMB, disabling\n");
+ return;
+ }
+
+ if (ionic->ident.dev.capabilities & cpu_to_le64(IONIC_DEV_CAP_DISC_CMB))
+ ionic_map_disc_cmb(ionic);
+ else
+ ionic_map_classic_cmb(ionic);
+}
+
int ionic_dev_setup(struct ionic *ionic)
{
struct ionic_dev_bar *bar = ionic->bars;
unsigned int num_bars = ionic->num_bars;
struct ionic_dev *idev = &ionic->idev;
struct device *dev = ionic->dev;
- int size;
u32 sig;
+ int err;
/* BAR0: dev_cmd and interrupts */
if (num_bars < 1) {
@@ -129,7 +431,9 @@ int ionic_dev_setup(struct ionic *ionic)
return -EFAULT;
}
- ionic_watchdog_init(ionic);
+ err = ionic_watchdog_init(ionic);
+ if (err)
+ return err;
idev->db_pages = bar->vaddr;
idev->phy_db_pages = bar->bus_addr;
@@ -139,16 +443,11 @@ int ionic_dev_setup(struct ionic *ionic)
mutex_init(&idev->cmb_inuse_lock);
if (num_bars < 3 || !ionic->bars[IONIC_PCI_BAR_CMB].len) {
idev->cmb_inuse = NULL;
+ idev->phy_cmb_pages = 0;
+ idev->cmb_npages = 0;
return 0;
}
- idev->phy_cmb_pages = bar->bus_addr;
- idev->cmb_npages = bar->len / PAGE_SIZE;
- size = BITS_TO_LONGS(idev->cmb_npages) * sizeof(long);
- idev->cmb_inuse = kzalloc(size, GFP_KERNEL);
- if (!idev->cmb_inuse)
- dev_warn(dev, "No memory for CMB, disabling\n");
-
return 0;
}
@@ -161,6 +460,15 @@ void ionic_dev_teardown(struct ionic *ionic)
idev->phy_cmb_pages = 0;
idev->cmb_npages = 0;
+ idev->phy_cmb_expdb64_pages = 0;
+ idev->phy_cmb_expdb128_pages = 0;
+ idev->phy_cmb_expdb256_pages = 0;
+ idev->phy_cmb_expdb512_pages = 0;
+
+ if (ionic->wq) {
+ destroy_workqueue(ionic->wq);
+ ionic->wq = NULL;
+ }
mutex_destroy(&idev->cmb_inuse_lock);
}
@@ -273,7 +581,7 @@ do_check_time:
if (work) {
work->type = IONIC_DW_TYPE_LIF_RESET;
work->fw_status = fw_status_ready;
- ionic_lif_deferred_enqueue(&lif->deferred, work);
+ ionic_lif_deferred_enqueue(lif, work);
}
}
}
@@ -304,9 +612,9 @@ do_check_time:
if (fw_hb_ready != idev->fw_hb_ready) {
idev->fw_hb_ready = fw_hb_ready;
if (!fw_hb_ready)
- dev_info(ionic->dev, "FW heartbeat stalled at %d\n", fw_hb);
+ dev_info(ionic->dev, "FW heartbeat stalled at %u\n", fw_hb);
else
- dev_info(ionic->dev, "FW heartbeat restored at %d\n", fw_hb);
+ dev_info(ionic->dev, "FW heartbeat restored at %u\n", fw_hb);
}
if (!fw_hb_ready)
@@ -578,28 +886,79 @@ void ionic_dev_cmd_adminq_init(struct ionic_dev *idev, struct ionic_qcq *qcq,
ionic_dev_cmd_go(idev, &cmd);
}
+void ionic_dev_cmd_discover_cmb(struct ionic_dev *idev)
+{
+ union ionic_dev_cmd cmd = {
+ .discover_cmb.opcode = IONIC_CMD_DISCOVER_CMB,
+ };
+
+ ionic_dev_cmd_go(idev, &cmd);
+}
+
int ionic_db_page_num(struct ionic_lif *lif, int pid)
{
return (lif->hw_index * lif->dbid_count) + pid;
}
-int ionic_get_cmb(struct ionic_lif *lif, u32 *pgid, phys_addr_t *pgaddr, int order)
+int ionic_get_cmb(struct ionic_lif *lif, u32 *pgid, phys_addr_t *pgaddr,
+ int order, u8 stride_log2, bool *expdb)
{
struct ionic_dev *idev = &lif->ionic->idev;
- int ret;
+ void __iomem *nonexpdb_pgptr;
+ phys_addr_t nonexpdb_pgaddr;
+ int i, idx;
mutex_lock(&idev->cmb_inuse_lock);
- ret = bitmap_find_free_region(idev->cmb_inuse, idev->cmb_npages, order);
+ idx = bitmap_find_free_region(idev->cmb_inuse, idev->cmb_npages, order);
mutex_unlock(&idev->cmb_inuse_lock);
- if (ret < 0)
- return ret;
+ if (idx < 0)
+ return idx;
+
+ *pgid = (u32)idx;
+
+ if (idev->phy_cmb_expdb64_pages &&
+ stride_log2 == IONIC_EXPDB_64B_WQE_LG2) {
+ *pgaddr = idev->phy_cmb_expdb64_pages + idx * PAGE_SIZE;
+ if (expdb)
+ *expdb = true;
+ } else if (idev->phy_cmb_expdb128_pages &&
+ stride_log2 == IONIC_EXPDB_128B_WQE_LG2) {
+ *pgaddr = idev->phy_cmb_expdb128_pages + idx * PAGE_SIZE;
+ if (expdb)
+ *expdb = true;
+ } else if (idev->phy_cmb_expdb256_pages &&
+ stride_log2 == IONIC_EXPDB_256B_WQE_LG2) {
+ *pgaddr = idev->phy_cmb_expdb256_pages + idx * PAGE_SIZE;
+ if (expdb)
+ *expdb = true;
+ } else if (idev->phy_cmb_expdb512_pages &&
+ stride_log2 == IONIC_EXPDB_512B_WQE_LG2) {
+ *pgaddr = idev->phy_cmb_expdb512_pages + idx * PAGE_SIZE;
+ if (expdb)
+ *expdb = true;
+ } else {
+ *pgaddr = idev->phy_cmb_pages + idx * PAGE_SIZE;
+ if (expdb)
+ *expdb = false;
+ }
- *pgid = ret;
- *pgaddr = idev->phy_cmb_pages + ret * PAGE_SIZE;
+ /* clear the requested CMB region, 1 PAGE_SIZE ioremap at a time */
+ nonexpdb_pgaddr = idev->phy_cmb_pages + idx * PAGE_SIZE;
+ for (i = 0; i < (1 << order); i++) {
+ nonexpdb_pgptr =
+ ioremap_wc(nonexpdb_pgaddr + i * PAGE_SIZE, PAGE_SIZE);
+ if (!nonexpdb_pgptr) {
+ ionic_put_cmb(lif, *pgid, order);
+ return -ENOMEM;
+ }
+ memset_io(nonexpdb_pgptr, 0, PAGE_SIZE);
+ iounmap(nonexpdb_pgptr);
+ }
return 0;
}
+EXPORT_SYMBOL_NS(ionic_get_cmb, "NET_IONIC");
void ionic_put_cmb(struct ionic_lif *lif, u32 pgid, int order)
{
@@ -609,6 +968,7 @@ void ionic_put_cmb(struct ionic_lif *lif, u32 pgid, int order)
bitmap_release_region(idev->cmb_inuse, pgid, order);
mutex_unlock(&idev->cmb_inuse_lock);
}
+EXPORT_SYMBOL_NS(ionic_put_cmb, "NET_IONIC");
int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq,
struct ionic_intr_info *intr,
@@ -703,10 +1063,6 @@ void ionic_q_post(struct ionic_queue *q, bool ring_doorbell)
q->dbval | q->head_idx);
q->dbell_jiffies = jiffies;
-
- if (q_to_qcq(q)->napi_qcq)
- mod_timer(&q_to_qcq(q)->napi_qcq->napi_deadline,
- jiffies + IONIC_NAPI_DEADLINE);
}
}
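For reference, the unit handling in the CMB code above: firmware reports each region's offset and length in 64KB pages, the express-doorbell region bases are formed by shifting the 64KB-unit offset left by IONIC_CMB_SHIFT_64K before adding it to the BAR bus address, and ionic_get_cmb() then hands out space in PAGE_SIZE steps from the chosen base. A minimal sketch of that arithmetic, with illustrative names rather than driver code:

#include <linux/mm.h>		/* PAGE_SIZE */
#include <linux/types.h>

#define EXAMPLE_CMB_SHIFT_64K	16	/* mirrors IONIC_CMB_SHIFT_64K */

/* Base bus address of an express-doorbell region whose offset the
 * firmware reported in 64KB units, as in ionic_map_disc_cmb().
 */
static inline dma_addr_t example_cmb_region_base(dma_addr_t bar_bus_addr,
						 u32 offset_64k)
{
	return bar_bus_addr + ((dma_addr_t)offset_64k << EXAMPLE_CMB_SHIFT_64K);
}

/* Address of CMB page 'idx' within a region, matching the
 * 'base + idx * PAGE_SIZE' computation in ionic_get_cmb().
 */
static inline dma_addr_t example_cmb_page_addr(dma_addr_t region_base, u32 idx)
{
	return region_base + (dma_addr_t)idx * PAGE_SIZE;
}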
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index f30eee4a5a80..35566f97eaea 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -12,6 +12,7 @@
#include "ionic_if.h"
#include "ionic_regs.h"
+#include "ionic_api.h"
#define IONIC_MAX_TX_DESC 8192
#define IONIC_MAX_RX_DESC 16384
@@ -28,11 +29,16 @@
#define IONIC_DEV_INFO_REG_COUNT 32
#define IONIC_DEV_CMD_REG_COUNT 32
-#define IONIC_NAPI_DEADLINE (HZ / 200) /* 5ms */
+#define IONIC_NAPI_DEADLINE (HZ) /* 1 sec */
#define IONIC_ADMIN_DOORBELL_DEADLINE (HZ / 2) /* 500ms */
#define IONIC_TX_DOORBELL_DEADLINE (HZ / 100) /* 10ms */
#define IONIC_RX_MIN_DOORBELL_DEADLINE (HZ / 100) /* 10ms */
-#define IONIC_RX_MAX_DOORBELL_DEADLINE (HZ * 5) /* 5s */
+#define IONIC_RX_MAX_DOORBELL_DEADLINE (HZ * 4) /* 4s */
+
+#define IONIC_EXPDB_64B_WQE_LG2 6
+#define IONIC_EXPDB_128B_WQE_LG2 7
+#define IONIC_EXPDB_256B_WQE_LG2 8
+#define IONIC_EXPDB_512B_WQE_LG2 9
struct ionic_dev_bar {
void __iomem *vaddr;
@@ -170,6 +176,11 @@ struct ionic_dev {
dma_addr_t phy_cmb_pages;
u32 cmb_npages;
+ dma_addr_t phy_cmb_expdb64_pages;
+ dma_addr_t phy_cmb_expdb128_pages;
+ dma_addr_t phy_cmb_expdb256_pages;
+ dma_addr_t phy_cmb_expdb512_pages;
+
u32 port_info_sz;
struct ionic_port_info *port_info;
dma_addr_t port_info_pa;
@@ -181,10 +192,7 @@ struct ionic_queue;
struct ionic_qcq;
#define IONIC_MAX_BUF_LEN ((u16)-1)
-#define IONIC_PAGE_SIZE PAGE_SIZE
-#define IONIC_PAGE_SPLIT_SZ (PAGE_SIZE / 2)
-#define IONIC_PAGE_GFP_MASK (GFP_ATOMIC | __GFP_NOWARN |\
- __GFP_COMP | __GFP_MEMALLOC)
+#define IONIC_PAGE_SIZE MIN(PAGE_SIZE, IONIC_MAX_BUF_LEN)
#define IONIC_XDP_MAX_LINEAR_MTU (IONIC_PAGE_SIZE - \
(VLAN_ETH_HLEN + \
@@ -238,9 +246,8 @@ struct ionic_queue {
unsigned int index;
unsigned int num_descs;
unsigned int max_sg_elems;
+
u64 features;
- unsigned int type;
- unsigned int hw_index;
unsigned int hw_type;
bool xdp_flush;
union {
@@ -250,18 +257,23 @@ struct ionic_queue {
struct ionic_admin_cmd *adminq;
};
union {
- void __iomem *cmb_base;
- struct ionic_txq_desc __iomem *cmb_txq;
- struct ionic_rxq_desc __iomem *cmb_rxq;
- };
- union {
void *sg_base;
struct ionic_txq_sg_desc *txq_sgl;
struct ionic_txq_sg_desc_v1 *txq_sgl_v1;
struct ionic_rxq_sg_desc *rxq_sgl;
};
struct xdp_rxq_info *xdp_rxq_info;
+ struct bpf_prog *xdp_prog;
+ struct page_pool *page_pool;
struct ionic_queue *partner;
+
+ union {
+ void __iomem *cmb_base;
+ struct ionic_txq_desc __iomem *cmb_txq;
+ struct ionic_rxq_desc __iomem *cmb_rxq;
+ };
+ unsigned int type;
+ unsigned int hw_index;
dma_addr_t base_pa;
dma_addr_t cmb_base_pa;
dma_addr_t sg_base_pa;
@@ -272,19 +284,6 @@ struct ionic_queue {
char name[IONIC_QUEUE_NAME_MAX_SZ];
} ____cacheline_aligned_in_smp;
-#define IONIC_INTR_INDEX_NOT_ASSIGNED -1
-#define IONIC_INTR_NAME_MAX_SZ 32
-
-struct ionic_intr_info {
- char name[IONIC_INTR_NAME_MAX_SZ];
- u64 rearm_count;
- unsigned int index;
- unsigned int vector;
- unsigned int cpu;
- u32 dim_coal_hw;
- cpumask_t affinity_mask;
-};
-
struct ionic_cq {
struct ionic_lif *lif;
struct ionic_queue *bound_q;
@@ -362,8 +361,8 @@ void ionic_dev_cmd_adminq_init(struct ionic_dev *idev, struct ionic_qcq *qcq,
int ionic_db_page_num(struct ionic_lif *lif, int pid);
-int ionic_get_cmb(struct ionic_lif *lif, u32 *pgid, phys_addr_t *pgaddr, int order);
-void ionic_put_cmb(struct ionic_lif *lif, u32 pgid, int order);
+void ionic_dev_cmd_discover_cmb(struct ionic_dev *idev);
+void ionic_map_cmb(struct ionic *ionic);
int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq,
struct ionic_intr_info *intr,
@@ -375,7 +374,9 @@ typedef void (*ionic_cq_done_cb)(void *done_arg);
unsigned int ionic_cq_service(struct ionic_cq *cq, unsigned int work_to_do,
ionic_cq_cb cb, ionic_cq_done_cb done_cb,
void *done_arg);
-unsigned int ionic_tx_cq_service(struct ionic_cq *cq, unsigned int work_to_do);
+unsigned int ionic_tx_cq_service(struct ionic_cq *cq,
+ unsigned int work_to_do,
+ bool in_napi);
int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev,
struct ionic_queue *q, unsigned int index, const char *name,
@@ -386,6 +387,8 @@ bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos);
int ionic_heartbeat_check(struct ionic *ionic);
bool ionic_is_fw_running(struct ionic_dev *idev);
+void ionic_doorbell_napi_work(struct work_struct *work);
+void ionic_queue_doorbell_check(struct ionic *ionic, int delay);
bool ionic_adminq_poke_doorbell(struct ionic_queue *q);
bool ionic_txq_poke_doorbell(struct ionic_queue *q);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index 91183965a6b7..2d9efadb5d2a 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -11,6 +11,8 @@
#include "ionic_ethtool.h"
#include "ionic_stats.h"
+#define IONIC_MAX_RX_COPYBREAK min(U16_MAX, IONIC_MAX_BUF_LEN)
+
static void ionic_get_stats_strings(struct ionic_lif *lif, u8 *buf)
{
u32 i;
@@ -156,6 +158,20 @@ static int ionic_get_link_ksettings(struct net_device *netdev,
25000baseCR_Full);
copper_seen++;
break;
+ case IONIC_XCVR_PID_QSFP_50G_CR2_FC:
+ case IONIC_XCVR_PID_QSFP_50G_CR2:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 50000baseCR2_Full);
+ copper_seen++;
+ break;
+ case IONIC_XCVR_PID_QSFP_200G_CR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported, 200000baseCR4_Full);
+ copper_seen++;
+ break;
+ case IONIC_XCVR_PID_QSFP_400G_CR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported, 400000baseCR4_Full);
+ copper_seen++;
+ break;
case IONIC_XCVR_PID_SFP_10GBASE_AOC:
case IONIC_XCVR_PID_SFP_10GBASE_CU:
ethtool_link_ksettings_add_link_mode(ks, supported,
@@ -194,6 +210,31 @@ static int ionic_get_link_ksettings(struct net_device *netdev,
ethtool_link_ksettings_add_link_mode(ks, supported,
25000baseSR_Full);
break;
+ case IONIC_XCVR_PID_QSFP_200G_AOC:
+ case IONIC_XCVR_PID_QSFP_200G_SR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 200000baseSR4_Full);
+ break;
+ case IONIC_XCVR_PID_QSFP_200G_FR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 200000baseLR4_ER4_FR4_Full);
+ break;
+ case IONIC_XCVR_PID_QSFP_200G_DR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 200000baseDR4_Full);
+ break;
+ case IONIC_XCVR_PID_QSFP_400G_FR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 400000baseLR4_ER4_FR4_Full);
+ break;
+ case IONIC_XCVR_PID_QSFP_400G_DR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 400000baseDR4_Full);
+ break;
+ case IONIC_XCVR_PID_QSFP_400G_SR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 400000baseSR4_Full);
+ break;
case IONIC_XCVR_PID_SFP_10GBASE_SR:
ethtool_link_ksettings_add_link_mode(ks, supported,
10000baseSR_Full);
@@ -872,10 +913,17 @@ static int ionic_set_tunable(struct net_device *dev,
const void *data)
{
struct ionic_lif *lif = netdev_priv(dev);
+ u32 rx_copybreak;
switch (tuna->id) {
case ETHTOOL_RX_COPYBREAK:
- lif->rx_copybreak = *(u32 *)data;
+ rx_copybreak = *(u32 *)data;
+ if (rx_copybreak > IONIC_MAX_RX_COPYBREAK) {
+ netdev_err(dev, "Max supported rx_copybreak size: %u\n",
+ IONIC_MAX_RX_COPYBREAK);
+ return -EINVAL;
+ }
+ lif->rx_copybreak = (u16)rx_copybreak;
break;
default:
return -EOPNOTSUPP;
@@ -900,63 +948,20 @@ static int ionic_get_tunable(struct net_device *netdev,
return 0;
}
-static int ionic_get_module_info(struct net_device *netdev,
- struct ethtool_modinfo *modinfo)
-
+static int ionic_do_module_copy(u8 *dst, u8 *src, u32 len)
{
- struct ionic_lif *lif = netdev_priv(netdev);
- struct ionic_dev *idev = &lif->ionic->idev;
- struct ionic_xcvr_status *xcvr;
- struct sfp_eeprom_base *sfp;
-
- xcvr = &idev->port_info->status.xcvr;
- sfp = (struct sfp_eeprom_base *) xcvr->sprom;
-
- /* report the module data type and length */
- switch (sfp->phys_id) {
- case SFF8024_ID_SFP:
- modinfo->type = ETH_MODULE_SFF_8079;
- modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
- break;
- case SFF8024_ID_QSFP_8436_8636:
- case SFF8024_ID_QSFP28_8636:
- modinfo->type = ETH_MODULE_SFF_8436;
- modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
- break;
- default:
- netdev_info(netdev, "unknown xcvr type 0x%02x\n",
- xcvr->sprom[0]);
- modinfo->type = 0;
- modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
- break;
- }
-
- return 0;
-}
-
-static int ionic_get_module_eeprom(struct net_device *netdev,
- struct ethtool_eeprom *ee,
- u8 *data)
-{
- struct ionic_lif *lif = netdev_priv(netdev);
- struct ionic_dev *idev = &lif->ionic->idev;
- struct ionic_xcvr_status *xcvr;
- char tbuf[sizeof(xcvr->sprom)];
+ char tbuf[sizeof_field(struct ionic_xcvr_status, sprom)];
int count = 10;
- u32 len;
/* The NIC keeps the module prom up-to-date in the DMA space
* so we can simply copy the module bytes into the data buffer.
*/
- xcvr = &idev->port_info->status.xcvr;
- len = min_t(u32, sizeof(xcvr->sprom), ee->len);
-
do {
- memcpy(data, xcvr->sprom, len);
- memcpy(tbuf, xcvr->sprom, len);
+ memcpy(dst, src, len);
+ memcpy(tbuf, src, len);
/* Let's make sure we got a consistent copy */
- if (!memcmp(data, tbuf, len))
+ if (!memcmp(dst, tbuf, len))
break;
} while (--count);
@@ -967,8 +972,50 @@ static int ionic_get_module_eeprom(struct net_device *netdev,
return 0;
}
+static int ionic_get_module_eeprom_by_page(struct net_device *netdev,
+ const struct ethtool_module_eeprom *page_data,
+ struct netlink_ext_ack *extack)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic_dev *idev = &lif->ionic->idev;
+ int err;
+ u8 *src;
+
+ if (!page_data->length)
+ return -EINVAL;
+
+ if (page_data->bank != 0) {
+ NL_SET_ERR_MSG_MOD(extack, "Only bank 0 is supported");
+ return -EINVAL;
+ }
+
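+ /* Upper memory pages are addressed at offsets 128-255, so the
+ * cached per-page buffers below are indexed from (offset - 128).
+ */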
+ switch (page_data->page) {
+ case 0:
+ src = &idev->port_info->status.xcvr.sprom[page_data->offset];
+ break;
+ case 1:
+ src = &idev->port_info->sprom_page1[page_data->offset - 128];
+ break;
+ case 2:
+ src = &idev->port_info->sprom_page2[page_data->offset - 128];
+ break;
+ case 17:
+ src = &idev->port_info->sprom_page17[page_data->offset - 128];
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ memset(page_data->data, 0, page_data->length);
+ err = ionic_do_module_copy(page_data->data, src, page_data->length);
+ if (err)
+ return err;
+
+ return page_data->length;
+}
+
static int ionic_get_ts_info(struct net_device *netdev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct ionic_lif *lif = netdev_priv(netdev);
struct ionic *ionic = lif->ionic;
@@ -980,8 +1027,6 @@ static int ionic_get_ts_info(struct net_device *netdev,
info->phc_index = ptp_clock_index(lif->phc->ptp);
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
@@ -1114,8 +1159,7 @@ static const struct ethtool_ops ionic_ethtool_ops = {
.set_rxfh = ionic_set_rxfh,
.get_tunable = ionic_get_tunable,
.set_tunable = ionic_set_tunable,
- .get_module_info = ionic_get_module_info,
- .get_module_eeprom = ionic_get_module_eeprom,
+ .get_module_eeprom_by_page = ionic_get_module_eeprom_by_page,
.get_pauseparam = ionic_get_pauseparam,
.set_pauseparam = ionic_set_pauseparam,
.get_fecparam = ionic_get_fecparam,
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h
index 9a1825edf0d0..47559c909c8b 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_if.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h
@@ -56,6 +56,9 @@ enum ionic_cmd_opcode {
IONIC_CMD_VF_SETATTR = 61,
IONIC_CMD_VF_CTRL = 62,
+ /* CMB command */
+ IONIC_CMD_DISCOVER_CMB = 80,
+
/* QoS commands */
IONIC_CMD_QOS_CLASS_IDENTIFY = 240,
IONIC_CMD_QOS_CLASS_INIT = 241,
@@ -71,7 +74,7 @@ enum ionic_cmd_opcode {
IONIC_CMD_FW_CONTROL_V1 = 255,
};
-/**
+/*
* enum ionic_status_code - Device command return codes
*/
enum ionic_status_code {
@@ -112,6 +115,7 @@ enum ionic_notifyq_opcode {
/**
* struct ionic_admin_cmd - General admin command format
* @opcode: Opcode for the command
+ * @rsvd: reserved byte(s)
* @lif_index: LIF index
* @cmd_data: Opcode-specific command bytes
*/
@@ -125,6 +129,7 @@ struct ionic_admin_cmd {
/**
* struct ionic_admin_comp - General admin command completion format
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
* @comp_index: Index in the descriptor ring for which this is the completion
* @cmd_data: Command-specific bytes
* @color: Color bit (Always 0 for commands issued to the
@@ -147,6 +152,7 @@ static inline u8 color_match(u8 color, u8 done_color)
/**
* struct ionic_nop_cmd - NOP command
* @opcode: opcode
+ * @rsvd: reserved byte(s)
*/
struct ionic_nop_cmd {
u8 opcode;
@@ -156,6 +162,7 @@ struct ionic_nop_cmd {
/**
* struct ionic_nop_comp - NOP command completion
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
*/
struct ionic_nop_comp {
u8 status;
@@ -166,6 +173,7 @@ struct ionic_nop_comp {
* struct ionic_dev_init_cmd - Device init command
* @opcode: opcode
* @type: Device type
+ * @rsvd: reserved byte(s)
*/
struct ionic_dev_init_cmd {
u8 opcode;
@@ -176,6 +184,7 @@ struct ionic_dev_init_cmd {
/**
* struct ionic_dev_init_comp - Device init command completion
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
*/
struct ionic_dev_init_comp {
u8 status;
@@ -185,6 +194,7 @@ struct ionic_dev_init_comp {
/**
* struct ionic_dev_reset_cmd - Device reset command
* @opcode: opcode
+ * @rsvd: reserved byte(s)
*/
struct ionic_dev_reset_cmd {
u8 opcode;
@@ -194,6 +204,7 @@ struct ionic_dev_reset_cmd {
/**
* struct ionic_dev_reset_comp - Reset command completion
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
*/
struct ionic_dev_reset_comp {
u8 status;
@@ -207,6 +218,7 @@ struct ionic_dev_reset_comp {
* struct ionic_dev_identify_cmd - Driver/device identify command
* @opcode: opcode
* @ver: Highest version of identify supported by driver
+ * @rsvd: reserved byte(s)
*/
struct ionic_dev_identify_cmd {
u8 opcode;
@@ -218,6 +230,7 @@ struct ionic_dev_identify_cmd {
* struct ionic_dev_identify_comp - Driver/device identify command completion
* @status: Status of the command (enum ionic_status_code)
* @ver: Version of identify returned by device
+ * @rsvd: reserved byte(s)
*/
struct ionic_dev_identify_comp {
u8 status;
@@ -242,6 +255,7 @@ enum ionic_os_type {
* @kernel_ver: Kernel version, numeric format
* @kernel_ver_str: Kernel version, string format
* @driver_ver_str: Driver version, string format
+ * @words: word access to struct contents
*/
union ionic_drv_identity {
struct {
@@ -258,16 +272,20 @@ union ionic_drv_identity {
/**
* enum ionic_dev_capability - Device capabilities
* @IONIC_DEV_CAP_VF_CTRL: Device supports VF ctrl operations
+ * @IONIC_DEV_CAP_DISC_CMB: Device supports CMB discovery operations
*/
enum ionic_dev_capability {
IONIC_DEV_CAP_VF_CTRL = BIT(0),
+ IONIC_DEV_CAP_DISC_CMB = BIT(1),
};
/**
* union ionic_dev_identity - device identity information
* @version: Version of device identify
* @type: Identify type (0 for now)
+ * @rsvd: reserved byte(s)
* @nports: Number of ports provisioned
+ * @rsvd2: reserved byte(s)
* @nlifs: Number of LIFs provisioned
* @nintrs: Number of interrupts provisioned
* @ndbpgs_per_lif: Number of doorbell pages per LIF
@@ -284,6 +302,7 @@ enum ionic_dev_capability {
* @hwstamp_mult: Hardware tick to nanosecond multiplier.
* @hwstamp_shift: Hardware tick to nanosecond divisor (power of two).
* @capabilities: Device capabilities
+ * @words: word access to struct contents
*/
union ionic_dev_identity {
struct {
@@ -317,6 +336,7 @@ enum ionic_lif_type {
* @opcode: opcode
* @type: LIF type (enum ionic_lif_type)
* @ver: Version of identify returned by device
+ * @rsvd: reserved byte(s)
*/
struct ionic_lif_identify_cmd {
u8 opcode;
@@ -329,6 +349,7 @@ struct ionic_lif_identify_cmd {
* struct ionic_lif_identify_comp - LIF identify command completion
* @status: Status of the command (enum ionic_status_code)
* @ver: Version of identify returned by device
+ * @rsvd2: reserved byte(s)
*/
struct ionic_lif_identify_comp {
u8 status;
@@ -379,6 +400,7 @@ enum ionic_logical_qtype {
* @IONIC_Q_F_4X_DESC: Quadruple main descriptor size
* @IONIC_Q_F_4X_CQ_DESC: Quadruple cq descriptor size
* @IONIC_Q_F_4X_SG_DESC: Quadruple sg descriptor size
+ * @IONIC_QIDENT_F_EXPDB: Queue supports express doorbell
*/
enum ionic_q_feature {
IONIC_QIDENT_F_CQ = BIT_ULL(0),
@@ -391,6 +413,7 @@ enum ionic_q_feature {
IONIC_Q_F_4X_DESC = BIT_ULL(7),
IONIC_Q_F_4X_CQ_DESC = BIT_ULL(8),
IONIC_Q_F_4X_SG_DESC = BIT_ULL(9),
+ IONIC_QIDENT_F_EXPDB = BIT_ULL(10),
};
/**
@@ -416,7 +439,7 @@ enum ionic_txq_feature {
};
/**
- * struct ionic_hwstamp_bits - Hardware timestamp decoding bits
+ * enum ionic_hwstamp_bits - Hardware timestamp decoding bits
* @IONIC_HWSTAMP_INVALID: Invalid hardware timestamp value
* @IONIC_HWSTAMP_CQ_NEGOFFSET: Timestamp field negative offset
* from the base cq descriptor.
@@ -429,6 +452,7 @@ enum ionic_hwstamp_bits {
/**
* struct ionic_lif_logical_qtype - Descriptor of logical to HW queue type
* @qtype: Hardware Queue Type
+ * @rsvd: reserved byte(s)
* @qid_count: Number of Queue IDs of the logical type
* @qid_base: Minimum Queue ID of the logical type
*/
@@ -454,12 +478,14 @@ enum ionic_lif_state {
/**
* union ionic_lif_config - LIF configuration
* @state: LIF state (enum ionic_lif_state)
+ * @rsvd: reserved byte(s)
* @name: LIF name
* @mtu: MTU
* @mac: Station MAC address
* @vlan: Default Vlan ID
* @features: Features (enum ionic_eth_hw_features)
* @queue_count: Queue counts per queue-type
+ * @words: word access to struct contents
*/
union ionic_lif_config {
struct {
@@ -476,38 +502,59 @@ union ionic_lif_config {
};
/**
+ * enum ionic_lif_rdma_cap_stats - LIF stat type
+ * @IONIC_LIF_RDMA_STAT_GLOBAL: Global stats
+ * @IONIC_LIF_RDMA_STAT_QP: Queue pair stats
+ */
+enum ionic_lif_rdma_cap_stats {
+ IONIC_LIF_RDMA_STAT_GLOBAL = BIT(0),
+ IONIC_LIF_RDMA_STAT_QP = BIT(1),
+};
+
+/**
* struct ionic_lif_identity - LIF identity information (type-specific)
*
* @capabilities: LIF capabilities
*
* @eth: Ethernet identify structure
- * @version: Ethernet identify structure version
- * @max_ucast_filters: Number of perfect unicast addresses supported
- * @max_mcast_filters: Number of perfect multicast addresses supported
- * @min_frame_size: Minimum size of frames to be sent
- * @max_frame_size: Maximum size of frames to be sent
- * @hwstamp_tx_modes: Bitmask of BIT_ULL(enum ionic_txstamp_mode)
- * @hwstamp_rx_filters: Bitmask of enum ionic_pkt_class
- * @config: LIF config struct with features, mtu, mac, q counts
+ * @eth.version: Ethernet identify structure version
+ * @eth.rsvd: reserved byte(s)
+ * @eth.max_ucast_filters: Number of perfect unicast addresses supported
+ * @eth.max_mcast_filters: Number of perfect multicast addresses supported
+ * @eth.min_frame_size: Minimum size of frames to be sent
+ * @eth.max_frame_size: Maximum size of frames to be sent
+ * @eth.rsvd2: reserved byte(s)
+ * @eth.hwstamp_tx_modes: Bitmask of BIT_ULL(enum ionic_txstamp_mode)
+ * @eth.hwstamp_rx_filters: Bitmask of enum ionic_pkt_class
+ * @eth.rsvd3: reserved byte(s)
+ * @eth.config: LIF config struct with features, mtu, mac, q counts
*
* @rdma: RDMA identify structure
- * @version: RDMA version of opcodes and queue descriptors
- * @qp_opcodes: Number of RDMA queue pair opcodes supported
- * @admin_opcodes: Number of RDMA admin opcodes supported
- * @npts_per_lif: Page table size per LIF
- * @nmrs_per_lif: Number of memory regions per LIF
- * @nahs_per_lif: Number of address handles per LIF
- * @max_stride: Max work request stride
- * @cl_stride: Cache line stride
- * @pte_stride: Page table entry stride
- * @rrq_stride: Remote RQ work request stride
- * @rsq_stride: Remote SQ work request stride
- * @dcqcn_profiles: Number of DCQCN profiles
- * @aq_qtype: RDMA Admin Qtype
- * @sq_qtype: RDMA Send Qtype
- * @rq_qtype: RDMA Receive Qtype
- * @cq_qtype: RDMA Completion Qtype
- * @eq_qtype: RDMA Event Qtype
+ * @rdma.version: RDMA capability version
+ * @rdma.qp_opcodes: Number of RDMA queue pair opcodes supported
+ * @rdma.admin_opcodes: Number of RDMA admin opcodes supported
+ * @rdma.minor_version: RDMA capability minor version
+ * @rdma.npts_per_lif: Page table size per LIF
+ * @rdma.nmrs_per_lif: Number of memory regions per LIF
+ * @rdma.nahs_per_lif: Number of address handles per LIF
+ * @rdma.max_stride: Max work request stride
+ * @rdma.cl_stride: Cache line stride
+ * @rdma.pte_stride: Page table entry stride
+ * @rdma.rrq_stride: Remote RQ work request stride
+ * @rdma.rsq_stride: Remote SQ work request stride
+ * @rdma.dcqcn_profiles: Number of DCQCN profiles
+ * @rdma.udma_shift: Log2 number of queues per queue group
+ * @rdma.rsvd_dimensions: Reserved byte
+ * @rdma.page_size_cap: Supported page sizes
+ * @rdma.aq_qtype: RDMA Admin Qtype
+ * @rdma.sq_qtype: RDMA Send Qtype
+ * @rdma.rq_qtype: RDMA Receive Qtype
+ * @rdma.cq_qtype: RDMA Completion Qtype
+ * @rdma.eq_qtype: RDMA Event Qtype
+ * @rdma.stats_type: Supported statistics type
+ * (enum ionic_lif_rdma_cap_stats)
+ * @rdma.rsvd1: Reserved byte(s)
+ * @words: word access to struct contents
*/
union ionic_lif_identity {
struct {
@@ -532,7 +579,7 @@ union ionic_lif_identity {
u8 version;
u8 qp_opcodes;
u8 admin_opcodes;
- u8 rsvd;
+ u8 minor_version;
__le32 npts_per_lif;
__le32 nmrs_per_lif;
__le32 nahs_per_lif;
@@ -542,12 +589,16 @@ union ionic_lif_identity {
u8 rrq_stride;
u8 rsq_stride;
u8 dcqcn_profiles;
- u8 rsvd_dimensions[10];
+ u8 udma_shift;
+ u8 rsvd_dimensions;
+ __le64 page_size_cap;
struct ionic_lif_logical_qtype aq_qtype;
struct ionic_lif_logical_qtype sq_qtype;
struct ionic_lif_logical_qtype rq_qtype;
struct ionic_lif_logical_qtype cq_qtype;
struct ionic_lif_logical_qtype eq_qtype;
+ __le16 stats_type;
+ u8 rsvd1[162];
} __packed rdma;
} __packed;
__le32 words[478];
@@ -558,7 +609,9 @@ union ionic_lif_identity {
* @opcode: Opcode
* @type: LIF type (enum ionic_lif_type)
* @index: LIF index
+ * @rsvd: reserved byte(s)
* @info_pa: Destination address for LIF info (struct ionic_lif_info)
+ * @rsvd2: reserved byte(s)
*/
struct ionic_lif_init_cmd {
u8 opcode;
@@ -572,7 +625,9 @@ struct ionic_lif_init_cmd {
/**
* struct ionic_lif_init_comp - LIF init command completion
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
* @hw_index: Hardware index of the initialized LIF
+ * @rsvd2: reserved byte(s)
*/
struct ionic_lif_init_comp {
u8 status;
@@ -584,9 +639,11 @@ struct ionic_lif_init_comp {
/**
* struct ionic_q_identify_cmd - queue identify command
* @opcode: opcode
+ * @rsvd: reserved byte(s)
* @lif_type: LIF type (enum ionic_lif_type)
* @type: Logical queue type (enum ionic_logical_qtype)
* @ver: Highest queue type version that the driver supports
+ * @rsvd2: reserved byte(s)
*/
struct ionic_q_identify_cmd {
u8 opcode;
@@ -600,8 +657,10 @@ struct ionic_q_identify_cmd {
/**
* struct ionic_q_identify_comp - queue identify command completion
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
* @comp_index: Index in the descriptor ring for which this is the completion
* @ver: Queue type version that can be used with FW
+ * @rsvd2: reserved byte(s)
*/
struct ionic_q_identify_comp {
u8 status;
@@ -615,12 +674,14 @@ struct ionic_q_identify_comp {
* union ionic_q_identity - queue identity information
* @version: Queue type version that can be used with FW
* @supported: Bitfield of queue versions, first bit = ver 0
+ * @rsvd: reserved byte(s)
* @features: Queue features (enum ionic_q_feature, etc)
* @desc_sz: Descriptor size
* @comp_sz: Completion descriptor size
* @sg_desc_sz: Scatter/Gather descriptor size
* @max_sg_elems: Maximum number of Scatter/Gather elements
* @sg_desc_stride: Number of Scatter/Gather elements per descriptor
+ * @words: word access to struct contents
*/
union ionic_q_identity {
struct {
@@ -640,8 +701,10 @@ union ionic_q_identity {
/**
* struct ionic_q_init_cmd - Queue init command
* @opcode: opcode
+ * @rsvd: reserved byte(s)
* @type: Logical queue type
* @ver: Queue type version
+ * @rsvd1: reserved byte(s)
* @lif_index: LIF index
* @index: (LIF, qtype) relative admin queue index
* @intr_index: Interrupt control register index, or Event queue index
@@ -667,6 +730,7 @@ union ionic_q_identity {
* @ring_base: Queue ring base address
* @cq_ring_base: Completion queue ring base address
* @sg_ring_base: Scatter/Gather ring base address
+ * @rsvd2: reserved byte(s)
* @features: Mask of queue features to enable, if not in the flags above.
*/
struct ionic_q_init_cmd {
@@ -698,9 +762,11 @@ struct ionic_q_init_cmd {
/**
* struct ionic_q_init_comp - Queue init command completion
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
* @comp_index: Index in the descriptor ring for which this is the completion
* @hw_index: Hardware Queue ID
* @hw_type: Hardware Queue type
+ * @rsvd2: reserved byte(s)
* @color: Color
*/
struct ionic_q_init_comp {
@@ -800,7 +866,7 @@ enum ionic_txq_desc_opcode {
* will set CWR flag in the first segment if
* CWR is set in the template header, and
* clear CWR in remaining segments.
- * @flags:
+ * flags:
* vlan:
* Insert an L2 VLAN header using @vlan_tci
* encap:
@@ -813,13 +879,14 @@ enum ionic_txq_desc_opcode {
* TSO start
* tso_eot:
* TSO end
- * @num_sg_elems: Number of scatter-gather elements in SG
+ * num_sg_elems: Number of scatter-gather elements in SG
* descriptor
- * @addr: First data buffer's DMA address
+ * addr: First data buffer's DMA address
* (Subsequent data buffers are on txq_sg_desc)
* @len: First data buffer's length, in bytes
* @vlan_tci: VLAN tag to insert in the packet (if requested
* by @V-bit). Includes .1p and .1q tags
+ * @hword0: half word padding
* @hdr_len: Length of packet headers, including
* encapsulating outer header, if applicable
* Valid for opcodes IONIC_TXQ_DESC_OPCODE_CALC_CSUM and
@@ -830,10 +897,12 @@ enum ionic_txq_desc_opcode {
* IONIC_TXQ_DESC_OPCODE_TSO, @hdr_len is up to
* inner-most L4 payload, so inclusive of
* inner-most L4 header.
+ * @hword1: half word padding
* @mss: Desired MSS value for TSO; only applicable for
* IONIC_TXQ_DESC_OPCODE_TSO
* @csum_start: Offset from packet to first byte checked in L4 checksum
* @csum_offset: Offset from csum_start to L4 checksum field
+ * @hword2: half word padding
*/
struct ionic_txq_desc {
__le64 cmd;
@@ -901,6 +970,7 @@ static inline void decode_txq_desc_cmd(u64 cmd, u8 *opcode, u8 *flags,
* struct ionic_txq_sg_elem - Transmit scatter-gather (SG) descriptor element
* @addr: DMA address of SG element data buffer
* @len: Length of SG element data buffer, in bytes
+ * @rsvd: reserved byte(s)
*/
struct ionic_txq_sg_elem {
__le64 addr;
@@ -927,7 +997,9 @@ struct ionic_txq_sg_desc_v1 {
/**
* struct ionic_txq_comp - Ethernet transmit queue completion descriptor
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
* @comp_index: Index in the descriptor ring for which this is the completion
+ * @rsvd2: reserved byte(s)
* @color: Color bit
*/
struct ionic_txq_comp {
@@ -953,6 +1025,7 @@ enum ionic_rxq_desc_opcode {
* receive, including actual bytes received,
* are recorded in Rx completion descriptor.
*
+ * @rsvd: reserved byte(s)
* @len: Data buffer's length, in bytes
* @addr: Data buffer's DMA address
*/
@@ -967,6 +1040,7 @@ struct ionic_rxq_desc {
* struct ionic_rxq_sg_elem - Receive scatter-gather (SG) descriptor element
* @addr: DMA address of SG element data buffer
* @len: Length of SG element data buffer, in bytes
+ * @rsvd: reserved byte(s)
*/
struct ionic_rxq_sg_elem {
__le64 addr;
@@ -1026,7 +1100,7 @@ struct ionic_rxq_sg_desc {
* first IPv4 header. If the receive packet
* contains both a tunnel IPv4 header and a
* transport IPv4 header, the device validates the
- * checksum for the both IPv4 headers.
+ * checksum for both IPv4 headers.
*
* IONIC_RXQ_COMP_CSUM_F_IP_BAD:
* The IPv4 checksum calculated by the device did
@@ -1170,6 +1244,7 @@ enum ionic_pkt_class {
* @lif_index: LIF index
* @index: Queue index
* @oper: Operation (enum ionic_q_control_oper)
+ * @rsvd: reserved byte(s)
*/
struct ionic_q_control_cmd {
u8 opcode;
@@ -1182,7 +1257,7 @@ struct ionic_q_control_cmd {
typedef struct ionic_admin_comp ionic_q_control_comp;
-enum q_control_oper {
+enum ionic_q_control_oper {
IONIC_Q_DISABLE = 0,
IONIC_Q_ENABLE = 1,
IONIC_Q_HANG_RESET = 2,
@@ -1216,7 +1291,7 @@ enum ionic_xcvr_state {
IONIC_XCVR_STATE_SPROM_READ_ERR = 4,
};
-/**
+/*
* enum ionic_xcvr_pid - Supported link modes
*/
enum ionic_xcvr_pid {
@@ -1228,7 +1303,10 @@ enum ionic_xcvr_pid {
IONIC_XCVR_PID_SFP_25GBASE_CR_S = 3,
IONIC_XCVR_PID_SFP_25GBASE_CR_L = 4,
IONIC_XCVR_PID_SFP_25GBASE_CR_N = 5,
-
+ IONIC_XCVR_PID_QSFP_50G_CR2_FC = 6,
+ IONIC_XCVR_PID_QSFP_50G_CR2 = 7,
+ IONIC_XCVR_PID_QSFP_200G_CR4 = 8,
+ IONIC_XCVR_PID_QSFP_400G_CR4 = 9,
/* Fiber */
IONIC_XCVR_PID_QSFP_100G_AOC = 50,
IONIC_XCVR_PID_QSFP_100G_ACC = 51,
@@ -1254,6 +1332,15 @@ enum ionic_xcvr_pid {
IONIC_XCVR_PID_SFP_25GBASE_ACC = 71,
IONIC_XCVR_PID_SFP_10GBASE_T = 72,
IONIC_XCVR_PID_SFP_1000BASE_T = 73,
+ IONIC_XCVR_PID_QSFP_200G_AOC = 74,
+ IONIC_XCVR_PID_QSFP_200G_FR4 = 75,
+ IONIC_XCVR_PID_QSFP_200G_DR4 = 76,
+ IONIC_XCVR_PID_QSFP_200G_SR4 = 77,
+ IONIC_XCVR_PID_QSFP_200G_ACC = 78,
+ IONIC_XCVR_PID_QSFP_400G_FR4 = 79,
+ IONIC_XCVR_PID_QSFP_400G_DR4 = 80,
+ IONIC_XCVR_PID_QSFP_400G_SR4 = 81,
+ IONIC_XCVR_PID_QSFP_400G_VR4 = 82,
};
/**
@@ -1351,9 +1438,12 @@ struct ionic_xcvr_status {
* @fec_type: fec type (enum ionic_port_fec_type)
* @pause_type: pause type (enum ionic_port_pause_type)
* @loopback_mode: loopback mode (enum ionic_port_loopback_mode)
+ * @words: word access to struct contents
*/
union ionic_port_config {
struct {
+#define IONIC_SPEED_400G 400000 /* 400G in Mbps */
+#define IONIC_SPEED_200G 200000 /* 200G in Mbps */
#define IONIC_SPEED_100G 100000 /* 100G in Mbps */
#define IONIC_SPEED_50G 50000 /* 50G in Mbps */
#define IONIC_SPEED_40G 40000 /* 40G in Mbps */
@@ -1382,6 +1472,7 @@ union ionic_port_config {
* @speed: link speed (in Mbps)
* @link_down_count: number of times link went from up to down
* @fec_type: fec type (enum ionic_port_fec_type)
+ * @rsvd: reserved byte(s)
* @xcvr: transceiver status
*/
struct ionic_port_status {
@@ -1399,6 +1490,7 @@ struct ionic_port_status {
* @opcode: opcode
* @index: port index
* @ver: Highest version of identify supported by driver
+ * @rsvd: reserved byte(s)
*/
struct ionic_port_identify_cmd {
u8 opcode;
@@ -1411,6 +1503,7 @@ struct ionic_port_identify_cmd {
* struct ionic_port_identify_comp - Port identify command completion
* @status: Status of the command (enum ionic_status_code)
* @ver: Version of identify returned by device
+ * @rsvd: reserved byte(s)
*/
struct ionic_port_identify_comp {
u8 status;
@@ -1422,7 +1515,9 @@ struct ionic_port_identify_comp {
* struct ionic_port_init_cmd - Port initialization command
* @opcode: opcode
* @index: port index
+ * @rsvd: reserved byte(s)
* @info_pa: destination address for port info (struct ionic_port_info)
+ * @rsvd2: reserved byte(s)
*/
struct ionic_port_init_cmd {
u8 opcode;
@@ -1435,6 +1530,7 @@ struct ionic_port_init_cmd {
/**
* struct ionic_port_init_comp - Port initialization command completion
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
*/
struct ionic_port_init_comp {
u8 status;
@@ -1445,6 +1541,7 @@ struct ionic_port_init_comp {
* struct ionic_port_reset_cmd - Port reset command
* @opcode: opcode
* @index: port index
+ * @rsvd: reserved byte(s)
*/
struct ionic_port_reset_cmd {
u8 opcode;
@@ -1455,6 +1552,7 @@ struct ionic_port_reset_cmd {
/**
* struct ionic_port_reset_comp - Port reset command completion
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
*/
struct ionic_port_reset_comp {
u8 status;
@@ -1510,6 +1608,7 @@ enum ionic_port_attr {
* @opcode: Opcode
* @index: Port index
* @attr: Attribute type (enum ionic_port_attr)
+ * @rsvd: reserved byte(s)
* @state: Port state
* @speed: Port speed
* @mtu: Port MTU
@@ -1518,6 +1617,7 @@ enum ionic_port_attr {
* @pause_type: Port pause type setting
* @loopback_mode: Port loopback mode
* @stats_ctl: Port stats setting
+ * @rsvd2: reserved byte(s)
*/
struct ionic_port_setattr_cmd {
u8 opcode;
@@ -1540,6 +1640,7 @@ struct ionic_port_setattr_cmd {
/**
* struct ionic_port_setattr_comp - Port set attr command completion
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
* @color: Color bit
*/
struct ionic_port_setattr_comp {
@@ -1553,6 +1654,7 @@ struct ionic_port_setattr_comp {
* @opcode: Opcode
* @index: port index
* @attr: Attribute type (enum ionic_port_attr)
+ * @rsvd: reserved byte(s)
*/
struct ionic_port_getattr_cmd {
u8 opcode;
@@ -1564,6 +1666,7 @@ struct ionic_port_getattr_cmd {
/**
* struct ionic_port_getattr_comp - Port get attr command completion
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
* @state: Port state
* @speed: Port speed
* @mtu: Port MTU
@@ -1571,6 +1674,7 @@ struct ionic_port_getattr_cmd {
* @fec_type: Port FEC type setting
* @pause_type: Port pause type setting
* @loopback_mode: Port loopback mode
+ * @rsvd2: reserved byte(s)
* @color: Color bit
*/
struct ionic_port_getattr_comp {
@@ -1593,9 +1697,11 @@ struct ionic_port_getattr_comp {
* struct ionic_lif_status - LIF status register
* @eid: most recent NotifyQ event id
* @port_num: port the LIF is connected to
+ * @rsvd: reserved byte(s)
* @link_status: port status (enum ionic_port_oper_status)
* @link_speed: speed of link in Mbps
* @link_down_count: number of times link went from up to down
+ * @rsvd2: reserved byte(s)
*/
struct ionic_lif_status {
__le64 eid;
@@ -1610,7 +1716,9 @@ struct ionic_lif_status {
/**
* struct ionic_lif_reset_cmd - LIF reset command
* @opcode: opcode
+ * @rsvd: reserved byte(s)
* @index: LIF index
+ * @rsvd2: reserved byte(s)
*/
struct ionic_lif_reset_cmd {
u8 opcode;
@@ -1643,9 +1751,11 @@ enum ionic_dev_attr {
* struct ionic_dev_setattr_cmd - Set Device attributes on the NIC
* @opcode: Opcode
* @attr: Attribute type (enum ionic_dev_attr)
+ * @rsvd: reserved byte(s)
* @state: Device state (enum ionic_dev_state)
* @name: The bus info, e.g. PCI slot-device-function, 0 terminated
* @features: Device features
+ * @rsvd2: reserved byte(s)
*/
struct ionic_dev_setattr_cmd {
u8 opcode;
@@ -1662,7 +1772,9 @@ struct ionic_dev_setattr_cmd {
/**
* struct ionic_dev_setattr_comp - Device set attr command completion
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
* @features: Device features
+ * @rsvd2: reserved byte(s)
* @color: Color bit
*/
struct ionic_dev_setattr_comp {
@@ -1679,6 +1791,7 @@ struct ionic_dev_setattr_comp {
* struct ionic_dev_getattr_cmd - Get Device attributes from the NIC
* @opcode: opcode
* @attr: Attribute type (enum ionic_dev_attr)
+ * @rsvd: reserved byte(s)
*/
struct ionic_dev_getattr_cmd {
u8 opcode;
@@ -1687,9 +1800,11 @@ struct ionic_dev_getattr_cmd {
};
/**
- * struct ionic_dev_setattr_comp - Device set attr command completion
+ * struct ionic_dev_getattr_comp - Device set attr command completion
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
* @features: Device features
+ * @rsvd2: reserved byte(s)
* @color: Color bit
*/
struct ionic_dev_getattr_comp {
@@ -1702,7 +1817,7 @@ struct ionic_dev_getattr_comp {
u8 color;
};
-/**
+/*
* RSS parameters
*/
#define IONIC_RSS_HASH_KEY_SIZE 40
@@ -1726,6 +1841,7 @@ enum ionic_rss_hash_types {
* @IONIC_LIF_ATTR_RSS: LIF RSS attribute
* @IONIC_LIF_ATTR_STATS_CTRL: LIF statistics control attribute
* @IONIC_LIF_ATTR_TXSTAMP: LIF TX timestamping mode
+ * @IONIC_LIF_ATTR_MAX: maximum attribute value
*/
enum ionic_lif_attr {
IONIC_LIF_ATTR_STATE = 0,
@@ -1736,6 +1852,7 @@ enum ionic_lif_attr {
IONIC_LIF_ATTR_RSS = 5,
IONIC_LIF_ATTR_STATS_CTRL = 6,
IONIC_LIF_ATTR_TXSTAMP = 7,
+ IONIC_LIF_ATTR_MAX = 255,
};
/**
@@ -1749,11 +1866,13 @@ enum ionic_lif_attr {
* @mac: Station mac
* @features: Features (enum ionic_eth_hw_features)
* @rss: RSS properties
- * @types: The hash types to enable (see rss_hash_types)
- * @key: The hash secret key
- * @addr: Address for the indirection table shared memory
+ * @rss.types: The hash types to enable (see rss_hash_types)
+ * @rss.key: The hash secret key
+ * @rss.rsvd: reserved byte(s)
+ * @rss.addr: Address for the indirection table shared memory
* @stats_ctl: stats control commands (enum ionic_stats_ctl_cmd)
- * @txstamp: TX Timestamping Mode (enum ionic_txstamp_mode)
+ * @txstamp_mode: TX Timestamping Mode (enum ionic_txstamp_mode)
+ * @rsvd: reserved byte(s)
*/
struct ionic_lif_setattr_cmd {
u8 opcode;
@@ -1772,7 +1891,7 @@ struct ionic_lif_setattr_cmd {
__le64 addr;
} rss;
u8 stats_ctl;
- __le16 txstamp_mode;
+ __le16 txstamp_mode;
u8 rsvd[60];
} __packed;
};
@@ -1780,8 +1899,10 @@ struct ionic_lif_setattr_cmd {
/**
* struct ionic_lif_setattr_comp - LIF set attr command completion
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
* @comp_index: Index in the descriptor ring for which this is the completion
* @features: features (enum ionic_eth_hw_features)
+ * @rsvd2: reserved byte(s)
* @color: Color bit
*/
struct ionic_lif_setattr_comp {
@@ -1800,6 +1921,7 @@ struct ionic_lif_setattr_comp {
* @opcode: Opcode
* @attr: Attribute type (enum ionic_lif_attr)
* @index: LIF index
+ * @rsvd: reserved byte(s)
*/
struct ionic_lif_getattr_cmd {
u8 opcode;
@@ -1811,13 +1933,14 @@ struct ionic_lif_getattr_cmd {
/**
* struct ionic_lif_getattr_comp - LIF get attr command completion
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
* @comp_index: Index in the descriptor ring for which this is the completion
* @state: LIF state (enum ionic_lif_state)
- * @name: The netdev name string, 0 terminated
* @mtu: Mtu
* @mac: Station mac
* @features: Features (enum ionic_eth_hw_features)
- * @txstamp: TX Timestamping Mode (enum ionic_txstamp_mode)
+ * @txstamp_mode: TX Timestamping Mode (enum ionic_txstamp_mode)
+ * @rsvd2: reserved byte(s)
* @color: Color bit
*/
struct ionic_lif_getattr_comp {
@@ -1838,12 +1961,15 @@ struct ionic_lif_getattr_comp {
/**
* struct ionic_lif_setphc_cmd - Set LIF PTP Hardware Clock
* @opcode: Opcode
+ * @rsvd1: reserved byte(s)
* @lif_index: LIF index
+ * @rsvd2: reserved byte(s)
* @tick: Hardware stamp tick of an instant in time.
* @nsec: Nanosecond stamp of the same instant.
* @frac: Fractional nanoseconds at the same instant.
* @mult: Cycle to nanosecond multiplier.
* @shift: Cycle to nanosecond divisor (power of two).
+ * @rsvd3: reserved byte(s)
*/
struct ionic_lif_setphc_cmd {
u8 opcode;
@@ -1870,6 +1996,7 @@ enum ionic_rx_mode {
/**
* struct ionic_rx_mode_set_cmd - Set LIF's Rx mode command
* @opcode: opcode
+ * @rsvd: reserved byte(s)
* @lif_index: LIF index
* @rx_mode: Rx mode flags:
* IONIC_RX_MODE_F_UNICAST: Accept known unicast packets
@@ -1878,6 +2005,7 @@ enum ionic_rx_mode {
* IONIC_RX_MODE_F_PROMISC: Accept any packets
* IONIC_RX_MODE_F_ALLMULTI: Accept any multicast packets
* IONIC_RX_MODE_F_RDMA_SNIFFER: Sniff RDMA packets
+ * @rsvd2: reserved byte(s)
*/
struct ionic_rx_mode_set_cmd {
u8 opcode;
@@ -1904,13 +2032,14 @@ enum ionic_rx_filter_match_type {
* @qid: Queue ID
* @match: Rx filter match type (see IONIC_RX_FILTER_MATCH_xxx)
* @vlan: VLAN filter
- * @vlan: VLAN ID
+ * @vlan.vlan: VLAN ID
* @mac: MAC filter
- * @addr: MAC address (network-byte order)
+ * @mac.addr: MAC address (network-byte order)
* @mac_vlan: MACVLAN filter
- * @vlan: VLAN ID
- * @addr: MAC address (network-byte order)
+ * @mac_vlan.vlan: VLAN ID
+ * @mac_vlan.addr: MAC address (network-byte order)
* @pkt_class: Packet classification filter
+ * @rsvd: reserved byte(s)
*/
struct ionic_rx_filter_add_cmd {
u8 opcode;
@@ -1937,8 +2066,10 @@ struct ionic_rx_filter_add_cmd {
/**
* struct ionic_rx_filter_add_comp - Add LIF Rx filter command completion
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
* @comp_index: Index in the descriptor ring for which this is the completion
* @filter_id: Filter ID
+ * @rsvd2: reserved byte(s)
* @color: Color bit
*/
struct ionic_rx_filter_add_comp {
@@ -1953,8 +2084,10 @@ struct ionic_rx_filter_add_comp {
/**
* struct ionic_rx_filter_del_cmd - Delete LIF Rx filter command
* @opcode: opcode
+ * @rsvd: reserved byte(s)
* @lif_index: LIF index
* @filter_id: Filter ID
+ * @rsvd2: reserved byte(s)
*/
struct ionic_rx_filter_del_cmd {
u8 opcode;
@@ -2000,6 +2133,7 @@ enum ionic_vf_link_status {
* @trust: enable VF trust
* @linkstate: set link up or down
* @stats_pa: set DMA address for VF stats
+ * @pad: reserved byte(s)
*/
struct ionic_vf_setattr_cmd {
u8 opcode;
@@ -2031,6 +2165,7 @@ struct ionic_vf_setattr_comp {
* @opcode: Opcode
* @attr: Attribute type (enum ionic_vf_attr)
* @vf_index: VF index
+ * @rsvd: reserved byte(s)
*/
struct ionic_vf_getattr_cmd {
u8 opcode;
@@ -2064,8 +2199,8 @@ enum ionic_vf_ctrl_opcode {
/**
* struct ionic_vf_ctrl_cmd - VF control command
* @opcode: Opcode for the command
- * @vf_index: VF Index. It is unused if op START_ALL is used.
* @ctrl_opcode: VF control operation type
+ * @vf_index: VF Index. It is unused if op START_ALL is used.
*/
struct ionic_vf_ctrl_cmd {
u8 opcode;
@@ -2086,10 +2221,84 @@ struct ionic_vf_ctrl_comp {
};
/**
+ * struct ionic_discover_cmb_cmd - CMB discovery command
+ * @opcode: Opcode for the command
+ * @rsvd: Reserved bytes
+ */
+struct ionic_discover_cmb_cmd {
+ u8 opcode;
+ u8 rsvd[63];
+};
+
+/**
+ * struct ionic_discover_cmb_comp - CMB discovery command completion
+ * @status: Status of the command (enum ionic_status_code)
+ * @rsvd: Reserved bytes
+ */
+struct ionic_discover_cmb_comp {
+ u8 status;
+ u8 rsvd[15];
+};
+
+#define IONIC_MAX_CMB_REGIONS 16
+#define IONIC_CMB_SHIFT_64K 16
+
+enum ionic_cmb_type {
+ IONIC_CMB_TYPE_DEVMEM = 0,
+ IONIC_CMB_TYPE_EXPDB64 = 1,
+ IONIC_CMB_TYPE_EXPDB128 = 2,
+ IONIC_CMB_TYPE_EXPDB256 = 3,
+ IONIC_CMB_TYPE_EXPDB512 = 4,
+};
+
+/**
+ * union ionic_cmb_region - Configuration for CMB region
+ * @bar_num: CMB mapping number from FW
+ * @cmb_type: Type of CMB this region describes (enum ionic_cmb_type)
+ * @rsvd: Reserved
+ * @offset: Offset within BAR in 64KB pages
+ * @length: Length of the CMB region
+ * @words: 32-bit words for direct access to the entire region
+ */
+union ionic_cmb_region {
+ struct {
+ u8 bar_num;
+ u8 cmb_type;
+ u8 rsvd[6];
+ __le32 offset;
+ __le32 length;
+ } __packed;
+ __le32 words[4];
+};
+
+/**
+ * union ionic_discover_cmb_identity - CMB layout identity structure
+ * @num_regions: Number of CMB regions, up to 16
+ * @flags: Feature and capability bits (bit 0 for express
+ * doorbell, bit 1 for 4K alignment indicator,
+ * bits 31-24 for version information)
+ * @region: CMB mappings region, entry 0 for regular
+ * mapping, entries 1-7 for WQE sizes 64,
+ * 128, 256, 512, 1024, 2048 and 4096 bytes
+ * @words: Full union buffer size
+ */
+union ionic_discover_cmb_identity {
+ struct {
+ __le32 num_regions;
+#define IONIC_CMB_FLAG_EXPDB BIT(0)
+#define IONIC_CMB_FLAG_4KALIGN BIT(1)
+#define IONIC_CMB_FLAG_VERSION 0xff000000
+ __le32 flags;
+ union ionic_cmb_region region[IONIC_MAX_CMB_REGIONS];
+ };
+ __le32 words[478];
+};
+
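As a reading aid, a hedged sketch of walking the CMB layout above; illustrative only, not part of the patch. The unit of @length and the pr_info() reporting are assumptions here.

static void ionic_example_dump_cmb(const union ionic_discover_cmb_identity *ident)
{
	u32 nregions = le32_to_cpu(ident->num_regions);
	u32 flags = le32_to_cpu(ident->flags);
	unsigned int i;

	pr_info("cmb: %u regions, expdb %c, version %u\n",
		nregions, (flags & IONIC_CMB_FLAG_EXPDB) ? 'y' : 'n',
		(flags & IONIC_CMB_FLAG_VERSION) >> 24);

	for (i = 0; i < min_t(u32, nregions, IONIC_MAX_CMB_REGIONS); i++) {
		const union ionic_cmb_region *r = &ident->region[i];

		/* @offset is in 64KB pages; shift to a byte offset within the BAR */
		pr_info("  region %u: bar %u type %u offset %llu length %u\n",
			i, r->bar_num, r->cmb_type,
			(unsigned long long)le32_to_cpu(r->offset) << IONIC_CMB_SHIFT_64K,
			le32_to_cpu(r->length));
	}
}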
+/**
* struct ionic_qos_identify_cmd - QoS identify command
* @opcode: opcode
* @ver: Highest version of identify supported by driver
- *
+ * @rsvd: reserved byte(s)
*/
struct ionic_qos_identify_cmd {
u8 opcode;
@@ -2101,6 +2310,7 @@ struct ionic_qos_identify_cmd {
* struct ionic_qos_identify_comp - QoS identify command completion
* @status: Status of the command (enum ionic_status_code)
* @ver: Version of identify returned by device
+ * @rsvd: reserved byte(s)
*/
struct ionic_qos_identify_comp {
u8 status;
@@ -2118,7 +2328,7 @@ struct ionic_qos_identify_comp {
#define IONIC_QOS_ALL_PCP 0xFF
#define IONIC_DSCP_BLOCK_SIZE 8
-/**
+/*
* enum ionic_qos_class
*/
enum ionic_qos_class {
@@ -2174,6 +2384,7 @@ enum ionic_qos_sched_type {
* @dot1q_pcp: Dot1q pcp value
* @ndscp: Number of valid dscp values in the ip_dscp field
* @ip_dscp: IP dscp values
+ * @words: word access to struct contents
*/
union ionic_qos_config {
struct {
@@ -2219,8 +2430,9 @@ union ionic_qos_config {
* union ionic_qos_identity - QoS identity structure
* @version: Version of the identify structure
* @type: QoS system type
- * @nclasses: Number of usable QoS classes
+ * @rsvd: reserved byte(s)
* @config: Current configuration of classes
+ * @words: word access to struct contents
*/
union ionic_qos_identity {
struct {
@@ -2236,7 +2448,9 @@ union ionic_qos_identity {
* struct ionic_qos_init_cmd - QoS config init command
* @opcode: Opcode
* @group: QoS class id
+ * @rsvd: reserved byte(s)
* @info_pa: destination address for qos info
+ * @rsvd1: reserved byte(s)
*/
struct ionic_qos_init_cmd {
u8 opcode;
@@ -2252,6 +2466,7 @@ typedef struct ionic_admin_comp ionic_qos_init_comp;
* struct ionic_qos_reset_cmd - QoS config reset command
* @opcode: Opcode
* @group: QoS class id
+ * @rsvd: reserved byte(s)
*/
struct ionic_qos_reset_cmd {
u8 opcode;
@@ -2260,8 +2475,10 @@ struct ionic_qos_reset_cmd {
};
/**
- * struct ionic_qos_clear_port_stats_cmd - Qos config reset command
+ * struct ionic_qos_clear_stats_cmd - QoS clear stats command
* @opcode: Opcode
+ * @group_bitmap: bitmap of groups to be cleared
+ * @rsvd: reserved byte(s)
*/
struct ionic_qos_clear_stats_cmd {
u8 opcode;
@@ -2274,6 +2491,7 @@ typedef struct ionic_admin_comp ionic_qos_reset_comp;
/**
* struct ionic_fw_download_cmd - Firmware download command
* @opcode: opcode
+ * @rsvd: reserved byte(s)
* @addr: dma address of the firmware buffer
* @offset: offset of the firmware buffer within the full image
* @length: number of valid bytes in the firmware buffer
@@ -2297,6 +2515,7 @@ typedef struct ionic_admin_comp ionic_fw_download_comp;
* @IONIC_FW_INSTALL_STATUS: Firmware installation status
* @IONIC_FW_ACTIVATE_ASYNC: Activate firmware asynchronously
* @IONIC_FW_ACTIVATE_STATUS: Firmware activate status
+ * @IONIC_FW_UPDATE_CLEANUP: Clean up after an interrupted fw update
*/
enum ionic_fw_control_oper {
IONIC_FW_RESET = 0,
@@ -2312,8 +2531,10 @@ enum ionic_fw_control_oper {
/**
* struct ionic_fw_control_cmd - Firmware control command
* @opcode: opcode
+ * @rsvd: reserved byte(s)
* @oper: firmware control operation (enum ionic_fw_control_oper)
* @slot: slot to activate
+ * @rsvd1: reserved byte(s)
*/
struct ionic_fw_control_cmd {
u8 opcode;
@@ -2326,8 +2547,10 @@ struct ionic_fw_control_cmd {
/**
* struct ionic_fw_control_comp - Firmware control completion
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
* @comp_index: Index in the descriptor ring for which this is the completion
* @slot: Slot where the firmware was installed
+ * @rsvd1: reserved byte(s)
* @color: Color bit
*/
struct ionic_fw_control_comp {
@@ -2346,7 +2569,9 @@ struct ionic_fw_control_comp {
/**
* struct ionic_rdma_reset_cmd - Reset RDMA LIF cmd
* @opcode: opcode
+ * @rsvd: reserved byte(s)
* @lif_index: LIF index
+ * @rsvd2: reserved byte(s)
*
* There is no RDMA specific dev command completion struct. Completion uses
* the common struct ionic_admin_comp. Only the status is indicated.
@@ -2362,6 +2587,7 @@ struct ionic_rdma_reset_cmd {
/**
* struct ionic_rdma_queue_cmd - Create RDMA Queue command
* @opcode: opcode, 52, 53
+ * @rsvd: reserved byte(s)
* @lif_index: LIF index
* @qid_ver: (qid | (RDMA version << 24))
* @cid: intr, eq_id, or cq_id
@@ -2369,6 +2595,7 @@ struct ionic_rdma_reset_cmd {
* @depth_log2: log base two of queue depth
* @stride_log2: log base two of queue stride
* @dma_addr: address of the queue memory
+ * @rsvd2: reserved byte(s)
*
* The same command struct is used to create an RDMA event queue, completion
* queue, or RDMA admin queue. The cid is an interrupt number for an event
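A minimal sketch of the qid_ver encoding documented above, illustrative only and not part of the patch; the 24-bit qid mask is an assumption implied by the 8-bit version shift.

static inline u32 ionic_example_rdma_qid_ver(u32 qid, u8 rdma_version)
{
	/* @qid_ver: (qid | (RDMA version << 24)) */
	return (qid & 0x00ffffff) | ((u32)rdma_version << 24);
}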
@@ -2425,6 +2652,7 @@ struct ionic_notifyq_event {
* @ecode: event code = IONIC_EVENT_LINK_CHANGE
* @link_status: link up/down, with error bits (enum ionic_port_status)
* @link_speed: speed of the network link
+ * @rsvd: reserved byte(s)
*
* Sent when the network link state changes between UP and DOWN
*/
@@ -2442,6 +2670,7 @@ struct ionic_link_change_event {
* @ecode: event code = IONIC_EVENT_RESET
* @reset_code: reset type
* @state: 0=pending, 1=complete, 2=error
+ * @rsvd: reserved byte(s)
*
* Sent when the NIC or some subsystem is going to be or
* has been reset.
@@ -2458,6 +2687,7 @@ struct ionic_reset_event {
* struct ionic_heartbeat_event - Sent periodically by NIC to indicate health
* @eid: event number
* @ecode: event code = IONIC_EVENT_HEARTBEAT
+ * @rsvd: reserved byte(s)
*/
struct ionic_heartbeat_event {
__le64 eid;
@@ -2481,6 +2711,7 @@ struct ionic_log_event {
* struct ionic_xcvr_event - Transceiver change event
* @eid: event number
* @ecode: event code = IONIC_EVENT_XCVR
+ * @rsvd: reserved byte(s)
*/
struct ionic_xcvr_event {
__le64 eid;
@@ -2488,7 +2719,7 @@ struct ionic_xcvr_event {
u8 rsvd[54];
};
-/**
+/*
* struct ionic_port_stats - Port statistics structure
*/
struct ionic_port_stats {
@@ -2646,8 +2877,7 @@ enum ionic_oflow_drop_stats {
IONIC_OFLOW_DROP_MAX,
};
-/**
- * struct port_pb_stats - packet buffers system stats
+/* struct ionic_port_pb_stats - packet buffers system stats
* uses ionic_pb_buffer_drop_stats for drop_counts[]
*/
struct ionic_port_pb_stats {
@@ -2681,7 +2911,9 @@ struct ionic_port_pb_stats {
* @pause_type: supported pause types
* @loopback_mode: supported loopback mode
* @speeds: supported speeds
+ * @rsvd2: reserved byte(s)
* @config: current port configuration
+ * @words: word access to struct contents
*/
union ionic_port_identity {
struct {
@@ -2707,7 +2939,12 @@ union ionic_port_identity {
* @status: Port status data
* @stats: Port statistics data
* @mgmt_stats: Port management statistics data
- * @port_pb_drop_stats: uplink pb drop stats
+ * @sprom_epage: Extended Transceiver sprom
+ * @sprom_page1: Extended Transceiver sprom, page 1
+ * @sprom_page2: Extended Transceiver sprom, page 2
+ * @sprom_page17: Extended Transceiver sprom, page 17
+ * @rsvd: reserved byte(s)
+ * @pb_stats: uplink pb drop stats
*/
struct ionic_port_info {
union ionic_port_config config;
@@ -2716,12 +2953,21 @@ struct ionic_port_info {
struct ionic_port_stats stats;
struct ionic_mgmt_port_stats mgmt_stats;
};
- /* room for pb_stats to start at 2k offset */
- u8 rsvd[760];
+ union {
+ u8 sprom_epage[384];
+ struct {
+ u8 sprom_page1[128];
+ u8 sprom_page2[128];
+ u8 sprom_page17[128];
+ };
+ };
+ u8 rsvd[376];
+
+ /* pb_stats must start at 2k offset */
struct ionic_port_pb_stats pb_stats;
};
-/**
+/*
* struct ionic_lif_stats - LIF statistics structure
*/
struct ionic_lif_stats {
@@ -2908,6 +3154,8 @@ union ionic_dev_cmd {
struct ionic_vf_getattr_cmd vf_getattr;
struct ionic_vf_ctrl_cmd vf_ctrl;
+ struct ionic_discover_cmb_cmd discover_cmb;
+
struct ionic_lif_identify_cmd lif_identify;
struct ionic_lif_init_cmd lif_init;
struct ionic_lif_reset_cmd lif_reset;
@@ -2947,6 +3195,8 @@ union ionic_dev_cmd_comp {
struct ionic_vf_getattr_comp vf_getattr;
struct ionic_vf_ctrl_comp vf_ctrl;
+ struct ionic_discover_cmb_comp discover_cmb;
+
struct ionic_lif_identify_comp lif_identify;
struct ionic_lif_init_comp lif_init;
ionic_lif_reset_comp lif_reset;
@@ -2983,8 +3233,10 @@ struct ionic_hwstamp_regs {
* bit 4-7 - 4 bit generation number, changes on fw restart
* @fw_heartbeat: Firmware heartbeat counter
* @serial_num: Serial number
+ * @rsvd_pad1024: reserved byte(s)
* @fw_version: Firmware version
- * @hwstamp_regs: Hardware current timestamp registers
+ * @hwstamp: Hardware current timestamp registers
+ * @words: word access to struct contents
*/
union ionic_dev_info_regs {
#define IONIC_DEVINFO_FWVERS_BUFLEN 32
@@ -3014,7 +3266,9 @@ union ionic_dev_info_regs {
* @done: Done indicator, bit 0 == 1 when command is complete
* @cmd: Opcode-specific command bytes
* @comp: Opcode-specific response bytes
+ * @rsvd: reserved byte(s)
* @data: Opcode-specific side-data
+ * @words: word access to struct contents
*/
union ionic_dev_cmd_regs {
struct {
@@ -3032,6 +3286,7 @@ union ionic_dev_cmd_regs {
* union ionic_dev_regs - Device register format for bar 0 page 0
* @info: Device info registers
* @devcmd: Device command registers
+ * @words: word access to struct contents
*/
union ionic_dev_regs {
struct {
@@ -3083,9 +3338,16 @@ union ionic_adminq_comp {
#define IONIC_BAR0_DEV_CMD_DATA_REGS_OFFSET 0x0c00
#define IONIC_BAR0_INTR_STATUS_OFFSET 0x1000
#define IONIC_BAR0_INTR_CTRL_OFFSET 0x2000
+
+/* BAR2 */
+#define IONIC_BAR2_CMB_ENTRY_SIZE 0x800000
#define IONIC_DEV_CMD_DONE 0x00000001
-#define IONIC_ASIC_TYPE_CAPRI 0
+#define IONIC_ASIC_TYPE_NONE 0
+#define IONIC_ASIC_TYPE_CAPRI 1
+#define IONIC_ASIC_TYPE_ELBA 2
+#define IONIC_ASIC_TYPE_GIGLIO 3
+#define IONIC_ASIC_TYPE_SALINA 4
/**
* struct ionic_doorbell - Doorbell register layout
@@ -3098,6 +3360,7 @@ union ionic_adminq_comp {
* interrupts when armed.
* @qid_lo: Queue destination for the producer index and flags (low bits)
* @qid_hi: Queue destination for the producer index and flags (high bits)
+ * @rsvd2: reserved byte(s)
*/
struct ionic_doorbell {
__le16 p_index;
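For illustration only (not part of the patch), the qid_lo/qid_hi split described above could be composed as below; the 8-bit/16-bit field widths are assumptions.

static inline void ionic_example_dbell_qid(u32 qid, u8 *qid_lo, __le16 *qid_hi)
{
	*qid_lo = qid & 0xff;				/* low bits of the queue id */
	*qid_hi = cpu_to_le16((qid >> 8) & 0xffff);	/* high bits of the queue id */
}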
@@ -3131,6 +3394,7 @@ struct ionic_identity {
union ionic_port_identity port;
union ionic_qos_identity qos;
union ionic_q_identity txq;
+ union ionic_discover_cmb_identity cmb_layout;
};
#endif /* _IONIC_IF_H_ */
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 7f0c6cdc375e..058eea86e141 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -13,11 +13,13 @@
#include <linux/cpumask.h>
#include <linux/crash_dump.h>
#include <linux/vmalloc.h>
+#include <net/page_pool/helpers.h>
#include "ionic.h"
#include "ionic_bus.h"
#include "ionic_dev.h"
#include "ionic_lif.h"
+#include "ionic_aux.h"
#include "ionic_txrx.h"
#include "ionic_ethtool.h"
#include "ionic_debugfs.h"
@@ -46,8 +48,9 @@ static int ionic_start_queues(struct ionic_lif *lif);
static void ionic_stop_queues(struct ionic_lif *lif);
static void ionic_lif_queue_identify(struct ionic_lif *lif);
-static int ionic_xdp_queues_config(struct ionic_lif *lif);
-static void ionic_xdp_unregister_rxq_info(struct ionic_queue *q);
+static void ionic_xdp_rxqs_prog_update(struct ionic_lif *lif);
+static void ionic_unregister_rxq_info(struct ionic_queue *q);
+static int ionic_register_rxq_info(struct ionic_queue *q, unsigned int napi_id);
static void ionic_dim_work(struct work_struct *work)
{
@@ -126,13 +129,13 @@ static void ionic_lif_deferred_work(struct work_struct *work)
} while (true);
}
-void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
+void ionic_lif_deferred_enqueue(struct ionic_lif *lif,
struct ionic_deferred_work *work)
{
- spin_lock_bh(&def->lock);
- list_add_tail(&work->list, &def->list);
- spin_unlock_bh(&def->lock);
- schedule_work(&def->work);
+ spin_lock_bh(&lif->deferred.lock);
+ list_add_tail(&work->list, &lif->deferred.list);
+ spin_unlock_bh(&lif->deferred.lock);
+ queue_work(lif->ionic->wq, &lif->deferred.work);
}
static void ionic_link_status_check(struct ionic_lif *lif)
@@ -207,19 +210,12 @@ void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep)
}
work->type = IONIC_DW_TYPE_LINK_STATUS;
- ionic_lif_deferred_enqueue(&lif->deferred, work);
+ ionic_lif_deferred_enqueue(lif, work);
} else {
ionic_link_status_check(lif);
}
}
-static void ionic_napi_deadline(struct timer_list *timer)
-{
- struct ionic_qcq *qcq = container_of(timer, struct ionic_qcq, napi_deadline);
-
- napi_schedule(&qcq->napi);
-}
-
static irqreturn_t ionic_isr(int irq, void *data)
{
struct napi_struct *napi = data;
@@ -237,39 +233,58 @@ static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq)
const char *name;
if (lif->registered)
- name = lif->netdev->name;
+ name = netdev_name(lif->netdev);
else
name = dev_name(dev);
snprintf(intr->name, sizeof(intr->name),
- "%s-%s-%s", IONIC_DRV_NAME, name, q->name);
+ "%.5s-%.16s-%.8s", IONIC_DRV_NAME, name, q->name);
return devm_request_irq(dev, intr->vector, ionic_isr,
0, intr->name, &qcq->napi);
}
-static int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
+int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
{
struct ionic *ionic = lif->ionic;
- int index;
+ int index, err;
index = find_first_zero_bit(ionic->intrs, ionic->nintrs);
- if (index == ionic->nintrs) {
- netdev_warn(lif->netdev, "%s: no intr, index=%d nintrs=%d\n",
- __func__, index, ionic->nintrs);
+ if (index == ionic->nintrs)
return -ENOSPC;
- }
set_bit(index, ionic->intrs);
ionic_intr_init(&ionic->idev, intr, index);
+ err = ionic_bus_get_irq(ionic, intr->index);
+ if (err < 0) {
+ clear_bit(index, ionic->intrs);
+ return err;
+ }
+
+ intr->vector = err;
+
return 0;
}
+EXPORT_SYMBOL_NS(ionic_intr_alloc, "NET_IONIC");
-static void ionic_intr_free(struct ionic *ionic, int index)
+void ionic_intr_free(struct ionic_lif *lif, int index)
+{
+ if (index != IONIC_INTR_INDEX_NOT_ASSIGNED && index < lif->ionic->nintrs)
+ clear_bit(index, lif->ionic->intrs);
+}
+EXPORT_SYMBOL_NS(ionic_intr_free, "NET_IONIC");
+
+static void ionic_irq_aff_notify(struct irq_affinity_notify *notify,
+ const cpumask_t *mask)
+{
+ struct ionic_intr_info *intr = container_of(notify, struct ionic_intr_info, aff_notify);
+
+ cpumask_copy(*intr->affinity_mask, mask);
+}
+
+static void ionic_irq_aff_release(struct kref __always_unused *ref)
{
- if (index != IONIC_INTR_INDEX_NOT_ASSIGNED && index < ionic->nintrs)
- clear_bit(index, ionic->intrs);
}
static int ionic_qcq_enable(struct ionic_qcq *qcq)
@@ -304,12 +319,12 @@ static int ionic_qcq_enable(struct ionic_qcq *qcq)
if (ret)
return ret;
- if (qcq->napi.poll)
- napi_enable(&qcq->napi);
-
if (qcq->flags & IONIC_QCQ_F_INTR) {
+ napi_enable(&qcq->napi);
+ irq_set_affinity_notifier(qcq->intr.vector,
+ &qcq->intr.aff_notify);
irq_set_affinity_hint(qcq->intr.vector,
- &qcq->intr.affinity_mask);
+ *qcq->intr.affinity_mask);
ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
IONIC_INTR_MASK_CLEAR);
}
@@ -339,13 +354,15 @@ static int ionic_qcq_disable(struct ionic_lif *lif, struct ionic_qcq *qcq, int f
if (qcq->flags & IONIC_QCQ_F_INTR) {
struct ionic_dev *idev = &lif->ionic->idev;
+ if (lif->doorbell_wa)
+ cancel_work_sync(&qcq->doorbell_napi_work);
cancel_work_sync(&qcq->dim.work);
ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
IONIC_INTR_MASK_SET);
synchronize_irq(qcq->intr.vector);
+ irq_set_affinity_notifier(qcq->intr.vector, NULL);
irq_set_affinity_hint(qcq->intr.vector, NULL);
napi_disable(&qcq->napi);
- del_timer_sync(&qcq->napi_deadline);
}
/* If there was a previous fw communication error, don't bother with
@@ -373,6 +390,7 @@ static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
if (!(qcq->flags & IONIC_QCQ_F_INITED))
return;
+ ionic_unregister_rxq_info(&qcq->q);
if (qcq->flags & IONIC_QCQ_F_INTR) {
ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
IONIC_INTR_MASK_SET);
@@ -390,7 +408,7 @@ static void ionic_qcq_intr_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
irq_set_affinity_hint(qcq->intr.vector, NULL);
devm_free_irq(lif->ionic->dev, qcq->intr.vector, &qcq->napi);
qcq->intr.vector = 0;
- ionic_intr_free(lif->ionic, qcq->intr.index);
+ ionic_intr_free(lif, qcq->intr.index);
qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
}
@@ -430,9 +448,10 @@ static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
qcq->sg_base_pa = 0;
}
- ionic_xdp_unregister_rxq_info(&qcq->q);
- ionic_qcq_intr_free(lif, qcq);
+ page_pool_destroy(qcq->q.page_pool);
+ qcq->q.page_pool = NULL;
+ ionic_qcq_intr_free(lif, qcq);
vfree(qcq->q.info);
qcq->q.info = NULL;
}
@@ -480,11 +499,11 @@ static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
{
n_qcq->intr.vector = src_qcq->intr.vector;
n_qcq->intr.index = src_qcq->intr.index;
- n_qcq->napi_qcq = src_qcq->napi_qcq;
}
static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
+ cpumask_var_t *affinity_mask;
int err;
if (!(qcq->flags & IONIC_QCQ_F_INTR)) {
@@ -499,13 +518,6 @@ static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qc
goto err_out;
}
- err = ionic_bus_get_irq(lif->ionic, qcq->intr.index);
- if (err < 0) {
- netdev_warn(lif->netdev, "no vector for %s: %d\n",
- qcq->q.name, err);
- goto err_out_free_intr;
- }
- qcq->intr.vector = err;
ionic_intr_mask_assert(lif->ionic->idev.intr_ctrl, qcq->intr.index,
IONIC_INTR_MASK_SET);
@@ -516,16 +528,25 @@ static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qc
}
/* try to get the irq on the local numa node first */
- qcq->intr.cpu = cpumask_local_spread(qcq->intr.index,
- dev_to_node(lif->ionic->dev));
- if (qcq->intr.cpu != -1)
- cpumask_set_cpu(qcq->intr.cpu, &qcq->intr.affinity_mask);
+ affinity_mask = &lif->ionic->affinity_masks[qcq->intr.index];
+ if (cpumask_empty(*affinity_mask)) {
+ unsigned int cpu;
+
+ cpu = cpumask_local_spread(qcq->intr.index,
+ dev_to_node(lif->ionic->dev));
+ if (cpu != -1)
+ cpumask_set_cpu(cpu, *affinity_mask);
+ }
+
+ qcq->intr.affinity_mask = affinity_mask;
+ qcq->intr.aff_notify.notify = ionic_irq_aff_notify;
+ qcq->intr.aff_notify.release = ionic_irq_aff_release;
netdev_dbg(lif->netdev, "%s: Interrupt index %d\n", qcq->q.name, qcq->intr.index);
return 0;
err_out_free_intr:
- ionic_intr_free(lif->ionic, qcq->intr.index);
+ ionic_intr_free(lif, qcq->intr.index);
err_out:
return err;
}
@@ -537,7 +558,8 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
unsigned int cq_desc_size,
unsigned int sg_desc_size,
unsigned int desc_info_size,
- unsigned int pid, struct ionic_qcq **qcq)
+ unsigned int pid, struct bpf_prog *xdp_prog,
+ struct ionic_qcq **qcq)
{
struct ionic_dev *idev = &lif->ionic->idev;
struct device *dev = lif->ionic->dev;
@@ -563,6 +585,31 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
goto err_out_free_qcq;
}
+ if (type == IONIC_QTYPE_RXQ) {
+ struct page_pool_params pp_params = {
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .order = 0,
+ .pool_size = num_descs,
+ .nid = NUMA_NO_NODE,
+ .dev = lif->ionic->dev,
+ .napi = &new->napi,
+ .dma_dir = DMA_FROM_DEVICE,
+ .max_len = PAGE_SIZE,
+ .netdev = lif->netdev,
+ };
+
+ if (xdp_prog)
+ pp_params.dma_dir = DMA_BIDIRECTIONAL;
+
+ new->q.page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(new->q.page_pool)) {
+ netdev_err(lif->netdev, "Cannot create page_pool\n");
+ err = PTR_ERR(new->q.page_pool);
+ new->q.page_pool = NULL;
+ goto err_out_free_q_info;
+ }
+ }
+
new->q.type = type;
new->q.max_sg_elems = lif->qtype_info[type].max_sg_elems;
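A brief aside, illustrative only and not part of the patch: with the queue's page_pool created above, Rx buffer allocation can use the standard helpers, since DMA mapping and sync are delegated to the pool. The helper below is the stock page_pool API, not an ionic function.

static struct page *ionic_example_rx_get_page(struct page_pool *pool)
{
	/* the pool was created with PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
	 * so the returned page is already DMA-mapped for the device
	 */
	return page_pool_dev_alloc_pages(pool);
}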
@@ -570,12 +617,12 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
desc_size, sg_desc_size, pid);
if (err) {
netdev_err(lif->netdev, "Cannot initialize queue\n");
- goto err_out_free_q_info;
+ goto err_out_free_page_pool;
}
err = ionic_alloc_qcq_interrupt(lif, new);
if (err)
- goto err_out_free_q_info;
+ goto err_out_free_page_pool;
err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
if (err) {
@@ -626,7 +673,7 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
new->cmb_order = order_base_2(new->cmb_q_size / PAGE_SIZE);
err = ionic_get_cmb(lif, &new->cmb_pgid, &new->cmb_q_base_pa,
- new->cmb_order);
+ new->cmb_order, 0, NULL);
if (err) {
netdev_err(lif->netdev,
"Cannot allocate queue order %d from cmb: err %d\n",
@@ -676,6 +723,8 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
INIT_WORK(&new->dim.work, ionic_dim_work);
new->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
+ if (lif->doorbell_wa)
+ INIT_WORK(&new->doorbell_napi_work, ionic_doorbell_napi_work);
*qcq = new;
@@ -692,8 +741,10 @@ err_out_free_q:
err_out_free_irq:
if (flags & IONIC_QCQ_F_INTR) {
devm_free_irq(dev, new->intr.vector, &new->napi);
- ionic_intr_free(lif->ionic, new->intr.index);
+ ionic_intr_free(lif, new->intr.index);
}
+err_out_free_page_pool:
+ page_pool_destroy(new->q.page_pool);
err_out_free_q_info:
vfree(new->q.info);
err_out_free_qcq:
@@ -716,7 +767,7 @@ static int ionic_qcqs_alloc(struct ionic_lif *lif)
sizeof(struct ionic_admin_comp),
0,
sizeof(struct ionic_admin_desc_info),
- lif->kern_pid, &lif->adminqcq);
+ lif->kern_pid, NULL, &lif->adminqcq);
if (err)
return err;
ionic_debugfs_add_qcq(lif, lif->adminqcq);
@@ -729,7 +780,7 @@ static int ionic_qcqs_alloc(struct ionic_lif *lif)
sizeof(union ionic_notifyq_comp),
0,
sizeof(struct ionic_admin_desc_info),
- lif->kern_pid, &lif->notifyqcq);
+ lif->kern_pid, NULL, &lif->notifyqcq);
if (err)
goto err_out;
ionic_debugfs_add_qcq(lif, lif->notifyqcq);
@@ -834,11 +885,8 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
q->dbell_deadline = IONIC_TX_DOORBELL_DEADLINE;
q->dbell_jiffies = jiffies;
- if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) {
+ if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
netif_napi_add(lif->netdev, &qcq->napi, ionic_tx_napi);
- qcq->napi_qcq = qcq;
- timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0);
- }
qcq->flags |= IONIC_QCQ_F_INITED;
@@ -910,9 +958,11 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi);
else
netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi);
-
- qcq->napi_qcq = qcq;
- timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0);
+ err = ionic_register_rxq_info(q, qcq->napi.napi_id);
+ if (err) {
+ netif_napi_del(&qcq->napi);
+ return err;
+ }
qcq->flags |= IONIC_QCQ_F_INITED;
@@ -948,7 +998,7 @@ int ionic_lif_create_hwstamp_txq(struct ionic_lif *lif)
err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, txq_i, "hwstamp_tx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz,
sizeof(struct ionic_tx_desc_info),
- lif->kern_pid, &txq);
+ lif->kern_pid, NULL, &txq);
if (err)
goto err_qcq_alloc;
@@ -1008,7 +1058,7 @@ int ionic_lif_create_hwstamp_rxq(struct ionic_lif *lif)
err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, rxq_i, "hwstamp_rx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz,
sizeof(struct ionic_rx_desc_info),
- lif->kern_pid, &rxq);
+ lif->kern_pid, NULL, &rxq);
if (err)
goto err_qcq_alloc;
@@ -1025,7 +1075,7 @@ int ionic_lif_create_hwstamp_rxq(struct ionic_lif *lif)
goto err_qcq_init;
if (test_bit(IONIC_LIF_F_UP, lif->state)) {
- ionic_rx_fill(&rxq->q);
+ ionic_rx_fill(&rxq->q, NULL);
err = ionic_qcq_enable(rxq);
if (err)
goto err_qcq_enable;
@@ -1168,7 +1218,6 @@ static int ionic_adminq_napi(struct napi_struct *napi, int budget)
struct ionic_dev *idev = &lif->ionic->idev;
unsigned long irqflags;
unsigned int flags = 0;
- bool resched = false;
int rx_work = 0;
int tx_work = 0;
int n_work = 0;
@@ -1184,6 +1233,7 @@ static int ionic_adminq_napi(struct napi_struct *napi, int budget)
if (lif->adminqcq && lif->adminqcq->flags & IONIC_QCQ_F_INITED)
a_work = ionic_cq_service(&lif->adminqcq->cq, budget,
ionic_adminq_service, NULL, NULL);
+
spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
if (lif->hwstamp_rxq)
@@ -1191,7 +1241,7 @@ static int ionic_adminq_napi(struct napi_struct *napi, int budget)
ionic_rx_service, NULL, NULL);
if (lif->hwstamp_txq)
- tx_work = ionic_tx_cq_service(&lif->hwstamp_txq->cq, budget);
+ tx_work = ionic_tx_cq_service(&lif->hwstamp_txq->cq, budget, !!budget);
work_done = max(max(n_work, a_work), max(rx_work, tx_work));
if (work_done < budget && napi_complete_done(napi, work_done)) {
@@ -1205,15 +1255,14 @@ static int ionic_adminq_napi(struct napi_struct *napi, int budget)
ionic_intr_credits(idev->intr_ctrl, intr->index, credits, flags);
}
- if (!a_work && ionic_adminq_poke_doorbell(&lif->adminqcq->q))
- resched = true;
- if (lif->hwstamp_rxq && !rx_work && ionic_rxq_poke_doorbell(&lif->hwstamp_rxq->q))
- resched = true;
- if (lif->hwstamp_txq && !tx_work && ionic_txq_poke_doorbell(&lif->hwstamp_txq->q))
- resched = true;
- if (resched)
- mod_timer(&lif->adminqcq->napi_deadline,
- jiffies + IONIC_NAPI_DEADLINE);
+ if (lif->doorbell_wa) {
+ if (!a_work)
+ ionic_adminq_poke_doorbell(&lif->adminqcq->q);
+ if (lif->hwstamp_rxq && !rx_work)
+ ionic_rxq_poke_doorbell(&lif->hwstamp_rxq->q);
+ if (lif->hwstamp_txq && !tx_work)
+ ionic_txq_poke_doorbell(&lif->hwstamp_txq->q);
+ }
return work_done;
}
@@ -1385,7 +1434,7 @@ static void ionic_ndo_set_rx_mode(struct net_device *netdev)
}
work->type = IONIC_DW_TYPE_RX_MODE;
netdev_dbg(lif->netdev, "deferred: rx_mode\n");
- ionic_lif_deferred_enqueue(&lif->deferred, work);
+ ionic_lif_deferred_enqueue(lif, work);
}
static __le64 ionic_netdev_features_to_nic(netdev_features_t features)
@@ -1761,13 +1810,13 @@ static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
/* if we're not running, nothing more to do */
if (!netif_running(netdev)) {
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
return 0;
}
mutex_lock(&lif->queue_lock);
ionic_stop_queues_reconfig(lif);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
err = ionic_start_queues_reconfig(lif);
mutex_unlock(&lif->queue_lock);
@@ -2035,7 +2084,7 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz,
sizeof(struct ionic_tx_desc_info),
- lif->kern_pid, &lif->txqcqs[i]);
+ lif->kern_pid, NULL, &lif->txqcqs[i]);
if (err)
goto err_out;
@@ -2067,7 +2116,8 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz,
sizeof(struct ionic_rx_desc_info),
- lif->kern_pid, &lif->rxqcqs[i]);
+ lif->kern_pid, lif->xdp_prog,
+ &lif->rxqcqs[i]);
if (err)
goto err_out;
@@ -2132,9 +2182,7 @@ static int ionic_txrx_enable(struct ionic_lif *lif)
int derr = 0;
int i, err;
- err = ionic_xdp_queues_config(lif);
- if (err)
- return err;
+ ionic_xdp_rxqs_prog_update(lif);
for (i = 0; i < lif->nxqs; i++) {
if (!(lif->rxqcqs[i] && lif->txqcqs[i])) {
@@ -2143,7 +2191,8 @@ static int ionic_txrx_enable(struct ionic_lif *lif)
goto err_out;
}
- ionic_rx_fill(&lif->rxqcqs[i]->q);
+ ionic_rx_fill(&lif->rxqcqs[i]->q,
+ READ_ONCE(lif->rxqcqs[i]->q.xdp_prog));
err = ionic_qcq_enable(lif->rxqcqs[i]);
if (err)
goto err_out;
@@ -2156,7 +2205,7 @@ static int ionic_txrx_enable(struct ionic_lif *lif)
}
if (lif->hwstamp_rxq) {
- ionic_rx_fill(&lif->hwstamp_rxq->q);
+ ionic_rx_fill(&lif->hwstamp_rxq->q, NULL);
err = ionic_qcq_enable(lif->hwstamp_rxq);
if (err)
goto err_out_hwstamp_rx;
@@ -2181,7 +2230,7 @@ err_out:
derr = ionic_qcq_disable(lif, lif->rxqcqs[i], derr);
}
- ionic_xdp_queues_config(lif);
+ ionic_xdp_rxqs_prog_update(lif);
return err;
}
@@ -2286,20 +2335,6 @@ static int ionic_stop(struct net_device *netdev)
return 0;
}
-static int ionic_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
- struct ionic_lif *lif = netdev_priv(netdev);
-
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return ionic_lif_hwstamp_set(lif, ifr);
- case SIOCGHWTSTAMP:
- return ionic_lif_hwstamp_get(lif, ifr);
- default:
- return -EOPNOTSUPP;
- }
-}
-
static int ionic_get_vf_config(struct net_device *netdev,
int vf, struct ifla_vf_info *ivf)
{
@@ -2640,7 +2675,7 @@ static void ionic_vf_attr_replay(struct ionic_lif *lif)
ionic_vf_start(ionic);
}
-static void ionic_xdp_unregister_rxq_info(struct ionic_queue *q)
+static void ionic_unregister_rxq_info(struct ionic_queue *q)
{
struct xdp_rxq_info *xi;
@@ -2654,7 +2689,7 @@ static void ionic_xdp_unregister_rxq_info(struct ionic_queue *q)
kfree(xi);
}
-static int ionic_xdp_register_rxq_info(struct ionic_queue *q, unsigned int napi_id)
+static int ionic_register_rxq_info(struct ionic_queue *q, unsigned int napi_id)
{
struct xdp_rxq_info *rxq_info;
int err;
@@ -2665,15 +2700,15 @@ static int ionic_xdp_register_rxq_info(struct ionic_queue *q, unsigned int napi_
err = xdp_rxq_info_reg(rxq_info, q->lif->netdev, q->index, napi_id);
if (err) {
- dev_err(q->dev, "Queue %d xdp_rxq_info_reg failed, err %d\n",
- q->index, err);
+ netdev_err(q->lif->netdev, "q%d xdp_rxq_info_reg failed, err %d\n",
+ q->index, err);
goto err_out;
}
- err = xdp_rxq_info_reg_mem_model(rxq_info, MEM_TYPE_PAGE_ORDER0, NULL);
+ err = xdp_rxq_info_reg_mem_model(rxq_info, MEM_TYPE_PAGE_POOL, q->page_pool);
if (err) {
- dev_err(q->dev, "Queue %d xdp_rxq_info_reg_mem_model failed, err %d\n",
- q->index, err);
+ netdev_err(q->lif->netdev, "q%d xdp_rxq_info_reg_mem_model failed, err %d\n",
+ q->index, err);
xdp_rxq_info_unreg(rxq_info);
goto err_out;
}
@@ -2687,44 +2722,20 @@ err_out:
return err;
}
-static int ionic_xdp_queues_config(struct ionic_lif *lif)
+static void ionic_xdp_rxqs_prog_update(struct ionic_lif *lif)
{
+ struct bpf_prog *xdp_prog;
unsigned int i;
- int err;
if (!lif->rxqcqs)
- return 0;
-
- /* There's no need to rework memory if not going to/from NULL program.
- * If there is no lif->xdp_prog, there should also be no q.xdp_rxq_info
- * This way we don't need to keep an *xdp_prog in every queue struct.
- */
- if (!lif->xdp_prog == !lif->rxqcqs[0]->q.xdp_rxq_info)
- return 0;
+ return;
+ xdp_prog = READ_ONCE(lif->xdp_prog);
for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) {
struct ionic_queue *q = &lif->rxqcqs[i]->q;
- if (q->xdp_rxq_info) {
- ionic_xdp_unregister_rxq_info(q);
- continue;
- }
-
- err = ionic_xdp_register_rxq_info(q, lif->rxqcqs[i]->napi.napi_id);
- if (err) {
- dev_err(lif->ionic->dev, "failed to register RX queue %d info for XDP, err %d\n",
- i, err);
- goto err_out;
- }
+ WRITE_ONCE(q->xdp_prog, xdp_prog);
}
-
- return 0;
-
-err_out:
- for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++)
- ionic_xdp_unregister_rxq_info(&lif->rxqcqs[i]->q);
-
- return err;
}
static int ionic_xdp_config(struct net_device *netdev, struct netdev_bpf *bpf)
@@ -2754,11 +2765,17 @@ static int ionic_xdp_config(struct net_device *netdev, struct netdev_bpf *bpf)
if (!netif_running(netdev)) {
old_prog = xchg(&lif->xdp_prog, bpf->prog);
+ } else if (lif->xdp_prog && bpf->prog) {
+ old_prog = xchg(&lif->xdp_prog, bpf->prog);
+ ionic_xdp_rxqs_prog_update(lif);
} else {
+ struct ionic_queue_params qparams;
+
+ ionic_init_queue_params(lif, &qparams);
+ qparams.xdp_prog = bpf->prog;
mutex_lock(&lif->queue_lock);
- ionic_stop_queues_reconfig(lif);
+ ionic_reconfigure_queues(lif, &qparams);
old_prog = xchg(&lif->xdp_prog, bpf->prog);
- ionic_start_queues_reconfig(lif);
mutex_unlock(&lif->queue_lock);
}
@@ -2781,7 +2798,6 @@ static int ionic_xdp(struct net_device *netdev, struct netdev_bpf *bpf)
static const struct net_device_ops ionic_netdev_ops = {
.ndo_open = ionic_open,
.ndo_stop = ionic_stop,
- .ndo_eth_ioctl = ionic_eth_ioctl,
.ndo_start_xmit = ionic_start_xmit,
.ndo_bpf = ionic_xdp,
.ndo_xdp_xmit = ionic_xdp_xmit,
@@ -2802,6 +2818,8 @@ static const struct net_device_ops ionic_netdev_ops = {
.ndo_get_vf_config = ionic_get_vf_config,
.ndo_set_vf_link_state = ionic_set_vf_link_state,
.ndo_get_vf_stats = ionic_get_vf_stats,
+ .ndo_hwtstamp_get = ionic_hwstamp_get,
+ .ndo_hwtstamp_set = ionic_hwstamp_set,
};
static int ionic_cmb_reconfig(struct ionic_lif *lif,
@@ -2860,13 +2878,23 @@ err_out:
static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b)
{
- /* only swapping the queues, not the napi, flags, or other stuff */
+ /* only swapping the queues and napi, not flags or other stuff */
+ swap(a->napi, b->napi);
+
+ if (a->q.type == IONIC_QTYPE_RXQ) {
+ swap(a->q.page_pool, b->q.page_pool);
+ a->q.page_pool->p.napi = &a->napi;
+ if (b->q.page_pool) /* is NULL when increasing queue count */
+ b->q.page_pool->p.napi = &b->napi;
+ }
+
swap(a->q.features, b->q.features);
swap(a->q.num_descs, b->q.num_descs);
swap(a->q.desc_size, b->q.desc_size);
swap(a->q.base, b->q.base);
swap(a->q.base_pa, b->q.base_pa);
swap(a->q.info, b->q.info);
+ swap(a->q.xdp_prog, b->q.xdp_prog);
swap(a->q.xdp_rxq_info, b->q.xdp_rxq_info);
swap(a->q.partner, b->q.partner);
swap(a->q_base, b->q_base);
@@ -2917,7 +2945,8 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
}
if (qparam->nxqs != lif->nxqs ||
qparam->nrxq_descs != lif->nrxq_descs ||
- qparam->rxq_features != lif->rxq_features) {
+ qparam->rxq_features != lif->rxq_features ||
+ qparam->xdp_prog != lif->xdp_prog) {
rx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->nrxqs_per_lif,
sizeof(struct ionic_qcq *), GFP_KERNEL);
if (!rx_qcqs) {
@@ -2948,7 +2977,7 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
4, desc_sz, comp_sz, sg_desc_sz,
sizeof(struct ionic_tx_desc_info),
- lif->kern_pid, &lif->txqcqs[i]);
+ lif->kern_pid, NULL, &lif->txqcqs[i]);
if (err)
goto err_out;
}
@@ -2957,7 +2986,7 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz,
sizeof(struct ionic_tx_desc_info),
- lif->kern_pid, &tx_qcqs[i]);
+ lif->kern_pid, NULL, &tx_qcqs[i]);
if (err)
goto err_out;
}
@@ -2979,7 +3008,7 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
4, desc_sz, comp_sz, sg_desc_sz,
sizeof(struct ionic_rx_desc_info),
- lif->kern_pid, &lif->rxqcqs[i]);
+ lif->kern_pid, NULL, &lif->rxqcqs[i]);
if (err)
goto err_out;
}
@@ -2988,11 +3017,12 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz,
sizeof(struct ionic_rx_desc_info),
- lif->kern_pid, &rx_qcqs[i]);
+ lif->kern_pid, qparam->xdp_prog, &rx_qcqs[i]);
if (err)
goto err_out;
rx_qcqs[i]->q.features = qparam->rxq_features;
+ rx_qcqs[i]->q.xdp_prog = qparam->xdp_prog;
}
}
@@ -3141,6 +3171,44 @@ err_out:
return err;
}
+static int ionic_affinity_masks_alloc(struct ionic *ionic)
+{
+ cpumask_var_t *affinity_masks;
+ int nintrs = ionic->nintrs;
+ int i;
+
+ affinity_masks = kcalloc(nintrs, sizeof(cpumask_var_t), GFP_KERNEL);
+ if (!affinity_masks)
+ return -ENOMEM;
+
+ for (i = 0; i < nintrs; i++) {
+ if (!zalloc_cpumask_var_node(&affinity_masks[i], GFP_KERNEL,
+ dev_to_node(ionic->dev)))
+ goto err_out;
+ }
+
+ ionic->affinity_masks = affinity_masks;
+
+ return 0;
+
+err_out:
+ for (--i; i >= 0; i--)
+ free_cpumask_var(affinity_masks[i]);
+ kfree(affinity_masks);
+
+ return -ENOMEM;
+}
+
+static void ionic_affinity_masks_free(struct ionic *ionic)
+{
+ int i;
+
+ for (i = 0; i < ionic->nintrs; i++)
+ free_cpumask_var(ionic->affinity_masks[i]);
+ kfree(ionic->affinity_masks);
+ ionic->affinity_masks = NULL;
+}
+
int ionic_lif_alloc(struct ionic *ionic)
{
struct device *dev = ionic->dev;
@@ -3171,7 +3239,7 @@ int ionic_lif_alloc(struct ionic *ionic)
netdev->netdev_ops = &ionic_netdev_ops;
ionic_ethtool_set_ops(netdev);
- netdev->watchdog_timeo = 2 * HZ;
+ netdev->watchdog_timeo = 5 * HZ;
netif_carrier_off(netdev);
lif->identity = lid;
@@ -3185,7 +3253,7 @@ int ionic_lif_alloc(struct ionic *ionic)
lif->netdev->min_mtu = max_t(unsigned int, ETH_MIN_MTU,
le32_to_cpu(lif->identity->eth.min_frame_size));
lif->netdev->max_mtu =
- le32_to_cpu(lif->identity->eth.max_frame_size) - ETH_HLEN - VLAN_HLEN;
+ le32_to_cpu(lif->identity->eth.max_frame_size) - VLAN_ETH_HLEN;
lif->neqs = ionic->neqs_per_lif;
lif->nxqs = ionic->ntxqs_per_lif;
@@ -3213,6 +3281,7 @@ int ionic_lif_alloc(struct ionic *ionic)
mutex_init(&lif->queue_lock);
mutex_init(&lif->config_lock);
+ mutex_init(&lif->adev_lock);
spin_lock_init(&lif->adminq_lock);
@@ -3232,11 +3301,15 @@ int ionic_lif_alloc(struct ionic *ionic)
ionic_debugfs_add_lif(lif);
+ err = ionic_affinity_masks_alloc(ionic);
+ if (err)
+ goto err_out_free_lif_info;
+
/* allocate control queues and txrx queue arrays */
ionic_lif_queue_identify(lif);
err = ionic_qcqs_alloc(lif);
if (err)
- goto err_out_free_lif_info;
+ goto err_out_free_affinity_masks;
/* allocate rss indirection table */
tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
@@ -3258,11 +3331,14 @@ int ionic_lif_alloc(struct ionic *ionic)
err_out_free_qcqs:
ionic_qcqs_free(lif);
+err_out_free_affinity_masks:
+ ionic_affinity_masks_free(lif->ionic);
err_out_free_lif_info:
dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
lif->info = NULL;
lif->info_pa = 0;
err_out_free_mutex:
+ mutex_destroy(&lif->adev_lock);
mutex_destroy(&lif->config_lock);
mutex_destroy(&lif->queue_lock);
err_out_free_netdev:
@@ -3298,6 +3374,7 @@ static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
netif_device_detach(lif->netdev);
+ ionic_auxbus_unregister(ionic->lif);
mutex_lock(&lif->queue_lock);
if (test_bit(IONIC_LIF_F_UP, lif->state)) {
dev_info(ionic->dev, "Surprise FW stop, stopping queues\n");
@@ -3358,6 +3435,9 @@ int ionic_restart_lif(struct ionic_lif *lif)
clear_bit(IONIC_LIF_F_FW_RESET, lif->state);
ionic_link_status_check_request(lif, CAN_SLEEP);
netif_device_attach(lif->netdev);
+ ionic_queue_doorbell_check(ionic, IONIC_NAPI_DEADLINE);
+
+ ionic_auxbus_register(ionic->lif);
return 0;
@@ -3388,6 +3468,7 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
* just need to reanimate it.
*/
ionic_init_devinfo(ionic);
+ ionic_reset(ionic);
err = ionic_identify(ionic);
if (err)
goto err_out;
@@ -3430,18 +3511,17 @@ void ionic_lif_free(struct ionic_lif *lif)
if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
ionic_lif_reset(lif);
+ ionic_affinity_masks_free(lif->ionic);
+
/* free lif info */
kfree(lif->identity);
dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
lif->info = NULL;
lif->info_pa = 0;
- /* unmap doorbell page */
- ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
- lif->kern_dbpage = NULL;
-
mutex_destroy(&lif->config_lock);
mutex_destroy(&lif->queue_lock);
+ mutex_destroy(&lif->adev_lock);
/* free netdev & lif */
ionic_debugfs_del_lif(lif);
@@ -3465,6 +3545,9 @@ void ionic_lif_deinit(struct ionic_lif *lif)
ionic_lif_qcq_deinit(lif, lif->notifyqcq);
ionic_lif_qcq_deinit(lif, lif->adminqcq);
+ ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
+ lif->kern_dbpage = NULL;
+
ionic_lif_reset(lif);
}
@@ -3503,14 +3586,11 @@ static int ionic_lif_adminq_init(struct ionic_lif *lif)
netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi);
- qcq->napi_qcq = qcq;
- timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0);
-
napi_enable(&qcq->napi);
if (qcq->flags & IONIC_QCQ_F_INTR) {
irq_set_affinity_hint(qcq->intr.vector,
- &qcq->intr.affinity_mask);
+ *qcq->intr.affinity_mask);
ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
IONIC_INTR_MASK_CLEAR);
}
@@ -3697,6 +3777,7 @@ int ionic_lif_init(struct ionic_lif *lif)
goto err_out_notifyq_deinit;
lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT;
+ lif->doorbell_wa = ionic_doorbell_wa(lif->ionic);
set_bit(IONIC_LIF_F_INITED, lif->state);
@@ -3716,10 +3797,6 @@ err_out_adminq_deinit:
return err;
}
-static void ionic_lif_notify_work(struct work_struct *ws)
-{
-}
-
static void ionic_lif_set_netdev_info(struct ionic_lif *lif)
{
struct ionic_admin_ctx ctx = {
@@ -3731,7 +3808,7 @@ static void ionic_lif_set_netdev_info(struct ionic_lif *lif)
},
};
- strscpy(ctx.cmd.lif_setattr.name, lif->netdev->name,
+ strscpy(ctx.cmd.lif_setattr.name, netdev_name(lif->netdev),
sizeof(ctx.cmd.lif_setattr.name));
ionic_adminq_post_wait(lif, &ctx);
@@ -3770,8 +3847,6 @@ int ionic_lif_register(struct ionic_lif *lif)
ionic_lif_register_phc(lif);
- INIT_WORK(&lif->ionic->nb_work, ionic_lif_notify_work);
-
lif->ionic->nb.notifier_call = ionic_lif_notify;
err = register_netdevice_notifier(&lif->ionic->nb);
@@ -3781,8 +3856,8 @@ int ionic_lif_register(struct ionic_lif *lif)
/* only register LIF0 for now */
err = register_netdev(lif->netdev);
if (err) {
- dev_err(lif->ionic->dev, "Cannot register net device, aborting\n");
- ionic_lif_unregister_phc(lif);
+ dev_err(lif->ionic->dev, "Cannot register net device: %d, aborting\n", err);
+ ionic_lif_unregister(lif);
return err;
}
@@ -3797,7 +3872,6 @@ void ionic_lif_unregister(struct ionic_lif *lif)
{
if (lif->ionic->nb.notifier_call) {
unregister_netdevice_notifier(&lif->ionic->nb);
- cancel_work_sync(&lif->ionic->nb_work);
lif->ionic->nb.notifier_call = NULL;
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index 08f4266fe2aa..8e10f66dc50e 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -6,10 +6,11 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
-#include <uapi/linux/net_tstamp.h>
+#include <linux/net_tstamp.h>
#include <linux/dim.h>
#include <linux/pci.h>
#include "ionic_rx_filter.h"
+#include "ionic_api.h"
#define IONIC_ADMINQ_LENGTH 16 /* must be a power of two */
#define IONIC_NOTIFYQ_LENGTH 64 /* must be a power of two */
@@ -84,12 +85,11 @@ struct ionic_qcq {
u32 cmb_pgid;
u32 cmb_order;
struct dim dim;
- struct timer_list napi_deadline;
struct ionic_queue q;
struct ionic_cq cq;
struct napi_struct napi;
- struct ionic_qcq *napi_qcq;
struct ionic_intr_info intr;
+ struct work_struct doorbell_napi_work;
struct dentry *dentry;
};
@@ -207,11 +207,12 @@ struct ionic_lif {
unsigned int nxqs;
unsigned int ntxq_descs;
unsigned int nrxq_descs;
- u32 rx_copybreak;
u64 rxq_features;
- u16 rx_mode;
u64 hw_features;
+ u16 rx_copybreak;
+ u16 rx_mode;
bool registered;
+ bool doorbell_wa;
u16 lif_type;
unsigned int link_down_count;
unsigned int nmcast;
@@ -225,12 +226,14 @@ struct ionic_lif {
dma_addr_t info_pa;
u32 info_sz;
struct ionic_qtype_info qtype_info[IONIC_QTYPE_MAX];
+ struct ionic_aux_dev *ionic_adev;
+ struct mutex adev_lock; /* lock for aux_dev actions */
- u16 rss_types;
u8 rss_hash_key[IONIC_RSS_HASH_KEY_SIZE];
u8 *rss_ind_tbl;
dma_addr_t rss_ind_tbl_pa;
u32 rss_ind_tbl_sz;
+ u16 rss_types;
struct ionic_rx_filters rx_filters;
u32 rx_coalesce_usecs; /* what the user asked for */
@@ -251,7 +254,7 @@ struct ionic_phc {
struct timecounter tc;
struct mutex config_lock; /* lock for ts_config */
- struct hwtstamp_config ts_config;
+ struct kernel_hwtstamp_config ts_config;
u64 ts_config_rx_filt;
u32 ts_config_tx_mode;
@@ -268,6 +271,7 @@ struct ionic_queue_params {
unsigned int ntxq_descs;
unsigned int nrxq_descs;
u64 rxq_features;
+ struct bpf_prog *xdp_prog;
bool intr_split;
bool cmb_tx;
bool cmb_rx;
@@ -280,6 +284,7 @@ static inline void ionic_init_queue_params(struct ionic_lif *lif,
qparam->ntxq_descs = lif->ntxq_descs;
qparam->nrxq_descs = lif->nrxq_descs;
qparam->rxq_features = lif->rxq_features;
+ qparam->xdp_prog = lif->xdp_prog;
qparam->intr_split = test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
qparam->cmb_tx = test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state);
qparam->cmb_rx = test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state);
@@ -333,7 +338,7 @@ static inline bool ionic_txq_hwstamp_enabled(struct ionic_queue *q)
void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep);
void ionic_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *ns);
-void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
+void ionic_lif_deferred_enqueue(struct ionic_lif *lif,
struct ionic_deferred_work *work);
int ionic_lif_alloc(struct ionic *ionic);
int ionic_lif_init(struct ionic_lif *lif);
@@ -357,8 +362,11 @@ int ionic_lif_size(struct ionic *ionic);
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
void ionic_lif_hwstamp_replay(struct ionic_lif *lif);
void ionic_lif_hwstamp_recreate_queues(struct ionic_lif *lif);
-int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr);
-int ionic_lif_hwstamp_get(struct ionic_lif *lif, struct ifreq *ifr);
+int ionic_hwstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
+int ionic_hwstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config);
ktime_t ionic_lif_phc_ktime(struct ionic_lif *lif, u64 counter);
void ionic_lif_register_phc(struct ionic_lif *lif);
void ionic_lif_unregister_phc(struct ionic_lif *lif);
@@ -368,12 +376,15 @@ void ionic_lif_free_phc(struct ionic_lif *lif);
static inline void ionic_lif_hwstamp_replay(struct ionic_lif *lif) {}
static inline void ionic_lif_hwstamp_recreate_queues(struct ionic_lif *lif) {}
-static inline int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr)
+static inline int ionic_hwstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
}
-static inline int ionic_lif_hwstamp_get(struct ionic_lif *lif, struct ifreq *ifr)
+static inline int ionic_hwstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
{
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index c1259324b0be..14dc055be3e9 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -72,7 +72,7 @@ static const char *ionic_error_to_str(enum ionic_status_code code)
}
}
-static int ionic_error_to_errno(enum ionic_status_code code)
+int ionic_error_to_errno(enum ionic_status_code code)
{
switch (code) {
case IONIC_RC_SUCCESS:
@@ -81,8 +81,9 @@ static int ionic_error_to_errno(enum ionic_status_code code)
case IONIC_RC_EQTYPE:
case IONIC_RC_EQID:
case IONIC_RC_EINVAL:
- case IONIC_RC_ENOSUPP:
return -EINVAL;
+ case IONIC_RC_ENOSUPP:
+ return -EOPNOTSUPP;
case IONIC_RC_EPERM:
return -EPERM;
case IONIC_RC_ENOENT:
@@ -113,6 +114,7 @@ static int ionic_error_to_errno(enum ionic_status_code code)
return -EIO;
}
}
+EXPORT_SYMBOL_NS(ionic_error_to_errno, "NET_IONIC");
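Since ionic_error_to_errno() is now exported in the NET_IONIC symbol namespace, a consumer module would import that namespace before calling it. A minimal sketch, not part of the patch, with the consumer context assumed:

/* in an auxiliary driver built against NET_IONIC */
MODULE_IMPORT_NS("NET_IONIC");

static int example_admin_status_to_errno(u8 status)
{
	return status ? ionic_error_to_errno(status) : 0;
}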
static const char *ionic_opcode_to_str(enum ionic_cmd_opcode opcode)
{
@@ -287,7 +289,7 @@ bool ionic_notifyq_service(struct ionic_cq *cq)
clear_bit(IONIC_LIF_F_FW_STOPPING, lif->state);
} else {
work->type = IONIC_DW_TYPE_LIF_RESET;
- ionic_lif_deferred_enqueue(&lif->deferred, work);
+ ionic_lif_deferred_enqueue(lif, work);
}
}
break;
@@ -479,6 +481,7 @@ int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
{
return __ionic_adminq_post_wait(lif, ctx, true);
}
+EXPORT_SYMBOL_NS(ionic_adminq_post_wait, "NET_IONIC");
int ionic_adminq_post_wait_nomsg(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
{
@@ -515,9 +518,9 @@ static int __ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds,
unsigned long start_time;
unsigned long max_wait;
unsigned long duration;
- int done = 0;
bool fw_up;
int opcode;
+ bool done;
int err;
/* Wait for dev cmd to complete, retrying if we get EAGAIN,
@@ -525,6 +528,7 @@ static int __ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds,
*/
max_wait = jiffies + (max_seconds * HZ);
try_again:
+ done = false;
opcode = idev->opcode;
start_time = jiffies;
for (fw_up = ionic_is_fw_running(idev);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_phc.c b/drivers/net/ethernet/pensando/ionic/ionic_phc.c
index 7505efdff8e9..05b44fc482f8 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_phc.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_phc.c
@@ -65,11 +65,12 @@ static u64 ionic_hwstamp_rx_filt(int config_rx_filter)
}
static int ionic_lif_hwstamp_set_ts_config(struct ionic_lif *lif,
- struct hwtstamp_config *new_ts)
+ struct kernel_hwtstamp_config *new_ts,
+ struct netlink_ext_ack *extack)
{
+ struct kernel_hwtstamp_config *config;
+ struct kernel_hwtstamp_config ts = {};
struct ionic *ionic = lif->ionic;
- struct hwtstamp_config *config;
- struct hwtstamp_config ts;
int tx_mode = 0;
u64 rx_filt = 0;
int err, err2;
@@ -99,12 +100,16 @@ static int ionic_lif_hwstamp_set_ts_config(struct ionic_lif *lif,
tx_mode = ionic_hwstamp_tx_mode(config->tx_type);
if (tx_mode < 0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "TX time stamping mode isn't supported");
err = tx_mode;
goto err_queues;
}
mask = cpu_to_le64(BIT_ULL(tx_mode));
if ((ionic->ident.lif.eth.hwstamp_tx_modes & mask) != mask) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "TX time stamping mode isn't supported");
err = -ERANGE;
goto err_queues;
}
@@ -124,32 +129,47 @@ static int ionic_lif_hwstamp_set_ts_config(struct ionic_lif *lif,
if (tx_mode) {
err = ionic_lif_create_hwstamp_txq(lif);
- if (err)
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Error creating TX timestamp queue");
goto err_queues;
+ }
}
if (rx_filt) {
err = ionic_lif_create_hwstamp_rxq(lif);
- if (err)
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Error creating RX timestamp queue");
goto err_queues;
+ }
}
if (tx_mode != lif->phc->ts_config_tx_mode) {
err = ionic_lif_set_hwstamp_txmode(lif, tx_mode);
- if (err)
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Error enabling TX timestamp mode");
goto err_txmode;
+ }
}
if (rx_filt != lif->phc->ts_config_rx_filt) {
err = ionic_lif_set_hwstamp_rxfilt(lif, rx_filt);
- if (err)
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Error enabling RX timestamp mode");
goto err_rxfilt;
+ }
}
if (rx_all != (lif->phc->ts_config.rx_filter == HWTSTAMP_FILTER_ALL)) {
err = ionic_lif_config_hwstamp_rxq_all(lif, rx_all);
- if (err)
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Error enabling RX timestamp mode");
goto err_rxall;
+ }
}
memcpy(&lif->phc->ts_config, config, sizeof(*config));
@@ -183,28 +203,24 @@ err_queues:
return err;
}
-int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr)
+int ionic_hwstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config config;
+ struct ionic_lif *lif = netdev_priv(netdev);
int err;
if (!lif->phc || !lif->phc->ptp)
return -EOPNOTSUPP;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
mutex_lock(&lif->queue_lock);
- err = ionic_lif_hwstamp_set_ts_config(lif, &config);
+ err = ionic_lif_hwstamp_set_ts_config(lif, config, extack);
mutex_unlock(&lif->queue_lock);
if (err) {
netdev_info(lif->netdev, "hwstamp set failed: %d\n", err);
return err;
}
- if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
- return -EFAULT;
-
return 0;
}
@@ -216,7 +232,7 @@ void ionic_lif_hwstamp_replay(struct ionic_lif *lif)
return;
mutex_lock(&lif->queue_lock);
- err = ionic_lif_hwstamp_set_ts_config(lif, NULL);
+ err = ionic_lif_hwstamp_set_ts_config(lif, NULL, NULL);
mutex_unlock(&lif->queue_lock);
if (err)
netdev_info(lif->netdev, "hwstamp replay failed: %d\n", err);
@@ -246,19 +262,18 @@ void ionic_lif_hwstamp_recreate_queues(struct ionic_lif *lif)
mutex_unlock(&lif->phc->config_lock);
}
-int ionic_lif_hwstamp_get(struct ionic_lif *lif, struct ifreq *ifr)
+int ionic_hwstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
{
- struct hwtstamp_config config;
+ struct ionic_lif *lif = netdev_priv(netdev);
if (!lif->phc || !lif->phc->ptp)
return -EOPNOTSUPP;
mutex_lock(&lif->phc->config_lock);
- memcpy(&config, &lif->phc->ts_config, sizeof(config));
+ memcpy(config, &lif->phc->ts_config, sizeof(*config));
mutex_unlock(&lif->phc->config_lock);
- if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
- return -EFAULT;
return 0;
}
@@ -290,7 +305,7 @@ static u64 ionic_hwstamp_read(struct ionic *ionic,
return (u64)tick_low | ((u64)tick_high << 32);
}
-static u64 ionic_cc_read(const struct cyclecounter *cc)
+static u64 ionic_cc_read(struct cyclecounter *cc)
{
struct ionic_phc *phc = container_of(cc, struct ionic_phc, cc);
struct ionic *ionic = phc->lif->ionic;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
index 1ee2f285cb42..528114877677 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
@@ -312,8 +312,8 @@ static int ionic_lif_filter_add(struct ionic_lif *lif,
int err = 0;
ctx.cmd.rx_filter_add = *ac;
- ctx.cmd.rx_filter_add.opcode = IONIC_CMD_RX_FILTER_ADD,
- ctx.cmd.rx_filter_add.lif_index = cpu_to_le16(lif->index),
+ ctx.cmd.rx_filter_add.opcode = IONIC_CMD_RX_FILTER_ADD;
+ ctx.cmd.rx_filter_add.lif_index = cpu_to_le16(lif->index);
spin_lock_bh(&lif->rx_filters.lock);
f = ionic_rx_filter_find(lif, &ctx.cmd.rx_filter_add);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 5dba6d2d633c..301ebee2fdc5 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -6,6 +6,7 @@
#include <linux/if_vlan.h>
#include <net/ip6_checksum.h>
#include <net/netdev_queues.h>
+#include <net/page_pool/helpers.h>
#include "ionic.h"
#include "ionic_lif.h"
@@ -23,10 +24,15 @@ static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q,
static void ionic_tx_clean(struct ionic_queue *q,
struct ionic_tx_desc_info *desc_info,
- struct ionic_txq_comp *comp);
+ struct ionic_txq_comp *comp,
+ bool in_napi);
static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell)
{
+ /* Ensure TX descriptor writes reach memory before NIC reads them.
+ * Prevents device from fetching stale descriptors.
+ */
+ dma_wmb();
ionic_q_post(q, ring_dbell);
}
@@ -117,108 +123,57 @@ static void *ionic_rx_buf_va(struct ionic_buf_info *buf_info)
static dma_addr_t ionic_rx_buf_pa(struct ionic_buf_info *buf_info)
{
- return buf_info->dma_addr + buf_info->page_offset;
-}
-
-static unsigned int ionic_rx_buf_size(struct ionic_buf_info *buf_info)
-{
- return min_t(u32, IONIC_MAX_BUF_LEN, IONIC_PAGE_SIZE - buf_info->page_offset);
+ return page_pool_get_dma_addr(buf_info->page) + buf_info->page_offset;
}
-static int ionic_rx_page_alloc(struct ionic_queue *q,
- struct ionic_buf_info *buf_info)
+static void __ionic_rx_put_buf(struct ionic_queue *q,
+ struct ionic_buf_info *buf_info,
+ bool recycle_direct)
{
- struct device *dev = q->dev;
- dma_addr_t dma_addr;
- struct page *page;
-
- page = alloc_pages(IONIC_PAGE_GFP_MASK, 0);
- if (unlikely(!page)) {
- net_err_ratelimited("%s: %s page alloc failed\n",
- dev_name(dev), q->name);
- q_to_rx_stats(q)->alloc_err++;
- return -ENOMEM;
- }
-
- dma_addr = dma_map_page(dev, page, 0,
- IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev, dma_addr))) {
- __free_pages(page, 0);
- net_err_ratelimited("%s: %s dma map failed\n",
- dev_name(dev), q->name);
- q_to_rx_stats(q)->dma_map_err++;
- return -EIO;
- }
-
- buf_info->dma_addr = dma_addr;
- buf_info->page = page;
- buf_info->page_offset = 0;
-
- return 0;
-}
-
-static void ionic_rx_page_free(struct ionic_queue *q,
- struct ionic_buf_info *buf_info)
-{
- struct device *dev = q->dev;
-
- if (unlikely(!buf_info)) {
- net_err_ratelimited("%s: %s invalid buf_info in free\n",
- dev_name(dev), q->name);
- return;
- }
-
if (!buf_info->page)
return;
- dma_unmap_page(dev, buf_info->dma_addr, IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
- __free_pages(buf_info->page, 0);
+ page_pool_put_full_page(q->page_pool, buf_info->page, recycle_direct);
buf_info->page = NULL;
+ buf_info->len = 0;
+ buf_info->page_offset = 0;
}
-static bool ionic_rx_buf_recycle(struct ionic_queue *q,
- struct ionic_buf_info *buf_info, u32 len)
-{
- u32 size;
-
- /* don't re-use pages allocated in low-mem condition */
- if (page_is_pfmemalloc(buf_info->page))
- return false;
-
- /* don't re-use buffers from non-local numa nodes */
- if (page_to_nid(buf_info->page) != numa_mem_id())
- return false;
-
- size = ALIGN(len, q->xdp_rxq_info ? IONIC_PAGE_SIZE : IONIC_PAGE_SPLIT_SZ);
- buf_info->page_offset += size;
- if (buf_info->page_offset >= IONIC_PAGE_SIZE)
- return false;
- get_page(buf_info->page);
+static void ionic_rx_put_buf(struct ionic_queue *q,
+ struct ionic_buf_info *buf_info)
+{
+ __ionic_rx_put_buf(q, buf_info, false);
+}
- return true;
+static void ionic_rx_put_buf_direct(struct ionic_queue *q,
+ struct ionic_buf_info *buf_info)
+{
+ __ionic_rx_put_buf(q, buf_info, true);
}
static void ionic_rx_add_skb_frag(struct ionic_queue *q,
struct sk_buff *skb,
struct ionic_buf_info *buf_info,
- u32 off, u32 len,
+ u32 headroom, u32 len,
bool synced)
{
if (!synced)
- dma_sync_single_range_for_cpu(q->dev, ionic_rx_buf_pa(buf_info),
- off, len, DMA_FROM_DEVICE);
+ page_pool_dma_sync_for_cpu(q->page_pool,
+ buf_info->page,
+ buf_info->page_offset + headroom,
+ len);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- buf_info->page, buf_info->page_offset + off,
- len,
- IONIC_PAGE_SIZE);
+ buf_info->page, buf_info->page_offset + headroom,
+ len, buf_info->len);
- if (!ionic_rx_buf_recycle(q, buf_info, len)) {
- dma_unmap_page(q->dev, buf_info->dma_addr,
- IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
- buf_info->page = NULL;
- }
+ /* napi_gro_frags() will release/recycle the
+ * page_pool buffers from the frags list
+ */
+ buf_info->page = NULL;
+ buf_info->len = 0;
+ buf_info->page_offset = 0;
}
static struct sk_buff *ionic_rx_build_skb(struct ionic_queue *q,
@@ -243,12 +198,13 @@ static struct sk_buff *ionic_rx_build_skb(struct ionic_queue *q,
q_to_rx_stats(q)->alloc_err++;
return NULL;
}
+ skb_mark_for_recycle(skb);
if (headroom)
frag_len = min_t(u16, len,
IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN);
else
- frag_len = min_t(u16, len, ionic_rx_buf_size(buf_info));
+ frag_len = min_t(u16, len, IONIC_PAGE_SIZE);
if (unlikely(!buf_info->page))
goto err_bad_buf_page;
@@ -259,7 +215,7 @@ static struct sk_buff *ionic_rx_build_skb(struct ionic_queue *q,
for (i = 0; i < num_sg_elems; i++, buf_info++) {
if (unlikely(!buf_info->page))
goto err_bad_buf_page;
- frag_len = min_t(u16, len, ionic_rx_buf_size(buf_info));
+ frag_len = min_t(u16, len, buf_info->len);
ionic_rx_add_skb_frag(q, skb, buf_info, 0, frag_len, synced);
len -= frag_len;
}
@@ -276,11 +232,13 @@ static struct sk_buff *ionic_rx_copybreak(struct net_device *netdev,
struct ionic_rx_desc_info *desc_info,
unsigned int headroom,
unsigned int len,
+ unsigned int num_sg_elems,
bool synced)
{
struct ionic_buf_info *buf_info;
struct device *dev = q->dev;
struct sk_buff *skb;
+ int i;
buf_info = &desc_info->bufs[0];
@@ -291,54 +249,52 @@ static struct sk_buff *ionic_rx_copybreak(struct net_device *netdev,
q_to_rx_stats(q)->alloc_err++;
return NULL;
}
-
- if (unlikely(!buf_info->page)) {
- dev_kfree_skb(skb);
- return NULL;
- }
+ skb_mark_for_recycle(skb);
if (!synced)
- dma_sync_single_range_for_cpu(dev, ionic_rx_buf_pa(buf_info),
- headroom, len, DMA_FROM_DEVICE);
+ page_pool_dma_sync_for_cpu(q->page_pool,
+ buf_info->page,
+ buf_info->page_offset + headroom,
+ len);
+
skb_copy_to_linear_data(skb, ionic_rx_buf_va(buf_info) + headroom, len);
- dma_sync_single_range_for_device(dev, ionic_rx_buf_pa(buf_info),
- headroom, len, DMA_FROM_DEVICE);
skb_put(skb, len);
skb->protocol = eth_type_trans(skb, netdev);
+ /* recycle the Rx buffer now that we're done with it */
+ ionic_rx_put_buf_direct(q, buf_info);
+ buf_info++;
+ for (i = 0; i < num_sg_elems; i++, buf_info++)
+ ionic_rx_put_buf_direct(q, buf_info);
+
return skb;
}
static void ionic_xdp_tx_desc_clean(struct ionic_queue *q,
- struct ionic_tx_desc_info *desc_info)
+ struct ionic_tx_desc_info *desc_info,
+ bool in_napi)
{
- unsigned int nbufs = desc_info->nbufs;
- struct ionic_buf_info *buf_info;
- struct device *dev = q->dev;
- int i;
+ struct xdp_frame_bulk bq;
- if (!nbufs)
+ if (!desc_info->nbufs)
return;
- buf_info = desc_info->bufs;
- dma_unmap_single(dev, buf_info->dma_addr,
- buf_info->len, DMA_TO_DEVICE);
- if (desc_info->act == XDP_TX)
- __free_pages(buf_info->page, 0);
- buf_info->page = NULL;
+ xdp_frame_bulk_init(&bq);
+ rcu_read_lock(); /* need for xdp_return_frame_bulk */
- buf_info++;
- for (i = 1; i < nbufs + 1 && buf_info->page; i++, buf_info++) {
- dma_unmap_page(dev, buf_info->dma_addr,
- buf_info->len, DMA_TO_DEVICE);
- if (desc_info->act == XDP_TX)
- __free_pages(buf_info->page, 0);
- buf_info->page = NULL;
+ if (desc_info->act == XDP_TX) {
+ if (likely(in_napi))
+ xdp_return_frame_rx_napi(desc_info->xdpf);
+ else
+ xdp_return_frame(desc_info->xdpf);
+ } else if (desc_info->act == XDP_REDIRECT) {
+ ionic_tx_desc_unmap_bufs(q, desc_info);
+ xdp_return_frame_bulk(desc_info->xdpf, &bq);
}
- if (desc_info->act == XDP_REDIRECT)
- xdp_return_frame(desc_info->xdpf);
+ xdp_flush_frame_bulk(&bq);
+ rcu_read_unlock();
desc_info->nbufs = 0;
desc_info->xdpf = NULL;
@@ -362,9 +318,17 @@ static int ionic_xdp_post_frame(struct ionic_queue *q, struct xdp_frame *frame,
buf_info = desc_info->bufs;
stats = q_to_tx_stats(q);
- dma_addr = ionic_tx_map_single(q, frame->data, len);
- if (!dma_addr)
- return -EIO;
+ if (act == XDP_TX) {
+ dma_addr = page_pool_get_dma_addr(page) +
+ off + XDP_PACKET_HEADROOM;
+ dma_sync_single_for_device(q->dev, dma_addr,
+ len, DMA_TO_DEVICE);
+ } else /* XDP_REDIRECT */ {
+ dma_addr = ionic_tx_map_single(q, frame->data, len);
+ if (dma_addr == DMA_MAPPING_ERROR)
+ return -EIO;
+ }
+
buf_info->dma_addr = dma_addr;
buf_info->len = len;
buf_info->page = page;
@@ -386,10 +350,21 @@ static int ionic_xdp_post_frame(struct ionic_queue *q, struct xdp_frame *frame,
frag = sinfo->frags;
elem = ionic_tx_sg_elems(q);
for (i = 0; i < sinfo->nr_frags; i++, frag++, bi++) {
- dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag));
- if (!dma_addr) {
- ionic_tx_desc_unmap_bufs(q, desc_info);
- return -EIO;
+ if (act == XDP_TX) {
+ struct page *pg = skb_frag_page(frag);
+
+ dma_addr = page_pool_get_dma_addr(pg) +
+ skb_frag_off(frag);
+ dma_sync_single_for_device(q->dev, dma_addr,
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ } else {
+ dma_addr = ionic_tx_map_frag(q, frag, 0,
+ skb_frag_size(frag));
+ if (dma_addr == DMA_MAPPING_ERROR) {
+ ionic_tx_desc_unmap_bufs(q, desc_info);
+ return -EIO;
+ }
}
bi->dma_addr = dma_addr;
bi->len = skb_frag_size(frag);
@@ -480,6 +455,18 @@ int ionic_xdp_xmit(struct net_device *netdev, int n,
return nxmit;
}
+static void ionic_xdp_rx_unlink_bufs(struct ionic_queue *q,
+ struct ionic_buf_info *buf_info,
+ int nbufs)
+{
+ int i;
+
+ for (i = 0; i < nbufs; i++) {
+ buf_info->page = NULL;
+ buf_info++;
+ }
+}
+
static bool ionic_run_xdp(struct ionic_rx_stats *stats,
struct net_device *netdev,
struct bpf_prog *xdp_prog,
@@ -493,6 +480,7 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
struct netdev_queue *nq;
struct xdp_frame *xdpf;
int remain_len;
+ int nbufs = 1;
int frag_len;
int err = 0;
@@ -500,11 +488,9 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
frag_len = min_t(u16, len, IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN);
xdp_prepare_buff(&xdp_buf, ionic_rx_buf_va(buf_info),
XDP_PACKET_HEADROOM, frag_len, false);
-
- dma_sync_single_range_for_cpu(rxq->dev, ionic_rx_buf_pa(buf_info),
- XDP_PACKET_HEADROOM, len,
- DMA_FROM_DEVICE);
-
+ page_pool_dma_sync_for_cpu(rxq->page_pool, buf_info->page,
+ buf_info->page_offset + XDP_PACKET_HEADROOM,
+ frag_len);
prefetchw(&xdp_buf.data_hard_start);
/* We limit MTU size to one buffer if !xdp_has_frags, so
@@ -526,15 +512,16 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
do {
if (unlikely(sinfo->nr_frags >= MAX_SKB_FRAGS)) {
err = -ENOSPC;
- goto out_xdp_abort;
+ break;
}
frag = &sinfo->frags[sinfo->nr_frags];
sinfo->nr_frags++;
bi++;
- frag_len = min_t(u16, remain_len, ionic_rx_buf_size(bi));
- dma_sync_single_range_for_cpu(rxq->dev, ionic_rx_buf_pa(bi),
- 0, frag_len, DMA_FROM_DEVICE);
+ frag_len = min_t(u16, remain_len, bi->len);
+ page_pool_dma_sync_for_cpu(rxq->page_pool, bi->page,
+ buf_info->page_offset,
+ frag_len);
skb_frag_fill_page_desc(frag, bi->page, 0, frag_len);
sinfo->xdp_frags_size += frag_len;
remain_len -= frag_len;
@@ -542,6 +529,7 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
if (page_is_pfmemalloc(bi->page))
xdp_buff_set_frag_pfmemalloc(&xdp_buf);
} while (remain_len > 0);
+ nbufs += sinfo->nr_frags;
}
xdp_action = bpf_prog_run_xdp(xdp_prog, &xdp_buf);
@@ -552,14 +540,16 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
return false; /* false = we didn't consume the packet */
case XDP_DROP:
- ionic_rx_page_free(rxq, buf_info);
+ ionic_rx_put_buf_direct(rxq, buf_info);
stats->xdp_drop++;
break;
case XDP_TX:
xdpf = xdp_convert_buff_to_frame(&xdp_buf);
- if (!xdpf)
- goto out_xdp_abort;
+ if (!xdpf) {
+ err = -ENOSPC;
+ break;
+ }
txq = rxq->partner;
nq = netdev_get_tx_queue(netdev, txq->index);
@@ -571,65 +561,58 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
ionic_q_space_avail(txq),
1, 1)) {
__netif_tx_unlock(nq);
- goto out_xdp_abort;
+ err = -EIO;
+ break;
}
- dma_unmap_page(rxq->dev, buf_info->dma_addr,
- IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
-
err = ionic_xdp_post_frame(txq, xdpf, XDP_TX,
buf_info->page,
buf_info->page_offset,
true);
__netif_tx_unlock(nq);
- if (err) {
+ if (unlikely(err)) {
netdev_dbg(netdev, "tx ionic_xdp_post_frame err %d\n", err);
- goto out_xdp_abort;
+ break;
}
+ ionic_xdp_rx_unlink_bufs(rxq, buf_info, nbufs);
stats->xdp_tx++;
-
- /* the Tx completion will free the buffers */
break;
case XDP_REDIRECT:
- /* unmap the pages before handing them to a different device */
- dma_unmap_page(rxq->dev, buf_info->dma_addr,
- IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
-
err = xdp_do_redirect(netdev, &xdp_buf, xdp_prog);
- if (err) {
+ if (unlikely(err)) {
netdev_dbg(netdev, "xdp_do_redirect err %d\n", err);
- goto out_xdp_abort;
+ break;
}
- buf_info->page = NULL;
+ ionic_xdp_rx_unlink_bufs(rxq, buf_info, nbufs);
rxq->xdp_flush = true;
stats->xdp_redirect++;
break;
case XDP_ABORTED:
default:
- goto out_xdp_abort;
+ err = -EIO;
+ break;
}
- return true;
-
-out_xdp_abort:
- trace_xdp_exception(netdev, xdp_prog, xdp_action);
- ionic_rx_page_free(rxq, buf_info);
- stats->xdp_aborted++;
+ if (err) {
+ ionic_rx_put_buf_direct(rxq, buf_info);
+ trace_xdp_exception(netdev, xdp_prog, xdp_action);
+ stats->xdp_aborted++;
+ }
return true;
}
static void ionic_rx_clean(struct ionic_queue *q,
struct ionic_rx_desc_info *desc_info,
- struct ionic_rxq_comp *comp)
+ struct ionic_rxq_comp *comp,
+ struct bpf_prog *xdp_prog)
{
struct net_device *netdev = q->lif->netdev;
struct ionic_qcq *qcq = q_to_qcq(q);
struct ionic_rx_stats *stats;
- struct bpf_prog *xdp_prog;
- unsigned int headroom;
+ unsigned int headroom = 0;
struct sk_buff *skb;
bool synced = false;
bool use_copybreak;
@@ -637,7 +620,14 @@ static void ionic_rx_clean(struct ionic_queue *q,
stats = q_to_rx_stats(q);
- if (comp->status) {
+ if (unlikely(comp->status)) {
+ /* Most likely status==2 and the pkt received was bigger
+ * than the buffer available: comp->len will show the
+ * pkt size received that didn't fit the advertised desc.len
+ */
+ dev_dbg(q->dev, "q%d drop comp->status %d comp->len %d desc->len %d\n",
+ q->index, comp->status, comp->len, q->rxq[q->head_idx].len);
+
stats->dropped++;
return;
}
@@ -646,18 +636,18 @@ static void ionic_rx_clean(struct ionic_queue *q,
stats->pkts++;
stats->bytes += len;
- xdp_prog = READ_ONCE(q->lif->xdp_prog);
if (xdp_prog) {
if (ionic_run_xdp(stats, netdev, xdp_prog, q, desc_info->bufs, len))
return;
synced = true;
+ headroom = XDP_PACKET_HEADROOM;
}
- headroom = q->xdp_rxq_info ? XDP_PACKET_HEADROOM : 0;
use_copybreak = len <= q->lif->rx_copybreak;
if (use_copybreak)
skb = ionic_rx_copybreak(netdev, q, desc_info,
- headroom, len, synced);
+ headroom, len,
+ comp->num_sg_elems, synced);
else
skb = ionic_rx_build_skb(q, desc_info, headroom, len,
comp->num_sg_elems, synced);
@@ -733,7 +723,7 @@ static void ionic_rx_clean(struct ionic_queue *q,
napi_gro_frags(&qcq->napi);
}
-bool ionic_rx_service(struct ionic_cq *cq)
+static bool __ionic_rx_service(struct ionic_cq *cq, struct bpf_prog *xdp_prog)
{
struct ionic_rx_desc_info *desc_info;
struct ionic_queue *q = cq->bound_q;
@@ -755,11 +745,16 @@ bool ionic_rx_service(struct ionic_cq *cq)
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
/* clean the related q entry, only one per qc completion */
- ionic_rx_clean(q, desc_info, comp);
+ ionic_rx_clean(q, desc_info, comp, xdp_prog);
return true;
}
+bool ionic_rx_service(struct ionic_cq *cq)
+{
+ return __ionic_rx_service(cq, NULL);
+}
+
static inline void ionic_write_cmb_desc(struct ionic_queue *q,
void *desc)
{
@@ -770,7 +765,7 @@ static inline void ionic_write_cmb_desc(struct ionic_queue *q,
memcpy_toio(&q->cmb_txq[q->head_idx], desc, sizeof(q->cmb_txq[0]));
}
-void ionic_rx_fill(struct ionic_queue *q)
+void ionic_rx_fill(struct ionic_queue *q, struct bpf_prog *xdp_prog)
{
struct net_device *netdev = q->lif->netdev;
struct ionic_rx_desc_info *desc_info;
@@ -778,6 +773,9 @@ void ionic_rx_fill(struct ionic_queue *q)
struct ionic_buf_info *buf_info;
unsigned int fill_threshold;
struct ionic_rxq_desc *desc;
+ unsigned int first_frag_len;
+ unsigned int first_buf_len;
+ unsigned int headroom = 0;
unsigned int remain_len;
unsigned int frag_len;
unsigned int nfrags;
@@ -795,35 +793,43 @@ void ionic_rx_fill(struct ionic_queue *q)
len = netdev->mtu + VLAN_ETH_HLEN;
- for (i = n_fill; i; i--) {
- unsigned int headroom;
- unsigned int buf_len;
+ if (xdp_prog) {
+ /* Always alloc the full size buffer, but only need
+ * the actual frag_len in the descriptor.
+ * XDP uses space in the first buffer, so account for
+ * headroom, tailroom, and IP header in the first frag size.
+ */
+ headroom = XDP_PACKET_HEADROOM;
+ first_buf_len = IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN + headroom;
+ first_frag_len = min_t(u16, len + headroom, first_buf_len);
+ } else {
+ /* Use MTU size if smaller than max buffer size */
+ first_frag_len = min_t(u16, len, IONIC_PAGE_SIZE);
+ first_buf_len = first_frag_len;
+ }
+ for (i = n_fill; i; i--) {
+ /* fill main descriptor - buf[0] */
nfrags = 0;
remain_len = len;
desc = &q->rxq[q->head_idx];
desc_info = &q->rx_info[q->head_idx];
buf_info = &desc_info->bufs[0];
- if (!buf_info->page) { /* alloc a new buffer? */
- if (unlikely(ionic_rx_page_alloc(q, buf_info))) {
- desc->addr = 0;
- desc->len = 0;
- return;
- }
+ buf_info->len = first_buf_len;
+ frag_len = first_frag_len - headroom;
+
+ /* get a new buffer if we can't reuse one */
+ if (!buf_info->page)
+ buf_info->page = page_pool_alloc(q->page_pool,
+ &buf_info->page_offset,
+ &buf_info->len,
+ GFP_ATOMIC);
+ if (unlikely(!buf_info->page)) {
+ buf_info->len = 0;
+ return;
}
- /* fill main descriptor - buf[0]
- * XDP uses space in the first buffer, so account for
- * head room, tail room, and ip header in the first frag size.
- */
- headroom = q->xdp_rxq_info ? XDP_PACKET_HEADROOM : 0;
- if (q->xdp_rxq_info)
- buf_len = IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN;
- else
- buf_len = ionic_rx_buf_size(buf_info);
- frag_len = min_t(u16, len, buf_len);
-
desc->addr = cpu_to_le64(ionic_rx_buf_pa(buf_info) + headroom);
desc->len = cpu_to_le16(frag_len);
remain_len -= frag_len;
@@ -833,16 +839,26 @@ void ionic_rx_fill(struct ionic_queue *q)
/* fill sg descriptors - buf[1..n] */
sg_elem = q->rxq_sgl[q->head_idx].elems;
for (j = 0; remain_len > 0 && j < q->max_sg_elems; j++, sg_elem++) {
- if (!buf_info->page) { /* alloc a new sg buffer? */
- if (unlikely(ionic_rx_page_alloc(q, buf_info))) {
- sg_elem->addr = 0;
- sg_elem->len = 0;
+ frag_len = min_t(u16, remain_len, IONIC_PAGE_SIZE);
+
+ /* Recycle any leftover buffers that are too small to reuse */
+ if (unlikely(buf_info->page && buf_info->len < frag_len))
+ ionic_rx_put_buf_direct(q, buf_info);
+
+ /* Get new buffer if needed */
+ if (!buf_info->page) {
+ buf_info->len = frag_len;
+ buf_info->page = page_pool_alloc(q->page_pool,
+ &buf_info->page_offset,
+ &buf_info->len,
+ GFP_ATOMIC);
+ if (unlikely(!buf_info->page)) {
+ buf_info->len = 0;
return;
}
}
sg_elem->addr = cpu_to_le64(ionic_rx_buf_pa(buf_info));
- frag_len = min_t(u16, remain_len, ionic_rx_buf_size(buf_info));
sg_elem->len = cpu_to_le16(frag_len);
remain_len -= frag_len;
buf_info++;
@@ -867,25 +883,17 @@ void ionic_rx_fill(struct ionic_queue *q)
q->dbell_deadline = IONIC_RX_MIN_DOORBELL_DEADLINE;
q->dbell_jiffies = jiffies;
-
- mod_timer(&q_to_qcq(q)->napi_qcq->napi_deadline,
- jiffies + IONIC_NAPI_DEADLINE);
}
void ionic_rx_empty(struct ionic_queue *q)
{
struct ionic_rx_desc_info *desc_info;
- struct ionic_buf_info *buf_info;
unsigned int i, j;
for (i = 0; i < q->num_descs; i++) {
desc_info = &q->rx_info[i];
- for (j = 0; j < ARRAY_SIZE(desc_info->bufs); j++) {
- buf_info = &desc_info->bufs[j];
- if (buf_info->page)
- ionic_rx_page_free(q, buf_info);
- }
-
+ for (j = 0; j < ARRAY_SIZE(desc_info->bufs); j++)
+ ionic_rx_put_buf(q, &desc_info->bufs[j]);
desc_info->nbufs = 0;
}
@@ -924,7 +932,7 @@ static void ionic_dim_update(struct ionic_qcq *qcq, int napi_mode)
dim_update_sample(qcq->cq.bound_intr->rearm_count,
pkts, bytes, &dim_sample);
- net_dim(&qcq->dim, dim_sample);
+ net_dim(&qcq->dim, &dim_sample);
}
int ionic_tx_napi(struct napi_struct *napi, int budget)
@@ -934,7 +942,7 @@ int ionic_tx_napi(struct napi_struct *napi, int budget)
u32 work_done = 0;
u32 flags = 0;
- work_done = ionic_tx_cq_service(cq, budget);
+ work_done = ionic_tx_cq_service(cq, budget, !!budget);
if (unlikely(!budget))
return budget;
@@ -952,8 +960,8 @@ int ionic_tx_napi(struct napi_struct *napi, int budget)
work_done, flags);
}
- if (!work_done && ionic_txq_poke_doorbell(&qcq->q))
- mod_timer(&qcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE);
+ if (!work_done && cq->bound_q->lif->doorbell_wa)
+ ionic_txq_poke_doorbell(&qcq->q);
return work_done;
}
@@ -966,6 +974,32 @@ static void ionic_xdp_do_flush(struct ionic_cq *cq)
}
}
+static unsigned int ionic_rx_cq_service(struct ionic_cq *cq,
+ unsigned int work_to_do)
+{
+ struct ionic_queue *q = cq->bound_q;
+ unsigned int work_done = 0;
+ struct bpf_prog *xdp_prog;
+
+ if (work_to_do == 0)
+ return 0;
+
+ xdp_prog = READ_ONCE(q->xdp_prog);
+ while (__ionic_rx_service(cq, xdp_prog)) {
+ if (cq->tail_idx == cq->num_descs - 1)
+ cq->done_color = !cq->done_color;
+
+ cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
+
+ if (++work_done >= work_to_do)
+ break;
+ }
+ ionic_rx_fill(q, xdp_prog);
+ ionic_xdp_do_flush(cq);
+
+ return work_done;
+}
+
int ionic_rx_napi(struct napi_struct *napi, int budget)
{
struct ionic_qcq *qcq = napi_to_qcq(napi);
@@ -976,12 +1010,8 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
if (unlikely(!budget))
return budget;
- work_done = ionic_cq_service(cq, budget,
- ionic_rx_service, NULL, NULL);
+ work_done = ionic_rx_cq_service(cq, budget);
- ionic_rx_fill(cq->bound_q);
-
- ionic_xdp_do_flush(cq);
if (work_done < budget && napi_complete_done(napi, work_done)) {
ionic_dim_update(qcq, IONIC_LIF_F_RX_DIM_INTR);
flags |= IONIC_INTR_CRED_UNMASK;
@@ -995,8 +1025,8 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
work_done, flags);
}
- if (!work_done && ionic_rxq_poke_doorbell(&qcq->q))
- mod_timer(&qcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE);
+ if (!work_done && cq->bound_q->lif->doorbell_wa)
+ ionic_rxq_poke_doorbell(&qcq->q);
return work_done;
}
@@ -1009,7 +1039,6 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
struct ionic_qcq *txqcq;
struct ionic_lif *lif;
struct ionic_cq *txcq;
- bool resched = false;
u32 rx_work_done = 0;
u32 tx_work_done = 0;
u32 flags = 0;
@@ -1018,17 +1047,13 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
txqcq = lif->txqcqs[qi];
txcq = &lif->txqcqs[qi]->cq;
- tx_work_done = ionic_tx_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT);
+ tx_work_done = ionic_tx_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT, !!budget);
if (unlikely(!budget))
return budget;
- rx_work_done = ionic_cq_service(rxcq, budget,
- ionic_rx_service, NULL, NULL);
-
- ionic_rx_fill(rxcq->bound_q);
+ rx_work_done = ionic_rx_cq_service(rxcq, budget);
- ionic_xdp_do_flush(rxcq);
if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
ionic_dim_update(rxqcq, 0);
flags |= IONIC_INTR_CRED_UNMASK;
@@ -1041,12 +1066,12 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
tx_work_done + rx_work_done, flags);
}
- if (!rx_work_done && ionic_rxq_poke_doorbell(&rxqcq->q))
- resched = true;
- if (!tx_work_done && ionic_txq_poke_doorbell(&txqcq->q))
- resched = true;
- if (resched)
- mod_timer(&rxqcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE);
+ if (lif->doorbell_wa) {
+ if (!rx_work_done)
+ ionic_rxq_poke_doorbell(&rxqcq->q);
+ if (!tx_work_done)
+ ionic_txq_poke_doorbell(&txqcq->q);
+ }
return rx_work_done;
}
@@ -1058,11 +1083,11 @@ static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
dma_addr_t dma_addr;
dma_addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);
- if (dma_mapping_error(dev, dma_addr)) {
+ if (unlikely(dma_mapping_error(dev, dma_addr))) {
net_warn_ratelimited("%s: DMA single map failed on %s!\n",
dev_name(dev), q->name);
q_to_tx_stats(q)->dma_map_err++;
- return 0;
+ return DMA_MAPPING_ERROR;
}
return dma_addr;
}
@@ -1075,11 +1100,11 @@ static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
dma_addr_t dma_addr;
dma_addr = skb_frag_dma_map(dev, frag, offset, len, DMA_TO_DEVICE);
- if (dma_mapping_error(dev, dma_addr)) {
+ if (unlikely(dma_mapping_error(dev, dma_addr))) {
net_warn_ratelimited("%s: DMA frag map failed on %s!\n",
dev_name(dev), q->name);
q_to_tx_stats(q)->dma_map_err++;
- return 0;
+ return DMA_MAPPING_ERROR;
}
return dma_addr;
}
@@ -1095,7 +1120,7 @@ static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
int frag_idx;
dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
- if (!dma_addr)
+ if (dma_addr == DMA_MAPPING_ERROR)
return -EIO;
buf_info->dma_addr = dma_addr;
buf_info->len = skb_headlen(skb);
@@ -1105,7 +1130,7 @@ static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
nfrags = skb_shinfo(skb)->nr_frags;
for (frag_idx = 0; frag_idx < nfrags; frag_idx++, frag++) {
dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag));
- if (!dma_addr)
+ if (dma_addr == DMA_MAPPING_ERROR)
goto dma_fail;
buf_info->dma_addr = dma_addr;
buf_info->len = skb_frag_size(frag);
@@ -1151,14 +1176,15 @@ static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q,
static void ionic_tx_clean(struct ionic_queue *q,
struct ionic_tx_desc_info *desc_info,
- struct ionic_txq_comp *comp)
+ struct ionic_txq_comp *comp,
+ bool in_napi)
{
struct ionic_tx_stats *stats = q_to_tx_stats(q);
struct ionic_qcq *qcq = q_to_qcq(q);
struct sk_buff *skb;
if (desc_info->xdpf) {
- ionic_xdp_tx_desc_clean(q->partner, desc_info);
+ ionic_xdp_tx_desc_clean(q->partner, desc_info, in_napi);
stats->clean++;
if (unlikely(__netif_subqueue_stopped(q->lif->netdev, q->index)))
@@ -1203,11 +1229,13 @@ static void ionic_tx_clean(struct ionic_queue *q,
desc_info->bytes = skb->len;
stats->clean++;
- napi_consume_skb(skb, 1);
+ napi_consume_skb(skb, likely(in_napi) ? 1 : 0);
}
static bool ionic_tx_service(struct ionic_cq *cq,
- unsigned int *total_pkts, unsigned int *total_bytes)
+ unsigned int *total_pkts,
+ unsigned int *total_bytes,
+ bool in_napi)
{
struct ionic_tx_desc_info *desc_info;
struct ionic_queue *q = cq->bound_q;
@@ -1229,7 +1257,7 @@ static bool ionic_tx_service(struct ionic_cq *cq,
desc_info->bytes = 0;
index = q->tail_idx;
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
- ionic_tx_clean(q, desc_info, comp);
+ ionic_tx_clean(q, desc_info, comp, in_napi);
if (desc_info->skb) {
pkts++;
bytes += desc_info->bytes;
@@ -1243,7 +1271,9 @@ static bool ionic_tx_service(struct ionic_cq *cq,
return true;
}
-unsigned int ionic_tx_cq_service(struct ionic_cq *cq, unsigned int work_to_do)
+unsigned int ionic_tx_cq_service(struct ionic_cq *cq,
+ unsigned int work_to_do,
+ bool in_napi)
{
unsigned int work_done = 0;
unsigned int bytes = 0;
@@ -1252,7 +1282,7 @@ unsigned int ionic_tx_cq_service(struct ionic_cq *cq, unsigned int work_to_do)
if (work_to_do == 0)
return 0;
- while (ionic_tx_service(cq, &pkts, &bytes)) {
+ while (ionic_tx_service(cq, &pkts, &bytes, in_napi)) {
if (cq->tail_idx == cq->num_descs - 1)
cq->done_color = !cq->done_color;
cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
@@ -1278,7 +1308,7 @@ void ionic_tx_flush(struct ionic_cq *cq)
{
u32 work_done;
- work_done = ionic_tx_cq_service(cq, cq->num_descs);
+ work_done = ionic_tx_cq_service(cq, cq->num_descs, false);
if (work_done)
ionic_intr_credits(cq->idev->intr_ctrl, cq->bound_intr->index,
work_done, IONIC_INTR_CRED_RESET_COALESCE);
@@ -1295,7 +1325,7 @@ void ionic_tx_empty(struct ionic_queue *q)
desc_info = &q->tx_info[q->tail_idx];
desc_info->bytes = 0;
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
- ionic_tx_clean(q, desc_info, NULL);
+ ionic_tx_clean(q, desc_info, NULL, false);
if (desc_info->skb) {
pkts++;
bytes += desc_info->bytes;
@@ -1316,7 +1346,7 @@ static int ionic_tx_tcp_inner_pseudo_csum(struct sk_buff *skb)
int err;
err = skb_cow_head(skb, 0);
- if (err)
+ if (unlikely(err))
return err;
if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
@@ -1340,7 +1370,7 @@ static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
int err;
err = skb_cow_head(skb, 0);
- if (err)
+ if (unlikely(err))
return err;
if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
@@ -1357,7 +1387,7 @@ static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
}
static void ionic_tx_tso_post(struct net_device *netdev, struct ionic_queue *q,
- struct ionic_tx_desc_info *desc_info,
+ struct ionic_txq_desc *desc,
struct sk_buff *skb,
dma_addr_t addr, u8 nsge, u16 len,
unsigned int hdrlen, unsigned int mss,
@@ -1365,7 +1395,6 @@ static void ionic_tx_tso_post(struct net_device *netdev, struct ionic_queue *q,
u16 vlan_tci, bool has_vlan,
bool start, bool done)
{
- struct ionic_txq_desc *desc = &q->txq[q->head_idx];
u8 flags = 0;
u64 cmd;
@@ -1419,19 +1448,6 @@ static int ionic_tx_tso(struct net_device *netdev, struct ionic_queue *q,
bool encap;
int err;
- desc_info = &q->tx_info[q->head_idx];
-
- if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
- return -EIO;
-
- len = skb->len;
- mss = skb_shinfo(skb)->gso_size;
- outer_csum = (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
- SKB_GSO_GRE_CSUM |
- SKB_GSO_IPXIP4 |
- SKB_GSO_IPXIP6 |
- SKB_GSO_UDP_TUNNEL |
- SKB_GSO_UDP_TUNNEL_CSUM));
has_vlan = !!skb_vlan_tag_present(skb);
vlan_tci = skb_vlan_tag_get(skb);
encap = skb->encapsulation;
@@ -1445,12 +1461,21 @@ static int ionic_tx_tso(struct net_device *netdev, struct ionic_queue *q,
err = ionic_tx_tcp_inner_pseudo_csum(skb);
else
err = ionic_tx_tcp_pseudo_csum(skb);
- if (err) {
- /* clean up mapping from ionic_tx_map_skb */
- ionic_tx_desc_unmap_bufs(q, desc_info);
+ if (unlikely(err))
return err;
- }
+ desc_info = &q->tx_info[q->head_idx];
+ if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
+ return -EIO;
+
+ len = skb->len;
+ mss = skb_shinfo(skb)->gso_size;
+ outer_csum = (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
+ SKB_GSO_GRE_CSUM |
+ SKB_GSO_IPXIP4 |
+ SKB_GSO_IPXIP6 |
+ SKB_GSO_UDP_TUNNEL |
+ SKB_GSO_UDP_TUNNEL_CSUM));
if (encap)
hdrlen = skb_inner_tcp_all_headers(skb);
else
@@ -1503,10 +1528,9 @@ static int ionic_tx_tso(struct net_device *netdev, struct ionic_queue *q,
seg_rem = min(tso_rem, mss);
done = (tso_rem == 0);
/* post descriptor */
- ionic_tx_tso_post(netdev, q, desc_info, skb,
- desc_addr, desc_nsge, desc_len,
- hdrlen, mss, outer_csum, vlan_tci, has_vlan,
- start, done);
+ ionic_tx_tso_post(netdev, q, desc, skb, desc_addr, desc_nsge,
+ desc_len, hdrlen, mss, outer_csum, vlan_tci,
+ has_vlan, start, done);
start = false;
/* Buffer information is stored with the first tso descriptor */
desc_info = &q->tx_info[q->head_idx];
@@ -1731,7 +1755,7 @@ static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
linearize:
if (too_many_frags) {
err = skb_linearize(skb);
- if (err)
+ if (unlikely(err))
return err;
q_to_tx_stats(q)->linearize++;
}
@@ -1765,7 +1789,7 @@ static netdev_tx_t ionic_start_hwstamp_xmit(struct sk_buff *skb,
else
err = ionic_tx(netdev, q, skb);
- if (err)
+ if (unlikely(err))
goto err_out_drop;
return NETDEV_TX_OK;
@@ -1811,7 +1835,7 @@ netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
else
err = ionic_tx(netdev, q, skb);
- if (err)
+ if (unlikely(err))
goto err_out_drop;
return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
index 9e73e324e7a1..b2b9a2dc9eb8 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
@@ -4,9 +4,11 @@
#ifndef _IONIC_TXRX_H_
#define _IONIC_TXRX_H_
+struct bpf_prog;
+
void ionic_tx_flush(struct ionic_cq *cq);
-void ionic_rx_fill(struct ionic_queue *q);
+void ionic_rx_fill(struct ionic_queue *q, struct bpf_prog *xdp_prog);
void ionic_rx_empty(struct ionic_queue *q);
void ionic_tx_empty(struct ionic_queue *q);
int ionic_rx_napi(struct napi_struct *napi, int budget);
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
index 2fcbcecb41d1..fef4b2b0b1f2 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
@@ -571,9 +571,6 @@ static u64 ctx_addr_sig_regs[][3] = {
#define CRB_CTX_ADDR_REG_HI(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][2])
#define CRB_CTX_SIGNATURE_REG(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][1])
-#define lower32(x) ((u32)((x) & 0xffffffff))
-#define upper32(x) ((u32)(((u64)(x) >> 32) & 0xffffffff))
-
static struct netxen_recv_crb recv_crb_registers[] = {
/* Instance 0 */
{
@@ -723,9 +720,9 @@ netxen_init_old_ctx(struct netxen_adapter *adapter)
NETXEN_CTX_SIGNATURE_V2 : NETXEN_CTX_SIGNATURE;
NXWR32(adapter, CRB_CTX_ADDR_REG_LO(port),
- lower32(recv_ctx->phys_addr));
+ lower_32_bits(recv_ctx->phys_addr));
NXWR32(adapter, CRB_CTX_ADDR_REG_HI(port),
- upper32(recv_ctx->phys_addr));
+ upper_32_bits(recv_ctx->phys_addr));
NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port),
signature | port);
return 0;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
index 8c4cb910e09b..e7d8999049e1 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
@@ -648,18 +648,18 @@ netxen_nic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
static void
netxen_nic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
- int index;
+ const char *str;
+ int i;
switch (stringset) {
case ETH_SS_TEST:
- memcpy(data, *netxen_nic_gstrings_test,
- NETXEN_NIC_TEST_LEN * ETH_GSTRING_LEN);
+ for (i = 0; i < NETXEN_NIC_TEST_LEN; i++)
+ ethtool_puts(&data, netxen_nic_gstrings_test[i]);
break;
case ETH_SS_STATS:
- for (index = 0; index < NETXEN_NIC_STATS_LEN; index++) {
- memcpy(data + index * ETH_GSTRING_LEN,
- netxen_nic_gstrings_stats[index].stat_string,
- ETH_GSTRING_LEN);
+ for (i = 0; i < NETXEN_NIC_STATS_LEN; i++) {
+ str = netxen_nic_gstrings_stats[i].stat_string;
+ ethtool_puts(&data, str);
}
break;
}
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
index 6e12cd21ac90..89c8b2349694 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -960,7 +960,7 @@ int netxen_nic_change_mtu(struct net_device *netdev, int mtu)
rc = adapter->set_mtu(adapter, mtu);
if (!rc)
- netdev->mtu = mtu;
+ WRITE_ONCE(netdev->mtu, mtu);
return rc;
}
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index ed24d6af7487..e8ff661fa4a5 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -2832,7 +2832,7 @@ netxen_sysfs_validate_crb(struct netxen_adapter *adapter,
static ssize_t
netxen_sysfs_read_crb(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -2860,7 +2860,7 @@ netxen_sysfs_read_crb(struct file *filp, struct kobject *kobj,
static ssize_t
netxen_sysfs_write_crb(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -2901,7 +2901,7 @@ netxen_sysfs_validate_mem(struct netxen_adapter *adapter,
static ssize_t
netxen_sysfs_read_mem(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -2922,7 +2922,7 @@ netxen_sysfs_read_mem(struct file *filp, struct kobject *kobj,
}
static ssize_t netxen_sysfs_write_mem(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -2959,7 +2959,7 @@ static const struct bin_attribute bin_attr_mem = {
static ssize_t
netxen_sysfs_read_dimm(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -3185,8 +3185,7 @@ netxen_list_config_ip(struct netxen_adapter *adapter,
struct list_head *head;
bool ret = false;
- dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
-
+ dev = ifa->ifa_dev->dev;
if (dev == NULL)
goto out;
@@ -3379,7 +3378,7 @@ netxen_inetaddr_event(struct notifier_block *this,
struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
unsigned long ip_event;
- dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
+ dev = ifa->ifa_dev->dev;
ip_event = (event == NETDEV_UP) ? NX_IP_UP : NX_IP_DOWN;
recheck:
if (dev == NULL)
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 1d719726f72b..016b575861b9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -662,8 +662,6 @@ struct qed_hwfn {
};
struct pci_params {
- int pm_cap;
-
unsigned long mem_start;
unsigned long mem_end;
unsigned int irq;
@@ -941,7 +939,6 @@ u16 qed_get_cm_pq_idx_ofld_mtc(struct qed_hwfn *p_hwfn, u8 tc);
u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc);
/* doorbell recovery mechanism */
-void qed_db_recovery_dp(struct qed_hwfn *p_hwfn);
void qed_db_recovery_execute(struct qed_hwfn *p_hwfn);
bool qed_edpm_enabled(struct qed_hwfn *p_hwfn);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h
index f6cd1b3efdfd..27e91d0d39f8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h
@@ -1305,37 +1305,6 @@ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
char *results_buf);
/**
- * qed_print_mcp_trace_results_cont(): Prints MCP Trace results, and
- * keeps the MCP trace meta data allocated, to support continuous MCP Trace
- * parsing. After the continuous parsing ends, mcp_trace_free_meta_data should
- * be called to free the meta data.
- *
- * @p_hwfn: HW device data.
- * @dump_buf: MVP trace dump buffer, starting from the header.
- * @results_buf: Buffer for printing the mcp trace results.
- *
- * Return: Error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- char *results_buf);
-
-/**
- * qed_print_mcp_trace_line(): Prints MCP Trace results for a single line
- *
- * @p_hwfn: HW device data.
- * @dump_buf: MCP trace dump buffer, starting from the header.
- * @num_dumped_bytes: Number of bytes that were dumped.
- * @results_buf: Buffer for printing the mcp trace results.
- *
- * Return: Error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn,
- u8 *dump_buf,
- u32 num_dumped_bytes,
- char *results_buf);
-
-/**
* qed_mcp_trace_free_meta_data(): Frees the MCP Trace meta data.
* Should be called after continuous MCP Trace parsing.
*
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index f67be4b8ad43..1f0cea3cae92 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -2873,6 +2873,7 @@ static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
false,
SPLIT_TYPE_NONE, 0);
}
+ cond_resched();
}
return offset;
@@ -4461,10 +4462,11 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
goto out;
}
- /* Add override window info to buffer */
+ /* Add override window info to buffer, preventing buffer overflow */
override_window_dwords =
- qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
- PROTECTION_OVERRIDE_ELEMENT_DWORDS;
+ min(qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
+ PROTECTION_OVERRIDE_ELEMENT_DWORDS,
+ PROTECTION_OVERRIDE_DEPTH_DWORDS);
if (override_window_dwords) {
addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW);
offset += qed_grc_dump_addr_range(p_hwfn,
@@ -7613,31 +7615,6 @@ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
results_buf, &parsed_buf_size, true);
}
-enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- char *results_buf)
-{
- u32 parsed_buf_size;
-
- return qed_parse_mcp_trace_dump(p_hwfn, dump_buf, results_buf,
- &parsed_buf_size, false);
-}
-
-enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn,
- u8 *dump_buf,
- u32 num_dumped_bytes,
- char *results_buf)
-{
- u32 parsed_results_bytes;
-
- return qed_parse_mcp_trace_buf(p_hwfn,
- dump_buf,
- num_dumped_bytes,
- 0,
- num_dumped_bytes,
- results_buf, &parsed_results_bytes);
-}
-
/* Frees the specified MCP Trace meta data */
void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn)
{
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 86a93cac2647..f3d2b2b3bad5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -255,25 +255,6 @@ static void qed_db_recovery_teardown(struct qed_hwfn *p_hwfn)
p_hwfn->db_recovery_info.db_recovery_counter = 0;
}
-/* Print the content of the doorbell recovery mechanism */
-void qed_db_recovery_dp(struct qed_hwfn *p_hwfn)
-{
- struct qed_db_recovery_entry *db_entry = NULL;
-
- DP_NOTICE(p_hwfn,
- "Displaying doorbell recovery database. Counter was %d\n",
- p_hwfn->db_recovery_info.db_recovery_counter);
-
- /* Protect the list */
- spin_lock_bh(&p_hwfn->db_recovery_info.lock);
- list_for_each_entry(db_entry,
- &p_hwfn->db_recovery_info.list, list_entry) {
- qed_db_recovery_dp_entry(p_hwfn, db_entry, "Printing");
- }
-
- spin_unlock_bh(&p_hwfn->db_recovery_info.lock);
-}
-
/* Ring the doorbell of a single doorbell recovery entry */
static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn,
struct qed_db_recovery_entry *db_entry)
@@ -2235,7 +2216,7 @@ int qed_resc_alloc(struct qed_dev *cdev)
}
/* CID map / ILT shadow table / T2
- * The talbes sizes are determined by the computations above
+ * The table sizes are determined by the computations above
*/
rc = qed_cxt_tables_alloc(p_hwfn);
if (rc)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_devlink.c b/drivers/net/ethernet/qlogic/qed/qed_devlink.c
index dad8e617c393..0c5278c0598c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_devlink.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_devlink.c
@@ -87,20 +87,21 @@ qed_fw_fatal_reporter_recover(struct devlink_health_reporter *reporter,
return 0;
}
+#define QED_REPORTER_FW_GRACEFUL_PERIOD 0
+
static const struct devlink_health_reporter_ops qed_fw_fatal_reporter_ops = {
.name = "fw_fatal",
.recover = qed_fw_fatal_reporter_recover,
.dump = qed_fw_fatal_reporter_dump,
+ .default_graceful_period = QED_REPORTER_FW_GRACEFUL_PERIOD,
};
-#define QED_REPORTER_FW_GRACEFUL_PERIOD 0
-
void qed_fw_reporters_create(struct devlink *devlink)
{
struct qed_devlink *dl = devlink_priv(devlink);
- dl->fw_reporter = devlink_health_reporter_create(devlink, &qed_fw_fatal_reporter_ops,
- QED_REPORTER_FW_GRACEFUL_PERIOD, dl);
+ dl->fw_reporter = devlink_health_reporter_create(devlink,
+ &qed_fw_fatal_reporter_ops, dl);
if (IS_ERR(dl->fw_reporter)) {
DP_NOTICE(dl->cdev, "Failed to create fw reporter, err = %ld\n",
PTR_ERR(dl->fw_reporter));
@@ -120,7 +121,8 @@ void qed_fw_reporters_destroy(struct devlink *devlink)
}
static int qed_dl_param_get(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct qed_devlink *qed_dl = devlink_priv(dl);
struct qed_dev *cdev;
@@ -132,7 +134,8 @@ static int qed_dl_param_get(struct devlink *dl, u32 id,
}
static int qed_dl_param_set(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct qed_devlink *qed_dl = devlink_priv(dl);
struct qed_dev *cdev;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index ed1a84542ad2..10e355397cee 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -2666,58 +2666,6 @@ void qed_gft_config(struct qed_hwfn *p_hwfn,
void qed_enable_context_validation(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
-/**
- * qed_calc_session_ctx_validation(): Calcualte validation byte for
- * session context.
- *
- * @p_ctx_mem: Pointer to context memory.
- * @ctx_size: Context size.
- * @ctx_type: Context type.
- * @cid: Context cid.
- *
- * Return: Void.
- */
-void qed_calc_session_ctx_validation(void *p_ctx_mem,
- u16 ctx_size, u8 ctx_type, u32 cid);
-
-/**
- * qed_calc_task_ctx_validation(): Calcualte validation byte for task
- * context.
- *
- * @p_ctx_mem: Pointer to context memory.
- * @ctx_size: Context size.
- * @ctx_type: Context type.
- * @tid: Context tid.
- *
- * Return: Void.
- */
-void qed_calc_task_ctx_validation(void *p_ctx_mem,
- u16 ctx_size, u8 ctx_type, u32 tid);
-
-/**
- * qed_memset_session_ctx(): Memset session context to 0 while
- * preserving validation bytes.
- *
- * @p_ctx_mem: Pointer to context memory.
- * @ctx_size: Size to initialzie.
- * @ctx_type: Context type.
- *
- * Return: Void.
- */
-void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
-
-/**
- * qed_memset_task_ctx(): Memset task context to 0 while preserving
- * validation bytes.
- *
- * @p_ctx_mem: Pointer to context memory.
- * @ctx_size: size to initialzie.
- * @ctx_type: context type.
- *
- * Return: Void.
- */
-void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
-
#define NUM_STORMS 6
/**
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
index 6263f847b6b9..9907973399dc 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
@@ -69,17 +69,6 @@ int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn)
return 0;
}
-void qed_ptt_invalidate(struct qed_hwfn *p_hwfn)
-{
- struct qed_ptt *p_ptt;
- int i;
-
- for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
- p_ptt = &p_hwfn->p_ptt_pool->ptts[i];
- p_ptt->pxp.offset = QED_BAR_INVALID_OFFSET;
- }
-}
-
void qed_ptt_pool_free(struct qed_hwfn *p_hwfn)
{
kfree(p_hwfn->p_ptt_pool);
@@ -596,6 +585,7 @@ static int qed_dmae_operation_wait(struct qed_hwfn *p_hwfn)
barrier();
while (*p_hwfn->dmae_info.p_completion_word != DMAE_COMPLETION_VAL) {
udelay(DMAE_MIN_WAIT_TIME);
+ cond_resched();
if (++wait_cnt > wait_cnt_limit) {
DP_NOTICE(p_hwfn->cdev,
"Timed-out waiting for operation to complete. Completion word is 0x%08x expected 0x%08x.\n",
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h
index e535983ce21b..3c98f58a184f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h
@@ -62,15 +62,6 @@ enum _dmae_cmd_crc_mask {
void qed_gtt_init(struct qed_hwfn *p_hwfn);
/**
- * qed_ptt_invalidate(): Forces all ptt entries to be re-configured
- *
- * @p_hwfn: HW device data.
- *
- * Return: Void.
- */
-void qed_ptt_invalidate(struct qed_hwfn *p_hwfn);
-
-/**
* qed_ptt_pool_alloc(): Allocate and initialize PTT pool.
*
* @p_hwfn: HW device data.
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index 407029a36fa1..aa20bb8caa9a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -18,16 +18,6 @@
#define CDU_VALIDATION_DEFAULT_CFG CDU_CONTEXT_VALIDATION_DEFAULT_CFG
-static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES] = {
- {400, 336, 352, 368, 304, 384, 416, 352}, /* region 3 offsets */
- {528, 496, 416, 512, 448, 512, 544, 480}, /* region 4 offsets */
- {608, 544, 496, 576, 576, 592, 624, 560} /* region 5 offsets */
-};
-
-static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES] = {
- {240, 240, 112, 0, 0, 0, 0, 96} /* region 1 offsets */
-};
-
/* General constants */
#define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \
QM_PQ_ELEMENT_SIZE, \
@@ -1576,134 +1566,6 @@ void qed_gft_config(struct qed_hwfn *p_hwfn,
qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
}
-DECLARE_CRC8_TABLE(cdu_crc8_table);
-
-/* Calculate and return CDU validation byte per connection type/region/cid */
-static u8 qed_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid)
-{
- const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG;
- u8 crc, validation_byte = 0;
- static u8 crc8_table_valid; /* automatically initialized to 0 */
- u32 validation_string = 0;
- __be32 data_to_crc;
-
- if (!crc8_table_valid) {
- crc8_populate_msb(cdu_crc8_table, 0x07);
- crc8_table_valid = 1;
- }
-
- /* The CRC is calculated on the String-to-compress:
- * [31:8] = {CID[31:20],CID[11:0]}
- * [7:4] = Region
- * [3:0] = Type
- */
- if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1)
- validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8);
-
- if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1)
- validation_string |= ((region & 0xF) << 4);
-
- if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1)
- validation_string |= (conn_type & 0xF);
-
- /* Convert to big-endian and calculate CRC8 */
- data_to_crc = cpu_to_be32(validation_string);
- crc = crc8(cdu_crc8_table, (u8 *)&data_to_crc, sizeof(data_to_crc),
- CRC8_INIT_VALUE);
-
- /* The validation byte [7:0] is composed:
- * for type A validation
- * [7] = active configuration bit
- * [6:0] = crc[6:0]
- *
- * for type B validation
- * [7] = active configuration bit
- * [6:3] = connection_type[3:0]
- * [2:0] = crc[2:0]
- */
- validation_byte |=
- ((validation_cfg >>
- CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7;
-
- if ((validation_cfg >>
- CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1)
- validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7);
- else
- validation_byte |= crc & 0x7F;
-
- return validation_byte;
-}
-
-/* Calcualte and set validation bytes for session context */
-void qed_calc_session_ctx_validation(void *p_ctx_mem,
- u16 ctx_size, u8 ctx_type, u32 cid)
-{
- u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
-
- p_ctx = (u8 * const)p_ctx_mem;
- x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
- t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
- u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
-
- memset(p_ctx, 0, ctx_size);
-
- *x_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 3, cid);
- *t_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 4, cid);
- *u_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 5, cid);
-}
-
-/* Calcualte and set validation bytes for task context */
-void qed_calc_task_ctx_validation(void *p_ctx_mem,
- u16 ctx_size, u8 ctx_type, u32 tid)
-{
- u8 *p_ctx, *region1_val_ptr;
-
- p_ctx = (u8 * const)p_ctx_mem;
- region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
-
- memset(p_ctx, 0, ctx_size);
-
- *region1_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 1, tid);
-}
-
-/* Memset session context to 0 while preserving validation bytes */
-void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
-{
- u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
- u8 x_val, t_val, u_val;
-
- p_ctx = (u8 * const)p_ctx_mem;
- x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
- t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
- u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
-
- x_val = *x_val_ptr;
- t_val = *t_val_ptr;
- u_val = *u_val_ptr;
-
- memset(p_ctx, 0, ctx_size);
-
- *x_val_ptr = x_val;
- *t_val_ptr = t_val;
- *u_val_ptr = u_val;
-}
-
-/* Memset task context to 0 while preserving validation bytes */
-void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
-{
- u8 *p_ctx, *region1_val_ptr;
- u8 region1_val;
-
- p_ctx = (u8 * const)p_ctx_mem;
- region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
-
- region1_val = *region1_val_ptr;
-
- memset(p_ctx, 0, ctx_size);
-
- *region1_val_ptr = region1_val;
-}
-
/* Enable and configure context validation */
void qed_enable_context_validation(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index c278f8893042..d4685ad4b169 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -323,8 +323,7 @@ static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
goto err2;
}
- cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
- if (IS_PF(cdev) && !cdev->pci_params.pm_cap)
+ if (IS_PF(cdev) && !pdev->pm_cap)
DP_NOTICE(cdev, "Cannot find power management capability\n");
rc = dma_set_mask_and_coherent(&cdev->pdev->dev, DMA_BIT_MASK(64));
@@ -455,7 +454,7 @@ int qed_fill_dev_info(struct qed_dev *cdev,
static void qed_free_cdev(struct qed_dev *cdev)
{
- kfree((void *)cdev);
+ kfree(cdev);
}
static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
@@ -1206,7 +1205,6 @@ out:
static int qed_slowpath_wq_start(struct qed_dev *cdev)
{
struct qed_hwfn *hwfn;
- char name[NAME_SIZE];
int i;
if (IS_VF(cdev))
@@ -1215,11 +1213,12 @@ static int qed_slowpath_wq_start(struct qed_dev *cdev)
for_each_hwfn(cdev, i) {
hwfn = &cdev->hwfns[i];
- snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x",
- cdev->pdev->bus->number,
- PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
+ hwfn->slowpath_wq = alloc_workqueue("slowpath-%02x:%02x.%02x",
+ WQ_PERCPU, 0,
+ cdev->pdev->bus->number,
+ PCI_SLOT(cdev->pdev->devfn),
+ hwfn->abs_pf_id);
- hwfn->slowpath_wq = alloc_workqueue(name, 0, 0);
if (!hwfn->slowpath_wq) {
DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n");
return -ENOMEM;
@@ -1351,7 +1350,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
(params->drv_rev << 8) |
(params->drv_eng);
strscpy(drv_version.name, params->name,
- MCP_DRV_VER_STR_SIZE - 4);
+ sizeof(drv_version.name));
rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
&drv_version);
if (rc) {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 16e6bd466143..c7f497c36f66 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -459,12 +459,11 @@ static void qed_mcp_print_cpu_info(struct qed_hwfn *p_hwfn,
static int
_qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- struct qed_mcp_mb_params *p_mb_params,
- u32 max_retries, u32 usecs)
+ struct qed_mcp_mb_params *p_mb_params)
{
- u32 cnt = 0, msecs = DIV_ROUND_UP(usecs, 1000);
struct qed_mcp_cmd_elem *p_cmd_elem;
u16 seq_num;
+ u32 cnt = 0;
int rc = 0;
/* Wait until the mailbox is non-occupied */
@@ -488,12 +487,13 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
- msleep(msecs);
+ usleep_range(QED_MCP_RESP_ITER_US,
+ QED_MCP_RESP_ITER_US * 2);
else
- udelay(usecs);
- } while (++cnt < max_retries);
+ udelay(QED_MCP_RESP_ITER_US);
+ } while (++cnt < QED_DRV_MB_MAX_RETRIES);
- if (cnt >= max_retries) {
+ if (cnt >= QED_DRV_MB_MAX_RETRIES) {
DP_NOTICE(p_hwfn,
"The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
p_mb_params->cmd, p_mb_params->param);
@@ -520,9 +520,10 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
*/
if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
- msleep(msecs);
+ usleep_range(QED_MCP_RESP_ITER_US,
+ QED_MCP_RESP_ITER_US * 2);
else
- udelay(usecs);
+ udelay(QED_MCP_RESP_ITER_US);
spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
@@ -536,9 +537,9 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
goto err;
spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
- } while (++cnt < max_retries);
+ } while (++cnt < QED_DRV_MB_MAX_RETRIES);
- if (cnt >= max_retries) {
+ if (cnt >= QED_DRV_MB_MAX_RETRIES) {
DP_NOTICE(p_hwfn,
"The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
p_mb_params->cmd, p_mb_params->param);
@@ -564,7 +565,8 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
"MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
p_mb_params->mcp_resp,
p_mb_params->mcp_param,
- (cnt * usecs) / 1000, (cnt * usecs) % 1000);
+ (cnt * QED_MCP_RESP_ITER_US) / 1000,
+ (cnt * QED_MCP_RESP_ITER_US) % 1000);
/* Clear the sequence number from the MFW response */
p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;
@@ -581,8 +583,6 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
struct qed_mcp_mb_params *p_mb_params)
{
size_t union_data_size = sizeof(union drv_union_data);
- u32 max_retries = QED_DRV_MB_MAX_RETRIES;
- u32 usecs = QED_MCP_RESP_ITER_US;
/* MCP not initialized */
if (!qed_mcp_is_init(p_hwfn)) {
@@ -606,13 +606,7 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
return -EINVAL;
}
- if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) {
- max_retries = DIV_ROUND_UP(max_retries, 1000);
- usecs *= 1000;
- }
-
- return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
- usecs);
+ return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params);
}
static int _qed_mcp_cmd(struct qed_hwfn *p_hwfn,
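
The qed_mcp.c hunks above collapse the msleep()/udelay() retry plumbing into a single per-iteration delay constant, using usleep_range() on the sleepable path since the delay is well below the range where msleep() makes sense. A rough sketch of the bounded-polling idiom with hypothetical constants (not the driver's values):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

#define DEMO_POLL_ITER_US	10	/* hypothetical per-iteration delay */
#define DEMO_POLL_MAX_ITERS	500000	/* hypothetical retry budget */

/* Poll @done until it reports completion, sleeping between iterations
 * when the caller may sleep and busy-waiting otherwise.
 */
static int demo_poll(bool (*done)(void *ctx), void *ctx, bool can_sleep)
{
	u32 cnt = 0;

	do {
		if (done(ctx))
			return 0;

		if (can_sleep)
			usleep_range(DEMO_POLL_ITER_US, DEMO_POLL_ITER_US * 2);
		else
			udelay(DEMO_POLL_ITER_US);
	} while (++cnt < DEMO_POLL_MAX_ITERS);

	return -ETIMEDOUT;
}
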
@@ -3085,20 +3079,13 @@ int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len)
DRV_MB_PARAM_NVM_LEN_OFFSET),
&resp, &resp_param,
&read_len,
- (u32 *)(p_buf + offset), false);
+ (u32 *)(p_buf + offset), true);
if (rc || (resp != FW_MSG_CODE_NVM_OK)) {
DP_NOTICE(cdev, "MCP command rc = %d\n", rc);
break;
}
- /* This can be a lengthy process, and it's possible scheduler
- * isn't preemptible. Sleep a bit to prevent CPU hogging.
- */
- if (bytes_left % 0x1000 <
- (bytes_left - read_len) % 0x1000)
- usleep_range(1000, 2000);
-
offset += read_len;
bytes_left -= read_len;
}
@@ -3314,7 +3301,9 @@ int qed_mcp_bist_nvm_get_num_images(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
- if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK))
+ if (((rsp & FW_MSG_CODE_MASK) == FW_MSG_CODE_UNSUPPORTED))
+ rc = -EOPNOTSUPP;
+ else if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK))
rc = -EINVAL;
return rc;
@@ -3369,6 +3358,7 @@ int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn)
p_ptt, &nvm_info.num_images);
if (rc == -EOPNOTSUPP) {
DP_INFO(p_hwfn, "DRV_MSG_CODE_BIST_TEST is not supported\n");
+ nvm_info.num_images = 0;
goto out;
} else if (rc || !nvm_info.num_images) {
DP_ERR(p_hwfn, "Failed getting number of images\n");
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
index f55eed092f25..7d78f072b0a1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
@@ -242,7 +242,7 @@ static int qed_mfw_get_tlv_group(u8 tlv_type, u8 *tlv_group)
}
/* Returns size of the data buffer or, -1 in case TLV data is not available. */
-static int
+static noinline_for_stack int
qed_mfw_get_gen_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
struct qed_mfw_tlv_generic *p_drv_buf,
struct qed_tlv_parsed_buf *p_buf)
@@ -304,7 +304,7 @@ qed_mfw_get_gen_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
return -1;
}
-static int
+static noinline_for_stack int
qed_mfw_get_eth_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
struct qed_mfw_tlv_eth *p_drv_buf,
struct qed_tlv_parsed_buf *p_buf)
@@ -438,7 +438,7 @@ qed_mfw_get_tlv_time_value(struct qed_mfw_tlv_time *p_time,
return QED_MFW_TLV_TIME_SIZE;
}
-static int
+static noinline_for_stack int
qed_mfw_get_fcoe_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
struct qed_mfw_tlv_fcoe *p_drv_buf,
struct qed_tlv_parsed_buf *p_buf)
@@ -1073,7 +1073,7 @@ qed_mfw_get_fcoe_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
return -1;
}
-static int
+static noinline_for_stack int
qed_mfw_get_iscsi_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
struct qed_mfw_tlv_iscsi *p_drv_buf,
struct qed_tlv_parsed_buf *p_buf)
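
noinline_for_stack, applied to the TLV helpers above, keeps each helper's sizeable locals in its own stack frame instead of letting the compiler inline them all into one oversized caller frame. A generic sketch with a hypothetical helper:

#include <linux/compiler.h>
#include <linux/string.h>
#include <linux/types.h>

/* The 256-byte scratch buffer stays in this frame; without the
 * annotation the compiler could inline the helper and add it to
 * every caller's stack usage.
 */
static noinline_for_stack ssize_t demo_format_entry(char *dst, size_t len,
						    const char *src)
{
	char scratch[256];

	strscpy(scratch, src, sizeof(scratch));
	return strscpy(dst, scratch, len);
}
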
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.c b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
index 5d725f59db24..8be567a6ad44 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ooo.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
@@ -183,9 +183,6 @@ void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_buffer,
list_entry);
- if (!p_buffer)
- break;
-
list_move_tail(&p_buffer->list_entry,
&p_ooo_info->free_buffers_list);
}
@@ -218,9 +215,6 @@ void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_buffer,
list_entry);
- if (!p_buffer)
- break;
-
list_move_tail(&p_buffer->list_entry,
&p_ooo_info->free_buffers_list);
}
@@ -255,9 +249,6 @@ void qed_ooo_free(struct qed_hwfn *p_hwfn)
p_buffer = list_first_entry(&p_ooo_info->free_buffers_list,
struct qed_ooo_buffer, list_entry);
- if (!p_buffer)
- break;
-
list_del(&p_buffer->list_entry);
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
p_buffer->rx_buffer_size,
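
The qed_ooo.c hunks above drop NULL checks on list_first_entry(): the surrounding loops already test list_empty(), and list_first_entry() never returns NULL (that is what list_first_entry_or_null() is for), so the checks were dead code. A small sketch of both idioms with hypothetical types:

#include <linux/list.h>

struct demo_buf {
	struct list_head node;
};

static void demo_drain(struct list_head *busy, struct list_head *free)
{
	struct demo_buf *b;

	/* Emptiness is handled by the loop condition, so
	 * list_first_entry() can be used without a NULL check.
	 */
	while (!list_empty(busy)) {
		b = list_first_entry(busy, struct demo_buf, node);
		list_move_tail(&b->node, free);
	}

	/* Alternative: let the helper report the empty case itself. */
	while ((b = list_first_entry_or_null(busy, struct demo_buf, node)))
		list_move_tail(&b->node, free);
}
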
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.c b/drivers/net/ethernet/qlogic/qed/qed_ptp.c
index 295ce435a1a4..4df8a97b717e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ptp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ptp.c
@@ -307,7 +307,7 @@ static int qed_ptp_hw_adjfreq(struct qed_dev *cdev, s32 ppb)
} else if (ppb == 1) {
/* This is a special case as its the only value which wouldn't
* fit in a s64 variable. In order to prevent castings simple
- * handle it seperately.
+ * handle it separately.
*/
best_val = 4;
best_period = 0xee6b27f;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index fa167b1aa019..5222a035fd19 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -3033,7 +3033,7 @@ static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
u16 length;
int rc;
- /* Valiate PF can send such a request */
+ /* Validate PF can send such a request */
if (!vf->vport_instance) {
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
@@ -3312,7 +3312,7 @@ static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
goto out;
}
- /* Determine if the unicast filtering is acceptible by PF */
+ /* Determine if the unicast filtering is acceptable by PF */
if ((p_bulletin->valid_bitmap & BIT(VLAN_ADDR_FORCED)) &&
(params.type == QED_FILTER_VLAN ||
params.type == QED_FILTER_MAC_VLAN)) {
@@ -3729,7 +3729,7 @@ qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn,
rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
if (rc) {
- DP_ERR(p_hwfn, "Failed to re-enable VF[%d] acces\n",
+ DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",
vfid);
return rc;
}
@@ -4480,7 +4480,7 @@ int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
/* Failure to acquire the ptt in 100g creates an odd error
- * where the first engine has already relased IOV.
+ * where the first engine has already released IOV.
*/
if (!ptt) {
DP_ERR(hwfn, "Failed to acquire ptt\n");
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index ae3ebf0cf999..23982704273c 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -272,16 +272,14 @@ static void qede_get_strings_stats_txq(struct qede_dev *edev,
{
int i;
- for (i = 0; i < QEDE_NUM_TQSTATS; i++) {
+ for (i = 0; i < QEDE_NUM_TQSTATS; i++)
if (txq->is_xdp)
- sprintf(*buf, "%d [XDP]: %s",
- QEDE_TXQ_XDP_TO_IDX(edev, txq),
- qede_tqstats_arr[i].string);
+ ethtool_sprintf(buf, "%d [XDP]: %s",
+ QEDE_TXQ_XDP_TO_IDX(edev, txq),
+ qede_tqstats_arr[i].string);
else
- sprintf(*buf, "%d_%d: %s", txq->index, txq->cos,
- qede_tqstats_arr[i].string);
- *buf += ETH_GSTRING_LEN;
- }
+ ethtool_sprintf(buf, "%d_%d: %s", txq->index, txq->cos,
+ qede_tqstats_arr[i].string);
}
static void qede_get_strings_stats_rxq(struct qede_dev *edev,
@@ -289,11 +287,9 @@ static void qede_get_strings_stats_rxq(struct qede_dev *edev,
{
int i;
- for (i = 0; i < QEDE_NUM_RQSTATS; i++) {
- sprintf(*buf, "%d: %s", rxq->rxq_id,
- qede_rqstats_arr[i].string);
- *buf += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < QEDE_NUM_RQSTATS; i++)
+ ethtool_sprintf(buf, "%d: %s", rxq->rxq_id,
+ qede_rqstats_arr[i].string);
}
static bool qede_is_irrelevant_stat(struct qede_dev *edev, int stat_index)
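
The string-table conversions in this file (and the qlcnic ones later in the series) replace open-coded sprintf() plus manual pointer arithmetic with ethtool_puts()/ethtool_sprintf(), which advance the output cursor by ETH_GSTRING_LEN internally. A minimal get_strings sketch with a hypothetical stat table:

#include <linux/array_size.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>

static const char * const demo_stat_names[] = {
	"rx_packets",
	"tx_packets",
};

static void demo_get_strings(struct net_device *dev, u32 sset, u8 *buf)
{
	int i;

	if (sset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(demo_stat_names); i++)
		ethtool_puts(&buf, demo_stat_names[i]);	/* advances buf */

	/* Formatted variant for per-queue names. */
	for (i = 0; i < 4; i++)
		ethtool_sprintf(&buf, "queue_%d_bytes", i);
}
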
@@ -331,26 +327,26 @@ static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
for (i = 0; i < QEDE_NUM_STATS; i++) {
if (qede_is_irrelevant_stat(edev, i))
continue;
- strcpy(buf, qede_stats_arr[i].string);
- buf += ETH_GSTRING_LEN;
+ ethtool_puts(&buf, qede_stats_arr[i].string);
}
}
static void qede_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
struct qede_dev *edev = netdev_priv(dev);
+ int i;
switch (stringset) {
case ETH_SS_STATS:
qede_get_strings_stats(edev, buf);
break;
case ETH_SS_PRIV_FLAGS:
- memcpy(buf, qede_private_arr,
- ETH_GSTRING_LEN * QEDE_PRI_FLAG_LEN);
+ for (i = 0; i < QEDE_PRI_FLAG_LEN; i++)
+ ethtool_puts(&buf, qede_private_arr[i]);
break;
case ETH_SS_TEST:
- memcpy(buf, qede_tests_str_arr,
- ETH_GSTRING_LEN * QEDE_ETHTOOL_TEST_MAX);
+ for (i = 0; i < QEDE_ETHTOOL_TEST_MAX; i++)
+ ethtool_puts(&buf, qede_tests_str_arr[i]);
break;
default:
DP_VERBOSE(edev, QED_MSG_DEBUG,
@@ -1026,7 +1022,7 @@ static int qede_get_regs_len(struct net_device *ndev)
static void qede_update_mtu(struct qede_dev *edev,
struct qede_reload_args *args)
{
- edev->ndev->mtu = args->u.mtu;
+ WRITE_ONCE(edev->ndev->mtu, args->u.mtu);
}
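
WRITE_ONCE() is used for the MTU store here (and in the qlcnic and emac hunks later) because fast-path readers look at dev->mtu without holding any lock; pairing the annotated store with READ_ONCE() on the reader side documents the lockless access and avoids store/load tearing. A tiny sketch:

#include <linux/netdevice.h>

static void demo_update_mtu(struct net_device *ndev, int new_mtu)
{
	/* Writer: single, non-torn store visible to lockless readers. */
	WRITE_ONCE(ndev->mtu, new_mtu);
}

static bool demo_frame_fits(const struct net_device *ndev, unsigned int len)
{
	/* Reader: pair with READ_ONCE() when no lock is held. */
	return len <= READ_ONCE(ndev->mtu);
}
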
/* Netdevice NDOs */
@@ -1137,7 +1133,7 @@ static int qede_set_channels(struct net_device *dev,
}
static int qede_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct qede_dev *edev = netdev_priv(dev);
@@ -1172,8 +1168,11 @@ static int qede_set_phys_id(struct net_device *dev,
return 0;
}
-static int qede_get_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
+static int qede_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *info)
{
+ struct qede_dev *edev = netdev_priv(dev);
+
info->data = RXH_IP_SRC | RXH_IP_DST;
switch (info->flow_type) {
@@ -1210,9 +1209,6 @@ static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
case ETHTOOL_GRXRINGS:
info->data = QEDE_RSS_COUNT(edev);
break;
- case ETHTOOL_GRXFH:
- rc = qede_get_rss_flags(edev, info);
- break;
case ETHTOOL_GRXCLSRLCNT:
info->rule_cnt = qede_get_arfs_filter_count(edev);
info->data = QEDE_RFS_MAX_FLTR;
@@ -1231,14 +1227,17 @@ static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
return rc;
}
-static int qede_set_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
+static int qede_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *info,
+ struct netlink_ext_ack *extack)
{
struct qed_update_vport_params *vport_update_params;
+ struct qede_dev *edev = netdev_priv(dev);
u8 set_caps = 0, clr_caps = 0;
int rc = 0;
DP_VERBOSE(edev, QED_MSG_DEBUG,
- "Set rss flags command parameters: flow type = %d, data = %llu\n",
+ "Set rss flags command parameters: flow type = %d, data = %u\n",
info->flow_type, info->data);
switch (info->flow_type) {
@@ -1341,9 +1340,6 @@ static int qede_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
int rc;
switch (info->cmd) {
- case ETHTOOL_SRXFH:
- rc = qede_set_rss_flags(edev, info);
- break;
case ETHTOOL_SRXCLSRLINS:
rc = qede_add_cls_rule(edev, info);
break;
@@ -2297,6 +2293,8 @@ static const struct ethtool_ops qede_ethtool_ops = {
.get_rxfh_key_size = qede_get_rxfh_key_size,
.get_rxfh = qede_get_rxfh,
.set_rxfh = qede_set_rxfh,
+ .get_rxfh_fields = qede_get_rxfh_fields,
+ .set_rxfh_fields = qede_set_rxfh_fields,
.get_ts_info = qede_get_ts_info,
.get_channels = qede_get_channels,
.set_channels = qede_set_channels,
@@ -2339,6 +2337,8 @@ static const struct ethtool_ops qede_vf_ethtool_ops = {
.get_rxfh_key_size = qede_get_rxfh_key_size,
.get_rxfh = qede_get_rxfh,
.set_rxfh = qede_set_rxfh,
+ .get_rxfh_fields = qede_get_rxfh_fields,
+ .set_rxfh_fields = qede_set_rxfh_fields,
.get_channels = qede_get_channels,
.set_channels = qede_set_channels,
.get_per_queue_coalesce = qede_get_per_coalesce,
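
With the ETHTOOL_GRXFH/SRXFH cases removed from get/set_rxnfc above, RSS hash-field configuration arrives through the dedicated .get_rxfh_fields/.set_rxfh_fields ethtool_ops instead. A skeletal sketch of the callback shapes (hypothetical capabilities, hardware programming omitted):

#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>

static int demo_get_rxfh_fields(struct net_device *dev,
				struct ethtool_rxfh_fields *fields)
{
	/* Report which header fields feed the RSS hash for this flow type. */
	fields->data = RXH_IP_SRC | RXH_IP_DST;
	return 0;
}

static int demo_set_rxfh_fields(struct net_device *dev,
				const struct ethtool_rxfh_fields *fields,
				struct netlink_ext_ack *extack)
{
	if (fields->data & ~(RXH_IP_SRC | RXH_IP_DST)) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported hash fields");
		return -EOPNOTSUPP;
	}
	return 0;
}

static const struct ethtool_ops demo_ethtool_ops = {
	.get_rxfh_fields = demo_get_rxfh_fields,
	.set_rxfh_fields = demo_set_rxfh_fields,
};
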
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index a5ac21a0ee33..7e341e026489 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -987,20 +987,17 @@ static int qede_udp_tunnel_sync(struct net_device *dev, unsigned int table)
static const struct udp_tunnel_nic_info qede_udp_tunnels_both = {
.sync_table = qede_udp_tunnel_sync,
- .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
.tables = {
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
},
}, qede_udp_tunnels_vxlan = {
.sync_table = qede_udp_tunnel_sync,
- .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
.tables = {
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
},
}, qede_udp_tunnels_geneve = {
.sync_table = qede_udp_tunnel_sync,
- .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
.tables = {
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
},
@@ -1520,8 +1517,8 @@ static int qede_flow_spec_validate_unused(struct qede_dev *edev,
return 0;
}
-static int qede_set_v4_tuple_to_profile(struct qede_dev *edev,
- struct qede_arfs_tuple *t)
+static int qede_set_v4_tuple_to_profile(struct qede_arfs_tuple *t,
+ struct netlink_ext_ack *extack)
{
/* We must have Only 4-tuples/l4 port/src ip/dst ip
* as an input.
@@ -1538,7 +1535,7 @@ static int qede_set_v4_tuple_to_profile(struct qede_dev *edev,
t->dst_ipv4 && !t->src_ipv4) {
t->mode = QED_FILTER_CONFIG_MODE_IP_DEST;
} else {
- DP_INFO(edev, "Invalid N-tuple\n");
+ NL_SET_ERR_MSG_MOD(extack, "Invalid N-tuple");
return -EOPNOTSUPP;
}
@@ -1549,9 +1546,9 @@ static int qede_set_v4_tuple_to_profile(struct qede_dev *edev,
return 0;
}
-static int qede_set_v6_tuple_to_profile(struct qede_dev *edev,
- struct qede_arfs_tuple *t,
- struct in6_addr *zaddr)
+static int qede_set_v6_tuple_to_profile(struct qede_arfs_tuple *t,
+ struct in6_addr *zaddr,
+ struct netlink_ext_ack *extack)
{
/* We must have Only 4-tuples/l4 port/src ip/dst ip
* as an input.
@@ -1573,7 +1570,7 @@ static int qede_set_v6_tuple_to_profile(struct qede_dev *edev,
!memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr))) {
t->mode = QED_FILTER_CONFIG_MODE_IP_DEST;
} else {
- DP_INFO(edev, "Invalid N-tuple\n");
+ NL_SET_ERR_MSG_MOD(extack, "Invalid N-tuple");
return -EOPNOTSUPP;
}
@@ -1671,7 +1668,7 @@ static int qede_parse_actions(struct qede_dev *edev,
int i;
if (!flow_action_has_entries(flow_action)) {
- DP_NOTICE(edev, "No actions received\n");
+ NL_SET_ERR_MSG_MOD(extack, "No actions received");
return -EINVAL;
}
@@ -1687,7 +1684,8 @@ static int qede_parse_actions(struct qede_dev *edev,
break;
if (act->queue.index >= QEDE_RSS_COUNT(edev)) {
- DP_INFO(edev, "Queue out-of-bounds\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Queue out-of-bounds");
return -EINVAL;
}
break;
@@ -1700,8 +1698,8 @@ static int qede_parse_actions(struct qede_dev *edev,
}
static int
-qede_flow_parse_ports(struct qede_dev *edev, struct flow_rule *rule,
- struct qede_arfs_tuple *t)
+qede_flow_parse_ports(struct flow_rule *rule, struct qede_arfs_tuple *t,
+ struct netlink_ext_ack *extack)
{
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
struct flow_match_ports match;
@@ -1709,7 +1707,8 @@ qede_flow_parse_ports(struct qede_dev *edev, struct flow_rule *rule,
flow_rule_match_ports(rule, &match);
if ((match.key->src && match.mask->src != htons(U16_MAX)) ||
(match.key->dst && match.mask->dst != htons(U16_MAX))) {
- DP_NOTICE(edev, "Do not support ports masks\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Do not support ports masks");
return -EINVAL;
}
@@ -1721,10 +1720,12 @@ qede_flow_parse_ports(struct qede_dev *edev, struct flow_rule *rule,
}
static int
-qede_flow_parse_v6_common(struct qede_dev *edev, struct flow_rule *rule,
- struct qede_arfs_tuple *t)
+qede_flow_parse_v6_common(struct flow_rule *rule,
+ struct qede_arfs_tuple *t,
+ struct netlink_ext_ack *extack)
{
struct in6_addr zero_addr, addr;
+ int err;
memset(&zero_addr, 0, sizeof(addr));
memset(&addr, 0xff, sizeof(addr));
@@ -1737,8 +1738,8 @@ qede_flow_parse_v6_common(struct qede_dev *edev, struct flow_rule *rule,
memcmp(&match.mask->src, &addr, sizeof(addr))) ||
(memcmp(&match.key->dst, &zero_addr, sizeof(addr)) &&
memcmp(&match.mask->dst, &addr, sizeof(addr)))) {
- DP_NOTICE(edev,
- "Do not support IPv6 address prefix/mask\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Do not support IPv6 address prefix/mask");
return -EINVAL;
}
@@ -1746,23 +1747,28 @@ qede_flow_parse_v6_common(struct qede_dev *edev, struct flow_rule *rule,
memcpy(&t->dst_ipv6, &match.key->dst, sizeof(addr));
}
- if (qede_flow_parse_ports(edev, rule, t))
- return -EINVAL;
+ err = qede_flow_parse_ports(rule, t, extack);
+ if (err)
+ return err;
- return qede_set_v6_tuple_to_profile(edev, t, &zero_addr);
+ return qede_set_v6_tuple_to_profile(t, &zero_addr, extack);
}
static int
-qede_flow_parse_v4_common(struct qede_dev *edev, struct flow_rule *rule,
- struct qede_arfs_tuple *t)
+qede_flow_parse_v4_common(struct flow_rule *rule,
+ struct qede_arfs_tuple *t,
+ struct netlink_ext_ack *extack)
{
+ int err;
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
struct flow_match_ipv4_addrs match;
flow_rule_match_ipv4_addrs(rule, &match);
if ((match.key->src && match.mask->src != htonl(U32_MAX)) ||
(match.key->dst && match.mask->dst != htonl(U32_MAX))) {
- DP_NOTICE(edev, "Do not support ipv4 prefix/masks\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Do not support ipv4 prefix/masks");
return -EINVAL;
}
@@ -1770,55 +1776,57 @@ qede_flow_parse_v4_common(struct qede_dev *edev, struct flow_rule *rule,
t->dst_ipv4 = match.key->dst;
}
- if (qede_flow_parse_ports(edev, rule, t))
- return -EINVAL;
+ err = qede_flow_parse_ports(rule, t, extack);
+ if (err)
+ return err;
- return qede_set_v4_tuple_to_profile(edev, t);
+ return qede_set_v4_tuple_to_profile(t, extack);
}
static int
-qede_flow_parse_tcp_v6(struct qede_dev *edev, struct flow_rule *rule,
- struct qede_arfs_tuple *tuple)
+qede_flow_parse_tcp_v6(struct flow_rule *rule, struct qede_arfs_tuple *tuple,
+ struct netlink_ext_ack *extack)
{
tuple->ip_proto = IPPROTO_TCP;
tuple->eth_proto = htons(ETH_P_IPV6);
- return qede_flow_parse_v6_common(edev, rule, tuple);
+ return qede_flow_parse_v6_common(rule, tuple, extack);
}
static int
-qede_flow_parse_tcp_v4(struct qede_dev *edev, struct flow_rule *rule,
- struct qede_arfs_tuple *tuple)
+qede_flow_parse_tcp_v4(struct flow_rule *rule, struct qede_arfs_tuple *tuple,
+ struct netlink_ext_ack *extack)
{
tuple->ip_proto = IPPROTO_TCP;
tuple->eth_proto = htons(ETH_P_IP);
- return qede_flow_parse_v4_common(edev, rule, tuple);
+ return qede_flow_parse_v4_common(rule, tuple, extack);
}
static int
-qede_flow_parse_udp_v6(struct qede_dev *edev, struct flow_rule *rule,
- struct qede_arfs_tuple *tuple)
+qede_flow_parse_udp_v6(struct flow_rule *rule, struct qede_arfs_tuple *tuple,
+ struct netlink_ext_ack *extack)
{
tuple->ip_proto = IPPROTO_UDP;
tuple->eth_proto = htons(ETH_P_IPV6);
- return qede_flow_parse_v6_common(edev, rule, tuple);
+ return qede_flow_parse_v6_common(rule, tuple, extack);
}
static int
-qede_flow_parse_udp_v4(struct qede_dev *edev, struct flow_rule *rule,
- struct qede_arfs_tuple *tuple)
+qede_flow_parse_udp_v4(struct flow_rule *rule, struct qede_arfs_tuple *tuple,
+ struct netlink_ext_ack *extack)
{
tuple->ip_proto = IPPROTO_UDP;
tuple->eth_proto = htons(ETH_P_IP);
- return qede_flow_parse_v4_common(edev, rule, tuple);
+ return qede_flow_parse_v4_common(rule, tuple, extack);
}
static int
-qede_parse_flow_attr(struct qede_dev *edev, __be16 proto,
- struct flow_rule *rule, struct qede_arfs_tuple *tuple)
+qede_parse_flow_attr(__be16 proto, struct flow_rule *rule,
+ struct qede_arfs_tuple *tuple,
+ struct netlink_ext_ack *extack)
{
struct flow_dissector *dissector = rule->match.dissector;
int rc = -EINVAL;
@@ -1832,14 +1840,18 @@ qede_parse_flow_attr(struct qede_dev *edev, __be16 proto,
BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
BIT_ULL(FLOW_DISSECTOR_KEY_PORTS))) {
- DP_NOTICE(edev, "Unsupported key set:0x%llx\n",
- dissector->used_keys);
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported key used: 0x%llx",
+ dissector->used_keys);
return -EOPNOTSUPP;
}
+ if (flow_rule_match_has_control_flags(rule, extack))
+ return -EOPNOTSUPP;
+
if (proto != htons(ETH_P_IP) &&
proto != htons(ETH_P_IPV6)) {
- DP_NOTICE(edev, "Unsupported proto=0x%x\n", proto);
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported proto=0x%x",
+ proto);
return -EPROTONOSUPPORT;
}
@@ -1851,15 +1863,15 @@ qede_parse_flow_attr(struct qede_dev *edev, __be16 proto,
}
if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IP))
- rc = qede_flow_parse_tcp_v4(edev, rule, tuple);
+ rc = qede_flow_parse_tcp_v4(rule, tuple, extack);
else if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IPV6))
- rc = qede_flow_parse_tcp_v6(edev, rule, tuple);
+ rc = qede_flow_parse_tcp_v6(rule, tuple, extack);
else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IP))
- rc = qede_flow_parse_udp_v4(edev, rule, tuple);
+ rc = qede_flow_parse_udp_v4(rule, tuple, extack);
else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IPV6))
- rc = qede_flow_parse_udp_v6(edev, rule, tuple);
+ rc = qede_flow_parse_udp_v6(rule, tuple, extack);
else
- DP_NOTICE(edev, "Invalid protocol request\n");
+ NL_SET_ERR_MSG_MOD(extack, "Invalid protocol request");
return rc;
}
@@ -1867,9 +1879,10 @@ qede_parse_flow_attr(struct qede_dev *edev, __be16 proto,
int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
struct flow_cls_offload *f)
{
+ struct netlink_ext_ack *extack = f->common.extack;
struct qede_arfs_fltr_node *n;
- int min_hlen, rc = -EINVAL;
struct qede_arfs_tuple t;
+ int min_hlen, rc;
__qede_lock(edev);
@@ -1879,7 +1892,8 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
}
/* parse flower attribute and prepare filter */
- if (qede_parse_flow_attr(edev, proto, f->rule, &t))
+ rc = qede_parse_flow_attr(proto, f->rule, &t, extack);
+ if (rc)
goto unlock;
/* Validate profile mode and number of filters */
@@ -1888,11 +1902,13 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
DP_NOTICE(edev,
"Filter configuration invalidated, filter mode=0x%x, configured mode=0x%x, filter count=0x%x\n",
t.mode, edev->arfs->mode, edev->arfs->filter_count);
+ rc = -EINVAL;
goto unlock;
}
/* parse tc actions and get the vf_id */
- if (qede_parse_actions(edev, &f->rule->action, f->common.extack))
+ rc = qede_parse_actions(edev, &f->rule->action, extack);
+ if (rc)
goto unlock;
if (qede_flow_find_fltr(edev, &t)) {
@@ -1938,8 +1954,11 @@ unlock:
static int qede_flow_spec_validate(struct qede_dev *edev,
struct flow_action *flow_action,
struct qede_arfs_tuple *t,
- __u32 location)
+ __u32 location,
+ struct netlink_ext_ack *extack)
{
+ int err;
+
if (location >= QEDE_RFS_MAX_FLTR) {
DP_INFO(edev, "Location out-of-bounds\n");
return -EINVAL;
@@ -1960,8 +1979,9 @@ static int qede_flow_spec_validate(struct qede_dev *edev,
return -EINVAL;
}
- if (qede_parse_actions(edev, flow_action, NULL))
- return -EINVAL;
+ err = qede_parse_actions(edev, flow_action, extack);
+ if (err)
+ return err;
return 0;
}
@@ -1972,11 +1992,13 @@ static int qede_flow_spec_to_rule(struct qede_dev *edev,
{
struct ethtool_rx_flow_spec_input input = {};
struct ethtool_rx_flow_rule *flow;
+ struct netlink_ext_ack extack;
__be16 proto;
- int err = 0;
+ int err;
- if (qede_flow_spec_validate_unused(edev, fs))
- return -EOPNOTSUPP;
+ err = qede_flow_spec_validate_unused(edev, fs);
+ if (err)
+ return err;
switch ((fs->flow_type & ~FLOW_EXT)) {
case TCP_V4_FLOW:
@@ -1998,15 +2020,16 @@ static int qede_flow_spec_to_rule(struct qede_dev *edev,
if (IS_ERR(flow))
return PTR_ERR(flow);
- if (qede_parse_flow_attr(edev, proto, flow->rule, t)) {
- err = -EINVAL;
+ err = qede_parse_flow_attr(proto, flow->rule, t, &extack);
+ if (err)
goto err_out;
- }
/* Make sure location is valid and filter isn't already set */
err = qede_flow_spec_validate(edev, &flow->rule->action, t,
- fs->location);
+ fs->location, &extack);
err_out:
+ if (extack._msg)
+ DP_NOTICE(edev, "%s\n", extack._msg);
ethtool_rx_flow_rule_destroy(flow);
return err;
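
qede_flow_spec_to_rule() has no extack from user space, so the hunk above builds a local struct netlink_ext_ack, lets the parsers fill it through NL_SET_ERR_MSG_MOD(), and logs the message afterwards. A condensed sketch of that pattern (hypothetical validation helper; the local extack is zero-initialized here so the _msg check is well defined):

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>

static int demo_validate(bool ok, struct netlink_ext_ack *extack)
{
	if (!ok) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid N-tuple");
		return -EOPNOTSUPP;
	}
	return 0;
}

static int demo_check(struct net_device *dev, bool ok)
{
	struct netlink_ext_ack extack = {};	/* no user-supplied extack here */
	int err;

	err = demo_validate(ok, &extack);
	if (extack._msg)
		netdev_notice(dev, "%s\n", extack._msg);

	return err;
}
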
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 847fa62c80df..e338bfc8b7b2 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -4,6 +4,7 @@
* Copyright (c) 2019-2020 Marvell International Ltd.
*/
+#include <linux/array_size.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
@@ -960,7 +961,7 @@ static inline void qede_tpa_cont(struct qede_dev *edev,
{
int i;
- for (i = 0; cqe->len_list[i]; i++)
+ for (i = 0; cqe->len_list[i] && i < ARRAY_SIZE(cqe->len_list); i++)
qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
le16_to_cpu(cqe->len_list[i]));
@@ -985,7 +986,7 @@ static int qede_tpa_end(struct qede_dev *edev,
dma_unmap_page(rxq->dev, tpa_info->buffer.mapping,
PAGE_SIZE, rxq->data_direction);
- for (i = 0; cqe->len_list[i]; i++)
+ for (i = 0; cqe->len_list[i] && i < ARRAY_SIZE(cqe->len_list); i++)
qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
le16_to_cpu(cqe->len_list[i]));
if (unlikely(i > 1))
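
The TPA loops above gain an ARRAY_SIZE() bound so a malformed completion cannot index past the fixed-size len_list. A generic sketch of scanning a zero-terminated, fixed-size descriptor list (hypothetical layout; in this sketch the bound is tested before the element so the final iteration stays in range):

#include <asm/byteorder.h>
#include <linux/array_size.h>
#include <linux/types.h>

struct demo_cqe {
	__le16 len_list[6];	/* zero-terminated unless completely full */
};

static unsigned int demo_total_len(const struct demo_cqe *cqe)
{
	unsigned int total = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(cqe->len_list) && cqe->len_list[i]; i++)
		total += le16_to_cpu(cqe->len_list[i]);

	return total;
}
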
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 99df00c30b8c..66ab1b9d65a1 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -203,7 +203,7 @@ static struct pci_driver qede_pci_driver = {
};
static struct qed_eth_cb_ops qede_ll_ops = {
- {
+ .common = {
#ifdef CONFIG_RFS_ACCEL
.arfs_filter_op = qede_arfs_filter_op,
#endif
@@ -506,25 +506,6 @@ static int qede_set_vf_trust(struct net_device *dev, int vfidx, bool setting)
}
#endif
-static int qede_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- struct qede_dev *edev = netdev_priv(dev);
-
- if (!netif_running(dev))
- return -EAGAIN;
-
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return qede_ptp_hw_ts(edev, ifr);
- default:
- DP_VERBOSE(edev, QED_MSG_DEBUG,
- "default IOCTL cmd 0x%x\n", cmd);
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
static void qede_fp_sb_dump(struct qede_dev *edev, struct qede_fastpath *fp)
{
char *p_sb = (char *)fp->sb_info->sb_virt;
@@ -717,7 +698,6 @@ static const struct net_device_ops qede_netdev_ops = {
.ndo_set_mac_address = qede_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = qede_change_mtu,
- .ndo_eth_ioctl = qede_ioctl,
.ndo_tx_timeout = qede_tx_timeout,
#ifdef CONFIG_QED_SRIOV
.ndo_set_vf_mac = qede_set_vf_mac,
@@ -742,6 +722,8 @@ static const struct net_device_ops qede_netdev_ops = {
#endif
.ndo_xdp_xmit = qede_xdp_transmit,
.ndo_setup_tc = qede_setup_tc_offload,
+ .ndo_hwtstamp_get = qede_hwtstamp_get,
+ .ndo_hwtstamp_set = qede_hwtstamp_set,
};
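
The ioctl-based SIOCSHWTSTAMP path is gone: the core now performs the copy_from_user()/copy_to_user() itself and calls the ndo_hwtstamp_get/ndo_hwtstamp_set hooks added above with a kernel_hwtstamp_config. A skeletal sketch of the two callbacks (hypothetical private state, hardware programming omitted):

#include <linux/errno.h>
#include <linux/net_tstamp.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>

struct demo_priv {
	struct kernel_hwtstamp_config ts_cfg;
};

static int demo_hwtstamp_get(struct net_device *dev,
			     struct kernel_hwtstamp_config *cfg)
{
	struct demo_priv *priv = netdev_priv(dev);

	*cfg = priv->ts_cfg;
	return 0;
}

static int demo_hwtstamp_set(struct net_device *dev,
			     struct kernel_hwtstamp_config *cfg,
			     struct netlink_ext_ack *extack)
{
	struct demo_priv *priv = netdev_priv(dev);

	if (cfg->tx_type != HWTSTAMP_TX_OFF && cfg->tx_type != HWTSTAMP_TX_ON) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported tx_type");
		return -ERANGE;
	}

	priv->ts_cfg = *cfg;
	return 0;
}
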
static const struct net_device_ops qede_netdev_vf_ops = {
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
index 747cc5e2bb78..d351be5fbda1 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
@@ -181,7 +181,7 @@ static void qede_ptp_task(struct work_struct *work)
}
/* Read the PHC. This API is invoked with ptp_lock held. */
-static u64 qede_ptp_read_cc(const struct cyclecounter *cc)
+static u64 qede_ptp_read_cc(struct cyclecounter *cc)
{
struct qede_dev *edev;
struct qede_ptp *ptp;
@@ -199,18 +199,15 @@ static u64 qede_ptp_read_cc(const struct cyclecounter *cc)
return phc_cycles;
}
-static int qede_ptp_cfg_filters(struct qede_dev *edev)
+static void qede_ptp_cfg_filters(struct qede_dev *edev)
{
enum qed_ptp_hwtstamp_tx_type tx_type = QED_PTP_HWTSTAMP_TX_ON;
enum qed_ptp_filter_type rx_filter = QED_PTP_FILTER_NONE;
struct qede_ptp *ptp = edev->ptp;
- if (!ptp)
- return -EIO;
-
if (!ptp->hw_ts_ioctl_called) {
DP_INFO(edev, "TS IOCTL not called\n");
- return 0;
+ return;
}
switch (ptp->tx_type) {
@@ -223,11 +220,6 @@ static int qede_ptp_cfg_filters(struct qede_dev *edev)
clear_bit(QEDE_FLAGS_TX_TIMESTAMPING_EN, &edev->flags);
tx_type = QED_PTP_HWTSTAMP_TX_OFF;
break;
-
- case HWTSTAMP_TX_ONESTEP_SYNC:
- case HWTSTAMP_TX_ONESTEP_P2P:
- DP_ERR(edev, "One-step timestamping is not supported\n");
- return -ERANGE;
}
spin_lock_bh(&ptp->lock);
@@ -286,65 +278,84 @@ static int qede_ptp_cfg_filters(struct qede_dev *edev)
ptp->ops->cfg_filters(edev->cdev, rx_filter, tx_type);
spin_unlock_bh(&ptp->lock);
-
- return 0;
}
-int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *ifr)
+int qede_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config config;
+ struct qede_dev *edev = netdev_priv(netdev);
struct qede_ptp *ptp;
- int rc;
+
+ if (!netif_running(netdev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Device is down");
+ return -EAGAIN;
+ }
ptp = edev->ptp;
- if (!ptp)
+ if (!ptp) {
+ NL_SET_ERR_MSG_MOD(extack, "HW timestamping is not supported");
return -EIO;
-
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
+ }
DP_VERBOSE(edev, QED_MSG_DEBUG,
- "HWTSTAMP IOCTL: Requested tx_type = %d, requested rx_filters = %d\n",
- config.tx_type, config.rx_filter);
+ "HWTSTAMP SET: Requested tx_type = %d, requested rx_filters = %d\n",
+ config->tx_type, config->rx_filter);
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_ON:
+ case HWTSTAMP_TX_OFF:
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "One-step timestamping is not supported");
+ return -ERANGE;
+ }
ptp->hw_ts_ioctl_called = 1;
- ptp->tx_type = config.tx_type;
- ptp->rx_filter = config.rx_filter;
+ ptp->tx_type = config->tx_type;
+ ptp->rx_filter = config->rx_filter;
- rc = qede_ptp_cfg_filters(edev);
- if (rc)
- return rc;
+ qede_ptp_cfg_filters(edev);
+
+ config->rx_filter = ptp->rx_filter;
+
+ return 0;
+}
- config.rx_filter = ptp->rx_filter;
+int qede_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+ struct qede_ptp *ptp;
- return copy_to_user(ifr->ifr_data, &config,
- sizeof(config)) ? -EFAULT : 0;
+ ptp = edev->ptp;
+ if (!ptp)
+ return -EIO;
+
+ config->tx_type = ptp->tx_type;
+ config->rx_filter = ptp->rx_filter;
+
+ return 0;
}
-int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info)
+int qede_ptp_get_ts_info(struct qede_dev *edev, struct kernel_ethtool_ts_info *info)
{
struct qede_ptp *ptp = edev->ptp;
if (!ptp) {
- info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
- info->phc_index = -1;
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
return 0;
}
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
if (ptp->clock)
info->phc_index = ptp_clock_index(ptp->clock);
- else
- info->phc_index = -1;
info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.h b/drivers/net/ethernet/qlogic/qede/qede_ptp.h
index 1db0f021c645..88f168395812 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.h
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.h
@@ -14,10 +14,14 @@
void qede_ptp_rx_ts(struct qede_dev *edev, struct sk_buff *skb);
void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb);
-int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *req);
+int qede_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config);
+int qede_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
void qede_ptp_disable(struct qede_dev *edev);
int qede_ptp_enable(struct qede_dev *edev);
-int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *ts);
+int qede_ptp_get_ts_info(struct qede_dev *edev, struct kernel_ethtool_ts_info *ts);
static inline void qede_ptp_record_rx_ts(struct qede_dev *edev,
union eth_rx_cqe *cqe,
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index fc78bc959ded..fca94a69c777 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -1501,7 +1501,7 @@ static int ql_finish_auto_neg(struct ql3_adapter *qdev)
"Remote error detected. Calling ql_port_start()\n");
/*
* ql_port_start() is shared code and needs
- * to lock the PHY on it's own.
+ * to lock the PHY on its own.
*/
ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
if (ql_port_start(qdev)) /* Restart port */
@@ -3420,7 +3420,7 @@ static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
pci_disable_msi(qdev->pdev);
}
- del_timer_sync(&qdev->adapter_timer);
+ timer_delete_sync(&qdev->adapter_timer);
napi_disable(&qdev->napi);
@@ -3735,7 +3735,7 @@ static void ql_get_board_info(struct ql3_adapter *qdev)
static void ql3xxx_timer(struct timer_list *t)
{
- struct ql3_adapter *qdev = from_timer(qdev, t, adapter_timer);
+ struct ql3_adapter *qdev = timer_container_of(qdev, t, adapter_timer);
queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0);
}
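
del_timer_sync() and from_timer() were renamed tree-wide to timer_delete_sync() and timer_container_of(); behaviour is unchanged. A minimal timer sketch using the new names (hypothetical adapter struct):

#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

struct demo_adapter {
	struct timer_list adapter_timer;
	struct work_struct link_work;
};

static void demo_timer_fn(struct timer_list *t)
{
	struct demo_adapter *a = timer_container_of(a, t, adapter_timer);

	schedule_work(&a->link_work);
	mod_timer(&a->adapter_timer, jiffies + HZ);
}

static void demo_adapter_stop(struct demo_adapter *a)
{
	/* Waits for a running callback to finish before returning. */
	timer_delete_sync(&a->adapter_timer);
}
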
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index b25102fded7b..3d0b5cd978cb 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1608,7 +1608,6 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *,
struct qlcnic_host_tx_ring *);
int qlcnic_check_fw_status(struct qlcnic_adapter *adapter);
-void qlcnic_watchdog_task(struct work_struct *work);
void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
struct qlcnic_host_rds_ring *rds_ring, u8 ring_id);
void qlcnic_set_multi(struct net_device *netdev);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index bcef8ab715bf..91e7b38143ea 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -2042,12 +2042,14 @@ int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *adapter, int mode)
int qlcnic_83xx_config_rss(struct qlcnic_adapter *adapter, int enable)
{
- int err;
- u32 word;
struct qlcnic_cmd_args cmd;
- const u64 key[] = { 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
- 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
- 0x255b0ec26d5a56daULL };
+ static const u64 key[] = {
+ 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
+ 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
+ 0x255b0ec26d5a56daULL
+ };
+ u32 word;
+ int err;
err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_RSS);
if (err)
@@ -4213,7 +4215,6 @@ static pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *pdev)
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
int err = 0;
- pdev->error_state = pci_channel_io_normal;
err = pci_enable_device(pdev);
if (err)
goto disconnect;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 23cd47d588e5..a55fe6ac06c7 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -539,7 +539,6 @@ int qlcnic_83xx_setup_intr(struct qlcnic_adapter *);
void qlcnic_83xx_get_func_no(struct qlcnic_adapter *);
int qlcnic_83xx_cam_lock(struct qlcnic_adapter *);
void qlcnic_83xx_cam_unlock(struct qlcnic_adapter *);
-int qlcnic_send_ctrl_op(struct qlcnic_adapter *, struct qlcnic_cmd_args *, u32);
void qlcnic_83xx_add_sysfs(struct qlcnic_adapter *);
void qlcnic_83xx_remove_sysfs(struct qlcnic_adapter *);
void qlcnic_83xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
@@ -577,8 +576,6 @@ int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *, u8 *, u8);
int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *,
struct qlcnic_adapter *, u32);
void qlcnic_free_mbx_args(struct qlcnic_cmd_args *);
-void qlcnic_set_npar_data(struct qlcnic_adapter *, const struct qlcnic_info *,
- struct qlcnic_info *);
int qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *,
struct ethtool_coalesce *);
int qlcnic_83xx_set_rx_tx_intr_coal(struct qlcnic_adapter *);
@@ -590,7 +587,6 @@ irqreturn_t qlcnic_83xx_intr(int, void *);
irqreturn_t qlcnic_83xx_tmp_intr(int, void *);
void qlcnic_83xx_check_vf(struct qlcnic_adapter *,
const struct pci_device_id *);
-int qlcnic_83xx_config_default_opmode(struct qlcnic_adapter *);
int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *);
void qlcnic_83xx_free_mbx_intr(struct qlcnic_adapter *);
void qlcnic_83xx_register_map(struct qlcnic_hardware_context *);
@@ -602,8 +598,6 @@ int qlcnic_83xx_flash_bulk_write(struct qlcnic_adapter *, u32, u32 *, int);
int qlcnic_83xx_flash_write32(struct qlcnic_adapter *, u32, u32 *);
int qlcnic_83xx_lock_flash(struct qlcnic_adapter *);
void qlcnic_83xx_unlock_flash(struct qlcnic_adapter *);
-int qlcnic_83xx_save_flash_status(struct qlcnic_adapter *);
-int qlcnic_83xx_restore_flash_status(struct qlcnic_adapter *, int);
int qlcnic_83xx_read_flash_mfg_id(struct qlcnic_adapter *);
int qlcnic_83xx_read_flash_descriptor_table(struct qlcnic_adapter *);
int qlcnic_83xx_flash_read32(struct qlcnic_adapter *, u32, u8 *, int);
@@ -616,13 +610,9 @@ void qlcnic_83xx_idc_exit(struct qlcnic_adapter *);
void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *, u32);
int qlcnic_83xx_lock_driver(struct qlcnic_adapter *);
void qlcnic_83xx_unlock_driver(struct qlcnic_adapter *);
-int qlcnic_83xx_set_default_offload_settings(struct qlcnic_adapter *);
int qlcnic_83xx_idc_vnic_pf_entry(struct qlcnic_adapter *);
int qlcnic_83xx_disable_vnic_mode(struct qlcnic_adapter *, int);
int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *);
-int qlcnic_83xx_get_vnic_vport_info(struct qlcnic_adapter *,
- struct qlcnic_info *, u8);
-int qlcnic_83xx_get_vnic_pf_info(struct qlcnic_adapter *, struct qlcnic_info *);
int qlcnic_83xx_set_port_eswitch_status(struct qlcnic_adapter *, int, int *);
void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index b733374b4dc5..6145252d8ff8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -2051,7 +2051,7 @@ static void qlcnic_83xx_init_hw(struct qlcnic_adapter *p_dev)
dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__);
}
-/* POST FW related definations*/
+/* POST FW related definitions*/
#define QLC_83XX_POST_SIGNATURE_REG 0x41602014
#define QLC_83XX_POST_MODE_REG 0x41602018
#define QLC_83XX_POST_FAST_MODE 0
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index c1436e1554de..17450e05c437 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -1196,60 +1196,56 @@ qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
int index, i, num_stats;
+ const char *str;
switch (stringset) {
case ETH_SS_TEST:
- memcpy(data, *qlcnic_gstrings_test,
- QLCNIC_TEST_LEN * ETH_GSTRING_LEN);
+ for (i = 0; i < QLCNIC_TEST_LEN; i++)
+ ethtool_puts(&data, qlcnic_gstrings_test[i]);
break;
case ETH_SS_STATS:
num_stats = ARRAY_SIZE(qlcnic_tx_queue_stats_strings);
- for (i = 0; i < adapter->drv_tx_rings; i++) {
+ for (i = 0; i < adapter->drv_tx_rings; i++)
for (index = 0; index < num_stats; index++) {
- sprintf(data, "tx_queue_%d %s", i,
- qlcnic_tx_queue_stats_strings[index]);
- data += ETH_GSTRING_LEN;
+ str = qlcnic_tx_queue_stats_strings[index];
+ ethtool_sprintf(&data, "tx_queue_%d %s", i,
+ str);
}
- }
- for (index = 0; index < QLCNIC_STATS_LEN; index++) {
- memcpy(data + index * ETH_GSTRING_LEN,
- qlcnic_gstrings_stats[index].stat_string,
- ETH_GSTRING_LEN);
+ for (i = 0; i < QLCNIC_STATS_LEN; i++) {
+ str = qlcnic_gstrings_stats[i].stat_string;
+ ethtool_puts(&data, str);
}
if (qlcnic_83xx_check(adapter)) {
num_stats = ARRAY_SIZE(qlcnic_83xx_tx_stats_strings);
- for (i = 0; i < num_stats; i++, index++)
- memcpy(data + index * ETH_GSTRING_LEN,
- qlcnic_83xx_tx_stats_strings[i],
- ETH_GSTRING_LEN);
+ for (i = 0; i < num_stats; i++) {
+ str = qlcnic_83xx_tx_stats_strings[i];
+ ethtool_puts(&data, str);
+ }
num_stats = ARRAY_SIZE(qlcnic_83xx_mac_stats_strings);
- for (i = 0; i < num_stats; i++, index++)
- memcpy(data + index * ETH_GSTRING_LEN,
- qlcnic_83xx_mac_stats_strings[i],
- ETH_GSTRING_LEN);
+ for (i = 0; i < num_stats; i++) {
+ str = qlcnic_83xx_mac_stats_strings[i];
+ ethtool_puts(&data, str);
+ }
num_stats = ARRAY_SIZE(qlcnic_83xx_rx_stats_strings);
- for (i = 0; i < num_stats; i++, index++)
- memcpy(data + index * ETH_GSTRING_LEN,
- qlcnic_83xx_rx_stats_strings[i],
- ETH_GSTRING_LEN);
+ for (i = 0; i < num_stats; i++) {
+ str = qlcnic_83xx_rx_stats_strings[i];
+ ethtool_puts(&data, str);
+ }
return;
} else {
num_stats = ARRAY_SIZE(qlcnic_83xx_mac_stats_strings);
- for (i = 0; i < num_stats; i++, index++)
- memcpy(data + index * ETH_GSTRING_LEN,
- qlcnic_83xx_mac_stats_strings[i],
- ETH_GSTRING_LEN);
+ for (i = 0; i < num_stats; i++) {
+ str = qlcnic_83xx_mac_stats_strings[i];
+ ethtool_puts(&data, str);
+ }
}
if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
return;
num_stats = ARRAY_SIZE(qlcnic_device_gstrings_stats);
- for (i = 0; i < num_stats; index++, i++) {
- memcpy(data + index * ETH_GSTRING_LEN,
- qlcnic_device_gstrings_stats[i],
- ETH_GSTRING_LEN);
- }
+ for (i = 0; i < num_stats; i++)
+ ethtool_puts(&data, qlcnic_device_gstrings_stats[i]);
}
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 4b8bc46f55c2..ae4ee0326ee1 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -1015,7 +1015,7 @@ int qlcnic_change_mtu(struct net_device *netdev, int mtu)
rc = qlcnic_fw_cmd_set_mtu(adapter, mtu);
if (!rc)
- netdev->mtu = mtu;
+ WRITE_ONCE(netdev->mtu, mtu);
return rc;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 90df4a0909fa..e051d8c7a28d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -367,7 +367,7 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *netdev,
- const unsigned char *addr, u16 vid,
+ const unsigned char *addr, u16 vid, bool *notified,
struct netlink_ext_ack *extack)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
@@ -394,7 +394,7 @@ static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
static int qlcnic_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *netdev,
const unsigned char *addr, u16 vid, u16 flags,
- struct netlink_ext_ack *extack)
+ bool *notified, struct netlink_ext_ack *extack)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
int err = 0;
@@ -486,7 +486,6 @@ static int qlcnic_udp_tunnel_sync(struct net_device *dev, unsigned int table)
static const struct udp_tunnel_nic_info qlcnic_udp_tunnels = {
.sync_table = qlcnic_udp_tunnel_sync,
- .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
.tables = {
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
},
@@ -3767,8 +3766,6 @@ static int qlcnic_attach_func(struct pci_dev *pdev)
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
- pdev->error_state = pci_channel_io_normal;
-
err = pci_enable_device(pdev);
if (err)
return err;
@@ -4146,7 +4143,7 @@ qlcnic_inetaddr_event(struct notifier_block *this,
struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
- dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
+ dev = ifa->ifa_dev->dev;
recheck:
if (dev == NULL)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index f9dd50152b1e..d57b976b9040 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -454,8 +454,10 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
num_vlans = sriov->num_allowed_vlans;
sriov->allowed_vlans = kcalloc(num_vlans, sizeof(u16), GFP_KERNEL);
- if (!sriov->allowed_vlans)
+ if (!sriov->allowed_vlans) {
+ qlcnic_sriov_free_vlans(adapter);
return -ENOMEM;
+ }
vlans = (u16 *)&cmd->rsp.arg[3];
for (i = 0; i < num_vlans; i++)
@@ -1482,8 +1484,11 @@ static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_o
}
cmd_op = (cmd.rsp.arg[0] & 0xff);
- if (cmd.rsp.arg[0] >> 25 == 2)
- return 2;
+ if (cmd.rsp.arg[0] >> 25 == 2) {
+ ret = 2;
+ goto out;
+ }
+
if (cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT)
set_bit(QLC_BC_VF_STATE, &vf->state);
else
@@ -2167,8 +2172,10 @@ int qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
vf = &sriov->vf_info[i];
vf->sriov_vlans = kcalloc(sriov->num_allowed_vlans,
sizeof(*vf->sriov_vlans), GFP_KERNEL);
- if (!vf->sriov_vlans)
+ if (!vf->sriov_vlans) {
+ qlcnic_sriov_free_vlans(adapter);
return -ENOMEM;
+ }
}
return 0;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 74125188beb8..5296d9a6ee83 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -264,7 +264,7 @@ static int qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
}
static ssize_t qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -281,7 +281,7 @@ static ssize_t qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
}
static ssize_t qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -310,7 +310,7 @@ static int qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
}
static ssize_t qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -332,7 +332,7 @@ static ssize_t qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
}
static ssize_t qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -396,7 +396,7 @@ static int validate_pm_config(struct qlcnic_adapter *adapter,
static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -446,7 +446,7 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -539,7 +539,7 @@ static int validate_esw_config(struct qlcnic_adapter *adapter,
static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -623,7 +623,7 @@ out:
static ssize_t qlcnic_sysfs_read_esw_config(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -675,7 +675,7 @@ static int validate_npar_config(struct qlcnic_adapter *adapter,
static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -722,7 +722,7 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -769,7 +769,7 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -804,7 +804,7 @@ static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
static ssize_t qlcnic_sysfs_get_esw_stats(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -839,7 +839,7 @@ static ssize_t qlcnic_sysfs_get_esw_stats(struct file *file,
static ssize_t qlcnic_sysfs_clear_esw_stats(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -868,7 +868,7 @@ static ssize_t qlcnic_sysfs_clear_esw_stats(struct file *file,
static ssize_t qlcnic_sysfs_clear_port_stats(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -898,7 +898,7 @@ static ssize_t qlcnic_sysfs_clear_port_stats(struct file *file,
static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -938,7 +938,7 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
static ssize_t qlcnic_83xx_sysfs_flash_read_handler(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -1115,7 +1115,7 @@ static int qlcnic_83xx_sysfs_flash_write(struct qlcnic_adapter *adapter,
static ssize_t qlcnic_83xx_sysfs_flash_write_handler(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -1217,7 +1217,6 @@ static const struct bin_attribute bin_attr_pci_config = {
.attr = { .name = "pci_config", .mode = 0644 },
.size = 0,
.read = qlcnic_sysfs_read_pci_config,
- .write = NULL,
};
static const struct bin_attribute bin_attr_port_stats = {
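
The sysfs callbacks in this file now take a const struct bin_attribute *, which lets the attribute definitions themselves stay const and makes explicit .write = NULL initializers unnecessary. A minimal read-only binary attribute sketch:

#include <linux/string.h>
#include <linux/sysfs.h>

static ssize_t demo_blob_read(struct file *filp, struct kobject *kobj,
			      const struct bin_attribute *attr, char *buf,
			      loff_t off, size_t count)
{
	static const char blob[] = "demo";

	return memory_read_from_buffer(buf, count, &off, blob, sizeof(blob));
}

static const struct bin_attribute bin_attr_demo_blob = {
	.attr = { .name = "demo_blob", .mode = 0444 },
	.read = demo_blob_read,
};
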
diff --git a/drivers/net/ethernet/qualcomm/Kconfig b/drivers/net/ethernet/qualcomm/Kconfig
index 9210ff360fdc..ba7efb108637 100644
--- a/drivers/net/ethernet/qualcomm/Kconfig
+++ b/drivers/net/ethernet/qualcomm/Kconfig
@@ -52,7 +52,6 @@ config QCOM_EMAC
depends on HAS_DMA && HAS_IOMEM
select CRC32
select PHYLIB
- select MDIO_DEVRES
help
This driver supports the Qualcomm Technologies, Inc. Gigabit
Ethernet Media Access Controller (EMAC). The controller
@@ -61,6 +60,21 @@ config QCOM_EMAC
low power, Receive-Side Scaling (RSS), and IEEE 1588-2008
Precision Clock Synchronization Protocol.
+config QCOM_PPE
+ tristate "Qualcomm Technologies, Inc. PPE Ethernet support"
+ depends on COMMON_CLK && HAS_IOMEM && OF
+ depends on ARCH_QCOM || COMPILE_TEST
+ select REGMAP_MMIO
+ help
+ This driver supports the Qualcomm Technologies, Inc. packet
+ process engine (PPE) available with IPQ SoC. The PPE includes
+ the Ethernet MACs, Ethernet DMA (EDMA) and switch core that
+ supports L3 flow offload, L2 switch function, RSS and tunnel
+ offload.
+
+ To compile this driver as a module, choose M here. The module
+ will be called qcom-ppe.
+
source "drivers/net/ethernet/qualcomm/rmnet/Kconfig"
endif # NET_VENDOR_QUALCOMM
diff --git a/drivers/net/ethernet/qualcomm/Makefile b/drivers/net/ethernet/qualcomm/Makefile
index 9250976dd884..166a59aea363 100644
--- a/drivers/net/ethernet/qualcomm/Makefile
+++ b/drivers/net/ethernet/qualcomm/Makefile
@@ -11,4 +11,5 @@ qcauart-objs := qca_uart.o
obj-y += emac/
+obj-$(CONFIG_QCOM_PPE) += ppe/
obj-$(CONFIG_RMNET) += rmnet/
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
index e4bc18009d08..28b3a7071e58 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
@@ -293,6 +293,11 @@ static struct sgmii_ops qdf2400_ops = {
};
#endif
+struct emac_match_data {
+ struct sgmii_ops **sgmii_ops;
+ struct device *target_device;
+};
+
static int emac_sgmii_acpi_match(struct device *dev, void *data)
{
#ifdef CONFIG_ACPI
@@ -303,7 +308,7 @@ static int emac_sgmii_acpi_match(struct device *dev, void *data)
{}
};
const struct acpi_device_id *id = acpi_match_device(match_table, dev);
- struct sgmii_ops **ops = data;
+ struct emac_match_data *match_data = data;
if (id) {
acpi_handle handle = ACPI_HANDLE(dev);
@@ -324,10 +329,12 @@ static int emac_sgmii_acpi_match(struct device *dev, void *data)
switch (hrv) {
case 1:
- *ops = &qdf2432_ops;
+ *match_data->sgmii_ops = &qdf2432_ops;
+ match_data->target_device = dev;
return 1;
case 2:
- *ops = &qdf2400_ops;
+ *match_data->sgmii_ops = &qdf2400_ops;
+ match_data->target_device = dev;
return 1;
}
}
@@ -356,16 +363,21 @@ int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt)
int ret;
if (has_acpi_companion(&pdev->dev)) {
+ struct emac_match_data match_data = {
+ .sgmii_ops = &phy->sgmii_ops,
+ .target_device = NULL,
+ };
struct device *dev;
- dev = device_find_child(&pdev->dev, &phy->sgmii_ops,
- emac_sgmii_acpi_match);
+ device_for_each_child(&pdev->dev, &match_data, emac_sgmii_acpi_match);
+ dev = match_data.target_device;
if (!dev) {
dev_warn(&pdev->dev, "cannot find internal phy node\n");
return 0;
}
+ get_device(dev);
sgmii_pdev = to_platform_device(dev);
} else {
const struct of_device_id *match;
@@ -407,7 +419,7 @@ int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt)
goto error_put_device;
}
- /* v2 SGMII has a per-lane digital digital, so parse it if it exists */
+ /* v2 SGMII has a per-lane digital, so parse it if it exists */
res = platform_get_resource(sgmii_pdev, IORESOURCE_MEM, 1);
if (res) {
phy->digital = ioremap(res->start, resource_size(res));
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 4c06f55878de..699a8afc214a 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -216,7 +216,7 @@ static int emac_change_mtu(struct net_device *netdev, int new_mtu)
netif_dbg(adpt, hw, adpt->netdev,
"changing MTU from %d to %d\n", netdev->mtu,
new_mtu);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (netif_running(netdev))
return emac_reinit_locked(adpt);
@@ -760,7 +760,7 @@ static void emac_shutdown(struct platform_device *pdev)
static struct platform_driver emac_platform_driver = {
.probe = emac_probe,
- .remove_new = emac_remove,
+ .remove = emac_remove,
.driver = {
.name = "qcom-emac",
.of_match_table = emac_dt_match,
diff --git a/drivers/net/ethernet/qualcomm/ppe/Makefile b/drivers/net/ethernet/qualcomm/ppe/Makefile
new file mode 100644
index 000000000000..9e60b2400c16
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/ppe/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the device driver of PPE (Packet Process Engine) in IPQ SoC
+#
+
+obj-$(CONFIG_QCOM_PPE) += qcom-ppe.o
+qcom-ppe-objs := ppe.o ppe_config.o ppe_debugfs.o
diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe.c b/drivers/net/ethernet/qualcomm/ppe/ppe.c
new file mode 100644
index 000000000000..be747510d947
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+/* PPE platform device probe, DTSI parser and PPE clock initializations. */
+
+#include <linux/clk.h>
+#include <linux/interconnect.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#include "ppe.h"
+#include "ppe_config.h"
+#include "ppe_debugfs.h"
+
+#define PPE_PORT_MAX 8
+#define PPE_CLK_RATE 353000000
+
+/* ICC clocks for enabling PPE device. The avg_bw and peak_bw with value 0
+ * will be updated by the clock rate of PPE.
+ */
+static const struct icc_bulk_data ppe_icc_data[] = {
+ {
+ .name = "ppe",
+ .avg_bw = 0,
+ .peak_bw = 0,
+ },
+ {
+ .name = "ppe_cfg",
+ .avg_bw = 0,
+ .peak_bw = 0,
+ },
+ {
+ .name = "qos_gen",
+ .avg_bw = 6000,
+ .peak_bw = 6000,
+ },
+ {
+ .name = "timeout_ref",
+ .avg_bw = 6000,
+ .peak_bw = 6000,
+ },
+ {
+ .name = "nssnoc_memnoc",
+ .avg_bw = 533333,
+ .peak_bw = 533333,
+ },
+ {
+ .name = "memnoc_nssnoc",
+ .avg_bw = 533333,
+ .peak_bw = 533333,
+ },
+ {
+ .name = "memnoc_nssnoc_1",
+ .avg_bw = 533333,
+ .peak_bw = 533333,
+ },
+};
+
+static const struct regmap_range ppe_readable_ranges[] = {
+ regmap_reg_range(0x0, 0x1ff), /* Global */
+ regmap_reg_range(0x400, 0x5ff), /* LPI CSR */
+ regmap_reg_range(0x1000, 0x11ff), /* GMAC0 */
+ regmap_reg_range(0x1200, 0x13ff), /* GMAC1 */
+ regmap_reg_range(0x1400, 0x15ff), /* GMAC2 */
+ regmap_reg_range(0x1600, 0x17ff), /* GMAC3 */
+ regmap_reg_range(0x1800, 0x19ff), /* GMAC4 */
+ regmap_reg_range(0x1a00, 0x1bff), /* GMAC5 */
+ regmap_reg_range(0xb000, 0xefff), /* PRX CSR */
+ regmap_reg_range(0xf000, 0x1efff), /* IPE */
+ regmap_reg_range(0x20000, 0x5ffff), /* PTX CSR */
+ regmap_reg_range(0x60000, 0x9ffff), /* IPE L2 CSR */
+ regmap_reg_range(0xb0000, 0xeffff), /* IPO CSR */
+ regmap_reg_range(0x100000, 0x17ffff), /* IPE PC */
+ regmap_reg_range(0x180000, 0x1bffff), /* PRE IPO CSR */
+ regmap_reg_range(0x1d0000, 0x1dffff), /* Tunnel parser */
+ regmap_reg_range(0x1e0000, 0x1effff), /* Ingress parse */
+ regmap_reg_range(0x200000, 0x2fffff), /* IPE L3 */
+ regmap_reg_range(0x300000, 0x3fffff), /* IPE tunnel */
+ regmap_reg_range(0x400000, 0x4fffff), /* Scheduler */
+ regmap_reg_range(0x500000, 0x503fff), /* XGMAC0 */
+ regmap_reg_range(0x504000, 0x507fff), /* XGMAC1 */
+ regmap_reg_range(0x508000, 0x50bfff), /* XGMAC2 */
+ regmap_reg_range(0x50c000, 0x50ffff), /* XGMAC3 */
+ regmap_reg_range(0x510000, 0x513fff), /* XGMAC4 */
+ regmap_reg_range(0x514000, 0x517fff), /* XGMAC5 */
+ regmap_reg_range(0x600000, 0x6fffff), /* BM */
+ regmap_reg_range(0x800000, 0x9fffff), /* QM */
+ regmap_reg_range(0xb00000, 0xbef800), /* EDMA */
+};
+
+static const struct regmap_access_table ppe_reg_table = {
+ .yes_ranges = ppe_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(ppe_readable_ranges),
+};
+
+static const struct regmap_config regmap_config_ipq9574 = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .rd_table = &ppe_reg_table,
+ .wr_table = &ppe_reg_table,
+ .max_register = 0xbef800,
+ .fast_io = true,
+};
+
+static int ppe_clock_init_and_reset(struct ppe_device *ppe_dev)
+{
+ unsigned long ppe_rate = ppe_dev->clk_rate;
+ struct device *dev = ppe_dev->dev;
+ struct reset_control *rstc;
+ struct clk_bulk_data *clks;
+ struct clk *clk;
+ int ret, i;
+
+ for (i = 0; i < ppe_dev->num_icc_paths; i++) {
+ ppe_dev->icc_paths[i].name = ppe_icc_data[i].name;
+ ppe_dev->icc_paths[i].avg_bw = ppe_icc_data[i].avg_bw ? :
+ Bps_to_icc(ppe_rate);
+
+ /* PPE does not have an explicit peak bandwidth requirement,
+ * so set the peak bandwidth to be equal to the average
+ * bandwidth.
+ */
+ ppe_dev->icc_paths[i].peak_bw = ppe_icc_data[i].peak_bw ? :
+ Bps_to_icc(ppe_rate);
+ }
+
+ ret = devm_of_icc_bulk_get(dev, ppe_dev->num_icc_paths,
+ ppe_dev->icc_paths);
+ if (ret)
+ return ret;
+
+ ret = icc_bulk_set_bw(ppe_dev->num_icc_paths, ppe_dev->icc_paths);
+ if (ret)
+ return ret;
+
+ /* The PPE clocks have a common parent clock. Setting the clock
+ * rate of "ppe" ensures the clock rate of all PPE clocks is
+ * configured to the same rate.
+ */
+ clk = devm_clk_get(dev, "ppe");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ret = clk_set_rate(clk, ppe_rate);
+ if (ret)
+ return ret;
+
+ ret = devm_clk_bulk_get_all_enabled(dev, &clks);
+ if (ret < 0)
+ return ret;
+
+ /* Reset the PPE. */
+ rstc = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(rstc))
+ return PTR_ERR(rstc);
+
+ ret = reset_control_assert(rstc);
+ if (ret)
+ return ret;
+
+ /* Holding the reset asserted for 10 ms is necessary for resetting the PPE. */
+ usleep_range(10000, 11000);
+
+ return reset_control_deassert(rstc);
+}
+
+static int qcom_ppe_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ppe_device *ppe_dev;
+ void __iomem *base;
+ int ret, num_icc;
+
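+ /* Allocate the PPE device together with its trailing icc_paths[]
+ * flexible array (one entry per interconnect path).
+ */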
+ num_icc = ARRAY_SIZE(ppe_icc_data);
+ ppe_dev = devm_kzalloc(dev, struct_size(ppe_dev, icc_paths, num_icc),
+ GFP_KERNEL);
+ if (!ppe_dev)
+ return -ENOMEM;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return dev_err_probe(dev, PTR_ERR(base), "PPE ioremap failed\n");
+
+ ppe_dev->regmap = devm_regmap_init_mmio(dev, base, &regmap_config_ipq9574);
+ if (IS_ERR(ppe_dev->regmap))
+ return dev_err_probe(dev, PTR_ERR(ppe_dev->regmap),
+ "PPE initialize regmap failed\n");
+ ppe_dev->dev = dev;
+ ppe_dev->clk_rate = PPE_CLK_RATE;
+ ppe_dev->num_ports = PPE_PORT_MAX;
+ ppe_dev->num_icc_paths = num_icc;
+
+ ret = ppe_clock_init_and_reset(ppe_dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "PPE clock config failed\n");
+
+ ret = ppe_hw_config(ppe_dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "PPE HW config failed\n");
+
+ ppe_debugfs_setup(ppe_dev);
+ platform_set_drvdata(pdev, ppe_dev);
+
+ return 0;
+}
+
+static void qcom_ppe_remove(struct platform_device *pdev)
+{
+ struct ppe_device *ppe_dev;
+
+ ppe_dev = platform_get_drvdata(pdev);
+ ppe_debugfs_teardown(ppe_dev);
+}
+
+static const struct of_device_id qcom_ppe_of_match[] = {
+ { .compatible = "qcom,ipq9574-ppe" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_ppe_of_match);
+
+static struct platform_driver qcom_ppe_driver = {
+ .driver = {
+ .name = "qcom_ppe",
+ .of_match_table = qcom_ppe_of_match,
+ },
+ .probe = qcom_ppe_probe,
+ .remove = qcom_ppe_remove,
+};
+module_platform_driver(qcom_ppe_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. IPQ PPE driver");
diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe.h b/drivers/net/ethernet/qualcomm/ppe/ppe.h
new file mode 100644
index 000000000000..27458f0bc206
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef __PPE_H__
+#define __PPE_H__
+
+#include <linux/compiler.h>
+#include <linux/interconnect.h>
+
+struct device;
+struct regmap;
+struct dentry;
+
+/**
+ * struct ppe_device - PPE device private data.
+ * @dev: PPE device structure.
+ * @regmap: PPE register map.
+ * @clk_rate: PPE clock rate.
+ * @num_ports: Number of PPE ports.
+ * @debugfs_root: Debugfs root entry.
+ * @num_icc_paths: Number of interconnect paths.
+ * @icc_paths: Interconnect path array.
+ *
+ * PPE device is the instance of PPE hardware, which is used to
+ * configure PPE packet process modules such as BM (buffer management),
+ * QM (queue management), and scheduler.
+ */
+struct ppe_device {
+ struct device *dev;
+ struct regmap *regmap;
+ unsigned long clk_rate;
+ unsigned int num_ports;
+ struct dentry *debugfs_root;
+ unsigned int num_icc_paths;
+ struct icc_bulk_data icc_paths[] __counted_by(num_icc_paths);
+};
+#endif
diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_config.c b/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
new file mode 100644
index 000000000000..e9a0e22907a6
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
@@ -0,0 +1,2034 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+/* PPE HW initialization configs such as BM (buffer management),
+ * QM (queue management) and scheduler configs.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/regmap.h>
+
+#include "ppe.h"
+#include "ppe_config.h"
+#include "ppe_regs.h"
+
+#define PPE_QUEUE_SCH_PRI_NUM 8
+
+/**
+ * struct ppe_bm_port_config - PPE BM port configuration.
+ * @port_id_start: The first BM port ID to configure.
+ * @port_id_end: The last BM port ID to configure.
+ * @pre_alloc: BM port dedicated buffer number.
+ * @in_fly_buf: Buffer number for receiving the packet after a pause frame is sent.
+ * @ceil: Ceil to generate the back pressure.
+ * @weight: Weight value.
+ * @resume_offset: Resume offset from the threshold value.
+ * @resume_ceil: Ceil to resume from the back pressure state.
+ * @dynamic: Dynamic threshold used or not.
+ *
+ * This is for configuring the threshold that impacts the port
+ * flow control.
+ */
+struct ppe_bm_port_config {
+ unsigned int port_id_start;
+ unsigned int port_id_end;
+ unsigned int pre_alloc;
+ unsigned int in_fly_buf;
+ unsigned int ceil;
+ unsigned int weight;
+ unsigned int resume_offset;
+ unsigned int resume_ceil;
+ bool dynamic;
+};
+
+/**
+ * struct ppe_qm_queue_config - PPE queue config.
+ * @queue_start: PPE start of queue ID.
+ * @queue_end: PPE end of queue ID.
+ * @prealloc_buf: Queue dedicated buffer number.
+ * @ceil: Ceil to start drop packet from queue.
+ * @weight: Weight value.
+ * @resume_offset: Resume offset from the threshold.
+ * @dynamic: Threshold value is decided dynamically or statically.
+ *
+ * Queue configuration decides the threshold to drop packet from PPE
+ * hardware queue.
+ */
+struct ppe_qm_queue_config {
+ unsigned int queue_start;
+ unsigned int queue_end;
+ unsigned int prealloc_buf;
+ unsigned int ceil;
+ unsigned int weight;
+ unsigned int resume_offset;
+ bool dynamic;
+};
+
+/**
+ * enum ppe_scheduler_direction - PPE scheduler direction for packet.
+ * @PPE_SCH_INGRESS: Scheduler for the packet on ingress.
+ * @PPE_SCH_EGRESS: Scheduler for the packet on egress.
+ */
+enum ppe_scheduler_direction {
+ PPE_SCH_INGRESS = 0,
+ PPE_SCH_EGRESS = 1,
+};
+
+/**
+ * struct ppe_scheduler_bm_config - PPE arbitration for buffer config.
+ * @valid: Arbitration entry valid or not.
+ * @dir: Arbitration entry for egress or ingress.
+ * @port: Port ID to use arbitration entry.
+ * @backup_port_valid: Backup port valid or not.
+ * @backup_port: Backup port ID to use.
+ *
+ * Configure the scheduler settings for accessing and releasing the PPE buffers.
+ */
+struct ppe_scheduler_bm_config {
+ bool valid;
+ enum ppe_scheduler_direction dir;
+ unsigned int port;
+ bool backup_port_valid;
+ unsigned int backup_port;
+};
+
+/**
+ * struct ppe_scheduler_qm_config - PPE arbitration for scheduler config.
+ * @ensch_port_bmp: Port bit map for enqueue scheduler.
+ * @ensch_port: Port ID to enqueue scheduler.
+ * @desch_port: Port ID to dequeue scheduler.
+ * @desch_backup_port_valid: Dequeue for the backup port valid or not.
+ * @desch_backup_port: Backup port ID to dequeue scheduler.
+ *
+ * Configure the scheduler settings for enqueuing and dequeuing packets on
+ * the PPE port.
+ */
+struct ppe_scheduler_qm_config {
+ unsigned int ensch_port_bmp;
+ unsigned int ensch_port;
+ unsigned int desch_port;
+ bool desch_backup_port_valid;
+ unsigned int desch_backup_port;
+};
+
+/**
+ * struct ppe_scheduler_port_config - PPE port scheduler config.
+ * @port: Port ID to be scheduled.
+ * @flow_level: Scheduler flow level or not.
+ * @node_id: Node ID; for level 0, the queue ID is used.
+ * @loop_num: Loop number of scheduler config.
+ * @pri_max: Max priority configured.
+ * @flow_id: Strict priority ID.
+ * @drr_node_id: Node ID for scheduler.
+ *
+ * PPE port scheduler configuration which decides the priority in the
+ * packet scheduler for the egress port.
+ */
+struct ppe_scheduler_port_config {
+ unsigned int port;
+ bool flow_level;
+ unsigned int node_id;
+ unsigned int loop_num;
+ unsigned int pri_max;
+ unsigned int flow_id;
+ unsigned int drr_node_id;
+};
+
+/**
+ * struct ppe_port_schedule_resource - PPE port scheduler resource.
+ * @ucastq_start: Unicast queue start ID.
+ * @ucastq_end: Unicast queue end ID.
+ * @mcastq_start: Multicast queue start ID.
+ * @mcastq_end: Multicast queue end ID.
+ * @flow_id_start: Flow start ID.
+ * @flow_id_end: Flow end ID.
+ * @l0node_start: Scheduler node start ID for queue level.
+ * @l0node_end: Scheduler node end ID for queue level.
+ * @l1node_start: Scheduler node start ID for flow level.
+ * @l1node_end: Scheduler node end ID for flow level.
+ *
+ * PPE scheduler resource allocated among the PPE ports.
+ */
+struct ppe_port_schedule_resource {
+ unsigned int ucastq_start;
+ unsigned int ucastq_end;
+ unsigned int mcastq_start;
+ unsigned int mcastq_end;
+ unsigned int flow_id_start;
+ unsigned int flow_id_end;
+ unsigned int l0node_start;
+ unsigned int l0node_end;
+ unsigned int l1node_start;
+ unsigned int l1node_end;
+};
+
+/* There are a total of 2048 buffers available in the PPE, out of which
+ * some buffers are reserved for specific purposes per PPE port. The
+ * remaining pool of 1550 buffers is assigned to the general 'group0',
+ * which is shared among all ports of the PPE.
+ */
+static const int ipq9574_ppe_bm_group_config = 1550;
+
+/* The buffer configurations per PPE port. There are 15 BM ports and
+ * 4 BM groups supported by PPE. BM port (0-7) is for EDMA port 0,
+ * BM port (8-13) is for PPE physical port 1-6 and BM port 14 is for
+ * EIP port.
+ */
+static const struct ppe_bm_port_config ipq9574_ppe_bm_port_config[] = {
+ {
+ /* Buffer configuration for the BM port ID 0 of EDMA. */
+ .port_id_start = 0,
+ .port_id_end = 0,
+ .pre_alloc = 0,
+ .in_fly_buf = 100,
+ .ceil = 1146,
+ .weight = 7,
+ .resume_offset = 8,
+ .resume_ceil = 0,
+ .dynamic = true,
+ },
+ {
+ /* Buffer configuration for the BM port ID 1-7 of EDMA. */
+ .port_id_start = 1,
+ .port_id_end = 7,
+ .pre_alloc = 0,
+ .in_fly_buf = 100,
+ .ceil = 250,
+ .weight = 4,
+ .resume_offset = 36,
+ .resume_ceil = 0,
+ .dynamic = true,
+ },
+ {
+ /* Buffer configuration for the BM port ID 8-13 of PPE ports. */
+ .port_id_start = 8,
+ .port_id_end = 13,
+ .pre_alloc = 0,
+ .in_fly_buf = 128,
+ .ceil = 250,
+ .weight = 4,
+ .resume_offset = 36,
+ .resume_ceil = 0,
+ .dynamic = true,
+ },
+ {
+ /* Buffer configuration for the BM port ID 14 of EIP. */
+ .port_id_start = 14,
+ .port_id_end = 14,
+ .pre_alloc = 0,
+ .in_fly_buf = 40,
+ .ceil = 250,
+ .weight = 4,
+ .resume_offset = 36,
+ .resume_ceil = 0,
+ .dynamic = true,
+ },
+};
+
+/* QM fetches the packet from PPE buffer management for transmitting the
+ * packet out. The QM group configuration limits the total number of buffers
+ * enqueued by all PPE hardware queues.
+ * There are a total of 2048 buffers available, out of which some are
+ * dedicated to hardware exception handlers. The remaining buffers are
+ * assigned to the general 'group0', which is the group assigned to all
+ * queues by default.
+ */
+static const int ipq9574_ppe_qm_group_config = 2000;
+
+/* Default QM settings for unicast and multicast queues for IPQ9574. */
+static const struct ppe_qm_queue_config ipq9574_ppe_qm_queue_config[] = {
+ {
+ /* QM settings for unicast queues 0 to 255. */
+ .queue_start = 0,
+ .queue_end = 255,
+ .prealloc_buf = 0,
+ .ceil = 1200,
+ .weight = 7,
+ .resume_offset = 36,
+ .dynamic = true,
+ },
+ {
+ /* QM settings for multicast queues 256 to 299. */
+ .queue_start = 256,
+ .queue_end = 299,
+ .prealloc_buf = 0,
+ .ceil = 250,
+ .weight = 0,
+ .resume_offset = 36,
+ .dynamic = false,
+ },
+};
+
+/* PPE scheduler configuration for BM includes multiple entries. Each entry
+ * indicates the primary port to be assigned buffers on ingress or to
+ * release buffers on egress. The backup port ID is used when the
+ * primary port is down.
+ */
+static const struct ppe_scheduler_bm_config ipq9574_ppe_sch_bm_config[] = {
+ {true, PPE_SCH_INGRESS, 0, false, 0},
+ {true, PPE_SCH_EGRESS, 0, false, 0},
+ {true, PPE_SCH_INGRESS, 5, false, 0},
+ {true, PPE_SCH_EGRESS, 5, false, 0},
+ {true, PPE_SCH_INGRESS, 6, false, 0},
+ {true, PPE_SCH_EGRESS, 6, false, 0},
+ {true, PPE_SCH_INGRESS, 1, false, 0},
+ {true, PPE_SCH_EGRESS, 1, false, 0},
+ {true, PPE_SCH_INGRESS, 0, false, 0},
+ {true, PPE_SCH_EGRESS, 0, false, 0},
+ {true, PPE_SCH_INGRESS, 5, false, 0},
+ {true, PPE_SCH_EGRESS, 5, false, 0},
+ {true, PPE_SCH_INGRESS, 6, false, 0},
+ {true, PPE_SCH_EGRESS, 6, false, 0},
+ {true, PPE_SCH_INGRESS, 7, false, 0},
+ {true, PPE_SCH_EGRESS, 7, false, 0},
+ {true, PPE_SCH_INGRESS, 0, false, 0},
+ {true, PPE_SCH_EGRESS, 0, false, 0},
+ {true, PPE_SCH_INGRESS, 1, false, 0},
+ {true, PPE_SCH_EGRESS, 1, false, 0},
+ {true, PPE_SCH_INGRESS, 5, false, 0},
+ {true, PPE_SCH_EGRESS, 5, false, 0},
+ {true, PPE_SCH_INGRESS, 6, false, 0},
+ {true, PPE_SCH_EGRESS, 6, false, 0},
+ {true, PPE_SCH_INGRESS, 2, false, 0},
+ {true, PPE_SCH_EGRESS, 2, false, 0},
+ {true, PPE_SCH_INGRESS, 0, false, 0},
+ {true, PPE_SCH_EGRESS, 0, false, 0},
+ {true, PPE_SCH_INGRESS, 5, false, 0},
+ {true, PPE_SCH_EGRESS, 5, false, 0},
+ {true, PPE_SCH_INGRESS, 6, false, 0},
+ {true, PPE_SCH_EGRESS, 6, false, 0},
+ {true, PPE_SCH_INGRESS, 1, false, 0},
+ {true, PPE_SCH_EGRESS, 1, false, 0},
+ {true, PPE_SCH_INGRESS, 3, false, 0},
+ {true, PPE_SCH_EGRESS, 3, false, 0},
+ {true, PPE_SCH_INGRESS, 0, false, 0},
+ {true, PPE_SCH_EGRESS, 0, false, 0},
+ {true, PPE_SCH_INGRESS, 5, false, 0},
+ {true, PPE_SCH_EGRESS, 5, false, 0},
+ {true, PPE_SCH_INGRESS, 6, false, 0},
+ {true, PPE_SCH_EGRESS, 6, false, 0},
+ {true, PPE_SCH_INGRESS, 7, false, 0},
+ {true, PPE_SCH_EGRESS, 7, false, 0},
+ {true, PPE_SCH_INGRESS, 0, false, 0},
+ {true, PPE_SCH_EGRESS, 0, false, 0},
+ {true, PPE_SCH_INGRESS, 1, false, 0},
+ {true, PPE_SCH_EGRESS, 1, false, 0},
+ {true, PPE_SCH_INGRESS, 5, false, 0},
+ {true, PPE_SCH_EGRESS, 5, false, 0},
+ {true, PPE_SCH_INGRESS, 6, false, 0},
+ {true, PPE_SCH_EGRESS, 6, false, 0},
+ {true, PPE_SCH_INGRESS, 4, false, 0},
+ {true, PPE_SCH_EGRESS, 4, false, 0},
+ {true, PPE_SCH_INGRESS, 0, false, 0},
+ {true, PPE_SCH_EGRESS, 0, false, 0},
+ {true, PPE_SCH_INGRESS, 5, false, 0},
+ {true, PPE_SCH_EGRESS, 5, false, 0},
+ {true, PPE_SCH_INGRESS, 6, false, 0},
+ {true, PPE_SCH_EGRESS, 6, false, 0},
+ {true, PPE_SCH_INGRESS, 1, false, 0},
+ {true, PPE_SCH_EGRESS, 1, false, 0},
+ {true, PPE_SCH_INGRESS, 0, false, 0},
+ {true, PPE_SCH_EGRESS, 0, false, 0},
+ {true, PPE_SCH_INGRESS, 5, false, 0},
+ {true, PPE_SCH_EGRESS, 5, false, 0},
+ {true, PPE_SCH_INGRESS, 6, false, 0},
+ {true, PPE_SCH_EGRESS, 6, false, 0},
+ {true, PPE_SCH_INGRESS, 2, false, 0},
+ {true, PPE_SCH_EGRESS, 2, false, 0},
+ {true, PPE_SCH_INGRESS, 0, false, 0},
+ {true, PPE_SCH_EGRESS, 0, false, 0},
+ {true, PPE_SCH_INGRESS, 7, false, 0},
+ {true, PPE_SCH_EGRESS, 7, false, 0},
+ {true, PPE_SCH_INGRESS, 5, false, 0},
+ {true, PPE_SCH_EGRESS, 5, false, 0},
+ {true, PPE_SCH_INGRESS, 6, false, 0},
+ {true, PPE_SCH_EGRESS, 6, false, 0},
+ {true, PPE_SCH_INGRESS, 1, false, 0},
+ {true, PPE_SCH_EGRESS, 1, false, 0},
+ {true, PPE_SCH_INGRESS, 0, false, 0},
+ {true, PPE_SCH_EGRESS, 0, false, 0},
+ {true, PPE_SCH_INGRESS, 5, false, 0},
+ {true, PPE_SCH_EGRESS, 5, false, 0},
+ {true, PPE_SCH_INGRESS, 6, false, 0},
+ {true, PPE_SCH_EGRESS, 6, false, 0},
+ {true, PPE_SCH_INGRESS, 3, false, 0},
+ {true, PPE_SCH_EGRESS, 3, false, 0},
+ {true, PPE_SCH_INGRESS, 1, false, 0},
+ {true, PPE_SCH_EGRESS, 1, false, 0},
+ {true, PPE_SCH_INGRESS, 0, false, 0},
+ {true, PPE_SCH_EGRESS, 0, false, 0},
+ {true, PPE_SCH_INGRESS, 5, false, 0},
+ {true, PPE_SCH_EGRESS, 5, false, 0},
+ {true, PPE_SCH_INGRESS, 6, false, 0},
+ {true, PPE_SCH_EGRESS, 6, false, 0},
+ {true, PPE_SCH_INGRESS, 4, false, 0},
+ {true, PPE_SCH_EGRESS, 4, false, 0},
+ {true, PPE_SCH_INGRESS, 7, false, 0},
+ {true, PPE_SCH_EGRESS, 7, false, 0},
+};
+
+/* PPE scheduler configuration for QM includes multiple entries. Each entry
+ * contains the ports to be dispatched for enqueueing and dequeueing. The
+ * backup port for dequeueing is used when the primary dequeue port is
+ * down.
+ */
+static const struct ppe_scheduler_qm_config ipq9574_ppe_sch_qm_config[] = {
+ {0x98, 6, 0, true, 1},
+ {0x94, 5, 6, true, 3},
+ {0x86, 0, 5, true, 4},
+ {0x8C, 1, 6, true, 0},
+ {0x1C, 7, 5, true, 1},
+ {0x98, 2, 6, true, 0},
+ {0x1C, 5, 7, true, 1},
+ {0x34, 3, 6, true, 0},
+ {0x8C, 4, 5, true, 1},
+ {0x98, 2, 6, true, 0},
+ {0x8C, 5, 4, true, 1},
+ {0xA8, 0, 6, true, 2},
+ {0x98, 5, 1, true, 0},
+ {0x98, 6, 5, true, 2},
+ {0x89, 1, 6, true, 4},
+ {0xA4, 3, 0, true, 1},
+ {0x8C, 5, 6, true, 4},
+ {0xA8, 0, 2, true, 1},
+ {0x98, 6, 5, true, 0},
+ {0xC4, 4, 3, true, 1},
+ {0x94, 6, 5, true, 0},
+ {0x1C, 7, 6, true, 1},
+ {0x98, 2, 5, true, 0},
+ {0x1C, 6, 7, true, 1},
+ {0x1C, 5, 6, true, 0},
+ {0x94, 3, 5, true, 1},
+ {0x8C, 4, 6, true, 0},
+ {0x94, 1, 5, true, 3},
+ {0x94, 6, 1, true, 0},
+ {0xD0, 3, 5, true, 2},
+ {0x98, 6, 0, true, 1},
+ {0x94, 5, 6, true, 3},
+ {0x94, 1, 5, true, 0},
+ {0x98, 2, 6, true, 1},
+ {0x8C, 4, 5, true, 0},
+ {0x1C, 7, 6, true, 1},
+ {0x8C, 0, 5, true, 4},
+ {0x89, 1, 6, true, 2},
+ {0x98, 5, 0, true, 1},
+ {0x94, 6, 5, true, 3},
+ {0x92, 0, 6, true, 2},
+ {0x98, 1, 5, true, 0},
+ {0x98, 6, 2, true, 1},
+ {0xD0, 0, 5, true, 3},
+ {0x94, 6, 0, true, 1},
+ {0x8C, 5, 6, true, 4},
+ {0x8C, 1, 5, true, 0},
+ {0x1C, 6, 7, true, 1},
+ {0x1C, 5, 6, true, 0},
+ {0xB0, 2, 3, true, 1},
+ {0xC4, 4, 5, true, 0},
+ {0x8C, 6, 4, true, 1},
+ {0xA4, 3, 6, true, 0},
+ {0x1C, 5, 7, true, 1},
+ {0x4C, 0, 5, true, 4},
+ {0x8C, 6, 0, true, 1},
+ {0x34, 7, 6, true, 3},
+ {0x94, 5, 0, true, 1},
+ {0x98, 6, 5, true, 2},
+};
+
+static const struct ppe_scheduler_port_config ppe_port_sch_config[] = {
+ {
+ .port = 0,
+ .flow_level = true,
+ .node_id = 0,
+ .loop_num = 1,
+ .pri_max = 1,
+ .flow_id = 0,
+ .drr_node_id = 0,
+ },
+ {
+ .port = 0,
+ .flow_level = false,
+ .node_id = 0,
+ .loop_num = 8,
+ .pri_max = 8,
+ .flow_id = 0,
+ .drr_node_id = 0,
+ },
+ {
+ .port = 0,
+ .flow_level = false,
+ .node_id = 8,
+ .loop_num = 8,
+ .pri_max = 8,
+ .flow_id = 0,
+ .drr_node_id = 0,
+ },
+ {
+ .port = 0,
+ .flow_level = false,
+ .node_id = 16,
+ .loop_num = 8,
+ .pri_max = 8,
+ .flow_id = 0,
+ .drr_node_id = 0,
+ },
+ {
+ .port = 0,
+ .flow_level = false,
+ .node_id = 24,
+ .loop_num = 8,
+ .pri_max = 8,
+ .flow_id = 0,
+ .drr_node_id = 0,
+ },
+ {
+ .port = 0,
+ .flow_level = false,
+ .node_id = 32,
+ .loop_num = 8,
+ .pri_max = 8,
+ .flow_id = 0,
+ .drr_node_id = 0,
+ },
+ {
+ .port = 0,
+ .flow_level = false,
+ .node_id = 40,
+ .loop_num = 8,
+ .pri_max = 8,
+ .flow_id = 0,
+ .drr_node_id = 0,
+ },
+ {
+ .port = 0,
+ .flow_level = false,
+ .node_id = 48,
+ .loop_num = 8,
+ .pri_max = 8,
+ .flow_id = 0,
+ .drr_node_id = 0,
+ },
+ {
+ .port = 0,
+ .flow_level = false,
+ .node_id = 56,
+ .loop_num = 8,
+ .pri_max = 8,
+ .flow_id = 0,
+ .drr_node_id = 0,
+ },
+ {
+ .port = 0,
+ .flow_level = false,
+ .node_id = 256,
+ .loop_num = 8,
+ .pri_max = 8,
+ .flow_id = 0,
+ .drr_node_id = 0,
+ },
+ {
+ .port = 0,
+ .flow_level = false,
+ .node_id = 264,
+ .loop_num = 8,
+ .pri_max = 8,
+ .flow_id = 0,
+ .drr_node_id = 0,
+ },
+ {
+ .port = 1,
+ .flow_level = true,
+ .node_id = 36,
+ .loop_num = 2,
+ .pri_max = 0,
+ .flow_id = 1,
+ .drr_node_id = 8,
+ },
+ {
+ .port = 1,
+ .flow_level = false,
+ .node_id = 144,
+ .loop_num = 16,
+ .pri_max = 8,
+ .flow_id = 36,
+ .drr_node_id = 48,
+ },
+ {
+ .port = 1,
+ .flow_level = false,
+ .node_id = 272,
+ .loop_num = 4,
+ .pri_max = 4,
+ .flow_id = 36,
+ .drr_node_id = 48,
+ },
+ {
+ .port = 2,
+ .flow_level = true,
+ .node_id = 40,
+ .loop_num = 2,
+ .pri_max = 0,
+ .flow_id = 2,
+ .drr_node_id = 12,
+ },
+ {
+ .port = 2,
+ .flow_level = false,
+ .node_id = 160,
+ .loop_num = 16,
+ .pri_max = 8,
+ .flow_id = 40,
+ .drr_node_id = 64,
+ },
+ {
+ .port = 2,
+ .flow_level = false,
+ .node_id = 276,
+ .loop_num = 4,
+ .pri_max = 4,
+ .flow_id = 40,
+ .drr_node_id = 64,
+ },
+ {
+ .port = 3,
+ .flow_level = true,
+ .node_id = 44,
+ .loop_num = 2,
+ .pri_max = 0,
+ .flow_id = 3,
+ .drr_node_id = 16,
+ },
+ {
+ .port = 3,
+ .flow_level = false,
+ .node_id = 176,
+ .loop_num = 16,
+ .pri_max = 8,
+ .flow_id = 44,
+ .drr_node_id = 80,
+ },
+ {
+ .port = 3,
+ .flow_level = false,
+ .node_id = 280,
+ .loop_num = 4,
+ .pri_max = 4,
+ .flow_id = 44,
+ .drr_node_id = 80,
+ },
+ {
+ .port = 4,
+ .flow_level = true,
+ .node_id = 48,
+ .loop_num = 2,
+ .pri_max = 0,
+ .flow_id = 4,
+ .drr_node_id = 20,
+ },
+ {
+ .port = 4,
+ .flow_level = false,
+ .node_id = 192,
+ .loop_num = 16,
+ .pri_max = 8,
+ .flow_id = 48,
+ .drr_node_id = 96,
+ },
+ {
+ .port = 4,
+ .flow_level = false,
+ .node_id = 284,
+ .loop_num = 4,
+ .pri_max = 4,
+ .flow_id = 48,
+ .drr_node_id = 96,
+ },
+ {
+ .port = 5,
+ .flow_level = true,
+ .node_id = 52,
+ .loop_num = 2,
+ .pri_max = 0,
+ .flow_id = 5,
+ .drr_node_id = 24,
+ },
+ {
+ .port = 5,
+ .flow_level = false,
+ .node_id = 208,
+ .loop_num = 16,
+ .pri_max = 8,
+ .flow_id = 52,
+ .drr_node_id = 112,
+ },
+ {
+ .port = 5,
+ .flow_level = false,
+ .node_id = 288,
+ .loop_num = 4,
+ .pri_max = 4,
+ .flow_id = 52,
+ .drr_node_id = 112,
+ },
+ {
+ .port = 6,
+ .flow_level = true,
+ .node_id = 56,
+ .loop_num = 2,
+ .pri_max = 0,
+ .flow_id = 6,
+ .drr_node_id = 28,
+ },
+ {
+ .port = 6,
+ .flow_level = false,
+ .node_id = 224,
+ .loop_num = 16,
+ .pri_max = 8,
+ .flow_id = 56,
+ .drr_node_id = 128,
+ },
+ {
+ .port = 6,
+ .flow_level = false,
+ .node_id = 292,
+ .loop_num = 4,
+ .pri_max = 4,
+ .flow_id = 56,
+ .drr_node_id = 128,
+ },
+ {
+ .port = 7,
+ .flow_level = true,
+ .node_id = 60,
+ .loop_num = 2,
+ .pri_max = 0,
+ .flow_id = 7,
+ .drr_node_id = 32,
+ },
+ {
+ .port = 7,
+ .flow_level = false,
+ .node_id = 240,
+ .loop_num = 16,
+ .pri_max = 8,
+ .flow_id = 60,
+ .drr_node_id = 144,
+ },
+ {
+ .port = 7,
+ .flow_level = false,
+ .node_id = 296,
+ .loop_num = 4,
+ .pri_max = 4,
+ .flow_id = 60,
+ .drr_node_id = 144,
+ },
+};
+
+/* The scheduler resource is applied to each PPE port. The resource
+ * includes the unicast & multicast queues, flow nodes and DRR nodes.
+ */
+static const struct ppe_port_schedule_resource ppe_scheduler_res[] = {
+ { .ucastq_start = 0,
+ .ucastq_end = 63,
+ .mcastq_start = 256,
+ .mcastq_end = 271,
+ .flow_id_start = 0,
+ .flow_id_end = 0,
+ .l0node_start = 0,
+ .l0node_end = 7,
+ .l1node_start = 0,
+ .l1node_end = 0,
+ },
+ { .ucastq_start = 144,
+ .ucastq_end = 159,
+ .mcastq_start = 272,
+ .mcastq_end = 275,
+ .flow_id_start = 36,
+ .flow_id_end = 39,
+ .l0node_start = 48,
+ .l0node_end = 63,
+ .l1node_start = 8,
+ .l1node_end = 11,
+ },
+ { .ucastq_start = 160,
+ .ucastq_end = 175,
+ .mcastq_start = 276,
+ .mcastq_end = 279,
+ .flow_id_start = 40,
+ .flow_id_end = 43,
+ .l0node_start = 64,
+ .l0node_end = 79,
+ .l1node_start = 12,
+ .l1node_end = 15,
+ },
+ { .ucastq_start = 176,
+ .ucastq_end = 191,
+ .mcastq_start = 280,
+ .mcastq_end = 283,
+ .flow_id_start = 44,
+ .flow_id_end = 47,
+ .l0node_start = 80,
+ .l0node_end = 95,
+ .l1node_start = 16,
+ .l1node_end = 19,
+ },
+ { .ucastq_start = 192,
+ .ucastq_end = 207,
+ .mcastq_start = 284,
+ .mcastq_end = 287,
+ .flow_id_start = 48,
+ .flow_id_end = 51,
+ .l0node_start = 96,
+ .l0node_end = 111,
+ .l1node_start = 20,
+ .l1node_end = 23,
+ },
+ { .ucastq_start = 208,
+ .ucastq_end = 223,
+ .mcastq_start = 288,
+ .mcastq_end = 291,
+ .flow_id_start = 52,
+ .flow_id_end = 55,
+ .l0node_start = 112,
+ .l0node_end = 127,
+ .l1node_start = 24,
+ .l1node_end = 27,
+ },
+ { .ucastq_start = 224,
+ .ucastq_end = 239,
+ .mcastq_start = 292,
+ .mcastq_end = 295,
+ .flow_id_start = 56,
+ .flow_id_end = 59,
+ .l0node_start = 128,
+ .l0node_end = 143,
+ .l1node_start = 28,
+ .l1node_end = 31,
+ },
+ { .ucastq_start = 240,
+ .ucastq_end = 255,
+ .mcastq_start = 296,
+ .mcastq_end = 299,
+ .flow_id_start = 60,
+ .flow_id_end = 63,
+ .l0node_start = 144,
+ .l0node_end = 159,
+ .l1node_start = 32,
+ .l1node_end = 35,
+ },
+ { .ucastq_start = 64,
+ .ucastq_end = 143,
+ .mcastq_start = 0,
+ .mcastq_end = 0,
+ .flow_id_start = 1,
+ .flow_id_end = 35,
+ .l0node_start = 8,
+ .l0node_end = 47,
+ .l1node_start = 1,
+ .l1node_end = 7,
+ },
+};
+
+/* Set the PPE queue level scheduler configuration. */
+static int ppe_scheduler_l0_queue_map_set(struct ppe_device *ppe_dev,
+ int node_id, int port,
+ struct ppe_scheduler_cfg scheduler_cfg)
+{
+ u32 val, reg;
+ int ret;
+
+ reg = PPE_L0_FLOW_MAP_TBL_ADDR + node_id * PPE_L0_FLOW_MAP_TBL_INC;
+ val = FIELD_PREP(PPE_L0_FLOW_MAP_TBL_FLOW_ID, scheduler_cfg.flow_id);
+ val |= FIELD_PREP(PPE_L0_FLOW_MAP_TBL_C_PRI, scheduler_cfg.pri);
+ val |= FIELD_PREP(PPE_L0_FLOW_MAP_TBL_E_PRI, scheduler_cfg.pri);
+ val |= FIELD_PREP(PPE_L0_FLOW_MAP_TBL_C_NODE_WT, scheduler_cfg.drr_node_wt);
+ val |= FIELD_PREP(PPE_L0_FLOW_MAP_TBL_E_NODE_WT, scheduler_cfg.drr_node_wt);
+
+ ret = regmap_write(ppe_dev->regmap, reg, val);
+ if (ret)
+ return ret;
+
+ reg = PPE_L0_C_FLOW_CFG_TBL_ADDR +
+ (scheduler_cfg.flow_id * PPE_QUEUE_SCH_PRI_NUM + scheduler_cfg.pri) *
+ PPE_L0_C_FLOW_CFG_TBL_INC;
+ val = FIELD_PREP(PPE_L0_C_FLOW_CFG_TBL_NODE_ID, scheduler_cfg.drr_node_id);
+ val |= FIELD_PREP(PPE_L0_C_FLOW_CFG_TBL_NODE_CREDIT_UNIT, scheduler_cfg.unit_is_packet);
+
+ ret = regmap_write(ppe_dev->regmap, reg, val);
+ if (ret)
+ return ret;
+
+ reg = PPE_L0_E_FLOW_CFG_TBL_ADDR +
+ (scheduler_cfg.flow_id * PPE_QUEUE_SCH_PRI_NUM + scheduler_cfg.pri) *
+ PPE_L0_E_FLOW_CFG_TBL_INC;
+ val = FIELD_PREP(PPE_L0_E_FLOW_CFG_TBL_NODE_ID, scheduler_cfg.drr_node_id);
+ val |= FIELD_PREP(PPE_L0_E_FLOW_CFG_TBL_NODE_CREDIT_UNIT, scheduler_cfg.unit_is_packet);
+
+ ret = regmap_write(ppe_dev->regmap, reg, val);
+ if (ret)
+ return ret;
+
+ reg = PPE_L0_FLOW_PORT_MAP_TBL_ADDR + node_id * PPE_L0_FLOW_PORT_MAP_TBL_INC;
+ val = FIELD_PREP(PPE_L0_FLOW_PORT_MAP_TBL_PORT_NUM, port);
+
+ ret = regmap_write(ppe_dev->regmap, reg, val);
+ if (ret)
+ return ret;
+
+ reg = PPE_L0_COMP_CFG_TBL_ADDR + node_id * PPE_L0_COMP_CFG_TBL_INC;
+ val = FIELD_PREP(PPE_L0_COMP_CFG_TBL_NODE_METER_LEN, scheduler_cfg.frame_mode);
+
+ return regmap_update_bits(ppe_dev->regmap, reg,
+ PPE_L0_COMP_CFG_TBL_NODE_METER_LEN,
+ val);
+}
+
+/* Set the PPE flow level scheduler configuration. */
+static int ppe_scheduler_l1_queue_map_set(struct ppe_device *ppe_dev,
+ int node_id, int port,
+ struct ppe_scheduler_cfg scheduler_cfg)
+{
+ u32 val, reg;
+ int ret;
+
+ val = FIELD_PREP(PPE_L1_FLOW_MAP_TBL_FLOW_ID, scheduler_cfg.flow_id);
+ val |= FIELD_PREP(PPE_L1_FLOW_MAP_TBL_C_PRI, scheduler_cfg.pri);
+ val |= FIELD_PREP(PPE_L1_FLOW_MAP_TBL_E_PRI, scheduler_cfg.pri);
+ val |= FIELD_PREP(PPE_L1_FLOW_MAP_TBL_C_NODE_WT, scheduler_cfg.drr_node_wt);
+ val |= FIELD_PREP(PPE_L1_FLOW_MAP_TBL_E_NODE_WT, scheduler_cfg.drr_node_wt);
+ reg = PPE_L1_FLOW_MAP_TBL_ADDR + node_id * PPE_L1_FLOW_MAP_TBL_INC;
+
+ ret = regmap_write(ppe_dev->regmap, reg, val);
+ if (ret)
+ return ret;
+
+ val = FIELD_PREP(PPE_L1_C_FLOW_CFG_TBL_NODE_ID, scheduler_cfg.drr_node_id);
+ val |= FIELD_PREP(PPE_L1_C_FLOW_CFG_TBL_NODE_CREDIT_UNIT, scheduler_cfg.unit_is_packet);
+ reg = PPE_L1_C_FLOW_CFG_TBL_ADDR +
+ (scheduler_cfg.flow_id * PPE_QUEUE_SCH_PRI_NUM + scheduler_cfg.pri) *
+ PPE_L1_C_FLOW_CFG_TBL_INC;
+
+ ret = regmap_write(ppe_dev->regmap, reg, val);
+ if (ret)
+ return ret;
+
+ val = FIELD_PREP(PPE_L1_E_FLOW_CFG_TBL_NODE_ID, scheduler_cfg.drr_node_id);
+ val |= FIELD_PREP(PPE_L1_E_FLOW_CFG_TBL_NODE_CREDIT_UNIT, scheduler_cfg.unit_is_packet);
+ reg = PPE_L1_E_FLOW_CFG_TBL_ADDR +
+ (scheduler_cfg.flow_id * PPE_QUEUE_SCH_PRI_NUM + scheduler_cfg.pri) *
+ PPE_L1_E_FLOW_CFG_TBL_INC;
+
+ ret = regmap_write(ppe_dev->regmap, reg, val);
+ if (ret)
+ return ret;
+
+ val = FIELD_PREP(PPE_L1_FLOW_PORT_MAP_TBL_PORT_NUM, port);
+ reg = PPE_L1_FLOW_PORT_MAP_TBL_ADDR + node_id * PPE_L1_FLOW_PORT_MAP_TBL_INC;
+
+ ret = regmap_write(ppe_dev->regmap, reg, val);
+ if (ret)
+ return ret;
+
+ reg = PPE_L1_COMP_CFG_TBL_ADDR + node_id * PPE_L1_COMP_CFG_TBL_INC;
+ val = FIELD_PREP(PPE_L1_COMP_CFG_TBL_NODE_METER_LEN, scheduler_cfg.frame_mode);
+
+ return regmap_update_bits(ppe_dev->regmap, reg, PPE_L1_COMP_CFG_TBL_NODE_METER_LEN, val);
+}
+
+/**
+ * ppe_queue_scheduler_set - Configure scheduler for PPE hardware queue
+ * @ppe_dev: PPE device
+ * @node_id: PPE queue ID or flow ID
+ * @flow_level: Flow level scheduler or queue level scheduler
+ * @port: PPE port ID for which the scheduler is configured
+ * @scheduler_cfg: PPE scheduler configuration
+ *
+ * PPE scheduler configuration supports queue level and flow level on
+ * the PPE egress port.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int ppe_queue_scheduler_set(struct ppe_device *ppe_dev,
+ int node_id, bool flow_level, int port,
+ struct ppe_scheduler_cfg scheduler_cfg)
+{
+ if (flow_level)
+ return ppe_scheduler_l1_queue_map_set(ppe_dev, node_id,
+ port, scheduler_cfg);
+
+ return ppe_scheduler_l0_queue_map_set(ppe_dev, node_id,
+ port, scheduler_cfg);
+}
+
+/**
+ * ppe_queue_ucast_base_set - Set PPE unicast queue base ID and profile ID
+ * @ppe_dev: PPE device
+ * @queue_dst: PPE queue destination configuration
+ * @queue_base: PPE queue base ID
+ * @profile_id: Profile ID
+ *
+ * The PPE unicast queue base ID and profile ID are configured based on the
+ * destination information, which can be the service code, the CPU code or
+ * the destination port.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int ppe_queue_ucast_base_set(struct ppe_device *ppe_dev,
+ struct ppe_queue_ucast_dest queue_dst,
+ int queue_base, int profile_id)
+{
+ int index, profile_size;
+ u32 val, reg;
+
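+ /* The unicast queue map table holds a 256-entry block per source
+ * profile, covering destination ports, CPU codes and service codes.
+ */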
+ profile_size = queue_dst.src_profile << 8;
+ if (queue_dst.service_code_en)
+ index = PPE_QUEUE_BASE_SERVICE_CODE + profile_size +
+ queue_dst.service_code;
+ else if (queue_dst.cpu_code_en)
+ index = PPE_QUEUE_BASE_CPU_CODE + profile_size +
+ queue_dst.cpu_code;
+ else
+ index = profile_size + queue_dst.dest_port;
+
+ val = FIELD_PREP(PPE_UCAST_QUEUE_MAP_TBL_PROFILE_ID, profile_id);
+ val |= FIELD_PREP(PPE_UCAST_QUEUE_MAP_TBL_QUEUE_ID, queue_base);
+ reg = PPE_UCAST_QUEUE_MAP_TBL_ADDR + index * PPE_UCAST_QUEUE_MAP_TBL_INC;
+
+ return regmap_write(ppe_dev->regmap, reg, val);
+}
+
+/**
+ * ppe_queue_ucast_offset_pri_set - Set PPE unicast queue offset based on priority
+ * @ppe_dev: PPE device
+ * @profile_id: Profile ID
+ * @priority: PPE internal priority to be used to set queue offset
+ * @queue_offset: Queue offset used for calculating the destination queue ID
+ *
+ * The PPE unicast queue offset is configured based on the PPE
+ * internal priority.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int ppe_queue_ucast_offset_pri_set(struct ppe_device *ppe_dev,
+ int profile_id,
+ int priority,
+ int queue_offset)
+{
+ u32 val, reg;
+ int index;
+
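+ /* The priority map table holds 16 priority entries per profile ID. */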
+ index = (profile_id << 4) + priority;
+ val = FIELD_PREP(PPE_UCAST_PRIORITY_MAP_TBL_CLASS, queue_offset);
+ reg = PPE_UCAST_PRIORITY_MAP_TBL_ADDR + index * PPE_UCAST_PRIORITY_MAP_TBL_INC;
+
+ return regmap_write(ppe_dev->regmap, reg, val);
+}
+
+/**
+ * ppe_queue_ucast_offset_hash_set - Set PPE unicast queue offset based on hash
+ * @ppe_dev: PPE device
+ * @profile_id: Profile ID
+ * @rss_hash: Packet hash value to be used to set queue offset
+ * @queue_offset: Queue offset used for calculating the destination queue ID
+ *
+ * The PPE unicast queue offset is configured based on the RSS hash value.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int ppe_queue_ucast_offset_hash_set(struct ppe_device *ppe_dev,
+ int profile_id,
+ int rss_hash,
+ int queue_offset)
+{
+ u32 val, reg;
+ int index;
+
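+ /* The hash map table holds 256 hash entries per profile ID. */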
+ index = (profile_id << 8) + rss_hash;
+ val = FIELD_PREP(PPE_UCAST_HASH_MAP_TBL_HASH, queue_offset);
+ reg = PPE_UCAST_HASH_MAP_TBL_ADDR + index * PPE_UCAST_HASH_MAP_TBL_INC;
+
+ return regmap_write(ppe_dev->regmap, reg, val);
+}
+
+/**
+ * ppe_port_resource_get - Get PPE resource per port
+ * @ppe_dev: PPE device
+ * @port: PPE port
+ * @type: Resource type
+ * @res_start: Resource start ID returned
+ * @res_end: Resource end ID returned
+ *
+ * PPE resources are assigned per PPE port and are acquired for the QoS scheduler.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int ppe_port_resource_get(struct ppe_device *ppe_dev, int port,
+ enum ppe_resource_type type,
+ int *res_start, int *res_end)
+{
+ struct ppe_port_schedule_resource res;
+
+ /* The reserved resource with the maximum port ID of PPE is
+ * also allowed to be acquired.
+ */
+ if (port > ppe_dev->num_ports)
+ return -EINVAL;
+
+ res = ppe_scheduler_res[port];
+ switch (type) {
+ case PPE_RES_UCAST:
+ *res_start = res.ucastq_start;
+ *res_end = res.ucastq_end;
+ break;
+ case PPE_RES_MCAST:
+ *res_start = res.mcastq_start;
+ *res_end = res.mcastq_end;
+ break;
+ case PPE_RES_FLOW_ID:
+ *res_start = res.flow_id_start;
+ *res_end = res.flow_id_end;
+ break;
+ case PPE_RES_L0_NODE:
+ *res_start = res.l0node_start;
+ *res_end = res.l0node_end;
+ break;
+ case PPE_RES_L1_NODE:
+ *res_start = res.l1node_start;
+ *res_end = res.l1node_end;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * ppe_sc_config_set - Set PPE service code configuration
+ * @ppe_dev: PPE device
+ * @sc: Service ID, 0-255 supported by PPE
+ * @cfg: Service code configuration
+ *
+ * The PPE service code is used by the PPE during its packet processing
+ * stages to perform or bypass selected operations on the packet.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int ppe_sc_config_set(struct ppe_device *ppe_dev, int sc, struct ppe_sc_cfg cfg)
+{
+ u32 val, reg, servcode_val[2] = {};
+ unsigned long bitmap_value;
+ int ret;
+
+ val = FIELD_PREP(PPE_IN_L2_SERVICE_TBL_DST_PORT_ID_VALID, cfg.dest_port_valid);
+ val |= FIELD_PREP(PPE_IN_L2_SERVICE_TBL_DST_PORT_ID, cfg.dest_port);
+ val |= FIELD_PREP(PPE_IN_L2_SERVICE_TBL_DST_DIRECTION, cfg.is_src);
+
+ bitmap_value = bitmap_read(cfg.bitmaps.egress, 0, PPE_SC_BYPASS_EGRESS_SIZE);
+ val |= FIELD_PREP(PPE_IN_L2_SERVICE_TBL_DST_BYPASS_BITMAP, bitmap_value);
+ val |= FIELD_PREP(PPE_IN_L2_SERVICE_TBL_RX_CNT_EN,
+ test_bit(PPE_SC_BYPASS_COUNTER_RX, cfg.bitmaps.counter));
+ val |= FIELD_PREP(PPE_IN_L2_SERVICE_TBL_TX_CNT_EN,
+ test_bit(PPE_SC_BYPASS_COUNTER_TX, cfg.bitmaps.counter));
+ reg = PPE_IN_L2_SERVICE_TBL_ADDR + PPE_IN_L2_SERVICE_TBL_INC * sc;
+
+ ret = regmap_write(ppe_dev->regmap, reg, val);
+ if (ret)
+ return ret;
+
+ bitmap_value = bitmap_read(cfg.bitmaps.ingress, 0, PPE_SC_BYPASS_INGRESS_SIZE);
+ PPE_SERVICE_SET_BYPASS_BITMAP(servcode_val, bitmap_value);
+ PPE_SERVICE_SET_RX_CNT_EN(servcode_val,
+ test_bit(PPE_SC_BYPASS_COUNTER_RX_VLAN, cfg.bitmaps.counter));
+ reg = PPE_SERVICE_TBL_ADDR + PPE_SERVICE_TBL_INC * sc;
+
+ ret = regmap_bulk_write(ppe_dev->regmap, reg,
+ servcode_val, ARRAY_SIZE(servcode_val));
+ if (ret)
+ return ret;
+
+ reg = PPE_EG_SERVICE_TBL_ADDR + PPE_EG_SERVICE_TBL_INC * sc;
+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
+ servcode_val, ARRAY_SIZE(servcode_val));
+ if (ret)
+ return ret;
+
+ PPE_EG_SERVICE_SET_NEXT_SERVCODE(servcode_val, cfg.next_service_code);
+ PPE_EG_SERVICE_SET_UPDATE_ACTION(servcode_val, cfg.eip_field_update_bitmap);
+ PPE_EG_SERVICE_SET_HW_SERVICE(servcode_val, cfg.eip_hw_service);
+ PPE_EG_SERVICE_SET_OFFSET_SEL(servcode_val, cfg.eip_offset_sel);
+ PPE_EG_SERVICE_SET_TX_CNT_EN(servcode_val,
+ test_bit(PPE_SC_BYPASS_COUNTER_TX_VLAN, cfg.bitmaps.counter));
+
+ ret = regmap_bulk_write(ppe_dev->regmap, reg,
+ servcode_val, ARRAY_SIZE(servcode_val));
+ if (ret)
+ return ret;
+
+ bitmap_value = bitmap_read(cfg.bitmaps.tunnel, 0, PPE_SC_BYPASS_TUNNEL_SIZE);
+ val = FIELD_PREP(PPE_TL_SERVICE_TBL_BYPASS_BITMAP, bitmap_value);
+ reg = PPE_TL_SERVICE_TBL_ADDR + PPE_TL_SERVICE_TBL_INC * sc;
+
+ return regmap_write(ppe_dev->regmap, reg, val);
+}
+
+/**
+ * ppe_counter_enable_set - Set PPE port counter enabled
+ * @ppe_dev: PPE device
+ * @port: PPE port ID
+ *
+ * Enable PPE counters on the given port for the unicast, multicast and
+ * VLAN packets received and transmitted by the PPE.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int ppe_counter_enable_set(struct ppe_device *ppe_dev, int port)
+{
+ u32 reg, mru_mtu_val[3];
+ int ret;
+
+ reg = PPE_MRU_MTU_CTRL_TBL_ADDR + PPE_MRU_MTU_CTRL_TBL_INC * port;
+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
+ mru_mtu_val, ARRAY_SIZE(mru_mtu_val));
+ if (ret)
+ return ret;
+
+ PPE_MRU_MTU_CTRL_SET_RX_CNT_EN(mru_mtu_val, true);
+ PPE_MRU_MTU_CTRL_SET_TX_CNT_EN(mru_mtu_val, true);
+ ret = regmap_bulk_write(ppe_dev->regmap, reg,
+ mru_mtu_val, ARRAY_SIZE(mru_mtu_val));
+ if (ret)
+ return ret;
+
+ reg = PPE_MC_MTU_CTRL_TBL_ADDR + PPE_MC_MTU_CTRL_TBL_INC * port;
+ ret = regmap_set_bits(ppe_dev->regmap, reg, PPE_MC_MTU_CTRL_TBL_TX_CNT_EN);
+ if (ret)
+ return ret;
+
+ reg = PPE_PORT_EG_VLAN_TBL_ADDR + PPE_PORT_EG_VLAN_TBL_INC * port;
+
+ return regmap_set_bits(ppe_dev->regmap, reg, PPE_PORT_EG_VLAN_TBL_TX_COUNTING_EN);
+}
+
+static int ppe_rss_hash_ipv4_config(struct ppe_device *ppe_dev, int index,
+ struct ppe_rss_hash_cfg cfg)
+{
+ u32 reg, val;
+
+ switch (index) {
+ case 0:
+ val = cfg.hash_sip_mix[0];
+ break;
+ case 1:
+ val = cfg.hash_dip_mix[0];
+ break;
+ case 2:
+ val = cfg.hash_protocol_mix;
+ break;
+ case 3:
+ val = cfg.hash_dport_mix;
+ break;
+ case 4:
+ val = cfg.hash_sport_mix;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ reg = PPE_RSS_HASH_MIX_IPV4_ADDR + index * PPE_RSS_HASH_MIX_IPV4_INC;
+
+ return regmap_update_bits(ppe_dev->regmap, reg,
+ PPE_RSS_HASH_MIX_IPV4_VAL,
+ FIELD_PREP(PPE_RSS_HASH_MIX_IPV4_VAL, val));
+}
+
+static int ppe_rss_hash_ipv6_config(struct ppe_device *ppe_dev, int index,
+ struct ppe_rss_hash_cfg cfg)
+{
+ u32 reg, val;
+
+ switch (index) {
+ case 0 ... 3:
+ val = cfg.hash_sip_mix[index];
+ break;
+ case 4 ... 7:
+ val = cfg.hash_dip_mix[index - 4];
+ break;
+ case 8:
+ val = cfg.hash_protocol_mix;
+ break;
+ case 9:
+ val = cfg.hash_dport_mix;
+ break;
+ case 10:
+ val = cfg.hash_sport_mix;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ reg = PPE_RSS_HASH_MIX_ADDR + index * PPE_RSS_HASH_MIX_INC;
+
+ return regmap_update_bits(ppe_dev->regmap, reg,
+ PPE_RSS_HASH_MIX_VAL,
+ FIELD_PREP(PPE_RSS_HASH_MIX_VAL, val));
+}
+
+/**
+ * ppe_rss_hash_config_set - Configure the PPE hash settings for the packet received.
+ * @ppe_dev: PPE device.
+ * @mode: Configure RSS hash for the packet type IPv4 and IPv6.
+ * @cfg: RSS hash configuration.
+ *
+ * PPE RSS hash settings are configured for the packet type IPv4 and IPv6.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int ppe_rss_hash_config_set(struct ppe_device *ppe_dev, int mode,
+ struct ppe_rss_hash_cfg cfg)
+{
+ u32 val, reg;
+ int i, ret;
+
+ if (mode & PPE_RSS_HASH_MODE_IPV4) {
+ val = FIELD_PREP(PPE_RSS_HASH_MASK_IPV4_HASH_MASK, cfg.hash_mask);
+ val |= FIELD_PREP(PPE_RSS_HASH_MASK_IPV4_FRAGMENT, cfg.hash_fragment_mode);
+ ret = regmap_write(ppe_dev->regmap, PPE_RSS_HASH_MASK_IPV4_ADDR, val);
+ if (ret)
+ return ret;
+
+ val = FIELD_PREP(PPE_RSS_HASH_SEED_IPV4_VAL, cfg.hash_seed);
+ ret = regmap_write(ppe_dev->regmap, PPE_RSS_HASH_SEED_IPV4_ADDR, val);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < PPE_RSS_HASH_MIX_IPV4_ENTRIES; i++) {
+ ret = ppe_rss_hash_ipv4_config(ppe_dev, i, cfg);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < PPE_RSS_HASH_FIN_IPV4_ENTRIES; i++) {
+ val = FIELD_PREP(PPE_RSS_HASH_FIN_IPV4_INNER, cfg.hash_fin_inner[i]);
+ val |= FIELD_PREP(PPE_RSS_HASH_FIN_IPV4_OUTER, cfg.hash_fin_outer[i]);
+ reg = PPE_RSS_HASH_FIN_IPV4_ADDR + i * PPE_RSS_HASH_FIN_IPV4_INC;
+
+ ret = regmap_write(ppe_dev->regmap, reg, val);
+ if (ret)
+ return ret;
+ }
+ }
+
+ if (mode & PPE_RSS_HASH_MODE_IPV6) {
+ val = FIELD_PREP(PPE_RSS_HASH_MASK_HASH_MASK, cfg.hash_mask);
+ val |= FIELD_PREP(PPE_RSS_HASH_MASK_FRAGMENT, cfg.hash_fragment_mode);
+ ret = regmap_write(ppe_dev->regmap, PPE_RSS_HASH_MASK_ADDR, val);
+ if (ret)
+ return ret;
+
+ val = FIELD_PREP(PPE_RSS_HASH_SEED_VAL, cfg.hash_seed);
+ ret = regmap_write(ppe_dev->regmap, PPE_RSS_HASH_SEED_ADDR, val);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < PPE_RSS_HASH_MIX_ENTRIES; i++) {
+ ret = ppe_rss_hash_ipv6_config(ppe_dev, i, cfg);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < PPE_RSS_HASH_FIN_ENTRIES; i++) {
+ val = FIELD_PREP(PPE_RSS_HASH_FIN_INNER, cfg.hash_fin_inner[i]);
+ val |= FIELD_PREP(PPE_RSS_HASH_FIN_OUTER, cfg.hash_fin_outer[i]);
+ reg = PPE_RSS_HASH_FIN_ADDR + i * PPE_RSS_HASH_FIN_INC;
+
+ ret = regmap_write(ppe_dev->regmap, reg, val);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ppe_ring_queue_map_set - Set the PPE queue to Ethernet DMA ring mapping
+ * @ppe_dev: PPE device
+ * @ring_id: Ethernet DMA ring ID
+ * @queue_map: Bit map of queue IDs to given Ethernet DMA ring
+ *
+ * Configure the mapping from a set of PPE queues to a given Ethernet DMA ring.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int ppe_ring_queue_map_set(struct ppe_device *ppe_dev, int ring_id, u32 *queue_map)
+{
+ u32 reg, queue_bitmap_val[PPE_RING_TO_QUEUE_BITMAP_WORD_CNT];
+
+ memcpy(queue_bitmap_val, queue_map, sizeof(queue_bitmap_val));
+ reg = PPE_RING_Q_MAP_TBL_ADDR + PPE_RING_Q_MAP_TBL_INC * ring_id;
+
+ return regmap_bulk_write(ppe_dev->regmap, reg,
+ queue_bitmap_val,
+ ARRAY_SIZE(queue_bitmap_val));
+}
+
+static int ppe_config_bm_threshold(struct ppe_device *ppe_dev, int bm_port_id,
+ const struct ppe_bm_port_config port_cfg)
+{
+ u32 reg, val, bm_fc_val[2];
+ int ret;
+
+ reg = PPE_BM_PORT_FC_CFG_TBL_ADDR + PPE_BM_PORT_FC_CFG_TBL_INC * bm_port_id;
+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
+ bm_fc_val, ARRAY_SIZE(bm_fc_val));
+ if (ret)
+ return ret;
+
+ /* Configure BM flow control related threshold. */
+ PPE_BM_PORT_FC_SET_WEIGHT(bm_fc_val, port_cfg.weight);
+ PPE_BM_PORT_FC_SET_RESUME_OFFSET(bm_fc_val, port_cfg.resume_offset);
+ PPE_BM_PORT_FC_SET_RESUME_THRESHOLD(bm_fc_val, port_cfg.resume_ceil);
+ PPE_BM_PORT_FC_SET_DYNAMIC(bm_fc_val, port_cfg.dynamic);
+ PPE_BM_PORT_FC_SET_REACT_LIMIT(bm_fc_val, port_cfg.in_fly_buf);
+ PPE_BM_PORT_FC_SET_PRE_ALLOC(bm_fc_val, port_cfg.pre_alloc);
+
+ /* Configure low/high bits of the ceiling for the BM port. */
+ val = FIELD_GET(GENMASK(2, 0), port_cfg.ceil);
+ PPE_BM_PORT_FC_SET_CEILING_LOW(bm_fc_val, val);
+ val = FIELD_GET(GENMASK(10, 3), port_cfg.ceil);
+ PPE_BM_PORT_FC_SET_CEILING_HIGH(bm_fc_val, val);
+
+ ret = regmap_bulk_write(ppe_dev->regmap, reg,
+ bm_fc_val, ARRAY_SIZE(bm_fc_val));
+ if (ret)
+ return ret;
+
+ /* Assign the default group ID 0 to the BM port. */
+ val = FIELD_PREP(PPE_BM_PORT_GROUP_ID_SHARED_GROUP_ID, 0);
+ reg = PPE_BM_PORT_GROUP_ID_ADDR + PPE_BM_PORT_GROUP_ID_INC * bm_port_id;
+ ret = regmap_update_bits(ppe_dev->regmap, reg,
+ PPE_BM_PORT_GROUP_ID_SHARED_GROUP_ID,
+ val);
+ if (ret)
+ return ret;
+
+ /* Enable BM port flow control. */
+ reg = PPE_BM_PORT_FC_MODE_ADDR + PPE_BM_PORT_FC_MODE_INC * bm_port_id;
+
+ return regmap_set_bits(ppe_dev->regmap, reg, PPE_BM_PORT_FC_MODE_EN);
+}
+
+/* Configure the buffer threshold for the port flow control function. */
+static int ppe_config_bm(struct ppe_device *ppe_dev)
+{
+ const struct ppe_bm_port_config *port_cfg;
+ unsigned int i, bm_port_id, port_cfg_cnt;
+ u32 reg, val;
+ int ret;
+
+ /* Configure the allocated buffer number only for group 0.
+ * The buffer number of groups 1-3 is already cleared to 0
+ * after PPE reset during the probe of PPE driver.
+ */
+ reg = PPE_BM_SHARED_GROUP_CFG_ADDR;
+ val = FIELD_PREP(PPE_BM_SHARED_GROUP_CFG_SHARED_LIMIT,
+ ipq9574_ppe_bm_group_config);
+ ret = regmap_update_bits(ppe_dev->regmap, reg,
+ PPE_BM_SHARED_GROUP_CFG_SHARED_LIMIT,
+ val);
+ if (ret)
+ goto bm_config_fail;
+
+ /* Configure buffer thresholds for the BM ports. */
+ port_cfg = ipq9574_ppe_bm_port_config;
+ port_cfg_cnt = ARRAY_SIZE(ipq9574_ppe_bm_port_config);
+ for (i = 0; i < port_cfg_cnt; i++) {
+ for (bm_port_id = port_cfg[i].port_id_start;
+ bm_port_id <= port_cfg[i].port_id_end; bm_port_id++) {
+ ret = ppe_config_bm_threshold(ppe_dev, bm_port_id,
+ port_cfg[i]);
+ if (ret)
+ goto bm_config_fail;
+ }
+ }
+
+ return 0;
+
+bm_config_fail:
+ dev_err(ppe_dev->dev, "PPE BM config error %d\n", ret);
+ return ret;
+}
+
+/* Configure the PPE hardware queue depth, which is decided by the
+ * per-queue threshold.
+ */
+static int ppe_config_qm(struct ppe_device *ppe_dev)
+{
+ const struct ppe_qm_queue_config *queue_cfg;
+ int ret, i, queue_id, queue_cfg_count;
+ u32 reg, multicast_queue_cfg[5];
+ u32 unicast_queue_cfg[4];
+ u32 group_cfg[3];
+
+ /* Assign the buffer number to the group 0 by default. */
+ reg = PPE_AC_GRP_CFG_TBL_ADDR;
+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
+ group_cfg, ARRAY_SIZE(group_cfg));
+ if (ret)
+ goto qm_config_fail;
+
+ PPE_AC_GRP_SET_BUF_LIMIT(group_cfg, ipq9574_ppe_qm_group_config);
+
+ ret = regmap_bulk_write(ppe_dev->regmap, reg,
+ group_cfg, ARRAY_SIZE(group_cfg));
+ if (ret)
+ goto qm_config_fail;
+
+ queue_cfg = ipq9574_ppe_qm_queue_config;
+ queue_cfg_count = ARRAY_SIZE(ipq9574_ppe_qm_queue_config);
+ for (i = 0; i < queue_cfg_count; i++) {
+ queue_id = queue_cfg[i].queue_start;
+
+ /* Configure threshold for dropping packets separately for
+ * unicast and multicast PPE queues.
+ */
+ while (queue_id <= queue_cfg[i].queue_end) {
+ if (queue_id < PPE_AC_UNICAST_QUEUE_CFG_TBL_ENTRIES) {
+ reg = PPE_AC_UNICAST_QUEUE_CFG_TBL_ADDR +
+ PPE_AC_UNICAST_QUEUE_CFG_TBL_INC * queue_id;
+
+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
+ unicast_queue_cfg,
+ ARRAY_SIZE(unicast_queue_cfg));
+ if (ret)
+ goto qm_config_fail;
+
+ PPE_AC_UNICAST_QUEUE_SET_EN(unicast_queue_cfg, true);
+ PPE_AC_UNICAST_QUEUE_SET_GRP_ID(unicast_queue_cfg, 0);
+ PPE_AC_UNICAST_QUEUE_SET_PRE_LIMIT(unicast_queue_cfg,
+ queue_cfg[i].prealloc_buf);
+ PPE_AC_UNICAST_QUEUE_SET_DYNAMIC(unicast_queue_cfg,
+ queue_cfg[i].dynamic);
+ PPE_AC_UNICAST_QUEUE_SET_WEIGHT(unicast_queue_cfg,
+ queue_cfg[i].weight);
+ PPE_AC_UNICAST_QUEUE_SET_THRESHOLD(unicast_queue_cfg,
+ queue_cfg[i].ceil);
+ PPE_AC_UNICAST_QUEUE_SET_GRN_RESUME(unicast_queue_cfg,
+ queue_cfg[i].resume_offset);
+
+ ret = regmap_bulk_write(ppe_dev->regmap, reg,
+ unicast_queue_cfg,
+ ARRAY_SIZE(unicast_queue_cfg));
+ if (ret)
+ goto qm_config_fail;
+ } else {
+ reg = PPE_AC_MULTICAST_QUEUE_CFG_TBL_ADDR +
+ PPE_AC_MULTICAST_QUEUE_CFG_TBL_INC * queue_id;
+
+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
+ multicast_queue_cfg,
+ ARRAY_SIZE(multicast_queue_cfg));
+ if (ret)
+ goto qm_config_fail;
+
+ PPE_AC_MULTICAST_QUEUE_SET_EN(multicast_queue_cfg, true);
+ PPE_AC_MULTICAST_QUEUE_SET_GRN_GRP_ID(multicast_queue_cfg, 0);
+ PPE_AC_MULTICAST_QUEUE_SET_GRN_PRE_LIMIT(multicast_queue_cfg,
+ queue_cfg[i].prealloc_buf);
+ PPE_AC_MULTICAST_QUEUE_SET_GRN_THRESHOLD(multicast_queue_cfg,
+ queue_cfg[i].ceil);
+ PPE_AC_MULTICAST_QUEUE_SET_GRN_RESUME(multicast_queue_cfg,
+ queue_cfg[i].resume_offset);
+
+ ret = regmap_bulk_write(ppe_dev->regmap, reg,
+ multicast_queue_cfg,
+ ARRAY_SIZE(multicast_queue_cfg));
+ if (ret)
+ goto qm_config_fail;
+ }
+
+ /* Enable enqueue. */
+ reg = PPE_ENQ_OPR_TBL_ADDR + PPE_ENQ_OPR_TBL_INC * queue_id;
+ ret = regmap_clear_bits(ppe_dev->regmap, reg,
+ PPE_ENQ_OPR_TBL_ENQ_DISABLE);
+ if (ret)
+ goto qm_config_fail;
+
+ /* Enable dequeue. */
+ reg = PPE_DEQ_OPR_TBL_ADDR + PPE_DEQ_OPR_TBL_INC * queue_id;
+ ret = regmap_clear_bits(ppe_dev->regmap, reg,
+ PPE_DEQ_OPR_TBL_DEQ_DISABLE);
+ if (ret)
+ goto qm_config_fail;
+
+ queue_id++;
+ }
+ }
+
+ /* Enable queue counter for all PPE hardware queues. */
+ ret = regmap_set_bits(ppe_dev->regmap, PPE_EG_BRIDGE_CONFIG_ADDR,
+ PPE_EG_BRIDGE_CONFIG_QUEUE_CNT_EN);
+ if (ret)
+ goto qm_config_fail;
+
+ return 0;
+
+qm_config_fail:
+ dev_err(ppe_dev->dev, "PPE QM config error %d\n", ret);
+ return ret;
+}
+
+static int ppe_node_scheduler_config(struct ppe_device *ppe_dev,
+ const struct ppe_scheduler_port_config config)
+{
+ struct ppe_scheduler_cfg sch_cfg;
+ int ret, i;
+
+ for (i = 0; i < config.loop_num; i++) {
+ if (!config.pri_max) {
+ /* Round robin scheduler without priority. */
+ sch_cfg.flow_id = config.flow_id;
+ sch_cfg.pri = 0;
+ sch_cfg.drr_node_id = config.drr_node_id;
+ } else {
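+ /* With priorities, loop index i maps to the flow ID offset
+ * i / pri_max, priority i % pri_max and a dedicated DRR node
+ * per iteration.
+ */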
+ sch_cfg.flow_id = config.flow_id + (i / config.pri_max);
+ sch_cfg.pri = i % config.pri_max;
+ sch_cfg.drr_node_id = config.drr_node_id + i;
+ }
+
+ /* Scheduler weight, must be more than 0. */
+ sch_cfg.drr_node_wt = 1;
+ /* Scheduling unit is byte based, not packet based. */
+ sch_cfg.unit_is_packet = false;
+ /* Frame + CRC calculated. */
+ sch_cfg.frame_mode = PPE_SCH_WITH_FRAME_CRC;
+
+ ret = ppe_queue_scheduler_set(ppe_dev, config.node_id + i,
+ config.flow_level,
+ config.port,
+ sch_cfg);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Initialize scheduler settings for PPE buffer utilization and dispatching
+ * packets on PPE queues.
+ */
+static int ppe_config_scheduler(struct ppe_device *ppe_dev)
+{
+ const struct ppe_scheduler_port_config *port_cfg;
+ const struct ppe_scheduler_qm_config *qm_cfg;
+ const struct ppe_scheduler_bm_config *bm_cfg;
+ int ret, i, count;
+ u32 val, reg;
+
+ count = ARRAY_SIZE(ipq9574_ppe_sch_bm_config);
+ bm_cfg = ipq9574_ppe_sch_bm_config;
+
+ /* Configure the depth of BM scheduler entries. */
+ val = FIELD_PREP(PPE_BM_SCH_CTRL_SCH_DEPTH, count);
+ val |= FIELD_PREP(PPE_BM_SCH_CTRL_SCH_OFFSET, 0);
+ val |= FIELD_PREP(PPE_BM_SCH_CTRL_SCH_EN, 1);
+
+ ret = regmap_write(ppe_dev->regmap, PPE_BM_SCH_CTRL_ADDR, val);
+ if (ret)
+ goto sch_config_fail;
+
+ /* Configure each BM scheduler entry with the valid ingress port and
+ * egress port; the second port takes effect when the specified port
+ * is in the inactive state.
+ */
+ for (i = 0; i < count; i++) {
+ val = FIELD_PREP(PPE_BM_SCH_CFG_TBL_VALID, bm_cfg[i].valid);
+ val |= FIELD_PREP(PPE_BM_SCH_CFG_TBL_DIR, bm_cfg[i].dir);
+ val |= FIELD_PREP(PPE_BM_SCH_CFG_TBL_PORT_NUM, bm_cfg[i].port);
+ val |= FIELD_PREP(PPE_BM_SCH_CFG_TBL_SECOND_PORT_VALID,
+ bm_cfg[i].backup_port_valid);
+ val |= FIELD_PREP(PPE_BM_SCH_CFG_TBL_SECOND_PORT,
+ bm_cfg[i].backup_port);
+
+ reg = PPE_BM_SCH_CFG_TBL_ADDR + i * PPE_BM_SCH_CFG_TBL_INC;
+ ret = regmap_write(ppe_dev->regmap, reg, val);
+ if (ret)
+ goto sch_config_fail;
+ }
+
+ count = ARRAY_SIZE(ipq9574_ppe_sch_qm_config);
+ qm_cfg = ipq9574_ppe_sch_qm_config;
+
+ /* Configure the depth of QM scheduler entries. */
+ val = FIELD_PREP(PPE_PSCH_SCH_DEPTH_CFG_SCH_DEPTH, count);
+ ret = regmap_write(ppe_dev->regmap, PPE_PSCH_SCH_DEPTH_CFG_ADDR, val);
+ if (ret)
+ goto sch_config_fail;
+
+ /* Configure each QM scheduler entry with enqueue port and dequeue
+ * port; the second port takes effect when the specified dequeue
+ * port is in the inactive state.
+ */
+ for (i = 0; i < count; i++) {
+ val = FIELD_PREP(PPE_PSCH_SCH_CFG_TBL_ENS_PORT_BITMAP,
+ qm_cfg[i].ensch_port_bmp);
+ val |= FIELD_PREP(PPE_PSCH_SCH_CFG_TBL_ENS_PORT,
+ qm_cfg[i].ensch_port);
+ val |= FIELD_PREP(PPE_PSCH_SCH_CFG_TBL_DES_PORT,
+ qm_cfg[i].desch_port);
+ val |= FIELD_PREP(PPE_PSCH_SCH_CFG_TBL_DES_SECOND_PORT_EN,
+ qm_cfg[i].desch_backup_port_valid);
+ val |= FIELD_PREP(PPE_PSCH_SCH_CFG_TBL_DES_SECOND_PORT,
+ qm_cfg[i].desch_backup_port);
+
+ reg = PPE_PSCH_SCH_CFG_TBL_ADDR + i * PPE_PSCH_SCH_CFG_TBL_INC;
+ ret = regmap_write(ppe_dev->regmap, reg, val);
+ if (ret)
+ goto sch_config_fail;
+ }
+
+ count = ARRAY_SIZE(ppe_port_sch_config);
+ port_cfg = ppe_port_sch_config;
+
+ /* Configure scheduler per PPE queue or flow. */
+ for (i = 0; i < count; i++) {
+ if (port_cfg[i].port >= ppe_dev->num_ports)
+ break;
+
+ ret = ppe_node_scheduler_config(ppe_dev, port_cfg[i]);
+ if (ret)
+ goto sch_config_fail;
+ }
+
+ return 0;
+
+sch_config_fail:
+ dev_err(ppe_dev->dev, "PPE scheduler arbitration config error %d\n", ret);
+ return ret;
+}
+
+/* Configure PPE queue destination of each PPE port. */
+static int ppe_queue_dest_init(struct ppe_device *ppe_dev)
+{
+ int ret, port_id, index, q_base, q_offset, res_start, res_end, pri_max;
+ struct ppe_queue_ucast_dest queue_dst;
+
+ for (port_id = 0; port_id < ppe_dev->num_ports; port_id++) {
+ memset(&queue_dst, 0, sizeof(queue_dst));
+
+ ret = ppe_port_resource_get(ppe_dev, port_id, PPE_RES_UCAST,
+ &res_start, &res_end);
+ if (ret)
+ return ret;
+
+ q_base = res_start;
+ queue_dst.dest_port = port_id;
+
+ /* Configure the queue base ID and profile ID, which is the same as
+ * the physical port ID.
+ */
+ ret = ppe_queue_ucast_base_set(ppe_dev, queue_dst,
+ q_base, port_id);
+ if (ret)
+ return ret;
+
+ /* Queue priority range supported by each PPE port */
+ ret = ppe_port_resource_get(ppe_dev, port_id, PPE_RES_L0_NODE,
+ &res_start, &res_end);
+ if (ret)
+ return ret;
+
+ pri_max = res_end - res_start;
+
+ /* Redirect ARP reply packets with the maximum priority on the CPU
+ * port, which keeps ARP replies directed to the CPU (CPU code is
+ * 101) on the highest priority EDMA queue.
+ */
+ if (port_id == 0) {
+ memset(&queue_dst, 0, sizeof(queue_dst));
+
+ queue_dst.cpu_code_en = true;
+ queue_dst.cpu_code = 101;
+ ret = ppe_queue_ucast_base_set(ppe_dev, queue_dst,
+ q_base + pri_max,
+ 0);
+ if (ret)
+ return ret;
+ }
+
+ /* Initialize the queue offset of internal priority. */
+ for (index = 0; index < PPE_QUEUE_INTER_PRI_NUM; index++) {
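+ /* Clamp the offset to the priority range supported by the port. */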
+ q_offset = index > pri_max ? pri_max : index;
+
+ ret = ppe_queue_ucast_offset_pri_set(ppe_dev, port_id,
+ index, q_offset);
+ if (ret)
+ return ret;
+ }
+
+ /* Initialize the queue offset of the RSS hash to 0 to avoid a
+ * random hardware value that would lead to an unexpected
+ * destination queue being generated.
+ */
+ for (index = 0; index < PPE_QUEUE_HASH_NUM; index++) {
+ ret = ppe_queue_ucast_offset_hash_set(ppe_dev, port_id,
+ index, 0);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/* Initialize the service code 1 used by CPU port. */
+static int ppe_servcode_init(struct ppe_device *ppe_dev)
+{
+ struct ppe_sc_cfg sc_cfg = {};
+
+ bitmap_zero(sc_cfg.bitmaps.counter, PPE_SC_BYPASS_COUNTER_SIZE);
+ bitmap_zero(sc_cfg.bitmaps.tunnel, PPE_SC_BYPASS_TUNNEL_SIZE);
+
+ bitmap_fill(sc_cfg.bitmaps.ingress, PPE_SC_BYPASS_INGRESS_SIZE);
+ clear_bit(PPE_SC_BYPASS_INGRESS_FAKE_MAC_HEADER, sc_cfg.bitmaps.ingress);
+ clear_bit(PPE_SC_BYPASS_INGRESS_SERVICE_CODE, sc_cfg.bitmaps.ingress);
+ clear_bit(PPE_SC_BYPASS_INGRESS_FAKE_L2_PROTO, sc_cfg.bitmaps.ingress);
+
+ bitmap_fill(sc_cfg.bitmaps.egress, PPE_SC_BYPASS_EGRESS_SIZE);
+ clear_bit(PPE_SC_BYPASS_EGRESS_ACL_POST_ROUTING_CHECK, sc_cfg.bitmaps.egress);
+
+ return ppe_sc_config_set(ppe_dev, PPE_EDMA_SC_BYPASS_ID, sc_cfg);
+}
+
+/* Initialize PPE port configurations. */
+static int ppe_port_config_init(struct ppe_device *ppe_dev)
+{
+ u32 reg, val, mru_mtu_val[3];
+ int i, ret;
+
+ /* MTU and MRU settings are not required for CPU port 0. */
+ for (i = 1; i < ppe_dev->num_ports; i++) {
+ /* Enable Ethernet port counter */
+ ret = ppe_counter_enable_set(ppe_dev, i);
+ if (ret)
+ return ret;
+
+ reg = PPE_MRU_MTU_CTRL_TBL_ADDR + PPE_MRU_MTU_CTRL_TBL_INC * i;
+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
+ mru_mtu_val, ARRAY_SIZE(mru_mtu_val));
+ if (ret)
+ return ret;
+
+		/* Drop the packet when its size exceeds the MTU, and redirect
+		 * the packet to the CPU port when the received packet size
+		 * exceeds the MRU of the physical interface.
+ */
+ PPE_MRU_MTU_CTRL_SET_MRU_CMD(mru_mtu_val, PPE_ACTION_REDIRECT_TO_CPU);
+ PPE_MRU_MTU_CTRL_SET_MTU_CMD(mru_mtu_val, PPE_ACTION_DROP);
+ ret = regmap_bulk_write(ppe_dev->regmap, reg,
+ mru_mtu_val, ARRAY_SIZE(mru_mtu_val));
+ if (ret)
+ return ret;
+
+ reg = PPE_MC_MTU_CTRL_TBL_ADDR + PPE_MC_MTU_CTRL_TBL_INC * i;
+ val = FIELD_PREP(PPE_MC_MTU_CTRL_TBL_MTU_CMD, PPE_ACTION_DROP);
+ ret = regmap_update_bits(ppe_dev->regmap, reg,
+ PPE_MC_MTU_CTRL_TBL_MTU_CMD,
+ val);
+ if (ret)
+ return ret;
+ }
+
+ /* Enable CPU port counters. */
+ return ppe_counter_enable_set(ppe_dev, 0);
+}
+
+/* Initialize the PPE RSS configuration for IPv4 and IPv6 packet receive.
+ * The RSS settings control how the RSS hash value is calculated on packet
+ * receive. This hash then provides the queue offset used to select the
+ * destination queue for the packet.
+ */
+static int ppe_rss_hash_init(struct ppe_device *ppe_dev)
+{
+ u16 fins[PPE_RSS_HASH_TUPLES] = { 0x205, 0x264, 0x227, 0x245, 0x201 };
+ u8 ips[PPE_RSS_HASH_IP_LENGTH] = { 0x13, 0xb, 0x13, 0xb };
+ struct ppe_rss_hash_cfg hash_cfg;
+ int i, ret;
+
+ hash_cfg.hash_seed = get_random_u32();
+ hash_cfg.hash_mask = 0xfff;
+
+ /* Use 5 tuple as RSS hash key for the first fragment of TCP, UDP
+ * and UDP-Lite packets.
+ */
+ hash_cfg.hash_fragment_mode = false;
+
+	/* The final common seed configs used to calculate the RSS hash value,
+	 * which apply to both IPv4 and IPv6 packets.
+ */
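+	/* Each fin value packs the 5-bit inner selection in its low bits and
+	 * the outer selection in its high bits, e.g. 0x205 selects inner 0x5
+	 * and outer 0x10.
+	 */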
+ for (i = 0; i < ARRAY_SIZE(fins); i++) {
+ hash_cfg.hash_fin_inner[i] = fins[i] & 0x1f;
+ hash_cfg.hash_fin_outer[i] = fins[i] >> 5;
+ }
+
+ /* RSS seeds for IP protocol, L4 destination & source port and
+ * destination & source IP used to calculate the RSS hash value.
+ */
+ hash_cfg.hash_protocol_mix = 0x13;
+ hash_cfg.hash_dport_mix = 0xb;
+ hash_cfg.hash_sport_mix = 0x13;
+ hash_cfg.hash_dip_mix[0] = 0xb;
+ hash_cfg.hash_sip_mix[0] = 0x13;
+
+ /* Configure RSS seed configs for IPv4 packet. */
+ ret = ppe_rss_hash_config_set(ppe_dev, PPE_RSS_HASH_MODE_IPV4, hash_cfg);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(ips); i++) {
+ hash_cfg.hash_sip_mix[i] = ips[i];
+ hash_cfg.hash_dip_mix[i] = ips[i];
+ }
+
+ /* Configure RSS seed configs for IPv6 packet. */
+ return ppe_rss_hash_config_set(ppe_dev, PPE_RSS_HASH_MODE_IPV6, hash_cfg);
+}
+
+/* Initialize the mapping of the PPE queues assigned to CPU port 0 to
+ * Ethernet DMA ring 0.
+ */
+static int ppe_queues_to_ring_init(struct ppe_device *ppe_dev)
+{
+ u32 queue_bmap[PPE_RING_TO_QUEUE_BITMAP_WORD_CNT] = {};
+ int ret, queue_id, queue_max;
+
+ ret = ppe_port_resource_get(ppe_dev, 0, PPE_RES_UCAST,
+ &queue_id, &queue_max);
+ if (ret)
+ return ret;
+
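+	/* Each queue is one bit of the bitmap, e.g. queue 33 maps to bit 1 of
+	 * queue_bmap[1].
+	 */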
+ for (; queue_id <= queue_max; queue_id++)
+ queue_bmap[queue_id / 32] |= BIT_MASK(queue_id % 32);
+
+ return ppe_ring_queue_map_set(ppe_dev, 0, queue_bmap);
+}
+
+/* Initialize PPE bridge settings to only enable L2 frame receive and
+ * transmit between CPU port and PPE Ethernet ports.
+ */
+static int ppe_bridge_init(struct ppe_device *ppe_dev)
+{
+ u32 reg, mask, port_cfg[4], vsi_cfg[2];
+ int ret, i;
+
+	/* Configure the following settings for CPU port 0:
+ * a.) Enable Bridge TX
+ * b.) Disable FDB new address learning
+ * c.) Disable station move address learning
+ */
+ mask = PPE_PORT_BRIDGE_TXMAC_EN;
+ mask |= PPE_PORT_BRIDGE_NEW_LRN_EN;
+ mask |= PPE_PORT_BRIDGE_STA_MOVE_LRN_EN;
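+	/* Only PPE_PORT_BRIDGE_TXMAC_EN is set in the written value, so the
+	 * two learning bits covered by the mask are cleared below.
+	 */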
+ ret = regmap_update_bits(ppe_dev->regmap,
+ PPE_PORT_BRIDGE_CTRL_ADDR,
+ mask,
+ PPE_PORT_BRIDGE_TXMAC_EN);
+ if (ret)
+ return ret;
+
+ for (i = 1; i < ppe_dev->num_ports; i++) {
+		/* Enable invalid VSI forwarding for all the physical ports
+		 * to CPU port 0, in case no VSI is assigned to the physical
+		 * port.
+ */
+ reg = PPE_L2_VP_PORT_TBL_ADDR + PPE_L2_VP_PORT_TBL_INC * i;
+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
+ port_cfg, ARRAY_SIZE(port_cfg));
+ if (ret)
+ return ret;
+
+ PPE_L2_PORT_SET_INVALID_VSI_FWD_EN(port_cfg, true);
+ PPE_L2_PORT_SET_DST_INFO(port_cfg, 0);
+
+ ret = regmap_bulk_write(ppe_dev->regmap, reg,
+ port_cfg, ARRAY_SIZE(port_cfg));
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < PPE_VSI_TBL_ENTRIES; i++) {
+		/* Set the VSI forward membership to include only CPU port 0.
+		 * FDB learning and forwarding only take effect after switchdev
+		 * support is added later to create the VSI and add the
+		 * physical ports as VSI members.
+ */
+ reg = PPE_VSI_TBL_ADDR + PPE_VSI_TBL_INC * i;
+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
+ vsi_cfg, ARRAY_SIZE(vsi_cfg));
+ if (ret)
+ return ret;
+
+ PPE_VSI_SET_MEMBER_PORT_BITMAP(vsi_cfg, BIT(0));
+ PPE_VSI_SET_UUC_BITMAP(vsi_cfg, BIT(0));
+ PPE_VSI_SET_UMC_BITMAP(vsi_cfg, BIT(0));
+ PPE_VSI_SET_BC_BITMAP(vsi_cfg, BIT(0));
+ PPE_VSI_SET_NEW_ADDR_LRN_EN(vsi_cfg, true);
+ PPE_VSI_SET_NEW_ADDR_FWD_CMD(vsi_cfg, PPE_ACTION_FORWARD);
+ PPE_VSI_SET_STATION_MOVE_LRN_EN(vsi_cfg, true);
+ PPE_VSI_SET_STATION_MOVE_FWD_CMD(vsi_cfg, PPE_ACTION_FORWARD);
+
+ ret = regmap_bulk_write(ppe_dev->regmap, reg,
+ vsi_cfg, ARRAY_SIZE(vsi_cfg));
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int ppe_hw_config(struct ppe_device *ppe_dev)
+{
+ int ret;
+
+ ret = ppe_config_bm(ppe_dev);
+ if (ret)
+ return ret;
+
+ ret = ppe_config_qm(ppe_dev);
+ if (ret)
+ return ret;
+
+ ret = ppe_config_scheduler(ppe_dev);
+ if (ret)
+ return ret;
+
+ ret = ppe_queue_dest_init(ppe_dev);
+ if (ret)
+ return ret;
+
+ ret = ppe_servcode_init(ppe_dev);
+ if (ret)
+ return ret;
+
+ ret = ppe_port_config_init(ppe_dev);
+ if (ret)
+ return ret;
+
+ ret = ppe_rss_hash_init(ppe_dev);
+ if (ret)
+ return ret;
+
+ ret = ppe_queues_to_ring_init(ppe_dev);
+ if (ret)
+ return ret;
+
+ return ppe_bridge_init(ppe_dev);
+}
diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_config.h b/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
new file mode 100644
index 000000000000..4bb45ca40144
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
@@ -0,0 +1,317 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef __PPE_CONFIG_H__
+#define __PPE_CONFIG_H__
+
+#include <linux/types.h>
+
+#include "ppe.h"
+
+/* There are different table index ranges for configuring the queue base ID
+ * based on the destination port, CPU code or service code.
+ */
+#define PPE_QUEUE_BASE_DEST_PORT 0
+#define PPE_QUEUE_BASE_CPU_CODE 1024
+#define PPE_QUEUE_BASE_SERVICE_CODE 2048
+
+#define PPE_QUEUE_INTER_PRI_NUM 16
+#define PPE_QUEUE_HASH_NUM 256
+
+/* The service code is used by the EDMA port to transmit packets to the PPE. */
+#define PPE_EDMA_SC_BYPASS_ID 1
+
+/* The PPE RSS hash is configured separately for IPv4 and IPv6 packets. */
+#define PPE_RSS_HASH_MODE_IPV4 BIT(0)
+#define PPE_RSS_HASH_MODE_IPV6 BIT(1)
+#define PPE_RSS_HASH_IP_LENGTH 4
+#define PPE_RSS_HASH_TUPLES 5
+
+/* PPE supports 300 queues; each bit represents one queue. */
+#define PPE_RING_TO_QUEUE_BITMAP_WORD_CNT 10
+
+/**
+ * enum ppe_scheduler_frame_mode - PPE scheduler frame mode.
+ * @PPE_SCH_WITH_IPG_PREAMBLE_FRAME_CRC: The scheduled frame includes IPG,
+ * preamble, Ethernet packet and CRC.
+ * @PPE_SCH_WITH_FRAME_CRC: The scheduled frame includes Ethernet frame and CRC
+ * excluding IPG and preamble.
+ * @PPE_SCH_WITH_L3_PAYLOAD: The scheduled frame includes layer 3 packet data.
+ */
+enum ppe_scheduler_frame_mode {
+ PPE_SCH_WITH_IPG_PREAMBLE_FRAME_CRC = 0,
+ PPE_SCH_WITH_FRAME_CRC = 1,
+ PPE_SCH_WITH_L3_PAYLOAD = 2,
+};
+
+/**
+ * struct ppe_scheduler_cfg - PPE scheduler configuration.
+ * @flow_id: PPE flow ID.
+ * @pri: Scheduler priority.
+ * @drr_node_id: Node ID for scheduled traffic.
+ * @drr_node_wt: Weight for scheduled traffic.
+ * @unit_is_packet: Packet based or byte based unit for scheduled traffic.
+ * @frame_mode: Packet mode to be scheduled.
+ *
+ * PPE scheduler supports commit rate and exceed rate configurations.
+ */
+struct ppe_scheduler_cfg {
+ int flow_id;
+ int pri;
+ int drr_node_id;
+ int drr_node_wt;
+ bool unit_is_packet;
+ enum ppe_scheduler_frame_mode frame_mode;
+};
+
+/**
+ * enum ppe_resource_type - PPE resource type.
+ * @PPE_RES_UCAST: Unicast queue resource.
+ * @PPE_RES_MCAST: Multicast queue resource.
+ * @PPE_RES_L0_NODE: Level 0 for queue based node resource.
+ * @PPE_RES_L1_NODE: Level 1 for flow based node resource.
+ * @PPE_RES_FLOW_ID: Flow based node resource.
+ */
+enum ppe_resource_type {
+ PPE_RES_UCAST,
+ PPE_RES_MCAST,
+ PPE_RES_L0_NODE,
+ PPE_RES_L1_NODE,
+ PPE_RES_FLOW_ID,
+};
+
+/**
+ * struct ppe_queue_ucast_dest - PPE unicast queue destination.
+ * @src_profile: Source profile.
+ * @service_code_en: Enable service code to map the queue base ID.
+ * @service_code: Service code.
+ * @cpu_code_en: Enable CPU code to map the queue base ID.
+ * @cpu_code: CPU code.
+ * @dest_port: Destination port.
+ *
+ * PPE egress queue ID is decided by the service code if enabled, otherwise
+ * by the CPU code if enabled, or by destination port if both service code
+ * and CPU code are disabled.
+ */
+struct ppe_queue_ucast_dest {
+ int src_profile;
+ bool service_code_en;
+ int service_code;
+ bool cpu_code_en;
+ int cpu_code;
+ int dest_port;
+};
+
+/* Hardware bitmaps for bypassing features of the ingress packet. */
+enum ppe_sc_ingress_type {
+ PPE_SC_BYPASS_INGRESS_VLAN_TAG_FMT_CHECK = 0,
+ PPE_SC_BYPASS_INGRESS_VLAN_MEMBER_CHECK = 1,
+ PPE_SC_BYPASS_INGRESS_VLAN_TRANSLATE = 2,
+ PPE_SC_BYPASS_INGRESS_MY_MAC_CHECK = 3,
+ PPE_SC_BYPASS_INGRESS_DIP_LOOKUP = 4,
+ PPE_SC_BYPASS_INGRESS_FLOW_LOOKUP = 5,
+ PPE_SC_BYPASS_INGRESS_FLOW_ACTION = 6,
+ PPE_SC_BYPASS_INGRESS_ACL = 7,
+ PPE_SC_BYPASS_INGRESS_FAKE_MAC_HEADER = 8,
+ PPE_SC_BYPASS_INGRESS_SERVICE_CODE = 9,
+ PPE_SC_BYPASS_INGRESS_WRONG_PKT_FMT_L2 = 10,
+ PPE_SC_BYPASS_INGRESS_WRONG_PKT_FMT_L3_IPV4 = 11,
+ PPE_SC_BYPASS_INGRESS_WRONG_PKT_FMT_L3_IPV6 = 12,
+ PPE_SC_BYPASS_INGRESS_WRONG_PKT_FMT_L4 = 13,
+ PPE_SC_BYPASS_INGRESS_FLOW_SERVICE_CODE = 14,
+ PPE_SC_BYPASS_INGRESS_ACL_SERVICE_CODE = 15,
+ PPE_SC_BYPASS_INGRESS_FAKE_L2_PROTO = 16,
+ PPE_SC_BYPASS_INGRESS_PPPOE_TERMINATION = 17,
+ PPE_SC_BYPASS_INGRESS_DEFAULT_VLAN = 18,
+ PPE_SC_BYPASS_INGRESS_DEFAULT_PCP = 19,
+ PPE_SC_BYPASS_INGRESS_VSI_ASSIGN = 20,
+ /* Values 21-23 are not specified by hardware. */
+ PPE_SC_BYPASS_INGRESS_VLAN_ASSIGN_FAIL = 24,
+ PPE_SC_BYPASS_INGRESS_SOURCE_GUARD = 25,
+ PPE_SC_BYPASS_INGRESS_MRU_MTU_CHECK = 26,
+ PPE_SC_BYPASS_INGRESS_FLOW_SRC_CHECK = 27,
+ PPE_SC_BYPASS_INGRESS_FLOW_QOS = 28,
+ /* This must be last as it determines the size of the BITMAP. */
+ PPE_SC_BYPASS_INGRESS_SIZE,
+};
+
+/* Hardware bitmaps for bypassing features of the egress packet. */
+enum ppe_sc_egress_type {
+ PPE_SC_BYPASS_EGRESS_VLAN_MEMBER_CHECK = 0,
+ PPE_SC_BYPASS_EGRESS_VLAN_TRANSLATE = 1,
+ PPE_SC_BYPASS_EGRESS_VLAN_TAG_FMT_CTRL = 2,
+ PPE_SC_BYPASS_EGRESS_FDB_LEARN = 3,
+ PPE_SC_BYPASS_EGRESS_FDB_REFRESH = 4,
+ PPE_SC_BYPASS_EGRESS_L2_SOURCE_SECURITY = 5,
+ PPE_SC_BYPASS_EGRESS_MANAGEMENT_FWD = 6,
+ PPE_SC_BYPASS_EGRESS_BRIDGING_FWD = 7,
+ PPE_SC_BYPASS_EGRESS_IN_STP_FLTR = 8,
+ PPE_SC_BYPASS_EGRESS_EG_STP_FLTR = 9,
+ PPE_SC_BYPASS_EGRESS_SOURCE_FLTR = 10,
+ PPE_SC_BYPASS_EGRESS_POLICER = 11,
+ PPE_SC_BYPASS_EGRESS_L2_PKT_EDIT = 12,
+ PPE_SC_BYPASS_EGRESS_L3_PKT_EDIT = 13,
+ PPE_SC_BYPASS_EGRESS_ACL_POST_ROUTING_CHECK = 14,
+ PPE_SC_BYPASS_EGRESS_PORT_ISOLATION = 15,
+ PPE_SC_BYPASS_EGRESS_PRE_ACL_QOS = 16,
+ PPE_SC_BYPASS_EGRESS_POST_ACL_QOS = 17,
+ PPE_SC_BYPASS_EGRESS_DSCP_QOS = 18,
+ PPE_SC_BYPASS_EGRESS_PCP_QOS = 19,
+ PPE_SC_BYPASS_EGRESS_PREHEADER_QOS = 20,
+ PPE_SC_BYPASS_EGRESS_FAKE_MAC_DROP = 21,
+ PPE_SC_BYPASS_EGRESS_TUNL_CONTEXT = 22,
+ PPE_SC_BYPASS_EGRESS_FLOW_POLICER = 23,
+ /* This must be last as it determines the size of the BITMAP. */
+ PPE_SC_BYPASS_EGRESS_SIZE,
+};
+
+/* Hardware bitmaps for bypassing counter of packet. */
+enum ppe_sc_counter_type {
+ PPE_SC_BYPASS_COUNTER_RX_VLAN = 0,
+ PPE_SC_BYPASS_COUNTER_RX = 1,
+ PPE_SC_BYPASS_COUNTER_TX_VLAN = 2,
+ PPE_SC_BYPASS_COUNTER_TX = 3,
+ /* This must be last as it determines the size of the BITMAP. */
+ PPE_SC_BYPASS_COUNTER_SIZE,
+};
+
+/* Hardware bitmaps for bypassing features of tunnel packet. */
+enum ppe_sc_tunnel_type {
+ PPE_SC_BYPASS_TUNNEL_SERVICE_CODE = 0,
+ PPE_SC_BYPASS_TUNNEL_TUNNEL_HANDLE = 1,
+ PPE_SC_BYPASS_TUNNEL_L3_IF_CHECK = 2,
+ PPE_SC_BYPASS_TUNNEL_VLAN_CHECK = 3,
+ PPE_SC_BYPASS_TUNNEL_DMAC_CHECK = 4,
+ PPE_SC_BYPASS_TUNNEL_UDP_CSUM_0_CHECK = 5,
+ PPE_SC_BYPASS_TUNNEL_TBL_DE_ACCE_CHECK = 6,
+ PPE_SC_BYPASS_TUNNEL_PPPOE_MC_TERM_CHECK = 7,
+ PPE_SC_BYPASS_TUNNEL_TTL_EXCEED_CHECK = 8,
+ PPE_SC_BYPASS_TUNNEL_MAP_SRC_CHECK = 9,
+ PPE_SC_BYPASS_TUNNEL_MAP_DST_CHECK = 10,
+ PPE_SC_BYPASS_TUNNEL_LPM_DST_LOOKUP = 11,
+ PPE_SC_BYPASS_TUNNEL_LPM_LOOKUP = 12,
+ PPE_SC_BYPASS_TUNNEL_WRONG_PKT_FMT_L2 = 13,
+ PPE_SC_BYPASS_TUNNEL_WRONG_PKT_FMT_L3_IPV4 = 14,
+ PPE_SC_BYPASS_TUNNEL_WRONG_PKT_FMT_L3_IPV6 = 15,
+ PPE_SC_BYPASS_TUNNEL_WRONG_PKT_FMT_L4 = 16,
+ PPE_SC_BYPASS_TUNNEL_WRONG_PKT_FMT_TUNNEL = 17,
+ /* Values 18-19 are not specified by hardware. */
+ PPE_SC_BYPASS_TUNNEL_PRE_IPO = 20,
+ /* This must be last as it determines the size of the BITMAP. */
+ PPE_SC_BYPASS_TUNNEL_SIZE,
+};
+
+/**
+ * struct ppe_sc_bypass - PPE service bypass bitmaps
+ * @ingress: Bitmap of features that can be bypassed on the ingress packet.
+ * @egress: Bitmap of features that can be bypassed on the egress packet.
+ * @counter: Bitmap of features that can be bypassed on the counter type.
+ * @tunnel: Bitmap of features that can be bypassed on the tunnel packet.
+ */
+struct ppe_sc_bypass {
+ DECLARE_BITMAP(ingress, PPE_SC_BYPASS_INGRESS_SIZE);
+ DECLARE_BITMAP(egress, PPE_SC_BYPASS_EGRESS_SIZE);
+ DECLARE_BITMAP(counter, PPE_SC_BYPASS_COUNTER_SIZE);
+ DECLARE_BITMAP(tunnel, PPE_SC_BYPASS_TUNNEL_SIZE);
+};
+
+/**
+ * struct ppe_sc_cfg - PPE service code configuration.
+ * @dest_port_valid: Generate destination port or not.
+ * @dest_port: Destination port ID.
+ * @bitmaps: Bitmap of bypass features.
+ * @is_src: Destination port acts as source port, packet sent to CPU.
+ * @next_service_code: New service code generated.
+ * @eip_field_update_bitmap: Fields updated as actions taken for EIP.
+ * @eip_hw_service: Selected hardware functions for EIP.
+ * @eip_offset_sel: Packet offset selection, using packet's layer 4 offset
+ * or using packet's layer 3 offset for EIP.
+ *
+ * The service code is generated as the packet passes through the PPE.
+ */
+struct ppe_sc_cfg {
+ bool dest_port_valid;
+ int dest_port;
+ struct ppe_sc_bypass bitmaps;
+ bool is_src;
+ int next_service_code;
+ int eip_field_update_bitmap;
+ int eip_hw_service;
+ int eip_offset_sel;
+};
+
+/**
+ * enum ppe_action_type - PPE action of the received packet.
+ * @PPE_ACTION_FORWARD: Packet forwarded per L2/L3 process.
+ * @PPE_ACTION_DROP: Packet dropped by PPE.
+ * @PPE_ACTION_COPY_TO_CPU: Packet copied to CPU port per multicast queue.
+ * @PPE_ACTION_REDIRECT_TO_CPU: Packet redirected to CPU port per unicast queue.
+ */
+enum ppe_action_type {
+ PPE_ACTION_FORWARD = 0,
+ PPE_ACTION_DROP = 1,
+ PPE_ACTION_COPY_TO_CPU = 2,
+ PPE_ACTION_REDIRECT_TO_CPU = 3,
+};
+
+/**
+ * struct ppe_rss_hash_cfg - PPE RSS hash configuration.
+ * @hash_mask: Mask of the generated hash value.
+ * @hash_fragment_mode: Hash generation mode for the first fragment of TCP,
+ * UDP and UDP-Lite packets, to use either 3 tuple or 5 tuple for RSS hash
+ * key computation.
+ * @hash_seed: Seed to generate RSS hash.
+ * @hash_sip_mix: Source IP selection.
+ * @hash_dip_mix: Destination IP selection.
+ * @hash_protocol_mix: Protocol selection.
+ * @hash_sport_mix: Source L4 port selection.
+ * @hash_dport_mix: Destination L4 port selection.
+ * @hash_fin_inner: RSS hash value first selection.
+ * @hash_fin_outer: RSS hash value second selection.
+ *
+ * The PPE RSS hash value for a packet is generated based on this RSS hash
+ * configuration.
+ */
+struct ppe_rss_hash_cfg {
+ u32 hash_mask;
+ bool hash_fragment_mode;
+ u32 hash_seed;
+ u8 hash_sip_mix[PPE_RSS_HASH_IP_LENGTH];
+ u8 hash_dip_mix[PPE_RSS_HASH_IP_LENGTH];
+ u8 hash_protocol_mix;
+ u8 hash_sport_mix;
+ u8 hash_dport_mix;
+ u8 hash_fin_inner[PPE_RSS_HASH_TUPLES];
+ u8 hash_fin_outer[PPE_RSS_HASH_TUPLES];
+};
+
+int ppe_hw_config(struct ppe_device *ppe_dev);
+int ppe_queue_scheduler_set(struct ppe_device *ppe_dev,
+ int node_id, bool flow_level, int port,
+ struct ppe_scheduler_cfg scheduler_cfg);
+int ppe_queue_ucast_base_set(struct ppe_device *ppe_dev,
+ struct ppe_queue_ucast_dest queue_dst,
+ int queue_base,
+ int profile_id);
+int ppe_queue_ucast_offset_pri_set(struct ppe_device *ppe_dev,
+ int profile_id,
+ int priority,
+ int queue_offset);
+int ppe_queue_ucast_offset_hash_set(struct ppe_device *ppe_dev,
+ int profile_id,
+ int rss_hash,
+ int queue_offset);
+int ppe_port_resource_get(struct ppe_device *ppe_dev, int port,
+ enum ppe_resource_type type,
+ int *res_start, int *res_end);
+int ppe_sc_config_set(struct ppe_device *ppe_dev, int sc,
+ struct ppe_sc_cfg cfg);
+int ppe_counter_enable_set(struct ppe_device *ppe_dev, int port);
+int ppe_rss_hash_config_set(struct ppe_device *ppe_dev, int mode,
+ struct ppe_rss_hash_cfg hash_cfg);
+int ppe_ring_queue_map_set(struct ppe_device *ppe_dev,
+ int ring_id,
+ u32 *queue_map);
+#endif
diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.c b/drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.c
new file mode 100644
index 000000000000..fd959a76ff43
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.c
@@ -0,0 +1,847 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+/* PPE debugfs routines for display of PPE counters useful for debug. */
+
+#include <linux/bitfield.h>
+#include <linux/debugfs.h>
+#include <linux/dev_printk.h>
+#include <linux/device.h>
+#include <linux/regmap.h>
+#include <linux/seq_file.h>
+
+#include "ppe.h"
+#include "ppe_config.h"
+#include "ppe_debugfs.h"
+#include "ppe_regs.h"
+
+#define PPE_PKT_CNT_TBL_SIZE 3
+#define PPE_DROP_PKT_CNT_TBL_SIZE 5
+
+#define PPE_W0_PKT_CNT GENMASK(31, 0)
+#define PPE_W2_DROP_PKT_CNT_LOW GENMASK(31, 8)
+#define PPE_W3_DROP_PKT_CNT_HIGH GENMASK(7, 0)
+
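+/* The 32-bit drop counter spans two words of the 5-word counter table: its
+ * low 24 bits sit in bits 31:8 of word 2 and its high 8 bits in bits 7:0 of
+ * word 3.
+ */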
+#define PPE_GET_PKT_CNT(tbl_cnt) \
+ FIELD_GET(PPE_W0_PKT_CNT, *(tbl_cnt))
+#define PPE_GET_DROP_PKT_CNT_LOW(tbl_cnt) \
+ FIELD_GET(PPE_W2_DROP_PKT_CNT_LOW, *((tbl_cnt) + 0x2))
+#define PPE_GET_DROP_PKT_CNT_HIGH(tbl_cnt) \
+ FIELD_GET(PPE_W3_DROP_PKT_CNT_HIGH, *((tbl_cnt) + 0x3))
+
+/**
+ * enum ppe_cnt_size_type - PPE counter size type
+ * @PPE_PKT_CNT_SIZE_1WORD: Counter size with single register
+ * @PPE_PKT_CNT_SIZE_3WORD: Counter size with table of 3 words
+ * @PPE_PKT_CNT_SIZE_5WORD: Counter size with table of 5 words
+ *
+ * PPE uses registers of different sizes to record the packet counters:
+ * either a single register, or a register table of 3 or 5 words. The
+ * counter with a 5-word table also records the drop counter. Some other
+ * counter types occupy less than 32 bits and are not covered by this
+ * enumeration type.
+ */
+enum ppe_cnt_size_type {
+ PPE_PKT_CNT_SIZE_1WORD,
+ PPE_PKT_CNT_SIZE_3WORD,
+ PPE_PKT_CNT_SIZE_5WORD,
+};
+
+/**
+ * enum ppe_cnt_type - PPE counter type.
+ * @PPE_CNT_BM: Packet counter processed by BM.
+ * @PPE_CNT_PARSE: Packet counter parsed on ingress.
+ * @PPE_CNT_PORT_RX: Packet counter on the ingress port.
+ * @PPE_CNT_VLAN_RX: VLAN packet counter received.
+ * @PPE_CNT_L2_FWD: Packet counter processed by L2 forwarding.
+ * @PPE_CNT_CPU_CODE: Packet counter marked with various CPU codes.
+ * @PPE_CNT_VLAN_TX: VLAN packet counter transmitted.
+ * @PPE_CNT_PORT_TX: Packet counter on the egress port.
+ * @PPE_CNT_QM: Packet counter processed by QM.
+ */
+enum ppe_cnt_type {
+ PPE_CNT_BM,
+ PPE_CNT_PARSE,
+ PPE_CNT_PORT_RX,
+ PPE_CNT_VLAN_RX,
+ PPE_CNT_L2_FWD,
+ PPE_CNT_CPU_CODE,
+ PPE_CNT_VLAN_TX,
+ PPE_CNT_PORT_TX,
+ PPE_CNT_QM,
+};
+
+/**
+ * struct ppe_debugfs_entry - PPE debugfs entry.
+ * @name: Debugfs file name.
+ * @counter_type: PPE packet counter type.
+ * @ppe: PPE device.
+ *
+ * The PPE debugfs entry is used to create the debugfs file and passed
+ * to debugfs_create_file() as private data.
+ */
+struct ppe_debugfs_entry {
+ const char *name;
+ enum ppe_cnt_type counter_type;
+ struct ppe_device *ppe;
+};
+
+static const struct ppe_debugfs_entry debugfs_files[] = {
+ {
+ .name = "bm",
+ .counter_type = PPE_CNT_BM,
+ },
+ {
+ .name = "parse",
+ .counter_type = PPE_CNT_PARSE,
+ },
+ {
+ .name = "port_rx",
+ .counter_type = PPE_CNT_PORT_RX,
+ },
+ {
+ .name = "vlan_rx",
+ .counter_type = PPE_CNT_VLAN_RX,
+ },
+ {
+ .name = "l2_forward",
+ .counter_type = PPE_CNT_L2_FWD,
+ },
+ {
+ .name = "cpu_code",
+ .counter_type = PPE_CNT_CPU_CODE,
+ },
+ {
+ .name = "vlan_tx",
+ .counter_type = PPE_CNT_VLAN_TX,
+ },
+ {
+ .name = "port_tx",
+ .counter_type = PPE_CNT_PORT_TX,
+ },
+ {
+ .name = "qm",
+ .counter_type = PPE_CNT_QM,
+ },
+};
+
+static int ppe_pkt_cnt_get(struct ppe_device *ppe_dev, u32 reg,
+ enum ppe_cnt_size_type cnt_type,
+ u32 *cnt, u32 *drop_cnt)
+{
+ u32 drop_pkt_cnt[PPE_DROP_PKT_CNT_TBL_SIZE];
+ u32 pkt_cnt[PPE_PKT_CNT_TBL_SIZE];
+ u32 value;
+ int ret;
+
+ switch (cnt_type) {
+ case PPE_PKT_CNT_SIZE_1WORD:
+ ret = regmap_read(ppe_dev->regmap, reg, &value);
+ if (ret)
+ return ret;
+
+ *cnt = value;
+ break;
+ case PPE_PKT_CNT_SIZE_3WORD:
+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
+ pkt_cnt, ARRAY_SIZE(pkt_cnt));
+ if (ret)
+ return ret;
+
+ *cnt = PPE_GET_PKT_CNT(pkt_cnt);
+ break;
+ case PPE_PKT_CNT_SIZE_5WORD:
+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
+ drop_pkt_cnt, ARRAY_SIZE(drop_pkt_cnt));
+ if (ret)
+ return ret;
+
+ *cnt = PPE_GET_PKT_CNT(drop_pkt_cnt);
+
+ /* Drop counter with low 24 bits. */
+ value = PPE_GET_DROP_PKT_CNT_LOW(drop_pkt_cnt);
+ *drop_cnt = FIELD_PREP(GENMASK(23, 0), value);
+
+ /* Drop counter with high 8 bits. */
+ value = PPE_GET_DROP_PKT_CNT_HIGH(drop_pkt_cnt);
+ *drop_cnt |= FIELD_PREP(GENMASK(31, 24), value);
+ break;
+ }
+
+ return 0;
+}
+
+static void ppe_tbl_pkt_cnt_clear(struct ppe_device *ppe_dev, u32 reg,
+ enum ppe_cnt_size_type cnt_type)
+{
+ u32 drop_pkt_cnt[PPE_DROP_PKT_CNT_TBL_SIZE] = {};
+ u32 pkt_cnt[PPE_PKT_CNT_TBL_SIZE] = {};
+
+ switch (cnt_type) {
+ case PPE_PKT_CNT_SIZE_1WORD:
+ regmap_write(ppe_dev->regmap, reg, 0);
+ break;
+ case PPE_PKT_CNT_SIZE_3WORD:
+ regmap_bulk_write(ppe_dev->regmap, reg,
+ pkt_cnt, ARRAY_SIZE(pkt_cnt));
+ break;
+ case PPE_PKT_CNT_SIZE_5WORD:
+ regmap_bulk_write(ppe_dev->regmap, reg,
+ drop_pkt_cnt, ARRAY_SIZE(drop_pkt_cnt));
+ break;
+ }
+}
+
+static int ppe_bm_counter_get(struct ppe_device *ppe_dev, struct seq_file *seq)
+{
+ u32 reg, val, pkt_cnt, pkt_cnt1;
+ int ret, i, tag;
+
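+	/* Only non-zero counters are printed, wrapping the output every few entries. */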
+ seq_printf(seq, "%-24s", "BM SILENT_DROP:");
+ tag = 0;
+ for (i = 0; i < PPE_DROP_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_DROP_CNT_TBL_ADDR + i * PPE_DROP_CNT_TBL_INC;
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_1WORD,
+ &pkt_cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ if (pkt_cnt > 0) {
+ if (!((++tag) % 4))
+ seq_printf(seq, "\n%-24s", "");
+
+ seq_printf(seq, "%10u(%s=%04d)", pkt_cnt, "port", i);
+ }
+ }
+
+ seq_putc(seq, '\n');
+
+	/* The number of packets dropped because the hardware buffers were
+	 * only partially available for the packet.
+ */
+ seq_printf(seq, "%-24s", "BM OVERFLOW_DROP:");
+ tag = 0;
+ for (i = 0; i < PPE_DROP_STAT_TBL_ENTRIES; i++) {
+ reg = PPE_DROP_STAT_TBL_ADDR + PPE_DROP_STAT_TBL_INC * i;
+
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
+ &pkt_cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ if (pkt_cnt > 0) {
+ if (!((++tag) % 4))
+ seq_printf(seq, "\n%-24s", "");
+
+ seq_printf(seq, "%10u(%s=%04d)", pkt_cnt, "port", i);
+ }
+ }
+
+ seq_putc(seq, '\n');
+
+	/* The number of currently occupied buffers, which can't be flushed. */
+ seq_printf(seq, "%-24s", "BM USED/REACT:");
+ tag = 0;
+ for (i = 0; i < PPE_BM_USED_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_BM_USED_CNT_TBL_ADDR + i * PPE_BM_USED_CNT_TBL_INC;
+ ret = regmap_read(ppe_dev->regmap, reg, &val);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+		/* The number of PPE buffers used for caching the received
+		 * packets before the pause frame is sent.
+ */
+ pkt_cnt = FIELD_GET(PPE_BM_USED_CNT_VAL, val);
+
+ reg = PPE_BM_REACT_CNT_TBL_ADDR + i * PPE_BM_REACT_CNT_TBL_INC;
+ ret = regmap_read(ppe_dev->regmap, reg, &val);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+		/* The number of PPE buffers used for caching the received
+		 * packets after the pause frame is sent out.
+ */
+ pkt_cnt1 = FIELD_GET(PPE_BM_REACT_CNT_VAL, val);
+
+ if (pkt_cnt > 0 || pkt_cnt1 > 0) {
+ if (!((++tag) % 4))
+ seq_printf(seq, "\n%-24s", "");
+
+ seq_printf(seq, "%10u/%u(%s=%04d)", pkt_cnt, pkt_cnt1,
+ "port", i);
+ }
+ }
+
+ seq_putc(seq, '\n');
+
+ return 0;
+}
+
+/* The number of packets processed by the ingress parser module of PPE. */
+static int ppe_parse_pkt_counter_get(struct ppe_device *ppe_dev,
+ struct seq_file *seq)
+{
+ u32 reg, cnt = 0, tunnel_cnt = 0;
+ int i, ret, tag = 0;
+
+ seq_printf(seq, "%-24s", "PARSE TPRX/IPRX:");
+ for (i = 0; i < PPE_IPR_PKT_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_TPR_PKT_CNT_TBL_ADDR + i * PPE_TPR_PKT_CNT_TBL_INC;
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_1WORD,
+ &tunnel_cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ reg = PPE_IPR_PKT_CNT_TBL_ADDR + i * PPE_IPR_PKT_CNT_TBL_INC;
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_1WORD,
+ &cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ if (tunnel_cnt > 0 || cnt > 0) {
+ if (!((++tag) % 4))
+ seq_printf(seq, "\n%-24s", "");
+
+ seq_printf(seq, "%10u/%u(%s=%04d)", tunnel_cnt, cnt,
+ "port", i);
+ }
+ }
+
+ seq_putc(seq, '\n');
+
+ return 0;
+}
+
+/* The number of packets received or dropped on the ingress port. */
+static int ppe_port_rx_counter_get(struct ppe_device *ppe_dev,
+ struct seq_file *seq)
+{
+ u32 reg, pkt_cnt = 0, drop_cnt = 0;
+ int ret, i, tag;
+
+ seq_printf(seq, "%-24s", "PORT RX/RX_DROP:");
+ tag = 0;
+ for (i = 0; i < PPE_PHY_PORT_RX_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_PHY_PORT_RX_CNT_TBL_ADDR + PPE_PHY_PORT_RX_CNT_TBL_INC * i;
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_5WORD,
+ &pkt_cnt, &drop_cnt);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ if (pkt_cnt > 0) {
+ if (!((++tag) % 4))
+ seq_printf(seq, "\n%-24s", "");
+
+ seq_printf(seq, "%10u/%u(%s=%04d)", pkt_cnt, drop_cnt,
+ "port", i);
+ }
+ }
+
+ seq_putc(seq, '\n');
+
+ seq_printf(seq, "%-24s", "VPORT RX/RX_DROP:");
+ tag = 0;
+ for (i = 0; i < PPE_PORT_RX_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_PORT_RX_CNT_TBL_ADDR + PPE_PORT_RX_CNT_TBL_INC * i;
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_5WORD,
+ &pkt_cnt, &drop_cnt);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ if (pkt_cnt > 0) {
+ if (!((++tag) % 4))
+ seq_printf(seq, "\n%-24s", "");
+
+ seq_printf(seq, "%10u/%u(%s=%04d)", pkt_cnt, drop_cnt,
+ "port", i);
+ }
+ }
+
+ seq_putc(seq, '\n');
+
+ return 0;
+}
+
+/* The number of packets received or dropped by layer 2 processing. */
+static int ppe_l2_counter_get(struct ppe_device *ppe_dev,
+ struct seq_file *seq)
+{
+ u32 reg, pkt_cnt = 0, drop_cnt = 0;
+ int ret, i, tag = 0;
+
+ seq_printf(seq, "%-24s", "L2 RX/RX_DROP:");
+ for (i = 0; i < PPE_PRE_L2_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_PRE_L2_CNT_TBL_ADDR + PPE_PRE_L2_CNT_TBL_INC * i;
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_5WORD,
+ &pkt_cnt, &drop_cnt);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ if (pkt_cnt > 0) {
+ if (!((++tag) % 4))
+ seq_printf(seq, "\n%-24s", "");
+
+ seq_printf(seq, "%10u/%u(%s=%04d)", pkt_cnt, drop_cnt,
+ "vsi", i);
+ }
+ }
+
+ seq_putc(seq, '\n');
+
+ return 0;
+}
+
+/* The number of VLAN packets received by PPE. */
+static int ppe_vlan_rx_counter_get(struct ppe_device *ppe_dev,
+ struct seq_file *seq)
+{
+ u32 reg, pkt_cnt = 0;
+ int ret, i, tag = 0;
+
+ seq_printf(seq, "%-24s", "VLAN RX:");
+ for (i = 0; i < PPE_VLAN_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_VLAN_CNT_TBL_ADDR + PPE_VLAN_CNT_TBL_INC * i;
+
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
+ &pkt_cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ if (pkt_cnt > 0) {
+ if (!((++tag) % 4))
+ seq_printf(seq, "\n%-24s", "");
+
+ seq_printf(seq, "%10u(%s=%04d)", pkt_cnt, "vsi", i);
+ }
+ }
+
+ seq_putc(seq, '\n');
+
+ return 0;
+}
+
+/* The number of packets handed to CPU by PPE. */
+static int ppe_cpu_code_counter_get(struct ppe_device *ppe_dev,
+ struct seq_file *seq)
+{
+ u32 reg, pkt_cnt = 0;
+ int ret, i;
+
+ seq_printf(seq, "%-24s", "CPU CODE:");
+ for (i = 0; i < PPE_DROP_CPU_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_DROP_CPU_CNT_TBL_ADDR + PPE_DROP_CPU_CNT_TBL_INC * i;
+
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
+ &pkt_cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ if (!pkt_cnt)
+ continue;
+
+		/* The first 256 entries of the register table hold the 256
+		 * CPU codes, followed by 128 drop codes for each PPE port
+		 * (0-7), giving 256 + 8 * 128 entries in total.
+ */
+ if (i < 256)
+ seq_printf(seq, "%10u(cpucode:%d)", pkt_cnt, i);
+ else
+ seq_printf(seq, "%10u(port=%04d),dropcode:%d", pkt_cnt,
+ (i - 256) % 8, (i - 256) / 8);
+ seq_putc(seq, '\n');
+ seq_printf(seq, "%-24s", "");
+ }
+
+ seq_putc(seq, '\n');
+
+ return 0;
+}
+
+/* The number of packets forwarded by VLAN in the egress direction. */
+static int ppe_vlan_tx_counter_get(struct ppe_device *ppe_dev,
+ struct seq_file *seq)
+{
+ u32 reg, pkt_cnt = 0;
+ int ret, i, tag = 0;
+
+ seq_printf(seq, "%-24s", "VLAN TX:");
+ for (i = 0; i < PPE_EG_VSI_COUNTER_TBL_ENTRIES; i++) {
+ reg = PPE_EG_VSI_COUNTER_TBL_ADDR + PPE_EG_VSI_COUNTER_TBL_INC * i;
+
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
+ &pkt_cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ if (pkt_cnt > 0) {
+ if (!((++tag) % 4))
+ seq_printf(seq, "\n%-24s", "");
+
+ seq_printf(seq, "%10u(%s=%04d)", pkt_cnt, "vsi", i);
+ }
+ }
+
+ seq_putc(seq, '\n');
+
+ return 0;
+}
+
+/* The number of packets transmitted or dropped on the egress port. */
+static int ppe_port_tx_counter_get(struct ppe_device *ppe_dev,
+ struct seq_file *seq)
+{
+ u32 reg, pkt_cnt = 0, drop_cnt = 0;
+ int ret, i, tag;
+
+ seq_printf(seq, "%-24s", "VPORT TX/TX_DROP:");
+ tag = 0;
+ for (i = 0; i < PPE_VPORT_TX_COUNTER_TBL_ENTRIES; i++) {
+ reg = PPE_VPORT_TX_COUNTER_TBL_ADDR + PPE_VPORT_TX_COUNTER_TBL_INC * i;
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
+ &pkt_cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ reg = PPE_VPORT_TX_DROP_CNT_TBL_ADDR + PPE_VPORT_TX_DROP_CNT_TBL_INC * i;
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
+ &drop_cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ if (pkt_cnt > 0 || drop_cnt > 0) {
+ if (!((++tag) % 4))
+ seq_printf(seq, "\n%-24s", "");
+
+ seq_printf(seq, "%10u/%u(%s=%04d)", pkt_cnt, drop_cnt,
+ "port", i);
+ }
+ }
+
+ seq_putc(seq, '\n');
+
+ seq_printf(seq, "%-24s", "PORT TX/TX_DROP:");
+ tag = 0;
+ for (i = 0; i < PPE_PORT_TX_COUNTER_TBL_ENTRIES; i++) {
+ reg = PPE_PORT_TX_COUNTER_TBL_ADDR + PPE_PORT_TX_COUNTER_TBL_INC * i;
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
+ &pkt_cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ reg = PPE_PORT_TX_DROP_CNT_TBL_ADDR + PPE_PORT_TX_DROP_CNT_TBL_INC * i;
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
+ &drop_cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ if (pkt_cnt > 0 || drop_cnt > 0) {
+ if (!((++tag) % 4))
+ seq_printf(seq, "\n%-24s", "");
+
+ seq_printf(seq, "%10u/%u(%s=%04d)", pkt_cnt, drop_cnt,
+ "port", i);
+ }
+ }
+
+ seq_putc(seq, '\n');
+
+ return 0;
+}
+
+/* The number of packets transmitted, pending or dropped per PPE queue. */
+static int ppe_queue_counter_get(struct ppe_device *ppe_dev,
+ struct seq_file *seq)
+{
+ u32 reg, val, pkt_cnt = 0, pend_cnt = 0, drop_cnt = 0;
+ int ret, i, tag = 0;
+
+ seq_printf(seq, "%-24s", "QUEUE TX/PEND/DROP:");
+ for (i = 0; i < PPE_QUEUE_TX_COUNTER_TBL_ENTRIES; i++) {
+ reg = PPE_QUEUE_TX_COUNTER_TBL_ADDR + PPE_QUEUE_TX_COUNTER_TBL_INC * i;
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
+ &pkt_cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ if (i < PPE_AC_UNICAST_QUEUE_CFG_TBL_ENTRIES) {
+ reg = PPE_AC_UNICAST_QUEUE_CNT_TBL_ADDR +
+ PPE_AC_UNICAST_QUEUE_CNT_TBL_INC * i;
+ ret = regmap_read(ppe_dev->regmap, reg, &val);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ pend_cnt = FIELD_GET(PPE_AC_UNICAST_QUEUE_CNT_TBL_PEND_CNT, val);
+
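+			/* Each unicast queue has PPE_UNICAST_DROP_TYPES drop
+			 * counters; only the forced drop entry is read here.
+			 */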
+ reg = PPE_UNICAST_DROP_CNT_TBL_ADDR +
+ PPE_AC_UNICAST_QUEUE_CNT_TBL_INC *
+ (i * PPE_UNICAST_DROP_TYPES + PPE_UNICAST_DROP_FORCE_OFFSET);
+
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
+ &drop_cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+ } else {
+ int mq_offset = i - PPE_AC_UNICAST_QUEUE_CFG_TBL_ENTRIES;
+
+ reg = PPE_AC_MULTICAST_QUEUE_CNT_TBL_ADDR +
+ PPE_AC_MULTICAST_QUEUE_CNT_TBL_INC * mq_offset;
+ ret = regmap_read(ppe_dev->regmap, reg, &val);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ pend_cnt = FIELD_GET(PPE_AC_MULTICAST_QUEUE_CNT_TBL_PEND_CNT, val);
+
+ if (mq_offset < PPE_P0_MULTICAST_QUEUE_NUM) {
+ reg = PPE_CPU_PORT_MULTICAST_FORCE_DROP_CNT_TBL_ADDR(mq_offset);
+ } else {
+ mq_offset -= PPE_P0_MULTICAST_QUEUE_NUM;
+
+ reg = PPE_P1_MULTICAST_DROP_CNT_TBL_ADDR;
+ reg += (mq_offset / PPE_MULTICAST_QUEUE_NUM) *
+ PPE_MULTICAST_QUEUE_PORT_ADDR_INC;
+ reg += (mq_offset % PPE_MULTICAST_QUEUE_NUM) *
+ PPE_MULTICAST_DROP_CNT_TBL_INC *
+ PPE_MULTICAST_DROP_TYPES;
+ }
+
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
+ &drop_cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (pkt_cnt > 0 || pend_cnt > 0 || drop_cnt > 0) {
+ if (!((++tag) % 4))
+ seq_printf(seq, "\n%-24s", "");
+
+ seq_printf(seq, "%10u/%u/%u(%s=%04d)",
+ pkt_cnt, pend_cnt, drop_cnt, "queue", i);
+ }
+ }
+
+ seq_putc(seq, '\n');
+
+ return 0;
+}
+
+/* Display the various packet counters of PPE. */
+static int ppe_packet_counter_show(struct seq_file *seq, void *v)
+{
+ struct ppe_debugfs_entry *entry = seq->private;
+ struct ppe_device *ppe_dev = entry->ppe;
+ int ret;
+
+ switch (entry->counter_type) {
+ case PPE_CNT_BM:
+ ret = ppe_bm_counter_get(ppe_dev, seq);
+ break;
+ case PPE_CNT_PARSE:
+ ret = ppe_parse_pkt_counter_get(ppe_dev, seq);
+ break;
+ case PPE_CNT_PORT_RX:
+ ret = ppe_port_rx_counter_get(ppe_dev, seq);
+ break;
+ case PPE_CNT_VLAN_RX:
+ ret = ppe_vlan_rx_counter_get(ppe_dev, seq);
+ break;
+ case PPE_CNT_L2_FWD:
+ ret = ppe_l2_counter_get(ppe_dev, seq);
+ break;
+ case PPE_CNT_CPU_CODE:
+ ret = ppe_cpu_code_counter_get(ppe_dev, seq);
+ break;
+ case PPE_CNT_VLAN_TX:
+ ret = ppe_vlan_tx_counter_get(ppe_dev, seq);
+ break;
+ case PPE_CNT_PORT_TX:
+ ret = ppe_port_tx_counter_get(ppe_dev, seq);
+ break;
+ case PPE_CNT_QM:
+ ret = ppe_queue_counter_get(ppe_dev, seq);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/* Flush the various packet counters of PPE. */
+static ssize_t ppe_packet_counter_write(struct file *file,
+ const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct ppe_debugfs_entry *entry = file_inode(file)->i_private;
+ struct ppe_device *ppe_dev = entry->ppe;
+ u32 reg;
+ int i;
+
+ switch (entry->counter_type) {
+ case PPE_CNT_BM:
+ for (i = 0; i < PPE_DROP_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_DROP_CNT_TBL_ADDR + i * PPE_DROP_CNT_TBL_INC;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_1WORD);
+ }
+
+ for (i = 0; i < PPE_DROP_STAT_TBL_ENTRIES; i++) {
+ reg = PPE_DROP_STAT_TBL_ADDR + PPE_DROP_STAT_TBL_INC * i;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
+ }
+
+ break;
+ case PPE_CNT_PARSE:
+ for (i = 0; i < PPE_IPR_PKT_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_IPR_PKT_CNT_TBL_ADDR + i * PPE_IPR_PKT_CNT_TBL_INC;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_1WORD);
+
+ reg = PPE_TPR_PKT_CNT_TBL_ADDR + i * PPE_TPR_PKT_CNT_TBL_INC;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_1WORD);
+ }
+
+ break;
+ case PPE_CNT_PORT_RX:
+ for (i = 0; i < PPE_PORT_RX_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_PORT_RX_CNT_TBL_ADDR + PPE_PORT_RX_CNT_TBL_INC * i;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_5WORD);
+ }
+
+ for (i = 0; i < PPE_PHY_PORT_RX_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_PHY_PORT_RX_CNT_TBL_ADDR + PPE_PHY_PORT_RX_CNT_TBL_INC * i;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_5WORD);
+ }
+
+ break;
+ case PPE_CNT_VLAN_RX:
+ for (i = 0; i < PPE_VLAN_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_VLAN_CNT_TBL_ADDR + PPE_VLAN_CNT_TBL_INC * i;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
+ }
+
+ break;
+ case PPE_CNT_L2_FWD:
+ for (i = 0; i < PPE_PRE_L2_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_PRE_L2_CNT_TBL_ADDR + PPE_PRE_L2_CNT_TBL_INC * i;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_5WORD);
+ }
+
+ break;
+ case PPE_CNT_CPU_CODE:
+ for (i = 0; i < PPE_DROP_CPU_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_DROP_CPU_CNT_TBL_ADDR + PPE_DROP_CPU_CNT_TBL_INC * i;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
+ }
+
+ break;
+ case PPE_CNT_VLAN_TX:
+ for (i = 0; i < PPE_EG_VSI_COUNTER_TBL_ENTRIES; i++) {
+ reg = PPE_EG_VSI_COUNTER_TBL_ADDR + PPE_EG_VSI_COUNTER_TBL_INC * i;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
+ }
+
+ break;
+ case PPE_CNT_PORT_TX:
+ for (i = 0; i < PPE_PORT_TX_COUNTER_TBL_ENTRIES; i++) {
+ reg = PPE_PORT_TX_DROP_CNT_TBL_ADDR + PPE_PORT_TX_DROP_CNT_TBL_INC * i;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
+
+ reg = PPE_PORT_TX_COUNTER_TBL_ADDR + PPE_PORT_TX_COUNTER_TBL_INC * i;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
+ }
+
+ for (i = 0; i < PPE_VPORT_TX_COUNTER_TBL_ENTRIES; i++) {
+ reg = PPE_VPORT_TX_COUNTER_TBL_ADDR + PPE_VPORT_TX_COUNTER_TBL_INC * i;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
+
+ reg = PPE_VPORT_TX_DROP_CNT_TBL_ADDR + PPE_VPORT_TX_DROP_CNT_TBL_INC * i;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
+ }
+
+ break;
+ case PPE_CNT_QM:
+ for (i = 0; i < PPE_QUEUE_TX_COUNTER_TBL_ENTRIES; i++) {
+ reg = PPE_QUEUE_TX_COUNTER_TBL_ADDR + PPE_QUEUE_TX_COUNTER_TBL_INC * i;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
+ }
+
+ break;
+ default:
+ break;
+ }
+
+ return count;
+}
+DEFINE_SHOW_STORE_ATTRIBUTE(ppe_packet_counter);
+
+void ppe_debugfs_setup(struct ppe_device *ppe_dev)
+{
+ struct ppe_debugfs_entry *entry;
+ int i;
+
+ ppe_dev->debugfs_root = debugfs_create_dir("ppe", NULL);
+ if (IS_ERR(ppe_dev->debugfs_root))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_files); i++) {
+ entry = devm_kzalloc(ppe_dev->dev, sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return;
+
+ entry->ppe = ppe_dev;
+ entry->counter_type = debugfs_files[i].counter_type;
+
+ debugfs_create_file(debugfs_files[i].name, 0444,
+ ppe_dev->debugfs_root, entry,
+ &ppe_packet_counter_fops);
+ }
+}
+
+void ppe_debugfs_teardown(struct ppe_device *ppe_dev)
+{
+ debugfs_remove_recursive(ppe_dev->debugfs_root);
+ ppe_dev->debugfs_root = NULL;
+}
diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.h b/drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.h
new file mode 100644
index 000000000000..81f49a709123
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+/* PPE debugfs counters setup. */
+
+#ifndef __PPE_DEBUGFS_H__
+#define __PPE_DEBUGFS_H__
+
+#include "ppe.h"
+
+void ppe_debugfs_setup(struct ppe_device *ppe_dev);
+void ppe_debugfs_teardown(struct ppe_device *ppe_dev);
+
+#endif
diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
new file mode 100644
index 000000000000..746dfbb5a682
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
@@ -0,0 +1,591 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+/* PPE hardware register and table declarations. */
+#ifndef __PPE_REGS_H__
+#define __PPE_REGS_H__
+
+#include <linux/bitfield.h>
+
+/* PPE scheduler configurations for buffer manager block. */
+#define PPE_BM_SCH_CTRL_ADDR 0xb000
+#define PPE_BM_SCH_CTRL_INC 4
+#define PPE_BM_SCH_CTRL_SCH_DEPTH GENMASK(7, 0)
+#define PPE_BM_SCH_CTRL_SCH_OFFSET GENMASK(14, 8)
+#define PPE_BM_SCH_CTRL_SCH_EN BIT(31)
+
+/* PPE drop counters. */
+#define PPE_DROP_CNT_TBL_ADDR 0xb024
+#define PPE_DROP_CNT_TBL_ENTRIES 8
+#define PPE_DROP_CNT_TBL_INC 4
+
+/* BM port drop counters. */
+#define PPE_DROP_STAT_TBL_ADDR 0xe000
+#define PPE_DROP_STAT_TBL_ENTRIES 30
+#define PPE_DROP_STAT_TBL_INC 0x10
+
+/* Egress VLAN counters. */
+#define PPE_EG_VSI_COUNTER_TBL_ADDR 0x41000
+#define PPE_EG_VSI_COUNTER_TBL_ENTRIES 64
+#define PPE_EG_VSI_COUNTER_TBL_INC 0x10
+
+/* Port TX counters. */
+#define PPE_PORT_TX_COUNTER_TBL_ADDR 0x45000
+#define PPE_PORT_TX_COUNTER_TBL_ENTRIES 8
+#define PPE_PORT_TX_COUNTER_TBL_INC 0x10
+
+/* Virtual port TX counters. */
+#define PPE_VPORT_TX_COUNTER_TBL_ADDR 0x47000
+#define PPE_VPORT_TX_COUNTER_TBL_ENTRIES 256
+#define PPE_VPORT_TX_COUNTER_TBL_INC 0x10
+
+/* Queue counters. */
+#define PPE_QUEUE_TX_COUNTER_TBL_ADDR 0x4a000
+#define PPE_QUEUE_TX_COUNTER_TBL_ENTRIES 300
+#define PPE_QUEUE_TX_COUNTER_TBL_INC 0x10
+
+/* RSS settings control how the RSS hash value is calculated for packets
+ * received towards the ARM cores. This hash is then used to generate the
+ * queue offset that selects the queue delivering the packet to the ARM cores.
+ */
+#define PPE_RSS_HASH_MASK_ADDR 0xb4318
+#define PPE_RSS_HASH_MASK_HASH_MASK GENMASK(20, 0)
+#define PPE_RSS_HASH_MASK_FRAGMENT BIT(28)
+
+#define PPE_RSS_HASH_SEED_ADDR 0xb431c
+#define PPE_RSS_HASH_SEED_VAL GENMASK(31, 0)
+
+#define PPE_RSS_HASH_MIX_ADDR 0xb4320
+#define PPE_RSS_HASH_MIX_ENTRIES 11
+#define PPE_RSS_HASH_MIX_INC 4
+#define PPE_RSS_HASH_MIX_VAL GENMASK(4, 0)
+
+#define PPE_RSS_HASH_FIN_ADDR 0xb4350
+#define PPE_RSS_HASH_FIN_ENTRIES 5
+#define PPE_RSS_HASH_FIN_INC 4
+#define PPE_RSS_HASH_FIN_INNER GENMASK(4, 0)
+#define PPE_RSS_HASH_FIN_OUTER GENMASK(9, 5)
+
+#define PPE_RSS_HASH_MASK_IPV4_ADDR 0xb4380
+#define PPE_RSS_HASH_MASK_IPV4_HASH_MASK GENMASK(20, 0)
+#define PPE_RSS_HASH_MASK_IPV4_FRAGMENT BIT(28)
+
+#define PPE_RSS_HASH_SEED_IPV4_ADDR 0xb4384
+#define PPE_RSS_HASH_SEED_IPV4_VAL GENMASK(31, 0)
+
+#define PPE_RSS_HASH_MIX_IPV4_ADDR 0xb4390
+#define PPE_RSS_HASH_MIX_IPV4_ENTRIES 5
+#define PPE_RSS_HASH_MIX_IPV4_INC 4
+#define PPE_RSS_HASH_MIX_IPV4_VAL GENMASK(4, 0)
+
+#define PPE_RSS_HASH_FIN_IPV4_ADDR 0xb43b0
+#define PPE_RSS_HASH_FIN_IPV4_ENTRIES 5
+#define PPE_RSS_HASH_FIN_IPV4_INC 4
+#define PPE_RSS_HASH_FIN_IPV4_INNER GENMASK(4, 0)
+#define PPE_RSS_HASH_FIN_IPV4_OUTER GENMASK(9, 5)
+
+#define PPE_BM_SCH_CFG_TBL_ADDR 0xc000
+#define PPE_BM_SCH_CFG_TBL_ENTRIES 128
+#define PPE_BM_SCH_CFG_TBL_INC 0x10
+#define PPE_BM_SCH_CFG_TBL_PORT_NUM GENMASK(3, 0)
+#define PPE_BM_SCH_CFG_TBL_DIR BIT(4)
+#define PPE_BM_SCH_CFG_TBL_VALID BIT(5)
+#define PPE_BM_SCH_CFG_TBL_SECOND_PORT_VALID BIT(6)
+#define PPE_BM_SCH_CFG_TBL_SECOND_PORT GENMASK(11, 8)
+
+/* PPE service code configuration for the ingress direction functions,
+ * including bypass configuration for relevant PPE switch core functions
+ * such as flow entry lookup bypass.
+ */
+#define PPE_SERVICE_TBL_ADDR 0x15000
+#define PPE_SERVICE_TBL_ENTRIES 256
+#define PPE_SERVICE_TBL_INC 0x10
+#define PPE_SERVICE_W0_BYPASS_BITMAP GENMASK(31, 0)
+#define PPE_SERVICE_W1_RX_CNT_EN BIT(0)
+
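+/* The table field helpers below operate on an entry read into a word array
+ * with regmap_bulk_read(); "(tbl_cfg) + 0x1" addresses word 1 of the entry,
+ * matching the W0/W1 prefixes of the field definitions.
+ */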
+#define PPE_SERVICE_SET_BYPASS_BITMAP(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_SERVICE_W0_BYPASS_BITMAP, tbl_cfg, value)
+#define PPE_SERVICE_SET_RX_CNT_EN(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_SERVICE_W1_RX_CNT_EN, (tbl_cfg) + 0x1, value)
+
+/* PPE port egress VLAN configurations. */
+#define PPE_PORT_EG_VLAN_TBL_ADDR 0x20020
+#define PPE_PORT_EG_VLAN_TBL_ENTRIES 8
+#define PPE_PORT_EG_VLAN_TBL_INC 4
+#define PPE_PORT_EG_VLAN_TBL_VLAN_TYPE BIT(0)
+#define PPE_PORT_EG_VLAN_TBL_CTAG_MODE GENMASK(2, 1)
+#define PPE_PORT_EG_VLAN_TBL_STAG_MODE GENMASK(4, 3)
+#define PPE_PORT_EG_VLAN_TBL_VSI_TAG_MODE_EN BIT(5)
+#define PPE_PORT_EG_VLAN_TBL_PCP_PROP_CMD BIT(6)
+#define PPE_PORT_EG_VLAN_TBL_DEI_PROP_CMD BIT(7)
+#define PPE_PORT_EG_VLAN_TBL_TX_COUNTING_EN BIT(8)
+
+/* PPE queue counters enable/disable control. */
+#define PPE_EG_BRIDGE_CONFIG_ADDR 0x20044
+#define PPE_EG_BRIDGE_CONFIG_QUEUE_CNT_EN BIT(2)
+
+/* PPE service code configuration on the egress direction. */
+#define PPE_EG_SERVICE_TBL_ADDR 0x43000
+#define PPE_EG_SERVICE_TBL_ENTRIES 256
+#define PPE_EG_SERVICE_TBL_INC 0x10
+#define PPE_EG_SERVICE_W0_UPDATE_ACTION GENMASK(31, 0)
+#define PPE_EG_SERVICE_W1_NEXT_SERVCODE GENMASK(7, 0)
+#define PPE_EG_SERVICE_W1_HW_SERVICE GENMASK(13, 8)
+#define PPE_EG_SERVICE_W1_OFFSET_SEL BIT(14)
+#define PPE_EG_SERVICE_W1_TX_CNT_EN BIT(15)
+
+#define PPE_EG_SERVICE_SET_UPDATE_ACTION(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_EG_SERVICE_W0_UPDATE_ACTION, tbl_cfg, value)
+#define PPE_EG_SERVICE_SET_NEXT_SERVCODE(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_EG_SERVICE_W1_NEXT_SERVCODE, (tbl_cfg) + 0x1, value)
+#define PPE_EG_SERVICE_SET_HW_SERVICE(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_EG_SERVICE_W1_HW_SERVICE, (tbl_cfg) + 0x1, value)
+#define PPE_EG_SERVICE_SET_OFFSET_SEL(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_EG_SERVICE_W1_OFFSET_SEL, (tbl_cfg) + 0x1, value)
+#define PPE_EG_SERVICE_SET_TX_CNT_EN(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_EG_SERVICE_W1_TX_CNT_EN, (tbl_cfg) + 0x1, value)
+
+/* PPE port bridge configuration */
+#define PPE_PORT_BRIDGE_CTRL_ADDR 0x60300
+#define PPE_PORT_BRIDGE_CTRL_ENTRIES 8
+#define PPE_PORT_BRIDGE_CTRL_INC 4
+#define PPE_PORT_BRIDGE_NEW_LRN_EN BIT(0)
+#define PPE_PORT_BRIDGE_STA_MOVE_LRN_EN BIT(3)
+#define PPE_PORT_BRIDGE_TXMAC_EN BIT(16)
+
+/* PPE port control configurations for the traffic to the multicast queues. */
+#define PPE_MC_MTU_CTRL_TBL_ADDR 0x60a00
+#define PPE_MC_MTU_CTRL_TBL_ENTRIES 8
+#define PPE_MC_MTU_CTRL_TBL_INC 4
+#define PPE_MC_MTU_CTRL_TBL_MTU GENMASK(13, 0)
+#define PPE_MC_MTU_CTRL_TBL_MTU_CMD GENMASK(15, 14)
+#define PPE_MC_MTU_CTRL_TBL_TX_CNT_EN BIT(16)
+
+/* PPE VSI configurations */
+#define PPE_VSI_TBL_ADDR 0x63800
+#define PPE_VSI_TBL_ENTRIES 64
+#define PPE_VSI_TBL_INC 0x10
+#define PPE_VSI_W0_MEMBER_PORT_BITMAP GENMASK(7, 0)
+#define PPE_VSI_W0_UUC_BITMAP GENMASK(15, 8)
+#define PPE_VSI_W0_UMC_BITMAP GENMASK(23, 16)
+#define PPE_VSI_W0_BC_BITMAP GENMASK(31, 24)
+#define PPE_VSI_W1_NEW_ADDR_LRN_EN BIT(0)
+#define PPE_VSI_W1_NEW_ADDR_FWD_CMD GENMASK(2, 1)
+#define PPE_VSI_W1_STATION_MOVE_LRN_EN BIT(3)
+#define PPE_VSI_W1_STATION_MOVE_FWD_CMD GENMASK(5, 4)
+
+#define PPE_VSI_SET_MEMBER_PORT_BITMAP(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_VSI_W0_MEMBER_PORT_BITMAP, tbl_cfg, value)
+#define PPE_VSI_SET_UUC_BITMAP(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_VSI_W0_UUC_BITMAP, tbl_cfg, value)
+#define PPE_VSI_SET_UMC_BITMAP(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_VSI_W0_UMC_BITMAP, tbl_cfg, value)
+#define PPE_VSI_SET_BC_BITMAP(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_VSI_W0_BC_BITMAP, tbl_cfg, value)
+#define PPE_VSI_SET_NEW_ADDR_LRN_EN(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_VSI_W1_NEW_ADDR_LRN_EN, (tbl_cfg) + 0x1, value)
+#define PPE_VSI_SET_NEW_ADDR_FWD_CMD(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_VSI_W1_NEW_ADDR_FWD_CMD, (tbl_cfg) + 0x1, value)
+#define PPE_VSI_SET_STATION_MOVE_LRN_EN(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_VSI_W1_STATION_MOVE_LRN_EN, (tbl_cfg) + 0x1, value)
+#define PPE_VSI_SET_STATION_MOVE_FWD_CMD(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_VSI_W1_STATION_MOVE_FWD_CMD, (tbl_cfg) + 0x1, value)
+
+/* PPE port control configurations for the traffic to the unicast queues. */
+#define PPE_MRU_MTU_CTRL_TBL_ADDR 0x65000
+#define PPE_MRU_MTU_CTRL_TBL_ENTRIES 256
+#define PPE_MRU_MTU_CTRL_TBL_INC 0x10
+#define PPE_MRU_MTU_CTRL_W0_MRU GENMASK(13, 0)
+#define PPE_MRU_MTU_CTRL_W0_MRU_CMD GENMASK(15, 14)
+#define PPE_MRU_MTU_CTRL_W0_MTU GENMASK(29, 16)
+#define PPE_MRU_MTU_CTRL_W0_MTU_CMD GENMASK(31, 30)
+#define PPE_MRU_MTU_CTRL_W1_RX_CNT_EN BIT(0)
+#define PPE_MRU_MTU_CTRL_W1_TX_CNT_EN BIT(1)
+#define PPE_MRU_MTU_CTRL_W1_SRC_PROFILE GENMASK(3, 2)
+#define PPE_MRU_MTU_CTRL_W1_INNER_PREC_LOW BIT(31)
+#define PPE_MRU_MTU_CTRL_W2_INNER_PREC_HIGH GENMASK(1, 0)
+
+#define PPE_MRU_MTU_CTRL_SET_MRU(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_MRU_MTU_CTRL_W0_MRU, tbl_cfg, value)
+#define PPE_MRU_MTU_CTRL_SET_MRU_CMD(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_MRU_MTU_CTRL_W0_MRU_CMD, tbl_cfg, value)
+#define PPE_MRU_MTU_CTRL_SET_MTU(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_MRU_MTU_CTRL_W0_MTU, tbl_cfg, value)
+#define PPE_MRU_MTU_CTRL_SET_MTU_CMD(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_MRU_MTU_CTRL_W0_MTU_CMD, tbl_cfg, value)
+#define PPE_MRU_MTU_CTRL_SET_RX_CNT_EN(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_MRU_MTU_CTRL_W1_RX_CNT_EN, (tbl_cfg) + 0x1, value)
+#define PPE_MRU_MTU_CTRL_SET_TX_CNT_EN(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_MRU_MTU_CTRL_W1_TX_CNT_EN, (tbl_cfg) + 0x1, value)
+
+/* PPE service code configuration for destination port and counter. */
+#define PPE_IN_L2_SERVICE_TBL_ADDR 0x66000
+#define PPE_IN_L2_SERVICE_TBL_ENTRIES 256
+#define PPE_IN_L2_SERVICE_TBL_INC 0x10
+#define PPE_IN_L2_SERVICE_TBL_DST_PORT_ID_VALID BIT(0)
+#define PPE_IN_L2_SERVICE_TBL_DST_PORT_ID GENMASK(4, 1)
+#define PPE_IN_L2_SERVICE_TBL_DST_DIRECTION BIT(5)
+#define PPE_IN_L2_SERVICE_TBL_DST_BYPASS_BITMAP GENMASK(29, 6)
+#define PPE_IN_L2_SERVICE_TBL_RX_CNT_EN BIT(30)
+#define PPE_IN_L2_SERVICE_TBL_TX_CNT_EN BIT(31)
+
+/* L2 Port configurations */
+#define PPE_L2_VP_PORT_TBL_ADDR 0x98000
+#define PPE_L2_VP_PORT_TBL_ENTRIES 256
+#define PPE_L2_VP_PORT_TBL_INC 0x10
+#define PPE_L2_VP_PORT_W0_INVALID_VSI_FWD_EN BIT(0)
+#define PPE_L2_VP_PORT_W0_DST_INFO GENMASK(9, 2)
+
+#define PPE_L2_PORT_SET_INVALID_VSI_FWD_EN(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_L2_VP_PORT_W0_INVALID_VSI_FWD_EN, tbl_cfg, value)
+#define PPE_L2_PORT_SET_DST_INFO(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_L2_VP_PORT_W0_DST_INFO, tbl_cfg, value)
+
+/* Port RX and RX drop counters. */
+#define PPE_PORT_RX_CNT_TBL_ADDR 0x150000
+#define PPE_PORT_RX_CNT_TBL_ENTRIES 256
+#define PPE_PORT_RX_CNT_TBL_INC 0x20
+
+/* Physical port RX and RX drop counters. */
+#define PPE_PHY_PORT_RX_CNT_TBL_ADDR 0x156000
+#define PPE_PHY_PORT_RX_CNT_TBL_ENTRIES 8
+#define PPE_PHY_PORT_RX_CNT_TBL_INC 0x20
+
+/* Counters for packets sent to the CPU port. */
+#define PPE_DROP_CPU_CNT_TBL_ADDR 0x160000
+#define PPE_DROP_CPU_CNT_TBL_ENTRIES 1280
+#define PPE_DROP_CPU_CNT_TBL_INC 0x10
+
+/* VLAN counters. */
+#define PPE_VLAN_CNT_TBL_ADDR 0x178000
+#define PPE_VLAN_CNT_TBL_ENTRIES 64
+#define PPE_VLAN_CNT_TBL_INC 0x10
+
+/* PPE L2 counters. */
+#define PPE_PRE_L2_CNT_TBL_ADDR 0x17c000
+#define PPE_PRE_L2_CNT_TBL_ENTRIES 64
+#define PPE_PRE_L2_CNT_TBL_INC 0x20
+
+/* Port TX drop counters. */
+#define PPE_PORT_TX_DROP_CNT_TBL_ADDR 0x17d000
+#define PPE_PORT_TX_DROP_CNT_TBL_ENTRIES 8
+#define PPE_PORT_TX_DROP_CNT_TBL_INC 0x10
+
+/* Virtual port TX drop counters. */
+#define PPE_VPORT_TX_DROP_CNT_TBL_ADDR 0x17e000
+#define PPE_VPORT_TX_DROP_CNT_TBL_ENTRIES 256
+#define PPE_VPORT_TX_DROP_CNT_TBL_INC 0x10
+
+/* Counters for tunnel packets. */
+#define PPE_TPR_PKT_CNT_TBL_ADDR 0x1d0080
+#define PPE_TPR_PKT_CNT_TBL_ENTRIES 8
+#define PPE_TPR_PKT_CNT_TBL_INC 4
+
+/* Counters for all received packets. */
+#define PPE_IPR_PKT_CNT_TBL_ADDR 0x1e0080
+#define PPE_IPR_PKT_CNT_TBL_ENTRIES 8
+#define PPE_IPR_PKT_CNT_TBL_INC 4
+
+/* PPE service code configuration for the tunnel packet. */
+#define PPE_TL_SERVICE_TBL_ADDR 0x306000
+#define PPE_TL_SERVICE_TBL_ENTRIES 256
+#define PPE_TL_SERVICE_TBL_INC 4
+#define PPE_TL_SERVICE_TBL_BYPASS_BITMAP GENMASK(31, 0)
+
+/* Port scheduler global config. */
+#define PPE_PSCH_SCH_DEPTH_CFG_ADDR 0x400000
+#define PPE_PSCH_SCH_DEPTH_CFG_INC 4
+#define PPE_PSCH_SCH_DEPTH_CFG_SCH_DEPTH GENMASK(7, 0)
+
+/* PPE queue level scheduler configurations. */
+#define PPE_L0_FLOW_MAP_TBL_ADDR 0x402000
+#define PPE_L0_FLOW_MAP_TBL_ENTRIES 300
+#define PPE_L0_FLOW_MAP_TBL_INC 0x10
+#define PPE_L0_FLOW_MAP_TBL_FLOW_ID GENMASK(5, 0)
+#define PPE_L0_FLOW_MAP_TBL_C_PRI GENMASK(8, 6)
+#define PPE_L0_FLOW_MAP_TBL_E_PRI GENMASK(11, 9)
+#define PPE_L0_FLOW_MAP_TBL_C_NODE_WT GENMASK(21, 12)
+#define PPE_L0_FLOW_MAP_TBL_E_NODE_WT GENMASK(31, 22)
+
+#define PPE_L0_C_FLOW_CFG_TBL_ADDR 0x404000
+#define PPE_L0_C_FLOW_CFG_TBL_ENTRIES 512
+#define PPE_L0_C_FLOW_CFG_TBL_INC 0x10
+#define PPE_L0_C_FLOW_CFG_TBL_NODE_ID GENMASK(7, 0)
+#define PPE_L0_C_FLOW_CFG_TBL_NODE_CREDIT_UNIT BIT(8)
+
+#define PPE_L0_E_FLOW_CFG_TBL_ADDR 0x406000
+#define PPE_L0_E_FLOW_CFG_TBL_ENTRIES 512
+#define PPE_L0_E_FLOW_CFG_TBL_INC 0x10
+#define PPE_L0_E_FLOW_CFG_TBL_NODE_ID GENMASK(7, 0)
+#define PPE_L0_E_FLOW_CFG_TBL_NODE_CREDIT_UNIT BIT(8)
+
+#define PPE_L0_FLOW_PORT_MAP_TBL_ADDR 0x408000
+#define PPE_L0_FLOW_PORT_MAP_TBL_ENTRIES 300
+#define PPE_L0_FLOW_PORT_MAP_TBL_INC 0x10
+#define PPE_L0_FLOW_PORT_MAP_TBL_PORT_NUM GENMASK(3, 0)
+
+#define PPE_L0_COMP_CFG_TBL_ADDR 0x428000
+#define PPE_L0_COMP_CFG_TBL_ENTRIES 300
+#define PPE_L0_COMP_CFG_TBL_INC 0x10
+#define PPE_L0_COMP_CFG_TBL_SHAPER_METER_LEN GENMASK(1, 0)
+#define PPE_L0_COMP_CFG_TBL_NODE_METER_LEN GENMASK(3, 2)
+
+/* PPE queue to Ethernet DMA ring mapping table. */
+#define PPE_RING_Q_MAP_TBL_ADDR 0x42a000
+#define PPE_RING_Q_MAP_TBL_ENTRIES 24
+#define PPE_RING_Q_MAP_TBL_INC 0x40
+
+/* Table addresses for per-queue dequeue setting. */
+#define PPE_DEQ_OPR_TBL_ADDR 0x430000
+#define PPE_DEQ_OPR_TBL_ENTRIES 300
+#define PPE_DEQ_OPR_TBL_INC 0x10
+#define PPE_DEQ_OPR_TBL_DEQ_DISABLE BIT(0)
+
+/* PPE flow level scheduler configurations. */
+#define PPE_L1_FLOW_MAP_TBL_ADDR 0x440000
+#define PPE_L1_FLOW_MAP_TBL_ENTRIES 64
+#define PPE_L1_FLOW_MAP_TBL_INC 0x10
+#define PPE_L1_FLOW_MAP_TBL_FLOW_ID GENMASK(3, 0)
+#define PPE_L1_FLOW_MAP_TBL_C_PRI GENMASK(6, 4)
+#define PPE_L1_FLOW_MAP_TBL_E_PRI GENMASK(9, 7)
+#define PPE_L1_FLOW_MAP_TBL_C_NODE_WT GENMASK(19, 10)
+#define PPE_L1_FLOW_MAP_TBL_E_NODE_WT GENMASK(29, 20)
+
+#define PPE_L1_C_FLOW_CFG_TBL_ADDR 0x442000
+#define PPE_L1_C_FLOW_CFG_TBL_ENTRIES 64
+#define PPE_L1_C_FLOW_CFG_TBL_INC 0x10
+#define PPE_L1_C_FLOW_CFG_TBL_NODE_ID GENMASK(5, 0)
+#define PPE_L1_C_FLOW_CFG_TBL_NODE_CREDIT_UNIT BIT(6)
+
+#define PPE_L1_E_FLOW_CFG_TBL_ADDR 0x444000
+#define PPE_L1_E_FLOW_CFG_TBL_ENTRIES 64
+#define PPE_L1_E_FLOW_CFG_TBL_INC 0x10
+#define PPE_L1_E_FLOW_CFG_TBL_NODE_ID GENMASK(5, 0)
+#define PPE_L1_E_FLOW_CFG_TBL_NODE_CREDIT_UNIT BIT(6)
+
+#define PPE_L1_FLOW_PORT_MAP_TBL_ADDR 0x446000
+#define PPE_L1_FLOW_PORT_MAP_TBL_ENTRIES 64
+#define PPE_L1_FLOW_PORT_MAP_TBL_INC 0x10
+#define PPE_L1_FLOW_PORT_MAP_TBL_PORT_NUM GENMASK(3, 0)
+
+#define PPE_L1_COMP_CFG_TBL_ADDR 0x46a000
+#define PPE_L1_COMP_CFG_TBL_ENTRIES 64
+#define PPE_L1_COMP_CFG_TBL_INC 0x10
+#define PPE_L1_COMP_CFG_TBL_SHAPER_METER_LEN GENMASK(1, 0)
+#define PPE_L1_COMP_CFG_TBL_NODE_METER_LEN GENMASK(3, 2)
+
+/* PPE port scheduler configurations for egress. */
+#define PPE_PSCH_SCH_CFG_TBL_ADDR 0x47a000
+#define PPE_PSCH_SCH_CFG_TBL_ENTRIES 128
+#define PPE_PSCH_SCH_CFG_TBL_INC 0x10
+#define PPE_PSCH_SCH_CFG_TBL_DES_PORT GENMASK(3, 0)
+#define PPE_PSCH_SCH_CFG_TBL_ENS_PORT GENMASK(7, 4)
+#define PPE_PSCH_SCH_CFG_TBL_ENS_PORT_BITMAP GENMASK(15, 8)
+#define PPE_PSCH_SCH_CFG_TBL_DES_SECOND_PORT_EN BIT(16)
+#define PPE_PSCH_SCH_CFG_TBL_DES_SECOND_PORT GENMASK(20, 17)
+
+/* There are 15 BM ports and 4 BM groups supported by PPE.
+ * BM ports 0-7 are for EDMA port 0, BM ports 8-13 are for
+ * PPE physical ports 1-6 and BM port 14 is for the EIP port.
+ */
+#define PPE_BM_PORT_FC_MODE_ADDR 0x600100
+#define PPE_BM_PORT_FC_MODE_ENTRIES 15
+#define PPE_BM_PORT_FC_MODE_INC 0x4
+#define PPE_BM_PORT_FC_MODE_EN BIT(0)
+
+#define PPE_BM_PORT_GROUP_ID_ADDR 0x600180
+#define PPE_BM_PORT_GROUP_ID_ENTRIES 15
+#define PPE_BM_PORT_GROUP_ID_INC 0x4
+#define PPE_BM_PORT_GROUP_ID_SHARED_GROUP_ID GENMASK(1, 0)
+
+/* Counters for PPE buffers used for cached packets. */
+#define PPE_BM_USED_CNT_TBL_ADDR 0x6001c0
+#define PPE_BM_USED_CNT_TBL_ENTRIES 15
+#define PPE_BM_USED_CNT_TBL_INC 0x4
+#define PPE_BM_USED_CNT_VAL GENMASK(10, 0)
+
+/* Counters for PPE buffers used for packets received after a pause frame is sent. */
+#define PPE_BM_REACT_CNT_TBL_ADDR 0x600240
+#define PPE_BM_REACT_CNT_TBL_ENTRIES 15
+#define PPE_BM_REACT_CNT_TBL_INC 0x4
+#define PPE_BM_REACT_CNT_VAL GENMASK(8, 0)
+
+#define PPE_BM_SHARED_GROUP_CFG_ADDR 0x600290
+#define PPE_BM_SHARED_GROUP_CFG_ENTRIES 4
+#define PPE_BM_SHARED_GROUP_CFG_INC 0x4
+#define PPE_BM_SHARED_GROUP_CFG_SHARED_LIMIT GENMASK(10, 0)
+
+#define PPE_BM_PORT_FC_CFG_TBL_ADDR 0x601000
+#define PPE_BM_PORT_FC_CFG_TBL_ENTRIES 15
+#define PPE_BM_PORT_FC_CFG_TBL_INC 0x10
+#define PPE_BM_PORT_FC_W0_REACT_LIMIT GENMASK(8, 0)
+#define PPE_BM_PORT_FC_W0_RESUME_THRESHOLD GENMASK(17, 9)
+#define PPE_BM_PORT_FC_W0_RESUME_OFFSET GENMASK(28, 18)
+#define PPE_BM_PORT_FC_W0_CEILING_LOW GENMASK(31, 29)
+#define PPE_BM_PORT_FC_W1_CEILING_HIGH GENMASK(7, 0)
+#define PPE_BM_PORT_FC_W1_WEIGHT GENMASK(10, 8)
+#define PPE_BM_PORT_FC_W1_DYNAMIC BIT(11)
+#define PPE_BM_PORT_FC_W1_PRE_ALLOC GENMASK(22, 12)
+
+#define PPE_BM_PORT_FC_SET_REACT_LIMIT(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_BM_PORT_FC_W0_REACT_LIMIT, tbl_cfg, value)
+#define PPE_BM_PORT_FC_SET_RESUME_THRESHOLD(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_BM_PORT_FC_W0_RESUME_THRESHOLD, tbl_cfg, value)
+#define PPE_BM_PORT_FC_SET_RESUME_OFFSET(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_BM_PORT_FC_W0_RESUME_OFFSET, tbl_cfg, value)
+#define PPE_BM_PORT_FC_SET_CEILING_LOW(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_BM_PORT_FC_W0_CEILING_LOW, tbl_cfg, value)
+#define PPE_BM_PORT_FC_SET_CEILING_HIGH(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_BM_PORT_FC_W1_CEILING_HIGH, (tbl_cfg) + 0x1, value)
+#define PPE_BM_PORT_FC_SET_WEIGHT(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_BM_PORT_FC_W1_WEIGHT, (tbl_cfg) + 0x1, value)
+#define PPE_BM_PORT_FC_SET_DYNAMIC(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_BM_PORT_FC_W1_DYNAMIC, (tbl_cfg) + 0x1, value)
+#define PPE_BM_PORT_FC_SET_PRE_ALLOC(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_BM_PORT_FC_W1_PRE_ALLOC, (tbl_cfg) + 0x1, value)
+
+/* The queue base configurations based on destination port,
+ * service code or CPU code.
+ */
+#define PPE_UCAST_QUEUE_MAP_TBL_ADDR 0x810000
+#define PPE_UCAST_QUEUE_MAP_TBL_ENTRIES 3072
+#define PPE_UCAST_QUEUE_MAP_TBL_INC 0x10
+#define PPE_UCAST_QUEUE_MAP_TBL_PROFILE_ID GENMASK(3, 0)
+#define PPE_UCAST_QUEUE_MAP_TBL_QUEUE_ID GENMASK(11, 4)
+
+/* The queue offset configurations based on RSS hash value. */
+#define PPE_UCAST_HASH_MAP_TBL_ADDR 0x830000
+#define PPE_UCAST_HASH_MAP_TBL_ENTRIES 4096
+#define PPE_UCAST_HASH_MAP_TBL_INC 0x10
+#define PPE_UCAST_HASH_MAP_TBL_HASH GENMASK(7, 0)
+
+/* The queue offset configurations based on PPE internal priority. */
+#define PPE_UCAST_PRIORITY_MAP_TBL_ADDR 0x842000
+#define PPE_UCAST_PRIORITY_MAP_TBL_ENTRIES 256
+#define PPE_UCAST_PRIORITY_MAP_TBL_INC 0x10
+#define PPE_UCAST_PRIORITY_MAP_TBL_CLASS GENMASK(3, 0)
+
+/* PPE unicast queue (0-255) configurations. */
+#define PPE_AC_UNICAST_QUEUE_CFG_TBL_ADDR 0x848000
+#define PPE_AC_UNICAST_QUEUE_CFG_TBL_ENTRIES 256
+#define PPE_AC_UNICAST_QUEUE_CFG_TBL_INC 0x10
+#define PPE_AC_UNICAST_QUEUE_CFG_W0_EN BIT(0)
+#define PPE_AC_UNICAST_QUEUE_CFG_W0_WRED_EN BIT(1)
+#define PPE_AC_UNICAST_QUEUE_CFG_W0_FC_EN BIT(2)
+#define PPE_AC_UNICAST_QUEUE_CFG_W0_CLR_AWARE BIT(3)
+#define PPE_AC_UNICAST_QUEUE_CFG_W0_GRP_ID GENMASK(5, 4)
+#define PPE_AC_UNICAST_QUEUE_CFG_W0_PRE_LIMIT GENMASK(16, 6)
+#define PPE_AC_UNICAST_QUEUE_CFG_W0_DYNAMIC BIT(17)
+#define PPE_AC_UNICAST_QUEUE_CFG_W0_WEIGHT GENMASK(20, 18)
+#define PPE_AC_UNICAST_QUEUE_CFG_W0_THRESHOLD GENMASK(31, 21)
+#define PPE_AC_UNICAST_QUEUE_CFG_W3_GRN_RESUME GENMASK(23, 13)
+
+#define PPE_AC_UNICAST_QUEUE_SET_EN(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_AC_UNICAST_QUEUE_CFG_W0_EN, tbl_cfg, value)
+#define PPE_AC_UNICAST_QUEUE_SET_GRP_ID(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_AC_UNICAST_QUEUE_CFG_W0_GRP_ID, tbl_cfg, value)
+#define PPE_AC_UNICAST_QUEUE_SET_PRE_LIMIT(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_AC_UNICAST_QUEUE_CFG_W0_PRE_LIMIT, tbl_cfg, value)
+#define PPE_AC_UNICAST_QUEUE_SET_DYNAMIC(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_AC_UNICAST_QUEUE_CFG_W0_DYNAMIC, tbl_cfg, value)
+#define PPE_AC_UNICAST_QUEUE_SET_WEIGHT(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_AC_UNICAST_QUEUE_CFG_W0_WEIGHT, tbl_cfg, value)
+#define PPE_AC_UNICAST_QUEUE_SET_THRESHOLD(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_AC_UNICAST_QUEUE_CFG_W0_THRESHOLD, tbl_cfg, value)
+#define PPE_AC_UNICAST_QUEUE_SET_GRN_RESUME(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_AC_UNICAST_QUEUE_CFG_W3_GRN_RESUME, (tbl_cfg) + 0x3, value)
+
+/* PPE multicast queue (256-299) configurations. */
+#define PPE_AC_MULTICAST_QUEUE_CFG_TBL_ADDR 0x84a000
+#define PPE_AC_MULTICAST_QUEUE_CFG_TBL_ENTRIES 44
+#define PPE_AC_MULTICAST_QUEUE_CFG_TBL_INC 0x10
+#define PPE_AC_MULTICAST_QUEUE_CFG_W0_EN BIT(0)
+#define PPE_AC_MULTICAST_QUEUE_CFG_W0_FC_EN BIT(1)
+#define PPE_AC_MULTICAST_QUEUE_CFG_W0_CLR_AWARE BIT(2)
+#define PPE_AC_MULTICAST_QUEUE_CFG_W0_GRP_ID GENMASK(4, 3)
+#define PPE_AC_MULTICAST_QUEUE_CFG_W0_PRE_LIMIT GENMASK(15, 5)
+#define PPE_AC_MULTICAST_QUEUE_CFG_W0_THRESHOLD GENMASK(26, 16)
+#define PPE_AC_MULTICAST_QUEUE_CFG_W2_RESUME GENMASK(17, 7)
+
+#define PPE_AC_MULTICAST_QUEUE_SET_EN(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_AC_MULTICAST_QUEUE_CFG_W0_EN, tbl_cfg, value)
+#define PPE_AC_MULTICAST_QUEUE_SET_GRN_GRP_ID(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_AC_MULTICAST_QUEUE_CFG_W0_GRP_ID, tbl_cfg, value)
+#define PPE_AC_MULTICAST_QUEUE_SET_GRN_PRE_LIMIT(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_AC_MULTICAST_QUEUE_CFG_W0_PRE_LIMIT, tbl_cfg, value)
+#define PPE_AC_MULTICAST_QUEUE_SET_GRN_THRESHOLD(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_AC_MULTICAST_QUEUE_CFG_W0_THRESHOLD, tbl_cfg, value)
+#define PPE_AC_MULTICAST_QUEUE_SET_GRN_RESUME(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_AC_MULTICAST_QUEUE_CFG_W2_RESUME, (tbl_cfg) + 0x2, value)
+
+/* PPE admission control group (0-3) configurations. */
+#define PPE_AC_GRP_CFG_TBL_ADDR 0x84c000
+#define PPE_AC_GRP_CFG_TBL_ENTRIES 0x4
+#define PPE_AC_GRP_CFG_TBL_INC 0x10
+#define PPE_AC_GRP_W0_AC_EN BIT(0)
+#define PPE_AC_GRP_W0_AC_FC_EN BIT(1)
+#define PPE_AC_GRP_W0_CLR_AWARE BIT(2)
+#define PPE_AC_GRP_W0_THRESHOLD_LOW GENMASK(31, 25)
+#define PPE_AC_GRP_W1_THRESHOLD_HIGH GENMASK(3, 0)
+#define PPE_AC_GRP_W1_BUF_LIMIT GENMASK(14, 4)
+#define PPE_AC_GRP_W2_RESUME_GRN GENMASK(15, 5)
+#define PPE_AC_GRP_W2_PRE_ALLOC GENMASK(26, 16)
+
+#define PPE_AC_GRP_SET_BUF_LIMIT(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_AC_GRP_W1_BUF_LIMIT, (tbl_cfg) + 0x1, value)
+
+/* Counters for packets handled by unicast queues (0-255). */
+#define PPE_AC_UNICAST_QUEUE_CNT_TBL_ADDR 0x84e000
+#define PPE_AC_UNICAST_QUEUE_CNT_TBL_ENTRIES 256
+#define PPE_AC_UNICAST_QUEUE_CNT_TBL_INC 0x10
+#define PPE_AC_UNICAST_QUEUE_CNT_TBL_PEND_CNT GENMASK(12, 0)
+
+/* Counters for packets handled by multicast queues (256-299). */
+#define PPE_AC_MULTICAST_QUEUE_CNT_TBL_ADDR 0x852000
+#define PPE_AC_MULTICAST_QUEUE_CNT_TBL_ENTRIES 44
+#define PPE_AC_MULTICAST_QUEUE_CNT_TBL_INC 0x10
+#define PPE_AC_MULTICAST_QUEUE_CNT_TBL_PEND_CNT GENMASK(12, 0)
+
+/* Table addresses for per-queue enqueue setting. */
+#define PPE_ENQ_OPR_TBL_ADDR 0x85c000
+#define PPE_ENQ_OPR_TBL_ENTRIES 300
+#define PPE_ENQ_OPR_TBL_INC 0x10
+#define PPE_ENQ_OPR_TBL_ENQ_DISABLE BIT(0)
+
+/* The unicast drop count includes the possible WRED drops for the green,
+ * yellow and red categories.
+ */
+#define PPE_UNICAST_DROP_CNT_TBL_ADDR 0x9e0000
+#define PPE_UNICAST_DROP_CNT_TBL_ENTRIES 1536
+#define PPE_UNICAST_DROP_CNT_TBL_INC 0x10
+#define PPE_UNICAST_DROP_TYPES 6
+#define PPE_UNICAST_DROP_FORCE_OFFSET 3
+
+/* There are 16 multicast queues dedicated to CPU port 0. The multicast drop
+ * count includes forced drops for green, yellow and red category packets.
+ */
+#define PPE_P0_MULTICAST_DROP_CNT_TBL_ADDR 0x9f0000
+#define PPE_P0_MULTICAST_DROP_CNT_TBL_ENTRIES 48
+#define PPE_P0_MULTICAST_DROP_CNT_TBL_INC 0x10
+#define PPE_P0_MULTICAST_QUEUE_NUM 16
+
+/* Each PPE physical port has four dedicated multicast queues, and each queue
+ * has a forced-drop counter for the green, yellow and red categories, giving
+ * a total of 12 entries per port.
+ */
+#define PPE_MULTICAST_QUEUE_PORT_ADDR_INC 0x1000
+#define PPE_MULTICAST_DROP_CNT_TBL_INC 0x10
+#define PPE_MULTICAST_DROP_TYPES 3
+#define PPE_MULTICAST_QUEUE_NUM 4
+#define PPE_MULTICAST_DROP_CNT_TBL_ENTRIES 12
+
+#define PPE_CPU_PORT_MULTICAST_FORCE_DROP_CNT_TBL_ADDR(mq_offset) \
+ (PPE_P0_MULTICAST_DROP_CNT_TBL_ADDR + \
+ (mq_offset) * PPE_P0_MULTICAST_DROP_CNT_TBL_INC * \
+ PPE_MULTICAST_DROP_TYPES)
+
+#define PPE_P1_MULTICAST_DROP_CNT_TBL_ADDR \
+ (PPE_P0_MULTICAST_DROP_CNT_TBL_ADDR + PPE_MULTICAST_QUEUE_PORT_ADDR_INC)
+#endif
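
The *_SET_* helpers above edit one field of a multi-word table entry held in memory; the (tbl_cfg) + 0x1 / + 0x2 / + 0x3 offsets select which 32-bit word of the entry the field lives in. A minimal sketch of how such an entry might be composed and written back, assuming a regmap-backed register space (the regmap handle, the example values and the use of regmap_bulk_write() are illustrative assumptions, not part of this header):

#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/regmap.h>
#include <linux/types.h>

/* Illustrative only: fill one PPE_BM_PORT_FC_CFG_TBL entry and write it out.
 * The entry is PPE_BM_PORT_FC_CFG_TBL_INC (16) bytes, i.e. four 32-bit words.
 */
static int ppe_bm_port_fc_cfg_example(struct regmap *regmap, unsigned int bm_port)
{
	u32 tbl_cfg[PPE_BM_PORT_FC_CFG_TBL_INC / sizeof(u32)] = {};

	PPE_BM_PORT_FC_SET_REACT_LIMIT(tbl_cfg, 16);		/* word 0 */
	PPE_BM_PORT_FC_SET_RESUME_THRESHOLD(tbl_cfg, 96);	/* word 0 */
	PPE_BM_PORT_FC_SET_WEIGHT(tbl_cfg, 4);			/* word 1 */
	PPE_BM_PORT_FC_SET_DYNAMIC(tbl_cfg, 1);			/* word 1 */

	return regmap_bulk_write(regmap,
				 PPE_BM_PORT_FC_CFG_TBL_ADDR +
				 bm_port * PPE_BM_PORT_FC_CFG_TBL_INC,
				 tbl_cfg, ARRAY_SIZE(tbl_cfg));
}
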
diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c
index ff3b89e9028e..13deb3da4a64 100644
--- a/drivers/net/ethernet/qualcomm/qca_debug.c
+++ b/drivers/net/ethernet/qualcomm/qca_debug.c
@@ -98,10 +98,8 @@ qcaspi_info_show(struct seq_file *s, void *what)
seq_printf(s, "IRQ : %d\n",
qca->spi_dev->irq);
- seq_printf(s, "INTR REQ : %u\n",
- qca->intr_req);
- seq_printf(s, "INTR SVC : %u\n",
- qca->intr_svc);
+ seq_printf(s, "FLAGS : %lx\n",
+ qca->flags);
seq_printf(s, "SPI max speed : %lu\n",
(unsigned long)qca->spi_dev->max_speed_hz);
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 5799ecc88a87..38a779f4b866 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -35,6 +35,9 @@
#define MAX_DMA_BURST_LEN 5000
+#define SPI_INTR 0
+#define SPI_RESET 1
+
/* Modules parameters */
#define QCASPI_CLK_SPEED_MIN 1000000
#define QCASPI_CLK_SPEED_MAX 16000000
@@ -51,7 +54,7 @@ MODULE_PARM_DESC(qcaspi_burst_len, "Number of data bytes per burst. Use 1-5000."
#define QCASPI_PLUGGABLE_MIN 0
#define QCASPI_PLUGGABLE_MAX 1
-static int qcaspi_pluggable = QCASPI_PLUGGABLE_MIN;
+static int qcaspi_pluggable = QCASPI_PLUGGABLE_MAX;
module_param(qcaspi_pluggable, int, 0);
MODULE_PARM_DESC(qcaspi_pluggable, "Pluggable SPI connection (yes/no).");
@@ -493,7 +496,7 @@ qcaspi_qca7k_sync(struct qcaspi *qca, int event)
if (qca->sync == QCASPI_SYNC_READY)
qca->stats.bad_signature++;
- qca->sync = QCASPI_SYNC_UNKNOWN;
+ set_bit(SPI_RESET, &qca->flags);
netdev_dbg(qca->net_dev, "sync: got CPU on, but signature was invalid, restart\n");
return;
} else {
@@ -503,12 +506,17 @@ qcaspi_qca7k_sync(struct qcaspi *qca, int event)
if (wrbuf_space != QCASPI_HW_BUF_LEN) {
netdev_dbg(qca->net_dev, "sync: got CPU on, but wrbuf not empty. reset!\n");
qca->sync = QCASPI_SYNC_UNKNOWN;
+ qca->stats.buf_avail_err++;
} else {
netdev_dbg(qca->net_dev, "sync: got CPU on, now in sync\n");
qca->sync = QCASPI_SYNC_READY;
return;
}
}
+ } else {
+ /* Handle reset only on QCASPI_EVENT_UPDATE */
+ if (test_and_clear_bit(SPI_RESET, &qca->flags))
+ qca->sync = QCASPI_SYNC_UNKNOWN;
}
switch (qca->sync) {
@@ -519,7 +527,7 @@ qcaspi_qca7k_sync(struct qcaspi *qca, int event)
qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature);
if (signature != QCASPI_GOOD_SIGNATURE) {
- qca->sync = QCASPI_SYNC_UNKNOWN;
+ set_bit(SPI_RESET, &qca->flags);
qca->stats.bad_signature++;
netdev_dbg(qca->net_dev, "sync: bad signature, restart\n");
/* don't reset right away */
@@ -550,7 +558,7 @@ qcaspi_qca7k_sync(struct qcaspi *qca, int event)
qca->reset_count);
if (qca->reset_count >= QCASPI_RESET_TIMEOUT) {
/* reset did not seem to take place, try again */
- qca->sync = QCASPI_SYNC_UNKNOWN;
+ set_bit(SPI_RESET, &qca->flags);
qca->stats.reset_timeout++;
netdev_dbg(qca->net_dev, "sync: reset timeout, restarting process.\n");
}
@@ -579,14 +587,14 @@ qcaspi_spi_thread(void *data)
continue;
}
- if ((qca->intr_req == qca->intr_svc) &&
+ if (!qca->flags &&
!qca->txr.skb[qca->txr.head])
schedule();
set_current_state(TASK_RUNNING);
- netdev_dbg(qca->net_dev, "have work to do. int: %d, tx_skb: %p\n",
- qca->intr_req - qca->intr_svc,
+ netdev_dbg(qca->net_dev, "have work to do. int: %lu, tx_skb: %p\n",
+ qca->flags,
qca->txr.skb[qca->txr.head]);
qcaspi_qca7k_sync(qca, QCASPI_EVENT_UPDATE);
@@ -600,8 +608,7 @@ qcaspi_spi_thread(void *data)
msleep(QCASPI_QCA7K_REBOOT_TIME_MS);
}
- if (qca->intr_svc != qca->intr_req) {
- qca->intr_svc = qca->intr_req;
+ if (test_and_clear_bit(SPI_INTR, &qca->flags)) {
start_spi_intr_handling(qca, &intr_cause);
if (intr_cause & SPI_INT_CPU_ON) {
@@ -626,7 +633,7 @@ qcaspi_spi_thread(void *data)
/* restart sync */
netdev_dbg(qca->net_dev, "===> rdbuf error!\n");
qca->stats.read_buf_err++;
- qca->sync = QCASPI_SYNC_UNKNOWN;
+ set_bit(SPI_RESET, &qca->flags);
continue;
}
@@ -634,7 +641,7 @@ qcaspi_spi_thread(void *data)
/* restart sync */
netdev_dbg(qca->net_dev, "===> wrbuf error!\n");
qca->stats.write_buf_err++;
- qca->sync = QCASPI_SYNC_UNKNOWN;
+ set_bit(SPI_RESET, &qca->flags);
continue;
}
@@ -663,7 +670,7 @@ qcaspi_intr_handler(int irq, void *data)
{
struct qcaspi *qca = data;
- qca->intr_req++;
+ set_bit(SPI_INTR, &qca->flags);
if (qca->spi_thread)
wake_up_process(qca->spi_thread);
@@ -679,8 +686,7 @@ qcaspi_netdev_open(struct net_device *dev)
if (!qca)
return -EINVAL;
- qca->intr_req = 1;
- qca->intr_svc = 0;
+ set_bit(SPI_INTR, &qca->flags);
qca->sync = QCASPI_SYNC_UNKNOWN;
qcafrm_fsm_init_spi(&qca->frm_handle);
@@ -799,7 +805,7 @@ qcaspi_netdev_tx_timeout(struct net_device *dev, unsigned int txqueue)
jiffies, jiffies - dev_trans_start(dev));
qca->net_dev->stats.tx_errors++;
/* Trigger tx queue flush and QCA7000 reset */
- qca->sync = QCASPI_SYNC_UNKNOWN;
+ set_bit(SPI_RESET, &qca->flags);
if (qca->spi_thread)
wake_up_process(qca->spi_thread);
@@ -812,7 +818,6 @@ qcaspi_netdev_init(struct net_device *dev)
dev->mtu = QCAFRM_MAX_MTU;
dev->type = ARPHRD_ETHER;
- qca->clkspeed = qcaspi_clkspeed;
qca->burst_len = qcaspi_burst_len;
qca->spi_thread = NULL;
qca->buffer_size = (QCAFRM_MAX_MTU + VLAN_ETH_HLEN + QCAFRM_HEADER_LEN +
@@ -903,17 +908,15 @@ qca_spi_probe(struct spi_device *spi)
legacy_mode = of_property_read_bool(spi->dev.of_node,
"qca,legacy-mode");
- if (qcaspi_clkspeed == 0) {
- if (spi->max_speed_hz)
- qcaspi_clkspeed = spi->max_speed_hz;
- else
- qcaspi_clkspeed = QCASPI_CLK_SPEED;
- }
+ if (qcaspi_clkspeed)
+ spi->max_speed_hz = qcaspi_clkspeed;
+ else if (!spi->max_speed_hz)
+ spi->max_speed_hz = QCASPI_CLK_SPEED;
- if ((qcaspi_clkspeed < QCASPI_CLK_SPEED_MIN) ||
- (qcaspi_clkspeed > QCASPI_CLK_SPEED_MAX)) {
- dev_err(&spi->dev, "Invalid clkspeed: %d\n",
- qcaspi_clkspeed);
+ if (spi->max_speed_hz < QCASPI_CLK_SPEED_MIN ||
+ spi->max_speed_hz > QCASPI_CLK_SPEED_MAX) {
+ dev_err(&spi->dev, "Invalid clkspeed: %u\n",
+ spi->max_speed_hz);
return -EINVAL;
}
@@ -938,14 +941,13 @@ qca_spi_probe(struct spi_device *spi)
return -EINVAL;
}
- dev_info(&spi->dev, "ver=%s, clkspeed=%d, burst_len=%d, pluggable=%d\n",
+ dev_info(&spi->dev, "ver=%s, clkspeed=%u, burst_len=%d, pluggable=%d\n",
QCASPI_DRV_VERSION,
- qcaspi_clkspeed,
+ spi->max_speed_hz,
qcaspi_burst_len,
qcaspi_pluggable);
spi->mode = SPI_MODE_3;
- spi->max_speed_hz = qcaspi_clkspeed;
if (spi_setup(spi) < 0) {
dev_err(&spi->dev, "Unable to setup SPI device\n");
return -EFAULT;
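
The conversion above replaces the intr_req/intr_svc counter pair with a single atomic flags word: the interrupt handler and the tx-timeout path only set a bit and wake the SPI thread, and the thread consumes each event with test_and_clear_bit(). Stripped of the driver specifics, the pattern looks roughly like this (names are illustrative, not from the driver):

#include <linux/bitops.h>
#include <linux/sched.h>

#define EX_INTR		0
#define EX_RESET	1

static unsigned long ex_flags;

/* Producer side (IRQ or timeout handler): record the event, wake the thread. */
static void ex_signal_intr(struct task_struct *thread)
{
	set_bit(EX_INTR, &ex_flags);
	if (thread)
		wake_up_process(thread);
}

/* Consumer side (kthread loop): each event is observed exactly once. */
static void ex_thread_iteration(void)
{
	set_current_state(TASK_INTERRUPTIBLE);
	if (!ex_flags)
		schedule();		/* nothing pending, sleep until woken */
	set_current_state(TASK_RUNNING);

	if (test_and_clear_bit(EX_RESET, &ex_flags)) {
		/* restart the sync state machine */
	}
	if (test_and_clear_bit(EX_INTR, &ex_flags)) {
		/* service the chip interrupt */
	}
}
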
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.h b/drivers/net/ethernet/qualcomm/qca_spi.h
index d59cb2352cee..90b290f94c27 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.h
+++ b/drivers/net/ethernet/qualcomm/qca_spi.h
@@ -81,8 +81,7 @@ struct qcaspi {
struct qcafrm_handle frm_handle;
struct sk_buff *rx_skb;
- unsigned int intr_req;
- unsigned int intr_svc;
+ unsigned long flags;
u16 reset_count;
#ifdef CONFIG_DEBUG_FS
@@ -90,7 +89,6 @@ struct qcaspi {
#endif
/* user configurable options */
- u32 clkspeed;
u8 legacy_mode;
u16 burst_len;
};
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index f3bea196a8f9..ba8763cac9d9 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -117,11 +117,14 @@ static void rmnet_unregister_bridge(struct rmnet_port *port)
rmnet_unregister_real_device(bridge_dev);
}
-static int rmnet_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int rmnet_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *link_net = rtnl_newlink_link_net(params);
u32 data_format = RMNET_FLAGS_INGRESS_DEAGGREGATION;
+ struct nlattr **data = params->data;
+ struct nlattr **tb = params->tb;
struct net_device *real_dev;
int mode = RMNET_EPMODE_VND;
struct rmnet_endpoint *ep;
@@ -134,7 +137,7 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
return -EINVAL;
}
- real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
+ real_dev = __dev_get_by_index(link_net, nla_get_u32(tb[IFLA_LINK]));
if (!real_dev) {
NL_SET_ERR_MSG_MOD(extack, "link does not exist");
return -ENODEV;
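
The ->newlink() callback now receives a single struct rtnl_newlink_params instead of separate src_net/tb/data arguments; as used above, rtnl_newlink_link_net() supplies the namespace in which the IFLA_LINK ifindex is resolved. A sketch of the new shape, with illustrative names and only the parameter unpacking shown:

#include <linux/netdevice.h>
#include <net/rtnetlink.h>

static int ex_newlink(struct net_device *dev,
		      struct rtnl_newlink_params *params,
		      struct netlink_ext_ack *extack)
{
	struct net *link_net = rtnl_newlink_link_net(params);
	struct nlattr **tb = params->tb;
	struct net_device *real_dev;

	if (!tb[IFLA_LINK])
		return -EINVAL;

	/* resolve the lower device in the namespace the core picked */
	real_dev = __dev_get_by_index(link_net, nla_get_u32(tb[IFLA_LINK]));
	return real_dev ? 0 : -ENODEV;
}
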
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
index a5e3d1a88305..8b4640c5d61e 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
@@ -686,8 +686,8 @@ void rmnet_map_update_ul_agg_config(struct rmnet_port *port, u32 size,
void rmnet_map_tx_aggregate_init(struct rmnet_port *port)
{
- hrtimer_init(&port->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- port->hrtimer.function = rmnet_map_flush_tx_packet_queue;
+ hrtimer_setup(&port->hrtimer, rmnet_map_flush_tx_packet_queue, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
spin_lock_init(&port->agg_lock);
rmnet_map_update_ul_agg_config(port, 4096, 1, 800);
INIT_WORK(&port->agg_wq, rmnet_map_flush_tx_packet_work);
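
hrtimer_setup() folds the former hrtimer_init() call and the open-coded ->function assignment into one step, so the callback is bound at initialization time. A minimal sketch of the new-style initialization, with illustrative names:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static enum hrtimer_restart ex_timer_fn(struct hrtimer *t)
{
	/* do the deferred work here */
	return HRTIMER_NORESTART;
}

static void ex_timer_start(struct hrtimer *t)
{
	/* callback, clock and mode are now bound in a single call */
	hrtimer_setup(t, ex_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer_start(t, ms_to_ktime(100), HRTIMER_MODE_REL);
}
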
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index 9d2a9562c96f..4f0ddcedfa97 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -90,7 +90,7 @@ static int rmnet_vnd_change_mtu(struct net_device *rmnet_dev, int new_mtu)
new_mtu > (priv->real_dev->mtu - headroom))
return -EINVAL;
- rmnet_dev->mtu = new_mtu;
+ WRITE_ONCE(rmnet_dev->mtu, new_mtu);
return 0;
}
@@ -286,7 +286,7 @@ void rmnet_vnd_setup(struct net_device *rmnet_dev)
rmnet_dev->needs_free_netdev = true;
rmnet_dev->ethtool_ops = &rmnet_ethtool_ops;
- rmnet_dev->features |= NETIF_F_LLTX;
+ rmnet_dev->lltx = true;
/* This perm addr will be used as interface identifier by IPv6 */
rmnet_dev->addr_assign_type = NET_ADDR_RANDOM;
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index f5786d78ed23..5652da8a178c 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -1277,14 +1277,14 @@ static int cp_change_mtu(struct net_device *dev, int new_mtu)
/* if network interface not up, no need for complexity */
if (!netif_running(dev)) {
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
cp_set_rxbufsize(cp); /* set new rx buf size */
return 0;
}
/* network IS up, close it, reset MTU, and come up again. */
cp_close(dev);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
cp_set_rxbufsize(cp);
return cp_open(dev);
}
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 9ce0e8a64ba8..a73dcaffa8c5 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -1684,6 +1684,7 @@ static void rtl8139_tx_timeout_task (struct work_struct *work)
if (tmp8 & CmdTxEnb)
RTL_W8 (ChipCmd, CmdRxEnb);
+ netdev_lock(dev);
spin_lock_bh(&tp->rx_lock);
/* Disable interrupts by clearing the interrupt mask. */
RTL_W16 (IntrMask, 0x0000);
@@ -1694,11 +1695,12 @@ static void rtl8139_tx_timeout_task (struct work_struct *work)
spin_unlock_irq(&tp->lock);
/* ...and finally, reset everything */
- napi_enable(&tp->napi);
+ napi_enable_locked(&tp->napi);
rtl8139_hw_start(dev);
netif_wake_queue(dev);
spin_unlock_bh(&tp->rx_lock);
+ netdev_unlock(dev);
}
static void rtl8139_tx_timeout(struct net_device *dev, unsigned int txqueue)
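
napi_enable_locked() expects the caller to hold the netdev instance lock, which is why the timeout task above now brackets the reset path with netdev_lock()/netdev_unlock(). The core of that pattern, reduced to a sketch with illustrative names:

#include <linux/netdevice.h>

/* Re-enable NAPI from a context that does not already hold the instance lock. */
static void ex_restart_napi(struct net_device *dev, struct napi_struct *napi)
{
	netdev_lock(dev);
	napi_enable_locked(napi);	/* requires dev's instance lock */
	netdev_unlock(dev);
}
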
diff --git a/drivers/net/ethernet/realtek/Kconfig b/drivers/net/ethernet/realtek/Kconfig
index 03015b665f4e..272c83bfdc6c 100644
--- a/drivers/net/ethernet/realtek/Kconfig
+++ b/drivers/net/ethernet/realtek/Kconfig
@@ -58,7 +58,7 @@ config 8139TOO
config 8139TOO_PIO
bool "Use PIO instead of MMIO"
default y
- depends on 8139TOO
+ depends on 8139TOO && !NO_IOPORT_MAP
help
This instructs the driver to use programmed I/O ports (PIO) instead
of PCI shared memory (MMIO). This can possibly solve some problems
@@ -114,10 +114,30 @@ config R8169
will be called r8169. This is recommended.
config R8169_LEDS
- def_bool R8169 && LEDS_TRIGGER_NETDEV
+ bool "Support for controlling the NIC LEDs"
+ depends on R8169 && LEDS_TRIGGER_NETDEV
depends on !(R8169=y && LEDS_CLASS=m)
help
Optional support for controlling the NIC LED's with the netdev
LED trigger.
+config RTASE
+	tristate "Realtek Automotive Switch 9054/9068/9072/9075/9071 PCIe Interface support"
+ depends on PCI
+ select CRC32
+ select PAGE_POOL
+ help
+	  Say Y here if you have a Realtek Ethernet adapter belonging to
+	  one of the following families:
+	  RTL9054 5GBit Ethernet
+	  RTL9068 5GBit Ethernet
+	  RTL9072 5GBit Ethernet
+	  RTL9075 5GBit Ethernet
+	  RTL9071 5GBit Ethernet
+
+ To compile this driver as a module, choose M here: the module
+ will be called rtase. This is recommended.
+
endif # NET_VENDOR_REALTEK
diff --git a/drivers/net/ethernet/realtek/Makefile b/drivers/net/ethernet/realtek/Makefile
index 635491d8826e..046adf503ff4 100644
--- a/drivers/net/ethernet/realtek/Makefile
+++ b/drivers/net/ethernet/realtek/Makefile
@@ -9,3 +9,4 @@ obj-$(CONFIG_ATP) += atp.o
r8169-y += r8169_main.o r8169_firmware.o r8169_phy_config.o
r8169-$(CONFIG_R8169_LEDS) += r8169_leds.o
obj-$(CONFIG_R8169) += r8169.o
+obj-$(CONFIG_RTASE) += rtase/
diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c
index 6cbcb3164367..0d65434982a2 100644
--- a/drivers/net/ethernet/realtek/atp.c
+++ b/drivers/net/ethernet/realtek/atp.c
@@ -717,7 +717,7 @@ static irqreturn_t atp_interrupt(int irq, void *dev_instance)
problem where the adapter forgets its ethernet address. */
static void atp_timed_checker(struct timer_list *t)
{
- struct net_local *lp = from_timer(lp, t, timer);
+ struct net_local *lp = timer_container_of(lp, t, timer);
struct net_device *dev = lp->dev;
long ioaddr = dev->base_addr;
int tickssofar = jiffies - lp->last_rx_time;
@@ -832,7 +832,7 @@ net_close(struct net_device *dev)
netif_stop_queue(dev);
- del_timer_sync(&lp->timer);
+ timer_delete_sync(&lp->timer);
/* Flush the Tx and disable Rx here. */
lp->addr_mode = CMR2h_OFF;
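
timer_container_of() and timer_delete_sync() are the current names for the old from_timer()/del_timer_sync() pair; behaviour is unchanged. A small sketch of the pattern, with illustrative names:

#include <linux/jiffies.h>
#include <linux/timer.h>

struct ex_priv {
	struct timer_list timer;
};

static void ex_timer_cb(struct timer_list *t)
{
	/* recover the enclosing structure from the timer_list pointer */
	struct ex_priv *p = timer_container_of(p, t, timer);

	mod_timer(&p->timer, jiffies + HZ);	/* re-arm one second later */
}

static void ex_shutdown(struct ex_priv *p)
{
	/* wait for a concurrently running callback before tearing down */
	timer_delete_sync(&p->timer);
}
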
diff --git a/drivers/net/ethernet/realtek/r8169.h b/drivers/net/ethernet/realtek/r8169.h
index 00882ffc7a02..2c1a0c21af8d 100644
--- a/drivers/net/ethernet/realtek/r8169.h
+++ b/drivers/net/ethernet/realtek/r8169.h
@@ -23,7 +23,7 @@ enum mac_version {
RTL_GIGA_MAC_VER_08,
RTL_GIGA_MAC_VER_09,
RTL_GIGA_MAC_VER_10,
- RTL_GIGA_MAC_VER_11,
+ /* support for RTL_GIGA_MAC_VER_11 has been removed */
/* RTL_GIGA_MAC_VER_12 was handled the same as VER_17 */
/* RTL_GIGA_MAC_VER_13 was merged with VER_10 */
RTL_GIGA_MAC_VER_14,
@@ -64,12 +64,15 @@ enum mac_version {
/* support for RTL_GIGA_MAC_VER_50 has been removed */
RTL_GIGA_MAC_VER_51,
RTL_GIGA_MAC_VER_52,
- RTL_GIGA_MAC_VER_53,
/* support for RTL_GIGA_MAC_VER_60 has been removed */
RTL_GIGA_MAC_VER_61,
RTL_GIGA_MAC_VER_63,
- RTL_GIGA_MAC_VER_65,
- RTL_GIGA_MAC_NONE
+ RTL_GIGA_MAC_VER_64,
+ RTL_GIGA_MAC_VER_66,
+ RTL_GIGA_MAC_VER_70,
+ RTL_GIGA_MAC_VER_80,
+ RTL_GIGA_MAC_NONE,
+ RTL_GIGA_MAC_VER_LAST = RTL_GIGA_MAC_NONE - 1
};
struct rtl8169_private;
diff --git a/drivers/net/ethernet/realtek/r8169_firmware.c b/drivers/net/ethernet/realtek/r8169_firmware.c
index ed6e721b1555..bf055078a855 100644
--- a/drivers/net/ethernet/realtek/r8169_firmware.c
+++ b/drivers/net/ethernet/realtek/r8169_firmware.c
@@ -215,7 +215,7 @@ int rtl_fw_request_firmware(struct rtl_fw *rtl_fw)
{
int rc;
- rc = request_firmware(&rtl_fw->fw, rtl_fw->fw_name, rtl_fw->dev);
+ rc = firmware_request_nowarn(&rtl_fw->fw, rtl_fw->fw_name, rtl_fw->dev);
if (rc < 0)
goto out;
@@ -227,7 +227,7 @@ int rtl_fw_request_firmware(struct rtl_fw *rtl_fw)
return 0;
out:
- dev_err(rtl_fw->dev, "Unable to load firmware %s (%d)\n",
- rtl_fw->fw_name, rc);
+ dev_warn(rtl_fw->dev, "Unable to load firmware %s (%d)\n",
+ rtl_fw->fw_name, rc);
return rc;
}
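
firmware_request_nowarn() behaves like request_firmware() but suppresses the firmware core's own "not found" logging, leaving it to the caller to decide how loudly to complain; the driver accordingly downgrades its message to a warning since missing firmware is not fatal here. A sketch of the pattern with illustrative names:

#include <linux/device.h>
#include <linux/firmware.h>

/* Optional firmware: proceed without it, but leave a hint in the log. */
static int ex_load_optional_fw(struct device *dev, const struct firmware **fw,
			       const char *name)
{
	int rc = firmware_request_nowarn(fw, name, dev);

	if (rc < 0)
		dev_warn(dev, "optional firmware %s not loaded (%d)\n", name, rc);
	return rc;
}
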
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 0fc5fe564ae5..405e91eb3141 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -28,7 +28,7 @@
#include <linux/bitfield.h>
#include <linux/prefetch.h>
#include <linux/ipv6.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <net/ip6_checksum.h>
#include <net/netdev_queues.h>
@@ -55,7 +55,14 @@
#define FIRMWARE_8107E_2 "rtl_nic/rtl8107e-2.fw"
#define FIRMWARE_8125A_3 "rtl_nic/rtl8125a-3.fw"
#define FIRMWARE_8125B_2 "rtl_nic/rtl8125b-2.fw"
+#define FIRMWARE_8125D_1 "rtl_nic/rtl8125d-1.fw"
+#define FIRMWARE_8125D_2 "rtl_nic/rtl8125d-2.fw"
+#define FIRMWARE_8125K_1 "rtl_nic/rtl8125k-1.fw"
+#define FIRMWARE_8125BP_2 "rtl_nic/rtl8125bp-2.fw"
+#define FIRMWARE_9151A_1 "rtl_nic/rtl9151a-1.fw"
#define FIRMWARE_8126A_2 "rtl_nic/rtl8126a-2.fw"
+#define FIRMWARE_8126A_3 "rtl_nic/rtl8126a-3.fw"
+#define FIRMWARE_8127A_1 "rtl_nic/rtl8127a-1.fw"
#define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */
#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
@@ -85,59 +92,121 @@
#define JUMBO_6K (6 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN)
#define JUMBO_7K (7 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN)
#define JUMBO_9K (9 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN)
+#define JUMBO_16K (SZ_16K - VLAN_ETH_HLEN - ETH_FCS_LEN)
-static const struct {
+static const struct rtl_chip_info {
+ u16 mask;
+ u16 val;
+ enum mac_version mac_version;
const char *name;
const char *fw_name;
} rtl_chip_infos[] = {
- /* PCI devices. */
- [RTL_GIGA_MAC_VER_02] = {"RTL8169s" },
- [RTL_GIGA_MAC_VER_03] = {"RTL8110s" },
- [RTL_GIGA_MAC_VER_04] = {"RTL8169sb/8110sb" },
- [RTL_GIGA_MAC_VER_05] = {"RTL8169sc/8110sc" },
- [RTL_GIGA_MAC_VER_06] = {"RTL8169sc/8110sc" },
- /* PCI-E devices. */
- [RTL_GIGA_MAC_VER_07] = {"RTL8102e" },
- [RTL_GIGA_MAC_VER_08] = {"RTL8102e" },
- [RTL_GIGA_MAC_VER_09] = {"RTL8102e/RTL8103e" },
- [RTL_GIGA_MAC_VER_10] = {"RTL8101e/RTL8100e" },
- [RTL_GIGA_MAC_VER_11] = {"RTL8168b/8111b" },
- [RTL_GIGA_MAC_VER_14] = {"RTL8401" },
- [RTL_GIGA_MAC_VER_17] = {"RTL8168b/8111b" },
- [RTL_GIGA_MAC_VER_18] = {"RTL8168cp/8111cp" },
- [RTL_GIGA_MAC_VER_19] = {"RTL8168c/8111c" },
- [RTL_GIGA_MAC_VER_20] = {"RTL8168c/8111c" },
- [RTL_GIGA_MAC_VER_21] = {"RTL8168c/8111c" },
- [RTL_GIGA_MAC_VER_22] = {"RTL8168c/8111c" },
- [RTL_GIGA_MAC_VER_23] = {"RTL8168cp/8111cp" },
- [RTL_GIGA_MAC_VER_24] = {"RTL8168cp/8111cp" },
- [RTL_GIGA_MAC_VER_25] = {"RTL8168d/8111d", FIRMWARE_8168D_1},
- [RTL_GIGA_MAC_VER_26] = {"RTL8168d/8111d", FIRMWARE_8168D_2},
- [RTL_GIGA_MAC_VER_28] = {"RTL8168dp/8111dp" },
- [RTL_GIGA_MAC_VER_29] = {"RTL8105e", FIRMWARE_8105E_1},
- [RTL_GIGA_MAC_VER_30] = {"RTL8105e", FIRMWARE_8105E_1},
- [RTL_GIGA_MAC_VER_31] = {"RTL8168dp/8111dp" },
- [RTL_GIGA_MAC_VER_32] = {"RTL8168e/8111e", FIRMWARE_8168E_1},
- [RTL_GIGA_MAC_VER_33] = {"RTL8168e/8111e", FIRMWARE_8168E_2},
- [RTL_GIGA_MAC_VER_34] = {"RTL8168evl/8111evl", FIRMWARE_8168E_3},
- [RTL_GIGA_MAC_VER_35] = {"RTL8168f/8111f", FIRMWARE_8168F_1},
- [RTL_GIGA_MAC_VER_36] = {"RTL8168f/8111f", FIRMWARE_8168F_2},
- [RTL_GIGA_MAC_VER_37] = {"RTL8402", FIRMWARE_8402_1 },
- [RTL_GIGA_MAC_VER_38] = {"RTL8411", FIRMWARE_8411_1 },
- [RTL_GIGA_MAC_VER_39] = {"RTL8106e", FIRMWARE_8106E_1},
- [RTL_GIGA_MAC_VER_40] = {"RTL8168g/8111g", FIRMWARE_8168G_2},
- [RTL_GIGA_MAC_VER_42] = {"RTL8168gu/8111gu", FIRMWARE_8168G_3},
- [RTL_GIGA_MAC_VER_43] = {"RTL8106eus", FIRMWARE_8106E_2},
- [RTL_GIGA_MAC_VER_44] = {"RTL8411b", FIRMWARE_8411_2 },
- [RTL_GIGA_MAC_VER_46] = {"RTL8168h/8111h", FIRMWARE_8168H_2},
- [RTL_GIGA_MAC_VER_48] = {"RTL8107e", FIRMWARE_8107E_2},
- [RTL_GIGA_MAC_VER_51] = {"RTL8168ep/8111ep" },
- [RTL_GIGA_MAC_VER_52] = {"RTL8168fp/RTL8117", FIRMWARE_8168FP_3},
- [RTL_GIGA_MAC_VER_53] = {"RTL8168fp/RTL8117", },
- [RTL_GIGA_MAC_VER_61] = {"RTL8125A", FIRMWARE_8125A_3},
- /* reserve 62 for CFG_METHOD_4 in the vendor driver */
- [RTL_GIGA_MAC_VER_63] = {"RTL8125B", FIRMWARE_8125B_2},
- [RTL_GIGA_MAC_VER_65] = {"RTL8126A", FIRMWARE_8126A_2},
+ /* 8127A family. */
+ { 0x7cf, 0x6c9, RTL_GIGA_MAC_VER_80, "RTL8127A", FIRMWARE_8127A_1 },
+
+ /* 8126A family. */
+ { 0x7cf, 0x64a, RTL_GIGA_MAC_VER_70, "RTL8126A", FIRMWARE_8126A_3 },
+ { 0x7cf, 0x649, RTL_GIGA_MAC_VER_70, "RTL8126A", FIRMWARE_8126A_2 },
+
+ /* 8125BP family. */
+ { 0x7cf, 0x681, RTL_GIGA_MAC_VER_66, "RTL8125BP", FIRMWARE_8125BP_2 },
+
+ /* 8125D family. */
+ { 0x7cf, 0x68b, RTL_GIGA_MAC_VER_64, "RTL9151A", FIRMWARE_9151A_1 },
+ { 0x7cf, 0x68a, RTL_GIGA_MAC_VER_64, "RTL8125K", FIRMWARE_8125K_1 },
+ { 0x7cf, 0x689, RTL_GIGA_MAC_VER_64, "RTL8125D", FIRMWARE_8125D_2 },
+ { 0x7cf, 0x688, RTL_GIGA_MAC_VER_64, "RTL8125D", FIRMWARE_8125D_1 },
+
+ /* 8125B family. */
+ { 0x7cf, 0x641, RTL_GIGA_MAC_VER_63, "RTL8125B", FIRMWARE_8125B_2 },
+
+ /* 8125A family. */
+ { 0x7cf, 0x609, RTL_GIGA_MAC_VER_61, "RTL8125A", FIRMWARE_8125A_3 },
+
+ /* RTL8117 */
+ { 0x7cf, 0x54b, RTL_GIGA_MAC_VER_52, "RTL8168fp/RTL8117" },
+ { 0x7cf, 0x54a, RTL_GIGA_MAC_VER_52, "RTL8168fp/RTL8117",
+ FIRMWARE_8168FP_3 },
+
+ /* 8168EP family. */
+ { 0x7cf, 0x502, RTL_GIGA_MAC_VER_51, "RTL8168ep/8111ep" },
+
+ /* 8168H family. */
+ { 0x7cf, 0x541, RTL_GIGA_MAC_VER_46, "RTL8168h/8111h",
+ FIRMWARE_8168H_2 },
+ /* Realtek calls it RTL8168M, but it's handled like RTL8168H */
+ { 0x7cf, 0x6c0, RTL_GIGA_MAC_VER_46, "RTL8168M", FIRMWARE_8168H_2 },
+
+ /* 8168G family. */
+ { 0x7cf, 0x5c8, RTL_GIGA_MAC_VER_44, "RTL8411b", FIRMWARE_8411_2 },
+ { 0x7cf, 0x509, RTL_GIGA_MAC_VER_42, "RTL8168gu/8111gu",
+ FIRMWARE_8168G_3 },
+ { 0x7cf, 0x4c0, RTL_GIGA_MAC_VER_40, "RTL8168g/8111g",
+ FIRMWARE_8168G_2 },
+
+ /* 8168F family. */
+ { 0x7c8, 0x488, RTL_GIGA_MAC_VER_38, "RTL8411", FIRMWARE_8411_1 },
+ { 0x7cf, 0x481, RTL_GIGA_MAC_VER_36, "RTL8168f/8111f",
+ FIRMWARE_8168F_2 },
+ { 0x7cf, 0x480, RTL_GIGA_MAC_VER_35, "RTL8168f/8111f",
+ FIRMWARE_8168F_1 },
+
+ /* 8168E family. */
+ { 0x7c8, 0x2c8, RTL_GIGA_MAC_VER_34, "RTL8168evl/8111evl",
+ FIRMWARE_8168E_3 },
+ { 0x7cf, 0x2c1, RTL_GIGA_MAC_VER_32, "RTL8168e/8111e",
+ FIRMWARE_8168E_1 },
+ { 0x7c8, 0x2c0, RTL_GIGA_MAC_VER_33, "RTL8168e/8111e",
+ FIRMWARE_8168E_2 },
+
+ /* 8168D family. */
+ { 0x7cf, 0x281, RTL_GIGA_MAC_VER_25, "RTL8168d/8111d",
+ FIRMWARE_8168D_1 },
+ { 0x7c8, 0x280, RTL_GIGA_MAC_VER_26, "RTL8168d/8111d",
+ FIRMWARE_8168D_2 },
+
+ /* 8168DP family. */
+ { 0x7cf, 0x28a, RTL_GIGA_MAC_VER_28, "RTL8168dp/8111dp" },
+ { 0x7cf, 0x28b, RTL_GIGA_MAC_VER_31, "RTL8168dp/8111dp" },
+
+ /* 8168C family. */
+ { 0x7cf, 0x3c9, RTL_GIGA_MAC_VER_23, "RTL8168cp/8111cp" },
+ { 0x7cf, 0x3c8, RTL_GIGA_MAC_VER_18, "RTL8168cp/8111cp" },
+ { 0x7c8, 0x3c8, RTL_GIGA_MAC_VER_24, "RTL8168cp/8111cp" },
+ { 0x7cf, 0x3c0, RTL_GIGA_MAC_VER_19, "RTL8168c/8111c" },
+ { 0x7cf, 0x3c2, RTL_GIGA_MAC_VER_20, "RTL8168c/8111c" },
+ { 0x7cf, 0x3c3, RTL_GIGA_MAC_VER_21, "RTL8168c/8111c" },
+ { 0x7c8, 0x3c0, RTL_GIGA_MAC_VER_22, "RTL8168c/8111c" },
+
+ /* 8168B family. */
+ { 0x7c8, 0x380, RTL_GIGA_MAC_VER_17, "RTL8168b/8111b" },
+ /* This one is very old and rare, support has been removed.
+ * { 0x7c8, 0x300, RTL_GIGA_MAC_VER_11, "RTL8168b/8111b" },
+ */
+
+ /* 8101 family. */
+ { 0x7c8, 0x448, RTL_GIGA_MAC_VER_39, "RTL8106e", FIRMWARE_8106E_1 },
+ { 0x7c8, 0x440, RTL_GIGA_MAC_VER_37, "RTL8402", FIRMWARE_8402_1 },
+ { 0x7cf, 0x409, RTL_GIGA_MAC_VER_29, "RTL8105e", FIRMWARE_8105E_1 },
+ { 0x7c8, 0x408, RTL_GIGA_MAC_VER_30, "RTL8105e", FIRMWARE_8105E_1 },
+ { 0x7cf, 0x349, RTL_GIGA_MAC_VER_08, "RTL8102e" },
+ { 0x7cf, 0x249, RTL_GIGA_MAC_VER_08, "RTL8102e" },
+ { 0x7cf, 0x348, RTL_GIGA_MAC_VER_07, "RTL8102e" },
+ { 0x7cf, 0x248, RTL_GIGA_MAC_VER_07, "RTL8102e" },
+ { 0x7cf, 0x240, RTL_GIGA_MAC_VER_14, "RTL8401" },
+ { 0x7c8, 0x348, RTL_GIGA_MAC_VER_09, "RTL8102e/RTL8103e" },
+ { 0x7c8, 0x248, RTL_GIGA_MAC_VER_09, "RTL8102e/RTL8103e" },
+ { 0x7c8, 0x340, RTL_GIGA_MAC_VER_10, "RTL8101e/RTL8100e" },
+
+ /* 8110 family. */
+ { 0xfc8, 0x980, RTL_GIGA_MAC_VER_06, "RTL8169sc/8110sc" },
+ { 0xfc8, 0x180, RTL_GIGA_MAC_VER_05, "RTL8169sc/8110sc" },
+ { 0xfc8, 0x100, RTL_GIGA_MAC_VER_04, "RTL8169sb/8110sb" },
+ { 0xfc8, 0x040, RTL_GIGA_MAC_VER_03, "RTL8110s" },
+ { 0xfc8, 0x008, RTL_GIGA_MAC_VER_02, "RTL8169s" },
+
+ /* Catch-all */
+ { 0x000, 0x000, RTL_GIGA_MAC_NONE }
};
static const struct pci_device_id rtl8169_pci_tbl[] = {
@@ -151,8 +220,6 @@ static const struct pci_device_id rtl8169_pci_tbl[] = {
{ PCI_VDEVICE(REALTEK, 0x8168) },
{ PCI_VDEVICE(NCUBE, 0x8168) },
{ PCI_VDEVICE(REALTEK, 0x8169) },
- { PCI_VENDOR_ID_DLINK, 0x4300,
- PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0 },
{ PCI_VDEVICE(DLINK, 0x4300) },
{ PCI_VDEVICE(DLINK, 0x4302) },
{ PCI_VDEVICE(AT, 0xc107) },
@@ -161,7 +228,10 @@ static const struct pci_device_id rtl8169_pci_tbl[] = {
{ 0x0001, 0x8168, PCI_ANY_ID, 0x2410 },
{ PCI_VDEVICE(REALTEK, 0x8125) },
{ PCI_VDEVICE(REALTEK, 0x8126) },
+ { PCI_VDEVICE(REALTEK, 0x8127) },
{ PCI_VDEVICE(REALTEK, 0x3000) },
+ { PCI_VDEVICE(REALTEK, 0x5000) },
+ { PCI_VDEVICE(REALTEK, 0x0e10) },
{}
};
@@ -342,6 +412,8 @@ enum rtl8125_registers {
TxPoll_8125 = 0x90,
LEDSEL3 = 0x96,
MAC0_BKP = 0x19e0,
+ RSS_CTRL_8125 = 0x4500,
+ Q_NUM_CTRL_8125 = 0x4800,
EEE_TXIDLE_TIMER_8125 = 0x6048,
};
@@ -576,7 +648,34 @@ struct rtl8169_counters {
__le64 rx_broadcast;
__le32 rx_multicast;
__le16 tx_aborted;
- __le16 tx_underun;
+ __le16 tx_underrun;
+ /* new since RTL8125 */
+ __le64 tx_octets;
+ __le64 rx_octets;
+ __le64 rx_multicast64;
+ __le64 tx_unicast64;
+ __le64 tx_broadcast64;
+ __le64 tx_multicast64;
+ __le32 tx_pause_on;
+ __le32 tx_pause_off;
+ __le32 tx_pause_all;
+ __le32 tx_deferred;
+ __le32 tx_late_collision;
+ __le32 tx_all_collision;
+ __le32 tx_aborted32;
+ __le32 align_errors32;
+ __le32 rx_frame_too_long;
+ __le32 rx_runt;
+ __le32 rx_pause_on;
+ __le32 rx_pause_off;
+ __le32 rx_pause_all;
+ __le32 rx_unknown_opcode;
+ __le32 rx_mac_error;
+ __le32 tx_underrun32;
+ __le32 rx_mac_missed;
+ __le32 rx_tcam_dropped;
+ __le32 tdu;
+ __le32 rdu;
};
struct rtl8169_tc_offsets {
@@ -588,9 +687,7 @@ struct rtl8169_tc_offsets {
};
enum rtl_flag {
- RTL_FLAG_TASK_ENABLED = 0,
RTL_FLAG_TASK_RESET_PENDING,
- RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE,
RTL_FLAG_TASK_TX_TIMEOUT,
RTL_FLAG_MAX
};
@@ -599,6 +696,7 @@ enum rtl_dash_type {
RTL_DASH_NONE,
RTL_DASH_DP,
RTL_DASH_EP,
+ RTL_DASH_25_BP,
};
struct rtl8169_private {
@@ -629,13 +727,9 @@ struct rtl8169_private {
struct work_struct work;
} wk;
- raw_spinlock_t config25_lock;
raw_spinlock_t mac_ocp_lock;
struct mutex led_lock; /* serialize LED ctrl RMW access */
- raw_spinlock_t cfg9346_usage_lock;
- int cfg9346_usage_count;
-
unsigned supports_gmii:1;
unsigned aspm_manageable:1;
unsigned dash_enabled:1;
@@ -678,7 +772,14 @@ MODULE_FIRMWARE(FIRMWARE_8168FP_3);
MODULE_FIRMWARE(FIRMWARE_8107E_2);
MODULE_FIRMWARE(FIRMWARE_8125A_3);
MODULE_FIRMWARE(FIRMWARE_8125B_2);
+MODULE_FIRMWARE(FIRMWARE_8125D_1);
+MODULE_FIRMWARE(FIRMWARE_8125D_2);
+MODULE_FIRMWARE(FIRMWARE_8125K_1);
+MODULE_FIRMWARE(FIRMWARE_8125BP_2);
+MODULE_FIRMWARE(FIRMWARE_9151A_1);
MODULE_FIRMWARE(FIRMWARE_8126A_2);
+MODULE_FIRMWARE(FIRMWARE_8126A_3);
+MODULE_FIRMWARE(FIRMWARE_8127A_1);
static inline struct device *tp_to_dev(struct rtl8169_private *tp)
{
@@ -687,22 +788,12 @@ static inline struct device *tp_to_dev(struct rtl8169_private *tp)
static void rtl_lock_config_regs(struct rtl8169_private *tp)
{
- unsigned long flags;
-
- raw_spin_lock_irqsave(&tp->cfg9346_usage_lock, flags);
- if (!--tp->cfg9346_usage_count)
- RTL_W8(tp, Cfg9346, Cfg9346_Lock);
- raw_spin_unlock_irqrestore(&tp->cfg9346_usage_lock, flags);
+ RTL_W8(tp, Cfg9346, Cfg9346_Lock);
}
static void rtl_unlock_config_regs(struct rtl8169_private *tp)
{
- unsigned long flags;
-
- raw_spin_lock_irqsave(&tp->cfg9346_usage_lock, flags);
- if (!tp->cfg9346_usage_count++)
- RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
- raw_spin_unlock_irqrestore(&tp->cfg9346_usage_lock, flags);
+ RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
}
static void rtl_pci_commit(struct rtl8169_private *tp)
@@ -713,24 +804,32 @@ static void rtl_pci_commit(struct rtl8169_private *tp)
static void rtl_mod_config2(struct rtl8169_private *tp, u8 clear, u8 set)
{
- unsigned long flags;
u8 val;
- raw_spin_lock_irqsave(&tp->config25_lock, flags);
val = RTL_R8(tp, Config2);
RTL_W8(tp, Config2, (val & ~clear) | set);
- raw_spin_unlock_irqrestore(&tp->config25_lock, flags);
}
static void rtl_mod_config5(struct rtl8169_private *tp, u8 clear, u8 set)
{
- unsigned long flags;
u8 val;
- raw_spin_lock_irqsave(&tp->config25_lock, flags);
val = RTL_R8(tp, Config5);
RTL_W8(tp, Config5, (val & ~clear) | set);
- raw_spin_unlock_irqrestore(&tp->config25_lock, flags);
+}
+
+static void r8169_mod_reg8_cond(struct rtl8169_private *tp, int reg,
+ u8 bits, bool cond)
+{
+ u8 val, old_val;
+
+ old_val = RTL_R8(tp, reg);
+ if (cond)
+ val = old_val | bits;
+ else
+ val = old_val & ~bits;
+ if (val != old_val)
+ RTL_W8(tp, reg, val);
}
static bool rtl_is_8125(struct rtl8169_private *tp)
@@ -742,7 +841,7 @@ static bool rtl_is_8168evl_up(struct rtl8169_private *tp)
{
return tp->mac_version >= RTL_GIGA_MAC_VER_34 &&
tp->mac_version != RTL_GIGA_MAC_VER_39 &&
- tp->mac_version <= RTL_GIGA_MAC_VER_53;
+ tp->mac_version <= RTL_GIGA_MAC_VER_52;
}
static bool rtl_supports_eee(struct rtl8169_private *tp)
@@ -910,9 +1009,7 @@ void r8169_get_led_name(struct rtl8169_private *tp, int idx,
static void r8168fp_adjust_ocp_cmd(struct rtl8169_private *tp, u32 *cmd, int type)
{
/* based on RTL8168FP_OOBMAC_BASE in vendor driver */
- if (type == ERIAR_OOB &&
- (tp->mac_version == RTL_GIGA_MAC_VER_52 ||
- tp->mac_version == RTL_GIGA_MAC_VER_53))
+ if (type == ERIAR_OOB && tp->mac_version == RTL_GIGA_MAC_VER_52)
*cmd |= 0xf70 << 18;
}
@@ -1201,7 +1298,7 @@ static void rtl_writephy(struct rtl8169_private *tp, int location, int val)
case RTL_GIGA_MAC_VER_31:
r8168dp_2_mdio_write(tp, location, val);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_65:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_LAST:
r8168g_mdio_write(tp, location, val);
break;
default:
@@ -1216,7 +1313,7 @@ static int rtl_readphy(struct rtl8169_private *tp, int location)
case RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
return r8168dp_2_mdio_read(tp, location);
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_65:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_LAST:
return r8168g_mdio_read(tp, location);
default:
return r8169_mdio_read(tp, location);
@@ -1316,46 +1413,34 @@ static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
RTL_W8(tp, IBCR0, RTL_R8(tp, IBCR0) & ~0x01);
}
-static void rtl_dash_loop_wait(struct rtl8169_private *tp,
- const struct rtl_cond *c,
- unsigned long usecs, int n, bool high)
-{
- if (!tp->dash_enabled)
- return;
- rtl_loop_wait(tp, c, usecs, n, high);
-}
-
-static void rtl_dash_loop_wait_high(struct rtl8169_private *tp,
- const struct rtl_cond *c,
- unsigned long d, int n)
-{
- rtl_dash_loop_wait(tp, c, d, n, true);
-}
-
-static void rtl_dash_loop_wait_low(struct rtl8169_private *tp,
- const struct rtl_cond *c,
- unsigned long d, int n)
-{
- rtl_dash_loop_wait(tp, c, d, n, false);
-}
-
static void rtl8168dp_driver_start(struct rtl8169_private *tp)
{
r8168dp_oob_notify(tp, OOB_CMD_DRIVER_START);
- rtl_dash_loop_wait_high(tp, &rtl_dp_ocp_read_cond, 10000, 10);
+ if (tp->dash_enabled)
+ rtl_loop_wait_high(tp, &rtl_dp_ocp_read_cond, 10000, 10);
}
static void rtl8168ep_driver_start(struct rtl8169_private *tp)
{
r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START);
r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01);
- rtl_dash_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 30);
+ if (tp->dash_enabled)
+ rtl_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 30);
+}
+
+static void rtl8125bp_driver_start(struct rtl8169_private *tp)
+{
+ r8168ep_ocp_write(tp, 0x01, 0x14, OOB_CMD_DRIVER_START);
+ r8168ep_ocp_write(tp, 0x01, 0x18, 0x00);
+ r8168ep_ocp_write(tp, 0x01, 0x10, 0x01);
}
static void rtl8168_driver_start(struct rtl8169_private *tp)
{
if (tp->dash_type == RTL_DASH_DP)
rtl8168dp_driver_start(tp);
+ else if (tp->dash_type == RTL_DASH_25_BP)
+ rtl8125bp_driver_start(tp);
else
rtl8168ep_driver_start(tp);
}
@@ -1363,7 +1448,8 @@ static void rtl8168_driver_start(struct rtl8169_private *tp)
static void rtl8168dp_driver_stop(struct rtl8169_private *tp)
{
r8168dp_oob_notify(tp, OOB_CMD_DRIVER_STOP);
- rtl_dash_loop_wait_low(tp, &rtl_dp_ocp_read_cond, 10000, 10);
+ if (tp->dash_enabled)
+ rtl_loop_wait_low(tp, &rtl_dp_ocp_read_cond, 10000, 10);
}
static void rtl8168ep_driver_stop(struct rtl8169_private *tp)
@@ -1371,13 +1457,23 @@ static void rtl8168ep_driver_stop(struct rtl8169_private *tp)
rtl8168ep_stop_cmac(tp);
r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_STOP);
r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01);
- rtl_dash_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10000, 10);
+ if (tp->dash_enabled)
+ rtl_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10000, 10);
+}
+
+static void rtl8125bp_driver_stop(struct rtl8169_private *tp)
+{
+ r8168ep_ocp_write(tp, 0x01, 0x14, OOB_CMD_DRIVER_STOP);
+ r8168ep_ocp_write(tp, 0x01, 0x18, 0x00);
+ r8168ep_ocp_write(tp, 0x01, 0x10, 0x01);
}
static void rtl8168_driver_stop(struct rtl8169_private *tp)
{
if (tp->dash_type == RTL_DASH_DP)
rtl8168dp_driver_stop(tp);
+ else if (tp->dash_type == RTL_DASH_25_BP)
+ rtl8125bp_driver_stop(tp);
else
rtl8168ep_driver_stop(tp);
}
@@ -1400,6 +1496,7 @@ static bool rtl_dash_is_enabled(struct rtl8169_private *tp)
case RTL_DASH_DP:
return r8168dp_check_dash(tp);
case RTL_DASH_EP:
+ case RTL_DASH_25_BP:
return r8168ep_check_dash(tp);
default:
return false;
@@ -1412,8 +1509,10 @@ static enum rtl_dash_type rtl_get_dash_type(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
return RTL_DASH_DP;
- case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_53:
+ case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_52:
return RTL_DASH_EP;
+ case RTL_GIGA_MAC_VER_66:
+ return RTL_DASH_25_BP;
default:
return RTL_DASH_NONE;
}
@@ -1422,16 +1521,17 @@ static enum rtl_dash_type rtl_get_dash_type(struct rtl8169_private *tp)
static void rtl_set_d3_pll_down(struct rtl8169_private *tp, bool enable)
{
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26:
- case RTL_GIGA_MAC_VER_29 ... RTL_GIGA_MAC_VER_30:
- case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_37:
- case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_65:
- if (enable)
- RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~D3_NO_PLL_DOWN);
- else
- RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | D3_NO_PLL_DOWN);
+ case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_24:
+ case RTL_GIGA_MAC_VER_28:
+ case RTL_GIGA_MAC_VER_31:
+ case RTL_GIGA_MAC_VER_38:
+ break;
+ case RTL_GIGA_MAC_VER_80:
+ r8169_mod_reg8_cond(tp, PMCH, D3_NO_PLL_DOWN, true);
break;
default:
+ r8169_mod_reg8_cond(tp, PMCH, D3HOT_NO_PLL_DOWN, true);
+ r8169_mod_reg8_cond(tp, PMCH, D3COLD_NO_PLL_DOWN, !enable);
break;
}
}
@@ -1542,61 +1642,40 @@ static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
{
- static const struct {
- u32 opt;
- u16 reg;
- u8 mask;
- } cfg[] = {
- { WAKE_PHY, Config3, LinkUp },
- { WAKE_UCAST, Config5, UWF },
- { WAKE_BCAST, Config5, BWF },
- { WAKE_MCAST, Config5, MWF },
- { WAKE_ANY, Config5, LanWake },
- { WAKE_MAGIC, Config3, MagicPacket }
- };
- unsigned int i, tmp = ARRAY_SIZE(cfg);
- unsigned long flags;
- u8 options;
-
rtl_unlock_config_regs(tp);
if (rtl_is_8168evl_up(tp)) {
- tmp--;
if (wolopts & WAKE_MAGIC)
rtl_eri_set_bits(tp, 0x0dc, MagicPacket_v2);
else
rtl_eri_clear_bits(tp, 0x0dc, MagicPacket_v2);
} else if (rtl_is_8125(tp)) {
- tmp--;
if (wolopts & WAKE_MAGIC)
r8168_mac_ocp_modify(tp, 0xc0b6, 0, BIT(0));
else
r8168_mac_ocp_modify(tp, 0xc0b6, BIT(0), 0);
+ } else {
+ r8169_mod_reg8_cond(tp, Config3, MagicPacket,
+ wolopts & WAKE_MAGIC);
}
- raw_spin_lock_irqsave(&tp->config25_lock, flags);
- for (i = 0; i < tmp; i++) {
- options = RTL_R8(tp, cfg[i].reg) & ~cfg[i].mask;
- if (wolopts & cfg[i].opt)
- options |= cfg[i].mask;
- RTL_W8(tp, cfg[i].reg, options);
- }
- raw_spin_unlock_irqrestore(&tp->config25_lock, flags);
+ r8169_mod_reg8_cond(tp, Config3, LinkUp, wolopts & WAKE_PHY);
+ if (rtl_is_8125(tp))
+ r8168_mac_ocp_modify(tp, 0xe0c6, 0x3f,
+ wolopts & WAKE_PHY ? 0x13 : 0);
+ r8169_mod_reg8_cond(tp, Config5, UWF, wolopts & WAKE_UCAST);
+ r8169_mod_reg8_cond(tp, Config5, BWF, wolopts & WAKE_BCAST);
+ r8169_mod_reg8_cond(tp, Config5, MWF, wolopts & WAKE_MCAST);
+ r8169_mod_reg8_cond(tp, Config5, LanWake, wolopts);
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
- options = RTL_R8(tp, Config1) & ~PMEnable;
- if (wolopts)
- options |= PMEnable;
- RTL_W8(tp, Config1, options);
+ r8169_mod_reg8_cond(tp, Config1, PMEnable, wolopts);
break;
case RTL_GIGA_MAC_VER_34:
case RTL_GIGA_MAC_VER_37:
- case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_65:
- if (wolopts)
- rtl_mod_config2(tp, 0, PME_SIGNAL);
- else
- rtl_mod_config2(tp, PME_SIGNAL, 0);
+ case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_LAST:
+ r8169_mod_reg8_cond(tp, Config2, PME_SIGNAL, wolopts);
break;
default:
break;
@@ -1608,7 +1687,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
if (!tp->dash_enabled) {
rtl_set_d3_pll_down(tp, !wolopts);
- tp->dev->wol_enabled = wolopts ? 1 : 0;
+ tp->dev->ethtool->wol_enabled = wolopts ? 1 : 0;
}
}
@@ -1841,7 +1920,7 @@ static void rtl8169_get_ethtool_stats(struct net_device *dev,
data[9] = le64_to_cpu(counters->rx_broadcast);
data[10] = le32_to_cpu(counters->rx_multicast);
data[11] = le16_to_cpu(counters->tx_aborted);
- data[12] = le16_to_cpu(counters->tx_underun);
+ data[12] = le16_to_cpu(counters->tx_underrun);
}
static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -2068,9 +2147,7 @@ static void rtl_set_eee_txidle_timer(struct rtl8169_private *tp)
tp->tx_lpi_timer = timer_val;
r8168_mac_ocp_write(tp, 0xe048, timer_val);
break;
- case RTL_GIGA_MAC_VER_61:
- case RTL_GIGA_MAC_VER_63:
- case RTL_GIGA_MAC_VER_65:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_LAST:
tp->tx_lpi_timer = timer_val;
RTL_W16(tp, EEE_TXIDLE_TIMER_8125, timer_val);
break;
@@ -2129,6 +2206,19 @@ static void rtl8169_get_ringparam(struct net_device *dev,
data->tx_pending = NUM_TX_DESC;
}
+static void rtl8169_get_pause_stats(struct net_device *dev,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ if (!rtl_is_8125(tp))
+ return;
+
+ rtl8169_update_counters(tp);
+ pause_stats->tx_pause_frames = le32_to_cpu(tp->counters->tx_pause_on);
+ pause_stats->rx_pause_frames = le32_to_cpu(tp->counters->rx_pause_on);
+}
+
static void rtl8169_get_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *data)
{
@@ -2155,6 +2245,69 @@ static int rtl8169_set_pauseparam(struct net_device *dev,
return 0;
}
+static void rtl8169_get_eth_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ rtl8169_update_counters(tp);
+
+ mac_stats->FramesTransmittedOK =
+ le64_to_cpu(tp->counters->tx_packets);
+ mac_stats->SingleCollisionFrames =
+ le32_to_cpu(tp->counters->tx_one_collision);
+ mac_stats->MultipleCollisionFrames =
+ le32_to_cpu(tp->counters->tx_multi_collision);
+ mac_stats->FramesReceivedOK =
+ le64_to_cpu(tp->counters->rx_packets);
+ mac_stats->AlignmentErrors =
+ le16_to_cpu(tp->counters->align_errors);
+ mac_stats->FramesLostDueToIntMACXmitError =
+ le64_to_cpu(tp->counters->tx_errors);
+ mac_stats->BroadcastFramesReceivedOK =
+ le64_to_cpu(tp->counters->rx_broadcast);
+ mac_stats->MulticastFramesReceivedOK =
+ le32_to_cpu(tp->counters->rx_multicast);
+
+ if (!rtl_is_8125(tp))
+ return;
+
+ mac_stats->AlignmentErrors =
+ le32_to_cpu(tp->counters->align_errors32);
+ mac_stats->OctetsTransmittedOK =
+ le64_to_cpu(tp->counters->tx_octets);
+ mac_stats->LateCollisions =
+ le32_to_cpu(tp->counters->tx_late_collision);
+ mac_stats->FramesAbortedDueToXSColls =
+ le32_to_cpu(tp->counters->tx_aborted32);
+ mac_stats->OctetsReceivedOK =
+ le64_to_cpu(tp->counters->rx_octets);
+ mac_stats->FramesLostDueToIntMACRcvError =
+ le32_to_cpu(tp->counters->rx_mac_error);
+ mac_stats->MulticastFramesXmittedOK =
+ le64_to_cpu(tp->counters->tx_multicast64);
+ mac_stats->BroadcastFramesXmittedOK =
+ le64_to_cpu(tp->counters->tx_broadcast64);
+ mac_stats->MulticastFramesReceivedOK =
+ le64_to_cpu(tp->counters->rx_multicast64);
+ mac_stats->FrameTooLongErrors =
+ le32_to_cpu(tp->counters->rx_frame_too_long);
+}
+
+static void rtl8169_get_eth_ctrl_stats(struct net_device *dev,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ if (!rtl_is_8125(tp))
+ return;
+
+ rtl8169_update_counters(tp);
+
+ ctrl_stats->UnsupportedOpcodesReceived =
+ le32_to_cpu(tp->counters->rx_unknown_opcode);
+}
+
static const struct ethtool_ops rtl8169_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
@@ -2176,143 +2329,37 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
.get_ringparam = rtl8169_get_ringparam,
+ .get_pause_stats = rtl8169_get_pause_stats,
.get_pauseparam = rtl8169_get_pauseparam,
.set_pauseparam = rtl8169_set_pauseparam,
+ .get_eth_mac_stats = rtl8169_get_eth_mac_stats,
+ .get_eth_ctrl_stats = rtl8169_get_eth_ctrl_stats,
};
-static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
+static const struct rtl_chip_info *rtl8169_get_chip_version(u16 xid, bool gmii)
{
- /*
- * The driver currently handles the 8168Bf and the 8168Be identically
- * but they can be identified more specifically through the test below
- * if needed:
- *
- * (RTL_R32(tp, TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be
- *
- * Same thing for the 8101Eb and the 8101Ec:
- *
- * (RTL_R32(tp, TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
- */
- static const struct rtl_mac_info {
- u16 mask;
- u16 val;
- enum mac_version ver;
- } mac_info[] = {
- /* 8126A family. */
- { 0x7cf, 0x649, RTL_GIGA_MAC_VER_65 },
-
- /* 8125B family. */
- { 0x7cf, 0x641, RTL_GIGA_MAC_VER_63 },
-
- /* 8125A family. */
- { 0x7cf, 0x609, RTL_GIGA_MAC_VER_61 },
- /* It seems only XID 609 made it to the mass market.
- * { 0x7cf, 0x608, RTL_GIGA_MAC_VER_60 },
- * { 0x7c8, 0x608, RTL_GIGA_MAC_VER_61 },
- */
-
- /* RTL8117 */
- { 0x7cf, 0x54b, RTL_GIGA_MAC_VER_53 },
- { 0x7cf, 0x54a, RTL_GIGA_MAC_VER_52 },
-
- /* 8168EP family. */
- { 0x7cf, 0x502, RTL_GIGA_MAC_VER_51 },
- /* It seems this chip version never made it to
- * the wild. Let's disable detection.
- * { 0x7cf, 0x501, RTL_GIGA_MAC_VER_50 },
- * { 0x7cf, 0x500, RTL_GIGA_MAC_VER_49 },
- */
-
- /* 8168H family. */
- { 0x7cf, 0x541, RTL_GIGA_MAC_VER_46 },
- /* It seems this chip version never made it to
- * the wild. Let's disable detection.
- * { 0x7cf, 0x540, RTL_GIGA_MAC_VER_45 },
- */
-
- /* 8168G family. */
- { 0x7cf, 0x5c8, RTL_GIGA_MAC_VER_44 },
- { 0x7cf, 0x509, RTL_GIGA_MAC_VER_42 },
- /* It seems this chip version never made it to
- * the wild. Let's disable detection.
- * { 0x7cf, 0x4c1, RTL_GIGA_MAC_VER_41 },
- */
- { 0x7cf, 0x4c0, RTL_GIGA_MAC_VER_40 },
-
- /* 8168F family. */
- { 0x7c8, 0x488, RTL_GIGA_MAC_VER_38 },
- { 0x7cf, 0x481, RTL_GIGA_MAC_VER_36 },
- { 0x7cf, 0x480, RTL_GIGA_MAC_VER_35 },
-
- /* 8168E family. */
- { 0x7c8, 0x2c8, RTL_GIGA_MAC_VER_34 },
- { 0x7cf, 0x2c1, RTL_GIGA_MAC_VER_32 },
- { 0x7c8, 0x2c0, RTL_GIGA_MAC_VER_33 },
-
- /* 8168D family. */
- { 0x7cf, 0x281, RTL_GIGA_MAC_VER_25 },
- { 0x7c8, 0x280, RTL_GIGA_MAC_VER_26 },
-
- /* 8168DP family. */
- /* It seems this early RTL8168dp version never made it to
- * the wild. Support has been removed.
- * { 0x7cf, 0x288, RTL_GIGA_MAC_VER_27 },
- */
- { 0x7cf, 0x28a, RTL_GIGA_MAC_VER_28 },
- { 0x7cf, 0x28b, RTL_GIGA_MAC_VER_31 },
-
- /* 8168C family. */
- { 0x7cf, 0x3c9, RTL_GIGA_MAC_VER_23 },
- { 0x7cf, 0x3c8, RTL_GIGA_MAC_VER_18 },
- { 0x7c8, 0x3c8, RTL_GIGA_MAC_VER_24 },
- { 0x7cf, 0x3c0, RTL_GIGA_MAC_VER_19 },
- { 0x7cf, 0x3c2, RTL_GIGA_MAC_VER_20 },
- { 0x7cf, 0x3c3, RTL_GIGA_MAC_VER_21 },
- { 0x7c8, 0x3c0, RTL_GIGA_MAC_VER_22 },
-
- /* 8168B family. */
- { 0x7c8, 0x380, RTL_GIGA_MAC_VER_17 },
- { 0x7c8, 0x300, RTL_GIGA_MAC_VER_11 },
-
- /* 8101 family. */
- { 0x7c8, 0x448, RTL_GIGA_MAC_VER_39 },
- { 0x7c8, 0x440, RTL_GIGA_MAC_VER_37 },
- { 0x7cf, 0x409, RTL_GIGA_MAC_VER_29 },
- { 0x7c8, 0x408, RTL_GIGA_MAC_VER_30 },
- { 0x7cf, 0x349, RTL_GIGA_MAC_VER_08 },
- { 0x7cf, 0x249, RTL_GIGA_MAC_VER_08 },
- { 0x7cf, 0x348, RTL_GIGA_MAC_VER_07 },
- { 0x7cf, 0x248, RTL_GIGA_MAC_VER_07 },
- { 0x7cf, 0x240, RTL_GIGA_MAC_VER_14 },
- { 0x7c8, 0x348, RTL_GIGA_MAC_VER_09 },
- { 0x7c8, 0x248, RTL_GIGA_MAC_VER_09 },
- { 0x7c8, 0x340, RTL_GIGA_MAC_VER_10 },
-
- /* 8110 family. */
- { 0xfc8, 0x980, RTL_GIGA_MAC_VER_06 },
- { 0xfc8, 0x180, RTL_GIGA_MAC_VER_05 },
- { 0xfc8, 0x100, RTL_GIGA_MAC_VER_04 },
- { 0xfc8, 0x040, RTL_GIGA_MAC_VER_03 },
- { 0xfc8, 0x008, RTL_GIGA_MAC_VER_02 },
-
- /* Catch-all */
- { 0x000, 0x000, RTL_GIGA_MAC_NONE }
+ /* Chips combining a 1Gbps MAC with a 100Mbps PHY */
+ static const struct rtl_chip_info rtl8106eus_info = {
+ .mac_version = RTL_GIGA_MAC_VER_43,
+ .name = "RTL8106eus",
+ .fw_name = FIRMWARE_8106E_2,
+ };
+ static const struct rtl_chip_info rtl8107e_info = {
+ .mac_version = RTL_GIGA_MAC_VER_48,
+ .name = "RTL8107e",
+ .fw_name = FIRMWARE_8107E_2,
};
- const struct rtl_mac_info *p = mac_info;
- enum mac_version ver;
+ const struct rtl_chip_info *p = rtl_chip_infos;
while ((xid & p->mask) != p->val)
p++;
- ver = p->ver;
- if (ver != RTL_GIGA_MAC_NONE && !gmii) {
- if (ver == RTL_GIGA_MAC_VER_42)
- ver = RTL_GIGA_MAC_VER_43;
- else if (ver == RTL_GIGA_MAC_VER_46)
- ver = RTL_GIGA_MAC_VER_48;
- }
+ if (p->mac_version == RTL_GIGA_MAC_VER_42 && !gmii)
+ return &rtl8106eus_info;
+ if (p->mac_version == RTL_GIGA_MAC_VER_46 && !gmii)
+ return &rtl8107e_info;
- return ver;
+ return p;
}
static void rtl_release_firmware(struct rtl8169_private *tp)
@@ -2341,26 +2388,6 @@ void r8169_apply_firmware(struct rtl8169_private *tp)
}
}
-static void rtl8168_config_eee_mac(struct rtl8169_private *tp)
-{
- /* Adjust EEE LED frequency */
- if (tp->mac_version != RTL_GIGA_MAC_VER_38)
- RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07);
-
- rtl_eri_set_bits(tp, 0x1b0, 0x0003);
-}
-
-static void rtl8125a_config_eee_mac(struct rtl8169_private *tp)
-{
- r8168_mac_ocp_modify(tp, 0xe040, 0, BIT(1) | BIT(0));
- r8168_mac_ocp_modify(tp, 0xeb62, 0, BIT(2) | BIT(1));
-}
-
-static void rtl8125b_config_eee_mac(struct rtl8169_private *tp)
-{
- r8168_mac_ocp_modify(tp, 0xe040, 0, BIT(1) | BIT(0));
-}
-
static void rtl_rar_exgmac_set(struct rtl8169_private *tp, const u8 *addr)
{
rtl_eri_write(tp, 0xe0, ERIAR_MASK_1111, get_unaligned_le32(addr));
@@ -2387,11 +2414,9 @@ u16 rtl8168h_2_get_adc_bias_ioffset(struct rtl8169_private *tp)
static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
{
- if (!test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags))
- return;
-
set_bit(flag, tp->wk.flags);
- schedule_work(&tp->wk.work);
+ if (!schedule_work(&tp->wk.work))
+ clear_bit(flag, tp->wk.flags);
}
static void rtl8169_init_phy(struct rtl8169_private *tp)
@@ -2458,14 +2483,13 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_38:
RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_53:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_52:
RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
break;
case RTL_GIGA_MAC_VER_61:
RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST);
break;
- case RTL_GIGA_MAC_VER_63:
- case RTL_GIGA_MAC_VER_65:
+ case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_LAST:
RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST |
RX_PAUSE_SLOT_ON);
break;
@@ -2480,86 +2504,31 @@ static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
tp->dirty_tx = tp->cur_tx = tp->cur_rx = 0;
}
-static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
-{
- RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0);
- RTL_W8(tp, Config4, RTL_R8(tp, Config4) | Jumbo_En1);
-}
-
-static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp)
-{
- RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0);
- RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~Jumbo_En1);
-}
-
-static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp)
-{
- RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0);
-}
-
-static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp)
-{
- RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0);
-}
-
-static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
-{
- RTL_W8(tp, MaxTxPacketSize, 0x24);
- RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0);
- RTL_W8(tp, Config4, RTL_R8(tp, Config4) | 0x01);
-}
-
-static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
-{
- RTL_W8(tp, MaxTxPacketSize, 0x3f);
- RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0);
- RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~0x01);
-}
-
-static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp)
-{
- RTL_W8(tp, Config4, RTL_R8(tp, Config4) | (1 << 0));
-}
-
-static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
-{
- RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~(1 << 0));
-}
-
static void rtl_jumbo_config(struct rtl8169_private *tp)
{
bool jumbo = tp->dev->mtu > ETH_DATA_LEN;
int readrq = 4096;
+ if (jumbo && tp->mac_version >= RTL_GIGA_MAC_VER_17 &&
+ tp->mac_version <= RTL_GIGA_MAC_VER_26)
+ readrq = 512;
+
rtl_unlock_config_regs(tp);
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_17:
- if (jumbo) {
- readrq = 512;
- r8168b_1_hw_jumbo_enable(tp);
- } else {
- r8168b_1_hw_jumbo_disable(tp);
- }
+ r8169_mod_reg8_cond(tp, Config4, BIT(0), jumbo);
break;
case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_26:
- if (jumbo) {
- readrq = 512;
- r8168c_hw_jumbo_enable(tp);
- } else {
- r8168c_hw_jumbo_disable(tp);
- }
+ r8169_mod_reg8_cond(tp, Config3, Jumbo_En0, jumbo);
+ r8169_mod_reg8_cond(tp, Config4, Jumbo_En1, jumbo);
break;
case RTL_GIGA_MAC_VER_28:
- if (jumbo)
- r8168dp_hw_jumbo_enable(tp);
- else
- r8168dp_hw_jumbo_disable(tp);
+ r8169_mod_reg8_cond(tp, Config3, Jumbo_En0, jumbo);
break;
case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_33:
- if (jumbo)
- r8168e_hw_jumbo_enable(tp);
- else
- r8168e_hw_jumbo_disable(tp);
+ RTL_W8(tp, MaxTxPacketSize, jumbo ? 0x24 : 0x3f);
+ r8169_mod_reg8_cond(tp, Config3, Jumbo_En0, jumbo);
+ r8169_mod_reg8_cond(tp, Config4, BIT(0), jumbo);
break;
default:
break;
@@ -2645,14 +2614,14 @@ DECLARE_RTL_COND(rtl_rxtx_empty_cond_2)
static void rtl_wait_txrx_fifo_empty(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_53:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_52:
rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42);
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
break;
case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_61:
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
break;
- case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_65:
+ case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_LAST:
RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond_2, 100, 42);
@@ -2813,10 +2782,45 @@ static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
RTL_R32(tp, CSIDR) : ~0;
}
+static void rtl_csi_mod(struct rtl8169_private *tp, int addr,
+ u32 mask, u32 set)
+{
+ u32 val;
+
+ WARN(addr % 4, "Invalid CSI address %#x\n", addr);
+
+ netdev_notice_once(tp->dev,
+ "No native access to PCI extended config space, falling back to CSI\n");
+
+ val = rtl_csi_read(tp, addr);
+ rtl_csi_write(tp, addr, (val & ~mask) | set);
+}
+
+static void rtl_disable_zrxdc_timeout(struct rtl8169_private *tp)
+{
+ struct pci_dev *pdev = tp->pci_dev;
+ int rc;
+ u8 val;
+
+#define RTL_GEN3_RELATED_OFF 0x0890
+#define RTL_GEN3_ZRXDC_NONCOMPL 0x1
+ if (pdev->cfg_size > RTL_GEN3_RELATED_OFF) {
+ rc = pci_read_config_byte(pdev, RTL_GEN3_RELATED_OFF, &val);
+ if (rc == PCIBIOS_SUCCESSFUL) {
+ val &= ~RTL_GEN3_ZRXDC_NONCOMPL;
+ rc = pci_write_config_byte(pdev, RTL_GEN3_RELATED_OFF,
+ val);
+ if (rc == PCIBIOS_SUCCESSFUL)
+ return;
+ }
+ }
+
+ rtl_csi_mod(tp, RTL_GEN3_RELATED_OFF, RTL_GEN3_ZRXDC_NONCOMPL, 0);
+}
+
static void rtl_set_aspm_entry_latency(struct rtl8169_private *tp, u8 val)
{
struct pci_dev *pdev = tp->pci_dev;
- u32 csi;
/* According to Realtek the value at config space address 0x070f
* controls the L0s/L1 entrance latency. We try standard ECAM access
@@ -2828,10 +2832,7 @@ static void rtl_set_aspm_entry_latency(struct rtl8169_private *tp, u8 val)
pci_write_config_byte(pdev, 0x070f, val) == PCIBIOS_SUCCESSFUL)
return;
- netdev_notice_once(tp->dev,
- "No native access to PCI extended config space, falling back to CSI\n");
- csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff;
- rtl_csi_write(tp, 0x070c, csi | val << 24);
+ rtl_csi_mod(tp, 0x070c, 0xff000000, val << 24);
}
static void rtl_set_def_aspm_entry_latency(struct rtl8169_private *tp)
@@ -2895,7 +2896,7 @@ static void rtl_enable_exit_l1(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_38:
rtl_eri_set_bits(tp, 0xd4, 0x0c00);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_65:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_LAST:
r8168_mac_ocp_modify(tp, 0xc0ac, 0, 0x1f80);
break;
default:
@@ -2909,7 +2910,7 @@ static void rtl_disable_exit_l1(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38:
rtl_eri_clear_bits(tp, 0xd4, 0x1f00);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_65:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_LAST:
r8168_mac_ocp_modify(tp, 0xc0ac, 0x1f80, 0);
break;
default:
@@ -2935,7 +2936,8 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
rtl_mod_config5(tp, 0, ASPM_en);
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_65:
+ case RTL_GIGA_MAC_VER_70:
+ case RTL_GIGA_MAC_VER_80:
val8 = RTL_R8(tp, INT_CFG0_8125) | INT_CFG0_CLKREQEN;
RTL_W8(tp, INT_CFG0_8125, val8);
break;
@@ -2946,7 +2948,7 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_65:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_LAST:
/* reset ephy tx/rx disable timer */
r8168_mac_ocp_modify(tp, 0xe094, 0xff00, 0);
/* chip can trigger L1.2 */
@@ -2958,7 +2960,7 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
} else {
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_65:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_LAST:
r8168_mac_ocp_modify(tp, 0xe092, 0x00ff, 0);
break;
default:
@@ -2966,7 +2968,8 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
}
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_65:
+ case RTL_GIGA_MAC_VER_70:
+ case RTL_GIGA_MAC_VER_80:
val8 = RTL_R8(tp, INT_CFG0_8125) & ~INT_CFG0_CLKREQEN;
RTL_W8(tp, INT_CFG0_8125, val8);
break;
@@ -3162,8 +3165,6 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
- rtl8168_config_eee_mac(tp);
-
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN);
rtl_mod_config5(tp, Spi_en, 0);
@@ -3188,8 +3189,6 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp)
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN);
rtl_mod_config5(tp, Spi_en, 0);
-
- rtl8168_config_eee_mac(tp);
}
static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
@@ -3239,8 +3238,6 @@ static void rtl_hw_start_8168g(struct rtl8169_private *tp)
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
- rtl8168_config_eee_mac(tp);
-
rtl_w0w1_eri(tp, 0x2fc, 0x01, 0x06);
rtl_eri_clear_bits(tp, 0x1b0, BIT(12));
@@ -3381,8 +3378,6 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
- rtl8168_config_eee_mac(tp);
-
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN);
@@ -3401,7 +3396,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
r8168_mac_ocp_modify(tp, 0xd412, 0x0fff, sw_cnt_1ms_ini);
}
- r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0070);
+ r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0000);
r8168_mac_ocp_modify(tp, 0xe052, 0x6000, 0x8008);
r8168_mac_ocp_modify(tp, 0xe0d6, 0x01ff, 0x017f);
r8168_mac_ocp_modify(tp, 0xd420, 0x0fff, 0x047f);
@@ -3430,8 +3425,6 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
- rtl8168_config_eee_mac(tp);
-
rtl_w0w1_eri(tp, 0x2fc, 0x01, 0x06);
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN);
@@ -3487,8 +3480,6 @@ static void rtl_hw_start_8117(struct rtl8169_private *tp)
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
- rtl8168_config_eee_mac(tp);
-
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN);
@@ -3506,7 +3497,7 @@ static void rtl_hw_start_8117(struct rtl8169_private *tp)
r8168_mac_ocp_modify(tp, 0xd412, 0x0fff, sw_cnt_1ms_ini);
}
- r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0070);
+ r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0000);
r8168_mac_ocp_write(tp, 0xea80, 0x0003);
r8168_mac_ocp_modify(tp, 0xe052, 0x0000, 0x0009);
r8168_mac_ocp_modify(tp, 0xd420, 0x0fff, 0x047f);
@@ -3668,8 +3659,8 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
rtl_pcie_state_l2l3_disable(tp);
RTL_W16(tp, 0x382, 0x221b);
- RTL_W8(tp, 0x4500, 0);
- RTL_W16(tp, 0x4800, 0);
+ RTL_W32(tp, RSS_CTRL_8125, 0);
+ RTL_W16(tp, Q_NUM_CTRL_8125, 0);
/* disable UPS */
r8168_mac_ocp_modify(tp, 0xd40a, 0x0010, 0x0000);
@@ -3686,10 +3677,13 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
/* disable new tx descriptor format */
r8168_mac_ocp_modify(tp, 0xeb58, 0x0001, 0x0000);
- if (tp->mac_version == RTL_GIGA_MAC_VER_65)
+ if (tp->mac_version == RTL_GIGA_MAC_VER_70 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_80)
RTL_W8(tp, 0xD8, RTL_R8(tp, 0xD8) & ~0x02);
- if (tp->mac_version == RTL_GIGA_MAC_VER_65)
+ if (tp->mac_version == RTL_GIGA_MAC_VER_80)
+ r8168_mac_ocp_modify(tp, 0xe614, 0x0f00, 0x0f00);
+ else if (tp->mac_version == RTL_GIGA_MAC_VER_70)
r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0400);
else if (tp->mac_version == RTL_GIGA_MAC_VER_63)
r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0200);
@@ -3704,10 +3698,11 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
r8168_mac_ocp_modify(tp, 0xc0b4, 0x0000, 0x000c);
r8168_mac_ocp_modify(tp, 0xeb6a, 0x00ff, 0x0033);
r8168_mac_ocp_modify(tp, 0xeb50, 0x03e0, 0x0040);
- r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0030);
+ r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0000);
r8168_mac_ocp_modify(tp, 0xe040, 0x1000, 0x0000);
r8168_mac_ocp_modify(tp, 0xea1c, 0x0003, 0x0001);
- if (tp->mac_version == RTL_GIGA_MAC_VER_65)
+ if (tp->mac_version == RTL_GIGA_MAC_VER_70 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_80)
r8168_mac_ocp_modify(tp, 0xea1c, 0x0300, 0x0000);
else
r8168_mac_ocp_modify(tp, 0xea1c, 0x0004, 0x0000);
@@ -3725,11 +3720,6 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
rtl_loop_wait_low(tp, &rtl_mac_ocp_e00e_cond, 1000, 10);
- if (tp->mac_version == RTL_GIGA_MAC_VER_61)
- rtl8125a_config_eee_mac(tp);
- else
- rtl8125b_config_eee_mac(tp);
-
rtl_disable_rxdvgate(tp);
}
@@ -3772,8 +3762,21 @@ static void rtl_hw_start_8125b(struct rtl8169_private *tp)
rtl_hw_start_8125_common(tp);
}
+static void rtl_hw_start_8125d(struct rtl8169_private *tp)
+{
+ rtl_set_def_aspm_entry_latency(tp);
+ rtl_hw_start_8125_common(tp);
+}
+
static void rtl_hw_start_8126a(struct rtl8169_private *tp)
{
+ rtl_disable_zrxdc_timeout(tp);
+ rtl_set_def_aspm_entry_latency(tp);
+ rtl_hw_start_8125_common(tp);
+}
+
+static void rtl_hw_start_8127a(struct rtl8169_private *tp)
+{
rtl_set_def_aspm_entry_latency(tp);
rtl_hw_start_8125_common(tp);
}
@@ -3785,7 +3788,6 @@ static void rtl_hw_config(struct rtl8169_private *tp)
[RTL_GIGA_MAC_VER_08] = rtl_hw_start_8102e_3,
[RTL_GIGA_MAC_VER_09] = rtl_hw_start_8102e_2,
[RTL_GIGA_MAC_VER_10] = NULL,
- [RTL_GIGA_MAC_VER_11] = rtl_hw_start_8168b,
[RTL_GIGA_MAC_VER_14] = rtl_hw_start_8401,
[RTL_GIGA_MAC_VER_17] = rtl_hw_start_8168b,
[RTL_GIGA_MAC_VER_18] = rtl_hw_start_8168cp_1,
@@ -3817,10 +3819,12 @@ static void rtl_hw_config(struct rtl8169_private *tp)
[RTL_GIGA_MAC_VER_48] = rtl_hw_start_8168h_1,
[RTL_GIGA_MAC_VER_51] = rtl_hw_start_8168ep_3,
[RTL_GIGA_MAC_VER_52] = rtl_hw_start_8117,
- [RTL_GIGA_MAC_VER_53] = rtl_hw_start_8117,
[RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125a_2,
[RTL_GIGA_MAC_VER_63] = rtl_hw_start_8125b,
- [RTL_GIGA_MAC_VER_65] = rtl_hw_start_8126a,
+ [RTL_GIGA_MAC_VER_64] = rtl_hw_start_8125d,
+ [RTL_GIGA_MAC_VER_66] = rtl_hw_start_8125d,
+ [RTL_GIGA_MAC_VER_70] = rtl_hw_start_8126a,
+ [RTL_GIGA_MAC_VER_80] = rtl_hw_start_8127a,
};
if (hw_configs[tp->mac_version])
@@ -3836,11 +3840,16 @@ static void rtl_hw_start_8125(struct rtl8169_private *tp)
/* disable interrupt coalescing */
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_61:
+ case RTL_GIGA_MAC_VER_64:
+ case RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_80:
for (i = 0xa00; i < 0xb00; i += 4)
RTL_W32(tp, i, 0);
+ if (tp->mac_version == RTL_GIGA_MAC_VER_80)
+ RTL_W16(tp, INT_CFG1_8125, 0x0000);
break;
case RTL_GIGA_MAC_VER_63:
- case RTL_GIGA_MAC_VER_65:
+ case RTL_GIGA_MAC_VER_70:
for (i = 0xa00; i < 0xa80; i += 4)
RTL_W32(tp, i, 0);
RTL_W16(tp, INT_CFG1_8125, 0x0000);
@@ -3849,6 +3858,9 @@ static void rtl_hw_start_8125(struct rtl8169_private *tp)
break;
}
+ /* enable extended tally counter */
+ r8168_mac_ocp_modify(tp, 0xea84, 0, BIT(1) | BIT(0));
+
rtl_hw_config(tp);
}
@@ -3922,7 +3934,7 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
{
struct rtl8169_private *tp = netdev_priv(dev);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
netdev_update_features(dev);
rtl_jumbo_config(tp);
rtl_set_eee_txidle_timer(tp);
@@ -4069,7 +4081,7 @@ static void rtl8169_cleanup(struct rtl8169_private *tp)
RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_65:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_LAST:
rtl_enable_rxdvgate(tp);
fsleep(2000);
break;
@@ -4189,8 +4201,8 @@ static unsigned int rtl8125_quirk_udp_padto(struct rtl8169_private *tp,
{
unsigned int padto = 0, len = skb->len;
- if (rtl_is_8125(tp) && len < 128 + RTL_MIN_PATCH_LEN &&
- rtl_skb_is_udp(skb) && skb_transport_header_was_set(skb)) {
+ if (len < 128 + RTL_MIN_PATCH_LEN && rtl_skb_is_udp(skb) &&
+ skb_transport_header_was_set(skb)) {
unsigned int trans_data_len = skb_tail_pointer(skb) -
skb_transport_header(skb);
@@ -4214,13 +4226,19 @@ static unsigned int rtl8125_quirk_udp_padto(struct rtl8169_private *tp,
static unsigned int rtl_quirk_packet_padto(struct rtl8169_private *tp,
struct sk_buff *skb)
{
- unsigned int padto;
+ unsigned int padto = 0;
- padto = rtl8125_quirk_udp_padto(tp, skb);
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_63:
+ padto = rtl8125_quirk_udp_padto(tp, skb);
+ break;
+ default:
+ break;
+ }
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_34:
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_65:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_LAST:
padto = max_t(unsigned int, padto, ETH_ZLEN);
break;
default:
@@ -4335,17 +4353,18 @@ static void rtl8169_doorbell(struct rtl8169_private *tp)
static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- unsigned int frags = skb_shinfo(skb)->nr_frags;
struct rtl8169_private *tp = netdev_priv(dev);
unsigned int entry = tp->cur_tx % NUM_TX_DESC;
struct TxDesc *txd_first, *txd_last;
bool stop_queue, door_bell;
+ unsigned int frags;
u32 opts[2];
if (unlikely(!rtl_tx_slots_avail(tp))) {
if (net_ratelimit())
netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
- goto err_stop_0;
+ netif_stop_queue(dev);
+ return NETDEV_TX_BUSY;
}
opts[1] = rtl8169_tx_vlan_tag(skb);
@@ -4362,6 +4381,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
txd_first = tp->TxDescArray + entry;
+ frags = skb_shinfo(skb)->nr_frags;
if (frags) {
if (rtl8169_xmit_frags(tp, skb, opts, entry))
goto err_dma_1;
@@ -4400,11 +4420,6 @@ err_dma_0:
dev_kfree_skb_any(skb);
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
-
-err_stop_0:
- netif_stop_queue(dev);
- dev->stats.tx_dropped++;
- return NETDEV_TX_BUSY;
}
static unsigned int rtl_last_frag_len(struct sk_buff *skb)
@@ -4641,7 +4656,9 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
if ((status & 0xffff) == 0xffff || !(status & tp->irq_mask))
return IRQ_NONE;
- if (unlikely(status & SYSErr)) {
+ /* At least RTL8168fp may unexpectedly set the SYSErr bit */
+ if (unlikely(status & SYSErr &&
+ tp->mac_version <= RTL_GIGA_MAC_VER_06)) {
rtl8169_pcierr_interrupt(tp->dev);
goto out;
}
@@ -4649,16 +4666,8 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
if (status & LinkChg)
phy_mac_interrupt(tp->phydev);
- if (unlikely(status & RxFIFOOver &&
- tp->mac_version == RTL_GIGA_MAC_VER_11)) {
- netif_stop_queue(tp->dev);
- rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
- }
-
- if (napi_schedule_prep(&tp->napi)) {
- rtl_irq_disable(tp);
- __napi_schedule(&tp->napi);
- }
+ rtl_irq_disable(tp);
+ napi_schedule(&tp->napi);
out:
rtl_ack_events(tp, status);
@@ -4671,11 +4680,6 @@ static void rtl_task(struct work_struct *work)
container_of(work, struct rtl8169_private, wk.work);
int ret;
- rtnl_lock();
-
- if (!test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags))
- goto out_unlock;
-
if (test_and_clear_bit(RTL_FLAG_TASK_TX_TIMEOUT, tp->wk.flags)) {
/* if chip isn't accessible, reset bus to revive it */
if (RTL_R32(tp, TxConfig) == ~0) {
@@ -4683,7 +4687,7 @@ static void rtl_task(struct work_struct *work)
if (ret < 0) {
netdev_err(tp->dev, "Can't reset secondary PCI bus, detach NIC\n");
netif_device_detach(tp->dev);
- goto out_unlock;
+ return;
}
}
@@ -4699,11 +4703,7 @@ static void rtl_task(struct work_struct *work)
reset:
rtl_reset_work(tp);
netif_wake_queue(tp->dev);
- } else if (test_and_clear_bit(RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE, tp->wk.flags)) {
- rtl_reset_work(tp);
}
-out_unlock:
- rtnl_unlock();
}
static int rtl8169_poll(struct napi_struct *napi, int budget)
@@ -4722,6 +4722,41 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
return work_done;
}
+static void rtl_enable_tx_lpi(struct rtl8169_private *tp, bool enable)
+{
+ if (!rtl_supports_eee(tp))
+ return;
+
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_52:
+ /* Adjust EEE LED frequency */
+ if (tp->mac_version != RTL_GIGA_MAC_VER_38)
+ RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07);
+ if (enable)
+ rtl_eri_set_bits(tp, 0x1b0, 0x0003);
+ else
+ rtl_eri_clear_bits(tp, 0x1b0, 0x0003);
+ break;
+ case RTL_GIGA_MAC_VER_61:
+ if (enable) {
+ r8168_mac_ocp_modify(tp, 0xe040, 0, 0x0003);
+ r8168_mac_ocp_modify(tp, 0xeb62, 0, 0x0006);
+ } else {
+ r8168_mac_ocp_modify(tp, 0xe040, 0x0003, 0);
+ r8168_mac_ocp_modify(tp, 0xeb62, 0x0006, 0);
+ }
+ break;
+ case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_LAST:
+ if (enable)
+ r8168_mac_ocp_modify(tp, 0xe040, 0, 0x0003);
+ else
+ r8168_mac_ocp_modify(tp, 0xe040, 0x0003, 0);
+ break;
+ default:
+ break;
+ }
+}
+
static void r8169_phylink_handler(struct net_device *ndev)
{
struct rtl8169_private *tp = netdev_priv(ndev);
@@ -4729,12 +4764,9 @@ static void r8169_phylink_handler(struct net_device *ndev)
if (netif_carrier_ok(ndev)) {
rtl_link_chg_patch(tp);
+ rtl_enable_tx_lpi(tp, tp->phydev->enable_tx_lpi);
pm_request_resume(d);
- netif_wake_queue(tp->dev);
} else {
- /* In few cases rx is broken after link-down otherwise */
- if (rtl_is_8125(tp))
- rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE);
pm_runtime_idle(d);
}
@@ -4765,6 +4797,7 @@ static int r8169_phy_connect(struct rtl8169_private *tp)
static void rtl8169_down(struct rtl8169_private *tp)
{
+ disable_work_sync(&tp->wk.work);
/* Clear all task flags */
bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
@@ -4793,7 +4826,7 @@ static void rtl8169_up(struct rtl8169_private *tp)
phy_resume(tp->phydev);
rtl8169_init_phy(tp);
napi_enable(&tp->napi);
- set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
+ enable_work(&tp->wk.work);
rtl_reset_work(tp);
phy_start(tp->phydev);
@@ -4810,8 +4843,6 @@ static int rtl8169_close(struct net_device *dev)
rtl8169_down(tp);
rtl8169_rx_clear(tp);
- cancel_work(&tp->wk.work);
-
free_irq(tp->irq, tp);
phy_disconnect(tp->phydev);
@@ -4977,9 +5008,8 @@ static int rtl8169_resume(struct device *device)
if (!device_may_wakeup(tp_to_dev(tp)))
clk_prepare_enable(tp->clk);
- /* Reportedly at least Asus X453MA truncates packets otherwise */
- if (tp->mac_version == RTL_GIGA_MAC_VER_37)
- rtl_init_rxcfg(tp);
+ /* Some chip versions may truncate packets without this initialization */
+ rtl_init_rxcfg(tp);
return rtl8169_runtime_resume(device);
}
@@ -5031,10 +5061,8 @@ static void rtl_shutdown(struct pci_dev *pdev)
/* Restore original MAC address */
rtl_rar_set(tp, tp->dev->perm_addr);
- if (system_state == SYSTEM_POWER_OFF && !tp->dash_enabled) {
- pci_wake_from_d3(pdev, tp->saved_wolopts);
- pci_set_power_state(pdev, PCI_D3hot);
- }
+ if (system_state == SYSTEM_POWER_OFF && !tp->dash_enabled)
+ pci_prepare_to_sleep(pdev);
}
static void rtl_remove_one(struct pci_dev *pdev)
@@ -5044,7 +5072,7 @@ static void rtl_remove_one(struct pci_dev *pdev)
if (pci_dev_run_wake(pdev))
pm_runtime_get_noresume(&pdev->dev);
- cancel_work_sync(&tp->wk.work);
+ disable_work_sync(&tp->wk.work);
if (IS_ENABLED(CONFIG_R8169_LEDS))
r8169_remove_leds(tp->leds);
@@ -5085,12 +5113,7 @@ static void rtl_set_irq_mask(struct rtl8169_private *tp)
tp->irq_mask = RxOK | RxErr | TxOK | TxErr | LinkChg;
if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
- tp->irq_mask |= SYSErr | RxOverflow | RxFIFOOver;
- else if (tp->mac_version == RTL_GIGA_MAC_VER_11)
- /* special workaround needed */
- tp->irq_mask |= RxFIFOOver;
- else
- tp->irq_mask |= RxOverflow;
+ tp->irq_mask |= SYSErr | RxFIFOOver;
}
static int rtl_alloc_irq(struct rtl8169_private *tp)
@@ -5104,7 +5127,7 @@ static int rtl_alloc_irq(struct rtl8169_private *tp)
rtl_lock_config_regs(tp);
fallthrough;
case RTL_GIGA_MAC_VER_07 ... RTL_GIGA_MAC_VER_17:
- flags = PCI_IRQ_LEGACY;
+ flags = PCI_IRQ_INTX;
break;
default:
flags = PCI_IRQ_ALL_TYPES;
@@ -5163,6 +5186,33 @@ static int r8169_mdio_write_reg(struct mii_bus *mii_bus, int phyaddr,
return 0;
}
+static int r8169_mdio_read_reg_c45(struct mii_bus *mii_bus, int addr,
+ int devnum, int regnum)
+{
+ struct rtl8169_private *tp = mii_bus->priv;
+
+ if (addr > 0)
+ return -ENODEV;
+
+ if (devnum == MDIO_MMD_VEND2 && regnum > MDIO_STAT2)
+ return r8168_phy_ocp_read(tp, regnum);
+
+ return 0;
+}
+
+static int r8169_mdio_write_reg_c45(struct mii_bus *mii_bus, int addr,
+ int devnum, int regnum, u16 val)
+{
+ struct rtl8169_private *tp = mii_bus->priv;
+
+ if (addr > 0 || devnum != MDIO_MMD_VEND2 || regnum <= MDIO_STAT2)
+ return -ENODEV;
+
+ r8168_phy_ocp_write(tp, regnum, val);
+
+ return 0;
+}
+
static int r8169_mdio_register(struct rtl8169_private *tp)
{
struct pci_dev *pdev = tp->pci_dev;
@@ -5186,12 +5236,18 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
new_bus->priv = tp;
new_bus->parent = &pdev->dev;
new_bus->irq[0] = PHY_MAC_INTERRUPT;
+ new_bus->phy_mask = GENMASK(31, 1);
snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x-%x",
pci_domain_nr(pdev->bus), pci_dev_id(pdev));
new_bus->read = r8169_mdio_read_reg;
new_bus->write = r8169_mdio_write_reg;
+ if (tp->mac_version >= RTL_GIGA_MAC_VER_40) {
+ new_bus->read_c45 = r8169_mdio_read_reg_c45;
+ new_bus->write_c45 = r8169_mdio_write_reg_c45;
+ }
+
ret = devm_mdiobus_register(&pdev->dev, new_bus);
if (ret)
return ret;
@@ -5213,6 +5269,11 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
phy_support_eee(tp->phydev);
phy_support_asym_pause(tp->phydev);
+ /* mimic behavior of r8125/r8126 vendor drivers */
+ if (tp->mac_version == RTL_GIGA_MAC_VER_61)
+ phy_disable_eee_mode(tp->phydev,
+ ETHTOOL_LINK_MODE_2500baseT_Full_BIT);
+
/* PHY will be woken up in rtl_open() */
phy_suspend(tp->phydev);
@@ -5254,13 +5315,13 @@ static void rtl_hw_init_8125(struct rtl8169_private *tp)
static void rtl_hw_initialize(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_53:
+ case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_52:
rtl8168ep_stop_cmac(tp);
fallthrough;
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48:
rtl_hw_init_8168g(tp);
break;
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_65:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_LAST:
rtl_hw_init_8125(tp);
break;
default:
@@ -5279,12 +5340,14 @@ static int rtl_jumbo_max(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
return JUMBO_7K;
/* RTL8168b */
- case RTL_GIGA_MAC_VER_11:
case RTL_GIGA_MAC_VER_17:
return JUMBO_4K;
/* RTL8168c */
case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24:
return JUMBO_6K;
+ /* RTL8125/8126 */
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_LAST:
+ return JUMBO_16K;
default:
return JUMBO_9K;
}
@@ -5319,7 +5382,7 @@ done:
/* register is set if system vendor successfully tested ASPM 1.2 */
static bool rtl_aspm_is_safe(struct rtl8169_private *tp)
{
- if (tp->mac_version >= RTL_GIGA_MAC_VER_61 &&
+ if (tp->mac_version >= RTL_GIGA_MAC_VER_46 &&
r8168_mac_ocp_read(tp, 0xc0b2) & 0xf)
return true;
@@ -5328,9 +5391,9 @@ static bool rtl_aspm_is_safe(struct rtl8169_private *tp)
static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
+ const struct rtl_chip_info *chip;
struct rtl8169_private *tp;
int jumbo_max, region, rc;
- enum mac_version chipset;
struct net_device *dev;
u32 txconfig;
u16 xid;
@@ -5347,8 +5410,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->supports_gmii = ent->driver_data == RTL_CFG_NO_GBIT ? 0 : 1;
tp->ocp_base = OCP_STD_PHY_BASE;
- raw_spin_lock_init(&tp->cfg9346_usage_lock);
- raw_spin_lock_init(&tp->config25_lock);
raw_spin_lock_init(&tp->mac_ocp_lock);
mutex_init(&tp->led_lock);
@@ -5370,11 +5431,10 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (region < 0)
return dev_err_probe(&pdev->dev, -ENODEV, "no MMIO resource found\n");
- rc = pcim_iomap_regions(pdev, BIT(region), KBUILD_MODNAME);
- if (rc < 0)
- return dev_err_probe(&pdev->dev, rc, "cannot remap MMIO, aborting\n");
-
- tp->mmio_addr = pcim_iomap_table(pdev)[region];
+ tp->mmio_addr = pcim_iomap_region(pdev, region, KBUILD_MODNAME);
+ if (IS_ERR(tp->mmio_addr))
+ return dev_err_probe(&pdev->dev, PTR_ERR(tp->mmio_addr),
+ "cannot remap MMIO, aborting\n");
txconfig = RTL_R32(tp, TxConfig);
if (txconfig == ~0U)
@@ -5383,22 +5443,34 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
xid = (txconfig >> 20) & 0xfcf;
/* Identify chip attached to board */
- chipset = rtl8169_get_mac_version(xid, tp->supports_gmii);
- if (chipset == RTL_GIGA_MAC_NONE)
+ chip = rtl8169_get_chip_version(xid, tp->supports_gmii);
+ if (chip->mac_version == RTL_GIGA_MAC_NONE)
return dev_err_probe(&pdev->dev, -ENODEV,
"unknown chip XID %03x, contact r8169 maintainers (see MAINTAINERS file)\n",
xid);
- tp->mac_version = chipset;
+ tp->mac_version = chip->mac_version;
+ tp->fw_name = chip->fw_name;
/* Disable ASPM L1 as that cause random device stop working
* problems as well as full system hangs for some PCIe devices users.
*/
- if (rtl_aspm_is_safe(tp))
+ if (rtl_aspm_is_safe(tp)) {
+ dev_info(&pdev->dev, "System vendor flags ASPM as safe\n");
rc = 0;
- else
+ } else {
rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1);
+ }
tp->aspm_manageable = !rc;
+ /* Fiber mode on RTL8127AF isn't supported */
+ if (rtl_is_8125(tp)) {
+ u16 data = r8168_mac_ocp_read(tp, 0xd006);
+
+ if ((data & 0xff) == 0x07)
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ "Fiber mode not supported\n");
+ }
+
tp->dash_type = rtl_get_dash_type(tp);
tp->dash_enabled = rtl_dash_is_enabled(tp);
@@ -5423,6 +5495,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->irq = pci_irq_vector(pdev, 0);
INIT_WORK(&tp->wk.work, rtl_task);
+ disable_work(&tp->wk.work);
rtl_init_mac_address(tp);
@@ -5448,11 +5521,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->features |= dev->hw_features;
- /* There has been a number of reports that using SG/TSO results in
- * tx timeouts. However for a lot of people SG/TSO works fine.
- * Therefore disable both features by default, but allow users to
- * enable them. Use at own risk!
- */
if (rtl_chip_supports_csum_v2(tp)) {
dev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6;
netif_set_tso_max_size(dev, RTL_GSO_MAX_SIZE_V2);
@@ -5463,6 +5531,17 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netif_set_tso_max_segs(dev, RTL_GSO_MAX_SEGS_V1);
}
+	/* There have been a number of reports that using SG/TSO results in
+	 * tx timeouts. However, for a lot of people SG/TSO works fine.
+	 * It's not fully clear which chip versions are affected. Vendor
+	 * drivers enable SG/TSO by default for certain chip versions,
+	 * so mimic this here. On other chip versions users can enable
+	 * SG/TSO via ethtool, at their own risk!
+ */
+ if (tp->mac_version >= RTL_GIGA_MAC_VER_46 &&
+ tp->mac_version != RTL_GIGA_MAC_VER_61)
+ dev->features |= dev->hw_features;
+
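For chip versions where the driver keeps SG/TSO disabled by default, the
offloads can still be enabled from user space at the user's own risk, e.g.
with "ethtool -K eth0 sg on tso on", where eth0 stands in for whatever name
the r8169 interface has on the system.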
dev->hw_features |= NETIF_F_RXALL;
dev->hw_features |= NETIF_F_RXFCS;
@@ -5477,7 +5556,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rtl_set_d3_pll_down(tp, true);
} else {
rtl_set_d3_pll_down(tp, false);
- dev->wol_enabled = 1;
+ dev->ethtool->wol_enabled = 1;
}
jumbo_max = rtl_jumbo_max(tp);
@@ -5486,8 +5565,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rtl_set_irq_mask(tp);
- tp->fw_name = rtl_chip_infos[chipset].fw_name;
-
tp->counters = dmam_alloc_coherent (&pdev->dev, sizeof(*tp->counters),
&tp->counters_phys_addr,
GFP_KERNEL);
@@ -5512,7 +5589,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
netdev_info(dev, "%s, %pM, XID %03x, IRQ %d\n",
- rtl_chip_infos[chipset].name, dev->dev_addr, xid, tp->irq);
+ chip->name, dev->dev_addr, xid, tp->irq);
if (jumbo_max)
netdev_info(dev, "jumbo features [frames: %d bytes, tx checksumming: %s]\n",
diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c
index 1f74317beb88..032d9d2cfa2a 100644
--- a/drivers/net/ethernet/realtek/r8169_phy_config.c
+++ b/drivers/net/ethernet/realtek/r8169_phy_config.c
@@ -50,6 +50,15 @@ static void r8168g_phy_param(struct phy_device *phydev, u16 parm,
phy_restore_page(phydev, oldpage, 0);
}
+static void rtl8125_phy_param(struct phy_device *phydev, u16 parm,
+ u16 mask, u16 val)
+{
+ phy_lock_mdio_bus(phydev);
+ __phy_write_mmd(phydev, MDIO_MMD_VEND2, 0xb87c, parm);
+ __phy_modify_mmd(phydev, MDIO_MMD_VEND2, 0xb87e, mask, val);
+ phy_unlock_mdio_bus(phydev);
+}
+
struct phy_reg {
u16 reg;
u16 val;
@@ -89,20 +98,17 @@ static void rtl8168h_config_eee_phy(struct phy_device *phydev)
phy_modify_paged(phydev, 0xa42, 0x14, 0x0000, 0x0080);
}
-static void rtl8125a_config_eee_phy(struct phy_device *phydev)
+static void rtl8125_common_config_eee_phy(struct phy_device *phydev)
{
- rtl8168h_config_eee_phy(phydev);
-
- phy_modify_paged(phydev, 0xa6d, 0x12, 0x0001, 0x0000);
phy_modify_paged(phydev, 0xa6d, 0x14, 0x0010, 0x0000);
+ phy_modify_paged(phydev, 0xa42, 0x14, 0x0080, 0x0000);
+ phy_modify_paged(phydev, 0xa4a, 0x11, 0x0200, 0x0000);
}
-static void rtl8125b_config_eee_phy(struct phy_device *phydev)
+static void rtl8125_config_eee_phy(struct phy_device *phydev)
{
- phy_modify_paged(phydev, 0xa6d, 0x12, 0x0001, 0x0000);
- phy_modify_paged(phydev, 0xa6d, 0x14, 0x0010, 0x0000);
- phy_modify_paged(phydev, 0xa42, 0x14, 0x0080, 0x0000);
- phy_modify_paged(phydev, 0xa4a, 0x11, 0x0200, 0x0000);
+ rtl8168g_config_eee_phy(phydev);
+ rtl8125_common_config_eee_phy(phydev);
}
static void rtl8169s_hw_phy_config(struct rtl8169_private *tp,
@@ -279,15 +285,6 @@ static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp,
rtl_writephy_batch(phydev, phy_reg_init);
}
-static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp,
- struct phy_device *phydev)
-{
- phy_write(phydev, 0x1f, 0x0001);
- phy_set_bits(phydev, 0x16, BIT(0));
- phy_write(phydev, 0x10, 0xf41b);
- phy_write(phydev, 0x1f, 0x0000);
-}
-
static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp,
struct phy_device *phydev)
{
@@ -1016,12 +1013,8 @@ static void rtl8125a_2_hw_phy_config(struct rtl8169_private *tp,
phy_write_paged(phydev, 0xac5, 0x16, 0x01ff);
phy_modify_paged(phydev, 0xac8, 0x15, 0x00f0, 0x0030);
- phy_write(phydev, 0x1f, 0x0b87);
- phy_write(phydev, 0x16, 0x80a2);
- phy_write(phydev, 0x17, 0x0153);
- phy_write(phydev, 0x16, 0x809c);
- phy_write(phydev, 0x17, 0x0153);
- phy_write(phydev, 0x1f, 0x0000);
+ rtl8125_phy_param(phydev, 0x80a2, 0xffff, 0x0153);
+ rtl8125_phy_param(phydev, 0x809c, 0xffff, 0x0153);
phy_write(phydev, 0x1f, 0x0a43);
phy_write(phydev, 0x13, 0x81B3);
@@ -1060,26 +1053,22 @@ static void rtl8125a_2_hw_phy_config(struct rtl8169_private *tp,
phy_modify_paged(phydev, 0xa86, 0x15, 0x0001, 0x0000);
rtl8168g_enable_gphy_10m(phydev);
- rtl8125a_config_eee_phy(phydev);
+ rtl8168g_disable_aldps(phydev);
+ rtl8125_config_eee_phy(phydev);
}
static void rtl8125b_hw_phy_config(struct rtl8169_private *tp,
struct phy_device *phydev)
{
r8169_apply_firmware(tp);
+ rtl8168g_enable_gphy_10m(phydev);
- phy_modify_paged(phydev, 0xa44, 0x11, 0x0000, 0x0800);
phy_modify_paged(phydev, 0xac4, 0x13, 0x00f0, 0x0090);
phy_modify_paged(phydev, 0xad3, 0x10, 0x0003, 0x0001);
- phy_write(phydev, 0x1f, 0x0b87);
- phy_write(phydev, 0x16, 0x80f5);
- phy_write(phydev, 0x17, 0x760e);
- phy_write(phydev, 0x16, 0x8107);
- phy_write(phydev, 0x17, 0x360e);
- phy_write(phydev, 0x16, 0x8551);
- phy_modify(phydev, 0x17, 0xff00, 0x0800);
- phy_write(phydev, 0x1f, 0x0000);
+ rtl8125_phy_param(phydev, 0x80f5, 0xffff, 0x760e);
+ rtl8125_phy_param(phydev, 0x8107, 0xffff, 0x360e);
+ rtl8125_phy_param(phydev, 0x8551, 0xff00, 0x0800);
phy_modify_paged(phydev, 0xbf0, 0x10, 0xe000, 0xa000);
phy_modify_paged(phydev, 0xbf4, 0x13, 0x0f00, 0x0300);
@@ -1099,13 +1088,211 @@ static void rtl8125b_hw_phy_config(struct rtl8169_private *tp,
phy_modify_paged(phydev, 0xbf8, 0x12, 0xe000, 0xa000);
rtl8125_legacy_force_mode(phydev);
- rtl8125b_config_eee_phy(phydev);
+ rtl8168g_disable_aldps(phydev);
+ rtl8125_config_eee_phy(phydev);
+}
+
+static void rtl8125d_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ r8169_apply_firmware(tp);
+ rtl8168g_enable_gphy_10m(phydev);
+ rtl8125_legacy_force_mode(phydev);
+ rtl8168g_disable_aldps(phydev);
+ rtl8125_config_eee_phy(phydev);
+}
+
+static void rtl8125bp_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ r8169_apply_firmware(tp);
+ rtl8168g_enable_gphy_10m(phydev);
+
+ r8168g_phy_param(phydev, 0x8010, 0x0800, 0x0000);
+
+ rtl8125_phy_param(phydev, 0x8088, 0xff00, 0x9000);
+ rtl8125_phy_param(phydev, 0x808f, 0xff00, 0x9000);
+
+ r8168g_phy_param(phydev, 0x8174, 0x2000, 0x1800);
+
+ rtl8125_legacy_force_mode(phydev);
+ rtl8168g_disable_aldps(phydev);
+ rtl8125_config_eee_phy(phydev);
}
static void rtl8126a_hw_phy_config(struct rtl8169_private *tp,
struct phy_device *phydev)
{
r8169_apply_firmware(tp);
+ rtl8168g_enable_gphy_10m(phydev);
+ rtl8125_legacy_force_mode(phydev);
+ rtl8168g_disable_aldps(phydev);
+ rtl8125_common_config_eee_phy(phydev);
+}
+
+static void rtl8127a_1_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ r8169_apply_firmware(tp);
+ rtl8168g_enable_gphy_10m(phydev);
+
+ r8168g_phy_param(phydev, 0x8415, 0xff00, 0x9300);
+ r8168g_phy_param(phydev, 0x81a3, 0xff00, 0x0f00);
+ r8168g_phy_param(phydev, 0x81ae, 0xff00, 0x0f00);
+ r8168g_phy_param(phydev, 0x81b9, 0xff00, 0xb900);
+ rtl8125_phy_param(phydev, 0x83b0, 0x0e00, 0x0000);
+ rtl8125_phy_param(phydev, 0x83C5, 0x0e00, 0x0000);
+ rtl8125_phy_param(phydev, 0x83da, 0x0e00, 0x0000);
+ rtl8125_phy_param(phydev, 0x83ef, 0x0e00, 0x0000);
+ phy_modify_paged(phydev, 0x0bf3, 0x14, 0x01f0, 0x0160);
+ phy_modify_paged(phydev, 0x0bf3, 0x15, 0x001f, 0x0014);
+ phy_modify_paged(phydev, 0x0bf2, 0x14, 0x6000, 0x0000);
+ phy_modify_paged(phydev, 0x0bf2, 0x16, 0xc000, 0x0000);
+ phy_modify_paged(phydev, 0x0bf2, 0x14, 0x1fff, 0x0187);
+ phy_modify_paged(phydev, 0x0bf2, 0x15, 0x003f, 0x0003);
+
+ r8168g_phy_param(phydev, 0x8173, 0xffff, 0x8620);
+ r8168g_phy_param(phydev, 0x8175, 0xffff, 0x8671);
+ r8168g_phy_param(phydev, 0x817c, 0x0000, 0x2000);
+ r8168g_phy_param(phydev, 0x8187, 0x0000, 0x2000);
+ r8168g_phy_param(phydev, 0x8192, 0x0000, 0x2000);
+ r8168g_phy_param(phydev, 0x819d, 0x0000, 0x2000);
+ r8168g_phy_param(phydev, 0x81a8, 0x2000, 0x0000);
+ r8168g_phy_param(phydev, 0x81b3, 0x2000, 0x0000);
+ r8168g_phy_param(phydev, 0x81be, 0x0000, 0x2000);
+ r8168g_phy_param(phydev, 0x817d, 0xff00, 0xa600);
+ r8168g_phy_param(phydev, 0x8188, 0xff00, 0xa600);
+ r8168g_phy_param(phydev, 0x8193, 0xff00, 0xa600);
+ r8168g_phy_param(phydev, 0x819e, 0xff00, 0xa600);
+ r8168g_phy_param(phydev, 0x81a9, 0xff00, 0x1400);
+ r8168g_phy_param(phydev, 0x81b4, 0xff00, 0x1400);
+ r8168g_phy_param(phydev, 0x81bf, 0xff00, 0xa600);
+
+ phy_modify_paged(phydev, 0x0aea, 0x15, 0x0028, 0x0000);
+
+ rtl8125_phy_param(phydev, 0x84f0, 0xffff, 0x201c);
+ rtl8125_phy_param(phydev, 0x84f2, 0xffff, 0x3117);
+
+ phy_write_paged(phydev, 0x0aec, 0x13, 0x0000);
+ phy_write_paged(phydev, 0x0ae2, 0x10, 0xffff);
+ phy_write_paged(phydev, 0x0aec, 0x17, 0xffff);
+ phy_write_paged(phydev, 0x0aed, 0x11, 0xffff);
+ phy_write_paged(phydev, 0x0aec, 0x14, 0x0000);
+ phy_modify_paged(phydev, 0x0aed, 0x10, 0x0001, 0x0000);
+ phy_write_paged(phydev, 0x0adb, 0x14, 0x0150);
+ rtl8125_phy_param(phydev, 0x8197, 0xff00, 0x5000);
+ rtl8125_phy_param(phydev, 0x8231, 0xff00, 0x5000);
+ rtl8125_phy_param(phydev, 0x82cb, 0xff00, 0x5000);
+ rtl8125_phy_param(phydev, 0x82cd, 0xff00, 0x5700);
+ rtl8125_phy_param(phydev, 0x8233, 0xff00, 0x5700);
+ rtl8125_phy_param(phydev, 0x8199, 0xff00, 0x5700);
+
+ rtl8125_phy_param(phydev, 0x815a, 0xffff, 0x0150);
+ rtl8125_phy_param(phydev, 0x81f4, 0xffff, 0x0150);
+ rtl8125_phy_param(phydev, 0x828e, 0xffff, 0x0150);
+ rtl8125_phy_param(phydev, 0x81b1, 0xffff, 0x0000);
+ rtl8125_phy_param(phydev, 0x824b, 0xffff, 0x0000);
+ rtl8125_phy_param(phydev, 0x82e5, 0xffff, 0x0000);
+
+ rtl8125_phy_param(phydev, 0x84f7, 0xff00, 0x2800);
+ phy_modify_paged(phydev, 0x0aec, 0x11, 0x0000, 0x1000);
+ rtl8125_phy_param(phydev, 0x81b3, 0xff00, 0xad00);
+ rtl8125_phy_param(phydev, 0x824d, 0xff00, 0xad00);
+ rtl8125_phy_param(phydev, 0x82e7, 0xff00, 0xad00);
+ phy_modify_paged(phydev, 0x0ae4, 0x17, 0x000f, 0x0001);
+ rtl8125_phy_param(phydev, 0x82ce, 0xf000, 0x4000);
+
+ rtl8125_phy_param(phydev, 0x84ac, 0xffff, 0x0000);
+ rtl8125_phy_param(phydev, 0x84ae, 0xffff, 0x0000);
+ rtl8125_phy_param(phydev, 0x84b0, 0xffff, 0xf818);
+ rtl8125_phy_param(phydev, 0x84b2, 0xff00, 0x6000);
+
+ rtl8125_phy_param(phydev, 0x8ffc, 0xffff, 0x6008);
+ rtl8125_phy_param(phydev, 0x8ffe, 0xffff, 0xf450);
+
+ rtl8125_phy_param(phydev, 0x8015, 0x0000, 0x0200);
+ rtl8125_phy_param(phydev, 0x8016, 0x0800, 0x0000);
+ rtl8125_phy_param(phydev, 0x8fe6, 0xff00, 0x0800);
+ rtl8125_phy_param(phydev, 0x8fe4, 0xffff, 0x2114);
+
+ rtl8125_phy_param(phydev, 0x8647, 0xffff, 0xa7b1);
+ rtl8125_phy_param(phydev, 0x8649, 0xffff, 0xbbca);
+ rtl8125_phy_param(phydev, 0x864b, 0xff00, 0xdc00);
+
+ rtl8125_phy_param(phydev, 0x8154, 0xc000, 0x4000);
+ rtl8125_phy_param(phydev, 0x8158, 0xc000, 0x0000);
+
+ rtl8125_phy_param(phydev, 0x826c, 0xffff, 0xffff);
+ rtl8125_phy_param(phydev, 0x826e, 0xffff, 0xffff);
+
+ rtl8125_phy_param(phydev, 0x8872, 0xff00, 0x0e00);
+ r8168g_phy_param(phydev, 0x8012, 0x0000, 0x0800);
+ r8168g_phy_param(phydev, 0x8012, 0x0000, 0x4000);
+ phy_modify_paged(phydev, 0x0b57, 0x13, 0x0000, 0x0001);
+ r8168g_phy_param(phydev, 0x834a, 0xff00, 0x0700);
+ rtl8125_phy_param(phydev, 0x8217, 0x3f00, 0x2a00);
+ r8168g_phy_param(phydev, 0x81b1, 0xff00, 0x0b00);
+ rtl8125_phy_param(phydev, 0x8fed, 0xff00, 0x4e00);
+
+ rtl8125_phy_param(phydev, 0x88ac, 0xff00, 0x2300);
+ phy_modify_paged(phydev, 0x0bf0, 0x16, 0x0000, 0x3800);
+ rtl8125_phy_param(phydev, 0x88de, 0xff00, 0x0000);
+ rtl8125_phy_param(phydev, 0x80b4, 0xffff, 0x5195);
+
+ r8168g_phy_param(phydev, 0x8370, 0xffff, 0x8671);
+ r8168g_phy_param(phydev, 0x8372, 0xffff, 0x86c8);
+
+ r8168g_phy_param(phydev, 0x8401, 0xffff, 0x86c8);
+ r8168g_phy_param(phydev, 0x8403, 0xffff, 0x86da);
+ r8168g_phy_param(phydev, 0x8406, 0x1800, 0x1000);
+ r8168g_phy_param(phydev, 0x8408, 0x1800, 0x1000);
+ r8168g_phy_param(phydev, 0x840a, 0x1800, 0x1000);
+ r8168g_phy_param(phydev, 0x840c, 0x1800, 0x1000);
+ r8168g_phy_param(phydev, 0x840e, 0x1800, 0x1000);
+ r8168g_phy_param(phydev, 0x8410, 0x1800, 0x1000);
+ r8168g_phy_param(phydev, 0x8412, 0x1800, 0x1000);
+ r8168g_phy_param(phydev, 0x8414, 0x1800, 0x1000);
+ r8168g_phy_param(phydev, 0x8416, 0x1800, 0x1000);
+
+ r8168g_phy_param(phydev, 0x82bd, 0xffff, 0x1f40);
+
+ phy_modify_paged(phydev, 0x0bfb, 0x12, 0x07ff, 0x0328);
+ phy_write_paged(phydev, 0x0bfb, 0x13, 0x3e14);
+
+ r8168g_phy_param(phydev, 0x81c4, 0xffff, 0x003b);
+ r8168g_phy_param(phydev, 0x81c6, 0xffff, 0x0086);
+ r8168g_phy_param(phydev, 0x81c8, 0xffff, 0x00b7);
+ r8168g_phy_param(phydev, 0x81ca, 0xffff, 0x00db);
+ r8168g_phy_param(phydev, 0x81cc, 0xffff, 0x00fe);
+ r8168g_phy_param(phydev, 0x81ce, 0xffff, 0x00fe);
+ r8168g_phy_param(phydev, 0x81d0, 0xffff, 0x00fe);
+ r8168g_phy_param(phydev, 0x81d2, 0xffff, 0x00fe);
+ r8168g_phy_param(phydev, 0x81d4, 0xffff, 0x00c3);
+ r8168g_phy_param(phydev, 0x81d6, 0xffff, 0x0078);
+ r8168g_phy_param(phydev, 0x81d8, 0xffff, 0x0047);
+ r8168g_phy_param(phydev, 0x81da, 0xffff, 0x0023);
+
+ rtl8125_phy_param(phydev, 0x88d7, 0xffff, 0x01a0);
+ rtl8125_phy_param(phydev, 0x88d9, 0xffff, 0x01a0);
+ rtl8125_phy_param(phydev, 0x8ffa, 0xffff, 0x002a);
+
+ rtl8125_phy_param(phydev, 0x8fee, 0xffff, 0xffdf);
+ rtl8125_phy_param(phydev, 0x8ff0, 0xffff, 0xffff);
+ rtl8125_phy_param(phydev, 0x8ff2, 0xffff, 0x0a4a);
+ rtl8125_phy_param(phydev, 0x8ff4, 0xffff, 0xaa5a);
+ rtl8125_phy_param(phydev, 0x8ff6, 0xffff, 0x0a4a);
+
+ rtl8125_phy_param(phydev, 0x8ff8, 0xffff, 0xaa5a);
+ rtl8125_phy_param(phydev, 0x88d5, 0xff00, 0x0200);
+
+ r8168g_phy_param(phydev, 0x84bb, 0xff00, 0x0a00);
+ r8168g_phy_param(phydev, 0x84c0, 0xff00, 0x1600);
+
+ phy_modify_paged(phydev, 0x0a43, 0x10, 0x0000, 0x0003);
+
+ rtl8125_legacy_force_mode(phydev);
+ rtl8168g_disable_aldps(phydev);
+ rtl8125_common_config_eee_phy(phydev);
}
void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
@@ -1123,7 +1310,6 @@ void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
[RTL_GIGA_MAC_VER_08] = rtl8102e_hw_phy_config,
[RTL_GIGA_MAC_VER_09] = rtl8102e_hw_phy_config,
[RTL_GIGA_MAC_VER_10] = NULL,
- [RTL_GIGA_MAC_VER_11] = rtl8168bb_hw_phy_config,
[RTL_GIGA_MAC_VER_14] = rtl8401_hw_phy_config,
[RTL_GIGA_MAC_VER_17] = rtl8168bef_hw_phy_config,
[RTL_GIGA_MAC_VER_18] = rtl8168cp_1_hw_phy_config,
@@ -1155,10 +1341,12 @@ void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
[RTL_GIGA_MAC_VER_48] = rtl8168h_2_hw_phy_config,
[RTL_GIGA_MAC_VER_51] = rtl8168ep_2_hw_phy_config,
[RTL_GIGA_MAC_VER_52] = rtl8117_hw_phy_config,
- [RTL_GIGA_MAC_VER_53] = rtl8117_hw_phy_config,
[RTL_GIGA_MAC_VER_61] = rtl8125a_2_hw_phy_config,
[RTL_GIGA_MAC_VER_63] = rtl8125b_hw_phy_config,
- [RTL_GIGA_MAC_VER_65] = rtl8126a_hw_phy_config,
+ [RTL_GIGA_MAC_VER_64] = rtl8125d_hw_phy_config,
+ [RTL_GIGA_MAC_VER_66] = rtl8125bp_hw_phy_config,
+ [RTL_GIGA_MAC_VER_70] = rtl8126a_hw_phy_config,
+ [RTL_GIGA_MAC_VER_80] = rtl8127a_1_hw_phy_config,
};
if (phy_configs[ver])
diff --git a/drivers/net/ethernet/realtek/rtase/Makefile b/drivers/net/ethernet/realtek/rtase/Makefile
new file mode 100644
index 000000000000..ba3d8550f9e6
--- /dev/null
+++ b/drivers/net/ethernet/realtek/rtase/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+# Copyright(c) 2024 Realtek Semiconductor Corp. All rights reserved.
+
+#
+# Makefile for the Realtek PCIe driver
+#
+
+obj-$(CONFIG_RTASE) += rtase.o
+
+rtase-objs := rtase_main.o
diff --git a/drivers/net/ethernet/realtek/rtase/rtase.h b/drivers/net/ethernet/realtek/rtase/rtase.h
new file mode 100644
index 000000000000..b9209eb6ea73
--- /dev/null
+++ b/drivers/net/ethernet/realtek/rtase/rtase.h
@@ -0,0 +1,362 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * rtase is the Linux device driver released for Realtek Automotive Switch
+ * controllers with a PCI-Express interface.
+ *
+ * Copyright(c) 2024 Realtek Semiconductor Corp.
+ */
+
+#ifndef RTASE_H
+#define RTASE_H
+
+#define RTASE_HW_VER_MASK 0x7C800000
+#define RTASE_HW_VER_906X_7XA 0x00800000
+#define RTASE_HW_VER_906X_7XC 0x04000000
+#define RTASE_HW_VER_907XD_V1 0x04800000
+#define RTASE_HW_VER_907XD_VA 0x08000000
+
+#define RTASE_RX_DMA_BURST_256 4
+#define RTASE_TX_DMA_BURST_UNLIMITED 7
+
+#define RTASE_RX_BUF_SIZE (PAGE_SIZE - \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define RTASE_MAX_JUMBO_SIZE (RTASE_RX_BUF_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN)
+
+/* A value of 3 selects the shortest InterFrameGap */
+#define RTASE_INTERFRAMEGAP 0x03
+
+#define RTASE_REGS_SIZE 256
+#define RTASE_PCI_REGS_SIZE 0x100
+
+#define RTASE_MULTICAST_FILTER_MASK GENMASK(30, 26)
+
+#define RTASE_VLAN_FILTER_ENTRY_NUM 32
+#define RTASE_NUM_TX_QUEUE 8
+#define RTASE_NUM_RX_QUEUE 4
+
+#define RTASE_TXQ_CTRL 1
+#define RTASE_FUNC_TXQ_NUM 1
+#define RTASE_FUNC_RXQ_NUM 1
+#define RTASE_INTERRUPT_NUM 1
+
+#define RTASE_MITI_TIME_COUNT_MASK GENMASK(3, 0)
+#define RTASE_MITI_TIME_UNIT_MASK GENMASK(7, 4)
+#define RTASE_MITI_DEFAULT_TIME 128
+#define RTASE_MITI_MAX_TIME 491520
+#define RTASE_MITI_PKT_NUM_COUNT_MASK GENMASK(11, 8)
+#define RTASE_MITI_PKT_NUM_UNIT_MASK GENMASK(13, 12)
+#define RTASE_MITI_DEFAULT_PKT_NUM 64
+#define RTASE_MITI_MAX_PKT_NUM_IDX 3
+#define RTASE_MITI_MAX_PKT_NUM_UNIT 16
+#define RTASE_MITI_MAX_PKT_NUM 240
+#define RTASE_MITI_COUNT_BIT_NUM 4
+
+#define RTASE_NUM_MSIX 4
+
+#define RTASE_DWORD_MOD 16
+
+/*****************************************************************************/
+enum rtase_registers {
+ RTASE_MAC0 = 0x0000,
+ RTASE_MAC4 = 0x0004,
+ RTASE_MAR0 = 0x0008,
+ RTASE_MAR1 = 0x000C,
+ RTASE_DTCCR0 = 0x0010,
+ RTASE_DTCCR4 = 0x0014,
+#define RTASE_COUNTER_RESET BIT(0)
+#define RTASE_COUNTER_DUMP BIT(3)
+
+ RTASE_FCR = 0x0018,
+#define RTASE_FCR_RXQ_MASK GENMASK(5, 4)
+
+ RTASE_LBK_CTRL = 0x001A,
+#define RTASE_LBK_ATLD BIT(1)
+#define RTASE_LBK_CLR BIT(0)
+
+ RTASE_TX_DESC_ADDR0 = 0x0020,
+ RTASE_TX_DESC_ADDR4 = 0x0024,
+ RTASE_TX_DESC_COMMAND = 0x0028,
+#define RTASE_TX_DESC_CMD_CS BIT(15)
+#define RTASE_TX_DESC_CMD_WE BIT(14)
+
+ RTASE_BOOT_CTL = 0x6004,
+ RTASE_CLKSW_SET = 0x6018,
+
+ RTASE_CHIP_CMD = 0x0037,
+#define RTASE_STOP_REQ BIT(7)
+#define RTASE_STOP_REQ_DONE BIT(6)
+#define RTASE_RE BIT(3)
+#define RTASE_TE BIT(2)
+
+ RTASE_IMR0 = 0x0038,
+ RTASE_ISR0 = 0x003C,
+#define RTASE_TOK7 BIT(30)
+#define RTASE_TOK6 BIT(28)
+#define RTASE_TOK5 BIT(26)
+#define RTASE_TOK4 BIT(24)
+#define RTASE_FOVW BIT(6)
+#define RTASE_RDU BIT(4)
+#define RTASE_TOK BIT(2)
+#define RTASE_ROK BIT(0)
+
+ RTASE_IMR1 = 0x0800,
+ RTASE_ISR1 = 0x0802,
+#define RTASE_Q_TOK BIT(4)
+#define RTASE_Q_RDU BIT(1)
+#define RTASE_Q_ROK BIT(0)
+
+ RTASE_EPHY_ISR = 0x6014,
+ RTASE_EPHY_IMR = 0x6016,
+
+ RTASE_TX_CONFIG_0 = 0x0040,
+#define RTASE_TX_INTER_FRAME_GAP_MASK GENMASK(25, 24)
+	/* The DMA burst value (0-7) is placed in this field */
+#define RTASE_TX_DMA_MASK GENMASK(10, 8)
+
+ RTASE_RX_CONFIG_0 = 0x0044,
+#define RTASE_RX_SINGLE_FETCH BIT(14)
+#define RTASE_RX_SINGLE_TAG BIT(13)
+#define RTASE_RX_MX_DMA_MASK GENMASK(10, 8)
+#define RTASE_ACPT_FLOW BIT(7)
+#define RTASE_ACCEPT_ERR BIT(5)
+#define RTASE_ACCEPT_RUNT BIT(4)
+#define RTASE_ACCEPT_BROADCAST BIT(3)
+#define RTASE_ACCEPT_MULTICAST BIT(2)
+#define RTASE_ACCEPT_MYPHYS BIT(1)
+#define RTASE_ACCEPT_ALLPHYS BIT(0)
+#define RTASE_ACCEPT_MASK (RTASE_ACPT_FLOW | RTASE_ACCEPT_ERR | \
+ RTASE_ACCEPT_RUNT | RTASE_ACCEPT_BROADCAST | \
+ RTASE_ACCEPT_MULTICAST | RTASE_ACCEPT_MYPHYS | \
+ RTASE_ACCEPT_ALLPHYS)
+
+ RTASE_RX_CONFIG_1 = 0x0046,
+#define RTASE_RX_MAX_FETCH_DESC_MASK GENMASK(15, 11)
+#define RTASE_RX_NEW_DESC_FORMAT_EN BIT(8)
+#define RTASE_OUTER_VLAN_DETAG_EN BIT(7)
+#define RTASE_INNER_VLAN_DETAG_EN BIT(6)
+#define RTASE_PCIE_NEW_FLOW BIT(2)
+#define RTASE_PCIE_RELOAD_EN BIT(0)
+
+ RTASE_EEM = 0x0050,
+#define RTASE_EEM_UNLOCK 0xC0
+
+ RTASE_TDFNR = 0x0057,
+ RTASE_TPPOLL = 0x0090,
+ RTASE_PDR = 0x00B0,
+ RTASE_FIFOR = 0x00D3,
+#define RTASE_TX_FIFO_EMPTY BIT(5)
+#define RTASE_RX_FIFO_EMPTY BIT(4)
+
+ RTASE_RMS = 0x00DA,
+ RTASE_CPLUS_CMD = 0x00E0,
+#define RTASE_FORCE_RXFLOW_EN BIT(11)
+#define RTASE_FORCE_TXFLOW_EN BIT(10)
+#define RTASE_RX_CHKSUM BIT(5)
+
+ RTASE_Q0_RX_DESC_ADDR0 = 0x00E4,
+ RTASE_Q0_RX_DESC_ADDR4 = 0x00E8,
+ RTASE_Q1_RX_DESC_ADDR0 = 0x4000,
+ RTASE_Q1_RX_DESC_ADDR4 = 0x4004,
+ RTASE_MTPS = 0x00EC,
+#define RTASE_TAG_NUM_SEL_MASK GENMASK(10, 8)
+
+ RTASE_MISC = 0x00F2,
+#define RTASE_RX_DV_GATE_EN BIT(3)
+
+ RTASE_TFUN_CTRL = 0x0400,
+#define RTASE_TX_NEW_DESC_FORMAT_EN BIT(0)
+
+ RTASE_TX_CONFIG_1 = 0x203E,
+#define RTASE_TC_MODE_MASK GENMASK(11, 10)
+
+ RTASE_TOKSEL = 0x2046,
+ RTASE_TXQCRDT_0 = 0x2500,
+ RTASE_RFIFONFULL = 0x4406,
+ RTASE_INT_MITI_TX = 0x0A00,
+ RTASE_INT_MITI_RX = 0x0A80,
+
+ RTASE_VLAN_ENTRY_0 = 0xAC80,
+};
+
+enum rtase_desc_status_bit {
+ RTASE_DESC_OWN = BIT(31), /* Descriptor is owned by NIC */
+ RTASE_RING_END = BIT(30), /* End of descriptor ring */
+};
+
+enum rtase_sw_flag_content {
+ RTASE_SWF_MSI_ENABLED = BIT(1),
+ RTASE_SWF_MSIX_ENABLED = BIT(2),
+};
+
+#define RSVD_MASK 0x3FFFC000
+
+struct rtase_tx_desc {
+ __le32 opts1;
+ __le32 opts2;
+ __le64 addr;
+ __le32 opts3;
+ __le32 reserved1;
+ __le32 reserved2;
+ __le32 reserved3;
+} __packed;
+
+/*------ offset 0 of tx descriptor ------*/
+#define RTASE_TX_FIRST_FRAG BIT(29) /* Tx First segment of a packet */
+#define RTASE_TX_LAST_FRAG BIT(28) /* Tx Final segment of a packet */
+#define RTASE_GIANT_SEND_V4 BIT(26) /* TCP Giant Send Offload V4 (GSOv4) */
+#define RTASE_GIANT_SEND_V6 BIT(25) /* TCP Giant Send Offload V6 (GSOv6) */
+#define RTASE_TX_VLAN_TAG BIT(17) /* Add VLAN tag */
+
+/*------ offset 4 of tx descriptor ------*/
+#define RTASE_TX_UDPCS_C BIT(31) /* Calculate UDP/IP checksum */
+#define RTASE_TX_TCPCS_C BIT(30) /* Calculate TCP/IP checksum */
+#define RTASE_TX_IPCS_C BIT(29) /* Calculate IP checksum */
+#define RTASE_TX_IPV6F_C BIT(28) /* Indicate it is an IPv6 packet */
+
+union rtase_rx_desc {
+ struct {
+ __le64 header_buf_addr;
+ __le32 reserved1;
+ __le32 opts_header_len;
+ __le64 addr;
+ __le32 reserved2;
+ __le32 opts1;
+ } __packed desc_cmd;
+
+ struct {
+ __le32 reserved1;
+ __le32 reserved2;
+ __le32 rss;
+ __le32 opts4;
+ __le32 reserved3;
+ __le32 opts3;
+ __le32 opts2;
+ __le32 opts1;
+ } __packed desc_status;
+} __packed;
+
+/*------ offset 28 of rx descriptor ------*/
+#define RTASE_RX_FIRST_FRAG BIT(25) /* Rx First segment of a packet */
+#define RTASE_RX_LAST_FRAG BIT(24) /* Rx Final segment of a packet */
+#define RTASE_RX_RES BIT(20)
+#define RTASE_RX_RUNT BIT(19)
+#define RTASE_RX_RWT BIT(18)
+#define RTASE_RX_CRC BIT(17)
+#define RTASE_RX_V6F BIT(31)
+#define RTASE_RX_V4F BIT(30)
+#define RTASE_RX_UDPT BIT(29)
+#define RTASE_RX_TCPT BIT(28)
+#define RTASE_RX_IPF BIT(26) /* IP checksum failed */
+#define RTASE_RX_UDPF BIT(25) /* UDP/IP checksum failed */
+#define RTASE_RX_TCPF BIT(24) /* TCP/IP checksum failed */
+#define RTASE_RX_VLAN_TAG BIT(16) /* VLAN tag available */
+
+#define RTASE_NUM_DESC 1024
+#define RTASE_TX_BUDGET_DEFAULT 256
+#define RTASE_TX_RING_DESC_SIZE (RTASE_NUM_DESC * sizeof(struct rtase_tx_desc))
+#define RTASE_RX_RING_DESC_SIZE (RTASE_NUM_DESC * sizeof(union rtase_rx_desc))
+#define RTASE_TX_STOP_THRS (MAX_SKB_FRAGS + 1)
+#define RTASE_TX_START_THRS (2 * RTASE_TX_STOP_THRS)
+#define RTASE_VLAN_TAG_MASK GENMASK(15, 0)
+#define RTASE_RX_PKT_SIZE_MASK GENMASK(13, 0)
+
+/* txqos hardware definitions */
+#define RTASE_1T_CLOCK 64
+#define RTASE_1T_POWER 10000000
+#define RTASE_IDLESLOPE_INT_SHIFT 25
+#define RTASE_IDLESLOPE_INT_MASK GENMASK(31, 25)
+
+#define RTASE_IVEC_NAME_SIZE (IFNAMSIZ + 10)
+
+struct rtase_int_vector {
+ struct rtase_private *tp;
+ unsigned int irq;
+ char name[RTASE_IVEC_NAME_SIZE];
+ u16 index;
+ u16 imr_addr;
+ u16 isr_addr;
+ u32 imr;
+ struct list_head ring_list;
+ struct napi_struct napi;
+ int (*poll)(struct napi_struct *napi, int budget);
+};
+
+struct rtase_ring {
+ struct rtase_int_vector *ivec;
+ void *desc;
+ dma_addr_t phy_addr;
+ u32 cur_idx;
+ u32 dirty_idx;
+ u16 index;
+ u8 type;
+
+ struct sk_buff *skbuff[RTASE_NUM_DESC];
+ void *data_buf[RTASE_NUM_DESC];
+ union {
+ u32 len[RTASE_NUM_DESC];
+ dma_addr_t data_phy_addr[RTASE_NUM_DESC];
+ } mis;
+
+ struct list_head ring_entry;
+ int (*ring_handler)(struct rtase_ring *ring, int budget);
+ u64 alloc_fail;
+};
+
+struct rtase_txqos {
+ int hicredit;
+ int locredit;
+ int idleslope;
+ int sendslope;
+};
+
+struct rtase_stats {
+ u64 tx_dropped;
+ u64 rx_dropped;
+ u64 multicast;
+ u64 rx_errors;
+ u64 rx_length_errors;
+ u64 rx_crc_errors;
+};
+
+struct rtase_private {
+ void __iomem *mmio_addr;
+ u32 sw_flag;
+
+ struct pci_dev *pdev;
+ struct net_device *dev;
+ u32 rx_buf_sz;
+
+ struct page_pool *page_pool;
+ struct rtase_ring tx_ring[RTASE_NUM_TX_QUEUE];
+ struct rtase_txqos tx_qos[RTASE_NUM_TX_QUEUE];
+ struct rtase_ring rx_ring[RTASE_NUM_RX_QUEUE];
+ struct rtase_counters *tally_vaddr;
+ dma_addr_t tally_paddr;
+
+ u32 vlan_filter_ctrl;
+ u16 vlan_filter_vid[RTASE_VLAN_FILTER_ENTRY_NUM];
+
+ struct msix_entry msix_entry[RTASE_NUM_MSIX];
+ struct rtase_int_vector int_vector[RTASE_NUM_MSIX];
+
+ struct rtase_stats stats;
+
+ u16 tx_queue_ctrl;
+ u16 func_tx_queue_num;
+ u16 func_rx_queue_num;
+ u16 int_nums;
+ u16 tx_int_mit;
+ u16 rx_int_mit;
+
+ u32 hw_ver;
+};
+
+#define RTASE_LSO_64K 64000
+
+#define RTASE_NIC_MAX_PHYS_BUF_COUNT_LSO2 (16 * 4)
+
+#define RTASE_TCPHO_MASK GENMASK(24, 18)
+
+#define RTASE_MSS_MASK GENMASK(28, 18)
+
+#endif /* RTASE_H */
diff --git a/drivers/net/ethernet/realtek/rtase/rtase_main.c b/drivers/net/ethernet/realtek/rtase/rtase_main.c
new file mode 100644
index 000000000000..ef13109c49cf
--- /dev/null
+++ b/drivers/net/ethernet/realtek/rtase/rtase_main.c
@@ -0,0 +1,2400 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * rtase is the Linux device driver released for Realtek Automotive Switch
+ * controllers with a PCI-Express interface.
+ *
+ * Copyright(c) 2024 Realtek Semiconductor Corp.
+ *
+ * Below is a simplified block diagram of the chip and its relevant interfaces.
+ *
+ * *************************
+ * * *
+ * * CPU network device *
+ * * *
+ * * +-------------+ *
+ * * | PCIE Host | *
+ * ***********++************
+ * ||
+ * PCIE
+ * ||
+ * ********************++**********************
+ * * | PCIE Endpoint | *
+ * * +---------------+ *
+ * * | GMAC | *
+ * * +--++--+ Realtek *
+ * * || RTL90xx Series *
+ * * || *
+ * * +-------------++----------------+ *
+ * * | | MAC | | *
+ * * | +-----+ | *
+ * * | | *
+ * * | Ethernet Switch Core | *
+ * * | | *
+ * * | +-----+ +-----+ | *
+ * * | | MAC |...........| MAC | | *
+ * * +---+-----+-----------+-----+---+ *
+ * * | PHY |...........| PHY | *
+ * * +--++-+ +--++-+ *
+ * *************||****************||***********
+ *
+ * The Realtek RTL90xx series block above represents the entire chip: the
+ * GMAC is connected to the switch core, and there is no PHY in between.
+ * In addition, this driver mainly controls the GMAC; it does not control
+ * the switch core, so it is not a DSA driver. Linux only plays the role
+ * of a normal leaf node in this model.
+ */
+
+#include <linux/crc32.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/in.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/mdio.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/prefetch.h>
+#include <linux/rtnetlink.h>
+#include <linux/tcp.h>
+#include <asm/irq.h>
+#include <net/ip6_checksum.h>
+#include <net/netdev_queues.h>
+#include <net/page_pool/helpers.h>
+#include <net/pkt_cls.h>
+
+#include "rtase.h"
+
+#define RTK_OPTS1_DEBUG_VALUE 0x0BADBEEF
+#define RTK_MAGIC_NUMBER 0x0BADBADBADBADBAD
+
+static const struct pci_device_id rtase_pci_tbl[] = {
+ {PCI_VDEVICE(REALTEK, 0x906A)},
+ {}
+};
+
+MODULE_DEVICE_TABLE(pci, rtase_pci_tbl);
+
+MODULE_AUTHOR("Realtek ARD Software Team");
+MODULE_DESCRIPTION("Network Driver for the PCIe interface of Realtek Automotive Ethernet Switch");
+MODULE_LICENSE("Dual BSD/GPL");
+
+struct rtase_counters {
+ __le64 tx_packets;
+ __le64 rx_packets;
+ __le64 tx_errors;
+ __le32 rx_errors;
+ __le16 rx_missed;
+ __le16 align_errors;
+ __le32 tx_one_collision;
+ __le32 tx_multi_collision;
+ __le64 rx_unicast;
+ __le64 rx_broadcast;
+ __le32 rx_multicast;
+ __le16 tx_aborted;
+ __le16 tx_underrun;
+} __packed;
+
+static void rtase_w8(const struct rtase_private *tp, u16 reg, u8 val8)
+{
+ writeb(val8, tp->mmio_addr + reg);
+}
+
+static void rtase_w16(const struct rtase_private *tp, u16 reg, u16 val16)
+{
+ writew(val16, tp->mmio_addr + reg);
+}
+
+static void rtase_w32(const struct rtase_private *tp, u16 reg, u32 val32)
+{
+ writel(val32, tp->mmio_addr + reg);
+}
+
+static u8 rtase_r8(const struct rtase_private *tp, u16 reg)
+{
+ return readb(tp->mmio_addr + reg);
+}
+
+static u16 rtase_r16(const struct rtase_private *tp, u16 reg)
+{
+ return readw(tp->mmio_addr + reg);
+}
+
+static u32 rtase_r32(const struct rtase_private *tp, u16 reg)
+{
+ return readl(tp->mmio_addr + reg);
+}
+
+static void rtase_free_desc(struct rtase_private *tp)
+{
+ struct pci_dev *pdev = tp->pdev;
+ u32 i;
+
+ for (i = 0; i < tp->func_tx_queue_num; i++) {
+ if (!tp->tx_ring[i].desc)
+ continue;
+
+ dma_free_coherent(&pdev->dev, RTASE_TX_RING_DESC_SIZE,
+ tp->tx_ring[i].desc,
+ tp->tx_ring[i].phy_addr);
+ tp->tx_ring[i].desc = NULL;
+ }
+
+ for (i = 0; i < tp->func_rx_queue_num; i++) {
+ if (!tp->rx_ring[i].desc)
+ continue;
+
+ dma_free_coherent(&pdev->dev, RTASE_RX_RING_DESC_SIZE,
+ tp->rx_ring[i].desc,
+ tp->rx_ring[i].phy_addr);
+ tp->rx_ring[i].desc = NULL;
+ }
+}
+
+static int rtase_alloc_desc(struct rtase_private *tp)
+{
+ struct pci_dev *pdev = tp->pdev;
+ u32 i;
+
+ /* rx and tx descriptors need 256-byte alignment.
+ * dma_alloc_coherent provides more than that.
+ */
+ for (i = 0; i < tp->func_tx_queue_num; i++) {
+ tp->tx_ring[i].desc =
+ dma_alloc_coherent(&pdev->dev,
+ RTASE_TX_RING_DESC_SIZE,
+ &tp->tx_ring[i].phy_addr,
+ GFP_KERNEL);
+ if (!tp->tx_ring[i].desc)
+ goto err_out;
+ }
+
+ for (i = 0; i < tp->func_rx_queue_num; i++) {
+ tp->rx_ring[i].desc =
+ dma_alloc_coherent(&pdev->dev,
+ RTASE_RX_RING_DESC_SIZE,
+ &tp->rx_ring[i].phy_addr,
+ GFP_KERNEL);
+ if (!tp->rx_ring[i].desc)
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ rtase_free_desc(tp);
+ return -ENOMEM;
+}
+
+static void rtase_unmap_tx_skb(struct pci_dev *pdev, u32 len,
+ struct rtase_tx_desc *desc)
+{
+ dma_unmap_single(&pdev->dev, le64_to_cpu(desc->addr), len,
+ DMA_TO_DEVICE);
+ desc->opts1 = cpu_to_le32(RTK_OPTS1_DEBUG_VALUE);
+ desc->opts2 = 0x00;
+ desc->addr = cpu_to_le64(RTK_MAGIC_NUMBER);
+}
+
+static void rtase_tx_clear_range(struct rtase_ring *ring, u32 start, u32 n)
+{
+ struct rtase_tx_desc *desc_base = ring->desc;
+ struct rtase_private *tp = ring->ivec->tp;
+ u32 i;
+
+ for (i = 0; i < n; i++) {
+ u32 entry = (start + i) % RTASE_NUM_DESC;
+ struct rtase_tx_desc *desc = desc_base + entry;
+ u32 len = ring->mis.len[entry];
+ struct sk_buff *skb;
+
+ if (len == 0)
+ continue;
+
+ rtase_unmap_tx_skb(tp->pdev, len, desc);
+ ring->mis.len[entry] = 0;
+ skb = ring->skbuff[entry];
+ if (!skb)
+ continue;
+
+ tp->stats.tx_dropped++;
+ dev_kfree_skb_any(skb);
+ ring->skbuff[entry] = NULL;
+ }
+}
+
+static void rtase_tx_clear(struct rtase_private *tp)
+{
+ struct rtase_ring *ring;
+ u16 i;
+
+ for (i = 0; i < tp->func_tx_queue_num; i++) {
+ ring = &tp->tx_ring[i];
+ rtase_tx_clear_range(ring, ring->dirty_idx, RTASE_NUM_DESC);
+ ring->cur_idx = 0;
+ ring->dirty_idx = 0;
+ }
+}
+
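+/* Hand an rx descriptor back to the NIC: preserve the ring-end marker, clear
+ * the status word and then set the OWN bit together with the buffer size.
+ */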
+static void rtase_mark_to_asic(union rtase_rx_desc *desc, u32 rx_buf_sz)
+{
+ u32 eor = le32_to_cpu(desc->desc_cmd.opts1) & RTASE_RING_END;
+
+ desc->desc_status.opts2 = 0;
+ /* force memory writes to complete before releasing descriptor */
+ dma_wmb();
+ WRITE_ONCE(desc->desc_cmd.opts1,
+ cpu_to_le32(RTASE_DESC_OWN | eor | rx_buf_sz));
+}
+
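+/* cur_idx and dirty_idx are free-running counters, so the number of tx
+ * descriptors still available is the ring size minus (cur_idx - dirty_idx).
+ */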
+static u32 rtase_tx_avail(struct rtase_ring *ring)
+{
+ return READ_ONCE(ring->dirty_idx) + RTASE_NUM_DESC -
+ READ_ONCE(ring->cur_idx);
+}
+
+static int tx_handler(struct rtase_ring *ring, int budget)
+{
+ const struct rtase_private *tp = ring->ivec->tp;
+ struct net_device *dev = tp->dev;
+ u32 dirty_tx, tx_left;
+ u32 bytes_compl = 0;
+ u32 pkts_compl = 0;
+ int workdone = 0;
+
+ dirty_tx = ring->dirty_idx;
+ tx_left = READ_ONCE(ring->cur_idx) - dirty_tx;
+
+ while (tx_left > 0) {
+ u32 entry = dirty_tx % RTASE_NUM_DESC;
+ struct rtase_tx_desc *desc = ring->desc +
+ sizeof(struct rtase_tx_desc) * entry;
+ u32 status;
+
+ status = le32_to_cpu(desc->opts1);
+
+ if (status & RTASE_DESC_OWN)
+ break;
+
+ rtase_unmap_tx_skb(tp->pdev, ring->mis.len[entry], desc);
+ ring->mis.len[entry] = 0;
+ if (ring->skbuff[entry]) {
+ pkts_compl++;
+ bytes_compl += ring->skbuff[entry]->len;
+ napi_consume_skb(ring->skbuff[entry], budget);
+ ring->skbuff[entry] = NULL;
+ }
+
+ dirty_tx++;
+ tx_left--;
+ workdone++;
+
+ if (workdone == RTASE_TX_BUDGET_DEFAULT)
+ break;
+ }
+
+ if (ring->dirty_idx != dirty_tx) {
+ dev_sw_netstats_tx_add(dev, pkts_compl, bytes_compl);
+ WRITE_ONCE(ring->dirty_idx, dirty_tx);
+
+ netif_subqueue_completed_wake(dev, ring->index, pkts_compl,
+ bytes_compl,
+ rtase_tx_avail(ring),
+ RTASE_TX_START_THRS);
+
+ if (ring->cur_idx != dirty_tx)
+ rtase_w8(tp, RTASE_TPPOLL, BIT(ring->index));
+ }
+
+ return 0;
+}
+
+static void rtase_tx_desc_init(struct rtase_private *tp, u16 idx)
+{
+ struct rtase_ring *ring = &tp->tx_ring[idx];
+ struct rtase_tx_desc *desc;
+ u32 i;
+
+ memset(ring->desc, 0x0, RTASE_TX_RING_DESC_SIZE);
+ memset(ring->skbuff, 0x0, sizeof(ring->skbuff));
+ ring->cur_idx = 0;
+ ring->dirty_idx = 0;
+ ring->index = idx;
+ ring->type = NETDEV_QUEUE_TYPE_TX;
+ ring->alloc_fail = 0;
+
+ for (i = 0; i < RTASE_NUM_DESC; i++) {
+ ring->mis.len[i] = 0;
+ if ((RTASE_NUM_DESC - 1) == i) {
+ desc = ring->desc + sizeof(struct rtase_tx_desc) * i;
+ desc->opts1 = cpu_to_le32(RTASE_RING_END);
+ }
+ }
+
+ ring->ring_handler = tx_handler;
+ if (idx < 4) {
+ ring->ivec = &tp->int_vector[idx];
+ list_add_tail(&ring->ring_entry,
+ &tp->int_vector[idx].ring_list);
+ } else {
+ ring->ivec = &tp->int_vector[0];
+ list_add_tail(&ring->ring_entry, &tp->int_vector[0].ring_list);
+ }
+
+ netif_queue_set_napi(tp->dev, ring->index,
+ ring->type, &ring->ivec->napi);
+}
+
+static void rtase_map_to_asic(union rtase_rx_desc *desc, dma_addr_t mapping,
+ u32 rx_buf_sz)
+{
+ desc->desc_cmd.addr = cpu_to_le64(mapping);
+
+ rtase_mark_to_asic(desc, rx_buf_sz);
+}
+
+static void rtase_make_unusable_by_asic(union rtase_rx_desc *desc)
+{
+ desc->desc_cmd.addr = cpu_to_le64(RTK_MAGIC_NUMBER);
+ desc->desc_cmd.opts1 &= ~cpu_to_le32(RTASE_DESC_OWN | RSVD_MASK);
+}
+
+static int rtase_alloc_rx_data_buf(struct rtase_ring *ring,
+ void **p_data_buf,
+ union rtase_rx_desc *desc,
+ dma_addr_t *rx_phy_addr)
+{
+ struct rtase_int_vector *ivec = ring->ivec;
+ const struct rtase_private *tp = ivec->tp;
+ dma_addr_t mapping;
+ struct page *page;
+
+ page = page_pool_dev_alloc_pages(tp->page_pool);
+ if (!page) {
+ ring->alloc_fail++;
+ goto err_out;
+ }
+
+ *p_data_buf = page_address(page);
+ mapping = page_pool_get_dma_addr(page);
+ *rx_phy_addr = mapping;
+ rtase_map_to_asic(desc, mapping, tp->rx_buf_sz);
+
+ return 0;
+
+err_out:
+ rtase_make_unusable_by_asic(desc);
+
+ return -ENOMEM;
+}
+
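+/* Walk the rx descriptors in [ring_start, ring_end), attach a page pool
+ * buffer to each empty slot and return how many entries were processed
+ * before the first allocation failure.
+ */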
+static u32 rtase_rx_ring_fill(struct rtase_ring *ring, u32 ring_start,
+ u32 ring_end)
+{
+ union rtase_rx_desc *desc_base = ring->desc;
+ u32 cur;
+
+ for (cur = ring_start; ring_end - cur > 0; cur++) {
+ u32 i = cur % RTASE_NUM_DESC;
+ union rtase_rx_desc *desc = desc_base + i;
+ int ret;
+
+ if (ring->data_buf[i])
+ continue;
+
+ ret = rtase_alloc_rx_data_buf(ring, &ring->data_buf[i], desc,
+ &ring->mis.data_phy_addr[i]);
+ if (ret)
+ break;
+ }
+
+ return cur - ring_start;
+}
+
+static void rtase_mark_as_last_descriptor(union rtase_rx_desc *desc)
+{
+ desc->desc_cmd.opts1 |= cpu_to_le32(RTASE_RING_END);
+}
+
+static void rtase_rx_ring_clear(struct page_pool *page_pool,
+ struct rtase_ring *ring)
+{
+ union rtase_rx_desc *desc;
+ struct page *page;
+ u32 i;
+
+ for (i = 0; i < RTASE_NUM_DESC; i++) {
+ desc = ring->desc + sizeof(union rtase_rx_desc) * i;
+ page = virt_to_head_page(ring->data_buf[i]);
+
+ if (ring->data_buf[i])
+ page_pool_put_full_page(page_pool, page, true);
+
+ rtase_make_unusable_by_asic(desc);
+ }
+}
+
+static int rtase_fragmented_frame(u32 status)
+{
+ return (status & (RTASE_RX_FIRST_FRAG | RTASE_RX_LAST_FRAG)) !=
+ (RTASE_RX_FIRST_FRAG | RTASE_RX_LAST_FRAG);
+}
+
+static void rtase_rx_csum(const struct rtase_private *tp, struct sk_buff *skb,
+ const union rtase_rx_desc *desc)
+{
+ u32 opts2 = le32_to_cpu(desc->desc_status.opts2);
+
+ /* rx csum offload */
+ if (((opts2 & RTASE_RX_V4F) && !(opts2 & RTASE_RX_IPF)) ||
+ (opts2 & RTASE_RX_V6F)) {
+ if (((opts2 & RTASE_RX_TCPT) && !(opts2 & RTASE_RX_TCPF)) ||
+ ((opts2 & RTASE_RX_UDPT) && !(opts2 & RTASE_RX_UDPF)))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb->ip_summed = CHECKSUM_NONE;
+ } else {
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+}
+
+static void rtase_rx_vlan_skb(union rtase_rx_desc *desc, struct sk_buff *skb)
+{
+ u32 opts2 = le32_to_cpu(desc->desc_status.opts2);
+
+ if (!(opts2 & RTASE_RX_VLAN_TAG))
+ return;
+
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ swab16(opts2 & RTASE_VLAN_TAG_MASK));
+}
+
+static void rtase_rx_skb(const struct rtase_ring *ring, struct sk_buff *skb)
+{
+ struct rtase_int_vector *ivec = ring->ivec;
+
+ napi_gro_receive(&ivec->napi, skb);
+}
+
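+/* Receive path: walk the ring until a descriptor is still owned by the NIC,
+ * build an skb around each page pool buffer, then refill the entries that
+ * were consumed.
+ */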
+static int rx_handler(struct rtase_ring *ring, int budget)
+{
+ union rtase_rx_desc *desc_base = ring->desc;
+ u32 pkt_size, cur_rx, delta, entry, status;
+ struct rtase_private *tp = ring->ivec->tp;
+ struct net_device *dev = tp->dev;
+ union rtase_rx_desc *desc;
+ struct sk_buff *skb;
+ int workdone = 0;
+
+ cur_rx = ring->cur_idx;
+ entry = cur_rx % RTASE_NUM_DESC;
+ desc = &desc_base[entry];
+
+ while (workdone < budget) {
+ status = le32_to_cpu(desc->desc_status.opts1);
+
+ if (status & RTASE_DESC_OWN)
+ break;
+
+ /* This barrier is needed to keep us from reading
+ * any other fields out of the rx descriptor until
+ * we know the status of RTASE_DESC_OWN
+ */
+ dma_rmb();
+
+ if (unlikely(status & RTASE_RX_RES)) {
+ if (net_ratelimit())
+ netdev_warn(dev, "Rx ERROR. status = %08x\n",
+ status);
+
+ tp->stats.rx_errors++;
+
+ if (status & (RTASE_RX_RWT | RTASE_RX_RUNT))
+ tp->stats.rx_length_errors++;
+
+ if (status & RTASE_RX_CRC)
+ tp->stats.rx_crc_errors++;
+
+ if (dev->features & NETIF_F_RXALL)
+ goto process_pkt;
+
+ rtase_mark_to_asic(desc, tp->rx_buf_sz);
+ goto skip_process_pkt;
+ }
+
+process_pkt:
+ pkt_size = status & RTASE_RX_PKT_SIZE_MASK;
+ if (likely(!(dev->features & NETIF_F_RXFCS)))
+ pkt_size -= ETH_FCS_LEN;
+
+ /* The driver does not support incoming fragmented frames.
+ * They are seen as a symptom of over-mtu sized frames.
+ */
+ if (unlikely(rtase_fragmented_frame(status))) {
+ tp->stats.rx_dropped++;
+ tp->stats.rx_length_errors++;
+ rtase_mark_to_asic(desc, tp->rx_buf_sz);
+ goto skip_process_pkt;
+ }
+
+ dma_sync_single_for_cpu(&tp->pdev->dev,
+ ring->mis.data_phy_addr[entry],
+ tp->rx_buf_sz, DMA_FROM_DEVICE);
+
+ skb = build_skb(ring->data_buf[entry], PAGE_SIZE);
+ if (!skb) {
+ tp->stats.rx_dropped++;
+ rtase_mark_to_asic(desc, tp->rx_buf_sz);
+ goto skip_process_pkt;
+ }
+ ring->data_buf[entry] = NULL;
+
+ if (dev->features & NETIF_F_RXCSUM)
+ rtase_rx_csum(tp, skb, desc);
+
+ skb_put(skb, pkt_size);
+ skb_mark_for_recycle(skb);
+ skb->protocol = eth_type_trans(skb, dev);
+
+ if (skb->pkt_type == PACKET_MULTICAST)
+ tp->stats.multicast++;
+
+ rtase_rx_vlan_skb(desc, skb);
+ rtase_rx_skb(ring, skb);
+
+ dev_sw_netstats_rx_add(dev, pkt_size);
+
+skip_process_pkt:
+ workdone++;
+ cur_rx++;
+ entry = cur_rx % RTASE_NUM_DESC;
+ desc = ring->desc + sizeof(union rtase_rx_desc) * entry;
+ }
+
+ ring->cur_idx = cur_rx;
+ delta = rtase_rx_ring_fill(ring, ring->dirty_idx, ring->cur_idx);
+ ring->dirty_idx += delta;
+
+ return workdone;
+}
+
+static void rtase_rx_desc_init(struct rtase_private *tp, u16 idx)
+{
+ struct rtase_ring *ring = &tp->rx_ring[idx];
+ u16 i;
+
+ memset(ring->desc, 0x0, RTASE_RX_RING_DESC_SIZE);
+ memset(ring->data_buf, 0x0, sizeof(ring->data_buf));
+ ring->cur_idx = 0;
+ ring->dirty_idx = 0;
+ ring->index = idx;
+ ring->type = NETDEV_QUEUE_TYPE_RX;
+ ring->alloc_fail = 0;
+
+ for (i = 0; i < RTASE_NUM_DESC; i++)
+ ring->mis.data_phy_addr[i] = 0;
+
+ ring->ring_handler = rx_handler;
+ ring->ivec = &tp->int_vector[idx];
+ netif_queue_set_napi(tp->dev, ring->index,
+ ring->type, &ring->ivec->napi);
+ list_add_tail(&ring->ring_entry, &tp->int_vector[idx].ring_list);
+}
+
+static void rtase_rx_clear(struct rtase_private *tp)
+{
+ u32 i;
+
+ for (i = 0; i < tp->func_rx_queue_num; i++)
+ rtase_rx_ring_clear(tp->page_pool, &tp->rx_ring[i]);
+
+ page_pool_destroy(tp->page_pool);
+ tp->page_pool = NULL;
+}
+
+static int rtase_init_ring(const struct net_device *dev)
+{
+ struct rtase_private *tp = netdev_priv(dev);
+ struct page_pool_params pp_params = { 0 };
+ struct page_pool *page_pool;
+ u32 num;
+ u16 i;
+
+ pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pp_params.order = 0;
+ pp_params.pool_size = RTASE_NUM_DESC * tp->func_rx_queue_num;
+ pp_params.nid = dev_to_node(&tp->pdev->dev);
+ pp_params.dev = &tp->pdev->dev;
+ pp_params.dma_dir = DMA_FROM_DEVICE;
+ pp_params.max_len = PAGE_SIZE;
+ pp_params.offset = 0;
+
+ page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(page_pool)) {
+ netdev_err(tp->dev, "failed to create page pool\n");
+ return -ENOMEM;
+ }
+
+ tp->page_pool = page_pool;
+
+ for (i = 0; i < tp->func_tx_queue_num; i++)
+ rtase_tx_desc_init(tp, i);
+
+ for (i = 0; i < tp->func_rx_queue_num; i++) {
+ rtase_rx_desc_init(tp, i);
+
+ num = rtase_rx_ring_fill(&tp->rx_ring[i], 0, RTASE_NUM_DESC);
+ if (num != RTASE_NUM_DESC)
+ goto err_out;
+
+ rtase_mark_as_last_descriptor(tp->rx_ring[i].desc +
+ sizeof(union rtase_rx_desc) *
+ (RTASE_NUM_DESC - 1));
+ }
+
+ return 0;
+
+err_out:
+ rtase_rx_clear(tp);
+ return -ENOMEM;
+}
+
+static void rtase_interrupt_mitigation(const struct rtase_private *tp)
+{
+ u32 i;
+
+ for (i = 0; i < tp->func_tx_queue_num; i++)
+ rtase_w16(tp, RTASE_INT_MITI_TX + i * 2, tp->tx_int_mit);
+
+ for (i = 0; i < tp->func_rx_queue_num; i++)
+ rtase_w16(tp, RTASE_INT_MITI_RX + i * 2, tp->rx_int_mit);
+}
+
+static void rtase_tally_counter_addr_fill(const struct rtase_private *tp)
+{
+ rtase_w32(tp, RTASE_DTCCR4, upper_32_bits(tp->tally_paddr));
+ rtase_w32(tp, RTASE_DTCCR0, lower_32_bits(tp->tally_paddr));
+}
+
+static void rtase_tally_counter_clear(const struct rtase_private *tp)
+{
+ u32 cmd = lower_32_bits(tp->tally_paddr);
+
+ rtase_w32(tp, RTASE_DTCCR4, upper_32_bits(tp->tally_paddr));
+ rtase_w32(tp, RTASE_DTCCR0, cmd | RTASE_COUNTER_RESET);
+}
+
+static void rtase_desc_addr_fill(const struct rtase_private *tp)
+{
+ const struct rtase_ring *ring;
+ u16 i, cmd, val;
+ int err;
+
+ for (i = 0; i < tp->func_tx_queue_num; i++) {
+ ring = &tp->tx_ring[i];
+
+ rtase_w32(tp, RTASE_TX_DESC_ADDR0,
+ lower_32_bits(ring->phy_addr));
+ rtase_w32(tp, RTASE_TX_DESC_ADDR4,
+ upper_32_bits(ring->phy_addr));
+
+ cmd = i | RTASE_TX_DESC_CMD_WE | RTASE_TX_DESC_CMD_CS;
+ rtase_w16(tp, RTASE_TX_DESC_COMMAND, cmd);
+
+ err = read_poll_timeout(rtase_r16, val,
+ !(val & RTASE_TX_DESC_CMD_CS), 10,
+ 1000, false, tp,
+ RTASE_TX_DESC_COMMAND);
+
+ if (err == -ETIMEDOUT)
+ netdev_err(tp->dev,
+ "error occurred in fill tx descriptor\n");
+ }
+
+ for (i = 0; i < tp->func_rx_queue_num; i++) {
+ ring = &tp->rx_ring[i];
+
+ if (i == 0) {
+ rtase_w32(tp, RTASE_Q0_RX_DESC_ADDR0,
+ lower_32_bits(ring->phy_addr));
+ rtase_w32(tp, RTASE_Q0_RX_DESC_ADDR4,
+ upper_32_bits(ring->phy_addr));
+ } else {
+ rtase_w32(tp, (RTASE_Q1_RX_DESC_ADDR0 + ((i - 1) * 8)),
+ lower_32_bits(ring->phy_addr));
+ rtase_w32(tp, (RTASE_Q1_RX_DESC_ADDR4 + ((i - 1) * 8)),
+ upper_32_bits(ring->phy_addr));
+ }
+ }
+}
+
+static void rtase_hw_set_features(const struct net_device *dev,
+ netdev_features_t features)
+{
+ const struct rtase_private *tp = netdev_priv(dev);
+ u16 rx_config, val;
+
+ rx_config = rtase_r16(tp, RTASE_RX_CONFIG_0);
+ if (features & NETIF_F_RXALL)
+ rx_config |= (RTASE_ACCEPT_ERR | RTASE_ACCEPT_RUNT);
+ else
+ rx_config &= ~(RTASE_ACCEPT_ERR | RTASE_ACCEPT_RUNT);
+
+ rtase_w16(tp, RTASE_RX_CONFIG_0, rx_config);
+
+ val = rtase_r16(tp, RTASE_CPLUS_CMD);
+ if (features & NETIF_F_RXCSUM)
+ rtase_w16(tp, RTASE_CPLUS_CMD, val | RTASE_RX_CHKSUM);
+ else
+ rtase_w16(tp, RTASE_CPLUS_CMD, val & ~RTASE_RX_CHKSUM);
+
+ rx_config = rtase_r16(tp, RTASE_RX_CONFIG_1);
+ if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ rx_config |= (RTASE_INNER_VLAN_DETAG_EN |
+ RTASE_OUTER_VLAN_DETAG_EN);
+ else
+ rx_config &= ~(RTASE_INNER_VLAN_DETAG_EN |
+ RTASE_OUTER_VLAN_DETAG_EN);
+
+ rtase_w16(tp, RTASE_RX_CONFIG_1, rx_config);
+}
+
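+/* The multicast filter is a 64-bit hash split across two registers: the CRC
+ * of each address selects the bit, and bit 31 of the CRC selects the word.
+ */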
+static void rtase_hw_set_rx_packet_filter(struct net_device *dev)
+{
+ u32 mc_filter[2] = { 0xFFFFFFFF, 0xFFFFFFFF };
+ struct rtase_private *tp = netdev_priv(dev);
+ u16 rx_mode;
+
+ rx_mode = rtase_r16(tp, RTASE_RX_CONFIG_0) & ~RTASE_ACCEPT_MASK;
+ rx_mode |= RTASE_ACCEPT_BROADCAST | RTASE_ACCEPT_MYPHYS;
+
+ if (dev->flags & IFF_PROMISC) {
+ rx_mode |= RTASE_ACCEPT_MULTICAST | RTASE_ACCEPT_ALLPHYS;
+ } else if (dev->flags & IFF_ALLMULTI) {
+ rx_mode |= RTASE_ACCEPT_MULTICAST;
+ } else {
+ struct netdev_hw_addr *hw_addr;
+
+ mc_filter[0] = 0;
+ mc_filter[1] = 0;
+
+ netdev_for_each_mc_addr(hw_addr, dev) {
+ u32 bit_nr = eth_hw_addr_crc(hw_addr);
+ u32 idx = u32_get_bits(bit_nr, BIT(31));
+ u32 bit = u32_get_bits(bit_nr,
+ RTASE_MULTICAST_FILTER_MASK);
+
+ mc_filter[idx] |= BIT(bit);
+ rx_mode |= RTASE_ACCEPT_MULTICAST;
+ }
+ }
+
+ if (dev->features & NETIF_F_RXALL)
+ rx_mode |= RTASE_ACCEPT_ERR | RTASE_ACCEPT_RUNT;
+
+ rtase_w32(tp, RTASE_MAR0, swab32(mc_filter[1]));
+ rtase_w32(tp, RTASE_MAR1, swab32(mc_filter[0]));
+ rtase_w16(tp, RTASE_RX_CONFIG_0, rx_mode);
+}
+
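+/* Mask every interrupt vector and acknowledge any status bits already
+ * pending; vector 0 uses 32-bit registers, the queue vectors 16-bit ones.
+ */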
+static void rtase_irq_dis_and_clear(const struct rtase_private *tp)
+{
+ const struct rtase_int_vector *ivec = &tp->int_vector[0];
+ u32 val1;
+ u16 val2;
+ u8 i;
+
+ rtase_w32(tp, ivec->imr_addr, 0);
+ val1 = rtase_r32(tp, ivec->isr_addr);
+ rtase_w32(tp, ivec->isr_addr, val1);
+
+ for (i = 1; i < tp->int_nums; i++) {
+ ivec = &tp->int_vector[i];
+ rtase_w16(tp, ivec->imr_addr, 0);
+ val2 = rtase_r16(tp, ivec->isr_addr);
+ rtase_w16(tp, ivec->isr_addr, val2);
+ }
+}
+
+static void rtase_poll_timeout(const struct rtase_private *tp, u32 cond,
+ u32 sleep_us, u64 timeout_us, u16 reg)
+{
+ int err;
+ u8 val;
+
+ err = read_poll_timeout(rtase_r8, val, val & cond, sleep_us,
+ timeout_us, false, tp, reg);
+
+ if (err == -ETIMEDOUT)
+ netdev_err(tp->dev, "poll reg 0x%04x timeout\n", reg);
+}
+
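+/* Quiesce the NIC: stop accepting packets, issue a stop request, wait for
+ * the tx/rx FIFOs to drain and then clear the tx/rx enable bits.
+ */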
+static void rtase_nic_reset(const struct net_device *dev)
+{
+ const struct rtase_private *tp = netdev_priv(dev);
+ u16 rx_config;
+ u8 val;
+
+ rx_config = rtase_r16(tp, RTASE_RX_CONFIG_0);
+ rtase_w16(tp, RTASE_RX_CONFIG_0, rx_config & ~RTASE_ACCEPT_MASK);
+
+ val = rtase_r8(tp, RTASE_MISC);
+ rtase_w8(tp, RTASE_MISC, val | RTASE_RX_DV_GATE_EN);
+
+ val = rtase_r8(tp, RTASE_CHIP_CMD);
+ rtase_w8(tp, RTASE_CHIP_CMD, val | RTASE_STOP_REQ);
+ mdelay(2);
+
+ rtase_poll_timeout(tp, RTASE_STOP_REQ_DONE, 100, 150000,
+ RTASE_CHIP_CMD);
+
+ rtase_poll_timeout(tp, RTASE_TX_FIFO_EMPTY, 100, 100000,
+ RTASE_FIFOR);
+
+ rtase_poll_timeout(tp, RTASE_RX_FIFO_EMPTY, 100, 100000,
+ RTASE_FIFOR);
+
+ val = rtase_r8(tp, RTASE_CHIP_CMD);
+ rtase_w8(tp, RTASE_CHIP_CMD, val & ~(RTASE_TE | RTASE_RE));
+ val = rtase_r8(tp, RTASE_CHIP_CMD);
+ rtase_w8(tp, RTASE_CHIP_CMD, val & ~RTASE_STOP_REQ);
+
+ rtase_w16(tp, RTASE_RX_CONFIG_0, rx_config);
+}
+
+static void rtase_hw_reset(const struct net_device *dev)
+{
+ const struct rtase_private *tp = netdev_priv(dev);
+
+ rtase_irq_dis_and_clear(tp);
+
+ rtase_nic_reset(dev);
+}
+
+static void rtase_set_rx_queue(const struct rtase_private *tp)
+{
+ u16 reg_data;
+
+ reg_data = rtase_r16(tp, RTASE_FCR);
+ switch (tp->func_rx_queue_num) {
+ case 1:
+ u16p_replace_bits(&reg_data, 0x1, RTASE_FCR_RXQ_MASK);
+ break;
+ case 2:
+ u16p_replace_bits(&reg_data, 0x2, RTASE_FCR_RXQ_MASK);
+ break;
+ case 4:
+ u16p_replace_bits(&reg_data, 0x3, RTASE_FCR_RXQ_MASK);
+ break;
+ }
+ rtase_w16(tp, RTASE_FCR, reg_data);
+}
+
+static void rtase_set_tx_queue(const struct rtase_private *tp)
+{
+ u16 reg_data;
+
+ reg_data = rtase_r16(tp, RTASE_TX_CONFIG_1);
+ switch (tp->tx_queue_ctrl) {
+ case 1:
+ u16p_replace_bits(&reg_data, 0x0, RTASE_TC_MODE_MASK);
+ break;
+ case 2:
+ u16p_replace_bits(&reg_data, 0x1, RTASE_TC_MODE_MASK);
+ break;
+ case 3:
+ case 4:
+ u16p_replace_bits(&reg_data, 0x2, RTASE_TC_MODE_MASK);
+ break;
+ default:
+ u16p_replace_bits(&reg_data, 0x3, RTASE_TC_MODE_MASK);
+ break;
+ }
+ rtase_w16(tp, RTASE_TX_CONFIG_1, reg_data);
+}
+
+static void rtase_hw_config(struct net_device *dev)
+{
+ const struct rtase_private *tp = netdev_priv(dev);
+ u32 reg_data32;
+ u16 reg_data16;
+
+ rtase_hw_reset(dev);
+
+ /* set rx dma burst */
+ reg_data16 = rtase_r16(tp, RTASE_RX_CONFIG_0);
+ reg_data16 &= ~(RTASE_RX_SINGLE_TAG | RTASE_RX_SINGLE_FETCH);
+ u16p_replace_bits(&reg_data16, RTASE_RX_DMA_BURST_256,
+ RTASE_RX_MX_DMA_MASK);
+ rtase_w16(tp, RTASE_RX_CONFIG_0, reg_data16);
+
+ /* new rx descriptor */
+ reg_data16 = rtase_r16(tp, RTASE_RX_CONFIG_1);
+ reg_data16 |= RTASE_RX_NEW_DESC_FORMAT_EN | RTASE_PCIE_NEW_FLOW;
+ u16p_replace_bits(&reg_data16, 0xF, RTASE_RX_MAX_FETCH_DESC_MASK);
+ rtase_w16(tp, RTASE_RX_CONFIG_1, reg_data16);
+
+ rtase_set_rx_queue(tp);
+
+ rtase_interrupt_mitigation(tp);
+
+ /* set tx dma burst size and interframe gap time */
+ reg_data32 = rtase_r32(tp, RTASE_TX_CONFIG_0);
+ u32p_replace_bits(&reg_data32, RTASE_TX_DMA_BURST_UNLIMITED,
+ RTASE_TX_DMA_MASK);
+ u32p_replace_bits(&reg_data32, RTASE_INTERFRAMEGAP,
+ RTASE_TX_INTER_FRAME_GAP_MASK);
+ rtase_w32(tp, RTASE_TX_CONFIG_0, reg_data32);
+
+ /* new tx descriptor */
+ reg_data16 = rtase_r16(tp, RTASE_TFUN_CTRL);
+ rtase_w16(tp, RTASE_TFUN_CTRL, reg_data16 |
+ RTASE_TX_NEW_DESC_FORMAT_EN);
+
+ /* tx fetch desc number */
+ rtase_w8(tp, RTASE_TDFNR, 0x10);
+
+ /* tag num select */
+ reg_data16 = rtase_r16(tp, RTASE_MTPS);
+ u16p_replace_bits(&reg_data16, 0x4, RTASE_TAG_NUM_SEL_MASK);
+ rtase_w16(tp, RTASE_MTPS, reg_data16);
+
+ rtase_set_tx_queue(tp);
+
+ rtase_w16(tp, RTASE_TOKSEL, 0x5555);
+
+ rtase_tally_counter_addr_fill(tp);
+ rtase_desc_addr_fill(tp);
+ rtase_hw_set_features(dev, dev->features);
+
+ /* enable flow control */
+ reg_data16 = rtase_r16(tp, RTASE_CPLUS_CMD);
+ reg_data16 |= (RTASE_FORCE_TXFLOW_EN | RTASE_FORCE_RXFLOW_EN);
+ rtase_w16(tp, RTASE_CPLUS_CMD, reg_data16);
+ /* set rx fifo near-full threshold to mitigate the rx missed issue */
+ rtase_w16(tp, RTASE_RFIFONFULL, 0x190);
+
+ rtase_w16(tp, RTASE_RMS, tp->rx_buf_sz);
+
+ rtase_hw_set_rx_packet_filter(dev);
+}
+
+static void rtase_nic_enable(const struct net_device *dev)
+{
+ const struct rtase_private *tp = netdev_priv(dev);
+ u16 rcr = rtase_r16(tp, RTASE_RX_CONFIG_1);
+ u8 val;
+
+ rtase_w16(tp, RTASE_RX_CONFIG_1, rcr & ~RTASE_PCIE_RELOAD_EN);
+ rtase_w16(tp, RTASE_RX_CONFIG_1, rcr | RTASE_PCIE_RELOAD_EN);
+
+ val = rtase_r8(tp, RTASE_CHIP_CMD);
+ rtase_w8(tp, RTASE_CHIP_CMD, val | RTASE_TE | RTASE_RE);
+
+ val = rtase_r8(tp, RTASE_MISC);
+ rtase_w8(tp, RTASE_MISC, val & ~RTASE_RX_DV_GATE_EN);
+}
+
+static void rtase_enable_hw_interrupt(const struct rtase_private *tp)
+{
+ const struct rtase_int_vector *ivec = &tp->int_vector[0];
+ u32 i;
+
+ rtase_w32(tp, ivec->imr_addr, ivec->imr);
+
+ for (i = 1; i < tp->int_nums; i++) {
+ ivec = &tp->int_vector[i];
+ rtase_w16(tp, ivec->imr_addr, ivec->imr);
+ }
+}
+
+static void rtase_hw_start(const struct net_device *dev)
+{
+ const struct rtase_private *tp = netdev_priv(dev);
+
+ rtase_nic_enable(dev);
+ rtase_enable_hw_interrupt(tp);
+}
+
+/* the interrupt handler handles the RXQ0, TXQ0 and TXQ4~7 interrupt status */
+static irqreturn_t rtase_interrupt(int irq, void *dev_instance)
+{
+ const struct rtase_private *tp;
+ struct rtase_int_vector *ivec;
+ u32 status;
+
+ ivec = dev_instance;
+ tp = ivec->tp;
+ status = rtase_r32(tp, ivec->isr_addr);
+
+ rtase_w32(tp, ivec->imr_addr, 0x0);
+ rtase_w32(tp, ivec->isr_addr, status & ~RTASE_FOVW);
+
+ if (napi_schedule_prep(&ivec->napi))
+ __napi_schedule(&ivec->napi);
+
+ return IRQ_HANDLED;
+}
+
+/* the interrupt handler handles the RXQ1&TXQ1, RXQ2&TXQ2 or RXQ3&TXQ3
+ * interrupt status according to the interrupt vector
+ */
+static irqreturn_t rtase_q_interrupt(int irq, void *dev_instance)
+{
+ const struct rtase_private *tp;
+ struct rtase_int_vector *ivec;
+ u16 status;
+
+ ivec = dev_instance;
+ tp = ivec->tp;
+ status = rtase_r16(tp, ivec->isr_addr);
+
+ rtase_w16(tp, ivec->imr_addr, 0x0);
+ rtase_w16(tp, ivec->isr_addr, status);
+
+ if (napi_schedule_prep(&ivec->napi))
+ __napi_schedule(&ivec->napi);
+
+ return IRQ_HANDLED;
+}
+
+static int rtase_poll(struct napi_struct *napi, int budget)
+{
+ const struct rtase_int_vector *ivec;
+ const struct rtase_private *tp;
+ struct rtase_ring *ring;
+ int total_workdone = 0;
+
+ ivec = container_of(napi, struct rtase_int_vector, napi);
+ tp = ivec->tp;
+
+ list_for_each_entry(ring, &ivec->ring_list, ring_entry)
+ total_workdone += ring->ring_handler(ring, budget);
+
+ if (total_workdone >= budget)
+ return budget;
+
+ if (napi_complete_done(napi, total_workdone)) {
+ if (!ivec->index)
+ rtase_w32(tp, ivec->imr_addr, ivec->imr);
+ else
+ rtase_w16(tp, ivec->imr_addr, ivec->imr);
+ }
+
+ return total_workdone;
+}
+
+static int rtase_open(struct net_device *dev)
+{
+ struct rtase_private *tp = netdev_priv(dev);
+ const struct pci_dev *pdev = tp->pdev;
+ struct rtase_int_vector *ivec;
+ u16 i = 0, j;
+ int ret;
+
+ ivec = &tp->int_vector[0];
+ tp->rx_buf_sz = RTASE_RX_BUF_SIZE;
+
+ ret = rtase_alloc_desc(tp);
+ if (ret)
+ return ret;
+
+ ret = rtase_init_ring(dev);
+ if (ret)
+ goto err_free_all_allocated_mem;
+
+ rtase_hw_config(dev);
+
+ if (tp->sw_flag & RTASE_SWF_MSIX_ENABLED) {
+ ret = request_irq(ivec->irq, rtase_interrupt, 0,
+ dev->name, ivec);
+ if (ret)
+ goto err_free_all_allocated_irq;
+
+ /* request other interrupts to handle multiqueue */
+ for (i = 1; i < tp->int_nums; i++) {
+ ivec = &tp->int_vector[i];
+ snprintf(ivec->name, sizeof(ivec->name), "%s_int%u",
+ tp->dev->name, i);
+ ret = request_irq(ivec->irq, rtase_q_interrupt, 0,
+ ivec->name, ivec);
+ if (ret)
+ goto err_free_all_allocated_irq;
+ }
+ } else {
+ ret = request_irq(pdev->irq, rtase_interrupt, 0, dev->name,
+ ivec);
+ if (ret)
+ goto err_free_all_allocated_mem;
+ }
+
+ rtase_hw_start(dev);
+
+ for (i = 0; i < tp->int_nums; i++) {
+ ivec = &tp->int_vector[i];
+ napi_enable(&ivec->napi);
+ }
+
+ netif_carrier_on(dev);
+ netif_wake_queue(dev);
+
+ return 0;
+
+err_free_all_allocated_irq:
+ for (j = 0; j < i; j++)
+ free_irq(tp->int_vector[j].irq, &tp->int_vector[j]);
+
+err_free_all_allocated_mem:
+ rtase_free_desc(tp);
+
+ return ret;
+}
+
+static void rtase_down(struct net_device *dev)
+{
+ struct rtase_private *tp = netdev_priv(dev);
+ struct rtase_int_vector *ivec;
+ struct rtase_ring *ring, *tmp;
+ u32 i;
+
+ for (i = 0; i < tp->int_nums; i++) {
+ ivec = &tp->int_vector[i];
+ napi_disable(&ivec->napi);
+ list_for_each_entry_safe(ring, tmp, &ivec->ring_list,
+ ring_entry) {
+ netif_queue_set_napi(tp->dev, ring->index,
+ ring->type, NULL);
+
+ list_del(&ring->ring_entry);
+ }
+ }
+
+ netif_tx_disable(dev);
+
+ netif_carrier_off(dev);
+
+ rtase_hw_reset(dev);
+
+ rtase_tx_clear(tp);
+
+ rtase_rx_clear(tp);
+}
+
+static int rtase_close(struct net_device *dev)
+{
+ struct rtase_private *tp = netdev_priv(dev);
+ const struct pci_dev *pdev = tp->pdev;
+ u32 i;
+
+ rtase_down(dev);
+
+ if (tp->sw_flag & RTASE_SWF_MSIX_ENABLED) {
+ for (i = 0; i < tp->int_nums; i++)
+ free_irq(tp->int_vector[i].irq, &tp->int_vector[i]);
+
+ } else {
+ free_irq(pdev->irq, &tp->int_vector[0]);
+ }
+
+ rtase_free_desc(tp);
+
+ return 0;
+}
+
+static u32 rtase_tx_vlan_tag(const struct rtase_private *tp,
+ const struct sk_buff *skb)
+{
+ return (skb_vlan_tag_present(skb)) ?
+ (RTASE_TX_VLAN_TAG | swab16(skb_vlan_tag_get(skb))) : 0x00;
+}
+
+static u32 rtase_tx_csum(struct sk_buff *skb, const struct net_device *dev)
+{
+ u32 csum_cmd = 0;
+ u8 ip_protocol;
+
+ switch (vlan_get_protocol(skb)) {
+ case htons(ETH_P_IP):
+ csum_cmd = RTASE_TX_IPCS_C;
+ ip_protocol = ip_hdr(skb)->protocol;
+ break;
+
+ case htons(ETH_P_IPV6):
+ csum_cmd = RTASE_TX_IPV6F_C;
+ ip_protocol = ipv6_hdr(skb)->nexthdr;
+ break;
+
+ default:
+ ip_protocol = IPPROTO_RAW;
+ break;
+ }
+
+ if (ip_protocol == IPPROTO_TCP)
+ csum_cmd |= RTASE_TX_TCPCS_C;
+ else if (ip_protocol == IPPROTO_UDP)
+ csum_cmd |= RTASE_TX_UDPCS_C;
+
+ csum_cmd |= u32_encode_bits(skb_transport_offset(skb),
+ RTASE_TCPHO_MASK);
+
+ return csum_cmd;
+}
+
+static int rtase_xmit_frags(struct rtase_ring *ring, struct sk_buff *skb,
+ u32 opts1, u32 opts2)
+{
+ const struct skb_shared_info *info = skb_shinfo(skb);
+ const struct rtase_private *tp = ring->ivec->tp;
+ const u8 nr_frags = info->nr_frags;
+ struct rtase_tx_desc *txd = NULL;
+ u32 cur_frag, entry;
+
+ entry = ring->cur_idx;
+ for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
+ const skb_frag_t *frag = &info->frags[cur_frag];
+ dma_addr_t mapping;
+ u32 status, len;
+ void *addr;
+
+ entry = (entry + 1) % RTASE_NUM_DESC;
+
+ txd = ring->desc + sizeof(struct rtase_tx_desc) * entry;
+ len = skb_frag_size(frag);
+ addr = skb_frag_address(frag);
+ mapping = dma_map_single(&tp->pdev->dev, addr, len,
+ DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) {
+ if (unlikely(net_ratelimit()))
+ netdev_err(tp->dev,
+ "Failed to map TX fragments DMA!\n");
+
+ goto err_out;
+ }
+
+ if (((entry + 1) % RTASE_NUM_DESC) == 0)
+ status = (opts1 | len | RTASE_RING_END);
+ else
+ status = opts1 | len;
+
+ if (cur_frag == (nr_frags - 1)) {
+ ring->skbuff[entry] = skb;
+ status |= RTASE_TX_LAST_FRAG;
+ }
+
+ ring->mis.len[entry] = len;
+ txd->addr = cpu_to_le64(mapping);
+ txd->opts2 = cpu_to_le32(opts2);
+
+ /* make sure the operating fields have been updated */
+ dma_wmb();
+ txd->opts1 = cpu_to_le32(status);
+ }
+
+ return cur_frag;
+
+err_out:
+ rtase_tx_clear_range(ring, ring->cur_idx + 1, cur_frag);
+ return -EIO;
+}
+
+static netdev_tx_t rtase_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+ struct rtase_private *tp = netdev_priv(dev);
+ u32 q_idx, entry, len, opts1, opts2;
+ struct netdev_queue *tx_queue;
+ bool stop_queue, door_bell;
+ u32 mss = shinfo->gso_size;
+ struct rtase_tx_desc *txd;
+ struct rtase_ring *ring;
+ dma_addr_t mapping;
+ int frags;
+
+ /* multiqueues */
+ q_idx = skb_get_queue_mapping(skb);
+ ring = &tp->tx_ring[q_idx];
+ tx_queue = netdev_get_tx_queue(dev, q_idx);
+
+ if (unlikely(!rtase_tx_avail(ring))) {
+ if (net_ratelimit())
+ netdev_err(dev,
+ "BUG! Tx Ring full when queue awake!\n");
+
+ netif_stop_queue(dev);
+ return NETDEV_TX_BUSY;
+ }
+
+ entry = ring->cur_idx % RTASE_NUM_DESC;
+ txd = ring->desc + sizeof(struct rtase_tx_desc) * entry;
+
+ opts1 = RTASE_DESC_OWN;
+ opts2 = rtase_tx_vlan_tag(tp, skb);
+
+ /* tcp segmentation offload (or tcp large send) */
+ if (mss) {
+ if (shinfo->gso_type & SKB_GSO_TCPV4) {
+ opts1 |= RTASE_GIANT_SEND_V4;
+ } else if (shinfo->gso_type & SKB_GSO_TCPV6) {
+ if (skb_cow_head(skb, 0))
+ goto err_dma_0;
+
+ tcp_v6_gso_csum_prep(skb);
+ opts1 |= RTASE_GIANT_SEND_V6;
+ } else {
+ WARN_ON_ONCE(1);
+ }
+
+ opts1 |= u32_encode_bits(skb_transport_offset(skb),
+ RTASE_TCPHO_MASK);
+ opts2 |= u32_encode_bits(mss, RTASE_MSS_MASK);
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ opts2 |= rtase_tx_csum(skb, dev);
+ }
+
+ frags = rtase_xmit_frags(ring, skb, opts1, opts2);
+ if (unlikely(frags < 0))
+ goto err_dma_0;
+
+ if (frags) {
+ len = skb_headlen(skb);
+ opts1 |= RTASE_TX_FIRST_FRAG;
+ } else {
+ len = skb->len;
+ ring->skbuff[entry] = skb;
+ opts1 |= RTASE_TX_FIRST_FRAG | RTASE_TX_LAST_FRAG;
+ }
+
+ if (((entry + 1) % RTASE_NUM_DESC) == 0)
+ opts1 |= (len | RTASE_RING_END);
+ else
+ opts1 |= len;
+
+ mapping = dma_map_single(&tp->pdev->dev, skb->data, len,
+ DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) {
+ if (unlikely(net_ratelimit()))
+ netdev_err(dev, "Failed to map TX DMA!\n");
+
+ goto err_dma_1;
+ }
+
+ ring->mis.len[entry] = len;
+ txd->addr = cpu_to_le64(mapping);
+ txd->opts2 = cpu_to_le32(opts2);
+ txd->opts1 = cpu_to_le32(opts1 & ~RTASE_DESC_OWN);
+
+ /* make sure the operating fields have been updated */
+ dma_wmb();
+
+ door_bell = __netdev_tx_sent_queue(tx_queue, skb->len,
+ netdev_xmit_more());
+
+ txd->opts1 = cpu_to_le32(opts1);
+
+ skb_tx_timestamp(skb);
+
+ /* tx needs to see the descriptor changes before the updated cur_idx */
+ smp_wmb();
+
+ WRITE_ONCE(ring->cur_idx, ring->cur_idx + frags + 1);
+
+ stop_queue = !netif_subqueue_maybe_stop(dev, ring->index,
+ rtase_tx_avail(ring),
+ RTASE_TX_STOP_THRS,
+ RTASE_TX_START_THRS);
+
+ if (door_bell || stop_queue)
+ rtase_w8(tp, RTASE_TPPOLL, BIT(ring->index));
+
+ return NETDEV_TX_OK;
+
+err_dma_1:
+ ring->skbuff[entry] = NULL;
+ rtase_tx_clear_range(ring, ring->cur_idx + 1, frags);
+
+err_dma_0:
+ tp->stats.tx_dropped++;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+static void rtase_set_rx_mode(struct net_device *dev)
+{
+ rtase_hw_set_rx_packet_filter(dev);
+}
+
+static void rtase_enable_eem_write(const struct rtase_private *tp)
+{
+ u8 val;
+
+ val = rtase_r8(tp, RTASE_EEM);
+ rtase_w8(tp, RTASE_EEM, val | RTASE_EEM_UNLOCK);
+}
+
+static void rtase_disable_eem_write(const struct rtase_private *tp)
+{
+ u8 val;
+
+ val = rtase_r8(tp, RTASE_EEM);
+ rtase_w8(tp, RTASE_EEM, val & ~RTASE_EEM_UNLOCK);
+}
+
+static void rtase_rar_set(const struct rtase_private *tp, const u8 *addr)
+{
+ u32 rar_low, rar_high;
+
+ rar_low = (u32)addr[0] | ((u32)addr[1] << 8) |
+ ((u32)addr[2] << 16) | ((u32)addr[3] << 24);
+
+ rar_high = (u32)addr[4] | ((u32)addr[5] << 8);
+
+ rtase_enable_eem_write(tp);
+ rtase_w32(tp, RTASE_MAC0, rar_low);
+ rtase_w32(tp, RTASE_MAC4, rar_high);
+ rtase_disable_eem_write(tp);
+ rtase_w16(tp, RTASE_LBK_CTRL, RTASE_LBK_ATLD | RTASE_LBK_CLR);
+}
+
+static int rtase_set_mac_address(struct net_device *dev, void *p)
+{
+ struct rtase_private *tp = netdev_priv(dev);
+ int ret;
+
+ ret = eth_mac_addr(dev, p);
+ if (ret)
+ return ret;
+
+ rtase_rar_set(tp, dev->dev_addr);
+
+ return 0;
+}
+
+static int rtase_change_mtu(struct net_device *dev, int new_mtu)
+{
+ dev->mtu = new_mtu;
+
+ netdev_update_features(dev);
+
+ return 0;
+}
+
+static void rtase_wait_for_quiescence(const struct net_device *dev)
+{
+ struct rtase_private *tp = netdev_priv(dev);
+ struct rtase_int_vector *ivec;
+ u32 i;
+
+ for (i = 0; i < tp->int_nums; i++) {
+ ivec = &tp->int_vector[i];
+ synchronize_irq(ivec->irq);
+ /* wait for any pending NAPI task to complete */
+ napi_disable(&ivec->napi);
+ }
+
+ rtase_irq_dis_and_clear(tp);
+
+ for (i = 0; i < tp->int_nums; i++) {
+ ivec = &tp->int_vector[i];
+ napi_enable(&ivec->napi);
+ }
+}
+
+static void rtase_sw_reset(struct net_device *dev)
+{
+ struct rtase_private *tp = netdev_priv(dev);
+ struct rtase_ring *ring, *tmp;
+ struct rtase_int_vector *ivec;
+ int ret;
+ u32 i;
+
+ netif_stop_queue(dev);
+ netif_carrier_off(dev);
+ rtase_hw_reset(dev);
+
+ /* wait until any pending (async) irq has been handled */
+ rtase_wait_for_quiescence(dev);
+ rtase_tx_clear(tp);
+ rtase_rx_clear(tp);
+
+ for (i = 0; i < tp->int_nums; i++) {
+ ivec = &tp->int_vector[i];
+ list_for_each_entry_safe(ring, tmp, &ivec->ring_list,
+ ring_entry) {
+ netif_queue_set_napi(tp->dev, ring->index,
+ ring->type, NULL);
+
+ list_del(&ring->ring_entry);
+ }
+ }
+
+ ret = rtase_init_ring(dev);
+ if (ret) {
+ netdev_err(dev, "unable to init ring\n");
+ rtase_free_desc(tp);
+ return;
+ }
+
+ rtase_hw_config(dev);
+ /* always link, so start to transmit & receive */
+ rtase_hw_start(dev);
+
+ netif_carrier_on(dev);
+ netif_wake_queue(dev);
+}
+
+static void rtase_dump_tally_counter(const struct rtase_private *tp)
+{
+ dma_addr_t paddr = tp->tally_paddr;
+ u32 cmd = lower_32_bits(paddr);
+ u32 val;
+ int err;
+
+ rtase_w32(tp, RTASE_DTCCR4, upper_32_bits(paddr));
+ rtase_w32(tp, RTASE_DTCCR0, cmd);
+ rtase_w32(tp, RTASE_DTCCR0, cmd | RTASE_COUNTER_DUMP);
+
+ err = read_poll_timeout(rtase_r32, val, !(val & RTASE_COUNTER_DUMP),
+ 10, 250, false, tp, RTASE_DTCCR0);
+
+ if (err == -ETIMEDOUT)
+ netdev_err(tp->dev, "error occurred in dump tally counter\n");
+}
+
+static void rtase_dump_state(const struct net_device *dev)
+{
+ const struct rtase_private *tp = netdev_priv(dev);
+ int max_reg_size = RTASE_PCI_REGS_SIZE;
+ const struct rtase_counters *counters;
+ const struct rtase_ring *ring;
+ u32 dword_rd;
+ int n = 0;
+
+ ring = &tp->tx_ring[0];
+ netdev_err(dev, "Tx descriptor info:\n");
+ netdev_err(dev, "Tx curIdx = 0x%x\n", ring->cur_idx);
+ netdev_err(dev, "Tx dirtyIdx = 0x%x\n", ring->dirty_idx);
+ netdev_err(dev, "Tx phyAddr = %pad\n", &ring->phy_addr);
+
+ ring = &tp->rx_ring[0];
+ netdev_err(dev, "Rx descriptor info:\n");
+ netdev_err(dev, "Rx curIdx = 0x%x\n", ring->cur_idx);
+ netdev_err(dev, "Rx dirtyIdx = 0x%x\n", ring->dirty_idx);
+ netdev_err(dev, "Rx phyAddr = %pad\n", &ring->phy_addr);
+
+ netdev_err(dev, "Device Registers:\n");
+ netdev_err(dev, "Chip Command = 0x%02x\n",
+ rtase_r8(tp, RTASE_CHIP_CMD));
+ netdev_err(dev, "IMR = %08x\n", rtase_r32(tp, RTASE_IMR0));
+ netdev_err(dev, "ISR = %08x\n", rtase_r32(tp, RTASE_ISR0));
+ netdev_err(dev, "Boot Ctrl Reg(0xE004) = %04x\n",
+ rtase_r16(tp, RTASE_BOOT_CTL));
+ netdev_err(dev, "EPHY ISR(0xE014) = %04x\n",
+ rtase_r16(tp, RTASE_EPHY_ISR));
+ netdev_err(dev, "EPHY IMR(0xE016) = %04x\n",
+ rtase_r16(tp, RTASE_EPHY_IMR));
+ netdev_err(dev, "CLKSW SET REG(0xE018) = %04x\n",
+ rtase_r16(tp, RTASE_CLKSW_SET));
+
+ netdev_err(dev, "Dump PCI Registers:\n");
+
+ while (n < max_reg_size) {
+ if ((n % RTASE_DWORD_MOD) == 0)
+ netdev_err(tp->dev, "0x%03x:\n", n);
+
+ pci_read_config_dword(tp->pdev, n, &dword_rd);
+ netdev_err(tp->dev, "%08x\n", dword_rd);
+ n += 4;
+ }
+
+ netdev_err(dev, "Dump tally counter:\n");
+ counters = tp->tally_vaddr;
+ rtase_dump_tally_counter(tp);
+
+ netdev_err(dev, "tx_packets %lld\n",
+ le64_to_cpu(counters->tx_packets));
+ netdev_err(dev, "rx_packets %lld\n",
+ le64_to_cpu(counters->rx_packets));
+ netdev_err(dev, "tx_errors %lld\n",
+ le64_to_cpu(counters->tx_errors));
+ netdev_err(dev, "rx_errors %d\n",
+ le32_to_cpu(counters->rx_errors));
+ netdev_err(dev, "rx_missed %d\n",
+ le16_to_cpu(counters->rx_missed));
+ netdev_err(dev, "align_errors %d\n",
+ le16_to_cpu(counters->align_errors));
+ netdev_err(dev, "tx_one_collision %d\n",
+ le32_to_cpu(counters->tx_one_collision));
+ netdev_err(dev, "tx_multi_collision %d\n",
+ le32_to_cpu(counters->tx_multi_collision));
+ netdev_err(dev, "rx_unicast %lld\n",
+ le64_to_cpu(counters->rx_unicast));
+ netdev_err(dev, "rx_broadcast %lld\n",
+ le64_to_cpu(counters->rx_broadcast));
+ netdev_err(dev, "rx_multicast %d\n",
+ le32_to_cpu(counters->rx_multicast));
+ netdev_err(dev, "tx_aborted %d\n",
+ le16_to_cpu(counters->tx_aborted));
+ netdev_err(dev, "tx_underrun %d\n",
+ le16_to_cpu(counters->tx_underrun));
+}
+
+static void rtase_tx_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ rtase_dump_state(dev);
+ rtase_sw_reset(dev);
+}
+
+static void rtase_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ const struct rtase_private *tp = netdev_priv(dev);
+ const struct rtase_counters *counters;
+
+ counters = tp->tally_vaddr;
+
+ dev_fetch_sw_netstats(stats, dev->tstats);
+
+ /* fetch, from the hardware tally counter, the additional values that are
+ * missing from the stats collected by the driver
+ */
+ rtase_dump_tally_counter(tp);
+ stats->rx_errors = tp->stats.rx_errors;
+ stats->tx_errors = le64_to_cpu(counters->tx_errors);
+ stats->rx_dropped = tp->stats.rx_dropped;
+ stats->tx_dropped = tp->stats.tx_dropped;
+ stats->multicast = tp->stats.multicast;
+ stats->rx_length_errors = tp->stats.rx_length_errors;
+}
+
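+/* Program the per-queue credit-based shaper: the integer part of
+ * idleslope * 64 / 10^7 goes into the RTASE_IDLESLOPE_INT field, and the
+ * loop below derives the binary fraction of the remainder bit by bit.
+ */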
+static void rtase_set_hw_cbs(const struct rtase_private *tp, u32 queue)
+{
+ u32 idle = tp->tx_qos[queue].idleslope * RTASE_1T_CLOCK;
+ u32 val, i;
+
+ val = u32_encode_bits(idle / RTASE_1T_POWER, RTASE_IDLESLOPE_INT_MASK);
+ idle %= RTASE_1T_POWER;
+
+ for (i = 1; i <= RTASE_IDLESLOPE_INT_SHIFT; i++) {
+ idle *= 2;
+ if ((idle / RTASE_1T_POWER) == 1)
+ val |= BIT(RTASE_IDLESLOPE_INT_SHIFT - i);
+
+ idle %= RTASE_1T_POWER;
+ }
+
+ rtase_w32(tp, RTASE_TXQCRDT_0 + queue * 4, val);
+}
+
+static int rtase_setup_tc_cbs(struct rtase_private *tp,
+ const struct tc_cbs_qopt_offload *qopt)
+{
+ int queue = qopt->queue;
+
+ if (queue < 0 || queue >= tp->func_tx_queue_num)
+ return -EINVAL;
+
+ if (!qopt->enable) {
+ tp->tx_qos[queue].hicredit = 0;
+ tp->tx_qos[queue].locredit = 0;
+ tp->tx_qos[queue].idleslope = 0;
+ tp->tx_qos[queue].sendslope = 0;
+
+ rtase_w32(tp, RTASE_TXQCRDT_0 + queue * 4, 0);
+ } else {
+ tp->tx_qos[queue].hicredit = qopt->hicredit;
+ tp->tx_qos[queue].locredit = qopt->locredit;
+ tp->tx_qos[queue].idleslope = qopt->idleslope;
+ tp->tx_qos[queue].sendslope = qopt->sendslope;
+
+ rtase_set_hw_cbs(tp, queue);
+ }
+
+ return 0;
+}
+
+static int rtase_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct rtase_private *tp = netdev_priv(dev);
+
+ switch (type) {
+ case TC_SETUP_QDISC_CBS:
+ return rtase_setup_tc_cbs(tp, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static netdev_features_t rtase_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ netdev_features_t features_fix = features;
+
+ /* TSO is not supported for jumbo frames */
+ if (dev->mtu > ETH_DATA_LEN)
+ features_fix &= ~NETIF_F_ALL_TSO;
+
+ return features_fix;
+}
+
+static int rtase_set_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ netdev_features_t features_set = features;
+
+ features_set &= NETIF_F_RXALL | NETIF_F_RXCSUM |
+ NETIF_F_HW_VLAN_CTAG_RX;
+
+ if (features_set ^ dev->features)
+ rtase_hw_set_features(dev, features_set);
+
+ return 0;
+}
+
+static const struct net_device_ops rtase_netdev_ops = {
+ .ndo_open = rtase_open,
+ .ndo_stop = rtase_close,
+ .ndo_start_xmit = rtase_start_xmit,
+ .ndo_set_rx_mode = rtase_set_rx_mode,
+ .ndo_set_mac_address = rtase_set_mac_address,
+ .ndo_change_mtu = rtase_change_mtu,
+ .ndo_tx_timeout = rtase_tx_timeout,
+ .ndo_get_stats64 = rtase_get_stats64,
+ .ndo_setup_tc = rtase_setup_tc,
+ .ndo_fix_features = rtase_fix_features,
+ .ndo_set_features = rtase_set_features,
+};
+
+static void rtase_get_mac_address(struct net_device *dev)
+{
+ struct rtase_private *tp = netdev_priv(dev);
+ u8 mac_addr[ETH_ALEN] __aligned(2) = {};
+ u32 i;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ mac_addr[i] = rtase_r8(tp, RTASE_MAC0 + i);
+
+ if (!is_valid_ether_addr(mac_addr)) {
+ eth_hw_addr_random(dev);
+ netdev_warn(dev, "Random ether addr %pM\n", dev->dev_addr);
+ } else {
+ eth_hw_addr_set(dev, mac_addr);
+ ether_addr_copy(dev->perm_addr, dev->dev_addr);
+ }
+
+ rtase_rar_set(tp, dev->dev_addr);
+}
+
+static int rtase_get_settings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ u32 supported = SUPPORTED_MII | SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ const struct rtase_private *tp = netdev_priv(dev);
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+
+ switch (tp->hw_ver) {
+ case RTASE_HW_VER_906X_7XA:
+ case RTASE_HW_VER_906X_7XC:
+ cmd->base.speed = SPEED_5000;
+ break;
+ case RTASE_HW_VER_907XD_V1:
+ case RTASE_HW_VER_907XD_VA:
+ cmd->base.speed = SPEED_10000;
+ break;
+ }
+
+ cmd->base.duplex = DUPLEX_FULL;
+ cmd->base.port = PORT_MII;
+ cmd->base.autoneg = AUTONEG_DISABLE;
+
+ return 0;
+}
+
+static void rtase_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ const struct rtase_private *tp = netdev_priv(dev);
+ u16 value = rtase_r16(tp, RTASE_CPLUS_CMD);
+
+ pause->autoneg = AUTONEG_DISABLE;
+ pause->tx_pause = !!(value & RTASE_FORCE_TXFLOW_EN);
+ pause->rx_pause = !!(value & RTASE_FORCE_RXFLOW_EN);
+}
+
+static int rtase_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ const struct rtase_private *tp = netdev_priv(dev);
+ u16 value = rtase_r16(tp, RTASE_CPLUS_CMD);
+
+ if (pause->autoneg)
+ return -EOPNOTSUPP;
+
+ value &= ~(RTASE_FORCE_TXFLOW_EN | RTASE_FORCE_RXFLOW_EN);
+
+ if (pause->tx_pause)
+ value |= RTASE_FORCE_TXFLOW_EN;
+
+ if (pause->rx_pause)
+ value |= RTASE_FORCE_RXFLOW_EN;
+
+ rtase_w16(tp, RTASE_CPLUS_CMD, value);
+ return 0;
+}
+
+static void rtase_get_eth_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *stats)
+{
+ struct rtase_private *tp = netdev_priv(dev);
+ const struct rtase_counters *counters;
+
+ counters = tp->tally_vaddr;
+
+ rtase_dump_tally_counter(tp);
+
+ stats->FramesTransmittedOK = le64_to_cpu(counters->tx_packets);
+ stats->FramesReceivedOK = le64_to_cpu(counters->rx_packets);
+ stats->FramesLostDueToIntMACXmitError =
+ le64_to_cpu(counters->tx_errors);
+ stats->BroadcastFramesReceivedOK = le64_to_cpu(counters->rx_broadcast);
+}
+
+static const struct ethtool_ops rtase_ethtool_ops = {
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = rtase_get_settings,
+ .get_pauseparam = rtase_get_pauseparam,
+ .set_pauseparam = rtase_set_pauseparam,
+ .get_eth_mac_stats = rtase_get_eth_mac_stats,
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
+static void rtase_init_netdev_ops(struct net_device *dev)
+{
+ dev->netdev_ops = &rtase_netdev_ops;
+ dev->ethtool_ops = &rtase_ethtool_ops;
+}
+
+static void rtase_init_napi(struct rtase_private *tp)
+{
+ u16 i;
+
+ for (i = 0; i < tp->int_nums; i++) {
+ netif_napi_add_config(tp->dev, &tp->int_vector[i].napi,
+ tp->int_vector[i].poll, i);
+ netif_napi_set_irq(&tp->int_vector[i].napi,
+ tp->int_vector[i].irq);
+ }
+}
+
+static void rtase_reset_interrupt(struct pci_dev *pdev,
+ const struct rtase_private *tp)
+{
+ if (tp->sw_flag & RTASE_SWF_MSIX_ENABLED)
+ pci_disable_msix(pdev);
+ else
+ pci_disable_msi(pdev);
+}
+
+static int rtase_alloc_msix(struct pci_dev *pdev, struct rtase_private *tp)
+{
+ int ret, irq;
+ u16 i;
+
+ memset(tp->msix_entry, 0x0, RTASE_NUM_MSIX *
+ sizeof(struct msix_entry));
+
+ for (i = 0; i < RTASE_NUM_MSIX; i++)
+ tp->msix_entry[i].entry = i;
+
+ ret = pci_enable_msix_exact(pdev, tp->msix_entry, tp->int_nums);
+
+ if (ret)
+ return ret;
+
+ for (i = 0; i < tp->int_nums; i++) {
+ irq = pci_irq_vector(pdev, i);
+ if (irq < 0) {
+ pci_disable_msix(pdev);
+ return irq;
+ }
+
+ tp->int_vector[i].irq = irq;
+ }
+
+ return 0;
+}
+
+static int rtase_alloc_interrupt(struct pci_dev *pdev,
+ struct rtase_private *tp)
+{
+ int ret;
+
+ ret = rtase_alloc_msix(pdev, tp);
+ if (ret) {
+ ret = pci_enable_msi(pdev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "unable to alloc interrupt.(MSI)\n");
+ return ret;
+ }
+
+ tp->sw_flag |= RTASE_SWF_MSI_ENABLED;
+ } else {
+ tp->sw_flag |= RTASE_SWF_MSIX_ENABLED;
+ }
+
+ return 0;
+}
+
+static void rtase_init_hardware(const struct rtase_private *tp)
+{
+ u16 i;
+
+ for (i = 0; i < RTASE_VLAN_FILTER_ENTRY_NUM; i++)
+ rtase_w32(tp, RTASE_VLAN_ENTRY_0 + i * 4, 0);
+}
+
+static void rtase_init_int_vector(struct rtase_private *tp)
+{
+ u16 i;
+
+ /* interrupt vector 0 */
+ tp->int_vector[0].tp = tp;
+ tp->int_vector[0].index = 0;
+ tp->int_vector[0].imr_addr = RTASE_IMR0;
+ tp->int_vector[0].isr_addr = RTASE_ISR0;
+ tp->int_vector[0].imr = RTASE_ROK | RTASE_RDU | RTASE_TOK |
+ RTASE_TOK4 | RTASE_TOK5 | RTASE_TOK6 |
+ RTASE_TOK7;
+ tp->int_vector[0].poll = rtase_poll;
+
+ memset(tp->int_vector[0].name, 0x0, sizeof(tp->int_vector[0].name));
+ INIT_LIST_HEAD(&tp->int_vector[0].ring_list);
+
+ /* interrupt vector 1 ~ 3 */
+ for (i = 1; i < tp->int_nums; i++) {
+ tp->int_vector[i].tp = tp;
+ tp->int_vector[i].index = i;
+ tp->int_vector[i].imr_addr = RTASE_IMR1 + (i - 1) * 4;
+ tp->int_vector[i].isr_addr = RTASE_ISR1 + (i - 1) * 4;
+ tp->int_vector[i].imr = RTASE_Q_ROK | RTASE_Q_RDU |
+ RTASE_Q_TOK;
+ tp->int_vector[i].poll = rtase_poll;
+
+ memset(tp->int_vector[i].name, 0x0,
+ sizeof(tp->int_vector[0].name));
+ INIT_LIST_HEAD(&tp->int_vector[i].ring_list);
+ }
+}
+
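+/* The interrupt mitigation registers take a (count, unit) pair; the value is
+ * effectively count << unit, so values that do not fit in the count field
+ * are scaled down and the shift amount is stored in the unit field.
+ */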
+static u16 rtase_calc_time_mitigation(u32 time_us)
+{
+ u8 msb, time_count, time_unit;
+ u16 int_miti;
+
+ time_us = min(time_us, RTASE_MITI_MAX_TIME);
+
+ if (time_us > RTASE_MITI_TIME_COUNT_MASK) {
+ msb = fls(time_us);
+ time_unit = msb - RTASE_MITI_COUNT_BIT_NUM;
+ time_count = time_us >> (msb - RTASE_MITI_COUNT_BIT_NUM);
+ } else {
+ time_unit = 0;
+ time_count = time_us;
+ }
+
+ int_miti = u16_encode_bits(time_count, RTASE_MITI_TIME_COUNT_MASK) |
+ u16_encode_bits(time_unit, RTASE_MITI_TIME_UNIT_MASK);
+
+ return int_miti;
+}
+
+static u16 rtase_calc_packet_num_mitigation(u16 pkt_num)
+{
+ u8 msb, pkt_num_count, pkt_num_unit;
+ u16 int_miti;
+
+ pkt_num = min(pkt_num, RTASE_MITI_MAX_PKT_NUM);
+
+ if (pkt_num > 60) {
+ pkt_num_unit = RTASE_MITI_MAX_PKT_NUM_IDX;
+ pkt_num_count = pkt_num / RTASE_MITI_MAX_PKT_NUM_UNIT;
+ } else {
+ msb = fls(pkt_num);
+ if (msb >= RTASE_MITI_COUNT_BIT_NUM) {
+ pkt_num_unit = msb - RTASE_MITI_COUNT_BIT_NUM;
+ pkt_num_count = pkt_num >> (msb -
+ RTASE_MITI_COUNT_BIT_NUM);
+ } else {
+ pkt_num_unit = 0;
+ pkt_num_count = pkt_num;
+ }
+ }
+
+ int_miti = u16_encode_bits(pkt_num_count,
+ RTASE_MITI_PKT_NUM_COUNT_MASK) |
+ u16_encode_bits(pkt_num_unit,
+ RTASE_MITI_PKT_NUM_UNIT_MASK);
+
+ return int_miti;
+}
+
+static void rtase_init_software_variable(struct pci_dev *pdev,
+ struct rtase_private *tp)
+{
+ u16 int_miti;
+
+ tp->tx_queue_ctrl = RTASE_TXQ_CTRL;
+ tp->func_tx_queue_num = RTASE_FUNC_TXQ_NUM;
+ tp->func_rx_queue_num = RTASE_FUNC_RXQ_NUM;
+ tp->int_nums = RTASE_INTERRUPT_NUM;
+
+ int_miti = rtase_calc_time_mitigation(RTASE_MITI_DEFAULT_TIME) |
+ rtase_calc_packet_num_mitigation(RTASE_MITI_DEFAULT_PKT_NUM);
+ tp->tx_int_mit = int_miti;
+ tp->rx_int_mit = int_miti;
+
+ tp->sw_flag = 0;
+
+ rtase_init_int_vector(tp);
+
+ /* MTU range: 60 - hw-specific max */
+ tp->dev->min_mtu = ETH_ZLEN;
+ tp->dev->max_mtu = RTASE_MAX_JUMBO_SIZE;
+}
+
+static int rtase_check_mac_version_valid(struct rtase_private *tp)
+{
+ int ret = -ENODEV;
+
+ tp->hw_ver = rtase_r32(tp, RTASE_TX_CONFIG_0) & RTASE_HW_VER_MASK;
+
+ switch (tp->hw_ver) {
+ case RTASE_HW_VER_906X_7XA:
+ case RTASE_HW_VER_906X_7XC:
+ case RTASE_HW_VER_907XD_V1:
+ case RTASE_HW_VER_907XD_VA:
+ ret = 0;
+ break;
+ }
+
+ return ret;
+}
+
+static int rtase_init_board(struct pci_dev *pdev, struct net_device **dev_out,
+ void __iomem **ioaddr_out)
+{
+ struct net_device *dev;
+ void __iomem *ioaddr;
+ int ret = -ENOMEM;
+
+ /* dev zeroed in alloc_etherdev */
+ dev = alloc_etherdev_mq(sizeof(struct rtase_private),
+ RTASE_FUNC_TXQ_NUM);
+ if (!dev)
+ goto err_out;
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ goto err_out_free_dev;
+
+ /* make sure PCI BAR 2 is MMIO */
+ if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
+ ret = -ENODEV;
+ goto err_out_disable;
+ }
+
+ /* check for weird/broken PCI region reporting */
+ if (pci_resource_len(pdev, 2) < RTASE_REGS_SIZE) {
+ ret = -ENODEV;
+ goto err_out_disable;
+ }
+
+ ret = pci_request_regions(pdev, KBUILD_MODNAME);
+ if (ret)
+ goto err_out_disable;
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ dev_err(&pdev->dev, "no usable dma addressing method\n");
+ goto err_out_free_res;
+ }
+
+ pci_set_master(pdev);
+
+ /* ioremap MMIO region */
+ ioaddr = ioremap(pci_resource_start(pdev, 2),
+ pci_resource_len(pdev, 2));
+ if (!ioaddr) {
+ ret = -EIO;
+ goto err_out_free_res;
+ }
+
+ *ioaddr_out = ioaddr;
+ *dev_out = dev;
+
+ return ret;
+
+err_out_free_res:
+ pci_release_regions(pdev);
+
+err_out_disable:
+ pci_disable_device(pdev);
+
+err_out_free_dev:
+ free_netdev(dev);
+
+err_out:
+ *ioaddr_out = NULL;
+ *dev_out = NULL;
+
+ return ret;
+}
+
+static void rtase_release_board(struct pci_dev *pdev, struct net_device *dev,
+ void __iomem *ioaddr)
+{
+ const struct rtase_private *tp = netdev_priv(dev);
+
+ rtase_rar_set(tp, tp->dev->perm_addr);
+ iounmap(ioaddr);
+
+ if (tp->sw_flag & RTASE_SWF_MSIX_ENABLED)
+ pci_disable_msix(pdev);
+ else
+ pci_disable_msi(pdev);
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ free_netdev(dev);
+}
+
+static int rtase_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *dev = NULL;
+ struct rtase_int_vector *ivec;
+ void __iomem *ioaddr = NULL;
+ struct rtase_private *tp;
+ int ret, i;
+
+ if (!pdev->is_physfn && pdev->is_virtfn) {
+ dev_err(&pdev->dev,
+ "This module does not support a virtual function.");
+ return -EINVAL;
+ }
+
+ dev_dbg(&pdev->dev, "Automotive Switch Ethernet driver loaded\n");
+
+ ret = rtase_init_board(pdev, &dev, &ioaddr);
+ if (ret)
+ return ret;
+
+ tp = netdev_priv(dev);
+ tp->mmio_addr = ioaddr;
+ tp->dev = dev;
+ tp->pdev = pdev;
+
+ /* identify chip attached to board */
+ ret = rtase_check_mac_version_valid(tp);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "unknown chip version: 0x%08x, contact rtase maintainers (see MAINTAINERS file)\n",
+ tp->hw_ver);
+ goto err_out_release_board;
+ }
+
+ rtase_init_software_variable(pdev, tp);
+ rtase_init_hardware(tp);
+
+ ret = rtase_alloc_interrupt(pdev, tp);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to alloc MSIX/MSI\n");
+ goto err_out_del_napi;
+ }
+
+ rtase_init_napi(tp);
+
+ rtase_init_netdev_ops(dev);
+
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
+
+ dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_IP_CSUM | NETIF_F_HIGHDMA |
+ NETIF_F_RXCSUM | NETIF_F_SG |
+ NETIF_F_TSO | NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO6;
+
+ dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
+ NETIF_F_TSO | NETIF_F_RXCSUM |
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_RXALL | NETIF_F_RXFCS |
+ NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
+
+ dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
+ NETIF_F_HIGHDMA;
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ netif_set_tso_max_size(dev, RTASE_LSO_64K);
+ netif_set_tso_max_segs(dev, RTASE_NIC_MAX_PHYS_BUF_COUNT_LSO2);
+
+ rtase_get_mac_address(dev);
+
+ tp->tally_vaddr = dma_alloc_coherent(&pdev->dev,
+ sizeof(*tp->tally_vaddr),
+ &tp->tally_paddr,
+ GFP_KERNEL);
+ if (!tp->tally_vaddr) {
+ ret = -ENOMEM;
+ goto err_out_free_dma;
+ }
+
+ rtase_tally_counter_clear(tp);
+
+ pci_set_drvdata(pdev, dev);
+
+ netif_carrier_off(dev);
+
+ ret = register_netdev(dev);
+ if (ret)
+ goto err_out_free_dma;
+
+ netdev_dbg(dev, "%pM, IRQ %d\n", dev->dev_addr, dev->irq);
+
+ return 0;
+
+err_out_free_dma:
+ if (tp->tally_vaddr) {
+ dma_free_coherent(&pdev->dev,
+ sizeof(*tp->tally_vaddr),
+ tp->tally_vaddr,
+ tp->tally_paddr);
+
+ tp->tally_vaddr = NULL;
+ }
+
+err_out_del_napi:
+ for (i = 0; i < tp->int_nums; i++) {
+ ivec = &tp->int_vector[i];
+ netif_napi_del(&ivec->napi);
+ }
+
+err_out_release_board:
+ rtase_release_board(pdev, dev, ioaddr);
+
+ return ret;
+}
+
+static void rtase_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct rtase_private *tp = netdev_priv(dev);
+ struct rtase_int_vector *ivec;
+ u32 i;
+
+ unregister_netdev(dev);
+
+ for (i = 0; i < tp->int_nums; i++) {
+ ivec = &tp->int_vector[i];
+ netif_napi_del(&ivec->napi);
+ }
+
+ rtase_reset_interrupt(pdev, tp);
+ if (tp->tally_vaddr) {
+ dma_free_coherent(&pdev->dev,
+ sizeof(*tp->tally_vaddr),
+ tp->tally_vaddr,
+ tp->tally_paddr);
+ tp->tally_vaddr = NULL;
+ }
+
+ rtase_release_board(pdev, dev, tp->mmio_addr);
+ pci_set_drvdata(pdev, NULL);
+}
+
+static void rtase_shutdown(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ const struct rtase_private *tp;
+
+ tp = netdev_priv(dev);
+
+ if (netif_running(dev))
+ rtase_close(dev);
+
+ rtase_reset_interrupt(pdev, tp);
+}
+
+static int rtase_suspend(struct device *device)
+{
+ struct net_device *dev = dev_get_drvdata(device);
+
+ if (netif_running(dev)) {
+ netif_device_detach(dev);
+ rtase_hw_reset(dev);
+ }
+
+ return 0;
+}
+
+static int rtase_resume(struct device *device)
+{
+ struct net_device *dev = dev_get_drvdata(device);
+ struct rtase_private *tp = netdev_priv(dev);
+ int ret;
+
+ /* restore last modified mac address */
+ rtase_rar_set(tp, dev->dev_addr);
+
+ if (!netif_running(dev))
+ goto out;
+
+ rtase_wait_for_quiescence(dev);
+
+ rtase_tx_clear(tp);
+ rtase_rx_clear(tp);
+
+ ret = rtase_init_ring(dev);
+ if (ret) {
+ netdev_err(dev, "unable to init ring\n");
+ rtase_free_desc(tp);
+ return -ENOMEM;
+ }
+
+ rtase_hw_config(dev);
+ /* link is always up, so start transmitting and receiving */
+ rtase_hw_start(dev);
+
+ netif_device_attach(dev);
+out:
+
+ return 0;
+}
+
+static const struct dev_pm_ops rtase_pm_ops = {
+ SYSTEM_SLEEP_PM_OPS(rtase_suspend, rtase_resume)
+};
+
+static struct pci_driver rtase_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = rtase_pci_tbl,
+ .probe = rtase_init_one,
+ .remove = rtase_remove_one,
+ .shutdown = rtase_shutdown,
+ .driver.pm = pm_ptr(&rtase_pm_ops),
+};
+
+module_pci_driver(rtase_pci_driver);
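module_pci_driver() above generates the module's init/exit boilerplate for the rtase driver. For readers unfamiliar with the macro, it expands to roughly the following (the function names here are illustrative; the real macro derives them from the driver variable name):

static int __init rtase_pci_driver_init(void)
{
	/* register the PCI driver when the module loads */
	return pci_register_driver(&rtase_pci_driver);
}
module_init(rtase_pci_driver_init);

static void __exit rtase_pci_driver_exit(void)
{
	/* unregister on module unload */
	pci_unregister_driver(&rtase_pci_driver);
}
module_exit(rtase_pci_driver_exit);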
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index b03fae7a0f72..9b7559c88bee 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -33,6 +33,7 @@ config RAVB
select CRC32
select MII
select MDIO_BITBANG
+ select PAGE_POOL
select PHYLIB
select RESET_CONTROLLER
help
@@ -58,4 +59,14 @@ config RENESAS_GEN4_PTP
help
Renesas R-Car Gen4 gPTP device driver.
+config RTSN
+ tristate "Renesas Ethernet-TSN support"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ depends on PTP_1588_CLOCK
+ select CRC32
+ select PHYLIB
+ select RENESAS_GEN4_PTP
+ help
+ Renesas Ethernet-TSN device driver.
+
endif # NET_VENDOR_RENESAS
diff --git a/drivers/net/ethernet/renesas/Makefile b/drivers/net/ethernet/renesas/Makefile
index 9070acfd6aaf..d63e0c61bb68 100644
--- a/drivers/net/ethernet/renesas/Makefile
+++ b/drivers/net/ethernet/renesas/Makefile
@@ -8,6 +8,9 @@ obj-$(CONFIG_SH_ETH) += sh_eth.o
ravb-objs := ravb_main.o ravb_ptp.o
obj-$(CONFIG_RAVB) += ravb.o
+rswitch-objs := rswitch_main.o rswitch_l2.o
obj-$(CONFIG_RENESAS_ETHER_SWITCH) += rswitch.o
obj-$(CONFIG_RENESAS_GEN4_PTP) += rcar_gen4_ptp.o
+
+obj-$(CONFIG_RTSN) += rtsn.o
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index b48935ec7e28..5e56ec9b1013 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -19,6 +19,7 @@
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/ptp_clock_kernel.h>
+#include <net/page_pool/types.h>
#define BE_TX_RING_SIZE 64 /* TX ring size for Best Effort */
#define BE_RX_RING_SIZE 1024 /* RX ring size for Best Effort */
@@ -34,16 +35,6 @@
/* Driver's parameters */
#define RAVB_ALIGN 128
-/* Hardware time stamp */
-#define RAVB_TXTSTAMP_VALID 0x00000001 /* TX timestamp valid */
-#define RAVB_TXTSTAMP_ENABLED 0x00000010 /* Enable TX timestamping */
-
-#define RAVB_RXTSTAMP_VALID 0x00000001 /* RX timestamp valid */
-#define RAVB_RXTSTAMP_TYPE 0x00000006 /* RX type mask */
-#define RAVB_RXTSTAMP_TYPE_V2_L2_EVENT 0x00000002
-#define RAVB_RXTSTAMP_TYPE_ALL 0x00000006
-#define RAVB_RXTSTAMP_ENABLED 0x00000010 /* Enable RX timestamping */
-
enum ravb_reg {
/* AVB-DMAC registers */
CCC = 0x0000,
@@ -257,6 +248,7 @@ enum APSR_BIT {
APSR_CMSW = 0x00000010,
APSR_RDM = 0x00002000,
APSR_TDM = 0x00004000,
+ APSR_MIISELECT = 0x01000000, /* R-Car V4M only */
};
/* RCR */
@@ -996,6 +988,8 @@ enum CSR1_BIT {
CSR1_TDHD = 0x08000000,
};
+#define CSR1_CSUM_ENABLE (CSR1_TTCP4 | CSR1_TUDP4 | CSR1_TTCP6 | CSR1_TUDP6)
+
enum CSR2_BIT {
CSR2_RIP4 = 0x00000001,
CSR2_RTCP4 = 0x00000010,
@@ -1010,7 +1004,9 @@ enum CSR2_BIT {
CSR2_RDHD = 0x08000000,
};
-#define DBAT_ENTRY_NUM 22
+#define CSR2_CSUM_ENABLE (CSR2_RTCP4 | CSR2_RUDP4 | CSR2_RICMP4 | \
+ CSR2_RTCP6 | CSR2_RUDP6 | CSR2_RICMP6)
+
#define RX_QUEUE_OFFSET 4
#define NUM_RX_QUEUE 2
#define NUM_TX_QUEUE 2
@@ -1039,7 +1035,7 @@ struct ravb_ptp {
};
struct ravb_hw_info {
- bool (*receive)(struct net_device *ndev, int *quota, int q);
+ int (*receive)(struct net_device *ndev, int budget, int q);
void (*set_rate)(struct net_device *ndev);
int (*set_feature)(struct net_device *ndev, netdev_features_t features);
int (*dmac_init)(struct net_device *ndev);
@@ -1048,12 +1044,16 @@ struct ravb_hw_info {
size_t gstrings_size;
netdev_features_t net_hw_features;
netdev_features_t net_features;
+ netdev_features_t vlan_features;
int stats_len;
u32 tccr_mask;
+ u32 tx_max_frame_size;
u32 rx_max_frame_size;
- u32 rx_max_desc_use;
+ u32 rx_buffer_size;
u32 rx_desc_size;
+ u32 dbat_entry_num;
unsigned aligned_tx: 1;
+ unsigned coalesce_irqs:1; /* Needs software IRQ coalescing */
/* hardware features */
unsigned internal_delay:1; /* AVB-DMAC has internal delays */
@@ -1070,6 +1070,11 @@ struct ravb_hw_info {
unsigned half_duplex:1; /* E-MAC supports half duplex mode */
};
+struct ravb_rx_buffer {
+ struct page *page;
+ unsigned int offset;
+};
+
struct ravb_private {
struct net_device *ndev;
struct platform_device *pdev;
@@ -1093,13 +1098,14 @@ struct ravb_private {
struct ravb_tx_desc *tx_ring[NUM_TX_QUEUE];
void *tx_align[NUM_TX_QUEUE];
struct sk_buff *rx_1st_skb;
- struct sk_buff **rx_skb[NUM_RX_QUEUE];
+ struct page_pool *rx_pool[NUM_RX_QUEUE];
+ struct ravb_rx_buffer *rx_buffers[NUM_RX_QUEUE];
struct sk_buff **tx_skb[NUM_TX_QUEUE];
u32 rx_over_errors;
u32 rx_fifo_errors;
struct net_device_stats stats[NUM_RX_QUEUE];
- u32 tstamp_tx_ctrl;
- u32 tstamp_rx_ctrl;
+ enum hwtstamp_tx_types tstamp_tx_ctrl;
+ enum hwtstamp_rx_filters tstamp_rx_ctrl;
struct list_head ts_skb_list;
u32 ts_skb_tag;
struct ravb_ptp ptp;
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index fcb756d77681..57b0db314fb5 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -30,6 +30,7 @@
#include <linux/reset.h>
#include <linux/math64.h>
#include <net/ip.h>
+#include <net/page_pool/helpers.h>
#include "ravb.h"
@@ -113,25 +114,6 @@ static void ravb_set_rate_rcar(struct net_device *ndev)
}
}
-static struct sk_buff *
-ravb_alloc_skb(struct net_device *ndev, const struct ravb_hw_info *info,
- gfp_t gfp_mask)
-{
- struct sk_buff *skb;
- u32 reserve;
-
- skb = __netdev_alloc_skb(ndev, info->rx_max_frame_size + RAVB_ALIGN - 1,
- gfp_mask);
- if (!skb)
- return NULL;
-
- reserve = (unsigned long)skb->data & (RAVB_ALIGN - 1);
- if (reserve)
- skb_reserve(skb, RAVB_ALIGN - reserve);
-
- return skb;
-}
-
/* Get MAC address from the MAC address registers
*
* Ethernet AVB device doesn't have ROM for MAC address.
@@ -257,21 +239,10 @@ static void ravb_rx_ring_free(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
unsigned int ring_size;
- unsigned int i;
if (!priv->rx_ring[q].raw)
return;
- for (i = 0; i < priv->num_rx_ring[q]; i++) {
- struct ravb_rx_desc *desc = ravb_rx_get_desc(priv, q, i);
-
- if (!dma_mapping_error(ndev->dev.parent,
- le32_to_cpu(desc->dptr)))
- dma_unmap_single(ndev->dev.parent,
- le32_to_cpu(desc->dptr),
- priv->info->rx_max_frame_size,
- DMA_FROM_DEVICE);
- }
ring_size = priv->info->rx_desc_size * (priv->num_rx_ring[q] + 1);
dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q].raw,
priv->rx_desc_dma[q]);
@@ -298,13 +269,16 @@ static void ravb_ring_free(struct net_device *ndev, int q)
priv->tx_ring[q] = NULL;
}
- /* Free RX skb ringbuffer */
- if (priv->rx_skb[q]) {
- for (i = 0; i < priv->num_rx_ring[q]; i++)
- dev_kfree_skb(priv->rx_skb[q][i]);
+ /* Free RX buffers */
+ for (i = 0; i < priv->num_rx_ring[q]; i++) {
+ if (priv->rx_buffers[q][i].page)
+ page_pool_put_page(priv->rx_pool[q],
+ priv->rx_buffers[q][i].page,
+ 0, true);
}
- kfree(priv->rx_skb[q]);
- priv->rx_skb[q] = NULL;
+ kfree(priv->rx_buffers[q]);
+ priv->rx_buffers[q] = NULL;
+ page_pool_destroy(priv->rx_pool[q]);
/* Free aligned TX buffers */
kfree(priv->tx_align[q]);
@@ -317,35 +291,64 @@ static void ravb_ring_free(struct net_device *ndev, int q)
priv->tx_skb[q] = NULL;
}
-static void ravb_rx_ring_format(struct net_device *ndev, int q)
+static int
+ravb_alloc_rx_buffer(struct net_device *ndev, int q, u32 entry, gfp_t gfp_mask,
+ struct ravb_rx_desc *rx_desc)
{
struct ravb_private *priv = netdev_priv(ndev);
- struct ravb_rx_desc *rx_desc;
- unsigned int rx_ring_size;
+ const struct ravb_hw_info *info = priv->info;
+ struct ravb_rx_buffer *rx_buff;
dma_addr_t dma_addr;
- unsigned int i;
+ unsigned int size;
- rx_ring_size = priv->info->rx_desc_size * priv->num_rx_ring[q];
- memset(priv->rx_ring[q].raw, 0, rx_ring_size);
- /* Build RX ring buffer */
- for (i = 0; i < priv->num_rx_ring[q]; i++) {
- /* RX descriptor */
- rx_desc = ravb_rx_get_desc(priv, q, i);
- rx_desc->ds_cc = cpu_to_le16(priv->info->rx_max_desc_use);
- dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
- priv->info->rx_max_frame_size,
- DMA_FROM_DEVICE);
+ rx_buff = &priv->rx_buffers[q][entry];
+ size = info->rx_buffer_size;
+ rx_buff->page = page_pool_alloc(priv->rx_pool[q], &rx_buff->offset,
+ &size, gfp_mask);
+ if (unlikely(!rx_buff->page)) {
/* We just set the data size to 0 for a failed mapping which
* should prevent DMA from happening...
*/
- if (dma_mapping_error(ndev->dev.parent, dma_addr))
- rx_desc->ds_cc = cpu_to_le16(0);
- rx_desc->dptr = cpu_to_le32(dma_addr);
+ rx_desc->ds_cc = cpu_to_le16(0);
+ return -ENOMEM;
+ }
+
+ dma_addr = page_pool_get_dma_addr(rx_buff->page) + rx_buff->offset;
+ dma_sync_single_for_device(ndev->dev.parent, dma_addr,
+ info->rx_buffer_size, DMA_FROM_DEVICE);
+ rx_desc->dptr = cpu_to_le32(dma_addr);
+
+ /* The end of the RX buffer is used to store skb shared data, so we need
+ * to ensure that the hardware leaves enough space for this.
+ */
+ rx_desc->ds_cc = cpu_to_le16(info->rx_buffer_size -
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) -
+ ETH_FCS_LEN + sizeof(__sum16));
+ return 0;
+}
+
+static u32
+ravb_rx_ring_refill(struct net_device *ndev, int q, u32 count, gfp_t gfp_mask)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct ravb_rx_desc *rx_desc;
+ u32 i, entry;
+
+ for (i = 0; i < count; i++) {
+ entry = (priv->dirty_rx[q] + i) % priv->num_rx_ring[q];
+ rx_desc = ravb_rx_get_desc(priv, q, entry);
+
+ if (!priv->rx_buffers[q][entry].page) {
+ if (unlikely(ravb_alloc_rx_buffer(ndev, q, entry,
+ gfp_mask, rx_desc)))
+ break;
+ }
+ /* Descriptor type must be set after all the above writes */
+ dma_wmb();
rx_desc->die_dt = DT_FEMPTY;
}
- rx_desc = ravb_rx_get_desc(priv, q, i);
- rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
- rx_desc->die_dt = DT_LINKFIX; /* type */
+
+ return i;
}
/* Format skb and descriptor buffer for Ethernet AVB */
@@ -353,6 +356,7 @@ static void ravb_ring_format(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
unsigned int num_tx_desc = priv->num_tx_desc;
+ struct ravb_rx_desc *rx_desc;
struct ravb_tx_desc *tx_desc;
struct ravb_desc *desc;
unsigned int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] *
@@ -364,7 +368,13 @@ static void ravb_ring_format(struct net_device *ndev, int q)
priv->dirty_rx[q] = 0;
priv->dirty_tx[q] = 0;
- ravb_rx_ring_format(ndev, q);
+ /* Regular RX descriptors have already been initialized by
+ * ravb_rx_ring_refill(), we just need to initialize the final link
+ * descriptor.
+ */
+ rx_desc = ravb_rx_get_desc(priv, q, priv->num_rx_ring[q]);
+ rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
+ rx_desc->die_dt = DT_LINKFIX; /* type */
memset(priv->tx_ring[q], 0, tx_ring_size);
/* Build TX ring buffer */
@@ -408,26 +418,47 @@ static void *ravb_alloc_rx_desc(struct net_device *ndev, int q)
static int ravb_ring_init(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
- const struct ravb_hw_info *info = priv->info;
unsigned int num_tx_desc = priv->num_tx_desc;
+ struct page_pool_params params = {
+ .order = 0,
+ .flags = PP_FLAG_DMA_MAP,
+ .pool_size = priv->num_rx_ring[q],
+ .nid = NUMA_NO_NODE,
+ .dev = ndev->dev.parent,
+ .dma_dir = DMA_FROM_DEVICE,
+ };
unsigned int ring_size;
- struct sk_buff *skb;
- unsigned int i;
+ u32 num_filled;
+
+ /* Allocate RX page pool and buffers */
+ priv->rx_pool[q] = page_pool_create(&params);
+ if (IS_ERR(priv->rx_pool[q]))
+ goto error;
+
+ /* Allocate RX buffers */
+ priv->rx_buffers[q] = kcalloc(priv->num_rx_ring[q],
+ sizeof(*priv->rx_buffers[q]), GFP_KERNEL);
+ if (!priv->rx_buffers[q])
+ goto error;
- /* Allocate RX and TX skb rings */
- priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
- sizeof(*priv->rx_skb[q]), GFP_KERNEL);
+ /* Allocate TX skb rings */
priv->tx_skb[q] = kcalloc(priv->num_tx_ring[q],
sizeof(*priv->tx_skb[q]), GFP_KERNEL);
- if (!priv->rx_skb[q] || !priv->tx_skb[q])
+ if (!priv->tx_skb[q])
goto error;
- for (i = 0; i < priv->num_rx_ring[q]; i++) {
- skb = ravb_alloc_skb(ndev, info, GFP_KERNEL);
- if (!skb)
- goto error;
- priv->rx_skb[q][i] = skb;
- }
+ /* Allocate all RX descriptors. */
+ if (!ravb_alloc_rx_desc(ndev, q))
+ goto error;
+
+ /* Populate RX ring buffer. */
+ priv->dirty_rx[q] = 0;
+ ring_size = priv->info->rx_desc_size * priv->num_rx_ring[q];
+ memset(priv->rx_ring[q].raw, 0, ring_size);
+ num_filled = ravb_rx_ring_refill(ndev, q, priv->num_rx_ring[q],
+ GFP_KERNEL);
+ if (num_filled != priv->num_rx_ring[q])
+ goto error;
if (num_tx_desc > 1) {
/* Allocate rings for the aligned buffers */
@@ -437,12 +468,6 @@ static int ravb_ring_init(struct net_device *ndev, int q)
goto error;
}
- /* Allocate all RX descriptors. */
- if (!ravb_alloc_rx_desc(ndev, q))
- goto error;
-
- priv->dirty_rx[q] = 0;
-
/* Allocate all TX descriptors. */
ring_size = sizeof(struct ravb_tx_desc) *
(priv->num_tx_ring[q] * num_tx_desc + 1);
@@ -479,11 +504,10 @@ static void ravb_csum_init_gbeth(struct net_device *ndev)
ndev->features &= ~NETIF_F_RXCSUM;
} else {
if (tx_enable)
- ravb_write(ndev, CSR1_TIP4 | CSR1_TTCP4 | CSR1_TUDP4, CSR1);
+ ravb_write(ndev, CSR1_CSUM_ENABLE, CSR1);
if (rx_enable)
- ravb_write(ndev, CSR2_RIP4 | CSR2_RTCP4 | CSR2_RUDP4 | CSR2_RICMP4,
- CSR2);
+ ravb_write(ndev, CSR2_CSUM_ENABLE, CSR2);
}
done:
@@ -530,8 +554,16 @@ static void ravb_emac_init_gbeth(struct net_device *ndev)
static void ravb_emac_init_rcar(struct net_device *ndev)
{
- /* Receive frame limit set register */
- ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR);
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ /* Set receive frame length
+ *
+ * The length set here describes the frame from the destination address
+ * up to and including the CRC data. However only the frame data,
+ * excluding the CRC, are transferred to memory. To allow for the
+ * largest frames add the CRC length to the maximum Rx descriptor size.
+ */
+ ravb_write(ndev, priv->info->rx_max_frame_size + ETH_FCS_LEN, RFLR);
/* EMAC Mode: PAUSE prohibition; Duplex; RX Checksum; TX; RX */
ravb_write(ndev, ECMR_ZPF | ECMR_DM |
@@ -554,6 +586,16 @@ static void ravb_emac_init_rcar(struct net_device *ndev)
ravb_write(ndev, ECSIPR_ICDIP | ECSIPR_MPDIP | ECSIPR_LCHNGIP, ECSIPR);
}
+static void ravb_emac_init_rcar_gen4(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ bool mii = priv->phy_interface == PHY_INTERFACE_MODE_MII;
+
+ ravb_modify(ndev, APSR, APSR_MIISELECT, mii ? APSR_MIISELECT : 0);
+
+ ravb_emac_init_rcar(ndev);
+}
+
/* E-MAC init function */
static void ravb_emac_init(struct net_device *ndev)
{
@@ -706,25 +748,35 @@ static void ravb_get_tx_tstamp(struct net_device *ndev)
static void ravb_rx_csum_gbeth(struct sk_buff *skb)
{
- __wsum csum_ip_hdr, csum_proto;
- u8 *hw_csum;
-
- /* The hardware checksum status is contained in sizeof(__sum16) * 2 = 4
- * bytes appended to packet data. First 2 bytes is ip header checksum
- * and last 2 bytes is protocol checksum.
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+ size_t csum_len;
+ u16 *hw_csum;
+
+ /* The hardware checksum status is contained in 4 bytes appended to
+ * packet data.
+ *
+ * For ipv4, the first 2 bytes are the ip header checksum status. We can
+ * ignore this as it will always be re-checked in inet_gro_receive().
+ *
+ * The last 2 bytes are the protocol checksum status which will be zero
+ * if the checksum has been validated.
*/
- if (unlikely(skb->len < sizeof(__sum16) * 2))
+ csum_len = sizeof(*hw_csum) * 2;
+ if (unlikely(skb->len < csum_len))
return;
- hw_csum = skb_tail_pointer(skb) - sizeof(__sum16);
- csum_proto = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
+ if (skb_is_nonlinear(skb)) {
+ skb_frag_t *last_frag = &shinfo->frags[shinfo->nr_frags - 1];
- hw_csum -= sizeof(__sum16);
- csum_ip_hdr = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
- skb_trim(skb, skb->len - 2 * sizeof(__sum16));
+ hw_csum = (u16 *)(skb_frag_address(last_frag) +
+ skb_frag_size(last_frag));
+ skb_frag_size_sub(last_frag, csum_len);
+ } else {
+ hw_csum = (u16 *)skb_tail_pointer(skb);
+ skb_trim(skb, skb->len - csum_len);
+ }
- /* TODO: IPV6 Rx checksum */
- if (skb->protocol == htons(ETH_P_IP) && !csum_ip_hdr && !csum_proto)
+ if (!get_unaligned(--hw_csum))
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
@@ -743,30 +795,13 @@ static void ravb_rx_csum(struct sk_buff *skb)
skb_trim(skb, skb->len - sizeof(__sum16));
}
-static struct sk_buff *ravb_get_skb_gbeth(struct net_device *ndev, int entry,
- struct ravb_rx_desc *desc)
-{
- struct ravb_private *priv = netdev_priv(ndev);
- struct sk_buff *skb;
-
- skb = priv->rx_skb[RAVB_BE][entry];
- priv->rx_skb[RAVB_BE][entry] = NULL;
- dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
- ALIGN(priv->info->rx_max_frame_size, 16),
- DMA_FROM_DEVICE);
-
- return skb;
-}
-
/* Packet receive function for Gigabit Ethernet */
-static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
+static int ravb_rx_gbeth(struct net_device *ndev, int budget, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
struct net_device_stats *stats;
struct ravb_rx_desc *desc;
- struct sk_buff *skb;
- dma_addr_t dma_addr;
int rx_packets = 0;
u8 desc_status;
u16 desc_len;
@@ -779,9 +814,11 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
stats = &priv->stats[q];
for (i = 0; i < limit; i++, priv->cur_rx[q]++) {
+ struct sk_buff *skb = NULL;
+
entry = priv->cur_rx[q] % priv->num_rx_ring[q];
desc = &priv->rx_ring[q].desc[entry];
- if (rx_packets == *quota || desc->die_dt == DT_FEMPTY)
+ if (rx_packets == budget || desc->die_dt == DT_FEMPTY)
break;
/* Descriptor type must be checked before all other reads */
@@ -807,87 +844,134 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
if (desc_status & MSC_CEEF)
stats->rx_missed_errors++;
} else {
+ struct ravb_rx_buffer *rx_buff;
+ void *rx_addr;
+
+ rx_buff = &priv->rx_buffers[q][entry];
+ rx_addr = page_address(rx_buff->page) + rx_buff->offset;
die_dt = desc->die_dt & 0xF0;
+ dma_sync_single_for_cpu(ndev->dev.parent,
+ le32_to_cpu(desc->dptr),
+ desc_len, DMA_FROM_DEVICE);
+
switch (die_dt) {
case DT_FSINGLE:
- skb = ravb_get_skb_gbeth(ndev, entry, desc);
- skb_put(skb, desc_len);
- skb->protocol = eth_type_trans(skb, ndev);
- if (ndev->features & NETIF_F_RXCSUM)
- ravb_rx_csum_gbeth(skb);
- napi_gro_receive(&priv->napi[q], skb);
- rx_packets++;
- stats->rx_bytes += desc_len;
- break;
case DT_FSTART:
- priv->rx_1st_skb = ravb_get_skb_gbeth(ndev, entry, desc);
- skb_put(priv->rx_1st_skb, desc_len);
+ /* Start of packet: Set initial data length. */
+ skb = napi_build_skb(rx_addr,
+ info->rx_buffer_size);
+ if (unlikely(!skb)) {
+ stats->rx_errors++;
+ page_pool_put_page(priv->rx_pool[q],
+ rx_buff->page, 0,
+ true);
+ goto refill;
+ }
+ skb_mark_for_recycle(skb);
+ skb_put(skb, desc_len);
+
+ /* Save this skb if the packet spans multiple
+ * descriptors.
+ */
+ if (die_dt == DT_FSTART)
+ priv->rx_1st_skb = skb;
break;
+
case DT_FMID:
- skb = ravb_get_skb_gbeth(ndev, entry, desc);
- skb_copy_to_linear_data_offset(priv->rx_1st_skb,
- priv->rx_1st_skb->len,
- skb->data,
- desc_len);
- skb_put(priv->rx_1st_skb, desc_len);
- dev_kfree_skb(skb);
- break;
case DT_FEND:
- skb = ravb_get_skb_gbeth(ndev, entry, desc);
- skb_copy_to_linear_data_offset(priv->rx_1st_skb,
- priv->rx_1st_skb->len,
- skb->data,
- desc_len);
- skb_put(priv->rx_1st_skb, desc_len);
- dev_kfree_skb(skb);
- priv->rx_1st_skb->protocol =
- eth_type_trans(priv->rx_1st_skb, ndev);
+ /* Continuing a packet: Add this buffer as an RX
+ * frag.
+ */
+
+ /* rx_1st_skb will be NULL if napi_build_skb()
+ * failed for the first descriptor of a
+ * multi-descriptor packet.
+ */
+ if (unlikely(!priv->rx_1st_skb)) {
+ stats->rx_errors++;
+ page_pool_put_page(priv->rx_pool[q],
+ rx_buff->page, 0,
+ true);
+
+ /* We may find a DT_FSINGLE or DT_FSTART
+ * descriptor in the queue which we can
+ * process, so don't give up yet.
+ */
+ continue;
+ }
+ skb_add_rx_frag(priv->rx_1st_skb,
+ skb_shinfo(priv->rx_1st_skb)->nr_frags,
+ rx_buff->page, rx_buff->offset,
+ desc_len, info->rx_buffer_size);
+
+ /* Set skb to point at the whole packet so that
+ * we only need one code path for finishing a
+ * packet.
+ */
+ skb = priv->rx_1st_skb;
+ }
+
+ switch (die_dt) {
+ case DT_FSINGLE:
+ case DT_FEND:
+ /* Finishing a packet: Determine protocol &
+ * checksum, hand off to NAPI and update our
+ * stats.
+ */
+ skb->protocol = eth_type_trans(skb, ndev);
if (ndev->features & NETIF_F_RXCSUM)
- ravb_rx_csum_gbeth(priv->rx_1st_skb);
- stats->rx_bytes += priv->rx_1st_skb->len;
- napi_gro_receive(&priv->napi[q],
- priv->rx_1st_skb);
+ ravb_rx_csum_gbeth(skb);
+ stats->rx_bytes += skb->len;
+ napi_gro_receive(&priv->napi[q], skb);
rx_packets++;
- break;
+
+ /* Clear rx_1st_skb so that it will only be
+ * non-NULL when valid.
+ */
+ priv->rx_1st_skb = NULL;
}
+
+ /* Mark this RX buffer as consumed. */
+ rx_buff->page = NULL;
}
}
+refill:
/* Refill the RX ring buffers. */
- for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
- entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
- desc = &priv->rx_ring[q].desc[entry];
- desc->ds_cc = cpu_to_le16(priv->info->rx_max_desc_use);
-
- if (!priv->rx_skb[q][entry]) {
- skb = ravb_alloc_skb(ndev, info, GFP_ATOMIC);
- if (!skb)
- break;
- dma_addr = dma_map_single(ndev->dev.parent,
- skb->data,
- priv->info->rx_max_frame_size,
- DMA_FROM_DEVICE);
- skb_checksum_none_assert(skb);
- /* We just set the data size to 0 for a failed mapping
- * which should prevent DMA from happening...
- */
- if (dma_mapping_error(ndev->dev.parent, dma_addr))
- desc->ds_cc = cpu_to_le16(0);
- desc->dptr = cpu_to_le32(dma_addr);
- priv->rx_skb[q][entry] = skb;
- }
- /* Descriptor type must be set after all the above writes */
- dma_wmb();
- desc->die_dt = DT_FEMPTY;
- }
+ priv->dirty_rx[q] += ravb_rx_ring_refill(ndev, q,
+ priv->cur_rx[q] - priv->dirty_rx[q],
+ GFP_ATOMIC);
stats->rx_packets += rx_packets;
- *quota -= rx_packets;
- return *quota == 0;
+ return rx_packets;
+}
+
+static void ravb_rx_rcar_hwstamp(struct ravb_private *priv, int q,
+ struct ravb_ex_rx_desc *desc,
+ struct sk_buff *skb)
+{
+ struct skb_shared_hwtstamps *shhwtstamps;
+ struct timespec64 ts;
+ bool get_ts;
+
+ if (q == RAVB_NC)
+ get_ts = priv->tstamp_rx_ctrl != HWTSTAMP_FILTER_NONE;
+ else
+ get_ts = priv->tstamp_rx_ctrl == HWTSTAMP_FILTER_ALL;
+
+ if (!get_ts)
+ return;
+
+ shhwtstamps = skb_hwtstamps(skb);
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+ ts.tv_sec = ((u64)le16_to_cpu(desc->ts_sh) << 32)
+ | le32_to_cpu(desc->ts_sl);
+ ts.tv_nsec = le32_to_cpu(desc->ts_n);
+ shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
}
/* Packet receive function for Ethernet AVB */
-static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
+static int ravb_rx_rcar(struct net_device *ndev, int budget, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
@@ -895,8 +979,6 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
struct ravb_ex_rx_desc *desc;
unsigned int limit, i;
struct sk_buff *skb;
- dma_addr_t dma_addr;
- struct timespec64 ts;
int rx_packets = 0;
u8 desc_status;
u16 pkt_len;
@@ -906,7 +988,7 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
for (i = 0; i < limit; i++, priv->cur_rx[q]++) {
entry = priv->cur_rx[q] % priv->num_rx_ring[q];
desc = &priv->rx_ring[q].ex_desc[entry];
- if (rx_packets == *quota || desc->die_dt == DT_FEMPTY)
+ if (rx_packets == budget || desc->die_dt == DT_FEMPTY)
break;
/* Descriptor type must be checked before all other reads */
@@ -933,26 +1015,25 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
if (desc_status & MSC_CEEF)
stats->rx_missed_errors++;
} else {
- u32 get_ts = priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE;
-
- skb = priv->rx_skb[q][entry];
- priv->rx_skb[q][entry] = NULL;
- dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
- priv->info->rx_max_frame_size,
- DMA_FROM_DEVICE);
- get_ts &= (q == RAVB_NC) ?
- RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
- ~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
- if (get_ts) {
- struct skb_shared_hwtstamps *shhwtstamps;
-
- shhwtstamps = skb_hwtstamps(skb);
- memset(shhwtstamps, 0, sizeof(*shhwtstamps));
- ts.tv_sec = ((u64) le16_to_cpu(desc->ts_sh) <<
- 32) | le32_to_cpu(desc->ts_sl);
- ts.tv_nsec = le32_to_cpu(desc->ts_n);
- shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
+ struct ravb_rx_buffer *rx_buff;
+ void *rx_addr;
+
+ rx_buff = &priv->rx_buffers[q][entry];
+ rx_addr = page_address(rx_buff->page) + rx_buff->offset;
+ dma_sync_single_for_cpu(ndev->dev.parent,
+ le32_to_cpu(desc->dptr),
+ pkt_len, DMA_FROM_DEVICE);
+
+ skb = napi_build_skb(rx_addr, info->rx_buffer_size);
+ if (unlikely(!skb)) {
+ stats->rx_errors++;
+ page_pool_put_page(priv->rx_pool[q],
+ rx_buff->page, 0, true);
+ break;
}
+ skb_mark_for_recycle(skb);
+
+ ravb_rx_rcar_hwstamp(priv, q, desc, skb);
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, ndev);
@@ -961,48 +1042,28 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
napi_gro_receive(&priv->napi[q], skb);
rx_packets++;
stats->rx_bytes += pkt_len;
+
+ /* Mark this RX buffer as consumed. */
+ rx_buff->page = NULL;
}
}
/* Refill the RX ring buffers. */
- for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
- entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
- desc = &priv->rx_ring[q].ex_desc[entry];
- desc->ds_cc = cpu_to_le16(priv->info->rx_max_desc_use);
-
- if (!priv->rx_skb[q][entry]) {
- skb = ravb_alloc_skb(ndev, info, GFP_ATOMIC);
- if (!skb)
- break; /* Better luck next round. */
- dma_addr = dma_map_single(ndev->dev.parent, skb->data,
- priv->info->rx_max_frame_size,
- DMA_FROM_DEVICE);
- skb_checksum_none_assert(skb);
- /* We just set the data size to 0 for a failed mapping
- * which should prevent DMA from happening...
- */
- if (dma_mapping_error(ndev->dev.parent, dma_addr))
- desc->ds_cc = cpu_to_le16(0);
- desc->dptr = cpu_to_le32(dma_addr);
- priv->rx_skb[q][entry] = skb;
- }
- /* Descriptor type must be set after all the above writes */
- dma_wmb();
- desc->die_dt = DT_FEMPTY;
- }
+ priv->dirty_rx[q] += ravb_rx_ring_refill(ndev, q,
+ priv->cur_rx[q] - priv->dirty_rx[q],
+ GFP_ATOMIC);
stats->rx_packets += rx_packets;
- *quota -= rx_packets;
- return *quota == 0;
+ return rx_packets;
}
/* Packet receive function for Ethernet AVB */
-static bool ravb_rx(struct net_device *ndev, int *quota, int q)
+static int ravb_rx(struct net_device *ndev, int budget, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
- return info->receive(ndev, quota, q);
+ return info->receive(ndev, budget, q);
}
static void ravb_rcv_snd_disable(struct net_device *ndev)
@@ -1319,13 +1380,12 @@ static int ravb_poll(struct napi_struct *napi, int budget)
unsigned long flags;
int q = napi - priv->napi;
int mask = BIT(q);
- int quota = budget;
- bool unmask;
+ int work_done;
/* Processing RX Descriptor Ring */
/* Clear RX interrupt */
ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
- unmask = !ravb_rx(ndev, &quota, q);
+ work_done = ravb_rx(ndev, budget, q);
/* Processing TX Descriptor Ring */
spin_lock_irqsave(&priv->lock, flags);
@@ -1344,24 +1404,20 @@ static int ravb_poll(struct napi_struct *napi, int budget)
if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
- if (!unmask)
- goto out;
-
- napi_complete(napi);
-
- /* Re-enable RX/TX interrupts */
- spin_lock_irqsave(&priv->lock, flags);
- if (!info->irq_en_dis) {
- ravb_modify(ndev, RIC0, mask, mask);
- ravb_modify(ndev, TIC, mask, mask);
- } else {
- ravb_write(ndev, mask, RIE0);
- ravb_write(ndev, mask, TIE);
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ /* Re-enable RX/TX interrupts */
+ spin_lock_irqsave(&priv->lock, flags);
+ if (!info->irq_en_dis) {
+ ravb_modify(ndev, RIC0, mask, mask);
+ ravb_modify(ndev, TIC, mask, mask);
+ } else {
+ ravb_write(ndev, mask, RIE0);
+ ravb_write(ndev, mask, TIE);
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
}
- spin_unlock_irqrestore(&priv->lock, flags);
-out:
- return budget - quota;
+ return work_done;
}
static void ravb_set_duplex_gbeth(struct net_device *ndev)
@@ -1696,25 +1752,24 @@ static int ravb_set_ringparam(struct net_device *ndev,
}
static int ravb_get_ts_info(struct net_device *ndev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *hw_info = priv->info;
- info->so_timestamping =
- SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
- SOF_TIMESTAMPING_TX_HARDWARE |
- SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RAW_HARDWARE;
- info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
- info->rx_filters =
- (1 << HWTSTAMP_FILTER_NONE) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
- (1 << HWTSTAMP_FILTER_ALL);
- if (hw_info->gptp || hw_info->ccc_gac)
+ if (hw_info->gptp || hw_info->ccc_gac) {
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+ info->rx_filters =
+ (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ (1 << HWTSTAMP_FILTER_ALL);
info->phc_index = ptp_clock_index(priv->ptp.clock);
+ }
return 0;
}
@@ -1931,7 +1986,6 @@ out_ptp_stop:
out_set_reset:
ravb_set_opmode(ndev, CCC_OPC_RESET);
out_rpm_put:
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
out_napi_off:
if (info->nc_queues)
@@ -2019,32 +2073,44 @@ out_unlock:
static bool ravb_can_tx_csum_gbeth(struct sk_buff *skb)
{
- struct iphdr *ip = ip_hdr(skb);
+ u16 net_protocol = ntohs(skb->protocol);
+ u8 inner_protocol;
- /* TODO: Need to add support for VLAN tag 802.1Q */
- if (skb_vlan_tag_present(skb))
- return false;
+ /* GbEth IP can calculate the checksum if:
+ * - there are zero or one VLAN headers with TPID=0x8100
+ * - the network protocol is IPv4 or IPv6
+ * - the transport protocol is TCP, UDP or ICMP
+ * - the packet is not fragmented
+ */
- /* TODO: Need to add hardware checksum for IPv6 */
- if (skb->protocol != htons(ETH_P_IP))
- return false;
+ if (net_protocol == ETH_P_8021Q) {
+ struct vlan_hdr vhdr, *vh;
- switch (ip->protocol) {
- case IPPROTO_TCP:
- break;
- case IPPROTO_UDP:
- /* If the checksum value in the UDP header field is 0, TOE does
- * not calculate checksum for UDP part of this frame as it is
- * optional function as per standards.
- */
- if (udp_hdr(skb)->check == 0)
+ vh = skb_header_pointer(skb, ETH_HLEN, sizeof(vhdr), &vhdr);
+ if (!vh)
return false;
+
+ net_protocol = ntohs(vh->h_vlan_encapsulated_proto);
+ }
+
+ switch (net_protocol) {
+ case ETH_P_IP:
+ inner_protocol = ip_hdr(skb)->protocol;
+ break;
+ case ETH_P_IPV6:
+ inner_protocol = ipv6_hdr(skb)->nexthdr;
break;
default:
return false;
}
- return true;
+ switch (inner_protocol) {
+ case IPPROTO_TCP:
+ case IPPROTO_UDP:
+ return true;
+ default:
+ return false;
+ }
}
/* Packet transmit function for Ethernet AVB */
@@ -2155,15 +2221,35 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb_tx_timestamp(skb);
}
- /* Descriptor type must be set after all the above writes */
- dma_wmb();
+
if (num_tx_desc > 1) {
desc->die_dt = DT_FEND;
desc--;
+ /* When using multi-descriptors, DT_FEND needs to get written
+ * before DT_FSTART, but the compiler may reorder the memory
+ * writes in an attempt to optimize the code.
+ * Use a dma_wmb() barrier to make sure DT_FEND and DT_FSTART
+ * are written exactly in the order shown in the code.
+ * This is particularly important for cases where the DMA engine
+ * is already running when we are running this code. If the DMA
+ * sees DT_FSTART without the corresponding DT_FEND it will enter
+ * an error condition.
+ */
+ dma_wmb();
desc->die_dt = DT_FSTART;
} else {
+ /* Descriptor type must be set after all the above writes */
+ dma_wmb();
desc->die_dt = DT_FSINGLE;
}
+
+ /* Before ringing the doorbell we need to make sure that the latest
+ * writes have been committed to memory, otherwise it could delay
+ * things until the doorbell is rung again.
+ * This replaces the read operation mentioned in the HW manuals.
+ */
+ dma_wmb();
ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q);
priv->cur_tx[q] += num_tx_desc;
@@ -2328,102 +2414,62 @@ static int ravb_close(struct net_device *ndev)
if (error)
return error;
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
}
-static int ravb_hwtstamp_get(struct net_device *ndev, struct ifreq *req)
+static int ravb_hwtstamp_get(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config)
{
struct ravb_private *priv = netdev_priv(ndev);
- struct hwtstamp_config config;
- config.flags = 0;
- config.tx_type = priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
- HWTSTAMP_TX_OFF;
- switch (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE) {
- case RAVB_RXTSTAMP_TYPE_V2_L2_EVENT:
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
- break;
- case RAVB_RXTSTAMP_TYPE_ALL:
- config.rx_filter = HWTSTAMP_FILTER_ALL;
- break;
- default:
- config.rx_filter = HWTSTAMP_FILTER_NONE;
- }
+ config->flags = 0;
+ config->tx_type = priv->tstamp_tx_ctrl;
+ config->rx_filter = priv->tstamp_rx_ctrl;
- return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
/* Control hardware time stamping */
-static int ravb_hwtstamp_set(struct net_device *ndev, struct ifreq *req)
+static int ravb_hwtstamp_set(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct ravb_private *priv = netdev_priv(ndev);
- struct hwtstamp_config config;
- u32 tstamp_rx_ctrl = RAVB_RXTSTAMP_ENABLED;
- u32 tstamp_tx_ctrl;
+ enum hwtstamp_rx_filters tstamp_rx_ctrl;
+ enum hwtstamp_tx_types tstamp_tx_ctrl;
- if (copy_from_user(&config, req->ifr_data, sizeof(config)))
- return -EFAULT;
-
- switch (config.tx_type) {
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
- tstamp_tx_ctrl = 0;
- break;
case HWTSTAMP_TX_ON:
- tstamp_tx_ctrl = RAVB_TXTSTAMP_ENABLED;
+ tstamp_tx_ctrl = config->tx_type;
break;
default:
return -ERANGE;
}
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
- tstamp_rx_ctrl = 0;
- break;
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
+ tstamp_rx_ctrl = config->rx_filter;
break;
default:
- config.rx_filter = HWTSTAMP_FILTER_ALL;
- tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+ tstamp_rx_ctrl = HWTSTAMP_FILTER_ALL;
}
priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
priv->tstamp_rx_ctrl = tstamp_rx_ctrl;
- return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
-}
-
-/* ioctl to device function */
-static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
-{
- struct phy_device *phydev = ndev->phydev;
-
- if (!netif_running(ndev))
- return -EINVAL;
-
- if (!phydev)
- return -ENODEV;
-
- switch (cmd) {
- case SIOCGHWTSTAMP:
- return ravb_hwtstamp_get(ndev, req);
- case SIOCSHWTSTAMP:
- return ravb_hwtstamp_set(ndev, req);
- }
-
- return phy_mii_ioctl(phydev, req, cmd);
+ return 0;
}
static int ravb_change_mtu(struct net_device *ndev, int new_mtu)
{
struct ravb_private *priv = netdev_priv(ndev);
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
if (netif_running(ndev)) {
synchronize_irq(priv->emac_irq);
@@ -2482,7 +2528,7 @@ static int ravb_set_features_gbeth(struct net_device *ndev,
spin_lock_irqsave(&priv->lock, flags);
if (changed & NETIF_F_RXCSUM) {
if (features & NETIF_F_RXCSUM)
- val = CSR2_RIP4 | CSR2_RTCP4 | CSR2_RUDP4 | CSR2_RICMP4;
+ val = CSR2_CSUM_ENABLE;
else
val = 0;
@@ -2493,7 +2539,7 @@ static int ravb_set_features_gbeth(struct net_device *ndev,
if (changed & NETIF_F_HW_CSUM) {
if (features & NETIF_F_HW_CSUM)
- val = CSR1_TIP4 | CSR1_TTCP4 | CSR1_TUDP4;
+ val = CSR1_CSUM_ENABLE;
else
val = 0;
@@ -2552,11 +2598,13 @@ static const struct net_device_ops ravb_netdev_ops = {
.ndo_get_stats = ravb_get_stats,
.ndo_set_rx_mode = ravb_set_rx_mode,
.ndo_tx_timeout = ravb_tx_timeout,
- .ndo_eth_ioctl = ravb_do_ioctl,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_change_mtu = ravb_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_set_features = ravb_set_features,
+ .ndo_hwtstamp_get = ravb_hwtstamp_get,
+ .ndo_hwtstamp_set = ravb_hwtstamp_set,
};
/* MDIO bus init function */
@@ -2564,6 +2612,7 @@ static int ravb_mdio_init(struct ravb_private *priv)
{
struct platform_device *pdev = priv->pdev;
struct device *dev = &pdev->dev;
+ struct device_node *mdio_node;
struct phy_device *phydev;
struct device_node *pn;
int error;
@@ -2583,7 +2632,13 @@ static int ravb_mdio_init(struct ravb_private *priv)
pdev->name, pdev->id);
/* Register MDIO bus */
- error = of_mdiobus_register(priv->mii_bus, dev->of_node);
+ mdio_node = of_get_child_by_name(dev->of_node, "mdio");
+ if (!mdio_node) {
+ /* backwards compatibility for DT lacking mdio subnode */
+ mdio_node = of_node_get(dev->of_node);
+ }
+ error = of_mdiobus_register(priv->mii_bus, mdio_node);
+ of_node_put(mdio_node);
if (error)
goto out_free_bus;
@@ -2614,6 +2669,30 @@ static int ravb_mdio_release(struct ravb_private *priv)
return 0;
}
+static const struct ravb_hw_info ravb_gen2_hw_info = {
+ .receive = ravb_rx_rcar,
+ .set_rate = ravb_set_rate_rcar,
+ .set_feature = ravb_set_features_rcar,
+ .dmac_init = ravb_dmac_init_rcar,
+ .emac_init = ravb_emac_init_rcar,
+ .gstrings_stats = ravb_gstrings_stats,
+ .gstrings_size = sizeof(ravb_gstrings_stats),
+ .net_hw_features = NETIF_F_RXCSUM,
+ .net_features = NETIF_F_RXCSUM,
+ .stats_len = ARRAY_SIZE(ravb_gstrings_stats),
+ .tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
+ .tx_max_frame_size = SZ_2K,
+ .rx_max_frame_size = SZ_2K,
+ .rx_buffer_size = SZ_2K +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
+ .rx_desc_size = sizeof(struct ravb_ex_rx_desc),
+ .dbat_entry_num = 22,
+ .aligned_tx = 1,
+ .gptp = 1,
+ .nc_queues = 1,
+ .magic_pkt = 1,
+};
+
static const struct ravb_hw_info ravb_gen3_hw_info = {
.receive = ravb_rx_rcar,
.set_rate = ravb_set_rate_rcar,
@@ -2626,9 +2705,12 @@ static const struct ravb_hw_info ravb_gen3_hw_info = {
.net_features = NETIF_F_RXCSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
+ .tx_max_frame_size = SZ_2K,
.rx_max_frame_size = SZ_2K,
- .rx_max_desc_use = SZ_2K - ETH_FCS_LEN + sizeof(__sum16),
+ .rx_buffer_size = SZ_2K +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
.rx_desc_size = sizeof(struct ravb_ex_rx_desc),
+ .dbat_entry_num = 22,
.internal_delay = 1,
.tx_counters = 1,
.multi_irqs = 1,
@@ -2638,23 +2720,29 @@ static const struct ravb_hw_info ravb_gen3_hw_info = {
.magic_pkt = 1,
};
-static const struct ravb_hw_info ravb_gen2_hw_info = {
+static const struct ravb_hw_info ravb_gen4_hw_info = {
.receive = ravb_rx_rcar,
.set_rate = ravb_set_rate_rcar,
.set_feature = ravb_set_features_rcar,
.dmac_init = ravb_dmac_init_rcar,
- .emac_init = ravb_emac_init_rcar,
+ .emac_init = ravb_emac_init_rcar_gen4,
.gstrings_stats = ravb_gstrings_stats,
.gstrings_size = sizeof(ravb_gstrings_stats),
.net_hw_features = NETIF_F_RXCSUM,
.net_features = NETIF_F_RXCSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
+ .tx_max_frame_size = SZ_2K,
.rx_max_frame_size = SZ_2K,
- .rx_max_desc_use = SZ_2K - ETH_FCS_LEN + sizeof(__sum16),
+ .rx_buffer_size = SZ_2K +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
.rx_desc_size = sizeof(struct ravb_ex_rx_desc),
- .aligned_tx = 1,
- .gptp = 1,
+ .dbat_entry_num = 22,
+ .internal_delay = 1,
+ .tx_counters = 1,
+ .multi_irqs = 1,
+ .irq_en_dis = 1,
+ .ccc_gac = 1,
.nc_queues = 1,
.magic_pkt = 1,
};
@@ -2671,9 +2759,12 @@ static const struct ravb_hw_info ravb_rzv2m_hw_info = {
.net_features = NETIF_F_RXCSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
+ .tx_max_frame_size = SZ_2K,
.rx_max_frame_size = SZ_2K,
- .rx_max_desc_use = SZ_2K - ETH_FCS_LEN + sizeof(__sum16),
+ .rx_buffer_size = SZ_2K +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
.rx_desc_size = sizeof(struct ravb_ex_rx_desc),
+ .dbat_entry_num = 22,
.multi_irqs = 1,
.err_mgmt_irqs = 1,
.gptp = 1,
@@ -2692,12 +2783,16 @@ static const struct ravb_hw_info gbeth_hw_info = {
.gstrings_size = sizeof(ravb_gstrings_stats_gbeth),
.net_hw_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM,
.net_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM,
+ .vlan_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats_gbeth),
.tccr_mask = TCCR_TSRQ0,
+ .tx_max_frame_size = 1522,
.rx_max_frame_size = SZ_8K,
- .rx_max_desc_use = 4080,
+ .rx_buffer_size = SZ_2K,
.rx_desc_size = sizeof(struct ravb_rx_desc),
+ .dbat_entry_num = 2,
.aligned_tx = 1,
+ .coalesce_irqs = 1,
.tx_counters = 1,
.carrier_counters = 1,
.half_duplex = 1,
@@ -2709,7 +2804,7 @@ static const struct of_device_id ravb_match_table[] = {
{ .compatible = "renesas,etheravb-rcar-gen2", .data = &ravb_gen2_hw_info },
{ .compatible = "renesas,etheravb-r8a7795", .data = &ravb_gen3_hw_info },
{ .compatible = "renesas,etheravb-rcar-gen3", .data = &ravb_gen3_hw_info },
- { .compatible = "renesas,etheravb-rcar-gen4", .data = &ravb_gen3_hw_info },
+ { .compatible = "renesas,etheravb-rcar-gen4", .data = &ravb_gen4_hw_info },
{ .compatible = "renesas,etheravb-rzv2m", .data = &ravb_rzv2m_hw_info },
{ .compatible = "renesas,rzg2l-gbeth", .data = &gbeth_hw_info },
{ }
@@ -2722,19 +2817,18 @@ static int ravb_setup_irq(struct ravb_private *priv, const char *irq_name,
struct platform_device *pdev = priv->pdev;
struct net_device *ndev = priv->ndev;
struct device *dev = &pdev->dev;
- const char *dev_name;
+ const char *devname = dev_name(dev);
unsigned long flags;
int error, irq_num;
if (irq_name) {
- dev_name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", ndev->name, ch);
- if (!dev_name)
+ devname = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", devname, ch);
+ if (!devname)
return -ENOMEM;
irq_num = platform_get_irq_byname(pdev, irq_name);
flags = 0;
} else {
- dev_name = ndev->name;
irq_num = platform_get_irq(pdev, 0);
flags = IRQF_SHARED;
}
@@ -2744,9 +2838,9 @@ static int ravb_setup_irq(struct ravb_private *priv, const char *irq_name,
if (irq)
*irq = irq_num;
- error = devm_request_irq(dev, irq_num, handler, flags, dev_name, ndev);
+ error = devm_request_irq(dev, irq_num, handler, flags, devname, ndev);
if (error)
- netdev_err(ndev, "cannot request IRQ %s\n", dev_name);
+ netdev_err(ndev, "cannot request IRQ %s\n", devname);
return error;
}
@@ -2824,15 +2918,17 @@ static int ravb_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, PTR_ERR(rstc),
"failed to get cpg reset\n");
+ info = of_device_get_match_data(&pdev->dev);
+
ndev = alloc_etherdev_mqs(sizeof(struct ravb_private),
- NUM_TX_QUEUE, NUM_RX_QUEUE);
+ info->nc_queues ? NUM_TX_QUEUE : 1,
+ info->nc_queues ? NUM_RX_QUEUE : 1);
if (!ndev)
return -ENOMEM;
- info = of_device_get_match_data(&pdev->dev);
-
ndev->features = info->net_features;
ndev->hw_features = info->net_hw_features;
+ ndev->vlan_features = info->vlan_features;
error = reset_control_deassert(rstc);
if (error)
@@ -2905,7 +3001,7 @@ static int ravb_probe(struct platform_device *pdev)
priv->avb_link_active_low =
of_property_read_bool(np, "renesas,ether-link-active-low");
- ndev->max_mtu = info->rx_max_frame_size -
+ ndev->max_mtu = info->tx_max_frame_size -
(ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
ndev->min_mtu = ETH_MIN_MTU;
@@ -2927,7 +3023,7 @@ static int ravb_probe(struct platform_device *pdev)
ravb_parse_delay_mode(np, ndev);
/* Allocate descriptor base address table */
- priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
+ priv->desc_bat_size = sizeof(struct ravb_desc) * info->dbat_entry_num;
priv->desc_bat = dma_alloc_coherent(ndev->dev.parent, priv->desc_bat_size,
&priv->desc_bat_dma, GFP_KERNEL);
if (!priv->desc_bat) {
@@ -2937,7 +3033,7 @@ static int ravb_probe(struct platform_device *pdev)
error = -ENOMEM;
goto out_rpm_put;
}
- for (q = RAVB_BE; q < DBAT_ENTRY_NUM; q++)
+ for (q = RAVB_BE; q < info->dbat_entry_num; q++)
priv->desc_bat[q].die_dt = DT_EOS;
/* Initialise HW timestamp list */
@@ -2975,6 +3071,12 @@ static int ravb_probe(struct platform_device *pdev)
if (info->nc_queues)
netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll);
+ if (info->coalesce_irqs) {
+ netdev_sw_irq_coalesce_default_on(ndev);
+ if (num_present_cpus() == 1)
+ netif_threaded_enable(ndev);
+ }
+
/* Network device register */
error = register_netdev(ndev);
if (error)
@@ -2986,7 +3088,6 @@ static int ravb_probe(struct platform_device *pdev)
netdev_info(ndev, "Base address at %#x, %pM, IRQ %d.\n",
(u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
- pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
return 0;
@@ -3114,10 +3215,15 @@ static int ravb_suspend(struct device *dev)
netif_device_detach(ndev);
- if (priv->wol_enabled)
- return ravb_wol_setup(ndev);
+ rtnl_lock();
+ if (priv->wol_enabled) {
+ ret = ravb_wol_setup(ndev);
+ rtnl_unlock();
+ return ret;
+ }
ret = ravb_close(ndev);
+ rtnl_unlock();
if (ret)
return ret;
@@ -3142,19 +3248,20 @@ static int ravb_resume(struct device *dev)
if (!netif_running(ndev))
return 0;
+ rtnl_lock();
/* If WoL is enabled restore the interface. */
- if (priv->wol_enabled) {
+ if (priv->wol_enabled)
ret = ravb_wol_restore(ndev);
- if (ret)
- return ret;
- } else {
+ else
ret = pm_runtime_force_resume(dev);
- if (ret)
- return ret;
+ if (ret) {
+ rtnl_unlock();
+ return ret;
}
/* Reopening the interface will restore the device to the working state. */
ret = ravb_open(ndev);
+ rtnl_unlock();
if (ret < 0)
goto out_rpm_put;
@@ -3164,10 +3271,8 @@ static int ravb_resume(struct device *dev)
return 0;
out_rpm_put:
- if (!priv->wol_enabled) {
- pm_runtime_mark_last_busy(dev);
+ if (!priv->wol_enabled)
pm_runtime_put_autosuspend(dev);
- }
return ret;
}
@@ -3197,7 +3302,7 @@ static const struct dev_pm_ops ravb_dev_pm_ops = {
static struct platform_driver ravb_driver = {
.probe = ravb_probe,
- .remove_new = ravb_remove,
+ .remove = ravb_remove,
.driver = {
.name = "ravb",
.pm = pm_ptr(&ravb_dev_pm_ops),
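The ravb_poll() hunk above converts the driver to the standard NAPI completion contract: report how many packets were processed and re-arm interrupts only once napi_complete_done() accepts the completion. A minimal sketch of that pattern, with hypothetical helpers standing in for the driver's RX and interrupt-enable paths:

static int example_poll(struct napi_struct *napi, int budget)
{
	/* example_rx() and example_irq_enable() are placeholders, not ravb symbols */
	int work_done = example_rx(napi, budget);

	if (work_done < budget && napi_complete_done(napi, work_done))
		example_irq_enable(napi);

	return work_done;
}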
diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c
index 6e4ef7af27bf..226c6c0ab945 100644
--- a/drivers/net/ethernet/renesas/ravb_ptp.c
+++ b/drivers/net/ethernet/renesas/ravb_ptp.c
@@ -176,13 +176,6 @@ static int ravb_ptp_extts(struct ptp_clock_info *ptp,
struct net_device *ndev = priv->ndev;
unsigned long flags;
- /* Reject requests with unsupported flags */
- if (req->flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_FALLING_EDGE |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
if (req->index)
return -EINVAL;
@@ -213,10 +206,6 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp,
unsigned long flags;
int error = 0;
- /* Reject requests with unsupported flags */
- if (req->flags)
- return -EOPNOTSUPP;
-
if (req->index)
return -EINVAL;
@@ -288,6 +277,7 @@ static const struct ptp_clock_info ravb_ptp_info = {
.max_adj = 50000000,
.n_ext_ts = N_EXT_TS,
.n_per_out = N_PER_OUT,
+ .supported_extts_flags = PTP_RISING_EDGE | PTP_FALLING_EDGE,
.adjfine = ravb_ptp_adjfine,
.adjtime = ravb_ptp_adjtime,
.gettime64 = ravb_ptp_gettime64,
diff --git a/drivers/net/ethernet/renesas/rcar_gen4_ptp.c b/drivers/net/ethernet/renesas/rcar_gen4_ptp.c
index 72e7fcc56693..d0979abd36de 100644
--- a/drivers/net/ethernet/renesas/rcar_gen4_ptp.c
+++ b/drivers/net/ethernet/renesas/rcar_gen4_ptp.c
@@ -12,25 +12,24 @@
#include <linux/slab.h>
#include "rcar_gen4_ptp.h"
-#define ptp_to_priv(ptp) container_of(ptp, struct rcar_gen4_ptp_private, info)
-static const struct rcar_gen4_ptp_reg_offset gen4_offs = {
- .enable = PTPTMEC,
- .disable = PTPTMDC,
- .increment = PTPTIVC0,
- .config_t0 = PTPTOVC00,
- .config_t1 = PTPTOVC10,
- .config_t2 = PTPTOVC20,
- .monitor_t0 = PTPGPTPTM00,
- .monitor_t1 = PTPGPTPTM10,
- .monitor_t2 = PTPGPTPTM20,
-};
+#define PTPTMEC_REG 0x0010
+#define PTPTMDC_REG 0x0014
+#define PTPTIVC0_REG 0x0020
+#define PTPTOVC00_REG 0x0030
+#define PTPTOVC10_REG 0x0034
+#define PTPTOVC20_REG 0x0038
+#define PTPGPTPTM00_REG 0x0050
+#define PTPGPTPTM10_REG 0x0054
+#define PTPGPTPTM20_REG 0x0058
+
+#define ptp_to_priv(ptp) container_of(ptp, struct rcar_gen4_ptp_private, info)
static int rcar_gen4_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct rcar_gen4_ptp_private *ptp_priv = ptp_to_priv(ptp);
- bool neg_adj = scaled_ppm < 0 ? true : false;
s64 addend = ptp_priv->default_addend;
+ bool neg_adj = scaled_ppm < 0;
s64 diff;
if (neg_adj)
@@ -38,20 +37,21 @@ static int rcar_gen4_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
diff = div_s64(addend * scaled_ppm_to_ppb(scaled_ppm), NSEC_PER_SEC);
addend = neg_adj ? addend - diff : addend + diff;
- iowrite32(addend, ptp_priv->addr + ptp_priv->offs->increment);
+ iowrite32(addend, ptp_priv->addr + PTPTIVC0_REG);
return 0;
}
-/* Caller must hold the lock */
static void _rcar_gen4_ptp_gettime(struct ptp_clock_info *ptp,
struct timespec64 *ts)
{
struct rcar_gen4_ptp_private *ptp_priv = ptp_to_priv(ptp);
- ts->tv_nsec = ioread32(ptp_priv->addr + ptp_priv->offs->monitor_t0);
- ts->tv_sec = ioread32(ptp_priv->addr + ptp_priv->offs->monitor_t1) |
- ((s64)ioread32(ptp_priv->addr + ptp_priv->offs->monitor_t2) << 32);
+ lockdep_assert_held(&ptp_priv->lock);
+
+ ts->tv_nsec = ioread32(ptp_priv->addr + PTPGPTPTM00_REG);
+ ts->tv_sec = ioread32(ptp_priv->addr + PTPGPTPTM10_REG) |
+ ((s64)ioread32(ptp_priv->addr + PTPGPTPTM20_REG) << 32);
}
static int rcar_gen4_ptp_gettime(struct ptp_clock_info *ptp,
@@ -73,14 +73,14 @@ static void _rcar_gen4_ptp_settime(struct ptp_clock_info *ptp,
{
struct rcar_gen4_ptp_private *ptp_priv = ptp_to_priv(ptp);
- iowrite32(1, ptp_priv->addr + ptp_priv->offs->disable);
- iowrite32(0, ptp_priv->addr + ptp_priv->offs->config_t2);
- iowrite32(0, ptp_priv->addr + ptp_priv->offs->config_t1);
- iowrite32(0, ptp_priv->addr + ptp_priv->offs->config_t0);
- iowrite32(1, ptp_priv->addr + ptp_priv->offs->enable);
- iowrite32(ts->tv_sec >> 32, ptp_priv->addr + ptp_priv->offs->config_t2);
- iowrite32(ts->tv_sec, ptp_priv->addr + ptp_priv->offs->config_t1);
- iowrite32(ts->tv_nsec, ptp_priv->addr + ptp_priv->offs->config_t0);
+ iowrite32(1, ptp_priv->addr + PTPTMDC_REG);
+ iowrite32(0, ptp_priv->addr + PTPTOVC20_REG);
+ iowrite32(0, ptp_priv->addr + PTPTOVC10_REG);
+ iowrite32(0, ptp_priv->addr + PTPTOVC00_REG);
+ iowrite32(1, ptp_priv->addr + PTPTMEC_REG);
+ iowrite32(ts->tv_sec >> 32, ptp_priv->addr + PTPTOVC20_REG);
+ iowrite32(ts->tv_sec, ptp_priv->addr + PTPTOVC10_REG);
+ iowrite32(ts->tv_nsec, ptp_priv->addr + PTPTOVC00_REG);
}
static int rcar_gen4_ptp_settime(struct ptp_clock_info *ptp,
@@ -130,17 +130,6 @@ static struct ptp_clock_info rcar_gen4_ptp_info = {
.enable = rcar_gen4_ptp_enable,
};
-static int rcar_gen4_ptp_set_offs(struct rcar_gen4_ptp_private *ptp_priv,
- enum rcar_gen4_ptp_reg_layout layout)
-{
- if (layout != RCAR_GEN4_PTP_REG_LAYOUT)
- return -EINVAL;
-
- ptp_priv->offs = &gen4_offs;
-
- return 0;
-}
-
static s64 rcar_gen4_ptp_rate_to_increment(u32 rate)
{
/* Timer increment in ns.
@@ -151,27 +140,20 @@ static s64 rcar_gen4_ptp_rate_to_increment(u32 rate)
return div_s64(1000000000LL << 27, rate);
}
-int rcar_gen4_ptp_register(struct rcar_gen4_ptp_private *ptp_priv,
- enum rcar_gen4_ptp_reg_layout layout, u32 rate)
+int rcar_gen4_ptp_register(struct rcar_gen4_ptp_private *ptp_priv, u32 rate)
{
- int ret;
-
if (ptp_priv->initialized)
return 0;
spin_lock_init(&ptp_priv->lock);
- ret = rcar_gen4_ptp_set_offs(ptp_priv, layout);
- if (ret)
- return ret;
-
ptp_priv->default_addend = rcar_gen4_ptp_rate_to_increment(rate);
- iowrite32(ptp_priv->default_addend, ptp_priv->addr + ptp_priv->offs->increment);
+ iowrite32(ptp_priv->default_addend, ptp_priv->addr + PTPTIVC0_REG);
ptp_priv->clock = ptp_clock_register(&ptp_priv->info, NULL);
if (IS_ERR(ptp_priv->clock))
return PTR_ERR(ptp_priv->clock);
- iowrite32(0x01, ptp_priv->addr + ptp_priv->offs->enable);
+ iowrite32(0x01, ptp_priv->addr + PTPTMEC_REG);
ptp_priv->initialized = true;
return 0;
@@ -180,7 +162,7 @@ EXPORT_SYMBOL_GPL(rcar_gen4_ptp_register);
int rcar_gen4_ptp_unregister(struct rcar_gen4_ptp_private *ptp_priv)
{
- iowrite32(1, ptp_priv->addr + ptp_priv->offs->disable);
+ iowrite32(1, ptp_priv->addr + PTPTMDC_REG);
return ptp_clock_unregister(ptp_priv->clock);
}
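The rcar_gen4_ptp_rate_to_increment() and rcar_gen4_ptp_adjfine() changes above keep the timer increment as nanoseconds with 27 fractional bits. A worked example, assuming a 200 MHz gPTP input clock (the rate value is not taken from this patch):

	u32 rate = 200000000;					/* assumed example rate */
	s64 addend = div_s64(1000000000LL << 27, rate);		/* 5 << 27, i.e. 5 ns per tick */
	/* a +100 ppm adjfine request arrives as scaled_ppm = 100 << 16 */
	s64 diff = div_s64(addend * scaled_ppm_to_ppb(100 << 16), NSEC_PER_SEC);
	/* scaled_ppm_to_ppb(100 << 16) == 100000, so diff == addend / 10000 */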
diff --git a/drivers/net/ethernet/renesas/rcar_gen4_ptp.h b/drivers/net/ethernet/renesas/rcar_gen4_ptp.h
index e22da5acd53d..9a9c232c854e 100644
--- a/drivers/net/ethernet/renesas/rcar_gen4_ptp.h
+++ b/drivers/net/ethernet/renesas/rcar_gen4_ptp.h
@@ -9,60 +9,16 @@
#include <linux/ptp_clock_kernel.h>
-#define RCAR_GEN4_GPTP_OFFSET_S4 0x00018000
-
-enum rcar_gen4_ptp_reg_layout {
- RCAR_GEN4_PTP_REG_LAYOUT
-};
-
-/* driver's definitions */
-#define RCAR_GEN4_RXTSTAMP_ENABLED BIT(0)
-#define RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT BIT(1)
-#define RCAR_GEN4_RXTSTAMP_TYPE_ALL (RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT | BIT(2))
-#define RCAR_GEN4_RXTSTAMP_TYPE RCAR_GEN4_RXTSTAMP_TYPE_ALL
-
-#define RCAR_GEN4_TXTSTAMP_ENABLED BIT(0)
-
-#define PTPRO 0
-
-enum rcar_gen4_ptp_reg {
- PTPTMEC = PTPRO + 0x0010,
- PTPTMDC = PTPRO + 0x0014,
- PTPTIVC0 = PTPRO + 0x0020,
- PTPTOVC00 = PTPRO + 0x0030,
- PTPTOVC10 = PTPRO + 0x0034,
- PTPTOVC20 = PTPRO + 0x0038,
- PTPGPTPTM00 = PTPRO + 0x0050,
- PTPGPTPTM10 = PTPRO + 0x0054,
- PTPGPTPTM20 = PTPRO + 0x0058,
-};
-
-struct rcar_gen4_ptp_reg_offset {
- u16 enable;
- u16 disable;
- u16 increment;
- u16 config_t0;
- u16 config_t1;
- u16 config_t2;
- u16 monitor_t0;
- u16 monitor_t1;
- u16 monitor_t2;
-};
-
struct rcar_gen4_ptp_private {
void __iomem *addr;
struct ptp_clock *clock;
struct ptp_clock_info info;
- const struct rcar_gen4_ptp_reg_offset *offs;
spinlock_t lock; /* For multiple registers access */
- u32 tstamp_tx_ctrl;
- u32 tstamp_rx_ctrl;
s64 default_addend;
bool initialized;
};
-int rcar_gen4_ptp_register(struct rcar_gen4_ptp_private *ptp_priv,
- enum rcar_gen4_ptp_reg_layout layout, u32 rate);
+int rcar_gen4_ptp_register(struct rcar_gen4_ptp_private *ptp_priv, u32 rate);
int rcar_gen4_ptp_unregister(struct rcar_gen4_ptp_private *ptp_priv);
struct rcar_gen4_ptp_private *rcar_gen4_ptp_alloc(struct platform_device *pdev);
diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h
index 72e3ff596d31..aa605304fed0 100644
--- a/drivers/net/ethernet/renesas/rswitch.h
+++ b/drivers/net/ethernet/renesas/rswitch.h
@@ -1,18 +1,25 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Renesas Ethernet Switch device driver
*
- * Copyright (C) 2022 Renesas Electronics Corporation
+ * Copyright (C) 2022-2025 Renesas Electronics Corporation
*/
#ifndef __RSWITCH_H__
#define __RSWITCH_H__
#include <linux/platform_device.h>
+#include <linux/phy.h>
+
#include "rcar_gen4_ptp.h"
#define RSWITCH_MAX_NUM_QUEUES 128
+#define RSWITCH_NUM_AGENTS 5
#define RSWITCH_NUM_PORTS 3
+
+#define rswitch_for_all_ports(_priv, _rdev) \
+ list_for_each_entry(_rdev, &_priv->port_list, list)
+
#define rswitch_for_each_enabled_port(priv, i) \
for (i = 0; i < RSWITCH_NUM_PORTS; i++) \
if (priv->rdev[i]->disabled) \
@@ -724,35 +731,28 @@ enum rswitch_etha_mode {
#define EAVCC_VEM_SC_TAG (0x3 << 16)
-#define MPIC_PIS_MII 0x00
-#define MPIC_PIS_GMII 0x02
-#define MPIC_PIS_XGMII 0x04
-#define MPIC_LSC_SHIFT 3
-#define MPIC_LSC_100M (1 << MPIC_LSC_SHIFT)
-#define MPIC_LSC_1G (2 << MPIC_LSC_SHIFT)
-#define MPIC_LSC_2_5G (3 << MPIC_LSC_SHIFT)
-
-#define MDIO_READ_C45 0x03
-#define MDIO_WRITE_C45 0x01
+#define MPIC_PIS GENMASK(2, 0)
+#define MPIC_PIS_GMII 2
+#define MPIC_PIS_XGMII 4
+#define MPIC_LSC GENMASK(5, 3)
+#define MPIC_LSC_100M 1
+#define MPIC_LSC_1G 2
+#define MPIC_LSC_2_5G 3
+#define MPIC_PSMCS GENMASK(22, 16)
+#define MPIC_PSMHT GENMASK(26, 24)
#define MPSM_PSME BIT(0)
-#define MPSM_MFF_C45 BIT(2)
-#define MPSM_PRD_SHIFT 16
-#define MPSM_PRD_MASK GENMASK(31, MPSM_PRD_SHIFT)
-
-/* Completion flags */
-#define MMIS1_PAACS BIT(2) /* Address */
-#define MMIS1_PWACS BIT(1) /* Write */
-#define MMIS1_PRACS BIT(0) /* Read */
-#define MMIS1_CLEAR_FLAGS 0xf
-
-#define MPIC_PSMCS_SHIFT 16
-#define MPIC_PSMCS_MASK GENMASK(22, MPIC_PSMCS_SHIFT)
-#define MPIC_PSMCS(val) ((val) << MPIC_PSMCS_SHIFT)
-
-#define MPIC_PSMHT_SHIFT 24
-#define MPIC_PSMHT_MASK GENMASK(26, MPIC_PSMHT_SHIFT)
-#define MPIC_PSMHT(val) ((val) << MPIC_PSMHT_SHIFT)
+#define MPSM_MFF BIT(2)
+#define MPSM_MMF_C22 0
+#define MPSM_MMF_C45 1
+#define MPSM_PDA GENMASK(7, 3)
+#define MPSM_PRA GENMASK(12, 8)
+#define MPSM_POP GENMASK(14, 13)
+#define MPSM_POP_ADDRESS 0
+#define MPSM_POP_WRITE 1
+#define MPSM_POP_READ_C22 2
+#define MPSM_POP_READ_C45 3
+#define MPSM_PRD GENMASK(31, 16)
#define MLVC_PLV BIT(16)
@@ -806,6 +806,7 @@ enum rswitch_gwca_mode {
#define CABPPFLC_INIT_VALUE 0x00800080
/* MFWD */
+#define FWPC0(i) (FWPC00 + (i) * 0x10)
#define FWPC0_LTHTA BIT(0)
#define FWPC0_IP4UE BIT(3)
#define FWPC0_IP4TE BIT(4)
@@ -814,23 +815,42 @@ enum rswitch_gwca_mode {
#define FWPC0_IP4EA BIT(10)
#define FWPC0_IPDSA BIT(12)
#define FWPC0_IPHLA BIT(18)
-#define FWPC0_MACSDA BIT(20)
+#define FWPC0_MACDSA BIT(20)
+#define FWPC0_MACSSA BIT(23)
#define FWPC0_MACHLA BIT(26)
#define FWPC0_MACHMA BIT(27)
#define FWPC0_VLANSA BIT(28)
-#define FWPC0(i) (FWPC00 + (i) * 0x10)
-#define FWPC0_DEFAULT (FWPC0_LTHTA | FWPC0_IP4UE | FWPC0_IP4TE | \
- FWPC0_IP4OE | FWPC0_L2SE | FWPC0_IP4EA | \
- FWPC0_IPDSA | FWPC0_IPHLA | FWPC0_MACSDA | \
- FWPC0_MACHLA | FWPC0_MACHMA | FWPC0_VLANSA)
#define FWPC1(i) (FWPC10 + (i) * 0x10)
+#define FWCP1_LTHFW GENMASK(16 + (RSWITCH_NUM_AGENTS - 1), 16)
#define FWPC1_DDE BIT(0)
-#define FWPBFC(i) (FWPBFC0 + (i) * 0x10)
+#define FWPC2(i) (FWPC20 + (i) * 0x10)
+#define FWCP2_LTWFW GENMASK(16 + (RSWITCH_NUM_AGENTS - 1), 16)
+#define FWCP2_LTWFW_MASK GENMASK(16 + (RSWITCH_NUM_AGENTS - 1), 16)
+
+#define FWPBFC(i) (FWPBFC0 + (i) * 0x10)
+#define FWPBFC_PBDV GENMASK(RSWITCH_NUM_AGENTS - 1, 0)
#define FWPBFCSDC(j, i) (FWPBFCSDC00 + (i) * 0x10 + (j) * 0x04)
+#define FWMACHEC_MACHMUE_MASK GENMASK(26, 16)
+
+#define FWMACTIM_MACTIOG BIT(0)
+#define FWMACTIM_MACTR BIT(1)
+
+#define FWMACAGUSPC_MACAGUSP GENMASK(9, 0)
+#define FWMACAGC_MACAGT GENMASK(15, 0)
+#define FWMACAGC_MACAGE BIT(16)
+#define FWMACAGC_MACAGSL BIT(17)
+#define FWMACAGC_MACAGPM BIT(18)
+#define FWMACAGC_MACDES BIT(24)
+#define FWMACAGC_MACAGOG BIT(28)
+#define FWMACAGC_MACDESOG BIT(29)
+
+#define RSW_AGEING_CLK_PER_US 0x140
+#define RSW_AGEING_TIME 300
+
/* TOP */
#define TPEMIMC7(queue) (TPEMIMC70 + (queue) * 4)
@@ -972,14 +992,6 @@ struct rswitch_gwca_queue {
};
};
-struct rswitch_gwca_ts_info {
- struct sk_buff *skb;
- struct list_head list;
-
- int port;
- u8 tag;
-};
-
#define RSWITCH_NUM_IRQ_REGS (RSWITCH_MAX_NUM_QUEUES / BITS_PER_TYPE(u32))
struct rswitch_gwca {
unsigned int index;
@@ -989,14 +1001,13 @@ struct rswitch_gwca {
struct rswitch_gwca_queue *queues;
int num_queues;
struct rswitch_gwca_queue ts_queue;
- struct list_head ts_info_list;
DECLARE_BITMAP(used, RSWITCH_MAX_NUM_QUEUES);
u32 tx_irq_bits[RSWITCH_NUM_IRQ_REGS];
u32 rx_irq_bits[RSWITCH_NUM_IRQ_REGS];
- int speed;
};
#define NUM_QUEUES_PER_NDEV 2
+#define TS_TAGS_PER_PORT 256
struct rswitch_device {
struct rswitch_private *priv;
struct net_device *ndev;
@@ -1004,13 +1015,22 @@ struct rswitch_device {
void __iomem *addr;
struct rswitch_gwca_queue *tx_queue;
struct rswitch_gwca_queue *rx_queue;
- u8 ts_tag;
+ struct sk_buff *ts_skb[TS_TAGS_PER_PORT];
+ DECLARE_BITMAP(ts_skb_used, TS_TAGS_PER_PORT);
bool disabled;
+ struct list_head list;
+
int port;
struct rswitch_etha *etha;
struct device_node *np_port;
struct phy *serdes;
+
+ struct net_device *brdev; /* master bridge device */
+ unsigned int learning_requested : 1;
+ unsigned int learning_offloaded : 1;
+ unsigned int forwarding_requested : 1;
+ unsigned int forwarding_offloaded : 1;
};
struct rswitch_mfwd_mac_table_entry {
@@ -1035,11 +1055,20 @@ struct rswitch_private {
struct rswitch_etha etha[RSWITCH_NUM_PORTS];
struct rswitch_mfwd mfwd;
+ struct list_head port_list;
+
spinlock_t lock; /* lock interrupt registers' control */
struct clk *clk;
bool etha_no_runtime_change;
bool gwca_halt;
+ struct net_device *offload_brdev;
+
+ enum hwtstamp_tx_types tstamp_tx_ctrl;
+ enum hwtstamp_rx_filters tstamp_rx_ctrl;
};
+bool is_rdev(const struct net_device *ndev);
+void rswitch_modify(void __iomem *addr, enum rswitch_reg reg, u32 clear, u32 set);
+
#endif /* #ifndef __RSWITCH_H__ */
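
The TS_TAGS_PER_PORT array and ts_skb_used bitmap above give each in-flight timestamped TX frame a small integer tag: the tag indexes ts_skb[], and the bitmap records which tags are busy. A minimal, self-contained user-space sketch of that allocate/complete lifecycle (plain loops stand in for the kernel's find_first_zero_bit()/set_bit()/clear_bit(); illustration only, not driver code):

#include <stdbool.h>
#include <stdio.h>

#define TS_TAGS_PER_PORT 256            /* matches the driver's per-port table size */

static void *ts_skb[TS_TAGS_PER_PORT];  /* stands in for struct sk_buff * */
static bool ts_skb_used[TS_TAGS_PER_PORT];

/* Transmit path: pick a free tag and remember the skb under it. */
static int ts_tag_alloc(void *skb)
{
	for (int tag = 0; tag < TS_TAGS_PER_PORT; tag++) {
		if (!ts_skb_used[tag]) {
			ts_skb[tag] = skb;      /* kept until the timestamp arrives */
			ts_skb_used[tag] = true;
			return tag;             /* the tag goes into the TX descriptor */
		}
	}
	return -1;                              /* table full: no hardware timestamp */
}

/* Completion path: the hardware reports (port, tag); release the tag. */
static void *ts_tag_complete(int tag)
{
	void *skb = ts_skb[tag];

	ts_skb[tag] = NULL;
	ts_skb_used[tag] = false;               /* tag can be reused immediately */
	return skb;                             /* deliver the timestamp to this skb */
}

int main(void)
{
	int frame = 1;                          /* placeholder for an skb */
	int tag = ts_tag_alloc(&frame);

	printf("allocated tag %d\n", tag);
	printf("released skb %p\n", ts_tag_complete(tag));
	return 0;
}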
diff --git a/drivers/net/ethernet/renesas/rswitch_l2.c b/drivers/net/ethernet/renesas/rswitch_l2.c
new file mode 100644
index 000000000000..4a69ec77d69c
--- /dev/null
+++ b/drivers/net/ethernet/renesas/rswitch_l2.c
@@ -0,0 +1,316 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Renesas Ethernet Switch device driver
+ *
+ * Copyright (C) 2025 Renesas Electronics Corporation
+ */
+
+#include <linux/err.h>
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <linux/kernel.h>
+#include <net/switchdev.h>
+
+#include "rswitch.h"
+#include "rswitch_l2.h"
+
+static bool rdev_for_l2_offload(struct rswitch_device *rdev)
+{
+ return rdev->priv->offload_brdev &&
+ rdev->brdev == rdev->priv->offload_brdev &&
+ (test_bit(rdev->port, rdev->priv->opened_ports));
+}
+
+static void rswitch_change_l2_hw_offloading(struct rswitch_device *rdev,
+ bool start, bool learning)
+{
+ u32 bits = learning ? FWPC0_MACSSA | FWPC0_MACHLA | FWPC0_MACHMA : FWPC0_MACDSA;
+ u32 clear = start ? 0 : bits;
+ u32 set = start ? bits : 0;
+
+ if ((learning && rdev->learning_offloaded == start) ||
+ (!learning && rdev->forwarding_offloaded == start))
+ return;
+
+ rswitch_modify(rdev->priv->addr, FWPC0(rdev->port), clear, set);
+
+ if (learning)
+ rdev->learning_offloaded = start;
+ else
+ rdev->forwarding_offloaded = start;
+
+ netdev_info(rdev->ndev, "%s hw %s\n", start ? "starting" : "stopping",
+ learning ? "learning" : "forwarding");
+}
+
+static void rswitch_update_l2_hw_learning(struct rswitch_private *priv)
+{
+ struct rswitch_device *rdev;
+ bool learning_needed;
+
+ rswitch_for_all_ports(priv, rdev) {
+ if (rdev_for_l2_offload(rdev))
+ learning_needed = rdev->learning_requested;
+ else
+ learning_needed = false;
+
+ rswitch_change_l2_hw_offloading(rdev, learning_needed, true);
+ }
+}
+
+static void rswitch_update_l2_hw_forwarding(struct rswitch_private *priv)
+{
+ struct rswitch_device *rdev;
+ unsigned int fwd_mask;
+
+ /* calculate fwd_mask with zeroes in bits corresponding to ports that
+ * shall participate in hardware forwarding
+ */
+ fwd_mask = GENMASK(RSWITCH_NUM_AGENTS - 1, 0);
+
+ rswitch_for_all_ports(priv, rdev) {
+ if (rdev_for_l2_offload(rdev) && rdev->forwarding_requested)
+ fwd_mask &= ~BIT(rdev->port);
+ }
+
+ rswitch_for_all_ports(priv, rdev) {
+ if ((rdev_for_l2_offload(rdev) && rdev->forwarding_requested) ||
+ rdev->forwarding_offloaded) {
+ /* Update allowed offload destinations even for ports
+ * with L2 offload enabled earlier.
+ *
+ * Do not allow a hardware port to L2-forward to itself.
+ */
+ iowrite32(FIELD_PREP(FWCP2_LTWFW_MASK, fwd_mask | BIT(rdev->port)),
+ priv->addr + FWPC2(rdev->port));
+ }
+
+ if (rdev_for_l2_offload(rdev) &&
+ rdev->forwarding_requested &&
+ !rdev->forwarding_offloaded) {
+ rswitch_change_l2_hw_offloading(rdev, true, false);
+ } else if (rdev->forwarding_offloaded) {
+ rswitch_change_l2_hw_offloading(rdev, false, false);
+ }
+ }
+}
+
+void rswitch_update_l2_offload(struct rswitch_private *priv)
+{
+ rswitch_update_l2_hw_learning(priv);
+ rswitch_update_l2_hw_forwarding(priv);
+}
+
+static void rswitch_update_offload_brdev(struct rswitch_private *priv)
+{
+ struct net_device *offload_brdev = NULL;
+ struct rswitch_device *rdev, *rdev2;
+
+ rswitch_for_all_ports(priv, rdev) {
+ if (!rdev->brdev)
+ continue;
+ rswitch_for_all_ports(priv, rdev2) {
+ if (rdev2 == rdev)
+ break;
+ if (rdev2->brdev == rdev->brdev) {
+ offload_brdev = rdev->brdev;
+ break;
+ }
+ }
+ if (offload_brdev)
+ break;
+ }
+
+ if (offload_brdev && priv->offload_brdev && offload_brdev != priv->offload_brdev)
+ dev_dbg(&priv->pdev->dev,
+ "changing l2 offload from %s to %s\n",
+ netdev_name(priv->offload_brdev),
+ netdev_name(offload_brdev));
+ else if (offload_brdev && !priv->offload_brdev)
+ dev_dbg(&priv->pdev->dev, "starting l2 offload for %s\n",
+ netdev_name(offload_brdev));
+ else if (!offload_brdev && priv->offload_brdev)
+ dev_dbg(&priv->pdev->dev, "stopping l2 offload for %s\n",
+ netdev_name(priv->offload_brdev));
+
+ priv->offload_brdev = offload_brdev;
+
+ rswitch_update_l2_offload(priv);
+}
+
+static bool rswitch_port_check(const struct net_device *ndev)
+{
+ return is_rdev(ndev);
+}
+
+static void rswitch_port_update_brdev(struct net_device *ndev,
+ struct net_device *brdev)
+{
+ struct rswitch_device *rdev;
+
+ if (!is_rdev(ndev))
+ return;
+
+ rdev = netdev_priv(ndev);
+ rdev->brdev = brdev;
+ rswitch_update_offload_brdev(rdev->priv);
+}
+
+static int rswitch_port_update_stp_state(struct net_device *ndev, u8 stp_state)
+{
+ struct rswitch_device *rdev;
+
+ if (!is_rdev(ndev))
+ return -ENODEV;
+
+ rdev = netdev_priv(ndev);
+ rdev->learning_requested = (stp_state == BR_STATE_LEARNING ||
+ stp_state == BR_STATE_FORWARDING);
+ rdev->forwarding_requested = (stp_state == BR_STATE_FORWARDING);
+ rswitch_update_l2_offload(rdev->priv);
+
+ return 0;
+}
+
+static int rswitch_netdevice_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+ struct netdev_notifier_changeupper_info *info;
+ struct net_device *brdev;
+
+ if (!rswitch_port_check(ndev))
+ return NOTIFY_DONE;
+ if (event != NETDEV_CHANGEUPPER)
+ return NOTIFY_DONE;
+
+ info = ptr;
+
+ if (netif_is_bridge_master(info->upper_dev)) {
+ brdev = info->linking ? info->upper_dev : NULL;
+ rswitch_port_update_brdev(ndev, brdev);
+ }
+
+ return NOTIFY_OK;
+}
+
+static int rswitch_update_ageing_time(struct net_device *ndev, clock_t time)
+{
+ struct rswitch_device *rdev = netdev_priv(ndev);
+ u32 reg_val;
+
+ if (!is_rdev(ndev))
+ return -ENODEV;
+
+ if (!FIELD_FIT(FWMACAGC_MACAGT, time))
+ return -EINVAL;
+
+ reg_val = FIELD_PREP(FWMACAGC_MACAGT, time);
+ reg_val |= FWMACAGC_MACAGE | FWMACAGC_MACAGSL;
+ iowrite32(reg_val, rdev->priv->addr + FWMACAGC);
+
+ return 0;
+}
+
+static int rswitch_port_attr_set(struct net_device *ndev, const void *ctx,
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack)
+{
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ return rswitch_port_update_stp_state(ndev, attr->u.stp_state);
+ case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
+ return rswitch_update_ageing_time(ndev, attr->u.ageing_time);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int rswitch_switchdev_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = switchdev_notifier_info_to_dev(ptr);
+ int ret;
+
+ if (event == SWITCHDEV_PORT_ATTR_SET) {
+ ret = switchdev_handle_port_attr_set(ndev, ptr,
+ rswitch_port_check,
+ rswitch_port_attr_set);
+ return notifier_from_errno(ret);
+ }
+
+ if (!rswitch_port_check(ndev))
+ return NOTIFY_DONE;
+
+ return notifier_from_errno(-EOPNOTSUPP);
+}
+
+static int rswitch_switchdev_blocking_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = switchdev_notifier_info_to_dev(ptr);
+ int ret;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ return -EOPNOTSUPP;
+ case SWITCHDEV_PORT_OBJ_DEL:
+ return -EOPNOTSUPP;
+ case SWITCHDEV_PORT_ATTR_SET:
+ ret = switchdev_handle_port_attr_set(ndev, ptr,
+ rswitch_port_check,
+ rswitch_port_attr_set);
+ break;
+ default:
+ if (!rswitch_port_check(ndev))
+ return NOTIFY_DONE;
+ ret = -EOPNOTSUPP;
+ }
+
+ return notifier_from_errno(ret);
+}
+
+static struct notifier_block rswitch_netdevice_nb = {
+ .notifier_call = rswitch_netdevice_event,
+};
+
+static struct notifier_block rswitch_switchdev_nb = {
+ .notifier_call = rswitch_switchdev_event,
+};
+
+static struct notifier_block rswitch_switchdev_blocking_nb = {
+ .notifier_call = rswitch_switchdev_blocking_event,
+};
+
+int rswitch_register_notifiers(void)
+{
+ int ret;
+
+ ret = register_netdevice_notifier(&rswitch_netdevice_nb);
+ if (ret)
+ goto register_netdevice_notifier_failed;
+
+ ret = register_switchdev_notifier(&rswitch_switchdev_nb);
+ if (ret)
+ goto register_switchdev_notifier_failed;
+
+ ret = register_switchdev_blocking_notifier(&rswitch_switchdev_blocking_nb);
+ if (ret)
+ goto register_switchdev_blocking_notifier_failed;
+
+ return 0;
+
+register_switchdev_blocking_notifier_failed:
+ unregister_switchdev_notifier(&rswitch_switchdev_nb);
+register_switchdev_notifier_failed:
+ unregister_netdevice_notifier(&rswitch_netdevice_nb);
+register_netdevice_notifier_failed:
+
+ return ret;
+}
+
+void rswitch_unregister_notifiers(void)
+{
+ unregister_switchdev_blocking_notifier(&rswitch_switchdev_blocking_nb);
+ unregister_switchdev_notifier(&rswitch_switchdev_nb);
+ unregister_netdevice_notifier(&rswitch_netdevice_nb);
+}
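
In rswitch_update_l2_hw_forwarding() above, a set bit in the FWPC2 LTWFW field blocks forwarding towards the corresponding agent, so the code starts from an all-ones mask, clears the bits of ports that take part in offloading, and re-sets each port's own bit so a port never forwards to itself. A small user-space sketch of that mask arithmetic, assuming the five agents implied by RSWITCH_NUM_AGENTS (illustration only, not driver code):

#include <stdio.h>

#define RSWITCH_NUM_AGENTS 5

int main(void)
{
	/* Example: ports 0 and 2 are bridged and requested for forwarding. */
	int offloaded[RSWITCH_NUM_AGENTS] = { 1, 0, 1, 0, 0 };

	/* All bits set: forwarding towards every agent starts out blocked. */
	unsigned int fwd_mask = (1u << RSWITCH_NUM_AGENTS) - 1;

	/* Clear the bits of ports that shall participate in hw forwarding. */
	for (int port = 0; port < RSWITCH_NUM_AGENTS; port++)
		if (offloaded[port])
			fwd_mask &= ~(1u << port);

	/* Per-port LTWFW value: re-set the port's own bit (no self-forwarding). */
	for (int port = 0; port < RSWITCH_NUM_AGENTS; port++)
		printf("port %d: LTWFW = 0x%02x\n", port, fwd_mask | (1u << port));

	return 0;
}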
diff --git a/drivers/net/ethernet/renesas/rswitch_l2.h b/drivers/net/ethernet/renesas/rswitch_l2.h
new file mode 100644
index 000000000000..57050ede8f31
--- /dev/null
+++ b/drivers/net/ethernet/renesas/rswitch_l2.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Renesas Ethernet Switch device driver
+ *
+ * Copyright (C) 2025 Renesas Electronics Corporation
+ */
+
+#ifndef __RSWITCH_L2_H__
+#define __RSWITCH_L2_H__
+
+void rswitch_update_l2_offload(struct rswitch_private *priv);
+
+int rswitch_register_notifiers(void);
+void rswitch_unregister_notifiers(void);
+
+#endif /* #ifndef __RSWITCH_L2_H__ */
diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch_main.c
index dcab638c57fe..e14b21148f27 100644
--- a/drivers/net/ethernet/renesas/rswitch.c
+++ b/drivers/net/ethernet/renesas/rswitch_main.c
@@ -1,15 +1,18 @@
// SPDX-License-Identifier: GPL-2.0
/* Renesas Ethernet Switch device driver
*
- * Copyright (C) 2022 Renesas Electronics Corporation
+ * Copyright (C) 2022-2025 Renesas Electronics Corporation
*/
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/ip.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
+#include <linux/list.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
@@ -25,6 +28,9 @@
#include <linux/sys_soc.h>
#include "rswitch.h"
+#include "rswitch_l2.h"
+
+#define RSWITCH_GPTP_OFFSET_S4 0x00018000
static int rswitch_reg_wait(void __iomem *addr, u32 offs, u32 mask, u32 expected)
{
@@ -34,7 +40,7 @@ static int rswitch_reg_wait(void __iomem *addr, u32 offs, u32 mask, u32 expected
1, RSWITCH_TIMEOUT_US);
}
-static void rswitch_modify(void __iomem *addr, enum rswitch_reg reg, u32 clear, u32 set)
+void rswitch_modify(void __iomem *addr, enum rswitch_reg reg, u32 clear, u32 set)
{
iowrite32((ioread32(addr + reg) & ~clear) | set, addr + reg);
}
@@ -109,27 +115,56 @@ static void rswitch_top_init(struct rswitch_private *priv)
}
/* Forwarding engine block (MFWD) */
-static void rswitch_fwd_init(struct rswitch_private *priv)
+static int rswitch_fwd_init(struct rswitch_private *priv)
{
+ u32 all_ports_mask = GENMASK(RSWITCH_NUM_AGENTS - 1, 0);
unsigned int i;
-
- /* For ETHA */
- for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
- iowrite32(FWPC0_DEFAULT, priv->addr + FWPC0(i));
+ u32 reg_val;
+
+ /* Start with empty configuration */
+ for (i = 0; i < RSWITCH_NUM_AGENTS; i++) {
+ /* Disable all port features */
+ iowrite32(0, priv->addr + FWPC0(i));
+ /* Disallow L3 forwarding and direct descriptor forwarding */
+ iowrite32(FIELD_PREP(FWCP1_LTHFW, all_ports_mask),
+ priv->addr + FWPC1(i));
+ /* Disallow L2 forwarding */
+ iowrite32(FIELD_PREP(FWCP2_LTWFW, all_ports_mask),
+ priv->addr + FWPC2(i));
+ /* Disallow port based forwarding */
iowrite32(0, priv->addr + FWPBFC(i));
}
- for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
+ /* Configure MAC table aging */
+ rswitch_modify(priv->addr, FWMACAGUSPC, FWMACAGUSPC_MACAGUSP,
+ FIELD_PREP(FWMACAGUSPC_MACAGUSP, RSW_AGEING_CLK_PER_US));
+
+ reg_val = FIELD_PREP(FWMACAGC_MACAGT, RSW_AGEING_TIME);
+ reg_val |= FWMACAGC_MACAGE | FWMACAGC_MACAGSL;
+ iowrite32(reg_val, priv->addr + FWMACAGC);
+
+ /* For enabled ETHA ports, set up port-based forwarding */
+ rswitch_for_each_enabled_port(priv, i) {
+ /* Port based forwarding from port i to GWCA port */
+ rswitch_modify(priv->addr, FWPBFC(i), FWPBFC_PBDV,
+ FIELD_PREP(FWPBFC_PBDV, BIT(priv->gwca.index)));
+ /* Within GWCA port, forward to Rx queue for port i */
iowrite32(priv->rdev[i]->rx_queue->index,
priv->addr + FWPBFCSDC(GWCA_INDEX, i));
- iowrite32(BIT(priv->gwca.index), priv->addr + FWPBFC(i));
}
- /* For GWCA */
- iowrite32(FWPC0_DEFAULT, priv->addr + FWPC0(priv->gwca.index));
- iowrite32(FWPC1_DDE, priv->addr + FWPC1(priv->gwca.index));
- iowrite32(0, priv->addr + FWPBFC(priv->gwca.index));
- iowrite32(GENMASK(RSWITCH_NUM_PORTS - 1, 0), priv->addr + FWPBFC(priv->gwca.index));
+ /* For GWCA port, allow direct descriptor forwarding */
+ rswitch_modify(priv->addr, FWPC1(priv->gwca.index), FWPC1_DDE, FWPC1_DDE);
+
+ /* Initialize hardware L2 forwarding table */
+
+ /* Allow entire table to be used for "unsecure" entries */
+ rswitch_modify(priv->addr, FWMACHEC, 0, FWMACHEC_MACHMUE_MASK);
+
+ /* Initialize MAC hash table */
+ iowrite32(FWMACTIM_MACTIOG, priv->addr + FWMACTIM);
+
+ return rswitch_reg_wait(priv->addr, FWMACTIM, FWMACTIM_MACTIOG, 0);
}
/* Gateway CPU agent block (GWCA) */
@@ -547,7 +582,6 @@ static int rswitch_gwca_ts_queue_alloc(struct rswitch_private *priv)
desc = &gq->ts_ring[gq->ring_size];
desc->desc.die_dt = DT_LINKFIX;
rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
- INIT_LIST_HEAD(&priv->gwca.ts_info_list);
return 0;
}
@@ -811,7 +845,7 @@ static bool rswitch_rx(struct net_device *ndev, int *quota)
if (!skb)
goto out;
- get_ts = rdev->priv->ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
+ get_ts = rdev->priv->tstamp_rx_ctrl != HWTSTAMP_FILTER_NONE;
if (get_ts) {
struct skb_shared_hwtstamps *shhwtstamps;
struct timespec64 ts;
@@ -862,24 +896,24 @@ static void rswitch_tx_free(struct net_device *ndev)
struct rswitch_ext_desc *desc;
struct sk_buff *skb;
- for (; rswitch_get_num_cur_queues(gq) > 0;
- gq->dirty = rswitch_next_queue_index(gq, false, 1)) {
- desc = &gq->tx_ring[gq->dirty];
- if ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
- break;
-
+ desc = &gq->tx_ring[gq->dirty];
+ while ((desc->desc.die_dt & DT_MASK) == DT_FEMPTY) {
dma_rmb();
+
skb = gq->skbs[gq->dirty];
if (skb) {
+ rdev->ndev->stats.tx_packets++;
+ rdev->ndev->stats.tx_bytes += skb->len;
dma_unmap_single(ndev->dev.parent,
gq->unmap_addrs[gq->dirty],
skb->len, DMA_TO_DEVICE);
dev_kfree_skb_any(gq->skbs[gq->dirty]);
gq->skbs[gq->dirty] = NULL;
- rdev->ndev->stats.tx_packets++;
- rdev->ndev->stats.tx_bytes += skb->len;
}
+
desc->desc.die_dt = DT_EEMPTY;
+ gq->dirty = rswitch_next_queue_index(gq, false, 1);
+ desc = &gq->tx_ring[gq->dirty];
}
}
@@ -908,8 +942,10 @@ retry:
if (napi_complete_done(napi, budget - quota)) {
spin_lock_irqsave(&priv->lock, flags);
- rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true);
- rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true);
+ if (test_bit(rdev->port, priv->opened_ports)) {
+ rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true);
+ rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true);
+ }
spin_unlock_irqrestore(&priv->lock, flags);
}
@@ -1001,9 +1037,10 @@ static int rswitch_gwca_request_irqs(struct rswitch_private *priv)
static void rswitch_ts(struct rswitch_private *priv)
{
struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
- struct rswitch_gwca_ts_info *ts_info, *ts_info2;
struct skb_shared_hwtstamps shhwtstamps;
struct rswitch_ts_desc *desc;
+ struct rswitch_device *rdev;
+ struct sk_buff *ts_skb;
struct timespec64 ts;
unsigned int num;
u32 tag, port;
@@ -1013,23 +1050,28 @@ static void rswitch_ts(struct rswitch_private *priv)
dma_rmb();
port = TS_DESC_DPN(__le32_to_cpu(desc->desc.dptrl));
- tag = TS_DESC_TSUN(__le32_to_cpu(desc->desc.dptrl));
-
- list_for_each_entry_safe(ts_info, ts_info2, &priv->gwca.ts_info_list, list) {
- if (!(ts_info->port == port && ts_info->tag == tag))
- continue;
-
- memset(&shhwtstamps, 0, sizeof(shhwtstamps));
- ts.tv_sec = __le32_to_cpu(desc->ts_sec);
- ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
- shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
- skb_tstamp_tx(ts_info->skb, &shhwtstamps);
- dev_consume_skb_irq(ts_info->skb);
- list_del(&ts_info->list);
- kfree(ts_info);
- break;
- }
+ if (unlikely(port >= RSWITCH_NUM_PORTS))
+ goto next;
+ rdev = priv->rdev[port];
+ tag = TS_DESC_TSUN(__le32_to_cpu(desc->desc.dptrl));
+ if (unlikely(tag >= TS_TAGS_PER_PORT))
+ goto next;
+ ts_skb = xchg(&rdev->ts_skb[tag], NULL);
+ smp_mb(); /* order rdev->ts_skb[] read before bitmap update */
+ clear_bit(tag, rdev->ts_skb_used);
+
+ if (unlikely(!ts_skb))
+ goto next;
+
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ ts.tv_sec = __le32_to_cpu(desc->ts_sec);
+ ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
+ shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
+ skb_tstamp_tx(ts_skb, &shhwtstamps);
+ dev_consume_skb_irq(ts_skb);
+
+next:
gq->cur = rswitch_next_queue_index(gq, true, 1);
desc = &gq->ts_ring[gq->cur];
}
@@ -1114,32 +1156,47 @@ static int rswitch_etha_wait_link_verification(struct rswitch_etha *etha)
static void rswitch_rmac_setting(struct rswitch_etha *etha, const u8 *mac)
{
- u32 val;
+ u32 pis, lsc;
rswitch_etha_write_mac_address(etha, mac);
+ switch (etha->phy_interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ pis = MPIC_PIS_GMII;
+ break;
+ case PHY_INTERFACE_MODE_USXGMII:
+ case PHY_INTERFACE_MODE_5GBASER:
+ pis = MPIC_PIS_XGMII;
+ break;
+ default:
+ pis = FIELD_GET(MPIC_PIS, ioread32(etha->addr + MPIC));
+ break;
+ }
+
switch (etha->speed) {
case 100:
- val = MPIC_LSC_100M;
+ lsc = MPIC_LSC_100M;
break;
case 1000:
- val = MPIC_LSC_1G;
+ lsc = MPIC_LSC_1G;
break;
case 2500:
- val = MPIC_LSC_2_5G;
+ lsc = MPIC_LSC_2_5G;
break;
default:
- return;
+ lsc = FIELD_GET(MPIC_LSC, ioread32(etha->addr + MPIC));
+ break;
}
- iowrite32(MPIC_PIS_GMII | val, etha->addr + MPIC);
+ rswitch_modify(etha->addr, MPIC, MPIC_PIS | MPIC_LSC,
+ FIELD_PREP(MPIC_PIS, pis) | FIELD_PREP(MPIC_LSC, lsc));
}
static void rswitch_etha_enable_mii(struct rswitch_etha *etha)
{
- rswitch_modify(etha->addr, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK,
- MPIC_PSMCS(etha->psmcs) | MPIC_PSMHT(0x06));
- rswitch_modify(etha->addr, MPSM, 0, MPSM_MFF_C45);
+ rswitch_modify(etha->addr, MPIC, MPIC_PSMCS | MPIC_PSMHT,
+ FIELD_PREP(MPIC_PSMCS, etha->psmcs) |
+ FIELD_PREP(MPIC_PSMHT, 0x06));
}
static int rswitch_etha_hw_init(struct rswitch_etha *etha, const u8 *mac)
@@ -1168,42 +1225,29 @@ static int rswitch_etha_hw_init(struct rswitch_etha *etha, const u8 *mac)
return rswitch_etha_change_mode(etha, EAMC_OPC_OPERATION);
}
-static int rswitch_etha_set_access(struct rswitch_etha *etha, bool read,
- int phyad, int devad, int regad, int data)
+static int rswitch_etha_mpsm_op(struct rswitch_etha *etha, bool read,
+ unsigned int mmf, unsigned int pda,
+ unsigned int pra, unsigned int pop,
+ unsigned int prd)
{
- int pop = read ? MDIO_READ_C45 : MDIO_WRITE_C45;
u32 val;
int ret;
- if (devad == 0xffffffff)
- return -ENODEV;
-
- writel(MMIS1_CLEAR_FLAGS, etha->addr + MMIS1);
-
- val = MPSM_PSME | MPSM_MFF_C45;
- iowrite32((regad << 16) | (devad << 8) | (phyad << 3) | val, etha->addr + MPSM);
+ val = MPSM_PSME |
+ FIELD_PREP(MPSM_MFF, mmf) |
+ FIELD_PREP(MPSM_PDA, pda) |
+ FIELD_PREP(MPSM_PRA, pra) |
+ FIELD_PREP(MPSM_POP, pop) |
+ FIELD_PREP(MPSM_PRD, prd);
+ iowrite32(val, etha->addr + MPSM);
- ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PAACS, MMIS1_PAACS);
+ ret = rswitch_reg_wait(etha->addr, MPSM, MPSM_PSME, 0);
if (ret)
return ret;
- rswitch_modify(etha->addr, MMIS1, MMIS1_PAACS, MMIS1_PAACS);
-
if (read) {
- writel((pop << 13) | (devad << 8) | (phyad << 3) | val, etha->addr + MPSM);
-
- ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PRACS, MMIS1_PRACS);
- if (ret)
- return ret;
-
- ret = (ioread32(etha->addr + MPSM) & MPSM_PRD_MASK) >> 16;
-
- rswitch_modify(etha->addr, MMIS1, MMIS1_PRACS, MMIS1_PRACS);
- } else {
- iowrite32((data << 16) | (pop << 13) | (devad << 8) | (phyad << 3) | val,
- etha->addr + MPSM);
-
- ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PWACS, MMIS1_PWACS);
+ val = ioread32(etha->addr + MPSM);
+ ret = FIELD_GET(MPSM_PRD, val);
}
return ret;
@@ -1213,16 +1257,47 @@ static int rswitch_etha_mii_read_c45(struct mii_bus *bus, int addr, int devad,
int regad)
{
struct rswitch_etha *etha = bus->priv;
+ int ret;
+
+ ret = rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C45, addr, devad,
+ MPSM_POP_ADDRESS, regad);
+ if (ret)
+ return ret;
- return rswitch_etha_set_access(etha, true, addr, devad, regad, 0);
+ return rswitch_etha_mpsm_op(etha, true, MPSM_MMF_C45, addr, devad,
+ MPSM_POP_READ_C45, 0);
}
static int rswitch_etha_mii_write_c45(struct mii_bus *bus, int addr, int devad,
int regad, u16 val)
{
struct rswitch_etha *etha = bus->priv;
+ int ret;
+
+ ret = rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C45, addr, devad,
+ MPSM_POP_ADDRESS, regad);
+ if (ret)
+ return ret;
+
+ return rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C45, addr, devad,
+ MPSM_POP_WRITE, val);
+}
+
+static int rswitch_etha_mii_read_c22(struct mii_bus *bus, int phyad, int regad)
+{
+ struct rswitch_etha *etha = bus->priv;
- return rswitch_etha_set_access(etha, false, addr, devad, regad, val);
+ return rswitch_etha_mpsm_op(etha, true, MPSM_MMF_C22, phyad, regad,
+ MPSM_POP_READ_C22, 0);
+}
+
+static int rswitch_etha_mii_write_c22(struct mii_bus *bus, int phyad,
+ int regad, u16 val)
+{
+ struct rswitch_etha *etha = bus->priv;
+
+ return rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C22, phyad, regad,
+ MPSM_POP_WRITE, val);
}
/* Call of_node_put(port) after done */
@@ -1237,17 +1312,14 @@ static struct device_node *rswitch_get_port_node(struct rswitch_device *rdev)
if (!ports)
return NULL;
- for_each_child_of_node(ports, port) {
+ for_each_available_child_of_node(ports, port) {
err = of_property_read_u32(port, "reg", &index);
if (err < 0) {
port = NULL;
goto out;
}
- if (index == rdev->etha->index) {
- if (!of_device_is_available(port))
- port = NULL;
+ if (index == rdev->etha->index)
break;
- }
}
out:
@@ -1307,6 +1379,8 @@ static int rswitch_mii_register(struct rswitch_device *rdev)
mii_bus->priv = rdev->etha;
mii_bus->read_c45 = rswitch_etha_mii_read_c45;
mii_bus->write_c45 = rswitch_etha_mii_write_c45;
+ mii_bus->read = rswitch_etha_mii_read_c22;
+ mii_bus->write = rswitch_etha_mii_write_c22;
mii_bus->parent = &rdev->priv->pdev->dev;
mdio_np = of_get_child_by_name(rdev->np_port, "mdio");
@@ -1527,7 +1601,7 @@ static void rswitch_ether_port_deinit_all(struct rswitch_private *priv)
{
unsigned int i;
- for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
+ rswitch_for_each_enabled_port(priv, i) {
phy_exit(priv->rdev[i]->serdes);
rswitch_ether_port_deinit_one(priv->rdev[i]);
}
@@ -1538,54 +1612,61 @@ static int rswitch_open(struct net_device *ndev)
struct rswitch_device *rdev = netdev_priv(ndev);
unsigned long flags;
- phy_start(ndev->phydev);
+ if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
+ iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE);
napi_enable(&rdev->napi);
- netif_start_queue(ndev);
spin_lock_irqsave(&rdev->priv->lock, flags);
+ bitmap_set(rdev->priv->opened_ports, rdev->port, 1);
rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true);
rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true);
spin_unlock_irqrestore(&rdev->priv->lock, flags);
- if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
- iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE);
+ phy_start(ndev->phydev);
- bitmap_set(rdev->priv->opened_ports, rdev->port, 1);
+ netif_start_queue(ndev);
+
+ if (rdev->brdev)
+ rswitch_update_l2_offload(rdev->priv);
return 0;
-};
+}
static int rswitch_stop(struct net_device *ndev)
{
struct rswitch_device *rdev = netdev_priv(ndev);
- struct rswitch_gwca_ts_info *ts_info, *ts_info2;
+ struct sk_buff *ts_skb;
unsigned long flags;
+ unsigned int tag;
netif_tx_stop_all_queues(ndev);
- bitmap_clear(rdev->priv->opened_ports, rdev->port, 1);
- if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
- iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID);
-
- list_for_each_entry_safe(ts_info, ts_info2, &rdev->priv->gwca.ts_info_list, list) {
- if (ts_info->port != rdev->port)
- continue;
- dev_kfree_skb_irq(ts_info->skb);
- list_del(&ts_info->list);
- kfree(ts_info);
- }
+ phy_stop(ndev->phydev);
spin_lock_irqsave(&rdev->priv->lock, flags);
rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
+ bitmap_clear(rdev->priv->opened_ports, rdev->port, 1);
spin_unlock_irqrestore(&rdev->priv->lock, flags);
- phy_stop(ndev->phydev);
napi_disable(&rdev->napi);
+ if (rdev->brdev)
+ rswitch_update_l2_offload(rdev->priv);
+
+ if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
+ iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID);
+
+ for_each_set_bit(tag, rdev->ts_skb_used, TS_TAGS_PER_PORT) {
+ ts_skb = xchg(&rdev->ts_skb[tag], NULL);
+ clear_bit(tag, rdev->ts_skb_used);
+ if (ts_skb)
+ dev_kfree_skb(ts_skb);
+ }
+
return 0;
-};
+}
static bool rswitch_ext_desc_set_info1(struct rswitch_device *rdev,
struct sk_buff *skb,
@@ -1594,20 +1675,17 @@ static bool rswitch_ext_desc_set_info1(struct rswitch_device *rdev,
desc->info1 = cpu_to_le64(INFO1_DV(BIT(rdev->etha->index)) |
INFO1_IPV(GWCA_IPV_NUM) | INFO1_FMT);
if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
- struct rswitch_gwca_ts_info *ts_info;
+ unsigned int tag;
- ts_info = kzalloc(sizeof(*ts_info), GFP_ATOMIC);
- if (!ts_info)
+ tag = find_first_zero_bit(rdev->ts_skb_used, TS_TAGS_PER_PORT);
+ if (tag == TS_TAGS_PER_PORT)
return false;
+ smp_mb(); /* order bitmap read before rdev->ts_skb[] write */
+ rdev->ts_skb[tag] = skb_get(skb);
+ set_bit(tag, rdev->ts_skb_used);
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- rdev->ts_tag++;
- desc->info1 |= cpu_to_le64(INFO1_TSUN(rdev->ts_tag) | INFO1_TXC);
-
- ts_info->skb = skb_get(skb);
- ts_info->port = rdev->port;
- ts_info->tag = rdev->ts_tag;
- list_add_tail(&ts_info->list, &rdev->priv->gwca.ts_info_list);
+ desc->info1 |= cpu_to_le64(INFO1_TSUN(tag) | INFO1_TXC);
skb_tx_timestamp(skb);
}
@@ -1681,8 +1759,11 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
if (dma_mapping_error(ndev->dev.parent, dma_addr_orig))
goto err_kfree;
- gq->skbs[gq->cur] = skb;
- gq->unmap_addrs[gq->cur] = dma_addr_orig;
+ /* Store the skb at the last descriptor to avoid freeing it before the hardware completes the send */
+ gq->skbs[(gq->cur + nr_desc - 1) % gq->ring_size] = skb;
+ gq->unmap_addrs[(gq->cur + nr_desc - 1) % gq->ring_size] = dma_addr_orig;
+
+ dma_wmb();
/* DT_FSTART should be set at last. So, this is reverse order. */
for (i = nr_desc; i-- > 0; ) {
@@ -1694,14 +1775,13 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
goto err_unmap;
}
- wmb(); /* gq->cur must be incremented after die_dt was set */
-
gq->cur = rswitch_next_queue_index(gq, true, nr_desc);
rswitch_modify(rdev->addr, GWTRC(gq->index), 0, BIT(gq->index % 32));
return ret;
err_unmap:
+ gq->skbs[(gq->cur + nr_desc - 1) % gq->ring_size] = NULL;
dma_unmap_single(ndev->dev.parent, dma_addr_orig, skb->len, DMA_TO_DEVICE);
err_kfree:
@@ -1715,88 +1795,77 @@ static struct net_device_stats *rswitch_get_stats(struct net_device *ndev)
return &ndev->stats;
}
-static int rswitch_hwstamp_get(struct net_device *ndev, struct ifreq *req)
+static int rswitch_hwstamp_get(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config)
{
struct rswitch_device *rdev = netdev_priv(ndev);
- struct rcar_gen4_ptp_private *ptp_priv;
- struct hwtstamp_config config;
-
- ptp_priv = rdev->priv->ptp_priv;
+ struct rswitch_private *priv = rdev->priv;
- config.flags = 0;
- config.tx_type = ptp_priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
- HWTSTAMP_TX_OFF;
- switch (ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE) {
- case RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT:
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
- break;
- case RCAR_GEN4_RXTSTAMP_TYPE_ALL:
- config.rx_filter = HWTSTAMP_FILTER_ALL;
- break;
- default:
- config.rx_filter = HWTSTAMP_FILTER_NONE;
- break;
- }
+ config->flags = 0;
+ config->tx_type = priv->tstamp_tx_ctrl;
+ config->rx_filter = priv->tstamp_rx_ctrl;
- return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0;
+ return 0;
}
-static int rswitch_hwstamp_set(struct net_device *ndev, struct ifreq *req)
+static int rswitch_hwstamp_set(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct rswitch_device *rdev = netdev_priv(ndev);
- u32 tstamp_rx_ctrl = RCAR_GEN4_RXTSTAMP_ENABLED;
- struct hwtstamp_config config;
- u32 tstamp_tx_ctrl;
+ enum hwtstamp_rx_filters tstamp_rx_ctrl;
+ enum hwtstamp_tx_types tstamp_tx_ctrl;
- if (copy_from_user(&config, req->ifr_data, sizeof(config)))
- return -EFAULT;
-
- if (config.flags)
+ if (config->flags)
return -EINVAL;
- switch (config.tx_type) {
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
- tstamp_tx_ctrl = 0;
- break;
case HWTSTAMP_TX_ON:
- tstamp_tx_ctrl = RCAR_GEN4_TXTSTAMP_ENABLED;
+ tstamp_tx_ctrl = config->tx_type;
break;
default:
return -ERANGE;
}
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
- tstamp_rx_ctrl = 0;
- break;
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- tstamp_rx_ctrl |= RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
+ tstamp_rx_ctrl = config->rx_filter;
break;
default:
- config.rx_filter = HWTSTAMP_FILTER_ALL;
- tstamp_rx_ctrl |= RCAR_GEN4_RXTSTAMP_TYPE_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+ tstamp_rx_ctrl = HWTSTAMP_FILTER_ALL;
break;
}
- rdev->priv->ptp_priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
- rdev->priv->ptp_priv->tstamp_rx_ctrl = tstamp_rx_ctrl;
+ rdev->priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
+ rdev->priv->tstamp_rx_ctrl = tstamp_rx_ctrl;
- return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0;
+ return 0;
}
-static int rswitch_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
+static int rswitch_get_port_parent_id(struct net_device *ndev,
+ struct netdev_phys_item_id *ppid)
{
- if (!netif_running(ndev))
- return -EINVAL;
+ struct rswitch_device *rdev = netdev_priv(ndev);
+ const char *name;
- switch (cmd) {
- case SIOCGHWTSTAMP:
- return rswitch_hwstamp_get(ndev, req);
- case SIOCSHWTSTAMP:
- return rswitch_hwstamp_set(ndev, req);
- default:
- return phy_mii_ioctl(ndev->phydev, req, cmd);
- }
+ name = dev_name(&rdev->priv->pdev->dev);
+ ppid->id_len = min_t(size_t, strlen(name), sizeof(ppid->id));
+ memcpy(ppid->id, name, ppid->id_len);
+
+ return 0;
+}
+
+static int rswitch_get_phys_port_name(struct net_device *ndev,
+ char *name, size_t len)
+{
+ struct rswitch_device *rdev = netdev_priv(ndev);
+
+ snprintf(name, len, "tsn%d", rdev->port);
+
+ return 0;
}
static const struct net_device_ops rswitch_netdev_ops = {
@@ -1804,19 +1873,26 @@ static const struct net_device_ops rswitch_netdev_ops = {
.ndo_stop = rswitch_stop,
.ndo_start_xmit = rswitch_start_xmit,
.ndo_get_stats = rswitch_get_stats,
- .ndo_eth_ioctl = rswitch_eth_ioctl,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
+ .ndo_get_port_parent_id = rswitch_get_port_parent_id,
+ .ndo_get_phys_port_name = rswitch_get_phys_port_name,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
+ .ndo_hwtstamp_get = rswitch_hwstamp_get,
+ .ndo_hwtstamp_set = rswitch_hwstamp_set,
};
-static int rswitch_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
+bool is_rdev(const struct net_device *ndev)
+{
+ return (ndev->netdev_ops == &rswitch_netdev_ops);
+}
+
+static int rswitch_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts_info *info)
{
struct rswitch_device *rdev = netdev_priv(ndev);
info->phc_index = ptp_clock_index(rdev->priv->ptp_priv->clock);
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
@@ -1891,7 +1967,6 @@ static int rswitch_device_alloc(struct rswitch_private *priv, unsigned int index
rdev->np_port = rswitch_get_port_node(rdev);
rdev->disabled = !rdev->np_port;
err = of_get_ethdev_address(rdev->np_port, ndev);
- of_node_put(rdev->np_port);
if (err) {
if (is_valid_ether_addr(rdev->etha->mac_addr))
eth_hw_addr_set(ndev, rdev->etha->mac_addr);
@@ -1903,9 +1978,6 @@ static int rswitch_device_alloc(struct rswitch_private *priv, unsigned int index
if (err < 0)
goto out_get_params;
- if (rdev->priv->gwca.speed < rdev->etha->speed)
- rdev->priv->gwca.speed = rdev->etha->speed;
-
err = rswitch_rxdmac_alloc(ndev);
if (err < 0)
goto out_rxdmac;
@@ -1914,6 +1986,8 @@ static int rswitch_device_alloc(struct rswitch_private *priv, unsigned int index
if (err < 0)
goto out_txdmac;
+ list_add_tail(&rdev->list, &priv->port_list);
+
return 0;
out_txdmac:
@@ -1921,6 +1995,7 @@ out_txdmac:
out_rxdmac:
out_get_params:
+ of_node_put(rdev->np_port);
netif_napi_del(&rdev->napi);
free_netdev(ndev);
@@ -1932,8 +2007,10 @@ static void rswitch_device_free(struct rswitch_private *priv, unsigned int index
struct rswitch_device *rdev = priv->rdev[index];
struct net_device *ndev = rdev->ndev;
+ list_del(&rdev->list);
rswitch_txdmac_free(ndev);
rswitch_rxdmac_free(ndev);
+ of_node_put(rdev->np_port);
netif_napi_del(&rdev->napi);
free_netdev(ndev);
}
@@ -1977,10 +2054,11 @@ static int rswitch_init(struct rswitch_private *priv)
}
}
- rswitch_fwd_init(priv);
+ err = rswitch_fwd_init(priv);
+ if (err < 0)
+ goto err_fwd_init;
- err = rcar_gen4_ptp_register(priv->ptp_priv, RCAR_GEN4_PTP_REG_LAYOUT,
- clk_get_rate(priv->clk));
+ err = rcar_gen4_ptp_register(priv->ptp_priv, clk_get_rate(priv->clk));
if (err < 0)
goto err_ptp_register;
@@ -2026,6 +2104,7 @@ err_gwca_ts_request_irq:
err_gwca_request_irq:
rcar_gen4_ptp_unregister(priv->ptp_priv);
+err_fwd_init:
err_ptp_register:
for (i = 0; i < RSWITCH_NUM_PORTS; i++)
rswitch_device_free(priv, i);
@@ -2060,6 +2139,7 @@ static int renesas_eth_sw_probe(struct platform_device *pdev)
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
+
spin_lock_init(&priv->lock);
priv->clk = devm_clk_get(&pdev->dev, NULL);
@@ -2080,7 +2160,7 @@ static int renesas_eth_sw_probe(struct platform_device *pdev)
if (IS_ERR(priv->addr))
return PTR_ERR(priv->addr);
- priv->ptp_priv->addr = priv->addr + RCAR_GEN4_GPTP_OFFSET_S4;
+ priv->ptp_priv->addr = priv->addr + RSWITCH_GPTP_OFFSET_S4;
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
if (ret < 0) {
@@ -2097,6 +2177,8 @@ static int renesas_eth_sw_probe(struct platform_device *pdev)
if (!priv->gwca.queues)
return -ENOMEM;
+ INIT_LIST_HEAD(&priv->port_list);
+
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
@@ -2107,6 +2189,15 @@ static int renesas_eth_sw_probe(struct platform_device *pdev)
return ret;
}
+ if (list_empty(&priv->port_list))
+ dev_warn(&pdev->dev, "could not initialize any ports\n");
+
+ ret = rswitch_register_notifiers();
+ if (ret) {
+ dev_err(&pdev->dev, "could not register notifiers\n");
+ return ret;
+ }
+
device_set_wakeup_capable(&pdev->dev, 1);
return ret;
@@ -2140,6 +2231,7 @@ static void renesas_eth_sw_remove(struct platform_device *pdev)
{
struct rswitch_private *priv = platform_get_drvdata(pdev);
+ rswitch_unregister_notifiers();
rswitch_deinit(priv);
pm_runtime_put(&pdev->dev);
@@ -2190,7 +2282,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(renesas_eth_sw_pm_ops, renesas_eth_sw_suspend,
static struct platform_driver renesas_eth_sw_driver_platform = {
.probe = renesas_eth_sw_probe,
- .remove_new = renesas_eth_sw_remove,
+ .remove = renesas_eth_sw_remove,
.driver = {
.name = "renesas_eth_sw",
.pm = pm_sleep_ptr(&renesas_eth_sw_pm_ops),
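
The MPIC/MPSM rework in rswitch_main.c above replaces hand-rolled shift constants with GENMASK()/FIELD_PREP()/FIELD_GET(). The sketch below shows how those macros compose and decompose a register value, using the MPIC field positions from rswitch.h; the GENMASK/FIELD_PREP/FIELD_GET definitions here are simplified user-space stand-ins for the kernel macros (illustration only):

#include <stdio.h>

/* Simplified user-space stand-ins for the kernel macros (32-bit fields only). */
#define GENMASK(h, l)        ((~0u >> (31 - (h))) & (~0u << (l)))
#define FIELD_LSB(mask)      ((mask) & -(mask))
#define FIELD_PREP(mask, v)  (((unsigned int)(v) * FIELD_LSB(mask)) & (mask))
#define FIELD_GET(mask, reg) (((reg) & (mask)) / FIELD_LSB(mask))

/* MPIC field positions and encodings, as defined in rswitch.h. */
#define MPIC_PIS        GENMASK(2, 0)
#define MPIC_PIS_GMII   2
#define MPIC_LSC        GENMASK(5, 3)
#define MPIC_LSC_1G     2

int main(void)
{
	unsigned int mpic = 0;

	/* Compose: GMII interface encoding plus a 1 Gbit/s link speed code. */
	mpic |= FIELD_PREP(MPIC_PIS, MPIC_PIS_GMII);
	mpic |= FIELD_PREP(MPIC_LSC, MPIC_LSC_1G);
	printf("MPIC = 0x%08x\n", mpic);                  /* prints 0x00000012 */

	/* Decompose: pull the link speed code back out of the register value. */
	printf("LSC  = %u\n", FIELD_GET(MPIC_LSC, mpic)); /* prints 2 */

	return 0;
}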
diff --git a/drivers/net/ethernet/renesas/rtsn.c b/drivers/net/ethernet/renesas/rtsn.c
new file mode 100644
index 000000000000..fdb1e7b7fb06
--- /dev/null
+++ b/drivers/net/ethernet/renesas/rtsn.c
@@ -0,0 +1,1371 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Renesas Ethernet-TSN device driver
+ *
+ * Copyright (C) 2022 Renesas Electronics Corporation
+ * Copyright (C) 2023 Niklas Söderlund <niklas.soderlund@ragnatech.se>
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/module.h>
+#include <linux/net_tstamp.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/spinlock.h>
+
+#include "rtsn.h"
+#include "rcar_gen4_ptp.h"
+
+struct rtsn_private {
+ struct net_device *ndev;
+ struct platform_device *pdev;
+ void __iomem *base;
+ struct rcar_gen4_ptp_private *ptp_priv;
+ struct clk *clk;
+ struct reset_control *reset;
+
+ u32 num_tx_ring;
+ u32 num_rx_ring;
+ u32 tx_desc_bat_size;
+ dma_addr_t tx_desc_bat_dma;
+ struct rtsn_desc *tx_desc_bat;
+ u32 rx_desc_bat_size;
+ dma_addr_t rx_desc_bat_dma;
+ struct rtsn_desc *rx_desc_bat;
+ dma_addr_t tx_desc_dma;
+ dma_addr_t rx_desc_dma;
+ struct rtsn_ext_desc *tx_ring;
+ struct rtsn_ext_ts_desc *rx_ring;
+ struct sk_buff **tx_skb;
+ struct sk_buff **rx_skb;
+ spinlock_t lock; /* Register access lock */
+ u32 cur_tx;
+ u32 dirty_tx;
+ u32 cur_rx;
+ u32 dirty_rx;
+ u8 ts_tag;
+ struct napi_struct napi;
+ struct rtnl_link_stats64 stats;
+
+ struct mii_bus *mii;
+ phy_interface_t iface;
+ int link;
+ int speed;
+
+ int tx_data_irq;
+ int rx_data_irq;
+
+ u32 tstamp_tx_ctrl;
+ u32 tstamp_rx_ctrl;
+};
+
+static u32 rtsn_read(struct rtsn_private *priv, enum rtsn_reg reg)
+{
+ return ioread32(priv->base + reg);
+}
+
+static void rtsn_write(struct rtsn_private *priv, enum rtsn_reg reg, u32 data)
+{
+ iowrite32(data, priv->base + reg);
+}
+
+static void rtsn_modify(struct rtsn_private *priv, enum rtsn_reg reg,
+ u32 clear, u32 set)
+{
+ rtsn_write(priv, reg, (rtsn_read(priv, reg) & ~clear) | set);
+}
+
+static int rtsn_reg_wait(struct rtsn_private *priv, enum rtsn_reg reg,
+ u32 mask, u32 expected)
+{
+ u32 val;
+
+ return readl_poll_timeout(priv->base + reg, val,
+ (val & mask) == expected,
+ RTSN_INTERVAL_US, RTSN_TIMEOUT_US);
+}
+
+static void rtsn_ctrl_data_irq(struct rtsn_private *priv, bool enable)
+{
+ if (enable) {
+ rtsn_write(priv, TDIE0, TDIE_TDID_TDX(TX_CHAIN_IDX));
+ rtsn_write(priv, RDIE0, RDIE_RDID_RDX(RX_CHAIN_IDX));
+ } else {
+ rtsn_write(priv, TDID0, TDIE_TDID_TDX(TX_CHAIN_IDX));
+ rtsn_write(priv, RDID0, RDIE_RDID_RDX(RX_CHAIN_IDX));
+ }
+}
+
+static void rtsn_get_timestamp(struct rtsn_private *priv, struct timespec64 *ts)
+{
+ struct rcar_gen4_ptp_private *ptp_priv = priv->ptp_priv;
+
+ ptp_priv->info.gettime64(&ptp_priv->info, ts);
+}
+
+static int rtsn_tx_free(struct net_device *ndev, bool free_txed_only)
+{
+ struct rtsn_private *priv = netdev_priv(ndev);
+ struct rtsn_ext_desc *desc;
+ struct sk_buff *skb;
+ int free_num = 0;
+ int entry, size;
+
+ for (; priv->cur_tx - priv->dirty_tx > 0; priv->dirty_tx++) {
+ entry = priv->dirty_tx % priv->num_tx_ring;
+ desc = &priv->tx_ring[entry];
+ if (free_txed_only && (desc->die_dt & DT_MASK) != DT_FEMPTY)
+ break;
+
+ dma_rmb();
+ size = le16_to_cpu(desc->info_ds) & TX_DS;
+ skb = priv->tx_skb[entry];
+ if (skb) {
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct timespec64 ts;
+
+ rtsn_get_timestamp(priv, &ts);
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ }
+ dma_unmap_single(ndev->dev.parent,
+ le32_to_cpu(desc->dptr),
+ size, DMA_TO_DEVICE);
+ dev_kfree_skb_any(priv->tx_skb[entry]);
+ free_num++;
+
+ priv->stats.tx_packets++;
+ priv->stats.tx_bytes += size;
+ }
+
+ desc->die_dt = DT_EEMPTY;
+ }
+
+ desc = &priv->tx_ring[priv->num_tx_ring];
+ desc->die_dt = DT_LINK;
+
+ return free_num;
+}
+
+static int rtsn_rx(struct net_device *ndev, int budget)
+{
+ struct rtsn_private *priv = netdev_priv(ndev);
+ unsigned int ndescriptors;
+ unsigned int rx_packets;
+ unsigned int i;
+ bool get_ts;
+
+ get_ts = priv->tstamp_rx_ctrl != HWTSTAMP_FILTER_NONE;
+
+ ndescriptors = priv->dirty_rx + priv->num_rx_ring - priv->cur_rx;
+ rx_packets = 0;
+ for (i = 0; i < ndescriptors; i++) {
+ const unsigned int entry = priv->cur_rx % priv->num_rx_ring;
+ struct rtsn_ext_ts_desc *desc = &priv->rx_ring[entry];
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ u16 pkt_len;
+
+ /* Stop processing descriptors if budget is consumed. */
+ if (rx_packets >= budget)
+ break;
+
+ /* Stop processing descriptors on first empty. */
+ if ((desc->die_dt & DT_MASK) == DT_FEMPTY)
+ break;
+
+ dma_rmb();
+ pkt_len = le16_to_cpu(desc->info_ds) & RX_DS;
+
+ skb = priv->rx_skb[entry];
+ priv->rx_skb[entry] = NULL;
+ dma_addr = le32_to_cpu(desc->dptr);
+ dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
+
+ /* Get timestamp if enabled. */
+ if (get_ts) {
+ struct skb_shared_hwtstamps *shhwtstamps;
+ struct timespec64 ts;
+
+ shhwtstamps = skb_hwtstamps(skb);
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+
+ ts.tv_sec = (u64)le32_to_cpu(desc->ts_sec);
+ ts.tv_nsec = le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
+
+ shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
+ }
+
+ skb_put(skb, pkt_len);
+ skb->protocol = eth_type_trans(skb, ndev);
+ napi_gro_receive(&priv->napi, skb);
+
+ /* Update statistics. */
+ priv->stats.rx_packets++;
+ priv->stats.rx_bytes += pkt_len;
+
+ /* Update counters. */
+ priv->cur_rx++;
+ rx_packets++;
+ }
+
+ /* Refill the RX ring buffers */
+ for (; priv->cur_rx - priv->dirty_rx > 0; priv->dirty_rx++) {
+ const unsigned int entry = priv->dirty_rx % priv->num_rx_ring;
+ struct rtsn_ext_ts_desc *desc = &priv->rx_ring[entry];
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+
+ desc->info_ds = cpu_to_le16(PKT_BUF_SZ);
+
+ if (!priv->rx_skb[entry]) {
+ skb = napi_alloc_skb(&priv->napi,
+ PKT_BUF_SZ + RTSN_ALIGN - 1);
+ if (!skb)
+ break;
+ skb_reserve(skb, NET_IP_ALIGN);
+ dma_addr = dma_map_single(ndev->dev.parent, skb->data,
+ le16_to_cpu(desc->info_ds),
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ndev->dev.parent, dma_addr))
+ desc->info_ds = cpu_to_le16(0);
+ desc->dptr = cpu_to_le32(dma_addr);
+ skb_checksum_none_assert(skb);
+ priv->rx_skb[entry] = skb;
+ }
+
+ dma_wmb();
+ desc->die_dt = DT_FEMPTY | D_DIE;
+ }
+
+ priv->rx_ring[priv->num_rx_ring].die_dt = DT_LINK;
+
+ return rx_packets;
+}
+
+static int rtsn_poll(struct napi_struct *napi, int budget)
+{
+ struct rtsn_private *priv;
+ struct net_device *ndev;
+ unsigned long flags;
+ int work_done;
+
+ ndev = napi->dev;
+ priv = netdev_priv(ndev);
+
+ /* Processing RX Descriptor Ring */
+ work_done = rtsn_rx(ndev, budget);
+
+ /* Processing TX Descriptor Ring */
+ spin_lock_irqsave(&priv->lock, flags);
+ rtsn_tx_free(ndev, true);
+ netif_wake_subqueue(ndev, 0);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ /* Re-enable TX/RX interrupts */
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ spin_lock_irqsave(&priv->lock, flags);
+ rtsn_ctrl_data_irq(priv, true);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+
+ return work_done;
+}
+
+static int rtsn_desc_alloc(struct rtsn_private *priv)
+{
+ struct device *dev = &priv->pdev->dev;
+ unsigned int i;
+
+ priv->tx_desc_bat_size = sizeof(struct rtsn_desc) * TX_NUM_CHAINS;
+ priv->tx_desc_bat = dma_alloc_coherent(dev, priv->tx_desc_bat_size,
+ &priv->tx_desc_bat_dma,
+ GFP_KERNEL);
+
+ if (!priv->tx_desc_bat)
+ return -ENOMEM;
+
+ for (i = 0; i < TX_NUM_CHAINS; i++)
+ priv->tx_desc_bat[i].die_dt = DT_EOS;
+
+ priv->rx_desc_bat_size = sizeof(struct rtsn_desc) * RX_NUM_CHAINS;
+ priv->rx_desc_bat = dma_alloc_coherent(dev, priv->rx_desc_bat_size,
+ &priv->rx_desc_bat_dma,
+ GFP_KERNEL);
+
+ if (!priv->rx_desc_bat)
+ return -ENOMEM;
+
+ for (i = 0; i < RX_NUM_CHAINS; i++)
+ priv->rx_desc_bat[i].die_dt = DT_EOS;
+
+ return 0;
+}
+
+static void rtsn_desc_free(struct rtsn_private *priv)
+{
+ if (priv->tx_desc_bat)
+ dma_free_coherent(&priv->pdev->dev, priv->tx_desc_bat_size,
+ priv->tx_desc_bat, priv->tx_desc_bat_dma);
+ priv->tx_desc_bat = NULL;
+
+ if (priv->rx_desc_bat)
+ dma_free_coherent(&priv->pdev->dev, priv->rx_desc_bat_size,
+ priv->rx_desc_bat, priv->rx_desc_bat_dma);
+ priv->rx_desc_bat = NULL;
+}
+
+static void rtsn_chain_free(struct rtsn_private *priv)
+{
+ struct device *dev = &priv->pdev->dev;
+
+ dma_free_coherent(dev,
+ sizeof(struct rtsn_ext_desc) * (priv->num_tx_ring + 1),
+ priv->tx_ring, priv->tx_desc_dma);
+ priv->tx_ring = NULL;
+
+ dma_free_coherent(dev,
+ sizeof(struct rtsn_ext_ts_desc) * (priv->num_rx_ring + 1),
+ priv->rx_ring, priv->rx_desc_dma);
+ priv->rx_ring = NULL;
+
+ kfree(priv->tx_skb);
+ priv->tx_skb = NULL;
+
+ kfree(priv->rx_skb);
+ priv->rx_skb = NULL;
+}
+
+static int rtsn_chain_init(struct rtsn_private *priv, int tx_size, int rx_size)
+{
+ struct net_device *ndev = priv->ndev;
+ struct sk_buff *skb;
+ int i;
+
+ priv->num_tx_ring = tx_size;
+ priv->num_rx_ring = rx_size;
+
+ priv->tx_skb = kcalloc(tx_size, sizeof(*priv->tx_skb), GFP_KERNEL);
+ priv->rx_skb = kcalloc(rx_size, sizeof(*priv->rx_skb), GFP_KERNEL);
+
+ if (!priv->rx_skb || !priv->tx_skb)
+ goto error;
+
+ for (i = 0; i < rx_size; i++) {
+ skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RTSN_ALIGN - 1);
+ if (!skb)
+ goto error;
+ skb_reserve(skb, NET_IP_ALIGN);
+ priv->rx_skb[i] = skb;
+ }
+
+ /* Allocate TX, RX descriptors */
+ priv->tx_ring = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(struct rtsn_ext_desc) * (tx_size + 1),
+ &priv->tx_desc_dma, GFP_KERNEL);
+ priv->rx_ring = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(struct rtsn_ext_ts_desc) * (rx_size + 1),
+ &priv->rx_desc_dma, GFP_KERNEL);
+
+ if (!priv->tx_ring || !priv->rx_ring)
+ goto error;
+
+ return 0;
+error:
+ rtsn_chain_free(priv);
+
+ return -ENOMEM;
+}
+
+static void rtsn_chain_format(struct rtsn_private *priv)
+{
+ struct net_device *ndev = priv->ndev;
+ struct rtsn_ext_ts_desc *rx_desc;
+ struct rtsn_ext_desc *tx_desc;
+ struct rtsn_desc *bat_desc;
+ dma_addr_t dma_addr;
+ unsigned int i;
+
+ priv->cur_tx = 0;
+ priv->cur_rx = 0;
+ priv->dirty_rx = 0;
+ priv->dirty_tx = 0;
+
+ /* TX */
+ memset(priv->tx_ring, 0, sizeof(*tx_desc) * priv->num_tx_ring);
+ for (i = 0, tx_desc = priv->tx_ring; i < priv->num_tx_ring; i++, tx_desc++)
+ tx_desc->die_dt = DT_EEMPTY | D_DIE;
+
+ tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma);
+ tx_desc->die_dt = DT_LINK;
+
+ bat_desc = &priv->tx_desc_bat[TX_CHAIN_IDX];
+ bat_desc->die_dt = DT_LINK;
+ bat_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma);
+
+ /* RX */
+ memset(priv->rx_ring, 0, sizeof(*rx_desc) * priv->num_rx_ring);
+ for (i = 0, rx_desc = priv->rx_ring; i < priv->num_rx_ring; i++, rx_desc++) {
+ dma_addr = dma_map_single(ndev->dev.parent,
+ priv->rx_skb[i]->data, PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
+ if (!dma_mapping_error(ndev->dev.parent, dma_addr))
+ rx_desc->info_ds = cpu_to_le16(PKT_BUF_SZ);
+ rx_desc->dptr = cpu_to_le32((u32)dma_addr);
+ rx_desc->die_dt = DT_FEMPTY | D_DIE;
+ }
+ rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma);
+ rx_desc->die_dt = DT_LINK;
+
+ bat_desc = &priv->rx_desc_bat[RX_CHAIN_IDX];
+ bat_desc->die_dt = DT_LINK;
+ bat_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma);
+}
+
+static int rtsn_dmac_init(struct rtsn_private *priv)
+{
+ int ret;
+
+ ret = rtsn_chain_init(priv, TX_CHAIN_SIZE, RX_CHAIN_SIZE);
+ if (ret)
+ return ret;
+
+ rtsn_chain_format(priv);
+
+ return 0;
+}
+
+static enum rtsn_mode rtsn_read_mode(struct rtsn_private *priv)
+{
+ return (rtsn_read(priv, OSR) & OSR_OPS) >> 1;
+}
+
+static int rtsn_wait_mode(struct rtsn_private *priv, enum rtsn_mode mode)
+{
+ unsigned int i;
+
+ /* Need to busy loop as mode changes can happen in atomic context. */
+ for (i = 0; i < RTSN_TIMEOUT_US / RTSN_INTERVAL_US; i++) {
+ if (rtsn_read_mode(priv) == mode)
+ return 0;
+
+ udelay(RTSN_INTERVAL_US);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int rtsn_change_mode(struct rtsn_private *priv, enum rtsn_mode mode)
+{
+ int ret;
+
+ rtsn_write(priv, OCR, mode);
+ ret = rtsn_wait_mode(priv, mode);
+ if (ret)
+ netdev_err(priv->ndev, "Failed to switch operation mode\n");
+ return ret;
+}
+
+static int rtsn_get_data_irq_status(struct rtsn_private *priv)
+{
+ u32 val;
+
+ val = rtsn_read(priv, TDIS0) | TDIS_TDS(TX_CHAIN_IDX);
+ val |= rtsn_read(priv, RDIS0) | RDIS_RDS(RX_CHAIN_IDX);
+
+ return val;
+}
+
+static irqreturn_t rtsn_irq(int irq, void *dev_id)
+{
+ struct rtsn_private *priv = dev_id;
+ int ret = IRQ_NONE;
+
+ spin_lock(&priv->lock);
+
+ if (rtsn_get_data_irq_status(priv)) {
+ /* Clear TX/RX irq status */
+ rtsn_write(priv, TDIS0, TDIS_TDS(TX_CHAIN_IDX));
+ rtsn_write(priv, RDIS0, RDIS_RDS(RX_CHAIN_IDX));
+
+ if (napi_schedule_prep(&priv->napi)) {
+ /* Disable TX/RX interrupts */
+ rtsn_ctrl_data_irq(priv, false);
+
+ __napi_schedule(&priv->napi);
+ }
+
+ ret = IRQ_HANDLED;
+ }
+
+ spin_unlock(&priv->lock);
+
+ return ret;
+}
+
+static int rtsn_request_irq(unsigned int irq, irq_handler_t handler,
+ unsigned long flags, struct rtsn_private *priv,
+ const char *ch)
+{
+ char *name;
+ int ret;
+
+ name = devm_kasprintf(&priv->pdev->dev, GFP_KERNEL, "%s:%s",
+ priv->ndev->name, ch);
+ if (!name)
+ return -ENOMEM;
+
+ ret = request_irq(irq, handler, flags, name, priv);
+ if (ret)
+ netdev_err(priv->ndev, "Cannot request IRQ %s\n", name);
+
+ return ret;
+}
+
+static void rtsn_free_irqs(struct rtsn_private *priv)
+{
+ free_irq(priv->tx_data_irq, priv);
+ free_irq(priv->rx_data_irq, priv);
+}
+
+static int rtsn_request_irqs(struct rtsn_private *priv)
+{
+ int ret;
+
+ priv->rx_data_irq = platform_get_irq_byname(priv->pdev, "rx");
+ if (priv->rx_data_irq < 0)
+ return priv->rx_data_irq;
+
+ priv->tx_data_irq = platform_get_irq_byname(priv->pdev, "tx");
+ if (priv->tx_data_irq < 0)
+ return priv->tx_data_irq;
+
+ ret = rtsn_request_irq(priv->tx_data_irq, rtsn_irq, 0, priv, "tx");
+ if (ret)
+ return ret;
+
+ ret = rtsn_request_irq(priv->rx_data_irq, rtsn_irq, 0, priv, "rx");
+ if (ret) {
+ free_irq(priv->tx_data_irq, priv);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rtsn_reset(struct rtsn_private *priv)
+{
+ reset_control_reset(priv->reset);
+ mdelay(1);
+
+ return rtsn_wait_mode(priv, OCR_OPC_DISABLE);
+}
+
+static int rtsn_axibmi_init(struct rtsn_private *priv)
+{
+ int ret;
+
+ ret = rtsn_reg_wait(priv, RR, RR_RST, RR_RST_COMPLETE);
+ if (ret)
+ return ret;
+
+ /* Set AXIWC */
+ rtsn_write(priv, AXIWC, AXIWC_DEFAULT);
+
+ /* Set AXIRC */
+ rtsn_write(priv, AXIRC, AXIRC_DEFAULT);
+
+ /* TX Descriptor chain setting */
+ rtsn_write(priv, TATLS0, TATLS0_TEDE | TATLS0_TATEN(TX_CHAIN_IDX));
+ rtsn_write(priv, TATLS1, priv->tx_desc_bat_dma + TX_CHAIN_ADDR_OFFSET);
+ rtsn_write(priv, TATLR, TATLR_TATL);
+
+ ret = rtsn_reg_wait(priv, TATLR, TATLR_TATL, 0);
+ if (ret)
+ return ret;
+
+ /* RX Descriptor chain setting */
+ rtsn_write(priv, RATLS0,
+ RATLS0_RETS | RATLS0_REDE | RATLS0_RATEN(RX_CHAIN_IDX));
+ rtsn_write(priv, RATLS1, priv->rx_desc_bat_dma + RX_CHAIN_ADDR_OFFSET);
+ rtsn_write(priv, RATLR, RATLR_RATL);
+
+ ret = rtsn_reg_wait(priv, RATLR, RATLR_RATL, 0);
+ if (ret)
+ return ret;
+
+ /* Enable TX/RX interrupts */
+ rtsn_ctrl_data_irq(priv, true);
+
+ return 0;
+}
+
+static void rtsn_mhd_init(struct rtsn_private *priv)
+{
+ /* TX General setting */
+ rtsn_write(priv, TGC1, TGC1_STTV_DEFAULT | TGC1_TQTM_SFM);
+ rtsn_write(priv, TMS0, TMS_MFS_MAX);
+
+ /* RX Filter IP */
+ rtsn_write(priv, CFCR0, CFCR_SDID(RX_CHAIN_IDX));
+ rtsn_write(priv, FMSCR, FMSCR_FMSIE(RX_CHAIN_IDX));
+}
+
+static int rtsn_get_phy_params(struct rtsn_private *priv)
+{
+ int ret;
+
+ ret = of_get_phy_mode(priv->pdev->dev.of_node, &priv->iface);
+ if (ret)
+ return ret;
+
+ switch (priv->iface) {
+ case PHY_INTERFACE_MODE_MII:
+ priv->speed = 100;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ priv->speed = 1000;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static void rtsn_set_phy_interface(struct rtsn_private *priv)
+{
+ u32 val;
+
+ switch (priv->iface) {
+ case PHY_INTERFACE_MODE_MII:
+ val = MPIC_PIS_MII;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ val = MPIC_PIS_GMII;
+ break;
+ default:
+ return;
+ }
+
+ rtsn_modify(priv, MPIC, MPIC_PIS_MASK, val);
+}
+
+static void rtsn_set_rate(struct rtsn_private *priv)
+{
+ u32 val;
+
+ switch (priv->speed) {
+ case 10:
+ val = MPIC_LSC_10M;
+ break;
+ case 100:
+ val = MPIC_LSC_100M;
+ break;
+ case 1000:
+ val = MPIC_LSC_1G;
+ break;
+ default:
+ return;
+ }
+
+ rtsn_modify(priv, MPIC, MPIC_LSC_MASK, val);
+}
+
+static int rtsn_rmac_init(struct rtsn_private *priv)
+{
+ const u8 *mac_addr = priv->ndev->dev_addr;
+ int ret;
+
+ /* Set MAC address */
+ rtsn_write(priv, MRMAC0, (mac_addr[0] << 8) | mac_addr[1]);
+ rtsn_write(priv, MRMAC1, (mac_addr[2] << 24) | (mac_addr[3] << 16) |
+ (mac_addr[4] << 8) | mac_addr[5]);
+
+ /* Set xMII type */
+ rtsn_set_phy_interface(priv);
+ rtsn_set_rate(priv);
+
+ /* Enable MII */
+ rtsn_modify(priv, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK,
+ MPIC_PSMCS_DEFAULT | MPIC_PSMHT_DEFAULT);
+
+ /* Link verification */
+ rtsn_modify(priv, MLVC, MLVC_PLV, MLVC_PLV);
+ ret = rtsn_reg_wait(priv, MLVC, MLVC_PLV, 0);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int rtsn_hw_init(struct rtsn_private *priv)
+{
+ int ret;
+
+ ret = rtsn_reset(priv);
+ if (ret)
+ return ret;
+
+ /* Change to CONFIG mode */
+ ret = rtsn_change_mode(priv, OCR_OPC_CONFIG);
+ if (ret)
+ return ret;
+
+ ret = rtsn_axibmi_init(priv);
+ if (ret)
+ return ret;
+
+ rtsn_mhd_init(priv);
+
+ ret = rtsn_rmac_init(priv);
+ if (ret)
+ return ret;
+
+ ret = rtsn_change_mode(priv, OCR_OPC_DISABLE);
+ if (ret)
+ return ret;
+
+ /* Change to OPERATION mode */
+ ret = rtsn_change_mode(priv, OCR_OPC_OPERATION);
+
+ return ret;
+}
+
+static int rtsn_mii_access(struct mii_bus *bus, bool read, int phyad,
+ int regad, u16 data)
+{
+ struct rtsn_private *priv = bus->priv;
+ u32 val;
+ int ret;
+
+ val = MPSM_PDA(phyad) | MPSM_PRA(regad) | MPSM_PSME;
+
+ if (!read)
+ val |= MPSM_PSMAD | MPSM_PRD_SET(data);
+
+ rtsn_write(priv, MPSM, val);
+
+ ret = rtsn_reg_wait(priv, MPSM, MPSM_PSME, 0);
+ if (ret)
+ return ret;
+
+ if (read)
+ ret = MPSM_PRD_GET(rtsn_read(priv, MPSM));
+
+ return ret;
+}
+
+static int rtsn_mii_read(struct mii_bus *bus, int addr, int regnum)
+{
+ return rtsn_mii_access(bus, true, addr, regnum, 0);
+}
+
+static int rtsn_mii_write(struct mii_bus *bus, int addr, int regnum, u16 val)
+{
+ return rtsn_mii_access(bus, false, addr, regnum, val);
+}
+
+static int rtsn_mdio_alloc(struct rtsn_private *priv)
+{
+ struct platform_device *pdev = priv->pdev;
+ struct device *dev = &pdev->dev;
+ struct device_node *mdio_node;
+ struct mii_bus *mii;
+ int ret;
+
+ mii = mdiobus_alloc();
+ if (!mii)
+ return -ENOMEM;
+
+ mdio_node = of_get_child_by_name(dev->of_node, "mdio");
+ if (!mdio_node) {
+ ret = -ENODEV;
+ goto out_free_bus;
+ }
+
+ /* Enter config mode before registering the MDIO bus */
+ ret = rtsn_reset(priv);
+ if (ret)
+ goto out_free_bus;
+
+ ret = rtsn_change_mode(priv, OCR_OPC_CONFIG);
+ if (ret)
+ goto out_free_bus;
+
+ rtsn_modify(priv, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK,
+ MPIC_PSMCS_DEFAULT | MPIC_PSMHT_DEFAULT);
+
+ /* Register the MDIO bus */
+ mii->name = "rtsn_mii";
+ snprintf(mii->id, MII_BUS_ID_SIZE, "%s-%x",
+ pdev->name, pdev->id);
+ mii->priv = priv;
+ mii->read = rtsn_mii_read;
+ mii->write = rtsn_mii_write;
+ mii->parent = dev;
+
+ ret = of_mdiobus_register(mii, mdio_node);
+ of_node_put(mdio_node);
+ if (ret)
+ goto out_free_bus;
+
+ priv->mii = mii;
+
+ return 0;
+
+out_free_bus:
+ mdiobus_free(mii);
+ return ret;
+}
+
+static void rtsn_mdio_free(struct rtsn_private *priv)
+{
+ mdiobus_unregister(priv->mii);
+ mdiobus_free(priv->mii);
+ priv->mii = NULL;
+}
+
+static void rtsn_adjust_link(struct net_device *ndev)
+{
+ struct rtsn_private *priv = netdev_priv(ndev);
+ struct phy_device *phydev = ndev->phydev;
+ bool new_state = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ if (phydev->link) {
+ if (phydev->speed != priv->speed) {
+ new_state = true;
+ priv->speed = phydev->speed;
+ }
+
+ if (!priv->link) {
+ new_state = true;
+ priv->link = phydev->link;
+ }
+ } else if (priv->link) {
+ new_state = true;
+ priv->link = 0;
+ priv->speed = 0;
+ }
+
+ if (new_state) {
+ /* Need to transition to CONFIG mode before reconfiguring and
+ * then back to the original mode. Any state change to/from
+ * CONFIG or OPERATION must go over DISABLED to stop Rx/Tx.
+ */
+ enum rtsn_mode orgmode = rtsn_read_mode(priv);
+
+ /* Transit to CONFIG */
+ if (orgmode != OCR_OPC_CONFIG) {
+ if (orgmode != OCR_OPC_DISABLE &&
+ rtsn_change_mode(priv, OCR_OPC_DISABLE))
+ goto out;
+ if (rtsn_change_mode(priv, OCR_OPC_CONFIG))
+ goto out;
+ }
+
+ rtsn_set_rate(priv);
+
+ /* Transition to original mode */
+ if (orgmode != OCR_OPC_CONFIG) {
+ if (rtsn_change_mode(priv, OCR_OPC_DISABLE))
+ goto out;
+ if (orgmode != OCR_OPC_DISABLE &&
+ rtsn_change_mode(priv, orgmode))
+ goto out;
+ }
+ }
+out:
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (new_state)
+ phy_print_status(phydev);
+}
+
+static int rtsn_phy_init(struct rtsn_private *priv)
+{
+ struct device_node *np = priv->ndev->dev.parent->of_node;
+ struct phy_device *phydev;
+ struct device_node *phy;
+
+ priv->link = 0;
+
+ phy = of_parse_phandle(np, "phy-handle", 0);
+ if (!phy)
+ return -ENOENT;
+
+ phydev = of_phy_connect(priv->ndev, phy, rtsn_adjust_link, 0,
+ priv->iface);
+ of_node_put(phy);
+ if (!phydev)
+ return -ENOENT;
+
+ /* Only support full-duplex mode */
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+
+ phy_attached_info(phydev);
+
+ return 0;
+}
+
+static void rtsn_phy_deinit(struct rtsn_private *priv)
+{
+ phy_disconnect(priv->ndev->phydev);
+ priv->ndev->phydev = NULL;
+}
+
+static int rtsn_init(struct rtsn_private *priv)
+{
+ int ret;
+
+ ret = rtsn_desc_alloc(priv);
+ if (ret)
+ return ret;
+
+ ret = rtsn_dmac_init(priv);
+ if (ret)
+ goto error_free_desc;
+
+ ret = rtsn_hw_init(priv);
+ if (ret)
+ goto error_free_chain;
+
+ ret = rtsn_phy_init(priv);
+ if (ret)
+ goto error_free_chain;
+
+ ret = rtsn_request_irqs(priv);
+ if (ret)
+ goto error_free_phy;
+
+ return 0;
+error_free_phy:
+ rtsn_phy_deinit(priv);
+error_free_chain:
+ rtsn_chain_free(priv);
+error_free_desc:
+ rtsn_desc_free(priv);
+ return ret;
+}
+
+static void rtsn_deinit(struct rtsn_private *priv)
+{
+ rtsn_free_irqs(priv);
+ rtsn_phy_deinit(priv);
+ rtsn_chain_free(priv);
+ rtsn_desc_free(priv);
+}
+
+static void rtsn_parse_mac_address(struct device_node *np,
+ struct net_device *ndev)
+{
+ struct rtsn_private *priv = netdev_priv(ndev);
+ u8 addr[ETH_ALEN];
+ u32 mrmac0;
+ u32 mrmac1;
+
+ /* Try to read address from Device Tree. */
+ if (!of_get_mac_address(np, addr)) {
+ eth_hw_addr_set(ndev, addr);
+ return;
+ }
+
+ /* Try to read address from device. */
+ mrmac0 = rtsn_read(priv, MRMAC0);
+ mrmac1 = rtsn_read(priv, MRMAC1);
+
+ addr[0] = (mrmac0 >> 8) & 0xff;
+ addr[1] = (mrmac0 >> 0) & 0xff;
+ addr[2] = (mrmac1 >> 24) & 0xff;
+ addr[3] = (mrmac1 >> 16) & 0xff;
+ addr[4] = (mrmac1 >> 8) & 0xff;
+ addr[5] = (mrmac1 >> 0) & 0xff;
+
+ if (is_valid_ether_addr(addr)) {
+ eth_hw_addr_set(ndev, addr);
+ return;
+ }
+
+ /* Fallback to a random address */
+ eth_hw_addr_random(ndev);
+}
+
+static int rtsn_open(struct net_device *ndev)
+{
+ struct rtsn_private *priv = netdev_priv(ndev);
+ int ret;
+
+ napi_enable(&priv->napi);
+
+ ret = rtsn_init(priv);
+ if (ret) {
+ napi_disable(&priv->napi);
+ return ret;
+ }
+
+ phy_start(ndev->phydev);
+
+ netif_start_queue(ndev);
+
+ return 0;
+}
+
+static int rtsn_stop(struct net_device *ndev)
+{
+ struct rtsn_private *priv = netdev_priv(ndev);
+
+ phy_stop(priv->ndev->phydev);
+ napi_disable(&priv->napi);
+ rtsn_change_mode(priv, OCR_OPC_DISABLE);
+ rtsn_deinit(priv);
+
+ return 0;
+}
+
+static netdev_tx_t rtsn_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct rtsn_private *priv = netdev_priv(ndev);
+ struct rtsn_ext_desc *desc;
+ int ret = NETDEV_TX_OK;
+ unsigned long flags;
+ dma_addr_t dma_addr;
+ int entry;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Drop packet if it won't fit in a single descriptor. */
+ if (skb->len >= TX_DS) {
+ priv->stats.tx_dropped++;
+ priv->stats.tx_errors++;
+ dev_kfree_skb_any(skb);
+ goto out;
+ }
+
+ if (priv->cur_tx - priv->dirty_tx > priv->num_tx_ring) {
+ netif_stop_subqueue(ndev, 0);
+ ret = NETDEV_TX_BUSY;
+ goto out;
+ }
+
+ if (skb_put_padto(skb, ETH_ZLEN))
+ goto out;
+
+ dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ndev->dev.parent, dma_addr)) {
+ dev_kfree_skb_any(skb);
+ goto out;
+ }
+
+ entry = priv->cur_tx % priv->num_tx_ring;
+ priv->tx_skb[entry] = skb;
+ desc = &priv->tx_ring[entry];
+ desc->dptr = cpu_to_le32(dma_addr);
+ desc->info_ds = cpu_to_le16(skb->len);
+ desc->info1 = cpu_to_le64(skb->len);
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ priv->ts_tag++;
+ desc->info_ds |= cpu_to_le16(TXC);
+ desc->info = priv->ts_tag;
+ }
+
+ skb_tx_timestamp(skb);
+ dma_wmb();
+
+ desc->die_dt = DT_FSINGLE | D_DIE;
+ priv->cur_tx++;
+
+ /* Start xmit */
+ rtsn_write(priv, TRCR0, BIT(TX_CHAIN_IDX));
+out:
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return ret;
+}
+
+static void rtsn_get_stats64(struct net_device *ndev,
+ struct rtnl_link_stats64 *storage)
+{
+ struct rtsn_private *priv = netdev_priv(ndev);
+ *storage = priv->stats;
+}
+
+static int rtsn_do_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
+{
+ if (!netif_running(ndev))
+ return -ENODEV;
+
+ return phy_do_ioctl_running(ndev, ifr, cmd);
+}
+
+static int rtsn_hwtstamp_get(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct rtsn_private *priv;
+
+ if (!netif_running(ndev))
+ return -ENODEV;
+
+ priv = netdev_priv(ndev);
+
+ config->flags = 0;
+ config->tx_type = priv->tstamp_tx_ctrl;
+ config->rx_filter = priv->tstamp_rx_ctrl;
+
+ return 0;
+}
+
+static int rtsn_hwtstamp_set(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ enum hwtstamp_rx_filters tstamp_rx_ctrl;
+ enum hwtstamp_tx_types tstamp_tx_ctrl;
+ struct rtsn_private *priv;
+
+ if (!netif_running(ndev))
+ return -ENODEV;
+
+ priv = netdev_priv(ndev);
+
+ if (config->flags)
+ return -EINVAL;
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ case HWTSTAMP_TX_ON:
+ tstamp_tx_ctrl = config->tx_type;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ tstamp_rx_ctrl = config->rx_filter;
+ break;
+ default:
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+ tstamp_rx_ctrl = HWTSTAMP_FILTER_ALL;
+ break;
+ }
+
+ priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
+ priv->tstamp_rx_ctrl = tstamp_rx_ctrl;
+
+ return 0;
+}
+
+static const struct net_device_ops rtsn_netdev_ops = {
+ .ndo_open = rtsn_open,
+ .ndo_stop = rtsn_stop,
+ .ndo_start_xmit = rtsn_start_xmit,
+ .ndo_get_stats64 = rtsn_get_stats64,
+ .ndo_eth_ioctl = rtsn_do_ioctl,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_hwtstamp_set = rtsn_hwtstamp_set,
+ .ndo_hwtstamp_get = rtsn_hwtstamp_get,
+};
+
+static int rtsn_get_ts_info(struct net_device *ndev,
+ struct kernel_ethtool_ts_info *info)
+{
+ struct rtsn_private *priv = netdev_priv(ndev);
+
+ info->phc_index = ptp_clock_index(priv->ptp_priv->clock);
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
+static const struct ethtool_ops rtsn_ethtool_ops = {
+ .nway_reset = phy_ethtool_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_ts_info = rtsn_get_ts_info,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+};
+
+static const struct of_device_id rtsn_match_table[] = {
+ { .compatible = "renesas,r8a779g0-ethertsn", },
+ { /* Sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, rtsn_match_table);
+
+static int rtsn_probe(struct platform_device *pdev)
+{
+ struct rtsn_private *priv;
+ struct net_device *ndev;
+ struct resource *res;
+ int ret;
+
+ ndev = alloc_etherdev_mqs(sizeof(struct rtsn_private), TX_NUM_CHAINS,
+ RX_NUM_CHAINS);
+ if (!ndev)
+ return -ENOMEM;
+
+ priv = netdev_priv(ndev);
+ priv->pdev = pdev;
+ priv->ndev = ndev;
+
+ priv->ptp_priv = rcar_gen4_ptp_alloc(pdev);
+ if (!priv->ptp_priv) {
+ ret = -ENOMEM;
+ goto error_free;
+ }
+
+ spin_lock_init(&priv->lock);
+ platform_set_drvdata(pdev, priv);
+
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ ret = PTR_ERR(priv->clk);
+ goto error_free;
+ }
+
+ priv->reset = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->reset)) {
+ ret = PTR_ERR(priv->reset);
+ goto error_free;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tsnes");
+ if (!res) {
+ dev_err(&pdev->dev, "Can't find tsnes resource\n");
+ ret = -EINVAL;
+ goto error_free;
+ }
+
+ priv->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->base)) {
+ ret = PTR_ERR(priv->base);
+ goto error_free;
+ }
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ ndev->features = NETIF_F_RXCSUM;
+ ndev->hw_features = NETIF_F_RXCSUM;
+ ndev->base_addr = res->start;
+ ndev->netdev_ops = &rtsn_netdev_ops;
+ ndev->ethtool_ops = &rtsn_ethtool_ops;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gptp");
+ if (!res) {
+ dev_err(&pdev->dev, "Can't find gptp resource\n");
+ ret = -EINVAL;
+ goto error_free;
+ }
+
+ priv->ptp_priv->addr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->ptp_priv->addr)) {
+ ret = PTR_ERR(priv->ptp_priv->addr);
+ goto error_free;
+ }
+
+ ret = rtsn_get_phy_params(priv);
+ if (ret)
+ goto error_free;
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
+ netif_napi_add(ndev, &priv->napi, rtsn_poll);
+
+ rtsn_parse_mac_address(pdev->dev.of_node, ndev);
+
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+
+ device_set_wakeup_capable(&pdev->dev, 1);
+
+ ret = rcar_gen4_ptp_register(priv->ptp_priv, clk_get_rate(priv->clk));
+ if (ret)
+ goto error_pm;
+
+ ret = rtsn_mdio_alloc(priv);
+ if (ret)
+ goto error_ptp;
+
+ ret = register_netdev(ndev);
+ if (ret)
+ goto error_mdio;
+
+ netdev_info(ndev, "MAC address %pM\n", ndev->dev_addr);
+
+ return 0;
+
+error_mdio:
+ rtsn_mdio_free(priv);
+error_ptp:
+ rcar_gen4_ptp_unregister(priv->ptp_priv);
+error_pm:
+ netif_napi_del(&priv->napi);
+ rtsn_change_mode(priv, OCR_OPC_DISABLE);
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+error_free:
+ free_netdev(ndev);
+
+ return ret;
+}
+
+static void rtsn_remove(struct platform_device *pdev)
+{
+ struct rtsn_private *priv = platform_get_drvdata(pdev);
+
+ unregister_netdev(priv->ndev);
+ rtsn_mdio_free(priv);
+ rcar_gen4_ptp_unregister(priv->ptp_priv);
+ rtsn_change_mode(priv, OCR_OPC_DISABLE);
+ netif_napi_del(&priv->napi);
+
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ free_netdev(priv->ndev);
+}
+
+static struct platform_driver rtsn_driver = {
+ .probe = rtsn_probe,
+ .remove = rtsn_remove,
+ .driver = {
+ .name = "rtsn",
+ .of_match_table = rtsn_match_table,
+ }
+};
+module_platform_driver(rtsn_driver);
+
+MODULE_AUTHOR("Phong Hoang, Niklas Söderlund");
+MODULE_DESCRIPTION("Renesas Ethernet-TSN device driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/renesas/rtsn.h b/drivers/net/ethernet/renesas/rtsn.h
new file mode 100644
index 000000000000..3183e80d7e6b
--- /dev/null
+++ b/drivers/net/ethernet/renesas/rtsn.h
@@ -0,0 +1,464 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Renesas Ethernet-TSN device driver
+ *
+ * Copyright (C) 2022 Renesas Electronics Corporation
+ * Copyright (C) 2023 Niklas Söderlund <niklas.soderlund@ragnatech.se>
+ */
+
+#ifndef __RTSN_H__
+#define __RTSN_H__
+
+#include <linux/types.h>
+
+#define AXIBMI 0x0000
+#define TSNMHD 0x1000
+#define RMSO 0x2000
+#define RMRO 0x3800
+
+enum rtsn_reg {
+ AXIWC = AXIBMI + 0x0000,
+ AXIRC = AXIBMI + 0x0004,
+ TDPC0 = AXIBMI + 0x0010,
+ TFT = AXIBMI + 0x0090,
+ TATLS0 = AXIBMI + 0x00a0,
+ TATLS1 = AXIBMI + 0x00a4,
+ TATLR = AXIBMI + 0x00a8,
+ RATLS0 = AXIBMI + 0x00b0,
+ RATLS1 = AXIBMI + 0x00b4,
+ RATLR = AXIBMI + 0x00b8,
+ TSA0 = AXIBMI + 0x00c0,
+ TSS0 = AXIBMI + 0x00c4,
+ TRCR0 = AXIBMI + 0x0140,
+ RIDAUAS0 = AXIBMI + 0x0180,
+ RR = AXIBMI + 0x0200,
+ TATS = AXIBMI + 0x0210,
+ TATSR0 = AXIBMI + 0x0214,
+ TATSR1 = AXIBMI + 0x0218,
+ TATSR2 = AXIBMI + 0x021c,
+ RATS = AXIBMI + 0x0220,
+ RATSR0 = AXIBMI + 0x0224,
+ RATSR1 = AXIBMI + 0x0228,
+ RATSR2 = AXIBMI + 0x022c,
+ RIDASM0 = AXIBMI + 0x0240,
+ RIDASAM0 = AXIBMI + 0x0244,
+ RIDACAM0 = AXIBMI + 0x0248,
+ EIS0 = AXIBMI + 0x0300,
+ EIE0 = AXIBMI + 0x0304,
+ EID0 = AXIBMI + 0x0308,
+ EIS1 = AXIBMI + 0x0310,
+ EIE1 = AXIBMI + 0x0314,
+ EID1 = AXIBMI + 0x0318,
+ TCEIS0 = AXIBMI + 0x0340,
+ TCEIE0 = AXIBMI + 0x0344,
+ TCEID0 = AXIBMI + 0x0348,
+ RFSEIS0 = AXIBMI + 0x04c0,
+ RFSEIE0 = AXIBMI + 0x04c4,
+ RFSEID0 = AXIBMI + 0x04c8,
+ RFEIS0 = AXIBMI + 0x0540,
+ RFEIE0 = AXIBMI + 0x0544,
+ RFEID0 = AXIBMI + 0x0548,
+ RCEIS0 = AXIBMI + 0x05c0,
+ RCEIE0 = AXIBMI + 0x05c4,
+ RCEID0 = AXIBMI + 0x05c8,
+ RIDAOIS = AXIBMI + 0x0640,
+ RIDAOIE = AXIBMI + 0x0644,
+ RIDAOID = AXIBMI + 0x0648,
+ TSFEIS = AXIBMI + 0x06c0,
+ TSFEIE = AXIBMI + 0x06c4,
+ TSFEID = AXIBMI + 0x06c8,
+ TSCEIS = AXIBMI + 0x06d0,
+ TSCEIE = AXIBMI + 0x06d4,
+ TSCEID = AXIBMI + 0x06d8,
+ DIS = AXIBMI + 0x0b00,
+ DIE = AXIBMI + 0x0b04,
+ DID = AXIBMI + 0x0b08,
+ TDIS0 = AXIBMI + 0x0b10,
+ TDIE0 = AXIBMI + 0x0b14,
+ TDID0 = AXIBMI + 0x0b18,
+ RDIS0 = AXIBMI + 0x0b90,
+ RDIE0 = AXIBMI + 0x0b94,
+ RDID0 = AXIBMI + 0x0b98,
+ TSDIS = AXIBMI + 0x0c10,
+ TSDIE = AXIBMI + 0x0c14,
+ TSDID = AXIBMI + 0x0c18,
+ GPOUT = AXIBMI + 0x6000,
+
+ OCR = TSNMHD + 0x0000,
+ OSR = TSNMHD + 0x0004,
+ SWR = TSNMHD + 0x0008,
+ SIS = TSNMHD + 0x000c,
+ GIS = TSNMHD + 0x0010,
+ GIE = TSNMHD + 0x0014,
+ GID = TSNMHD + 0x0018,
+ TIS1 = TSNMHD + 0x0020,
+ TIE1 = TSNMHD + 0x0024,
+ TID1 = TSNMHD + 0x0028,
+ TIS2 = TSNMHD + 0x0030,
+ TIE2 = TSNMHD + 0x0034,
+ TID2 = TSNMHD + 0x0038,
+ RIS = TSNMHD + 0x0040,
+ RIE = TSNMHD + 0x0044,
+ RID = TSNMHD + 0x0048,
+ TGC1 = TSNMHD + 0x0050,
+ TGC2 = TSNMHD + 0x0054,
+ TFS0 = TSNMHD + 0x0060,
+ TCF0 = TSNMHD + 0x0070,
+ TCR1 = TSNMHD + 0x0080,
+ TCR2 = TSNMHD + 0x0084,
+ TCR3 = TSNMHD + 0x0088,
+ TCR4 = TSNMHD + 0x008c,
+ TMS0 = TSNMHD + 0x0090,
+ TSR1 = TSNMHD + 0x00b0,
+ TSR2 = TSNMHD + 0x00b4,
+ TSR3 = TSNMHD + 0x00b8,
+ TSR4 = TSNMHD + 0x00bc,
+ TSR5 = TSNMHD + 0x00c0,
+ RGC = TSNMHD + 0x00d0,
+ RDFCR = TSNMHD + 0x00d4,
+ RCFCR = TSNMHD + 0x00d8,
+ REFCNCR = TSNMHD + 0x00dc,
+ RSR1 = TSNMHD + 0x00e0,
+ RSR2 = TSNMHD + 0x00e4,
+ RSR3 = TSNMHD + 0x00e8,
+ TCIS = TSNMHD + 0x01e0,
+ TCIE = TSNMHD + 0x01e4,
+ TCID = TSNMHD + 0x01e8,
+ TPTPC = TSNMHD + 0x01f0,
+ TTML = TSNMHD + 0x01f4,
+ TTJ = TSNMHD + 0x01f8,
+ TCC = TSNMHD + 0x0200,
+ TCS = TSNMHD + 0x0204,
+ TGS = TSNMHD + 0x020c,
+ TACST0 = TSNMHD + 0x0210,
+ TACST1 = TSNMHD + 0x0214,
+ TACST2 = TSNMHD + 0x0218,
+ TALIT0 = TSNMHD + 0x0220,
+ TALIT1 = TSNMHD + 0x0224,
+ TALIT2 = TSNMHD + 0x0228,
+ TAEN0 = TSNMHD + 0x0230,
+ TAEN1 = TSNMHD + 0x0234,
+ TASFE = TSNMHD + 0x0240,
+ TACLL0 = TSNMHD + 0x0250,
+ TACLL1 = TSNMHD + 0x0254,
+ TACLL2 = TSNMHD + 0x0258,
+ CACC = TSNMHD + 0x0260,
+ CCS = TSNMHD + 0x0264,
+ CAIV0 = TSNMHD + 0x0270,
+ CAUL0 = TSNMHD + 0x0290,
+ TOCST0 = TSNMHD + 0x0300,
+ TOCST1 = TSNMHD + 0x0304,
+ TOCST2 = TSNMHD + 0x0308,
+ TOLIT0 = TSNMHD + 0x0310,
+ TOLIT1 = TSNMHD + 0x0314,
+ TOLIT2 = TSNMHD + 0x0318,
+ TOEN0 = TSNMHD + 0x0320,
+ TOEN1 = TSNMHD + 0x0324,
+ TOSFE = TSNMHD + 0x0330,
+ TCLR0 = TSNMHD + 0x0340,
+ TCLR1 = TSNMHD + 0x0344,
+ TCLR2 = TSNMHD + 0x0348,
+ TSMS = TSNMHD + 0x0350,
+ COCC = TSNMHD + 0x0360,
+ COIV0 = TSNMHD + 0x03b0,
+ COUL0 = TSNMHD + 0x03d0,
+ QSTMACU0 = TSNMHD + 0x0400,
+ QSTMACD0 = TSNMHD + 0x0404,
+ QSTMAMU0 = TSNMHD + 0x0408,
+ QSTMAMD0 = TSNMHD + 0x040c,
+ QSFTVL0 = TSNMHD + 0x0410,
+ QSFTVLM0 = TSNMHD + 0x0414,
+ QSFTMSD0 = TSNMHD + 0x0418,
+ QSFTGMI0 = TSNMHD + 0x041c,
+ QSFTLS = TSNMHD + 0x0600,
+ QSFTLIS = TSNMHD + 0x0604,
+ QSFTLIE = TSNMHD + 0x0608,
+ QSFTLID = TSNMHD + 0x060c,
+ QSMSMC = TSNMHD + 0x0610,
+ QSGTMC = TSNMHD + 0x0614,
+ QSEIS = TSNMHD + 0x0618,
+ QSEIE = TSNMHD + 0x061c,
+ QSEID = TSNMHD + 0x0620,
+ QGACST0 = TSNMHD + 0x0630,
+ QGACST1 = TSNMHD + 0x0634,
+ QGACST2 = TSNMHD + 0x0638,
+ QGALIT1 = TSNMHD + 0x0640,
+ QGALIT2 = TSNMHD + 0x0644,
+ QGAEN0 = TSNMHD + 0x0648,
+ QGAEN1 = TSNMHD + 0x074c,
+ QGIGS = TSNMHD + 0x0650,
+ QGGC = TSNMHD + 0x0654,
+ QGATL0 = TSNMHD + 0x0664,
+ QGATL1 = TSNMHD + 0x0668,
+ QGATL2 = TSNMHD + 0x066c,
+ QGOCST0 = TSNMHD + 0x0670,
+ QGOCST1 = TSNMHD + 0x0674,
+ QGOCST2 = TSNMHD + 0x0678,
+ QGOLIT0 = TSNMHD + 0x067c,
+ QGOLIT1 = TSNMHD + 0x0680,
+ QGOLIT2 = TSNMHD + 0x0684,
+ QGOEN0 = TSNMHD + 0x0688,
+ QGOEN1 = TSNMHD + 0x068c,
+ QGTRO = TSNMHD + 0x0690,
+ QGTR1 = TSNMHD + 0x0694,
+ QGTR2 = TSNMHD + 0x0698,
+ QGFSMS = TSNMHD + 0x069c,
+ QTMIS = TSNMHD + 0x06e0,
+ QTMIE = TSNMHD + 0x06e4,
+ QTMID = TSNMHD + 0x06e8,
+ QMEC = TSNMHD + 0x0700,
+ QMMC = TSNMHD + 0x0704,
+ QRFDC = TSNMHD + 0x0708,
+ QYFDC = TSNMHD + 0x070c,
+ QVTCMC0 = TSNMHD + 0x0710,
+ QMCBSC0 = TSNMHD + 0x0750,
+ QMCIRC0 = TSNMHD + 0x0790,
+ QMEBSC0 = TSNMHD + 0x07d0,
+ QMEIRC0 = TSNMHD + 0x0710,
+ QMCFC = TSNMHD + 0x0850,
+ QMEIS = TSNMHD + 0x0860,
+ QMEIE = TSNMHD + 0x0864,
+ QMEID = TSNMHD + 0x086c,
+ QSMFC0 = TSNMHD + 0x0870,
+ QMSPPC0 = TSNMHD + 0x08b0,
+ QMSRPC0 = TSNMHD + 0x08f0,
+ QGPPC0 = TSNMHD + 0x0930,
+ QGRPC0 = TSNMHD + 0x0950,
+ QMDPC0 = TSNMHD + 0x0970,
+ QMGPC0 = TSNMHD + 0x09b0,
+ QMYPC0 = TSNMHD + 0x09f0,
+ QMRPC0 = TSNMHD + 0x0a30,
+ MQSTMACU = TSNMHD + 0x0a70,
+ MQSTMACD = TSNMHD + 0x0a74,
+ MQSTMAMU = TSNMHD + 0x0a78,
+ MQSTMAMD = TSNMHD + 0x0a7c,
+ MQSFTVL = TSNMHD + 0x0a80,
+ MQSFTVLM = TSNMHD + 0x0a84,
+ MQSFTMSD = TSNMHD + 0x0a88,
+ MQSFTGMI = TSNMHD + 0x0a8c,
+
+ CFCR0 = RMSO + 0x0800,
+ FMSCR = RMSO + 0x0c10,
+
+ MMC = RMRO + 0x0000,
+ MPSM = RMRO + 0x0010,
+ MPIC = RMRO + 0x0014,
+ MTFFC = RMRO + 0x0020,
+ MTPFC = RMRO + 0x0024,
+ MTATC0 = RMRO + 0x0040,
+ MRGC = RMRO + 0x0080,
+ MRMAC0 = RMRO + 0x0084,
+ MRMAC1 = RMRO + 0x0088,
+ MRAFC = RMRO + 0x008c,
+ MRSCE = RMRO + 0x0090,
+ MRSCP = RMRO + 0x0094,
+ MRSCC = RMRO + 0x0098,
+ MRFSCE = RMRO + 0x009c,
+ MRFSCP = RMRO + 0x00a0,
+ MTRC = RMRO + 0x00a4,
+ MPFC = RMRO + 0x0100,
+ MLVC = RMRO + 0x0340,
+ MEEEC = RMRO + 0x0350,
+ MLBC = RMRO + 0x0360,
+ MGMR = RMRO + 0x0400,
+ MMPFTCT = RMRO + 0x0410,
+ MAPFTCT = RMRO + 0x0414,
+ MPFRCT = RMRO + 0x0418,
+ MFCICT = RMRO + 0x041c,
+ MEEECT = RMRO + 0x0420,
+ MEIS = RMRO + 0x0500,
+ MEIE = RMRO + 0x0504,
+ MEID = RMRO + 0x0508,
+ MMIS0 = RMRO + 0x0510,
+ MMIE0 = RMRO + 0x0514,
+ MMID0 = RMRO + 0x0518,
+ MMIS1 = RMRO + 0x0520,
+ MMIE1 = RMRO + 0x0524,
+ MMID1 = RMRO + 0x0528,
+ MMIS2 = RMRO + 0x0530,
+ MMIE2 = RMRO + 0x0534,
+ MMID2 = RMRO + 0x0538,
+ MXMS = RMRO + 0x0600,
+
+};
+
+/* AXIBMI */
+#define RR_RATRR BIT(0)
+#define RR_TATRR BIT(1)
+#define RR_RST (RR_RATRR | RR_TATRR)
+#define RR_RST_COMPLETE 0x03
+
+#define AXIWC_DEFAULT 0xffff
+#define AXIRC_DEFAULT 0xffff
+
+#define TATLS0_TEDE BIT(1)
+#define TATLS0_TATEN_SHIFT 24
+#define TATLS0_TATEN(n) ((n) << TATLS0_TATEN_SHIFT)
+#define TATLR_TATL BIT(31)
+
+#define RATLS0_RETS BIT(2)
+#define RATLS0_REDE BIT(3)
+#define RATLS0_RATEN_SHIFT 24
+#define RATLS0_RATEN(n) ((n) << RATLS0_RATEN_SHIFT)
+#define RATLR_RATL BIT(31)
+
+#define DIE_DID_TDICX(n) BIT((n))
+#define DIE_DID_RDICX(n) BIT((n) + 8)
+#define TDIE_TDID_TDX(n) BIT(n)
+#define RDIE_RDID_RDX(n) BIT(n)
+#define TDIS_TDS(n) BIT(n)
+#define RDIS_RDS(n) BIT(n)
+
+/* MHD */
+#define OSR_OPS 0x07
+#define SWR_SWR BIT(0)
+
+#define TGC1_TQTM_SFM 0xff00
+#define TGC1_STTV_DEFAULT 0x03
+
+#define TMS_MFS_MAX 0x2800
+
+/* RMAC System */
+#define CFCR_SDID(n) ((n) << 16)
+#define FMSCR_FMSIE(n) ((n) << 0)
+
+/* RMAC */
+#define MPIC_PIS_MASK GENMASK(1, 0)
+#define MPIC_PIS_MII 0
+#define MPIC_PIS_RMII 0x01
+#define MPIC_PIS_GMII 0x02
+#define MPIC_PIS_RGMII 0x03
+#define MPIC_LSC_SHIFT 2
+#define MPIC_LSC_MASK GENMASK(3, MPIC_LSC_SHIFT)
+#define MPIC_LSC_10M (0 << MPIC_LSC_SHIFT)
+#define MPIC_LSC_100M (0x01 << MPIC_LSC_SHIFT)
+#define MPIC_LSC_1G (0x02 << MPIC_LSC_SHIFT)
+#define MPIC_PSMCS_SHIFT 16
+#define MPIC_PSMCS_MASK GENMASK(21, MPIC_PSMCS_SHIFT)
+#define MPIC_PSMCS_DEFAULT (0x0a << MPIC_PSMCS_SHIFT)
+#define MPIC_PSMHT_SHIFT 24
+#define MPIC_PSMHT_MASK GENMASK(26, MPIC_PSMHT_SHIFT)
+#define MPIC_PSMHT_DEFAULT (0x07 << MPIC_PSMHT_SHIFT)
+
+#define MLVC_PASE BIT(8)
+#define MLVC_PSE BIT(16)
+#define MLVC_PLV BIT(17)
+
+#define MPSM_PSME BIT(0)
+#define MPSM_PSMAD BIT(1)
+#define MPSM_PDA_SHIFT 3
+#define MPSM_PDA_MASK GENMASK(7, 3)
+#define MPSM_PDA(n) (((n) << MPSM_PDA_SHIFT) & MPSM_PDA_MASK)
+#define MPSM_PRA_SHIFT 8
+#define MPSM_PRA_MASK GENMASK(12, 8)
+#define MPSM_PRA(n) (((n) << MPSM_PRA_SHIFT) & MPSM_PRA_MASK)
+#define MPSM_PRD_SHIFT 16
+#define MPSM_PRD_SET(n) ((n) << MPSM_PRD_SHIFT)
+#define MPSM_PRD_GET(n) ((n) >> MPSM_PRD_SHIFT)
+
+#define GPOUT_RDM BIT(13)
+#define GPOUT_TDM BIT(14)
+
+/* RTSN */
+#define RTSN_INTERVAL_US 1000
+#define RTSN_TIMEOUT_US 1000000
+
+#define TX_NUM_CHAINS 1
+#define RX_NUM_CHAINS 1
+
+#define TX_CHAIN_SIZE 1024
+#define RX_CHAIN_SIZE 1024
+
+#define TX_CHAIN_IDX 0
+#define RX_CHAIN_IDX 0
+
+#define TX_CHAIN_ADDR_OFFSET (sizeof(struct rtsn_desc) * TX_CHAIN_IDX)
+#define RX_CHAIN_ADDR_OFFSET (sizeof(struct rtsn_desc) * RX_CHAIN_IDX)
+
+#define PKT_BUF_SZ 1584
+#define RTSN_ALIGN 128
+
+enum rtsn_mode {
+ OCR_OPC_DISABLE,
+ OCR_OPC_CONFIG,
+ OCR_OPC_OPERATION,
+};
+
+/* Descriptors */
+enum RX_DS_CC_BIT {
+ RX_DS = 0x0fff, /* Data size */
+ RX_TR = 0x1000, /* Truncation indication */
+ RX_EI = 0x2000, /* Error indication */
+ RX_PS = 0xc000, /* Padding selection */
+};
+
+enum TX_FS_TAGL_BIT {
+ TX_DS = 0x0fff, /* Data size */
+ TX_TAGL = 0xf000, /* Frame tag LSBs */
+};
+
+enum DIE_DT {
+ /* HW/SW arbitration */
+ DT_FEMPTY_IS = 0x10,
+ DT_FEMPTY_IC = 0x20,
+ DT_FEMPTY_ND = 0x30,
+ DT_FEMPTY = 0x40,
+ DT_FEMPTY_START = 0x50,
+ DT_FEMPTY_MID = 0x60,
+ DT_FEMPTY_END = 0x70,
+
+ /* Frame data */
+ DT_FSINGLE = 0x80,
+ DT_FSTART = 0x90,
+ DT_FMID = 0xa0,
+ DT_FEND = 0xb0,
+
+ /* Chain control */
+ DT_LEMPTY = 0xc0,
+ DT_EEMPTY = 0xd0,
+ DT_LINK = 0xe0,
+ DT_EOS = 0xf0,
+
+ DT_MASK = 0xf0,
+ D_DIE = 0x08,
+};
+
+struct rtsn_desc {
+ __le16 info_ds;
+ __u8 info;
+ u8 die_dt;
+ __le32 dptr;
+} __packed;
+
+struct rtsn_ts_desc {
+ __le16 info_ds;
+ __u8 info;
+ u8 die_dt;
+ __le32 dptr;
+ __le32 ts_nsec;
+ __le32 ts_sec;
+} __packed;
+
+struct rtsn_ext_desc {
+ __le16 info_ds;
+ __u8 info;
+ u8 die_dt;
+ __le32 dptr;
+ __le64 info1;
+} __packed;
+
+struct rtsn_ext_ts_desc {
+ __le16 info_ds;
+ __u8 info;
+ u8 die_dt;
+ __le32 dptr;
+ __le64 info1;
+ __le32 ts_nsec;
+ __le32 ts_sec;
+} __packed;
+
+enum EXT_INFO_DS_BIT {
+ TXC = 0x4000,
+};
+
+#endif
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 0786eb0da391..6fb0ffc1c844 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2233,7 +2233,7 @@ static void sh_eth_get_regs(struct net_device *ndev, struct ethtool_regs *regs,
pm_runtime_get_sync(&mdp->pdev->dev);
__sh_eth_get_regs(ndev, buf);
- pm_runtime_put_sync(&mdp->pdev->dev);
+ pm_runtime_put(&mdp->pdev->dev);
}
static u32 sh_eth_get_msglevel(struct net_device *ndev)
@@ -2360,6 +2360,7 @@ static int sh_eth_set_ringparam(struct net_device *ndev,
return 0;
}
+#ifdef CONFIG_PM_SLEEP
static void sh_eth_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -2386,6 +2387,7 @@ static int sh_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
return 0;
}
+#endif
static const struct ethtool_ops sh_eth_ethtool_ops = {
.get_regs_len = sh_eth_get_regs_len,
@@ -2401,8 +2403,10 @@ static const struct ethtool_ops sh_eth_ethtool_ops = {
.set_ringparam = sh_eth_set_ringparam,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
+#ifdef CONFIG_PM_SLEEP
.get_wol = sh_eth_get_wol,
.set_wol = sh_eth_set_wol,
+#endif
};
/* network device open function */
@@ -2447,7 +2451,7 @@ out_free_irq:
free_irq(ndev->irq, ndev);
out_napi_off:
napi_disable(&mdp->napi);
- pm_runtime_put_sync(&mdp->pdev->dev);
+ pm_runtime_put(&mdp->pdev->dev);
return ret;
}
@@ -2624,7 +2628,7 @@ static int sh_eth_change_mtu(struct net_device *ndev, int new_mtu)
if (netif_running(ndev))
return -EBUSY;
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
netdev_update_features(ndev);
return 0;
@@ -3443,8 +3447,6 @@ static void sh_eth_drv_remove(struct platform_device *pdev)
free_netdev(ndev);
}
-#ifdef CONFIG_PM
-#ifdef CONFIG_PM_SLEEP
static int sh_eth_wol_setup(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -3494,10 +3496,12 @@ static int sh_eth_suspend(struct device *dev)
netif_device_detach(ndev);
+ rtnl_lock();
if (mdp->wol_enabled)
ret = sh_eth_wol_setup(ndev);
else
ret = sh_eth_close(ndev);
+ rtnl_unlock();
return ret;
}
@@ -3511,10 +3515,12 @@ static int sh_eth_resume(struct device *dev)
if (!netif_running(ndev))
return 0;
+ rtnl_lock();
if (mdp->wol_enabled)
ret = sh_eth_wol_restore(ndev);
else
ret = sh_eth_open(ndev);
+ rtnl_unlock();
if (ret < 0)
return ret;
@@ -3523,28 +3529,8 @@ static int sh_eth_resume(struct device *dev)
return ret;
}
-#endif
-
-static int sh_eth_runtime_nop(struct device *dev)
-{
- /* Runtime PM callback shared between ->runtime_suspend()
- * and ->runtime_resume(). Simply returns success.
- *
- * This driver re-initializes all registers after
- * pm_runtime_get_sync() anyway so there is no need
- * to save and restore registers here.
- */
- return 0;
-}
-static const struct dev_pm_ops sh_eth_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(sh_eth_suspend, sh_eth_resume)
- SET_RUNTIME_PM_OPS(sh_eth_runtime_nop, sh_eth_runtime_nop, NULL)
-};
-#define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops)
-#else
-#define SH_ETH_PM_OPS NULL
-#endif
+static DEFINE_SIMPLE_DEV_PM_OPS(sh_eth_dev_pm_ops, sh_eth_suspend, sh_eth_resume);
static const struct platform_device_id sh_eth_id_table[] = {
{ "sh7619-ether", (kernel_ulong_t)&sh7619_data },
@@ -3560,11 +3546,11 @@ MODULE_DEVICE_TABLE(platform, sh_eth_id_table);
static struct platform_driver sh_eth_driver = {
.probe = sh_eth_drv_probe,
- .remove_new = sh_eth_drv_remove,
+ .remove = sh_eth_drv_remove,
.id_table = sh_eth_id_table,
.driver = {
.name = CARDNAME,
- .pm = SH_ETH_PM_OPS,
+ .pm = pm_sleep_ptr(&sh_eth_dev_pm_ops),
.of_match_table = of_match_ptr(sh_eth_match_table),
},
};
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 755db89db909..36af94a2e062 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -1967,7 +1967,7 @@ static int rocker_port_change_mtu(struct net_device *dev, int new_mtu)
rocker_port_stop(dev);
netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu);
if (err)
@@ -2502,7 +2502,7 @@ static void rocker_carrier_init(const struct rocker_port *rocker_port)
u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
bool link_up;
- link_up = link_status & (1 << rocker_port->pport);
+ link_up = link_status & (1ULL << rocker_port->pport);
if (link_up)
netif_carrier_on(rocker_port->dev);
else
@@ -2575,7 +2575,8 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx);
rocker_carrier_init(rocker_port);
- dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG;
+ dev->features |= NETIF_F_SG;
+ dev->netns_immutable = true;
/* MTU range: 68 - 9000 */
dev->min_mtu = ROCKER_PORT_MIN_MTU;
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index 826990459fa4..61e50517c05b 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -1933,7 +1933,7 @@ static int ofdpa_port_fdb(struct ofdpa_port *ofdpa_port,
spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);
/* Check if adding and already exists, or removing and can't find */
- if (!found != !removing) {
+ if (!found == removing) {
kfree(fdb);
if (!found && removing)
return 0;
@@ -1982,7 +1982,7 @@ err_out:
static void ofdpa_fdb_cleanup(struct timer_list *t)
{
- struct ofdpa *ofdpa = from_timer(ofdpa, t, fdb_cleanup_timer);
+ struct ofdpa *ofdpa = timer_container_of(ofdpa, t, fdb_cleanup_timer);
struct ofdpa_port *ofdpa_port;
struct ofdpa_fdb_tbl_entry *entry;
struct hlist_node *tmp;
@@ -2386,7 +2386,7 @@ static void ofdpa_fini(struct rocker *rocker)
struct hlist_node *tmp;
int bkt;
- del_timer_sync(&ofdpa->fdb_cleanup_timer);
+ timer_delete_sync(&ofdpa->fdb_cleanup_timer);
flush_workqueue(rocker->rocker_owq);
spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags);
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
index 4a439b34114d..ad73733644f9 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
@@ -308,8 +308,8 @@ static int sxgbe_set_coalesce(struct net_device *dev,
return 0;
}
-static int sxgbe_get_rss_hash_opts(struct sxgbe_priv_data *priv,
- struct ethtool_rxnfc *cmd)
+static int sxgbe_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *cmd)
{
cmd->data = 0;
@@ -344,26 +344,11 @@ static int sxgbe_get_rss_hash_opts(struct sxgbe_priv_data *priv,
return 0;
}
-static int sxgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
- u32 *rule_locs)
+static int sxgbe_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
{
struct sxgbe_priv_data *priv = netdev_priv(dev);
- int ret = -EOPNOTSUPP;
-
- switch (cmd->cmd) {
- case ETHTOOL_GRXFH:
- ret = sxgbe_get_rss_hash_opts(priv, cmd);
- break;
- default:
- break;
- }
-
- return ret;
-}
-
-static int sxgbe_set_rss_hash_opt(struct sxgbe_priv_data *priv,
- struct ethtool_rxnfc *cmd)
-{
u32 reg_val = 0;
/* RSS does not support anything other than hashing
@@ -421,22 +406,6 @@ static int sxgbe_set_rss_hash_opt(struct sxgbe_priv_data *priv,
return 0;
}
-static int sxgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
-{
- struct sxgbe_priv_data *priv = netdev_priv(dev);
- int ret = -EOPNOTSUPP;
-
- switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- ret = sxgbe_set_rss_hash_opt(priv, cmd);
- break;
- default:
- break;
- }
-
- return ret;
-}
-
static void sxgbe_get_regs(struct net_device *dev,
struct ethtool_regs *regs, void *space)
{
@@ -489,8 +458,8 @@ static const struct ethtool_ops sxgbe_ethtool_ops = {
.get_channels = sxgbe_get_channels,
.get_coalesce = sxgbe_get_coalesce,
.set_coalesce = sxgbe_set_coalesce,
- .get_rxnfc = sxgbe_get_rxnfc,
- .set_rxnfc = sxgbe_set_rxnfc,
+ .get_rxfh_fields = sxgbe_get_rxfh_fields,
+ .set_rxfh_fields = sxgbe_set_rxfh_fields,
.get_regs = sxgbe_get_regs,
.get_regs_len = sxgbe_get_regs_len,
.get_eee = sxgbe_get_eee,
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index ecbe3994f2b1..849c5a6c2af1 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -91,7 +91,7 @@ void sxgbe_disable_eee_mode(struct sxgbe_priv_data * const priv)
{
/* Exit and disable EEE in case of we are in LPI state. */
priv->hw->mac->reset_eee_mode(priv->ioaddr);
- del_timer_sync(&priv->eee_ctrl_timer);
+ timer_delete_sync(&priv->eee_ctrl_timer);
priv->tx_path_in_lpi_mode = false;
}
@@ -104,7 +104,8 @@ void sxgbe_disable_eee_mode(struct sxgbe_priv_data * const priv)
*/
static void sxgbe_eee_ctrl_timer(struct timer_list *t)
{
- struct sxgbe_priv_data *priv = from_timer(priv, t, eee_ctrl_timer);
+ struct sxgbe_priv_data *priv = timer_container_of(priv, t,
+ eee_ctrl_timer);
sxgbe_enable_eee_mode(priv);
mod_timer(&priv->eee_ctrl_timer, SXGBE_LPI_TIMER(eee_timer));
@@ -1012,7 +1013,7 @@ static void sxgbe_disable_mtl_engine(struct sxgbe_priv_data *priv)
*/
static void sxgbe_tx_timer(struct timer_list *t)
{
- struct sxgbe_tx_queue *p = from_timer(p, t, txtimer);
+ struct sxgbe_tx_queue *p = timer_container_of(p, t, txtimer);
sxgbe_tx_queue_clean(p);
}
@@ -1044,7 +1045,7 @@ static void sxgbe_tx_del_timer(struct sxgbe_priv_data *priv)
SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
struct sxgbe_tx_queue *p = priv->txq[queue_num];
- del_timer_sync(&p->txtimer);
+ timer_delete_sync(&p->txtimer);
}
}
@@ -1208,7 +1209,7 @@ static int sxgbe_release(struct net_device *dev)
struct sxgbe_priv_data *priv = netdev_priv(dev);
if (priv->eee_enabled)
- del_timer_sync(&priv->eee_ctrl_timer);
+ timer_delete_sync(&priv->eee_ctrl_timer);
/* Stop and disconnect the PHY */
if (dev->phydev) {
@@ -1520,8 +1521,10 @@ static int sxgbe_rx(struct sxgbe_priv_data *priv, int limit)
skb = priv->rxq[qnum]->rx_skbuff[entry];
- if (unlikely(!skb))
+ if (unlikely(!skb)) {
netdev_err(priv->dev, "rx descriptor is not consistent\n");
+ break;
+ }
prefetch(skb->data - NET_IP_ALIGN);
priv->rxq[qnum]->rx_skbuff[entry] = NULL;
@@ -1804,7 +1807,7 @@ static int sxgbe_set_features(struct net_device *dev,
*/
static int sxgbe_change_mtu(struct net_device *dev, int new_mtu)
{
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
if (!netif_running(dev))
return 0;
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
index e6e130dbe1de..2eccc7617507 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
@@ -224,7 +224,7 @@ MODULE_DEVICE_TABLE(of, sxgbe_dt_ids);
static struct platform_driver sxgbe_platform_driver = {
.probe = sxgbe_platform_probe,
- .remove_new = sxgbe_platform_remove,
+ .remove = sxgbe_platform_remove,
.driver = {
.name = SXGBE_RESOURCE_NAME,
.pm = &sxgbe_platform_pm_ops,
diff --git a/drivers/net/ethernet/seeq/ether3.c b/drivers/net/ethernet/seeq/ether3.c
index c672f92d65e9..20dad39b5ab9 100644
--- a/drivers/net/ethernet/seeq/ether3.c
+++ b/drivers/net/ethernet/seeq/ether3.c
@@ -170,7 +170,7 @@ ether3_setbuffer(struct net_device *dev, buffer_rw_t read, int start)
*/
static void ether3_ledoff(struct timer_list *t)
{
- struct dev_priv *private = from_timer(private, t, timer);
+ struct dev_priv *private = timer_container_of(private, t, timer);
struct net_device *dev = private->dev;
ether3_outw(priv(dev)->regs.config2 |= CFG2_CTRLO, REG_CONFIG2);
@@ -181,7 +181,7 @@ static void ether3_ledoff(struct timer_list *t)
*/
static inline void ether3_ledon(struct net_device *dev)
{
- del_timer(&priv(dev)->timer);
+ timer_delete(&priv(dev)->timer);
priv(dev)->timer.expires = jiffies + HZ / 50; /* leave on for 1/50th second */
add_timer(&priv(dev)->timer);
if (priv(dev)->regs.config2 & CFG2_CTRLO)
@@ -454,7 +454,7 @@ static void ether3_timeout(struct net_device *dev, unsigned int txqueue)
{
unsigned long flags;
- del_timer(&priv(dev)->timer);
+ timer_delete(&priv(dev)->timer);
local_irq_save(flags);
printk(KERN_ERR "%s: transmit timed out, network cable problem?\n", dev->name);
@@ -847,9 +847,11 @@ static void ether3_remove(struct expansion_card *ec)
{
struct net_device *dev = ecard_get_drvdata(ec);
+ ether3_outw(priv(dev)->regs.config2 |= CFG2_CTRLO, REG_CONFIG2);
ecard_set_drvdata(ec, NULL);
unregister_netdev(dev);
+ timer_delete_sync(&priv(dev)->timer);
free_netdev(dev);
ecard_release_resources(ec);
}
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index 76356dadf233..7967a0ee320b 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -832,7 +832,7 @@ static void sgiseeq_remove(struct platform_device *pdev)
static struct platform_driver sgiseeq_driver = {
.probe = sgiseeq_probe,
- .remove_new = sgiseeq_remove,
+ .remove = sgiseeq_remove,
.driver = {
.name = "sgiseeq",
}
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
index 3eb55dcfa8a6..c4c43434f314 100644
--- a/drivers/net/ethernet/sfc/Kconfig
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -38,8 +38,9 @@ config SFC_MTD
default y
help
This exposes the on-board flash and/or EEPROM as MTD devices
- (e.g. /dev/mtd1). This is required to update the firmware or
- the boot configuration under Linux.
+ (e.g. /dev/mtd1). This is required to update the boot
+ configuration under Linux, or use some older userland tools to
+ update the firmware.
config SFC_MCDI_MON
bool "Solarflare SFC9100-family hwmon support"
depends on SFC && HWMON && !(SFC=y && HWMON=m)
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index 8f446b9bd5ee..d99039ec468d 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -7,7 +7,7 @@ sfc-y += efx.o efx_common.o efx_channels.o nic.o \
mcdi_functions.o mcdi_filters.o mcdi_mon.o \
ef100.o ef100_nic.o ef100_netdev.o \
ef100_ethtool.o ef100_rx.o ef100_tx.o \
- efx_devlink.o
+ efx_devlink.o efx_reflash.o
sfc-$(CONFIG_SFC_MTD) += mtd.o
sfc-$(CONFIG_SFC_SRIOV) += sriov.o ef10_sriov.o ef100_sriov.o ef100_rep.o \
mae.o tc.o tc_bindings.o tc_counters.o \
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 8fa6c0e9195b..fcec81f862ec 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -1396,7 +1396,7 @@ static void efx_ef10_table_reset_mc_allocations(struct efx_nic *efx)
efx_mcdi_filter_table_reset_mc_allocations(efx);
nic_data->must_restore_piobufs = true;
efx_ef10_forget_old_piobufs(efx);
- efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
+ efx->rss_context.priv.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
/* Driver-created vswitches and vports must be re-created */
nic_data->must_probe_vswitching = true;
@@ -1751,7 +1751,7 @@ static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
#endif
}
-static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
+static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 **names)
{
DECLARE_BITMAP(mask, EF10_STAT_COUNT);
@@ -3501,7 +3501,7 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX);
const struct efx_ef10_nvram_type_info *info;
- size_t size, erase_size, outlen;
+ size_t size, erase_size, write_size, outlen;
int type_idx = 0;
bool protected;
int rc;
@@ -3516,7 +3516,8 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
if (info->port != efx_port_num(efx))
return -ENODEV;
- rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
+ rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &write_size,
+ &protected);
if (rc)
return rc;
if (protected &&
@@ -3561,6 +3562,8 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
if (!erase_size)
part->common.mtd.flags |= MTD_NO_ERASE;
+ part->common.mtd.writesize = write_size;
+
return 0;
}
@@ -3982,7 +3985,6 @@ static int efx_ef10_udp_tnl_unset_port(struct net_device *dev,
static const struct udp_tunnel_nic_info efx_ef10_udp_tunnels = {
.set_port = efx_ef10_udp_tnl_set_port,
.unset_port = efx_ef10_udp_tnl_unset_port,
- .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
.tables = {
{
.n_entries = 16,
@@ -4302,3 +4304,131 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.sensor_event = efx_mcdi_sensor_event,
.rx_recycle_ring_size = efx_ef10_recycle_ring_size,
};
+
+const struct efx_nic_type efx_x4_nic_type = {
+ .is_vf = false,
+ .mem_bar = efx_ef10_pf_mem_bar,
+ .mem_map_size = efx_ef10_mem_map_size,
+ .probe = efx_ef10_probe_pf,
+ .remove = efx_ef10_remove,
+ .dimension_resources = efx_ef10_dimension_resources,
+ .init = efx_ef10_init_nic,
+ .fini = efx_ef10_fini_nic,
+ .map_reset_reason = efx_ef10_map_reset_reason,
+ .map_reset_flags = efx_ef10_map_reset_flags,
+ .reset = efx_ef10_reset,
+ .probe_port = efx_mcdi_port_probe,
+ .remove_port = efx_mcdi_port_remove,
+ .fini_dmaq = efx_fini_dmaq,
+ .prepare_flr = efx_ef10_prepare_flr,
+ .finish_flr = efx_port_dummy_op_void,
+ .describe_stats = efx_ef10_describe_stats,
+ .update_stats = efx_ef10_update_stats_pf,
+ .start_stats = efx_mcdi_mac_start_stats,
+ .pull_stats = efx_mcdi_mac_pull_stats,
+ .stop_stats = efx_mcdi_mac_stop_stats,
+ .push_irq_moderation = efx_ef10_push_irq_moderation,
+ .reconfigure_mac = efx_ef10_mac_reconfigure,
+ .check_mac_fault = efx_mcdi_mac_check_fault,
+ .reconfigure_port = efx_mcdi_port_reconfigure,
+ .get_wol = efx_ef10_get_wol,
+ .set_wol = efx_ef10_set_wol,
+ .resume_wol = efx_port_dummy_op_void,
+ .get_fec_stats = efx_ef10_get_fec_stats,
+ .test_chip = efx_ef10_test_chip,
+ .test_nvram = efx_mcdi_nvram_test_all,
+ .mcdi_request = efx_ef10_mcdi_request,
+ .mcdi_poll_response = efx_ef10_mcdi_poll_response,
+ .mcdi_read_response = efx_ef10_mcdi_read_response,
+ .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
+ .mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected,
+ .irq_enable_master = efx_port_dummy_op_void,
+ .irq_test_generate = efx_ef10_irq_test_generate,
+ .irq_disable_non_ev = efx_port_dummy_op_void,
+ .irq_handle_msi = efx_ef10_msi_interrupt,
+ .tx_probe = efx_ef10_tx_probe,
+ .tx_init = efx_ef10_tx_init,
+ .tx_write = efx_ef10_tx_write,
+ .tx_limit_len = efx_ef10_tx_limit_len,
+ .tx_enqueue = __efx_enqueue_skb,
+ .rx_push_rss_config = efx_mcdi_pf_rx_push_rss_config,
+ .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config,
+ .rx_push_rss_context_config = efx_mcdi_rx_push_rss_context_config,
+ .rx_pull_rss_context_config = efx_mcdi_rx_pull_rss_context_config,
+ .rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts,
+ .rx_probe = efx_mcdi_rx_probe,
+ .rx_init = efx_mcdi_rx_init,
+ .rx_remove = efx_mcdi_rx_remove,
+ .rx_write = efx_ef10_rx_write,
+ .rx_defer_refill = efx_ef10_rx_defer_refill,
+ .rx_packet = __efx_rx_packet,
+ .ev_probe = efx_mcdi_ev_probe,
+ .ev_init = efx_ef10_ev_init,
+ .ev_fini = efx_mcdi_ev_fini,
+ .ev_remove = efx_mcdi_ev_remove,
+ .ev_process = efx_ef10_ev_process,
+ .ev_read_ack = efx_ef10_ev_read_ack,
+ .ev_test_generate = efx_ef10_ev_test_generate,
+ .filter_table_probe = efx_ef10_filter_table_probe,
+ .filter_table_restore = efx_mcdi_filter_table_restore,
+ .filter_table_remove = efx_ef10_filter_table_remove,
+ .filter_insert = efx_mcdi_filter_insert,
+ .filter_remove_safe = efx_mcdi_filter_remove_safe,
+ .filter_get_safe = efx_mcdi_filter_get_safe,
+ .filter_clear_rx = efx_mcdi_filter_clear_rx,
+ .filter_count_rx_used = efx_mcdi_filter_count_rx_used,
+ .filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit,
+ .filter_get_rx_ids = efx_mcdi_filter_get_rx_ids,
+#ifdef CONFIG_RFS_ACCEL
+ .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one,
+#endif
+#ifdef CONFIG_SFC_MTD
+ .mtd_probe = efx_ef10_mtd_probe,
+ .mtd_rename = efx_mcdi_mtd_rename,
+ .mtd_read = efx_mcdi_mtd_read,
+ .mtd_erase = efx_mcdi_mtd_erase,
+ .mtd_write = efx_mcdi_mtd_write,
+ .mtd_sync = efx_mcdi_mtd_sync,
+#endif
+ .ptp_write_host_time = efx_ef10_ptp_write_host_time,
+ .ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events,
+ .ptp_set_ts_config = efx_ef10_ptp_set_ts_config,
+ .vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid,
+ .vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid,
+ .udp_tnl_push_ports = efx_ef10_udp_tnl_push_ports,
+ .udp_tnl_has_port = efx_ef10_udp_tnl_has_port,
+#ifdef CONFIG_SFC_SRIOV
+ /* currently set to the VF versions of these functions
+ * because SRIOV will be reimplemented later.
+ */
+ .vswitching_probe = efx_ef10_vswitching_probe_vf,
+ .vswitching_restore = efx_ef10_vswitching_restore_vf,
+ .vswitching_remove = efx_ef10_vswitching_remove_vf,
+#endif
+ .get_mac_address = efx_ef10_get_mac_address_pf,
+ .set_mac_address = efx_ef10_set_mac_address,
+ .tso_versions = efx_ef10_tso_versions,
+
+ .get_phys_port_id = efx_ef10_get_phys_port_id,
+ .revision = EFX_REV_X4,
+ .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
+ .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
+ .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
+ .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
+ .can_rx_scatter = true,
+ .always_rx_scatter = true,
+ .option_descriptors = true,
+ .flash_auto_partition = true,
+ .min_interrupt_mode = EFX_INT_MODE_MSIX,
+ .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
+ .offload_features = EF10_OFFLOAD_FEATURES,
+ .mcdi_max_ver = 2,
+ .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS,
+ .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
+ 1 << HWTSTAMP_FILTER_ALL,
+ .check_caps = ef10_check_caps,
+ .print_additional_fwver = efx_ef10_print_additional_fwver,
+ .sensor_event = efx_mcdi_sensor_event,
+ .rx_recycle_ring_size = efx_ef10_recycle_ring_size,
+};
+
diff --git a/drivers/net/ethernet/sfc/ef100_ethtool.c b/drivers/net/ethernet/sfc/ef100_ethtool.c
index cf55202b3a7b..6c3b74000d3b 100644
--- a/drivers/net/ethernet/sfc/ef100_ethtool.c
+++ b/drivers/net/ethernet/sfc/ef100_ethtool.c
@@ -37,7 +37,6 @@ ef100_ethtool_get_ringparam(struct net_device *net_dev,
/* Ethtool options available
*/
const struct ethtool_ops ef100_ethtool_ops = {
- .cap_rss_ctx_supported = true,
.get_drvinfo = efx_ethtool_get_drvinfo,
.get_msglevel = efx_ethtool_get_msglevel,
.set_msglevel = efx_ethtool_set_msglevel,
@@ -59,8 +58,14 @@ const struct ethtool_ops ef100_ethtool_ops = {
.get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size,
.get_rxfh_key_size = efx_ethtool_get_rxfh_key_size,
+ .rxfh_per_ctx_key = true,
+ .cap_rss_rxnfc_adds = true,
+ .rxfh_priv_size = sizeof(struct efx_rss_context_priv),
.get_rxfh = efx_ethtool_get_rxfh,
.set_rxfh = efx_ethtool_set_rxfh,
+ .create_rxfh_context = efx_ethtool_create_rxfh_context,
+ .modify_rxfh_context = efx_ethtool_modify_rxfh_context,
+ .remove_rxfh_context = efx_ethtool_remove_rxfh_context,
.get_module_info = efx_ethtool_get_module_info,
.get_module_eeprom = efx_ethtool_get_module_eeprom,
diff --git a/drivers/net/ethernet/sfc/ef100_netdev.c b/drivers/net/ethernet/sfc/ef100_netdev.c
index 7f7d560cb2b4..3a06e3b1bd6b 100644
--- a/drivers/net/ethernet/sfc/ef100_netdev.c
+++ b/drivers/net/ethernet/sfc/ef100_netdev.c
@@ -450,9 +450,9 @@ int ef100_probe_netdev(struct efx_probe_data *probe_data)
net_dev->hw_enc_features |= efx->type->offload_features;
net_dev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_SG |
NETIF_F_HIGHDMA | NETIF_F_ALL_TSO;
- netif_set_tso_max_segs(net_dev,
- ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS_DEFAULT);
- efx->mdio.dev = net_dev;
+ nic_data = efx->nic_data;
+ netif_set_tso_max_size(efx->net_dev, nic_data->tso_max_payload_len);
+ netif_set_tso_max_segs(efx->net_dev, nic_data->tso_max_payload_num_segs);
rc = efx_ef100_init_datapath_caps(efx);
if (rc < 0)
@@ -478,7 +478,6 @@ int ef100_probe_netdev(struct efx_probe_data *probe_data)
/* Don't fail init if RSS setup doesn't work. */
efx_mcdi_push_default_indir_table(efx, efx->n_rx_channels);
- nic_data = efx->nic_data;
rc = ef100_get_mac_address(efx, net_dev->perm_addr, CLIENT_HANDLE_SELF,
efx->type->is_vf);
if (rc)
diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c
index 6da06931187d..3ad95a4c8af2 100644
--- a/drivers/net/ethernet/sfc/ef100_nic.c
+++ b/drivers/net/ethernet/sfc/ef100_nic.c
@@ -583,7 +583,7 @@ static const struct efx_hw_stat_desc ef100_stat_desc[EF100_STAT_COUNT] = {
EFX_GENERIC_SW_STAT(rx_noskb_drops),
};
-static size_t ef100_describe_stats(struct efx_nic *efx, u8 *names)
+static size_t ef100_describe_stats(struct efx_nic *efx, u8 **names)
{
DECLARE_BITMAP(mask, EF100_STAT_COUNT) = {};
@@ -887,8 +887,7 @@ static int ef100_process_design_param(struct efx_nic *efx,
case ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS:
/* We always put HDR_NUM_SEGS=1 in our TSO descriptors */
if (!reader->value) {
- netif_err(efx, probe, efx->net_dev,
- "TSO_MAX_HDR_NUM_SEGS < 1\n");
+ pci_err(efx->pci_dev, "TSO_MAX_HDR_NUM_SEGS < 1\n");
return -EOPNOTSUPP;
}
return 0;
@@ -901,32 +900,28 @@ static int ef100_process_design_param(struct efx_nic *efx,
*/
if (!reader->value || reader->value > EFX_MIN_DMAQ_SIZE ||
EFX_MIN_DMAQ_SIZE % (u32)reader->value) {
- netif_err(efx, probe, efx->net_dev,
- "%s size granularity is %llu, can't guarantee safety\n",
- reader->type == ESE_EF100_DP_GZ_RXQ_SIZE_GRANULARITY ? "RXQ" : "TXQ",
- reader->value);
+ pci_err(efx->pci_dev,
+ "%s size granularity is %llu, can't guarantee safety\n",
+ reader->type == ESE_EF100_DP_GZ_RXQ_SIZE_GRANULARITY ? "RXQ" : "TXQ",
+ reader->value);
return -EOPNOTSUPP;
}
return 0;
case ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_LEN:
nic_data->tso_max_payload_len = min_t(u64, reader->value,
GSO_LEGACY_MAX_SIZE);
- netif_set_tso_max_size(efx->net_dev,
- nic_data->tso_max_payload_len);
return 0;
case ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_NUM_SEGS:
nic_data->tso_max_payload_num_segs = min_t(u64, reader->value, 0xffff);
- netif_set_tso_max_segs(efx->net_dev,
- nic_data->tso_max_payload_num_segs);
return 0;
case ESE_EF100_DP_GZ_TSO_MAX_NUM_FRAMES:
nic_data->tso_max_frames = min_t(u64, reader->value, 0xffff);
return 0;
case ESE_EF100_DP_GZ_COMPAT:
if (reader->value) {
- netif_err(efx, probe, efx->net_dev,
- "DP_COMPAT has unknown bits %#llx, driver not compatible with this hw\n",
- reader->value);
+ pci_err(efx->pci_dev,
+ "DP_COMPAT has unknown bits %#llx, driver not compatible with this hw\n",
+ reader->value);
return -EOPNOTSUPP;
}
return 0;
@@ -946,10 +941,10 @@ static int ef100_process_design_param(struct efx_nic *efx,
* So the value of this shouldn't matter.
*/
if (reader->value != ESE_EF100_DP_GZ_VI_STRIDES_DEFAULT)
- netif_dbg(efx, probe, efx->net_dev,
- "NIC has other than default VI_STRIDES (mask "
- "%#llx), early probing might use wrong one\n",
- reader->value);
+ pci_dbg(efx->pci_dev,
+ "NIC has other than default VI_STRIDES (mask "
+ "%#llx), early probing might use wrong one\n",
+ reader->value);
return 0;
case ESE_EF100_DP_GZ_RX_MAX_RUNT:
/* Driver doesn't look at L2_STATUS:LEN_ERR bit, so we don't
@@ -961,9 +956,9 @@ static int ef100_process_design_param(struct efx_nic *efx,
/* Host interface says "Drivers should ignore design parameters
* that they do not recognise."
*/
- netif_dbg(efx, probe, efx->net_dev,
- "Ignoring unrecognised design parameter %u\n",
- reader->type);
+ pci_dbg(efx->pci_dev,
+ "Ignoring unrecognised design parameter %u\n",
+ reader->type);
return 0;
}
}
@@ -999,13 +994,13 @@ static int ef100_check_design_params(struct efx_nic *efx)
*/
if (reader.state != EF100_TLV_TYPE) {
if (reader.state == EF100_TLV_TYPE_CONT)
- netif_err(efx, probe, efx->net_dev,
- "truncated design parameter (incomplete type %u)\n",
- reader.type);
+ pci_err(efx->pci_dev,
+ "truncated design parameter (incomplete type %u)\n",
+ reader.type);
else
- netif_err(efx, probe, efx->net_dev,
- "truncated design parameter %u\n",
- reader.type);
+ pci_err(efx->pci_dev,
+ "truncated design parameter %u\n",
+ reader.type);
rc = -EIO;
}
out:
diff --git a/drivers/net/ethernet/sfc/ef100_rep.c b/drivers/net/ethernet/sfc/ef100_rep.c
index 0b3083ef0ead..e923e1796369 100644
--- a/drivers/net/ethernet/sfc/ef100_rep.c
+++ b/drivers/net/ethernet/sfc/ef100_rep.c
@@ -233,8 +233,8 @@ static struct efx_rep *efx_ef100_rep_create_netdev(struct efx_nic *efx,
net_dev->ethtool_ops = &efx_ef100_rep_ethtool_ops;
net_dev->min_mtu = EFX_MIN_MTU;
net_dev->max_mtu = EFX_MAX_MTU;
- net_dev->features |= NETIF_F_LLTX;
- net_dev->hw_features |= NETIF_F_LLTX;
+ net_dev->lltx = true;
+
return efv;
fail1:
free_netdev(net_dev);
diff --git a/drivers/net/ethernet/sfc/ef100_rx.c b/drivers/net/ethernet/sfc/ef100_rx.c
index 83d9db71d7d7..44dc75feb162 100644
--- a/drivers/net/ethernet/sfc/ef100_rx.c
+++ b/drivers/net/ethernet/sfc/ef100_rx.c
@@ -134,6 +134,9 @@ void __ef100_rx_packet(struct efx_channel *channel)
goto free_rx_buffer;
}
+ ++rx_queue->rx_packets;
+ rx_queue->rx_bytes += rx_buf->len;
+
efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh, csum);
goto out;
@@ -149,8 +152,6 @@ static void ef100_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index)
struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
struct efx_nic *efx = rx_queue->efx;
- ++rx_queue->rx_packets;
-
netif_vdbg(efx, rx_status, efx->net_dev,
"RX queue %d received id %x\n",
efx_rx_queue_index(rx_queue), index);
diff --git a/drivers/net/ethernet/sfc/ef100_tx.c b/drivers/net/ethernet/sfc/ef100_tx.c
index e6b6be549581..03005757c060 100644
--- a/drivers/net/ethernet/sfc/ef100_tx.c
+++ b/drivers/net/ethernet/sfc/ef100_tx.c
@@ -189,6 +189,7 @@ static void ef100_make_tso_desc(struct efx_nic *efx,
{
bool gso_partial = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL;
unsigned int len, ip_offset, tcp_offset, payload_segs;
+ u32 mangleid_outer = ESE_GZ_TX_DESC_IP4_ID_INC_MOD16;
u32 mangleid = ESE_GZ_TX_DESC_IP4_ID_INC_MOD16;
unsigned int outer_ip_offset, outer_l4_offset;
u16 vlan_tci = skb_vlan_tag_get(skb);
@@ -200,8 +201,17 @@ static void ef100_make_tso_desc(struct efx_nic *efx,
bool outer_csum;
u32 paylen;
- if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID)
- mangleid = ESE_GZ_TX_DESC_IP4_ID_NO_OP;
+ if (encap) {
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID_INNER)
+ mangleid = ESE_GZ_TX_DESC_IP4_ID_NO_OP;
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID)
+ mangleid_outer = ESE_GZ_TX_DESC_IP4_ID_NO_OP;
+ } else {
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID)
+ mangleid = ESE_GZ_TX_DESC_IP4_ID_NO_OP;
+ mangleid_outer = ESE_GZ_TX_DESC_IP4_ID_NO_OP;
+ }
+
if (efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_TX)
vlan_enable = skb_vlan_tag_present(skb);
@@ -245,8 +255,7 @@ static void ef100_make_tso_desc(struct efx_nic *efx,
ESF_GZ_TX_TSO_OUTER_L4_OFF_W, outer_l4_offset >> 1,
ESF_GZ_TX_TSO_ED_OUTER_UDP_LEN, udp_encap && !gso_partial,
ESF_GZ_TX_TSO_ED_OUTER_IP_LEN, encap && !gso_partial,
- ESF_GZ_TX_TSO_ED_OUTER_IP4_ID, encap ? mangleid :
- ESE_GZ_TX_DESC_IP4_ID_NO_OP,
+ ESF_GZ_TX_TSO_ED_OUTER_IP4_ID, mangleid_outer,
ESF_GZ_TX_TSO_VLAN_INSERT_EN, vlan_enable,
ESF_GZ_TX_TSO_VLAN_INSERT_TCI, vlan_tci
);
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index e9d9de8e648a..112e55b98ed3 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -22,6 +22,7 @@
#include "net_driver.h"
#include <net/gre.h>
#include <net/udp_tunnel.h>
+#include <net/netdev_queues.h>
#include "efx.h"
#include "efx_common.h"
#include "efx_channels.h"
@@ -299,7 +300,7 @@ static int efx_probe_nic(struct efx_nic *efx)
if (efx->n_channels > 1)
netdev_rss_key_fill(efx->rss_context.rx_hash_key,
sizeof(efx->rss_context.rx_hash_key));
- efx_set_default_rx_indir_table(efx, &efx->rss_context);
+ efx_set_default_rx_indir_table(efx, efx->rss_context.rx_indir_table);
/* Initialise the interrupt moderation settings */
efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000);
@@ -417,14 +418,6 @@ unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs)
return usecs * 1000 / efx->timer_quantum_ns;
}
-unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks)
-{
- /* We must round up when converting ticks to microseconds
- * because we round down when converting the other way.
- */
- return DIV_ROUND_UP(ticks * efx->timer_quantum_ns, 1000);
-}
-
/* Set interrupt moderation parameters */
int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
unsigned int rx_usecs, bool rx_adaptive,
@@ -483,28 +476,6 @@ void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
/**************************************************************************
*
- * ioctls
- *
- *************************************************************************/
-
-/* Net device ioctl
- * Context: process, rtnl_lock() held.
- */
-static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
-{
- struct efx_nic *efx = efx_netdev_priv(net_dev);
- struct mii_ioctl_data *data = if_mii(ifr);
-
- /* Convert phy_id from older PRTAD/DEVAD format */
- if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
- (data->phy_id & 0xfc00) == 0x0400)
- data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;
-
- return mdio_mii_ioctl(&efx->mdio, data, cmd);
-}
-
-/**************************************************************************
- *
* Kernel net device interface
*
*************************************************************************/
@@ -600,7 +571,6 @@ static const struct net_device_ops efx_netdev_ops = {
.ndo_tx_timeout = efx_watchdog,
.ndo_start_xmit = efx_hard_start_xmit,
.ndo_validate_addr = eth_validate_addr,
- .ndo_eth_ioctl = efx_ioctl,
.ndo_change_mtu = efx_change_mtu,
.ndo_set_mac_address = efx_set_mac_address,
.ndo_set_rx_mode = efx_set_rx_mode,
@@ -626,6 +596,113 @@ static const struct net_device_ops efx_netdev_ops = {
.ndo_bpf = efx_xdp
};
+static void efx_get_queue_stats_rx(struct net_device *net_dev, int idx,
+ struct netdev_queue_stats_rx *stats)
+{
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
+ struct efx_rx_queue *rx_queue;
+ struct efx_channel *channel;
+
+ channel = efx_get_channel(efx, idx);
+ rx_queue = efx_channel_get_rx_queue(channel);
+ /* Count only packets since last time datapath was started */
+ stats->packets = rx_queue->rx_packets - rx_queue->old_rx_packets;
+ stats->bytes = rx_queue->rx_bytes - rx_queue->old_rx_bytes;
+ stats->hw_drops = efx_get_queue_stat_rx_hw_drops(channel) -
+ channel->old_n_rx_hw_drops;
+ stats->hw_drop_overruns = channel->n_rx_nodesc_trunc -
+ channel->old_n_rx_hw_drop_overruns;
+}
+
+static void efx_get_queue_stats_tx(struct net_device *net_dev, int idx,
+ struct netdev_queue_stats_tx *stats)
+{
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
+ struct efx_tx_queue *tx_queue;
+ struct efx_channel *channel;
+
+ channel = efx_get_tx_channel(efx, idx);
+ stats->packets = 0;
+ stats->bytes = 0;
+ stats->hw_gso_packets = 0;
+ stats->hw_gso_wire_packets = 0;
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ stats->packets += tx_queue->complete_packets -
+ tx_queue->old_complete_packets;
+ stats->bytes += tx_queue->complete_bytes -
+ tx_queue->old_complete_bytes;
+ /* Note that, unlike stats->packets and stats->bytes,
+ * these count TXes enqueued, rather than completed,
+ * which may not be what users expect.
+ */
+ stats->hw_gso_packets += tx_queue->tso_bursts -
+ tx_queue->old_tso_bursts;
+ stats->hw_gso_wire_packets += tx_queue->tso_packets -
+ tx_queue->old_tso_packets;
+ }
+}
+
+static void efx_get_base_stats(struct net_device *net_dev,
+ struct netdev_queue_stats_rx *rx,
+ struct netdev_queue_stats_tx *tx)
+{
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+ struct efx_channel *channel;
+
+ rx->packets = 0;
+ rx->bytes = 0;
+ rx->hw_drops = 0;
+ rx->hw_drop_overruns = 0;
+ tx->packets = 0;
+ tx->bytes = 0;
+ tx->hw_gso_packets = 0;
+ tx->hw_gso_wire_packets = 0;
+
+ /* Count all packets on non-core queues, and packets before last
+ * datapath start on core queues.
+ */
+ efx_for_each_channel(channel, efx) {
+ rx_queue = efx_channel_get_rx_queue(channel);
+ if (channel->channel >= net_dev->real_num_rx_queues) {
+ rx->packets += rx_queue->rx_packets;
+ rx->bytes += rx_queue->rx_bytes;
+ rx->hw_drops += efx_get_queue_stat_rx_hw_drops(channel);
+ rx->hw_drop_overruns += channel->n_rx_nodesc_trunc;
+ } else {
+ rx->packets += rx_queue->old_rx_packets;
+ rx->bytes += rx_queue->old_rx_bytes;
+ rx->hw_drops += channel->old_n_rx_hw_drops;
+ rx->hw_drop_overruns += channel->old_n_rx_hw_drop_overruns;
+ }
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ if (channel->channel < efx->tx_channel_offset ||
+ channel->channel >= efx->tx_channel_offset +
+ net_dev->real_num_tx_queues) {
+ tx->packets += tx_queue->complete_packets;
+ tx->bytes += tx_queue->complete_bytes;
+ tx->hw_gso_packets += tx_queue->tso_bursts;
+ tx->hw_gso_wire_packets += tx_queue->tso_packets;
+ } else {
+ tx->packets += tx_queue->old_complete_packets;
+ tx->bytes += tx_queue->old_complete_bytes;
+ tx->hw_gso_packets += tx_queue->old_tso_bursts;
+ tx->hw_gso_wire_packets += tx_queue->old_tso_packets;
+ }
+ /* Include XDP TX in device-wide stats */
+ tx->packets += tx_queue->complete_xdp_packets;
+ tx->bytes += tx_queue->complete_xdp_bytes;
+ }
+ }
+}
+
+static const struct netdev_stat_ops efx_stat_ops = {
+ .get_queue_stats_rx = efx_get_queue_stats_rx,
+ .get_queue_stats_tx = efx_get_queue_stats_tx,
+ .get_base_stats = efx_get_base_stats,
+};
+
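The queue-stat callbacks above report deltas against snapshots taken when the datapath was last (re)started, while efx_get_base_stats() folds everything counted before that point into the device-wide base, so totals stay monotonic across restarts. A minimal user-space sketch of that snapshot/delta bookkeeping, with hypothetical names rather than the driver's real structures:

#include <stdint.h>
#include <stdio.h>

struct q_counter {
	uint64_t packets;	/* running total since driver load */
	uint64_t old_packets;	/* snapshot taken at last datapath start */
};

/* At "datapath start": remember the running total */
static void q_snapshot(struct q_counter *q)
{
	q->old_packets = q->packets;
}

/* Per-queue stat exposed to the core: only what happened since the snapshot */
static uint64_t q_delta(const struct q_counter *q)
{
	return q->packets - q->old_packets;
}

int main(void)
{
	struct q_counter q = { 0 };

	q.packets = 100;	/* traffic before a restart */
	q_snapshot(&q);		/* datapath restarted */
	q.packets += 25;	/* traffic after the restart */
	/* base stats report 100, the per-queue stat reports 25 */
	printf("base=%llu queue=%llu\n",
	       (unsigned long long)q.old_packets,
	       (unsigned long long)q_delta(&q));
	return 0;
}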
static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog)
{
struct bpf_prog *old_prog;
@@ -716,6 +793,7 @@ static int efx_register_netdev(struct efx_nic *efx)
net_dev->watchdog_timeo = 5 * HZ;
net_dev->irq = efx->pci_dev->irq;
net_dev->netdev_ops = &efx_netdev_ops;
+ net_dev->stat_ops = &efx_stat_ops;
if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
net_dev->priv_flags |= IFF_UNICAST_FLT;
net_dev->ethtool_ops = &efx_ethtool_ops;
@@ -821,6 +899,10 @@ static const struct pci_device_id efx_pci_table[] = {
.driver_data = (unsigned long) &efx_hunt_a0_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1b03), /* SFC9250 VF */
.driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
+ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0c03), /* X4 PF (FF/LL) */
+ .driver_data = (unsigned long)&efx_x4_nic_type},
+ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x2c03), /* X4 PF (FF only) */
+ .driver_data = (unsigned long)&efx_x4_nic_type},
{0} /* end of list */
};
@@ -1096,7 +1178,6 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
rc = efx_init_struct(efx, pci_dev);
if (rc)
goto fail1;
- efx->mdio.dev = net_dev;
pci_info(pci_dev, "Solarflare NIC detected\n");
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 48d3623735ba..45e191686625 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -158,7 +158,7 @@ static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx,
}
/* RSS contexts */
-static inline bool efx_rss_active(struct efx_rss_context *ctx)
+static inline bool efx_rss_active(struct efx_rss_context_priv *ctx)
{
return ctx->context_id != EFX_MCDI_RSS_CONTEXT_INVALID;
}
@@ -168,7 +168,6 @@ extern const struct ethtool_ops efx_ethtool_ops;
/* Global */
unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs);
-unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks);
int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
unsigned int rx_usecs, bool rx_adaptive,
bool rx_may_override_tx);
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
index c9e17a8208a9..ed3a96ebc7f3 100644
--- a/drivers/net/ethernet/sfc/efx_channels.c
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -216,8 +216,8 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
if (efx_separate_tx_channels) {
efx->n_tx_channels =
- min(max(n_channels / 2, 1U),
- efx->max_tx_channels);
+ clamp(n_channels / 2, 1U,
+ efx->max_tx_channels);
efx->tx_channel_offset =
n_channels - efx->n_tx_channels;
efx->n_rx_channels =
@@ -1100,6 +1100,10 @@ void efx_start_channels(struct efx_nic *efx)
atomic_inc(&efx->active_queues);
}
+ /* reset per-queue stats */
+ channel->old_n_rx_hw_drops = efx_get_queue_stat_rx_hw_drops(channel);
+ channel->old_n_rx_hw_drop_overruns = channel->n_rx_nodesc_trunc;
+
efx_for_each_channel_rx_queue(rx_queue, channel) {
efx_init_rx_queue(rx_queue);
atomic_inc(&efx->active_queues);
@@ -1209,6 +1213,8 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
tx_queue->pkts_compl,
tx_queue->bytes_compl);
}
+ tx_queue->complete_packets += tx_queue->pkts_compl;
+ tx_queue->complete_bytes += tx_queue->bytes_compl;
}
/* Receive any packets we queued up */
@@ -1260,7 +1266,8 @@ static int efx_poll(struct napi_struct *napi, int budget)
spent = efx_process_channel(channel, budget);
- xdp_do_flush();
+ if (budget)
+ xdp_do_flush();
if (spent < budget) {
if (efx_channel_has_rx_queue(channel) &&
@@ -1274,7 +1281,7 @@ static int efx_poll(struct napi_struct *napi, int budget)
time = jiffies - channel->rfs_last_expiry;
/* Would our quota be >= 20? */
if (channel->rfs_filter_count * time >= 600 * HZ)
- mod_delayed_work(system_wq, &channel->filter_work, 0);
+ mod_delayed_work(system_percpu_wq, &channel->filter_work, 0);
#endif
/* There is no race here; although napi_disable() will
diff --git a/drivers/net/ethernet/sfc/efx_channels.h b/drivers/net/ethernet/sfc/efx_channels.h
index 46b702648721..547cf94014a3 100644
--- a/drivers/net/ethernet/sfc/efx_channels.h
+++ b/drivers/net/ethernet/sfc/efx_channels.h
@@ -43,6 +43,13 @@ struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel);
void efx_start_channels(struct efx_nic *efx);
void efx_stop_channels(struct efx_nic *efx);
+static inline u64 efx_get_queue_stat_rx_hw_drops(struct efx_channel *channel)
+{
+ return channel->n_rx_eth_crc_err + channel->n_rx_frm_trunc +
+ channel->n_rx_overlength + channel->n_rx_nodesc_trunc +
+ channel->n_rx_mport_bad;
+}
+
void efx_init_napi_channel(struct efx_channel *channel);
void efx_init_napi(struct efx_nic *efx);
void efx_fini_napi_channel(struct efx_channel *channel);
diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
index 551f890db90a..e8fdbb62d872 100644
--- a/drivers/net/ethernet/sfc/efx_common.c
+++ b/drivers/net/ethernet/sfc/efx_common.c
@@ -302,7 +302,7 @@ int efx_change_mtu(struct net_device *net_dev, int new_mtu)
efx_stop_all(efx);
mutex_lock(&efx->mac_lock);
- net_dev->mtu = new_mtu;
+ WRITE_ONCE(net_dev->mtu, new_mtu);
efx_mac_reconfigure(efx, true);
mutex_unlock(&efx->mac_lock);
@@ -635,22 +635,6 @@ int __efx_reconfigure_port(struct efx_nic *efx)
return rc;
}
-/* Reinitialise the MAC to pick up new PHY settings, even if the port is
- * disabled.
- */
-int efx_reconfigure_port(struct efx_nic *efx)
-{
- int rc;
-
- EFX_ASSERT_RESET_SERIALISED(efx);
-
- mutex_lock(&efx->mac_lock);
- rc = __efx_reconfigure_port(efx);
- mutex_unlock(&efx->mac_lock);
-
- return rc;
-}
-
/**************************************************************************
*
* Device reset and suspend
@@ -714,7 +698,7 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
mutex_lock(&efx->mac_lock);
down_write(&efx->filter_sem);
- mutex_lock(&efx->rss_lock);
+ mutex_lock(&efx->net_dev->ethtool->rss_lock);
efx->type->fini(efx);
}
@@ -777,7 +761,7 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
if (efx->type->rx_restore_rss_contexts)
efx->type->rx_restore_rss_contexts(efx);
- mutex_unlock(&efx->rss_lock);
+ mutex_unlock(&efx->net_dev->ethtool->rss_lock);
efx->type->filter_table_restore(efx);
up_write(&efx->filter_sem);
@@ -793,7 +777,7 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
fail:
efx->port_initialized = false;
- mutex_unlock(&efx->rss_lock);
+ mutex_unlock(&efx->net_dev->ethtool->rss_lock);
up_write(&efx->filter_sem);
mutex_unlock(&efx->mac_lock);
@@ -1000,9 +984,7 @@ int efx_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev)
efx->type->rx_hash_offset - efx->type->rx_prefix_size;
efx->rx_packet_ts_offset =
efx->type->rx_ts_offset - efx->type->rx_prefix_size;
- INIT_LIST_HEAD(&efx->rss_context.list);
- efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
- mutex_init(&efx->rss_lock);
+ efx->rss_context.priv.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
efx->vport_id = EVB_PORT_ID_ASSIGNED;
spin_lock_init(&efx->stats_lock);
efx->vi_stride = EFX_DEFAULT_VI_STRIDE;
@@ -1021,6 +1003,7 @@ int efx_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev)
INIT_LIST_HEAD(&efx->vf_reps);
INIT_WORK(&efx->mac_work, efx_mac_work);
init_waitqueue_head(&efx->flush_wq);
+ mutex_init(&efx->reflash_mutex);
efx->tx_queues_per_channel = 1;
efx->rxq_entries = EFX_DEFAULT_DMAQ_SIZE;
@@ -1275,9 +1258,6 @@ out:
/* For simplicity and reliability, we always require a slot reset and try to
* reset the hardware when a pci error affecting the device is detected.
- * We leave both the link_reset and mmio_enabled callback unimplemented:
- * with our request for slot reset the mmio_enabled callback will never be
- * called, and the link_reset callback is not used by AER or EEH mechanisms.
*/
const struct pci_error_handlers efx_err_handlers = {
.error_detected = efx_io_error_detected,
diff --git a/drivers/net/ethernet/sfc/efx_common.h b/drivers/net/ethernet/sfc/efx_common.h
index 2c54dac3e662..19a8ca530969 100644
--- a/drivers/net/ethernet/sfc/efx_common.h
+++ b/drivers/net/ethernet/sfc/efx_common.h
@@ -40,7 +40,6 @@ void efx_destroy_reset_workqueue(void);
void efx_start_monitor(struct efx_nic *efx);
int __efx_reconfigure_port(struct efx_nic *efx);
-int efx_reconfigure_port(struct efx_nic *efx);
#define EFX_ASSERT_RESET_SERIALISED(efx) \
do { \
diff --git a/drivers/net/ethernet/sfc/efx_devlink.c b/drivers/net/ethernet/sfc/efx_devlink.c
index 3cd750820fdd..d842c60dfc10 100644
--- a/drivers/net/ethernet/sfc/efx_devlink.c
+++ b/drivers/net/ethernet/sfc/efx_devlink.c
@@ -19,6 +19,7 @@
#include "mae.h"
#include "ef100_rep.h"
#endif
+#include "efx_reflash.h"
struct efx_devlink {
struct efx_nic *efx;
@@ -615,7 +616,19 @@ static int efx_devlink_info_get(struct devlink *devlink,
return 0;
}
+static int efx_devlink_flash_update(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct efx_devlink *devlink_private = devlink_priv(devlink);
+ struct efx_nic *efx = devlink_private->efx;
+
+ return efx_reflash_flash_firmware(efx, params->fw, extack);
+}
+
static const struct devlink_ops sfc_devlink_ops = {
+ .supported_flash_update_params = 0,
+ .flash_update = efx_devlink_flash_update,
.info_get = efx_devlink_info_get,
};
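With .flash_update wired up, firmware updates arrive through the standard devlink flow, e.g. something like "devlink dev flash pci/0000:01:00.0 file sfc-fw.bin" (device address and file name purely illustrative; the image is fetched through the kernel firmware loader), which lands in efx_reflash_flash_firmware() in the new file below.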
diff --git a/drivers/net/ethernet/sfc/efx_reflash.c b/drivers/net/ethernet/sfc/efx_reflash.c
new file mode 100644
index 000000000000..b12e95f1c80a
--- /dev/null
+++ b/drivers/net/ethernet/sfc/efx_reflash.c
@@ -0,0 +1,522 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for AMD network controllers and boards
+ * Copyright (C) 2025, Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/crc32.h>
+#include <net/devlink.h>
+#include "efx_reflash.h"
+#include "net_driver.h"
+#include "fw_formats.h"
+#include "mcdi_pcol.h"
+#include "mcdi.h"
+
+/* Try to parse a Reflash header at the specified offset */
+static bool efx_reflash_parse_reflash_header(const struct firmware *fw,
+ size_t header_offset, u32 *type,
+ u32 *subtype, const u8 **data,
+ size_t *data_size)
+{
+ size_t header_end, trailer_offset, trailer_end;
+ u32 magic, version, payload_size, header_len;
+ const u8 *header, *trailer;
+ u32 expected_crc, crc;
+
+ if (check_add_overflow(header_offset, EFX_REFLASH_HEADER_LENGTH_OFST +
+ EFX_REFLASH_HEADER_LENGTH_LEN,
+ &header_end))
+ return false;
+ if (fw->size < header_end)
+ return false;
+
+ header = fw->data + header_offset;
+ magic = get_unaligned_le32(header + EFX_REFLASH_HEADER_MAGIC_OFST);
+ if (magic != EFX_REFLASH_HEADER_MAGIC_VALUE)
+ return false;
+
+ version = get_unaligned_le32(header + EFX_REFLASH_HEADER_VERSION_OFST);
+ if (version != EFX_REFLASH_HEADER_VERSION_VALUE)
+ return false;
+
+ payload_size = get_unaligned_le32(header + EFX_REFLASH_HEADER_PAYLOAD_SIZE_OFST);
+ header_len = get_unaligned_le32(header + EFX_REFLASH_HEADER_LENGTH_OFST);
+ if (check_add_overflow(header_offset, header_len, &trailer_offset) ||
+ check_add_overflow(trailer_offset, payload_size, &trailer_offset) ||
+ check_add_overflow(trailer_offset, EFX_REFLASH_TRAILER_LEN,
+ &trailer_end))
+ return false;
+ if (fw->size < trailer_end)
+ return false;
+
+ trailer = fw->data + trailer_offset;
+ expected_crc = get_unaligned_le32(trailer + EFX_REFLASH_TRAILER_CRC_OFST);
+ /* Addition could overflow u32, but not size_t since we already
+ * checked trailer_offset didn't overflow. So cast to size_t first.
+ */
+ crc = crc32_le(0, header, (size_t)header_len + payload_size);
+ if (crc != expected_crc)
+ return false;
+
+ *type = get_unaligned_le32(header + EFX_REFLASH_HEADER_FIRMWARE_TYPE_OFST);
+ *subtype = get_unaligned_le32(header + EFX_REFLASH_HEADER_FIRMWARE_SUBTYPE_OFST);
+ if (*type == EFX_REFLASH_FIRMWARE_TYPE_BUNDLE) {
+ /* All the bundle data is written verbatim to NVRAM */
+ *data = fw->data;
+ *data_size = fw->size;
+ } else {
+ /* Other payload types strip the reflash header and trailer
+ * from the data written to NVRAM
+ */
+ *data = header + header_len;
+ *data_size = payload_size;
+ }
+
+ return true;
+}
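The parser above never trusts length fields taken from the image: every end offset is computed with check_add_overflow() and compared against fw->size before anything is dereferenced, and the CRC over header plus payload must match the trailer. A stand-alone sketch of the bounds-check pattern, using the compiler builtin __builtin_add_overflow() as a stand-in for the kernel helper:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Accept [offset, offset + want) only if it lies entirely inside a buffer
 * of 'size' bytes and the addition itself does not wrap around.
 */
static bool range_ok(size_t size, size_t offset, size_t want)
{
	size_t end;

	if (__builtin_add_overflow(offset, want, &end))
		return false;	/* offset + want overflowed size_t */
	return end <= size;
}

int main(void)
{
	/* in range; out of range because the addition would wrap */
	printf("%d %d\n", range_ok(100, 90, 10), range_ok(100, (size_t)-1, 2));
	return 0;
}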
+
+/* Map from FIRMWARE_TYPE to NVRAM_PARTITION_TYPE */
+static int efx_reflash_partition_type(u32 type, u32 subtype,
+ u32 *partition_type,
+ u32 *partition_subtype)
+{
+ int rc = 0;
+
+ switch (type) {
+ case EFX_REFLASH_FIRMWARE_TYPE_BOOTROM:
+ *partition_type = NVRAM_PARTITION_TYPE_EXPANSION_ROM;
+ *partition_subtype = subtype;
+ break;
+ case EFX_REFLASH_FIRMWARE_TYPE_BUNDLE:
+ *partition_type = NVRAM_PARTITION_TYPE_BUNDLE;
+ *partition_subtype = subtype;
+ break;
+ default:
+ /* Not supported */
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+/* Try to parse a SmartNIC image header at the specified offset */
+static bool efx_reflash_parse_snic_header(const struct firmware *fw,
+ size_t header_offset,
+ u32 *partition_type,
+ u32 *partition_subtype,
+ const u8 **data, size_t *data_size)
+{
+ u32 magic, version, payload_size, header_len, expected_crc, crc;
+ size_t header_end, payload_end;
+ const u8 *header;
+
+ if (check_add_overflow(header_offset, EFX_SNICIMAGE_HEADER_MINLEN,
+ &header_end) ||
+ fw->size < header_end)
+ return false;
+
+ header = fw->data + header_offset;
+ magic = get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_MAGIC_OFST);
+ if (magic != EFX_SNICIMAGE_HEADER_MAGIC_VALUE)
+ return false;
+
+ version = get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_VERSION_OFST);
+ if (version != EFX_SNICIMAGE_HEADER_VERSION_VALUE)
+ return false;
+
+ header_len = get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_LENGTH_OFST);
+ if (check_add_overflow(header_offset, header_len, &header_end))
+ return false;
+ payload_size = get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_PAYLOAD_SIZE_OFST);
+ if (check_add_overflow(header_end, payload_size, &payload_end) ||
+ fw->size < payload_end)
+ return false;
+
+ expected_crc = get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_CRC_OFST);
+
+ /* Calculate CRC omitting the expected CRC field itself */
+ crc = crc32_le(~0, header, EFX_SNICIMAGE_HEADER_CRC_OFST);
+ crc = ~crc32_le(crc,
+ header + EFX_SNICIMAGE_HEADER_CRC_OFST +
+ EFX_SNICIMAGE_HEADER_CRC_LEN,
+ header_len + payload_size - EFX_SNICIMAGE_HEADER_CRC_OFST -
+ EFX_SNICIMAGE_HEADER_CRC_LEN);
+ if (crc != expected_crc)
+ return false;
+
+ *partition_type =
+ get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_PARTITION_TYPE_OFST);
+ *partition_subtype =
+ get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_PARTITION_SUBTYPE_OFST);
+ *data = fw->data;
+ *data_size = fw->size;
+ return true;
+}
+
+/* Try to parse a SmartNIC bundle header at the specified offset */
+static bool efx_reflash_parse_snic_bundle_header(const struct firmware *fw,
+ size_t header_offset,
+ u32 *partition_type,
+ u32 *partition_subtype,
+ const u8 **data,
+ size_t *data_size)
+{
+ u32 magic, version, bundle_type, header_len, expected_crc, crc;
+ size_t header_end;
+ const u8 *header;
+
+ if (check_add_overflow(header_offset, EFX_SNICBUNDLE_HEADER_LEN,
+ &header_end))
+ return false;
+ if (fw->size < header_end)
+ return false;
+
+ header = fw->data + header_offset;
+ magic = get_unaligned_le32(header + EFX_SNICBUNDLE_HEADER_MAGIC_OFST);
+ if (magic != EFX_SNICBUNDLE_HEADER_MAGIC_VALUE)
+ return false;
+
+ version = get_unaligned_le32(header + EFX_SNICBUNDLE_HEADER_VERSION_OFST);
+ if (version != EFX_SNICBUNDLE_HEADER_VERSION_VALUE)
+ return false;
+
+ bundle_type = get_unaligned_le32(header + EFX_SNICBUNDLE_HEADER_BUNDLE_TYPE_OFST);
+ if (bundle_type != NVRAM_PARTITION_TYPE_BUNDLE)
+ return false;
+
+ header_len = get_unaligned_le32(header + EFX_SNICBUNDLE_HEADER_LENGTH_OFST);
+ if (header_len != EFX_SNICBUNDLE_HEADER_LEN)
+ return false;
+
+ expected_crc = get_unaligned_le32(header + EFX_SNICBUNDLE_HEADER_CRC_OFST);
+ crc = ~crc32_le(~0, header, EFX_SNICBUNDLE_HEADER_CRC_OFST);
+ if (crc != expected_crc)
+ return false;
+
+ *partition_type = NVRAM_PARTITION_TYPE_BUNDLE;
+ *partition_subtype = get_unaligned_le32(header + EFX_SNICBUNDLE_HEADER_BUNDLE_SUBTYPE_OFST);
+ *data = fw->data;
+ *data_size = fw->size;
+ return true;
+}
+
+/* Try to find a valid firmware payload in the firmware data.
+ * When we recognise a valid header, we parse it for the partition type
+ * (so we know where to ask the MC to write it to) and the location of
+ * the data blob to write.
+ */
+static int efx_reflash_parse_firmware_data(const struct firmware *fw,
+ u32 *partition_type,
+ u32 *partition_subtype,
+ const u8 **data, size_t *data_size)
+{
+ size_t header_offset;
+ u32 type, subtype;
+
+ /* Some packaging formats (such as CMS/PKCS#7 signed images)
+ * prepend a header for which finding the size is a non-trivial
+ * task, so step through the firmware data until we find a valid
+ * header.
+ *
+ * The checks are intended to reject firmware data that is clearly not
+ * in the expected format. They do not need to be exhaustive as the
+ * running firmware will perform its own comprehensive validity and
+ * compatibility checks during the update procedure.
+ *
+ * Firmware packages may contain multiple reflash images, e.g. a
+ * bundle containing one or more other images. Only check the
+ * outermost container by stopping after the first candidate image
+ * found, even if it is for an unsupported partition type.
+ */
+ for (header_offset = 0; header_offset < fw->size; header_offset++) {
+ if (efx_reflash_parse_snic_bundle_header(fw, header_offset,
+ partition_type,
+ partition_subtype,
+ data, data_size))
+ return 0;
+
+ if (efx_reflash_parse_snic_header(fw, header_offset,
+ partition_type,
+ partition_subtype, data,
+ data_size))
+ return 0;
+
+ if (efx_reflash_parse_reflash_header(fw, header_offset, &type,
+ &subtype, data, data_size))
+ return efx_reflash_partition_type(type, subtype,
+ partition_type,
+ partition_subtype);
+ }
+
+ return -EINVAL;
+}
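Because signed or wrapped images (such as CMS/PKCS#7 containers) prepend data of unknown length, the loop above slides one byte at a time through the file and lets each parser check magic, version and CRC at every offset; the first header that validates wins. A small stand-alone sketch of that sliding magic search, with an invented magic value:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Return the offset of the first little-endian 32-bit 'magic' in buf,
 * or (size_t)-1 if it is not found.  A real parser would go on to check
 * version and CRC at that offset before trusting the match.
 */
static size_t find_magic(const uint8_t *buf, size_t len, uint32_t magic)
{
	uint8_t le[4] = { (uint8_t)magic, (uint8_t)(magic >> 8),
			  (uint8_t)(magic >> 16), (uint8_t)(magic >> 24) };
	size_t off;

	for (off = 0; off + sizeof(le) <= len; off++)
		if (!memcmp(buf + off, le, sizeof(le)))
			return off;
	return (size_t)-1;
}

int main(void)
{
	uint8_t img[] = { 0x00, 0x11, 0x78, 0x56, 0x34, 0x12, 0xff };

	/* magic 0x12345678 is stored little-endian starting at offset 2 */
	printf("%zu\n", find_magic(img, sizeof(img), 0x12345678));
	return 0;
}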
+
+/* Limit the number of status updates during the erase or write phases */
+#define EFX_DEVLINK_STATUS_UPDATE_COUNT 50
+
+/* Expected timeout for efx_mcdi_nvram_update_finish_polled() */
+#define EFX_DEVLINK_UPDATE_FINISH_TIMEOUT 900
+
+/* Ideal erase chunk size. This is a balance between minimising the number of
+ * MCDI requests needed to erase an entire partition and avoiding tripping the
+ * MCDI RPC timeout.
+ */
+#define EFX_NVRAM_ERASE_IDEAL_CHUNK_SIZE (64 * 1024)
+
+static int efx_reflash_erase_partition(struct efx_nic *efx,
+ struct netlink_ext_ack *extack,
+ struct devlink *devlink, u32 type,
+ size_t partition_size,
+ size_t align)
+{
+ size_t chunk, offset, next_update;
+ int rc;
+
+ /* Partitions that cannot be erased or do not require erase before
+ * write are advertised with an erase alignment/sector size of zero.
+ */
+ if (align == 0)
+ /* Nothing to do */
+ return 0;
+
+ if (partition_size % align)
+ return -EINVAL;
+
+ /* Erase the entire NVRAM partition a chunk at a time to avoid
+ * potentially tripping the MCDI RPC timeout.
+ */
+ if (align >= EFX_NVRAM_ERASE_IDEAL_CHUNK_SIZE)
+ chunk = align;
+ else
+ chunk = rounddown(EFX_NVRAM_ERASE_IDEAL_CHUNK_SIZE, align);
+
+ for (offset = 0, next_update = 0; offset < partition_size; offset += chunk) {
+ if (offset >= next_update) {
+ devlink_flash_update_status_notify(devlink, "Erasing",
+ NULL, offset,
+ partition_size);
+ next_update += partition_size / EFX_DEVLINK_STATUS_UPDATE_COUNT;
+ }
+
+ chunk = min_t(size_t, partition_size - offset, chunk);
+ rc = efx_mcdi_nvram_erase(efx, type, offset, chunk);
+ if (rc) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Erase failed for NVRAM partition %#x at %#zx-%#zx",
+ type, offset, offset + chunk - 1);
+ return rc;
+ }
+ }
+
+ devlink_flash_update_status_notify(devlink, "Erasing", NULL,
+ partition_size, partition_size);
+
+ return 0;
+}
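Two things are going on in the erase loop: the chunk size is derived from EFX_NVRAM_ERASE_IDEAL_CHUNK_SIZE and the erase alignment so each MCDI call stays well under the RPC timeout, and status is only pushed to devlink when the offset crosses next_update, which advances by partition_size / EFX_DEVLINK_STATUS_UPDATE_COUNT per notification. A stand-alone sketch of that progress throttling (names and sizes invented):

#include <stddef.h>
#include <stdio.h>

#define UPDATE_COUNT 50

int main(void)
{
	size_t total = 1024 * 1024, chunk = 4096;
	size_t offset, next_update = 0, notifications = 0;

	for (offset = 0; offset < total; offset += chunk) {
		if (offset >= next_update) {
			notifications++;	/* devlink status notify would go here */
			next_update += total / UPDATE_COUNT;
		}
		/* erase [offset, offset + chunk) here */
	}
	/* with many more chunks than UPDATE_COUNT, only ~UPDATE_COUNT updates fire */
	printf("%zu notifications for %zu chunks\n",
	       notifications, total / chunk);
	return 0;
}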
+
+static int efx_reflash_write_partition(struct efx_nic *efx,
+ struct netlink_ext_ack *extack,
+ struct devlink *devlink, u32 type,
+ const u8 *data, size_t data_size,
+ size_t align)
+{
+ size_t write_max, chunk, offset, next_update;
+ int rc;
+
+ if (align == 0)
+ return -EINVAL;
+
+ /* Write the NVRAM partition in chunks that are the largest multiple
+ * of the partition's required write alignment that will fit into the
+ * MCDI NVRAM_WRITE RPC payload.
+ */
+ if (efx->type->mcdi_max_ver < 2)
+ write_max = MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN *
+ MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM;
+ else
+ write_max = MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN *
+ MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM_MCDI2;
+ chunk = rounddown(write_max, align);
+
+ for (offset = 0, next_update = 0; offset + chunk <= data_size; offset += chunk) {
+ if (offset >= next_update) {
+ devlink_flash_update_status_notify(devlink, "Writing",
+ NULL, offset,
+ data_size);
+ next_update += data_size / EFX_DEVLINK_STATUS_UPDATE_COUNT;
+ }
+
+ rc = efx_mcdi_nvram_write(efx, type, offset, data + offset, chunk);
+ if (rc) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Write failed for NVRAM partition %#x at %#zx-%#zx",
+ type, offset, offset + chunk - 1);
+ return rc;
+ }
+ }
+
+ /* Round up left over data to satisfy write alignment */
+ if (offset < data_size) {
+ size_t remaining = data_size - offset;
+ u8 *buf;
+
+ if (offset >= next_update)
+ devlink_flash_update_status_notify(devlink, "Writing",
+ NULL, offset,
+ data_size);
+
+ chunk = roundup(remaining, align);
+ buf = kmalloc(chunk, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ memcpy(buf, data + offset, remaining);
+ memset(buf + remaining, 0xFF, chunk - remaining);
+ rc = efx_mcdi_nvram_write(efx, type, offset, buf, chunk);
+ kfree(buf);
+ if (rc) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Write failed for NVRAM partition %#x at %#zx-%#zx",
+ type, offset, offset + chunk - 1);
+ return rc;
+ }
+ }
+
+ devlink_flash_update_status_notify(devlink, "Writing", NULL, data_size,
+ data_size);
+
+ return 0;
+}
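The tail handling above exists because the image length is rarely an exact multiple of the write alignment: the leftover bytes are copied into a scratch buffer rounded up to the alignment and padded with 0xFF, the erased-flash value, so the final MCDI write is still aligned. A user-space sketch of just that padding step (assumes align has already been checked to be non-zero, as the driver does):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Copy 'remaining' bytes of data into a new buffer rounded up to a
 * multiple of 'align' and padded with 0xFF.  Returns the buffer (caller
 * frees) and its length via *out_len, or NULL on allocation failure.
 */
uint8_t *pad_tail(const uint8_t *data, size_t remaining, size_t align,
		  size_t *out_len)
{
	size_t padded = ((remaining + align - 1) / align) * align;
	uint8_t *buf = malloc(padded);

	if (!buf)
		return NULL;
	memcpy(buf, data, remaining);
	memset(buf + remaining, 0xFF, padded - remaining);
	*out_len = padded;
	return buf;
}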
+
+int efx_reflash_flash_firmware(struct efx_nic *efx, const struct firmware *fw,
+ struct netlink_ext_ack *extack)
+{
+ size_t data_size, size, erase_align, write_align;
+ struct devlink *devlink = efx->devlink;
+ u32 type, data_subtype, subtype;
+ const u8 *data;
+ bool protected;
+ int rc;
+
+ if (!efx_has_cap(efx, BUNDLE_UPDATE)) {
+ NL_SET_ERR_MSG_MOD(extack, "NVRAM bundle updates are not supported by the firmware");
+ return -EOPNOTSUPP;
+ }
+
+ mutex_lock(&efx->reflash_mutex);
+
+ devlink_flash_update_status_notify(devlink, "Checking update", NULL, 0, 0);
+
+ if (efx->type->flash_auto_partition) {
+ /* NIC wants entire FW file including headers;
+ * FW will validate 'subtype' if there is one
+ */
+ type = NVRAM_PARTITION_TYPE_AUTO;
+ data = fw->data;
+ data_size = fw->size;
+ } else {
+ rc = efx_reflash_parse_firmware_data(fw, &type, &data_subtype, &data,
+ &data_size);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Firmware image validation check failed");
+ goto out_unlock;
+ }
+
+ rc = efx_mcdi_nvram_metadata(efx, type, &subtype, NULL, NULL, 0);
+ if (rc) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Metadata query for NVRAM partition %#x failed",
+ type);
+ goto out_unlock;
+ }
+
+ if (subtype != data_subtype) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Firmware image is not appropriate for this adapter");
+ rc = -EINVAL;
+ goto out_unlock;
+ }
+ }
+
+ rc = efx_mcdi_nvram_info(efx, type, &size, &erase_align, &write_align,
+ &protected);
+ if (rc) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Info query for NVRAM partition %#x failed",
+ type);
+ goto out_unlock;
+ }
+
+ if (protected) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "NVRAM partition %#x is protected",
+ type);
+ rc = -EPERM;
+ goto out_unlock;
+ }
+
+ if (write_align == 0) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "NVRAM partition %#x is not writable",
+ type);
+ rc = -EACCES;
+ goto out_unlock;
+ }
+
+ if (erase_align != 0 && size % erase_align) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "NVRAM partition %#x has a bad partition table entry, can't erase it",
+ type);
+ rc = -EACCES;
+ goto out_unlock;
+ }
+
+ if (data_size > size) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Firmware image is too big for NVRAM partition %#x",
+ type);
+ rc = -EFBIG;
+ goto out_unlock;
+ }
+
+ devlink_flash_update_status_notify(devlink, "Starting update", NULL, 0, 0);
+
+ rc = efx_mcdi_nvram_update_start(efx, type);
+ if (rc) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Update start request for NVRAM partition %#x failed",
+ type);
+ goto out_unlock;
+ }
+
+ rc = efx_reflash_erase_partition(efx, extack, devlink, type, size,
+ erase_align);
+ if (rc)
+ goto out_update_finish;
+
+ rc = efx_reflash_write_partition(efx, extack, devlink, type, data,
+ data_size, write_align);
+ if (rc)
+ goto out_update_finish;
+
+ devlink_flash_update_timeout_notify(devlink, "Finishing update", NULL,
+ EFX_DEVLINK_UPDATE_FINISH_TIMEOUT);
+
+out_update_finish:
+ if (rc)
+ /* Don't obscure the return code from an earlier failure */
+ efx_mcdi_nvram_update_finish(efx, type, EFX_UPDATE_FINISH_ABORT);
+ else
+ rc = efx_mcdi_nvram_update_finish_polled(efx, type);
+out_unlock:
+ mutex_unlock(&efx->reflash_mutex);
+ devlink_flash_update_status_notify(devlink, rc ? "Update failed" :
+ "Update complete",
+ NULL, 0, 0);
+ return rc;
+}
diff --git a/drivers/net/ethernet/sfc/efx_reflash.h b/drivers/net/ethernet/sfc/efx_reflash.h
new file mode 100644
index 000000000000..3dffac565161
--- /dev/null
+++ b/drivers/net/ethernet/sfc/efx_reflash.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for AMD network controllers and boards
+ * Copyright (C) 2025, Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef _EFX_REFLASH_H
+#define _EFX_REFLASH_H
+
+#include "net_driver.h"
+#include <linux/firmware.h>
+
+int efx_reflash_flash_firmware(struct efx_nic *efx, const struct firmware *fw,
+ struct netlink_ext_ack *extack);
+
+#endif /* _EFX_REFLASH_H */
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 37c69c8d90b1..18fe5850a978 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -217,7 +217,8 @@ static int efx_ethtool_set_wol(struct net_device *net_dev,
}
static void efx_ethtool_get_fec_stats(struct net_device *net_dev,
- struct ethtool_fec_stats *fec_stats)
+ struct ethtool_fec_stats *fec_stats,
+ struct ethtool_fec_hist *hist)
{
struct efx_nic *efx = efx_netdev_priv(net_dev);
@@ -226,21 +227,15 @@ static void efx_ethtool_get_fec_stats(struct net_device *net_dev,
}
static int efx_ethtool_get_ts_info(struct net_device *net_dev,
- struct ethtool_ts_info *ts_info)
+ struct kernel_ethtool_ts_info *ts_info)
{
struct efx_nic *efx = efx_netdev_priv(net_dev);
- /* Software capabilities */
- ts_info->so_timestamping = (SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE);
- ts_info->phc_index = -1;
-
efx_ptp_get_ts_info(efx, ts_info);
return 0;
}
const struct ethtool_ops efx_ethtool_ops = {
- .cap_rss_ctx_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USECS_IRQ |
ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
@@ -268,8 +263,16 @@ const struct ethtool_ops efx_ethtool_ops = {
.set_rxnfc = efx_ethtool_set_rxnfc,
.get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size,
.get_rxfh_key_size = efx_ethtool_get_rxfh_key_size,
+ .rxfh_per_ctx_fields = true,
+ .rxfh_per_ctx_key = true,
+ .cap_rss_rxnfc_adds = true,
+ .rxfh_priv_size = sizeof(struct efx_rss_context_priv),
.get_rxfh = efx_ethtool_get_rxfh,
.set_rxfh = efx_ethtool_set_rxfh,
+ .get_rxfh_fields = efx_ethtool_get_rxfh_fields,
+ .create_rxfh_context = efx_ethtool_create_rxfh_context,
+ .modify_rxfh_context = efx_ethtool_modify_rxfh_context,
+ .remove_rxfh_context = efx_ethtool_remove_rxfh_context,
.get_ts_info = efx_ethtool_get_ts_info,
.get_module_info = efx_ethtool_get_module_info,
.get_module_eeprom = efx_ethtool_get_module_eeprom,
diff --git a/drivers/net/ethernet/sfc/ethtool_common.c b/drivers/net/ethernet/sfc/ethtool_common.c
index 7d5e5db4eac5..fa303e171d98 100644
--- a/drivers/net/ethernet/sfc/ethtool_common.c
+++ b/drivers/net/ethernet/sfc/ethtool_common.c
@@ -75,7 +75,6 @@ static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
EFX_ETHTOOL_UINT_TXQ_STAT(pio_packets),
EFX_ETHTOOL_UINT_TXQ_STAT(cb_packets),
EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_inner_ip_hdr_chksum_err),
@@ -83,8 +82,8 @@ static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_outer_ip_hdr_chksum_err),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_outer_tcp_udp_chksum_err),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_eth_crc_err),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_overlength),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_events),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_packets),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_drops),
@@ -396,7 +395,7 @@ int efx_ethtool_fill_self_tests(struct efx_nic *efx,
return n;
}
-static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings)
+static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 **strings)
{
size_t n_stats = 0;
struct efx_channel *channel;
@@ -404,24 +403,22 @@ static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings)
efx_for_each_channel(channel, efx) {
if (efx_channel_has_tx_queues(channel)) {
n_stats++;
- if (strings != NULL) {
- snprintf(strings, ETH_GSTRING_LEN,
- "tx-%u.tx_packets",
- channel->tx_queue[0].queue /
- EFX_MAX_TXQ_PER_CHANNEL);
+ if (!strings)
+ continue;
- strings += ETH_GSTRING_LEN;
- }
+ ethtool_sprintf(strings, "tx-%u.tx_packets",
+ channel->tx_queue[0].queue /
+ EFX_MAX_TXQ_PER_CHANNEL);
}
}
efx_for_each_channel(channel, efx) {
if (efx_channel_has_rx_queue(channel)) {
n_stats++;
- if (strings != NULL) {
- snprintf(strings, ETH_GSTRING_LEN,
- "rx-%d.rx_packets", channel->channel);
- strings += ETH_GSTRING_LEN;
- }
+ if (!strings)
+ continue;
+
+ ethtool_sprintf(strings, "rx-%d.rx_packets",
+ channel->channel);
}
}
if (efx->xdp_tx_queue_count && efx->xdp_tx_queues) {
@@ -429,11 +426,11 @@ static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings)
for (xdp = 0; xdp < efx->xdp_tx_queue_count; xdp++) {
n_stats++;
- if (strings) {
- snprintf(strings, ETH_GSTRING_LEN,
- "tx-xdp-cpu-%hu.tx_packets", xdp);
- strings += ETH_GSTRING_LEN;
- }
+ if (!strings)
+ continue;
+
+ ethtool_sprintf(strings, "tx-xdp-cpu-%hu.tx_packets",
+ xdp);
}
}
@@ -465,15 +462,11 @@ void efx_ethtool_get_strings(struct net_device *net_dev,
switch (string_set) {
case ETH_SS_STATS:
- strings += (efx->type->describe_stats(efx, strings) *
- ETH_GSTRING_LEN);
+ efx->type->describe_stats(efx, &strings);
for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++)
- strscpy(strings + i * ETH_GSTRING_LEN,
- efx_sw_stat_desc[i].name, ETH_GSTRING_LEN);
- strings += EFX_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN;
- strings += (efx_describe_per_queue_stats(efx, strings) *
- ETH_GSTRING_LEN);
- efx_ptp_describe_stats(efx, strings);
+ ethtool_puts(&strings, efx_sw_stat_desc[i].name);
+ efx_describe_per_queue_stats(efx, &strings);
+ efx_ptp_describe_stats(efx, &strings);
break;
case ETH_SS_TEST:
efx_ethtool_fill_self_tests(efx, NULL, strings, NULL);
@@ -807,6 +800,56 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
return rc;
}
+int efx_ethtool_get_rxfh_fields(struct net_device *net_dev,
+ struct ethtool_rxfh_fields *info)
+{
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
+ struct efx_rss_context_priv *ctx;
+ __u64 data;
+ int rc = 0;
+
+ ctx = &efx->rss_context.priv;
+
+ if (info->rss_context) {
+ ctx = efx_find_rss_context_entry(efx, info->rss_context);
+ if (!ctx)
+ return -ENOENT;
+ }
+
+ data = 0;
+ if (!efx_rss_active(ctx)) /* No RSS */
+ goto out_setdata_unlock;
+
+ switch (info->flow_type) {
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ if (ctx->rx_hash_udp_4tuple)
+ data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 |
+ RXH_IP_SRC | RXH_IP_DST);
+ else
+ data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 |
+ RXH_IP_SRC | RXH_IP_DST);
+ break;
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_ESP_V6_FLOW:
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ default:
+ break;
+ }
+out_setdata_unlock:
+ info->data = data;
+ return rc;
+}
+
int efx_ethtool_get_rxnfc(struct net_device *net_dev,
struct ethtool_rxnfc *info, u32 *rule_locs)
{
@@ -819,55 +862,6 @@ int efx_ethtool_get_rxnfc(struct net_device *net_dev,
info->data = efx->n_rx_channels;
return 0;
- case ETHTOOL_GRXFH: {
- struct efx_rss_context *ctx = &efx->rss_context;
- __u64 data;
-
- mutex_lock(&efx->rss_lock);
- if (info->flow_type & FLOW_RSS && info->rss_context) {
- ctx = efx_find_rss_context_entry(efx, info->rss_context);
- if (!ctx) {
- rc = -ENOENT;
- goto out_unlock;
- }
- }
-
- data = 0;
- if (!efx_rss_active(ctx)) /* No RSS */
- goto out_setdata_unlock;
-
- switch (info->flow_type & ~FLOW_RSS) {
- case UDP_V4_FLOW:
- case UDP_V6_FLOW:
- if (ctx->rx_hash_udp_4tuple)
- data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 |
- RXH_IP_SRC | RXH_IP_DST);
- else
- data = RXH_IP_SRC | RXH_IP_DST;
- break;
- case TCP_V4_FLOW:
- case TCP_V6_FLOW:
- data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 |
- RXH_IP_SRC | RXH_IP_DST);
- break;
- case SCTP_V4_FLOW:
- case SCTP_V6_FLOW:
- case AH_ESP_V4_FLOW:
- case AH_ESP_V6_FLOW:
- case IPV4_FLOW:
- case IPV6_FLOW:
- data = RXH_IP_SRC | RXH_IP_DST;
- break;
- default:
- break;
- }
-out_setdata_unlock:
- info->data = data;
-out_unlock:
- mutex_unlock(&efx->rss_lock);
- return rc;
- }
-
case ETHTOOL_GRXCLSRLCNT:
info->data = efx_filter_get_rx_id_limit(efx);
if (info->data == 0)
@@ -1163,46 +1157,14 @@ u32 efx_ethtool_get_rxfh_key_size(struct net_device *net_dev)
return efx->type->rx_hash_key_size;
}
-static int efx_ethtool_get_rxfh_context(struct net_device *net_dev,
- struct ethtool_rxfh_param *rxfh)
-{
- struct efx_nic *efx = efx_netdev_priv(net_dev);
- struct efx_rss_context *ctx;
- int rc = 0;
-
- if (!efx->type->rx_pull_rss_context_config)
- return -EOPNOTSUPP;
-
- mutex_lock(&efx->rss_lock);
- ctx = efx_find_rss_context_entry(efx, rxfh->rss_context);
- if (!ctx) {
- rc = -ENOENT;
- goto out_unlock;
- }
- rc = efx->type->rx_pull_rss_context_config(efx, ctx);
- if (rc)
- goto out_unlock;
-
- rxfh->hfunc = ETH_RSS_HASH_TOP;
- if (rxfh->indir)
- memcpy(rxfh->indir, ctx->rx_indir_table,
- sizeof(ctx->rx_indir_table));
- if (rxfh->key)
- memcpy(rxfh->key, ctx->rx_hash_key,
- efx->type->rx_hash_key_size);
-out_unlock:
- mutex_unlock(&efx->rss_lock);
- return rc;
-}
-
int efx_ethtool_get_rxfh(struct net_device *net_dev,
struct ethtool_rxfh_param *rxfh)
{
struct efx_nic *efx = efx_netdev_priv(net_dev);
int rc;
- if (rxfh->rss_context)
- return efx_ethtool_get_rxfh_context(net_dev, rxfh);
+ if (rxfh->rss_context) /* core should never call us for these */
+ return -EINVAL;
rc = efx->type->rx_pull_rss_config(efx);
if (rc)
@@ -1218,68 +1180,85 @@ int efx_ethtool_get_rxfh(struct net_device *net_dev,
return 0;
}
-static int efx_ethtool_set_rxfh_context(struct net_device *net_dev,
- struct ethtool_rxfh_param *rxfh,
- struct netlink_ext_ack *extack)
+int efx_ethtool_modify_rxfh_context(struct net_device *net_dev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct efx_nic *efx = efx_netdev_priv(net_dev);
- u32 *rss_context = &rxfh->rss_context;
- struct efx_rss_context *ctx;
- u32 *indir = rxfh->indir;
- bool allocated = false;
- u8 *key = rxfh->key;
- int rc;
+ struct efx_rss_context_priv *priv;
+ const u32 *indir = rxfh->indir;
+ const u8 *key = rxfh->key;
- if (!efx->type->rx_push_rss_context_config)
+ if (!efx->type->rx_push_rss_context_config) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "NIC type does not support custom contexts");
return -EOPNOTSUPP;
-
- mutex_lock(&efx->rss_lock);
-
- if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
- if (rxfh->rss_delete) {
- /* alloc + delete == Nothing to do */
- rc = -EINVAL;
- goto out_unlock;
- }
- ctx = efx_alloc_rss_context_entry(efx);
- if (!ctx) {
- rc = -ENOMEM;
- goto out_unlock;
- }
- ctx->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
- /* Initialise indir table and key to defaults */
- efx_set_default_rx_indir_table(efx, ctx);
- netdev_rss_key_fill(ctx->rx_hash_key, sizeof(ctx->rx_hash_key));
- allocated = true;
- } else {
- ctx = efx_find_rss_context_entry(efx, *rss_context);
- if (!ctx) {
- rc = -ENOENT;
- goto out_unlock;
- }
}
-
- if (rxfh->rss_delete) {
- /* delete this context */
- rc = efx->type->rx_push_rss_context_config(efx, ctx, NULL, NULL);
- if (!rc)
- efx_free_rss_context_entry(ctx);
- goto out_unlock;
+ /* Hash function is Toeplitz, cannot be changed */
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP) {
+ NL_SET_ERR_MSG_MOD(extack, "Only Toeplitz hash is supported");
+ return -EOPNOTSUPP;
}
+ priv = ethtool_rxfh_context_priv(ctx);
+
if (!key)
- key = ctx->rx_hash_key;
+ key = ethtool_rxfh_context_key(ctx);
if (!indir)
- indir = ctx->rx_indir_table;
+ indir = ethtool_rxfh_context_indir(ctx);
- rc = efx->type->rx_push_rss_context_config(efx, ctx, indir, key);
- if (rc && allocated)
- efx_free_rss_context_entry(ctx);
- else
- *rss_context = ctx->user_id;
-out_unlock:
- mutex_unlock(&efx->rss_lock);
- return rc;
+ return efx->type->rx_push_rss_context_config(efx, priv, indir, key,
+ false);
+}
+
+int efx_ethtool_create_rxfh_context(struct net_device *net_dev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
+ struct efx_rss_context_priv *priv;
+
+ priv = ethtool_rxfh_context_priv(ctx);
+
+ priv->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
+ priv->rx_hash_udp_4tuple = false;
+ /* Generate default indir table and/or key if not specified.
+ * We use ctx as a place to store these; this is fine because
+ * we're doing a create, so if we fail then the ctx will just
+ * be deleted.
+ */
+ if (!rxfh->indir)
+ efx_set_default_rx_indir_table(efx, ethtool_rxfh_context_indir(ctx));
+ if (!rxfh->key)
+ netdev_rss_key_fill(ethtool_rxfh_context_key(ctx),
+ ctx->key_size);
+ if (rxfh->hfunc == ETH_RSS_HASH_NO_CHANGE)
+ ctx->hfunc = ETH_RSS_HASH_TOP;
+ if (rxfh->input_xfrm == RXH_XFRM_NO_CHANGE)
+ ctx->input_xfrm = 0;
+ return efx_ethtool_modify_rxfh_context(net_dev, ctx, rxfh, extack);
+}
+
+int efx_ethtool_remove_rxfh_context(struct net_device *net_dev,
+ struct ethtool_rxfh_context *ctx,
+ u32 rss_context,
+ struct netlink_ext_ack *extack)
+{
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
+ struct efx_rss_context_priv *priv;
+
+ if (!efx->type->rx_push_rss_context_config) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "NIC type does not support custom contexts");
+ return -EOPNOTSUPP;
+ }
+
+ priv = ethtool_rxfh_context_priv(ctx);
+ return efx->type->rx_push_rss_context_config(efx, priv, NULL, NULL,
+ true);
}
int efx_ethtool_set_rxfh(struct net_device *net_dev,
@@ -1295,8 +1274,9 @@ int efx_ethtool_set_rxfh(struct net_device *net_dev,
rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (rxfh->rss_context)
- return efx_ethtool_set_rxfh_context(net_dev, rxfh, extack);
+ /* Custom contexts should use new API */
+ if (WARN_ON_ONCE(rxfh->rss_context))
+ return -EIO;
if (!indir && !key)
return 0;
diff --git a/drivers/net/ethernet/sfc/ethtool_common.h b/drivers/net/ethernet/sfc/ethtool_common.h
index a680e5980213..24db4fccbe78 100644
--- a/drivers/net/ethernet/sfc/ethtool_common.h
+++ b/drivers/net/ethernet/sfc/ethtool_common.h
@@ -49,6 +49,20 @@ int efx_ethtool_get_rxfh(struct net_device *net_dev,
int efx_ethtool_set_rxfh(struct net_device *net_dev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack);
+int efx_ethtool_get_rxfh_fields(struct net_device *net_dev,
+ struct ethtool_rxfh_fields *info);
+int efx_ethtool_create_rxfh_context(struct net_device *net_dev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack);
+int efx_ethtool_modify_rxfh_context(struct net_device *net_dev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack);
+int efx_ethtool_remove_rxfh_context(struct net_device *net_dev,
+ struct ethtool_rxfh_context *ctx,
+ u32 rss_context,
+ struct netlink_ext_ack *extack);
int efx_ethtool_reset(struct net_device *net_dev, u32 *flags);
int efx_ethtool_get_module_eeprom(struct net_device *net_dev,
struct ethtool_eeprom *ee,
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index 1cb32aedd89c..6ea41f6c9ef5 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -1394,9 +1394,8 @@ static int ef4_probe_interrupts(struct ef4_nic *efx)
if (n_channels > extra_channels)
n_channels -= extra_channels;
if (ef4_separate_tx_channels) {
- efx->n_tx_channels = min(max(n_channels / 2,
- 1U),
- efx->max_tx_channels);
+ efx->n_tx_channels = clamp(n_channels / 2, 1U,
+ efx->max_tx_channels);
efx->n_rx_channels = max(n_channels -
efx->n_tx_channels,
1U);
@@ -1886,14 +1885,6 @@ unsigned int ef4_usecs_to_ticks(struct ef4_nic *efx, unsigned int usecs)
return usecs * 1000 / efx->timer_quantum_ns;
}
-unsigned int ef4_ticks_to_usecs(struct ef4_nic *efx, unsigned int ticks)
-{
- /* We must round up when converting ticks to microseconds
- * because we round down when converting the other way.
- */
- return DIV_ROUND_UP(ticks * efx->timer_quantum_ns, 1000);
-}
-
/* Set interrupt moderation parameters */
int ef4_init_irq_moderation(struct ef4_nic *efx, unsigned int tx_usecs,
unsigned int rx_usecs, bool rx_adaptive,
@@ -2125,7 +2116,7 @@ static int ef4_change_mtu(struct net_device *net_dev, int new_mtu)
ef4_stop_all(efx);
mutex_lock(&efx->mac_lock);
- net_dev->mtu = new_mtu;
+ WRITE_ONCE(net_dev->mtu, new_mtu);
ef4_mac_reconfigure(efx);
mutex_unlock(&efx->mac_lock);
@@ -3136,9 +3127,6 @@ out:
/* For simplicity and reliability, we always require a slot reset and try to
* reset the hardware when a pci error affecting the device is detected.
- * We leave both the link_reset and mmio_enabled callback unimplemented:
- * with our request for slot reset the mmio_enabled callback will never be
- * called, and the link_reset callback is not used by AER or EEH mechanisms.
*/
static const struct pci_error_handlers ef4_err_handlers = {
.error_detected = ef4_io_error_detected,
diff --git a/drivers/net/ethernet/sfc/falcon/efx.h b/drivers/net/ethernet/sfc/falcon/efx.h
index d3b4646545fa..52508f2c8cb2 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.h
+++ b/drivers/net/ethernet/sfc/falcon/efx.h
@@ -198,7 +198,6 @@ int ef4_try_recovery(struct ef4_nic *efx);
/* Global */
void ef4_schedule_reset(struct ef4_nic *efx, enum reset_type type);
unsigned int ef4_usecs_to_ticks(struct ef4_nic *efx, unsigned int usecs);
-unsigned int ef4_ticks_to_usecs(struct ef4_nic *efx, unsigned int ticks);
int ef4_init_irq_moderation(struct ef4_nic *efx, unsigned int tx_usecs,
unsigned int rx_usecs, bool rx_adaptive,
bool rx_may_override_tx);
diff --git a/drivers/net/ethernet/sfc/falcon/ethtool.c b/drivers/net/ethernet/sfc/falcon/ethtool.c
index f4db683b80f7..27d1cd6f24ca 100644
--- a/drivers/net/ethernet/sfc/falcon/ethtool.c
+++ b/drivers/net/ethernet/sfc/falcon/ethtool.c
@@ -353,7 +353,7 @@ static int ef4_ethtool_fill_self_tests(struct ef4_nic *efx,
return n;
}
-static size_t ef4_describe_per_queue_stats(struct ef4_nic *efx, u8 *strings)
+static size_t ef4_describe_per_queue_stats(struct ef4_nic *efx, u8 **strings)
{
size_t n_stats = 0;
struct ef4_channel *channel;
@@ -361,24 +361,22 @@ static size_t ef4_describe_per_queue_stats(struct ef4_nic *efx, u8 *strings)
ef4_for_each_channel(channel, efx) {
if (ef4_channel_has_tx_queues(channel)) {
n_stats++;
- if (strings != NULL) {
- snprintf(strings, ETH_GSTRING_LEN,
- "tx-%u.tx_packets",
- channel->tx_queue[0].queue /
- EF4_TXQ_TYPES);
+ if (!strings)
+ continue;
- strings += ETH_GSTRING_LEN;
- }
+ ethtool_sprintf(strings, "tx-%u.tx_packets",
+ channel->tx_queue[0].queue /
+ EF4_TXQ_TYPES);
}
}
ef4_for_each_channel(channel, efx) {
if (ef4_channel_has_rx_queue(channel)) {
n_stats++;
- if (strings != NULL) {
- snprintf(strings, ETH_GSTRING_LEN,
- "rx-%d.rx_packets", channel->channel);
- strings += ETH_GSTRING_LEN;
- }
+ if (!strings)
+ continue;
+
+ ethtool_sprintf(strings, "rx-%d.rx_packets",
+ channel->channel);
}
}
return n_stats;
@@ -409,14 +407,10 @@ static void ef4_ethtool_get_strings(struct net_device *net_dev,
switch (string_set) {
case ETH_SS_STATS:
- strings += (efx->type->describe_stats(efx, strings) *
- ETH_GSTRING_LEN);
+ efx->type->describe_stats(efx, &strings);
for (i = 0; i < EF4_ETHTOOL_SW_STAT_COUNT; i++)
- strscpy(strings + i * ETH_GSTRING_LEN,
- ef4_sw_stat_desc[i].name, ETH_GSTRING_LEN);
- strings += EF4_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN;
- strings += (ef4_describe_per_queue_stats(efx, strings) *
- ETH_GSTRING_LEN);
+ ethtool_puts(&strings, ef4_sw_stat_desc[i].name);
+ ef4_describe_per_queue_stats(efx, &strings);
break;
case ETH_SS_TEST:
ef4_ethtool_fill_self_tests(efx, NULL, strings, NULL);
@@ -950,6 +944,37 @@ static int ef4_ethtool_get_class_rule(struct ef4_nic *efx,
}
static int
+ef4_ethtool_get_rxfh_fields(struct net_device *net_dev,
+ struct ethtool_rxfh_fields *info)
+{
+ struct ef4_nic *efx = netdev_priv(net_dev);
+
+ info->data = 0;
+ /* Falcon A0 and A1 had a 4-tuple hash for TCP and UDP, but it was
+ * broken so we do not enable it.
+ * Falcon B0 adds a Toeplitz hash, 4-tuple for TCP and 2-tuple for
+ * other IPv4, including UDP.
+ * See falcon_init_rx_cfg().
+ */
+ if (ef4_nic_rev(efx) < EF4_REV_FALCON_B0)
+ return 0;
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ fallthrough;
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case IPV4_FLOW:
+ info->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int
ef4_ethtool_get_rxnfc(struct net_device *net_dev,
struct ethtool_rxnfc *info, u32 *rule_locs)
{
@@ -960,29 +985,6 @@ ef4_ethtool_get_rxnfc(struct net_device *net_dev,
info->data = efx->n_rx_channels;
return 0;
- case ETHTOOL_GRXFH: {
- unsigned min_revision = 0;
-
- info->data = 0;
- switch (info->flow_type) {
- case TCP_V4_FLOW:
- info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- fallthrough;
- case UDP_V4_FLOW:
- case SCTP_V4_FLOW:
- case AH_ESP_V4_FLOW:
- case IPV4_FLOW:
- info->data |= RXH_IP_SRC | RXH_IP_DST;
- min_revision = EF4_REV_FALCON_B0;
- break;
- default:
- break;
- }
- if (ef4_nic_rev(efx) < min_revision)
- info->data = 0;
- return 0;
- }
-
case ETHTOOL_GRXCLSRLCNT:
info->data = ef4_filter_get_rx_id_limit(efx);
if (info->data == 0)
@@ -1349,6 +1351,7 @@ const struct ethtool_ops ef4_ethtool_ops = {
.get_rxfh_indir_size = ef4_ethtool_get_rxfh_indir_size,
.get_rxfh = ef4_ethtool_get_rxfh,
.set_rxfh = ef4_ethtool_set_rxfh,
+ .get_rxfh_fields = ef4_ethtool_get_rxfh_fields,
.get_module_info = ef4_ethtool_get_module_info,
.get_module_eeprom = ef4_ethtool_get_module_eeprom,
.get_link_ksettings = ef4_ethtool_get_link_ksettings,
diff --git a/drivers/net/ethernet/sfc/falcon/falcon.c b/drivers/net/ethernet/sfc/falcon/falcon.c
index 7a1c9337081b..c44df8e4dd30 100644
--- a/drivers/net/ethernet/sfc/falcon/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon/falcon.c
@@ -367,7 +367,7 @@ static const struct i2c_algo_bit_data falcon_i2c_bit_operations = {
.getsda = falcon_getsda,
.getscl = falcon_getscl,
.udelay = 5,
- /* Wait up to 50 ms for slave to let us pull SCL high */
+ /* Wait up to 50 ms for target to let us pull SCL high */
.timeout = DIV_ROUND_UP(HZ, 20),
};
@@ -1453,8 +1453,8 @@ static void falcon_stats_complete(struct ef4_nic *efx)
static void falcon_stats_timer_func(struct timer_list *t)
{
- struct falcon_nic_data *nic_data = from_timer(nic_data, t,
- stats_timer);
+ struct falcon_nic_data *nic_data = timer_container_of(nic_data, t,
+ stats_timer);
struct ef4_nic *efx = nic_data->efx;
spin_lock(&efx->stats_lock);
@@ -2564,7 +2564,7 @@ static void falcon_remove_nic(struct ef4_nic *efx)
efx->nic_data = NULL;
}
-static size_t falcon_describe_nic_stats(struct ef4_nic *efx, u8 *names)
+static size_t falcon_describe_nic_stats(struct ef4_nic *efx, u8 **names)
{
return ef4_nic_describe_stats(falcon_stat_desc, FALCON_STAT_COUNT,
falcon_stat_mask, names);
@@ -2657,7 +2657,7 @@ void falcon_stop_nic_stats(struct ef4_nic *efx)
++nic_data->stats_disable_count;
spin_unlock_bh(&efx->stats_lock);
- del_timer_sync(&nic_data->stats_timer);
+ timer_delete_sync(&nic_data->stats_timer);
/* Wait enough time for the most recent transfer to
* complete. */
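The stats-timer hunks above are part of the tree-wide timer API rename: from_timer() becomes timer_container_of() and del_timer_sync() becomes timer_delete_sync(), with unchanged semantics. A minimal sketch of the callback pattern, using a hypothetical structure purely for illustration:

	struct my_state {
		struct timer_list my_timer;
		/* ... driver state ... */
	};

	static void my_timer_fn(struct timer_list *t)
	{
		/* Recover the containing object from the embedded timer_list. */
		struct my_state *st = timer_container_of(st, t, my_timer);

		/* ... timer work ... */
	}

	/* Tear-down path: wait for a running callback before freeing. */
	timer_delete_sync(&st->my_timer);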
diff --git a/drivers/net/ethernet/sfc/falcon/farch.c b/drivers/net/ethernet/sfc/falcon/farch.c
index c64623c2e80c..01017c41338e 100644
--- a/drivers/net/ethernet/sfc/falcon/farch.c
+++ b/drivers/net/ethernet/sfc/falcon/farch.c
@@ -1631,28 +1631,6 @@ void ef4_farch_rx_push_indir_table(struct ef4_nic *efx)
}
}
-/* Looks at available SRAM resources and works out how many queues we
- * can support, and where things like descriptor caches should live.
- *
- * SRAM is split up as follows:
- * 0 buftbl entries for channels
- * efx->vf_buftbl_base buftbl entries for SR-IOV
- * efx->rx_dc_base RX descriptor caches
- * efx->tx_dc_base TX descriptor caches
- */
-void ef4_farch_dimension_resources(struct ef4_nic *efx, unsigned sram_lim_qw)
-{
- unsigned vi_count;
-
- /* Account for the buffer table entries backing the datapath channels
- * and the descriptor caches for those channels.
- */
- vi_count = max(efx->n_channels, efx->n_tx_channels * EF4_TXQ_TYPES);
-
- efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
- efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
-}
-
u32 ef4_farch_fpga_ver(struct ef4_nic *efx)
{
ef4_oword_t altera_build;
diff --git a/drivers/net/ethernet/sfc/falcon/net_driver.h b/drivers/net/ethernet/sfc/falcon/net_driver.h
index a2c7139f2b32..7ab0db44720d 100644
--- a/drivers/net/ethernet/sfc/falcon/net_driver.h
+++ b/drivers/net/ethernet/sfc/falcon/net_driver.h
@@ -1057,7 +1057,7 @@ struct ef4_nic_type {
void (*finish_flush)(struct ef4_nic *efx);
void (*prepare_flr)(struct ef4_nic *efx);
void (*finish_flr)(struct ef4_nic *efx);
- size_t (*describe_stats)(struct ef4_nic *efx, u8 *names);
+ size_t (*describe_stats)(struct ef4_nic *efx, u8 **names);
size_t (*update_stats)(struct ef4_nic *efx, u64 *full_stats,
struct rtnl_link_stats64 *core_stats);
void (*start_stats)(struct ef4_nic *efx);
diff --git a/drivers/net/ethernet/sfc/falcon/nic.c b/drivers/net/ethernet/sfc/falcon/nic.c
index 78c851b5a56f..a6304686bc90 100644
--- a/drivers/net/ethernet/sfc/falcon/nic.c
+++ b/drivers/net/ethernet/sfc/falcon/nic.c
@@ -444,18 +444,15 @@ void ef4_nic_get_regs(struct ef4_nic *efx, void *buf)
* bits in the first @count bits of @mask for which a name is defined.
*/
size_t ef4_nic_describe_stats(const struct ef4_hw_stat_desc *desc, size_t count,
- const unsigned long *mask, u8 *names)
+ const unsigned long *mask, u8 **names)
{
size_t visible = 0;
size_t index;
for_each_set_bit(index, mask, count) {
if (desc[index].name) {
- if (names) {
- strscpy(names, desc[index].name,
- ETH_GSTRING_LEN);
- names += ETH_GSTRING_LEN;
- }
+ if (names)
+ ethtool_puts(names, desc[index].name);
++visible;
}
}
@@ -511,14 +508,3 @@ void ef4_nic_update_stats(const struct ef4_hw_stat_desc *desc, size_t count,
}
}
}
-
-void ef4_nic_fix_nodesc_drop_stat(struct ef4_nic *efx, u64 *rx_nodesc_drops)
-{
- /* if down, or this is the first update after coming up */
- if (!(efx->net_dev->flags & IFF_UP) || !efx->rx_nodesc_drops_prev_state)
- efx->rx_nodesc_drops_while_down +=
- *rx_nodesc_drops - efx->rx_nodesc_drops_total;
- efx->rx_nodesc_drops_total = *rx_nodesc_drops;
- efx->rx_nodesc_drops_prev_state = !!(efx->net_dev->flags & IFF_UP);
- *rx_nodesc_drops -= efx->rx_nodesc_drops_while_down;
-}
diff --git a/drivers/net/ethernet/sfc/falcon/nic.h b/drivers/net/ethernet/sfc/falcon/nic.h
index 9f413474bd9f..bc6ef937d0fe 100644
--- a/drivers/net/ethernet/sfc/falcon/nic.h
+++ b/drivers/net/ethernet/sfc/falcon/nic.h
@@ -297,7 +297,7 @@ static inline struct falcon_board *falcon_board(struct ef4_nic *efx)
return &data->board;
}
-struct ethtool_ts_info;
+struct kernel_ethtool_ts_info;
extern const struct ef4_nic_type falcon_a1_nic_type;
extern const struct ef4_nic_type falcon_b0_nic_type;
@@ -477,7 +477,6 @@ void ef4_farch_finish_flr(struct ef4_nic *efx);
void falcon_start_nic_stats(struct ef4_nic *efx);
void falcon_stop_nic_stats(struct ef4_nic *efx);
int falcon_reset_xaui(struct ef4_nic *efx);
-void ef4_farch_dimension_resources(struct ef4_nic *efx, unsigned sram_lim_qw);
void ef4_farch_init_common(struct ef4_nic *efx);
void ef4_farch_rx_push_indir_table(struct ef4_nic *efx);
@@ -498,14 +497,10 @@ size_t ef4_nic_get_regs_len(struct ef4_nic *efx);
void ef4_nic_get_regs(struct ef4_nic *efx, void *buf);
size_t ef4_nic_describe_stats(const struct ef4_hw_stat_desc *desc, size_t count,
- const unsigned long *mask, u8 *names);
+ const unsigned long *mask, u8 **names);
void ef4_nic_update_stats(const struct ef4_hw_stat_desc *desc, size_t count,
const unsigned long *mask, u64 *stats,
const void *dma_buf, bool accumulate);
-void ef4_nic_fix_nodesc_drop_stat(struct ef4_nic *efx, u64 *stat);
-
-#define EF4_MAX_FLUSH_TIME 5000
-
void ef4_farch_generate_event(struct ef4_nic *efx, unsigned int evq,
ef4_qword_t *event);
diff --git a/drivers/net/ethernet/sfc/falcon/rx.c b/drivers/net/ethernet/sfc/falcon/rx.c
index 6bbdb5d2eebf..f69fcf6caca8 100644
--- a/drivers/net/ethernet/sfc/falcon/rx.c
+++ b/drivers/net/ethernet/sfc/falcon/rx.c
@@ -382,7 +382,8 @@ void ef4_fast_push_rx_descriptors(struct ef4_rx_queue *rx_queue, bool atomic)
void ef4_rx_slow_fill(struct timer_list *t)
{
- struct ef4_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);
+ struct ef4_rx_queue *rx_queue = timer_container_of(rx_queue, t,
+ slow_fill);
/* Post an event to cause NAPI to run and refill the queue */
ef4_nic_generate_fill_event(rx_queue);
@@ -791,7 +792,7 @@ void ef4_fini_rx_queue(struct ef4_rx_queue *rx_queue)
netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
"shutting down RX queue %d\n", ef4_rx_queue_index(rx_queue));
- del_timer_sync(&rx_queue->slow_fill);
+ timer_delete_sync(&rx_queue->slow_fill);
/* Release RX buffers from the current read ptr to the write ptr */
if (rx_queue->buffer) {
diff --git a/drivers/net/ethernet/sfc/falcon/tx.c b/drivers/net/ethernet/sfc/falcon/tx.c
index b9369483758c..e6e80b039ca2 100644
--- a/drivers/net/ethernet/sfc/falcon/tx.c
+++ b/drivers/net/ethernet/sfc/falcon/tx.c
@@ -40,14 +40,6 @@ static inline u8 *ef4_tx_get_copy_buffer(struct ef4_tx_queue *tx_queue,
return (u8 *)page_buf->addr + offset;
}
-u8 *ef4_tx_get_copy_buffer_limited(struct ef4_tx_queue *tx_queue,
- struct ef4_tx_buffer *buffer, size_t len)
-{
- if (len > EF4_TX_CB_SIZE)
- return NULL;
- return ef4_tx_get_copy_buffer(tx_queue, buffer);
-}
-
static void ef4_dequeue_buffer(struct ef4_tx_queue *tx_queue,
struct ef4_tx_buffer *buffer,
unsigned int *pkts_compl,
diff --git a/drivers/net/ethernet/sfc/falcon/tx.h b/drivers/net/ethernet/sfc/falcon/tx.h
index 2a88c59cbbbe..868ed8a861ab 100644
--- a/drivers/net/ethernet/sfc/falcon/tx.h
+++ b/drivers/net/ethernet/sfc/falcon/tx.h
@@ -15,9 +15,6 @@
unsigned int ef4_tx_limit_len(struct ef4_tx_queue *tx_queue,
dma_addr_t dma_addr, unsigned int len);
-u8 *ef4_tx_get_copy_buffer_limited(struct ef4_tx_queue *tx_queue,
- struct ef4_tx_buffer *buffer, size_t len);
-
int ef4_enqueue_skb_tso(struct ef4_tx_queue *tx_queue, struct sk_buff *skb,
bool *data_mapped);
diff --git a/drivers/net/ethernet/sfc/fw_formats.h b/drivers/net/ethernet/sfc/fw_formats.h
new file mode 100644
index 000000000000..cbc350c96013
--- /dev/null
+++ b/drivers/net/ethernet/sfc/fw_formats.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for AMD network controllers and boards
+ * Copyright (C) 2025, Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef _EFX_FW_FORMATS_H
+#define _EFX_FW_FORMATS_H
+
+/* Header layouts of firmware update images recognised by Efx NICs.
+ * The sources-of-truth for these layouts are AMD internal documents
+ * and sfregistry headers, neither of which are available externally
+ * nor usable directly by the driver.
+ *
+ * While each format includes a 'magic number', these are at different
+ * offsets in the various formats, and a legal header for one format
+ * could have the right value in whichever field occupies that offset
+ * to match another format's magic.
+ * Besides, some packaging formats (such as CMS/PKCS#7 signed images)
+ * prepend a header for which finding the size is a non-trivial task;
+ * rather than trying to parse those headers, we search byte-by-byte
+ * through the provided firmware image looking for a valid header.
+ * Thus, format recognition has to include validation of the checksum
+ * field, even though the firmware will validate that itself before
+ * applying the image.
+ */
+
+/* EF10 (Medford2, X2) "reflash" header format. Defined in SF-121352-AN */
+#define EFX_REFLASH_HEADER_MAGIC_OFST 0
+#define EFX_REFLASH_HEADER_MAGIC_LEN 4
+#define EFX_REFLASH_HEADER_MAGIC_VALUE 0x106F1A5
+
+#define EFX_REFLASH_HEADER_VERSION_OFST 4
+#define EFX_REFLASH_HEADER_VERSION_LEN 4
+#define EFX_REFLASH_HEADER_VERSION_VALUE 4
+
+#define EFX_REFLASH_HEADER_FIRMWARE_TYPE_OFST 8
+#define EFX_REFLASH_HEADER_FIRMWARE_TYPE_LEN 4
+#define EFX_REFLASH_FIRMWARE_TYPE_BOOTROM 0x2
+#define EFX_REFLASH_FIRMWARE_TYPE_BUNDLE 0xd
+
+#define EFX_REFLASH_HEADER_FIRMWARE_SUBTYPE_OFST 12
+#define EFX_REFLASH_HEADER_FIRMWARE_SUBTYPE_LEN 4
+
+#define EFX_REFLASH_HEADER_PAYLOAD_SIZE_OFST 16
+#define EFX_REFLASH_HEADER_PAYLOAD_SIZE_LEN 4
+
+#define EFX_REFLASH_HEADER_LENGTH_OFST 20
+#define EFX_REFLASH_HEADER_LENGTH_LEN 4
+
+/* Reflash trailer */
+#define EFX_REFLASH_TRAILER_CRC_OFST 0
+#define EFX_REFLASH_TRAILER_CRC_LEN 4
+
+#define EFX_REFLASH_TRAILER_LEN \
+ (EFX_REFLASH_TRAILER_CRC_OFST + EFX_REFLASH_TRAILER_CRC_LEN)
+
+/* EF100 "SmartNIC image" header format.
+ * Defined in sfregistry "src/layout/snic_image_hdr.h".
+ */
+#define EFX_SNICIMAGE_HEADER_MAGIC_OFST 16
+#define EFX_SNICIMAGE_HEADER_MAGIC_LEN 4
+#define EFX_SNICIMAGE_HEADER_MAGIC_VALUE 0x541C057A
+
+#define EFX_SNICIMAGE_HEADER_VERSION_OFST 20
+#define EFX_SNICIMAGE_HEADER_VERSION_LEN 4
+#define EFX_SNICIMAGE_HEADER_VERSION_VALUE 1
+
+#define EFX_SNICIMAGE_HEADER_LENGTH_OFST 24
+#define EFX_SNICIMAGE_HEADER_LENGTH_LEN 4
+
+#define EFX_SNICIMAGE_HEADER_PARTITION_TYPE_OFST 36
+#define EFX_SNICIMAGE_HEADER_PARTITION_TYPE_LEN 4
+
+#define EFX_SNICIMAGE_HEADER_PARTITION_SUBTYPE_OFST 40
+#define EFX_SNICIMAGE_HEADER_PARTITION_SUBTYPE_LEN 4
+
+#define EFX_SNICIMAGE_HEADER_PAYLOAD_SIZE_OFST 60
+#define EFX_SNICIMAGE_HEADER_PAYLOAD_SIZE_LEN 4
+
+#define EFX_SNICIMAGE_HEADER_CRC_OFST 64
+#define EFX_SNICIMAGE_HEADER_CRC_LEN 4
+
+#define EFX_SNICIMAGE_HEADER_MINLEN 256
+
+/* EF100 "SmartNIC bundle" header format. Defined in SF-122606-TC */
+#define EFX_SNICBUNDLE_HEADER_MAGIC_OFST 0
+#define EFX_SNICBUNDLE_HEADER_MAGIC_LEN 4
+#define EFX_SNICBUNDLE_HEADER_MAGIC_VALUE 0xB1001001
+
+#define EFX_SNICBUNDLE_HEADER_VERSION_OFST 4
+#define EFX_SNICBUNDLE_HEADER_VERSION_LEN 4
+#define EFX_SNICBUNDLE_HEADER_VERSION_VALUE 1
+
+#define EFX_SNICBUNDLE_HEADER_BUNDLE_TYPE_OFST 8
+#define EFX_SNICBUNDLE_HEADER_BUNDLE_TYPE_LEN 4
+
+#define EFX_SNICBUNDLE_HEADER_BUNDLE_SUBTYPE_OFST 12
+#define EFX_SNICBUNDLE_HEADER_BUNDLE_SUBTYPE_LEN 4
+
+#define EFX_SNICBUNDLE_HEADER_LENGTH_OFST 20
+#define EFX_SNICBUNDLE_HEADER_LENGTH_LEN 4
+
+#define EFX_SNICBUNDLE_HEADER_CRC_OFST 224
+#define EFX_SNICBUNDLE_HEADER_CRC_LEN 4
+
+#define EFX_SNICBUNDLE_HEADER_LEN \
+ (EFX_SNICBUNDLE_HEADER_CRC_OFST + EFX_SNICBUNDLE_HEADER_CRC_LEN)
+
+#endif /* _EFX_FW_FORMATS_H */
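The recognition strategy the header comment describes, scanning the image byte by byte and treating a location as a header only when the fixed fields look right and the checksum validates, can be sketched roughly as below. The helpers are hypothetical stand-ins (the real parsing lives in the driver's reflash code, which also handles the SmartNIC image and bundle layouts); fields are assumed little-endian, as elsewhere in the driver:

	/* Illustrative sketch only: locate a plausible reflash header in @image.
	 * efx_reflash_crc_ok() is a hypothetical stand-in for trailer CRC
	 * validation against EFX_REFLASH_TRAILER_CRC_OFST.
	 */
	static ssize_t efx_reflash_find_header(const u8 *image, size_t len)
	{
		size_t ofst;
		u32 magic, version;

		for (ofst = 0;
		     ofst + EFX_REFLASH_HEADER_LENGTH_OFST +
			     EFX_REFLASH_HEADER_LENGTH_LEN <= len;
		     ofst++) {
			magic = get_unaligned_le32(image + ofst +
						   EFX_REFLASH_HEADER_MAGIC_OFST);
			version = get_unaligned_le32(image + ofst +
						     EFX_REFLASH_HEADER_VERSION_OFST);
			if (magic != EFX_REFLASH_HEADER_MAGIC_VALUE ||
			    version != EFX_REFLASH_HEADER_VERSION_VALUE)
				continue;
			if (efx_reflash_crc_ok(image + ofst, len - ofst))
				return ofst;
		}
		return -ENOENT;
	}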
diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h
index 4cc7b501135f..ef374a8e05c3 100644
--- a/drivers/net/ethernet/sfc/io.h
+++ b/drivers/net/ethernet/sfc/io.h
@@ -217,28 +217,4 @@ _efx_writed_page(struct efx_nic *efx, const efx_dword_t *value,
(reg) != 0xa1c), \
page)
-/* Write TIMER_COMMAND. This is a page-mapped 32-bit CSR, but a bug
- * in the BIU means that writes to TIMER_COMMAND[0] invalidate the
- * collector register.
- */
-static inline void _efx_writed_page_locked(struct efx_nic *efx,
- const efx_dword_t *value,
- unsigned int reg,
- unsigned int page)
-{
- unsigned long flags __attribute__ ((unused));
-
- if (page == 0) {
- spin_lock_irqsave(&efx->biu_lock, flags);
- efx_writed(efx, value, efx_paged_reg(efx, page, reg));
- spin_unlock_irqrestore(&efx->biu_lock, flags);
- } else {
- efx_writed(efx, value, efx_paged_reg(efx, page, reg));
- }
-}
-#define efx_writed_page_locked(efx, value, reg, page) \
- _efx_writed_page_locked(efx, value, \
- reg + BUILD_BUG_ON_ZERO((reg) != 0x420), \
- page)
-
#endif /* EFX_IO_H */
diff --git a/drivers/net/ethernet/sfc/mae.c b/drivers/net/ethernet/sfc/mae.c
index 10709d828a63..7cfd9000f79d 100644
--- a/drivers/net/ethernet/sfc/mae.c
+++ b/drivers/net/ethernet/sfc/mae.c
@@ -76,17 +76,6 @@ void efx_mae_mport_uplink(struct efx_nic *efx __always_unused, u32 *out)
*out = EFX_DWORD_VAL(mport);
}
-void efx_mae_mport_vf(struct efx_nic *efx __always_unused, u32 vf_id, u32 *out)
-{
- efx_dword_t mport;
-
- EFX_POPULATE_DWORD_3(mport,
- MAE_MPORT_SELECTOR_TYPE, MAE_MPORT_SELECTOR_TYPE_FUNC,
- MAE_MPORT_SELECTOR_FUNC_PF_ID, MAE_MPORT_SELECTOR_FUNC_PF_ID_CALLER,
- MAE_MPORT_SELECTOR_FUNC_VF_ID, vf_id);
- *out = EFX_DWORD_VAL(mport);
-}
-
/* Constructs an mport selector from an mport ID, because they're not the same */
void efx_mae_mport_mport(struct efx_nic *efx __always_unused, u32 mport_id, u32 *out)
{
@@ -766,7 +755,7 @@ int efx_mae_match_check_caps_lhs(struct efx_nic *efx,
rc = efx_mae_match_check_cap_typ(supported_fields[MAE_FIELD_INGRESS_PORT],
ingress_port_mask_type);
if (rc) {
- NL_SET_ERR_MSG_FMT_MOD(extack, "No support for %s mask in field %s\n",
+ NL_SET_ERR_MSG_FMT_MOD(extack, "No support for %s mask in field %s",
mask_type_name(ingress_port_mask_type),
"ingress_port");
return rc;
@@ -1101,6 +1090,9 @@ void efx_mae_remove_mport(void *desc, void *arg)
kfree(mport);
}
+/*
+ * Takes ownership of @desc, even if it returns an error
+ */
static int efx_mae_process_mport(struct efx_nic *efx,
struct mae_mport_desc *desc)
{
@@ -1111,6 +1103,7 @@ static int efx_mae_process_mport(struct efx_nic *efx,
if (!IS_ERR_OR_NULL(mport)) {
netif_err(efx, drv, efx->net_dev,
"mport with id %u does exist!!!\n", desc->mport_id);
+ kfree(desc);
return -EEXIST;
}
diff --git a/drivers/net/ethernet/sfc/mae.h b/drivers/net/ethernet/sfc/mae.h
index 8df30bc4f3ba..db79912c86d8 100644
--- a/drivers/net/ethernet/sfc/mae.h
+++ b/drivers/net/ethernet/sfc/mae.h
@@ -23,7 +23,6 @@ int efx_mae_free_mport(struct efx_nic *efx, u32 id);
void efx_mae_mport_wire(struct efx_nic *efx, u32 *out);
void efx_mae_mport_uplink(struct efx_nic *efx, u32 *out);
-void efx_mae_mport_vf(struct efx_nic *efx, u32 vf_id, u32 *out);
void efx_mae_mport_mport(struct efx_nic *efx, u32 mport_id, u32 *out);
int efx_mae_lookup_mport(struct efx_nic *efx, u32 selector, u32 *id);
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 76578502226e..5e9b8def5e42 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -530,7 +530,7 @@ static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout)
* of it aborting the next request.
*/
if (!timeout)
- del_timer_sync(&mcdi->async_timer);
+ timer_delete_sync(&mcdi->async_timer);
spin_lock(&mcdi->async_lock);
async = list_first_entry(&mcdi->async_list,
@@ -605,7 +605,7 @@ static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
static void efx_mcdi_timeout_async(struct timer_list *t)
{
- struct efx_mcdi_iface *mcdi = from_timer(mcdi, t, async_timer);
+ struct efx_mcdi_iface *mcdi = timer_container_of(mcdi, t, async_timer);
efx_mcdi_complete_async(mcdi, true);
}
@@ -1051,15 +1051,6 @@ efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
cookie, false);
}
-int efx_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd,
- const efx_dword_t *inbuf, size_t inlen,
- size_t outlen, efx_mcdi_async_completer *complete,
- unsigned long cookie)
-{
- return _efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen, complete,
- cookie, true);
-}
-
int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
efx_dword_t *outbuf, size_t outlen,
size_t *outlen_actual)
@@ -1068,14 +1059,6 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
outlen_actual, false, NULL, NULL);
}
-int efx_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned cmd, size_t inlen,
- efx_dword_t *outbuf, size_t outlen,
- size_t *outlen_actual)
-{
- return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
- outlen_actual, true, NULL, NULL);
-}
-
void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd,
size_t inlen, efx_dword_t *outbuf,
size_t outlen, int rc)
@@ -1139,7 +1122,7 @@ void efx_mcdi_flush_async(struct efx_nic *efx)
/* We must be in poll or fail mode so no more requests can be queued */
BUG_ON(mcdi->mode == MCDI_MODE_EVENTS);
- del_timer_sync(&mcdi->async_timer);
+ timer_delete_sync(&mcdi->async_timer);
/* If a request is still running, make sure we give the MC
* time to complete it so that the response won't overwrite our
@@ -1642,12 +1625,15 @@ fail:
return rc;
}
+#define EFX_MCDI_NVRAM_DEFAULT_WRITE_LEN 128
+
int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
size_t *size_out, size_t *erase_size_out,
- bool *protected_out)
+ size_t *write_size_out, bool *protected_out)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_V2_OUT_LEN);
+ size_t write_size = 0;
size_t outlen;
int rc;
@@ -1662,6 +1648,12 @@ int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
goto fail;
}
+ if (outlen >= MC_CMD_NVRAM_INFO_V2_OUT_LEN)
+ write_size = MCDI_DWORD(outbuf, NVRAM_INFO_V2_OUT_WRITESIZE);
+ else
+ write_size = EFX_MCDI_NVRAM_DEFAULT_WRITE_LEN;
+
+ *write_size_out = write_size;
*size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
*erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE);
*protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) &
@@ -1982,33 +1974,6 @@ efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out)
}
-int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out)
-{
- MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_GET_OUT_LEN);
- size_t outlen;
- int rc;
-
- rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0,
- outbuf, sizeof(outbuf), &outlen);
- if (rc)
- goto fail;
-
- if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) {
- rc = -EIO;
- goto fail;
- }
-
- *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID);
-
- return 0;
-
-fail:
- *id_out = -1;
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
- return rc;
-}
-
-
int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_REMOVE_IN_LEN);
@@ -2021,38 +1986,6 @@ int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
return rc;
}
-int efx_mcdi_flush_rxqs(struct efx_nic *efx)
-{
- struct efx_channel *channel;
- struct efx_rx_queue *rx_queue;
- MCDI_DECLARE_BUF(inbuf,
- MC_CMD_FLUSH_RX_QUEUES_IN_LEN(EFX_MAX_CHANNELS));
- int rc, count;
-
- BUILD_BUG_ON(EFX_MAX_CHANNELS >
- MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);
-
- count = 0;
- efx_for_each_channel(channel, efx) {
- efx_for_each_channel_rx_queue(rx_queue, channel) {
- if (rx_queue->flush_pending) {
- rx_queue->flush_pending = false;
- atomic_dec(&efx->rxq_flush_pending);
- MCDI_SET_ARRAY_DWORD(
- inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
- count, efx_rx_queue_index(rx_queue));
- count++;
- }
- }
- }
-
- rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf,
- MC_CMD_FLUSH_RX_QUEUES_IN_LEN(count), NULL, 0, NULL);
- WARN_ON(rc < 0);
-
- return rc;
-}
-
int efx_mcdi_wol_filter_reset(struct efx_nic *efx)
{
int rc;
@@ -2239,11 +2172,9 @@ out_free:
return rc;
}
-#ifdef CONFIG_SFC_MTD
-
#define EFX_MCDI_NVRAM_LEN_MAX 128
-static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
+int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN);
int rc;
@@ -2261,6 +2192,8 @@ static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
return rc;
}
+#ifdef CONFIG_SFC_MTD
+
static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
loff_t offset, u8 *buffer, size_t length)
{
@@ -2285,13 +2218,20 @@ static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
return 0;
}
-static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
- loff_t offset, const u8 *buffer, size_t length)
+#endif /* CONFIG_SFC_MTD */
+
+int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
+ loff_t offset, const u8 *buffer, size_t length)
{
- MCDI_DECLARE_BUF(inbuf,
- MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX));
+ efx_dword_t *inbuf;
+ size_t inlen;
int rc;
+ inlen = ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4);
+ inbuf = kzalloc(inlen, GFP_KERNEL);
+ if (!inbuf)
+ return -ENOMEM;
+
MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
@@ -2299,14 +2239,14 @@ static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);
- rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
- ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
- NULL, 0, NULL);
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf, inlen, NULL, 0, NULL);
+ kfree(inbuf);
+
return rc;
}
-static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
- loff_t offset, size_t length)
+int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type, loff_t offset,
+ size_t length)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_ERASE_IN_LEN);
int rc;
@@ -2322,7 +2262,8 @@ static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
return rc;
}
-static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
+int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type,
+ enum efx_update_finish_mode mode)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN);
@@ -2330,22 +2271,41 @@ static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
int rc, rc2;
MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);
- /* Always set this flag. Old firmware ignores it */
- MCDI_POPULATE_DWORD_1(inbuf, NVRAM_UPDATE_FINISH_V2_IN_FLAGS,
+
+ /* Old firmware doesn't support background update finish and abort
+ * operations. Fall back to waiting if the requested mode is not
+ * supported.
+ */
+ if (!efx_has_cap(efx, NVRAM_UPDATE_POLL_VERIFY_RESULT) ||
+ (!efx_has_cap(efx, NVRAM_UPDATE_ABORT_SUPPORTED) &&
+ mode == EFX_UPDATE_FINISH_ABORT))
+ mode = EFX_UPDATE_FINISH_WAIT;
+
+ MCDI_POPULATE_DWORD_4(inbuf, NVRAM_UPDATE_FINISH_V2_IN_FLAGS,
NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT,
- 1);
+ (mode != EFX_UPDATE_FINISH_ABORT),
+ NVRAM_UPDATE_FINISH_V2_IN_FLAG_RUN_IN_BACKGROUND,
+ (mode == EFX_UPDATE_FINISH_BACKGROUND),
+ NVRAM_UPDATE_FINISH_V2_IN_FLAG_POLL_VERIFY_RESULT,
+ (mode == EFX_UPDATE_FINISH_POLL),
+ NVRAM_UPDATE_FINISH_V2_IN_FLAG_ABORT,
+ (mode == EFX_UPDATE_FINISH_ABORT));
rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (!rc && outlen >= MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN) {
rc2 = MCDI_DWORD(outbuf, NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE);
- if (rc2 != MC_CMD_NVRAM_VERIFY_RC_SUCCESS)
+ if (rc2 != MC_CMD_NVRAM_VERIFY_RC_SUCCESS &&
+ rc2 != MC_CMD_NVRAM_VERIFY_RC_PENDING)
netif_err(efx, drv, efx->net_dev,
"NVRAM update failed verification with code 0x%x\n",
rc2);
switch (rc2) {
case MC_CMD_NVRAM_VERIFY_RC_SUCCESS:
break;
+ case MC_CMD_NVRAM_VERIFY_RC_PENDING:
+ rc = -EAGAIN;
+ break;
case MC_CMD_NVRAM_VERIFY_RC_CMS_CHECK_FAILED:
case MC_CMD_NVRAM_VERIFY_RC_MESSAGE_DIGEST_CHECK_FAILED:
case MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHECK_FAILED:
@@ -2360,6 +2320,8 @@ static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
case MC_CMD_NVRAM_VERIFY_RC_NO_VALID_SIGNATURES:
case MC_CMD_NVRAM_VERIFY_RC_NO_TRUSTED_APPROVERS:
case MC_CMD_NVRAM_VERIFY_RC_NO_SIGNATURE_MATCH:
+ case MC_CMD_NVRAM_VERIFY_RC_REJECT_TEST_SIGNED:
+ case MC_CMD_NVRAM_VERIFY_RC_SECURITY_LEVEL_DOWNGRADE:
rc = -EPERM;
break;
default:
@@ -2372,6 +2334,42 @@ static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
return rc;
}
+#define EFX_MCDI_NVRAM_UPDATE_FINISH_INITIAL_POLL_DELAY_MS 5
+#define EFX_MCDI_NVRAM_UPDATE_FINISH_MAX_POLL_DELAY_MS 5000
+#define EFX_MCDI_NVRAM_UPDATE_FINISH_RETRIES 185
+
+int efx_mcdi_nvram_update_finish_polled(struct efx_nic *efx, unsigned int type)
+{
+ unsigned int delay = EFX_MCDI_NVRAM_UPDATE_FINISH_INITIAL_POLL_DELAY_MS;
+ unsigned int retry = 0;
+ int rc;
+
+ /* NVRAM updates can take a long time (e.g. up to 1 minute for bundle
+ * images). Polling for NVRAM update completion ensures that other MCDI
+ * commands can be issued before the background NVRAM update completes.
+ *
+ * The initial call either completes the update synchronously, or
+ * returns -EAGAIN to indicate processing is continuing. In the latter
+ * case, we poll for at least 900 seconds, at increasing intervals
+ * (5ms, 50ms, 500ms, 5s).
+ */
+ rc = efx_mcdi_nvram_update_finish(efx, type, EFX_UPDATE_FINISH_BACKGROUND);
+ while (rc == -EAGAIN) {
+ if (retry > EFX_MCDI_NVRAM_UPDATE_FINISH_RETRIES)
+ return -ETIMEDOUT;
+ retry++;
+
+ msleep(delay);
+ if (delay < EFX_MCDI_NVRAM_UPDATE_FINISH_MAX_POLL_DELAY_MS)
+ delay *= 10;
+
+ rc = efx_mcdi_nvram_update_finish(efx, type, EFX_UPDATE_FINISH_POLL);
+ }
+ return rc;
+}
+
+#ifdef CONFIG_SFC_MTD
+
int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start,
size_t len, size_t *retlen, u8 *buffer)
{
@@ -2465,7 +2463,8 @@ int efx_mcdi_mtd_sync(struct mtd_info *mtd)
if (part->updating) {
part->updating = false;
- rc = efx_mcdi_nvram_update_finish(efx, part->nvram_type);
+ rc = efx_mcdi_nvram_update_finish(efx, part->nvram_type,
+ EFX_UPDATE_FINISH_WAIT);
}
return rc;
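Taken together, the mcdi.c changes export the whole NVRAM update sequence (info, update_start, erase, write, update_finish) instead of keeping it private to the MTD code, and add a polled finish for long-running bundle updates. A rough sketch of how a firmware-update caller might string these together; the chunk loop, variable names and error handling are illustrative, not lifted from the driver, and buf/len are assumed to describe the image being written:

	size_t size, erase_size, write_size, offset;
	bool protected;
	int rc;

	rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &write_size,
				 &protected);
	if (rc)
		return rc;
	if (protected)
		return -EPERM;

	rc = efx_mcdi_nvram_update_start(efx, type);
	if (rc)
		return rc;

	rc = efx_mcdi_nvram_erase(efx, type, 0, size);
	for (offset = 0; !rc && offset < len; offset += write_size)
		rc = efx_mcdi_nvram_write(efx, type, offset, buf + offset,
					  min(write_size, len - offset));

	/* Abort on failure; otherwise finish in the background and poll. */
	if (rc)
		efx_mcdi_nvram_update_finish(efx, type, EFX_UPDATE_FINISH_ABORT);
	else
		rc = efx_mcdi_nvram_update_finish_polled(efx, type);
	return rc;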
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index ea612c619874..3755cd3fe1e6 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -155,9 +155,6 @@ int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
efx_dword_t *outbuf, size_t outlen,
size_t *outlen_actual);
-int efx_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned cmd,
- size_t inlen, efx_dword_t *outbuf,
- size_t outlen, size_t *outlen_actual);
typedef void efx_mcdi_async_completer(struct efx_nic *efx,
unsigned long cookie, int rc,
@@ -167,11 +164,6 @@ int efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
const efx_dword_t *inbuf, size_t inlen, size_t outlen,
efx_mcdi_async_completer *complete,
unsigned long cookie);
-int efx_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd,
- const efx_dword_t *inbuf, size_t inlen,
- size_t outlen,
- efx_mcdi_async_completer *complete,
- unsigned long cookie);
void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd,
size_t inlen, efx_dword_t *outbuf,
@@ -400,7 +392,7 @@ int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq);
int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out);
int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
size_t *size_out, size_t *erase_size_out,
- bool *protected_out);
+ size_t *write_size_out, bool *protected_out);
int efx_new_mcdi_nvram_test_all(struct efx_nic *efx);
int efx_mcdi_nvram_metadata(struct efx_nic *efx, unsigned int type,
u32 *subtype, u16 version[4], char *desc,
@@ -410,10 +402,8 @@ int efx_mcdi_handle_assertion(struct efx_nic *efx);
int efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac,
int *id_out);
-int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id);
int efx_mcdi_wol_filter_reset(struct efx_nic *efx);
-int efx_mcdi_flush_rxqs(struct efx_nic *efx);
void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
void efx_mcdi_mac_start_stats(struct efx_nic *efx);
void efx_mcdi_mac_stop_stats(struct efx_nic *efx);
@@ -434,6 +424,26 @@ static inline int efx_mcdi_mon_probe(struct efx_nic *efx) { return 0; }
static inline void efx_mcdi_mon_remove(struct efx_nic *efx) {}
#endif
+int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type);
+int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
+ loff_t offset, const u8 *buffer, size_t length);
+int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
+ loff_t offset, size_t length);
+int efx_mcdi_nvram_metadata(struct efx_nic *efx, unsigned int type,
+ u32 *subtype, u16 version[4], char *desc,
+ size_t descsize);
+
+enum efx_update_finish_mode {
+ EFX_UPDATE_FINISH_WAIT,
+ EFX_UPDATE_FINISH_BACKGROUND,
+ EFX_UPDATE_FINISH_POLL,
+ EFX_UPDATE_FINISH_ABORT,
+};
+
+int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type,
+ enum efx_update_finish_mode mode);
+int efx_mcdi_nvram_update_finish_polled(struct efx_nic *efx, unsigned int type);
+
#ifdef CONFIG_SFC_MTD
int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start, size_t len,
size_t *retlen, u8 *buffer);
diff --git a/drivers/net/ethernet/sfc/mcdi_filters.c b/drivers/net/ethernet/sfc/mcdi_filters.c
index 4ff6586116ee..6ef96292909a 100644
--- a/drivers/net/ethernet/sfc/mcdi_filters.c
+++ b/drivers/net/ethernet/sfc/mcdi_filters.c
@@ -194,7 +194,7 @@ efx_mcdi_filter_push_prep_set_match_fields(struct efx_nic *efx,
static void efx_mcdi_filter_push_prep(struct efx_nic *efx,
const struct efx_filter_spec *spec,
efx_dword_t *inbuf, u64 handle,
- struct efx_rss_context *ctx,
+ struct efx_rss_context_priv *ctx,
bool replacing)
{
u32 flags = spec->flags;
@@ -245,7 +245,7 @@ static void efx_mcdi_filter_push_prep(struct efx_nic *efx,
static int efx_mcdi_filter_push(struct efx_nic *efx,
const struct efx_filter_spec *spec, u64 *handle,
- struct efx_rss_context *ctx, bool replacing)
+ struct efx_rss_context_priv *ctx, bool replacing)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_EXT_OUT_LEN);
@@ -345,9 +345,9 @@ static s32 efx_mcdi_filter_insert_locked(struct efx_nic *efx,
bool replace_equal)
{
DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
+ struct efx_rss_context_priv *ctx = NULL;
struct efx_mcdi_filter_table *table;
struct efx_filter_spec *saved_spec;
- struct efx_rss_context *ctx = NULL;
unsigned int match_pri, hash;
unsigned int priv_flags;
bool rss_locked = false;
@@ -380,12 +380,12 @@ static s32 efx_mcdi_filter_insert_locked(struct efx_nic *efx,
bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
if (spec->flags & EFX_FILTER_FLAG_RX_RSS) {
- mutex_lock(&efx->rss_lock);
+ mutex_lock(&efx->net_dev->ethtool->rss_lock);
rss_locked = true;
if (spec->rss_context)
ctx = efx_find_rss_context_entry(efx, spec->rss_context);
else
- ctx = &efx->rss_context;
+ ctx = &efx->rss_context.priv;
if (!ctx) {
rc = -ENOENT;
goto out_unlock;
@@ -548,7 +548,7 @@ static s32 efx_mcdi_filter_insert_locked(struct efx_nic *efx,
out_unlock:
if (rss_locked)
- mutex_unlock(&efx->rss_lock);
+ mutex_unlock(&efx->net_dev->ethtool->rss_lock);
up_write(&table->lock);
return rc;
}
@@ -611,13 +611,13 @@ static int efx_mcdi_filter_remove_internal(struct efx_nic *efx,
new_spec.priority = EFX_FILTER_PRI_AUTO;
new_spec.flags = (EFX_FILTER_FLAG_RX |
- (efx_rss_active(&efx->rss_context) ?
+ (efx_rss_active(&efx->rss_context.priv) ?
EFX_FILTER_FLAG_RX_RSS : 0));
new_spec.dmaq_id = 0;
new_spec.rss_context = 0;
rc = efx_mcdi_filter_push(efx, &new_spec,
&table->entry[filter_idx].handle,
- &efx->rss_context,
+ &efx->rss_context.priv,
true);
if (rc == 0)
@@ -764,7 +764,7 @@ static int efx_mcdi_filter_insert_addr_list(struct efx_nic *efx,
ids = vlan->uc;
}
- filter_flags = efx_rss_active(&efx->rss_context) ? EFX_FILTER_FLAG_RX_RSS : 0;
+ filter_flags = efx_rss_active(&efx->rss_context.priv) ? EFX_FILTER_FLAG_RX_RSS : 0;
/* Insert/renew filters */
for (i = 0; i < addr_count; i++) {
@@ -833,7 +833,7 @@ static int efx_mcdi_filter_insert_def(struct efx_nic *efx,
int rc;
u16 *id;
- filter_flags = efx_rss_active(&efx->rss_context) ? EFX_FILTER_FLAG_RX_RSS : 0;
+ filter_flags = efx_rss_active(&efx->rss_context.priv) ? EFX_FILTER_FLAG_RX_RSS : 0;
efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
@@ -1375,8 +1375,8 @@ void efx_mcdi_filter_table_restore(struct efx_nic *efx)
struct efx_mcdi_filter_table *table = efx->filter_state;
unsigned int invalid_filters = 0, failed = 0;
struct efx_mcdi_filter_vlan *vlan;
+ struct efx_rss_context_priv *ctx;
struct efx_filter_spec *spec;
- struct efx_rss_context *ctx;
unsigned int filter_idx;
u32 mcdi_flags;
int match_pri;
@@ -1388,7 +1388,7 @@ void efx_mcdi_filter_table_restore(struct efx_nic *efx)
return;
down_write(&table->lock);
- mutex_lock(&efx->rss_lock);
+ mutex_lock(&efx->net_dev->ethtool->rss_lock);
for (filter_idx = 0; filter_idx < EFX_MCDI_FILTER_TBL_ROWS; filter_idx++) {
spec = efx_mcdi_filter_entry_spec(table, filter_idx);
@@ -1407,7 +1407,7 @@ void efx_mcdi_filter_table_restore(struct efx_nic *efx)
if (spec->rss_context)
ctx = efx_find_rss_context_entry(efx, spec->rss_context);
else
- ctx = &efx->rss_context;
+ ctx = &efx->rss_context.priv;
if (spec->flags & EFX_FILTER_FLAG_RX_RSS) {
if (!ctx) {
netif_warn(efx, drv, efx->net_dev,
@@ -1444,7 +1444,7 @@ not_restored:
}
}
- mutex_unlock(&efx->rss_lock);
+ mutex_unlock(&efx->net_dev->ethtool->rss_lock);
up_write(&table->lock);
/*
@@ -1861,7 +1861,8 @@ out_unlock:
RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN |\
RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN)
-int efx_mcdi_get_rss_context_flags(struct efx_nic *efx, u32 context, u32 *flags)
+static int efx_mcdi_get_rss_context_flags(struct efx_nic *efx, u32 context,
+ u32 *flags)
{
/*
* Firmware had a bug (sfc bug 61952) where it would not actually
@@ -1909,8 +1910,8 @@ int efx_mcdi_get_rss_context_flags(struct efx_nic *efx, u32 context, u32 *flags)
* Defaults are 4-tuple for TCP and 2-tuple for UDP and other-IP, so we
* just need to set the UDP ports flags (for both IP versions).
*/
-void efx_mcdi_set_rss_context_flags(struct efx_nic *efx,
- struct efx_rss_context *ctx)
+static void efx_mcdi_set_rss_context_flags(struct efx_nic *efx,
+ struct efx_rss_context_priv *ctx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN);
u32 flags;
@@ -1931,7 +1932,7 @@ void efx_mcdi_set_rss_context_flags(struct efx_nic *efx,
}
static int efx_mcdi_filter_alloc_rss_context(struct efx_nic *efx, bool exclusive,
- struct efx_rss_context *ctx,
+ struct efx_rss_context_priv *ctx,
unsigned *context_size)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
@@ -2032,25 +2033,26 @@ void efx_mcdi_rx_free_indir_table(struct efx_nic *efx)
{
int rc;
- if (efx->rss_context.context_id != EFX_MCDI_RSS_CONTEXT_INVALID) {
- rc = efx_mcdi_filter_free_rss_context(efx, efx->rss_context.context_id);
+ if (efx->rss_context.priv.context_id != EFX_MCDI_RSS_CONTEXT_INVALID) {
+ rc = efx_mcdi_filter_free_rss_context(efx, efx->rss_context.priv.context_id);
WARN_ON(rc != 0);
}
- efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
+ efx->rss_context.priv.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
}
static int efx_mcdi_filter_rx_push_shared_rss_config(struct efx_nic *efx,
unsigned *context_size)
{
struct efx_mcdi_filter_table *table = efx->filter_state;
- int rc = efx_mcdi_filter_alloc_rss_context(efx, false, &efx->rss_context,
- context_size);
+ int rc = efx_mcdi_filter_alloc_rss_context(efx, false,
+ &efx->rss_context.priv,
+ context_size);
if (rc != 0)
return rc;
table->rx_rss_context_exclusive = false;
- efx_set_default_rx_indir_table(efx, &efx->rss_context);
+ efx_set_default_rx_indir_table(efx, efx->rss_context.rx_indir_table);
return 0;
}
@@ -2058,26 +2060,27 @@ static int efx_mcdi_filter_rx_push_exclusive_rss_config(struct efx_nic *efx,
const u32 *rx_indir_table,
const u8 *key)
{
+ u32 old_rx_rss_context = efx->rss_context.priv.context_id;
struct efx_mcdi_filter_table *table = efx->filter_state;
- u32 old_rx_rss_context = efx->rss_context.context_id;
int rc;
- if (efx->rss_context.context_id == EFX_MCDI_RSS_CONTEXT_INVALID ||
+ if (efx->rss_context.priv.context_id == EFX_MCDI_RSS_CONTEXT_INVALID ||
!table->rx_rss_context_exclusive) {
- rc = efx_mcdi_filter_alloc_rss_context(efx, true, &efx->rss_context,
- NULL);
+ rc = efx_mcdi_filter_alloc_rss_context(efx, true,
+ &efx->rss_context.priv,
+ NULL);
if (rc == -EOPNOTSUPP)
return rc;
else if (rc != 0)
goto fail1;
}
- rc = efx_mcdi_filter_populate_rss_table(efx, efx->rss_context.context_id,
- rx_indir_table, key);
+ rc = efx_mcdi_filter_populate_rss_table(efx, efx->rss_context.priv.context_id,
+ rx_indir_table, key);
if (rc != 0)
goto fail2;
- if (efx->rss_context.context_id != old_rx_rss_context &&
+ if (efx->rss_context.priv.context_id != old_rx_rss_context &&
old_rx_rss_context != EFX_MCDI_RSS_CONTEXT_INVALID)
WARN_ON(efx_mcdi_filter_free_rss_context(efx, old_rx_rss_context) != 0);
table->rx_rss_context_exclusive = true;
@@ -2091,9 +2094,9 @@ static int efx_mcdi_filter_rx_push_exclusive_rss_config(struct efx_nic *efx,
return 0;
fail2:
- if (old_rx_rss_context != efx->rss_context.context_id) {
- WARN_ON(efx_mcdi_filter_free_rss_context(efx, efx->rss_context.context_id) != 0);
- efx->rss_context.context_id = old_rx_rss_context;
+ if (old_rx_rss_context != efx->rss_context.priv.context_id) {
+ WARN_ON(efx_mcdi_filter_free_rss_context(efx, efx->rss_context.priv.context_id) != 0);
+ efx->rss_context.priv.context_id = old_rx_rss_context;
}
fail1:
netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
@@ -2101,33 +2104,28 @@ fail1:
}
int efx_mcdi_rx_push_rss_context_config(struct efx_nic *efx,
- struct efx_rss_context *ctx,
+ struct efx_rss_context_priv *ctx,
const u32 *rx_indir_table,
- const u8 *key)
+ const u8 *key, bool delete)
{
int rc;
- WARN_ON(!mutex_is_locked(&efx->rss_lock));
+ WARN_ON(!mutex_is_locked(&efx->net_dev->ethtool->rss_lock));
if (ctx->context_id == EFX_MCDI_RSS_CONTEXT_INVALID) {
+ if (delete)
+ /* already wasn't in HW, nothing to do */
+ return 0;
rc = efx_mcdi_filter_alloc_rss_context(efx, true, ctx, NULL);
if (rc)
return rc;
}
- if (!rx_indir_table) /* Delete this context */
+ if (delete) /* Delete this context */
return efx_mcdi_filter_free_rss_context(efx, ctx->context_id);
- rc = efx_mcdi_filter_populate_rss_table(efx, ctx->context_id,
- rx_indir_table, key);
- if (rc)
- return rc;
-
- memcpy(ctx->rx_indir_table, rx_indir_table,
- sizeof(efx->rss_context.rx_indir_table));
- memcpy(ctx->rx_hash_key, key, efx->type->rx_hash_key_size);
-
- return 0;
+ return efx_mcdi_filter_populate_rss_table(efx, ctx->context_id,
+ rx_indir_table, key);
}
int efx_mcdi_rx_pull_rss_context_config(struct efx_nic *efx,
@@ -2139,16 +2137,16 @@ int efx_mcdi_rx_pull_rss_context_config(struct efx_nic *efx,
size_t outlen;
int rc, i;
- WARN_ON(!mutex_is_locked(&efx->rss_lock));
+ WARN_ON(!mutex_is_locked(&efx->net_dev->ethtool->rss_lock));
BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN !=
MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN);
- if (ctx->context_id == EFX_MCDI_RSS_CONTEXT_INVALID)
+ if (ctx->priv.context_id == EFX_MCDI_RSS_CONTEXT_INVALID)
return -ENOENT;
MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID,
- ctx->context_id);
+ ctx->priv.context_id);
BUILD_BUG_ON(ARRAY_SIZE(ctx->rx_indir_table) !=
MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN);
rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_TABLE, inbuf, sizeof(inbuf),
@@ -2164,7 +2162,7 @@ int efx_mcdi_rx_pull_rss_context_config(struct efx_nic *efx,
RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE)[i];
MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID,
- ctx->context_id);
+ ctx->priv.context_id);
BUILD_BUG_ON(ARRAY_SIZE(ctx->rx_hash_key) !=
MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_KEY, inbuf, sizeof(inbuf),
@@ -2186,35 +2184,42 @@ int efx_mcdi_rx_pull_rss_config(struct efx_nic *efx)
{
int rc;
- mutex_lock(&efx->rss_lock);
+ mutex_lock(&efx->net_dev->ethtool->rss_lock);
rc = efx_mcdi_rx_pull_rss_context_config(efx, &efx->rss_context);
- mutex_unlock(&efx->rss_lock);
+ mutex_unlock(&efx->net_dev->ethtool->rss_lock);
return rc;
}
void efx_mcdi_rx_restore_rss_contexts(struct efx_nic *efx)
{
struct efx_mcdi_filter_table *table = efx->filter_state;
- struct efx_rss_context *ctx;
+ struct ethtool_rxfh_context *ctx;
+ unsigned long context;
int rc;
- WARN_ON(!mutex_is_locked(&efx->rss_lock));
+ WARN_ON(!mutex_is_locked(&efx->net_dev->ethtool->rss_lock));
if (!table->must_restore_rss_contexts)
return;
- list_for_each_entry(ctx, &efx->rss_context.list, list) {
+ xa_for_each(&efx->net_dev->ethtool->rss_ctx, context, ctx) {
+ struct efx_rss_context_priv *priv;
+ u32 *indir;
+ u8 *key;
+
+ priv = ethtool_rxfh_context_priv(ctx);
/* previous NIC RSS context is gone */
- ctx->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
+ priv->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
/* so try to allocate a new one */
- rc = efx_mcdi_rx_push_rss_context_config(efx, ctx,
- ctx->rx_indir_table,
- ctx->rx_hash_key);
+ indir = ethtool_rxfh_context_indir(ctx);
+ key = ethtool_rxfh_context_key(ctx);
+ rc = efx_mcdi_rx_push_rss_context_config(efx, priv, indir, key,
+ false);
if (rc)
netif_warn(efx, probe, efx->net_dev,
- "failed to restore RSS context %u, rc=%d"
+ "failed to restore RSS context %lu, rc=%d"
"; RSS filters may fail to be applied\n",
- ctx->user_id, rc);
+ context, rc);
}
table->must_restore_rss_contexts = false;
}
@@ -2276,7 +2281,7 @@ int efx_mcdi_vf_rx_push_rss_config(struct efx_nic *efx, bool user,
{
if (user)
return -EOPNOTSUPP;
- if (efx->rss_context.context_id != EFX_MCDI_RSS_CONTEXT_INVALID)
+ if (efx->rss_context.priv.context_id != EFX_MCDI_RSS_CONTEXT_INVALID)
return 0;
return efx_mcdi_filter_rx_push_shared_rss_config(efx, NULL);
}
@@ -2295,7 +2300,7 @@ int efx_mcdi_push_default_indir_table(struct efx_nic *efx,
efx_mcdi_rx_free_indir_table(efx);
if (rss_spread > 1) {
- efx_set_default_rx_indir_table(efx, &efx->rss_context);
+ efx_set_default_rx_indir_table(efx, efx->rss_context.rx_indir_table);
rc = efx->type->rx_push_rss_config(efx, false,
efx->rss_context.rx_indir_table, NULL);
}
diff --git a/drivers/net/ethernet/sfc/mcdi_filters.h b/drivers/net/ethernet/sfc/mcdi_filters.h
index c0d6558b9fd2..11b9f87ed9e1 100644
--- a/drivers/net/ethernet/sfc/mcdi_filters.h
+++ b/drivers/net/ethernet/sfc/mcdi_filters.h
@@ -145,9 +145,9 @@ void efx_mcdi_filter_del_vlan(struct efx_nic *efx, u16 vid);
void efx_mcdi_rx_free_indir_table(struct efx_nic *efx);
int efx_mcdi_rx_push_rss_context_config(struct efx_nic *efx,
- struct efx_rss_context *ctx,
+ struct efx_rss_context_priv *ctx,
const u32 *rx_indir_table,
- const u8 *key);
+ const u8 *key, bool delete);
int efx_mcdi_pf_rx_push_rss_config(struct efx_nic *efx, bool user,
const u32 *rx_indir_table,
const u8 *key);
@@ -161,10 +161,6 @@ int efx_mcdi_push_default_indir_table(struct efx_nic *efx,
int efx_mcdi_rx_pull_rss_config(struct efx_nic *efx);
int efx_mcdi_rx_pull_rss_context_config(struct efx_nic *efx,
struct efx_rss_context *ctx);
-int efx_mcdi_get_rss_context_flags(struct efx_nic *efx, u32 context,
- u32 *flags);
-void efx_mcdi_set_rss_context_flags(struct efx_nic *efx,
- struct efx_rss_context *ctx);
void efx_mcdi_rx_restore_rss_contexts(struct efx_nic *efx);
static inline void efx_mcdi_update_rx_scatter(struct efx_nic *efx)
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index cd297e19cddc..b9866e389e6d 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -72,19 +72,19 @@
* | \------- Error
* \------------------------------ Resync (always set)
*
- * The client writes it's request into MC shared memory, and rings the
- * doorbell. Each request is completed by either by the MC writing
+ * The client writes its request into MC shared memory, and rings the
+ * doorbell. Each request is completed either by the MC writing
* back into shared memory, or by writing out an event.
*
* All MCDI commands support completion by shared memory response. Each
* request may also contain additional data (accounted for by HEADER.LEN),
- * and some response's may also contain additional data (again, accounted
+ * and some responses may also contain additional data (again, accounted
* for by HEADER.LEN).
*
* Some MCDI commands support completion by event, in which any associated
* response data is included in the event.
*
- * The protocol requires one response to be delivered for every request, a
+ * The protocol requires one response to be delivered for every request; a
* request should not be sent unless the response for the previous request
* has been received (either by polling shared memory, or by receiving
* an event).
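This one-request-outstanding contract is what makes the driver's synchronous efx_mcdi_rpc() helper safe: it copies the request into shared memory, rings the doorbell, and only returns once the matching response (or an error/timeout) has been seen. As a sketch, a command with no payload in either direction reduces to a single call; the command chosen here is arbitrary and purely illustrative:

	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL);
	if (rc)
		netif_err(efx, hw, efx->net_dev,
			  "WoL filter reset failed, rc=%d\n", rc);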
@@ -165,6 +165,7 @@
#define FSE_AZ_EV_CODE_MCDI_EVRESPONSE 0xc
+
#define MC_CMD_ERR_CODE_OFST 0
#define MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST 4
@@ -321,7 +322,7 @@
/* enum: The requesting client is not a function */
#define MC_CMD_ERR_CLIENT_NOT_FN 0x100c
/* enum: The requested operation might require the command to be passed between
- * MCs, and thetransport doesn't support that. Should only ever been seen over
+ * MCs, and the transport doesn't support that. Should only ever be seen over
* the UART.
*/
#define MC_CMD_ERR_TRANSPORT_NOPROXY 0x100d
@@ -358,7 +359,7 @@
* sub-variant switching.
*/
#define MC_CMD_ERR_FILTERS_PRESENT 0x1014
-/* enum: The clock whose frequency you've attempted to set set doesn't exist on
+/* enum: The clock whose frequency you've attempted to set doesn't exist on
* this NIC
*/
#define MC_CMD_ERR_NO_CLOCK 0x1015
@@ -387,25 +388,6 @@
*/
#define MC_CMD_ERR_PIOBUFS_PRESENT 0x101b
-/* MC_CMD_RESOURCE_SPECIFIER enum */
-/* enum: Any */
-#define MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff
-#define MC_CMD_RESOURCE_INSTANCE_NONE 0xfffffffe /* enum */
-
-/* MC_CMD_FPGA_FLASH_INDEX enum */
-#define MC_CMD_FPGA_FLASH_PRIMARY 0x0 /* enum */
-#define MC_CMD_FPGA_FLASH_SECONDARY 0x1 /* enum */
-
-/* MC_CMD_EXTERNAL_MAE_LINK_MODE enum */
-/* enum: Legacy mode as described in XN-200039-TC. */
-#define MC_CMD_EXTERNAL_MAE_LINK_MODE_LEGACY 0x0
-/* enum: Switchdev mode as described in XN-200039-TC. */
-#define MC_CMD_EXTERNAL_MAE_LINK_MODE_SWITCHDEV 0x1
-/* enum: Bootstrap mode as described in XN-200039-TC. */
-#define MC_CMD_EXTERNAL_MAE_LINK_MODE_BOOTSTRAP 0x2
-/* enum: Link-mode change is in-progress as described in XN-200039-TC. */
-#define MC_CMD_EXTERNAL_MAE_LINK_MODE_PENDING 0xf
-
/* PCIE_INTERFACE enum: From EF100 onwards, SFC products can have multiple PCIe
* interfaces. There is a need to refer to interfaces explicitly from drivers
* (for example, a management driver on one interface administering a function
@@ -424,6 +406,14 @@
* an on-NIC ARM module is expected to be connected.
*/
#define PCIE_INTERFACE_NIC_EMBEDDED 0x1
+/* enum: The PCIe logical interface 0. It is an alias for HOST_PRIMARY. */
+#define PCIE_INTERFACE_PCIE_HOST_INTF_0 0x0
+/* enum: The PCIe logical interface 1. */
+#define PCIE_INTERFACE_PCIE_HOST_INTF_1 0x2
+/* enum: The PCIe logical interface 2. */
+#define PCIE_INTERFACE_PCIE_HOST_INTF_2 0x3
+/* enum: The PCIe logical interface 3. */
+#define PCIE_INTERFACE_PCIE_HOST_INTF_3 0x4
/* enum: For MCDI commands issued over a PCIe interface, this value is
* translated into the interface over which the command was issued. Not
* meaningful for other MCDI transports.
@@ -640,7 +630,11 @@
* be allocated by different counter blocks, so e.g. AR counter 42 is different
* from CT counter 42. Generation counts are also type-specific. This value is
* also present in the header of streaming counter packets, in the IDENTIFIER
- * field (see packetiser packet format definitions).
+ * field (see packetiser packet format definitions). Also note that LACP
+ * counter IDs are not allocated individually, instead the counter IDs are
+ * directly tied to the LACP balance table indices. These in turn are allocated
+ * in large contiguous blocks as a LAG config. Calling MAE_COUNTER_ALLOC/FREE
+ * with an LACP counter type will return EPERM.
*/
/* enum: Action Rule counters - can be referenced in AR response. */
#define MAE_COUNTER_TYPE_AR 0x0
@@ -648,6 +642,14 @@
#define MAE_COUNTER_TYPE_CT 0x1
/* enum: Outer Rule counters - can be referenced in OR response. */
#define MAE_COUNTER_TYPE_OR 0x2
+/* enum: LACP counters - linked to LACP balance table entries. */
+#define MAE_COUNTER_TYPE_LACP 0x3
+
+/* MAE_COUNTER_ID enum: ID of allocated counter or counter list. */
+/* enum: A counter ID that is guaranteed never to represent a real counter or
+ * counter list.
+ */
+#define MAE_COUNTER_ID_NULL 0xffffffff
/* TABLE_ID enum: Unique IDs for tables. The 32-bit ID values have been
* structured with bits [31:24] reserved (0), [23:16] indicating which major
@@ -656,7 +658,9 @@
* variations of the same table. (All of the tables currently defined within
* the streaming engines are listed here, but this does not imply that they are
* all supported - MC_CMD_TABLE_LIST returns the list of actually supported
- * tables.)
+ * tables.) The DPU offload engines' enumerators follow a deliberate pattern:
+ * 0x01010000 + is_dpu_net * 0x10000 + is_wr_or_tx * 0x8000 + is_lite_pipe *
+ * 0x1000 + oe_engine_type * 0x100 + oe_instance_within_pipe * 0x10
*/
/* enum: Outer_Rule_Table in the MAE - refer to SF-123102-TC. */
#define TABLE_ID_OUTER_RULE_TABLE 0x10000
@@ -694,45 +698,70 @@
#define TABLE_ID_RSS_CONTEXT_TABLE 0x20200
/* enum: Indirection_Table in VNIC Rx - refer to SF-123102-TC. */
#define TABLE_ID_INDIRECTION_TABLE 0x20300
-
-/* TABLE_COMPRESSED_VLAN enum: Compressed VLAN TPID as used by some field
- * types; can be calculated by (((ether_type_msb >> 2) & 0x4) ^ 0x4) |
- * (ether_type_msb & 0x3);
- */
-#define TABLE_COMPRESSED_VLAN_TPID_8100 0x5 /* enum */
-#define TABLE_COMPRESSED_VLAN_TPID_88A8 0x4 /* enum */
-#define TABLE_COMPRESSED_VLAN_TPID_9100 0x1 /* enum */
-#define TABLE_COMPRESSED_VLAN_TPID_9200 0x2 /* enum */
-#define TABLE_COMPRESSED_VLAN_TPID_9300 0x3 /* enum */
-
-/* TABLE_NAT_DIR enum: NAT direction. */
-#define TABLE_NAT_DIR_SOURCE 0x0 /* enum */
-#define TABLE_NAT_DIR_DEST 0x1 /* enum */
-
-/* TABLE_RSS_KEY_MODE enum: Defines how the value for Toeplitz hashing for RSS
- * is constructed as a concatenation (indicated here by "++") of packet header
- * fields.
- */
-/* enum: IP src addr ++ IP dst addr */
-#define TABLE_RSS_KEY_MODE_SA_DA 0x0
-/* enum: IP src addr ++ IP dst addr ++ TCP/UDP src port ++ TCP/UDP dst port */
-#define TABLE_RSS_KEY_MODE_SA_DA_SP_DP 0x1
-/* enum: IP src addr */
-#define TABLE_RSS_KEY_MODE_SA 0x2
-/* enum: IP dst addr */
-#define TABLE_RSS_KEY_MODE_DA 0x3
-/* enum: IP src addr ++ TCP/UDP src port */
-#define TABLE_RSS_KEY_MODE_SA_SP 0x4
-/* enum: IP dest addr ++ TCP dest port */
-#define TABLE_RSS_KEY_MODE_DA_DP 0x5
-/* enum: Nothing (produces input of 0, resulting in output hash of 0) */
-#define TABLE_RSS_KEY_MODE_NONE 0x7
-
-/* TABLE_RSS_SPREAD_MODE enum: RSS spreading mode. */
-/* enum: RSS uses Indirection_Table lookup. */
-#define TABLE_RSS_SPREAD_MODE_INDIRECTION 0x0
-/* enum: RSS uses even spreading calculation. */
-#define TABLE_RSS_SPREAD_MODE_EVEN 0x1
+/* enum: DPU.host read pipe first CRC offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_HOST_RD_CRC0_OE_PROFILE 0x1010000
+/* enum: DPU.host read pipe second CRC offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_HOST_RD_CRC1_OE_PROFILE 0x1010010
+/* enum: DPU.host write pipe first CRC offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_HOST_WR_CRC0_OE_PROFILE 0x1018000
+/* enum: DPU.host write pipe second CRC offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_HOST_WR_CRC1_OE_PROFILE 0x1018010
+/* enum: DPU.net 'full' receive pipe CRC offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_RX_CRC0_OE_PROFILE 0x1020000
+/* enum: DPU.net 'full' receive pipe first checksum offload engine profiles -
+ * refer to XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_RX_CSUM0_OE_PROFILE 0x1020100
+/* enum: DPU.net 'full' receive pipe second checksum offload engine profiles -
+ * refer to XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_RX_CSUM1_OE_PROFILE 0x1020110
+/* enum: DPU.net 'full' receive pipe AES-GCM offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_RX_AES_GCM0_OE_PROFILE 0x1020200
+/* enum: DPU.net 'lite' receive pipe CRC offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_RXLITE_CRC0_OE_PROFILE 0x1021000
+/* enum: DPU.net 'lite' receive pipe checksum offload engine profiles - refer
+ * to XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_RXLITE_CSUM0_OE_PROFILE 0x1021100
+/* enum: DPU.net 'full' transmit pipe CRC offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_TX_CRC0_OE_PROFILE 0x1028000
+/* enum: DPU.net 'full' transmit pipe first checksum offload engine profiles -
+ * refer to XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_TX_CSUM0_OE_PROFILE 0x1028100
+/* enum: DPU.net 'full' transmit pipe second checksum offload engine profiles -
+ * refer to XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_TX_CSUM1_OE_PROFILE 0x1028110
+/* enum: DPU.net 'full' transmit pipe AES-GCM offload engine profiles - refer
+ * to XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_TX_AES_GCM0_OE_PROFILE 0x1028200
+/* enum: DPU.net 'lite' transmit pipe CRC offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_TXLITE_CRC0_OE_PROFILE 0x1029000
+/* enum: DPU.net 'lite' transmit pipe checksum offload engine profiles - refer
+ * to XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_TXLITE_CSUM0_OE_PROFILE 0x1029100
/* TABLE_FIELD_ID enum: Unique IDs for fields. Related concepts have been
* loosely grouped together into blocks with gaps for expansion, but the values
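The enumerator pattern quoted above can be checked against the values in this hunk. A worked example, where the DPU_TABLE_ID macro is hypothetical and introduced only to spell out the arithmetic:

	#define DPU_TABLE_ID(is_dpu_net, is_wr_or_tx, is_lite_pipe, engine, inst) \
		(0x01010000 + (is_dpu_net) * 0x10000 + (is_wr_or_tx) * 0x8000 +   \
		 (is_lite_pipe) * 0x1000 + (engine) * 0x100 + (inst) * 0x10)

	/* DPU.net, transmit, full pipe, second checksum engine (engine 1, inst 1):
	 *   DPU_TABLE_ID(1, 1, 0, 1, 1) == 0x1028110
	 *     == TABLE_ID_DPU_NET_TX_CSUM1_OE_PROFILE
	 * DPU.host, read pipe, first CRC engine:
	 *   DPU_TABLE_ID(0, 0, 0, 0, 0) == 0x1010000
	 *     == TABLE_ID_DPU_HOST_RD_CRC0_OE_PROFILE
	 */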
@@ -1026,6 +1055,16 @@
#define TABLE_FIELD_ID_BAL_TBL_BASE_DIV64 0xde
/* enum: Length of balance table region: 0=>64, 1=>128, 2=>256. */
#define TABLE_FIELD_ID_BAL_TBL_LEN_ID 0xdf
+/* enum: LACP LAG ID (i.e. the low 3 bits of LACP LAG mport ID), indexing
+ * LACP_LAG_Config_Table. Refer to SF-123102-TC.
+ */
+#define TABLE_FIELD_ID_LACP_LAG_ID 0xe0
+/* enum: Address in LACP_Balance_Table. The balance table is partitioned
+ * between LAGs according to the settings in LACP_LAG_Config_Table and then
+ * indexed by the LACP hash, providing the mapping to destination mports. Refer
+ * to SF-123102-TC.
+ */
+#define TABLE_FIELD_ID_BAL_TBL_ADDR 0xe1
/* enum: UDP port to match for UDP-based encapsulations; required to be 0 for
* other encapsulation types.
*/
@@ -1082,6 +1121,55 @@
#define TABLE_FIELD_ID_INDIR_TBL_LEN_ID 0x105
/* enum: An offset to be applied to the base destination queue ID. */
#define TABLE_FIELD_ID_INDIR_OFFSET 0x106
+/* enum: DPU offload engine profile ID to address. */
+#define TABLE_FIELD_ID_OE_PROFILE 0x3e8
+/* enum: Width of the CRC to calculate - see CRC_VARIANT enum. */
+#define TABLE_FIELD_ID_CRC_VARIANT 0x3f2
+/* enum: If set, reflect the bits of each input byte, bit 7 is LSB, bit 0 is
+ * MSB. If clear, bit 7 is MSB, bit 0 is LSB.
+ */
+#define TABLE_FIELD_ID_CRC_REFIN 0x3f3
+/* enum: If set, reflect the bits of each output byte, bit 7 is LSB, bit 0 is
+ * MSB. If clear, bit 7 is MSB, bit 0 is LSB.
+ */
+#define TABLE_FIELD_ID_CRC_REFOUT 0x3f4
+/* enum: If set, invert every bit of the output value. */
+#define TABLE_FIELD_ID_CRC_INVOUT 0x3f5
+/* enum: The CRC polynomial to use for checksumming, in normal form. */
+#define TABLE_FIELD_ID_CRC_POLY 0x3f6
+/* enum: Operation for the checksum engine to perform - see DPU_CSUM_OP enum.
+ */
+#define TABLE_FIELD_ID_CSUM_OP 0x410
+/* enum: Byte offset of checksum relative to region_start (for VALIDATE_*
+ * operations only).
+ */
+#define TABLE_FIELD_ID_CSUM_OFFSET 0x411
+/* enum: Indicates there is additional data on OPR bus that needs to be
+ * incorporated into the payload checksum.
+ */
+#define TABLE_FIELD_ID_CSUM_OPR_ADDITIONAL_DATA 0x412
+/* enum: Log2 data size of additional data on OPR bus. */
+#define TABLE_FIELD_ID_CSUM_OPR_DATA_SIZE_LOG2 0x413
+/* enum: 4 byte offset of where to find the additional data on the OPR bus. */
+#define TABLE_FIELD_ID_CSUM_OPR_4B_OFF 0x414
+/* enum: Operation type for the AES-GCM core - see GCM_OP_CODE enum. */
+#define TABLE_FIELD_ID_GCM_OP_CODE 0x41a
+/* enum: Key length - AES_KEY_LEN enum. */
+#define TABLE_FIELD_ID_GCM_KEY_LEN 0x41b
+/* enum: OPR 4 byte offset for ICV or GHASH output (only in BULK_* mode) or
+ * IPSEC decrypt output.
+ */
+#define TABLE_FIELD_ID_GCM_OPR_4B_OFFSET 0x41c
+/* enum: If OP_CODE is BULK_*, indicates Emit GHASH (Fragment mode). Else,
+ * indicates IPSEC-ESN mode.
+ */
+#define TABLE_FIELD_ID_GCM_EMIT_GHASH_ISESN 0x41d
+/* enum: Replay Protection Enable. */
+#define TABLE_FIELD_ID_GCM_REPLAY_PROTECT_EN 0x41e
+/* enum: IPSEC Encrypt ESP trailer NEXT_HEADER byte. */
+#define TABLE_FIELD_ID_GCM_NEXT_HDR 0x41f
+/* enum: Replay Window Size. */
+#define TABLE_FIELD_ID_GCM_REPLAY_WIN_SIZE 0x420
/* MCDI_EVENT structuredef: The structure of an MCDI_EVENT on Siena/EF10/EF100
* platforms
@@ -1138,6 +1226,24 @@
#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_OFST 0
#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24
#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_WIDTH 8
+#define MCDI_EVENT_PORT_LINKCHANGE_PORT_HANDLE_OFST 0
+#define MCDI_EVENT_PORT_LINKCHANGE_PORT_HANDLE_LBN 0
+#define MCDI_EVENT_PORT_LINKCHANGE_PORT_HANDLE_WIDTH 24
+#define MCDI_EVENT_PORT_LINKCHANGE_SEQ_NUM_OFST 0
+#define MCDI_EVENT_PORT_LINKCHANGE_SEQ_NUM_LBN 24
+#define MCDI_EVENT_PORT_LINKCHANGE_SEQ_NUM_WIDTH 7
+#define MCDI_EVENT_PORT_LINKCHANGE_LINK_UP_OFST 0
+#define MCDI_EVENT_PORT_LINKCHANGE_LINK_UP_LBN 31
+#define MCDI_EVENT_PORT_LINKCHANGE_LINK_UP_WIDTH 1
+#define MCDI_EVENT_PORT_MODULECHANGE_PORT_HANDLE_OFST 0
+#define MCDI_EVENT_PORT_MODULECHANGE_PORT_HANDLE_LBN 0
+#define MCDI_EVENT_PORT_MODULECHANGE_PORT_HANDLE_WIDTH 24
+#define MCDI_EVENT_PORT_MODULECHANGE_SEQ_NUM_OFST 0
+#define MCDI_EVENT_PORT_MODULECHANGE_SEQ_NUM_LBN 24
+#define MCDI_EVENT_PORT_MODULECHANGE_SEQ_NUM_WIDTH 7
+#define MCDI_EVENT_PORT_MODULECHANGE_MDI_CONNECTED_OFST 0
+#define MCDI_EVENT_PORT_MODULECHANGE_MDI_CONNECTED_LBN 31
+#define MCDI_EVENT_PORT_MODULECHANGE_MDI_CONNECTED_WIDTH 1
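/* Illustrative sketch (not part of this header or this patch): decoding the
 * PORT_LINKCHANGE event DATA word using only the LBN/WIDTH values defined
 * above. The helper and function names are assumptions for the example; a
 * real driver would use its existing event-field extraction macros.
 */
#include <stdint.h>

static inline uint32_t example_ev_bits(uint32_t data, unsigned int lbn,
					unsigned int width)
{
	return (data >> lbn) & ((1u << width) - 1);	/* valid for width < 32 */
}

static void example_handle_port_linkchange(uint32_t data)
{
	uint32_t port_handle = example_ev_bits(data,
			MCDI_EVENT_PORT_LINKCHANGE_PORT_HANDLE_LBN,
			MCDI_EVENT_PORT_LINKCHANGE_PORT_HANDLE_WIDTH);
	uint32_t seq_num = example_ev_bits(data,
			MCDI_EVENT_PORT_LINKCHANGE_SEQ_NUM_LBN,
			MCDI_EVENT_PORT_LINKCHANGE_SEQ_NUM_WIDTH);
	uint32_t link_up = example_ev_bits(data,
			MCDI_EVENT_PORT_LINKCHANGE_LINK_UP_LBN,
			MCDI_EVENT_PORT_LINKCHANGE_LINK_UP_WIDTH);

	(void)port_handle; (void)seq_num; (void)link_up;
	/* ... look up the port by port_handle, note any gap in seq_num and
	 * update the link state from link_up ...
	 */
}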
#define MCDI_EVENT_SENSOREVT_MONITOR_OFST 0
#define MCDI_EVENT_SENSOREVT_MONITOR_LBN 0
#define MCDI_EVENT_SENSOREVT_MONITOR_WIDTH 8
@@ -1237,7 +1343,7 @@
#define MCDI_EVENT_AOE_FPGA_LOAD_FAILED 0xe
/* enum: Notify that invalid flash type detected */
#define MCDI_EVENT_AOE_INVALID_FPGA_FLASH_TYPE 0xf
-/* enum: Notify that the attempt to run FPGA Controller firmware timedout */
+/* enum: Notify that the attempt to run FPGA Controller firmware timed out */
#define MCDI_EVENT_AOE_FC_RUN_TIMEDOUT 0x10
/* enum: Failure to probe one or more FPGA boot flash chips */
#define MCDI_EVENT_AOE_FPGA_BOOT_FLASH_INVALID 0x11
@@ -1255,7 +1361,7 @@
#define MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_WIDTH 8
/* enum: FC Assert happened, but the register information is not available */
#define MCDI_EVENT_AOE_ERR_FC_ASSERT_SEEN 0x0
-/* enum: The register information for FC Assert is ready for readinng by driver
+/* enum: The register information for FC Assert is ready for reading by driver
*/
#define MCDI_EVENT_AOE_ERR_FC_ASSERT_DATA_READY 0x1
#define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_OFST 0
@@ -1364,6 +1470,12 @@
#define MCDI_EVENT_MODULECHANGE_SEQ_OFST 0
#define MCDI_EVENT_MODULECHANGE_SEQ_LBN 30
#define MCDI_EVENT_MODULECHANGE_SEQ_WIDTH 2
+#define MCDI_EVENT_DESC_PROXY_VIRTQ_VI_ID_OFST 0
+#define MCDI_EVENT_DESC_PROXY_VIRTQ_VI_ID_LBN 0
+#define MCDI_EVENT_DESC_PROXY_VIRTQ_VI_ID_WIDTH 16
+#define MCDI_EVENT_DESC_PROXY_VIRTQ_ID_OFST 0
+#define MCDI_EVENT_DESC_PROXY_VIRTQ_ID_LBN 16
+#define MCDI_EVENT_DESC_PROXY_VIRTQ_ID_WIDTH 16
#define MCDI_EVENT_DATA_LBN 0
#define MCDI_EVENT_DATA_WIDTH 32
/* Alias for PTP_DATA. */
@@ -1500,6 +1612,31 @@
* change to the journal.
*/
#define MCDI_EVENT_CODE_MPORT_JOURNAL_CHANGE 0x27
+/* enum: Notification that a source queue is enabled and attached to its proxy
+ * sink queue. SRC field contains the handle of the affected descriptor proxy
+ * function. DATA field contains the relative source queue number and absolute
+ * VI ID.
+ */
+#define MCDI_EVENT_CODE_DESC_PROXY_FUNC_QUEUE_START 0x28
+/* enum: Notification of a change in link state and/or link speed of a network
+ * port link. This event applies to a network port identified by a handle,
+ * PORT_HANDLE, which is discovered by the driver using the MC_CMD_ENUM_PORTS
+ * command.
+ */
+#define MCDI_EVENT_CODE_PORT_LINKCHANGE 0x29
+/* enum: Notification of a change in the state of an MDI (external connector)
+ * of a network port. This typically corresponds to module plug/unplug for
+ * modular interfaces (e.g., SFP/QSFP and similar) or cable connect/disconnect.
+ * This event applies to a network port identified by a handle, PORT_HANDLE,
+ * which is discovered by the driver using the MC_CMD_ENUM_PORTS command.
+ */
+#define MCDI_EVENT_CODE_PORT_MODULECHANGE 0x2a
+/* enum: Notification that the port enumeration journal has changed since it
+ * was last read and updates can be read using the MC_CMD_ENUM_PORTS command.
+ * The firmware may moderate the events so that an event is not sent for every
+ * change to the journal.
+ */
+#define MCDI_EVENT_CODE_ENUM_PORTS_CHANGE 0x2b
/* enum: Artificial event generated by host and posted via MC for test
* purposes.
*/
@@ -1512,6 +1649,14 @@
#define MCDI_EVENT_LINKCHANGE_DATA_LEN 4
#define MCDI_EVENT_LINKCHANGE_DATA_LBN 0
#define MCDI_EVENT_LINKCHANGE_DATA_WIDTH 32
+#define MCDI_EVENT_PORT_LINKCHANGE_DATA_OFST 0
+#define MCDI_EVENT_PORT_LINKCHANGE_DATA_LEN 4
+#define MCDI_EVENT_PORT_LINKCHANGE_DATA_LBN 0
+#define MCDI_EVENT_PORT_LINKCHANGE_DATA_WIDTH 32
+#define MCDI_EVENT_PORT_MODULECHANGE_DATA_OFST 0
+#define MCDI_EVENT_PORT_MODULECHANGE_DATA_LEN 4
+#define MCDI_EVENT_PORT_MODULECHANGE_DATA_LBN 0
+#define MCDI_EVENT_PORT_MODULECHANGE_DATA_WIDTH 32
#define MCDI_EVENT_SENSOREVT_DATA_OFST 0
#define MCDI_EVENT_SENSOREVT_DATA_LEN 4
#define MCDI_EVENT_SENSOREVT_DATA_LBN 0
@@ -1668,247 +1813,6 @@
#define MCDI_EVENT_DESC_PROXY_VIRTIO_FEATURES_LBN 0
#define MCDI_EVENT_DESC_PROXY_VIRTIO_FEATURES_WIDTH 32
-/* FCDI_EVENT structuredef */
-#define FCDI_EVENT_LEN 8
-#define FCDI_EVENT_CONT_LBN 32
-#define FCDI_EVENT_CONT_WIDTH 1
-#define FCDI_EVENT_LEVEL_LBN 33
-#define FCDI_EVENT_LEVEL_WIDTH 3
-/* enum: Info. */
-#define FCDI_EVENT_LEVEL_INFO 0x0
-/* enum: Warning. */
-#define FCDI_EVENT_LEVEL_WARN 0x1
-/* enum: Error. */
-#define FCDI_EVENT_LEVEL_ERR 0x2
-/* enum: Fatal. */
-#define FCDI_EVENT_LEVEL_FATAL 0x3
-#define FCDI_EVENT_DATA_OFST 0
-#define FCDI_EVENT_DATA_LEN 4
-#define FCDI_EVENT_LINK_STATE_STATUS_OFST 0
-#define FCDI_EVENT_LINK_STATE_STATUS_LBN 0
-#define FCDI_EVENT_LINK_STATE_STATUS_WIDTH 1
-#define FCDI_EVENT_LINK_DOWN 0x0 /* enum */
-#define FCDI_EVENT_LINK_UP 0x1 /* enum */
-#define FCDI_EVENT_DATA_LBN 0
-#define FCDI_EVENT_DATA_WIDTH 32
-#define FCDI_EVENT_SRC_LBN 36
-#define FCDI_EVENT_SRC_WIDTH 8
-#define FCDI_EVENT_EV_CODE_LBN 60
-#define FCDI_EVENT_EV_CODE_WIDTH 4
-#define FCDI_EVENT_CODE_LBN 44
-#define FCDI_EVENT_CODE_WIDTH 8
-/* enum: The FC was rebooted. */
-#define FCDI_EVENT_CODE_REBOOT 0x1
-/* enum: Bad assert. */
-#define FCDI_EVENT_CODE_ASSERT 0x2
-/* enum: DDR3 test result. */
-#define FCDI_EVENT_CODE_DDR_TEST_RESULT 0x3
-/* enum: Link status. */
-#define FCDI_EVENT_CODE_LINK_STATE 0x4
-/* enum: A timed read is ready to be serviced. */
-#define FCDI_EVENT_CODE_TIMED_READ 0x5
-/* enum: One or more PPS IN events */
-#define FCDI_EVENT_CODE_PPS_IN 0x6
-/* enum: Tick event from PTP clock */
-#define FCDI_EVENT_CODE_PTP_TICK 0x7
-/* enum: ECC error counters */
-#define FCDI_EVENT_CODE_DDR_ECC_STATUS 0x8
-/* enum: Current status of PTP */
-#define FCDI_EVENT_CODE_PTP_STATUS 0x9
-/* enum: Port id config to map MC-FC port idx */
-#define FCDI_EVENT_CODE_PORT_CONFIG 0xa
-/* enum: Boot result or error code */
-#define FCDI_EVENT_CODE_BOOT_RESULT 0xb
-#define FCDI_EVENT_REBOOT_SRC_LBN 36
-#define FCDI_EVENT_REBOOT_SRC_WIDTH 8
-#define FCDI_EVENT_REBOOT_FC_FW 0x0 /* enum */
-#define FCDI_EVENT_REBOOT_FC_BOOTLOADER 0x1 /* enum */
-#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0
-#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LEN 4
-#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0
-#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32
-#define FCDI_EVENT_ASSERT_TYPE_LBN 36
-#define FCDI_EVENT_ASSERT_TYPE_WIDTH 8
-#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_LBN 36
-#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_WIDTH 8
-#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_OFST 0
-#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LEN 4
-#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LBN 0
-#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_WIDTH 32
-#define FCDI_EVENT_LINK_STATE_DATA_OFST 0
-#define FCDI_EVENT_LINK_STATE_DATA_LEN 4
-#define FCDI_EVENT_LINK_STATE_DATA_LBN 0
-#define FCDI_EVENT_LINK_STATE_DATA_WIDTH 32
-#define FCDI_EVENT_PTP_STATE_OFST 0
-#define FCDI_EVENT_PTP_STATE_LEN 4
-#define FCDI_EVENT_PTP_UNDEFINED 0x0 /* enum */
-#define FCDI_EVENT_PTP_SETUP_FAILED 0x1 /* enum */
-#define FCDI_EVENT_PTP_OPERATIONAL 0x2 /* enum */
-#define FCDI_EVENT_PTP_STATE_LBN 0
-#define FCDI_EVENT_PTP_STATE_WIDTH 32
-#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_LBN 36
-#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_WIDTH 8
-#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_OFST 0
-#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LEN 4
-#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LBN 0
-#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_WIDTH 32
-/* Index of MC port being referred to */
-#define FCDI_EVENT_PORT_CONFIG_SRC_LBN 36
-#define FCDI_EVENT_PORT_CONFIG_SRC_WIDTH 8
-/* FC Port index that matches the MC port index in SRC */
-#define FCDI_EVENT_PORT_CONFIG_DATA_OFST 0
-#define FCDI_EVENT_PORT_CONFIG_DATA_LEN 4
-#define FCDI_EVENT_PORT_CONFIG_DATA_LBN 0
-#define FCDI_EVENT_PORT_CONFIG_DATA_WIDTH 32
-#define FCDI_EVENT_BOOT_RESULT_OFST 0
-#define FCDI_EVENT_BOOT_RESULT_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_AOE/MC_CMD_AOE_OUT_INFO/FC_BOOT_RESULT */
-#define FCDI_EVENT_BOOT_RESULT_LBN 0
-#define FCDI_EVENT_BOOT_RESULT_WIDTH 32
-
-/* FCDI_EXTENDED_EVENT_PPS structuredef: Extended FCDI event to send PPS events
- * to the MC. Note that this structure | is overlayed over a normal FCDI event
- * such that bits 32-63 containing | event code, level, source etc remain the
- * same. In this case the data | field of the header is defined to be the
- * number of timestamps
- */
-#define FCDI_EXTENDED_EVENT_PPS_LENMIN 16
-#define FCDI_EXTENDED_EVENT_PPS_LENMAX 248
-#define FCDI_EXTENDED_EVENT_PPS_LENMAX_MCDI2 1016
-#define FCDI_EXTENDED_EVENT_PPS_LEN(num) (8+8*(num))
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_NUM(len) (((len)-8)/8)
-/* Number of timestamps following */
-#define FCDI_EXTENDED_EVENT_PPS_COUNT_OFST 0
-#define FCDI_EXTENDED_EVENT_PPS_COUNT_LEN 4
-#define FCDI_EXTENDED_EVENT_PPS_COUNT_LBN 0
-#define FCDI_EXTENDED_EVENT_PPS_COUNT_WIDTH 32
-/* Seconds field of a timestamp record */
-#define FCDI_EXTENDED_EVENT_PPS_SECONDS_OFST 8
-#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LEN 4
-#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LBN 64
-#define FCDI_EXTENDED_EVENT_PPS_SECONDS_WIDTH 32
-/* Nanoseconds field of a timestamp record */
-#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_OFST 12
-#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LEN 4
-#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LBN 96
-#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_WIDTH 32
-/* Timestamp records comprising the event */
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_OFST 8
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LEN 8
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_OFST 8
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_LEN 4
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_LBN 64
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_WIDTH 32
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_OFST 12
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_LEN 4
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_LBN 96
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_WIDTH 32
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MINNUM 1
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM 30
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM_MCDI2 126
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LBN 64
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_WIDTH 64
-
-/* MUM_EVENT structuredef */
-#define MUM_EVENT_LEN 8
-#define MUM_EVENT_CONT_LBN 32
-#define MUM_EVENT_CONT_WIDTH 1
-#define MUM_EVENT_LEVEL_LBN 33
-#define MUM_EVENT_LEVEL_WIDTH 3
-/* enum: Info. */
-#define MUM_EVENT_LEVEL_INFO 0x0
-/* enum: Warning. */
-#define MUM_EVENT_LEVEL_WARN 0x1
-/* enum: Error. */
-#define MUM_EVENT_LEVEL_ERR 0x2
-/* enum: Fatal. */
-#define MUM_EVENT_LEVEL_FATAL 0x3
-#define MUM_EVENT_DATA_OFST 0
-#define MUM_EVENT_DATA_LEN 4
-#define MUM_EVENT_SENSOR_ID_OFST 0
-#define MUM_EVENT_SENSOR_ID_LBN 0
-#define MUM_EVENT_SENSOR_ID_WIDTH 8
-/* Enum values, see field(s): */
-/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
-#define MUM_EVENT_SENSOR_STATE_OFST 0
-#define MUM_EVENT_SENSOR_STATE_LBN 8
-#define MUM_EVENT_SENSOR_STATE_WIDTH 8
-#define MUM_EVENT_PORT_PHY_READY_OFST 0
-#define MUM_EVENT_PORT_PHY_READY_LBN 0
-#define MUM_EVENT_PORT_PHY_READY_WIDTH 1
-#define MUM_EVENT_PORT_PHY_LINK_UP_OFST 0
-#define MUM_EVENT_PORT_PHY_LINK_UP_LBN 1
-#define MUM_EVENT_PORT_PHY_LINK_UP_WIDTH 1
-#define MUM_EVENT_PORT_PHY_TX_LOL_OFST 0
-#define MUM_EVENT_PORT_PHY_TX_LOL_LBN 2
-#define MUM_EVENT_PORT_PHY_TX_LOL_WIDTH 1
-#define MUM_EVENT_PORT_PHY_RX_LOL_OFST 0
-#define MUM_EVENT_PORT_PHY_RX_LOL_LBN 3
-#define MUM_EVENT_PORT_PHY_RX_LOL_WIDTH 1
-#define MUM_EVENT_PORT_PHY_TX_LOS_OFST 0
-#define MUM_EVENT_PORT_PHY_TX_LOS_LBN 4
-#define MUM_EVENT_PORT_PHY_TX_LOS_WIDTH 1
-#define MUM_EVENT_PORT_PHY_RX_LOS_OFST 0
-#define MUM_EVENT_PORT_PHY_RX_LOS_LBN 5
-#define MUM_EVENT_PORT_PHY_RX_LOS_WIDTH 1
-#define MUM_EVENT_PORT_PHY_TX_FAULT_OFST 0
-#define MUM_EVENT_PORT_PHY_TX_FAULT_LBN 6
-#define MUM_EVENT_PORT_PHY_TX_FAULT_WIDTH 1
-#define MUM_EVENT_DATA_LBN 0
-#define MUM_EVENT_DATA_WIDTH 32
-#define MUM_EVENT_SRC_LBN 36
-#define MUM_EVENT_SRC_WIDTH 8
-#define MUM_EVENT_EV_CODE_LBN 60
-#define MUM_EVENT_EV_CODE_WIDTH 4
-#define MUM_EVENT_CODE_LBN 44
-#define MUM_EVENT_CODE_WIDTH 8
-/* enum: The MUM was rebooted. */
-#define MUM_EVENT_CODE_REBOOT 0x1
-/* enum: Bad assert. */
-#define MUM_EVENT_CODE_ASSERT 0x2
-/* enum: Sensor failure. */
-#define MUM_EVENT_CODE_SENSOR 0x3
-/* enum: Link fault has been asserted, or has cleared. */
-#define MUM_EVENT_CODE_QSFP_LASI_INTERRUPT 0x4
-#define MUM_EVENT_SENSOR_DATA_OFST 0
-#define MUM_EVENT_SENSOR_DATA_LEN 4
-#define MUM_EVENT_SENSOR_DATA_LBN 0
-#define MUM_EVENT_SENSOR_DATA_WIDTH 32
-#define MUM_EVENT_PORT_PHY_FLAGS_OFST 0
-#define MUM_EVENT_PORT_PHY_FLAGS_LEN 4
-#define MUM_EVENT_PORT_PHY_FLAGS_LBN 0
-#define MUM_EVENT_PORT_PHY_FLAGS_WIDTH 32
-#define MUM_EVENT_PORT_PHY_COPPER_LEN_OFST 0
-#define MUM_EVENT_PORT_PHY_COPPER_LEN_LEN 4
-#define MUM_EVENT_PORT_PHY_COPPER_LEN_LBN 0
-#define MUM_EVENT_PORT_PHY_COPPER_LEN_WIDTH 32
-#define MUM_EVENT_PORT_PHY_CAPS_OFST 0
-#define MUM_EVENT_PORT_PHY_CAPS_LEN 4
-#define MUM_EVENT_PORT_PHY_CAPS_LBN 0
-#define MUM_EVENT_PORT_PHY_CAPS_WIDTH 32
-#define MUM_EVENT_PORT_PHY_TECH_OFST 0
-#define MUM_EVENT_PORT_PHY_TECH_LEN 4
-#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_UNKNOWN 0x0 /* enum */
-#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_OPTICAL 0x1 /* enum */
-#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE 0x2 /* enum */
-#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE_EQUALIZED 0x3 /* enum */
-#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LIMITING 0x4 /* enum */
-#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LINEAR 0x5 /* enum */
-#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_BASE_T 0x6 /* enum */
-#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_LOOPBACK_PASSIVE 0x7 /* enum */
-#define MUM_EVENT_PORT_PHY_TECH_LBN 0
-#define MUM_EVENT_PORT_PHY_TECH_WIDTH 32
-#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_LBN 36
-#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_WIDTH 4
-#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_FLAGS 0x0 /* enum */
-#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_COPPER_LEN 0x1 /* enum */
-#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_CAPS 0x2 /* enum */
-#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_TECH 0x3 /* enum */
-#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_MAX 0x4 /* enum */
-#define MUM_EVENT_PORT_PHY_SRC_PORT_NO_LBN 40
-#define MUM_EVENT_PORT_PHY_SRC_PORT_NO_WIDTH 4
-
/***********************************/
/* MC_CMD_READ32
@@ -1969,90 +1873,6 @@
/***********************************/
-/* MC_CMD_COPYCODE
- * Copy MC code between two locations and jump. Note - this command really
- * belongs to INSECURE category but is required by shmboot. The command handler
- * has additional checks to reject insecure calls.
- */
-#define MC_CMD_COPYCODE 0x3
-#undef MC_CMD_0x3_PRIVILEGE_CTG
-
-#define MC_CMD_0x3_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_COPYCODE_IN msgrequest */
-#define MC_CMD_COPYCODE_IN_LEN 16
-/* Source address
- *
- * The main image should be entered via a copy of a single word from and to a
- * magic address, which controls various aspects of the boot. The magic address
- * is a bitfield, with each bit as documented below.
- */
-#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
-#define MC_CMD_COPYCODE_IN_SRC_ADDR_LEN 4
-/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT (see below) */
-#define MC_CMD_COPYCODE_HUNT_NO_MAGIC_ADDR 0x10000
-/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT and
- * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED (see below)
- */
-#define MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR 0x1d0d0
-/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT,
- * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED and BOOT_MAGIC_IGNORE_CONFIG (see
- * below)
- */
-#define MC_CMD_COPYCODE_HUNT_IGNORE_CONFIG_MAGIC_ADDR 0x1badc
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_OFST 0
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_LBN 17
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_WIDTH 1
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_OFST 0
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_LBN 2
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_WIDTH 1
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_OFST 0
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_LBN 3
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_WIDTH 1
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_OFST 0
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_LBN 4
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_WIDTH 1
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_OFST 0
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_LBN 5
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_WIDTH 1
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_OFST 0
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_LBN 6
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_WIDTH 1
-/* Destination address */
-#define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4
-#define MC_CMD_COPYCODE_IN_DEST_ADDR_LEN 4
-#define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8
-#define MC_CMD_COPYCODE_IN_NUMWORDS_LEN 4
-/* Address of where to jump after copy. */
-#define MC_CMD_COPYCODE_IN_JUMP_OFST 12
-#define MC_CMD_COPYCODE_IN_JUMP_LEN 4
-/* enum: Control should return to the caller rather than jumping */
-#define MC_CMD_COPYCODE_JUMP_NONE 0x1
-
-/* MC_CMD_COPYCODE_OUT msgresponse */
-#define MC_CMD_COPYCODE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_SET_FUNC
- * Select function for function-specific commands.
- */
-#define MC_CMD_SET_FUNC 0x4
-#undef MC_CMD_0x4_PRIVILEGE_CTG
-
-#define MC_CMD_0x4_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_SET_FUNC_IN msgrequest */
-#define MC_CMD_SET_FUNC_IN_LEN 4
-/* Set function */
-#define MC_CMD_SET_FUNC_IN_FUNC_OFST 0
-#define MC_CMD_SET_FUNC_IN_FUNC_LEN 4
-
-/* MC_CMD_SET_FUNC_OUT msgresponse */
-#define MC_CMD_SET_FUNC_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_GET_BOOT_STATUS
* Get the instruction address from which the MC booted.
*/
@@ -2259,6 +2079,7 @@
/* Log destination */
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_LEN 4
+/* enum property: bitmask */
/* enum: UART. */
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1
/* enum: Event queue. */
@@ -2304,6 +2125,9 @@
/* MC_CMD_GET_VERSION_OUT msgresponse */
#define MC_CMD_GET_VERSION_OUT_LEN 32
+/* This is normally the UTC build time in seconds since epoch or one of the
+ * special values listed
+ */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
/* Enum values, see field(s): */
@@ -2326,6 +2150,9 @@
/* MC_CMD_GET_VERSION_EXT_OUT msgresponse */
#define MC_CMD_GET_VERSION_EXT_OUT_LEN 48
+/* This is normally the UTC build time in seconds since epoch or one of the
+ * special values listed
+ */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
/* Enum values, see field(s): */
@@ -2356,6 +2183,9 @@
* (depending on which components exist on a particular adapter)
*/
#define MC_CMD_GET_VERSION_V2_OUT_LEN 304
+/* This is normally the UTC build time in seconds since epoch or one of the
+ * special values listed
+ */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
/* Enum values, see field(s): */
@@ -2495,6 +2325,9 @@
* (depending on which components exist on a particular adapter)
*/
#define MC_CMD_GET_VERSION_V3_OUT_LEN 328
+/* This is normally the UTC build time in seconds since epoch or one of the
+ * special values listed
+ */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
/* Enum values, see field(s): */
@@ -2641,6 +2474,9 @@
* version information
*/
#define MC_CMD_GET_VERSION_V4_OUT_LEN 392
+/* This is normally the UTC build time in seconds since epoch or one of the
+ * special values listed
+ */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
/* Enum values, see field(s): */
@@ -2803,6 +2639,9 @@
* and board version information
*/
#define MC_CMD_GET_VERSION_V5_OUT_LEN 424
+/* This is normally the UTC build time in seconds since epoch or one of the
+ * special values listed
+ */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
/* Enum values, see field(s): */
@@ -3065,8 +2904,18 @@
* subscribers.
*/
#define MC_CMD_PTP_OP_SET_SYNC_STATUS 0x1b
-/* enum: Above this for future use. */
-#define MC_CMD_PTP_OP_MAX 0x1c
+/* enum: X4 and later adapters should use this instead of
+ * PTP_OP_TIME_EVENT_SUBSCRIBE. Subscribe to receive periodic time events
+ * indicating the current NIC time
+ */
+#define MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE_V2 0x1c
+/* enum: For X4 and later NICs. Packet timestamps and time sync events carry
+ * IS_SET and IN_SYNC flags that indicate whether the time has been set and
+ * whether it is in sync with the host. Once set, the IN_SYNC flag is cleared
+ * by hardware after a software-configurable timeout. The host driver needs to
+ * query the currently configured timeout and the maximum supported interval;
+ * this MCDI operation can be used to query both.
+ */
+#define MC_CMD_PTP_OP_GET_SYNC_TIMEOUT 0x1d
/* MC_CMD_PTP_IN_ENABLE msgrequest */
#define MC_CMD_PTP_IN_ENABLE_LEN 16
@@ -3507,6 +3356,22 @@
#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_OFST 12
#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_LEN 4
+/* MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2 msgrequest */
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Event queue ID */
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_QUEUE_ID_OFST 8
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_QUEUE_ID_LEN 4
+/* Space for flags. */
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_FLAGS_OFST 12
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_FLAGS_LEN 4
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_REPORT_SYNC_STATUS_OFST 12
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_REPORT_SYNC_STATUS_LBN 31
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_REPORT_SYNC_STATUS_WIDTH 1
+
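/* Illustrative sketch (not part of this header or this patch): building the
 * TIME_EVENT_SUBSCRIBE_V2 request with plain little-endian stores at the
 * offsets defined above. example_put_le32(), the PERIPH_ID of 0 and the
 * calling convention are assumptions for the example; an actual driver would
 * use its own MCDI buffer helpers and transport.
 */
#include <stdint.h>
#include <string.h>

static void example_put_le32(uint8_t *buf, unsigned int ofst, uint32_t v)
{
	buf[ofst + 0] = v & 0xff;
	buf[ofst + 1] = (v >> 8) & 0xff;
	buf[ofst + 2] = (v >> 16) & 0xff;
	buf[ofst + 3] = (v >> 24) & 0xff;
}

static void example_build_time_event_subscribe_v2(uint8_t *inbuf,
						  uint32_t evq_id,
						  int report_sync_status)
{
	memset(inbuf, 0, MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_LEN);
	example_put_le32(inbuf, 0, MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE_V2); /* CMD */
	example_put_le32(inbuf, 4, 0);					    /* PERIPH_ID */
	example_put_le32(inbuf,
			 MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_QUEUE_ID_OFST,
			 evq_id);
	example_put_le32(inbuf,
			 MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_FLAGS_OFST,
			 report_sync_status ?
			 1u << MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_REPORT_SYNC_STATUS_LBN : 0);
}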
/* MC_CMD_PTP_IN_MANFTEST_PPS msgrequest */
#define MC_CMD_PTP_IN_MANFTEST_PPS_LEN 12
/* MC_CMD_PTP_IN_CMD_OFST 0 */
@@ -3540,6 +3405,13 @@
#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_OFST 20
#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_LEN 4
+/* MC_CMD_PTP_IN_GET_SYNC_TIMEOUT msgrequest */
+#define MC_CMD_PTP_IN_GET_SYNC_TIMEOUT_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
/* MC_CMD_PTP_OUT msgresponse */
#define MC_CMD_PTP_OUT_LEN 0
@@ -3939,416 +3811,14 @@
/* MC_CMD_PTP_OUT_SET_SYNC_STATUS msgresponse */
#define MC_CMD_PTP_OUT_SET_SYNC_STATUS_LEN 0
-
-/***********************************/
-/* MC_CMD_CSR_READ32
- * Read 32bit words from the indirect memory map.
- */
-#define MC_CMD_CSR_READ32 0xc
-#undef MC_CMD_0xc_PRIVILEGE_CTG
-
-#define MC_CMD_0xc_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_CSR_READ32_IN msgrequest */
-#define MC_CMD_CSR_READ32_IN_LEN 12
-/* Address */
-#define MC_CMD_CSR_READ32_IN_ADDR_OFST 0
-#define MC_CMD_CSR_READ32_IN_ADDR_LEN 4
-#define MC_CMD_CSR_READ32_IN_STEP_OFST 4
-#define MC_CMD_CSR_READ32_IN_STEP_LEN 4
-#define MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8
-#define MC_CMD_CSR_READ32_IN_NUMWORDS_LEN 4
-
-/* MC_CMD_CSR_READ32_OUT msgresponse */
-#define MC_CMD_CSR_READ32_OUT_LENMIN 4
-#define MC_CMD_CSR_READ32_OUT_LENMAX 252
-#define MC_CMD_CSR_READ32_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_CSR_READ32_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_CSR_READ32_OUT_BUFFER_NUM(len) (((len)-0)/4)
-/* The last dword is the status, not a value read */
-#define MC_CMD_CSR_READ32_OUT_BUFFER_OFST 0
-#define MC_CMD_CSR_READ32_OUT_BUFFER_LEN 4
-#define MC_CMD_CSR_READ32_OUT_BUFFER_MINNUM 1
-#define MC_CMD_CSR_READ32_OUT_BUFFER_MAXNUM 63
-#define MC_CMD_CSR_READ32_OUT_BUFFER_MAXNUM_MCDI2 255
-
-
-/***********************************/
-/* MC_CMD_CSR_WRITE32
- * Write 32bit dwords to the indirect memory map.
- */
-#define MC_CMD_CSR_WRITE32 0xd
-#undef MC_CMD_0xd_PRIVILEGE_CTG
-
-#define MC_CMD_0xd_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_CSR_WRITE32_IN msgrequest */
-#define MC_CMD_CSR_WRITE32_IN_LENMIN 12
-#define MC_CMD_CSR_WRITE32_IN_LENMAX 252
-#define MC_CMD_CSR_WRITE32_IN_LENMAX_MCDI2 1020
-#define MC_CMD_CSR_WRITE32_IN_LEN(num) (8+4*(num))
-#define MC_CMD_CSR_WRITE32_IN_BUFFER_NUM(len) (((len)-8)/4)
-/* Address */
-#define MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0
-#define MC_CMD_CSR_WRITE32_IN_ADDR_LEN 4
-#define MC_CMD_CSR_WRITE32_IN_STEP_OFST 4
-#define MC_CMD_CSR_WRITE32_IN_STEP_LEN 4
-#define MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8
-#define MC_CMD_CSR_WRITE32_IN_BUFFER_LEN 4
-#define MC_CMD_CSR_WRITE32_IN_BUFFER_MINNUM 1
-#define MC_CMD_CSR_WRITE32_IN_BUFFER_MAXNUM 61
-#define MC_CMD_CSR_WRITE32_IN_BUFFER_MAXNUM_MCDI2 253
-
-/* MC_CMD_CSR_WRITE32_OUT msgresponse */
-#define MC_CMD_CSR_WRITE32_OUT_LEN 4
-#define MC_CMD_CSR_WRITE32_OUT_STATUS_OFST 0
-#define MC_CMD_CSR_WRITE32_OUT_STATUS_LEN 4
-
-
-/***********************************/
-/* MC_CMD_HP
- * These commands are used for HP related features. They are grouped under one
- * MCDI command to avoid creating too many MCDI commands.
- */
-#define MC_CMD_HP 0x54
-#undef MC_CMD_0x54_PRIVILEGE_CTG
-
-#define MC_CMD_0x54_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_HP_IN msgrequest */
-#define MC_CMD_HP_IN_LEN 16
-/* HP OCSD sub-command. When address is not NULL, request activation of OCSD at
- * the specified address with the specified interval.When address is NULL,
- * INTERVAL is interpreted as a command: 0: stop OCSD / 1: Report OCSD current
- * state / 2: (debug) Show temperature reported by one of the supported
- * sensors.
- */
-#define MC_CMD_HP_IN_SUBCMD_OFST 0
-#define MC_CMD_HP_IN_SUBCMD_LEN 4
-/* enum: OCSD (Option Card Sensor Data) sub-command. */
-#define MC_CMD_HP_IN_OCSD_SUBCMD 0x0
-/* enum: Last known valid HP sub-command. */
-#define MC_CMD_HP_IN_LAST_SUBCMD 0x0
-/* The address to the array of sensor fields. (Or NULL to use a sub-command.)
- */
-#define MC_CMD_HP_IN_OCSD_ADDR_OFST 4
-#define MC_CMD_HP_IN_OCSD_ADDR_LEN 8
-#define MC_CMD_HP_IN_OCSD_ADDR_LO_OFST 4
-#define MC_CMD_HP_IN_OCSD_ADDR_LO_LEN 4
-#define MC_CMD_HP_IN_OCSD_ADDR_LO_LBN 32
-#define MC_CMD_HP_IN_OCSD_ADDR_LO_WIDTH 32
-#define MC_CMD_HP_IN_OCSD_ADDR_HI_OFST 8
-#define MC_CMD_HP_IN_OCSD_ADDR_HI_LEN 4
-#define MC_CMD_HP_IN_OCSD_ADDR_HI_LBN 64
-#define MC_CMD_HP_IN_OCSD_ADDR_HI_WIDTH 32
-/* The requested update interval, in seconds. (Or the sub-command if ADDR is
- * NULL.)
- */
-#define MC_CMD_HP_IN_OCSD_INTERVAL_OFST 12
-#define MC_CMD_HP_IN_OCSD_INTERVAL_LEN 4
-
-/* MC_CMD_HP_OUT msgresponse */
-#define MC_CMD_HP_OUT_LEN 4
-#define MC_CMD_HP_OUT_OCSD_STATUS_OFST 0
-#define MC_CMD_HP_OUT_OCSD_STATUS_LEN 4
-/* enum: OCSD stopped for this card. */
-#define MC_CMD_HP_OUT_OCSD_STOPPED 0x1
-/* enum: OCSD was successfully started with the address provided. */
-#define MC_CMD_HP_OUT_OCSD_STARTED 0x2
-/* enum: OCSD was already started for this card. */
-#define MC_CMD_HP_OUT_OCSD_ALREADY_STARTED 0x3
-
-
-/***********************************/
-/* MC_CMD_STACKINFO
- * Get stack information.
- */
-#define MC_CMD_STACKINFO 0xf
-#undef MC_CMD_0xf_PRIVILEGE_CTG
-
-#define MC_CMD_0xf_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_STACKINFO_IN msgrequest */
-#define MC_CMD_STACKINFO_IN_LEN 0
-
-/* MC_CMD_STACKINFO_OUT msgresponse */
-#define MC_CMD_STACKINFO_OUT_LENMIN 12
-#define MC_CMD_STACKINFO_OUT_LENMAX 252
-#define MC_CMD_STACKINFO_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_STACKINFO_OUT_LEN(num) (0+12*(num))
-#define MC_CMD_STACKINFO_OUT_THREAD_INFO_NUM(len) (((len)-0)/12)
-/* (thread ptr, stack size, free space) for each thread in system */
-#define MC_CMD_STACKINFO_OUT_THREAD_INFO_OFST 0
-#define MC_CMD_STACKINFO_OUT_THREAD_INFO_LEN 12
-#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MINNUM 1
-#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MAXNUM 21
-#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MAXNUM_MCDI2 85
-
-
-/***********************************/
-/* MC_CMD_MDIO_READ
- * MDIO register read.
- */
-#define MC_CMD_MDIO_READ 0x10
-#undef MC_CMD_0x10_PRIVILEGE_CTG
-
-#define MC_CMD_0x10_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_MDIO_READ_IN msgrequest */
-#define MC_CMD_MDIO_READ_IN_LEN 16
-/* Bus number; there are two MDIO buses: one for the internal PHY, and one for
- * external devices.
- */
-#define MC_CMD_MDIO_READ_IN_BUS_OFST 0
-#define MC_CMD_MDIO_READ_IN_BUS_LEN 4
-/* enum: Internal. */
-#define MC_CMD_MDIO_BUS_INTERNAL 0x0
-/* enum: External. */
-#define MC_CMD_MDIO_BUS_EXTERNAL 0x1
-/* Port address */
-#define MC_CMD_MDIO_READ_IN_PRTAD_OFST 4
-#define MC_CMD_MDIO_READ_IN_PRTAD_LEN 4
-/* Device Address or clause 22. */
-#define MC_CMD_MDIO_READ_IN_DEVAD_OFST 8
-#define MC_CMD_MDIO_READ_IN_DEVAD_LEN 4
-/* enum: By default all the MCDI MDIO operations perform clause45 mode. If you
- * want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22.
- */
-#define MC_CMD_MDIO_CLAUSE22 0x20
-/* Address */
-#define MC_CMD_MDIO_READ_IN_ADDR_OFST 12
-#define MC_CMD_MDIO_READ_IN_ADDR_LEN 4
-
-/* MC_CMD_MDIO_READ_OUT msgresponse */
-#define MC_CMD_MDIO_READ_OUT_LEN 8
-/* Value */
-#define MC_CMD_MDIO_READ_OUT_VALUE_OFST 0
-#define MC_CMD_MDIO_READ_OUT_VALUE_LEN 4
-/* Status the MDIO commands return the raw status bits from the MDIO block. A
- * "good" transaction should have the DONE bit set and all other bits clear.
- */
-#define MC_CMD_MDIO_READ_OUT_STATUS_OFST 4
-#define MC_CMD_MDIO_READ_OUT_STATUS_LEN 4
-/* enum: Good. */
-#define MC_CMD_MDIO_STATUS_GOOD 0x8
-
-
-/***********************************/
-/* MC_CMD_MDIO_WRITE
- * MDIO register write.
- */
-#define MC_CMD_MDIO_WRITE 0x11
-#undef MC_CMD_0x11_PRIVILEGE_CTG
-
-#define MC_CMD_0x11_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_MDIO_WRITE_IN msgrequest */
-#define MC_CMD_MDIO_WRITE_IN_LEN 20
-/* Bus number; there are two MDIO buses: one for the internal PHY, and one for
- * external devices.
- */
-#define MC_CMD_MDIO_WRITE_IN_BUS_OFST 0
-#define MC_CMD_MDIO_WRITE_IN_BUS_LEN 4
-/* enum: Internal. */
-/* MC_CMD_MDIO_BUS_INTERNAL 0x0 */
-/* enum: External. */
-/* MC_CMD_MDIO_BUS_EXTERNAL 0x1 */
-/* Port address */
-#define MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4
-#define MC_CMD_MDIO_WRITE_IN_PRTAD_LEN 4
-/* Device Address or clause 22. */
-#define MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8
-#define MC_CMD_MDIO_WRITE_IN_DEVAD_LEN 4
-/* enum: By default all the MCDI MDIO operations perform clause45 mode. If you
- * want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22.
- */
-/* MC_CMD_MDIO_CLAUSE22 0x20 */
-/* Address */
-#define MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12
-#define MC_CMD_MDIO_WRITE_IN_ADDR_LEN 4
-/* Value */
-#define MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16
-#define MC_CMD_MDIO_WRITE_IN_VALUE_LEN 4
-
-/* MC_CMD_MDIO_WRITE_OUT msgresponse */
-#define MC_CMD_MDIO_WRITE_OUT_LEN 4
-/* Status; the MDIO commands return the raw status bits from the MDIO block. A
- * "good" transaction should have the DONE bit set and all other bits clear.
- */
-#define MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0
-#define MC_CMD_MDIO_WRITE_OUT_STATUS_LEN 4
-/* enum: Good. */
-/* MC_CMD_MDIO_STATUS_GOOD 0x8 */
-
-
-/***********************************/
-/* MC_CMD_DBI_WRITE
- * Write DBI register(s).
- */
-#define MC_CMD_DBI_WRITE 0x12
-#undef MC_CMD_0x12_PRIVILEGE_CTG
-
-#define MC_CMD_0x12_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_DBI_WRITE_IN msgrequest */
-#define MC_CMD_DBI_WRITE_IN_LENMIN 12
-#define MC_CMD_DBI_WRITE_IN_LENMAX 252
-#define MC_CMD_DBI_WRITE_IN_LENMAX_MCDI2 1020
-#define MC_CMD_DBI_WRITE_IN_LEN(num) (0+12*(num))
-#define MC_CMD_DBI_WRITE_IN_DBIWROP_NUM(len) (((len)-0)/12)
-/* Each write op consists of an address (offset 0), byte enable/VF/CS2 (offset
- * 32) and value (offset 64). See MC_CMD_DBIWROP_TYPEDEF.
- */
-#define MC_CMD_DBI_WRITE_IN_DBIWROP_OFST 0
-#define MC_CMD_DBI_WRITE_IN_DBIWROP_LEN 12
-#define MC_CMD_DBI_WRITE_IN_DBIWROP_MINNUM 1
-#define MC_CMD_DBI_WRITE_IN_DBIWROP_MAXNUM 21
-#define MC_CMD_DBI_WRITE_IN_DBIWROP_MAXNUM_MCDI2 85
-
-/* MC_CMD_DBI_WRITE_OUT msgresponse */
-#define MC_CMD_DBI_WRITE_OUT_LEN 0
-
-/* MC_CMD_DBIWROP_TYPEDEF structuredef */
-#define MC_CMD_DBIWROP_TYPEDEF_LEN 12
-#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST 0
-#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LEN 4
-#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LBN 0
-#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_WIDTH 32
-#define MC_CMD_DBIWROP_TYPEDEF_PARMS_OFST 4
-#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LEN 4
-#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_OFST 4
-#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_LBN 16
-#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_WIDTH 16
-#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_OFST 4
-#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_LBN 15
-#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_WIDTH 1
-#define MC_CMD_DBIWROP_TYPEDEF_CS2_OFST 4
-#define MC_CMD_DBIWROP_TYPEDEF_CS2_LBN 14
-#define MC_CMD_DBIWROP_TYPEDEF_CS2_WIDTH 1
-#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LBN 32
-#define MC_CMD_DBIWROP_TYPEDEF_PARMS_WIDTH 32
-#define MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST 8
-#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LEN 4
-#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LBN 64
-#define MC_CMD_DBIWROP_TYPEDEF_VALUE_WIDTH 32
-
-
-/***********************************/
-/* MC_CMD_PORT_READ32
- * Read a 32-bit register from the indirect port register map. The port to
- * access is implied by the Shared memory channel used.
- */
-#define MC_CMD_PORT_READ32 0x14
-
-/* MC_CMD_PORT_READ32_IN msgrequest */
-#define MC_CMD_PORT_READ32_IN_LEN 4
-/* Address */
-#define MC_CMD_PORT_READ32_IN_ADDR_OFST 0
-#define MC_CMD_PORT_READ32_IN_ADDR_LEN 4
-
-/* MC_CMD_PORT_READ32_OUT msgresponse */
-#define MC_CMD_PORT_READ32_OUT_LEN 8
-/* Value */
-#define MC_CMD_PORT_READ32_OUT_VALUE_OFST 0
-#define MC_CMD_PORT_READ32_OUT_VALUE_LEN 4
-/* Status */
-#define MC_CMD_PORT_READ32_OUT_STATUS_OFST 4
-#define MC_CMD_PORT_READ32_OUT_STATUS_LEN 4
-
-
-/***********************************/
-/* MC_CMD_PORT_WRITE32
- * Write a 32-bit register to the indirect port register map. The port to
- * access is implied by the Shared memory channel used.
- */
-#define MC_CMD_PORT_WRITE32 0x15
-
-/* MC_CMD_PORT_WRITE32_IN msgrequest */
-#define MC_CMD_PORT_WRITE32_IN_LEN 8
-/* Address */
-#define MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0
-#define MC_CMD_PORT_WRITE32_IN_ADDR_LEN 4
-/* Value */
-#define MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4
-#define MC_CMD_PORT_WRITE32_IN_VALUE_LEN 4
-
-/* MC_CMD_PORT_WRITE32_OUT msgresponse */
-#define MC_CMD_PORT_WRITE32_OUT_LEN 4
-/* Status */
-#define MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0
-#define MC_CMD_PORT_WRITE32_OUT_STATUS_LEN 4
-
-
-/***********************************/
-/* MC_CMD_PORT_READ128
- * Read a 128-bit register from the indirect port register map. The port to
- * access is implied by the Shared memory channel used.
- */
-#define MC_CMD_PORT_READ128 0x16
-
-/* MC_CMD_PORT_READ128_IN msgrequest */
-#define MC_CMD_PORT_READ128_IN_LEN 4
-/* Address */
-#define MC_CMD_PORT_READ128_IN_ADDR_OFST 0
-#define MC_CMD_PORT_READ128_IN_ADDR_LEN 4
-
-/* MC_CMD_PORT_READ128_OUT msgresponse */
-#define MC_CMD_PORT_READ128_OUT_LEN 20
-/* Value */
-#define MC_CMD_PORT_READ128_OUT_VALUE_OFST 0
-#define MC_CMD_PORT_READ128_OUT_VALUE_LEN 16
-/* Status */
-#define MC_CMD_PORT_READ128_OUT_STATUS_OFST 16
-#define MC_CMD_PORT_READ128_OUT_STATUS_LEN 4
-
-
-/***********************************/
-/* MC_CMD_PORT_WRITE128
- * Write a 128-bit register to the indirect port register map. The port to
- * access is implied by the Shared memory channel used.
- */
-#define MC_CMD_PORT_WRITE128 0x17
-
-/* MC_CMD_PORT_WRITE128_IN msgrequest */
-#define MC_CMD_PORT_WRITE128_IN_LEN 20
-/* Address */
-#define MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0
-#define MC_CMD_PORT_WRITE128_IN_ADDR_LEN 4
-/* Value */
-#define MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4
-#define MC_CMD_PORT_WRITE128_IN_VALUE_LEN 16
-
-/* MC_CMD_PORT_WRITE128_OUT msgresponse */
-#define MC_CMD_PORT_WRITE128_OUT_LEN 4
-/* Status */
-#define MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0
-#define MC_CMD_PORT_WRITE128_OUT_STATUS_LEN 4
-
-/* MC_CMD_CAPABILITIES structuredef */
-#define MC_CMD_CAPABILITIES_LEN 4
-/* Small buf table. */
-#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_LBN 0
-#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_WIDTH 1
-/* Turbo mode (for Maranello). */
-#define MC_CMD_CAPABILITIES_TURBO_LBN 1
-#define MC_CMD_CAPABILITIES_TURBO_WIDTH 1
-/* Turbo mode active (for Maranello). */
-#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN 2
-#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_WIDTH 1
-/* PTP offload. */
-#define MC_CMD_CAPABILITIES_PTP_LBN 3
-#define MC_CMD_CAPABILITIES_PTP_WIDTH 1
-/* AOE mode. */
-#define MC_CMD_CAPABILITIES_AOE_LBN 4
-#define MC_CMD_CAPABILITIES_AOE_WIDTH 1
-/* AOE mode active. */
-#define MC_CMD_CAPABILITIES_AOE_ACTIVE_LBN 5
-#define MC_CMD_CAPABILITIES_AOE_ACTIVE_WIDTH 1
-/* AOE mode active. */
-#define MC_CMD_CAPABILITIES_FC_ACTIVE_LBN 6
-#define MC_CMD_CAPABILITIES_FC_ACTIVE_WIDTH 1
-#define MC_CMD_CAPABILITIES_RESERVED_LBN 7
-#define MC_CMD_CAPABILITIES_RESERVED_WIDTH 25
+/* MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT msgresponse */
+#define MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT_LEN 8
+/* Current value set in NIC, in seconds */
+#define MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT_CURRENT_OFST 0
+#define MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT_CURRENT_LEN 4
+/* Maximum supported by NIC, in seconds */
+#define MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT_MAXIMUM_OFST 4
+#define MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT_MAXIMUM_LEN 4
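/* Illustrative sketch (not part of this header or this patch): extracting the
 * two fields of the GET_SYNC_TIMEOUT response using the offsets above.
 * example_get_le32() is an assumption for the example; the request itself is
 * an MC_CMD_PTP invocation with CMD set to MC_CMD_PTP_OP_GET_SYNC_TIMEOUT,
 * issued over whatever MCDI transport the driver already uses.
 */
#include <stdint.h>

static uint32_t example_get_le32(const uint8_t *buf, unsigned int ofst)
{
	return (uint32_t)buf[ofst] |
	       ((uint32_t)buf[ofst + 1] << 8) |
	       ((uint32_t)buf[ofst + 2] << 16) |
	       ((uint32_t)buf[ofst + 3] << 24);
}

static void example_parse_get_sync_timeout(const uint8_t *outbuf,
					   uint32_t *current_s,
					   uint32_t *maximum_s)
{
	/* Both values are reported in seconds. */
	*current_s = example_get_le32(outbuf,
			MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT_CURRENT_OFST);
	*maximum_s = example_get_le32(outbuf,
			MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT_MAXIMUM_OFST);
}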
/***********************************/
@@ -4427,112 +3897,6 @@
/***********************************/
-/* MC_CMD_DBI_READX
- * Read DBI register(s) -- extended functionality
- */
-#define MC_CMD_DBI_READX 0x19
-#undef MC_CMD_0x19_PRIVILEGE_CTG
-
-#define MC_CMD_0x19_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_DBI_READX_IN msgrequest */
-#define MC_CMD_DBI_READX_IN_LENMIN 8
-#define MC_CMD_DBI_READX_IN_LENMAX 248
-#define MC_CMD_DBI_READX_IN_LENMAX_MCDI2 1016
-#define MC_CMD_DBI_READX_IN_LEN(num) (0+8*(num))
-#define MC_CMD_DBI_READX_IN_DBIRDOP_NUM(len) (((len)-0)/8)
-/* Each Read op consists of an address (offset 0), VF/CS2) */
-#define MC_CMD_DBI_READX_IN_DBIRDOP_OFST 0
-#define MC_CMD_DBI_READX_IN_DBIRDOP_LEN 8
-#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_OFST 0
-#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_LEN 4
-#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_LBN 0
-#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_WIDTH 32
-#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_OFST 4
-#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_LEN 4
-#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_LBN 32
-#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_WIDTH 32
-#define MC_CMD_DBI_READX_IN_DBIRDOP_MINNUM 1
-#define MC_CMD_DBI_READX_IN_DBIRDOP_MAXNUM 31
-#define MC_CMD_DBI_READX_IN_DBIRDOP_MAXNUM_MCDI2 127
-
-/* MC_CMD_DBI_READX_OUT msgresponse */
-#define MC_CMD_DBI_READX_OUT_LENMIN 4
-#define MC_CMD_DBI_READX_OUT_LENMAX 252
-#define MC_CMD_DBI_READX_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_DBI_READX_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_DBI_READX_OUT_VALUE_NUM(len) (((len)-0)/4)
-/* Value */
-#define MC_CMD_DBI_READX_OUT_VALUE_OFST 0
-#define MC_CMD_DBI_READX_OUT_VALUE_LEN 4
-#define MC_CMD_DBI_READX_OUT_VALUE_MINNUM 1
-#define MC_CMD_DBI_READX_OUT_VALUE_MAXNUM 63
-#define MC_CMD_DBI_READX_OUT_VALUE_MAXNUM_MCDI2 255
-
-/* MC_CMD_DBIRDOP_TYPEDEF structuredef */
-#define MC_CMD_DBIRDOP_TYPEDEF_LEN 8
-#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_OFST 0
-#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LEN 4
-#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LBN 0
-#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_WIDTH 32
-#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_OFST 4
-#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_LEN 4
-#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_OFST 4
-#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_LBN 16
-#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_WIDTH 16
-#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_OFST 4
-#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_LBN 15
-#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_WIDTH 1
-#define MC_CMD_DBIRDOP_TYPEDEF_CS2_OFST 4
-#define MC_CMD_DBIRDOP_TYPEDEF_CS2_LBN 14
-#define MC_CMD_DBIRDOP_TYPEDEF_CS2_WIDTH 1
-#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_LBN 32
-#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_WIDTH 32
-
-
-/***********************************/
-/* MC_CMD_SET_RAND_SEED
- * Set the 16byte seed for the MC pseudo-random generator.
- */
-#define MC_CMD_SET_RAND_SEED 0x1a
-#undef MC_CMD_0x1a_PRIVILEGE_CTG
-
-#define MC_CMD_0x1a_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_SET_RAND_SEED_IN msgrequest */
-#define MC_CMD_SET_RAND_SEED_IN_LEN 16
-/* Seed value. */
-#define MC_CMD_SET_RAND_SEED_IN_SEED_OFST 0
-#define MC_CMD_SET_RAND_SEED_IN_SEED_LEN 16
-
-/* MC_CMD_SET_RAND_SEED_OUT msgresponse */
-#define MC_CMD_SET_RAND_SEED_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_LTSSM_HIST
- * Retrieve the history of the LTSSM, if the build supports it.
- */
-#define MC_CMD_LTSSM_HIST 0x1b
-
-/* MC_CMD_LTSSM_HIST_IN msgrequest */
-#define MC_CMD_LTSSM_HIST_IN_LEN 0
-
-/* MC_CMD_LTSSM_HIST_OUT msgresponse */
-#define MC_CMD_LTSSM_HIST_OUT_LENMIN 0
-#define MC_CMD_LTSSM_HIST_OUT_LENMAX 252
-#define MC_CMD_LTSSM_HIST_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_LTSSM_HIST_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_LTSSM_HIST_OUT_DATA_NUM(len) (((len)-0)/4)
-/* variable number of LTSSM values, as bytes. The history is read-to-clear. */
-#define MC_CMD_LTSSM_HIST_OUT_DATA_OFST 0
-#define MC_CMD_LTSSM_HIST_OUT_DATA_LEN 4
-#define MC_CMD_LTSSM_HIST_OUT_DATA_MINNUM 0
-#define MC_CMD_LTSSM_HIST_OUT_DATA_MAXNUM 63
-#define MC_CMD_LTSSM_HIST_OUT_DATA_MAXNUM_MCDI2 255
-
-
-/***********************************/
/* MC_CMD_DRV_ATTACH
* Inform MCPU that this port is managed on the host (i.e. driver active). For
* Huntington, also request the preferred datapath firmware to use if possible
@@ -4705,6 +4069,7 @@
/* Flags associated with this function */
#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_OFST 4
#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_LEN 4
+/* enum property: bitshift */
/* enum: Labels the lowest-numbered function visible to the OS */
#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY 0x0
/* enum: The function can control the link state of the physical port it is
@@ -4732,22 +4097,6 @@
/***********************************/
-/* MC_CMD_SHMUART
- * Route UART output to circular buffer in shared memory instead.
- */
-#define MC_CMD_SHMUART 0x1f
-
-/* MC_CMD_SHMUART_IN msgrequest */
-#define MC_CMD_SHMUART_IN_LEN 4
-/* ??? */
-#define MC_CMD_SHMUART_IN_FLAG_OFST 0
-#define MC_CMD_SHMUART_IN_FLAG_LEN 4
-
-/* MC_CMD_SHMUART_OUT msgresponse */
-#define MC_CMD_SHMUART_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_PORT_RESET
* Generic per-port reset. There is no equivalent for per-board reset. Locks
* required: None; Return code: 0, ETIME. NOTE: This command is deprecated -
@@ -4790,100 +4139,6 @@
/***********************************/
-/* MC_CMD_PCIE_CREDITS
- * Read instantaneous and minimum flow control thresholds.
- */
-#define MC_CMD_PCIE_CREDITS 0x21
-
-/* MC_CMD_PCIE_CREDITS_IN msgrequest */
-#define MC_CMD_PCIE_CREDITS_IN_LEN 8
-/* poll period. 0 is disabled */
-#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_OFST 0
-#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_LEN 4
-/* wipe statistics */
-#define MC_CMD_PCIE_CREDITS_IN_WIPE_OFST 4
-#define MC_CMD_PCIE_CREDITS_IN_WIPE_LEN 4
-
-/* MC_CMD_PCIE_CREDITS_OUT msgresponse */
-#define MC_CMD_PCIE_CREDITS_OUT_LEN 16
-#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_HDR_OFST 0
-#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_HDR_LEN 2
-#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_DATA_OFST 2
-#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_DATA_LEN 2
-#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_HDR_OFST 4
-#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_HDR_LEN 2
-#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_DATA_OFST 6
-#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_DATA_LEN 2
-#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_HDR_OFST 8
-#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_HDR_LEN 2
-#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_DATA_OFST 10
-#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_DATA_LEN 2
-#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_HDR_OFST 12
-#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_HDR_LEN 2
-#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_DATA_OFST 14
-#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_DATA_LEN 2
-
-
-/***********************************/
-/* MC_CMD_RXD_MONITOR
- * Get histogram of RX queue fill level.
- */
-#define MC_CMD_RXD_MONITOR 0x22
-
-/* MC_CMD_RXD_MONITOR_IN msgrequest */
-#define MC_CMD_RXD_MONITOR_IN_LEN 12
-#define MC_CMD_RXD_MONITOR_IN_QID_OFST 0
-#define MC_CMD_RXD_MONITOR_IN_QID_LEN 4
-#define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_OFST 4
-#define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_LEN 4
-#define MC_CMD_RXD_MONITOR_IN_WIPE_OFST 8
-#define MC_CMD_RXD_MONITOR_IN_WIPE_LEN 4
-
-/* MC_CMD_RXD_MONITOR_OUT msgresponse */
-#define MC_CMD_RXD_MONITOR_OUT_LEN 80
-#define MC_CMD_RXD_MONITOR_OUT_QID_OFST 0
-#define MC_CMD_RXD_MONITOR_OUT_QID_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_FILL_OFST 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_FILL_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_OFST 8
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_OFST 12
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_OFST 16
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_OFST 20
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_OFST 24
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_OFST 28
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_OFST 32
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_OFST 36
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_OFST 40
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_OFST 44
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_OFST 48
-#define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_OFST 52
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_OFST 56
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_OFST 60
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_OFST 64
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_OFST 68
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_OFST 72
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_OFST 76
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_LEN 4
-
-
-/***********************************/
/* MC_CMD_PUTS
* Copy the given ASCII string out onto UART and/or out of the network port.
*/
@@ -4931,6 +4186,54 @@
/* MC_CMD_GET_PHY_CFG_IN msgrequest */
#define MC_CMD_GET_PHY_CFG_IN_LEN 0
+/* MC_CMD_GET_PHY_CFG_IN_V2 msgrequest */
+#define MC_CMD_GET_PHY_CFG_IN_V2_LEN 8
+/* Target port to request PHY state for. Uses MAE_LINK_ENDPOINT_SELECTOR which
+ * identifies a real or virtual network port by MAE port and link end. See the
+ * structure definition for more details
+ */
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_OFST 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LEN 8
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LO_OFST 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LO_LEN 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LO_LBN 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LO_WIDTH 32
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_HI_OFST 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_HI_LEN 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_HI_LBN 32
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_HI_WIDTH 32
+/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_OFST 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_LEN 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FLAT_OFST 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FLAT_LEN 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_TYPE_OFST 3
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_TYPE_LEN 1
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 20
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 16
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 2
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LINK_END_OFST 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LINK_END_LEN 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_OFST 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_LEN 8
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_LO_OFST 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_LO_LEN 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_LO_LBN 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_LO_WIDTH 32
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_HI_OFST 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_HI_LEN 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_HI_LBN 32
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_HI_WIDTH 32
+
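/* Illustrative sketch (not part of this header or this patch): filling the
 * 8-byte TARGET field of GET_PHY_CFG_IN_V2, which is an
 * MAE_LINK_ENDPOINT_SELECTOR (an m-port selector in the low dword and a link
 * end in the high dword). The selector and link-end values themselves come
 * from the MAE_LINK_ENDPOINT_SELECTOR/MAE_MPORT_SELECTOR definitions, not
 * shown here; example_put_le32() is the helper from the earlier sketch.
 */
static void example_fill_phy_cfg_v2_target(uint8_t *inbuf,
					   uint32_t mport_selector,
					   uint32_t link_end)
{
	example_put_le32(inbuf,
			 MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_OFST,
			 mport_selector);
	example_put_le32(inbuf,
			 MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LINK_END_OFST,
			 link_end);
}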
/* MC_CMD_GET_PHY_CFG_OUT msgresponse */
#define MC_CMD_GET_PHY_CFG_OUT_LEN 72
/* flags */
@@ -5026,6 +4329,9 @@
#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_OFST 8
#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN 21
#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_WIDTH 1
+#define MC_CMD_PHY_CAP_200000FDX_OFST 8
+#define MC_CMD_PHY_CAP_200000FDX_LBN 22
+#define MC_CMD_PHY_CAP_200000FDX_WIDTH 1
/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12
#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_LEN 4
@@ -5059,6 +4365,7 @@
#define MC_CMD_MEDIA_DSFP 0x8
#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48
#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_LEN 4
+/* enum property: bitshift */
/* enum: Native clause 22 */
#define MC_CMD_MMD_CLAUSE22 0x0
#define MC_CMD_MMD_CLAUSE45_PMAPMD 0x1 /* enum */
@@ -5084,7 +4391,7 @@
#define MC_CMD_START_BIST 0x25
#undef MC_CMD_0x25_PRIVILEGE_CTG
-#define MC_CMD_0x25_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x25_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_START_BIST_IN msgrequest */
#define MC_CMD_START_BIST_IN_LEN 4
@@ -5124,7 +4431,7 @@
#define MC_CMD_POLL_BIST 0x26
#undef MC_CMD_0x26_PRIVILEGE_CTG
-#define MC_CMD_0x26_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x26_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_POLL_BIST_IN msgrequest */
#define MC_CMD_POLL_BIST_IN_LEN 0
@@ -5282,33 +4589,6 @@
/***********************************/
-/* MC_CMD_FLUSH_RX_QUEUES
- * Flush receive queue(s). If SRIOV is enabled (via MC_CMD_SRIOV), then RXQ
- * flushes should be initiated via this MCDI operation, rather than via
- * directly writing FLUSH_CMD.
- *
- * The flush is completed (either done/fail) asynchronously (after this command
- * returns). The driver must still wait for flush done/failure events as usual.
- */
-#define MC_CMD_FLUSH_RX_QUEUES 0x27
-
-/* MC_CMD_FLUSH_RX_QUEUES_IN msgrequest */
-#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMIN 4
-#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX 252
-#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX_MCDI2 1020
-#define MC_CMD_FLUSH_RX_QUEUES_IN_LEN(num) (0+4*(num))
-#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_NUM(len) (((len)-0)/4)
-#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_OFST 0
-#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_LEN 4
-#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MINNUM 1
-#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM 63
-#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM_MCDI2 255
-
-/* MC_CMD_FLUSH_RX_QUEUES_OUT msgresponse */
-#define MC_CMD_FLUSH_RX_QUEUES_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_GET_LOOPBACK_MODES
* Returns a bitmask of loopback modes available at each speed.
*/
@@ -5320,6 +4600,54 @@
/* MC_CMD_GET_LOOPBACK_MODES_IN msgrequest */
#define MC_CMD_GET_LOOPBACK_MODES_IN_LEN 0
+/* MC_CMD_GET_LOOPBACK_MODES_IN_V2 msgrequest */
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_LEN 8
+/* Target port to request loopback modes for. Uses MAE_LINK_ENDPOINT_SELECTOR
+ * which identifies a real or virtual network port by MAE port and link end.
+ * See the structure definition for more details
+ */
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LO_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LO_LBN 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_HI_OFST 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_HI_LBN 32
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_HI_WIDTH 32
+/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FLAT_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FLAT_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_TYPE_OFST 3
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_TYPE_LEN 1
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 20
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 16
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 2
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LINK_END_OFST 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LINK_END_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_LO_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_LO_LBN 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_HI_OFST 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_HI_LBN 32
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_HI_WIDTH 32
+
/* MC_CMD_GET_LOOPBACK_MODES_OUT msgresponse */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_LEN 40
/* Supported loopbacks. */
@@ -5333,6 +4661,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_LBN 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_WIDTH 32
+/* enum property: bitshift */
/* enum: None. */
#define MC_CMD_LOOPBACK_NONE 0x0
/* enum: Data. */
@@ -5422,6 +4751,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_LBN 96
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
@@ -5435,6 +4765,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_LBN 160
#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
@@ -5448,6 +4779,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_LBN 224
#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
@@ -5461,6 +4793,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_LBN 288
#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
@@ -5479,6 +4812,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_LBN 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_WIDTH 32
+/* enum property: bitshift */
/* enum: None. */
/* MC_CMD_LOOPBACK_NONE 0x0 */
/* enum: Data. */
@@ -5568,6 +4902,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_LBN 96
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
@@ -5581,6 +4916,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_LBN 160
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
@@ -5594,6 +4930,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_LBN 224
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
@@ -5607,6 +4944,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_LBN 288
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported 25G loopbacks. */
@@ -5620,6 +4958,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_LBN 352
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported 50G loopbacks. */
@@ -5633,6 +4972,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_LBN 416
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported 100G loopbacks. */
@@ -5646,6 +4986,214 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_LBN 480
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* 100M */
+
+/* MC_CMD_GET_LOOPBACK_MODES_OUT_V3 msgresponse: Supported loopback modes for
+ * newer NICs with 200G support
+ */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_LEN 72
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_LO_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_LO_LBN 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_HI_OFST 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_HI_LBN 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_HI_WIDTH 32
+/* enum property: bitshift */
+/* enum: None. */
+/* MC_CMD_LOOPBACK_NONE 0x0 */
+/* enum: Data. */
+/* MC_CMD_LOOPBACK_DATA 0x1 */
+/* enum: GMAC. */
+/* MC_CMD_LOOPBACK_GMAC 0x2 */
+/* enum: XGMII. */
+/* MC_CMD_LOOPBACK_XGMII 0x3 */
+/* enum: XGXS. */
+/* MC_CMD_LOOPBACK_XGXS 0x4 */
+/* enum: XAUI. */
+/* MC_CMD_LOOPBACK_XAUI 0x5 */
+/* enum: GMII. */
+/* MC_CMD_LOOPBACK_GMII 0x6 */
+/* enum: SGMII. */
+/* MC_CMD_LOOPBACK_SGMII 0x7 */
+/* enum: XGBR. */
+/* MC_CMD_LOOPBACK_XGBR 0x8 */
+/* enum: XFI. */
+/* MC_CMD_LOOPBACK_XFI 0x9 */
+/* enum: XAUI Far. */
+/* MC_CMD_LOOPBACK_XAUI_FAR 0xa */
+/* enum: GMII Far. */
+/* MC_CMD_LOOPBACK_GMII_FAR 0xb */
+/* enum: SGMII Far. */
+/* MC_CMD_LOOPBACK_SGMII_FAR 0xc */
+/* enum: XFI Far. */
+/* MC_CMD_LOOPBACK_XFI_FAR 0xd */
+/* enum: GPhy. */
+/* MC_CMD_LOOPBACK_GPHY 0xe */
+/* enum: PhyXS. */
+/* MC_CMD_LOOPBACK_PHYXS 0xf */
+/* enum: PCS. */
+/* MC_CMD_LOOPBACK_PCS 0x10 */
+/* enum: PMA-PMD. */
+/* MC_CMD_LOOPBACK_PMAPMD 0x11 */
+/* enum: Cross-Port. */
+/* MC_CMD_LOOPBACK_XPORT 0x12 */
+/* enum: XGMII-Wireside. */
+/* MC_CMD_LOOPBACK_XGMII_WS 0x13 */
+/* enum: XAUI Wireside. */
+/* MC_CMD_LOOPBACK_XAUI_WS 0x14 */
+/* enum: XAUI Wireside Far. */
+/* MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15 */
+/* enum: XAUI Wireside near. */
+/* MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16 */
+/* enum: GMII Wireside. */
+/* MC_CMD_LOOPBACK_GMII_WS 0x17 */
+/* enum: XFI Wireside. */
+/* MC_CMD_LOOPBACK_XFI_WS 0x18 */
+/* enum: XFI Wireside Far. */
+/* MC_CMD_LOOPBACK_XFI_WS_FAR 0x19 */
+/* enum: PhyXS Wireside. */
+/* MC_CMD_LOOPBACK_PHYXS_WS 0x1a */
+/* enum: PMA lanes MAC-Serdes. */
+/* MC_CMD_LOOPBACK_PMA_INT 0x1b */
+/* enum: KR Serdes Parallel (Encoder). */
+/* MC_CMD_LOOPBACK_SD_NEAR 0x1c */
+/* enum: KR Serdes Serial. */
+/* MC_CMD_LOOPBACK_SD_FAR 0x1d */
+/* enum: PMA lanes MAC-Serdes Wireside. */
+/* MC_CMD_LOOPBACK_PMA_INT_WS 0x1e */
+/* enum: KR Serdes Parallel Wireside (Full PCS). */
+/* MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f */
+/* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */
+/* MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20 */
+/* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */
+/* MC_CMD_LOOPBACK_SD_FEP_WS 0x21 */
+/* enum: KR Serdes Serial Wireside. */
+/* MC_CMD_LOOPBACK_SD_FES_WS 0x22 */
+/* enum: Near side of AOE Siena side port */
+/* MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23 */
+/* enum: Medford Wireside datapath loopback */
+/* MC_CMD_LOOPBACK_DATA_WS 0x24 */
+/* enum: Force link up without setting up any physical loopback (snapper use
+ * only)
+ */
+/* MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25 */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_LO_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_LO_LBN 64
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_HI_OFST 12
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_HI_LBN 96
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_LO_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_LO_LBN 128
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_HI_OFST 20
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_HI_LBN 160
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_LO_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_LO_LBN 192
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_HI_OFST 28
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_HI_LBN 224
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_LO_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_LO_LBN 256
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_HI_OFST 36
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_HI_LBN 288
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 25G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_OFST 40
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_LO_OFST 40
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_LO_LBN 320
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_HI_OFST 44
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_HI_LBN 352
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 50G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_OFST 48
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_LO_OFST 48
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_LO_LBN 384
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_HI_OFST 52
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_HI_LBN 416
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 100G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_OFST 56
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_LO_OFST 56
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_LO_LBN 448
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_HI_OFST 60
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_HI_LBN 480
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 200G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_OFST 64
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_LO_OFST 64
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_LO_LBN 512
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_HI_OFST 68
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_HI_LBN 544
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
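
Reading these V3 tables back relies on two conventions the generated defines encode: each 64-bit per-speed mask is split into _LO/_HI dwords, and the "enum property: bitshift" annotation means the MC_CMD_LOOPBACK_* values are bit indices into that mask rather than literal values. A minimal, hypothetical sketch (editorial, not part of the patch) of interpreting one of the masks:

#include <stdbool.h>
#include <stdint.h>

/* Reassemble a 64-bit mask from its LO/HI dwords, e.g. the V3 100G words. */
static uint64_t mcdi_mask64(uint32_t lo, uint32_t hi)
{
	return ((uint64_t)hi << 32) | lo;
}

/* "bitshift" enums: each MC_CMD_LOOPBACK_* value selects one bit of the mask. */
static bool loopback_mode_supported(uint64_t mask, unsigned int mode)
{
	return mode < 64 && (mask & (1ULL << mode));
}
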
@@ -5673,13 +5221,835 @@
#define FEC_TYPE_TYPE_LEN 4
/* enum: No FEC */
#define MC_CMD_FEC_NONE 0x0
-/* enum: Clause 74 BASE-R FEC (a.k.a Firecode) */
+/* enum: IEEE 802.3, Clause 74 BASE-R FEC (a.k.a Firecode) */
#define MC_CMD_FEC_BASER 0x1
-/* enum: Clause 91/Clause 108 Reed-Solomon FEC */
+/* enum: IEEE 802.3, Clause 91/Clause 108 Reed-Solomon FEC */
#define MC_CMD_FEC_RS 0x2
+/* enum: IEEE 802.3, Clause 161, interleaved RS-FEC sublayer for 100GBASE-R
+ * PHYs
+ */
+#define MC_CMD_FEC_IEEE_RS_INT 0x3
+/* enum: Ethernet Consortium, Low Latency RS-FEC. RS(272, 258). Replaces FEC
+ * specified in Clause 119 for 100/200G PHY. Replaces FEC specified in Clause
+ * 134 for 50G PHY.
+ */
+#define MC_CMD_FEC_ETCS_RS_LL 0x4
+/* enum: FEC mode selected automatically */
+#define MC_CMD_FEC_AUTO 0x5
#define FEC_TYPE_TYPE_LBN 0
#define FEC_TYPE_TYPE_WIDTH 32
+/* MC_CMD_ETH_TECH structuredef: Ethernet technology as defined by IEEE 802.3,
+ * the Ethernet Technology Consortium, or proprietary technologies. The driver
+ * must not use technologies labelled NONE and AUTO.
+ */
+#define MC_CMD_ETH_TECH_LEN 16
+/* The enums in this field can be used either as bitwise indices into a tech
+ * mask (e.g. see MC_CMD_ETH_AN_FIELDS/TECH_MASK) or as regular enums (e.g.
+ * see MC_CMD_LINK_CTRL_IN/ADVERTISED_TECH_ABILITIES_MASK). This structure
+ * must be updated to add new technologies when projects that need them arise.
+ * An incomplete list of possible future expansions includes: 100GBASE_KP4,
+ * 800GBASE_CR8, 800GBASE_KR8, 800GBASE_DR8, 800GBASE_SR8, 800GBASE_VR8.
+ */
+#define MC_CMD_ETH_TECH_TECH_OFST 0
+#define MC_CMD_ETH_TECH_TECH_LEN 16
+/* enum: 1000BASE-KX - 1000BASE-X PCS/PMA over an electrical backplane PMD. See
+ * IEEE 802.3 Clause 70
+ */
+#define MC_CMD_ETH_TECH_1000BASEKX 0x0
+/* enum: 10GBASE-R - PCS/PMA over an electrical backplane PMD. Refer to IEEE
+ * 802.3 Clause 72
+ */
+#define MC_CMD_ETH_TECH_10GBASE_KR 0x1
+/* enum: 40GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3
+ * Clause 84.
+ */
+#define MC_CMD_ETH_TECH_40GBASE_KR4 0x2
+/* enum: 40GBASE-R PCS/PMA over 4 lane shielded copper balanced cable PMD. See
+ * IEEE 802.3 Clause 85
+ */
+#define MC_CMD_ETH_TECH_40GBASE_CR4 0x3
+/* enum: 40GBASE-R PCS/PMA over 4 lane multimode fiber PMD as specified in
+ * Clause 86
+ */
+#define MC_CMD_ETH_TECH_40GBASE_SR4 0x4
+/* enum: 40GBASE-R PCS/PMA over 4 WDM lane single mode fiber PMD with long
+ * reach. See IEEE 802.3 Clause 87
+ */
+#define MC_CMD_ETH_TECH_40GBASE_LR4 0x5
+/* enum: 25GBASE-R PCS/PMA over shielded balanced copper cable PMD. See IEEE
+ * 802.3 Clause 110
+ */
+#define MC_CMD_ETH_TECH_25GBASE_CR 0x6
+/* enum: 25GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3
+ * Clause 111
+ */
+#define MC_CMD_ETH_TECH_25GBASE_KR 0x7
+/* enum: 25GBASE-R PCS/PMA over multimode fiber PMD. Refer to IEEE 802.3 Clause
+ * 112
+ */
+#define MC_CMD_ETH_TECH_25GBASE_SR 0x8
+/* enum: An Ethernet Physical layer operating at 50 Gb/s on twin-axial copper
+ * cable. Refer to Ethernet Technology Consortium 25/50G Ethernet Spec.
+ */
+#define MC_CMD_ETH_TECH_50GBASE_CR2 0x9
+/* enum: An Ethernet Physical layer operating at 50 Gb/s on copper backplane.
+ * Refer to Ethernet Technology Consortium 25/50G Ethernet Spec.
+ */
+#define MC_CMD_ETH_TECH_50GBASE_KR2 0xa
+/* enum: 100GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3
+ * Clause 93
+ */
+#define MC_CMD_ETH_TECH_100GBASE_KR4 0xb
+/* enum: 100GBASE-R PCS/PMA over 4 lane multimode fiber PMD. See IEEE 802.3
+ * Clause 95
+ */
+#define MC_CMD_ETH_TECH_100GBASE_SR4 0xc
+/* enum: 100GBASE-R PCS/PMA over 4 lane shielded copper balanced cable PMD. See
+ * IEEE 802.3 Clause 92
+ */
+#define MC_CMD_ETH_TECH_100GBASE_CR4 0xd
+/* enum: 100GBASE-R PCS/PMA over 4 WDM lane single mode fiber PMD, with
+ * long/extended reach. See IEEE 802.3 Clause 88
+ */
+#define MC_CMD_ETH_TECH_100GBASE_LR4_ER4 0xe
+/* enum: An Ethernet Physical layer operating at 50 Gb/s on short reach fiber.
+ * Refer to Ethernet Technology Consortium 25/50G Ethernet Spec.
+ */
+#define MC_CMD_ETH_TECH_50GBASE_SR2 0xf
+/* enum: 1000BASEX PCS/PMA. See IEEE 802.3 Clause 36 over undefined PMD, duplex
+ * mode unknown
+ */
+#define MC_CMD_ETH_TECH_1000BASEX 0x10
+/* enum: Non-standardised. 10G direct attach */
+#define MC_CMD_ETH_TECH_10GBASE_CR 0x11
+/* enum: 10GBASE-SR fiber over 850nm optics. See IEEE 802.3 Clause 52 */
+#define MC_CMD_ETH_TECH_10GBASE_SR 0x12
+/* enum: 10GBASE-LR fiber over 1310nm optics. See IEEE 802.3 Clause 52 */
+#define MC_CMD_ETH_TECH_10GBASE_LR 0x13
+/* enum: 10GBASE-LRM fiber over 1310 nm optics. See IEEE 802.3 Clause 68 */
+#define MC_CMD_ETH_TECH_10GBASE_LRM 0x14
+/* enum: 10GBASE-ER fiber over 1550nm optics. See IEEE 802.3 Clause 52 */
+#define MC_CMD_ETH_TECH_10GBASE_ER 0x15
+/* enum: 50GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3
+ * Clause 137
+ */
+#define MC_CMD_ETH_TECH_50GBASE_KR 0x16
+/* enum: 50GBASE-SR PCS/PMA over multimode fiber PMD as specified in Clause 138
+ */
+#define MC_CMD_ETH_TECH_50GBASE_SR 0x17
+/* enum: 50GBASE-CR PCS/PMA over shielded copper balanced cable PMD. See IEEE
+ * 802.3 Clause 136
+ */
+#define MC_CMD_ETH_TECH_50GBASE_CR 0x18
+/* enum: 50GBASE-R PCS/PMA over single mode fiber PMD as specified in Clause
+ * 139.
+ */
+#define MC_CMD_ETH_TECH_50GBASE_LR_ER_FR 0x19
+/* enum: 100 Gb/s PHY using 100GBASE-R encoding over single-mode fiber with
+ * reach up to at least 500 m (see IEEE 802.3 Clause 140)
+ */
+#define MC_CMD_ETH_TECH_50GBASE_DR 0x1a
+/* enum: 100GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3
+ * Clause 137
+ */
+#define MC_CMD_ETH_TECH_100GBASE_KR2 0x1b
+/* enum: 100GBASE-R PCS/PMA over 2 lane multimode fiber PMD. See IEEE 802.3
+ * Clause 138
+ */
+#define MC_CMD_ETH_TECH_100GBASE_SR2 0x1c
+/* enum: 100GBASE-R PCS/PMA over 2 lane shielded copper balanced cable PMD. See
+ * IEEE 802.3 Clause 136
+ */
+#define MC_CMD_ETH_TECH_100GBASE_CR2 0x1d
+/* enum: Unknown source */
+#define MC_CMD_ETH_TECH_100GBASE_LR2_ER2_FR2 0x1e
+/* enum: Unknown source */
+#define MC_CMD_ETH_TECH_100GBASE_DR2 0x1f
+/* enum: 200GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3
+ * Clause 137
+ */
+#define MC_CMD_ETH_TECH_200GBASE_KR4 0x20
+/* enum: 200GBASE-R PCS/PMA over 4 lane multimode fiber PMD. See IEEE 802.3
+ * Clause 138
+ */
+#define MC_CMD_ETH_TECH_200GBASE_SR4 0x21
+/* enum: 200GBASE-R PCS/PMA over 4 WDM lane single-mode fiber PMD as specified
+ * in Clause 122
+ */
+#define MC_CMD_ETH_TECH_200GBASE_LR4_ER4_FR4 0x22
+/* enum: 200GBASE-R PCS/PMA over 4-lane single-mode fiber PMD. See IEEE 802.3
+ * Clause 121
+ */
+#define MC_CMD_ETH_TECH_200GBASE_DR4 0x23
+/* enum: 200GBASE-R PCS/PMA over 4 lane shielded copper balanced cable PMD as
+ * specified in Clause 136
+ */
+#define MC_CMD_ETH_TECH_200GBASE_CR4 0x24
+/* enum: Ethernet Technology Consortium 400G AN Spec. 400GBASE-KR8 PMD uses
+ * 802.3 Clause 137, but the number of PMD lanes is 8.
+ */
+#define MC_CMD_ETH_TECH_400GBASE_KR8 0x25
+/* enum: 400GBASE-R PCS/PMA over 8-lane multimode fiber PMD. See IEEE 802.3
+ * Clause 138
+ */
+#define MC_CMD_ETH_TECH_400GBASE_SR8 0x26
+/* enum: 400GBASE-R PCS/PMA over 8 WDM lane single-mode fiber PMD. See IEEE
+ * 802.3 Clause 122
+ */
+#define MC_CMD_ETH_TECH_400GBASE_LR8_ER8_FR8 0x27
+/* enum: Unknown source */
+#define MC_CMD_ETH_TECH_400GBASE_DR8 0x28
+/* enum: Ethernet Technology Consortium 400G AN Spec. 400GBASE-CR8 PMD uses
+ * IEEE 802.3 Clause 136, but the number of PMD lanes is 8.
+ */
+#define MC_CMD_ETH_TECH_400GBASE_CR8 0x29
+/* enum: 100GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3ck
+ * Clause 163.
+ */
+#define MC_CMD_ETH_TECH_100GBASE_KR 0x2a
+/* enum: IEEE 802.3ck. 100G PHY with PMD as specified in Clause 167 over short
+ * reach fiber
+ */
+#define MC_CMD_ETH_TECH_100GBASE_SR 0x2b
+/* enum: 100G PMD together with single-mode fiber medium. See IEEE 802.3 Clause
+ * 140
+ */
+#define MC_CMD_ETH_TECH_100GBASE_LR_ER_FR 0x2c
+/* enum: 100GBASE-R PCS/PMA over shielded balanced copper cable PMD. See IEEE
+ * 802.3 in Clause 162 IEEE 802.3ck.
+ */
+#define MC_CMD_ETH_TECH_100GBASE_CR 0x2d
+/* enum: 100G PMD together with single-mode fiber medium. See IEEE 802.3 Clause
+ * 140
+ */
+#define MC_CMD_ETH_TECH_100GBASE_DR 0x2e
+/* enum: 200GBASE-R PCS/PMA over an electrical backplane PMD as specified in
+ * Clause 163 IEEE 802.3ck
+ */
+#define MC_CMD_ETH_TECH_200GBASE_KR2 0x2f
+/* enum: 200G PHY with PMD as specified in Clause 167 over short reach fiber
+ * IEEE 802.3ck
+ */
+#define MC_CMD_ETH_TECH_200GBASE_SR2 0x30
+/* enum: Unknown source */
+#define MC_CMD_ETH_TECH_200GBASE_LR2_ER2_FR2 0x31
+/* enum: Unknown source */
+#define MC_CMD_ETH_TECH_200GBASE_DR2 0x32
+/* enum: 200GBASE-R PCS/PMA over 2 lane shielded balanced copper cable PMD as
+ * specified in Clause 162 IEEE 802.3ck.
+ */
+#define MC_CMD_ETH_TECH_200GBASE_CR2 0x33
+/* enum: 400GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3
+ * Clause 163 IEEE 802.3ck.
+ */
+#define MC_CMD_ETH_TECH_400GBASE_KR4 0x34
+/* enum: 400G PHY with PMD over short reach fiber. See Clause 167 of IEEE
+ * 802.3ck.
+ */
+#define MC_CMD_ETH_TECH_400GBASE_SR4 0x35
+/* enum: 400GBASE-R PCS/PMA over 4 WDM lane single-mode fiber PMD. See IEEE
+ * 802.3 Clause 151
+ */
+#define MC_CMD_ETH_TECH_400GBASE_LR4_ER4_FR4 0x36
+/* enum: 400GBASE-R PCS/PMA over 4-lane single-mode fiber PMD as specified in
+ * Clause 124
+ */
+#define MC_CMD_ETH_TECH_400GBASE_DR4 0x37
+/* enum: 400GBASE-R PCS/PMA over 4 lane shielded balanced copper cable PMD as
+ * specified in Clause 162 of IEEE 802.3ck.
+ */
+#define MC_CMD_ETH_TECH_400GBASE_CR4 0x38
+/* enum: Automatic tech mode. The driver must not use this. */
+#define MC_CMD_ETH_TECH_AUTO 0x39
+/* enum: See IEEE 802.3cc-2017 Clause 114 */
+#define MC_CMD_ETH_TECH_25GBASE_LR_ER 0x3a
+/* enum: Up to 7 m over twinaxial copper cable assembly (10 lanes, 10 Gbit/s
+ * each). See IEEE 802.3ba-2010 Clause 85
+ */
+#define MC_CMD_ETH_TECH_100GBASE_CR10 0x3b
+/* enum: Invalid tech mode. The driver must not use this. */
+#define MC_CMD_ETH_TECH_NONE 0x7f
+#define MC_CMD_ETH_TECH_TECH_LBN 0
+#define MC_CMD_ETH_TECH_TECH_WIDTH 128
+
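
The TECH field above is 16 bytes wide but, as its own comment notes, the MC_CMD_ETH_TECH_* values double as bit indices into a 128-bit technology mask (see MC_CMD_ETH_AN_FIELDS/TECH_MASK). A hypothetical helper, assuming the usual little-endian MCDI bit numbering where bit 0 is the least significant bit of byte 0:

#include <stdbool.h>
#include <stdint.h>

#define ETH_TECH_MASK_LEN 16	/* mirrors MC_CMD_ETH_TECH_TECH_LEN */

/* Advertise one technology, e.g. MC_CMD_ETH_TECH_100GBASE_CR4. */
static void eth_tech_mask_set(uint8_t mask[ETH_TECH_MASK_LEN], unsigned int tech)
{
	if (tech < 8 * ETH_TECH_MASK_LEN)
		mask[tech / 8] |= 1u << (tech % 8);
}

static bool eth_tech_mask_test(const uint8_t mask[ETH_TECH_MASK_LEN],
			       unsigned int tech)
{
	return tech < 8 * ETH_TECH_MASK_LEN &&
	       (mask[tech / 8] & (1u << (tech % 8)));
}
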
+/* MC_CMD_LINK_STATUS_FLAGS structuredef */
+#define MC_CMD_LINK_STATUS_FLAGS_LEN 8
+/* Flags used to report the current configuration/state of the link. */
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_OFST 0
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_LEN 8
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_LO_OFST 0
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_LO_LEN 4
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_LO_LBN 0
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_LO_WIDTH 32
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_HI_OFST 4
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_HI_LEN 4
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_HI_LBN 32
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_HI_WIDTH 32
+/* enum property: bitshift */
+/* enum: Whether we have overall link up */
+#define MC_CMD_LINK_STATUS_FLAGS_LINK_UP 0x0
+/* enum: If set, the PHY has no external RX link synchronisation */
+#define MC_CMD_LINK_STATUS_FLAGS_NO_PHY_LINK 0x1
+/* enum: If set, PMD/MDI is not connected (e.g. cable disconnected, module cage
+ * empty)
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_PMD_MDI_DISCONNECTED 0x2
+/* enum: Set on error while decoding module data (e.g. module EEPROM does not
+ * contain valid values, has checksum errors, etc.)
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_PMD_BAD 0x3
+/* enum: Set when module unsupported (e.g. unsupported link rate or link
+ * technology)
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_PMD_UNSUPPORTED 0x4
+/* enum: Set on error while communicating with the module (e.g. I2C errors
+ * while reading EEPROM)
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_PMD_COMMS_FAULT 0x5
+/* enum: Set on module overcurrent/overvoltage condition */
+#define MC_CMD_LINK_STATUS_FLAGS_PMD_POWER_FAULT 0x6
+/* enum: Set on module overtemperature condition */
+#define MC_CMD_LINK_STATUS_FLAGS_PMD_THERMAL_FAULT 0x7
+/* enum: If set, the module is indicating Loss of Signal */
+#define MC_CMD_LINK_STATUS_FLAGS_PMD_LOS 0x8
+/* enum: If set, PMA is indicating loss of CDR lock (clock sync) */
+#define MC_CMD_LINK_STATUS_FLAGS_PMA_NO_CDR_LOCK 0x9
+/* enum: If set, PMA is indicating loss of analog signal */
+#define MC_CMD_LINK_STATUS_FLAGS_PMA_LOS 0xa
+/* enum: If set, PCS is indicating loss of block lock */
+#define MC_CMD_LINK_STATUS_FLAGS_PCS_NO_BLOCK_LOCK 0xb
+/* enum: If set, PCS is indicating loss of alignment marker lock on one or more
+ * lanes
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_PCS_NO_AM_LOCK 0xc
+/* enum: If set, PCS is indicating loss of overall alignment lock */
+#define MC_CMD_LINK_STATUS_FLAGS_PCS_NO_ALIGN_LOCK 0xd
+/* enum: If set, PCS is indicating high bit error rate condition. */
+#define MC_CMD_LINK_STATUS_FLAGS_PCS_HI_BER 0xe
+/* enum: If set, FEC is indicating loss of FEC lock */
+#define MC_CMD_LINK_STATUS_FLAGS_FEC_NO_LOCK 0xf
+/* enum: If set, indicates that the number of symbol errors in an 8192-codeword
+ * window has exceeded the threshold K (417).
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_FEC_HI_SER 0x10
+/* enum: If set, the receiver has detected the local FEC has degraded. */
+#define MC_CMD_LINK_STATUS_FLAGS_FEC_LOCAL_DEGRADED 0x11
+/* enum: If set, the receiver has detected the remote FEC has degraded. */
+#define MC_CMD_LINK_STATUS_FLAGS_FEC_RM_DEGRADED 0x12
+/* enum: If set, the number of symbol errors is over an internal threshold. */
+#define MC_CMD_LINK_STATUS_FLAGS_FEC_DEGRADED_SER 0x13
+/* enum: If set, autonegotiation has detected an auto-negotiation capable link
+ * partner
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_AN_ABLE 0x14
+/* enum: If set, autonegotiation base page exchange has failed */
+#define MC_CMD_LINK_STATUS_FLAGS_AN_BP_FAILED 0x15
+/* enum: If set, autonegotiation next page exchange has failed */
+#define MC_CMD_LINK_STATUS_FLAGS_AN_NP_FAILED 0x16
+/* enum: If set, autonegotiation has failed to negotiate a common set of
+ * capabilities
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_AN_NO_HCD 0x17
+/* enum: If set, local end link training has failed to establish link training
+ * frame lock on one or more lanes
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_LT_NO_LOCAL_FRAME_LOCK 0x18
+/* enum: If set, remote end link training has failed to establish link training
+ * frame lock on one or more lanes
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_LT_NO_RM_FRAME_LOCK 0x19
+/* enum: If set, remote end has failed to assert Receiver Ready (link training
+ * success) within the designated timeout
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_LT_NO_RX_READY 0x1a
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_LBN 0
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_WIDTH 64
+
+/* MC_CMD_PAUSE_MODE structuredef */
+#define MC_CMD_PAUSE_MODE_LEN 1
+#define MC_CMD_PAUSE_MODE_TYPE_OFST 0
+#define MC_CMD_PAUSE_MODE_TYPE_LEN 1
+/* enum: See IEEE 802.3 Clause 73.6.6 */
+#define MC_CMD_PAUSE_MODE_AN_PAUSE 0x0
+/* enum: See IEEE 802.3 Clause 73.6.6 */
+#define MC_CMD_PAUSE_MODE_AN_ASYM_DIR 0x1
+#define MC_CMD_PAUSE_MODE_TYPE_LBN 0
+#define MC_CMD_PAUSE_MODE_TYPE_WIDTH 8
+
+/* MC_CMD_ETH_AN_FIELDS structuredef: Fields used for IEEE 802.3 Clause 73
+ * Auto-Negotiation. Warning - This is fixed size and cannot be extended. This
+ * structure is used to define autonegotiable abilities (advertised, link
+ * partner and supported abilities).
+ */
+#define MC_CMD_ETH_AN_FIELDS_LEN 25
+/* Mask of Ethernet technologies. The bit indices in this mask are taken from
+ * the TECH field in the MC_CMD_ETH_TECH structure.
+ */
+#define MC_CMD_ETH_AN_FIELDS_TECH_MASK_OFST 0
+#define MC_CMD_ETH_AN_FIELDS_TECH_MASK_LEN 16
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_ETH_TECH/TECH */
+#define MC_CMD_ETH_AN_FIELDS_TECH_MASK_LBN 0
+#define MC_CMD_ETH_AN_FIELDS_TECH_MASK_WIDTH 128
+/* Mask of supported FEC modes */
+#define MC_CMD_ETH_AN_FIELDS_FEC_MASK_OFST 16
+#define MC_CMD_ETH_AN_FIELDS_FEC_MASK_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* FEC_TYPE/TYPE */
+#define MC_CMD_ETH_AN_FIELDS_FEC_MASK_LBN 128
+#define MC_CMD_ETH_AN_FIELDS_FEC_MASK_WIDTH 32
+/* Mask of requested FEC modes */
+#define MC_CMD_ETH_AN_FIELDS_FEC_REQ_OFST 20
+#define MC_CMD_ETH_AN_FIELDS_FEC_REQ_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* FEC_TYPE/TYPE */
+#define MC_CMD_ETH_AN_FIELDS_FEC_REQ_LBN 160
+#define MC_CMD_ETH_AN_FIELDS_FEC_REQ_WIDTH 32
+/* Bitmask of negotiated pause modes */
+#define MC_CMD_ETH_AN_FIELDS_PAUSE_MASK_OFST 24
+#define MC_CMD_ETH_AN_FIELDS_PAUSE_MASK_LEN 1
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_PAUSE_MODE/TYPE */
+#define MC_CMD_ETH_AN_FIELDS_PAUSE_MASK_LBN 192
+#define MC_CMD_ETH_AN_FIELDS_PAUSE_MASK_WIDTH 8
+
+/* MC_CMD_LOOPBACK_V2 structuredef: Loopback modes for use with the new
+ * MC_CMD_LINK_CTRL and MC_CMD_LINK_STATE. These loopback modes are not
+ * supported in other getlink/setlink commands.
+ */
+#define MC_CMD_LOOPBACK_V2_LEN 4
+#define MC_CMD_LOOPBACK_V2_MODE_OFST 0
+#define MC_CMD_LOOPBACK_V2_MODE_LEN 4
+/* enum: No loopback */
+#define MC_CMD_LOOPBACK_V2_NONE 0x0
+/* enum: Let firmware choose a supported loopback mode */
+#define MC_CMD_LOOPBACK_V2_AUTO 0x1
+/* enum: Loopback after the MAC */
+#define MC_CMD_LOOPBACK_V2_POST_MAC 0x2
+/* enum: Loopback after the PCS */
+#define MC_CMD_LOOPBACK_V2_POST_PCS 0x3
+/* enum: Loopback after the PMA */
+#define MC_CMD_LOOPBACK_V2_POST_PMA 0x4
+/* enum: Loopback after the MDI Wireside */
+#define MC_CMD_LOOPBACK_V2_POST_MDI_WS 0x5
+/* enum: Loopback after the PMA Wireside */
+#define MC_CMD_LOOPBACK_V2_POST_PMA_WS 0x6
+/* enum: Loopback after the PCS Wireside */
+#define MC_CMD_LOOPBACK_V2_POST_PCS_WS 0x7
+/* enum: Loopback after the MAC Wireside */
+#define MC_CMD_LOOPBACK_V2_POST_MAC_WS 0x8
+/* enum: Loopback after the MAC FIFOs (before the MAC) */
+#define MC_CMD_LOOPBACK_V2_PRE_MAC 0x9
+#define MC_CMD_LOOPBACK_V2_MODE_LBN 0
+#define MC_CMD_LOOPBACK_V2_MODE_WIDTH 32
+
+/* MC_CMD_FCNTL structuredef */
+#define MC_CMD_FCNTL_LEN 4
+#define MC_CMD_FCNTL_MASK_OFST 0
+#define MC_CMD_FCNTL_MASK_LEN 4
+/* enum: Flow control is off. */
+#define MC_CMD_FCNTL_OFF 0x0
+/* enum: Respond to flow control. */
+#define MC_CMD_FCNTL_RESPOND 0x1
+/* enum: Respond to and Issue flow control. */
+#define MC_CMD_FCNTL_BIDIR 0x2
+/* enum: Auto negotiate flow control. */
+#define MC_CMD_FCNTL_AUTO 0x3
+/* enum: Priority flow control. This is only supported on KSB. */
+#define MC_CMD_FCNTL_QBB 0x4
+/* enum: Issue flow control. */
+#define MC_CMD_FCNTL_GENERATE 0x5
+#define MC_CMD_FCNTL_MASK_LBN 0
+#define MC_CMD_FCNTL_MASK_WIDTH 32
+
+/* MC_CMD_LINK_FLAGS structuredef */
+#define MC_CMD_LINK_FLAGS_LEN 4
+/* The enums defined in this field are used as indices into the
+ * MC_CMD_LINK_FLAGS bitmask.
+ */
+#define MC_CMD_LINK_FLAGS_MASK_OFST 0
+#define MC_CMD_LINK_FLAGS_MASK_LEN 4
+/* enum property: bitshift */
+/* enum: Enable auto-negotiation. If AN is enabled, link technology and FEC
+ * mode are determined by advertised capabilities and requested FEC modes,
+ * combined with link partner capabilities. If AN is disabled, link technology
+ * is forced to LINK_TECHNOLOGY and FEC mode is forced to FEC_MODE. Not valid
+ * if loopback is enabled
+ */
+#define MC_CMD_LINK_FLAGS_AUTONEG_EN 0x0
+/* enum: Enable parallel detect. In addition to AN, try to sense partner forced
+ * speed/FEC mode (when partner AN disabled). Only valid if AN is enabled.
+ */
+#define MC_CMD_LINK_FLAGS_PARALLEL_DETECT_EN 0x1
+/* enum: Force link down, in electrical idle. */
+#define MC_CMD_LINK_FLAGS_LINK_DISABLE 0x2
+/* enum: Ignore the sequence number and always apply. */
+#define MC_CMD_LINK_FLAGS_IGNORE_MODULE_SEQ 0x3
+#define MC_CMD_LINK_FLAGS_MASK_LBN 0
+#define MC_CMD_LINK_FLAGS_MASK_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_LINK_CTRL
+ * Write the unified MAC/PHY link configuration. Locks required: None. Return
+ * code: 0, EINVAL, ETIME, EAGAIN
+ */
+#define MC_CMD_LINK_CTRL 0x6b
+#undef MC_CMD_0x6b_PRIVILEGE_CTG
+
+#define MC_CMD_0x6b_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_LINK_CTRL_IN msgrequest */
+#define MC_CMD_LINK_CTRL_IN_LEN 40
+/* Handle to the port to set link state for. */
+#define MC_CMD_LINK_CTRL_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_LINK_CTRL_IN_PORT_HANDLE_LEN 4
+/* Control flags */
+#define MC_CMD_LINK_CTRL_IN_CONTROL_FLAGS_OFST 4
+#define MC_CMD_LINK_CTRL_IN_CONTROL_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_LINK_FLAGS/MASK */
+/* Reserved for future expansion, and included to provide padding for alignment
+ * purposes.
+ */
+#define MC_CMD_LINK_CTRL_IN_RESERVED_OFST 8
+#define MC_CMD_LINK_CTRL_IN_RESERVED_LEN 8
+#define MC_CMD_LINK_CTRL_IN_RESERVED_LO_OFST 8
+#define MC_CMD_LINK_CTRL_IN_RESERVED_LO_LEN 4
+#define MC_CMD_LINK_CTRL_IN_RESERVED_LO_LBN 64
+#define MC_CMD_LINK_CTRL_IN_RESERVED_LO_WIDTH 32
+#define MC_CMD_LINK_CTRL_IN_RESERVED_HI_OFST 12
+#define MC_CMD_LINK_CTRL_IN_RESERVED_HI_LEN 4
+#define MC_CMD_LINK_CTRL_IN_RESERVED_HI_LBN 96
+#define MC_CMD_LINK_CTRL_IN_RESERVED_HI_WIDTH 32
+/* Technology abilities to advertise during auto-negotiation */
+#define MC_CMD_LINK_CTRL_IN_ADVERTISED_TECH_ABILITIES_MASK_OFST 16
+#define MC_CMD_LINK_CTRL_IN_ADVERTISED_TECH_ABILITIES_MASK_LEN 16
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_ETH_TECH/TECH */
+/* Pause abilities to advertise during auto-negotiation. Valid when auto-
+ * negotiation is enabled and MC_CMD_SET_MAC_IN/FCNTL is set to
+ * MC_CMD_FCNTL_AUTO. If auto-negotiation is disabled the driver must
+ * explicitly configure pause mode with MC_CMD_SET_MAC.
+ */
+#define MC_CMD_LINK_CTRL_IN_ADVERTISED_PAUSE_ABILITIES_MASK_OFST 32
+#define MC_CMD_LINK_CTRL_IN_ADVERTISED_PAUSE_ABILITIES_MASK_LEN 1
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_PAUSE_MODE/TYPE */
+/* When auto-negotiation is enabled, this is the FEC mode to request. Note that
+ * a weaker FEC mode may get negotiated, depending on what the link partner
+ * supports. The driver should subsequently use MC_CMD_GET_LINK to check the
+ * actual negotiated FEC mode. When auto-negotiation is disabled, this is the
+ * forced FEC mode.
+ */
+#define MC_CMD_LINK_CTRL_IN_FEC_MODE_OFST 33
+#define MC_CMD_LINK_CTRL_IN_FEC_MODE_LEN 1
+/* enum property: value */
+/* Enum values, see field(s): */
+/* FEC_TYPE/TYPE */
+/* This is only to be used when auto-negotiation is disabled (forced speed or
+ * loopback mode). If the specified value does not align with the values
+ * defined in the enum MC_CMD_ETH_TECH/TECH, it is considered invalid.
+ */
+#define MC_CMD_LINK_CTRL_IN_LINK_TECHNOLOGY_OFST 36
+#define MC_CMD_LINK_CTRL_IN_LINK_TECHNOLOGY_LEN 2
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_ETH_TECH/TECH */
+/* The sequence number of the last MODULECHANGE event. If this doesn't match,
+ * fail with EAGAIN.
+ */
+#define MC_CMD_LINK_CTRL_IN_MODULE_SEQ_OFST 38
+#define MC_CMD_LINK_CTRL_IN_MODULE_SEQ_LEN 1
+/* Loopback Mode. Only valid when auto-negotiation is disabled. */
+#define MC_CMD_LINK_CTRL_IN_LOOPBACK_OFST 39
+#define MC_CMD_LINK_CTRL_IN_LOOPBACK_LEN 1
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_LOOPBACK_V2/MODE */
+
+/* MC_CMD_LINK_CTRL_OUT msgresponse */
+#define MC_CMD_LINK_CTRL_OUT_LEN 0
+
+
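
Taken together, the MC_CMD_LINK_CTRL_IN offsets above describe a 40-byte little-endian request. A hypothetical, transport-agnostic sketch of filling the fixed-width fields (the numeric offsets mirror the _OFST defines; issuing the MCDI call itself is out of scope here):

#include <stdint.h>
#include <string.h>

static void put_le32(uint8_t *p, uint32_t v)
{
	p[0] = v; p[1] = v >> 8; p[2] = v >> 16; p[3] = v >> 24;
}

/* Fill an MC_CMD_LINK_CTRL_IN request (MC_CMD_LINK_CTRL_IN_LEN = 40 bytes). */
static void link_ctrl_in_fill(uint8_t buf[40], uint32_t port_handle,
			      uint32_t control_flags,
			      const uint8_t tech_mask[16],
			      uint8_t pause_mask, uint8_t fec_mode,
			      uint16_t link_technology, uint8_t module_seq,
			      uint8_t loopback)
{
	memset(buf, 0, 40);			/* RESERVED at 8..15 stays zero */
	put_le32(buf + 0, port_handle);		/* PORT_HANDLE_OFST 0 */
	put_le32(buf + 4, control_flags);	/* CONTROL_FLAGS_OFST 4 */
	memcpy(buf + 16, tech_mask, 16);	/* ADVERTISED_TECH_ABILITIES_MASK_OFST 16 */
	buf[32] = pause_mask;			/* ADVERTISED_PAUSE_ABILITIES_MASK_OFST 32 */
	buf[33] = fec_mode;			/* FEC_MODE_OFST 33 */
	buf[36] = link_technology;		/* LINK_TECHNOLOGY_OFST 36, 2 bytes LE */
	buf[37] = link_technology >> 8;
	buf[38] = module_seq;			/* MODULE_SEQ_OFST 38 */
	buf[39] = loopback;			/* LOOPBACK_OFST 39 */
}
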
+/***********************************/
+/* MC_CMD_LINK_STATE
+ */
+#define MC_CMD_LINK_STATE 0x6c
+#undef MC_CMD_0x6c_PRIVILEGE_CTG
+
+#define MC_CMD_0x6c_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_LINK_STATE_IN msgrequest */
+#define MC_CMD_LINK_STATE_IN_LEN 4
+/* Handle to the port to get link state for. */
+#define MC_CMD_LINK_STATE_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_LINK_STATE_IN_PORT_HANDLE_LEN 4
+
+/* MC_CMD_LINK_STATE_OUT msgresponse */
+#define MC_CMD_LINK_STATE_OUT_LEN 114
+/* Flags used to report the current configuration/state of the link. */
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_OFST 0
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_LEN 8
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_LO_OFST 0
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_LO_LEN 4
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_LO_LBN 0
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_LO_WIDTH 32
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_HI_OFST 4
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_HI_LEN 4
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_HI_LBN 32
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_HI_WIDTH 32
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_LINK_STATUS_FLAGS/STATUS_FLAGS */
+/* Configured technology. If the specified value does not align with the values
+ * defined in the enum MC_CMD_ETH_TECH/TECH, it is considered invalid.
+ */
+#define MC_CMD_LINK_STATE_OUT_LINK_TECHNOLOGY_OFST 8
+#define MC_CMD_LINK_STATE_OUT_LINK_TECHNOLOGY_LEN 2
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_ETH_TECH/TECH */
+/* Configured FEC mode */
+#define MC_CMD_LINK_STATE_OUT_FEC_MODE_OFST 10
+#define MC_CMD_LINK_STATE_OUT_FEC_MODE_LEN 1
+/* enum property: value */
+/* Enum values, see field(s): */
+/* FEC_TYPE/TYPE */
+/* Bitmask of auto-negotiated pause modes */
+#define MC_CMD_LINK_STATE_OUT_PAUSE_MASK_OFST 11
+#define MC_CMD_LINK_STATE_OUT_PAUSE_MASK_LEN 1
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_PAUSE_MODE/TYPE */
+/* Configured loopback mode */
+#define MC_CMD_LINK_STATE_OUT_LOOPBACK_OFST 12
+#define MC_CMD_LINK_STATE_OUT_LOOPBACK_LEN 1
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_LOOPBACK_V2/MODE */
+/* Abilities requested by the driver to advertise during auto-negotiation */
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_OFST 16
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_LEN 32
+/* See structuredef: MC_CMD_ETH_AN_FIELDS */
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_TECH_MASK_OFST 16
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_TECH_MASK_LEN 16
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_FEC_MASK_OFST 32
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_FEC_MASK_LEN 4
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_FEC_REQ_OFST 36
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_FEC_REQ_LEN 4
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_PAUSE_MASK_OFST 40
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_PAUSE_MASK_LEN 1
+/* Abilities advertised by the link partner during auto-negotiation */
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_OFST 48
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_LEN 32
+/* See structuredef: MC_CMD_ETH_AN_FIELDS */
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_TECH_MASK_OFST 48
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_TECH_MASK_LEN 16
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_FEC_MASK_OFST 64
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_FEC_MASK_LEN 4
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_FEC_REQ_OFST 68
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_FEC_REQ_LEN 4
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_PAUSE_MASK_OFST 72
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_PAUSE_MASK_LEN 1
+/* Abilities supported by the local device (including cable abilities). For
+ * fixed local device capabilities see MC_CMD_GET_LOCAL_DEVICE_INFO.
+ */
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_OFST 80
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_LEN 28
+/* See structuredef: MC_CMD_ETH_AN_FIELDS */
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_TECH_MASK_OFST 80
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_TECH_MASK_LEN 16
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_FEC_MASK_OFST 96
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_FEC_MASK_LEN 4
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_FEC_REQ_OFST 100
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_FEC_REQ_LEN 4
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_PAUSE_MASK_OFST 104
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_PAUSE_MASK_LEN 1
+/* Control flags */
+#define MC_CMD_LINK_STATE_OUT_CONTROL_FLAGS_OFST 108
+#define MC_CMD_LINK_STATE_OUT_CONTROL_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_LINK_FLAGS/MASK */
+/* Sequence number to synchronize link change events */
+#define MC_CMD_LINK_STATE_OUT_PORT_LINKCHANGE_SEQ_NUM_OFST 112
+#define MC_CMD_LINK_STATE_OUT_PORT_LINKCHANGE_SEQ_NUM_LEN 1
+/* Sequence number to synchronize module change events */
+#define MC_CMD_LINK_STATE_OUT_PORT_MODULECHANGE_SEQ_NUM_OFST 113
+#define MC_CMD_LINK_STATE_OUT_PORT_MODULECHANGE_SEQ_NUM_LEN 1
+
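
The three ability blocks in MC_CMD_LINK_STATE_OUT each embed MC_CMD_ETH_AN_FIELDS at a fixed offset (tech mask, FEC mask, requested FEC, pause mask). A hypothetical decode of the advertised abilities from a raw response buffer, using only the offsets listed above:

#include <stdint.h>
#include <string.h>

static uint32_t get_le32(const uint8_t *p)
{
	return p[0] | (p[1] << 8) | ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

struct eth_an_fields {
	uint8_t tech_mask[16];	/* bit indices from MC_CMD_ETH_TECH/TECH */
	uint32_t fec_mask;	/* bit indices from FEC_TYPE/TYPE */
	uint32_t fec_req;
	uint8_t pause_mask;	/* bit indices from MC_CMD_PAUSE_MODE/TYPE */
};

/* Parse ADVERTISED_ABILITIES (offset 16) out of an MC_CMD_LINK_STATE_OUT buffer. */
static void link_state_advertised(const uint8_t *outbuf, struct eth_an_fields *an)
{
	const uint8_t *p = outbuf + 16;		/* ADVERTISED_ABILITIES_OFST */

	memcpy(an->tech_mask, p, 16);		/* TECH_MASK at relative offset 0 */
	an->fec_mask = get_le32(p + 16);	/* FEC_MASK (absolute offset 32) */
	an->fec_req = get_le32(p + 20);		/* FEC_REQ (absolute offset 36) */
	an->pause_mask = p[24];			/* PAUSE_MASK (absolute offset 40) */
}
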
+/* MC_CMD_LINK_STATE_OUT_V2 msgresponse: Updated LINK_STATE_OUT with
+ * LOCAL_AN_SUPPORT
+ */
+#define MC_CMD_LINK_STATE_OUT_V2_LEN 120
+/* Flags used to report the current configuration/state of the link. */
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_OFST 0
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_LEN 8
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_LO_OFST 0
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_LO_LEN 4
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_LO_LBN 0
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_LO_WIDTH 32
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_HI_OFST 4
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_HI_LEN 4
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_HI_LBN 32
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_HI_WIDTH 32
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_LINK_STATUS_FLAGS/STATUS_FLAGS */
+/* Configured technology. If the specified value does not align with the values
+ * defined in the enum MC_CMD_ETH_TECH/TECH, it is considered invalid.
+ */
+#define MC_CMD_LINK_STATE_OUT_V2_LINK_TECHNOLOGY_OFST 8
+#define MC_CMD_LINK_STATE_OUT_V2_LINK_TECHNOLOGY_LEN 2
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_ETH_TECH/TECH */
+/* Configured FEC mode */
+#define MC_CMD_LINK_STATE_OUT_V2_FEC_MODE_OFST 10
+#define MC_CMD_LINK_STATE_OUT_V2_FEC_MODE_LEN 1
+/* enum property: value */
+/* Enum values, see field(s): */
+/* FEC_TYPE/TYPE */
+/* Bitmask of auto-negotiated pause modes */
+#define MC_CMD_LINK_STATE_OUT_V2_PAUSE_MASK_OFST 11
+#define MC_CMD_LINK_STATE_OUT_V2_PAUSE_MASK_LEN 1
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_PAUSE_MODE/TYPE */
+/* Configured loopback mode */
+#define MC_CMD_LINK_STATE_OUT_V2_LOOPBACK_OFST 12
+#define MC_CMD_LINK_STATE_OUT_V2_LOOPBACK_LEN 1
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_LOOPBACK_V2/MODE */
+/* Abilities requested by the driver to advertise during auto-negotiation */
+#define MC_CMD_LINK_STATE_OUT_V2_ADVERTISED_ABILITIES_OFST 16
+#define MC_CMD_LINK_STATE_OUT_V2_ADVERTISED_ABILITIES_LEN 32
+/* Abilities advertised by the link partner during auto-negotiation */
+#define MC_CMD_LINK_STATE_OUT_V2_LINK_PARTNER_ABILITIES_OFST 48
+#define MC_CMD_LINK_STATE_OUT_V2_LINK_PARTNER_ABILITIES_LEN 32
+/* Abilities supported by the local device (including cable abilities). For
+ * fixed local device capabilities see MC_CMD_GET_LOCAL_DEVICE_INFO.
+ */
+#define MC_CMD_LINK_STATE_OUT_V2_SUPPORTED_ABILITIES_OFST 80
+#define MC_CMD_LINK_STATE_OUT_V2_SUPPORTED_ABILITIES_LEN 28
+/* Control flags */
+#define MC_CMD_LINK_STATE_OUT_V2_CONTROL_FLAGS_OFST 108
+#define MC_CMD_LINK_STATE_OUT_V2_CONTROL_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_LINK_FLAGS/MASK */
+/* Sequence number to synchronize link change events */
+#define MC_CMD_LINK_STATE_OUT_V2_PORT_LINKCHANGE_SEQ_NUM_OFST 112
+#define MC_CMD_LINK_STATE_OUT_V2_PORT_LINKCHANGE_SEQ_NUM_LEN 1
+/* Sequence number to synchronize module change events */
+#define MC_CMD_LINK_STATE_OUT_V2_PORT_MODULECHANGE_SEQ_NUM_OFST 113
+#define MC_CMD_LINK_STATE_OUT_V2_PORT_MODULECHANGE_SEQ_NUM_LEN 1
+/* Reports the auto-negotiation supported by the local device. This depends on
+ * the port and module properties.
+ */
+#define MC_CMD_LINK_STATE_OUT_V2_LOCAL_AN_SUPPORT_OFST 116
+#define MC_CMD_LINK_STATE_OUT_V2_LOCAL_AN_SUPPORT_LEN 4
+/* Enum values, see field(s): */
+/* AN_TYPE/TYPE */
+
+/* MC_CMD_LINK_STATE_OUT_V3 msgresponse: Updated LINK_STATE_OUT_V2 for explicit
+ * reporting of the link speed and duplex mode.
+ */
+#define MC_CMD_LINK_STATE_OUT_V3_LEN 128
+/* Flags used to report the current configuration/state of the link. */
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_OFST 0
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_LEN 8
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_LO_OFST 0
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_LO_LEN 4
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_LO_LBN 0
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_LO_WIDTH 32
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_HI_OFST 4
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_HI_LEN 4
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_HI_LBN 32
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_HI_WIDTH 32
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_LINK_STATUS_FLAGS/STATUS_FLAGS */
+/* Configured technology. If the specified value does not align with the values
+ * defined in the enum MC_CMD_ETH_TECH/TECH, it is considered invalid.
+ */
+#define MC_CMD_LINK_STATE_OUT_V3_LINK_TECHNOLOGY_OFST 8
+#define MC_CMD_LINK_STATE_OUT_V3_LINK_TECHNOLOGY_LEN 2
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_ETH_TECH/TECH */
+/* Configured FEC mode */
+#define MC_CMD_LINK_STATE_OUT_V3_FEC_MODE_OFST 10
+#define MC_CMD_LINK_STATE_OUT_V3_FEC_MODE_LEN 1
+/* enum property: value */
+/* Enum values, see field(s): */
+/* FEC_TYPE/TYPE */
+/* Bitmask of auto-negotiated pause modes */
+#define MC_CMD_LINK_STATE_OUT_V3_PAUSE_MASK_OFST 11
+#define MC_CMD_LINK_STATE_OUT_V3_PAUSE_MASK_LEN 1
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_PAUSE_MODE/TYPE */
+/* Configured loopback mode */
+#define MC_CMD_LINK_STATE_OUT_V3_LOOPBACK_OFST 12
+#define MC_CMD_LINK_STATE_OUT_V3_LOOPBACK_LEN 1
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_LOOPBACK_V2/MODE */
+/* Abilities requested by the driver to advertise during auto-negotiation */
+#define MC_CMD_LINK_STATE_OUT_V3_ADVERTISED_ABILITIES_OFST 16
+#define MC_CMD_LINK_STATE_OUT_V3_ADVERTISED_ABILITIES_LEN 32
+/* Abilities advertised by the link partner during auto-negotiation */
+#define MC_CMD_LINK_STATE_OUT_V3_LINK_PARTNER_ABILITIES_OFST 48
+#define MC_CMD_LINK_STATE_OUT_V3_LINK_PARTNER_ABILITIES_LEN 32
+/* Abilities supported by the local device (including cable abilities). For
+ * fixed local device capabilities see MC_CMD_GET_LOCAL_DEVICE_INFO.
+ */
+#define MC_CMD_LINK_STATE_OUT_V3_SUPPORTED_ABILITIES_OFST 80
+#define MC_CMD_LINK_STATE_OUT_V3_SUPPORTED_ABILITIES_LEN 28
+/* Control flags */
+#define MC_CMD_LINK_STATE_OUT_V3_CONTROL_FLAGS_OFST 108
+#define MC_CMD_LINK_STATE_OUT_V3_CONTROL_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_LINK_FLAGS/MASK */
+/* Sequence number to synchronize link change events */
+#define MC_CMD_LINK_STATE_OUT_V3_PORT_LINKCHANGE_SEQ_NUM_OFST 112
+#define MC_CMD_LINK_STATE_OUT_V3_PORT_LINKCHANGE_SEQ_NUM_LEN 1
+/* Sequence number to synchronize module change events */
+#define MC_CMD_LINK_STATE_OUT_V3_PORT_MODULECHANGE_SEQ_NUM_OFST 113
+#define MC_CMD_LINK_STATE_OUT_V3_PORT_MODULECHANGE_SEQ_NUM_LEN 1
+/* Reports the auto-negotiation supported by the local device. This depends on
+ * the port and module properties.
+ */
+#define MC_CMD_LINK_STATE_OUT_V3_LOCAL_AN_SUPPORT_OFST 116
+#define MC_CMD_LINK_STATE_OUT_V3_LOCAL_AN_SUPPORT_LEN 4
+/* Enum values, see field(s): */
+/* AN_TYPE/TYPE */
+/* Autonegotiated speed in Mbit/s. The link may still be down even if this
+ * reads non-zero. The LINK_SPEED field is intended for drivers without the
+ * most up-to-date MCDI definitions, which cannot deduce the link speed from
+ * the reported LINK_TECHNOLOGY field.
+ */
+#define MC_CMD_LINK_STATE_OUT_V3_LINK_SPEED_OFST 120
+#define MC_CMD_LINK_STATE_OUT_V3_LINK_SPEED_LEN 4
+#define MC_CMD_LINK_STATE_OUT_V3_FLAGS_OFST 124
+#define MC_CMD_LINK_STATE_OUT_V3_FLAGS_LEN 4
+#define MC_CMD_LINK_STATE_OUT_V3_FULL_DUPLEX_OFST 124
+#define MC_CMD_LINK_STATE_OUT_V3_FULL_DUPLEX_LBN 0
+#define MC_CMD_LINK_STATE_OUT_V3_FULL_DUPLEX_WIDTH 1
+
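
In the V3 response, LINK_SPEED is a little-endian dword at offset 120 and FULL_DUPLEX occupies bit 0 (LBN 0, WIDTH 1) of the FLAGS dword at offset 124. A hypothetical decode, again independent of any driver framework:

#include <stdbool.h>
#include <stdint.h>

static uint32_t get_le32(const uint8_t *p)
{
	return p[0] | (p[1] << 8) | ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

/* Extract speed (Mbit/s) and duplex from MC_CMD_LINK_STATE_OUT_V3 (128 bytes). */
static void link_state_v3_speed(const uint8_t outbuf[128],
				uint32_t *speed_mbps, bool *full_duplex)
{
	*speed_mbps = get_le32(outbuf + 120);		/* LINK_SPEED_OFST 120 */
	*full_duplex = get_le32(outbuf + 124) & 1;	/* FULL_DUPLEX_LBN 0, WIDTH 1 */
}
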
/***********************************/
/* MC_CMD_GET_LINK
@@ -5694,6 +6064,54 @@
/* MC_CMD_GET_LINK_IN msgrequest */
#define MC_CMD_GET_LINK_IN_LEN 0
+/* MC_CMD_GET_LINK_IN_V2 msgrequest */
+#define MC_CMD_GET_LINK_IN_V2_LEN 8
+/* Target port to request link state for. Uses MAE_LINK_ENDPOINT_SELECTOR which
+ * identifies a real or virtual network port by MAE port and link end. See the
+ * structure definition for more details.
+ */
+#define MC_CMD_GET_LINK_IN_V2_TARGET_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_LEN 8
+#define MC_CMD_GET_LINK_IN_V2_TARGET_LO_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_LO_LEN 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_LO_LBN 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_LO_WIDTH 32
+#define MC_CMD_GET_LINK_IN_V2_TARGET_HI_OFST 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_HI_LEN 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_HI_LBN 32
+#define MC_CMD_GET_LINK_IN_V2_TARGET_HI_WIDTH 32
+/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_LEN 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FLAT_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FLAT_LEN 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_TYPE_OFST 3
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_TYPE_LEN 1
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 20
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 16
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 2
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2
+#define MC_CMD_GET_LINK_IN_V2_TARGET_LINK_END_OFST 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_LINK_END_LEN 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_LEN 8
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_LO_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_LO_LEN 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_LO_LBN 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_LO_WIDTH 32
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_HI_OFST 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_HI_LEN 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_HI_LBN 32
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_HI_WIDTH 32
+
/* MC_CMD_GET_LINK_OUT msgresponse */
#define MC_CMD_GET_LINK_OUT_LEN 28
/* Near-side advertised capabilities. Refer to
@@ -5745,6 +6163,7 @@
/* This returns the negotiated flow control value. */
#define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
#define MC_CMD_GET_LINK_OUT_FCNTL_LEN 4
+/* enum property: value */
/* Enum values, see field(s): */
/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */
#define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24
@@ -5813,6 +6232,7 @@
/* This returns the negotiated flow control value. */
#define MC_CMD_GET_LINK_OUT_V2_FCNTL_OFST 20
#define MC_CMD_GET_LINK_OUT_V2_FCNTL_LEN 4
+/* enum property: value */
/* Enum values, see field(s): */
/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */
#define MC_CMD_GET_LINK_OUT_V2_MAC_FAULT_OFST 24
@@ -5969,6 +6389,95 @@
#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_IGNORE_LBN 7
#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_IGNORE_WIDTH 1
+/* MC_CMD_SET_LINK_IN_V3 msgrequest */
+#define MC_CMD_SET_LINK_IN_V3_LEN 28
+/* Near-side advertised capabilities. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
+#define MC_CMD_SET_LINK_IN_V3_CAP_OFST 0
+#define MC_CMD_SET_LINK_IN_V3_CAP_LEN 4
+/* Flags */
+#define MC_CMD_SET_LINK_IN_V3_FLAGS_OFST 4
+#define MC_CMD_SET_LINK_IN_V3_FLAGS_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_LOWPOWER_OFST 4
+#define MC_CMD_SET_LINK_IN_V3_LOWPOWER_LBN 0
+#define MC_CMD_SET_LINK_IN_V3_LOWPOWER_WIDTH 1
+#define MC_CMD_SET_LINK_IN_V3_POWEROFF_OFST 4
+#define MC_CMD_SET_LINK_IN_V3_POWEROFF_LBN 1
+#define MC_CMD_SET_LINK_IN_V3_POWEROFF_WIDTH 1
+#define MC_CMD_SET_LINK_IN_V3_TXDIS_OFST 4
+#define MC_CMD_SET_LINK_IN_V3_TXDIS_LBN 2
+#define MC_CMD_SET_LINK_IN_V3_TXDIS_WIDTH 1
+#define MC_CMD_SET_LINK_IN_V3_LINKDOWN_OFST 4
+#define MC_CMD_SET_LINK_IN_V3_LINKDOWN_LBN 3
+#define MC_CMD_SET_LINK_IN_V3_LINKDOWN_WIDTH 1
+/* Loopback mode. */
+#define MC_CMD_SET_LINK_IN_V3_LOOPBACK_MODE_OFST 8
+#define MC_CMD_SET_LINK_IN_V3_LOOPBACK_MODE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+/* A loopback speed of "0" is supported, and means (choose any available
+ * speed).
+ */
+#define MC_CMD_SET_LINK_IN_V3_LOOPBACK_SPEED_OFST 12
+#define MC_CMD_SET_LINK_IN_V3_LOOPBACK_SPEED_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_OFST 16
+#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_LEN 1
+#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_NUMBER_OFST 16
+#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_NUMBER_LBN 0
+#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_NUMBER_WIDTH 7
+#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_IGNORE_OFST 16
+#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_IGNORE_LBN 7
+#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_IGNORE_WIDTH 1
+/* Padding */
+#define MC_CMD_SET_LINK_IN_V3_RESERVED_OFST 17
+#define MC_CMD_SET_LINK_IN_V3_RESERVED_LEN 3
+/* Target port to set link state for. Uses MAE_LINK_ENDPOINT_SELECTOR which
+ * identifies a real or virtual network port by MAE port and link end. See the
+ * structure definition for more details.
+ */
+#define MC_CMD_SET_LINK_IN_V3_TARGET_OFST 20
+#define MC_CMD_SET_LINK_IN_V3_TARGET_LEN 8
+#define MC_CMD_SET_LINK_IN_V3_TARGET_LO_OFST 20
+#define MC_CMD_SET_LINK_IN_V3_TARGET_LO_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_LO_LBN 160
+#define MC_CMD_SET_LINK_IN_V3_TARGET_LO_WIDTH 32
+#define MC_CMD_SET_LINK_IN_V3_TARGET_HI_OFST 24
+#define MC_CMD_SET_LINK_IN_V3_TARGET_HI_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_HI_LBN 192
+#define MC_CMD_SET_LINK_IN_V3_TARGET_HI_WIDTH 32
+/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_OFST 20
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FLAT_OFST 20
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FLAT_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_TYPE_OFST 23
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_TYPE_LEN 1
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 20
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 160
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 180
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 176
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 22
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 20
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2
+#define MC_CMD_SET_LINK_IN_V3_TARGET_LINK_END_OFST 24
+#define MC_CMD_SET_LINK_IN_V3_TARGET_LINK_END_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_OFST 20
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_LEN 8
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_LO_OFST 20
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_LO_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_LO_LBN 160
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_LO_WIDTH 32
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_HI_OFST 24
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_HI_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_HI_LBN 192
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_HI_WIDTH 32
+
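
The new TARGET field is an 8-byte MAE_LINK_ENDPOINT_SELECTOR at offset 20: the MPORT_SELECTOR dword in the low half and the LINK_END dword in the high half. A minimal sketch of filling it into a raw request buffer follows; the helper name and the caller-supplied selector and link-end values are assumptions for illustration, and MCDI fields are little-endian on the wire.

#include <stdint.h>
#include <string.h>

/* Illustrative sketch: place an MAE_LINK_ENDPOINT_SELECTOR into the
 * TARGET field of an MC_CMD_SET_LINK_IN_V3 request buffer. */
static void set_link_v3_fill_target(uint8_t *inbuf, uint32_t mport_selector,
                                    uint32_t link_end)
{
        memcpy(inbuf + 20, &mport_selector, 4); /* ..._TARGET_MPORT_SELECTOR_OFST */
        memcpy(inbuf + 24, &link_end, 4);       /* ..._TARGET_LINK_END_OFST */
}
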
/* MC_CMD_SET_LINK_OUT msgresponse */
#define MC_CMD_SET_LINK_OUT_LEN 0
@@ -6034,17 +6543,17 @@
#define MC_CMD_SET_MAC_IN_FCNTL_OFST 20
#define MC_CMD_SET_MAC_IN_FCNTL_LEN 4
/* enum: Flow control is off. */
-#define MC_CMD_FCNTL_OFF 0x0
+/* MC_CMD_FCNTL_OFF 0x0 */
/* enum: Respond to flow control. */
-#define MC_CMD_FCNTL_RESPOND 0x1
+/* MC_CMD_FCNTL_RESPOND 0x1 */
/* enum: Respond to and Issue flow control. */
-#define MC_CMD_FCNTL_BIDIR 0x2
-/* enum: Auto neg flow control. */
-#define MC_CMD_FCNTL_AUTO 0x3
-/* enum: Priority flow control (eftest builds only). */
-#define MC_CMD_FCNTL_QBB 0x4
+/* MC_CMD_FCNTL_BIDIR 0x2 */
+/* enum: Auto negotiate flow control. */
+/* MC_CMD_FCNTL_AUTO 0x3 */
+/* enum: Priority flow control. This is only supported on KSB. */
+/* MC_CMD_FCNTL_QBB 0x4 */
/* enum: Issue flow control. */
-#define MC_CMD_FCNTL_GENERATE 0x5
+/* MC_CMD_FCNTL_GENERATE 0x5 */
#define MC_CMD_SET_MAC_IN_FLAGS_OFST 24
#define MC_CMD_SET_MAC_IN_FLAGS_LEN 4
#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_OFST 24
@@ -6086,9 +6595,9 @@
/* MC_CMD_FCNTL_RESPOND 0x1 */
/* enum: Respond to and Issue flow control. */
/* MC_CMD_FCNTL_BIDIR 0x2 */
-/* enum: Auto neg flow control. */
+/* enum: Auto negotiate flow control. */
/* MC_CMD_FCNTL_AUTO 0x3 */
-/* enum: Priority flow control (eftest builds only). */
+/* enum: Priority flow control. This is only supported on KSB. */
/* MC_CMD_FCNTL_QBB 0x4 */
/* enum: Issue flow control. */
/* MC_CMD_FCNTL_GENERATE 0x5 */
@@ -6155,9 +6664,9 @@
/* MC_CMD_FCNTL_RESPOND 0x1 */
/* enum: Respond to and Issue flow control. */
/* MC_CMD_FCNTL_BIDIR 0x2 */
-/* enum: Auto neg flow control. */
+/* enum: Auto negotiate flow control. */
/* MC_CMD_FCNTL_AUTO 0x3 */
-/* enum: Priority flow control (eftest builds only). */
+/* enum: Priority flow control. This is only supported on KSB. */
/* MC_CMD_FCNTL_QBB 0x4 */
/* enum: Issue flow control. */
/* MC_CMD_FCNTL_GENERATE 0x5 */
@@ -6188,19 +6697,9 @@
#define MC_CMD_SET_MAC_V3_IN_CFG_FCS_OFST 28
#define MC_CMD_SET_MAC_V3_IN_CFG_FCS_LBN 4
#define MC_CMD_SET_MAC_V3_IN_CFG_FCS_WIDTH 1
-/* Identifies the MAC to update by the specifying the end of a logical MAE
- * link. Setting TARGET to MAE_LINK_ENDPOINT_COMPAT is equivalent to using the
- * previous version of the command (MC_CMD_SET_MAC_EXT). Not all possible
- * combinations of MPORT_END and MPORT_SELECTOR in TARGET will work in all
- * circumstances. 1. Some will always work (e.g. a VF can always address its
- * logical MAC using MPORT_SELECTOR=ASSIGNED,LINK_END=VNIC), 2. Some are not
- * meaningful and will always fail with EINVAL (e.g. attempting to address the
- * VNIC end of a link to a physical port), 3. Some are meaningful but require
- * the MCDI client to have the required permission and fail with EPERM
- * otherwise (e.g. trying to set the MAC on a VF the caller cannot administer),
- * and 4. Some could be implementation-specific and fail with ENOTSUP if not
- * available (no examples exist right now). See SF-123581-TC section 4.3 for
- * more details.
+/* Target port to set MAC state for. Uses MAE_LINK_ENDPOINT_SELECTOR which
+ * identifies a real or virtual network port by MAE port and link end. See the
+ * structure definition for more details.
*/
#define MC_CMD_SET_MAC_V3_IN_TARGET_OFST 32
#define MC_CMD_SET_MAC_V3_IN_TARGET_LEN 8
@@ -6212,6 +6711,7 @@
#define MC_CMD_SET_MAC_V3_IN_TARGET_HI_LEN 4
#define MC_CMD_SET_MAC_V3_IN_TARGET_HI_LBN 288
#define MC_CMD_SET_MAC_V3_IN_TARGET_HI_WIDTH 32
+/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */
#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_OFST 32
#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_LEN 4
#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_FLAT_OFST 32
@@ -6405,6 +6905,98 @@
#define MC_CMD_MAC_STATS_IN_PORT_ID_OFST 16
#define MC_CMD_MAC_STATS_IN_PORT_ID_LEN 4
+/* MC_CMD_MAC_STATS_V2_IN msgrequest */
+#define MC_CMD_MAC_STATS_V2_IN_LEN 28
+/* Host DMA address to write the statistics buffer to (used when the DMA flag
+ * is set).
+ */
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_OFST 0
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_LEN 8
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_LO_LBN 0
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_LO_WIDTH 32
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_HI_OFST 4
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_HI_LBN 32
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_HI_WIDTH 32
+#define MC_CMD_MAC_STATS_V2_IN_CMD_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_CMD_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_DMA_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_DMA_LBN 0
+#define MC_CMD_MAC_STATS_V2_IN_DMA_WIDTH 1
+#define MC_CMD_MAC_STATS_V2_IN_CLEAR_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_CLEAR_LBN 1
+#define MC_CMD_MAC_STATS_V2_IN_CLEAR_WIDTH 1
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_CHANGE_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_CHANGE_LBN 2
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_CHANGE_WIDTH 1
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_ENABLE_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_ENABLE_LBN 3
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_ENABLE_WIDTH 1
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_CLEAR_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_CLEAR_LBN 4
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_CLEAR_WIDTH 1
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_NOEVENT_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_NOEVENT_LBN 5
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_NOEVENT_WIDTH 1
+#define MC_CMD_MAC_STATS_V2_IN_PERIOD_MS_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_PERIOD_MS_LBN 16
+#define MC_CMD_MAC_STATS_V2_IN_PERIOD_MS_WIDTH 16
+/* DMA length. Should be set to MAC_STATS_NUM_STATS * sizeof(uint64_t), as
+ * returned by MC_CMD_GET_CAPABILITIES_V4_OUT. For legacy firmware not
+ * supporting MC_CMD_GET_CAPABILITIES_V4_OUT, DMA_LEN should be set to
+ * MC_CMD_MAC_NSTATS * sizeof(uint64_t)
+ */
+#define MC_CMD_MAC_STATS_V2_IN_DMA_LEN_OFST 12
+#define MC_CMD_MAC_STATS_V2_IN_DMA_LEN_LEN 4
+/* Port ID so that vadapter stats can be provided. */
+#define MC_CMD_MAC_STATS_V2_IN_PORT_ID_OFST 16
+#define MC_CMD_MAC_STATS_V2_IN_PORT_ID_LEN 4
+/* Target port to request statistics for. Uses MAE_LINK_ENDPOINT_SELECTOR which
+ * identifies a real or virtual network port by MAE port and link end. See the
+ * structure definition for more details.
+ */
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_OFST 20
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_LEN 8
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_LO_OFST 20
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_LO_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_LO_LBN 160
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_LO_WIDTH 32
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_HI_OFST 24
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_HI_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_HI_LBN 192
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_HI_WIDTH 32
+/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_OFST 20
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FLAT_OFST 20
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FLAT_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_TYPE_OFST 23
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_TYPE_LEN 1
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 20
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 160
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 180
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 176
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 22
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 20
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_LINK_END_OFST 24
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_LINK_END_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_OFST 20
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_LEN 8
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_LO_OFST 20
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_LO_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_LO_LBN 160
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_LO_WIDTH 32
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_HI_OFST 24
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_HI_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_HI_LBN 192
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_HI_WIDTH 32
+
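
As the DMA_LEN comment above describes, the buffer length is simply the statistics count multiplied by the size of one 64-bit counter, with the count taken from MC_CMD_GET_CAPABILITIES_V4_OUT when available and from MC_CMD_MAC_NSTATS otherwise. A small sketch of that arithmetic (the helper name and parameters are assumptions for illustration):

#include <stdint.h>

/* Illustrative sketch: choose DMA_LEN for MC_CMD_MAC_STATS_V2_IN.
 * num_stats_from_caps is MAC_STATS_NUM_STATS as reported by
 * MC_CMD_GET_CAPABILITIES_V4_OUT, or 0 when the firmware does not
 * report it; legacy_nstats is MC_CMD_MAC_NSTATS. */
static uint32_t mac_stats_dma_len(uint32_t num_stats_from_caps,
                                  uint32_t legacy_nstats)
{
        uint32_t n = num_stats_from_caps ? num_stats_from_caps : legacy_nstats;

        return n * (uint32_t)sizeof(uint64_t);
}
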
/* MC_CMD_MAC_STATS_OUT_DMA msgresponse */
#define MC_CMD_MAC_STATS_OUT_DMA_LEN 0
@@ -6421,6 +7013,7 @@
#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_LBN 32
#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_WIDTH 32
#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS
+/* enum property: index */
#define MC_CMD_MAC_GENERATION_START 0x0 /* enum */
#define MC_CMD_MAC_DMABUF_START 0x1 /* enum */
#define MC_CMD_MAC_TX_PKTS 0x1 /* enum */
@@ -6583,6 +7176,7 @@
#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_HI_LBN 32
#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_HI_WIDTH 32
#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V2
+/* enum property: index */
/* enum: Start of FEC stats buffer space, Medford2 and up */
#define MC_CMD_MAC_FEC_DMABUF_START 0x61
/* enum: Number of uncorrected FEC codewords on link (RS-FEC only for Medford2)
@@ -6622,6 +7216,7 @@
#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_HI_LBN 32
#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_HI_WIDTH 32
#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V3
+/* enum property: index */
/* enum: Start of CTPIO stats buffer space, Medford2 and up */
#define MC_CMD_MAC_CTPIO_DMABUF_START 0x68
/* enum: Number of CTPIO fallbacks because a DMA packet was in progress on the
@@ -6702,6 +7297,7 @@
#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_HI_LBN 32
#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_HI_WIDTH 32
#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V4
+/* enum property: index */
/* enum: Start of V4 stats buffer space */
#define MC_CMD_MAC_V4_DMABUF_START 0x79
/* enum: RXDP counter: Number of packets truncated because scattering was
@@ -6723,112 +7319,35 @@
/* Other enum values, see field(s): */
/* MC_CMD_MAC_STATS_V3_OUT_NO_DMA/STATISTICS */
-
-/***********************************/
-/* MC_CMD_SRIOV
- * to be documented
- */
-#define MC_CMD_SRIOV 0x30
-
-/* MC_CMD_SRIOV_IN msgrequest */
-#define MC_CMD_SRIOV_IN_LEN 12
-#define MC_CMD_SRIOV_IN_ENABLE_OFST 0
-#define MC_CMD_SRIOV_IN_ENABLE_LEN 4
-#define MC_CMD_SRIOV_IN_VI_BASE_OFST 4
-#define MC_CMD_SRIOV_IN_VI_BASE_LEN 4
-#define MC_CMD_SRIOV_IN_VF_COUNT_OFST 8
-#define MC_CMD_SRIOV_IN_VF_COUNT_LEN 4
-
-/* MC_CMD_SRIOV_OUT msgresponse */
-#define MC_CMD_SRIOV_OUT_LEN 8
-#define MC_CMD_SRIOV_OUT_VI_SCALE_OFST 0
-#define MC_CMD_SRIOV_OUT_VI_SCALE_LEN 4
-#define MC_CMD_SRIOV_OUT_VF_TOTAL_OFST 4
-#define MC_CMD_SRIOV_OUT_VF_TOTAL_LEN 4
-
-/* MC_CMD_MEMCPY_RECORD_TYPEDEF structuredef */
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LEN 32
-/* this is only used for the first record */
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST 0
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LEN 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LBN 0
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_WIDTH 32
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_OFST 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LEN 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LBN 32
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_WIDTH 32
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_OFST 8
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LEN 8
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_OFST 8
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_LEN 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_LBN 64
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_WIDTH 32
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_OFST 12
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_LEN 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_LBN 96
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_WIDTH 32
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LBN 64
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_WIDTH 64
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_OFST 16
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LEN 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE 0x100 /* enum */
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LBN 128
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_WIDTH 32
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_OFST 20
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LEN 8
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_OFST 20
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_LEN 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_LBN 160
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_WIDTH 32
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_OFST 24
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_LEN 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_LBN 192
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_WIDTH 32
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LBN 160
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_WIDTH 64
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_OFST 28
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LEN 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LBN 224
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_WIDTH 32
-
-
-/***********************************/
-/* MC_CMD_MEMCPY
- * DMA write data into (Rid,Addr), either by dma reading (Rid,Addr), or by data
- * embedded directly in the command.
- *
- * A common pattern is for a client to use generation counts to signal a dma
- * update of a datastructure. To facilitate this, this MCDI operation can
- * contain multiple requests which are executed in strict order. Requests take
- * the form of duplicating the entire MCDI request continuously (including the
- * requests record, which is ignored in all but the first structure)
- *
- * The source data can either come from a DMA from the host, or it can be
- * embedded within the request directly, thereby eliminating a DMA read. To
- * indicate this, the client sets FROM_RID=%RID_INLINE, ADDR_HI=0, and
- * ADDR_LO=offset, and inserts the data at %offset from the start of the
- * payload. It's the callers responsibility to ensure that the embedded data
- * doesn't overlap the records.
- *
- * Returns: 0, EINVAL (invalid RID)
- */
-#define MC_CMD_MEMCPY 0x31
-
-/* MC_CMD_MEMCPY_IN msgrequest */
-#define MC_CMD_MEMCPY_IN_LENMIN 32
-#define MC_CMD_MEMCPY_IN_LENMAX 224
-#define MC_CMD_MEMCPY_IN_LENMAX_MCDI2 992
-#define MC_CMD_MEMCPY_IN_LEN(num) (0+32*(num))
-#define MC_CMD_MEMCPY_IN_RECORD_NUM(len) (((len)-0)/32)
-/* see MC_CMD_MEMCPY_RECORD_TYPEDEF */
-#define MC_CMD_MEMCPY_IN_RECORD_OFST 0
-#define MC_CMD_MEMCPY_IN_RECORD_LEN 32
-#define MC_CMD_MEMCPY_IN_RECORD_MINNUM 1
-#define MC_CMD_MEMCPY_IN_RECORD_MAXNUM 7
-#define MC_CMD_MEMCPY_IN_RECORD_MAXNUM_MCDI2 31
-
-/* MC_CMD_MEMCPY_OUT msgresponse */
-#define MC_CMD_MEMCPY_OUT_LEN 0
+/* MC_CMD_MAC_STATS_V5_OUT_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V5_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_V5_OUT_NO_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V5*64))>>3)
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_LEN 8
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_LO_LEN 4
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_LO_LBN 0
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_LO_WIDTH 32
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_HI_LEN 4
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_HI_LBN 32
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_HI_WIDTH 32
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V5
+/* enum property: index */
+/* enum: Start of V5 stats buffer space */
+#define MC_CMD_MAC_V5_DMABUF_START 0x7c
+/* enum: Link toggle counter: Number of times the link has toggled between
+ * up/down and down/up
+ */
+#define MC_CMD_MAC_LINK_TOGGLES 0x7c
+/* enum: This includes the space at offset 125 which is the final
+ * GENERATION_END in a MAC_STATS_V5 response and otherwise unused.
+ */
+#define MC_CMD_MAC_NSTATS_V5 0x7e
+/* Other enum values, see field(s): */
+/* MC_CMD_MAC_STATS_V4_OUT_NO_DMA/STATISTICS */
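
MC_CMD_MAC_NSTATS_V5 is 0x7e (126 counters, with GENERATION_END in the final slot at index 125), so the V5 no-DMA response and the DMA buffer are both 126 * 8 = 1008 bytes, which is exactly what the (NSTATS_V5*64)>>3 expression above evaluates to. A compile-time sanity check of that arithmetic, using a local copy of the constant purely for illustration:

#include <assert.h>
#include <stdint.h>

#define EXAMPLE_MAC_NSTATS_V5 0x7e /* mirrors MC_CMD_MAC_NSTATS_V5 */

/* Illustrative sketch: the V5 stats area is one 64-bit counter per
 * statistic, i.e. 1008 bytes in total. */
static_assert(((EXAMPLE_MAC_NSTATS_V5 * 64) >> 3) ==
              EXAMPLE_MAC_NSTATS_V5 * sizeof(uint64_t),
              "V5 MAC stats buffer is NSTATS_V5 * 8 bytes");
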
/***********************************/
@@ -6984,6 +7503,7 @@
#define MC_CMD_WOL_FILTER_RESET_IN_LEN 4
#define MC_CMD_WOL_FILTER_RESET_IN_MASK_OFST 0
#define MC_CMD_WOL_FILTER_RESET_IN_MASK_LEN 4
+/* enum property: bitmask */
#define MC_CMD_WOL_FILTER_RESET_IN_WAKE_FILTERS 0x1 /* enum */
#define MC_CMD_WOL_FILTER_RESET_IN_LIGHTSOUT_OFFLOADS 0x2 /* enum */
@@ -6992,23 +7512,6 @@
/***********************************/
-/* MC_CMD_SET_MCAST_HASH
- * Set the MCAST hash value without otherwise reconfiguring the MAC
- */
-#define MC_CMD_SET_MCAST_HASH 0x35
-
-/* MC_CMD_SET_MCAST_HASH_IN msgrequest */
-#define MC_CMD_SET_MCAST_HASH_IN_LEN 32
-#define MC_CMD_SET_MCAST_HASH_IN_HASH0_OFST 0
-#define MC_CMD_SET_MCAST_HASH_IN_HASH0_LEN 16
-#define MC_CMD_SET_MCAST_HASH_IN_HASH1_OFST 16
-#define MC_CMD_SET_MCAST_HASH_IN_HASH1_LEN 16
-
-/* MC_CMD_SET_MCAST_HASH_OUT msgresponse */
-#define MC_CMD_SET_MCAST_HASH_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_NVRAM_TYPES
* Return bitfield indicating available types of virtual NVRAM partitions.
* Locks required: none. Returns: 0
@@ -7026,6 +7529,7 @@
/* Bit mask of supported types. */
#define MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0
#define MC_CMD_NVRAM_TYPES_OUT_TYPES_LEN 4
+/* enum property: bitshift */
/* enum: Disabled callisto. */
#define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0x0
/* enum: MC firmware. */
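
The "enum property: bitshift" annotation on TYPES suggests that each MC_CMD_NVRAM_TYPE_* value is a bit position within the returned mask rather than a literal mask value; that reading is an assumption based on the "Bit mask of supported types" description. Under that assumption, a support check is a one-line shift, as sketched below with an invented helper name:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative sketch: test one MC_CMD_NVRAM_TYPE_* value against the
 * TYPES bitmask from MC_CMD_NVRAM_TYPES_OUT, assuming the enum value
 * is a bit position. */
static bool nvram_type_supported(uint32_t types_mask, unsigned int type)
{
        return (types_mask >> type) & 1;
}
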
@@ -7152,6 +7656,12 @@
#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_OFST 12
#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_LBN 7
#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_WRITE_ONLY_OFST 12
+#define MC_CMD_NVRAM_INFO_V2_OUT_WRITE_ONLY_LBN 8
+#define MC_CMD_NVRAM_INFO_V2_OUT_WRITE_ONLY_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_SEQUENTIAL_WRITE_OFST 12
+#define MC_CMD_NVRAM_INFO_V2_OUT_SEQUENTIAL_WRITE_LBN 9
+#define MC_CMD_NVRAM_INFO_V2_OUT_SEQUENTIAL_WRITE_WIDTH 1
#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_OFST 16
#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_LEN 4
#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_OFST 20
@@ -7499,6 +8009,128 @@
#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_COMPONENT_COPY_FAILED 0x19
/* enum: The update operation is in-progress. */
#define MC_CMD_NVRAM_VERIFY_RC_PENDING 0x1a
+/* enum: The update was an invalid user configuration file. */
+#define MC_CMD_NVRAM_VERIFY_RC_BAD_CONFIG 0x1b
+/* enum: The write was to the AUTO partition but the data was not recognised as
+ * a valid partition.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_UNKNOWN_TYPE 0x1c
+
+/* MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT msgresponse */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_LEN 88
+/* Result of NVRAM update completion processing. Result codes that indicate an
+ * internal build failure, and are therefore not expected to be seen by
+ * customers in the field, are marked with the prefix 'Internal-error'.
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_RESULT_CODE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_RESULT_CODE_LEN 4
+/* enum: Invalid return code; only non-zero values are defined. Defined as
+ * unknown for backwards compatibility with NVRAM_UPDATE_FINISH_OUT.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_UNKNOWN 0x0 */
+/* enum: Verify succeeded without any errors. */
+/* MC_CMD_NVRAM_VERIFY_RC_SUCCESS 0x1 */
+/* enum: CMS format verification failed due to an internal error. */
+/* MC_CMD_NVRAM_VERIFY_RC_CMS_CHECK_FAILED 0x2 */
+/* enum: Invalid CMS format in image metadata. */
+/* MC_CMD_NVRAM_VERIFY_RC_INVALID_CMS_FORMAT 0x3 */
+/* enum: Message digest verification failed due to an internal error. */
+/* MC_CMD_NVRAM_VERIFY_RC_MESSAGE_DIGEST_CHECK_FAILED 0x4 */
+/* enum: Error in message digest calculated over the reflash-header, payload
+ * and reflash-trailer.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BAD_MESSAGE_DIGEST 0x5 */
+/* enum: Signature verification failed due to an internal error. */
+/* MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHECK_FAILED 0x6 */
+/* enum: There are no valid signatures in the image. */
+/* MC_CMD_NVRAM_VERIFY_RC_NO_VALID_SIGNATURES 0x7 */
+/* enum: Trusted approvers verification failed due to an internal error. */
+/* MC_CMD_NVRAM_VERIFY_RC_TRUSTED_APPROVERS_CHECK_FAILED 0x8 */
+/* enum: The Trusted approver's list is empty. */
+/* MC_CMD_NVRAM_VERIFY_RC_NO_TRUSTED_APPROVERS 0x9 */
+/* enum: Signature chain verification failed due to an internal error. */
+/* MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHAIN_CHECK_FAILED 0xa */
+/* enum: The signers of the signatures in the image are not listed in the
+ * Trusted approver's list.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_NO_SIGNATURE_MATCH 0xb */
+/* enum: The image contains a test-signed certificate, but the adapter accepts
+ * only production signed images.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_REJECT_TEST_SIGNED 0xc */
+/* enum: The image has a lower security level than the current firmware. */
+/* MC_CMD_NVRAM_VERIFY_RC_SECURITY_LEVEL_DOWNGRADE 0xd */
+/* enum: Internal-error. The signed image is missing the 'contents' section,
+ * where the 'contents' section holds the actual image payload to be applied.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_CONTENT_NOT_FOUND 0xe */
+/* enum: Internal-error. The bundle header is invalid. */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_CONTENT_HEADER_INVALID 0xf */
+/* enum: Internal-error. The bundle does not have a valid reflash image layout.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_REFLASH_IMAGE_INVALID 0x10 */
+/* enum: Internal-error. The bundle has an inconsistent layout of components or
+ * incorrect checksum.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_IMAGE_LAYOUT_INVALID 0x11 */
+/* enum: Internal-error. The bundle manifest is inconsistent with components in
+ * the bundle.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_INVALID 0x12 */
+/* enum: Internal-error. The number of components in a bundle do not match the
+ * number of components advertised by the bundle manifest.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_NUM_COMPONENTS_MISMATCH 0x13 */
+/* enum: Internal-error. The bundle contains too many components for the MC
+ * firmware to process.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_TOO_MANY_COMPONENTS 0x14 */
+/* enum: Internal-error. The bundle manifest has an invalid/inconsistent
+ * component.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_COMPONENT_INVALID 0x15 */
+/* enum: Internal-error. The hash of a component does not match the hash stored
+ * in the bundle manifest.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_COMPONENT_HASH_MISMATCH 0x16 */
+/* enum: Internal-error. Component hash calculation failed. */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_COMPONENT_HASH_FAILED 0x17 */
+/* enum: Internal-error. The component does not have a valid reflash image
+ * layout.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_COMPONENT_REFLASH_IMAGE_INVALID 0x18 */
+/* enum: The bundle processing code failed to copy a component to its target
+ * partition.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_COMPONENT_COPY_FAILED 0x19 */
+/* enum: The update operation is in-progress. */
+/* MC_CMD_NVRAM_VERIFY_RC_PENDING 0x1a */
+/* enum: The update was an invalid user configuration file. */
+/* MC_CMD_NVRAM_VERIFY_RC_BAD_CONFIG 0x1b */
+/* enum: The write was to the AUTO partition but the data was not recognised as
+ * a valid partition.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_UNKNOWN_TYPE 0x1c */
+/* If the update was a user configuration, what action(s) the user must take to
+ * apply the new configuration.
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_ACTIONS_REQUIRED_OFST 4
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_ACTIONS_REQUIRED_LEN 4
+/* enum: No action required. */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_NONE 0x0
+/* enum: The MC firmware must be rebooted (eg with MC_CMD_REBOOT). */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_FIRMWARE_REBOOT 0x1
+/* enum: The host must be rebooted. */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_HOST_REBOOT 0x2
+/* enum: The firmware and host must be rebooted (in either order). */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_FIRMWARE_AND_HOST_REBOOT 0x3
+/* enum: The host must be fully powered off. */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_HOST_POWERCYCLE 0x4
+/* If the update failed with MC_CMD_NVRAM_VERIFY_RC_BAD_CONFIG, a null-
+ * terminated US-ASCII string suitable for showing to the user.
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_ERROR_STRING_OFST 8
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_ERROR_STRING_LEN 80
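
ACTIONS_REQUIRED tells the caller what, if anything, must happen after a configuration update, and ERROR_STRING (80 bytes at offset 8, NUL-terminated US-ASCII) is only meaningful after a BAD_CONFIG failure. Below is a hedged sketch of turning the action code from a raw MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT buffer into a user-facing hint; the function name and buffer handling are invented for the example and a little-endian host is assumed.

#include <stdint.h>
#include <string.h>

/* Illustrative sketch: map ACTIONS_REQUIRED (dword at offset 4) to a
 * human-readable hint. */
static const char *nvram_update_action(const uint8_t *outbuf)
{
        uint32_t action;

        memcpy(&action, outbuf + 4, sizeof(action));
        switch (action) {
        case 0x0: return "no action required";
        case 0x1: return "reboot the MC firmware";
        case 0x2: return "reboot the host";
        case 0x3: return "reboot the firmware and the host";
        case 0x4: return "power the host off completely";
        default:  return "unknown action";
        }
}
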
/***********************************/
@@ -7522,7 +8154,7 @@
#define MC_CMD_REBOOT 0x3d
#undef MC_CMD_0x3d_PRIVILEGE_CTG
-#define MC_CMD_0x3d_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x3d_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_REBOOT_IN msgrequest */
#define MC_CMD_REBOOT_IN_LEN 4
@@ -7535,65 +8167,6 @@
/***********************************/
-/* MC_CMD_SCHEDINFO
- * Request scheduler info. Locks required: NONE. Returns: An array of
- * (timeslice,maximum overrun), one for each thread, in ascending order of
- * thread address.
- */
-#define MC_CMD_SCHEDINFO 0x3e
-#undef MC_CMD_0x3e_PRIVILEGE_CTG
-
-#define MC_CMD_0x3e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SCHEDINFO_IN msgrequest */
-#define MC_CMD_SCHEDINFO_IN_LEN 0
-
-/* MC_CMD_SCHEDINFO_OUT msgresponse */
-#define MC_CMD_SCHEDINFO_OUT_LENMIN 4
-#define MC_CMD_SCHEDINFO_OUT_LENMAX 252
-#define MC_CMD_SCHEDINFO_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_SCHEDINFO_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_SCHEDINFO_OUT_DATA_NUM(len) (((len)-0)/4)
-#define MC_CMD_SCHEDINFO_OUT_DATA_OFST 0
-#define MC_CMD_SCHEDINFO_OUT_DATA_LEN 4
-#define MC_CMD_SCHEDINFO_OUT_DATA_MINNUM 1
-#define MC_CMD_SCHEDINFO_OUT_DATA_MAXNUM 63
-#define MC_CMD_SCHEDINFO_OUT_DATA_MAXNUM_MCDI2 255
-
-
-/***********************************/
-/* MC_CMD_REBOOT_MODE
- * Set the mode for the next MC reboot. Locks required: NONE. Sets the reboot
- * mode to the specified value. Returns the old mode.
- */
-#define MC_CMD_REBOOT_MODE 0x3f
-#undef MC_CMD_0x3f_PRIVILEGE_CTG
-
-#define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_REBOOT_MODE_IN msgrequest */
-#define MC_CMD_REBOOT_MODE_IN_LEN 4
-#define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0
-#define MC_CMD_REBOOT_MODE_IN_VALUE_LEN 4
-/* enum: Normal. */
-#define MC_CMD_REBOOT_MODE_NORMAL 0x0
-/* enum: Power-on Reset. */
-#define MC_CMD_REBOOT_MODE_POR 0x2
-/* enum: Snapper. */
-#define MC_CMD_REBOOT_MODE_SNAPPER 0x3
-/* enum: snapper fake POR */
-#define MC_CMD_REBOOT_MODE_SNAPPER_POR 0x4
-#define MC_CMD_REBOOT_MODE_IN_FAKE_OFST 0
-#define MC_CMD_REBOOT_MODE_IN_FAKE_LBN 7
-#define MC_CMD_REBOOT_MODE_IN_FAKE_WIDTH 1
-
-/* MC_CMD_REBOOT_MODE_OUT msgresponse */
-#define MC_CMD_REBOOT_MODE_OUT_LEN 4
-#define MC_CMD_REBOOT_MODE_OUT_VALUE_OFST 0
-#define MC_CMD_REBOOT_MODE_OUT_VALUE_LEN 4
-
-
-/***********************************/
/* MC_CMD_SENSOR_INFO
* Returns information about every available sensor.
*
@@ -8061,6 +8634,54 @@
/* MC_CMD_GET_PHY_STATE_IN msgrequest */
#define MC_CMD_GET_PHY_STATE_IN_LEN 0
+/* MC_CMD_GET_PHY_STATE_IN_V2 msgrequest */
+#define MC_CMD_GET_PHY_STATE_IN_V2_LEN 8
+/* Target port to request PHY state for. Uses MAE_LINK_ENDPOINT_SELECTOR which
+ * identifies a real or virtual network port by MAE port and link end. See the
+ * structure definition for more details.
+ */
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_OFST 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LEN 8
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LO_OFST 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LO_LEN 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LO_LBN 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LO_WIDTH 32
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_HI_OFST 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_HI_LEN 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_HI_LBN 32
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_HI_WIDTH 32
+/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_OFST 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_LEN 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FLAT_OFST 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FLAT_LEN 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_TYPE_OFST 3
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_TYPE_LEN 1
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 20
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 16
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 2
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LINK_END_OFST 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LINK_END_LEN 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_OFST 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_LEN 8
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_LO_OFST 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_LO_LEN 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_LO_LBN 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_LO_WIDTH 32
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_HI_OFST 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_HI_LEN 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_HI_LBN 32
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_HI_WIDTH 32
+
/* MC_CMD_GET_PHY_STATE_OUT msgresponse */
#define MC_CMD_GET_PHY_STATE_OUT_LEN 4
#define MC_CMD_GET_PHY_STATE_OUT_STATE_OFST 0
@@ -8072,22 +8693,6 @@
/***********************************/
-/* MC_CMD_SETUP_8021QBB
- * 802.1Qbb control. 8 Tx queues that map to priorities 0 - 7. Use all 1s to
- * disable 802.Qbb for a given priority.
- */
-#define MC_CMD_SETUP_8021QBB 0x44
-
-/* MC_CMD_SETUP_8021QBB_IN msgrequest */
-#define MC_CMD_SETUP_8021QBB_IN_LEN 32
-#define MC_CMD_SETUP_8021QBB_IN_TXQS_OFST 0
-#define MC_CMD_SETUP_8021QBB_IN_TXQS_LEN 32
-
-/* MC_CMD_SETUP_8021QBB_OUT msgresponse */
-#define MC_CMD_SETUP_8021QBB_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_WOL_FILTER_GET
* Retrieve ID of any WoL filters. Locks required: None. Returns: 0, ENOSYS
*/
@@ -8106,133 +8711,6 @@
/***********************************/
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD
- * Add a protocol offload to NIC for lights-out state. Locks required: None.
- * Returns: 0, ENOSYS
- */
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD 0x46
-#undef MC_CMD_0x46_PRIVILEGE_CTG
-
-#define MC_CMD_0x46_PRIVILEGE_CTG SRIOV_CTG_LINK
-
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN msgrequest */
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMIN 8
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX 252
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX_MCDI2 1020
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LEN(num) (4+4*(num))
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_NUM(len) (((len)-4)/4)
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4
-#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP 0x1 /* enum */
-#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS 0x2 /* enum */
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_OFST 4
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_LEN 4
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MINNUM 1
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MAXNUM 62
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MAXNUM_MCDI2 254
-
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP msgrequest */
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_LEN 14
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_OFST 4
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_LEN 6
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_OFST 10
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_LEN 4
-
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS msgrequest */
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_LEN 42
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_OFST 4
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_LEN 6
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_OFST 10
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_LEN 16
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6_OFST 26
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6_LEN 16
-
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT msgresponse */
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN 4
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_OFST 0
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_LEN 4
-
-
-/***********************************/
-/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD
- * Remove a protocol offload from NIC for lights-out state. Locks required:
- * None. Returns: 0, ENOSYS
- */
-#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD 0x47
-#undef MC_CMD_0x47_PRIVILEGE_CTG
-
-#define MC_CMD_0x47_PRIVILEGE_CTG SRIOV_CTG_LINK
-
-/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN msgrequest */
-#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN 8
-#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
-#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4
-#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_OFST 4
-#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_LEN 4
-
-/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT msgresponse */
-#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_MAC_RESET_RESTORE
- * Restore MAC after block reset. Locks required: None. Returns: 0.
- */
-#define MC_CMD_MAC_RESET_RESTORE 0x48
-
-/* MC_CMD_MAC_RESET_RESTORE_IN msgrequest */
-#define MC_CMD_MAC_RESET_RESTORE_IN_LEN 0
-
-/* MC_CMD_MAC_RESET_RESTORE_OUT msgresponse */
-#define MC_CMD_MAC_RESET_RESTORE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_TESTASSERT
- * Deliberately trigger an assert-detonation in the firmware for testing
- * purposes (i.e. to allow tests that the driver copes gracefully). Locks
- * required: None Returns: 0
- */
-#define MC_CMD_TESTASSERT 0x49
-#undef MC_CMD_0x49_PRIVILEGE_CTG
-
-#define MC_CMD_0x49_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_TESTASSERT_IN msgrequest */
-#define MC_CMD_TESTASSERT_IN_LEN 0
-
-/* MC_CMD_TESTASSERT_OUT msgresponse */
-#define MC_CMD_TESTASSERT_OUT_LEN 0
-
-/* MC_CMD_TESTASSERT_V2_IN msgrequest */
-#define MC_CMD_TESTASSERT_V2_IN_LEN 4
-/* How to provoke the assertion */
-#define MC_CMD_TESTASSERT_V2_IN_TYPE_OFST 0
-#define MC_CMD_TESTASSERT_V2_IN_TYPE_LEN 4
-/* enum: Assert using the FAIL_ASSERTION_WITH_USEFUL_VALUES macro. Unless
- * you're testing firmware, this is what you want.
- */
-#define MC_CMD_TESTASSERT_V2_IN_FAIL_ASSERTION_WITH_USEFUL_VALUES 0x0
-/* enum: Assert using assert(0); */
-#define MC_CMD_TESTASSERT_V2_IN_ASSERT_FALSE 0x1
-/* enum: Deliberately trigger a watchdog */
-#define MC_CMD_TESTASSERT_V2_IN_WATCHDOG 0x2
-/* enum: Deliberately trigger a trap by loading from an invalid address */
-#define MC_CMD_TESTASSERT_V2_IN_LOAD_TRAP 0x3
-/* enum: Deliberately trigger a trap by storing to an invalid address */
-#define MC_CMD_TESTASSERT_V2_IN_STORE_TRAP 0x4
-/* enum: Jump to an invalid address */
-#define MC_CMD_TESTASSERT_V2_IN_JUMP_TRAP 0x5
-
-/* MC_CMD_TESTASSERT_V2_OUT msgresponse */
-#define MC_CMD_TESTASSERT_V2_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_WORKAROUND
* Enable/Disable a given workaround. The mcfw will return EINVAL if it doesn't
* understand the given workaround number - which should not be treated as a
@@ -8324,6 +8802,62 @@
#define MC_CMD_GET_PHY_MEDIA_INFO_IN_DSFP_BANK_LBN 16
#define MC_CMD_GET_PHY_MEDIA_INFO_IN_DSFP_BANK_WIDTH 16
+/* MC_CMD_GET_PHY_MEDIA_INFO_IN_V2 msgrequest */
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_LEN 12
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_PAGE_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_PAGE_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_DSFP_PAGE_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_DSFP_PAGE_LBN 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_DSFP_PAGE_WIDTH 16
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_DSFP_BANK_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_DSFP_BANK_LBN 16
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_DSFP_BANK_WIDTH 16
+/* Target port to request PHY module media information for. Uses
+ * MAE_LINK_ENDPOINT_SELECTOR which identifies a real or virtual network port
+ * by MAE port and link end. See the structure definition for more details.
+ */
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LEN 8
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LO_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LO_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LO_LBN 32
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LO_WIDTH 32
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_HI_OFST 8
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_HI_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_HI_LBN 64
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_HI_WIDTH 32
+/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FLAT_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FLAT_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_TYPE_OFST 7
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_TYPE_LEN 1
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 32
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 52
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 48
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 6
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LINK_END_OFST 8
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LINK_END_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_LEN 8
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_LO_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_LO_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_LO_LBN 32
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_LO_WIDTH 32
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_HI_OFST 8
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_HI_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_HI_LBN 64
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_HI_WIDTH 32
+
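
In the V2 request the first dword packs the module page number into DSFP_PAGE (bits 0..15) and the bank into DSFP_BANK (bits 16..31), with the TARGET selector following at offset 4. A minimal sketch of building that first dword in a raw request buffer; the helper name is an assumption and MCDI's little-endian byte order is glossed over for brevity.

#include <stdint.h>
#include <string.h>

/* Illustrative sketch: pack DSFP page and bank into the PAGE dword of
 * an MC_CMD_GET_PHY_MEDIA_INFO_IN_V2 request. */
static void phy_media_info_v2_set_page(uint8_t *inbuf, uint16_t page,
                                       uint16_t bank)
{
        uint32_t dword = (uint32_t)page | ((uint32_t)bank << 16);

        memcpy(inbuf, &dword, sizeof(dword)); /* ..._V2_PAGE_OFST == 0 */
}
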
/* MC_CMD_GET_PHY_MEDIA_INFO_OUT msgresponse */
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX 252
@@ -8348,7 +8882,7 @@
#define MC_CMD_NVRAM_TEST 0x4c
#undef MC_CMD_0x4c_PRIVILEGE_CTG
-#define MC_CMD_0x4c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x4c_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_NVRAM_TEST_IN msgrequest */
#define MC_CMD_NVRAM_TEST_IN_LEN 4
@@ -8370,103 +8904,6 @@
/***********************************/
-/* MC_CMD_MRSFP_TWEAK
- * Read status and/or set parameters for the 'mrsfp' driver in mr_rusty builds.
- * I2C I/O expander bits are always read; if equaliser parameters are supplied,
- * they are configured first. Locks required: None. Return code: 0, EINVAL.
- */
-#define MC_CMD_MRSFP_TWEAK 0x4d
-
-/* MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG msgrequest */
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_LEN 16
-/* 0-6 low->high de-emph. */
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_OFST 0
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_LEN 4
-/* 0-8 low->high ref.V */
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_OFST 4
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_LEN 4
-/* 0-8 0-8 low->high boost */
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_OFST 8
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_LEN 4
-/* 0-8 low->high ref.V */
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_OFST 12
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_LEN 4
-
-/* MC_CMD_MRSFP_TWEAK_IN_READ_ONLY msgrequest */
-#define MC_CMD_MRSFP_TWEAK_IN_READ_ONLY_LEN 0
-
-/* MC_CMD_MRSFP_TWEAK_OUT msgresponse */
-#define MC_CMD_MRSFP_TWEAK_OUT_LEN 12
-/* input bits */
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_LEN 4
-/* output bits */
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_LEN 4
-/* direction */
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_LEN 4
-/* enum: Out. */
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OUT 0x0
-/* enum: In. */
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_IN 0x1
-
-
-/***********************************/
-/* MC_CMD_SENSOR_SET_LIMS
- * Adjusts the sensor limits. This is a warranty-voiding operation. Returns:
- * ENOENT if the sensor specified does not exist, EINVAL if the limits are out
- * of range.
- */
-#define MC_CMD_SENSOR_SET_LIMS 0x4e
-#undef MC_CMD_0x4e_PRIVILEGE_CTG
-
-#define MC_CMD_0x4e_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_SENSOR_SET_LIMS_IN msgrequest */
-#define MC_CMD_SENSOR_SET_LIMS_IN_LEN 20
-#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0
-#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
-/* interpretation is is sensor-specific. */
-#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4
-#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_LEN 4
-/* interpretation is is sensor-specific. */
-#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8
-#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_LEN 4
-/* interpretation is is sensor-specific. */
-#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12
-#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_LEN 4
-/* interpretation is is sensor-specific. */
-#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16
-#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_LEN 4
-
-/* MC_CMD_SENSOR_SET_LIMS_OUT msgresponse */
-#define MC_CMD_SENSOR_SET_LIMS_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_GET_RESOURCE_LIMITS
- */
-#define MC_CMD_GET_RESOURCE_LIMITS 0x4f
-
-/* MC_CMD_GET_RESOURCE_LIMITS_IN msgrequest */
-#define MC_CMD_GET_RESOURCE_LIMITS_IN_LEN 0
-
-/* MC_CMD_GET_RESOURCE_LIMITS_OUT msgresponse */
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN 16
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_OFST 0
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_LEN 4
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_OFST 4
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_LEN 4
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_OFST 8
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_LEN 4
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_OFST 12
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_LEN 4
-
-
-/***********************************/
/* MC_CMD_NVRAM_PARTITIONS
* Reads the list of available virtual NVRAM partition types. Locks required:
* none. Returns: 0, EINVAL (bad type).
@@ -8582,806 +9019,6 @@
#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST 12
#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_LEN 4
-
-/***********************************/
-/* MC_CMD_CLP
- * Perform a CLP related operation, see SF-110495-PS for details of CLP
- * processing. This command has been extended to accomodate the requirements of
- * different manufacturers which are to be found in SF-119187-TC, SF-119186-TC,
- * SF-120509-TC and SF-117282-PS.
- */
-#define MC_CMD_CLP 0x56
-#undef MC_CMD_0x56_PRIVILEGE_CTG
-
-#define MC_CMD_0x56_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_CLP_IN msgrequest */
-#define MC_CMD_CLP_IN_LEN 4
-/* Sub operation */
-#define MC_CMD_CLP_IN_OP_OFST 0
-#define MC_CMD_CLP_IN_OP_LEN 4
-/* enum: Return to factory default settings */
-#define MC_CMD_CLP_OP_DEFAULT 0x1
-/* enum: Set MAC address */
-#define MC_CMD_CLP_OP_SET_MAC 0x2
-/* enum: Get MAC address */
-#define MC_CMD_CLP_OP_GET_MAC 0x3
-/* enum: Set UEFI/GPXE boot mode */
-#define MC_CMD_CLP_OP_SET_BOOT 0x4
-/* enum: Get UEFI/GPXE boot mode */
-#define MC_CMD_CLP_OP_GET_BOOT 0x5
-
-/* MC_CMD_CLP_OUT msgresponse */
-#define MC_CMD_CLP_OUT_LEN 0
-
-/* MC_CMD_CLP_IN_DEFAULT msgrequest */
-#define MC_CMD_CLP_IN_DEFAULT_LEN 4
-/* MC_CMD_CLP_IN_OP_OFST 0 */
-/* MC_CMD_CLP_IN_OP_LEN 4 */
-
-/* MC_CMD_CLP_OUT_DEFAULT msgresponse */
-#define MC_CMD_CLP_OUT_DEFAULT_LEN 0
-
-/* MC_CMD_CLP_IN_SET_MAC msgrequest */
-#define MC_CMD_CLP_IN_SET_MAC_LEN 12
-/* MC_CMD_CLP_IN_OP_OFST 0 */
-/* MC_CMD_CLP_IN_OP_LEN 4 */
-/* The MAC address assigned to port. A zero MAC address of 00:00:00:00:00:00
- * restores the permanent (factory-programmed) MAC address associated with the
- * port. A non-zero MAC address persists until a PCIe reset or a power cycle.
- */
-#define MC_CMD_CLP_IN_SET_MAC_ADDR_OFST 4
-#define MC_CMD_CLP_IN_SET_MAC_ADDR_LEN 6
-/* Padding */
-#define MC_CMD_CLP_IN_SET_MAC_RESERVED_OFST 10
-#define MC_CMD_CLP_IN_SET_MAC_RESERVED_LEN 2
-
-/* MC_CMD_CLP_OUT_SET_MAC msgresponse */
-#define MC_CMD_CLP_OUT_SET_MAC_LEN 0
-
-/* MC_CMD_CLP_IN_SET_MAC_V2 msgrequest */
-#define MC_CMD_CLP_IN_SET_MAC_V2_LEN 16
-/* MC_CMD_CLP_IN_OP_OFST 0 */
-/* MC_CMD_CLP_IN_OP_LEN 4 */
-/* The MAC address assigned to port. A zero MAC address of 00:00:00:00:00:00
- * restores the permanent (factory-programmed) MAC address associated with the
- * port. A non-zero MAC address persists until a PCIe reset or a power cycle.
- */
-#define MC_CMD_CLP_IN_SET_MAC_V2_ADDR_OFST 4
-#define MC_CMD_CLP_IN_SET_MAC_V2_ADDR_LEN 6
-/* Padding */
-#define MC_CMD_CLP_IN_SET_MAC_V2_RESERVED_OFST 10
-#define MC_CMD_CLP_IN_SET_MAC_V2_RESERVED_LEN 2
-#define MC_CMD_CLP_IN_SET_MAC_V2_FLAGS_OFST 12
-#define MC_CMD_CLP_IN_SET_MAC_V2_FLAGS_LEN 4
-#define MC_CMD_CLP_IN_SET_MAC_V2_VIRTUAL_OFST 12
-#define MC_CMD_CLP_IN_SET_MAC_V2_VIRTUAL_LBN 0
-#define MC_CMD_CLP_IN_SET_MAC_V2_VIRTUAL_WIDTH 1
-
-/* MC_CMD_CLP_IN_GET_MAC msgrequest */
-#define MC_CMD_CLP_IN_GET_MAC_LEN 4
-/* MC_CMD_CLP_IN_OP_OFST 0 */
-/* MC_CMD_CLP_IN_OP_LEN 4 */
-
-/* MC_CMD_CLP_IN_GET_MAC_V2 msgrequest */
-#define MC_CMD_CLP_IN_GET_MAC_V2_LEN 8
-/* MC_CMD_CLP_IN_OP_OFST 0 */
-/* MC_CMD_CLP_IN_OP_LEN 4 */
-#define MC_CMD_CLP_IN_GET_MAC_V2_FLAGS_OFST 4
-#define MC_CMD_CLP_IN_GET_MAC_V2_FLAGS_LEN 4
-#define MC_CMD_CLP_IN_GET_MAC_V2_PERMANENT_OFST 4
-#define MC_CMD_CLP_IN_GET_MAC_V2_PERMANENT_LBN 0
-#define MC_CMD_CLP_IN_GET_MAC_V2_PERMANENT_WIDTH 1
-
-/* MC_CMD_CLP_OUT_GET_MAC msgresponse */
-#define MC_CMD_CLP_OUT_GET_MAC_LEN 8
-/* MAC address assigned to port */
-#define MC_CMD_CLP_OUT_GET_MAC_ADDR_OFST 0
-#define MC_CMD_CLP_OUT_GET_MAC_ADDR_LEN 6
-/* Padding */
-#define MC_CMD_CLP_OUT_GET_MAC_RESERVED_OFST 6
-#define MC_CMD_CLP_OUT_GET_MAC_RESERVED_LEN 2
-
-/* MC_CMD_CLP_IN_SET_BOOT msgrequest */
-#define MC_CMD_CLP_IN_SET_BOOT_LEN 5
-/* MC_CMD_CLP_IN_OP_OFST 0 */
-/* MC_CMD_CLP_IN_OP_LEN 4 */
-/* Boot flag */
-#define MC_CMD_CLP_IN_SET_BOOT_FLAG_OFST 4
-#define MC_CMD_CLP_IN_SET_BOOT_FLAG_LEN 1
-
-/* MC_CMD_CLP_OUT_SET_BOOT msgresponse */
-#define MC_CMD_CLP_OUT_SET_BOOT_LEN 0
-
-/* MC_CMD_CLP_IN_GET_BOOT msgrequest */
-#define MC_CMD_CLP_IN_GET_BOOT_LEN 4
-/* MC_CMD_CLP_IN_OP_OFST 0 */
-/* MC_CMD_CLP_IN_OP_LEN 4 */
-
-/* MC_CMD_CLP_OUT_GET_BOOT msgresponse */
-#define MC_CMD_CLP_OUT_GET_BOOT_LEN 4
-/* Boot flag */
-#define MC_CMD_CLP_OUT_GET_BOOT_FLAG_OFST 0
-#define MC_CMD_CLP_OUT_GET_BOOT_FLAG_LEN 1
-/* Padding */
-#define MC_CMD_CLP_OUT_GET_BOOT_RESERVED_OFST 1
-#define MC_CMD_CLP_OUT_GET_BOOT_RESERVED_LEN 3
-
-
-/***********************************/
-/* MC_CMD_MUM
- * Perform a MUM operation
- */
-#define MC_CMD_MUM 0x57
-#undef MC_CMD_0x57_PRIVILEGE_CTG
-
-#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_MUM_IN msgrequest */
-#define MC_CMD_MUM_IN_LEN 4
-#define MC_CMD_MUM_IN_OP_HDR_OFST 0
-#define MC_CMD_MUM_IN_OP_HDR_LEN 4
-#define MC_CMD_MUM_IN_OP_OFST 0
-#define MC_CMD_MUM_IN_OP_LBN 0
-#define MC_CMD_MUM_IN_OP_WIDTH 8
-/* enum: NULL MCDI command to MUM */
-#define MC_CMD_MUM_OP_NULL 0x1
-/* enum: Get MUM version */
-#define MC_CMD_MUM_OP_GET_VERSION 0x2
-/* enum: Issue raw I2C command to MUM */
-#define MC_CMD_MUM_OP_RAW_CMD 0x3
-/* enum: Read from registers on devices connected to MUM. */
-#define MC_CMD_MUM_OP_READ 0x4
-/* enum: Write to registers on devices connected to MUM. */
-#define MC_CMD_MUM_OP_WRITE 0x5
-/* enum: Control UART logging. */
-#define MC_CMD_MUM_OP_LOG 0x6
-/* enum: Operations on MUM GPIO lines */
-#define MC_CMD_MUM_OP_GPIO 0x7
-/* enum: Get sensor readings from MUM */
-#define MC_CMD_MUM_OP_READ_SENSORS 0x8
-/* enum: Initiate clock programming on the MUM */
-#define MC_CMD_MUM_OP_PROGRAM_CLOCKS 0x9
-/* enum: Initiate FPGA load from flash on the MUM */
-#define MC_CMD_MUM_OP_FPGA_LOAD 0xa
-/* enum: Request sensor reading from MUM ADC resulting from earlier request via
- * MUM ATB
- */
-#define MC_CMD_MUM_OP_READ_ATB_SENSOR 0xb
-/* enum: Send commands relating to the QSFP ports via the MUM for PHY
- * operations
- */
-#define MC_CMD_MUM_OP_QSFP 0xc
-/* enum: Request discrete and SODIMM DDR info (type, size, speed grade, voltage
- * level) from MUM
- */
-#define MC_CMD_MUM_OP_READ_DDR_INFO 0xd
-
-/* MC_CMD_MUM_IN_NULL msgrequest */
-#define MC_CMD_MUM_IN_NULL_LEN 4
-/* MUM cmd header */
-#define MC_CMD_MUM_IN_CMD_OFST 0
-#define MC_CMD_MUM_IN_CMD_LEN 4
-
-/* MC_CMD_MUM_IN_GET_VERSION msgrequest */
-#define MC_CMD_MUM_IN_GET_VERSION_LEN 4
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-
-/* MC_CMD_MUM_IN_READ msgrequest */
-#define MC_CMD_MUM_IN_READ_LEN 16
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-/* ID of (device connected to MUM) to read from registers of */
-#define MC_CMD_MUM_IN_READ_DEVICE_OFST 4
-#define MC_CMD_MUM_IN_READ_DEVICE_LEN 4
-/* enum: Hittite HMC1035 clock generator on Sorrento board */
-#define MC_CMD_MUM_DEV_HITTITE 0x1
-/* enum: Hittite HMC1035 clock generator for NIC-side on Sorrento board */
-#define MC_CMD_MUM_DEV_HITTITE_NIC 0x2
-/* 32-bit address to read from */
-#define MC_CMD_MUM_IN_READ_ADDR_OFST 8
-#define MC_CMD_MUM_IN_READ_ADDR_LEN 4
-/* Number of words to read. */
-#define MC_CMD_MUM_IN_READ_NUMWORDS_OFST 12
-#define MC_CMD_MUM_IN_READ_NUMWORDS_LEN 4
-
-/* MC_CMD_MUM_IN_WRITE msgrequest */
-#define MC_CMD_MUM_IN_WRITE_LENMIN 16
-#define MC_CMD_MUM_IN_WRITE_LENMAX 252
-#define MC_CMD_MUM_IN_WRITE_LENMAX_MCDI2 1020
-#define MC_CMD_MUM_IN_WRITE_LEN(num) (12+4*(num))
-#define MC_CMD_MUM_IN_WRITE_BUFFER_NUM(len) (((len)-12)/4)
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-/* ID of (device connected to MUM) to write to registers of */
-#define MC_CMD_MUM_IN_WRITE_DEVICE_OFST 4
-#define MC_CMD_MUM_IN_WRITE_DEVICE_LEN 4
-/* enum: Hittite HMC1035 clock generator on Sorrento board */
-/* MC_CMD_MUM_DEV_HITTITE 0x1 */
-/* 32-bit address to write to */
-#define MC_CMD_MUM_IN_WRITE_ADDR_OFST 8
-#define MC_CMD_MUM_IN_WRITE_ADDR_LEN 4
-/* Words to write */
-#define MC_CMD_MUM_IN_WRITE_BUFFER_OFST 12
-#define MC_CMD_MUM_IN_WRITE_BUFFER_LEN 4
-#define MC_CMD_MUM_IN_WRITE_BUFFER_MINNUM 1
-#define MC_CMD_MUM_IN_WRITE_BUFFER_MAXNUM 60
-#define MC_CMD_MUM_IN_WRITE_BUFFER_MAXNUM_MCDI2 252
-
-/* MC_CMD_MUM_IN_RAW_CMD msgrequest */
-#define MC_CMD_MUM_IN_RAW_CMD_LENMIN 17
-#define MC_CMD_MUM_IN_RAW_CMD_LENMAX 252
-#define MC_CMD_MUM_IN_RAW_CMD_LENMAX_MCDI2 1020
-#define MC_CMD_MUM_IN_RAW_CMD_LEN(num) (16+1*(num))
-#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_NUM(len) (((len)-16)/1)
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-/* MUM I2C cmd code */
-#define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_OFST 4
-#define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_LEN 4
-/* Number of bytes to write */
-#define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_OFST 8
-#define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_LEN 4
-/* Number of bytes to read */
-#define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_OFST 12
-#define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_LEN 4
-/* Bytes to write */
-#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_OFST 16
-#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_LEN 1
-#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MINNUM 1
-#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MAXNUM 236
-#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MAXNUM_MCDI2 1004
-
-/* MC_CMD_MUM_IN_LOG msgrequest */
-#define MC_CMD_MUM_IN_LOG_LEN 8
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_LOG_OP_OFST 4
-#define MC_CMD_MUM_IN_LOG_OP_LEN 4
-#define MC_CMD_MUM_IN_LOG_OP_UART 0x1 /* enum */
-
-/* MC_CMD_MUM_IN_LOG_OP_UART msgrequest */
-#define MC_CMD_MUM_IN_LOG_OP_UART_LEN 12
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-/* MC_CMD_MUM_IN_LOG_OP_OFST 4 */
-/* MC_CMD_MUM_IN_LOG_OP_LEN 4 */
-/* Enable/disable debug output to UART */
-#define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_OFST 8
-#define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_LEN 4
-
-/* MC_CMD_MUM_IN_GPIO msgrequest */
-#define MC_CMD_MUM_IN_GPIO_LEN 8
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_HDR_LEN 4
-#define MC_CMD_MUM_IN_GPIO_OPCODE_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OPCODE_LBN 0
-#define MC_CMD_MUM_IN_GPIO_OPCODE_WIDTH 8
-#define MC_CMD_MUM_IN_GPIO_IN_READ 0x0 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OUT_WRITE 0x1 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OUT_READ 0x2 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE 0x3 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ 0x4 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OP 0x5 /* enum */
-
-/* MC_CMD_MUM_IN_GPIO_IN_READ msgrequest */
-#define MC_CMD_MUM_IN_GPIO_IN_READ_LEN 8
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_LEN 4
-
-/* MC_CMD_MUM_IN_GPIO_OUT_WRITE msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_LEN 16
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_LEN 4
-/* The first 32-bit word to be written to the GPIO OUT register. */
-#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_OFST 8
-#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_LEN 4
-/* The second 32-bit word to be written to the GPIO OUT register. */
-#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_OFST 12
-#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_LEN 4
-
-/* MC_CMD_MUM_IN_GPIO_OUT_READ msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OUT_READ_LEN 8
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_LEN 4
-
-/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_LEN 16
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_LEN 4
-/* The first 32-bit word to be written to the GPIO OUT ENABLE register. */
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_OFST 8
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_LEN 4
-/* The second 32-bit word to be written to the GPIO OUT ENABLE register. */
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_OFST 12
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_LEN 4
-
-/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_LEN 8
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_LEN 4
-
-/* MC_CMD_MUM_IN_GPIO_OP msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OP_LEN 8
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OP_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_HDR_LEN 4
-#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_LBN 8
-#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_WIDTH 8
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ 0x0 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE 0x1 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG 0x2 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE 0x3 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_LBN 16
-#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_WIDTH 8
-
-/* MC_CMD_MUM_IN_GPIO_OP_OUT_READ msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_LEN 8
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_LEN 4
-
-/* MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_LEN 8
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_LEN 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_LBN 24
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_WIDTH 8
-
-/* MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_LEN 8
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_LEN 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_LBN 24
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_WIDTH 8
-
-/* MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_LEN 8
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_LEN 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_LBN 24
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_WIDTH 8
-
-/* MC_CMD_MUM_IN_READ_SENSORS msgrequest */
-#define MC_CMD_MUM_IN_READ_SENSORS_LEN 8
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_OFST 4
-#define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_LEN 4
-#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_OFST 4
-#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_LBN 0
-#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_WIDTH 8
-#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_OFST 4
-#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_LBN 8
-#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_WIDTH 8
-
-/* MC_CMD_MUM_IN_PROGRAM_CLOCKS msgrequest */
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_LEN 12
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-/* Bit-mask of clocks to be programmed */
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_OFST 4
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_LEN 4
-#define MC_CMD_MUM_CLOCK_ID_FPGA 0x0 /* enum */
-#define MC_CMD_MUM_CLOCK_ID_DDR 0x1 /* enum */
-#define MC_CMD_MUM_CLOCK_ID_NIC 0x2 /* enum */
-/* Control flags for clock programming */
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_OFST 8
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_LEN 4
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_OFST 8
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_LBN 0
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_WIDTH 1
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_OFST 8
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_LBN 1
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_WIDTH 1
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_OFST 8
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_LBN 2
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_WIDTH 1
-
-/* MC_CMD_MUM_IN_FPGA_LOAD msgrequest */
-#define MC_CMD_MUM_IN_FPGA_LOAD_LEN 8
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-/* Enable/Disable FPGA config from flash */
-#define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_OFST 4
-#define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_LEN 4
-
-/* MC_CMD_MUM_IN_READ_ATB_SENSOR msgrequest */
-#define MC_CMD_MUM_IN_READ_ATB_SENSOR_LEN 4
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-
-/* MC_CMD_MUM_IN_QSFP msgrequest */
-#define MC_CMD_MUM_IN_QSFP_LEN 12
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_QSFP_HDR_OFST 4
-#define MC_CMD_MUM_IN_QSFP_HDR_LEN 4
-#define MC_CMD_MUM_IN_QSFP_OPCODE_OFST 4
-#define MC_CMD_MUM_IN_QSFP_OPCODE_LBN 0
-#define MC_CMD_MUM_IN_QSFP_OPCODE_WIDTH 4
-#define MC_CMD_MUM_IN_QSFP_INIT 0x0 /* enum */
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE 0x1 /* enum */
-#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP 0x2 /* enum */
-#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO 0x3 /* enum */
-#define MC_CMD_MUM_IN_QSFP_FILL_STATS 0x4 /* enum */
-#define MC_CMD_MUM_IN_QSFP_POLL_BIST 0x5 /* enum */
-#define MC_CMD_MUM_IN_QSFP_IDX_OFST 8
-#define MC_CMD_MUM_IN_QSFP_IDX_LEN 4
-
-/* MC_CMD_MUM_IN_QSFP_INIT msgrequest */
-#define MC_CMD_MUM_IN_QSFP_INIT_LEN 16
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_QSFP_INIT_HDR_OFST 4
-#define MC_CMD_MUM_IN_QSFP_INIT_HDR_LEN 4
-#define MC_CMD_MUM_IN_QSFP_INIT_IDX_OFST 8
-#define MC_CMD_MUM_IN_QSFP_INIT_IDX_LEN 4
-#define MC_CMD_MUM_IN_QSFP_INIT_CAGE_OFST 12
-#define MC_CMD_MUM_IN_QSFP_INIT_CAGE_LEN 4
-
-/* MC_CMD_MUM_IN_QSFP_RECONFIGURE msgrequest */
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_LEN 24
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_OFST 4
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_LEN 4
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_OFST 8
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_LEN 4
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_OFST 12
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_LEN 4
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_OFST 16
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_LEN 4
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_OFST 20
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_LEN 4
-
-/* MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP msgrequest */
-#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_LEN 12
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_OFST 4
-#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_LEN 4
-#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_OFST 8
-#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_LEN 4
-
-/* MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO msgrequest */
-#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_LEN 16
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_OFST 4
-#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_LEN 4
-#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_OFST 8
-#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_LEN 4
-#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_OFST 12
-#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_LEN 4
-
-/* MC_CMD_MUM_IN_QSFP_FILL_STATS msgrequest */
-#define MC_CMD_MUM_IN_QSFP_FILL_STATS_LEN 12
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_OFST 4
-#define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_LEN 4
-#define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_OFST 8
-#define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_LEN 4
-
-/* MC_CMD_MUM_IN_QSFP_POLL_BIST msgrequest */
-#define MC_CMD_MUM_IN_QSFP_POLL_BIST_LEN 12
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_OFST 4
-#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_LEN 4
-#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_OFST 8
-#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_LEN 4
-
-/* MC_CMD_MUM_IN_READ_DDR_INFO msgrequest */
-#define MC_CMD_MUM_IN_READ_DDR_INFO_LEN 4
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-
-/* MC_CMD_MUM_OUT msgresponse */
-#define MC_CMD_MUM_OUT_LEN 0
-
-/* MC_CMD_MUM_OUT_NULL msgresponse */
-#define MC_CMD_MUM_OUT_NULL_LEN 0
-
-/* MC_CMD_MUM_OUT_GET_VERSION msgresponse */
-#define MC_CMD_MUM_OUT_GET_VERSION_LEN 12
-#define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_OFST 0
-#define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_LEN 4
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_OFST 4
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LEN 8
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_OFST 4
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_LEN 4
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_LBN 32
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_WIDTH 32
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_OFST 8
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_LEN 4
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_LBN 64
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_WIDTH 32
-
-/* MC_CMD_MUM_OUT_RAW_CMD msgresponse */
-#define MC_CMD_MUM_OUT_RAW_CMD_LENMIN 1
-#define MC_CMD_MUM_OUT_RAW_CMD_LENMAX 252
-#define MC_CMD_MUM_OUT_RAW_CMD_LENMAX_MCDI2 1020
-#define MC_CMD_MUM_OUT_RAW_CMD_LEN(num) (0+1*(num))
-#define MC_CMD_MUM_OUT_RAW_CMD_DATA_NUM(len) (((len)-0)/1)
-/* returned data */
-#define MC_CMD_MUM_OUT_RAW_CMD_DATA_OFST 0
-#define MC_CMD_MUM_OUT_RAW_CMD_DATA_LEN 1
-#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MINNUM 1
-#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MAXNUM 252
-#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MAXNUM_MCDI2 1020
-
-/* MC_CMD_MUM_OUT_READ msgresponse */
-#define MC_CMD_MUM_OUT_READ_LENMIN 4
-#define MC_CMD_MUM_OUT_READ_LENMAX 252
-#define MC_CMD_MUM_OUT_READ_LENMAX_MCDI2 1020
-#define MC_CMD_MUM_OUT_READ_LEN(num) (0+4*(num))
-#define MC_CMD_MUM_OUT_READ_BUFFER_NUM(len) (((len)-0)/4)
-#define MC_CMD_MUM_OUT_READ_BUFFER_OFST 0
-#define MC_CMD_MUM_OUT_READ_BUFFER_LEN 4
-#define MC_CMD_MUM_OUT_READ_BUFFER_MINNUM 1
-#define MC_CMD_MUM_OUT_READ_BUFFER_MAXNUM 63
-#define MC_CMD_MUM_OUT_READ_BUFFER_MAXNUM_MCDI2 255
-
-/* MC_CMD_MUM_OUT_WRITE msgresponse */
-#define MC_CMD_MUM_OUT_WRITE_LEN 0
-
-/* MC_CMD_MUM_OUT_LOG msgresponse */
-#define MC_CMD_MUM_OUT_LOG_LEN 0
-
-/* MC_CMD_MUM_OUT_LOG_OP_UART msgresponse */
-#define MC_CMD_MUM_OUT_LOG_OP_UART_LEN 0
-
-/* MC_CMD_MUM_OUT_GPIO_IN_READ msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_IN_READ_LEN 8
-/* The first 32-bit word read from the GPIO IN register. */
-#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_OFST 0
-#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_LEN 4
-/* The second 32-bit word read from the GPIO IN register. */
-#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_OFST 4
-#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_LEN 4
-
-/* MC_CMD_MUM_OUT_GPIO_OUT_WRITE msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_OUT_WRITE_LEN 0
-
-/* MC_CMD_MUM_OUT_GPIO_OUT_READ msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_OUT_READ_LEN 8
-/* The first 32-bit word read from the GPIO OUT register. */
-#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_OFST 0
-#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_LEN 4
-/* The second 32-bit word read from the GPIO OUT register. */
-#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_OFST 4
-#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_LEN 4
-
-/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE_LEN 0
-
-/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_LEN 8
-#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_OFST 0
-#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_LEN 4
-#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_OFST 4
-#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_LEN 4
-
-/* MC_CMD_MUM_OUT_GPIO_OP_OUT_READ msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_LEN 4
-#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_OFST 0
-#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_LEN 4
-
-/* MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE_LEN 0
-
-/* MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG_LEN 0
-
-/* MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE_LEN 0
-
-/* MC_CMD_MUM_OUT_READ_SENSORS msgresponse */
-#define MC_CMD_MUM_OUT_READ_SENSORS_LENMIN 4
-#define MC_CMD_MUM_OUT_READ_SENSORS_LENMAX 252
-#define MC_CMD_MUM_OUT_READ_SENSORS_LENMAX_MCDI2 1020
-#define MC_CMD_MUM_OUT_READ_SENSORS_LEN(num) (0+4*(num))
-#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_NUM(len) (((len)-0)/4)
-#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_OFST 0
-#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_LEN 4
-#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MINNUM 1
-#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MAXNUM 63
-#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MAXNUM_MCDI2 255
-#define MC_CMD_MUM_OUT_READ_SENSORS_READING_OFST 0
-#define MC_CMD_MUM_OUT_READ_SENSORS_READING_LBN 0
-#define MC_CMD_MUM_OUT_READ_SENSORS_READING_WIDTH 16
-#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_OFST 0
-#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_LBN 16
-#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_WIDTH 8
-#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_OFST 0
-#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_LBN 24
-#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_WIDTH 8
-
-/* MC_CMD_MUM_OUT_PROGRAM_CLOCKS msgresponse */
-#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_LEN 4
-#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_OFST 0
-#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_LEN 4
-
-/* MC_CMD_MUM_OUT_FPGA_LOAD msgresponse */
-#define MC_CMD_MUM_OUT_FPGA_LOAD_LEN 0
-
-/* MC_CMD_MUM_OUT_READ_ATB_SENSOR msgresponse */
-#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_LEN 4
-#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_OFST 0
-#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_LEN 4
-
-/* MC_CMD_MUM_OUT_QSFP_INIT msgresponse */
-#define MC_CMD_MUM_OUT_QSFP_INIT_LEN 0
-
-/* MC_CMD_MUM_OUT_QSFP_RECONFIGURE msgresponse */
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_LEN 8
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_OFST 0
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_LEN 4
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_OFST 4
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_LEN 4
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_OFST 4
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_LBN 0
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_WIDTH 1
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_OFST 4
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_LBN 1
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_WIDTH 1
-
-/* MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP msgresponse */
-#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_LEN 4
-#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_OFST 0
-#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_LEN 4
-
-/* MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO msgresponse */
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMIN 5
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMAX 252
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMAX_MCDI2 1020
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LEN(num) (4+1*(num))
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_NUM(len) (((len)-4)/1)
-/* in bytes */
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_OFST 0
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_LEN 4
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_OFST 4
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_LEN 1
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MINNUM 1
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MAXNUM 248
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MAXNUM_MCDI2 1016
-
-/* MC_CMD_MUM_OUT_QSFP_FILL_STATS msgresponse */
-#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_LEN 8
-#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_OFST 0
-#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_LEN 4
-#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_OFST 4
-#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_LEN 4
-
-/* MC_CMD_MUM_OUT_QSFP_POLL_BIST msgresponse */
-#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_LEN 4
-#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_OFST 0
-#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_LEN 4
-
-/* MC_CMD_MUM_OUT_READ_DDR_INFO msgresponse */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMIN 24
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMAX 248
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMAX_MCDI2 1016
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_LEN(num) (8+8*(num))
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_NUM(len) (((len)-8)/8)
-/* Discrete (soldered) DDR resistor strap info */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_OFST 0
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_LEN 4
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_OFST 0
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_LBN 0
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_WIDTH 16
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_OFST 0
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_LBN 16
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_WIDTH 16
-/* Number of SODIMM info records */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_OFST 4
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_LEN 4
-/* Array of SODIMM info records */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LEN 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_LEN 4
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_LBN 64
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_WIDTH 32
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_OFST 12
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_LEN 4
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_LBN 96
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_WIDTH 32
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MINNUM 2
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MAXNUM 30
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MAXNUM_MCDI2 126
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_LBN 0
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_WIDTH 8
-/* enum: SODIMM bank 1 (Top SODIMM for Sorrento) */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK1 0x0
-/* enum: SODIMM bank 2 (Bottom SODDIMM for Sorrento) */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK2 0x1
-/* enum: Total number of SODIMM banks */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_BANKS 0x2
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_LBN 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_WIDTH 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_LBN 16
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_WIDTH 4
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_LBN 20
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_WIDTH 4
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_POWERED 0x0 /* enum */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V25 0x1 /* enum */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V35 0x2 /* enum */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V5 0x3 /* enum */
-/* enum: Values 5-15 are reserved for future usage */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V8 0x4
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_LBN 24
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_WIDTH 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_LBN 32
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_WIDTH 16
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_LBN 48
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_WIDTH 4
-/* enum: No module present */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_ABSENT 0x0
-/* enum: Module present supported and powered on */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_POWERED 0x1
-/* enum: Module present but bad type */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_TYPE 0x2
-/* enum: Module present but incompatible voltage */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_VOLTAGE 0x3
-/* enum: Module present but unknown SPD */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_SPD 0x4
-/* enum: Module present but slot cannot support it */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_SLOT 0x5
-/* enum: Modules may or may not be present, but cannot establish contact by I2C
- */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_REACHABLE 0x6
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_LBN 52
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_WIDTH 12
-
/* MC_CMD_DYNAMIC_SENSORS_LIMITS structuredef: Set of sensor limits. This
* should match the equivalent structure in the sensor_query SPHINX service.
*/
@@ -9500,27 +9137,22 @@
* and a generation count for this version of the sensor table. On systems
* advertising the DYNAMIC_SENSORS capability bit, this replaces the
* MC_CMD_READ_SENSORS command. On multi-MC systems this may include sensors
- * added by the NMC.
- *
- * Sensor handles are persistent for the lifetime of the sensor and are used to
- * identify sensors in MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS and
- * MC_CMD_DYNAMIC_SENSORS_GET_VALUES.
- *
- * The generation count is maintained by the MC, is persistent across reboots
- * and will be incremented each time the sensor table is modified. When the
- * table is modified, a CODE_DYNAMIC_SENSORS_CHANGE event will be generated
- * containing the new generation count. The driver should compare this against
- * the current generation count, and if it is different, call
- * MC_CMD_DYNAMIC_SENSORS_LIST again to update it's copy of the sensor table.
- *
- * The sensor count is provided to allow a future path to supporting more than
+ * added by the NMC. Sensor handles are persistent for the lifetime of the
+ * sensor and are used to identify sensors in
+ * MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS and
+ * MC_CMD_DYNAMIC_SENSORS_GET_VALUES. The generation count is maintained by the
+ * MC, is persistent across reboots and will be incremented each time the
+ * sensor table is modified. When the table is modified, a
+ * CODE_DYNAMIC_SENSORS_CHANGE event will be generated containing the new
+ * generation count. The driver should compare this against the current
+ * generation count, and if it is different, call MC_CMD_DYNAMIC_SENSORS_LIST
+ * again to update its copy of the sensor table. The sensor count is provided
+ * to allow a future path to supporting more than
* MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_MAXNUM_MCDI2 sensors, i.e.
* the maximum number that will fit in a single response. As this is a fairly
* large number (253) it is not anticipated that this will be needed in the
- * near future, so can currently be ignored.
- *
- * On Riverhead this command is implemented as a wrapper for `list` in the
- * sensor_query SPHINX service.
+ * near future, so can currently be ignored. On Riverhead this command is
+ * implemented as a wrapper for `list` in the sensor_query SPHINX service.
*/
#define MC_CMD_DYNAMIC_SENSORS_LIST 0x66
#undef MC_CMD_0x66_PRIVILEGE_CTG
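/*
 * Illustrative sketch (not part of the generated header): acting on the
 * generation count described above. The struct, its field names and the
 * "stale" flag are hypothetical driver-side state; only the
 * CODE_DYNAMIC_SENSORS_CHANGE event and the MC_CMD_DYNAMIC_SENSORS_LIST
 * command come from the header text.
 */
#include <stdbool.h>
#include <stdint.h>

struct sensor_table_state {
	uint32_t generation;	/* generation of the last copied sensor table */
	bool stale;		/* set when the MC advertises a newer generation */
};

/* Called when a CODE_DYNAMIC_SENSORS_CHANGE event delivers a new count. */
static void dynamic_sensors_note_change(struct sensor_table_state *st,
					uint32_t new_gen)
{
	if (new_gen != st->generation)
		st->stale = true;	/* re-issue MC_CMD_DYNAMIC_SENSORS_LIST (0x66) */
}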
@@ -9557,15 +9189,13 @@
/***********************************/
/* MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS
* Get descriptions for a set of sensors, specified as an array of sensor
- * handles as returned by MC_CMD_DYNAMIC_SENSORS_LIST
- *
- * Any handles which do not correspond to a sensor currently managed by the MC
- * will be dropped from from the response. This may happen when a sensor table
- * update is in progress, and effectively means the set of usable sensors is
- * the intersection between the sets of sensors known to the driver and the MC.
- *
- * On Riverhead this command is implemented as a wrapper for
- * `get_descriptions` in the sensor_query SPHINX service.
+ * handles as returned by MC_CMD_DYNAMIC_SENSORS_LIST. Any handles which do not
+ * correspond to a sensor currently managed by the MC will be dropped from
+ * the response. This may happen when a sensor table update is in progress, and
+ * effectively means the set of usable sensors is the intersection between the
+ * sets of sensors known to the driver and the MC. On Riverhead this command is
+ * implemented as a wrapper for `get_descriptions` in the sensor_query SPHINX
+ * service.
*/
#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS 0x67
#undef MC_CMD_0x67_PRIVILEGE_CTG
@@ -9602,19 +9232,15 @@
/***********************************/
/* MC_CMD_DYNAMIC_SENSORS_GET_READINGS
* Read the state and value for a set of sensors, specified as an array of
- * sensor handles as returned by MC_CMD_DYNAMIC_SENSORS_LIST.
- *
- * In the case of a broken sensor, then the state of the response's
- * MC_CMD_DYNAMIC_SENSORS_VALUE entry will be set to BROKEN, and any value
- * provided should be treated as erroneous.
- *
- * Any handles which do not correspond to a sensor currently managed by the MC
- * will be dropped from from the response. This may happen when a sensor table
- * update is in progress, and effectively means the set of usable sensors is
- * the intersection between the sets of sensors known to the driver and the MC.
- *
- * On Riverhead this command is implemented as a wrapper for `get_readings`
- * in the sensor_query SPHINX service.
+ * sensor handles as returned by MC_CMD_DYNAMIC_SENSORS_LIST. In the case of a
+ * broken sensor, then the state of the response's MC_CMD_DYNAMIC_SENSORS_VALUE
+ * entry will be set to BROKEN, and any value provided should be treated as
+ * erroneous. Any handles which do not correspond to a sensor currently managed
+ * by the MC will be dropped from the response. This may happen when a
+ * sensor table update is in progress, and effectively means the set of usable
+ * sensors is the intersection between the sets of sensors known to the driver
+ * and the MC. On Riverhead this command is implemented as a wrapper for
+ * `get_readings` in the sensor_query SPHINX service.
*/
#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS 0x68
#undef MC_CMD_0x68_PRIVILEGE_CTG
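/*
 * Illustrative sketch (not part of the generated header): matching readings
 * returned by MC_CMD_DYNAMIC_SENSORS_GET_READINGS back to the requested
 * handles. Handles absent from the response were dropped by the MC, so only
 * the intersection is usable; readings in the BROKEN state must be ignored.
 * The struct layout below is a simplified placeholder, not the MCDI wire
 * format.
 */
#include <stddef.h>
#include <stdint.h>

struct sensor_reading {
	uint32_t handle;	/* as returned by MC_CMD_DYNAMIC_SENSORS_LIST */
	uint32_t state;		/* e.g. BROKEN => value is erroneous */
	int32_t value;
};

static const struct sensor_reading *
find_reading(const struct sensor_reading *vals, size_t nvals, uint32_t handle)
{
	size_t i;

	for (i = 0; i < nvals; i++)
		if (vals[i].handle == handle)
			return &vals[i];
	return NULL;	/* sensor no longer managed by the MC */
}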
@@ -9647,45 +9273,1286 @@
#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_MAXNUM 21
#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_MAXNUM_MCDI2 85
+/* MC_CMD_MAC_FLAGS structuredef */
+#define MC_CMD_MAC_FLAGS_LEN 4
+/* The enums defined in this field are used as indices into the
+ * MC_CMD_MAC_FLAGS bitmask.
+ */
+#define MC_CMD_MAC_FLAGS_MASK_OFST 0
+#define MC_CMD_MAC_FLAGS_MASK_LEN 4
+/* enum property: bitshift */
+/* enum: Include the FCS in the packet data delivered to the host. Ignored if
+ * RX_INCLUDE_FCS not set in capabilities.
+ */
+#define MC_CMD_MAC_FLAGS_FLAG_INCLUDE_FCS 0x0
+#define MC_CMD_MAC_FLAGS_MASK_LBN 0
+#define MC_CMD_MAC_FLAGS_MASK_WIDTH 32
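/*
 * Illustrative sketch (not part of the generated header): fields annotated
 * "enum property: bitshift" carry bit positions rather than values, so a
 * driver builds the 32-bit MASK word by shifting. Only
 * MC_CMD_MAC_FLAGS_FLAG_INCLUDE_FCS is taken from the definitions above.
 */
#include <stdint.h>

static inline uint32_t mac_flags_with_fcs(void)
{
	return 1u << MC_CMD_MAC_FLAGS_FLAG_INCLUDE_FCS;	/* bit 0 of MASK */
}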
+
+/* MC_CMD_TRANSMISSION_MODE structuredef */
+#define MC_CMD_TRANSMISSION_MODE_LEN 4
+#define MC_CMD_TRANSMISSION_MODE_MASK_OFST 0
+#define MC_CMD_TRANSMISSION_MODE_MASK_LEN 4
+/* enum property: value */
+#define MC_CMD_TRANSMISSION_MODE_PROMSC_MODE 0x0 /* enum */
+#define MC_CMD_TRANSMISSION_MODE_UNCST_MODE 0x1 /* enum */
+#define MC_CMD_TRANSMISSION_MODE_BRDCST_MODE 0x2 /* enum */
+#define MC_CMD_TRANSMISSION_MODE_MASK_LBN 0
+#define MC_CMD_TRANSMISSION_MODE_MASK_WIDTH 32
+
+/* MC_CMD_MAC_CONFIG_OPTIONS structuredef */
+#define MC_CMD_MAC_CONFIG_OPTIONS_LEN 4
+#define MC_CMD_MAC_CONFIG_OPTIONS_MASK_OFST 0
+#define MC_CMD_MAC_CONFIG_OPTIONS_MASK_LEN 4
+/* enum property: bitmask */
+/* enum: Configure the MAC address. */
+#define MC_CMD_MAC_CONFIG_OPTIONS_CFG_ADDR 0x0
+/* enum: Configure the maximum frame length. */
+#define MC_CMD_MAC_CONFIG_OPTIONS_CFG_MAX_FRAME_LEN 0x1
+/* enum: Configure flow control. */
+#define MC_CMD_MAC_CONFIG_OPTIONS_CFG_FCNTL 0x2
+/* enum: Configure the transmission mode. */
+#define MC_CMD_MAC_CONFIG_OPTIONS_CFG_TRANSMISSION_MODE 0x3
+/* enum: Configure FCS. */
+#define MC_CMD_MAC_CONFIG_OPTIONS_CFG_INCLUDE_FCS 0x4
+#define MC_CMD_MAC_CONFIG_OPTIONS_MASK_LBN 0
+#define MC_CMD_MAC_CONFIG_OPTIONS_MASK_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_MAC_CTRL
+ * Set MAC configuration. Return code: 0, EINVAL, ENOTSUP
+ */
+#define MC_CMD_MAC_CTRL 0x1df
+#undef MC_CMD_0x1df_PRIVILEGE_CTG
+
+#define MC_CMD_0x1df_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_MAC_CTRL_IN msgrequest */
+#define MC_CMD_MAC_CTRL_IN_LEN 32
+/* Handle for selected network port. */
+#define MC_CMD_MAC_CTRL_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_MAC_CTRL_IN_PORT_HANDLE_LEN 4
+/* Select which parameters to configure. A parameter will only be modified if
+ * the corresponding control flag is set.
+ */
+#define MC_CMD_MAC_CTRL_IN_CONTROL_FLAGS_OFST 4
+#define MC_CMD_MAC_CTRL_IN_CONTROL_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_MAC_CONFIG_OPTIONS/MASK */
+/* MAC address of the device. */
+#define MC_CMD_MAC_CTRL_IN_ADDR_OFST 8
+#define MC_CMD_MAC_CTRL_IN_ADDR_LEN 8
+#define MC_CMD_MAC_CTRL_IN_ADDR_LO_OFST 8
+#define MC_CMD_MAC_CTRL_IN_ADDR_LO_LEN 4
+#define MC_CMD_MAC_CTRL_IN_ADDR_LO_LBN 64
+#define MC_CMD_MAC_CTRL_IN_ADDR_LO_WIDTH 32
+#define MC_CMD_MAC_CTRL_IN_ADDR_HI_OFST 12
+#define MC_CMD_MAC_CTRL_IN_ADDR_HI_LEN 4
+#define MC_CMD_MAC_CTRL_IN_ADDR_HI_LBN 96
+#define MC_CMD_MAC_CTRL_IN_ADDR_HI_WIDTH 32
+/* Includes the ethernet header, optional VLAN tags, payload and FCS. */
+#define MC_CMD_MAC_CTRL_IN_MAX_FRAME_LEN_OFST 16
+#define MC_CMD_MAC_CTRL_IN_MAX_FRAME_LEN_LEN 4
+/* Settings for flow control. */
+#define MC_CMD_MAC_CTRL_IN_FCNTL_OFST 20
+#define MC_CMD_MAC_CTRL_IN_FCNTL_LEN 4
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_FCNTL/MASK */
+/* Configure the MAC to transmit in one of promiscuous, unicast or broadcast
+ * mode.
+ */
+#define MC_CMD_MAC_CTRL_IN_TRANSMISSION_MODE_OFST 24
+#define MC_CMD_MAC_CTRL_IN_TRANSMISSION_MODE_LEN 4
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_TRANSMISSION_MODE/MASK */
+/* Flags to control and expand the configuration of the MAC. */
+#define MC_CMD_MAC_CTRL_IN_FLAGS_OFST 28
+#define MC_CMD_MAC_CTRL_IN_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_MAC_FLAGS/MASK */
+
+/* MC_CMD_MAC_CTRL_IN_V2 msgrequest: Updated MAC_CTRL with QBB mask */
+#define MC_CMD_MAC_CTRL_IN_V2_LEN 33
+/* Handle for selected network port. */
+#define MC_CMD_MAC_CTRL_IN_V2_PORT_HANDLE_OFST 0
+#define MC_CMD_MAC_CTRL_IN_V2_PORT_HANDLE_LEN 4
+/* Select which parameters to configure. A parameter will only be modified if
+ * the corresponding control flag is set.
+ */
+#define MC_CMD_MAC_CTRL_IN_V2_CONTROL_FLAGS_OFST 4
+#define MC_CMD_MAC_CTRL_IN_V2_CONTROL_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_MAC_CONFIG_OPTIONS/MASK */
+/* MAC address of the device. */
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_OFST 8
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_LEN 8
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_LO_OFST 8
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_LO_LEN 4
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_LO_LBN 64
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_LO_WIDTH 32
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_HI_OFST 12
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_HI_LEN 4
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_HI_LBN 96
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_HI_WIDTH 32
+/* Includes the ethernet header, optional VLAN tags, payload and FCS. */
+#define MC_CMD_MAC_CTRL_IN_V2_MAX_FRAME_LEN_OFST 16
+#define MC_CMD_MAC_CTRL_IN_V2_MAX_FRAME_LEN_LEN 4
+/* Settings for flow control. */
+#define MC_CMD_MAC_CTRL_IN_V2_FCNTL_OFST 20
+#define MC_CMD_MAC_CTRL_IN_V2_FCNTL_LEN 4
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_FCNTL/MASK */
+/* Configure the MAC to transmit in one of promiscuous, unicast or broadcast
+ * mode.
+ */
+#define MC_CMD_MAC_CTRL_IN_V2_TRANSMISSION_MODE_OFST 24
+#define MC_CMD_MAC_CTRL_IN_V2_TRANSMISSION_MODE_LEN 4
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_TRANSMISSION_MODE/MASK */
+/* Flags to control and expand the configuration of the MAC. */
+#define MC_CMD_MAC_CTRL_IN_V2_FLAGS_OFST 28
+#define MC_CMD_MAC_CTRL_IN_V2_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_MAC_FLAGS/MASK */
+/* Priority-based flow control mask (QBB). PRIO7 corresponds to the highest
+ * priority, and PRIO0 to the lowest. This field is only used when CFG_FCNTL is
+ * set and FCNTL is QBB.
+ */
+#define MC_CMD_MAC_CTRL_IN_V2_PRIO_FCNTL_MASK_OFST 32
+#define MC_CMD_MAC_CTRL_IN_V2_PRIO_FCNTL_MASK_LEN 1
+/* enum property: bitmask */
+#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO0 0x0 /* enum */
+#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO1 0x1 /* enum */
+#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO2 0x2 /* enum */
+#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO3 0x3 /* enum */
+#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO4 0x4 /* enum */
+#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO5 0x5 /* enum */
+#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO6 0x6 /* enum */
+#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO7 0x7 /* enum */
+
+/* MC_CMD_MAC_CTRL_OUT msgresponse */
+#define MC_CMD_MAC_CTRL_OUT_LEN 0
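/*
 * Illustrative sketch (not part of the generated header): assembling an
 * MC_CMD_MAC_CTRL request that changes only the maximum frame length. Only
 * parameters whose CONTROL_FLAGS bit is set are modified. The put_le32()
 * helper and the mcdi_rpc() transport call are hypothetical; all offsets,
 * lengths and bit positions come from the definitions above.
 */
#include <stddef.h>
#include <stdint.h>

/* hypothetical MCDI transport, not part of this header */
extern int mcdi_rpc(unsigned int cmd, const uint8_t *inbuf, size_t inlen,
		    uint8_t *outbuf, size_t outlen);

static void put_le32(uint8_t *buf, unsigned int ofst, uint32_t val)
{
	buf[ofst + 0] = val & 0xff;
	buf[ofst + 1] = (val >> 8) & 0xff;
	buf[ofst + 2] = (val >> 16) & 0xff;
	buf[ofst + 3] = (val >> 24) & 0xff;
}

static int mac_ctrl_set_max_frame_len(uint32_t port_handle,
				      uint32_t max_frame_len)
{
	uint8_t inbuf[MC_CMD_MAC_CTRL_IN_LEN] = { 0 };

	put_le32(inbuf, MC_CMD_MAC_CTRL_IN_PORT_HANDLE_OFST, port_handle);
	/* CONTROL_FLAGS is "bitshift": set only the CFG_MAX_FRAME_LEN bit */
	put_le32(inbuf, MC_CMD_MAC_CTRL_IN_CONTROL_FLAGS_OFST,
		 1u << MC_CMD_MAC_CONFIG_OPTIONS_CFG_MAX_FRAME_LEN);
	put_le32(inbuf, MC_CMD_MAC_CTRL_IN_MAX_FRAME_LEN_OFST, max_frame_len);

	return mcdi_rpc(MC_CMD_MAC_CTRL, inbuf, sizeof(inbuf), NULL, 0);
}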
+
+
+/***********************************/
+/* MC_CMD_MAC_STATE
+ * Read the MAC state. Return code: 0, ETIME.
+ */
+#define MC_CMD_MAC_STATE 0x1e0
+#undef MC_CMD_0x1e0_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e0_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_MAC_STATE_IN msgrequest */
+#define MC_CMD_MAC_STATE_IN_LEN 4
+/* Handle for selected network port. */
+#define MC_CMD_MAC_STATE_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_MAC_STATE_IN_PORT_HANDLE_LEN 4
+
+/* MC_CMD_MAC_STATE_OUT msgresponse */
+#define MC_CMD_MAC_STATE_OUT_LEN 32
+/* The configured maximum frame length of the MAC. */
+#define MC_CMD_MAC_STATE_OUT_MAX_FRAME_LEN_OFST 0
+#define MC_CMD_MAC_STATE_OUT_MAX_FRAME_LEN_LEN 4
+/* This returns the negotiated flow control value. */
+#define MC_CMD_MAC_STATE_OUT_FCNTL_OFST 4
+#define MC_CMD_MAC_STATE_OUT_FCNTL_LEN 4
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_FCNTL/MASK */
+/* MAC address of the device. */
+#define MC_CMD_MAC_STATE_OUT_ADDR_OFST 8
+#define MC_CMD_MAC_STATE_OUT_ADDR_LEN 8
+#define MC_CMD_MAC_STATE_OUT_ADDR_LO_OFST 8
+#define MC_CMD_MAC_STATE_OUT_ADDR_LO_LEN 4
+#define MC_CMD_MAC_STATE_OUT_ADDR_LO_LBN 64
+#define MC_CMD_MAC_STATE_OUT_ADDR_LO_WIDTH 32
+#define MC_CMD_MAC_STATE_OUT_ADDR_HI_OFST 12
+#define MC_CMD_MAC_STATE_OUT_ADDR_HI_LEN 4
+#define MC_CMD_MAC_STATE_OUT_ADDR_HI_LBN 96
+#define MC_CMD_MAC_STATE_OUT_ADDR_HI_WIDTH 32
+/* Flags indicating MAC faults. */
+#define MC_CMD_MAC_STATE_OUT_MAC_FAULT_FLAGS_OFST 16
+#define MC_CMD_MAC_STATE_OUT_MAC_FAULT_FLAGS_LEN 4
+/* enum property: bitshift */
+/* enum: Indicates a local MAC fault. */
+#define MC_CMD_MAC_STATE_OUT_LOCAL 0x0
+/* enum: Indicates a remote MAC fault. */
+#define MC_CMD_MAC_STATE_OUT_REMOTE 0x1
+/* enum: Indicates a pending reconfiguration of the MAC. */
+#define MC_CMD_MAC_STATE_OUT_PENDING_RECONFIG 0x2
+/* The flags that were used to configure the MAC. This is a copy of the FLAGS
+ * field in the MC_CMD_MAC_CTRL_IN command.
+ */
+#define MC_CMD_MAC_STATE_OUT_FLAGS_OFST 20
+#define MC_CMD_MAC_STATE_OUT_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_MAC_FLAGS/MASK */
+/* The transmission mode that was used to configure the MAC. This is a copy of
+ * the TRANSMISSION_MODE field in the MC_CMD_MAC_CTRL_IN command.
+ */
+#define MC_CMD_MAC_STATE_OUT_TRANSMISSION_MODE_OFST 24
+#define MC_CMD_MAC_STATE_OUT_TRANSMISSION_MODE_LEN 4
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_TRANSMISSION_MODE/MASK */
+/* The control flags that were used to configure the MAC. This is a copy of the
+ * CONTROL field in the MC_CMD_MAC_CTRL_IN command.
+ */
+#define MC_CMD_MAC_STATE_OUT_CONTROL_FLAGS_OFST 28
+#define MC_CMD_MAC_STATE_OUT_CONTROL_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_MAC_CONFIG_OPTIONS/MASK */
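/*
 * Illustrative sketch (not part of the generated header): decoding the
 * MAC_FAULT_FLAGS word from an MC_CMD_MAC_STATE response. LOCAL and REMOTE
 * are "bitshift" enums, i.e. bit positions within the flags word. get_le32()
 * is a hypothetical little-endian read helper.
 */
#include <stdbool.h>
#include <stdint.h>

extern uint32_t get_le32(const uint8_t *buf, unsigned int ofst);

static bool mac_state_has_fault(const uint8_t *outbuf /* MC_CMD_MAC_STATE_OUT */)
{
	uint32_t faults = get_le32(outbuf,
				   MC_CMD_MAC_STATE_OUT_MAC_FAULT_FLAGS_OFST);

	return faults & ((1u << MC_CMD_MAC_STATE_OUT_LOCAL) |
			 (1u << MC_CMD_MAC_STATE_OUT_REMOTE));
}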
+
+
+/***********************************/
+/* MC_CMD_GET_ASSIGNED_PORT_HANDLE
+ * Obtain a handle that can be operated on to configure and query the status of
+ * the link. ENOENT is returned when no port is assigned to the client. Return
+ * code: 0, ENOENT
+ */
+#define MC_CMD_GET_ASSIGNED_PORT_HANDLE 0x1e2
+#undef MC_CMD_0x1e2_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_ASSIGNED_PORT_HANDLE_IN msgrequest */
+#define MC_CMD_GET_ASSIGNED_PORT_HANDLE_IN_LEN 0
+
+/* MC_CMD_GET_ASSIGNED_PORT_HANDLE_OUT msgresponse */
+#define MC_CMD_GET_ASSIGNED_PORT_HANDLE_OUT_LEN 4
+/* Handle for assigned port. */
+#define MC_CMD_GET_ASSIGNED_PORT_HANDLE_OUT_PORT_HANDLE_OFST 0
+#define MC_CMD_GET_ASSIGNED_PORT_HANDLE_OUT_PORT_HANDLE_LEN 4
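/*
 * Illustrative sketch (not part of the generated header): fetching the port
 * handle that later commands (MC_CMD_MAC_CTRL, MC_CMD_MAC_STATE, ...) operate
 * on. get_le32() and mcdi_rpc() are hypothetical helpers; the command number
 * and response layout come from the definitions above.
 */
#include <stddef.h>
#include <stdint.h>

extern uint32_t get_le32(const uint8_t *buf, unsigned int ofst);
extern int mcdi_rpc(unsigned int cmd, const uint8_t *inbuf, size_t inlen,
		    uint8_t *outbuf, size_t outlen);

static int get_assigned_port_handle(uint32_t *port_handle)
{
	uint8_t outbuf[MC_CMD_GET_ASSIGNED_PORT_HANDLE_OUT_LEN];
	int rc;

	/* request payload is empty (IN_LEN is 0) */
	rc = mcdi_rpc(MC_CMD_GET_ASSIGNED_PORT_HANDLE, NULL, 0,
		      outbuf, sizeof(outbuf));
	if (rc)
		return rc;	/* e.g. ENOENT when no port is assigned */

	*port_handle = get_le32(outbuf,
				MC_CMD_GET_ASSIGNED_PORT_HANDLE_OUT_PORT_HANDLE_OFST);
	return 0;
}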
+
+/* MC_CMD_STAT_ID structuredef */
+#define MC_CMD_STAT_ID_LEN 4
+#define MC_CMD_STAT_ID_SOURCE_ID_OFST 0
+#define MC_CMD_STAT_ID_SOURCE_ID_LEN 2
+/* enum property: index */
+/* enum: Internal markers (generation start and end markers) */
+#define MC_CMD_STAT_ID_MARKER 0x1
+/* enum: Network port MAC statistics. */
+#define MC_CMD_STAT_ID_MAC 0x2
+/* enum: Network port PHY statistics. */
+#define MC_CMD_STAT_ID_PHY 0x3
+#define MC_CMD_STAT_ID_SOURCE_ID_LBN 0
+#define MC_CMD_STAT_ID_SOURCE_ID_WIDTH 16
+#define MC_CMD_STAT_ID_MARKER_STAT_ID_OFST 2
+#define MC_CMD_STAT_ID_MARKER_STAT_ID_LEN 2
+/* enum property: index */
+/* enum: This value is used to mark the start of a generation of statistics for
+ * DMA synchronization. It is incremented whenever a new set of statistics is
+ * transferred. Always the first entry in the DMA buffer.
+ */
+#define MC_CMD_STAT_ID_GENERATION_START 0x1
+/* enum: This value is used to mark the end of a generation of statistics for
+ * DMA synchronization. Always the last entry in the DMA buffer and set to the
+ * same value as GENERATION_START. The host driver must compare the
+ * GENERATION_START and GENERATION_END values to verify that the DMA buffer is
+ * consistent upon copying the DMA buffer. If they do not match, it means that
+ * a new DMA transfer has started while the host driver was copying the DMA
+ * buffer. In this case, the host driver must repeat the copy operation.
+ */
+#define MC_CMD_STAT_ID_GENERATION_END 0x2
+#define MC_CMD_STAT_ID_MARKER_STAT_ID_LBN 16
+#define MC_CMD_STAT_ID_MARKER_STAT_ID_WIDTH 16
+#define MC_CMD_STAT_ID_MAC_STAT_ID_OFST 2
+#define MC_CMD_STAT_ID_MAC_STAT_ID_LEN 2
+/* enum property: index */
+/* enum: Total number of packets transmitted (includes pause frames). */
+#define MC_CMD_STAT_ID_TX_PKTS 0x1
+/* enum: Pause frames transmitted. */
+#define MC_CMD_STAT_ID_TX_PAUSE_PKTS 0x2
+/* enum: Control frames transmitted. */
+#define MC_CMD_STAT_ID_TX_CONTROL_PKTS 0x3
+/* enum: Unicast packets transmitted (includes pause frames). */
+#define MC_CMD_STAT_ID_TX_UNICAST_PKTS 0x4
+/* enum: Multicast packets transmitted (includes pause frames). */
+#define MC_CMD_STAT_ID_TX_MULTICAST_PKTS 0x5
+/* enum: Broadcast packets transmitted (includes pause frames). */
+#define MC_CMD_STAT_ID_TX_BROADCAST_PKTS 0x6
+/* enum: Bytes transmitted (includes pause frames). */
+#define MC_CMD_STAT_ID_TX_BYTES 0x7
+/* enum: Bytes transmitted with bad CRC. */
+#define MC_CMD_STAT_ID_TX_BAD_BYTES 0x8
+/* enum: Bytes transmitted with good CRC. */
+#define MC_CMD_STAT_ID_TX_GOOD_BYTES 0x9
+/* enum: Packets transmitted with length less than 64 bytes. */
+#define MC_CMD_STAT_ID_TX_LT64_PKTS 0xa
+/* enum: Packets transmitted with length equal to 64 bytes. */
+#define MC_CMD_STAT_ID_TX_64_PKTS 0xb
+/* enum: Packets transmitted with length between 65 and 127 bytes. */
+#define MC_CMD_STAT_ID_TX_65_TO_127_PKTS 0xc
+/* enum: Packets transmitted with length between 128 and 255 bytes. */
+#define MC_CMD_STAT_ID_TX_128_TO_255_PKTS 0xd
+/* enum: Packets transmitted with length between 256 and 511 bytes. */
+#define MC_CMD_STAT_ID_TX_256_TO_511_PKTS 0xe
+/* enum: Packets transmitted with length between 512 and 1023 bytes. */
+#define MC_CMD_STAT_ID_TX_512_TO_1023_PKTS 0xf
+/* enum: Packets transmitted with length between 1024 and 1518 bytes. */
+#define MC_CMD_STAT_ID_TX_1024_TO_15XX_PKTS 0x10
+/* enum: Packets transmitted with length between 1519 and 9216 bytes. */
+#define MC_CMD_STAT_ID_TX_15XX_TO_JUMBO_PKTS 0x11
+/* enum: Packets transmitted with length greater than 9216 bytes. */
+#define MC_CMD_STAT_ID_TX_GTJUMBO_PKTS 0x12
+/* enum: Packets transmitted with bad FCS. */
+#define MC_CMD_STAT_ID_TX_BAD_FCS_PKTS 0x13
+/* enum: Packets transmitted with good FCS. */
+#define MC_CMD_STAT_ID_TX_GOOD_FCS_PKTS 0x14
+/* enum: Packets received. */
+#define MC_CMD_STAT_ID_RX_PKTS 0x15
+/* enum: Pause frames received. */
+#define MC_CMD_STAT_ID_RX_PAUSE_PKTS 0x16
+/* enum: Total number of good packets received. */
+#define MC_CMD_STAT_ID_RX_GOOD_PKTS 0x17
+/* enum: Total number of BAD packets received. */
+#define MC_CMD_STAT_ID_RX_BAD_PKTS 0x18
+/* enum: Total number of control frames received. */
+#define MC_CMD_STAT_ID_RX_CONTROL_PKTS 0x19
+/* enum: Total number of unicast packets received. */
+#define MC_CMD_STAT_ID_RX_UNICAST_PKTS 0x1a
+/* enum: Total number of multicast packets received. */
+#define MC_CMD_STAT_ID_RX_MULTICAST_PKTS 0x1b
+/* enum: Total number of broadcast packets received. */
+#define MC_CMD_STAT_ID_RX_BROADCAST_PKTS 0x1c
+/* enum: Total number of bytes received. */
+#define MC_CMD_STAT_ID_RX_BYTES 0x1d
+/* enum: Total number of bytes received with bad CRC. */
+#define MC_CMD_STAT_ID_RX_BAD_BYTES 0x1e
+/* enum: Total number of bytes received with GOOD CRC. */
+#define MC_CMD_STAT_ID_RX_GOOD_BYTES 0x1f
+/* enum: Packets received with length equal to 64 bytes. */
+#define MC_CMD_STAT_ID_RX_64_PKTS 0x20
+/* enum: Packets received with length between 65 and 127 bytes. */
+#define MC_CMD_STAT_ID_RX_65_TO_127_PKTS 0x21
+/* enum: Packets received with length between 128 and 255 bytes. */
+#define MC_CMD_STAT_ID_RX_128_TO_255_PKTS 0x22
+/* enum: Packets received with length between 256 and 511 bytes. */
+#define MC_CMD_STAT_ID_RX_256_TO_511_PKTS 0x23
+/* enum: Packets received with length between 512 and 1023 bytes. */
+#define MC_CMD_STAT_ID_RX_512_TO_1023_PKTS 0x24
+/* enum: Packets received with length between 1024 and 1518 bytes. */
+#define MC_CMD_STAT_ID_RX_1024_TO_15XX_PKTS 0x25
+/* enum: Packets received with length between 1519 and 9216 bytes. */
+#define MC_CMD_STAT_ID_RX_15XX_TO_JUMBO_PKTS 0x26
+/* enum: Packets received with length greater than 9216 bytes. */
+#define MC_CMD_STAT_ID_RX_GTJUMBO_PKTS 0x27
+/* enum: Packets received with length less than 64 bytes. */
+#define MC_CMD_STAT_ID_RX_UNDERSIZE_PKTS 0x28
+/* enum: Packets received with bad FCS. */
+#define MC_CMD_STAT_ID_RX_BAD_FCS_PKTS 0x29
+/* enum: Packets received with GOOD FCS. */
+#define MC_CMD_STAT_ID_RX_GOOD_FCS_PKTS 0x2a
+/* enum: Packets received with overflow. */
+#define MC_CMD_STAT_ID_RX_OVERFLOW_PKTS 0x2b
+/* enum: Packets received with symbol error. */
+#define MC_CMD_STAT_ID_RX_SYMBOL_ERROR_PKTS 0x2c
+/* enum: Packets received with alignment error. */
+#define MC_CMD_STAT_ID_RX_ALIGN_ERROR_PKTS 0x2d
+/* enum: Packets received with length error. */
+#define MC_CMD_STAT_ID_RX_LENGTH_ERROR_PKTS 0x2e
+/* enum: Packets received with internal error. */
+#define MC_CMD_STAT_ID_RX_INTERNAL_ERROR_PKTS 0x2f
+/* enum: Packets received with jabber. These packets are larger than the
+ * allowed maximum receive unit (MRU). This indicates that a packet either has
+ * a bad CRC or has an RX error.
+ */
+#define MC_CMD_STAT_ID_RX_JABBER_PKTS 0x30
+/* enum: Packets dropped due to having no descriptor. This is a datapath stat.
+ */
+#define MC_CMD_STAT_ID_RX_NODESC_DROPS 0x31
+/* enum: Packets received with lanes 0 and 1 character error. */
+#define MC_CMD_STAT_ID_RX_LANES01_CHAR_ERR 0x32
+/* enum: Packets received with lanes 2 and 3 character error. */
+#define MC_CMD_STAT_ID_RX_LANES23_CHAR_ERR 0x33
+/* enum: Packets received with lanes 0 and 1 disparity error. */
+#define MC_CMD_STAT_ID_RX_LANES01_DISP_ERR 0x34
+/* enum: Packets received with lanes 2 and 3 disparity error. */
+#define MC_CMD_STAT_ID_RX_LANES23_DISP_ERR 0x35
+/* enum: Packets received with match fault. */
+#define MC_CMD_STAT_ID_RX_MATCH_FAULT 0x36
+#define MC_CMD_STAT_ID_MAC_STAT_ID_LBN 16
+#define MC_CMD_STAT_ID_MAC_STAT_ID_WIDTH 16
+/* Include FEC stats. */
+#define MC_CMD_STAT_ID_PHY_STAT_ID_OFST 2
+#define MC_CMD_STAT_ID_PHY_STAT_ID_LEN 2
+/* enum property: index */
+/* enum: Number of uncorrected FEC codewords on link (RS-FEC only from Medford2
+ * onwards)
+ */
+#define MC_CMD_STAT_ID_FEC_UNCORRECTED_ERRORS 0x1
+/* enum: Number of corrected FEC codewords on link (RS-FEC only from Medford2
+ * onwards)
+ */
+#define MC_CMD_STAT_ID_FEC_CORRECTED_ERRORS 0x2
+/* enum: Number of corrected 10-bit symbol errors, lane 0 (RS-FEC only) */
+#define MC_CMD_STAT_ID_FEC_CORRECTED_SYMBOLS_LANE0 0x3
+/* enum: Number of corrected 10-bit symbol errors, lane 1 (RS-FEC only) */
+#define MC_CMD_STAT_ID_FEC_CORRECTED_SYMBOLS_LANE1 0x4
+/* enum: Number of corrected 10-bit symbol errors, lane 2 (RS-FEC only) */
+#define MC_CMD_STAT_ID_FEC_CORRECTED_SYMBOLS_LANE2 0x5
+/* enum: Number of corrected 10-bit symbol errors, lane 3 (RS-FEC only) */
+#define MC_CMD_STAT_ID_FEC_CORRECTED_SYMBOLS_LANE3 0x6
+#define MC_CMD_STAT_ID_PHY_STAT_ID_LBN 16
+#define MC_CMD_STAT_ID_PHY_STAT_ID_WIDTH 16
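/*
 * Illustrative sketch (not part of the generated header): the consistent-copy
 * loop implied by the GENERATION_START / GENERATION_END markers above. The
 * marker offsets would be taken from the MC_CMD_STAT_DESC entries returned by
 * MC_CMD_MAC_STATISTICS_DESCRIPTOR; the 64-bit entry width is an assumption.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static void copy_stats_consistent(const uint8_t *dma_buf, size_t len,
				  uint8_t *snapshot,
				  size_t gen_start_off, size_t gen_end_off)
{
	uint64_t gen_start, gen_end;

	do {
		memcpy(snapshot, dma_buf, len);
		memcpy(&gen_start, snapshot + gen_start_off, sizeof(gen_start));
		memcpy(&gen_end, snapshot + gen_end_off, sizeof(gen_end));
		/* a mismatch means a new DMA transfer raced with the copy */
	} while (gen_start != gen_end);
}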
+
+/* MC_CMD_STAT_DESC structuredef: Structure describing the layout and size of
+ * the stats DMA buffer descriptor.
+ */
+#define MC_CMD_STAT_DESC_LEN 8
+/* Unique identifier of the statistic. Formatted as MC_CMD_STAT_ID */
+#define MC_CMD_STAT_DESC_STAT_ID_OFST 0
+#define MC_CMD_STAT_DESC_STAT_ID_LEN 4
+#define MC_CMD_STAT_DESC_STAT_ID_LBN 0
+#define MC_CMD_STAT_DESC_STAT_ID_WIDTH 32
+/* See structuredef: MC_CMD_STAT_ID */
+#define MC_CMD_STAT_DESC_STAT_ID_SOURCE_ID_OFST 0
+#define MC_CMD_STAT_DESC_STAT_ID_SOURCE_ID_LEN 2
+#define MC_CMD_STAT_DESC_STAT_ID_SOURCE_ID_LBN 0
+#define MC_CMD_STAT_DESC_STAT_ID_SOURCE_ID_WIDTH 16
+#define MC_CMD_STAT_DESC_STAT_ID_MARKER_STAT_ID_OFST 2
+#define MC_CMD_STAT_DESC_STAT_ID_MARKER_STAT_ID_LEN 2
+#define MC_CMD_STAT_DESC_STAT_ID_MARKER_STAT_ID_LBN 16
+#define MC_CMD_STAT_DESC_STAT_ID_MARKER_STAT_ID_WIDTH 16
+#define MC_CMD_STAT_DESC_STAT_ID_MAC_STAT_ID_OFST 2
+#define MC_CMD_STAT_DESC_STAT_ID_MAC_STAT_ID_LEN 2
+#define MC_CMD_STAT_DESC_STAT_ID_MAC_STAT_ID_LBN 16
+#define MC_CMD_STAT_DESC_STAT_ID_MAC_STAT_ID_WIDTH 16
+#define MC_CMD_STAT_DESC_STAT_ID_PHY_STAT_ID_OFST 2
+#define MC_CMD_STAT_DESC_STAT_ID_PHY_STAT_ID_LEN 2
+#define MC_CMD_STAT_DESC_STAT_ID_PHY_STAT_ID_LBN 16
+#define MC_CMD_STAT_DESC_STAT_ID_PHY_STAT_ID_WIDTH 16
+/* Index of the statistic in the DMA buffer. */
+#define MC_CMD_STAT_DESC_STAT_INDEX_OFST 4
+#define MC_CMD_STAT_DESC_STAT_INDEX_LEN 2
+#define MC_CMD_STAT_DESC_STAT_INDEX_LBN 32
+#define MC_CMD_STAT_DESC_STAT_INDEX_WIDTH 16
+/* Reserved for future extension (e.g. flags field) - currently always 0. */
+#define MC_CMD_STAT_DESC_RESERVED_OFST 6
+#define MC_CMD_STAT_DESC_RESERVED_LEN 2
+#define MC_CMD_STAT_DESC_RESERVED_LBN 48
+#define MC_CMD_STAT_DESC_RESERVED_WIDTH 16
+
+
+/***********************************/
+/* MC_CMD_MAC_STATISTICS_DESCRIPTOR
+ * Get a list of descriptors that describe the layout and size of the stats
+ * buffer required for retrieving statistics for a given port. Each entry in
+ * the list is formatted as MC_CMD_STAT_DESC and provides the ID of each stat
+ * and its location and size in the buffer. It also gives the overall minimum
+ * size of the DMA buffer required when DMA mode is used. Note that the first
+ * and last entries in the list are reserved for the generation start
+ * (MC_CMD_MARKER_STAT_GENERATION_START) and end
+ * (MC_CMD_MARKER_STAT_GENERATION_END) markers respectively, to be used for DMA
+ * synchronisation as described in the documentation for the relevant enum
+ * entries. The entries are present in the buffer even if DMA mode is not used.
+ * Provisions are made (but currently unused) for extending the size of the
+ * descriptors, extending the size of the list beyond the maximum MCDI response
+ * size, as well as dynamic runtime updates of the list. Returns: 0 on
+ * success, ENOENT on non-existent port handle
+ */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR 0x1e3
+#undef MC_CMD_0x1e3_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_MAC_STATISTICS_DESCRIPTOR_IN msgrequest */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_IN_LEN 8
+/* Handle of port to get MAC statistics descriptors for. */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_IN_PORT_HANDLE_LEN 4
+/* Offset of the first entry to return, for cases where not all entries fit in
+ * the MCDI response. Should be set to 0 on initial request, and on subsequent
+ * requests updated by the number of entries already returned, as long as the
+ * MORE_ENTRIES flag is set.
+ */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_IN_OFFSET_OFST 4
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_IN_OFFSET_LEN 4
+
+/* MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT msgresponse */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_LENMIN 28
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_LENMAX 252
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_LEN(num) (20+8*(num))
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_NUM(len) (((len)-20)/8)
+/* Generation number of the stats buffer. This is incremented each time the
+ * buffer is updated, and is used to verify the consistency of the buffer
+ * contents. Reserved for future extension (dynamic list updates). Currently
+ * always set to 0.
+ */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_GENERATION_OFST 0
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_GENERATION_LEN 4
+/* Minimum size in bytes of the DMA buffer required to retrieve all statistics
+ * for the port. This is the sum of the sizes of all the statistics, plus the
+ * size of the generation markers. Drivers will typically round this value up
+ * to the granularity of the host DMA allocation units.
+ */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_DMA_BUFFER_SIZE_OFST 4
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_DMA_BUFFER_SIZE_LEN 4
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_FLAGS_OFST 8
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_FLAGS_LEN 4
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_MORE_ENTRIES_OFST 8
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_MORE_ENTRIES_LBN 0
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_MORE_ENTRIES_WIDTH 1
+/* Size of the individual descriptor entry in the list. Determines the entry
+ * stride in the list. Currently always set to size of MC_CMD_STAT_DESC, larger
+ * values can be used in the future for extending the descriptor, by appending
+ * new data to the end of the existing structure.
+ */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRY_SIZE_OFST 12
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRY_SIZE_LEN 4
+/* Number of entries returned in the descriptor list. */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRY_COUNT_OFST 16
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRY_COUNT_LEN 4
+/* List of descriptors. Each entry is formatted as MC_CMD_STAT_DESC and
+ * provides the ID of each stat and its location and size in the buffer. The
+ * first and last entries are reserved for the generation start and end markers
+ * respectively.
+ */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_OFST 20
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_LEN 8
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_LO_OFST 20
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_LO_LEN 4
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_LO_LBN 160
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_LO_WIDTH 32
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_HI_OFST 24
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_HI_LEN 4
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_HI_LBN 192
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_HI_WIDTH 32
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_MINNUM 1
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_MAXNUM 29
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_MAXNUM_MCDI2 125
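/*
 * Illustrative sketch (not part of the generated header): fetching the whole
 * descriptor list when it does not fit in a single MCDI response, using the
 * OFFSET input and the MORE_ENTRIES output flag described above. put_le32(),
 * get_le32() and mcdi_rpc() are hypothetical helpers.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

extern void put_le32(uint8_t *buf, unsigned int ofst, uint32_t val);
extern uint32_t get_le32(const uint8_t *buf, unsigned int ofst);
extern int mcdi_rpc(unsigned int cmd, const uint8_t *inbuf, size_t inlen,
		    uint8_t *outbuf, size_t outlen);

static int fetch_all_stat_descriptors(uint32_t port_handle)
{
	uint8_t inbuf[MC_CMD_MAC_STATISTICS_DESCRIPTOR_IN_LEN];
	uint8_t outbuf[MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_LENMAX_MCDI2];
	uint32_t offset = 0, flags, count;
	int rc;

	do {
		memset(inbuf, 0, sizeof(inbuf));
		put_le32(inbuf,
			 MC_CMD_MAC_STATISTICS_DESCRIPTOR_IN_PORT_HANDLE_OFST,
			 port_handle);
		put_le32(inbuf, MC_CMD_MAC_STATISTICS_DESCRIPTOR_IN_OFFSET_OFST,
			 offset);

		rc = mcdi_rpc(MC_CMD_MAC_STATISTICS_DESCRIPTOR,
			      inbuf, sizeof(inbuf), outbuf, sizeof(outbuf));
		if (rc)
			return rc;

		flags = get_le32(outbuf,
				 MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_FLAGS_OFST);
		count = get_le32(outbuf,
				 MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRY_COUNT_OFST);
		/* entries start at OUT_ENTRIES_OFST, each ENTRY_SIZE bytes
		 * (currently the size of MC_CMD_STAT_DESC); parse them here.
		 */
		offset += count;
	} while (flags &
		 (1u << MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_MORE_ENTRIES_LBN));

	return 0;
}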
+
+
+/***********************************/
+/* MC_CMD_MAC_STATISTICS
+ * Get generic MAC statistics. This call retrieves unified statistics managed
+ * by the MC. The MC will populate and provide all supported statistics in the
+ * format as returned by MC_CMD_MAC_STATISTICS_DESCRIPTOR. Refer to the
+ * aforementioned command for the format and contents of the stats DMA buffer.
+ * To ensure consistent and accurate results, it is essential for the driver to
+ * initialize the DMA buffer with zeros when DMA mode is used. Returns: 0 on
+ * success, ETIME if the DMA buffer is not ready, ENOENT on non-existent port
+ * handle, and EINVAL on invalid parameters (DMA buffer too small)
+ */
+#define MC_CMD_MAC_STATISTICS 0x1e4
+#undef MC_CMD_0x1e4_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_MAC_STATISTICS_IN msgrequest */
+#define MC_CMD_MAC_STATISTICS_IN_LEN 20
+/* Handle of port to get MAC statistics for. */
+#define MC_CMD_MAC_STATISTICS_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_MAC_STATISTICS_IN_PORT_HANDLE_LEN 4
+/* Contains options for querying the MAC statistics. */
+#define MC_CMD_MAC_STATISTICS_IN_CMD_OFST 4
+#define MC_CMD_MAC_STATISTICS_IN_CMD_LEN 4
+#define MC_CMD_MAC_STATISTICS_IN_DMA_OFST 4
+#define MC_CMD_MAC_STATISTICS_IN_DMA_LBN 0
+#define MC_CMD_MAC_STATISTICS_IN_DMA_WIDTH 1
+#define MC_CMD_MAC_STATISTICS_IN_CLEAR_OFST 4
+#define MC_CMD_MAC_STATISTICS_IN_CLEAR_LBN 1
+#define MC_CMD_MAC_STATISTICS_IN_CLEAR_WIDTH 1
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_CHANGE_OFST 4
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_CHANGE_LBN 2
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_CHANGE_WIDTH 1
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_ENABLE_OFST 4
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_ENABLE_LBN 3
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_ENABLE_WIDTH 1
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_NOEVENT_OFST 4
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_NOEVENT_LBN 4
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_NOEVENT_WIDTH 1
+#define MC_CMD_MAC_STATISTICS_IN_PERIOD_MS_OFST 4
+#define MC_CMD_MAC_STATISTICS_IN_PERIOD_MS_LBN 16
+#define MC_CMD_MAC_STATISTICS_IN_PERIOD_MS_WIDTH 16
+/* This is the address of the DMA buffer to use for transfer of the statistics.
+ * Only valid if the DMA flag is set to 1.
+ */
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_OFST 8
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_LEN 8
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_LO_OFST 8
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_LO_LBN 64
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_LO_WIDTH 32
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_HI_OFST 12
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_HI_LBN 96
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_HI_WIDTH 32
+/* This is the length of the DMA buffer to use for the transfer of the
+ * statistics. The buffer should be at least DMA_BUFFER_SIZE long, as returned
+ * by MC_CMD_MAC_STATISTICS_DESCRIPTOR. If the supplied buffer is too small,
+ * the command will fail with EINVAL. Only valid if the DMA flag is set to 1.
+ */
+#define MC_CMD_MAC_STATISTICS_IN_DMA_LEN_OFST 16
+#define MC_CMD_MAC_STATISTICS_IN_DMA_LEN_LEN 4
+
+/* MC_CMD_MAC_STATISTICS_OUT msgresponse */
+#define MC_CMD_MAC_STATISTICS_OUT_LENMIN 5
+#define MC_CMD_MAC_STATISTICS_OUT_LENMAX 252
+#define MC_CMD_MAC_STATISTICS_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_MAC_STATISTICS_OUT_LEN(num) (4+1*(num))
+#define MC_CMD_MAC_STATISTICS_OUT_DATA_NUM(len) (((len)-4)/1)
+/* length of the data in bytes */
+#define MC_CMD_MAC_STATISTICS_OUT_DATALEN_OFST 0
+#define MC_CMD_MAC_STATISTICS_OUT_DATALEN_LEN 4
+#define MC_CMD_MAC_STATISTICS_OUT_DATA_OFST 4
+#define MC_CMD_MAC_STATISTICS_OUT_DATA_LEN 1
+#define MC_CMD_MAC_STATISTICS_OUT_DATA_MINNUM 1
+#define MC_CMD_MAC_STATISTICS_OUT_DATA_MAXNUM 248
+#define MC_CMD_MAC_STATISTICS_OUT_DATA_MAXNUM_MCDI2 1016
+
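The request layout above packs a port handle, a flags dword, a 64-bit DMA address and a DMA length. The following is a minimal host-side sketch of filling that layout for an immediate DMA read, assuming little-endian MCDI field encoding and that the MC_CMD_MAC_STATISTICS_IN_* macros above are in scope; mcdi_put_u32() and build_mac_stats_request() are illustrative helpers, not part of the sfc driver.

#include <stdint.h>
#include <string.h>

/* Store a 32-bit value into an MCDI request buffer (MCDI is little-endian). */
static void mcdi_put_u32(uint8_t *buf, unsigned int ofst, uint32_t val)
{
	buf[ofst + 0] = val & 0xff;
	buf[ofst + 1] = (val >> 8) & 0xff;
	buf[ofst + 2] = (val >> 16) & 0xff;
	buf[ofst + 3] = (val >> 24) & 0xff;
}

/* Build an MC_CMD_MAC_STATISTICS request asking for an immediate DMA of the
 * stats into a host buffer of dma_len bytes at dma_addr.  As noted above,
 * the host DMA buffer itself must be zeroed before the command is sent.
 */
static void build_mac_stats_request(uint8_t req[MC_CMD_MAC_STATISTICS_IN_LEN],
                                    uint32_t port_handle,
                                    uint64_t dma_addr, uint32_t dma_len)
{
	uint32_t cmd = 1u << MC_CMD_MAC_STATISTICS_IN_DMA_LBN; /* DMA flag */

	memset(req, 0, MC_CMD_MAC_STATISTICS_IN_LEN);
	mcdi_put_u32(req, MC_CMD_MAC_STATISTICS_IN_PORT_HANDLE_OFST, port_handle);
	mcdi_put_u32(req, MC_CMD_MAC_STATISTICS_IN_CMD_OFST, cmd);
	mcdi_put_u32(req, MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_LO_OFST,
	             (uint32_t)dma_addr);
	mcdi_put_u32(req, MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_HI_OFST,
	             (uint32_t)(dma_addr >> 32));
	mcdi_put_u32(req, MC_CMD_MAC_STATISTICS_IN_DMA_LEN_OFST, dma_len);
}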
+/* NET_PORT_HANDLE_DESC structuredef: Network port descriptor containing a port
+ * handle and attributes used, for example, in MC_CMD_ENUM_PORTS.
+ */
+#define NET_PORT_HANDLE_DESC_LEN 53
+/* The handle to identify the port */
+#define NET_PORT_HANDLE_DESC_PORT_HANDLE_OFST 0
+#define NET_PORT_HANDLE_DESC_PORT_HANDLE_LEN 4
+#define NET_PORT_HANDLE_DESC_PORT_HANDLE_LBN 0
+#define NET_PORT_HANDLE_DESC_PORT_HANDLE_WIDTH 32
+/* Includes the type of port e.g. physical, virtual or MAE MPORT and other
+ * properties relevant to the port.
+ */
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_OFST 4
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_LEN 8
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_LO_OFST 4
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_LO_LEN 4
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_LO_LBN 32
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_LO_WIDTH 32
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_HI_OFST 8
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_HI_LEN 4
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_HI_LBN 64
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_HI_WIDTH 32
+#define NET_PORT_HANDLE_DESC_PORT_TYPE_OFST 4
+#define NET_PORT_HANDLE_DESC_PORT_TYPE_LBN 0
+#define NET_PORT_HANDLE_DESC_PORT_TYPE_WIDTH 3
+#define NET_PORT_HANDLE_DESC_PHYSICAL 0x0 /* enum */
+#define NET_PORT_HANDLE_DESC_VIRTUAL 0x1 /* enum */
+#define NET_PORT_HANDLE_DESC_MPORT 0x2 /* enum */
+#define NET_PORT_HANDLE_DESC_IS_ZOMBIE_OFST 4
+#define NET_PORT_HANDLE_DESC_IS_ZOMBIE_LBN 8
+#define NET_PORT_HANDLE_DESC_IS_ZOMBIE_WIDTH 1
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_LBN 32
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_WIDTH 64
+/* The dynamic change that led to the port enumeration */
+#define NET_PORT_HANDLE_DESC_ENTRY_SRC_OFST 12
+#define NET_PORT_HANDLE_DESC_ENTRY_SRC_LEN 1
+/* enum: Indicates that the ENTRY_SRC field has not been initialized. */
+#define NET_PORT_HANDLE_DESC_UNKNOWN 0x0
+/* enum: The port was enumerated at start of day. */
+#define NET_PORT_HANDLE_DESC_PRESENT 0x1
+/* enum: The port was dynamically added. */
+#define NET_PORT_HANDLE_DESC_ADDED 0x2
+/* enum: The port was dynamically deleted. */
+#define NET_PORT_HANDLE_DESC_DELETED 0x3
+#define NET_PORT_HANDLE_DESC_ENTRY_SRC_LBN 96
+#define NET_PORT_HANDLE_DESC_ENTRY_SRC_WIDTH 8
+/* This is an opaque 40 byte label exposed to users as a unique identifier of
+ * the port. It is represented as a zero-terminated ASCII string and assigned
+ * by the port administrator, which is typically either the firmware for a
+ * physical port or the host software responsible for creating the virtual
+ * port. The label is conveyed to the driver after assignment; the driver,
+ * unlike the port administrator, does not need to know how to interpret the
+ * label.
+ */
+#define NET_PORT_HANDLE_DESC_PORT_LABEL_OFST 13
+#define NET_PORT_HANDLE_DESC_PORT_LABEL_LEN 40
+#define NET_PORT_HANDLE_DESC_PORT_LABEL_LBN 104
+#define NET_PORT_HANDLE_DESC_PORT_LABEL_WIDTH 320
+
+
+/***********************************/
+/* MC_CMD_ENUM_PORTS
+ * This command returns handles for all ports present in the system. The PCIe
+ * function type of each port (either physical or virtual) is also reported.
+ * After a start-of-day port enumeration, firmware keeps track of all available
+ * ports upon creation or deletion and updates the ports if there is a change.
+ * This command is cleared after a control interface reset (e.g. FLR,
+ * ENTITY_RESET), in which case it must be called again to reenumerate the
+ * ports. The command is also clear-on-read and repeated calls will drain the
+ * buffer.
+ */
+#define MC_CMD_ENUM_PORTS 0x1e5
+#undef MC_CMD_0x1e5_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e5_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_ENUM_PORTS_IN msgrequest */
+#define MC_CMD_ENUM_PORTS_IN_LEN 0
+
+/* MC_CMD_ENUM_PORTS_OUT msgresponse */
+#define MC_CMD_ENUM_PORTS_OUT_LENMIN 12
+#define MC_CMD_ENUM_PORTS_OUT_LENMAX 252
+#define MC_CMD_ENUM_PORTS_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_ENUM_PORTS_OUT_LEN(num) (12+1*(num))
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_NUM(len) (((len)-12)/1)
+/* Any unused flags are reserved and must be ignored. */
+#define MC_CMD_ENUM_PORTS_OUT_FLAGS_OFST 0
+#define MC_CMD_ENUM_PORTS_OUT_FLAGS_LEN 4
+#define MC_CMD_ENUM_PORTS_OUT_MORE_OFST 0
+#define MC_CMD_ENUM_PORTS_OUT_MORE_LBN 0
+#define MC_CMD_ENUM_PORTS_OUT_MORE_WIDTH 1
+/* The number of NET_PORT_HANDLE_DESC structures in PORT_HANDLES. */
+#define MC_CMD_ENUM_PORTS_OUT_PORT_COUNT_OFST 4
+#define MC_CMD_ENUM_PORTS_OUT_PORT_COUNT_LEN 4
+#define MC_CMD_ENUM_PORTS_OUT_SIZEOF_NET_PORT_HANDLE_DESC_OFST 8
+#define MC_CMD_ENUM_PORTS_OUT_SIZEOF_NET_PORT_HANDLE_DESC_LEN 4
+/* Array of NET_PORT_HANDLE_DESC structures. Callers must use the
+ * SIZEOF_NET_PORT_HANDLE_DESC field as the array stride between entries.
+ * This may also allow for tail padding for alignment. Fields beyond
+ * SIZEOF_NET_PORT_HANDLE_DESC are not present.
+ */
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_OFST 12
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_LEN 1
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_MINNUM 0
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_MAXNUM 240
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_MAXNUM_MCDI2 1008
+/* See structuredef: NET_PORT_HANDLE_DESC */
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_HANDLE_OFST 12
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_HANDLE_LEN 4
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_OFST 16
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_LEN 8
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_LO_OFST 16
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_LO_LEN 4
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_LO_LBN 128
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_LO_WIDTH 32
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_HI_OFST 20
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_HI_LEN 4
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_HI_LBN 160
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_HI_WIDTH 32
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_TYPE_LBN 128
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_TYPE_WIDTH 3
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_IS_ZOMBIE_LBN 136
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_IS_ZOMBIE_WIDTH 1
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_ENTRY_SRC_OFST 24
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_ENTRY_SRC_LEN 1
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_LABEL_OFST 25
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_LABEL_LEN 40
+
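Because the entries in PORT_HANDLES use the run-time stride reported in SIZEOF_NET_PORT_HANDLE_DESC, which may be larger than NET_PORT_HANDLE_DESC_LEN on later firmware, a caller should not hard-code the structure size when walking the response. A minimal sketch of such a walk, assuming little-endian MCDI encoding; mcdi_get_u32() and walk_enum_ports() are illustrative names only.

#include <stddef.h>
#include <stdint.h>

/* Load a little-endian 32-bit value from an MCDI response buffer. */
static uint32_t mcdi_get_u32(const uint8_t *buf, unsigned int ofst)
{
	return (uint32_t)buf[ofst] |
	       ((uint32_t)buf[ofst + 1] << 8) |
	       ((uint32_t)buf[ofst + 2] << 16) |
	       ((uint32_t)buf[ofst + 3] << 24);
}

/* Walk an MC_CMD_ENUM_PORTS response, invoking cb() for each descriptor.
 * Entries are SIZEOF_NET_PORT_HANDLE_DESC bytes apart; fields beyond that
 * size are not present and must not be read.
 */
static void walk_enum_ports(const uint8_t *resp, size_t resp_len,
                            void (*cb)(uint32_t handle, uint8_t entry_src))
{
	uint32_t count = mcdi_get_u32(resp, MC_CMD_ENUM_PORTS_OUT_PORT_COUNT_OFST);
	uint32_t stride = mcdi_get_u32(resp,
			MC_CMD_ENUM_PORTS_OUT_SIZEOF_NET_PORT_HANDLE_DESC_OFST);
	size_t ofst = MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_OFST;

	for (uint32_t i = 0; i < count && ofst + stride <= resp_len;
	     i++, ofst += stride)
		cb(mcdi_get_u32(resp, ofst + NET_PORT_HANDLE_DESC_PORT_HANDLE_OFST),
		   resp[ofst + NET_PORT_HANDLE_DESC_ENTRY_SRC_OFST]);
}

If the MORE flag in FLAGS is set, the caller would then reissue the command to drain the remaining entries, since the response is clear-on-read.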
+
+/***********************************/
+/* MC_CMD_GET_TRANSCEIVER_PROPERTIES
+ * Read properties of the transceiver associated with the port. Can be either
+ * for a fixed onboard transceiver or an inserted module. The returned data is
+ * interpreted from the transceiver hardware and may be fixed up by the
+ * firmware. Use MC_CMD_GET_MODULE_DATA to get raw undecoded data.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES 0x1e6
+#undef MC_CMD_0x1e6_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e6_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_GET_TRANSCEIVER_PROPERTIES_IN msgrequest */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_IN_LEN 4
+/* Handle to port to get transceiver properties from. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_IN_PORT_HANDLE_LEN 4
+
+/* MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT msgresponse */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_LEN 89
+/* Supported technology abilities. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_TECH_ABILITIES_MASK_OFST 0
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_TECH_ABILITIES_MASK_LEN 16
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_ETH_TECH/TECH */
+/* Reserved for future expansion to accommodate future Ethernet technology
+ * expansion.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_RESERVED_OFST 16
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_RESERVED_LEN 16
+/* Preferred FEC modes. This is a function of the cable type and length. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_PREFERRED_FEC_MASK_OFST 32
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_PREFERRED_FEC_MASK_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* FEC_TYPE/TYPE */
+/* SFF-8024 code reported by the module. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_CODE_OFST 36
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_CODE_LEN 2
+/* Medium. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_MEDIUM_OFST 38
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_MEDIUM_LEN 1
+/* enum property: value */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_UNKNOWN 0x0 /* enum */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_COPPER 0x1 /* enum */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_OPTICAL 0x2 /* enum */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_BACKPLANE 0x3 /* enum */
+/* Identifies the technology of the media, e.g. BASE-T, CR or SR. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_MEDIA_SUBTYPE_OFST 39
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_MEDIA_SUBTYPE_LEN 1
+/* enum property: value */
+/* MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_UNKNOWN 0x0 */
+/* enum: Ethernet over twisted-pair copper cables for distances up to 100
+ * meters.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_BASET 0x1
+/* enum: Ethernet over twin-axial, balanced copper cable. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_CR 0x2
+/* enum: Ethernet over backplane for connections on the same board. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_KX 0x3
+/* enum: Ethernet over a single backplane lane for connections between
+ * different boards.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_KR 0x4
+/* enum: Ethernet over copper backplane. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_KP 0x5
+/* enum: Ethernet over fiber optic. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_BASEX 0x6
+/* enum: Short range ethernet over multimode fiber optic (See IEEE 802.3 Clause
+ * 49 and 52).
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_SR 0x7
+/* enum: Long range, extended range or far reach ethernet used with single mode
+ * fiber optics.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_LR_ER_FR 0x8
+/* enum: Long reach multimode ethernet over multimode optical fiber. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_LRM 0x9
+/* enum: Very short reach PAM4 ethernet over multimode optical fiber (see IEEE
+ * 802.3db).
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VR 0xa
+/* enum: BASE-R encoding and PAM4 over single-mode fiber with reach up to at
+ * least 500 meters (802.3 Clause 121 and 124).
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_DR 0xb
+/* String of the vendor name as interpreted by NMC firmware. NMC firmware
+ * applies workarounds for known buggy transceivers. The vendor name is
+ * presented as 16 bytes of ASCII characters padded with spaces. It can also be
+ * represented as 16 bytes of zeros if the field is unspecified for the
+ * connected module. See SFF-8472/CMIS specifications for details.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_NAME_OFST 40
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_NAME_LEN 1
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_NAME_NUM 16
+/* The vendor part number as interpreted by NMC firmware. The field is presented
+ * as 16 bytes of ASCII chars padded with spaces. It can also be 16 bytes of
+ * zeros if the field is unspecified for the connected module.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_PN_OFST 56
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_PN_LEN 1
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_PN_NUM 16
+/* Serial number of the module presented as 16 bytes of ASCII characters padded
+ * with spaces. It can also be 16 bytes of zeros if the field is unspecified
+ * for the connected module. See SFF-8472/CMIS specifications for details.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_SERIAL_NUMBER_OFST 72
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_SERIAL_NUMBER_LEN 1
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_SERIAL_NUMBER_NUM 16
+/* This reports the number of module changes detected by the NMC firmware. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_PORT_MODULECHANGE_SEQ_NUM_OFST 88
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_PORT_MODULECHANGE_SEQ_NUM_LEN 1
+
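VENDOR_NAME, VENDOR_PN and SERIAL_NUMBER are fixed 16-byte fields, space-padded (or all zeros when unspecified) and not guaranteed to be NUL-terminated, so they need explicit conversion before printing. A small illustrative sketch; mcdi_copy_string16() is a hypothetical helper, not an existing API.

#include <stdint.h>
#include <string.h>

/* Copy a 16-byte space-padded MCDI string field into a caller-supplied
 * buffer of at least 17 bytes, trimming trailing padding and terminating it.
 */
static void mcdi_copy_string16(char *dst, const uint8_t *field)
{
	int len = 16;

	memcpy(dst, field, 16);
	while (len > 0 && (dst[len - 1] == ' ' || dst[len - 1] == '\0'))
		len--;
	dst[len] = '\0';
}

For example, the vendor name would be taken from resp + MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_NAME_OFST.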
+
+/***********************************/
+/* MC_CMD_GET_FIXED_PORT_PROPERTIES
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES 0x1e7
+#undef MC_CMD_0x1e7_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e7_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_GET_FIXED_PORT_PROPERTIES_IN msgrequest: In this context, the port
+ * consists of the MAC and the PHY, and excludes any modules inserted into the
+ * cage. This information is fixed for a given board but not for a given ASIC.
+ * This command reports properties for the port as it is currently configured,
+ * and not its hardware capabilities, which may exceed the current
+ * configuration.
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_IN_LEN 4
+/* Handle to the port from which to retrieve properties. */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_IN_PORT_HANDLE_LEN 4
+
+/* MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT msgresponse */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_LEN 36
+/* Supported capabilities of the port in its current configuration. */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_OFST 0
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_LEN 25
+/* See structuredef: MC_CMD_ETH_AN_FIELDS */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_TECH_MASK_OFST 0
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_TECH_MASK_LEN 16
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_FEC_MASK_OFST 16
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_FEC_MASK_LEN 4
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_FEC_REQ_OFST 20
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_FEC_REQ_LEN 4
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_PAUSE_MASK_OFST 24
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_PAUSE_MASK_LEN 1
+/* Number of lanes supported by the port in its current configuration. */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_NUM_LANES_OFST 25
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_NUM_LANES_LEN 1
+/* Bitmask of supported loopback modes. Where the response to this command
+ * includes the LOOPBACK_MODES_MASK_V2 field, that field should be used in
+ * preference to ensure that all available loopback modes are seen.
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_LOOPBACK_MODES_MASK_OFST 26
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_LOOPBACK_MODES_MASK_LEN 1
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_LOOPBACK_V2/MODE */
+/* This field serves as a cage index that uniquely identifies the cage to which
+ * the module is connected. This is useful when splitter cables that have
+ * multiple ports on a single cage are used.
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_MDI_INDEX_OFST 27
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_MDI_INDEX_LEN 1
+/* This bitmask is used to specify the lanes within the cage identified by
+ * MDI_INDEX that are allocated to the port.
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_MDI_LANE_MASK_OFST 28
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_MDI_LANE_MASK_LEN 1
+/* Maximum frame length supported by the port in its current configuration. */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_MAX_FRAME_LEN_OFST 32
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_MAX_FRAME_LEN_LEN 4
+
+/* MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2 msgresponse */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LEN 48
+/* Supported capabilities of the port in its current configuration. */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_ABILITIES_OFST 0
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_ABILITIES_LEN 25
+/* Number of lanes supported by the port in its current configuration. */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_NUM_LANES_OFST 25
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_NUM_LANES_LEN 1
+/* Bitmask of supported loopback modes. Where the response to this command
+ * includes the LOOPBACK_MODES_MASK_V2 field, that field should be used in
+ * preference to ensure that all available loopback modes are seen.
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_OFST 26
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_LEN 1
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_LOOPBACK_V2/MODE */
+/* This field serves as a cage index that uniquely identifies the cage to which
+ * the module is connected. This is useful when splitter cables that have
+ * multiple ports on a single cage are used.
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_MDI_INDEX_OFST 27
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_MDI_INDEX_LEN 1
+/* This bitmask is used to specify the lanes within the cage identified by
+ * MDI_INDEX that are allocated to the port.
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_MDI_LANE_MASK_OFST 28
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_MDI_LANE_MASK_LEN 1
+/* Maximum frame length supported by the port in its current configuration. */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_MAX_FRAME_LEN_OFST 32
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_MAX_FRAME_LEN_LEN 4
+/* Bitmask of supported loopback modes. This field replaces the
+ * LOOPBACK_MODES_MASK field which is defined under version 1 of this command.
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_OFST 40
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_LEN 8
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_LO_OFST 40
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_LO_LEN 4
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_LO_LBN 320
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_LO_WIDTH 32
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_HI_OFST 44
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_HI_LEN 4
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_HI_LBN 352
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_LOOPBACK_V2/MODE */
+
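A caller can tell whether the 64-bit LOOPBACK_MODES_MASK_V2 field is present from the response length and fall back to the legacy 8-bit mask otherwise. A sketch of that selection, reusing the little-endian mcdi_get_u32() helper from the MC_CMD_ENUM_PORTS sketch; the function name is illustrative only.

/* Return the supported-loopback bitmask, preferring LOOPBACK_MODES_MASK_V2
 * when the firmware returned a V2-sized response.
 */
static uint64_t get_loopback_modes(const uint8_t *resp, size_t resp_len)
{
	if (resp_len >= MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LEN) {
		uint64_t lo = mcdi_get_u32(resp,
			MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_LO_OFST);
		uint64_t hi = mcdi_get_u32(resp,
			MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_HI_OFST);

		return lo | (hi << 32);
	}
	/* V1 response: only the single-byte legacy mask is available. */
	return resp[MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_LOOPBACK_MODES_MASK_OFST];
}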
+
+/***********************************/
+/* MC_CMD_GET_MODULE_DATA
+ * Read media-specific data from the PHY (e.g. SFP/SFP+ module ID information
+ * for SFP+ PHYs). This command returns raw data from the module's EEPROM and
+ * it is not interpreted by the MC. Use MC_CMD_GET_TRANSCEIVER_PROPERTIES to
+ * get interpreted data. Return code: 0, ENOENT
+ */
+#define MC_CMD_GET_MODULE_DATA 0x1e8
+#undef MC_CMD_0x1e8_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e8_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_GET_MODULE_DATA_IN msgrequest */
+#define MC_CMD_GET_MODULE_DATA_IN_LEN 16
+/* Handle to identify the port from which to request module properties. */
+#define MC_CMD_GET_MODULE_DATA_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_GET_MODULE_DATA_IN_PORT_HANDLE_LEN 4
+/* 7 bit I2C address of the device. DEPRECATED: This field is replaced by
+ * MODULE_ADDR in V2. Use V2 of this command for proper alignment and easier
+ * access.
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_DEVADDR_LBN 32
+#define MC_CMD_GET_MODULE_DATA_IN_DEVADDR_WIDTH 7
+/* 0 if the page does not support banked access, non-zero otherwise. Non-zero
+ * BANK is valid if OFFSET is in the range 80h - ffh, i.e. in the Upper Memory
+ * region.
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_BANK_OFST 6
+#define MC_CMD_GET_MODULE_DATA_IN_BANK_LEN 2
+/* 0 if paged access is not supported, non-zero otherwise. Non-zero PAGE is
+ * valid if OFFSET is in the range 80h - ffh.
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_PAGE_OFST 8
+#define MC_CMD_GET_MODULE_DATA_IN_PAGE_LEN 2
+/* Offset in the range 00h - 7fh to access lower memory. Offset in the range
+ * 80h - ffh to access upper memory
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_OFFSET_OFST 10
+#define MC_CMD_GET_MODULE_DATA_IN_OFFSET_LEN 1
+#define MC_CMD_GET_MODULE_DATA_IN_LENGTH_OFST 12
+#define MC_CMD_GET_MODULE_DATA_IN_LENGTH_LEN 4
+
+/* MC_CMD_GET_MODULE_DATA_IN_V2 msgrequest: Updated MC_CMD_GET_MODULE_DATA with
+ * 8-bit wide ADDRESSING field. This new field provides a correctly aligned
+ * container for the 7-bit DEVADDR field from V1, now renamed MODULE_ADDR, to
+ * ensure proper alignment.
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_V2_LEN 16
+/* Handle to identify the port from which to request module properties. */
+#define MC_CMD_GET_MODULE_DATA_IN_V2_PORT_HANDLE_OFST 0
+#define MC_CMD_GET_MODULE_DATA_IN_V2_PORT_HANDLE_LEN 4
+/* 7 bit I2C address of the device. DEPRECATED: This field is replaced by
+ * MODULE_ADDR in V2. Use V2 of this command for proper alignment and easier
+ * access.
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_V2_DEVADDR_LBN 32
+#define MC_CMD_GET_MODULE_DATA_IN_V2_DEVADDR_WIDTH 7
+/* 0 if the page does not support banked access, non-zero otherwise. Non-zero
+ * BANK is valid if OFFSET is in the range 80h - ffh, i.e. in the Upper Memory
+ * region.
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_V2_BANK_OFST 6
+#define MC_CMD_GET_MODULE_DATA_IN_V2_BANK_LEN 2
+/* 0 if paged access is not supported, non-zero otherwise. Non-zero PAGE is
+ * valid if OFFSET is in the range 80h - ffh.
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_V2_PAGE_OFST 8
+#define MC_CMD_GET_MODULE_DATA_IN_V2_PAGE_LEN 2
+/* Offset in the range 00h - 7fh to access lower memory. Offset in the range
+ * 80h - ffh to access upper memory
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_V2_OFFSET_OFST 10
+#define MC_CMD_GET_MODULE_DATA_IN_V2_OFFSET_LEN 1
+#define MC_CMD_GET_MODULE_DATA_IN_V2_LENGTH_OFST 12
+#define MC_CMD_GET_MODULE_DATA_IN_V2_LENGTH_LEN 4
+/* Container for 7 bit I2C addresses. */
+#define MC_CMD_GET_MODULE_DATA_IN_V2_ADDRESSING_OFST 4
+#define MC_CMD_GET_MODULE_DATA_IN_V2_ADDRESSING_LEN 1
+#define MC_CMD_GET_MODULE_DATA_IN_V2_MODULE_ADDR_OFST 4
+#define MC_CMD_GET_MODULE_DATA_IN_V2_MODULE_ADDR_LBN 0
+#define MC_CMD_GET_MODULE_DATA_IN_V2_MODULE_ADDR_WIDTH 7
+
+/* MC_CMD_GET_MODULE_DATA_OUT msgresponse */
+#define MC_CMD_GET_MODULE_DATA_OUT_LENMIN 5
+#define MC_CMD_GET_MODULE_DATA_OUT_LENMAX 252
+#define MC_CMD_GET_MODULE_DATA_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_GET_MODULE_DATA_OUT_LEN(num) (4+1*(num))
+#define MC_CMD_GET_MODULE_DATA_OUT_DATA_NUM(len) (((len)-4)/1)
+/* length of the data in bytes */
+#define MC_CMD_GET_MODULE_DATA_OUT_DATALEN_OFST 0
+#define MC_CMD_GET_MODULE_DATA_OUT_DATALEN_LEN 4
+#define MC_CMD_GET_MODULE_DATA_OUT_DATA_OFST 4
+#define MC_CMD_GET_MODULE_DATA_OUT_DATA_LEN 1
+#define MC_CMD_GET_MODULE_DATA_OUT_DATA_MINNUM 1
+#define MC_CMD_GET_MODULE_DATA_OUT_DATA_MAXNUM 248
+#define MC_CMD_GET_MODULE_DATA_OUT_DATA_MAXNUM_MCDI2 1016
+
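The V2 request mirrors the SFF-8472/CMIS addressing model: a 7-bit module I2C address, an optional bank and page for upper-memory access, and a byte offset plus length. A minimal request-building sketch, assuming little-endian MCDI encoding and reusing mcdi_put_u32() from the MC_CMD_MAC_STATISTICS sketch; mcdi_put_u16() and build_module_data_request() are illustrative names.

/* Store a 16-bit little-endian value into an MCDI request buffer. */
static void mcdi_put_u16(uint8_t *buf, unsigned int ofst, uint16_t val)
{
	buf[ofst + 0] = val & 0xff;
	buf[ofst + 1] = val >> 8;
}

/* Build an MC_CMD_GET_MODULE_DATA_IN_V2 request to read len bytes of module
 * EEPROM at the given bank/page/offset.  Bank and page are only meaningful
 * when offset addresses upper memory (80h - ffh).
 */
static void build_module_data_request(uint8_t req[MC_CMD_GET_MODULE_DATA_IN_V2_LEN],
                                      uint32_t port_handle, uint8_t i2c_addr,
                                      uint16_t bank, uint16_t page,
                                      uint8_t offset, uint32_t len)
{
	memset(req, 0, MC_CMD_GET_MODULE_DATA_IN_V2_LEN);
	mcdi_put_u32(req, MC_CMD_GET_MODULE_DATA_IN_V2_PORT_HANDLE_OFST, port_handle);
	/* The 7-bit module address lives in the low bits of the ADDRESSING byte. */
	req[MC_CMD_GET_MODULE_DATA_IN_V2_ADDRESSING_OFST] = i2c_addr & 0x7f;
	mcdi_put_u16(req, MC_CMD_GET_MODULE_DATA_IN_V2_BANK_OFST, bank);
	mcdi_put_u16(req, MC_CMD_GET_MODULE_DATA_IN_V2_PAGE_OFST, page);
	req[MC_CMD_GET_MODULE_DATA_IN_V2_OFFSET_OFST] = offset;
	mcdi_put_u32(req, MC_CMD_GET_MODULE_DATA_IN_V2_LENGTH_OFST, len);
}

On success, DATALEN in MC_CMD_GET_MODULE_DATA_OUT gives the number of valid bytes in DATA.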
+/* EVENT_MASK structuredef */
+#define EVENT_MASK_LEN 4
+#define EVENT_MASK_TYPE_OFST 0
+#define EVENT_MASK_TYPE_LEN 4
+/* enum: PORT_LINKCHANGE event is enabled */
+#define EVENT_MASK_PORT_LINKCHANGE 0x0
+/* enum: PORT_MODULECHANGE event is enabled */
+#define EVENT_MASK_PORT_MODULECHANGE 0x1
+#define EVENT_MASK_TYPE_LBN 0
+#define EVENT_MASK_TYPE_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_SET_NETPORT_EVENTS_MASK
+ */
+#define MC_CMD_SET_NETPORT_EVENTS_MASK 0x1e9
+#undef MC_CMD_0x1e9_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e9_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_SET_NETPORT_EVENTS_MASK_IN msgrequest: Enable or disable delivery of
+ * specified network port events for a given port identified by PORT_HANDLE. At
+ * start of day, or after any control interface reset (FLR, ENTITY_RESET,
+ * etc.), all event delivery is disabled for all ports associated with the
+ * control interface.
+ */
+#define MC_CMD_SET_NETPORT_EVENTS_MASK_IN_LEN 8
+/* Handle to port to set the event delivery mask for. */
+#define MC_CMD_SET_NETPORT_EVENTS_MASK_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_SET_NETPORT_EVENTS_MASK_IN_PORT_HANDLE_LEN 4
+/* Bitmask of events to enable. Event delivery is enabled when corresponding
+ * bit is 1, disabled when 0.
+ */
+#define MC_CMD_SET_NETPORT_EVENTS_MASK_IN_EVENT_MASK_OFST 4
+#define MC_CMD_SET_NETPORT_EVENTS_MASK_IN_EVENT_MASK_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* EVENT_MASK/TYPE */
+
+/* MC_CMD_SET_NETPORT_EVENTS_MASK_OUT msgresponse */
+#define MC_CMD_SET_NETPORT_EVENTS_MASK_OUT_LEN 0
+
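The "enum property: bitshift" annotation means EVENT_MASK/TYPE values are bit positions rather than masks, so enabling both currently defined events sets bits 0 and 1 of EVENT_MASK. A short sketch of building the request under that reading, reusing mcdi_put_u32() from the earlier sketches; the function name is illustrative only.

/* Enable PORT_LINKCHANGE and PORT_MODULECHANGE delivery for one port. */
static void build_events_mask_request(uint8_t req[MC_CMD_SET_NETPORT_EVENTS_MASK_IN_LEN],
                                      uint32_t port_handle)
{
	uint32_t mask = (1u << EVENT_MASK_PORT_LINKCHANGE) |
	                (1u << EVENT_MASK_PORT_MODULECHANGE);

	memset(req, 0, MC_CMD_SET_NETPORT_EVENTS_MASK_IN_LEN);
	mcdi_put_u32(req, MC_CMD_SET_NETPORT_EVENTS_MASK_IN_PORT_HANDLE_OFST,
	             port_handle);
	mcdi_put_u32(req, MC_CMD_SET_NETPORT_EVENTS_MASK_IN_EVENT_MASK_OFST, mask);
}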
/***********************************/
-/* MC_CMD_EVENT_CTRL
- * Configure which categories of unsolicited events the driver expects to
- * receive (Riverhead).
- */
-#define MC_CMD_EVENT_CTRL 0x69
-#undef MC_CMD_0x69_PRIVILEGE_CTG
-
-#define MC_CMD_0x69_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_EVENT_CTRL_IN msgrequest */
-#define MC_CMD_EVENT_CTRL_IN_LENMIN 0
-#define MC_CMD_EVENT_CTRL_IN_LENMAX 252
-#define MC_CMD_EVENT_CTRL_IN_LENMAX_MCDI2 1020
-#define MC_CMD_EVENT_CTRL_IN_LEN(num) (0+4*(num))
-#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_NUM(len) (((len)-0)/4)
-/* Array of event categories for which the driver wishes to receive events. */
-#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_OFST 0
-#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_LEN 4
-#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_MINNUM 0
-#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_MAXNUM 63
-#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_MAXNUM_MCDI2 255
-/* enum: Driver wishes to receive LINKCHANGE events. */
-#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_LINKCHANGE 0x0
-/* enum: Driver wishes to receive SENSOR_CHANGE and SENSOR_STATE_CHANGE events.
- */
-#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_SENSOREVT 0x1
-/* enum: Driver wishes to receive receive errors. */
-#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_RX_ERR 0x2
-/* enum: Driver wishes to receive transmit errors. */
-#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_TX_ERR 0x3
-/* enum: Driver wishes to receive firmware alerts. */
-#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_FWALERT 0x4
-/* enum: Driver wishes to receive reboot events. */
-#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_MC_REBOOT 0x5
-
-/* MC_CMD_EVENT_CTRL_OUT msgrequest */
-#define MC_CMD_EVENT_CTRL_OUT_LEN 0
+/* MC_CMD_GET_NETPORT_EVENTS_MASK
+ */
+#define MC_CMD_GET_NETPORT_EVENTS_MASK 0x1ea
+#undef MC_CMD_0x1ea_PRIVILEGE_CTG
+
+#define MC_CMD_0x1ea_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_GET_NETPORT_EVENTS_MASK_IN msgrequest: Get the event delivery mask
+ * for a given port identified by PORT_HANDLE.
+ */
+#define MC_CMD_GET_NETPORT_EVENTS_MASK_IN_LEN 4
+/* Handle to port to get the event delivery mask for. */
+#define MC_CMD_GET_NETPORT_EVENTS_MASK_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_GET_NETPORT_EVENTS_MASK_IN_PORT_HANDLE_LEN 4
+
+/* MC_CMD_GET_NETPORT_EVENTS_MASK_OUT msgresponse */
+#define MC_CMD_GET_NETPORT_EVENTS_MASK_OUT_LEN 4
+/* Bitmask of events enabled. Event delivery is enabled when corresponding bit
+ * is 1, disabled when 0.
+ */
+#define MC_CMD_GET_NETPORT_EVENTS_MASK_OUT_EVENT_MASK_OFST 0
+#define MC_CMD_GET_NETPORT_EVENTS_MASK_OUT_EVENT_MASK_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* EVENT_MASK/TYPE */
+
+
+/***********************************/
+/* MC_CMD_GET_SUPPORTED_NETPORT_EVENTS
+ */
+#define MC_CMD_GET_SUPPORTED_NETPORT_EVENTS 0x1eb
+#undef MC_CMD_0x1eb_PRIVILEGE_CTG
+
+#define MC_CMD_0x1eb_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_GET_SUPPORTED_NETPORT_EVENTS_IN msgrequest: Get network port events
+ * supported by the platform. Information returned is fixed for a given NIC
+ * platform.
+ */
+#define MC_CMD_GET_SUPPORTED_NETPORT_EVENTS_IN_LEN 0
+
+/* MC_CMD_GET_SUPPORTED_NETPORT_EVENTS_OUT msgresponse */
+#define MC_CMD_GET_SUPPORTED_NETPORT_EVENTS_OUT_LEN 4
+/* Bitmask of events supported by the platform. An event type is supported
+ * when the corresponding bit is 1.
+ */
+#define MC_CMD_GET_SUPPORTED_NETPORT_EVENTS_OUT_EVENT_MASK_OFST 0
+#define MC_CMD_GET_SUPPORTED_NETPORT_EVENTS_OUT_EVENT_MASK_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* EVENT_MASK/TYPE */
+
+
+/***********************************/
+/* MC_CMD_GET_NETPORT_STATISTICS
+ * Get generic MAC statistics. This call retrieves unified statistics managed
+ * by the MC. The MC will populate and provide all supported statistics in the
+ * format as returned by MC_CMD_MAC_STATISTICS_DESCRIPTOR. Refer to the
+ * aforementioned command for the format and contents of the stats DMA buffer.
+ * To ensure consistent and accurate results, it is essential for the driver to
+ * initialize the DMA buffer with zeros when DMA mode is used. Returns: 0 on
+ * success, ETIME if the DMA buffer is not ready, ENOENT on non-existent port
+ * handle, and EINVAL on invalid parameters (DMA buffer too small)
+ */
+#define MC_CMD_GET_NETPORT_STATISTICS 0x1fa
+#undef MC_CMD_0x1fa_PRIVILEGE_CTG
+
+#define MC_CMD_0x1fa_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_NETPORT_STATISTICS_IN msgrequest */
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_LEN 20
+/* Handle of port to get MAC statistics for. */
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PORT_HANDLE_LEN 4
+/* Contains options for querying the MAC statistics. */
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_CMD_OFST 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_CMD_LEN 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_OFST 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_LBN 0
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_WIDTH 1
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_CLEAR_OFST 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_CLEAR_LBN 1
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_CLEAR_WIDTH 1
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_CHANGE_OFST 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_CHANGE_LBN 2
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_CHANGE_WIDTH 1
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_ENABLE_OFST 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_ENABLE_LBN 3
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_ENABLE_WIDTH 1
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_NOEVENT_OFST 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_NOEVENT_LBN 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_NOEVENT_WIDTH 1
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIOD_MS_OFST 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIOD_MS_LBN 15
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIOD_MS_WIDTH 17
+/* Specifies the physical address of the DMA buffer to use for statistics
+ * transfer. This field must contain a valid address under either of these
+ * conditions: 1. DMA flag is set (immediate DMA requested) 2. Both
+ * PERIODIC_CHANGE and PERIODIC_ENABLE are set (periodic DMA configured)
+ */
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_OFST 8
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_LEN 8
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_LO_OFST 8
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_LO_LBN 64
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_LO_WIDTH 32
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_HI_OFST 12
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_HI_LBN 96
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_HI_WIDTH 32
+/* Specifies the length of the DMA buffer in bytes for statistics transfer. The
+ * buffer size must be at least DMA_BUFFER_SIZE bytes (as returned by
+ * MC_CMD_MAC_STATISTICS_DESCRIPTOR). Providing an insufficient buffer size
+ * will result in an EINVAL error. This field must contain a valid length under
+ * either of these conditions: 1. DMA flag is set (immediate DMA requested) 2.
+ * Both PERIODIC_CHANGE and PERIODIC_ENABLE are set (periodic DMA configured)
+ */
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_LEN_OFST 16
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_LEN_LEN 4
+
+/* MC_CMD_GET_NETPORT_STATISTICS_OUT msgresponse */
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_LENMIN 0
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_LENMAX 248
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_LENMAX_MCDI2 1016
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_LEN(num) (0+8*(num))
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_NUM(len) (((len)-0)/8)
+/* Statistics buffer. Zero-length if DMA mode is used. The statistics buffer is
+ * an array of 8-byte counter values, containing the generation start marker,
+ * stats counters, and generation end marker. The index of each counter in the
+ * array is reported by the MAC_STATISTICS_DESCRIPTOR command. The same layout
+ * is used for the DMA buffer for DMA mode stats.
+ */
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_OFST 0
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_LEN 8
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_LO_OFST 0
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_LO_LEN 4
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_LO_LBN 0
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_LO_WIDTH 32
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_HI_OFST 4
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_HI_LEN 4
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_HI_LBN 32
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_HI_WIDTH 32
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_MINNUM 0
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_MAXNUM 31
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_MAXNUM_MCDI2 127
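The counters described above are 64-bit little-endian values whose indices come from MC_CMD_MAC_STATISTICS_DESCRIPTOR, with the first and last entries reserved for the generation start and end markers. When the buffer is updated by periodic DMA, those markers let the host detect a torn read. A sketch of one common snapshot pattern under those assumptions; stats_read() and stats_snapshot() are illustrative, and a real implementation would also need appropriate memory barriers around the DMA buffer accesses.

#include <stdint.h>

/* Read the 64-bit little-endian counter at the given index. */
static uint64_t stats_read(const uint8_t *stats, unsigned int index)
{
	const uint8_t *p = stats + 8 * index;
	uint64_t v = 0;

	for (int i = 7; i >= 0; i--)
		v = (v << 8) | p[i];
	return v;
}

/* Copy count counters out of a DMA'd stats buffer, retrying until the
 * generation start and end markers (indices from the descriptor) match,
 * i.e. until the firmware did not update the buffer mid-copy.
 */
static void stats_snapshot(const uint8_t *stats, unsigned int count,
                           unsigned int gen_start_idx, unsigned int gen_end_idx,
                           uint64_t *out)
{
	uint64_t gen_start, gen_end;

	do {
		gen_start = stats_read(stats, gen_start_idx);
		for (unsigned int i = 0; i < count; i++)
			out[i] = stats_read(stats, i);
		gen_end = stats_read(stats, gen_end_idx);
	} while (gen_start != gen_end);
}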
/* EVB_PORT_ID structuredef */
#define EVB_PORT_ID_LEN 4
@@ -9706,44 +10573,6 @@
#define EVB_PORT_ID_PORT_ID_LBN 0
#define EVB_PORT_ID_PORT_ID_WIDTH 32
-/* EVB_VLAN_TAG structuredef */
-#define EVB_VLAN_TAG_LEN 2
-/* The VLAN tag value */
-#define EVB_VLAN_TAG_VLAN_ID_LBN 0
-#define EVB_VLAN_TAG_VLAN_ID_WIDTH 12
-#define EVB_VLAN_TAG_MODE_LBN 12
-#define EVB_VLAN_TAG_MODE_WIDTH 4
-/* enum: Insert the VLAN. */
-#define EVB_VLAN_TAG_INSERT 0x0
-/* enum: Replace the VLAN if already present. */
-#define EVB_VLAN_TAG_REPLACE 0x1
-
-/* BUFTBL_ENTRY structuredef */
-#define BUFTBL_ENTRY_LEN 12
-/* the owner ID */
-#define BUFTBL_ENTRY_OID_OFST 0
-#define BUFTBL_ENTRY_OID_LEN 2
-#define BUFTBL_ENTRY_OID_LBN 0
-#define BUFTBL_ENTRY_OID_WIDTH 16
-/* the page parameter as one of ESE_DZ_SMC_PAGE_SIZE_ */
-#define BUFTBL_ENTRY_PGSZ_OFST 2
-#define BUFTBL_ENTRY_PGSZ_LEN 2
-#define BUFTBL_ENTRY_PGSZ_LBN 16
-#define BUFTBL_ENTRY_PGSZ_WIDTH 16
-/* the raw 64-bit address field from the SMC, not adjusted for page size */
-#define BUFTBL_ENTRY_RAWADDR_OFST 4
-#define BUFTBL_ENTRY_RAWADDR_LEN 8
-#define BUFTBL_ENTRY_RAWADDR_LO_OFST 4
-#define BUFTBL_ENTRY_RAWADDR_LO_LEN 4
-#define BUFTBL_ENTRY_RAWADDR_LO_LBN 32
-#define BUFTBL_ENTRY_RAWADDR_LO_WIDTH 32
-#define BUFTBL_ENTRY_RAWADDR_HI_OFST 8
-#define BUFTBL_ENTRY_RAWADDR_HI_LEN 4
-#define BUFTBL_ENTRY_RAWADDR_HI_LBN 64
-#define BUFTBL_ENTRY_RAWADDR_HI_WIDTH 32
-#define BUFTBL_ENTRY_RAWADDR_LBN 32
-#define BUFTBL_ENTRY_RAWADDR_WIDTH 64
-
/* NVRAM_PARTITION_TYPE structuredef */
#define NVRAM_PARTITION_TYPE_LEN 2
#define NVRAM_PARTITION_TYPE_ID_OFST 0
@@ -9787,6 +10616,8 @@
#define NVRAM_PARTITION_TYPE_NMC_LOG 0x700
/* enum: Non-volatile log output of second core on dual-core device */
#define NVRAM_PARTITION_TYPE_LOG_SLAVE 0x701
+/* enum: RAM (volatile) log output partition */
+#define NVRAM_PARTITION_TYPE_RAM_LOG 0x702
/* enum: Device state dump output partition */
#define NVRAM_PARTITION_TYPE_DUMP 0x800
/* enum: Crash log partition for NMC firmware */
@@ -9923,6 +10754,16 @@
#define NVRAM_PARTITION_TYPE_SUC_SOC_CONFIG 0x1f07
/* enum: System-on-Chip update information. */
#define NVRAM_PARTITION_TYPE_SOC_UPDATE 0x2003
+/* enum: Virtual partition. Write-only. Writes will actually be sent to an
+ * appropriate partition (for instance BUNDLE if the data starts with the magic
+ * number for a bundle update), or discarded with an error if not recognised as
+ * a supported type.
+ */
+#define NVRAM_PARTITION_TYPE_AUTO 0x2100
+/* enum: MC/NMC (first stage) bootloader firmware. (For X4, see XN-202072-PS
+ * and XN-202084-SW section 3.1).
+ */
+#define NVRAM_PARTITION_TYPE_BOOTLOADER 0x2200
/* enum: Start of reserved value range (firmware may use for any purpose) */
#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00
/* enum: End of reserved value range (firmware may use for any purpose) */
@@ -9981,116 +10822,6 @@
#define LICENSED_APP_ID_ID_LBN 0
#define LICENSED_APP_ID_ID_WIDTH 32
-/* LICENSED_FEATURES structuredef */
-#define LICENSED_FEATURES_LEN 8
-/* Bitmask of licensed firmware features */
-#define LICENSED_FEATURES_MASK_OFST 0
-#define LICENSED_FEATURES_MASK_LEN 8
-#define LICENSED_FEATURES_MASK_LO_OFST 0
-#define LICENSED_FEATURES_MASK_LO_LEN 4
-#define LICENSED_FEATURES_MASK_LO_LBN 0
-#define LICENSED_FEATURES_MASK_LO_WIDTH 32
-#define LICENSED_FEATURES_MASK_HI_OFST 4
-#define LICENSED_FEATURES_MASK_HI_LEN 4
-#define LICENSED_FEATURES_MASK_HI_LBN 32
-#define LICENSED_FEATURES_MASK_HI_WIDTH 32
-#define LICENSED_FEATURES_RX_CUT_THROUGH_OFST 0
-#define LICENSED_FEATURES_RX_CUT_THROUGH_LBN 0
-#define LICENSED_FEATURES_RX_CUT_THROUGH_WIDTH 1
-#define LICENSED_FEATURES_PIO_OFST 0
-#define LICENSED_FEATURES_PIO_LBN 1
-#define LICENSED_FEATURES_PIO_WIDTH 1
-#define LICENSED_FEATURES_EVQ_TIMER_OFST 0
-#define LICENSED_FEATURES_EVQ_TIMER_LBN 2
-#define LICENSED_FEATURES_EVQ_TIMER_WIDTH 1
-#define LICENSED_FEATURES_CLOCK_OFST 0
-#define LICENSED_FEATURES_CLOCK_LBN 3
-#define LICENSED_FEATURES_CLOCK_WIDTH 1
-#define LICENSED_FEATURES_RX_TIMESTAMPS_OFST 0
-#define LICENSED_FEATURES_RX_TIMESTAMPS_LBN 4
-#define LICENSED_FEATURES_RX_TIMESTAMPS_WIDTH 1
-#define LICENSED_FEATURES_TX_TIMESTAMPS_OFST 0
-#define LICENSED_FEATURES_TX_TIMESTAMPS_LBN 5
-#define LICENSED_FEATURES_TX_TIMESTAMPS_WIDTH 1
-#define LICENSED_FEATURES_RX_SNIFF_OFST 0
-#define LICENSED_FEATURES_RX_SNIFF_LBN 6
-#define LICENSED_FEATURES_RX_SNIFF_WIDTH 1
-#define LICENSED_FEATURES_TX_SNIFF_OFST 0
-#define LICENSED_FEATURES_TX_SNIFF_LBN 7
-#define LICENSED_FEATURES_TX_SNIFF_WIDTH 1
-#define LICENSED_FEATURES_PROXY_FILTER_OPS_OFST 0
-#define LICENSED_FEATURES_PROXY_FILTER_OPS_LBN 8
-#define LICENSED_FEATURES_PROXY_FILTER_OPS_WIDTH 1
-#define LICENSED_FEATURES_EVENT_CUT_THROUGH_OFST 0
-#define LICENSED_FEATURES_EVENT_CUT_THROUGH_LBN 9
-#define LICENSED_FEATURES_EVENT_CUT_THROUGH_WIDTH 1
-#define LICENSED_FEATURES_MASK_LBN 0
-#define LICENSED_FEATURES_MASK_WIDTH 64
-
-/* LICENSED_V3_APPS structuredef */
-#define LICENSED_V3_APPS_LEN 8
-/* Bitmask of licensed applications */
-#define LICENSED_V3_APPS_MASK_OFST 0
-#define LICENSED_V3_APPS_MASK_LEN 8
-#define LICENSED_V3_APPS_MASK_LO_OFST 0
-#define LICENSED_V3_APPS_MASK_LO_LEN 4
-#define LICENSED_V3_APPS_MASK_LO_LBN 0
-#define LICENSED_V3_APPS_MASK_LO_WIDTH 32
-#define LICENSED_V3_APPS_MASK_HI_OFST 4
-#define LICENSED_V3_APPS_MASK_HI_LEN 4
-#define LICENSED_V3_APPS_MASK_HI_LBN 32
-#define LICENSED_V3_APPS_MASK_HI_WIDTH 32
-#define LICENSED_V3_APPS_ONLOAD_OFST 0
-#define LICENSED_V3_APPS_ONLOAD_LBN 0
-#define LICENSED_V3_APPS_ONLOAD_WIDTH 1
-#define LICENSED_V3_APPS_PTP_OFST 0
-#define LICENSED_V3_APPS_PTP_LBN 1
-#define LICENSED_V3_APPS_PTP_WIDTH 1
-#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_OFST 0
-#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_LBN 2
-#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_WIDTH 1
-#define LICENSED_V3_APPS_SOLARSECURE_OFST 0
-#define LICENSED_V3_APPS_SOLARSECURE_LBN 3
-#define LICENSED_V3_APPS_SOLARSECURE_WIDTH 1
-#define LICENSED_V3_APPS_PERF_MONITOR_OFST 0
-#define LICENSED_V3_APPS_PERF_MONITOR_LBN 4
-#define LICENSED_V3_APPS_PERF_MONITOR_WIDTH 1
-#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_OFST 0
-#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_LBN 5
-#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_WIDTH 1
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_OFST 0
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_LBN 6
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_WIDTH 1
-#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_OFST 0
-#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_LBN 7
-#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_WIDTH 1
-#define LICENSED_V3_APPS_TCP_DIRECT_OFST 0
-#define LICENSED_V3_APPS_TCP_DIRECT_LBN 8
-#define LICENSED_V3_APPS_TCP_DIRECT_WIDTH 1
-#define LICENSED_V3_APPS_LOW_LATENCY_OFST 0
-#define LICENSED_V3_APPS_LOW_LATENCY_LBN 9
-#define LICENSED_V3_APPS_LOW_LATENCY_WIDTH 1
-#define LICENSED_V3_APPS_SOLARCAPTURE_TAP_OFST 0
-#define LICENSED_V3_APPS_SOLARCAPTURE_TAP_LBN 10
-#define LICENSED_V3_APPS_SOLARCAPTURE_TAP_WIDTH 1
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_OFST 0
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_LBN 11
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_WIDTH 1
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_OFST 0
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_LBN 12
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_WIDTH 1
-#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_OFST 0
-#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_LBN 13
-#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_WIDTH 1
-#define LICENSED_V3_APPS_DSHBRD_OFST 0
-#define LICENSED_V3_APPS_DSHBRD_LBN 14
-#define LICENSED_V3_APPS_DSHBRD_WIDTH 1
-#define LICENSED_V3_APPS_SCATRD_OFST 0
-#define LICENSED_V3_APPS_SCATRD_LBN 15
-#define LICENSED_V3_APPS_SCATRD_WIDTH 1
-#define LICENSED_V3_APPS_MASK_LBN 0
-#define LICENSED_V3_APPS_MASK_WIDTH 64
-
/* LICENSED_V3_FEATURES structuredef */
#define LICENSED_V3_FEATURES_LEN 8
/* Bitmask of licensed firmware features */
@@ -10199,44 +10930,6 @@
#define RSS_MODE_HASH_SELECTOR_LBN 0
#define RSS_MODE_HASH_SELECTOR_WIDTH 8
-/* CTPIO_STATS_MAP structuredef */
-#define CTPIO_STATS_MAP_LEN 4
-/* The (function relative) VI number */
-#define CTPIO_STATS_MAP_VI_OFST 0
-#define CTPIO_STATS_MAP_VI_LEN 2
-#define CTPIO_STATS_MAP_VI_LBN 0
-#define CTPIO_STATS_MAP_VI_WIDTH 16
-/* The target bucket for the VI */
-#define CTPIO_STATS_MAP_BUCKET_OFST 2
-#define CTPIO_STATS_MAP_BUCKET_LEN 2
-#define CTPIO_STATS_MAP_BUCKET_LBN 16
-#define CTPIO_STATS_MAP_BUCKET_WIDTH 16
-
-
-/***********************************/
-/* MC_CMD_READ_REGS
- * Get a dump of the MCPU registers
- */
-#define MC_CMD_READ_REGS 0x50
-#undef MC_CMD_0x50_PRIVILEGE_CTG
-
-#define MC_CMD_0x50_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_READ_REGS_IN msgrequest */
-#define MC_CMD_READ_REGS_IN_LEN 0
-
-/* MC_CMD_READ_REGS_OUT msgresponse */
-#define MC_CMD_READ_REGS_OUT_LEN 308
-/* Whether the corresponding register entry contains a valid value */
-#define MC_CMD_READ_REGS_OUT_MASK_OFST 0
-#define MC_CMD_READ_REGS_OUT_MASK_LEN 16
-/* Same order as MIPS GDB (r0-r31, sr, lo, hi, bad, cause, 32 x float, fsr,
- * fir, fp)
- */
-#define MC_CMD_READ_REGS_OUT_REGS_OFST 16
-#define MC_CMD_READ_REGS_OUT_REGS_LEN 4
-#define MC_CMD_READ_REGS_OUT_REGS_NUM 73
-
/***********************************/
/* MC_CMD_INIT_EVQ
@@ -10640,25 +11333,6 @@
#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_RXQ_FORCE_EV_MERGING_LBN 3
#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_RXQ_FORCE_EV_MERGING_WIDTH 1
-/* QUEUE_CRC_MODE structuredef */
-#define QUEUE_CRC_MODE_LEN 1
-#define QUEUE_CRC_MODE_MODE_LBN 0
-#define QUEUE_CRC_MODE_MODE_WIDTH 4
-/* enum: No CRC. */
-#define QUEUE_CRC_MODE_NONE 0x0
-/* enum: CRC Fiber channel over ethernet. */
-#define QUEUE_CRC_MODE_FCOE 0x1
-/* enum: CRC (digest) iSCSI header only. */
-#define QUEUE_CRC_MODE_ISCSI_HDR 0x2
-/* enum: CRC (digest) iSCSI header and payload. */
-#define QUEUE_CRC_MODE_ISCSI 0x3
-/* enum: CRC Fiber channel over IP over ethernet. */
-#define QUEUE_CRC_MODE_FCOIPOE 0x4
-/* enum: CRC MPA. */
-#define QUEUE_CRC_MODE_MPA 0x5
-#define QUEUE_CRC_MODE_SPARE_LBN 4
-#define QUEUE_CRC_MODE_SPARE_WIDTH 4
-
/***********************************/
/* MC_CMD_INIT_RXQ
@@ -10827,6 +11501,9 @@
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV_OFST 16
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV_LBN 20
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SUPPRESS_RX_EVENTS_OFST 16
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SUPPRESS_RX_EVENTS_LBN 21
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SUPPRESS_RX_EVENTS_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_OFST 20
#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_LEN 4
@@ -10933,6 +11610,9 @@
#define MC_CMD_INIT_RXQ_V3_IN_FLAG_NO_CONT_EV_OFST 16
#define MC_CMD_INIT_RXQ_V3_IN_FLAG_NO_CONT_EV_LBN 20
#define MC_CMD_INIT_RXQ_V3_IN_FLAG_NO_CONT_EV_WIDTH 1
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_SUPPRESS_RX_EVENTS_OFST 16
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_SUPPRESS_RX_EVENTS_LBN 21
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_SUPPRESS_RX_EVENTS_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_RXQ_V3_IN_OWNER_ID_OFST 20
#define MC_CMD_INIT_RXQ_V3_IN_OWNER_ID_LEN 4
@@ -11068,6 +11748,9 @@
#define MC_CMD_INIT_RXQ_V4_IN_FLAG_NO_CONT_EV_OFST 16
#define MC_CMD_INIT_RXQ_V4_IN_FLAG_NO_CONT_EV_LBN 20
#define MC_CMD_INIT_RXQ_V4_IN_FLAG_NO_CONT_EV_WIDTH 1
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_SUPPRESS_RX_EVENTS_OFST 16
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_SUPPRESS_RX_EVENTS_LBN 21
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_SUPPRESS_RX_EVENTS_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_RXQ_V4_IN_OWNER_ID_OFST 20
#define MC_CMD_INIT_RXQ_V4_IN_OWNER_ID_LEN 4
@@ -11216,6 +11899,9 @@
#define MC_CMD_INIT_RXQ_V5_IN_FLAG_NO_CONT_EV_OFST 16
#define MC_CMD_INIT_RXQ_V5_IN_FLAG_NO_CONT_EV_LBN 20
#define MC_CMD_INIT_RXQ_V5_IN_FLAG_NO_CONT_EV_WIDTH 1
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_SUPPRESS_RX_EVENTS_OFST 16
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_SUPPRESS_RX_EVENTS_LBN 21
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_SUPPRESS_RX_EVENTS_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_RXQ_V5_IN_OWNER_ID_OFST 20
#define MC_CMD_INIT_RXQ_V5_IN_OWNER_ID_LEN 4
@@ -11610,320 +12296,6 @@
/* MC_CMD_PROXY_CMD_OUT msgresponse */
#define MC_CMD_PROXY_CMD_OUT_LEN 0
-/* MC_PROXY_STATUS_BUFFER structuredef: Host memory status buffer used to
- * manage proxied requests
- */
-#define MC_PROXY_STATUS_BUFFER_LEN 16
-/* Handle allocated by the firmware for this proxy transaction */
-#define MC_PROXY_STATUS_BUFFER_HANDLE_OFST 0
-#define MC_PROXY_STATUS_BUFFER_HANDLE_LEN 4
-/* enum: An invalid handle. */
-#define MC_PROXY_STATUS_BUFFER_HANDLE_INVALID 0x0
-#define MC_PROXY_STATUS_BUFFER_HANDLE_LBN 0
-#define MC_PROXY_STATUS_BUFFER_HANDLE_WIDTH 32
-/* The requesting physical function number */
-#define MC_PROXY_STATUS_BUFFER_PF_OFST 4
-#define MC_PROXY_STATUS_BUFFER_PF_LEN 2
-#define MC_PROXY_STATUS_BUFFER_PF_LBN 32
-#define MC_PROXY_STATUS_BUFFER_PF_WIDTH 16
-/* The requesting virtual function number. Set to VF_NULL if the target is a
- * PF.
- */
-#define MC_PROXY_STATUS_BUFFER_VF_OFST 6
-#define MC_PROXY_STATUS_BUFFER_VF_LEN 2
-#define MC_PROXY_STATUS_BUFFER_VF_LBN 48
-#define MC_PROXY_STATUS_BUFFER_VF_WIDTH 16
-/* The target function RID. */
-#define MC_PROXY_STATUS_BUFFER_RID_OFST 8
-#define MC_PROXY_STATUS_BUFFER_RID_LEN 2
-#define MC_PROXY_STATUS_BUFFER_RID_LBN 64
-#define MC_PROXY_STATUS_BUFFER_RID_WIDTH 16
-/* The status of the proxy as described in MC_CMD_PROXY_COMPLETE. */
-#define MC_PROXY_STATUS_BUFFER_STATUS_OFST 10
-#define MC_PROXY_STATUS_BUFFER_STATUS_LEN 2
-#define MC_PROXY_STATUS_BUFFER_STATUS_LBN 80
-#define MC_PROXY_STATUS_BUFFER_STATUS_WIDTH 16
-/* If a request is authorized rather than carried out by the host, this is the
- * elevated privilege mask granted to the requesting function.
- */
-#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_OFST 12
-#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LEN 4
-#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LBN 96
-#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_WIDTH 32
-
-
-/***********************************/
-/* MC_CMD_PROXY_CONFIGURE
- * Enable/disable authorization of MCDI requests from unprivileged functions by
- * a designated admin function
- */
-#define MC_CMD_PROXY_CONFIGURE 0x58
-#undef MC_CMD_0x58_PRIVILEGE_CTG
-
-#define MC_CMD_0x58_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_PROXY_CONFIGURE_IN msgrequest */
-#define MC_CMD_PROXY_CONFIGURE_IN_LEN 108
-#define MC_CMD_PROXY_CONFIGURE_IN_FLAGS_OFST 0
-#define MC_CMD_PROXY_CONFIGURE_IN_FLAGS_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_OFST 0
-#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_LBN 0
-#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_WIDTH 1
-/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
- * of blocks, each of the size REQUEST_BLOCK_SIZE.
- */
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_OFST 4
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LEN 8
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_OFST 4
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_LBN 32
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_WIDTH 32
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_OFST 8
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_LBN 64
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_WIDTH 32
-/* Must be a power of 2 */
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_OFST 12
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_LEN 4
-/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
- * of blocks, each of the size REPLY_BLOCK_SIZE.
- */
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_OFST 16
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LEN 8
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_OFST 16
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_LBN 128
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_WIDTH 32
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_OFST 20
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_LBN 160
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_WIDTH 32
-/* Must be a power of 2 */
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_OFST 24
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_LEN 4
-/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
- * of blocks, each of the size STATUS_BLOCK_SIZE. This buffer is only needed if
- * host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
- */
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_OFST 28
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LEN 8
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_OFST 28
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_LBN 224
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_WIDTH 32
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_OFST 32
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_LBN 256
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_WIDTH 32
-/* Must be a power of 2, or zero if this buffer is not provided */
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_OFST 36
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_LEN 4
-/* Applies to all three buffers */
-#define MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_OFST 40
-#define MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_LEN 4
-/* A bit mask defining which MCDI operations may be proxied */
-#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_OFST 44
-#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_LEN 64
-
-/* MC_CMD_PROXY_CONFIGURE_EXT_IN msgrequest */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_LEN 112
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_OFST 0
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_OFST 0
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_LBN 0
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_WIDTH 1
-/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
- * of blocks, each of the size REQUEST_BLOCK_SIZE.
- */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_OFST 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LEN 8
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_OFST 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_LBN 32
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_WIDTH 32
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_OFST 8
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_LBN 64
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_WIDTH 32
-/* Must be a power of 2 */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_OFST 12
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_LEN 4
-/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
- * of blocks, each of the size REPLY_BLOCK_SIZE.
- */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_OFST 16
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LEN 8
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_OFST 16
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_LBN 128
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_WIDTH 32
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_OFST 20
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_LBN 160
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_WIDTH 32
-/* Must be a power of 2 */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_OFST 24
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_LEN 4
-/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
- * of blocks, each of the size STATUS_BLOCK_SIZE. This buffer is only needed if
- * host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
- */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_OFST 28
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LEN 8
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_OFST 28
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_LBN 224
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_WIDTH 32
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_OFST 32
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_LBN 256
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_WIDTH 32
-/* Must be a power of 2, or zero if this buffer is not provided */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_OFST 36
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_LEN 4
-/* Applies to all three buffers */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_OFST 40
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_LEN 4
-/* A bit mask defining which MCDI operations may be proxied */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_OFST 44
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_LEN 64
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_OFST 108
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_LEN 4
-
-/* MC_CMD_PROXY_CONFIGURE_OUT msgresponse */
-#define MC_CMD_PROXY_CONFIGURE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_PROXY_COMPLETE
- * Tells FW that a requested proxy operation has either been completed (by
- * using MC_CMD_PROXY_CMD) or authorized/declined. May only be sent by the
- * function that enabled proxying/authorization (by using
- * MC_CMD_PROXY_CONFIGURE).
- */
-#define MC_CMD_PROXY_COMPLETE 0x5f
-#undef MC_CMD_0x5f_PRIVILEGE_CTG
-
-#define MC_CMD_0x5f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_PROXY_COMPLETE_IN msgrequest */
-#define MC_CMD_PROXY_COMPLETE_IN_LEN 12
-#define MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_OFST 0
-#define MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_LEN 4
-#define MC_CMD_PROXY_COMPLETE_IN_STATUS_OFST 4
-#define MC_CMD_PROXY_COMPLETE_IN_STATUS_LEN 4
-/* enum: The operation has been completed by using MC_CMD_PROXY_CMD, the reply
- * is stored in the REPLY_BUFF.
- */
-#define MC_CMD_PROXY_COMPLETE_IN_COMPLETE 0x0
-/* enum: The operation has been authorized. The originating function may now
- * try again.
- */
-#define MC_CMD_PROXY_COMPLETE_IN_AUTHORIZED 0x1
-/* enum: The operation has been declined. */
-#define MC_CMD_PROXY_COMPLETE_IN_DECLINED 0x2
-/* enum: The authorization failed because the relevant application did not
- * respond in time.
- */
-#define MC_CMD_PROXY_COMPLETE_IN_TIMEDOUT 0x3
-#define MC_CMD_PROXY_COMPLETE_IN_HANDLE_OFST 8
-#define MC_CMD_PROXY_COMPLETE_IN_HANDLE_LEN 4
-
-/* MC_CMD_PROXY_COMPLETE_OUT msgresponse */
-#define MC_CMD_PROXY_COMPLETE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_ALLOC_BUFTBL_CHUNK
- * Allocate a set of buffer table entries using the specified owner ID. This
- * operation allocates the required buffer table entries (and fails if it
- * cannot do so). The buffer table entries will initially be zeroed.
- */
-#define MC_CMD_ALLOC_BUFTBL_CHUNK 0x87
-#undef MC_CMD_0x87_PRIVILEGE_CTG
-
-#define MC_CMD_0x87_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
-
-/* MC_CMD_ALLOC_BUFTBL_CHUNK_IN msgrequest */
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN 8
-/* Owner ID to use */
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_OFST 0
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_LEN 4
-/* Size of buffer table pages to use, in bytes (note that only a few values are
- * legal on any specific hardware).
- */
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_OFST 4
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_LEN 4
-
-/* MC_CMD_ALLOC_BUFTBL_CHUNK_OUT msgresponse */
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_LEN 12
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_OFST 0
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_LEN 4
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_OFST 4
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_LEN 4
-/* Buffer table IDs for use in DMA descriptors. */
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_OFST 8
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_LEN 4
-
-
-/***********************************/
-/* MC_CMD_PROGRAM_BUFTBL_ENTRIES
- * Reprogram a set of buffer table entries in the specified chunk.
- */
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES 0x88
-#undef MC_CMD_0x88_PRIVILEGE_CTG
-
-#define MC_CMD_0x88_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
-
-/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN msgrequest */
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMIN 20
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 268
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX_MCDI2 268
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num) (12+8*(num))
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_NUM(len) (((len)-12)/8)
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST 0
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_LEN 4
-/* ID */
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_OFST 4
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_LEN 4
-/* Num entries */
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 8
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_LEN 4
-/* Buffer table entry address */
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_OFST 12
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LEN 8
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_OFST 12
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_LEN 4
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_LBN 96
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_WIDTH 32
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_OFST 16
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_LEN 4
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_LBN 128
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_WIDTH 32
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MINNUM 1
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM 32
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM_MCDI2 32
-
-/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT msgresponse */
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_FREE_BUFTBL_CHUNK
- */
-#define MC_CMD_FREE_BUFTBL_CHUNK 0x89
-#undef MC_CMD_0x89_PRIVILEGE_CTG
-
-#define MC_CMD_0x89_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
-
-/* MC_CMD_FREE_BUFTBL_CHUNK_IN msgrequest */
-#define MC_CMD_FREE_BUFTBL_CHUNK_IN_LEN 4
-#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_OFST 0
-#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_LEN 4
-
-/* MC_CMD_FREE_BUFTBL_CHUNK_OUT msgresponse */
-#define MC_CMD_FREE_BUFTBL_CHUNK_OUT_LEN 0
-
/***********************************/
/* MC_CMD_FILTER_OP
@@ -12822,6 +13194,10 @@
#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_VNIC_ENCAP_MATCHES 0x5
/* enum: read the supported encapsulation types for the VNIC */
#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_VNIC_ENCAP_TYPES 0x6
+/* enum: read the supported RX filter matches for low-latency queues (as
+ * allocated by MC_CMD_ALLOC_LL_QUEUES)
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_LL_RX_MATCHES 0x7
/* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */
#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8
@@ -12860,6 +13236,48 @@
#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_LBN 0
#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_WIDTH 1
+/* MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT msgresponse:
+ * GET_PARSER_DISP_INFO response format for OP_GET_SECURITY_RULE_INFO.
+ * (Medford-only; for use by SolarSecure apps, not directly by drivers. See
+ * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet
+ * been used in any released code and may change during development. This note
+ * will be removed once it is regarded as stable.
+ */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_LEN 36
+/* identifies the type of operation requested */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_OP_OFST 0
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_OP_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+/* a version number representing the set of rule lookups that are implemented
+ * by the currently running firmware
+ */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_OFST 4
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_LEN 4
+/* enum: implements lookup sequences described in SF-114946-SW draft C */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_SF_114946_SW_C 0x0
+/* the number of nodes in the subnet map */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_NODES_OFST 8
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_NODES_LEN 4
+/* the number of entries in one subnet map node */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_ENTRIES_PER_NODE_OFST 12
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_ENTRIES_PER_NODE_LEN 4
+/* minimum valid value for a subnet ID in a subnet map leaf */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MIN_OFST 16
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MIN_LEN 4
+/* maximum valid value for a subnet ID in a subnet map leaf */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MAX_OFST 20
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MAX_LEN 4
+/* the number of entries in the local and remote port range maps */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_TREE_NUM_ENTRIES_OFST 24
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_TREE_NUM_ENTRIES_LEN 4
+/* minimum valid value for a portrange ID in a port range map leaf */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MIN_OFST 28
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MIN_LEN 4
+/* maximum valid value for a portrange ID in a port range map leaf */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MAX_OFST 32
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MAX_LEN 4
+
/* MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT msgresponse: This response is
* returned if a MC_CMD_GET_PARSER_DISP_INFO_IN request is sent with OP value
* OP_GET_SUPPORTED_VNIC_ENCAP_MATCHES. It contains information about the
@@ -12914,136 +13332,6 @@
/***********************************/
-/* MC_CMD_PARSER_DISP_RW
- * Direct read/write of parser-dispatcher state (DICPUs and LUE) for debugging.
- * Please note that this interface is only of use to debug tools which have
- * knowledge of firmware and hardware data structures; nothing here is intended
- * for use by normal driver code. Note that although this command is in the
- * Admin privilege group, in tamperproof adapters, only read operations are
- * permitted.
- */
-#define MC_CMD_PARSER_DISP_RW 0xe5
-#undef MC_CMD_0xe5_PRIVILEGE_CTG
-
-#define MC_CMD_0xe5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_PARSER_DISP_RW_IN msgrequest */
-#define MC_CMD_PARSER_DISP_RW_IN_LEN 32
-/* identifies the target of the operation */
-#define MC_CMD_PARSER_DISP_RW_IN_TARGET_OFST 0
-#define MC_CMD_PARSER_DISP_RW_IN_TARGET_LEN 4
-/* enum: RX dispatcher CPU */
-#define MC_CMD_PARSER_DISP_RW_IN_RX_DICPU 0x0
-/* enum: TX dispatcher CPU */
-#define MC_CMD_PARSER_DISP_RW_IN_TX_DICPU 0x1
-/* enum: Lookup engine (with original metadata format). Deprecated; used only
- * by cmdclient as a fallback for very old Huntington firmware, and not
- * supported in firmware beyond v6.4.0.1005. Use LUE_VERSIONED_METADATA
- * instead.
- */
-#define MC_CMD_PARSER_DISP_RW_IN_LUE 0x2
-/* enum: Lookup engine (with requested metadata format) */
-#define MC_CMD_PARSER_DISP_RW_IN_LUE_VERSIONED_METADATA 0x3
-/* enum: RX0 dispatcher CPU (alias for RX_DICPU; Medford has 2 RX DICPUs) */
-#define MC_CMD_PARSER_DISP_RW_IN_RX0_DICPU 0x0
-/* enum: RX1 dispatcher CPU (only valid for Medford) */
-#define MC_CMD_PARSER_DISP_RW_IN_RX1_DICPU 0x4
-/* enum: Miscellaneous other state (only valid for Medford) */
-#define MC_CMD_PARSER_DISP_RW_IN_MISC_STATE 0x5
-/* identifies the type of operation requested */
-#define MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4
-#define MC_CMD_PARSER_DISP_RW_IN_OP_LEN 4
-/* enum: Read a word of DICPU DMEM or a LUE entry */
-#define MC_CMD_PARSER_DISP_RW_IN_READ 0x0
-/* enum: Write a word of DICPU DMEM or a LUE entry. Not permitted on
- * tamperproof adapters.
- */
-#define MC_CMD_PARSER_DISP_RW_IN_WRITE 0x1
-/* enum: Read-modify-write a word of DICPU DMEM (not valid for LUE). Not
- * permitted on tamperproof adapters.
- */
-#define MC_CMD_PARSER_DISP_RW_IN_RMW 0x2
-/* data memory address (DICPU targets) or LUE index (LUE targets) */
-#define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_OFST 8
-#define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_LEN 4
-/* selector (for MISC_STATE target) */
-#define MC_CMD_PARSER_DISP_RW_IN_SELECTOR_OFST 8
-#define MC_CMD_PARSER_DISP_RW_IN_SELECTOR_LEN 4
-/* enum: Port to datapath mapping */
-#define MC_CMD_PARSER_DISP_RW_IN_PORT_DP_MAPPING 0x1
-/* value to write (for DMEM writes) */
-#define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_OFST 12
-#define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_LEN 4
-/* XOR value (for DMEM read-modify-writes: new = (old & mask) ^ value) */
-#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_OFST 12
-#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_LEN 4
-/* AND mask (for DMEM read-modify-writes: new = (old & mask) ^ value) */
-#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_OFST 16
-#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_LEN 4
-/* metadata format (for LUE reads using LUE_VERSIONED_METADATA) */
-#define MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_OFST 12
-#define MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_LEN 4
-/* value to write (for LUE writes) */
-#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_OFST 12
-#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_LEN 20
-
-/* MC_CMD_PARSER_DISP_RW_OUT msgresponse */
-#define MC_CMD_PARSER_DISP_RW_OUT_LEN 52
-/* value read (for DMEM reads) */
-#define MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_OFST 0
-#define MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_LEN 4
-/* value read (for LUE reads) */
-#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_OFST 0
-#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_LEN 20
-/* up to 8 32-bit words of additional soft state from the LUE manager (the
- * exact content is firmware-dependent and intended only for debug use)
- */
-#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_OFST 20
-#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_LEN 32
-/* datapath(s) used for each port (for MISC_STATE PORT_DP_MAPPING selector) */
-#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_OFST 0
-#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_LEN 4
-#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_NUM 4
-#define MC_CMD_PARSER_DISP_RW_OUT_DP0 0x1 /* enum */
-#define MC_CMD_PARSER_DISP_RW_OUT_DP1 0x2 /* enum */
-
-
-/***********************************/
-/* MC_CMD_GET_PF_COUNT
- * Get number of PFs on the device.
- */
-#define MC_CMD_GET_PF_COUNT 0xb6
-#undef MC_CMD_0xb6_PRIVILEGE_CTG
-
-#define MC_CMD_0xb6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_PF_COUNT_IN msgrequest */
-#define MC_CMD_GET_PF_COUNT_IN_LEN 0
-
-/* MC_CMD_GET_PF_COUNT_OUT msgresponse */
-#define MC_CMD_GET_PF_COUNT_OUT_LEN 1
-/* Identifies the number of PFs on the device. */
-#define MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_OFST 0
-#define MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_LEN 1
-
-
-/***********************************/
-/* MC_CMD_SET_PF_COUNT
- * Set number of PFs on the device.
- */
-#define MC_CMD_SET_PF_COUNT 0xb7
-
-/* MC_CMD_SET_PF_COUNT_IN msgrequest */
-#define MC_CMD_SET_PF_COUNT_IN_LEN 4
-/* New number of PFs on the device. */
-#define MC_CMD_SET_PF_COUNT_IN_PF_COUNT_OFST 0
-#define MC_CMD_SET_PF_COUNT_IN_PF_COUNT_LEN 4
-
-/* MC_CMD_SET_PF_COUNT_OUT msgresponse */
-#define MC_CMD_SET_PF_COUNT_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_GET_PORT_ASSIGNMENT
* Get port assignment for current PCI function.
*/
@@ -13069,25 +13357,6 @@
/***********************************/
-/* MC_CMD_SET_PORT_ASSIGNMENT
- * Set port assignment for current PCI function.
- */
-#define MC_CMD_SET_PORT_ASSIGNMENT 0xb9
-#undef MC_CMD_0xb9_PRIVILEGE_CTG
-
-#define MC_CMD_0xb9_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SET_PORT_ASSIGNMENT_IN msgrequest */
-#define MC_CMD_SET_PORT_ASSIGNMENT_IN_LEN 4
-/* Identifies the port assignment for this function. */
-#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_OFST 0
-#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_LEN 4
-
-/* MC_CMD_SET_PORT_ASSIGNMENT_OUT msgresponse */
-#define MC_CMD_SET_PORT_ASSIGNMENT_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_ALLOC_VIS
* Allocate VIs for current PCI function.
*/
@@ -13184,263 +13453,6 @@
/***********************************/
-/* MC_CMD_SET_SRIOV_CFG
- * Set SRIOV config for this PF.
- */
-#define MC_CMD_SET_SRIOV_CFG 0xbb
-#undef MC_CMD_0xbb_PRIVILEGE_CTG
-
-#define MC_CMD_0xbb_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SET_SRIOV_CFG_IN msgrequest */
-#define MC_CMD_SET_SRIOV_CFG_IN_LEN 20
-/* Number of VFs currently enabled. */
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_OFST 0
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_LEN 4
-/* Max number of VFs before sriov stride and offset may need to be changed. */
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_OFST 4
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_LEN 4
-#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_OFST 8
-#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_LEN 4
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_OFST 8
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_LBN 0
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_WIDTH 1
-/* RID offset of first VF from PF, or 0 for no change, or
- * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate an offset.
- */
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_OFST 12
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_LEN 4
-/* RID offset of each subsequent VF from the previous, 0 for no change, or
- * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate a stride.
- */
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_OFST 16
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_LEN 4
-
-/* MC_CMD_SET_SRIOV_CFG_OUT msgresponse */
-#define MC_CMD_SET_SRIOV_CFG_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_GET_VI_ALLOC_INFO
- * Get information about number of VI's and base VI number allocated to this
- * function. This message is not available to dynamic clients created by
- * MC_CMD_CLIENT_ALLOC.
- */
-#define MC_CMD_GET_VI_ALLOC_INFO 0x8d
-#undef MC_CMD_0x8d_PRIVILEGE_CTG
-
-#define MC_CMD_0x8d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_VI_ALLOC_INFO_IN msgrequest */
-#define MC_CMD_GET_VI_ALLOC_INFO_IN_LEN 0
-
-/* MC_CMD_GET_VI_ALLOC_INFO_OUT msgresponse */
-#define MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 12
-/* The number of VIs allocated on this function */
-#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_OFST 0
-#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_LEN 4
-/* The base absolute VI number allocated to this function. Required to
- * correctly interpret wakeup events.
- */
-#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_OFST 4
-#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_LEN 4
-/* Function's port vi_shift value (always 0 on Huntington) */
-#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_OFST 8
-#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_LEN 4
-
-
-/***********************************/
-/* MC_CMD_DUMP_VI_STATE
- * For CmdClient use. Dump pertinent information on a specific absolute VI. The
- * VI must be owned by the calling client or one of its ancestors; usership of
- * the VI (as set by MC_CMD_SET_VI_USER) is not sufficient.
- */
-#define MC_CMD_DUMP_VI_STATE 0x8e
-#undef MC_CMD_0x8e_PRIVILEGE_CTG
-
-#define MC_CMD_0x8e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_DUMP_VI_STATE_IN msgrequest */
-#define MC_CMD_DUMP_VI_STATE_IN_LEN 4
-/* The VI number to query. */
-#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_OFST 0
-#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_LEN 4
-
-/* MC_CMD_DUMP_VI_STATE_OUT msgresponse */
-#define MC_CMD_DUMP_VI_STATE_OUT_LEN 100
-/* The PF part of the function owning this VI. */
-#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_OFST 0
-#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_LEN 2
-/* The VF part of the function owning this VI. */
-#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_OFST 2
-#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_LEN 2
-/* Base of VIs allocated to this function. */
-#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_OFST 4
-#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_LEN 2
-/* Count of VIs allocated to the owner function. */
-#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_OFST 6
-#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_LEN 2
-/* Base interrupt vector allocated to this function. */
-#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_OFST 8
-#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_LEN 2
-/* Number of interrupt vectors allocated to this function. */
-#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_OFST 10
-#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_LEN 2
-/* Raw evq ptr table data. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_OFST 12
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_OFST 12
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_LBN 96
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_OFST 16
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_LBN 128
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_WIDTH 32
-/* Raw evq timer table data. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_OFST 20
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_OFST 20
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_LBN 160
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_OFST 24
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_LBN 192
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_WIDTH 32
-/* Combined metadata field. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_OFST 28
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_OFST 28
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_LBN 0
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_WIDTH 16
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_OFST 28
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_LBN 16
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_WIDTH 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_OFST 28
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_LBN 24
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_WIDTH 8
-/* TXDPCPU raw table data for queue. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_OFST 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_OFST 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_LBN 256
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_OFST 36
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_LBN 288
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_WIDTH 32
-/* TXDPCPU raw table data for queue. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_OFST 40
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_OFST 40
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_LBN 320
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_OFST 44
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_LBN 352
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_WIDTH 32
-/* TXDPCPU raw table data for queue. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_OFST 48
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_OFST 48
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_LBN 384
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_OFST 52
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_LBN 416
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_WIDTH 32
-/* Combined metadata field. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_OFST 56
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_OFST 56
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_LBN 448
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_OFST 60
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_LBN 480
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_OFST 56
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_LBN 0
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_WIDTH 16
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_OFST 56
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_LBN 16
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_WIDTH 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_OFST 56
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_LBN 24
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_WIDTH 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_OFST 56
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_LBN 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_WIDTH 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_OFST 56
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_LBN 40
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_WIDTH 24
-/* RXDPCPU raw table data for queue. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_OFST 64
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_OFST 64
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_LBN 512
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_OFST 68
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_LBN 544
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_WIDTH 32
-/* RXDPCPU raw table data for queue. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_OFST 72
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_OFST 72
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_LBN 576
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_OFST 76
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_LBN 608
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_WIDTH 32
-/* Reserved, currently 0. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_OFST 80
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_OFST 80
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_LBN 640
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_OFST 84
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_LBN 672
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_WIDTH 32
-/* Combined metadata field. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_OFST 88
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_OFST 88
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_LBN 704
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_OFST 92
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_LBN 736
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_OFST 88
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_LBN 0
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_WIDTH 16
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_OFST 88
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_LBN 16
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_WIDTH 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_OFST 88
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_LBN 24
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_WIDTH 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_OFST 88
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_LBN 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_WIDTH 8
-/* Current user, as assigned by MC_CMD_SET_VI_USER. */
-#define MC_CMD_DUMP_VI_STATE_OUT_USER_CLIENT_ID_OFST 96
-#define MC_CMD_DUMP_VI_STATE_OUT_USER_CLIENT_ID_LEN 4
-
-
-/***********************************/
/* MC_CMD_ALLOC_PIOBUF
* Allocate a push I/O buffer for later use with a tx queue.
*/
@@ -13491,354 +13503,102 @@
/* MC_CMD_GET_VI_TLP_PROCESSING_IN msgrequest */
#define MC_CMD_GET_VI_TLP_PROCESSING_IN_LEN 4
-/* VI number to get information for. */
+/* Queue handle, encodes queue type and VI number to get information for. */
#define MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
#define MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_LEN 4
-/* MC_CMD_GET_VI_TLP_PROCESSING_OUT msgresponse */
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_LEN 4
-/* Transaction processing steering hint 1 for use with the Rx Queue. */
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG1_RX_OFST 0
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG1_RX_LEN 1
-/* Transaction processing steering hint 2 for use with the Ev Queue. */
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG2_EV_OFST 1
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG2_EV_LEN 1
-/* Use Relaxed ordering model for TLPs on this VI. */
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_LBN 16
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_WIDTH 1
-/* Use ID based ordering for TLPs on this VI. */
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_ID_BASED_ORDERING_LBN 17
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_ID_BASED_ORDERING_WIDTH 1
-/* Set no snoop bit for TLPs on this VI. */
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_NO_SNOOP_LBN 18
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_NO_SNOOP_WIDTH 1
-/* Enable TPH for TLPs on this VI. */
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_LBN 19
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_WIDTH 1
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_OFST 0
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_LEN 4
+/* MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT msgresponse: This message has the same
+ * layout as GET_VI_TLP_PROCESSING_OUT, but with corrected field ordering to
+ * simplify use in drivers.
+ */
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_LEN 4
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_DATA_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_DATA_LEN 4
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_TAG1_RX_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_TAG1_RX_LBN 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_TAG1_RX_WIDTH 8
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_TAG2_EV_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_TAG2_EV_LBN 8
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_TAG2_EV_WIDTH 8
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_LBN 16
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_WIDTH 1
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_PACKET_DATA_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_PACKET_DATA_LBN 16
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_PACKET_DATA_WIDTH 1
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_ID_BASED_ORDERING_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_ID_BASED_ORDERING_LBN 17
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_ID_BASED_ORDERING_WIDTH 1
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_NO_SNOOP_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_NO_SNOOP_LBN 18
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_NO_SNOOP_WIDTH 1
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_ON_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_ON_LBN 19
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_ON_WIDTH 1
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_SYNC_DATA_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_SYNC_DATA_LBN 20
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_SYNC_DATA_WIDTH 1
/***********************************/
/* MC_CMD_SET_VI_TLP_PROCESSING
* Set TLP steering and ordering information for a VI. The caller must have the
* GRP_FUNC_DMA privilege and must be the currently-assigned user of this VI or
- * an ancestor of the current user (see MC_CMD_SET_VI_USER).
+ * an ancestor of the current user (see MC_CMD_SET_VI_USER). Note that LL
+ * queues require this to be called after allocation but before initialisation
+ * of the queue. The TLP options of a queue are fixed once the queue is
+ * initialised: they take the current global values unless overridden using
+ * this command. At LL queue allocation, all overrides are cleared.
*/
#define MC_CMD_SET_VI_TLP_PROCESSING 0xb1
#undef MC_CMD_0xb1_PRIVILEGE_CTG
#define MC_CMD_0xb1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-/* MC_CMD_SET_VI_TLP_PROCESSING_IN msgrequest */
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_LEN 8
-/* VI number to set information for. */
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_LEN 4
-/* Transaction processing steering hint 1 for use with the Rx Queue. */
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_OFST 4
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_LEN 1
-/* Transaction processing steering hint 2 for use with the Ev Queue. */
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG2_EV_OFST 5
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG2_EV_LEN 1
-/* Use Relaxed ordering model for TLPs on this VI. */
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_RELAXED_ORDERING_LBN 48
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_RELAXED_ORDERING_WIDTH 1
-/* Use ID based ordering for TLPs on this VI. */
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_ID_BASED_ORDERING_LBN 49
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_ID_BASED_ORDERING_WIDTH 1
-/* Set the no snoop bit for TLPs on this VI. */
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_NO_SNOOP_LBN 50
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_NO_SNOOP_WIDTH 1
-/* Enable TPH for TLPs on this VI. */
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_LBN 51
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_WIDTH 1
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_OFST 4
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_LEN 4
+/* MC_CMD_SET_VI_TLP_PROCESSING_V2_IN msgrequest: This message has the same
+ * layout as SET_VI_TLP_PROCESSING_IN, but with corrected field ordering to
+ * simplify use in drivers.
+ */
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_LEN 8
+/* Queue handle, encodes queue type and VI number to set information for. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_INSTANCE_OFST 0
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_INSTANCE_LEN 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_DATA_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_DATA_LEN 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_TAG1_RX_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_TAG1_RX_LBN 0
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_TAG1_RX_WIDTH 8
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_TAG2_EV_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_TAG2_EV_LBN 8
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_TAG2_EV_WIDTH 8
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_LBN 16
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_WIDTH 1
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_PACKET_DATA_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_PACKET_DATA_LBN 16
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_PACKET_DATA_WIDTH 1
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_ID_BASED_ORDERING_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_ID_BASED_ORDERING_LBN 17
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_ID_BASED_ORDERING_WIDTH 1
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_NO_SNOOP_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_NO_SNOOP_LBN 18
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_NO_SNOOP_WIDTH 1
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_ON_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_ON_LBN 19
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_ON_WIDTH 1
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_SYNC_DATA_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_SYNC_DATA_LBN 20
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_SYNC_DATA_WIDTH 1
/* MC_CMD_SET_VI_TLP_PROCESSING_OUT msgresponse */
#define MC_CMD_SET_VI_TLP_PROCESSING_OUT_LEN 0
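The V2 request above packs every option into the single 32-bit DATA dword at byte offset 4, with each field described by its LBN (bit offset within that dword) and WIDTH. Purely as an illustration of that convention, and not part of this patch, here is a minimal C sketch of how a caller could assemble the dword from the macros defined above; the helper and function names are invented for the example, and the *_LBN/*_WIDTH macros are assumed to be in scope (e.g. via mcdi_pcol.h).

/*
 * Illustrative sketch only, not part of the patch: assemble the DATA dword
 * for MC_CMD_SET_VI_TLP_PROCESSING_V2_IN from the LBN/WIDTH values above.
 * Helper names are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>

static inline uint32_t tlp_v2_field(uint32_t value, unsigned int lbn,
				    unsigned int width)
{
	/* Truncate the value to WIDTH bits and place it at bit LBN. */
	return (value & ((1u << width) - 1)) << lbn;
}

static uint32_t tlp_v2_build_data(uint8_t tph_tag1_rx, uint8_t tph_tag2_ev,
				  bool relaxed_ordering, bool no_snoop)
{
	uint32_t data = 0;

	data |= tlp_v2_field(tph_tag1_rx,
			     MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_TAG1_RX_LBN,
			     MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_TAG1_RX_WIDTH);
	data |= tlp_v2_field(tph_tag2_ev,
			     MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_TAG2_EV_LBN,
			     MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_TAG2_EV_WIDTH);
	data |= tlp_v2_field(relaxed_ordering,
			     MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_LBN,
			     MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_WIDTH);
	data |= tlp_v2_field(no_snoop,
			     MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_NO_SNOOP_LBN,
			     MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_NO_SNOOP_WIDTH);
	/* The result would be written little-endian at DATA_OFST (byte 4). */
	return data;
}

Decoding MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT would be the inverse: shift the DATA dword right by the field's LBN and mask it to the field's WIDTH.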
/***********************************/
-/* MC_CMD_GET_TLP_PROCESSING_GLOBALS
- * Get global PCIe steering and transaction processing configuration.
- */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS 0xbc
-#undef MC_CMD_0xbc_PRIVILEGE_CTG
-
-#define MC_CMD_0xbc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN msgrequest */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_LEN 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_LEN 4
-/* enum: MISC. */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_MISC 0x0
-/* enum: IDO. */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_IDO 0x1
-/* enum: RO. */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_RO 0x2
-/* enum: TPH Type. */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_TPH_TYPE 0x3
-
-/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT msgresponse */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_LEN 8
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_OFST 0
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
-/* Amalgamated TLP info word. */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_LEN 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_LBN 0
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_WIDTH 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_LBN 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_WIDTH 31
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_LBN 0
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_WIDTH 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_LBN 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_WIDTH 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_LBN 2
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_WIDTH 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_LBN 3
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_WIDTH 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_LBN 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_WIDTH 28
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_LBN 0
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_WIDTH 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_LBN 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_WIDTH 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_LBN 2
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_WIDTH 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_LBN 3
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_WIDTH 29
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_LBN 0
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_WIDTH 2
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_LBN 2
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_WIDTH 2
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_LBN 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_WIDTH 2
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_LBN 6
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_WIDTH 2
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_LBN 8
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_WIDTH 2
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_LBN 9
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_WIDTH 23
-
-
-/***********************************/
-/* MC_CMD_SET_TLP_PROCESSING_GLOBALS
- * Set global PCIe steering and transaction processing configuration.
- */
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS 0xbd
-#undef MC_CMD_0xbd_PRIVILEGE_CTG
-
-#define MC_CMD_0xbd_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN msgrequest */
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_LEN 8
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_GET_TLP_PROCESSING_GLOBALS/MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
-/* Amalgamated TLP info word. */
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_LEN 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_LBN 0
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_WIDTH 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_LBN 0
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_WIDTH 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_LBN 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_WIDTH 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_LBN 2
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_WIDTH 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_LBN 3
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_WIDTH 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_LBN 0
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_WIDTH 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_LBN 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_WIDTH 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_LBN 2
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_WIDTH 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_LBN 0
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_WIDTH 2
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_LBN 2
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_WIDTH 2
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_LBN 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_WIDTH 2
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_LBN 6
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_WIDTH 2
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_LBN 8
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_WIDTH 2
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_LBN 10
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_WIDTH 22
-
-/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_OUT msgresponse */
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_SATELLITE_DOWNLOAD
- * Download a new set of images to the satellite CPUs from the host.
- */
-#define MC_CMD_SATELLITE_DOWNLOAD 0x91
-#undef MC_CMD_0x91_PRIVILEGE_CTG
-
-#define MC_CMD_0x91_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SATELLITE_DOWNLOAD_IN msgrequest: The reset requirements for the CPUs
- * are subtle, and so downloads must proceed in a number of phases.
- *
- * 1) PHASE_RESET with a target of TARGET_ALL and chunk ID/length of 0.
- *
- * 2) PHASE_IMEMS for each of the IMEM targets (target IDs 0-11). Each download
- * may consist of multiple chunks. The final chunk (with CHUNK_ID_LAST) should
- * be a checksum (a simple 32-bit sum) of the transferred data. An individual
- * download may be aborted using CHUNK_ID_ABORT.
- *
- * 3) PHASE_VECTORS for each of the vector table targets (target IDs 12-15),
- * similar to PHASE_IMEMS.
- *
- * 4) PHASE_READY with a target of TARGET_ALL and chunk ID/length of 0.
- *
- * After any error (a requested abort is not considered to be an error) the
- * sequence must be restarted from PHASE_RESET.
- */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMIN 20
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMAX 252
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMAX_MCDI2 1020
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_LEN(num) (16+4*(num))
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_NUM(len) (((len)-16)/4)
-/* Download phase. (Note: the IDLE phase is used internally and is never valid
- * in a command from the host.)
- */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_OFST 0
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_LEN 4
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IDLE 0x0 /* enum */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_RESET 0x1 /* enum */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS 0x2 /* enum */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_VECTORS 0x3 /* enum */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_READY 0x4 /* enum */
-/* Target for download. (These match the blob numbers defined in
- * mc_flash_layout.h.)
- */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_OFST 4
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_LEN 4
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_TEXT 0x0
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_TEXT 0x1
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDP_TEXT 0x2
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDP_TEXT 0x3
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT 0x4
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT_CFG 0x5
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT 0x6
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT_CFG 0x7
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_PGM 0x8
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_SL_PGM 0x9
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_PGM 0xa
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_SL_PGM 0xb
-/* enum: Valid in phase 3 (PHASE_VECTORS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL0 0xc
-/* enum: Valid in phase 3 (PHASE_VECTORS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL0 0xd
-/* enum: Valid in phase 3 (PHASE_VECTORS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL1 0xe
-/* enum: Valid in phase 3 (PHASE_VECTORS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL1 0xf
-/* enum: Valid in phases 1 (PHASE_RESET) and 4 (PHASE_READY) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_ALL 0xffffffff
-/* Chunk ID, or CHUNK_ID_LAST or CHUNK_ID_ABORT */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_OFST 8
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LEN 4
-/* enum: Last chunk, containing checksum rather than data */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LAST 0xffffffff
-/* enum: Abort download of this item */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_ABORT 0xfffffffe
-/* Length of this chunk in bytes */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_OFST 12
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_LEN 4
-/* Data for this chunk */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_OFST 16
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_LEN 4
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MINNUM 1
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MAXNUM 59
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MAXNUM_MCDI2 251
-
-/* MC_CMD_SATELLITE_DOWNLOAD_OUT msgresponse */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_LEN 8
-/* Same as MC_CMD_ERR field, but included as 0 in success cases */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_OFST 0
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_LEN 4
-/* Extra status information */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_OFST 4
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_LEN 4
-/* enum: Code download OK, completed. */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_COMPLETE 0x0
-/* enum: Code download aborted as requested. */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_ABORTED 0x1
-/* enum: Code download OK so far, send next chunk. */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_NEXT_CHUNK 0x2
-/* enum: Download phases out of sequence */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_PHASE 0x100
-/* enum: Bad target for this phase */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_TARGET 0x101
-/* enum: Chunk ID out of sequence */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_ID 0x200
-/* enum: Chunk length zero or too large */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_LEN 0x201
-/* enum: Checksum was incorrect */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHECKSUM 0x300
-
-
-/***********************************/
/* MC_CMD_GET_CAPABILITIES
- * Get device capabilities.
- *
- * This is supplementary to the MC_CMD_GET_BOARD_CFG command, and intended to
- * reference inherent device capabilities as opposed to current NVRAM config.
+ * Get device capabilities. This is supplementary to the MC_CMD_GET_BOARD_CFG
+ * command, and intended to reference inherent device capabilities as opposed
+ * to current NVRAM config.
*/
#define MC_CMD_GET_CAPABILITIES 0xbe
#undef MC_CMD_0xbe_PRIVILEGE_CTG
@@ -14490,7 +14250,10 @@
/* MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_NUM 4
@@ -14900,7 +14663,10 @@
/* MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_NUM 4
@@ -15335,7 +15101,10 @@
/* MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_NUM 4
@@ -15778,7 +15547,10 @@
/* MC_CMD_GET_CAPABILITIES_V5_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_NUM 4
@@ -16226,7 +15998,10 @@
/* MC_CMD_GET_CAPABILITIES_V6_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V6_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VIS_PER_PORT_NUM 4
@@ -16685,7 +16460,10 @@
/* MC_CMD_GET_CAPABILITIES_V7_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V7_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VIS_PER_PORT_NUM 4
@@ -16796,9 +16574,21 @@
#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148
#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12
#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148
#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14
#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_CLIENT_CMD_VF_PROXY_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_CLIENT_CMD_VF_PROXY_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_CLIENT_CMD_VF_PROXY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_CXL_CONFIG_ENABLE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_CXL_CONFIG_ENABLE_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_CXL_CONFIG_ENABLE_WIDTH 1
/* MC_CMD_GET_CAPABILITIES_V8_OUT msgresponse */
#define MC_CMD_GET_CAPABILITIES_V8_OUT_LEN 160
@@ -17189,7 +16979,10 @@
/* MC_CMD_GET_CAPABILITIES_V8_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V8_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VIS_PER_PORT_NUM 4
@@ -17300,9 +17093,21 @@
#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148
#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12
#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148
#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14
#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_CLIENT_CMD_VF_PROXY_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_CLIENT_CMD_VF_PROXY_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_CLIENT_CMD_VF_PROXY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_CXL_CONFIG_ENABLE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_CXL_CONFIG_ENABLE_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_CXL_CONFIG_ENABLE_WIDTH 1
/* These bits are reserved for communicating test-specific capabilities to
* host-side test software. All production drivers should treat this field as
* opaque.
@@ -17707,7 +17512,10 @@
/* MC_CMD_GET_CAPABILITIES_V9_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V9_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VIS_PER_PORT_NUM 4
@@ -17818,9 +17626,21 @@
#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148
#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12
#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148
#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14
#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_CLIENT_CMD_VF_PROXY_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_CLIENT_CMD_VF_PROXY_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_CLIENT_CMD_VF_PROXY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_CXL_CONFIG_ENABLE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_CXL_CONFIG_ENABLE_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_CXL_CONFIG_ENABLE_WIDTH 1
/* These bits are reserved for communicating test-specific capabilities to
* host-side test software. All production drivers should treat this field as
* opaque.
@@ -18260,7 +18080,10 @@
/* MC_CMD_GET_CAPABILITIES_V10_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V10_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VIS_PER_PORT_NUM 4
@@ -18371,9 +18194,21 @@
#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148
#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12
#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148
#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14
#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_CLIENT_CMD_VF_PROXY_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_CLIENT_CMD_VF_PROXY_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_CLIENT_CMD_VF_PROXY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_CXL_CONFIG_ENABLE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_CXL_CONFIG_ENABLE_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_CXL_CONFIG_ENABLE_WIDTH 1
/* These bits are reserved for communicating test-specific capabilities to
* host-side test software. All production drivers should treat this field as
* opaque.
@@ -18438,6 +18273,1182 @@
#define MC_CMD_GET_CAPABILITIES_V10_OUT_GUARANTEED_QUEUE_SIZES_OFST 188
#define MC_CMD_GET_CAPABILITIES_V10_OUT_GUARANTEED_QUEUE_SIZES_LEN 4
+/* MC_CMD_GET_CAPABILITIES_V11_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_LEN 196
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FLAGS1_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VPORT_RECONFIGURE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_STRIPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VADAPTOR_QUERY_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SET_MAC_ENHANCED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_QBB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_RSS_LIMITED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PACKED_STREAM_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_INCLUDE_FCS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_VLAN_INSERTION_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_VLAN_STRIPPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PREFIX_LEN_0_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PREFIX_LEN_14_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_TIMESTAMP_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_BATCHING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_DISABLE_SCATTER_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VXLAN_NVGRE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FLAGS2_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_ENCAP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVQ_TIMER_CTRL_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVENT_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_VFIFO_ULL_MODE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_EVQ_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MAC_TIMESTAMPING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TIMESTAMP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCDI_BACKGROUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCDI_DB_RETURN_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CTPIO_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TSA_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TSA_BOUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_FLAG_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_FLAG_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_MARK_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_MARK_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_L3XUDP_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_L3XUDP_SUPPORT_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_SPREADING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_SPREADING_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_HLB_IDLE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_HLB_IDLE_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_HLB_IDLE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_BUNDLE_UPDATE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_BUNDLE_UPDATE_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_BUNDLE_UPDATE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V3_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V3_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V3_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DYNAMIC_SENSORS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DYNAMIC_SENSORS_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DYNAMIC_SENSORS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC (when
+ * TX_TSO_V2 == 1). Not present on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that PF is assigned, but it cannot be expressed
+ * in this field. It is intended for a possible future situation where a more
+ * complex scheme of PFs to ports mapping is being used. The future driver
+ * should look for a new field supporting the new scheme. The current/old
+ * driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V11_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V11_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_DESC_CACHE_SIZE_LEN 1
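[Editorial note, not part of the patch: a minimal sketch of turning the two log2-encoded cache-size bytes above into entry counts; the response-buffer name "resp" and direct byte indexing of the payload are assumptions.]

/* Sketch only: decode the log2-encoded descriptor cache sizes from a
 * GET_CAPABILITIES_V11 response payload pointed to by "resp" (assumed name).
 */
static unsigned int v11_rx_desc_cache_entries(const unsigned char *resp)
{
	/* One byte holding log2 of the size, so the real size is 2 ^ value. */
	return 1u << resp[MC_CMD_GET_CAPABILITIES_V11_OUT_RX_DESC_CACHE_SIZE_OFST];
}

static unsigned int v11_tx_desc_cache_entries(const unsigned char *resp)
{
	return 1u << resp[MC_CMD_GET_CAPABILITIES_V11_OUT_TX_DESC_CACHE_SIZE_OFST];
}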
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_WINDOW_MODE_64K 0x2
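[Editorial note, not part of the patch: a hedged sketch mapping the VI_WINDOW_MODE byte to the per-VI window stride described by the enum comments above; unknown modes return 0 so a caller can reject them.]

/* Sketch only: per-VI BAR window stride implied by VI_WINDOW_MODE. */
static unsigned int v11_vi_window_stride(unsigned char vi_window_mode)
{
	switch (vi_window_mode) {
	case MC_CMD_GET_CAPABILITIES_V11_OUT_VI_WINDOW_MODE_8K:
		return 8 * 1024;   /* PIO at 4k; CTPIO not mapped */
	case MC_CMD_GET_CAPABILITIES_V11_OUT_VI_WINDOW_MODE_16K:
		return 16 * 1024;  /* PIO at 4k; CTPIO at 12k */
	case MC_CMD_GET_CAPABILITIES_V11_OUT_VI_WINDOW_MODE_64K:
		return 64 * 1024;  /* PIO at 4k; CTPIO at 12k */
	default:
		return 0;          /* unrecognised / future mode */
	}
}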
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* Entry count in the MAC stats array, including the final GENERATION_END
+ * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
+ * hold at least this many 64-bit stats values, if they wish to receive all
+ * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
+ * stats array returned will be truncated.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAC_STATS_NUM_STATS_LEN 2
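[Editorial note, not part of the patch: a minimal sketch of sizing a MAC stats DMA buffer from the field above; the 2-byte little-endian read and the "resp" buffer name are assumptions about how the payload is accessed.]

/* Sketch only: bytes needed to receive every 64-bit stat, including the
 * final GENERATION_END entry, per the comment above.
 */
static unsigned int v11_mac_stats_dma_len(const unsigned char *resp)
{
	unsigned int ofst = MC_CMD_GET_CAPABILITIES_V11_OUT_MAC_STATS_NUM_STATS_OFST;
	unsigned int nstats = resp[ofst] | (resp[ofst + 1] << 8); /* assumed LE */

	return nstats * 8;
}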
+/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field
+ * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_MARK_MAX_OFST 80
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_MARK_MAX_LEN 4
+/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in
+ * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when
+ * they create an RX queue. Due to hardware limitations, only a small number of
+ * different buffer sizes may be available concurrently. Nonzero entries in
+ * this array are the sizes of buffers which the system guarantees will be
+ * available for use. If the list is empty, there are no limitations on
+ * concurrent buffer sizes.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16
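[Editorial note, not part of the patch: a hedged sketch of checking a wanted RX buffer size against the guaranteed-sizes array above; an all-zero list is treated as "no restriction", and the "resp" buffer name plus the little-endian decode are assumptions.]

/* Sketch only: returns 1 if "wanted" appears in the guaranteed-size list,
 * or if the list is empty (no limitation on concurrent buffer sizes).
 */
static int v11_rx_buffer_size_guaranteed(const unsigned char *resp,
					 unsigned int wanted)
{
	unsigned int i, nonzero = 0;

	for (i = 0; i < MC_CMD_GET_CAPABILITIES_V11_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM; i++) {
		unsigned int ofst = MC_CMD_GET_CAPABILITIES_V11_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST +
			i * MC_CMD_GET_CAPABILITIES_V11_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN;
		unsigned int size = resp[ofst] | (resp[ofst + 1] << 8) |
				    (resp[ofst + 2] << 16) |
				    ((unsigned int)resp[ofst + 3] << 24);

		if (size == wanted)
			return 1;
		if (size)
			nonzero++;
	}
	return nonzero == 0;
}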
+/* Third word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FLAGS3_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FLAGS3_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_WOL_ETHERWAKE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_WOL_ETHERWAKE_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_WOL_ETHERWAKE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_EVEN_SPREADING_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_EVEN_SPREADING_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_EVEN_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_SELECTABLE_TABLE_SIZE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_SELECTABLE_TABLE_SIZE_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_SELECTABLE_TABLE_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_SUPPORTED_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VDPA_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VDPA_SUPPORTED_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VDPA_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_ENCAPSULATED_MCDI_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_ENCAPSULATED_MCDI_SUPPORTED_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_ENCAPSULATED_MCDI_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EXTERNAL_MAE_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EXTERNAL_MAE_SUPPORTED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EXTERNAL_MAE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CLIENT_CMD_VF_PROXY_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CLIENT_CMD_VF_PROXY_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CLIENT_CMD_VF_PROXY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CXL_CONFIG_ENABLE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CXL_CONFIG_ENABLE_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CXL_CONFIG_ENABLE_WIDTH 1
+/* These bits are reserved for communicating test-specific capabilities to
+ * host-side test software. All production drivers should treat this field as
+ * opaque.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_OFST 152
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_LEN 8
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_LO_OFST 152
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_LO_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_LO_LBN 1216
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_LO_WIDTH 32
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_HI_OFST 156
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_HI_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_HI_LBN 1248
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_HI_WIDTH 32
+/* The minimum size (in table entries) of indirection table to be allocated
+ * from the pool for an RSS context. Note that the table size used must be a
+ * power of 2.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_OFST 160
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_LEN 4
+/* The maximum size (in table entries) of indirection table to be allocated
+ * from the pool for an RSS context. Note that the table size used must be a
+ * power of 2.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_OFST 164
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_LEN 4
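[Editorial note, not part of the patch: a minimal sketch of picking a power-of-two indirection table size within the min/max bounds above for a given queue count; it assumes the reported bounds are themselves powers of two.]

/* Sketch only: smallest power-of-two table size >= nqueues, clamped to the
 * [min, max] range reported by the two fields above (assumed powers of two).
 */
static unsigned int rss_indirection_table_size(unsigned int nqueues,
					       unsigned int min_size,
					       unsigned int max_size)
{
	unsigned int size = min_size;

	while (size < nqueues && size < max_size)
		size <<= 1;
	return size;
}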
+/* The maximum number of queues that can be used by an RSS context in exclusive
+ * mode. In exclusive mode the context has a configurable indirection table and
+ * a configurable RSS key.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MAX_INDIRECTION_QUEUES_OFST 168
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MAX_INDIRECTION_QUEUES_LEN 4
+/* The maximum number of queues that can be used by an RSS context in even-
+ * spreading mode. In even-spreading mode the context has no indirection table
+ * but it does have a configurable RSS key.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_OFST 172
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_LEN 4
+/* The total number of RSS contexts supported. Note that the number of
+ * available contexts using indirection tables is also limited by the
+ * availability of indirection table space allocated from a common pool.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_NUM_CONTEXTS_OFST 176
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_NUM_CONTEXTS_LEN 4
+/* The total amount of indirection table space that can be shared between RSS
+ * contexts.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_TABLE_POOL_SIZE_OFST 180
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_TABLE_POOL_SIZE_LEN 4
+/* A bitmap of the queue sizes the device can provide, where bit N being set
+ * indicates that 2**N is a valid size. The device may be limited in the number
+ * of different queue sizes that can exist simultaneously, so a bit being set
+ * here does not guarantee that an attempt to create a queue of that size will
+ * succeed.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SUPPORTED_QUEUE_SIZES_OFST 184
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SUPPORTED_QUEUE_SIZES_LEN 4
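[Editorial note, not part of the patch: a hedged sketch of testing the SUPPORTED_QUEUE_SIZES bitmap above, where bit N set means 2**N entries is a valid size; per the comment, a set bit still does not guarantee queue creation will succeed. "supported" is assumed to be the already-decoded 32-bit field.]

/* Sketch only: is a power-of-two queue size advertised in the bitmap? */
static int queue_size_advertised(unsigned int supported, unsigned int entries)
{
	unsigned int log2_entries = 0;

	if (entries == 0 || (entries & (entries - 1)))
		return 0;			/* only power-of-two sizes */
	while ((1u << log2_entries) < entries)
		log2_entries++;
	return (supported >> log2_entries) & 1;
}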
+/* A bitmap of queue sizes that are always available, in the same format as
+ * SUPPORTED_QUEUE_SIZES. Attempting to create a queue with one of these sizes
+ * will never fail due to unavailability of the requested size.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_GUARANTEED_QUEUE_SIZES_OFST 188
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_GUARANTEED_QUEUE_SIZES_LEN 4
+/* Number of available indirect memory maps. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INDIRECT_MAP_INDEX_COUNT_OFST 192
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INDIRECT_MAP_INDEX_COUNT_LEN 4
+
+/* MC_CMD_GET_CAPABILITIES_V12_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_LEN 204
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FLAGS1_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VPORT_RECONFIGURE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_STRIPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VADAPTOR_QUERY_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SET_MAC_ENHANCED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_QBB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_RSS_LIMITED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PACKED_STREAM_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_INCLUDE_FCS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_VLAN_INSERTION_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_VLAN_STRIPPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PREFIX_LEN_0_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PREFIX_LEN_14_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_TIMESTAMP_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_BATCHING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_DISABLE_SCATTER_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VXLAN_NVGRE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FLAGS2_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_ENCAP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVQ_TIMER_CTRL_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVENT_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_VFIFO_ULL_MODE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_EVQ_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MAC_TIMESTAMPING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TIMESTAMP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCDI_BACKGROUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCDI_DB_RETURN_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CTPIO_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TSA_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TSA_BOUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_FLAG_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_FLAG_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_MARK_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_MARK_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_L3XUDP_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_L3XUDP_SUPPORT_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_SPREADING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_SPREADING_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_HLB_IDLE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_HLB_IDLE_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_HLB_IDLE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_BUNDLE_UPDATE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_BUNDLE_UPDATE_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_BUNDLE_UPDATE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V3_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V3_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V3_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DYNAMIC_SENSORS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DYNAMIC_SENSORS_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DYNAMIC_SENSORS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC (when
+ * TX_TSO_V2 == 1). Not present on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that the PF is assigned, but the assignment
+ * cannot be expressed in this field. It is intended for a possible future
+ * situation where a more complex scheme of PF-to-port mapping is in use. The
+ * future driver should look for a new field supporting the new scheme. The
+ * current/old driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V12_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V12_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache, expressed as a binary logarithm. The actual
+ * size equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache, expressed as a binary logarithm. The actual
+ * size equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_DESC_CACHE_SIZE_LEN 1
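The two *_DESC_CACHE_SIZE fields above are log2-encoded. As a minimal sketch
(the helper name is illustrative, not part of the driver), a consumer could
recover the actual entry counts like this:

#include <stdint.h>

/* Hypothetical decode of the log2-encoded descriptor cache sizes above:
 * a reported value of, say, 6 means a 64-entry descriptor cache.
 */
static inline uint32_t mcdi_desc_cache_entries(uint8_t log2_size)
{
	return 1u << log2_size;
}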
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford, the amount of address space assigned to each
+ * VI is configurable. This is a global setting that the driver must query to
+ * discover the VI-to-address mapping. Cut-through PIO (CTPIO) is not
+ * available with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_WINDOW_MODE_64K 0x2
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* Entry count in the MAC stats array, including the final GENERATION_END
+ * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
+ * hold at least this many 64-bit stats values, if they wish to receive all
+ * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
+ * stats array returned will be truncated.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAC_STATS_NUM_STATS_LEN 2
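Per the MAC stats comment above, the DMA buffer must hold at least
MAC_STATS_NUM_STATS 64-bit values or the returned array is truncated. A
minimal sizing sketch follows; the helper name and types are assumptions, not
driver code:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical helper: minimum DMA buffer length in bytes needed to receive
 * every stat, including the final GENERATION_END entry already counted in
 * MAC_STATS_NUM_STATS.
 */
static inline size_t mac_stats_dma_buf_len(uint16_t mac_stats_num_stats)
{
	return (size_t)mac_stats_num_stats * sizeof(uint64_t);
}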
+/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field
+ * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_MARK_MAX_OFST 80
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_MARK_MAX_LEN 4
+/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in
+ * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when
+ * they create an RX queue. Due to hardware limitations, only a small number of
+ * different buffer sizes may be available concurrently. Nonzero entries in
+ * this array are the sizes of buffers which the system guarantees will be
+ * available for use. If the list is empty, there are no limitations on
+ * concurrent buffer sizes.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16
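One reading of the GUARANTEED_RX_BUFFER_SIZES array described above: nonzero
entries are the buffer sizes guaranteed to be available, and an all-zero array
means no restriction. A sketch under those assumptions (names are
illustrative, not taken from the driver):

#include <stdbool.h>
#include <stdint.h>

#define NUM_GUARANTEED_RX_BUFFER_SIZES 16	/* matches the _NUM value above */

/* Hypothetical check: true if 'size' is guaranteed to be usable, i.e. the
 * array is empty (no restriction) or contains a matching nonzero entry.
 */
static inline bool rx_buffer_size_guaranteed(const uint32_t *sizes, uint32_t size)
{
	bool restricted = false;
	int i;

	for (i = 0; i < NUM_GUARANTEED_RX_BUFFER_SIZES; i++) {
		if (!sizes[i])
			continue;
		restricted = true;
		if (sizes[i] == size)
			return true;
	}
	return !restricted;
}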
+/* Third word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FLAGS3_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FLAGS3_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_WOL_ETHERWAKE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_WOL_ETHERWAKE_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_WOL_ETHERWAKE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_EVEN_SPREADING_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_EVEN_SPREADING_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_EVEN_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_SELECTABLE_TABLE_SIZE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_SELECTABLE_TABLE_SIZE_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_SELECTABLE_TABLE_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_SUPPORTED_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VDPA_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VDPA_SUPPORTED_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VDPA_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_ENCAPSULATED_MCDI_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_ENCAPSULATED_MCDI_SUPPORTED_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_ENCAPSULATED_MCDI_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EXTERNAL_MAE_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EXTERNAL_MAE_SUPPORTED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EXTERNAL_MAE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CLIENT_CMD_VF_PROXY_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CLIENT_CMD_VF_PROXY_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CLIENT_CMD_VF_PROXY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CXL_CONFIG_ENABLE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CXL_CONFIG_ENABLE_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CXL_CONFIG_ENABLE_WIDTH 1
+/* These bits are reserved for communicating test-specific capabilities to
+ * host-side test software. All production drivers should treat this field as
+ * opaque.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_OFST 152
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_LEN 8
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_LO_OFST 152
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_LO_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_LO_LBN 1216
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_LO_WIDTH 32
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_HI_OFST 156
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_HI_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_HI_LBN 1248
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_HI_WIDTH 32
+/* The minimum size (in table entries) of indirection table to be allocated
+ * from the pool for an RSS context. Note that the table size used must be a
+ * power of 2.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_OFST 160
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_LEN 4
+/* The maximum size (in table entries) of indirection table to be allocated
+ * from the pool for an RSS context. Note that the table size used must be a
+ * power of 2.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_OFST 164
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_LEN 4
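Given that the indirection table size must be a power of 2 and lie between the
MIN and MAX values above, a hypothetical validity check (not taken from the
driver) could be:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical check: a requested RSS indirection table size is acceptable
 * only if it is a power of two within the advertised [min, max] range.
 */
static inline bool rss_table_size_valid(uint32_t size, uint32_t min, uint32_t max)
{
	return size != 0 && (size & (size - 1)) == 0 &&
	       size >= min && size <= max;
}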
+/* The maximum number of queues that can be used by an RSS context in exclusive
+ * mode. In exclusive mode the context has a configurable indirection table and
+ * a configurable RSS key.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MAX_INDIRECTION_QUEUES_OFST 168
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MAX_INDIRECTION_QUEUES_LEN 4
+/* The maximum number of queues that can be used by an RSS context in even-
+ * spreading mode. In even-spreading mode the context has no indirection table
+ * but it does have a configurable RSS key.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_OFST 172
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_LEN 4
+/* The total number of RSS contexts supported. Note that the number of
+ * available contexts using indirection tables is also limited by the
+ * availability of indirection table space allocated from a common pool.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_NUM_CONTEXTS_OFST 176
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_NUM_CONTEXTS_LEN 4
+/* The total amount of indirection table space that can be shared between RSS
+ * contexts.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_TABLE_POOL_SIZE_OFST 180
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_TABLE_POOL_SIZE_LEN 4
+/* A bitmap of the queue sizes the device can provide, where bit N being set
+ * indicates that 2**N is a valid size. The device may be limited in the number
+ * of different queue sizes that can exist simultaneously, so a bit being set
+ * here does not guarantee that an attempt to create a queue of that size will
+ * succeed.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SUPPORTED_QUEUE_SIZES_OFST 184
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SUPPORTED_QUEUE_SIZES_LEN 4
+/* A bitmap of queue sizes that are always available, in the same format as
+ * SUPPORTED_QUEUE_SIZES. Attempting to create a queue with one of these sizes
+ * will never fail due to unavailability of the requested size.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_GUARANTEED_QUEUE_SIZES_OFST 188
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_GUARANTEED_QUEUE_SIZES_LEN 4
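Both SUPPORTED_QUEUE_SIZES and GUARANTEED_QUEUE_SIZES are bitmaps in which bit
N stands for a queue of 2**N entries; a hedged sketch of testing a size
against either word (the helper name is an assumption):

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical helper: 'bitmap' is one of the two queue-size words above and
 * 'log2_entries' is N for a queue of 2**N entries.
 */
static inline bool queue_size_advertised(uint32_t bitmap, unsigned int log2_entries)
{
	return log2_entries < 32 &&
	       (bitmap & (UINT32_C(1) << log2_entries)) != 0;
}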
+/* Number of available indirect memory maps. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INDIRECT_MAP_INDEX_COUNT_OFST 192
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INDIRECT_MAP_INDEX_COUNT_LEN 4
+/* Number of VIs available for external ports 4-7. Information for ports 0-3 is
+ * in NUM_VIS_PER_PORT in GET_CAPABILITIES_V2_OUT.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VIS_PER_PORT2_OFST 196
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VIS_PER_PORT2_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VIS_PER_PORT2_NUM 4
+
/***********************************/
/* MC_CMD_V2_EXTN
@@ -18468,168 +19479,13 @@
* are not defined.
*/
#define MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_TSA 0x1
-
-
-/***********************************/
-/* MC_CMD_TCM_BUCKET_ALLOC
- * Allocate a pacer bucket (for qau rp or a snapper test)
+/* enum: MCDI command used for platform management. Typically, these commands
+ * are used for low-level operations directed at the platform as a whole (e.g.
+ * MMIO device enumeration) rather than individual functions and use a
+ * dedicated comms channel (e.g. RPmsg/IPI). May be handled by the same or
+ * different CPU as MCDI_MESSAGE_TYPE_MC.
*/
-#define MC_CMD_TCM_BUCKET_ALLOC 0xb2
-#undef MC_CMD_0xb2_PRIVILEGE_CTG
-
-#define MC_CMD_0xb2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_TCM_BUCKET_ALLOC_IN msgrequest */
-#define MC_CMD_TCM_BUCKET_ALLOC_IN_LEN 0
-
-/* MC_CMD_TCM_BUCKET_ALLOC_OUT msgresponse */
-#define MC_CMD_TCM_BUCKET_ALLOC_OUT_LEN 4
-/* the bucket id */
-#define MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_OFST 0
-#define MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_LEN 4
-
-
-/***********************************/
-/* MC_CMD_TCM_BUCKET_FREE
- * Free a pacer bucket
- */
-#define MC_CMD_TCM_BUCKET_FREE 0xb3
-#undef MC_CMD_0xb3_PRIVILEGE_CTG
-
-#define MC_CMD_0xb3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_TCM_BUCKET_FREE_IN msgrequest */
-#define MC_CMD_TCM_BUCKET_FREE_IN_LEN 4
-/* the bucket id */
-#define MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_OFST 0
-#define MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_LEN 4
-
-/* MC_CMD_TCM_BUCKET_FREE_OUT msgresponse */
-#define MC_CMD_TCM_BUCKET_FREE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_TCM_BUCKET_INIT
- * Initialise pacer bucket with a given rate
- */
-#define MC_CMD_TCM_BUCKET_INIT 0xb4
-#undef MC_CMD_0xb4_PRIVILEGE_CTG
-
-#define MC_CMD_0xb4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_TCM_BUCKET_INIT_IN msgrequest */
-#define MC_CMD_TCM_BUCKET_INIT_IN_LEN 8
-/* the bucket id */
-#define MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_OFST 0
-#define MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_LEN 4
-/* the rate in mbps */
-#define MC_CMD_TCM_BUCKET_INIT_IN_RATE_OFST 4
-#define MC_CMD_TCM_BUCKET_INIT_IN_RATE_LEN 4
-
-/* MC_CMD_TCM_BUCKET_INIT_EXT_IN msgrequest */
-#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_LEN 12
-/* the bucket id */
-#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_OFST 0
-#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_LEN 4
-/* the rate in mbps */
-#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_OFST 4
-#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_LEN 4
-/* the desired maximum fill level */
-#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_OFST 8
-#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_LEN 4
-
-/* MC_CMD_TCM_BUCKET_INIT_OUT msgresponse */
-#define MC_CMD_TCM_BUCKET_INIT_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_TCM_TXQ_INIT
- * Initialise txq in pacer with given options or set options
- */
-#define MC_CMD_TCM_TXQ_INIT 0xb5
-#undef MC_CMD_0xb5_PRIVILEGE_CTG
-
-#define MC_CMD_0xb5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_TCM_TXQ_INIT_IN msgrequest */
-#define MC_CMD_TCM_TXQ_INIT_IN_LEN 28
-/* the txq id */
-#define MC_CMD_TCM_TXQ_INIT_IN_QID_OFST 0
-#define MC_CMD_TCM_TXQ_INIT_IN_QID_LEN 4
-/* the static priority associated with the txq */
-#define MC_CMD_TCM_TXQ_INIT_IN_LABEL_OFST 4
-#define MC_CMD_TCM_TXQ_INIT_IN_LABEL_LEN 4
-/* bitmask of the priority queues this txq is inserted into when inserted. */
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_OFST 8
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_LEN 4
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_OFST 8
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_LBN 0
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_OFST 8
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_LBN 1
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_WIDTH 1
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_OFST 8
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_LBN 2
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_WIDTH 1
-/* the reaction point (RP) bucket */
-#define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_OFST 12
-#define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_LEN 4
-/* an already reserved bucket (typically set to bucket associated with outer
- * vswitch)
- */
-#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_OFST 16
-#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_LEN 4
-/* an already reserved bucket (typically set to bucket associated with inner
- * vswitch)
- */
-#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_OFST 20
-#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_LEN 4
-/* the min bucket (typically for ETS/minimum bandwidth) */
-#define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_OFST 24
-#define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_LEN 4
-
-/* MC_CMD_TCM_TXQ_INIT_EXT_IN msgrequest */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LEN 32
-/* the txq id */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_OFST 0
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_LEN 4
-/* the static priority associated with the txq */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_OFST 4
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_LEN 4
-/* bitmask of the priority queues this txq is inserted into when inserted. */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_OFST 8
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_LEN 4
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_OFST 8
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_LBN 0
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_OFST 8
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_LBN 1
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_WIDTH 1
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_OFST 8
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_LBN 2
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_WIDTH 1
-/* the reaction point (RP) bucket */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_OFST 12
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_LEN 4
-/* an already reserved bucket (typically set to bucket associated with outer
- * vswitch)
- */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_OFST 16
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_LEN 4
-/* an already reserved bucket (typically set to bucket associated with inner
- * vswitch)
- */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_OFST 20
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_LEN 4
-/* the min bucket (typically for ETS/minimum bandwidth) */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_OFST 24
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_LEN 4
-/* the static priority associated with the txq */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_OFST 28
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_LEN 4
-
-/* MC_CMD_TCM_TXQ_INIT_OUT msgresponse */
-#define MC_CMD_TCM_TXQ_INIT_OUT_LEN 0
+#define MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_PLATFORM 0x2
/***********************************/
@@ -18740,27 +19596,6 @@
/***********************************/
-/* MC_CMD_VSWITCH_QUERY
- * read some config of v-switch. For now this command is an empty placeholder.
- * It may be used to check if a v-switch is connected to a given EVB port (if
- * not, then the command returns ENOENT).
- */
-#define MC_CMD_VSWITCH_QUERY 0x63
-#undef MC_CMD_0x63_PRIVILEGE_CTG
-
-#define MC_CMD_0x63_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_VSWITCH_QUERY_IN msgrequest */
-#define MC_CMD_VSWITCH_QUERY_IN_LEN 4
-/* The port to which the v-switch is connected. */
-#define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
-#define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_LEN 4
-
-/* MC_CMD_VSWITCH_QUERY_OUT msgresponse */
-#define MC_CMD_VSWITCH_QUERY_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_VPORT_ALLOC
* allocate a v-port.
*/
@@ -18936,28 +19771,6 @@
/***********************************/
-/* MC_CMD_VADAPTOR_GET_MAC
- * read the MAC address assigned to a v-adaptor.
- */
-#define MC_CMD_VADAPTOR_GET_MAC 0x5e
-#undef MC_CMD_0x5e_PRIVILEGE_CTG
-
-#define MC_CMD_0x5e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_VADAPTOR_GET_MAC_IN msgrequest */
-#define MC_CMD_VADAPTOR_GET_MAC_IN_LEN 4
-/* The port to which the v-adaptor is connected. */
-#define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
-#define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_LEN 4
-
-/* MC_CMD_VADAPTOR_GET_MAC_OUT msgresponse */
-#define MC_CMD_VADAPTOR_GET_MAC_OUT_LEN 6
-/* The MAC address assigned to this v-adaptor */
-#define MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_OFST 0
-#define MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_LEN 6
-
-
-/***********************************/
/* MC_CMD_VADAPTOR_QUERY
* read some config of v-adaptor.
*/
@@ -19014,86 +19827,6 @@
/***********************************/
-/* MC_CMD_RDWR_A64_REGIONS
- * Assign the 64 bit region addresses.
- */
-#define MC_CMD_RDWR_A64_REGIONS 0x9b
-#undef MC_CMD_0x9b_PRIVILEGE_CTG
-
-#define MC_CMD_0x9b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_RDWR_A64_REGIONS_IN msgrequest */
-#define MC_CMD_RDWR_A64_REGIONS_IN_LEN 17
-#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_OFST 0
-#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_LEN 4
-#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_OFST 4
-#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_LEN 4
-#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_OFST 8
-#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_LEN 4
-#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_OFST 12
-#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_LEN 4
-/* Write enable bits 0-3, set to write, clear to read. */
-#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_LBN 128
-#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_WIDTH 4
-#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_OFST 16
-#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_LEN 1
-
-/* MC_CMD_RDWR_A64_REGIONS_OUT msgresponse: This data always included
- * regardless of state of write bits in the request.
- */
-#define MC_CMD_RDWR_A64_REGIONS_OUT_LEN 16
-#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_OFST 0
-#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_LEN 4
-#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_OFST 4
-#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_LEN 4
-#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_OFST 8
-#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_LEN 4
-#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_OFST 12
-#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_LEN 4
-
-
-/***********************************/
-/* MC_CMD_ONLOAD_STACK_ALLOC
- * Allocate an Onload stack ID.
- */
-#define MC_CMD_ONLOAD_STACK_ALLOC 0x9c
-#undef MC_CMD_0x9c_PRIVILEGE_CTG
-
-#define MC_CMD_0x9c_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
-
-/* MC_CMD_ONLOAD_STACK_ALLOC_IN msgrequest */
-#define MC_CMD_ONLOAD_STACK_ALLOC_IN_LEN 4
-/* The handle of the owning upstream port */
-#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
-#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
-
-/* MC_CMD_ONLOAD_STACK_ALLOC_OUT msgresponse */
-#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_LEN 4
-/* The handle of the new Onload stack */
-#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_OFST 0
-#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_LEN 4
-
-
-/***********************************/
-/* MC_CMD_ONLOAD_STACK_FREE
- * Free an Onload stack ID.
- */
-#define MC_CMD_ONLOAD_STACK_FREE 0x9d
-#undef MC_CMD_0x9d_PRIVILEGE_CTG
-
-#define MC_CMD_0x9d_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
-
-/* MC_CMD_ONLOAD_STACK_FREE_IN msgrequest */
-#define MC_CMD_ONLOAD_STACK_FREE_IN_LEN 4
-/* The handle of the Onload stack */
-#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_OFST 0
-#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_LEN 4
-
-/* MC_CMD_ONLOAD_STACK_FREE_OUT msgresponse */
-#define MC_CMD_ONLOAD_STACK_FREE_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_RSS_CONTEXT_ALLOC
* Allocate an RSS context.
*/
@@ -19305,93 +20038,6 @@
/***********************************/
-/* MC_CMD_RSS_CONTEXT_WRITE_TABLE
- * Write a portion of a selectable-size indirection table for an RSS context.
- * This command must be used instead of MC_CMD_RSS_CONTEXT_SET_TABLE if the
- * RSS_SELECTABLE_TABLE_SIZE bit is set in MC_CMD_GET_CAPABILITIES.
- */
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE 0x13e
-#undef MC_CMD_0x13e_PRIVILEGE_CTG
-
-#define MC_CMD_0x13e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN msgrequest */
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LENMIN 8
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LENMAX 252
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LENMAX_MCDI2 1020
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LEN(num) (4+4*(num))
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_NUM(len) (((len)-4)/4)
-/* The handle of the RSS context */
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_RSS_CONTEXT_ID_OFST 0
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_RSS_CONTEXT_ID_LEN 4
-/* An array of index-value pairs to be written to the table. Structure is
- * MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY.
- */
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_OFST 4
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_LEN 4
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_MINNUM 1
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_MAXNUM 62
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_MAXNUM_MCDI2 254
-
-/* MC_CMD_RSS_CONTEXT_WRITE_TABLE_OUT msgresponse */
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_OUT_LEN 0
-
-/* MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY structuredef */
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_LEN 4
-/* The index of the table entry to be written. */
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_OFST 0
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_LEN 2
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_LBN 0
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_WIDTH 16
-/* The value to write into the table entry. */
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_OFST 2
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_LEN 2
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_LBN 16
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_WIDTH 16
-
-
-/***********************************/
-/* MC_CMD_RSS_CONTEXT_READ_TABLE
- * Read a portion of a selectable-size indirection table for an RSS context.
- * This command must be used instead of MC_CMD_RSS_CONTEXT_GET_TABLE if the
- * RSS_SELECTABLE_TABLE_SIZE bit is set in MC_CMD_GET_CAPABILITIES.
- */
-#define MC_CMD_RSS_CONTEXT_READ_TABLE 0x13f
-#undef MC_CMD_0x13f_PRIVILEGE_CTG
-
-#define MC_CMD_0x13f_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_RSS_CONTEXT_READ_TABLE_IN msgrequest */
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LENMIN 6
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LENMAX 252
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LENMAX_MCDI2 1020
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LEN(num) (4+2*(num))
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_NUM(len) (((len)-4)/2)
-/* The handle of the RSS context */
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_RSS_CONTEXT_ID_OFST 0
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_RSS_CONTEXT_ID_LEN 4
-/* An array containing the indices of the entries to be read. */
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_OFST 4
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_LEN 2
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_MINNUM 1
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_MAXNUM 124
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_MAXNUM_MCDI2 508
-
-/* MC_CMD_RSS_CONTEXT_READ_TABLE_OUT msgresponse */
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LENMIN 2
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LENMAX 252
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LEN(num) (0+2*(num))
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_NUM(len) (((len)-0)/2)
-/* A buffer containing the requested entries read from the table. */
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_OFST 0
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_LEN 2
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_MINNUM 1
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_MAXNUM 126
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_MAXNUM_MCDI2 510
-
-
-/***********************************/
/* MC_CMD_RSS_CONTEXT_SET_FLAGS
* Set various control flags for an RSS context.
*/
@@ -19525,158 +20171,6 @@
/***********************************/
-/* MC_CMD_DOT1P_MAPPING_ALLOC
- * Allocate a .1p mapping.
- */
-#define MC_CMD_DOT1P_MAPPING_ALLOC 0xa4
-#undef MC_CMD_0xa4_PRIVILEGE_CTG
-
-#define MC_CMD_0xa4_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DOT1P_MAPPING_ALLOC_IN msgrequest */
-#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_LEN 8
-/* The handle of the owning upstream port */
-#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
-#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
-/* Number of queues spanned by this mapping, in the range 1-64; valid fixed
- * offsets in the mapping table will be in the range 0 to NUM_QUEUES-1, and
- * referenced RSS contexts must span no more than this number.
- */
-#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_OFST 4
-#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_LEN 4
-
-/* MC_CMD_DOT1P_MAPPING_ALLOC_OUT msgresponse */
-#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_LEN 4
-/* The handle of the new .1p mapping. This should be considered opaque to the
- * host, although a value of 0xFFFFFFFF is guaranteed never to be a valid
- * handle.
- */
-#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_OFST 0
-#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_LEN 4
-/* enum: guaranteed invalid .1p mapping handle value */
-#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_INVALID 0xffffffff
-
-
-/***********************************/
-/* MC_CMD_DOT1P_MAPPING_FREE
- * Free a .1p mapping.
- */
-#define MC_CMD_DOT1P_MAPPING_FREE 0xa5
-#undef MC_CMD_0xa5_PRIVILEGE_CTG
-
-#define MC_CMD_0xa5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DOT1P_MAPPING_FREE_IN msgrequest */
-#define MC_CMD_DOT1P_MAPPING_FREE_IN_LEN 4
-/* The handle of the .1p mapping */
-#define MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_OFST 0
-#define MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_LEN 4
-
-/* MC_CMD_DOT1P_MAPPING_FREE_OUT msgresponse */
-#define MC_CMD_DOT1P_MAPPING_FREE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_DOT1P_MAPPING_SET_TABLE
- * Set the mapping table for a .1p mapping.
- */
-#define MC_CMD_DOT1P_MAPPING_SET_TABLE 0xa6
-#undef MC_CMD_0xa6_PRIVILEGE_CTG
-
-#define MC_CMD_0xa6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DOT1P_MAPPING_SET_TABLE_IN msgrequest */
-#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_LEN 36
-/* The handle of the .1p mapping */
-#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
-#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_LEN 4
-/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context
- * handle)
- */
-#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_MAPPING_TABLE_OFST 4
-#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_MAPPING_TABLE_LEN 32
-
-/* MC_CMD_DOT1P_MAPPING_SET_TABLE_OUT msgresponse */
-#define MC_CMD_DOT1P_MAPPING_SET_TABLE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_DOT1P_MAPPING_GET_TABLE
- * Get the mapping table for a .1p mapping.
- */
-#define MC_CMD_DOT1P_MAPPING_GET_TABLE 0xa7
-#undef MC_CMD_0xa7_PRIVILEGE_CTG
-
-#define MC_CMD_0xa7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DOT1P_MAPPING_GET_TABLE_IN msgrequest */
-#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_LEN 4
-/* The handle of the .1p mapping */
-#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
-#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_LEN 4
-
-/* MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT msgresponse */
-#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_LEN 36
-/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context
- * handle)
- */
-#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_MAPPING_TABLE_OFST 4
-#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_MAPPING_TABLE_LEN 32
-
-
-/***********************************/
-/* MC_CMD_GET_VECTOR_CFG
- * Get Interrupt Vector config for this PF.
- */
-#define MC_CMD_GET_VECTOR_CFG 0xbf
-#undef MC_CMD_0xbf_PRIVILEGE_CTG
-
-#define MC_CMD_0xbf_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_VECTOR_CFG_IN msgrequest */
-#define MC_CMD_GET_VECTOR_CFG_IN_LEN 0
-
-/* MC_CMD_GET_VECTOR_CFG_OUT msgresponse */
-#define MC_CMD_GET_VECTOR_CFG_OUT_LEN 12
-/* Base absolute interrupt vector number. */
-#define MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_OFST 0
-#define MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_LEN 4
-/* Number of interrupt vectors allocate to this PF. */
-#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_OFST 4
-#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_LEN 4
-/* Number of interrupt vectors to allocate per VF. */
-#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_OFST 8
-#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_LEN 4
-
-
-/***********************************/
-/* MC_CMD_SET_VECTOR_CFG
- * Set Interrupt Vector config for this PF.
- */
-#define MC_CMD_SET_VECTOR_CFG 0xc0
-#undef MC_CMD_0xc0_PRIVILEGE_CTG
-
-#define MC_CMD_0xc0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_SET_VECTOR_CFG_IN msgrequest */
-#define MC_CMD_SET_VECTOR_CFG_IN_LEN 12
-/* Base absolute interrupt vector number, or MC_CMD_RESOURCE_INSTANCE_ANY to
- * let the system find a suitable base.
- */
-#define MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_OFST 0
-#define MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_LEN 4
-/* Number of interrupt vectors allocate to this PF. */
-#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_OFST 4
-#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_LEN 4
-/* Number of interrupt vectors to allocate per VF. */
-#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_OFST 8
-#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_LEN 4
-
-/* MC_CMD_SET_VECTOR_CFG_OUT msgresponse */
-#define MC_CMD_SET_VECTOR_CFG_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_VPORT_ADD_MAC_ADDRESS
* Add a MAC address to a v-port
*/
@@ -19810,124 +20304,6 @@
/***********************************/
-/* MC_CMD_EVB_PORT_QUERY
- * read some config of v-port.
- */
-#define MC_CMD_EVB_PORT_QUERY 0x62
-#undef MC_CMD_0x62_PRIVILEGE_CTG
-
-#define MC_CMD_0x62_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_EVB_PORT_QUERY_IN msgrequest */
-#define MC_CMD_EVB_PORT_QUERY_IN_LEN 4
-/* The handle of the v-port */
-#define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_OFST 0
-#define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_LEN 4
-
-/* MC_CMD_EVB_PORT_QUERY_OUT msgresponse */
-#define MC_CMD_EVB_PORT_QUERY_OUT_LEN 8
-/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
-#define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_OFST 0
-#define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_LEN 4
-/* The number of VLAN tags that may be used on a v-adaptor connected to this
- * EVB port.
- */
-#define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 4
-#define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_LEN 4
-
-
-/***********************************/
-/* MC_CMD_DUMP_BUFTBL_ENTRIES
- * Dump buffer table entries, mainly for command client debug use. Dumps
- * absolute entries, and does not use chunk handles. All entries must be in
- * range, and used for q page mapping, Although the latter restriction may be
- * lifted in future.
- */
-#define MC_CMD_DUMP_BUFTBL_ENTRIES 0xab
-#undef MC_CMD_0xab_PRIVILEGE_CTG
-
-#define MC_CMD_0xab_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_DUMP_BUFTBL_ENTRIES_IN msgrequest */
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_LEN 8
-/* Index of the first buffer table entry. */
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_OFST 0
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_LEN 4
-/* Number of buffer table entries to dump. */
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 4
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_LEN 4
-
-/* MC_CMD_DUMP_BUFTBL_ENTRIES_OUT msgresponse */
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX 252
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LEN(num) (0+12*(num))
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_NUM(len) (((len)-0)/12)
-/* Raw buffer table entries, layed out as BUFTBL_ENTRY. */
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_OFST 0
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_LEN 12
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MINNUM 1
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MAXNUM 21
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MAXNUM_MCDI2 85
-
-
-/***********************************/
-/* MC_CMD_SET_RXDP_CONFIG
- * Set global RXDP configuration settings
- */
-#define MC_CMD_SET_RXDP_CONFIG 0xc1
-#undef MC_CMD_0xc1_PRIVILEGE_CTG
-
-#define MC_CMD_0xc1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SET_RXDP_CONFIG_IN msgrequest */
-#define MC_CMD_SET_RXDP_CONFIG_IN_LEN 4
-#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST 0
-#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_LEN 4
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_OFST 0
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_LBN 0
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_WIDTH 1
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_OFST 0
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_LBN 1
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_WIDTH 2
-/* enum: pad to 64 bytes */
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64 0x0
-/* enum: pad to 128 bytes (Medford only) */
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128 0x1
-/* enum: pad to 256 bytes (Medford only) */
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256 0x2
-
-/* MC_CMD_SET_RXDP_CONFIG_OUT msgresponse */
-#define MC_CMD_SET_RXDP_CONFIG_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_GET_RXDP_CONFIG
- * Get global RXDP configuration settings
- */
-#define MC_CMD_GET_RXDP_CONFIG 0xc2
-#undef MC_CMD_0xc2_PRIVILEGE_CTG
-
-#define MC_CMD_0xc2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_RXDP_CONFIG_IN msgrequest */
-#define MC_CMD_GET_RXDP_CONFIG_IN_LEN 0
-
-/* MC_CMD_GET_RXDP_CONFIG_OUT msgresponse */
-#define MC_CMD_GET_RXDP_CONFIG_OUT_LEN 4
-#define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_OFST 0
-#define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_LEN 4
-#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_OFST 0
-#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_LBN 0
-#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_WIDTH 1
-#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_OFST 0
-#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_LBN 1
-#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_WIDTH 2
-/* Enum values, see field(s): */
-/* MC_CMD_SET_RXDP_CONFIG/MC_CMD_SET_RXDP_CONFIG_IN/PAD_HOST_LEN */
-
-
-/***********************************/
/* MC_CMD_GET_CLOCK
* Return the system and PDCPU clock frequencies.
*/
@@ -19950,210 +20326,6 @@
/***********************************/
-/* MC_CMD_SET_CLOCK
- * Control the system and DPCPU clock frequencies. Changes are lost reboot.
- */
-#define MC_CMD_SET_CLOCK 0xad
-#undef MC_CMD_0xad_PRIVILEGE_CTG
-
-#define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_SET_CLOCK_IN msgrequest */
-#define MC_CMD_SET_CLOCK_IN_LEN 28
-/* Requested frequency in MHz for system clock domain */
-#define MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST 0
-#define MC_CMD_SET_CLOCK_IN_SYS_FREQ_LEN 4
-/* enum: Leave the system clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_SYS_DOMAIN_DONT_CHANGE 0x0
-/* Requested frequency in MHz for inter-core clock domain */
-#define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_OFST 4
-#define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_LEN 4
-/* enum: Leave the inter-core clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_ICORE_DOMAIN_DONT_CHANGE 0x0
-/* Requested frequency in MHz for DPCPU clock domain */
-#define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_OFST 8
-#define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_LEN 4
-/* enum: Leave the DPCPU clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_DPCPU_DOMAIN_DONT_CHANGE 0x0
-/* Requested frequency in MHz for PCS clock domain */
-#define MC_CMD_SET_CLOCK_IN_PCS_FREQ_OFST 12
-#define MC_CMD_SET_CLOCK_IN_PCS_FREQ_LEN 4
-/* enum: Leave the PCS clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_PCS_DOMAIN_DONT_CHANGE 0x0
-/* Requested frequency in MHz for MC clock domain */
-#define MC_CMD_SET_CLOCK_IN_MC_FREQ_OFST 16
-#define MC_CMD_SET_CLOCK_IN_MC_FREQ_LEN 4
-/* enum: Leave the MC clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_MC_DOMAIN_DONT_CHANGE 0x0
-/* Requested frequency in MHz for rmon clock domain */
-#define MC_CMD_SET_CLOCK_IN_RMON_FREQ_OFST 20
-#define MC_CMD_SET_CLOCK_IN_RMON_FREQ_LEN 4
-/* enum: Leave the rmon clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_RMON_DOMAIN_DONT_CHANGE 0x0
-/* Requested frequency in MHz for vswitch clock domain */
-#define MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_OFST 24
-#define MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_LEN 4
-/* enum: Leave the vswitch clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_VSWITCH_DOMAIN_DONT_CHANGE 0x0
-
-/* MC_CMD_SET_CLOCK_OUT msgresponse */
-#define MC_CMD_SET_CLOCK_OUT_LEN 28
-/* Resulting system frequency in MHz */
-#define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_OFST 0
-#define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_LEN 4
-/* enum: The system clock domain doesn't exist */
-#define MC_CMD_SET_CLOCK_OUT_SYS_DOMAIN_UNSUPPORTED 0x0
-/* Resulting inter-core frequency in MHz */
-#define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_OFST 4
-#define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_LEN 4
-/* enum: The inter-core clock domain doesn't exist / isn't used */
-#define MC_CMD_SET_CLOCK_OUT_ICORE_DOMAIN_UNSUPPORTED 0x0
-/* Resulting DPCPU frequency in MHz */
-#define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_OFST 8
-#define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_LEN 4
-/* enum: The dpcpu clock domain doesn't exist */
-#define MC_CMD_SET_CLOCK_OUT_DPCPU_DOMAIN_UNSUPPORTED 0x0
-/* Resulting PCS frequency in MHz */
-#define MC_CMD_SET_CLOCK_OUT_PCS_FREQ_OFST 12
-#define MC_CMD_SET_CLOCK_OUT_PCS_FREQ_LEN 4
-/* enum: The PCS clock domain doesn't exist / isn't controlled */
-#define MC_CMD_SET_CLOCK_OUT_PCS_DOMAIN_UNSUPPORTED 0x0
-/* Resulting MC frequency in MHz */
-#define MC_CMD_SET_CLOCK_OUT_MC_FREQ_OFST 16
-#define MC_CMD_SET_CLOCK_OUT_MC_FREQ_LEN 4
-/* enum: The MC clock domain doesn't exist / isn't controlled */
-#define MC_CMD_SET_CLOCK_OUT_MC_DOMAIN_UNSUPPORTED 0x0
-/* Resulting rmon frequency in MHz */
-#define MC_CMD_SET_CLOCK_OUT_RMON_FREQ_OFST 20
-#define MC_CMD_SET_CLOCK_OUT_RMON_FREQ_LEN 4
-/* enum: The rmon clock domain doesn't exist / isn't controlled */
-#define MC_CMD_SET_CLOCK_OUT_RMON_DOMAIN_UNSUPPORTED 0x0
-/* Resulting vswitch frequency in MHz */
-#define MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_OFST 24
-#define MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_LEN 4
-/* enum: The vswitch clock domain doesn't exist / isn't controlled */
-#define MC_CMD_SET_CLOCK_OUT_VSWITCH_DOMAIN_UNSUPPORTED 0x0
-
-
-/***********************************/
-/* MC_CMD_DPCPU_RPC
- * Send an arbitrary DPCPU message.
- */
-#define MC_CMD_DPCPU_RPC 0xae
-#undef MC_CMD_0xae_PRIVILEGE_CTG
-
-#define MC_CMD_0xae_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_DPCPU_RPC_IN msgrequest */
-#define MC_CMD_DPCPU_RPC_IN_LEN 36
-#define MC_CMD_DPCPU_RPC_IN_CPU_OFST 0
-#define MC_CMD_DPCPU_RPC_IN_CPU_LEN 4
-/* enum: RxDPCPU0 */
-#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX0 0x0
-/* enum: TxDPCPU0 */
-#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX0 0x1
-/* enum: TxDPCPU1 */
-#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX1 0x2
-/* enum: RxDPCPU1 (Medford only) */
-#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX1 0x3
-/* enum: RxDPCPU (will be for the calling function; for now, just an alias of
- * DPCPU_RX0)
- */
-#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX 0x80
-/* enum: TxDPCPU (will be for the calling function; for now, just an alias of
- * DPCPU_TX0)
- */
-#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX 0x81
-/* First 8 bits [39:32] of DATA are consumed by MC-DPCPU protocol and must be
- * initialised to zero
- */
-#define MC_CMD_DPCPU_RPC_IN_DATA_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_DATA_LEN 32
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_LBN 8
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_WIDTH 8
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_READ 0x6 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_WRITE 0x7 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_SELF_TEST 0xc /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_CSR_ACCESS 0xe /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_READ 0x46 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_WRITE 0x47 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SELF_TEST 0x4a /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_CSR_ACCESS 0x4c /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SET_MC_REPLAY_CNTXT 0x4d /* enum */
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_LBN 16
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_LBN 16
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_LBN 48
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_LBN 16
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_WIDTH 240
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_LBN 16
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_STOP_RETURN_RESULT 0x0 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_READ 0x1 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE 0x2 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE_READ 0x3 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_PIPELINED_READ 0x4 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_LBN 48
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_LBN 64
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_LBN 80
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_LBN 16
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_CUT_THROUGH 0x1 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD 0x2 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD_FIRST 0x3 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_LBN 64
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_WDATA_OFST 12
-#define MC_CMD_DPCPU_RPC_IN_WDATA_LEN 24
-/* Register data to write. Only valid in write/write-read. */
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_OFST 16
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_LEN 4
-/* Register address. */
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_OFST 20
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_LEN 4
-
-/* MC_CMD_DPCPU_RPC_OUT msgresponse */
-#define MC_CMD_DPCPU_RPC_OUT_LEN 36
-#define MC_CMD_DPCPU_RPC_OUT_RC_OFST 0
-#define MC_CMD_DPCPU_RPC_OUT_RC_LEN 4
-/* DATA */
-#define MC_CMD_DPCPU_RPC_OUT_DATA_OFST 4
-#define MC_CMD_DPCPU_RPC_OUT_DATA_LEN 32
-#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_OFST 4
-#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_LBN 32
-#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_WIDTH 16
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_OFST 4
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_LBN 48
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_WIDTH 16
-#define MC_CMD_DPCPU_RPC_OUT_RDATA_OFST 12
-#define MC_CMD_DPCPU_RPC_OUT_RDATA_LEN 24
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_OFST 12
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_LEN 4
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_OFST 16
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_LEN 4
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_OFST 20
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_LEN 4
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_OFST 24
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_LEN 4
-
-
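The HDR_CMD_* and CSR_ACCESS_* entries above follow the usual MCDI OFST/LBN/WIDTH convention: a sub-field lives in the byte range starting at OFST, with its least significant bit LBN bits in and WIDTH bits wide. Below is a minimal, hypothetical sketch of packing such a field into a raw request buffer; it is illustration only, not the sfc driver's MCDI helpers, and it assumes LBN is counted from the byte at OFST on a little-endian buffer.

#include <stdint.h>

/* Hypothetical helper: write a WIDTH-bit value whose least significant
 * bit sits LBN bits after the byte at OFST. Not a real driver macro.
 */
static void mcdi_set_field(uint8_t *buf, unsigned int ofst,
			   unsigned int lbn, unsigned int width,
			   uint32_t value)
{
	unsigned int bit;

	for (bit = 0; bit < width; bit++) {
		unsigned int abs_bit = lbn + bit;
		uint8_t *byte = buf + ofst + abs_bit / 8;

		if (value & (1u << bit))
			*byte |= (uint8_t)(1u << (abs_bit % 8));
		else
			*byte &= (uint8_t)~(1u << (abs_bit % 8));
	}
}

/* Example: select the TXDPCPU_READ command number in a request buffer.
 * mcdi_set_field(req, MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_OFST,
 *		  MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_LBN,
 *		  MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_WIDTH,
 *		  MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_READ);
 */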
-/***********************************/
/* MC_CMD_TRIGGER_INTERRUPT
* Trigger an interrupt by prodding the BIU.
*/
@@ -20173,66 +20345,6 @@
/***********************************/
-/* MC_CMD_SHMBOOT_OP
- * Special operations to support (for now) shmboot.
- */
-#define MC_CMD_SHMBOOT_OP 0xe6
-#undef MC_CMD_0xe6_PRIVILEGE_CTG
-
-#define MC_CMD_0xe6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SHMBOOT_OP_IN msgrequest */
-#define MC_CMD_SHMBOOT_OP_IN_LEN 4
-/* Identifies the operation to perform */
-#define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_OFST 0
-#define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_LEN 4
-/* enum: Copy slave_data section to the slave core. (Greenport only) */
-#define MC_CMD_SHMBOOT_OP_IN_PUSH_SLAVE_DATA 0x0
-
-/* MC_CMD_SHMBOOT_OP_OUT msgresponse */
-#define MC_CMD_SHMBOOT_OP_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_CAP_BLK_READ
- * Read multiple 64bit words from capture block memory
- */
-#define MC_CMD_CAP_BLK_READ 0xe7
-#undef MC_CMD_0xe7_PRIVILEGE_CTG
-
-#define MC_CMD_0xe7_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_CAP_BLK_READ_IN msgrequest */
-#define MC_CMD_CAP_BLK_READ_IN_LEN 12
-#define MC_CMD_CAP_BLK_READ_IN_CAP_REG_OFST 0
-#define MC_CMD_CAP_BLK_READ_IN_CAP_REG_LEN 4
-#define MC_CMD_CAP_BLK_READ_IN_ADDR_OFST 4
-#define MC_CMD_CAP_BLK_READ_IN_ADDR_LEN 4
-#define MC_CMD_CAP_BLK_READ_IN_COUNT_OFST 8
-#define MC_CMD_CAP_BLK_READ_IN_COUNT_LEN 4
-
-/* MC_CMD_CAP_BLK_READ_OUT msgresponse */
-#define MC_CMD_CAP_BLK_READ_OUT_LENMIN 8
-#define MC_CMD_CAP_BLK_READ_OUT_LENMAX 248
-#define MC_CMD_CAP_BLK_READ_OUT_LENMAX_MCDI2 1016
-#define MC_CMD_CAP_BLK_READ_OUT_LEN(num) (0+8*(num))
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_NUM(len) (((len)-0)/8)
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_OFST 0
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LEN 8
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_OFST 0
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_LEN 4
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_LBN 0
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_WIDTH 32
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_OFST 4
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_LEN 4
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_LBN 32
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_WIDTH 32
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MINNUM 1
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MAXNUM 31
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MAXNUM_MCDI2 127
-
-
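The LENMIN/LENMAX/LEN(num)/NUM(len) macros above describe a variable-length response carrying 8 bytes per 64-bit word. A rough sizing sketch based purely on those macros (illustration only, not driver code):

#include <stddef.h>

/* Compute the expected response length for reading 'count' words,
 * rejecting counts that cannot fit a single MCDI (or MCDIv2) response.
 */
static int cap_blk_read_resp_len(unsigned int count, int mcdi2, size_t *lenp)
{
	unsigned int max = mcdi2 ? MC_CMD_CAP_BLK_READ_OUT_BUFFER_MAXNUM_MCDI2
				 : MC_CMD_CAP_BLK_READ_OUT_BUFFER_MAXNUM;

	if (count < MC_CMD_CAP_BLK_READ_OUT_BUFFER_MINNUM || count > max)
		return -1;
	*lenp = MC_CMD_CAP_BLK_READ_OUT_LEN(count);	/* 0 + 8 * count bytes */
	return 0;
}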
-/***********************************/
/* MC_CMD_DUMP_DO
* Take a dump of the DUT state
*/
@@ -20380,34 +20492,6 @@
/***********************************/
-/* MC_CMD_SET_PSU
- * Adjusts power supply parameters. This is a warranty-voiding operation.
- * Returns: ENOENT if the parameter or rail specified does not exist, EINVAL if
- * the parameter is out of range.
- */
-#define MC_CMD_SET_PSU 0xea
-#undef MC_CMD_0xea_PRIVILEGE_CTG
-
-#define MC_CMD_0xea_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_SET_PSU_IN msgrequest */
-#define MC_CMD_SET_PSU_IN_LEN 12
-#define MC_CMD_SET_PSU_IN_PARAM_OFST 0
-#define MC_CMD_SET_PSU_IN_PARAM_LEN 4
-#define MC_CMD_SET_PSU_IN_PARAM_SUPPLY_VOLTAGE 0x0 /* enum */
-#define MC_CMD_SET_PSU_IN_RAIL_OFST 4
-#define MC_CMD_SET_PSU_IN_RAIL_LEN 4
-#define MC_CMD_SET_PSU_IN_RAIL_0V9 0x0 /* enum */
-#define MC_CMD_SET_PSU_IN_RAIL_1V2 0x1 /* enum */
-/* desired value, eg voltage in mV */
-#define MC_CMD_SET_PSU_IN_VALUE_OFST 8
-#define MC_CMD_SET_PSU_IN_VALUE_LEN 4
-
-/* MC_CMD_SET_PSU_OUT msgresponse */
-#define MC_CMD_SET_PSU_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_GET_FUNCTION_INFO
* Get function information. PF and VF number.
*/
@@ -20448,7 +20532,7 @@
#define MC_CMD_ENABLE_OFFLINE_BIST 0xed
#undef MC_CMD_0xed_PRIVILEGE_CTG
-#define MC_CMD_0xed_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xed_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_ENABLE_OFFLINE_BIST_IN msgrequest */
#define MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN 0
@@ -20458,137 +20542,13 @@
/***********************************/
-/* MC_CMD_UART_SEND_DATA
- * Send checksummed[sic] block of data over the uart. Response is a placeholder
- * should we wish to make this reliable; currently requests are fire-and-
- * forget.
- */
-#define MC_CMD_UART_SEND_DATA 0xee
-#undef MC_CMD_0xee_PRIVILEGE_CTG
-
-#define MC_CMD_0xee_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_UART_SEND_DATA_OUT msgrequest */
-#define MC_CMD_UART_SEND_DATA_OUT_LENMIN 16
-#define MC_CMD_UART_SEND_DATA_OUT_LENMAX 252
-#define MC_CMD_UART_SEND_DATA_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_UART_SEND_DATA_OUT_LEN(num) (16+1*(num))
-#define MC_CMD_UART_SEND_DATA_OUT_DATA_NUM(len) (((len)-16)/1)
-/* CRC32 over OFFSET, LENGTH, RESERVED, DATA */
-#define MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_OFST 0
-#define MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_LEN 4
-/* Offset at which to write the data */
-#define MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST 4
-#define MC_CMD_UART_SEND_DATA_OUT_OFFSET_LEN 4
-/* Length of data */
-#define MC_CMD_UART_SEND_DATA_OUT_LENGTH_OFST 8
-#define MC_CMD_UART_SEND_DATA_OUT_LENGTH_LEN 4
-/* Reserved for future use */
-#define MC_CMD_UART_SEND_DATA_OUT_RESERVED_OFST 12
-#define MC_CMD_UART_SEND_DATA_OUT_RESERVED_LEN 4
-#define MC_CMD_UART_SEND_DATA_OUT_DATA_OFST 16
-#define MC_CMD_UART_SEND_DATA_OUT_DATA_LEN 1
-#define MC_CMD_UART_SEND_DATA_OUT_DATA_MINNUM 0
-#define MC_CMD_UART_SEND_DATA_OUT_DATA_MAXNUM 236
-#define MC_CMD_UART_SEND_DATA_OUT_DATA_MAXNUM_MCDI2 1004
-
-/* MC_CMD_UART_SEND_DATA_IN msgresponse */
-#define MC_CMD_UART_SEND_DATA_IN_LEN 0
-
-
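Per the definitions above (where, unusually, the request structure carries the _OUT suffix), the CHECKSUM dword covers the OFFSET, LENGTH, RESERVED and DATA fields that follow it. A hedged sketch of filling the request on a little-endian host; zlib's crc32() is used as a stand-in because the exact CRC parameters the firmware expects are not stated here.

#include <stdint.h>
#include <string.h>
#include <zlib.h>	/* crc32(); polynomial/seed are an assumption */

/* Fill an MC_CMD_UART_SEND_DATA request and checksum everything after
 * the CHECKSUM dword (12 header bytes plus the payload).
 */
static void uart_send_fill(uint8_t *req, uint32_t offset,
			   const uint8_t *data, uint32_t len)
{
	uint32_t csum;

	memcpy(req + MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST, &offset, 4);
	memcpy(req + MC_CMD_UART_SEND_DATA_OUT_LENGTH_OFST, &len, 4);
	memset(req + MC_CMD_UART_SEND_DATA_OUT_RESERVED_OFST, 0, 4);
	memcpy(req + MC_CMD_UART_SEND_DATA_OUT_DATA_OFST, data, len);

	csum = (uint32_t)crc32(0L,
			       req + MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST,
			       12 + len);
	memcpy(req + MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_OFST, &csum, 4);
}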
-/***********************************/
-/* MC_CMD_UART_RECV_DATA
- * Request checksummed[sic] block of data over the uart. Only a placeholder,
- * subject to change and not currently implemented.
- */
-#define MC_CMD_UART_RECV_DATA 0xef
-#undef MC_CMD_0xef_PRIVILEGE_CTG
-
-#define MC_CMD_0xef_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_UART_RECV_DATA_OUT msgrequest */
-#define MC_CMD_UART_RECV_DATA_OUT_LEN 16
-/* CRC32 over OFFSET, LENGTH, RESERVED */
-#define MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_OFST 0
-#define MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_LEN 4
-/* Offset from which to read the data */
-#define MC_CMD_UART_RECV_DATA_OUT_OFFSET_OFST 4
-#define MC_CMD_UART_RECV_DATA_OUT_OFFSET_LEN 4
-/* Length of data */
-#define MC_CMD_UART_RECV_DATA_OUT_LENGTH_OFST 8
-#define MC_CMD_UART_RECV_DATA_OUT_LENGTH_LEN 4
-/* Reserved for future use */
-#define MC_CMD_UART_RECV_DATA_OUT_RESERVED_OFST 12
-#define MC_CMD_UART_RECV_DATA_OUT_RESERVED_LEN 4
-
-/* MC_CMD_UART_RECV_DATA_IN msgresponse */
-#define MC_CMD_UART_RECV_DATA_IN_LENMIN 16
-#define MC_CMD_UART_RECV_DATA_IN_LENMAX 252
-#define MC_CMD_UART_RECV_DATA_IN_LENMAX_MCDI2 1020
-#define MC_CMD_UART_RECV_DATA_IN_LEN(num) (16+1*(num))
-#define MC_CMD_UART_RECV_DATA_IN_DATA_NUM(len) (((len)-16)/1)
-/* CRC32 over RESERVED1, RESERVED2, RESERVED3, DATA */
-#define MC_CMD_UART_RECV_DATA_IN_CHECKSUM_OFST 0
-#define MC_CMD_UART_RECV_DATA_IN_CHECKSUM_LEN 4
-/* Offset at which to write the data */
-#define MC_CMD_UART_RECV_DATA_IN_RESERVED1_OFST 4
-#define MC_CMD_UART_RECV_DATA_IN_RESERVED1_LEN 4
-/* Length of data */
-#define MC_CMD_UART_RECV_DATA_IN_RESERVED2_OFST 8
-#define MC_CMD_UART_RECV_DATA_IN_RESERVED2_LEN 4
-/* Reserved for future use */
-#define MC_CMD_UART_RECV_DATA_IN_RESERVED3_OFST 12
-#define MC_CMD_UART_RECV_DATA_IN_RESERVED3_LEN 4
-#define MC_CMD_UART_RECV_DATA_IN_DATA_OFST 16
-#define MC_CMD_UART_RECV_DATA_IN_DATA_LEN 1
-#define MC_CMD_UART_RECV_DATA_IN_DATA_MINNUM 0
-#define MC_CMD_UART_RECV_DATA_IN_DATA_MAXNUM 236
-#define MC_CMD_UART_RECV_DATA_IN_DATA_MAXNUM_MCDI2 1004
-
-
-/***********************************/
-/* MC_CMD_READ_FUSES
- * Read data programmed into the device One-Time-Programmable (OTP) Fuses
- */
-#define MC_CMD_READ_FUSES 0xf0
-#undef MC_CMD_0xf0_PRIVILEGE_CTG
-
-#define MC_CMD_0xf0_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_READ_FUSES_IN msgrequest */
-#define MC_CMD_READ_FUSES_IN_LEN 8
-/* Offset in OTP to read */
-#define MC_CMD_READ_FUSES_IN_OFFSET_OFST 0
-#define MC_CMD_READ_FUSES_IN_OFFSET_LEN 4
-/* Length of data to read in bytes */
-#define MC_CMD_READ_FUSES_IN_LENGTH_OFST 4
-#define MC_CMD_READ_FUSES_IN_LENGTH_LEN 4
-
-/* MC_CMD_READ_FUSES_OUT msgresponse */
-#define MC_CMD_READ_FUSES_OUT_LENMIN 4
-#define MC_CMD_READ_FUSES_OUT_LENMAX 252
-#define MC_CMD_READ_FUSES_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_READ_FUSES_OUT_LEN(num) (4+1*(num))
-#define MC_CMD_READ_FUSES_OUT_DATA_NUM(len) (((len)-4)/1)
-/* Length of returned OTP data in bytes */
-#define MC_CMD_READ_FUSES_OUT_LENGTH_OFST 0
-#define MC_CMD_READ_FUSES_OUT_LENGTH_LEN 4
-/* Returned data */
-#define MC_CMD_READ_FUSES_OUT_DATA_OFST 4
-#define MC_CMD_READ_FUSES_OUT_DATA_LEN 1
-#define MC_CMD_READ_FUSES_OUT_DATA_MINNUM 0
-#define MC_CMD_READ_FUSES_OUT_DATA_MAXNUM 248
-#define MC_CMD_READ_FUSES_OUT_DATA_MAXNUM_MCDI2 1016
-
-
-/***********************************/
/* MC_CMD_KR_TUNE
* Get or set KR Serdes RXEQ and TX Driver settings
*/
#define MC_CMD_KR_TUNE 0xf1
#undef MC_CMD_0xf1_PRIVILEGE_CTG
-#define MC_CMD_0xf1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xf1_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_KR_TUNE_IN msgrequest */
#define MC_CMD_KR_TUNE_IN_LENMIN 4
@@ -21138,262 +21098,6 @@
/***********************************/
-/* MC_CMD_PCIE_TUNE
- * Get or set PCIE Serdes RXEQ and TX Driver settings
- */
-#define MC_CMD_PCIE_TUNE 0xf2
-#undef MC_CMD_0xf2_PRIVILEGE_CTG
-
-#define MC_CMD_0xf2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_PCIE_TUNE_IN msgrequest */
-#define MC_CMD_PCIE_TUNE_IN_LENMIN 4
-#define MC_CMD_PCIE_TUNE_IN_LENMAX 252
-#define MC_CMD_PCIE_TUNE_IN_LENMAX_MCDI2 1020
-#define MC_CMD_PCIE_TUNE_IN_LEN(num) (4+4*(num))
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_NUM(len) (((len)-4)/4)
-/* Requested operation */
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_OFST 0
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_LEN 1
-/* enum: Get current RXEQ settings */
-#define MC_CMD_PCIE_TUNE_IN_RXEQ_GET 0x0
-/* enum: Override RXEQ settings */
-#define MC_CMD_PCIE_TUNE_IN_RXEQ_SET 0x1
-/* enum: Get current TX Driver settings */
-#define MC_CMD_PCIE_TUNE_IN_TXEQ_GET 0x2
-/* enum: Override TX Driver settings */
-#define MC_CMD_PCIE_TUNE_IN_TXEQ_SET 0x3
-/* enum: Start PCIe Serdes Eye diagram plot on a given lane. */
-#define MC_CMD_PCIE_TUNE_IN_START_EYE_PLOT 0x5
-/* enum: Poll PCIe Serdes Eye diagram plot. Returns one row of BER data. The
- * caller should call this command repeatedly after starting eye plot, until no
- * more data is returned.
- */
-#define MC_CMD_PCIE_TUNE_IN_POLL_EYE_PLOT 0x6
-/* enum: Enable the SERDES BIST and set it to generate a 200MHz square wave */
-#define MC_CMD_PCIE_TUNE_IN_BIST_SQUARE_WAVE 0x7
-/* Align the arguments to 32 bits */
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_OFST 1
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_LEN 3
-/* Arguments specific to the operation */
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_OFST 4
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_LEN 4
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MINNUM 0
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MAXNUM 62
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MAXNUM_MCDI2 254
-
-/* MC_CMD_PCIE_TUNE_OUT msgresponse */
-#define MC_CMD_PCIE_TUNE_OUT_LEN 0
-
-/* MC_CMD_PCIE_TUNE_RXEQ_GET_IN msgrequest */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_LEN 4
-/* Requested operation */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_OP_OFST 0
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_OP_LEN 1
-/* Align the arguments to 32 bits */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_RSVD_OFST 1
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_RSVD_LEN 3
-
-/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT msgresponse */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMIN 4
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMAX 252
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_NUM(len) (((len)-0)/4)
-/* RXEQ Parameter */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_OFST 0
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LEN 4
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MINNUM 1
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM_MCDI2 255
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_OFST 0
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8
-/* enum: Attenuation (0-15) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_ATT 0x0
-/* enum: CTLE Boost (0-15) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_BOOST 0x1
-/* enum: DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP1 0x2
-/* enum: DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP2 0x3
-/* enum: DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP3 0x4
-/* enum: DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x5
-/* enum: DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x6
-/* enum: DFE DLev */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_DLEV 0x7
-/* enum: Figure of Merit */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_FOM 0x8
-/* enum: CTLE EQ Capacitor (HF Gain) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9
-/* enum: CTLE EQ Resistor (DC Gain) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_OFST 0
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 5
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_4 0x4 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_5 0x5 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_6 0x6 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_7 0x7 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_8 0x8 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_9 0x9 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_10 0xa /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_11 0xb /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_12 0xc /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_13 0xd /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_14 0xe /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_15 0xf /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_ALL 0x10 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_OFST 0
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN 13
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH 1
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_OFST 0
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_LBN 14
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 10
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_OFST 0
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN 24
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
-
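Each RXEQ PARAM dword above packs the parameter ID, lane, autocal flag and current value into a single 32-bit word at the LBN/WIDTH positions defined. A small illustrative decoder using plain shifts rather than any driver helper:

#include <stdint.h>
#include <stdio.h>

/* Decode one PARAM dword from MC_CMD_PCIE_TUNE_RXEQ_GET_OUT:
 * ID in bits [7:0], LANE in [12:8], AUTOCAL in [13], value in [31:24].
 */
static void pcie_rxeq_decode(uint32_t param)
{
	unsigned int id      = param & 0xff;
	unsigned int lane    = (param >> 8) & 0x1f;
	unsigned int autocal = (param >> 13) & 0x1;
	unsigned int current = (param >> 24) & 0xff;

	printf("param id=%u lane=%u autocal=%u value=%u\n",
	       id, lane, autocal, current);
}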
-/* MC_CMD_PCIE_TUNE_RXEQ_SET_IN msgrequest */
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMIN 8
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMAX 252
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMAX_MCDI2 1020
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LEN(num) (4+4*(num))
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_NUM(len) (((len)-4)/4)
-/* Requested operation */
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_OP_OFST 0
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_OP_LEN 1
-/* Align the arguments to 32 bits */
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_RSVD_OFST 1
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_RSVD_LEN 3
-/* RXEQ Parameter */
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_OFST 4
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LEN 4
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MINNUM 1
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MAXNUM 62
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MAXNUM_MCDI2 254
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_OFST 4
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_LBN 0
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_WIDTH 8
-/* Enum values, see field(s): */
-/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_ID */
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_OFST 4
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_LBN 8
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_WIDTH 5
-/* Enum values, see field(s): */
-/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_LANE */
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_OFST 4
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_LBN 13
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_WIDTH 1
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_OFST 4
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_LBN 14
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_WIDTH 2
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_OFST 4
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_LBN 16
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_WIDTH 8
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_OFST 4
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_LBN 24
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_WIDTH 8
-
-/* MC_CMD_PCIE_TUNE_RXEQ_SET_OUT msgresponse */
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_OUT_LEN 0
-
-/* MC_CMD_PCIE_TUNE_TXEQ_GET_IN msgrequest */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_LEN 4
-/* Requested operation */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_OP_OFST 0
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_OP_LEN 1
-/* Align the arguments to 32 bits */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_RSVD_OFST 1
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_RSVD_LEN 3
-
-/* MC_CMD_PCIE_TUNE_TXEQ_GET_OUT msgresponse */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMIN 4
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMAX 252
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_NUM(len) (((len)-0)/4)
-/* RXEQ Parameter */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_OFST 0
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LEN 4
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MINNUM 1
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM_MCDI2 255
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_OFST 0
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8
-/* enum: TxMargin (PIPE) */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXMARGIN 0x0
-/* enum: TxSwing (PIPE) */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXSWING 0x1
-/* enum: De-emphasis coefficient C(-1) (PIPE) */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CM1 0x2
-/* enum: De-emphasis coefficient C(0) (PIPE) */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_C0 0x3
-/* enum: De-emphasis coefficient C(+1) (PIPE) */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CP1 0x4
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_OFST 0
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 4
-/* Enum values, see field(s): */
-/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_LANE */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_OFST 0
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_LBN 12
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_WIDTH 12
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_OFST 0
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_LBN 24
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
-
-/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN msgrequest */
-#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LEN 8
-/* Requested operation */
-#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_OP_OFST 0
-#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_OP_LEN 1
-/* Align the arguments to 32 bits */
-#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1
-#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3
-#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_OFST 4
-#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_LEN 4
-
-/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT msgresponse */
-#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT_LEN 0
-
-/* MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN msgrequest */
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_LEN 4
-/* Requested operation */
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_OP_OFST 0
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_OP_LEN 1
-/* Align the arguments to 32 bits */
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3
-
-/* MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT msgresponse */
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMIN 0
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMAX 252
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LEN(num) (0+2*(num))
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_NUM(len) (((len)-0)/2)
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_OFST 0
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_LEN 2
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM_MCDI2 510
-
-/* MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_IN msgrequest */
-#define MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_IN_LEN 0
-
-/* MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_OUT msgrequest */
-#define MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_LICENSING
* Operations on the NVRAM_PARTITION_TYPE_LICENSE application license partition
* - not used for V3 licensing
@@ -21532,56 +21236,6 @@
/***********************************/
-/* MC_CMD_LICENSING_GET_ID_V3
- * Get ID and type from the NVRAM_PARTITION_TYPE_LICENSE application license
- * partition - V3 licensing (Medford)
- */
-#define MC_CMD_LICENSING_GET_ID_V3 0xd1
-#undef MC_CMD_0xd1_PRIVILEGE_CTG
-
-#define MC_CMD_0xd1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_LICENSING_GET_ID_V3_IN msgrequest */
-#define MC_CMD_LICENSING_GET_ID_V3_IN_LEN 0
-
-/* MC_CMD_LICENSING_GET_ID_V3_OUT msgresponse */
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN 8
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX 252
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LEN(num) (8+1*(num))
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_NUM(len) (((len)-8)/1)
-/* type of license (eg 3) */
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_OFST 0
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_LEN 4
-/* length of the license ID (in bytes) */
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_OFST 4
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_LEN 4
-/* the unique license ID of the adapter */
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST 8
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LEN 1
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MINNUM 0
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MAXNUM 244
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MAXNUM_MCDI2 1012
-
-
-/***********************************/
-/* MC_CMD_MC2MC_PROXY
- * Execute an arbitrary MCDI command on the slave MC of a dual-core device.
- * This will fail on a single-core system.
- */
-#define MC_CMD_MC2MC_PROXY 0xf4
-#undef MC_CMD_0xf4_PRIVILEGE_CTG
-
-#define MC_CMD_0xf4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_MC2MC_PROXY_IN msgrequest */
-#define MC_CMD_MC2MC_PROXY_IN_LEN 0
-
-/* MC_CMD_MC2MC_PROXY_OUT msgresponse */
-#define MC_CMD_MC2MC_PROXY_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_GET_LICENSED_APP_STATE
* Query the state of an individual licensed application. (Note that the actual
* state may be invalidated by the MC_CMD_LICENSING OP_UPDATE_LICENSE operation
@@ -21610,424 +21264,6 @@
/***********************************/
-/* MC_CMD_GET_LICENSED_V3_APP_STATE
- * Query the state of an individual licensed application. (Note that the actual
- * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE
- * operation or a reboot of the MC.) Used for V3 licensing (Medford)
- */
-#define MC_CMD_GET_LICENSED_V3_APP_STATE 0xd2
-#undef MC_CMD_0xd2_PRIVILEGE_CTG
-
-#define MC_CMD_0xd2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_LICENSED_V3_APP_STATE_IN msgrequest */
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_LEN 8
-/* application ID to query (LICENSED_V3_APPS_xxx) expressed as a single bit
- * mask
- */
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_OFST 0
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LEN 8
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_OFST 0
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_LEN 4
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_LBN 0
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_WIDTH 32
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_OFST 4
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_LEN 4
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_LBN 32
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_WIDTH 32
-
-/* MC_CMD_GET_LICENSED_V3_APP_STATE_OUT msgresponse */
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN 4
-/* state of this application */
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_OFST 0
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_LEN 4
-/* enum: no (or invalid) license is present for the application */
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_NOT_LICENSED 0x0
-/* enum: a valid license is present for the application */
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LICENSED 0x1
-
-
-/***********************************/
-/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES
-/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES duplicate marker removed */
 * Query the state of one or more licensed features. (Note that the actual
- * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE
- * operation or a reboot of the MC.) Used for V3 licensing (Medford)
- */
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES 0xd3
-#undef MC_CMD_0xd3_PRIVILEGE_CTG
-
-#define MC_CMD_0xd3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN msgrequest */
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_LEN 8
-/* features to query (LICENSED_V3_FEATURES_xxx) expressed as a mask with one or
- * more bits set
- */
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_OFST 0
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LEN 8
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_OFST 0
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_LEN 4
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_LBN 0
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_WIDTH 32
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_OFST 4
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_LEN 4
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_LBN 32
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_WIDTH 32
-
-/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT msgresponse */
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_LEN 8
-/* states of these features - bit set for licensed, clear for not licensed */
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_OFST 0
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LEN 8
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_OFST 0
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_LEN 4
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_LBN 0
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_WIDTH 32
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_OFST 4
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_LEN 4
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_LBN 32
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_WIDTH 32
-
-
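As with other 64-bit MCDI fields, the FEATURES and STATES masks above are carried as two 32-bit little-endian halves (LO at the field offset, HI four bytes later). A sketch of reassembling such a field from a raw buffer; the helper name is illustrative and a little-endian host is assumed.

#include <stdint.h>
#include <string.h>

/* Read a 64-bit MCDI field stored as LO/HI dword pair at 'ofst'. */
static uint64_t mcdi_get_qword(const uint8_t *buf, unsigned int ofst)
{
	uint32_t lo, hi;

	memcpy(&lo, buf + ofst, 4);
	memcpy(&hi, buf + ofst + 4, 4);
	return ((uint64_t)hi << 32) | lo;
}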
-/***********************************/
-/* MC_CMD_LICENSED_APP_OP
- * Perform an action for an individual licensed application - not used for V3
- * licensing.
- */
-#define MC_CMD_LICENSED_APP_OP 0xf6
-#undef MC_CMD_0xf6_PRIVILEGE_CTG
-
-#define MC_CMD_0xf6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_LICENSED_APP_OP_IN msgrequest */
-#define MC_CMD_LICENSED_APP_OP_IN_LENMIN 8
-#define MC_CMD_LICENSED_APP_OP_IN_LENMAX 252
-#define MC_CMD_LICENSED_APP_OP_IN_LENMAX_MCDI2 1020
-#define MC_CMD_LICENSED_APP_OP_IN_LEN(num) (8+4*(num))
-#define MC_CMD_LICENSED_APP_OP_IN_ARGS_NUM(len) (((len)-8)/4)
-/* application ID */
-#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_OFST 0
-#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_LEN 4
-/* the type of operation requested */
-#define MC_CMD_LICENSED_APP_OP_IN_OP_OFST 4
-#define MC_CMD_LICENSED_APP_OP_IN_OP_LEN 4
-/* enum: validate application */
-#define MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE 0x0
-/* enum: mask application */
-#define MC_CMD_LICENSED_APP_OP_IN_OP_MASK 0x1
-/* arguments specific to this particular operation */
-#define MC_CMD_LICENSED_APP_OP_IN_ARGS_OFST 8
-#define MC_CMD_LICENSED_APP_OP_IN_ARGS_LEN 4
-#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MINNUM 0
-#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MAXNUM 61
-#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MAXNUM_MCDI2 253
-
-/* MC_CMD_LICENSED_APP_OP_OUT msgresponse */
-#define MC_CMD_LICENSED_APP_OP_OUT_LENMIN 0
-#define MC_CMD_LICENSED_APP_OP_OUT_LENMAX 252
-#define MC_CMD_LICENSED_APP_OP_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_LICENSED_APP_OP_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_NUM(len) (((len)-0)/4)
-/* result specific to this particular operation */
-#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_OFST 0
-#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_LEN 4
-#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MINNUM 0
-#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MAXNUM 63
-#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MAXNUM_MCDI2 255
-
-/* MC_CMD_LICENSED_APP_OP_VALIDATE_IN msgrequest */
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_LEN 72
-/* application ID */
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_OFST 0
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_LEN 4
-/* the type of operation requested */
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_OFST 4
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_LEN 4
-/* validation challenge */
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_OFST 8
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_LEN 64
-
-/* MC_CMD_LICENSED_APP_OP_VALIDATE_OUT msgresponse */
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_LEN 68
-/* feature expiry (time_t) */
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_OFST 0
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_LEN 4
-/* validation response */
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_OFST 4
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_LEN 64
-
-/* MC_CMD_LICENSED_APP_OP_MASK_IN msgrequest */
-#define MC_CMD_LICENSED_APP_OP_MASK_IN_LEN 12
-/* application ID */
-#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_OFST 0
-#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_LEN 4
-/* the type of operation requested */
-#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_OFST 4
-#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_LEN 4
-/* flag */
-#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_OFST 8
-#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_LEN 4
-
-/* MC_CMD_LICENSED_APP_OP_MASK_OUT msgresponse */
-#define MC_CMD_LICENSED_APP_OP_MASK_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_LICENSED_V3_VALIDATE_APP
- * Perform validation for an individual licensed application - V3 licensing
- * (Medford)
- */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP 0xd4
-#undef MC_CMD_0xd4_PRIVILEGE_CTG
-
-#define MC_CMD_0xd4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_LICENSED_V3_VALIDATE_APP_IN msgrequest */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_LEN 56
-/* challenge for validation (384 bits) */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_OFST 0
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_LEN 48
-/* application ID expressed as a single bit mask */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_OFST 48
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LEN 8
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_OFST 48
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_LEN 4
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_LBN 384
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_WIDTH 32
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_OFST 52
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_LEN 4
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_LBN 416
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_WIDTH 32
-
-/* MC_CMD_LICENSED_V3_VALIDATE_APP_OUT msgresponse */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_LEN 116
-/* validation response to challenge in the form of ECDSA signature consisting
- * of two 384-bit integers, r and s, in big-endian order. The signature signs a
- * SHA-384 digest of a message constructed from the concatenation of the input
- * message and the remaining fields of this output message, e.g. challenge[48
- * bytes] ... expiry_time[4 bytes] ...
- */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_OFST 0
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_LEN 96
-/* application expiry time */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_OFST 96
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_LEN 4
-/* application expiry units */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_OFST 100
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_LEN 4
-/* enum: expiry units are accounting units */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_ACC 0x0
-/* enum: expiry units are calendar days */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_DAYS 0x1
-/* base MAC address of the NIC stored in NVRAM (note that this is a constant
- * value for a given NIC regardless of which function is calling; effectively
- * this is the PF0 base MAC address)
- */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_BASE_MACADDR_OFST 104
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_BASE_MACADDR_LEN 6
-/* MAC address of v-adaptor associated with the client. If no such v-adaptor
- * exists, then the field is filled with 0xFF.
- */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_VADAPTOR_MACADDR_OFST 110
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_VADAPTOR_MACADDR_LEN 6
-
-
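The comment above says the ECDSA signature covers a SHA-384 digest of the request challenge concatenated with the response fields that follow the signature. A hedged sketch of rebuilding that message before verification; the exact field ordering and any padding are assumptions drawn from that comment, and OpenSSL's SHA384() is used purely for illustration.

#include <stdint.h>
#include <string.h>
#include <openssl/sha.h>	/* SHA384(), SHA384_DIGEST_LENGTH */

/* Concatenate the 48-byte challenge with the 20 bytes of response that
 * follow the signature (expiry time, expiry units, base MAC, v-adaptor
 * MAC) and hash them, as described above.
 */
static void licensing_v3_digest(const uint8_t *challenge,  /* 48 bytes */
				const uint8_t *resp,       /* OUT buffer */
				uint8_t digest[SHA384_DIGEST_LENGTH])
{
	uint8_t msg[48 + 20];

	memcpy(msg, challenge, MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_LEN);
	memcpy(msg + 48,
	       resp + MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_OFST,
	       20);
	SHA384(msg, sizeof(msg), digest);
}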
-/***********************************/
-/* MC_CMD_LICENSED_V3_MASK_FEATURES
- * Mask features - V3 licensing (Medford)
- */
-#define MC_CMD_LICENSED_V3_MASK_FEATURES 0xd5
-#undef MC_CMD_0xd5_PRIVILEGE_CTG
-
-#define MC_CMD_0xd5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_LICENSED_V3_MASK_FEATURES_IN msgrequest */
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_LEN 12
-/* mask to be applied to features to be changed */
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_OFST 0
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LEN 8
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_OFST 0
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_LEN 4
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_LBN 0
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_WIDTH 32
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_OFST 4
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_LEN 4
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_LBN 32
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_WIDTH 32
-/* whether to turn on or turn off the masked features */
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_OFST 8
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_LEN 4
-/* enum: turn the features off */
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_OFF 0x0
-/* enum: turn the features back on */
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_ON 0x1
-
-/* MC_CMD_LICENSED_V3_MASK_FEATURES_OUT msgresponse */
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_LICENSING_V3_TEMPORARY
- * Perform operations to support installation of a single temporary license in
- * the adapter, in addition to those found in the licensing partition. See
- * SF-116124-SW for an overview of how this could be used. The license is
- * stored in MC persistent data and so will survive a MC reboot, but will be
- * erased when the adapter is power cycled
- */
-#define MC_CMD_LICENSING_V3_TEMPORARY 0xd6
-#undef MC_CMD_0xd6_PRIVILEGE_CTG
-
-#define MC_CMD_0xd6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_LICENSING_V3_TEMPORARY_IN msgrequest */
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_LEN 4
-/* operation code */
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_OFST 0
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_LEN 4
-/* enum: install a new license, overwriting any existing temporary license.
- * This is an asynchronous operation owing to the time taken to validate an
- * ECDSA license
- */
-#define MC_CMD_LICENSING_V3_TEMPORARY_SET 0x0
-/* enum: clear the license immediately rather than waiting for the next power
- * cycle
- */
-#define MC_CMD_LICENSING_V3_TEMPORARY_CLEAR 0x1
-/* enum: get the status of the asynchronous MC_CMD_LICENSING_V3_TEMPORARY_SET
- * operation
- */
-#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS 0x2
-
-/* MC_CMD_LICENSING_V3_TEMPORARY_IN_SET msgrequest */
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LEN 164
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_OFST 0
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_LEN 4
-/* ECDSA license and signature */
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_OFST 4
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_LEN 160
-
-/* MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR msgrequest */
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_LEN 4
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_OFST 0
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_LEN 4
-
-/* MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS msgrequest */
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_LEN 4
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_OFST 0
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_LEN 4
-
-/* MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS msgresponse */
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LEN 12
-/* status code */
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_OFST 0
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_LEN 4
-/* enum: finished validating and installing license */
-#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_OK 0x0
-/* enum: license validation and installation in progress */
-#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_IN_PROGRESS 0x1
-/* enum: licensing error. More specific error messages are not provided to
- * avoid exposing details of the licensing system to the client
- */
-#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_ERROR 0x2
-/* bitmask of licensed features */
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_OFST 4
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LEN 8
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_OFST 4
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_LEN 4
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_LBN 32
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_WIDTH 32
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_OFST 8
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_LEN 4
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_LBN 64
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_WIDTH 32
-
-
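The comments above describe SET as asynchronous, with STATUS polled until validation of the ECDSA license finishes. A hedged sketch of that flow on a little-endian host; mcdi_rpc() is a hypothetical transport hook standing in for whatever issues the MCDI command, not a real driver entry point.

#include <stdint.h>
#include <string.h>

/* Placeholder transport: send 'cmd' with 'req' and collect 'resp'. */
int mcdi_rpc(unsigned int cmd, const void *req, size_t req_len,
	     void *resp, size_t resp_len);

static int license_v3_install(const uint8_t *license /* 160 bytes */)
{
	uint8_t req[MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LEN] = { 0 };
	uint8_t resp[MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LEN];
	uint32_t op = MC_CMD_LICENSING_V3_TEMPORARY_SET;
	uint32_t status;

	/* SET kicks off asynchronous validation of the ECDSA license. */
	memcpy(req + MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_OFST, &op, 4);
	memcpy(req + MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_OFST,
	       license, MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_LEN);
	mcdi_rpc(MC_CMD_LICENSING_V3_TEMPORARY, req, sizeof(req), NULL, 0);

	/* Poll STATUS until the MC reports a terminal state. */
	do {
		op = MC_CMD_LICENSING_V3_TEMPORARY_STATUS;
		mcdi_rpc(MC_CMD_LICENSING_V3_TEMPORARY, &op, sizeof(op),
			 resp, sizeof(resp));
		memcpy(&status, resp, 4);
	} while (status == MC_CMD_LICENSING_V3_TEMPORARY_STATUS_IN_PROGRESS);

	return status == MC_CMD_LICENSING_V3_TEMPORARY_STATUS_OK ? 0 : -1;
}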
-/***********************************/
-/* MC_CMD_SET_PORT_SNIFF_CONFIG
- * Configure RX port sniffing for the physical port associated with the calling
- * function. Only a privileged function may change the port sniffing
- * configuration. A copy of all traffic delivered to the host (non-promiscuous
- * mode) or all traffic arriving at the port (promiscuous mode) may be
- * delivered to a specific queue, or a set of queues with RSS.
- */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG 0xf7
-#undef MC_CMD_0xf7_PRIVILEGE_CTG
-
-#define MC_CMD_0xf7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SET_PORT_SNIFF_CONFIG_IN msgrequest */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN 16
-/* configuration flags */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_LEN 4
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_OFST 0
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_OFST 0
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_LBN 1
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_WIDTH 1
-/* receive queue handle (for RSS mode, this is the base queue) */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_LEN 4
-/* receive mode */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_LEN 4
-/* enum: receive to just the specified queue */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0
-/* enum: receive to multiple queues using RSS context */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1
-/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note
- * that these handles should be considered opaque to the host, although a value
- * of 0xFFFFFFFF is guaranteed never to be a valid handle.
- */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_LEN 4
-
-/* MC_CMD_SET_PORT_SNIFF_CONFIG_OUT msgresponse */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_OUT_LEN 0
-
-
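The request above is a flags dword (ENABLE and PROMISCUOUS bits), a queue handle, a receive mode and an RSS context. A sketch of filling it for the simplest case, mirroring host-delivered traffic to one queue: ENABLE set, PROMISCUOUS clear, RX_MODE_SIMPLE, and the RSS context left at 0xFFFFFFFF (guaranteed never to be a valid handle, per the comment above). Little-endian host assumed; this is illustration, not driver code.

#include <stdint.h>
#include <string.h>

/* Fill a 16-byte MC_CMD_SET_PORT_SNIFF_CONFIG request for single-queue,
 * non-promiscuous sniffing of the calling function's port.
 */
static void port_sniff_fill(uint8_t *req, uint32_t rxq)
{
	uint32_t flags = 1u << MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_LBN;
	uint32_t mode = MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE;
	uint32_t ctx = 0xffffffffu;

	memcpy(req + MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_OFST, &flags, 4);
	memcpy(req + MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST, &rxq, 4);
	memcpy(req + MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST, &mode, 4);
	memcpy(req + MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST, &ctx, 4);
}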
-/***********************************/
-/* MC_CMD_GET_PORT_SNIFF_CONFIG
- * Obtain the current RX port sniffing configuration for the physical port
- * associated with the calling function. Only a privileged function may read
- * the configuration.
- */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG 0xf8
-#undef MC_CMD_0xf8_PRIVILEGE_CTG
-
-#define MC_CMD_0xf8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_PORT_SNIFF_CONFIG_IN msgrequest */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_IN_LEN 0
-
-/* MC_CMD_GET_PORT_SNIFF_CONFIG_OUT msgresponse */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_LEN 16
-/* configuration flags */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_LEN 4
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_OFST 0
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_OFST 0
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_LBN 1
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_WIDTH 1
-/* receiving queue handle (for RSS mode, this is the base queue) */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_LEN 4
-/* receive mode */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_LEN 4
-/* enum: receiving to just the specified queue */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0
-/* enum: receiving to multiple queues using RSS context */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1
-/* RSS context (for RX_MODE_RSS) */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_LEN 4
-
-
-/***********************************/
/* MC_CMD_SET_PARSER_DISP_CONFIG
* Change configuration related to the parser-dispatcher subsystem.
*/
@@ -22073,305 +21309,6 @@
/***********************************/
-/* MC_CMD_GET_PARSER_DISP_CONFIG
- * Read configuration related to the parser-dispatcher subsystem.
- */
-#define MC_CMD_GET_PARSER_DISP_CONFIG 0xfa
-#undef MC_CMD_0xfa_PRIVILEGE_CTG
-
-#define MC_CMD_0xfa_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_PARSER_DISP_CONFIG_IN msgrequest */
-#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_LEN 8
-/* the type of configuration setting to read */
-#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
-#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_SET_PARSER_DISP_CONFIG/MC_CMD_SET_PARSER_DISP_CONFIG_IN/TYPE */
-/* handle for the entity to query: queue handle, EVB port ID, etc. depending on
- * the type of configuration setting being read
- */
-#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
-#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_LEN 4
-
-/* MC_CMD_GET_PARSER_DISP_CONFIG_OUT msgresponse */
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMIN 4
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMAX 252
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_NUM(len) (((len)-0)/4)
-/* current value: the details depend on the type of configuration setting being
- * read
- */
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_OFST 0
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_LEN 4
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MINNUM 1
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MAXNUM 63
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MAXNUM_MCDI2 255
-
-
-/***********************************/
-/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG
- * Configure TX port sniffing for the physical port associated with the calling
- * function. Only a privileged function may change the port sniffing
- * configuration. A copy of all traffic transmitted through the port may be
- * delivered to a specific queue, or a set of queues with RSS. Note that these
- * packets are delivered with transmit timestamps in the packet prefix, not
- * receive timestamps, so it is likely that the queue(s) will need to be
- * dedicated as TX sniff receivers.
- */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG 0xfb
-#undef MC_CMD_0xfb_PRIVILEGE_CTG
-
-#define MC_CMD_0xfb_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN msgrequest */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_LEN 16
-/* configuration flags */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_LEN 4
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_OFST 0
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
-/* receive queue handle (for RSS mode, this is the base queue) */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_LEN 4
-/* receive mode */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_LEN 4
-/* enum: receive to just the specified queue */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0
-/* enum: receive to multiple queues using RSS context */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1
-/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note
- * that these handles should be considered opaque to the host, although a value
- * of 0xFFFFFFFF is guaranteed never to be a valid handle.
- */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_LEN 4
-
-/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG
- * Obtain the current TX port sniffing configuration for the physical port
- * associated with the calling function. Only a privileged function may read
- * the configuration.
- */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG 0xfc
-#undef MC_CMD_0xfc_PRIVILEGE_CTG
-
-#define MC_CMD_0xfc_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN msgrequest */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN_LEN 0
-
-/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_LEN 16
-/* configuration flags */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_LEN 4
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_OFST 0
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
-/* receiving queue handle (for RSS mode, this is the base queue) */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_LEN 4
-/* receive mode */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_LEN 4
-/* enum: receiving to just the specified queue */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0
-/* enum: receiving to multiple queues using RSS context */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1
-/* RSS context (for RX_MODE_RSS) */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_LEN 4
-
-
-/***********************************/
-/* MC_CMD_RMON_STATS_RX_ERRORS
- * Per queue rx error stats.
- */
-#define MC_CMD_RMON_STATS_RX_ERRORS 0xfe
-#undef MC_CMD_0xfe_PRIVILEGE_CTG
-
-#define MC_CMD_0xfe_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_RMON_STATS_RX_ERRORS_IN msgrequest */
-#define MC_CMD_RMON_STATS_RX_ERRORS_IN_LEN 8
-/* The rx queue to get stats for. */
-#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_OFST 0
-#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_LEN 4
-#define MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_OFST 4
-#define MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_LEN 4
-#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_OFST 4
-#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_LBN 0
-#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_STATS_RX_ERRORS_OUT msgresponse */
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_LEN 16
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_OFST 0
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_LEN 4
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_OFST 4
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_LEN 4
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_OFST 8
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_LEN 4
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_OFST 12
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_LEN 4
-
-
-/***********************************/
-/* MC_CMD_GET_PCIE_RESOURCE_INFO
- * Find out about available PCIE resources
- */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO 0xfd
-#undef MC_CMD_0xfd_PRIVILEGE_CTG
-
-#define MC_CMD_0xfd_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_PCIE_RESOURCE_INFO_IN msgrequest */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_IN_LEN 0
-
-/* MC_CMD_GET_PCIE_RESOURCE_INFO_OUT msgresponse */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_LEN 28
-/* The maximum number of PFs the device can expose */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_OFST 0
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_LEN 4
-/* The maximum number of VFs the device can expose in total */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_OFST 4
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_LEN 4
-/* The maximum number of MSI-X vectors the device can provide in total */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_OFST 8
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_LEN 4
-/* the number of MSI-X vectors the device will allocate by default to each PF
- */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_OFST 12
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_LEN 4
-/* the number of MSI-X vectors the device will allocate by default to each VF
- */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_OFST 16
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_LEN 4
-/* the maximum number of MSI-X vectors the device can allocate to any one PF */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_OFST 20
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_LEN 4
-/* the maximum number of MSI-X vectors the device can allocate to any one VF */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_OFST 24
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_LEN 4
-
-
-/***********************************/
-/* MC_CMD_GET_PORT_MODES
- * Find out about available port modes
- */
-#define MC_CMD_GET_PORT_MODES 0xff
-#undef MC_CMD_0xff_PRIVILEGE_CTG
-
-#define MC_CMD_0xff_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_PORT_MODES_IN msgrequest */
-#define MC_CMD_GET_PORT_MODES_IN_LEN 0
-
-/* MC_CMD_GET_PORT_MODES_OUT msgresponse */
-#define MC_CMD_GET_PORT_MODES_OUT_LEN 12
-/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*)
- * that are supported for customer use in production firmware.
- */
-#define MC_CMD_GET_PORT_MODES_OUT_MODES_OFST 0
-#define MC_CMD_GET_PORT_MODES_OUT_MODES_LEN 4
-/* Default (canonical) board mode */
-#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_OFST 4
-#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_LEN 4
-/* Current board mode */
-#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST 8
-#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_LEN 4
-
-/* MC_CMD_GET_PORT_MODES_OUT_V2 msgresponse */
-#define MC_CMD_GET_PORT_MODES_OUT_V2_LEN 16
-/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*)
- * that are supported for customer use in production firmware.
- */
-#define MC_CMD_GET_PORT_MODES_OUT_V2_MODES_OFST 0
-#define MC_CMD_GET_PORT_MODES_OUT_V2_MODES_LEN 4
-/* Default (canonical) board mode */
-#define MC_CMD_GET_PORT_MODES_OUT_V2_DEFAULT_MODE_OFST 4
-#define MC_CMD_GET_PORT_MODES_OUT_V2_DEFAULT_MODE_LEN 4
-/* Current board mode */
-#define MC_CMD_GET_PORT_MODES_OUT_V2_CURRENT_MODE_OFST 8
-#define MC_CMD_GET_PORT_MODES_OUT_V2_CURRENT_MODE_LEN 4
-/* Bitmask of engineering port modes available on the board (indexed by
- * TLV_PORT_MODE_*). A superset of MC_CMD_GET_PORT_MODES_OUT/MODES that
- * contains all modes implemented in firmware for a particular board. Modes
- * listed in MODES are considered production modes and should be exposed in
- * userland tools. Modes listed in ENGINEERING_MODES, but not in MODES
- * should be considered hidden (not to be exposed in userland tools) and for
- * engineering use only. There are no other semantic differences and any mode
- * listed in either MODES or ENGINEERING_MODES can be set on the board.
- */
-#define MC_CMD_GET_PORT_MODES_OUT_V2_ENGINEERING_MODES_OFST 12
-#define MC_CMD_GET_PORT_MODES_OUT_V2_ENGINEERING_MODES_LEN 4
-
-
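Per the comment above, ENGINEERING_MODES is a superset of MODES, and modes present only in ENGINEERING_MODES are valid to set but should stay hidden from userland tools. A one-line sketch of extracting that hidden subset:

#include <stdint.h>

/* Modes implemented in firmware but not intended for customer use. */
static uint32_t port_modes_hidden(uint32_t modes, uint32_t engineering_modes)
{
	return engineering_modes & ~modes;
}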
-/***********************************/
-/* MC_CMD_OVERRIDE_PORT_MODE
- * Override flash config port mode for subsequent MC reboot(s). Override data
- * is stored in the persistent data section of DMEM and activated on next MC
- * warm reboot. A cold reboot resets the override. It is assumed that a
- * sufficient number of PFs are available and that port mapping is valid for
- * the new port mode, as the override does not affect PF configuration.
- */
-#define MC_CMD_OVERRIDE_PORT_MODE 0x137
-#undef MC_CMD_0x137_PRIVILEGE_CTG
-
-#define MC_CMD_0x137_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_OVERRIDE_PORT_MODE_IN msgrequest */
-#define MC_CMD_OVERRIDE_PORT_MODE_IN_LEN 8
-#define MC_CMD_OVERRIDE_PORT_MODE_IN_FLAGS_OFST 0
-#define MC_CMD_OVERRIDE_PORT_MODE_IN_FLAGS_LEN 4
-#define MC_CMD_OVERRIDE_PORT_MODE_IN_ENABLE_OFST 0
-#define MC_CMD_OVERRIDE_PORT_MODE_IN_ENABLE_LBN 0
-#define MC_CMD_OVERRIDE_PORT_MODE_IN_ENABLE_WIDTH 1
-/* New mode (TLV_PORT_MODE_*) to set, if override enabled */
-#define MC_CMD_OVERRIDE_PORT_MODE_IN_MODE_OFST 4
-#define MC_CMD_OVERRIDE_PORT_MODE_IN_MODE_LEN 4
-
-/* MC_CMD_OVERRIDE_PORT_MODE_OUT msgresponse */
-#define MC_CMD_OVERRIDE_PORT_MODE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_READ_ATB
- * Sample voltages on the ATB
- */
-#define MC_CMD_READ_ATB 0x100
-#undef MC_CMD_0x100_PRIVILEGE_CTG
-
-#define MC_CMD_0x100_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_READ_ATB_IN msgrequest */
-#define MC_CMD_READ_ATB_IN_LEN 16
-#define MC_CMD_READ_ATB_IN_SIGNAL_BUS_OFST 0
-#define MC_CMD_READ_ATB_IN_SIGNAL_BUS_LEN 4
-#define MC_CMD_READ_ATB_IN_BUS_CCOM 0x0 /* enum */
-#define MC_CMD_READ_ATB_IN_BUS_CKR 0x1 /* enum */
-#define MC_CMD_READ_ATB_IN_BUS_CPCIE 0x8 /* enum */
-#define MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_OFST 4
-#define MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_LEN 4
-#define MC_CMD_READ_ATB_IN_SIGNAL_SEL_OFST 8
-#define MC_CMD_READ_ATB_IN_SIGNAL_SEL_LEN 4
-#define MC_CMD_READ_ATB_IN_SETTLING_TIME_US_OFST 12
-#define MC_CMD_READ_ATB_IN_SETTLING_TIME_US_LEN 4
-
-/* MC_CMD_READ_ATB_OUT msgresponse */
-#define MC_CMD_READ_ATB_OUT_LEN 4
-#define MC_CMD_READ_ATB_OUT_SAMPLE_MV_OFST 0
-#define MC_CMD_READ_ATB_OUT_SAMPLE_MV_LEN 4
-
-
-/***********************************/
/* MC_CMD_GET_WORKAROUNDS
* Read the list of all implemented and all currently enabled workarounds. The
* enums here must correspond with those in MC_CMD_WORKAROUND.
@@ -22538,447 +21475,6 @@
#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0
#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_LEN 4
-
-/***********************************/
-/* MC_CMD_GET_SNAPSHOT_LENGTH
- * Obtain the current range of allowable values for the SNAPSHOT_LENGTH
- * parameter to MC_CMD_INIT_RXQ.
- */
-#define MC_CMD_GET_SNAPSHOT_LENGTH 0x101
-#undef MC_CMD_0x101_PRIVILEGE_CTG
-
-#define MC_CMD_0x101_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_SNAPSHOT_LENGTH_IN msgrequest */
-#define MC_CMD_GET_SNAPSHOT_LENGTH_IN_LEN 0
-
-/* MC_CMD_GET_SNAPSHOT_LENGTH_OUT msgresponse */
-#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_LEN 8
-/* Minimum acceptable snapshot length. */
-#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_OFST 0
-#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_LEN 4
-/* Maximum acceptable snapshot length. */
-#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_OFST 4
-#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_LEN 4
-
-
-/***********************************/
-/* MC_CMD_FUSE_DIAGS
- * Additional fuse diagnostics
- */
-#define MC_CMD_FUSE_DIAGS 0x102
-#undef MC_CMD_0x102_PRIVILEGE_CTG
-
-#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_FUSE_DIAGS_IN msgrequest */
-#define MC_CMD_FUSE_DIAGS_IN_LEN 0
-
-/* MC_CMD_FUSE_DIAGS_OUT msgresponse */
-#define MC_CMD_FUSE_DIAGS_OUT_LEN 48
-/* Total number of mismatched bits between pairs in area 0 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_OFST 0
-#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_LEN 4
-/* Total number of unexpectedly clear (set in B but not A) bits in area 0 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_OFST 4
-#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_LEN 4
-/* Total number of unexpectedly clear (set in A but not B) bits in area 0 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_OFST 8
-#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_LEN 4
-/* Checksum of data after logical OR of pairs in area 0 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_OFST 12
-#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_LEN 4
-/* Total number of mismatched bits between pairs in area 1 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_OFST 16
-#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_LEN 4
-/* Total number of unexpectedly clear (set in B but not A) bits in area 1 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_OFST 20
-#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_LEN 4
-/* Total number of unexpectedly clear (set in A but not B) bits in area 1 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_OFST 24
-#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_LEN 4
-/* Checksum of data after logical OR of pairs in area 1 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_OFST 28
-#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_LEN 4
-/* Total number of mismatched bits between pairs in area 2 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_OFST 32
-#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_LEN 4
-/* Total number of unexpectedly clear (set in B but not A) bits in area 2 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_OFST 36
-#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_LEN 4
-/* Total number of unexpectedly clear (set in A but not B) bits in area 2 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_OFST 40
-#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_LEN 4
-/* Checksum of data after logical OR of pairs in area 2 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_OFST 44
-#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_LEN 4
-
-
-/***********************************/
-/* MC_CMD_PRIVILEGE_MODIFY
- * Modify the privileges of a set of PCIe functions. Note that this operation
- * only affects non-admin functions unless the admin privilege itself is
- * included in one of the masks provided.
- */
-#define MC_CMD_PRIVILEGE_MODIFY 0x60
-#undef MC_CMD_0x60_PRIVILEGE_CTG
-
-#define MC_CMD_0x60_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_PRIVILEGE_MODIFY_IN msgrequest */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_LEN 16
-/* The groups of functions to have their privilege masks modified. */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_OFST 0
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_LEN 4
-#define MC_CMD_PRIVILEGE_MODIFY_IN_NONE 0x0 /* enum */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_ALL 0x1 /* enum */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_PFS_ONLY 0x2 /* enum */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_ONLY 0x3 /* enum */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_OF_PF 0x4 /* enum */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_ONE 0x5 /* enum */
-/* For VFS_OF_PF specify the PF, for ONE specify the target function */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_OFST 4
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_LEN 4
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_OFST 4
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_LBN 0
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_WIDTH 16
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_OFST 4
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_LBN 16
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_WIDTH 16
-/* Privileges to be added to the target functions. For privilege definitions
- * refer to the command MC_CMD_PRIVILEGE_MASK
- */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_OFST 8
-#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_LEN 4
-/* Privileges to be removed from the target functions. For privilege
- * definitions refer to the command MC_CMD_PRIVILEGE_MASK
- */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_OFST 12
-#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_LEN 4
-
-/* MC_CMD_PRIVILEGE_MODIFY_OUT msgresponse */
-#define MC_CMD_PRIVILEGE_MODIFY_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_XPM_READ_BYTES
- * Read XPM memory
- */
-#define MC_CMD_XPM_READ_BYTES 0x103
-#undef MC_CMD_0x103_PRIVILEGE_CTG
-
-#define MC_CMD_0x103_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_XPM_READ_BYTES_IN msgrequest */
-#define MC_CMD_XPM_READ_BYTES_IN_LEN 8
-/* Start address (byte) */
-#define MC_CMD_XPM_READ_BYTES_IN_ADDR_OFST 0
-#define MC_CMD_XPM_READ_BYTES_IN_ADDR_LEN 4
-/* Count (bytes) */
-#define MC_CMD_XPM_READ_BYTES_IN_COUNT_OFST 4
-#define MC_CMD_XPM_READ_BYTES_IN_COUNT_LEN 4
-
-/* MC_CMD_XPM_READ_BYTES_OUT msgresponse */
-#define MC_CMD_XPM_READ_BYTES_OUT_LENMIN 0
-#define MC_CMD_XPM_READ_BYTES_OUT_LENMAX 252
-#define MC_CMD_XPM_READ_BYTES_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_XPM_READ_BYTES_OUT_LEN(num) (0+1*(num))
-#define MC_CMD_XPM_READ_BYTES_OUT_DATA_NUM(len) (((len)-0)/1)
-/* Data */
-#define MC_CMD_XPM_READ_BYTES_OUT_DATA_OFST 0
-#define MC_CMD_XPM_READ_BYTES_OUT_DATA_LEN 1
-#define MC_CMD_XPM_READ_BYTES_OUT_DATA_MINNUM 0
-#define MC_CMD_XPM_READ_BYTES_OUT_DATA_MAXNUM 252
-#define MC_CMD_XPM_READ_BYTES_OUT_DATA_MAXNUM_MCDI2 1020
-
-
-/***********************************/
-/* MC_CMD_XPM_WRITE_BYTES
- * Write XPM memory
- */
-#define MC_CMD_XPM_WRITE_BYTES 0x104
-#undef MC_CMD_0x104_PRIVILEGE_CTG
-
-#define MC_CMD_0x104_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_XPM_WRITE_BYTES_IN msgrequest */
-#define MC_CMD_XPM_WRITE_BYTES_IN_LENMIN 8
-#define MC_CMD_XPM_WRITE_BYTES_IN_LENMAX 252
-#define MC_CMD_XPM_WRITE_BYTES_IN_LENMAX_MCDI2 1020
-#define MC_CMD_XPM_WRITE_BYTES_IN_LEN(num) (8+1*(num))
-#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_NUM(len) (((len)-8)/1)
-/* Start address (byte) */
-#define MC_CMD_XPM_WRITE_BYTES_IN_ADDR_OFST 0
-#define MC_CMD_XPM_WRITE_BYTES_IN_ADDR_LEN 4
-/* Count (bytes) */
-#define MC_CMD_XPM_WRITE_BYTES_IN_COUNT_OFST 4
-#define MC_CMD_XPM_WRITE_BYTES_IN_COUNT_LEN 4
-/* Data */
-#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_OFST 8
-#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_LEN 1
-#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_MINNUM 0
-#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_MAXNUM 244
-#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_MAXNUM_MCDI2 1012
-
-/* MC_CMD_XPM_WRITE_BYTES_OUT msgresponse */
-#define MC_CMD_XPM_WRITE_BYTES_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_XPM_READ_SECTOR
- * Read XPM sector
- */
-#define MC_CMD_XPM_READ_SECTOR 0x105
-#undef MC_CMD_0x105_PRIVILEGE_CTG
-
-#define MC_CMD_0x105_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_XPM_READ_SECTOR_IN msgrequest */
-#define MC_CMD_XPM_READ_SECTOR_IN_LEN 8
-/* Sector index */
-#define MC_CMD_XPM_READ_SECTOR_IN_INDEX_OFST 0
-#define MC_CMD_XPM_READ_SECTOR_IN_INDEX_LEN 4
-/* Sector size */
-#define MC_CMD_XPM_READ_SECTOR_IN_SIZE_OFST 4
-#define MC_CMD_XPM_READ_SECTOR_IN_SIZE_LEN 4
-
-/* MC_CMD_XPM_READ_SECTOR_OUT msgresponse */
-#define MC_CMD_XPM_READ_SECTOR_OUT_LENMIN 4
-#define MC_CMD_XPM_READ_SECTOR_OUT_LENMAX 36
-#define MC_CMD_XPM_READ_SECTOR_OUT_LENMAX_MCDI2 36
-#define MC_CMD_XPM_READ_SECTOR_OUT_LEN(num) (4+1*(num))
-#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_NUM(len) (((len)-4)/1)
-/* Sector type */
-#define MC_CMD_XPM_READ_SECTOR_OUT_TYPE_OFST 0
-#define MC_CMD_XPM_READ_SECTOR_OUT_TYPE_LEN 4
-#define MC_CMD_XPM_READ_SECTOR_OUT_BLANK 0x0 /* enum */
-#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_128 0x1 /* enum */
-#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_256 0x2 /* enum */
-#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_DATA 0x3 /* enum */
-#define MC_CMD_XPM_READ_SECTOR_OUT_INVALID 0xff /* enum */
-/* Sector data */
-#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_OFST 4
-#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_LEN 1
-#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MINNUM 0
-#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MAXNUM 32
-#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MAXNUM_MCDI2 32
-
-
-/***********************************/
-/* MC_CMD_XPM_WRITE_SECTOR
- * Write XPM sector
- */
-#define MC_CMD_XPM_WRITE_SECTOR 0x106
-#undef MC_CMD_0x106_PRIVILEGE_CTG
-
-#define MC_CMD_0x106_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_XPM_WRITE_SECTOR_IN msgrequest */
-#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMIN 12
-#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMAX 44
-#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMAX_MCDI2 44
-#define MC_CMD_XPM_WRITE_SECTOR_IN_LEN(num) (12+1*(num))
-#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_NUM(len) (((len)-12)/1)
-/* If writing fails due to an uncorrectable error, try up to RETRIES following
- * sectors (or until no more space available). If 0, only one write attempt is
- * made. Note that uncorrectable errors are unlikely, thanks to the XPM
- * self-repair mechanism.
- */
-#define MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_OFST 0
-#define MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_LEN 1
-#define MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_OFST 1
-#define MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_LEN 3
-/* Sector type */
-#define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_OFST 4
-#define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_XPM_READ_SECTOR/MC_CMD_XPM_READ_SECTOR_OUT/TYPE */
-/* Sector size */
-#define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_OFST 8
-#define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_LEN 4
-/* Sector data */
-#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_OFST 12
-#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_LEN 1
-#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MINNUM 0
-#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MAXNUM 32
-#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MAXNUM_MCDI2 32
-
-/* MC_CMD_XPM_WRITE_SECTOR_OUT msgresponse */
-#define MC_CMD_XPM_WRITE_SECTOR_OUT_LEN 4
-/* New sector index */
-#define MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_OFST 0
-#define MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_LEN 4
-
-
-/***********************************/
-/* MC_CMD_XPM_INVALIDATE_SECTOR
- * Invalidate XPM sector
- */
-#define MC_CMD_XPM_INVALIDATE_SECTOR 0x107
-#undef MC_CMD_0x107_PRIVILEGE_CTG
-
-#define MC_CMD_0x107_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_XPM_INVALIDATE_SECTOR_IN msgrequest */
-#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_LEN 4
-/* Sector index */
-#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_OFST 0
-#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_LEN 4
-
-/* MC_CMD_XPM_INVALIDATE_SECTOR_OUT msgresponse */
-#define MC_CMD_XPM_INVALIDATE_SECTOR_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_XPM_BLANK_CHECK
- * Blank-check XPM memory and report bad locations
- */
-#define MC_CMD_XPM_BLANK_CHECK 0x108
-#undef MC_CMD_0x108_PRIVILEGE_CTG
-
-#define MC_CMD_0x108_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_XPM_BLANK_CHECK_IN msgrequest */
-#define MC_CMD_XPM_BLANK_CHECK_IN_LEN 8
-/* Start address (byte) */
-#define MC_CMD_XPM_BLANK_CHECK_IN_ADDR_OFST 0
-#define MC_CMD_XPM_BLANK_CHECK_IN_ADDR_LEN 4
-/* Count (bytes) */
-#define MC_CMD_XPM_BLANK_CHECK_IN_COUNT_OFST 4
-#define MC_CMD_XPM_BLANK_CHECK_IN_COUNT_LEN 4
-
-/* MC_CMD_XPM_BLANK_CHECK_OUT msgresponse */
-#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMIN 4
-#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMAX 252
-#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_XPM_BLANK_CHECK_OUT_LEN(num) (4+2*(num))
-#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_NUM(len) (((len)-4)/2)
-/* Total number of bad (non-blank) locations */
-#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_OFST 0
-#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_LEN 4
-/* Addresses of bad locations (may be less than BAD_COUNT, if all cannot fit
- * into MCDI response)
- */
-#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_OFST 4
-#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_LEN 2
-#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MINNUM 0
-#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MAXNUM 124
-#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MAXNUM_MCDI2 508
-
-
-/***********************************/
-/* MC_CMD_XPM_REPAIR
- * Blank-check and repair XPM memory
- */
-#define MC_CMD_XPM_REPAIR 0x109
-#undef MC_CMD_0x109_PRIVILEGE_CTG
-
-#define MC_CMD_0x109_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_XPM_REPAIR_IN msgrequest */
-#define MC_CMD_XPM_REPAIR_IN_LEN 8
-/* Start address (byte) */
-#define MC_CMD_XPM_REPAIR_IN_ADDR_OFST 0
-#define MC_CMD_XPM_REPAIR_IN_ADDR_LEN 4
-/* Count (bytes) */
-#define MC_CMD_XPM_REPAIR_IN_COUNT_OFST 4
-#define MC_CMD_XPM_REPAIR_IN_COUNT_LEN 4
-
-/* MC_CMD_XPM_REPAIR_OUT msgresponse */
-#define MC_CMD_XPM_REPAIR_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_XPM_DECODER_TEST
- * Test XPM memory address decoders for gross manufacturing defects. Can only
- * be performed on an unprogrammed part.
- */
-#define MC_CMD_XPM_DECODER_TEST 0x10a
-#undef MC_CMD_0x10a_PRIVILEGE_CTG
-
-#define MC_CMD_0x10a_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_XPM_DECODER_TEST_IN msgrequest */
-#define MC_CMD_XPM_DECODER_TEST_IN_LEN 0
-
-/* MC_CMD_XPM_DECODER_TEST_OUT msgresponse */
-#define MC_CMD_XPM_DECODER_TEST_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_XPM_WRITE_TEST
- * XPM memory write test. Test XPM write logic for gross manufacturing defects
- * by writing to a dedicated test row. There are 16 locations in the test row
- * and the test can only be performed on locations that have not been
- * previously used (i.e. can be run at most 16 times). The test will pick the
- * first available location to use, or fail with ENOSPC if none left.
- */
-#define MC_CMD_XPM_WRITE_TEST 0x10b
-#undef MC_CMD_0x10b_PRIVILEGE_CTG
-
-#define MC_CMD_0x10b_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_XPM_WRITE_TEST_IN msgrequest */
-#define MC_CMD_XPM_WRITE_TEST_IN_LEN 0
-
-/* MC_CMD_XPM_WRITE_TEST_OUT msgresponse */
-#define MC_CMD_XPM_WRITE_TEST_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_EXEC_SIGNED
- * Check the CMAC of the contents of IMEM and DMEM against the value supplied
- * and if correct begin execution from the start of IMEM. The caller supplies a
- * key ID, the length of IMEM and DMEM to validate and the expected CMAC. CMAC
- * computation runs from the start of IMEM, and from the start of DMEM + 16k,
- * to match flash booting. The command will respond with EINVAL if the CMAC
- * does not match, otherwise it will respond with success before it jumps to IMEM.
- */
-#define MC_CMD_EXEC_SIGNED 0x10c
-#undef MC_CMD_0x10c_PRIVILEGE_CTG
-
-#define MC_CMD_0x10c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_EXEC_SIGNED_IN msgrequest */
-#define MC_CMD_EXEC_SIGNED_IN_LEN 28
-/* the length of code to include in the CMAC */
-#define MC_CMD_EXEC_SIGNED_IN_CODELEN_OFST 0
-#define MC_CMD_EXEC_SIGNED_IN_CODELEN_LEN 4
-/* the length of data to include in the CMAC */
-#define MC_CMD_EXEC_SIGNED_IN_DATALEN_OFST 4
-#define MC_CMD_EXEC_SIGNED_IN_DATALEN_LEN 4
-/* the XPM sector containing the key to use */
-#define MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_OFST 8
-#define MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_LEN 4
-/* the expected CMAC value */
-#define MC_CMD_EXEC_SIGNED_IN_CMAC_OFST 12
-#define MC_CMD_EXEC_SIGNED_IN_CMAC_LEN 16
-
-/* MC_CMD_EXEC_SIGNED_OUT msgresponse */
-#define MC_CMD_EXEC_SIGNED_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_PREPARE_SIGNED
- * Prepare to upload a signed image. This will scrub the specified length of
- * the data region, which must be at least as large as the DATALEN supplied to
- * MC_CMD_EXEC_SIGNED.
- */
-#define MC_CMD_PREPARE_SIGNED 0x10d
-#undef MC_CMD_0x10d_PRIVILEGE_CTG
-
-#define MC_CMD_0x10d_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_PREPARE_SIGNED_IN msgrequest */
-#define MC_CMD_PREPARE_SIGNED_IN_LEN 4
-/* the length of data area to clear */
-#define MC_CMD_PREPARE_SIGNED_IN_DATALEN_OFST 0
-#define MC_CMD_PREPARE_SIGNED_IN_DATALEN_LEN 4
-
-/* MC_CMD_PREPARE_SIGNED_OUT msgresponse */
-#define MC_CMD_PREPARE_SIGNED_OUT_LEN 0
-
-
/* TUNNEL_ENCAP_UDP_PORT_ENTRY structuredef */
#define TUNNEL_ENCAP_UDP_PORT_ENTRY_LEN 4
/* UDP port (the standard ports are named below but any port may be used) */
@@ -23049,110 +21545,6 @@
/***********************************/
-/* MC_CMD_RX_BALANCING
- * Configure a port upconverter to distribute the packets on both RX engines.
- * Packets are distributed based on a table with the destination vFIFO. The
- * index of the table is a hash of source and destination of IPV4 and VLAN
- * priority.
- */
-#define MC_CMD_RX_BALANCING 0x118
-#undef MC_CMD_0x118_PRIVILEGE_CTG
-
-#define MC_CMD_0x118_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_RX_BALANCING_IN msgrequest */
-#define MC_CMD_RX_BALANCING_IN_LEN 16
-/* The RX port whose upconverter table will be modified */
-#define MC_CMD_RX_BALANCING_IN_PORT_OFST 0
-#define MC_CMD_RX_BALANCING_IN_PORT_LEN 4
-/* The VLAN priority associated to the table index and vFIFO */
-#define MC_CMD_RX_BALANCING_IN_PRIORITY_OFST 4
-#define MC_CMD_RX_BALANCING_IN_PRIORITY_LEN 4
-/* The resulting bit of SRC^DST for indexing the table */
-#define MC_CMD_RX_BALANCING_IN_SRC_DST_OFST 8
-#define MC_CMD_RX_BALANCING_IN_SRC_DST_LEN 4
-/* The RX engine to which the vFIFO in the table entry will point */
-#define MC_CMD_RX_BALANCING_IN_ENG_OFST 12
-#define MC_CMD_RX_BALANCING_IN_ENG_LEN 4
-
-/* MC_CMD_RX_BALANCING_OUT msgresponse */
-#define MC_CMD_RX_BALANCING_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_NVRAM_PRIVATE_APPEND
- * Append a single TLV to the MC_USAGE_TLV partition. Returns MC_CMD_ERR_EEXIST
- * if the tag is already present.
- */
-#define MC_CMD_NVRAM_PRIVATE_APPEND 0x11c
-#undef MC_CMD_0x11c_PRIVILEGE_CTG
-
-#define MC_CMD_0x11c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_NVRAM_PRIVATE_APPEND_IN msgrequest */
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENMIN 9
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENMAX 252
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENMAX_MCDI2 1020
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LEN(num) (8+1*(num))
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_NUM(len) (((len)-8)/1)
-/* The tag to be appended */
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_OFST 0
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_LEN 4
-/* The length of the data */
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENGTH_OFST 4
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENGTH_LEN 4
-/* The data to be contained in the TLV structure */
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_OFST 8
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_LEN 1
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_MINNUM 1
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_MAXNUM 244
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_MAXNUM_MCDI2 1012
-
-/* MC_CMD_NVRAM_PRIVATE_APPEND_OUT msgresponse */
-#define MC_CMD_NVRAM_PRIVATE_APPEND_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_XPM_VERIFY_CONTENTS
- * Verify that the contents of the XPM memory is correct (Medford only). This
- * is used during manufacture to check that the XPM memory has been programmed
- * correctly at ATE.
- */
-#define MC_CMD_XPM_VERIFY_CONTENTS 0x11b
-#undef MC_CMD_0x11b_PRIVILEGE_CTG
-
-#define MC_CMD_0x11b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_XPM_VERIFY_CONTENTS_IN msgrequest */
-#define MC_CMD_XPM_VERIFY_CONTENTS_IN_LEN 4
-/* Data type to be checked */
-#define MC_CMD_XPM_VERIFY_CONTENTS_IN_DATA_TYPE_OFST 0
-#define MC_CMD_XPM_VERIFY_CONTENTS_IN_DATA_TYPE_LEN 4
-
-/* MC_CMD_XPM_VERIFY_CONTENTS_OUT msgresponse */
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMIN 12
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMAX 252
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LEN(num) (12+1*(num))
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_NUM(len) (((len)-12)/1)
-/* Number of sectors found (test builds only) */
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_OFST 0
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_LEN 4
-/* Number of bytes found (test builds only) */
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_BYTES_OFST 4
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_BYTES_LEN 4
-/* Length of signature */
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIG_LENGTH_OFST 8
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIG_LENGTH_LEN 4
-/* Signature */
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_OFST 12
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_LEN 1
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_MINNUM 0
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_MAXNUM 240
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_MAXNUM_MCDI2 1008
-
-
-/***********************************/
/* MC_CMD_SET_EVQ_TMR
* Update the timer load, timer reload and timer mode values for a given EVQ.
* The requested timer values (in TMR_LOAD_REQ_NS and TMR_RELOAD_REQ_NS) will
@@ -23262,576 +21654,6 @@
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_OFST 32
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_LEN 4
-
-/***********************************/
-/* MC_CMD_ALLOCATE_TX_VFIFO_CP
- * When we use the TX_vFIFO_ULL mode, we can allocate common pools using the
- * unused switch buffers.
- */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP 0x11d
-#undef MC_CMD_0x11d_PRIVILEGE_CTG
-
-#define MC_CMD_0x11d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_ALLOCATE_TX_VFIFO_CP_IN msgrequest */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_LEN 20
-/* Desired instance. Must be set to a specific instance, which is a function
- * local queue index. The calling client must be the currently-assigned user of
- * this VI (see MC_CMD_SET_VI_USER).
- */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_OFST 0
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_LEN 4
-/* Will the common pool be used as TX_vFIFO_ULL (1) */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_OFST 4
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_LEN 4
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_ENABLED 0x1 /* enum */
-/* enum: Using this interface without TX_vFIFO_ULL is not supported for now */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_DISABLED 0x0
-/* Number of buffers to reserve for the common pool */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_OFST 8
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_LEN 4
-/* TX datapath to which the Common Pool is connected. */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_OFST 12
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_LEN 4
-/* enum: Extracts information from function */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1
-/* Network port or RX Engine to which the common pool connects. */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_OFST 16
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_LEN 4
-/* enum: Extracts information from function */
-/* MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1 */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT0 0x0 /* enum */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT1 0x1 /* enum */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT2 0x2 /* enum */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT3 0x3 /* enum */
-/* enum: To enable Switch loopback with Rx engine 0 */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_RX_ENGINE0 0x4
-/* enum: To enable Switch loopback with Rx engine 1 */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_RX_ENGINE1 0x5
-
-/* MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT msgresponse */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_LEN 4
-/* ID of the common pool allocated */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_CP_ID_OFST 0
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_CP_ID_LEN 4
-
-
-/***********************************/
-/* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO
- * When we use the TX_vFIFO_ULL mode, we can allocate vFIFOs using the
- * previously allocated common pools.
- */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO 0x11e
-#undef MC_CMD_0x11e_PRIVILEGE_CTG
-
-#define MC_CMD_0x11e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN msgrequest */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LEN 20
-/* Common pool previously allocated to which the new vFIFO will be associated
- */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_OFST 0
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_LEN 4
-/* Port or RX engine with which to associate the vFIFO egress */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_OFST 4
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_LEN 4
-/* enum: Extracts information from common pool */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_USE_CP_VALUE -0x1
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT0 0x0 /* enum */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT1 0x1 /* enum */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT2 0x2 /* enum */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT3 0x3 /* enum */
-/* enum: To enable Switch loopback with Rx engine 0 */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE0 0x4
-/* enum: To enable Switch loopback with Rx engine 1 */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE1 0x5
-/* Minimum number of buffers that the pool must have */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_OFST 8
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_LEN 4
-/* enum: Do not check the space available */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_NO_MINIMUM 0x0
-/* Will the vFIFO be used as TX_vFIFO_ULL */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_OFST 12
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_LEN 4
-/* Network priority of the vFIFO, if applicable */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_OFST 16
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_LEN 4
-/* enum: Search for the lowest unused priority */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LOWEST_AVAILABLE -0x1
-
-/* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT msgresponse */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_LEN 8
-/* Short vFIFO ID */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_VID_OFST 0
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_VID_LEN 4
-/* Network priority of the vFIFO */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_PRIORITY_OFST 4
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_PRIORITY_LEN 4
-
-
-/***********************************/
-/* MC_CMD_TEARDOWN_TX_VFIFO_VF
- * This interface clears the configuration of the given vFIFO and leaves it
- * ready to be re-used.
- */
-#define MC_CMD_TEARDOWN_TX_VFIFO_VF 0x11f
-#undef MC_CMD_0x11f_PRIVILEGE_CTG
-
-#define MC_CMD_0x11f_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_TEARDOWN_TX_VFIFO_VF_IN msgrequest */
-#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_LEN 4
-/* Short vFIFO ID */
-#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_VFIFO_OFST 0
-#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_VFIFO_LEN 4
-
-/* MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT msgresponse */
-#define MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_DEALLOCATE_TX_VFIFO_CP
- * This interface clears the configuration of the given common pool and leaves
- * it ready to be re-used.
- */
-#define MC_CMD_DEALLOCATE_TX_VFIFO_CP 0x121
-#undef MC_CMD_0x121_PRIVILEGE_CTG
-
-#define MC_CMD_0x121_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN msgrequest */
-#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_LEN 4
-/* Common pool ID given when pool allocated */
-#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_POOL_ID_OFST 0
-#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_POOL_ID_LEN 4
-
-/* MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT msgresponse */
-#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS
- * This interface allows the host to find out how many common pool buffers are
- * not yet assigned.
- */
-#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS 0x124
-#undef MC_CMD_0x124_PRIVILEGE_CTG
-
-#define MC_CMD_0x124_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_IN msgrequest */
-#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_IN_LEN 0
-
-/* MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT msgresponse */
-#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_LEN 8
-/* Available buffers for the ENG to NET vFIFOs. */
-#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_NET_OFST 0
-#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_NET_LEN 4
-/* Available buffers for the ENG to ENG and NET to ENG vFIFOs. */
-#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_ENG_OFST 4
-#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_ENG_LEN 4
-
-
-/***********************************/
-/* MC_CMD_SUC_VERSION
- * Get the version of the SUC
- */
-#define MC_CMD_SUC_VERSION 0x134
-#undef MC_CMD_0x134_PRIVILEGE_CTG
-
-#define MC_CMD_0x134_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_SUC_VERSION_IN msgrequest */
-#define MC_CMD_SUC_VERSION_IN_LEN 0
-
-/* MC_CMD_SUC_VERSION_OUT msgresponse */
-#define MC_CMD_SUC_VERSION_OUT_LEN 24
-/* The SUC firmware version as four numbers - a.b.c.d */
-#define MC_CMD_SUC_VERSION_OUT_VERSION_OFST 0
-#define MC_CMD_SUC_VERSION_OUT_VERSION_LEN 4
-#define MC_CMD_SUC_VERSION_OUT_VERSION_NUM 4
-/* The date, in seconds since the Unix epoch, when the firmware image was
- * built.
- */
-#define MC_CMD_SUC_VERSION_OUT_BUILD_DATE_OFST 16
-#define MC_CMD_SUC_VERSION_OUT_BUILD_DATE_LEN 4
-/* The ID of the SUC chip. This is specific to the platform but typically
- * indicates family, memory sizes etc. See SF-116728-SW for further details.
- */
-#define MC_CMD_SUC_VERSION_OUT_CHIP_ID_OFST 20
-#define MC_CMD_SUC_VERSION_OUT_CHIP_ID_LEN 4
-
-/* MC_CMD_SUC_BOOT_VERSION_IN msgrequest: Get the version of the SUC boot
- * loader.
- */
-#define MC_CMD_SUC_BOOT_VERSION_IN_LEN 4
-#define MC_CMD_SUC_BOOT_VERSION_IN_MAGIC_OFST 0
-#define MC_CMD_SUC_BOOT_VERSION_IN_MAGIC_LEN 4
-/* enum: Requests the SUC boot version. */
-#define MC_CMD_SUC_VERSION_GET_BOOT_VERSION 0xb007700b
-
-/* MC_CMD_SUC_BOOT_VERSION_OUT msgresponse */
-#define MC_CMD_SUC_BOOT_VERSION_OUT_LEN 4
-/* The SUC boot version */
-#define MC_CMD_SUC_BOOT_VERSION_OUT_VERSION_OFST 0
-#define MC_CMD_SUC_BOOT_VERSION_OUT_VERSION_LEN 4
-
-
-/***********************************/
-/* MC_CMD_GET_RX_PREFIX_ID
- * This command is part of the mechanism for configuring the format of the RX
- * packet prefix. It takes as input a bitmask of the fields the host would like
- * to be in the prefix. If the hardware supports RX prefixes with that
- * combination of fields, then this command returns a list of prefix-ids,
- * opaque identifiers suitable for use in the RX_PREFIX_ID field of a
- * MC_CMD_INIT_RXQ_V5_IN message. If the combination of fields is not
- * supported, returns ENOTSUP. If the firmware can't create any new prefix-ids
- * due to resource constraints, returns ENOSPC.
- */
-#define MC_CMD_GET_RX_PREFIX_ID 0x13b
-#undef MC_CMD_0x13b_PRIVILEGE_CTG
-
-#define MC_CMD_0x13b_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_RX_PREFIX_ID_IN msgrequest */
-#define MC_CMD_GET_RX_PREFIX_ID_IN_LEN 8
-/* Field bitmask. */
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LEN 8
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LO_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LO_LEN 4
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LO_LBN 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LO_WIDTH 32
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_HI_OFST 4
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_HI_LEN 4
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_HI_LBN 32
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_HI_WIDTH 32
-#define MC_CMD_GET_RX_PREFIX_ID_IN_LENGTH_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_LENGTH_LBN 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_LENGTH_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_VALID_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_VALID_LBN 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_VALID_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_FLAG_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_FLAG_LBN 2
-#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_FLAG_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_CLASS_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_CLASS_LBN 3
-#define MC_CMD_GET_RX_PREFIX_ID_IN_CLASS_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_PARTIAL_TSTAMP_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_PARTIAL_TSTAMP_LBN 4
-#define MC_CMD_GET_RX_PREFIX_ID_IN_PARTIAL_TSTAMP_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_LBN 5
-#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_MARK_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_MARK_LBN 6
-#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_MARK_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_MPORT_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_MPORT_LBN 7
-#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_MPORT_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_VPORT_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_VPORT_LBN 7
-#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_VPORT_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_CSUM_FRAME_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_CSUM_FRAME_LBN 8
-#define MC_CMD_GET_RX_PREFIX_ID_IN_CSUM_FRAME_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIP_TCI_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIP_TCI_LBN 9
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIP_TCI_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIPPED_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIPPED_LBN 10
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIPPED_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VSWITCH_STATUS_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VSWITCH_STATUS_LBN 11
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VSWITCH_STATUS_WIDTH 1
-
-/* MC_CMD_GET_RX_PREFIX_ID_OUT msgresponse */
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_LENMIN 8
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_LENMAX 252
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_LEN(num) (4+4*(num))
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_NUM(len) (((len)-4)/4)
-/* Number of prefix-ids returned */
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_NUM_RX_PREFIX_IDS_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_NUM_RX_PREFIX_IDS_LEN 4
-/* Opaque prefix identifiers which can be passed into MC_CMD_INIT_RXQ_V5 or
- * MC_CMD_QUERY_PREFIX_ID
- */
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_OFST 4
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_LEN 4
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_MINNUM 1
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_MAXNUM 62
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_MAXNUM_MCDI2 254
-
-/* RX_PREFIX_FIELD_INFO structuredef: Information about a single RX prefix
- * field
- */
-#define RX_PREFIX_FIELD_INFO_LEN 4
-/* The offset of the field from the start of the prefix, in bits */
-#define RX_PREFIX_FIELD_INFO_OFFSET_BITS_OFST 0
-#define RX_PREFIX_FIELD_INFO_OFFSET_BITS_LEN 2
-#define RX_PREFIX_FIELD_INFO_OFFSET_BITS_LBN 0
-#define RX_PREFIX_FIELD_INFO_OFFSET_BITS_WIDTH 16
-/* The width of the field, in bits */
-#define RX_PREFIX_FIELD_INFO_WIDTH_BITS_OFST 2
-#define RX_PREFIX_FIELD_INFO_WIDTH_BITS_LEN 1
-#define RX_PREFIX_FIELD_INFO_WIDTH_BITS_LBN 16
-#define RX_PREFIX_FIELD_INFO_WIDTH_BITS_WIDTH 8
-/* The type of the field. These enum values are in the same order as the fields
- * in the MC_CMD_GET_RX_PREFIX_ID_IN bitmask
- */
-#define RX_PREFIX_FIELD_INFO_TYPE_OFST 3
-#define RX_PREFIX_FIELD_INFO_TYPE_LEN 1
-#define RX_PREFIX_FIELD_INFO_LENGTH 0x0 /* enum */
-#define RX_PREFIX_FIELD_INFO_RSS_HASH_VALID 0x1 /* enum */
-#define RX_PREFIX_FIELD_INFO_USER_FLAG 0x2 /* enum */
-#define RX_PREFIX_FIELD_INFO_CLASS 0x3 /* enum */
-#define RX_PREFIX_FIELD_INFO_PARTIAL_TSTAMP 0x4 /* enum */
-#define RX_PREFIX_FIELD_INFO_RSS_HASH 0x5 /* enum */
-#define RX_PREFIX_FIELD_INFO_USER_MARK 0x6 /* enum */
-#define RX_PREFIX_FIELD_INFO_INGRESS_MPORT 0x7 /* enum */
-#define RX_PREFIX_FIELD_INFO_INGRESS_VPORT 0x7 /* enum */
-#define RX_PREFIX_FIELD_INFO_CSUM_FRAME 0x8 /* enum */
-#define RX_PREFIX_FIELD_INFO_VLAN_STRIP_TCI 0x9 /* enum */
-#define RX_PREFIX_FIELD_INFO_VLAN_STRIPPED 0xa /* enum */
-#define RX_PREFIX_FIELD_INFO_VSWITCH_STATUS 0xb /* enum */
-#define RX_PREFIX_FIELD_INFO_TYPE_LBN 24
-#define RX_PREFIX_FIELD_INFO_TYPE_WIDTH 8
-
-/* RX_PREFIX_FIXED_RESPONSE structuredef: Information about an RX prefix in
- * which every field has a fixed offset and width
- */
-#define RX_PREFIX_FIXED_RESPONSE_LENMIN 4
-#define RX_PREFIX_FIXED_RESPONSE_LENMAX 252
-#define RX_PREFIX_FIXED_RESPONSE_LENMAX_MCDI2 1020
-#define RX_PREFIX_FIXED_RESPONSE_LEN(num) (4+4*(num))
-#define RX_PREFIX_FIXED_RESPONSE_FIELDS_NUM(len) (((len)-4)/4)
-/* Length of the RX prefix in bytes */
-#define RX_PREFIX_FIXED_RESPONSE_PREFIX_LENGTH_BYTES_OFST 0
-#define RX_PREFIX_FIXED_RESPONSE_PREFIX_LENGTH_BYTES_LEN 1
-#define RX_PREFIX_FIXED_RESPONSE_PREFIX_LENGTH_BYTES_LBN 0
-#define RX_PREFIX_FIXED_RESPONSE_PREFIX_LENGTH_BYTES_WIDTH 8
-/* Number of fields present in the prefix */
-#define RX_PREFIX_FIXED_RESPONSE_FIELD_COUNT_OFST 1
-#define RX_PREFIX_FIXED_RESPONSE_FIELD_COUNT_LEN 1
-#define RX_PREFIX_FIXED_RESPONSE_FIELD_COUNT_LBN 8
-#define RX_PREFIX_FIXED_RESPONSE_FIELD_COUNT_WIDTH 8
-#define RX_PREFIX_FIXED_RESPONSE_RESERVED_OFST 2
-#define RX_PREFIX_FIXED_RESPONSE_RESERVED_LEN 2
-#define RX_PREFIX_FIXED_RESPONSE_RESERVED_LBN 16
-#define RX_PREFIX_FIXED_RESPONSE_RESERVED_WIDTH 16
-/* Array of RX_PREFIX_FIELD_INFO structures, of length FIELD_COUNT */
-#define RX_PREFIX_FIXED_RESPONSE_FIELDS_OFST 4
-#define RX_PREFIX_FIXED_RESPONSE_FIELDS_LEN 4
-#define RX_PREFIX_FIXED_RESPONSE_FIELDS_MINNUM 0
-#define RX_PREFIX_FIXED_RESPONSE_FIELDS_MAXNUM 62
-#define RX_PREFIX_FIXED_RESPONSE_FIELDS_MAXNUM_MCDI2 254
-#define RX_PREFIX_FIXED_RESPONSE_FIELDS_LBN 32
-#define RX_PREFIX_FIXED_RESPONSE_FIELDS_WIDTH 32
-
-
-/***********************************/
-/* MC_CMD_QUERY_RX_PREFIX_ID
- * This command takes an RX prefix id (obtained from MC_CMD_GET_RX_PREFIX_ID)
- * and returns a description of the RX prefix of packets delivered to an RXQ
- * created with that prefix id
- */
-#define MC_CMD_QUERY_RX_PREFIX_ID 0x13c
-#undef MC_CMD_0x13c_PRIVILEGE_CTG
-
-#define MC_CMD_0x13c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_QUERY_RX_PREFIX_ID_IN msgrequest */
-#define MC_CMD_QUERY_RX_PREFIX_ID_IN_LEN 4
-/* Prefix id to query */
-#define MC_CMD_QUERY_RX_PREFIX_ID_IN_RX_PREFIX_ID_OFST 0
-#define MC_CMD_QUERY_RX_PREFIX_ID_IN_RX_PREFIX_ID_LEN 4
-
-/* MC_CMD_QUERY_RX_PREFIX_ID_OUT msgresponse */
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_LENMIN 4
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_LENMAX 252
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_LEN(num) (4+1*(num))
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_NUM(len) (((len)-4)/1)
-/* An enum describing the structure of this response. */
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_TYPE_OFST 0
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_TYPE_LEN 1
-/* enum: The response is of format RX_PREFIX_FIXED_RESPONSE */
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_TYPE_FIXED 0x0
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESERVED_OFST 1
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESERVED_LEN 3
-/* The response. Its format is as defined by the RESPONSE_TYPE value */
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_OFST 4
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_LEN 1
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_MINNUM 0
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_MAXNUM 248
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_MAXNUM_MCDI2 1016
-
-
-/***********************************/
-/* MC_CMD_BUNDLE
- * A command to perform various bundle-related operations on insecure cards.
- */
-#define MC_CMD_BUNDLE 0x13d
-#undef MC_CMD_0x13d_PRIVILEGE_CTG
-
-#define MC_CMD_0x13d_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_BUNDLE_IN msgrequest */
-#define MC_CMD_BUNDLE_IN_LEN 4
-/* Sub-command code */
-#define MC_CMD_BUNDLE_IN_OP_OFST 0
-#define MC_CMD_BUNDLE_IN_OP_LEN 4
-/* enum: Get the current host access mode set on component partitions. */
-#define MC_CMD_BUNDLE_IN_OP_COMPONENT_ACCESS_GET 0x0
-/* enum: Set the host access mode set on component partitions. */
-#define MC_CMD_BUNDLE_IN_OP_COMPONENT_ACCESS_SET 0x1
-
-/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_IN msgrequest: Retrieve the current
- * access mode on component partitions such as MC_FIRMWARE, SUC_FIRMWARE and
- * EXPANSION_UEFI. This command only works on engineering (insecure) cards. On
- * secure adapters, this command returns MC_CMD_ERR_EPERM.
- */
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_IN_LEN 4
-/* Sub-command code. Must be OP_COMPONENT_ACCESS_GET. */
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_IN_OP_OFST 0
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_IN_OP_LEN 4
-
-/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT msgresponse: Returns the access
- * control mode.
- */
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT_LEN 4
-/* Access mode of component partitions. */
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT_ACCESS_MODE_OFST 0
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT_ACCESS_MODE_LEN 4
-/* enum: Component partitions are read-only from the host. */
-#define MC_CMD_BUNDLE_COMPONENTS_READ_ONLY 0x0
-/* enum: Component partitions can be read from and written to by the host. */
-#define MC_CMD_BUNDLE_COMPONENTS_READ_WRITE 0x1
-
-/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN msgrequest: The component
- * partitions such as MC_FIRMWARE, SUC_FIRMWARE, EXPANSION_UEFI are set as
- * read-only on firmware built with bundle support. This command marks these
- * partitions as read/writeable. The access status set by this command does not
- * persist across MC reboots. This command only works on engineering (insecure)
- * cards. On secure adapters, this command returns MC_CMD_ERR_EPERM.
- */
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_LEN 8
-/* Sub-command code. Must be OP_COMPONENT_ACCESS_SET. */
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_OP_OFST 0
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_OP_LEN 4
-/* Access mode of component partitions. */
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_ACCESS_MODE_OFST 4
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_ACCESS_MODE_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT/ACCESS_MODE */
-
-/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_OUT msgresponse */
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_GET_VPD
- * Read all VPD starting from a given address
- */
-#define MC_CMD_GET_VPD 0x165
-#undef MC_CMD_0x165_PRIVILEGE_CTG
-
-#define MC_CMD_0x165_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_VPD_IN msgrequest */
-#define MC_CMD_GET_VPD_IN_LEN 4
-/* VPD address to start from. In case the VPD is longer than the MCDI buffer
- * (unlikely), the user can make multiple calls with different starting addresses.
- */
-#define MC_CMD_GET_VPD_IN_ADDR_OFST 0
-#define MC_CMD_GET_VPD_IN_ADDR_LEN 4
-
-/* MC_CMD_GET_VPD_OUT msgresponse */
-#define MC_CMD_GET_VPD_OUT_LENMIN 0
-#define MC_CMD_GET_VPD_OUT_LENMAX 252
-#define MC_CMD_GET_VPD_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_GET_VPD_OUT_LEN(num) (0+1*(num))
-#define MC_CMD_GET_VPD_OUT_DATA_NUM(len) (((len)-0)/1)
-/* VPD data returned. */
-#define MC_CMD_GET_VPD_OUT_DATA_OFST 0
-#define MC_CMD_GET_VPD_OUT_DATA_LEN 1
-#define MC_CMD_GET_VPD_OUT_DATA_MINNUM 0
-#define MC_CMD_GET_VPD_OUT_DATA_MAXNUM 252
-#define MC_CMD_GET_VPD_OUT_DATA_MAXNUM_MCDI2 1020
-
-
-/***********************************/
-/* MC_CMD_GET_NCSI_INFO
- * Provide information about the NC-SI stack
- */
-#define MC_CMD_GET_NCSI_INFO 0x167
-#undef MC_CMD_0x167_PRIVILEGE_CTG
-
-#define MC_CMD_0x167_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_NCSI_INFO_IN msgrequest */
-#define MC_CMD_GET_NCSI_INFO_IN_LEN 8
-/* Operation to be performed */
-#define MC_CMD_GET_NCSI_INFO_IN_OP_OFST 0
-#define MC_CMD_GET_NCSI_INFO_IN_OP_LEN 4
-/* enum: Information on the link settings. */
-#define MC_CMD_GET_NCSI_INFO_IN_OP_LINK 0x0
-/* enum: Statistics associated with the channel */
-#define MC_CMD_GET_NCSI_INFO_IN_OP_STATISTICS 0x1
-/* The NC-SI channel on which the operation is to be performed */
-#define MC_CMD_GET_NCSI_INFO_IN_CHANNEL_OFST 4
-#define MC_CMD_GET_NCSI_INFO_IN_CHANNEL_LEN 4
-
-/* MC_CMD_GET_NCSI_INFO_LINK_OUT msgresponse */
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_LEN 12
-/* Settings as received from BMC. */
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_SETTINGS_OFST 0
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_SETTINGS_LEN 4
-/* Advertised capabilities applied to channel. */
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ADV_CAP_OFST 4
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ADV_CAP_LEN 4
-/* General status */
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_STATUS_OFST 8
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_STATUS_LEN 4
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_STATE_OFST 8
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_STATE_LBN 0
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_STATE_WIDTH 2
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ENABLE_OFST 8
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ENABLE_LBN 2
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ENABLE_WIDTH 1
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_NETWORK_TX_OFST 8
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_NETWORK_TX_LBN 3
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_NETWORK_TX_WIDTH 1
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ATTACHED_OFST 8
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ATTACHED_LBN 4
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ATTACHED_WIDTH 1
-
-/* MC_CMD_GET_NCSI_INFO_STATISTICS_OUT msgresponse */
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_LEN 28
-/* The number of NC-SI commands received. */
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMDS_RX_OFST 0
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMDS_RX_LEN 4
-/* The number of NC-SI commands dropped. */
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_PKTS_DROPPED_OFST 4
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_PKTS_DROPPED_LEN 4
-/* The number of invalid NC-SI commands received. */
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMD_TYPE_ERRS_OFST 8
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMD_TYPE_ERRS_LEN 4
-/* The number of checksum errors seen. */
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMD_CSUM_ERRS_OFST 12
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMD_CSUM_ERRS_LEN 4
-/* The number of NC-SI requests received. */
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_RX_PKTS_OFST 16
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_RX_PKTS_LEN 4
-/* The number of NC-SI responses sent (includes AENs) */
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_TX_PKTS_OFST 20
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_TX_PKTS_LEN 4
-/* The number of NC-SI AENs sent */
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_AENS_SENT_OFST 24
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_AENS_SENT_LEN 4
-
/* CLIENT_HANDLE structuredef: A client is an abstract entity that can make
* requests of the device and that can own resources managed by the device.
* Examples of clients include PCIe functions and dynamic clients. A client
@@ -23848,59 +21670,10 @@
#define CLIENT_HANDLE_OPAQUE_LBN 0
#define CLIENT_HANDLE_OPAQUE_WIDTH 32
-/* CLOCK_INFO structuredef: Information about a single hardware clock */
-#define CLOCK_INFO_LEN 28
-/* Enumeration that uniquely identifies the clock */
-#define CLOCK_INFO_CLOCK_ID_OFST 0
-#define CLOCK_INFO_CLOCK_ID_LEN 2
-/* enum: The Riverhead CMC (card MC) */
-#define CLOCK_INFO_CLOCK_CMC 0x0
-/* enum: The Riverhead NMC (network MC) */
-#define CLOCK_INFO_CLOCK_NMC 0x1
-/* enum: The Riverhead SDNET slice main logic */
-#define CLOCK_INFO_CLOCK_SDNET 0x2
-/* enum: The Riverhead SDNET LUT */
-#define CLOCK_INFO_CLOCK_SDNET_LUT 0x3
-/* enum: The Riverhead SDNET control logic */
-#define CLOCK_INFO_CLOCK_SDNET_CTRL 0x4
-/* enum: The Riverhead Streaming SubSystem */
-#define CLOCK_INFO_CLOCK_SSS 0x5
-/* enum: The Riverhead network MAC and associated CSR registers */
-#define CLOCK_INFO_CLOCK_MAC 0x6
-#define CLOCK_INFO_CLOCK_ID_LBN 0
-#define CLOCK_INFO_CLOCK_ID_WIDTH 16
-/* Assorted flags */
-#define CLOCK_INFO_FLAGS_OFST 2
-#define CLOCK_INFO_FLAGS_LEN 2
-#define CLOCK_INFO_SETTABLE_OFST 2
-#define CLOCK_INFO_SETTABLE_LBN 0
-#define CLOCK_INFO_SETTABLE_WIDTH 1
-#define CLOCK_INFO_FLAGS_LBN 16
-#define CLOCK_INFO_FLAGS_WIDTH 16
-/* The frequency in HZ */
-#define CLOCK_INFO_FREQUENCY_OFST 4
-#define CLOCK_INFO_FREQUENCY_LEN 8
-#define CLOCK_INFO_FREQUENCY_LO_OFST 4
-#define CLOCK_INFO_FREQUENCY_LO_LEN 4
-#define CLOCK_INFO_FREQUENCY_LO_LBN 32
-#define CLOCK_INFO_FREQUENCY_LO_WIDTH 32
-#define CLOCK_INFO_FREQUENCY_HI_OFST 8
-#define CLOCK_INFO_FREQUENCY_HI_LEN 4
-#define CLOCK_INFO_FREQUENCY_HI_LBN 64
-#define CLOCK_INFO_FREQUENCY_HI_WIDTH 32
-#define CLOCK_INFO_FREQUENCY_LBN 32
-#define CLOCK_INFO_FREQUENCY_WIDTH 64
-/* Human-readable ASCII name for clock, with NUL termination */
-#define CLOCK_INFO_NAME_OFST 12
-#define CLOCK_INFO_NAME_LEN 1
-#define CLOCK_INFO_NAME_NUM 16
-#define CLOCK_INFO_NAME_LBN 96
-#define CLOCK_INFO_NAME_WIDTH 8
-
/* SCHED_CREDIT_CHECK_RESULT structuredef */
#define SCHED_CREDIT_CHECK_RESULT_LEN 16
-/* The instance of the scheduler. Refer to XN-200389-AW for the location of
- * these schedulers in the hardware.
+/* The instance of the scheduler. Refer to XN-200389-AW (snic/hnic) and
+ * XN-200425-TC (cdx) for the location of these schedulers in the hardware.
*/
#define SCHED_CREDIT_CHECK_RESULT_SCHED_INSTANCE_OFST 0
#define SCHED_CREDIT_CHECK_RESULT_SCHED_INSTANCE_LEN 1
@@ -23914,6 +21687,16 @@
#define SCHED_CREDIT_CHECK_RESULT_DMAC_H2C 0x7 /* enum */
#define SCHED_CREDIT_CHECK_RESULT_HUB_NET_B 0x8 /* enum */
#define SCHED_CREDIT_CHECK_RESULT_HUB_NET_REPLAY 0x9 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_ADAPTER_C2H_C 0xa /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_A2_H2C_C 0xb /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_A3_SOFT_ADAPTOR_C 0xc /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_A4_DPU_WRITE_C 0xd /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_JRC_RRU 0xe /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_CDM_SINK 0xf /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_PCIE_SINK 0x10 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_UPORT_SINK 0x11 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_PSX_SINK 0x12 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_A5_DPU_READ_C 0x13 /* enum */
#define SCHED_CREDIT_CHECK_RESULT_SCHED_INSTANCE_LBN 0
#define SCHED_CREDIT_CHECK_RESULT_SCHED_INSTANCE_WIDTH 8
/* The type of node that this result refers to. */
@@ -23923,6 +21706,10 @@
#define SCHED_CREDIT_CHECK_RESULT_DEST 0x0
/* enum: Source node */
#define SCHED_CREDIT_CHECK_RESULT_SOURCE 0x1
+/* enum: Destination node credit type 1 (new to the Keystone schedulers, see
+ * SF-120268-TC)
+ */
+#define SCHED_CREDIT_CHECK_RESULT_DEST_CREDIT1 0x2
#define SCHED_CREDIT_CHECK_RESULT_NODE_TYPE_LBN 8
#define SCHED_CREDIT_CHECK_RESULT_NODE_TYPE_WIDTH 8
/* Level of node in scheduler hierarchy (level 0 is the bottom of the
@@ -23950,592 +21737,6 @@
/***********************************/
-/* MC_CMD_GET_CLOCKS_INFO
- * Get information about the device clocks
- */
-#define MC_CMD_GET_CLOCKS_INFO 0x166
-#undef MC_CMD_0x166_PRIVILEGE_CTG
-
-#define MC_CMD_0x166_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_CLOCKS_INFO_IN msgrequest */
-#define MC_CMD_GET_CLOCKS_INFO_IN_LEN 0
-
-/* MC_CMD_GET_CLOCKS_INFO_OUT msgresponse */
-#define MC_CMD_GET_CLOCKS_INFO_OUT_LENMIN 0
-#define MC_CMD_GET_CLOCKS_INFO_OUT_LENMAX 252
-#define MC_CMD_GET_CLOCKS_INFO_OUT_LENMAX_MCDI2 1008
-#define MC_CMD_GET_CLOCKS_INFO_OUT_LEN(num) (0+28*(num))
-#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_NUM(len) (((len)-0)/28)
-/* An array of CLOCK_INFO structures. */
-#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_OFST 0
-#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_LEN 28
-#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_MINNUM 0
-#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_MAXNUM 9
-#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_MAXNUM_MCDI2 36
-
-
-/***********************************/
-/* MC_CMD_VNIC_ENCAP_RULE_ADD
- * Add a rule for detecting encapsulations in the VNIC stage. Currently this
- * only affects checksum validation in VNIC RX - on TX the send descriptor
- * explicitly specifies encapsulation. These rules are per-VNIC, i.e. only
- * apply to the current driver. If a rule matches, then the packet is
- * considered to have the corresponding encapsulation type, and the inner
- * packet is parsed. It is up to the driver to ensure that overlapping rules
- * are not inserted. (If a packet would match multiple rules, a random one of
- * them will be used.) A rule with the exact same match criteria may not be
- * inserted twice (EALREADY). Only a limited number of MATCH_FLAGS values are
- * supported, use MC_CMD_GET_PARSER_DISP_INFO with OP
- * OP_GET_SUPPORTED_VNIC_ENCAP_RULE_MATCHES to get a list of supported
- * combinations. Each driver may only have a limited set of active rules -
- * returns ENOSPC if the caller's table is full.
- */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD 0x16d
-#undef MC_CMD_0x16d_PRIVILEGE_CTG
-
-#define MC_CMD_0x16d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_VNIC_ENCAP_RULE_ADD_IN msgrequest */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_LEN 36
-/* Set to MAE_MPORT_SELECTOR_ASSIGNED. In the future this may be relaxed. */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MPORT_SELECTOR_OFST 0
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MPORT_SELECTOR_LEN 4
-/* Any non-zero bits other than the ones named below or an unsupported
- * combination will cause the NIC to return EOPNOTSUPP. In the future more
- * flags may be added.
- */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_FLAGS_OFST 4
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_FLAGS_LEN 4
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_ETHER_TYPE_OFST 4
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_ETHER_TYPE_LBN 0
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_ETHER_TYPE_WIDTH 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_OUTER_VLAN_OFST 4
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_OUTER_VLAN_LBN 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_OUTER_VLAN_WIDTH 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_IP_OFST 4
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_IP_LBN 2
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_IP_WIDTH 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_IP_PROTO_OFST 4
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_IP_PROTO_LBN 3
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_IP_PROTO_WIDTH 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_PORT_OFST 4
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_PORT_LBN 4
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_PORT_WIDTH 1
-/* Only if MATCH_ETHER_TYPE is set. Ethertype value as bytes in network order.
- * Currently only IPv4 (0x0800) and IPv6 (0x86DD) ethertypes may be used.
- */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ETHER_TYPE_OFST 8
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ETHER_TYPE_LEN 2
-/* Only if MATCH_OUTER_VLAN is set. VID value as bytes in network order.
- * (Deprecated)
- */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_LBN 80
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_WIDTH 12
-/* Only if MATCH_OUTER_VLAN is set. Aligned wrapper for OUTER_VLAN_VID. */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_WORD_OFST 10
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_WORD_LEN 2
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_VID_OFST 10
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_VID_LBN 0
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_VID_WIDTH 12
-/* Only if MATCH_DST_IP is set. IP address as bytes in network order. In the
- * case of IPv4, the IP should be in the first 4 bytes and all other bytes
- * should be zero.
- */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_IP_OFST 12
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_IP_LEN 16
-/* Only if MATCH_IP_PROTO is set. Currently only UDP proto (17) may be used. */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_IP_PROTO_OFST 28
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_IP_PROTO_LEN 1
-/* Actions that should be applied to packets matching the rule. */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ACTION_FLAGS_OFST 29
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ACTION_FLAGS_LEN 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_OFST 29
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_LBN 0
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_WIDTH 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_RSS_ON_OUTER_OFST 29
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_RSS_ON_OUTER_LBN 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_RSS_ON_OUTER_WIDTH 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STEER_ON_OUTER_OFST 29
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STEER_ON_OUTER_LBN 2
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STEER_ON_OUTER_WIDTH 1
-/* Only if MATCH_DST_PORT is set. Port number as bytes in network order. */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_PORT_OFST 30
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_PORT_LEN 2
-/* Resulting encapsulation type, as per MAE_MCDI_ENCAP_TYPE enumeration. */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ENCAP_TYPE_OFST 32
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ENCAP_TYPE_LEN 4
-
-/* MC_CMD_VNIC_ENCAP_RULE_ADD_OUT msgresponse */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_OUT_LEN 4
-/* Handle to inserted rule. Used for removing the rule. */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_OUT_HANDLE_OFST 0
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_OUT_HANDLE_LEN 4
-
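As a minimal sketch of how the offsets above compose a request, the following standalone C builds a 36-byte MC_CMD_VNIC_ENCAP_RULE_ADD_IN buffer that matches on destination port only. The little-endian helper, the VXLAN port value and the mport_selector/encap_type arguments are assumptions (the real values come from MAE enumerations not shown here), which MATCH_FLAGS combinations are actually accepted must be queried via MC_CMD_GET_PARSER_DISP_INFO as the comment above notes, and submitting the buffer over the MCDI transport is not shown.

#include <stdint.h>
#include <string.h>

/* Offsets and bit positions copied from the definitions above. */
#define IN_LEN                 36
#define IN_MPORT_SELECTOR_OFST  0
#define IN_MATCH_FLAGS_OFST     4
#define IN_MATCH_DST_PORT_LBN   4
#define IN_DST_PORT_OFST       30
#define IN_ENCAP_TYPE_OFST     32

/* Assumption: MCDI dword fields are stored little-endian in the buffer. */
static void put_le32(uint8_t *buf, unsigned int ofst, uint32_t v)
{
	buf[ofst + 0] = (uint8_t)(v);
	buf[ofst + 1] = (uint8_t)(v >> 8);
	buf[ofst + 2] = (uint8_t)(v >> 16);
	buf[ofst + 3] = (uint8_t)(v >> 24);
}

/* Build a rule matching UDP destination port 4789 (a VXLAN-style rule).
 * mport_selector and encap_type come from enumerations not shown here. */
static void build_dst_port_rule(uint8_t req[IN_LEN], uint32_t mport_selector,
				uint32_t encap_type)
{
	memset(req, 0, IN_LEN);
	put_le32(req, IN_MPORT_SELECTOR_OFST, mport_selector);
	put_le32(req, IN_MATCH_FLAGS_OFST, 1u << IN_MATCH_DST_PORT_LBN);
	/* DST_PORT is documented as bytes in network order: 4789 = 0x12b5. */
	req[IN_DST_PORT_OFST + 0] = 0x12;
	req[IN_DST_PORT_OFST + 1] = 0xb5;
	put_le32(req, IN_ENCAP_TYPE_OFST, encap_type);
}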
-
-/***********************************/
-/* MC_CMD_VNIC_ENCAP_RULE_REMOVE
- * Remove a VNIC encapsulation rule. Packets which would have previously
- * matched the rule will then be considered as unencapsulated. Returns EALREADY
- * if the input HANDLE doesn't correspond to an existing rule.
- */
-#define MC_CMD_VNIC_ENCAP_RULE_REMOVE 0x16e
-#undef MC_CMD_0x16e_PRIVILEGE_CTG
-
-#define MC_CMD_0x16e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN msgrequest */
-#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN_LEN 4
-/* Handle which was returned by MC_CMD_VNIC_ENCAP_RULE_ADD. */
-#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN_HANDLE_OFST 0
-#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN_HANDLE_LEN 4
-
-/* MC_CMD_VNIC_ENCAP_RULE_REMOVE_OUT msgresponse */
-#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_OUT_LEN 0
-
-/* UUID structuredef: An RFC4122 standard UUID. The values here are stored in
- * the endianness specified by the RFC; users should ignore the broken-out
- * fields and instead do straight memory copies to ensure correct ordering.
- */
-#define UUID_LEN 16
-#define UUID_TIME_LOW_OFST 0
-#define UUID_TIME_LOW_LEN 4
-#define UUID_TIME_LOW_LBN 0
-#define UUID_TIME_LOW_WIDTH 32
-#define UUID_TIME_MID_OFST 4
-#define UUID_TIME_MID_LEN 2
-#define UUID_TIME_MID_LBN 32
-#define UUID_TIME_MID_WIDTH 16
-#define UUID_TIME_HI_LBN 52
-#define UUID_TIME_HI_WIDTH 12
-#define UUID_VERSION_LBN 48
-#define UUID_VERSION_WIDTH 4
-#define UUID_RESERVED_LBN 64
-#define UUID_RESERVED_WIDTH 2
-#define UUID_CLK_SEQ_LBN 66
-#define UUID_CLK_SEQ_WIDTH 14
-#define UUID_NODE_OFST 10
-#define UUID_NODE_LEN 6
-#define UUID_NODE_LBN 80
-#define UUID_NODE_WIDTH 48
-
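Since the structuredef above tells users to ignore the broken-out fields and copy the UUID wholesale, a trivial sketch of doing exactly that follows; the destination offset is whichever field embeds the UUID (for example MC_CMD_PLUGIN_ALLOC_IN_UUID_OFST below), and the helper name is hypothetical.

#include <stdint.h>
#include <string.h>

#define UUID_LEN 16	/* from the structuredef above */

/* Copy an RFC 4122 UUID into an MCDI buffer verbatim; no per-field
 * byte swapping, as the comment above recommends. */
static void put_uuid(uint8_t *buf, unsigned int ofst,
		     const uint8_t uuid[UUID_LEN])
{
	memcpy(buf + ofst, uuid, UUID_LEN);
}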
-
-/***********************************/
-/* MC_CMD_PLUGIN_ALLOC
- * Create a handle to a datapath plugin's extension. This involves finding a
- * currently-loaded plugin offering the given functionality (as identified by
- * the UUID) and allocating a handle to track the usage of it. Plugin
- * functionality is identified by 'extension' rather than any other identifier
- * so that a single plugin bitfile may offer more than one piece of independent
- * functionality. If two bitfiles are loaded which both offer the same
- * extension, then the metadata is interrogated further to determine which is
- * the newest and that is the one opened. See SF-123625-SW for architectural
- * detail on datapath plugins.
- */
-#define MC_CMD_PLUGIN_ALLOC 0x1ad
-#undef MC_CMD_0x1ad_PRIVILEGE_CTG
-
-#define MC_CMD_0x1ad_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_PLUGIN_ALLOC_IN msgrequest */
-#define MC_CMD_PLUGIN_ALLOC_IN_LEN 24
-/* The functionality requested of the plugin, as a UUID structure */
-#define MC_CMD_PLUGIN_ALLOC_IN_UUID_OFST 0
-#define MC_CMD_PLUGIN_ALLOC_IN_UUID_LEN 16
-/* Additional options for opening the handle */
-#define MC_CMD_PLUGIN_ALLOC_IN_FLAGS_OFST 16
-#define MC_CMD_PLUGIN_ALLOC_IN_FLAGS_LEN 4
-#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_INFO_ONLY_OFST 16
-#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_INFO_ONLY_LBN 0
-#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_INFO_ONLY_WIDTH 1
-#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_ALLOW_DISABLED_OFST 16
-#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_ALLOW_DISABLED_LBN 1
-#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_ALLOW_DISABLED_WIDTH 1
-/* Load the extension only if it is in the specified administrative group.
- * Specify ANY to load the extension wherever it is found (if there are
- * multiple choices then the extension with the highest MINOR_VER/PATCH_VER
- * will be loaded). See MC_CMD_PLUGIN_GET_META_GLOBAL for a description of
- * administrative groups.
- */
-#define MC_CMD_PLUGIN_ALLOC_IN_ADMIN_GROUP_OFST 20
-#define MC_CMD_PLUGIN_ALLOC_IN_ADMIN_GROUP_LEN 2
-/* enum: Load the extension from any ADMIN_GROUP. */
-#define MC_CMD_PLUGIN_ALLOC_IN_ANY 0xffff
-/* Reserved */
-#define MC_CMD_PLUGIN_ALLOC_IN_RESERVED_OFST 22
-#define MC_CMD_PLUGIN_ALLOC_IN_RESERVED_LEN 2
-
-/* MC_CMD_PLUGIN_ALLOC_OUT msgresponse */
-#define MC_CMD_PLUGIN_ALLOC_OUT_LEN 4
-/* Unique identifier of this usage */
-#define MC_CMD_PLUGIN_ALLOC_OUT_HANDLE_OFST 0
-#define MC_CMD_PLUGIN_ALLOC_OUT_HANDLE_LEN 4
-
-
-/***********************************/
-/* MC_CMD_PLUGIN_FREE
- * Delete a handle to a plugin's extension.
- */
-#define MC_CMD_PLUGIN_FREE 0x1ae
-#undef MC_CMD_0x1ae_PRIVILEGE_CTG
-
-#define MC_CMD_0x1ae_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_PLUGIN_FREE_IN msgrequest */
-#define MC_CMD_PLUGIN_FREE_IN_LEN 4
-/* Handle returned by MC_CMD_PLUGIN_ALLOC_OUT */
-#define MC_CMD_PLUGIN_FREE_IN_HANDLE_OFST 0
-#define MC_CMD_PLUGIN_FREE_IN_HANDLE_LEN 4
-
-/* MC_CMD_PLUGIN_FREE_OUT msgresponse */
-#define MC_CMD_PLUGIN_FREE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_PLUGIN_GET_META_GLOBAL
- * Returns the global metadata applying to the whole plugin extension. See the
- * other metadata calls for subtypes of data.
- */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL 0x1af
-#undef MC_CMD_0x1af_PRIVILEGE_CTG
-
-#define MC_CMD_0x1af_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_PLUGIN_GET_META_GLOBAL_IN msgrequest */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_IN_LEN 4
-/* Handle returned by MC_CMD_PLUGIN_ALLOC_OUT */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_IN_HANDLE_OFST 0
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_IN_HANDLE_LEN 4
-
-/* MC_CMD_PLUGIN_GET_META_GLOBAL_OUT msgresponse */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_LEN 36
-/* Unique identifier of this plugin extension. This is identical to the value
- * which was requested when the handle was allocated.
- */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_UUID_OFST 0
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_UUID_LEN 16
-/* semver sub-version of this plugin extension */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MINOR_VER_OFST 16
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MINOR_VER_LEN 2
-/* semver micro-version of this plugin extension */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_PATCH_VER_OFST 18
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_PATCH_VER_LEN 2
-/* Number of different messages which can be sent to this extension */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_NUM_MSGS_OFST 20
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_NUM_MSGS_LEN 4
-/* Byte offset within the VI window of the plugin's mapped CSR window. */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_OFFSET_OFST 24
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_OFFSET_LEN 2
-/* Number of bytes mapped through to the plugin's CSRs. 0 if that feature was
- * not requested by the plugin (in which case MAPPED_CSR_OFFSET and
- * MAPPED_CSR_FLAGS are ignored).
- */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_SIZE_OFST 26
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_SIZE_LEN 2
-/* Flags indicating how to perform the CSR window mapping. */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAGS_OFST 28
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAGS_LEN 4
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_READ_OFST 28
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_READ_LBN 0
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_READ_WIDTH 1
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_WRITE_OFST 28
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_WRITE_LBN 1
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_WRITE_WIDTH 1
-/* Identifier of the set of extensions which all change state together.
- * Extensions having the same ADMIN_GROUP will always load and unload at the
- * same time. ADMIN_GROUP values themselves are arbitrary (but they contain a
- * generation number as an implementation detail to ensure that they're not
- * reused rapidly).
- */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_ADMIN_GROUP_OFST 32
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_ADMIN_GROUP_LEN 1
-/* Bitshift in MC_CMD_DEVEL_CLIENT_PRIVILEGE_MODIFY's MASK parameters
- * corresponding to this extension, i.e. set the bit 1<<PRIVILEGE_BIT to permit
- * access to this extension.
- */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_PRIVILEGE_BIT_OFST 33
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_PRIVILEGE_BIT_LEN 1
-/* Reserved */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_RESERVED_OFST 34
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_RESERVED_LEN 2
-
-
-/***********************************/
-/* MC_CMD_PLUGIN_GET_META_PUBLISHER
- * Returns metadata supplied by the plugin author which describes this
- * extension in a human-readable way. Contrast with
- * MC_CMD_PLUGIN_GET_META_GLOBAL, which returns information needed for software
- * to operate.
- */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER 0x1b0
-#undef MC_CMD_0x1b0_PRIVILEGE_CTG
-
-#define MC_CMD_0x1b0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_PLUGIN_GET_META_PUBLISHER_IN msgrequest */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_LEN 12
-/* Handle returned by MC_CMD_PLUGIN_ALLOC_OUT */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_HANDLE_OFST 0
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_HANDLE_LEN 4
-/* Category of data to return */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_SUBTYPE_OFST 4
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_SUBTYPE_LEN 4
-/* enum: Top-level information about the extension. The returned data is an
- * array of key/value pairs using the keys in RFC5013 (Dublin Core) to describe
- * the extension. The data is a back-to-back list of zero-terminated strings;
- * the even-numbered fields (0,2,4,...) are keys and their following odd-
- * numbered fields are the corresponding values. Both keys and values are
- * nominally UTF-8. Per RFC5013, the same key may be repeated any number of
- * times. Note that all information (including the key/value structure itself
- * and the UTF-8 encoding) may have been provided by the plugin author, so
- * callers must be cautious about parsing it. Callers should parse only the
- * top-level structure to separate out the keys and values; the contents of the
- * values are not expected to be machine-readable.
- */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_EXTENSION_KVS 0x0
-/* Byte position of the data to be returned within the full data block of the
- * given SUBTYPE.
- */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_OFFSET_OFST 8
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_OFFSET_LEN 4
-
-/* MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT msgresponse */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_LENMIN 4
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_LENMAX 252
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_LEN(num) (4+1*(num))
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_NUM(len) (((len)-4)/1)
-/* Full length of the data block of the requested SUBTYPE, in bytes. */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_TOTAL_SIZE_OFST 0
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_TOTAL_SIZE_LEN 4
-/* The information requested by SUBTYPE. */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_OFST 4
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_LEN 1
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_MINNUM 0
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_MAXNUM 248
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_MAXNUM_MCDI2 1016
-
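The EXTENSION_KVS layout described above (back-to-back NUL-terminated strings, alternating key and value) is easy to mis-parse, so here is a defensive sketch. It assumes the caller has already reassembled the full TOTAL_SIZE blob from one or more OFFSET-based requests, and it deliberately stops on any unterminated entry since the content is author-supplied.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Walk an EXTENSION_KVS blob: even-numbered strings are keys, the
 * following odd-numbered strings are their values. */
static void dump_extension_kvs(const char *data, size_t len)
{
	size_t pos = 0;

	while (pos < len) {
		const char *key = data + pos;
		size_t key_len = strnlen(key, len - pos);
		const char *val;
		size_t val_len;

		if (key_len == len - pos)
			break;		/* unterminated key: stop parsing */
		pos += key_len + 1;

		val = data + pos;
		val_len = strnlen(val, len - pos);
		if (val_len == len - pos)
			break;		/* key without a terminated value */
		pos += val_len + 1;

		printf("%s: %s\n", key, val);
	}
}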
-
-/***********************************/
-/* MC_CMD_PLUGIN_GET_META_MSG
- * Returns the simple metadata for a specific plugin request message. This
- * supplies information necessary for the host to know how to build an
- * MC_CMD_PLUGIN_REQ request.
- */
-#define MC_CMD_PLUGIN_GET_META_MSG 0x1b1
-#undef MC_CMD_0x1b1_PRIVILEGE_CTG
-
-#define MC_CMD_0x1b1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_PLUGIN_GET_META_MSG_IN msgrequest */
-#define MC_CMD_PLUGIN_GET_META_MSG_IN_LEN 8
-/* Handle returned by MC_CMD_PLUGIN_ALLOC_OUT */
-#define MC_CMD_PLUGIN_GET_META_MSG_IN_HANDLE_OFST 0
-#define MC_CMD_PLUGIN_GET_META_MSG_IN_HANDLE_LEN 4
-/* Unique message ID to obtain */
-#define MC_CMD_PLUGIN_GET_META_MSG_IN_ID_OFST 4
-#define MC_CMD_PLUGIN_GET_META_MSG_IN_ID_LEN 4
-
-/* MC_CMD_PLUGIN_GET_META_MSG_OUT msgresponse */
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_LEN 44
-/* Unique message ID. This is the same value as the input parameter; it exists
- * to allow future MCDI extensions which enumerate all messages.
- */
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_ID_OFST 0
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_ID_LEN 4
-/* Packed index number of this message, assigned by the MC to give each message
- * a unique ID in an array to allow for more efficient storage/management.
- */
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_INDEX_OFST 4
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_INDEX_LEN 4
-/* Short human-readable codename for this message. This is conventionally
- * formatted as a C identifier in the basic ASCII character set with any spare
- * bytes at the end set to 0; however, this convention is not enforced by the MC
- * so consumers must check for all potential malformations before using it for
- * a trusted purpose.
- */
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_NAME_OFST 8
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_NAME_LEN 32
-/* Number of bytes of data which must be passed from the host kernel to the MC
- * for this message's payload, and which are passed back again in the response.
- * The MC's plugin metadata loader will have validated that the number of bytes
- * specified here will fit in to MC_CMD_PLUGIN_REQ_IN_DATA in a single MCDI
- * message.
- */
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_DATA_SIZE_OFST 40
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_DATA_SIZE_LEN 4
-
-/* PLUGIN_EXTENSION structuredef: Used within MC_CMD_PLUGIN_GET_ALL to describe
- * an individual extension.
- */
-#define PLUGIN_EXTENSION_LEN 20
-#define PLUGIN_EXTENSION_UUID_OFST 0
-#define PLUGIN_EXTENSION_UUID_LEN 16
-#define PLUGIN_EXTENSION_UUID_LBN 0
-#define PLUGIN_EXTENSION_UUID_WIDTH 128
-#define PLUGIN_EXTENSION_ADMIN_GROUP_OFST 16
-#define PLUGIN_EXTENSION_ADMIN_GROUP_LEN 1
-#define PLUGIN_EXTENSION_ADMIN_GROUP_LBN 128
-#define PLUGIN_EXTENSION_ADMIN_GROUP_WIDTH 8
-#define PLUGIN_EXTENSION_FLAG_ENABLED_LBN 136
-#define PLUGIN_EXTENSION_FLAG_ENABLED_WIDTH 1
-#define PLUGIN_EXTENSION_RESERVED_LBN 137
-#define PLUGIN_EXTENSION_RESERVED_WIDTH 23
-
-
-/***********************************/
-/* MC_CMD_PLUGIN_GET_ALL
- * Returns a list of all plugin extensions currently loaded and available. The
- * UUIDs returned can be passed to MC_CMD_PLUGIN_ALLOC in order to obtain more
- * detailed metadata via the MC_CMD_PLUGIN_GET_META_* family of requests. The
- * ADMIN_GROUP field reflects how extensions are grouped into units which are
- * loaded/unloaded together; extensions with the same value are in the same
- * group.
- */
-#define MC_CMD_PLUGIN_GET_ALL 0x1b2
-#undef MC_CMD_0x1b2_PRIVILEGE_CTG
-
-#define MC_CMD_0x1b2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_PLUGIN_GET_ALL_IN msgrequest */
-#define MC_CMD_PLUGIN_GET_ALL_IN_LEN 4
-/* Additional options for querying. Note that if neither FLAG_INCLUDE_ENABLED
- * nor FLAG_INCLUDE_DISABLED are specified then the result set will be empty.
- */
-#define MC_CMD_PLUGIN_GET_ALL_IN_FLAGS_OFST 0
-#define MC_CMD_PLUGIN_GET_ALL_IN_FLAGS_LEN 4
-#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_ENABLED_OFST 0
-#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_ENABLED_LBN 0
-#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_ENABLED_WIDTH 1
-#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_DISABLED_OFST 0
-#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_DISABLED_LBN 1
-#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_DISABLED_WIDTH 1
-
-/* MC_CMD_PLUGIN_GET_ALL_OUT msgresponse */
-#define MC_CMD_PLUGIN_GET_ALL_OUT_LENMIN 0
-#define MC_CMD_PLUGIN_GET_ALL_OUT_LENMAX 240
-#define MC_CMD_PLUGIN_GET_ALL_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_PLUGIN_GET_ALL_OUT_LEN(num) (0+20*(num))
-#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_NUM(len) (((len)-0)/20)
-/* The list of available plugin extensions, as an array of PLUGIN_EXTENSION
- * structs.
- */
-#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_OFST 0
-#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_LEN 20
-#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_MINNUM 0
-#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_MAXNUM 12
-#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_MAXNUM_MCDI2 51
-
-
-/***********************************/
-/* MC_CMD_PLUGIN_REQ
- * Send a command to a plugin. A plugin may define an arbitrary number of
- * 'messages' which it allows applications on the host system to send, each
- * identified by a 32-bit ID.
- */
-#define MC_CMD_PLUGIN_REQ 0x1b3
-#undef MC_CMD_0x1b3_PRIVILEGE_CTG
-
-#define MC_CMD_0x1b3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_PLUGIN_REQ_IN msgrequest */
-#define MC_CMD_PLUGIN_REQ_IN_LENMIN 8
-#define MC_CMD_PLUGIN_REQ_IN_LENMAX 252
-#define MC_CMD_PLUGIN_REQ_IN_LENMAX_MCDI2 1020
-#define MC_CMD_PLUGIN_REQ_IN_LEN(num) (8+1*(num))
-#define MC_CMD_PLUGIN_REQ_IN_DATA_NUM(len) (((len)-8)/1)
-/* Handle returned by MC_CMD_PLUGIN_ALLOC_OUT */
-#define MC_CMD_PLUGIN_REQ_IN_HANDLE_OFST 0
-#define MC_CMD_PLUGIN_REQ_IN_HANDLE_LEN 4
-/* Message ID defined by the plugin author */
-#define MC_CMD_PLUGIN_REQ_IN_ID_OFST 4
-#define MC_CMD_PLUGIN_REQ_IN_ID_LEN 4
-/* Data blob being the parameter to the message. This must be of the length
- * specified by MC_CMD_PLUGIN_GET_META_MSG_IN_MCDI_PARAM_SIZE.
- */
-#define MC_CMD_PLUGIN_REQ_IN_DATA_OFST 8
-#define MC_CMD_PLUGIN_REQ_IN_DATA_LEN 1
-#define MC_CMD_PLUGIN_REQ_IN_DATA_MINNUM 0
-#define MC_CMD_PLUGIN_REQ_IN_DATA_MAXNUM 244
-#define MC_CMD_PLUGIN_REQ_IN_DATA_MAXNUM_MCDI2 1012
-
-/* MC_CMD_PLUGIN_REQ_OUT msgresponse */
-#define MC_CMD_PLUGIN_REQ_OUT_LENMIN 0
-#define MC_CMD_PLUGIN_REQ_OUT_LENMAX 252
-#define MC_CMD_PLUGIN_REQ_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_PLUGIN_REQ_OUT_LEN(num) (0+1*(num))
-#define MC_CMD_PLUGIN_REQ_OUT_DATA_NUM(len) (((len)-0)/1)
-/* The input data, as transformed and/or updated by the plugin's eBPF. Will be
- * the same size as the input DATA parameter.
- */
-#define MC_CMD_PLUGIN_REQ_OUT_DATA_OFST 0
-#define MC_CMD_PLUGIN_REQ_OUT_DATA_LEN 1
-#define MC_CMD_PLUGIN_REQ_OUT_DATA_MINNUM 0
-#define MC_CMD_PLUGIN_REQ_OUT_DATA_MAXNUM 252
-#define MC_CMD_PLUGIN_REQ_OUT_DATA_MAXNUM_MCDI2 1020
-
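As a sketch of how the variable-length request above is sized, the following assembles an MC_CMD_PLUGIN_REQ_IN buffer. data_size is expected to equal the DATA_SIZE reported by MC_CMD_PLUGIN_GET_META_MSG for this message ID and must not exceed the DATA_MAXNUM limits above; the little-endian helper and the omitted MCDI transport call are assumptions.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Offsets and length macro copied from the definitions above. */
#define REQ_IN_HANDLE_OFST 0
#define REQ_IN_ID_OFST     4
#define REQ_IN_DATA_OFST   8
#define REQ_IN_LEN(num)    (8 + 1 * (num))

static void put_le32(uint8_t *buf, unsigned int ofst, uint32_t v)
{
	buf[ofst + 0] = (uint8_t)(v);
	buf[ofst + 1] = (uint8_t)(v >> 8);
	buf[ofst + 2] = (uint8_t)(v >> 16);
	buf[ofst + 3] = (uint8_t)(v >> 24);
}

/* Returns the total request length to pass to the MCDI transport. */
static size_t build_plugin_req(uint8_t *req, uint32_t handle, uint32_t msg_id,
			       const uint8_t *data, size_t data_size)
{
	put_le32(req, REQ_IN_HANDLE_OFST, handle);
	put_le32(req, REQ_IN_ID_OFST, msg_id);
	memcpy(req + REQ_IN_DATA_OFST, data, data_size);
	return REQ_IN_LEN(data_size);
}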
-/* DESC_ADDR_REGION structuredef: Describes a contiguous region of DESC_ADDR
- * space that maps to a contiguous region of TRGT_ADDR space. Addresses
- * DESC_ADDR in the range [DESC_ADDR_BASE:DESC_ADDR_BASE + 1 <<
- * WINDOW_SIZE_LOG2) map to TRGT_ADDR = DESC_ADDR - DESC_ADDR_BASE +
- * TRGT_ADDR_BASE.
- */
-#define DESC_ADDR_REGION_LEN 32
-/* The start of the region in DESC_ADDR space. */
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_OFST 0
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_LEN 8
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_LO_OFST 0
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_LO_LEN 4
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_LO_LBN 0
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_LO_WIDTH 32
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_HI_OFST 4
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_HI_LEN 4
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_HI_LBN 32
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_HI_WIDTH 32
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_LBN 0
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_WIDTH 64
-/* The start of the region in TRGT_ADDR space. Drivers can set this via
- * MC_CMD_SET_DESC_ADDR_REGIONS.
- */
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_OFST 8
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LEN 8
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LO_OFST 8
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LO_LEN 4
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LO_LBN 64
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LO_WIDTH 32
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_HI_OFST 12
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_HI_LEN 4
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_HI_LBN 96
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_HI_WIDTH 32
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LBN 64
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_WIDTH 64
-/* The size of the region. */
-#define DESC_ADDR_REGION_WINDOW_SIZE_LOG2_OFST 16
-#define DESC_ADDR_REGION_WINDOW_SIZE_LOG2_LEN 4
-#define DESC_ADDR_REGION_WINDOW_SIZE_LOG2_LBN 128
-#define DESC_ADDR_REGION_WINDOW_SIZE_LOG2_WIDTH 32
-/* The alignment restriction on TRGT_ADDR. TRGT_ADDR values set by the driver
- * must be a multiple of 1 << TRGT_ADDR_ALIGN_LOG2.
- */
-#define DESC_ADDR_REGION_TRGT_ADDR_ALIGN_LOG2_OFST 20
-#define DESC_ADDR_REGION_TRGT_ADDR_ALIGN_LOG2_LEN 4
-#define DESC_ADDR_REGION_TRGT_ADDR_ALIGN_LOG2_LBN 160
-#define DESC_ADDR_REGION_TRGT_ADDR_ALIGN_LOG2_WIDTH 32
-#define DESC_ADDR_REGION_RSVD_OFST 24
-#define DESC_ADDR_REGION_RSVD_LEN 8
-#define DESC_ADDR_REGION_RSVD_LO_OFST 24
-#define DESC_ADDR_REGION_RSVD_LO_LEN 4
-#define DESC_ADDR_REGION_RSVD_LO_LBN 192
-#define DESC_ADDR_REGION_RSVD_LO_WIDTH 32
-#define DESC_ADDR_REGION_RSVD_HI_OFST 28
-#define DESC_ADDR_REGION_RSVD_HI_LEN 4
-#define DESC_ADDR_REGION_RSVD_HI_LBN 224
-#define DESC_ADDR_REGION_RSVD_HI_WIDTH 32
-#define DESC_ADDR_REGION_RSVD_LBN 192
-#define DESC_ADDR_REGION_RSVD_WIDTH 64
-
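The mapping rule quoted in the structuredef comment above reduces to a one-line calculation; here is a small sketch with a bounds check, operating on already-decoded field values (extracting them from the 32-byte structure is left to the caller).

#include <stdbool.h>
#include <stdint.h>

/* Translate a DESC_ADDR to a TRGT_ADDR through one DESC_ADDR_REGION.
 * Returns false if the address falls outside the region's window. */
static bool desc_to_trgt(uint64_t desc_addr, uint64_t desc_addr_base,
			 uint64_t trgt_addr_base, uint32_t window_size_log2,
			 uint64_t *trgt_addr)
{
	uint64_t off = desc_addr - desc_addr_base;

	if (desc_addr < desc_addr_base)
		return false;
	if (window_size_log2 < 64 && off >= (1ull << window_size_log2))
		return false;

	*trgt_addr = trgt_addr_base + off;
	return true;
}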
-
-/***********************************/
/* MC_CMD_GET_DESC_ADDR_INFO
* Returns a description of the mapping from DESC_ADDR to TRGT_ADDR for the calling function's address space.
*/
@@ -24836,122 +22037,6 @@
/***********************************/
-/* MC_CMD_GET_BOARD_ATTR
- * Retrieve physical build-level board attributes as configured at
- * manufacturing stage. Fields originate from EEPROM and per-platform constants
- * in firmware. Fields are used in development to identify/differentiate
- * boards based on build levels/parameters, and also in manufacturing to
- * cross-check that "what was programmed in manufacturing" is the same as
- * "what firmware thinks has been programmed", as there are two layers of
- * translation within firmware before the attributes reach this MCDI handler.
- * Some parameters are
- * retrieved as part of other commands and therefore not replicated here. See
- * GET_VERSION_OUT.
- */
-#define MC_CMD_GET_BOARD_ATTR 0x1c6
-#undef MC_CMD_0x1c6_PRIVILEGE_CTG
-
-#define MC_CMD_0x1c6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_BOARD_ATTR_IN msgrequest */
-#define MC_CMD_GET_BOARD_ATTR_IN_LEN 0
-
-/* MC_CMD_GET_BOARD_ATTR_OUT msgresponse */
-#define MC_CMD_GET_BOARD_ATTR_OUT_LEN 16
-/* Defines board capabilities and validity of attributes returned in this
- * response-message.
- */
-#define MC_CMD_GET_BOARD_ATTR_OUT_FLAGS_OFST 0
-#define MC_CMD_GET_BOARD_ATTR_OUT_FLAGS_LEN 4
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_FAN_OFST 0
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_FAN_LBN 0
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_FAN_WIDTH 1
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_SOC_OFST 0
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_SOC_LBN 1
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_SOC_WIDTH 1
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_AUX_POWER_OFST 0
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_AUX_POWER_LBN 2
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_AUX_POWER_WIDTH 1
-#define MC_CMD_GET_BOARD_ATTR_OUT_ATTRIBUTES_OFST 4
-#define MC_CMD_GET_BOARD_ATTR_OUT_ATTRIBUTES_LEN 4
-#define MC_CMD_GET_BOARD_ATTR_OUT_SOC_EE_OFST 4
-#define MC_CMD_GET_BOARD_ATTR_OUT_SOC_EE_LBN 0
-#define MC_CMD_GET_BOARD_ATTR_OUT_SOC_EE_WIDTH 1
-#define MC_CMD_GET_BOARD_ATTR_OUT_SUC_EE_OFST 4
-#define MC_CMD_GET_BOARD_ATTR_OUT_SUC_EE_LBN 1
-#define MC_CMD_GET_BOARD_ATTR_OUT_SUC_EE_WIDTH 1
-#define MC_CMD_GET_BOARD_ATTR_OUT_FPGA_VOLTAGES_SUPPORTED_OFST 4
-#define MC_CMD_GET_BOARD_ATTR_OUT_FPGA_VOLTAGES_SUPPORTED_LBN 16
-#define MC_CMD_GET_BOARD_ATTR_OUT_FPGA_VOLTAGES_SUPPORTED_WIDTH 8
-/* enum: The FPGA voltage on the adapter can be set to low */
-#define MC_CMD_FPGA_VOLTAGE_LOW 0x0
-/* enum: The FPGA voltage on the adapter can be set to regular */
-#define MC_CMD_FPGA_VOLTAGE_REG 0x1
-/* enum: The FPGA voltage on the adapter can be set to high */
-#define MC_CMD_FPGA_VOLTAGE_HIGH 0x2
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_COUNT_OFST 4
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_COUNT_LBN 24
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_COUNT_WIDTH 8
-/* An array of cage types on the board */
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_OFST 8
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_LEN 1
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_NUM 8
-/* enum: The cages are not known */
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_UNKNOWN 0x0
-/* enum: The cages are SFP/SFP+ */
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_SFP 0x1
-/* enum: The cages are QSFP/QSFP+ */
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_QSFP 0x2
-
-
-/***********************************/
-/* MC_CMD_GET_SOC_STATE
- * Retrieve current state of the System-on-Chip. This command is valid when
- * MC_CMD_GET_BOARD_ATTR:HAS_SOC is set.
- */
-#define MC_CMD_GET_SOC_STATE 0x1c7
-#undef MC_CMD_0x1c7_PRIVILEGE_CTG
-
-#define MC_CMD_0x1c7_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_SOC_STATE_IN msgrequest */
-#define MC_CMD_GET_SOC_STATE_IN_LEN 0
-
-/* MC_CMD_GET_SOC_STATE_OUT msgresponse */
-#define MC_CMD_GET_SOC_STATE_OUT_LEN 12
-/* Status flags for the SoC */
-#define MC_CMD_GET_SOC_STATE_OUT_FLAGS_OFST 0
-#define MC_CMD_GET_SOC_STATE_OUT_FLAGS_LEN 4
-#define MC_CMD_GET_SOC_STATE_OUT_SHOULD_THROTTLE_OFST 0
-#define MC_CMD_GET_SOC_STATE_OUT_SHOULD_THROTTLE_LBN 0
-#define MC_CMD_GET_SOC_STATE_OUT_SHOULD_THROTTLE_WIDTH 1
-#define MC_CMD_GET_SOC_STATE_OUT_OS_RECOVERY_REQUIRED_OFST 0
-#define MC_CMD_GET_SOC_STATE_OUT_OS_RECOVERY_REQUIRED_LBN 1
-#define MC_CMD_GET_SOC_STATE_OUT_OS_RECOVERY_REQUIRED_WIDTH 1
-#define MC_CMD_GET_SOC_STATE_OUT_WDT_FIRED_OFST 0
-#define MC_CMD_GET_SOC_STATE_OUT_WDT_FIRED_LBN 2
-#define MC_CMD_GET_SOC_STATE_OUT_WDT_FIRED_WIDTH 1
-/* Status fields for the SoC */
-#define MC_CMD_GET_SOC_STATE_OUT_ATTRIBUTES_OFST 4
-#define MC_CMD_GET_SOC_STATE_OUT_ATTRIBUTES_LEN 4
-#define MC_CMD_GET_SOC_STATE_OUT_RUN_STATE_OFST 4
-#define MC_CMD_GET_SOC_STATE_OUT_RUN_STATE_LBN 0
-#define MC_CMD_GET_SOC_STATE_OUT_RUN_STATE_WIDTH 8
-/* enum: Power on (set by SUC on power up) */
-#define MC_CMD_GET_SOC_STATE_OUT_SOC_BOOT 0x0
-/* enum: Running bootloader */
-#define MC_CMD_GET_SOC_STATE_OUT_SOC_BOOTLOADER 0x1
-/* enum: Bootloader has started OS. OS is booting */
-#define MC_CMD_GET_SOC_STATE_OUT_SOC_OS_START 0x2
-/* enum: OS is running */
-#define MC_CMD_GET_SOC_STATE_OUT_SOC_OS_RUNNING 0x3
-/* enum: Maintenance OS is running */
-#define MC_CMD_GET_SOC_STATE_OUT_SOC_OS_MAINTENANCE 0x4
-/* Number of SoC resets since power on */
-#define MC_CMD_GET_SOC_STATE_OUT_RESET_COUNT_OFST 8
-#define MC_CMD_GET_SOC_STATE_OUT_RESET_COUNT_LEN 4
-
-
-/***********************************/
/* MC_CMD_CHECK_SCHEDULER_CREDITS
* For debugging purposes. For each source and destination node in the hardware
* schedulers, check whether the number of credits is as it should be. This
@@ -25010,76 +22095,6 @@
/***********************************/
-/* MC_CMD_TXQ_STATS
- * Query per-TXQ statistics.
- */
-#define MC_CMD_TXQ_STATS 0x1d5
-#undef MC_CMD_0x1d5_PRIVILEGE_CTG
-
-#define MC_CMD_0x1d5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_TXQ_STATS_IN msgrequest */
-#define MC_CMD_TXQ_STATS_IN_LEN 8
-/* Instance of TXQ to retrieve statistics for */
-#define MC_CMD_TXQ_STATS_IN_INSTANCE_OFST 0
-#define MC_CMD_TXQ_STATS_IN_INSTANCE_LEN 4
-/* Flags for the request */
-#define MC_CMD_TXQ_STATS_IN_FLAGS_OFST 4
-#define MC_CMD_TXQ_STATS_IN_FLAGS_LEN 4
-#define MC_CMD_TXQ_STATS_IN_CLEAR_OFST 4
-#define MC_CMD_TXQ_STATS_IN_CLEAR_LBN 0
-#define MC_CMD_TXQ_STATS_IN_CLEAR_WIDTH 1
-
-/* MC_CMD_TXQ_STATS_OUT msgresponse */
-#define MC_CMD_TXQ_STATS_OUT_LENMIN 0
-#define MC_CMD_TXQ_STATS_OUT_LENMAX 248
-#define MC_CMD_TXQ_STATS_OUT_LENMAX_MCDI2 1016
-#define MC_CMD_TXQ_STATS_OUT_LEN(num) (0+8*(num))
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_NUM(len) (((len)-0)/8)
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_OFST 0
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_LEN 8
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_LO_OFST 0
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_LO_LEN 4
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_LO_LBN 0
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_LO_WIDTH 32
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_HI_OFST 4
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_HI_LEN 4
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_HI_LBN 32
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_HI_WIDTH 32
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_MINNUM 0
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_MAXNUM 31
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_MAXNUM_MCDI2 127
-#define MC_CMD_TXQ_STATS_CTPIO_MAX_FILL 0x0 /* enum */
-
-/* FUNCTION_PERSONALITY structuredef: The meanings of the personalities are
- * defined in SF-120734-TC with more information in SF-122717-TC.
- */
-#define FUNCTION_PERSONALITY_LEN 4
-#define FUNCTION_PERSONALITY_ID_OFST 0
-#define FUNCTION_PERSONALITY_ID_LEN 4
-/* enum: Function has no assigned personality */
-#define FUNCTION_PERSONALITY_NULL 0x0
-/* enum: Function has an EF100-style function control window and VI windows
- * with both EF100 and vDPA doorbells.
- */
-#define FUNCTION_PERSONALITY_EF100 0x1
-/* enum: Function has virtio net device configuration registers and doorbells
- * for virtio queue pairs.
- */
-#define FUNCTION_PERSONALITY_VIRTIO_NET 0x2
-/* enum: Function has virtio block device configuration registers and a
- * doorbell for a single virtqueue.
- */
-#define FUNCTION_PERSONALITY_VIRTIO_BLK 0x3
-/* enum: Function is a Xilinx acceleration device - management function */
-#define FUNCTION_PERSONALITY_ACCEL_MGMT 0x4
-/* enum: Function is a Xilinx acceleration device - user function */
-#define FUNCTION_PERSONALITY_ACCEL_USR 0x5
-#define FUNCTION_PERSONALITY_ID_LBN 0
-#define FUNCTION_PERSONALITY_ID_WIDTH 32
-
-
-/***********************************/
/* MC_CMD_VIRTIO_GET_FEATURES
* Get a list of the virtio features supported by the device.
*/
@@ -25162,37 +22177,6 @@
/***********************************/
-/* MC_CMD_VIRTIO_GET_CAPABILITIES
- * Get virtio capabilities supported by the device. Returns general virtio
- * capabilities and limitations of the hardware / firmware implementation
- * (hardware device as a whole), rather than that of individual configured
- * virtio devices. At present, only the absolute maximum number of queues
- * allowed on multi-queue devices is returned. Response is expected to be
- * extended as necessary in the future.
- */
-#define MC_CMD_VIRTIO_GET_CAPABILITIES 0x1d3
-#undef MC_CMD_0x1d3_PRIVILEGE_CTG
-
-#define MC_CMD_0x1d3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_VIRTIO_GET_CAPABILITIES_IN msgrequest */
-#define MC_CMD_VIRTIO_GET_CAPABILITIES_IN_LEN 4
-/* Type of device to get capabilities for. Matches the device id as defined by
- * the virtio spec.
- */
-#define MC_CMD_VIRTIO_GET_CAPABILITIES_IN_DEVICE_ID_OFST 0
-#define MC_CMD_VIRTIO_GET_CAPABILITIES_IN_DEVICE_ID_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_VIRTIO_GET_FEATURES/MC_CMD_VIRTIO_GET_FEATURES_IN/DEVICE_ID */
-
-/* MC_CMD_VIRTIO_GET_CAPABILITIES_OUT msgresponse */
-#define MC_CMD_VIRTIO_GET_CAPABILITIES_OUT_LEN 4
-/* Maximum number of queues supported for a single device instance */
-#define MC_CMD_VIRTIO_GET_CAPABILITIES_OUT_MAX_QUEUES_OFST 0
-#define MC_CMD_VIRTIO_GET_CAPABILITIES_OUT_MAX_QUEUES_LEN 4
-
-
-/***********************************/
/* MC_CMD_VIRTIO_INIT_QUEUE
* Create a virtio virtqueue. Fails with EALREADY if the queue already exists.
* Fails with ENOSUP if a feature is requested that isn't supported. Fails with
@@ -25474,866 +22458,6 @@
#define PCIE_FUNCTION_INTF_LBN 32
#define PCIE_FUNCTION_INTF_WIDTH 32
-/* QUEUE_ID structuredef: Structure representing an absolute queue identifier
- * (absolute VI number + VI relative queue number). On Keystone, a VI can
- * contain multiple queues (at present, up to 2), each with separate controls
- * for direction. This structure is required to uniquely identify the absolute
- * source queue for descriptor proxy functions.
- */
-#define QUEUE_ID_LEN 4
-/* Absolute VI number */
-#define QUEUE_ID_ABS_VI_OFST 0
-#define QUEUE_ID_ABS_VI_LEN 2
-#define QUEUE_ID_ABS_VI_LBN 0
-#define QUEUE_ID_ABS_VI_WIDTH 16
-/* Relative queue number within the VI */
-#define QUEUE_ID_REL_QUEUE_LBN 16
-#define QUEUE_ID_REL_QUEUE_WIDTH 1
-#define QUEUE_ID_RESERVED_LBN 17
-#define QUEUE_ID_RESERVED_WIDTH 15
-
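The QUEUE_ID layout above is a straightforward bit-pack; a minimal sketch follows, with the helper name being an assumption.

#include <stdint.h>

/* Pack an absolute VI number and a VI-relative queue number into the
 * 32-bit QUEUE_ID layout above: ABS_VI in bits 0..15, REL_QUEUE in
 * bit 16, remaining bits reserved as zero. */
static uint32_t make_queue_id(uint16_t abs_vi, unsigned int rel_queue)
{
	return (uint32_t)abs_vi | ((uint32_t)(rel_queue & 1) << 16);
}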
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_CREATE
- * Descriptor proxy functions are abstract devices that forward all requests
- * submitted to the host PCIe function (descriptors submitted to Virtio or
- * EF100 queues) to be handled on another function (most commonly on the
- * embedded Application Processor), via EF100 descriptor proxy, memory-to-
- * memory and descriptor-to-completion mechanisms. Primary user is Virtio-blk
- * subsystem, see SF-122927-TC. This function allocates a new descriptor proxy
- * function on the host and assigns a user-defined label. The actual function
- * configuration is not persisted until the caller configures it with
- * MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN and commits with
- * MC_CMD_DESC_PROXY_FUNC_COMMIT_IN.
- */
-#define MC_CMD_DESC_PROXY_FUNC_CREATE 0x172
-#undef MC_CMD_0x172_PRIVILEGE_CTG
-
-#define MC_CMD_0x172_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_CREATE_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_LEN 52
-/* PCIe Function ID to allocate (as struct PCIE_FUNCTION). Set to
- * {PF_ANY,VF_ANY,interface} for "any available function" Set to
- * {PF_ANY,VF_NULL,interface} for "any available PF"
- */
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LEN 8
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LO_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LO_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LO_LBN 0
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LO_WIDTH 32
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_HI_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_HI_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_HI_LBN 32
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_HI_WIDTH 32
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_PF_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_PF_LEN 2
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_VF_OFST 2
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_VF_LEN 2
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_INTF_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_INTF_LEN 4
-/* The personality to set. The meanings of the personalities are defined in
- * SF-120734-TC with more information in SF-122717-TC. At present, we only
- * support proxying for VIRTIO_BLK
- */
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_PERSONALITY_OFST 8
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_PERSONALITY_LEN 4
-/* Enum values, see field(s): */
-/* FUNCTION_PERSONALITY/ID */
-/* User-defined label (zero-terminated ASCII string) to uniquely identify the
- * function
- */
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_LABEL_OFST 12
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_LABEL_LEN 40
-
-/* MC_CMD_DESC_PROXY_FUNC_CREATE_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_LEN 12
-/* Handle to the descriptor proxy function */
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_HANDLE_LEN 4
-/* Allocated function ID (as struct PCIE_FUNCTION) */
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LEN 8
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LO_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LO_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LO_LBN 32
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LO_WIDTH 32
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_HI_OFST 8
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_HI_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_HI_LBN 64
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_HI_WIDTH 32
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_PF_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_PF_LEN 2
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_VF_OFST 6
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_VF_LEN 2
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_INTF_OFST 8
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_INTF_LEN 4
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_DESTROY
- * Remove an existing descriptor proxy function. Underlying function
- * personality and configuration revert to factory defaults. Function
- * configuration is committed immediately to specified store and any function
- * ownership is released.
- */
-#define MC_CMD_DESC_PROXY_FUNC_DESTROY 0x173
-#undef MC_CMD_0x173_PRIVILEGE_CTG
-
-#define MC_CMD_0x173_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_DESTROY_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_DESTROY_IN_LEN 44
-/* User-defined label (zero-terminated ASCII string) to uniquely identify the
- * function
- */
-#define MC_CMD_DESC_PROXY_FUNC_DESTROY_IN_LABEL_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_DESTROY_IN_LABEL_LEN 40
-/* Store from which to remove function configuration */
-#define MC_CMD_DESC_PROXY_FUNC_DESTROY_IN_STORE_OFST 40
-#define MC_CMD_DESC_PROXY_FUNC_DESTROY_IN_STORE_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_DESC_PROXY_FUNC_COMMIT/MC_CMD_DESC_PROXY_FUNC_COMMIT_IN/STORE */
-
-/* MC_CMD_DESC_PROXY_FUNC_DESTROY_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_DESTROY_OUT_LEN 0
-
-/* VIRTIO_BLK_CONFIG structuredef: Virtio block device configuration. See
- * Virtio specification v1.1, Sections 5.2.3 and 6 for definition of feature
- * bits. See Virtio specification v1.1, Section 5.2.4 (struct
- * virtio_blk_config) for definition of remaining configuration fields
- */
-#define VIRTIO_BLK_CONFIG_LEN 68
-/* Virtio block device features to advertise, per Virtio 1.1, 5.2.3 and 6 */
-#define VIRTIO_BLK_CONFIG_FEATURES_OFST 0
-#define VIRTIO_BLK_CONFIG_FEATURES_LEN 8
-#define VIRTIO_BLK_CONFIG_FEATURES_LO_OFST 0
-#define VIRTIO_BLK_CONFIG_FEATURES_LO_LEN 4
-#define VIRTIO_BLK_CONFIG_FEATURES_LO_LBN 0
-#define VIRTIO_BLK_CONFIG_FEATURES_LO_WIDTH 32
-#define VIRTIO_BLK_CONFIG_FEATURES_HI_OFST 4
-#define VIRTIO_BLK_CONFIG_FEATURES_HI_LEN 4
-#define VIRTIO_BLK_CONFIG_FEATURES_HI_LBN 32
-#define VIRTIO_BLK_CONFIG_FEATURES_HI_WIDTH 32
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BARRIER_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BARRIER_LBN 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BARRIER_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SIZE_MAX_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SIZE_MAX_LBN 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SIZE_MAX_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SEG_MAX_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SEG_MAX_LBN 2
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SEG_MAX_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_GEOMETRY_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_GEOMETRY_LBN 4
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_GEOMETRY_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_RO_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_RO_LBN 5
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_RO_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BLK_SIZE_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BLK_SIZE_LBN 6
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BLK_SIZE_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SCSI_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SCSI_LBN 7
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SCSI_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_FLUSH_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_FLUSH_LBN 9
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_FLUSH_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_TOPOLOGY_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_TOPOLOGY_LBN 10
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_TOPOLOGY_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_CONFIG_WCE_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_CONFIG_WCE_LBN 11
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_CONFIG_WCE_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_MQ_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_MQ_LBN 12
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_MQ_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_DISCARD_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_DISCARD_LBN 13
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_DISCARD_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_WRITE_ZEROES_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_WRITE_ZEROES_LBN 14
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_WRITE_ZEROES_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_INDIRECT_DESC_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_INDIRECT_DESC_LBN 28
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_INDIRECT_DESC_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_EVENT_IDX_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_EVENT_IDX_LBN 29
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_EVENT_IDX_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_VERSION_1_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_VERSION_1_LBN 32
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_VERSION_1_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_ACCESS_PLATFORM_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_ACCESS_PLATFORM_LBN 33
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_ACCESS_PLATFORM_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_PACKED_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_PACKED_LBN 34
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_PACKED_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_IN_ORDER_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_IN_ORDER_LBN 35
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_IN_ORDER_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_ORDER_PLATFORM_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_ORDER_PLATFORM_LBN 36
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_ORDER_PLATFORM_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_SR_IOV_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_SR_IOV_LBN 37
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_SR_IOV_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_NOTIFICATION_DATA_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_NOTIFICATION_DATA_LBN 38
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_NOTIFICATION_DATA_WIDTH 1
-#define VIRTIO_BLK_CONFIG_FEATURES_LBN 0
-#define VIRTIO_BLK_CONFIG_FEATURES_WIDTH 64
-/* The capacity of the device (expressed in 512-byte sectors) */
-#define VIRTIO_BLK_CONFIG_CAPACITY_OFST 8
-#define VIRTIO_BLK_CONFIG_CAPACITY_LEN 8
-#define VIRTIO_BLK_CONFIG_CAPACITY_LO_OFST 8
-#define VIRTIO_BLK_CONFIG_CAPACITY_LO_LEN 4
-#define VIRTIO_BLK_CONFIG_CAPACITY_LO_LBN 64
-#define VIRTIO_BLK_CONFIG_CAPACITY_LO_WIDTH 32
-#define VIRTIO_BLK_CONFIG_CAPACITY_HI_OFST 12
-#define VIRTIO_BLK_CONFIG_CAPACITY_HI_LEN 4
-#define VIRTIO_BLK_CONFIG_CAPACITY_HI_LBN 96
-#define VIRTIO_BLK_CONFIG_CAPACITY_HI_WIDTH 32
-#define VIRTIO_BLK_CONFIG_CAPACITY_LBN 64
-#define VIRTIO_BLK_CONFIG_CAPACITY_WIDTH 64
-/* Maximum size of any single segment. Only valid when VIRTIO_BLK_F_SIZE_MAX is
- * set.
- */
-#define VIRTIO_BLK_CONFIG_SIZE_MAX_OFST 16
-#define VIRTIO_BLK_CONFIG_SIZE_MAX_LEN 4
-#define VIRTIO_BLK_CONFIG_SIZE_MAX_LBN 128
-#define VIRTIO_BLK_CONFIG_SIZE_MAX_WIDTH 32
-/* Maximum number of segments in a request. Only valid when
- * VIRTIO_BLK_F_SEG_MAX is set.
- */
-#define VIRTIO_BLK_CONFIG_SEG_MAX_OFST 20
-#define VIRTIO_BLK_CONFIG_SEG_MAX_LEN 4
-#define VIRTIO_BLK_CONFIG_SEG_MAX_LBN 160
-#define VIRTIO_BLK_CONFIG_SEG_MAX_WIDTH 32
-/* Disk-style geometry - cylinders. Only valid when VIRTIO_BLK_F_GEOMETRY is
- * set.
- */
-#define VIRTIO_BLK_CONFIG_CYLINDERS_OFST 24
-#define VIRTIO_BLK_CONFIG_CYLINDERS_LEN 2
-#define VIRTIO_BLK_CONFIG_CYLINDERS_LBN 192
-#define VIRTIO_BLK_CONFIG_CYLINDERS_WIDTH 16
-/* Disk-style geometry - heads. Only valid when VIRTIO_BLK_F_GEOMETRY is set.
- */
-#define VIRTIO_BLK_CONFIG_HEADS_OFST 26
-#define VIRTIO_BLK_CONFIG_HEADS_LEN 1
-#define VIRTIO_BLK_CONFIG_HEADS_LBN 208
-#define VIRTIO_BLK_CONFIG_HEADS_WIDTH 8
-/* Disk-style geometry - sectors. Only valid when VIRTIO_BLK_F_GEOMETRY is set.
- */
-#define VIRTIO_BLK_CONFIG_SECTORS_OFST 27
-#define VIRTIO_BLK_CONFIG_SECTORS_LEN 1
-#define VIRTIO_BLK_CONFIG_SECTORS_LBN 216
-#define VIRTIO_BLK_CONFIG_SECTORS_WIDTH 8
-/* Block size of disk. Only valid when VIRTIO_BLK_F_BLK_SIZE is set. */
-#define VIRTIO_BLK_CONFIG_BLK_SIZE_OFST 28
-#define VIRTIO_BLK_CONFIG_BLK_SIZE_LEN 4
-#define VIRTIO_BLK_CONFIG_BLK_SIZE_LBN 224
-#define VIRTIO_BLK_CONFIG_BLK_SIZE_WIDTH 32
-/* Block topology - number of logical blocks per physical block (log2). Only
- * valid when VIRTIO_BLK_F_TOPOLOGY is set.
- */
-#define VIRTIO_BLK_CONFIG_PHYSICAL_BLOCK_EXP_OFST 32
-#define VIRTIO_BLK_CONFIG_PHYSICAL_BLOCK_EXP_LEN 1
-#define VIRTIO_BLK_CONFIG_PHYSICAL_BLOCK_EXP_LBN 256
-#define VIRTIO_BLK_CONFIG_PHYSICAL_BLOCK_EXP_WIDTH 8
-/* Block topology - offset of first aligned logical block. Only valid when
- * VIRTIO_BLK_F_TOPOLOGY is set.
- */
-#define VIRTIO_BLK_CONFIG_ALIGNMENT_OFFSET_OFST 33
-#define VIRTIO_BLK_CONFIG_ALIGNMENT_OFFSET_LEN 1
-#define VIRTIO_BLK_CONFIG_ALIGNMENT_OFFSET_LBN 264
-#define VIRTIO_BLK_CONFIG_ALIGNMENT_OFFSET_WIDTH 8
-/* Block topology - suggested minimum I/O size in blocks. Only valid when
- * VIRTIO_BLK_F_TOPOLOGY is set.
- */
-#define VIRTIO_BLK_CONFIG_MIN_IO_SIZE_OFST 34
-#define VIRTIO_BLK_CONFIG_MIN_IO_SIZE_LEN 2
-#define VIRTIO_BLK_CONFIG_MIN_IO_SIZE_LBN 272
-#define VIRTIO_BLK_CONFIG_MIN_IO_SIZE_WIDTH 16
-/* Block topology - optimal (suggested maximum) I/O size in blocks. Only valid
- * when VIRTIO_BLK_F_TOPOLOGY is set.
- */
-#define VIRTIO_BLK_CONFIG_OPT_IO_SIZE_OFST 36
-#define VIRTIO_BLK_CONFIG_OPT_IO_SIZE_LEN 4
-#define VIRTIO_BLK_CONFIG_OPT_IO_SIZE_LBN 288
-#define VIRTIO_BLK_CONFIG_OPT_IO_SIZE_WIDTH 32
-/* Unused, set to zero. Note that virtio_blk_config.writeback is volatile and
- * not carried in config data.
- */
-#define VIRTIO_BLK_CONFIG_UNUSED0_OFST 40
-#define VIRTIO_BLK_CONFIG_UNUSED0_LEN 2
-#define VIRTIO_BLK_CONFIG_UNUSED0_LBN 320
-#define VIRTIO_BLK_CONFIG_UNUSED0_WIDTH 16
-/* Number of queues. Only valid if the VIRTIO_BLK_F_MQ feature is negotiated.
- */
-#define VIRTIO_BLK_CONFIG_NUM_QUEUES_OFST 42
-#define VIRTIO_BLK_CONFIG_NUM_QUEUES_LEN 2
-#define VIRTIO_BLK_CONFIG_NUM_QUEUES_LBN 336
-#define VIRTIO_BLK_CONFIG_NUM_QUEUES_WIDTH 16
-/* Maximum discard sectors size, in 512-byte units. Only valid if
- * VIRTIO_BLK_F_DISCARD is set.
- */
-#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SECTORS_OFST 44
-#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SECTORS_LEN 4
-#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SECTORS_LBN 352
-#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SECTORS_WIDTH 32
-/* Maximum discard segment number. Only valid if VIRTIO_BLK_F_DISCARD is set.
- */
-#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SEG_OFST 48
-#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SEG_LEN 4
-#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SEG_LBN 384
-#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SEG_WIDTH 32
-/* Discard sector alignment, in 512-byte units. Only valid if
- * VIRTIO_BLK_F_DISCARD is set.
- */
-#define VIRTIO_BLK_CONFIG_DISCARD_SECTOR_ALIGNMENT_OFST 52
-#define VIRTIO_BLK_CONFIG_DISCARD_SECTOR_ALIGNMENT_LEN 4
-#define VIRTIO_BLK_CONFIG_DISCARD_SECTOR_ALIGNMENT_LBN 416
-#define VIRTIO_BLK_CONFIG_DISCARD_SECTOR_ALIGNMENT_WIDTH 32
-/* Maximum write zeroes sectors size, in 512-byte units. Only valid if
- * VIRTIO_BLK_F_WRITE_ZEROES is set.
- */
-#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SECTORS_OFST 56
-#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SECTORS_LEN 4
-#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SECTORS_LBN 448
-#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SECTORS_WIDTH 32
-/* Maximum write zeroes segment number. Only valid if VIRTIO_BLK_F_WRITE_ZEROES
- * is set.
- */
-#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SEG_OFST 60
-#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SEG_LEN 4
-#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SEG_LBN 480
-#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SEG_WIDTH 32
-/* Write zeroes request can result in deallocating one or more sectors. Only
- * valid if VIRTIO_BLK_F_WRITE_ZEROES is set.
- */
-#define VIRTIO_BLK_CONFIG_WRITE_ZEROES_MAY_UNMAP_OFST 64
-#define VIRTIO_BLK_CONFIG_WRITE_ZEROES_MAY_UNMAP_LEN 1
-#define VIRTIO_BLK_CONFIG_WRITE_ZEROES_MAY_UNMAP_LBN 512
-#define VIRTIO_BLK_CONFIG_WRITE_ZEROES_MAY_UNMAP_WIDTH 8
-/* Unused, set to zero. */
-#define VIRTIO_BLK_CONFIG_UNUSED1_OFST 65
-#define VIRTIO_BLK_CONFIG_UNUSED1_LEN 3
-#define VIRTIO_BLK_CONFIG_UNUSED1_LBN 520
-#define VIRTIO_BLK_CONFIG_UNUSED1_WIDTH 24
-
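As a worked example of the 512-byte-sector convention for CAPACITY above, a sketch that fills just that field of a VIRTIO_BLK_CONFIG buffer from a size in bytes; little-endian field layout is an assumption here, and all other fields are left for the caller to populate.

#include <stdint.h>

#define VIRTIO_BLK_CONFIG_CAPACITY_OFST 8	/* from the structuredef above */

/* Write the 64-bit CAPACITY field, expressed in 512-byte sectors. */
static void set_virtio_blk_capacity(uint8_t *cfg, uint64_t size_bytes)
{
	uint64_t sectors = size_bytes / 512;
	unsigned int i;

	for (i = 0; i < 8; i++)
		cfg[VIRTIO_BLK_CONFIG_CAPACITY_OFST + i] =
			(uint8_t)(sectors >> (8 * i));
}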
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_CONFIG_SET
- * Set configuration for an existing descriptor proxy function. Configuration
- * data must match function personality. The actual function configuration is
- * not persisted until the caller commits with MC_CMD_DESC_PROXY_FUNC_COMMIT_IN
- */
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET 0x174
-#undef MC_CMD_0x174_PRIVILEGE_CTG
-
-#define MC_CMD_0x174_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_LENMIN 20
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_LENMAX 252
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_LENMAX_MCDI2 1020
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_LEN(num) (20+1*(num))
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_NUM(len) (((len)-20)/1)
-/* Handle to descriptor proxy function (as returned by
- * MC_CMD_DESC_PROXY_FUNC_OPEN)
- */
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_HANDLE_LEN 4
-/* Reserved for future extension, set to zero. */
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_RESERVED_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_RESERVED_LEN 16
-/* Configuration data. Format of configuration data is determined implicitly
- * from function personality referred to by HANDLE. Currently, only supported
- * format is VIRTIO_BLK_CONFIG.
- */
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_OFST 20
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_LEN 1
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_MINNUM 0
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_MAXNUM 232
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_MAXNUM_MCDI2 1000
-
-/* MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_COMMIT
- * Commit function configuration to non-volatile or volatile store. Once
- * configuration is applied to hardware (which may happen immediately or on
- * next function/device reset) a DESC_PROXY_FUNC_CONFIG_SET MCDI event will be
- * delivered to the caller's MCDI event queue.
- */
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT 0x175
-#undef MC_CMD_0x175_PRIVILEGE_CTG
-
-#define MC_CMD_0x175_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_COMMIT_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_LEN 8
-/* Handle to descriptor proxy function (as returned by
- * MC_CMD_DESC_PROXY_FUNC_OPEN)
- */
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_HANDLE_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_STORE_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_STORE_LEN 4
-/* enum: Store into non-volatile (dynamic) config */
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_NON_VOLATILE 0x0
-/* enum: Store into volatile (ephemeral) config */
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_VOLATILE 0x1
-
-/* MC_CMD_DESC_PROXY_FUNC_COMMIT_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_OUT_LEN 4
-/* Generation count to be delivered in an event once configuration becomes live
- */
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_OUT_CONFIG_GENERATION_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_OUT_CONFIG_GENERATION_LEN 4
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_OPEN
- * Retrieve a handle for an existing descriptor proxy function. Returns an
- * integer handle, valid until the function is deallocated, the MC is rebooted
- * or the device is power-cycled. Returns ENODEV if no function with the given
- * label exists.
- */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN 0x176
-#undef MC_CMD_0x176_PRIVILEGE_CTG
-
-#define MC_CMD_0x176_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_OPEN_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_IN_LEN 40
-/* User-defined label (zero-terminated ASCII string) to uniquely identify the
- * function
- */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_IN_LABEL_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_IN_LABEL_LEN 40
-
-/* MC_CMD_DESC_PROXY_FUNC_OPEN_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LENMIN 40
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LENMAX 252
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LEN(num) (40+1*(num))
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_NUM(len) (((len)-40)/1)
-/* Handle to the descriptor proxy function */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_HANDLE_LEN 4
-/* PCIe Function ID (as struct PCIE_FUNCTION) */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LEN 8
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LO_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LO_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LO_LBN 32
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LO_WIDTH 32
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_HI_OFST 8
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_HI_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_HI_LBN 64
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_HI_WIDTH 32
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_PF_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_PF_LEN 2
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_VF_OFST 6
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_VF_LEN 2
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_INTF_OFST 8
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_INTF_LEN 4
-/* Function personality */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_PERSONALITY_OFST 12
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_PERSONALITY_LEN 4
-/* Enum values, see field(s): */
-/* FUNCTION_PERSONALITY/ID */
-/* Function configuration state */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_STATUS_OFST 16
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_STATUS_LEN 4
-/* enum: Function configuration is visible to the host (live) */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LIVE 0x0
-/* enum: Function configuration is pending reset */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_PENDING 0x1
-/* enum: Function configuration is missing (created, but no configuration
- * committed)
- */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_UNCONFIGURED 0x2
-/* Generation count to be delivered in an event once the configuration becomes
- * live (if status is "pending")
- */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_GENERATION_OFST 20
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_GENERATION_LEN 4
-/* Reserved for future extension, set to zero. */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_RESERVED_OFST 24
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_RESERVED_LEN 16
-/* Configuration data corresponding to function personality. Currently, only
- * supported format is VIRTIO_BLK_CONFIG. Not valid if status is UNCONFIGURED.
- */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_OFST 40
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_LEN 1
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_MINNUM 0
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_MAXNUM 212
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_MAXNUM_MCDI2 980
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_CLOSE
- * Releases a handle for an open descriptor proxy function. If proxying was
- * enabled on the device, the caller is expected to gracefully stop it using
- * MC_CMD_DESC_PROXY_FUNC_DISABLE prior to calling this function. Closing an
- * active device without disabling proxying will result in forced close, which
- * will put the device into a failed state and signal the host driver of the
- * error (for virtio, DEVICE_NEEDS_RESET flag would be set on the host side)
- */
-#define MC_CMD_DESC_PROXY_FUNC_CLOSE 0x1a1
-#undef MC_CMD_0x1a1_PRIVILEGE_CTG
-
-#define MC_CMD_0x1a1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_CLOSE_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_CLOSE_IN_LEN 4
-/* Handle to the descriptor proxy function */
-#define MC_CMD_DESC_PROXY_FUNC_CLOSE_IN_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_CLOSE_IN_HANDLE_LEN 4
-
-/* MC_CMD_DESC_PROXY_FUNC_CLOSE_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_CLOSE_OUT_LEN 0
-
-/* DESC_PROXY_FUNC_MAP structuredef */
-#define DESC_PROXY_FUNC_MAP_LEN 52
-/* PCIe function ID (as struct PCIE_FUNCTION) */
-#define DESC_PROXY_FUNC_MAP_FUNC_OFST 0
-#define DESC_PROXY_FUNC_MAP_FUNC_LEN 8
-#define DESC_PROXY_FUNC_MAP_FUNC_LO_OFST 0
-#define DESC_PROXY_FUNC_MAP_FUNC_LO_LEN 4
-#define DESC_PROXY_FUNC_MAP_FUNC_LO_LBN 0
-#define DESC_PROXY_FUNC_MAP_FUNC_LO_WIDTH 32
-#define DESC_PROXY_FUNC_MAP_FUNC_HI_OFST 4
-#define DESC_PROXY_FUNC_MAP_FUNC_HI_LEN 4
-#define DESC_PROXY_FUNC_MAP_FUNC_HI_LBN 32
-#define DESC_PROXY_FUNC_MAP_FUNC_HI_WIDTH 32
-#define DESC_PROXY_FUNC_MAP_FUNC_LBN 0
-#define DESC_PROXY_FUNC_MAP_FUNC_WIDTH 64
-#define DESC_PROXY_FUNC_MAP_FUNC_PF_OFST 0
-#define DESC_PROXY_FUNC_MAP_FUNC_PF_LEN 2
-#define DESC_PROXY_FUNC_MAP_FUNC_PF_LBN 0
-#define DESC_PROXY_FUNC_MAP_FUNC_PF_WIDTH 16
-#define DESC_PROXY_FUNC_MAP_FUNC_VF_OFST 2
-#define DESC_PROXY_FUNC_MAP_FUNC_VF_LEN 2
-#define DESC_PROXY_FUNC_MAP_FUNC_VF_LBN 16
-#define DESC_PROXY_FUNC_MAP_FUNC_VF_WIDTH 16
-#define DESC_PROXY_FUNC_MAP_FUNC_INTF_OFST 4
-#define DESC_PROXY_FUNC_MAP_FUNC_INTF_LEN 4
-#define DESC_PROXY_FUNC_MAP_FUNC_INTF_LBN 32
-#define DESC_PROXY_FUNC_MAP_FUNC_INTF_WIDTH 32
-/* Function personality */
-#define DESC_PROXY_FUNC_MAP_PERSONALITY_OFST 8
-#define DESC_PROXY_FUNC_MAP_PERSONALITY_LEN 4
-/* Enum values, see field(s): */
-/* FUNCTION_PERSONALITY/ID */
-#define DESC_PROXY_FUNC_MAP_PERSONALITY_LBN 64
-#define DESC_PROXY_FUNC_MAP_PERSONALITY_WIDTH 32
-/* User-defined label (zero-terminated ASCII string) to uniquely identify the
- * function
- */
-#define DESC_PROXY_FUNC_MAP_LABEL_OFST 12
-#define DESC_PROXY_FUNC_MAP_LABEL_LEN 40
-#define DESC_PROXY_FUNC_MAP_LABEL_LBN 96
-#define DESC_PROXY_FUNC_MAP_LABEL_WIDTH 320
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_ENUM
- * Enumerate existing descriptor proxy functions
- */
-#define MC_CMD_DESC_PROXY_FUNC_ENUM 0x177
-#undef MC_CMD_0x177_PRIVILEGE_CTG
-
-#define MC_CMD_0x177_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_ENUM_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_IN_LEN 4
-/* Starting index, set to 0 on first request. See
- * MC_CMD_DESC_PROXY_FUNC_ENUM_OUT/FLAGS.
- */
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_IN_START_IDX_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_IN_START_IDX_LEN 4
-
-/* MC_CMD_DESC_PROXY_FUNC_ENUM_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_LENMIN 4
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_LENMAX 212
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_LENMAX_MCDI2 992
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_LEN(num) (4+52*(num))
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_NUM(len) (((len)-4)/52)
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FLAGS_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FLAGS_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_MORE_DATA_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_MORE_DATA_LBN 0
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_MORE_DATA_WIDTH 1
-/* Function map, as array of DESC_PROXY_FUNC_MAP */
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_LEN 52
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_MINNUM 0
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_MAXNUM 4
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_MAXNUM_MCDI2 19
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_ENABLE
- * Enable descriptor proxying for function into target event queue. Returns VI
- * allocation info for the proxy source function, so that the caller can map
- * absolute VI IDs from descriptor proxy events back to the originating
- * function. This is a legacy function that only supports single queue proxy
- * devices. It is also limited in that it can only be called after host driver
- * attach (once VI allocation is known) and will return MC_CMD_ERR_ENOTCONN
- * otherwise. For new code, see MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE which
- * supports multi-queue devices and has no dependency on host driver attach.
- */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE 0x178
-#undef MC_CMD_0x178_PRIVILEGE_CTG
-
-#define MC_CMD_0x178_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_ENABLE_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_IN_LEN 8
-/* Handle to descriptor proxy function (as returned by
- * MC_CMD_DESC_PROXY_FUNC_OPEN)
- */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_IN_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_IN_HANDLE_LEN 4
-/* Descriptor proxy sink queue (caller function relative). Must be extended
- * width event queue
- */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_IN_TARGET_EVQ_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_IN_TARGET_EVQ_LEN 4
-
-/* MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT_LEN 8
-/* The number of VIs allocated on the function */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT_VI_COUNT_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT_VI_COUNT_LEN 4
-/* The base absolute VI number allocated to the function. */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT_VI_BASE_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT_VI_BASE_LEN 4
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE
- * Enable descriptor proxying for a source queue on a host function into target
- * event queue. Source queue number is a relative virtqueue number on the
- * source function (0 to max_virtqueues-1). For a multi-queue device, the
- * caller must enable all source queues individually. To retrieve absolute VI
- * information for the source function (so that VI IDs from descriptor proxy
- * events can be mapped back to source function / queue) see
- * MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO
- */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE 0x1d0
-#undef MC_CMD_0x1d0_PRIVILEGE_CTG
-
-#define MC_CMD_0x1d0_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_LEN 12
-/* Handle to descriptor proxy function (as returned by
- * MC_CMD_DESC_PROXY_FUNC_OPEN)
- */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_HANDLE_LEN 4
-/* Source relative queue number to enable proxying on */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_SOURCE_QUEUE_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_SOURCE_QUEUE_LEN 4
-/* Descriptor proxy sink queue (caller function relative). Must be extended
- * width event queue
- */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_TARGET_EVQ_OFST 8
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_TARGET_EVQ_LEN 4
-
-/* MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_DISABLE
- * Disable descriptor proxying for function. For multi-queue functions,
- * disables all queues.
- */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE 0x179
-#undef MC_CMD_0x179_PRIVILEGE_CTG
-
-#define MC_CMD_0x179_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_DISABLE_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_IN_LEN 4
-/* Handle to descriptor proxy function (as returned by
- * MC_CMD_DESC_PROXY_FUNC_OPEN)
- */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_IN_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_IN_HANDLE_LEN 4
-
-/* MC_CMD_DESC_PROXY_FUNC_DISABLE_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE
- * Disable descriptor proxying for a specific source queue on a function.
- */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE 0x1d1
-#undef MC_CMD_0x1d1_PRIVILEGE_CTG
-
-#define MC_CMD_0x1d1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN_LEN 8
-/* Handle to descriptor proxy function (as returned by
- * MC_CMD_DESC_PROXY_FUNC_OPEN)
- */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN_HANDLE_LEN 4
-/* Source relative queue number to disable proxying on */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN_SOURCE_QUEUE_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN_SOURCE_QUEUE_LEN 4
-
-/* MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_GET_VI_INFO
- * Returns absolute VI allocation information for the descriptor proxy source
- * function referenced by HANDLE, so that the caller can map absolute VI IDs
- * from descriptor proxy events back to the originating function and queue. The
- * call is only valid after the host driver for the source function has
- * attached (after receiving a driver attach event for the descriptor proxy
- * function) and will fail with ENOTCONN otherwise.
- */
-#define MC_CMD_DESC_PROXY_GET_VI_INFO 0x1d2
-#undef MC_CMD_0x1d2_PRIVILEGE_CTG
-
-#define MC_CMD_0x1d2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_GET_VI_INFO_IN msgrequest */
-#define MC_CMD_DESC_PROXY_GET_VI_INFO_IN_LEN 4
-/* Handle to descriptor proxy function (as returned by
- * MC_CMD_DESC_PROXY_FUNC_OPEN)
- */
-#define MC_CMD_DESC_PROXY_GET_VI_INFO_IN_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_GET_VI_INFO_IN_HANDLE_LEN 4
-
-/* MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_LENMIN 0
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_LENMAX 252
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_NUM(len) (((len)-0)/4)
-/* VI information (VI ID + VI relative queue number) for each of the source
- * queues (in order from 0 to max_virtqueues-1), as array of QUEUE_ID
- * structures.
- */
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_MINNUM 0
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_MAXNUM 63
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_MAXNUM_MCDI2 255
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_ABS_VI_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_ABS_VI_LEN 2
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_REL_QUEUE_LBN 16
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_REL_QUEUE_WIDTH 1
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_RESERVED_LBN 17
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_RESERVED_WIDTH 15
-
-
-/***********************************/
-/* MC_CMD_GET_ADDR_SPC_ID
- * Get Address space identifier for use in mem2mem descriptors for a given
- * target. See SF-120734-TC for details on ADDR_SPC_IDs and mem2mem
- * descriptors.
- */
-#define MC_CMD_GET_ADDR_SPC_ID 0x1a0
-#undef MC_CMD_0x1a0_PRIVILEGE_CTG
-
-#define MC_CMD_0x1a0_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_GET_ADDR_SPC_ID_IN msgrequest */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_LEN 16
-/* Resource type to get ADDR_SPC_ID for */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_TYPE_OFST 0
-#define MC_CMD_GET_ADDR_SPC_ID_IN_TYPE_LEN 4
-/* enum: Address space ID for host/AP memory DMA over the same interface this
- * MCDI was called on
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_SELF 0x0
-/* enum: Address space ID for host/AP memory DMA via PCI interface and function
- * specified by FUNC
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_PCI_FUNC 0x1
-/* enum: Address space ID for host/AP memory DMA via PCI interface and function
- * specified by FUNC with PASID value specified by PASID
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_PCI_FUNC_PASID 0x2
-/* enum: Address space ID for host/AP memory DMA via PCI interface and function
- * specified by FUNC with PASID value of relative VI specified by VI
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_REL_VI 0x3
-/* enum: Address space ID for host/AP memory DMA via PCI interface, function
- * and PASID value of absolute VI specified by VI
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_ABS_VI 0x4
-/* enum: Address space ID for host memory DMA via PCI interface and function of
- * descriptor proxy function specified by HANDLE
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_DESC_PROXY_HANDLE 0x5
-/* enum: Address space ID for DMA to/from MC memory */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_MC_MEM 0x6
-/* enum: Address space ID for DMA to/from other SmartNIC memory (on-chip, DDR)
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_NIC_MEM 0x7
-/* PCIe Function ID (as struct PCIE_FUNCTION). Only valid if TYPE is PCI_FUNC,
- * PCI_FUNC_PASID or REL_VI.
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_OFST 4
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LEN 8
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LO_OFST 4
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LO_LEN 4
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LO_LBN 32
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LO_WIDTH 32
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_HI_OFST 8
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_HI_LEN 4
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_HI_LBN 64
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_HI_WIDTH 32
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_PF_OFST 4
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_PF_LEN 2
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_VF_OFST 6
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_VF_LEN 2
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_INTF_OFST 8
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_INTF_LEN 4
-/* PASID value. Only valid if TYPE is PCI_FUNC_PASID. */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_PASID_OFST 12
-#define MC_CMD_GET_ADDR_SPC_ID_IN_PASID_LEN 4
-/* Relative or absolute VI number. Only valid if TYPE is REL_VI or ABS_VI */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_VI_OFST 12
-#define MC_CMD_GET_ADDR_SPC_ID_IN_VI_LEN 4
-/* Descriptor proxy function handle. Only valid if TYPE is DESC_PROXY_HANDLE.
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_HANDLE_OFST 4
-#define MC_CMD_GET_ADDR_SPC_ID_IN_HANDLE_LEN 4
-
-/* MC_CMD_GET_ADDR_SPC_ID_OUT msgresponse */
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_LEN 8
-/* Address Space ID for the requested target. Only the lower 36 bits are valid
- * in the current SmartNIC implementation.
- */
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_OFST 0
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LEN 8
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LO_OFST 0
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LO_LEN 4
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LO_LBN 0
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LO_WIDTH 32
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_HI_OFST 4
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_HI_LEN 4
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_HI_LBN 32
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_HI_WIDTH 32
-
/***********************************/
/* MC_CMD_GET_CLIENT_HANDLE
@@ -26359,10 +22483,11 @@
* INTF=CALLER, PF=PF_NULL, VF=... to refer to a VF child of the calling PF or
* a sibling VF of the calling VF. - INTF=CALLER, PF=..., VF=VF_NULL to refer
* to a PF on the calling interface - INTF=CALLER, PF=..., VF=... to refer to a
- * VF on the calling interface - INTF=..., PF=..., VF=VF_NULL to refer to a PF
+ * VF on the calling interface - INTF=..., PF=PF_NULL, VF=VF_NULL to refer to
+ * the named interface itself - INTF=..., PF=..., VF=VF_NULL to refer to a PF
* on a named interface - INTF=..., PF=..., VF=... to refer to a VF on a named
* interface where ... refers to a small integer for the VF/PF fields, and to
- * values from the PCIE_INTERFACE enum for for the INTF field. It's only
+ * values from the PCIE_INTERFACE enum for the INTF field. It's only
* meaningful to use INTF=CALLER within a structure that's an argument to
* MC_CMD_DEVEL_GET_CLIENT_HANDLE.
*/
@@ -26380,6 +22505,7 @@
* backwards compatibility only, callers should use PCIE_INTERFACE_CALLER.
*/
#define MC_CMD_GET_CLIENT_HANDLE_IN_PCIE_FUNCTION_INTF_NULL 0xffffffff
+/* See structuredef: PCIE_FUNCTION */
#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_PF_OFST 4
#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_PF_LEN 2
#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_VF_OFST 6
@@ -27350,7 +23476,7 @@
/* MAE_MPORT_SELECTOR structuredef: MPORTS are identified by an opaque unsigned
* integer value (mport_id) that is guaranteed to be representable within
* 32-bits or within any NIC interface field that needs store the value
- * (whichever is narrowers). This selector structure provides a stable way to
+ * (whichever is narrower). This selector structure provides a stable way to
* refer to m-ports.
*/
#define MAE_MPORT_SELECTOR_LEN 4
@@ -27425,10 +23551,22 @@
#define MAE_MPORT_SELECTOR_FLAT_WIDTH 32
/* MAE_LINK_ENDPOINT_SELECTOR structuredef: Structure that identifies a real or
- * virtual network port by MAE port and link end
+ * virtual network port by MAE port and link end. Intended to be used by
+ * network port MCDI commands. Setting FLAT to MAE_LINK_ENDPOINT_COMPAT is
+ * equivalent to using the previous version of the command. Not all possible
+ * combinations of MPORT_END and MPORT_SELECTOR in MAE_LINK_ENDPOINT_SELECTOR
+ * will work in all circumstances. 1. Some will always work (e.g. a VF can
+ * always address its logical MAC using MPORT_SELECTOR=ASSIGNED,LINK_END=VNIC),
+ * 2. Some are not meaningful and will always fail with EINVAL (e.g. attempting
+ * to address the VNIC end of a link to a physical port), 3. Some are
+ * meaningful but require the MCDI client to have the required permission and
+ * fail with EPERM otherwise (e.g. trying to set the MAC on a VF the caller
+ * cannot administer), and 4. Some could be implementation-specific and fail
+ * with ENOTSUP if not available (no examples exist right now). See
+ * SF-123581-TC section 4.3 for more details.
*/
#define MAE_LINK_ENDPOINT_SELECTOR_LEN 8
-/* The MAE MPORT of interest */
+/* Identifier for the MAE MPORT of interest */
#define MAE_LINK_ENDPOINT_SELECTOR_MPORT_SELECTOR_OFST 0
#define MAE_LINK_ENDPOINT_SELECTOR_MPORT_SELECTOR_LEN 4
#define MAE_LINK_ENDPOINT_SELECTOR_MPORT_SELECTOR_LBN 0
@@ -27829,6 +23967,8 @@
#define MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_MAXNUM_MCDI2 253
/* enum: A counter ID that is guaranteed never to represent a real counter */
#define MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_NULL 0xffffffff
+/* Other enum values, see field(s): */
+/* MAE_COUNTER_ID */
/***********************************/
@@ -28266,6 +24406,24 @@
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_SUPPRESS_SELF_DELIVERY_OFST 0
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_SUPPRESS_SELF_DELIVERY_LBN 14
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_SUPPRESS_SELF_DELIVERY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_C_PL_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_C_PL_LBN 15
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_C_PL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_D_PL_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_D_PL_LBN 16
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_D_PL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_LBN 17
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_SET_NET_CHAN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_SET_NET_CHAN_LBN 18
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_SET_NET_CHAN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_LACP_PLUGIN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_LACP_PLUGIN_LBN 19
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_LACP_PLUGIN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_LACP_INC_L4_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_LACP_INC_L4_LBN 20
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_LACP_INC_L4_WIDTH 1
/* If VLAN_PUSH >= 1, TCI value to be inserted as outermost VLAN. */
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN0_TCI_BE_OFST 4
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN0_TCI_BE_LEN 2
@@ -28291,19 +24449,23 @@
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DELIVER_OFST 20
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DELIVER_LEN 4
/* Allows an action set to trigger several counter updates. Set to
- * COUNTER_LIST_ID_NULL to request no counter action.
+ * MAE_COUNTER_ID_NULL to request no counter action.
*/
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_COUNTER_LIST_ID_OFST 24
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_COUNTER_LIST_ID_LEN 4
+/* Enum values, see field(s): */
+/* MAE_COUNTER_ID */
/* If a driver only wished to update one counter within this action set, then
* it can supply a COUNTER_ID instead of allocating a single-element counter
* list. The ID must have been allocated with COUNTER_TYPE=AR. This field
- * should be set to COUNTER_ID_NULL if this behaviour is not required. It is
- * not valid to supply a non-NULL value for both COUNTER_LIST_ID and
+ * should be set to MAE_COUNTER_ID_NULL if this behaviour is not required. It
+ * is not valid to supply a non-NULL value for both COUNTER_LIST_ID and
* COUNTER_ID.
*/
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_COUNTER_ID_OFST 28
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_COUNTER_ID_LEN 4
+/* Enum values, see field(s): */
+/* MAE_COUNTER_ID */
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_MARK_VALUE_OFST 32
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_MARK_VALUE_LEN 4
/* Set to MAC_ID_NULL to request no source MAC replacement. */
@@ -28347,6 +24509,24 @@
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_SUPPRESS_SELF_DELIVERY_OFST 0
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_SUPPRESS_SELF_DELIVERY_LBN 14
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_SUPPRESS_SELF_DELIVERY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_C_PL_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_C_PL_LBN 15
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_C_PL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_D_PL_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_D_PL_LBN 16
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_D_PL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_LBN 17
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_SET_NET_CHAN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_SET_NET_CHAN_LBN 18
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_SET_NET_CHAN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_LACP_PLUGIN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_LACP_PLUGIN_LBN 19
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_LACP_PLUGIN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_LACP_INC_L4_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_LACP_INC_L4_LBN 20
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_LACP_INC_L4_WIDTH 1
/* If VLAN_PUSH >= 1, TCI value to be inserted as outermost VLAN. */
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN0_TCI_BE_OFST 4
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN0_TCI_BE_LEN 2
@@ -28372,19 +24552,23 @@
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DELIVER_OFST 20
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DELIVER_LEN 4
/* Allows an action set to trigger several counter updates. Set to
- * COUNTER_LIST_ID_NULL to request no counter action.
+ * MAE_COUNTER_ID_NULL to request no counter action.
*/
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_COUNTER_LIST_ID_OFST 24
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_COUNTER_LIST_ID_LEN 4
+/* Enum values, see field(s): */
+/* MAE_COUNTER_ID */
/* If a driver only wished to update one counter within this action set, then
* it can supply a COUNTER_ID instead of allocating a single-element counter
* list. The ID must have been allocated with COUNTER_TYPE=AR. This field
- * should be set to COUNTER_ID_NULL if this behaviour is not required. It is
- * not valid to supply a non-NULL value for both COUNTER_LIST_ID and
+ * should be set to MAE_COUNTER_ID_NULL if this behaviour is not required. It
+ * is not valid to supply a non-NULL value for both COUNTER_LIST_ID and
* COUNTER_ID.
*/
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_COUNTER_ID_OFST 28
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_COUNTER_ID_LEN 4
+/* Enum values, see field(s): */
+/* MAE_COUNTER_ID */
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_MARK_VALUE_OFST 32
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_MARK_VALUE_LEN 4
/* Set to MAC_ID_NULL to request no source MAC replacement. */
@@ -28437,6 +24621,172 @@
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_ECT_1_TO_CE_LBN 6
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_ECT_1_TO_CE_WIDTH 1
+/* MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN msgrequest: Only supported if
+ * MAE_ACTION_SET_ALLOC_V3_SUPPORTED is advertised in
+ * MC_CMD_GET_CAPABILITIES_V10_OUT.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LEN 53
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_FLAGS_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_FLAGS_LEN 4
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN_PUSH_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN_PUSH_LBN 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN_PUSH_WIDTH 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN_POP_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN_POP_LBN 4
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN_POP_WIDTH 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DECAP_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DECAP_LBN 8
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DECAP_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_MARK_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_MARK_LBN 9
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_MARK_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_FLAG_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_FLAG_LBN 10
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_FLAG_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_NAT_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_NAT_LBN 11
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_NAT_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DECR_IP_TTL_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DECR_IP_TTL_LBN 12
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DECR_IP_TTL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_SET_SRC_MPORT_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_SET_SRC_MPORT_LBN 13
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_SET_SRC_MPORT_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_SUPPRESS_SELF_DELIVERY_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_SUPPRESS_SELF_DELIVERY_LBN 14
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_SUPPRESS_SELF_DELIVERY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_C_PL_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_C_PL_LBN 15
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_C_PL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_D_PL_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_D_PL_LBN 16
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_D_PL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_LBN 17
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_SET_NET_CHAN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_SET_NET_CHAN_LBN 18
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_SET_NET_CHAN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LACP_PLUGIN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LACP_PLUGIN_LBN 19
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LACP_PLUGIN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LACP_INC_L4_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LACP_INC_L4_LBN 20
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LACP_INC_L4_WIDTH 1
+/* If VLAN_PUSH >= 1, TCI value to be inserted as outermost VLAN. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN0_TCI_BE_OFST 4
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN0_TCI_BE_LEN 2
+/* If VLAN_PUSH >= 1, TPID value to be inserted as outermost VLAN. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN0_PROTO_BE_OFST 6
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN0_PROTO_BE_LEN 2
+/* If VLAN_PUSH == 2, inner TCI value to be inserted. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN1_TCI_BE_OFST 8
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN1_TCI_BE_LEN 2
+/* If VLAN_PUSH == 2, inner TPID value to be inserted. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN1_PROTO_BE_OFST 10
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN1_PROTO_BE_LEN 2
+/* Reserved. Ignored by firmware. Should be set to zero or 0xffffffff. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RSVD_OFST 12
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RSVD_LEN 4
+/* Set to ENCAP_HEADER_ID_NULL to request no encap action */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ENCAP_HEADER_ID_OFST 16
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ENCAP_HEADER_ID_LEN 4
+/* An m-port selector identifying the m-port that the modified packet should be
+ * delivered to. Set to MPORT_SELECTOR_NULL to request no delivery of the
+ * packet.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DELIVER_OFST 20
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DELIVER_LEN 4
+/* Allows an action set to trigger several counter updates. Set to
+ * MAE_COUNTER_ID_NULL to request no counter action.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_COUNTER_LIST_ID_OFST 24
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_COUNTER_LIST_ID_LEN 4
+/* Enum values, see field(s): */
+/* MAE_COUNTER_ID */
+/* If a driver only wished to update one counter within this action set, then
+ * it can supply a COUNTER_ID instead of allocating a single-element counter
+ * list. The ID must have been allocated with COUNTER_TYPE=AR. This field
+ * should be set to MAE_COUNTER_ID_NULL if this behaviour is not required. It
+ * is not valid to supply a non-NULL value for both COUNTER_LIST_ID and
+ * COUNTER_ID.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_COUNTER_ID_OFST 28
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_COUNTER_ID_LEN 4
+/* Enum values, see field(s): */
+/* MAE_COUNTER_ID */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_MARK_VALUE_OFST 32
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_MARK_VALUE_LEN 4
+/* Set to MAC_ID_NULL to request no source MAC replacement. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_SRC_MAC_ID_OFST 36
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_SRC_MAC_ID_LEN 4
+/* Set to MAC_ID_NULL to request no destination MAC replacement. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DST_MAC_ID_OFST 40
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DST_MAC_ID_LEN 4
+/* Source m-port ID to be reported for DO_SET_SRC_MPORT action. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_REPORTED_SRC_MPORT_OFST 44
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_REPORTED_SRC_MPORT_LEN 4
+/* Actions for modifying the Differentiated Services Code-Point (DSCP) bits
+ * within IPv4 and IPv6 headers.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DSCP_CONTROL_OFST 48
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DSCP_CONTROL_LEN 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DSCP_ENCAP_COPY_OFST 48
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DSCP_ENCAP_COPY_LBN 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DSCP_ENCAP_COPY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DSCP_DECAP_COPY_OFST 48
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DSCP_DECAP_COPY_LBN 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DSCP_DECAP_COPY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_DSCP_OFST 48
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_DSCP_LBN 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_DSCP_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DSCP_VALUE_OFST 48
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DSCP_VALUE_LBN 3
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DSCP_VALUE_WIDTH 6
+/* Actions for modifying the Explicit Congestion Notification (ECN) bits within
+ * IPv4 and IPv6 headers.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_CONTROL_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_CONTROL_LEN 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_ECN_ENCAP_COPY_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_ECN_ENCAP_COPY_LBN 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_ECN_ENCAP_COPY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_ECN_DECAP_COPY_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_ECN_DECAP_COPY_LBN 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_ECN_DECAP_COPY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_ECN_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_ECN_LBN 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_ECN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_VALUE_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_VALUE_LBN 3
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_VALUE_WIDTH 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_ECT_0_TO_CE_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_ECT_0_TO_CE_LBN 5
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_ECT_0_TO_CE_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_ECT_1_TO_CE_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_ECT_1_TO_CE_LBN 6
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_ECT_1_TO_CE_WIDTH 1
+/* Actions for overwriting CH_ROUTE subfields. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_OVERWRITE_OFST 51
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_OVERWRITE_LEN 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_C_PL_OFST 51
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_C_PL_LBN 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_C_PL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_D_PL_OFST 51
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_D_PL_LBN 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_D_PL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_PL_CHAN_OFST 51
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_PL_CHAN_LBN 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_PL_CHAN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_OUT_HOST_CHAN_OFST 51
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_OUT_HOST_CHAN_LBN 3
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_OUT_HOST_CHAN_WIDTH 1
+/* Override outgoing CH_VC to network port for DO_SET_NET_CHAN action. Cannot
+ * be used in conjunction with DO_SET_SRC_MPORT action.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_NET_CHAN_OFST 52
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_NET_CHAN_LEN 1
+
/* MC_CMD_MAE_ACTION_SET_ALLOC_OUT msgresponse */
#define MC_CMD_MAE_ACTION_SET_ALLOC_OUT_LEN 4
/* The MSB of the AS_ID is guaranteed to be clear if the ID is not
@@ -28680,58 +25030,6 @@
#define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_REMOVED_OR_ID_MAXNUM 32
#define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_REMOVED_OR_ID_MAXNUM_MCDI2 32
-
-/***********************************/
-/* MC_CMD_MAE_OUTER_RULE_UPDATE
- * Atomically change the response of an Outer Rule.
- */
-#define MC_CMD_MAE_OUTER_RULE_UPDATE 0x17d
-#undef MC_CMD_0x17d_PRIVILEGE_CTG
-
-#define MC_CMD_0x17d_PRIVILEGE_CTG SRIOV_CTG_MAE
-
-/* MC_CMD_MAE_OUTER_RULE_UPDATE_IN msgrequest */
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_LEN 16
-/* ID of outer rule to update */
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_OR_ID_OFST 0
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_OR_ID_LEN 4
-/* Packets matching the rule will be parsed with this encapsulation. */
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_ENCAP_TYPE_OFST 4
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_ENCAP_TYPE_LEN 4
-/* Enum values, see field(s): */
-/* MAE_MCDI_ENCAP_TYPE */
-/* This field controls the actions that are performed when a rule is hit. */
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_ACTION_CONTROL_OFST 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_ACTION_CONTROL_LEN 4
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_CT_OFST 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_CT_LBN 0
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_CT_WIDTH 1
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_VNI_MODE_OFST 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_VNI_MODE_LBN 1
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_VNI_MODE_WIDTH 2
-/* Enum values, see field(s): */
-/* MAE_CT_VNI_MODE */
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_COUNT_OFST 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_COUNT_LBN 3
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_COUNT_WIDTH 1
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_TCP_FLAGS_INHIBIT_OFST 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_TCP_FLAGS_INHIBIT_LBN 4
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_TCP_FLAGS_INHIBIT_WIDTH 1
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_RECIRC_ID_OFST 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_RECIRC_ID_LBN 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_RECIRC_ID_WIDTH 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_DOMAIN_OFST 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_DOMAIN_LBN 16
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_DOMAIN_WIDTH 16
-/* ID of counter to increment when the rule is hit. Only used if the DO_COUNT
- * flag is set. The ID must have been allocated with COUNTER_TYPE=OR.
- */
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_COUNTER_ID_OFST 12
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_COUNTER_ID_LEN 4
-
-/* MC_CMD_MAE_OUTER_RULE_UPDATE_OUT msgresponse */
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_OUT_LEN 0
-
/* MAE_ACTION_RULE_RESPONSE structuredef */
#define MAE_ACTION_RULE_RESPONSE_LEN 16
#define MAE_ACTION_RULE_RESPONSE_ASL_ID_OFST 0
@@ -29122,142 +25420,6 @@
#define MAE_MPORT_DESC_VNIC_PLUGIN_TBD_LBN 352
#define MAE_MPORT_DESC_VNIC_PLUGIN_TBD_WIDTH 32
-/* MAE_MPORT_DESC_V2 structuredef */
-#define MAE_MPORT_DESC_V2_LEN 56
-#define MAE_MPORT_DESC_V2_MPORT_ID_OFST 0
-#define MAE_MPORT_DESC_V2_MPORT_ID_LEN 4
-#define MAE_MPORT_DESC_V2_MPORT_ID_LBN 0
-#define MAE_MPORT_DESC_V2_MPORT_ID_WIDTH 32
-/* Reserved for future purposes, contains information independent of caller */
-#define MAE_MPORT_DESC_V2_FLAGS_OFST 4
-#define MAE_MPORT_DESC_V2_FLAGS_LEN 4
-#define MAE_MPORT_DESC_V2_FLAGS_LBN 32
-#define MAE_MPORT_DESC_V2_FLAGS_WIDTH 32
-#define MAE_MPORT_DESC_V2_CALLER_FLAGS_OFST 8
-#define MAE_MPORT_DESC_V2_CALLER_FLAGS_LEN 4
-#define MAE_MPORT_DESC_V2_CAN_RECEIVE_ON_OFST 8
-#define MAE_MPORT_DESC_V2_CAN_RECEIVE_ON_LBN 0
-#define MAE_MPORT_DESC_V2_CAN_RECEIVE_ON_WIDTH 1
-#define MAE_MPORT_DESC_V2_CAN_DELIVER_TO_OFST 8
-#define MAE_MPORT_DESC_V2_CAN_DELIVER_TO_LBN 1
-#define MAE_MPORT_DESC_V2_CAN_DELIVER_TO_WIDTH 1
-#define MAE_MPORT_DESC_V2_CAN_DELETE_OFST 8
-#define MAE_MPORT_DESC_V2_CAN_DELETE_LBN 2
-#define MAE_MPORT_DESC_V2_CAN_DELETE_WIDTH 1
-#define MAE_MPORT_DESC_V2_IS_ZOMBIE_OFST 8
-#define MAE_MPORT_DESC_V2_IS_ZOMBIE_LBN 3
-#define MAE_MPORT_DESC_V2_IS_ZOMBIE_WIDTH 1
-#define MAE_MPORT_DESC_V2_CALLER_FLAGS_LBN 64
-#define MAE_MPORT_DESC_V2_CALLER_FLAGS_WIDTH 32
-/* Not the ideal name; it's really the type of thing connected to the m-port */
-#define MAE_MPORT_DESC_V2_MPORT_TYPE_OFST 12
-#define MAE_MPORT_DESC_V2_MPORT_TYPE_LEN 4
-/* enum: Connected to a MAC... */
-#define MAE_MPORT_DESC_V2_MPORT_TYPE_NET_PORT 0x0
-/* enum: Adds metadata and delivers to another m-port */
-#define MAE_MPORT_DESC_V2_MPORT_TYPE_ALIAS 0x1
-/* enum: Connected to a VNIC. */
-#define MAE_MPORT_DESC_V2_MPORT_TYPE_VNIC 0x2
-#define MAE_MPORT_DESC_V2_MPORT_TYPE_LBN 96
-#define MAE_MPORT_DESC_V2_MPORT_TYPE_WIDTH 32
-/* 128-bit value available to drivers for m-port identification. */
-#define MAE_MPORT_DESC_V2_UUID_OFST 16
-#define MAE_MPORT_DESC_V2_UUID_LEN 16
-#define MAE_MPORT_DESC_V2_UUID_LBN 128
-#define MAE_MPORT_DESC_V2_UUID_WIDTH 128
-/* Big wadge of space reserved for other common properties */
-#define MAE_MPORT_DESC_V2_RESERVED_OFST 32
-#define MAE_MPORT_DESC_V2_RESERVED_LEN 8
-#define MAE_MPORT_DESC_V2_RESERVED_LO_OFST 32
-#define MAE_MPORT_DESC_V2_RESERVED_LO_LEN 4
-#define MAE_MPORT_DESC_V2_RESERVED_LO_LBN 256
-#define MAE_MPORT_DESC_V2_RESERVED_LO_WIDTH 32
-#define MAE_MPORT_DESC_V2_RESERVED_HI_OFST 36
-#define MAE_MPORT_DESC_V2_RESERVED_HI_LEN 4
-#define MAE_MPORT_DESC_V2_RESERVED_HI_LBN 288
-#define MAE_MPORT_DESC_V2_RESERVED_HI_WIDTH 32
-#define MAE_MPORT_DESC_V2_RESERVED_LBN 256
-#define MAE_MPORT_DESC_V2_RESERVED_WIDTH 64
-/* Logical port index. Only valid when type NET Port. */
-#define MAE_MPORT_DESC_V2_NET_PORT_IDX_OFST 40
-#define MAE_MPORT_DESC_V2_NET_PORT_IDX_LEN 4
-#define MAE_MPORT_DESC_V2_NET_PORT_IDX_LBN 320
-#define MAE_MPORT_DESC_V2_NET_PORT_IDX_WIDTH 32
-/* The m-port delivered to */
-#define MAE_MPORT_DESC_V2_ALIAS_DELIVER_MPORT_ID_OFST 40
-#define MAE_MPORT_DESC_V2_ALIAS_DELIVER_MPORT_ID_LEN 4
-#define MAE_MPORT_DESC_V2_ALIAS_DELIVER_MPORT_ID_LBN 320
-#define MAE_MPORT_DESC_V2_ALIAS_DELIVER_MPORT_ID_WIDTH 32
-/* The type of thing that owns the VNIC */
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_OFST 40
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_LEN 4
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_FUNCTION 0x1 /* enum */
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_PLUGIN 0x2 /* enum */
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_LBN 320
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_WIDTH 32
-/* The PCIe interface on which the function lives. CJK: We need an enumeration
- * of interfaces that we extend as new interface (types) appear. This belongs
- * elsewhere and should be referenced from here
- */
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_INTERFACE_OFST 44
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_INTERFACE_LEN 4
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_INTERFACE_LBN 352
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_INTERFACE_WIDTH 32
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_PF_IDX_OFST 48
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_PF_IDX_LEN 2
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_PF_IDX_LBN 384
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_PF_IDX_WIDTH 16
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_VF_IDX_OFST 50
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_VF_IDX_LEN 2
-/* enum: Indicates that the function is a PF */
-#define MAE_MPORT_DESC_V2_VF_IDX_NULL 0xffff
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_VF_IDX_LBN 400
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_VF_IDX_WIDTH 16
-/* Reserved. Should be ignored for now. */
-#define MAE_MPORT_DESC_V2_VNIC_PLUGIN_TBD_OFST 44
-#define MAE_MPORT_DESC_V2_VNIC_PLUGIN_TBD_LEN 4
-#define MAE_MPORT_DESC_V2_VNIC_PLUGIN_TBD_LBN 352
-#define MAE_MPORT_DESC_V2_VNIC_PLUGIN_TBD_WIDTH 32
-/* A client handle for the VNIC's owner. Only valid for type VNIC. */
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_HANDLE_OFST 52
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_HANDLE_LEN 4
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_HANDLE_LBN 416
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_HANDLE_WIDTH 32
-
-
-/***********************************/
-/* MC_CMD_MAE_MPORT_ENUMERATE
- * Deprecated in favour of MAE_MPORT_READ_JOURNAL. Support for this command
- * will be removed at some future point.
- */
-#define MC_CMD_MAE_MPORT_ENUMERATE 0x17c
-#undef MC_CMD_0x17c_PRIVILEGE_CTG
-
-#define MC_CMD_0x17c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_MAE_MPORT_ENUMERATE_IN msgrequest */
-#define MC_CMD_MAE_MPORT_ENUMERATE_IN_LEN 0
-
-/* MC_CMD_MAE_MPORT_ENUMERATE_OUT msgresponse */
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_LENMIN 8
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_LENMAX 252
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_LEN(num) (8+1*(num))
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_NUM(len) (((len)-8)/1)
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_COUNT_OFST 0
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_COUNT_LEN 4
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_SIZEOF_MPORT_DESC_OFST 4
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_SIZEOF_MPORT_DESC_LEN 4
-/* Any array of MAE_MPORT_DESC structures. The MAE_MPORT_DESC structure may
- * grow in future version of this command. Drivers should use a stride of
- * SIZEOF_MPORT_DESC. Fields beyond SIZEOF_MPORT_DESC are not present.
- */
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_OFST 8
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_LEN 1
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_MINNUM 0
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_MAXNUM 244
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_MAXNUM_MCDI2 1012
-
/***********************************/
/* MC_CMD_MAE_MPORT_READ_JOURNAL
@@ -29570,73 +25732,6 @@
/***********************************/
-/* MC_CMD_TABLE_UPDATE
- * Update an existing entry in a table with a new response value. May return
- * EINVAL for unknown table ID or other bad request parameters, ENOENT if the
- * entry does not already exist, or EPERM if the operation is not permitted. In
- * case of an error, the additional MCDI error argument field returns the raw
- * error code from the underlying CAM driver.
- */
-#define MC_CMD_TABLE_UPDATE 0x1ce
-#undef MC_CMD_0x1ce_PRIVILEGE_CTG
-
-#define MC_CMD_0x1ce_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_TABLE_UPDATE_IN msgrequest */
-#define MC_CMD_TABLE_UPDATE_IN_LENMIN 16
-#define MC_CMD_TABLE_UPDATE_IN_LENMAX 252
-#define MC_CMD_TABLE_UPDATE_IN_LENMAX_MCDI2 1020
-#define MC_CMD_TABLE_UPDATE_IN_LEN(num) (12+4*(num))
-#define MC_CMD_TABLE_UPDATE_IN_DATA_NUM(len) (((len)-12)/4)
-/* Table identifier. */
-#define MC_CMD_TABLE_UPDATE_IN_TABLE_ID_OFST 0
-#define MC_CMD_TABLE_UPDATE_IN_TABLE_ID_LEN 4
-/* Enum values, see field(s): */
-/* TABLE_ID */
-/* Width in bits of supplied key data (must match table properties). */
-#define MC_CMD_TABLE_UPDATE_IN_KEY_WIDTH_OFST 4
-#define MC_CMD_TABLE_UPDATE_IN_KEY_WIDTH_LEN 2
-/* Width in bits of supplied mask data (0 for direct/BCAM tables, or for STCAM
- * when allocated MASK_ID is used instead).
- */
-#define MC_CMD_TABLE_UPDATE_IN_MASK_WIDTH_OFST 6
-#define MC_CMD_TABLE_UPDATE_IN_MASK_WIDTH_LEN 2
-/* Width in bits of supplied response data (for INSERT and UPDATE operations
- * this must match the table properties; for DELETE operations, no response
- * data is required and this must be 0).
- */
-#define MC_CMD_TABLE_UPDATE_IN_RESP_WIDTH_OFST 8
-#define MC_CMD_TABLE_UPDATE_IN_RESP_WIDTH_LEN 2
-/* Mask ID for STCAM table - used instead of mask data if the table descriptor
- * reports ALLOC_MASKS==1. Otherwise set to 0.
- */
-#define MC_CMD_TABLE_UPDATE_IN_MASK_ID_OFST 6
-#define MC_CMD_TABLE_UPDATE_IN_MASK_ID_LEN 2
-/* Priority for TCAM or STCAM, in range 0..N_PRIORITIES-1, otherwise 0. */
-#define MC_CMD_TABLE_UPDATE_IN_PRIORITY_OFST 8
-#define MC_CMD_TABLE_UPDATE_IN_PRIORITY_LEN 2
-/* (32-bit alignment padding - set to 0) */
-#define MC_CMD_TABLE_UPDATE_IN_RESERVED_OFST 10
-#define MC_CMD_TABLE_UPDATE_IN_RESERVED_LEN 2
-/* Sequence of key, mask (if MASK_WIDTH > 0), and response (if RESP_WIDTH > 0)
- * data values. Each of these items is logically treated as a single wide N-bit
- * value, in which the individual fields have been placed within that value per
- * the LBN and WIDTH information from the table field descriptors. The wide
- * N-bit value is padded with 0 bits at the MSB end if necessary to make a
- * multiple of 32 bits. The value is then packed into this command as a
- * sequence of 32-bit words, bits [31:0] first, then bits [63:32], etc.
- */
-#define MC_CMD_TABLE_UPDATE_IN_DATA_OFST 12
-#define MC_CMD_TABLE_UPDATE_IN_DATA_LEN 4
-#define MC_CMD_TABLE_UPDATE_IN_DATA_MINNUM 1
-#define MC_CMD_TABLE_UPDATE_IN_DATA_MAXNUM 60
-#define MC_CMD_TABLE_UPDATE_IN_DATA_MAXNUM_MCDI2 252
-
-/* MC_CMD_TABLE_UPDATE_OUT msgresponse */
-#define MC_CMD_TABLE_UPDATE_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_TABLE_DELETE
* Delete an existing entry in a table. May return EINVAL for unknown table ID
* or other bad request parameters, ENOENT if the entry does not exist, or
@@ -29702,5 +25797,124 @@
/* MC_CMD_TABLE_DELETE_OUT msgresponse */
#define MC_CMD_TABLE_DELETE_OUT_LEN 0
+/* MC_CMD_QUEUE_HANDLE structuredef: On X4, to distinguish between full-
+ * featured (X2-style) VIs and low-latency (X3-style) queues, we use the top
+ * bits of the queue handle to specify the queue type in all MCDI calls which
+ * refer to VIs/queues. These bits should be masked off when indexing into a
+ * queue in the BAR.
+ */
+#define MC_CMD_QUEUE_HANDLE_LEN 4
+/* Combined queue number and type. This is the ID returned by and passed into
+ * MCDI calls that use queues.
+ */
+#define MC_CMD_QUEUE_HANDLE_QUEUE_HANDLE_OFST 0
+#define MC_CMD_QUEUE_HANDLE_QUEUE_HANDLE_LEN 4
+#define MC_CMD_QUEUE_HANDLE_QUEUE_NUM_OFST 0
+#define MC_CMD_QUEUE_HANDLE_QUEUE_NUM_LBN 0
+#define MC_CMD_QUEUE_HANDLE_QUEUE_NUM_WIDTH 24
+#define MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_OFST 0
+#define MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_LBN 24
+#define MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_WIDTH 8
+/* enum: Indicates that the queue instance is a full-featured VI */
+#define MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_FF_VI 0x0
+/* enum: Indicates that the queue instance is a LL TXQ */
+#define MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_LL_TXQ 0x1
+/* enum: Indicates that the queue instance is a LL RXQ */
+#define MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_LL_RXQ 0x2
+/* enum: Indicates that the queue instance is a LL EVQ */
+#define MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_LL_EVQ 0x3
+#define MC_CMD_QUEUE_HANDLE_QUEUE_HANDLE_LBN 0
+#define MC_CMD_QUEUE_HANDLE_QUEUE_HANDLE_WIDTH 32
+
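/* Illustrative sketch only (not part of the patch): given the QUEUE_NUM and
 * QUEUE_TYPE field definitions above, a driver might split a queue handle
 * like this before indexing into the BAR. The helper names are hypothetical.
 */
static inline u32 example_queue_num(u32 handle)
{
	/* Low 24 bits (QUEUE_NUM_LBN 0, WIDTH 24) carry the queue number. */
	return handle & ((1u << MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_LBN) - 1);
}

static inline u32 example_queue_type(u32 handle)
{
	/* Top 8 bits (QUEUE_TYPE_LBN 24, WIDTH 8) carry the QUEUE_TYPE enum. */
	return handle >> MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_LBN;
}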
+
+/***********************************/
+/* MC_CMD_ALLOC_LL_QUEUES
+ * Allocate low latency (X3-style) queues for current PCI function. Can be
+ * called more than once if desired to allocate more queues.
+ */
+#define MC_CMD_ALLOC_LL_QUEUES 0x1dd
+#undef MC_CMD_0x1dd_PRIVILEGE_CTG
+
+#define MC_CMD_0x1dd_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_ALLOC_LL_QUEUES_IN msgrequest */
+#define MC_CMD_ALLOC_LL_QUEUES_IN_LEN 24
+/* The minimum number of TXQs that is acceptable */
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MIN_TXQ_COUNT_OFST 0
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MIN_TXQ_COUNT_LEN 4
+/* The maximum number of TXQs that would be useful */
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MAX_TXQ_COUNT_OFST 4
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MAX_TXQ_COUNT_LEN 4
+/* The minimum number of RXQs that is acceptable */
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MIN_RXQ_COUNT_OFST 8
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MIN_RXQ_COUNT_LEN 4
+/* The maximum number of RXQs that would be useful */
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MAX_RXQ_COUNT_OFST 12
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MAX_RXQ_COUNT_LEN 4
+/* The minimum number of EVQs that is acceptable */
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MIN_EVQ_COUNT_OFST 16
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MIN_EVQ_COUNT_LEN 4
+/* The maximum number of EVQs that would be useful */
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MAX_EVQ_COUNT_OFST 20
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MAX_EVQ_COUNT_LEN 4
+
+/* MC_CMD_ALLOC_LL_QUEUES_OUT msgresponse */
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_LENMIN 16
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_LENMAX 252
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_LEN(num) (12+4*(num))
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_QUEUES_NUM(len) (((len)-12)/4)
+/* The number of TXQs allocated in this request */
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_TXQ_COUNT_OFST 0
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_TXQ_COUNT_LEN 4
+/* The number of RXQs allocated in this request */
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_RXQ_COUNT_OFST 4
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_RXQ_COUNT_LEN 4
+/* The number of EVQs allocated in this request */
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_EVQ_COUNT_OFST 8
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_EVQ_COUNT_LEN 4
+/* A list of allocated queues, returned as MC_CMD_QUEUE_HANDLEs, not
+ * necessarily contiguous. TXQs are first in the list, followed by RXQs then
+ * EVQs. The type of each queue is indicated by the top bits (see the
+ * QUEUE_TYPE enum)
+ */
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_QUEUES_OFST 12
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_QUEUES_LEN 4
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_QUEUES_MINNUM 1
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_QUEUES_MAXNUM 60
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_QUEUES_MAXNUM_MCDI2 252
+
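/* Illustrative sketch only (not part of the patch): one possible
 * request/response round-trip using the driver's existing MCDI helpers
 * (MCDI_DECLARE_BUF, MCDI_SET_DWORD, MCDI_DWORD, efx_mcdi_rpc). The function
 * name and the min/max counts are hypothetical.
 */
static int example_alloc_ll_queues(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_LL_QUEUES_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_LL_QUEUES_OUT_LENMAX);
	size_t outlen;
	int rc;

	/* Ask for at least one queue of each kind, and at most four. */
	MCDI_SET_DWORD(inbuf, ALLOC_LL_QUEUES_IN_MIN_TXQ_COUNT, 1);
	MCDI_SET_DWORD(inbuf, ALLOC_LL_QUEUES_IN_MAX_TXQ_COUNT, 4);
	MCDI_SET_DWORD(inbuf, ALLOC_LL_QUEUES_IN_MIN_RXQ_COUNT, 1);
	MCDI_SET_DWORD(inbuf, ALLOC_LL_QUEUES_IN_MAX_RXQ_COUNT, 4);
	MCDI_SET_DWORD(inbuf, ALLOC_LL_QUEUES_IN_MIN_EVQ_COUNT, 1);
	MCDI_SET_DWORD(inbuf, ALLOC_LL_QUEUES_IN_MAX_EVQ_COUNT, 4);

	rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_LL_QUEUES, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_ALLOC_LL_QUEUES_OUT_LENMIN)
		return -EIO;

	/* QUEUES holds TXQ handles first, then RXQ handles, then EVQ handles. */
	netif_dbg(efx, probe, efx->net_dev, "got %u TXQs, %u RXQs, %u EVQs\n",
		  MCDI_DWORD(outbuf, ALLOC_LL_QUEUES_OUT_TXQ_COUNT),
		  MCDI_DWORD(outbuf, ALLOC_LL_QUEUES_OUT_RXQ_COUNT),
		  MCDI_DWORD(outbuf, ALLOC_LL_QUEUES_OUT_EVQ_COUNT));
	return 0;
}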
+
+/***********************************/
+/* MC_CMD_FREE_LL_QUEUES
+ * Free low latency (X3-style) queues for current PCI function.
+ */
+#define MC_CMD_FREE_LL_QUEUES 0x1de
+#undef MC_CMD_0x1de_PRIVILEGE_CTG
+
+#define MC_CMD_0x1de_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FREE_LL_QUEUES_IN msgrequest */
+#define MC_CMD_FREE_LL_QUEUES_IN_LENMIN 8
+#define MC_CMD_FREE_LL_QUEUES_IN_LENMAX 252
+#define MC_CMD_FREE_LL_QUEUES_IN_LENMAX_MCDI2 1020
+#define MC_CMD_FREE_LL_QUEUES_IN_LEN(num) (4+4*(num))
+#define MC_CMD_FREE_LL_QUEUES_IN_QUEUES_NUM(len) (((len)-4)/4)
+/* The number of queues to free. */
+#define MC_CMD_FREE_LL_QUEUES_IN_QUEUE_COUNT_OFST 0
+#define MC_CMD_FREE_LL_QUEUES_IN_QUEUE_COUNT_LEN 4
+/* A list of queues to free, as a list of MC_CMD_QUEUE_HANDLEs. They must have
+ * all been previously allocated by MC_CMD_ALLOC_LL_QUEUES. The type of each
+ * queue should be indicated by the top bits.
+ */
+#define MC_CMD_FREE_LL_QUEUES_IN_QUEUES_OFST 4
+#define MC_CMD_FREE_LL_QUEUES_IN_QUEUES_LEN 4
+#define MC_CMD_FREE_LL_QUEUES_IN_QUEUES_MINNUM 1
+#define MC_CMD_FREE_LL_QUEUES_IN_QUEUES_MAXNUM 62
+#define MC_CMD_FREE_LL_QUEUES_IN_QUEUES_MAXNUM_MCDI2 254
+
+/* MC_CMD_FREE_LL_QUEUES_OUT msgresponse */
+#define MC_CMD_FREE_LL_QUEUES_OUT_LEN 0
+
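/* Illustrative sketch only (not part of the patch): returning two previously
 * allocated handles. Assumes the MCDI_SET_ARRAY_DWORD helper from mcdi.h is
 * available; the function name and the fixed count of two are hypothetical.
 */
static int example_free_ll_queues(struct efx_nic *efx, u32 handle0, u32 handle1)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FREE_LL_QUEUES_IN_LEN(2));

	MCDI_SET_DWORD(inbuf, FREE_LL_QUEUES_IN_QUEUE_COUNT, 2);
	/* Handles keep the QUEUE_TYPE bits exactly as returned by ALLOC. */
	MCDI_SET_ARRAY_DWORD(inbuf, FREE_LL_QUEUES_IN_QUEUES, 0, handle0);
	MCDI_SET_ARRAY_DWORD(inbuf, FREE_LL_QUEUES_IN_QUEUES, 1, handle1);

	return efx_mcdi_rpc(efx, MC_CMD_FREE_LL_QUEUES, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}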
#endif /* MCDI_PCOL_H */
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index ad4694fa3dda..7b236d291d8c 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -17,58 +17,6 @@
#include "selftest.h"
#include "mcdi_port_common.h"
-static int efx_mcdi_mdio_read(struct net_device *net_dev,
- int prtad, int devad, u16 addr)
-{
- struct efx_nic *efx = efx_netdev_priv(net_dev);
- MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_READ_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_READ_OUT_LEN);
- size_t outlen;
- int rc;
-
- MCDI_SET_DWORD(inbuf, MDIO_READ_IN_BUS, efx->mdio_bus);
- MCDI_SET_DWORD(inbuf, MDIO_READ_IN_PRTAD, prtad);
- MCDI_SET_DWORD(inbuf, MDIO_READ_IN_DEVAD, devad);
- MCDI_SET_DWORD(inbuf, MDIO_READ_IN_ADDR, addr);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_READ, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), &outlen);
- if (rc)
- return rc;
-
- if (MCDI_DWORD(outbuf, MDIO_READ_OUT_STATUS) !=
- MC_CMD_MDIO_STATUS_GOOD)
- return -EIO;
-
- return (u16)MCDI_DWORD(outbuf, MDIO_READ_OUT_VALUE);
-}
-
-static int efx_mcdi_mdio_write(struct net_device *net_dev,
- int prtad, int devad, u16 addr, u16 value)
-{
- struct efx_nic *efx = efx_netdev_priv(net_dev);
- MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_WRITE_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_WRITE_OUT_LEN);
- size_t outlen;
- int rc;
-
- MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_BUS, efx->mdio_bus);
- MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_PRTAD, prtad);
- MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_DEVAD, devad);
- MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_ADDR, addr);
- MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_VALUE, value);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_WRITE, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), &outlen);
- if (rc)
- return rc;
-
- if (MCDI_DWORD(outbuf, MDIO_WRITE_OUT_STATUS) !=
- MC_CMD_MDIO_STATUS_GOOD)
- return -EIO;
-
- return 0;
-}
u32 efx_mcdi_phy_get_caps(struct efx_nic *efx)
{
@@ -97,12 +45,7 @@ int efx_mcdi_port_probe(struct efx_nic *efx)
{
int rc;
- /* Set up MDIO structure for PHY */
- efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
- efx->mdio.mdio_read = efx_mcdi_mdio_read;
- efx->mdio.mdio_write = efx_mcdi_mdio_write;
-
- /* Fill out MDIO structure, loopback modes, and initial link state */
+ /* Fill out loopback modes and initial link state */
rc = efx_mcdi_phy_probe(efx);
if (rc != 0)
return rc;
diff --git a/drivers/net/ethernet/sfc/mcdi_port_common.c b/drivers/net/ethernet/sfc/mcdi_port_common.c
index 76ea26722ca4..dae684194ac8 100644
--- a/drivers/net/ethernet/sfc/mcdi_port_common.c
+++ b/drivers/net/ethernet/sfc/mcdi_port_common.c
@@ -448,15 +448,6 @@ int efx_mcdi_phy_probe(struct efx_nic *efx)
efx->phy_data = phy_data;
efx->phy_type = phy_data->type;
- efx->mdio_bus = phy_data->channel;
- efx->mdio.prtad = phy_data->port;
- efx->mdio.mmds = phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22);
- efx->mdio.mode_support = 0;
- if (phy_data->mmd_mask & (1 << MC_CMD_MMD_CLAUSE22))
- efx->mdio.mode_support |= MDIO_SUPPORTS_C22;
- if (phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22))
- efx->mdio.mode_support |= MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
-
caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP);
if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN))
mcdi_to_ethtool_linkset(phy_data->media, caps,
@@ -546,8 +537,6 @@ void efx_mcdi_phy_get_link_ksettings(struct efx_nic *efx, struct ethtool_link_ks
cmd->base.port = mcdi_to_ethtool_media(phy_cfg->media);
cmd->base.phy_address = phy_cfg->port;
cmd->base.autoneg = !!(efx->link_advertising[0] & ADVERTISED_Autoneg);
- cmd->base.mdio_support = (efx->mdio.mode_support &
- (MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22));
mcdi_to_ethtool_linkset(phy_cfg->media, phy_cfg->supported_cap,
cmd->link_modes.supported);
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index f2dd7feb0e0c..b98c259f672d 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -15,7 +15,7 @@
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/timer.h>
-#include <linux/mdio.h>
+#include <linux/mii.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/device.h>
@@ -193,6 +193,12 @@ struct efx_tx_buffer {
* @initialised: Has hardware queue been initialised?
* @timestamping: Is timestamping enabled for this channel?
* @xdp_tx: Is this an XDP tx queue?
+ * @old_complete_packets: Value of @complete_packets as of last
+ * efx_init_tx_queue()
+ * @old_complete_bytes: Value of @complete_bytes as of last
+ * efx_init_tx_queue()
+ * @old_tso_bursts: Value of @tso_bursts as of last efx_init_tx_queue()
+ * @old_tso_packets: Value of @tso_packets as of last efx_init_tx_queue()
* @read_count: Current read pointer.
* This is the number of buffers that have been removed from both rings.
* @old_write_count: The value of @write_count when last checked.
@@ -202,6 +208,20 @@ struct efx_tx_buffer {
* avoid cache-line ping-pong between the xmit path and the
* completion path.
* @merge_events: Number of TX merged completion events
+ * @bytes_compl: Number of bytes completed during this NAPI poll
+ * (efx_process_channel()). For BQL.
+ * @pkts_compl: Number of packets completed during this NAPI poll.
+ * @complete_packets: Number of packets completed since this struct was
+ * created. Only counts SKB packets, not XDP TX (it accumulates
+ * the same values that are reported to BQL).
+ * @complete_bytes: Number of bytes completed since this struct was
+ * created. For TSO, counts the superframe size, not the sizes of
+ * generated frames on the wire (i.e. the headers are only counted
+ * once)
+ * @complete_xdp_packets: Number of XDP TX packets completed since this
+ * struct was created.
+ * @complete_xdp_bytes: Number of XDP TX bytes completed since this
+ * struct was created.
* @completed_timestamp_major: Top part of the most recent tx timestamp.
* @completed_timestamp_minor: Low part of the most recent tx timestamp.
* @insert_count: Current insert pointer
@@ -232,6 +252,7 @@ struct efx_tx_buffer {
* @xmit_pending: Are any packets waiting to be pushed to the NIC
* @cb_packets: Number of times the TX copybreak feature has been used
* @notify_count: Count of notified descriptors to the NIC
+ * @tx_packets: Number of packets sent since this struct was created
* @empty_read_count: If the completion path has seen the queue as empty
* and the transmission path has not yet checked this, the value of
* @read_count bitwise-added to %EFX_EMPTY_COUNT_VALID; otherwise 0.
@@ -255,6 +276,10 @@ struct efx_tx_queue {
bool initialised;
bool timestamping;
bool xdp_tx;
+ unsigned long old_complete_packets;
+ unsigned long old_complete_bytes;
+ unsigned int old_tso_bursts;
+ unsigned int old_tso_packets;
/* Members used mainly on the completion path */
unsigned int read_count ____cacheline_aligned_in_smp;
@@ -262,6 +287,10 @@ struct efx_tx_queue {
unsigned int merge_events;
unsigned int bytes_compl;
unsigned int pkts_compl;
+ unsigned long complete_packets;
+ unsigned long complete_bytes;
+ unsigned long complete_xdp_packets;
+ unsigned long complete_xdp_bytes;
u32 completed_timestamp_major;
u32 completed_timestamp_minor;
@@ -370,8 +399,11 @@ struct efx_rx_page_state {
* @recycle_count: RX buffer recycle counter.
* @slow_fill: Timer used to defer efx_nic_generate_fill_event().
* @grant_work: workitem used to grant credits to the MAE if @grant_credits
+ * @rx_packets: Number of packets received since this struct was created
+ * @rx_bytes: Number of bytes received since this struct was created
+ * @old_rx_packets: Value of @rx_packets as of last efx_init_rx_queue()
+ * @old_rx_bytes: Value of @rx_bytes as of last efx_init_rx_queue()
* @xdp_rxq_info: XDP specific RX queue information.
- * @xdp_rxq_info_valid: Is xdp_rxq_info valid data?.
*/
struct efx_rx_queue {
struct efx_nic *efx;
@@ -406,8 +438,10 @@ struct efx_rx_queue {
struct work_struct grant_work;
/* Statistics to supplement MAC stats */
unsigned long rx_packets;
+ unsigned long rx_bytes;
+ unsigned long old_rx_packets;
+ unsigned long old_rx_bytes;
struct xdp_rxq_info xdp_rxq_info;
- bool xdp_rxq_info_valid;
};
enum efx_sync_events_state {
@@ -451,10 +485,8 @@ enum efx_sync_events_state {
* @filter_work: Work item for efx_filter_rfs_expire()
* @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
* indexed by filter ID
- * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors
* @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors
* @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors
- * @n_rx_mcast_mismatch: Count of unmatched multicast frames
* @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors
* @n_rx_overlength: Count of RX_OVERLENGTH errors
* @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
@@ -468,6 +500,10 @@ enum efx_sync_events_state {
* @n_rx_xdp_redirect: Count of RX packets redirected to a different NIC by XDP
* @n_rx_mport_bad: Count of RX packets dropped because their ingress mport was
* not recognised
+ * @old_n_rx_hw_drops: Count of all RX packets dropped for any reason as of last
+ * efx_start_channels()
+ * @old_n_rx_hw_drop_overruns: Value of @n_rx_nodesc_trunc as of last
+ * efx_start_channels()
* @rx_pkt_n_frags: Number of fragments in next packet to be delivered by
* __efx_rx_packet(), or zero if there is none
* @rx_pkt_index: Ring index of first buffer for next packet to be delivered
@@ -511,7 +547,6 @@ struct efx_channel {
u32 *rps_flow_id;
#endif
- unsigned int n_rx_tobe_disc;
unsigned int n_rx_ip_hdr_chksum_err;
unsigned int n_rx_tcp_udp_chksum_err;
unsigned int n_rx_outer_ip_hdr_chksum_err;
@@ -519,7 +554,6 @@ struct efx_channel {
unsigned int n_rx_inner_ip_hdr_chksum_err;
unsigned int n_rx_inner_tcp_udp_chksum_err;
unsigned int n_rx_eth_crc_err;
- unsigned int n_rx_mcast_mismatch;
unsigned int n_rx_frm_trunc;
unsigned int n_rx_overlength;
unsigned int n_skbuff_leaks;
@@ -532,6 +566,9 @@ struct efx_channel {
unsigned int n_rx_xdp_redirect;
unsigned int n_rx_mport_bad;
+ unsigned int old_n_rx_hw_drops;
+ unsigned int old_n_rx_hw_drop_overruns;
+
unsigned int rx_pkt_n_frags;
unsigned int rx_pkt_index;
@@ -737,21 +774,24 @@ struct vfdi_status;
/* The reserved RSS context value */
#define EFX_MCDI_RSS_CONTEXT_INVALID 0xffffffff
/**
- * struct efx_rss_context - A user-defined RSS context for filtering
- * @list: node of linked list on which this struct is stored
+ * struct efx_rss_context_priv - driver private data for an RSS context
* @context_id: the RSS_CONTEXT_ID returned by MC firmware, or
* %EFX_MCDI_RSS_CONTEXT_INVALID if this context is not present on the NIC.
- * For Siena, 0 if RSS is active, else %EFX_MCDI_RSS_CONTEXT_INVALID.
- * @user_id: the rss_context ID exposed to userspace over ethtool.
* @rx_hash_udp_4tuple: UDP 4-tuple hashing enabled
+ */
+struct efx_rss_context_priv {
+ u32 context_id;
+ bool rx_hash_udp_4tuple;
+};
+
+/**
+ * struct efx_rss_context - an RSS context
+ * @priv: hardware-specific state
* @rx_hash_key: Toeplitz hash key for this RSS context
* @indir_table: Indirection table for this RSS context
*/
struct efx_rss_context {
- struct list_head list;
- u32 context_id;
- u32 user_id;
- bool rx_hash_udp_4tuple;
+ struct efx_rss_context_priv priv;
u8 rx_hash_key[40];
u32 rx_indir_table[128];
};
@@ -789,6 +829,7 @@ struct efx_arfs_rule {
/**
* struct efx_async_filter_insertion - Request to asynchronously insert a filter
* @net_dev: Reference to the netdevice
+ * @net_dev_tracker: reference tracker entry for @net_dev
* @spec: The filter to insert
* @work: Workitem for this request
* @rxq_index: Identifies the channel for which this request was made
@@ -796,6 +837,7 @@ struct efx_arfs_rule {
*/
struct efx_async_filter_insertion {
struct net_device *net_dev;
+ netdevice_tracker net_dev_tracker;
struct efx_filter_spec spec;
struct work_struct work;
u16 rxq_index;
@@ -883,9 +925,7 @@ struct efx_mae;
* @rx_packet_ts_offset: Offset of timestamp from start of packet data
* (valid only if channel->sync_timestamps_enabled; always negative)
* @rx_scatter: Scatter mode enabled for receives
- * @rss_context: Main RSS context. Its @list member is the head of the list of
- * RSS contexts created by user requests
- * @rss_lock: Protects custom RSS context software state in @rss_context.list
+ * @rss_context: Main RSS context.
* @vport_id: The function's vport ID, only relevant for PFs
* @int_error_count: Number of internal errors seen recently
* @int_error_expire: Time at which error count will be expired
@@ -914,8 +954,6 @@ struct efx_mae;
* @stats_buffer: DMA buffer for statistics
* @phy_type: PHY type
* @phy_data: PHY private data (including PHY-specific stats)
- * @mdio: PHY MDIO interface
- * @mdio_bus: PHY MDIO bus ID (only used by Siena)
* @phy_mode: PHY operating mode. Serialised by @mac_lock.
* @link_advertising: Autonegotiation advertising flags
* @fec_config: Forward Error Correction configuration flags. For bit positions
@@ -964,6 +1002,7 @@ struct efx_mae;
* @dl_port: devlink port associated with the PF
* @mem_bar: The BAR that is mapped into membase.
* @reg_base: Offset from the start of the bar to the function control window.
+ * @reflash_mutex: Mutex for serialising firmware reflash operations.
* @monitor_work: Hardware monitor workitem
* @biu_lock: BIU (bus interface unit) lock
* @last_irq_cpu: Last CPU to handle a possible test interrupt. This
@@ -1052,7 +1091,6 @@ struct efx_nic {
int rx_packet_ts_offset;
bool rx_scatter;
struct efx_rss_context rss_context;
- struct mutex rss_lock;
u32 vport_id;
unsigned int_error_count;
@@ -1090,8 +1128,6 @@ struct efx_nic {
unsigned int phy_type;
void *phy_data;
- struct mdio_if_info mdio;
- unsigned int mdio_bus;
enum efx_phy_mode phy_mode;
__ETHTOOL_DECLARE_LINK_MODE_MASK(link_advertising);
@@ -1150,6 +1186,7 @@ struct efx_nic {
struct devlink_port *dl_port;
unsigned int mem_bar;
u32 reg_base;
+ struct mutex reflash_mutex;
/* The following fields may be written more often */
@@ -1342,6 +1379,8 @@ struct efx_udp_tunnel {
* @can_rx_scatter: NIC is able to scatter packets to multiple buffers
* @always_rx_scatter: NIC will always scatter packets to multiple buffers
* @option_descriptors: NIC supports TX option descriptors
+ * @flash_auto_partition: firmware flash uses AUTO partition, driver does
+ * not need to perform image parsing
* @min_interrupt_mode: Lowest capability interrupt mode supported
* from &enum efx_int_mode.
* @timer_period_max: Maximum period of interrupt timer (in ticks)
@@ -1369,7 +1408,7 @@ struct efx_nic_type {
int (*fini_dmaq)(struct efx_nic *efx);
void (*prepare_flr)(struct efx_nic *efx);
void (*finish_flr)(struct efx_nic *efx);
- size_t (*describe_stats)(struct efx_nic *efx, u8 *names);
+ size_t (*describe_stats)(struct efx_nic *efx, u8 **names);
size_t (*update_stats)(struct efx_nic *efx, u64 *full_stats,
struct rtnl_link_stats64 *core_stats);
size_t (*update_stats_atomic)(struct efx_nic *efx, u64 *full_stats,
@@ -1416,9 +1455,9 @@ struct efx_nic_type {
const u32 *rx_indir_table, const u8 *key);
int (*rx_pull_rss_config)(struct efx_nic *efx);
int (*rx_push_rss_context_config)(struct efx_nic *efx,
- struct efx_rss_context *ctx,
+ struct efx_rss_context_priv *ctx,
const u32 *rx_indir_table,
- const u8 *key);
+ const u8 *key, bool delete);
int (*rx_pull_rss_context_config)(struct efx_nic *efx,
struct efx_rss_context *ctx);
void (*rx_restore_rss_contexts)(struct efx_nic *efx);
@@ -1518,6 +1557,7 @@ struct efx_nic_type {
bool can_rx_scatter;
bool always_rx_scatter;
bool option_descriptors;
+ bool flash_auto_partition;
unsigned int min_interrupt_mode;
unsigned int timer_period_max;
netdev_features_t offload_features;
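To make the intent of the new old_* snapshot fields documented above concrete: the running counters are never reset, and the old_* copies are taken when a queue is (re)initialised, so a per-run delta falls out of a subtraction. A minimal sketch (the helper name is hypothetical, not part of the patch):

static unsigned long
efx_rx_queue_packets_since_init(const struct efx_rx_queue *rx_queue)
{
	/* rx_packets counts since the struct was created; old_rx_packets is
	 * its value as of the last efx_init_rx_queue(), so the difference is
	 * the traffic seen since the queue was last (re)initialised.
	 */
	return rx_queue->rx_packets - rx_queue->old_rx_packets;
}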
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index a33ed473cc8a..80aa5e9c732a 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -299,18 +299,15 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf)
* bits in the first @count bits of @mask for which a name is defined.
*/
size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
- const unsigned long *mask, u8 *names)
+ const unsigned long *mask, u8 **names)
{
size_t visible = 0;
size_t index;
for_each_set_bit(index, mask, count) {
if (desc[index].name) {
- if (names) {
- strscpy(names, desc[index].name,
- ETH_GSTRING_LEN);
- names += ETH_GSTRING_LEN;
- }
+ if (names)
+ ethtool_puts(names, desc[index].name);
++visible;
}
}
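The u8 ** conversion above changes the calling convention for statistics-name helpers: the callee now advances the caller's cursor through the ethtool strings buffer via ethtool_puts()/ethtool_sprintf(), instead of returning a count for the caller to multiply by ETH_GSTRING_LEN. A sketch of the resulting caller side (illustrative only; efx_example_get_strings is not a function in this patch):

static void efx_example_get_strings(struct efx_nic *efx, u8 *data)
{
	u8 *strings = data;

	/* describe_stats() writes one ETH_GSTRING_LEN entry per visible
	 * statistic and leaves 'strings' pointing just past the last one.
	 */
	efx->type->describe_stats(efx, &strings);
	ethtool_puts(&strings, "example_sw_stat");
}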
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 1db64fc6e909..9fa5c4c713ab 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -211,4 +211,6 @@ int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
extern const struct efx_nic_type efx_hunt_a0_nic_type;
extern const struct efx_nic_type efx_hunt_a0_vf_nic_type;
+extern const struct efx_nic_type efx_x4_nic_type;
+
#endif /* EFX_NIC_H */
diff --git a/drivers/net/ethernet/sfc/nic_common.h b/drivers/net/ethernet/sfc/nic_common.h
index 466df5348b29..821d91efda19 100644
--- a/drivers/net/ethernet/sfc/nic_common.h
+++ b/drivers/net/ethernet/sfc/nic_common.h
@@ -21,6 +21,7 @@ enum {
*/
EFX_REV_HUNT_A0 = 4,
EFX_REV_EF100 = 5,
+ EFX_REV_X4 = 6,
};
static inline int efx_nic_rev(struct efx_nic *efx)
@@ -240,7 +241,7 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf);
#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))
size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
- const unsigned long *mask, u8 *names);
+ const unsigned long *mask, u8 **names);
int efx_nic_copy_stats(struct efx_nic *efx, __le64 *dest);
void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
const unsigned long *mask, u64 *stats,
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index c3bffbf0ba2b..4c7222bf26be 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -399,7 +399,7 @@ static const unsigned long efx_ptp_stat_mask[] = {
[0 ... BITS_TO_LONGS(PTP_STAT_COUNT) - 1] = ~0UL,
};
-size_t efx_ptp_describe_stats(struct efx_nic *efx, u8 *strings)
+size_t efx_ptp_describe_stats(struct efx_nic *efx, u8 **strings)
{
if (!efx->ptp_data)
return 0;
@@ -884,7 +884,7 @@ static void efx_ptp_read_timeset(MCDI_DECLARE_STRUCT_PTR(data),
timeset->host_start = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTSTART);
timeset->major = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_MAJOR);
timeset->minor = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_MINOR);
- timeset->host_end = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTEND),
+ timeset->host_end = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTEND);
timeset->wait = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_WAITNS);
/* Ignore seconds */
@@ -1800,11 +1800,6 @@ int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
return NETDEV_TX_OK;
}
-int efx_ptp_get_mode(struct efx_nic *efx)
-{
- return efx->ptp_data->mode;
-}
-
int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
unsigned int new_mode)
{
@@ -1864,7 +1859,7 @@ static int efx_ptp_ts_init(struct efx_nic *efx, struct kernel_hwtstamp_config *i
return 0;
}
-void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info)
+void efx_ptp_get_ts_info(struct efx_nic *efx, struct kernel_ethtool_ts_info *ts_info)
{
struct efx_ptp_data *ptp = efx->ptp_data;
struct efx_nic *primary = efx->primary;
diff --git a/drivers/net/ethernet/sfc/ptp.h b/drivers/net/ethernet/sfc/ptp.h
index 2f30dbb490d2..cb9b077921e8 100644
--- a/drivers/net/ethernet/sfc/ptp.h
+++ b/drivers/net/ethernet/sfc/ptp.h
@@ -12,7 +12,7 @@
#include <linux/net_tstamp.h>
#include "net_driver.h"
-struct ethtool_ts_info;
+struct kernel_ethtool_ts_info;
int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel);
void efx_ptp_defer_probe_with_channel(struct efx_nic *efx);
struct efx_channel *efx_ptp_channel(struct efx_nic *efx);
@@ -23,14 +23,14 @@ int efx_ptp_set_ts_config(struct efx_nic *efx,
struct netlink_ext_ack *extack);
int efx_ptp_get_ts_config(struct efx_nic *efx,
struct kernel_hwtstamp_config *config);
-void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info);
+void efx_ptp_get_ts_info(struct efx_nic *efx,
+ struct kernel_ethtool_ts_info *ts_info);
bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
-int efx_ptp_get_mode(struct efx_nic *efx);
int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
unsigned int new_mode);
int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
-size_t efx_ptp_describe_stats(struct efx_nic *efx, u8 *strings);
+size_t efx_ptp_describe_stats(struct efx_nic *efx, u8 **strings);
size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats);
void efx_time_sync_event(struct efx_channel *channel, efx_qword_t *ev);
void __efx_rx_skb_attach_timestamp(struct efx_channel *channel,
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index f77a2d3ef37e..ffca82207e47 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -125,8 +125,6 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
struct efx_rx_buffer *rx_buf;
- rx_queue->rx_packets++;
-
rx_buf = efx_rx_buffer(rx_queue, index);
rx_buf->flags |= flags;
@@ -394,6 +392,9 @@ void __efx_rx_packet(struct efx_channel *channel)
goto out;
}
+ rx_queue->rx_packets++;
+ rx_queue->rx_bytes += rx_buf->len;
+
if (!efx_do_xdp(efx, channel, rx_buf, &eh))
goto out;
diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
index dcd901eccfc8..5306f4c44be4 100644
--- a/drivers/net/ethernet/sfc/rx_common.c
+++ b/drivers/net/ethernet/sfc/rx_common.c
@@ -241,6 +241,9 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
rx_queue->page_recycle_failed = 0;
rx_queue->page_recycle_full = 0;
+ rx_queue->old_rx_packets = rx_queue->rx_packets;
+ rx_queue->old_rx_bytes = rx_queue->rx_bytes;
+
/* Initialise limit fields */
max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
max_trigger =
@@ -266,8 +269,6 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
"Failure to initialise XDP queue information rc=%d\n",
rc);
efx->xdp_rxq_info_failed = true;
- } else {
- rx_queue->xdp_rxq_info_valid = true;
}
/* Set up RX descriptor ring */
@@ -282,7 +283,7 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
"shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));
- del_timer_sync(&rx_queue->slow_fill);
+ timer_delete_sync(&rx_queue->slow_fill);
if (rx_queue->grant_credits)
flush_work(&rx_queue->grant_work);
@@ -299,10 +300,8 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
efx_fini_rx_recycle_ring(rx_queue);
- if (rx_queue->xdp_rxq_info_valid)
+ if (xdp_rxq_info_is_reg(&rx_queue->xdp_rxq_info))
xdp_rxq_info_unreg(&rx_queue->xdp_rxq_info);
-
- rx_queue->xdp_rxq_info_valid = false;
}
void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
@@ -349,7 +348,8 @@ void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
void efx_rx_slow_fill(struct timer_list *t)
{
- struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);
+ struct efx_rx_queue *rx_queue = timer_container_of(rx_queue, t,
+ slow_fill);
/* Post an event to cause NAPI to run and refill the queue */
efx_nic_generate_fill_event(rx_queue);
@@ -557,69 +557,25 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
napi_gro_frags(napi);
}
-/* RSS contexts. We're using linked lists and crappy O(n) algorithms, because
- * (a) this is an infrequent control-plane operation and (b) n is small (max 64)
- */
-struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx)
+struct efx_rss_context_priv *efx_find_rss_context_entry(struct efx_nic *efx,
+ u32 id)
{
- struct list_head *head = &efx->rss_context.list;
- struct efx_rss_context *ctx, *new;
- u32 id = 1; /* Don't use zero, that refers to the master RSS context */
+ struct ethtool_rxfh_context *ctx;
- WARN_ON(!mutex_is_locked(&efx->rss_lock));
+ WARN_ON(!mutex_is_locked(&efx->net_dev->ethtool->rss_lock));
- /* Search for first gap in the numbering */
- list_for_each_entry(ctx, head, list) {
- if (ctx->user_id != id)
- break;
- id++;
- /* Check for wrap. If this happens, we have nearly 2^32
- * allocated RSS contexts, which seems unlikely.
- */
- if (WARN_ON_ONCE(!id))
- return NULL;
- }
-
- /* Create the new entry */
- new = kmalloc(sizeof(*new), GFP_KERNEL);
- if (!new)
+ ctx = xa_load(&efx->net_dev->ethtool->rss_ctx, id);
+ if (!ctx)
return NULL;
- new->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
- new->rx_hash_udp_4tuple = false;
-
- /* Insert the new entry into the gap */
- new->user_id = id;
- list_add_tail(&new->list, &ctx->list);
- return new;
-}
-
-struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id)
-{
- struct list_head *head = &efx->rss_context.list;
- struct efx_rss_context *ctx;
-
- WARN_ON(!mutex_is_locked(&efx->rss_lock));
-
- list_for_each_entry(ctx, head, list)
- if (ctx->user_id == id)
- return ctx;
- return NULL;
-}
-
-void efx_free_rss_context_entry(struct efx_rss_context *ctx)
-{
- list_del(&ctx->list);
- kfree(ctx);
+ return ethtool_rxfh_context_priv(ctx);
}
-void efx_set_default_rx_indir_table(struct efx_nic *efx,
- struct efx_rss_context *ctx)
+void efx_set_default_rx_indir_table(struct efx_nic *efx, u32 *indir)
{
size_t i;
- for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++)
- ctx->rx_indir_table[i] =
- ethtool_rxfh_indir_default(i, efx->rss_spread);
+ for (i = 0; i < ARRAY_SIZE(efx->rss_context.rx_indir_table); i++)
+ indir[i] = ethtool_rxfh_indir_default(i, efx->rss_spread);
}
/**
@@ -938,7 +894,7 @@ static void efx_filter_rfs_work(struct work_struct *data)
/* Release references */
clear_bit(slot_idx, &efx->rps_slot_map);
- dev_put(req->net_dev);
+ netdev_put(req->net_dev, &req->net_dev_tracker);
}
int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
@@ -1030,7 +986,8 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
}
/* Queue the request */
- dev_hold(req->net_dev = net_dev);
+ req->net_dev = net_dev;
+ netdev_hold(req->net_dev, &req->net_dev_tracker, GFP_ATOMIC);
INIT_WORK(&req->work, efx_filter_rfs_work);
req->rxq_index = rxq_index;
req->flow_id = flow_id;
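With RSS contexts now owned by the ethtool core (stored in net_dev->ethtool->rss_ctx and protected by the core's rss_lock), driver code that needs the per-context private state looks it up instead of walking a driver-private list. A sketch of the usage pattern implied by efx_find_rss_context_entry() above (efx_example_use_rss_context is a hypothetical caller, not part of the patch):

static int efx_example_use_rss_context(struct efx_nic *efx, u32 rss_context)
{
	struct efx_rss_context_priv *priv;
	int rc = 0;

	mutex_lock(&efx->net_dev->ethtool->rss_lock);
	priv = efx_find_rss_context_entry(efx, rss_context);
	if (!priv)
		rc = -ENOENT;
	/* else: program filters etc. against priv->context_id here */
	mutex_unlock(&efx->net_dev->ethtool->rss_lock);
	return rc;
}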
diff --git a/drivers/net/ethernet/sfc/rx_common.h b/drivers/net/ethernet/sfc/rx_common.h
index fbd2769307f9..75fa84192362 100644
--- a/drivers/net/ethernet/sfc/rx_common.h
+++ b/drivers/net/ethernet/sfc/rx_common.h
@@ -84,11 +84,9 @@ void
efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
unsigned int n_frags, u8 *eh, __wsum csum);
-struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx);
-struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id);
-void efx_free_rss_context_entry(struct efx_rss_context *ctx);
-void efx_set_default_rx_indir_table(struct efx_nic *efx,
- struct efx_rss_context *ctx);
+struct efx_rss_context_priv *efx_find_rss_context_entry(struct efx_nic *efx,
+ u32 id);
+void efx_set_default_rx_indir_table(struct efx_nic *efx, u32 *indir);
bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);
bool efx_filter_spec_equal(const struct efx_filter_spec *left,
diff --git a/drivers/net/ethernet/sfc/siena/efx_channels.c b/drivers/net/ethernet/sfc/siena/efx_channels.c
index a7346e965bfe..fc075ab6b7b5 100644
--- a/drivers/net/ethernet/sfc/siena/efx_channels.c
+++ b/drivers/net/ethernet/sfc/siena/efx_channels.c
@@ -217,8 +217,8 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
if (efx_siena_separate_tx_channels) {
efx->n_tx_channels =
- min(max(n_channels / 2, 1U),
- efx->max_tx_channels);
+ clamp(n_channels / 2, 1U,
+ efx->max_tx_channels);
efx->tx_channel_offset =
n_channels - efx->n_tx_channels;
efx->n_rx_channels =
@@ -1285,7 +1285,8 @@ static int efx_poll(struct napi_struct *napi, int budget)
spent = efx_process_channel(channel, budget);
- xdp_do_flush();
+ if (budget)
+ xdp_do_flush();
if (spent < budget) {
if (efx_channel_has_rx_queue(channel) &&
@@ -1299,7 +1300,7 @@ static int efx_poll(struct napi_struct *napi, int budget)
time = jiffies - channel->rfs_last_expiry;
/* Would our quota be >= 20? */
if (channel->rfs_filter_count * time >= 600 * HZ)
- mod_delayed_work(system_wq, &channel->filter_work, 0);
+ mod_delayed_work(system_percpu_wq, &channel->filter_work, 0);
#endif
/* There is no race here; although napi_disable() will
diff --git a/drivers/net/ethernet/sfc/siena/efx_common.c b/drivers/net/ethernet/sfc/siena/efx_common.c
index 88e5bc347a44..35036cc902fe 100644
--- a/drivers/net/ethernet/sfc/siena/efx_common.c
+++ b/drivers/net/ethernet/sfc/siena/efx_common.c
@@ -306,7 +306,7 @@ int efx_siena_change_mtu(struct net_device *net_dev, int new_mtu)
efx_siena_stop_all(efx);
mutex_lock(&efx->mac_lock);
- net_dev->mtu = new_mtu;
+ WRITE_ONCE(net_dev->mtu, new_mtu);
efx_siena_mac_reconfigure(efx, true);
mutex_unlock(&efx->mac_lock);
@@ -725,7 +725,6 @@ void efx_siena_reset_down(struct efx_nic *efx, enum reset_type method)
mutex_lock(&efx->mac_lock);
down_write(&efx->filter_sem);
- mutex_lock(&efx->rss_lock);
efx->type->fini(efx);
}
@@ -786,9 +785,6 @@ int efx_siena_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
" VFs may not function\n", rc);
#endif
- if (efx->type->rx_restore_rss_contexts)
- efx->type->rx_restore_rss_contexts(efx);
- mutex_unlock(&efx->rss_lock);
efx->type->filter_table_restore(efx);
up_write(&efx->filter_sem);
if (efx->type->sriov_reset)
@@ -806,7 +802,6 @@ int efx_siena_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
fail:
efx->port_initialized = false;
- mutex_unlock(&efx->rss_lock);
up_write(&efx->filter_sem);
mutex_unlock(&efx->mac_lock);
@@ -1016,9 +1011,7 @@ int efx_siena_init_struct(struct efx_nic *efx,
efx->type->rx_hash_offset - efx->type->rx_prefix_size;
efx->rx_packet_ts_offset =
efx->type->rx_ts_offset - efx->type->rx_prefix_size;
- INIT_LIST_HEAD(&efx->rss_context.list);
efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
- mutex_init(&efx->rss_lock);
efx->vport_id = EVB_PORT_ID_ASSIGNED;
spin_lock_init(&efx->stats_lock);
efx->vi_stride = EFX_DEFAULT_VI_STRIDE;
@@ -1292,9 +1285,6 @@ out:
/* For simplicity and reliability, we always require a slot reset and try to
* reset the hardware when a pci error affecting the device is detected.
- * We leave both the link_reset and mmio_enabled callback unimplemented:
- * with our request for slot reset the mmio_enabled callback will never be
- * called, and the link_reset callback is not used by AER or EEH mechanisms.
*/
const struct pci_error_handlers efx_siena_err_handlers = {
.error_detected = efx_io_error_detected,
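A side note on the WRITE_ONCE() conversion in efx_siena_change_mtu() above: dev->mtu may be read on fast paths without the locks held here, so the write is annotated to pair with READ_ONCE() on such readers. A minimal illustration (the helper name is hypothetical, not part of the patch):

static unsigned int example_read_mtu(const struct net_device *net_dev)
{
	/* Pairs with WRITE_ONCE(net_dev->mtu, new_mtu) on the writer side */
	return READ_ONCE(net_dev->mtu);
}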
diff --git a/drivers/net/ethernet/sfc/siena/ethtool.c b/drivers/net/ethernet/sfc/siena/ethtool.c
index 14dd3893bdef..8c3ebd0617fb 100644
--- a/drivers/net/ethernet/sfc/siena/ethtool.c
+++ b/drivers/net/ethernet/sfc/siena/ethtool.c
@@ -217,7 +217,8 @@ static int efx_ethtool_set_wol(struct net_device *net_dev,
}
static void efx_ethtool_get_fec_stats(struct net_device *net_dev,
- struct ethtool_fec_stats *fec_stats)
+ struct ethtool_fec_stats *fec_stats,
+ struct ethtool_fec_hist *hist)
{
struct efx_nic *efx = netdev_priv(net_dev);
@@ -226,21 +227,15 @@ static void efx_ethtool_get_fec_stats(struct net_device *net_dev,
}
static int efx_ethtool_get_ts_info(struct net_device *net_dev,
- struct ethtool_ts_info *ts_info)
+ struct kernel_ethtool_ts_info *ts_info)
{
struct efx_nic *efx = netdev_priv(net_dev);
- /* Software capabilities */
- ts_info->so_timestamping = (SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE);
- ts_info->phc_index = -1;
-
efx_siena_ptp_get_ts_info(efx, ts_info);
return 0;
}
const struct ethtool_ops efx_siena_ethtool_ops = {
- .cap_rss_ctx_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USECS_IRQ |
ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
@@ -270,6 +265,7 @@ const struct ethtool_ops efx_siena_ethtool_ops = {
.get_rxfh_key_size = efx_siena_ethtool_get_rxfh_key_size,
.get_rxfh = efx_siena_ethtool_get_rxfh,
.set_rxfh = efx_siena_ethtool_set_rxfh,
+ .get_rxfh_fields = efx_siena_ethtool_get_rxfh_fields,
.get_ts_info = efx_ethtool_get_ts_info,
.get_module_info = efx_siena_ethtool_get_module_info,
.get_module_eeprom = efx_siena_ethtool_get_module_eeprom,
diff --git a/drivers/net/ethernet/sfc/siena/ethtool_common.c b/drivers/net/ethernet/sfc/siena/ethtool_common.c
index 5f0a8127e967..47cd16a113cf 100644
--- a/drivers/net/ethernet/sfc/siena/ethtool_common.c
+++ b/drivers/net/ethernet/sfc/siena/ethtool_common.c
@@ -395,7 +395,7 @@ fail:
test->flags |= ETH_TEST_FL_FAILED;
}
-static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings)
+static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 **strings)
{
size_t n_stats = 0;
struct efx_channel *channel;
@@ -403,24 +403,22 @@ static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings)
efx_for_each_channel(channel, efx) {
if (efx_channel_has_tx_queues(channel)) {
n_stats++;
- if (strings != NULL) {
- snprintf(strings, ETH_GSTRING_LEN,
- "tx-%u.tx_packets",
- channel->tx_queue[0].queue /
- EFX_MAX_TXQ_PER_CHANNEL);
+ if (!strings)
+ continue;
- strings += ETH_GSTRING_LEN;
- }
+ ethtool_sprintf(strings, "tx-%u.tx_packets",
+ channel->tx_queue[0].queue /
+ EFX_MAX_TXQ_PER_CHANNEL);
}
}
efx_for_each_channel(channel, efx) {
if (efx_channel_has_rx_queue(channel)) {
n_stats++;
- if (strings != NULL) {
- snprintf(strings, ETH_GSTRING_LEN,
- "rx-%d.rx_packets", channel->channel);
- strings += ETH_GSTRING_LEN;
- }
+ if (!strings)
+ continue;
+
+ ethtool_sprintf(strings, "rx-%d.rx_packets",
+ channel->channel);
}
}
if (efx->xdp_tx_queue_count && efx->xdp_tx_queues) {
@@ -428,11 +426,11 @@ static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings)
for (xdp = 0; xdp < efx->xdp_tx_queue_count; xdp++) {
n_stats++;
- if (strings) {
- snprintf(strings, ETH_GSTRING_LEN,
- "tx-xdp-cpu-%hu.tx_packets", xdp);
- strings += ETH_GSTRING_LEN;
- }
+ if (!strings)
+ continue;
+
+ ethtool_sprintf(strings, "tx-xdp-cpu-%hu.tx_packets",
+ xdp);
}
}
@@ -464,15 +462,11 @@ void efx_siena_ethtool_get_strings(struct net_device *net_dev,
switch (string_set) {
case ETH_SS_STATS:
- strings += (efx->type->describe_stats(efx, strings) *
- ETH_GSTRING_LEN);
+ efx->type->describe_stats(efx, &strings);
for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++)
- strscpy(strings + i * ETH_GSTRING_LEN,
- efx_sw_stat_desc[i].name, ETH_GSTRING_LEN);
- strings += EFX_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN;
- strings += (efx_describe_per_queue_stats(efx, strings) *
- ETH_GSTRING_LEN);
- efx_siena_ptp_describe_stats(efx, strings);
+ ethtool_puts(&strings, efx_sw_stat_desc[i].name);
+ efx_describe_per_queue_stats(efx, &strings);
+ efx_siena_ptp_describe_stats(efx, &strings);
break;
case ETH_SS_TEST:
efx_ethtool_fill_self_tests(efx, NULL, strings, NULL);
@@ -807,6 +801,46 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
return rc;
}
+int efx_siena_ethtool_get_rxfh_fields(struct net_device *net_dev,
+ struct ethtool_rxfh_fields *info)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ __u64 data;
+
+ data = 0;
+ if (!efx_rss_active(&efx->rss_context)) /* No RSS */
+ goto out_setdata;
+
+ switch (info->flow_type) {
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ if (efx->rss_context.rx_hash_udp_4tuple)
+ data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 |
+ RXH_IP_SRC | RXH_IP_DST);
+ else
+ data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 |
+ RXH_IP_SRC | RXH_IP_DST);
+ break;
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_ESP_V6_FLOW:
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ default:
+ break;
+ }
+out_setdata:
+ info->data = data;
+ return 0;
+}
+
int efx_siena_ethtool_get_rxnfc(struct net_device *net_dev,
struct ethtool_rxnfc *info, u32 *rule_locs)
{
@@ -819,56 +853,6 @@ int efx_siena_ethtool_get_rxnfc(struct net_device *net_dev,
info->data = efx->n_rx_channels;
return 0;
- case ETHTOOL_GRXFH: {
- struct efx_rss_context *ctx = &efx->rss_context;
- __u64 data;
-
- mutex_lock(&efx->rss_lock);
- if (info->flow_type & FLOW_RSS && info->rss_context) {
- ctx = efx_siena_find_rss_context_entry(efx,
- info->rss_context);
- if (!ctx) {
- rc = -ENOENT;
- goto out_unlock;
- }
- }
-
- data = 0;
- if (!efx_rss_active(ctx)) /* No RSS */
- goto out_setdata_unlock;
-
- switch (info->flow_type & ~FLOW_RSS) {
- case UDP_V4_FLOW:
- case UDP_V6_FLOW:
- if (ctx->rx_hash_udp_4tuple)
- data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 |
- RXH_IP_SRC | RXH_IP_DST);
- else
- data = RXH_IP_SRC | RXH_IP_DST;
- break;
- case TCP_V4_FLOW:
- case TCP_V6_FLOW:
- data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 |
- RXH_IP_SRC | RXH_IP_DST);
- break;
- case SCTP_V4_FLOW:
- case SCTP_V6_FLOW:
- case AH_ESP_V4_FLOW:
- case AH_ESP_V6_FLOW:
- case IPV4_FLOW:
- case IPV6_FLOW:
- data = RXH_IP_SRC | RXH_IP_DST;
- break;
- default:
- break;
- }
-out_setdata_unlock:
- info->data = data;
-out_unlock:
- mutex_unlock(&efx->rss_lock);
- return rc;
- }
-
case ETHTOOL_GRXCLSRLCNT:
info->data = efx_filter_get_rx_id_limit(efx);
if (info->data == 0)
@@ -1164,47 +1148,12 @@ u32 efx_siena_ethtool_get_rxfh_key_size(struct net_device *net_dev)
return efx->type->rx_hash_key_size;
}
-static int efx_siena_ethtool_get_rxfh_context(struct net_device *net_dev,
- struct ethtool_rxfh_param *rxfh)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_rss_context *ctx;
- int rc = 0;
-
- if (!efx->type->rx_pull_rss_context_config)
- return -EOPNOTSUPP;
-
- mutex_lock(&efx->rss_lock);
- ctx = efx_siena_find_rss_context_entry(efx, rxfh->rss_context);
- if (!ctx) {
- rc = -ENOENT;
- goto out_unlock;
- }
- rc = efx->type->rx_pull_rss_context_config(efx, ctx);
- if (rc)
- goto out_unlock;
-
- rxfh->hfunc = ETH_RSS_HASH_TOP;
- if (rxfh->indir)
- memcpy(rxfh->indir, ctx->rx_indir_table,
- sizeof(ctx->rx_indir_table));
- if (rxfh->key)
- memcpy(rxfh->key, ctx->rx_hash_key,
- efx->type->rx_hash_key_size);
-out_unlock:
- mutex_unlock(&efx->rss_lock);
- return rc;
-}
-
int efx_siena_ethtool_get_rxfh(struct net_device *net_dev,
struct ethtool_rxfh_param *rxfh)
{
struct efx_nic *efx = netdev_priv(net_dev);
int rc;
- if (rxfh->rss_context)
- return efx_siena_ethtool_get_rxfh_context(net_dev, rxfh);
-
rc = efx->type->rx_pull_rss_config(efx);
if (rc)
return rc;
@@ -1219,70 +1168,6 @@ int efx_siena_ethtool_get_rxfh(struct net_device *net_dev,
return 0;
}
-static int efx_siena_ethtool_set_rxfh_context(struct net_device *net_dev,
- struct ethtool_rxfh_param *rxfh,
- struct netlink_ext_ack *extack)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- u32 *rss_context = &rxfh->rss_context;
- struct efx_rss_context *ctx;
- u32 *indir = rxfh->indir;
- bool allocated = false;
- u8 *key = rxfh->key;
- int rc;
-
- if (!efx->type->rx_push_rss_context_config)
- return -EOPNOTSUPP;
-
- mutex_lock(&efx->rss_lock);
-
- if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
- if (rxfh->rss_delete) {
- /* alloc + delete == Nothing to do */
- rc = -EINVAL;
- goto out_unlock;
- }
- ctx = efx_siena_alloc_rss_context_entry(efx);
- if (!ctx) {
- rc = -ENOMEM;
- goto out_unlock;
- }
- ctx->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
- /* Initialise indir table and key to defaults */
- efx_siena_set_default_rx_indir_table(efx, ctx);
- netdev_rss_key_fill(ctx->rx_hash_key, sizeof(ctx->rx_hash_key));
- allocated = true;
- } else {
- ctx = efx_siena_find_rss_context_entry(efx, *rss_context);
- if (!ctx) {
- rc = -ENOENT;
- goto out_unlock;
- }
- }
-
- if (rxfh->rss_delete) {
- /* delete this context */
- rc = efx->type->rx_push_rss_context_config(efx, ctx, NULL, NULL);
- if (!rc)
- efx_siena_free_rss_context_entry(ctx);
- goto out_unlock;
- }
-
- if (!key)
- key = ctx->rx_hash_key;
- if (!indir)
- indir = ctx->rx_indir_table;
-
- rc = efx->type->rx_push_rss_context_config(efx, ctx, indir, key);
- if (rc && allocated)
- efx_siena_free_rss_context_entry(ctx);
- else
- *rss_context = ctx->user_id;
-out_unlock:
- mutex_unlock(&efx->rss_lock);
- return rc;
-}
-
int efx_siena_ethtool_set_rxfh(struct net_device *net_dev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
@@ -1296,9 +1181,6 @@ int efx_siena_ethtool_set_rxfh(struct net_device *net_dev,
rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (rxfh->rss_context)
- efx_siena_ethtool_set_rxfh_context(net_dev, rxfh, extack);
-
if (!indir && !key)
return 0;
diff --git a/drivers/net/ethernet/sfc/siena/ethtool_common.h b/drivers/net/ethernet/sfc/siena/ethtool_common.h
index d674bab0f65b..278d69e920d9 100644
--- a/drivers/net/ethernet/sfc/siena/ethtool_common.h
+++ b/drivers/net/ethernet/sfc/siena/ethtool_common.h
@@ -46,6 +46,8 @@ int efx_siena_ethtool_get_rxfh(struct net_device *net_dev,
int efx_siena_ethtool_set_rxfh(struct net_device *net_dev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack);
+int efx_siena_ethtool_get_rxfh_fields(struct net_device *net_dev,
+ struct ethtool_rxfh_fields *info);
int efx_siena_ethtool_reset(struct net_device *net_dev, u32 *flags);
int efx_siena_ethtool_get_module_eeprom(struct net_device *net_dev,
struct ethtool_eeprom *ee,
diff --git a/drivers/net/ethernet/sfc/siena/farch.c b/drivers/net/ethernet/sfc/siena/farch.c
index 89ccd65c978b..562a038e38a7 100644
--- a/drivers/net/ethernet/sfc/siena/farch.c
+++ b/drivers/net/ethernet/sfc/siena/farch.c
@@ -1708,7 +1708,7 @@ void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
if (efx->vf_count > vf_limit) {
netif_err(efx, probe, efx->net_dev,
- "Reducing VF count from from %d to %d\n",
+ "Reducing VF count from %d to %d\n",
efx->vf_count, vf_limit);
efx->vf_count = vf_limit;
}
diff --git a/drivers/net/ethernet/sfc/siena/mcdi.c b/drivers/net/ethernet/sfc/siena/mcdi.c
index 3f7899daa86a..c8f0fb43e285 100644
--- a/drivers/net/ethernet/sfc/siena/mcdi.c
+++ b/drivers/net/ethernet/sfc/siena/mcdi.c
@@ -534,7 +534,7 @@ static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout)
* of it aborting the next request.
*/
if (!timeout)
- del_timer_sync(&mcdi->async_timer);
+ timer_delete_sync(&mcdi->async_timer);
spin_lock(&mcdi->async_lock);
async = list_first_entry(&mcdi->async_list,
@@ -609,7 +609,7 @@ static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
static void efx_mcdi_timeout_async(struct timer_list *t)
{
- struct efx_mcdi_iface *mcdi = from_timer(mcdi, t, async_timer);
+ struct efx_mcdi_iface *mcdi = timer_container_of(mcdi, t, async_timer);
efx_mcdi_complete_async(mcdi, true);
}
@@ -1145,7 +1145,7 @@ void efx_siena_mcdi_flush_async(struct efx_nic *efx)
/* We must be in poll or fail mode so no more requests can be queued */
BUG_ON(mcdi->mode == MCDI_MODE_EVENTS);
- del_timer_sync(&mcdi->async_timer);
+ timer_delete_sync(&mcdi->async_timer);
/* If a request is still running, make sure we give the MC
* time to complete it so that the response won't overwrite our
diff --git a/drivers/net/ethernet/sfc/siena/mcdi_pcol.h b/drivers/net/ethernet/sfc/siena/mcdi_pcol.h
index a3cc8b7ec732..b81b0aa460d2 100644
--- a/drivers/net/ethernet/sfc/siena/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/siena/mcdi_pcol.h
@@ -6704,16 +6704,16 @@
#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
-/* interpretation is is sensor-specific. */
+/* interpretation is sensor-specific. */
#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4
#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_LEN 4
-/* interpretation is is sensor-specific. */
+/* interpretation is sensor-specific. */
#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8
#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_LEN 4
-/* interpretation is is sensor-specific. */
+/* interpretation is sensor-specific. */
#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12
#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_LEN 4
-/* interpretation is is sensor-specific. */
+/* interpretation is sensor-specific. */
#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16
#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_LEN 4
@@ -7823,7 +7823,7 @@
* handles as returned by MC_CMD_DYNAMIC_SENSORS_LIST
*
* Any handles which do not correspond to a sensor currently managed by the MC
- * will be dropped from from the response. This may happen when a sensor table
+ * will be dropped from the response. This may happen when a sensor table
* update is in progress, and effectively means the set of usable sensors is
* the intersection between the sets of sensors known to the driver and the MC.
*
@@ -7872,7 +7872,7 @@
* provided should be treated as erroneous.
*
* Any handles which do not correspond to a sensor currently managed by the MC
- * will be dropped from from the response. This may happen when a sensor table
+ * will be dropped from the response. This may happen when a sensor table
* update is in progress, and effectively means the set of usable sensors is
* the intersection between the sets of sensors known to the driver and the MC.
*
diff --git a/drivers/net/ethernet/sfc/siena/net_driver.h b/drivers/net/ethernet/sfc/siena/net_driver.h
index 94152f595acd..4cf556782133 100644
--- a/drivers/net/ethernet/sfc/siena/net_driver.h
+++ b/drivers/net/ethernet/sfc/siena/net_driver.h
@@ -384,7 +384,6 @@ struct efx_rx_page_state {
* @recycle_count: RX buffer recycle counter.
* @slow_fill: Timer used to defer efx_nic_generate_fill_event().
* @xdp_rxq_info: XDP specific RX queue information.
- * @xdp_rxq_info_valid: Is xdp_rxq_info valid data?.
*/
struct efx_rx_queue {
struct efx_nic *efx;
@@ -417,7 +416,6 @@ struct efx_rx_queue {
/* Statistics to supplement MAC stats */
unsigned long rx_packets;
struct xdp_rxq_info xdp_rxq_info;
- bool xdp_rxq_info_valid;
};
enum efx_sync_events_state {
@@ -707,20 +705,14 @@ struct vfdi_status;
/* The reserved RSS context value */
#define EFX_MCDI_RSS_CONTEXT_INVALID 0xffffffff
/**
- * struct efx_rss_context - A user-defined RSS context for filtering
- * @list: node of linked list on which this struct is stored
- * @context_id: the RSS_CONTEXT_ID returned by MC firmware, or
- * %EFX_MCDI_RSS_CONTEXT_INVALID if this context is not present on the NIC.
- * For Siena, 0 if RSS is active, else %EFX_MCDI_RSS_CONTEXT_INVALID.
- * @user_id: the rss_context ID exposed to userspace over ethtool.
+ * struct efx_rss_context - An RSS context for filtering
+ * @context_id: 0 if RSS is active, else %EFX_MCDI_RSS_CONTEXT_INVALID.
* @rx_hash_udp_4tuple: UDP 4-tuple hashing enabled
* @rx_hash_key: Toeplitz hash key for this RSS context
* @indir_table: Indirection table for this RSS context
*/
struct efx_rss_context {
- struct list_head list;
u32 context_id;
- u32 user_id;
bool rx_hash_udp_4tuple;
u8 rx_hash_key[40];
u32 rx_indir_table[128];
@@ -759,6 +751,7 @@ struct efx_arfs_rule {
/**
* struct efx_async_filter_insertion - Request to asynchronously insert a filter
* @net_dev: Reference to the netdevice
+ * @net_dev_tracker: reference tracker entry for @net_dev
* @spec: The filter to insert
* @work: Workitem for this request
* @rxq_index: Identifies the channel for which this request was made
@@ -766,6 +759,7 @@ struct efx_arfs_rule {
*/
struct efx_async_filter_insertion {
struct net_device *net_dev;
+ netdevice_tracker net_dev_tracker;
struct efx_filter_spec spec;
struct work_struct work;
u16 rxq_index;
@@ -851,9 +845,7 @@ enum efx_xdp_tx_queues_mode {
* @rx_packet_ts_offset: Offset of timestamp from start of packet data
* (valid only if channel->sync_timestamps_enabled; always negative)
* @rx_scatter: Scatter mode enabled for receives
- * @rss_context: Main RSS context. Its @list member is the head of the list of
- * RSS contexts created by user requests
- * @rss_lock: Protects custom RSS context software state in @rss_context.list
+ * @rss_context: Main RSS context
* @vport_id: The function's vport ID, only relevant for PFs
* @int_error_count: Number of internal errors seen recently
* @int_error_expire: Time at which error count will be expired
@@ -1018,7 +1010,6 @@ struct efx_nic {
int rx_packet_ts_offset;
bool rx_scatter;
struct efx_rss_context rss_context;
- struct mutex rss_lock;
u32 vport_id;
unsigned int_error_count;
@@ -1220,10 +1211,6 @@ struct efx_udp_tunnel {
* @tx_enqueue: Add an SKB to TX queue
* @rx_push_rss_config: Write RSS hash key and indirection table to the NIC
* @rx_pull_rss_config: Read RSS hash key and indirection table back from the NIC
- * @rx_push_rss_context_config: Write RSS hash key and indirection table for
- * user RSS context to the NIC
- * @rx_pull_rss_context_config: Read RSS hash key and indirection table for user
- * RSS context back from the NIC
* @rx_probe: Allocate resources for RX queue
* @rx_init: Initialise RX queue on the NIC
* @rx_remove: Free resources for RX queue
@@ -1320,7 +1307,7 @@ struct efx_nic_type {
void (*finish_flush)(struct efx_nic *efx);
void (*prepare_flr)(struct efx_nic *efx);
void (*finish_flr)(struct efx_nic *efx);
- size_t (*describe_stats)(struct efx_nic *efx, u8 *names);
+ size_t (*describe_stats)(struct efx_nic *efx, u8 **names);
size_t (*update_stats)(struct efx_nic *efx, u64 *full_stats,
struct rtnl_link_stats64 *core_stats);
size_t (*update_stats_atomic)(struct efx_nic *efx, u64 *full_stats,
@@ -1366,13 +1353,6 @@ struct efx_nic_type {
int (*rx_push_rss_config)(struct efx_nic *efx, bool user,
const u32 *rx_indir_table, const u8 *key);
int (*rx_pull_rss_config)(struct efx_nic *efx);
- int (*rx_push_rss_context_config)(struct efx_nic *efx,
- struct efx_rss_context *ctx,
- const u32 *rx_indir_table,
- const u8 *key);
- int (*rx_pull_rss_context_config)(struct efx_nic *efx,
- struct efx_rss_context *ctx);
- void (*rx_restore_rss_contexts)(struct efx_nic *efx);
int (*rx_probe)(struct efx_rx_queue *rx_queue);
void (*rx_init)(struct efx_rx_queue *rx_queue);
void (*rx_remove)(struct efx_rx_queue *rx_queue);
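The rewritten @context_id documentation above implies a simple activity check for Siena, where the value is 0 while RSS is active and %EFX_MCDI_RSS_CONTEXT_INVALID otherwise. A sketch equivalent to the efx_rss_active() test used in ethtool_common.c earlier (the name example_rss_active is illustrative):

static inline bool example_rss_active(const struct efx_rss_context *ctx)
{
	/* A valid context ID (0 on Siena) means RSS is active;
	 * EFX_MCDI_RSS_CONTEXT_INVALID means it is not.
	 */
	return ctx->context_id != EFX_MCDI_RSS_CONTEXT_INVALID;
}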
diff --git a/drivers/net/ethernet/sfc/siena/nic.c b/drivers/net/ethernet/sfc/siena/nic.c
index 0ea0433a6230..32fce70085e3 100644
--- a/drivers/net/ethernet/sfc/siena/nic.c
+++ b/drivers/net/ethernet/sfc/siena/nic.c
@@ -449,20 +449,20 @@ void efx_siena_get_regs(struct efx_nic *efx, void *buf)
* Returns the number of visible statistics, i.e. the number of set
* bits in the first @count bits of @mask for which a name is defined.
*/
-size_t efx_siena_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
- const unsigned long *mask, u8 *names)
+size_t efx_siena_describe_stats(const struct efx_hw_stat_desc *desc,
+ size_t count, const unsigned long *mask,
+ u8 **names)
{
size_t visible = 0;
size_t index;
for_each_set_bit(index, mask, count) {
if (desc[index].name) {
- if (names) {
- strscpy(names, desc[index].name,
- ETH_GSTRING_LEN);
- names += ETH_GSTRING_LEN;
- }
++visible;
+ if (!names)
+ continue;
+
+ ethtool_puts(names, desc[index].name);
}
}
diff --git a/drivers/net/ethernet/sfc/siena/nic_common.h b/drivers/net/ethernet/sfc/siena/nic_common.h
index 3af0405eeaa4..b7fbe198008d 100644
--- a/drivers/net/ethernet/sfc/siena/nic_common.h
+++ b/drivers/net/ethernet/sfc/siena/nic_common.h
@@ -239,8 +239,9 @@ void efx_siena_get_regs(struct efx_nic *efx, void *buf);
#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))
-size_t efx_siena_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
- const unsigned long *mask, u8 *names);
+size_t efx_siena_describe_stats(const struct efx_hw_stat_desc *desc,
+ size_t count, const unsigned long *mask,
+ u8 **names);
void efx_siena_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
const unsigned long *mask, u64 *stats,
const void *dma_buf, bool accumulate);
diff --git a/drivers/net/ethernet/sfc/siena/ptp.c b/drivers/net/ethernet/sfc/siena/ptp.c
index 4b5e2f0ba350..062c77c92077 100644
--- a/drivers/net/ethernet/sfc/siena/ptp.c
+++ b/drivers/net/ethernet/sfc/siena/ptp.c
@@ -393,7 +393,7 @@ static const unsigned long efx_ptp_stat_mask[] = {
[0 ... BITS_TO_LONGS(PTP_STAT_COUNT) - 1] = ~0UL,
};
-size_t efx_siena_ptp_describe_stats(struct efx_nic *efx, u8 *strings)
+size_t efx_siena_ptp_describe_stats(struct efx_nic *efx, u8 **strings)
{
if (!efx->ptp_data)
return 0;
@@ -897,7 +897,7 @@ static void efx_ptp_read_timeset(MCDI_DECLARE_STRUCT_PTR(data),
timeset->host_start = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTSTART);
timeset->major = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_MAJOR);
timeset->minor = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_MINOR);
- timeset->host_end = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTEND),
+ timeset->host_end = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTEND);
timeset->wait = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_WAITNS);
/* Ignore seconds */
@@ -1780,7 +1780,7 @@ static int efx_ptp_ts_init(struct efx_nic *efx,
}
void efx_siena_ptp_get_ts_info(struct efx_nic *efx,
- struct ethtool_ts_info *ts_info)
+ struct kernel_ethtool_ts_info *ts_info)
{
struct efx_ptp_data *ptp = efx->ptp_data;
struct efx_nic *primary = efx->primary;
diff --git a/drivers/net/ethernet/sfc/siena/ptp.h b/drivers/net/ethernet/sfc/siena/ptp.h
index 6352f84424f6..54840036ab67 100644
--- a/drivers/net/ethernet/sfc/siena/ptp.h
+++ b/drivers/net/ethernet/sfc/siena/ptp.h
@@ -12,7 +12,7 @@
#include <linux/net_tstamp.h>
#include "net_driver.h"
-struct ethtool_ts_info;
+struct kernel_ethtool_ts_info;
void efx_siena_ptp_defer_probe_with_channel(struct efx_nic *efx);
struct efx_channel *efx_siena_ptp_channel(struct efx_nic *efx);
int efx_siena_ptp_set_ts_config(struct efx_nic *efx,
@@ -21,14 +21,14 @@ int efx_siena_ptp_set_ts_config(struct efx_nic *efx,
int efx_siena_ptp_get_ts_config(struct efx_nic *efx,
struct kernel_hwtstamp_config *config);
void efx_siena_ptp_get_ts_info(struct efx_nic *efx,
- struct ethtool_ts_info *ts_info);
+ struct kernel_ethtool_ts_info *ts_info);
bool efx_siena_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
int efx_siena_ptp_get_mode(struct efx_nic *efx);
int efx_siena_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
unsigned int new_mode);
int efx_siena_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
void efx_siena_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
-size_t efx_siena_ptp_describe_stats(struct efx_nic *efx, u8 *strings);
+size_t efx_siena_ptp_describe_stats(struct efx_nic *efx, u8 **strings);
size_t efx_siena_ptp_update_stats(struct efx_nic *efx, u64 *stats);
void efx_siena_time_sync_event(struct efx_channel *channel, efx_qword_t *ev);
void __efx_siena_rx_skb_attach_timestamp(struct efx_channel *channel,
diff --git a/drivers/net/ethernet/sfc/siena/rx_common.c b/drivers/net/ethernet/sfc/siena/rx_common.c
index 219fb358a646..4ae09505e417 100644
--- a/drivers/net/ethernet/sfc/siena/rx_common.c
+++ b/drivers/net/ethernet/sfc/siena/rx_common.c
@@ -268,8 +268,6 @@ void efx_siena_init_rx_queue(struct efx_rx_queue *rx_queue)
"Failure to initialise XDP queue information rc=%d\n",
rc);
efx->xdp_rxq_info_failed = true;
- } else {
- rx_queue->xdp_rxq_info_valid = true;
}
/* Set up RX descriptor ring */
@@ -284,7 +282,7 @@ void efx_siena_fini_rx_queue(struct efx_rx_queue *rx_queue)
netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
"shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));
- del_timer_sync(&rx_queue->slow_fill);
+ timer_delete_sync(&rx_queue->slow_fill);
/* Release RX buffers from the current read ptr to the write ptr */
if (rx_queue->buffer) {
@@ -299,10 +297,8 @@ void efx_siena_fini_rx_queue(struct efx_rx_queue *rx_queue)
efx_fini_rx_recycle_ring(rx_queue);
- if (rx_queue->xdp_rxq_info_valid)
+ if (xdp_rxq_info_is_reg(&rx_queue->xdp_rxq_info))
xdp_rxq_info_unreg(&rx_queue->xdp_rxq_info);
-
- rx_queue->xdp_rxq_info_valid = false;
}
void efx_siena_remove_rx_queue(struct efx_rx_queue *rx_queue)
@@ -349,7 +345,8 @@ void efx_siena_free_rx_buffers(struct efx_rx_queue *rx_queue,
void efx_siena_rx_slow_fill(struct timer_list *t)
{
- struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);
+ struct efx_rx_queue *rx_queue = timer_container_of(rx_queue, t,
+ slow_fill);
/* Post an event to cause NAPI to run and refill the queue */
efx_nic_generate_fill_event(rx_queue);
@@ -558,62 +555,6 @@ efx_siena_rx_packet_gro(struct efx_channel *channel,
napi_gro_frags(napi);
}
-/* RSS contexts. We're using linked lists and crappy O(n) algorithms, because
- * (a) this is an infrequent control-plane operation and (b) n is small (max 64)
- */
-struct efx_rss_context *efx_siena_alloc_rss_context_entry(struct efx_nic *efx)
-{
- struct list_head *head = &efx->rss_context.list;
- struct efx_rss_context *ctx, *new;
- u32 id = 1; /* Don't use zero, that refers to the master RSS context */
-
- WARN_ON(!mutex_is_locked(&efx->rss_lock));
-
- /* Search for first gap in the numbering */
- list_for_each_entry(ctx, head, list) {
- if (ctx->user_id != id)
- break;
- id++;
- /* Check for wrap. If this happens, we have nearly 2^32
- * allocated RSS contexts, which seems unlikely.
- */
- if (WARN_ON_ONCE(!id))
- return NULL;
- }
-
- /* Create the new entry */
- new = kmalloc(sizeof(*new), GFP_KERNEL);
- if (!new)
- return NULL;
- new->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
- new->rx_hash_udp_4tuple = false;
-
- /* Insert the new entry into the gap */
- new->user_id = id;
- list_add_tail(&new->list, &ctx->list);
- return new;
-}
-
-struct efx_rss_context *efx_siena_find_rss_context_entry(struct efx_nic *efx,
- u32 id)
-{
- struct list_head *head = &efx->rss_context.list;
- struct efx_rss_context *ctx;
-
- WARN_ON(!mutex_is_locked(&efx->rss_lock));
-
- list_for_each_entry(ctx, head, list)
- if (ctx->user_id == id)
- return ctx;
- return NULL;
-}
-
-void efx_siena_free_rss_context_entry(struct efx_rss_context *ctx)
-{
- list_del(&ctx->list);
- kfree(ctx);
-}
-
void efx_siena_set_default_rx_indir_table(struct efx_nic *efx,
struct efx_rss_context *ctx)
{
@@ -944,7 +885,7 @@ static void efx_filter_rfs_work(struct work_struct *data)
/* Release references */
clear_bit(slot_idx, &efx->rps_slot_map);
- dev_put(req->net_dev);
+ netdev_put(req->net_dev, &req->net_dev_tracker);
}
int efx_siena_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
@@ -1036,7 +977,8 @@ int efx_siena_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
}
/* Queue the request */
- dev_hold(req->net_dev = net_dev);
+ req->net_dev = net_dev;
+ netdev_hold(req->net_dev, &req->net_dev_tracker, GFP_ATOMIC);
INIT_WORK(&req->work, efx_filter_rfs_work);
req->rxq_index = rxq_index;
req->flow_id = flow_id;
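The dev_hold()/dev_put() to netdev_hold()/netdev_put() conversion above (and the matching change in the main sfc driver earlier) attaches the reference to the new net_dev_tracker field, so CONFIG_NET_DEV_REFCNT_TRACKER builds can attribute a leaked reference to its owner. A condensed sketch of the pairing (the helper name is illustrative, not part of the patch):

static void example_track_net_dev(struct efx_async_filter_insertion *req,
				  struct net_device *net_dev)
{
	req->net_dev = net_dev;
	netdev_hold(req->net_dev, &req->net_dev_tracker, GFP_ATOMIC);
	/* ... deferred work runs against req->net_dev ... */
	netdev_put(req->net_dev, &req->net_dev_tracker);
}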
diff --git a/drivers/net/ethernet/sfc/siena/rx_common.h b/drivers/net/ethernet/sfc/siena/rx_common.h
index 6b37f83ecb30..f90a8320d396 100644
--- a/drivers/net/ethernet/sfc/siena/rx_common.h
+++ b/drivers/net/ethernet/sfc/siena/rx_common.h
@@ -78,10 +78,6 @@ efx_siena_rx_packet_gro(struct efx_channel *channel,
struct efx_rx_buffer *rx_buf,
unsigned int n_frags, u8 *eh, __wsum csum);
-struct efx_rss_context *efx_siena_alloc_rss_context_entry(struct efx_nic *efx);
-struct efx_rss_context *efx_siena_find_rss_context_entry(struct efx_nic *efx,
- u32 id);
-void efx_siena_free_rss_context_entry(struct efx_rss_context *ctx);
void efx_siena_set_default_rx_indir_table(struct efx_nic *efx,
struct efx_rss_context *ctx);
diff --git a/drivers/net/ethernet/sfc/siena/siena.c b/drivers/net/ethernet/sfc/siena/siena.c
index ca33dc08e555..49f0c8a1a90a 100644
--- a/drivers/net/ethernet/sfc/siena/siena.c
+++ b/drivers/net/ethernet/sfc/siena/siena.c
@@ -545,7 +545,7 @@ static const unsigned long siena_stat_mask[] = {
[0 ... BITS_TO_LONGS(SIENA_STAT_COUNT) - 1] = ~0UL,
};
-static size_t siena_describe_nic_stats(struct efx_nic *efx, u8 *names)
+static size_t siena_describe_nic_stats(struct efx_nic *efx, u8 **names)
{
return efx_siena_describe_stats(siena_stat_desc, SIENA_STAT_COUNT,
siena_stat_mask, names);
diff --git a/drivers/net/ethernet/sfc/tc.c b/drivers/net/ethernet/sfc/tc.c
index 82e8891a619a..fa94aa3cd5fe 100644
--- a/drivers/net/ethernet/sfc/tc.c
+++ b/drivers/net/ethernet/sfc/tc.c
@@ -273,11 +273,10 @@ static int efx_tc_flower_parse_match(struct efx_nic *efx,
match->value.ip_firstfrag = fm.key->flags & FLOW_DIS_FIRST_FRAG;
match->mask.ip_firstfrag = true;
}
- if (fm.mask->flags & ~(FLOW_DIS_IS_FRAGMENT | FLOW_DIS_FIRST_FRAG)) {
- NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported match on control.flags %#x",
- fm.mask->flags);
+ if (!flow_rule_is_supp_control_flags(FLOW_DIS_IS_FRAGMENT |
+ FLOW_DIS_FIRST_FRAG,
+ fm.mask->flags, extack))
return -EOPNOTSUPP;
- }
}
if (dissector->used_keys &
~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
@@ -388,11 +387,8 @@ static int efx_tc_flower_parse_match(struct efx_nic *efx,
struct flow_match_control fm;
flow_rule_match_enc_control(rule, &fm);
- if (fm.mask->flags) {
- NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported match on enc_control.flags %#x",
- fm.mask->flags);
+ if (flow_rule_has_enc_control_flags(fm.mask->flags, extack))
return -EOPNOTSUPP;
- }
if (!IS_ALL_ONES(fm.mask->addr_type)) {
NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported enc addr_type mask %u (key %u)",
fm.mask->addr_type,
@@ -1047,7 +1043,7 @@ static int efx_tc_flower_handle_lhs_actions(struct efx_nic *efx,
return -EOPNOTSUPP;
}
if (fa->ct.action) {
- NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled ct.action %u for LHS rule\n",
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled ct.action %u for LHS rule",
fa->ct.action);
return -EOPNOTSUPP;
}
@@ -1060,7 +1056,7 @@ static int efx_tc_flower_handle_lhs_actions(struct efx_nic *efx,
act->zone = ct_zone;
break;
default:
- NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled action %u for LHS rule\n",
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled action %u for LHS rule",
fa->id);
return -EOPNOTSUPP;
}
@@ -1585,7 +1581,7 @@ static int efx_tc_flower_replace_foreign_lhs(struct efx_nic *efx,
type = efx_tc_indr_netdev_type(net_dev);
if (type == EFX_ENCAP_TYPE_NONE) {
- NL_SET_ERR_MSG_MOD(extack, "Egress encap match on unsupported tunnel device\n");
+ NL_SET_ERR_MSG_MOD(extack, "Egress encap match on unsupported tunnel device");
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/sfc/tc_conntrack.c b/drivers/net/ethernet/sfc/tc_conntrack.c
index d90206f27161..c0603f54cec3 100644
--- a/drivers/net/ethernet/sfc/tc_conntrack.c
+++ b/drivers/net/ethernet/sfc/tc_conntrack.c
@@ -16,7 +16,7 @@ static int efx_tc_flow_block(enum tc_setup_type type, void *type_data,
void *cb_priv);
static const struct rhashtable_params efx_tc_ct_zone_ht_params = {
- .key_len = offsetof(struct efx_tc_ct_zone, linkage),
+ .key_len = sizeof_field(struct efx_tc_ct_zone, zone),
.key_offset = 0,
.head_offset = offsetof(struct efx_tc_ct_zone, linkage),
};
diff --git a/drivers/net/ethernet/sfc/tc_counters.c b/drivers/net/ethernet/sfc/tc_counters.c
index c44088424323..a421b0123506 100644
--- a/drivers/net/ethernet/sfc/tc_counters.c
+++ b/drivers/net/ethernet/sfc/tc_counters.c
@@ -249,7 +249,7 @@ struct efx_tc_counter_index *efx_tc_flower_get_counter_index(
&ctr->linkage,
efx_tc_counter_id_ht_params);
kfree(ctr);
- return (void *)cnt; /* it's an ERR_PTR */
+ return ERR_CAST(cnt);
}
ctr->cnt = cnt;
refcount_set(&ctr->ref, 1);
diff --git a/drivers/net/ethernet/sfc/tc_encap_actions.c b/drivers/net/ethernet/sfc/tc_encap_actions.c
index 87443f9dfd22..eef06e48185d 100644
--- a/drivers/net/ethernet/sfc/tc_encap_actions.c
+++ b/drivers/net/ethernet/sfc/tc_encap_actions.c
@@ -11,6 +11,8 @@
#include "tc_encap_actions.h"
#include "tc.h"
#include "mae.h"
+#include <net/flow.h>
+#include <net/inet_dscp.h>
#include <net/vxlan.h>
#include <net/geneve.h>
#include <net/netevent.h>
@@ -99,7 +101,7 @@ static int efx_bind_neigh(struct efx_nic *efx,
case EFX_ENCAP_TYPE_GENEVE:
flow4.flowi4_proto = IPPROTO_UDP;
flow4.fl4_dport = encap->key.tp_dst;
- flow4.flowi4_tos = encap->key.tos;
+ flow4.flowi4_dscp = inet_dsfield_to_dscp(encap->key.tos);
flow4.daddr = encap->key.u.ipv4.dst;
flow4.saddr = encap->key.u.ipv4.src;
break;
@@ -442,7 +444,7 @@ static void efx_tc_update_encap(struct efx_nic *efx,
rule = container_of(acts, struct efx_tc_flow_rule, acts);
if (rule->fallback)
fallback = rule->fallback;
- else /* fallback fallback: deliver to PF */
+ else /* fallback of the fallback: deliver to PF */
fallback = &efx->tc->facts.pf;
rc = efx_mae_update_rule(efx, fallback->fw_id,
rule->fw_id);
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index fe2d476028e7..4dff19b6ef17 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -49,14 +49,6 @@ static inline u8 *efx_tx_get_copy_buffer(struct efx_tx_queue *tx_queue,
return (u8 *)page_buf->addr + offset;
}
-u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
- struct efx_tx_buffer *buffer, size_t len)
-{
- if (len > EFX_TX_CB_SIZE)
- return NULL;
- return efx_tx_get_copy_buffer(tx_queue, buffer);
-}
-
static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
{
/* We need to consider all queues that the net core sees as one */
@@ -553,6 +545,7 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
void efx_xmit_done_single(struct efx_tx_queue *tx_queue)
{
+ unsigned int xdp_pkts_compl = 0, xdp_bytes_compl = 0;
unsigned int pkts_compl = 0, bytes_compl = 0;
unsigned int efv_pkts_compl = 0;
unsigned int read_ptr;
@@ -577,7 +570,8 @@ void efx_xmit_done_single(struct efx_tx_queue *tx_queue)
if (buffer->flags & EFX_TX_BUF_SKB)
finished = true;
efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl,
- &efv_pkts_compl);
+ &efv_pkts_compl, &xdp_pkts_compl,
+ &xdp_bytes_compl);
++tx_queue->read_count;
read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
@@ -585,6 +579,8 @@ void efx_xmit_done_single(struct efx_tx_queue *tx_queue)
tx_queue->pkts_compl += pkts_compl;
tx_queue->bytes_compl += bytes_compl;
+ tx_queue->complete_xdp_packets += xdp_pkts_compl;
+ tx_queue->complete_xdp_bytes += xdp_bytes_compl;
EFX_WARN_ON_PARANOID(pkts_compl + efv_pkts_compl != 1);
diff --git a/drivers/net/ethernet/sfc/tx.h b/drivers/net/ethernet/sfc/tx.h
index f2c4d2f89919..f882749af8c3 100644
--- a/drivers/net/ethernet/sfc/tx.h
+++ b/drivers/net/ethernet/sfc/tx.h
@@ -15,9 +15,6 @@
unsigned int efx_tx_limit_len(struct efx_tx_queue *tx_queue,
dma_addr_t dma_addr, unsigned int len);
-u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
- struct efx_tx_buffer *buffer, size_t len);
-
/* What TXQ type will satisfy the checksum offloads required for this skb? */
static inline unsigned int efx_tx_csum_type_skb(struct sk_buff *skb)
{
diff --git a/drivers/net/ethernet/sfc/tx_common.c b/drivers/net/ethernet/sfc/tx_common.c
index 2adb132b2f7e..a22a0d634ffc 100644
--- a/drivers/net/ethernet/sfc/tx_common.c
+++ b/drivers/net/ethernet/sfc/tx_common.c
@@ -86,6 +86,11 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
tx_queue->completed_timestamp_major = 0;
tx_queue->completed_timestamp_minor = 0;
+ tx_queue->old_complete_packets = tx_queue->complete_packets;
+ tx_queue->old_complete_bytes = tx_queue->complete_bytes;
+ tx_queue->old_tso_bursts = tx_queue->tso_bursts;
+ tx_queue->old_tso_packets = tx_queue->tso_packets;
+
tx_queue->xdp_tx = efx_channel_is_xdp_tx(tx_queue->channel);
tx_queue->tso_version = 0;
@@ -109,12 +114,14 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
/* Free any buffers left in the ring */
while (tx_queue->read_count != tx_queue->write_count) {
+ unsigned int xdp_pkts_compl = 0, xdp_bytes_compl = 0;
unsigned int pkts_compl = 0, bytes_compl = 0;
unsigned int efv_pkts_compl = 0;
buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl,
- &efv_pkts_compl);
+ &efv_pkts_compl, &xdp_pkts_compl,
+ &xdp_bytes_compl);
++tx_queue->read_count;
}
@@ -150,7 +157,9 @@ void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
struct efx_tx_buffer *buffer,
unsigned int *pkts_compl,
unsigned int *bytes_compl,
- unsigned int *efv_pkts_compl)
+ unsigned int *efv_pkts_compl,
+ unsigned int *xdp_pkts,
+ unsigned int *xdp_bytes)
{
if (buffer->unmap_len) {
struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
@@ -195,6 +204,10 @@ void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
tx_queue->queue, tx_queue->read_count);
} else if (buffer->flags & EFX_TX_BUF_XDP) {
xdp_return_frame_rx_napi(buffer->xdpf);
+ if (xdp_pkts)
+ (*xdp_pkts)++;
+ if (xdp_bytes)
+ (*xdp_bytes) += buffer->xdpf->len;
}
buffer->len = 0;
@@ -210,7 +223,9 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
unsigned int index,
unsigned int *pkts_compl,
unsigned int *bytes_compl,
- unsigned int *efv_pkts_compl)
+ unsigned int *efv_pkts_compl,
+ unsigned int *xdp_pkts,
+ unsigned int *xdp_bytes)
{
struct efx_nic *efx = tx_queue->efx;
unsigned int stop_index, read_ptr;
@@ -230,7 +245,7 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
}
efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl,
- efv_pkts_compl);
+ efv_pkts_compl, xdp_pkts, xdp_bytes);
++tx_queue->read_count;
read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
@@ -253,15 +268,18 @@ void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue)
int efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
unsigned int fill_level, pkts_compl = 0, bytes_compl = 0;
+ unsigned int xdp_pkts_compl = 0, xdp_bytes_compl = 0;
unsigned int efv_pkts_compl = 0;
struct efx_nic *efx = tx_queue->efx;
EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask);
efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl,
- &efv_pkts_compl);
+ &efv_pkts_compl, &xdp_pkts_compl, &xdp_bytes_compl);
tx_queue->pkts_compl += pkts_compl;
tx_queue->bytes_compl += bytes_compl;
+ tx_queue->complete_xdp_packets += xdp_pkts_compl;
+ tx_queue->complete_xdp_bytes += xdp_bytes_compl;
if (pkts_compl + efv_pkts_compl > 1)
++tx_queue->merge_events;
@@ -290,6 +308,8 @@ int efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
unsigned int insert_count)
{
+ unsigned int xdp_bytes_compl = 0;
+ unsigned int xdp_pkts_compl = 0;
unsigned int efv_pkts_compl = 0;
struct efx_tx_buffer *buffer;
unsigned int bytes_compl = 0;
@@ -300,7 +320,8 @@ void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
--tx_queue->insert_count;
buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl,
- &efv_pkts_compl);
+ &efv_pkts_compl, &xdp_pkts_compl,
+ &xdp_bytes_compl);
}
}
diff --git a/drivers/net/ethernet/sfc/tx_common.h b/drivers/net/ethernet/sfc/tx_common.h
index 1e9f42938aac..039eefafba23 100644
--- a/drivers/net/ethernet/sfc/tx_common.h
+++ b/drivers/net/ethernet/sfc/tx_common.h
@@ -20,7 +20,9 @@ void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
struct efx_tx_buffer *buffer,
unsigned int *pkts_compl,
unsigned int *bytes_compl,
- unsigned int *efv_pkts_compl);
+ unsigned int *efv_pkts_compl,
+ unsigned int *xdp_pkts,
+ unsigned int *xdp_bytes);
static inline bool efx_tx_buffer_in_use(struct efx_tx_buffer *buffer)
{
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index 98d0b561a057..39731069d99e 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -570,7 +570,7 @@ static inline void ioc3_setup_duplex(struct ioc3_private *ip)
static void ioc3_timer(struct timer_list *t)
{
- struct ioc3_private *ip = from_timer(ip, t, ioc3_timer);
+ struct ioc3_private *ip = timer_container_of(ip, t, ioc3_timer);
/* Print the link status if it has changed */
mii_check_media(&ip->mii, 1, 0);
@@ -718,7 +718,7 @@ static void ioc3_init(struct net_device *dev)
struct ioc3_private *ip = netdev_priv(dev);
struct ioc3_ethregs *regs = ip->regs;
- del_timer_sync(&ip->ioc3_timer); /* Kill if running */
+ timer_delete_sync(&ip->ioc3_timer); /* Kill if running */
writel(EMCR_RST, &regs->emcr); /* Reset */
readl(&regs->emcr); /* Flush WB */
@@ -801,7 +801,7 @@ static int ioc3_close(struct net_device *dev)
{
struct ioc3_private *ip = netdev_priv(dev);
- del_timer_sync(&ip->ioc3_timer);
+ timer_delete_sync(&ip->ioc3_timer);
netif_stop_queue(dev);
@@ -950,7 +950,7 @@ static int ioc3eth_probe(struct platform_device *pdev)
return 0;
out_stop:
- del_timer_sync(&ip->ioc3_timer);
+ timer_delete_sync(&ip->ioc3_timer);
if (ip->rxr)
dma_free_coherent(ip->dma_dev, RX_RING_SIZE, ip->rxr,
ip->rxr_dma);
@@ -971,7 +971,7 @@ static void ioc3eth_remove(struct platform_device *pdev)
dma_free_coherent(ip->dma_dev, TX_RING_SIZE + SZ_16K - 1, ip->tx_ring, ip->txr_dma);
unregister_netdev(dev);
- del_timer_sync(&ip->ioc3_timer);
+ timer_delete_sync(&ip->ioc3_timer);
free_netdev(dev);
}
@@ -1273,7 +1273,7 @@ static void ioc3_set_multicast_list(struct net_device *dev)
static struct platform_driver ioc3eth_driver = {
.probe = ioc3eth_probe,
- .remove_new = ioc3eth_remove,
+ .remove = ioc3eth_remove,
.driver = {
.name = "ioc3-eth",
}
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index 18b6f93d875e..f7c3a5a766b7 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -864,7 +864,7 @@ static void meth_remove(struct platform_device *pdev)
static struct platform_driver meth_driver = {
.probe = meth_probe,
- .remove_new = meth_remove,
+ .remove = meth_remove,
.driver = {
.name = "meth",
}
diff --git a/drivers/net/ethernet/sis/Kconfig b/drivers/net/ethernet/sis/Kconfig
index 775d76d9890e..7e498bdbca73 100644
--- a/drivers/net/ethernet/sis/Kconfig
+++ b/drivers/net/ethernet/sis/Kconfig
@@ -19,7 +19,7 @@ if NET_VENDOR_SIS
config SIS900
tristate "SiS 900/7016 PCI Fast Ethernet Adapter support"
- depends on PCI
+ depends on PCI && HAS_IOPORT
select CRC32
select MII
help
@@ -35,7 +35,7 @@ config SIS900
config SIS190
tristate "SiS190/SiS191 gigabit ethernet support"
- depends on PCI
+ depends on PCI && HAS_IOPORT
select CRC32
select MII
help
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index dda4e488c77a..15e46e6ac262 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -758,7 +758,7 @@ static irqreturn_t sis190_irq(int irq, void *__dev)
if (status & LinkChange) {
netif_info(tp, intr, dev, "link change\n");
- del_timer(&tp->timer);
+ timer_delete(&tp->timer);
schedule_work(&tp->phy_task);
}
@@ -1023,7 +1023,7 @@ out_unlock:
static void sis190_phy_timer(struct timer_list *t)
{
- struct sis190_private *tp = from_timer(tp, t, timer);
+ struct sis190_private *tp = timer_container_of(tp, t, timer);
struct net_device *dev = tp->dev;
if (likely(netif_running(dev)))
@@ -1034,7 +1034,7 @@ static inline void sis190_delete_timer(struct net_device *dev)
{
struct sis190_private *tp = netdev_priv(dev);
- del_timer_sync(&tp->timer);
+ timer_delete_sync(&tp->timer);
}
static inline void sis190_request_timer(struct net_device *dev)
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index cb7fec226cab..b461918dc5f4 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -468,7 +468,7 @@ static int sis900_probe(struct pci_dev *pci_dev,
SET_NETDEV_DEV(net_dev, &pci_dev->dev);
/* We do a request_region() to register /proc/ioports info. */
- ret = pci_request_regions(pci_dev, "sis900");
+ ret = pcim_request_all_regions(pci_dev, "sis900");
if (ret)
goto err_out;
@@ -1314,7 +1314,8 @@ static void sis630_set_eq(struct net_device *net_dev, u8 revision)
static void sis900_timer(struct timer_list *t)
{
- struct sis900_private *sis_priv = from_timer(sis_priv, t, timer);
+ struct sis900_private *sis_priv = timer_container_of(sis_priv, t,
+ timer);
struct net_device *net_dev = sis_priv->mii_info.dev;
struct mii_phy *mii_phy = sis_priv->mii;
static const int next_tick = 5*HZ;
@@ -1983,7 +1984,7 @@ static int sis900_close(struct net_device *net_dev)
/* Stop the chip's Tx and Rx Status Machine */
sw32(cr, RxDIS | TxDIS | sr32(cr));
- del_timer(&sis_priv->timer);
+ timer_delete(&sis_priv->timer);
free_irq(pdev->irq, net_dev);
@@ -2273,7 +2274,7 @@ static int sis900_set_config(struct net_device *dev, struct ifmap *map)
* (which seems to be different from the ifport(pcmcia) definition) */
switch(map->port){
case IF_PORT_UNKNOWN: /* use auto here */
- dev->if_port = map->port;
+ WRITE_ONCE(dev->if_port, map->port);
/* we are going to change the media type, so the Link
* will be temporary down and we need to reflect that
* here. When the Link comes up again, it will be
@@ -2294,7 +2295,7 @@ static int sis900_set_config(struct net_device *dev, struct ifmap *map)
break;
case IF_PORT_10BASET: /* 10BaseT */
- dev->if_port = map->port;
+ WRITE_ONCE(dev->if_port, map->port);
/* we are going to change the media type, so the Link
* will be temporary down and we need to reflect that
@@ -2315,7 +2316,7 @@ static int sis900_set_config(struct net_device *dev, struct ifmap *map)
case IF_PORT_100BASET: /* 100BaseT */
case IF_PORT_100BASETX: /* 100BaseTx */
- dev->if_port = map->port;
+ WRITE_ONCE(dev->if_port, map->port);
/* we are going to change the media type, so the Link
* will be temporary down and we need to reflect that
diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig
index 5f22a8a4d27b..13ce9086a9ca 100644
--- a/drivers/net/ethernet/smsc/Kconfig
+++ b/drivers/net/ethernet/smsc/Kconfig
@@ -54,7 +54,7 @@ config SMC91X
config PCMCIA_SMC91C92
tristate "SMC 91Cxx PCMCIA support"
- depends on PCMCIA
+ depends on PCMCIA && HAS_IOPORT
select CRC32
select MII
help
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index 013e90d69182..45f703fe0e5a 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -850,7 +850,7 @@ static void check_media(struct net_device *dev)
static void epic_timer(struct timer_list *t)
{
- struct epic_private *ep = from_timer(ep, t, timer);
+ struct epic_private *ep = timer_container_of(ep, t, timer);
struct net_device *dev = ep->mii.dev;
void __iomem *ioaddr = ep->ioaddr;
int next_tick = 5*HZ;
@@ -1292,7 +1292,7 @@ static int epic_close(struct net_device *dev)
netdev_dbg(dev, "Shutting down ethercard, status was %2.2x.\n",
er32(INTSTAT));
- del_timer_sync(&ep->timer);
+ timer_delete_sync(&ep->timer);
epic_disable_int(dev, ep);
diff --git a/drivers/net/ethernet/smsc/smc9194.c b/drivers/net/ethernet/smsc/smc9194.c
index af661c65ffe2..e2e7b1c68563 100644
--- a/drivers/net/ethernet/smsc/smc9194.c
+++ b/drivers/net/ethernet/smsc/smc9194.c
@@ -1501,6 +1501,7 @@ static void smc_set_multicast_list(struct net_device *dev)
#ifdef MODULE
static struct net_device *devSMC9194;
+MODULE_DESCRIPTION("SMC 9194 Ethernet driver");
MODULE_LICENSE("GPL");
module_param_hw(io, int, ioport, 0);
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index 29bb19f42de9..cc0c75694351 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -1105,7 +1105,7 @@ static int smc_close(struct net_device *dev)
outw(CTL_POWERDOWN, ioaddr + CONTROL );
link->open--;
- del_timer_sync(&smc->media);
+ timer_delete_sync(&smc->media);
return 0;
} /* smc_close */
@@ -1595,7 +1595,7 @@ static int s9k_config(struct net_device *dev, struct ifmap *map)
return -EOPNOTSUPP;
else if (map->port > 2)
return -EINVAL;
- dev->if_port = map->port;
+ WRITE_ONCE(dev->if_port, map->port);
netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
smc_reset(dev);
}
@@ -1713,7 +1713,7 @@ static void smc_reset(struct net_device *dev)
static void media_check(struct timer_list *t)
{
- struct smc_private *smc = from_timer(smc, t, media);
+ struct smc_private *smc = timer_container_of(smc, t, media);
struct net_device *dev = smc->mii_if.dev;
unsigned int ioaddr = dev->base_addr;
u_short i, media, saved_bank;
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 78ff3af7911a..9d1a83a5fa7e 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -1574,12 +1574,8 @@ smc_ethtool_set_link_ksettings(struct net_device *dev,
(cmd->base.port != PORT_TP && cmd->base.port != PORT_AUI))
return -EINVAL;
-// lp->port = cmd->base.port;
lp->ctl_rfduplx = cmd->base.duplex == DUPLEX_FULL;
-// if (netif_running(dev))
-// smc_set_port(dev);
-
ret = 0;
}
@@ -2359,7 +2355,7 @@ static int smc_drv_probe(struct platform_device *pdev)
* the resource supplies a trigger, override the irqflags with
* the trigger flags from the resource.
*/
- irq_resflags = irqd_get_trigger_type(irq_get_irq_data(ndev->irq));
+ irq_resflags = irq_get_trigger_type(ndev->irq);
if (irq_flags == -1 || irq_resflags & IRQF_TRIGGER_MASK)
irq_flags = irq_resflags & IRQF_TRIGGER_MASK;
@@ -2479,7 +2475,7 @@ static const struct dev_pm_ops smc_drv_pm_ops = {
static struct platform_driver smc_driver = {
.probe = smc_drv_probe,
- .remove_new = smc_drv_remove,
+ .remove = smc_drv_remove,
.driver = {
.name = CARDNAME,
.pm = &smc_drv_pm_ops,
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index 46eee747c699..38aa4374e813 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -142,22 +142,22 @@ static inline void _SMC_outw_align4(u16 val, void __iomem *ioaddr, int reg,
#define SMC_CAN_USE_32BIT 0
#define SMC_NOWAIT 1
-static inline void mcf_insw(void *a, unsigned char *p, int l)
+static inline void mcf_insw(void __iomem *a, unsigned char *p, int l)
{
u16 *wp = (u16 *) p;
while (l-- > 0)
*wp++ = readw(a);
}
-static inline void mcf_outsw(void *a, unsigned char *p, int l)
+static inline void mcf_outsw(void __iomem *a, unsigned char *p, int l)
{
u16 *wp = (u16 *) p;
while (l-- > 0)
writew(*wp++, a);
}
-#define SMC_inw(a, r) _swapw(readw((a) + (r)))
-#define SMC_outw(lp, v, a, r) writew(_swapw(v), (a) + (r))
+#define SMC_inw(a, r) ioread16be((a) + (r))
+#define SMC_outw(lp, v, a, r) iowrite16be(v, (a) + (r))
#define SMC_insw(a, r, p, l) mcf_insw(a + r, p, l)
#define SMC_outsw(a, r, p, l) mcf_outsw(a + r, p, l)
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 74f1ccc96459..3ebd0664c697 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -43,7 +43,6 @@
#include <linux/smsc911x.h>
#include <linux/device.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/of_net.h>
#include <linux/acpi.h>
#include <linux/pm_runtime.h>
@@ -2163,10 +2162,20 @@ static const struct net_device_ops smsc911x_netdev_ops = {
static void smsc911x_read_mac_address(struct net_device *dev)
{
struct smsc911x_data *pdata = netdev_priv(dev);
- u32 mac_high16 = smsc911x_mac_read(pdata, ADDRH);
- u32 mac_low32 = smsc911x_mac_read(pdata, ADDRL);
+ u32 mac_high16, mac_low32;
u8 addr[ETH_ALEN];
+ mac_high16 = smsc911x_mac_read(pdata, ADDRH);
+ mac_low32 = smsc911x_mac_read(pdata, ADDRL);
+
+ /* The first mac_read in some setups can incorrectly read 0. Re-read it
+ * to get the full MAC if this is observed.
+ */
+ if (mac_high16 == 0) {
+ SMSC_TRACE(pdata, probe, "Re-read MAC ADDRH\n");
+ mac_high16 = smsc911x_mac_read(pdata, ADDRH);
+ }
+
addr[0] = (u8)(mac_low32);
addr[1] = (u8)(mac_low32 >> 8);
addr[2] = (u8)(mac_low32 >> 16);
@@ -2351,7 +2360,7 @@ static void smsc911x_drv_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
}
-/* standard register acces */
+/* standard register access */
static const struct smsc911x_ops standard_smsc911x_ops = {
.reg_read = __smsc911x_reg_read,
.reg_write = __smsc911x_reg_write,
@@ -2667,7 +2676,7 @@ MODULE_DEVICE_TABLE(acpi, smsc911x_acpi_match);
static struct platform_driver smsc911x_driver = {
.probe = smsc911x_drv_probe,
- .remove_new = smsc911x_drv_remove,
+ .remove = smsc911x_drv_remove,
.driver = {
.name = SMSC_CHIPNAME,
.pm = SMSC911X_PM_OPS,
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index 15cb96c2506d..f30d4b17c7fb 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -18,7 +18,7 @@
#include <linux/crc32.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "smsc9420.h"
#define DRV_NAME "smsc9420"
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index 5ab8b81b84e6..ee890de69ffe 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -970,7 +970,7 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
struct netsec_de *de = dring->vaddr + (DESC_SZ * idx);
struct netsec_desc *desc = &dring->desc[idx];
struct page *page = virt_to_page(desc->addr);
- u32 xdp_result = NETSEC_XDP_PASS;
+ u32 metasize, xdp_result = NETSEC_XDP_PASS;
struct sk_buff *skb = NULL;
u16 pkt_len, desc_len;
dma_addr_t dma_handle;
@@ -1019,7 +1019,7 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
prefetch(desc->addr);
xdp_prepare_buff(&xdp, desc->addr, NETSEC_RXBUF_HEADROOM,
- pkt_len, false);
+ pkt_len, true);
if (xdp_prog) {
xdp_result = netsec_run_xdp(priv, xdp_prog, &xdp);
@@ -1048,6 +1048,9 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
skb_reserve(skb, xdp.data - xdp.data_hard_start);
skb_put(skb, xdp.data_end - xdp.data);
+ metasize = xdp.data - xdp.data_meta;
+ if (metasize)
+ skb_metadata_set(skb, metasize);
skb->protocol = eth_type_trans(skb, priv->ndev);
if (priv->rx_cksum_offload_flag &&
@@ -2211,7 +2214,7 @@ MODULE_DEVICE_TABLE(acpi, netsec_acpi_ids);
static struct platform_driver netsec_driver = {
.probe = netsec_probe,
- .remove_new = netsec_remove,
+ .remove = netsec_remove,
.driver = {
.name = "netsec",
.pm = &netsec_pm_ops,
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index eed24e67c5a6..66b3549636f8 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -1974,7 +1974,7 @@ MODULE_DEVICE_TABLE(of, of_ave_match);
static struct platform_driver ave_driver = {
.probe = ave_probe,
- .remove_new = ave_remove,
+ .remove = ave_remove,
.driver = {
.name = "ave",
.pm = AVE_PM_OPS,
diff --git a/drivers/net/ethernet/spacemit/Kconfig b/drivers/net/ethernet/spacemit/Kconfig
new file mode 100644
index 000000000000..85ef61a9b4ef
--- /dev/null
+++ b/drivers/net/ethernet/spacemit/Kconfig
@@ -0,0 +1,29 @@
+config NET_VENDOR_SPACEMIT
+ bool "SpacemiT devices"
+ default y
+ depends on ARCH_SPACEMIT || COMPILE_TEST
+ help
+ If you have a network (Ethernet) device belonging to this class,
+ say Y.
+
+ Note that the answer to this question does not directly affect
+ the kernel: saying N will just cause the configurator to skip all
+ the questions regarding SpacemiT devices. If you say Y, you will
+ be asked for your specific chipset/driver in the following questions.
+
+if NET_VENDOR_SPACEMIT
+
+config SPACEMIT_K1_EMAC
+ tristate "SpacemiT K1 Ethernet MAC driver"
+ depends on ARCH_SPACEMIT || COMPILE_TEST
+ depends on MFD_SYSCON
+ depends on OF
+ default m if ARCH_SPACEMIT
+ select PHYLIB
+ help
+ This driver supports the Ethernet MAC in the SpacemiT K1 SoC.
+
+ To compile this driver as a module, choose M here: the module
+ will be called k1_emac.
+
+endif # NET_VENDOR_SPACEMIT
diff --git a/drivers/net/ethernet/spacemit/Makefile b/drivers/net/ethernet/spacemit/Makefile
new file mode 100644
index 000000000000..d29efd997a4f
--- /dev/null
+++ b/drivers/net/ethernet/spacemit/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the SpacemiT network device drivers.
+#
+
+obj-$(CONFIG_SPACEMIT_K1_EMAC) += k1_emac.o
diff --git a/drivers/net/ethernet/spacemit/k1_emac.c b/drivers/net/ethernet/spacemit/k1_emac.c
new file mode 100644
index 000000000000..220eb5ce7583
--- /dev/null
+++ b/drivers/net/ethernet/spacemit/k1_emac.c
@@ -0,0 +1,2162 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SpacemiT K1 Ethernet driver
+ *
+ * Copyright (C) 2023-2025 SpacemiT (Hangzhou) Technology Co. Ltd
+ * Copyright (C) 2025 Vivian Wang <wangruikang@iscas.ac.cn>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/rtnetlink.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+
+#include "k1_emac.h"
+
+#define DRIVER_NAME "k1_emac"
+
+#define EMAC_DEFAULT_BUFSIZE 1536
+#define EMAC_RX_BUF_2K 2048
+#define EMAC_RX_BUF_4K 4096
+
+/* Tuning parameters from SpacemiT */
+#define EMAC_TX_FRAMES 64
+#define EMAC_TX_COAL_TIMEOUT 40000
+#define EMAC_RX_FRAMES 64
+#define EMAC_RX_COAL_TIMEOUT (600 * 312)
+
+#define DEFAULT_FC_PAUSE_TIME 0xffff
+#define DEFAULT_FC_FIFO_HIGH 1600
+#define DEFAULT_TX_ALMOST_FULL 0x1f8
+#define DEFAULT_TX_THRESHOLD 1518
+#define DEFAULT_RX_THRESHOLD 12
+#define DEFAULT_TX_RING_NUM 1024
+#define DEFAULT_RX_RING_NUM 1024
+#define DEFAULT_DMA_BURST MREGBIT_BURST_16WORD
+#define HASH_TABLE_SIZE 64
+
+struct desc_buf {
+ u64 dma_addr;
+ void *buff_addr;
+ u16 dma_len;
+ u8 map_as_page;
+};
+
+struct emac_tx_desc_buffer {
+ struct sk_buff *skb;
+ struct desc_buf buf[2];
+};
+
+struct emac_rx_desc_buffer {
+ struct sk_buff *skb;
+ u64 dma_addr;
+ void *buff_addr;
+ u16 dma_len;
+ u8 map_as_page;
+};
+
+/**
+ * struct emac_desc_ring - Software-side information for one descriptor ring
+ * Same structure used for both RX and TX
+ * @desc_addr: Virtual address to the descriptor ring memory
+ * @desc_dma_addr: DMA address of the descriptor ring
+ * @total_size: Size of ring in bytes
+ * @total_cnt: Number of descriptors
+ * @head: Next descriptor to associate a buffer with
+ * @tail: Next descriptor to check the status bit of
+ * @rx_desc_buf: Array of per-descriptor buffer information for RX
+ * @tx_desc_buf: Array of per-descriptor buffer information for TX, max two buffers each
+ */
+struct emac_desc_ring {
+ void *desc_addr;
+ dma_addr_t desc_dma_addr;
+ u32 total_size;
+ u32 total_cnt;
+ u32 head;
+ u32 tail;
+ union {
+ struct emac_rx_desc_buffer *rx_desc_buf;
+ struct emac_tx_desc_buffer *tx_desc_buf;
+ };
+};
+
+struct emac_priv {
+ void __iomem *iobase;
+ u32 dma_buf_sz;
+ struct emac_desc_ring tx_ring;
+ struct emac_desc_ring rx_ring;
+
+ struct net_device *ndev;
+ struct napi_struct napi;
+ struct platform_device *pdev;
+ struct clk *bus_clk;
+ struct clk *ref_clk;
+ struct regmap *regmap_apmu;
+ u32 regmap_apmu_offset;
+ int irq;
+
+ phy_interface_t phy_interface;
+
+ union emac_hw_tx_stats tx_stats, tx_stats_off;
+ union emac_hw_rx_stats rx_stats, rx_stats_off;
+
+ u32 tx_count_frames;
+ u32 tx_coal_frames;
+ u32 tx_coal_timeout;
+ struct work_struct tx_timeout_task;
+
+ struct timer_list txtimer;
+ struct timer_list stats_timer;
+
+ u32 tx_delay;
+ u32 rx_delay;
+
+ bool flow_control_autoneg;
+ u8 flow_control;
+
+ /* Softirq-safe, hold while touching hardware statistics */
+ spinlock_t stats_lock;
+};
+
+static void emac_wr(struct emac_priv *priv, u32 reg, u32 val)
+{
+ writel(val, priv->iobase + reg);
+}
+
+static u32 emac_rd(struct emac_priv *priv, u32 reg)
+{
+ return readl(priv->iobase + reg);
+}
+
+static int emac_phy_interface_config(struct emac_priv *priv)
+{
+ u32 val = 0, mask = REF_CLK_SEL | RGMII_TX_CLK_SEL | PHY_INTF_RGMII;
+
+ if (phy_interface_mode_is_rgmii(priv->phy_interface))
+ val |= PHY_INTF_RGMII;
+
+ regmap_update_bits(priv->regmap_apmu,
+ priv->regmap_apmu_offset + APMU_EMAC_CTRL_REG,
+ mask, val);
+
+ return 0;
+}
+
+/*
+ * Wherever the hardware expects a MAC address, it is laid out across three
+ * consecutive registers in high, med, low order, in the byte format below.
+ */
+
+static void emac_set_mac_addr_reg(struct emac_priv *priv,
+ const unsigned char *addr,
+ u32 reg)
+{
+ emac_wr(priv, reg + sizeof(u32) * 0, addr[1] << 8 | addr[0]);
+ emac_wr(priv, reg + sizeof(u32) * 1, addr[3] << 8 | addr[2]);
+ emac_wr(priv, reg + sizeof(u32) * 2, addr[5] << 8 | addr[4]);
+}
+
+static void emac_set_mac_addr(struct emac_priv *priv, const unsigned char *addr)
+{
+ /* We use only one address, so set the same for flow control as well */
+ emac_set_mac_addr_reg(priv, addr, MAC_ADDRESS1_HIGH);
+ emac_set_mac_addr_reg(priv, addr, MAC_FC_SOURCE_ADDRESS_HIGH);
+}
+
+static void emac_reset_hw(struct emac_priv *priv)
+{
+ /* Disable all interrupts */
+ emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0);
+ emac_wr(priv, DMA_INTERRUPT_ENABLE, 0x0);
+
+ /* Disable transmit and receive units */
+ emac_wr(priv, MAC_RECEIVE_CONTROL, 0x0);
+ emac_wr(priv, MAC_TRANSMIT_CONTROL, 0x0);
+
+ /* Disable DMA */
+ emac_wr(priv, DMA_CONTROL, 0x0);
+}
+
+static void emac_init_hw(struct emac_priv *priv)
+{
+ /* Destination address for 802.3x Ethernet flow control */
+ u8 fc_dest_addr[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x01 };
+
+ u32 rxirq = 0, dma = 0;
+
+ regmap_set_bits(priv->regmap_apmu,
+ priv->regmap_apmu_offset + APMU_EMAC_CTRL_REG,
+ AXI_SINGLE_ID);
+
+ /* Disable transmit and receive units */
+ emac_wr(priv, MAC_RECEIVE_CONTROL, 0x0);
+ emac_wr(priv, MAC_TRANSMIT_CONTROL, 0x0);
+
+ /* Enable MAC address 1 filtering */
+ emac_wr(priv, MAC_ADDRESS_CONTROL, MREGBIT_MAC_ADDRESS1_ENABLE);
+
+ /* Zero initialize the multicast hash table */
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, 0x0);
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, 0x0);
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, 0x0);
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, 0x0);
+
+ /* Configure thresholds */
+ emac_wr(priv, MAC_TRANSMIT_FIFO_ALMOST_FULL, DEFAULT_TX_ALMOST_FULL);
+ emac_wr(priv, MAC_TRANSMIT_PACKET_START_THRESHOLD,
+ DEFAULT_TX_THRESHOLD);
+ emac_wr(priv, MAC_RECEIVE_PACKET_START_THRESHOLD, DEFAULT_RX_THRESHOLD);
+
+ /* Configure flow control (enabled in emac_adjust_link() later) */
+ emac_set_mac_addr_reg(priv, fc_dest_addr, MAC_FC_SOURCE_ADDRESS_HIGH);
+ emac_wr(priv, MAC_FC_PAUSE_HIGH_THRESHOLD, DEFAULT_FC_FIFO_HIGH);
+ emac_wr(priv, MAC_FC_HIGH_PAUSE_TIME, DEFAULT_FC_PAUSE_TIME);
+ emac_wr(priv, MAC_FC_PAUSE_LOW_THRESHOLD, 0);
+
+ /* RX IRQ mitigation */
+ rxirq = FIELD_PREP(MREGBIT_RECEIVE_IRQ_FRAME_COUNTER_MASK,
+ EMAC_RX_FRAMES);
+ rxirq |= FIELD_PREP(MREGBIT_RECEIVE_IRQ_TIMEOUT_COUNTER_MASK,
+ EMAC_RX_COAL_TIMEOUT);
+ rxirq |= MREGBIT_RECEIVE_IRQ_MITIGATION_ENABLE;
+ emac_wr(priv, DMA_RECEIVE_IRQ_MITIGATION_CTRL, rxirq);
+
+ /* Disable and set DMA config */
+ emac_wr(priv, DMA_CONTROL, 0x0);
+
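+ /*
+ * Soft-reset the DMA engine: assert the reset bit, wait ~10 ms, then
+ * deassert it and wait again before programming the new configuration.
+ */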
+ emac_wr(priv, DMA_CONFIGURATION, MREGBIT_SOFTWARE_RESET);
+ usleep_range(9000, 10000);
+ emac_wr(priv, DMA_CONFIGURATION, 0x0);
+ usleep_range(9000, 10000);
+
+ dma |= MREGBIT_STRICT_BURST;
+ dma |= MREGBIT_DMA_64BIT_MODE;
+ dma |= DEFAULT_DMA_BURST;
+
+ emac_wr(priv, DMA_CONFIGURATION, dma);
+}
+
+static void emac_dma_start_transmit(struct emac_priv *priv)
+{
+ /* The actual value written does not matter */
+ emac_wr(priv, DMA_TRANSMIT_POLL_DEMAND, 1);
+}
+
+static void emac_enable_interrupt(struct emac_priv *priv)
+{
+ u32 val;
+
+ val = emac_rd(priv, DMA_INTERRUPT_ENABLE);
+ val |= MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE;
+ val |= MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE;
+ emac_wr(priv, DMA_INTERRUPT_ENABLE, val);
+}
+
+static void emac_disable_interrupt(struct emac_priv *priv)
+{
+ u32 val;
+
+ val = emac_rd(priv, DMA_INTERRUPT_ENABLE);
+ val &= ~MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE;
+ val &= ~MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE;
+ emac_wr(priv, DMA_INTERRUPT_ENABLE, val);
+}
+
+static u32 emac_tx_avail(struct emac_priv *priv)
+{
+ struct emac_desc_ring *tx_ring = &priv->tx_ring;
+ u32 avail;
+
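+ /*
+ * One descriptor is always left unused so that a completely full ring
+ * can be told apart from an empty one (head == tail means empty).
+ */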
+ if (tx_ring->tail > tx_ring->head)
+ avail = tx_ring->tail - tx_ring->head - 1;
+ else
+ avail = tx_ring->total_cnt - tx_ring->head + tx_ring->tail - 1;
+
+ return avail;
+}
+
+static void emac_tx_coal_timer_resched(struct emac_priv *priv)
+{
+ mod_timer(&priv->txtimer,
+ jiffies + usecs_to_jiffies(priv->tx_coal_timeout));
+}
+
+static void emac_tx_coal_timer(struct timer_list *t)
+{
+ struct emac_priv *priv = timer_container_of(priv, t, txtimer);
+
+ napi_schedule(&priv->napi);
+}
+
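+/*
+ * TX interrupt coalescing: request a completion interrupt only once every
+ * tx_coal_frames packets; in between, re-arm the coalescing timer so that
+ * completions are still reaped via NAPI even on a quiet queue.
+ */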
+static bool emac_tx_should_interrupt(struct emac_priv *priv, u32 pkt_num)
+{
+ priv->tx_count_frames += pkt_num;
+ if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
+ emac_tx_coal_timer_resched(priv);
+ return false;
+ }
+
+ priv->tx_count_frames = 0;
+ return true;
+}
+
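+/* Unmap both possible buffers of TX descriptor i and free its skb, if any */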
+static void emac_free_tx_buf(struct emac_priv *priv, int i)
+{
+ struct emac_tx_desc_buffer *tx_buf;
+ struct emac_desc_ring *tx_ring;
+ struct desc_buf *buf;
+ int j;
+
+ tx_ring = &priv->tx_ring;
+ tx_buf = &tx_ring->tx_desc_buf[i];
+
+ for (j = 0; j < 2; j++) {
+ buf = &tx_buf->buf[j];
+ if (!buf->dma_addr)
+ continue;
+
+ if (buf->map_as_page)
+ dma_unmap_page(&priv->pdev->dev, buf->dma_addr,
+ buf->dma_len, DMA_TO_DEVICE);
+ else
+ dma_unmap_single(&priv->pdev->dev,
+ buf->dma_addr, buf->dma_len,
+ DMA_TO_DEVICE);
+
+ buf->dma_addr = 0;
+ buf->map_as_page = false;
+ buf->buff_addr = NULL;
+ }
+
+ if (tx_buf->skb) {
+ dev_kfree_skb_any(tx_buf->skb);
+ tx_buf->skb = NULL;
+ }
+}
+
+static void emac_clean_tx_desc_ring(struct emac_priv *priv)
+{
+ struct emac_desc_ring *tx_ring = &priv->tx_ring;
+ u32 i;
+
+ for (i = 0; i < tx_ring->total_cnt; i++)
+ emac_free_tx_buf(priv, i);
+
+ tx_ring->head = 0;
+ tx_ring->tail = 0;
+}
+
+static void emac_clean_rx_desc_ring(struct emac_priv *priv)
+{
+ struct emac_rx_desc_buffer *rx_buf;
+ struct emac_desc_ring *rx_ring;
+ u32 i;
+
+ rx_ring = &priv->rx_ring;
+
+ for (i = 0; i < rx_ring->total_cnt; i++) {
+ rx_buf = &rx_ring->rx_desc_buf[i];
+
+ if (!rx_buf->skb)
+ continue;
+
+ dma_unmap_single(&priv->pdev->dev, rx_buf->dma_addr,
+ rx_buf->dma_len, DMA_FROM_DEVICE);
+
+ dev_kfree_skb(rx_buf->skb);
+ rx_buf->skb = NULL;
+ }
+
+ rx_ring->tail = 0;
+ rx_ring->head = 0;
+}
+
+static int emac_alloc_tx_resources(struct emac_priv *priv)
+{
+ struct emac_desc_ring *tx_ring = &priv->tx_ring;
+ struct platform_device *pdev = priv->pdev;
+
+ tx_ring->tx_desc_buf = kcalloc(tx_ring->total_cnt,
+ sizeof(*tx_ring->tx_desc_buf),
+ GFP_KERNEL);
+
+ if (!tx_ring->tx_desc_buf)
+ return -ENOMEM;
+
+ tx_ring->total_size = tx_ring->total_cnt * sizeof(struct emac_desc);
+ tx_ring->total_size = ALIGN(tx_ring->total_size, PAGE_SIZE);
+
+ tx_ring->desc_addr = dma_alloc_coherent(&pdev->dev, tx_ring->total_size,
+ &tx_ring->desc_dma_addr,
+ GFP_KERNEL);
+ if (!tx_ring->desc_addr) {
+ kfree(tx_ring->tx_desc_buf);
+ return -ENOMEM;
+ }
+
+ tx_ring->head = 0;
+ tx_ring->tail = 0;
+
+ return 0;
+}
+
+static int emac_alloc_rx_resources(struct emac_priv *priv)
+{
+ struct emac_desc_ring *rx_ring = &priv->rx_ring;
+ struct platform_device *pdev = priv->pdev;
+
+ rx_ring->rx_desc_buf = kcalloc(rx_ring->total_cnt,
+ sizeof(*rx_ring->rx_desc_buf),
+ GFP_KERNEL);
+ if (!rx_ring->rx_desc_buf)
+ return -ENOMEM;
+
+ rx_ring->total_size = rx_ring->total_cnt * sizeof(struct emac_desc);
+
+ rx_ring->total_size = ALIGN(rx_ring->total_size, PAGE_SIZE);
+
+ rx_ring->desc_addr = dma_alloc_coherent(&pdev->dev, rx_ring->total_size,
+ &rx_ring->desc_dma_addr,
+ GFP_KERNEL);
+ if (!rx_ring->desc_addr) {
+ kfree(rx_ring->rx_desc_buf);
+ return -ENOMEM;
+ }
+
+ rx_ring->head = 0;
+ rx_ring->tail = 0;
+
+ return 0;
+}
+
+static void emac_free_tx_resources(struct emac_priv *priv)
+{
+ struct emac_desc_ring *tr = &priv->tx_ring;
+ struct device *dev = &priv->pdev->dev;
+
+ emac_clean_tx_desc_ring(priv);
+
+ kfree(tr->tx_desc_buf);
+ tr->tx_desc_buf = NULL;
+
+ dma_free_coherent(dev, tr->total_size, tr->desc_addr,
+ tr->desc_dma_addr);
+ tr->desc_addr = NULL;
+}
+
+static void emac_free_rx_resources(struct emac_priv *priv)
+{
+ struct emac_desc_ring *rr = &priv->rx_ring;
+ struct device *dev = &priv->pdev->dev;
+
+ emac_clean_rx_desc_ring(priv);
+
+ kfree(rr->rx_desc_buf);
+ rr->rx_desc_buf = NULL;
+
+ dma_free_coherent(dev, rr->total_size, rr->desc_addr,
+ rr->desc_dma_addr);
+ rr->desc_addr = NULL;
+}
+
+static int emac_tx_clean_desc(struct emac_priv *priv)
+{
+ struct net_device *ndev = priv->ndev;
+ struct emac_desc_ring *tx_ring;
+ struct emac_desc *tx_desc;
+ u32 i;
+
+ netif_tx_lock(ndev);
+
+ tx_ring = &priv->tx_ring;
+
+ i = tx_ring->tail;
+
+ while (i != tx_ring->head) {
+ tx_desc = &((struct emac_desc *)tx_ring->desc_addr)[i];
+
+ /* Stop checking if desc still owned by DMA */
+ if (READ_ONCE(tx_desc->desc0) & TX_DESC_0_OWN)
+ break;
+
+ emac_free_tx_buf(priv, i);
+ memset(tx_desc, 0, sizeof(struct emac_desc));
+
+ if (++i == tx_ring->total_cnt)
+ i = 0;
+ }
+
+ tx_ring->tail = i;
+
+ if (unlikely(netif_queue_stopped(ndev) &&
+ emac_tx_avail(priv) > tx_ring->total_cnt / 4))
+ netif_wake_queue(ndev);
+
+ netif_tx_unlock(ndev);
+
+ return 0;
+}
+
+static bool emac_rx_frame_good(struct emac_priv *priv, struct emac_desc *desc)
+{
+ const char *msg;
+ u32 len;
+
+ len = FIELD_GET(RX_DESC_0_FRAME_PACKET_LENGTH_MASK, desc->desc0);
+
+ if (WARN_ON_ONCE(!(desc->desc0 & RX_DESC_0_LAST_DESCRIPTOR)))
+ msg = "Not last descriptor"; /* This would be a bug */
+ else if (desc->desc0 & RX_DESC_0_FRAME_RUNT)
+ msg = "Runt frame";
+ else if (desc->desc0 & RX_DESC_0_FRAME_CRC_ERR)
+ msg = "Frame CRC error";
+ else if (desc->desc0 & RX_DESC_0_FRAME_MAX_LEN_ERR)
+ msg = "Frame exceeds max length";
+ else if (desc->desc0 & RX_DESC_0_FRAME_JABBER_ERR)
+ msg = "Frame jabber error";
+ else if (desc->desc0 & RX_DESC_0_FRAME_LENGTH_ERR)
+ msg = "Frame length error";
+ else if (len <= ETH_FCS_LEN || len > priv->dma_buf_sz)
+ msg = "Frame length unacceptable";
+ else
+ return true; /* All good */
+
+ dev_dbg_ratelimited(&priv->ndev->dev, "RX error: %s", msg);
+
+ return false;
+}
+
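+/*
+ * Refill the RX ring with freshly allocated skbs, starting at head and
+ * stopping at the first descriptor that still has a buffer attached or when
+ * allocation or mapping fails.
+ */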
+static void emac_alloc_rx_desc_buffers(struct emac_priv *priv)
+{
+ struct emac_desc_ring *rx_ring = &priv->rx_ring;
+ struct emac_desc rx_desc, *rx_desc_addr;
+ struct net_device *ndev = priv->ndev;
+ struct emac_rx_desc_buffer *rx_buf;
+ struct sk_buff *skb;
+ u32 i;
+
+ i = rx_ring->head;
+ rx_buf = &rx_ring->rx_desc_buf[i];
+
+ while (!rx_buf->skb) {
+ skb = netdev_alloc_skb_ip_align(ndev, priv->dma_buf_sz);
+ if (!skb)
+ break;
+
+ skb->dev = ndev;
+
+ rx_buf->skb = skb;
+ rx_buf->dma_len = priv->dma_buf_sz;
+ rx_buf->dma_addr = dma_map_single(&priv->pdev->dev, skb->data,
+ priv->dma_buf_sz,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&priv->pdev->dev, rx_buf->dma_addr)) {
+ dev_err_ratelimited(&ndev->dev, "Mapping skb failed\n");
+ goto err_free_skb;
+ }
+
+ rx_desc_addr = &((struct emac_desc *)rx_ring->desc_addr)[i];
+
+ memset(&rx_desc, 0, sizeof(rx_desc));
+
+ rx_desc.buffer_addr_1 = rx_buf->dma_addr;
+ rx_desc.desc1 = FIELD_PREP(RX_DESC_1_BUFFER_SIZE_1_MASK,
+ rx_buf->dma_len);
+
+ if (++i == rx_ring->total_cnt) {
+ rx_desc.desc1 |= RX_DESC_1_END_RING;
+ i = 0;
+ }
+
+ *rx_desc_addr = rx_desc;
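+ /*
+ * Make sure the rest of the descriptor is visible before the OWN bit
+ * hands it over to the DMA engine.
+ */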
+ dma_wmb();
+ WRITE_ONCE(rx_desc_addr->desc0, rx_desc.desc0 | RX_DESC_0_OWN);
+
+ rx_buf = &rx_ring->rx_desc_buf[i];
+ }
+
+ rx_ring->head = i;
+ return;
+
+err_free_skb:
+ dev_kfree_skb_any(skb);
+ rx_buf->skb = NULL;
+}
+
+/* Returns number of packets received */
+static int emac_rx_clean_desc(struct emac_priv *priv, int budget)
+{
+ struct net_device *ndev = priv->ndev;
+ struct emac_rx_desc_buffer *rx_buf;
+ struct emac_desc_ring *rx_ring;
+ struct sk_buff *skb = NULL;
+ struct emac_desc *rx_desc;
+ u32 got = 0, skb_len, i;
+
+ rx_ring = &priv->rx_ring;
+
+ i = rx_ring->tail;
+
+ while (budget--) {
+ rx_desc = &((struct emac_desc *)rx_ring->desc_addr)[i];
+
+ /* Stop checking if rx_desc still owned by DMA */
+ if (READ_ONCE(rx_desc->desc0) & RX_DESC_0_OWN)
+ break;
+
+ dma_rmb();
+
+ rx_buf = &rx_ring->rx_desc_buf[i];
+
+ if (!rx_buf->skb)
+ break;
+
+ got++;
+
+ dma_unmap_single(&priv->pdev->dev, rx_buf->dma_addr,
+ rx_buf->dma_len, DMA_FROM_DEVICE);
+
+ if (likely(emac_rx_frame_good(priv, rx_desc))) {
+ skb = rx_buf->skb;
+
+ skb_len = FIELD_GET(RX_DESC_0_FRAME_PACKET_LENGTH_MASK,
+ rx_desc->desc0);
+ skb_len -= ETH_FCS_LEN;
+
+ skb_put(skb, skb_len);
+ skb->dev = ndev;
+ ndev->hard_header_len = ETH_HLEN;
+
+ skb->protocol = eth_type_trans(skb, ndev);
+
+ skb->ip_summed = CHECKSUM_NONE;
+
+ napi_gro_receive(&priv->napi, skb);
+
+ memset(rx_desc, 0, sizeof(struct emac_desc));
+ rx_buf->skb = NULL;
+ } else {
+ dev_kfree_skb_irq(rx_buf->skb);
+ rx_buf->skb = NULL;
+ }
+
+ if (++i == rx_ring->total_cnt)
+ i = 0;
+ }
+
+ rx_ring->tail = i;
+
+ emac_alloc_rx_desc_buffers(priv);
+
+ return got;
+}
+
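+/*
+ * NAPI poll handler: TX completions are reaped from the same poll that
+ * processes the RX ring.
+ */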
+static int emac_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct emac_priv *priv = container_of(napi, struct emac_priv, napi);
+ int work_done;
+
+ emac_tx_clean_desc(priv);
+
+ work_done = emac_rx_clean_desc(priv, budget);
+ if (work_done < budget && napi_complete_done(napi, work_done))
+ emac_enable_interrupt(priv);
+
+ return work_done;
+}
+
+/*
+ * For convenience, skb->data is fragment 0, frags[0] is fragment 1, etc.
+ *
+ * Each descriptor can hold up to two fragments, called buffer 1 and 2. For each
+ * fragment f, if f % 2 == 0, it uses buffer 1, otherwise it uses buffer 2.
+ */
+
+static int emac_tx_map_frag(struct device *dev, struct emac_desc *tx_desc,
+ struct emac_tx_desc_buffer *tx_buf,
+ struct sk_buff *skb, u32 frag_idx)
+{
+ bool map_as_page, buf_idx;
+ const skb_frag_t *frag;
+ phys_addr_t addr;
+ u32 len;
+ int ret;
+
+ buf_idx = frag_idx % 2;
+
+ if (frag_idx == 0) {
+ /* Non-fragmented part */
+ len = skb_headlen(skb);
+ addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
+ map_as_page = false;
+ } else {
+ /* Fragment */
+ frag = &skb_shinfo(skb)->frags[frag_idx - 1];
+ len = skb_frag_size(frag);
+ addr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
+ map_as_page = true;
+ }
+
+ ret = dma_mapping_error(dev, addr);
+ if (ret)
+ return ret;
+
+ tx_buf->buf[buf_idx].dma_addr = addr;
+ tx_buf->buf[buf_idx].dma_len = len;
+ tx_buf->buf[buf_idx].map_as_page = map_as_page;
+
+ if (buf_idx == 0) {
+ tx_desc->buffer_addr_1 = addr;
+ tx_desc->desc1 |= FIELD_PREP(TX_DESC_1_BUFFER_SIZE_1_MASK, len);
+ } else {
+ tx_desc->buffer_addr_2 = addr;
+ tx_desc->desc1 |= FIELD_PREP(TX_DESC_1_BUFFER_SIZE_2_MASK, len);
+ }
+
+ return 0;
+}
+
+static void emac_tx_mem_map(struct emac_priv *priv, struct sk_buff *skb)
+{
+ struct emac_desc_ring *tx_ring = &priv->tx_ring;
+ struct emac_desc tx_desc, *tx_desc_addr;
+ struct device *dev = &priv->pdev->dev;
+ struct emac_tx_desc_buffer *tx_buf;
+ u32 head, old_head, frag_num, f;
+ bool buf_idx;
+
+ frag_num = skb_shinfo(skb)->nr_frags;
+ head = tx_ring->head;
+ old_head = head;
+
+ for (f = 0; f < frag_num + 1; f++) {
+ buf_idx = f % 2;
+
+ /*
+ * If using buffer 1, initialize a new desc. Otherwise, use
+ * buffer 2 of previous fragment's desc.
+ */
+ if (!buf_idx) {
+ tx_buf = &tx_ring->tx_desc_buf[head];
+ tx_desc_addr =
+ &((struct emac_desc *)tx_ring->desc_addr)[head];
+ memset(&tx_desc, 0, sizeof(tx_desc));
+
+ /*
+ * Give ownership for all but first desc initially. For
+ * first desc, give at the end so DMA cannot start
+ * reading uninitialized descs.
+ */
+ if (head != old_head)
+ tx_desc.desc0 |= TX_DESC_0_OWN;
+
+ if (++head == tx_ring->total_cnt) {
+ /* Just used last desc in ring */
+ tx_desc.desc1 |= TX_DESC_1_END_RING;
+ head = 0;
+ }
+ }
+
+ if (emac_tx_map_frag(dev, &tx_desc, tx_buf, skb, f)) {
+ dev_err_ratelimited(&priv->ndev->dev,
+ "Map TX frag %d failed\n", f);
+ goto err_free_skb;
+ }
+
+ if (f == 0)
+ tx_desc.desc1 |= TX_DESC_1_FIRST_SEGMENT;
+
+ if (f == frag_num) {
+ tx_desc.desc1 |= TX_DESC_1_LAST_SEGMENT;
+ tx_buf->skb = skb;
+ if (emac_tx_should_interrupt(priv, frag_num + 1))
+ tx_desc.desc1 |=
+ TX_DESC_1_INTERRUPT_ON_COMPLETION;
+ }
+
+ *tx_desc_addr = tx_desc;
+ }
+
+ /* All descriptors are ready, give ownership for first desc */
+ tx_desc_addr = &((struct emac_desc *)tx_ring->desc_addr)[old_head];
+ dma_wmb();
+ WRITE_ONCE(tx_desc_addr->desc0, tx_desc_addr->desc0 | TX_DESC_0_OWN);
+
+ emac_dma_start_transmit(priv);
+
+ tx_ring->head = head;
+
+ return;
+
+err_free_skb:
+ dev_dstats_tx_dropped(priv->ndev);
+ dev_kfree_skb_any(skb);
+}
+
+static netdev_tx_t emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+ int nfrags = skb_shinfo(skb)->nr_frags;
+ struct device *dev = &priv->pdev->dev;
+
+ if (unlikely(emac_tx_avail(priv) < nfrags + 1)) {
+ if (!netif_queue_stopped(ndev)) {
+ netif_stop_queue(ndev);
+ dev_err_ratelimited(dev, "TX ring full, stop TX queue\n");
+ }
+ return NETDEV_TX_BUSY;
+ }
+
+ emac_tx_mem_map(priv, skb);
+
+ /* Make sure there is space in the ring for the next TX. */
+ if (unlikely(emac_tx_avail(priv) <= MAX_SKB_FRAGS + 2))
+ netif_stop_queue(ndev);
+
+ return NETDEV_TX_OK;
+}
+
+static int emac_set_mac_address(struct net_device *ndev, void *addr)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+ int ret = eth_mac_addr(ndev, addr);
+
+ if (ret)
+ return ret;
+
+ /* If running, set now; if not running it will be set in emac_up. */
+ if (netif_running(ndev))
+ emac_set_mac_addr(priv, ndev->dev_addr);
+
+ return 0;
+}
+
+static void emac_mac_multicast_filter_clear(struct emac_priv *priv)
+{
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, 0x0);
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, 0x0);
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, 0x0);
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, 0x0);
+}
+
+/*
+ * The upper 6 bits of the Ethernet CRC of the MAC address are used as the hash
+ * when matching multicast addresses.
+ */
+static u32 emac_ether_addr_hash(u8 addr[ETH_ALEN])
+{
+ u32 crc32 = ether_crc(ETH_ALEN, addr);
+
+ return crc32 >> 26;
+}
+
+/* Configure Multicast and Promiscuous modes */
+static void emac_set_rx_mode(struct net_device *ndev)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+ struct netdev_hw_addr *ha;
+ u32 mc_filter[4] = { 0 };
+ u32 hash, reg, bit, val;
+
+ val = emac_rd(priv, MAC_ADDRESS_CONTROL);
+
+ val &= ~MREGBIT_PROMISCUOUS_MODE;
+
+ if (ndev->flags & IFF_PROMISC) {
+ /* Enable promisc mode */
+ val |= MREGBIT_PROMISCUOUS_MODE;
+ } else if ((ndev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(ndev) > HASH_TABLE_SIZE)) {
+ /* Accept all multicast frames by setting every bit */
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, 0xffff);
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, 0xffff);
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, 0xffff);
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, 0xffff);
+ } else if (!netdev_mc_empty(ndev)) {
+ emac_mac_multicast_filter_clear(priv);
+ netdev_for_each_mc_addr(ha, ndev) {
+ /*
+ * The hash table is an array of 4 16-bit registers. It
+ * is treated like an array of 64 bits (bits[hash]).
+ */
+ hash = emac_ether_addr_hash(ha->addr);
+ reg = hash / 16;
+ bit = hash % 16;
+ mc_filter[reg] |= BIT(bit);
+ }
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, mc_filter[0]);
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, mc_filter[1]);
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, mc_filter[2]);
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, mc_filter[3]);
+ }
+
+ emac_wr(priv, MAC_ADDRESS_CONTROL, val);
+}
+
+static int emac_change_mtu(struct net_device *ndev, int mtu)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+ u32 frame_len;
+
+ if (netif_running(ndev)) {
+ netdev_err(ndev, "must be stopped to change MTU\n");
+ return -EBUSY;
+ }
+
+ frame_len = mtu + ETH_HLEN + ETH_FCS_LEN;
+
+ if (frame_len <= EMAC_DEFAULT_BUFSIZE)
+ priv->dma_buf_sz = EMAC_DEFAULT_BUFSIZE;
+ else if (frame_len <= EMAC_RX_BUF_2K)
+ priv->dma_buf_sz = EMAC_RX_BUF_2K;
+ else
+ priv->dma_buf_sz = EMAC_RX_BUF_4K;
+
+ ndev->mtu = mtu;
+
+ return 0;
+}
+
+static void emac_tx_timeout(struct net_device *ndev, unsigned int txqueue)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+
+ schedule_work(&priv->tx_timeout_task);
+}
+
+static int emac_mii_read(struct mii_bus *bus, int phy_addr, int regnum)
+{
+ struct emac_priv *priv = bus->priv;
+ u32 cmd = 0, val;
+ int ret;
+
+ cmd |= FIELD_PREP(MREGBIT_PHY_ADDRESS, phy_addr);
+ cmd |= FIELD_PREP(MREGBIT_REGISTER_ADDRESS, regnum);
+ cmd |= MREGBIT_START_MDIO_TRANS | MREGBIT_MDIO_READ_WRITE;
+
+ emac_wr(priv, MAC_MDIO_DATA, 0x0);
+ emac_wr(priv, MAC_MDIO_CONTROL, cmd);
+
+ ret = readl_poll_timeout(priv->iobase + MAC_MDIO_CONTROL, val,
+ !(val & MREGBIT_START_MDIO_TRANS), 100, 10000);
+
+ if (ret)
+ return ret;
+
+ val = emac_rd(priv, MAC_MDIO_DATA);
+ return FIELD_GET(MREGBIT_MDIO_DATA, val);
+}
+
+static int emac_mii_write(struct mii_bus *bus, int phy_addr, int regnum,
+ u16 value)
+{
+ struct emac_priv *priv = bus->priv;
+ u32 cmd = 0, val;
+ int ret;
+
+ emac_wr(priv, MAC_MDIO_DATA, value);
+
+ cmd |= FIELD_PREP(MREGBIT_PHY_ADDRESS, phy_addr);
+ cmd |= FIELD_PREP(MREGBIT_REGISTER_ADDRESS, regnum);
+ cmd |= MREGBIT_START_MDIO_TRANS;
+
+ emac_wr(priv, MAC_MDIO_CONTROL, cmd);
+
+ ret = readl_poll_timeout(priv->iobase + MAC_MDIO_CONTROL, val,
+ !(val & MREGBIT_START_MDIO_TRANS), 100, 10000);
+
+ return ret;
+}
+
+static int emac_mdio_init(struct emac_priv *priv)
+{
+ struct device *dev = &priv->pdev->dev;
+ struct device_node *mii_np;
+ struct mii_bus *mii;
+ int ret;
+
+ mii = devm_mdiobus_alloc(dev);
+ if (!mii)
+ return -ENOMEM;
+
+ mii->priv = priv;
+ mii->name = "k1_emac_mii";
+ mii->read = emac_mii_read;
+ mii->write = emac_mii_write;
+ mii->parent = dev;
+ mii->phy_mask = ~0;
+ snprintf(mii->id, MII_BUS_ID_SIZE, "%s", priv->pdev->name);
+
+ mii_np = of_get_available_child_by_name(dev->of_node, "mdio-bus");
+
+ ret = devm_of_mdiobus_register(dev, mii, mii_np);
+ if (ret)
+ dev_err_probe(dev, ret, "Failed to register mdio bus\n");
+
+ of_node_put(mii_np);
+ return ret;
+}
+
+static void emac_set_tx_fc(struct emac_priv *priv, bool enable)
+{
+ u32 val;
+
+ val = emac_rd(priv, MAC_FC_CONTROL);
+
+ FIELD_MODIFY(MREGBIT_FC_GENERATION_ENABLE, &val, enable);
+ FIELD_MODIFY(MREGBIT_AUTO_FC_GENERATION_ENABLE, &val, enable);
+
+ emac_wr(priv, MAC_FC_CONTROL, val);
+}
+
+static void emac_set_rx_fc(struct emac_priv *priv, bool enable)
+{
+ u32 val = emac_rd(priv, MAC_FC_CONTROL);
+
+ FIELD_MODIFY(MREGBIT_FC_DECODE_ENABLE, &val, enable);
+
+ emac_wr(priv, MAC_FC_CONTROL, val);
+}
+
+static void emac_set_fc(struct emac_priv *priv, u8 fc)
+{
+ emac_set_tx_fc(priv, fc & FLOW_CTRL_TX);
+ emac_set_rx_fc(priv, fc & FLOW_CTRL_RX);
+ priv->flow_control = fc;
+}
+
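+/*
+ * Resolve pause settings from the locally advertised and link partner
+ * pause/asym-pause capabilities, then apply them.
+ */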
+static void emac_set_fc_autoneg(struct emac_priv *priv)
+{
+ struct phy_device *phydev = priv->ndev->phydev;
+ u32 local_adv, remote_adv;
+ u8 fc;
+
+ local_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);
+
+ remote_adv = 0;
+
+ if (phydev->pause)
+ remote_adv |= LPA_PAUSE_CAP;
+
+ if (phydev->asym_pause)
+ remote_adv |= LPA_PAUSE_ASYM;
+
+ fc = mii_resolve_flowctrl_fdx(local_adv, remote_adv);
+
+ priv->flow_control_autoneg = true;
+
+ emac_set_fc(priv, fc);
+}
+
+/*
+ * Even though this MAC supports gigabit operation, it only provides 32-bit
+ * statistics counters. The most overflow-prone counters are the "bytes" ones,
+ * which at gigabit overflow about twice a minute.
+ *
+ * Therefore, we maintain the high 32 bits of counters ourselves, incrementing
+ * every time statistics seem to go backwards. Also, update periodically to
+ * catch overflows when we are not otherwise checking the statistics often
+ * enough.
+ */
+
+#define EMAC_STATS_TIMER_PERIOD 20
+
+static int emac_read_stat_cnt(struct emac_priv *priv, u8 cnt, u32 *res,
+ u32 control_reg, u32 high_reg, u32 low_reg)
+{
+ u32 val, high, low;
+ int ret;
+
+ /* The "read" bit is the same for TX and RX */
+
+ val = MREGBIT_START_TX_COUNTER_READ | cnt;
+ emac_wr(priv, control_reg, val);
+ val = emac_rd(priv, control_reg);
+
+ ret = readl_poll_timeout_atomic(priv->iobase + control_reg, val,
+ !(val & MREGBIT_START_TX_COUNTER_READ),
+ 100, 10000);
+
+ if (ret) {
+ netdev_err(priv->ndev, "Read stat timeout\n");
+ return ret;
+ }
+
+ high = emac_rd(priv, high_reg);
+ low = emac_rd(priv, low_reg);
+ *res = high << 16 | lower_16_bits(low);
+
+ return 0;
+}
+
+static int emac_tx_read_stat_cnt(struct emac_priv *priv, u8 cnt, u32 *res)
+{
+ return emac_read_stat_cnt(priv, cnt, res, MAC_TX_STATCTR_CONTROL,
+ MAC_TX_STATCTR_DATA_HIGH,
+ MAC_TX_STATCTR_DATA_LOW);
+}
+
+static int emac_rx_read_stat_cnt(struct emac_priv *priv, u8 cnt, u32 *res)
+{
+ return emac_read_stat_cnt(priv, cnt, res, MAC_RX_STATCTR_CONTROL,
+ MAC_RX_STATCTR_DATA_HIGH,
+ MAC_RX_STATCTR_DATA_LOW);
+}
+
+static void emac_update_counter(u64 *counter, u32 new_low)
+{
+ u32 old_low = lower_32_bits(*counter);
+ u64 high = upper_32_bits(*counter);
+
+ if (old_low > new_low) {
+ /* Overflowed, increment high 32 bits */
+ high++;
+ }
+
+ *counter = (high << 32) | new_low;
+}
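+
+/*
+ * Example: if the stored 64-bit counter is 0x1_fffffff0 and the hardware now
+ * reports 0x10, the low half has gone backwards, so the high half is bumped
+ * and the counter becomes 0x2_00000010.
+ */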
+
+static void emac_stats_update(struct emac_priv *priv)
+{
+ u64 *tx_stats_off = priv->tx_stats_off.array;
+ u64 *rx_stats_off = priv->rx_stats_off.array;
+ u64 *tx_stats = priv->tx_stats.array;
+ u64 *rx_stats = priv->rx_stats.array;
+ u32 i, res, offset;
+
+ assert_spin_locked(&priv->stats_lock);
+
+ if (!netif_running(priv->ndev) || !netif_device_present(priv->ndev)) {
+ /* Not up, don't try to update */
+ return;
+ }
+
+ for (i = 0; i < sizeof(priv->tx_stats) / sizeof(*tx_stats); i++) {
+ /*
+ * If reading stats times out, everything is broken and there's
+ * nothing we can do. Reading statistics also can't return an
+ * error, so just return without updating and without
+ * rescheduling.
+ */
+ if (emac_tx_read_stat_cnt(priv, i, &res))
+ return;
+
+ /*
+ * Re-initializing while bringing interface up resets counters
+ * to zero, so to provide continuity, we add the values saved
+ * last time we did emac_down() to the new hardware-provided
+ * value.
+ */
+ offset = lower_32_bits(tx_stats_off[i]);
+ emac_update_counter(&tx_stats[i], res + offset);
+ }
+
+ /* The same considerations as for the TX stats above apply here */
+ for (i = 0; i < sizeof(priv->rx_stats) / sizeof(*rx_stats); i++) {
+ if (emac_rx_read_stat_cnt(priv, i, &res))
+ return;
+ offset = lower_32_bits(rx_stats_off[i]);
+ emac_update_counter(&rx_stats[i], res + offset);
+ }
+
+ mod_timer(&priv->stats_timer, jiffies + EMAC_STATS_TIMER_PERIOD * HZ);
+}
+
+static void emac_stats_timer(struct timer_list *t)
+{
+ struct emac_priv *priv = timer_container_of(priv, t, stats_timer);
+
+ spin_lock(&priv->stats_lock);
+
+ emac_stats_update(priv);
+
+ spin_unlock(&priv->stats_lock);
+}
+
+static const struct ethtool_rmon_hist_range emac_rmon_hist_ranges[] = {
+ { 64, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, 4096 },
+ { /* sentinel */ },
+};
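+
+/*
+ * Note: the bucket order above must match the hist[0..6] assignments in
+ * emac_get_rmon_stats(), which follow the rx_64_pkts ... rx_1519_plus_pkts
+ * counter order.
+ */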
+
+/* Like dev_fetch_dstats(), but we only use tx_drops */
+static u64 emac_get_stat_tx_drops(struct emac_priv *priv)
+{
+ const struct pcpu_dstats *stats;
+ u64 tx_drops, total = 0;
+ unsigned int start;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ stats = per_cpu_ptr(priv->ndev->dstats, cpu);
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ tx_drops = u64_stats_read(&stats->tx_drops);
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+
+ total += tx_drops;
+ }
+
+ return total;
+}
+
+static void emac_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *storage)
+{
+ struct emac_priv *priv = netdev_priv(dev);
+ union emac_hw_tx_stats *tx_stats;
+ union emac_hw_rx_stats *rx_stats;
+
+ tx_stats = &priv->tx_stats;
+ rx_stats = &priv->rx_stats;
+
+ /* This is the only software counter */
+ storage->tx_dropped = emac_get_stat_tx_drops(priv);
+
+ spin_lock_bh(&priv->stats_lock);
+
+ emac_stats_update(priv);
+
+ storage->tx_packets = tx_stats->stats.tx_ok_pkts;
+ storage->tx_bytes = tx_stats->stats.tx_ok_bytes;
+ storage->tx_errors = tx_stats->stats.tx_err_pkts;
+
+ storage->rx_packets = rx_stats->stats.rx_ok_pkts;
+ storage->rx_bytes = rx_stats->stats.rx_ok_bytes;
+ storage->rx_errors = rx_stats->stats.rx_err_total_pkts;
+ storage->rx_crc_errors = rx_stats->stats.rx_crc_err_pkts;
+ storage->rx_frame_errors = rx_stats->stats.rx_align_err_pkts;
+ storage->rx_length_errors = rx_stats->stats.rx_len_err_pkts;
+
+ storage->collisions = tx_stats->stats.tx_singleclsn_pkts;
+ storage->collisions += tx_stats->stats.tx_multiclsn_pkts;
+ storage->collisions += tx_stats->stats.tx_excessclsn_pkts;
+
+ storage->rx_missed_errors = rx_stats->stats.rx_drp_fifo_full_pkts;
+ storage->rx_missed_errors += rx_stats->stats.rx_truncate_fifo_full_pkts;
+
+ spin_unlock_bh(&priv->stats_lock);
+}
+
+static void emac_get_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct emac_priv *priv = netdev_priv(dev);
+ union emac_hw_rx_stats *rx_stats;
+
+ rx_stats = &priv->rx_stats;
+
+ *ranges = emac_rmon_hist_ranges;
+
+ spin_lock_bh(&priv->stats_lock);
+
+ emac_stats_update(priv);
+
+ rmon_stats->undersize_pkts = rx_stats->stats.rx_len_undersize_pkts;
+ rmon_stats->oversize_pkts = rx_stats->stats.rx_len_oversize_pkts;
+ rmon_stats->fragments = rx_stats->stats.rx_len_fragment_pkts;
+ rmon_stats->jabbers = rx_stats->stats.rx_len_jabber_pkts;
+
+ /* Only RX has histogram stats */
+
+ rmon_stats->hist[0] = rx_stats->stats.rx_64_pkts;
+ rmon_stats->hist[1] = rx_stats->stats.rx_65_127_pkts;
+ rmon_stats->hist[2] = rx_stats->stats.rx_128_255_pkts;
+ rmon_stats->hist[3] = rx_stats->stats.rx_256_511_pkts;
+ rmon_stats->hist[4] = rx_stats->stats.rx_512_1023_pkts;
+ rmon_stats->hist[5] = rx_stats->stats.rx_1024_1518_pkts;
+ rmon_stats->hist[6] = rx_stats->stats.rx_1519_plus_pkts;
+
+ spin_unlock_bh(&priv->stats_lock);
+}
+
+static void emac_get_eth_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct emac_priv *priv = netdev_priv(dev);
+ union emac_hw_tx_stats *tx_stats;
+ union emac_hw_rx_stats *rx_stats;
+
+ tx_stats = &priv->tx_stats;
+ rx_stats = &priv->rx_stats;
+
+ spin_lock_bh(&priv->stats_lock);
+
+ emac_stats_update(priv);
+
+ mac_stats->MulticastFramesXmittedOK = tx_stats->stats.tx_multicast_pkts;
+ mac_stats->BroadcastFramesXmittedOK = tx_stats->stats.tx_broadcast_pkts;
+
+ mac_stats->MulticastFramesReceivedOK =
+ rx_stats->stats.rx_multicast_pkts;
+ mac_stats->BroadcastFramesReceivedOK =
+ rx_stats->stats.rx_broadcast_pkts;
+
+ mac_stats->SingleCollisionFrames = tx_stats->stats.tx_singleclsn_pkts;
+ mac_stats->MultipleCollisionFrames = tx_stats->stats.tx_multiclsn_pkts;
+ mac_stats->LateCollisions = tx_stats->stats.tx_lateclsn_pkts;
+ mac_stats->FramesAbortedDueToXSColls =
+ tx_stats->stats.tx_excessclsn_pkts;
+
+ spin_unlock_bh(&priv->stats_lock);
+}
+
+static void emac_get_pause_stats(struct net_device *dev,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct emac_priv *priv = netdev_priv(dev);
+ union emac_hw_tx_stats *tx_stats;
+ union emac_hw_rx_stats *rx_stats;
+
+ tx_stats = &priv->tx_stats;
+ rx_stats = &priv->rx_stats;
+
+ spin_lock_bh(&priv->stats_lock);
+
+ emac_stats_update(priv);
+
+ pause_stats->tx_pause_frames = tx_stats->stats.tx_pause_pkts;
+ pause_stats->rx_pause_frames = rx_stats->stats.rx_pause_pkts;
+
+ spin_unlock_bh(&priv->stats_lock);
+}
+
+/* Other statistics that are not derivable from standard statistics */
+
+#define EMAC_ETHTOOL_STAT(type, name) \
+ { offsetof(type, stats.name) / sizeof(u64), #name }
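+
+/*
+ * The offset is counted in u64 slots, so it can index the stats union's
+ * array[] member directly; see emac_get_ethtool_stats() below.
+ */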
+
+static const struct emac_ethtool_stats {
+ size_t offset;
+ char str[ETH_GSTRING_LEN];
+} emac_ethtool_rx_stats[] = {
+ EMAC_ETHTOOL_STAT(union emac_hw_rx_stats, rx_drp_fifo_full_pkts),
+ EMAC_ETHTOOL_STAT(union emac_hw_rx_stats, rx_truncate_fifo_full_pkts),
+};
+
+static int emac_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(emac_ethtool_rx_stats);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void emac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < ARRAY_SIZE(emac_ethtool_rx_stats); i++) {
+ memcpy(data, emac_ethtool_rx_stats[i].str,
+ ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+static void emac_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct emac_priv *priv = netdev_priv(dev);
+ u64 *rx_stats = (u64 *)&priv->rx_stats;
+ int i;
+
+ spin_lock_bh(&priv->stats_lock);
+
+ emac_stats_update(priv);
+
+ for (i = 0; i < ARRAY_SIZE(emac_ethtool_rx_stats); i++)
+ data[i] = rx_stats[emac_ethtool_rx_stats[i].offset];
+
+ spin_unlock_bh(&priv->stats_lock);
+}
+
+static int emac_ethtool_get_regs_len(struct net_device *dev)
+{
+ return (EMAC_DMA_REG_CNT + EMAC_MAC_REG_CNT) * sizeof(u32);
+}
+
+static void emac_ethtool_get_regs(struct net_device *dev,
+ struct ethtool_regs *regs, void *space)
+{
+ struct emac_priv *priv = netdev_priv(dev);
+ u32 *reg_space = space;
+ int i;
+
+ regs->version = 1;
+
+ for (i = 0; i < EMAC_DMA_REG_CNT; i++)
+ reg_space[i] = emac_rd(priv, DMA_CONFIGURATION + i * 4);
+
+ for (i = 0; i < EMAC_MAC_REG_CNT; i++)
+ reg_space[i + EMAC_DMA_REG_CNT] =
+ emac_rd(priv, MAC_GLOBAL_CONTROL + i * 4);
+}
+
+static void emac_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct emac_priv *priv = netdev_priv(dev);
+
+ pause->autoneg = priv->flow_control_autoneg;
+ pause->tx_pause = !!(priv->flow_control & FLOW_CTRL_TX);
+ pause->rx_pause = !!(priv->flow_control & FLOW_CTRL_RX);
+}
+
+static int emac_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct emac_priv *priv = netdev_priv(dev);
+ u8 fc = 0;
+
+ if (!netif_running(dev))
+ return -ENETDOWN;
+
+ priv->flow_control_autoneg = pause->autoneg;
+
+ if (pause->autoneg) {
+ emac_set_fc_autoneg(priv);
+ } else {
+ if (pause->tx_pause)
+ fc |= FLOW_CTRL_TX;
+
+ if (pause->rx_pause)
+ fc |= FLOW_CTRL_RX;
+
+ emac_set_fc(priv, fc);
+ }
+
+ return 0;
+}
+
+static void emac_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+ info->n_stats = ARRAY_SIZE(emac_ethtool_rx_stats);
+}
+
+static void emac_tx_timeout_task(struct work_struct *work)
+{
+ struct net_device *ndev;
+ struct emac_priv *priv;
+
+ priv = container_of(work, struct emac_priv, tx_timeout_task);
+ ndev = priv->ndev;
+
+ rtnl_lock();
+
+ /* No need to reset if already down */
+ if (!netif_running(ndev)) {
+ rtnl_unlock();
+ return;
+ }
+
+ netdev_err(ndev, "MAC reset due to TX timeout\n");
+
+ netif_trans_update(ndev); /* prevent tx timeout */
+ dev_close(ndev);
+ dev_open(ndev, NULL);
+
+ rtnl_unlock();
+}
+
+static void emac_sw_init(struct emac_priv *priv)
+{
+ priv->dma_buf_sz = EMAC_DEFAULT_BUFSIZE;
+
+ priv->tx_ring.total_cnt = DEFAULT_TX_RING_NUM;
+ priv->rx_ring.total_cnt = DEFAULT_RX_RING_NUM;
+
+ spin_lock_init(&priv->stats_lock);
+
+ INIT_WORK(&priv->tx_timeout_task, emac_tx_timeout_task);
+
+ priv->tx_coal_frames = EMAC_TX_FRAMES;
+ priv->tx_coal_timeout = EMAC_TX_COAL_TIMEOUT;
+
+ timer_setup(&priv->txtimer, emac_tx_coal_timer, 0);
+ timer_setup(&priv->stats_timer, emac_stats_timer, 0);
+}
+
+static irqreturn_t emac_interrupt_handler(int irq, void *dev_id)
+{
+ struct net_device *ndev = (struct net_device *)dev_id;
+ struct emac_priv *priv = netdev_priv(ndev);
+ bool should_schedule = false;
+ u32 clr = 0;
+ u32 status;
+
+ status = emac_rd(priv, DMA_STATUS_IRQ);
+
+ if (status & MREGBIT_TRANSMIT_TRANSFER_DONE_IRQ) {
+ clr |= MREGBIT_TRANSMIT_TRANSFER_DONE_IRQ;
+ should_schedule = true;
+ }
+
+ if (status & MREGBIT_TRANSMIT_DES_UNAVAILABLE_IRQ)
+ clr |= MREGBIT_TRANSMIT_DES_UNAVAILABLE_IRQ;
+
+ if (status & MREGBIT_TRANSMIT_DMA_STOPPED_IRQ)
+ clr |= MREGBIT_TRANSMIT_DMA_STOPPED_IRQ;
+
+ if (status & MREGBIT_RECEIVE_TRANSFER_DONE_IRQ) {
+ clr |= MREGBIT_RECEIVE_TRANSFER_DONE_IRQ;
+ should_schedule = true;
+ }
+
+ if (status & MREGBIT_RECEIVE_DES_UNAVAILABLE_IRQ)
+ clr |= MREGBIT_RECEIVE_DES_UNAVAILABLE_IRQ;
+
+ if (status & MREGBIT_RECEIVE_DMA_STOPPED_IRQ)
+ clr |= MREGBIT_RECEIVE_DMA_STOPPED_IRQ;
+
+ if (status & MREGBIT_RECEIVE_MISSED_FRAME_IRQ)
+ clr |= MREGBIT_RECEIVE_MISSED_FRAME_IRQ;
+
+ if (should_schedule) {
+ if (napi_schedule_prep(&priv->napi)) {
+ emac_disable_interrupt(priv);
+ __napi_schedule_irqoff(&priv->napi);
+ }
+ }
+
+ emac_wr(priv, DMA_STATUS_IRQ, clr);
+
+ return IRQ_HANDLED;
+}
+
+static void emac_configure_tx(struct emac_priv *priv)
+{
+ u32 val;
+
+ /* Set base address */
+ val = (u32)priv->tx_ring.desc_dma_addr;
+ emac_wr(priv, DMA_TRANSMIT_BASE_ADDRESS, val);
+
+ /* Set TX inter-frame gap value, enable transmit */
+ val = emac_rd(priv, MAC_TRANSMIT_CONTROL);
+ val &= ~MREGBIT_IFG_LEN;
+ val |= MREGBIT_TRANSMIT_ENABLE;
+ val |= MREGBIT_TRANSMIT_AUTO_RETRY;
+ emac_wr(priv, MAC_TRANSMIT_CONTROL, val);
+
+ emac_wr(priv, DMA_TRANSMIT_AUTO_POLL_COUNTER, 0x0);
+
+ /* Start TX DMA */
+ val = emac_rd(priv, DMA_CONTROL);
+ val |= MREGBIT_START_STOP_TRANSMIT_DMA;
+ emac_wr(priv, DMA_CONTROL, val);
+}
+
+static void emac_configure_rx(struct emac_priv *priv)
+{
+ u32 val;
+
+ /* Set base address */
+ val = (u32)priv->rx_ring.desc_dma_addr;
+ emac_wr(priv, DMA_RECEIVE_BASE_ADDRESS, val);
+
+ /* Enable receive */
+ val = emac_rd(priv, MAC_RECEIVE_CONTROL);
+ val |= MREGBIT_RECEIVE_ENABLE;
+ val |= MREGBIT_STORE_FORWARD;
+ emac_wr(priv, MAC_RECEIVE_CONTROL, val);
+
+ /* Start RX DMA */
+ val = emac_rd(priv, DMA_CONTROL);
+ val |= MREGBIT_START_STOP_RECEIVE_DMA;
+ emac_wr(priv, DMA_CONTROL, val);
+}
+
+static void emac_adjust_link(struct net_device *dev)
+{
+ struct emac_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = dev->phydev;
+ u32 ctrl;
+
+ if (phydev->link) {
+ ctrl = emac_rd(priv, MAC_GLOBAL_CONTROL);
+
+ /* Update duplex and speed from PHY */
+
+ FIELD_MODIFY(MREGBIT_FULL_DUPLEX_MODE, &ctrl,
+ phydev->duplex == DUPLEX_FULL);
+
+ ctrl &= ~MREGBIT_SPEED;
+
+ switch (phydev->speed) {
+ case SPEED_1000:
+ ctrl |= MREGBIT_SPEED_1000M;
+ break;
+ case SPEED_100:
+ ctrl |= MREGBIT_SPEED_100M;
+ break;
+ case SPEED_10:
+ ctrl |= MREGBIT_SPEED_10M;
+ break;
+ default:
+ netdev_err(dev, "Unknown speed: %d\n", phydev->speed);
+ phydev->speed = SPEED_UNKNOWN;
+ break;
+ }
+
+ emac_wr(priv, MAC_GLOBAL_CONTROL, ctrl);
+
+ emac_set_fc_autoneg(priv);
+ }
+
+ phy_print_status(phydev);
+}
+
+static void emac_update_delay_line(struct emac_priv *priv)
+{
+ u32 mask = 0, val = 0;
+
+ mask |= EMAC_RX_DLINE_EN;
+ mask |= EMAC_RX_DLINE_STEP_MASK | EMAC_RX_DLINE_CODE_MASK;
+ mask |= EMAC_TX_DLINE_EN;
+ mask |= EMAC_TX_DLINE_STEP_MASK | EMAC_TX_DLINE_CODE_MASK;
+
+ if (phy_interface_mode_is_rgmii(priv->phy_interface)) {
+ val |= EMAC_RX_DLINE_EN;
+ val |= FIELD_PREP(EMAC_RX_DLINE_STEP_MASK,
+ EMAC_DLINE_STEP_15P6);
+ val |= FIELD_PREP(EMAC_RX_DLINE_CODE_MASK, priv->rx_delay);
+
+ val |= EMAC_TX_DLINE_EN;
+ val |= FIELD_PREP(EMAC_TX_DLINE_STEP_MASK,
+ EMAC_DLINE_STEP_15P6);
+ val |= FIELD_PREP(EMAC_TX_DLINE_CODE_MASK, priv->tx_delay);
+ }
+
+ regmap_update_bits(priv->regmap_apmu,
+ priv->regmap_apmu_offset + APMU_EMAC_DLINE_REG,
+ mask, val);
+}
+
+static int emac_phy_connect(struct net_device *ndev)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+ struct device *dev = &priv->pdev->dev;
+ struct phy_device *phydev;
+ struct device_node *np;
+ int ret;
+
+ ret = of_get_phy_mode(dev->of_node, &priv->phy_interface);
+ if (ret) {
+ netdev_err(ndev, "No phy-mode found");
+ return ret;
+ }
+
+ switch (priv->phy_interface) {
+ case PHY_INTERFACE_MODE_RMII:
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ break;
+ default:
+ netdev_err(ndev, "Unsupported PHY interface %s",
+ phy_modes(priv->phy_interface));
+ return -EINVAL;
+ }
+
+ np = of_parse_phandle(dev->of_node, "phy-handle", 0);
+ if (!np && of_phy_is_fixed_link(dev->of_node))
+ np = of_node_get(dev->of_node);
+
+ if (!np) {
+ netdev_err(ndev, "No PHY specified");
+ return -ENODEV;
+ }
+
+ ret = emac_phy_interface_config(priv);
+ if (ret)
+ goto err_node_put;
+
+ phydev = of_phy_connect(ndev, np, &emac_adjust_link, 0,
+ priv->phy_interface);
+ if (!phydev) {
+ netdev_err(ndev, "Could not attach to PHY\n");
+ ret = -ENODEV;
+ goto err_node_put;
+ }
+
+ phy_support_asym_pause(phydev);
+
+ phydev->mac_managed_pm = true;
+
+ emac_update_delay_line(priv);
+
+err_node_put:
+ of_node_put(np);
+ return ret;
+}
+
+static int emac_up(struct emac_priv *priv)
+{
+ struct platform_device *pdev = priv->pdev;
+ struct net_device *ndev = priv->ndev;
+ int ret;
+
+ pm_runtime_get_sync(&pdev->dev);
+
+ ret = emac_phy_connect(ndev);
+ if (ret) {
+ dev_err(&pdev->dev, "emac_phy_connect failed\n");
+ goto err_pm_put;
+ }
+
+ emac_init_hw(priv);
+
+ emac_set_mac_addr(priv, ndev->dev_addr);
+ emac_configure_tx(priv);
+ emac_configure_rx(priv);
+
+ emac_alloc_rx_desc_buffers(priv);
+
+ phy_start(ndev->phydev);
+
+ ret = request_irq(priv->irq, emac_interrupt_handler, IRQF_SHARED,
+ ndev->name, ndev);
+ if (ret) {
+ dev_err(&pdev->dev, "request_irq failed\n");
+ goto err_reset_disconnect_phy;
+ }
+
+ /* Don't enable MAC interrupts */
+ emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0);
+
+ /* Enable DMA interrupts */
+ emac_wr(priv, DMA_INTERRUPT_ENABLE,
+ MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE |
+ MREGBIT_TRANSMIT_DMA_STOPPED_INTR_ENABLE |
+ MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE |
+ MREGBIT_RECEIVE_DMA_STOPPED_INTR_ENABLE |
+ MREGBIT_RECEIVE_MISSED_FRAME_INTR_ENABLE);
+
+ napi_enable(&priv->napi);
+
+ netif_start_queue(ndev);
+
+ mod_timer(&priv->stats_timer, jiffies);
+
+ return 0;
+
+err_reset_disconnect_phy:
+ emac_reset_hw(priv);
+ phy_disconnect(ndev->phydev);
+
+err_pm_put:
+ pm_runtime_put_sync(&pdev->dev);
+ return ret;
+}
+
+static int emac_down(struct emac_priv *priv)
+{
+ struct platform_device *pdev = priv->pdev;
+ struct net_device *ndev = priv->ndev;
+
+ netif_stop_queue(ndev);
+
+ phy_disconnect(ndev->phydev);
+
+ emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0);
+ emac_wr(priv, DMA_INTERRUPT_ENABLE, 0x0);
+
+ free_irq(priv->irq, ndev);
+
+ napi_disable(&priv->napi);
+
+ timer_delete_sync(&priv->txtimer);
+ cancel_work_sync(&priv->tx_timeout_task);
+
+ timer_delete_sync(&priv->stats_timer);
+
+ emac_reset_hw(priv);
+
+ /* Update and save current stats, see emac_stats_update() for usage */
+
+ spin_lock_bh(&priv->stats_lock);
+
+ emac_stats_update(priv);
+
+ priv->tx_stats_off = priv->tx_stats;
+ priv->rx_stats_off = priv->rx_stats;
+
+ spin_unlock_bh(&priv->stats_lock);
+
+ pm_runtime_put_sync(&pdev->dev);
+ return 0;
+}
+
+/* Called when net interface is brought up. */
+static int emac_open(struct net_device *ndev)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+ struct device *dev = &priv->pdev->dev;
+ int ret;
+
+ ret = emac_alloc_tx_resources(priv);
+ if (ret) {
+ dev_err(dev, "Cannot allocate TX resources\n");
+ return ret;
+ }
+
+ ret = emac_alloc_rx_resources(priv);
+ if (ret) {
+ dev_err(dev, "Cannot allocate RX resources\n");
+ goto err_free_tx;
+ }
+
+ ret = emac_up(priv);
+ if (ret) {
+ dev_err(dev, "Error when bringing interface up\n");
+ goto err_free_rx;
+ }
+ return 0;
+
+err_free_rx:
+ emac_free_rx_resources(priv);
+err_free_tx:
+ emac_free_tx_resources(priv);
+
+ return ret;
+}
+
+/* Called when interface is brought down. */
+static int emac_stop(struct net_device *ndev)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+
+ emac_down(priv);
+ emac_free_tx_resources(priv);
+ emac_free_rx_resources(priv);
+
+ return 0;
+}
+
+static const struct ethtool_ops emac_ethtool_ops = {
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .nway_reset = phy_ethtool_nway_reset,
+ .get_drvinfo = emac_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+
+ .get_regs = emac_ethtool_get_regs,
+ .get_regs_len = emac_ethtool_get_regs_len,
+
+ .get_rmon_stats = emac_get_rmon_stats,
+ .get_pause_stats = emac_get_pause_stats,
+ .get_eth_mac_stats = emac_get_eth_mac_stats,
+
+ .get_sset_count = emac_get_sset_count,
+ .get_strings = emac_get_strings,
+ .get_ethtool_stats = emac_get_ethtool_stats,
+
+ .get_pauseparam = emac_get_pauseparam,
+ .set_pauseparam = emac_set_pauseparam,
+};
+
+static const struct net_device_ops emac_netdev_ops = {
+ .ndo_open = emac_open,
+ .ndo_stop = emac_stop,
+ .ndo_start_xmit = emac_start_xmit,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = emac_set_mac_address,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
+ .ndo_change_mtu = emac_change_mtu,
+ .ndo_tx_timeout = emac_tx_timeout,
+ .ndo_set_rx_mode = emac_set_rx_mode,
+ .ndo_get_stats64 = emac_get_stats64,
+};
+
+/* Currently we always use 15.6 ps/step for the delay line */
+
+static u32 delay_ps_to_unit(u32 ps)
+{
+ return DIV_ROUND_CLOSEST(ps * 10, 156);
+}
+
+static u32 delay_unit_to_ps(u32 unit)
+{
+ return DIV_ROUND_CLOSEST(unit * 156, 10);
+}
+
+#define EMAC_MAX_DELAY_UNIT FIELD_MAX(EMAC_TX_DLINE_CODE_MASK)
+
+/* Minus one just to be safe from rounding errors */
+#define EMAC_MAX_DELAY_PS (delay_unit_to_ps(EMAC_MAX_DELAY_UNIT - 1))
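+
+/*
+ * Worked example at the 15.6 ps/step setting: a DT property of
+ * "rx-internal-delay-ps = <1500>" becomes DIV_ROUND_CLOSEST(1500 * 10, 156)
+ * = 96 delay-line units, and the largest accepted delay is
+ * delay_unit_to_ps(254) ~= 3962 ps.
+ */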
+
+static int emac_config_dt(struct platform_device *pdev, struct emac_priv *priv)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ u8 mac_addr[ETH_ALEN] = { 0 };
+ int ret;
+
+ priv->iobase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->iobase))
+ return dev_err_probe(dev, PTR_ERR(priv->iobase),
+ "ioremap failed\n");
+
+ priv->regmap_apmu =
+ syscon_regmap_lookup_by_phandle_args(np, "spacemit,apmu", 1,
+ &priv->regmap_apmu_offset);
+
+ if (IS_ERR(priv->regmap_apmu))
+ return dev_err_probe(dev, PTR_ERR(priv->regmap_apmu),
+ "failed to get syscon\n");
+
+ priv->irq = platform_get_irq(pdev, 0);
+ if (priv->irq < 0)
+ return priv->irq;
+
+ ret = of_get_mac_address(np, mac_addr);
+ if (ret) {
+ if (ret == -EPROBE_DEFER)
+ return dev_err_probe(dev, ret,
+ "Can't get MAC address\n");
+
+ dev_info(&pdev->dev, "Using random MAC address\n");
+ eth_hw_addr_random(priv->ndev);
+ } else {
+ eth_hw_addr_set(priv->ndev, mac_addr);
+ }
+
+ priv->tx_delay = 0;
+ priv->rx_delay = 0;
+
+ of_property_read_u32(np, "tx-internal-delay-ps", &priv->tx_delay);
+ of_property_read_u32(np, "rx-internal-delay-ps", &priv->rx_delay);
+
+ if (priv->tx_delay > EMAC_MAX_DELAY_PS) {
+ dev_err(&pdev->dev,
+ "tx-internal-delay-ps too large: max %d, got %d",
+ EMAC_MAX_DELAY_PS, priv->tx_delay);
+ return -EINVAL;
+ }
+
+ if (priv->rx_delay > EMAC_MAX_DELAY_PS) {
+ dev_err(&pdev->dev,
+ "rx-internal-delay-ps too large: max %d, got %d",
+ EMAC_MAX_DELAY_PS, priv->rx_delay);
+ return -EINVAL;
+ }
+
+ priv->tx_delay = delay_ps_to_unit(priv->tx_delay);
+ priv->rx_delay = delay_ps_to_unit(priv->rx_delay);
+
+ return 0;
+}
+
+static void emac_phy_deregister_fixed_link(void *data)
+{
+ struct device_node *of_node = data;
+
+ of_phy_deregister_fixed_link(of_node);
+}
+
+static int emac_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct reset_control *reset;
+ struct net_device *ndev;
+ struct emac_priv *priv;
+ int ret;
+
+ ndev = devm_alloc_etherdev(dev, sizeof(struct emac_priv));
+ if (!ndev)
+ return -ENOMEM;
+
+ ndev->hw_features = NETIF_F_SG;
+ ndev->features |= ndev->hw_features;
+
+ ndev->max_mtu = EMAC_RX_BUF_4K - (ETH_HLEN + ETH_FCS_LEN);
+ ndev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
+
+ priv = netdev_priv(ndev);
+ priv->ndev = ndev;
+ priv->pdev = pdev;
+ platform_set_drvdata(pdev, priv);
+
+ ret = emac_config_dt(pdev, priv);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Configuration failed\n");
+
+ ndev->watchdog_timeo = 5 * HZ;
+ ndev->base_addr = (unsigned long)priv->iobase;
+ ndev->irq = priv->irq;
+
+ ndev->ethtool_ops = &emac_ethtool_ops;
+ ndev->netdev_ops = &emac_netdev_ops;
+
+ devm_pm_runtime_enable(&pdev->dev);
+
+ priv->bus_clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ if (IS_ERR(priv->bus_clk))
+ return dev_err_probe(dev, PTR_ERR(priv->bus_clk),
+ "Failed to get clock\n");
+
+ reset = devm_reset_control_get_optional_exclusive_deasserted(&pdev->dev,
+ NULL);
+ if (IS_ERR(reset))
+ return dev_err_probe(dev, PTR_ERR(reset),
+ "Failed to get reset\n");
+
+ if (of_phy_is_fixed_link(dev->of_node)) {
+ ret = of_phy_register_fixed_link(dev->of_node);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to register fixed-link\n");
+
+ ret = devm_add_action_or_reset(dev,
+ emac_phy_deregister_fixed_link,
+ dev->of_node);
+
+ if (ret) {
+ dev_err(dev, "devm_add_action_or_reset failed\n");
+ return ret;
+ }
+ }
+
+ emac_sw_init(priv);
+
+ ret = emac_mdio_init(priv);
+ if (ret)
+ goto err_timer_delete;
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ ret = devm_register_netdev(dev, ndev);
+ if (ret) {
+ dev_err(dev, "devm_register_netdev failed\n");
+ goto err_timer_delete;
+ }
+
+ netif_napi_add(ndev, &priv->napi, emac_rx_poll);
+ netif_carrier_off(ndev);
+
+ return 0;
+
+err_timer_delete:
+ timer_delete_sync(&priv->txtimer);
+ timer_delete_sync(&priv->stats_timer);
+
+ return ret;
+}
+
+static void emac_remove(struct platform_device *pdev)
+{
+ struct emac_priv *priv = platform_get_drvdata(pdev);
+
+ timer_shutdown_sync(&priv->txtimer);
+ cancel_work_sync(&priv->tx_timeout_task);
+
+ timer_shutdown_sync(&priv->stats_timer);
+
+ emac_reset_hw(priv);
+}
+
+static int emac_resume(struct device *dev)
+{
+ struct emac_priv *priv = dev_get_drvdata(dev);
+ struct net_device *ndev = priv->ndev;
+ int ret;
+
+ ret = clk_prepare_enable(priv->bus_clk);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable bus clock: %d\n", ret);
+ return ret;
+ }
+
+ if (!netif_running(ndev))
+ return 0;
+
+ ret = emac_open(ndev);
+ if (ret) {
+ clk_disable_unprepare(priv->bus_clk);
+ return ret;
+ }
+
+ netif_device_attach(ndev);
+
+ mod_timer(&priv->stats_timer, jiffies);
+
+ return 0;
+}
+
+static int emac_suspend(struct device *dev)
+{
+ struct emac_priv *priv = dev_get_drvdata(dev);
+ struct net_device *ndev = priv->ndev;
+
+ if (!ndev || !netif_running(ndev)) {
+ clk_disable_unprepare(priv->bus_clk);
+ return 0;
+ }
+
+ emac_stop(ndev);
+
+ clk_disable_unprepare(priv->bus_clk);
+ netif_device_detach(ndev);
+ return 0;
+}
+
+static const struct dev_pm_ops emac_pm_ops = {
+ SYSTEM_SLEEP_PM_OPS(emac_suspend, emac_resume)
+};
+
+static const struct of_device_id emac_of_match[] = {
+ { .compatible = "spacemit,k1-emac" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, emac_of_match);
+
+static struct platform_driver emac_driver = {
+ .probe = emac_probe,
+ .remove = emac_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = of_match_ptr(emac_of_match),
+ .pm = &emac_pm_ops,
+ },
+};
+module_platform_driver(emac_driver);
+
+MODULE_DESCRIPTION("SpacemiT K1 Ethernet driver");
+MODULE_AUTHOR("Vivian Wang <wangruikang@iscas.ac.cn>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/spacemit/k1_emac.h b/drivers/net/ethernet/spacemit/k1_emac.h
new file mode 100644
index 000000000000..577efe66573e
--- /dev/null
+++ b/drivers/net/ethernet/spacemit/k1_emac.h
@@ -0,0 +1,416 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * SpacemiT K1 Ethernet hardware definitions
+ *
+ * Copyright (C) 2023-2025 SpacemiT (Hangzhou) Technology Co. Ltd
+ * Copyright (C) 2025 Vivian Wang <wangruikang@iscas.ac.cn>
+ */
+
+#ifndef _K1_EMAC_H_
+#define _K1_EMAC_H_
+
+#include <linux/stddef.h>
+
+/* APMU syscon registers */
+
+#define APMU_EMAC_CTRL_REG 0x0
+
+#define PHY_INTF_RGMII BIT(2)
+
+/*
+ * Only valid for RMII mode
+ * 0: Ref clock from External PHY
+ * 1: Ref clock from SoC
+ */
+#define REF_CLK_SEL BIT(3)
+
+/*
+ * Function clock select
+ * 0: 208 MHz
+ * 1: 312 MHz
+ */
+#define FUNC_CLK_SEL BIT(4)
+
+/* Only valid for RMII, invert TX clk */
+#define RMII_TX_CLK_SEL BIT(6)
+
+/* Only valid for RMII, invert RX clk */
+#define RMII_RX_CLK_SEL BIT(7)
+
+/*
+ * Only valid for RGMII
+ * 0: TX clk from RX clk
+ * 1: TX clk from SoC
+ */
+#define RGMII_TX_CLK_SEL BIT(8)
+
+#define PHY_IRQ_EN BIT(12)
+#define AXI_SINGLE_ID BIT(13)
+
+#define APMU_EMAC_DLINE_REG 0x4
+
+#define EMAC_RX_DLINE_EN BIT(0)
+#define EMAC_RX_DLINE_STEP_MASK GENMASK(5, 4)
+#define EMAC_RX_DLINE_CODE_MASK GENMASK(15, 8)
+
+#define EMAC_TX_DLINE_EN BIT(16)
+#define EMAC_TX_DLINE_STEP_MASK GENMASK(21, 20)
+#define EMAC_TX_DLINE_CODE_MASK GENMASK(31, 24)
+
+#define EMAC_DLINE_STEP_15P6 0 /* 15.6 ps/step */
+#define EMAC_DLINE_STEP_24P4 1 /* 24.4 ps/step */
+#define EMAC_DLINE_STEP_29P7 2 /* 29.7 ps/step */
+#define EMAC_DLINE_STEP_35P1 3 /* 35.1 ps/step */
+
+/* DMA register set */
+#define DMA_CONFIGURATION 0x0000
+#define DMA_CONTROL 0x0004
+#define DMA_STATUS_IRQ 0x0008
+#define DMA_INTERRUPT_ENABLE 0x000c
+
+#define DMA_TRANSMIT_AUTO_POLL_COUNTER 0x0010
+#define DMA_TRANSMIT_POLL_DEMAND 0x0014
+#define DMA_RECEIVE_POLL_DEMAND 0x0018
+
+#define DMA_TRANSMIT_BASE_ADDRESS 0x001c
+#define DMA_RECEIVE_BASE_ADDRESS 0x0020
+#define DMA_MISSED_FRAME_COUNTER 0x0024
+#define DMA_STOP_FLUSH_COUNTER 0x0028
+
+#define DMA_RECEIVE_IRQ_MITIGATION_CTRL 0x002c
+
+#define DMA_CURRENT_TRANSMIT_DESCRIPTOR_POINTER 0x0030
+#define DMA_CURRENT_TRANSMIT_BUFFER_POINTER 0x0034
+#define DMA_CURRENT_RECEIVE_DESCRIPTOR_POINTER 0x0038
+#define DMA_CURRENT_RECEIVE_BUFFER_POINTER 0x003c
+
+/* MAC Register set */
+#define MAC_GLOBAL_CONTROL 0x0100
+#define MAC_TRANSMIT_CONTROL 0x0104
+#define MAC_RECEIVE_CONTROL 0x0108
+#define MAC_MAXIMUM_FRAME_SIZE 0x010c
+#define MAC_TRANSMIT_JABBER_SIZE 0x0110
+#define MAC_RECEIVE_JABBER_SIZE 0x0114
+#define MAC_ADDRESS_CONTROL 0x0118
+#define MAC_MDIO_CLK_DIV 0x011c
+#define MAC_ADDRESS1_HIGH 0x0120
+#define MAC_ADDRESS1_MED 0x0124
+#define MAC_ADDRESS1_LOW 0x0128
+#define MAC_ADDRESS2_HIGH 0x012c
+#define MAC_ADDRESS2_MED 0x0130
+#define MAC_ADDRESS2_LOW 0x0134
+#define MAC_ADDRESS3_HIGH 0x0138
+#define MAC_ADDRESS3_MED 0x013c
+#define MAC_ADDRESS3_LOW 0x0140
+#define MAC_ADDRESS4_HIGH 0x0144
+#define MAC_ADDRESS4_MED 0x0148
+#define MAC_ADDRESS4_LOW 0x014c
+#define MAC_MULTICAST_HASH_TABLE1 0x0150
+#define MAC_MULTICAST_HASH_TABLE2 0x0154
+#define MAC_MULTICAST_HASH_TABLE3 0x0158
+#define MAC_MULTICAST_HASH_TABLE4 0x015c
+#define MAC_FC_CONTROL 0x0160
+#define MAC_FC_PAUSE_FRAME_GENERATE 0x0164
+#define MAC_FC_SOURCE_ADDRESS_HIGH 0x0168
+#define MAC_FC_SOURCE_ADDRESS_MED 0x016c
+#define MAC_FC_SOURCE_ADDRESS_LOW 0x0170
+#define MAC_FC_DESTINATION_ADDRESS_HIGH 0x0174
+#define MAC_FC_DESTINATION_ADDRESS_MED 0x0178
+#define MAC_FC_DESTINATION_ADDRESS_LOW 0x017c
+#define MAC_FC_PAUSE_TIME_VALUE 0x0180
+#define MAC_FC_HIGH_PAUSE_TIME 0x0184
+#define MAC_FC_LOW_PAUSE_TIME 0x0188
+#define MAC_FC_PAUSE_HIGH_THRESHOLD 0x018c
+#define MAC_FC_PAUSE_LOW_THRESHOLD 0x0190
+#define MAC_MDIO_CONTROL 0x01a0
+#define MAC_MDIO_DATA 0x01a4
+#define MAC_RX_STATCTR_CONTROL 0x01a8
+#define MAC_RX_STATCTR_DATA_HIGH 0x01ac
+#define MAC_RX_STATCTR_DATA_LOW 0x01b0
+#define MAC_TX_STATCTR_CONTROL 0x01b4
+#define MAC_TX_STATCTR_DATA_HIGH 0x01b8
+#define MAC_TX_STATCTR_DATA_LOW 0x01bc
+#define MAC_TRANSMIT_FIFO_ALMOST_FULL 0x01c0
+#define MAC_TRANSMIT_PACKET_START_THRESHOLD 0x01c4
+#define MAC_RECEIVE_PACKET_START_THRESHOLD 0x01c8
+#define MAC_STATUS_IRQ 0x01e0
+#define MAC_INTERRUPT_ENABLE 0x01e4
+
+/* Used for register dump */
+#define EMAC_DMA_REG_CNT 16
+#define EMAC_MAC_REG_CNT 124
+
+/* DMA_CONFIGURATION (0x0000) */
+
+/*
+ * 0: DMA controller in normal operation mode
+ * 1: DMA controller reset to its default state,
+ *    clearing all internal state information
+ */
+#define MREGBIT_SOFTWARE_RESET BIT(0)
+
+#define MREGBIT_BURST_1WORD BIT(1)
+#define MREGBIT_BURST_2WORD BIT(2)
+#define MREGBIT_BURST_4WORD BIT(3)
+#define MREGBIT_BURST_8WORD BIT(4)
+#define MREGBIT_BURST_16WORD BIT(5)
+#define MREGBIT_BURST_32WORD BIT(6)
+#define MREGBIT_BURST_64WORD BIT(7)
+#define MREGBIT_BURST_LENGTH GENMASK(7, 1)
+#define MREGBIT_DESCRIPTOR_SKIP_LENGTH GENMASK(12, 8)
+
+/* Receive and Transmit DMA operate in big-endian mode for descriptors */
+#define MREGBIT_DESCRIPTOR_BYTE_ORDERING BIT(13)
+
+#define MREGBIT_BIG_LITLE_ENDIAN BIT(14)
+#define MREGBIT_TX_RX_ARBITRATION BIT(15)
+#define MREGBIT_WAIT_FOR_DONE BIT(16)
+#define MREGBIT_STRICT_BURST BIT(17)
+#define MREGBIT_DMA_64BIT_MODE BIT(18)
+
+/* DMA_CONTROL (0x0004) */
+#define MREGBIT_START_STOP_TRANSMIT_DMA BIT(0)
+#define MREGBIT_START_STOP_RECEIVE_DMA BIT(1)
+
+/* DMA_STATUS_IRQ (0x0008) */
+#define MREGBIT_TRANSMIT_TRANSFER_DONE_IRQ BIT(0)
+#define MREGBIT_TRANSMIT_DES_UNAVAILABLE_IRQ BIT(1)
+#define MREGBIT_TRANSMIT_DMA_STOPPED_IRQ BIT(2)
+#define MREGBIT_RECEIVE_TRANSFER_DONE_IRQ BIT(4)
+#define MREGBIT_RECEIVE_DES_UNAVAILABLE_IRQ BIT(5)
+#define MREGBIT_RECEIVE_DMA_STOPPED_IRQ BIT(6)
+#define MREGBIT_RECEIVE_MISSED_FRAME_IRQ BIT(7)
+#define MREGBIT_MAC_IRQ BIT(8)
+#define MREGBIT_TRANSMIT_DMA_STATE GENMASK(18, 16)
+#define MREGBIT_RECEIVE_DMA_STATE GENMASK(23, 20)
+
+/* DMA_INTERRUPT_ENABLE (0x000c) */
+#define MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE BIT(0)
+#define MREGBIT_TRANSMIT_DES_UNAVAILABLE_INTR_ENABLE BIT(1)
+#define MREGBIT_TRANSMIT_DMA_STOPPED_INTR_ENABLE BIT(2)
+#define MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE BIT(4)
+#define MREGBIT_RECEIVE_DES_UNAVAILABLE_INTR_ENABLE BIT(5)
+#define MREGBIT_RECEIVE_DMA_STOPPED_INTR_ENABLE BIT(6)
+#define MREGBIT_RECEIVE_MISSED_FRAME_INTR_ENABLE BIT(7)
+#define MREGBIT_MAC_INTR_ENABLE BIT(8)
+
+/* DMA_RECEIVE_IRQ_MITIGATION_CTRL (0x002c) */
+#define MREGBIT_RECEIVE_IRQ_FRAME_COUNTER_MASK GENMASK(7, 0)
+#define MREGBIT_RECEIVE_IRQ_TIMEOUT_COUNTER_MASK GENMASK(27, 8)
+#define MREGBIT_RECEIVE_IRQ_FRAME_COUNTER_MODE BIT(30)
+#define MREGBIT_RECEIVE_IRQ_MITIGATION_ENABLE BIT(31)
+
+/* MAC_GLOBAL_CONTROL (0x0100) */
+#define MREGBIT_SPEED GENMASK(1, 0)
+#define MREGBIT_SPEED_10M 0x0
+#define MREGBIT_SPEED_100M BIT(0)
+#define MREGBIT_SPEED_1000M BIT(1)
+#define MREGBIT_FULL_DUPLEX_MODE BIT(2)
+#define MREGBIT_RESET_RX_STAT_COUNTERS BIT(3)
+#define MREGBIT_RESET_TX_STAT_COUNTERS BIT(4)
+#define MREGBIT_UNICAST_WAKEUP_MODE BIT(8)
+#define MREGBIT_MAGIC_PACKET_WAKEUP_MODE BIT(9)
+
+/* MAC_TRANSMIT_CONTROL (0x0104) */
+#define MREGBIT_TRANSMIT_ENABLE BIT(0)
+#define MREGBIT_INVERT_FCS BIT(1)
+#define MREGBIT_DISABLE_FCS_INSERT BIT(2)
+#define MREGBIT_TRANSMIT_AUTO_RETRY BIT(3)
+#define MREGBIT_IFG_LEN GENMASK(6, 4)
+#define MREGBIT_PREAMBLE_LENGTH GENMASK(9, 7)
+
+/* MAC_RECEIVE_CONTROL (0x0108) */
+#define MREGBIT_RECEIVE_ENABLE BIT(0)
+#define MREGBIT_DISABLE_FCS_CHECK BIT(1)
+#define MREGBIT_STRIP_FCS BIT(2)
+#define MREGBIT_STORE_FORWARD BIT(3)
+#define MREGBIT_STATUS_FIRST BIT(4)
+#define MREGBIT_PASS_BAD_FRAMES BIT(5)
+#define MREGBIT_ACOOUNT_VLAN BIT(6)
+
+/* MAC_MAXIMUM_FRAME_SIZE (0x010c) */
+#define MREGBIT_MAX_FRAME_SIZE GENMASK(13, 0)
+
+/* MAC_TRANSMIT_JABBER_SIZE (0x0110) */
+#define MREGBIT_TRANSMIT_JABBER_SIZE GENMASK(15, 0)
+
+/* MAC_RECEIVE_JABBER_SIZE (0x0114) */
+#define MREGBIT_RECEIVE_JABBER_SIZE GENMASK(15, 0)
+
+/* MAC_ADDRESS_CONTROL (0x0118) */
+#define MREGBIT_MAC_ADDRESS1_ENABLE BIT(0)
+#define MREGBIT_MAC_ADDRESS2_ENABLE BIT(1)
+#define MREGBIT_MAC_ADDRESS3_ENABLE BIT(2)
+#define MREGBIT_MAC_ADDRESS4_ENABLE BIT(3)
+#define MREGBIT_INVERSE_MAC_ADDRESS1_ENABLE BIT(4)
+#define MREGBIT_INVERSE_MAC_ADDRESS2_ENABLE BIT(5)
+#define MREGBIT_INVERSE_MAC_ADDRESS3_ENABLE BIT(6)
+#define MREGBIT_INVERSE_MAC_ADDRESS4_ENABLE BIT(7)
+#define MREGBIT_PROMISCUOUS_MODE BIT(8)
+
+/* MAC_FC_CONTROL (0x0160) */
+#define MREGBIT_FC_DECODE_ENABLE BIT(0)
+#define MREGBIT_FC_GENERATION_ENABLE BIT(1)
+#define MREGBIT_AUTO_FC_GENERATION_ENABLE BIT(2)
+#define MREGBIT_MULTICAST_MODE BIT(3)
+#define MREGBIT_BLOCK_PAUSE_FRAMES BIT(4)
+
+/* MAC_FC_PAUSE_FRAME_GENERATE (0x0164) */
+#define MREGBIT_GENERATE_PAUSE_FRAME BIT(0)
+
+/* MAC_FC_PAUSE_TIME_VALUE (0x0180) */
+#define MREGBIT_MAC_FC_PAUSE_TIME GENMASK(15, 0)
+
+/* MAC_MDIO_CONTROL (0x01a0) */
+#define MREGBIT_PHY_ADDRESS GENMASK(4, 0)
+#define MREGBIT_REGISTER_ADDRESS GENMASK(9, 5)
+#define MREGBIT_MDIO_READ_WRITE BIT(10)
+#define MREGBIT_START_MDIO_TRANS BIT(15)
+
+/* MAC_MDIO_DATA (0x01a4) */
+#define MREGBIT_MDIO_DATA GENMASK(15, 0)
+
+/* MAC_RX_STATCTR_CONTROL (0x01a8) */
+#define MREGBIT_RX_COUNTER_NUMBER GENMASK(4, 0)
+#define MREGBIT_START_RX_COUNTER_READ BIT(15)
+
+/* MAC_RX_STATCTR_DATA_HIGH (0x01ac) */
+#define MREGBIT_RX_STATCTR_DATA_HIGH GENMASK(15, 0)
+/* MAC_RX_STATCTR_DATA_LOW (0x01b0) */
+#define MREGBIT_RX_STATCTR_DATA_LOW GENMASK(15, 0)
+
+/* MAC_TX_STATCTR_CONTROL (0x01b4) */
+#define MREGBIT_TX_COUNTER_NUMBER GENMASK(4, 0)
+#define MREGBIT_START_TX_COUNTER_READ BIT(15)
+
+/* MAC_TX_STATCTR_DATA_HIGH (0x01b8) */
+#define MREGBIT_TX_STATCTR_DATA_HIGH GENMASK(15, 0)
+/* MAC_TX_STATCTR_DATA_LOW (0x01bc) */
+#define MREGBIT_TX_STATCTR_DATA_LOW GENMASK(15, 0)
+
+/* MAC_TRANSMIT_FIFO_ALMOST_FULL (0x01c0) */
+#define MREGBIT_TX_FIFO_AF GENMASK(13, 0)
+
+/* MAC_TRANSMIT_PACKET_START_THRESHOLD (0x01c4) */
+#define MREGBIT_TX_PACKET_START_THRESHOLD GENMASK(13, 0)
+
+/* MAC_RECEIVE_PACKET_START_THRESHOLD (0x01c8) */
+#define MREGBIT_RX_PACKET_START_THRESHOLD GENMASK(13, 0)
+
+/* MAC_STATUS_IRQ (0x01e0) */
+#define MREGBIT_MAC_UNDERRUN_IRQ BIT(0)
+#define MREGBIT_MAC_JABBER_IRQ BIT(1)
+
+/* MAC_INTERRUPT_ENABLE (0x01e4) */
+#define MREGBIT_MAC_UNDERRUN_INTERRUPT_ENABLE BIT(0)
+#define MREGBIT_JABBER_INTERRUPT_ENABLE BIT(1)
+
+/* RX DMA descriptor */
+
+#define RX_DESC_0_FRAME_PACKET_LENGTH_MASK GENMASK(13, 0)
+#define RX_DESC_0_FRAME_ALIGN_ERR BIT(14)
+#define RX_DESC_0_FRAME_RUNT BIT(15)
+#define RX_DESC_0_FRAME_ETHERNET_TYPE BIT(16)
+#define RX_DESC_0_FRAME_VLAN BIT(17)
+#define RX_DESC_0_FRAME_MULTICAST BIT(18)
+#define RX_DESC_0_FRAME_BROADCAST BIT(19)
+#define RX_DESC_0_FRAME_CRC_ERR BIT(20)
+#define RX_DESC_0_FRAME_MAX_LEN_ERR BIT(21)
+#define RX_DESC_0_FRAME_JABBER_ERR BIT(22)
+#define RX_DESC_0_FRAME_LENGTH_ERR BIT(23)
+#define RX_DESC_0_FRAME_MAC_ADDR1_MATCH BIT(24)
+#define RX_DESC_0_FRAME_MAC_ADDR2_MATCH BIT(25)
+#define RX_DESC_0_FRAME_MAC_ADDR3_MATCH BIT(26)
+#define RX_DESC_0_FRAME_MAC_ADDR4_MATCH BIT(27)
+#define RX_DESC_0_FRAME_PAUSE_CTRL BIT(28)
+#define RX_DESC_0_LAST_DESCRIPTOR BIT(29)
+#define RX_DESC_0_FIRST_DESCRIPTOR BIT(30)
+#define RX_DESC_0_OWN BIT(31)
+
+#define RX_DESC_1_BUFFER_SIZE_1_MASK GENMASK(11, 0)
+#define RX_DESC_1_BUFFER_SIZE_2_MASK GENMASK(23, 12)
+ /* [24] reserved */
+#define RX_DESC_1_SECOND_ADDRESS_CHAINED BIT(25)
+#define RX_DESC_1_END_RING BIT(26)
+ /* [29:27] reserved */
+#define RX_DESC_1_RX_TIMESTAMP BIT(30)
+#define RX_DESC_1_PTP_PKT BIT(31)
+
+/* TX DMA descriptor */
+
+ /* [29:0] unused */
+#define TX_DESC_0_TX_TIMESTAMP BIT(30)
+#define TX_DESC_0_OWN BIT(31)
+
+#define TX_DESC_1_BUFFER_SIZE_1_MASK GENMASK(11, 0)
+#define TX_DESC_1_BUFFER_SIZE_2_MASK GENMASK(23, 12)
+#define TX_DESC_1_FORCE_EOP_ERROR BIT(24)
+#define TX_DESC_1_SECOND_ADDRESS_CHAINED BIT(25)
+#define TX_DESC_1_END_RING BIT(26)
+#define TX_DESC_1_DISABLE_PADDING BIT(27)
+#define TX_DESC_1_ADD_CRC_DISABLE BIT(28)
+#define TX_DESC_1_FIRST_SEGMENT BIT(29)
+#define TX_DESC_1_LAST_SEGMENT BIT(30)
+#define TX_DESC_1_INTERRUPT_ON_COMPLETION BIT(31)
+
+struct emac_desc {
+ u32 desc0;
+ u32 desc1;
+ u32 buffer_addr_1;
+ u32 buffer_addr_2;
+};
+
+/* Keep stats in this order; the index is used to address the hardware counters */
+
+union emac_hw_tx_stats {
+ struct individual_tx_stats {
+ u64 tx_ok_pkts;
+ u64 tx_total_pkts;
+ u64 tx_ok_bytes;
+ u64 tx_err_pkts;
+ u64 tx_singleclsn_pkts;
+ u64 tx_multiclsn_pkts;
+ u64 tx_lateclsn_pkts;
+ u64 tx_excessclsn_pkts;
+ u64 tx_unicast_pkts;
+ u64 tx_multicast_pkts;
+ u64 tx_broadcast_pkts;
+ u64 tx_pause_pkts;
+ } stats;
+
+ u64 array[sizeof(struct individual_tx_stats) / sizeof(u64)];
+};
+
+union emac_hw_rx_stats {
+ struct individual_rx_stats {
+ u64 rx_ok_pkts;
+ u64 rx_total_pkts;
+ u64 rx_crc_err_pkts;
+ u64 rx_align_err_pkts;
+ u64 rx_err_total_pkts;
+ u64 rx_ok_bytes;
+ u64 rx_total_bytes;
+ u64 rx_unicast_pkts;
+ u64 rx_multicast_pkts;
+ u64 rx_broadcast_pkts;
+ u64 rx_pause_pkts;
+ u64 rx_len_err_pkts;
+ u64 rx_len_undersize_pkts;
+ u64 rx_len_oversize_pkts;
+ u64 rx_len_fragment_pkts;
+ u64 rx_len_jabber_pkts;
+ u64 rx_64_pkts;
+ u64 rx_65_127_pkts;
+ u64 rx_128_255_pkts;
+ u64 rx_256_511_pkts;
+ u64 rx_512_1023_pkts;
+ u64 rx_1024_1518_pkts;
+ u64 rx_1519_plus_pkts;
+ u64 rx_drp_fifo_full_pkts;
+ u64 rx_truncate_fifo_full_pkts;
+ } stats;
+
+ u64 array[sizeof(struct individual_rx_stats) / sizeof(u64)];
+};
+
+#endif /* _K1_EMAC_H_ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 4ec61f1ee71a..907fe2e927f0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -3,12 +3,14 @@ config STMMAC_ETH
tristate "STMicroelectronics Multi-Gigabit Ethernet driver"
depends on HAS_IOMEM && HAS_DMA
depends on PTP_1588_CLOCK_OPTIONAL
+ depends on ETHTOOL_NETLINK
select MII
select PCS_XPCS
select PAGE_POOL
select PHYLINK
select CRC32
select RESET_CONTROLLER
+ select NET_DEVLINK
help
This is the driver for the Ethernet IPs built around a
Synopsys IP Core.
@@ -66,6 +68,15 @@ config DWMAC_ANARION
This selects the Anarion SoC glue layer support for the stmmac driver.
+config DWMAC_EIC7700
+ tristate "Support for Eswin eic7700 ethernet driver"
+ depends on OF && HAS_DMA && ARCH_ESWIN || COMPILE_TEST
+ help
+ This driver supports the Eswin EIC7700 Ethernet controller,
+ which integrates Synopsys DesignWare QoS features. It enables
+ high-speed networking with DMA acceleration and is optimized
+ for embedded systems.
+
config DWMAC_INGENIC
tristate "Ingenic MAC support"
default MACH_INGENIC
@@ -131,6 +142,19 @@ config DWMAC_QCOM_ETHQOS
This selects the Qualcomm ETHQOS glue layer support for the
stmmac device driver.
+config DWMAC_RENESAS_GBETH
+ tristate "Renesas RZ/V2H(P) GBETH and RZ/T2H, RZ/N2H GMAC support"
+ default ARCH_RENESAS
+ depends on OF && (ARCH_RENESAS || COMPILE_TEST)
+ select PCS_RZN1_MIIC
+ help
+ Support for Gigabit Ethernet Interface (GBETH)/ Ethernet MAC (GMAC)
+ on Renesas SoCs.
+
+ This selects Renesas SoC glue layer support for the stmmac device
+ driver. This driver is used for the RZ/V2H(P) family, RZ/T2H and
+ RZ/N2H SoCs.
+
config DWMAC_ROCKCHIP
tristate "Rockchip dwmac support"
default ARCH_ROCKCHIP
@@ -142,6 +166,30 @@ config DWMAC_ROCKCHIP
This selects the Rockchip RK3288 SoC glue layer support for
the stmmac device driver.
+config DWMAC_RZN1
+ tristate "Renesas RZ/N1 dwmac support"
+ default ARCH_RZN1
+ depends on OF && (ARCH_RZN1 || COMPILE_TEST)
+ select PCS_RZN1_MIIC
+ help
+ Support for Ethernet controller on Renesas RZ/N1 SoC family.
+
+ This selects the Renesas RZ/N1 SoC glue layer support for
+ the stmmac device driver. This support can make use of a custom MII
+ converter PCS device.
+
+config DWMAC_S32
+ tristate "NXP S32G/S32R GMAC support"
+ default ARCH_S32
+ depends on OF && (ARCH_S32 || COMPILE_TEST)
+ help
+ Support for the ethernet controller on NXP S32CC SoCs.
+
+ This selects NXP SoC glue layer support for the stmmac
+ device driver. This driver is used for the GMAC ethernet
+ controller on S32CC series SoCs, i.e. S32G2xx, S32G3xx and
+ S32R45.
+
config DWMAC_SOCFPGA
tristate "SOCFPGA dwmac support"
default ARCH_INTEL_SOCFPGA
@@ -157,6 +205,17 @@ config DWMAC_SOCFPGA
for the stmmac device driver. This driver is used for
arria5 and cyclone5 FPGA SoCs.
+config DWMAC_SOPHGO
+ tristate "Sophgo dwmac support"
+ depends on OF && (ARCH_SOPHGO || COMPILE_TEST)
+ default m if ARCH_SOPHGO
+ help
+ Support for ethernet controllers on Sophgo RISC-V SoCs
+
+ This selects the Sophgo SoC specific glue layer support
+ for the stmmac device driver. This driver is used for the
+ ethernet controllers on various Sophgo SoCs.
+
config DWMAC_STARFIVE
tristate "StarFive dwmac support"
depends on OF && (ARCH_STARFIVE || COMPILE_TEST)
@@ -216,6 +275,28 @@ config DWMAC_SUN8I
stmmac device driver. This driver is used for H3/A83T/A64
EMAC ethernet controller.
+config DWMAC_SUN55I
+ tristate "Allwinner sun55i GMAC200 support"
+ default ARCH_SUNXI
+ depends on OF && (ARCH_SUNXI || COMPILE_TEST)
+ select MDIO_BUS_MUX
+ help
+ Support for Allwinner A523/T527 GMAC200 ethernet controllers.
+
+ This selects Allwinner SoC glue layer support for the
+ stmmac device driver. This driver is used for A523/T527
+ GMAC200 ethernet controller.
+
+config DWMAC_THEAD
+ tristate "T-HEAD dwmac support"
+ depends on OF && (ARCH_THEAD || COMPILE_TEST)
+ help
+ Support for ethernet controllers on T-HEAD RISC-V SoCs.
+
+ This selects the T-HEAD platform specific glue layer support for
+ the stmmac device driver. This driver is used for T-HEAD TH1520
+ ethernet controller.
+
config DWMAC_IMX8
tristate "NXP IMX8 DWMAC support"
default ARCH_MXC
@@ -268,11 +349,17 @@ config DWMAC_VISCONTI
endif
+config STMMAC_LIBPCI
+ tristate
+ help
+ This option enables the PCI bus helpers for the stmmac driver.
+
config DWMAC_INTEL
tristate "Intel GMAC support"
default X86
depends on X86 && STMMAC_ETH && PCI
depends on COMMON_CLK
+ depends on ACPI
help
This selects the Intel platform specific bus support for the
stmmac driver. This driver is used for Intel Quark/EHL/TGL.
@@ -280,16 +367,18 @@ config DWMAC_INTEL
config DWMAC_LOONGSON
tristate "Loongson PCI DWMAC support"
default MACH_LOONGSON64
- depends on (MACH_LOONGSON64 || COMPILE_TEST) && STMMAC_ETH && PCI
+ depends on (MACH_LOONGSON64 || COMPILE_TEST) && PCI
depends on COMMON_CLK
+ select STMMAC_LIBPCI
help
This selects the LOONGSON PCI bus support for the stmmac driver,
Support for ethernet controller on Loongson-2K1000 SoC and LS7A1000 bridge.
config STMMAC_PCI
tristate "STMMAC PCI bus support"
- depends on STMMAC_ETH && PCI
+ depends on PCI
depends on COMMON_CLK
+ select STMMAC_LIBPCI
help
This selects the platform specific bus support for the stmmac driver.
This driver was tested on XLINX XC2V3000 FF1152AMT0221
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 26cad4344701..7bf528731034 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -6,27 +6,34 @@ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o dwmac4_descs.o \
dwmac4_dma.o dwmac4_lib.o dwmac4_core.o dwmac5.o hwif.o \
stmmac_tc.o dwxgmac2_core.o dwxgmac2_dma.o dwxgmac2_descs.o \
- stmmac_xdp.o stmmac_est.o \
- $(stmmac-y)
+ stmmac_xdp.o stmmac_est.o stmmac_fpe.o stmmac_vlan.o \
+ stmmac_pcs.o $(stmmac-y)
stmmac-$(CONFIG_STMMAC_SELFTESTS) += stmmac_selftests.o
# Ordering matters. Generic driver must be last.
obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o
obj-$(CONFIG_DWMAC_ANARION) += dwmac-anarion.o
+obj-$(CONFIG_DWMAC_EIC7700) += dwmac-eic7700.o
obj-$(CONFIG_DWMAC_INGENIC) += dwmac-ingenic.o
obj-$(CONFIG_DWMAC_IPQ806X) += dwmac-ipq806x.o
obj-$(CONFIG_DWMAC_LPC18XX) += dwmac-lpc18xx.o
obj-$(CONFIG_DWMAC_MEDIATEK) += dwmac-mediatek.o
obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o dwmac-meson8b.o
obj-$(CONFIG_DWMAC_QCOM_ETHQOS) += dwmac-qcom-ethqos.o
+obj-$(CONFIG_DWMAC_RENESAS_GBETH) += dwmac-renesas-gbeth.o
obj-$(CONFIG_DWMAC_ROCKCHIP) += dwmac-rk.o
+obj-$(CONFIG_DWMAC_RZN1) += dwmac-rzn1.o
+obj-$(CONFIG_DWMAC_S32) += dwmac-s32.o
obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-altr-socfpga.o
+obj-$(CONFIG_DWMAC_SOPHGO) += dwmac-sophgo.o
obj-$(CONFIG_DWMAC_STARFIVE) += dwmac-starfive.o
obj-$(CONFIG_DWMAC_STI) += dwmac-sti.o
obj-$(CONFIG_DWMAC_STM32) += dwmac-stm32.o
obj-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o
obj-$(CONFIG_DWMAC_SUN8I) += dwmac-sun8i.o
+obj-$(CONFIG_DWMAC_SUN55I) += dwmac-sun55i.o
+obj-$(CONFIG_DWMAC_THEAD) += dwmac-thead.o
obj-$(CONFIG_DWMAC_DWC_QOS_ETH) += dwmac-dwc-qos-eth.o
obj-$(CONFIG_DWMAC_INTEL_PLAT) += dwmac-intel-plat.o
obj-$(CONFIG_DWMAC_LOONGSON1) += dwmac-loongson1.o
@@ -37,6 +44,7 @@ obj-$(CONFIG_DWMAC_VISCONTI) += dwmac-visconti.o
stmmac-platform-objs:= stmmac_platform.o
dwmac-altr-socfpga-objs := dwmac-socfpga.o
+obj-$(CONFIG_STMMAC_LIBPCI) += stmmac_libpci.o
obj-$(CONFIG_STMMAC_PCI) += stmmac-pci.o
obj-$(CONFIG_DWMAC_INTEL) += dwmac-intel.o
obj-$(CONFIG_DWMAC_LOONGSON) += dwmac-loongson.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
index fb55efd52240..120a009c9992 100644
--- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
@@ -83,14 +83,13 @@ static int jumbo_frm(struct stmmac_tx_queue *tx_q, struct sk_buff *skb,
return entry;
}
-static unsigned int is_jumbo_frm(int len, int enh_desc)
+static bool is_jumbo_frm(unsigned int len, bool enh_desc)
{
- unsigned int ret = 0;
+ bool ret = false;
if ((enh_desc && (len > BUF_SIZE_8KiB)) ||
- (!enh_desc && (len > BUF_SIZE_2KiB))) {
- ret = 1;
- }
+ (!enh_desc && (len > BUF_SIZE_2KiB)))
+ ret = true;
return ret;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 3b7d4ac1e7be..49df46be3669 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -26,9 +26,13 @@
#include "hwif.h"
#include "mmc.h"
+#define DWMAC_SNPSVER GENMASK_U32(7, 0)
+#define DWMAC_USERVER GENMASK_U32(15, 8)
+
/* Synopsys Core versions */
#define DWMAC_CORE_3_40 0x34
#define DWMAC_CORE_3_50 0x35
+#define DWMAC_CORE_3_70 0x37
#define DWMAC_CORE_4_00 0x40
#define DWMAC_CORE_4_10 0x41
#define DWMAC_CORE_5_00 0x50
@@ -42,6 +46,11 @@
#define DWXGMAC_ID 0x76
#define DWXLGMAC_ID 0x27
+static inline bool dwmac_is_xmac(enum dwmac_core_type core_type)
+{
+ return core_type == DWMAC_CORE_GMAC4 || core_type == DWMAC_CORE_XGMAC;
+}
+
#define STMMAC_CHAN0 0 /* Always supported and default for all chips */
/* TX and RX Descriptor Length, these need to be power of two.
@@ -100,8 +109,8 @@ struct stmmac_rxq_stats {
/* Updates on each CPU protected by not allowing nested irqs. */
struct stmmac_pcpu_stats {
struct u64_stats_sync syncp;
- u64_stats_t rx_normal_irq_n[MTL_MAX_TX_QUEUES];
- u64_stats_t tx_normal_irq_n[MTL_MAX_RX_QUEUES];
+ u64_stats_t rx_normal_irq_n[MTL_MAX_RX_QUEUES];
+ u64_stats_t tx_normal_irq_n[MTL_MAX_TX_QUEUES];
};
/* Extra statistic and debug information exposed by ethtool */
@@ -191,9 +200,6 @@ struct stmmac_extra_stats {
unsigned long irq_pcs_ane_n;
unsigned long irq_pcs_link_n;
unsigned long irq_rgmii_n;
- unsigned long pcs_link;
- unsigned long pcs_duplex;
- unsigned long pcs_speed;
/* debug register */
unsigned long mtl_tx_status_fifo_full;
unsigned long mtl_tx_fifo_not_empty;
@@ -227,6 +233,7 @@ struct stmmac_extra_stats {
unsigned long mtl_est_btrlm;
unsigned long max_sdu_txq_drop[MTL_MAX_TX_QUEUES];
unsigned long mtl_est_txq_hlbf[MTL_MAX_TX_QUEUES];
+ unsigned long mtl_est_txq_hlbs[MTL_MAX_TX_QUEUES];
/* per queue statistics */
struct stmmac_txq_stats txq_stats[MTL_MAX_TX_QUEUES];
struct stmmac_rxq_stats rxq_stats[MTL_MAX_RX_QUEUES];
@@ -256,6 +263,8 @@ struct stmmac_safety_stats {
#define CSR_F_150M 150000000
#define CSR_F_250M 250000000
#define CSR_F_300M 300000000
+#define CSR_F_500M 500000000
+#define CSR_F_800M 800000000
#define MAC_CSR_H_FRQ_MASK 0x20
@@ -269,10 +278,7 @@ struct stmmac_safety_stats {
#define FLOW_AUTO (FLOW_TX | FLOW_RX)
/* PCS defines */
-#define STMMAC_PCS_RGMII (1 << 0)
#define STMMAC_PCS_SGMII (1 << 1)
-#define STMMAC_PCS_TBI (1 << 2)
-#define STMMAC_PCS_RTBI (1 << 3)
#define SF_DMA_MODE 1 /* DMA STORE-AND-FORWARD Operation Mode */
@@ -307,6 +313,16 @@ struct stmmac_safety_stats {
#define DMA_HW_FEAT_ACTPHYIF 0x70000000 /* Active/selected PHY iface */
#define DEFAULT_DMA_PBL 8
+/* phy_intf_sel_i and ACTPHYIF encodings */
+#define PHY_INTF_SEL_GMII_MII 0
+#define PHY_INTF_SEL_RGMII 1
+#define PHY_INTF_SEL_SGMII 2
+#define PHY_INTF_SEL_TBI 3
+#define PHY_INTF_SEL_RMII 4
+#define PHY_INTF_SEL_RTBI 5
+#define PHY_INTF_SEL_SMII 6
+#define PHY_INTF_SEL_REVMII 7
+
/* MSI defines */
#define STMMAC_MSI_VEC_MAX 32
@@ -395,17 +411,6 @@ enum request_irq_err {
#define CORE_IRQ_MTL_RX_OVERFLOW BIT(8)
-/* Physical Coding Sublayer */
-struct rgmii_adv {
- unsigned int pause;
- unsigned int duplex;
- unsigned int lp_pause;
- unsigned int lp_duplex;
-};
-
-#define STMMAC_PCS_PAUSE 1
-#define STMMAC_PCS_ASYM_PAUSE 2
-
/* DMA HW capabilities */
struct dma_features {
unsigned int mbps_10_100;
@@ -529,6 +534,33 @@ struct dma_features {
#define STMMAC_DEFAULT_TWT_LS 0x1E
#define STMMAC_ET_MAX 0xFFFFF
+/* Common LPI register bits */
+#define LPI_CTRL_STATUS_LPITCSE BIT(21) /* LPI Tx Clock Stop Enable, gmac4, xgmac2 only */
+#define LPI_CTRL_STATUS_LPIATE BIT(20) /* LPI Timer Enable, gmac4 only */
+#define LPI_CTRL_STATUS_LPITXA BIT(19) /* Enable LPI TX Automate */
+#define LPI_CTRL_STATUS_PLSEN BIT(18) /* Enable PHY Link Status */
+#define LPI_CTRL_STATUS_PLS BIT(17) /* PHY Link Status */
+#define LPI_CTRL_STATUS_LPIEN BIT(16) /* LPI Enable */
+#define LPI_CTRL_STATUS_RLPIST BIT(9) /* Receive LPI state, gmac1000 only? */
+#define LPI_CTRL_STATUS_TLPIST BIT(8) /* Transmit LPI state, gmac1000 only? */
+#define LPI_CTRL_STATUS_RLPIEX BIT(3) /* Receive LPI Exit */
+#define LPI_CTRL_STATUS_RLPIEN BIT(2) /* Receive LPI Entry */
+#define LPI_CTRL_STATUS_TLPIEX BIT(1) /* Transmit LPI Exit */
+#define LPI_CTRL_STATUS_TLPIEN BIT(0) /* Transmit LPI Entry */
+
+/* Common definitions for AXI Master Bus Mode */
+#define DMA_AXI_AAL BIT(12)
+#define DMA_AXI_BLEN256 BIT(7)
+#define DMA_AXI_BLEN128 BIT(6)
+#define DMA_AXI_BLEN64 BIT(5)
+#define DMA_AXI_BLEN32 BIT(4)
+#define DMA_AXI_BLEN16 BIT(3)
+#define DMA_AXI_BLEN8 BIT(2)
+#define DMA_AXI_BLEN4 BIT(1)
+#define DMA_AXI_BLEN_MASK GENMASK(7, 1)
+
+void stmmac_axi_blen_to_mask(u32 *regval, const u32 *blen, size_t len);
+
#define STMMAC_CHAIN_MODE 0x1
#define STMMAC_RING_MODE 0x2
@@ -544,14 +576,8 @@ struct dma_features {
#define STMMAC_VLAN_INSERT 0x2
#define STMMAC_VLAN_REPLACE 0x3
-extern const struct stmmac_desc_ops enh_desc_ops;
-extern const struct stmmac_desc_ops ndesc_ops;
-
struct mac_device_info;
-extern const struct stmmac_hwtimestamp stmmac_ptp;
-extern const struct stmmac_mode_ops dwmac4_ring_mode_ops;
-
struct mac_link {
u32 caps;
u32 speed_mask;
@@ -593,8 +619,9 @@ struct mac_device_info {
const struct stmmac_tc_ops *tc;
const struct stmmac_mmc_ops *mmc;
const struct stmmac_est_ops *est;
+ const struct stmmac_vlan_ops *vlan;
struct dw_xpcs *xpcs;
- struct phylink_pcs *lynx_pcs; /* Lynx external PCS */
+ struct phylink_pcs *phylink_pcs;
struct mii_regs mii; /* MII register Addresses */
struct mac_link link;
void __iomem *pcsr; /* vpointer to device CSRs */
@@ -603,14 +630,18 @@ struct mac_device_info {
unsigned int mcast_bits_log2;
unsigned int rx_csum;
unsigned int pcs;
- unsigned int pmt;
- unsigned int ps;
unsigned int xlgmac;
unsigned int num_vlan;
u32 vlan_filter[32];
bool vlan_fail_q_en;
u8 vlan_fail_q;
bool hw_vlan_en;
+ bool reverse_sgmii_enable;
+
+ /* This spinlock protects read-modify-write of the interrupt
+ * mask/enable registers.
+ */
+ spinlock_t irq_ctrl_lock;
};
struct stmmac_rx_routing {
@@ -638,8 +669,4 @@ void stmmac_dwmac4_set_mac(void __iomem *ioaddr, bool enable);
void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
-extern const struct stmmac_mode_ops ring_mode_ops;
-extern const struct stmmac_mode_ops chain_mode_ops;
-extern const struct stmmac_desc_ops dwmac4_desc_ops;
-
#endif /* __COMMON_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
index 643ee6d8d4dd..5e0fc31762d9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
@@ -34,7 +34,7 @@ static void gmac_write_reg(struct anarion_gmac *gmac, uint8_t reg, uint32_t val)
writel(val, gmac->ctl_block + reg);
}
-static int anarion_gmac_init(struct platform_device *pdev, void *priv)
+static int anarion_gmac_init(struct device *dev, void *priv)
{
uint32_t sw_config;
struct anarion_gmac *gmac = priv;
@@ -52,25 +52,25 @@ static int anarion_gmac_init(struct platform_device *pdev, void *priv)
return 0;
}
-static void anarion_gmac_exit(struct platform_device *pdev, void *priv)
+static void anarion_gmac_exit(struct device *dev, void *priv)
{
struct anarion_gmac *gmac = priv;
gmac_write_reg(gmac, GMAC_RESET_CONTROL_REG, 1);
}
-static struct anarion_gmac *anarion_config_dt(struct platform_device *pdev)
+static struct anarion_gmac *
+anarion_config_dt(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat_dat)
{
struct anarion_gmac *gmac;
- phy_interface_t phy_mode;
void __iomem *ctl_block;
- int err;
ctl_block = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(ctl_block)) {
- err = PTR_ERR(ctl_block);
- dev_err(&pdev->dev, "Cannot get reset region (%d)!\n", err);
- return ERR_PTR(err);
+ dev_err(&pdev->dev, "Cannot get reset region (%pe)!\n",
+ ctl_block);
+ return ERR_CAST(ctl_block);
}
gmac = devm_kzalloc(&pdev->dev, sizeof(*gmac), GFP_KERNEL);
@@ -79,21 +79,11 @@ static struct anarion_gmac *anarion_config_dt(struct platform_device *pdev)
gmac->ctl_block = ctl_block;
- err = of_get_phy_mode(pdev->dev.of_node, &phy_mode);
- if (err)
- return ERR_PTR(err);
-
- switch (phy_mode) {
- case PHY_INTERFACE_MODE_RGMII:
- fallthrough;
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
+ if (phy_interface_mode_is_rgmii(plat_dat->phy_interface)) {
gmac->phy_intf_sel = GMAC_CONFIG_INTF_RGMII;
- break;
- default:
- dev_err(&pdev->dev, "Unsupported phy-mode (%d)\n",
- phy_mode);
+ } else {
+ dev_err(&pdev->dev, "Unsupported phy-mode (%s)\n",
+ phy_modes(plat_dat->phy_interface));
return ERR_PTR(-ENOTSUPP);
}
@@ -111,20 +101,19 @@ static int anarion_dwmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- gmac = anarion_config_dt(pdev);
- if (IS_ERR(gmac))
- return PTR_ERR(gmac);
-
plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
+ gmac = anarion_config_dt(pdev, plat_dat);
+ if (IS_ERR(gmac))
+ return PTR_ERR(gmac);
+
plat_dat->init = anarion_gmac_init;
plat_dat->exit = anarion_gmac_exit;
- anarion_gmac_init(pdev, gmac);
plat_dat->bsp_priv = gmac;
- return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
}
static const struct of_device_id anarion_dwmac_match[] = {
@@ -135,7 +124,6 @@ MODULE_DEVICE_TABLE(of, anarion_dwmac_match);
static struct platform_driver anarion_dwmac_driver = {
.probe = anarion_dwmac_probe,
- .remove_new = stmmac_pltfr_remove,
.driver = {
.name = "anarion-dwmac",
.pm = &stmmac_pltfr_pm_ops,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index ec924c6c76c6..d043bad4a862 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -29,10 +29,6 @@ struct tegra_eqos {
void __iomem *regs;
struct reset_control *rst;
- struct clk *clk_master;
- struct clk *clk_slave;
- struct clk *clk_tx;
- struct clk *clk_rx;
struct gpio_desc *reset;
};
@@ -42,11 +38,11 @@ static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
{
struct device *dev = &pdev->dev;
u32 burst_map = 0;
- u32 bit_index = 0;
- u32 a_index = 0;
if (!plat_dat->axi) {
- plat_dat->axi = kzalloc(sizeof(struct stmmac_axi), GFP_KERNEL);
+ plat_dat->axi = devm_kzalloc(&pdev->dev,
+ sizeof(struct stmmac_axi),
+ GFP_KERNEL);
if (!plat_dat->axi)
return -ENOMEM;
@@ -85,33 +81,11 @@ static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
}
device_property_read_u32(dev, "snps,burst-map", &burst_map);
- /* converts burst-map bitmask to burst array */
- for (bit_index = 0; bit_index < 7; bit_index++) {
- if (burst_map & (1 << bit_index)) {
- switch (bit_index) {
- case 0:
- plat_dat->axi->axi_blen[a_index] = 4; break;
- case 1:
- plat_dat->axi->axi_blen[a_index] = 8; break;
- case 2:
- plat_dat->axi->axi_blen[a_index] = 16; break;
- case 3:
- plat_dat->axi->axi_blen[a_index] = 32; break;
- case 4:
- plat_dat->axi->axi_blen[a_index] = 64; break;
- case 5:
- plat_dat->axi->axi_blen[a_index] = 128; break;
- case 6:
- plat_dat->axi->axi_blen[a_index] = 256; break;
- default:
- break;
- }
- a_index++;
- }
- }
+ plat_dat->axi->axi_blen_regval = FIELD_PREP(DMA_AXI_BLEN_MASK,
+ burst_map);
/* dwc-qos needs GMAC4, AAL, TSO and PMT */
- plat_dat->has_gmac4 = 1;
+ plat_dat->core_type = DWMAC_CORE_GMAC4;
plat_dat->dma_cfg->aal = 1;
plat_dat->flags |= STMMAC_FLAG_TSO_EN;
plat_dat->pmt = 1;
@@ -123,49 +97,9 @@ static int dwc_qos_probe(struct platform_device *pdev,
struct plat_stmmacenet_data *plat_dat,
struct stmmac_resources *stmmac_res)
{
- int err;
-
- plat_dat->stmmac_clk = devm_clk_get(&pdev->dev, "apb_pclk");
- if (IS_ERR(plat_dat->stmmac_clk)) {
- dev_err(&pdev->dev, "apb_pclk clock not found.\n");
- return PTR_ERR(plat_dat->stmmac_clk);
- }
-
- err = clk_prepare_enable(plat_dat->stmmac_clk);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to enable apb_pclk clock: %d\n",
- err);
- return err;
- }
-
- plat_dat->pclk = devm_clk_get(&pdev->dev, "phy_ref_clk");
- if (IS_ERR(plat_dat->pclk)) {
- dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
- err = PTR_ERR(plat_dat->pclk);
- goto disable;
- }
-
- err = clk_prepare_enable(plat_dat->pclk);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to enable phy_ref clock: %d\n",
- err);
- goto disable;
- }
+ plat_dat->pclk = stmmac_pltfr_find_clk(plat_dat, "phy_ref_clk");
return 0;
-
-disable:
- clk_disable_unprepare(plat_dat->stmmac_clk);
- return err;
-}
-
-static void dwc_qos_remove(struct platform_device *pdev)
-{
- struct net_device *ndev = platform_get_drvdata(pdev);
- struct stmmac_priv *priv = netdev_priv(ndev);
-
- clk_disable_unprepare(priv->plat->pclk);
- clk_disable_unprepare(priv->plat->stmmac_clk);
}
#define SDMEMCOMPPADCTRL 0x8800
@@ -178,35 +112,34 @@ static void dwc_qos_remove(struct platform_device *pdev)
#define AUTO_CAL_STATUS 0x880c
#define AUTO_CAL_STATUS_ACTIVE BIT(31)
-static void tegra_eqos_fix_speed(void *priv, unsigned int speed, unsigned int mode)
+static void tegra_eqos_fix_speed(void *bsp_priv, int speed, unsigned int mode)
{
- struct tegra_eqos *eqos = priv;
- unsigned long rate = 125000000;
+ struct tegra_eqos *eqos = bsp_priv;
bool needs_calibration = false;
+ struct stmmac_priv *priv;
u32 value;
int err;
switch (speed) {
case SPEED_1000:
- needs_calibration = true;
- rate = 125000000;
- break;
-
case SPEED_100:
needs_calibration = true;
- rate = 25000000;
- break;
+ fallthrough;
case SPEED_10:
- rate = 2500000;
break;
default:
- dev_err(eqos->dev, "invalid speed %u\n", speed);
+ dev_err(eqos->dev, "invalid speed %d\n", speed);
break;
}
if (needs_calibration) {
+ priv = netdev_priv(dev_get_drvdata(eqos->dev));
+
+ /* Calibration should be done with the MDIO bus idle */
+ stmmac_mdio_lock(priv);
+
/* calibrate */
value = readl(eqos->regs + SDMEMCOMPPADCTRL);
value |= SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD;
@@ -240,33 +173,17 @@ static void tegra_eqos_fix_speed(void *priv, unsigned int speed, unsigned int mo
value = readl(eqos->regs + SDMEMCOMPPADCTRL);
value &= ~SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD;
writel(value, eqos->regs + SDMEMCOMPPADCTRL);
+
+ stmmac_mdio_unlock(priv);
} else {
value = readl(eqos->regs + AUTO_CAL_CONFIG);
value &= ~AUTO_CAL_CONFIG_ENABLE;
writel(value, eqos->regs + AUTO_CAL_CONFIG);
}
-
- err = clk_set_rate(eqos->clk_tx, rate);
- if (err < 0)
- dev_err(eqos->dev, "failed to set TX rate: %d\n", err);
-}
-
-static int tegra_eqos_init(struct platform_device *pdev, void *priv)
-{
- struct tegra_eqos *eqos = priv;
- unsigned long rate;
- u32 value;
-
- rate = clk_get_rate(eqos->clk_slave);
-
- value = (rate / 1000000) - 1;
- writel(value, eqos->regs + GMAC_1US_TIC_COUNTER);
-
- return 0;
}
static int tegra_eqos_probe(struct platform_device *pdev,
- struct plat_stmmacenet_data *data,
+ struct plat_stmmacenet_data *plat_dat,
struct stmmac_resources *res)
{
struct device *dev = &pdev->dev;
@@ -283,59 +200,19 @@ static int tegra_eqos_probe(struct platform_device *pdev,
if (!is_of_node(dev->fwnode))
goto bypass_clk_reset_gpio;
- eqos->clk_master = devm_clk_get(&pdev->dev, "master_bus");
- if (IS_ERR(eqos->clk_master)) {
- err = PTR_ERR(eqos->clk_master);
- goto error;
- }
-
- err = clk_prepare_enable(eqos->clk_master);
- if (err < 0)
- goto error;
-
- eqos->clk_slave = devm_clk_get(&pdev->dev, "slave_bus");
- if (IS_ERR(eqos->clk_slave)) {
- err = PTR_ERR(eqos->clk_slave);
- goto disable_master;
- }
-
- data->stmmac_clk = eqos->clk_slave;
-
- err = clk_prepare_enable(eqos->clk_slave);
- if (err < 0)
- goto disable_master;
-
- eqos->clk_rx = devm_clk_get(&pdev->dev, "rx");
- if (IS_ERR(eqos->clk_rx)) {
- err = PTR_ERR(eqos->clk_rx);
- goto disable_slave;
- }
-
- err = clk_prepare_enable(eqos->clk_rx);
- if (err < 0)
- goto disable_slave;
-
- eqos->clk_tx = devm_clk_get(&pdev->dev, "tx");
- if (IS_ERR(eqos->clk_tx)) {
- err = PTR_ERR(eqos->clk_tx);
- goto disable_rx;
- }
-
- err = clk_prepare_enable(eqos->clk_tx);
- if (err < 0)
- goto disable_rx;
+ plat_dat->clk_tx_i = stmmac_pltfr_find_clk(plat_dat, "tx");
eqos->reset = devm_gpiod_get(&pdev->dev, "phy-reset", GPIOD_OUT_HIGH);
if (IS_ERR(eqos->reset)) {
err = PTR_ERR(eqos->reset);
- goto disable_tx;
+ return err;
}
usleep_range(2000, 4000);
gpiod_set_value(eqos->reset, 0);
/* MDIO bus was already reset just above */
- data->mdio_bus_data->needs_reset = false;
+ plat_dat->mdio_bus_data->needs_reset = false;
eqos->rst = devm_reset_control_get(&pdev->dev, "eqos");
if (IS_ERR(eqos->rst)) {
@@ -356,29 +233,18 @@ static int tegra_eqos_probe(struct platform_device *pdev,
usleep_range(2000, 4000);
bypass_clk_reset_gpio:
- data->fix_mac_speed = tegra_eqos_fix_speed;
- data->init = tegra_eqos_init;
- data->bsp_priv = eqos;
- data->flags |= STMMAC_FLAG_SPH_DISABLE;
-
- err = tegra_eqos_init(pdev, eqos);
- if (err < 0)
- goto reset;
+ plat_dat->fix_mac_speed = tegra_eqos_fix_speed;
+ plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
+ plat_dat->bsp_priv = eqos;
+ plat_dat->flags |= STMMAC_FLAG_SPH_DISABLE |
+ STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP |
+ STMMAC_FLAG_USE_PHY_WOL;
return 0;
-reset:
- reset_control_assert(eqos->rst);
+
reset_phy:
gpiod_set_value(eqos->reset, 1);
-disable_tx:
- clk_disable_unprepare(eqos->clk_tx);
-disable_rx:
- clk_disable_unprepare(eqos->clk_rx);
-disable_slave:
- clk_disable_unprepare(eqos->clk_slave);
-disable_master:
- clk_disable_unprepare(eqos->clk_master);
-error:
+
return err;
}
@@ -388,27 +254,29 @@ static void tegra_eqos_remove(struct platform_device *pdev)
reset_control_assert(eqos->rst);
gpiod_set_value(eqos->reset, 1);
- clk_disable_unprepare(eqos->clk_tx);
- clk_disable_unprepare(eqos->clk_rx);
- clk_disable_unprepare(eqos->clk_slave);
- clk_disable_unprepare(eqos->clk_master);
}
struct dwc_eth_dwmac_data {
int (*probe)(struct platform_device *pdev,
- struct plat_stmmacenet_data *data,
+ struct plat_stmmacenet_data *plat_dat,
struct stmmac_resources *res);
void (*remove)(struct platform_device *pdev);
+ const char *stmmac_clk_name;
};
static const struct dwc_eth_dwmac_data dwc_qos_data = {
.probe = dwc_qos_probe,
- .remove = dwc_qos_remove,
+ .stmmac_clk_name = "apb_pclk",
};
static const struct dwc_eth_dwmac_data tegra_eqos_data = {
.probe = tegra_eqos_probe,
.remove = tegra_eqos_remove,
+ .stmmac_clk_name = "slave_bus",
+};
+
+static const struct dwc_eth_dwmac_data fsd_eqos_data = {
+ .stmmac_clk_name = "slave_bus",
};
static int dwc_eth_dwmac_probe(struct platform_device *pdev)
@@ -439,7 +307,16 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev)
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
- ret = data->probe(pdev, plat_dat, &stmmac_res);
+ ret = devm_clk_bulk_get_all_enabled(&pdev->dev, &plat_dat->clks);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "Failed to retrieve and enable all required clocks\n");
+ plat_dat->num_clks = ret;
+
+ plat_dat->stmmac_clk = stmmac_pltfr_find_clk(plat_dat,
+ data->stmmac_clk_name);
+
+ if (data->probe)
+ ret = data->probe(pdev, plat_dat, &stmmac_res);
if (ret < 0) {
dev_err_probe(&pdev->dev, ret, "failed to probe subdriver\n");
return ret;
@@ -456,7 +333,8 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev)
return ret;
remove:
- data->remove(pdev);
+ if (data->remove)
+ data->remove(pdev);
return ret;
}
@@ -467,19 +345,21 @@ static void dwc_eth_dwmac_remove(struct platform_device *pdev)
stmmac_dvr_remove(&pdev->dev);
- data->remove(pdev);
+ if (data->remove)
+ data->remove(pdev);
}
static const struct of_device_id dwc_eth_dwmac_match[] = {
{ .compatible = "snps,dwc-qos-ethernet-4.10", .data = &dwc_qos_data },
{ .compatible = "nvidia,tegra186-eqos", .data = &tegra_eqos_data },
+ { .compatible = "tesla,fsd-ethqos", .data = &fsd_eqos_data },
{ }
};
MODULE_DEVICE_TABLE(of, dwc_eth_dwmac_match);
static struct platform_driver dwc_eth_dwmac_driver = {
.probe = dwc_eth_dwmac_probe,
- .remove_new = dwc_eth_dwmac_remove,
+ .remove = dwc_eth_dwmac_remove,
.driver = {
.name = "dwc-eth-dwmac",
.pm = &stmmac_pltfr_pm_ops,
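
In the snps,burst-map conversion removed above, bit n of the property selects an AXI burst length of 4 << n (bit 0 = 4 ... bit 6 = 256), so storing the raw bitmask via FIELD_PREP(DMA_AXI_BLEN_MASK, burst_map) carries the same information as the old axi_blen[] array. A small sketch of that correspondence; the helper name is illustrative only:

#include <linux/bits.h>

/* Illustrative only: burst length encoded by each "snps,burst-map" bit. */
static unsigned int example_burst_map_len(u32 burst_map, unsigned int bit)
{
	if (bit > 6 || !(burst_map & BIT(bit)))
		return 0;

	return 4U << bit;	/* bit 0 -> 4, bit 1 -> 8, ..., bit 6 -> 256 */
}
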
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-eic7700.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-eic7700.c
new file mode 100644
index 000000000000..bcb8e000e720
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-eic7700.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Eswin DWC Ethernet linux driver
+ *
+ * Copyright 2025, Beijing ESWIN Computing Technology Co., Ltd.
+ *
+ * Authors:
+ * Zhi Li <lizhi2@eswincomputing.com>
+ * Shuang Liang <liangshuang@eswincomputing.com>
+ * Shangjuan Wei <weishangjuan@eswincomputing.com>
+ */
+
+#include <linux/platform_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/pm_runtime.h>
+#include <linux/stmmac.h>
+#include <linux/regmap.h>
+#include <linux/of.h>
+
+#include "stmmac_platform.h"
+
+/* eth_phy_ctrl_offset eth0:0x100 */
+#define EIC7700_ETH_TX_CLK_SEL BIT(16)
+#define EIC7700_ETH_PHY_INTF_SELI BIT(0)
+
+/* eth_axi_lp_ctrl_offset eth0:0x108 */
+#define EIC7700_ETH_CSYSREQ_VAL BIT(0)
+
+/*
+ * TX/RX Clock Delay Bit Masks:
+ * - TX Delay: bits [14:8] — TX_CLK delay (unit: 0.1ns per bit)
+ * - RX Delay: bits [30:24] — RX_CLK delay (unit: 0.1ns per bit)
+ */
+#define EIC7700_ETH_TX_ADJ_DELAY GENMASK(14, 8)
+#define EIC7700_ETH_RX_ADJ_DELAY GENMASK(30, 24)
+
+#define EIC7700_MAX_DELAY_UNIT 0x7F
+
+static const char * const eic7700_clk_names[] = {
+ "tx", "axi", "cfg",
+};
+
+struct eic7700_qos_priv {
+ struct plat_stmmacenet_data *plat_dat;
+};
+
+static int eic7700_clks_config(void *priv, bool enabled)
+{
+ struct eic7700_qos_priv *dwc = (struct eic7700_qos_priv *)priv;
+ struct plat_stmmacenet_data *plat = dwc->plat_dat;
+ int ret = 0;
+
+ if (enabled)
+ ret = clk_bulk_prepare_enable(plat->num_clks, plat->clks);
+ else
+ clk_bulk_disable_unprepare(plat->num_clks, plat->clks);
+
+ return ret;
+}
+
+static int eic7700_dwmac_init(struct device *dev, void *priv)
+{
+ struct eic7700_qos_priv *dwc = priv;
+
+ return eic7700_clks_config(dwc, true);
+}
+
+static void eic7700_dwmac_exit(struct device *dev, void *priv)
+{
+ struct eic7700_qos_priv *dwc = priv;
+
+ eic7700_clks_config(dwc, false);
+}
+
+static int eic7700_dwmac_suspend(struct device *dev, void *priv)
+{
+ return pm_runtime_force_suspend(dev);
+}
+
+static int eic7700_dwmac_resume(struct device *dev, void *priv)
+{
+ int ret;
+
+ ret = pm_runtime_force_resume(dev);
+ if (ret)
+ dev_err(dev, "%s failed: %d\n", __func__, ret);
+
+ return ret;
+}
+
+static int eic7700_dwmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct eic7700_qos_priv *dwc_priv;
+ struct regmap *eic7700_hsp_regmap;
+ u32 eth_axi_lp_ctrl_offset;
+ u32 eth_phy_ctrl_offset;
+ u32 eth_phy_ctrl_regset;
+ u32 eth_rxd_dly_offset;
+ u32 eth_dly_param = 0;
+ u32 delay_ps;
+ int i, ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to get resources\n");
+
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return dev_err_probe(&pdev->dev, PTR_ERR(plat_dat),
+ "dt configuration failed\n");
+
+ dwc_priv = devm_kzalloc(&pdev->dev, sizeof(*dwc_priv), GFP_KERNEL);
+ if (!dwc_priv)
+ return -ENOMEM;
+
+ /* Read rx-internal-delay-ps and update rx_clk delay */
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "rx-internal-delay-ps", &delay_ps)) {
+ u32 val = min(delay_ps / 100, EIC7700_MAX_DELAY_UNIT);
+
+ eth_dly_param &= ~EIC7700_ETH_RX_ADJ_DELAY;
+ eth_dly_param |= FIELD_PREP(EIC7700_ETH_RX_ADJ_DELAY, val);
+ } else {
+ return dev_err_probe(&pdev->dev, -EINVAL,
+ "missing required property rx-internal-delay-ps\n");
+ }
+
+ /* Read tx-internal-delay-ps and update tx_clk delay */
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "tx-internal-delay-ps", &delay_ps)) {
+ u32 val = min(delay_ps / 100, EIC7700_MAX_DELAY_UNIT);
+
+ eth_dly_param &= ~EIC7700_ETH_TX_ADJ_DELAY;
+ eth_dly_param |= FIELD_PREP(EIC7700_ETH_TX_ADJ_DELAY, val);
+ } else {
+ return dev_err_probe(&pdev->dev, -EINVAL,
+ "missing required property tx-internal-delay-ps\n");
+ }
+
+ eic7700_hsp_regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "eswin,hsp-sp-csr");
+ if (IS_ERR(eic7700_hsp_regmap))
+ return dev_err_probe(&pdev->dev,
+ PTR_ERR(eic7700_hsp_regmap),
+ "Failed to get hsp-sp-csr regmap\n");
+
+ ret = of_property_read_u32_index(pdev->dev.of_node,
+ "eswin,hsp-sp-csr",
+ 1, &eth_phy_ctrl_offset);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "can't get eth_phy_ctrl_offset\n");
+
+ regmap_read(eic7700_hsp_regmap, eth_phy_ctrl_offset,
+ &eth_phy_ctrl_regset);
+ eth_phy_ctrl_regset |=
+ (EIC7700_ETH_TX_CLK_SEL | EIC7700_ETH_PHY_INTF_SELI);
+ regmap_write(eic7700_hsp_regmap, eth_phy_ctrl_offset,
+ eth_phy_ctrl_regset);
+
+ ret = of_property_read_u32_index(pdev->dev.of_node,
+ "eswin,hsp-sp-csr",
+ 2, &eth_axi_lp_ctrl_offset);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "can't get eth_axi_lp_ctrl_offset\n");
+
+ regmap_write(eic7700_hsp_regmap, eth_axi_lp_ctrl_offset,
+ EIC7700_ETH_CSYSREQ_VAL);
+
+ ret = of_property_read_u32_index(pdev->dev.of_node,
+ "eswin,hsp-sp-csr",
+ 3, &eth_rxd_dly_offset);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "can't get eth_rxd_dly_offset\n");
+
+ regmap_write(eic7700_hsp_regmap, eth_rxd_dly_offset,
+ eth_dly_param);
+
+ plat_dat->num_clks = ARRAY_SIZE(eic7700_clk_names);
+ plat_dat->clks = devm_kcalloc(&pdev->dev,
+ plat_dat->num_clks,
+ sizeof(*plat_dat->clks),
+ GFP_KERNEL);
+ if (!plat_dat->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(eic7700_clk_names); i++)
+ plat_dat->clks[i].id = eic7700_clk_names[i];
+
+ ret = devm_clk_bulk_get_optional(&pdev->dev,
+ plat_dat->num_clks,
+ plat_dat->clks);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to get clocks\n");
+
+ plat_dat->clk_tx_i = stmmac_pltfr_find_clk(plat_dat, "tx");
+ plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
+ plat_dat->clks_config = eic7700_clks_config;
+ plat_dat->bsp_priv = dwc_priv;
+ dwc_priv->plat_dat = plat_dat;
+ plat_dat->init = eic7700_dwmac_init;
+ plat_dat->exit = eic7700_dwmac_exit;
+ plat_dat->suspend = eic7700_dwmac_suspend;
+ plat_dat->resume = eic7700_dwmac_resume;
+
+ return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
+}
+
+static const struct of_device_id eic7700_dwmac_match[] = {
+ { .compatible = "eswin,eic7700-qos-eth" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, eic7700_dwmac_match);
+
+static struct platform_driver eic7700_dwmac_driver = {
+ .probe = eic7700_dwmac_probe,
+ .driver = {
+ .name = "eic7700-eth-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = eic7700_dwmac_match,
+ },
+};
+module_platform_driver(eic7700_dwmac_driver);
+
+MODULE_AUTHOR("Zhi Li <lizhi2@eswincomputing.com>");
+MODULE_AUTHOR("Shuang Liang <liangshuang@eswincomputing.com>");
+MODULE_AUTHOR("Shangjuan Wei <weishangjuan@eswincomputing.com>");
+MODULE_DESCRIPTION("Eswin eic7700 qos ethernet driver");
+MODULE_LICENSE("GPL");
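
The rx/tx-internal-delay-ps parsing in the new eic7700 glue converts picoseconds to 0.1 ns register units (one unit per 100 ps), clamps at EIC7700_MAX_DELAY_UNIT, and packs both fields with FIELD_PREP. A self-contained sketch of that conversion; the function and macro names here are illustrative:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/minmax.h>

#define EXAMPLE_TX_ADJ_DELAY	GENMASK(14, 8)	/* TX_CLK delay, 0.1 ns units */
#define EXAMPLE_RX_ADJ_DELAY	GENMASK(30, 24)	/* RX_CLK delay, 0.1 ns units */
#define EXAMPLE_MAX_DELAY_UNIT	0x7F

/* Illustrative only: 2000 ps -> 20 units; 20000 ps saturates at 0x7F. */
static u32 example_pack_rgmii_delays(u32 tx_ps, u32 rx_ps)
{
	u32 tx = min(tx_ps / 100, (u32)EXAMPLE_MAX_DELAY_UNIT);
	u32 rx = min(rx_ps / 100, (u32)EXAMPLE_MAX_DELAY_UNIT);

	return FIELD_PREP(EXAMPLE_TX_ADJ_DELAY, tx) |
	       FIELD_PREP(EXAMPLE_RX_ADJ_DELAY, rx);
}
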
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
index 598eff926815..b9218c07eb6b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
@@ -56,6 +56,7 @@ static const struct of_device_id dwmac_generic_match[] = {
{ .compatible = "snps,dwmac-3.610"},
{ .compatible = "snps,dwmac-3.70a"},
{ .compatible = "snps,dwmac-3.710"},
+ { .compatible = "snps,dwmac-3.72a"},
{ .compatible = "snps,dwmac-4.00"},
{ .compatible = "snps,dwmac-4.10a"},
{ .compatible = "snps,dwmac"},
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
index 6b65420e11b5..db288fbd5a4d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
@@ -23,33 +23,32 @@
#include "stmmac_platform.h"
#define GPR_ENET_QOS_INTF_MODE_MASK GENMASK(21, 16)
-#define GPR_ENET_QOS_INTF_SEL_MII (0x0 << 16)
-#define GPR_ENET_QOS_INTF_SEL_RMII (0x4 << 16)
-#define GPR_ENET_QOS_INTF_SEL_RGMII (0x1 << 16)
+#define GPR_ENET_QOS_INTF_SEL_MASK GENMASK(20, 16)
#define GPR_ENET_QOS_CLK_GEN_EN (0x1 << 19)
#define GPR_ENET_QOS_CLK_TX_CLK_SEL (0x1 << 20)
#define GPR_ENET_QOS_RGMII_EN (0x1 << 21)
#define MX93_GPR_ENET_QOS_INTF_MODE_MASK GENMASK(3, 0)
-#define MX93_GPR_ENET_QOS_INTF_MASK GENMASK(3, 1)
-#define MX93_GPR_ENET_QOS_INTF_SEL_MII (0x0 << 1)
-#define MX93_GPR_ENET_QOS_INTF_SEL_RMII (0x4 << 1)
-#define MX93_GPR_ENET_QOS_INTF_SEL_RGMII (0x1 << 1)
+#define MX93_GPR_ENET_QOS_INTF_SEL_MASK GENMASK(3, 1)
#define MX93_GPR_ENET_QOS_CLK_GEN_EN (0x1 << 0)
+#define MX93_GPR_ENET_QOS_CLK_SEL_MASK BIT_MASK(0)
+#define MX93_GPR_CLK_SEL_OFFSET (4)
#define DMA_BUS_MODE 0x00001000
#define DMA_BUS_MODE_SFT_RESET (0x1 << 0)
#define RMII_RESET_SPEED (0x3 << 14)
#define CTRL_SPEED_MASK GENMASK(15, 14)
+struct imx_priv_data;
+
struct imx_dwmac_ops {
u32 addr_width;
u32 flags;
bool mac_rgmii_txclk_auto_adj;
- int (*fix_soc_reset)(void *priv, void __iomem *ioaddr);
- int (*set_intf_mode)(struct plat_stmmacenet_data *plat_dat);
- void (*fix_mac_speed)(void *priv, unsigned int speed, unsigned int mode);
+ int (*fix_soc_reset)(struct stmmac_priv *priv, void __iomem *ioaddr);
+ int (*set_intf_mode)(struct imx_priv_data *dwmac, u8 phy_intf_sel);
+ void (*fix_mac_speed)(void *priv, int speed, unsigned int mode);
};
struct imx_priv_data {
@@ -65,71 +64,46 @@ struct imx_priv_data {
struct plat_stmmacenet_data *plat_dat;
};
-static int imx8mp_set_intf_mode(struct plat_stmmacenet_data *plat_dat)
+static int imx8mp_set_intf_mode(struct imx_priv_data *dwmac, u8 phy_intf_sel)
{
- struct imx_priv_data *dwmac = plat_dat->bsp_priv;
- int val;
-
- switch (plat_dat->mac_interface) {
- case PHY_INTERFACE_MODE_MII:
- val = GPR_ENET_QOS_INTF_SEL_MII;
- break;
- case PHY_INTERFACE_MODE_RMII:
- val = GPR_ENET_QOS_INTF_SEL_RMII;
- val |= (dwmac->rmii_refclk_ext ? 0 : GPR_ENET_QOS_CLK_TX_CLK_SEL);
- break;
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- val = GPR_ENET_QOS_INTF_SEL_RGMII |
- GPR_ENET_QOS_RGMII_EN;
- break;
- default:
- pr_debug("imx dwmac doesn't support %d interface\n",
- plat_dat->mac_interface);
- return -EINVAL;
- }
+ unsigned int val;
+
+ val = FIELD_PREP(GPR_ENET_QOS_INTF_SEL_MASK, phy_intf_sel) |
+ GPR_ENET_QOS_CLK_GEN_EN;
+
+ if (phy_intf_sel == PHY_INTF_SEL_RMII && !dwmac->rmii_refclk_ext)
+ val |= GPR_ENET_QOS_CLK_TX_CLK_SEL;
+ else if (phy_intf_sel == PHY_INTF_SEL_RGMII)
+ val |= GPR_ENET_QOS_RGMII_EN;
- val |= GPR_ENET_QOS_CLK_GEN_EN;
return regmap_update_bits(dwmac->intf_regmap, dwmac->intf_reg_off,
GPR_ENET_QOS_INTF_MODE_MASK, val);
};
static int
-imx8dxl_set_intf_mode(struct plat_stmmacenet_data *plat_dat)
+imx8dxl_set_intf_mode(struct imx_priv_data *dwmac, u8 phy_intf_sel)
{
- int ret = 0;
-
/* TBD: depends on imx8dxl scu interfaces to be upstreamed */
- return ret;
+ return 0;
}
-static int imx93_set_intf_mode(struct plat_stmmacenet_data *plat_dat)
+static int imx93_set_intf_mode(struct imx_priv_data *dwmac, u8 phy_intf_sel)
{
- struct imx_priv_data *dwmac = plat_dat->bsp_priv;
- int val;
-
- switch (plat_dat->mac_interface) {
- case PHY_INTERFACE_MODE_MII:
- val = MX93_GPR_ENET_QOS_INTF_SEL_MII;
- break;
- case PHY_INTERFACE_MODE_RMII:
- val = MX93_GPR_ENET_QOS_INTF_SEL_RMII;
- break;
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- val = MX93_GPR_ENET_QOS_INTF_SEL_RGMII;
- break;
- default:
- dev_dbg(dwmac->dev, "imx dwmac doesn't support %d interface\n",
- plat_dat->mac_interface);
- return -EINVAL;
+ unsigned int val;
+ int ret;
+
+ if (phy_intf_sel == PHY_INTF_SEL_RMII && dwmac->rmii_refclk_ext) {
+ ret = regmap_clear_bits(dwmac->intf_regmap,
+ dwmac->intf_reg_off +
+ MX93_GPR_CLK_SEL_OFFSET,
+ MX93_GPR_ENET_QOS_CLK_SEL_MASK);
+ if (ret)
+ return ret;
}
- val |= MX93_GPR_ENET_QOS_CLK_GEN_EN;
+ val = FIELD_PREP(MX93_GPR_ENET_QOS_INTF_SEL_MASK, phy_intf_sel) |
+ MX93_GPR_ENET_QOS_CLK_GEN_EN;
+
return regmap_update_bits(dwmac->intf_regmap, dwmac->intf_reg_off,
MX93_GPR_ENET_QOS_INTF_MODE_MASK, val);
};
@@ -160,54 +134,48 @@ static int imx_dwmac_clks_config(void *priv, bool enabled)
return ret;
}
-static int imx_dwmac_init(struct platform_device *pdev, void *priv)
+static int imx_set_phy_intf_sel(void *bsp_priv, u8 phy_intf_sel)
{
- struct plat_stmmacenet_data *plat_dat;
- struct imx_priv_data *dwmac = priv;
- int ret;
+ struct imx_priv_data *dwmac = bsp_priv;
- plat_dat = dwmac->plat_dat;
+ if (!dwmac->ops->set_intf_mode)
+ return 0;
- if (dwmac->ops->set_intf_mode) {
- ret = dwmac->ops->set_intf_mode(plat_dat);
- if (ret)
- return ret;
- }
+ if (phy_intf_sel != PHY_INTF_SEL_GMII_MII &&
+ phy_intf_sel != PHY_INTF_SEL_RGMII &&
+ phy_intf_sel != PHY_INTF_SEL_RMII)
+ return -EINVAL;
- return 0;
+ return dwmac->ops->set_intf_mode(dwmac, phy_intf_sel);
}
-static void imx_dwmac_exit(struct platform_device *pdev, void *priv)
+static int imx_dwmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed)
{
- /* nothing to do now */
+ if (interface == PHY_INTERFACE_MODE_RMII ||
+ interface == PHY_INTERFACE_MODE_MII)
+ return 0;
+
+ return stmmac_set_clk_tx_rate(bsp_priv, clk_tx_i, interface, speed);
}
-static void imx_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int mode)
+static void imx_dwmac_fix_speed(void *priv, int speed, unsigned int mode)
{
struct plat_stmmacenet_data *plat_dat;
struct imx_priv_data *dwmac = priv;
- unsigned long rate;
+ long rate;
int err;
plat_dat = dwmac->plat_dat;
if (dwmac->ops->mac_rgmii_txclk_auto_adj ||
- (plat_dat->mac_interface == PHY_INTERFACE_MODE_RMII) ||
- (plat_dat->mac_interface == PHY_INTERFACE_MODE_MII))
+ (plat_dat->phy_interface == PHY_INTERFACE_MODE_RMII) ||
+ (plat_dat->phy_interface == PHY_INTERFACE_MODE_MII))
return;
- switch (speed) {
- case SPEED_1000:
- rate = 125000000;
- break;
- case SPEED_100:
- rate = 25000000;
- break;
- case SPEED_10:
- rate = 2500000;
- break;
- default:
- dev_err(dwmac->dev, "invalid speed %u\n", speed);
+ rate = rgmii_clock(speed);
+ if (rate < 0) {
+ dev_err(dwmac->dev, "invalid speed %d\n", speed);
return;
}
@@ -216,7 +184,7 @@ static void imx_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int mod
dev_err(dwmac->dev, "failed to set tx rate %lu\n", rate);
}
-static void imx93_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int mode)
+static void imx93_dwmac_fix_speed(void *priv, int speed, unsigned int mode)
{
struct imx_priv_data *dwmac = priv;
unsigned int iface;
@@ -230,8 +198,8 @@ static void imx93_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int m
if (regmap_read(dwmac->intf_regmap, dwmac->intf_reg_off, &iface))
return;
- iface &= MX93_GPR_ENET_QOS_INTF_MASK;
- if (iface != MX93_GPR_ENET_QOS_INTF_SEL_RGMII)
+ if (FIELD_GET(MX93_GPR_ENET_QOS_INTF_SEL_MASK, iface) !=
+ PHY_INTF_SEL_RGMII)
return;
old_ctrl = readl(dwmac->base_addr + MAC_CTRL_REG);
@@ -244,6 +212,7 @@ static void imx93_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int m
readl(dwmac->base_addr + MAC_CTRL_REG);
usleep_range(10, 20);
+ iface &= MX93_GPR_ENET_QOS_INTF_SEL_MASK;
iface |= MX93_GPR_ENET_QOS_CLK_GEN_EN;
regmap_update_bits(dwmac->intf_regmap, dwmac->intf_reg_off,
MX93_GPR_ENET_QOS_INTF_MODE_MASK, iface);
@@ -251,16 +220,16 @@ static void imx93_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int m
writel(old_ctrl, dwmac->base_addr + MAC_CTRL_REG);
}
-static int imx_dwmac_mx93_reset(void *priv, void __iomem *ioaddr)
+static int imx_dwmac_mx93_reset(struct stmmac_priv *priv, void __iomem *ioaddr)
{
- struct plat_stmmacenet_data *plat_dat = priv;
+ struct plat_stmmacenet_data *plat_dat = priv->plat;
u32 value = readl(ioaddr + DMA_BUS_MODE);
/* DMA SW reset */
value |= DMA_BUS_MODE_SFT_RESET;
writel(value, ioaddr + DMA_BUS_MODE);
- if (plat_dat->mac_interface == PHY_INTERFACE_MODE_RMII) {
+ if (plat_dat->phy_interface == PHY_INTERFACE_MODE_RMII) {
usleep_range(100, 200);
writel(RMII_RESET_SPEED, ioaddr + MAC_CTRL_REG);
}
@@ -287,6 +256,7 @@ imx_dwmac_parse_dt(struct imx_priv_data *dwmac, struct device *dev)
dwmac->clk_mem = NULL;
if (of_machine_is_compatible("fsl,imx8dxl") ||
+ of_machine_is_compatible("fsl,imx91") ||
of_machine_is_compatible("fsl,imx93")) {
dwmac->clk_mem = devm_clk_get(dev, "mem");
if (IS_ERR(dwmac->clk_mem)) {
@@ -296,20 +266,17 @@ imx_dwmac_parse_dt(struct imx_priv_data *dwmac, struct device *dev)
}
if (of_machine_is_compatible("fsl,imx8mp") ||
+ of_machine_is_compatible("fsl,imx91") ||
of_machine_is_compatible("fsl,imx93")) {
		/* Binding doc describes the property:
- * is required by i.MX8MP, i.MX93.
+ * is required by i.MX8MP, i.MX91, i.MX93.
		 * is optional for i.MX8DXL.
*/
- dwmac->intf_regmap = syscon_regmap_lookup_by_phandle(np, "intf_mode");
+ dwmac->intf_regmap =
+ syscon_regmap_lookup_by_phandle_args(np, "intf_mode", 1,
+ &dwmac->intf_reg_off);
if (IS_ERR(dwmac->intf_regmap))
return PTR_ERR(dwmac->intf_regmap);
-
- err = of_property_read_u32_index(np, "intf_mode", 1, &dwmac->intf_reg_off);
- if (err) {
- dev_err(dev, "Can't get intf mode reg offset (%d)\n", err);
- return err;
- }
}
return err;
@@ -358,10 +325,8 @@ static int imx_dwmac_probe(struct platform_device *pdev)
plat_dat->tx_queues_cfg[i].tbs_en = 1;
plat_dat->host_dma_width = dwmac->ops->addr_width;
- plat_dat->init = imx_dwmac_init;
- plat_dat->exit = imx_dwmac_exit;
+ plat_dat->set_phy_intf_sel = imx_set_phy_intf_sel;
plat_dat->clks_config = imx_dwmac_clks_config;
- plat_dat->fix_mac_speed = imx_dwmac_fix_speed;
plat_dat->bsp_priv = dwmac;
dwmac->plat_dat = plat_dat;
dwmac->base_addr = stmmac_res.addr;
@@ -370,24 +335,19 @@ static int imx_dwmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = imx_dwmac_init(pdev, dwmac);
- if (ret)
- goto err_dwmac_init;
-
- if (dwmac->ops->fix_mac_speed)
+ if (dwmac->ops->fix_mac_speed) {
plat_dat->fix_mac_speed = dwmac->ops->fix_mac_speed;
+ } else if (!dwmac->ops->mac_rgmii_txclk_auto_adj) {
+ plat_dat->clk_tx_i = dwmac->clk_tx;
+ plat_dat->set_clk_tx_rate = imx_dwmac_set_clk_tx_rate;
+ }
+
dwmac->plat_dat->fix_soc_reset = dwmac->ops->fix_soc_reset;
- ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ ret = stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
if (ret)
- goto err_drv_probe;
-
- return 0;
+ imx_dwmac_clks_config(dwmac, false);
-err_drv_probe:
- imx_dwmac_exit(pdev, plat_dat->bsp_priv);
-err_dwmac_init:
- imx_dwmac_clks_config(dwmac, false);
return ret;
}
@@ -422,7 +382,7 @@ MODULE_DEVICE_TABLE(of, imx_dwmac_match);
static struct platform_driver imx_dwmac_driver = {
.probe = imx_dwmac_probe,
- .remove_new = stmmac_pltfr_remove,
+ .remove = stmmac_pltfr_remove,
.driver = {
.name = "imx-dwmac",
.pm = &stmmac_pltfr_pm_ops,
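
The speed switch removed from imx_dwmac_fix_speed() is replaced by rgmii_clock(), which performs the same SPEED_* to clock-rate mapping shown in the deleted lines. A minimal sketch of that mapping, assuming only 10/100/1000 Mb/s are handled (helper name is illustrative, not the kernel implementation):

#include <linux/errno.h>
#include <uapi/linux/ethtool.h>	/* SPEED_10/100/1000 */

/* Illustrative only: same speed-to-rate mapping the removed switch used. */
static long example_rgmii_clock(int speed)
{
	switch (speed) {
	case SPEED_1000:
		return 125000000;	/* 125 MHz */
	case SPEED_100:
		return 25000000;	/* 25 MHz */
	case SPEED_10:
		return 2500000;		/* 2.5 MHz */
	default:
		return -EINVAL;
	}
}
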
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
index 19c93b998fb3..8e4a30c11db0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
@@ -35,10 +35,6 @@
#define MACPHYC_RX_DELAY_MASK GENMASK(10, 4)
#define MACPHYC_SOFT_RST_MASK GENMASK(3, 3)
#define MACPHYC_PHY_INFT_MASK GENMASK(2, 0)
-#define MACPHYC_PHY_INFT_RMII 0x4
-#define MACPHYC_PHY_INFT_RGMII 0x1
-#define MACPHYC_PHY_INFT_GMII 0x0
-#define MACPHYC_PHY_INFT_MII 0x0
#define MACPHYC_TX_DELAY_PS_MAX 2496
#define MACPHYC_TX_DELAY_PS_MIN 20
@@ -56,6 +52,7 @@ enum ingenic_mac_version {
struct ingenic_mac {
const struct ingenic_soc_info *soc_info;
+ struct plat_stmmacenet_data *plat_dat;
struct device *dev;
struct regmap *regmap;
@@ -67,167 +64,93 @@ struct ingenic_soc_info {
enum ingenic_mac_version version;
u32 mask;
- int (*set_mode)(struct plat_stmmacenet_data *plat_dat);
-};
-
-static int ingenic_mac_init(struct plat_stmmacenet_data *plat_dat)
-{
- struct ingenic_mac *mac = plat_dat->bsp_priv;
- int ret;
-
- if (mac->soc_info->set_mode) {
- ret = mac->soc_info->set_mode(plat_dat);
- if (ret)
- return ret;
- }
+ int (*set_mode)(struct ingenic_mac *mac, u8 phy_intf_sel);
- return 0;
-}
+ u8 valid_phy_intf_sel;
+};
-static int jz4775_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
+static int jz4775_mac_set_mode(struct ingenic_mac *mac, u8 phy_intf_sel)
{
- struct ingenic_mac *mac = plat_dat->bsp_priv;
unsigned int val;
- switch (plat_dat->mac_interface) {
- case PHY_INTERFACE_MODE_MII:
- val = FIELD_PREP(MACPHYC_TXCLK_SEL_MASK, MACPHYC_TXCLK_SEL_INPUT) |
- FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_MII);
- dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_MII\n");
- break;
-
- case PHY_INTERFACE_MODE_GMII:
- val = FIELD_PREP(MACPHYC_TXCLK_SEL_MASK, MACPHYC_TXCLK_SEL_INPUT) |
- FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_GMII);
- dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_GMII\n");
- break;
-
- case PHY_INTERFACE_MODE_RMII:
- val = FIELD_PREP(MACPHYC_TXCLK_SEL_MASK, MACPHYC_TXCLK_SEL_INPUT) |
- FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RMII);
- dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RMII\n");
- break;
-
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- val = FIELD_PREP(MACPHYC_TXCLK_SEL_MASK, MACPHYC_TXCLK_SEL_INPUT) |
- FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RGMII);
- dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RGMII\n");
- break;
-
- default:
- dev_err(mac->dev, "Unsupported interface %d", plat_dat->mac_interface);
- return -EINVAL;
- }
+ val = FIELD_PREP(MACPHYC_PHY_INFT_MASK, phy_intf_sel) |
+ FIELD_PREP(MACPHYC_TXCLK_SEL_MASK, MACPHYC_TXCLK_SEL_INPUT);
/* Update MAC PHY control register */
return regmap_update_bits(mac->regmap, 0, mac->soc_info->mask, val);
}
-static int x1000_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
+static int x1000_mac_set_mode(struct ingenic_mac *mac, u8 phy_intf_sel)
{
- struct ingenic_mac *mac = plat_dat->bsp_priv;
-
- switch (plat_dat->mac_interface) {
- case PHY_INTERFACE_MODE_RMII:
- dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RMII\n");
- break;
-
- default:
- dev_err(mac->dev, "Unsupported interface %d", plat_dat->mac_interface);
- return -EINVAL;
- }
-
/* Update MAC PHY control register */
return regmap_update_bits(mac->regmap, 0, mac->soc_info->mask, 0);
}
-static int x1600_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
+static int x1600_mac_set_mode(struct ingenic_mac *mac, u8 phy_intf_sel)
{
- struct ingenic_mac *mac = plat_dat->bsp_priv;
unsigned int val;
- switch (plat_dat->mac_interface) {
- case PHY_INTERFACE_MODE_RMII:
- val = FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RMII);
- dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RMII\n");
- break;
-
- default:
- dev_err(mac->dev, "Unsupported interface %d", plat_dat->mac_interface);
- return -EINVAL;
- }
+ val = FIELD_PREP(MACPHYC_PHY_INFT_MASK, phy_intf_sel);
/* Update MAC PHY control register */
return regmap_update_bits(mac->regmap, 0, mac->soc_info->mask, val);
}
-static int x1830_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
+static int x1830_mac_set_mode(struct ingenic_mac *mac, u8 phy_intf_sel)
{
- struct ingenic_mac *mac = plat_dat->bsp_priv;
unsigned int val;
- switch (plat_dat->mac_interface) {
- case PHY_INTERFACE_MODE_RMII:
- val = FIELD_PREP(MACPHYC_MODE_SEL_MASK, MACPHYC_MODE_SEL_RMII) |
- FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RMII);
- dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RMII\n");
- break;
-
- default:
- dev_err(mac->dev, "Unsupported interface %d", plat_dat->mac_interface);
- return -EINVAL;
- }
+ val = FIELD_PREP(MACPHYC_MODE_SEL_MASK, MACPHYC_MODE_SEL_RMII) |
+ FIELD_PREP(MACPHYC_PHY_INFT_MASK, phy_intf_sel);
/* Update MAC PHY control register */
return regmap_update_bits(mac->regmap, 0, mac->soc_info->mask, val);
}
-static int x2000_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
+static int x2000_mac_set_mode(struct ingenic_mac *mac, u8 phy_intf_sel)
{
- struct ingenic_mac *mac = plat_dat->bsp_priv;
unsigned int val;
- switch (plat_dat->mac_interface) {
- case PHY_INTERFACE_MODE_RMII:
- val = FIELD_PREP(MACPHYC_TX_SEL_MASK, MACPHYC_TX_SEL_ORIGIN) |
- FIELD_PREP(MACPHYC_RX_SEL_MASK, MACPHYC_RX_SEL_ORIGIN) |
- FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RMII);
- dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RMII\n");
- break;
-
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- val = FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RGMII);
+ val = FIELD_PREP(MACPHYC_PHY_INFT_MASK, phy_intf_sel);
+ if (phy_intf_sel == PHY_INTF_SEL_RMII) {
+ val |= FIELD_PREP(MACPHYC_TX_SEL_MASK, MACPHYC_TX_SEL_ORIGIN) |
+ FIELD_PREP(MACPHYC_RX_SEL_MASK, MACPHYC_RX_SEL_ORIGIN);
+ } else if (phy_intf_sel == PHY_INTF_SEL_RGMII) {
if (mac->tx_delay == 0)
val |= FIELD_PREP(MACPHYC_TX_SEL_MASK, MACPHYC_TX_SEL_ORIGIN);
else
val |= FIELD_PREP(MACPHYC_TX_SEL_MASK, MACPHYC_TX_SEL_DELAY) |
- FIELD_PREP(MACPHYC_TX_DELAY_MASK, (mac->tx_delay + 9750) / 19500 - 1);
+ FIELD_PREP(MACPHYC_TX_DELAY_MASK, (mac->tx_delay + 9750) / 19500 - 1);
if (mac->rx_delay == 0)
val |= FIELD_PREP(MACPHYC_RX_SEL_MASK, MACPHYC_RX_SEL_ORIGIN);
else
val |= FIELD_PREP(MACPHYC_RX_SEL_MASK, MACPHYC_RX_SEL_DELAY) |
FIELD_PREP(MACPHYC_RX_DELAY_MASK, (mac->rx_delay + 9750) / 19500 - 1);
-
- dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RGMII\n");
- break;
-
- default:
- dev_err(mac->dev, "Unsupported interface %d", plat_dat->mac_interface);
- return -EINVAL;
}
/* Update MAC PHY control register */
return regmap_update_bits(mac->regmap, 0, mac->soc_info->mask, val);
}
+static int ingenic_set_phy_intf_sel(void *bsp_priv, u8 phy_intf_sel)
+{
+ struct ingenic_mac *mac = bsp_priv;
+
+ if (!mac->soc_info->set_mode)
+ return 0;
+
+ if (phy_intf_sel >= BITS_PER_BYTE ||
+ ~mac->soc_info->valid_phy_intf_sel & BIT(phy_intf_sel))
+ return -EINVAL;
+
+ dev_dbg(mac->dev, "MAC PHY control register: interface %s\n",
+ phy_modes(mac->plat_dat->phy_interface));
+
+ return mac->soc_info->set_mode(mac, phy_intf_sel);
+}
+
static int ingenic_mac_probe(struct platform_device *pdev)
{
struct plat_stmmacenet_data *plat_dat;
@@ -284,49 +207,22 @@ static int ingenic_mac_probe(struct platform_device *pdev)
mac->soc_info = data;
mac->dev = &pdev->dev;
+ mac->plat_dat = plat_dat;
plat_dat->bsp_priv = mac;
+ plat_dat->set_phy_intf_sel = ingenic_set_phy_intf_sel;
- ret = ingenic_mac_init(plat_dat);
- if (ret)
- return ret;
-
- return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int ingenic_mac_suspend(struct device *dev)
-{
- int ret;
-
- ret = stmmac_suspend(dev);
-
- return ret;
+ return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
}
-static int ingenic_mac_resume(struct device *dev)
-{
- struct net_device *ndev = dev_get_drvdata(dev);
- struct stmmac_priv *priv = netdev_priv(ndev);
- int ret;
-
- ret = ingenic_mac_init(priv->plat);
- if (ret)
- return ret;
-
- ret = stmmac_resume(dev);
-
- return ret;
-}
-#endif /* CONFIG_PM_SLEEP */
-
-static SIMPLE_DEV_PM_OPS(ingenic_mac_pm_ops, ingenic_mac_suspend, ingenic_mac_resume);
-
static struct ingenic_soc_info jz4775_soc_info = {
.version = ID_JZ4775,
.mask = MACPHYC_TXCLK_SEL_MASK | MACPHYC_SOFT_RST_MASK | MACPHYC_PHY_INFT_MASK,
.set_mode = jz4775_mac_set_mode,
+ .valid_phy_intf_sel = BIT(PHY_INTF_SEL_GMII_MII) |
+ BIT(PHY_INTF_SEL_RGMII) |
+ BIT(PHY_INTF_SEL_RMII),
};
static struct ingenic_soc_info x1000_soc_info = {
@@ -334,6 +230,7 @@ static struct ingenic_soc_info x1000_soc_info = {
.mask = MACPHYC_SOFT_RST_MASK,
.set_mode = x1000_mac_set_mode,
+ .valid_phy_intf_sel = BIT(PHY_INTF_SEL_RMII),
};
static struct ingenic_soc_info x1600_soc_info = {
@@ -341,6 +238,7 @@ static struct ingenic_soc_info x1600_soc_info = {
.mask = MACPHYC_SOFT_RST_MASK | MACPHYC_PHY_INFT_MASK,
.set_mode = x1600_mac_set_mode,
+ .valid_phy_intf_sel = BIT(PHY_INTF_SEL_RMII),
};
static struct ingenic_soc_info x1830_soc_info = {
@@ -348,6 +246,7 @@ static struct ingenic_soc_info x1830_soc_info = {
.mask = MACPHYC_MODE_SEL_MASK | MACPHYC_SOFT_RST_MASK | MACPHYC_PHY_INFT_MASK,
.set_mode = x1830_mac_set_mode,
+ .valid_phy_intf_sel = BIT(PHY_INTF_SEL_RMII),
};
static struct ingenic_soc_info x2000_soc_info = {
@@ -356,6 +255,8 @@ static struct ingenic_soc_info x2000_soc_info = {
MACPHYC_RX_DELAY_MASK | MACPHYC_SOFT_RST_MASK | MACPHYC_PHY_INFT_MASK,
.set_mode = x2000_mac_set_mode,
+ .valid_phy_intf_sel = BIT(PHY_INTF_SEL_RGMII) |
+ BIT(PHY_INTF_SEL_RMII),
};
static const struct of_device_id ingenic_mac_of_matches[] = {
@@ -370,10 +271,9 @@ MODULE_DEVICE_TABLE(of, ingenic_mac_of_matches);
static struct platform_driver ingenic_mac_driver = {
.probe = ingenic_mac_probe,
- .remove_new = stmmac_pltfr_remove,
.driver = {
.name = "ingenic-mac",
- .pm = pm_ptr(&ingenic_mac_pm_ops),
+ .pm = &stmmac_pltfr_pm_ops,
.of_match_table = ingenic_mac_of_matches,
},
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
index d68f0c4e7835..4ea7b0a803d7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
@@ -22,45 +22,12 @@ struct intel_dwmac {
};
struct intel_dwmac_data {
- void (*fix_mac_speed)(void *priv, unsigned int speed, unsigned int mode);
unsigned long ptp_ref_clk_rate;
unsigned long tx_clk_rate;
bool tx_clk_en;
};
-static void kmb_eth_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
-{
- struct intel_dwmac *dwmac = priv;
- unsigned long rate;
- int ret;
-
- rate = clk_get_rate(dwmac->tx_clk);
-
- switch (speed) {
- case SPEED_1000:
- rate = 125000000;
- break;
-
- case SPEED_100:
- rate = 25000000;
- break;
-
- case SPEED_10:
- rate = 2500000;
- break;
-
- default:
- dev_err(dwmac->dev, "Invalid speed\n");
- break;
- }
-
- ret = clk_set_rate(dwmac->tx_clk, rate);
- if (ret)
- dev_err(dwmac->dev, "Failed to configure tx clock rate\n");
-}
-
static const struct intel_dwmac_data kmb_data = {
- .fix_mac_speed = kmb_eth_fix_mac_speed,
.ptp_ref_clk_rate = 200000000,
.tx_clk_rate = 125000000,
.tx_clk_en = true,
@@ -97,30 +64,36 @@ static int intel_eth_plat_probe(struct platform_device *pdev)
dwmac->dev = &pdev->dev;
dwmac->tx_clk = NULL;
+ /*
+	 * This cannot return NULL at this point because the driver's
+ * compatibility with the device has already been validated in
+ * platform_match().
+ */
dwmac->data = device_get_match_data(&pdev->dev);
- if (dwmac->data) {
- if (dwmac->data->fix_mac_speed)
- plat_dat->fix_mac_speed = dwmac->data->fix_mac_speed;
-
- /* Enable TX clock */
- if (dwmac->data->tx_clk_en) {
- dwmac->tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
- if (IS_ERR(dwmac->tx_clk))
- return PTR_ERR(dwmac->tx_clk);
-
- clk_prepare_enable(dwmac->tx_clk);
-
- /* Check and configure TX clock rate */
- rate = clk_get_rate(dwmac->tx_clk);
- if (dwmac->data->tx_clk_rate &&
- rate != dwmac->data->tx_clk_rate) {
- rate = dwmac->data->tx_clk_rate;
- ret = clk_set_rate(dwmac->tx_clk, rate);
- if (ret) {
- dev_err(&pdev->dev,
- "Failed to set tx_clk\n");
- return ret;
- }
+
+ /* Enable TX clock */
+ if (dwmac->data->tx_clk_en) {
+ dwmac->tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
+ if (IS_ERR(dwmac->tx_clk))
+ return PTR_ERR(dwmac->tx_clk);
+
+ ret = clk_prepare_enable(dwmac->tx_clk);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to enable tx_clk\n");
+ return ret;
+ }
+
+ /* Check and configure TX clock rate */
+ rate = clk_get_rate(dwmac->tx_clk);
+ if (dwmac->data->tx_clk_rate &&
+ rate != dwmac->data->tx_clk_rate) {
+ rate = dwmac->data->tx_clk_rate;
+ ret = clk_set_rate(dwmac->tx_clk, rate);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to set tx_clk\n");
+ goto err_tx_clk_disable;
}
}
@@ -133,28 +106,25 @@ static int intel_eth_plat_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev,
"Failed to set clk_ptp_ref\n");
- return ret;
+ goto err_tx_clk_disable;
}
}
}
+ plat_dat->clk_tx_i = dwmac->tx_clk;
+ plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
plat_dat->bsp_priv = dwmac;
- plat_dat->eee_usecs_rate = plat_dat->clk_ptp_rate;
-
- if (plat_dat->eee_usecs_rate > 0) {
- u32 tx_lpi_usec;
-
- tx_lpi_usec = (plat_dat->eee_usecs_rate / 1000000) - 1;
- writel(tx_lpi_usec, stmmac_res.addr + GMAC_1US_TIC_COUNTER);
- }
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
- if (ret) {
- clk_disable_unprepare(dwmac->tx_clk);
- return ret;
- }
+ if (ret)
+ goto err_tx_clk_disable;
return 0;
+
+err_tx_clk_disable:
+ if (dwmac->data->tx_clk_en)
+ clk_disable_unprepare(dwmac->tx_clk);
+ return ret;
}
static void intel_eth_plat_remove(struct platform_device *pdev)
@@ -162,12 +132,13 @@ static void intel_eth_plat_remove(struct platform_device *pdev)
struct intel_dwmac *dwmac = get_stmmac_bsp_priv(&pdev->dev);
stmmac_pltfr_remove(pdev);
- clk_disable_unprepare(dwmac->tx_clk);
+ if (dwmac->data->tx_clk_en)
+ clk_disable_unprepare(dwmac->tx_clk);
}
static struct platform_driver intel_eth_plat_driver = {
.probe = intel_eth_plat_probe,
- .remove_new = intel_eth_plat_remove,
+ .remove = intel_eth_plat_remove,
.driver = {
.name = "intel-eth-plat",
.pm = &stmmac_pltfr_pm_ops,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index 60283543ffc8..aad1be1ec4c1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -5,15 +5,30 @@
#include <linux/clk-provider.h>
#include <linux/pci.h>
#include <linux/dmi.h>
+#include <linux/platform_data/x86/intel_pmc_ipc.h>
#include "dwmac-intel.h"
#include "dwmac4.h"
#include "stmmac.h"
#include "stmmac_ptp.h"
+struct pmc_serdes_regs {
+ u8 index;
+ u32 val;
+};
+
+struct pmc_serdes_reg_info {
+ const struct pmc_serdes_regs *regs;
+ u8 num_regs;
+};
+
struct intel_priv_data {
int mdio_adhoc_addr; /* mdio address for serdes & etc */
unsigned long crossts_adj;
bool is_pse;
+ const int *tsn_lane_regs;
+ int max_tsn_lane_regs;
+ struct pmc_serdes_reg_info pid_1g;
+ struct pmc_serdes_reg_info pid_2p5g;
};
/* This struct is used to associate PCI Function of MAC controller on a board,
@@ -35,6 +50,45 @@ struct stmmac_pci_info {
int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat);
};
+static const struct pmc_serdes_regs pid_modphy3_1g_regs[] = {
+ { PID_MODPHY3_B_MODPHY_PCR_LCPLL_DWORD0, B_MODPHY_PCR_LCPLL_DWORD0_1G },
+ { PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD2, N_MODPHY_PCR_LCPLL_DWORD2_1G },
+ { PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD7, N_MODPHY_PCR_LCPLL_DWORD7_1G },
+ { PID_MODPHY3_N_MODPHY_PCR_LPPLL_DWORD10, N_MODPHY_PCR_LPPLL_DWORD10_1G },
+ { PID_MODPHY3_N_MODPHY_PCR_CMN_ANA_DWORD30, N_MODPHY_PCR_CMN_ANA_DWORD30_1G },
+ {}
+};
+
+static const struct pmc_serdes_regs pid_modphy3_2p5g_regs[] = {
+ { PID_MODPHY3_B_MODPHY_PCR_LCPLL_DWORD0, B_MODPHY_PCR_LCPLL_DWORD0_2P5G },
+ { PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD2, N_MODPHY_PCR_LCPLL_DWORD2_2P5G },
+ { PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD7, N_MODPHY_PCR_LCPLL_DWORD7_2P5G },
+ { PID_MODPHY3_N_MODPHY_PCR_LPPLL_DWORD10, N_MODPHY_PCR_LPPLL_DWORD10_2P5G },
+ { PID_MODPHY3_N_MODPHY_PCR_CMN_ANA_DWORD30, N_MODPHY_PCR_CMN_ANA_DWORD30_2P5G },
+ {}
+};
+
+static const struct pmc_serdes_regs pid_modphy1_1g_regs[] = {
+ { PID_MODPHY1_B_MODPHY_PCR_LCPLL_DWORD0, B_MODPHY_PCR_LCPLL_DWORD0_1G },
+ { PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD2, N_MODPHY_PCR_LCPLL_DWORD2_1G },
+ { PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD7, N_MODPHY_PCR_LCPLL_DWORD7_1G },
+ { PID_MODPHY1_N_MODPHY_PCR_LPPLL_DWORD10, N_MODPHY_PCR_LPPLL_DWORD10_1G },
+ { PID_MODPHY1_N_MODPHY_PCR_CMN_ANA_DWORD30, N_MODPHY_PCR_CMN_ANA_DWORD30_1G },
+ {}
+};
+
+static const struct pmc_serdes_regs pid_modphy1_2p5g_regs[] = {
+ { PID_MODPHY1_B_MODPHY_PCR_LCPLL_DWORD0, B_MODPHY_PCR_LCPLL_DWORD0_2P5G },
+ { PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD2, N_MODPHY_PCR_LCPLL_DWORD2_2P5G },
+ { PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD7, N_MODPHY_PCR_LCPLL_DWORD7_2P5G },
+ { PID_MODPHY1_N_MODPHY_PCR_LPPLL_DWORD10, N_MODPHY_PCR_LPPLL_DWORD10_2P5G },
+ { PID_MODPHY1_N_MODPHY_PCR_CMN_ANA_DWORD30, N_MODPHY_PCR_CMN_ANA_DWORD30_2P5G },
+ {}
+};
+
+static const int ehl_tsn_lane_regs[] = {7, 8, 9, 10, 11};
+static const int adln_tsn_lane_regs[] = {6};
+
static int stmmac_pci_find_phy_addr(struct pci_dev *pdev,
const struct dmi_system_id *dmi_list)
{
@@ -93,7 +147,7 @@ static int intel_serdes_powerup(struct net_device *ndev, void *priv_data)
data &= ~SERDES_RATE_MASK;
data &= ~SERDES_PCLK_MASK;
- if (priv->plat->max_speed == 2500)
+ if (priv->plat->phy_interface == PHY_INTERFACE_MODE_2500BASEX)
data |= SERDES_RATE_PCIE_GEN2 << SERDES_RATE_PCIE_SHIFT |
SERDES_PCLK_37p5MHZ << SERDES_PCLK_SHIFT;
else
@@ -230,28 +284,28 @@ static void intel_serdes_powerdown(struct net_device *ndev, void *intel_data)
}
}
-static void intel_speed_mode_2500(struct net_device *ndev, void *intel_data)
+static void tgl_get_interfaces(struct stmmac_priv *priv, void *bsp_priv,
+ unsigned long *interfaces)
{
- struct intel_priv_data *intel_priv = intel_data;
- struct stmmac_priv *priv = netdev_priv(ndev);
- int serdes_phy_addr = 0;
- u32 data = 0;
-
- serdes_phy_addr = intel_priv->mdio_adhoc_addr;
+ struct intel_priv_data *intel_priv = bsp_priv;
+ phy_interface_t interface;
+ int data;
/* Determine the link speed mode: 2.5Gbps/1Gbps */
- data = mdiobus_read(priv->mii, serdes_phy_addr,
- SERDES_GCR);
+ data = mdiobus_read(priv->mii, intel_priv->mdio_adhoc_addr, SERDES_GCR);
+ if (data < 0)
+ return;
- if (((data & SERDES_LINK_MODE_MASK) >> SERDES_LINK_MODE_SHIFT) ==
- SERDES_LINK_MODE_2G5) {
+ if (FIELD_GET(SERDES_LINK_MODE_MASK, data) == SERDES_LINK_MODE_2G5) {
dev_info(priv->device, "Link Speed Mode: 2.5Gbps\n");
- priv->plat->max_speed = 2500;
- priv->plat->phy_interface = PHY_INTERFACE_MODE_2500BASEX;
- priv->plat->mdio_bus_data->xpcs_an_inband = false;
+ priv->plat->mdio_bus_data->default_an_inband = false;
+ interface = PHY_INTERFACE_MODE_2500BASEX;
} else {
- priv->plat->max_speed = 1000;
+ interface = PHY_INTERFACE_MODE_SGMII;
}
+
+ __set_bit(interface, interfaces);
+ priv->plat->phy_interface = interface;
}
/* Program PTP Clock Frequency for different variant of
@@ -317,9 +371,6 @@ static int intel_crosststamp(ktime_t *device,
u32 acr_value;
int i;
- if (!boot_cpu_has(X86_FEATURE_ART))
- return -EOPNOTSUPP;
-
intel_priv = priv->plat->bsp_priv;
/* Both internal crosstimestamping and external triggered event
@@ -379,6 +430,12 @@ static int intel_crosststamp(ktime_t *device,
return -ETIMEDOUT;
}
+ *system = (struct system_counterval_t) {
+ .cycles = 0,
+ .cs_id = CSID_X86_ART,
+ .use_nsecs = false,
+ };
+
num_snapshot = (readl(ioaddr + GMAC_TIMESTAMP_STATUS) &
GMAC_TIMESTAMP_ATSNS_MASK) >>
GMAC_TIMESTAMP_ATSNS_SHIFT;
@@ -390,10 +447,11 @@ static int intel_crosststamp(ktime_t *device,
*device = ns_to_ktime(ptp_time);
read_unlock_irqrestore(&priv->ptp_lock, flags);
get_arttime(priv->mii, intel_priv->mdio_adhoc_addr, &art_time);
- *system = convert_art_to_tsc(art_time);
+ system->cycles = art_time;
}
system->cycles *= intel_priv->crossts_adj;
+
priv->plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN;
return 0;
@@ -414,33 +472,113 @@ static void intel_mgbe_pse_crossts_adj(struct intel_priv_data *intel_priv,
}
}
-static void common_default_data(struct plat_stmmacenet_data *plat)
+static int intel_tsn_lane_is_available(struct net_device *ndev,
+ struct intel_priv_data *intel_priv)
{
- plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
- plat->has_gmac = 1;
- plat->force_sf_dma_mode = 1;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ struct pmc_ipc_cmd tmp = {};
+ struct pmc_ipc_rbuf rbuf = {};
+ int ret = 0, i, j;
+ const int max_fia_regs = 5;
- plat->mdio_bus_data->needs_reset = true;
+ tmp.cmd = IPC_SOC_REGISTER_ACCESS;
+ tmp.sub_cmd = IPC_SOC_SUB_CMD_READ;
+
+ for (i = 0; i < max_fia_regs; i++) {
+ tmp.wbuf[0] = R_PCH_FIA_15_PCR_LOS1_REG_BASE + i;
+
+ ret = intel_pmc_ipc(&tmp, &rbuf);
+ if (ret < 0) {
+ netdev_info(priv->dev, "Failed to read from PMC.\n");
+ return ret;
+ }
+
+ for (j = 0; j <= intel_priv->max_tsn_lane_regs; j++)
+ if ((rbuf.buf[0] >>
+ (4 * (intel_priv->tsn_lane_regs[j] % 8)) &
+ B_PCH_FIA_PCR_L0O) == 0xB)
+ return 0;
+ }
- /* Set default value for multicast hash bins */
- plat->multicast_filter_bins = HASH_TABLE_SIZE;
+ return -EINVAL;
+}
- /* Set default value for unicast filter entries */
- plat->unicast_filter_entries = 1;
+static int intel_set_reg_access(const struct pmc_serdes_regs *regs, int max_regs)
+{
+ int ret = 0, i;
- /* Set the maxmtu to a default of JUMBO_LEN */
- plat->maxmtu = JUMBO_LEN;
+ for (i = 0; i < max_regs; i++) {
+ struct pmc_ipc_cmd tmp = {};
+ struct pmc_ipc_rbuf rbuf = {};
- /* Set default number of RX and TX queues to use */
- plat->tx_queues_to_use = 1;
- plat->rx_queues_to_use = 1;
+ tmp.cmd = IPC_SOC_REGISTER_ACCESS;
+ tmp.sub_cmd = IPC_SOC_SUB_CMD_WRITE;
+ tmp.wbuf[0] = (u32)regs[i].index;
+ tmp.wbuf[1] = regs[i].val;
- /* Disable Priority config by default */
- plat->tx_queues_cfg[0].use_prio = false;
- plat->rx_queues_cfg[0].use_prio = false;
+ ret = intel_pmc_ipc(&tmp, &rbuf);
+ if (ret < 0)
+ return ret;
+ }
- /* Disable RX queues routing by default */
- plat->rx_queues_cfg[0].pkt_route = 0x0;
+ return ret;
+}
+
+static int intel_mac_finish(struct net_device *ndev,
+ void *intel_data,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct intel_priv_data *intel_priv = intel_data;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ const struct pmc_serdes_regs *regs;
+ int max_regs = 0;
+ int ret = 0;
+
+ ret = intel_tsn_lane_is_available(ndev, intel_priv);
+ if (ret < 0) {
+ netdev_info(priv->dev, "No TSN lane available to set the registers.\n");
+ return ret;
+ }
+
+ if (interface == PHY_INTERFACE_MODE_2500BASEX) {
+ regs = intel_priv->pid_2p5g.regs;
+ max_regs = intel_priv->pid_2p5g.num_regs;
+ } else {
+ regs = intel_priv->pid_1g.regs;
+ max_regs = intel_priv->pid_1g.num_regs;
+ }
+
+ ret = intel_set_reg_access(regs, max_regs);
+ if (ret < 0)
+ return ret;
+
+ priv->plat->phy_interface = interface;
+
+ intel_serdes_powerdown(ndev, intel_priv);
+ intel_serdes_powerup(ndev, intel_priv);
+
+ return ret;
+}
+
+static void common_default_data(struct plat_stmmacenet_data *plat)
+{
+ /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
+ plat->clk_csr = STMMAC_CSR_20_35M;
+ plat->core_type = DWMAC_CORE_GMAC;
+ plat->force_sf_dma_mode = 1;
+
+ plat->mdio_bus_data->needs_reset = true;
+}
+
+static struct phylink_pcs *intel_mgbe_select_pcs(struct stmmac_priv *priv,
+ phy_interface_t interface)
+{
+ /* plat->mdio_bus_data->has_xpcs has been set true, so there
+ * should always be an XPCS. The original code would always
+ * return this if present.
+ */
+ return xpcs_to_phylink_pcs(priv->hw->xpcs);
}
static int intel_mgbe_common_data(struct pci_dev *pdev,
@@ -453,9 +591,8 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
plat->pdev = pdev;
plat->phy_addr = -1;
- plat->clk_csr = 5;
- plat->has_gmac = 0;
- plat->has_gmac4 = 1;
+ plat->clk_csr = STMMAC_CSR_250_300M;
+ plat->core_type = DWMAC_CORE_GMAC4;
plat->force_sf_dma_mode = 0;
plat->flags |= (STMMAC_FLAG_TSO_EN | STMMAC_FLAG_SPH_DISABLE);
@@ -472,22 +609,12 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
- for (i = 0; i < plat->rx_queues_to_use; i++) {
+ for (i = 0; i < plat->rx_queues_to_use; i++)
plat->rx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
- plat->rx_queues_cfg[i].chan = i;
-
- /* Disable Priority config by default */
- plat->rx_queues_cfg[i].use_prio = false;
-
- /* Disable RX queues routing by default */
- plat->rx_queues_cfg[i].pkt_route = 0x0;
- }
for (i = 0; i < plat->tx_queues_to_use; i++) {
plat->tx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
- /* Disable Priority config by default */
- plat->tx_queues_cfg[i].use_prio = false;
/* Default TX Q0 to use TSO and rest TXQ for TBS */
if (i > 0)
plat->tx_queues_cfg[i].tbs_en = 1;
@@ -523,12 +650,10 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
plat->axi->axi_xit_frm = 0;
plat->axi->axi_wr_osr_lmt = 1;
plat->axi->axi_rd_osr_lmt = 1;
- plat->axi->axi_blen[0] = 4;
- plat->axi->axi_blen[1] = 8;
- plat->axi->axi_blen[2] = 16;
+ plat->axi->axi_blen_regval = DMA_AXI_BLEN4 | DMA_AXI_BLEN8 |
+ DMA_AXI_BLEN16;
plat->ptp_max_adj = plat->clk_ptp_rate;
- plat->eee_usecs_rate = plat->clk_ptp_rate;
/* Set system clock */
sprintf(clk_name, "%s-%s", "stmmac", pci_name(pdev));
@@ -550,15 +675,6 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
plat->ptp_clk_freq_config = intel_mgbe_ptp_clk_freq_config;
- /* Set default value for multicast hash bins */
- plat->multicast_filter_bins = HASH_TABLE_SIZE;
-
- /* Set default value for unicast filter entries */
- plat->unicast_filter_entries = 1;
-
- /* Set the maxmtu to a default of JUMBO_LEN */
- plat->maxmtu = JUMBO_LEN;
-
plat->flags |= STMMAC_FLAG_VLAN_FAIL_Q_EN;
/* Use the last Rx queue */
@@ -585,19 +701,9 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
/* Intel mgbe SGMII interface uses pcs-xcps */
if (plat->phy_interface == PHY_INTERFACE_MODE_SGMII ||
plat->phy_interface == PHY_INTERFACE_MODE_1000BASEX) {
- plat->mdio_bus_data->has_xpcs = true;
- plat->mdio_bus_data->xpcs_an_inband = true;
- }
-
- /* For fixed-link setup, we clear xpcs_an_inband */
- if (fwnode) {
- struct fwnode_handle *fixed_node;
-
- fixed_node = fwnode_get_named_child_node(fwnode, "fixed-link");
- if (fixed_node)
- plat->mdio_bus_data->xpcs_an_inband = false;
-
- fwnode_handle_put(fixed_node);
+ plat->mdio_bus_data->pcs_mask = BIT(INTEL_MGBE_XPCS_ADDR);
+ plat->mdio_bus_data->default_an_inband = true;
+ plat->select_pcs = intel_mgbe_select_pcs;
}
/* Ensure mdio bus scan skips intel serdes and pcs-xpcs */
@@ -606,7 +712,9 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
plat->int_snapshot_num = AUX_SNAPSHOT1;
- plat->crosststamp = intel_crosststamp;
+ if (boot_cpu_has(X86_FEATURE_ART))
+ plat->crosststamp = intel_crosststamp;
+
plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN;
/* Setup MSI vector offset specific to Intel mGbE controller */
@@ -623,6 +731,8 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
static int ehl_common_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
+ struct intel_priv_data *intel_priv = plat->bsp_priv;
+
plat->rx_queues_to_use = 8;
plat->tx_queues_to_use = 8;
plat->flags |= STMMAC_FLAG_USE_PHY_WOL;
@@ -638,20 +748,29 @@ static int ehl_common_data(struct pci_dev *pdev,
plat->safety_feat_cfg->prtyen = 0;
plat->safety_feat_cfg->tmouten = 0;
+ intel_priv->tsn_lane_regs = ehl_tsn_lane_regs;
+ intel_priv->max_tsn_lane_regs = ARRAY_SIZE(ehl_tsn_lane_regs);
+
return intel_mgbe_common_data(pdev, plat);
}
static int ehl_sgmii_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
+ struct intel_priv_data *intel_priv = plat->bsp_priv;
+
plat->bus_id = 1;
plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
- plat->speed_mode_2500 = intel_speed_mode_2500;
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;
-
+ plat->mac_finish = intel_mac_finish;
plat->clk_ptp_rate = 204800000;
+ intel_priv->pid_1g.regs = pid_modphy3_1g_regs;
+ intel_priv->pid_1g.num_regs = ARRAY_SIZE(pid_modphy3_1g_regs);
+ intel_priv->pid_2p5g.regs = pid_modphy3_2p5g_regs;
+ intel_priv->pid_2p5g.num_regs = ARRAY_SIZE(pid_modphy3_2p5g_regs);
+
return ehl_common_data(pdev, plat);
}
@@ -704,10 +823,18 @@ static struct stmmac_pci_info ehl_pse0_rgmii1g_info = {
static int ehl_pse0_sgmii1g_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
+ struct intel_priv_data *intel_priv = plat->bsp_priv;
+
plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
- plat->speed_mode_2500 = intel_speed_mode_2500;
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;
+ plat->mac_finish = intel_mac_finish;
+
+ intel_priv->pid_1g.regs = pid_modphy1_1g_regs;
+ intel_priv->pid_1g.num_regs = ARRAY_SIZE(pid_modphy1_1g_regs);
+ intel_priv->pid_2p5g.regs = pid_modphy1_2p5g_regs;
+ intel_priv->pid_2p5g.num_regs = ARRAY_SIZE(pid_modphy1_2p5g_regs);
+
return ehl_pse0_common_data(pdev, plat);
}
@@ -745,10 +872,18 @@ static struct stmmac_pci_info ehl_pse1_rgmii1g_info = {
static int ehl_pse1_sgmii1g_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
+ struct intel_priv_data *intel_priv = plat->bsp_priv;
+
plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
- plat->speed_mode_2500 = intel_speed_mode_2500;
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;
+ plat->mac_finish = intel_mac_finish;
+
+ intel_priv->pid_1g.regs = pid_modphy1_1g_regs;
+ intel_priv->pid_1g.num_regs = ARRAY_SIZE(pid_modphy1_1g_regs);
+ intel_priv->pid_2p5g.regs = pid_modphy1_2p5g_regs;
+ intel_priv->pid_2p5g.num_regs = ARRAY_SIZE(pid_modphy1_2p5g_regs);
+
return ehl_pse1_common_data(pdev, plat);
}
@@ -762,7 +897,7 @@ static int tgl_common_data(struct pci_dev *pdev,
plat->rx_queues_to_use = 6;
plat->tx_queues_to_use = 4;
plat->clk_ptp_rate = 204800000;
- plat->speed_mode_2500 = intel_speed_mode_2500;
+ plat->get_interfaces = tgl_get_interfaces;
plat->safety_feat_cfg->tsoee = 1;
plat->safety_feat_cfg->mrxpee = 0;
@@ -781,7 +916,6 @@ static int tgl_sgmii_phy0_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
plat->bus_id = 1;
- plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;
return tgl_common_data(pdev, plat);
@@ -795,7 +929,6 @@ static int tgl_sgmii_phy1_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
plat->bus_id = 2;
- plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;
return tgl_common_data(pdev, plat);
@@ -809,7 +942,6 @@ static int adls_sgmii_phy0_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
plat->bus_id = 1;
- plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
/* SerDes power up and power down are done in BIOS for ADL */
@@ -824,7 +956,6 @@ static int adls_sgmii_phy1_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
plat->bus_id = 2;
- plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
/* SerDes power up and power down are done in BIOS for ADL */
@@ -834,6 +965,55 @@ static int adls_sgmii_phy1_data(struct pci_dev *pdev,
static struct stmmac_pci_info adls_sgmii1g_phy1_info = {
.setup = adls_sgmii_phy1_data,
};
+
+static int adln_common_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ struct intel_priv_data *intel_priv = plat->bsp_priv;
+
+ plat->rx_queues_to_use = 6;
+ plat->tx_queues_to_use = 4;
+ plat->clk_ptp_rate = 204800000;
+
+ plat->safety_feat_cfg->tsoee = 1;
+ plat->safety_feat_cfg->mrxpee = 0;
+ plat->safety_feat_cfg->mestee = 1;
+ plat->safety_feat_cfg->mrxee = 1;
+ plat->safety_feat_cfg->mtxee = 1;
+ plat->safety_feat_cfg->epsi = 0;
+ plat->safety_feat_cfg->edpp = 0;
+ plat->safety_feat_cfg->prtyen = 0;
+ plat->safety_feat_cfg->tmouten = 0;
+
+ intel_priv->tsn_lane_regs = adln_tsn_lane_regs;
+ intel_priv->max_tsn_lane_regs = ARRAY_SIZE(adln_tsn_lane_regs);
+
+ return intel_mgbe_common_data(pdev, plat);
+}
+
+static int adln_sgmii_phy0_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ struct intel_priv_data *intel_priv = plat->bsp_priv;
+
+ plat->bus_id = 1;
+ plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
+ plat->serdes_powerup = intel_serdes_powerup;
+ plat->serdes_powerdown = intel_serdes_powerdown;
+ plat->mac_finish = intel_mac_finish;
+
+ intel_priv->pid_1g.regs = pid_modphy1_1g_regs;
+ intel_priv->pid_1g.num_regs = ARRAY_SIZE(pid_modphy1_1g_regs);
+ intel_priv->pid_2p5g.regs = pid_modphy1_2p5g_regs;
+ intel_priv->pid_2p5g.num_regs = ARRAY_SIZE(pid_modphy1_2p5g_regs);
+
+ return adln_common_data(pdev, plat);
+}
+
+static struct stmmac_pci_info adln_sgmii1g_phy0_info = {
+ .setup = adln_sgmii_phy0_data,
+};
+
static const struct stmmac_pci_func_data galileo_stmmac_func_data[] = {
{
.func = 6,
@@ -1010,6 +1190,37 @@ static int stmmac_config_multi_msi(struct pci_dev *pdev,
return 0;
}
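+/* PCI bus-level suspend/resume steps, hooked up via plat->suspend and
+ * plat->resume; the stmmac core suspend/resume path is handled by the
+ * shared stmmac_simple_pm_ops, so only the PCI-specific handling remains
+ * here.
+ */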
+static int intel_eth_pci_suspend(struct device *dev, void *bsp_priv)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ ret = pci_save_state(pdev);
+ if (ret)
+ return ret;
+
+ pci_wake_from_d3(pdev, true);
+ pci_set_power_state(pdev, PCI_D3hot);
+ return 0;
+}
+
+static int intel_eth_pci_resume(struct device *dev, void *bsp_priv)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ pci_restore_state(pdev);
+ pci_set_power_state(pdev, PCI_D0);
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ pci_set_master(pdev);
+
+ return 0;
+}
+
/**
* intel_eth_pci_probe
*
@@ -1035,7 +1246,7 @@ static int intel_eth_pci_probe(struct pci_dev *pdev,
if (!intel_priv)
return -ENOMEM;
- plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
+ plat = stmmac_plat_dat_alloc(&pdev->dev);
if (!plat)
return -ENOMEM;
@@ -1071,6 +1282,9 @@ static int intel_eth_pci_probe(struct pci_dev *pdev,
pci_set_master(pdev);
plat->bsp_priv = intel_priv;
+ plat->suspend = intel_eth_pci_suspend;
+ plat->resume = intel_eth_pci_resume;
+
intel_priv->mdio_adhoc_addr = INTEL_MGBE_ADHOC_ADDR;
intel_priv->crossts_adj = 1;
@@ -1093,13 +1307,6 @@ static int intel_eth_pci_probe(struct pci_dev *pdev,
memset(&res, 0, sizeof(res));
res.addr = pcim_iomap_table(pdev)[0];
- if (plat->eee_usecs_rate > 0) {
- u32 tx_lpi_usec;
-
- tx_lpi_usec = (plat->eee_usecs_rate / 1000000) - 1;
- writel(tx_lpi_usec, res.addr + GMAC_1US_TIC_COUNTER);
- }
-
ret = stmmac_config_multi_msi(pdev, plat, &res);
if (ret) {
ret = stmmac_config_single_msi(pdev, plat, &res);
@@ -1141,44 +1348,6 @@ static void intel_eth_pci_remove(struct pci_dev *pdev)
clk_unregister_fixed_rate(priv->plat->stmmac_clk);
}
-static int __maybe_unused intel_eth_pci_suspend(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- int ret;
-
- ret = stmmac_suspend(dev);
- if (ret)
- return ret;
-
- ret = pci_save_state(pdev);
- if (ret)
- return ret;
-
- pci_wake_from_d3(pdev, true);
- pci_set_power_state(pdev, PCI_D3hot);
- return 0;
-}
-
-static int __maybe_unused intel_eth_pci_resume(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- int ret;
-
- pci_restore_state(pdev);
- pci_set_power_state(pdev, PCI_D0);
-
- ret = pcim_enable_device(pdev);
- if (ret)
- return ret;
-
- pci_set_master(pdev);
-
- return stmmac_resume(dev);
-}
-
-static SIMPLE_DEV_PM_OPS(intel_eth_pm_ops, intel_eth_pci_suspend,
- intel_eth_pci_resume);
-
#define PCI_DEVICE_ID_INTEL_QUARK 0x0937
#define PCI_DEVICE_ID_INTEL_EHL_RGMII1G 0x4b30
#define PCI_DEVICE_ID_INTEL_EHL_SGMII1G 0x4b31
@@ -1216,8 +1385,8 @@ static const struct pci_device_id intel_eth_pci_id_table[] = {
{ PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_1, &tgl_sgmii1g_phy1_info) },
{ PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_0, &adls_sgmii1g_phy0_info) },
{ PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_1, &adls_sgmii1g_phy1_info) },
- { PCI_DEVICE_DATA(INTEL, ADLN_SGMII1G, &tgl_sgmii1g_phy0_info) },
- { PCI_DEVICE_DATA(INTEL, RPLP_SGMII1G, &tgl_sgmii1g_phy0_info) },
+ { PCI_DEVICE_DATA(INTEL, ADLN_SGMII1G, &adln_sgmii1g_phy0_info) },
+ { PCI_DEVICE_DATA(INTEL, RPLP_SGMII1G, &adln_sgmii1g_phy0_info) },
{}
};
MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table);
@@ -1228,7 +1397,7 @@ static struct pci_driver intel_eth_pci_driver = {
.probe = intel_eth_pci_probe,
.remove = intel_eth_pci_remove,
.driver = {
- .pm = &intel_eth_pm_ops,
+ .pm = &stmmac_simple_pm_ops,
},
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h
index 0a37987478c1..7511c224b312 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h
@@ -21,7 +21,6 @@
#define SERDES_RATE_MASK GENMASK(9, 8)
#define SERDES_PCLK_MASK GENMASK(14, 12) /* PCLK rate to PHY */
#define SERDES_LINK_MODE_MASK GENMASK(2, 1)
-#define SERDES_LINK_MODE_SHIFT 1
#define SERDES_PWR_ST_SHIFT 4
#define SERDES_PWR_ST_P0 0x0
#define SERDES_PWR_ST_P3 0x3
@@ -50,4 +49,33 @@
#define PCH_PTP_CLK_FREQ_19_2MHZ (GMAC_GPO0)
#define PCH_PTP_CLK_FREQ_200MHZ (0)
+/* Modphy Register index */
+#define R_PCH_FIA_15_PCR_LOS1_REG_BASE 8
+#define R_PCH_FIA_15_PCR_LOS2_REG_BASE 9
+#define R_PCH_FIA_15_PCR_LOS3_REG_BASE 10
+#define R_PCH_FIA_15_PCR_LOS4_REG_BASE 11
+#define R_PCH_FIA_15_PCR_LOS5_REG_BASE 12
+#define B_PCH_FIA_PCR_L0O GENMASK(3, 0)
+#define PID_MODPHY1_B_MODPHY_PCR_LCPLL_DWORD0 13
+#define PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD2 14
+#define PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD7 15
+#define PID_MODPHY1_N_MODPHY_PCR_LPPLL_DWORD10 16
+#define PID_MODPHY1_N_MODPHY_PCR_CMN_ANA_DWORD30 17
+#define PID_MODPHY3_B_MODPHY_PCR_LCPLL_DWORD0 18
+#define PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD2 19
+#define PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD7 20
+#define PID_MODPHY3_N_MODPHY_PCR_LPPLL_DWORD10 21
+#define PID_MODPHY3_N_MODPHY_PCR_CMN_ANA_DWORD30 22
+
+#define B_MODPHY_PCR_LCPLL_DWORD0_1G 0x46AAAA41
+#define N_MODPHY_PCR_LCPLL_DWORD2_1G 0x00000139
+#define N_MODPHY_PCR_LCPLL_DWORD7_1G 0x002A0003
+#define N_MODPHY_PCR_LPPLL_DWORD10_1G 0x00170008
+#define N_MODPHY_PCR_CMN_ANA_DWORD30_1G 0x0000D4AC
+#define B_MODPHY_PCR_LCPLL_DWORD0_2P5G 0x58555551
+#define N_MODPHY_PCR_LCPLL_DWORD2_2P5G 0x0000012D
+#define N_MODPHY_PCR_LCPLL_DWORD7_2P5G 0x001F0003
+#define N_MODPHY_PCR_LPPLL_DWORD10_2P5G 0x00170008
+#define N_MODPHY_PCR_CMN_ANA_DWORD30_2P5G 0x8200ACAC
+
#endif /* __DWMAC_INTEL_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index 281687d7083b..c05f85534f0c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -112,7 +112,7 @@ struct ipq806x_gmac {
phy_interface_t phy_mode;
};
-static int get_clk_div_sgmii(struct ipq806x_gmac *gmac, unsigned int speed)
+static int get_clk_div_sgmii(struct ipq806x_gmac *gmac, int speed)
{
struct device *dev = &gmac->pdev->dev;
int div;
@@ -138,7 +138,7 @@ static int get_clk_div_sgmii(struct ipq806x_gmac *gmac, unsigned int speed)
return div;
}
-static int get_clk_div_rgmii(struct ipq806x_gmac *gmac, unsigned int speed)
+static int get_clk_div_rgmii(struct ipq806x_gmac *gmac, int speed)
{
struct device *dev = &gmac->pdev->dev;
int div;
@@ -164,13 +164,16 @@ static int get_clk_div_rgmii(struct ipq806x_gmac *gmac, unsigned int speed)
return div;
}
-static int ipq806x_gmac_set_speed(struct ipq806x_gmac *gmac, unsigned int speed)
+static int ipq806x_gmac_set_speed(struct ipq806x_gmac *gmac, int speed)
{
uint32_t clk_bits, val;
int div;
switch (gmac->phy_mode) {
case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
div = get_clk_div_rgmii(gmac, speed);
clk_bits = NSS_COMMON_CLK_GATE_RGMII_RX_EN(gmac->id) |
NSS_COMMON_CLK_GATE_RGMII_TX_EN(gmac->id);
@@ -208,16 +211,12 @@ static int ipq806x_gmac_set_speed(struct ipq806x_gmac *gmac, unsigned int speed)
return 0;
}
-static int ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac)
+static int ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac,
+ struct plat_stmmacenet_data *plat_dat)
{
struct device *dev = &gmac->pdev->dev;
- int ret;
- ret = of_get_phy_mode(dev->of_node, &gmac->phy_mode);
- if (ret) {
- dev_err(dev, "missing phy mode property\n");
- return -EINVAL;
- }
+ gmac->phy_mode = plat_dat->phy_interface;
if (of_property_read_u32(dev->of_node, "qcom,id", &gmac->id) < 0) {
dev_err(dev, "missing qcom id property\n");
@@ -257,11 +256,12 @@ static int ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac)
return PTR_ERR_OR_ZERO(gmac->qsgmii_csr);
}
-static void ipq806x_gmac_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
+static int ipq806x_gmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed)
{
- struct ipq806x_gmac *gmac = priv;
+ struct ipq806x_gmac *gmac = bsp_priv;
- ipq806x_gmac_set_speed(gmac, speed);
+ return ipq806x_gmac_set_speed(gmac, speed);
}
static int
@@ -394,7 +394,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
gmac->pdev = pdev;
- err = ipq806x_gmac_of_parse(gmac);
+ err = ipq806x_gmac_of_parse(gmac, plat_dat);
if (err) {
dev_err(dev, "device tree parsing error\n");
return err;
@@ -410,6 +410,9 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
val |= NSS_COMMON_GMAC_CTL_CSYS_REQ;
switch (gmac->phy_mode) {
case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
val |= NSS_COMMON_GMAC_CTL_PHY_IFACE_SEL;
break;
case PHY_INTERFACE_MODE_SGMII:
@@ -425,6 +428,9 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
val &= ~(1 << NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id));
switch (gmac->phy_mode) {
case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
val |= NSS_COMMON_CLK_SRC_CTRL_RGMII(gmac->id) <<
NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id);
break;
@@ -442,6 +448,9 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
val |= NSS_COMMON_CLK_GATE_PTP_EN(gmac->id);
switch (gmac->phy_mode) {
case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
val |= NSS_COMMON_CLK_GATE_RGMII_RX_EN(gmac->id) |
NSS_COMMON_CLK_GATE_RGMII_TX_EN(gmac->id);
break;
@@ -464,9 +473,9 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
return err;
}
- plat_dat->has_gmac = true;
+ plat_dat->core_type = DWMAC_CORE_GMAC;
plat_dat->bsp_priv = gmac;
- plat_dat->fix_mac_speed = ipq806x_gmac_fix_mac_speed;
+ plat_dat->set_clk_tx_rate = ipq806x_gmac_set_clk_tx_rate;
plat_dat->multicast_filter_bins = 0;
plat_dat->tx_fifo_size = 8192;
plat_dat->rx_fifo_size = 8192;
@@ -487,7 +496,7 @@ MODULE_DEVICE_TABLE(of, ipq806x_gmac_dwmac_match);
static struct platform_driver ipq806x_gmac_dwmac_driver = {
.probe = ipq806x_gmac_probe,
- .remove_new = stmmac_pltfr_remove,
+ .remove = stmmac_pltfr_remove,
.driver = {
.name = "ipq806x-gmac-dwmac",
.pm = &stmmac_pltfr_pm_ops,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
index 9e40c28d453a..107a7c84ace8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
@@ -8,223 +8,618 @@
#include <linux/device.h>
#include <linux/of_irq.h>
#include "stmmac.h"
+#include "stmmac_libpci.h"
+#include "dwmac_dma.h"
+#include "dwmac1000.h"
+
+#define DRIVER_NAME "dwmac-loongson-pci"
+
+/* Normal Loongson Tx Summary */
+#define DMA_INTR_ENA_NIE_TX_LOONGSON 0x00040000
+/* Normal Loongson Rx Summary */
+#define DMA_INTR_ENA_NIE_RX_LOONGSON 0x00020000
+
+#define DMA_INTR_NORMAL_LOONGSON (DMA_INTR_ENA_NIE_TX_LOONGSON | \
+ DMA_INTR_ENA_NIE_RX_LOONGSON | \
+ DMA_INTR_ENA_RIE | DMA_INTR_ENA_TIE)
+
+/* Abnormal Loongson Tx Summary */
+#define DMA_INTR_ENA_AIE_TX_LOONGSON 0x00010000
+/* Abnormal Loongson Rx Summary */
+#define DMA_INTR_ENA_AIE_RX_LOONGSON 0x00008000
+
+#define DMA_INTR_ABNORMAL_LOONGSON (DMA_INTR_ENA_AIE_TX_LOONGSON | \
+ DMA_INTR_ENA_AIE_RX_LOONGSON | \
+ DMA_INTR_ENA_FBE | DMA_INTR_ENA_UNE)
+
+#define DMA_INTR_DEFAULT_MASK_LOONGSON (DMA_INTR_NORMAL_LOONGSON | \
+ DMA_INTR_ABNORMAL_LOONGSON)
+
+/* Normal Loongson Tx Interrupt Summary */
+#define DMA_STATUS_NIS_TX_LOONGSON 0x00040000
+/* Normal Loongson Rx Interrupt Summary */
+#define DMA_STATUS_NIS_RX_LOONGSON 0x00020000
+
+/* Abnormal Loongson Tx Interrupt Summary */
+#define DMA_STATUS_AIS_TX_LOONGSON 0x00010000
+/* Abnormal Loongson Rx Interrupt Summary */
+#define DMA_STATUS_AIS_RX_LOONGSON 0x00008000
+
+/* Fatal Loongson Tx Bus Error Interrupt */
+#define DMA_STATUS_FBI_TX_LOONGSON 0x00002000
+/* Fatal Loongson Rx Bus Error Interrupt */
+#define DMA_STATUS_FBI_RX_LOONGSON 0x00001000
+
+#define DMA_STATUS_MSK_COMMON_LOONGSON (DMA_STATUS_NIS_TX_LOONGSON | \
+ DMA_STATUS_NIS_RX_LOONGSON | \
+ DMA_STATUS_AIS_TX_LOONGSON | \
+ DMA_STATUS_AIS_RX_LOONGSON | \
+ DMA_STATUS_FBI_TX_LOONGSON | \
+ DMA_STATUS_FBI_RX_LOONGSON)
+
+#define DMA_STATUS_MSK_RX_LOONGSON (DMA_STATUS_ERI | DMA_STATUS_RWT | \
+ DMA_STATUS_RPS | DMA_STATUS_RU | \
+ DMA_STATUS_RI | DMA_STATUS_OVF | \
+ DMA_STATUS_MSK_COMMON_LOONGSON)
+
+#define DMA_STATUS_MSK_TX_LOONGSON (DMA_STATUS_ETI | DMA_STATUS_UNF | \
+ DMA_STATUS_TJT | DMA_STATUS_TU | \
+ DMA_STATUS_TPS | DMA_STATUS_TI | \
+ DMA_STATUS_MSK_COMMON_LOONGSON)
+
+#define PCI_DEVICE_ID_LOONGSON_GMAC1 0x7a03
+#define PCI_DEVICE_ID_LOONGSON_GMAC2 0x7a23
+#define PCI_DEVICE_ID_LOONGSON_GNET 0x7a13
+#define DWMAC_CORE_MULTICHAN_V1 0x10 /* Loongson custom ID 0x10 */
+#define DWMAC_CORE_MULTICHAN_V2 0x12 /* Loongson custom ID 0x12 */
+
+struct loongson_data {
+ u32 multichan;
+ u32 loongson_id;
+ struct device *dev;
+};
+
+struct stmmac_pci_info {
+ int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat);
+};
-static int loongson_default_data(struct plat_stmmacenet_data *plat)
+static void loongson_default_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
{
- plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
- plat->has_gmac = 1;
+ struct loongson_data *ld = plat->bsp_priv;
+
+	/* Get the bus_id; it can be overwritten later */
+ plat->bus_id = pci_dev_id(pdev);
+
+ /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
+ plat->clk_csr = STMMAC_CSR_20_35M;
+ plat->core_type = DWMAC_CORE_GMAC;
plat->force_sf_dma_mode = 1;
- /* Set default value for multicast hash bins */
- plat->multicast_filter_bins = HASH_TABLE_SIZE;
+ /* Increase the default value for multicast hash bins */
+ plat->multicast_filter_bins = 256;
- /* Set default value for unicast filter entries */
- plat->unicast_filter_entries = 1;
+ plat->clk_ref_rate = 125000000;
+ plat->clk_ptp_rate = 125000000;
- /* Set the maxmtu to a default of JUMBO_LEN */
- plat->maxmtu = JUMBO_LEN;
+ plat->dma_cfg->pbl = 32;
+ plat->dma_cfg->pblx8 = true;
- /* Set default number of RX and TX queues to use */
- plat->tx_queues_to_use = 1;
- plat->rx_queues_to_use = 1;
+ switch (ld->loongson_id) {
+ case DWMAC_CORE_MULTICHAN_V1:
+ ld->multichan = 1;
+ plat->rx_queues_to_use = 8;
+ plat->tx_queues_to_use = 8;
- /* Disable Priority config by default */
- plat->tx_queues_cfg[0].use_prio = false;
- plat->rx_queues_cfg[0].use_prio = false;
+		/* Only channel 0 supports checksum offload, so mark it as
+		 * unsupported on the remaining channels when multiple
+		 * channels are used.
+		 */
+ for (int i = 1; i < 8; i++)
+ plat->tx_queues_cfg[i].coe_unsupported = 1;
- /* Disable RX queues routing by default */
- plat->rx_queues_cfg[0].pkt_route = 0x0;
+ break;
+ case DWMAC_CORE_MULTICHAN_V2:
+ ld->multichan = 1;
+ plat->rx_queues_to_use = 4;
+ plat->tx_queues_to_use = 4;
+ break;
+ default:
+ ld->multichan = 0;
+ break;
+ }
+}
- /* Default to phy auto-detection */
- plat->phy_addr = -1;
+static int loongson_gmac_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ loongson_default_data(pdev, plat);
- plat->dma_cfg->pbl = 32;
- plat->dma_cfg->pblx8 = true;
+ plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
- plat->multicast_filter_bins = 256;
return 0;
}
-static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static struct stmmac_pci_info loongson_gmac_pci_info = {
+ .setup = loongson_gmac_data,
+};
+
+static void loongson_gnet_fix_speed(void *priv, int speed, unsigned int mode)
{
- struct plat_stmmacenet_data *plat;
- struct stmmac_resources res;
- struct device_node *np;
- int ret, i, phy_mode;
+ struct loongson_data *ld = (struct loongson_data *)priv;
+ struct net_device *ndev = dev_get_drvdata(ld->dev);
+ struct stmmac_priv *ptr = netdev_priv(ndev);
+
+	/* The integrated PHY has a problem switching from the lower speeds
+	 * to 1000Mbps mode: the speed-up requires a PHY link
+	 * re-negotiation.
+	 */
+ if (speed == SPEED_1000) {
+ if (readl(ptr->ioaddr + MAC_CTRL_REG) &
+ GMAC_CONTROL_PS)
+			/* Work around the hardware bug by restarting autoneg */
+ phy_restart_aneg(ndev->phydev);
+ }
+}
- np = dev_of_node(&pdev->dev);
+static int loongson_gnet_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ loongson_default_data(pdev, plat);
+
+ plat->phy_interface = PHY_INTERFACE_MODE_GMII;
+ plat->mdio_bus_data->phy_mask = ~(u32)BIT(2);
+ plat->fix_mac_speed = loongson_gnet_fix_speed;
+
+ return 0;
+}
+
+static struct stmmac_pci_info loongson_gnet_pci_info = {
+ .setup = loongson_gnet_data,
+};
+
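+/* Per-channel variant of the dwmac1000 DMA init: program the channel's
+ * bus-mode register (PBL, burst and descriptor settings) and write the
+ * Loongson-specific default interrupt enable mask to the channel's CSR7.
+ */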
+static void loongson_dwmac_dma_init_channel(struct stmmac_priv *priv,
+ void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ u32 chan)
+{
+ int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
+ int rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
+ u32 value;
+
+ value = readl(ioaddr + DMA_CHAN_BUS_MODE(chan));
+
+ if (dma_cfg->pblx8)
+ value |= DMA_BUS_MODE_MAXPBL;
+
+ value |= DMA_BUS_MODE_USP;
+ value &= ~(DMA_BUS_MODE_PBL_MASK | DMA_BUS_MODE_RPBL_MASK);
+ value |= (txpbl << DMA_BUS_MODE_PBL_SHIFT);
+ value |= (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);
- if (!np) {
- pr_info("dwmac_loongson_pci: No OF node\n");
- return -ENODEV;
+ /* Set the Fixed burst mode */
+ if (dma_cfg->fixed_burst)
+ value |= DMA_BUS_MODE_FB;
+
+ /* Mixed Burst has no effect when fb is set */
+ if (dma_cfg->mixed_burst)
+ value |= DMA_BUS_MODE_MB;
+
+ if (dma_cfg->atds)
+ value |= DMA_BUS_MODE_ATDS;
+
+ if (dma_cfg->aal)
+ value |= DMA_BUS_MODE_AAL;
+
+ writel(value, ioaddr + DMA_CHAN_BUS_MODE(chan));
+
+ /* Mask interrupts by writing to CSR7 */
+ writel(DMA_INTR_DEFAULT_MASK_LOONGSON, ioaddr +
+ DMA_CHAN_INTR_ENA(chan));
+}
+
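+/* Like the dwmac1000 DMA interrupt handler, but using the Loongson
+ * per-direction NIS/AIS/FBI summary bits so RX and TX events can be
+ * dispatched independently per channel.
+ */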
+static int loongson_dwmac_dma_interrupt(struct stmmac_priv *priv,
+ void __iomem *ioaddr,
+ struct stmmac_extra_stats *x,
+ u32 chan, u32 dir)
+{
+ struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats);
+ u32 abnor_intr_status;
+ u32 nor_intr_status;
+ u32 fb_intr_status;
+ u32 intr_status;
+ int ret = 0;
+
+ /* read the status register (CSR5) */
+ intr_status = readl(ioaddr + DMA_CHAN_STATUS(chan));
+
+ if (dir == DMA_DIR_RX)
+ intr_status &= DMA_STATUS_MSK_RX_LOONGSON;
+ else if (dir == DMA_DIR_TX)
+ intr_status &= DMA_STATUS_MSK_TX_LOONGSON;
+
+ nor_intr_status = intr_status & (DMA_STATUS_NIS_TX_LOONGSON |
+ DMA_STATUS_NIS_RX_LOONGSON);
+ abnor_intr_status = intr_status & (DMA_STATUS_AIS_TX_LOONGSON |
+ DMA_STATUS_AIS_RX_LOONGSON);
+ fb_intr_status = intr_status & (DMA_STATUS_FBI_TX_LOONGSON |
+ DMA_STATUS_FBI_RX_LOONGSON);
+
+ /* ABNORMAL interrupts */
+ if (unlikely(abnor_intr_status)) {
+ if (unlikely(intr_status & DMA_STATUS_UNF)) {
+ ret = tx_hard_error_bump_tc;
+ x->tx_undeflow_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_TJT))
+ x->tx_jabber_irq++;
+ if (unlikely(intr_status & DMA_STATUS_OVF))
+ x->rx_overflow_irq++;
+ if (unlikely(intr_status & DMA_STATUS_RU))
+ x->rx_buf_unav_irq++;
+ if (unlikely(intr_status & DMA_STATUS_RPS))
+ x->rx_process_stopped_irq++;
+ if (unlikely(intr_status & DMA_STATUS_RWT))
+ x->rx_watchdog_irq++;
+ if (unlikely(intr_status & DMA_STATUS_ETI))
+ x->tx_early_irq++;
+ if (unlikely(intr_status & DMA_STATUS_TPS)) {
+ x->tx_process_stopped_irq++;
+ ret = tx_hard_error;
+ }
+ if (unlikely(fb_intr_status)) {
+ x->fatal_bus_error_irq++;
+ ret = tx_hard_error;
+ }
+ }
+ /* TX/RX NORMAL interrupts */
+ if (likely(nor_intr_status)) {
+ if (likely(intr_status & DMA_STATUS_RI)) {
+ u32 value = readl(ioaddr + DMA_INTR_ENA);
+			/* Only schedule NAPI on a real RIE event. */
+ if (likely(value & DMA_INTR_ENA_RIE)) {
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->rx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
+ ret |= handle_rx;
+ }
+ }
+ if (likely(intr_status & DMA_STATUS_TI)) {
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->tx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
+ ret |= handle_tx;
+ }
+ if (unlikely(intr_status & DMA_STATUS_ERI))
+ x->rx_early_irq++;
}
+	/* Interrupts from the optional hardware blocks should be disabled */
+ if (unlikely(intr_status &
+ (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI)))
+ pr_warn("%s: unexpected status %08x\n", __func__, intr_status);
- plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
- if (!plat)
- return -ENOMEM;
+	/* Clear the interrupt by writing a logic 1 to CSR5[19:0] */
+ writel((intr_status & 0x7ffff), ioaddr + DMA_CHAN_STATUS(chan));
- plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
- sizeof(*plat->mdio_bus_data),
- GFP_KERNEL);
- if (!plat->mdio_bus_data)
+ return ret;
+}
+
+static int loongson_dwmac_setup(void *apriv, struct mac_device_info *mac)
+{
+ struct stmmac_priv *priv = apriv;
+ struct stmmac_dma_ops *dma;
+ struct loongson_data *ld;
+ struct pci_dev *pdev;
+
+ ld = priv->plat->bsp_priv;
+ pdev = to_pci_dev(priv->device);
+
+ dma = devm_kzalloc(priv->device, sizeof(*dma), GFP_KERNEL);
+ if (!dma)
return -ENOMEM;
- plat->mdio_node = of_get_child_by_name(np, "mdio");
- if (plat->mdio_node) {
- dev_info(&pdev->dev, "Found MDIO subnode\n");
- plat->mdio_bus_data->needs_reset = true;
+	/* The Loongson GMAC and GNET devices are based on the DW GMAC
+	 * v3.50a and v3.73a IP-cores, but the HW designers changed the
+	 * GMAC_VERSION.SNPSVER field to the custom 0x10/0x12 values on the
+	 * controllers with the multi-channel feature to emphasize the
+	 * differences: multiple DMA channels, the AV feature and the
+	 * GMAC_INT_STATUS CSR flags layout. Restore the original value so
+	 * the correct HW interface is selected.
+	 */
+ if (ld->multichan) {
+ priv->synopsys_id = DWMAC_CORE_3_70;
+ *dma = dwmac1000_dma_ops;
+ dma->init_chan = loongson_dwmac_dma_init_channel;
+ dma->dma_interrupt = loongson_dwmac_dma_interrupt;
+ mac->dma = dma;
}
- plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg), GFP_KERNEL);
- if (!plat->dma_cfg) {
- ret = -ENOMEM;
- goto err_put_node;
+ priv->dev->priv_flags |= IFF_UNICAST_FLT;
+
+	/* Pre-initialize the respective "mac" fields as is done in
+	 * dwmac1000_setup()
+	 */
+ mac->pcsr = priv->ioaddr;
+ mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
+ mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
+ mac->mcast_bits_log2 = 0;
+
+ if (mac->multicast_filter_bins)
+ mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
+
+	/* Loongson GMAC doesn't support flow control. Loongson GNET
+	 * without the multi-channel feature doesn't support half-duplex
+	 * link modes.
+	 */
+ if (pdev->device != PCI_DEVICE_ID_LOONGSON_GNET) {
+ mac->link.caps = MAC_10 | MAC_100 | MAC_1000;
+ } else {
+ if (ld->multichan)
+ mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000;
+ else
+ mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10FD | MAC_100FD | MAC_1000FD;
}
- /* Enable pci device */
- ret = pci_enable_device(pdev);
- if (ret) {
- dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n", __func__);
- goto err_put_node;
- }
+ mac->link.duplex = GMAC_CONTROL_DM;
+ mac->link.speed10 = GMAC_CONTROL_PS;
+ mac->link.speed100 = GMAC_CONTROL_PS | GMAC_CONTROL_FES;
+ mac->link.speed1000 = 0;
+ mac->link.speed_mask = GMAC_CONTROL_PS | GMAC_CONTROL_FES;
+ mac->mii.addr = GMAC_MII_ADDR;
+ mac->mii.data = GMAC_MII_DATA;
+ mac->mii.addr_shift = 11;
+ mac->mii.addr_mask = 0x0000F800;
+ mac->mii.reg_shift = 6;
+ mac->mii.reg_mask = 0x000007C0;
+ mac->mii.clk_csr_shift = 2;
+ mac->mii.clk_csr_mask = GENMASK(5, 2);
- /* Get the base address of device */
- for (i = 0; i < PCI_STD_NUM_BARS; i++) {
- if (pci_resource_len(pdev, i) == 0)
- continue;
- ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
- if (ret)
- goto err_disable_device;
- break;
+ return 0;
+}
+
+static int loongson_dwmac_msi_config(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat,
+ struct stmmac_resources *res)
+{
+ int i, ch_num, ret, vecs;
+
+ ch_num = min(plat->tx_queues_to_use, plat->rx_queues_to_use);
+
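+	/* Vector 0 carries the common MAC interrupt and each channel gets
+	 * an RX/TX vector pair after it; multi-vector MSI requires a
+	 * power-of-two allocation.
+	 */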
+ vecs = roundup_pow_of_two(ch_num * 2 + 1);
+ ret = pci_alloc_irq_vectors(pdev, vecs, vecs, PCI_IRQ_MSI);
+ if (ret < 0) {
+ dev_warn(&pdev->dev, "Failed to allocate MSI IRQs\n");
+ return ret;
}
- plat->bus_id = of_alias_get_id(np, "ethernet");
- if (plat->bus_id < 0)
- plat->bus_id = pci_dev_id(pdev);
+ res->irq = pci_irq_vector(pdev, 0);
- phy_mode = device_get_phy_mode(&pdev->dev);
- if (phy_mode < 0) {
- dev_err(&pdev->dev, "phy_mode not found\n");
- ret = phy_mode;
- goto err_disable_device;
+ for (i = 0; i < ch_num; i++) {
+ res->rx_irq[ch_num - 1 - i] = pci_irq_vector(pdev, 1 + i * 2);
+ }
+
+ for (i = 0; i < ch_num; i++) {
+ res->tx_irq[ch_num - 1 - i] = pci_irq_vector(pdev, 2 + i * 2);
}
- plat->phy_interface = phy_mode;
- plat->mac_interface = PHY_INTERFACE_MODE_GMII;
+ plat->flags |= STMMAC_FLAG_MULTI_MSI_EN;
- pci_set_master(pdev);
+ return 0;
+}
- loongson_default_data(plat);
- pci_enable_msi(pdev);
- memset(&res, 0, sizeof(res));
- res.addr = pcim_iomap_table(pdev)[0];
+static void loongson_dwmac_msi_clear(struct pci_dev *pdev)
+{
+ pci_free_irq_vectors(pdev);
+}
- res.irq = of_irq_get_byname(np, "macirq");
- if (res.irq < 0) {
+static int loongson_dwmac_dt_config(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat,
+ struct stmmac_resources *res)
+{
+ struct device_node *np = dev_of_node(&pdev->dev);
+ int ret;
+
+ plat->mdio_node = of_get_child_by_name(np, "mdio");
+ if (plat->mdio_node) {
+ dev_info(&pdev->dev, "Found MDIO subnode\n");
+ plat->mdio_bus_data->needs_reset = true;
+ }
+
+ ret = of_alias_get_id(np, "ethernet");
+ if (ret >= 0)
+ plat->bus_id = ret;
+
+ res->irq = of_irq_get_byname(np, "macirq");
+ if (res->irq < 0) {
dev_err(&pdev->dev, "IRQ macirq not found\n");
ret = -ENODEV;
- goto err_disable_msi;
+ goto err_put_node;
}
- res.wol_irq = of_irq_get_byname(np, "eth_wake_irq");
- if (res.wol_irq < 0) {
- dev_info(&pdev->dev, "IRQ eth_wake_irq not found, using macirq\n");
- res.wol_irq = res.irq;
+ res->wol_irq = of_irq_get_byname(np, "eth_wake_irq");
+ if (res->wol_irq < 0) {
+ dev_info(&pdev->dev,
+ "IRQ eth_wake_irq not found, using macirq\n");
+ res->wol_irq = res->irq;
}
- res.lpi_irq = of_irq_get_byname(np, "eth_lpi");
- if (res.lpi_irq < 0) {
+ res->lpi_irq = of_irq_get_byname(np, "eth_lpi");
+ if (res->lpi_irq < 0) {
dev_err(&pdev->dev, "IRQ eth_lpi not found\n");
ret = -ENODEV;
- goto err_disable_msi;
+ goto err_put_node;
}
- ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
- if (ret)
- goto err_disable_msi;
+ ret = device_get_phy_mode(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "phy_mode not found\n");
+ ret = -ENODEV;
+ goto err_put_node;
+ }
- return ret;
+ plat->phy_interface = ret;
+
+ return 0;
-err_disable_msi:
- pci_disable_msi(pdev);
-err_disable_device:
- pci_disable_device(pdev);
err_put_node:
of_node_put(plat->mdio_node);
+
return ret;
}
-static void loongson_dwmac_remove(struct pci_dev *pdev)
+static void loongson_dwmac_dt_clear(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
{
- struct net_device *ndev = dev_get_drvdata(&pdev->dev);
- struct stmmac_priv *priv = netdev_priv(ndev);
- int i;
+ of_node_put(plat->mdio_node);
+}
- of_node_put(priv->plat->mdio_node);
- stmmac_dvr_remove(&pdev->dev);
+static int loongson_dwmac_acpi_config(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat,
+ struct stmmac_resources *res)
+{
+ if (!pdev->irq)
+ return -EINVAL;
- for (i = 0; i < PCI_STD_NUM_BARS; i++) {
- if (pci_resource_len(pdev, i) == 0)
- continue;
- pcim_iounmap_regions(pdev, BIT(i));
- break;
- }
+ res->irq = pdev->irq;
- pci_disable_msi(pdev);
- pci_disable_device(pdev);
+ return 0;
}
-static int __maybe_unused loongson_dwmac_suspend(struct device *dev)
+/* Loongson's DWMAC device may take nearly two seconds to complete DMA reset */
+static int loongson_dwmac_fix_reset(struct stmmac_priv *priv, void __iomem *ioaddr)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- int ret;
+ u32 value = readl(ioaddr + DMA_BUS_MODE);
- ret = stmmac_suspend(dev);
- if (ret)
- return ret;
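+	/* If the soft-reset bit is already set before requesting a reset,
+	 * a previous reset never completed, which on this hardware means
+	 * the PHY clock is not running.
+	 */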
+ if (value & DMA_BUS_MODE_SFT_RESET) {
+ netdev_err(priv->dev, "the PHY clock is missing\n");
+ return -EINVAL;
+ }
- ret = pci_save_state(pdev);
- if (ret)
- return ret;
+ value |= DMA_BUS_MODE_SFT_RESET;
+ writel(value, ioaddr + DMA_BUS_MODE);
- pci_disable_device(pdev);
- pci_wake_from_d3(pdev, true);
- return 0;
+ return readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
+ !(value & DMA_BUS_MODE_SFT_RESET),
+ 10000, 2000000);
}
-static int __maybe_unused loongson_dwmac_resume(struct device *dev)
+static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
- struct pci_dev *pdev = to_pci_dev(dev);
+ struct plat_stmmacenet_data *plat;
+ struct stmmac_resources res = {};
+ struct stmmac_pci_info *info;
+ struct loongson_data *ld;
int ret;
- pci_restore_state(pdev);
- pci_set_power_state(pdev, PCI_D0);
+ plat = stmmac_plat_dat_alloc(&pdev->dev);
+ if (!plat)
+ return -ENOMEM;
+
+ plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
+ sizeof(*plat->mdio_bus_data),
+ GFP_KERNEL);
+ if (!plat->mdio_bus_data)
+ return -ENOMEM;
+
+ plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg), GFP_KERNEL);
+ if (!plat->dma_cfg)
+ return -ENOMEM;
+
+ ld = devm_kzalloc(&pdev->dev, sizeof(*ld), GFP_KERNEL);
+ if (!ld)
+ return -ENOMEM;
+ /* Enable pci device */
ret = pci_enable_device(pdev);
- if (ret)
+ if (ret) {
+ dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n", __func__);
return ret;
+ }
pci_set_master(pdev);
- return stmmac_resume(dev);
+ /* Get the base address of device */
+ res.addr = pcim_iomap_region(pdev, 0, DRIVER_NAME);
+ ret = PTR_ERR_OR_ZERO(res.addr);
+ if (ret)
+ goto err_disable_device;
+
+ plat->bsp_priv = ld;
+ plat->mac_setup = loongson_dwmac_setup;
+ plat->fix_soc_reset = loongson_dwmac_fix_reset;
+ plat->suspend = stmmac_pci_plat_suspend;
+ plat->resume = stmmac_pci_plat_resume;
+ ld->dev = &pdev->dev;
+ ld->loongson_id = readl(res.addr + GMAC_VERSION) & 0xff;
+
+ info = (struct stmmac_pci_info *)id->driver_data;
+ ret = info->setup(pdev, plat);
+ if (ret)
+ goto err_disable_device;
+
+ plat->tx_fifo_size = SZ_16K * plat->tx_queues_to_use;
+ plat->rx_fifo_size = SZ_16K * plat->rx_queues_to_use;
+
+ if (dev_of_node(&pdev->dev))
+ ret = loongson_dwmac_dt_config(pdev, plat, &res);
+ else
+ ret = loongson_dwmac_acpi_config(pdev, plat, &res);
+ if (ret)
+ goto err_disable_device;
+
+	/* Fall back to the common MAC IRQ if per-channel MSI allocation fails */
+ if (ld->multichan)
+ loongson_dwmac_msi_config(pdev, plat, &res);
+
+ ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
+ if (ret)
+ goto err_plat_clear;
+
+ return 0;
+
+err_plat_clear:
+ if (dev_of_node(&pdev->dev))
+ loongson_dwmac_dt_clear(pdev, plat);
+ if (ld->multichan)
+ loongson_dwmac_msi_clear(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+ return ret;
}
-static SIMPLE_DEV_PM_OPS(loongson_dwmac_pm_ops, loongson_dwmac_suspend,
- loongson_dwmac_resume);
+static void loongson_dwmac_remove(struct pci_dev *pdev)
+{
+ struct net_device *ndev = dev_get_drvdata(&pdev->dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ struct loongson_data *ld;
+
+ ld = priv->plat->bsp_priv;
+ stmmac_dvr_remove(&pdev->dev);
+
+ if (dev_of_node(&pdev->dev))
+ loongson_dwmac_dt_clear(pdev, priv->plat);
+
+ if (ld->multichan)
+ loongson_dwmac_msi_clear(pdev);
+
+ pci_disable_device(pdev);
+}
static const struct pci_device_id loongson_dwmac_id_table[] = {
- { PCI_VDEVICE(LOONGSON, 0x7a03) },
+ { PCI_DEVICE_DATA(LOONGSON, GMAC1, &loongson_gmac_pci_info) },
+ { PCI_DEVICE_DATA(LOONGSON, GMAC2, &loongson_gmac_pci_info) },
+ { PCI_DEVICE_DATA(LOONGSON, GNET, &loongson_gnet_pci_info) },
{}
};
MODULE_DEVICE_TABLE(pci, loongson_dwmac_id_table);
static struct pci_driver loongson_dwmac_driver = {
- .name = "dwmac-loongson-pci",
+ .name = DRIVER_NAME,
.id_table = loongson_dwmac_id_table,
.probe = loongson_dwmac_probe,
.remove = loongson_dwmac_remove,
.driver = {
- .pm = &loongson_dwmac_pm_ops,
+ .pm = &stmmac_simple_pm_ops,
},
};
@@ -232,4 +627,5 @@ module_pci_driver(loongson_dwmac_driver);
MODULE_DESCRIPTION("Loongson DWMAC PCI driver");
MODULE_AUTHOR("Qing Zhang <zhangqing@loongson.cn>");
+MODULE_AUTHOR("Yanteng Si <siyanteng@loongson.cn>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c
index 3e86810717d3..de9aba756aac 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c
@@ -38,30 +38,54 @@
#define GMAC_SHUT BIT(6)
#define PHY_INTF_SELI GENMASK(30, 28)
-#define PHY_INTF_MII FIELD_PREP(PHY_INTF_SELI, 0)
-#define PHY_INTF_RMII FIELD_PREP(PHY_INTF_SELI, 4)
struct ls1x_dwmac {
struct plat_stmmacenet_data *plat_dat;
struct regmap *regmap;
+ unsigned int id;
};
-static int ls1b_dwmac_syscon_init(struct platform_device *pdev, void *priv)
+struct ls1x_data {
+ int (*setup)(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat_dat);
+ int (*init)(struct device *dev, void *bsp_priv);
+};
+
+static int ls1b_dwmac_setup(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat_dat)
{
- struct ls1x_dwmac *dwmac = priv;
- struct plat_stmmacenet_data *plat = dwmac->plat_dat;
- struct regmap *regmap = dwmac->regmap;
+ struct ls1x_dwmac *dwmac = plat_dat->bsp_priv;
struct resource *res;
- unsigned long reg_base;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
+ /* This shouldn't fail - stmmac_get_platform_resources()
+ * already mapped this resource.
+ */
dev_err(&pdev->dev, "Could not get IO_MEM resources\n");
return -EINVAL;
}
- reg_base = (unsigned long)res->start;
- if (reg_base == LS1B_GMAC0_BASE) {
+ if (res->start == LS1B_GMAC0_BASE) {
+ dwmac->id = 0;
+ } else if (res->start == LS1B_GMAC1_BASE) {
+ dwmac->id = 1;
+ } else {
+ dev_err(&pdev->dev, "Invalid Ethernet MAC base address %pR",
+ res);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ls1b_dwmac_syscon_init(struct device *dev, void *priv)
+{
+ struct ls1x_dwmac *dwmac = priv;
+ struct plat_stmmacenet_data *plat = dwmac->plat_dat;
+ struct regmap *regmap = dwmac->regmap;
+
+ if (dwmac->id == 0) {
switch (plat->phy_interface) {
case PHY_INTERFACE_MODE_RGMII_ID:
regmap_update_bits(regmap, LS1X_SYSCON0,
@@ -74,13 +98,13 @@ static int ls1b_dwmac_syscon_init(struct platform_device *pdev, void *priv)
GMAC0_USE_TXCLK | GMAC0_USE_PWM01);
break;
default:
- dev_err(&pdev->dev, "Unsupported PHY mode %u\n",
+ dev_err(dev, "Unsupported PHY mode %u\n",
plat->phy_interface);
return -EOPNOTSUPP;
}
regmap_update_bits(regmap, LS1X_SYSCON0, GMAC0_SHUT, 0);
- } else if (reg_base == LS1B_GMAC1_BASE) {
+ } else if (dwmac->id == 1) {
regmap_update_bits(regmap, LS1X_SYSCON0,
GMAC1_USE_UART1 | GMAC1_USE_UART0,
GMAC1_USE_UART1 | GMAC1_USE_UART0);
@@ -98,42 +122,34 @@ static int ls1b_dwmac_syscon_init(struct platform_device *pdev, void *priv)
GMAC1_USE_TXCLK | GMAC1_USE_PWM23);
break;
default:
- dev_err(&pdev->dev, "Unsupported PHY mode %u\n",
+ dev_err(dev, "Unsupported PHY mode %u\n",
plat->phy_interface);
return -EOPNOTSUPP;
}
regmap_update_bits(regmap, LS1X_SYSCON1, GMAC1_SHUT, 0);
- } else {
- dev_err(&pdev->dev, "Invalid Ethernet MAC base address %lx",
- reg_base);
- return -EINVAL;
}
return 0;
}
-static int ls1c_dwmac_syscon_init(struct platform_device *pdev, void *priv)
+static int ls1c_dwmac_syscon_init(struct device *dev, void *priv)
{
struct ls1x_dwmac *dwmac = priv;
struct plat_stmmacenet_data *plat = dwmac->plat_dat;
struct regmap *regmap = dwmac->regmap;
+ int phy_intf_sel;
- switch (plat->phy_interface) {
- case PHY_INTERFACE_MODE_MII:
- regmap_update_bits(regmap, LS1X_SYSCON1, PHY_INTF_SELI,
- PHY_INTF_MII);
- break;
- case PHY_INTERFACE_MODE_RMII:
- regmap_update_bits(regmap, LS1X_SYSCON1, PHY_INTF_SELI,
- PHY_INTF_RMII);
- break;
- default:
- dev_err(&pdev->dev, "Unsupported PHY-mode %u\n",
+ phy_intf_sel = stmmac_get_phy_intf_sel(plat->phy_interface);
+ if (phy_intf_sel != PHY_INTF_SEL_GMII_MII &&
+ phy_intf_sel != PHY_INTF_SEL_RMII) {
+ dev_err(dev, "Unsupported PHY-mode %u\n",
plat->phy_interface);
return -EOPNOTSUPP;
}
+ regmap_update_bits(regmap, LS1X_SYSCON1, PHY_INTF_SELI,
+ FIELD_PREP(PHY_INTF_SELI, phy_intf_sel));
regmap_update_bits(regmap, LS1X_SYSCON0, GMAC0_SHUT, 0);
return 0;
@@ -143,9 +159,9 @@ static int ls1x_dwmac_probe(struct platform_device *pdev)
{
struct plat_stmmacenet_data *plat_dat;
struct stmmac_resources stmmac_res;
+ const struct ls1x_data *data;
struct regmap *regmap;
struct ls1x_dwmac *dwmac;
- int (*init)(struct platform_device *pdev, void *priv);
int ret;
ret = stmmac_get_platform_resources(pdev, &stmmac_res);
@@ -159,8 +175,8 @@ static int ls1x_dwmac_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, PTR_ERR(regmap),
"Unable to find syscon\n");
- init = of_device_get_match_data(&pdev->dev);
- if (!init) {
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data) {
dev_err(&pdev->dev, "No of match data provided\n");
return -EINVAL;
}
@@ -175,21 +191,36 @@ static int ls1x_dwmac_probe(struct platform_device *pdev)
"dt configuration failed\n");
plat_dat->bsp_priv = dwmac;
- plat_dat->init = init;
+ plat_dat->init = data->init;
dwmac->plat_dat = plat_dat;
dwmac->regmap = regmap;
+ if (data->setup) {
+ ret = data->setup(pdev, plat_dat);
+ if (ret)
+ return ret;
+ }
+
return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
}
+static const struct ls1x_data ls1b_dwmac_data = {
+ .setup = ls1b_dwmac_setup,
+ .init = ls1b_dwmac_syscon_init,
+};
+
+static const struct ls1x_data ls1c_dwmac_data = {
+ .init = ls1c_dwmac_syscon_init,
+};
+
static const struct of_device_id ls1x_dwmac_match[] = {
{
.compatible = "loongson,ls1b-gmac",
- .data = &ls1b_dwmac_syscon_init,
+ .data = &ls1b_dwmac_data,
},
{
.compatible = "loongson,ls1c-emac",
- .data = &ls1c_dwmac_syscon_init,
+ .data = &ls1c_dwmac_data,
},
{ }
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
index 4c810d8f5bea..c68d7de1f8ac 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
@@ -21,16 +21,29 @@
/* Register defines for CREG syscon */
#define LPC18XX_CREG_CREG6 0x12c
-# define LPC18XX_CREG_CREG6_ETHMODE_MASK 0x7
-# define LPC18XX_CREG_CREG6_ETHMODE_MII 0x0
-# define LPC18XX_CREG_CREG6_ETHMODE_RMII 0x4
+# define LPC18XX_CREG_CREG6_ETHMODE_MASK GENMASK(2, 0)
+
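+/* The CREG6 ETHMODE field uses the same encoding as the generic
+ * phy_intf_sel values (MII = 0, RMII = 4), so the selector can be
+ * written directly once validated.
+ */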
+static int lpc18xx_set_phy_intf_sel(void *bsp_priv, u8 phy_intf_sel)
+{
+ struct regmap *reg = bsp_priv;
+
+ if (phy_intf_sel != PHY_INTF_SEL_GMII_MII &&
+ phy_intf_sel != PHY_INTF_SEL_RMII)
+ return -EINVAL;
+
+ regmap_update_bits(reg, LPC18XX_CREG_CREG6,
+ LPC18XX_CREG_CREG6_ETHMODE_MASK,
+ FIELD_PREP(LPC18XX_CREG_CREG6_ETHMODE_MASK,
+ phy_intf_sel));
+
+ return 0;
+}
static int lpc18xx_dwmac_probe(struct platform_device *pdev)
{
struct plat_stmmacenet_data *plat_dat;
struct stmmac_resources stmmac_res;
- struct regmap *reg;
- u8 ethmode;
+ struct regmap *regmap;
int ret;
ret = stmmac_get_platform_resources(pdev, &stmmac_res);
@@ -41,25 +54,16 @@ static int lpc18xx_dwmac_probe(struct platform_device *pdev)
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
- plat_dat->has_gmac = true;
+ plat_dat->core_type = DWMAC_CORE_GMAC;
- reg = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg");
- if (IS_ERR(reg)) {
+ regmap = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg");
+ if (IS_ERR(regmap)) {
dev_err(&pdev->dev, "syscon lookup failed\n");
- return PTR_ERR(reg);
- }
-
- if (plat_dat->mac_interface == PHY_INTERFACE_MODE_MII) {
- ethmode = LPC18XX_CREG_CREG6_ETHMODE_MII;
- } else if (plat_dat->mac_interface == PHY_INTERFACE_MODE_RMII) {
- ethmode = LPC18XX_CREG_CREG6_ETHMODE_RMII;
- } else {
- dev_err(&pdev->dev, "Only MII and RMII mode supported\n");
- return -EINVAL;
+ return PTR_ERR(regmap);
}
- regmap_update_bits(reg, LPC18XX_CREG_CREG6,
- LPC18XX_CREG_CREG6_ETHMODE_MASK, ethmode);
+ plat_dat->bsp_priv = regmap;
+ plat_dat->set_phy_intf_sel = lpc18xx_set_phy_intf_sel;
return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
}
@@ -72,7 +76,7 @@ MODULE_DEVICE_TABLE(of, lpc18xx_dwmac_match);
static struct platform_driver lpc18xx_dwmac_driver = {
.probe = lpc18xx_dwmac_probe,
- .remove_new = stmmac_pltfr_remove,
+ .remove = stmmac_pltfr_remove,
.driver = {
.name = "lpc18xx-dwmac",
.pm = &stmmac_pltfr_pm_ops,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
index 2a9132d6d743..1f2d7d19ca56 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
@@ -17,9 +17,6 @@
/* Peri Configuration register for mt2712 */
#define PERI_ETH_PHY_INTF_SEL 0x418
-#define PHY_INTF_MII 0
-#define PHY_INTF_RGMII 1
-#define PHY_INTF_RMII 4
#define RMII_CLK_SRC_RXC BIT(4)
#define RMII_CLK_SRC_INTERNAL BIT(5)
@@ -88,7 +85,8 @@ struct mediatek_dwmac_plat_data {
};
struct mediatek_dwmac_variant {
- int (*dwmac_set_phy_interface)(struct mediatek_dwmac_plat_data *plat);
+ int (*dwmac_set_phy_interface)(struct mediatek_dwmac_plat_data *plat,
+ u8 phy_intf_sel);
int (*dwmac_set_delay)(struct mediatek_dwmac_plat_data *plat);
/* clock ids to be requested */
@@ -109,29 +107,16 @@ static const char * const mt8195_dwmac_clk_l[] = {
"axi", "apb", "mac_cg", "mac_main", "ptp_ref"
};
-static int mt2712_set_interface(struct mediatek_dwmac_plat_data *plat)
+static int mt2712_set_interface(struct mediatek_dwmac_plat_data *plat,
+ u8 phy_intf_sel)
{
- int rmii_clk_from_mac = plat->rmii_clk_from_mac ? RMII_CLK_SRC_INTERNAL : 0;
- int rmii_rxc = plat->rmii_rxc ? RMII_CLK_SRC_RXC : 0;
- u32 intf_val = 0;
+ u32 intf_val = phy_intf_sel;
- /* select phy interface in top control domain */
- switch (plat->phy_mode) {
- case PHY_INTERFACE_MODE_MII:
- intf_val |= PHY_INTF_MII;
- break;
- case PHY_INTERFACE_MODE_RMII:
- intf_val |= (PHY_INTF_RMII | rmii_rxc | rmii_clk_from_mac);
- break;
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_ID:
- intf_val |= PHY_INTF_RGMII;
- break;
- default:
- dev_err(plat->dev, "phy interface not supported\n");
- return -EINVAL;
+ if (phy_intf_sel == PHY_INTF_SEL_RMII) {
+ if (plat->rmii_clk_from_mac)
+ intf_val |= RMII_CLK_SRC_INTERNAL;
+ if (plat->rmii_rxc)
+ intf_val |= RMII_CLK_SRC_RXC;
}
regmap_write(plat->peri_regmap, PERI_ETH_PHY_INTF_SEL, intf_val);
@@ -288,30 +273,16 @@ static const struct mediatek_dwmac_variant mt2712_gmac_variant = {
.tx_delay_max = 17600,
};
-static int mt8195_set_interface(struct mediatek_dwmac_plat_data *plat)
+static int mt8195_set_interface(struct mediatek_dwmac_plat_data *plat,
+ u8 phy_intf_sel)
{
- int rmii_clk_from_mac = plat->rmii_clk_from_mac ? MT8195_RMII_CLK_SRC_INTERNAL : 0;
- int rmii_rxc = plat->rmii_rxc ? MT8195_RMII_CLK_SRC_RXC : 0;
- u32 intf_val = 0;
+ u32 intf_val = FIELD_PREP(MT8195_ETH_INTF_SEL, phy_intf_sel);
- /* select phy interface in top control domain */
- switch (plat->phy_mode) {
- case PHY_INTERFACE_MODE_MII:
- intf_val |= FIELD_PREP(MT8195_ETH_INTF_SEL, PHY_INTF_MII);
- break;
- case PHY_INTERFACE_MODE_RMII:
- intf_val |= (rmii_rxc | rmii_clk_from_mac);
- intf_val |= FIELD_PREP(MT8195_ETH_INTF_SEL, PHY_INTF_RMII);
- break;
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_ID:
- intf_val |= FIELD_PREP(MT8195_ETH_INTF_SEL, PHY_INTF_RGMII);
- break;
- default:
- dev_err(plat->dev, "phy interface not supported\n");
- return -EINVAL;
+ if (phy_intf_sel == PHY_INTF_SEL_RMII) {
+ if (plat->rmii_clk_from_mac)
+ intf_val |= MT8195_RMII_CLK_SRC_INTERNAL;
+ if (plat->rmii_rxc)
+ intf_val |= MT8195_RMII_CLK_SRC_RXC;
}
/* MT8195 only support external PHY */
@@ -456,7 +427,6 @@ static int mediatek_dwmac_config_dt(struct mediatek_dwmac_plat_data *plat)
{
struct mac_delay_struct *mac_delay = &plat->mac_delay;
u32 tx_delay_ps, rx_delay_ps;
- int err;
plat->peri_regmap = syscon_regmap_lookup_by_phandle(plat->np, "mediatek,pericfg");
if (IS_ERR(plat->peri_regmap)) {
@@ -464,12 +434,6 @@ static int mediatek_dwmac_config_dt(struct mediatek_dwmac_plat_data *plat)
return PTR_ERR(plat->peri_regmap);
}
- err = of_get_phy_mode(plat->np, &plat->phy_mode);
- if (err) {
- dev_err(plat->dev, "not find phy-mode\n");
- return err;
- }
-
if (!of_property_read_u32(plat->np, "mediatek,tx-delay-ps", &tx_delay_ps)) {
if (tx_delay_ps < plat->variant->tx_delay_max) {
mac_delay->tx_delay = tx_delay_ps;
@@ -530,16 +494,24 @@ static int mediatek_dwmac_clk_init(struct mediatek_dwmac_plat_data *plat)
return ret;
}
-static int mediatek_dwmac_init(struct platform_device *pdev, void *priv)
+static int mediatek_dwmac_init(struct device *dev, void *priv)
{
struct mediatek_dwmac_plat_data *plat = priv;
const struct mediatek_dwmac_variant *variant = plat->variant;
- int ret;
+ int phy_intf_sel, ret;
if (variant->dwmac_set_phy_interface) {
- ret = variant->dwmac_set_phy_interface(plat);
+ phy_intf_sel = stmmac_get_phy_intf_sel(plat->phy_mode);
+ if (phy_intf_sel != PHY_INTF_SEL_GMII_MII &&
+ phy_intf_sel != PHY_INTF_SEL_RGMII &&
+ phy_intf_sel != PHY_INTF_SEL_RMII) {
+ dev_err(plat->dev, "phy interface not supported\n");
+ return phy_intf_sel < 0 ? phy_intf_sel : -EINVAL;
+ }
+
+ ret = variant->dwmac_set_phy_interface(plat, phy_intf_sel);
if (ret) {
- dev_err(plat->dev, "failed to set phy interface, err = %d\n", ret);
+ dev_err(dev, "failed to set phy interface, err = %d\n", ret);
return ret;
}
}
@@ -547,7 +519,7 @@ static int mediatek_dwmac_init(struct platform_device *pdev, void *priv)
if (variant->dwmac_set_delay) {
ret = variant->dwmac_set_delay(plat);
if (ret) {
- dev_err(plat->dev, "failed to set delay value, err = %d\n", ret);
+ dev_err(dev, "failed to set delay value, err = %d\n", ret);
return ret;
}
}
@@ -587,16 +559,16 @@ static int mediatek_dwmac_common_data(struct platform_device *pdev,
{
int i;
- plat->mac_interface = priv_plat->phy_mode;
+ priv_plat->phy_mode = plat->phy_interface;
if (priv_plat->mac_wol)
- plat->flags |= STMMAC_FLAG_USE_PHY_WOL;
- else
plat->flags &= ~STMMAC_FLAG_USE_PHY_WOL;
+ else
+ plat->flags |= STMMAC_FLAG_USE_PHY_WOL;
plat->riwt_off = 1;
plat->maxmtu = ETH_DATA_LEN;
plat->host_dma_width = priv_plat->variant->dma_bit_mask;
plat->bsp_priv = priv_plat;
- plat->init = mediatek_dwmac_init;
+ plat->resume = mediatek_dwmac_init;
plat->clks_config = mediatek_dwmac_clks_config;
plat->safety_feat_cfg = devm_kzalloc(&pdev->dev,
@@ -661,7 +633,7 @@ static int mediatek_dwmac_probe(struct platform_device *pdev)
return PTR_ERR(plat_dat);
mediatek_dwmac_common_data(pdev, plat_dat, priv_plat);
- mediatek_dwmac_init(pdev, priv_plat);
+ mediatek_dwmac_init(&pdev->dev, priv_plat);
ret = mediatek_dwmac_clks_config(priv_plat, true);
if (ret)
@@ -699,7 +671,7 @@ MODULE_DEVICE_TABLE(of, mediatek_dwmac_match);
static struct platform_driver mediatek_dwmac_driver = {
.probe = mediatek_dwmac_probe,
- .remove_new = mediatek_dwmac_remove,
+ .remove = mediatek_dwmac_remove,
.driver = {
.name = "dwmac-mediatek",
.pm = &stmmac_pltfr_pm_ops,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
index a16bfa9089ea..07c504d07604 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
@@ -22,9 +22,10 @@ struct meson_dwmac {
void __iomem *reg;
};
-static void meson6_dwmac_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
+static int meson6_dwmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed)
{
- struct meson_dwmac *dwmac = priv;
+ struct meson_dwmac *dwmac = bsp_priv;
unsigned int val;
val = readl(dwmac->reg);
@@ -39,6 +40,8 @@ static void meson6_dwmac_fix_mac_speed(void *priv, unsigned int speed, unsigned
}
writel(val, dwmac->reg);
+
+ return 0;
}
static int meson6_dwmac_probe(struct platform_device *pdev)
@@ -65,7 +68,7 @@ static int meson6_dwmac_probe(struct platform_device *pdev)
return PTR_ERR(dwmac->reg);
plat_dat->bsp_priv = dwmac;
- plat_dat->fix_mac_speed = meson6_dwmac_fix_mac_speed;
+ plat_dat->set_clk_tx_rate = meson6_dwmac_set_clk_tx_rate;
return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
}
@@ -78,7 +81,7 @@ MODULE_DEVICE_TABLE(of, meson6_dwmac_match);
static struct platform_driver meson6_dwmac_driver = {
.probe = meson6_dwmac_probe,
- .remove_new = stmmac_pltfr_remove,
+ .remove = stmmac_pltfr_remove,
.driver = {
.name = "meson6-dwmac",
.pm = &stmmac_pltfr_pm_ops,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index b23944aa344e..e4d5c41294f4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -26,8 +26,6 @@
#define PRG_ETH0_RGMII_MODE BIT(0)
#define PRG_ETH0_EXT_PHY_MODE_MASK GENMASK(2, 0)
-#define PRG_ETH0_EXT_RGMII_MODE 1
-#define PRG_ETH0_EXT_RMII_MODE 4
/* mux to choose between fclk_div2 (bit unset) and mpll2 (bit set) */
#define PRG_ETH0_CLK_M250_SEL_MASK GENMASK(4, 4)
@@ -238,28 +236,20 @@ static int meson8b_set_phy_mode(struct meson8b_dwmac *dwmac)
static int meson_axg_set_phy_mode(struct meson8b_dwmac *dwmac)
{
- switch (dwmac->phy_mode) {
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- /* enable RGMII mode */
- meson8b_dwmac_mask_bits(dwmac, PRG_ETH0,
- PRG_ETH0_EXT_PHY_MODE_MASK,
- PRG_ETH0_EXT_RGMII_MODE);
- break;
- case PHY_INTERFACE_MODE_RMII:
- /* disable RGMII mode -> enables RMII mode */
- meson8b_dwmac_mask_bits(dwmac, PRG_ETH0,
- PRG_ETH0_EXT_PHY_MODE_MASK,
- PRG_ETH0_EXT_RMII_MODE);
- break;
- default:
+ int phy_intf_sel;
+
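+	/* PRG_ETH0's external PHY mode field takes the same encoding as
+	 * the generic phy_intf_sel values (RGMII = 1, RMII = 4), matching
+	 * the former PRG_ETH0_EXT_RGMII_MODE/PRG_ETH0_EXT_RMII_MODE defines.
+	 */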
+ phy_intf_sel = stmmac_get_phy_intf_sel(dwmac->phy_mode);
+ if (phy_intf_sel != PHY_INTF_SEL_RGMII &&
+ phy_intf_sel != PHY_INTF_SEL_RMII) {
dev_err(dwmac->dev, "fail to set phy-mode %s\n",
phy_modes(dwmac->phy_mode));
- return -EINVAL;
+ return phy_intf_sel < 0 ? phy_intf_sel : -EINVAL;
}
+ meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_EXT_PHY_MODE_MASK,
+ FIELD_PREP(PRG_ETH0_EXT_PHY_MODE_MASK,
+ phy_intf_sel));
+
return 0;
}
@@ -417,11 +407,7 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
return PTR_ERR(dwmac->regs);
dwmac->dev = &pdev->dev;
- ret = of_get_phy_mode(pdev->dev.of_node, &dwmac->phy_mode);
- if (ret) {
- dev_err(&pdev->dev, "missing phy-mode property\n");
- return ret;
- }
+ dwmac->phy_mode = plat_dat->phy_interface;
/* use 2ns as fallback since this value was previously hardcoded */
if (of_property_read_u32(pdev->dev.of_node, "amlogic,tx-delay-ns",
@@ -520,7 +506,7 @@ MODULE_DEVICE_TABLE(of, meson8b_dwmac_match);
static struct platform_driver meson8b_dwmac_driver = {
.probe = meson8b_dwmac_probe,
- .remove_new = stmmac_pltfr_remove,
+ .remove = stmmac_pltfr_remove,
.driver = {
.name = "meson8b-dwmac",
.pm = &stmmac_pltfr_pm_ops,
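
(Illustration, not part of the patch.) The meson8b hunk relies on the new stmmac_get_phy_intf_sel() helper to turn a phy_interface_t into the 3-bit phy_intf_sel value that is then FIELD_PREP()'d into PRG_ETH0_EXT_PHY_MODE_MASK. The helper's body is not shown in this diff; judging from the defines removed above (RGMII mode = 1, RMII mode = 4), a plausible sketch of the mapping is:

#include <linux/errno.h>
#include <linux/phy.h>

/* Hypothetical mapping, inferred from the removed PRG_ETH0_EXT_*_MODE
 * values; the real helper lives in the stmmac core and may differ.
 */
static int example_phy_intf_sel(phy_interface_t interface)
{
	switch (interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		return 1;	/* PHY_INTF_SEL_RGMII */
	case PHY_INTERFACE_MODE_RMII:
		return 4;	/* PHY_INTF_SEL_RMII */
	default:
		return -EINVAL;
	}
}

FIELD_PREP(PRG_ETH0_EXT_PHY_MODE_MASK, phy_intf_sel) then shifts that selector into bits 2:0 of PRG_ETH0, which is exactly what the rewritten meson_axg_set_phy_mode() writes.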
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index e254b21fdb59..0826a7bd32ff 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -21,6 +21,7 @@
#define RGMII_IO_MACRO_CONFIG2 0x1C
#define RGMII_IO_MACRO_DEBUG1 0x20
#define EMAC_SYSTEM_LOW_POWER_DEBUG 0x28
+#define EMAC_WRAPPER_SGMII_PHY_CNTRL1 0xf4
/* RGMII_IO_MACRO_CONFIG fields */
#define RGMII_CONFIG_FUNC_CLK_EN BIT(30)
@@ -75,9 +76,8 @@
#define RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL BIT(6)
#define RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN BIT(5)
-/* MAC_CTRL_REG bits */
-#define ETHQOS_MAC_CTRL_SPEED_MODE BIT(14)
-#define ETHQOS_MAC_CTRL_PORT_SEL BIT(15)
+/* EMAC_WRAPPER_SGMII_PHY_CNTRL1 bits */
+#define SGMII_PHY_CNTRL1_SGMII_TX_TO_RX_LOOPBACK_EN BIT(3)
#define SGMII_10M_RX_CLK_DVDR 0x31
@@ -92,20 +92,20 @@ struct ethqos_emac_driver_data {
bool rgmii_config_loopback_en;
bool has_emac_ge_3;
const char *link_clk_name;
- bool has_integrated_pcs;
+ u32 dma_addr_width;
struct dwmac4_addrs dwmac4_addrs;
+ bool needs_sgmii_loopback;
};
struct qcom_ethqos {
struct platform_device *pdev;
void __iomem *rgmii_base;
void __iomem *mac_base;
- int (*configure_func)(struct qcom_ethqos *ethqos);
+ int (*configure_func)(struct qcom_ethqos *ethqos, int speed);
unsigned int link_clk_rate;
struct clk *link_clk;
struct phy *serdes_phy;
- unsigned int speed;
int serdes_speed;
phy_interface_t phy_mode;
@@ -113,29 +113,42 @@ struct qcom_ethqos {
unsigned int num_por;
bool rgmii_config_loopback_en;
bool has_emac_ge_3;
+ bool needs_sgmii_loopback;
};
-static int rgmii_readl(struct qcom_ethqos *ethqos, unsigned int offset)
+static u32 rgmii_readl(struct qcom_ethqos *ethqos, unsigned int offset)
{
return readl(ethqos->rgmii_base + offset);
}
-static void rgmii_writel(struct qcom_ethqos *ethqos,
- int value, unsigned int offset)
+static void rgmii_writel(struct qcom_ethqos *ethqos, u32 value,
+ unsigned int offset)
{
writel(value, ethqos->rgmii_base + offset);
}
-static void rgmii_updatel(struct qcom_ethqos *ethqos,
- int mask, int val, unsigned int offset)
+static void rgmii_updatel(struct qcom_ethqos *ethqos, u32 mask, u32 val,
+ unsigned int offset)
{
- unsigned int temp;
+ u32 temp;
temp = rgmii_readl(ethqos, offset);
temp = (temp & ~(mask)) | val;
rgmii_writel(ethqos, temp, offset);
}
+static void rgmii_setmask(struct qcom_ethqos *ethqos, u32 mask,
+ unsigned int offset)
+{
+ rgmii_updatel(ethqos, mask, mask, offset);
+}
+
+static void rgmii_clrmask(struct qcom_ethqos *ethqos, u32 mask,
+ unsigned int offset)
+{
+ rgmii_updatel(ethqos, mask, 0, offset);
+}
+
static void rgmii_dump(void *priv)
{
struct qcom_ethqos *ethqos = priv;
@@ -162,38 +175,38 @@ static void rgmii_dump(void *priv)
rgmii_readl(ethqos, EMAC_SYSTEM_LOW_POWER_DEBUG));
}
-/* Clock rates */
-#define RGMII_1000_NOM_CLK_FREQ (250 * 1000 * 1000UL)
-#define RGMII_ID_MODE_100_LOW_SVS_CLK_FREQ (50 * 1000 * 1000UL)
-#define RGMII_ID_MODE_10_LOW_SVS_CLK_FREQ (5 * 1000 * 1000UL)
-
static void
-ethqos_update_link_clk(struct qcom_ethqos *ethqos, unsigned int speed)
+ethqos_update_link_clk(struct qcom_ethqos *ethqos, int speed)
{
+ long rate;
+
if (!phy_interface_mode_is_rgmii(ethqos->phy_mode))
return;
- switch (speed) {
- case SPEED_1000:
- ethqos->link_clk_rate = RGMII_1000_NOM_CLK_FREQ;
- break;
+ rate = rgmii_clock(speed);
+ if (rate > 0)
+ ethqos->link_clk_rate = rate * 2;
- case SPEED_100:
- ethqos->link_clk_rate = RGMII_ID_MODE_100_LOW_SVS_CLK_FREQ;
- break;
+ clk_set_rate(ethqos->link_clk, ethqos->link_clk_rate);
+}
- case SPEED_10:
- ethqos->link_clk_rate = RGMII_ID_MODE_10_LOW_SVS_CLK_FREQ;
- break;
- }
+static void
+qcom_ethqos_set_sgmii_loopback(struct qcom_ethqos *ethqos, bool enable)
+{
+ if (!ethqos->needs_sgmii_loopback ||
+ ethqos->phy_mode != PHY_INTERFACE_MODE_2500BASEX)
+ return;
- clk_set_rate(ethqos->link_clk, ethqos->link_clk_rate);
+ rgmii_updatel(ethqos,
+ SGMII_PHY_CNTRL1_SGMII_TX_TO_RX_LOOPBACK_EN,
+ enable ? SGMII_PHY_CNTRL1_SGMII_TX_TO_RX_LOOPBACK_EN : 0,
+ EMAC_WRAPPER_SGMII_PHY_CNTRL1);
}
static void ethqos_set_func_clk_en(struct qcom_ethqos *ethqos)
{
- rgmii_updatel(ethqos, RGMII_CONFIG_FUNC_CLK_EN,
- RGMII_CONFIG_FUNC_CLK_EN, RGMII_IO_MACRO_CONFIG);
+ qcom_ethqos_set_sgmii_loopback(ethqos, true);
+ rgmii_setmask(ethqos, RGMII_CONFIG_FUNC_CLK_EN, RGMII_IO_MACRO_CONFIG);
}
static const struct ethqos_emac_por emac_v2_3_0_por[] = {
@@ -271,11 +284,12 @@ static const struct ethqos_emac_por emac_v4_0_0_por[] = {
static const struct ethqos_emac_driver_data emac_v4_0_0_data = {
.por = emac_v4_0_0_por,
- .num_por = ARRAY_SIZE(emac_v3_0_0_por),
+ .num_por = ARRAY_SIZE(emac_v4_0_0_por),
.rgmii_config_loopback_en = false,
.has_emac_ge_3 = true,
.link_clk_name = "phyaux",
- .has_integrated_pcs = true,
+ .needs_sgmii_loopback = true,
+ .dma_addr_width = 36,
.dwmac4_addrs = {
.dma_chan = 0x00008100,
.dma_chan_offset = 0x1000,
@@ -297,69 +311,55 @@ static const struct ethqos_emac_driver_data emac_v4_0_0_data = {
static int ethqos_dll_configure(struct qcom_ethqos *ethqos)
{
struct device *dev = &ethqos->pdev->dev;
- unsigned int val;
- int retry = 1000;
+ u32 val;
/* Set CDR_EN */
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CDR_EN,
- SDCC_DLL_CONFIG_CDR_EN, SDCC_HC_REG_DLL_CONFIG);
+ rgmii_setmask(ethqos, SDCC_DLL_CONFIG_CDR_EN, SDCC_HC_REG_DLL_CONFIG);
/* Set CDR_EXT_EN */
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CDR_EXT_EN,
- SDCC_DLL_CONFIG_CDR_EXT_EN, SDCC_HC_REG_DLL_CONFIG);
+ rgmii_setmask(ethqos, SDCC_DLL_CONFIG_CDR_EXT_EN,
+ SDCC_HC_REG_DLL_CONFIG);
/* Clear CK_OUT_EN */
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CK_OUT_EN,
- 0, SDCC_HC_REG_DLL_CONFIG);
+ rgmii_clrmask(ethqos, SDCC_DLL_CONFIG_CK_OUT_EN,
+ SDCC_HC_REG_DLL_CONFIG);
/* Set DLL_EN */
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_EN,
- SDCC_DLL_CONFIG_DLL_EN, SDCC_HC_REG_DLL_CONFIG);
+ rgmii_setmask(ethqos, SDCC_DLL_CONFIG_DLL_EN, SDCC_HC_REG_DLL_CONFIG);
if (!ethqos->has_emac_ge_3) {
- rgmii_updatel(ethqos, SDCC_DLL_MCLK_GATING_EN,
- 0, SDCC_HC_REG_DLL_CONFIG);
+ rgmii_clrmask(ethqos, SDCC_DLL_MCLK_GATING_EN,
+ SDCC_HC_REG_DLL_CONFIG);
- rgmii_updatel(ethqos, SDCC_DLL_CDR_FINE_PHASE,
- 0, SDCC_HC_REG_DLL_CONFIG);
+ rgmii_clrmask(ethqos, SDCC_DLL_CDR_FINE_PHASE,
+ SDCC_HC_REG_DLL_CONFIG);
}
/* Wait for CK_OUT_EN clear */
- do {
- val = rgmii_readl(ethqos, SDCC_HC_REG_DLL_CONFIG);
- val &= SDCC_DLL_CONFIG_CK_OUT_EN;
- if (!val)
- break;
- mdelay(1);
- retry--;
- } while (retry > 0);
- if (!retry)
+ if (read_poll_timeout_atomic(rgmii_readl, val,
+ !(val & SDCC_DLL_CONFIG_CK_OUT_EN),
+ 1000, 1000000, false,
+ ethqos, SDCC_HC_REG_DLL_CONFIG))
dev_err(dev, "Clear CK_OUT_EN timedout\n");
/* Set CK_OUT_EN */
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CK_OUT_EN,
- SDCC_DLL_CONFIG_CK_OUT_EN, SDCC_HC_REG_DLL_CONFIG);
+ rgmii_setmask(ethqos, SDCC_DLL_CONFIG_CK_OUT_EN,
+ SDCC_HC_REG_DLL_CONFIG);
/* Wait for CK_OUT_EN set */
- retry = 1000;
- do {
- val = rgmii_readl(ethqos, SDCC_HC_REG_DLL_CONFIG);
- val &= SDCC_DLL_CONFIG_CK_OUT_EN;
- if (val)
- break;
- mdelay(1);
- retry--;
- } while (retry > 0);
- if (!retry)
+ if (read_poll_timeout_atomic(rgmii_readl, val,
+ val & SDCC_DLL_CONFIG_CK_OUT_EN,
+ 1000, 1000000, false,
+ ethqos, SDCC_HC_REG_DLL_CONFIG))
dev_err(dev, "Set CK_OUT_EN timedout\n");
/* Set DDR_CAL_EN */
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DDR_CAL_EN,
- SDCC_DLL_CONFIG2_DDR_CAL_EN, SDCC_HC_REG_DLL_CONFIG2);
+ rgmii_setmask(ethqos, SDCC_DLL_CONFIG2_DDR_CAL_EN,
+ SDCC_HC_REG_DLL_CONFIG2);
if (!ethqos->has_emac_ge_3) {
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DLL_CLOCK_DIS,
- 0, SDCC_HC_REG_DLL_CONFIG2);
+ rgmii_clrmask(ethqos, SDCC_DLL_CONFIG2_DLL_CLOCK_DIS,
+ SDCC_HC_REG_DLL_CONFIG2);
rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_MCLK_FREQ_CALC,
0x1A << 10, SDCC_HC_REG_DLL_CONFIG2);
@@ -367,15 +367,14 @@ static int ethqos_dll_configure(struct qcom_ethqos *ethqos)
rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SEL,
BIT(2), SDCC_HC_REG_DLL_CONFIG2);
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SW,
- SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SW,
+ rgmii_setmask(ethqos, SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SW,
SDCC_HC_REG_DLL_CONFIG2);
}
return 0;
}
-static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos)
+static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos, int speed)
{
struct device *dev = &ethqos->pdev->dev;
int phase_shift;
@@ -389,8 +388,8 @@ static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos)
phase_shift = RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN;
/* Disable loopback mode */
- rgmii_updatel(ethqos, RGMII_CONFIG2_TX_TO_RX_LOOPBACK_EN,
- 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_clrmask(ethqos, RGMII_CONFIG2_TX_TO_RX_LOOPBACK_EN,
+ RGMII_IO_MACRO_CONFIG2);
/* Determine if this platform wants loopback enabled after programming */
if (ethqos->rgmii_config_loopback_en)
@@ -399,29 +398,26 @@ static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos)
loopback = 0;
/* Select RGMII, write 0 to interface select */
- rgmii_updatel(ethqos, RGMII_CONFIG_INTF_SEL,
- 0, RGMII_IO_MACRO_CONFIG);
+ rgmii_clrmask(ethqos, RGMII_CONFIG_INTF_SEL, RGMII_IO_MACRO_CONFIG);
- switch (ethqos->speed) {
+ switch (speed) {
case SPEED_1000:
- rgmii_updatel(ethqos, RGMII_CONFIG_DDR_MODE,
- RGMII_CONFIG_DDR_MODE, RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG_BYPASS_TX_ID_EN,
- 0, RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG_POS_NEG_DATA_SEL,
- RGMII_CONFIG_POS_NEG_DATA_SEL,
+ rgmii_setmask(ethqos, RGMII_CONFIG_DDR_MODE,
+ RGMII_IO_MACRO_CONFIG);
+ rgmii_clrmask(ethqos, RGMII_CONFIG_BYPASS_TX_ID_EN,
+ RGMII_IO_MACRO_CONFIG);
+ rgmii_setmask(ethqos, RGMII_CONFIG_POS_NEG_DATA_SEL,
RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG_PROG_SWAP,
- RGMII_CONFIG_PROG_SWAP, RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL,
- 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_setmask(ethqos, RGMII_CONFIG_PROG_SWAP,
+ RGMII_IO_MACRO_CONFIG);
+ rgmii_clrmask(ethqos, RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL,
+ RGMII_IO_MACRO_CONFIG2);
rgmii_updatel(ethqos, RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN,
phase_shift, RGMII_IO_MACRO_CONFIG2);
- rgmii_updatel(ethqos, RGMII_CONFIG2_RSVD_CONFIG15,
- 0, RGMII_IO_MACRO_CONFIG2);
- rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
- RGMII_CONFIG2_RX_PROG_SWAP,
+ rgmii_clrmask(ethqos, RGMII_CONFIG2_RSVD_CONFIG15,
+ RGMII_IO_MACRO_CONFIG2);
+ rgmii_setmask(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
RGMII_IO_MACRO_CONFIG2);
/* PRG_RCLK_DLY = TCXO period * TCXO_CYCLES_CNT / 2 * RX delay ns,
@@ -436,104 +432,95 @@ static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos)
rgmii_updatel(ethqos, SDCC_DDR_CONFIG_PRG_RCLK_DLY,
57, SDCC_HC_REG_DDR_CONFIG);
}
- rgmii_updatel(ethqos, SDCC_DDR_CONFIG_PRG_DLY_EN,
- SDCC_DDR_CONFIG_PRG_DLY_EN,
+ rgmii_setmask(ethqos, SDCC_DDR_CONFIG_PRG_DLY_EN,
SDCC_HC_REG_DDR_CONFIG);
rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
loopback, RGMII_IO_MACRO_CONFIG);
break;
case SPEED_100:
- rgmii_updatel(ethqos, RGMII_CONFIG_DDR_MODE,
- RGMII_CONFIG_DDR_MODE, RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG_BYPASS_TX_ID_EN,
- RGMII_CONFIG_BYPASS_TX_ID_EN,
+ rgmii_setmask(ethqos, RGMII_CONFIG_DDR_MODE,
+ RGMII_IO_MACRO_CONFIG);
+ rgmii_setmask(ethqos, RGMII_CONFIG_BYPASS_TX_ID_EN,
RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG_POS_NEG_DATA_SEL,
- 0, RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG_PROG_SWAP,
- 0, RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL,
- 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_clrmask(ethqos, RGMII_CONFIG_POS_NEG_DATA_SEL,
+ RGMII_IO_MACRO_CONFIG);
+ rgmii_clrmask(ethqos, RGMII_CONFIG_PROG_SWAP,
+ RGMII_IO_MACRO_CONFIG);
+ rgmii_clrmask(ethqos, RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL,
+ RGMII_IO_MACRO_CONFIG2);
rgmii_updatel(ethqos, RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN,
phase_shift, RGMII_IO_MACRO_CONFIG2);
rgmii_updatel(ethqos, RGMII_CONFIG_MAX_SPD_PRG_2,
BIT(6), RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG2_RSVD_CONFIG15,
- 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_clrmask(ethqos, RGMII_CONFIG2_RSVD_CONFIG15,
+ RGMII_IO_MACRO_CONFIG2);
if (ethqos->has_emac_ge_3)
- rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
- RGMII_CONFIG2_RX_PROG_SWAP,
+ rgmii_setmask(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
RGMII_IO_MACRO_CONFIG2);
else
- rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
- 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_clrmask(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
+ RGMII_IO_MACRO_CONFIG2);
/* Write 0x5 to PRG_RCLK_DLY_CODE */
rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_CODE,
(BIT(29) | BIT(27)), SDCC_HC_REG_DDR_CONFIG);
- rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY,
- SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY,
+ rgmii_setmask(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY,
SDCC_HC_REG_DDR_CONFIG);
- rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
- SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
+ rgmii_setmask(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
SDCC_HC_REG_DDR_CONFIG);
rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
loopback, RGMII_IO_MACRO_CONFIG);
break;
case SPEED_10:
- rgmii_updatel(ethqos, RGMII_CONFIG_DDR_MODE,
- RGMII_CONFIG_DDR_MODE, RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG_BYPASS_TX_ID_EN,
- RGMII_CONFIG_BYPASS_TX_ID_EN,
+ rgmii_setmask(ethqos, RGMII_CONFIG_DDR_MODE,
+ RGMII_IO_MACRO_CONFIG);
+ rgmii_setmask(ethqos, RGMII_CONFIG_BYPASS_TX_ID_EN,
+ RGMII_IO_MACRO_CONFIG);
+ rgmii_clrmask(ethqos, RGMII_CONFIG_POS_NEG_DATA_SEL,
+ RGMII_IO_MACRO_CONFIG);
+ rgmii_clrmask(ethqos, RGMII_CONFIG_PROG_SWAP,
RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG_POS_NEG_DATA_SEL,
- 0, RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG_PROG_SWAP,
- 0, RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL,
- 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_clrmask(ethqos, RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL,
+ RGMII_IO_MACRO_CONFIG2);
rgmii_updatel(ethqos, RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN,
phase_shift, RGMII_IO_MACRO_CONFIG2);
rgmii_updatel(ethqos, RGMII_CONFIG_MAX_SPD_PRG_9,
BIT(12) | GENMASK(9, 8),
RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG2_RSVD_CONFIG15,
- 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_clrmask(ethqos, RGMII_CONFIG2_RSVD_CONFIG15,
+ RGMII_IO_MACRO_CONFIG2);
if (ethqos->has_emac_ge_3)
- rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
- RGMII_CONFIG2_RX_PROG_SWAP,
+ rgmii_setmask(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
RGMII_IO_MACRO_CONFIG2);
else
- rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
- 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_clrmask(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
+ RGMII_IO_MACRO_CONFIG2);
/* Write 0x5 to PRG_RCLK_DLY_CODE */
rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_CODE,
(BIT(29) | BIT(27)), SDCC_HC_REG_DDR_CONFIG);
- rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY,
- SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY,
+ rgmii_setmask(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY,
SDCC_HC_REG_DDR_CONFIG);
- rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
- SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
+ rgmii_setmask(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
SDCC_HC_REG_DDR_CONFIG);
rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
loopback, RGMII_IO_MACRO_CONFIG);
break;
default:
- dev_err(dev, "Invalid speed %d\n", ethqos->speed);
+ dev_err(dev, "Invalid speed %d\n", speed);
return -EINVAL;
}
return 0;
}
-static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos)
+static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos, int speed)
{
struct device *dev = &ethqos->pdev->dev;
- volatile unsigned int dll_lock;
- unsigned int i, retry = 1000;
+ unsigned int i;
+ u32 val;
/* Reset to POR values and enable clk */
for (i = 0; i < ethqos->num_por; i++)
@@ -544,15 +531,15 @@ static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos)
/* Initialize the DLL first */
/* Set DLL_RST */
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_RST,
- SDCC_DLL_CONFIG_DLL_RST, SDCC_HC_REG_DLL_CONFIG);
+ rgmii_setmask(ethqos, SDCC_DLL_CONFIG_DLL_RST,
+ SDCC_HC_REG_DLL_CONFIG);
/* Set PDN */
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG_PDN,
- SDCC_DLL_CONFIG_PDN, SDCC_HC_REG_DLL_CONFIG);
+ rgmii_setmask(ethqos, SDCC_DLL_CONFIG_PDN,
+ SDCC_HC_REG_DLL_CONFIG);
if (ethqos->has_emac_ge_3) {
- if (ethqos->speed == SPEED_1000) {
+ if (speed == SPEED_1000) {
rgmii_writel(ethqos, 0x1800000, SDCC_TEST_CTL);
rgmii_writel(ethqos, 0x2C010800, SDCC_USR_CTL);
rgmii_writel(ethqos, 0xA001, SDCC_HC_REG_DLL_CONFIG2);
@@ -563,21 +550,18 @@ static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos)
}
/* Clear DLL_RST */
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_RST, 0,
- SDCC_HC_REG_DLL_CONFIG);
+ rgmii_clrmask(ethqos, SDCC_DLL_CONFIG_DLL_RST, SDCC_HC_REG_DLL_CONFIG);
/* Clear PDN */
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG_PDN, 0,
- SDCC_HC_REG_DLL_CONFIG);
+ rgmii_clrmask(ethqos, SDCC_DLL_CONFIG_PDN, SDCC_HC_REG_DLL_CONFIG);
- if (ethqos->speed != SPEED_100 && ethqos->speed != SPEED_10) {
+ if (speed != SPEED_100 && speed != SPEED_10) {
/* Set DLL_EN */
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_EN,
- SDCC_DLL_CONFIG_DLL_EN, SDCC_HC_REG_DLL_CONFIG);
+ rgmii_setmask(ethqos, SDCC_DLL_CONFIG_DLL_EN,
+ SDCC_HC_REG_DLL_CONFIG);
/* Set CK_OUT_EN */
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CK_OUT_EN,
- SDCC_DLL_CONFIG_CK_OUT_EN,
+ rgmii_setmask(ethqos, SDCC_DLL_CONFIG_CK_OUT_EN,
SDCC_HC_REG_DLL_CONFIG);
/* Set USR_CTL bit 26 with mask of 3 bits */
@@ -586,95 +570,84 @@ static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos)
SDCC_USR_CTL);
/* wait for DLL LOCK */
- do {
- mdelay(1);
- dll_lock = rgmii_readl(ethqos, SDC4_STATUS);
- if (dll_lock & SDC4_STATUS_DLL_LOCK)
- break;
- retry--;
- } while (retry > 0);
- if (!retry)
+ if (read_poll_timeout_atomic(rgmii_readl, val,
+ val & SDC4_STATUS_DLL_LOCK,
+ 1000, 1000000, true,
+ ethqos, SDC4_STATUS))
dev_err(dev, "Timeout while waiting for DLL lock\n");
}
- if (ethqos->speed == SPEED_1000)
+ if (speed == SPEED_1000)
ethqos_dll_configure(ethqos);
- ethqos_rgmii_macro_init(ethqos);
+ ethqos_rgmii_macro_init(ethqos, speed);
return 0;
}
+static void ethqos_set_serdes_speed(struct qcom_ethqos *ethqos, int speed)
+{
+ if (ethqos->serdes_speed != speed) {
+ phy_set_speed(ethqos->serdes_phy, speed);
+ ethqos->serdes_speed = speed;
+ }
+}
+
+static void ethqos_pcs_set_inband(struct stmmac_priv *priv, bool enable)
+{
+ stmmac_pcs_ctrl_ane(priv, enable, 0);
+}
+
/* On interface toggle MAC registers gets reset.
* Configure MAC block for SGMII on ethernet phy link up
*/
-static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos)
+static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos, int speed)
{
struct net_device *dev = platform_get_drvdata(ethqos->pdev);
struct stmmac_priv *priv = netdev_priv(dev);
- int val;
-
- val = readl(ethqos->mac_base + MAC_CTRL_REG);
- switch (ethqos->speed) {
+ switch (speed) {
case SPEED_2500:
- val &= ~ETHQOS_MAC_CTRL_PORT_SEL;
- rgmii_updatel(ethqos, RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
- RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
+ rgmii_setmask(ethqos, RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
RGMII_IO_MACRO_CONFIG2);
- if (ethqos->serdes_speed != SPEED_2500)
- phy_set_speed(ethqos->serdes_phy, SPEED_2500);
- ethqos->serdes_speed = SPEED_2500;
- stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 0, 0, 0);
+ ethqos_set_serdes_speed(ethqos, SPEED_2500);
+ ethqos_pcs_set_inband(priv, false);
break;
case SPEED_1000:
- val &= ~ETHQOS_MAC_CTRL_PORT_SEL;
- rgmii_updatel(ethqos, RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
- RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
+ rgmii_setmask(ethqos, RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
RGMII_IO_MACRO_CONFIG2);
- if (ethqos->serdes_speed != SPEED_1000)
- phy_set_speed(ethqos->serdes_phy, SPEED_1000);
- ethqos->serdes_speed = SPEED_1000;
- stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, 0, 0);
+ ethqos_set_serdes_speed(ethqos, SPEED_1000);
+ ethqos_pcs_set_inband(priv, true);
break;
case SPEED_100:
- val |= ETHQOS_MAC_CTRL_PORT_SEL | ETHQOS_MAC_CTRL_SPEED_MODE;
- if (ethqos->serdes_speed != SPEED_1000)
- phy_set_speed(ethqos->serdes_phy, SPEED_1000);
- ethqos->serdes_speed = SPEED_1000;
- stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, 0, 0);
+ ethqos_set_serdes_speed(ethqos, SPEED_1000);
+ ethqos_pcs_set_inband(priv, true);
break;
case SPEED_10:
- val |= ETHQOS_MAC_CTRL_PORT_SEL;
- val &= ~ETHQOS_MAC_CTRL_SPEED_MODE;
rgmii_updatel(ethqos, RGMII_CONFIG_SGMII_CLK_DVDR,
FIELD_PREP(RGMII_CONFIG_SGMII_CLK_DVDR,
SGMII_10M_RX_CLK_DVDR),
RGMII_IO_MACRO_CONFIG);
- if (ethqos->serdes_speed != SPEED_1000)
- phy_set_speed(ethqos->serdes_phy, ethqos->speed);
- ethqos->serdes_speed = SPEED_1000;
- stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, 0, 0);
+ ethqos_set_serdes_speed(ethqos, SPEED_1000);
+ ethqos_pcs_set_inband(priv, true);
break;
}
- writel(val, ethqos->mac_base + MAC_CTRL_REG);
-
- return val;
+ return 0;
}
-static int ethqos_configure(struct qcom_ethqos *ethqos)
+static int ethqos_configure(struct qcom_ethqos *ethqos, int speed)
{
- return ethqos->configure_func(ethqos);
+ return ethqos->configure_func(ethqos, speed);
}
-static void ethqos_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
+static void ethqos_fix_mac_speed(void *priv, int speed, unsigned int mode)
{
struct qcom_ethqos *ethqos = priv;
- ethqos->speed = speed;
+ qcom_ethqos_set_sgmii_loopback(ethqos, false);
ethqos_update_link_clk(ethqos, speed);
- ethqos_configure(ethqos);
+ ethqos_configure(ethqos, speed);
}
static int qcom_ethqos_serdes_powerup(struct net_device *ndev, void *priv)
@@ -690,7 +663,7 @@ static int qcom_ethqos_serdes_powerup(struct net_device *ndev, void *priv)
if (ret)
return ret;
- return phy_set_speed(ethqos->serdes_phy, ethqos->speed);
+ return phy_set_speed(ethqos->serdes_phy, ethqos->serdes_speed);
}
static void qcom_ethqos_serdes_powerdown(struct net_device *ndev, void *priv)
@@ -745,7 +718,7 @@ static void ethqos_ptp_clk_freq_config(struct stmmac_priv *priv)
netdev_err(priv->dev, "Failed to max out clk_ptp_ref: %d\n", err);
plat_dat->clk_ptp_rate = clk_get_rate(plat_dat->clk_ptp_ref);
- netdev_dbg(priv->dev, "PTP rate %d\n", plat_dat->clk_ptp_rate);
+ netdev_dbg(priv->dev, "PTP rate %lu\n", plat_dat->clk_ptp_rate);
}
static int qcom_ethqos_probe(struct platform_device *pdev)
@@ -775,9 +748,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
if (!ethqos)
return -ENOMEM;
- ret = of_get_phy_mode(np, &ethqos->phy_mode);
- if (ret)
- return dev_err_probe(dev, ret, "Failed to get phy mode\n");
+ ethqos->phy_mode = plat_dat->phy_interface;
switch (ethqos->phy_mode) {
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
@@ -785,6 +756,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
case PHY_INTERFACE_MODE_RGMII_TXID:
ethqos->configure_func = ethqos_configure_rgmii;
break;
+ case PHY_INTERFACE_MODE_2500BASEX:
case PHY_INTERFACE_MODE_SGMII:
ethqos->configure_func = ethqos_configure_sgmii;
break;
@@ -807,6 +779,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
ethqos->num_por = data->num_por;
ethqos->rgmii_config_loopback_en = data->rgmii_config_loopback_en;
ethqos->has_emac_ge_3 = data->has_emac_ge_3;
+ ethqos->needs_sgmii_loopback = data->needs_sgmii_loopback;
ethqos->link_clk = devm_clk_get(dev, data->link_clk_name ?: "rgmii");
if (IS_ERR(ethqos->link_clk))
@@ -826,7 +799,6 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(ethqos->serdes_phy),
"Failed to get serdes phy\n");
- ethqos->speed = SPEED_1000;
ethqos->serdes_speed = SPEED_1000;
ethqos_update_link_clk(ethqos, SPEED_1000);
ethqos_set_func_clk_en(ethqos);
@@ -835,7 +807,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
plat_dat->fix_mac_speed = ethqos_fix_mac_speed;
plat_dat->dump_debug_regs = rgmii_dump;
plat_dat->ptp_clk_freq_config = ethqos_ptp_clk_freq_config;
- plat_dat->has_gmac4 = 1;
+ plat_dat->core_type = DWMAC_CORE_GMAC4;
if (ethqos->has_emac_ge_3)
plat_dat->dwmac4_addrs = &data->dwmac4_addrs;
plat_dat->pmt = 1;
@@ -843,8 +815,8 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
plat_dat->flags |= STMMAC_FLAG_TSO_EN;
if (of_device_is_compatible(np, "qcom,qcs404-ethqos"))
plat_dat->flags |= STMMAC_FLAG_RX_CLK_RUNS_IN_LPI;
- if (data->has_integrated_pcs)
- plat_dat->flags |= STMMAC_FLAG_HAS_INTEGRATED_PCS;
+ if (data->dma_addr_width)
+ plat_dat->host_dma_width = data->dma_addr_width;
if (ethqos->serdes_phy) {
plat_dat->serdes_powerup = qcom_ethqos_serdes_powerup;
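
(Illustration, not part of the patch.) Several of the ethqos hunks above replace open-coded mdelay()/retry loops with read_poll_timeout_atomic() from <linux/iopoll.h>. The macro takes the read accessor, a variable to receive the value, the completion condition, the per-iteration delay and total timeout in microseconds, whether to delay before the first read, and finally the accessor's arguments; it returns 0 on success or -ETIMEDOUT. A minimal sketch matching the DLL-lock wait above (the example_ name is invented):

#include <linux/iopoll.h>
#include <linux/types.h>

static int example_wait_for_dll_lock(struct qcom_ethqos *ethqos)
{
	u32 val;

	/* Poll SDC4_STATUS via rgmii_readl() every 1 ms, for at most 1 s,
	 * until the DLL_LOCK bit is set; delay once before the first read.
	 */
	return read_poll_timeout_atomic(rgmii_readl, val,
					val & SDC4_STATUS_DLL_LOCK,
					1000, 1000000, true,
					ethqos, SDC4_STATUS);
}

The atomic variant busy-waits with udelay() rather than sleeping, which keeps the timing behaviour of the old mdelay()-based loops.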
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c
new file mode 100644
index 000000000000..be7f5eb2cdcf
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * dwmac-renesas-gbeth.c - DWMAC Specific Glue layer for Renesas GBETH
+ *
+ * The Rx and Tx clocks are supplied as follows for the GBETH IP.
+ *
+ * Rx / Tx
+ * -------+------------- on / off -------
+ * |
+ * | Rx-180 / Tx-180
+ * +---- not ---- on / off -------
+ *
+ * Copyright (C) 2025 Renesas Electronics Corporation
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pcs-rzn1-miic.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/types.h>
+
+#include "stmmac_platform.h"
+
+/**
+ * struct renesas_gbeth_of_data - OF data for Renesas GBETH
+ *
+ * @clks: Array of clock names
+ * @num_clks: Number of clocks
+ * @stmmac_flags: Flags for the stmmac driver
+ * @handle_reset: Flag to indicate if reset control is
+ * handled by the glue driver or core driver.
+ * @set_clk_tx_rate: Flag to indicate if Tx clock is fixed or
+ * set_clk_tx_rate is needed.
+ * @has_pcs: Flag to indicate if the MAC has a PCS
+ */
+struct renesas_gbeth_of_data {
+ const char * const *clks;
+ u8 num_clks;
+ u32 stmmac_flags;
+ bool handle_reset;
+ bool set_clk_tx_rate;
+ bool has_pcs;
+};
+
+struct renesas_gbeth {
+ const struct renesas_gbeth_of_data *of_data;
+ struct plat_stmmacenet_data *plat_dat;
+ struct reset_control *rstc;
+ struct device *dev;
+};
+
+static const char *const renesas_gbeth_clks[] = {
+ "tx", "tx-180", "rx", "rx-180",
+};
+
+static const char *const renesas_gmac_clks[] = {
+ "tx",
+};
+
+static int renesas_gmac_pcs_init(struct stmmac_priv *priv)
+{
+ struct device_node *np = priv->device->of_node;
+ struct device_node *pcs_node;
+ struct phylink_pcs *pcs;
+
+ pcs_node = of_parse_phandle(np, "pcs-handle", 0);
+ if (pcs_node) {
+ pcs = miic_create(priv->device, pcs_node);
+ of_node_put(pcs_node);
+ if (IS_ERR(pcs))
+ return PTR_ERR(pcs);
+
+ priv->hw->phylink_pcs = pcs;
+ }
+
+ return 0;
+}
+
+static void renesas_gmac_pcs_exit(struct stmmac_priv *priv)
+{
+ if (priv->hw->phylink_pcs)
+ miic_destroy(priv->hw->phylink_pcs);
+}
+
+static struct phylink_pcs *renesas_gmac_select_pcs(struct stmmac_priv *priv,
+ phy_interface_t interface)
+{
+ return priv->hw->phylink_pcs;
+}
+
+static int renesas_gbeth_init(struct device *dev, void *priv)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct renesas_gbeth *gbeth = priv;
+ int ret;
+
+ plat_dat = gbeth->plat_dat;
+
+ ret = reset_control_deassert(gbeth->rstc);
+ if (ret) {
+ dev_err(gbeth->dev, "Reset deassert failed\n");
+ return ret;
+ }
+
+ ret = clk_bulk_prepare_enable(plat_dat->num_clks,
+ plat_dat->clks);
+ if (ret)
+ reset_control_assert(gbeth->rstc);
+
+ return ret;
+}
+
+static void renesas_gbeth_exit(struct device *dev, void *priv)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct renesas_gbeth *gbeth = priv;
+ int ret;
+
+ plat_dat = gbeth->plat_dat;
+
+ clk_bulk_disable_unprepare(plat_dat->num_clks, plat_dat->clks);
+
+ ret = reset_control_assert(gbeth->rstc);
+ if (ret)
+ dev_err(gbeth->dev, "Reset assert failed\n");
+}
+
+static int renesas_gbeth_probe(struct platform_device *pdev)
+{
+ const struct renesas_gbeth_of_data *of_data;
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct device *dev = &pdev->dev;
+ struct renesas_gbeth *gbeth;
+ unsigned int i;
+ int err;
+
+ err = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (err)
+ return dev_err_probe(dev, err,
+ "failed to get resources\n");
+
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return dev_err_probe(dev, PTR_ERR(plat_dat),
+ "dt configuration failed\n");
+
+ gbeth = devm_kzalloc(dev, sizeof(*gbeth), GFP_KERNEL);
+ if (!gbeth)
+ return -ENOMEM;
+
+ of_data = of_device_get_match_data(&pdev->dev);
+ gbeth->of_data = of_data;
+
+ plat_dat->num_clks = of_data->num_clks;
+ plat_dat->clks = devm_kcalloc(dev, plat_dat->num_clks,
+ sizeof(*plat_dat->clks), GFP_KERNEL);
+ if (!plat_dat->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < plat_dat->num_clks; i++)
+ plat_dat->clks[i].id = of_data->clks[i];
+
+ err = devm_clk_bulk_get(dev, plat_dat->num_clks, plat_dat->clks);
+ if (err < 0)
+ return err;
+
+ plat_dat->clk_tx_i = stmmac_pltfr_find_clk(plat_dat, "tx");
+ if (!plat_dat->clk_tx_i)
+ return dev_err_probe(dev, -EINVAL,
+ "error finding tx clock\n");
+
+ if (of_data->handle_reset) {
+ gbeth->rstc = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(gbeth->rstc))
+ return PTR_ERR(gbeth->rstc);
+ }
+
+ gbeth->dev = dev;
+ gbeth->plat_dat = plat_dat;
+ plat_dat->bsp_priv = gbeth;
+ if (of_data->set_clk_tx_rate)
+ plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
+ plat_dat->init = renesas_gbeth_init;
+ plat_dat->exit = renesas_gbeth_exit;
+ plat_dat->flags |= STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP |
+ gbeth->of_data->stmmac_flags;
+ if (of_data->has_pcs) {
+ plat_dat->pcs_init = renesas_gmac_pcs_init;
+ plat_dat->pcs_exit = renesas_gmac_pcs_exit;
+ plat_dat->select_pcs = renesas_gmac_select_pcs;
+ }
+
+ return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
+}
+
+static const struct renesas_gbeth_of_data renesas_gbeth_of_data = {
+ .clks = renesas_gbeth_clks,
+ .num_clks = ARRAY_SIZE(renesas_gbeth_clks),
+ .handle_reset = true,
+ .set_clk_tx_rate = true,
+ .stmmac_flags = STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY |
+ STMMAC_FLAG_SPH_DISABLE,
+};
+
+static const struct renesas_gbeth_of_data renesas_gmac_of_data = {
+ .clks = renesas_gmac_clks,
+ .num_clks = ARRAY_SIZE(renesas_gmac_clks),
+ .stmmac_flags = STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY,
+ .has_pcs = true,
+};
+
+static const struct of_device_id renesas_gbeth_match[] = {
+ { .compatible = "renesas,r9a09g077-gbeth", .data = &renesas_gmac_of_data },
+ { .compatible = "renesas,rzv2h-gbeth", .data = &renesas_gbeth_of_data },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, renesas_gbeth_match);
+
+static struct platform_driver renesas_gbeth_driver = {
+ .probe = renesas_gbeth_probe,
+ .driver = {
+ .name = "renesas-gbeth",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = renesas_gbeth_match,
+ },
+};
+module_platform_driver(renesas_gbeth_driver);
+
+MODULE_AUTHOR("Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>");
+MODULE_DESCRIPTION("Renesas GBETH DWMAC Specific Glue layer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 382e8de1255d..0a95f54e725e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -8,14 +8,13 @@
*/
#include <linux/stmmac.h>
+#include <linux/hw_bitfield.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/phy.h>
#include <linux/of_net.h>
-#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/delay.h>
@@ -26,15 +25,26 @@
#include "stmmac_platform.h"
struct rk_priv_data;
+
+struct rk_reg_speed_data {
+ unsigned int rgmii_10;
+ unsigned int rgmii_100;
+ unsigned int rgmii_1000;
+ unsigned int rmii_10;
+ unsigned int rmii_100;
+};
+
struct rk_gmac_ops {
void (*set_to_rgmii)(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay);
void (*set_to_rmii)(struct rk_priv_data *bsp_priv);
- void (*set_rgmii_speed)(struct rk_priv_data *bsp_priv, int speed);
- void (*set_rmii_speed)(struct rk_priv_data *bsp_priv, int speed);
+ int (*set_speed)(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed);
void (*set_clock_selection)(struct rk_priv_data *bsp_priv, bool input,
bool enable);
void (*integrated_phy_powerup)(struct rk_priv_data *bsp_priv);
+ void (*integrated_phy_powerdown)(struct rk_priv_data *bsp_priv);
+ bool php_grf_required;
bool regs_valid;
u32 regs[];
};
@@ -58,11 +68,10 @@ enum rk_clocks_index {
};
struct rk_priv_data {
- struct platform_device *pdev;
+ struct device *dev;
phy_interface_t phy_iface;
int id;
struct regulator *regulator;
- bool suspended;
const struct rk_gmac_ops *ops;
bool clk_enabled;
@@ -71,7 +80,6 @@ struct rk_priv_data {
struct clk_bulk_data *clks;
int num_clks;
- struct clk *clk_mac;
struct clk *clk_phy;
struct reset_control *phy_reset;
@@ -83,73 +91,191 @@ struct rk_priv_data {
struct regmap *php_grf;
};
-#define HIWORD_UPDATE(val, mask, shift) \
- ((val) << (shift) | (mask) << ((shift) + 16))
+static int rk_set_reg_speed(struct rk_priv_data *bsp_priv,
+ const struct rk_reg_speed_data *rsd,
+ unsigned int reg, phy_interface_t interface,
+ int speed)
+{
+ unsigned int val;
+
+ if (phy_interface_mode_is_rgmii(interface)) {
+ if (speed == SPEED_10) {
+ val = rsd->rgmii_10;
+ } else if (speed == SPEED_100) {
+ val = rsd->rgmii_100;
+ } else if (speed == SPEED_1000) {
+ val = rsd->rgmii_1000;
+ } else {
+ /* Phylink will not allow inappropriate speeds for
+ * interface modes, so this should never happen.
+ */
+ return -EINVAL;
+ }
+ } else if (interface == PHY_INTERFACE_MODE_RMII) {
+ if (speed == SPEED_10) {
+ val = rsd->rmii_10;
+ } else if (speed == SPEED_100) {
+ val = rsd->rmii_100;
+ } else {
+ /* Phylink will not allow inappropriate speeds for
+ * interface modes, so this should never happen.
+ */
+ return -EINVAL;
+ }
+ } else {
+ /* This should never happen, as .get_interfaces() limits
+ * the interface modes that are supported to RGMII and/or
+ * RMII.
+ */
+ return -EINVAL;
+ }
+
+ regmap_write(bsp_priv->grf, reg, val);
+
+ return 0;
+
+}
+
+static int rk_set_clk_mac_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
+{
+ struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk;
+ long rate;
-#define GRF_BIT(nr) (BIT(nr) | BIT(nr+16))
-#define GRF_CLR_BIT(nr) (BIT(nr+16))
+ rate = rgmii_clock(speed);
+ if (rate < 0)
+ return rate;
+
+ return clk_set_rate(clk_mac_speed, rate);
+}
+
+#define GRF_FIELD(hi, lo, val) \
+ FIELD_PREP_WM16(GENMASK_U16(hi, lo), val)
+#define GRF_FIELD_CONST(hi, lo, val) \
+ FIELD_PREP_WM16_CONST(GENMASK_U16(hi, lo), val)
+
+#define GRF_BIT(nr) (BIT(nr) | BIT(nr+16))
+#define GRF_CLR_BIT(nr) (BIT(nr+16))
#define DELAY_ENABLE(soc, tx, rx) \
(((tx) ? soc##_GMAC_TXCLK_DLY_ENABLE : soc##_GMAC_TXCLK_DLY_DISABLE) | \
((rx) ? soc##_GMAC_RXCLK_DLY_ENABLE : soc##_GMAC_RXCLK_DLY_DISABLE))
+#define RK_GRF_MACPHY_CON0 0xb00
+#define RK_GRF_MACPHY_CON1 0xb04
+#define RK_GRF_MACPHY_CON2 0xb08
+#define RK_GRF_MACPHY_CON3 0xb0c
+
+#define RK_MACPHY_ENABLE GRF_BIT(0)
+#define RK_MACPHY_DISABLE GRF_CLR_BIT(0)
+#define RK_MACPHY_CFG_CLK_50M GRF_BIT(14)
+#define RK_GMAC2PHY_RMII_MODE GRF_FIELD(7, 6, 1)
+#define RK_GRF_CON2_MACPHY_ID GRF_FIELD(15, 0, 0x1234)
+#define RK_GRF_CON3_MACPHY_ID GRF_FIELD(5, 0, 0x35)
+
+static void rk_gmac_integrated_ephy_powerup(struct rk_priv_data *priv)
+{
+ regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_CFG_CLK_50M);
+ regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_GMAC2PHY_RMII_MODE);
+
+ regmap_write(priv->grf, RK_GRF_MACPHY_CON2, RK_GRF_CON2_MACPHY_ID);
+ regmap_write(priv->grf, RK_GRF_MACPHY_CON3, RK_GRF_CON3_MACPHY_ID);
+
+ if (priv->phy_reset) {
+ /* PHY needs to be disabled before trying to reset it */
+ regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_DISABLE);
+ if (priv->phy_reset)
+ reset_control_assert(priv->phy_reset);
+ usleep_range(10, 20);
+ if (priv->phy_reset)
+ reset_control_deassert(priv->phy_reset);
+ usleep_range(10, 20);
+ regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_ENABLE);
+ msleep(30);
+ }
+}
+
+static void rk_gmac_integrated_ephy_powerdown(struct rk_priv_data *priv)
+{
+ regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_DISABLE);
+ if (priv->phy_reset)
+ reset_control_assert(priv->phy_reset);
+}
+
+#define RK_FEPHY_SHUTDOWN GRF_BIT(1)
+#define RK_FEPHY_POWERUP GRF_CLR_BIT(1)
+#define RK_FEPHY_INTERNAL_RMII_SEL GRF_BIT(6)
+#define RK_FEPHY_24M_CLK_SEL GRF_FIELD(9, 8, 3)
+#define RK_FEPHY_PHY_ID GRF_BIT(11)
+
+static void rk_gmac_integrated_fephy_powerup(struct rk_priv_data *priv,
+ unsigned int reg)
+{
+ reset_control_assert(priv->phy_reset);
+ usleep_range(20, 30);
+
+ regmap_write(priv->grf, reg,
+ RK_FEPHY_POWERUP |
+ RK_FEPHY_INTERNAL_RMII_SEL |
+ RK_FEPHY_24M_CLK_SEL |
+ RK_FEPHY_PHY_ID);
+ usleep_range(10000, 12000);
+
+ reset_control_deassert(priv->phy_reset);
+ usleep_range(50000, 60000);
+}
+
+static void rk_gmac_integrated_fephy_powerdown(struct rk_priv_data *priv,
+ unsigned int reg)
+{
+ regmap_write(priv->grf, reg, RK_FEPHY_SHUTDOWN);
+}
+
#define PX30_GRF_GMAC_CON1 0x0904
/* PX30_GRF_GMAC_CON1 */
-#define PX30_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | \
- GRF_BIT(6))
+#define PX30_GMAC_PHY_INTF_SEL(val) GRF_FIELD(6, 4, val)
#define PX30_GMAC_SPEED_10M GRF_CLR_BIT(2)
#define PX30_GMAC_SPEED_100M GRF_BIT(2)
static void px30_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1,
- PX30_GMAC_PHY_INTF_SEL_RMII);
+ PX30_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII));
}
-static void px30_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+static int px30_set_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
{
struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk;
- struct device *dev = &bsp_priv->pdev->dev;
- int ret;
+ struct device *dev = bsp_priv->dev;
+ unsigned int con1;
+ long rate;
if (!clk_mac_speed) {
dev_err(dev, "%s: Missing clk_mac_speed clock\n", __func__);
- return;
+ return -EINVAL;
}
if (speed == 10) {
- regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1,
- PX30_GMAC_SPEED_10M);
-
- ret = clk_set_rate(clk_mac_speed, 2500000);
- if (ret)
- dev_err(dev, "%s: set clk_mac_speed rate 2500000 failed: %d\n",
- __func__, ret);
+ con1 = PX30_GMAC_SPEED_10M;
+ rate = 2500000;
} else if (speed == 100) {
- regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1,
- PX30_GMAC_SPEED_100M);
-
- ret = clk_set_rate(clk_mac_speed, 25000000);
- if (ret)
- dev_err(dev, "%s: set clk_mac_speed rate 25000000 failed: %d\n",
- __func__, ret);
-
+ con1 = PX30_GMAC_SPEED_100M;
+ rate = 25000000;
} else {
dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
+ return -EINVAL;
}
+
+ regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1, con1);
+
+ return clk_set_rate(clk_mac_speed, rate);
}
static const struct rk_gmac_ops px30_ops = {
.set_to_rmii = px30_set_to_rmii,
- .set_rmii_speed = px30_set_rmii_speed,
+ .set_speed = px30_set_speed,
};
#define RK3128_GRF_MAC_CON0 0x0168
@@ -160,38 +286,28 @@ static const struct rk_gmac_ops px30_ops = {
#define RK3128_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(14)
#define RK3128_GMAC_RXCLK_DLY_ENABLE GRF_BIT(15)
#define RK3128_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
-#define RK3128_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 7)
-#define RK3128_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+#define RK3128_GMAC_CLK_RX_DL_CFG(val) GRF_FIELD(13, 7, val)
+#define RK3128_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val)
/* RK3128_GRF_MAC_CON1 */
-#define RK3128_GMAC_PHY_INTF_SEL_RGMII \
- (GRF_BIT(6) | GRF_CLR_BIT(7) | GRF_CLR_BIT(8))
-#define RK3128_GMAC_PHY_INTF_SEL_RMII \
- (GRF_CLR_BIT(6) | GRF_CLR_BIT(7) | GRF_BIT(8))
+#define RK3128_GMAC_PHY_INTF_SEL(val) GRF_FIELD(8, 6, val)
#define RK3128_GMAC_FLOW_CTRL GRF_BIT(9)
#define RK3128_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(9)
#define RK3128_GMAC_SPEED_10M GRF_CLR_BIT(10)
#define RK3128_GMAC_SPEED_100M GRF_BIT(10)
#define RK3128_GMAC_RMII_CLK_25M GRF_BIT(11)
#define RK3128_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(11)
-#define RK3128_GMAC_CLK_125M (GRF_CLR_BIT(12) | GRF_CLR_BIT(13))
-#define RK3128_GMAC_CLK_25M (GRF_BIT(12) | GRF_BIT(13))
-#define RK3128_GMAC_CLK_2_5M (GRF_CLR_BIT(12) | GRF_BIT(13))
+#define RK3128_GMAC_CLK_125M GRF_FIELD_CONST(13, 12, 0)
+#define RK3128_GMAC_CLK_25M GRF_FIELD_CONST(13, 12, 3)
+#define RK3128_GMAC_CLK_2_5M GRF_FIELD_CONST(13, 12, 2)
#define RK3128_GMAC_RMII_MODE GRF_BIT(14)
#define RK3128_GMAC_RMII_MODE_CLR GRF_CLR_BIT(14)
static void rk3128_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
- RK3128_GMAC_PHY_INTF_SEL_RGMII |
+ RK3128_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
RK3128_GMAC_RMII_MODE_CLR);
regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON0,
DELAY_ENABLE(RK3128, tx_delay, rx_delay) |
@@ -201,66 +317,30 @@ static void rk3128_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3128_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
- RK3128_GMAC_PHY_INTF_SEL_RMII | RK3128_GMAC_RMII_MODE);
+ RK3128_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII) |
+ RK3128_GMAC_RMII_MODE);
}
-static void rk3128_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
-{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
- if (speed == 10)
- regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
- RK3128_GMAC_CLK_2_5M);
- else if (speed == 100)
- regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
- RK3128_GMAC_CLK_25M);
- else if (speed == 1000)
- regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
- RK3128_GMAC_CLK_125M);
- else
- dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
-}
+static const struct rk_reg_speed_data rk3128_reg_speed_data = {
+ .rgmii_10 = RK3128_GMAC_CLK_2_5M,
+ .rgmii_100 = RK3128_GMAC_CLK_25M,
+ .rgmii_1000 = RK3128_GMAC_CLK_125M,
+ .rmii_10 = RK3128_GMAC_RMII_CLK_2_5M | RK3128_GMAC_SPEED_10M,
+ .rmii_100 = RK3128_GMAC_RMII_CLK_25M | RK3128_GMAC_SPEED_100M,
+};
-static void rk3128_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+static int rk3128_set_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
- if (speed == 10) {
- regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
- RK3128_GMAC_RMII_CLK_2_5M |
- RK3128_GMAC_SPEED_10M);
- } else if (speed == 100) {
- regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
- RK3128_GMAC_RMII_CLK_25M |
- RK3128_GMAC_SPEED_100M);
- } else {
- dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
- }
+ return rk_set_reg_speed(bsp_priv, &rk3128_reg_speed_data,
+ RK3128_GRF_MAC_CON1, interface, speed);
}
static const struct rk_gmac_ops rk3128_ops = {
.set_to_rgmii = rk3128_set_to_rgmii,
.set_to_rmii = rk3128_set_to_rmii,
- .set_rgmii_speed = rk3128_set_rgmii_speed,
- .set_rmii_speed = rk3128_set_rmii_speed,
+ .set_speed = rk3128_set_speed,
};
#define RK3228_GRF_MAC_CON0 0x0900
@@ -269,23 +349,20 @@ static const struct rk_gmac_ops rk3128_ops = {
#define RK3228_GRF_CON_MUX 0x50
/* RK3228_GRF_MAC_CON0 */
-#define RK3228_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 7)
-#define RK3228_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+#define RK3228_GMAC_CLK_RX_DL_CFG(val) GRF_FIELD(13, 7, val)
+#define RK3228_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val)
/* RK3228_GRF_MAC_CON1 */
-#define RK3228_GMAC_PHY_INTF_SEL_RGMII \
- (GRF_BIT(4) | GRF_CLR_BIT(5) | GRF_CLR_BIT(6))
-#define RK3228_GMAC_PHY_INTF_SEL_RMII \
- (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | GRF_BIT(6))
+#define RK3228_GMAC_PHY_INTF_SEL(val) GRF_FIELD(6, 4, val)
#define RK3228_GMAC_FLOW_CTRL GRF_BIT(3)
#define RK3228_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3)
#define RK3228_GMAC_SPEED_10M GRF_CLR_BIT(2)
#define RK3228_GMAC_SPEED_100M GRF_BIT(2)
#define RK3228_GMAC_RMII_CLK_25M GRF_BIT(7)
#define RK3228_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(7)
-#define RK3228_GMAC_CLK_125M (GRF_CLR_BIT(8) | GRF_CLR_BIT(9))
-#define RK3228_GMAC_CLK_25M (GRF_BIT(8) | GRF_BIT(9))
-#define RK3228_GMAC_CLK_2_5M (GRF_CLR_BIT(8) | GRF_BIT(9))
+#define RK3228_GMAC_CLK_125M GRF_FIELD_CONST(9, 8, 0)
+#define RK3228_GMAC_CLK_25M GRF_FIELD_CONST(9, 8, 3)
+#define RK3228_GMAC_CLK_2_5M GRF_FIELD_CONST(9, 8, 2)
#define RK3228_GMAC_RMII_MODE GRF_BIT(10)
#define RK3228_GMAC_RMII_MODE_CLR GRF_CLR_BIT(10)
#define RK3228_GMAC_TXCLK_DLY_ENABLE GRF_BIT(0)
@@ -299,15 +376,8 @@ static const struct rk_gmac_ops rk3128_ops = {
static void rk3228_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
- RK3228_GMAC_PHY_INTF_SEL_RGMII |
+ RK3228_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
RK3228_GMAC_RMII_MODE_CLR |
DELAY_ENABLE(RK3228, tx_delay, rx_delay));
@@ -318,95 +388,59 @@ static void rk3228_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3228_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
- RK3228_GMAC_PHY_INTF_SEL_RMII |
+ RK3228_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII) |
RK3228_GMAC_RMII_MODE);
/* set MAC to RMII mode */
regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1, GRF_BIT(11));
}
-static void rk3228_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
-{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
- if (speed == 10)
- regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
- RK3228_GMAC_CLK_2_5M);
- else if (speed == 100)
- regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
- RK3228_GMAC_CLK_25M);
- else if (speed == 1000)
- regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
- RK3228_GMAC_CLK_125M);
- else
- dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
-}
+static const struct rk_reg_speed_data rk3228_reg_speed_data = {
+ .rgmii_10 = RK3228_GMAC_CLK_2_5M,
+ .rgmii_100 = RK3228_GMAC_CLK_25M,
+ .rgmii_1000 = RK3228_GMAC_CLK_125M,
+ .rmii_10 = RK3228_GMAC_RMII_CLK_2_5M | RK3228_GMAC_SPEED_10M,
+ .rmii_100 = RK3228_GMAC_RMII_CLK_25M | RK3228_GMAC_SPEED_100M,
+};
-static void rk3228_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+static int rk3228_set_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
- if (speed == 10)
- regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
- RK3228_GMAC_RMII_CLK_2_5M |
- RK3228_GMAC_SPEED_10M);
- else if (speed == 100)
- regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
- RK3228_GMAC_RMII_CLK_25M |
- RK3228_GMAC_SPEED_100M);
- else
- dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
+ return rk_set_reg_speed(bsp_priv, &rk3228_reg_speed_data,
+ RK3228_GRF_MAC_CON1, interface, speed);
}
static void rk3228_integrated_phy_powerup(struct rk_priv_data *priv)
{
regmap_write(priv->grf, RK3228_GRF_CON_MUX,
RK3228_GRF_CON_MUX_GMAC_INTEGRATED_PHY);
+
+ rk_gmac_integrated_ephy_powerup(priv);
}
static const struct rk_gmac_ops rk3228_ops = {
.set_to_rgmii = rk3228_set_to_rgmii,
.set_to_rmii = rk3228_set_to_rmii,
- .set_rgmii_speed = rk3228_set_rgmii_speed,
- .set_rmii_speed = rk3228_set_rmii_speed,
- .integrated_phy_powerup = rk3228_integrated_phy_powerup,
+ .set_speed = rk3228_set_speed,
+ .integrated_phy_powerup = rk3228_integrated_phy_powerup,
+ .integrated_phy_powerdown = rk_gmac_integrated_ephy_powerdown,
};
#define RK3288_GRF_SOC_CON1 0x0248
#define RK3288_GRF_SOC_CON3 0x0250
/*RK3288_GRF_SOC_CON1*/
-#define RK3288_GMAC_PHY_INTF_SEL_RGMII (GRF_BIT(6) | GRF_CLR_BIT(7) | \
- GRF_CLR_BIT(8))
-#define RK3288_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(6) | GRF_CLR_BIT(7) | \
- GRF_BIT(8))
+#define RK3288_GMAC_PHY_INTF_SEL(val) GRF_FIELD(8, 6, val)
#define RK3288_GMAC_FLOW_CTRL GRF_BIT(9)
#define RK3288_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(9)
#define RK3288_GMAC_SPEED_10M GRF_CLR_BIT(10)
#define RK3288_GMAC_SPEED_100M GRF_BIT(10)
#define RK3288_GMAC_RMII_CLK_25M GRF_BIT(11)
#define RK3288_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(11)
-#define RK3288_GMAC_CLK_125M (GRF_CLR_BIT(12) | GRF_CLR_BIT(13))
-#define RK3288_GMAC_CLK_25M (GRF_BIT(12) | GRF_BIT(13))
-#define RK3288_GMAC_CLK_2_5M (GRF_CLR_BIT(12) | GRF_BIT(13))
+#define RK3288_GMAC_CLK_125M GRF_FIELD_CONST(13, 12, 0)
+#define RK3288_GMAC_CLK_25M GRF_FIELD_CONST(13, 12, 3)
+#define RK3288_GMAC_CLK_2_5M GRF_FIELD_CONST(13, 12, 2)
#define RK3288_GMAC_RMII_MODE GRF_BIT(14)
#define RK3288_GMAC_RMII_MODE_CLR GRF_CLR_BIT(14)
@@ -415,21 +449,14 @@ static const struct rk_gmac_ops rk3228_ops = {
#define RK3288_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(14)
#define RK3288_GMAC_RXCLK_DLY_ENABLE GRF_BIT(15)
#define RK3288_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
-#define RK3288_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 7)
-#define RK3288_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+#define RK3288_GMAC_CLK_RX_DL_CFG(val) GRF_FIELD(13, 7, val)
+#define RK3288_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val)
static void rk3288_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
- RK3288_GMAC_PHY_INTF_SEL_RGMII |
+ RK3288_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
RK3288_GMAC_RMII_MODE_CLR);
regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON3,
DELAY_ENABLE(RK3288, tx_delay, rx_delay) |
@@ -439,73 +466,36 @@ static void rk3288_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3288_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
- RK3288_GMAC_PHY_INTF_SEL_RMII | RK3288_GMAC_RMII_MODE);
+ RK3288_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII) |
+ RK3288_GMAC_RMII_MODE);
}
-static void rk3288_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
-{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
- if (speed == 10)
- regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
- RK3288_GMAC_CLK_2_5M);
- else if (speed == 100)
- regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
- RK3288_GMAC_CLK_25M);
- else if (speed == 1000)
- regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
- RK3288_GMAC_CLK_125M);
- else
- dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
-}
+static const struct rk_reg_speed_data rk3288_reg_speed_data = {
+ .rgmii_10 = RK3288_GMAC_CLK_2_5M,
+ .rgmii_100 = RK3288_GMAC_CLK_25M,
+ .rgmii_1000 = RK3288_GMAC_CLK_125M,
+ .rmii_10 = RK3288_GMAC_RMII_CLK_2_5M | RK3288_GMAC_SPEED_10M,
+ .rmii_100 = RK3288_GMAC_RMII_CLK_25M | RK3288_GMAC_SPEED_100M,
+};
-static void rk3288_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+static int rk3288_set_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
- if (speed == 10) {
- regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
- RK3288_GMAC_RMII_CLK_2_5M |
- RK3288_GMAC_SPEED_10M);
- } else if (speed == 100) {
- regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
- RK3288_GMAC_RMII_CLK_25M |
- RK3288_GMAC_SPEED_100M);
- } else {
- dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
- }
+ return rk_set_reg_speed(bsp_priv, &rk3288_reg_speed_data,
+ RK3288_GRF_SOC_CON1, interface, speed);
}
static const struct rk_gmac_ops rk3288_ops = {
.set_to_rgmii = rk3288_set_to_rgmii,
.set_to_rmii = rk3288_set_to_rmii,
- .set_rgmii_speed = rk3288_set_rgmii_speed,
- .set_rmii_speed = rk3288_set_rmii_speed,
+ .set_speed = rk3288_set_speed,
};
#define RK3308_GRF_MAC_CON0 0x04a0
/* RK3308_GRF_MAC_CON0 */
-#define RK3308_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(2) | GRF_CLR_BIT(3) | \
- GRF_BIT(4))
+#define RK3308_GMAC_PHY_INTF_SEL(val) GRF_FIELD(4, 2, val)
#define RK3308_GMAC_FLOW_CTRL GRF_BIT(3)
#define RK3308_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3)
#define RK3308_GMAC_SPEED_10M GRF_CLR_BIT(0)
@@ -513,40 +503,25 @@ static const struct rk_gmac_ops rk3288_ops = {
static void rk3308_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RK3308_GRF_MAC_CON0,
- RK3308_GMAC_PHY_INTF_SEL_RMII);
+ RK3308_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII));
}
-static void rk3308_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
-{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
+static const struct rk_reg_speed_data rk3308_reg_speed_data = {
+ .rmii_10 = RK3308_GMAC_SPEED_10M,
+ .rmii_100 = RK3308_GMAC_SPEED_100M,
+};
- if (speed == 10) {
- regmap_write(bsp_priv->grf, RK3308_GRF_MAC_CON0,
- RK3308_GMAC_SPEED_10M);
- } else if (speed == 100) {
- regmap_write(bsp_priv->grf, RK3308_GRF_MAC_CON0,
- RK3308_GMAC_SPEED_100M);
- } else {
- dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
- }
+static int rk3308_set_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
+{
+ return rk_set_reg_speed(bsp_priv, &rk3308_reg_speed_data,
+ RK3308_GRF_MAC_CON0, interface, speed);
}
static const struct rk_gmac_ops rk3308_ops = {
.set_to_rmii = rk3308_set_to_rmii,
- .set_rmii_speed = rk3308_set_rmii_speed,
+ .set_speed = rk3308_set_speed,
};
#define RK3328_GRF_MAC_CON0 0x0900
@@ -555,29 +530,24 @@ static const struct rk_gmac_ops rk3308_ops = {
#define RK3328_GRF_MACPHY_CON1 0xb04
/* RK3328_GRF_MAC_CON0 */
-#define RK3328_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 7)
-#define RK3328_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+#define RK3328_GMAC_CLK_RX_DL_CFG(val) GRF_FIELD(13, 7, val)
+#define RK3328_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val)
/* RK3328_GRF_MAC_CON1 */
-#define RK3328_GMAC_PHY_INTF_SEL_RGMII \
- (GRF_BIT(4) | GRF_CLR_BIT(5) | GRF_CLR_BIT(6))
-#define RK3328_GMAC_PHY_INTF_SEL_RMII \
- (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | GRF_BIT(6))
+#define RK3328_GMAC_PHY_INTF_SEL(val) GRF_FIELD(6, 4, val)
#define RK3328_GMAC_FLOW_CTRL GRF_BIT(3)
#define RK3328_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3)
#define RK3328_GMAC_SPEED_10M GRF_CLR_BIT(2)
#define RK3328_GMAC_SPEED_100M GRF_BIT(2)
#define RK3328_GMAC_RMII_CLK_25M GRF_BIT(7)
#define RK3328_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(7)
-#define RK3328_GMAC_CLK_125M (GRF_CLR_BIT(11) | GRF_CLR_BIT(12))
-#define RK3328_GMAC_CLK_25M (GRF_BIT(11) | GRF_BIT(12))
-#define RK3328_GMAC_CLK_2_5M (GRF_CLR_BIT(11) | GRF_BIT(12))
+#define RK3328_GMAC_CLK_125M GRF_FIELD_CONST(12, 11, 0)
+#define RK3328_GMAC_CLK_25M GRF_FIELD_CONST(12, 11, 3)
+#define RK3328_GMAC_CLK_2_5M GRF_FIELD_CONST(12, 11, 2)
#define RK3328_GMAC_RMII_MODE GRF_BIT(9)
#define RK3328_GMAC_RMII_MODE_CLR GRF_CLR_BIT(9)
#define RK3328_GMAC_TXCLK_DLY_ENABLE GRF_BIT(0)
-#define RK3328_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(0)
#define RK3328_GMAC_RXCLK_DLY_ENABLE GRF_BIT(1)
-#define RK3328_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(0)
/* RK3328_GRF_MACPHY_CON1 */
#define RK3328_MACPHY_RMII_MODE GRF_BIT(9)
@@ -585,15 +555,8 @@ static const struct rk_gmac_ops rk3308_ops = {
static void rk3328_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1,
- RK3328_GMAC_PHY_INTF_SEL_RGMII |
+ RK3328_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
RK3328_GMAC_RMII_MODE_CLR |
RK3328_GMAC_RXCLK_DLY_ENABLE |
RK3328_GMAC_TXCLK_DLY_ENABLE);
@@ -605,100 +568,68 @@ static void rk3328_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3328_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
unsigned int reg;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
reg = bsp_priv->integrated_phy ? RK3328_GRF_MAC_CON2 :
RK3328_GRF_MAC_CON1;
regmap_write(bsp_priv->grf, reg,
- RK3328_GMAC_PHY_INTF_SEL_RMII |
+ RK3328_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII) |
RK3328_GMAC_RMII_MODE);
}
-static void rk3328_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
-{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
- if (speed == 10)
- regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1,
- RK3328_GMAC_CLK_2_5M);
- else if (speed == 100)
- regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1,
- RK3328_GMAC_CLK_25M);
- else if (speed == 1000)
- regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1,
- RK3328_GMAC_CLK_125M);
- else
- dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
-}
+static const struct rk_reg_speed_data rk3328_reg_speed_data = {
+ .rgmii_10 = RK3328_GMAC_CLK_2_5M,
+ .rgmii_100 = RK3328_GMAC_CLK_25M,
+ .rgmii_1000 = RK3328_GMAC_CLK_125M,
+ .rmii_10 = RK3328_GMAC_RMII_CLK_2_5M | RK3328_GMAC_SPEED_10M,
+ .rmii_100 = RK3328_GMAC_RMII_CLK_25M | RK3328_GMAC_SPEED_100M,
+};
-static void rk3328_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+static int rk3328_set_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
{
- struct device *dev = &bsp_priv->pdev->dev;
unsigned int reg;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
- reg = bsp_priv->integrated_phy ? RK3328_GRF_MAC_CON2 :
- RK3328_GRF_MAC_CON1;
-
- if (speed == 10)
- regmap_write(bsp_priv->grf, reg,
- RK3328_GMAC_RMII_CLK_2_5M |
- RK3328_GMAC_SPEED_10M);
- else if (speed == 100)
- regmap_write(bsp_priv->grf, reg,
- RK3328_GMAC_RMII_CLK_25M |
- RK3328_GMAC_SPEED_100M);
+ if (interface == PHY_INTERFACE_MODE_RMII && bsp_priv->integrated_phy)
+ reg = RK3328_GRF_MAC_CON2;
else
- dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
+ reg = RK3328_GRF_MAC_CON1;
+
+ return rk_set_reg_speed(bsp_priv, &rk3328_reg_speed_data, reg,
+ interface, speed);
}
static void rk3328_integrated_phy_powerup(struct rk_priv_data *priv)
{
regmap_write(priv->grf, RK3328_GRF_MACPHY_CON1,
RK3328_MACPHY_RMII_MODE);
+
+ rk_gmac_integrated_ephy_powerup(priv);
}
static const struct rk_gmac_ops rk3328_ops = {
.set_to_rgmii = rk3328_set_to_rgmii,
.set_to_rmii = rk3328_set_to_rmii,
- .set_rgmii_speed = rk3328_set_rgmii_speed,
- .set_rmii_speed = rk3328_set_rmii_speed,
- .integrated_phy_powerup = rk3328_integrated_phy_powerup,
+ .set_speed = rk3328_set_speed,
+ .integrated_phy_powerup = rk3328_integrated_phy_powerup,
+ .integrated_phy_powerdown = rk_gmac_integrated_ephy_powerdown,
};
#define RK3366_GRF_SOC_CON6 0x0418
#define RK3366_GRF_SOC_CON7 0x041c
/* RK3366_GRF_SOC_CON6 */
-#define RK3366_GMAC_PHY_INTF_SEL_RGMII (GRF_BIT(9) | GRF_CLR_BIT(10) | \
- GRF_CLR_BIT(11))
-#define RK3366_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(9) | GRF_CLR_BIT(10) | \
- GRF_BIT(11))
+#define RK3366_GMAC_PHY_INTF_SEL(val) GRF_FIELD(11, 9, val)
#define RK3366_GMAC_FLOW_CTRL GRF_BIT(8)
#define RK3366_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(8)
#define RK3366_GMAC_SPEED_10M GRF_CLR_BIT(7)
#define RK3366_GMAC_SPEED_100M GRF_BIT(7)
#define RK3366_GMAC_RMII_CLK_25M GRF_BIT(3)
#define RK3366_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(3)
-#define RK3366_GMAC_CLK_125M (GRF_CLR_BIT(4) | GRF_CLR_BIT(5))
-#define RK3366_GMAC_CLK_25M (GRF_BIT(4) | GRF_BIT(5))
-#define RK3366_GMAC_CLK_2_5M (GRF_CLR_BIT(4) | GRF_BIT(5))
+#define RK3366_GMAC_CLK_125M GRF_FIELD_CONST(5, 4, 0)
+#define RK3366_GMAC_CLK_25M GRF_FIELD_CONST(5, 4, 3)
+#define RK3366_GMAC_CLK_2_5M GRF_FIELD_CONST(5, 4, 2)
#define RK3366_GMAC_RMII_MODE GRF_BIT(6)
#define RK3366_GMAC_RMII_MODE_CLR GRF_CLR_BIT(6)
@@ -707,21 +638,14 @@ static const struct rk_gmac_ops rk3328_ops = {
#define RK3366_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(7)
#define RK3366_GMAC_RXCLK_DLY_ENABLE GRF_BIT(15)
#define RK3366_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
-#define RK3366_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8)
-#define RK3366_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+#define RK3366_GMAC_CLK_RX_DL_CFG(val) GRF_FIELD(14, 8, val)
+#define RK3366_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val)
static void rk3366_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
- RK3366_GMAC_PHY_INTF_SEL_RGMII |
+ RK3366_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
RK3366_GMAC_RMII_MODE_CLR);
regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON7,
DELAY_ENABLE(RK3366, tx_delay, rx_delay) |
@@ -731,85 +655,46 @@ static void rk3366_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3366_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
- RK3366_GMAC_PHY_INTF_SEL_RMII | RK3366_GMAC_RMII_MODE);
+ RK3366_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII) |
+ RK3366_GMAC_RMII_MODE);
}
-static void rk3366_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
-{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
- if (speed == 10)
- regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
- RK3366_GMAC_CLK_2_5M);
- else if (speed == 100)
- regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
- RK3366_GMAC_CLK_25M);
- else if (speed == 1000)
- regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
- RK3366_GMAC_CLK_125M);
- else
- dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
-}
+static const struct rk_reg_speed_data rk3366_reg_speed_data = {
+ .rgmii_10 = RK3366_GMAC_CLK_2_5M,
+ .rgmii_100 = RK3366_GMAC_CLK_25M,
+ .rgmii_1000 = RK3366_GMAC_CLK_125M,
+ .rmii_10 = RK3366_GMAC_RMII_CLK_2_5M | RK3366_GMAC_SPEED_10M,
+ .rmii_100 = RK3366_GMAC_RMII_CLK_25M | RK3366_GMAC_SPEED_100M,
+};
-static void rk3366_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+static int rk3366_set_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
- if (speed == 10) {
- regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
- RK3366_GMAC_RMII_CLK_2_5M |
- RK3366_GMAC_SPEED_10M);
- } else if (speed == 100) {
- regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
- RK3366_GMAC_RMII_CLK_25M |
- RK3366_GMAC_SPEED_100M);
- } else {
- dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
- }
+ return rk_set_reg_speed(bsp_priv, &rk3366_reg_speed_data,
+ RK3366_GRF_SOC_CON6, interface, speed);
}
static const struct rk_gmac_ops rk3366_ops = {
.set_to_rgmii = rk3366_set_to_rgmii,
.set_to_rmii = rk3366_set_to_rmii,
- .set_rgmii_speed = rk3366_set_rgmii_speed,
- .set_rmii_speed = rk3366_set_rmii_speed,
+ .set_speed = rk3366_set_speed,
};
#define RK3368_GRF_SOC_CON15 0x043c
#define RK3368_GRF_SOC_CON16 0x0440
/* RK3368_GRF_SOC_CON15 */
-#define RK3368_GMAC_PHY_INTF_SEL_RGMII (GRF_BIT(9) | GRF_CLR_BIT(10) | \
- GRF_CLR_BIT(11))
-#define RK3368_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(9) | GRF_CLR_BIT(10) | \
- GRF_BIT(11))
+#define RK3368_GMAC_PHY_INTF_SEL(val) GRF_FIELD(11, 9, val)
#define RK3368_GMAC_FLOW_CTRL GRF_BIT(8)
#define RK3368_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(8)
#define RK3368_GMAC_SPEED_10M GRF_CLR_BIT(7)
#define RK3368_GMAC_SPEED_100M GRF_BIT(7)
#define RK3368_GMAC_RMII_CLK_25M GRF_BIT(3)
#define RK3368_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(3)
-#define RK3368_GMAC_CLK_125M (GRF_CLR_BIT(4) | GRF_CLR_BIT(5))
-#define RK3368_GMAC_CLK_25M (GRF_BIT(4) | GRF_BIT(5))
-#define RK3368_GMAC_CLK_2_5M (GRF_CLR_BIT(4) | GRF_BIT(5))
+#define RK3368_GMAC_CLK_125M GRF_FIELD_CONST(5, 4, 0)
+#define RK3368_GMAC_CLK_25M GRF_FIELD_CONST(5, 4, 3)
+#define RK3368_GMAC_CLK_2_5M GRF_FIELD_CONST(5, 4, 2)
#define RK3368_GMAC_RMII_MODE GRF_BIT(6)
#define RK3368_GMAC_RMII_MODE_CLR GRF_CLR_BIT(6)
@@ -818,21 +703,14 @@ static const struct rk_gmac_ops rk3366_ops = {
#define RK3368_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(7)
#define RK3368_GMAC_RXCLK_DLY_ENABLE GRF_BIT(15)
#define RK3368_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
-#define RK3368_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8)
-#define RK3368_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+#define RK3368_GMAC_CLK_RX_DL_CFG(val) GRF_FIELD(14, 8, val)
+#define RK3368_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val)
static void rk3368_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
- RK3368_GMAC_PHY_INTF_SEL_RGMII |
+ RK3368_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
RK3368_GMAC_RMII_MODE_CLR);
regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON16,
DELAY_ENABLE(RK3368, tx_delay, rx_delay) |
@@ -842,85 +720,46 @@ static void rk3368_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3368_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
- RK3368_GMAC_PHY_INTF_SEL_RMII | RK3368_GMAC_RMII_MODE);
+ RK3368_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII) |
+ RK3368_GMAC_RMII_MODE);
}
-static void rk3368_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
-{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
- if (speed == 10)
- regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
- RK3368_GMAC_CLK_2_5M);
- else if (speed == 100)
- regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
- RK3368_GMAC_CLK_25M);
- else if (speed == 1000)
- regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
- RK3368_GMAC_CLK_125M);
- else
- dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
-}
+static const struct rk_reg_speed_data rk3368_reg_speed_data = {
+ .rgmii_10 = RK3368_GMAC_CLK_2_5M,
+ .rgmii_100 = RK3368_GMAC_CLK_25M,
+ .rgmii_1000 = RK3368_GMAC_CLK_125M,
+ .rmii_10 = RK3368_GMAC_RMII_CLK_2_5M | RK3368_GMAC_SPEED_10M,
+ .rmii_100 = RK3368_GMAC_RMII_CLK_25M | RK3368_GMAC_SPEED_100M,
+};
-static void rk3368_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+static int rk3368_set_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
- if (speed == 10) {
- regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
- RK3368_GMAC_RMII_CLK_2_5M |
- RK3368_GMAC_SPEED_10M);
- } else if (speed == 100) {
- regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
- RK3368_GMAC_RMII_CLK_25M |
- RK3368_GMAC_SPEED_100M);
- } else {
- dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
- }
+ return rk_set_reg_speed(bsp_priv, &rk3368_reg_speed_data,
+ RK3368_GRF_SOC_CON15, interface, speed);
}
static const struct rk_gmac_ops rk3368_ops = {
.set_to_rgmii = rk3368_set_to_rgmii,
.set_to_rmii = rk3368_set_to_rmii,
- .set_rgmii_speed = rk3368_set_rgmii_speed,
- .set_rmii_speed = rk3368_set_rmii_speed,
+ .set_speed = rk3368_set_speed,
};
#define RK3399_GRF_SOC_CON5 0xc214
#define RK3399_GRF_SOC_CON6 0xc218
/* RK3399_GRF_SOC_CON5 */
-#define RK3399_GMAC_PHY_INTF_SEL_RGMII (GRF_BIT(9) | GRF_CLR_BIT(10) | \
- GRF_CLR_BIT(11))
-#define RK3399_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(9) | GRF_CLR_BIT(10) | \
- GRF_BIT(11))
+#define RK3399_GMAC_PHY_INTF_SEL(val) GRF_FIELD(11, 9, val)
#define RK3399_GMAC_FLOW_CTRL GRF_BIT(8)
#define RK3399_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(8)
#define RK3399_GMAC_SPEED_10M GRF_CLR_BIT(7)
#define RK3399_GMAC_SPEED_100M GRF_BIT(7)
#define RK3399_GMAC_RMII_CLK_25M GRF_BIT(3)
#define RK3399_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(3)
-#define RK3399_GMAC_CLK_125M (GRF_CLR_BIT(4) | GRF_CLR_BIT(5))
-#define RK3399_GMAC_CLK_25M (GRF_BIT(4) | GRF_BIT(5))
-#define RK3399_GMAC_CLK_2_5M (GRF_CLR_BIT(4) | GRF_BIT(5))
+#define RK3399_GMAC_CLK_125M GRF_FIELD_CONST(5, 4, 0)
+#define RK3399_GMAC_CLK_25M GRF_FIELD_CONST(5, 4, 3)
+#define RK3399_GMAC_CLK_2_5M GRF_FIELD_CONST(5, 4, 2)
#define RK3399_GMAC_RMII_MODE GRF_BIT(6)
#define RK3399_GMAC_RMII_MODE_CLR GRF_CLR_BIT(6)
@@ -929,21 +768,14 @@ static const struct rk_gmac_ops rk3368_ops = {
#define RK3399_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(7)
#define RK3399_GMAC_RXCLK_DLY_ENABLE GRF_BIT(15)
#define RK3399_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
-#define RK3399_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8)
-#define RK3399_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+#define RK3399_GMAC_CLK_RX_DL_CFG(val) GRF_FIELD(14, 8, val)
+#define RK3399_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val)
static void rk3399_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
- RK3399_GMAC_PHY_INTF_SEL_RGMII |
+ RK3399_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
RK3399_GMAC_RMII_MODE_CLR);
regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON6,
DELAY_ENABLE(RK3399, tx_delay, rx_delay) |
@@ -953,66 +785,226 @@ static void rk3399_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3399_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
+ regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
+ RK3399_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII) |
+ RK3399_GMAC_RMII_MODE);
+}
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
+static const struct rk_reg_speed_data rk3399_reg_speed_data = {
+ .rgmii_10 = RK3399_GMAC_CLK_2_5M,
+ .rgmii_100 = RK3399_GMAC_CLK_25M,
+ .rgmii_1000 = RK3399_GMAC_CLK_125M,
+ .rmii_10 = RK3399_GMAC_RMII_CLK_2_5M | RK3399_GMAC_SPEED_10M,
+ .rmii_100 = RK3399_GMAC_RMII_CLK_25M | RK3399_GMAC_SPEED_100M,
+};
- regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
- RK3399_GMAC_PHY_INTF_SEL_RMII | RK3399_GMAC_RMII_MODE);
+static int rk3399_set_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
+{
+ return rk_set_reg_speed(bsp_priv, &rk3399_reg_speed_data,
+ RK3399_GRF_SOC_CON5, interface, speed);
}
-static void rk3399_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
+static const struct rk_gmac_ops rk3399_ops = {
+ .set_to_rgmii = rk3399_set_to_rgmii,
+ .set_to_rmii = rk3399_set_to_rmii,
+ .set_speed = rk3399_set_speed,
+};
+
+#define RK3506_GRF_SOC_CON8 0x0020
+#define RK3506_GRF_SOC_CON11 0x002c
+
+#define RK3506_GMAC_RMII_MODE GRF_BIT(1)
+
+#define RK3506_GMAC_CLK_RMII_DIV2 GRF_BIT(3)
+#define RK3506_GMAC_CLK_RMII_DIV20 GRF_CLR_BIT(3)
+
+#define RK3506_GMAC_CLK_SELECT_CRU GRF_CLR_BIT(5)
+#define RK3506_GMAC_CLK_SELECT_IO GRF_BIT(5)
+
+#define RK3506_GMAC_CLK_RMII_GATE GRF_BIT(2)
+#define RK3506_GMAC_CLK_RMII_NOGATE GRF_CLR_BIT(2)
+
+static void rk3506_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
+ unsigned int id = bsp_priv->id, offset;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
+ offset = (id == 1) ? RK3506_GRF_SOC_CON11 : RK3506_GRF_SOC_CON8;
+ regmap_write(bsp_priv->grf, offset, RK3506_GMAC_RMII_MODE);
+}
+
+static const struct rk_reg_speed_data rk3506_reg_speed_data = {
+ .rmii_10 = RK3506_GMAC_CLK_RMII_DIV20,
+ .rmii_100 = RK3506_GMAC_CLK_RMII_DIV2,
+};
+
+static int rk3506_set_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
+{
+ unsigned int id = bsp_priv->id, offset;
+
+ offset = (id == 1) ? RK3506_GRF_SOC_CON11 : RK3506_GRF_SOC_CON8;
+ return rk_set_reg_speed(bsp_priv, &rk3506_reg_speed_data,
+ offset, interface, speed);
+}
+
+static void rk3506_set_clock_selection(struct rk_priv_data *bsp_priv,
+ bool input, bool enable)
+{
+ unsigned int value, offset, id = bsp_priv->id;
+
+ offset = (id == 1) ? RK3506_GRF_SOC_CON11 : RK3506_GRF_SOC_CON8;
+
+ value = input ? RK3506_GMAC_CLK_SELECT_IO :
+ RK3506_GMAC_CLK_SELECT_CRU;
+ value |= enable ? RK3506_GMAC_CLK_RMII_NOGATE :
+ RK3506_GMAC_CLK_RMII_GATE;
+ regmap_write(bsp_priv->grf, offset, value);
+}
+
+static const struct rk_gmac_ops rk3506_ops = {
+ .set_to_rmii = rk3506_set_to_rmii,
+ .set_speed = rk3506_set_speed,
+ .set_clock_selection = rk3506_set_clock_selection,
+ .regs_valid = true,
+ .regs = {
+ 0xff4c8000, /* gmac0 */
+ 0xff4d0000, /* gmac1 */
+ 0x0, /* sentinel */
+ },
+};
+
+#define RK3528_VO_GRF_GMAC_CON 0x0018
+#define RK3528_VO_GRF_MACPHY_CON0 0x001c
+#define RK3528_VO_GRF_MACPHY_CON1 0x0020
+#define RK3528_VPU_GRF_GMAC_CON5 0x0018
+#define RK3528_VPU_GRF_GMAC_CON6 0x001c
+
+#define RK3528_GMAC_RXCLK_DLY_ENABLE GRF_BIT(15)
+#define RK3528_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
+#define RK3528_GMAC_TXCLK_DLY_ENABLE GRF_BIT(14)
+#define RK3528_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(14)
+
+#define RK3528_GMAC_CLK_RX_DL_CFG(val) GRF_FIELD(15, 8, val)
+#define RK3528_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(7, 0, val)
+
+#define RK3528_GMAC0_PHY_INTF_SEL_RMII GRF_BIT(1)
+#define RK3528_GMAC1_PHY_INTF_SEL_RGMII GRF_CLR_BIT(8)
+#define RK3528_GMAC1_PHY_INTF_SEL_RMII GRF_BIT(8)
+
+#define RK3528_GMAC1_CLK_SELECT_CRU GRF_CLR_BIT(12)
+#define RK3528_GMAC1_CLK_SELECT_IO GRF_BIT(12)
+
+#define RK3528_GMAC0_CLK_RMII_DIV2 GRF_BIT(3)
+#define RK3528_GMAC0_CLK_RMII_DIV20 GRF_CLR_BIT(3)
+#define RK3528_GMAC1_CLK_RMII_DIV2 GRF_BIT(10)
+#define RK3528_GMAC1_CLK_RMII_DIV20 GRF_CLR_BIT(10)
+
+#define RK3528_GMAC1_CLK_RGMII_DIV1 GRF_FIELD_CONST(11, 10, 0)
+#define RK3528_GMAC1_CLK_RGMII_DIV5 GRF_FIELD_CONST(11, 10, 3)
+#define RK3528_GMAC1_CLK_RGMII_DIV50 GRF_FIELD_CONST(11, 10, 2)
- if (speed == 10)
- regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
- RK3399_GMAC_CLK_2_5M);
- else if (speed == 100)
- regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
- RK3399_GMAC_CLK_25M);
- else if (speed == 1000)
- regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
- RK3399_GMAC_CLK_125M);
+#define RK3528_GMAC0_CLK_RMII_GATE GRF_BIT(2)
+#define RK3528_GMAC0_CLK_RMII_NOGATE GRF_CLR_BIT(2)
+#define RK3528_GMAC1_CLK_RMII_GATE GRF_BIT(9)
+#define RK3528_GMAC1_CLK_RMII_NOGATE GRF_CLR_BIT(9)
+
+static void rk3528_set_to_rgmii(struct rk_priv_data *bsp_priv,
+ int tx_delay, int rx_delay)
+{
+ regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5,
+ RK3528_GMAC1_PHY_INTF_SEL_RGMII);
+
+ regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5,
+ DELAY_ENABLE(RK3528, tx_delay, rx_delay));
+
+ regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON6,
+ RK3528_GMAC_CLK_RX_DL_CFG(rx_delay) |
+ RK3528_GMAC_CLK_TX_DL_CFG(tx_delay));
+}
+
+static void rk3528_set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+ if (bsp_priv->id == 1)
+ regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5,
+ RK3528_GMAC1_PHY_INTF_SEL_RMII);
else
- dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
+ regmap_write(bsp_priv->grf, RK3528_VO_GRF_GMAC_CON,
+ RK3528_GMAC0_PHY_INTF_SEL_RMII |
+ RK3528_GMAC0_CLK_RMII_DIV2);
}
-static void rk3399_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+static const struct rk_reg_speed_data rk3528_gmac0_reg_speed_data = {
+ .rmii_10 = RK3528_GMAC0_CLK_RMII_DIV20,
+ .rmii_100 = RK3528_GMAC0_CLK_RMII_DIV2,
+};
+
+static const struct rk_reg_speed_data rk3528_gmac1_reg_speed_data = {
+ .rgmii_10 = RK3528_GMAC1_CLK_RGMII_DIV50,
+ .rgmii_100 = RK3528_GMAC1_CLK_RGMII_DIV5,
+ .rgmii_1000 = RK3528_GMAC1_CLK_RGMII_DIV1,
+ .rmii_10 = RK3528_GMAC1_CLK_RMII_DIV20,
+ .rmii_100 = RK3528_GMAC1_CLK_RMII_DIV2,
+};
+
+static int rk3528_set_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
{
- struct device *dev = &bsp_priv->pdev->dev;
+ const struct rk_reg_speed_data *rsd;
+ unsigned int reg;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
+ if (bsp_priv->id == 1) {
+ rsd = &rk3528_gmac1_reg_speed_data;
+ reg = RK3528_VPU_GRF_GMAC_CON5;
+ } else {
+ rsd = &rk3528_gmac0_reg_speed_data;
+ reg = RK3528_VO_GRF_GMAC_CON;
}
- if (speed == 10) {
- regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
- RK3399_GMAC_RMII_CLK_2_5M |
- RK3399_GMAC_SPEED_10M);
- } else if (speed == 100) {
- regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
- RK3399_GMAC_RMII_CLK_25M |
- RK3399_GMAC_SPEED_100M);
+ return rk_set_reg_speed(bsp_priv, rsd, reg, interface, speed);
+}
+
+static void rk3528_set_clock_selection(struct rk_priv_data *bsp_priv,
+ bool input, bool enable)
+{
+ unsigned int val;
+
+ if (bsp_priv->id == 1) {
+ val = input ? RK3528_GMAC1_CLK_SELECT_IO :
+ RK3528_GMAC1_CLK_SELECT_CRU;
+ val |= enable ? RK3528_GMAC1_CLK_RMII_NOGATE :
+ RK3528_GMAC1_CLK_RMII_GATE;
+ regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5, val);
} else {
- dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
+ val = enable ? RK3528_GMAC0_CLK_RMII_NOGATE :
+ RK3528_GMAC0_CLK_RMII_GATE;
+ regmap_write(bsp_priv->grf, RK3528_VO_GRF_GMAC_CON, val);
}
}
-static const struct rk_gmac_ops rk3399_ops = {
- .set_to_rgmii = rk3399_set_to_rgmii,
- .set_to_rmii = rk3399_set_to_rmii,
- .set_rgmii_speed = rk3399_set_rgmii_speed,
- .set_rmii_speed = rk3399_set_rmii_speed,
+static void rk3528_integrated_phy_powerup(struct rk_priv_data *bsp_priv)
+{
+ rk_gmac_integrated_fephy_powerup(bsp_priv, RK3528_VO_GRF_MACPHY_CON0);
+}
+
+static void rk3528_integrated_phy_powerdown(struct rk_priv_data *bsp_priv)
+{
+ rk_gmac_integrated_fephy_powerdown(bsp_priv, RK3528_VO_GRF_MACPHY_CON0);
+}
+
+static const struct rk_gmac_ops rk3528_ops = {
+ .set_to_rgmii = rk3528_set_to_rgmii,
+ .set_to_rmii = rk3528_set_to_rmii,
+ .set_speed = rk3528_set_speed,
+ .set_clock_selection = rk3528_set_clock_selection,
+ .integrated_phy_powerup = rk3528_integrated_phy_powerup,
+ .integrated_phy_powerdown = rk3528_integrated_phy_powerdown,
+ .regs_valid = true,
+ .regs = {
+ 0xffbd0000, /* gmac0 */
+ 0xffbe0000, /* gmac1 */
+ 0x0, /* sentinel */
+ },
};
#define RK3568_GRF_GMAC0_CON0 0x0380
@@ -1021,10 +1013,7 @@ static const struct rk_gmac_ops rk3399_ops = {
#define RK3568_GRF_GMAC1_CON1 0x038c
/* RK3568_GRF_GMAC0_CON1 && RK3568_GRF_GMAC1_CON1 */
-#define RK3568_GMAC_PHY_INTF_SEL_RGMII \
- (GRF_BIT(4) | GRF_CLR_BIT(5) | GRF_CLR_BIT(6))
-#define RK3568_GMAC_PHY_INTF_SEL_RMII \
- (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | GRF_BIT(6))
+#define RK3568_GMAC_PHY_INTF_SEL(val) GRF_FIELD(6, 4, val)
#define RK3568_GMAC_FLOW_CTRL GRF_BIT(3)
#define RK3568_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3)
#define RK3568_GMAC_RXCLK_DLY_ENABLE GRF_BIT(1)
@@ -1033,20 +1022,14 @@ static const struct rk_gmac_ops rk3399_ops = {
#define RK3568_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(0)
/* RK3568_GRF_GMAC0_CON0 && RK3568_GRF_GMAC1_CON0 */
-#define RK3568_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8)
-#define RK3568_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+#define RK3568_GMAC_CLK_RX_DL_CFG(val) GRF_FIELD(14, 8, val)
+#define RK3568_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val)
static void rk3568_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
u32 con0, con1;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
con0 = (bsp_priv->id == 1) ? RK3568_GRF_GMAC1_CON0 :
RK3568_GRF_GMAC0_CON0;
con1 = (bsp_priv->id == 1) ? RK3568_GRF_GMAC1_CON1 :
@@ -1057,59 +1040,25 @@ static void rk3568_set_to_rgmii(struct rk_priv_data *bsp_priv,
RK3568_GMAC_CLK_TX_DL_CFG(tx_delay));
regmap_write(bsp_priv->grf, con1,
- RK3568_GMAC_PHY_INTF_SEL_RGMII |
+ RK3568_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
RK3568_GMAC_RXCLK_DLY_ENABLE |
RK3568_GMAC_TXCLK_DLY_ENABLE);
}
static void rk3568_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
u32 con1;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
con1 = (bsp_priv->id == 1) ? RK3568_GRF_GMAC1_CON1 :
RK3568_GRF_GMAC0_CON1;
- regmap_write(bsp_priv->grf, con1, RK3568_GMAC_PHY_INTF_SEL_RMII);
-}
-
-static void rk3568_set_gmac_speed(struct rk_priv_data *bsp_priv, int speed)
-{
- struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk;
- struct device *dev = &bsp_priv->pdev->dev;
- unsigned long rate;
- int ret;
-
- switch (speed) {
- case 10:
- rate = 2500000;
- break;
- case 100:
- rate = 25000000;
- break;
- case 1000:
- rate = 125000000;
- break;
- default:
- dev_err(dev, "unknown speed value for GMAC speed=%d", speed);
- return;
- }
-
- ret = clk_set_rate(clk_mac_speed, rate);
- if (ret)
- dev_err(dev, "%s: set clk_mac_speed rate %ld failed %d\n",
- __func__, rate, ret);
+ regmap_write(bsp_priv->grf, con1,
+ RK3568_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII));
}
static const struct rk_gmac_ops rk3568_ops = {
.set_to_rgmii = rk3568_set_to_rgmii,
.set_to_rmii = rk3568_set_to_rmii,
- .set_rgmii_speed = rk3568_set_gmac_speed,
- .set_rmii_speed = rk3568_set_gmac_speed,
+ .set_speed = rk_set_clk_mac_speed,
.regs_valid = true,
.regs = {
0xfe2a0000, /* gmac0 */
@@ -1118,6 +1067,128 @@ static const struct rk_gmac_ops rk3568_ops = {
},
};
+/* VCCIO0_1_3_IOC */
+#define RK3576_VCCIO0_1_3_IOC_CON2 0X6408
+#define RK3576_VCCIO0_1_3_IOC_CON3 0X640c
+#define RK3576_VCCIO0_1_3_IOC_CON4 0X6410
+#define RK3576_VCCIO0_1_3_IOC_CON5 0X6414
+
+#define RK3576_GMAC_RXCLK_DLY_ENABLE GRF_BIT(15)
+#define RK3576_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
+#define RK3576_GMAC_TXCLK_DLY_ENABLE GRF_BIT(7)
+#define RK3576_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(7)
+
+#define RK3576_GMAC_CLK_RX_DL_CFG(val) GRF_FIELD(14, 8, val)
+#define RK3576_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val)
+
+/* SDGMAC_GRF */
+#define RK3576_GRF_GMAC_CON0 0X0020
+#define RK3576_GRF_GMAC_CON1 0X0024
+
+#define RK3576_GMAC_RMII_MODE GRF_BIT(3)
+#define RK3576_GMAC_RGMII_MODE GRF_CLR_BIT(3)
+
+#define RK3576_GMAC_CLK_SELECT_IO GRF_BIT(7)
+#define RK3576_GMAC_CLK_SELECT_CRU GRF_CLR_BIT(7)
+
+#define RK3576_GMAC_CLK_RMII_DIV2 GRF_BIT(5)
+#define RK3576_GMAC_CLK_RMII_DIV20 GRF_CLR_BIT(5)
+
+#define RK3576_GMAC_CLK_RGMII_DIV1 GRF_FIELD_CONST(6, 5, 0)
+#define RK3576_GMAC_CLK_RGMII_DIV5 GRF_FIELD_CONST(6, 5, 3)
+#define RK3576_GMAC_CLK_RGMII_DIV50 GRF_FIELD_CONST(6, 5, 2)
+
+#define RK3576_GMAC_CLK_RMII_GATE GRF_BIT(4)
+#define RK3576_GMAC_CLK_RMII_NOGATE GRF_CLR_BIT(4)
+
+static void rk3576_set_to_rgmii(struct rk_priv_data *bsp_priv,
+ int tx_delay, int rx_delay)
+{
+ unsigned int offset_con;
+
+ offset_con = bsp_priv->id == 1 ? RK3576_GRF_GMAC_CON1 :
+ RK3576_GRF_GMAC_CON0;
+
+ regmap_write(bsp_priv->grf, offset_con, RK3576_GMAC_RGMII_MODE);
+
+ offset_con = bsp_priv->id == 1 ? RK3576_VCCIO0_1_3_IOC_CON4 :
+ RK3576_VCCIO0_1_3_IOC_CON2;
+
+ /* m0 && m1 delay enabled */
+ regmap_write(bsp_priv->php_grf, offset_con,
+ DELAY_ENABLE(RK3576, tx_delay, rx_delay));
+ regmap_write(bsp_priv->php_grf, offset_con + 0x4,
+ DELAY_ENABLE(RK3576, tx_delay, rx_delay));
+
+ /* m0 && m1 delay value */
+ regmap_write(bsp_priv->php_grf, offset_con,
+ RK3576_GMAC_CLK_TX_DL_CFG(tx_delay) |
+ RK3576_GMAC_CLK_RX_DL_CFG(rx_delay));
+ regmap_write(bsp_priv->php_grf, offset_con + 0x4,
+ RK3576_GMAC_CLK_TX_DL_CFG(tx_delay) |
+ RK3576_GMAC_CLK_RX_DL_CFG(rx_delay));
+}
+
+static void rk3576_set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+ unsigned int offset_con;
+
+ offset_con = bsp_priv->id == 1 ? RK3576_GRF_GMAC_CON1 :
+ RK3576_GRF_GMAC_CON0;
+
+ regmap_write(bsp_priv->grf, offset_con, RK3576_GMAC_RMII_MODE);
+}
+
+static const struct rk_reg_speed_data rk3576_reg_speed_data = {
+ .rgmii_10 = RK3576_GMAC_CLK_RGMII_DIV50,
+ .rgmii_100 = RK3576_GMAC_CLK_RGMII_DIV5,
+ .rgmii_1000 = RK3576_GMAC_CLK_RGMII_DIV1,
+ .rmii_10 = RK3576_GMAC_CLK_RMII_DIV20,
+ .rmii_100 = RK3576_GMAC_CLK_RMII_DIV2,
+};
+
+static int rk3576_set_gmac_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
+{
+ unsigned int offset_con;
+
+ offset_con = bsp_priv->id == 1 ? RK3576_GRF_GMAC_CON1 :
+ RK3576_GRF_GMAC_CON0;
+
+	return rk_set_reg_speed(bsp_priv, &rk3576_reg_speed_data, offset_con,
+ interface, speed);
+}
+
+static void rk3576_set_clock_selection(struct rk_priv_data *bsp_priv, bool input,
+ bool enable)
+{
+ unsigned int val = input ? RK3576_GMAC_CLK_SELECT_IO :
+ RK3576_GMAC_CLK_SELECT_CRU;
+ unsigned int offset_con;
+
+ val |= enable ? RK3576_GMAC_CLK_RMII_NOGATE :
+ RK3576_GMAC_CLK_RMII_GATE;
+
+ offset_con = bsp_priv->id == 1 ? RK3576_GRF_GMAC_CON1 :
+ RK3576_GRF_GMAC_CON0;
+
+ regmap_write(bsp_priv->grf, offset_con, val);
+}
+
+static const struct rk_gmac_ops rk3576_ops = {
+ .set_to_rgmii = rk3576_set_to_rgmii,
+ .set_to_rmii = rk3576_set_to_rmii,
+ .set_speed = rk3576_set_gmac_speed,
+ .set_clock_selection = rk3576_set_clock_selection,
+ .php_grf_required = true,
+ .regs_valid = true,
+ .regs = {
+ 0x2a220000, /* gmac0 */
+ 0x2a230000, /* gmac1 */
+ 0x0, /* sentinel */
+ },
+};
+
/* sys_grf */
#define RK3588_GRF_GMAC_CON7 0X031c
#define RK3588_GRF_GMAC_CON8 0X0320
@@ -1128,33 +1199,31 @@ static const struct rk_gmac_ops rk3568_ops = {
#define RK3588_GMAC_TXCLK_DLY_ENABLE(id) GRF_BIT(2 * (id) + 2)
#define RK3588_GMAC_TXCLK_DLY_DISABLE(id) GRF_CLR_BIT(2 * (id) + 2)
-#define RK3588_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0xFF, 8)
-#define RK3588_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0xFF, 0)
+#define RK3588_GMAC_CLK_RX_DL_CFG(val) GRF_FIELD(15, 8, val)
+#define RK3588_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(7, 0, val)
/* php_grf */
#define RK3588_GRF_GMAC_CON0 0X0008
#define RK3588_GRF_CLK_CON1 0X0070
-#define RK3588_GMAC_PHY_INTF_SEL_RGMII(id) \
- (GRF_BIT(3 + (id) * 6) | GRF_CLR_BIT(4 + (id) * 6) | GRF_CLR_BIT(5 + (id) * 6))
-#define RK3588_GMAC_PHY_INTF_SEL_RMII(id) \
- (GRF_CLR_BIT(3 + (id) * 6) | GRF_CLR_BIT(4 + (id) * 6) | GRF_BIT(5 + (id) * 6))
+#define RK3588_GMAC_PHY_INTF_SEL(id, val) \
+ (GRF_FIELD(5, 3, val) << ((id) * 6))
#define RK3588_GMAC_CLK_RMII_MODE(id) GRF_BIT(5 * (id))
#define RK3588_GMAC_CLK_RGMII_MODE(id) GRF_CLR_BIT(5 * (id))
-#define RK3588_GMAC_CLK_SELET_CRU(id) GRF_BIT(5 * (id) + 4)
-#define RK3588_GMAC_CLK_SELET_IO(id) GRF_CLR_BIT(5 * (id) + 4)
+#define RK3588_GMAC_CLK_SELECT_CRU(id) GRF_BIT(5 * (id) + 4)
+#define RK3588_GMAC_CLK_SELECT_IO(id) GRF_CLR_BIT(5 * (id) + 4)
#define RK3588_GMA_CLK_RMII_DIV2(id) GRF_BIT(5 * (id) + 2)
#define RK3588_GMA_CLK_RMII_DIV20(id) GRF_CLR_BIT(5 * (id) + 2)
#define RK3588_GMAC_CLK_RGMII_DIV1(id) \
- (GRF_CLR_BIT(5 * (id) + 2) | GRF_CLR_BIT(5 * (id) + 3))
+ (GRF_FIELD_CONST(3, 2, 0) << ((id) * 5))
#define RK3588_GMAC_CLK_RGMII_DIV5(id) \
- (GRF_BIT(5 * (id) + 2) | GRF_BIT(5 * (id) + 3))
+ (GRF_FIELD_CONST(3, 2, 3) << ((id) * 5))
#define RK3588_GMAC_CLK_RGMII_DIV50(id) \
- (GRF_CLR_BIT(5 * (id) + 2) | GRF_BIT(5 * (id) + 3))
+ (GRF_FIELD_CONST(3, 2, 2) << ((id) * 5))
#define RK3588_GMAC_CLK_RMII_GATE(id) GRF_BIT(5 * (id) + 1)
#define RK3588_GMAC_CLK_RMII_NOGATE(id) GRF_CLR_BIT(5 * (id) + 1)
@@ -1162,19 +1231,13 @@ static const struct rk_gmac_ops rk3568_ops = {
static void rk3588_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
u32 offset_con, id = bsp_priv->id;
- if (IS_ERR(bsp_priv->grf) || IS_ERR(bsp_priv->php_grf)) {
- dev_err(dev, "Missing rockchip,grf or rockchip,php_grf property\n");
- return;
- }
-
offset_con = bsp_priv->id == 1 ? RK3588_GRF_GMAC_CON9 :
RK3588_GRF_GMAC_CON8;
regmap_write(bsp_priv->php_grf, RK3588_GRF_GMAC_CON0,
- RK3588_GMAC_PHY_INTF_SEL_RGMII(id));
+ RK3588_GMAC_PHY_INTF_SEL(id, PHY_INTF_SEL_RGMII));
regmap_write(bsp_priv->php_grf, RK3588_GRF_CLK_CON1,
RK3588_GMAC_CLK_RGMII_MODE(id));
@@ -1190,40 +1253,33 @@ static void rk3588_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3588_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->php_grf)) {
- dev_err(dev, "%s: Missing rockchip,php_grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->php_grf, RK3588_GRF_GMAC_CON0,
- RK3588_GMAC_PHY_INTF_SEL_RMII(bsp_priv->id));
+ RK3588_GMAC_PHY_INTF_SEL(bsp_priv->id, PHY_INTF_SEL_RMII));
regmap_write(bsp_priv->php_grf, RK3588_GRF_CLK_CON1,
RK3588_GMAC_CLK_RMII_MODE(bsp_priv->id));
}
-static void rk3588_set_gmac_speed(struct rk_priv_data *bsp_priv, int speed)
+static int rk3588_set_gmac_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
{
- struct device *dev = &bsp_priv->pdev->dev;
unsigned int val = 0, id = bsp_priv->id;
switch (speed) {
case 10:
- if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
+ if (interface == PHY_INTERFACE_MODE_RMII)
val = RK3588_GMA_CLK_RMII_DIV20(id);
else
val = RK3588_GMAC_CLK_RGMII_DIV50(id);
break;
case 100:
- if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
+ if (interface == PHY_INTERFACE_MODE_RMII)
val = RK3588_GMA_CLK_RMII_DIV2(id);
else
val = RK3588_GMAC_CLK_RGMII_DIV5(id);
break;
case 1000:
- if (bsp_priv->phy_iface != PHY_INTERFACE_MODE_RMII)
+ if (interface != PHY_INTERFACE_MODE_RMII)
val = RK3588_GMAC_CLK_RGMII_DIV1(id);
else
goto err;
@@ -1234,16 +1290,16 @@ static void rk3588_set_gmac_speed(struct rk_priv_data *bsp_priv, int speed)
regmap_write(bsp_priv->php_grf, RK3588_GRF_CLK_CON1, val);
- return;
+ return 0;
err:
- dev_err(dev, "unknown speed value for GMAC speed=%d", speed);
+ return -EINVAL;
}
static void rk3588_set_clock_selection(struct rk_priv_data *bsp_priv, bool input,
bool enable)
{
- unsigned int val = input ? RK3588_GMAC_CLK_SELET_IO(bsp_priv->id) :
- RK3588_GMAC_CLK_SELET_CRU(bsp_priv->id);
+ unsigned int val = input ? RK3588_GMAC_CLK_SELECT_IO(bsp_priv->id) :
+ RK3588_GMAC_CLK_SELECT_CRU(bsp_priv->id);
val |= enable ? RK3588_GMAC_CLK_RMII_NOGATE(bsp_priv->id) :
RK3588_GMAC_CLK_RMII_GATE(bsp_priv->id);
@@ -1254,9 +1310,9 @@ static void rk3588_set_clock_selection(struct rk_priv_data *bsp_priv, bool input
static const struct rk_gmac_ops rk3588_ops = {
.set_to_rgmii = rk3588_set_to_rgmii,
.set_to_rmii = rk3588_set_to_rmii,
- .set_rgmii_speed = rk3588_set_gmac_speed,
- .set_rmii_speed = rk3588_set_gmac_speed,
+ .set_speed = rk3588_set_gmac_speed,
.set_clock_selection = rk3588_set_clock_selection,
+ .php_grf_required = true,
.regs_valid = true,
.regs = {
0xfe1b0000, /* gmac0 */
@@ -1268,8 +1324,7 @@ static const struct rk_gmac_ops rk3588_ops = {
#define RV1108_GRF_GMAC_CON0 0X0900
/* RV1108_GRF_GMAC_CON0 */
-#define RV1108_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | \
- GRF_BIT(6))
+#define RV1108_GMAC_PHY_INTF_SEL(val) GRF_FIELD(6, 4, val)
#define RV1108_GMAC_FLOW_CTRL GRF_BIT(3)
#define RV1108_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3)
#define RV1108_GMAC_SPEED_10M GRF_CLR_BIT(2)
@@ -1279,42 +1334,25 @@ static const struct rk_gmac_ops rk3588_ops = {
static void rv1108_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->grf, RV1108_GRF_GMAC_CON0,
- RV1108_GMAC_PHY_INTF_SEL_RMII);
+ RV1108_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII));
}
-static void rv1108_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
-{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
+static const struct rk_reg_speed_data rv1108_reg_speed_data = {
+ .rmii_10 = RV1108_GMAC_RMII_CLK_2_5M | RV1108_GMAC_SPEED_10M,
+ .rmii_100 = RV1108_GMAC_RMII_CLK_25M | RV1108_GMAC_SPEED_100M,
+};
- if (speed == 10) {
- regmap_write(bsp_priv->grf, RV1108_GRF_GMAC_CON0,
- RV1108_GMAC_RMII_CLK_2_5M |
- RV1108_GMAC_SPEED_10M);
- } else if (speed == 100) {
- regmap_write(bsp_priv->grf, RV1108_GRF_GMAC_CON0,
- RV1108_GMAC_RMII_CLK_25M |
- RV1108_GMAC_SPEED_100M);
- } else {
- dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
- }
+static int rv1108_set_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
+{
+ return rk_set_reg_speed(bsp_priv, &rv1108_reg_speed_data,
+ RV1108_GRF_GMAC_CON0, interface, speed);
}
static const struct rk_gmac_ops rv1108_ops = {
.set_to_rmii = rv1108_set_to_rmii,
- .set_rmii_speed = rv1108_set_rmii_speed,
+ .set_speed = rv1108_set_speed,
};
#define RV1126_GRF_GMAC_CON0 0X0070
@@ -1322,10 +1360,7 @@ static const struct rk_gmac_ops rv1108_ops = {
#define RV1126_GRF_GMAC_CON2 0X0078
/* RV1126_GRF_GMAC_CON0 */
-#define RV1126_GMAC_PHY_INTF_SEL_RGMII \
- (GRF_BIT(4) | GRF_CLR_BIT(5) | GRF_CLR_BIT(6))
-#define RV1126_GMAC_PHY_INTF_SEL_RMII \
- (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | GRF_BIT(6))
+#define RV1126_GMAC_PHY_INTF_SEL(val) GRF_FIELD(6, 4, val)
#define RV1126_GMAC_FLOW_CTRL GRF_BIT(7)
#define RV1126_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(7)
#define RV1126_GMAC_M0_RXCLK_DLY_ENABLE GRF_BIT(1)
@@ -1338,24 +1373,17 @@ static const struct rk_gmac_ops rv1108_ops = {
#define RV1126_GMAC_M1_TXCLK_DLY_DISABLE GRF_CLR_BIT(2)
/* RV1126_GRF_GMAC_CON1 */
-#define RV1126_GMAC_M0_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8)
-#define RV1126_GMAC_M0_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+#define RV1126_GMAC_M0_CLK_RX_DL_CFG(val) GRF_FIELD(14, 8, val)
+#define RV1126_GMAC_M0_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val)
/* RV1126_GRF_GMAC_CON2 */
-#define RV1126_GMAC_M1_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8)
-#define RV1126_GMAC_M1_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+#define RV1126_GMAC_M1_CLK_RX_DL_CFG(val) GRF_FIELD(14, 8, val)
+#define RV1126_GMAC_M1_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val)
static void rv1126_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RV1126_GRF_GMAC_CON0,
- RV1126_GMAC_PHY_INTF_SEL_RGMII |
+ RV1126_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
RV1126_GMAC_M0_RXCLK_DLY_ENABLE |
RV1126_GMAC_M0_TXCLK_DLY_ENABLE |
RV1126_GMAC_M1_RXCLK_DLY_ENABLE |
@@ -1372,126 +1400,21 @@ static void rv1126_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rv1126_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->grf, RV1126_GRF_GMAC_CON0,
- RV1126_GMAC_PHY_INTF_SEL_RMII);
-}
-
-static void rv1126_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
-{
- struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk;
- struct device *dev = &bsp_priv->pdev->dev;
- unsigned long rate;
- int ret;
-
- switch (speed) {
- case 10:
- rate = 2500000;
- break;
- case 100:
- rate = 25000000;
- break;
- case 1000:
- rate = 125000000;
- break;
- default:
- dev_err(dev, "unknown speed value for RGMII speed=%d", speed);
- return;
- }
-
- ret = clk_set_rate(clk_mac_speed, rate);
- if (ret)
- dev_err(dev, "%s: set clk_mac_speed rate %ld failed %d\n",
- __func__, rate, ret);
-}
-
-static void rv1126_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
-{
- struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk;
- struct device *dev = &bsp_priv->pdev->dev;
- unsigned long rate;
- int ret;
-
- switch (speed) {
- case 10:
- rate = 2500000;
- break;
- case 100:
- rate = 25000000;
- break;
- default:
- dev_err(dev, "unknown speed value for RGMII speed=%d", speed);
- return;
- }
-
- ret = clk_set_rate(clk_mac_speed, rate);
- if (ret)
- dev_err(dev, "%s: set clk_mac_speed rate %ld failed %d\n",
- __func__, rate, ret);
+ RV1126_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII));
}
static const struct rk_gmac_ops rv1126_ops = {
.set_to_rgmii = rv1126_set_to_rgmii,
.set_to_rmii = rv1126_set_to_rmii,
- .set_rgmii_speed = rv1126_set_rgmii_speed,
- .set_rmii_speed = rv1126_set_rmii_speed,
+ .set_speed = rk_set_clk_mac_speed,
};
-#define RK_GRF_MACPHY_CON0 0xb00
-#define RK_GRF_MACPHY_CON1 0xb04
-#define RK_GRF_MACPHY_CON2 0xb08
-#define RK_GRF_MACPHY_CON3 0xb0c
-
-#define RK_MACPHY_ENABLE GRF_BIT(0)
-#define RK_MACPHY_DISABLE GRF_CLR_BIT(0)
-#define RK_MACPHY_CFG_CLK_50M GRF_BIT(14)
-#define RK_GMAC2PHY_RMII_MODE (GRF_BIT(6) | GRF_CLR_BIT(7))
-#define RK_GRF_CON2_MACPHY_ID HIWORD_UPDATE(0x1234, 0xffff, 0)
-#define RK_GRF_CON3_MACPHY_ID HIWORD_UPDATE(0x35, 0x3f, 0)
-
-static void rk_gmac_integrated_phy_powerup(struct rk_priv_data *priv)
-{
- if (priv->ops->integrated_phy_powerup)
- priv->ops->integrated_phy_powerup(priv);
-
- regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_CFG_CLK_50M);
- regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_GMAC2PHY_RMII_MODE);
-
- regmap_write(priv->grf, RK_GRF_MACPHY_CON2, RK_GRF_CON2_MACPHY_ID);
- regmap_write(priv->grf, RK_GRF_MACPHY_CON3, RK_GRF_CON3_MACPHY_ID);
-
- if (priv->phy_reset) {
- /* PHY needs to be disabled before trying to reset it */
- regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_DISABLE);
- if (priv->phy_reset)
- reset_control_assert(priv->phy_reset);
- usleep_range(10, 20);
- if (priv->phy_reset)
- reset_control_deassert(priv->phy_reset);
- usleep_range(10, 20);
- regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_ENABLE);
- msleep(30);
- }
-}
-
-static void rk_gmac_integrated_phy_powerdown(struct rk_priv_data *priv)
-{
- regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_DISABLE);
- if (priv->phy_reset)
- reset_control_assert(priv->phy_reset);
-}
-
static int rk_gmac_clk_init(struct plat_stmmacenet_data *plat)
{
struct rk_priv_data *bsp_priv = plat->bsp_priv;
- struct device *dev = &bsp_priv->pdev->dev;
int phy_iface = bsp_priv->phy_iface;
+ struct device *dev = bsp_priv->dev;
int i, j, ret;
bsp_priv->clk_enabled = false;
@@ -1518,16 +1441,10 @@ static int rk_gmac_clk_init(struct plat_stmmacenet_data *plat)
if (ret)
return dev_err_probe(dev, ret, "Failed to get clocks\n");
- /* "stmmaceth" will be enabled by the core */
- bsp_priv->clk_mac = devm_clk_get(dev, "stmmaceth");
- ret = PTR_ERR_OR_ZERO(bsp_priv->clk_mac);
- if (ret)
- return dev_err_probe(dev, ret, "Cannot get stmmaceth clock\n");
-
if (bsp_priv->clock_input) {
dev_info(dev, "clock input from PHY\n");
} else if (phy_iface == PHY_INTERFACE_MODE_RMII) {
- clk_set_rate(bsp_priv->clk_mac, 50000000);
+ clk_set_rate(plat->stmmac_clk, 50000000);
}
if (plat->phy_node && bsp_priv->integrated_phy) {
@@ -1565,14 +1482,15 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable)
}
} else {
if (bsp_priv->clk_enabled) {
+ if (bsp_priv->ops && bsp_priv->ops->set_clock_selection) {
+ bsp_priv->ops->set_clock_selection(bsp_priv,
+ bsp_priv->clock_input, false);
+ }
+
clk_bulk_disable_unprepare(bsp_priv->num_clks,
bsp_priv->clks);
clk_disable_unprepare(bsp_priv->clk_phy);
- if (bsp_priv->ops && bsp_priv->ops->set_clock_selection)
- bsp_priv->ops->set_clock_selection(bsp_priv,
- bsp_priv->clock_input, false);
-
bsp_priv->clk_enabled = false;
}
}
@@ -1583,8 +1501,8 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable)
static int phy_power_on(struct rk_priv_data *bsp_priv, bool enable)
{
struct regulator *ldo = bsp_priv->regulator;
+ struct device *dev = bsp_priv->dev;
int ret;
- struct device *dev = &bsp_priv->pdev->dev;
if (enable) {
ret = regulator_enable(ldo);
@@ -1614,7 +1532,7 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
if (!bsp_priv)
return ERR_PTR(-ENOMEM);
- of_get_phy_mode(dev->of_node, &bsp_priv->phy_iface);
+ bsp_priv->phy_iface = plat->phy_interface;
bsp_priv->ops = ops;
/* Some SoCs have multiple MAC controllers, which need
@@ -1677,8 +1595,22 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
bsp_priv->grf = syscon_regmap_lookup_by_phandle(dev->of_node,
"rockchip,grf");
- bsp_priv->php_grf = syscon_regmap_lookup_by_phandle(dev->of_node,
- "rockchip,php-grf");
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err_probe(dev, PTR_ERR(bsp_priv->grf),
+ "failed to lookup rockchip,grf\n");
+ return ERR_CAST(bsp_priv->grf);
+ }
+
+ if (ops->php_grf_required) {
+ bsp_priv->php_grf =
+ syscon_regmap_lookup_by_phandle(dev->of_node,
+ "rockchip,php-grf");
+ if (IS_ERR(bsp_priv->php_grf)) {
+ dev_err_probe(dev, PTR_ERR(bsp_priv->php_grf),
+ "failed to lookup rockchip,php-grf\n");
+ return ERR_CAST(bsp_priv->php_grf);
+ }
+ }
if (plat->phy_node) {
bsp_priv->integrated_phy = of_property_read_bool(plat->phy_node,
@@ -1694,7 +1626,7 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
dev_info(dev, "integrated PHY? (%s).\n",
bsp_priv->integrated_phy ? "yes" : "no");
- bsp_priv->pdev = pdev;
+ bsp_priv->dev = dev;
return bsp_priv;
}
@@ -1714,7 +1646,7 @@ static int rk_gmac_check_ops(struct rk_priv_data *bsp_priv)
return -EINVAL;
break;
default:
- dev_err(&bsp_priv->pdev->dev,
+ dev_err(bsp_priv->dev,
"unsupported interface %d", bsp_priv->phy_iface);
}
return 0;
@@ -1722,8 +1654,8 @@ static int rk_gmac_check_ops(struct rk_priv_data *bsp_priv)
static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
{
+ struct device *dev = bsp_priv->dev;
int ret;
- struct device *dev = &bsp_priv->pdev->dev;
ret = rk_gmac_check_ops(bsp_priv);
if (ret)
@@ -1768,43 +1700,82 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
pm_runtime_get_sync(dev);
- if (bsp_priv->integrated_phy)
- rk_gmac_integrated_phy_powerup(bsp_priv);
+ if (bsp_priv->integrated_phy && bsp_priv->ops->integrated_phy_powerup)
+ bsp_priv->ops->integrated_phy_powerup(bsp_priv);
return 0;
}
static void rk_gmac_powerdown(struct rk_priv_data *gmac)
{
- if (gmac->integrated_phy)
- rk_gmac_integrated_phy_powerdown(gmac);
+ if (gmac->integrated_phy && gmac->ops->integrated_phy_powerdown)
+ gmac->ops->integrated_phy_powerdown(gmac);
- pm_runtime_put_sync(&gmac->pdev->dev);
+ pm_runtime_put_sync(gmac->dev);
phy_power_on(gmac, false);
gmac_clk_enable(gmac, false);
}
-static void rk_fix_speed(void *priv, unsigned int speed, unsigned int mode)
+static void rk_get_interfaces(struct stmmac_priv *priv, void *bsp_priv,
+ unsigned long *interfaces)
{
- struct rk_priv_data *bsp_priv = priv;
- struct device *dev = &bsp_priv->pdev->dev;
+ struct rk_priv_data *rk = bsp_priv;
- switch (bsp_priv->phy_iface) {
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- if (bsp_priv->ops->set_rgmii_speed)
- bsp_priv->ops->set_rgmii_speed(bsp_priv, speed);
- break;
- case PHY_INTERFACE_MODE_RMII:
- if (bsp_priv->ops->set_rmii_speed)
- bsp_priv->ops->set_rmii_speed(bsp_priv, speed);
- break;
- default:
- dev_err(dev, "unsupported interface %d", bsp_priv->phy_iface);
- }
+ if (rk->ops->set_to_rgmii)
+ phy_interface_set_rgmii(interfaces);
+
+ if (rk->ops->set_to_rmii)
+ __set_bit(PHY_INTERFACE_MODE_RMII, interfaces);
+}
+
+static int rk_set_clk_tx_rate(void *bsp_priv_, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed)
+{
+ struct rk_priv_data *bsp_priv = bsp_priv_;
+
+ if (bsp_priv->ops->set_speed)
+ return bsp_priv->ops->set_speed(bsp_priv, interface, speed);
+
+ return -EINVAL;
+}
+
+static int rk_gmac_suspend(struct device *dev, void *bsp_priv_)
+{
+ struct rk_priv_data *bsp_priv = bsp_priv_;
+
+	/* Keep the PHY up if we use Wake-on-LAN. */
+ if (!device_may_wakeup(dev))
+ rk_gmac_powerdown(bsp_priv);
+
+ return 0;
+}
+
+static int rk_gmac_resume(struct device *dev, void *bsp_priv_)
+{
+ struct rk_priv_data *bsp_priv = bsp_priv_;
+
+	/* The PHY was up for Wake-on-LAN. */
+ if (!device_may_wakeup(dev))
+ rk_gmac_powerup(bsp_priv);
+
+ return 0;
+}
+
+static int rk_gmac_init(struct device *dev, void *bsp_priv)
+{
+ return rk_gmac_powerup(bsp_priv);
+}
+
+static void rk_gmac_exit(struct device *dev, void *bsp_priv_)
+{
+ struct stmmac_priv *priv = netdev_priv(dev_get_drvdata(dev));
+ struct rk_priv_data *bsp_priv = bsp_priv_;
+
+ rk_gmac_powerdown(bsp_priv);
+
+ if (priv->plat->phy_node && bsp_priv->integrated_phy)
+ clk_put(bsp_priv->clk_phy);
}
static int rk_gmac_probe(struct platform_device *pdev)
@@ -1831,9 +1802,18 @@ static int rk_gmac_probe(struct platform_device *pdev)
/* If the stmmac is not already selected as gmac4,
* then make sure we fallback to gmac.
*/
- if (!plat_dat->has_gmac4)
- plat_dat->has_gmac = true;
- plat_dat->fix_mac_speed = rk_fix_speed;
+ if (plat_dat->core_type != DWMAC_CORE_GMAC4) {
+ plat_dat->core_type = DWMAC_CORE_GMAC;
+ plat_dat->rx_fifo_size = 4096;
+ plat_dat->tx_fifo_size = 2048;
+ }
+
+ plat_dat->get_interfaces = rk_get_interfaces;
+ plat_dat->set_clk_tx_rate = rk_set_clk_tx_rate;
+ plat_dat->init = rk_gmac_init;
+ plat_dat->exit = rk_gmac_exit;
+ plat_dat->suspend = rk_gmac_suspend;
+ plat_dat->resume = rk_gmac_resume;
plat_dat->bsp_priv = rk_gmac_setup(pdev, plat_dat, data);
if (IS_ERR(plat_dat->bsp_priv))
@@ -1843,62 +1823,9 @@ static int rk_gmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = rk_gmac_powerup(plat_dat->bsp_priv);
- if (ret)
- return ret;
-
- ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
- if (ret)
- goto err_gmac_powerdown;
-
- return 0;
-
-err_gmac_powerdown:
- rk_gmac_powerdown(plat_dat->bsp_priv);
-
- return ret;
-}
-
-static void rk_gmac_remove(struct platform_device *pdev)
-{
- struct rk_priv_data *bsp_priv = get_stmmac_bsp_priv(&pdev->dev);
-
- stmmac_dvr_remove(&pdev->dev);
-
- rk_gmac_powerdown(bsp_priv);
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int rk_gmac_suspend(struct device *dev)
-{
- struct rk_priv_data *bsp_priv = get_stmmac_bsp_priv(dev);
- int ret = stmmac_suspend(dev);
-
- /* Keep the PHY up if we use Wake-on-Lan. */
- if (!device_may_wakeup(dev)) {
- rk_gmac_powerdown(bsp_priv);
- bsp_priv->suspended = true;
- }
-
- return ret;
+ return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
}
-static int rk_gmac_resume(struct device *dev)
-{
- struct rk_priv_data *bsp_priv = get_stmmac_bsp_priv(dev);
-
- /* The PHY was up for Wake-on-Lan. */
- if (bsp_priv->suspended) {
- rk_gmac_powerup(bsp_priv);
- bsp_priv->suspended = false;
- }
-
- return stmmac_resume(dev);
-}
-#endif /* CONFIG_PM_SLEEP */
-
-static SIMPLE_DEV_PM_OPS(rk_gmac_pm_ops, rk_gmac_suspend, rk_gmac_resume);
-
static const struct of_device_id rk_gmac_dwmac_match[] = {
{ .compatible = "rockchip,px30-gmac", .data = &px30_ops },
{ .compatible = "rockchip,rk3128-gmac", .data = &rk3128_ops },
@@ -1909,7 +1836,10 @@ static const struct of_device_id rk_gmac_dwmac_match[] = {
{ .compatible = "rockchip,rk3366-gmac", .data = &rk3366_ops },
{ .compatible = "rockchip,rk3368-gmac", .data = &rk3368_ops },
{ .compatible = "rockchip,rk3399-gmac", .data = &rk3399_ops },
+ { .compatible = "rockchip,rk3506-gmac", .data = &rk3506_ops },
+ { .compatible = "rockchip,rk3528-gmac", .data = &rk3528_ops },
{ .compatible = "rockchip,rk3568-gmac", .data = &rk3568_ops },
+ { .compatible = "rockchip,rk3576-gmac", .data = &rk3576_ops },
{ .compatible = "rockchip,rk3588-gmac", .data = &rk3588_ops },
{ .compatible = "rockchip,rv1108-gmac", .data = &rv1108_ops },
{ .compatible = "rockchip,rv1126-gmac", .data = &rv1126_ops },
@@ -1919,10 +1849,9 @@ MODULE_DEVICE_TABLE(of, rk_gmac_dwmac_match);
static struct platform_driver rk_gmac_dwmac_driver = {
.probe = rk_gmac_probe,
- .remove_new = rk_gmac_remove,
.driver = {
.name = "rk_gmac-dwmac",
- .pm = &rk_gmac_pm_ops,
+ .pm = &stmmac_simple_pm_ops,
.of_match_table = rk_gmac_dwmac_match,
},
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rzn1.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rzn1.c
new file mode 100644
index 000000000000..13634965bc19
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rzn1.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2024 Schneider-Electric
+ *
+ * Clément Léger <clement.leger@bootlin.com>
+ */
+
+#include <linux/of.h>
+#include <linux/pcs-rzn1-miic.h>
+#include <linux/phylink.h>
+#include <linux/platform_device.h>
+
+#include "stmmac_platform.h"
+#include "stmmac.h"
+
+static int rzn1_dwmac_pcs_init(struct stmmac_priv *priv)
+{
+ struct device_node *np = priv->device->of_node;
+ struct device_node *pcs_node;
+ struct phylink_pcs *pcs;
+
+ pcs_node = of_parse_phandle(np, "pcs-handle", 0);
+
+ if (pcs_node) {
+ pcs = miic_create(priv->device, pcs_node);
+ of_node_put(pcs_node);
+ if (IS_ERR(pcs))
+ return PTR_ERR(pcs);
+
+ priv->hw->phylink_pcs = pcs;
+ }
+
+ return 0;
+}
+
+static void rzn1_dwmac_pcs_exit(struct stmmac_priv *priv)
+{
+ if (priv->hw->phylink_pcs)
+ miic_destroy(priv->hw->phylink_pcs);
+}
+
+static struct phylink_pcs *rzn1_dwmac_select_pcs(struct stmmac_priv *priv,
+ phy_interface_t interface)
+{
+ return priv->hw->phylink_pcs;
+}
+
+static int rzn1_dwmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
+ plat_dat->bsp_priv = plat_dat;
+ plat_dat->pcs_init = rzn1_dwmac_pcs_init;
+ plat_dat->pcs_exit = rzn1_dwmac_pcs_exit;
+ plat_dat->select_pcs = rzn1_dwmac_select_pcs;
+
+ ret = stmmac_dvr_probe(dev, plat_dat, &stmmac_res);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct of_device_id rzn1_dwmac_match[] = {
+ { .compatible = "renesas,rzn1-gmac" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, rzn1_dwmac_match);
+
+static struct platform_driver rzn1_dwmac_driver = {
+ .probe = rzn1_dwmac_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "rzn1-dwmac",
+ .of_match_table = rzn1_dwmac_match,
+ },
+};
+module_platform_driver(rzn1_dwmac_driver);
+
+MODULE_AUTHOR("Clément Léger <clement.leger@bootlin.com>");
+MODULE_DESCRIPTION("Renesas RZN1 DWMAC specific glue layer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c
new file mode 100644
index 000000000000..5a485ee98fa7
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NXP S32G/R GMAC glue layer
+ *
+ * Copyright 2019-2024 NXP
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/ethtool.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_mdio.h>
+#include <linux/of_address.h>
+#include <linux/phy.h>
+#include <linux/phylink.h>
+#include <linux/platform_device.h>
+#include <linux/stmmac.h>
+
+#include "stmmac_platform.h"
+
+#define GMAC_INTF_RATE_125M 125000000 /* 125MHz */
+
+/* SoC PHY interface control register */
+#define S32_PHY_INTF_SEL_MII 0x00
+#define S32_PHY_INTF_SEL_SGMII 0x01
+#define S32_PHY_INTF_SEL_RGMII 0x02
+#define S32_PHY_INTF_SEL_RMII 0x08
+
+struct s32_priv_data {
+ void __iomem *ioaddr;
+ void __iomem *ctrl_sts;
+ struct device *dev;
+ phy_interface_t *intf_mode;
+ struct clk *tx_clk;
+ struct clk *rx_clk;
+};
+
+static int s32_gmac_write_phy_intf_select(struct s32_priv_data *gmac)
+{
+ writel(S32_PHY_INTF_SEL_RGMII, gmac->ctrl_sts);
+
+ dev_dbg(gmac->dev, "PHY mode set to %s\n", phy_modes(*gmac->intf_mode));
+
+ return 0;
+}
+
+static int s32_gmac_init(struct device *dev, void *priv)
+{
+ struct s32_priv_data *gmac = priv;
+ int ret;
+
+ /* Set initial TX interface clock */
+ ret = clk_prepare_enable(gmac->tx_clk);
+ if (ret) {
+ dev_err(dev, "Can't enable tx clock\n");
+ return ret;
+ }
+ ret = clk_set_rate(gmac->tx_clk, GMAC_INTF_RATE_125M);
+ if (ret) {
+ dev_err(dev, "Can't set tx clock\n");
+ goto err_tx_disable;
+ }
+
+ /* Set initial RX interface clock */
+ ret = clk_prepare_enable(gmac->rx_clk);
+ if (ret) {
+ dev_err(dev, "Can't enable rx clock\n");
+ goto err_tx_disable;
+ }
+ ret = clk_set_rate(gmac->rx_clk, GMAC_INTF_RATE_125M);
+ if (ret) {
+ dev_err(dev, "Can't set rx clock\n");
+ goto err_txrx_disable;
+ }
+
+ /* Set interface mode */
+ ret = s32_gmac_write_phy_intf_select(gmac);
+ if (ret) {
+ dev_err(dev, "Can't set PHY interface mode\n");
+ goto err_txrx_disable;
+ }
+
+ return 0;
+
+err_txrx_disable:
+ clk_disable_unprepare(gmac->rx_clk);
+err_tx_disable:
+ clk_disable_unprepare(gmac->tx_clk);
+ return ret;
+}
+
+static void s32_gmac_exit(struct device *dev, void *priv)
+{
+ struct s32_priv_data *gmac = priv;
+
+ clk_disable_unprepare(gmac->tx_clk);
+ clk_disable_unprepare(gmac->rx_clk);
+}
+
+static int s32_dwmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat;
+ struct device *dev = &pdev->dev;
+ struct stmmac_resources res;
+ struct s32_priv_data *gmac;
+ int ret;
+
+ gmac = devm_kzalloc(&pdev->dev, sizeof(*gmac), GFP_KERNEL);
+ if (!gmac)
+ return -ENOMEM;
+
+ gmac->dev = &pdev->dev;
+
+ ret = stmmac_get_platform_resources(pdev, &res);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to get platform resources\n");
+
+ plat = devm_stmmac_probe_config_dt(pdev, res.mac);
+ if (IS_ERR(plat))
+ return dev_err_probe(dev, PTR_ERR(plat),
+ "dt configuration failed\n");
+
+ /* PHY interface mode control reg */
+ gmac->ctrl_sts = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
+ if (IS_ERR(gmac->ctrl_sts))
+ return dev_err_probe(dev, PTR_ERR(gmac->ctrl_sts),
+ "S32CC config region is missing\n");
+
+ /* tx clock */
+ gmac->tx_clk = devm_clk_get(&pdev->dev, "tx");
+ if (IS_ERR(gmac->tx_clk))
+ return dev_err_probe(dev, PTR_ERR(gmac->tx_clk),
+ "tx clock not found\n");
+
+ /* rx clock */
+ gmac->rx_clk = devm_clk_get(&pdev->dev, "rx");
+ if (IS_ERR(gmac->rx_clk))
+ return dev_err_probe(dev, PTR_ERR(gmac->rx_clk),
+ "rx clock not found\n");
+
+ gmac->intf_mode = &plat->phy_interface;
+ gmac->ioaddr = res.addr;
+
+ /* S32CC core feature set */
+ plat->core_type = DWMAC_CORE_GMAC4;
+ plat->pmt = 1;
+ plat->flags |= STMMAC_FLAG_SPH_DISABLE;
+ plat->rx_fifo_size = 20480;
+ plat->tx_fifo_size = 20480;
+
+ plat->init = s32_gmac_init;
+ plat->exit = s32_gmac_exit;
+
+ plat->clk_tx_i = gmac->tx_clk;
+ plat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
+
+ plat->bsp_priv = gmac;
+
+ return stmmac_pltfr_probe(pdev, plat, &res);
+}
+
+static const struct of_device_id s32_dwmac_match[] = {
+ { .compatible = "nxp,s32g2-dwmac" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, s32_dwmac_match);
+
+static struct platform_driver s32_dwmac_driver = {
+ .probe = s32_dwmac_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "s32-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = s32_dwmac_match,
+ },
+};
+module_platform_driver(s32_dwmac_driver);
+
+MODULE_AUTHOR("Jan Petrous (OSS) <jan.petrous@oss.nxp.com>");
+MODULE_DESCRIPTION("NXP S32G/R common chassis GMAC driver");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index 68f85e4605cb..a2b52d2c4eb6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -5,6 +5,7 @@
*/
#include <linux/mfd/altera-sysmgr.h>
+#include <linux/clocksource_ids.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_net.h>
@@ -15,8 +16,10 @@
#include <linux/reset.h>
#include <linux/stmmac.h>
+#include "dwxgmac2.h"
#include "stmmac.h"
#include "stmmac_platform.h"
+#include "stmmac_ptp.h"
#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII 0x0
#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII 0x1
@@ -41,15 +44,24 @@
#define SGMII_ADAPTER_ENABLE 0x0000
#define SGMII_ADAPTER_DISABLE 0x0001
+#define SMTG_MDIO_ADDR 0x15
+#define SMTG_TSC_WORD0 0xC
+#define SMTG_TSC_WORD1 0xD
+#define SMTG_TSC_WORD2 0xE
+#define SMTG_TSC_WORD3 0xF
+#define SMTG_TSC_SHIFT 16
+
struct socfpga_dwmac;
struct socfpga_dwmac_ops {
int (*set_phy_mode)(struct socfpga_dwmac *dwmac_priv);
+ void (*setup_plat_dat)(struct socfpga_dwmac *dwmac_priv);
};
struct socfpga_dwmac {
u32 reg_offset;
u32 reg_shift;
struct device *dev;
+ struct plat_stmmacenet_data *plat_dat;
struct regmap *sys_mgr_base_addr;
struct reset_control *stmmac_rst;
struct reset_control *stmmac_ocp_rst;
@@ -58,17 +70,15 @@ struct socfpga_dwmac {
void __iomem *sgmii_adapter_base;
bool f2h_ptp_ref_clk;
const struct socfpga_dwmac_ops *ops;
- struct mdio_device *pcs_mdiodev;
};
-static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
+static void socfpga_dwmac_fix_mac_speed(void *bsp_priv, int speed,
+ unsigned int mode)
{
- struct socfpga_dwmac *dwmac = (struct socfpga_dwmac *)priv;
+ struct socfpga_dwmac *dwmac = (struct socfpga_dwmac *)bsp_priv;
+ struct stmmac_priv *priv = netdev_priv(dev_get_drvdata(dwmac->dev));
void __iomem *splitter_base = dwmac->splitter_base;
void __iomem *sgmii_adapter_base = dwmac->sgmii_adapter_base;
- struct device *dev = dwmac->dev;
- struct net_device *ndev = dev_get_drvdata(dev);
- struct phy_device *phy_dev = ndev->phydev;
u32 val;
if (sgmii_adapter_base)
@@ -95,7 +105,9 @@ static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed, unsigned
writel(val, splitter_base + EMAC_SPLITTER_CTRL_REG);
}
- if (phy_dev && sgmii_adapter_base)
+ if ((priv->plat->phy_interface == PHY_INTERFACE_MODE_SGMII ||
+ priv->plat->phy_interface == PHY_INTERFACE_MODE_1000BASEX) &&
+ sgmii_adapter_base)
writew(SGMII_ADAPTER_ENABLE,
sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
}
@@ -233,10 +245,7 @@ err_node_put:
static int socfpga_get_plat_phymode(struct socfpga_dwmac *dwmac)
{
- struct net_device *ndev = dev_get_drvdata(dwmac->dev);
- struct stmmac_priv *priv = netdev_priv(ndev);
-
- return priv->plat->mac_interface;
+ return dwmac->plat_dat->phy_interface;
}
static void socfpga_sgmii_config(struct socfpga_dwmac *dwmac, bool enable)
@@ -258,6 +267,7 @@ static int socfpga_set_phy_mode_common(int phymode, u32 *val)
case PHY_INTERFACE_MODE_MII:
case PHY_INTERFACE_MODE_GMII:
case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
*val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII;
break;
case PHY_INTERFACE_MODE_RMII:
@@ -269,6 +279,112 @@ static int socfpga_set_phy_mode_common(int phymode, u32 *val)
return 0;
}
+static void get_smtgtime(struct mii_bus *mii, int smtg_addr, u64 *smtg_time)
+{
+ u64 ns;
+
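+ /* Assemble the 64-bit SMTG timestamp from four 16-bit MDIO word
+ * registers, most significant word first.
+ */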
+ ns = mdiobus_read(mii, smtg_addr, SMTG_TSC_WORD3);
+ ns <<= SMTG_TSC_SHIFT;
+ ns |= mdiobus_read(mii, smtg_addr, SMTG_TSC_WORD2);
+ ns <<= SMTG_TSC_SHIFT;
+ ns |= mdiobus_read(mii, smtg_addr, SMTG_TSC_WORD1);
+ ns <<= SMTG_TSC_SHIFT;
+ ns |= mdiobus_read(mii, smtg_addr, SMTG_TSC_WORD0);
+
+ *smtg_time = ns;
+}
+
+static int smtg_crosststamp(ktime_t *device, struct system_counterval_t *system,
+ void *ctx)
+{
+ struct stmmac_priv *priv = (struct stmmac_priv *)ctx;
+ u32 num_snapshot, gpio_value, acr_value;
+ void __iomem *ptpaddr = priv->ptpaddr;
+ void __iomem *ioaddr = priv->hw->pcsr;
+ unsigned long flags;
+ u64 smtg_time = 0;
+ u64 ptp_time = 0;
+ int i, ret;
+ u32 v;
+
+ /* Internal cross-timestamping and externally triggered event
+ * timestamping cannot run concurrently.
+ */
+ if (priv->plat->flags & STMMAC_FLAG_EXT_SNAPSHOT_EN)
+ return -EBUSY;
+
+ mutex_lock(&priv->aux_ts_lock);
+ /* Enable Internal snapshot trigger */
+ acr_value = readl(ptpaddr + PTP_ACR);
+ acr_value &= ~PTP_ACR_MASK;
+ switch (priv->plat->int_snapshot_num) {
+ case AUX_SNAPSHOT0:
+ acr_value |= PTP_ACR_ATSEN0;
+ break;
+ case AUX_SNAPSHOT1:
+ acr_value |= PTP_ACR_ATSEN1;
+ break;
+ case AUX_SNAPSHOT2:
+ acr_value |= PTP_ACR_ATSEN2;
+ break;
+ case AUX_SNAPSHOT3:
+ acr_value |= PTP_ACR_ATSEN3;
+ break;
+ default:
+ mutex_unlock(&priv->aux_ts_lock);
+ return -EINVAL;
+ }
+ writel(acr_value, ptpaddr + PTP_ACR);
+
+ /* Clear FIFO */
+ acr_value = readl(ptpaddr + PTP_ACR);
+ acr_value |= PTP_ACR_ATSFC;
+ writel(acr_value, ptpaddr + PTP_ACR);
+ /* Release the mutex */
+ mutex_unlock(&priv->aux_ts_lock);
+
+ /* Trigger the internal snapshot signal: create a rising edge by
+ * toggling GPO0 low and then back high.
+ */
+ gpio_value = readl(ioaddr + XGMAC_GPIO_STATUS);
+ gpio_value &= ~XGMAC_GPIO_GPO0;
+ writel(gpio_value, ioaddr + XGMAC_GPIO_STATUS);
+ gpio_value |= XGMAC_GPIO_GPO0;
+ writel(gpio_value, ioaddr + XGMAC_GPIO_STATUS);
+
+ /* Poll for time sync operation done */
+ ret = readl_poll_timeout(priv->ioaddr + XGMAC_INT_STATUS, v,
+ (v & XGMAC_INT_TSIS), 100, 10000);
+ if (ret) {
+ netdev_err(priv->dev, "%s: Wait for time sync operation timeout\n",
+ __func__);
+ return ret;
+ }
+
+ *system = (struct system_counterval_t) {
+ .cycles = 0,
+ .cs_id = CSID_ARM_ARCH_COUNTER,
+ .use_nsecs = false,
+ };
+
+ num_snapshot = (readl(ioaddr + XGMAC_TIMESTAMP_STATUS) &
+ XGMAC_TIMESTAMP_ATSNS_MASK) >>
+ XGMAC_TIMESTAMP_ATSNS_SHIFT;
+
+ /* Repeat until the timestamps are from the last FIFO segment */
+ for (i = 0; i < num_snapshot; i++) {
+ read_lock_irqsave(&priv->ptp_lock, flags);
+ stmmac_get_ptptime(priv, ptpaddr, &ptp_time);
+ *device = ns_to_ktime(ptp_time);
+ read_unlock_irqrestore(&priv->ptp_lock, flags);
+ }
+
+ get_smtgtime(priv->mii, SMTG_MDIO_ADDR, &smtg_time);
+ system->cycles = smtg_time;
+
+ return 0;
+}
+
static int socfpga_gen5_set_phy_mode(struct socfpga_dwmac *dwmac)
{
struct regmap *sys_mgr_base_addr = dwmac->sys_mgr_base_addr;
@@ -379,6 +495,106 @@ static int socfpga_gen10_set_phy_mode(struct socfpga_dwmac *dwmac)
return 0;
}
+static int socfpga_dwmac_pcs_init(struct stmmac_priv *priv)
+{
+ struct socfpga_dwmac *dwmac = priv->plat->bsp_priv;
+ struct regmap_config pcs_regmap_cfg = {
+ .reg_bits = 16,
+ .val_bits = 16,
+ .reg_shift = REGMAP_UPSHIFT(1),
+ };
+ struct mdio_regmap_config mrc;
+ struct regmap *pcs_regmap;
+ struct phylink_pcs *pcs;
+ struct mii_bus *pcs_bus;
+
+ if (!dwmac->tse_pcs_base)
+ return 0;
+
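+ /* Expose the memory-mapped TSE PCS registers through an MDIO regmap
+ * bridge so the generic Lynx PCS driver can be used to drive them.
+ */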
+ pcs_regmap = devm_regmap_init_mmio(priv->device, dwmac->tse_pcs_base,
+ &pcs_regmap_cfg);
+ if (IS_ERR(pcs_regmap))
+ return PTR_ERR(pcs_regmap);
+
+ memset(&mrc, 0, sizeof(mrc));
+ mrc.regmap = pcs_regmap;
+ mrc.parent = priv->device;
+ mrc.valid_addr = 0x0;
+ mrc.autoscan = false;
+
+ /* Can't use ndev->name here because it will not have been initialised,
+ * and in any case, the user can rename network interfaces at runtime.
+ */
+ snprintf(mrc.name, MII_BUS_ID_SIZE, "%s-pcs-mii",
+ dev_name(priv->device));
+ pcs_bus = devm_mdio_regmap_register(priv->device, &mrc);
+ if (IS_ERR(pcs_bus))
+ return PTR_ERR(pcs_bus);
+
+ pcs = lynx_pcs_create_mdiodev(pcs_bus, 0);
+ if (IS_ERR(pcs))
+ return PTR_ERR(pcs);
+
+ priv->hw->phylink_pcs = pcs;
+ return 0;
+}
+
+static void socfpga_dwmac_pcs_exit(struct stmmac_priv *priv)
+{
+ if (priv->hw->phylink_pcs)
+ lynx_pcs_destroy(priv->hw->phylink_pcs);
+}
+
+static struct phylink_pcs *socfpga_dwmac_select_pcs(struct stmmac_priv *priv,
+ phy_interface_t interface)
+{
+ return priv->hw->phylink_pcs;
+}
+
+static int socfpga_dwmac_init(struct device *dev, void *bsp_priv)
+{
+ struct socfpga_dwmac *dwmac = bsp_priv;
+
+ return dwmac->ops->set_phy_mode(dwmac);
+}
+
+static void socfpga_gen5_setup_plat_dat(struct socfpga_dwmac *dwmac)
+{
+ struct plat_stmmacenet_data *plat_dat = dwmac->plat_dat;
+
+ plat_dat->core_type = DWMAC_CORE_GMAC;
+
+ /* Rx watchdog timer in dwmac is buggy in this hw */
+ plat_dat->riwt_off = 1;
+}
+
+static void socfpga_agilex5_setup_plat_dat(struct socfpga_dwmac *dwmac)
+{
+ struct plat_stmmacenet_data *plat_dat = dwmac->plat_dat;
+
+ plat_dat->core_type = DWMAC_CORE_XGMAC;
+
+ /* Enable TSO */
+ plat_dat->flags |= STMMAC_FLAG_TSO_EN;
+
+ /* Enable TBS */
+ switch (plat_dat->tx_queues_to_use) {
+ case 8:
+ plat_dat->tx_queues_cfg[7].tbs_en = true;
+ fallthrough;
+ case 7:
+ plat_dat->tx_queues_cfg[6].tbs_en = true;
+ break;
+ default:
+ /* Tx queues 0 - 5 don't support TBS on Agilex5 */
+ break;
+ }
+
+ /* HW-supported cross-timestamping */
+ plat_dat->int_snapshot_num = AUX_SNAPSHOT0;
+ plat_dat->crosststamp = smtg_crosststamp;
+}
+
static int socfpga_dwmac_probe(struct platform_device *pdev)
{
struct plat_stmmacenet_data *plat_dat;
@@ -386,8 +602,6 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
int ret;
struct socfpga_dwmac *dwmac;
- struct net_device *ndev;
- struct stmmac_priv *stpriv;
const struct socfpga_dwmac_ops *ops;
ops = device_get_match_data(&pdev->dev);
@@ -423,163 +637,54 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
return ret;
}
- dwmac->ops = ops;
- plat_dat->bsp_priv = dwmac;
- plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed;
-
- ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
- if (ret)
- return ret;
-
- ndev = platform_get_drvdata(pdev);
- stpriv = netdev_priv(ndev);
-
/* The socfpga driver needs to control the stmmac reset to set the phy
* mode. Create a copy of the core reset handle so it can be used by
* the driver later.
*/
- dwmac->stmmac_rst = stpriv->plat->stmmac_rst;
-
- ret = ops->set_phy_mode(dwmac);
- if (ret)
- goto err_dvr_remove;
-
- /* Create a regmap for the PCS so that it can be used by the PCS driver,
- * if we have such a PCS
- */
- if (dwmac->tse_pcs_base) {
- struct regmap_config pcs_regmap_cfg;
- struct mdio_regmap_config mrc;
- struct regmap *pcs_regmap;
- struct mii_bus *pcs_bus;
-
- memset(&pcs_regmap_cfg, 0, sizeof(pcs_regmap_cfg));
- memset(&mrc, 0, sizeof(mrc));
-
- pcs_regmap_cfg.reg_bits = 16;
- pcs_regmap_cfg.val_bits = 16;
- pcs_regmap_cfg.reg_shift = REGMAP_UPSHIFT(1);
-
- pcs_regmap = devm_regmap_init_mmio(&pdev->dev, dwmac->tse_pcs_base,
- &pcs_regmap_cfg);
- if (IS_ERR(pcs_regmap)) {
- ret = PTR_ERR(pcs_regmap);
- goto err_dvr_remove;
- }
-
- mrc.regmap = pcs_regmap;
- mrc.parent = &pdev->dev;
- mrc.valid_addr = 0x0;
- mrc.autoscan = false;
-
- snprintf(mrc.name, MII_BUS_ID_SIZE, "%s-pcs-mii", ndev->name);
- pcs_bus = devm_mdio_regmap_register(&pdev->dev, &mrc);
- if (IS_ERR(pcs_bus)) {
- ret = PTR_ERR(pcs_bus);
- goto err_dvr_remove;
- }
-
- stpriv->hw->lynx_pcs = lynx_pcs_create_mdiodev(pcs_bus, 0);
- if (IS_ERR(stpriv->hw->lynx_pcs)) {
- ret = PTR_ERR(stpriv->hw->lynx_pcs);
- goto err_dvr_remove;
- }
- }
-
- return 0;
-
-err_dvr_remove:
- stmmac_dvr_remove(&pdev->dev);
-
- return ret;
-}
-
-static void socfpga_dwmac_remove(struct platform_device *pdev)
-{
- struct net_device *ndev = platform_get_drvdata(pdev);
- struct stmmac_priv *priv = netdev_priv(ndev);
- struct phylink_pcs *pcs = priv->hw->lynx_pcs;
-
- stmmac_pltfr_remove(pdev);
-
- lynx_pcs_destroy(pcs);
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int socfpga_dwmac_resume(struct device *dev)
-{
- struct net_device *ndev = dev_get_drvdata(dev);
- struct stmmac_priv *priv = netdev_priv(ndev);
- struct socfpga_dwmac *dwmac_priv = get_stmmac_bsp_priv(dev);
-
- dwmac_priv->ops->set_phy_mode(priv->plat->bsp_priv);
-
- /* Before the enet controller is suspended, the phy is suspended.
- * This causes the phy clock to be gated. The enet controller is
- * resumed before the phy, so the clock is still gated "off" when
- * the enet controller is resumed. This code makes sure the phy
- * is "resumed" before reinitializing the enet controller since
- * the enet controller depends on an active phy clock to complete
- * a DMA reset. A DMA reset will "time out" if executed
- * with no phy clock input on the Synopsys enet controller.
- * Verified through Synopsys Case #8000711656.
- *
- * Note that the phy clock is also gated when the phy is isolated.
- * Phy "suspend" and "isolate" controls are located in phy basic
- * control register 0, and can be modified by the phy driver
- * framework.
- */
- if (ndev->phydev)
- phy_resume(ndev->phydev);
-
- return stmmac_resume(dev);
-}
-#endif /* CONFIG_PM_SLEEP */
-
-static int __maybe_unused socfpga_dwmac_runtime_suspend(struct device *dev)
-{
- struct net_device *ndev = dev_get_drvdata(dev);
- struct stmmac_priv *priv = netdev_priv(ndev);
-
- stmmac_bus_clks_config(priv, false);
+ dwmac->stmmac_rst = plat_dat->stmmac_rst;
+ dwmac->ops = ops;
+ dwmac->plat_dat = plat_dat;
- return 0;
-}
+ plat_dat->bsp_priv = dwmac;
+ plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed;
+ plat_dat->init = socfpga_dwmac_init;
+ plat_dat->pcs_init = socfpga_dwmac_pcs_init;
+ plat_dat->pcs_exit = socfpga_dwmac_pcs_exit;
+ plat_dat->select_pcs = socfpga_dwmac_select_pcs;
-static int __maybe_unused socfpga_dwmac_runtime_resume(struct device *dev)
-{
- struct net_device *ndev = dev_get_drvdata(dev);
- struct stmmac_priv *priv = netdev_priv(ndev);
+ ops->setup_plat_dat(dwmac);
- return stmmac_bus_clks_config(priv, true);
+ return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
}
-static const struct dev_pm_ops socfpga_dwmac_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(stmmac_suspend, socfpga_dwmac_resume)
- SET_RUNTIME_PM_OPS(socfpga_dwmac_runtime_suspend, socfpga_dwmac_runtime_resume, NULL)
-};
-
static const struct socfpga_dwmac_ops socfpga_gen5_ops = {
.set_phy_mode = socfpga_gen5_set_phy_mode,
+ .setup_plat_dat = socfpga_gen5_setup_plat_dat,
};
static const struct socfpga_dwmac_ops socfpga_gen10_ops = {
.set_phy_mode = socfpga_gen10_set_phy_mode,
+ .setup_plat_dat = socfpga_gen5_setup_plat_dat,
+};
+
+static const struct socfpga_dwmac_ops socfpga_agilex5_ops = {
+ .set_phy_mode = socfpga_gen10_set_phy_mode,
+ .setup_plat_dat = socfpga_agilex5_setup_plat_dat,
};
static const struct of_device_id socfpga_dwmac_match[] = {
{ .compatible = "altr,socfpga-stmmac", .data = &socfpga_gen5_ops },
{ .compatible = "altr,socfpga-stmmac-a10-s10", .data = &socfpga_gen10_ops },
+ { .compatible = "altr,socfpga-stmmac-agilex5", .data = &socfpga_agilex5_ops },
{ }
};
MODULE_DEVICE_TABLE(of, socfpga_dwmac_match);
static struct platform_driver socfpga_dwmac_driver = {
.probe = socfpga_dwmac_probe,
- .remove_new = socfpga_dwmac_remove,
.driver = {
.name = "socfpga-dwmac",
- .pm = &socfpga_dwmac_pm_ops,
+ .pm = &stmmac_pltfr_pm_ops,
.of_match_table = socfpga_dwmac_match,
},
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c
new file mode 100644
index 000000000000..44d4ceb8415f
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Sophgo DWMAC platform driver
+ *
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@gmail.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+
+#include "stmmac_platform.h"
+
+struct sophgo_dwmac_data {
+ bool has_internal_rx_delay;
+};
+
+static int sophgo_sg2044_dwmac_init(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat_dat,
+ struct stmmac_resources *stmmac_res)
+{
+ plat_dat->clk_tx_i = devm_clk_get_enabled(&pdev->dev, "tx");
+ if (IS_ERR(plat_dat->clk_tx_i))
+ return dev_err_probe(&pdev->dev, PTR_ERR(plat_dat->clk_tx_i),
+ "failed to get tx clock\n");
+
+ plat_dat->flags |= STMMAC_FLAG_SPH_DISABLE;
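+ /* Let the stmmac core scale clk_tx_i with the negotiated link speed */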
+ plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
+ plat_dat->multicast_filter_bins = 0;
+
+ return 0;
+}
+
+static int sophgo_dwmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ const struct sophgo_dwmac_data *data;
+ struct stmmac_resources stmmac_res;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to get platform resources\n");
+
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return dev_err_probe(dev, PTR_ERR(plat_dat),
+ "failed to parse DT parameters\n");
+
+ ret = sophgo_sg2044_dwmac_init(pdev, plat_dat, &stmmac_res);
+ if (ret)
+ return ret;
+
+ data = device_get_match_data(&pdev->dev);
+ if (data && data->has_internal_rx_delay) {
+ plat_dat->phy_interface = phy_fix_phy_mode_for_mac_delays(plat_dat->phy_interface,
+ false, true);
+ if (plat_dat->phy_interface == PHY_INTERFACE_MODE_NA)
+ return -EINVAL;
+ }
+
+ return stmmac_dvr_probe(dev, plat_dat, &stmmac_res);
+}
+
+static const struct sophgo_dwmac_data sg2042_dwmac_data = {
+ .has_internal_rx_delay = true,
+};
+
+static const struct of_device_id sophgo_dwmac_match[] = {
+ { .compatible = "sophgo,sg2042-dwmac", .data = &sg2042_dwmac_data },
+ { .compatible = "sophgo,sg2044-dwmac" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sophgo_dwmac_match);
+
+static struct platform_driver sophgo_dwmac_driver = {
+ .probe = sophgo_dwmac_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "sophgo-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = sophgo_dwmac_match,
+ },
+};
+module_platform_driver(sophgo_dwmac_driver);
+
+MODULE_AUTHOR("Inochi Amaoto <inochiama@gmail.com>");
+MODULE_DESCRIPTION("Sophgo DWMAC platform driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
index 4e1076faee0c..16b955a6d77b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
@@ -15,8 +15,6 @@
#include "stmmac_platform.h"
-#define STARFIVE_DWMAC_PHY_INFT_RGMII 0x1
-#define STARFIVE_DWMAC_PHY_INFT_RMII 0x4
#define STARFIVE_DWMAC_PHY_INFT_FIELD 0x7U
#define JH7100_SYSMAIN_REGISTER49_DLYCHAIN 0xc8
@@ -27,62 +25,23 @@ struct starfive_dwmac_data {
struct starfive_dwmac {
struct device *dev;
- struct clk *clk_tx;
const struct starfive_dwmac_data *data;
};
-static void starfive_dwmac_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
-{
- struct starfive_dwmac *dwmac = priv;
- unsigned long rate;
- int err;
-
- rate = clk_get_rate(dwmac->clk_tx);
-
- switch (speed) {
- case SPEED_1000:
- rate = 125000000;
- break;
- case SPEED_100:
- rate = 25000000;
- break;
- case SPEED_10:
- rate = 2500000;
- break;
- default:
- dev_err(dwmac->dev, "invalid speed %u\n", speed);
- break;
- }
-
- err = clk_set_rate(dwmac->clk_tx, rate);
- if (err)
- dev_err(dwmac->dev, "failed to set tx rate %lu\n", rate);
-}
-
static int starfive_dwmac_set_mode(struct plat_stmmacenet_data *plat_dat)
{
struct starfive_dwmac *dwmac = plat_dat->bsp_priv;
struct regmap *regmap;
unsigned int args[2];
- unsigned int mode;
+ int phy_intf_sel;
int err;
- switch (plat_dat->mac_interface) {
- case PHY_INTERFACE_MODE_RMII:
- mode = STARFIVE_DWMAC_PHY_INFT_RMII;
- break;
-
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- mode = STARFIVE_DWMAC_PHY_INFT_RGMII;
- break;
-
- default:
- dev_err(dwmac->dev, "unsupported interface %d\n",
- plat_dat->mac_interface);
- return -EINVAL;
+ phy_intf_sel = stmmac_get_phy_intf_sel(plat_dat->phy_interface);
+ if (phy_intf_sel != PHY_INTF_SEL_RGMII &&
+ phy_intf_sel != PHY_INTF_SEL_RMII) {
+ dev_err(dwmac->dev, "unsupported interface %s\n",
+ phy_modes(plat_dat->phy_interface));
+ return phy_intf_sel < 0 ? phy_intf_sel : -EINVAL;
}
regmap = syscon_regmap_lookup_by_phandle_args(dwmac->dev->of_node,
@@ -94,7 +53,7 @@ static int starfive_dwmac_set_mode(struct plat_stmmacenet_data *plat_dat)
/* args[0]:offset args[1]: shift */
err = regmap_update_bits(regmap, args[0],
STARFIVE_DWMAC_PHY_INFT_FIELD << args[1],
- mode << args[1]);
+ phy_intf_sel << args[1]);
if (err)
return dev_err_probe(dwmac->dev, err, "error setting phy mode\n");
@@ -133,9 +92,9 @@ static int starfive_dwmac_probe(struct platform_device *pdev)
dwmac->data = device_get_match_data(&pdev->dev);
- dwmac->clk_tx = devm_clk_get_enabled(&pdev->dev, "tx");
- if (IS_ERR(dwmac->clk_tx))
- return dev_err_probe(&pdev->dev, PTR_ERR(dwmac->clk_tx),
+ plat_dat->clk_tx_i = devm_clk_get_enabled(&pdev->dev, "tx");
+ if (IS_ERR(plat_dat->clk_tx_i))
+ return dev_err_probe(&pdev->dev, PTR_ERR(plat_dat->clk_tx_i),
"error getting tx clock\n");
clk_gtx = devm_clk_get_enabled(&pdev->dev, "gtx");
@@ -150,9 +109,10 @@ static int starfive_dwmac_probe(struct platform_device *pdev)
* internally, because rgmii_rxin will be adaptively adjusted.
*/
if (!device_property_read_bool(&pdev->dev, "starfive,tx-use-rgmii-clk"))
- plat_dat->fix_mac_speed = starfive_dwmac_fix_mac_speed;
+ plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
dwmac->dev = &pdev->dev;
+ plat_dat->flags |= STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP;
plat_dat->bsp_priv = dwmac;
plat_dat->dma_cfg->dche = true;
@@ -176,7 +136,7 @@ MODULE_DEVICE_TABLE(of, starfive_dwmac_match);
static struct platform_driver starfive_dwmac_driver = {
.probe = starfive_dwmac_probe,
- .remove_new = stmmac_pltfr_remove,
+ .remove = stmmac_pltfr_remove,
.driver = {
.name = "starfive-dwmac",
.pm = &stmmac_pltfr_pm_ops,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
index 4445cddc4cbe..f50547b67fbc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -21,17 +21,9 @@
#include "stmmac_platform.h"
-#define DWMAC_125MHZ 125000000
#define DWMAC_50MHZ 50000000
-#define DWMAC_25MHZ 25000000
-#define DWMAC_2_5MHZ 2500000
-#define IS_PHY_IF_MODE_RGMII(iface) (iface == PHY_INTERFACE_MODE_RGMII || \
- iface == PHY_INTERFACE_MODE_RGMII_ID || \
- iface == PHY_INTERFACE_MODE_RGMII_RXID || \
- iface == PHY_INTERFACE_MODE_RGMII_TXID)
-
-#define IS_PHY_IF_MODE_GBIT(iface) (IS_PHY_IF_MODE_RGMII(iface) || \
+#define IS_PHY_IF_MODE_GBIT(iface) (phy_interface_mode_is_rgmii(iface) || \
iface == PHY_INTERFACE_MODE_GMII)
/* STiH4xx register definitions (STiH407/STiH410 families)
@@ -85,13 +77,9 @@
* 001-RGMII
* 010-SGMII
* 100-RMII
+ * These are the DW MAC phy_intf_sel values.
*/
#define MII_PHY_SEL_MASK GENMASK(4, 2)
-#define ETH_PHY_SEL_RMII BIT(4)
-#define ETH_PHY_SEL_SGMII BIT(3)
-#define ETH_PHY_SEL_RGMII BIT(2)
-#define ETH_PHY_SEL_GMII 0x0
-#define ETH_PHY_SEL_MII 0x0
struct sti_dwmac {
phy_interface_t interface; /* MII interface */
@@ -102,21 +90,12 @@ struct sti_dwmac {
int clk_sel_reg; /* GMAC ext clk selection register */
struct regmap *regmap;
bool gmac_en;
- u32 speed;
- void (*fix_retime_src)(void *priv, unsigned int speed, unsigned int mode);
+ int speed;
+ void (*fix_retime_src)(void *priv, int speed, unsigned int mode);
};
struct sti_dwmac_of_data {
- void (*fix_retime_src)(void *priv, unsigned int speed, unsigned int mode);
-};
-
-static u32 phy_intf_sels[] = {
- [PHY_INTERFACE_MODE_MII] = ETH_PHY_SEL_MII,
- [PHY_INTERFACE_MODE_GMII] = ETH_PHY_SEL_GMII,
- [PHY_INTERFACE_MODE_RGMII] = ETH_PHY_SEL_RGMII,
- [PHY_INTERFACE_MODE_RGMII_ID] = ETH_PHY_SEL_RGMII,
- [PHY_INTERFACE_MODE_SGMII] = ETH_PHY_SEL_SGMII,
- [PHY_INTERFACE_MODE_RMII] = ETH_PHY_SEL_RMII,
+ void (*fix_retime_src)(void *priv, int speed, unsigned int mode);
};
enum {
@@ -135,12 +114,12 @@ static u32 stih4xx_tx_retime_val[] = {
| STIH4XX_ETH_SEL_INTERNAL_NOTEXT_PHYCLK,
};
-static void stih4xx_fix_retime_src(void *priv, u32 spd, unsigned int mode)
+static void stih4xx_fix_retime_src(void *priv, int spd, unsigned int mode)
{
struct sti_dwmac *dwmac = priv;
u32 src = dwmac->tx_retime_src;
u32 reg = dwmac->ctrl_reg;
- u32 freq = 0;
+ long freq = 0;
if (dwmac->interface == PHY_INTERFACE_MODE_MII) {
src = TX_RETIME_SRC_TXCLK;
@@ -151,40 +130,44 @@ static void stih4xx_fix_retime_src(void *priv, u32 spd, unsigned int mode)
src = TX_RETIME_SRC_CLKGEN;
freq = DWMAC_50MHZ;
}
- } else if (IS_PHY_IF_MODE_RGMII(dwmac->interface)) {
+ } else if (phy_interface_mode_is_rgmii(dwmac->interface)) {
/* On GiGa clk source can be either ext or from clkgen */
- if (spd == SPEED_1000) {
- freq = DWMAC_125MHZ;
- } else {
+ freq = rgmii_clock(spd);
+
+ if (spd != SPEED_1000 && freq > 0)
/* Switch to clkgen for these speeds */
src = TX_RETIME_SRC_CLKGEN;
- if (spd == SPEED_100)
- freq = DWMAC_25MHZ;
- else if (spd == SPEED_10)
- freq = DWMAC_2_5MHZ;
- }
}
- if (src == TX_RETIME_SRC_CLKGEN && freq)
+ if (src == TX_RETIME_SRC_CLKGEN && freq > 0)
clk_set_rate(dwmac->clk, freq);
regmap_update_bits(dwmac->regmap, reg, STIH4XX_RETIME_SRC_MASK,
stih4xx_tx_retime_val[src]);
}
-static int sti_dwmac_set_mode(struct sti_dwmac *dwmac)
+static int sti_set_phy_intf_sel(void *bsp_priv, u8 phy_intf_sel)
{
- struct regmap *regmap = dwmac->regmap;
- int iface = dwmac->interface;
- u32 reg = dwmac->ctrl_reg;
- u32 val;
+ struct sti_dwmac *dwmac = bsp_priv;
+ struct regmap *regmap;
+ u32 reg, val;
+
+ regmap = dwmac->regmap;
+ reg = dwmac->ctrl_reg;
if (dwmac->gmac_en)
regmap_update_bits(regmap, reg, EN_MASK, EN);
- regmap_update_bits(regmap, reg, MII_PHY_SEL_MASK, phy_intf_sels[iface]);
+ if (phy_intf_sel != PHY_INTF_SEL_GMII_MII &&
+ phy_intf_sel != PHY_INTF_SEL_RGMII &&
+ phy_intf_sel != PHY_INTF_SEL_SGMII &&
+ phy_intf_sel != PHY_INTF_SEL_RMII)
+ phy_intf_sel = PHY_INTF_SEL_GMII_MII;
+
+ regmap_update_bits(regmap, reg, MII_PHY_SEL_MASK,
+ FIELD_PREP(MII_PHY_SEL_MASK, phy_intf_sel));
- val = (iface == PHY_INTERFACE_MODE_REVMII) ? 0 : ENMII;
+ val = (dwmac->interface == PHY_INTERFACE_MODE_REVMII) ? 0 : ENMII;
regmap_update_bits(regmap, reg, ENMII_MASK, val);
dwmac->fix_retime_src(dwmac, dwmac->speed, 0);
@@ -193,7 +176,8 @@ static int sti_dwmac_set_mode(struct sti_dwmac *dwmac)
}
static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
- struct platform_device *pdev)
+ struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat_dat)
{
struct resource *res;
struct device *dev = &pdev->dev;
@@ -207,22 +191,12 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
if (res)
dwmac->clk_sel_reg = res->start;
- regmap = syscon_regmap_lookup_by_phandle(np, "st,syscon");
+ regmap = syscon_regmap_lookup_by_phandle_args(np, "st,syscon",
+ 1, &dwmac->ctrl_reg);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
- err = of_property_read_u32_index(np, "st,syscon", 1, &dwmac->ctrl_reg);
- if (err) {
- dev_err(dev, "Can't get sysconfig ctrl offset (%d)\n", err);
- return err;
- }
-
- err = of_get_phy_mode(np, &dwmac->interface);
- if (err && err != -ENODEV) {
- dev_err(dev, "Can't get phy-mode\n");
- return err;
- }
-
+ dwmac->interface = plat_dat->phy_interface;
dwmac->regmap = regmap;
dwmac->gmac_en = of_property_read_bool(np, "st,gmac_en");
dwmac->ext_phyclk = of_property_read_bool(np, "st,ext-phyclk");
@@ -255,6 +229,20 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
return 0;
}
+static int sti_dwmac_init(struct device *dev, void *bsp_priv)
+{
+ struct sti_dwmac *dwmac = bsp_priv;
+
+ return clk_prepare_enable(dwmac->clk);
+}
+
+static void sti_dwmac_exit(struct device *dev, void *bsp_priv)
+{
+ struct sti_dwmac *dwmac = bsp_priv;
+
+ clk_disable_unprepare(dwmac->clk);
+}
+
static int sti_dwmac_probe(struct platform_device *pdev)
{
struct plat_stmmacenet_data *plat_dat;
@@ -281,7 +269,7 @@ static int sti_dwmac_probe(struct platform_device *pdev)
if (!dwmac)
return -ENOMEM;
- ret = sti_dwmac_parse_data(dwmac, pdev);
+ ret = sti_dwmac_parse_data(dwmac, pdev, plat_dat);
if (ret) {
dev_err(&pdev->dev, "Unable to parse OF data\n");
return ret;
@@ -290,62 +278,14 @@ static int sti_dwmac_probe(struct platform_device *pdev)
dwmac->fix_retime_src = data->fix_retime_src;
plat_dat->bsp_priv = dwmac;
+ plat_dat->set_phy_intf_sel = sti_set_phy_intf_sel;
plat_dat->fix_mac_speed = data->fix_retime_src;
+ plat_dat->init = sti_dwmac_init;
+ plat_dat->exit = sti_dwmac_exit;
- ret = clk_prepare_enable(dwmac->clk);
- if (ret)
- return ret;
-
- ret = sti_dwmac_set_mode(dwmac);
- if (ret)
- goto disable_clk;
-
- ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
- if (ret)
- goto disable_clk;
-
- return 0;
-
-disable_clk:
- clk_disable_unprepare(dwmac->clk);
-
- return ret;
-}
-
-static void sti_dwmac_remove(struct platform_device *pdev)
-{
- struct sti_dwmac *dwmac = get_stmmac_bsp_priv(&pdev->dev);
-
- stmmac_dvr_remove(&pdev->dev);
-
- clk_disable_unprepare(dwmac->clk);
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int sti_dwmac_suspend(struct device *dev)
-{
- struct sti_dwmac *dwmac = get_stmmac_bsp_priv(dev);
- int ret = stmmac_suspend(dev);
-
- clk_disable_unprepare(dwmac->clk);
-
- return ret;
+ return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
}
-static int sti_dwmac_resume(struct device *dev)
-{
- struct sti_dwmac *dwmac = get_stmmac_bsp_priv(dev);
-
- clk_prepare_enable(dwmac->clk);
- sti_dwmac_set_mode(dwmac);
-
- return stmmac_resume(dev);
-}
-#endif /* CONFIG_PM_SLEEP */
-
-static SIMPLE_DEV_PM_OPS(sti_dwmac_pm_ops, sti_dwmac_suspend,
- sti_dwmac_resume);
-
static const struct sti_dwmac_of_data stih4xx_dwmac_data = {
.fix_retime_src = stih4xx_fix_retime_src,
};
@@ -358,10 +298,9 @@ MODULE_DEVICE_TABLE(of, sti_dwmac_match);
static struct platform_driver sti_dwmac_driver = {
.probe = sti_dwmac_probe,
- .remove_new = sti_dwmac_remove,
.driver = {
.name = "sti-dwmac",
- .pm = &sti_dwmac_pm_ops,
+ .pm = &stmmac_pltfr_pm_ops,
.of_match_table = sti_dwmac_match,
},
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
index c92dfc4ecf57..e1b260ed4790 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
@@ -47,18 +47,24 @@
*------------------------------------------
*/
#define SYSCFG_PMCR_ETH_SEL_MII BIT(20)
-#define SYSCFG_PMCR_ETH_SEL_RGMII BIT(21)
-#define SYSCFG_PMCR_ETH_SEL_RMII BIT(23)
-#define SYSCFG_PMCR_ETH_SEL_GMII 0
+#define SYSCFG_PMCR_PHY_INTF_SEL_MASK GENMASK(23, 21)
#define SYSCFG_MCU_ETH_SEL_MII 0
#define SYSCFG_MCU_ETH_SEL_RMII 1
-/* STM32MP1 register definitions
+/* STM32MP2 register definitions */
+#define SYSCFG_MP2_ETH_MASK GENMASK(31, 0)
+
+#define SYSCFG_ETHCR_ETH_SEL_MASK GENMASK(6, 4)
+#define SYSCFG_ETHCR_ETH_PTP_CLK_SEL BIT(2)
+#define SYSCFG_ETHCR_ETH_CLK_SEL BIT(1)
+#define SYSCFG_ETHCR_ETH_REF_CLK_SEL BIT(0)
+
+/* STM32MPx register definitions
*
* Below table summarizes the clock requirement and clock sources for
* supported phy interface modes.
* __________________________________________________________________________
- *|PHY_MODE | Normal | PHY wo crystal| PHY wo crystal |No 125Mhz from PHY|
+ *|PHY_MODE | Normal | PHY wo crystal| PHY wo crystal |No 125MHz from PHY|
*| | | 25MHz | 50MHz | |
* ---------------------------------------------------------------------------
*| MII | - | eth-ck | n/a | n/a |
@@ -90,6 +96,7 @@ struct stm32_dwmac {
int eth_ref_clk_sel_reg;
int irq_pwr_wakeup;
u32 mode_reg; /* MAC glue-logic mode register */
+ u32 mode_mask;
struct regmap *regmap;
u32 speed;
const struct stm32_ops *ops;
@@ -102,11 +109,12 @@ struct stm32_ops {
void (*resume)(struct stm32_dwmac *dwmac);
int (*parse_data)(struct stm32_dwmac *dwmac,
struct device *dev);
- u32 syscfg_eth_mask;
bool clk_rx_enable_in_suspend;
+ bool is_mp13, is_mp2;
+ u32 syscfg_clr_off;
};
-static int stm32_dwmac_clk_enable(struct stm32_dwmac *dwmac, bool resume)
+static int stm32_dwmac_clk_enable(struct stm32_dwmac *dwmac)
{
int ret;
@@ -114,11 +122,9 @@ static int stm32_dwmac_clk_enable(struct stm32_dwmac *dwmac, bool resume)
if (ret)
goto err_clk_tx;
- if (!dwmac->ops->clk_rx_enable_in_suspend || !resume) {
- ret = clk_prepare_enable(dwmac->clk_rx);
- if (ret)
- goto err_clk_rx;
- }
+ ret = clk_prepare_enable(dwmac->clk_rx);
+ if (ret)
+ goto err_clk_rx;
ret = clk_prepare_enable(dwmac->syscfg_clk);
if (ret)
@@ -135,15 +141,14 @@ static int stm32_dwmac_clk_enable(struct stm32_dwmac *dwmac, bool resume)
err_clk_eth_ck:
clk_disable_unprepare(dwmac->syscfg_clk);
err_syscfg_clk:
- if (!dwmac->ops->clk_rx_enable_in_suspend || !resume)
- clk_disable_unprepare(dwmac->clk_rx);
+ clk_disable_unprepare(dwmac->clk_rx);
err_clk_rx:
clk_disable_unprepare(dwmac->clk_tx);
err_clk_tx:
return ret;
}
-static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat, bool resume)
+static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat)
{
struct stm32_dwmac *dwmac = plat_dat->bsp_priv;
int ret;
@@ -154,68 +159,202 @@ static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat, bool resume)
return ret;
}
- return stm32_dwmac_clk_enable(dwmac, resume);
+ return stm32_dwmac_clk_enable(dwmac);
}
-static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat)
+static int stm32mp1_select_ethck_external(struct plat_stmmacenet_data *plat_dat)
+{
+ struct stm32_dwmac *dwmac = plat_dat->bsp_priv;
+
+ switch (plat_dat->phy_interface) {
+ case PHY_INTERFACE_MODE_MII:
+ dwmac->enable_eth_ck = dwmac->ext_phyclk;
+ return 0;
+ case PHY_INTERFACE_MODE_GMII:
+ dwmac->enable_eth_ck = dwmac->eth_clk_sel_reg ||
+ dwmac->ext_phyclk;
+ return 0;
+ case PHY_INTERFACE_MODE_RMII:
+ dwmac->enable_eth_ck = dwmac->eth_ref_clk_sel_reg ||
+ dwmac->ext_phyclk;
+ return 0;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ dwmac->enable_eth_ck = dwmac->eth_clk_sel_reg ||
+ dwmac->ext_phyclk;
+ return 0;
+ default:
+ dwmac->enable_eth_ck = false;
+ dev_err(dwmac->dev, "Mode %s not supported",
+ phy_modes(plat_dat->phy_interface));
+ return -EINVAL;
+ }
+}
+
+static int stm32mp1_validate_ethck_rate(struct plat_stmmacenet_data *plat_dat)
+{
+ struct stm32_dwmac *dwmac = plat_dat->bsp_priv;
+ const u32 clk_rate = clk_get_rate(dwmac->clk_eth_ck);
+
+ if (!dwmac->enable_eth_ck)
+ return 0;
+
+ switch (plat_dat->phy_interface) {
+ case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_GMII:
+ if (clk_rate == ETH_CK_F_25M)
+ return 0;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ if (clk_rate == ETH_CK_F_25M || clk_rate == ETH_CK_F_50M)
+ return 0;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ if (clk_rate == ETH_CK_F_25M || clk_rate == ETH_CK_F_125M)
+ return 0;
+ break;
+ default:
+ break;
+ }
+
+ dev_err(dwmac->dev, "Mode %s does not match eth-ck frequency %d Hz",
+ phy_modes(plat_dat->phy_interface), clk_rate);
+ return -EINVAL;
+}
+
+static int stm32mp1_configure_pmcr(struct plat_stmmacenet_data *plat_dat,
+ u8 phy_intf_sel)
{
struct stm32_dwmac *dwmac = plat_dat->bsp_priv;
- u32 reg = dwmac->mode_reg, clk_rate;
+ u32 reg = dwmac->mode_reg;
int val;
- clk_rate = clk_get_rate(dwmac->clk_eth_ck);
- dwmac->enable_eth_ck = false;
- switch (plat_dat->mac_interface) {
+ val = FIELD_PREP(SYSCFG_PMCR_PHY_INTF_SEL_MASK, phy_intf_sel);
+
+ switch (plat_dat->phy_interface) {
case PHY_INTERFACE_MODE_MII:
- if (clk_rate == ETH_CK_F_25M && dwmac->ext_phyclk)
- dwmac->enable_eth_ck = true;
- val = SYSCFG_PMCR_ETH_SEL_MII;
- pr_debug("SYSCFG init : PHY_INTERFACE_MODE_MII\n");
+ /*
+ * STM32MP15xx supports both MII and GMII, STM32MP13xx MII only.
+ * SYSCFG_PMCSETR ETH_SELMII is present only on STM32MP15xx and
+ * acts as a selector between 0:GMII and 1:MII. As STM32MP13xx
+ * supports only MII, ETH_SELMII is not present.
+ */
+ if (!dwmac->ops->is_mp13) /* Select MII mode on STM32MP15xx */
+ val |= SYSCFG_PMCR_ETH_SEL_MII;
break;
case PHY_INTERFACE_MODE_GMII:
- val = SYSCFG_PMCR_ETH_SEL_GMII;
- if (clk_rate == ETH_CK_F_25M &&
- (dwmac->eth_clk_sel_reg || dwmac->ext_phyclk)) {
- dwmac->enable_eth_ck = true;
+ if (dwmac->enable_eth_ck)
val |= SYSCFG_PMCR_ETH_CLK_SEL;
- }
- pr_debug("SYSCFG init : PHY_INTERFACE_MODE_GMII\n");
break;
case PHY_INTERFACE_MODE_RMII:
- val = SYSCFG_PMCR_ETH_SEL_RMII;
- if ((clk_rate == ETH_CK_F_25M || clk_rate == ETH_CK_F_50M) &&
- (dwmac->eth_ref_clk_sel_reg || dwmac->ext_phyclk)) {
- dwmac->enable_eth_ck = true;
+ if (dwmac->enable_eth_ck)
val |= SYSCFG_PMCR_ETH_REF_CLK_SEL;
- }
- pr_debug("SYSCFG init : PHY_INTERFACE_MODE_RMII\n");
break;
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
- val = SYSCFG_PMCR_ETH_SEL_RGMII;
- if ((clk_rate == ETH_CK_F_25M || clk_rate == ETH_CK_F_125M) &&
- (dwmac->eth_clk_sel_reg || dwmac->ext_phyclk)) {
- dwmac->enable_eth_ck = true;
+ if (dwmac->enable_eth_ck)
val |= SYSCFG_PMCR_ETH_CLK_SEL;
- }
- pr_debug("SYSCFG init : PHY_INTERFACE_MODE_RGMII\n");
break;
default:
- pr_debug("SYSCFG init : Do not manage %d interface\n",
- plat_dat->mac_interface);
+ dev_err(dwmac->dev, "Mode %s not supported",
+ phy_modes(plat_dat->phy_interface));
/* Do not manage other interfaces */
return -EINVAL;
}
+ dev_dbg(dwmac->dev, "Mode %s", phy_modes(plat_dat->phy_interface));
+
+ /* Shift value at correct ethernet MAC offset in SYSCFG_PMCSETR */
+ val <<= ffs(dwmac->mode_mask) - ffs(SYSCFG_MP1_ETH_MASK);
+
/* Need to update PMCCLRR (clear register) */
- regmap_write(dwmac->regmap, reg + SYSCFG_PMCCLRR_OFFSET,
- dwmac->ops->syscfg_eth_mask);
+ regmap_write(dwmac->regmap, dwmac->ops->syscfg_clr_off,
+ dwmac->mode_mask);
/* Update PMCSETR (set register) */
return regmap_update_bits(dwmac->regmap, reg,
- dwmac->ops->syscfg_eth_mask, val);
+ dwmac->mode_mask, val);
+}
+
+static int stm32mp2_configure_syscfg(struct plat_stmmacenet_data *plat_dat,
+ u8 phy_intf_sel)
+{
+ struct stm32_dwmac *dwmac = plat_dat->bsp_priv;
+ u32 reg = dwmac->mode_reg;
+ int val;
+
+ val = FIELD_PREP(SYSCFG_ETHCR_ETH_SEL_MASK, phy_intf_sel);
+
+ switch (plat_dat->phy_interface) {
+ case PHY_INTERFACE_MODE_MII:
+ /* ETH_REF_CLK_SEL bit in SYSCFG register is not applicable in MII mode */
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ if (dwmac->enable_eth_ck) {
+ /* Internal clock ETH_CLK of 50MHz from RCC is used */
+ val |= SYSCFG_ETHCR_ETH_REF_CLK_SEL;
+ }
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_GMII:
+ if (dwmac->enable_eth_ck) {
+ /* Internal clock ETH_CLK of 125MHz from RCC is used */
+ val |= SYSCFG_ETHCR_ETH_CLK_SEL;
+ }
+ break;
+ default:
+ dev_err(dwmac->dev, "Mode %s not supported",
+ phy_modes(plat_dat->phy_interface));
+ /* Do not manage other interfaces */
+ return -EINVAL;
+ }
+
+ dev_dbg(dwmac->dev, "Mode %s", phy_modes(plat_dat->phy_interface));
+
+ /* Select PTP (IEEE1588) clock selection from RCC (ck_ker_ethxptp) */
+ val |= SYSCFG_ETHCR_ETH_PTP_CLK_SEL;
+
+ /* Update ETHCR (set register) */
+ return regmap_update_bits(dwmac->regmap, reg,
+ SYSCFG_MP2_ETH_MASK, val);
+}
+
+static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat)
+{
+ struct stm32_dwmac *dwmac = plat_dat->bsp_priv;
+ int phy_intf_sel, ret;
+
+ ret = stm32mp1_select_ethck_external(plat_dat);
+ if (ret)
+ return ret;
+
+ ret = stm32mp1_validate_ethck_rate(plat_dat);
+ if (ret)
+ return ret;
+
+ phy_intf_sel = stmmac_get_phy_intf_sel(plat_dat->phy_interface);
+ if (phy_intf_sel != PHY_INTF_SEL_GMII_MII &&
+ phy_intf_sel != PHY_INTF_SEL_RGMII &&
+ phy_intf_sel != PHY_INTF_SEL_RMII) {
+ dev_err(dwmac->dev, "Mode %s not supported\n",
+ phy_modes(plat_dat->phy_interface));
+ return phy_intf_sel < 0 ? phy_intf_sel : -EINVAL;
+ }
+
+ if (!dwmac->ops->is_mp2)
+ return stm32mp1_configure_pmcr(plat_dat, phy_intf_sel);
+ else
+ return stm32mp2_configure_syscfg(plat_dat, phy_intf_sel);
}
static int stm32mcu_set_mode(struct plat_stmmacenet_data *plat_dat)
@@ -224,32 +363,30 @@ static int stm32mcu_set_mode(struct plat_stmmacenet_data *plat_dat)
u32 reg = dwmac->mode_reg;
int val;
- switch (plat_dat->mac_interface) {
+ switch (plat_dat->phy_interface) {
case PHY_INTERFACE_MODE_MII:
val = SYSCFG_MCU_ETH_SEL_MII;
- pr_debug("SYSCFG init : PHY_INTERFACE_MODE_MII\n");
break;
case PHY_INTERFACE_MODE_RMII:
val = SYSCFG_MCU_ETH_SEL_RMII;
- pr_debug("SYSCFG init : PHY_INTERFACE_MODE_RMII\n");
break;
default:
- pr_debug("SYSCFG init : Do not manage %d interface\n",
- plat_dat->mac_interface);
+ dev_err(dwmac->dev, "Mode %s not supported",
+ phy_modes(plat_dat->phy_interface));
/* Do not manage other interfaces */
return -EINVAL;
}
+ dev_dbg(dwmac->dev, "Mode %s", phy_modes(plat_dat->phy_interface));
+
return regmap_update_bits(dwmac->regmap, reg,
- dwmac->ops->syscfg_eth_mask, val << 23);
+ SYSCFG_MCU_ETH_MASK, val << 23);
}
-static void stm32_dwmac_clk_disable(struct stm32_dwmac *dwmac, bool suspend)
+static void stm32_dwmac_clk_disable(struct stm32_dwmac *dwmac)
{
clk_disable_unprepare(dwmac->clk_tx);
- if (!dwmac->ops->clk_rx_enable_in_suspend || !suspend)
- clk_disable_unprepare(dwmac->clk_rx);
-
+ clk_disable_unprepare(dwmac->clk_rx);
clk_disable_unprepare(dwmac->syscfg_clk);
if (dwmac->enable_eth_ck)
clk_disable_unprepare(dwmac->clk_eth_ck);
@@ -281,13 +418,24 @@ static int stm32_dwmac_parse_data(struct stm32_dwmac *dwmac,
}
/* Get mode register */
- dwmac->regmap = syscon_regmap_lookup_by_phandle(np, "st,syscon");
+ dwmac->regmap = syscon_regmap_lookup_by_phandle_args(np, "st,syscon",
+ 1, &dwmac->mode_reg);
if (IS_ERR(dwmac->regmap))
return PTR_ERR(dwmac->regmap);
- err = of_property_read_u32_index(np, "st,syscon", 1, &dwmac->mode_reg);
- if (err)
- dev_err(dev, "Can't get sysconfig mode offset (%d)\n", err);
+ if (dwmac->ops->is_mp2)
+ return 0;
+
+ dwmac->mode_mask = SYSCFG_MP1_ETH_MASK;
+ err = of_property_read_u32_index(np, "st,syscon", 2, &dwmac->mode_mask);
+ if (err) {
+ if (dwmac->ops->is_mp13) {
+ dev_err(dev, "Sysconfig register mask must be set (%d)\n", err);
+ } else {
+ dev_dbg(dev, "Warning sysconfig register mask not set\n");
+ err = 0;
+ }
+ }
return err;
}
@@ -305,7 +453,7 @@ static int stm32mp1_parse_data(struct stm32_dwmac *dwmac,
/* Gigabit Ethernet 125MHz clock selection. */
dwmac->eth_clk_sel_reg = of_property_read_bool(np, "st,eth-clk-sel");
- /* Ethernet 50Mhz RMII clock selection */
+ /* Ethernet 50MHz RMII clock selection */
dwmac->eth_ref_clk_sel_reg =
of_property_read_bool(np, "st,eth-ref-clk-sel");
@@ -354,6 +502,26 @@ static int stm32mp1_parse_data(struct stm32_dwmac *dwmac,
return err;
}
+static int stm32_dwmac_suspend(struct device *dev, void *bsp_priv)
+{
+ struct stm32_dwmac *dwmac = bsp_priv;
+
+ stm32_dwmac_clk_disable(dwmac);
+
+ return dwmac->ops->suspend ? dwmac->ops->suspend(dwmac) : 0;
+}
+
+static int stm32_dwmac_resume(struct device *dev, void *bsp_priv)
+{
+ struct stmmac_priv *priv = netdev_priv(dev_get_drvdata(dev));
+ struct stm32_dwmac *dwmac = bsp_priv;
+
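+ /* Re-apply the SoC-specific glue setup and re-enable the bus clocks
+ * that were dropped during suspend.
+ */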
+ if (dwmac->ops->resume)
+ dwmac->ops->resume(dwmac);
+
+ return stm32_dwmac_init(priv->plat);
+}
+
static int stm32_dwmac_probe(struct platform_device *pdev)
{
struct plat_stmmacenet_data *plat_dat;
@@ -389,20 +557,37 @@ static int stm32_dwmac_probe(struct platform_device *pdev)
return ret;
}
+ plat_dat->flags |= STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP;
plat_dat->bsp_priv = dwmac;
+ plat_dat->suspend = stm32_dwmac_suspend;
+ plat_dat->resume = stm32_dwmac_resume;
- ret = stm32_dwmac_init(plat_dat, false);
+ ret = stm32_dwmac_init(plat_dat);
if (ret)
return ret;
+ /* If this platform requires the clock to be running in suspend,
+ * prepare and enable the receive clock an additional time to keep
+ * it running.
+ */
+ if (dwmac->ops->clk_rx_enable_in_suspend) {
+ ret = clk_prepare_enable(dwmac->clk_rx);
+ if (ret)
+ goto err_clk_disable;
+ }
+
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret)
- goto err_clk_disable;
+ goto err_clk_disable_suspend;
return 0;
+err_clk_disable_suspend:
+ if (dwmac->ops->clk_rx_enable_in_suspend)
+ clk_disable_unprepare(dwmac->clk_rx);
+
err_clk_disable:
- stm32_dwmac_clk_disable(dwmac, false);
+ stm32_dwmac_clk_disable(dwmac);
return ret;
}
@@ -415,7 +600,15 @@ static void stm32_dwmac_remove(struct platform_device *pdev)
stmmac_dvr_remove(&pdev->dev);
- stm32_dwmac_clk_disable(dwmac, false);
+ /* If this platform requires the clock to be running in suspend,
+ * we need to disable and unprepare the receive clock an additional
+ * time to balance the extra clk_prepare_enable() in the probe
+ * function.
+ */
+ if (dwmac->ops->clk_rx_enable_in_suspend)
+ clk_disable_unprepare(dwmac->clk_rx);
+
+ stm32_dwmac_clk_disable(dwmac);
if (dwmac->irq_pwr_wakeup >= 0) {
dev_pm_clear_wake_irq(&pdev->dev);
@@ -433,53 +626,8 @@ static void stm32mp1_resume(struct stm32_dwmac *dwmac)
clk_disable_unprepare(dwmac->clk_ethstp);
}
-#ifdef CONFIG_PM_SLEEP
-static int stm32_dwmac_suspend(struct device *dev)
-{
- struct net_device *ndev = dev_get_drvdata(dev);
- struct stmmac_priv *priv = netdev_priv(ndev);
- struct stm32_dwmac *dwmac = priv->plat->bsp_priv;
-
- int ret;
-
- ret = stmmac_suspend(dev);
- if (ret)
- return ret;
-
- stm32_dwmac_clk_disable(dwmac, true);
-
- if (dwmac->ops->suspend)
- ret = dwmac->ops->suspend(dwmac);
-
- return ret;
-}
-
-static int stm32_dwmac_resume(struct device *dev)
-{
- struct net_device *ndev = dev_get_drvdata(dev);
- struct stmmac_priv *priv = netdev_priv(ndev);
- struct stm32_dwmac *dwmac = priv->plat->bsp_priv;
- int ret;
-
- if (dwmac->ops->resume)
- dwmac->ops->resume(dwmac);
-
- ret = stm32_dwmac_init(priv->plat, true);
- if (ret)
- return ret;
-
- ret = stmmac_resume(dev);
-
- return ret;
-}
-#endif /* CONFIG_PM_SLEEP */
-
-static SIMPLE_DEV_PM_OPS(stm32_dwmac_pm_ops,
- stm32_dwmac_suspend, stm32_dwmac_resume);
-
static struct stm32_ops stm32mcu_dwmac_data = {
- .set_mode = stm32mcu_set_mode,
- .syscfg_eth_mask = SYSCFG_MCU_ETH_MASK
+ .set_mode = stm32mcu_set_mode
};
static struct stm32_ops stm32mp1_dwmac_data = {
@@ -487,23 +635,45 @@ static struct stm32_ops stm32mp1_dwmac_data = {
.suspend = stm32mp1_suspend,
.resume = stm32mp1_resume,
.parse_data = stm32mp1_parse_data,
- .syscfg_eth_mask = SYSCFG_MP1_ETH_MASK,
+ .syscfg_clr_off = 0x44,
+ .is_mp13 = false,
+ .clk_rx_enable_in_suspend = true
+};
+
+static struct stm32_ops stm32mp13_dwmac_data = {
+ .set_mode = stm32mp1_set_mode,
+ .suspend = stm32mp1_suspend,
+ .resume = stm32mp1_resume,
+ .parse_data = stm32mp1_parse_data,
+ .syscfg_clr_off = 0x08,
+ .is_mp13 = true,
+ .clk_rx_enable_in_suspend = true
+};
+
+static struct stm32_ops stm32mp25_dwmac_data = {
+ .set_mode = stm32mp1_set_mode,
+ .suspend = stm32mp1_suspend,
+ .resume = stm32mp1_resume,
+ .parse_data = stm32mp1_parse_data,
+ .is_mp2 = true,
.clk_rx_enable_in_suspend = true
};
static const struct of_device_id stm32_dwmac_match[] = {
{ .compatible = "st,stm32-dwmac", .data = &stm32mcu_dwmac_data},
{ .compatible = "st,stm32mp1-dwmac", .data = &stm32mp1_dwmac_data},
+ { .compatible = "st,stm32mp13-dwmac", .data = &stm32mp13_dwmac_data},
+ { .compatible = "st,stm32mp25-dwmac", .data = &stm32mp25_dwmac_data},
{ }
};
MODULE_DEVICE_TABLE(of, stm32_dwmac_match);
static struct platform_driver stm32_dwmac_driver = {
.probe = stm32_dwmac_probe,
- .remove_new = stm32_dwmac_remove,
+ .remove = stm32_dwmac_remove,
.driver = {
.name = "stm32-dwmac",
- .pm = &stm32_dwmac_pm_ops,
+ .pm = &stmmac_simple_pm_ops,
.of_match_table = stm32_dwmac_match,
},
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun55i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun55i.c
new file mode 100644
index 000000000000..862df173d963
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun55i.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * dwmac-sun55i.c - Allwinner sun55i GMAC200 specific glue layer
+ *
+ * Copyright (C) 2025 Chen-Yu Tsai <wens@csie.org>
+ *
+ * syscon parts taken from dwmac-sun8i.c, which is
+ *
+ * Copyright (C) 2017 Corentin Labbe <clabbe.montjoie@gmail.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/stmmac.h>
+
+#include "stmmac.h"
+#include "stmmac_platform.h"
+
+#define SYSCON_REG 0x34
+
+/* RMII specific bits */
+#define SYSCON_RMII_EN BIT(13) /* 1: enable RMII (overrides EPIT) */
+/* Generic system control EMAC_CLK bits */
+#define SYSCON_ETXDC_MASK GENMASK(12, 10)
+#define SYSCON_ERXDC_MASK GENMASK(9, 5)
+/* EMAC PHY Interface Type */
+#define SYSCON_EPIT BIT(2) /* 1: RGMII, 0: MII */
+#define SYSCON_ETCS_MASK GENMASK(1, 0)
+#define SYSCON_ETCS_MII 0x0
+#define SYSCON_ETCS_EXT_GMII 0x1
+#define SYSCON_ETCS_INT_GMII 0x2
+
+static int sun55i_gmac200_set_syscon(struct device *dev,
+ struct plat_stmmacenet_data *plat)
+{
+ struct device_node *node = dev->of_node;
+ struct regmap *regmap;
+ u32 val, reg = 0;
+ int ret;
+
+ regmap = syscon_regmap_lookup_by_phandle(node, "syscon");
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap), "Unable to map syscon\n");
+
+ if (!of_property_read_u32(node, "tx-internal-delay-ps", &val)) {
+ if (val % 100)
+ return dev_err_probe(dev, -EINVAL,
+ "tx-delay must be a multiple of 100ps\n");
+ val /= 100;
+ dev_dbg(dev, "set tx-delay to %x\n", val);
+ if (!FIELD_FIT(SYSCON_ETXDC_MASK, val))
+ return dev_err_probe(dev, -EINVAL,
+ "TX clock delay exceeds maximum (%u00ps > %lu00ps)\n",
+ val, FIELD_MAX(SYSCON_ETXDC_MASK));
+
+ reg |= FIELD_PREP(SYSCON_ETXDC_MASK, val);
+ }
+
+ if (!of_property_read_u32(node, "rx-internal-delay-ps", &val)) {
+ if (val % 100)
+ return dev_err_probe(dev, -EINVAL,
+ "rx-delay must be a multiple of 100ps\n");
+ val /= 100;
+ dev_dbg(dev, "set rx-delay to %x\n", val);
+ if (!FIELD_FIT(SYSCON_ERXDC_MASK, val))
+ return dev_err_probe(dev, -EINVAL,
+ "RX clock delay exceeds maximum (%u00ps > %lu00ps)\n",
+ val, FIELD_MAX(SYSCON_ERXDC_MASK));
+
+ reg |= FIELD_PREP(SYSCON_ERXDC_MASK, val);
+ }
+
+ switch (plat->phy_interface) {
+ case PHY_INTERFACE_MODE_MII:
+ /* default */
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ reg |= SYSCON_EPIT | SYSCON_ETCS_INT_GMII;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ reg |= SYSCON_RMII_EN;
+ break;
+ default:
+ return dev_err_probe(dev, -EINVAL, "Unsupported interface mode: %s",
+ phy_modes(plat->phy_interface));
+ }
+
+ ret = regmap_write(regmap, SYSCON_REG, reg);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to write to syscon\n");
+
+ return 0;
+}
+
+static int sun55i_gmac200_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct device *dev = &pdev->dev;
+ struct clk *clk;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
+ /* BSP disables it */
+ plat_dat->flags |= STMMAC_FLAG_SPH_DISABLE;
+ plat_dat->host_dma_width = 32;
+
+ ret = sun55i_gmac200_set_syscon(dev, plat_dat);
+ if (ret)
+ return ret;
+
+ clk = devm_clk_get_enabled(dev, "mbus");
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk),
+ "Failed to get or enable MBUS clock\n");
+
+ ret = devm_regulator_get_enable_optional(dev, "phy");
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get or enable PHY supply\n");
+
+ return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
+}
+
+static const struct of_device_id sun55i_gmac200_match[] = {
+ { .compatible = "allwinner,sun55i-a523-gmac200" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sun55i_gmac200_match);
+
+static struct platform_driver sun55i_gmac200_driver = {
+ .probe = sun55i_gmac200_probe,
+ .driver = {
+ .name = "dwmac-sun55i",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = sun55i_gmac200_match,
+ },
+};
+module_platform_driver(sun55i_gmac200_driver);
+
+MODULE_AUTHOR("Chen-Yu Tsai <wens@csie.org>");
+MODULE_DESCRIPTION("Allwinner sun55i GMAC200 specific glue layer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index e1b761dcfa1d..8aa496ac85cc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -31,10 +31,6 @@
*/
/* struct emac_variant - Describe dwmac-sun8i hardware variant
- * @default_syscon_value: The default value of the EMAC register in syscon
- * This value is used for disabling properly EMAC
- * and used as a good starting value in case of the
- * boot process(uboot) leave some stuff.
* @syscon_field reg_field for the syscon's gmac register
* @soc_has_internal_phy: Does the MAC embed an internal PHY
* @support_mii: Does the MAC handle MII
@@ -48,7 +44,6 @@
* value of zero indicates this is not supported.
*/
struct emac_variant {
- u32 default_syscon_value;
const struct reg_field *syscon_field;
bool soc_has_internal_phy;
bool support_mii;
@@ -94,7 +89,6 @@ static const struct reg_field sun8i_ccu_reg_field = {
};
static const struct emac_variant emac_variant_h3 = {
- .default_syscon_value = 0x58000,
.syscon_field = &sun8i_syscon_reg_field,
.soc_has_internal_phy = true,
.support_mii = true,
@@ -105,14 +99,12 @@ static const struct emac_variant emac_variant_h3 = {
};
static const struct emac_variant emac_variant_v3s = {
- .default_syscon_value = 0x38000,
.syscon_field = &sun8i_syscon_reg_field,
.soc_has_internal_phy = true,
.support_mii = true
};
static const struct emac_variant emac_variant_a83t = {
- .default_syscon_value = 0,
.syscon_field = &sun8i_syscon_reg_field,
.soc_has_internal_phy = false,
.support_mii = true,
@@ -122,7 +114,6 @@ static const struct emac_variant emac_variant_a83t = {
};
static const struct emac_variant emac_variant_r40 = {
- .default_syscon_value = 0,
.syscon_field = &sun8i_ccu_reg_field,
.support_mii = true,
.support_rgmii = true,
@@ -130,7 +121,6 @@ static const struct emac_variant emac_variant_r40 = {
};
static const struct emac_variant emac_variant_a64 = {
- .default_syscon_value = 0,
.syscon_field = &sun8i_syscon_reg_field,
.soc_has_internal_phy = false,
.support_mii = true,
@@ -141,7 +131,6 @@ static const struct emac_variant emac_variant_a64 = {
};
static const struct emac_variant emac_variant_h6 = {
- .default_syscon_value = 0x50000,
.syscon_field = &sun8i_syscon_reg_field,
/* The "Internal PHY" of H6 is not on the die. It's on the
* co-packaged AC200 chip instead.
@@ -299,7 +288,7 @@ static int sun8i_dwmac_dma_reset(void __iomem *ioaddr)
* Called from stmmac via stmmac_dma_ops->init
*/
static void sun8i_dwmac_dma_init(void __iomem *ioaddr,
- struct stmmac_dma_cfg *dma_cfg, int atds)
+ struct stmmac_dma_cfg *dma_cfg)
{
writel(EMAC_RX_INT | EMAC_TX_INT, ioaddr + EMAC_INT_EN);
writel(0x1FFFFFF, ioaddr + EMAC_INT_STA);
@@ -395,7 +384,7 @@ static void sun8i_dwmac_dma_start_tx(struct stmmac_priv *priv,
writel(v, ioaddr + EMAC_TX_CTL1);
}
-static void sun8i_dwmac_enable_dma_transmission(void __iomem *ioaddr)
+static void sun8i_dwmac_enable_dma_transmission(void __iomem *ioaddr, u32 chan)
{
u32 v;
@@ -582,16 +571,16 @@ static const struct stmmac_dma_ops sun8i_dwmac_dma_ops = {
static int sun8i_dwmac_power_internal_phy(struct stmmac_priv *priv);
-static int sun8i_dwmac_init(struct platform_device *pdev, void *priv)
+static int sun8i_dwmac_init(struct device *dev, void *priv)
{
- struct net_device *ndev = platform_get_drvdata(pdev);
+ struct net_device *ndev = dev_get_drvdata(dev);
struct sunxi_priv_data *gmac = priv;
int ret;
if (gmac->regulator) {
ret = regulator_enable(gmac->regulator);
if (ret) {
- dev_err(&pdev->dev, "Fail to enable regulator\n");
+ dev_err(dev, "Fail to enable regulator\n");
return ret;
}
}
@@ -774,8 +763,8 @@ static int sun8i_dwmac_reset(struct stmmac_priv *priv)
static int get_ephy_nodes(struct stmmac_priv *priv)
{
struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
- struct device_node *mdio_mux, *iphynode;
struct device_node *mdio_internal;
+ struct device_node *mdio_mux;
int ret;
mdio_mux = of_get_child_by_name(priv->device->of_node, "mdio-mux");
@@ -793,7 +782,7 @@ static int get_ephy_nodes(struct stmmac_priv *priv)
}
/* Seek for internal PHY */
- for_each_child_of_node(mdio_internal, iphynode) {
+ for_each_child_of_node_scoped(mdio_internal, iphynode) {
gmac->ephy_clk = of_clk_get(iphynode, 0);
if (IS_ERR(gmac->ephy_clk))
continue;
@@ -801,14 +790,12 @@ static int get_ephy_nodes(struct stmmac_priv *priv)
if (IS_ERR(gmac->rst_ephy)) {
ret = PTR_ERR(gmac->rst_ephy);
if (ret == -EPROBE_DEFER) {
- of_node_put(iphynode);
of_node_put(mdio_internal);
return ret;
}
continue;
}
dev_info(priv->device, "Found internal PHY node\n");
- of_node_put(iphynode);
of_node_put(mdio_internal);
return 0;
}
@@ -935,25 +922,11 @@ static int sun8i_dwmac_set_syscon(struct device *dev,
struct sunxi_priv_data *gmac = plat->bsp_priv;
struct device_node *node = dev->of_node;
int ret;
- u32 reg, val;
-
- ret = regmap_field_read(gmac->regmap_field, &val);
- if (ret) {
- dev_err(dev, "Fail to read from regmap field.\n");
- return ret;
- }
-
- reg = gmac->variant->default_syscon_value;
- if (reg != val)
- dev_warn(dev,
- "Current syscon value is not the default %x (expect %x)\n",
- val, reg);
+ u32 reg = 0, val;
if (gmac->variant->soc_has_internal_phy) {
if (of_property_read_bool(node, "allwinner,leds-active-low"))
reg |= H3_EPHY_LED_POL;
- else
- reg &= ~H3_EPHY_LED_POL;
/* Force EPHY xtal frequency to 24MHz. */
reg |= H3_EPHY_CLK_SEL;
@@ -966,12 +939,7 @@ static int sun8i_dwmac_set_syscon(struct device *dev,
/* of_mdio_parse_addr returns a valid (0 ~ 31) PHY
* address. No need to mask it again.
*/
- reg |= 1 << H3_EPHY_ADDR_SHIFT;
- } else {
- /* For SoCs without internal PHY the PHY selection bit should be
- * set to 0 (external PHY).
- */
- reg &= ~H3_EPHY_SELECT;
+ reg |= ret << H3_EPHY_ADDR_SHIFT;
}
if (!of_property_read_u32(node, "allwinner,tx-delay-ps", &val)) {
@@ -982,8 +950,6 @@ static int sun8i_dwmac_set_syscon(struct device *dev,
val /= 100;
dev_dbg(dev, "set tx-delay to %x\n", val);
if (val <= gmac->variant->tx_delay_max) {
- reg &= ~(gmac->variant->tx_delay_max <<
- SYSCON_ETXDC_SHIFT);
reg |= (val << SYSCON_ETXDC_SHIFT);
} else {
dev_err(dev, "Invalid TX clock delay: %d\n",
@@ -1000,8 +966,6 @@ static int sun8i_dwmac_set_syscon(struct device *dev,
val /= 100;
dev_dbg(dev, "set rx-delay to %x\n", val);
if (val <= gmac->variant->rx_delay_max) {
- reg &= ~(gmac->variant->rx_delay_max <<
- SYSCON_ERXDC_SHIFT);
reg |= (val << SYSCON_ERXDC_SHIFT);
} else {
dev_err(dev, "Invalid RX clock delay: %d\n",
@@ -1010,12 +974,7 @@ static int sun8i_dwmac_set_syscon(struct device *dev,
}
}
- /* Clear interface mode bits */
- reg &= ~(SYSCON_ETCS_MASK | SYSCON_EPIT);
- if (gmac->variant->support_rmii)
- reg &= ~SYSCON_RMII_EN;
-
- switch (plat->mac_interface) {
+ switch (plat->phy_interface) {
case PHY_INTERFACE_MODE_MII:
/* default */
break;
@@ -1030,7 +989,7 @@ static int sun8i_dwmac_set_syscon(struct device *dev,
break;
default:
dev_err(dev, "Unsupported interface mode: %s",
- phy_modes(plat->mac_interface));
+ phy_modes(plat->phy_interface));
return -EINVAL;
}
@@ -1041,12 +1000,12 @@ static int sun8i_dwmac_set_syscon(struct device *dev,
static void sun8i_dwmac_unset_syscon(struct sunxi_priv_data *gmac)
{
- u32 reg = gmac->variant->default_syscon_value;
-
- regmap_field_write(gmac->regmap_field, reg);
+ if (gmac->variant->soc_has_internal_phy)
+ regmap_field_write(gmac->regmap_field,
+ (H3_EPHY_SHUTDOWN | H3_EPHY_SELECT));
}
-static void sun8i_dwmac_exit(struct platform_device *pdev, void *priv)
+static void sun8i_dwmac_exit(struct device *dev, void *priv)
{
struct sunxi_priv_data *gmac = priv;
@@ -1081,15 +1040,10 @@ static const struct stmmac_ops sun8i_dwmac_ops = {
.set_mac_loopback = sun8i_dwmac_set_mac_loopback,
};
-static struct mac_device_info *sun8i_dwmac_setup(void *ppriv)
+static int sun8i_dwmac_setup(void *ppriv, struct mac_device_info *mac)
{
- struct mac_device_info *mac;
struct stmmac_priv *priv = ppriv;
- mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL);
- if (!mac)
- return NULL;
-
mac->pcsr = priv->ioaddr;
mac->mac = &sun8i_dwmac_ops;
mac->dma = &sun8i_dwmac_dma_ops;
@@ -1120,7 +1074,7 @@ static struct mac_device_info *sun8i_dwmac_setup(void *ppriv)
/* Synopsys Id is not available */
priv->synopsys_id = 0;
- return mac;
+ return 0;
}
static struct regmap *sun8i_dwmac_get_syscon_from_dev(struct device_node *node)
@@ -1157,11 +1111,10 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
struct stmmac_resources stmmac_res;
struct sunxi_priv_data *gmac;
struct device *dev = &pdev->dev;
- phy_interface_t interface;
- int ret;
struct stmmac_priv *priv;
struct net_device *ndev;
struct regmap *regmap;
+ int ret;
ret = stmmac_get_platform_resources(pdev, &stmmac_res);
if (ret)
@@ -1221,10 +1174,6 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
return ret;
}
- ret = of_get_phy_mode(dev->of_node, &interface);
- if (ret)
- return -EINVAL;
-
plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
@@ -1232,14 +1181,13 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
/* platform data specifying hardware features and callbacks.
* hardware features were copied from Allwinner drivers.
*/
- plat_dat->mac_interface = interface;
plat_dat->rx_coe = STMMAC_RX_COE_TYPE2;
plat_dat->tx_coe = 1;
plat_dat->flags |= STMMAC_FLAG_HAS_SUN8I;
plat_dat->bsp_priv = gmac;
plat_dat->init = sun8i_dwmac_init;
plat_dat->exit = sun8i_dwmac_exit;
- plat_dat->setup = sun8i_dwmac_setup;
+ plat_dat->mac_setup = sun8i_dwmac_setup;
plat_dat->tx_fifo_size = 4096;
plat_dat->rx_fifo_size = 16384;
@@ -1247,14 +1195,10 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = sun8i_dwmac_init(pdev, plat_dat->bsp_priv);
+ ret = stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
if (ret)
goto dwmac_syscon;
- ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
- if (ret)
- goto dwmac_exit;
-
ndev = dev_get_drvdata(&pdev->dev);
priv = netdev_priv(ndev);
@@ -1291,9 +1235,7 @@ dwmac_mux:
clk_put(gmac->ephy_clk);
dwmac_remove:
pm_runtime_put_noidle(&pdev->dev);
- stmmac_dvr_remove(&pdev->dev);
-dwmac_exit:
- sun8i_dwmac_exit(pdev, gmac);
+ stmmac_pltfr_remove(pdev);
dwmac_syscon:
sun8i_dwmac_unset_syscon(gmac);
@@ -1323,7 +1265,7 @@ static void sun8i_dwmac_shutdown(struct platform_device *pdev)
struct stmmac_priv *priv = netdev_priv(ndev);
struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
- sun8i_dwmac_exit(pdev, gmac);
+ sun8i_dwmac_exit(&pdev->dev, gmac);
}
static const struct of_device_id sun8i_dwmac_match[] = {
@@ -1345,7 +1287,7 @@ MODULE_DEVICE_TABLE(of, sun8i_dwmac_match);
static struct platform_driver sun8i_dwmac_driver = {
.probe = sun8i_dwmac_probe,
- .remove_new = sun8i_dwmac_remove,
+ .remove = sun8i_dwmac_remove,
.shutdown = sun8i_dwmac_shutdown,
.driver = {
.name = "dwmac-sun8i",
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
index 2653a9f0958c..52593ba3a3a3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -27,7 +27,7 @@ struct sunxi_priv_data {
#define SUN7I_GMAC_GMII_RGMII_RATE 125000000
#define SUN7I_GMAC_MII_RATE 25000000
-static int sun7i_gmac_init(struct platform_device *pdev, void *priv)
+static int sun7i_gmac_init(struct device *dev, void *priv)
{
struct sunxi_priv_data *gmac = priv;
int ret = 0;
@@ -58,7 +58,7 @@ static int sun7i_gmac_init(struct platform_device *pdev, void *priv)
return ret;
}
-static void sun7i_gmac_exit(struct platform_device *pdev, void *priv)
+static void sun7i_gmac_exit(struct device *dev, void *priv)
{
struct sunxi_priv_data *gmac = priv;
@@ -72,28 +72,28 @@ static void sun7i_gmac_exit(struct platform_device *pdev, void *priv)
regulator_disable(gmac->regulator);
}
-static void sun7i_fix_speed(void *priv, unsigned int speed, unsigned int mode)
+static int sun7i_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed)
{
- struct sunxi_priv_data *gmac = priv;
-
- /* only GMII mode requires us to reconfigure the clock lines */
- if (gmac->interface != PHY_INTERFACE_MODE_GMII)
- return;
-
- if (gmac->clk_enabled) {
- clk_disable(gmac->tx_clk);
- gmac->clk_enabled = 0;
- }
- clk_unprepare(gmac->tx_clk);
-
- if (speed == 1000) {
- clk_set_rate(gmac->tx_clk, SUN7I_GMAC_GMII_RGMII_RATE);
- clk_prepare_enable(gmac->tx_clk);
- gmac->clk_enabled = 1;
- } else {
- clk_set_rate(gmac->tx_clk, SUN7I_GMAC_MII_RATE);
- clk_prepare(gmac->tx_clk);
+ struct sunxi_priv_data *gmac = bsp_priv;
+
+ if (interface == PHY_INTERFACE_MODE_GMII) {
+ if (gmac->clk_enabled) {
+ clk_disable(gmac->tx_clk);
+ gmac->clk_enabled = 0;
+ }
+ clk_unprepare(gmac->tx_clk);
+
+ if (speed == 1000) {
+ clk_set_rate(gmac->tx_clk, SUN7I_GMAC_GMII_RGMII_RATE);
+ clk_prepare_enable(gmac->tx_clk);
+ gmac->clk_enabled = 1;
+ } else {
+ clk_set_rate(gmac->tx_clk, SUN7I_GMAC_MII_RATE);
+ clk_prepare(gmac->tx_clk);
+ }
}
+ return 0;
}
static int sun7i_gmac_probe(struct platform_device *pdev)
@@ -116,11 +116,7 @@ static int sun7i_gmac_probe(struct platform_device *pdev)
if (!gmac)
return -ENOMEM;
- ret = of_get_phy_mode(dev->of_node, &gmac->interface);
- if (ret && ret != -ENODEV) {
- dev_err(dev, "Can't get phy-mode\n");
- return ret;
- }
+ gmac->interface = plat_dat->phy_interface;
gmac->tx_clk = devm_clk_get(dev, "allwinner_gmac_tx");
if (IS_ERR(gmac->tx_clk)) {
@@ -140,28 +136,15 @@ static int sun7i_gmac_probe(struct platform_device *pdev)
/* platform data specifying hardware features and callbacks.
* hardware features were copied from Allwinner drivers. */
plat_dat->tx_coe = 1;
- plat_dat->has_gmac = true;
+ plat_dat->core_type = DWMAC_CORE_GMAC;
plat_dat->bsp_priv = gmac;
plat_dat->init = sun7i_gmac_init;
plat_dat->exit = sun7i_gmac_exit;
- plat_dat->fix_mac_speed = sun7i_fix_speed;
+ plat_dat->set_clk_tx_rate = sun7i_set_clk_tx_rate;
plat_dat->tx_fifo_size = 4096;
plat_dat->rx_fifo_size = 16384;
- ret = sun7i_gmac_init(pdev, plat_dat->bsp_priv);
- if (ret)
- return ret;
-
- ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
- if (ret)
- goto err_gmac_exit;
-
- return 0;
-
-err_gmac_exit:
- sun7i_gmac_exit(pdev, plat_dat->bsp_priv);
-
- return ret;
+ return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
}
static const struct of_device_id sun7i_dwmac_match[] = {
@@ -172,7 +155,6 @@ MODULE_DEVICE_TABLE(of, sun7i_dwmac_match);
static struct platform_driver sun7i_dwmac_driver = {
.probe = sun7i_gmac_probe,
- .remove_new = stmmac_pltfr_remove,
.driver = {
.name = "sun7i-dwmac",
.pm = &stmmac_pltfr_pm_ops,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c
index 362f85136c3e..d765acbe3754 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/module.h>
@@ -19,6 +20,8 @@ struct tegra_mgbe {
struct reset_control *rst_mac;
struct reset_control *rst_pcs;
+ u32 iommu_sid;
+
void __iomem *hv;
void __iomem *regs;
void __iomem *xpcs;
@@ -50,7 +53,6 @@ struct tegra_mgbe {
#define MGBE_WRAP_COMMON_INTR_ENABLE 0x8704
#define MAC_SBD_INTR BIT(2)
#define MGBE_WRAP_AXI_ASID0_CTRL 0x8400
-#define MGBE_SID 0x6
static int __maybe_unused tegra_mgbe_suspend(struct device *dev)
{
@@ -84,7 +86,7 @@ static int __maybe_unused tegra_mgbe_resume(struct device *dev)
writel(MAC_SBD_INTR, mgbe->regs + MGBE_WRAP_COMMON_INTR_ENABLE);
/* Program SID */
- writel(MGBE_SID, mgbe->hv + MGBE_WRAP_AXI_ASID0_CTRL);
+ writel(mgbe->iommu_sid, mgbe->hv + MGBE_WRAP_AXI_ASID0_CTRL);
value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_STATUS);
if ((value & XPCS_WRAP_UPHY_STATUS_TX_P_UP) == 0) {
@@ -127,10 +129,12 @@ static int mgbe_uphy_lane_bringup_serdes_up(struct net_device *ndev, void *mgbe_
value &= ~XPCS_WRAP_UPHY_RX_CONTROL_AUX_RX_IDDQ;
writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+ usleep_range(10, 20); /* 50ns min delay needed as per HW design */
value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
value &= ~XPCS_WRAP_UPHY_RX_CONTROL_RX_SLEEP;
writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+ usleep_range(10, 20); /* 500ns min delay needed as per HW design */
value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_CAL_EN;
writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
@@ -143,22 +147,30 @@ static int mgbe_uphy_lane_bringup_serdes_up(struct net_device *ndev, void *mgbe_
return err;
}
+ usleep_range(10, 20); /* 50ns min delay needed as per HW design */
value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_DATA_EN;
writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
- value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_CDR_RESET;
+ value &= ~XPCS_WRAP_UPHY_RX_CONTROL_RX_PCS_PHY_RDY;
writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+ usleep_range(10, 20); /* 50ns min delay needed as per HW design */
value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
- value &= ~XPCS_WRAP_UPHY_RX_CONTROL_RX_CDR_RESET;
+ value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_CDR_RESET;
writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+ usleep_range(10, 20); /* 50ns min delay needed as per HW design */
value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_PCS_PHY_RDY;
writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+ msleep(30); /* 30ms delay needed as per HW design */
+ value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+ value &= ~XPCS_WRAP_UPHY_RX_CONTROL_RX_CDR_RESET;
+ writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+
err = readl_poll_timeout(mgbe->xpcs + XPCS_WRAP_IRQ_STATUS, value,
value & XPCS_WRAP_IRQ_STATUS_PCS_LINK_STS,
500, 500 * 2000);
@@ -231,6 +243,12 @@ static int tegra_mgbe_probe(struct platform_device *pdev)
if (IS_ERR(mgbe->xpcs))
return PTR_ERR(mgbe->xpcs);
+ /* get controller's stream id from iommu property in device tree */
+ if (!tegra_dev_iommu_get_stream_id(mgbe->dev, &mgbe->iommu_sid)) {
+ dev_err(mgbe->dev, "failed to get iommu stream id\n");
+ return -EINVAL;
+ }
+
res.addr = mgbe->regs;
res.irq = irq;
@@ -290,7 +308,7 @@ static int tegra_mgbe_probe(struct platform_device *pdev)
goto disable_clks;
}
- plat->has_xgmac = 1;
+ plat->core_type = DWMAC_CORE_XGMAC;
plat->flags |= STMMAC_FLAG_TSO_EN;
plat->pmt = 1;
plat->bsp_priv = mgbe;
@@ -336,7 +354,7 @@ static int tegra_mgbe_probe(struct platform_device *pdev)
writel(MAC_SBD_INTR, mgbe->regs + MGBE_WRAP_COMMON_INTR_ENABLE);
/* Program SID */
- writel(MGBE_SID, mgbe->hv + MGBE_WRAP_AXI_ASID0_CTRL);
+ writel(mgbe->iommu_sid, mgbe->hv + MGBE_WRAP_AXI_ASID0_CTRL);
plat->flags |= STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP;
@@ -371,7 +389,7 @@ static SIMPLE_DEV_PM_OPS(tegra_mgbe_pm_ops, tegra_mgbe_suspend, tegra_mgbe_resum
static struct platform_driver tegra_mgbe_driver = {
.probe = tegra_mgbe_probe,
- .remove_new = tegra_mgbe_remove,
+ .remove = tegra_mgbe_remove,
.driver = {
.name = "tegra-mgbe",
.pm = &tegra_mgbe_pm_ops,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c
new file mode 100644
index 000000000000..e291028ba56e
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c
@@ -0,0 +1,286 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * T-HEAD DWMAC platform driver
+ *
+ * Copyright (C) 2021 Alibaba Group Holding Limited.
+ * Copyright (C) 2023 Jisheng Zhang <jszhang@kernel.org>
+ *
+ */
+
+#include <linux/bitfield.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_net.h>
+#include <linux/platform_device.h>
+
+#include "stmmac_platform.h"
+
+#define GMAC_CLK_EN 0x00
+#define GMAC_TX_CLK_EN BIT(1)
+#define GMAC_TX_CLK_N_EN BIT(2)
+#define GMAC_TX_CLK_OUT_EN BIT(3)
+#define GMAC_RX_CLK_EN BIT(4)
+#define GMAC_RX_CLK_N_EN BIT(5)
+#define GMAC_EPHY_REF_CLK_EN BIT(6)
+#define GMAC_RXCLK_DELAY_CTRL 0x04
+#define GMAC_RXCLK_BYPASS BIT(15)
+#define GMAC_RXCLK_INVERT BIT(14)
+#define GMAC_RXCLK_DELAY GENMASK(4, 0)
+#define GMAC_TXCLK_DELAY_CTRL 0x08
+#define GMAC_TXCLK_BYPASS BIT(15)
+#define GMAC_TXCLK_INVERT BIT(14)
+#define GMAC_TXCLK_DELAY GENMASK(4, 0)
+#define GMAC_PLLCLK_DIV 0x0c
+#define GMAC_PLLCLK_DIV_EN BIT(31)
+#define GMAC_PLLCLK_DIV_NUM GENMASK(7, 0)
+#define GMAC_GTXCLK_SEL 0x18
+#define GMAC_GTXCLK_SEL_PLL BIT(0)
+#define GMAC_INTF_CTRL 0x1c
+#define PHY_INTF_MASK BIT(0)
+#define PHY_INTF_RGMII FIELD_PREP(PHY_INTF_MASK, 1)
+#define PHY_INTF_MII_GMII FIELD_PREP(PHY_INTF_MASK, 0)
+#define GMAC_TXCLK_OEN 0x20
+#define TXCLK_DIR_MASK BIT(0)
+#define TXCLK_DIR_OUTPUT FIELD_PREP(TXCLK_DIR_MASK, 0)
+#define TXCLK_DIR_INPUT FIELD_PREP(TXCLK_DIR_MASK, 1)
+
+struct thead_dwmac {
+ struct plat_stmmacenet_data *plat;
+ void __iomem *apb_base;
+ struct device *dev;
+};
+
+static int thead_dwmac_set_phy_if(struct plat_stmmacenet_data *plat)
+{
+ struct thead_dwmac *dwmac = plat->bsp_priv;
+ u32 phyif;
+
+ switch (plat->phy_interface) {
+ case PHY_INTERFACE_MODE_MII:
+ phyif = PHY_INTF_MII_GMII;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ phyif = PHY_INTF_RGMII;
+ break;
+ default:
+ dev_err(dwmac->dev, "unsupported phy interface %s\n",
+ phy_modes(plat->phy_interface));
+ return -EINVAL;
+ }
+
+ writel(phyif, dwmac->apb_base + GMAC_INTF_CTRL);
+ return 0;
+}
+
+static int thead_dwmac_set_txclk_dir(struct plat_stmmacenet_data *plat)
+{
+ struct thead_dwmac *dwmac = plat->bsp_priv;
+ u32 txclk_dir;
+
+ switch (plat->phy_interface) {
+ case PHY_INTERFACE_MODE_MII:
+ txclk_dir = TXCLK_DIR_INPUT;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ txclk_dir = TXCLK_DIR_OUTPUT;
+ break;
+ default:
+ dev_err(dwmac->dev, "unsupported phy interface %s\n",
+ phy_modes(plat->phy_interface));
+ return -EINVAL;
+ }
+
+ writel(txclk_dir, dwmac->apb_base + GMAC_TXCLK_OEN);
+ return 0;
+}
+
+static int thead_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed)
+{
+ struct thead_dwmac *dwmac = bsp_priv;
+ struct plat_stmmacenet_data *plat;
+ unsigned long rate;
+ long tx_rate;
+ u32 div, reg;
+
+ plat = dwmac->plat;
+
+ switch (plat->phy_interface) {
+ /* For MII, rxc/txc is provided by phy */
+ case PHY_INTERFACE_MODE_MII:
+ return 0;
+
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
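+ /* For RGMII the TX clock is derived by dividing the stmmac clock;
+ * the divider must divide it exactly for the requested link speed.
+ */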
+ rate = clk_get_rate(plat->stmmac_clk);
+
+ writel(0, dwmac->apb_base + GMAC_PLLCLK_DIV);
+
+ tx_rate = rgmii_clock(speed);
+ if (tx_rate < 0) {
+ dev_err(dwmac->dev, "invalid speed %d\n", speed);
+ return tx_rate;
+ }
+
+ div = rate / tx_rate;
+ if (rate != tx_rate * div) {
+ dev_err(dwmac->dev, "invalid gmac rate %lu\n", rate);
+ return -EINVAL;
+ }
+
+ reg = FIELD_PREP(GMAC_PLLCLK_DIV_EN, 1) |
+ FIELD_PREP(GMAC_PLLCLK_DIV_NUM, div);
+ writel(reg, dwmac->apb_base + GMAC_PLLCLK_DIV);
+ return 0;
+
+ default:
+ dev_err(dwmac->dev, "unsupported phy interface %s\n",
+ phy_modes(plat->phy_interface));
+ return -EINVAL;
+ }
+}
+
+static int thead_dwmac_enable_clk(struct plat_stmmacenet_data *plat)
+{
+ struct thead_dwmac *dwmac = plat->bsp_priv;
+ u32 reg, div;
+
+ switch (plat->phy_interface) {
+ case PHY_INTERFACE_MODE_MII:
+ reg = GMAC_RX_CLK_EN | GMAC_TX_CLK_EN;
+ break;
+
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ /* use pll */
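+ /* Default the divider to the 1000 Mbit/s rate here;
+ * thead_set_clk_tx_rate() reprograms it on link speed changes.
+ */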
+ div = clk_get_rate(plat->stmmac_clk) / rgmii_clock(SPEED_1000);
+ reg = FIELD_PREP(GMAC_PLLCLK_DIV_EN, 1) |
+ FIELD_PREP(GMAC_PLLCLK_DIV_NUM, div);
+
+ writel(0, dwmac->apb_base + GMAC_PLLCLK_DIV);
+ writel(reg, dwmac->apb_base + GMAC_PLLCLK_DIV);
+
+ writel(GMAC_GTXCLK_SEL_PLL, dwmac->apb_base + GMAC_GTXCLK_SEL);
+ reg = GMAC_TX_CLK_EN | GMAC_TX_CLK_N_EN | GMAC_TX_CLK_OUT_EN |
+ GMAC_RX_CLK_EN | GMAC_RX_CLK_N_EN;
+ break;
+
+ default:
+ dev_err(dwmac->dev, "unsupported phy interface %s\n",
+ phy_modes(plat->phy_interface));
+ return -EINVAL;
+ }
+
+ writel(reg, dwmac->apb_base + GMAC_CLK_EN);
+ return 0;
+}
+
+static int thead_dwmac_init(struct device *dev, void *priv)
+{
+ struct thead_dwmac *dwmac = priv;
+ unsigned int reg;
+ int ret;
+
+ ret = thead_dwmac_set_phy_if(dwmac->plat);
+ if (ret)
+ return ret;
+
+ ret = thead_dwmac_set_txclk_dir(dwmac->plat);
+ if (ret)
+ return ret;
+
+ reg = readl(dwmac->apb_base + GMAC_RXCLK_DELAY_CTRL);
+ reg &= ~(GMAC_RXCLK_DELAY);
+ reg |= FIELD_PREP(GMAC_RXCLK_DELAY, 0);
+ writel(reg, dwmac->apb_base + GMAC_RXCLK_DELAY_CTRL);
+
+ reg = readl(dwmac->apb_base + GMAC_TXCLK_DELAY_CTRL);
+ reg &= ~(GMAC_TXCLK_DELAY);
+ reg |= FIELD_PREP(GMAC_TXCLK_DELAY, 0);
+ writel(reg, dwmac->apb_base + GMAC_TXCLK_DELAY_CTRL);
+
+ return thead_dwmac_enable_clk(dwmac->plat);
+}
+
+static int thead_dwmac_probe(struct platform_device *pdev)
+{
+ struct stmmac_resources stmmac_res;
+ struct plat_stmmacenet_data *plat;
+ struct thead_dwmac *dwmac;
+ struct clk *apb_clk;
+ void __iomem *apb;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to get resources\n");
+
+ plat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ if (IS_ERR(plat))
+ return dev_err_probe(&pdev->dev, PTR_ERR(plat),
+ "dt configuration failed\n");
+
+ /*
+ * The APB clock is essential for accessing glue registers. However,
+ * old devicetrees don't describe it correctly. We continue to probe
+ * and emit a warning if it isn't present.
+ */
+ apb_clk = devm_clk_get_enabled(&pdev->dev, "apb");
+ if (PTR_ERR(apb_clk) == -ENOENT)
+ dev_warn(&pdev->dev,
+ "cannot get apb clock, link may break after speed changes\n");
+ else if (IS_ERR(apb_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(apb_clk),
+ "failed to get apb clock\n");
+
+ dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
+ if (!dwmac)
+ return -ENOMEM;
+
+ apb = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(apb))
+ return dev_err_probe(&pdev->dev, PTR_ERR(apb),
+ "failed to remap gmac apb registers\n");
+
+ dwmac->dev = &pdev->dev;
+ dwmac->plat = plat;
+ dwmac->apb_base = apb;
+ plat->bsp_priv = dwmac;
+ plat->set_clk_tx_rate = thead_set_clk_tx_rate;
+ plat->init = thead_dwmac_init;
+
+ return devm_stmmac_pltfr_probe(pdev, plat, &stmmac_res);
+}
+
+static const struct of_device_id thead_dwmac_match[] = {
+ { .compatible = "thead,th1520-gmac" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, thead_dwmac_match);
+
+static struct platform_driver thead_dwmac_driver = {
+ .probe = thead_dwmac_probe,
+ .driver = {
+ .name = "thead-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = thead_dwmac_match,
+ },
+};
+module_platform_driver(thead_dwmac_driver);
+
+MODULE_AUTHOR("Jisheng Zhang <jszhang@kernel.org>");
+MODULE_AUTHOR("Drew Fustini <drew@pdp7.com>");
+MODULE_DESCRIPTION("T-HEAD DWMAC platform driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
index a5a5cfa989c6..9497b13a5753 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
@@ -42,69 +42,45 @@
#define ETHER_CLK_SEL_RX_TX_CLK_EN (ETHER_CLK_SEL_RX_CLK_EN | ETHER_CLK_SEL_TX_CLK_EN)
-#define ETHER_CONFIG_INTF_MII 0
-#define ETHER_CONFIG_INTF_RGMII BIT(0)
-#define ETHER_CONFIG_INTF_RMII BIT(2)
-
struct visconti_eth {
void __iomem *reg;
- u32 phy_intf_sel;
struct clk *phy_ref_clk;
struct device *dev;
- spinlock_t lock; /* lock to protect register update */
};
-static void visconti_eth_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
+static int visconti_eth_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed)
{
- struct visconti_eth *dwmac = priv;
- struct net_device *netdev = dev_get_drvdata(dwmac->dev);
- unsigned int val, clk_sel_val = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&dwmac->lock, flags);
-
- /* adjust link */
- val = readl(dwmac->reg + MAC_CTRL_REG);
- val &= ~(GMAC_CONFIG_PS | GMAC_CONFIG_FES);
-
- switch (speed) {
- case SPEED_1000:
- if (dwmac->phy_intf_sel == ETHER_CONFIG_INTF_RGMII)
- clk_sel_val = ETHER_CLK_SEL_FREQ_SEL_125M;
- break;
- case SPEED_100:
- if (dwmac->phy_intf_sel == ETHER_CONFIG_INTF_RGMII)
- clk_sel_val = ETHER_CLK_SEL_FREQ_SEL_25M;
- if (dwmac->phy_intf_sel == ETHER_CONFIG_INTF_RMII)
- clk_sel_val = ETHER_CLK_SEL_DIV_SEL_2;
- val |= GMAC_CONFIG_PS | GMAC_CONFIG_FES;
- break;
- case SPEED_10:
- if (dwmac->phy_intf_sel == ETHER_CONFIG_INTF_RGMII)
- clk_sel_val = ETHER_CLK_SEL_FREQ_SEL_2P5M;
- if (dwmac->phy_intf_sel == ETHER_CONFIG_INTF_RMII)
- clk_sel_val = ETHER_CLK_SEL_DIV_SEL_20;
- val |= GMAC_CONFIG_PS;
- break;
- default:
- /* No bit control */
- netdev_err(netdev, "Unsupported speed request (%d)", speed);
- spin_unlock_irqrestore(&dwmac->lock, flags);
- return;
- }
-
- writel(val, dwmac->reg + MAC_CTRL_REG);
-
- /* Stop internal clock */
- val = readl(dwmac->reg + REG_ETHER_CLOCK_SEL);
- val &= ~(ETHER_CLK_SEL_RMII_CLK_EN | ETHER_CLK_SEL_RX_TX_CLK_EN);
- val |= ETHER_CLK_SEL_TX_O_E_N_IN;
- writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+ struct visconti_eth *dwmac = bsp_priv;
+ unsigned long clk_sel, val;
+
+ if (phy_interface_mode_is_rgmii(interface)) {
+ switch (speed) {
+ case SPEED_1000:
+ clk_sel = ETHER_CLK_SEL_FREQ_SEL_125M;
+ break;
+
+ case SPEED_100:
+ clk_sel = ETHER_CLK_SEL_FREQ_SEL_25M;
+ break;
+
+ case SPEED_10:
+ clk_sel = ETHER_CLK_SEL_FREQ_SEL_2P5M;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /* Stop internal clock */
+ val = readl(dwmac->reg + REG_ETHER_CLOCK_SEL);
+ val &= ~(ETHER_CLK_SEL_RMII_CLK_EN |
+ ETHER_CLK_SEL_RX_TX_CLK_EN);
+ val |= ETHER_CLK_SEL_TX_O_E_N_IN;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
- /* Set Clock-Mux, Start clock, Set TX_O direction */
- switch (dwmac->phy_intf_sel) {
- case ETHER_CONFIG_INTF_RGMII:
- val = clk_sel_val | ETHER_CLK_SEL_RX_CLK_EXT_SEL_RXC;
+ /* Set Clock-Mux, Start clock, Set TX_O direction */
+ val = clk_sel | ETHER_CLK_SEL_RX_CLK_EXT_SEL_RXC;
writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
val |= ETHER_CLK_SEL_RX_TX_CLK_EN;
@@ -112,11 +88,32 @@ static void visconti_eth_fix_mac_speed(void *priv, unsigned int speed, unsigned
val &= ~ETHER_CLK_SEL_TX_O_E_N_IN;
writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
- break;
- case ETHER_CONFIG_INTF_RMII:
- val = clk_sel_val | ETHER_CLK_SEL_RX_CLK_EXT_SEL_DIV |
- ETHER_CLK_SEL_TX_CLK_EXT_SEL_DIV | ETHER_CLK_SEL_TX_O_E_N_IN |
- ETHER_CLK_SEL_RMII_CLK_SEL_RX_C;
+ } else if (interface == PHY_INTERFACE_MODE_RMII) {
+ switch (speed) {
+ case SPEED_100:
+ clk_sel = ETHER_CLK_SEL_DIV_SEL_2;
+ break;
+
+ case SPEED_10:
+ clk_sel = ETHER_CLK_SEL_DIV_SEL_20;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /* Stop internal clock */
+ val = readl(dwmac->reg + REG_ETHER_CLOCK_SEL);
+ val &= ~(ETHER_CLK_SEL_RMII_CLK_EN |
+ ETHER_CLK_SEL_RX_TX_CLK_EN);
+ val |= ETHER_CLK_SEL_TX_O_E_N_IN;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+
+ /* Set Clock-Mux, Start clock, Set TX_O direction */
+ val = clk_sel | ETHER_CLK_SEL_RX_CLK_EXT_SEL_DIV |
+ ETHER_CLK_SEL_TX_CLK_EXT_SEL_DIV |
+ ETHER_CLK_SEL_TX_O_E_N_IN |
+ ETHER_CLK_SEL_RMII_CLK_SEL_RX_C;
writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
val |= ETHER_CLK_SEL_RMII_CLK_RST;
@@ -124,46 +121,42 @@ static void visconti_eth_fix_mac_speed(void *priv, unsigned int speed, unsigned
val |= ETHER_CLK_SEL_RMII_CLK_EN | ETHER_CLK_SEL_RX_TX_CLK_EN;
writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
- break;
- case ETHER_CONFIG_INTF_MII:
- default:
- val = clk_sel_val | ETHER_CLK_SEL_RX_CLK_EXT_SEL_RXC |
- ETHER_CLK_SEL_TX_CLK_EXT_SEL_TXC | ETHER_CLK_SEL_TX_O_E_N_IN;
+ } else {
+ /* Stop internal clock */
+ val = readl(dwmac->reg + REG_ETHER_CLOCK_SEL);
+ val &= ~(ETHER_CLK_SEL_RMII_CLK_EN |
+ ETHER_CLK_SEL_RX_TX_CLK_EN);
+ val |= ETHER_CLK_SEL_TX_O_E_N_IN;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+
+ /* Set Clock-Mux, Start clock, Set TX_O direction */
+ val = ETHER_CLK_SEL_RX_CLK_EXT_SEL_RXC |
+ ETHER_CLK_SEL_TX_CLK_EXT_SEL_TXC |
+ ETHER_CLK_SEL_TX_O_E_N_IN;
writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
val |= ETHER_CLK_SEL_RX_TX_CLK_EN;
writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
- break;
}
- spin_unlock_irqrestore(&dwmac->lock, flags);
+ return 0;
}
static int visconti_eth_init_hw(struct platform_device *pdev, struct plat_stmmacenet_data *plat_dat)
{
struct visconti_eth *dwmac = plat_dat->bsp_priv;
- unsigned int reg_val, clk_sel_val;
-
- switch (plat_dat->phy_interface) {
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- dwmac->phy_intf_sel = ETHER_CONFIG_INTF_RGMII;
- break;
- case PHY_INTERFACE_MODE_MII:
- dwmac->phy_intf_sel = ETHER_CONFIG_INTF_MII;
- break;
- case PHY_INTERFACE_MODE_RMII:
- dwmac->phy_intf_sel = ETHER_CONFIG_INTF_RMII;
- break;
- default:
+ unsigned int clk_sel_val;
+ int phy_intf_sel;
+
+ phy_intf_sel = stmmac_get_phy_intf_sel(plat_dat->phy_interface);
+ if (phy_intf_sel != PHY_INTF_SEL_GMII_MII &&
+ phy_intf_sel != PHY_INTF_SEL_RGMII &&
+ phy_intf_sel != PHY_INTF_SEL_RMII) {
dev_err(&pdev->dev, "Unsupported phy-mode (%d)\n", plat_dat->phy_interface);
return -EOPNOTSUPP;
}
- reg_val = dwmac->phy_intf_sel;
- writel(reg_val, dwmac->reg + REG_ETHER_CONTROL);
+ writel(phy_intf_sel, dwmac->reg + REG_ETHER_CONTROL);
/* Enable TX/RX clock */
clk_sel_val = ETHER_CLK_SEL_FREQ_SEL_125M;
@@ -173,8 +166,8 @@ static int visconti_eth_init_hw(struct platform_device *pdev, struct plat_stmmac
dwmac->reg + REG_ETHER_CLOCK_SEL);
/* release internal-reset */
- reg_val |= ETHER_ETH_CONTROL_RESET;
- writel(reg_val, dwmac->reg + REG_ETHER_CONTROL);
+ phy_intf_sel |= ETHER_ETH_CONTROL_RESET;
+ writel(phy_intf_sel, dwmac->reg + REG_ETHER_CONTROL);
return 0;
}
@@ -228,11 +221,10 @@ static int visconti_eth_dwmac_probe(struct platform_device *pdev)
if (!dwmac)
return -ENOMEM;
- spin_lock_init(&dwmac->lock);
dwmac->reg = stmmac_res.addr;
dwmac->dev = &pdev->dev;
plat_dat->bsp_priv = dwmac;
- plat_dat->fix_mac_speed = visconti_eth_fix_mac_speed;
+ plat_dat->set_clk_tx_rate = visconti_eth_set_clk_tx_rate;
ret = visconti_eth_clock_probe(pdev, plat_dat);
if (ret)
@@ -268,7 +260,7 @@ MODULE_DEVICE_TABLE(of, visconti_eth_dwmac_match);
static struct platform_driver visconti_eth_dwmac_driver = {
.probe = visconti_eth_dwmac_probe,
- .remove_new = visconti_eth_dwmac_remove,
+ .remove = visconti_eth_dwmac_remove,
.driver = {
.name = "visconti-eth-dwmac",
.of_match_table = visconti_eth_dwmac_match,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index 4296ddda8aaa..697bba641e05 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -38,11 +38,10 @@
#define GMAC_INT_DISABLE_PCSAN BIT(2)
#define GMAC_INT_DISABLE_PMT BIT(3)
#define GMAC_INT_DISABLE_TIMESTAMP BIT(9)
-#define GMAC_INT_DISABLE_PCS (GMAC_INT_DISABLE_RGMII | \
+#define GMAC_INT_DEFAULT_MASK (GMAC_INT_DISABLE_RGMII | \
GMAC_INT_DISABLE_PCSLINK | \
- GMAC_INT_DISABLE_PCSAN)
-#define GMAC_INT_DEFAULT_MASK (GMAC_INT_DISABLE_TIMESTAMP | \
- GMAC_INT_DISABLE_PCS)
+ GMAC_INT_DISABLE_PCSAN | \
+ GMAC_INT_DISABLE_TIMESTAMP)
/* PMT Control and Status */
#define GMAC_PMT 0x0000002c
@@ -59,22 +58,11 @@ enum power_event {
/* Energy Efficient Ethernet (EEE)
*
* LPI status, timer and control register offset
+ * For LPI control and status bit definitions, see common.h.
*/
#define LPI_CTRL_STATUS 0x0030
#define LPI_TIMER_CTRL 0x0034
-/* LPI control and status defines */
-#define LPI_CTRL_STATUS_LPITXA 0x00080000 /* Enable LPI TX Automate */
-#define LPI_CTRL_STATUS_PLSEN 0x00040000 /* Enable PHY Link Status */
-#define LPI_CTRL_STATUS_PLS 0x00020000 /* PHY Link Status */
-#define LPI_CTRL_STATUS_LPIEN 0x00010000 /* LPI Enable */
-#define LPI_CTRL_STATUS_RLPIST 0x00000200 /* Receive LPI state */
-#define LPI_CTRL_STATUS_TLPIST 0x00000100 /* Transmit LPI state */
-#define LPI_CTRL_STATUS_RLPIEX 0x00000008 /* Receive LPI Exit */
-#define LPI_CTRL_STATUS_RLPIEN 0x00000004 /* Receive LPI Entry */
-#define LPI_CTRL_STATUS_TLPIEX 0x00000002 /* Transmit LPI Exit */
-#define LPI_CTRL_STATUS_TLPIEN 0x00000001 /* Transmit LPI Entry */
-
/* GMAC HW ADDR regs */
#define GMAC_ADDR_HIGH(reg) ((reg > 15) ? 0x00000800 + (reg - 16) * 8 : \
0x00000040 + (reg * 8))
@@ -329,5 +317,17 @@ enum rtc_control {
#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208
#define GMAC_EXTHASH_BASE 0x500
+/* PTP and timestamping registers */
+
+#define GMAC3_X_ATSNS GENMASK(29, 25)
+#define GMAC3_X_ATSNS_SHIFT 25
+
+#define GMAC_PTP_TCR_ATSFC BIT(24)
+#define GMAC_PTP_TCR_ATSEN0 BIT(25)
+
+#define GMAC3_X_TIMESTAMP_STATUS 0x28
+#define GMAC_PTP_ATNR 0x30
+#define GMAC_PTP_ATSR 0x34
+
extern const struct stmmac_dma_ops dwmac1000_dma_ops;
#endif /* __DWMAC1000_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 8555299443f4..a2ae136d2c0e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -15,52 +15,42 @@
#include <linux/crc32.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
-#include <asm/io.h>
+#include <linux/io.h>
+#include <linux/string_choices.h>
#include "stmmac.h"
#include "stmmac_pcs.h"
+#include "stmmac_ptp.h"
#include "dwmac1000.h"
+static int dwmac1000_pcs_init(struct stmmac_priv *priv)
+{
+ if (!priv->dma_cap.pcs)
+ return 0;
+
+ return stmmac_integrated_pcs_init(priv, GMAC_PCS_BASE,
+ GMAC_INT_DISABLE_PCSLINK |
+ GMAC_INT_DISABLE_PCSAN);
+}
+
static void dwmac1000_core_init(struct mac_device_info *hw,
struct net_device *dev)
{
void __iomem *ioaddr = hw->pcsr;
- u32 value = readl(ioaddr + GMAC_CONTROL);
int mtu = dev->mtu;
+ u32 value;
/* Configure GMAC core */
- value |= GMAC_CORE_INIT;
+ value = readl(ioaddr + GMAC_CONTROL);
if (mtu > 1500)
value |= GMAC_CONTROL_2K;
if (mtu > 2000)
value |= GMAC_CONTROL_JE;
- if (hw->ps) {
- value |= GMAC_CONTROL_TE;
-
- value &= ~hw->link.speed_mask;
- switch (hw->ps) {
- case SPEED_1000:
- value |= hw->link.speed1000;
- break;
- case SPEED_100:
- value |= hw->link.speed100;
- break;
- case SPEED_10:
- value |= hw->link.speed10;
- break;
- }
- }
-
- writel(value, ioaddr + GMAC_CONTROL);
+ writel(value | GMAC_CORE_INIT, ioaddr + GMAC_CONTROL);
/* Mask GMAC interrupts */
- value = GMAC_INT_DEFAULT_MASK;
-
- if (hw->pcs)
- value &= ~GMAC_INT_DISABLE_PCS;
-
- writel(value, ioaddr + GMAC_INT_MASK);
+ writel(GMAC_INT_DEFAULT_MASK, ioaddr + GMAC_INT_MASK);
#ifdef STMMAC_VLAN_TAG_USED
/* Tag detection without filtering */
@@ -68,6 +58,20 @@ static void dwmac1000_core_init(struct mac_device_info *hw,
#endif
}
+static void dwmac1000_irq_modify(struct mac_device_info *hw, u32 disable,
+ u32 enable)
+{
+ void __iomem *int_mask = hw->pcsr + GMAC_INT_MASK;
+ unsigned long flags;
+ u32 value;
+
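+ /* GMAC_INT_MASK is a disable mask: a set bit masks the interrupt,
+ * so bits to disable are set and bits to enable are cleared.
+ */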
+ spin_lock_irqsave(&hw->irq_ctrl_lock, flags);
+ value = readl(int_mask) | disable;
+ value &= ~enable;
+ writel(value, int_mask);
+ spin_unlock_irqrestore(&hw->irq_ctrl_lock, flags);
+}
+
static int dwmac1000_rx_ipc_enable(struct mac_device_info *hw)
{
void __iomem *ioaddr = hw->pcsr;
@@ -261,39 +265,6 @@ static void dwmac1000_pmt(struct mac_device_info *hw, unsigned long mode)
writel(pmt, ioaddr + GMAC_PMT);
}
-/* RGMII or SMII interface */
-static void dwmac1000_rgsmii(void __iomem *ioaddr, struct stmmac_extra_stats *x)
-{
- u32 status;
-
- status = readl(ioaddr + GMAC_RGSMIIIS);
- x->irq_rgmii_n++;
-
- /* Check the link status */
- if (status & GMAC_RGSMIIIS_LNKSTS) {
- int speed_value;
-
- x->pcs_link = 1;
-
- speed_value = ((status & GMAC_RGSMIIIS_SPEED) >>
- GMAC_RGSMIIIS_SPEED_SHIFT);
- if (speed_value == GMAC_RGSMIIIS_SPEED_125)
- x->pcs_speed = SPEED_1000;
- else if (speed_value == GMAC_RGSMIIIS_SPEED_25)
- x->pcs_speed = SPEED_100;
- else
- x->pcs_speed = SPEED_10;
-
- x->pcs_duplex = (status & GMAC_RGSMIIIS_LNKMOD_MASK);
-
- pr_info("Link is Up - %d/%s\n", (int)x->pcs_speed,
- x->pcs_duplex ? "Full" : "Half");
- } else {
- x->pcs_link = 0;
- pr_info("Link is Down\n");
- }
-}
-
static int dwmac1000_irq_status(struct mac_device_info *hw,
struct stmmac_extra_stats *x)
{
@@ -335,37 +306,27 @@ static int dwmac1000_irq_status(struct mac_device_info *hw,
dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
- if (intr_status & PCS_RGSMIIIS_IRQ)
- dwmac1000_rgsmii(ioaddr, x);
-
return ret;
}
-static void dwmac1000_set_eee_mode(struct mac_device_info *hw,
- bool en_tx_lpi_clockgating)
+static int dwmac1000_set_lpi_mode(struct mac_device_info *hw,
+ enum stmmac_lpi_mode mode,
+ bool en_tx_lpi_clockgating, u32 et)
{
void __iomem *ioaddr = hw->pcsr;
u32 value;
- /*TODO - en_tx_lpi_clockgating treatment */
+ if (mode == STMMAC_LPI_TIMER)
+ return -EOPNOTSUPP;
- /* Enable the link status receive on RGMII, SGMII ore SMII
- * receive path and instruct the transmit to enter in LPI
- * state.
- */
value = readl(ioaddr + LPI_CTRL_STATUS);
- value |= LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA;
+ if (mode == STMMAC_LPI_FORCED)
+ value |= LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA;
+ else
+ value &= ~(LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA);
writel(value, ioaddr + LPI_CTRL_STATUS);
-}
-static void dwmac1000_reset_eee_mode(struct mac_device_info *hw)
-{
- void __iomem *ioaddr = hw->pcsr;
- u32 value;
-
- value = readl(ioaddr + LPI_CTRL_STATUS);
- value &= ~(LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA);
- writel(value, ioaddr + LPI_CTRL_STATUS);
+ return 0;
}
static void dwmac1000_set_eee_pls(struct mac_device_info *hw, int link)
@@ -398,20 +359,10 @@ static void dwmac1000_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
writel(value, ioaddr + LPI_TIMER_CTRL);
}
-static void dwmac1000_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral,
- bool loopback)
+static void dwmac1000_ctrl_ane(struct stmmac_priv *priv, bool ane,
+ bool srgmi_ral)
{
- dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
-}
-
-static void dwmac1000_rane(void __iomem *ioaddr, bool restart)
-{
- dwmac_rane(ioaddr, GMAC_PCS_BASE, restart);
-}
-
-static void dwmac1000_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv)
-{
- dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
+ dwmac_ctrl_ane(priv->ioaddr, GMAC_PCS_BASE, ane, srgmi_ral);
}
static void dwmac1000_debug(struct stmmac_priv *priv, void __iomem *ioaddr,
@@ -503,7 +454,9 @@ static void dwmac1000_set_mac_loopback(void __iomem *ioaddr, bool enable)
}
const struct stmmac_ops dwmac1000_ops = {
+ .pcs_init = dwmac1000_pcs_init,
.core_init = dwmac1000_core_init,
+ .irq_modify = dwmac1000_irq_modify,
.set_mac = stmmac_set_mac,
.rx_ipc = dwmac1000_rx_ipc_enable,
.dump_regs = dwmac1000_dump_regs,
@@ -513,14 +466,11 @@ const struct stmmac_ops dwmac1000_ops = {
.pmt = dwmac1000_pmt,
.set_umac_addr = dwmac1000_set_umac_addr,
.get_umac_addr = dwmac1000_get_umac_addr,
- .set_eee_mode = dwmac1000_set_eee_mode,
- .reset_eee_mode = dwmac1000_reset_eee_mode,
+ .set_lpi_mode = dwmac1000_set_lpi_mode,
.set_eee_timer = dwmac1000_set_eee_timer,
.set_eee_pls = dwmac1000_set_eee_pls,
.debug = dwmac1000_debug,
.pcs_ctrl_ane = dwmac1000_ctrl_ane,
- .pcs_rane = dwmac1000_rane,
- .pcs_get_adv_lp = dwmac1000_get_adv_lp,
.set_mac_loopback = dwmac1000_set_mac_loopback,
};
@@ -557,3 +507,103 @@ int dwmac1000_setup(struct stmmac_priv *priv)
return 0;
}
+
+/* DWMAC 1000 HW Timestamping ops */
+
+void dwmac1000_get_ptptime(void __iomem *ptpaddr, u64 *ptp_time)
+{
+ u64 ns;
+
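+ /* The auxiliary snapshot is split across two registers:
+ * nanoseconds in GMAC_PTP_ATNR, seconds in GMAC_PTP_ATSR.
+ */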
+ ns = readl(ptpaddr + GMAC_PTP_ATNR);
+ ns += (u64)readl(ptpaddr + GMAC_PTP_ATSR) * NSEC_PER_SEC;
+
+ *ptp_time = ns;
+}
+
+void dwmac1000_timestamp_interrupt(struct stmmac_priv *priv)
+{
+ struct ptp_clock_event event;
+ u32 ts_status, num_snapshot;
+ unsigned long flags;
+ u64 ptp_time;
+ int i;
+
+ /* Clears the timestamp interrupt */
+ ts_status = readl(priv->ptpaddr + GMAC3_X_TIMESTAMP_STATUS);
+
+ if (!(priv->plat->flags & STMMAC_FLAG_EXT_SNAPSHOT_EN))
+ return;
+
+ num_snapshot = (ts_status & GMAC3_X_ATSNS) >> GMAC3_X_ATSNS_SHIFT;
+
+ for (i = 0; i < num_snapshot; i++) {
+ read_lock_irqsave(&priv->ptp_lock, flags);
+ stmmac_get_ptptime(priv, priv->ptpaddr, &ptp_time);
+ read_unlock_irqrestore(&priv->ptp_lock, flags);
+
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = 0;
+ event.timestamp = ptp_time;
+ ptp_clock_event(priv->ptp_clock, &event);
+ }
+}
+
+/* DWMAC 1000 ptp_clock_info ops */
+
+static void dwmac1000_timestamp_interrupt_cfg(struct stmmac_priv *priv, bool en)
+{
+ void __iomem *ioaddr = priv->ioaddr;
+
+ u32 intr_mask = readl(ioaddr + GMAC_INT_MASK);
+
+ if (en)
+ intr_mask &= ~GMAC_INT_DISABLE_TIMESTAMP;
+ else
+ intr_mask |= GMAC_INT_DISABLE_TIMESTAMP;
+
+ writel(intr_mask, ioaddr + GMAC_INT_MASK);
+}
+
+int dwmac1000_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct stmmac_priv *priv =
+ container_of(ptp, struct stmmac_priv, ptp_clock_ops);
+ void __iomem *ptpaddr = priv->ptpaddr;
+ int ret = -EOPNOTSUPP;
+ u32 tcr_val;
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ mutex_lock(&priv->aux_ts_lock);
+ tcr_val = readl(ptpaddr + PTP_TCR);
+
+ if (on) {
+ tcr_val |= GMAC_PTP_TCR_ATSEN0;
+ tcr_val |= GMAC_PTP_TCR_ATSFC;
+ priv->plat->flags |= STMMAC_FLAG_EXT_SNAPSHOT_EN;
+ } else {
+ tcr_val &= ~GMAC_PTP_TCR_ATSEN0;
+ priv->plat->flags &= ~STMMAC_FLAG_EXT_SNAPSHOT_EN;
+ }
+
+ netdev_dbg(priv->dev, "Auxiliary Snapshot %s.\n",
+ str_enabled_disabled(on));
+ writel(tcr_val, ptpaddr + PTP_TCR);
+
+ /* wait for auxts fifo clear to finish */
+ ret = readl_poll_timeout(ptpaddr + PTP_TCR, tcr_val,
+ !(tcr_val & GMAC_PTP_TCR_ATSFC),
+ 10, 10000);
+
+ mutex_unlock(&priv->aux_ts_lock);
+
+ dwmac1000_timestamp_interrupt_cfg(priv, on);
+ break;
+
+ default:
+ break;
+ }
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index daf79cdbd3ec..5877fec9f6c3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -12,14 +12,13 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
-#include <asm/io.h>
+#include <linux/io.h>
#include "dwmac1000.h"
#include "dwmac_dma.h"
static void dwmac1000_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
{
u32 value = readl(ioaddr + DMA_AXI_BUS_MODE);
- int i;
pr_info("dwmac1000: Master AXI performs %s burst length\n",
!(value & DMA_AXI_UNDEF) ? "fixed" : "any");
@@ -39,46 +38,25 @@ static void dwmac1000_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
/* Depending on the UNDEF bit the Master AXI will perform any burst
* length according to the BLEN programmed (by default all BLEN are
- * set).
+ * set). Note that the UNDEF bit is readonly, and is the inverse of
+ * Bus Mode bit 16.
*/
- for (i = 0; i < AXI_BLEN; i++) {
- switch (axi->axi_blen[i]) {
- case 256:
- value |= DMA_AXI_BLEN256;
- break;
- case 128:
- value |= DMA_AXI_BLEN128;
- break;
- case 64:
- value |= DMA_AXI_BLEN64;
- break;
- case 32:
- value |= DMA_AXI_BLEN32;
- break;
- case 16:
- value |= DMA_AXI_BLEN16;
- break;
- case 8:
- value |= DMA_AXI_BLEN8;
- break;
- case 4:
- value |= DMA_AXI_BLEN4;
- break;
- }
- }
+ value = (value & ~DMA_AXI_BLEN_MASK) | axi->axi_blen_regval;
writel(value, ioaddr + DMA_AXI_BUS_MODE);
}
-static void dwmac1000_dma_init(void __iomem *ioaddr,
- struct stmmac_dma_cfg *dma_cfg, int atds)
+static void dwmac1000_dma_init_channel(struct stmmac_priv *priv,
+ void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg, u32 chan)
{
- u32 value = readl(ioaddr + DMA_BUS_MODE);
int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
int rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
+ u32 value;
+
+ value = readl(ioaddr + DMA_CHAN_BUS_MODE(chan));
- /*
- * Set the DMA PBL (Programmable Burst Length) mode.
+ /* Set the DMA PBL (Programmable Burst Length) mode.
*
* Note: before stmmac core 3.50 this mode bit was 4xPBL, and
* post 3.5 mode bit acts as 8*PBL.
@@ -98,16 +76,16 @@ static void dwmac1000_dma_init(void __iomem *ioaddr,
if (dma_cfg->mixed_burst)
value |= DMA_BUS_MODE_MB;
- if (atds)
+ if (dma_cfg->atds)
value |= DMA_BUS_MODE_ATDS;
if (dma_cfg->aal)
value |= DMA_BUS_MODE_AAL;
- writel(value, ioaddr + DMA_BUS_MODE);
+ writel(value, ioaddr + DMA_CHAN_BUS_MODE(chan));
/* Mask interrupts by writing to CSR7 */
- writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
+ writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_CHAN_INTR_ENA(chan));
}
static void dwmac1000_dma_init_rx(struct stmmac_priv *priv,
@@ -116,7 +94,7 @@ static void dwmac1000_dma_init_rx(struct stmmac_priv *priv,
dma_addr_t dma_rx_phy, u32 chan)
{
/* RX descriptor base address list must be written into DMA CSR3 */
- writel(lower_32_bits(dma_rx_phy), ioaddr + DMA_RCV_BASE_ADDR);
+ writel(lower_32_bits(dma_rx_phy), ioaddr + DMA_CHAN_RCV_BASE_ADDR(chan));
}
static void dwmac1000_dma_init_tx(struct stmmac_priv *priv,
@@ -125,7 +103,7 @@ static void dwmac1000_dma_init_tx(struct stmmac_priv *priv,
dma_addr_t dma_tx_phy, u32 chan)
{
/* TX descriptor base address list must be written into DMA CSR4 */
- writel(lower_32_bits(dma_tx_phy), ioaddr + DMA_TX_BASE_ADDR);
+ writel(lower_32_bits(dma_tx_phy), ioaddr + DMA_CHAN_TX_BASE_ADDR(chan));
}
static u32 dwmac1000_configure_fc(u32 csr6, int rxfifosz)
@@ -153,14 +131,14 @@ static void dwmac1000_dma_operation_mode_rx(struct stmmac_priv *priv,
void __iomem *ioaddr, int mode,
u32 channel, int fifosz, u8 qmode)
{
- u32 csr6 = readl(ioaddr + DMA_CONTROL);
+ u32 csr6 = readl(ioaddr + DMA_CHAN_CONTROL(channel));
if (mode == SF_DMA_MODE) {
pr_debug("GMAC: enable RX store and forward mode\n");
- csr6 |= DMA_CONTROL_RSF;
+ csr6 |= DMA_CONTROL_RSF | DMA_CONTROL_DFF;
} else {
pr_debug("GMAC: disable RX SF mode (threshold %d)\n", mode);
- csr6 &= ~DMA_CONTROL_RSF;
+ csr6 &= ~(DMA_CONTROL_RSF | DMA_CONTROL_DFF);
csr6 &= DMA_CONTROL_TC_RX_MASK;
if (mode <= 32)
csr6 |= DMA_CONTROL_RTC_32;
@@ -175,14 +153,14 @@ static void dwmac1000_dma_operation_mode_rx(struct stmmac_priv *priv,
/* Configure flow control based on rx fifo size */
csr6 = dwmac1000_configure_fc(csr6, fifosz);
- writel(csr6, ioaddr + DMA_CONTROL);
+ writel(csr6, ioaddr + DMA_CHAN_CONTROL(channel));
}
static void dwmac1000_dma_operation_mode_tx(struct stmmac_priv *priv,
void __iomem *ioaddr, int mode,
u32 channel, int fifosz, u8 qmode)
{
- u32 csr6 = readl(ioaddr + DMA_CONTROL);
+ u32 csr6 = readl(ioaddr + DMA_CHAN_CONTROL(channel));
if (mode == SF_DMA_MODE) {
pr_debug("GMAC: enable TX store and forward mode\n");
@@ -209,7 +187,7 @@ static void dwmac1000_dma_operation_mode_tx(struct stmmac_priv *priv,
csr6 |= DMA_CONTROL_TTC_256;
}
- writel(csr6, ioaddr + DMA_CONTROL);
+ writel(csr6, ioaddr + DMA_CHAN_CONTROL(channel));
}
static void dwmac1000_dump_dma_regs(struct stmmac_priv *priv,
@@ -271,12 +249,12 @@ static int dwmac1000_get_hw_feature(void __iomem *ioaddr,
static void dwmac1000_rx_watchdog(struct stmmac_priv *priv,
void __iomem *ioaddr, u32 riwt, u32 queue)
{
- writel(riwt, ioaddr + DMA_RX_WATCHDOG);
+ writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(queue));
}
const struct stmmac_dma_ops dwmac1000_dma_ops = {
.reset = dwmac_dma_reset,
- .init = dwmac1000_dma_init,
+ .init_chan = dwmac1000_dma_init_channel,
.init_rx_chan = dwmac1000_dma_init_rx,
.init_tx_chan = dwmac1000_dma_init_tx,
.axi = dwmac1000_dma_axi,
@@ -284,6 +262,7 @@ const struct stmmac_dma_ops dwmac1000_dma_ops = {
.dma_rx_mode = dwmac1000_dma_operation_mode_rx,
.dma_tx_mode = dwmac1000_dma_operation_mode_tx,
.enable_dma_transmission = dwmac_enable_dma_transmission,
+ .enable_dma_reception = dwmac_enable_dma_reception,
.enable_dma_irq = dwmac_enable_dma_irq,
.disable_dma_irq = dwmac_disable_dma_irq,
.start_tx = dwmac_dma_start_tx,
@@ -294,3 +273,4 @@ const struct stmmac_dma_ops dwmac1000_dma_ops = {
.get_hw_feature = dwmac1000_get_hw_feature,
.rx_watchdog = dwmac1000_rx_watchdog,
};
+EXPORT_SYMBOL_GPL(dwmac1000_dma_ops);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index 7667d103cd0e..14e847c0e1a9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -15,7 +15,7 @@
*******************************************************************************/
#include <linux/crc32.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include "stmmac.h"
#include "dwmac100.h"
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
index dea270f60cc3..82957db47c99 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
@@ -14,12 +14,12 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
-#include <asm/io.h>
+#include <linux/io.h>
#include "dwmac100.h"
#include "dwmac_dma.h"
static void dwmac100_dma_init(void __iomem *ioaddr,
- struct stmmac_dma_cfg *dma_cfg, int atds)
+ struct stmmac_dma_cfg *dma_cfg)
{
/* Enable Application Access by writing to DMA CSR0 */
writel(DMA_BUS_MODE_DEFAULT | (dma_cfg->pbl << DMA_BUS_MODE_PBL_SHIFT),
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index d3c5306f1c41..3cb733781e1e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -17,11 +17,7 @@
#define GMAC_EXT_CONFIG 0x00000004
#define GMAC_PACKET_FILTER 0x00000008
#define GMAC_HASH_TAB(x) (0x10 + (x) * 4)
-#define GMAC_VLAN_TAG 0x00000050
-#define GMAC_VLAN_TAG_DATA 0x00000054
-#define GMAC_VLAN_HASH_TABLE 0x00000058
#define GMAC_RX_FLOW_CTRL 0x00000090
-#define GMAC_VLAN_INCL 0x00000060
#define GMAC_QX_TX_FLOW_CTRL(x) (0x70 + x * 4)
#define GMAC_TXQ_PRTY_MAP0 0x98
#define GMAC_TXQ_PRTY_MAP1 0x9C
@@ -31,7 +27,6 @@
#define GMAC_RXQ_CTRL3 0x000000ac
#define GMAC_INT_STATUS 0x000000b0
#define GMAC_INT_EN 0x000000b4
-#define GMAC_1US_TIC_COUNTER 0x000000dc
#define GMAC_PCS_BASE 0x000000e0
#define GMAC_PHYIF_CONTROL_STATUS 0x000000f8
#define GMAC_PMT 0x000000c0
@@ -44,6 +39,7 @@
#define GMAC_MDIO_DATA 0x00000204
#define GMAC_GPIO_STATUS 0x0000020C
#define GMAC_ARP_ADDR 0x00000210
+#define GMAC_EXT_CFG1 0x00000238
#define GMAC_ADDR_HIGH(reg) (0x300 + reg * 8)
#define GMAC_ADDR_LOW(reg) (0x304 + reg * 8)
#define GMAC_L3L4_CTRL(reg) (0x900 + (reg) * 0x30)
@@ -68,7 +64,6 @@
#define GMAC_RXQCTRL_TACPQE BIT(21)
#define GMAC_RXQCTRL_TACPQE_SHIFT 21
#define GMAC_RXQCTRL_FPRQ GENMASK(26, 24)
-#define GMAC_RXQCTRL_FPRQ_SHIFT 24
/* MAC Packet Filtering */
#define GMAC_PACKET_FILTER_PR BIT(0)
@@ -82,42 +77,6 @@
#define GMAC_MAX_PERFECT_ADDRESSES 128
-/* MAC VLAN */
-#define GMAC_VLAN_EDVLP BIT(26)
-#define GMAC_VLAN_VTHM BIT(25)
-#define GMAC_VLAN_DOVLTC BIT(20)
-#define GMAC_VLAN_ESVL BIT(18)
-#define GMAC_VLAN_ETV BIT(16)
-#define GMAC_VLAN_VID GENMASK(15, 0)
-#define GMAC_VLAN_VLTI BIT(20)
-#define GMAC_VLAN_CSVL BIT(19)
-#define GMAC_VLAN_VLC GENMASK(17, 16)
-#define GMAC_VLAN_VLC_SHIFT 16
-#define GMAC_VLAN_VLHT GENMASK(15, 0)
-
-/* MAC VLAN Tag */
-#define GMAC_VLAN_TAG_VID GENMASK(15, 0)
-#define GMAC_VLAN_TAG_ETV BIT(16)
-
-/* MAC VLAN Tag Control */
-#define GMAC_VLAN_TAG_CTRL_OB BIT(0)
-#define GMAC_VLAN_TAG_CTRL_CT BIT(1)
-#define GMAC_VLAN_TAG_CTRL_OFS_MASK GENMASK(6, 2)
-#define GMAC_VLAN_TAG_CTRL_OFS_SHIFT 2
-#define GMAC_VLAN_TAG_CTRL_EVLS_MASK GENMASK(22, 21)
-#define GMAC_VLAN_TAG_CTRL_EVLS_SHIFT 21
-#define GMAC_VLAN_TAG_CTRL_EVLRXS BIT(24)
-
-#define GMAC_VLAN_TAG_STRIP_NONE (0x0 << GMAC_VLAN_TAG_CTRL_EVLS_SHIFT)
-#define GMAC_VLAN_TAG_STRIP_PASS (0x1 << GMAC_VLAN_TAG_CTRL_EVLS_SHIFT)
-#define GMAC_VLAN_TAG_STRIP_FAIL (0x2 << GMAC_VLAN_TAG_CTRL_EVLS_SHIFT)
-#define GMAC_VLAN_TAG_STRIP_ALL (0x3 << GMAC_VLAN_TAG_CTRL_EVLS_SHIFT)
-
-/* MAC VLAN Tag Data/Filter */
-#define GMAC_VLAN_TAG_DATA_VID GENMASK(15, 0)
-#define GMAC_VLAN_TAG_DATA_VEN BIT(16)
-#define GMAC_VLAN_TAG_DATA_ETV BIT(17)
-
/* MAC RX Queue Enable */
#define GMAC_RX_QUEUE_CLEAR(queue) ~(GENMASK(1, 0) << ((queue) * 2))
#define GMAC_RX_AV_QUEUE_ENABLE(queue) BIT((queue) * 2)
@@ -147,9 +106,6 @@
#define GMAC_INT_LPI_EN BIT(5)
#define GMAC_INT_TSIE BIT(12)
-#define GMAC_PCS_IRQ_DEFAULT (GMAC_INT_RGSMIIS | GMAC_INT_PCS_LINK | \
- GMAC_INT_PCS_ANE)
-
#define GMAC_INT_DEFAULT_ENABLE (GMAC_INT_PMT_EN | GMAC_INT_LPI_EN | \
GMAC_INT_TSIE)
@@ -177,23 +133,13 @@ enum power_event {
/* Energy Efficient Ethernet (EEE) for GMAC4
*
* LPI status, timer and control register offset
+ * For LPI control and status bit definitions, see common.h.
*/
#define GMAC4_LPI_CTRL_STATUS 0xd0
#define GMAC4_LPI_TIMER_CTRL 0xd4
#define GMAC4_LPI_ENTRY_TIMER 0xd8
#define GMAC4_MAC_ONEUS_TIC_COUNTER 0xdc
-/* LPI control and status defines */
-#define GMAC4_LPI_CTRL_STATUS_LPITCSE BIT(21) /* LPI Tx Clock Stop Enable */
-#define GMAC4_LPI_CTRL_STATUS_LPIATE BIT(20) /* LPI Timer Enable */
-#define GMAC4_LPI_CTRL_STATUS_LPITXA BIT(19) /* Enable LPI TX Automate */
-#define GMAC4_LPI_CTRL_STATUS_PLS BIT(17) /* PHY Link Status */
-#define GMAC4_LPI_CTRL_STATUS_LPIEN BIT(16) /* LPI Enable */
-#define GMAC4_LPI_CTRL_STATUS_RLPIEX BIT(3) /* Receive LPI Exit */
-#define GMAC4_LPI_CTRL_STATUS_RLPIEN BIT(2) /* Receive LPI Entry */
-#define GMAC4_LPI_CTRL_STATUS_TLPIEX BIT(1) /* Transmit LPI Exit */
-#define GMAC4_LPI_CTRL_STATUS_TLPIEN BIT(0) /* Transmit LPI Entry */
-
/* MAC Debug bitmap */
#define GMAC_DEBUG_TFCSTS_MASK GENMASK(18, 17)
#define GMAC_DEBUG_TFCSTS_SHIFT 17
@@ -284,6 +230,10 @@ enum power_event {
#define GMAC_HW_FEAT_DVLAN BIT(5)
#define GMAC_HW_FEAT_NRVF GENMASK(2, 0)
+/* MAC extended config 1 */
+#define GMAC_CONFIG1_SAVE_EN BIT(24)
+#define GMAC_CONFIG1_SPLM(v) FIELD_PREP(GENMASK(9, 8), v)
+
/* GMAC GPIO Status reg */
#define GMAC_GPO0 BIT(16)
#define GMAC_GPO1 BIT(17)
@@ -388,9 +338,10 @@ static inline u32 mtl_chanx_base_addr(const struct dwmac4_addrs *addrs,
#define MTL_OP_MODE_RFA_SHIFT 8
#define MTL_OP_MODE_EHFC BIT(7)
+#define MTL_OP_MODE_DIS_TCP_EF BIT(6)
-#define MTL_OP_MODE_RTC_MASK 0x18
-#define MTL_OP_MODE_RTC_SHIFT 3
+#define MTL_OP_MODE_RTC_MASK GENMASK(1, 0)
+#define MTL_OP_MODE_RTC_SHIFT 0
#define MTL_OP_MODE_RTC_32 (1 << MTL_OP_MODE_RTC_SHIFT)
#define MTL_OP_MODE_RTC_64 0
@@ -573,8 +524,6 @@ static inline u32 mtl_low_credx_base_addr(const struct dwmac4_addrs *addrs,
#define GMAC_PHYIF_CTRLSTATUS_LNKSTS BIT(19)
#define GMAC_PHYIF_CTRLSTATUS_JABTO BIT(20)
#define GMAC_PHYIF_CTRLSTATUS_FALSECARDET BIT(21)
-/* LNKMOD */
-#define GMAC_PHYIF_CTRLSTATUS_LNKMOD_MASK 0x1
/* LNKSPEED */
#define GMAC_PHYIF_CTRLSTATUS_SPEED_125 0x2
#define GMAC_PHYIF_CTRLSTATUS_SPEED_25 0x1
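
A quick worked example of the new GMAC_EXT_CFG1 helpers introduced above; the values follow directly from the FIELD_PREP/BIT definitions, and the register itself is programmed in dwmac4_enable_sph() later in this patch:

	GMAC_CONFIG1_SPLM(1)  == FIELD_PREP(GENMASK(9, 8), 1) == 0x00000100	/* split mode: L2OFST */
	GMAC_CONFIG1_SAVE_EN  == BIT(24)                       == 0x01000000	/* enable split AV mode */
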
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index a38226d7cc6a..a4282fd7c3c7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -14,61 +14,60 @@
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include "stmmac.h"
+#include "stmmac_fpe.h"
#include "stmmac_pcs.h"
+#include "stmmac_vlan.h"
#include "dwmac4.h"
#include "dwmac5.h"
+static int dwmac4_pcs_init(struct stmmac_priv *priv)
+{
+ if (!priv->dma_cap.pcs)
+ return 0;
+
+ return stmmac_integrated_pcs_init(priv, GMAC_PCS_BASE,
+ GMAC_INT_PCS_LINK | GMAC_INT_PCS_ANE);
+}
+
static void dwmac4_core_init(struct mac_device_info *hw,
struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
void __iomem *ioaddr = hw->pcsr;
- u32 value = readl(ioaddr + GMAC_CONFIG);
- u32 clk_rate;
-
- value |= GMAC_CORE_INIT;
-
- if (hw->ps) {
- value |= GMAC_CONFIG_TE;
-
- value &= hw->link.speed_mask;
- switch (hw->ps) {
- case SPEED_1000:
- value |= hw->link.speed1000;
- break;
- case SPEED_100:
- value |= hw->link.speed100;
- break;
- case SPEED_10:
- value |= hw->link.speed10;
- break;
- }
- }
+ unsigned long clk_rate;
+ u32 value;
- writel(value, ioaddr + GMAC_CONFIG);
+ value = readl(ioaddr + GMAC_CONFIG);
+ writel(value | GMAC_CORE_INIT, ioaddr + GMAC_CONFIG);
/* Configure LPI 1us counter to number of CSR clock ticks in 1us - 1 */
clk_rate = clk_get_rate(priv->plat->stmmac_clk);
writel((clk_rate / 1000000) - 1, ioaddr + GMAC4_MAC_ONEUS_TIC_COUNTER);
/* Enable GMAC interrupts */
- value = GMAC_INT_DEFAULT_ENABLE;
-
- if (hw->pcs)
- value |= GMAC_PCS_IRQ_DEFAULT;
-
- /* Enable FPE interrupt */
- if ((GMAC_HW_FEAT_FPESEL & readl(ioaddr + GMAC_HW_FEATURE3)) >> 26)
- value |= GMAC_INT_FPE_EN;
-
- writel(value, ioaddr + GMAC_INT_EN);
+ writel(GMAC_INT_DEFAULT_ENABLE, ioaddr + GMAC_INT_EN);
if (GMAC_INT_DEFAULT_ENABLE & GMAC_INT_TSIE)
init_waitqueue_head(&priv->tstamp_busy_wait);
}
-static void dwmac4_phylink_get_caps(struct stmmac_priv *priv)
+static void dwmac4_irq_modify(struct mac_device_info *hw, u32 disable,
+ u32 enable)
+{
+ void __iomem *int_mask = hw->pcsr + GMAC_INT_EN;
+ unsigned long flags;
+ u32 value;
+
+ spin_lock_irqsave(&hw->irq_ctrl_lock, flags);
+ value = readl(int_mask) & ~disable;
+ value |= enable;
+ writel(value, int_mask);
+ spin_unlock_irqrestore(&hw->irq_ctrl_lock, flags);
+}
+
+static void dwmac4_update_caps(struct stmmac_priv *priv)
{
if (priv->plat->tx_queues_to_use > 1)
priv->hw->link.caps &= ~(MAC_10HD | MAC_100HD | MAC_1000HD);
@@ -378,33 +377,46 @@ static void dwmac4_get_umac_addr(struct mac_device_info *hw,
GMAC_ADDR_LOW(reg_n));
}
-static void dwmac4_set_eee_mode(struct mac_device_info *hw,
- bool en_tx_lpi_clockgating)
+static int dwmac4_set_lpi_mode(struct mac_device_info *hw,
+ enum stmmac_lpi_mode mode,
+ bool en_tx_lpi_clockgating, u32 et)
{
void __iomem *ioaddr = hw->pcsr;
- u32 value;
+ u32 value, mask;
- /* Enable the link status receive on RGMII, SGMII ore SMII
- * receive path and instruct the transmit to enter in LPI
- * state.
- */
- value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
- value |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA;
+ if (mode == STMMAC_LPI_DISABLE) {
+ value = 0;
+ } else {
+ value = LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA;
- if (en_tx_lpi_clockgating)
- value |= GMAC4_LPI_CTRL_STATUS_LPITCSE;
+ if (mode == STMMAC_LPI_TIMER) {
+ /* Return -ERANGE if the timer is larger than the
+ * register field.
+ */
+ if (et > STMMAC_ET_MAX)
+ return -ERANGE;
- writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
-}
+ /* Set the hardware LPI entry timer */
+ writel(et, ioaddr + GMAC4_LPI_ENTRY_TIMER);
-static void dwmac4_reset_eee_mode(struct mac_device_info *hw)
-{
- void __iomem *ioaddr = hw->pcsr;
- u32 value;
+ /* Interpret a zero LPI entry timer to mean
+ * immediate entry into LPI mode.
+ */
+ if (et)
+ value |= LPI_CTRL_STATUS_LPIATE;
+ }
- value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
- value &= ~(GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA);
+ if (en_tx_lpi_clockgating)
+ value |= LPI_CTRL_STATUS_LPITCSE;
+ }
+
+ mask = LPI_CTRL_STATUS_LPIATE | LPI_CTRL_STATUS_LPIEN |
+ LPI_CTRL_STATUS_LPITXA | LPI_CTRL_STATUS_LPITCSE;
+
+ value |= readl(ioaddr + GMAC4_LPI_CTRL_STATUS) & ~mask;
writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
+
+ return 0;
}
static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link)
@@ -415,34 +427,13 @@ static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link)
value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
if (link)
- value |= GMAC4_LPI_CTRL_STATUS_PLS;
+ value |= LPI_CTRL_STATUS_PLS;
else
- value &= ~GMAC4_LPI_CTRL_STATUS_PLS;
+ value &= ~LPI_CTRL_STATUS_PLS;
writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
}
-static void dwmac4_set_eee_lpi_entry_timer(struct mac_device_info *hw, int et)
-{
- void __iomem *ioaddr = hw->pcsr;
- int value = et & STMMAC_ET_MAX;
- int regval;
-
- /* Program LPI entry timer value into register */
- writel(value, ioaddr + GMAC4_LPI_ENTRY_TIMER);
-
- /* Enable/disable LPI entry timer */
- regval = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
- regval |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA;
-
- if (et)
- regval |= GMAC4_LPI_CTRL_STATUS_LPIATE;
- else
- regval &= ~GMAC4_LPI_CTRL_STATUS_LPIATE;
-
- writel(regval, ioaddr + GMAC4_LPI_CTRL_STATUS);
-}
-
static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
{
void __iomem *ioaddr = hw->pcsr;
@@ -458,166 +449,6 @@ static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
writel(value, ioaddr + GMAC4_LPI_TIMER_CTRL);
}
-static void dwmac4_write_single_vlan(struct net_device *dev, u16 vid)
-{
- void __iomem *ioaddr = (void __iomem *)dev->base_addr;
- u32 val;
-
- val = readl(ioaddr + GMAC_VLAN_TAG);
- val &= ~GMAC_VLAN_TAG_VID;
- val |= GMAC_VLAN_TAG_ETV | vid;
-
- writel(val, ioaddr + GMAC_VLAN_TAG);
-}
-
-static int dwmac4_write_vlan_filter(struct net_device *dev,
- struct mac_device_info *hw,
- u8 index, u32 data)
-{
- void __iomem *ioaddr = (void __iomem *)dev->base_addr;
- int i, timeout = 10;
- u32 val;
-
- if (index >= hw->num_vlan)
- return -EINVAL;
-
- writel(data, ioaddr + GMAC_VLAN_TAG_DATA);
-
- val = readl(ioaddr + GMAC_VLAN_TAG);
- val &= ~(GMAC_VLAN_TAG_CTRL_OFS_MASK |
- GMAC_VLAN_TAG_CTRL_CT |
- GMAC_VLAN_TAG_CTRL_OB);
- val |= (index << GMAC_VLAN_TAG_CTRL_OFS_SHIFT) | GMAC_VLAN_TAG_CTRL_OB;
-
- writel(val, ioaddr + GMAC_VLAN_TAG);
-
- for (i = 0; i < timeout; i++) {
- val = readl(ioaddr + GMAC_VLAN_TAG);
- if (!(val & GMAC_VLAN_TAG_CTRL_OB))
- return 0;
- udelay(1);
- }
-
- netdev_err(dev, "Timeout accessing MAC_VLAN_Tag_Filter\n");
-
- return -EBUSY;
-}
-
-static int dwmac4_add_hw_vlan_rx_fltr(struct net_device *dev,
- struct mac_device_info *hw,
- __be16 proto, u16 vid)
-{
- int index = -1;
- u32 val = 0;
- int i, ret;
-
- if (vid > 4095)
- return -EINVAL;
-
- /* Single Rx VLAN Filter */
- if (hw->num_vlan == 1) {
- /* For single VLAN filter, VID 0 means VLAN promiscuous */
- if (vid == 0) {
- netdev_warn(dev, "Adding VLAN ID 0 is not supported\n");
- return -EPERM;
- }
-
- if (hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) {
- netdev_err(dev, "Only single VLAN ID supported\n");
- return -EPERM;
- }
-
- hw->vlan_filter[0] = vid;
- dwmac4_write_single_vlan(dev, vid);
-
- return 0;
- }
-
- /* Extended Rx VLAN Filter Enable */
- val |= GMAC_VLAN_TAG_DATA_ETV | GMAC_VLAN_TAG_DATA_VEN | vid;
-
- for (i = 0; i < hw->num_vlan; i++) {
- if (hw->vlan_filter[i] == val)
- return 0;
- else if (!(hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN))
- index = i;
- }
-
- if (index == -1) {
- netdev_err(dev, "MAC_VLAN_Tag_Filter full (size: %0u)\n",
- hw->num_vlan);
- return -EPERM;
- }
-
- ret = dwmac4_write_vlan_filter(dev, hw, index, val);
-
- if (!ret)
- hw->vlan_filter[index] = val;
-
- return ret;
-}
-
-static int dwmac4_del_hw_vlan_rx_fltr(struct net_device *dev,
- struct mac_device_info *hw,
- __be16 proto, u16 vid)
-{
- int i, ret = 0;
-
- /* Single Rx VLAN Filter */
- if (hw->num_vlan == 1) {
- if ((hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) == vid) {
- hw->vlan_filter[0] = 0;
- dwmac4_write_single_vlan(dev, 0);
- }
- return 0;
- }
-
- /* Extended Rx VLAN Filter Enable */
- for (i = 0; i < hw->num_vlan; i++) {
- if ((hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VID) == vid) {
- ret = dwmac4_write_vlan_filter(dev, hw, i, 0);
-
- if (!ret)
- hw->vlan_filter[i] = 0;
- else
- return ret;
- }
- }
-
- return ret;
-}
-
-static void dwmac4_restore_hw_vlan_rx_fltr(struct net_device *dev,
- struct mac_device_info *hw)
-{
- void __iomem *ioaddr = hw->pcsr;
- u32 value;
- u32 hash;
- u32 val;
- int i;
-
- /* Single Rx VLAN Filter */
- if (hw->num_vlan == 1) {
- dwmac4_write_single_vlan(dev, hw->vlan_filter[0]);
- return;
- }
-
- /* Extended Rx VLAN Filter Enable */
- for (i = 0; i < hw->num_vlan; i++) {
- if (hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN) {
- val = hw->vlan_filter[i];
- dwmac4_write_vlan_filter(dev, hw, i, val);
- }
- }
-
- hash = readl(ioaddr + GMAC_VLAN_HASH_TABLE);
- if (hash & GMAC_VLAN_VLHT) {
- value = readl(ioaddr + GMAC_VLAN_TAG);
- value |= GMAC_VLAN_VTHM;
- writel(value, ioaddr + GMAC_VLAN_TAG);
- }
-}
-
static void dwmac4_set_filter(struct mac_device_info *hw,
struct net_device *dev)
{
@@ -752,53 +583,9 @@ static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
}
}
-static void dwmac4_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral,
- bool loopback)
+static void dwmac4_ctrl_ane(struct stmmac_priv *priv, bool ane, bool srgmi_ral)
{
- dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
-}
-
-static void dwmac4_rane(void __iomem *ioaddr, bool restart)
-{
- dwmac_rane(ioaddr, GMAC_PCS_BASE, restart);
-}
-
-static void dwmac4_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv)
-{
- dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
-}
-
-/* RGMII or SMII interface */
-static void dwmac4_phystatus(void __iomem *ioaddr, struct stmmac_extra_stats *x)
-{
- u32 status;
-
- status = readl(ioaddr + GMAC_PHYIF_CONTROL_STATUS);
- x->irq_rgmii_n++;
-
- /* Check the link status */
- if (status & GMAC_PHYIF_CTRLSTATUS_LNKSTS) {
- int speed_value;
-
- x->pcs_link = 1;
-
- speed_value = ((status & GMAC_PHYIF_CTRLSTATUS_SPEED) >>
- GMAC_PHYIF_CTRLSTATUS_SPEED_SHIFT);
- if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_125)
- x->pcs_speed = SPEED_1000;
- else if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_25)
- x->pcs_speed = SPEED_100;
- else
- x->pcs_speed = SPEED_10;
-
- x->pcs_duplex = (status & GMAC_PHYIF_CTRLSTATUS_LNKMOD_MASK);
-
- pr_info("Link is Up - %d/%s\n", (int)x->pcs_speed,
- x->pcs_duplex ? "Full" : "Half");
- } else {
- x->pcs_link = 0;
- pr_info("Link is Down\n");
- }
+ dwmac_ctrl_ane(priv->ioaddr, GMAC_PCS_BASE, ane, srgmi_ral);
}
static int dwmac4_irq_mtl_status(struct stmmac_priv *priv,
@@ -857,23 +644,21 @@ static int dwmac4_irq_status(struct mac_device_info *hw,
/* Clear LPI interrupt by reading MAC_LPI_Control_Status */
u32 status = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
- if (status & GMAC4_LPI_CTRL_STATUS_TLPIEN) {
+ if (status & LPI_CTRL_STATUS_TLPIEN) {
ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
x->irq_tx_path_in_lpi_mode_n++;
}
- if (status & GMAC4_LPI_CTRL_STATUS_TLPIEX) {
+ if (status & LPI_CTRL_STATUS_TLPIEX) {
ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
x->irq_tx_path_exit_lpi_mode_n++;
}
- if (status & GMAC4_LPI_CTRL_STATUS_RLPIEN)
+ if (status & LPI_CTRL_STATUS_RLPIEN)
x->irq_rx_path_in_lpi_mode_n++;
- if (status & GMAC4_LPI_CTRL_STATUS_RLPIEX)
+ if (status & LPI_CTRL_STATUS_RLPIEX)
x->irq_rx_path_exit_lpi_mode_n++;
}
dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
- if (intr_status & PCS_RGSMIIIS_IRQ)
- dwmac4_phystatus(ioaddr, x);
return ret;
}
@@ -981,45 +766,6 @@ static void dwmac4_set_mac_loopback(void __iomem *ioaddr, bool enable)
writel(value, ioaddr + GMAC_CONFIG);
}
-static void dwmac4_update_vlan_hash(struct mac_device_info *hw, u32 hash,
- __le16 perfect_match, bool is_double)
-{
- void __iomem *ioaddr = hw->pcsr;
- u32 value;
-
- writel(hash, ioaddr + GMAC_VLAN_HASH_TABLE);
-
- value = readl(ioaddr + GMAC_VLAN_TAG);
-
- if (hash) {
- value |= GMAC_VLAN_VTHM | GMAC_VLAN_ETV;
- if (is_double) {
- value |= GMAC_VLAN_EDVLP;
- value |= GMAC_VLAN_ESVL;
- value |= GMAC_VLAN_DOVLTC;
- }
-
- writel(value, ioaddr + GMAC_VLAN_TAG);
- } else if (perfect_match) {
- u32 value = GMAC_VLAN_ETV;
-
- if (is_double) {
- value |= GMAC_VLAN_EDVLP;
- value |= GMAC_VLAN_ESVL;
- value |= GMAC_VLAN_DOVLTC;
- }
-
- writel(value | perfect_match, ioaddr + GMAC_VLAN_TAG);
- } else {
- value &= ~(GMAC_VLAN_VTHM | GMAC_VLAN_ETV);
- value &= ~(GMAC_VLAN_EDVLP | GMAC_VLAN_ESVL);
- value &= ~GMAC_VLAN_DOVLTC;
- value &= ~GMAC_VLAN_VID;
-
- writel(value, ioaddr + GMAC_VLAN_TAG);
- }
-}
-
static void dwmac4_sarc_configure(void __iomem *ioaddr, int val)
{
u32 value = readl(ioaddr + GMAC_CONFIG);
@@ -1030,19 +776,6 @@ static void dwmac4_sarc_configure(void __iomem *ioaddr, int val)
writel(value, ioaddr + GMAC_CONFIG);
}
-static void dwmac4_enable_vlan(struct mac_device_info *hw, u32 type)
-{
- void __iomem *ioaddr = hw->pcsr;
- u32 value;
-
- value = readl(ioaddr + GMAC_VLAN_INCL);
- value |= GMAC_VLAN_VLTI;
- value |= GMAC_VLAN_CSVL; /* Only use SVLAN */
- value &= ~GMAC_VLAN_VLC;
- value |= (type << GMAC_VLAN_VLC_SHIFT) & GMAC_VLAN_VLC;
- writel(value, ioaddr + GMAC_VLAN_INCL);
-}
-
static void dwmac4_set_arp_offload(struct mac_device_info *hw, bool en,
u32 addr)
{
@@ -1159,38 +892,11 @@ static int dwmac4_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
return 0;
}
-static void dwmac4_rx_hw_vlan(struct mac_device_info *hw,
- struct dma_desc *rx_desc, struct sk_buff *skb)
-{
- if (hw->desc->get_rx_vlan_valid(rx_desc)) {
- u16 vid = hw->desc->get_rx_vlan_tci(rx_desc);
-
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
- }
-}
-
-static void dwmac4_set_hw_vlan_mode(struct mac_device_info *hw)
-{
- void __iomem *ioaddr = hw->pcsr;
- u32 value = readl(ioaddr + GMAC_VLAN_TAG);
-
- value &= ~GMAC_VLAN_TAG_CTRL_EVLS_MASK;
-
- if (hw->hw_vlan_en)
- /* Always strip VLAN on Receive */
- value |= GMAC_VLAN_TAG_STRIP_ALL;
- else
- /* Do not strip VLAN on Receive */
- value |= GMAC_VLAN_TAG_STRIP_NONE;
-
- /* Enable outer VLAN Tag in Rx DMA descriptor */
- value |= GMAC_VLAN_TAG_CTRL_EVLRXS;
- writel(value, ioaddr + GMAC_VLAN_TAG);
-}
-
const struct stmmac_ops dwmac4_ops = {
+ .pcs_init = dwmac4_pcs_init,
.core_init = dwmac4_core_init,
- .phylink_get_caps = dwmac4_phylink_get_caps,
+ .irq_modify = dwmac4_irq_modify,
+ .update_caps = dwmac4_update_caps,
.set_mac = stmmac_set_mac,
.rx_ipc = dwmac4_rx_ipc_enable,
.rx_queue_enable = dwmac4_rx_queue_enable,
@@ -1209,33 +915,24 @@ const struct stmmac_ops dwmac4_ops = {
.pmt = dwmac4_pmt,
.set_umac_addr = dwmac4_set_umac_addr,
.get_umac_addr = dwmac4_get_umac_addr,
- .set_eee_mode = dwmac4_set_eee_mode,
- .reset_eee_mode = dwmac4_reset_eee_mode,
- .set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
+ .set_lpi_mode = dwmac4_set_lpi_mode,
.set_eee_timer = dwmac4_set_eee_timer,
.set_eee_pls = dwmac4_set_eee_pls,
.pcs_ctrl_ane = dwmac4_ctrl_ane,
- .pcs_rane = dwmac4_rane,
- .pcs_get_adv_lp = dwmac4_get_adv_lp,
.debug = dwmac4_debug,
.set_filter = dwmac4_set_filter,
.set_mac_loopback = dwmac4_set_mac_loopback,
- .update_vlan_hash = dwmac4_update_vlan_hash,
.sarc_configure = dwmac4_sarc_configure,
- .enable_vlan = dwmac4_enable_vlan,
.set_arp_offload = dwmac4_set_arp_offload,
.config_l3_filter = dwmac4_config_l3_filter,
.config_l4_filter = dwmac4_config_l4_filter,
- .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
- .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
- .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
- .rx_hw_vlan = dwmac4_rx_hw_vlan,
- .set_hw_vlan_mode = dwmac4_set_hw_vlan_mode,
};
const struct stmmac_ops dwmac410_ops = {
+ .pcs_init = dwmac4_pcs_init,
.core_init = dwmac4_core_init,
- .phylink_get_caps = dwmac4_phylink_get_caps,
+ .irq_modify = dwmac4_irq_modify,
+ .update_caps = dwmac4_update_caps,
.set_mac = stmmac_dwmac4_set_mac,
.rx_ipc = dwmac4_rx_ipc_enable,
.rx_queue_enable = dwmac4_rx_queue_enable,
@@ -1254,37 +951,26 @@ const struct stmmac_ops dwmac410_ops = {
.pmt = dwmac4_pmt,
.set_umac_addr = dwmac4_set_umac_addr,
.get_umac_addr = dwmac4_get_umac_addr,
- .set_eee_mode = dwmac4_set_eee_mode,
- .reset_eee_mode = dwmac4_reset_eee_mode,
- .set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
+ .set_lpi_mode = dwmac4_set_lpi_mode,
.set_eee_timer = dwmac4_set_eee_timer,
.set_eee_pls = dwmac4_set_eee_pls,
.pcs_ctrl_ane = dwmac4_ctrl_ane,
- .pcs_rane = dwmac4_rane,
- .pcs_get_adv_lp = dwmac4_get_adv_lp,
.debug = dwmac4_debug,
.set_filter = dwmac4_set_filter,
.flex_pps_config = dwmac5_flex_pps_config,
.set_mac_loopback = dwmac4_set_mac_loopback,
- .update_vlan_hash = dwmac4_update_vlan_hash,
.sarc_configure = dwmac4_sarc_configure,
- .enable_vlan = dwmac4_enable_vlan,
.set_arp_offload = dwmac4_set_arp_offload,
.config_l3_filter = dwmac4_config_l3_filter,
.config_l4_filter = dwmac4_config_l4_filter,
- .fpe_configure = dwmac5_fpe_configure,
- .fpe_send_mpacket = dwmac5_fpe_send_mpacket,
- .fpe_irq_status = dwmac5_fpe_irq_status,
- .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
- .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
- .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
- .rx_hw_vlan = dwmac4_rx_hw_vlan,
- .set_hw_vlan_mode = dwmac4_set_hw_vlan_mode,
+ .fpe_map_preemption_class = dwmac5_fpe_map_preemption_class,
};
const struct stmmac_ops dwmac510_ops = {
+ .pcs_init = dwmac4_pcs_init,
.core_init = dwmac4_core_init,
- .phylink_get_caps = dwmac4_phylink_get_caps,
+ .irq_modify = dwmac4_irq_modify,
+ .update_caps = dwmac4_update_caps,
.set_mac = stmmac_dwmac4_set_mac,
.rx_ipc = dwmac4_rx_ipc_enable,
.rx_queue_enable = dwmac4_rx_queue_enable,
@@ -1303,14 +989,10 @@ const struct stmmac_ops dwmac510_ops = {
.pmt = dwmac4_pmt,
.set_umac_addr = dwmac4_set_umac_addr,
.get_umac_addr = dwmac4_get_umac_addr,
- .set_eee_mode = dwmac4_set_eee_mode,
- .reset_eee_mode = dwmac4_reset_eee_mode,
- .set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
+ .set_lpi_mode = dwmac4_set_lpi_mode,
.set_eee_timer = dwmac4_set_eee_timer,
.set_eee_pls = dwmac4_set_eee_pls,
.pcs_ctrl_ane = dwmac4_ctrl_ane,
- .pcs_rane = dwmac4_rane,
- .pcs_get_adv_lp = dwmac4_get_adv_lp,
.debug = dwmac4_debug,
.set_filter = dwmac4_set_filter,
.safety_feat_config = dwmac5_safety_feat_config,
@@ -1319,53 +1001,13 @@ const struct stmmac_ops dwmac510_ops = {
.rxp_config = dwmac5_rxp_config,
.flex_pps_config = dwmac5_flex_pps_config,
.set_mac_loopback = dwmac4_set_mac_loopback,
- .update_vlan_hash = dwmac4_update_vlan_hash,
.sarc_configure = dwmac4_sarc_configure,
- .enable_vlan = dwmac4_enable_vlan,
.set_arp_offload = dwmac4_set_arp_offload,
.config_l3_filter = dwmac4_config_l3_filter,
.config_l4_filter = dwmac4_config_l4_filter,
- .fpe_configure = dwmac5_fpe_configure,
- .fpe_send_mpacket = dwmac5_fpe_send_mpacket,
- .fpe_irq_status = dwmac5_fpe_irq_status,
- .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
- .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
- .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
- .rx_hw_vlan = dwmac4_rx_hw_vlan,
- .set_hw_vlan_mode = dwmac4_set_hw_vlan_mode,
+ .fpe_map_preemption_class = dwmac5_fpe_map_preemption_class,
};
-static u32 dwmac4_get_num_vlan(void __iomem *ioaddr)
-{
- u32 val, num_vlan;
-
- val = readl(ioaddr + GMAC_HW_FEATURE3);
- switch (val & GMAC_HW_FEAT_NRVF) {
- case 0:
- num_vlan = 1;
- break;
- case 1:
- num_vlan = 4;
- break;
- case 2:
- num_vlan = 8;
- break;
- case 3:
- num_vlan = 16;
- break;
- case 4:
- num_vlan = 24;
- break;
- case 5:
- num_vlan = 32;
- break;
- default:
- num_vlan = 1;
- }
-
- return num_vlan;
-}
-
int dwmac4_setup(struct stmmac_priv *priv)
{
struct mac_device_info *mac = priv->hw;
@@ -1397,7 +1039,7 @@ int dwmac4_setup(struct stmmac_priv *priv)
mac->mii.reg_mask = GENMASK(20, 16);
mac->mii.clk_csr_shift = 8;
mac->mii.clk_csr_mask = GENMASK(11, 8);
- mac->num_vlan = dwmac4_get_num_vlan(priv->ioaddr);
+ mac->num_vlan = stmmac_get_num_vlan(priv->ioaddr);
return 0;
}
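
For reference, a minimal sketch of how the set_lpi_mode() contract added above can be driven. The caller below is illustrative only (the real policy lives in the common stmmac code, outside this hunk); the error values mirror what the dwmac4 implementation documents.

static int lpi_entry_example(struct mac_device_info *hw, u32 entry_timer_us)
{
	int ret;

	/* Prefer hardware-timed entry: dwmac4 returns -ERANGE when the
	 * timer does not fit the register field, and cores without an
	 * entry timer (dwxgmac2 below) return -EOPNOTSUPP for
	 * STMMAC_LPI_TIMER.
	 */
	ret = dwmac4_set_lpi_mode(hw, STMMAC_LPI_TIMER, false, entry_timer_us);
	if (ret)
		/* Fall back to forced (software-managed) LPI entry. */
		ret = dwmac4_set_lpi_mode(hw, STMMAC_LPI_FORCED, false, 0);

	return ret;
}
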
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 1c5802e0d7f4..aac68dc28dc1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -110,14 +110,20 @@ static int dwmac4_wrback_get_rx_status(struct stmmac_extra_stats *x,
message_type = (rdes1 & ERDES4_MSG_TYPE_MASK) >> 8;
- if (rdes1 & RDES1_IP_HDR_ERROR)
+ if (rdes1 & RDES1_IP_HDR_ERROR) {
x->ip_hdr_err++;
+ ret |= csum_none;
+ }
if (rdes1 & RDES1_IP_CSUM_BYPASSED)
x->ip_csum_bypassed++;
if (rdes1 & RDES1_IPV4_HEADER)
x->ipv4_pkt_rcvd++;
if (rdes1 & RDES1_IPV6_HEADER)
x->ipv6_pkt_rcvd++;
+ if (rdes1 & RDES1_IP_PAYLOAD_ERROR) {
+ x->ip_payload_err++;
+ ret |= csum_none;
+ }
if (message_type == RDES_EXT_NO_PTP)
x->no_ptp_rx_msg_type_ext++;
@@ -186,10 +192,12 @@ static void dwmac4_set_tx_owner(struct dma_desc *p)
static void dwmac4_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
{
- p->des3 |= cpu_to_le32(RDES3_OWN | RDES3_BUFFER1_VALID_ADDR);
+ u32 flags = (RDES3_OWN | RDES3_BUFFER1_VALID_ADDR);
if (!disable_rx_ic)
- p->des3 |= cpu_to_le32(RDES3_INT_ON_COMPLETION_EN);
+ flags |= RDES3_INT_ON_COMPLETION_EN;
+
+ p->des3 |= cpu_to_le32(flags);
}
static int dwmac4_get_tx_ls(struct dma_desc *p)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
index 6da070ccd737..806555976496 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
@@ -95,7 +95,7 @@
#define RDES1_IPV4_HEADER BIT(4)
#define RDES1_IPV6_HEADER BIT(5)
#define RDES1_IP_CSUM_BYPASSED BIT(6)
-#define RDES1_IP_CSUM_ERROR BIT(7)
+#define RDES1_IP_PAYLOAD_ERROR BIT(7)
#define RDES1_PTP_MSG_TYPE_MASK GENMASK(11, 8)
#define RDES1_PTP_PACKET_TYPE BIT(12)
#define RDES1_PTP_VER BIT(13)
@@ -144,4 +144,7 @@
/* TDS3 use for both format (read and write back) */
#define RDES3_OWN BIT(31)
+extern const struct stmmac_mode_ops dwmac4_ring_mode_ops;
+extern const struct stmmac_desc_ops dwmac4_desc_ops;
+
#endif /* __DWMAC4_DESCS_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
index 84d3a8551b03..7b513324cfb0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -18,7 +18,6 @@
static void dwmac4_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
{
u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);
- int i;
pr_info("dwmac4: Master AXI performs %s burst length\n",
(value & DMA_SYS_BUS_FB) ? "fixed" : "any");
@@ -38,33 +37,10 @@ static void dwmac4_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
/* Depending on the UNDEF bit the Master AXI will perform any burst
* length according to the BLEN programmed (by default all BLEN are
- * set).
+ * set). Note that the UNDEF bit is read-only, and is the inverse of
+ * Bus Mode bit 16.
*/
- for (i = 0; i < AXI_BLEN; i++) {
- switch (axi->axi_blen[i]) {
- case 256:
- value |= DMA_AXI_BLEN256;
- break;
- case 128:
- value |= DMA_AXI_BLEN128;
- break;
- case 64:
- value |= DMA_AXI_BLEN64;
- break;
- case 32:
- value |= DMA_AXI_BLEN32;
- break;
- case 16:
- value |= DMA_AXI_BLEN16;
- break;
- case 8:
- value |= DMA_AXI_BLEN8;
- break;
- case 4:
- value |= DMA_AXI_BLEN4;
- break;
- }
- }
+ value = (value & ~DMA_AXI_BLEN_MASK) | axi->axi_blen_regval;
writel(value, ioaddr + DMA_SYS_BUS_MODE);
}
@@ -153,7 +129,7 @@ static void dwmac410_dma_init_channel(struct stmmac_priv *priv,
}
static void dwmac4_dma_init(void __iomem *ioaddr,
- struct stmmac_dma_cfg *dma_cfg, int atds)
+ struct stmmac_dma_cfg *dma_cfg)
{
u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);
@@ -203,8 +179,12 @@ static void _dwmac4_dump_dma_regs(struct stmmac_priv *priv,
readl(ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, channel));
reg_space[DMA_CHAN_RX_CONTROL(default_addrs, channel) / 4] =
readl(ioaddr + DMA_CHAN_RX_CONTROL(dwmac4_addrs, channel));
+ reg_space[DMA_CHAN_TX_BASE_ADDR_HI(default_addrs, channel) / 4] =
+ readl(ioaddr + DMA_CHAN_TX_BASE_ADDR_HI(dwmac4_addrs, channel));
reg_space[DMA_CHAN_TX_BASE_ADDR(default_addrs, channel) / 4] =
readl(ioaddr + DMA_CHAN_TX_BASE_ADDR(dwmac4_addrs, channel));
+ reg_space[DMA_CHAN_RX_BASE_ADDR_HI(default_addrs, channel) / 4] =
+ readl(ioaddr + DMA_CHAN_RX_BASE_ADDR_HI(dwmac4_addrs, channel));
reg_space[DMA_CHAN_RX_BASE_ADDR(default_addrs, channel) / 4] =
readl(ioaddr + DMA_CHAN_RX_BASE_ADDR(dwmac4_addrs, channel));
reg_space[DMA_CHAN_TX_END_ADDR(default_addrs, channel) / 4] =
@@ -225,8 +205,12 @@ static void _dwmac4_dump_dma_regs(struct stmmac_priv *priv,
readl(ioaddr + DMA_CHAN_CUR_TX_DESC(dwmac4_addrs, channel));
reg_space[DMA_CHAN_CUR_RX_DESC(default_addrs, channel) / 4] =
readl(ioaddr + DMA_CHAN_CUR_RX_DESC(dwmac4_addrs, channel));
+ reg_space[DMA_CHAN_CUR_TX_BUF_ADDR_HI(default_addrs, channel) / 4] =
+ readl(ioaddr + DMA_CHAN_CUR_TX_BUF_ADDR_HI(dwmac4_addrs, channel));
reg_space[DMA_CHAN_CUR_TX_BUF_ADDR(default_addrs, channel) / 4] =
readl(ioaddr + DMA_CHAN_CUR_TX_BUF_ADDR(dwmac4_addrs, channel));
+ reg_space[DMA_CHAN_CUR_RX_BUF_ADDR_HI(default_addrs, channel) / 4] =
+ readl(ioaddr + DMA_CHAN_CUR_RX_BUF_ADDR_HI(dwmac4_addrs, channel));
reg_space[DMA_CHAN_CUR_RX_BUF_ADDR(default_addrs, channel) / 4] =
readl(ioaddr + DMA_CHAN_CUR_RX_BUF_ADDR(dwmac4_addrs, channel));
reg_space[DMA_CHAN_STATUS(default_addrs, channel) / 4] =
@@ -260,13 +244,15 @@ static void dwmac4_dma_rx_chan_op_mode(struct stmmac_priv *priv,
mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(dwmac4_addrs, channel));
+ mtl_rx_op |= MTL_OP_MODE_DIS_TCP_EF;
+
if (mode == SF_DMA_MODE) {
pr_debug("GMAC: enable RX store and forward mode\n");
mtl_rx_op |= MTL_OP_MODE_RSF;
} else {
pr_debug("GMAC: disable RX SF mode (threshold %d)\n", mode);
mtl_rx_op &= ~MTL_OP_MODE_RSF;
- mtl_rx_op &= MTL_OP_MODE_RTC_MASK;
+ mtl_rx_op &= ~MTL_OP_MODE_RTC_MASK;
if (mode <= 32)
mtl_rx_op |= MTL_OP_MODE_RTC_32;
else if (mode <= 64)
@@ -335,7 +321,7 @@ static void dwmac4_dma_tx_chan_op_mode(struct stmmac_priv *priv,
} else {
pr_debug("GMAC: disabling TX SF (threshold %d)\n", mode);
mtl_tx_op &= ~MTL_OP_MODE_TSF;
- mtl_tx_op &= MTL_OP_MODE_TTC_MASK;
+ mtl_tx_op &= ~MTL_OP_MODE_TTC_MASK;
/* Set the transmit threshold */
if (mode <= 32)
mtl_tx_op |= MTL_OP_MODE_TTC_32;
@@ -526,6 +512,11 @@ static void dwmac4_enable_sph(struct stmmac_priv *priv, void __iomem *ioaddr,
value |= GMAC_CONFIG_HDSMS_256; /* Segment max 256 bytes */
writel(value, ioaddr + GMAC_EXT_CONFIG);
+ value = readl(ioaddr + GMAC_EXT_CFG1);
+ value |= GMAC_CONFIG1_SPLM(1); /* Split mode set to L2OFST */
+ value |= GMAC_CONFIG1_SAVE_EN; /* Enable Split AV mode */
+ writel(value, ioaddr + GMAC_EXT_CFG1);
+
value = readl(ioaddr + DMA_CHAN_CONTROL(dwmac4_addrs, chan));
if (en)
value |= DMA_CONTROL_SPH;
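
The axi_blen_regval used above replaces the per-call switch over axi->axi_blen[]. A hedged sketch of how such a value could be precomputed is shown below; the helper name is invented for illustration, and it assumes DMA_AXI_BLEN4..DMA_AXI_BLEN256 keep their BIT(1)..BIT(7) encoding (this series only moves those definitions, it does not change them).

static u32 example_axi_blen_to_regval(const u32 *blen, int count)
{
	u32 regval = 0;
	int i;

	for (i = 0; i < count; i++)
		if (blen[i] >= 4 && blen[i] <= 256 && is_power_of_2(blen[i]))
			/* 4 -> BIT(1), 8 -> BIT(2), ... 256 -> BIT(7) */
			regval |= BIT(ilog2(blen[i]) - 1);

	return regval;
}
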
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
index 17d9120db5fe..f27126f05551 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
@@ -69,15 +69,8 @@
#define DMA_SYS_BUS_MB BIT(14)
#define DMA_AXI_1KBBE BIT(13)
-#define DMA_SYS_BUS_AAL BIT(12)
+#define DMA_SYS_BUS_AAL DMA_AXI_AAL
#define DMA_SYS_BUS_EAME BIT(11)
-#define DMA_AXI_BLEN256 BIT(7)
-#define DMA_AXI_BLEN128 BIT(6)
-#define DMA_AXI_BLEN64 BIT(5)
-#define DMA_AXI_BLEN32 BIT(4)
-#define DMA_AXI_BLEN16 BIT(3)
-#define DMA_AXI_BLEN8 BIT(2)
-#define DMA_AXI_BLEN4 BIT(1)
#define DMA_SYS_BUS_FB BIT(0)
#define DMA_BURST_LEN_DEFAULT (DMA_AXI_BLEN256 | DMA_AXI_BLEN128 | \
@@ -85,8 +78,6 @@
DMA_AXI_BLEN16 | DMA_AXI_BLEN8 | \
DMA_AXI_BLEN4)
-#define DMA_AXI_BURST_LEN_MASK 0x000000FE
-
/* DMA TBS Control */
#define DMA_TBS_FTOS GENMASK(31, 8)
#define DMA_TBS_FTOV BIT(0)
@@ -127,7 +118,9 @@ static inline u32 dma_chanx_base_addr(const struct dwmac4_addrs *addrs,
#define DMA_CHAN_SLOT_CTRL_STATUS(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x3c)
#define DMA_CHAN_CUR_TX_DESC(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x44)
#define DMA_CHAN_CUR_RX_DESC(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x4c)
+#define DMA_CHAN_CUR_TX_BUF_ADDR_HI(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x50)
#define DMA_CHAN_CUR_TX_BUF_ADDR(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x54)
+#define DMA_CHAN_CUR_RX_BUF_ADDR_HI(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x58)
#define DMA_CHAN_CUR_RX_BUF_ADDR(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x5c)
#define DMA_CHAN_STATUS(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x60)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
index 0d185e54eb7e..57c03d491774 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
@@ -185,8 +185,6 @@ int dwmac4_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
x->rx_buf_unav_irq++;
if (unlikely(intr_status & DMA_CHAN_STATUS_RPS))
x->rx_process_stopped_irq++;
- if (unlikely(intr_status & DMA_CHAN_STATUS_RWT))
- x->rx_watchdog_irq++;
if (unlikely(intr_status & DMA_CHAN_STATUS_ETI))
x->tx_early_irq++;
if (unlikely(intr_status & DMA_CHAN_STATUS_TPS)) {
@@ -198,6 +196,10 @@ int dwmac4_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
ret = tx_hard_error;
}
}
+
+ if (unlikely(intr_status & DMA_CHAN_STATUS_RWT))
+ x->rx_watchdog_irq++;
+
/* TX/RX NORMAL interrupts */
if (likely(intr_status & DMA_CHAN_STATUS_RI)) {
u64_stats_update_begin(&stats->syncp);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
index e02cebc3f1b7..1c431b918719 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
@@ -572,69 +572,3 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
writel(val, ioaddr + MAC_PPS_CONTROL);
return 0;
}
-
-void dwmac5_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
- u32 num_txq, u32 num_rxq,
- bool enable)
-{
- u32 value;
-
- if (enable) {
- cfg->fpe_csr = EFPE;
- value = readl(ioaddr + GMAC_RXQ_CTRL1);
- value &= ~GMAC_RXQCTRL_FPRQ;
- value |= (num_rxq - 1) << GMAC_RXQCTRL_FPRQ_SHIFT;
- writel(value, ioaddr + GMAC_RXQ_CTRL1);
- } else {
- cfg->fpe_csr = 0;
- }
- writel(cfg->fpe_csr, ioaddr + MAC_FPE_CTRL_STS);
-}
-
-int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev)
-{
- u32 value;
- int status;
-
- status = FPE_EVENT_UNKNOWN;
-
- /* Reads from the MAC_FPE_CTRL_STS register should only be performed
- * here, since the status flags of MAC_FPE_CTRL_STS are "clear on read"
- */
- value = readl(ioaddr + MAC_FPE_CTRL_STS);
-
- if (value & TRSP) {
- status |= FPE_EVENT_TRSP;
- netdev_info(dev, "FPE: Respond mPacket is transmitted\n");
- }
-
- if (value & TVER) {
- status |= FPE_EVENT_TVER;
- netdev_info(dev, "FPE: Verify mPacket is transmitted\n");
- }
-
- if (value & RRSP) {
- status |= FPE_EVENT_RRSP;
- netdev_info(dev, "FPE: Respond mPacket is received\n");
- }
-
- if (value & RVER) {
- status |= FPE_EVENT_RVER;
- netdev_info(dev, "FPE: Verify mPacket is received\n");
- }
-
- return status;
-}
-
-void dwmac5_fpe_send_mpacket(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
- enum stmmac_mpacket_type type)
-{
- u32 value = cfg->fpe_csr;
-
- if (type == MPACKET_VERIFY)
- value |= SVER;
- else if (type == MPACKET_RESPONSE)
- value |= SRSP;
-
- writel(value, ioaddr + MAC_FPE_CTRL_STS);
-}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
index bf33a51d229e..00b151b3b688 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
@@ -11,15 +11,6 @@
#define PRTYEN BIT(1)
#define TMOUTEN BIT(0)
-#define MAC_FPE_CTRL_STS 0x00000234
-#define TRSP BIT(19)
-#define TVER BIT(18)
-#define RRSP BIT(17)
-#define RVER BIT(16)
-#define SRSP BIT(2)
-#define SVER BIT(1)
-#define EFPE BIT(0)
-
#define MAC_PPS_CONTROL 0x00000b70
#define PPS_MAXIDX(x) ((((x) + 1) * 8) - 1)
#define PPS_MINIDX(x) ((x) * 8)
@@ -102,12 +93,5 @@ int dwmac5_rxp_config(void __iomem *ioaddr, struct stmmac_tc_entry *entries,
int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
struct stmmac_pps_cfg *cfg, bool enable,
u32 sub_second_inc, u32 systime_flags);
-void dwmac5_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
- u32 num_txq, u32 num_rxq,
- bool enable);
-void dwmac5_fpe_send_mpacket(void __iomem *ioaddr,
- struct stmmac_fpe_cfg *cfg,
- enum stmmac_mpacket_type type);
-int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev);
#endif /* __DWMAC5_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index 72672391675f..054ecb20ce3f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -22,6 +22,31 @@
#define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */
#define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */
+/* The following DMA defines are channel-oriented */
+#define DMA_CHAN_BASE_OFFSET 0x100
+
+static inline u32 dma_chan_base_addr(u32 base, u32 chan)
+{
+ return base + chan * DMA_CHAN_BASE_OFFSET;
+}
+
+#define DMA_CHAN_BUS_MODE(chan) dma_chan_base_addr(DMA_BUS_MODE, chan)
+#define DMA_CHAN_XMT_POLL_DEMAND(chan) \
+ dma_chan_base_addr(DMA_XMT_POLL_DEMAND, chan)
+#define DMA_CHAN_RCV_POLL_DEMAND(chan) \
+ dma_chan_base_addr(DMA_RCV_POLL_DEMAND, chan)
+#define DMA_CHAN_RCV_BASE_ADDR(chan) \
+ dma_chan_base_addr(DMA_RCV_BASE_ADDR, chan)
+#define DMA_CHAN_TX_BASE_ADDR(chan) \
+ dma_chan_base_addr(DMA_TX_BASE_ADDR, chan)
+#define DMA_CHAN_STATUS(chan) dma_chan_base_addr(DMA_STATUS, chan)
+#define DMA_CHAN_CONTROL(chan) dma_chan_base_addr(DMA_CONTROL, chan)
+#define DMA_CHAN_INTR_ENA(chan) dma_chan_base_addr(DMA_INTR_ENA, chan)
+#define DMA_CHAN_MISSED_FRAME_CTR(chan) \
+ dma_chan_base_addr(DMA_MISSED_FRAME_CTR, chan)
+#define DMA_CHAN_RX_WATCHDOG(chan) \
+ dma_chan_base_addr(DMA_RX_WATCHDOG, chan)
+
/* SW Reset */
#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */
@@ -43,23 +68,14 @@
#define DMA_AXI_OSR_MAX 0xf
#define DMA_AXI_MAX_OSR_LIMIT ((DMA_AXI_OSR_MAX << DMA_AXI_WR_OSR_LMT_SHIFT) | \
(DMA_AXI_OSR_MAX << DMA_AXI_RD_OSR_LMT_SHIFT))
-#define DMA_AXI_1KBBE BIT(13)
-#define DMA_AXI_AAL BIT(12)
-#define DMA_AXI_BLEN256 BIT(7)
-#define DMA_AXI_BLEN128 BIT(6)
-#define DMA_AXI_BLEN64 BIT(5)
-#define DMA_AXI_BLEN32 BIT(4)
-#define DMA_AXI_BLEN16 BIT(3)
-#define DMA_AXI_BLEN8 BIT(2)
-#define DMA_AXI_BLEN4 BIT(1)
#define DMA_BURST_LEN_DEFAULT (DMA_AXI_BLEN256 | DMA_AXI_BLEN128 | \
DMA_AXI_BLEN64 | DMA_AXI_BLEN32 | \
DMA_AXI_BLEN16 | DMA_AXI_BLEN8 | \
DMA_AXI_BLEN4)
-#define DMA_AXI_UNDEF BIT(0)
+#define DMA_AXI_1KBBE BIT(13)
-#define DMA_AXI_BURST_LEN_MASK 0x000000FE
+#define DMA_AXI_UNDEF BIT(0)
#define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */
#define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */
@@ -152,7 +168,8 @@
#define NUM_DWMAC1000_DMA_REGS 23
#define NUM_DWMAC4_DMA_REGS 27
-void dwmac_enable_dma_transmission(void __iomem *ioaddr);
+void dwmac_enable_dma_transmission(void __iomem *ioaddr, u32 chan);
+void dwmac_enable_dma_reception(void __iomem *ioaddr, u32 chan);
void dwmac_enable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 chan, bool rx, bool tx);
void dwmac_disable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
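
Worked addresses for the new channel-oriented macros, using only the register bases and the 0x100 per-channel stride defined above:

	DMA_CHAN_INTR_ENA(0)          == 0x101c                 /* same as the legacy single-channel macro */
	DMA_CHAN_INTR_ENA(1)          == 0x101c + 0x100         == 0x111c
	DMA_CHAN_MISSED_FRAME_CTR(3)  == 0x1020 + 3 * 0x100     == 0x1320
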
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index 85e18f9a22f9..97a803d68e3a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -28,65 +28,70 @@ int dwmac_dma_reset(void __iomem *ioaddr)
}
/* CSR1 enables the transmit DMA to check for new descriptor */
-void dwmac_enable_dma_transmission(void __iomem *ioaddr)
+void dwmac_enable_dma_transmission(void __iomem *ioaddr, u32 chan)
{
- writel(1, ioaddr + DMA_XMT_POLL_DEMAND);
+ writel(1, ioaddr + DMA_CHAN_XMT_POLL_DEMAND(chan));
+}
+
+void dwmac_enable_dma_reception(void __iomem *ioaddr, u32 chan)
+{
+ writel(1, ioaddr + DMA_CHAN_RCV_POLL_DEMAND(chan));
}
void dwmac_enable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 chan, bool rx, bool tx)
{
- u32 value = readl(ioaddr + DMA_INTR_ENA);
+ u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
if (rx)
value |= DMA_INTR_DEFAULT_RX;
if (tx)
value |= DMA_INTR_DEFAULT_TX;
- writel(value, ioaddr + DMA_INTR_ENA);
+ writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan));
}
void dwmac_disable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 chan, bool rx, bool tx)
{
- u32 value = readl(ioaddr + DMA_INTR_ENA);
+ u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
if (rx)
value &= ~DMA_INTR_DEFAULT_RX;
if (tx)
value &= ~DMA_INTR_DEFAULT_TX;
- writel(value, ioaddr + DMA_INTR_ENA);
+ writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan));
}
void dwmac_dma_start_tx(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 chan)
{
- u32 value = readl(ioaddr + DMA_CONTROL);
+ u32 value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
value |= DMA_CONTROL_ST;
- writel(value, ioaddr + DMA_CONTROL);
+ writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
}
void dwmac_dma_stop_tx(struct stmmac_priv *priv, void __iomem *ioaddr, u32 chan)
{
- u32 value = readl(ioaddr + DMA_CONTROL);
+ u32 value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
value &= ~DMA_CONTROL_ST;
- writel(value, ioaddr + DMA_CONTROL);
+ writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
}
void dwmac_dma_start_rx(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 chan)
{
- u32 value = readl(ioaddr + DMA_CONTROL);
+ u32 value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
value |= DMA_CONTROL_SR;
- writel(value, ioaddr + DMA_CONTROL);
+ writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
}
void dwmac_dma_stop_rx(struct stmmac_priv *priv, void __iomem *ioaddr, u32 chan)
{
- u32 value = readl(ioaddr + DMA_CONTROL);
+ u32 value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
value &= ~DMA_CONTROL_SR;
- writel(value, ioaddr + DMA_CONTROL);
+ writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
}
#ifdef DWMAC_DMA_DEBUG
@@ -165,7 +170,7 @@ int dwmac_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats);
int ret = 0;
/* read the status register (CSR5) */
- u32 intr_status = readl(ioaddr + DMA_STATUS);
+ u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(chan));
#ifdef DWMAC_DMA_DEBUG
/* Enable it to monitor DMA rx/tx status in case of critical problems */
@@ -251,7 +256,7 @@ void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr)
void stmmac_set_mac_addr(void __iomem *ioaddr, const u8 addr[6],
unsigned int high, unsigned int low)
{
- unsigned long data;
+ u32 data;
data = (addr[5] << 8) | addr[4];
/* For MAC Addr registers we have to set the Address Enable (AE)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
index 6a2c7d22df1e..fecda3034d36 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -57,19 +57,6 @@
#define XGMAC_FILTER_PR BIT(0)
#define XGMAC_HASH_TABLE(x) (0x00000010 + (x) * 4)
#define XGMAC_MAX_HASH_TABLE 8
-#define XGMAC_VLAN_TAG 0x00000050
-#define XGMAC_VLAN_EDVLP BIT(26)
-#define XGMAC_VLAN_VTHM BIT(25)
-#define XGMAC_VLAN_DOVLTC BIT(20)
-#define XGMAC_VLAN_ESVL BIT(18)
-#define XGMAC_VLAN_ETV BIT(16)
-#define XGMAC_VLAN_VID GENMASK(15, 0)
-#define XGMAC_VLAN_HASH_TABLE 0x00000058
-#define XGMAC_VLAN_INCL 0x00000060
-#define XGMAC_VLAN_VLTI BIT(20)
-#define XGMAC_VLAN_CSVL BIT(19)
-#define XGMAC_VLAN_VLC GENMASK(17, 16)
-#define XGMAC_VLAN_VLC_SHIFT 16
#define XGMAC_RXQ_CTRL0 0x000000a0
#define XGMAC_RXQEN(x) GENMASK((x) * 2 + 1, (x) * 2)
#define XGMAC_RXQEN_SHIFT(x) ((x) * 2)
@@ -84,8 +71,7 @@
#define XGMAC_MCBCQEN BIT(15)
#define XGMAC_MCBCQ GENMASK(11, 8)
#define XGMAC_MCBCQ_SHIFT 8
-#define XGMAC_RQ GENMASK(7, 4)
-#define XGMAC_RQ_SHIFT 4
+#define XGMAC_FPRQ GENMASK(7, 4)
#define XGMAC_UPQ GENMASK(3, 0)
#define XGMAC_UPQ_SHIFT 0
#define XGMAC_RXQ_CTRL2 0x000000a8
@@ -93,9 +79,11 @@
#define XGMAC_PSRQ(x) GENMASK((x) * 8 + 7, (x) * 8)
#define XGMAC_PSRQ_SHIFT(x) ((x) * 8)
#define XGMAC_INT_STATUS 0x000000b0
+#define XGMAC_INT_TSIS BIT(12)
#define XGMAC_LPIIS BIT(5)
#define XGMAC_PMTIS BIT(4)
#define XGMAC_INT_EN 0x000000b4
+#define XGMAC_FPEIE BIT(15)
#define XGMAC_TSIE BIT(12)
#define XGMAC_LPIIE BIT(5)
#define XGMAC_PMTIE BIT(4)
@@ -112,14 +100,7 @@
#define XGMAC_MGKPKTEN BIT(1)
#define XGMAC_PWRDWN BIT(0)
#define XGMAC_LPI_CTRL 0x000000d0
-#define XGMAC_TXCGE BIT(21)
-#define XGMAC_LPITXA BIT(19)
-#define XGMAC_PLS BIT(17)
-#define XGMAC_LPITXEN BIT(16)
-#define XGMAC_RLPIEX BIT(3)
-#define XGMAC_RLPIEN BIT(2)
-#define XGMAC_TLPIEX BIT(1)
-#define XGMAC_TLPIEN BIT(0)
+/* For definitions, see LPI_CTRL_STATUS_xxx in common.h */
#define XGMAC_LPI_TIMER_CTRL 0x000000d4
#define XGMAC_HW_FEATURE0 0x0000011c
#define XGMAC_HWFEAT_EDMA BIT(31)
@@ -193,8 +174,8 @@
#define XGMAC_MDIO_ADDR 0x00000200
#define XGMAC_MDIO_DATA 0x00000204
#define XGMAC_MDIO_C22P 0x00000220
-#define XGMAC_FPE_CTRL_STS 0x00000280
-#define XGMAC_EFPE BIT(0)
+#define XGMAC_GPIO_STATUS 0x0000027c
+#define XGMAC_GPIO_GPO0 BIT(16)
#define XGMAC_ADDRx_HIGH(x) (0x00000300 + (x) * 0x8)
#define XGMAC_ADDR_MAX 32
#define XGMAC_AE BIT(31)
@@ -242,6 +223,8 @@
#define XGMAC_OB BIT(0)
#define XGMAC_RSS_DATA 0x00000c8c
#define XGMAC_TIMESTAMP_STATUS 0x00000d20
+#define XGMAC_TIMESTAMP_ATSNS_MASK GENMASK(29, 25)
+#define XGMAC_TIMESTAMP_ATSNS_SHIFT 25
#define XGMAC_TXTSC BIT(15)
#define XGMAC_TXTIMESTAMP_NSEC 0x00000d30
#define XGMAC_TXTSSTSLO GENMASK(30, 0)
@@ -355,16 +338,9 @@
#define XGMAC_RD_OSR_LMT_SHIFT 16
#define XGMAC_EN_LPI BIT(15)
#define XGMAC_LPI_XIT_PKT BIT(14)
-#define XGMAC_AAL BIT(12)
+#define XGMAC_AAL DMA_AXI_AAL
#define XGMAC_EAME BIT(11)
-#define XGMAC_BLEN GENMASK(7, 1)
-#define XGMAC_BLEN256 BIT(7)
-#define XGMAC_BLEN128 BIT(6)
-#define XGMAC_BLEN64 BIT(5)
-#define XGMAC_BLEN32 BIT(4)
-#define XGMAC_BLEN16 BIT(3)
-#define XGMAC_BLEN8 BIT(2)
-#define XGMAC_BLEN4 BIT(1)
+/* XGMAC_BLEN* are now defined as DMA_AXI_BLEN* in common.h */
#define XGMAC_UNDEF BIT(0)
#define XGMAC_TX_EDMA_CTRL 0x00003040
#define XGMAC_TDPS GENMASK(29, 0)
@@ -486,6 +462,7 @@
#define XGMAC_RDES3_RSV BIT(26)
#define XGMAC_RDES3_L34T GENMASK(23, 20)
#define XGMAC_RDES3_L34T_SHIFT 20
+#define XGMAC_RDES3_ET_LT GENMASK(19, 16)
#define XGMAC_L34T_IP4TCP 0x1
#define XGMAC_L34T_IP4UDP 0x2
#define XGMAC_L34T_IP6TCP 0x9
@@ -495,4 +472,20 @@
#define XGMAC_RDES3_TSD BIT(6)
#define XGMAC_RDES3_TSA BIT(4)
+/* RDES0 (write back format) */
+#define XGMAC_RDES0_VLAN_TAG_MASK GENMASK(15, 0)
+
+/* Error Type or L2 Type(ET/LT) Field Number */
+#define XGMAC_ET_LT_VLAN_STAG 8
+#define XGMAC_ET_LT_VLAN_CTAG 9
+#define XGMAC_ET_LT_DVLAN_CTAG_CTAG 10
+#define XGMAC_ET_LT_DVLAN_STAG_STAG 11
+#define XGMAC_ET_LT_DVLAN_CTAG_STAG 12
+#define XGMAC_ET_LT_DVLAN_STAG_CTAG 13
+
+extern const struct stmmac_ops dwxgmac210_ops;
+extern const struct stmmac_ops dwxlgmac2_ops;
+extern const struct stmmac_dma_ops dwxgmac210_dma_ops;
+extern const struct stmmac_desc_ops dwxgmac210_desc_ops;
+
#endif /* __STMMAC_DWXGMAC2_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index f8e7775bb633..b40b3ea50e25 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -8,7 +8,9 @@
#include <linux/crc32.h>
#include <linux/iopoll.h>
#include "stmmac.h"
+#include "stmmac_fpe.h"
#include "stmmac_ptp.h"
+#include "stmmac_vlan.h"
#include "dwxlgmac2.h"
#include "dwxgmac2.h"
@@ -21,30 +23,31 @@ static void dwxgmac2_core_init(struct mac_device_info *hw,
tx = readl(ioaddr + XGMAC_TX_CONFIG);
rx = readl(ioaddr + XGMAC_RX_CONFIG);
- tx |= XGMAC_CORE_INIT_TX;
- rx |= XGMAC_CORE_INIT_RX;
+ writel(tx | XGMAC_CORE_INIT_TX, ioaddr + XGMAC_TX_CONFIG);
+ writel(rx | XGMAC_CORE_INIT_RX, ioaddr + XGMAC_RX_CONFIG);
+ writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN);
+}
- if (hw->ps) {
- tx |= XGMAC_CONFIG_TE;
- tx &= ~hw->link.speed_mask;
+static void dwxgmac2_irq_modify(struct mac_device_info *hw, u32 disable,
+ u32 enable)
+{
+ void __iomem *int_mask = hw->pcsr + XGMAC_INT_EN;
+ unsigned long flags;
+ u32 value;
- switch (hw->ps) {
- case SPEED_10000:
- tx |= hw->link.xgmii.speed10000;
- break;
- case SPEED_2500:
- tx |= hw->link.speed2500;
- break;
- case SPEED_1000:
- default:
- tx |= hw->link.speed1000;
- break;
- }
- }
+ spin_lock_irqsave(&hw->irq_ctrl_lock, flags);
+ value = readl(int_mask) & ~disable;
+ value |= enable;
+ writel(value, int_mask);
+ spin_unlock_irqrestore(&hw->irq_ctrl_lock, flags);
+}
- writel(tx, ioaddr + XGMAC_TX_CONFIG);
- writel(rx, ioaddr + XGMAC_RX_CONFIG);
- writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN);
+static void dwxgmac2_update_caps(struct stmmac_priv *priv)
+{
+ if (!priv->dma_cap.mbps_10_100)
+ priv->hw->link.caps &= ~(MAC_10 | MAC_100);
+ else if (!priv->dma_cap.half_duplex)
+ priv->hw->link.caps &= ~(MAC_10HD | MAC_100HD);
}
static void dwxgmac2_set_mac(void __iomem *ioaddr, bool enable)
@@ -315,17 +318,17 @@ static int dwxgmac2_host_irq_status(struct mac_device_info *hw,
if (stat & XGMAC_LPIIS) {
u32 lpi = readl(ioaddr + XGMAC_LPI_CTRL);
- if (lpi & XGMAC_TLPIEN) {
+ if (lpi & LPI_CTRL_STATUS_TLPIEN) {
ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
x->irq_tx_path_in_lpi_mode_n++;
}
- if (lpi & XGMAC_TLPIEX) {
+ if (lpi & LPI_CTRL_STATUS_TLPIEX) {
ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
x->irq_tx_path_exit_lpi_mode_n++;
}
- if (lpi & XGMAC_RLPIEN)
+ if (lpi & LPI_CTRL_STATUS_RLPIEN)
x->irq_rx_path_in_lpi_mode_n++;
- if (lpi & XGMAC_RLPIEX)
+ if (lpi & LPI_CTRL_STATUS_RLPIEX)
x->irq_rx_path_exit_lpi_mode_n++;
}
@@ -424,29 +427,28 @@ static void dwxgmac2_get_umac_addr(struct mac_device_info *hw,
addr[5] = (hi_addr >> 8) & 0xff;
}
-static void dwxgmac2_set_eee_mode(struct mac_device_info *hw,
- bool en_tx_lpi_clockgating)
+static int dwxgmac2_set_lpi_mode(struct mac_device_info *hw,
+ enum stmmac_lpi_mode mode,
+ bool en_tx_lpi_clockgating, u32 et)
{
void __iomem *ioaddr = hw->pcsr;
u32 value;
- value = readl(ioaddr + XGMAC_LPI_CTRL);
-
- value |= XGMAC_LPITXEN | XGMAC_LPITXA;
- if (en_tx_lpi_clockgating)
- value |= XGMAC_TXCGE;
-
- writel(value, ioaddr + XGMAC_LPI_CTRL);
-}
-
-static void dwxgmac2_reset_eee_mode(struct mac_device_info *hw)
-{
- void __iomem *ioaddr = hw->pcsr;
- u32 value;
+ if (mode == STMMAC_LPI_TIMER)
+ return -EOPNOTSUPP;
value = readl(ioaddr + XGMAC_LPI_CTRL);
- value &= ~(XGMAC_LPITXEN | XGMAC_LPITXA | XGMAC_TXCGE);
+ if (mode == STMMAC_LPI_FORCED) {
+ value |= LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA;
+ if (en_tx_lpi_clockgating)
+ value |= LPI_CTRL_STATUS_LPITCSE;
+ } else {
+ value &= ~(LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA |
+ LPI_CTRL_STATUS_LPITCSE);
+ }
writel(value, ioaddr + XGMAC_LPI_CTRL);
+
+ return 0;
}
static void dwxgmac2_set_eee_pls(struct mac_device_info *hw, int link)
@@ -456,9 +458,9 @@ static void dwxgmac2_set_eee_pls(struct mac_device_info *hw, int link)
value = readl(ioaddr + XGMAC_LPI_CTRL);
if (link)
- value |= XGMAC_PLS;
+ value |= LPI_CTRL_STATUS_PLS;
else
- value &= ~XGMAC_PLS;
+ value &= ~LPI_CTRL_STATUS_PLS;
writel(value, ioaddr + XGMAC_LPI_CTRL);
}
@@ -614,76 +616,6 @@ static int dwxgmac2_rss_configure(struct mac_device_info *hw,
return 0;
}
-static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
- __le16 perfect_match, bool is_double)
-{
- void __iomem *ioaddr = hw->pcsr;
-
- writel(hash, ioaddr + XGMAC_VLAN_HASH_TABLE);
-
- if (hash) {
- u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
-
- value |= XGMAC_FILTER_VTFE;
-
- writel(value, ioaddr + XGMAC_PACKET_FILTER);
-
- value = readl(ioaddr + XGMAC_VLAN_TAG);
-
- value |= XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV;
- if (is_double) {
- value |= XGMAC_VLAN_EDVLP;
- value |= XGMAC_VLAN_ESVL;
- value |= XGMAC_VLAN_DOVLTC;
- } else {
- value &= ~XGMAC_VLAN_EDVLP;
- value &= ~XGMAC_VLAN_ESVL;
- value &= ~XGMAC_VLAN_DOVLTC;
- }
-
- value &= ~XGMAC_VLAN_VID;
- writel(value, ioaddr + XGMAC_VLAN_TAG);
- } else if (perfect_match) {
- u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
-
- value |= XGMAC_FILTER_VTFE;
-
- writel(value, ioaddr + XGMAC_PACKET_FILTER);
-
- value = readl(ioaddr + XGMAC_VLAN_TAG);
-
- value &= ~XGMAC_VLAN_VTHM;
- value |= XGMAC_VLAN_ETV;
- if (is_double) {
- value |= XGMAC_VLAN_EDVLP;
- value |= XGMAC_VLAN_ESVL;
- value |= XGMAC_VLAN_DOVLTC;
- } else {
- value &= ~XGMAC_VLAN_EDVLP;
- value &= ~XGMAC_VLAN_ESVL;
- value &= ~XGMAC_VLAN_DOVLTC;
- }
-
- value &= ~XGMAC_VLAN_VID;
- writel(value | perfect_match, ioaddr + XGMAC_VLAN_TAG);
- } else {
- u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
-
- value &= ~XGMAC_FILTER_VTFE;
-
- writel(value, ioaddr + XGMAC_PACKET_FILTER);
-
- value = readl(ioaddr + XGMAC_VLAN_TAG);
-
- value &= ~(XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV);
- value &= ~(XGMAC_VLAN_EDVLP | XGMAC_VLAN_ESVL);
- value &= ~XGMAC_VLAN_DOVLTC;
- value &= ~XGMAC_VLAN_VID;
-
- writel(value, ioaddr + XGMAC_VLAN_TAG);
- }
-}
-
struct dwxgmac3_error_desc {
bool valid;
const char *desc;
@@ -846,42 +778,41 @@ static const struct dwxgmac3_error_desc dwxgmac3_dma_errors[32]= {
{ false, "UNKNOWN", "Unknown Error" }, /* 31 */
};
-#define DPP_RX_ERR "Read Rx Descriptor Parity checker Error"
-#define DPP_TX_ERR "Read Tx Descriptor Parity checker Error"
-
+static const char dpp_rx_err[] = "Read Rx Descriptor Parity checker Error";
+static const char dpp_tx_err[] = "Read Tx Descriptor Parity checker Error";
static const struct dwxgmac3_error_desc dwxgmac3_dma_dpp_errors[32] = {
- { true, "TDPES0", DPP_TX_ERR },
- { true, "TDPES1", DPP_TX_ERR },
- { true, "TDPES2", DPP_TX_ERR },
- { true, "TDPES3", DPP_TX_ERR },
- { true, "TDPES4", DPP_TX_ERR },
- { true, "TDPES5", DPP_TX_ERR },
- { true, "TDPES6", DPP_TX_ERR },
- { true, "TDPES7", DPP_TX_ERR },
- { true, "TDPES8", DPP_TX_ERR },
- { true, "TDPES9", DPP_TX_ERR },
- { true, "TDPES10", DPP_TX_ERR },
- { true, "TDPES11", DPP_TX_ERR },
- { true, "TDPES12", DPP_TX_ERR },
- { true, "TDPES13", DPP_TX_ERR },
- { true, "TDPES14", DPP_TX_ERR },
- { true, "TDPES15", DPP_TX_ERR },
- { true, "RDPES0", DPP_RX_ERR },
- { true, "RDPES1", DPP_RX_ERR },
- { true, "RDPES2", DPP_RX_ERR },
- { true, "RDPES3", DPP_RX_ERR },
- { true, "RDPES4", DPP_RX_ERR },
- { true, "RDPES5", DPP_RX_ERR },
- { true, "RDPES6", DPP_RX_ERR },
- { true, "RDPES7", DPP_RX_ERR },
- { true, "RDPES8", DPP_RX_ERR },
- { true, "RDPES9", DPP_RX_ERR },
- { true, "RDPES10", DPP_RX_ERR },
- { true, "RDPES11", DPP_RX_ERR },
- { true, "RDPES12", DPP_RX_ERR },
- { true, "RDPES13", DPP_RX_ERR },
- { true, "RDPES14", DPP_RX_ERR },
- { true, "RDPES15", DPP_RX_ERR },
+ { true, "TDPES0", dpp_tx_err },
+ { true, "TDPES1", dpp_tx_err },
+ { true, "TDPES2", dpp_tx_err },
+ { true, "TDPES3", dpp_tx_err },
+ { true, "TDPES4", dpp_tx_err },
+ { true, "TDPES5", dpp_tx_err },
+ { true, "TDPES6", dpp_tx_err },
+ { true, "TDPES7", dpp_tx_err },
+ { true, "TDPES8", dpp_tx_err },
+ { true, "TDPES9", dpp_tx_err },
+ { true, "TDPES10", dpp_tx_err },
+ { true, "TDPES11", dpp_tx_err },
+ { true, "TDPES12", dpp_tx_err },
+ { true, "TDPES13", dpp_tx_err },
+ { true, "TDPES14", dpp_tx_err },
+ { true, "TDPES15", dpp_tx_err },
+ { true, "RDPES0", dpp_rx_err },
+ { true, "RDPES1", dpp_rx_err },
+ { true, "RDPES2", dpp_rx_err },
+ { true, "RDPES3", dpp_rx_err },
+ { true, "RDPES4", dpp_rx_err },
+ { true, "RDPES5", dpp_rx_err },
+ { true, "RDPES6", dpp_rx_err },
+ { true, "RDPES7", dpp_rx_err },
+ { true, "RDPES8", dpp_rx_err },
+ { true, "RDPES9", dpp_rx_err },
+ { true, "RDPES10", dpp_rx_err },
+ { true, "RDPES11", dpp_rx_err },
+ { true, "RDPES12", dpp_rx_err },
+ { true, "RDPES13", dpp_rx_err },
+ { true, "RDPES14", dpp_rx_err },
+ { true, "RDPES15", dpp_rx_err },
};
static void dwxgmac3_handle_dma_err(struct net_device *ndev,
@@ -1301,19 +1232,6 @@ static void dwxgmac2_sarc_configure(void __iomem *ioaddr, int val)
writel(value, ioaddr + XGMAC_TX_CONFIG);
}
-static void dwxgmac2_enable_vlan(struct mac_device_info *hw, u32 type)
-{
- void __iomem *ioaddr = hw->pcsr;
- u32 value;
-
- value = readl(ioaddr + XGMAC_VLAN_INCL);
- value |= XGMAC_VLAN_VLTI;
- value |= XGMAC_VLAN_CSVL; /* Only use SVLAN */
- value &= ~XGMAC_VLAN_VLC;
- value |= (type << XGMAC_VLAN_VLC_SHIFT) & XGMAC_VLAN_VLC;
- writel(value, ioaddr + XGMAC_VLAN_INCL);
-}
-
static int dwxgmac2_filter_wait(struct mac_device_info *hw)
{
void __iomem *ioaddr = hw->pcsr;
@@ -1505,33 +1423,10 @@ static void dwxgmac2_set_arp_offload(struct mac_device_info *hw, bool en,
writel(value, ioaddr + XGMAC_RX_CONFIG);
}
-static void dwxgmac3_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
- u32 num_txq,
- u32 num_rxq, bool enable)
-{
- u32 value;
-
- if (!enable) {
- value = readl(ioaddr + XGMAC_FPE_CTRL_STS);
-
- value &= ~XGMAC_EFPE;
-
- writel(value, ioaddr + XGMAC_FPE_CTRL_STS);
- return;
- }
-
- value = readl(ioaddr + XGMAC_RXQ_CTRL1);
- value &= ~XGMAC_RQ;
- value |= (num_rxq - 1) << XGMAC_RQ_SHIFT;
- writel(value, ioaddr + XGMAC_RXQ_CTRL1);
-
- value = readl(ioaddr + XGMAC_FPE_CTRL_STS);
- value |= XGMAC_EFPE;
- writel(value, ioaddr + XGMAC_FPE_CTRL_STS);
-}
-
const struct stmmac_ops dwxgmac210_ops = {
.core_init = dwxgmac2_core_init,
+ .irq_modify = dwxgmac2_irq_modify,
+ .update_caps = dwxgmac2_update_caps,
.set_mac = dwxgmac2_set_mac,
.rx_ipc = dwxgmac2_rx_ipc,
.rx_queue_enable = dwxgmac2_rx_queue_enable,
@@ -1550,13 +1445,9 @@ const struct stmmac_ops dwxgmac210_ops = {
.pmt = dwxgmac2_pmt,
.set_umac_addr = dwxgmac2_set_umac_addr,
.get_umac_addr = dwxgmac2_get_umac_addr,
- .set_eee_mode = dwxgmac2_set_eee_mode,
- .reset_eee_mode = dwxgmac2_reset_eee_mode,
+ .set_lpi_mode = dwxgmac2_set_lpi_mode,
.set_eee_timer = dwxgmac2_set_eee_timer,
.set_eee_pls = dwxgmac2_set_eee_pls,
- .pcs_ctrl_ane = NULL,
- .pcs_rane = NULL,
- .pcs_get_adv_lp = NULL,
.debug = NULL,
.set_filter = dwxgmac2_set_filter,
.safety_feat_config = dwxgmac3_safety_feat_config,
@@ -1564,16 +1455,14 @@ const struct stmmac_ops dwxgmac210_ops = {
.safety_feat_dump = dwxgmac3_safety_feat_dump,
.set_mac_loopback = dwxgmac2_set_mac_loopback,
.rss_configure = dwxgmac2_rss_configure,
- .update_vlan_hash = dwxgmac2_update_vlan_hash,
.rxp_config = dwxgmac3_rxp_config,
.get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp,
.flex_pps_config = dwxgmac2_flex_pps_config,
.sarc_configure = dwxgmac2_sarc_configure,
- .enable_vlan = dwxgmac2_enable_vlan,
.config_l3_filter = dwxgmac2_config_l3_filter,
.config_l4_filter = dwxgmac2_config_l4_filter,
.set_arp_offload = dwxgmac2_set_arp_offload,
- .fpe_configure = dwxgmac3_fpe_configure,
+ .fpe_map_preemption_class = dwxgmac3_fpe_map_preemption_class,
};
static void dwxlgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
@@ -1592,6 +1481,7 @@ static void dwxlgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
const struct stmmac_ops dwxlgmac2_ops = {
.core_init = dwxgmac2_core_init,
+ .irq_modify = dwxgmac2_irq_modify,
.set_mac = dwxgmac2_set_mac,
.rx_ipc = dwxgmac2_rx_ipc,
.rx_queue_enable = dwxlgmac2_rx_queue_enable,
@@ -1610,13 +1500,9 @@ const struct stmmac_ops dwxlgmac2_ops = {
.pmt = dwxgmac2_pmt,
.set_umac_addr = dwxgmac2_set_umac_addr,
.get_umac_addr = dwxgmac2_get_umac_addr,
- .set_eee_mode = dwxgmac2_set_eee_mode,
- .reset_eee_mode = dwxgmac2_reset_eee_mode,
+ .set_lpi_mode = dwxgmac2_set_lpi_mode,
.set_eee_timer = dwxgmac2_set_eee_timer,
.set_eee_pls = dwxgmac2_set_eee_pls,
- .pcs_ctrl_ane = NULL,
- .pcs_rane = NULL,
- .pcs_get_adv_lp = NULL,
.debug = NULL,
.set_filter = dwxgmac2_set_filter,
.safety_feat_config = dwxgmac3_safety_feat_config,
@@ -1624,16 +1510,14 @@ const struct stmmac_ops dwxlgmac2_ops = {
.safety_feat_dump = dwxgmac3_safety_feat_dump,
.set_mac_loopback = dwxgmac2_set_mac_loopback,
.rss_configure = dwxgmac2_rss_configure,
- .update_vlan_hash = dwxgmac2_update_vlan_hash,
.rxp_config = dwxgmac3_rxp_config,
.get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp,
.flex_pps_config = dwxgmac2_flex_pps_config,
.sarc_configure = dwxgmac2_sarc_configure,
- .enable_vlan = dwxgmac2_enable_vlan,
.config_l3_filter = dwxgmac2_config_l3_filter,
.config_l4_filter = dwxgmac2_config_l4_filter,
.set_arp_offload = dwxgmac2_set_arp_offload,
- .fpe_configure = dwxgmac3_fpe_configure,
+ .fpe_map_preemption_class = dwxgmac3_fpe_map_preemption_class,
};
int dwxgmac2_setup(struct stmmac_priv *priv)
@@ -1652,8 +1536,8 @@ int dwxgmac2_setup(struct stmmac_priv *priv)
mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
- MAC_1000FD | MAC_2500FD | MAC_5000FD |
- MAC_10000FD;
+ MAC_10 | MAC_100 | MAC_1000FD |
+ MAC_2500FD | MAC_5000FD | MAC_10000FD;
mac->link.duplex = 0;
mac->link.speed10 = XGMAC_CONFIG_SS_10_MII;
mac->link.speed100 = XGMAC_CONFIG_SS_100_MII;
@@ -1672,6 +1556,7 @@ int dwxgmac2_setup(struct stmmac_priv *priv)
mac->mii.reg_mask = GENMASK(15, 0);
mac->mii.clk_csr_shift = 19;
mac->mii.clk_csr_mask = GENMASK(21, 19);
+ mac->num_vlan = stmmac_get_num_vlan(priv->ioaddr);
return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
index fc82862a612c..a2980482fcce 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
@@ -4,6 +4,7 @@
* stmmac XGMAC support.
*/
+#include <linux/bitfield.h>
#include <linux/stmmac.h>
#include "common.h"
#include "dwxgmac2.h"
@@ -56,10 +57,12 @@ static void dwxgmac2_set_tx_owner(struct dma_desc *p)
static void dwxgmac2_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
{
- p->des3 |= cpu_to_le32(XGMAC_RDES3_OWN);
+ u32 flags = XGMAC_RDES3_OWN;
if (!disable_rx_ic)
- p->des3 |= cpu_to_le32(XGMAC_RDES3_IOC);
+ flags |= XGMAC_RDES3_IOC;
+
+ p->des3 |= cpu_to_le32(flags);
}
static int dwxgmac2_get_tx_ls(struct dma_desc *p)
@@ -67,6 +70,21 @@ static int dwxgmac2_get_tx_ls(struct dma_desc *p)
return (le32_to_cpu(p->des3) & XGMAC_RDES3_LD) > 0;
}
+static u16 dwxgmac2_wrback_get_rx_vlan_tci(struct dma_desc *p)
+{
+ return le32_to_cpu(p->des0) & XGMAC_RDES0_VLAN_TAG_MASK;
+}
+
+static bool dwxgmac2_wrback_get_rx_vlan_valid(struct dma_desc *p)
+{
+ u32 et_lt;
+
+ et_lt = FIELD_GET(XGMAC_RDES3_ET_LT, le32_to_cpu(p->des3));
+
+ return et_lt >= XGMAC_ET_LT_VLAN_STAG &&
+ et_lt <= XGMAC_ET_LT_DVLAN_STAG_CTAG;
+}
+
static int dwxgmac2_get_rx_frame_len(struct dma_desc *p, int rx_coe)
{
return (le32_to_cpu(p->des3) & XGMAC_RDES3_PL);
@@ -349,6 +367,8 @@ const struct stmmac_desc_ops dwxgmac210_desc_ops = {
.set_tx_owner = dwxgmac2_set_tx_owner,
.set_rx_owner = dwxgmac2_set_rx_owner,
.get_tx_ls = dwxgmac2_get_tx_ls,
+ .get_rx_vlan_tci = dwxgmac2_wrback_get_rx_vlan_tci,
+ .get_rx_vlan_valid = dwxgmac2_wrback_get_rx_vlan_valid,
.get_rx_frame_len = dwxgmac2_get_rx_frame_len,
.enable_tx_timestamp = dwxgmac2_enable_tx_timestamp,
.get_tx_timestamp_status = dwxgmac2_get_tx_timestamp_status,
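Reviewer note: the new dwxgmac2_wrback_get_rx_vlan_valid() above is why this file now pulls in <linux/bitfield.h>; it extracts the descriptor's ET/LT packet-type code with FIELD_GET() and checks that the code falls inside the VLAN-tagged range. For readers unfamiliar with the bitfield helpers, a minimal standalone sketch of the same pattern follows; the DEMO_* mask and codes are invented for illustration, the real XGMAC_RDES3_ET_LT mask and XGMAC_ET_LT_* codes live in dwxgmac2.h.

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_ET_LT		GENMASK(19, 16)	/* hypothetical packet-type field */
#define DEMO_ET_LT_VLAN_FIRST	2		/* hypothetical first VLAN code */
#define DEMO_ET_LT_VLAN_LAST	5		/* hypothetical last VLAN code */

static bool demo_rx_vlan_valid(u32 rdes3)
{
	/* FIELD_GET() masks and shifts rdes3 down to the raw field value */
	u32 et_lt = FIELD_GET(DEMO_ET_LT, rdes3);

	return et_lt >= DEMO_ET_LT_VLAN_FIRST && et_lt <= DEMO_ET_LT_VLAN_LAST;
}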
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index dd2ab6185c40..cc1bdc0975d5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -20,7 +20,7 @@ static int dwxgmac2_dma_reset(void __iomem *ioaddr)
}
static void dwxgmac2_dma_init(void __iomem *ioaddr,
- struct stmmac_dma_cfg *dma_cfg, int atds)
+ struct stmmac_dma_cfg *dma_cfg)
{
u32 value = readl(ioaddr + XGMAC_DMA_SYSBUS_MODE);
@@ -84,7 +84,6 @@ static void dwxgmac2_dma_init_tx_chan(struct stmmac_priv *priv,
static void dwxgmac2_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
{
u32 value = readl(ioaddr + XGMAC_DMA_SYSBUS_MODE);
- int i;
if (axi->axi_lpi_en)
value |= XGMAC_EN_LPI;
@@ -102,32 +101,12 @@ static void dwxgmac2_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
if (!axi->axi_fb)
value |= XGMAC_UNDEF;
- value &= ~XGMAC_BLEN;
- for (i = 0; i < AXI_BLEN; i++) {
- switch (axi->axi_blen[i]) {
- case 256:
- value |= XGMAC_BLEN256;
- break;
- case 128:
- value |= XGMAC_BLEN128;
- break;
- case 64:
- value |= XGMAC_BLEN64;
- break;
- case 32:
- value |= XGMAC_BLEN32;
- break;
- case 16:
- value |= XGMAC_BLEN16;
- break;
- case 8:
- value |= XGMAC_BLEN8;
- break;
- case 4:
- value |= XGMAC_BLEN4;
- break;
- }
- }
+ /* Depending on the UNDEF bit the Master AXI will perform any burst
+ * length according to the BLEN programmed (by default all BLEN are
+ * set). Note that the UNDEF bit is readonly, and is the inverse of
+ * Bus Mode bit 16.
+ */
+ value = (value & ~DMA_AXI_BLEN_MASK) | axi->axi_blen_regval;
writel(value, ioaddr + XGMAC_DMA_SYSBUS_MODE);
writel(XGMAC_TDPS, ioaddr + XGMAC_TX_EDMA_CTRL);
@@ -203,10 +182,6 @@ static void dwxgmac2_dma_rx_mode(struct stmmac_priv *priv, void __iomem *ioaddr,
}
writel(value, ioaddr + XGMAC_MTL_RXQ_OPMODE(channel));
-
- /* Enable MTL RX overflow */
- value = readl(ioaddr + XGMAC_MTL_QINTEN(channel));
- writel(value | XGMAC_RXOIE, ioaddr + XGMAC_MTL_QINTEN(channel));
}
static void dwxgmac2_dma_tx_mode(struct stmmac_priv *priv, void __iomem *ioaddr,
@@ -364,19 +339,17 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv,
}
/* TX/RX NORMAL interrupts */
- if (likely(intr_status & XGMAC_NIS)) {
- if (likely(intr_status & XGMAC_RI)) {
- u64_stats_update_begin(&stats->syncp);
- u64_stats_inc(&stats->rx_normal_irq_n[chan]);
- u64_stats_update_end(&stats->syncp);
- ret |= handle_rx;
- }
- if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) {
- u64_stats_update_begin(&stats->syncp);
- u64_stats_inc(&stats->tx_normal_irq_n[chan]);
- u64_stats_update_end(&stats->syncp);
- ret |= handle_tx;
- }
+ if (likely(intr_status & XGMAC_RI)) {
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->rx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
+ ret |= handle_rx;
+ }
+ if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) {
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->tx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
+ ret |= handle_tx;
}
/* Clear interrupts */
@@ -388,8 +361,11 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv,
static int dwxgmac2_get_hw_feature(void __iomem *ioaddr,
struct dma_features *dma_cap)
{
+ struct stmmac_priv *priv;
u32 hw_cap;
+ priv = container_of(dma_cap, struct stmmac_priv, dma_cap);
+
/* MAC HW feature 0 */
hw_cap = readl(ioaddr + XGMAC_HW_FEATURE0);
dma_cap->edma = (hw_cap & XGMAC_HWFEAT_EDMA) >> 31;
@@ -412,6 +388,8 @@ static int dwxgmac2_get_hw_feature(void __iomem *ioaddr,
dma_cap->vlhash = (hw_cap & XGMAC_HWFEAT_VLHASH) >> 4;
dma_cap->half_duplex = (hw_cap & XGMAC_HWFEAT_HDSEL) >> 3;
dma_cap->mbps_1000 = (hw_cap & XGMAC_HWFEAT_GMIISEL) >> 1;
+ if (dma_cap->mbps_1000 && priv->synopsys_id >= DWXGMAC_CORE_2_20)
+ dma_cap->mbps_10_100 = 1;
/* MAC HW feature 1 */
hw_cap = readl(ioaddr + XGMAC_HW_FEATURE1);
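Reviewer note: the dwxgmac2_dma_axi() hunk above drops the per-call switch over axi->axi_blen[] and instead ORs in a precomputed axi_blen_regval. A hypothetical helper showing how such a value could be folded together once (for example at probe time) is sketched below; the DEMO_BLEN* bit positions are made up for illustration and do not match the real DMA_AXI_BLEN_* definitions in the dwmac headers.

#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_BLEN4	BIT(1)	/* hypothetical bit positions */
#define DEMO_BLEN8	BIT(2)
#define DEMO_BLEN16	BIT(3)
#define DEMO_BLEN32	BIT(4)
#define DEMO_BLEN64	BIT(5)
#define DEMO_BLEN128	BIT(6)
#define DEMO_BLEN256	BIT(7)

/* Build the burst-length register bits once from a platform's blen list */
static u32 demo_axi_blen_to_regval(const u32 *blen, int count)
{
	u32 regval = 0;
	int i;

	for (i = 0; i < count; i++) {
		switch (blen[i]) {
		case 4:   regval |= DEMO_BLEN4;   break;
		case 8:   regval |= DEMO_BLEN8;   break;
		case 16:  regval |= DEMO_BLEN16;  break;
		case 32:  regval |= DEMO_BLEN32;  break;
		case 64:  regval |= DEMO_BLEN64;  break;
		case 128: regval |= DEMO_BLEN128; break;
		case 256: regval |= DEMO_BLEN256; break;
		}
	}

	return regval;
}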
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c
index 29367105df54..014f7cd79a3c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.c
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c
@@ -6,34 +6,49 @@
#include "common.h"
#include "stmmac.h"
+#include "stmmac_fpe.h"
#include "stmmac_ptp.h"
#include "stmmac_est.h"
+#include "stmmac_vlan.h"
+#include "dwmac4_descs.h"
+#include "dwxgmac2.h"
-static u32 stmmac_get_id(struct stmmac_priv *priv, u32 id_reg)
+struct stmmac_version {
+ u8 snpsver;
+ u8 dev_id;
+};
+
+static void stmmac_get_version(struct stmmac_priv *priv,
+ struct stmmac_version *ver)
{
- u32 reg = readl(priv->ioaddr + id_reg);
+ enum dwmac_core_type core_type = priv->plat->core_type;
+ unsigned int version_offset;
+ u32 version;
- if (!reg) {
- dev_info(priv->device, "Version ID not available\n");
- return 0x0;
- }
+ ver->snpsver = 0;
+ ver->dev_id = 0;
- dev_info(priv->device, "User ID: 0x%x, Synopsys ID: 0x%x\n",
- (unsigned int)(reg & GENMASK(15, 8)) >> 8,
- (unsigned int)(reg & GENMASK(7, 0)));
- return reg & GENMASK(7, 0);
-}
+ if (core_type == DWMAC_CORE_MAC100)
+ return;
-static u32 stmmac_get_dev_id(struct stmmac_priv *priv, u32 id_reg)
-{
- u32 reg = readl(priv->ioaddr + id_reg);
+ if (core_type == DWMAC_CORE_GMAC)
+ version_offset = GMAC_VERSION;
+ else
+ version_offset = GMAC4_VERSION;
- if (!reg) {
+ version = readl(priv->ioaddr + version_offset);
+ if (version == 0) {
dev_info(priv->device, "Version ID not available\n");
- return 0x0;
+ return;
}
- return (reg & GENMASK(15, 8)) >> 8;
+ dev_info(priv->device, "User ID: 0x%x, Synopsys ID: 0x%x\n",
+ FIELD_GET(DWMAC_USERVER, version),
+ FIELD_GET(DWMAC_SNPSVER, version));
+
+ ver->snpsver = FIELD_GET(DWMAC_SNPSVER, version);
+ if (core_type == DWMAC_CORE_XGMAC)
+ ver->dev_id = FIELD_GET(DWMAC_USERVER, version);
}
static void stmmac_dwmac_mode_quirk(struct stmmac_priv *priv)
@@ -88,23 +103,19 @@ static int stmmac_dwxlgmac_quirks(struct stmmac_priv *priv)
return 0;
}
-int stmmac_reset(struct stmmac_priv *priv, void __iomem *ioaddr)
+int stmmac_reset(struct stmmac_priv *priv)
{
- struct plat_stmmacenet_data *plat = priv ? priv->plat : NULL;
-
- if (!priv)
- return -EINVAL;
+ struct plat_stmmacenet_data *plat = priv->plat;
+ void __iomem *ioaddr = priv->ioaddr;
if (plat && plat->fix_soc_reset)
- return plat->fix_soc_reset(plat, ioaddr);
+ return plat->fix_soc_reset(priv, ioaddr);
return stmmac_do_callback(priv, dma, reset, ioaddr);
}
static const struct stmmac_hwif_entry {
- bool gmac;
- bool gmac4;
- bool xgmac;
+ enum dwmac_core_type core_type;
u32 min_id;
u32 dev_id;
const struct stmmac_regs_off regs;
@@ -112,18 +123,18 @@ static const struct stmmac_hwif_entry {
const void *dma;
const void *mac;
const void *hwtimestamp;
+ const void *ptp;
const void *mode;
const void *tc;
const void *mmc;
const void *est;
+ const void *vlan;
int (*setup)(struct stmmac_priv *priv);
int (*quirks)(struct stmmac_priv *priv);
} stmmac_hw[] = {
/* NOTE: New HW versions shall go to the end of this table */
{
- .gmac = false,
- .gmac4 = false,
- .xgmac = false,
+ .core_type = DWMAC_CORE_MAC100,
.min_id = 0,
.regs = {
.ptp_off = PTP_GMAC3_X_OFFSET,
@@ -132,16 +143,15 @@ static const struct stmmac_hwif_entry {
.desc = NULL,
.dma = &dwmac100_dma_ops,
.mac = &dwmac100_ops,
- .hwtimestamp = &stmmac_ptp,
+ .hwtimestamp = &dwmac1000_ptp,
+ .ptp = &dwmac1000_ptp_clock_ops,
.mode = NULL,
.tc = NULL,
.mmc = &dwmac_mmc_ops,
.setup = dwmac100_setup,
.quirks = stmmac_dwmac1_quirks,
}, {
- .gmac = true,
- .gmac4 = false,
- .xgmac = false,
+ .core_type = DWMAC_CORE_GMAC,
.min_id = 0,
.regs = {
.ptp_off = PTP_GMAC3_X_OFFSET,
@@ -150,16 +160,15 @@ static const struct stmmac_hwif_entry {
.desc = NULL,
.dma = &dwmac1000_dma_ops,
.mac = &dwmac1000_ops,
- .hwtimestamp = &stmmac_ptp,
+ .hwtimestamp = &dwmac1000_ptp,
+ .ptp = &dwmac1000_ptp_clock_ops,
.mode = NULL,
.tc = NULL,
.mmc = &dwmac_mmc_ops,
.setup = dwmac1000_setup,
.quirks = stmmac_dwmac1_quirks,
}, {
- .gmac = false,
- .gmac4 = true,
- .xgmac = false,
+ .core_type = DWMAC_CORE_GMAC4,
.min_id = 0,
.regs = {
.ptp_off = PTP_GMAC4_OFFSET,
@@ -169,27 +178,30 @@ static const struct stmmac_hwif_entry {
.desc = &dwmac4_desc_ops,
.dma = &dwmac4_dma_ops,
.mac = &dwmac4_ops,
+ .vlan = &dwmac_vlan_ops,
.hwtimestamp = &stmmac_ptp,
+ .ptp = &stmmac_ptp_clock_ops,
.mode = NULL,
- .tc = &dwmac510_tc_ops,
+ .tc = &dwmac4_tc_ops,
.mmc = &dwmac_mmc_ops,
.est = &dwmac510_est_ops,
.setup = dwmac4_setup,
.quirks = stmmac_dwmac4_quirks,
}, {
- .gmac = false,
- .gmac4 = true,
- .xgmac = false,
+ .core_type = DWMAC_CORE_GMAC4,
.min_id = DWMAC_CORE_4_00,
.regs = {
.ptp_off = PTP_GMAC4_OFFSET,
.mmc_off = MMC_GMAC4_OFFSET,
.est_off = EST_GMAC4_OFFSET,
+ .fpe_reg = &dwmac5_fpe_reg,
},
.desc = &dwmac4_desc_ops,
.dma = &dwmac4_dma_ops,
.mac = &dwmac410_ops,
+ .vlan = &dwmac_vlan_ops,
.hwtimestamp = &stmmac_ptp,
+ .ptp = &stmmac_ptp_clock_ops,
.mode = &dwmac4_ring_mode_ops,
.tc = &dwmac510_tc_ops,
.mmc = &dwmac_mmc_ops,
@@ -197,19 +209,20 @@ static const struct stmmac_hwif_entry {
.setup = dwmac4_setup,
.quirks = NULL,
}, {
- .gmac = false,
- .gmac4 = true,
- .xgmac = false,
+ .core_type = DWMAC_CORE_GMAC4,
.min_id = DWMAC_CORE_4_10,
.regs = {
.ptp_off = PTP_GMAC4_OFFSET,
.mmc_off = MMC_GMAC4_OFFSET,
.est_off = EST_GMAC4_OFFSET,
+ .fpe_reg = &dwmac5_fpe_reg,
},
.desc = &dwmac4_desc_ops,
.dma = &dwmac410_dma_ops,
.mac = &dwmac410_ops,
+ .vlan = &dwmac_vlan_ops,
.hwtimestamp = &stmmac_ptp,
+ .ptp = &stmmac_ptp_clock_ops,
.mode = &dwmac4_ring_mode_ops,
.tc = &dwmac510_tc_ops,
.mmc = &dwmac_mmc_ops,
@@ -217,19 +230,20 @@ static const struct stmmac_hwif_entry {
.setup = dwmac4_setup,
.quirks = NULL,
}, {
- .gmac = false,
- .gmac4 = true,
- .xgmac = false,
+ .core_type = DWMAC_CORE_GMAC4,
.min_id = DWMAC_CORE_5_10,
.regs = {
.ptp_off = PTP_GMAC4_OFFSET,
.mmc_off = MMC_GMAC4_OFFSET,
.est_off = EST_GMAC4_OFFSET,
+ .fpe_reg = &dwmac5_fpe_reg,
},
.desc = &dwmac4_desc_ops,
.dma = &dwmac410_dma_ops,
.mac = &dwmac510_ops,
+ .vlan = &dwmac_vlan_ops,
.hwtimestamp = &stmmac_ptp,
+ .ptp = &stmmac_ptp_clock_ops,
.mode = &dwmac4_ring_mode_ops,
.tc = &dwmac510_tc_ops,
.mmc = &dwmac_mmc_ops,
@@ -237,20 +251,21 @@ static const struct stmmac_hwif_entry {
.setup = dwmac4_setup,
.quirks = NULL,
}, {
- .gmac = false,
- .gmac4 = false,
- .xgmac = true,
+ .core_type = DWMAC_CORE_XGMAC,
.min_id = DWXGMAC_CORE_2_10,
.dev_id = DWXGMAC_ID,
.regs = {
.ptp_off = PTP_XGMAC_OFFSET,
.mmc_off = MMC_XGMAC_OFFSET,
.est_off = EST_XGMAC_OFFSET,
+ .fpe_reg = &dwxgmac3_fpe_reg,
},
.desc = &dwxgmac210_desc_ops,
.dma = &dwxgmac210_dma_ops,
.mac = &dwxgmac210_ops,
+ .vlan = &dwxgmac210_vlan_ops,
.hwtimestamp = &stmmac_ptp,
+ .ptp = &stmmac_ptp_clock_ops,
.mode = NULL,
.tc = &dwmac510_tc_ops,
.mmc = &dwxgmac_mmc_ops,
@@ -258,20 +273,21 @@ static const struct stmmac_hwif_entry {
.setup = dwxgmac2_setup,
.quirks = NULL,
}, {
- .gmac = false,
- .gmac4 = false,
- .xgmac = true,
+ .core_type = DWMAC_CORE_XGMAC,
.min_id = DWXLGMAC_CORE_2_00,
.dev_id = DWXLGMAC_ID,
.regs = {
.ptp_off = PTP_XGMAC_OFFSET,
.mmc_off = MMC_XGMAC_OFFSET,
.est_off = EST_XGMAC_OFFSET,
+ .fpe_reg = &dwxgmac3_fpe_reg,
},
.desc = &dwxgmac210_desc_ops,
.dma = &dwxgmac210_dma_ops,
.mac = &dwxlgmac2_ops,
+ .vlan = &dwxlgmac2_vlan_ops,
.hwtimestamp = &stmmac_ptp,
+ .ptp = &stmmac_ptp_clock_ops,
.mode = NULL,
.tc = &dwmac510_tc_ops,
.mmc = &dwxgmac_mmc_ops,
@@ -281,96 +297,114 @@ static const struct stmmac_hwif_entry {
},
};
+static const struct stmmac_hwif_entry *
+stmmac_hwif_find(enum dwmac_core_type core_type, u8 snpsver, u8 dev_id)
+{
+ const struct stmmac_hwif_entry *entry;
+ int i;
+
+ for (i = ARRAY_SIZE(stmmac_hw) - 1; i >= 0; i--) {
+ entry = &stmmac_hw[i];
+
+ if (core_type != entry->core_type)
+ continue;
+ /* Use synopsys_id var because some setups can override this */
+ if (snpsver < entry->min_id)
+ continue;
+ if (core_type == DWMAC_CORE_XGMAC &&
+ dev_id != entry->dev_id)
+ continue;
+
+ return entry;
+ }
+
+ return NULL;
+}
+
int stmmac_hwif_init(struct stmmac_priv *priv)
{
- bool needs_xgmac = priv->plat->has_xgmac;
- bool needs_gmac4 = priv->plat->has_gmac4;
- bool needs_gmac = priv->plat->has_gmac;
+ enum dwmac_core_type core_type = priv->plat->core_type;
const struct stmmac_hwif_entry *entry;
+ struct stmmac_version version;
struct mac_device_info *mac;
bool needs_setup = true;
- u32 id, dev_id = 0;
- int i, ret;
-
- if (needs_gmac) {
- id = stmmac_get_id(priv, GMAC_VERSION);
- } else if (needs_gmac4 || needs_xgmac) {
- id = stmmac_get_id(priv, GMAC4_VERSION);
- if (needs_xgmac)
- dev_id = stmmac_get_dev_id(priv, GMAC4_VERSION);
- } else {
- id = 0;
- }
+ int ret;
+
+ stmmac_get_version(priv, &version);
/* Save ID for later use */
- priv->synopsys_id = id;
+ priv->synopsys_id = version.snpsver;
/* Lets assume some safe values first */
- priv->ptpaddr = priv->ioaddr +
- (needs_gmac4 ? PTP_GMAC4_OFFSET : PTP_GMAC3_X_OFFSET);
- priv->mmcaddr = priv->ioaddr +
- (needs_gmac4 ? MMC_GMAC4_OFFSET : MMC_GMAC3_X_OFFSET);
- if (needs_gmac4)
+ if (core_type == DWMAC_CORE_GMAC4) {
+ priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
+ priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
priv->estaddr = priv->ioaddr + EST_GMAC4_OFFSET;
- else if (needs_xgmac)
- priv->estaddr = priv->ioaddr + EST_XGMAC_OFFSET;
-
- /* Check for HW specific setup first */
- if (priv->plat->setup) {
- mac = priv->plat->setup(priv);
- needs_setup = false;
} else {
- mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL);
+ priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
+ priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
+ if (core_type == DWMAC_CORE_XGMAC)
+ priv->estaddr = priv->ioaddr + EST_XGMAC_OFFSET;
}
+ mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL);
if (!mac)
return -ENOMEM;
+ /* Check for HW specific setup first */
+ if (priv->plat->mac_setup) {
+ ret = priv->plat->mac_setup(priv, mac);
+ if (ret)
+ return ret;
+
+ needs_setup = false;
+ }
+
+ spin_lock_init(&mac->irq_ctrl_lock);
+
/* Fallback to generic HW */
- for (i = ARRAY_SIZE(stmmac_hw) - 1; i >= 0; i--) {
- entry = &stmmac_hw[i];
- if (needs_gmac ^ entry->gmac)
- continue;
- if (needs_gmac4 ^ entry->gmac4)
- continue;
- if (needs_xgmac ^ entry->xgmac)
- continue;
- /* Use synopsys_id var because some setups can override this */
- if (priv->synopsys_id < entry->min_id)
- continue;
- if (needs_xgmac && (dev_id ^ entry->dev_id))
- continue;
+ /* Use synopsys_id var because some setups can override this */
+ entry = stmmac_hwif_find(core_type, priv->synopsys_id, version.dev_id);
+ if (!entry) {
+ dev_err(priv->device,
+ "Failed to find HW IF (id=0x%x, gmac=%d/%d)\n",
+ version.snpsver, core_type == DWMAC_CORE_GMAC,
+ core_type == DWMAC_CORE_GMAC4);
- /* Only use generic HW helpers if needed */
- mac->desc = mac->desc ? : entry->desc;
- mac->dma = mac->dma ? : entry->dma;
- mac->mac = mac->mac ? : entry->mac;
- mac->ptp = mac->ptp ? : entry->hwtimestamp;
- mac->mode = mac->mode ? : entry->mode;
- mac->tc = mac->tc ? : entry->tc;
- mac->mmc = mac->mmc ? : entry->mmc;
- mac->est = mac->est ? : entry->est;
-
- priv->hw = mac;
- priv->ptpaddr = priv->ioaddr + entry->regs.ptp_off;
- priv->mmcaddr = priv->ioaddr + entry->regs.mmc_off;
- if (entry->est)
- priv->estaddr = priv->ioaddr + entry->regs.est_off;
-
- /* Entry found */
- if (needs_setup) {
- ret = entry->setup(priv);
- if (ret)
- return ret;
- }
+ return -EINVAL;
+ }
- /* Save quirks, if needed for posterior use */
- priv->hwif_quirks = entry->quirks;
- return 0;
+ /* Only use generic HW helpers if needed */
+ mac->desc = mac->desc ? : entry->desc;
+ mac->dma = mac->dma ? : entry->dma;
+ mac->mac = mac->mac ? : entry->mac;
+ mac->ptp = mac->ptp ? : entry->hwtimestamp;
+ mac->mode = mac->mode ? : entry->mode;
+ mac->tc = mac->tc ? : entry->tc;
+ mac->mmc = mac->mmc ? : entry->mmc;
+ mac->est = mac->est ? : entry->est;
+ mac->vlan = mac->vlan ? : entry->vlan;
+
+ priv->hw = mac;
+ priv->fpe_cfg.reg = entry->regs.fpe_reg;
+ priv->ptpaddr = priv->ioaddr + entry->regs.ptp_off;
+ priv->mmcaddr = priv->ioaddr + entry->regs.mmc_off;
+ memcpy(&priv->ptp_clock_ops, entry->ptp,
+ sizeof(struct ptp_clock_info));
+
+ if (entry->est)
+ priv->estaddr = priv->ioaddr + entry->regs.est_off;
+
+ /* Entry found */
+ if (needs_setup) {
+ ret = entry->setup(priv);
+ if (ret)
+ return ret;
}
- dev_err(priv->device, "Failed to find HW IF (id=0x%x, gmac=%d/%d)\n",
- id, needs_gmac, needs_gmac4);
- return -EINVAL;
+ /* Save quirks, if needed for posterior use */
+ priv->hwif_quirks = entry->quirks;
+
+ return 0;
}
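Reviewer note: two idioms in the reworked stmmac_hwif_init()/stmmac_hwif_find() above are worth calling out. First, the lookup walks stmmac_hw[] from the end, so the newest matching core wins; this is why the table comment insists that new HW versions go at the end. Second, the ops fallback assignments use the GNU C conditional with an omitted middle operand, "a ? : b", which yields a when a is non-NULL and b otherwise, evaluating a only once. A tiny sketch of that second idiom:

/* Minimal illustration of the "?:" fallback used for the ops pointers. */
static const void *demo_pick_or_default(const void *preferred,
					const void *fallback)
{
	/* Same as: preferred ? preferred : fallback, without re-evaluating */
	return preferred ? : fallback;
}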
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index 7be04b54738b..df6e8a567b1f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -7,6 +7,7 @@
#include <linux/netdevice.h>
#include <linux/stmmac.h>
+#include <net/pkt_cls.h>
#define stmmac_do_void_callback(__priv, __module, __cname, __arg0, __args...) \
({ \
@@ -28,6 +29,8 @@
struct stmmac_extra_stats;
struct stmmac_priv;
struct stmmac_safety_stats;
+struct stmmac_fpe_cfg;
+enum stmmac_mpacket_type;
struct dma_desc;
struct dma_extended_desc;
struct dma_edesc;
@@ -175,8 +178,7 @@ struct dma_features;
struct stmmac_dma_ops {
/* DMA core initialization */
int (*reset)(void __iomem *ioaddr);
- void (*init)(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg,
- int atds);
+ void (*init)(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg);
void (*init_chan)(struct stmmac_priv *priv, void __iomem *ioaddr,
struct stmmac_dma_cfg *dma_cfg, u32 chan);
void (*init_rx_chan)(struct stmmac_priv *priv, void __iomem *ioaddr,
@@ -198,7 +200,8 @@ struct stmmac_dma_ops {
/* To track extra statistic (if supported) */
void (*dma_diagnostic_fr)(struct stmmac_extra_stats *x,
void __iomem *ioaddr);
- void (*enable_dma_transmission) (void __iomem *ioaddr);
+ void (*enable_dma_transmission)(void __iomem *ioaddr, u32 chan);
+ void (*enable_dma_reception)(void __iomem *ioaddr, u32 chan);
void (*enable_dma_irq)(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 chan, bool rx, bool tx);
void (*disable_dma_irq)(struct stmmac_priv *priv, void __iomem *ioaddr,
@@ -259,6 +262,8 @@ struct stmmac_dma_ops {
stmmac_do_void_callback(__priv, dma, dma_diagnostic_fr, __args)
#define stmmac_enable_dma_transmission(__priv, __args...) \
stmmac_do_void_callback(__priv, dma, enable_dma_transmission, __args)
+#define stmmac_enable_dma_reception(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, enable_dma_reception, __args)
#define stmmac_enable_dma_irq(__priv, __args...) \
stmmac_do_void_callback(__priv, dma, enable_dma_irq, __priv, __args)
#define stmmac_disable_dma_irq(__priv, __args...) \
@@ -298,18 +303,27 @@ struct stmmac_dma_ops {
struct mac_device_info;
struct net_device;
-struct rgmii_adv;
struct stmmac_tc_entry;
struct stmmac_pps_cfg;
struct stmmac_rss;
struct stmmac_est;
+enum stmmac_lpi_mode {
+ STMMAC_LPI_DISABLE,
+ STMMAC_LPI_FORCED,
+ STMMAC_LPI_TIMER,
+};
+
/* Helpers to program the MAC core */
struct stmmac_ops {
+ /* Initialise any PCS instances */
+ int (*pcs_init)(struct stmmac_priv *priv);
/* MAC core initialization */
void (*core_init)(struct mac_device_info *hw, struct net_device *dev);
- /* Get phylink capabilities */
- void (*phylink_get_caps)(struct stmmac_priv *priv);
+ /* Update MAC capabilities */
+ void (*update_caps)(struct stmmac_priv *priv);
+ /* Change the interrupt enable setting. Enable takes precedence. */
+ void (*irq_modify)(struct mac_device_info *hw, u32 disable, u32 enable);
/* Enable the MAC RX/TX */
void (*set_mac)(void __iomem *ioaddr, bool enable);
/* Enable and verify that the IPC module is supported */
@@ -358,20 +372,17 @@ struct stmmac_ops {
unsigned int reg_n);
void (*get_umac_addr)(struct mac_device_info *hw, unsigned char *addr,
unsigned int reg_n);
- void (*set_eee_mode)(struct mac_device_info *hw,
- bool en_tx_lpi_clockgating);
- void (*reset_eee_mode)(struct mac_device_info *hw);
- void (*set_eee_lpi_entry_timer)(struct mac_device_info *hw, int et);
+ int (*set_lpi_mode)(struct mac_device_info *hw,
+ enum stmmac_lpi_mode mode,
+ bool en_tx_lpi_clockgating, u32 et);
void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw);
void (*set_eee_pls)(struct mac_device_info *hw, int link);
void (*debug)(struct stmmac_priv *priv, void __iomem *ioaddr,
struct stmmac_extra_stats *x, u32 rx_queues,
u32 tx_queues);
/* PCS calls */
- void (*pcs_ctrl_ane)(void __iomem *ioaddr, bool ane, bool srgmi_ral,
- bool loopback);
- void (*pcs_rane)(void __iomem *ioaddr, bool restart);
- void (*pcs_get_adv_lp)(void __iomem *ioaddr, struct rgmii_adv *adv);
+ void (*pcs_ctrl_ane)(struct stmmac_priv *priv, bool ane,
+ bool srgmi_ral);
/* Safety Features */
int (*safety_feat_config)(void __iomem *ioaddr, unsigned int asp,
struct stmmac_safety_feature_cfg *safety_cfg);
@@ -392,21 +403,6 @@ struct stmmac_ops {
/* RSS */
int (*rss_configure)(struct mac_device_info *hw,
struct stmmac_rss *cfg, u32 num_rxq);
- /* VLAN */
- void (*update_vlan_hash)(struct mac_device_info *hw, u32 hash,
- __le16 perfect_match, bool is_double);
- void (*enable_vlan)(struct mac_device_info *hw, u32 type);
- void (*rx_hw_vlan)(struct mac_device_info *hw, struct dma_desc *rx_desc,
- struct sk_buff *skb);
- void (*set_hw_vlan_mode)(struct mac_device_info *hw);
- int (*add_hw_vlan_rx_fltr)(struct net_device *dev,
- struct mac_device_info *hw,
- __be16 proto, u16 vid);
- int (*del_hw_vlan_rx_fltr)(struct net_device *dev,
- struct mac_device_info *hw,
- __be16 proto, u16 vid);
- void (*restore_hw_vlan_rx_fltr)(struct net_device *dev,
- struct mac_device_info *hw);
/* TX Timestamp */
int (*get_mac_tx_timestamp)(struct mac_device_info *hw, u64 *ts);
/* Source Address Insertion / Replacement */
@@ -419,19 +415,19 @@ struct stmmac_ops {
bool en, bool udp, bool sa, bool inv,
u32 match);
void (*set_arp_offload)(struct mac_device_info *hw, bool en, u32 addr);
- void (*fpe_configure)(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
- u32 num_txq, u32 num_rxq,
- bool enable);
- void (*fpe_send_mpacket)(void __iomem *ioaddr,
- struct stmmac_fpe_cfg *cfg,
- enum stmmac_mpacket_type type);
- int (*fpe_irq_status)(void __iomem *ioaddr, struct net_device *dev);
+ int (*fpe_map_preemption_class)(struct net_device *ndev,
+ struct netlink_ext_ack *extack,
+ u32 pclass);
};
+#define stmmac_mac_pcs_init(__priv) \
+ stmmac_do_callback(__priv, mac, pcs_init, __priv)
#define stmmac_core_init(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, core_init, __args)
-#define stmmac_mac_phylink_get_caps(__priv) \
- stmmac_do_void_callback(__priv, mac, phylink_get_caps, __priv)
+#define stmmac_mac_update_caps(__priv) \
+ stmmac_do_void_callback(__priv, mac, update_caps, __priv)
+#define stmmac_mac_irq_modify(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, irq_modify, (__priv)->hw, __args)
#define stmmac_mac_set(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, set_mac, __args)
#define stmmac_rx_ipc(__priv, __args...) \
@@ -470,12 +466,8 @@ struct stmmac_ops {
stmmac_do_void_callback(__priv, mac, set_umac_addr, __args)
#define stmmac_get_umac_addr(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, get_umac_addr, __args)
-#define stmmac_set_eee_mode(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, set_eee_mode, __args)
-#define stmmac_reset_eee_mode(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, reset_eee_mode, __args)
-#define stmmac_set_eee_lpi_timer(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, set_eee_lpi_entry_timer, __args)
+#define stmmac_set_lpi_mode(__priv, __args...) \
+ stmmac_do_callback(__priv, mac, set_lpi_mode, __args)
#define stmmac_set_eee_timer(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, set_eee_timer, __args)
#define stmmac_set_eee_pls(__priv, __args...) \
@@ -483,11 +475,7 @@ struct stmmac_ops {
#define stmmac_mac_debug(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, debug, __priv, __args)
#define stmmac_pcs_ctrl_ane(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, pcs_ctrl_ane, __args)
-#define stmmac_pcs_rane(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, pcs_rane, __priv, __args)
-#define stmmac_pcs_get_adv_lp(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, pcs_get_adv_lp, __args)
+ stmmac_do_void_callback(__priv, mac, pcs_ctrl_ane, __priv, __args)
#define stmmac_safety_feat_config(__priv, __args...) \
stmmac_do_callback(__priv, mac, safety_feat_config, __args)
#define stmmac_safety_feat_irq_status(__priv, __args...) \
@@ -502,20 +490,6 @@ struct stmmac_ops {
stmmac_do_void_callback(__priv, mac, set_mac_loopback, __args)
#define stmmac_rss_configure(__priv, __args...) \
stmmac_do_callback(__priv, mac, rss_configure, __args)
-#define stmmac_update_vlan_hash(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, update_vlan_hash, __args)
-#define stmmac_enable_vlan(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, enable_vlan, __args)
-#define stmmac_rx_hw_vlan(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, rx_hw_vlan, __args)
-#define stmmac_set_hw_vlan_mode(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, set_hw_vlan_mode, __args)
-#define stmmac_add_hw_vlan_rx_fltr(__priv, __args...) \
- stmmac_do_callback(__priv, mac, add_hw_vlan_rx_fltr, __args)
-#define stmmac_del_hw_vlan_rx_fltr(__priv, __args...) \
- stmmac_do_callback(__priv, mac, del_hw_vlan_rx_fltr, __args)
-#define stmmac_restore_hw_vlan_rx_fltr(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, restore_hw_vlan_rx_fltr, __args)
#define stmmac_get_mac_tx_timestamp(__priv, __args...) \
stmmac_do_callback(__priv, mac, get_mac_tx_timestamp, __args)
#define stmmac_sarc_configure(__priv, __args...) \
@@ -526,12 +500,8 @@ struct stmmac_ops {
stmmac_do_callback(__priv, mac, config_l4_filter, __args)
#define stmmac_set_arp_offload(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, set_arp_offload, __args)
-#define stmmac_fpe_configure(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, fpe_configure, __args)
-#define stmmac_fpe_send_mpacket(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, fpe_send_mpacket, __args)
-#define stmmac_fpe_irq_status(__priv, __args...) \
- stmmac_do_callback(__priv, mac, fpe_irq_status, __args)
+#define stmmac_fpe_map_preemption_class(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, fpe_map_preemption_class, __args)
/* PTP and HW Timer helpers */
struct stmmac_hwtimestamp {
@@ -574,7 +544,7 @@ struct stmmac_rx_queue;
struct stmmac_mode_ops {
void (*init) (void *des, dma_addr_t phy_addr, unsigned int size,
unsigned int extend_desc);
- unsigned int (*is_jumbo_frm) (int len, int ehn_desc);
+ bool (*is_jumbo_frm)(unsigned int len, bool enh_desc);
int (*jumbo_frm)(struct stmmac_tx_queue *tx_q, struct sk_buff *skb,
int csum);
int (*set_16kib_bfsize)(int mtu);
@@ -619,6 +589,8 @@ struct stmmac_tc_ops {
struct tc_etf_qopt_offload *qopt);
int (*query_caps)(struct stmmac_priv *priv,
struct tc_query_caps_base *base);
+ int (*setup_mqprio)(struct stmmac_priv *priv,
+ struct tc_mqprio_qopt_offload *qopt);
};
#define stmmac_tc_init(__priv, __args...) \
@@ -635,6 +607,8 @@ struct stmmac_tc_ops {
stmmac_do_callback(__priv, tc, setup_etf, __args)
#define stmmac_tc_query_caps(__priv, __args...) \
stmmac_do_callback(__priv, tc, query_caps, __args)
+#define stmmac_tc_setup_mqprio(__priv, __args...) \
+ stmmac_do_callback(__priv, tc, setup_mqprio, __args)
struct stmmac_counters;
@@ -663,12 +637,55 @@ struct stmmac_est_ops {
#define stmmac_est_irq_status(__priv, __args...) \
stmmac_do_void_callback(__priv, est, irq_status, __args)
+struct stmmac_vlan_ops {
+ /* VLAN */
+ void (*update_vlan_hash)(struct mac_device_info *hw, u32 hash,
+ u16 perfect_match, bool is_double);
+ void (*enable_vlan)(struct mac_device_info *hw, u32 type);
+ void (*rx_hw_vlan)(struct mac_device_info *hw, struct dma_desc *rx_desc,
+ struct sk_buff *skb);
+ void (*set_hw_vlan_mode)(struct mac_device_info *hw);
+ int (*add_hw_vlan_rx_fltr)(struct net_device *dev,
+ struct mac_device_info *hw,
+ __be16 proto, u16 vid);
+ int (*del_hw_vlan_rx_fltr)(struct net_device *dev,
+ struct mac_device_info *hw,
+ __be16 proto, u16 vid);
+ void (*restore_hw_vlan_rx_fltr)(struct net_device *dev,
+ struct mac_device_info *hw);
+};
+
+#define stmmac_update_vlan_hash(__priv, __args...) \
+ stmmac_do_void_callback(__priv, vlan, update_vlan_hash, __args)
+#define stmmac_enable_vlan(__priv, __args...) \
+ stmmac_do_void_callback(__priv, vlan, enable_vlan, __args)
+#define stmmac_rx_hw_vlan(__priv, __args...) \
+ stmmac_do_void_callback(__priv, vlan, rx_hw_vlan, __args)
+#define stmmac_set_hw_vlan_mode(__priv, __args...) \
+ stmmac_do_void_callback(__priv, vlan, set_hw_vlan_mode, __args)
+#define stmmac_add_hw_vlan_rx_fltr(__priv, __args...) \
+ stmmac_do_callback(__priv, vlan, add_hw_vlan_rx_fltr, __args)
+#define stmmac_del_hw_vlan_rx_fltr(__priv, __args...) \
+ stmmac_do_callback(__priv, vlan, del_hw_vlan_rx_fltr, __args)
+#define stmmac_restore_hw_vlan_rx_fltr(__priv, __args...) \
+ stmmac_do_void_callback(__priv, vlan, restore_hw_vlan_rx_fltr, __args)
+
struct stmmac_regs_off {
+ const struct stmmac_fpe_reg *fpe_reg;
u32 ptp_off;
u32 mmc_off;
u32 est_off;
};
+extern const struct stmmac_desc_ops enh_desc_ops;
+extern const struct stmmac_desc_ops ndesc_ops;
+
+extern const struct stmmac_hwtimestamp stmmac_ptp;
+extern const struct stmmac_hwtimestamp dwmac1000_ptp;
+
+extern const struct stmmac_mode_ops ring_mode_ops;
+extern const struct stmmac_mode_ops chain_mode_ops;
+
extern const struct stmmac_ops dwmac100_ops;
extern const struct stmmac_dma_ops dwmac100_dma_ops;
extern const struct stmmac_ops dwmac1000_ops;
@@ -678,19 +695,13 @@ extern const struct stmmac_dma_ops dwmac4_dma_ops;
extern const struct stmmac_ops dwmac410_ops;
extern const struct stmmac_dma_ops dwmac410_dma_ops;
extern const struct stmmac_ops dwmac510_ops;
+extern const struct stmmac_tc_ops dwmac4_tc_ops;
extern const struct stmmac_tc_ops dwmac510_tc_ops;
-extern const struct stmmac_ops dwxgmac210_ops;
-extern const struct stmmac_ops dwxlgmac2_ops;
-extern const struct stmmac_dma_ops dwxgmac210_dma_ops;
-extern const struct stmmac_desc_ops dwxgmac210_desc_ops;
-extern const struct stmmac_mmc_ops dwmac_mmc_ops;
-extern const struct stmmac_mmc_ops dwxgmac_mmc_ops;
-extern const struct stmmac_est_ops dwmac510_est_ops;
#define GMAC_VERSION 0x00000020 /* GMAC CORE Version */
#define GMAC4_VERSION 0x00000110 /* GMAC4+ CORE Version */
-int stmmac_reset(struct stmmac_priv *priv, void __iomem *ioaddr);
+int stmmac_reset(struct stmmac_priv *priv);
int stmmac_hwif_init(struct stmmac_priv *priv);
#endif /* __STMMAC_HWIF_H__ */
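Reviewer note: with the VLAN callbacks moved into the new stmmac_vlan_ops table, wrappers such as stmmac_enable_vlan() now dispatch through priv->hw->vlan instead of priv->hw->mac. The stmmac_do_callback()/stmmac_do_void_callback() macros near the top of this header implement that dispatch; their full bodies are not visible in this hunk, so the sketch below is only an approximation of the pattern (check that the ops table and the callback exist, then call it, otherwise report an error), not the exact in-tree macro.

/* Approximate shape of the callback-dispatch macros, for illustration only. */
#define demo_do_callback(__priv, __module, __cname, __args...)		\
({									\
	int __result = -EINVAL;					\
									\
	if ((__priv)->hw->__module && (__priv)->hw->__module->__cname)	\
		__result = (__priv)->hw->__module->__cname(__args);	\
									\
	__result;							\
})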
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h
index 5d1ea3e07459..1cba39fb2c44 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc.h
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h
@@ -139,4 +139,7 @@ struct stmmac_counters {
unsigned int mmc_rx_fpe_fragment_cntr;
};
+extern const struct stmmac_mmc_ops dwmac_mmc_ops;
+extern const struct stmmac_mmc_ops dwxgmac_mmc_ops;
+
#endif /* __MMC_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index d218412ca832..382d94a3b972 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -91,14 +91,9 @@ static int jumbo_frm(struct stmmac_tx_queue *tx_q, struct sk_buff *skb,
return entry;
}
-static unsigned int is_jumbo_frm(int len, int enh_desc)
+static bool is_jumbo_frm(unsigned int len, bool enh_desc)
{
- unsigned int ret = 0;
-
- if (len >= BUF_SIZE_4KiB)
- ret = 1;
-
- return ret;
+ return len >= BUF_SIZE_4KiB;
}
static void refill_desc3(struct stmmac_rx_queue *rx_q, struct dma_desc *p)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index dddcaa9220cc..012b0a477255 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -25,6 +25,8 @@
#include <net/xdp.h>
#include <uapi/linux/bpf.h>
+struct stmmac_pcs;
+
struct stmmac_resources {
void __iomem *addr;
u8 mac[ETH_ALEN];
@@ -106,6 +108,8 @@ struct stmmac_metadata_request {
struct stmmac_priv *priv;
struct dma_desc *tx_desc;
bool *set_ic;
+ struct dma_edesc *edesc;
+ int tbs;
};
struct stmmac_xsk_tx_complete {
@@ -126,7 +130,7 @@ struct stmmac_rx_queue {
unsigned int cur_rx;
unsigned int dirty_rx;
unsigned int buf_alloc_num;
- u32 rx_zeroc_thresh;
+ unsigned int napi_skb_frag_size;
dma_addr_t dma_rx_phy;
u32 rx_tail_addr;
unsigned int state_saved;
@@ -146,6 +150,12 @@ struct stmmac_channel {
u32 index;
};
+struct stmmac_fpe_cfg {
+ struct ethtool_mmsv mmsv;
+ const struct stmmac_fpe_reg *reg;
+ u32 fpe_csr; /* MAC_FPE_CTRL_STS reg cache */
+};
+
struct stmmac_tc_entry {
bool in_use;
bool in_hw;
@@ -221,22 +231,35 @@ struct stmmac_dma_conf {
unsigned int dma_tx_size;
};
+#define EST_GCL 1024
+struct stmmac_est {
+ int enable;
+ u32 btr_reserve[2];
+ u32 btr_offset[2];
+ u32 btr[2];
+ u32 ctr[2];
+ u32 ter;
+ u32 gcl_unaligned[EST_GCL];
+ u32 gcl[EST_GCL];
+ u32 gcl_size;
+ u32 max_sdu[MTL_MAX_TX_QUEUES];
+};
+
struct stmmac_priv {
/* Frequently used values are kept adjacent for cache effect */
u32 tx_coal_frames[MTL_MAX_TX_QUEUES];
u32 tx_coal_timer[MTL_MAX_TX_QUEUES];
- u32 rx_coal_frames[MTL_MAX_TX_QUEUES];
+ u32 rx_coal_frames[MTL_MAX_RX_QUEUES];
int hwts_tx_en;
bool tx_path_in_lpi_mode;
bool tso;
- int sph;
- int sph_cap;
+ bool sph_active;
+ bool sph_capable;
u32 sarc_type;
-
- unsigned int rx_copybreak;
- u32 rx_riwt[MTL_MAX_TX_QUEUES];
+ u32 rx_riwt[MTL_MAX_RX_QUEUES];
int hwts_rx_en;
+ bool tsfupdt_coarse;
void __iomem *ioaddr;
struct net_device *dev;
@@ -250,38 +273,40 @@ struct stmmac_priv {
/* Generic channel for NAPI */
struct stmmac_channel channel[STMMAC_CH_MAX];
- int speed;
- unsigned int flow_ctrl;
- unsigned int pause;
+ unsigned int pause_time;
struct mii_bus *mii;
+ struct stmmac_pcs *integrated_pcs;
+
struct phylink_config phylink_config;
struct phylink *phylink;
struct stmmac_extra_stats xstats ____cacheline_aligned_in_smp;
struct stmmac_safety_stats sstats;
struct plat_stmmacenet_data *plat;
+ /* Protect est parameters */
+ struct mutex est_lock;
+ struct stmmac_est *est;
struct dma_features dma_cap;
struct stmmac_counters mmc;
int hw_cap_support;
int synopsys_id;
u32 msg_enable;
+ /* Our MAC Wake-on-Lan options */
int wolopts;
int wol_irq;
- bool wol_irq_disabled;
- int clk_csr;
+ u32 gmii_address_bus_config;
struct timer_list eee_ctrl_timer;
int lpi_irq;
- int eee_enabled;
- int eee_active;
- int tx_lpi_timer;
- int tx_lpi_enabled;
- int eee_tw_timer;
+ u32 tx_lpi_timer;
+ bool tx_lpi_clk_stop;
+ bool eee_enabled;
+ bool eee_active;
bool eee_sw_timer_en;
unsigned int mode;
unsigned int chain_mode;
int extend_desc;
- struct hwtstamp_config tstamp_config;
+ struct kernel_hwtstamp_config tstamp_config;
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_clock_ops;
unsigned int default_addend;
@@ -311,7 +336,7 @@ struct stmmac_priv {
char int_name_sfty[IFNAMSIZ + 10];
char int_name_sfty_ce[IFNAMSIZ + 10];
char int_name_sfty_ue[IFNAMSIZ + 10];
- char int_name_rx_irq[MTL_MAX_TX_QUEUES][IFNAMSIZ + 14];
+ char int_name_rx_irq[MTL_MAX_RX_QUEUES][IFNAMSIZ + 14];
char int_name_tx_irq[MTL_MAX_TX_QUEUES][IFNAMSIZ + 18];
#ifdef CONFIG_DEBUG_FS
@@ -322,11 +347,8 @@ struct stmmac_priv {
struct workqueue_struct *wq;
struct work_struct service_task;
- /* Workqueue for handling FPE hand-shaking */
- unsigned long fpe_task_state;
- struct workqueue_struct *fpe_wq;
- struct work_struct fpe_task;
- char wq_name[IFNAMSIZ + 4];
+ /* Frame Preemption feature (FPE) */
+ struct stmmac_fpe_cfg fpe_cfg;
/* TC Handling */
unsigned int tc_entries_max;
@@ -348,6 +370,8 @@ struct stmmac_priv {
/* XDP BPF Program */
unsigned long *af_xdp_zc_qps;
struct bpf_prog *xdp_prog;
+
+ struct devlink *devlink;
};
enum stmmac_state {
@@ -357,43 +381,40 @@ enum stmmac_state {
STMMAC_SERVICE_SCHED,
};
+extern const struct dev_pm_ops stmmac_simple_pm_ops;
+
int stmmac_mdio_unregister(struct net_device *ndev);
int stmmac_mdio_register(struct net_device *ndev);
int stmmac_mdio_reset(struct mii_bus *mii);
-int stmmac_xpcs_setup(struct mii_bus *mii);
+void stmmac_mdio_lock(struct stmmac_priv *priv);
+void stmmac_mdio_unlock(struct stmmac_priv *priv);
+int stmmac_pcs_setup(struct net_device *ndev);
+void stmmac_pcs_clean(struct net_device *ndev);
void stmmac_set_ethtool_ops(struct net_device *netdev);
-int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags);
void stmmac_ptp_register(struct stmmac_priv *priv);
void stmmac_ptp_unregister(struct stmmac_priv *priv);
int stmmac_xdp_open(struct net_device *dev);
void stmmac_xdp_release(struct net_device *dev);
+int stmmac_get_phy_intf_sel(phy_interface_t interface);
int stmmac_resume(struct device *dev);
int stmmac_suspend(struct device *dev);
void stmmac_dvr_remove(struct device *dev);
int stmmac_dvr_probe(struct device *device,
struct plat_stmmacenet_data *plat_dat,
struct stmmac_resources *res);
-void stmmac_disable_eee_mode(struct stmmac_priv *priv);
-bool stmmac_eee_init(struct stmmac_priv *priv);
int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt);
int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size);
-int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled);
-void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable);
+int stmmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed);
+
+struct plat_stmmacenet_data *stmmac_plat_dat_alloc(struct device *dev);
static inline bool stmmac_xdp_is_enabled(struct stmmac_priv *priv)
{
return !!priv->xdp_prog;
}
-static inline unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
-{
- if (stmmac_xdp_is_enabled(priv))
- return XDP_PACKET_HEADROOM;
-
- return 0;
-}
-
void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue);
void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue);
void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c
index c9693f77e1f6..afc516059b89 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c
@@ -32,6 +32,11 @@ static int est_configure(struct stmmac_priv *priv, struct stmmac_est *cfg,
int i, ret = 0;
u32 ctrl;
+ if (!ptp_rate) {
+ netdev_warn(priv->dev, "Invalid PTP rate");
+ return -EINVAL;
+ }
+
ret |= est_write(est_addr, EST_BTR_LOW, cfg->btr[0], false);
ret |= est_write(est_addr, EST_BTR_HIGH, cfg->btr[1], false);
ret |= est_write(est_addr, EST_TER, cfg->ter, false);
@@ -48,7 +53,7 @@ static int est_configure(struct stmmac_priv *priv, struct stmmac_est *cfg,
}
ctrl = readl(est_addr + EST_CONTROL);
- if (priv->plat->has_xgmac) {
+ if (priv->plat->core_type == DWMAC_CORE_XGMAC) {
ctrl &= ~EST_XGMAC_PTOV;
ctrl |= ((NSEC_PER_SEC / ptp_rate) * EST_XGMAC_PTOV_MUL) <<
EST_XGMAC_PTOV_SHIFT;
@@ -58,7 +63,7 @@ static int est_configure(struct stmmac_priv *priv, struct stmmac_est *cfg,
EST_GMAC5_PTOV_SHIFT;
}
if (cfg->enable)
- ctrl |= EST_EEST | EST_SSWL;
+ ctrl |= EST_EEST | EST_SSWL | EST_DFBS;
else
ctrl &= ~EST_EEST;
@@ -104,6 +109,10 @@ static void est_irq_status(struct stmmac_priv *priv, struct net_device *dev,
x->mtl_est_hlbs++;
+ for (i = 0; i < txqcnt; i++)
+ if (value & BIT(i))
+ x->mtl_est_txq_hlbs[i]++;
+
/* Clear Interrupt */
writel(value, est_addr + EST_SCH_ERR);
@@ -126,10 +135,9 @@ static void est_irq_status(struct stmmac_priv *priv, struct net_device *dev,
x->mtl_est_hlbf++;
- for (i = 0; i < txqcnt; i++) {
+ for (i = 0; i < txqcnt; i++)
if (feqn & BIT(i))
x->mtl_est_txq_hlbf[i]++;
- }
/* Clear Interrupt */
writel(feqn, est_addr + EST_FRM_SZ_ERR);
@@ -140,7 +148,7 @@ static void est_irq_status(struct stmmac_priv *priv, struct net_device *dev,
}
if (status & EST_BTRE) {
- if (priv->plat->has_xgmac) {
+ if (priv->plat->core_type == DWMAC_CORE_XGMAC) {
btrl = FIELD_GET(EST_XGMAC_BTRL, status);
btrl_max = FIELD_MAX(EST_XGMAC_BTRL);
} else {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h
index 7a858c566e7e..f70221c9c84a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h
@@ -16,6 +16,7 @@
#define EST_XGMAC_PTOV_MUL 9
#define EST_SSWL BIT(1)
#define EST_EEST BIT(0)
+#define EST_DFBS BIT(5)
#define EST_STATUS 0x00000008
#define EST_GMAC5_BTRL GENMASK(11, 8)
@@ -62,3 +63,5 @@
#define EST_SRWO BIT(0)
#define EST_GCL_DATA 0x00000034
+
+extern const struct stmmac_est_ops dwmac510_est_ops;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 542e2633a6f5..b155e71aac51 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -11,12 +11,13 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/mii.h>
#include <linux/phylink.h>
#include <linux/net_tstamp.h>
-#include <asm/io.h>
#include "stmmac.h"
+#include "stmmac_fpe.h"
#include "dwmac_dma.h"
#include "dwxgmac2.h"
@@ -36,7 +37,7 @@
#define ETHTOOL_DMA_OFFSET 55
struct stmmac_stats {
- char stat_string[ETH_GSTRING_LEN];
+ char stat_string[ETH_GSTRING_LEN] __nonstring;
int sizeof_stat;
int stat_offset;
};
@@ -302,9 +303,10 @@ static void stmmac_ethtool_getdrvinfo(struct net_device *dev,
{
struct stmmac_priv *priv = netdev_priv(dev);
- if (priv->plat->has_gmac || priv->plat->has_gmac4)
+ if (priv->plat->core_type == DWMAC_CORE_GMAC ||
+ priv->plat->core_type == DWMAC_CORE_GMAC4)
strscpy(info->driver, GMAC_ETHTOOL_NAME, sizeof(info->driver));
- else if (priv->plat->has_xgmac)
+ else if (priv->plat->core_type == DWMAC_CORE_XGMAC)
strscpy(info->driver, XGMAC_ETHTOOL_NAME, sizeof(info->driver));
else
strscpy(info->driver, MAC100_ETHTOOL_NAME,
@@ -321,84 +323,6 @@ static int stmmac_ethtool_get_link_ksettings(struct net_device *dev,
{
struct stmmac_priv *priv = netdev_priv(dev);
- if (!(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS) &&
- (priv->hw->pcs & STMMAC_PCS_RGMII ||
- priv->hw->pcs & STMMAC_PCS_SGMII)) {
- struct rgmii_adv adv;
- u32 supported, advertising, lp_advertising;
-
- if (!priv->xstats.pcs_link) {
- cmd->base.speed = SPEED_UNKNOWN;
- cmd->base.duplex = DUPLEX_UNKNOWN;
- return 0;
- }
- cmd->base.duplex = priv->xstats.pcs_duplex;
-
- cmd->base.speed = priv->xstats.pcs_speed;
-
- /* Get and convert ADV/LP_ADV from the HW AN registers */
- if (stmmac_pcs_get_adv_lp(priv, priv->ioaddr, &adv))
- return -EOPNOTSUPP; /* should never happen indeed */
-
- /* Encoding of PSE bits is defined in 802.3z, 37.2.1.4 */
-
- ethtool_convert_link_mode_to_legacy_u32(
- &supported, cmd->link_modes.supported);
- ethtool_convert_link_mode_to_legacy_u32(
- &advertising, cmd->link_modes.advertising);
- ethtool_convert_link_mode_to_legacy_u32(
- &lp_advertising, cmd->link_modes.lp_advertising);
-
- if (adv.pause & STMMAC_PCS_PAUSE)
- advertising |= ADVERTISED_Pause;
- if (adv.pause & STMMAC_PCS_ASYM_PAUSE)
- advertising |= ADVERTISED_Asym_Pause;
- if (adv.lp_pause & STMMAC_PCS_PAUSE)
- lp_advertising |= ADVERTISED_Pause;
- if (adv.lp_pause & STMMAC_PCS_ASYM_PAUSE)
- lp_advertising |= ADVERTISED_Asym_Pause;
-
- /* Reg49[3] always set because ANE is always supported */
- cmd->base.autoneg = ADVERTISED_Autoneg;
- supported |= SUPPORTED_Autoneg;
- advertising |= ADVERTISED_Autoneg;
- lp_advertising |= ADVERTISED_Autoneg;
-
- if (adv.duplex) {
- supported |= (SUPPORTED_1000baseT_Full |
- SUPPORTED_100baseT_Full |
- SUPPORTED_10baseT_Full);
- advertising |= (ADVERTISED_1000baseT_Full |
- ADVERTISED_100baseT_Full |
- ADVERTISED_10baseT_Full);
- } else {
- supported |= (SUPPORTED_1000baseT_Half |
- SUPPORTED_100baseT_Half |
- SUPPORTED_10baseT_Half);
- advertising |= (ADVERTISED_1000baseT_Half |
- ADVERTISED_100baseT_Half |
- ADVERTISED_10baseT_Half);
- }
- if (adv.lp_duplex)
- lp_advertising |= (ADVERTISED_1000baseT_Full |
- ADVERTISED_100baseT_Full |
- ADVERTISED_10baseT_Full);
- else
- lp_advertising |= (ADVERTISED_1000baseT_Half |
- ADVERTISED_100baseT_Half |
- ADVERTISED_10baseT_Half);
- cmd->base.port = PORT_OTHER;
-
- ethtool_convert_legacy_u32_to_link_mode(
- cmd->link_modes.supported, supported);
- ethtool_convert_legacy_u32_to_link_mode(
- cmd->link_modes.advertising, advertising);
- ethtool_convert_legacy_u32_to_link_mode(
- cmd->link_modes.lp_advertising, lp_advertising);
-
- return 0;
- }
-
return phylink_ethtool_ksettings_get(priv->phylink, cmd);
}
@@ -408,20 +332,6 @@ stmmac_ethtool_set_link_ksettings(struct net_device *dev,
{
struct stmmac_priv *priv = netdev_priv(dev);
- if (!(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS) &&
- (priv->hw->pcs & STMMAC_PCS_RGMII ||
- priv->hw->pcs & STMMAC_PCS_SGMII)) {
- /* Only support ANE */
- if (cmd->base.autoneg != AUTONEG_ENABLE)
- return -EINVAL;
-
- mutex_lock(&priv->lock);
- stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
- mutex_unlock(&priv->lock);
-
- return 0;
- }
-
return phylink_ethtool_ksettings_set(priv->phylink, cmd);
}
@@ -438,20 +348,13 @@ static void stmmac_ethtool_setmsglevel(struct net_device *dev, u32 level)
}
-static int stmmac_check_if_running(struct net_device *dev)
-{
- if (!netif_running(dev))
- return -EBUSY;
- return 0;
-}
-
static int stmmac_ethtool_get_regs_len(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
- if (priv->plat->has_xgmac)
+ if (priv->plat->core_type == DWMAC_CORE_XGMAC)
return XGMAC_REGSIZE * 4;
- else if (priv->plat->has_gmac4)
+ else if (priv->plat->core_type == DWMAC_CORE_GMAC4)
return GMAC4_REG_SPACE_SIZE;
return REG_SPACE_SIZE;
}
@@ -466,12 +369,12 @@ static void stmmac_ethtool_gregs(struct net_device *dev,
stmmac_dump_dma_regs(priv, priv->ioaddr, reg_space);
/* Copy DMA registers to where ethtool expects them */
- if (priv->plat->has_gmac4) {
+ if (priv->plat->core_type == DWMAC_CORE_GMAC4) {
/* GMAC4 dumps its DMA registers at its DMA_CHAN_BASE_ADDR */
memcpy(&reg_space[ETHTOOL_DMA_OFFSET],
&reg_space[GMAC4_DMA_CHAN_BASE_ADDR / 4],
NUM_DWMAC4_DMA_REGS * 4);
- } else if (!priv->plat->has_xgmac) {
+ } else if (priv->plat->core_type != DWMAC_CORE_XGMAC) {
memcpy(&reg_space[ETHTOOL_DMA_OFFSET],
&reg_space[DMA_BUS_MODE / 4],
NUM_DWMAC1000_DMA_REGS * 4);
@@ -521,15 +424,8 @@ stmmac_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct stmmac_priv *priv = netdev_priv(netdev);
- struct rgmii_adv adv_lp;
- if (priv->hw->pcs && !stmmac_pcs_get_adv_lp(priv, priv->ioaddr, &adv_lp)) {
- pause->autoneg = 1;
- if (!adv_lp.pause)
- return;
- } else {
- phylink_ethtool_get_pauseparam(priv->phylink, pause);
- }
+ phylink_ethtool_get_pauseparam(priv->phylink, pause);
}
static int
@@ -537,16 +433,8 @@ stmmac_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct stmmac_priv *priv = netdev_priv(netdev);
- struct rgmii_adv adv_lp;
- if (priv->hw->pcs && !stmmac_pcs_get_adv_lp(priv, priv->ioaddr, &adv_lp)) {
- pause->autoneg = 1;
- if (!adv_lp.pause)
- return -EOPNOTSUPP;
- return 0;
- } else {
- return phylink_ethtool_set_pauseparam(priv->phylink, pause);
- }
+ return phylink_ethtool_set_pauseparam(priv->phylink, pause);
}
static u64 stmmac_get_rx_normal_irq_n(struct stmmac_priv *priv, int q)
@@ -660,7 +548,7 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
(*(u32 *)p);
}
}
- if (priv->eee_enabled) {
+ if (priv->dma_cap.eee) {
int val = phylink_get_eee_err(priv->phylink);
if (val)
priv->xstats.phy_eee_wakeup_error_n = val;
@@ -836,64 +724,14 @@ static void stmmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct stmmac_priv *priv = netdev_priv(dev);
- if (!priv->plat->pmt)
- return phylink_ethtool_get_wol(priv->phylink, wol);
-
- mutex_lock(&priv->lock);
- if (device_can_wakeup(priv->device)) {
- wol->supported = WAKE_MAGIC | WAKE_UCAST;
- if (priv->hw_cap_support && !priv->dma_cap.pmt_magic_frame)
- wol->supported &= ~WAKE_MAGIC;
- wol->wolopts = priv->wolopts;
- }
- mutex_unlock(&priv->lock);
+ return phylink_ethtool_get_wol(priv->phylink, wol);
}
static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct stmmac_priv *priv = netdev_priv(dev);
- u32 support = WAKE_MAGIC | WAKE_UCAST;
-
- if (!device_can_wakeup(priv->device))
- return -EOPNOTSUPP;
-
- if (!priv->plat->pmt) {
- int ret = phylink_ethtool_set_wol(priv->phylink, wol);
-
- if (!ret)
- device_set_wakeup_enable(priv->device, !!wol->wolopts);
- return ret;
- }
-
- /* By default almost all GMAC devices support the WoL via
- * magic frame but we can disable it if the HW capability
- * register shows no support for pmt_magic_frame. */
- if ((priv->hw_cap_support) && (!priv->dma_cap.pmt_magic_frame))
- wol->wolopts &= ~WAKE_MAGIC;
-
- if (wol->wolopts & ~support)
- return -EINVAL;
-
- if (wol->wolopts) {
- pr_info("stmmac: wakeup enable\n");
- device_set_wakeup_enable(priv->device, 1);
- /* Avoid unbalanced enable_irq_wake calls */
- if (priv->wol_irq_disabled)
- enable_irq_wake(priv->wol_irq);
- priv->wol_irq_disabled = false;
- } else {
- device_set_wakeup_enable(priv->device, 0);
- /* Avoid unbalanced disable_irq_wake calls */
- if (!priv->wol_irq_disabled)
- disable_irq_wake(priv->wol_irq);
- priv->wol_irq_disabled = true;
- }
- mutex_lock(&priv->lock);
- priv->wolopts = wol->wolopts;
- mutex_unlock(&priv->lock);
-
- return 0;
+ return phylink_ethtool_set_wol(priv->phylink, wol);
}
static int stmmac_ethtool_op_get_eee(struct net_device *dev,
@@ -901,12 +739,6 @@ static int stmmac_ethtool_op_get_eee(struct net_device *dev,
{
struct stmmac_priv *priv = netdev_priv(dev);
- if (!priv->dma_cap.eee)
- return -EOPNOTSUPP;
-
- edata->tx_lpi_timer = priv->tx_lpi_timer;
- edata->tx_lpi_enabled = priv->tx_lpi_enabled;
-
return phylink_ethtool_get_eee(priv->phylink, edata);
}
@@ -914,29 +746,8 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev,
struct ethtool_keee *edata)
{
struct stmmac_priv *priv = netdev_priv(dev);
- int ret;
-
- if (!priv->dma_cap.eee)
- return -EOPNOTSUPP;
-
- if (priv->tx_lpi_enabled != edata->tx_lpi_enabled)
- netdev_warn(priv->dev,
- "Setting EEE tx-lpi is not supported\n");
-
- if (!edata->eee_enabled)
- stmmac_disable_eee_mode(priv);
- ret = phylink_ethtool_set_eee(priv->phylink, edata);
- if (ret)
- return ret;
-
- if (edata->eee_enabled &&
- priv->tx_lpi_timer != edata->tx_lpi_timer) {
- priv->tx_lpi_timer = edata->tx_lpi_timer;
- stmmac_eee_init(priv);
- }
-
- return 0;
+ return phylink_ethtool_set_eee(priv->phylink, edata);
}
static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
@@ -1199,7 +1010,7 @@ static int stmmac_set_channels(struct net_device *dev,
}
static int stmmac_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct stmmac_priv *priv = netdev_priv(dev);
@@ -1207,13 +1018,13 @@ static int stmmac_get_ts_info(struct net_device *dev,
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
if (priv->ptp_clock)
info->phc_index = ptp_clock_index(priv->ptp_clock);
+ else
+ info->phc_index = 0;
info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
@@ -1233,47 +1044,64 @@ static int stmmac_get_ts_info(struct net_device *dev,
return ethtool_op_get_ts_info(dev, info);
}
-static int stmmac_get_tunable(struct net_device *dev,
- const struct ethtool_tunable *tuna, void *data)
+static int stmmac_get_mm(struct net_device *ndev,
+ struct ethtool_mm_state *state)
{
- struct stmmac_priv *priv = netdev_priv(dev);
- int ret = 0;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ u32 frag_size;
- switch (tuna->id) {
- case ETHTOOL_RX_COPYBREAK:
- *(u32 *)data = priv->rx_copybreak;
- break;
- default:
- ret = -EINVAL;
- break;
- }
+ if (!stmmac_fpe_supported(priv))
+ return -EOPNOTSUPP;
+
+ state->rx_min_frag_size = ETH_ZLEN;
+ frag_size = stmmac_fpe_get_add_frag_size(priv);
+ state->tx_min_frag_size = ethtool_mm_frag_size_add_to_min(frag_size);
- return ret;
+ ethtool_mmsv_get_mm(&priv->fpe_cfg.mmsv, state);
+
+ return 0;
}
-static int stmmac_set_tunable(struct net_device *dev,
- const struct ethtool_tunable *tuna,
- const void *data)
+static int stmmac_set_mm(struct net_device *ndev, struct ethtool_mm_cfg *cfg,
+ struct netlink_ext_ack *extack)
{
- struct stmmac_priv *priv = netdev_priv(dev);
- int ret = 0;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ u32 frag_size;
+ int err;
- switch (tuna->id) {
- case ETHTOOL_RX_COPYBREAK:
- priv->rx_copybreak = *(u32 *)data;
- break;
- default:
- ret = -EINVAL;
- break;
- }
+ err = ethtool_mm_frag_size_min_to_add(cfg->tx_min_frag_size,
+ &frag_size, extack);
+ if (err)
+ return err;
+
+ stmmac_fpe_set_add_frag_size(priv, frag_size);
+ ethtool_mmsv_set_mm(&priv->fpe_cfg.mmsv, cfg);
+
+ return 0;
+}
+
+static void stmmac_get_mm_stats(struct net_device *ndev,
+ struct ethtool_mm_stats *s)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ struct stmmac_counters *mmc = &priv->mmc;
+
+ if (!priv->dma_cap.rmon)
+ return;
+
+ stmmac_mmc_read(priv, priv->mmcaddr, mmc);
- return ret;
+ s->MACMergeFrameAssErrorCount = mmc->mmc_rx_packet_assembly_err_cntr;
+ s->MACMergeFrameAssOkCount = mmc->mmc_rx_packet_assembly_ok_cntr;
+ s->MACMergeFrameSmdErrorCount = mmc->mmc_rx_packet_smd_err_cntr;
+ s->MACMergeFragCountRx = mmc->mmc_rx_fpe_fragment_cntr;
+ s->MACMergeFragCountTx = mmc->mmc_tx_fpe_fragment_cntr;
+ s->MACMergeHoldCount = mmc->mmc_tx_hold_req_cntr;
}
static const struct ethtool_ops stmmac_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
- .begin = stmmac_check_if_running,
.get_drvinfo = stmmac_ethtool_getdrvinfo,
.get_msglevel = stmmac_ethtool_getmsglevel,
.set_msglevel = stmmac_ethtool_setmsglevel,
@@ -1305,10 +1133,11 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
.set_per_queue_coalesce = stmmac_set_per_queue_coalesce,
.get_channels = stmmac_get_channels,
.set_channels = stmmac_set_channels,
- .get_tunable = stmmac_get_tunable,
- .set_tunable = stmmac_set_tunable,
.get_link_ksettings = stmmac_ethtool_get_link_ksettings,
.set_link_ksettings = stmmac_ethtool_set_link_ksettings,
+ .get_mm = stmmac_get_mm,
+ .set_mm = stmmac_set_mm,
+ .get_mm_stats = stmmac_get_mm_stats,
};
void stmmac_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c
new file mode 100644
index 000000000000..c54c70224351
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c
@@ -0,0 +1,314 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Furong Xu <0x1207@gmail.com>
+ * stmmac FPE(802.3 Qbu) handling
+ */
+#include "stmmac.h"
+#include "stmmac_fpe.h"
+#include "dwmac4.h"
+#include "dwmac5.h"
+#include "dwxgmac2.h"
+
+#define GMAC5_MAC_FPE_CTRL_STS 0x00000234
+#define XGMAC_MAC_FPE_CTRL_STS 0x00000280
+
+#define GMAC5_MTL_FPE_CTRL_STS 0x00000c90
+#define XGMAC_MTL_FPE_CTRL_STS 0x00001090
+/* Preemption Classification */
+#define FPE_MTL_PREEMPTION_CLASS GENMASK(15, 8)
+/* Additional Fragment Size of preempted frames */
+#define FPE_MTL_ADD_FRAG_SZ GENMASK(1, 0)
+
+#define STMMAC_MAC_FPE_CTRL_STS_TRSP BIT(19)
+#define STMMAC_MAC_FPE_CTRL_STS_TVER BIT(18)
+#define STMMAC_MAC_FPE_CTRL_STS_RRSP BIT(17)
+#define STMMAC_MAC_FPE_CTRL_STS_RVER BIT(16)
+#define STMMAC_MAC_FPE_CTRL_STS_SRSP BIT(2)
+#define STMMAC_MAC_FPE_CTRL_STS_SVER BIT(1)
+#define STMMAC_MAC_FPE_CTRL_STS_EFPE BIT(0)
+
+struct stmmac_fpe_reg {
+ const u32 mac_fpe_reg; /* offset of MAC_FPE_CTRL_STS */
+ const u32 mtl_fpe_reg; /* offset of MTL_FPE_CTRL_STS */
+ const u32 rxq_ctrl1_reg; /* offset of MAC_RxQ_Ctrl1 */
+ const u32 fprq_mask; /* Frame Preemption Residue Queue */
+ const u32 int_en_reg; /* offset of MAC_Interrupt_Enable */
+ const u32 int_en_bit; /* Frame Preemption Interrupt Enable */
+};
+
+bool stmmac_fpe_supported(struct stmmac_priv *priv)
+{
+ return priv->dma_cap.fpesel && priv->fpe_cfg.reg &&
+ priv->hw->mac->fpe_map_preemption_class;
+}
+
+static void stmmac_fpe_configure_tx(struct ethtool_mmsv *mmsv, bool tx_enable)
+{
+ struct stmmac_fpe_cfg *cfg = container_of(mmsv, struct stmmac_fpe_cfg, mmsv);
+ struct stmmac_priv *priv = container_of(cfg, struct stmmac_priv, fpe_cfg);
+ const struct stmmac_fpe_reg *reg = cfg->reg;
+ u32 num_rxq = priv->plat->rx_queues_to_use;
+ void __iomem *ioaddr = priv->ioaddr;
+ u32 value;
+
+ if (tx_enable) {
+ cfg->fpe_csr = STMMAC_MAC_FPE_CTRL_STS_EFPE;
+ value = readl(ioaddr + reg->rxq_ctrl1_reg);
+ value &= ~reg->fprq_mask;
+ /* Keep this SHIFT, FIELD_PREP() expects a constant mask :-/ */
+ value |= (num_rxq - 1) << __ffs(reg->fprq_mask);
+ writel(value, ioaddr + reg->rxq_ctrl1_reg);
+ } else {
+ cfg->fpe_csr = 0;
+ }
+ writel(cfg->fpe_csr, ioaddr + reg->mac_fpe_reg);
+}
+
+static void stmmac_fpe_configure_pmac(struct ethtool_mmsv *mmsv, bool pmac_enable)
+{
+ struct stmmac_fpe_cfg *cfg = container_of(mmsv, struct stmmac_fpe_cfg, mmsv);
+ struct stmmac_priv *priv = container_of(cfg, struct stmmac_priv, fpe_cfg);
+ const struct stmmac_fpe_reg *reg = cfg->reg;
+ void __iomem *ioaddr = priv->ioaddr;
+ unsigned long flags;
+ u32 value;
+
+ spin_lock_irqsave(&priv->hw->irq_ctrl_lock, flags);
+ value = readl(ioaddr + reg->int_en_reg);
+
+ if (pmac_enable) {
+ if (!(value & reg->int_en_bit)) {
+ /* Dummy read to clear any pending masked interrupts */
+ readl(ioaddr + reg->mac_fpe_reg);
+
+ value |= reg->int_en_bit;
+ }
+ } else {
+ value &= ~reg->int_en_bit;
+ }
+
+ writel(value, ioaddr + reg->int_en_reg);
+ spin_unlock_irqrestore(&priv->hw->irq_ctrl_lock, flags);
+}
+
+static void stmmac_fpe_send_mpacket(struct ethtool_mmsv *mmsv,
+ enum ethtool_mpacket type)
+{
+ struct stmmac_fpe_cfg *cfg = container_of(mmsv, struct stmmac_fpe_cfg, mmsv);
+ struct stmmac_priv *priv = container_of(cfg, struct stmmac_priv, fpe_cfg);
+ const struct stmmac_fpe_reg *reg = cfg->reg;
+ void __iomem *ioaddr = priv->ioaddr;
+ u32 value = cfg->fpe_csr;
+
+ if (type == ETHTOOL_MPACKET_VERIFY)
+ value |= STMMAC_MAC_FPE_CTRL_STS_SVER;
+ else if (type == ETHTOOL_MPACKET_RESPONSE)
+ value |= STMMAC_MAC_FPE_CTRL_STS_SRSP;
+
+ writel(value, ioaddr + reg->mac_fpe_reg);
+}
+
+static const struct ethtool_mmsv_ops stmmac_mmsv_ops = {
+ .configure_tx = stmmac_fpe_configure_tx,
+ .configure_pmac = stmmac_fpe_configure_pmac,
+ .send_mpacket = stmmac_fpe_send_mpacket,
+};
+
+static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
+{
+ struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
+ struct ethtool_mmsv *mmsv = &fpe_cfg->mmsv;
+
+ if (status == FPE_EVENT_UNKNOWN)
+ return;
+
+ if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER)
+ ethtool_mmsv_event_handle(mmsv, ETHTOOL_MMSV_LP_SENT_VERIFY_MPACKET);
+
+ if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER)
+ ethtool_mmsv_event_handle(mmsv, ETHTOOL_MMSV_LD_SENT_VERIFY_MPACKET);
+
+ if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
+ ethtool_mmsv_event_handle(mmsv, ETHTOOL_MMSV_LP_SENT_RESPONSE_MPACKET);
+}
+
+void stmmac_fpe_irq_status(struct stmmac_priv *priv)
+{
+ const struct stmmac_fpe_reg *reg = priv->fpe_cfg.reg;
+ void __iomem *ioaddr = priv->ioaddr;
+ struct net_device *dev = priv->dev;
+ int status = FPE_EVENT_UNKNOWN;
+ u32 value;
+
+ /* Reads from the MAC_FPE_CTRL_STS register should only be performed
+ * here, since the status flags of MAC_FPE_CTRL_STS are "clear on read"
+ */
+ value = readl(ioaddr + reg->mac_fpe_reg);
+
+ if (value & STMMAC_MAC_FPE_CTRL_STS_TRSP) {
+ status |= FPE_EVENT_TRSP;
+ netdev_dbg(dev, "FPE: Respond mPacket is transmitted\n");
+ }
+
+ if (value & STMMAC_MAC_FPE_CTRL_STS_TVER) {
+ status |= FPE_EVENT_TVER;
+ netdev_dbg(dev, "FPE: Verify mPacket is transmitted\n");
+ }
+
+ if (value & STMMAC_MAC_FPE_CTRL_STS_RRSP) {
+ status |= FPE_EVENT_RRSP;
+ netdev_dbg(dev, "FPE: Respond mPacket is received\n");
+ }
+
+ if (value & STMMAC_MAC_FPE_CTRL_STS_RVER) {
+ status |= FPE_EVENT_RVER;
+ netdev_dbg(dev, "FPE: Verify mPacket is received\n");
+ }
+
+ stmmac_fpe_event_status(priv, status);
+}
+
+void stmmac_fpe_init(struct stmmac_priv *priv)
+{
+ ethtool_mmsv_init(&priv->fpe_cfg.mmsv, priv->dev,
+ &stmmac_mmsv_ops);
+
+ if ((!priv->fpe_cfg.reg || !priv->hw->mac->fpe_map_preemption_class) &&
+ priv->dma_cap.fpesel)
+ dev_info(priv->device, "FPE is not supported by driver.\n");
+}
+
+int stmmac_fpe_get_add_frag_size(struct stmmac_priv *priv)
+{
+ const struct stmmac_fpe_reg *reg = priv->fpe_cfg.reg;
+ void __iomem *ioaddr = priv->ioaddr;
+
+ return FIELD_GET(FPE_MTL_ADD_FRAG_SZ, readl(ioaddr + reg->mtl_fpe_reg));
+}
+
+void stmmac_fpe_set_add_frag_size(struct stmmac_priv *priv, u32 add_frag_size)
+{
+ const struct stmmac_fpe_reg *reg = priv->fpe_cfg.reg;
+ void __iomem *ioaddr = priv->ioaddr;
+ u32 value;
+
+ value = readl(ioaddr + reg->mtl_fpe_reg);
+ writel(u32_replace_bits(value, add_frag_size, FPE_MTL_ADD_FRAG_SZ),
+ ioaddr + reg->mtl_fpe_reg);
+}
+
+#define ALG_ERR_MSG "TX algorithm SP is not suitable for one-to-many mapping"
+#define WEIGHT_ERR_MSG "TXQ weight %u differs across other TXQs in TC: [%u]"
+
+int dwmac5_fpe_map_preemption_class(struct net_device *ndev,
+ struct netlink_ext_ack *extack, u32 pclass)
+{
+ u32 val, offset, count, queue_weight, preemptible_txqs = 0;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ int num_tc = netdev_get_num_tc(ndev);
+
+ if (!pclass)
+ goto update_mapping;
+
+ /* DWMAC CORE4+ can not program TC:TXQ mapping to hardware.
+ *
+ * Synopsys Databook:
+ * "The number of Tx DMA channels is equal to the number of Tx queues,
+ * and is direct one-to-one mapping."
+ */
+ for (u32 tc = 0; tc < num_tc; tc++) {
+ count = ndev->tc_to_txq[tc].count;
+ offset = ndev->tc_to_txq[tc].offset;
+
+ if (pclass & BIT(tc))
+ preemptible_txqs |= GENMASK(offset + count - 1, offset);
+
+ /* This is 1:1 mapping, go to next TC */
+ if (count == 1)
+ continue;
+
+ if (priv->plat->tx_sched_algorithm == MTL_TX_ALGORITHM_SP) {
+ NL_SET_ERR_MSG_MOD(extack, ALG_ERR_MSG);
+ return -EINVAL;
+ }
+
+ queue_weight = priv->plat->tx_queues_cfg[offset].weight;
+
+ for (u32 i = 1; i < count; i++) {
+ if (priv->plat->tx_queues_cfg[offset + i].weight !=
+ queue_weight) {
+ NL_SET_ERR_MSG_FMT_MOD(extack, WEIGHT_ERR_MSG,
+ queue_weight, tc);
+ return -EINVAL;
+ }
+ }
+ }
+
+update_mapping:
+ val = readl(priv->ioaddr + GMAC5_MTL_FPE_CTRL_STS);
+ writel(u32_replace_bits(val, preemptible_txqs, FPE_MTL_PREEMPTION_CLASS),
+ priv->ioaddr + GMAC5_MTL_FPE_CTRL_STS);
+
+ return 0;
+}
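As an illustration of the mapping above, a minimal standalone sketch (plain C, not kernel code) that reproduces the pclass-to-preemptible-TXQ computation, assuming a hypothetical 1:1 TC:TXQ layout with four traffic classes and TC2/TC3 requested as preemptible:

#include <stdint.h>
#include <stdio.h>

/* Open-coded 32-bit GENMASK(h, l) for this userspace sketch */
#define GENMASK_U32(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))

int main(void)
{
	uint32_t pclass = (1u << 2) | (1u << 3);	/* TC2 and TC3 preemptible */
	uint32_t preemptible_txqs = 0;

	for (uint32_t tc = 0; tc < 4; tc++) {
		uint32_t offset = tc, count = 1;	/* 1:1 TC:TXQ mapping */

		if (pclass & (1u << tc))
			preemptible_txqs |= GENMASK_U32(offset + count - 1, offset);
	}

	/* 0xc: TXQ2 and TXQ3 end up in the FPE_MTL_PREEMPTION_CLASS field */
	printf("preemptible_txqs = 0x%x\n", (unsigned int)preemptible_txqs);
	return 0;
}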
+
+int dwxgmac3_fpe_map_preemption_class(struct net_device *ndev,
+ struct netlink_ext_ack *extack, u32 pclass)
+{
+ u32 val, offset, count, preemptible_txqs = 0;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ int num_tc = netdev_get_num_tc(ndev);
+
+ if (!num_tc) {
+ /* Restore default TC:Queue mapping */
+ for (u32 i = 0; i < priv->plat->tx_queues_to_use; i++) {
+ val = readl(priv->ioaddr + XGMAC_MTL_TXQ_OPMODE(i));
+ writel(u32_replace_bits(val, i, XGMAC_Q2TCMAP),
+ priv->ioaddr + XGMAC_MTL_TXQ_OPMODE(i));
+ }
+ }
+
+ /* Synopsys Databook:
+ * "All Queues within a traffic class are selected in a round robin
+ * fashion (when packets are available) when the traffic class is
+ * selected by the scheduler for packet transmission. This is true for
+ * any of the scheduling algorithms."
+ */
+ for (u32 tc = 0; tc < num_tc; tc++) {
+ count = ndev->tc_to_txq[tc].count;
+ offset = ndev->tc_to_txq[tc].offset;
+
+ if (pclass & BIT(tc))
+ preemptible_txqs |= GENMASK(offset + count - 1, offset);
+
+ for (u32 i = 0; i < count; i++) {
+ val = readl(priv->ioaddr + XGMAC_MTL_TXQ_OPMODE(offset + i));
+ writel(u32_replace_bits(val, tc, XGMAC_Q2TCMAP),
+ priv->ioaddr + XGMAC_MTL_TXQ_OPMODE(offset + i));
+ }
+ }
+
+ val = readl(priv->ioaddr + XGMAC_MTL_FPE_CTRL_STS);
+ writel(u32_replace_bits(val, preemptible_txqs, FPE_MTL_PREEMPTION_CLASS),
+ priv->ioaddr + XGMAC_MTL_FPE_CTRL_STS);
+
+ return 0;
+}
+
+const struct stmmac_fpe_reg dwmac5_fpe_reg = {
+ .mac_fpe_reg = GMAC5_MAC_FPE_CTRL_STS,
+ .mtl_fpe_reg = GMAC5_MTL_FPE_CTRL_STS,
+ .rxq_ctrl1_reg = GMAC_RXQ_CTRL1,
+ .fprq_mask = GMAC_RXQCTRL_FPRQ,
+ .int_en_reg = GMAC_INT_EN,
+ .int_en_bit = GMAC_INT_FPE_EN,
+};
+
+const struct stmmac_fpe_reg dwxgmac3_fpe_reg = {
+ .mac_fpe_reg = XGMAC_MAC_FPE_CTRL_STS,
+ .mtl_fpe_reg = XGMAC_MTL_FPE_CTRL_STS,
+ .rxq_ctrl1_reg = XGMAC_RXQ_CTRL1,
+ .fprq_mask = XGMAC_FPRQ,
+ .int_en_reg = XGMAC_INT_EN,
+ .int_en_bit = XGMAC_FPEIE,
+};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.h
new file mode 100644
index 000000000000..3fc46acf7001
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2024 Furong Xu <0x1207@gmail.com>
+ * stmmac FPE(802.3 Qbu) handling
+ */
+#ifndef _STMMAC_FPE_H_
+#define _STMMAC_FPE_H_
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+
+struct stmmac_priv;
+
+bool stmmac_fpe_supported(struct stmmac_priv *priv);
+void stmmac_fpe_init(struct stmmac_priv *priv);
+void stmmac_fpe_irq_status(struct stmmac_priv *priv);
+int stmmac_fpe_get_add_frag_size(struct stmmac_priv *priv);
+void stmmac_fpe_set_add_frag_size(struct stmmac_priv *priv, u32 add_frag_size);
+
+int dwmac5_fpe_map_preemption_class(struct net_device *ndev,
+ struct netlink_ext_ack *extack, u32 pclass);
+int dwxgmac3_fpe_map_preemption_class(struct net_device *ndev,
+ struct netlink_ext_ack *extack, u32 pclass);
+
+extern const struct stmmac_fpe_reg dwmac5_fpe_reg;
+extern const struct stmmac_fpe_reg dwxgmac3_fpe_reg;
+
+#endif
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index f05bd757dfe5..bb110124f21e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -18,9 +18,22 @@
#include "dwmac4.h"
#include "stmmac.h"
+#define STMMAC_HWTS_CFG_MASK (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
+ PTP_TCR_TSINIT | PTP_TCR_TSUPDT | \
+ PTP_TCR_TSCTRLSSR | PTP_TCR_SNAPTYPSEL_1 | \
+ PTP_TCR_TSIPV4ENA | PTP_TCR_TSIPV6ENA | \
+ PTP_TCR_TSEVNTENA | PTP_TCR_TSMSTRENA | \
+ PTP_TCR_TSVER2ENA | PTP_TCR_TSIPENA | \
+ PTP_TCR_TSTRIG | PTP_TCR_TSENALL)
+
static void config_hw_tstamping(void __iomem *ioaddr, u32 data)
{
- writel(data, ioaddr + PTP_TCR);
+ u32 regval = readl(ioaddr + PTP_TCR);
+
+ regval &= ~STMMAC_HWTS_CFG_MASK;
+ regval |= data;
+
+ writel(regval, ioaddr + PTP_TCR);
}
static void config_sub_second_increment(void __iomem *ioaddr,
@@ -122,7 +135,6 @@ static int init_systime(void __iomem *ioaddr, u32 sec, u32 nsec)
static int config_addend(void __iomem *ioaddr, u32 addend)
{
u32 value;
- int limit;
writel(addend, ioaddr + PTP_TAR);
/* issue command to update the addend value */
@@ -131,23 +143,15 @@ static int config_addend(void __iomem *ioaddr, u32 addend)
writel(value, ioaddr + PTP_TCR);
/* wait for present addend update to complete */
- limit = 10;
- while (limit--) {
- if (!(readl(ioaddr + PTP_TCR) & PTP_TCR_TSADDREG))
- break;
- mdelay(10);
- }
- if (limit < 0)
- return -EBUSY;
-
- return 0;
+ return readl_poll_timeout_atomic(ioaddr + PTP_TCR, value,
+ !(value & PTP_TCR_TSADDREG),
+ 10, 100000);
}
static int adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec,
int add_sub, int gmac4)
{
u32 value;
- int limit;
if (add_sub) {
/* If the new sec value needs to be subtracted with
@@ -174,16 +178,9 @@ static int adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec,
writel(value, ioaddr + PTP_TCR);
/* wait for present system time adjust/update to complete */
- limit = 10;
- while (limit--) {
- if (!(readl(ioaddr + PTP_TCR) & PTP_TCR_TSUPDT))
- break;
- mdelay(10);
- }
- if (limit < 0)
- return -EBUSY;
-
- return 0;
+ return readl_poll_timeout_atomic(ioaddr + PTP_TCR, value,
+ !(value & PTP_TCR_TSUPDT),
+ 10, 100000);
}
static void get_systime(void __iomem *ioaddr, u64 *systime)
@@ -209,7 +206,7 @@ static void get_ptptime(void __iomem *ptpaddr, u64 *ptp_time)
u64 ns;
ns = readl(ptpaddr + PTP_ATNR);
- ns += readl(ptpaddr + PTP_ATSR) * NSEC_PER_SEC;
+ ns += (u64)readl(ptpaddr + PTP_ATSR) * NSEC_PER_SEC;
*ptp_time = ns;
}
@@ -218,6 +215,7 @@ static void timestamp_interrupt(struct stmmac_priv *priv)
{
u32 num_snapshot, ts_status, tsync_int;
struct ptp_clock_event event;
+ u32 acr_value, channel;
unsigned long flags;
u64 ptp_time;
int i;
@@ -243,12 +241,15 @@ static void timestamp_interrupt(struct stmmac_priv *priv)
num_snapshot = (ts_status & GMAC_TIMESTAMP_ATSNS_MASK) >>
GMAC_TIMESTAMP_ATSNS_SHIFT;
+ acr_value = readl(priv->ptpaddr + PTP_ACR);
+ channel = ilog2(FIELD_GET(PTP_ACR_MASK, acr_value));
+
for (i = 0; i < num_snapshot; i++) {
read_lock_irqsave(&priv->ptp_lock, flags);
get_ptptime(priv->ptpaddr, &ptp_time);
read_unlock_irqrestore(&priv->ptp_lock, flags);
event.type = PTP_CLOCK_EXTTS;
- event.index = 0;
+ event.index = channel;
event.timestamp = ptp_time;
ptp_clock_event(priv->ptp_clock, &event);
}
@@ -265,3 +266,14 @@ const struct stmmac_hwtimestamp stmmac_ptp = {
.timestamp_interrupt = timestamp_interrupt,
.hwtstamp_correct_latency = hwtstamp_correct_latency,
};
+
+const struct stmmac_hwtimestamp dwmac1000_ptp = {
+ .config_hw_tstamping = config_hw_tstamping,
+ .init_systime = init_systime,
+ .config_sub_second_increment = config_sub_second_increment,
+ .config_addend = config_addend,
+ .adjust_systime = adjust_systime,
+ .get_systime = get_systime,
+ .get_ptptime = dwmac1000_get_ptptime,
+ .timestamp_interrupt = dwmac1000_timestamp_interrupt,
+};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_libpci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_libpci.c
new file mode 100644
index 000000000000..5c5dd502f79a
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_libpci.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PCI bus helpers for STMMAC driver
+ * Copyright (C) 2025 Yao Zi <ziyao@disroot.org>
+ */
+
+#include <linux/device.h>
+#include <linux/pci.h>
+
+#include "stmmac_libpci.h"
+
+int stmmac_pci_plat_suspend(struct device *dev, void *bsp_priv)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ ret = pci_save_state(pdev);
+ if (ret)
+ return ret;
+
+ pci_disable_device(pdev);
+ pci_wake_from_d3(pdev, true);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stmmac_pci_plat_suspend);
+
+int stmmac_pci_plat_resume(struct device *dev, void *bsp_priv)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ pci_restore_state(pdev);
+ pci_set_power_state(pdev, PCI_D0);
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ pci_set_master(pdev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stmmac_pci_plat_resume);
+
+MODULE_DESCRIPTION("STMMAC PCI helper library");
+MODULE_AUTHOR("Yao Zi <ziyao@disroot.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_libpci.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_libpci.h
new file mode 100644
index 000000000000..71553184f982
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_libpci.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2025 Yao Zi <ziyao@disroot.org>
+ */
+
+#ifndef __STMMAC_LIBPCI_H__
+#define __STMMAC_LIBPCI_H__
+
+int stmmac_pci_plat_suspend(struct device *dev, void *bsp_priv);
+int stmmac_pci_plat_resume(struct device *dev, void *bsp_priv);
+
+#endif /* __STMMAC_LIBPCI_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 7c6fb14b5555..da206b24aaed 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -29,6 +29,7 @@
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
+#include <linux/pm_wakeirq.h>
#include <linux/prefetch.h>
#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_DEBUG_FS
@@ -39,11 +40,14 @@
#include <linux/phylink.h>
#include <linux/udp.h>
#include <linux/bpf_trace.h>
+#include <net/devlink.h>
#include <net/page_pool/helpers.h>
#include <net/pkt_cls.h>
#include <net/xdp_sock_drv.h>
#include "stmmac_ptp.h"
+#include "stmmac_fpe.h"
#include "stmmac.h"
+#include "stmmac_pcs.h"
#include "stmmac_xdp.h"
#include <linux/reset.h>
#include <linux/of_mdio.h>
@@ -55,8 +59,7 @@
* with fine resolution and binary rollover. This avoids non-monotonic behavior
* (clock jumps) when changing timestamping settings at runtime.
*/
-#define STMMAC_HWTS_ACTIVE (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
- PTP_TCR_TSCTRLSSR)
+#define STMMAC_HWTS_ACTIVE (PTP_TCR_TSENA | PTP_TCR_TSCTRLSSR)
#define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
#define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
@@ -76,7 +79,6 @@ module_param(phyaddr, int, 0444);
MODULE_PARM_DESC(phyaddr, "Physical device address");
#define STMMAC_TX_THRESH(x) ((x)->dma_conf.dma_tx_size / 4)
-#define STMMAC_RX_THRESH(x) ((x)->dma_conf.dma_rx_size / 4)
/* Limit to make sure XDP TX and slow path can coexist */
#define STMMAC_XSK_TX_BUDGET_MAX 256
@@ -88,33 +90,32 @@ MODULE_PARM_DESC(phyaddr, "Physical device address");
#define STMMAC_XDP_TX BIT(1)
#define STMMAC_XDP_REDIRECT BIT(2)
-static int flow_ctrl = FLOW_AUTO;
+static int flow_ctrl = 0xdead;
module_param(flow_ctrl, int, 0644);
-MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
+MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off] (obsolete)");
static int pause = PAUSE_TIME;
module_param(pause, int, 0644);
-MODULE_PARM_DESC(pause, "Flow Control Pause Time");
+MODULE_PARM_DESC(pause, "Flow Control Pause Time (units of 512 bit times)");
#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, 0644);
MODULE_PARM_DESC(tc, "DMA threshold control value");
+/* This is unused */
#define DEFAULT_BUFSIZE 1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, 0644);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");
-#define STMMAC_RX_COPYBREAK 256
-
static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_IFUP |
NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
#define STMMAC_DEFAULT_LPI_TIMER 1000
-static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
-module_param(eee_timer, int, 0644);
+static unsigned int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
+module_param(eee_timer, uint, 0644);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
@@ -147,37 +148,84 @@ static void stmmac_exit_fs(struct net_device *dev);
#define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
-int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
+struct stmmac_devlink_priv {
+ struct stmmac_priv *stmmac_priv;
+};
+
+enum stmmac_dl_param_id {
+ STMMAC_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ STMMAC_DEVLINK_PARAM_ID_TS_COARSE,
+};
+
+/**
+ * stmmac_set_clk_tx_rate() - set the clock rate for the MAC transmit clock
+ * @bsp_priv: BSP private data structure (unused)
+ * @clk_tx_i: the transmit clock
+ * @interface: the selected interface mode
+ * @speed: the speed that the MAC will be operating at
+ *
+ * Set the transmit clock rate for the MAC, normally 2.5MHz for 10Mbps,
+ * 25MHz for 100Mbps and 125MHz for 1Gbps. This is suitable for at least
+ * MII, GMII, RGMII and RMII interface modes. Platforms can hook this into
+ * the plat_data->set_clk_tx_rate method directly, call it via their own
+ * implementation, or implement their own method should they have more
+ * complex requirements. It is intended to only be used in this method.
+ *
+ * plat_data->clk_tx_i must be filled in.
+ */
+int stmmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed)
{
- int ret = 0;
+ long rate = rgmii_clock(speed);
- if (enabled) {
- ret = clk_prepare_enable(priv->plat->stmmac_clk);
- if (ret)
- return ret;
- ret = clk_prepare_enable(priv->plat->pclk);
- if (ret) {
- clk_disable_unprepare(priv->plat->stmmac_clk);
- return ret;
- }
- if (priv->plat->clks_config) {
- ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
- if (ret) {
- clk_disable_unprepare(priv->plat->stmmac_clk);
- clk_disable_unprepare(priv->plat->pclk);
- return ret;
- }
+ /* Silently ignore unsupported speeds as rgmii_clock() only
+ * supports 10, 100 and 1000Mbps. We do not want to spit
+ * errors for 2500 and higher speeds here.
+ */
+ if (rate < 0)
+ return 0;
+
+ return clk_set_rate(clk_tx_i, rate);
+}
+EXPORT_SYMBOL_GPL(stmmac_set_clk_tx_rate);
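To show how a platform might consume this helper, a hedged glue-driver sketch wiring it into plat_data; the clock name "tx" and the function name are assumptions, not taken from any existing dwmac-* glue:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/stmmac.h>

/* Hypothetical probe-time hook-up: fetch the MAC transmit clock and let the
 * stmmac core set it to 2.5/25/125 MHz on link-up via plat->set_clk_tx_rate.
 */
static int dwmac_example_setup_clk_tx(struct platform_device *pdev,
				      struct plat_stmmacenet_data *plat)
{
	plat->clk_tx_i = devm_clk_get(&pdev->dev, "tx");
	if (IS_ERR(plat->clk_tx_i))
		return PTR_ERR(plat->clk_tx_i);

	plat->set_clk_tx_rate = stmmac_set_clk_tx_rate;

	return 0;
}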
+
+/**
+ * stmmac_axi_blen_to_mask() - convert a burst length array to reg value
+ * @regval: pointer to a u32 for the resulting register value
+ * @blen: pointer to an array of u32 containing the burst length values in bytes
+ * @len: the number of entries in the @blen array
+ */
+void stmmac_axi_blen_to_mask(u32 *regval, const u32 *blen, size_t len)
+{
+ size_t i;
+ u32 val;
+
+ for (val = i = 0; i < len; i++) {
+ u32 burst = blen[i];
+
+ /* Burst values of zero must be skipped. */
+ if (!burst)
+ continue;
+
+ /* The valid range for the burst length is 4 to 256 inclusive,
+ * and it must be a power of two.
+ */
+ if (burst < 4 || burst > 256 || !is_power_of_2(burst)) {
+ pr_err("stmmac: invalid burst length %u at index %zu\n",
+ burst, i);
+ continue;
}
- } else {
- clk_disable_unprepare(priv->plat->stmmac_clk);
- clk_disable_unprepare(priv->plat->pclk);
- if (priv->plat->clks_config)
- priv->plat->clks_config(priv->plat->bsp_priv, enabled);
+
+ /* Since burst is a power of two, and the register field starts
+ * with burst = 4, shift right by two bits so bit 0 of the field
+ * corresponds with the minimum value.
+ */
+ val |= burst >> 2;
}
- return ret;
+ *regval = FIELD_PREP(DMA_AXI_BLEN_MASK, val);
}
-EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
+EXPORT_SYMBOL_GPL(stmmac_axi_blen_to_mask);
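A worked example of the conversion, as a standalone sketch assuming the dwmac4-style layout where BLEN4 occupies the lowest bit of the burst-length field (FIELD_PREP() then shifts the value into its final register position):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t blen[] = { 4, 8, 16, 0, 0, 0, 0 };
	uint32_t val = 0;

	for (size_t i = 0; i < sizeof(blen) / sizeof(blen[0]); i++) {
		uint32_t burst = blen[i];

		if (!burst)
			continue;	/* zero entries are skipped */
		if (burst < 4 || burst > 256 || (burst & (burst - 1)))
			continue;	/* must be a power of two in [4, 256] */

		val |= burst >> 2;	/* 4 -> bit 0, 8 -> bit 1, ... 256 -> bit 6 */
	}

	printf("field value = 0x%x\n", (unsigned int)val);	/* 0x7 for {4, 8, 16} */
	return 0;
}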
/**
* stmmac_verify_args - verify the driver parameters.
@@ -188,16 +236,11 @@ static void stmmac_verify_args(void)
{
if (unlikely(watchdog < 0))
watchdog = TX_TIMEO;
- if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
- buf_sz = DEFAULT_BUFSIZE;
- if (unlikely(flow_ctrl > 1))
- flow_ctrl = FLOW_AUTO;
- else if (likely(flow_ctrl < 0))
- flow_ctrl = FLOW_OFF;
if (unlikely((pause < 0) || (pause > 0xffff)))
pause = PAUSE_TIME;
- if (eee_timer < 0)
- eee_timer = STMMAC_DEFAULT_LPI_TIMER;
+
+ if (flow_ctrl != 0xdead)
+ pr_warn("stmmac: module parameter 'flow_ctrl' is obsolete - please remove from your module configuration\n");
}
static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
@@ -286,73 +329,6 @@ static void stmmac_global_err(struct stmmac_priv *priv)
stmmac_service_event_schedule(priv);
}
-/**
- * stmmac_clk_csr_set - dynamically set the MDC clock
- * @priv: driver private structure
- * Description: this is to dynamically set the MDC clock according to the csr
- * clock input.
- * Note:
- * If a specific clk_csr value is passed from the platform
- * this means that the CSR Clock Range selection cannot be
- * changed at run-time and it is fixed (as reported in the driver
- * documentation). Viceversa the driver will try to set the MDC
- * clock dynamically according to the actual clock input.
- */
-static void stmmac_clk_csr_set(struct stmmac_priv *priv)
-{
- u32 clk_rate;
-
- clk_rate = clk_get_rate(priv->plat->stmmac_clk);
-
- /* Platform provided default clk_csr would be assumed valid
- * for all other cases except for the below mentioned ones.
- * For values higher than the IEEE 802.3 specified frequency
- * we can not estimate the proper divider as it is not known
- * the frequency of clk_csr_i. So we do not change the default
- * divider.
- */
- if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
- if (clk_rate < CSR_F_35M)
- priv->clk_csr = STMMAC_CSR_20_35M;
- else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
- priv->clk_csr = STMMAC_CSR_35_60M;
- else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
- priv->clk_csr = STMMAC_CSR_60_100M;
- else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
- priv->clk_csr = STMMAC_CSR_100_150M;
- else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
- priv->clk_csr = STMMAC_CSR_150_250M;
- else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
- priv->clk_csr = STMMAC_CSR_250_300M;
- }
-
- if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
- if (clk_rate > 160000000)
- priv->clk_csr = 0x03;
- else if (clk_rate > 80000000)
- priv->clk_csr = 0x02;
- else if (clk_rate > 40000000)
- priv->clk_csr = 0x01;
- else
- priv->clk_csr = 0;
- }
-
- if (priv->plat->has_xgmac) {
- if (clk_rate > 400000000)
- priv->clk_csr = 0x5;
- else if (clk_rate > 350000000)
- priv->clk_csr = 0x4;
- else if (clk_rate > 300000000)
- priv->clk_csr = 0x3;
- else if (clk_rate > 250000000)
- priv->clk_csr = 0x2;
- else if (clk_rate > 150000000)
- priv->clk_csr = 0x1;
- else
- priv->clk_csr = 0x0;
- }
-}
-
static void print_pkt(unsigned char *buf, int len)
{
pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
@@ -390,23 +366,7 @@ static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
return dirty;
}
-static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
-{
- int tx_lpi_timer;
-
- /* Clear/set the SW EEE timer flag based on LPI ET enablement */
- priv->eee_sw_timer_en = en ? 0 : 1;
- tx_lpi_timer = en ? priv->tx_lpi_timer : 0;
- stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
-}
-
-/**
- * stmmac_enable_eee_mode - check and enter in LPI mode
- * @priv: driver private structure
- * Description: this function is to verify and enter in LPI mode in case of
- * EEE.
- */
-static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
+static bool stmmac_eee_tx_busy(struct stmmac_priv *priv)
{
u32 tx_cnt = priv->plat->tx_queues_to_use;
u32 queue;
@@ -416,31 +376,45 @@ static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
if (tx_q->dirty_tx != tx_q->cur_tx)
- return -EBUSY; /* still unfinished work */
+ return true; /* still unfinished work */
}
- /* Check and enter in LPI mode */
- if (!priv->tx_path_in_lpi_mode)
- stmmac_set_eee_mode(priv, priv->hw,
- priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
- return 0;
+ return false;
+}
+
+static void stmmac_restart_sw_lpi_timer(struct stmmac_priv *priv)
+{
+ mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
}
/**
- * stmmac_disable_eee_mode - disable and exit from LPI mode
+ * stmmac_try_to_start_sw_lpi - check and enter in LPI mode
* @priv: driver private structure
- * Description: this function is to exit and disable EEE in case of
- * LPI state is true. This is called by the xmit.
+ * Description: this function is to verify and enter in LPI mode in case of
+ * EEE.
*/
-void stmmac_disable_eee_mode(struct stmmac_priv *priv)
+static void stmmac_try_to_start_sw_lpi(struct stmmac_priv *priv)
{
- if (!priv->eee_sw_timer_en) {
- stmmac_lpi_entry_timer_config(priv, 0);
+ if (stmmac_eee_tx_busy(priv)) {
+ stmmac_restart_sw_lpi_timer(priv);
return;
}
- stmmac_reset_eee_mode(priv, priv->hw);
- del_timer_sync(&priv->eee_ctrl_timer);
+ /* Check and enter in LPI mode */
+ if (!priv->tx_path_in_lpi_mode)
+ stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_FORCED,
+ priv->tx_lpi_clk_stop, 0);
+}
+
+/**
+ * stmmac_stop_sw_lpi - stop transmitting LPI
+ * @priv: driver private structure
+ * Description: When using software-controlled LPI, stop transmitting LPI state.
+ */
+static void stmmac_stop_sw_lpi(struct stmmac_priv *priv)
+{
+ timer_delete_sync(&priv->eee_ctrl_timer);
+ stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0);
priv->tx_path_in_lpi_mode = false;
}
@@ -453,76 +427,9 @@ void stmmac_disable_eee_mode(struct stmmac_priv *priv)
*/
static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
- struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
-
- if (stmmac_enable_eee_mode(priv))
- mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
-}
-
-/**
- * stmmac_eee_init - init EEE
- * @priv: driver private structure
- * Description:
- * if the GMAC supports the EEE (from the HW cap reg) and the phy device
- * can also manage EEE, this function enable the LPI state and start related
- * timer.
- */
-bool stmmac_eee_init(struct stmmac_priv *priv)
-{
- int eee_tw_timer = priv->eee_tw_timer;
-
- /* Using PCS we cannot dial with the phy registers at this stage
- * so we do not support extra feature like EEE.
- */
- if (priv->hw->pcs == STMMAC_PCS_TBI ||
- priv->hw->pcs == STMMAC_PCS_RTBI)
- return false;
-
- /* Check if MAC core supports the EEE feature. */
- if (!priv->dma_cap.eee)
- return false;
-
- mutex_lock(&priv->lock);
-
- /* Check if it needs to be deactivated */
- if (!priv->eee_active) {
- if (priv->eee_enabled) {
- netdev_dbg(priv->dev, "disable EEE\n");
- stmmac_lpi_entry_timer_config(priv, 0);
- del_timer_sync(&priv->eee_ctrl_timer);
- stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
- if (priv->hw->xpcs)
- xpcs_config_eee(priv->hw->xpcs,
- priv->plat->mult_fact_100ns,
- false);
- }
- mutex_unlock(&priv->lock);
- return false;
- }
-
- if (priv->eee_active && !priv->eee_enabled) {
- timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
- stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
- eee_tw_timer);
- if (priv->hw->xpcs)
- xpcs_config_eee(priv->hw->xpcs,
- priv->plat->mult_fact_100ns,
- true);
- }
-
- if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
- del_timer_sync(&priv->eee_ctrl_timer);
- priv->tx_path_in_lpi_mode = false;
- stmmac_lpi_entry_timer_config(priv, 1);
- } else {
- stmmac_lpi_entry_timer_config(priv, 0);
- mod_timer(&priv->eee_ctrl_timer,
- STMMAC_LPI_T(priv->tx_lpi_timer));
- }
+ struct stmmac_priv *priv = timer_container_of(priv, t, eee_ctrl_timer);
- mutex_unlock(&priv->lock);
- netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
- return true;
+ stmmac_try_to_start_sw_lpi(priv);
}
/* stmmac_get_tx_hwtstamp - get HW TX timestamps
@@ -586,7 +493,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
if (!priv->hwts_rx_en)
return;
/* For GMAC4, the valid timestamp is from CTX next desc. */
- if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
+ if (dwmac_is_xmac(priv->plat->core_type))
desc = np;
/* Check if timestamp is available */
@@ -604,21 +511,49 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
}
}
+static void stmmac_update_subsecond_increment(struct stmmac_priv *priv)
+{
+ bool xmac = dwmac_is_xmac(priv->plat->core_type);
+ u32 sec_inc = 0;
+ u64 temp = 0;
+
+ stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
+
+ /* program Sub Second Increment reg */
+ stmmac_config_sub_second_increment(priv, priv->ptpaddr,
+ priv->plat->clk_ptp_rate,
+ xmac, &sec_inc);
+ temp = div_u64(1000000000ULL, sec_inc);
+
+ /* Store sub second increment for later use */
+ priv->sub_second_inc = sec_inc;
+
+ /* calculate default added value:
+ * formula is :
+ * addend = (2^32)/freq_div_ratio;
+ * where, freq_div_ratio = 1e9ns/sec_inc
+ */
+ temp = (u64)(temp << 32);
+ priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
+ stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
+}
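The addend formula in the comment above can be checked numerically. A standalone sketch, assuming a 50 MHz PTP reference clock and the fine-correction method (PTP_TCR_TSCFUPDT set), where config_sub_second_increment() typically programs twice the reference clock period:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t clk_ptp_rate = 50000000ULL;			/* 50 MHz reference */
	uint64_t sec_inc = 2000000000ULL / clk_ptp_rate;	/* 40 ns per accumulator overflow */
	uint64_t freq_div_ratio = 1000000000ULL / sec_inc;	/* 25 MHz */
	uint64_t addend = (freq_div_ratio << 32) / clk_ptp_rate;

	/* addend = 0x80000000 (2^31): the accumulator overflows every second
	 * clk_ptp cycle, advancing system time by 40 ns per 2 x 20 ns.
	 */
	printf("addend = 0x%llx\n", (unsigned long long)addend);
	return 0;
}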
+
/**
* stmmac_hwtstamp_set - control hardware timestamping.
* @dev: device pointer.
- * @ifr: An IOCTL specific structure, that can contain a pointer to
- * a proprietary structure used to pass information to the driver.
+ * @config: the timestamping configuration.
+ * @extack: netlink extended ack structure for error reporting.
* Description:
* This function configures the MAC to enable/disable both outgoing(TX)
* and incoming(RX) packets time stamping based on user input.
* Return Value:
* 0 on success and an appropriate -ve integer on failure.
*/
-static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+static int stmmac_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct stmmac_priv *priv = netdev_priv(dev);
- struct hwtstamp_config config;
u32 ptp_v2 = 0;
u32 tstamp_all = 0;
u32 ptp_over_ipv4_udp = 0;
@@ -629,34 +564,36 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
u32 ts_event_en = 0;
if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
- netdev_alert(priv->dev, "No support for HW time stamping\n");
+ NL_SET_ERR_MSG_MOD(extack, "No support for HW time stamping");
priv->hwts_tx_en = 0;
priv->hwts_rx_en = 0;
return -EOPNOTSUPP;
}
- if (copy_from_user(&config, ifr->ifr_data,
- sizeof(config)))
- return -EFAULT;
+ if (!netif_running(dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot change timestamping configuration while down");
+ return -ENODEV;
+ }
netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
- __func__, config.flags, config.tx_type, config.rx_filter);
+ __func__, config->flags, config->tx_type, config->rx_filter);
- if (config.tx_type != HWTSTAMP_TX_OFF &&
- config.tx_type != HWTSTAMP_TX_ON)
+ if (config->tx_type != HWTSTAMP_TX_OFF &&
+ config->tx_type != HWTSTAMP_TX_ON)
return -ERANGE;
if (priv->adv_ts) {
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
/* time stamp no incoming packet at all */
- config.rx_filter = HWTSTAMP_FILTER_NONE;
+ config->rx_filter = HWTSTAMP_FILTER_NONE;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
/* PTP v1, UDP, any kind of event packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
/* 'xmac' hardware can support Sync, Pdelay_Req and
* Pdelay_resp by setting bit14 and bits17/16 to 01
* This leaves Delay_Req timestamps out.
@@ -670,7 +607,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
/* PTP v1, UDP, Sync packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
/* take time stamp for SYNC messages only */
ts_event_en = PTP_TCR_TSEVNTENA;
@@ -680,7 +617,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
/* PTP v1, UDP, Delay_req packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
/* take time stamp for Delay_Req messages only */
ts_master_en = PTP_TCR_TSMSTRENA;
ts_event_en = PTP_TCR_TSEVNTENA;
@@ -691,7 +628,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
/* PTP v2, UDP, any kind of event packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
ptp_v2 = PTP_TCR_TSVER2ENA;
/* take time stamp for all event messages */
snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
@@ -702,7 +639,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
/* PTP v2, UDP, Sync packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
ptp_v2 = PTP_TCR_TSVER2ENA;
/* take time stamp for SYNC messages only */
ts_event_en = PTP_TCR_TSEVNTENA;
@@ -713,7 +650,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
/* PTP v2, UDP, Delay_req packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
ptp_v2 = PTP_TCR_TSVER2ENA;
/* take time stamp for Delay_Req messages only */
ts_master_en = PTP_TCR_TSMSTRENA;
@@ -725,7 +662,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_EVENT:
/* PTP v2/802.AS1 any layer, any kind of event packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
ptp_v2 = PTP_TCR_TSVER2ENA;
snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
if (priv->synopsys_id < DWMAC_CORE_4_10)
@@ -737,7 +674,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_SYNC:
/* PTP v2/802.AS1, any layer, Sync packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
ptp_v2 = PTP_TCR_TSVER2ENA;
/* take time stamp for SYNC messages only */
ts_event_en = PTP_TCR_TSEVNTENA;
@@ -749,7 +686,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
/* PTP v2/802.AS1, any layer, Delay_req packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
ptp_v2 = PTP_TCR_TSVER2ENA;
/* take time stamp for Delay_Req messages only */
ts_master_en = PTP_TCR_TSMSTRENA;
@@ -763,7 +700,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_NTP_ALL:
case HWTSTAMP_FILTER_ALL:
/* time stamp any incoming packet */
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
tstamp_all = PTP_TCR_TSENALL;
break;
@@ -771,20 +708,22 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
return -ERANGE;
}
} else {
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
- config.rx_filter = HWTSTAMP_FILTER_NONE;
+ config->rx_filter = HWTSTAMP_FILTER_NONE;
break;
default:
/* PTP v1, UDP, any kind of event packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
break;
}
}
- priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
- priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
+ priv->hwts_rx_en = config->rx_filter != HWTSTAMP_FILTER_NONE;
+ priv->hwts_tx_en = config->tx_type == HWTSTAMP_TX_ON;
priv->systime_flags = STMMAC_HWTS_ACTIVE;
+ if (!priv->tsfupdt_coarse)
+ priv->systime_flags |= PTP_TCR_TSCFUPDT;
if (priv->hwts_tx_en || priv->hwts_rx_en) {
priv->systime_flags |= tstamp_all | ptp_v2 |
@@ -795,31 +734,30 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
- memcpy(&priv->tstamp_config, &config, sizeof(config));
+ priv->tstamp_config = *config;
- return copy_to_user(ifr->ifr_data, &config,
- sizeof(config)) ? -EFAULT : 0;
+ return 0;
}
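From userspace this path is still reached through the long-standing SIOCSHWTSTAMP ioctl, which the core converts into the kernel_hwtstamp_config handled above; a minimal sketch, with the interface name "eth0" as an assumption:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	else
		printf("granted rx_filter: %d\n", cfg.rx_filter);

	close(fd);
	return 0;
}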
/**
* stmmac_hwtstamp_get - read hardware timestamping.
* @dev: device pointer.
- * @ifr: An IOCTL specific structure, that can contain a pointer to
- * a proprietary structure used to pass information to the driver.
+ * @config: the timestamping configuration.
* Description:
* This function obtain the current hardware timestamping settings
* as requested.
*/
-static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+static int stmmac_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *config)
{
struct stmmac_priv *priv = netdev_priv(dev);
- struct hwtstamp_config *config = &priv->tstamp_config;
if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
return -EOPNOTSUPP;
- return copy_to_user(ifr->ifr_data, config,
- sizeof(*config)) ? -EFAULT : 0;
+ *config = priv->tstamp_config;
+
+ return 0;
}
/**
@@ -832,36 +770,20 @@ static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
* Will be rerun after resuming from suspend, case in which the timestamping
* flags updated by stmmac_hwtstamp_set() also need to be restored.
*/
-int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
+static int stmmac_init_tstamp_counter(struct stmmac_priv *priv,
+ u32 systime_flags)
{
- bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
struct timespec64 now;
- u32 sec_inc = 0;
- u64 temp = 0;
- if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
- return -EOPNOTSUPP;
+ if (!priv->plat->clk_ptp_rate) {
+ netdev_err(priv->dev, "Invalid PTP clock rate");
+ return -EINVAL;
+ }
stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
priv->systime_flags = systime_flags;
- /* program Sub Second Increment reg */
- stmmac_config_sub_second_increment(priv, priv->ptpaddr,
- priv->plat->clk_ptp_rate,
- xmac, &sec_inc);
- temp = div_u64(1000000000ULL, sec_inc);
-
- /* Store sub second increment for later use */
- priv->sub_second_inc = sec_inc;
-
- /* calculate default added value:
- * formula is :
- * addend = (2^32)/freq_div_ratio;
- * where, freq_div_ratio = 1e9ns/sec_inc
- */
- temp = (u64)(temp << 32);
- priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
- stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
+ stmmac_update_subsecond_increment(priv);
/* initialize system time */
ktime_get_real_ts64(&now);
@@ -871,26 +793,33 @@ int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
return 0;
}
-EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
/**
- * stmmac_init_ptp - init PTP
+ * stmmac_init_timestamping - initialise timestamping
* @priv: driver private structure
* Description: this is to verify if the HW supports the PTPv1 or PTPv2.
* This is done by looking at the HW cap. register.
* This function also registers the ptp driver.
*/
-static int stmmac_init_ptp(struct stmmac_priv *priv)
+static int stmmac_init_timestamping(struct stmmac_priv *priv)
{
- bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
+ bool xmac = dwmac_is_xmac(priv->plat->core_type);
int ret;
if (priv->plat->ptp_clk_freq_config)
priv->plat->ptp_clk_freq_config(priv);
- ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
- if (ret)
+ if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) {
+ netdev_info(priv->dev, "PTP not supported by HW\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE |
+ PTP_TCR_TSCFUPDT);
+ if (ret) {
+ netdev_warn(priv->dev, "PTP init failed\n");
return ret;
+ }
priv->adv_ts = 0;
/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
@@ -916,36 +845,76 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
return 0;
}
+static void stmmac_setup_ptp(struct stmmac_priv *priv)
+{
+ int ret;
+
+ ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
+ if (ret < 0)
+ netdev_warn(priv->dev,
+ "failed to enable PTP reference clock: %pe\n",
+ ERR_PTR(ret));
+
+ if (stmmac_init_timestamping(priv) == 0)
+ stmmac_ptp_register(priv);
+}
+
static void stmmac_release_ptp(struct stmmac_priv *priv)
{
- clk_disable_unprepare(priv->plat->clk_ptp_ref);
stmmac_ptp_unregister(priv);
+ clk_disable_unprepare(priv->plat->clk_ptp_ref);
}
/**
* stmmac_mac_flow_ctrl - Configure flow control in all queues
* @priv: driver private structure
* @duplex: duplex passed to the next function
+ * @flow_ctrl: desired flow control modes
* Description: It is used for configuring the flow control in all queues
*/
-static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
+static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex,
+ unsigned int flow_ctrl)
{
u32 tx_cnt = priv->plat->tx_queues_to_use;
- stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
- priv->pause, tx_cnt);
+ stmmac_flow_ctrl(priv, priv->hw, duplex, flow_ctrl, priv->pause_time,
+ tx_cnt);
+}
+
+static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+
+ /* Refresh the MAC-specific capabilities */
+ stmmac_mac_update_caps(priv);
+
+ config->mac_capabilities = priv->hw->link.caps;
+
+ if (priv->plat->max_speed)
+ phylink_limit_mac_speed(config, priv->plat->max_speed);
+
+ return config->mac_capabilities;
}
static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
phy_interface_t interface)
{
struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+ struct phylink_pcs *pcs;
- if (priv->hw->xpcs)
- return &priv->hw->xpcs->pcs;
+ if (priv->plat->select_pcs) {
+ pcs = priv->plat->select_pcs(priv, interface);
+ if (!IS_ERR(pcs))
+ return pcs;
+ }
- if (priv->hw->lynx_pcs)
- return priv->hw->lynx_pcs;
+ /* The PCS control register is only relevant for SGMII, TBI and RTBI
+ * modes. We no longer support TBI or RTBI, so only configure this
+ * register when operating in SGMII mode with the integrated PCS.
+ */
+ if (priv->hw->pcs & STMMAC_PCS_SGMII && priv->integrated_pcs)
+ return &priv->integrated_pcs->pcs;
return NULL;
}
@@ -956,20 +925,16 @@ static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
/* Nothing to do, xpcs_config() handles everything */
}
-static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
+static int stmmac_mac_finish(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
{
- struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
- enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
- enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
- bool *hs_enable = &fpe_cfg->hs_enable;
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
- if (is_up && *hs_enable) {
- stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg,
- MPACKET_VERIFY);
- } else {
- *lo_state = FPE_STATE_OFF;
- *lp_state = FPE_STATE_OFF;
- }
+ if (priv->plat->mac_finish)
+ priv->plat->mac_finish(ndev, priv->plat->bsp_priv, mode, interface);
+
+ return 0;
}
static void stmmac_mac_link_down(struct phylink_config *config,
@@ -978,13 +943,11 @@ static void stmmac_mac_link_down(struct phylink_config *config,
struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
stmmac_mac_set(priv, priv->ioaddr, false);
- priv->eee_active = false;
- priv->tx_lpi_enabled = false;
- priv->eee_enabled = stmmac_eee_init(priv);
- stmmac_set_eee_pls(priv, priv->hw, false);
+ if (priv->dma_cap.eee)
+ stmmac_set_eee_pls(priv, priv->hw, false);
- if (priv->dma_cap.fpesel)
- stmmac_fpe_link_state_handle(priv, false);
+ if (stmmac_fpe_supported(priv))
+ ethtool_mmsv_link_state_handle(&priv->fpe_cfg.mmsv, false);
}
static void stmmac_mac_link_up(struct phylink_config *config,
@@ -994,7 +957,9 @@ static void stmmac_mac_link_up(struct phylink_config *config,
bool tx_pause, bool rx_pause)
{
struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+ unsigned int flow_ctrl;
u32 old_ctrl, ctrl;
+ int ret;
if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
priv->plat->serdes_powerup)
@@ -1062,8 +1027,6 @@ static void stmmac_mac_link_up(struct phylink_config *config,
}
}
- priv->speed = speed;
-
if (priv->plat->fix_mac_speed)
priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
@@ -1074,41 +1037,124 @@ static void stmmac_mac_link_up(struct phylink_config *config,
/* Flow Control operation */
if (rx_pause && tx_pause)
- priv->flow_ctrl = FLOW_AUTO;
+ flow_ctrl = FLOW_AUTO;
else if (rx_pause && !tx_pause)
- priv->flow_ctrl = FLOW_RX;
+ flow_ctrl = FLOW_RX;
else if (!rx_pause && tx_pause)
- priv->flow_ctrl = FLOW_TX;
+ flow_ctrl = FLOW_TX;
else
- priv->flow_ctrl = FLOW_OFF;
+ flow_ctrl = FLOW_OFF;
- stmmac_mac_flow_ctrl(priv, duplex);
+ stmmac_mac_flow_ctrl(priv, duplex, flow_ctrl);
if (ctrl != old_ctrl)
writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
+ if (priv->plat->set_clk_tx_rate) {
+ ret = priv->plat->set_clk_tx_rate(priv->plat->bsp_priv,
+ priv->plat->clk_tx_i,
+ interface, speed);
+ if (ret < 0)
+ netdev_err(priv->dev,
+ "failed to configure %s transmit clock for %dMbps: %pe\n",
+ phy_modes(interface), speed, ERR_PTR(ret));
+ }
+
stmmac_mac_set(priv, priv->ioaddr, true);
- if (phy && priv->dma_cap.eee) {
- priv->eee_active =
- phy_init_eee(phy, !(priv->plat->flags &
- STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
- priv->eee_enabled = stmmac_eee_init(priv);
- priv->tx_lpi_enabled = priv->eee_enabled;
+ if (priv->dma_cap.eee)
stmmac_set_eee_pls(priv, priv->hw, true);
- }
- if (priv->dma_cap.fpesel)
- stmmac_fpe_link_state_handle(priv, true);
+ if (stmmac_fpe_supported(priv))
+ ethtool_mmsv_link_state_handle(&priv->fpe_cfg.mmsv, true);
if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
stmmac_hwtstamp_correct_latency(priv, priv);
}
+static void stmmac_mac_disable_tx_lpi(struct phylink_config *config)
+{
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+
+ priv->eee_active = false;
+
+ mutex_lock(&priv->lock);
+
+ priv->eee_enabled = false;
+
+ netdev_dbg(priv->dev, "disable EEE\n");
+ priv->eee_sw_timer_en = false;
+ timer_delete_sync(&priv->eee_ctrl_timer);
+ stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0);
+ priv->tx_path_in_lpi_mode = false;
+
+ stmmac_set_eee_timer(priv, priv->hw, 0, STMMAC_DEFAULT_TWT_LS);
+ mutex_unlock(&priv->lock);
+}
+
+static int stmmac_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
+ bool tx_clk_stop)
+{
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+ int ret;
+
+ priv->tx_lpi_timer = timer;
+ priv->eee_active = true;
+
+ mutex_lock(&priv->lock);
+
+ priv->eee_enabled = true;
+
+ /* Update the transmit clock stop according to PHY capability if
+ * the platform allows
+ */
+ if (priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP)
+ priv->tx_lpi_clk_stop = tx_clk_stop;
+
+ stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
+ STMMAC_DEFAULT_TWT_LS);
+
+ /* Try to configure the hardware timer. */
+ ret = stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_TIMER,
+ priv->tx_lpi_clk_stop, priv->tx_lpi_timer);
+
+ if (ret) {
+ /* Hardware timer mode not supported, or value out of range.
+ * Fall back to using software LPI mode
+ */
+ priv->eee_sw_timer_en = true;
+ stmmac_restart_sw_lpi_timer(priv);
+ }
+
+ mutex_unlock(&priv->lock);
+ netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
+
+ return 0;
+}
+
+static int stmmac_mac_wol_set(struct phylink_config *config, u32 wolopts,
+ const u8 *sopass)
+{
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+
+ device_set_wakeup_enable(priv->device, !!wolopts);
+
+ mutex_lock(&priv->lock);
+ priv->wolopts = wolopts;
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
+ .mac_get_caps = stmmac_mac_get_caps,
.mac_select_pcs = stmmac_mac_select_pcs,
.mac_config = stmmac_mac_config,
+ .mac_finish = stmmac_mac_finish,
.mac_link_down = stmmac_mac_link_down,
.mac_link_up = stmmac_mac_link_up,
+ .mac_disable_tx_lpi = stmmac_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = stmmac_mac_enable_tx_lpi,
+ .mac_wol_set = stmmac_mac_wol_set,
};
/**
@@ -1120,18 +1166,26 @@ static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
*/
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
- int interface = priv->plat->mac_interface;
-
- if (priv->dma_cap.pcs) {
- if ((interface == PHY_INTERFACE_MODE_RGMII) ||
- (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
- (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
- (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
- netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
- priv->hw->pcs = STMMAC_PCS_RGMII;
- } else if (interface == PHY_INTERFACE_MODE_SGMII) {
- netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
- priv->hw->pcs = STMMAC_PCS_SGMII;
+ int interface = priv->plat->phy_interface;
+ int speed = priv->plat->mac_port_sel_speed;
+
+ if (priv->dma_cap.pcs && interface == PHY_INTERFACE_MODE_SGMII) {
+ netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
+ priv->hw->pcs = STMMAC_PCS_SGMII;
+
+ switch (speed) {
+ case SPEED_10:
+ case SPEED_100:
+ case SPEED_1000:
+ priv->hw->reverse_sgmii_enable = true;
+ break;
+
+ default:
+ dev_warn(priv->device, "invalid port speed\n");
+ fallthrough;
+ case 0:
+ priv->hw->reverse_sgmii_enable = false;
+ break;
}
}
}
@@ -1147,13 +1201,19 @@ static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
static int stmmac_init_phy(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
+ int mode = priv->plat->phy_interface;
struct fwnode_handle *phy_fwnode;
struct fwnode_handle *fwnode;
+ struct ethtool_keee eee;
int ret;
if (!phylink_expects_phy(priv->phylink))
return 0;
+ if (priv->hw->xpcs &&
+ xpcs_get_an_mode(priv->hw->xpcs, mode) == DW_AN_C73)
+ return 0;
+
fwnode = priv->plat->port_node;
if (!fwnode)
fwnode = dev_fwnode(priv->device);
@@ -1187,59 +1247,110 @@ static int stmmac_init_phy(struct net_device *dev)
ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
}
- if (!priv->plat->pmt) {
- struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+ if (ret) {
+ netdev_err(priv->dev, "cannot attach to PHY (error: %pe)\n",
+ ERR_PTR(ret));
+ return ret;
+ }
- phylink_ethtool_get_wol(priv->phylink, &wol);
- device_set_wakeup_capable(priv->device, !!wol.supported);
- device_set_wakeup_enable(priv->device, !!wol.wolopts);
+ /* Configure phylib's copy of the LPI timer. Normally,
+ * phylink_config.lpi_timer_default would do this, but there is a
+ * chance that userspace could change the eee_timer setting via sysfs
+ * before the first open. Thus, preserve existing behaviour.
+ */
+ if (!phylink_ethtool_get_eee(priv->phylink, &eee)) {
+ eee.tx_lpi_timer = priv->tx_lpi_timer;
+ phylink_ethtool_set_eee(priv->phylink, &eee);
}
- return ret;
+ return 0;
}
-static int stmmac_phy_setup(struct stmmac_priv *priv)
+static int stmmac_phylink_setup(struct stmmac_priv *priv)
{
struct stmmac_mdio_bus_data *mdio_bus_data;
- int mode = priv->plat->phy_interface;
+ struct phylink_config *config;
struct fwnode_handle *fwnode;
+ struct phylink_pcs *pcs;
struct phylink *phylink;
- int max_speed;
- priv->phylink_config.dev = &priv->dev->dev;
- priv->phylink_config.type = PHYLINK_NETDEV;
- priv->phylink_config.mac_managed_pm = true;
+ config = &priv->phylink_config;
+
+ config->dev = &priv->dev->dev;
+ config->type = PHYLINK_NETDEV;
+ config->mac_managed_pm = true;
+
+ /* Stmmac always requires an RX clock for hardware initialization */
+ config->mac_requires_rxc = true;
+
+ /* Disable EEE RX clock stop to ensure VLAN register access works
+ * correctly.
+ */
+ if (!(priv->plat->flags & STMMAC_FLAG_RX_CLK_RUNS_IN_LPI) &&
+ !(priv->dev->features & NETIF_F_VLAN_FEATURES))
+ config->eee_rx_clk_stop_enable = true;
+
+ /* Set the default transmit clock stop bit based on the platform glue */
+ priv->tx_lpi_clk_stop = priv->plat->flags &
+ STMMAC_FLAG_EN_TX_LPI_CLOCKGATING;
mdio_bus_data = priv->plat->mdio_bus_data;
if (mdio_bus_data)
- priv->phylink_config.ovr_an_inband =
- mdio_bus_data->xpcs_an_inband;
+ config->default_an_inband = mdio_bus_data->default_an_inband;
- /* Set the platform/firmware specified interface mode. Note, phylink
- * deals with the PHY interface mode, not the MAC interface mode.
+ /* Get the PHY interface modes (at the PHY end of the link) that
+ * are supported by the platform.
*/
- __set_bit(mode, priv->phylink_config.supported_interfaces);
+ if (priv->plat->get_interfaces)
+ priv->plat->get_interfaces(priv, priv->plat->bsp_priv,
+ config->supported_interfaces);
+
+ /* Set the platform/firmware specified interface mode if the
+ * supported interfaces have not already been provided using
+ * phy_interface as a last resort.
+ */
+ if (phy_interface_empty(config->supported_interfaces))
+ __set_bit(priv->plat->phy_interface,
+ config->supported_interfaces);
/* If we have an xpcs, it defines which PHY interfaces are supported. */
if (priv->hw->xpcs)
- xpcs_get_interfaces(priv->hw->xpcs,
- priv->phylink_config.supported_interfaces);
+ pcs = xpcs_to_phylink_pcs(priv->hw->xpcs);
+ else
+ pcs = priv->hw->phylink_pcs;
- /* Get the MAC specific capabilities */
- stmmac_mac_phylink_get_caps(priv);
+ if (pcs)
+ phy_interface_or(config->supported_interfaces,
+ config->supported_interfaces,
+ pcs->supported_interfaces);
- priv->phylink_config.mac_capabilities = priv->hw->link.caps;
+ if (priv->dma_cap.eee) {
+ /* Assume all supported interfaces also support LPI */
+ memcpy(config->lpi_interfaces, config->supported_interfaces,
+ sizeof(config->lpi_interfaces));
- max_speed = priv->plat->max_speed;
- if (max_speed)
- phylink_limit_mac_speed(&priv->phylink_config, max_speed);
+ /* All full duplex speeds above 100Mbps are supported */
+ config->lpi_capabilities = ~(MAC_1000FD - 1) | MAC_100FD;
+ config->lpi_timer_default = eee_timer * 1000;
+ config->eee_enabled_default = true;
+ }
+
+ config->wol_phy_speed_ctrl = true;
+ if (priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL) {
+ config->wol_phy_legacy = true;
+ } else {
+ if (priv->dma_cap.pmt_remote_wake_up)
+ config->wol_mac_support |= WAKE_UCAST;
+ if (priv->dma_cap.pmt_magic_frame)
+ config->wol_mac_support |= WAKE_MAGIC;
+ }
fwnode = priv->plat->port_node;
if (!fwnode)
fwnode = dev_fwnode(priv->device);
- phylink = phylink_create(&priv->phylink_config, fwnode,
- mode, &stmmac_phylink_mac_ops);
+ phylink = phylink_create(config, fwnode, priv->plat->phy_interface,
+ &stmmac_phylink_mac_ops);
if (IS_ERR(phylink))
return PTR_ERR(phylink);
@@ -1315,9 +1426,17 @@ static void stmmac_display_rings(struct stmmac_priv *priv,
stmmac_display_tx_rings(priv, dma_conf);
}
-static int stmmac_set_bfsize(int mtu, int bufsize)
+static unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
+{
+ if (stmmac_xdp_is_enabled(priv))
+ return XDP_PACKET_HEADROOM;
+
+ return NET_SKB_PAD;
+}
+
+static int stmmac_set_bfsize(int mtu)
{
- int ret = bufsize;
+ int ret;
if (mtu >= BUF_SIZE_8KiB)
ret = BUF_SIZE_16KiB;
@@ -1446,7 +1565,7 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
buf->page_offset = stmmac_rx_offset(priv);
}
- if (priv->sph && !buf->sec_page) {
+ if (priv->sph_active && !buf->sec_page) {
buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
if (!buf->sec_page)
return -ENOMEM;
@@ -2011,22 +2130,31 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
struct stmmac_channel *ch = &priv->channel[queue];
bool xdp_prog = stmmac_xdp_is_enabled(priv);
struct page_pool_params pp_params = { 0 };
- unsigned int num_pages;
+ unsigned int dma_buf_sz_pad, num_pages;
unsigned int napi_id;
int ret;
+ dma_buf_sz_pad = stmmac_rx_offset(priv) + dma_conf->dma_buf_sz +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ num_pages = DIV_ROUND_UP(dma_buf_sz_pad, PAGE_SIZE);
+
rx_q->queue_index = queue;
rx_q->priv_data = priv;
+ rx_q->napi_skb_frag_size = num_pages * PAGE_SIZE;
pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
pp_params.pool_size = dma_conf->dma_rx_size;
- num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
- pp_params.order = ilog2(num_pages);
+ pp_params.order = order_base_2(num_pages);
pp_params.nid = dev_to_node(priv->device);
pp_params.dev = priv->device;
pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
pp_params.offset = stmmac_rx_offset(priv);
- pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
+ pp_params.max_len = dma_conf->dma_buf_sz;
+
+ if (priv->sph_active) {
+ pp_params.offset = 0;
+ pp_params.max_len += stmmac_rx_offset(priv);
+ }
rx_q->page_pool = page_pool_create(&pp_params);
if (IS_ERR(rx_q->page_pool)) {
@@ -2363,9 +2491,11 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
if (txfifosz == 0)
txfifosz = priv->dma_cap.tx_fifo_size;
- /* Adjust for real per queue fifo size */
- rxfifosz /= rx_channels_count;
- txfifosz /= tx_channels_count;
+ /* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */
+ if (dwmac_is_xmac(priv->plat->core_type)) {
+ rxfifosz /= rx_channels_count;
+ txfifosz /= tx_channels_count;
+ }
if (priv->plat->force_thresh_dma_mode) {
txmode = tc;
@@ -2451,9 +2581,20 @@ static u64 stmmac_xsk_fill_timestamp(void *_priv)
return 0;
}
+static void stmmac_xsk_request_launch_time(u64 launch_time, void *_priv)
+{
+ struct timespec64 ts = ns_to_timespec64(launch_time);
+ struct stmmac_metadata_request *meta_req = _priv;
+
+ if (meta_req->tbs & STMMAC_TBS_EN)
+ stmmac_set_desc_tbs(meta_req->priv, meta_req->edesc, ts.tv_sec,
+ ts.tv_nsec);
+}
+
static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
.tmo_request_timestamp = stmmac_xsk_request_timestamp,
.tmo_fill_timestamp = stmmac_xsk_fill_timestamp,
+ .tmo_request_launch_time = stmmac_xsk_request_launch_time,
};
static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
@@ -2461,6 +2602,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
+ bool csum = !priv->plat->tx_queues_cfg[queue].coe_unsupported;
struct xsk_buff_pool *pool = tx_q->xsk_pool;
unsigned int entry = tx_q->cur_tx;
struct dma_desc *tx_desc = NULL;
@@ -2473,7 +2615,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
budget = min(budget, stmmac_tx_avail(priv, queue));
- while (budget-- > 0) {
+ for (; budget > 0; budget--) {
struct stmmac_metadata_request meta_req;
struct xsk_tx_metadata *meta = NULL;
dma_addr_t dma_addr;
@@ -2491,9 +2633,9 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
if (!xsk_tx_peek_desc(pool, &xdp_desc))
break;
- if (priv->plat->est && priv->plat->est->enable &&
- priv->plat->est->max_sdu[queue] &&
- xdp_desc.len > priv->plat->est->max_sdu[queue]) {
+ if (priv->est && priv->est->enable &&
+ priv->est->max_sdu[queue] &&
+ xdp_desc.len > priv->est->max_sdu[queue]) {
priv->xstats.max_sdu_txq_drop[queue]++;
continue;
}
@@ -2537,6 +2679,8 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
meta_req.priv = priv;
meta_req.tx_desc = tx_desc;
meta_req.set_ic = &set_ic;
+ meta_req.tbs = tx_q->tbs;
+ meta_req.edesc = &tx_q->dma_entx[entry];
xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
&meta_req);
if (set_ic) {
@@ -2546,10 +2690,10 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
}
stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
- true, priv->mode, true, true,
+ csum, priv->mode, true, true,
xdp_desc.len);
- stmmac_enable_dma_transmission(priv, priv->ioaddr);
+ stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
xsk_tx_metadata_to_compl(meta,
&tx_q->tx_skbuff_dma[entry].xsk_meta);
@@ -2763,11 +2907,8 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
xmits = budget;
}
- if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
- priv->eee_sw_timer_en) {
- if (stmmac_enable_eee_mode(priv))
- mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
- }
+ if (priv->eee_sw_timer_en && !priv->tx_path_in_lpi_mode)
+ stmmac_restart_sw_lpi_timer(priv);
/* We still have pending packets, let's call for a new scheduling */
if (tx_q->dirty_tx != tx_q->cur_tx)
@@ -2908,7 +3049,7 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
u32 channels_to_check = tx_channel_count > rx_channel_count ?
tx_channel_count : rx_channel_count;
u32 chan;
- int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
+ int status[MAX_T(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
/* Make sure we never check beyond our status buffer. */
if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
@@ -2983,6 +3124,56 @@ static void stmmac_check_ether_addr(struct stmmac_priv *priv)
}
}
+int stmmac_get_phy_intf_sel(phy_interface_t interface)
+{
+ int phy_intf_sel = -EINVAL;
+
+ if (interface == PHY_INTERFACE_MODE_MII ||
+ interface == PHY_INTERFACE_MODE_GMII)
+ phy_intf_sel = PHY_INTF_SEL_GMII_MII;
+ else if (phy_interface_mode_is_rgmii(interface))
+ phy_intf_sel = PHY_INTF_SEL_RGMII;
+ else if (interface == PHY_INTERFACE_MODE_SGMII)
+ phy_intf_sel = PHY_INTF_SEL_SGMII;
+ else if (interface == PHY_INTERFACE_MODE_RMII)
+ phy_intf_sel = PHY_INTF_SEL_RMII;
+ else if (interface == PHY_INTERFACE_MODE_REVMII)
+ phy_intf_sel = PHY_INTF_SEL_REVMII;
+
+ return phy_intf_sel;
+}
+EXPORT_SYMBOL_GPL(stmmac_get_phy_intf_sel);
+
+static int stmmac_prereset_configure(struct stmmac_priv *priv)
+{
+ struct plat_stmmacenet_data *plat_dat = priv->plat;
+ phy_interface_t interface;
+ int phy_intf_sel, ret;
+
+ if (!plat_dat->set_phy_intf_sel)
+ return 0;
+
+ interface = plat_dat->phy_interface;
+ phy_intf_sel = stmmac_get_phy_intf_sel(interface);
+ if (phy_intf_sel < 0) {
+ netdev_err(priv->dev,
+ "failed to get phy_intf_sel for %s: %pe\n",
+ phy_modes(interface), ERR_PTR(phy_intf_sel));
+ return phy_intf_sel;
+ }
+
+ ret = plat_dat->set_phy_intf_sel(plat_dat->bsp_priv, phy_intf_sel);
+ if (ret == -EINVAL)
+ netdev_err(priv->dev, "platform does not support %s\n",
+ phy_modes(interface));
+ else if (ret < 0)
+ netdev_err(priv->dev,
+ "platform failed to set interface %s: %pe\n",
+ phy_modes(interface), ERR_PTR(ret));
+
+ return ret;
+}
+
/**
* stmmac_init_dma_engine - DMA init.
* @priv: driver private structure
@@ -2999,25 +3190,28 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
struct stmmac_rx_queue *rx_q;
struct stmmac_tx_queue *tx_q;
u32 chan = 0;
- int atds = 0;
int ret = 0;
if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
- dev_err(priv->device, "Invalid DMA configuration\n");
+ netdev_err(priv->dev, "Invalid DMA configuration\n");
return -EINVAL;
}
if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
- atds = 1;
+ priv->plat->dma_cfg->atds = 1;
+
+ ret = stmmac_prereset_configure(priv);
+ if (ret)
+ return ret;
- ret = stmmac_reset(priv, priv->ioaddr);
+ ret = stmmac_reset(priv);
if (ret) {
- dev_err(priv->device, "Failed to reset the dma\n");
+ netdev_err(priv->dev, "Failed to reset the dma\n");
return ret;
}
/* DMA Configuration */
- stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
+ stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg);
if (priv->plat->axi)
stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
@@ -3130,8 +3324,7 @@ static void stmmac_init_coalesce(struct stmmac_priv *priv)
priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
- hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- tx_q->txtimer.function = stmmac_tx_timer;
+ hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}
for (chan = 0; chan < rx_channel_count; chan++)
@@ -3353,31 +3546,9 @@ static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
}
}
-static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
-{
- char *name;
-
- clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
- clear_bit(__FPE_REMOVING, &priv->fpe_task_state);
-
- name = priv->wq_name;
- sprintf(name, "%s-fpe", priv->dev->name);
-
- priv->fpe_wq = create_singlethread_workqueue(name);
- if (!priv->fpe_wq) {
- netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
-
- return -ENOMEM;
- }
- netdev_info(priv->dev, "FPE workqueue start");
-
- return 0;
-}
-
/**
* stmmac_hw_setup - setup mac in a usable state.
* @dev : pointer to the device structure.
- * @ptp_register: register PTP if set
* Description:
* this is the main function to setup the HW in a usable state because the
* dma engine is reset, the core registers are configured (e.g. AXI,
@@ -3387,7 +3558,7 @@ static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
* 0 on success and an appropriate (-)ve integer as defined in errno.h
* file on failure.
*/
-static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
+static int stmmac_hw_setup(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
u32 rx_cnt = priv->plat->rx_queues_to_use;
@@ -3396,9 +3567,22 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
u32 chan;
int ret;
+ /* Make sure RX clock is enabled */
+ if (priv->hw->phylink_pcs)
+ phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);
+
+ /* Note that clk_rx_i must be running for reset to complete. This
+ * clock may also be required when setting the MAC address.
+ *
+ * Block the receive clock stop for LPI mode at the PHY in case
+ * the link is established with EEE mode active.
+ */
+ phylink_rx_clk_stop_block(priv->phylink);
+
/* DMA initialization and SW reset */
ret = stmmac_init_dma_engine(priv);
if (ret < 0) {
+ phylink_rx_clk_stop_unblock(priv->phylink);
netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
__func__);
return ret;
@@ -3406,19 +3590,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
/* Copy the MAC addr into the HW */
stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
-
- /* PS and related bits will be programmed according to the speed */
- if (priv->hw->pcs) {
- int speed = priv->plat->mac_port_sel_speed;
-
- if ((speed == SPEED_10) || (speed == SPEED_100) ||
- (speed == SPEED_1000)) {
- priv->hw->ps = speed;
- } else {
- dev_warn(priv->device, "invalid port speed\n");
- priv->hw->ps = 0;
- }
- }
+ phylink_rx_clk_stop_unblock(priv->phylink);
/* Initialize the MAC Core */
stmmac_core_init(priv, priv->hw, dev);
@@ -3444,28 +3616,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
stmmac_mmc_setup(priv);
- if (ptp_register) {
- ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
- if (ret < 0)
- netdev_warn(priv->dev,
- "failed to enable PTP reference clock: %pe\n",
- ERR_PTR(ret));
- }
-
- ret = stmmac_init_ptp(priv);
- if (ret == -EOPNOTSUPP)
- netdev_info(priv->dev, "PTP not supported by HW\n");
- else if (ret)
- netdev_warn(priv->dev, "PTP init failed\n");
- else if (ptp_register)
- stmmac_ptp_register(priv);
-
- priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
-
- /* Convert the timer from msec to usec */
- if (!priv->tx_lpi_timer)
- priv->tx_lpi_timer = eee_timer * 1000;
-
if (priv->use_riwt) {
u32 queue;
@@ -3478,9 +3628,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
}
}
- if (priv->hw->pcs)
- stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
-
/* set TX and RX rings length */
stmmac_set_rings_length(priv);
@@ -3498,7 +3645,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
}
/* Enable Split Header */
- sph_en = (priv->hw->rx_csum > 0) && priv->sph;
+ sph_en = (priv->hw->rx_csum > 0) && priv->sph_active;
for (chan = 0; chan < rx_cnt; chan++)
stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
@@ -3522,25 +3669,13 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
/* Start the ball rolling... */
stmmac_start_all_dma(priv);
+ phylink_rx_clk_stop_block(priv->phylink);
stmmac_set_hw_vlan_mode(priv, priv->hw);
-
- if (priv->dma_cap.fpesel) {
- stmmac_fpe_start_wq(priv);
-
- if (priv->plat->fpe_cfg->enable)
- stmmac_fpe_handshake(priv, true);
- }
+ phylink_rx_clk_stop_unblock(priv->phylink);
return 0;
}
-static void stmmac_hw_teardown(struct net_device *dev)
-{
- struct stmmac_priv *priv = netdev_priv(dev);
-
- clk_disable_unprepare(priv->plat->clk_ptp_ref);
-}
-
static void stmmac_free_irq(struct net_device *dev,
enum request_irq_err irq_err, int irq_idx)
{
@@ -3601,7 +3736,6 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
enum request_irq_err irq_err;
- cpumask_t cpu_mask;
int irq_idx = 0;
char *int_name;
int ret;
@@ -3623,7 +3757,6 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
/* Request the Wake IRQ in case of another line
* is used for WoL
*/
- priv->wol_irq_disabled = true;
if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
int_name = priv->int_name_wol;
sprintf(int_name, "%s:%s", dev->name, "wol");
@@ -3730,9 +3863,8 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
irq_idx = i;
goto irq_error;
}
- cpumask_clear(&cpu_mask);
- cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
- irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
+ irq_set_affinity_hint(priv->rx_irq[i],
+ cpumask_of(i % num_online_cpus()));
}
/* Request Tx MSI irq */
@@ -3755,9 +3887,8 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
irq_idx = i;
goto irq_error;
}
- cpumask_clear(&cpu_mask);
- cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
- irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
+ irq_set_affinity_hint(priv->tx_irq[i],
+ cpumask_of(i % num_online_cpus()));
}
return 0;
@@ -3869,12 +4000,13 @@ stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
return ERR_PTR(-ENOMEM);
}
+ /* Returns 0 or BUF_SIZE_16KiB if mtu > 8KiB and dwmac4 or ring mode */
bfsize = stmmac_set_16kib_bfsize(priv, mtu);
if (bfsize < 0)
bfsize = 0;
if (bfsize < BUF_SIZE_16KiB)
- bfsize = stmmac_set_bfsize(mtu, 0);
+ bfsize = stmmac_set_bfsize(mtu);
dma_conf->dma_buf_sz = bfsize;
/* Chose the tx/rx size from the already defined one in the
@@ -3934,31 +4066,9 @@ static int __stmmac_open(struct net_device *dev,
struct stmmac_dma_conf *dma_conf)
{
struct stmmac_priv *priv = netdev_priv(dev);
- int mode = priv->plat->phy_interface;
u32 chan;
int ret;
- ret = pm_runtime_resume_and_get(priv->device);
- if (ret < 0)
- return ret;
-
- if (priv->hw->pcs != STMMAC_PCS_TBI &&
- priv->hw->pcs != STMMAC_PCS_RTBI &&
- (!priv->hw->xpcs ||
- xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73) &&
- !priv->hw->lynx_pcs) {
- ret = stmmac_init_phy(dev);
- if (ret) {
- netdev_err(priv->dev,
- "%s: Cannot attach to PHY (error: %d)\n",
- __func__, ret);
- goto init_phy_error;
- }
- }
-
- priv->rx_copybreak = STMMAC_RX_COPYBREAK;
-
- buf_sz = dma_conf->dma_buf_sz;
for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
@@ -3976,17 +4086,17 @@ static int __stmmac_open(struct net_device *dev,
}
}
- ret = stmmac_hw_setup(dev, true);
+ ret = stmmac_hw_setup(dev);
if (ret < 0) {
netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
goto init_error;
}
+ stmmac_setup_ptp(priv);
+
stmmac_init_coalesce(priv);
phylink_start(priv->phylink);
- /* We may have called phylink_speed_down before */
- phylink_speed_up(priv->phylink);
ret = stmmac_request_irq(dev);
if (ret)
@@ -4004,11 +4114,8 @@ irq_error:
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
- stmmac_hw_teardown(dev);
+ stmmac_release_ptp(priv);
init_error:
- phylink_disconnect_phy(priv->phylink);
-init_phy_error:
- pm_runtime_put(priv->device);
return ret;
}
@@ -4018,46 +4125,50 @@ static int stmmac_open(struct net_device *dev)
struct stmmac_dma_conf *dma_conf;
int ret;
+ /* Initialise the tx lpi timer, converting from msec to usec */
+ if (!priv->tx_lpi_timer)
+ priv->tx_lpi_timer = eee_timer * 1000;
+
dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
if (IS_ERR(dma_conf))
return PTR_ERR(dma_conf);
+ ret = pm_runtime_resume_and_get(priv->device);
+ if (ret < 0)
+ goto err_dma_resources;
+
+ ret = stmmac_init_phy(dev);
+ if (ret)
+ goto err_runtime_pm;
+
ret = __stmmac_open(dev, dma_conf);
if (ret)
- free_dma_desc_resources(priv, dma_conf);
+ goto err_disconnect_phy;
kfree(dma_conf);
- return ret;
-}
-static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
-{
- set_bit(__FPE_REMOVING, &priv->fpe_task_state);
+ /* We may have called phylink_speed_down before */
+ phylink_speed_up(priv->phylink);
- if (priv->fpe_wq) {
- destroy_workqueue(priv->fpe_wq);
- priv->fpe_wq = NULL;
- }
+ return ret;
- netdev_info(priv->dev, "FPE workqueue stop");
+err_disconnect_phy:
+ phylink_disconnect_phy(priv->phylink);
+err_runtime_pm:
+ pm_runtime_put(priv->device);
+err_dma_resources:
+ free_dma_desc_resources(priv, dma_conf);
+ kfree(dma_conf);
+ return ret;
}
-/**
- * stmmac_release - close entry point of the driver
- * @dev : device pointer.
- * Description:
- * This is the stop entry point of the driver.
- */
-static int stmmac_release(struct net_device *dev)
+static void __stmmac_release(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
u32 chan;
- if (device_may_wakeup(priv->device))
- phylink_speed_down(priv->phylink, false);
/* Stop and disconnect the PHY */
phylink_stop(priv->phylink);
- phylink_disconnect_phy(priv->phylink);
stmmac_disable_all_queues(priv);
@@ -4069,32 +4180,43 @@ static int stmmac_release(struct net_device *dev)
/* Free the IRQ lines */
stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
- if (priv->eee_enabled) {
- priv->tx_path_in_lpi_mode = false;
- del_timer_sync(&priv->eee_ctrl_timer);
- }
-
/* Stop TX/RX DMA and clear the descriptors */
stmmac_stop_all_dma(priv);
/* Release and free the Rx/Tx resources */
free_dma_desc_resources(priv, &priv->dma_conf);
- /* Disable the MAC Rx/Tx */
- stmmac_mac_set(priv, priv->ioaddr, false);
-
/* Powerdown Serdes if there is */
if (priv->plat->serdes_powerdown)
priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
- netif_carrier_off(dev);
-
stmmac_release_ptp(priv);
- pm_runtime_put(priv->device);
+ if (stmmac_fpe_supported(priv))
+ ethtool_mmsv_stop(&priv->fpe_cfg.mmsv);
+}
- if (priv->dma_cap.fpesel)
- stmmac_fpe_stop_wq(priv);
+/**
+ * stmmac_release - close entry point of the driver
+ * @dev : device pointer.
+ * Description:
+ * This is the stop entry point of the driver.
+ */
+static int stmmac_release(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ /* If the PHY or MAC has WoL enabled, then the PHY will not be
+ * suspended when phylink_stop() is called below. Set the PHY
+ * to its slowest speed to save power.
+ */
+ if (device_may_wakeup(priv->device))
+ phylink_speed_down(priv->phylink, false);
+
+ __stmmac_release(dev);
+
+ phylink_disconnect_phy(priv->phylink);
+ pm_runtime_put(priv->device);
return 0;
}
@@ -4102,18 +4224,11 @@ static int stmmac_release(struct net_device *dev)
static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
struct stmmac_tx_queue *tx_q)
{
- u16 tag = 0x0, inner_tag = 0x0;
- u32 inner_type = 0x0;
struct dma_desc *p;
+ u16 tag = 0x0;
- if (!priv->dma_cap.vlins)
+ if (!priv->dma_cap.vlins || !skb_vlan_tag_present(skb))
return false;
- if (!skb_vlan_tag_present(skb))
- return false;
- if (skb->vlan_proto == htons(ETH_P_8021AD)) {
- inner_tag = skb_vlan_tag_get(skb);
- inner_type = STMMAC_VLAN_INSERT;
- }
tag = skb_vlan_tag_get(skb);
@@ -4122,7 +4237,7 @@ static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
else
p = &tx_q->dma_tx[tx_q->cur_tx];
- if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
+ if (stmmac_set_desc_vlan_tag(priv, p, tag, 0x0, 0x0))
return false;
stmmac_set_tx_owner(priv, p);
@@ -4164,11 +4279,7 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
desc = &tx_q->dma_tx[tx_q->cur_tx];
curr_addr = des + (total_len - tmp_len);
- if (priv->dma_cap.addr64 <= 32)
- desc->des0 = cpu_to_le32(curr_addr);
- else
- stmmac_set_desc_addr(priv, desc, curr_addr);
-
+ stmmac_set_desc_addr(priv, desc, curr_addr);
buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
TSO_MAX_BUFF_SIZE : tmp_len;
@@ -4214,17 +4325,27 @@ static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
* First Descriptor
* --------
* | DES0 |---> buffer1 = L2/L3/L4 header
- * | DES1 |---> TCP Payload (can continue on next descr...)
- * | DES2 |---> buffer 1 and 2 len
+ * | DES1 |---> can be used as buffer2 for TCP Payload if the DMA AXI address
+ * | | width is 32-bit, but we never use it.
+ * | | Also can be used as the most-significant 8-bits or 16-bits of
+ * | | buffer1 address pointer if the DMA AXI address width is 40-bit
+ * | | or 48-bit, and we always use it.
+ * | DES2 |---> buffer1 len
* | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
* --------
+ * --------
+ * | DES0 |---> buffer1 = TCP Payload (can continue on next descr...)
+ * | DES1 |---> same as the First Descriptor
+ * | DES2 |---> buffer1 len
+ * | DES3 |
+ * --------
* |
* ...
* |
* --------
- * | DES0 | --| Split TCP Payload on Buffers 1 and 2
- * | DES1 | --|
- * | DES2 | --> buffer 1 and 2 len
+ * | DES0 |---> buffer1 = Split TCP Payload
+ * | DES1 |---> same as the First Descriptor
+ * | DES2 |---> buffer1 len
* | DES3 |
* --------
*
@@ -4234,17 +4355,30 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dma_desc *desc, *first, *mss_desc = NULL;
struct stmmac_priv *priv = netdev_priv(dev);
- int nfrags = skb_shinfo(skb)->nr_frags;
- u32 queue = skb_get_queue_mapping(skb);
unsigned int first_entry, tx_packets;
struct stmmac_txq_stats *txq_stats;
- int tmp_pay_len = 0, first_tx;
struct stmmac_tx_queue *tx_q;
- bool has_vlan, set_ic;
+ u32 pay_len, mss, queue;
+ int i, first_tx, nfrags;
u8 proto_hdr_len, hdr;
- u32 pay_len, mss;
dma_addr_t des;
- int i;
+ bool set_ic;
+
+ /* Always insert VLAN tag to SKB payload for TSO frames.
+ *
+	 * Never insert VLAN tag by HW, since segments split by the
+	 * TSO engine would be left untagged by mistake.
+ */
+ if (skb_vlan_tag_present(skb)) {
+ skb = __vlan_hwaccel_push_inside(skb);
+ if (unlikely(!skb)) {
+ priv->xstats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+ }
+
+ nfrags = skb_shinfo(skb)->nr_frags;
+ queue = skb_get_queue_mapping(skb);
tx_q = &priv->dma_conf.tx_queue[queue];
txq_stats = &priv->xstats.txq_stats[queue];
@@ -4298,9 +4432,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
skb->data_len);
}
- /* Check if VLAN can be inserted by HW */
- has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
-
first_entry = tx_q->cur_tx;
WARN_ON(tx_q->tx_skbuff[first_entry]);
@@ -4310,37 +4441,32 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
desc = &tx_q->dma_tx[first_entry];
first = desc;
- if (has_vlan)
- stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
-
/* first descriptor: fill Headers on Buf1 */
des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
DMA_TO_DEVICE);
if (dma_mapping_error(priv->device, des))
goto dma_map_err;
- tx_q->tx_skbuff_dma[first_entry].buf = des;
- tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
- tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
- tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
-
- if (priv->dma_cap.addr64 <= 32) {
- first->des0 = cpu_to_le32(des);
-
- /* Fill start of payload in buff2 of first descriptor */
- if (pay_len)
- first->des1 = cpu_to_le32(des + proto_hdr_len);
-
- /* If needed take extra descriptors to fill the remaining payload */
- tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
- } else {
- stmmac_set_desc_addr(priv, first, des);
- tmp_pay_len = pay_len;
- des += proto_hdr_len;
- pay_len = 0;
- }
-
- stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
+ stmmac_set_desc_addr(priv, first, des);
+ stmmac_tso_allocator(priv, des + proto_hdr_len, pay_len,
+ (nfrags == 0), queue);
+
+ /* In case two or more DMA transmit descriptors are allocated for this
+ * non-paged SKB data, the DMA buffer address should be saved to
+ * tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor,
+ * and leave the other tx_q->tx_skbuff_dma[].buf as NULL to guarantee
+ * that stmmac_tx_clean() does not unmap the entire DMA buffer too early
+	 * since the tail areas of the DMA buffer can be accessed by the
+	 * DMA engine sooner or later.
+ * By saving the DMA buffer address to tx_q->tx_skbuff_dma[].buf
+ * corresponding to the last descriptor, stmmac_tx_clean() will unmap
+ * this DMA buffer right after the DMA engine completely finishes the
+ * full buffer transmission.
+ */
+ tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
+ tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb);
+ tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false;
+ tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
/* Prepare fragments */
for (i = 0; i < nfrags; i++) {
@@ -4417,8 +4543,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
if (priv->sarc_type)
stmmac_set_desc_sarc(priv, first, priv->sarc_type);
- skb_tx_timestamp(skb);
-
if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
priv->hwts_tx_en)) {
/* declare that device is doing timestamping */
@@ -4427,11 +4551,9 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
}
/* Complete the first descriptor before granting the DMA */
- stmmac_prepare_tso_tx_desc(priv, first, 1,
- proto_hdr_len,
- pay_len,
- 1, tx_q->tx_skbuff_dma[first_entry].last_segment,
- hdr / 4, (skb->len - proto_hdr_len));
+ stmmac_prepare_tso_tx_desc(priv, first, 1, proto_hdr_len, 0, 1,
+ tx_q->tx_skbuff_dma[first_entry].last_segment,
+ hdr / 4, (skb->len - proto_hdr_len));
/* If context desc is used to change MSS */
if (mss_desc) {
@@ -4453,6 +4575,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
}
netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
+ skb_tx_timestamp(skb);
stmmac_flush_tx_descriptors(priv, queue);
stmmac_tx_timer_arm(priv, queue);
@@ -4498,41 +4621,48 @@ static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
*/
static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
{
- unsigned int first_entry, tx_packets, enh_desc;
+ bool enh_desc, has_vlan, set_ic, is_jumbo = false;
struct stmmac_priv *priv = netdev_priv(dev);
unsigned int nopaged_len = skb_headlen(skb);
- int i, csum_insertion = 0, is_jumbo = 0;
u32 queue = skb_get_queue_mapping(skb);
int nfrags = skb_shinfo(skb)->nr_frags;
+ unsigned int first_entry, tx_packets;
int gso = skb_shinfo(skb)->gso_type;
struct stmmac_txq_stats *txq_stats;
struct dma_edesc *tbs_desc = NULL;
struct dma_desc *desc, *first;
struct stmmac_tx_queue *tx_q;
- bool has_vlan, set_ic;
+ int i, csum_insertion = 0;
int entry, first_tx;
dma_addr_t des;
+ u32 sdu_len;
tx_q = &priv->dma_conf.tx_queue[queue];
txq_stats = &priv->xstats.txq_stats[queue];
first_tx = tx_q->cur_tx;
if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
- stmmac_disable_eee_mode(priv);
+ stmmac_stop_sw_lpi(priv);
/* Manage oversized TCP frames for GMAC4 device */
if (skb_is_gso(skb) && priv->tso) {
if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
return stmmac_tso_xmit(skb, dev);
- if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
+ if (priv->plat->core_type == DWMAC_CORE_GMAC4 &&
+ (gso & SKB_GSO_UDP_L4))
return stmmac_tso_xmit(skb, dev);
}
- if (priv->plat->est && priv->plat->est->enable &&
- priv->plat->est->max_sdu[queue] &&
- skb->len > priv->plat->est->max_sdu[queue]){
- priv->xstats.max_sdu_txq_drop[queue]++;
- goto max_sdu_err;
+ if (priv->est && priv->est->enable &&
+ priv->est->max_sdu[queue]) {
+ sdu_len = skb->len;
+ /* Add VLAN tag length if VLAN tag insertion offload is requested */
+ if (priv->dma_cap.vlins && skb_vlan_tag_present(skb))
+ sdu_len += VLAN_HLEN;
+ if (sdu_len > priv->est->max_sdu[queue]) {
+ priv->xstats.max_sdu_txq_drop[queue]++;
+ goto max_sdu_err;
+ }
}
if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
@@ -4696,8 +4826,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if (priv->sarc_type)
stmmac_set_desc_sarc(priv, first, priv->sarc_type);
- skb_tx_timestamp(skb);
-
/* Ready to fill the first descriptor and set the OWN bit w/o any
* problems because all the descriptors are actually ready to be
* passed to the DMA engine.
@@ -4743,8 +4871,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
- stmmac_enable_dma_transmission(priv, priv->ioaddr);
-
+ stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
+ skb_tx_timestamp(skb);
stmmac_flush_tx_descriptors(priv, queue);
stmmac_tx_timer_arm(priv, queue);
@@ -4809,7 +4937,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
break;
}
- if (priv->sph && !buf->sec_page) {
+ if (priv->sph_active && !buf->sec_page) {
buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
if (!buf->sec_page)
break;
@@ -4820,7 +4948,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
stmmac_set_desc_addr(priv, p, buf->addr);
- if (priv->sph)
+ if (priv->sph_active)
stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
else
stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
@@ -4845,6 +4973,8 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
rx_q->rx_tail_addr = rx_q->dma_rx_phy +
(rx_q->dirty_rx * sizeof(struct dma_desc));
stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
+ /* Wake up Rx DMA from the suspend state if required */
+ stmmac_enable_dma_reception(priv, priv->ioaddr, queue);
}
static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
@@ -4855,12 +4985,12 @@ static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
int coe = priv->hw->rx_csum;
/* Not first descriptor, buffer is always zero */
- if (priv->sph && len)
+ if (priv->sph_active && len)
return 0;
/* First descriptor, get split header length */
stmmac_get_rx_header_len(priv, p, &hlen);
- if (priv->sph && hlen) {
+ if (priv->sph_active && hlen) {
priv->xstats.rx_split_hdr_pkt_n++;
return hlen;
}
@@ -4883,7 +5013,7 @@ static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
unsigned int plen = 0;
/* Not split header, buffer is not available */
- if (!priv->sph)
+ if (!priv->sph_active)
return 0;
/* Not last descriptor */
@@ -4901,6 +5031,7 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
{
struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
+ bool csum = !priv->plat->tx_queues_cfg[queue].coe_unsupported;
unsigned int entry = tx_q->cur_tx;
struct dma_desc *tx_desc;
dma_addr_t dma_addr;
@@ -4909,9 +5040,9 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
return STMMAC_XDP_CONSUMED;
- if (priv->plat->est && priv->plat->est->enable &&
- priv->plat->est->max_sdu[queue] &&
- xdpf->len > priv->plat->est->max_sdu[queue]) {
+ if (priv->est && priv->est->enable &&
+ priv->est->max_sdu[queue] &&
+ xdpf->len > priv->est->max_sdu[queue]) {
priv->xstats.max_sdu_txq_drop[queue]++;
return STMMAC_XDP_CONSUMED;
}
@@ -4952,7 +5083,7 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
stmmac_set_desc_addr(priv, tx_desc, dma_addr);
stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
- true, priv->mode, true, true,
+ csum, priv->mode, true, true,
xdpf->len);
tx_q->tx_count_frames++;
@@ -4970,7 +5101,7 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
u64_stats_update_end(&txq_stats->q_syncp);
}
- stmmac_enable_dma_transmission(priv, priv->ioaddr);
+ stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
tx_q->cur_tx = entry;
@@ -5094,9 +5225,8 @@ static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
unsigned int datasize = xdp->data_end - xdp->data;
struct sk_buff *skb;
- skb = __napi_alloc_skb(&ch->rxtx_napi,
- xdp->data_end - xdp->data_hard_start,
- GFP_ATOMIC | __GFP_NOWARN);
+ skb = napi_alloc_skb(&ch->rxtx_napi,
+ xdp->data_end - xdp->data_hard_start);
if (unlikely(!skb))
return NULL;
@@ -5266,10 +5396,10 @@ static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
len = 0;
}
+read_again:
if (count >= limit)
break;
-read_again:
buf1_len = 0;
entry = next_entry;
buf = &rx_q->buf_pool[entry];
@@ -5352,7 +5482,7 @@ read_again:
/* RX buffer is good and fit into a XSK pool buffer */
buf->xdp->data_end = buf->xdp->data + buf1_len;
- xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
+ xsk_buff_dma_sync_for_cpu(buf->xdp);
prog = READ_ONCE(priv->xdp_prog);
res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
@@ -5426,10 +5556,10 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
struct sk_buff *skb = NULL;
struct stmmac_xdp_buff ctx;
int xdp_status = 0;
- int buf_sz;
+ int bufsz;
dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
- buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
+ bufsz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
if (netif_msg_rx_status(priv)) {
@@ -5500,7 +5630,7 @@ read_again:
if (priv->extend_desc)
stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
if (unlikely(status == discard_frame)) {
- page_pool_recycle_direct(rx_q->page_pool, buf->page);
+ page_pool_put_page(rx_q->page_pool, buf->page, 0, true);
buf->page = NULL;
error = 1;
if (!priv->hwts_rx_en)
@@ -5518,10 +5648,6 @@ read_again:
/* Buffer is good. Go on. */
- prefetch(page_address(buf->page) + buf->page_offset);
- if (buf->sec_page)
- prefetch(page_address(buf->sec_page));
-
buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
len += buf1_len;
buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
@@ -5543,8 +5669,10 @@ read_again:
dma_sync_single_for_cpu(priv->device, buf->addr,
buf1_len, dma_dir);
+ net_prefetch(page_address(buf->page) +
+ buf->page_offset);
- xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
+ xdp_init_buff(&ctx.xdp, bufsz, &rx_q->xdp_rxq);
xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
buf->page_offset, buf1_len, true);
@@ -5596,22 +5724,26 @@ read_again:
}
if (!skb) {
+ unsigned int head_pad_len;
+
/* XDP program may expand or reduce tail */
buf1_len = ctx.xdp.data_end - ctx.xdp.data;
- skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
+ skb = napi_build_skb(page_address(buf->page),
+ rx_q->napi_skb_frag_size);
if (!skb) {
+ page_pool_recycle_direct(rx_q->page_pool,
+ buf->page);
rx_dropped++;
count++;
goto drain_data;
}
/* XDP program may adjust header */
- skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
+ head_pad_len = ctx.xdp.data - ctx.xdp.data_hard_start;
+ skb_reserve(skb, head_pad_len);
skb_put(skb, buf1_len);
-
- /* Data payload copied into SKB, page ready for recycle */
- page_pool_recycle_direct(rx_q->page_pool, buf->page);
+ skb_mark_for_recycle(skb);
buf->page = NULL;
} else if (buf1_len) {
dma_sync_single_for_cpu(priv->device, buf->addr,
@@ -5619,9 +5751,6 @@ read_again:
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
buf->page, buf->page_offset, buf1_len,
priv->dma_conf.dma_buf_sz);
-
- /* Data payload appended into SKB */
- skb_mark_for_recycle(skb);
buf->page = NULL;
}
@@ -5631,9 +5760,6 @@ read_again:
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
buf->sec_page, 0, buf2_len,
priv->dma_conf.dma_buf_sz);
-
- /* Data payload appended into SKB */
- skb_mark_for_recycle(skb);
buf->sec_page = NULL;
}
@@ -5656,7 +5782,8 @@ drain_data:
skb->protocol = eth_type_trans(skb, priv->dev);
- if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
+ if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb) ||
+ (status & csum_none))
skb_checksum_none_assert(skb);
else
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -5832,6 +5959,9 @@ static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
* whenever multicast addresses must be enabled/disabled.
* Return value:
* void.
+ *
+ * FIXME: This may need RXC to be running, but it may be called with BH
+ * disabled, which means we can't call phylink_rx_clk_stop*().
*/
static void stmmac_set_rx_mode(struct net_device *dev)
{
@@ -5885,7 +6015,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
return PTR_ERR(dma_conf);
}
- stmmac_release(dev);
+ __stmmac_release(dev);
ret = __stmmac_open(dev, dma_conf);
if (ret) {
@@ -5900,7 +6030,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
stmmac_set_rx_mode(dev);
}
- dev->mtu = mtu;
+ WRITE_ONCE(dev->mtu, mtu);
netdev_update_features(dev);
return 0;
@@ -5951,8 +6081,8 @@ static int stmmac_set_features(struct net_device *netdev,
*/
stmmac_rx_ipc(priv, priv->hw);
- if (priv->sph_cap) {
- bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
+ if (priv->sph_capable) {
+ bool sph_en = (priv->hw->rx_csum > 0) && priv->sph_active;
u32 chan;
for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
@@ -5964,54 +6094,13 @@ static int stmmac_set_features(struct net_device *netdev,
else
priv->hw->hw_vlan_en = false;
+ phylink_rx_clk_stop_block(priv->phylink);
stmmac_set_hw_vlan_mode(priv, priv->hw);
+ phylink_rx_clk_stop_unblock(priv->phylink);
return 0;
}
-static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
-{
- struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
- enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
- enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
- bool *hs_enable = &fpe_cfg->hs_enable;
-
- if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
- return;
-
- /* If LP has sent verify mPacket, LP is FPE capable */
- if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
- if (*lp_state < FPE_STATE_CAPABLE)
- *lp_state = FPE_STATE_CAPABLE;
-
- /* If user has requested FPE enable, quickly response */
- if (*hs_enable)
- stmmac_fpe_send_mpacket(priv, priv->ioaddr,
- fpe_cfg,
- MPACKET_RESPONSE);
- }
-
- /* If Local has sent verify mPacket, Local is FPE capable */
- if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
- if (*lo_state < FPE_STATE_CAPABLE)
- *lo_state = FPE_STATE_CAPABLE;
- }
-
- /* If LP has sent response mPacket, LP is entering FPE ON */
- if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
- *lp_state = FPE_STATE_ENTERING_ON;
-
- /* If Local has sent response mPacket, Local is entering FPE ON */
- if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
- *lo_state = FPE_STATE_ENTERING_ON;
-
- if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
- !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
- priv->fpe_wq) {
- queue_work(priv->fpe_wq, &priv->fpe_task);
- }
-}
-
static void stmmac_common_interrupt(struct stmmac_priv *priv)
{
u32 rx_cnt = priv->plat->rx_queues_to_use;
@@ -6020,7 +6109,7 @@ static void stmmac_common_interrupt(struct stmmac_priv *priv)
u32 queue;
bool xmac;
- xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
+ xmac = dwmac_is_xmac(priv->plat->core_type);
queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
if (priv->irq_wake)
@@ -6030,15 +6119,11 @@ static void stmmac_common_interrupt(struct stmmac_priv *priv)
stmmac_est_irq_status(priv, priv, priv->dev,
&priv->xstats, tx_cnt);
- if (priv->dma_cap.fpesel) {
- int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
- priv->dev);
-
- stmmac_fpe_event_status(priv, status);
- }
+ if (stmmac_fpe_supported(priv))
+ stmmac_fpe_irq_status(priv);
/* To handle GMAC own interrupts */
- if ((priv->plat->has_gmac) || xmac) {
+ if (priv->plat->core_type == DWMAC_CORE_GMAC || xmac) {
int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
if (unlikely(status)) {
@@ -6052,15 +6137,6 @@ static void stmmac_common_interrupt(struct stmmac_priv *priv)
for (queue = 0; queue < queues_count; queue++)
stmmac_host_mtl_irq_status(priv, priv->hw, queue);
- /* PCS link status */
- if (priv->hw->pcs &&
- !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
- if (priv->xstats.pcs_link)
- netif_carrier_on(priv->dev);
- else
- netif_carrier_off(priv->dev);
- }
-
stmmac_timestamp_interrupt(priv, priv);
}
}
@@ -6197,12 +6273,6 @@ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
case SIOCSMIIREG:
ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
break;
- case SIOCSHWTSTAMP:
- ret = stmmac_hwtstamp_set(dev, rq);
- break;
- case SIOCGHWTSTAMP:
- ret = stmmac_hwtstamp_get(dev, rq);
- break;
default:
break;
}
@@ -6246,6 +6316,8 @@ static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
switch (type) {
case TC_QUERY_CAPS:
return stmmac_tc_query_caps(priv, priv, type_data);
+ case TC_SETUP_QDISC_MQPRIO:
+ return stmmac_tc_setup_mqprio(priv, priv, type_data);
case TC_SETUP_BLOCK:
return flow_block_cb_setup_simple(type_data,
&stmmac_block_cb_list,
@@ -6293,7 +6365,9 @@ static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
if (ret)
goto set_mac_error;
+ phylink_rx_clk_stop_block(priv->phylink);
stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
+ phylink_rx_clk_stop_unblock(priv->phylink);
set_mac_error:
pm_runtime_put(priv->device);
@@ -6410,7 +6484,7 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
(priv->dma_cap.mbps_1000) ? "Y" : "N");
seq_printf(seq, "\tHalf duplex: %s\n",
(priv->dma_cap.half_duplex) ? "Y" : "N");
- if (priv->plat->has_xgmac) {
+ if (priv->plat->core_type == DWMAC_CORE_XGMAC) {
seq_printf(seq,
"\tNumber of Additional MAC address registers: %d\n",
priv->dma_cap.multi_addr);
@@ -6434,7 +6508,7 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
(priv->dma_cap.time_stamp) ? "Y" : "N");
seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
(priv->dma_cap.atime_stamp) ? "Y" : "N");
- if (priv->plat->has_xgmac)
+ if (priv->plat->core_type == DWMAC_CORE_XGMAC)
seq_printf(seq, "\tTimestamp System Time Source: %s\n",
dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
@@ -6443,7 +6517,7 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
seq_printf(seq, "\tChecksum Offload in TX: %s\n",
(priv->dma_cap.tx_coe) ? "Y" : "N");
if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
- priv->plat->has_xgmac) {
+ priv->plat->core_type == DWMAC_CORE_XGMAC) {
seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
(priv->dma_cap.rx_coe) ? "Y" : "N");
} else {
@@ -6561,11 +6635,7 @@ static int stmmac_device_event(struct notifier_block *unused,
switch (event) {
case NETDEV_CHANGENAME:
- if (priv->dbgfs_dir)
- priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
- priv->dbgfs_dir,
- stmmac_fs_dir,
- dev->name);
+ debugfs_change_name(priv->dbgfs_dir, "%s", dev->name);
break;
}
done:
@@ -6631,7 +6701,7 @@ static u32 stmmac_vid_crc32_le(__le16 vid_le)
static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
{
u32 crc, hash = 0;
- __le16 pmatch = 0;
+ u16 pmatch = 0;
int count = 0;
u16 vid = 0;
@@ -6646,13 +6716,16 @@ static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
if (count > 2) /* VID = 0 always passes filter */
return -EOPNOTSUPP;
- pmatch = cpu_to_le16(vid);
+ pmatch = vid;
hash = 0;
}
return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
}
+/* FIXME: This may need RXC to be running, but it may be called with BH
+ * disabled, which means we can't call phylink_rx_clk_stop*().
+ */
static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
struct stmmac_priv *priv = netdev_priv(ndev);
@@ -6684,6 +6757,9 @@ err_pm_put:
return ret;
}
+/* FIXME: This may need RXC to be running, but it may be called with BH
+ * disabled, which means we can't call phylink_rx_clk_stop*().
+ */
static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
struct stmmac_priv *priv = netdev_priv(ndev);
@@ -6955,7 +7031,7 @@ int stmmac_xdp_open(struct net_device *dev)
}
/* Adjust Split header */
- sph_en = (priv->hw->rx_csum > 0) && priv->sph;
+ sph_en = (priv->hw->rx_csum > 0) && priv->sph_active;
/* DMA RX Channel Configuration */
for (chan = 0; chan < rx_cnt; chan++) {
@@ -6995,8 +7071,7 @@ int stmmac_xdp_open(struct net_device *dev)
stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
tx_q->tx_tail_addr, chan);
- hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- tx_q->txtimer.function = stmmac_tx_timer;
+ hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}
/* Enable the MAC Rx/Tx */
@@ -7021,7 +7096,6 @@ irq_error:
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
- stmmac_hw_teardown(dev);
init_error:
free_dma_desc_resources(priv, &priv->dma_conf);
dma_desc_error:
@@ -7136,6 +7210,8 @@ static const struct net_device_ops stmmac_netdev_ops = {
.ndo_bpf = stmmac_bpf,
.ndo_xdp_xmit = stmmac_xdp_xmit,
.ndo_xsk_wakeup = stmmac_xsk_wakeup,
+ .ndo_hwtstamp_get = stmmac_hwtstamp_get,
+ .ndo_hwtstamp_set = stmmac_hwtstamp_set,
};
static void stmmac_reset_subtask(struct stmmac_priv *priv)
@@ -7204,7 +7280,6 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
priv->plat->enh_desc = priv->dma_cap.enh_desc;
priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
- priv->hw->pmt = priv->plat->pmt;
if (priv->dma_cap.hash_tb_sz) {
priv->hw->multicast_filter_bins =
(BIT(priv->dma_cap.hash_tb_sz) << 5);
@@ -7242,11 +7317,42 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
if (priv->plat->pmt) {
dev_info(priv->device, "Wake-Up On Lan supported\n");
device_set_wakeup_capable(priv->device, 1);
+ devm_pm_set_wake_irq(priv->device, priv->wol_irq);
}
if (priv->dma_cap.tsoen)
dev_info(priv->device, "TSO supported\n");
+ if (priv->dma_cap.number_rx_queues &&
+ priv->plat->rx_queues_to_use > priv->dma_cap.number_rx_queues) {
+ dev_warn(priv->device,
+ "Number of Rx queues (%u) exceeds dma capability\n",
+ priv->plat->rx_queues_to_use);
+ priv->plat->rx_queues_to_use = priv->dma_cap.number_rx_queues;
+ }
+ if (priv->dma_cap.number_tx_queues &&
+ priv->plat->tx_queues_to_use > priv->dma_cap.number_tx_queues) {
+ dev_warn(priv->device,
+ "Number of Tx queues (%u) exceeds dma capability\n",
+ priv->plat->tx_queues_to_use);
+ priv->plat->tx_queues_to_use = priv->dma_cap.number_tx_queues;
+ }
+
+ if (priv->dma_cap.rx_fifo_size &&
+ priv->plat->rx_fifo_size > priv->dma_cap.rx_fifo_size) {
+ dev_warn(priv->device,
+ "Rx FIFO size (%u) exceeds dma capability\n",
+ priv->plat->rx_fifo_size);
+ priv->plat->rx_fifo_size = priv->dma_cap.rx_fifo_size;
+ }
+ if (priv->dma_cap.tx_fifo_size &&
+ priv->plat->tx_fifo_size > priv->dma_cap.tx_fifo_size) {
+ dev_warn(priv->device,
+ "Tx FIFO size (%u) exceeds dma capability\n",
+ priv->plat->tx_fifo_size);
+ priv->plat->tx_fifo_size = priv->dma_cap.tx_fifo_size;
+ }
+
priv->hw->vlan_fail_q_en =
(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
@@ -7263,13 +7369,21 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
* has to be disable and this can be done by passing the
* riwt_off field from the platform.
*/
- if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
- (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
+ if ((priv->synopsys_id >= DWMAC_CORE_3_50 ||
+ priv->plat->core_type == DWMAC_CORE_XGMAC) &&
+ !priv->plat->riwt_off) {
priv->use_riwt = 1;
dev_info(priv->device,
"Enable RX Mitigation via HW Watchdog Timer\n");
}
+ /* Unimplemented PCS init (as indicated by stmmac_do_callback()
+ * perversely returning -EINVAL) is non-fatal.
+ */
+ ret = stmmac_mac_pcs_init(priv);
+ if (ret != -EINVAL)
+ return ret;
+
return 0;
}
@@ -7327,7 +7441,6 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
{
struct stmmac_priv *priv = netdev_priv(dev);
int ret = 0, i;
- int max_speed;
if (netif_running(dev))
stmmac_release(dev);
@@ -7341,14 +7454,6 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
priv->rss.table[i] = ethtool_rxfh_indir_default(i,
rx_cnt);
- stmmac_mac_phylink_get_caps(priv);
-
- priv->phylink_config.mac_capabilities = priv->hw->link.caps;
-
- max_speed = priv->plat->max_speed;
- if (max_speed)
- phylink_limit_mac_speed(&priv->phylink_config, max_speed);
-
stmmac_napi_add(dev);
if (netif_running(dev))
@@ -7374,71 +7479,6 @@ int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
return ret;
}
-#define SEND_VERIFY_MPAKCET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
-static void stmmac_fpe_lp_task(struct work_struct *work)
-{
- struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
- fpe_task);
- struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
- enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
- enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
- bool *hs_enable = &fpe_cfg->hs_enable;
- bool *enable = &fpe_cfg->enable;
- int retries = 20;
-
- while (retries-- > 0) {
- /* Bail out immediately if FPE handshake is OFF */
- if (*lo_state == FPE_STATE_OFF || !*hs_enable)
- break;
-
- if (*lo_state == FPE_STATE_ENTERING_ON &&
- *lp_state == FPE_STATE_ENTERING_ON) {
- stmmac_fpe_configure(priv, priv->ioaddr,
- fpe_cfg,
- priv->plat->tx_queues_to_use,
- priv->plat->rx_queues_to_use,
- *enable);
-
- netdev_info(priv->dev, "configured FPE\n");
-
- *lo_state = FPE_STATE_ON;
- *lp_state = FPE_STATE_ON;
- netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
- break;
- }
-
- if ((*lo_state == FPE_STATE_CAPABLE ||
- *lo_state == FPE_STATE_ENTERING_ON) &&
- *lp_state != FPE_STATE_ON) {
- netdev_info(priv->dev, SEND_VERIFY_MPAKCET_FMT,
- *lo_state, *lp_state);
- stmmac_fpe_send_mpacket(priv, priv->ioaddr,
- fpe_cfg,
- MPACKET_VERIFY);
- }
- /* Sleep then retry */
- msleep(500);
- }
-
- clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
-}
-
-void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
-{
- if (priv->plat->fpe_cfg->hs_enable != enable) {
- if (enable) {
- stmmac_fpe_send_mpacket(priv, priv->ioaddr,
- priv->plat->fpe_cfg,
- MPACKET_VERIFY);
- } else {
- priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
- priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
- }
-
- priv->plat->fpe_cfg->hs_enable = enable;
- }
-}
-
static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
{
const struct stmmac_xdp_buff *ctx = (void *)_ctx;
@@ -7452,7 +7492,7 @@ static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
return -ENODATA;
/* For GMAC4, the valid timestamp is from CTX next desc. */
- if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
+ if (dwmac_is_xmac(priv->plat->core_type))
desc_contains_ts = ndesc;
/* Check if timestamp is available */
@@ -7470,19 +7510,133 @@ static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
.xmo_rx_timestamp = stmmac_xdp_rx_timestamp,
};
-/**
- * stmmac_dvr_probe
- * @device: device pointer
- * @plat_dat: platform data pointer
- * @res: stmmac resource pointer
- * Description: this is the main probe function used to
- * call the alloc_etherdev, allocate the priv structure.
- * Return:
- * returns 0 on success, otherwise errno.
- */
-int stmmac_dvr_probe(struct device *device,
- struct plat_stmmacenet_data *plat_dat,
- struct stmmac_resources *res)
+static int stmmac_dl_ts_coarse_set(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct stmmac_devlink_priv *dl_priv = devlink_priv(dl);
+ struct stmmac_priv *priv = dl_priv->stmmac_priv;
+
+ priv->tsfupdt_coarse = ctx->val.vbool;
+
+ if (priv->tsfupdt_coarse)
+ priv->systime_flags &= ~PTP_TCR_TSCFUPDT;
+ else
+ priv->systime_flags |= PTP_TCR_TSCFUPDT;
+
+ /* In Coarse mode, we can use a smaller subsecond increment, let's
+ * reconfigure the systime, subsecond increment and addend.
+ */
+ stmmac_update_subsecond_increment(priv);
+
+ return 0;
+}
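For reference, a sketch of what the phc_coarse_adj knob changes in the sub-second increment, assuming a 250 MHz clk_ptp_ref; the doubled increment in fine-update mode follows the usual DWMAC convention and should be checked against the databook for a given core:

#include <stdio.h>

int main(void)
{
	unsigned long long ptp_clk = 250000000ULL;		/* assumed clk_ptp_rate */
	unsigned int coarse_ssinc = 1000000000ULL / ptp_clk;	/* 4 ns added every clock cycle */
	unsigned int fine_ssinc = 2000000000ULL / ptp_clk;	/* 8 ns, gated by the addend accumulator */

	printf("coarse: +%u ns/cycle, fine: +%u ns per accumulator overflow\n",
	       coarse_ssinc, fine_ssinc);
	return 0;
}
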
+
+static int stmmac_dl_ts_coarse_get(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct stmmac_devlink_priv *dl_priv = devlink_priv(dl);
+ struct stmmac_priv *priv = dl_priv->stmmac_priv;
+
+ ctx->val.vbool = priv->tsfupdt_coarse;
+
+ return 0;
+}
+
+static const struct devlink_param stmmac_devlink_params[] = {
+ DEVLINK_PARAM_DRIVER(STMMAC_DEVLINK_PARAM_ID_TS_COARSE, "phc_coarse_adj",
+ DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ stmmac_dl_ts_coarse_get,
+ stmmac_dl_ts_coarse_set, NULL),
+};
+
+/* None of the generic devlink parameters are implemented */
+static const struct devlink_ops stmmac_devlink_ops = {};
+
+static int stmmac_register_devlink(struct stmmac_priv *priv)
+{
+ struct stmmac_devlink_priv *dl_priv;
+ int ret;
+
+ /* For now, what is exposed over devlink is only relevant when
+ * timestamping is available and we have a valid ptp clock rate
+ */
+ if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp) ||
+ !priv->plat->clk_ptp_rate)
+ return 0;
+
+ priv->devlink = devlink_alloc(&stmmac_devlink_ops, sizeof(*dl_priv),
+ priv->device);
+ if (!priv->devlink)
+ return -ENOMEM;
+
+ dl_priv = devlink_priv(priv->devlink);
+ dl_priv->stmmac_priv = priv;
+
+ ret = devlink_params_register(priv->devlink, stmmac_devlink_params,
+ ARRAY_SIZE(stmmac_devlink_params));
+ if (ret)
+ goto dl_free;
+
+ devlink_register(priv->devlink);
+ return 0;
+
+dl_free:
+ devlink_free(priv->devlink);
+
+ return ret;
+}
+
+static void stmmac_unregister_devlink(struct stmmac_priv *priv)
+{
+ if (!priv->devlink)
+ return;
+
+ devlink_unregister(priv->devlink);
+ devlink_params_unregister(priv->devlink, stmmac_devlink_params,
+ ARRAY_SIZE(stmmac_devlink_params));
+ devlink_free(priv->devlink);
+}
+
+struct plat_stmmacenet_data *stmmac_plat_dat_alloc(struct device *dev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ int i;
+
+ plat_dat = devm_kzalloc(dev, sizeof(*plat_dat), GFP_KERNEL);
+ if (!plat_dat)
+ return NULL;
+
+ /* Set the defaults:
+ * - phy autodetection
+ * - determine GMII_Address CR field from CSR clock
+ * - allow MTU up to JUMBO_LEN
+	 * - multicast hash table of HASH_TABLE_SIZE bins
+ * - one unicast filter entry
+ */
+ plat_dat->phy_addr = -1;
+ plat_dat->clk_csr = -1;
+ plat_dat->maxmtu = JUMBO_LEN;
+ plat_dat->multicast_filter_bins = HASH_TABLE_SIZE;
+ plat_dat->unicast_filter_entries = 1;
+
+ /* Set the mtl defaults */
+ plat_dat->tx_queues_to_use = 1;
+ plat_dat->rx_queues_to_use = 1;
+
+ /* Setup the default RX queue channel map */
+ for (i = 0; i < ARRAY_SIZE(plat_dat->rx_queues_cfg); i++)
+ plat_dat->rx_queues_cfg[i].chan = i;
+
+ return plat_dat;
+}
+EXPORT_SYMBOL_GPL(stmmac_plat_dat_alloc);
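As a rough illustration of the intent behind this helper, a minimal sketch of a glue-driver probe built on it; the foo_dwmac driver, its resources and the RGMII interface choice are assumptions, not part of this patch:

static int foo_dwmac_probe(struct platform_device *pdev)
{
	struct plat_stmmacenet_data *plat;
	struct stmmac_resources res = {};

	/* Defaults from stmmac_plat_dat_alloc(): phy_addr/clk_csr = -1,
	 * maxmtu = JUMBO_LEN, one TX/RX queue, 1:1 RX queue/channel map.
	 */
	plat = stmmac_plat_dat_alloc(&pdev->dev);
	if (!plat)
		return -ENOMEM;

	/* Only the board specifics remain to be filled in */
	plat->core_type = DWMAC_CORE_GMAC4;
	plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;

	res.addr = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(res.addr))
		return PTR_ERR(res.addr);

	res.irq = platform_get_irq(pdev, 0);
	if (res.irq < 0)
		return res.irq;

	return stmmac_dvr_probe(&pdev->dev, plat, &res);
}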
+
+static int __stmmac_dvr_probe(struct device *device,
+ struct plat_stmmacenet_data *plat_dat,
+ struct stmmac_resources *res)
{
struct net_device *ndev = NULL;
struct stmmac_priv *priv;
@@ -7513,7 +7667,7 @@ int stmmac_dvr_probe(struct device *device,
return -ENOMEM;
stmmac_set_ethtool_ops(ndev);
- priv->pause = pause;
+ priv->pause_time = pause;
priv->plat = plat_dat;
priv->ioaddr = res->addr;
priv->dev->base_addr = (unsigned long)res->addr;
@@ -7553,8 +7707,7 @@ int stmmac_dvr_probe(struct device *device,
INIT_WORK(&priv->service_task, stmmac_service_task);
- /* Initialize Link Partner FPE workqueue */
- INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
+ timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
/* Override with kernel parameters if supplied XXX CRS XXX
* this needs to have multiple instances
@@ -7609,7 +7762,7 @@ int stmmac_dvr_probe(struct device *device,
if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
- if (priv->plat->has_gmac4)
+ if (priv->plat->core_type == DWMAC_CORE_GMAC4)
ndev->hw_features |= NETIF_F_GSO_UDP_L4;
priv->tso = true;
dev_info(priv->device, "TSO feature enabled\n");
@@ -7618,8 +7771,8 @@ int stmmac_dvr_probe(struct device *device,
if (priv->dma_cap.sphen &&
!(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
ndev->hw_features |= NETIF_F_GRO;
- priv->sph_cap = true;
- priv->sph = priv->sph_cap;
+ priv->sph_capable = true;
+ priv->sph_active = priv->sph_capable;
dev_info(priv->device, "SPH feature enabled\n");
}
@@ -7662,18 +7815,16 @@ int stmmac_dvr_probe(struct device *device,
#ifdef STMMAC_VLAN_TAG_USED
/* Both mac100 and gmac support receive VLAN tag detection */
ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
- ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
- priv->hw->hw_vlan_en = true;
-
+ if (dwmac_is_xmac(priv->plat->core_type)) {
+ ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
+ priv->hw->hw_vlan_en = true;
+ }
if (priv->dma_cap.vlhash) {
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
}
- if (priv->dma_cap.vlins) {
+ if (priv->dma_cap.vlins)
ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
- if (priv->dma_cap.dvlan)
- ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
- }
#endif
priv->msg_enable = netif_msg_init(debug, default_msg_level);
@@ -7689,30 +7840,26 @@ int stmmac_dvr_probe(struct device *device,
ndev->features |= NETIF_F_RXHASH;
ndev->vlan_features |= ndev->features;
- /* TSO doesn't work on VLANs yet */
- ndev->vlan_features &= ~NETIF_F_TSO;
/* MTU range: 46 - hw-specific max */
ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
- if (priv->plat->has_xgmac)
+
+ if (priv->plat->core_type == DWMAC_CORE_XGMAC)
ndev->max_mtu = XGMAC_JUMBO_LEN;
- else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
+ else if (priv->plat->enh_desc || priv->synopsys_id >= DWMAC_CORE_4_00)
ndev->max_mtu = JUMBO_LEN;
else
ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
- /* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
- * as well as plat->maxmtu < ndev->min_mtu which is a invalid range.
+
+ /* Warn if the platform's maxmtu is smaller than the minimum MTU,
+ * otherwise clamp the maximum MTU above to the platform's maxmtu.
*/
- if ((priv->plat->maxmtu < ndev->max_mtu) &&
- (priv->plat->maxmtu >= ndev->min_mtu))
- ndev->max_mtu = priv->plat->maxmtu;
- else if (priv->plat->maxmtu < ndev->min_mtu)
+ if (priv->plat->maxmtu < ndev->min_mtu)
dev_warn(priv->device,
"%s: warning: maxmtu having invalid value (%d)\n",
__func__, priv->plat->maxmtu);
-
- if (flow_ctrl)
- priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
+ else if (priv->plat->maxmtu < ndev->max_mtu)
+ ndev->max_mtu = priv->plat->maxmtu;
ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
@@ -7721,16 +7868,7 @@ int stmmac_dvr_probe(struct device *device,
mutex_init(&priv->lock);
- /* If a specific clk_csr value is passed from the platform
- * this means that the CSR Clock Range selection cannot be
- * changed at run-time and it is fixed. Viceversa the driver'll try to
- * set the MDC clock dynamically according to the csr actual
- * clock input.
- */
- if (priv->plat->clk_csr >= 0)
- priv->clk_csr = priv->plat->clk_csr;
- else
- stmmac_clk_csr_set(priv);
+ stmmac_fpe_init(priv);
stmmac_check_pcs_mode(priv);
@@ -7739,33 +7877,28 @@ int stmmac_dvr_probe(struct device *device,
if (!pm_runtime_enabled(device))
pm_runtime_enable(device);
- if (priv->hw->pcs != STMMAC_PCS_TBI &&
- priv->hw->pcs != STMMAC_PCS_RTBI) {
- /* MDIO bus Registration */
- ret = stmmac_mdio_register(ndev);
- if (ret < 0) {
- dev_err_probe(priv->device, ret,
- "%s: MDIO bus (id: %d) registration failed\n",
- __func__, priv->plat->bus_id);
- goto error_mdio_register;
- }
+ ret = stmmac_mdio_register(ndev);
+ if (ret < 0) {
+ dev_err_probe(priv->device, ret,
+ "MDIO bus (id: %d) registration failed\n",
+ priv->plat->bus_id);
+ goto error_mdio_register;
}
- if (priv->plat->speed_mode_2500)
- priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
-
- if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
- ret = stmmac_xpcs_setup(priv->mii);
- if (ret)
- goto error_xpcs_setup;
- }
+ ret = stmmac_pcs_setup(ndev);
+ if (ret)
+ goto error_pcs_setup;
- ret = stmmac_phy_setup(priv);
+ ret = stmmac_phylink_setup(priv);
if (ret) {
netdev_err(ndev, "failed to setup phy (%d)\n", ret);
goto error_phy_setup;
}
+ ret = stmmac_register_devlink(priv);
+ if (ret)
+ goto error_devlink_setup;
+
ret = register_netdev(ndev);
if (ret) {
dev_err(priv->device, "%s: ERROR %i registering the device\n",
@@ -7788,12 +7921,13 @@ int stmmac_dvr_probe(struct device *device,
return ret;
error_netdev_register:
+ stmmac_unregister_devlink(priv);
+error_devlink_setup:
phylink_destroy(priv->phylink);
-error_xpcs_setup:
error_phy_setup:
- if (priv->hw->pcs != STMMAC_PCS_TBI &&
- priv->hw->pcs != STMMAC_PCS_RTBI)
- stmmac_mdio_unregister(ndev);
+ stmmac_pcs_clean(ndev);
+error_pcs_setup:
+ stmmac_mdio_unregister(ndev);
error_mdio_register:
stmmac_napi_del(ndev);
error_hw_init:
@@ -7803,6 +7937,34 @@ error_wq_init:
return ret;
}
+
+/**
+ * stmmac_dvr_probe
+ * @dev: device pointer
+ * @plat_dat: platform data pointer
+ * @res: stmmac resource pointer
+ * Description: main probe entry point; it runs the optional platform init
+ * callback, allocates the net_device via alloc_etherdev and sets up the
+ * private structure.
+ * Return:
+ * 0 on success, otherwise a negative errno.
+ */
+int stmmac_dvr_probe(struct device *dev, struct plat_stmmacenet_data *plat_dat,
+ struct stmmac_resources *res)
+{
+ int ret;
+
+ if (plat_dat->init) {
+ ret = plat_dat->init(dev, plat_dat->bsp_priv);
+ if (ret)
+ return ret;
+ }
+
+ ret = __stmmac_dvr_probe(dev, plat_dat, res);
+ if (ret && plat_dat->exit)
+ plat_dat->exit(dev, plat_dat->bsp_priv);
+
+ return ret;
+}
EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
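A hedged sketch of the init/exit pair this wrapper calls; struct foo_dwmac and its clock are invented for illustration, and the exit callback is assumed to return void since its return value is not used above:

struct foo_dwmac {
	struct clk *phy_ref_clk;
};

static int foo_dwmac_init(struct device *dev, void *bsp_priv)
{
	struct foo_dwmac *dwmac = bsp_priv;

	/* Called by stmmac_dvr_probe() before __stmmac_dvr_probe() */
	return clk_prepare_enable(dwmac->phy_ref_clk);
}

static void foo_dwmac_exit(struct device *dev, void *bsp_priv)
{
	struct foo_dwmac *dwmac = bsp_priv;

	/* Called on probe failure and from stmmac_dvr_remove() */
	clk_disable_unprepare(dwmac->phy_ref_clk);
}

The glue driver would set plat->init, plat->exit and plat->bsp_priv before calling stmmac_dvr_probe().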
/**
@@ -7820,27 +7982,30 @@ void stmmac_dvr_remove(struct device *dev)
pm_runtime_get_sync(dev);
- stmmac_stop_all_dma(priv);
- stmmac_mac_set(priv, priv->ioaddr, false);
- netif_carrier_off(ndev);
unregister_netdev(ndev);
#ifdef CONFIG_DEBUG_FS
stmmac_exit_fs(ndev);
#endif
+ stmmac_unregister_devlink(priv);
+
phylink_destroy(priv->phylink);
if (priv->plat->stmmac_rst)
reset_control_assert(priv->plat->stmmac_rst);
reset_control_assert(priv->plat->stmmac_ahb_rst);
- if (priv->hw->pcs != STMMAC_PCS_TBI &&
- priv->hw->pcs != STMMAC_PCS_RTBI)
- stmmac_mdio_unregister(ndev);
+
+ stmmac_pcs_clean(ndev);
+ stmmac_mdio_unregister(ndev);
+
destroy_workqueue(priv->wq);
mutex_destroy(&priv->lock);
bitmap_free(priv->af_xdp_zc_qps);
pm_runtime_disable(dev);
pm_runtime_put_noidle(dev);
+
+ if (priv->plat->exit)
+ priv->plat->exit(dev, priv->plat->bsp_priv);
}
EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
@@ -7869,9 +8034,9 @@ int stmmac_suspend(struct device *dev)
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
- if (priv->eee_enabled) {
+ if (priv->eee_sw_timer_en) {
priv->tx_path_in_lpi_mode = false;
- del_timer_sync(&priv->eee_ctrl_timer);
+ timer_delete_sync(&priv->eee_ctrl_timer);
}
/* Stop TX/RX DMA */
@@ -7881,7 +8046,7 @@ int stmmac_suspend(struct device *dev)
priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
/* Enable Power down mode by programming the PMT regs */
- if (device_may_wakeup(priv->device) && priv->plat->pmt) {
+ if (priv->wolopts) {
stmmac_pmt(priv, priv->hw, priv->wolopts);
priv->irq_wake = 1;
} else {
@@ -7892,27 +8057,15 @@ int stmmac_suspend(struct device *dev)
mutex_unlock(&priv->lock);
rtnl_lock();
- if (device_may_wakeup(priv->device) && priv->plat->pmt) {
- phylink_suspend(priv->phylink, true);
- } else {
- if (device_may_wakeup(priv->device))
- phylink_speed_down(priv->phylink, false);
- phylink_suspend(priv->phylink, false);
- }
+ phylink_suspend(priv->phylink, !!priv->wolopts);
rtnl_unlock();
- if (priv->dma_cap.fpesel) {
- /* Disable FPE */
- stmmac_fpe_configure(priv, priv->ioaddr,
- priv->plat->fpe_cfg,
- priv->plat->tx_queues_to_use,
- priv->plat->rx_queues_to_use, false);
+ if (stmmac_fpe_supported(priv))
+ ethtool_mmsv_stop(&priv->fpe_cfg.mmsv);
- stmmac_fpe_handshake(priv, false);
- stmmac_fpe_stop_wq(priv);
- }
+ if (priv->plat->suspend)
+ return priv->plat->suspend(dev, priv->plat->bsp_priv);
- priv->speed = SPEED_UNKNOWN;
return 0;
}
EXPORT_SYMBOL_GPL(stmmac_suspend);
@@ -7965,6 +8118,12 @@ int stmmac_resume(struct device *dev)
struct stmmac_priv *priv = netdev_priv(ndev);
int ret;
+ if (priv->plat->resume) {
+ ret = priv->plat->resume(dev, priv->plat->bsp_priv);
+ if (ret)
+ return ret;
+ }
+
if (!netif_running(ndev))
return 0;
@@ -7974,7 +8133,7 @@ int stmmac_resume(struct device *dev)
* this bit because it can generate problems while resuming
* from another devices (e.g. serial console).
*/
- if (device_may_wakeup(priv->device) && priv->plat->pmt) {
+ if (priv->wolopts) {
mutex_lock(&priv->lock);
stmmac_pmt(priv, priv->hw, 0);
mutex_unlock(&priv->lock);
@@ -7996,16 +8155,12 @@ int stmmac_resume(struct device *dev)
}
rtnl_lock();
- if (device_may_wakeup(priv->device) && priv->plat->pmt) {
- phylink_resume(priv->phylink);
- } else {
- phylink_resume(priv->phylink);
- if (device_may_wakeup(priv->device))
- phylink_speed_up(priv->phylink);
- }
- rtnl_unlock();
- rtnl_lock();
+ /* Prepare the PHY to resume, ensuring that its clocks which are
+ * necessary for the MAC DMA reset to complete are running
+ */
+ phylink_prepare_resume(priv->phylink);
+
mutex_lock(&priv->lock);
stmmac_reset_queues_param(priv);
@@ -8013,16 +8168,33 @@ int stmmac_resume(struct device *dev)
stmmac_free_tx_skbufs(priv);
stmmac_clear_descriptors(priv, &priv->dma_conf);
- stmmac_hw_setup(ndev, false);
+ ret = stmmac_hw_setup(ndev);
+ if (ret < 0) {
+ netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
+ mutex_unlock(&priv->lock);
+ rtnl_unlock();
+ return ret;
+ }
+
+ stmmac_init_timestamping(priv);
+
stmmac_init_coalesce(priv);
+ phylink_rx_clk_stop_block(priv->phylink);
stmmac_set_rx_mode(ndev);
stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
+ phylink_rx_clk_stop_unblock(priv->phylink);
stmmac_enable_all_queues(priv);
stmmac_enable_all_dma_irq(priv);
mutex_unlock(&priv->lock);
+
+ /* phylink_resume() must be called after the hardware has been
+ * initialised because it may bring the link up immediately in a
+ * workqueue thread, which will race with initialisation.
+ */
+ phylink_resume(priv->phylink);
rtnl_unlock();
netif_device_attach(ndev);
@@ -8031,6 +8203,10 @@ int stmmac_resume(struct device *dev)
}
EXPORT_SYMBOL_GPL(stmmac_resume);
+/* This is not the same as EXPORT_GPL_SIMPLE_DEV_PM_OPS() when CONFIG_PM=n */
+DEFINE_SIMPLE_DEV_PM_OPS(stmmac_simple_pm_ops, stmmac_suspend, stmmac_resume);
+EXPORT_SYMBOL_GPL(stmmac_simple_pm_ops);
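A short sketch of a platform glue driver referencing the exported PM ops directly (driver name and probe callback are illustrative):

static struct platform_driver foo_dwmac_driver = {
	.probe	= foo_dwmac_probe,
	.driver	= {
		.name	= "foo-dwmac",
		.pm	= &stmmac_simple_pm_ops,
	},
};
module_platform_driver(foo_dwmac_driver);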
+
#ifndef MODULE
static int __init stmmac_cmdline_opt(char *str)
{
@@ -8045,9 +8221,6 @@ static int __init stmmac_cmdline_opt(char *str)
} else if (!strncmp(opt, "phyaddr:", 8)) {
if (kstrtoint(opt + 8, 0, &phyaddr))
goto err;
- } else if (!strncmp(opt, "buf_sz:", 7)) {
- if (kstrtoint(opt + 7, 0, &buf_sz))
- goto err;
} else if (!strncmp(opt, "tc:", 3)) {
if (kstrtoint(opt + 3, 0, &tc))
goto err;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 0542cfd1817e..1e82850f2a25 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -23,9 +23,9 @@
#include "dwxgmac2.h"
#include "stmmac.h"
-#define MII_BUSY 0x00000001
-#define MII_WRITE 0x00000002
-#define MII_DATA_MASK GENMASK(15, 0)
+#define MII_ADDR_GBUSY BIT(0)
+#define MII_ADDR_GWRITE BIT(1)
+#define MII_DATA_GD_MASK GENMASK(15, 0)
/* GMAC4 defines */
#define MII_GMAC4_GOC_SHIFT 2
@@ -45,6 +45,16 @@
#define MII_XGMAC_PA_SHIFT 16
#define MII_XGMAC_DA_SHIFT 21
+static int stmmac_mdio_wait(void __iomem *reg, u32 mask)
+{
+ u32 v;
+
+ if (readl_poll_timeout(reg, v, !(v & mask), 100, 10000))
+ return -EBUSY;
+
+ return 0;
+}
+
static void stmmac_xgmac2_c45_format(struct stmmac_priv *priv, int phyaddr,
int devad, int phyreg, u32 *hw_addr)
{
@@ -83,7 +93,6 @@ static int stmmac_xgmac2_mdio_read(struct stmmac_priv *priv, u32 addr,
{
unsigned int mii_address = priv->hw->mii.addr;
unsigned int mii_data = priv->hw->mii.data;
- u32 tmp;
int ret;
ret = pm_runtime_resume_and_get(priv->device);
@@ -91,33 +100,25 @@ static int stmmac_xgmac2_mdio_read(struct stmmac_priv *priv, u32 addr,
return ret;
/* Wait until any existing MII operation is complete */
- if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
- !(tmp & MII_XGMAC_BUSY), 100, 10000)) {
- ret = -EBUSY;
+ ret = stmmac_mdio_wait(priv->ioaddr + mii_data, MII_XGMAC_BUSY);
+ if (ret)
goto err_disable_clks;
- }
- value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
- & priv->hw->mii.clk_csr_mask;
- value |= MII_XGMAC_READ;
+ value |= priv->gmii_address_bus_config | MII_XGMAC_READ;
/* Wait until any existing MII operation is complete */
- if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
- !(tmp & MII_XGMAC_BUSY), 100, 10000)) {
- ret = -EBUSY;
+ ret = stmmac_mdio_wait(priv->ioaddr + mii_data, MII_XGMAC_BUSY);
+ if (ret)
goto err_disable_clks;
- }
/* Set the MII address register to read */
writel(addr, priv->ioaddr + mii_address);
writel(value, priv->ioaddr + mii_data);
/* Wait until any existing MII operation is complete */
- if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
- !(tmp & MII_XGMAC_BUSY), 100, 10000)) {
- ret = -EBUSY;
+ ret = stmmac_mdio_wait(priv->ioaddr + mii_data, MII_XGMAC_BUSY);
+ if (ret)
goto err_disable_clks;
- }
/* Read the data from the MII data register */
ret = (int)readl(priv->ioaddr + mii_data) & GENMASK(15, 0);
@@ -131,12 +132,9 @@ err_disable_clks:
static int stmmac_xgmac2_mdio_read_c22(struct mii_bus *bus, int phyaddr,
int phyreg)
{
- struct net_device *ndev = bus->priv;
- struct stmmac_priv *priv;
+ struct stmmac_priv *priv = netdev_priv(bus->priv);
u32 addr;
- priv = netdev_priv(ndev);
-
/* Until ver 2.20 XGMAC does not support C22 addr >= 4 */
if (priv->synopsys_id < DWXGMAC_CORE_2_20 &&
phyaddr > MII_XGMAC_MAX_C22ADDR)
@@ -150,12 +148,9 @@ static int stmmac_xgmac2_mdio_read_c22(struct mii_bus *bus, int phyaddr,
static int stmmac_xgmac2_mdio_read_c45(struct mii_bus *bus, int phyaddr,
int devad, int phyreg)
{
- struct net_device *ndev = bus->priv;
- struct stmmac_priv *priv;
+ struct stmmac_priv *priv = netdev_priv(bus->priv);
u32 addr;
- priv = netdev_priv(ndev);
-
stmmac_xgmac2_c45_format(priv, phyaddr, devad, phyreg, &addr);
return stmmac_xgmac2_mdio_read(priv, addr, MII_XGMAC_BUSY);
@@ -166,7 +161,6 @@ static int stmmac_xgmac2_mdio_write(struct stmmac_priv *priv, u32 addr,
{
unsigned int mii_address = priv->hw->mii.addr;
unsigned int mii_data = priv->hw->mii.data;
- u32 tmp;
int ret;
ret = pm_runtime_resume_and_get(priv->device);
@@ -174,31 +168,23 @@ static int stmmac_xgmac2_mdio_write(struct stmmac_priv *priv, u32 addr,
return ret;
/* Wait until any existing MII operation is complete */
- if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
- !(tmp & MII_XGMAC_BUSY), 100, 10000)) {
- ret = -EBUSY;
+ ret = stmmac_mdio_wait(priv->ioaddr + mii_data, MII_XGMAC_BUSY);
+ if (ret)
goto err_disable_clks;
- }
- value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
- & priv->hw->mii.clk_csr_mask;
- value |= phydata;
- value |= MII_XGMAC_WRITE;
+ value |= priv->gmii_address_bus_config | phydata | MII_XGMAC_WRITE;
/* Wait until any existing MII operation is complete */
- if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
- !(tmp & MII_XGMAC_BUSY), 100, 10000)) {
- ret = -EBUSY;
+ ret = stmmac_mdio_wait(priv->ioaddr + mii_data, MII_XGMAC_BUSY);
+ if (ret)
goto err_disable_clks;
- }
/* Set the MII address register to write */
writel(addr, priv->ioaddr + mii_address);
writel(value, priv->ioaddr + mii_data);
/* Wait until any existing MII operation is complete */
- ret = readl_poll_timeout(priv->ioaddr + mii_data, tmp,
- !(tmp & MII_XGMAC_BUSY), 100, 10000);
+ ret = stmmac_mdio_wait(priv->ioaddr + mii_data, MII_XGMAC_BUSY);
err_disable_clks:
pm_runtime_put(priv->device);
@@ -209,12 +195,9 @@ err_disable_clks:
static int stmmac_xgmac2_mdio_write_c22(struct mii_bus *bus, int phyaddr,
int phyreg, u16 phydata)
{
- struct net_device *ndev = bus->priv;
- struct stmmac_priv *priv;
+ struct stmmac_priv *priv = netdev_priv(bus->priv);
u32 addr;
- priv = netdev_priv(ndev);
-
/* Until ver 2.20 XGMAC does not support C22 addr >= 4 */
if (priv->synopsys_id < DWXGMAC_CORE_2_20 &&
phyaddr > MII_XGMAC_MAX_C22ADDR)
@@ -229,37 +212,78 @@ static int stmmac_xgmac2_mdio_write_c22(struct mii_bus *bus, int phyaddr,
static int stmmac_xgmac2_mdio_write_c45(struct mii_bus *bus, int phyaddr,
int devad, int phyreg, u16 phydata)
{
- struct net_device *ndev = bus->priv;
- struct stmmac_priv *priv;
+ struct stmmac_priv *priv = netdev_priv(bus->priv);
u32 addr;
- priv = netdev_priv(ndev);
-
stmmac_xgmac2_c45_format(priv, phyaddr, devad, phyreg, &addr);
return stmmac_xgmac2_mdio_write(priv, addr, MII_XGMAC_BUSY,
phydata);
}
-static int stmmac_mdio_read(struct stmmac_priv *priv, int data, u32 value)
+/**
+ * stmmac_mdio_format_addr() - format the address register
+ * @priv: struct stmmac_priv pointer
+ * @pa: 5-bit MDIO port (PHY) address
+ * @gr: 5-bit MDIO register address (C22) or MDIO device address (C45)
+ *
+ * Return: formatted address register
+ */
+static u32 stmmac_mdio_format_addr(struct stmmac_priv *priv,
+ unsigned int pa, unsigned int gr)
+{
+ const struct mii_regs *mii_regs = &priv->hw->mii;
+
+ return ((pa << mii_regs->addr_shift) & mii_regs->addr_mask) |
+ ((gr << mii_regs->reg_shift) & mii_regs->reg_mask) |
+ priv->gmii_address_bus_config |
+ MII_ADDR_GBUSY;
+}
+
+static int stmmac_mdio_access(struct stmmac_priv *priv, unsigned int pa,
+ unsigned int gr, u32 cmd, u32 data, bool read)
{
- unsigned int mii_address = priv->hw->mii.addr;
- unsigned int mii_data = priv->hw->mii.data;
- u32 v;
+ void __iomem *mii_address = priv->ioaddr + priv->hw->mii.addr;
+ void __iomem *mii_data = priv->ioaddr + priv->hw->mii.data;
+ u32 addr;
+ int ret;
- if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
- 100, 10000))
- return -EBUSY;
+ ret = pm_runtime_resume_and_get(priv->device);
+ if (ret < 0)
+ return ret;
- writel(data, priv->ioaddr + mii_data);
- writel(value, priv->ioaddr + mii_address);
+ ret = stmmac_mdio_wait(mii_address, MII_ADDR_GBUSY);
+ if (ret)
+ goto out;
- if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
- 100, 10000))
- return -EBUSY;
+ addr = stmmac_mdio_format_addr(priv, pa, gr) | cmd;
- /* Read the data from the MII data register */
- return readl(priv->ioaddr + mii_data) & MII_DATA_MASK;
+ writel(data, mii_data);
+ writel(addr, mii_address);
+
+ ret = stmmac_mdio_wait(mii_address, MII_ADDR_GBUSY);
+ if (ret)
+ goto out;
+
+ /* Read the data from the MII data register if in read mode */
+ ret = read ? readl(mii_data) & MII_DATA_GD_MASK : 0;
+
+out:
+ pm_runtime_put(priv->device);
+
+ return ret;
+}
+
+static int stmmac_mdio_read(struct stmmac_priv *priv, unsigned int pa,
+ unsigned int gr, u32 cmd, int data)
+{
+ return stmmac_mdio_access(priv, pa, gr, cmd, data, true);
+}
+
+static int stmmac_mdio_write(struct stmmac_priv *priv, unsigned int pa,
+ unsigned int gr, u32 cmd, int data)
+{
+ return stmmac_mdio_access(priv, pa, gr, cmd, data, false);
}
/**
@@ -274,28 +298,15 @@ static int stmmac_mdio_read(struct stmmac_priv *priv, int data, u32 value)
*/
static int stmmac_mdio_read_c22(struct mii_bus *bus, int phyaddr, int phyreg)
{
- struct net_device *ndev = bus->priv;
- struct stmmac_priv *priv = netdev_priv(ndev);
- u32 value = MII_BUSY;
- int data = 0;
-
- data = pm_runtime_resume_and_get(priv->device);
- if (data < 0)
- return data;
+ struct stmmac_priv *priv = netdev_priv(bus->priv);
+ u32 cmd;
- value |= (phyaddr << priv->hw->mii.addr_shift)
- & priv->hw->mii.addr_mask;
- value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
- value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
- & priv->hw->mii.clk_csr_mask;
- if (priv->plat->has_gmac4)
- value |= MII_GMAC4_READ;
-
- data = stmmac_mdio_read(priv, data, value);
-
- pm_runtime_put(priv->device);
+ if (priv->plat->core_type == DWMAC_CORE_GMAC4)
+ cmd = MII_GMAC4_READ;
+ else
+ cmd = 0;
- return data;
+ return stmmac_mdio_read(priv, phyaddr, phyreg, cmd, 0);
}
/**
@@ -312,54 +323,11 @@ static int stmmac_mdio_read_c22(struct mii_bus *bus, int phyaddr, int phyreg)
static int stmmac_mdio_read_c45(struct mii_bus *bus, int phyaddr, int devad,
int phyreg)
{
- struct net_device *ndev = bus->priv;
- struct stmmac_priv *priv = netdev_priv(ndev);
- u32 value = MII_BUSY;
- int data = 0;
-
- data = pm_runtime_get_sync(priv->device);
- if (data < 0) {
- pm_runtime_put_noidle(priv->device);
- return data;
- }
-
- value |= (phyaddr << priv->hw->mii.addr_shift)
- & priv->hw->mii.addr_mask;
- value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
- value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
- & priv->hw->mii.clk_csr_mask;
- value |= MII_GMAC4_READ;
- value |= MII_GMAC4_C45E;
- value &= ~priv->hw->mii.reg_mask;
- value |= (devad << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
-
- data |= phyreg << MII_GMAC4_REG_ADDR_SHIFT;
+ struct stmmac_priv *priv = netdev_priv(bus->priv);
+ int data = phyreg << MII_GMAC4_REG_ADDR_SHIFT;
+ u32 cmd = MII_GMAC4_READ | MII_GMAC4_C45E;
- data = stmmac_mdio_read(priv, data, value);
-
- pm_runtime_put(priv->device);
-
- return data;
-}
-
-static int stmmac_mdio_write(struct stmmac_priv *priv, int data, u32 value)
-{
- unsigned int mii_address = priv->hw->mii.addr;
- unsigned int mii_data = priv->hw->mii.data;
- u32 v;
-
- /* Wait until any existing MII operation is complete */
- if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
- 100, 10000))
- return -EBUSY;
-
- /* Set the MII address register to write */
- writel(data, priv->ioaddr + mii_data);
- writel(value, priv->ioaddr + mii_address);
-
- /* Wait until any existing MII operation is complete */
- return readl_poll_timeout(priv->ioaddr + mii_address, v,
- !(v & MII_BUSY), 100, 10000);
+ return stmmac_mdio_read(priv, phyaddr, devad, cmd, data);
}
/**
@@ -373,31 +341,15 @@ static int stmmac_mdio_write(struct stmmac_priv *priv, int data, u32 value)
static int stmmac_mdio_write_c22(struct mii_bus *bus, int phyaddr, int phyreg,
u16 phydata)
{
- struct net_device *ndev = bus->priv;
- struct stmmac_priv *priv = netdev_priv(ndev);
- int ret, data = phydata;
- u32 value = MII_BUSY;
+ struct stmmac_priv *priv = netdev_priv(bus->priv);
+ u32 cmd;
- ret = pm_runtime_resume_and_get(priv->device);
- if (ret < 0)
- return ret;
-
- value |= (phyaddr << priv->hw->mii.addr_shift)
- & priv->hw->mii.addr_mask;
- value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
-
- value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
- & priv->hw->mii.clk_csr_mask;
- if (priv->plat->has_gmac4)
- value |= MII_GMAC4_WRITE;
+ if (priv->plat->core_type == DWMAC_CORE_GMAC4)
+ cmd = MII_GMAC4_WRITE;
else
- value |= MII_WRITE;
-
- ret = stmmac_mdio_write(priv, data, value);
-
- pm_runtime_put(priv->device);
+ cmd = MII_ADDR_GWRITE;
- return ret;
+ return stmmac_mdio_write(priv, phyaddr, phyreg, cmd, phydata);
}
/**
@@ -412,36 +364,13 @@ static int stmmac_mdio_write_c22(struct mii_bus *bus, int phyaddr, int phyreg,
static int stmmac_mdio_write_c45(struct mii_bus *bus, int phyaddr,
int devad, int phyreg, u16 phydata)
{
- struct net_device *ndev = bus->priv;
- struct stmmac_priv *priv = netdev_priv(ndev);
- int ret, data = phydata;
- u32 value = MII_BUSY;
-
- ret = pm_runtime_get_sync(priv->device);
- if (ret < 0) {
- pm_runtime_put_noidle(priv->device);
- return ret;
- }
-
- value |= (phyaddr << priv->hw->mii.addr_shift)
- & priv->hw->mii.addr_mask;
- value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
-
- value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
- & priv->hw->mii.clk_csr_mask;
-
- value |= MII_GMAC4_WRITE;
- value |= MII_GMAC4_C45E;
- value &= ~priv->hw->mii.reg_mask;
- value |= (devad << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
+ struct stmmac_priv *priv = netdev_priv(bus->priv);
+ u32 cmd = MII_GMAC4_WRITE | MII_GMAC4_C45E;
+ int data = phydata;
data |= phyreg << MII_GMAC4_REG_ADDR_SHIFT;
- ret = stmmac_mdio_write(priv, data, value);
-
- pm_runtime_put(priv->device);
-
- return ret;
+ return stmmac_mdio_write(priv, phyaddr, devad, cmd, data);
}
/**
@@ -452,8 +381,7 @@ static int stmmac_mdio_write_c45(struct mii_bus *bus, int phyaddr,
int stmmac_mdio_reset(struct mii_bus *bus)
{
#if IS_ENABLED(CONFIG_STMMAC_PLATFORM)
- struct net_device *ndev = bus->priv;
- struct stmmac_priv *priv = netdev_priv(ndev);
+ struct stmmac_priv *priv = netdev_priv(bus->priv);
unsigned int mii_address = priv->hw->mii.addr;
#ifdef CONFIG_OF
@@ -489,40 +417,158 @@ int stmmac_mdio_reset(struct mii_bus *bus)
* on MDC, so perform a dummy mdio read. To be updated for GMAC4
* if needed.
*/
- if (!priv->plat->has_gmac4)
+ if (priv->plat->core_type != DWMAC_CORE_GMAC4)
writel(0, priv->ioaddr + mii_address);
#endif
return 0;
}
-int stmmac_xpcs_setup(struct mii_bus *bus)
+int stmmac_pcs_setup(struct net_device *ndev)
{
- struct net_device *ndev = bus->priv;
- struct stmmac_priv *priv;
- struct dw_xpcs *xpcs;
- int mode, addr;
-
- priv = netdev_priv(ndev);
- mode = priv->plat->phy_interface;
-
- /* Try to probe the XPCS by scanning all addresses. */
- for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
- xpcs = xpcs_create_mdiodev(bus, addr, mode);
- if (IS_ERR(xpcs))
- continue;
-
- priv->hw->xpcs = xpcs;
- break;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ struct fwnode_handle *devnode, *pcsnode;
+ struct dw_xpcs *xpcs = NULL;
+ int addr, ret;
+
+ devnode = priv->plat->port_node;
+
+ if (priv->plat->pcs_init) {
+ ret = priv->plat->pcs_init(priv);
+ } else if (fwnode_property_present(devnode, "pcs-handle")) {
+ pcsnode = fwnode_find_reference(devnode, "pcs-handle", 0);
+ xpcs = xpcs_create_fwnode(pcsnode);
+ fwnode_handle_put(pcsnode);
+ ret = PTR_ERR_OR_ZERO(xpcs);
+ } else if (priv->plat->mdio_bus_data &&
+ priv->plat->mdio_bus_data->pcs_mask) {
+ addr = ffs(priv->plat->mdio_bus_data->pcs_mask) - 1;
+ xpcs = xpcs_create_mdiodev(priv->mii, addr);
+ ret = PTR_ERR_OR_ZERO(xpcs);
+ } else {
+ return 0;
}
- if (!priv->hw->xpcs) {
- dev_warn(priv->device, "No xPCS found\n");
- return -ENODEV;
- }
+ if (ret)
+ return dev_err_probe(priv->device, ret, "No xPCS found\n");
+
+ if (xpcs)
+ xpcs_config_eee_mult_fact(xpcs, priv->plat->mult_fact_100ns);
+
+ priv->hw->xpcs = xpcs;
return 0;
}
+void stmmac_pcs_clean(struct net_device *ndev)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ if (priv->plat->pcs_exit)
+ priv->plat->pcs_exit(priv);
+
+ if (!priv->hw->xpcs)
+ return;
+
+ xpcs_destroy(priv->hw->xpcs);
+ priv->hw->xpcs = NULL;
+}
+
+/**
+ * stmmac_clk_csr_set - dynamically set the MDC clock
+ * @priv: driver private structure
+ * Description: this is to dynamically set the MDC clock according to the csr
+ * clock input.
+ * Return: MII register CR field value
+ * Note:
+ *	If a specific clk_csr value is passed from the platform,
+ *	the CSR Clock Range selection is fixed and cannot be
+ *	changed at run-time (as reported in the driver
+ *	documentation). Otherwise the driver will try to set the MDC
+ *	clock dynamically according to the actual clock input.
+ */
+static u32 stmmac_clk_csr_set(struct stmmac_priv *priv)
+{
+ unsigned long clk_rate;
+ u32 value = ~0;
+
+ clk_rate = clk_get_rate(priv->plat->stmmac_clk);
+
+	/* The platform provided default clk_csr is assumed valid for
+	 * all cases except the ones handled below. For clock rates
+	 * above the range specified by IEEE 802.3 we cannot estimate
+	 * the proper divider, as the frequency of clk_csr_i is not
+	 * known, so the default divider is left unchanged.
+	 */
+ if (clk_rate < CSR_F_35M)
+ value = STMMAC_CSR_20_35M;
+ else if (clk_rate < CSR_F_60M)
+ value = STMMAC_CSR_35_60M;
+ else if (clk_rate < CSR_F_100M)
+ value = STMMAC_CSR_60_100M;
+ else if (clk_rate < CSR_F_150M)
+ value = STMMAC_CSR_100_150M;
+ else if (clk_rate < CSR_F_250M)
+ value = STMMAC_CSR_150_250M;
+ else if (clk_rate <= CSR_F_300M)
+ value = STMMAC_CSR_250_300M;
+ else if (clk_rate < CSR_F_500M)
+ value = STMMAC_CSR_300_500M;
+ else if (clk_rate < CSR_F_800M)
+ value = STMMAC_CSR_500_800M;
+
+ if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
+ if (clk_rate > 160000000)
+ value = 0x03;
+ else if (clk_rate > 80000000)
+ value = 0x02;
+ else if (clk_rate > 40000000)
+ value = 0x01;
+ else
+ value = 0;
+ }
+
+ if (priv->plat->core_type == DWMAC_CORE_XGMAC) {
+ if (clk_rate > 400000000)
+ value = 0x5;
+ else if (clk_rate > 350000000)
+ value = 0x4;
+ else if (clk_rate > 300000000)
+ value = 0x3;
+ else if (clk_rate > 250000000)
+ value = 0x2;
+ else if (clk_rate > 150000000)
+ value = 0x1;
+ else
+ value = 0x0;
+ }
+
+ return value;
+}
+
+static void stmmac_mdio_bus_config(struct stmmac_priv *priv)
+{
+ u32 value;
+
+ /* If a specific clk_csr value is passed from the platform, this means
+ * that the CSR Clock Range value should not be computed from the CSR
+ * clock.
+ */
+ if (priv->plat->clk_csr >= 0)
+ value = priv->plat->clk_csr;
+ else
+ value = stmmac_clk_csr_set(priv);
+
+ value <<= priv->hw->mii.clk_csr_shift;
+
+ if (value & ~priv->hw->mii.clk_csr_mask)
+ dev_warn(priv->device,
+ "clk_csr value out of range (0x%08x exceeds mask 0x%08x), truncating\n",
+ value, priv->hw->mii.clk_csr_mask);
+
+ priv->gmii_address_bus_config = value & priv->hw->mii.clk_csr_mask;
+}
+
/**
* stmmac_mdio_register
* @ndev: net device structure
@@ -537,12 +583,15 @@ int stmmac_mdio_register(struct net_device *ndev)
struct device_node *mdio_node = priv->plat->mdio_node;
struct device *dev = ndev->dev.parent;
struct fwnode_handle *fixed_node;
+ int max_addr = PHY_MAX_ADDR - 1;
struct fwnode_handle *fwnode;
- int addr, found, max_addr;
+ struct phy_device *phydev;
if (!mdio_bus_data)
return 0;
+ stmmac_mdio_bus_config(priv);
+
new_bus = mdiobus_alloc();
if (!new_bus)
return -ENOMEM;
@@ -552,7 +601,7 @@ int stmmac_mdio_register(struct net_device *ndev)
new_bus->name = "stmmac";
- if (priv->plat->has_xgmac) {
+ if (priv->plat->core_type == DWMAC_CORE_XGMAC) {
new_bus->read = &stmmac_xgmac2_mdio_read_c22;
new_bus->write = &stmmac_xgmac2_mdio_write_c22;
new_bus->read_c45 = &stmmac_xgmac2_mdio_read_c45;
@@ -560,25 +609,20 @@ int stmmac_mdio_register(struct net_device *ndev)
if (priv->synopsys_id < DWXGMAC_CORE_2_20) {
/* Right now only C22 phys are supported */
- max_addr = MII_XGMAC_MAX_C22ADDR + 1;
+ max_addr = MII_XGMAC_MAX_C22ADDR;
/* Check if DT specified an unsupported phy addr */
if (priv->plat->phy_addr > MII_XGMAC_MAX_C22ADDR)
dev_err(dev, "Unsupported phy_addr (max=%d)\n",
MII_XGMAC_MAX_C22ADDR);
- } else {
- /* XGMAC version 2.20 onwards support 32 phy addr */
- max_addr = PHY_MAX_ADDR;
}
} else {
new_bus->read = &stmmac_mdio_read_c22;
new_bus->write = &stmmac_mdio_write_c22;
- if (priv->plat->has_gmac4) {
+ if (priv->plat->core_type == DWMAC_CORE_GMAC4) {
new_bus->read_c45 = &stmmac_mdio_read_c45;
new_bus->write_c45 = &stmmac_mdio_write_c45;
}
-
- max_addr = PHY_MAX_ADDR;
}
if (mdio_bus_data->needs_reset)
@@ -587,7 +631,7 @@ int stmmac_mdio_register(struct net_device *ndev)
snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x",
new_bus->name, priv->plat->bus_id);
new_bus->priv = ndev;
- new_bus->phy_mask = mdio_bus_data->phy_mask;
+ new_bus->phy_mask = mdio_bus_data->phy_mask | mdio_bus_data->pcs_mask;
new_bus->parent = priv->device;
err = of_mdiobus_register(new_bus, mdio_node);
@@ -601,7 +645,7 @@ int stmmac_mdio_register(struct net_device *ndev)
}
/* Looks like we need a dummy read for XGMAC only and C45 PHYs */
- if (priv->plat->has_xgmac)
+ if (priv->plat->core_type == DWMAC_CORE_XGMAC)
stmmac_xgmac2_mdio_read_c45(new_bus, 0, 0, 0);
/* If fixed-link is set, skip PHY scanning */
@@ -620,41 +664,31 @@ int stmmac_mdio_register(struct net_device *ndev)
if (priv->plat->phy_node || mdio_node)
goto bus_register_done;
- found = 0;
- for (addr = 0; addr < max_addr; addr++) {
- struct phy_device *phydev = mdiobus_get_phy(new_bus, addr);
-
- if (!phydev)
- continue;
-
- /*
- * If an IRQ was provided to be assigned after
- * the bus probe, do it here.
- */
- if (!mdio_bus_data->irqs &&
- (mdio_bus_data->probed_phy_irq > 0)) {
- new_bus->irq[addr] = mdio_bus_data->probed_phy_irq;
- phydev->irq = mdio_bus_data->probed_phy_irq;
- }
-
- /*
- * If we're going to bind the MAC to this PHY bus,
- * and no PHY number was provided to the MAC,
- * use the one probed here.
- */
- if (priv->plat->phy_addr == -1)
- priv->plat->phy_addr = addr;
-
- phy_attached_info(phydev);
- found = 1;
- }
-
- if (!found && !mdio_node) {
+ phydev = phy_find_first(new_bus);
+ if (!phydev || phydev->mdio.addr > max_addr) {
dev_warn(dev, "No PHY found\n");
err = -ENODEV;
goto no_phy_found;
}
+ /*
+ * If an IRQ was provided to be assigned after
+ * the bus probe, do it here.
+ */
+ if (!mdio_bus_data->irqs && mdio_bus_data->probed_phy_irq > 0) {
+ new_bus->irq[phydev->mdio.addr] = mdio_bus_data->probed_phy_irq;
+ phydev->irq = mdio_bus_data->probed_phy_irq;
+ }
+
+ /*
+ * If we're going to bind the MAC to this PHY bus, and no PHY number
+ * was provided to the MAC, use the one probed here.
+ */
+ if (priv->plat->phy_addr == -1)
+ priv->plat->phy_addr = phydev->mdio.addr;
+
+ phy_attached_info(phydev);
+
bus_register_done:
priv->mii = new_bus;
@@ -679,9 +713,6 @@ int stmmac_mdio_unregister(struct net_device *ndev)
if (!priv->mii)
return 0;
- if (priv->hw->xpcs)
- xpcs_destroy(priv->hw->xpcs);
-
mdiobus_unregister(priv->mii);
priv->mii->priv = NULL;
mdiobus_free(priv->mii);
@@ -689,3 +720,17 @@ int stmmac_mdio_unregister(struct net_device *ndev)
return 0;
}
+
+void stmmac_mdio_lock(struct stmmac_priv *priv)
+{
+ if (priv->mii)
+ mutex_lock(&priv->mii->mdio_lock);
+}
+EXPORT_SYMBOL_GPL(stmmac_mdio_lock);
+
+void stmmac_mdio_unlock(struct stmmac_priv *priv)
+{
+ if (priv->mii)
+ mutex_unlock(&priv->mii->mdio_lock);
+}
+EXPORT_SYMBOL_GPL(stmmac_mdio_unlock);
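The callers of these helpers are outside this hunk; a sketch of the assumed usage pattern, serialising a direct register write against the MDIO bus accessors:

static void foo_dwmac_fix_mdio(struct stmmac_priv *priv, u32 val)
{
	/* Hold the mii_bus mdio_lock around a raw GMII register access */
	stmmac_mdio_lock(priv);
	writel(val, priv->ioaddr + priv->hw->mii.addr);
	stmmac_mdio_unlock(priv);
}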
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 352b01678c22..270ad066ced3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -14,6 +14,7 @@
#include <linux/dmi.h>
#include "stmmac.h"
+#include "stmmac_libpci.h"
struct stmmac_pci_info {
int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat);
@@ -21,31 +22,12 @@ struct stmmac_pci_info {
static void common_default_data(struct plat_stmmacenet_data *plat)
{
- plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
- plat->has_gmac = 1;
+ /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
+ plat->clk_csr = STMMAC_CSR_20_35M;
+ plat->core_type = DWMAC_CORE_GMAC;
plat->force_sf_dma_mode = 1;
plat->mdio_bus_data->needs_reset = true;
-
- /* Set default value for multicast hash bins */
- plat->multicast_filter_bins = HASH_TABLE_SIZE;
-
- /* Set default value for unicast filter entries */
- plat->unicast_filter_entries = 1;
-
- /* Set the maxmtu to a default of JUMBO_LEN */
- plat->maxmtu = JUMBO_LEN;
-
- /* Set default number of RX and TX queues to use */
- plat->tx_queues_to_use = 1;
- plat->rx_queues_to_use = 1;
-
- /* Disable Priority config by default */
- plat->tx_queues_cfg[0].use_prio = false;
- plat->rx_queues_cfg[0].use_prio = false;
-
- /* Disable RX queues routing by default */
- plat->rx_queues_cfg[0].pkt_route = 0x0;
}
static int stmmac_default_data(struct pci_dev *pdev,
@@ -74,28 +56,18 @@ static int snps_gmac5_default_data(struct pci_dev *pdev,
{
int i;
- plat->clk_csr = 5;
- plat->has_gmac4 = 1;
+ plat->clk_csr = STMMAC_CSR_250_300M;
+ plat->core_type = DWMAC_CORE_GMAC4;
plat->force_sf_dma_mode = 1;
plat->flags |= STMMAC_FLAG_TSO_EN;
plat->pmt = 1;
- /* Set default value for multicast hash bins */
- plat->multicast_filter_bins = HASH_TABLE_SIZE;
-
- /* Set default value for unicast filter entries */
- plat->unicast_filter_entries = 1;
-
- /* Set the maxmtu to a default of JUMBO_LEN */
- plat->maxmtu = JUMBO_LEN;
-
/* Set default number of RX and TX queues to use */
plat->tx_queues_to_use = 4;
plat->rx_queues_to_use = 4;
plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
for (i = 0; i < plat->tx_queues_to_use; i++) {
- plat->tx_queues_cfg[i].use_prio = false;
plat->tx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
plat->tx_queues_cfg[i].weight = 25;
if (i > 0)
@@ -103,15 +75,10 @@ static int snps_gmac5_default_data(struct pci_dev *pdev,
}
plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
- for (i = 0; i < plat->rx_queues_to_use; i++) {
- plat->rx_queues_cfg[i].use_prio = false;
+ for (i = 0; i < plat->rx_queues_to_use; i++)
plat->rx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
- plat->rx_queues_cfg[i].pkt_route = 0x0;
- plat->rx_queues_cfg[i].chan = i;
- }
plat->bus_id = 1;
- plat->phy_addr = -1;
plat->phy_interface = PHY_INTERFACE_MODE_GMII;
plat->dma_cfg->pbl = 32;
@@ -126,10 +93,8 @@ static int snps_gmac5_default_data(struct pci_dev *pdev,
plat->axi->axi_rd_osr_lmt = 31;
plat->axi->axi_fb = false;
- plat->axi->axi_blen[0] = 4;
- plat->axi->axi_blen[1] = 8;
- plat->axi->axi_blen[2] = 16;
- plat->axi->axi_blen[3] = 32;
+ plat->axi->axi_blen_regval = DMA_AXI_BLEN4 | DMA_AXI_BLEN8 |
+ DMA_AXI_BLEN16 | DMA_AXI_BLEN32;
return 0;
}
@@ -155,11 +120,11 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
{
struct stmmac_pci_info *info = (struct stmmac_pci_info *)id->driver_data;
struct plat_stmmacenet_data *plat;
- struct stmmac_resources res;
- int i;
+ struct stmmac_resources res = {};
int ret;
+ int i;
- plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
+ plat = stmmac_plat_dat_alloc(&pdev->dev);
if (!plat)
return -ENOMEM;
@@ -192,9 +157,9 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
if (pci_resource_len(pdev, i) == 0)
continue;
- ret = pcim_iomap_regions(pdev, BIT(i), pci_name(pdev));
- if (ret)
- return ret;
+ res.addr = pcim_iomap_region(pdev, i, STMMAC_RESOURCE_NAME);
+ if (IS_ERR(res.addr))
+ return PTR_ERR(res.addr);
break;
}
@@ -204,8 +169,6 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
- memset(&res, 0, sizeof(res));
- res.addr = pcim_iomap_table(pdev)[i];
res.wol_irq = pdev->irq;
res.irq = pdev->irq;
@@ -219,6 +182,9 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
plat->safety_feat_cfg->prtyen = 1;
plat->safety_feat_cfg->tmouten = 1;
+ plat->suspend = stmmac_pci_plat_suspend;
+ plat->resume = stmmac_pci_plat_resume;
+
return stmmac_dvr_probe(&pdev->dev, plat, &res);
}
@@ -226,60 +192,13 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
* stmmac_pci_remove
*
* @pdev: platform device pointer
- * Description: this function calls the main to free the net resources
- * and releases the PCI resources.
+ * Description: this function calls the main remove routine to free the net resources.
*/
static void stmmac_pci_remove(struct pci_dev *pdev)
{
- int i;
-
stmmac_dvr_remove(&pdev->dev);
-
- for (i = 0; i < PCI_STD_NUM_BARS; i++) {
- if (pci_resource_len(pdev, i) == 0)
- continue;
- pcim_iounmap_regions(pdev, BIT(i));
- break;
- }
-}
-
-static int __maybe_unused stmmac_pci_suspend(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- int ret;
-
- ret = stmmac_suspend(dev);
- if (ret)
- return ret;
-
- ret = pci_save_state(pdev);
- if (ret)
- return ret;
-
- pci_disable_device(pdev);
- pci_wake_from_d3(pdev, true);
- return 0;
}
-static int __maybe_unused stmmac_pci_resume(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- int ret;
-
- pci_restore_state(pdev);
- pci_set_power_state(pdev, PCI_D0);
-
- ret = pci_enable_device(pdev);
- if (ret)
- return ret;
-
- pci_set_master(pdev);
-
- return stmmac_resume(dev);
-}
-
-static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_pci_suspend, stmmac_pci_resume);
-
/* synthetic ID, no official vendor */
#define PCI_VENDOR_ID_STMMAC 0x0700
@@ -301,7 +220,7 @@ static struct pci_driver stmmac_pci_driver = {
.probe = stmmac_pci_probe,
.remove = stmmac_pci_remove,
.driver = {
- .pm = &stmmac_pm_ops,
+ .pm = &stmmac_simple_pm_ops,
},
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.c
new file mode 100644
index 000000000000..e2f531c11986
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include "stmmac.h"
+#include "stmmac_pcs.h"
+
+static int dwmac_integrated_pcs_enable(struct phylink_pcs *pcs)
+{
+ struct stmmac_pcs *spcs = phylink_pcs_to_stmmac_pcs(pcs);
+
+ stmmac_mac_irq_modify(spcs->priv, 0, spcs->int_mask);
+
+ return 0;
+}
+
+static void dwmac_integrated_pcs_disable(struct phylink_pcs *pcs)
+{
+ struct stmmac_pcs *spcs = phylink_pcs_to_stmmac_pcs(pcs);
+
+ stmmac_mac_irq_modify(spcs->priv, spcs->int_mask, 0);
+}
+
+static void dwmac_integrated_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
+ struct phylink_link_state *state)
+{
+ state->link = false;
+}
+
+static int dwmac_integrated_pcs_config(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ struct stmmac_pcs *spcs = phylink_pcs_to_stmmac_pcs(pcs);
+
+ dwmac_ctrl_ane(spcs->base, 0, 1, spcs->priv->hw->reverse_sgmii_enable);
+
+ return 0;
+}
+
+static const struct phylink_pcs_ops dwmac_integrated_pcs_ops = {
+ .pcs_enable = dwmac_integrated_pcs_enable,
+ .pcs_disable = dwmac_integrated_pcs_disable,
+ .pcs_get_state = dwmac_integrated_pcs_get_state,
+ .pcs_config = dwmac_integrated_pcs_config,
+};
+
+int stmmac_integrated_pcs_init(struct stmmac_priv *priv, unsigned int offset,
+ u32 int_mask)
+{
+ struct stmmac_pcs *spcs;
+
+ spcs = devm_kzalloc(priv->device, sizeof(*spcs), GFP_KERNEL);
+ if (!spcs)
+ return -ENOMEM;
+
+ spcs->priv = priv;
+ spcs->base = priv->ioaddr + offset;
+ spcs->int_mask = int_mask;
+ spcs->pcs.ops = &dwmac_integrated_pcs_ops;
+
+ __set_bit(PHY_INTERFACE_MODE_SGMII, spcs->pcs.supported_interfaces);
+
+ priv->integrated_pcs = spcs;
+
+ return 0;
+}
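A sketch of how a core variant might hook this up during setup; the AN block offset and interrupt mask below are placeholders, not values taken from this patch:

static int foo_gmac_setup_pcs(struct stmmac_priv *priv)
{
	/* The AN register block offset and the PCS interrupt enable bit
	 * are core specific; both values here are illustrative only.
	 */
	return stmmac_integrated_pcs_init(priv, 0x00c0, BIT(3));
}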
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h
index 13a30e6df4c1..cda93894168e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h
@@ -9,6 +9,7 @@
#ifndef __STMMAC_PCS_H__
#define __STMMAC_PCS_H__
+#include <linux/phylink.h>
#include <linux/slab.h>
#include <linux/io.h>
#include "common.h"
@@ -16,6 +17,8 @@
/* PCS registers (AN/TBI/SGMII/RGMII) offsets */
#define GMAC_AN_CTRL(x) (x) /* AN control */
#define GMAC_AN_STATUS(x) (x + 0x4) /* AN status */
+
+/* ADV, LPA and EXP are only available for the TBI and RTBI interfaces */
#define GMAC_ANE_ADV(x) (x + 0x8) /* ANE Advertisement */
#define GMAC_ANE_LPA(x)		(x + 0xc)	/* ANE link partner ability */
#define GMAC_ANE_EXP(x) (x + 0x10) /* ANE expansion */
@@ -44,6 +47,24 @@
#define GMAC_ANE_RFE_SHIFT 12
#define GMAC_ANE_ACK BIT(14)
+struct stmmac_priv;
+
+struct stmmac_pcs {
+ struct stmmac_priv *priv;
+ void __iomem *base;
+ u32 int_mask;
+ struct phylink_pcs pcs;
+};
+
+static inline struct stmmac_pcs *
+phylink_pcs_to_stmmac_pcs(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct stmmac_pcs, pcs);
+}
+
+int stmmac_integrated_pcs_init(struct stmmac_priv *priv, unsigned int offset,
+ u32 int_mask);
+
/**
* dwmac_pcs_isr - TBI, RTBI, or SGMII PHY ISR
* @ioaddr: IO registers pointer
@@ -75,35 +96,17 @@ static inline void dwmac_pcs_isr(void __iomem *ioaddr, u32 reg,
}
/**
- * dwmac_rane - To restart ANE
- * @ioaddr: IO registers pointer
- * @reg: Base address of the AN Control Register.
- * @restart: to restart ANE
- * Description: this is to just restart the Auto-Negotiation.
- */
-static inline void dwmac_rane(void __iomem *ioaddr, u32 reg, bool restart)
-{
- u32 value = readl(ioaddr + GMAC_AN_CTRL(reg));
-
- if (restart)
- value |= GMAC_AN_CTRL_RAN;
-
- writel(value, ioaddr + GMAC_AN_CTRL(reg));
-}
-
-/**
* dwmac_ctrl_ane - To program the AN Control Register.
* @ioaddr: IO registers pointer
* @reg: Base address of the AN Control Register.
* @ane: to enable the auto-negotiation
* @srgmi_ral: to manage MAC-2-MAC SGMII connections.
- * @loopback: to cause the PHY to loopback tx data into rx path.
* Description: this is the main function to configure the AN control register
* and init the ANE, select loopback (usually for debugging purpose) and
* configure SGMII RAL.
*/
static inline void dwmac_ctrl_ane(void __iomem *ioaddr, u32 reg, bool ane,
- bool srgmi_ral, bool loopback)
+ bool srgmi_ral)
{
u32 value = readl(ioaddr + GMAC_AN_CTRL(reg));
@@ -119,39 +122,6 @@ static inline void dwmac_ctrl_ane(void __iomem *ioaddr, u32 reg, bool ane,
if (srgmi_ral)
value |= GMAC_AN_CTRL_SGMRAL;
- if (loopback)
- value |= GMAC_AN_CTRL_ELE;
-
writel(value, ioaddr + GMAC_AN_CTRL(reg));
}
-
-/**
- * dwmac_get_adv_lp - Get ADV and LP cap
- * @ioaddr: IO registers pointer
- * @reg: Base address of the AN Control Register.
- * @adv_lp: structure to store the adv,lp status
- * Description: this is to expose the ANE advertisement and Link partner ability
- * status to ethtool support.
- */
-static inline void dwmac_get_adv_lp(void __iomem *ioaddr, u32 reg,
- struct rgmii_adv *adv_lp)
-{
- u32 value = readl(ioaddr + GMAC_ANE_ADV(reg));
-
- if (value & GMAC_ANE_FD)
- adv_lp->duplex = DUPLEX_FULL;
- if (value & GMAC_ANE_HD)
- adv_lp->duplex |= DUPLEX_HALF;
-
- adv_lp->pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT;
-
- value = readl(ioaddr + GMAC_ANE_LPA(reg));
-
- if (value & GMAC_ANE_FD)
- adv_lp->lp_duplex = DUPLEX_FULL;
- if (value & GMAC_ANE_HD)
- adv_lp->lp_duplex = DUPLEX_HALF;
-
- adv_lp->lp_pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT;
-}
#endif /* __STMMAC_PCS_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 54797edc9b38..8979a50b5507 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -60,7 +60,7 @@ static int dwmac1000_validate_mcast_bins(struct device *dev, int mcast_bins)
* Description:
* This function validates the number of Unicast address entries supported
* by a particular Synopsys 10/100/1000 controller. The Synopsys controller
- * supports 1..32, 64, or 128 Unicast filter entries for it's Unicast filter
+ * supports 1..32, 64, or 128 Unicast filter entries for its Unicast filter
* logic. This function validates a valid, supported configuration is
* selected, and defaults to 1 Unicast address if an unsupported
* configuration is selected.
@@ -95,6 +95,7 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
{
struct device_node *np;
struct stmmac_axi *axi;
+ u32 axi_blen[AXI_BLEN];
np = of_parse_phandle(pdev->dev.of_node, "snps,axi-config", 0);
if (!np)
@@ -117,7 +118,8 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
axi->axi_wr_osr_lmt = 1;
if (of_property_read_u32(np, "snps,rd_osr_lmt", &axi->axi_rd_osr_lmt))
axi->axi_rd_osr_lmt = 1;
- of_property_read_u32_array(np, "snps,blen", axi->axi_blen, AXI_BLEN);
+ of_property_read_u32_array(np, "snps,blen", axi_blen, AXI_BLEN);
+ stmmac_axi_blen_to_mask(&axi->axi_blen_regval, axi_blen, AXI_BLEN);
of_node_put(np);
return axi;
@@ -137,13 +139,6 @@ static int stmmac_mtl_setup(struct platform_device *pdev,
u8 queue = 0;
int ret = 0;
- /* For backwards-compatibility with device trees that don't have any
- * snps,mtl-rx-config or snps,mtl-tx-config properties, we fall back
- * to one RX and TX queues each.
- */
- plat->rx_queues_to_use = 1;
- plat->tx_queues_to_use = 1;
-
/* First Queue must always be in DCB mode. As MTL_QUEUE_DCB = 1 we need
* to always set this, otherwise Queue will be classified as AVB
* (because MTL_QUEUE_AVB = 0).
@@ -162,9 +157,8 @@ static int stmmac_mtl_setup(struct platform_device *pdev,
}
/* Processing RX queues common config */
- if (of_property_read_u32(rx_node, "snps,rx-queues-to-use",
- &plat->rx_queues_to_use))
- plat->rx_queues_to_use = 1;
+ of_property_read_u32(rx_node, "snps,rx-queues-to-use",
+ &plat->rx_queues_to_use);
if (of_property_read_bool(rx_node, "snps,rx-sched-sp"))
plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
@@ -185,18 +179,13 @@ static int stmmac_mtl_setup(struct platform_device *pdev,
else
plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
- if (of_property_read_u32(q_node, "snps,map-to-dma-channel",
- &plat->rx_queues_cfg[queue].chan))
- plat->rx_queues_cfg[queue].chan = queue;
+ of_property_read_u32(q_node, "snps,map-to-dma-channel",
+ &plat->rx_queues_cfg[queue].chan);
/* TODO: Dynamic mapping to be included in the future */
- if (of_property_read_u32(q_node, "snps,priority",
- &plat->rx_queues_cfg[queue].prio)) {
- plat->rx_queues_cfg[queue].prio = 0;
- plat->rx_queues_cfg[queue].use_prio = false;
- } else {
+ if (!of_property_read_u32(q_node, "snps,priority",
+ &plat->rx_queues_cfg[queue].prio))
plat->rx_queues_cfg[queue].use_prio = true;
- }
/* RX queue specific packet type routing */
if (of_property_read_bool(q_node, "snps,route-avcp"))
@@ -209,8 +198,6 @@ static int stmmac_mtl_setup(struct platform_device *pdev,
plat->rx_queues_cfg[queue].pkt_route = PACKET_UPQ;
else if (of_property_read_bool(q_node, "snps,route-multi-broad"))
plat->rx_queues_cfg[queue].pkt_route = PACKET_MCBCQ;
- else
- plat->rx_queues_cfg[queue].pkt_route = 0x0;
queue++;
}
@@ -221,9 +208,8 @@ static int stmmac_mtl_setup(struct platform_device *pdev,
}
/* Processing TX queues common config */
- if (of_property_read_u32(tx_node, "snps,tx-queues-to-use",
- &plat->tx_queues_to_use))
- plat->tx_queues_to_use = 1;
+ of_property_read_u32(tx_node, "snps,tx-queues-to-use",
+ &plat->tx_queues_to_use);
if (of_property_read_bool(tx_node, "snps,tx-sched-wrr"))
plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
@@ -268,13 +254,9 @@ static int stmmac_mtl_setup(struct platform_device *pdev,
plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
}
- if (of_property_read_u32(q_node, "snps,priority",
- &plat->tx_queues_cfg[queue].prio)) {
- plat->tx_queues_cfg[queue].prio = 0;
- plat->tx_queues_cfg[queue].use_prio = false;
- } else {
+ if (!of_property_read_u32(q_node, "snps,priority",
+ &plat->tx_queues_cfg[queue].prio))
plat->tx_queues_cfg[queue].use_prio = true;
- }
plat->tx_queues_cfg[queue].coe_unsupported =
of_property_read_bool(q_node, "snps,coe-unsupported");
@@ -405,21 +387,17 @@ static int stmmac_of_get_mac_mode(struct device_node *np)
return -ENODEV;
}
-/**
- * stmmac_remove_config_dt - undo the effects of stmmac_probe_config_dt()
- * @pdev: platform_device structure
- * @plat: driver data platform structure
- *
- * Release resources claimed by stmmac_probe_config_dt().
- */
-static void stmmac_remove_config_dt(struct platform_device *pdev,
- struct plat_stmmacenet_data *plat)
-{
- clk_disable_unprepare(plat->stmmac_clk);
- clk_disable_unprepare(plat->pclk);
- of_node_put(plat->phy_node);
- of_node_put(plat->mdio_node);
-}
+/* Compatible string array for all gmac4 devices */
+static const char * const stmmac_gmac4_compats[] = {
+ "snps,dwmac-4.00",
+ "snps,dwmac-4.10a",
+ "snps,dwmac-4.20a",
+ "snps,dwmac-5.00a",
+ "snps,dwmac-5.10a",
+ "snps,dwmac-5.20",
+ "snps,dwmac-5.30a",
+ NULL
+};
/**
* stmmac_probe_config_dt - parse device-tree driver parameters
@@ -435,11 +413,12 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
struct device_node *np = pdev->dev.of_node;
struct plat_stmmacenet_data *plat;
struct stmmac_dma_cfg *dma_cfg;
+ static int bus_id = -ENODEV;
int phy_mode;
void *ret;
int rc;
- plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
+ plat = stmmac_plat_dat_alloc(&pdev->dev);
if (!plat)
return ERR_PTR(-ENOMEM);
@@ -456,8 +435,12 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
return ERR_PTR(phy_mode);
plat->phy_interface = phy_mode;
+
rc = stmmac_of_get_mac_mode(np);
- plat->mac_interface = rc < 0 ? plat->phy_interface : rc;
+ if (rc >= 0 && rc != phy_mode)
+ dev_warn(&pdev->dev,
+			 "\"mac-mode\" property used for %s but differs from \"phy-mode\" of %s, and will be ignored. Please report.\n",
+ phy_modes(rc), phy_modes(phy_mode));
/* Some wrapper drivers still rely on phy_node. Let's save it while
* they are not converted to phylink. */
@@ -470,16 +453,15 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
of_property_read_u32(np, "max-speed", &plat->max_speed);
plat->bus_id = of_alias_get_id(np, "ethernet");
- if (plat->bus_id < 0)
- plat->bus_id = 0;
-
- /* Default to phy auto-detection */
- plat->phy_addr = -1;
+ if (plat->bus_id < 0) {
+ if (bus_id < 0)
+ bus_id = of_alias_get_highest_id("ethernet");
+ /* No ethernet alias found, init at -1 so first bus_id is 0 */
+ if (bus_id < 0)
+ bus_id = -1;
+ plat->bus_id = ++bus_id;
+ }
- /* Default to get clk_csr from stmmac_clk_csr_set(),
- * or get clk_csr from device tree.
- */
- plat->clk_csr = -1;
if (of_property_read_u32(np, "snps,clk-csr", &plat->clk_csr))
of_property_read_u32(np, "clk_csr", &plat->clk_csr);
@@ -490,8 +472,10 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");
rc = stmmac_mdio_setup(plat, np, &pdev->dev);
- if (rc)
- return ERR_PTR(rc);
+ if (rc) {
+ ret = ERR_PTR(rc);
+ goto error_put_phy;
+ }
of_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size);
@@ -500,19 +484,11 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
plat->force_sf_dma_mode =
of_property_read_bool(np, "snps,force_sf_dma_mode");
- if (of_property_read_bool(np, "snps,en-tx-lpi-clockgating"))
+ if (of_property_read_bool(np, "snps,en-tx-lpi-clockgating")) {
+ dev_warn(&pdev->dev,
+ "OF property snps,en-tx-lpi-clockgating is deprecated, please convert driver to use STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP\n");
plat->flags |= STMMAC_FLAG_EN_TX_LPI_CLOCKGATING;
-
- /* Set the maxmtu to a default of JUMBO_LEN in case the
- * parameter is not present in the device tree.
- */
- plat->maxmtu = JUMBO_LEN;
-
- /* Set default value for multicast hash bins */
- plat->multicast_filter_bins = HASH_TABLE_SIZE;
-
- /* Set default value for unicast filter entries */
- plat->unicast_filter_entries = 1;
+ }
/*
* Currently only the properties needed on SPEAr600
@@ -522,6 +498,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
if (of_device_is_compatible(np, "st,spear600-gmac") ||
of_device_is_compatible(np, "snps,dwmac-3.50a") ||
of_device_is_compatible(np, "snps,dwmac-3.70a") ||
+ of_device_is_compatible(np, "snps,dwmac-3.72a") ||
of_device_is_compatible(np, "snps,dwmac")) {
/* Note that the max-frame-size parameter as defined in the
* ePAPR v1.1 spec is defined as max-frame-size, it's
@@ -539,25 +516,20 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
&pdev->dev, plat->unicast_filter_entries);
plat->multicast_filter_bins = dwmac1000_validate_mcast_bins(
&pdev->dev, plat->multicast_filter_bins);
- plat->has_gmac = 1;
+ plat->core_type = DWMAC_CORE_GMAC;
plat->pmt = 1;
}
if (of_device_is_compatible(np, "snps,dwmac-3.40a")) {
- plat->has_gmac = 1;
+ plat->core_type = DWMAC_CORE_GMAC;
plat->enh_desc = 1;
plat->tx_coe = 1;
plat->bugged_jumbo = 1;
plat->pmt = 1;
}
- if (of_device_is_compatible(np, "snps,dwmac-4.00") ||
- of_device_is_compatible(np, "snps,dwmac-4.10a") ||
- of_device_is_compatible(np, "snps,dwmac-4.20a") ||
- of_device_is_compatible(np, "snps,dwmac-5.10a") ||
- of_device_is_compatible(np, "snps,dwmac-5.20")) {
- plat->has_gmac4 = 1;
- plat->has_gmac = 0;
+ if (of_device_compatible_match(np, stmmac_gmac4_compats)) {
+ plat->core_type = DWMAC_CORE_GMAC4;
plat->pmt = 1;
if (of_property_read_bool(np, "snps,tso"))
plat->flags |= STMMAC_FLAG_TSO_EN;
@@ -571,17 +543,19 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
}
if (of_device_is_compatible(np, "snps,dwxgmac")) {
- plat->has_xgmac = 1;
+ plat->core_type = DWMAC_CORE_XGMAC;
plat->pmt = 1;
if (of_property_read_bool(np, "snps,tso"))
plat->flags |= STMMAC_FLAG_TSO_EN;
+ of_property_read_u32(np, "snps,multicast-filter-bins",
+ &plat->multicast_filter_bins);
}
dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
GFP_KERNEL);
if (!dma_cfg) {
- stmmac_remove_config_dt(pdev, plat);
- return ERR_PTR(-ENOMEM);
+ ret = ERR_PTR(-ENOMEM);
+ goto error_put_mdio;
}
plat->dma_cfg = dma_cfg;
@@ -609,8 +583,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
rc = stmmac_mtl_setup(pdev, plat);
if (rc) {
- stmmac_remove_config_dt(pdev, plat);
- return ERR_PTR(rc);
+ ret = ERR_PTR(rc);
+ goto error_put_mdio;
}
/* clock setup */
@@ -639,7 +613,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
dev_info(&pdev->dev, "PTP uses main clock\n");
} else {
plat->clk_ptp_rate = clk_get_rate(plat->clk_ptp_ref);
- dev_dbg(&pdev->dev, "PTP rate %d\n", plat->clk_ptp_rate);
+ dev_dbg(&pdev->dev, "PTP rate %lu\n", plat->clk_ptp_rate);
}
plat->stmmac_rst = devm_reset_control_get_optional(&pdev->dev,
@@ -662,6 +636,10 @@ error_hw_init:
clk_disable_unprepare(plat->pclk);
error_pclk_get:
clk_disable_unprepare(plat->stmmac_clk);
+error_put_mdio:
+ of_node_put(plat->mdio_node);
+error_put_phy:
+ of_node_put(plat->phy_node);
return ret;
}
@@ -670,16 +648,17 @@ static void devm_stmmac_remove_config_dt(void *data)
{
struct plat_stmmacenet_data *plat = data;
- /* Platform data argument is unused */
- stmmac_remove_config_dt(NULL, plat);
+ clk_disable_unprepare(plat->stmmac_clk);
+ clk_disable_unprepare(plat->pclk);
+ of_node_put(plat->mdio_node);
+ of_node_put(plat->phy_node);
}
/**
* devm_stmmac_probe_config_dt
* @pdev: platform_device structure
* @mac: MAC address to use
- * Description: Devres variant of stmmac_probe_config_dt(). Does not require
- * the user to call stmmac_remove_config_dt() at driver detach.
+ * Description: Devres variant of stmmac_probe_config_dt().
*/
struct plat_stmmacenet_data *
devm_stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
@@ -707,6 +686,17 @@ devm_stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
#endif /* CONFIG_OF */
EXPORT_SYMBOL_GPL(devm_stmmac_probe_config_dt);
+struct clk *stmmac_pltfr_find_clk(struct plat_stmmacenet_data *plat_dat,
+ const char *name)
+{
+ for (int i = 0; i < plat_dat->num_clks; i++)
+ if (strcmp(plat_dat->clks[i].id, name) == 0)
+ return plat_dat->clks[i].clk;
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(stmmac_pltfr_find_clk);
+
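A glue driver that has populated plat_dat->clks (for example via devm_clk_bulk_get_all()) could resolve a named clock with the new helper. A minimal sketch, assuming a clock named "ptp_ref" and a hypothetical dwmac_glue_setup_ptp_clk() wrapper, neither of which comes from this patch:

	/* Hypothetical glue-driver helper: look up a bulk-acquired clock
	 * by its connection id and use it as the PTP reference clock.
	 */
	static int dwmac_glue_setup_ptp_clk(struct plat_stmmacenet_data *plat_dat)
	{
		struct clk *ptp_clk = stmmac_pltfr_find_clk(plat_dat, "ptp_ref");

		if (!ptp_clk)
			return -ENOENT;	/* no "ptp_ref" entry in plat_dat->clks[] */

		plat_dat->clk_ptp_ref = ptp_clk;
		plat_dat->clk_ptp_rate = clk_get_rate(ptp_clk);
		return 0;
	}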
int stmmac_get_platform_resources(struct platform_device *pdev,
struct stmmac_resources *stmmac_res)
{
@@ -759,36 +749,50 @@ EXPORT_SYMBOL_GPL(stmmac_get_platform_resources);
/**
* stmmac_pltfr_init
- * @pdev: pointer to the platform device
+ * @dev: pointer to the device structure
* @plat: driver data platform structure
* Description: Call the platform's init callback (if any) and propagate
* the return value.
*/
-int stmmac_pltfr_init(struct platform_device *pdev,
- struct plat_stmmacenet_data *plat)
+static int stmmac_pltfr_init(struct device *dev,
+ struct plat_stmmacenet_data *plat)
{
int ret = 0;
if (plat->init)
- ret = plat->init(pdev, plat->bsp_priv);
+ ret = plat->init(dev, plat->bsp_priv);
return ret;
}
-EXPORT_SYMBOL_GPL(stmmac_pltfr_init);
/**
* stmmac_pltfr_exit
- * @pdev: pointer to the platform device
+ * @dev: pointer to the device structure
* @plat: driver data platform structure
* Description: Call the platform's exit callback (if any).
*/
-void stmmac_pltfr_exit(struct platform_device *pdev,
- struct plat_stmmacenet_data *plat)
+static void stmmac_pltfr_exit(struct device *dev,
+ struct plat_stmmacenet_data *plat)
{
if (plat->exit)
- plat->exit(pdev, plat->bsp_priv);
+ plat->exit(dev, plat->bsp_priv);
+}
+
+static int stmmac_plat_suspend(struct device *dev, void *bsp_priv)
+{
+ struct stmmac_priv *priv = netdev_priv(dev_get_drvdata(dev));
+
+ stmmac_pltfr_exit(dev, priv->plat);
+
+ return 0;
+}
+
+static int stmmac_plat_resume(struct device *dev, void *bsp_priv)
+{
+ struct stmmac_priv *priv = netdev_priv(dev_get_drvdata(dev));
+
+ return stmmac_pltfr_init(dev, priv->plat);
}
-EXPORT_SYMBOL_GPL(stmmac_pltfr_exit);
/**
* stmmac_pltfr_probe
@@ -802,19 +806,12 @@ int stmmac_pltfr_probe(struct platform_device *pdev,
struct plat_stmmacenet_data *plat,
struct stmmac_resources *res)
{
- int ret;
-
- ret = stmmac_pltfr_init(pdev, plat);
- if (ret)
- return ret;
-
- ret = stmmac_dvr_probe(&pdev->dev, plat, res);
- if (ret) {
- stmmac_pltfr_exit(pdev, plat);
- return ret;
- }
+ if (!plat->suspend && plat->exit)
+ plat->suspend = stmmac_plat_suspend;
+ if (!plat->resume && plat->init)
+ plat->resume = stmmac_plat_resume;
- return ret;
+ return stmmac_dvr_probe(&pdev->dev, plat, res);
}
EXPORT_SYMBOL_GPL(stmmac_pltfr_probe);
@@ -856,54 +853,40 @@ EXPORT_SYMBOL_GPL(devm_stmmac_pltfr_probe);
*/
void stmmac_pltfr_remove(struct platform_device *pdev)
{
- struct net_device *ndev = platform_get_drvdata(pdev);
- struct stmmac_priv *priv = netdev_priv(ndev);
- struct plat_stmmacenet_data *plat = priv->plat;
-
stmmac_dvr_remove(&pdev->dev);
- stmmac_pltfr_exit(pdev, plat);
}
EXPORT_SYMBOL_GPL(stmmac_pltfr_remove);
-/**
- * stmmac_pltfr_suspend
- * @dev: device pointer
- * Description: this function is invoked when suspend the driver and it direcly
- * call the main suspend function and then, if required, on some platform, it
- * can call an exit helper.
- */
-static int __maybe_unused stmmac_pltfr_suspend(struct device *dev)
+static int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
{
+ struct plat_stmmacenet_data *plat_dat = priv->plat;
int ret;
- struct net_device *ndev = dev_get_drvdata(dev);
- struct stmmac_priv *priv = netdev_priv(ndev);
- struct platform_device *pdev = to_platform_device(dev);
-
- ret = stmmac_suspend(dev);
- stmmac_pltfr_exit(pdev, priv->plat);
-
- return ret;
-}
-/**
- * stmmac_pltfr_resume
- * @dev: device pointer
- * Description: this function is invoked when resume the driver before calling
- * the main resume function, on some platforms, it can call own init helper
- * if required.
- */
-static int __maybe_unused stmmac_pltfr_resume(struct device *dev)
-{
- struct net_device *ndev = dev_get_drvdata(dev);
- struct stmmac_priv *priv = netdev_priv(ndev);
- struct platform_device *pdev = to_platform_device(dev);
- int ret;
-
- ret = stmmac_pltfr_init(pdev, priv->plat);
- if (ret)
- return ret;
+ if (enabled) {
+ ret = clk_prepare_enable(plat_dat->stmmac_clk);
+ if (ret)
+ return ret;
+ ret = clk_prepare_enable(plat_dat->pclk);
+ if (ret) {
+ clk_disable_unprepare(plat_dat->stmmac_clk);
+ return ret;
+ }
+ if (plat_dat->clks_config) {
+ ret = plat_dat->clks_config(plat_dat->bsp_priv, enabled);
+ if (ret) {
+ clk_disable_unprepare(plat_dat->stmmac_clk);
+ clk_disable_unprepare(plat_dat->pclk);
+ return ret;
+ }
+ }
+ } else {
+ clk_disable_unprepare(plat_dat->stmmac_clk);
+ clk_disable_unprepare(plat_dat->pclk);
+ if (plat_dat->clks_config)
+ plat_dat->clks_config(plat_dat->bsp_priv, enabled);
+ }
- return stmmac_resume(dev);
+ return 0;
}
static int __maybe_unused stmmac_runtime_suspend(struct device *dev)
@@ -933,7 +916,7 @@ static int __maybe_unused stmmac_pltfr_noirq_suspend(struct device *dev)
if (!netif_running(ndev))
return 0;
- if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
+ if (!priv->wolopts) {
/* Disable clock in case of PWM is off */
clk_disable_unprepare(priv->plat->clk_ptp_ref);
@@ -954,7 +937,7 @@ static int __maybe_unused stmmac_pltfr_noirq_resume(struct device *dev)
if (!netif_running(ndev))
return 0;
- if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
+ if (!priv->wolopts) {
/* enable the clk previously disabled */
ret = pm_runtime_force_resume(dev);
if (ret)
@@ -973,7 +956,7 @@ static int __maybe_unused stmmac_pltfr_noirq_resume(struct device *dev)
}
const struct dev_pm_ops stmmac_pltfr_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(stmmac_pltfr_suspend, stmmac_pltfr_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(stmmac_suspend, stmmac_resume)
SET_RUNTIME_PM_OPS(stmmac_runtime_suspend, stmmac_runtime_resume, NULL)
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(stmmac_pltfr_noirq_suspend, stmmac_pltfr_noirq_resume)
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
index bb6fc7e59aed..6e6561e29d6e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
@@ -14,14 +14,12 @@
struct plat_stmmacenet_data *
devm_stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac);
+struct clk *stmmac_pltfr_find_clk(struct plat_stmmacenet_data *plat_dat,
+ const char *name);
+
int stmmac_get_platform_resources(struct platform_device *pdev,
struct stmmac_resources *stmmac_res);
-int stmmac_pltfr_init(struct platform_device *pdev,
- struct plat_stmmacenet_data *plat);
-void stmmac_pltfr_exit(struct platform_device *pdev,
- struct plat_stmmacenet_data *plat);
-
int stmmac_pltfr_probe(struct platform_device *pdev,
struct plat_stmmacenet_data *plat,
struct stmmac_resources *res);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index e04830a3a1fb..3e30172fa129 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -9,7 +9,8 @@
*******************************************************************************/
#include "stmmac.h"
#include "stmmac_ptp.h"
-#include "dwmac4.h"
+
+#define PTP_SAFE_TIME_OFFSET_NS 500000
/**
* stmmac_adjust_freq
@@ -56,7 +57,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
bool xmac, est_rst = false;
int ret;
- xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
+ xmac = dwmac_is_xmac(priv->plat->core_type);
if (delta < 0) {
neg_adj = 1;
@@ -68,13 +69,13 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
nsec = reminder;
/* If EST is enabled, disabled it before adjust ptp time. */
- if (priv->plat->est && priv->plat->est->enable) {
+ if (priv->est && priv->est->enable) {
est_rst = true;
- mutex_lock(&priv->plat->est->lock);
- priv->plat->est->enable = false;
- stmmac_est_configure(priv, priv, priv->plat->est,
+ mutex_lock(&priv->est_lock);
+ priv->est->enable = false;
+ stmmac_est_configure(priv, priv, priv->est,
priv->plat->clk_ptp_rate);
- mutex_unlock(&priv->plat->est->lock);
+ mutex_unlock(&priv->est_lock);
}
write_lock_irqsave(&priv->ptp_lock, flags);
@@ -87,24 +88,24 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
ktime_t current_time_ns, basetime;
u64 cycle_time;
- mutex_lock(&priv->plat->est->lock);
+ mutex_lock(&priv->est_lock);
priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time);
current_time_ns = timespec64_to_ktime(current_time);
- time.tv_nsec = priv->plat->est->btr_reserve[0];
- time.tv_sec = priv->plat->est->btr_reserve[1];
+ time.tv_nsec = priv->est->btr_reserve[0];
+ time.tv_sec = priv->est->btr_reserve[1];
basetime = timespec64_to_ktime(time);
- cycle_time = (u64)priv->plat->est->ctr[1] * NSEC_PER_SEC +
- priv->plat->est->ctr[0];
+ cycle_time = (u64)priv->est->ctr[1] * NSEC_PER_SEC +
+ priv->est->ctr[0];
time = stmmac_calc_tas_basetime(basetime,
current_time_ns,
cycle_time);
- priv->plat->est->btr[0] = (u32)time.tv_nsec;
- priv->plat->est->btr[1] = (u32)time.tv_sec;
- priv->plat->est->enable = true;
- ret = stmmac_est_configure(priv, priv, priv->plat->est,
+ priv->est->btr[0] = (u32)time.tv_nsec;
+ priv->est->btr[1] = (u32)time.tv_sec;
+ priv->est->enable = true;
+ ret = stmmac_est_configure(priv, priv, priv->est,
priv->plat->clk_ptp_rate);
- mutex_unlock(&priv->plat->est->lock);
+ mutex_unlock(&priv->est_lock);
if (ret)
netdev_err(priv->dev, "failed to configure EST\n");
}
@@ -172,7 +173,11 @@ static int stmmac_enable(struct ptp_clock_info *ptp,
u32 acr_value;
switch (rq->type) {
- case PTP_CLK_REQ_PEROUT:
+ case PTP_CLK_REQ_PEROUT: {
+ struct timespec64 curr_time;
+ u64 target_ns = 0;
+ u64 ns = 0;
+
/* Reject requests with unsupported flags */
if (rq->perout.flags)
return -EOPNOTSUPP;
@@ -181,6 +186,31 @@ static int stmmac_enable(struct ptp_clock_info *ptp,
cfg->start.tv_sec = rq->perout.start.sec;
cfg->start.tv_nsec = rq->perout.start.nsec;
+
+ /* A time set in the past won't trigger the start of the flexible PPS generation for
+ * the GMAC5. For some reason it does for the GMAC4 but setting a time in the past
+ * should be addressed anyway. Therefore, any value set in the past is considered as
+ * an offset compared to the current MAC system time.
+ * Be aware that an offset too low may not trigger flexible PPS generation
+ * if time spent in this configuration makes the targeted time already outdated.
+ * To address this, add a safe time offset.
+ */
+ if (!cfg->start.tv_sec && cfg->start.tv_nsec < PTP_SAFE_TIME_OFFSET_NS)
+ cfg->start.tv_nsec += PTP_SAFE_TIME_OFFSET_NS;
+
+ target_ns = cfg->start.tv_nsec + ((u64)cfg->start.tv_sec * NSEC_PER_SEC);
+
+ stmmac_get_systime(priv, priv->ptpaddr, &ns);
+ if (ns > TIME64_MAX - PTP_SAFE_TIME_OFFSET_NS)
+ return -EINVAL;
+
+ curr_time = ns_to_timespec64(ns);
+ if (target_ns < ns + PTP_SAFE_TIME_OFFSET_NS) {
+ cfg->start = timespec64_add_safe(cfg->start, curr_time);
+ if (cfg->start.tv_sec == TIME64_MAX)
+ return -EINVAL;
+ }
+
cfg->period.tv_sec = rq->perout.period.sec;
cfg->period.tv_nsec = rq->perout.period.nsec;
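To make the conversion above concrete (illustrative numbers only): with PTP_SAFE_TIME_OFFSET_NS = 500000, a PEROUT request of start = 0 s, 200000 ns while the MAC system time reads 100.0 s is first bumped to 700000 ns; since 700000 < 100 s + 500000 ns, timespec64_add_safe() then turns it into an absolute start of 100.000700 s, i.e. "0.7 ms from now", which is late enough to actually arm the flexible PPS output.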
@@ -191,6 +221,7 @@ static int stmmac_enable(struct ptp_clock_info *ptp,
priv->systime_flags);
write_unlock_irqrestore(&priv->ptp_lock, flags);
break;
+ }
case PTP_CLK_REQ_EXTTS: {
u8 channel;
@@ -248,10 +279,7 @@ static int stmmac_get_syncdevicetime(ktime_t *device,
{
struct stmmac_priv *priv = (struct stmmac_priv *)ctx;
- if (priv->plat->crosststamp)
- return priv->plat->crosststamp(device, system, ctx);
- else
- return -EOPNOTSUPP;
+ return priv->plat->crosststamp(device, system, ctx);
}
static int stmmac_getcrosststamp(struct ptp_clock_info *ptp,
@@ -265,7 +293,7 @@ static int stmmac_getcrosststamp(struct ptp_clock_info *ptp,
}
/* structure describing a PTP hardware clock */
-static struct ptp_clock_info stmmac_ptp_clock_ops = {
+const struct ptp_clock_info stmmac_ptp_clock_ops = {
.owner = THIS_MODULE,
.name = "stmmac ptp",
.max_adj = 62500000,
@@ -279,7 +307,23 @@ static struct ptp_clock_info stmmac_ptp_clock_ops = {
.gettime64 = stmmac_get_time,
.settime64 = stmmac_set_time,
.enable = stmmac_enable,
- .getcrosststamp = stmmac_getcrosststamp,
+};
+
+/* structure describing a PTP hardware clock */
+const struct ptp_clock_info dwmac1000_ptp_clock_ops = {
+ .owner = THIS_MODULE,
+ .name = "stmmac ptp",
+ .max_adj = 62500000,
+ .n_alarm = 0,
+ .n_ext_ts = 1,
+ .n_per_out = 0,
+ .n_pins = 0,
+ .pps = 0,
+ .adjfine = stmmac_adjust_freq,
+ .adjtime = stmmac_adjust_time,
+ .gettime64 = stmmac_get_time,
+ .settime64 = stmmac_set_time,
+ .enable = dwmac1000_ptp_enable,
};
/**
@@ -298,28 +342,40 @@ void stmmac_ptp_register(struct stmmac_priv *priv)
priv->pps[i].available = true;
}
- if (priv->plat->ptp_max_adj)
- stmmac_ptp_clock_ops.max_adj = priv->plat->ptp_max_adj;
-
/* Calculate the clock domain crossing (CDC) error if necessary */
priv->plat->cdc_error_adj = 0;
- if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate)
+ if (priv->plat->core_type == DWMAC_CORE_GMAC4)
priv->plat->cdc_error_adj = (2 * NSEC_PER_SEC) / priv->plat->clk_ptp_rate;
- stmmac_ptp_clock_ops.n_per_out = priv->dma_cap.pps_out_num;
- stmmac_ptp_clock_ops.n_ext_ts = priv->dma_cap.aux_snapshot_n;
+ /* Update the ptp clock parameters based on feature discovery, when
+ * available
+ */
+ if (priv->dma_cap.pps_out_num)
+ priv->ptp_clock_ops.n_per_out = priv->dma_cap.pps_out_num;
+
+ if (priv->dma_cap.aux_snapshot_n)
+ priv->ptp_clock_ops.n_ext_ts = priv->dma_cap.aux_snapshot_n;
+
+ if (priv->plat->ptp_max_adj)
+ priv->ptp_clock_ops.max_adj = priv->plat->ptp_max_adj;
+
+ if (priv->plat->crosststamp)
+ priv->ptp_clock_ops.getcrosststamp = stmmac_getcrosststamp;
rwlock_init(&priv->ptp_lock);
mutex_init(&priv->aux_ts_lock);
- priv->ptp_clock_ops = stmmac_ptp_clock_ops;
priv->ptp_clock = ptp_clock_register(&priv->ptp_clock_ops,
priv->device);
if (IS_ERR(priv->ptp_clock)) {
netdev_err(priv->dev, "ptp_clock_register failed\n");
priv->ptp_clock = NULL;
- } else if (priv->ptp_clock)
+ }
+
+ if (priv->ptp_clock)
netdev_info(priv->dev, "registered PTP clock\n");
+ else
+ mutex_destroy(&priv->aux_ts_lock);
}
/**
@@ -335,7 +391,7 @@ void stmmac_ptp_unregister(struct stmmac_priv *priv)
priv->ptp_clock = NULL;
pr_debug("Removed PTP HW clock successfully on %s\n",
priv->dev->name);
- }
- mutex_destroy(&priv->aux_ts_lock);
+ mutex_destroy(&priv->aux_ts_lock);
+ }
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
index fce3fba2ffd2..3fe0e3a80e80 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
@@ -94,4 +94,17 @@ enum aux_snapshot {
AUX_SNAPSHOT3 = 0x80,
};
+struct ptp_clock_info;
+struct ptp_clock_request;
+struct stmmac_priv;
+
+int dwmac1000_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on);
+
+void dwmac1000_get_ptptime(void __iomem *ptpaddr, u64 *ptp_time);
+void dwmac1000_timestamp_interrupt(struct stmmac_priv *priv);
+
+extern const struct ptp_clock_info stmmac_ptp_clock_ops;
+extern const struct ptp_clock_info dwmac1000_ptp_clock_ops;
+
#endif /* __STMMAC_PTP_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
index 3ca1c2a816ff..e90a2c469b9a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
@@ -382,14 +382,14 @@ static int stmmac_test_phy_loopback(struct stmmac_priv *priv)
if (!priv->dev->phydev)
return -EOPNOTSUPP;
- ret = phy_loopback(priv->dev->phydev, true);
+ ret = phy_loopback(priv->dev->phydev, true, 0);
if (ret)
return ret;
attr.dst = priv->dev->dev_addr;
ret = __stmmac_test_loopback(priv, &attr);
- phy_loopback(priv->dev->phydev, false);
+ phy_loopback(priv->dev->phydev, false, 0);
return ret;
}
@@ -1721,7 +1721,7 @@ static int stmmac_test_sph(struct stmmac_priv *priv)
struct stmmac_packet_attrs attr = { };
int ret;
- if (!priv->sph)
+ if (!priv->sph_active)
return -EOPNOTSUPP;
/* Check for UDP first */
@@ -1985,7 +1985,7 @@ void stmmac_selftest_run(struct net_device *dev,
case STMMAC_LOOPBACK_PHY:
ret = -EOPNOTSUPP;
if (dev->phydev)
- ret = phy_loopback(dev->phydev, true);
+ ret = phy_loopback(dev->phydev, true, 0);
if (!ret)
break;
fallthrough;
@@ -2018,7 +2018,7 @@ void stmmac_selftest_run(struct net_device *dev,
case STMMAC_LOOPBACK_PHY:
ret = -EOPNOTSUPP;
if (dev->phydev)
- ret = phy_loopback(dev->phydev, false);
+ ret = phy_loopback(dev->phydev, false, 0);
if (!ret)
break;
fallthrough;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index cce00719937d..d78652718599 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -262,10 +262,10 @@ static int tc_init(struct stmmac_priv *priv)
unsigned int count;
int ret, i;
- if (dma_cap->l3l4fnum) {
- priv->flow_entries_max = dma_cap->l3l4fnum;
+ priv->flow_entries_max = dma_cap->l3l4fnum;
+ if (priv->flow_entries_max) {
priv->flow_entries = devm_kcalloc(priv->device,
- dma_cap->l3l4fnum,
+ priv->flow_entries_max,
sizeof(*priv->flow_entries),
GFP_KERNEL);
if (!priv->flow_entries)
@@ -282,16 +282,6 @@ static int tc_init(struct stmmac_priv *priv)
if (ret)
return -ENOMEM;
- if (!priv->plat->fpe_cfg) {
- priv->plat->fpe_cfg = devm_kzalloc(priv->device,
- sizeof(*priv->plat->fpe_cfg),
- GFP_KERNEL);
- if (!priv->plat->fpe_cfg)
- return -ENOMEM;
- } else {
- memset(priv->plat->fpe_cfg, 0, sizeof(*priv->plat->fpe_cfg));
- }
-
/* Fail silently as we can still use remaining features, e.g. CBS */
if (!dma_cap->frpsel)
return 0;
@@ -343,10 +333,11 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
struct tc_cbs_qopt_offload *qopt)
{
u32 tx_queues_count = priv->plat->tx_queues_to_use;
+ s64 port_transmit_rate_kbps;
u32 queue = qopt->queue;
- u32 ptr, speed_div;
u32 mode_to_use;
u64 value;
+ u32 ptr;
int ret;
/* Queue 0 is not AVB capable */
@@ -355,30 +346,30 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
if (!priv->dma_cap.av)
return -EOPNOTSUPP;
- /* Port Transmit Rate and Speed Divider */
- switch (priv->speed) {
- case SPEED_10000:
- ptr = 32;
- speed_div = 10000000;
- break;
- case SPEED_5000:
- ptr = 32;
- speed_div = 5000000;
- break;
- case SPEED_2500:
- ptr = 8;
- speed_div = 2500000;
- break;
- case SPEED_1000:
- ptr = 8;
- speed_div = 1000000;
- break;
- case SPEED_100:
- ptr = 4;
- speed_div = 100000;
- break;
- default:
- return -EOPNOTSUPP;
+ port_transmit_rate_kbps = qopt->idleslope - qopt->sendslope;
+
+ if (qopt->enable) {
+ /* Port Transmit Rate and Speed Divider */
+ switch (div_s64(port_transmit_rate_kbps, 1000)) {
+ case SPEED_10000:
+ case SPEED_5000:
+ ptr = 32;
+ break;
+ case SPEED_2500:
+ case SPEED_1000:
+ ptr = 8;
+ break;
+ case SPEED_100:
+ ptr = 4;
+ break;
+ default:
+ netdev_err(priv->dev,
+ "Invalid portTransmitRate %lld (idleSlope - sendSlope)\n",
+ port_transmit_rate_kbps);
+ return -EINVAL;
+ }
+ } else {
+ ptr = 0;
}
mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
@@ -395,13 +386,14 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
return ret;
priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
+ return 0;
}
/* Final adjustments for HW */
- value = div_s64(qopt->idleslope * 1024ll * ptr, speed_div);
+ value = div_s64(qopt->idleslope * 1024ll * ptr, port_transmit_rate_kbps);
priv->plat->tx_queues_cfg[queue].idle_slope = value & GENMASK(31, 0);
- value = div_s64(-qopt->sendslope * 1024ll * ptr, speed_div);
+ value = div_s64(-qopt->sendslope * 1024ll * ptr, port_transmit_rate_kbps);
priv->plat->tx_queues_cfg[queue].send_slope = value & GENMASK(31, 0);
value = qopt->hicredit * 1024ll * 8;
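As a worked example of the new divider (illustrative numbers only): a credit-based shaper configured with idleSlope = 750000 kbit/s and sendSlope = -250000 kbit/s gives port_transmit_rate_kbps = 1000000, so div_s64(1000000, 1000) matches SPEED_1000 and ptr = 8; idle_slope then becomes 750000 * 1024 * 8 / 1000000 = 6144 and send_slope becomes 250000 * 1024 * 8 / 1000000 = 2048, the same values the old speed_div path produced when the link actually ran at 1 Gbit/s.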
@@ -918,7 +910,6 @@ struct timespec64 stmmac_calc_tas_basetime(ktime_t old_base_time,
static void tc_taprio_map_maxsdu_txq(struct stmmac_priv *priv,
struct tc_taprio_qopt_offload *qopt)
{
- struct plat_stmmacenet_data *plat = priv->plat;
u32 num_tc = qopt->mqprio.qopt.num_tc;
u32 offset, count, i, j;
@@ -933,7 +924,7 @@ static void tc_taprio_map_maxsdu_txq(struct stmmac_priv *priv,
count = qopt->mqprio.qopt.count[i];
for (j = offset; j < offset + count; j++)
- plat->est->max_sdu[j] = qopt->max_sdu[i] + ETH_HLEN - ETH_TLEN;
+ priv->est->max_sdu[j] = qopt->max_sdu[i] + ETH_HLEN - ETH_TLEN;
}
}
@@ -941,10 +932,9 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
struct tc_taprio_qopt_offload *qopt)
{
u32 size, wid = priv->dma_cap.estwid, dep = priv->dma_cap.estdep;
- struct plat_stmmacenet_data *plat = priv->plat;
+ struct netlink_ext_ack *extack = qopt->mqprio.extack;
struct timespec64 time, current_time, qopt_time;
ktime_t current_time_ns;
- bool fpe = false;
int i, ret = 0;
u64 ctr;
@@ -991,133 +981,111 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
if (qopt->cmd == TAPRIO_CMD_DESTROY)
goto disable;
- if (qopt->num_entries >= dep)
+ if (qopt->num_entries > dep)
return -EINVAL;
if (!qopt->cycle_time)
return -ERANGE;
if (qopt->cycle_time_extension >= BIT(wid + 7))
return -ERANGE;
- if (!plat->est) {
- plat->est = devm_kzalloc(priv->device, sizeof(*plat->est),
+ if (!priv->est) {
+ priv->est = devm_kzalloc(priv->device, sizeof(*priv->est),
GFP_KERNEL);
- if (!plat->est)
+ if (!priv->est)
return -ENOMEM;
- mutex_init(&priv->plat->est->lock);
+ mutex_init(&priv->est_lock);
} else {
- memset(plat->est, 0, sizeof(*plat->est));
+ mutex_lock(&priv->est_lock);
+ memset(priv->est, 0, sizeof(*priv->est));
+ mutex_unlock(&priv->est_lock);
}
size = qopt->num_entries;
- mutex_lock(&priv->plat->est->lock);
- priv->plat->est->gcl_size = size;
- priv->plat->est->enable = qopt->cmd == TAPRIO_CMD_REPLACE;
- mutex_unlock(&priv->plat->est->lock);
+ mutex_lock(&priv->est_lock);
+ priv->est->gcl_size = size;
+ priv->est->enable = qopt->cmd == TAPRIO_CMD_REPLACE;
+ mutex_unlock(&priv->est_lock);
for (i = 0; i < size; i++) {
s64 delta_ns = qopt->entries[i].interval;
u32 gates = qopt->entries[i].gate_mask;
- if (delta_ns > GENMASK(wid, 0))
+ if (delta_ns > GENMASK(wid - 1, 0))
return -ERANGE;
if (gates > GENMASK(31 - wid, 0))
return -ERANGE;
switch (qopt->entries[i].command) {
case TC_TAPRIO_CMD_SET_GATES:
- if (fpe)
- return -EINVAL;
break;
case TC_TAPRIO_CMD_SET_AND_HOLD:
gates |= BIT(0);
- fpe = true;
break;
case TC_TAPRIO_CMD_SET_AND_RELEASE:
gates &= ~BIT(0);
- fpe = true;
break;
default:
return -EOPNOTSUPP;
}
- priv->plat->est->gcl[i] = delta_ns | (gates << wid);
+ priv->est->gcl[i] = delta_ns | (gates << wid);
}
- mutex_lock(&priv->plat->est->lock);
+ mutex_lock(&priv->est_lock);
/* Adjust for real system time */
priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time);
current_time_ns = timespec64_to_ktime(current_time);
time = stmmac_calc_tas_basetime(qopt->base_time, current_time_ns,
qopt->cycle_time);
- priv->plat->est->btr[0] = (u32)time.tv_nsec;
- priv->plat->est->btr[1] = (u32)time.tv_sec;
+ priv->est->btr[0] = (u32)time.tv_nsec;
+ priv->est->btr[1] = (u32)time.tv_sec;
qopt_time = ktime_to_timespec64(qopt->base_time);
- priv->plat->est->btr_reserve[0] = (u32)qopt_time.tv_nsec;
- priv->plat->est->btr_reserve[1] = (u32)qopt_time.tv_sec;
+ priv->est->btr_reserve[0] = (u32)qopt_time.tv_nsec;
+ priv->est->btr_reserve[1] = (u32)qopt_time.tv_sec;
ctr = qopt->cycle_time;
- priv->plat->est->ctr[0] = do_div(ctr, NSEC_PER_SEC);
- priv->plat->est->ctr[1] = (u32)ctr;
+ priv->est->ctr[0] = do_div(ctr, NSEC_PER_SEC);
+ priv->est->ctr[1] = (u32)ctr;
- priv->plat->est->ter = qopt->cycle_time_extension;
+ priv->est->ter = qopt->cycle_time_extension;
tc_taprio_map_maxsdu_txq(priv, qopt);
- if (fpe && !priv->dma_cap.fpesel) {
- mutex_unlock(&priv->plat->est->lock);
- return -EOPNOTSUPP;
- }
-
- /* Actual FPE register configuration will be done after FPE handshake
- * is success.
- */
- priv->plat->fpe_cfg->enable = fpe;
-
- ret = stmmac_est_configure(priv, priv, priv->plat->est,
+ ret = stmmac_est_configure(priv, priv, priv->est,
priv->plat->clk_ptp_rate);
- mutex_unlock(&priv->plat->est->lock);
+ mutex_unlock(&priv->est_lock);
if (ret) {
netdev_err(priv->dev, "failed to configure EST\n");
goto disable;
}
- netdev_info(priv->dev, "configured EST\n");
-
- if (fpe) {
- stmmac_fpe_handshake(priv, true);
- netdev_info(priv->dev, "start FPE handshake\n");
- }
+ ret = stmmac_fpe_map_preemption_class(priv, priv->dev, extack,
+ qopt->mqprio.preemptible_tcs);
+ if (ret)
+ goto disable;
return 0;
disable:
- if (priv->plat->est) {
- mutex_lock(&priv->plat->est->lock);
- priv->plat->est->enable = false;
- stmmac_est_configure(priv, priv, priv->plat->est,
+ if (priv->est) {
+ mutex_lock(&priv->est_lock);
+ priv->est->enable = false;
+ stmmac_est_configure(priv, priv, priv->est,
priv->plat->clk_ptp_rate);
/* Reset taprio status */
for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
priv->xstats.max_sdu_txq_drop[i] = 0;
priv->xstats.mtl_est_txq_hlbf[i] = 0;
+ priv->xstats.mtl_est_txq_hlbs[i] = 0;
}
- mutex_unlock(&priv->plat->est->lock);
+ mutex_unlock(&priv->est_lock);
}
- priv->plat->fpe_cfg->enable = false;
- stmmac_fpe_configure(priv, priv->ioaddr,
- priv->plat->fpe_cfg,
- priv->plat->tx_queues_to_use,
- priv->plat->rx_queues_to_use,
- false);
- netdev_info(priv->dev, "disabled FPE\n");
-
- stmmac_fpe_handshake(priv, false);
- netdev_info(priv->dev, "stop FPE handshake\n");
+ stmmac_fpe_map_preemption_class(priv, priv->dev, extack, 0);
return ret;
}
@@ -1130,7 +1098,8 @@ static void tc_taprio_stats(struct stmmac_priv *priv,
for (i = 0; i < priv->plat->tx_queues_to_use; i++)
window_drops += priv->xstats.max_sdu_txq_drop[i] +
- priv->xstats.mtl_est_txq_hlbf[i];
+ priv->xstats.mtl_est_txq_hlbf[i] +
+ priv->xstats.mtl_est_txq_hlbs[i];
qopt->stats.window_drops = window_drops;
/* Transmission overrun doesn't happen for stmmac, hence always 0 */
@@ -1144,7 +1113,8 @@ static void tc_taprio_queue_stats(struct stmmac_priv *priv,
int queue = qopt->queue_stats.queue;
q_stats->stats.window_drops = priv->xstats.max_sdu_txq_drop[queue] +
- priv->xstats.mtl_est_txq_hlbf[queue];
+ priv->xstats.mtl_est_txq_hlbf[queue] +
+ priv->xstats.mtl_est_txq_hlbs[queue];
/* Transmission overrun doesn't happen for stmmac, hence always 0 */
q_stats->stats.tx_overruns = 0;
@@ -1173,6 +1143,18 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
return err;
}
+static int tc_setup_taprio_without_fpe(struct stmmac_priv *priv,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ if (!qopt->mqprio.preemptible_tcs)
+ return tc_setup_taprio(priv, qopt);
+
+ NL_SET_ERR_MSG_MOD(qopt->mqprio.extack,
+ "taprio with FPE is not implemented for this MAC");
+
+ return -EOPNOTSUPP;
+}
+
static int tc_setup_etf(struct stmmac_priv *priv,
struct tc_etf_qopt_offload *qopt)
{
@@ -1197,6 +1179,13 @@ static int tc_query_caps(struct stmmac_priv *priv,
struct tc_query_caps_base *base)
{
switch (base->type) {
+ case TC_SETUP_QDISC_MQPRIO: {
+ struct tc_mqprio_caps *caps = base->caps;
+
+ caps->validate_queue_counts = true;
+
+ return 0;
+ }
case TC_SETUP_QDISC_TAPRIO: {
struct tc_taprio_caps *caps = base->caps;
@@ -1213,6 +1202,81 @@ static int tc_query_caps(struct stmmac_priv *priv,
}
}
+static void stmmac_reset_tc_mqprio(struct net_device *ndev,
+ struct netlink_ext_ack *extack)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ netdev_reset_tc(ndev);
+ netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
+ stmmac_fpe_map_preemption_class(priv, ndev, extack, 0);
+}
+
+static int tc_setup_dwmac510_mqprio(struct stmmac_priv *priv,
+ struct tc_mqprio_qopt_offload *mqprio)
+{
+ struct netlink_ext_ack *extack = mqprio->extack;
+ struct tc_mqprio_qopt *qopt = &mqprio->qopt;
+ u32 offset, count, num_stack_tx_queues = 0;
+ struct net_device *ndev = priv->dev;
+ u32 num_tc = qopt->num_tc;
+ int err;
+
+ if (!num_tc) {
+ stmmac_reset_tc_mqprio(ndev, extack);
+ return 0;
+ }
+
+ err = netdev_set_num_tc(ndev, num_tc);
+ if (err)
+ return err;
+
+ for (u32 tc = 0; tc < num_tc; tc++) {
+ offset = qopt->offset[tc];
+ count = qopt->count[tc];
+ num_stack_tx_queues += count;
+
+ err = netdev_set_tc_queue(ndev, tc, count, offset);
+ if (err)
+ goto err_reset_tc;
+ }
+
+ err = netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
+ if (err)
+ goto err_reset_tc;
+
+ err = stmmac_fpe_map_preemption_class(priv, ndev, extack,
+ mqprio->preemptible_tcs);
+ if (err)
+ goto err_reset_tc;
+
+ return 0;
+
+err_reset_tc:
+ stmmac_reset_tc_mqprio(ndev, extack);
+
+ return err;
+}
+
+static int tc_setup_mqprio_unimplemented(struct stmmac_priv *priv,
+ struct tc_mqprio_qopt_offload *mqprio)
+{
+ NL_SET_ERR_MSG_MOD(mqprio->extack,
+ "mqprio HW offload is not implemented for this MAC");
+ return -EOPNOTSUPP;
+}
+
+const struct stmmac_tc_ops dwmac4_tc_ops = {
+ .init = tc_init,
+ .setup_cls_u32 = tc_setup_cls_u32,
+ .setup_cbs = tc_setup_cbs,
+ .setup_cls = tc_setup_cls,
+ .setup_taprio = tc_setup_taprio_without_fpe,
+ .setup_etf = tc_setup_etf,
+ .query_caps = tc_query_caps,
+ .setup_mqprio = tc_setup_mqprio_unimplemented,
+};
+
const struct stmmac_tc_ops dwmac510_tc_ops = {
.init = tc_init,
.setup_cls_u32 = tc_setup_cls_u32,
@@ -1221,4 +1285,5 @@ const struct stmmac_tc_ops dwmac510_tc_ops = {
.setup_taprio = tc_setup_taprio,
.setup_etf = tc_setup_etf,
.query_caps = tc_query_caps,
+ .setup_mqprio = tc_setup_dwmac510_mqprio,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c
new file mode 100644
index 000000000000..b18404dd5a8b
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c
@@ -0,0 +1,375 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2025, Altera Corporation
+ * stmmac VLAN (802.1Q) handling
+ */
+
+#include "stmmac.h"
+#include "stmmac_vlan.h"
+
+static void vlan_write_single(struct net_device *dev, u16 vid)
+{
+ void __iomem *ioaddr = (void __iomem *)dev->base_addr;
+ u32 val;
+
+ val = readl(ioaddr + VLAN_TAG);
+ val &= ~VLAN_TAG_VID;
+ val |= VLAN_TAG_ETV | vid;
+
+ writel(val, ioaddr + VLAN_TAG);
+}
+
+static int vlan_write_filter(struct net_device *dev,
+ struct mac_device_info *hw,
+ u8 index, u32 data)
+{
+ void __iomem *ioaddr = (void __iomem *)dev->base_addr;
+ int ret;
+ u32 val;
+
+ if (index >= hw->num_vlan)
+ return -EINVAL;
+
+ writel(data, ioaddr + VLAN_TAG_DATA);
+
+ val = readl(ioaddr + VLAN_TAG);
+ val &= ~(VLAN_TAG_CTRL_OFS_MASK |
+ VLAN_TAG_CTRL_CT |
+ VLAN_TAG_CTRL_OB);
+ val |= (index << VLAN_TAG_CTRL_OFS_SHIFT) | VLAN_TAG_CTRL_OB;
+
+ writel(val, ioaddr + VLAN_TAG);
+
+ ret = readl_poll_timeout(ioaddr + VLAN_TAG, val,
+ !(val & VLAN_TAG_CTRL_OB),
+ 1000, 500000);
+ if (ret) {
+ netdev_err(dev, "Timeout accessing MAC_VLAN_Tag_Filter\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int vlan_add_hw_rx_fltr(struct net_device *dev,
+ struct mac_device_info *hw,
+ __be16 proto, u16 vid)
+{
+ int index = -1;
+ u32 val = 0;
+ int i, ret;
+
+ if (vid > 4095)
+ return -EINVAL;
+
+ /* Single Rx VLAN Filter */
+ if (hw->num_vlan == 1) {
+ /* For single VLAN filter, VID 0 means VLAN promiscuous */
+ if (vid == 0) {
+ netdev_warn(dev, "Adding VLAN ID 0 is not supported\n");
+ return -EPERM;
+ }
+
+ if (hw->vlan_filter[0] & VLAN_TAG_VID) {
+ netdev_err(dev, "Only single VLAN ID supported\n");
+ return -EPERM;
+ }
+
+ hw->vlan_filter[0] = vid;
+ vlan_write_single(dev, vid);
+
+ return 0;
+ }
+
+ /* Extended Rx VLAN Filter Enable */
+ val |= VLAN_TAG_DATA_ETV | VLAN_TAG_DATA_VEN | vid;
+
+ for (i = 0; i < hw->num_vlan; i++) {
+ if (hw->vlan_filter[i] == val)
+ return 0;
+ else if (!(hw->vlan_filter[i] & VLAN_TAG_DATA_VEN))
+ index = i;
+ }
+
+ if (index == -1) {
+ netdev_err(dev, "MAC_VLAN_Tag_Filter full (size: %0u)\n",
+ hw->num_vlan);
+ return -EPERM;
+ }
+
+ ret = vlan_write_filter(dev, hw, index, val);
+
+ if (!ret)
+ hw->vlan_filter[index] = val;
+
+ return ret;
+}
+
+static int vlan_del_hw_rx_fltr(struct net_device *dev,
+ struct mac_device_info *hw,
+ __be16 proto, u16 vid)
+{
+ int i, ret = 0;
+
+ /* Single Rx VLAN Filter */
+ if (hw->num_vlan == 1) {
+ if ((hw->vlan_filter[0] & VLAN_TAG_VID) == vid) {
+ hw->vlan_filter[0] = 0;
+ vlan_write_single(dev, 0);
+ }
+ return 0;
+ }
+
+ /* Extended Rx VLAN Filter Enable */
+ for (i = 0; i < hw->num_vlan; i++) {
+ if ((hw->vlan_filter[i] & VLAN_TAG_DATA_VEN) &&
+ ((hw->vlan_filter[i] & VLAN_TAG_DATA_VID) == vid)) {
+ ret = vlan_write_filter(dev, hw, i, 0);
+
+ if (!ret)
+ hw->vlan_filter[i] = 0;
+ else
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static void vlan_restore_hw_rx_fltr(struct net_device *dev,
+ struct mac_device_info *hw)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+ u32 hash;
+ u32 val;
+ int i;
+
+ /* Single Rx VLAN Filter */
+ if (hw->num_vlan == 1) {
+ vlan_write_single(dev, hw->vlan_filter[0]);
+ return;
+ }
+
+ /* Extended Rx VLAN Filter Enable */
+ for (i = 0; i < hw->num_vlan; i++) {
+ if (hw->vlan_filter[i] & VLAN_TAG_DATA_VEN) {
+ val = hw->vlan_filter[i];
+ vlan_write_filter(dev, hw, i, val);
+ }
+ }
+
+ hash = readl(ioaddr + VLAN_HASH_TABLE);
+ if (hash & VLAN_VLHT) {
+ value = readl(ioaddr + VLAN_TAG);
+ value |= VLAN_VTHM;
+ writel(value, ioaddr + VLAN_TAG);
+ }
+}
+
+static void vlan_update_hash(struct mac_device_info *hw, u32 hash,
+ u16 perfect_match, bool is_double)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ writel(hash, ioaddr + VLAN_HASH_TABLE);
+
+ value = readl(ioaddr + VLAN_TAG);
+
+ if (hash) {
+ value |= VLAN_VTHM | VLAN_ETV;
+ if (is_double) {
+ value |= VLAN_EDVLP;
+ value |= VLAN_ESVL;
+ value |= VLAN_DOVLTC;
+ }
+
+ writel(value, ioaddr + VLAN_TAG);
+ } else if (perfect_match) {
+ u32 value = VLAN_ETV;
+
+ if (is_double) {
+ value |= VLAN_EDVLP;
+ value |= VLAN_ESVL;
+ value |= VLAN_DOVLTC;
+ }
+
+ writel(value | perfect_match, ioaddr + VLAN_TAG);
+ } else {
+ value &= ~(VLAN_VTHM | VLAN_ETV);
+ value &= ~(VLAN_EDVLP | VLAN_ESVL);
+ value &= ~VLAN_DOVLTC;
+ value &= ~VLAN_VID;
+
+ writel(value, ioaddr + VLAN_TAG);
+ }
+}
+
+static void vlan_enable(struct mac_device_info *hw, u32 type)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + VLAN_INCL);
+ value |= VLAN_VLTI;
+ value &= ~VLAN_CSVL; /* Only use CVLAN */
+ value &= ~VLAN_VLC;
+ value |= (type << VLAN_VLC_SHIFT) & VLAN_VLC;
+ writel(value, ioaddr + VLAN_INCL);
+}
+
+static void vlan_rx_hw(struct mac_device_info *hw,
+ struct dma_desc *rx_desc, struct sk_buff *skb)
+{
+ if (hw->desc->get_rx_vlan_valid(rx_desc)) {
+ u16 vid = hw->desc->get_rx_vlan_tci(rx_desc);
+
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
+ }
+}
+
+static void vlan_set_hw_mode(struct mac_device_info *hw)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value = readl(ioaddr + VLAN_TAG);
+
+ value &= ~VLAN_TAG_CTRL_EVLS_MASK;
+
+ if (hw->hw_vlan_en)
+ /* Always strip VLAN on Receive */
+ value |= VLAN_TAG_STRIP_ALL;
+ else
+ /* Do not strip VLAN on Receive */
+ value |= VLAN_TAG_STRIP_NONE;
+
+ /* Enable outer VLAN Tag in Rx DMA descriptor */
+ value |= VLAN_TAG_CTRL_EVLRXS;
+ writel(value, ioaddr + VLAN_TAG);
+}
+
+static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
+ u16 perfect_match, bool is_double)
+{
+ void __iomem *ioaddr = hw->pcsr;
+
+ writel(hash, ioaddr + VLAN_HASH_TABLE);
+
+ if (hash) {
+ u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
+
+ value |= XGMAC_FILTER_VTFE;
+
+ writel(value, ioaddr + XGMAC_PACKET_FILTER);
+
+ value = readl(ioaddr + VLAN_TAG);
+
+ value |= VLAN_VTHM | VLAN_ETV;
+ if (is_double) {
+ value |= VLAN_EDVLP;
+ value |= VLAN_ESVL;
+ value |= VLAN_DOVLTC;
+ } else {
+ value &= ~VLAN_EDVLP;
+ value &= ~VLAN_ESVL;
+ value &= ~VLAN_DOVLTC;
+ }
+
+ value &= ~VLAN_VID;
+ writel(value, ioaddr + VLAN_TAG);
+ } else if (perfect_match) {
+ u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
+
+ value |= XGMAC_FILTER_VTFE;
+
+ writel(value, ioaddr + XGMAC_PACKET_FILTER);
+
+ value = readl(ioaddr + VLAN_TAG);
+
+ value &= ~VLAN_VTHM;
+ value |= VLAN_ETV;
+ if (is_double) {
+ value |= VLAN_EDVLP;
+ value |= VLAN_ESVL;
+ value |= VLAN_DOVLTC;
+ } else {
+ value &= ~VLAN_EDVLP;
+ value &= ~VLAN_ESVL;
+ value &= ~VLAN_DOVLTC;
+ }
+
+ value &= ~VLAN_VID;
+ writel(value | perfect_match, ioaddr + VLAN_TAG);
+ } else {
+ u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
+
+ value &= ~XGMAC_FILTER_VTFE;
+
+ writel(value, ioaddr + XGMAC_PACKET_FILTER);
+
+ value = readl(ioaddr + VLAN_TAG);
+
+ value &= ~(VLAN_VTHM | VLAN_ETV);
+ value &= ~(VLAN_EDVLP | VLAN_ESVL);
+ value &= ~VLAN_DOVLTC;
+ value &= ~VLAN_VID;
+
+ writel(value, ioaddr + VLAN_TAG);
+ }
+}
+
+const struct stmmac_vlan_ops dwmac_vlan_ops = {
+ .update_vlan_hash = vlan_update_hash,
+ .enable_vlan = vlan_enable,
+ .add_hw_vlan_rx_fltr = vlan_add_hw_rx_fltr,
+ .del_hw_vlan_rx_fltr = vlan_del_hw_rx_fltr,
+ .restore_hw_vlan_rx_fltr = vlan_restore_hw_rx_fltr,
+ .rx_hw_vlan = vlan_rx_hw,
+ .set_hw_vlan_mode = vlan_set_hw_mode,
+};
+
+const struct stmmac_vlan_ops dwxlgmac2_vlan_ops = {
+ .update_vlan_hash = dwxgmac2_update_vlan_hash,
+ .enable_vlan = vlan_enable,
+};
+
+const struct stmmac_vlan_ops dwxgmac210_vlan_ops = {
+ .update_vlan_hash = dwxgmac2_update_vlan_hash,
+ .enable_vlan = vlan_enable,
+ .add_hw_vlan_rx_fltr = vlan_add_hw_rx_fltr,
+ .del_hw_vlan_rx_fltr = vlan_del_hw_rx_fltr,
+ .restore_hw_vlan_rx_fltr = vlan_restore_hw_rx_fltr,
+ .rx_hw_vlan = vlan_rx_hw,
+ .set_hw_vlan_mode = vlan_set_hw_mode,
+};
+
+u32 stmmac_get_num_vlan(void __iomem *ioaddr)
+{
+ u32 val, num_vlan;
+
+ val = readl(ioaddr + HW_FEATURE3);
+ switch (val & VLAN_HW_FEAT_NRVF) {
+ case 0:
+ num_vlan = 1;
+ break;
+ case 1:
+ num_vlan = 4;
+ break;
+ case 2:
+ num_vlan = 8;
+ break;
+ case 3:
+ num_vlan = 16;
+ break;
+ case 4:
+ num_vlan = 24;
+ break;
+ case 5:
+ num_vlan = 32;
+ break;
+ default:
+ num_vlan = 1;
+ }
+
+ return num_vlan;
+}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.h
new file mode 100644
index 000000000000..c24f89a9049b
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2025, Altera Corporation
+ * stmmac VLAN (802.1Q) handling
+ */
+
+#ifndef __STMMAC_VLAN_H__
+#define __STMMAC_VLAN_H__
+
+#include <linux/bitfield.h>
+#include "dwxgmac2.h"
+
+#define VLAN_TAG 0x00000050
+#define VLAN_TAG_DATA 0x00000054
+#define VLAN_HASH_TABLE 0x00000058
+#define VLAN_INCL 0x00000060
+
+/* MAC VLAN */
+#define VLAN_EDVLP BIT(26)
+#define VLAN_VTHM BIT(25)
+#define VLAN_DOVLTC BIT(20)
+#define VLAN_ESVL BIT(18)
+#define VLAN_ETV BIT(16)
+#define VLAN_VID GENMASK(15, 0)
+#define VLAN_VLTI BIT(20)
+#define VLAN_CSVL BIT(19)
+#define VLAN_VLC GENMASK(17, 16)
+#define VLAN_VLC_SHIFT 16
+#define VLAN_VLHT GENMASK(15, 0)
+
+/* MAC VLAN Tag */
+#define VLAN_TAG_VID GENMASK(15, 0)
+#define VLAN_TAG_ETV BIT(16)
+
+/* MAC VLAN Tag Control */
+#define VLAN_TAG_CTRL_OB BIT(0)
+#define VLAN_TAG_CTRL_CT BIT(1)
+#define VLAN_TAG_CTRL_OFS_MASK GENMASK(6, 2)
+#define VLAN_TAG_CTRL_OFS_SHIFT 2
+#define VLAN_TAG_CTRL_EVLS_MASK GENMASK(22, 21)
+#define VLAN_TAG_CTRL_EVLS_SHIFT 21
+#define VLAN_TAG_CTRL_EVLRXS BIT(24)
+
+#define VLAN_TAG_STRIP_NONE FIELD_PREP(VLAN_TAG_CTRL_EVLS_MASK, 0x0)
+#define VLAN_TAG_STRIP_PASS FIELD_PREP(VLAN_TAG_CTRL_EVLS_MASK, 0x1)
+#define VLAN_TAG_STRIP_FAIL FIELD_PREP(VLAN_TAG_CTRL_EVLS_MASK, 0x2)
+#define VLAN_TAG_STRIP_ALL FIELD_PREP(VLAN_TAG_CTRL_EVLS_MASK, 0x3)
+
+/* MAC VLAN Tag Data/Filter */
+#define VLAN_TAG_DATA_VID GENMASK(15, 0)
+#define VLAN_TAG_DATA_VEN BIT(16)
+#define VLAN_TAG_DATA_ETV BIT(17)
+
+/* MAC VLAN HW FEAT */
+#define HW_FEATURE3 0x00000128
+#define VLAN_HW_FEAT_NRVF GENMASK(2, 0)
+
+extern const struct stmmac_vlan_ops dwmac_vlan_ops;
+extern const struct stmmac_vlan_ops dwxgmac210_vlan_ops;
+extern const struct stmmac_vlan_ops dwxlgmac2_vlan_ops;
+
+u32 stmmac_get_num_vlan(void __iomem *ioaddr);
+
+#endif /* __STMMAC_VLAN_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c
index aa6f16d3df64..d7e4db7224b0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c
@@ -129,7 +129,7 @@ int stmmac_xdp_set_prog(struct stmmac_priv *priv, struct bpf_prog *prog,
bpf_prog_put(old_prog);
/* Disable RX SPH for XDP operation */
- priv->sph = priv->sph_cap && !stmmac_xdp_is_enabled(priv);
+ priv->sph_active = priv->sph_capable && !stmmac_xdp_is_enabled(priv);
if (if_running && need_update)
stmmac_xdp_open(dev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h
index 896dc987d4ef..77ce8cfbe976 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h
@@ -4,7 +4,6 @@
#ifndef _STMMAC_XDP_H_
#define _STMMAC_XDP_H_
-#define STMMAC_MAX_RX_BUF_SIZE(num) (((num) * PAGE_SIZE) - XDP_PACKET_HEADROOM)
#define STMMAC_RX_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
int stmmac_xdp_setup_pool(struct stmmac_priv *priv, struct xsk_buff_pool *pool,
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index bfb903506367..acfb523214b9 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -73,6 +73,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
+#include <linux/skbuff_ref.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/random.h>
@@ -3778,7 +3779,7 @@ static void cas_shutdown(struct cas *cp)
/* Make us not-running to avoid timers respawning */
cp->hw_running = 0;
- del_timer_sync(&cp->link_timer);
+ timer_delete_sync(&cp->link_timer);
/* Stop the reset task */
#if 0
@@ -3803,7 +3804,7 @@ static int cas_change_mtu(struct net_device *dev, int new_mtu)
{
struct cas *cp = netdev_priv(dev);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
if (!netif_running(dev) || !netif_device_present(dev))
return 0;
@@ -4020,7 +4021,7 @@ done:
static void cas_link_timer(struct timer_list *t)
{
- struct cas *cp = from_timer(cp, t, link_timer);
+ struct cas *cp = timer_container_of(cp, t, link_timer);
int mask, pending = 0, reset = 0;
unsigned long flags;
diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c
index a9a6670b5ff1..6fc37ab27f7b 100644
--- a/drivers/net/ethernet/sun/ldmvsw.c
+++ b/drivers/net/ethernet/sun/ldmvsw.c
@@ -390,7 +390,7 @@ static int vsw_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
return 0;
err_out_del_timer:
- del_timer_sync(&port->clean_timer);
+ timer_delete_sync(&port->clean_timer);
list_del_rcu(&port->list);
synchronize_rcu();
netif_napi_del(&port->napi);
@@ -408,8 +408,8 @@ static void vsw_port_remove(struct vio_dev *vdev)
unsigned long flags;
if (port) {
- del_timer_sync(&port->vio.timer);
- del_timer_sync(&port->clean_timer);
+ timer_delete_sync(&port->vio.timer);
+ timer_delete_sync(&port->clean_timer);
napi_disable(&port->napi);
unregister_netdev(port->dev);
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index f68aa813d4fb..893216b0e08d 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -2225,7 +2225,7 @@ static int niu_link_status(struct niu *np, int *link_up_p)
static void niu_timer(struct timer_list *t)
{
- struct niu *np = from_timer(np, t, timer);
+ struct niu *np = timer_container_of(np, t, timer);
unsigned long off;
int err, link_up;
@@ -3303,7 +3303,7 @@ static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr,
addr &= PAGE_MASK;
pp = &rp->rxhash[h];
for (; (p = *pp) != NULL; pp = &niu_next_page(p)) {
- if (p->index == addr) {
+ if (p->private == addr) {
*link = pp;
goto found;
}
@@ -3318,7 +3318,7 @@ static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base)
{
unsigned int h = niu_hash_rxaddr(rp, base);
- page->index = base;
+ page->private = base;
niu_next_page(page) = rp->rxhash[h];
rp->rxhash[h] = page;
}
@@ -3336,7 +3336,7 @@ static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp,
addr = np->ops->map_page(np->device, page, 0,
PAGE_SIZE, DMA_FROM_DEVICE);
- if (!addr) {
+ if (np->ops->mapping_error(np->device, addr)) {
__free_page(page);
return -ENOMEM;
}
@@ -3400,11 +3400,11 @@ static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp)
rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
RCR_ENTRY_PKTBUFSZ_SHIFT];
- if ((page->index + PAGE_SIZE) - rcr_size == addr) {
+ if ((page->private + PAGE_SIZE) - rcr_size == addr) {
*link = niu_next_page(page);
- np->ops->unmap_page(np->device, page->index,
+ np->ops->unmap_page(np->device, page->private,
PAGE_SIZE, DMA_FROM_DEVICE);
- page->index = 0;
+ page->private = 0;
niu_next_page(page) = NULL;
__free_page(page);
rp->rbr_refill_pending++;
@@ -3469,11 +3469,11 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
append_size = append_size - skb->len;
niu_rx_skb_append(skb, page, off, append_size, rcr_size);
- if ((page->index + rp->rbr_block_size) - rcr_size == addr) {
+ if ((page->private + rp->rbr_block_size) - rcr_size == addr) {
*link = niu_next_page(page);
- np->ops->unmap_page(np->device, page->index,
+ np->ops->unmap_page(np->device, page->private,
PAGE_SIZE, DMA_FROM_DEVICE);
- page->index = 0;
+ page->private = 0;
niu_next_page(page) = NULL;
rp->rbr_refill_pending++;
} else
@@ -3538,11 +3538,11 @@ static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp)
page = rp->rxhash[i];
while (page) {
struct page *next = niu_next_page(page);
- u64 base = page->index;
+ u64 base = page->private;
np->ops->unmap_page(np->device, base, PAGE_SIZE,
DMA_FROM_DEVICE);
- page->index = 0;
+ page->private = 0;
niu_next_page(page) = NULL;
__free_page(page);
@@ -5825,7 +5825,7 @@ static int niu_init_mac(struct niu *np)
/* This looks hookey but the RX MAC reset we just did will
* undo some of the state we setup in niu_init_tx_mac() so we
* have to call it again. In particular, the RX MAC reset will
- * set the XMAC_MAX register back to it's default value.
+ * set the XMAC_MAX register back to its default value.
*/
niu_init_tx_mac(np);
niu_enable_tx_mac(np, 1);
@@ -6086,7 +6086,7 @@ static void niu_enable_napi(struct niu *np)
int i;
for (i = 0; i < np->num_ldg; i++)
- napi_enable(&np->ldg[i].napi);
+ napi_enable_locked(&np->ldg[i].napi);
}
static void niu_disable_napi(struct niu *np)
@@ -6116,7 +6116,9 @@ static int niu_open(struct net_device *dev)
if (err)
goto out_free_channels;
+ netdev_lock(dev);
niu_enable_napi(np);
+ netdev_unlock(dev);
spin_lock_irq(&np->lock);
@@ -6163,7 +6165,7 @@ static void niu_full_shutdown(struct niu *np, struct net_device *dev)
niu_disable_napi(np);
netif_tx_stop_all_queues(dev);
- del_timer_sync(&np->timer);
+ timer_delete_sync(&np->timer);
spin_lock_irq(&np->lock);
@@ -6460,7 +6462,7 @@ static void niu_reset_buffers(struct niu *np)
page = rp->rxhash[j];
while (page) {
struct page *next = niu_next_page(page);
- u64 base = page->index;
+ u64 base = page->private;
base = base >> RBR_DESCR_ADDR_SHIFT;
rp->rbr[k++] = cpu_to_le32(base);
page = next;
@@ -6509,7 +6511,7 @@ static void niu_reset_task(struct work_struct *work)
spin_unlock_irqrestore(&np->lock, flags);
- del_timer_sync(&np->timer);
+ timer_delete_sync(&np->timer);
niu_netif_stop(np);
@@ -6521,6 +6523,7 @@ static void niu_reset_task(struct work_struct *work)
niu_reset_buffers(np);
+ netdev_lock(np->dev);
spin_lock_irqsave(&np->lock, flags);
err = niu_init_hw(np);
@@ -6531,6 +6534,7 @@ static void niu_reset_task(struct work_struct *work)
}
spin_unlock_irqrestore(&np->lock, flags);
+ netdev_unlock(np->dev);
}
static void niu_tx_timeout(struct net_device *dev, unsigned int txqueue)
@@ -6672,6 +6676,8 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
len = skb_headlen(skb);
mapping = np->ops->map_single(np->device, skb->data,
len, DMA_TO_DEVICE);
+ if (np->ops->mapping_error(np->device, mapping))
+ goto out_drop;
prod = rp->prod;
@@ -6713,6 +6719,8 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
mapping = np->ops->map_page(np->device, skb_frag_page(frag),
skb_frag_off(frag), len,
DMA_TO_DEVICE);
+ if (np->ops->mapping_error(np->device, mapping))
+ goto out_unmap;
rp->tx_buffs[prod].skb = NULL;
rp->tx_buffs[prod].mapping = mapping;
@@ -6737,6 +6745,19 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
out:
return NETDEV_TX_OK;
+out_unmap:
+ while (i--) {
+ const skb_frag_t *frag;
+
+ prod = PREVIOUS_TX(rp, prod);
+ frag = &skb_shinfo(skb)->frags[i];
+ np->ops->unmap_page(np->device, rp->tx_buffs[prod].mapping,
+ skb_frag_size(frag), DMA_TO_DEVICE);
+ }
+
+ np->ops->unmap_single(np->device, rp->tx_buffs[rp->prod].mapping,
+ skb_headlen(skb), DMA_TO_DEVICE);
+
out_drop:
rp->tx_errors++;
kfree_skb(skb);
@@ -6751,7 +6772,7 @@ static int niu_change_mtu(struct net_device *dev, int new_mtu)
orig_jumbo = (dev->mtu > ETH_DATA_LEN);
new_jumbo = (new_mtu > ETH_DATA_LEN);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
if (!netif_running(dev) ||
(orig_jumbo == new_jumbo))
@@ -6761,7 +6782,9 @@ static int niu_change_mtu(struct net_device *dev, int new_mtu)
niu_free_channels(np);
+ netdev_lock(dev);
niu_enable_napi(np);
+ netdev_unlock(dev);
err = niu_alloc_channels(np);
if (err)
@@ -7071,8 +7094,10 @@ static int niu_ethflow_to_flowkey(u64 ethflow, u64 *flow_key)
}
-static int niu_get_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc)
+static int niu_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *nfc)
{
+ struct niu *np = netdev_priv(dev);
u64 class;
nfc->data = 0;
@@ -7284,9 +7309,6 @@ static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
int ret = 0;
switch (cmd->cmd) {
- case ETHTOOL_GRXFH:
- ret = niu_get_hash_opts(np, cmd);
- break;
case ETHTOOL_GRXRINGS:
cmd->data = np->num_rx_rings;
break;
@@ -7307,8 +7329,11 @@ static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
return ret;
}
-static int niu_set_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc)
+static int niu_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
{
+ struct niu *np = netdev_priv(dev);
u64 class;
u64 flow_key = 0;
unsigned long flags;
@@ -7650,9 +7675,6 @@ static int niu_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
int ret = 0;
switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- ret = niu_set_hash_opts(np, cmd);
- break;
case ETHTOOL_SRXCLSRLINS:
ret = niu_add_ethtool_tcam_entry(np, cmd);
break;
@@ -7906,6 +7928,8 @@ static const struct ethtool_ops niu_ethtool_ops = {
.set_phys_id = niu_set_phys_id,
.get_rxnfc = niu_get_nfc,
.set_rxnfc = niu_set_nfc,
+ .get_rxfh_fields = niu_get_rxfh_fields,
+ .set_rxfh_fields = niu_set_rxfh_fields,
.get_link_ksettings = niu_get_link_ksettings,
.set_link_ksettings = niu_set_link_ksettings,
};
@@ -9058,6 +9082,8 @@ static void niu_try_msix(struct niu *np, u8 *ldg_num_map)
msi_vec[i].entry = i;
}
+ pdev->dev_flags |= PCI_DEV_FLAGS_MSIX_TOUCH_ENTRY_DATA_FIRST;
+
num_irqs = pci_enable_msix_range(pdev, msi_vec, 1, num_irqs);
if (num_irqs < 0) {
np->flags &= ~NIU_FLAGS_MSIX;
@@ -9636,6 +9662,11 @@ static void niu_pci_unmap_single(struct device *dev, u64 dma_address,
dma_unmap_single(dev, dma_address, size, direction);
}
+static int niu_pci_mapping_error(struct device *dev, u64 addr)
+{
+ return dma_mapping_error(dev, addr);
+}
+
static const struct niu_ops niu_pci_ops = {
.alloc_coherent = niu_pci_alloc_coherent,
.free_coherent = niu_pci_free_coherent,
@@ -9643,6 +9674,7 @@ static const struct niu_ops niu_pci_ops = {
.unmap_page = niu_pci_unmap_page,
.map_single = niu_pci_map_single,
.unmap_single = niu_pci_unmap_single,
+ .mapping_error = niu_pci_mapping_error,
};
static void niu_driver_version(void)
@@ -9908,7 +9940,7 @@ static int __maybe_unused niu_suspend(struct device *dev_d)
flush_work(&np->reset_task);
niu_netif_stop(np);
- del_timer_sync(&np->timer);
+ timer_delete_sync(&np->timer);
spin_lock_irqsave(&np->lock, flags);
niu_enable_interrupts(np, 0);
@@ -9937,6 +9969,7 @@ static int __maybe_unused niu_resume(struct device *dev_d)
spin_lock_irqsave(&np->lock, flags);
+ netdev_lock(dev);
err = niu_init_hw(np);
if (!err) {
np->timer.expires = jiffies + HZ;
@@ -9945,6 +9978,7 @@ static int __maybe_unused niu_resume(struct device *dev_d)
}
spin_unlock_irqrestore(&np->lock, flags);
+ netdev_unlock(dev);
return err;
}
@@ -10009,6 +10043,11 @@ static void niu_phys_unmap_single(struct device *dev, u64 dma_address,
/* Nothing to do. */
}
+static int niu_phys_mapping_error(struct device *dev, u64 dma_address)
+{
+ return false;
+}
+
static const struct niu_ops niu_phys_ops = {
.alloc_coherent = niu_phys_alloc_coherent,
.free_coherent = niu_phys_free_coherent,
@@ -10016,6 +10055,7 @@ static const struct niu_ops niu_phys_ops = {
.unmap_page = niu_phys_unmap_page,
.map_single = niu_phys_map_single,
.unmap_single = niu_phys_unmap_single,
+ .mapping_error = niu_phys_mapping_error,
};
static int niu_of_probe(struct platform_device *op)
@@ -10182,7 +10222,7 @@ static struct platform_driver niu_of_driver = {
.of_match_table = niu_match,
},
.probe = niu_of_probe,
- .remove_new = niu_of_remove,
+ .remove = niu_of_remove,
};
#endif /* CONFIG_SPARC64 */
diff --git a/drivers/net/ethernet/sun/niu.h b/drivers/net/ethernet/sun/niu.h
index 04c215f91fc0..d8368043fc3b 100644
--- a/drivers/net/ethernet/sun/niu.h
+++ b/drivers/net/ethernet/sun/niu.h
@@ -2879,6 +2879,9 @@ struct tx_ring_info {
#define NEXT_TX(tp, index) \
(((index) + 1) < (tp)->pending ? ((index) + 1) : 0)
+#define PREVIOUS_TX(tp, index) \
+ (((index) - 1) >= 0 ? ((index) - 1) : (((tp)->pending) - 1))
+
static inline u32 niu_tx_avail(struct tx_ring_info *tp)
{
return (tp->pending -
@@ -3140,6 +3143,7 @@ struct niu_ops {
enum dma_data_direction direction);
void (*unmap_single)(struct device *dev, u64 dma_address,
size_t size, enum dma_data_direction direction);
+ int (*mapping_error)(struct device *dev, u64 dma_address);
};
struct niu_link_config {
@@ -3246,8 +3250,8 @@ struct niu {
struct niu_parent *parent;
u32 flags;
-#define NIU_FLAGS_HOTPLUG_PHY_PRESENT 0x02000000 /* Removeable PHY detected*/
-#define NIU_FLAGS_HOTPLUG_PHY 0x01000000 /* Removeable PHY */
+#define NIU_FLAGS_HOTPLUG_PHY_PRESENT 0x02000000 /* Removable PHY detected*/
+#define NIU_FLAGS_HOTPLUG_PHY 0x01000000 /* Removable PHY */
#define NIU_FLAGS_VPD_VALID 0x00800000 /* VPD has valid version */
#define NIU_FLAGS_MSIX 0x00400000 /* MSI-X in use */
#define NIU_FLAGS_MCAST 0x00200000 /* multicast filter enabled */
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index 16c86b13c185..edb2fd3a6551 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -526,7 +526,7 @@ static int try_next_permutation(struct bigmac *bp, void __iomem *tregs)
static void bigmac_timer(struct timer_list *t)
{
- struct bigmac *bp = from_timer(bp, t, bigmac_timer);
+ struct bigmac *bp = timer_container_of(bp, t, bigmac_timer);
void __iomem *tregs = bp->tregs;
int restart_timer = 0;
@@ -931,7 +931,7 @@ static int bigmac_close(struct net_device *dev)
{
struct bigmac *bp = netdev_priv(dev);
- del_timer(&bp->bigmac_timer);
+ timer_delete(&bp->bigmac_timer);
bp->timer_state = asleep;
bp->timer_ticks = 0;
@@ -1272,7 +1272,7 @@ static struct platform_driver bigmac_sbus_driver = {
.of_match_table = bigmac_sbus_match,
},
.probe = bigmac_sbus_probe,
- .remove_new = bigmac_sbus_remove,
+ .remove = bigmac_sbus_remove,
};
module_platform_driver(bigmac_sbus_driver);
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 9bd1df8308d2..8e69d917d827 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -949,17 +949,6 @@ static irqreturn_t gem_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void gem_poll_controller(struct net_device *dev)
-{
- struct gem *gp = netdev_priv(dev);
-
- disable_irq(gp->pdev->irq);
- gem_interrupt(gp->pdev->irq, dev);
- enable_irq(gp->pdev->irq);
-}
-#endif
-
static void gem_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct gem *gp = netdev_priv(dev);
@@ -1492,7 +1481,7 @@ static int gem_mdio_link_not_up(struct gem *gp)
static void gem_link_timer(struct timer_list *t)
{
- struct gem *gp = from_timer(gp, t, link_timer);
+ struct gem *gp = timer_container_of(gp, t, link_timer);
struct net_device *dev = gp->dev;
int restart_aneg = 0;
@@ -2191,7 +2180,7 @@ static void gem_do_stop(struct net_device *dev, int wol)
gem_disable_ints(gp);
/* Stop the link timer */
- del_timer_sync(&gp->link_timer);
+ timer_delete_sync(&gp->link_timer);
/* We cannot cancel the reset task while holding the
* rtnl lock, we'd get an A->B / B->A deadlock stituation
@@ -2241,7 +2230,7 @@ static void gem_reset_task(struct work_struct *work)
}
/* Stop the link timer */
- del_timer_sync(&gp->link_timer);
+ timer_delete_sync(&gp->link_timer);
/* Stop NAPI and tx */
gem_netif_stop(gp);
@@ -2499,7 +2488,7 @@ static int gem_change_mtu(struct net_device *dev, int new_mtu)
{
struct gem *gp = netdev_priv(dev);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
/* We'll just catch it later when the device is up'd or resumed */
if (!netif_running(dev) || !netif_device_present(dev))
@@ -2621,7 +2610,7 @@ static int gem_set_link_ksettings(struct net_device *dev,
/* Apply settings and restart link process. */
if (netif_device_present(gp->dev)) {
- del_timer_sync(&gp->link_timer);
+ timer_delete_sync(&gp->link_timer);
gem_begin_auto_negotiation(gp, cmd);
}
@@ -2637,7 +2626,7 @@ static int gem_nway_reset(struct net_device *dev)
/* Restart link process */
if (netif_device_present(gp->dev)) {
- del_timer_sync(&gp->link_timer);
+ timer_delete_sync(&gp->link_timer);
gem_begin_auto_negotiation(gp, NULL);
}
@@ -2839,9 +2828,6 @@ static const struct net_device_ops gem_netdev_ops = {
.ndo_change_mtu = gem_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = gem_set_mac_address,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = gem_poll_controller,
-#endif
};
static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 50ace461a1af..48f0a96c0e9e 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -451,7 +451,7 @@ static void happy_meal_tcvr_write(struct happy_meal *hp,
/* Auto negotiation. The scheme is very simple. We have a timer routine
* that keeps watching the auto negotiation process as it progresses.
* The DP83840 is first told to start doing it's thing, we set up the time
- * and place the timer state machine in it's initial state.
+ * and place the timer state machine in its initial state.
*
* Here the timer peeks at the DP83840 status registers at each click to see
* if the auto negotiation has completed, we assume here that the DP83840 PHY
@@ -721,7 +721,7 @@ force_link:
static void happy_meal_timer(struct timer_list *t)
{
- struct happy_meal *hp = from_timer(hp, t, happy_timer);
+ struct happy_meal *hp = timer_container_of(hp, t, happy_timer);
void __iomem *tregs = hp->tcvregs;
int restart_timer = 0;
@@ -1265,7 +1265,7 @@ static int happy_meal_init(struct happy_meal *hp)
u32 regtmp, rxcfg;
/* If auto-negotiation timer is running, kill it. */
- del_timer(&hp->happy_timer);
+ timer_delete(&hp->happy_timer);
HMD("happy_flags[%08x]\n", hp->happy_flags);
if (!(hp->happy_flags & HFLAG_INIT)) {
@@ -1922,7 +1922,7 @@ static int happy_meal_close(struct net_device *dev)
happy_meal_clean_rings(hp);
/* If auto-negotiation timer is running, kill it. */
- del_timer(&hp->happy_timer);
+ timer_delete(&hp->happy_timer);
spin_unlock_irq(&hp->happy_lock);
@@ -2184,7 +2184,7 @@ static int hme_set_link_ksettings(struct net_device *dev,
/* Ok, do it to it. */
spin_lock_irq(&hp->happy_lock);
- del_timer(&hp->happy_timer);
+ timer_delete(&hp->happy_timer);
happy_meal_begin_auto_negotiation(hp, hp->tcvregs, cmd);
spin_unlock_irq(&hp->happy_lock);
diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c
index aedd13c94225..2920341b14a0 100644
--- a/drivers/net/ethernet/sun/sunqe.c
+++ b/drivers/net/ethernet/sun/sunqe.c
@@ -965,7 +965,7 @@ static struct platform_driver qec_sbus_driver = {
.of_match_table = qec_sbus_match,
},
.probe = qec_sbus_probe,
- .remove_new = qec_sbus_remove,
+ .remove = qec_sbus_remove,
};
static int __init qec_init(void)
diff --git a/drivers/net/ethernet/sun/sunqe.h b/drivers/net/ethernet/sun/sunqe.h
index 0daed05b7c83..300631e8ac0d 100644
--- a/drivers/net/ethernet/sun/sunqe.h
+++ b/drivers/net/ethernet/sun/sunqe.h
@@ -36,7 +36,7 @@
#define GLOB_PSIZE_6144 0x10 /* 6k packet size */
#define GLOB_PSIZE_8192 0x11 /* 8k packet size */
-/* In MACE mode, there are four qe channels. Each channel has it's own
+/* In MACE mode, there are four qe channels. Each channel has its own
* status bits in the QEC status register. This macro picks out the
* ones you want.
*/
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 2f30715e9b67..a2a3e94da4b8 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -114,37 +114,23 @@ static void vnet_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
struct vnet *vp = (struct vnet *)netdev_priv(dev);
struct vnet_port *port;
- char *p = (char *)buf;
switch (stringset) {
case ETH_SS_STATS:
memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
- p += sizeof(ethtool_stats_keys);
+ buf += sizeof(ethtool_stats_keys);
rcu_read_lock();
list_for_each_entry_rcu(port, &vp->port_list, list) {
- snprintf(p, ETH_GSTRING_LEN, "p%u.%s-%pM",
- port->q_index, port->switch_port ? "s" : "q",
- port->raddr);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "p%u.rx_packets",
- port->q_index);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "p%u.tx_packets",
- port->q_index);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "p%u.rx_bytes",
- port->q_index);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "p%u.tx_bytes",
- port->q_index);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "p%u.event_up",
- port->q_index);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "p%u.event_reset",
- port->q_index);
- p += ETH_GSTRING_LEN;
+ ethtool_sprintf(&buf, "p%u.%s-%pM", port->q_index,
+ port->switch_port ? "s" : "q",
+ port->raddr);
+ ethtool_sprintf(&buf, "p%u.rx_packets", port->q_index);
+ ethtool_sprintf(&buf, "p%u.tx_packets", port->q_index);
+ ethtool_sprintf(&buf, "p%u.rx_bytes", port->q_index);
+ ethtool_sprintf(&buf, "p%u.tx_bytes", port->q_index);
+ ethtool_sprintf(&buf, "p%u.event_up", port->q_index);
+ ethtool_sprintf(&buf, "p%u.event_reset", port->q_index);
}
rcu_read_unlock();
break;
@@ -519,7 +505,7 @@ static void vnet_port_remove(struct vio_dev *vdev)
struct vnet_port *port = dev_get_drvdata(&vdev->dev);
if (port) {
- del_timer_sync(&port->vio.timer);
+ timer_delete_sync(&port->vio.timer);
napi_disable(&port->napi);
diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c
index 1cacb2a0ee03..0212853c9430 100644
--- a/drivers/net/ethernet/sun/sunvnet_common.c
+++ b/drivers/net/ethernet/sun/sunvnet_common.c
@@ -1044,7 +1044,7 @@ static inline void vnet_free_skbs(struct sk_buff *skb)
void sunvnet_clean_timer_expire_common(struct timer_list *t)
{
- struct vnet_port *port = from_timer(port, t, clean_timer);
+ struct vnet_port *port = timer_container_of(port, t, clean_timer);
struct sk_buff *freeskbs;
unsigned pending;
@@ -1058,7 +1058,7 @@ void sunvnet_clean_timer_expire_common(struct timer_list *t)
(void)mod_timer(&port->clean_timer,
jiffies + VNET_CLEAN_TIMEOUT);
else
- del_timer(&port->clean_timer);
+ timer_delete(&port->clean_timer);
}
EXPORT_SYMBOL_GPL(sunvnet_clean_timer_expire_common);
@@ -1513,7 +1513,7 @@ out_dropped:
(void)mod_timer(&port->clean_timer,
jiffies + VNET_CLEAN_TIMEOUT);
else if (port)
- del_timer(&port->clean_timer);
+ timer_delete(&port->clean_timer);
rcu_read_unlock();
dev_kfree_skb(skb);
vnet_free_skbs(freeskbs);
@@ -1707,7 +1707,7 @@ EXPORT_SYMBOL_GPL(sunvnet_port_free_tx_bufs_common);
void vnet_port_reset(struct vnet_port *port)
{
- del_timer(&port->clean_timer);
+ timer_delete(&port->clean_timer);
sunvnet_port_free_tx_bufs_common(port);
port->rmtu = 0;
port->tso = (port->vsw == 0); /* no tso in vsw, misbehaves in bridge */
diff --git a/drivers/net/ethernet/sunplus/spl2sw_driver.c b/drivers/net/ethernet/sunplus/spl2sw_driver.c
index 391a1bc7f446..5e0e4c9ecbb0 100644
--- a/drivers/net/ethernet/sunplus/spl2sw_driver.c
+++ b/drivers/net/ethernet/sunplus/spl2sw_driver.c
@@ -199,7 +199,7 @@ static const struct net_device_ops netdev_ops = {
.ndo_start_xmit = spl2sw_ethernet_start_xmit,
.ndo_set_rx_mode = spl2sw_ethernet_set_rx_mode,
.ndo_set_mac_address = spl2sw_ethernet_set_mac_address,
- .ndo_do_ioctl = phy_do_ioctl,
+ .ndo_eth_ioctl = phy_do_ioctl,
.ndo_tx_timeout = spl2sw_ethernet_tx_timeout,
};
@@ -549,7 +549,7 @@ MODULE_DEVICE_TABLE(of, spl2sw_of_match);
static struct platform_driver spl2sw_driver = {
.probe = spl2sw_probe,
- .remove_new = spl2sw_remove,
+ .remove = spl2sw_remove,
.driver = {
.name = "sp7021_emac",
.of_match_table = spl2sw_of_match,
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
index f8e133604146..131786aa4d5b 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
@@ -21,8 +21,6 @@
#include "dwc-xlgmac.h"
#include "dwc-xlgmac-reg.h"
-MODULE_LICENSE("Dual BSD/GPL");
-
static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "DWC ethernet debug level (0=none,...,16=all)");
@@ -725,3 +723,8 @@ void xlgmac_print_all_hw_features(struct xlgmac_pdata *pdata)
XLGMAC_PR("=====================================================\n");
XLGMAC_PR("\n");
}
+
+MODULE_DESCRIPTION(XLGMAC_DRV_DESC);
+MODULE_VERSION(XLGMAC_DRV_VERSION);
+MODULE_AUTHOR("Jie Deng <jiedeng@synopsys.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
index 36b948820c1e..37101dc50d04 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
@@ -360,7 +360,8 @@ static irqreturn_t xlgmac_dma_isr(int irq, void *data)
static void xlgmac_tx_timer(struct timer_list *t)
{
- struct xlgmac_channel *channel = from_timer(channel, t, tx_timer);
+ struct xlgmac_channel *channel = timer_container_of(channel, t,
+ tx_timer);
struct xlgmac_pdata *pdata = channel->pdata;
struct napi_struct *napi;
@@ -405,7 +406,7 @@ static void xlgmac_stop_timers(struct xlgmac_pdata *pdata)
if (!channel->tx_ring)
break;
- del_timer_sync(&channel->tx_timer);
+ timer_delete_sync(&channel->tx_timer);
}
}
@@ -823,7 +824,7 @@ static int xlgmac_change_mtu(struct net_device *netdev, int mtu)
return ret;
pdata->rx_buf_size = ret;
- netdev->mtu = mtu;
+ WRITE_ONCE(netdev->mtu, mtu);
xlgmac_restart_dev(pdata);
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c
index fa8604d7b797..36fe538e3332 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c
@@ -71,8 +71,3 @@ static struct pci_driver xlgmac_pci_driver = {
};
module_pci_driver(xlgmac_pci_driver);
-
-MODULE_DESCRIPTION(XLGMAC_DRV_DESC);
-MODULE_VERSION(XLGMAC_DRV_VERSION);
-MODULE_AUTHOR("Jie Deng <jiedeng@synopsys.com>");
-MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/tehuti/Kconfig b/drivers/net/ethernet/tehuti/Kconfig
index 8735633765a1..6db2c9817445 100644
--- a/drivers/net/ethernet/tehuti/Kconfig
+++ b/drivers/net/ethernet/tehuti/Kconfig
@@ -23,4 +23,19 @@ config TEHUTI
help
Tehuti Networks 10G Ethernet NIC
+config TEHUTI_TN40
+ tristate "Tehuti Networks TN40xx 10G Ethernet adapters"
+ depends on PCI
+ select PAGE_POOL
+ select FW_LOADER
+ select PHYLINK
+ help
+ This driver supports 10G Ethernet adapters using Tehuti Networks
+ TN40xx chips. Currently, adapters with Applied Micro Circuits
+ Corporation QT2025 are supported: Tehuti Networks TN9310,
+ DLink DXE-810S, ASUS XG-C100F, and Edimax EN-9320.
+
+ To compile this driver as a module, choose M here: the module
+ will be called tn40xx.
+
endif # NET_VENDOR_TEHUTI
diff --git a/drivers/net/ethernet/tehuti/Makefile b/drivers/net/ethernet/tehuti/Makefile
index 13a0ddd62088..0d4f4d63a65c 100644
--- a/drivers/net/ethernet/tehuti/Makefile
+++ b/drivers/net/ethernet/tehuti/Makefile
@@ -4,3 +4,6 @@
#
obj-$(CONFIG_TEHUTI) += tehuti.o
+
+tn40xx-y := tn40.o tn40_mdio.o tn40_phy.o
+obj-$(CONFIG_TEHUTI_TN40) += tn40xx.o
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index ca409515ead5..2cee1f05ac47 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -276,7 +276,7 @@ static irqreturn_t bdx_isr_napi(int irq, void *dev)
* currently intrs are disabled (since we read ISR),
* and we have failed to register next poll.
* so we read the regs to trigger chip
- * and allow further interupts. */
+ * and allow further interrupts. */
READ_REG(priv, regTXF_WPTR_0);
READ_REG(priv, regRXD_WPTR_0);
}
@@ -756,7 +756,7 @@ static int bdx_change_mtu(struct net_device *ndev, int new_mtu)
{
ENTER;
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
if (netif_running(ndev)) {
bdx_close(ndev);
bdx_open(ndev);
@@ -1671,7 +1671,7 @@ static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb,
#endif
#ifdef BDX_LLTX
- netif_trans_update(ndev); /* NETIF_F_LLTX driver :( */
+ netif_trans_update(ndev); /* dev->lltx driver :( */
#endif
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += skb->len;
@@ -2019,7 +2019,7 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* set multicast list callback has to use priv->tx_lock.
*/
#ifdef BDX_LLTX
- ndev->features |= NETIF_F_LLTX;
+ ndev->lltx = true;
#endif
/* MTU range: 60 - 16384 */
ndev->min_mtu = ETH_ZLEN;
diff --git a/drivers/net/ethernet/tehuti/tehuti.h b/drivers/net/ethernet/tehuti/tehuti.h
index 909e7296cecf..47a2d3e5f8ed 100644
--- a/drivers/net/ethernet/tehuti/tehuti.h
+++ b/drivers/net/ethernet/tehuti/tehuti.h
@@ -260,7 +260,7 @@ struct bdx_priv {
int tx_update_mark;
int tx_noupd;
#endif
- spinlock_t tx_lock; /* NETIF_F_LLTX mode */
+ spinlock_t tx_lock; /* dev->lltx mode */
/* rarely used */
u8 port;
diff --git a/drivers/net/ethernet/tehuti/tn40.c b/drivers/net/ethernet/tehuti/tn40.c
new file mode 100644
index 000000000000..558b791a97ed
--- /dev/null
+++ b/drivers/net/ethernet/tehuti/tn40.c
@@ -0,0 +1,1857 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) Tehuti Networks Ltd. */
+
+#include <linux/bitfield.h>
+#include <linux/ethtool.h>
+#include <linux/firmware.h>
+#include <linux/if_vlan.h>
+#include <linux/iopoll.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/phylink.h>
+#include <linux/vmalloc.h>
+#include <net/netdev_queues.h>
+#include <net/page_pool/helpers.h>
+
+#include "tn40.h"
+
+#define TN40_SHORT_PACKET_SIZE 60
+#define TN40_FIRMWARE_NAME "tehuti/bdx.bin"
+
+static void tn40_enable_interrupts(struct tn40_priv *priv)
+{
+ tn40_write_reg(priv, TN40_REG_IMR, priv->isr_mask);
+}
+
+static void tn40_disable_interrupts(struct tn40_priv *priv)
+{
+ tn40_write_reg(priv, TN40_REG_IMR, 0);
+}
+
+static int tn40_fifo_alloc(struct tn40_priv *priv, struct tn40_fifo *f,
+ int fsz_type,
+ u16 reg_cfg0, u16 reg_cfg1,
+ u16 reg_rptr, u16 reg_wptr)
+{
+ u16 memsz = TN40_FIFO_SIZE * (1 << fsz_type);
+ u64 cfg_base;
+
+ memset(f, 0, sizeof(struct tn40_fifo));
+ /* 1K extra space is allocated at the end of the fifo to simplify
+ * processing of descriptors that wraps around fifo's end.
+ */
+ f->va = dma_alloc_coherent(&priv->pdev->dev,
+ memsz + TN40_FIFO_EXTRA_SPACE, &f->da,
+ GFP_KERNEL);
+ if (!f->va)
+ return -ENOMEM;
+
+ f->reg_cfg0 = reg_cfg0;
+ f->reg_cfg1 = reg_cfg1;
+ f->reg_rptr = reg_rptr;
+ f->reg_wptr = reg_wptr;
+ f->rptr = 0;
+ f->wptr = 0;
+ f->memsz = memsz;
+ f->size_mask = memsz - 1;
+ cfg_base = lower_32_bits((f->da & TN40_TX_RX_CFG0_BASE) | fsz_type);
+ tn40_write_reg(priv, reg_cfg0, cfg_base);
+ tn40_write_reg(priv, reg_cfg1, upper_32_bits(f->da));
+ return 0;
+}
+
+static void tn40_fifo_free(struct tn40_priv *priv, struct tn40_fifo *f)
+{
+ dma_free_coherent(&priv->pdev->dev,
+ f->memsz + TN40_FIFO_EXTRA_SPACE, f->va, f->da);
+}
+
+static struct tn40_rxdb *tn40_rxdb_alloc(int nelem)
+{
+ size_t size = sizeof(struct tn40_rxdb) + (nelem * sizeof(int)) +
+ (nelem * sizeof(struct tn40_rx_map));
+ struct tn40_rxdb *db;
+ int i;
+
+ db = vzalloc(size);
+ if (db) {
+ db->stack = (int *)(db + 1);
+ db->elems = (void *)(db->stack + nelem);
+ db->nelem = nelem;
+ db->top = nelem;
+ /* make the first alloc close to db struct */
+ for (i = 0; i < nelem; i++)
+ db->stack[i] = nelem - i - 1;
+ }
+ return db;
+}
+
+static void tn40_rxdb_free(struct tn40_rxdb *db)
+{
+ vfree(db);
+}
+
+static int tn40_rxdb_alloc_elem(struct tn40_rxdb *db)
+{
+ return db->stack[--db->top];
+}
+
+static void *tn40_rxdb_addr_elem(struct tn40_rxdb *db, unsigned int n)
+{
+ return db->elems + n;
+}
+
+static int tn40_rxdb_available(struct tn40_rxdb *db)
+{
+ return db->top;
+}
+
+static void tn40_rxdb_free_elem(struct tn40_rxdb *db, unsigned int n)
+{
+ db->stack[db->top++] = n;
+}
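+
+/* The rxdb above is a simple free-index stack over a flat array of
+ * tn40_rx_map entries. For illustration (nelem value assumed): with
+ * nelem = 4 the stack starts as {3, 2, 1, 0} with top = 4, so a
+ * typical round trip looks like
+ *
+ * idx = tn40_rxdb_alloc_elem(db); // pops index 0 first
+ * dm = tn40_rxdb_addr_elem(db, idx); // dm->page = ...
+ * ...
+ * tn40_rxdb_free_elem(db, idx); // pushes the index back
+ *
+ * and tn40_rxdb_available() is simply the current stack depth.
+ */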
+
+/**
+ * tn40_create_rx_ring - Initialize all RX related HW and SW resources
+ * @priv: NIC private structure
+ *
+ * create_rx_ring creates rxf and rxd fifos, updates the relevant HW registers,
+ * and preallocates rx buffers. It assumes that Rx is disabled in HW.
+ * Functions are grouped together for better cache usage.
+ *
+ * RxD fifo is smaller than RxF fifo by design. Upon high load, RxD will be
+ * filled and packets will be dropped by the NIC without getting into the host
+ * or generating interrupts. In this situation the host has no chance of
+ * processing all the packets. Dropping packets by the NIC is cheaper, since it
+ * takes 0 CPU cycles.
+ *
+ * Return: 0 on success and negative value on error.
+ */
+static int tn40_create_rx_ring(struct tn40_priv *priv)
+{
+ struct page_pool_params pp = {
+ .dev = &priv->pdev->dev,
+ .napi = &priv->napi,
+ .dma_dir = DMA_FROM_DEVICE,
+ .netdev = priv->ndev,
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .max_len = PAGE_SIZE,
+ };
+ int ret, pkt_size, nr;
+
+ priv->page_pool = page_pool_create(&pp);
+ if (IS_ERR(priv->page_pool))
+ return PTR_ERR(priv->page_pool);
+
+ ret = tn40_fifo_alloc(priv, &priv->rxd_fifo0.m, priv->rxd_size,
+ TN40_REG_RXD_CFG0_0, TN40_REG_RXD_CFG1_0,
+ TN40_REG_RXD_RPTR_0, TN40_REG_RXD_WPTR_0);
+ if (ret)
+ goto err_destroy_page_pool;
+
+ ret = tn40_fifo_alloc(priv, &priv->rxf_fifo0.m, priv->rxf_size,
+ TN40_REG_RXF_CFG0_0, TN40_REG_RXF_CFG1_0,
+ TN40_REG_RXF_RPTR_0, TN40_REG_RXF_WPTR_0);
+ if (ret)
+ goto err_free_rxd;
+
+ pkt_size = priv->ndev->mtu + VLAN_ETH_HLEN;
+ priv->rxf_fifo0.m.pktsz = pkt_size;
+ nr = priv->rxf_fifo0.m.memsz / sizeof(struct tn40_rxf_desc);
+ priv->rxdb0 = tn40_rxdb_alloc(nr);
+ if (!priv->rxdb0) {
+ ret = -ENOMEM;
+ goto err_free_rxf;
+ }
+ return 0;
+err_free_rxf:
+ tn40_fifo_free(priv, &priv->rxf_fifo0.m);
+err_free_rxd:
+ tn40_fifo_free(priv, &priv->rxd_fifo0.m);
+err_destroy_page_pool:
+ page_pool_destroy(priv->page_pool);
+ return ret;
+}
+
+static void tn40_rx_free_buffers(struct tn40_priv *priv)
+{
+ struct tn40_rxdb *db = priv->rxdb0;
+ struct tn40_rx_map *dm;
+ u16 i;
+
+ netdev_dbg(priv->ndev, "total =%d free =%d busy =%d\n", db->nelem,
+ tn40_rxdb_available(db),
+ db->nelem - tn40_rxdb_available(db));
+
+ for (i = 0; i < db->nelem; i++) {
+ dm = tn40_rxdb_addr_elem(db, i);
+ if (dm->page)
+ page_pool_put_full_page(priv->page_pool, dm->page,
+ false);
+ }
+}
+
+static void tn40_destroy_rx_ring(struct tn40_priv *priv)
+{
+ if (priv->rxdb0) {
+ tn40_rx_free_buffers(priv);
+ tn40_rxdb_free(priv->rxdb0);
+ priv->rxdb0 = NULL;
+ }
+ tn40_fifo_free(priv, &priv->rxf_fifo0.m);
+ tn40_fifo_free(priv, &priv->rxd_fifo0.m);
+ page_pool_destroy(priv->page_pool);
+}
+
+static void tn40_set_rx_desc(struct tn40_priv *priv, int idx, u64 dma)
+{
+ struct tn40_rxf_fifo *f = &priv->rxf_fifo0;
+ struct tn40_rxf_desc *rxfd;
+ int delta;
+
+ rxfd = (struct tn40_rxf_desc *)(f->m.va + f->m.wptr);
+ rxfd->info = cpu_to_le32(0x10003); /* INFO =1 BC =3 */
+ rxfd->va_lo = cpu_to_le32(idx);
+ rxfd->pa_lo = cpu_to_le32(lower_32_bits(dma));
+ rxfd->pa_hi = cpu_to_le32(upper_32_bits(dma));
+ rxfd->len = cpu_to_le32(f->m.pktsz);
+ f->m.wptr += sizeof(struct tn40_rxf_desc);
+ delta = f->m.wptr - f->m.memsz;
+ if (unlikely(delta >= 0)) {
+ f->m.wptr = delta;
+ if (delta > 0) {
+ memcpy(f->m.va, f->m.va + f->m.memsz, delta);
+ netdev_dbg(priv->ndev,
+ "wrapped rxd descriptor\n");
+ }
+ }
+}
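+
+/* Worked example of the wrap handling above (numbers assumed for
+ * illustration): with memsz = 4096, wptr = 4088 and a 16-byte rxf
+ * descriptor, the write spills 8 bytes into the 1K extra space;
+ * delta becomes 8, those 8 bytes are copied from va + 4096 back to
+ * va, and wptr ends up at 8.
+ */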
+
+/**
+ * tn40_rx_alloc_buffers - Fill rxf fifo with buffers.
+ *
+ * @priv: NIC's private structure
+ *
+ * rx_alloc_buffers allocates buffers via the page pool API, builds rxf descs
+ * and pushes them (rxf descr) into the rxf fifo. The pages are stored in rxdb.
+ * To calculate the free space, we use the cached values of RPTR and WPTR
+ * when needed. This function also updates RPTR and WPTR.
+ */
+static void tn40_rx_alloc_buffers(struct tn40_priv *priv)
+{
+ struct tn40_rxf_fifo *f = &priv->rxf_fifo0;
+ struct tn40_rxdb *db = priv->rxdb0;
+ struct tn40_rx_map *dm;
+ struct page *page;
+ int dno, i, idx;
+
+ dno = tn40_rxdb_available(db) - 1;
+ for (i = dno; i > 0; i--) {
+ page = page_pool_dev_alloc_pages(priv->page_pool);
+ if (!page)
+ break;
+
+ idx = tn40_rxdb_alloc_elem(db);
+ tn40_set_rx_desc(priv, idx, page_pool_get_dma_addr(page));
+ dm = tn40_rxdb_addr_elem(db, idx);
+ dm->page = page;
+ }
+ if (i != dno)
+ tn40_write_reg(priv, f->m.reg_wptr,
+ f->m.wptr & TN40_TXF_WPTR_WR_PTR);
+ netdev_dbg(priv->ndev, "write_reg 0x%04x f->m.reg_wptr 0x%x\n",
+ f->m.reg_wptr, f->m.wptr & TN40_TXF_WPTR_WR_PTR);
+ netdev_dbg(priv->ndev, "read_reg 0x%04x f->m.reg_rptr=0x%x\n",
+ f->m.reg_rptr, tn40_read_reg(priv, f->m.reg_rptr));
+ netdev_dbg(priv->ndev, "write_reg 0x%04x f->m.reg_wptr=0x%x\n",
+ f->m.reg_wptr, tn40_read_reg(priv, f->m.reg_wptr));
+}
+
+static void tn40_recycle_rx_buffer(struct tn40_priv *priv,
+ struct tn40_rxd_desc *rxdd)
+{
+ struct tn40_rxf_fifo *f = &priv->rxf_fifo0;
+ struct tn40_rx_map *dm;
+ int idx;
+
+ idx = le32_to_cpu(rxdd->va_lo);
+ dm = tn40_rxdb_addr_elem(priv->rxdb0, idx);
+ tn40_set_rx_desc(priv, idx, page_pool_get_dma_addr(dm->page));
+
+ tn40_write_reg(priv, f->m.reg_wptr, f->m.wptr & TN40_TXF_WPTR_WR_PTR);
+}
+
+static int tn40_rx_receive(struct tn40_priv *priv, int budget)
+{
+ struct tn40_rxd_fifo *f = &priv->rxd_fifo0;
+ u32 rxd_val1, rxd_err, pkt_id;
+ int tmp_len, size, done = 0;
+ struct tn40_rxdb *db = NULL;
+ struct tn40_rxd_desc *rxdd;
+ struct tn40_rx_map *dm;
+ struct sk_buff *skb;
+ u16 len, rxd_vlan;
+ int idx;
+
+ f->m.wptr = tn40_read_reg(priv, f->m.reg_wptr) & TN40_TXF_WPTR_WR_PTR;
+ size = f->m.wptr - f->m.rptr;
+ if (size < 0)
+ size += f->m.memsz; /* Size is negative :-) */
+
+ while (size > 0) {
+ rxdd = (struct tn40_rxd_desc *)(f->m.va + f->m.rptr);
+ db = priv->rxdb0;
+
+ /* We have a chicken and egg problem here. If the
+ * descriptor is wrapped we first need to copy the tail
+ * of the descriptor to the end of the buffer before
+ * extracting values from the descriptor. However in
+ * order to know if the descriptor is wrapped we need to
+ * obtain the length of the descriptor from (the
+ * wrapped) descriptor. Luckily the length is the first
+ * word of the descriptor. Descriptor lengths are
+ * multiples of 8 bytes, so in case of a wrapped
+ * descriptor the first 8 bytes are guaranteed to appear
+ * before the end of the buffer. We first obtain the
+ * length, we then copy the rest of the descriptor if
+ * needed and then extract the rest of the values from
+ * the descriptor.
+ *
+ * Do not change the order of operations as it will
+ * break the code!!!
+ */
+ rxd_val1 = le32_to_cpu(rxdd->rxd_val1);
+ tmp_len = TN40_GET_RXD_BC(rxd_val1) << 3;
+ pkt_id = TN40_GET_RXD_PKT_ID(rxd_val1);
+ size -= tmp_len;
+ /* CHECK FOR A PARTIALLY ARRIVED DESCRIPTOR */
+ if (size < 0) {
+ netdev_dbg(priv->ndev,
+ "%s partially arrived desc tmp_len %d\n",
+ __func__, tmp_len);
+ break;
+ }
+ /* Make sure that the descriptor has fully arrived
+ * before reading the rest of it.
+ */
+ rmb();
+
+ /* A special treatment is given to non-contiguous
+ * descriptors that start near the end, wrap around
+ * and continue at the beginning. The second part is
+ * copied right after the first, and then the descriptor
+ * is interpreted as normal. The fifo has an extra
+ * space to allow such operations.
+ */
+
+ /* HAVE WE REACHED THE END OF THE QUEUE? */
+ f->m.rptr += tmp_len;
+ tmp_len = f->m.rptr - f->m.memsz;
+ if (unlikely(tmp_len >= 0)) {
+ f->m.rptr = tmp_len;
+ if (tmp_len > 0) {
+ /* COPY PARTIAL DESCRIPTOR
+ * TO THE END OF THE QUEUE
+ */
+ netdev_dbg(priv->ndev,
+ "wrapped desc rptr=%d tmp_len=%d\n",
+ f->m.rptr, tmp_len);
+ memcpy(f->m.va + f->m.memsz, f->m.va, tmp_len);
+ }
+ }
+ idx = le32_to_cpu(rxdd->va_lo);
+ dm = tn40_rxdb_addr_elem(db, idx);
+ prefetch(dm);
+
+ len = le16_to_cpu(rxdd->len);
+ rxd_vlan = le16_to_cpu(rxdd->rxd_vlan);
+ /* CHECK FOR ERRORS */
+ rxd_err = TN40_GET_RXD_ERR(rxd_val1);
+ if (unlikely(rxd_err)) {
+ u64_stats_update_begin(&priv->syncp);
+ priv->stats.rx_errors++;
+ u64_stats_update_end(&priv->syncp);
+ tn40_recycle_rx_buffer(priv, rxdd);
+ continue;
+ }
+
+ skb = napi_build_skb(page_address(dm->page), PAGE_SIZE);
+ if (!skb) {
+ u64_stats_update_begin(&priv->syncp);
+ priv->stats.rx_dropped++;
+ priv->alloc_fail++;
+ u64_stats_update_end(&priv->syncp);
+ tn40_recycle_rx_buffer(priv, rxdd);
+ break;
+ }
+ skb_mark_for_recycle(skb);
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, priv->ndev);
+ skb->ip_summed =
+ (pkt_id == 0) ? CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
+ if (TN40_GET_RXD_VTAG(rxd_val1))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ TN40_GET_RXD_VLAN_TCI(rxd_vlan));
+
+ dm->page = NULL;
+ tn40_rxdb_free_elem(db, idx);
+
+ napi_gro_receive(&priv->napi, skb);
+
+ u64_stats_update_begin(&priv->syncp);
+ priv->stats.rx_bytes += len;
+ u64_stats_update_end(&priv->syncp);
+
+ if (unlikely(++done >= budget))
+ break;
+ }
+ u64_stats_update_begin(&priv->syncp);
+ priv->stats.rx_packets += done;
+ u64_stats_update_end(&priv->syncp);
+ /* FIXME: Do something to minimize pci accesses */
+ tn40_write_reg(priv, f->m.reg_rptr, f->m.rptr & TN40_TXF_WPTR_WR_PTR);
+ tn40_rx_alloc_buffers(priv);
+ return done;
+}
+
+/* TX HW/SW interaction overview
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * There are 2 types of TX communication channels between driver and NIC.
+ * 1) TX Free Fifo - TXF - Holds ack descriptors for sent packets.
+ * 2) TX Data Fifo - TXD - Holds descriptors of full buffers.
+ *
+ * Currently the NIC supports TSO, checksumming and gather DMA;
+ * UFO and IP fragmentation are on the way.
+ *
+ * TX SW Data Structures
+ * ~~~~~~~~~~~~~~~~~~~~~
+ * TXDB is used to keep track of all skbs owned by SW and their DMA addresses.
+ * For the TX case, ownership lasts from getting the packet via hard_xmit
+ * until the HW acknowledges sending the packet by TXF descriptors.
+ * TXDB is implemented as a cyclic buffer.
+ *
+ * FIFO objects keep info about the fifo's size and location, relevant HW
+ * registers, usage and skb db. Each RXD and RXF fifo has its own fifo
+ * structure, implemented as a simple struct.
+ *
+ * TX SW Execution Flow
+ * ~~~~~~~~~~~~~~~~~~~~
+ * OS calls the driver's hard_xmit method with a packet to send. The driver
+ * creates DMA mappings, builds TXD descriptors and kicks the HW by updating
+ * TXD WPTR.
+ *
+ * When a packet is sent, the HW writes a TXF descriptor and the SW
+ * frees the original skb. To prevent TXD fifo overflow without
+ * reading HW registers every time, the SW deploys the "tx level"
+ * technique. Upon startup, the tx level is initialized to the TXD fifo
+ * length. For every sent packet, the SW gets its TXD descriptor size
+ * (from a pre-calculated array) and subtracts it from the tx level. The
+ * size is also stored in the txdb. When a TXF ack arrives, the SW fetches
+ * the size of the original TXD descriptor from the txdb and adds it
+ * back to the tx level. When the tx level drops below some predefined
+ * threshold, the driver stops the TX queue. When the tx level rises
+ * above that threshold, the tx queue is enabled again.
+ *
+ * This technique avoids excessive reading of RPTR and WPTR registers.
+ * As our benchmarks show, it adds 1.5 Gbit/sec to the NIC's throughput.
+ */
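+/* Rough example of the "tx level" accounting described above (numbers
+ * illustrative): tx_level starts at TN40_MAX_TX_LEVEL. Sending a packet
+ * with two fragments consumes tn40_txd_sizes[2].bytes = 0x38, so
+ * tx_level -= 0x38 and the same size is recorded (negated) in the txdb.
+ * When the TXF ack for that packet is reaped in tn40_tx_cleanup(),
+ * tx_level += 0x38. The queue is stopped once tx_level drops below
+ * TN40_MIN_TX_LEVEL and is woken again at TN40_MAX_TX_LEVEL / 2.
+ */
+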
+static void tn40_do_tx_db_ptr_next(struct tn40_txdb *db,
+ struct tn40_tx_map **pptr)
+{
+ ++*pptr;
+ if (unlikely(*pptr == db->end))
+ *pptr = db->start;
+}
+
+static void tn40_tx_db_inc_rptr(struct tn40_txdb *db)
+{
+ tn40_do_tx_db_ptr_next(db, &db->rptr);
+}
+
+static void tn40_tx_db_inc_wptr(struct tn40_txdb *db)
+{
+ tn40_do_tx_db_ptr_next(db, &db->wptr);
+}
+
+static int tn40_tx_db_init(struct tn40_txdb *d, int sz_type)
+{
+ int memsz = TN40_FIFO_SIZE * (1 << (sz_type + 1));
+
+ d->start = vzalloc(memsz);
+ if (!d->start)
+ return -ENOMEM;
+ /* To differentiate between an empty db state and a full db
+ * state, at least one element must always stay empty, so that
+ * rptr == wptr can only mean that the db is empty.
+ */
+ d->size = memsz / sizeof(struct tn40_tx_map) - 1;
+ d->end = d->start + d->size + 1; /* just after last element */
+
+ /* All dbs are created empty */
+ d->rptr = d->start;
+ d->wptr = d->start;
+ return 0;
+}
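+
+/* For illustration (size assumed): if memsz holds 5 tn40_tx_map slots,
+ * d->size = 4 and d->end = d->start + 5. rptr == wptr always means
+ * "empty", and the db is considered full once the writer is one slot
+ * behind the reader, so at most 4 of the 5 slots are ever in use at
+ * the same time.
+ */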
+
+static void tn40_tx_db_close(struct tn40_txdb *d)
+{
+ if (d->start) {
+ vfree(d->start);
+ d->start = NULL;
+ }
+}
+
+/* Sizes of tx desc (including padding if needed) as a function of the SKB's
+ * frag number:
+ * 7 is the number of lwords in a txd with one phys buffer
+ * 3 is the number of lwords used for every additional phys buffer
+ * for (i = 0; i < TN40_MAX_PBL; i++) {
+ * lwords = 7 + (i * 3);
+ * if (lwords & 1)
+ * lwords++; pad it with 1 lword
+ * tn40_txd_sizes[i].bytes = lwords << 2;
+ * tn40_txd_sizes[i].qwords = lwords >> 1;
+ * }
+ */
+static struct {
+ u16 bytes;
+ u16 qwords; /* qword = 64 bit */
+} tn40_txd_sizes[] = {
+ {0x20, 0x04},
+ {0x28, 0x05},
+ {0x38, 0x07},
+ {0x40, 0x08},
+ {0x50, 0x0a},
+ {0x58, 0x0b},
+ {0x68, 0x0d},
+ {0x70, 0x0e},
+ {0x80, 0x10},
+ {0x88, 0x11},
+ {0x98, 0x13},
+ {0xa0, 0x14},
+ {0xb0, 0x16},
+ {0xb8, 0x17},
+ {0xc8, 0x19},
+ {0xd0, 0x1a},
+ {0xe0, 0x1c},
+ {0xe8, 0x1d},
+ {0xf8, 0x1f},
+};
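+
+/* Sanity check of the generator above, e.g. for nr_frags = 1:
+ * lwords = 7 + 1 * 3 = 10 (already even, no padding), so
+ * bytes = 10 << 2 = 0x28 and qwords = 10 >> 1 = 0x05, matching
+ * tn40_txd_sizes[1]. For nr_frags = 0, lwords = 7 is padded to 8,
+ * giving {0x20, 0x04}.
+ */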
+
+static void tn40_pbl_set(struct tn40_pbl *pbl, dma_addr_t dma, int len)
+{
+ pbl->len = cpu_to_le32(len);
+ pbl->pa_lo = cpu_to_le32(lower_32_bits(dma));
+ pbl->pa_hi = cpu_to_le32(upper_32_bits(dma));
+}
+
+static void tn40_txdb_set(struct tn40_txdb *db, dma_addr_t dma, int len)
+{
+ db->wptr->len = len;
+ db->wptr->addr.dma = dma;
+}
+
+struct tn40_mapping_info {
+ dma_addr_t dma;
+ size_t size;
+};
+
+/**
+ * tn40_tx_map_skb - create and store DMA mappings for skb's data blocks
+ * @priv: NIC private structure
+ * @skb: socket buffer to map
+ * @txdd: pointer to tx descriptor to be updated
+ * @pkt_len: pointer to an unsigned int that receives the total packet length
+ *
+ * This function creates DMA mappings for skb's data blocks and writes them to
+ * PBL of a new tx descriptor. It also stores them in the tx db, so they could
+ * be unmapped after the data has been sent. It is the responsibility of the
+ * caller to make sure that there is enough space in the txdb. The last
+ * element holds a pointer to skb itself and is marked with a zero length.
+ *
+ * Return: 0 on success and negative value on error.
+ */
+static int tn40_tx_map_skb(struct tn40_priv *priv, struct sk_buff *skb,
+ struct tn40_txd_desc *txdd, unsigned int *pkt_len)
+{
+ struct tn40_mapping_info info[TN40_MAX_PBL];
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ struct tn40_pbl *pbl = &txdd->pbl[0];
+ struct tn40_txdb *db = &priv->txdb;
+ unsigned int size;
+ int i, len, ret;
+ dma_addr_t dma;
+
+ netdev_dbg(priv->ndev, "TX skb %p skbLen %d dataLen %d frags %d\n", skb,
+ skb->len, skb->data_len, nr_frags);
+ if (nr_frags > TN40_MAX_PBL - 1) {
+ ret = skb_linearize(skb);
+ if (ret)
+ return ret;
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ }
+ /* initial skb */
+ len = skb->len - skb->data_len;
+ dma = dma_map_single(&priv->pdev->dev, skb->data, len,
+ DMA_TO_DEVICE);
+ ret = dma_mapping_error(&priv->pdev->dev, dma);
+ if (ret)
+ return ret;
+
+ tn40_txdb_set(db, dma, len);
+ tn40_pbl_set(pbl++, db->wptr->addr.dma, db->wptr->len);
+ *pkt_len = db->wptr->len;
+
+ for (i = 0; i < nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ size = skb_frag_size(frag);
+ dma = skb_frag_dma_map(&priv->pdev->dev, frag, 0,
+ size, DMA_TO_DEVICE);
+
+ ret = dma_mapping_error(&priv->pdev->dev, dma);
+ if (ret)
+ goto mapping_error;
+ info[i].dma = dma;
+ info[i].size = size;
+ }
+
+ for (i = 0; i < nr_frags; i++) {
+ tn40_tx_db_inc_wptr(db);
+ tn40_txdb_set(db, info[i].dma, info[i].size);
+ tn40_pbl_set(pbl++, db->wptr->addr.dma, db->wptr->len);
+ *pkt_len += db->wptr->len;
+ }
+
+ /* SHORT_PKT_FIX */
+ if (skb->len < TN40_SHORT_PACKET_SIZE)
+ ++nr_frags;
+
+ /* Add skb clean up info. */
+ tn40_tx_db_inc_wptr(db);
+ db->wptr->len = -tn40_txd_sizes[nr_frags].bytes;
+ db->wptr->addr.skb = skb;
+ tn40_tx_db_inc_wptr(db);
+
+ return 0;
+ mapping_error:
+ dma_unmap_page(&priv->pdev->dev, db->wptr->addr.dma, db->wptr->len,
+ DMA_TO_DEVICE);
+ for (; i > 0; i--)
+ dma_unmap_page(&priv->pdev->dev, info[i - 1].dma,
+ info[i - 1].size, DMA_TO_DEVICE);
+ return -ENOMEM;
+}
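+
+/* For illustration, for a linear head plus two fragments (and
+ * skb->len >= 60) the txdb entries written above are, in order:
+ * {head_dma, head_len}, {frag0_dma, frag0_len}, {frag1_dma, frag1_len}
+ * and finally the clean-up marker {skb, -tn40_txd_sizes[2].bytes},
+ * which tn40_tx_cleanup() later uses to restore the tx level and free
+ * the skb.
+ */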
+
+static int tn40_create_tx_ring(struct tn40_priv *priv)
+{
+ int ret;
+
+ ret = tn40_fifo_alloc(priv, &priv->txd_fifo0.m, priv->txd_size,
+ TN40_REG_TXD_CFG0_0, TN40_REG_TXD_CFG1_0,
+ TN40_REG_TXD_RPTR_0, TN40_REG_TXD_WPTR_0);
+ if (ret)
+ return ret;
+
+ ret = tn40_fifo_alloc(priv, &priv->txf_fifo0.m, priv->txf_size,
+ TN40_REG_TXF_CFG0_0, TN40_REG_TXF_CFG1_0,
+ TN40_REG_TXF_RPTR_0, TN40_REG_TXF_WPTR_0);
+ if (ret)
+ goto err_free_txd;
+
+ /* The TX db has to keep mappings for all packets sent (on
+ * TxD) and not yet reclaimed (on TxF).
+ */
+ ret = tn40_tx_db_init(&priv->txdb, max(priv->txd_size, priv->txf_size));
+ if (ret)
+ goto err_free_txf;
+
+ /* SHORT_PKT_FIX */
+ priv->b0_len = 64;
+ priv->b0_va = dma_alloc_coherent(&priv->pdev->dev, priv->b0_len,
+ &priv->b0_dma, GFP_KERNEL);
+ if (!priv->b0_va)
+ goto err_free_db;
+
+ priv->tx_level = TN40_MAX_TX_LEVEL;
+ priv->tx_update_mark = priv->tx_level - 1024;
+ return 0;
+err_free_db:
+ tn40_tx_db_close(&priv->txdb);
+err_free_txf:
+ tn40_fifo_free(priv, &priv->txf_fifo0.m);
+err_free_txd:
+ tn40_fifo_free(priv, &priv->txd_fifo0.m);
+ return -ENOMEM;
+}
+
+/**
+ * tn40_tx_space - Calculate the available space in the TX fifo.
+ * @priv: NIC private structure
+ *
+ * Return: available space in TX fifo in bytes
+ */
+static int tn40_tx_space(struct tn40_priv *priv)
+{
+ struct tn40_txd_fifo *f = &priv->txd_fifo0;
+ int fsize;
+
+ f->m.rptr = tn40_read_reg(priv, f->m.reg_rptr) & TN40_TXF_WPTR_WR_PTR;
+ fsize = f->m.rptr - f->m.wptr;
+ if (fsize <= 0)
+ fsize = f->m.memsz + fsize;
+ return fsize;
+}
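+
+/* Example of the wrap-around arithmetic above (values assumed): with
+ * memsz = 4096, rptr = 100 and wptr = 3900, fsize = 100 - 3900 = -3800,
+ * which is corrected to 4096 - 3800 = 296 bytes of free space between
+ * the write pointer and the read pointer.
+ */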
+
+#define TN40_TXD_FULL_CHECKSUM 7
+
+static netdev_tx_t tn40_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct tn40_priv *priv = netdev_priv(ndev);
+ struct tn40_txd_fifo *f = &priv->txd_fifo0;
+ int txd_checksum = TN40_TXD_FULL_CHECKSUM;
+ struct tn40_txd_desc *txdd;
+ int nr_frags, len, err;
+ unsigned int pkt_len;
+ int txd_vlan_id = 0;
+ int txd_lgsnd = 0;
+ int txd_vtag = 0;
+ int txd_mss = 0;
+
+ /* Build tx descriptor */
+ txdd = (struct tn40_txd_desc *)(f->m.va + f->m.wptr);
+ err = tn40_tx_map_skb(priv, skb, txdd, &pkt_len);
+ if (err) {
+ u64_stats_update_begin(&priv->syncp);
+ priv->stats.tx_dropped++;
+ u64_stats_update_end(&priv->syncp);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL))
+ txd_checksum = 0;
+
+ if (skb_shinfo(skb)->gso_size) {
+ txd_mss = skb_shinfo(skb)->gso_size;
+ txd_lgsnd = 1;
+ netdev_dbg(priv->ndev, "skb %p pkt len %d gso size = %d\n", skb,
+ pkt_len, txd_mss);
+ }
+ if (skb_vlan_tag_present(skb)) {
+ /* Don't cut VLAN ID to 12 bits */
+ txd_vlan_id = skb_vlan_tag_get(skb);
+ txd_vtag = 1;
+ }
+ txdd->va_hi = 0;
+ txdd->va_lo = 0;
+ txdd->length = cpu_to_le16(pkt_len);
+ txdd->mss = cpu_to_le16(txd_mss);
+ txdd->txd_val1 =
+ cpu_to_le32(TN40_TXD_W1_VAL
+ (tn40_txd_sizes[nr_frags].qwords, txd_checksum,
+ txd_vtag, txd_lgsnd, txd_vlan_id));
+ netdev_dbg(priv->ndev, "=== w1 qwords[%d] %d =====\n", nr_frags,
+ tn40_txd_sizes[nr_frags].qwords);
+ netdev_dbg(priv->ndev, "=== TxD desc =====================\n");
+ netdev_dbg(priv->ndev, "=== w1: 0x%x ================\n",
+ txdd->txd_val1);
+ netdev_dbg(priv->ndev, "=== w2: mss 0x%x len 0x%x\n", txdd->mss,
+ txdd->length);
+ /* SHORT_PKT_FIX */
+ if (pkt_len < TN40_SHORT_PACKET_SIZE) {
+ struct tn40_pbl *pbl = &txdd->pbl[++nr_frags];
+
+ txdd->length = cpu_to_le16(TN40_SHORT_PACKET_SIZE);
+ txdd->txd_val1 =
+ cpu_to_le32(TN40_TXD_W1_VAL
+ (tn40_txd_sizes[nr_frags].qwords,
+ txd_checksum, txd_vtag, txd_lgsnd,
+ txd_vlan_id));
+ pbl->len = cpu_to_le32(TN40_SHORT_PACKET_SIZE - pkt_len);
+ pbl->pa_lo = cpu_to_le32(lower_32_bits(priv->b0_dma));
+ pbl->pa_hi = cpu_to_le32(upper_32_bits(priv->b0_dma));
+ netdev_dbg(priv->ndev, "=== SHORT_PKT_FIX ==============\n");
+ netdev_dbg(priv->ndev, "=== nr_frags : %d ==============\n",
+ nr_frags);
+ }
+
+ /* Increment TXD write pointer. In case of fifo wrapping, copy
+ * the remainder of the descriptor to the beginning.
+ */
+ f->m.wptr += tn40_txd_sizes[nr_frags].bytes;
+ len = f->m.wptr - f->m.memsz;
+ if (unlikely(len >= 0)) {
+ f->m.wptr = len;
+ if (len > 0)
+ memcpy(f->m.va, f->m.va + f->m.memsz, len);
+ }
+ /* Force memory writes to complete before letting the HW know
+ * there are new descriptors to fetch.
+ */
+ wmb();
+
+ priv->tx_level -= tn40_txd_sizes[nr_frags].bytes;
+ if (priv->tx_level > priv->tx_update_mark) {
+ tn40_write_reg(priv, f->m.reg_wptr,
+ f->m.wptr & TN40_TXF_WPTR_WR_PTR);
+ } else {
+ if (priv->tx_noupd++ > TN40_NO_UPD_PACKETS) {
+ priv->tx_noupd = 0;
+ tn40_write_reg(priv, f->m.reg_wptr,
+ f->m.wptr & TN40_TXF_WPTR_WR_PTR);
+ }
+ }
+
+ u64_stats_update_begin(&priv->syncp);
+ priv->stats.tx_packets++;
+ priv->stats.tx_bytes += pkt_len;
+ u64_stats_update_end(&priv->syncp);
+ if (priv->tx_level < TN40_MIN_TX_LEVEL) {
+ netdev_dbg(priv->ndev, "TX Q STOP level %d\n", priv->tx_level);
+ netif_stop_queue(ndev);
+ }
+
+ return NETDEV_TX_OK;
+}
+
+static void tn40_tx_cleanup(struct tn40_priv *priv)
+{
+ struct tn40_txf_fifo *f = &priv->txf_fifo0;
+ struct tn40_txdb *db = &priv->txdb;
+ int tx_level = 0;
+
+ f->m.wptr = tn40_read_reg(priv, f->m.reg_wptr) & TN40_TXF_WPTR_MASK;
+
+ netif_tx_lock(priv->ndev);
+ while (f->m.wptr != f->m.rptr) {
+ f->m.rptr += TN40_TXF_DESC_SZ;
+ f->m.rptr &= f->m.size_mask;
+ /* Unmap all fragments */
+ /* The tx_map entries containing the DMA addresses come first */
+ do {
+ dma_addr_t addr = db->rptr->addr.dma;
+ size_t size = db->rptr->len;
+
+ netif_tx_unlock(priv->ndev);
+ dma_unmap_page(&priv->pdev->dev, addr,
+ size, DMA_TO_DEVICE);
+ netif_tx_lock(priv->ndev);
+ tn40_tx_db_inc_rptr(db);
+ } while (db->rptr->len > 0);
+ tx_level -= db->rptr->len; /* '-' Because the len is negative */
+
+ /* Now should come skb pointer - free it */
+ dev_kfree_skb_any(db->rptr->addr.skb);
+ netdev_dbg(priv->ndev, "dev_kfree_skb_any %p %d\n",
+ db->rptr->addr.skb, -db->rptr->len);
+ tn40_tx_db_inc_rptr(db);
+ }
+
+ /* Let the HW know which TXF descriptors were cleaned */
+ tn40_write_reg(priv, f->m.reg_rptr, f->m.rptr & TN40_TXF_WPTR_WR_PTR);
+
+ /* We reclaimed resources, so in case the Q is stopped by xmit
+ * callback, we resume the transmission and use tx_lock to
+ * synchronize with xmit.
+ */
+ priv->tx_level += tx_level;
+ if (priv->tx_noupd) {
+ priv->tx_noupd = 0;
+ tn40_write_reg(priv, priv->txd_fifo0.m.reg_wptr,
+ priv->txd_fifo0.m.wptr & TN40_TXF_WPTR_WR_PTR);
+ }
+ if (unlikely(netif_queue_stopped(priv->ndev) &&
+ netif_carrier_ok(priv->ndev) &&
+ (priv->tx_level >= TN40_MAX_TX_LEVEL / 2))) {
+ netdev_dbg(priv->ndev, "TX Q WAKE level %d\n", priv->tx_level);
+ netif_wake_queue(priv->ndev);
+ }
+ netif_tx_unlock(priv->ndev);
+}
+
+static void tn40_tx_free_skbs(struct tn40_priv *priv)
+{
+ struct tn40_txdb *db = &priv->txdb;
+
+ while (db->rptr != db->wptr) {
+ if (likely(db->rptr->len))
+ dma_unmap_page(&priv->pdev->dev, db->rptr->addr.dma,
+ db->rptr->len, DMA_TO_DEVICE);
+ else
+ dev_kfree_skb(db->rptr->addr.skb);
+ tn40_tx_db_inc_rptr(db);
+ }
+}
+
+static void tn40_destroy_tx_ring(struct tn40_priv *priv)
+{
+ tn40_tx_free_skbs(priv);
+ tn40_fifo_free(priv, &priv->txd_fifo0.m);
+ tn40_fifo_free(priv, &priv->txf_fifo0.m);
+ tn40_tx_db_close(&priv->txdb);
+ /* SHORT_PKT_FIX */
+ if (priv->b0_len) {
+ dma_free_coherent(&priv->pdev->dev, priv->b0_len, priv->b0_va,
+ priv->b0_dma);
+ priv->b0_len = 0;
+ }
+}
+
+/**
+ * tn40_tx_push_desc - Push a descriptor to TxD fifo.
+ *
+ * @priv: NIC private structure
+ * @data: desc's data
+ * @size: desc's size
+ *
+ * This function pushes desc to TxD fifo and overlaps it if needed.
+ *
+ * This function does not check for available space, nor does it check
+ * that the data size is smaller than the fifo size. Checking for
+ * space is the responsibility of the caller.
+ */
+static void tn40_tx_push_desc(struct tn40_priv *priv, void *data, int size)
+{
+ struct tn40_txd_fifo *f = &priv->txd_fifo0;
+ int i = f->m.memsz - f->m.wptr;
+
+ if (size == 0)
+ return;
+
+ if (i > size) {
+ memcpy(f->m.va + f->m.wptr, data, size);
+ f->m.wptr += size;
+ } else {
+ memcpy(f->m.va + f->m.wptr, data, i);
+ f->m.wptr = size - i;
+ memcpy(f->m.va, data + i, f->m.wptr);
+ }
+ tn40_write_reg(priv, f->m.reg_wptr, f->m.wptr & TN40_TXF_WPTR_WR_PTR);
+}
+
+/**
+ * tn40_tx_push_desc_safe - push descriptor to TxD fifo in a safe way.
+ *
+ * @priv: NIC private structure
+ * @data: descriptor data
+ * @size: descriptor size
+ *
+ * This function does check for available space and, if necessary,
+ * waits for the NIC to read existing data before writing new data.
+ */
+static void tn40_tx_push_desc_safe(struct tn40_priv *priv, void *data, int size)
+{
+ int timer = 0;
+
+ while (size > 0) {
+ /* We subtract 8 because when the fifo is full, rptr ==
+ * wptr, which also means that the fifo is empty. We can
+ * tell the difference, but could the HW do the same?
+ */
+ int avail = tn40_tx_space(priv) - 8;
+
+ if (avail <= 0) {
+ if (timer++ > 300) /* Prevent endless loop */
+ break;
+ /* Give the HW a chance to clean the fifo */
+ usleep_range(50, 60);
+ continue;
+ }
+ avail = min(avail, size);
+ netdev_dbg(priv->ndev,
+ "about to push %d bytes starting %p size %d\n",
+ avail, data, size);
+ tn40_tx_push_desc(priv, data, avail);
+ size -= avail;
+ data += avail;
+ }
+}
+
+int tn40_set_link_speed(struct tn40_priv *priv, u32 speed)
+{
+ u32 val;
+ int i;
+
+ netdev_dbg(priv->ndev, "speed %d\n", speed);
+ switch (speed) {
+ case SPEED_10000:
+ case SPEED_5000:
+ case SPEED_2500:
+ netdev_dbg(priv->ndev, "link_speed %d\n", speed);
+
+ tn40_write_reg(priv, 0x1010, 0x217); /*ETHSD.REFCLK_CONF */
+ tn40_write_reg(priv, 0x104c, 0x4c); /*ETHSD.L0_RX_PCNT */
+ tn40_write_reg(priv, 0x1050, 0x4c); /*ETHSD.L1_RX_PCNT */
+ tn40_write_reg(priv, 0x1054, 0x4c); /*ETHSD.L2_RX_PCNT */
+ tn40_write_reg(priv, 0x1058, 0x4c); /*ETHSD.L3_RX_PCNT */
+ tn40_write_reg(priv, 0x102c, 0x434); /*ETHSD.L0_TX_PCNT */
+ tn40_write_reg(priv, 0x1030, 0x434); /*ETHSD.L1_TX_PCNT */
+ tn40_write_reg(priv, 0x1034, 0x434); /*ETHSD.L2_TX_PCNT */
+ tn40_write_reg(priv, 0x1038, 0x434); /*ETHSD.L3_TX_PCNT */
+ tn40_write_reg(priv, 0x6300, 0x0400); /*MAC.PCS_CTRL */
+
+ tn40_write_reg(priv, 0x1018, 0x00); /*Mike2 */
+ udelay(5);
+ tn40_write_reg(priv, 0x1018, 0x04); /*Mike2 */
+ udelay(5);
+ tn40_write_reg(priv, 0x1018, 0x06); /*Mike2 */
+ udelay(5);
+ /*MikeFix1 */
+ /*L0: 0x103c , L1: 0x1040 , L2: 0x1044 , L3: 0x1048 =0x81644 */
+ tn40_write_reg(priv, 0x103c, 0x81644); /*ETHSD.L0_TX_DCNT */
+ tn40_write_reg(priv, 0x1040, 0x81644); /*ETHSD.L1_TX_DCNT */
+ tn40_write_reg(priv, 0x1044, 0x81644); /*ETHSD.L2_TX_DCNT */
+ tn40_write_reg(priv, 0x1048, 0x81644); /*ETHSD.L3_TX_DCNT */
+ tn40_write_reg(priv, 0x1014, 0x043); /*ETHSD.INIT_STAT */
+ for (i = 1000; i; i--) {
+ usleep_range(50, 60);
+ /*ETHSD.INIT_STAT */
+ val = tn40_read_reg(priv, 0x1014);
+ if (val & (1 << 9)) {
+ /*ETHSD.INIT_STAT */
+ tn40_write_reg(priv, 0x1014, 0x3);
+ /*ETHSD.INIT_STAT */
+ val = tn40_read_reg(priv, 0x1014);
+
+ break;
+ }
+ }
+ if (!i)
+ netdev_err(priv->ndev, "MAC init timeout!\n");
+
+ tn40_write_reg(priv, 0x6350, 0x0); /*MAC.PCS_IF_MODE */
+ tn40_write_reg(priv, TN40_REG_CTRLST, 0xC13); /*0x93//0x13 */
+ tn40_write_reg(priv, 0x111c, 0x7ff); /*MAC.MAC_RST_CNT */
+ usleep_range(2000, 2100);
+
+ tn40_write_reg(priv, 0x111c, 0x0); /*MAC.MAC_RST_CNT */
+ break;
+
+ case SPEED_1000:
+ case SPEED_100:
+ tn40_write_reg(priv, 0x1010, 0x613); /*ETHSD.REFCLK_CONF */
+ tn40_write_reg(priv, 0x104c, 0x4d); /*ETHSD.L0_RX_PCNT */
+ tn40_write_reg(priv, 0x1050, 0x0); /*ETHSD.L1_RX_PCNT */
+ tn40_write_reg(priv, 0x1054, 0x0); /*ETHSD.L2_RX_PCNT */
+ tn40_write_reg(priv, 0x1058, 0x0); /*ETHSD.L3_RX_PCNT */
+ tn40_write_reg(priv, 0x102c, 0x35); /*ETHSD.L0_TX_PCNT */
+ tn40_write_reg(priv, 0x1030, 0x0); /*ETHSD.L1_TX_PCNT */
+ tn40_write_reg(priv, 0x1034, 0x0); /*ETHSD.L2_TX_PCNT */
+ tn40_write_reg(priv, 0x1038, 0x0); /*ETHSD.L3_TX_PCNT */
+ tn40_write_reg(priv, 0x6300, 0x01140); /*MAC.PCS_CTRL */
+
+ tn40_write_reg(priv, 0x1014, 0x043); /*ETHSD.INIT_STAT */
+ for (i = 1000; i; i--) {
+ usleep_range(50, 60);
+ val = tn40_read_reg(priv, 0x1014); /*ETHSD.INIT_STAT */
+ if (val & (1 << 9)) {
+ /*ETHSD.INIT_STAT */
+ tn40_write_reg(priv, 0x1014, 0x3);
+ /*ETHSD.INIT_STAT */
+ val = tn40_read_reg(priv, 0x1014);
+
+ break;
+ }
+ }
+ if (!i)
+ netdev_err(priv->ndev, "MAC init timeout!\n");
+
+ tn40_write_reg(priv, 0x6350, 0x2b); /*MAC.PCS_IF_MODE 1g */
+ tn40_write_reg(priv, 0x6310, 0x9801); /*MAC.PCS_DEV_AB */
+
+ tn40_write_reg(priv, 0x6314, 0x1); /*MAC.PCS_PART_AB */
+ tn40_write_reg(priv, 0x6348, 0xc8); /*MAC.PCS_LINK_LO */
+ tn40_write_reg(priv, 0x634c, 0xc8); /*MAC.PCS_LINK_HI */
+ usleep_range(50, 60);
+ tn40_write_reg(priv, TN40_REG_CTRLST, 0xC13); /*0x93//0x13 */
+ tn40_write_reg(priv, 0x111c, 0x7ff); /*MAC.MAC_RST_CNT */
+ usleep_range(2000, 2100);
+
+ tn40_write_reg(priv, 0x111c, 0x0); /*MAC.MAC_RST_CNT */
+ tn40_write_reg(priv, 0x6300, 0x1140); /*MAC.PCS_CTRL */
+ break;
+
+ case 0: /* Link down */
+ tn40_write_reg(priv, 0x104c, 0x0); /*ETHSD.L0_RX_PCNT */
+ tn40_write_reg(priv, 0x1050, 0x0); /*ETHSD.L1_RX_PCNT */
+ tn40_write_reg(priv, 0x1054, 0x0); /*ETHSD.L2_RX_PCNT */
+ tn40_write_reg(priv, 0x1058, 0x0); /*ETHSD.L3_RX_PCNT */
+ tn40_write_reg(priv, 0x102c, 0x0); /*ETHSD.L0_TX_PCNT */
+ tn40_write_reg(priv, 0x1030, 0x0); /*ETHSD.L1_TX_PCNT */
+ tn40_write_reg(priv, 0x1034, 0x0); /*ETHSD.L2_TX_PCNT */
+ tn40_write_reg(priv, 0x1038, 0x0); /*ETHSD.L3_TX_PCNT */
+
+ tn40_write_reg(priv, TN40_REG_CTRLST, 0x800);
+ tn40_write_reg(priv, 0x111c, 0x7ff); /*MAC.MAC_RST_CNT */
+ usleep_range(2000, 2100);
+
+ tn40_write_reg(priv, 0x111c, 0x0); /*MAC.MAC_RST_CNT */
+ break;
+
+ default:
+ netdev_err(priv->ndev,
+ "Link speed was not identified yet (%d)\n", speed);
+ speed = 0;
+ break;
+ }
+ return speed;
+}
+
+static void tn40_link_changed(struct tn40_priv *priv)
+{
+ u32 link = tn40_read_reg(priv,
+ TN40_REG_MAC_LNK_STAT) & TN40_MAC_LINK_STAT;
+
+ netdev_dbg(priv->ndev, "link changed %u\n", link);
+}
+
+static void tn40_isr_extra(struct tn40_priv *priv, u32 isr)
+{
+ if (isr & (TN40_IR_LNKCHG0 | TN40_IR_LNKCHG1 | TN40_IR_TMR0)) {
+ netdev_dbg(priv->ndev, "isr = 0x%x\n", isr);
+ tn40_link_changed(priv);
+ }
+}
+
+static irqreturn_t tn40_isr_napi(int irq, void *dev)
+{
+ struct tn40_priv *priv = netdev_priv((struct net_device *)dev);
+ u32 isr;
+
+ isr = tn40_read_reg(priv, TN40_REG_ISR_MSK0);
+
+ if (unlikely(!isr)) {
+ tn40_enable_interrupts(priv);
+ return IRQ_NONE; /* Not our interrupt */
+ }
+
+ if (isr & TN40_IR_EXTRA)
+ tn40_isr_extra(priv, isr);
+
+ if (isr & (TN40_IR_RX_DESC_0 | TN40_IR_TX_FREE_0 | TN40_IR_TMR1)) {
+ if (likely(napi_schedule_prep(&priv->napi))) {
+ __napi_schedule(&priv->napi);
+ return IRQ_HANDLED;
+ }
+ /* We get here if an interrupt has slipped into the
+ * small time window between these lines in
+ * tn40_poll: tn40_enable_interrupts(priv); return 0;
+ *
+ * Currently interrupts are disabled (since we read
+ * the ISR register) and we have failed to register
+ * the next poll. So we read the regs to trigger the
+ * chip and allow further interrupts.
+ */
+ tn40_read_reg(priv, TN40_REG_TXF_WPTR_0);
+ tn40_read_reg(priv, TN40_REG_RXD_WPTR_0);
+ }
+
+ tn40_enable_interrupts(priv);
+ return IRQ_HANDLED;
+}
+
+static int tn40_poll(struct napi_struct *napi, int budget)
+{
+ struct tn40_priv *priv = container_of(napi, struct tn40_priv, napi);
+ int work_done;
+
+ tn40_tx_cleanup(priv);
+
+ if (!budget)
+ return 0;
+
+ work_done = tn40_rx_receive(priv, budget);
+ if (work_done == budget)
+ return budget;
+
+ if (napi_complete_done(napi, work_done))
+ tn40_enable_interrupts(priv);
+ return work_done;
+}
+
+static int tn40_fw_load(struct tn40_priv *priv)
+{
+ const struct firmware *fw = NULL;
+ int master, ret;
+ u32 val;
+
+ ret = request_firmware(&fw, TN40_FIRMWARE_NAME, &priv->pdev->dev);
+ if (ret)
+ return ret;
+
+ master = tn40_read_reg(priv, TN40_REG_INIT_SEMAPHORE);
+ if (!tn40_read_reg(priv, TN40_REG_INIT_STATUS) && master) {
+ netdev_dbg(priv->ndev, "Loading FW...\n");
+ tn40_tx_push_desc_safe(priv, (void *)fw->data, fw->size);
+ msleep(100);
+ }
+ ret = read_poll_timeout(tn40_read_reg, val, val, 2000, 400000, false,
+ priv, TN40_REG_INIT_STATUS);
+ if (master)
+ tn40_write_reg(priv, TN40_REG_INIT_SEMAPHORE, 1);
+
+ if (ret) {
+ netdev_err(priv->ndev, "firmware loading failed\n");
+ netdev_dbg(priv->ndev, "VPC: 0x%x VIC: 0x%x STATUS: 0x%xd\n",
+ tn40_read_reg(priv, TN40_REG_VPC),
+ tn40_read_reg(priv, TN40_REG_VIC),
+ tn40_read_reg(priv, TN40_REG_INIT_STATUS));
+ ret = -EIO;
+ } else {
+ netdev_dbg(priv->ndev, "firmware loading success\n");
+ }
+ release_firmware(fw);
+ return ret;
+}
+
+static void tn40_restore_mac(struct net_device *ndev, struct tn40_priv *priv)
+{
+ u32 val;
+
+ netdev_dbg(priv->ndev, "mac0 =%x mac1 =%x mac2 =%x\n",
+ tn40_read_reg(priv, TN40_REG_UNC_MAC0_A),
+ tn40_read_reg(priv, TN40_REG_UNC_MAC1_A),
+ tn40_read_reg(priv, TN40_REG_UNC_MAC2_A));
+
+ val = (ndev->dev_addr[0] << 8) | (ndev->dev_addr[1]);
+ tn40_write_reg(priv, TN40_REG_UNC_MAC2_A, val);
+ val = (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]);
+ tn40_write_reg(priv, TN40_REG_UNC_MAC1_A, val);
+ val = (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]);
+ tn40_write_reg(priv, TN40_REG_UNC_MAC0_A, val);
+
+ /* More than IP MAC address */
+ tn40_write_reg(priv, TN40_REG_MAC_ADDR_0,
+ (ndev->dev_addr[3] << 24) | (ndev->dev_addr[2] << 16) |
+ (ndev->dev_addr[1] << 8) | (ndev->dev_addr[0]));
+ tn40_write_reg(priv, TN40_REG_MAC_ADDR_1,
+ (ndev->dev_addr[5] << 8) | (ndev->dev_addr[4]));
+
+ netdev_dbg(priv->ndev, "mac0 =%x mac1 =%x mac2 =%x\n",
+ tn40_read_reg(priv, TN40_REG_UNC_MAC0_A),
+ tn40_read_reg(priv, TN40_REG_UNC_MAC1_A),
+ tn40_read_reg(priv, TN40_REG_UNC_MAC2_A));
+}
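+
+/* Example of the layout programmed above: for the (made-up) address
+ * 00:11:22:33:44:55, UNC_MAC2_A = 0x0011, UNC_MAC1_A = 0x2233,
+ * UNC_MAC0_A = 0x4455, MAC_ADDR_0 = 0x33221100 and MAC_ADDR_1 = 0x5544.
+ */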
+
+static void tn40_hw_start(struct tn40_priv *priv)
+{
+ tn40_write_reg(priv, TN40_REG_FRM_LENGTH, 0X3FE0);
+ tn40_write_reg(priv, TN40_REG_GMAC_RXF_A, 0X10fd);
+ /*MikeFix1 */
+ /*L0: 0x103c , L1: 0x1040 , L2: 0x1044 , L3: 0x1048 =0x81644 */
+ tn40_write_reg(priv, 0x103c, 0x81644); /*ETHSD.L0_TX_DCNT */
+ tn40_write_reg(priv, 0x1040, 0x81644); /*ETHSD.L1_TX_DCNT */
+ tn40_write_reg(priv, 0x1044, 0x81644); /*ETHSD.L2_TX_DCNT */
+ tn40_write_reg(priv, 0x1048, 0x81644); /*ETHSD.L3_TX_DCNT */
+ tn40_write_reg(priv, TN40_REG_RX_FIFO_SECTION, 0x10);
+ tn40_write_reg(priv, TN40_REG_TX_FIFO_SECTION, 0xE00010);
+ tn40_write_reg(priv, TN40_REG_RX_FULLNESS, 0);
+ tn40_write_reg(priv, TN40_REG_TX_FULLNESS, 0);
+
+ tn40_write_reg(priv, TN40_REG_VGLB, 0);
+ tn40_write_reg(priv, TN40_REG_MAX_FRAME_A,
+ priv->rxf_fifo0.m.pktsz & TN40_MAX_FRAME_AB_VAL);
+ tn40_write_reg(priv, TN40_REG_RDINTCM0, priv->rdintcm);
+ tn40_write_reg(priv, TN40_REG_RDINTCM2, 0);
+
+ /* old val = 0x300064 */
+ tn40_write_reg(priv, TN40_REG_TDINTCM0, priv->tdintcm);
+
+ /* Enable timer interrupt once in 2 secs. */
+ tn40_restore_mac(priv->ndev, priv);
+
+ /* Pause frame */
+ tn40_write_reg(priv, 0x12E0, 0x28);
+ tn40_write_reg(priv, TN40_REG_PAUSE_QUANT, 0xFFFF);
+ tn40_write_reg(priv, 0x6064, 0xF);
+
+ tn40_write_reg(priv, TN40_REG_GMAC_RXF_A,
+ TN40_GMAC_RX_FILTER_OSEN | TN40_GMAC_RX_FILTER_TXFC |
+ TN40_GMAC_RX_FILTER_AM | TN40_GMAC_RX_FILTER_AB);
+
+ tn40_enable_interrupts(priv);
+}
+
+static int tn40_hw_reset(struct tn40_priv *priv)
+{
+ u32 val;
+
+ /* Reset sequences: read, write 1, read, write 0 */
+ val = tn40_read_reg(priv, TN40_REG_CLKPLL);
+ tn40_write_reg(priv, TN40_REG_CLKPLL, (val | TN40_CLKPLL_SFTRST) + 0x8);
+ usleep_range(50, 60);
+ val = tn40_read_reg(priv, TN40_REG_CLKPLL);
+ tn40_write_reg(priv, TN40_REG_CLKPLL, val & ~TN40_CLKPLL_SFTRST);
+
+ /* Check that the PLLs are locked and reset ended */
+ val = read_poll_timeout(tn40_read_reg, val,
+ (val & TN40_CLKPLL_LKD) == TN40_CLKPLL_LKD,
+ 10000, 700000, false, priv, TN40_REG_CLKPLL);
+ if (val)
+ return -EIO;
+
+ usleep_range(50, 60);
+ /* Do any PCI-E read transaction */
+ tn40_read_reg(priv, TN40_REG_RXD_CFG0_0);
+ return 0;
+}
+
+static void tn40_sw_reset(struct tn40_priv *priv)
+{
+ int i, ret;
+ u32 val;
+
+ /* 1. load MAC (obsolete) */
+ /* 2. disable Rx (and Tx) */
+ tn40_write_reg(priv, TN40_REG_GMAC_RXF_A, 0);
+ msleep(100);
+ /* 3. Disable port */
+ tn40_write_reg(priv, TN40_REG_DIS_PORT, 1);
+ /* 4. Disable queue */
+ tn40_write_reg(priv, TN40_REG_DIS_QU, 1);
+ /* 5. Wait until hw is disabled */
+ ret = read_poll_timeout(tn40_read_reg, val, val & 1, 10000, 500000,
+ false, priv, TN40_REG_RST_PORT);
+ if (ret)
+ netdev_err(priv->ndev, "SW reset timeout. continuing anyway\n");
+
+ /* 6. Disable interrupts */
+ tn40_write_reg(priv, TN40_REG_RDINTCM0, 0);
+ tn40_write_reg(priv, TN40_REG_TDINTCM0, 0);
+ tn40_write_reg(priv, TN40_REG_IMR, 0);
+ tn40_read_reg(priv, TN40_REG_ISR);
+
+ /* 7. Reset queue */
+ tn40_write_reg(priv, TN40_REG_RST_QU, 1);
+ /* 8. Reset port */
+ tn40_write_reg(priv, TN40_REG_RST_PORT, 1);
+ /* 9. Zero all read and write pointers */
+ for (i = TN40_REG_TXD_WPTR_0; i <= TN40_REG_TXF_RPTR_3; i += 0x10)
+ tn40_write_reg(priv, i, 0);
+ /* 10. Unset port disable */
+ tn40_write_reg(priv, TN40_REG_DIS_PORT, 0);
+ /* 11. Unset queue disable */
+ tn40_write_reg(priv, TN40_REG_DIS_QU, 0);
+ /* 12. Unset queue reset */
+ tn40_write_reg(priv, TN40_REG_RST_QU, 0);
+ /* 13. Unset port reset */
+ tn40_write_reg(priv, TN40_REG_RST_PORT, 0);
+ /* 14. Enable Rx */
+ /* Skipped. will be done later */
+}
+
+static int tn40_start(struct tn40_priv *priv)
+{
+ int ret;
+
+ ret = tn40_create_tx_ring(priv);
+ if (ret) {
+ netdev_err(priv->ndev, "failed to tx init %d\n", ret);
+ return ret;
+ }
+
+ ret = tn40_create_rx_ring(priv);
+ if (ret) {
+ netdev_err(priv->ndev, "failed to rx init %d\n", ret);
+ goto err_tx_ring;
+ }
+
+ tn40_rx_alloc_buffers(priv);
+ if (tn40_rxdb_available(priv->rxdb0) != 1) {
+ ret = -ENOMEM;
+ netdev_err(priv->ndev, "failed to allocate rx buffers\n");
+ goto err_rx_ring;
+ }
+
+ ret = request_irq(priv->pdev->irq, &tn40_isr_napi, IRQF_SHARED,
+ priv->ndev->name, priv->ndev);
+ if (ret) {
+ netdev_err(priv->ndev, "failed to request irq %d\n", ret);
+ goto err_rx_ring;
+ }
+
+ tn40_hw_start(priv);
+ return 0;
+err_rx_ring:
+ tn40_destroy_rx_ring(priv);
+err_tx_ring:
+ tn40_destroy_tx_ring(priv);
+ return ret;
+}
+
+static void tn40_stop(struct tn40_priv *priv)
+{
+ tn40_disable_interrupts(priv);
+ free_irq(priv->pdev->irq, priv->ndev);
+ tn40_sw_reset(priv);
+ tn40_destroy_tx_ring(priv);
+ tn40_destroy_rx_ring(priv);
+}
+
+static int tn40_close(struct net_device *ndev)
+{
+ struct tn40_priv *priv = netdev_priv(ndev);
+
+ phylink_stop(priv->phylink);
+ phylink_disconnect_phy(priv->phylink);
+
+ napi_disable(&priv->napi);
+ netif_napi_del(&priv->napi);
+ tn40_stop(priv);
+ return 0;
+}
+
+static int tn40_open(struct net_device *dev)
+{
+ struct tn40_priv *priv = netdev_priv(dev);
+ int ret;
+
+ ret = phylink_connect_phy(priv->phylink, priv->phydev);
+ if (ret) {
+ netdev_err(dev, "failed to connect to phy %d\n", ret);
+ return ret;
+ }
+ tn40_sw_reset(priv);
+ ret = tn40_start(priv);
+ if (ret) {
+ phylink_disconnect_phy(priv->phylink);
+ netdev_err(dev, "failed to start %d\n", ret);
+ return ret;
+ }
+ napi_enable(&priv->napi);
+ phylink_start(priv->phylink);
+ netif_start_queue(priv->ndev);
+ return 0;
+}
+
+static void __tn40_vlan_rx_vid(struct net_device *ndev, uint16_t vid,
+ int enable)
+{
+ struct tn40_priv *priv = netdev_priv(ndev);
+ u32 reg, bit, val;
+
+ netdev_dbg(priv->ndev, "vid =%d value =%d\n", (int)vid, enable);
+ reg = TN40_REG_VLAN_0 + (vid / 32) * 4;
+ bit = 1 << vid % 32;
+ val = tn40_read_reg(priv, reg);
+ netdev_dbg(priv->ndev, "reg =%x, val =%x, bit =%d\n", reg, val, bit);
+ if (enable)
+ val |= bit;
+ else
+ val &= ~bit;
+ netdev_dbg(priv->ndev, "new val %x\n", val);
+ tn40_write_reg(priv, reg, val);
+}
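A worked example of the table indexing above, for VID 100:

/* __tn40_vlan_rx_vid(ndev, 100, enable):
 *   reg = TN40_REG_VLAN_0 + (100 / 32) * 4 = TN40_REG_VLAN_0 + 0xC
 *   bit = 1 << (100 % 32)                  = BIT(4)
 * i.e. each 32-bit register of the VLAN table covers 32 consecutive VIDs.
 */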
+
+static int tn40_vlan_rx_add_vid(struct net_device *ndev,
+ __always_unused __be16 proto, u16 vid)
+{
+ __tn40_vlan_rx_vid(ndev, vid, 1);
+ return 0;
+}
+
+static int tn40_vlan_rx_kill_vid(struct net_device *ndev,
+ __always_unused __be16 proto, u16 vid)
+{
+ __tn40_vlan_rx_vid(ndev, vid, 0);
+ return 0;
+}
+
+static void tn40_setmulti(struct net_device *ndev)
+{
+ u32 rxf_val = TN40_GMAC_RX_FILTER_AM | TN40_GMAC_RX_FILTER_AB |
+ TN40_GMAC_RX_FILTER_OSEN | TN40_GMAC_RX_FILTER_TXFC;
+ struct tn40_priv *priv = netdev_priv(ndev);
+ int i;
+
+ /* IMF - imperfect (hash) rx multicast filter */
+ /* PMF - perfect rx multicast filter */
+
+ /* FIXME: RXE(OFF) */
+ if (ndev->flags & IFF_PROMISC) {
+ rxf_val |= TN40_GMAC_RX_FILTER_PRM;
+ } else if (ndev->flags & IFF_ALLMULTI) {
+ /* set IMF to accept all multicast frames */
+ for (i = 0; i < TN40_MAC_MCST_HASH_NUM; i++)
+ tn40_write_reg(priv,
+ TN40_REG_RX_MCST_HASH0 + i * 4, ~0);
+ } else if (netdev_mc_count(ndev)) {
+ struct netdev_hw_addr *mclist;
+ u32 reg, val;
+ u8 hash;
+
+ /* Set IMF to deny all multicast frames */
+ for (i = 0; i < TN40_MAC_MCST_HASH_NUM; i++)
+ tn40_write_reg(priv,
+ TN40_REG_RX_MCST_HASH0 + i * 4, 0);
+
+ /* Set PMF to deny all multicast frames */
+ for (i = 0; i < TN40_MAC_MCST_NUM; i++) {
+ tn40_write_reg(priv,
+ TN40_REG_RX_MAC_MCST0 + i * 8, 0);
+ tn40_write_reg(priv,
+ TN40_REG_RX_MAC_MCST1 + i * 8, 0);
+ }
+ /* Use PMF to accept first MAC_MCST_NUM (15) addresses */
+
+ /* TBD: Sort the addresses and write them in ascending
+ * order into the RX_MAC_MCST regs. We skip this phase for
+ * now and accept ALL multicast frames through the IMF
+ * instead.
+ */
+ netdev_for_each_mc_addr(mclist, ndev) {
+ hash = 0;
+ for (i = 0; i < ETH_ALEN; i++)
+ hash ^= mclist->addr[i];
+
+ reg = TN40_REG_RX_MCST_HASH0 + ((hash >> 5) << 2);
+ val = tn40_read_reg(priv, reg);
+ val |= (1 << (hash % 32));
+ tn40_write_reg(priv, reg, val);
+ }
+ } else {
+ rxf_val |= TN40_GMAC_RX_FILTER_AB;
+ }
+ tn40_write_reg(priv, TN40_REG_GMAC_RXF_A, rxf_val);
+ /* Enable RX */
+ /* FIXME: RXE(ON) */
+}
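The imperfect (IMF) filter above reduces each multicast address to an 8-bit XOR hash; the upper three bits select one of the eight TN40_REG_RX_MCST_HASH registers and the lower five bits select a bit within it. A minimal sketch of that indexing (tn40_example_mcast_hash() is a hypothetical helper, not part of the driver):

static void tn40_example_mcast_hash(const u8 addr[ETH_ALEN],
				    u32 *reg_off, u32 *bit)
{
	u8 hash = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		hash ^= addr[i];

	*reg_off = TN40_REG_RX_MCST_HASH0 + (hash >> 5) * 4; /* 8 registers */
	*bit = hash & 0x1f;              /* bit position within the register */
}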
+
+static int tn40_set_mac(struct net_device *ndev, void *p)
+{
+ struct tn40_priv *priv = netdev_priv(ndev);
+ struct sockaddr *addr = p;
+
+ eth_hw_addr_set(ndev, addr->sa_data);
+ tn40_restore_mac(ndev, priv);
+ return 0;
+}
+
+static void tn40_mac_init(struct tn40_priv *priv)
+{
+ u8 addr[ETH_ALEN];
+ u64 val;
+
+ val = (u64)tn40_read_reg(priv, TN40_REG_UNC_MAC0_A);
+ val |= (u64)tn40_read_reg(priv, TN40_REG_UNC_MAC1_A) << 16;
+ val |= (u64)tn40_read_reg(priv, TN40_REG_UNC_MAC2_A) << 32;
+
+ u64_to_ether_addr(val, addr);
+ eth_hw_addr_set(priv->ndev, addr);
+}
+
+static void tn40_get_stats(struct net_device *ndev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct tn40_priv *priv = netdev_priv(ndev);
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin(&priv->syncp);
+ stats->tx_packets = priv->stats.tx_packets;
+ stats->tx_bytes = priv->stats.tx_bytes;
+ stats->tx_dropped = priv->stats.tx_dropped;
+
+ stats->rx_packets = priv->stats.rx_packets;
+ stats->rx_bytes = priv->stats.rx_bytes;
+ stats->rx_dropped = priv->stats.rx_dropped;
+ stats->rx_errors = priv->stats.rx_errors;
+ } while (u64_stats_fetch_retry(&priv->syncp, start));
+}
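The fetch/retry loop above pairs with u64_stats_update_begin()/u64_stats_update_end() on the writer side; a minimal sketch of what the TX completion path is assumed to do with the same syncp (the helper name is hypothetical):

static void tn40_example_stats_tx_update(struct tn40_priv *priv,
					 unsigned int bytes)
{
	u64_stats_update_begin(&priv->syncp);
	priv->stats.tx_packets++;
	priv->stats.tx_bytes += bytes;
	u64_stats_update_end(&priv->syncp);
}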
+
+static const struct net_device_ops tn40_netdev_ops = {
+ .ndo_open = tn40_open,
+ .ndo_stop = tn40_close,
+ .ndo_start_xmit = tn40_start_xmit,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_rx_mode = tn40_setmulti,
+ .ndo_get_stats64 = tn40_get_stats,
+ .ndo_set_mac_address = tn40_set_mac,
+ .ndo_vlan_rx_add_vid = tn40_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = tn40_vlan_rx_kill_vid,
+};
+
+static int tn40_ethtool_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct tn40_priv *priv = netdev_priv(ndev);
+
+ return phylink_ethtool_ksettings_get(priv->phylink, cmd);
+}
+
+static const struct ethtool_ops tn40_ethtool_ops = {
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = tn40_ethtool_get_link_ksettings,
+};
+
+static void tn40_get_queue_stats_rx(struct net_device *ndev, int idx,
+ struct netdev_queue_stats_rx *stats)
+{
+ struct tn40_priv *priv = netdev_priv(ndev);
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin(&priv->syncp);
+
+ stats->packets = priv->stats.rx_packets;
+ stats->bytes = priv->stats.rx_bytes;
+ stats->alloc_fail = priv->alloc_fail;
+ } while (u64_stats_fetch_retry(&priv->syncp, start));
+}
+
+static void tn40_get_queue_stats_tx(struct net_device *ndev, int idx,
+ struct netdev_queue_stats_tx *stats)
+{
+ struct tn40_priv *priv = netdev_priv(ndev);
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin(&priv->syncp);
+
+ stats->packets = priv->stats.tx_packets;
+ stats->bytes = priv->stats.tx_bytes;
+ } while (u64_stats_fetch_retry(&priv->syncp, start));
+}
+
+static void tn40_get_base_stats(struct net_device *ndev,
+ struct netdev_queue_stats_rx *rx,
+ struct netdev_queue_stats_tx *tx)
+{
+ rx->packets = 0;
+ rx->bytes = 0;
+ rx->alloc_fail = 0;
+
+ tx->packets = 0;
+ tx->bytes = 0;
+}
+
+static const struct netdev_stat_ops tn40_stat_ops = {
+ .get_queue_stats_rx = tn40_get_queue_stats_rx,
+ .get_queue_stats_tx = tn40_get_queue_stats_tx,
+ .get_base_stats = tn40_get_base_stats,
+};
+
+static int tn40_priv_init(struct tn40_priv *priv)
+{
+ int ret;
+
+ tn40_set_link_speed(priv, 0);
+
+ /* Set GPIO[9:0] to output 0 */
+ tn40_write_reg(priv, 0x51E0, 0x30010006); /* GPIO_OE_ WR CMD */
+ tn40_write_reg(priv, 0x51F0, 0x0); /* GPIO_OE_ DATA */
+ tn40_write_reg(priv, TN40_REG_MDIO_CMD_STAT, 0x3ec8);
+
+ /* we use tx descriptors to load a firmware. */
+ ret = tn40_create_tx_ring(priv);
+ if (ret)
+ return ret;
+ ret = tn40_fw_load(priv);
+ tn40_destroy_tx_ring(priv);
+ return ret;
+}
+
+static struct net_device *tn40_netdev_alloc(struct pci_dev *pdev)
+{
+ struct net_device *ndev;
+
+ ndev = devm_alloc_etherdev(&pdev->dev, sizeof(struct tn40_priv));
+ if (!ndev)
+ return NULL;
+ ndev->netdev_ops = &tn40_netdev_ops;
+ ndev->ethtool_ops = &tn40_ethtool_ops;
+ ndev->stat_ops = &tn40_stat_ops;
+ ndev->tx_queue_len = TN40_NDEV_TXQ_LEN;
+ ndev->mem_start = pci_resource_start(pdev, 0);
+ ndev->mem_end = pci_resource_end(pdev, 0);
+ ndev->min_mtu = ETH_ZLEN;
+ ndev->max_mtu = TN40_MAX_MTU;
+
+ ndev->features = NETIF_F_IP_CSUM |
+ NETIF_F_SG |
+ NETIF_F_FRAGLIST |
+ NETIF_F_TSO | NETIF_F_GRO |
+ NETIF_F_RXCSUM |
+ NETIF_F_RXHASH |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER;
+ ndev->vlan_features = NETIF_F_IP_CSUM |
+ NETIF_F_SG |
+ NETIF_F_TSO | NETIF_F_GRO | NETIF_F_RXHASH;
+
+ if (dma_get_mask(&pdev->dev) == DMA_BIT_MASK(64)) {
+ ndev->features |= NETIF_F_HIGHDMA;
+ ndev->vlan_features |= NETIF_F_HIGHDMA;
+ }
+ ndev->hw_features |= ndev->features;
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+ netif_stop_queue(ndev);
+ return ndev;
+}
+
+static int tn40_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct net_device *ndev;
+ struct tn40_priv *priv;
+ unsigned int nvec = 1;
+ void __iomem *regs;
+ int ret;
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ dev_err(&pdev->dev, "failed to set DMA mask.\n");
+ goto err_disable_device;
+ }
+
+ ret = pci_request_regions(pdev, TN40_DRV_NAME);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request PCI regions.\n");
+ goto err_disable_device;
+ }
+
+ pci_set_master(pdev);
+
+ regs = pci_iomap(pdev, 0, TN40_REGS_SIZE);
+ if (!regs) {
+ ret = -EIO;
+ dev_err(&pdev->dev, "failed to map PCI bar.\n");
+ goto err_free_regions;
+ }
+
+ ndev = tn40_netdev_alloc(pdev);
+ if (!ndev) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev, "failed to allocate netdev.\n");
+ goto err_iounmap;
+ }
+
+ priv = netdev_priv(ndev);
+ pci_set_drvdata(pdev, priv);
+ netif_napi_add(ndev, &priv->napi, tn40_poll);
+
+ priv->regs = regs;
+ priv->pdev = pdev;
+ priv->ndev = ndev;
+ /* Initialize fifo sizes. */
+ priv->txd_size = 3;
+ priv->txf_size = 3;
+ priv->rxd_size = 3;
+ priv->rxf_size = 3;
+ /* Initialize the initial coalescing registers. */
+ priv->rdintcm = TN40_INT_REG_VAL(0x20, 1, 4, 12);
+ priv->tdintcm = TN40_INT_REG_VAL(0x20, 1, 0, 12);
+
+ ret = tn40_hw_reset(priv);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to reset HW.\n");
+ goto err_unset_drvdata;
+ }
+
+ ret = pci_alloc_irq_vectors(pdev, 1, nvec, PCI_IRQ_MSI);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to allocate irq.\n");
+ goto err_unset_drvdata;
+ }
+
+ ret = tn40_mdiobus_init(priv);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize mdio bus.\n");
+ goto err_free_irq;
+ }
+
+ priv->stats_flag =
+ ((tn40_read_reg(priv, TN40_FPGA_VER) & 0xFFF) != 308);
+ u64_stats_init(&priv->syncp);
+
+ priv->isr_mask = TN40_IR_RX_FREE_0 | TN40_IR_LNKCHG0 | TN40_IR_PSE |
+ TN40_IR_TMR0 | TN40_IR_RX_DESC_0 | TN40_IR_TX_FREE_0 |
+ TN40_IR_TMR1;
+
+ tn40_mac_init(priv);
+ ret = tn40_phy_register(priv);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to set up PHY.\n");
+ goto err_cleanup_swnodes;
+ }
+
+ ret = tn40_priv_init(priv);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize tn40_priv.\n");
+ goto err_unregister_phydev;
+ }
+
+ ret = register_netdev(ndev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register netdev.\n");
+ goto err_unregister_phydev;
+ }
+ return 0;
+err_unregister_phydev:
+ tn40_phy_unregister(priv);
+err_cleanup_swnodes:
+ tn40_swnodes_cleanup(priv);
+err_free_irq:
+ pci_free_irq_vectors(pdev);
+err_unset_drvdata:
+ pci_set_drvdata(pdev, NULL);
+err_iounmap:
+ iounmap(regs);
+err_free_regions:
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+ return ret;
+}
+
+static void tn40_remove(struct pci_dev *pdev)
+{
+ struct tn40_priv *priv = pci_get_drvdata(pdev);
+ struct net_device *ndev = priv->ndev;
+
+ unregister_netdev(ndev);
+
+ tn40_phy_unregister(priv);
+ tn40_swnodes_cleanup(priv);
+ pci_free_irq_vectors(priv->pdev);
+ pci_set_drvdata(pdev, NULL);
+ iounmap(priv->regs);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static const struct pci_device_id tn40_id_table[] = {
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, 0x4022,
+ PCI_VENDOR_ID_TEHUTI, 0x3015) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, 0x4022,
+ PCI_VENDOR_ID_DLINK, 0x4d00) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, 0x4022,
+ PCI_VENDOR_ID_ASUSTEK, 0x8709) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, 0x4022,
+ PCI_VENDOR_ID_EDIMAX, 0x8103) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, PCI_DEVICE_ID_TEHUTI_TN9510,
+ PCI_VENDOR_ID_TEHUTI, 0x3015) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, PCI_DEVICE_ID_TEHUTI_TN9510,
+ PCI_VENDOR_ID_EDIMAX, 0x8102) },
+ { }
+};
+
+static struct pci_driver tn40_driver = {
+ .name = TN40_DRV_NAME,
+ .id_table = tn40_id_table,
+ .probe = tn40_probe,
+ .remove = tn40_remove,
+};
+
+module_pci_driver(tn40_driver);
+
+MODULE_DEVICE_TABLE(pci, tn40_id_table);
+MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(TN40_FIRMWARE_NAME);
+MODULE_DESCRIPTION("Tehuti Network TN40xx Driver");
diff --git a/drivers/net/ethernet/tehuti/tn40.h b/drivers/net/ethernet/tehuti/tn40.h
new file mode 100644
index 000000000000..25da8686d469
--- /dev/null
+++ b/drivers/net/ethernet/tehuti/tn40.h
@@ -0,0 +1,266 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) Tehuti Networks Ltd. */
+
+#ifndef _TN40_H_
+#define _TN40_H_
+
+#include <linux/property.h>
+#include "tn40_regs.h"
+
+#define TN40_DRV_NAME "tn40xx"
+
+#define PCI_DEVICE_ID_TEHUTI_TN9510 0x4025
+
+#define TN40_MDIO_SPEED_1MHZ (1)
+#define TN40_MDIO_SPEED_6MHZ (6)
+
+/* Netdev TX queue length for Luxor. The default value is 1000; it can be
+ * changed at runtime, e.g. "ifconfig eth1 txqueuelen 3000".
+ */
+#define TN40_NDEV_TXQ_LEN 1000
+
+#define TN40_FIFO_SIZE 4096
+#define TN40_FIFO_EXTRA_SPACE 1024
+
+#define TN40_TXF_DESC_SZ 16
+#define TN40_MAX_TX_LEVEL (priv->txd_fifo0.m.memsz - 16)
+#define TN40_MIN_TX_LEVEL 256
+#define TN40_NO_UPD_PACKETS 40
+#define TN40_MAX_MTU BIT(14)
+
+#define TN40_PCK_TH_MULT 128
+#define TN40_INT_COAL_MULT 2
+
+#define TN40_INT_REG_VAL(coal, coal_rc, rxf_th, pck_th) ( \
+ FIELD_PREP(GENMASK(14, 0), (coal)) | \
+ FIELD_PREP(BIT(15), (coal_rc)) | \
+ FIELD_PREP(GENMASK(19, 16), (rxf_th)) | \
+ FIELD_PREP(GENMASK(31, 20), (pck_th)) \
+ )
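A worked example of the packing above, using the RX coalescing value programmed by tn40_probe():

/* TN40_INT_REG_VAL(0x20, 1, 4, 12)
 *   = (12 << 20) | (4 << 16) | (1 << 15) | 0x20
 *   = 0x00C48020
 * i.e. packet threshold 12, RXF threshold 4, coalesce-on-RC set,
 * coalescing interval 0x20.
 */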
+
+struct tn40_fifo {
+ dma_addr_t da; /* Physical address of fifo (used by HW) */
+ char *va; /* Virtual address of fifo (used by SW) */
+ u32 rptr, wptr;
+ /* Cached values of the RPTR and WPTR registers; they are
+ * 32 bits wide on both 32-bit and 64-bit architectures.
+ */
+ u16 reg_cfg0;
+ u16 reg_cfg1;
+ u16 reg_rptr;
+ u16 reg_wptr;
+ u16 memsz; /* Memory size allocated for fifo */
+ u16 size_mask;
+ u16 pktsz; /* Skb packet size to allocate */
+ u16 rcvno; /* Number of buffers that come from this RXF */
+};
+
+struct tn40_txf_fifo {
+ struct tn40_fifo m; /* The minimal set of variables used by all fifos */
+};
+
+struct tn40_txd_fifo {
+ struct tn40_fifo m; /* The minimal set of variables used by all fifos */
+};
+
+struct tn40_rxf_fifo {
+ struct tn40_fifo m; /* The minimal set of variables used by all fifos */
+};
+
+struct tn40_rxd_fifo {
+ struct tn40_fifo m; /* The minimal set of variables used by all fifos */
+};
+
+struct tn40_rx_map {
+ struct page *page;
+};
+
+struct tn40_rxdb {
+ unsigned int *stack;
+ struct tn40_rx_map *elems;
+ unsigned int nelem;
+ unsigned int top;
+};
+
+union tn40_tx_dma_addr {
+ dma_addr_t dma;
+ struct sk_buff *skb;
+};
+
+/* Entry in the db:
+ * if len == 0, addr holds a DMA address
+ * if len != 0, addr holds an skb pointer
+ */
+struct tn40_tx_map {
+ union tn40_tx_dma_addr addr;
+ int len;
+};
+
+/* tx database - implemented as circular fifo buffer */
+struct tn40_txdb {
+ struct tn40_tx_map *start; /* Points to the first element */
+ struct tn40_tx_map *end; /* Points just AFTER the last element */
+ struct tn40_tx_map *rptr; /* Points to the next element to read */
+ struct tn40_tx_map *wptr; /* Points to the next element to write */
+ int size; /* Number of elements in the db */
+};
+
+#define NODE_PROP(_NAME, _PROP) ( \
+ (const struct software_node) { \
+ .name = _NAME, \
+ .properties = _PROP, \
+ })
+
+#define NODE_PAR_PROP(_NAME, _PAR, _PROP) ( \
+ (const struct software_node) { \
+ .name = _NAME, \
+ .parent = _PAR, \
+ .properties = _PROP, \
+ })
+
+enum tn40_swnodes {
+ SWNODE_MDIO,
+ SWNODE_PHY,
+ SWNODE_MAX
+};
+
+struct tn40_nodes {
+ char phy_name[32];
+ char mdio_name[32];
+ struct property_entry phy_props[3];
+ struct software_node swnodes[SWNODE_MAX];
+ const struct software_node *group[SWNODE_MAX + 1];
+};
+
+struct tn40_priv {
+ struct net_device *ndev;
+ struct pci_dev *pdev;
+
+ struct tn40_nodes nodes;
+
+ struct napi_struct napi;
+ /* RX FIFOs: 1 for data (full) descs, and 2 for free descs */
+ struct tn40_rxd_fifo rxd_fifo0;
+ struct tn40_rxf_fifo rxf_fifo0;
+ struct tn40_rxdb *rxdb0; /* Rx dbs to store skb pointers */
+ struct page_pool *page_pool;
+
+ /* Tx FIFOs: 1 for data desc, 1 for empty (acks) desc */
+ struct tn40_txd_fifo txd_fifo0;
+ struct tn40_txf_fifo txf_fifo0;
+ struct tn40_txdb txdb;
+ int tx_level;
+ int tx_update_mark;
+ int tx_noupd;
+
+ int stats_flag;
+ struct rtnl_link_stats64 stats;
+ u64 alloc_fail;
+ struct u64_stats_sync syncp;
+
+ u8 txd_size;
+ u8 txf_size;
+ u8 rxd_size;
+ u8 rxf_size;
+ u32 rdintcm;
+ u32 tdintcm;
+
+ u32 isr_mask;
+
+ void __iomem *regs;
+
+ /* SHORT_PKT_FIX */
+ u32 b0_len;
+ dma_addr_t b0_dma; /* Physical address of buffer */
+ char *b0_va; /* Virtual address of buffer */
+
+ struct mii_bus *mdio;
+ struct phy_device *phydev;
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
+};
+
+/* RX FREE descriptor - 64bit */
+struct tn40_rxf_desc {
+ __le32 info; /* Buffer Count + Info - described below */
+ __le32 va_lo; /* VAdr[31:0] */
+ __le32 va_hi; /* VAdr[63:32] */
+ __le32 pa_lo; /* PAdr[31:0] */
+ __le32 pa_hi; /* PAdr[63:32] */
+ __le32 len; /* Buffer Length */
+};
+
+#define TN40_GET_RXD_BC(x) FIELD_GET(GENMASK(4, 0), (x))
+#define TN40_GET_RXD_ERR(x) FIELD_GET(GENMASK(26, 21), (x))
+#define TN40_GET_RXD_PKT_ID(x) FIELD_GET(GENMASK(30, 28), (x))
+#define TN40_GET_RXD_VTAG(x) FIELD_GET(BIT(31), (x))
+#define TN40_GET_RXD_VLAN_TCI(x) FIELD_GET(GENMASK(15, 0), (x))
+
+struct tn40_rxd_desc {
+ __le32 rxd_val1;
+ __le16 len;
+ __le16 rxd_vlan;
+ __le32 va_lo;
+ __le32 va_hi;
+ __le32 rss_lo;
+ __le32 rss_hash;
+};
+
+#define TN40_MAX_PBL (19)
+/* PBL describes each virtual buffer to be transmitted from the host. */
+struct tn40_pbl {
+ __le32 pa_lo;
+ __le32 pa_hi;
+ __le32 len;
+};
+
+/* First word for TXD descriptor. It means: type = 3 for regular Tx packet,
+ * hw_csum = 7 for IP+UDP+TCP HW checksums.
+ */
+#define TN40_TXD_W1_VAL(bc, checksum, vtag, lgsnd, vlan_id) ( \
+ GENMASK(17, 16) | \
+ FIELD_PREP(GENMASK(4, 0), (bc)) | \
+ FIELD_PREP(GENMASK(7, 5), (checksum)) | \
+ FIELD_PREP(BIT(8), (vtag)) | \
+ FIELD_PREP(GENMASK(12, 9), (lgsnd)) | \
+ FIELD_PREP(GENMASK(15, 13), \
+ FIELD_GET(GENMASK(15, 13), (vlan_id))) | \
+ FIELD_PREP(GENMASK(31, 20), \
+ FIELD_GET(GENMASK(11, 0), (vlan_id))) \
+ )
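For illustration, the VLAN tag is split across the descriptor word: the constant GENMASK(17, 16) encodes descriptor type 3, the priority bits 15:13 stay in place, and the 12-bit VLAN ID moves up to bits 31:20. For example:

/* TN40_TXD_W1_VAL(bc, csum, vtag, lgsnd, vlan_id = 0x2005):
 *   bits 15:13 <- FIELD_GET(GENMASK(15, 13), 0x2005) = 1  (PCP)
 *   bits 31:20 <- FIELD_GET(GENMASK(11, 0), 0x2005)  = 5  (VID)
 * while bits 17:16 are always set (descriptor type 3).
 */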
+
+struct tn40_txd_desc {
+ __le32 txd_val1;
+ __le16 mss;
+ __le16 length;
+ __le32 va_lo;
+ __le32 va_hi;
+ struct tn40_pbl pbl[]; /* Fragments */
+};
+
+struct tn40_txf_desc {
+ u32 status;
+ u32 va_lo; /* VAdr[31:0] */
+ u32 va_hi; /* VAdr[63:32] */
+ u32 pad;
+};
+
+static inline u32 tn40_read_reg(struct tn40_priv *priv, u32 reg)
+{
+ return readl(priv->regs + reg);
+}
+
+static inline void tn40_write_reg(struct tn40_priv *priv, u32 reg, u32 val)
+{
+ writel(val, priv->regs + reg);
+}
+
+int tn40_set_link_speed(struct tn40_priv *priv, u32 speed);
+
+void tn40_swnodes_cleanup(struct tn40_priv *priv);
+int tn40_mdiobus_init(struct tn40_priv *priv);
+
+int tn40_phy_register(struct tn40_priv *priv);
+void tn40_phy_unregister(struct tn40_priv *priv);
+
+#endif /* _TN40_H_ */
diff --git a/drivers/net/ethernet/tehuti/tn40_mdio.c b/drivers/net/ethernet/tehuti/tn40_mdio.c
new file mode 100644
index 000000000000..fb1a4a2e4dbc
--- /dev/null
+++ b/drivers/net/ethernet/tehuti/tn40_mdio.c
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) Tehuti Networks Ltd. */
+
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/phylink.h>
+
+#include "tn40.h"
+
+#define TN40_MDIO_DEVAD_MASK GENMASK(4, 0)
+#define TN40_MDIO_PRTAD_MASK GENMASK(9, 5)
+#define TN40_MDIO_CMD_VAL(device, port) \
+ (FIELD_PREP(TN40_MDIO_DEVAD_MASK, (device)) | \
+ (FIELD_PREP(TN40_MDIO_PRTAD_MASK, (port))))
+#define TN40_MDIO_CMD_READ BIT(15)
+
+#define AQR105_FIRMWARE "tehuti/aqr105-tn40xx.cld"
+
+static void tn40_mdio_set_speed(struct tn40_priv *priv, u32 speed)
+{
+ void __iomem *regs = priv->regs;
+ int mdio_cfg;
+
+ if (speed == TN40_MDIO_SPEED_1MHZ)
+ mdio_cfg = (0x7d << 7) | 0x08; /* 1MHz */
+ else
+ mdio_cfg = 0xA08; /* 6MHz */
+ mdio_cfg |= (1 << 6);
+ writel(mdio_cfg, regs + TN40_REG_MDIO_CMD_STAT);
+ msleep(100);
+}
+
+static u32 tn40_mdio_stat(struct tn40_priv *priv)
+{
+ void __iomem *regs = priv->regs;
+
+ return readl(regs + TN40_REG_MDIO_CMD_STAT);
+}
+
+static int tn40_mdio_wait_nobusy(struct tn40_priv *priv, u32 *val)
+{
+ u32 stat;
+ int ret;
+
+ ret = readx_poll_timeout_atomic(tn40_mdio_stat, priv, stat,
+ TN40_GET_MDIO_BUSY(stat) == 0, 10,
+ 10000);
+ if (val)
+ *val = stat;
+ return ret;
+}
+
+static int tn40_mdio_read(struct tn40_priv *priv, int port, int device,
+ u16 regnum)
+{
+ void __iomem *regs = priv->regs;
+ u32 i;
+
+ /* wait until MDIO is not busy */
+ if (tn40_mdio_wait_nobusy(priv, NULL))
+ return -EIO;
+
+ i = TN40_MDIO_CMD_VAL(device, port);
+ writel(i, regs + TN40_REG_MDIO_CMD);
+ writel((u32)regnum, regs + TN40_REG_MDIO_ADDR);
+ if (tn40_mdio_wait_nobusy(priv, NULL))
+ return -EIO;
+
+ writel(TN40_MDIO_CMD_READ | i, regs + TN40_REG_MDIO_CMD);
+ /* read CMD_STAT until not busy */
+ if (tn40_mdio_wait_nobusy(priv, NULL))
+ return -EIO;
+
+ return lower_16_bits(readl(regs + TN40_REG_MDIO_DATA));
+}
+
+static int tn40_mdio_write(struct tn40_priv *priv, int port, int device,
+ u16 regnum, u16 data)
+{
+ void __iomem *regs = priv->regs;
+ u32 tmp_reg = 0;
+ int ret;
+
+ /* wait until MDIO is not busy */
+ if (tn40_mdio_wait_nobusy(priv, NULL))
+ return -EIO;
+ writel(TN40_MDIO_CMD_VAL(device, port), regs + TN40_REG_MDIO_CMD);
+ writel((u32)regnum, regs + TN40_REG_MDIO_ADDR);
+ if (tn40_mdio_wait_nobusy(priv, NULL))
+ return -EIO;
+ writel((u32)data, regs + TN40_REG_MDIO_DATA);
+ /* read CMD_STAT until not busy */
+ ret = tn40_mdio_wait_nobusy(priv, &tmp_reg);
+ if (ret)
+ return -EIO;
+
+ if (TN40_GET_MDIO_RD_ERR(tmp_reg)) {
+ dev_err(&priv->pdev->dev, "MDIO error after write command\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+static int tn40_mdio_read_c45(struct mii_bus *mii_bus, int addr, int devnum,
+ int regnum)
+{
+ return tn40_mdio_read(mii_bus->priv, addr, devnum, regnum);
+}
+
+static int tn40_mdio_write_c45(struct mii_bus *mii_bus, int addr, int devnum,
+ int regnum, u16 val)
+{
+ return tn40_mdio_write(mii_bus->priv, addr, devnum, regnum, val);
+}
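Once the bus is registered, these callbacks back all clause-45 accesses from phylib; a minimal usage sketch, assuming the generic mdiobus_c45_read() helper and the constants from <linux/mdio.h> (the example function is hypothetical):

static int tn40_example_read_phy_id(struct mii_bus *bus)
{
	/* Read the PMA/PMD device identifier of the PHY at address 1. */
	return mdiobus_c45_read(bus, 1, MDIO_MMD_PMAPMD, MDIO_DEVID1);
}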
+
+/* Registers an MDIO node and an AQR105 PHY at address 1:
+ * tn40_mdio-%id {
+ * ethernet-phy@1 {
+ * compatible = "ethernet-phy-id03a1.b4a3";
+ * reg = <1>;
+ * firmware-name = AQR105_FIRMWARE;
+ * };
+ * };
+ */
+static int tn40_swnodes_register(struct tn40_priv *priv)
+{
+ struct tn40_nodes *nodes = &priv->nodes;
+ struct pci_dev *pdev = priv->pdev;
+ struct software_node *swnodes;
+ u32 id;
+
+ id = pci_dev_id(pdev);
+
+ snprintf(nodes->phy_name, sizeof(nodes->phy_name), "ethernet-phy@1");
+ snprintf(nodes->mdio_name, sizeof(nodes->mdio_name), "tn40_mdio-%x",
+ id);
+
+ swnodes = nodes->swnodes;
+
+ swnodes[SWNODE_MDIO] = NODE_PROP(nodes->mdio_name, NULL);
+
+ nodes->phy_props[0] = PROPERTY_ENTRY_STRING("compatible",
+ "ethernet-phy-id03a1.b4a3");
+ nodes->phy_props[1] = PROPERTY_ENTRY_U32("reg", 1);
+ nodes->phy_props[2] = PROPERTY_ENTRY_STRING("firmware-name",
+ AQR105_FIRMWARE);
+ swnodes[SWNODE_PHY] = NODE_PAR_PROP(nodes->phy_name,
+ &swnodes[SWNODE_MDIO],
+ nodes->phy_props);
+
+ nodes->group[SWNODE_PHY] = &swnodes[SWNODE_PHY];
+ nodes->group[SWNODE_MDIO] = &swnodes[SWNODE_MDIO];
+ return software_node_register_node_group(nodes->group);
+}
+
+void tn40_swnodes_cleanup(struct tn40_priv *priv)
+{
+ /* cleanup of swnodes is only needed for AQR105-based cards */
+ if (priv->pdev->device == PCI_DEVICE_ID_TEHUTI_TN9510) {
+ fwnode_handle_put(dev_fwnode(&priv->mdio->dev));
+ device_remove_software_node(&priv->mdio->dev);
+ software_node_unregister_node_group(priv->nodes.group);
+ }
+}
+
+int tn40_mdiobus_init(struct tn40_priv *priv)
+{
+ struct pci_dev *pdev = priv->pdev;
+ struct mii_bus *bus;
+ int ret;
+
+ bus = devm_mdiobus_alloc(&pdev->dev);
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = TN40_DRV_NAME;
+ bus->parent = &pdev->dev;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "tn40xx-%x-%x",
+ pci_domain_nr(pdev->bus), pci_dev_id(pdev));
+ bus->priv = priv;
+
+ bus->read_c45 = tn40_mdio_read_c45;
+ bus->write_c45 = tn40_mdio_write_c45;
+ priv->mdio = bus;
+
+ /* provide swnodes for AQR105-based cards only */
+ if (pdev->device == PCI_DEVICE_ID_TEHUTI_TN9510) {
+ ret = tn40_swnodes_register(priv);
+ if (ret) {
+ pr_err("swnodes failed\n");
+ return ret;
+ }
+
+ ret = device_add_software_node(&bus->dev,
+ priv->nodes.group[SWNODE_MDIO]);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "device_add_software_node failed: %d\n", ret);
+ goto err_swnodes_unregister;
+ }
+ }
+
+ tn40_mdio_set_speed(priv, TN40_MDIO_SPEED_6MHZ);
+ ret = devm_mdiobus_register(&pdev->dev, bus);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register mdiobus %d %u %u\n",
+ ret, bus->state, MDIOBUS_UNREGISTERED);
+ goto err_swnodes_cleanup;
+ }
+ return 0;
+
+err_swnodes_unregister:
+ software_node_unregister_node_group(priv->nodes.group);
+ return ret;
+err_swnodes_cleanup:
+ tn40_swnodes_cleanup(priv);
+ return ret;
+}
+
+MODULE_FIRMWARE(AQR105_FIRMWARE);
diff --git a/drivers/net/ethernet/tehuti/tn40_phy.c b/drivers/net/ethernet/tehuti/tn40_phy.c
new file mode 100644
index 000000000000..39eef7ca7958
--- /dev/null
+++ b/drivers/net/ethernet/tehuti/tn40_phy.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) Tehuti Networks Ltd. */
+
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/phylink.h>
+
+#include "tn40.h"
+
+static struct tn40_priv *tn40_config_to_priv(struct phylink_config *config)
+{
+ return container_of(config, struct tn40_priv, phylink_config);
+}
+
+static void tn40_link_up(struct phylink_config *config, struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex, bool tx_pause, bool rx_pause)
+{
+ struct tn40_priv *priv = tn40_config_to_priv(config);
+
+ tn40_set_link_speed(priv, speed);
+ netif_wake_queue(priv->ndev);
+}
+
+static void tn40_link_down(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct tn40_priv *priv = tn40_config_to_priv(config);
+
+ netif_stop_queue(priv->ndev);
+ tn40_set_link_speed(priv, 0);
+}
+
+static void tn40_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+}
+
+static const struct phylink_mac_ops tn40_mac_ops = {
+ .mac_config = tn40_mac_config,
+ .mac_link_up = tn40_link_up,
+ .mac_link_down = tn40_link_down,
+};
+
+int tn40_phy_register(struct tn40_priv *priv)
+{
+ struct phylink_config *config;
+ struct phy_device *phydev;
+ struct phylink *phylink;
+
+ phydev = phy_find_first(priv->mdio);
+ if (!phydev) {
+ dev_err(&priv->pdev->dev, "PHY isn't found\n");
+ return -ENODEV;
+ }
+
+ config = &priv->phylink_config;
+ config->dev = &priv->ndev->dev;
+ config->type = PHYLINK_NETDEV;
+ config->mac_capabilities = MAC_10000FD;
+ __set_bit(PHY_INTERFACE_MODE_XAUI, config->supported_interfaces);
+
+ phylink = phylink_create(config, NULL, PHY_INTERFACE_MODE_XAUI,
+ &tn40_mac_ops);
+ if (IS_ERR(phylink))
+ return PTR_ERR(phylink);
+
+ priv->phydev = phydev;
+ priv->phylink = phylink;
+ return 0;
+}
+
+void tn40_phy_unregister(struct tn40_priv *priv)
+{
+ phylink_destroy(priv->phylink);
+}
diff --git a/drivers/net/ethernet/tehuti/tn40_regs.h b/drivers/net/ethernet/tehuti/tn40_regs.h
new file mode 100644
index 000000000000..95171aa57a9e
--- /dev/null
+++ b/drivers/net/ethernet/tehuti/tn40_regs.h
@@ -0,0 +1,245 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) Tehuti Networks Ltd. */
+
+#ifndef _TN40_REGS_H_
+#define _TN40_REGS_H_
+
+/* Register region size */
+#define TN40_REGS_SIZE 0x10000
+
+/* Registers from 0x0000-0x00fc were remapped to 0x4000-0x40fc */
+#define TN40_REG_TXD_CFG1_0 0x4000
+#define TN40_REG_TXD_CFG1_1 0x4004
+#define TN40_REG_TXD_CFG1_2 0x4008
+#define TN40_REG_TXD_CFG1_3 0x400C
+
+#define TN40_REG_RXF_CFG1_0 0x4010
+#define TN40_REG_RXF_CFG1_1 0x4014
+#define TN40_REG_RXF_CFG1_2 0x4018
+#define TN40_REG_RXF_CFG1_3 0x401C
+
+#define TN40_REG_RXD_CFG1_0 0x4020
+#define TN40_REG_RXD_CFG1_1 0x4024
+#define TN40_REG_RXD_CFG1_2 0x4028
+#define TN40_REG_RXD_CFG1_3 0x402C
+
+#define TN40_REG_TXF_CFG1_0 0x4030
+#define TN40_REG_TXF_CFG1_1 0x4034
+#define TN40_REG_TXF_CFG1_2 0x4038
+#define TN40_REG_TXF_CFG1_3 0x403C
+
+#define TN40_REG_TXD_CFG0_0 0x4040
+#define TN40_REG_TXD_CFG0_1 0x4044
+#define TN40_REG_TXD_CFG0_2 0x4048
+#define TN40_REG_TXD_CFG0_3 0x404C
+
+#define TN40_REG_RXF_CFG0_0 0x4050
+#define TN40_REG_RXF_CFG0_1 0x4054
+#define TN40_REG_RXF_CFG0_2 0x4058
+#define TN40_REG_RXF_CFG0_3 0x405C
+
+#define TN40_REG_RXD_CFG0_0 0x4060
+#define TN40_REG_RXD_CFG0_1 0x4064
+#define TN40_REG_RXD_CFG0_2 0x4068
+#define TN40_REG_RXD_CFG0_3 0x406C
+
+#define TN40_REG_TXF_CFG0_0 0x4070
+#define TN40_REG_TXF_CFG0_1 0x4074
+#define TN40_REG_TXF_CFG0_2 0x4078
+#define TN40_REG_TXF_CFG0_3 0x407C
+
+#define TN40_REG_TXD_WPTR_0 0x4080
+#define TN40_REG_TXD_WPTR_1 0x4084
+#define TN40_REG_TXD_WPTR_2 0x4088
+#define TN40_REG_TXD_WPTR_3 0x408C
+
+#define TN40_REG_RXF_WPTR_0 0x4090
+#define TN40_REG_RXF_WPTR_1 0x4094
+#define TN40_REG_RXF_WPTR_2 0x4098
+#define TN40_REG_RXF_WPTR_3 0x409C
+
+#define TN40_REG_RXD_WPTR_0 0x40A0
+#define TN40_REG_RXD_WPTR_1 0x40A4
+#define TN40_REG_RXD_WPTR_2 0x40A8
+#define TN40_REG_RXD_WPTR_3 0x40AC
+
+#define TN40_REG_TXF_WPTR_0 0x40B0
+#define TN40_REG_TXF_WPTR_1 0x40B4
+#define TN40_REG_TXF_WPTR_2 0x40B8
+#define TN40_REG_TXF_WPTR_3 0x40BC
+
+#define TN40_REG_TXD_RPTR_0 0x40C0
+#define TN40_REG_TXD_RPTR_1 0x40C4
+#define TN40_REG_TXD_RPTR_2 0x40C8
+#define TN40_REG_TXD_RPTR_3 0x40CC
+
+#define TN40_REG_RXF_RPTR_0 0x40D0
+#define TN40_REG_RXF_RPTR_1 0x40D4
+#define TN40_REG_RXF_RPTR_2 0x40D8
+#define TN40_REG_RXF_RPTR_3 0x40DC
+
+#define TN40_REG_RXD_RPTR_0 0x40E0
+#define TN40_REG_RXD_RPTR_1 0x40E4
+#define TN40_REG_RXD_RPTR_2 0x40E8
+#define TN40_REG_RXD_RPTR_3 0x40EC
+
+#define TN40_REG_TXF_RPTR_0 0x40F0
+#define TN40_REG_TXF_RPTR_1 0x40F4
+#define TN40_REG_TXF_RPTR_2 0x40F8
+#define TN40_REG_TXF_RPTR_3 0x40FC
+
+/* Hardware versioning */
+#define TN40_FPGA_VER 0x5030
+
+/* Registers from 0x0100-0x0150 were remapped to 0x5100-0x5150 */
+#define TN40_REG_ISR TN40_REG_ISR0
+#define TN40_REG_ISR0 0x5100
+
+#define TN40_REG_IMR TN40_REG_IMR0
+#define TN40_REG_IMR0 0x5110
+
+#define TN40_REG_RDINTCM0 0x5120
+#define TN40_REG_RDINTCM2 0x5128
+
+#define TN40_REG_TDINTCM0 0x5130
+
+#define TN40_REG_ISR_MSK0 0x5140
+
+#define TN40_REG_INIT_SEMAPHORE 0x5170
+#define TN40_REG_INIT_STATUS 0x5180
+
+#define TN40_REG_MAC_LNK_STAT 0x0200
+#define TN40_MAC_LINK_STAT 0x0004 /* Link state */
+
+#define TN40_REG_BLNK_LED 0x0210
+
+#define TN40_REG_GMAC_RXF_A 0x1240
+
+#define TN40_REG_UNC_MAC0_A 0x1250
+#define TN40_REG_UNC_MAC1_A 0x1260
+#define TN40_REG_UNC_MAC2_A 0x1270
+
+#define TN40_REG_VLAN_0 0x1800
+
+#define TN40_REG_MAX_FRAME_A 0x12C0
+
+#define TN40_REG_RX_MAC_MCST0 0x1A80
+#define TN40_REG_RX_MAC_MCST1 0x1A84
+#define TN40_MAC_MCST_NUM 15
+#define TN40_REG_RX_MCST_HASH0 0x1A00
+#define TN40_MAC_MCST_HASH_NUM 8
+
+#define TN40_REG_VPC 0x2300
+#define TN40_REG_VIC 0x2320
+#define TN40_REG_VGLB 0x2340
+
+#define TN40_REG_CLKPLL 0x5000
+
+/* MDIO interface */
+
+#define TN40_REG_MDIO_CMD_STAT 0x6030
+#define TN40_REG_MDIO_CMD 0x6034
+#define TN40_REG_MDIO_DATA 0x6038
+#define TN40_REG_MDIO_ADDR 0x603C
+#define TN40_GET_MDIO_BUSY(x) FIELD_GET(GENMASK(0, 0), (x))
+#define TN40_GET_MDIO_RD_ERR(x) FIELD_GET(GENMASK(1, 1), (x))
+
+#define TN40_REG_REVISION 0x6000
+#define TN40_REG_SCRATCH 0x6004
+#define TN40_REG_CTRLST 0x6008
+#define TN40_REG_MAC_ADDR_0 0x600C
+#define TN40_REG_MAC_ADDR_1 0x6010
+#define TN40_REG_FRM_LENGTH 0x6014
+#define TN40_REG_PAUSE_QUANT 0x6054
+#define TN40_REG_RX_FIFO_SECTION 0x601C
+#define TN40_REG_TX_FIFO_SECTION 0x6020
+#define TN40_REG_RX_FULLNESS 0x6024
+#define TN40_REG_TX_FULLNESS 0x6028
+#define TN40_REG_HASHTABLE 0x602C
+
+#define TN40_REG_RST_PORT 0x7000
+#define TN40_REG_DIS_PORT 0x7010
+#define TN40_REG_RST_QU 0x7020
+#define TN40_REG_DIS_QU 0x7030
+
+#define TN40_REG_CTRLST_TX_ENA 0x0001
+#define TN40_REG_CTRLST_RX_ENA 0x0002
+#define TN40_REG_CTRLST_PRM_ENA 0x0010
+#define TN40_REG_CTRLST_PAD_ENA 0x0020
+
+#define TN40_REG_CTRLST_BASE (TN40_REG_CTRLST_PAD_ENA | TN40_REG_CTRLST_PRM_ENA)
+
+/* TXD TXF RXF RXD CONFIG 0x0000 --- 0x007c */
+#define TN40_TX_RX_CFG1_BASE 0xffffffff /*0-31 */
+#define TN40_TX_RX_CFG0_BASE 0xfffff000 /*31:12 */
+#define TN40_TX_RX_CFG0_RSVD 0x00000ffc /*11:2 */
+#define TN40_TX_RX_CFG0_SIZE 0x00000003 /*1:0 */
+
+/* TXD TXF RXF RXD WRITE 0x0080 --- 0x00BC */
+#define TN40_TXF_WPTR_WR_PTR 0x00007ff8 /*14:3 */
+
+/* TXD TXF RXF RXD READ 0x00C0 --- 0x00FC */
+#define TN40_TXF_RPTR_RD_PTR 0x00007ff8 /*14:3 */
+
+/* The last 4 bits are dropped; the size is rounded to a multiple of 16 */
+#define TN40_TXF_WPTR_MASK 0x7ff0
+
+/* regISR 0x0100 */
+/* regIMR 0x0110 */
+#define TN40_IMR_INPROG 0x80000000 /*31 */
+#define TN40_IR_LNKCHG1 0x10000000 /*28 */
+#define TN40_IR_LNKCHG0 0x08000000 /*27 */
+#define TN40_IR_GPIO 0x04000000 /*26 */
+#define TN40_IR_RFRSH 0x02000000 /*25 */
+#define TN40_IR_RSVD 0x01000000 /*24 */
+#define TN40_IR_SWI 0x00800000 /*23 */
+#define TN40_IR_RX_FREE_3 0x00400000 /*22 */
+#define TN40_IR_RX_FREE_2 0x00200000 /*21 */
+#define TN40_IR_RX_FREE_1 0x00100000 /*20 */
+#define TN40_IR_RX_FREE_0 0x00080000 /*19 */
+#define TN40_IR_TX_FREE_3 0x00040000 /*18 */
+#define TN40_IR_TX_FREE_2 0x00020000 /*17 */
+#define TN40_IR_TX_FREE_1 0x00010000 /*16 */
+#define TN40_IR_TX_FREE_0 0x00008000 /*15 */
+#define TN40_IR_RX_DESC_3 0x00004000 /*14 */
+#define TN40_IR_RX_DESC_2 0x00002000 /*13 */
+#define TN40_IR_RX_DESC_1 0x00001000 /*12 */
+#define TN40_IR_RX_DESC_0 0x00000800 /*11 */
+#define TN40_IR_PSE 0x00000400 /*10 */
+#define TN40_IR_TMR3 0x00000200 /* 9 */
+#define TN40_IR_TMR2 0x00000100 /* 8 */
+#define TN40_IR_TMR1 0x00000080 /* 7 */
+#define TN40_IR_TMR0 0x00000040 /* 6 */
+#define TN40_IR_VNT 0x00000020 /* 5 */
+#define TN40_IR_RxFL 0x00000010 /* 4 */
+#define TN40_IR_SDPERR 0x00000008 /* 3 */
+#define TN40_IR_TR 0x00000004 /* 2 */
+#define TN40_IR_PCIE_LINK 0x00000002 /* 1 */
+#define TN40_IR_PCIE_TOUT 0x00000001 /* 0 */
+
+#define TN40_IR_EXTRA \
+ (TN40_IR_RX_FREE_0 | TN40_IR_LNKCHG0 | TN40_IR_LNKCHG1 |\
+ TN40_IR_PSE | TN40_IR_TMR0 | TN40_IR_PCIE_LINK | \
+ TN40_IR_PCIE_TOUT)
+
+#define TN40_GMAC_RX_FILTER_OSEN 0x1000 /* shared OS enable */
+#define TN40_GMAC_RX_FILTER_TXFC 0x0400 /* Tx flow control */
+#define TN40_GMAC_RX_FILTER_RSV0 0x0200 /* reserved */
+#define TN40_GMAC_RX_FILTER_FDA 0x0100 /* filter out direct address */
+#define TN40_GMAC_RX_FILTER_AOF 0x0080 /* accept over run */
+#define TN40_GMAC_RX_FILTER_ACF 0x0040 /* accept control frames */
+#define TN40_GMAC_RX_FILTER_ARUNT 0x0020 /* accept under run */
+#define TN40_GMAC_RX_FILTER_ACRC 0x0010 /* accept crc error */
+#define TN40_GMAC_RX_FILTER_AM 0x0008 /* accept multicast */
+#define TN40_GMAC_RX_FILTER_AB 0x0004 /* accept broadcast */
+#define TN40_GMAC_RX_FILTER_PRM 0x0001 /* [0:1] promiscuous mode */
+
+#define TN40_MAX_FRAME_AB_VAL 0x3fff /* 13:0 */
+
+#define TN40_CLKPLL_PLLLKD 0x0200 /* 9 */
+#define TN40_CLKPLL_RSTEND 0x0100 /* 8 */
+#define TN40_CLKPLL_SFTRST 0x0001 /* 0 */
+
+#define TN40_CLKPLL_LKD (TN40_CLKPLL_PLLLKD | TN40_CLKPLL_RSTEND)
+
+#endif
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 1530d13984d4..a54d71155263 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -99,6 +99,7 @@ config TI_K3_AM65_CPSW_NUSS
select NET_DEVLINK
select TI_DAVINCI_MDIO
select PHYLINK
+ select PAGE_POOL
select TI_K3_CPPI_DESC_POOL
imply PHY_TI_GMII_SEL
depends on TI_K3_AM65_CPTS || !TI_K3_AM65_CPTS
@@ -167,7 +168,7 @@ config TI_KEYSTONE_NETCP_ETHSS
config TLAN
tristate "TI ThunderLAN support"
- depends on (PCI || EISA)
+ depends on (PCI || EISA) && HAS_IOPORT
help
If you have a PCI Ethernet network card based on the ThunderLAN chip
which is supported by this driver, say Y here.
@@ -188,6 +189,7 @@ config TI_ICSSG_PRUETH
select TI_ICSS_IEP
select TI_K3_CPPI_DESC_POOL
depends on PRU_REMOTEPROC
+ depends on NET_SWITCHDEV
depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER
depends on PTP_1588_CLOCK_OPTIONAL
help
@@ -198,6 +200,23 @@ config TI_ICSSG_PRUETH
to support the Ethernet operation. Currently, it supports Ethernet
with 1G and 100M link speed.
+config TI_ICSSG_PRUETH_SR1
+ tristate "TI Gigabit PRU SR1.0 Ethernet driver"
+ select PHYLIB
+ select TI_ICSS_IEP
+ select TI_K3_CPPI_DESC_POOL
+ select PAGE_POOL
+ depends on PRU_REMOTEPROC
+ depends on NET_SWITCHDEV
+ depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER
+ help
+ Support dual Gigabit Ethernet ports over the ICSSG PRU Subsystem.
+ This subsystem is available on the AM65 SR1.0 platform.
+
+ This driver requires firmware binaries which will run on the PRUs
+ to support the Ethernet operation. Currently, it supports Ethernet
+ with 1G, 100M and 10M link speed.
+
config TI_ICSS_IEP
tristate "TI PRU ICSS IEP driver"
depends on PTP_1588_CLOCK_OPTIONAL
@@ -210,4 +229,16 @@ config TI_ICSS_IEP
To compile this driver as a module, choose M here. The module
will be called icss_iep.
+config TI_PRUETH
+ tristate "TI PRU Ethernet EMAC driver"
+ depends on PRU_REMOTEPROC
+ depends on NET_SWITCHDEV
+ select TI_ICSS_IEP
+ imply PTP_1588_CLOCK
+ help
+ Some TI SoCs have Programmable Real-Time Unit (PRU) cores which can
+ support single or dual Ethernet ports with the help of firmware code
+ running on the PRU cores. This driver uses remoteproc-based
+ communication with the PRU firmware to expose an Ethernet interface
+ to Linux.
+
endif # NET_VENDOR_TI
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index d8590304f3df..93c0a4d0e33a 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -3,6 +3,9 @@
# Makefile for the TI network device drivers.
#
+obj-$(CONFIG_TI_PRUETH) += icssm-prueth.o
+icssm-prueth-y := icssm/icssm_prueth.o
+
obj-$(CONFIG_TI_CPSW) += cpsw-common.o
obj-$(CONFIG_TI_DAVINCI_EMAC) += cpsw-common.o
obj-$(CONFIG_TI_CPSW_SWITCHDEV) += cpsw-common.o
@@ -31,12 +34,18 @@ ti-am65-cpsw-nuss-$(CONFIG_TI_AM65_CPSW_QOS) += am65-cpsw-qos.o
ti-am65-cpsw-nuss-$(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV) += am65-cpsw-switchdev.o
obj-$(CONFIG_TI_K3_AM65_CPTS) += am65-cpts.o
-obj-$(CONFIG_TI_ICSSG_PRUETH) += icssg-prueth.o
-icssg-prueth-y := icssg/icssg_prueth.o \
- icssg/icssg_classifier.o \
- icssg/icssg_queues.o \
- icssg/icssg_config.o \
- icssg/icssg_mii_cfg.o \
- icssg/icssg_stats.o \
- icssg/icssg_ethtool.o
+obj-$(CONFIG_TI_ICSSG_PRUETH) += icssg-prueth.o icssg.o
+icssg-prueth-y := icssg/icssg_prueth.o icssg/icssg_switchdev.o
+
+obj-$(CONFIG_TI_ICSSG_PRUETH_SR1) += icssg-prueth-sr1.o icssg.o
+icssg-prueth-sr1-y := icssg/icssg_prueth_sr1.o
+
+icssg-y := icssg/icssg_common.o \
+ icssg/icssg_classifier.o \
+ icssg/icssg_queues.o \
+ icssg/icssg_config.o \
+ icssg/icssg_mii_cfg.o \
+ icssg/icssg_stats.o \
+ icssg/icssg_ethtool.o
+
obj-$(CONFIG_TI_ICSS_IEP) += icssg/icss_iep.o
diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
index d6ce2c9f0a8d..c57497074ae6 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
@@ -427,9 +427,9 @@ static void am65_cpsw_get_channels(struct net_device *ndev,
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
- ch->max_rx = AM65_CPSW_MAX_RX_QUEUES;
- ch->max_tx = AM65_CPSW_MAX_TX_QUEUES;
- ch->rx_count = AM65_CPSW_MAX_RX_QUEUES;
+ ch->max_rx = AM65_CPSW_MAX_QUEUES;
+ ch->max_tx = AM65_CPSW_MAX_QUEUES;
+ ch->rx_count = common->rx_ch_num_flows;
ch->tx_count = common->tx_ch_num;
}
@@ -447,9 +447,8 @@ static int am65_cpsw_set_channels(struct net_device *ndev,
if (common->usage_count)
return -EBUSY;
- am65_cpsw_nuss_remove_tx_chns(common);
-
- return am65_cpsw_nuss_update_tx_chns(common, chs->tx_count);
+ return am65_cpsw_nuss_update_tx_rx_chns(common, chs->tx_count,
+ chs->rx_count);
}
static void
@@ -692,9 +691,23 @@ static void am65_cpsw_get_eth_mac_stats(struct net_device *ndev,
};
static int am65_cpsw_get_ethtool_ts_info(struct net_device *ndev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ unsigned int ptp_filter;
+
+ ptp_filter = BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ);
if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
return ethtool_op_get_ts_info(ndev, info);
@@ -703,12 +716,10 @@ static int am65_cpsw_get_ethtool_ts_info(struct net_device *ndev,
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
info->phc_index = am65_cpts_phc_index(common->cpts);
info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
- info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | ptp_filter;
return 0;
}
@@ -904,80 +915,64 @@ static void am65_cpsw_get_mm_stats(struct net_device *ndev,
s->MACMergeHoldCount = readl(base + AM65_CPSW_STATN_IET_TX_HOLD);
}
-static int am65_cpsw_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal,
- struct kernel_ethtool_coalesce *kernel_coal,
- struct netlink_ext_ack *extack)
-{
- struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
- struct am65_cpsw_tx_chn *tx_chn;
-
- tx_chn = &common->tx_chns[0];
-
- coal->rx_coalesce_usecs = common->rx_pace_timeout / 1000;
- coal->tx_coalesce_usecs = tx_chn->tx_pace_timeout / 1000;
-
- return 0;
-}
-
static int am65_cpsw_get_per_queue_coalesce(struct net_device *ndev, u32 queue,
struct ethtool_coalesce *coal)
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_rx_flow *rx_flow;
struct am65_cpsw_tx_chn *tx_chn;
- if (queue >= AM65_CPSW_MAX_TX_QUEUES)
+ if (queue >= AM65_CPSW_MAX_QUEUES)
return -EINVAL;
tx_chn = &common->tx_chns[queue];
-
coal->tx_coalesce_usecs = tx_chn->tx_pace_timeout / 1000;
+ rx_flow = &common->rx_chns.flows[queue];
+ coal->rx_coalesce_usecs = rx_flow->rx_pace_timeout / 1000;
+
return 0;
}
-static int am65_cpsw_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal,
+static int am65_cpsw_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
- struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
- struct am65_cpsw_tx_chn *tx_chn;
-
- tx_chn = &common->tx_chns[0];
-
- if (coal->rx_coalesce_usecs && coal->rx_coalesce_usecs < 20)
- return -EINVAL;
-
- if (coal->tx_coalesce_usecs && coal->tx_coalesce_usecs < 20)
- return -EINVAL;
-
- common->rx_pace_timeout = coal->rx_coalesce_usecs * 1000;
- tx_chn->tx_pace_timeout = coal->tx_coalesce_usecs * 1000;
-
- return 0;
+ return am65_cpsw_get_per_queue_coalesce(ndev, 0, coal);
}
static int am65_cpsw_set_per_queue_coalesce(struct net_device *ndev, u32 queue,
struct ethtool_coalesce *coal)
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_rx_flow *rx_flow;
struct am65_cpsw_tx_chn *tx_chn;
- if (queue >= AM65_CPSW_MAX_TX_QUEUES)
+ if (queue >= AM65_CPSW_MAX_QUEUES)
return -EINVAL;
tx_chn = &common->tx_chns[queue];
-
- if (coal->tx_coalesce_usecs && coal->tx_coalesce_usecs < 20) {
- dev_info(common->dev, "defaulting to min value of 20us for tx-usecs for tx-%u\n",
- queue);
- coal->tx_coalesce_usecs = 20;
- }
+ if (coal->tx_coalesce_usecs && coal->tx_coalesce_usecs < 20)
+ return -EINVAL;
tx_chn->tx_pace_timeout = coal->tx_coalesce_usecs * 1000;
+ rx_flow = &common->rx_chns.flows[queue];
+ if (coal->rx_coalesce_usecs && coal->rx_coalesce_usecs < 20)
+ return -EINVAL;
+
+ rx_flow->rx_pace_timeout = coal->rx_coalesce_usecs * 1000;
+
return 0;
}
+static int am65_cpsw_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ return am65_cpsw_set_per_queue_coalesce(ndev, 0, coal);
+}
+
const struct ethtool_ops am65_cpsw_ethtool_ops_slave = {
.begin = am65_cpsw_ethtool_op_begin,
.complete = am65_cpsw_ethtool_op_complete,
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 1d00e21808c1..5924db6be3fe 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -5,6 +5,7 @@
*
*/
+#include <linux/bpf_trace.h>
#include <linux/clk.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
@@ -30,6 +31,8 @@
#include <linux/sys_soc.h>
#include <linux/dma/ti-cppi5.h>
#include <linux/dma/k3-udma-glue.h>
+#include <net/page_pool/helpers.h>
+#include <net/dsa.h>
#include <net/switchdev.h>
#include "cpsw_ale.h"
@@ -69,6 +72,8 @@
#define AM65_CPSW_PORT_REG_RX_PRI_MAP 0x020
#define AM65_CPSW_PORT_REG_RX_MAXLEN 0x024
+#define AM65_CPSW_PORTN_REG_CTL 0x004
+#define AM65_CPSW_PORTN_REG_DSCP_MAP 0x120
#define AM65_CPSW_PORTN_REG_SA_L 0x308
#define AM65_CPSW_PORTN_REG_SA_H 0x30c
#define AM65_CPSW_PORTN_REG_TS_CTL 0x310
@@ -92,6 +97,10 @@
/* AM65_CPSW_PORT_REG_PRI_CTL */
#define AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN BIT(8)
+/* AM65_CPSW_PN_REG_CTL */
+#define AM65_CPSW_PN_REG_CTL_DSCP_IPV4_EN BIT(1)
+#define AM65_CPSW_PN_REG_CTL_DSCP_IPV6_EN BIT(2)
+
/* AM65_CPSW_PN_TS_CTL register fields */
#define AM65_CPSW_PN_TS_CTL_TX_ANX_F_EN BIT(4)
#define AM65_CPSW_PN_TS_CTL_TX_VLAN_LT1_EN BIT(5)
@@ -101,6 +110,12 @@
#define AM65_CPSW_PN_TS_CTL_TX_HOST_TS_EN BIT(11)
#define AM65_CPSW_PN_TS_CTL_MSG_TYPE_EN_SHIFT 16
+#define AM65_CPSW_PN_TS_CTL_RX_ANX_F_EN BIT(0)
+#define AM65_CPSW_PN_TS_CTL_RX_VLAN_LT1_EN BIT(1)
+#define AM65_CPSW_PN_TS_CTL_RX_VLAN_LT2_EN BIT(2)
+#define AM65_CPSW_PN_TS_CTL_RX_ANX_D_EN BIT(3)
+#define AM65_CPSW_PN_TS_CTL_RX_ANX_E_EN BIT(9)
+
/* AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG register fields */
#define AM65_CPSW_PN_TS_SEQ_ID_OFFSET_SHIFT 16
@@ -124,8 +139,13 @@
AM65_CPSW_PN_TS_CTL_TX_ANX_E_EN | \
AM65_CPSW_PN_TS_CTL_TX_ANX_F_EN)
+#define AM65_CPSW_TS_RX_ANX_ALL_EN \
+ (AM65_CPSW_PN_TS_CTL_RX_ANX_D_EN | \
+ AM65_CPSW_PN_TS_CTL_RX_ANX_E_EN | \
+ AM65_CPSW_PN_TS_CTL_RX_ANX_F_EN)
+
#define AM65_CPSW_ALE_AGEOUT_DEFAULT 30
-/* Number of TX/RX descriptors */
+/* Number of TX/RX descriptors per channel/flow */
#define AM65_CPSW_MAX_TX_DESC 500
#define AM65_CPSW_MAX_RX_DESC 500
@@ -137,6 +157,21 @@
NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
#define AM65_CPSW_DEFAULT_TX_CHNS 8
+#define AM65_CPSW_DEFAULT_RX_CHN_FLOWS 1
+
+/* CPPI streaming packet interface */
+#define AM65_CPSW_CPPI_TX_FLOW_ID 0x3FFF
+#define AM65_CPSW_CPPI_TX_PKT_TYPE 0x7
+
+/* XDP */
+#define AM65_CPSW_XDP_TX BIT(2)
+#define AM65_CPSW_XDP_CONSUMED BIT(1)
+#define AM65_CPSW_XDP_REDIRECT BIT(0)
+#define AM65_CPSW_XDP_PASS 0
+
+/* Include headroom compatible with both skb and xdpf */
+#define AM65_CPSW_HEADROOM_NA (max(NET_SKB_PAD, XDP_PACKET_HEADROOM) + NET_IP_ALIGN)
+#define AM65_CPSW_HEADROOM ALIGN(AM65_CPSW_HEADROOM_NA, sizeof(long))
static void am65_cpsw_port_set_sl_mac(struct am65_cpsw_port *slave,
const u8 *dev_addr)
@@ -149,6 +184,99 @@ static void am65_cpsw_port_set_sl_mac(struct am65_cpsw_port *slave,
writel(mac_lo, slave->port_base + AM65_CPSW_PORTN_REG_SA_L);
}
+#define AM65_CPSW_DSCP_MAX GENMASK(5, 0)
+#define AM65_CPSW_PRI_MAX GENMASK(2, 0)
+#define AM65_CPSW_DSCP_PRI_PER_REG 8
+#define AM65_CPSW_DSCP_PRI_SIZE 4 /* in bits */
+static int am65_cpsw_port_set_dscp_map(struct am65_cpsw_port *slave, u8 dscp, u8 pri)
+{
+ int reg_ofs;
+ int bit_ofs;
+ u32 val;
+
+ if (dscp > AM65_CPSW_DSCP_MAX)
+ return -EINVAL;
+
+ if (pri > AM65_CPSW_PRI_MAX)
+ return -EINVAL;
+
+ /* 32-bit register offset to this dscp */
+ reg_ofs = (dscp / AM65_CPSW_DSCP_PRI_PER_REG) * 4;
+ /* bit field offset to this dscp */
+ bit_ofs = AM65_CPSW_DSCP_PRI_SIZE * (dscp % AM65_CPSW_DSCP_PRI_PER_REG);
+
+ val = readl(slave->port_base + AM65_CPSW_PORTN_REG_DSCP_MAP + reg_ofs);
+ val &= ~(AM65_CPSW_PRI_MAX << bit_ofs); /* clear */
+ val |= pri << bit_ofs; /* set */
+ writel(val, slave->port_base + AM65_CPSW_PORTN_REG_DSCP_MAP + reg_ofs);
+
+ return 0;
+}
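A worked example of the indexing above: mapping DSCP 46 (EF) to priority 6 touches the sixth 32-bit map register:

/* am65_cpsw_port_set_dscp_map(slave, 46, 6):
 *   reg_ofs = (46 / 8) * 4 = 0x14
 *   bit_ofs = 4 * (46 % 8) = 24
 * so priority 6 is written into the 4-bit field at bit offset 24 of
 * AM65_CPSW_PORTN_REG_DSCP_MAP + 0x14.
 */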
+
+static void am65_cpsw_port_enable_dscp_map(struct am65_cpsw_port *slave)
+{
+ int dscp, pri;
+ u32 val;
+
+ /* Default DSCP to User Priority mapping as per:
+ * https://datatracker.ietf.org/doc/html/rfc8325#section-4.3
+ * and
+ * https://datatracker.ietf.org/doc/html/rfc8622#section-11
+ */
+ for (dscp = 0; dscp <= AM65_CPSW_DSCP_MAX; dscp++) {
+ switch (dscp) {
+ case 56: /* CS7 */
+ case 48: /* CS6 */
+ pri = 7;
+ break;
+ case 46: /* EF */
+ case 44: /* VA */
+ pri = 6;
+ break;
+ case 40: /* CS5 */
+ pri = 5;
+ break;
+ case 34: /* AF41 */
+ case 36: /* AF42 */
+ case 38: /* AF43 */
+ case 32: /* CS4 */
+ case 26: /* AF31 */
+ case 28: /* AF32 */
+ case 30: /* AF33 */
+ case 24: /* CS3 */
+ pri = 4;
+ break;
+ case 18: /* AF21 */
+ case 20: /* AF22 */
+ case 22: /* AF23 */
+ pri = 3;
+ break;
+ case 16: /* CS2 */
+ case 10: /* AF11 */
+ case 12: /* AF12 */
+ case 14: /* AF13 */
+ case 0: /* DF */
+ pri = 0;
+ break;
+ case 8: /* CS1 */
+ case 1: /* LE */
+ pri = 1;
+ break;
+ default:
+ pri = 0;
+ break;
+ }
+
+ am65_cpsw_port_set_dscp_map(slave, dscp, pri);
+ }
+
+ /* enable port IPV4 and IPV6 DSCP for this port */
+ val = readl(slave->port_base + AM65_CPSW_PORTN_REG_CTL);
+ val |= AM65_CPSW_PN_REG_CTL_DSCP_IPV4_EN |
+ AM65_CPSW_PN_REG_CTL_DSCP_IPV6_EN;
+ writel(val, slave->port_base + AM65_CPSW_PORTN_REG_CTL);
+}
+
static void am65_cpsw_sl_ctl_reset(struct am65_cpsw_port *port)
{
cpsw_sl_reset(port->slave.mac_sl, 100);
@@ -299,21 +427,20 @@ static void am65_cpsw_nuss_ndo_host_tx_timeout(struct net_device *ndev,
if (netif_tx_queue_stopped(netif_txq)) {
/* try recover if stopped by us */
- txq_trans_update(netif_txq);
+ txq_trans_update(ndev, netif_txq);
netif_tx_wake_queue(netif_txq);
}
}
static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
- struct sk_buff *skb)
+ struct page *page, u32 flow_idx)
{
struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
struct cppi5_host_desc_t *desc_rx;
struct device *dev = common->dev;
- u32 pkt_len = skb_tailroom(skb);
+ struct am65_cpsw_swdata *swdata;
dma_addr_t desc_dma;
dma_addr_t buf_dma;
- void *swdata;
desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
if (!desc_rx) {
@@ -322,22 +449,26 @@ static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
}
desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);
- buf_dma = dma_map_single(rx_chn->dma_dev, skb->data, pkt_len,
- DMA_FROM_DEVICE);
+ buf_dma = dma_map_single(rx_chn->dma_dev,
+ page_address(page) + AM65_CPSW_HEADROOM,
+ AM65_CPSW_MAX_PACKET_SIZE, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(rx_chn->dma_dev, buf_dma))) {
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
- dev_err(dev, "Failed to map rx skb buffer\n");
+ dev_err(dev, "Failed to map rx buffer\n");
return -EINVAL;
}
cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
AM65_CPSW_NAV_PS_DATA_SIZE);
k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma);
- cppi5_hdesc_attach_buf(desc_rx, buf_dma, skb_tailroom(skb), buf_dma, skb_tailroom(skb));
+ cppi5_hdesc_attach_buf(desc_rx, buf_dma, AM65_CPSW_MAX_PACKET_SIZE,
+ buf_dma, AM65_CPSW_MAX_PACKET_SIZE);
swdata = cppi5_hdesc_get_swdata(desc_rx);
- *((void **)swdata) = skb;
+ swdata->page = page;
+ swdata->flow_id = flow_idx;
- return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, 0, desc_rx, desc_dma);
+ return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, flow_idx,
+ desc_rx, desc_dma);
}
void am65_cpsw_nuss_set_p0_ptype(struct am65_cpsw_common *common)
@@ -368,26 +499,296 @@ static void am65_cpsw_init_host_port_switch(struct am65_cpsw_common *common);
static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common);
static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port);
static void am65_cpsw_init_port_emac_ale(struct am65_cpsw_port *port);
+static inline void am65_cpsw_put_page(struct am65_cpsw_rx_flow *flow,
+ struct page *page,
+ bool allow_direct);
+static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma);
+static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma);
+
+static void am65_cpsw_destroy_rxq(struct am65_cpsw_common *common, int id)
+{
+ struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
+ struct am65_cpsw_rx_flow *flow;
+ struct xdp_rxq_info *rxq;
+ int port;
+
+ flow = &rx_chn->flows[id];
+ napi_disable(&flow->napi_rx);
+ hrtimer_cancel(&flow->rx_hrtimer);
+ k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, id, rx_chn,
+ am65_cpsw_nuss_rx_cleanup);
+
+ for (port = 0; port < common->port_num; port++) {
+ if (!common->ports[port].ndev)
+ continue;
+
+ rxq = &common->ports[port].xdp_rxq[id];
+
+ if (xdp_rxq_info_is_reg(rxq))
+ xdp_rxq_info_unreg(rxq);
+ }
+
+ if (flow->page_pool) {
+ page_pool_destroy(flow->page_pool);
+ flow->page_pool = NULL;
+ }
+}
+
+static void am65_cpsw_destroy_rxqs(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
+ int id;
+
+ reinit_completion(&common->tdown_complete);
+ k3_udma_glue_tdown_rx_chn(rx_chn->rx_chn, true);
+
+ if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ) {
+ id = wait_for_completion_timeout(&common->tdown_complete, msecs_to_jiffies(1000));
+ if (!id)
+ dev_err(common->dev, "rx teardown timeout\n");
+ }
+
+ for (id = common->rx_ch_num_flows - 1; id >= 0; id--)
+ am65_cpsw_destroy_rxq(common, id);
+
+ k3_udma_glue_disable_rx_chn(common->rx_chns.rx_chn);
+}
+
+static int am65_cpsw_create_rxq(struct am65_cpsw_common *common, int id)
+{
+ struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
+ struct page_pool_params pp_params = {
+ .flags = PP_FLAG_DMA_MAP,
+ .order = 0,
+ .pool_size = AM65_CPSW_MAX_RX_DESC,
+ .nid = dev_to_node(common->dev),
+ .dev = common->dev,
+ .dma_dir = DMA_BIDIRECTIONAL,
+ /* .napi set dynamically */
+ };
+ struct am65_cpsw_rx_flow *flow;
+ struct xdp_rxq_info *rxq;
+ struct page_pool *pool;
+ struct page *page;
+ int port, ret, i;
+
+ flow = &rx_chn->flows[id];
+ pp_params.napi = &flow->napi_rx;
+ pool = page_pool_create(&pp_params);
+ if (IS_ERR(pool)) {
+ ret = PTR_ERR(pool);
+ return ret;
+ }
+
+ flow->page_pool = pool;
+
+ /* using the same page pool is allowed as rx handlers never run
+ * simultaneously for both ndevs
+ */
+ for (port = 0; port < common->port_num; port++) {
+ if (!common->ports[port].ndev)
+ continue;
+
+ rxq = &common->ports[port].xdp_rxq[id];
+ ret = xdp_rxq_info_reg(rxq, common->ports[port].ndev,
+ id, flow->napi_rx.napi_id);
+ if (ret)
+ goto err;
+
+ ret = xdp_rxq_info_reg_mem_model(rxq,
+ MEM_TYPE_PAGE_POOL,
+ pool);
+ if (ret)
+ goto err;
+ }
+
+ for (i = 0; i < AM65_CPSW_MAX_RX_DESC; i++) {
+ page = page_pool_dev_alloc_pages(flow->page_pool);
+ if (!page) {
+ dev_err(common->dev, "cannot allocate page in flow %d\n",
+ id);
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = am65_cpsw_nuss_rx_push(common, page, id);
+ if (ret < 0) {
+ dev_err(common->dev,
+ "cannot submit page to rx channel flow %d, error %d\n",
+ id, ret);
+ am65_cpsw_put_page(flow, page, false);
+ goto err;
+ }
+ }
+
+ napi_enable(&flow->napi_rx);
+ return 0;
+
+err:
+ am65_cpsw_destroy_rxq(common, id);
+ return ret;
+}
+
+static int am65_cpsw_create_rxqs(struct am65_cpsw_common *common)
+{
+ int id, ret;
+
+ for (id = 0; id < common->rx_ch_num_flows; id++) {
+ ret = am65_cpsw_create_rxq(common, id);
+ if (ret) {
+ dev_err(common->dev, "couldn't create rxq %d: %d\n",
+ id, ret);
+ goto err;
+ }
+ }
+
+ ret = k3_udma_glue_enable_rx_chn(common->rx_chns.rx_chn);
+ if (ret) {
+ dev_err(common->dev, "couldn't enable rx chn: %d\n", ret);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ for (--id; id >= 0; id--)
+ am65_cpsw_destroy_rxq(common, id);
+
+ return ret;
+}
+
+static void am65_cpsw_destroy_txq(struct am65_cpsw_common *common, int id)
+{
+ struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[id];
+
+ napi_disable(&tx_chn->napi_tx);
+ hrtimer_cancel(&tx_chn->tx_hrtimer);
+ k3_udma_glue_reset_tx_chn(tx_chn->tx_chn, tx_chn,
+ am65_cpsw_nuss_tx_cleanup);
+ k3_udma_glue_disable_tx_chn(tx_chn->tx_chn);
+}
+
+static void am65_cpsw_destroy_txqs(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_tx_chn *tx_chn = common->tx_chns;
+ int id;
+
+ /* shutdown tx channels */
+ atomic_set(&common->tdown_cnt, common->tx_ch_num);
+ /* ensure new tdown_cnt value is visible */
+ smp_mb__after_atomic();
+ reinit_completion(&common->tdown_complete);
+
+ for (id = 0; id < common->tx_ch_num; id++)
+ k3_udma_glue_tdown_tx_chn(tx_chn[id].tx_chn, false);
+
+ id = wait_for_completion_timeout(&common->tdown_complete,
+ msecs_to_jiffies(1000));
+ if (!id)
+ dev_err(common->dev, "tx teardown timeout\n");
+
+ for (id = common->tx_ch_num - 1; id >= 0; id--)
+ am65_cpsw_destroy_txq(common, id);
+}
+
+static int am65_cpsw_create_txq(struct am65_cpsw_common *common, int id)
+{
+ struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[id];
+ int ret;
+
+ ret = k3_udma_glue_enable_tx_chn(tx_chn->tx_chn);
+ if (ret)
+ return ret;
+
+ napi_enable(&tx_chn->napi_tx);
+
+ return 0;
+}
+
+static int am65_cpsw_create_txqs(struct am65_cpsw_common *common)
+{
+ int id, ret;
+
+ for (id = 0; id < common->tx_ch_num; id++) {
+ ret = am65_cpsw_create_txq(common, id);
+ if (ret) {
+ dev_err(common->dev, "couldn't create txq %d: %d\n",
+ id, ret);
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ for (--id; id >= 0; id--)
+ am65_cpsw_destroy_txq(common, id);
+
+ return ret;
+}
+
+static int am65_cpsw_nuss_desc_idx(struct k3_cppi_desc_pool *desc_pool,
+ void *desc,
+ unsigned char dsize_log2)
+{
+ void *pool_addr = k3_cppi_desc_pool_cpuaddr(desc_pool);
+
+ return (desc - pool_addr) >> dsize_log2;
+}
+
+static void am65_cpsw_nuss_set_buf_type(struct am65_cpsw_tx_chn *tx_chn,
+ struct cppi5_host_desc_t *desc,
+ enum am65_cpsw_tx_buf_type buf_type)
+{
+ int desc_idx;
+
+ desc_idx = am65_cpsw_nuss_desc_idx(tx_chn->desc_pool, desc,
+ tx_chn->dsize_log2);
+ k3_cppi_desc_pool_desc_info_set(tx_chn->desc_pool, desc_idx,
+ (void *)buf_type);
+}
+
+static enum am65_cpsw_tx_buf_type am65_cpsw_nuss_buf_type(struct am65_cpsw_tx_chn *tx_chn,
+ dma_addr_t desc_dma)
+{
+ struct cppi5_host_desc_t *desc_tx;
+ int desc_idx;
+
+ desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
+ desc_idx = am65_cpsw_nuss_desc_idx(tx_chn->desc_pool, desc_tx,
+ tx_chn->dsize_log2);
+
+ return (enum am65_cpsw_tx_buf_type)k3_cppi_desc_pool_desc_info(tx_chn->desc_pool,
+ desc_idx);
+}
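
These two helpers tag each TX descriptor out of band, so the completion path can tell skb and XDP buffers apart without growing swdata. A quick illustration of the round trip (comments only; the numbers are purely for example):

/* Example with dsize_log2 = 7 (i.e. 128-byte host descriptors): the
 * descriptor at pool_base + 3 * 128 maps to index 3, so
 *
 *   am65_cpsw_nuss_set_buf_type()  stores  desc_info[3] = AM65_CPSW_TX_BUF_TYPE_XDP_TX
 *   am65_cpsw_nuss_buf_type()      reads   desc_info[3] back from the
 *                                          completed descriptor's dma address
 *
 * The WARN_ON() in am65_cpsw_nuss_init_tx_chns() guarantees the descriptor
 * size really is a power of two, so the shift is exact.
 */
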
+
+static inline void am65_cpsw_put_page(struct am65_cpsw_rx_flow *flow,
+ struct page *page,
+ bool allow_direct)
+{
+ page_pool_put_full_page(flow->page_pool, page, allow_direct);
+}
static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
{
struct am65_cpsw_rx_chn *rx_chn = data;
struct cppi5_host_desc_t *desc_rx;
- struct sk_buff *skb;
+ struct am65_cpsw_swdata *swdata;
dma_addr_t buf_dma;
+ struct page *page;
u32 buf_dma_len;
- void **swdata;
+ u32 flow_id;
desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_rx);
- skb = *swdata;
+ page = swdata->page;
+ flow_id = swdata->flow_id;
cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
-
dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
-
- dev_kfree_skb_any(skb);
+ am65_cpsw_put_page(&rx_chn->flows[flow_id], page, false);
}
static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
@@ -428,24 +829,48 @@ static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma)
{
struct am65_cpsw_tx_chn *tx_chn = data;
+ enum am65_cpsw_tx_buf_type buf_type;
+ struct am65_cpsw_tx_swdata *swdata;
struct cppi5_host_desc_t *desc_tx;
+ struct xdp_frame *xdpf;
struct sk_buff *skb;
- void **swdata;
desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_tx);
- skb = *(swdata);
+ buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma);
+ if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) {
+ skb = swdata->skb;
+ dev_kfree_skb_any(skb);
+ } else {
+ xdpf = swdata->xdpf;
+ xdp_return_frame(xdpf);
+ }
+
am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
+}
- dev_kfree_skb_any(skb);
+static struct sk_buff *am65_cpsw_build_skb(void *page_addr,
+ struct net_device *ndev,
+ unsigned int len,
+ unsigned int headroom)
+{
+ struct sk_buff *skb;
+
+ skb = build_skb(page_addr, len);
+ if (unlikely(!skb))
+ return NULL;
+
+ skb_reserve(skb, headroom);
+ skb->dev = ndev;
+
+ return skb;
}
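
For orientation, the RX buffer that am65_cpsw_build_skb() wraps is a full page_pool page with fixed headroom in front of the packet; roughly:

/* Rough layout of one RX page (illustrative, not to scale):
 *
 *   page_address(page)
 *   |<-- AM65_CPSW_HEADROOM -->|<---- packet data (pkt_len) ---->| tail |
 *   ^                          ^
 *   skb->head                  skb->data after skb_reserve(headroom)
 *
 * am65_cpsw_nuss_rx_push() maps AM65_CPSW_MAX_PACKET_SIZE bytes starting at
 * page + AM65_CPSW_HEADROOM for the hardware, while build_skb() is given the
 * whole PAGE_SIZE so the skb_shared_info area at the end of the page is
 * accounted for.
 */
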
static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
{
struct am65_cpsw_host *host_p = am65_common_get_host(common);
- int port_idx, i, ret, tx;
- struct sk_buff *skb;
u32 val, port_mask;
+ int port_idx, ret;
if (common->usage_count)
return 0;
@@ -485,7 +910,7 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
ALE_DEFAULT_THREAD_ID, 0);
cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
ALE_DEFAULT_THREAD_ENABLE, 1);
- /* switch to vlan unaware mode */
+ /* switch to vlan aware mode */
cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_VLAN_AWARE, 1);
cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
@@ -505,128 +930,33 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
am65_cpsw_qos_tx_p0_rate_init(common);
- for (i = 0; i < common->rx_chns.descs_num; i++) {
- skb = __netdev_alloc_skb_ip_align(NULL,
- AM65_CPSW_MAX_PACKET_SIZE,
- GFP_KERNEL);
- if (!skb) {
- ret = -ENOMEM;
- dev_err(common->dev, "cannot allocate skb\n");
- if (i)
- goto fail_rx;
-
- return ret;
- }
-
- ret = am65_cpsw_nuss_rx_push(common, skb);
- if (ret < 0) {
- dev_err(common->dev,
- "cannot submit skb to channel rx, error %d\n",
- ret);
- kfree_skb(skb);
- if (i)
- goto fail_rx;
-
- return ret;
- }
- }
-
- ret = k3_udma_glue_enable_rx_chn(common->rx_chns.rx_chn);
- if (ret) {
- dev_err(common->dev, "couldn't enable rx chn: %d\n", ret);
- goto fail_rx;
- }
-
- for (tx = 0; tx < common->tx_ch_num; tx++) {
- ret = k3_udma_glue_enable_tx_chn(common->tx_chns[tx].tx_chn);
- if (ret) {
- dev_err(common->dev, "couldn't enable tx chn %d: %d\n",
- tx, ret);
- tx--;
- goto fail_tx;
- }
- napi_enable(&common->tx_chns[tx].napi_tx);
- }
+ ret = am65_cpsw_create_rxqs(common);
+ if (ret)
+ return ret;
- napi_enable(&common->napi_rx);
- if (common->rx_irq_disabled) {
- common->rx_irq_disabled = false;
- enable_irq(common->rx_chns.irq);
- }
+ ret = am65_cpsw_create_txqs(common);
+ if (ret)
+ goto cleanup_rx;
dev_dbg(common->dev, "cpsw_nuss started\n");
return 0;
-fail_tx:
- while (tx >= 0) {
- napi_disable(&common->tx_chns[tx].napi_tx);
- k3_udma_glue_disable_tx_chn(common->tx_chns[tx].tx_chn);
- tx--;
- }
-
- k3_udma_glue_disable_rx_chn(common->rx_chns.rx_chn);
+cleanup_rx:
+ am65_cpsw_destroy_rxqs(common);
-fail_rx:
- k3_udma_glue_reset_rx_chn(common->rx_chns.rx_chn, 0,
- &common->rx_chns,
- am65_cpsw_nuss_rx_cleanup, 0);
return ret;
}
static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
{
- int i;
-
if (common->usage_count != 1)
return 0;
cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
- /* shutdown tx channels */
- atomic_set(&common->tdown_cnt, common->tx_ch_num);
- /* ensure new tdown_cnt value is visible */
- smp_mb__after_atomic();
- reinit_completion(&common->tdown_complete);
-
- for (i = 0; i < common->tx_ch_num; i++)
- k3_udma_glue_tdown_tx_chn(common->tx_chns[i].tx_chn, false);
-
- i = wait_for_completion_timeout(&common->tdown_complete,
- msecs_to_jiffies(1000));
- if (!i)
- dev_err(common->dev, "tx timeout\n");
- for (i = 0; i < common->tx_ch_num; i++) {
- napi_disable(&common->tx_chns[i].napi_tx);
- hrtimer_cancel(&common->tx_chns[i].tx_hrtimer);
- }
-
- for (i = 0; i < common->tx_ch_num; i++) {
- k3_udma_glue_reset_tx_chn(common->tx_chns[i].tx_chn,
- &common->tx_chns[i],
- am65_cpsw_nuss_tx_cleanup);
- k3_udma_glue_disable_tx_chn(common->tx_chns[i].tx_chn);
- }
-
- reinit_completion(&common->tdown_complete);
- k3_udma_glue_tdown_rx_chn(common->rx_chns.rx_chn, true);
-
- if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ) {
- i = wait_for_completion_timeout(&common->tdown_complete, msecs_to_jiffies(1000));
- if (!i)
- dev_err(common->dev, "rx teardown timeout\n");
- }
-
- napi_disable(&common->napi_rx);
- hrtimer_cancel(&common->rx_hrtimer);
-
- for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++)
- k3_udma_glue_reset_rx_chn(common->rx_chns.rx_chn, i,
- &common->rx_chns,
- am65_cpsw_nuss_rx_cleanup, !!i);
-
- k3_udma_glue_disable_rx_chn(common->rx_chns.rx_chn);
-
+ am65_cpsw_destroy_txqs(common);
+ am65_cpsw_destroy_rxqs(common);
cpsw_ale_stop(common->ale);
writel(0, common->cpsw_base + AM65_CPSW_REG_CTL);
@@ -700,7 +1030,7 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
goto runtime_put;
}
- ret = netif_set_real_num_rx_queues(ndev, AM65_CPSW_MAX_RX_QUEUES);
+ ret = netif_set_real_num_rx_queues(ndev, common->rx_ch_num_flows);
if (ret) {
dev_err(common->dev, "cannot set real number of rx queues\n");
goto runtime_put;
@@ -719,7 +1049,17 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
common->usage_count++;
+ /* VLAN aware CPSW mode is incompatible with some DSA tagging schemes.
+ * Therefore disable VLAN_AWARE mode if any of the ports is a DSA Port.
+ */
+ if (netdev_uses_dsa(ndev)) {
+ reg = readl(common->cpsw_base + AM65_CPSW_REG_CTL);
+ reg &= ~AM65_CPSW_CTL_VLAN_AWARE;
+ writel(reg, common->cpsw_base + AM65_CPSW_REG_CTL);
+ }
+
am65_cpsw_port_set_sl_mac(port, ndev->dev_addr);
+ am65_cpsw_port_enable_dscp_map(port);
if (common->is_emac_mode)
am65_cpsw_init_port_emac_ale(port);
@@ -729,7 +1069,7 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
/* mac_sl should be configured via phy-link interface */
am65_cpsw_sl_ctl_reset(port);
- ret = phylink_of_phy_connect(port->slave.phylink, port->slave.phy_node, 0);
+ ret = phylink_of_phy_connect(port->slave.phylink, port->slave.port_np, 0);
if (ret)
goto error_cleanup;
@@ -749,16 +1089,147 @@ runtime_put:
return ret;
}
-static void am65_cpsw_nuss_rx_ts(struct sk_buff *skb, u32 *psdata)
+static int am65_cpsw_xdp_tx_frame(struct net_device *ndev,
+ struct am65_cpsw_tx_chn *tx_chn,
+ struct xdp_frame *xdpf,
+ enum am65_cpsw_tx_buf_type buf_type)
{
- struct skb_shared_hwtstamps *ssh;
- u64 ns;
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct cppi5_host_desc_t *host_desc;
+ struct am65_cpsw_tx_swdata *swdata;
+ struct netdev_queue *netif_txq;
+ dma_addr_t dma_desc, dma_buf;
+ u32 pkt_len = xdpf->len;
+ int ret;
+
+ host_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
+ if (unlikely(!host_desc)) {
+ ndev->stats.tx_dropped++;
+ return AM65_CPSW_XDP_CONSUMED; /* drop */
+ }
+
+ am65_cpsw_nuss_set_buf_type(tx_chn, host_desc, buf_type);
+
+ dma_buf = dma_map_single(tx_chn->dma_dev, xdpf->data,
+ pkt_len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(tx_chn->dma_dev, dma_buf))) {
+ ndev->stats.tx_dropped++;
+ ret = AM65_CPSW_XDP_CONSUMED; /* drop */
+ goto pool_free;
+ }
+
+ cppi5_hdesc_init(host_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
+ AM65_CPSW_NAV_PS_DATA_SIZE);
+ cppi5_hdesc_set_pkttype(host_desc, AM65_CPSW_CPPI_TX_PKT_TYPE);
+ cppi5_hdesc_set_pktlen(host_desc, pkt_len);
+ cppi5_desc_set_pktids(&host_desc->hdr, 0, AM65_CPSW_CPPI_TX_FLOW_ID);
+ cppi5_desc_set_tags_ids(&host_desc->hdr, 0, port->port_id);
+
+ k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &dma_buf);
+ cppi5_hdesc_attach_buf(host_desc, dma_buf, pkt_len, dma_buf, pkt_len);
+
+ swdata = cppi5_hdesc_get_swdata(host_desc);
+ swdata->ndev = ndev;
+ swdata->xdpf = xdpf;
+
+ /* Report BQL before sending the packet */
+ netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
+ netdev_tx_sent_queue(netif_txq, pkt_len);
+
+ dma_desc = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, host_desc);
+ if (AM65_CPSW_IS_CPSW2G(common)) {
+ ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, host_desc,
+ dma_desc);
+ } else {
+ spin_lock_bh(&tx_chn->lock);
+ ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, host_desc,
+ dma_desc);
+ spin_unlock_bh(&tx_chn->lock);
+ }
+ if (ret) {
+ /* Inform BQL */
+ netdev_tx_completed_queue(netif_txq, 1, pkt_len);
+ ndev->stats.tx_errors++;
+ ret = AM65_CPSW_XDP_CONSUMED; /* drop */
+ goto dma_unmap;
+ }
+
+ return 0;
+
+dma_unmap:
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &dma_buf);
+ dma_unmap_single(tx_chn->dma_dev, dma_buf, pkt_len, DMA_TO_DEVICE);
+pool_free:
+ k3_cppi_desc_pool_free(tx_chn->desc_pool, host_desc);
+ return ret;
+}
+
+static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow,
+ struct am65_cpsw_port *port,
+ struct xdp_buff *xdp, int *len)
+{
+ struct am65_cpsw_common *common = flow->common;
+ struct net_device *ndev = port->ndev;
+ int ret = AM65_CPSW_XDP_CONSUMED;
+ struct am65_cpsw_tx_chn *tx_chn;
+ struct netdev_queue *netif_txq;
+ int cpu = smp_processor_id();
+ struct xdp_frame *xdpf;
+ struct bpf_prog *prog;
+ int pkt_len;
+ u32 act;
+ int err;
- ns = ((u64)psdata[1] << 32) | psdata[0];
+ pkt_len = *len;
+ prog = READ_ONCE(port->xdp_prog);
+ if (!prog)
+ return AM65_CPSW_XDP_PASS;
+
+ act = bpf_prog_run_xdp(prog, xdp);
+ /* XDP prog might have changed packet data and boundaries */
+ *len = xdp->data_end - xdp->data;
+
+ switch (act) {
+ case XDP_PASS:
+ return AM65_CPSW_XDP_PASS;
+ case XDP_TX:
+ tx_chn = &common->tx_chns[cpu % AM65_CPSW_MAX_QUEUES];
+ netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
+
+ xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf)) {
+ ndev->stats.tx_dropped++;
+ goto drop;
+ }
+
+ __netif_tx_lock(netif_txq, cpu);
+ err = am65_cpsw_xdp_tx_frame(ndev, tx_chn, xdpf,
+ AM65_CPSW_TX_BUF_TYPE_XDP_TX);
+ __netif_tx_unlock(netif_txq);
+ if (err)
+ goto drop;
+
+ dev_sw_netstats_rx_add(ndev, pkt_len);
+ return AM65_CPSW_XDP_TX;
+ case XDP_REDIRECT:
+ if (unlikely(xdp_do_redirect(ndev, xdp, prog)))
+ goto drop;
+
+ dev_sw_netstats_rx_add(ndev, pkt_len);
+ return AM65_CPSW_XDP_REDIRECT;
+ default:
+ bpf_warn_invalid_xdp_action(ndev, prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+drop:
+ trace_xdp_exception(ndev, prog, act);
+ fallthrough;
+ case XDP_DROP:
+ ndev->stats.rx_dropped++;
+ }
- ssh = skb_hwtstamps(skb);
- memset(ssh, 0, sizeof(*ssh));
- ssh->hwtstamp = ns_to_ktime(ns);
+ return ret;
}
/* RX psdata[2] word format - checksum information */
@@ -794,23 +1265,28 @@ static void am65_cpsw_nuss_rx_csum(struct sk_buff *skb, u32 csum_info)
}
}
-static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
- u32 flow_idx)
+static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
+ int *xdp_state)
{
- struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
+ struct am65_cpsw_rx_chn *rx_chn = &flow->common->rx_chns;
u32 buf_dma_len, pkt_len, port_id = 0, csum_info;
+ struct am65_cpsw_common *common = flow->common;
struct am65_cpsw_ndev_priv *ndev_priv;
- struct am65_cpsw_ndev_stats *stats;
struct cppi5_host_desc_t *desc_rx;
struct device *dev = common->dev;
- struct sk_buff *skb, *new_skb;
+ struct am65_cpsw_swdata *swdata;
+ struct page *page, *new_page;
dma_addr_t desc_dma, buf_dma;
struct am65_cpsw_port *port;
struct net_device *ndev;
- void **swdata;
+ u32 flow_idx = flow->id;
+ struct sk_buff *skb;
+ struct xdp_buff xdp;
+ int headroom, ret;
+ void *page_addr;
u32 *psdata;
- int ret = 0;
+ *xdp_state = AM65_CPSW_XDP_PASS;
ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_idx, &desc_dma);
if (ret) {
if (ret != -ENODATA)
@@ -830,7 +1306,8 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
__func__, flow_idx, &desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_rx);
- skb = *swdata;
+ page = swdata->page;
+ page_addr = page_address(page);
cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
@@ -838,49 +1315,68 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
dev_dbg(dev, "%s rx port_id:%d\n", __func__, port_id);
port = am65_common_get_port(common, port_id);
ndev = port->ndev;
- skb->dev = ndev;
-
psdata = cppi5_hdesc_get_psdata(desc_rx);
- /* add RX timestamp */
- if (port->rx_ts_enabled)
- am65_cpsw_nuss_rx_ts(skb, psdata);
csum_info = psdata[2];
dev_dbg(dev, "%s rx csum_info:%#x\n", __func__, csum_info);
dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
-
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
- new_skb = netdev_alloc_skb_ip_align(ndev, AM65_CPSW_MAX_PACKET_SIZE);
- if (new_skb) {
- ndev_priv = netdev_priv(ndev);
- am65_cpsw_nuss_set_offload_fwd_mark(skb, ndev_priv->offload_fwd_mark);
- skb_put(skb, pkt_len);
- skb->protocol = eth_type_trans(skb, ndev);
- am65_cpsw_nuss_rx_csum(skb, csum_info);
- napi_gro_receive(&common->napi_rx, skb);
-
- stats = this_cpu_ptr(ndev_priv->stats);
-
- u64_stats_update_begin(&stats->syncp);
- stats->rx_packets++;
- stats->rx_bytes += pkt_len;
- u64_stats_update_end(&stats->syncp);
- kmemleak_not_leak(new_skb);
+ if (port->xdp_prog) {
+ xdp_init_buff(&xdp, PAGE_SIZE, &port->xdp_rxq[flow->id]);
+ xdp_prepare_buff(&xdp, page_addr, AM65_CPSW_HEADROOM,
+ pkt_len, false);
+ *xdp_state = am65_cpsw_run_xdp(flow, port, &xdp, &pkt_len);
+ if (*xdp_state == AM65_CPSW_XDP_CONSUMED) {
+ page = virt_to_head_page(xdp.data);
+ am65_cpsw_put_page(flow, page, true);
+ goto allocate;
+ }
+
+ if (*xdp_state != AM65_CPSW_XDP_PASS)
+ goto allocate;
+
+ headroom = xdp.data - xdp.data_hard_start;
} else {
- ndev->stats.rx_dropped++;
- new_skb = skb;
+ headroom = AM65_CPSW_HEADROOM;
+ }
+
+ skb = am65_cpsw_build_skb(page_addr, ndev,
+ PAGE_SIZE, headroom);
+ if (unlikely(!skb)) {
+ new_page = page;
+ goto requeue;
+ }
+
+ ndev_priv = netdev_priv(ndev);
+ am65_cpsw_nuss_set_offload_fwd_mark(skb, ndev_priv->offload_fwd_mark);
+ skb_put(skb, pkt_len);
+ if (port->rx_ts_enabled)
+ am65_cpts_rx_timestamp(common->cpts, skb);
+ skb_mark_for_recycle(skb);
+ skb->protocol = eth_type_trans(skb, ndev);
+ am65_cpsw_nuss_rx_csum(skb, csum_info);
+ napi_gro_receive(&flow->napi_rx, skb);
+
+ dev_sw_netstats_rx_add(ndev, pkt_len);
+
+allocate:
+ new_page = page_pool_dev_alloc_pages(flow->page_pool);
+ if (unlikely(!new_page)) {
+ dev_err(dev, "page alloc failed\n");
+ return -ENOMEM;
}
if (netif_dormant(ndev)) {
- dev_kfree_skb_any(new_skb);
+ am65_cpsw_put_page(flow, new_page, true);
ndev->stats.rx_dropped++;
return 0;
}
- ret = am65_cpsw_nuss_rx_push(common, new_skb);
+requeue:
+ ret = am65_cpsw_nuss_rx_push(common, new_page, flow_idx);
if (WARN_ON(ret < 0)) {
- dev_kfree_skb_any(new_skb);
+ am65_cpsw_put_page(flow, new_page, true);
ndev->stats.rx_errors++;
ndev->stats.rx_dropped++;
}
@@ -890,46 +1386,47 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
static enum hrtimer_restart am65_cpsw_nuss_rx_timer_callback(struct hrtimer *timer)
{
- struct am65_cpsw_common *common =
- container_of(timer, struct am65_cpsw_common, rx_hrtimer);
+ struct am65_cpsw_rx_flow *flow = container_of(timer,
+ struct am65_cpsw_rx_flow,
+ rx_hrtimer);
- enable_irq(common->rx_chns.irq);
+ enable_irq(flow->irq);
return HRTIMER_NORESTART;
}
static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
{
- struct am65_cpsw_common *common = am65_cpsw_napi_to_common(napi_rx);
- int flow = AM65_CPSW_MAX_RX_FLOWS;
+ struct am65_cpsw_rx_flow *flow = am65_cpsw_napi_to_rx_flow(napi_rx);
+ struct am65_cpsw_common *common = flow->common;
+ int xdp_state_or = 0;
int cur_budget, ret;
+ int xdp_state;
int num_rx = 0;
- /* process every flow */
- while (flow--) {
- cur_budget = budget - num_rx;
-
- while (cur_budget--) {
- ret = am65_cpsw_nuss_rx_packets(common, flow);
- if (ret)
- break;
- num_rx++;
- }
-
- if (num_rx >= budget)
+ /* process only this flow */
+ cur_budget = budget;
+ while (cur_budget--) {
+ ret = am65_cpsw_nuss_rx_packets(flow, &xdp_state);
+ xdp_state_or |= xdp_state;
+ if (ret)
break;
+ num_rx++;
}
+ if (xdp_state_or & AM65_CPSW_XDP_REDIRECT)
+ xdp_do_flush();
+
dev_dbg(common->dev, "%s num_rx:%d %d\n", __func__, num_rx, budget);
if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) {
- if (common->rx_irq_disabled) {
- common->rx_irq_disabled = false;
- if (unlikely(common->rx_pace_timeout)) {
- hrtimer_start(&common->rx_hrtimer,
- ns_to_ktime(common->rx_pace_timeout),
+ if (flow->irq_disabled) {
+ flow->irq_disabled = false;
+ if (unlikely(flow->rx_pace_timeout)) {
+ hrtimer_start(&flow->rx_hrtimer,
+ ns_to_ktime(flow->rx_pace_timeout),
HRTIMER_MODE_REL_PINNED);
} else {
- enable_irq(common->rx_chns.irq);
+ enable_irq(flow->irq);
}
}
}
@@ -937,37 +1434,6 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
return num_rx;
}
-static struct sk_buff *
-am65_cpsw_nuss_tx_compl_packet(struct am65_cpsw_tx_chn *tx_chn,
- dma_addr_t desc_dma)
-{
- struct am65_cpsw_ndev_priv *ndev_priv;
- struct am65_cpsw_ndev_stats *stats;
- struct cppi5_host_desc_t *desc_tx;
- struct net_device *ndev;
- struct sk_buff *skb;
- void **swdata;
-
- desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
- desc_dma);
- swdata = cppi5_hdesc_get_swdata(desc_tx);
- skb = *(swdata);
- am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
-
- ndev = skb->dev;
-
- am65_cpts_tx_timestamp(tx_chn->common->cpts, skb);
-
- ndev_priv = netdev_priv(ndev);
- stats = this_cpu_ptr(ndev_priv->stats);
- u64_stats_update_begin(&stats->syncp);
- stats->tx_packets++;
- stats->tx_bytes += skb->len;
- u64_stats_update_end(&stats->syncp);
-
- return skb;
-}
-
static void am65_cpsw_nuss_tx_wake(struct am65_cpsw_tx_chn *tx_chn, struct net_device *ndev,
struct netdev_queue *netif_txq)
{
@@ -988,11 +1454,17 @@ static void am65_cpsw_nuss_tx_wake(struct am65_cpsw_tx_chn *tx_chn, struct net_d
static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
int chn, unsigned int budget, bool *tdown)
{
+ bool single_port = AM65_CPSW_IS_CPSW2G(common);
+ enum am65_cpsw_tx_buf_type buf_type;
+ struct am65_cpsw_tx_swdata *swdata;
+ struct cppi5_host_desc_t *desc_tx;
struct device *dev = common->dev;
struct am65_cpsw_tx_chn *tx_chn;
struct netdev_queue *netif_txq;
unsigned int total_bytes = 0;
struct net_device *ndev;
+ struct xdp_frame *xdpf;
+ unsigned int pkt_len;
struct sk_buff *skb;
dma_addr_t desc_dma;
int res, num_tx = 0;
@@ -1000,9 +1472,12 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
tx_chn = &common->tx_chns[chn];
while (true) {
- spin_lock(&tx_chn->lock);
+ if (!single_port)
+ spin_lock(&tx_chn->lock);
res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
- spin_unlock(&tx_chn->lock);
+ if (!single_port)
+ spin_unlock(&tx_chn->lock);
+
if (res == -ENODATA)
break;
@@ -1013,16 +1488,43 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
break;
}
- skb = am65_cpsw_nuss_tx_compl_packet(tx_chn, desc_dma);
- total_bytes = skb->len;
- ndev = skb->dev;
- napi_consume_skb(skb, budget);
+ desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
+ desc_dma);
+ swdata = cppi5_hdesc_get_swdata(desc_tx);
+ ndev = swdata->ndev;
+ buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma);
+ if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) {
+ skb = swdata->skb;
+ am65_cpts_tx_timestamp(tx_chn->common->cpts, skb);
+ pkt_len = skb->len;
+ napi_consume_skb(skb, budget);
+ } else {
+ xdpf = swdata->xdpf;
+ pkt_len = xdpf->len;
+ if (buf_type == AM65_CPSW_TX_BUF_TYPE_XDP_TX)
+ xdp_return_frame_rx_napi(xdpf);
+ else
+ xdp_return_frame(xdpf);
+ }
+
+ total_bytes += pkt_len;
num_tx++;
+ am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
+ dev_sw_netstats_tx_add(ndev, 1, pkt_len);
+ if (!single_port) {
+ /* as packets from multiple ports can be interleaved
+ * on the same channel, we have to figure out the
+ * port/queue for every packet and report it / wake the queue.
+ */
+ netif_txq = netdev_get_tx_queue(ndev, chn);
+ netdev_tx_completed_queue(netif_txq, 1, pkt_len);
+ am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);
+ }
+ }
+ if (single_port && num_tx) {
netif_txq = netdev_get_tx_queue(ndev, chn);
-
netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);
-
am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);
}
@@ -1031,54 +1533,6 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
return num_tx;
}
-static int am65_cpsw_nuss_tx_compl_packets_2g(struct am65_cpsw_common *common,
- int chn, unsigned int budget, bool *tdown)
-{
- struct device *dev = common->dev;
- struct am65_cpsw_tx_chn *tx_chn;
- struct netdev_queue *netif_txq;
- unsigned int total_bytes = 0;
- struct net_device *ndev;
- struct sk_buff *skb;
- dma_addr_t desc_dma;
- int res, num_tx = 0;
-
- tx_chn = &common->tx_chns[chn];
-
- while (true) {
- res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
- if (res == -ENODATA)
- break;
-
- if (cppi5_desc_is_tdcm(desc_dma)) {
- if (atomic_dec_and_test(&common->tdown_cnt))
- complete(&common->tdown_complete);
- *tdown = true;
- break;
- }
-
- skb = am65_cpsw_nuss_tx_compl_packet(tx_chn, desc_dma);
-
- ndev = skb->dev;
- total_bytes += skb->len;
- napi_consume_skb(skb, budget);
- num_tx++;
- }
-
- if (!num_tx)
- return 0;
-
- netif_txq = netdev_get_tx_queue(ndev, chn);
-
- netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);
-
- am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);
-
- dev_dbg(dev, "%s:%u pkt:%d\n", __func__, chn, num_tx);
-
- return num_tx;
-}
-
static enum hrtimer_restart am65_cpsw_nuss_tx_timer_callback(struct hrtimer *timer)
{
struct am65_cpsw_tx_chn *tx_chns =
@@ -1094,13 +1548,8 @@ static int am65_cpsw_nuss_tx_poll(struct napi_struct *napi_tx, int budget)
bool tdown = false;
int num_tx;
- if (AM65_CPSW_IS_CPSW2G(tx_chn->common))
- num_tx = am65_cpsw_nuss_tx_compl_packets_2g(tx_chn->common, tx_chn->id,
- budget, &tdown);
- else
- num_tx = am65_cpsw_nuss_tx_compl_packets(tx_chn->common,
- tx_chn->id, budget, &tdown);
-
+ num_tx = am65_cpsw_nuss_tx_compl_packets(tx_chn->common,
+ tx_chn->id, budget, &tdown);
if (num_tx >= budget)
return budget;
@@ -1119,11 +1568,11 @@ static int am65_cpsw_nuss_tx_poll(struct napi_struct *napi_tx, int budget)
static irqreturn_t am65_cpsw_nuss_rx_irq(int irq, void *dev_id)
{
- struct am65_cpsw_common *common = dev_id;
+ struct am65_cpsw_rx_flow *flow = dev_id;
- common->rx_irq_disabled = true;
+ flow->irq_disabled = true;
disable_irq_nosync(irq);
- napi_schedule(&common->napi_rx);
+ napi_schedule(&flow->napi_rx);
return IRQ_HANDLED;
}
@@ -1144,12 +1593,12 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
struct cppi5_host_desc_t *first_desc, *next_desc, *cur_desc;
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct am65_cpsw_tx_swdata *swdata;
struct device *dev = common->dev;
struct am65_cpsw_tx_chn *tx_chn;
struct netdev_queue *netif_txq;
dma_addr_t desc_dma, buf_dma;
int ret, q_idx, i;
- void **swdata;
u32 *psdata;
u32 pkt_len;
@@ -1183,16 +1632,20 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
goto busy_stop_q;
}
+ am65_cpsw_nuss_set_buf_type(tx_chn, first_desc,
+ AM65_CPSW_TX_BUF_TYPE_SKB);
+
cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
AM65_CPSW_NAV_PS_DATA_SIZE);
- cppi5_desc_set_pktids(&first_desc->hdr, 0, 0x3FFF);
- cppi5_hdesc_set_pkttype(first_desc, 0x7);
+ cppi5_desc_set_pktids(&first_desc->hdr, 0, AM65_CPSW_CPPI_TX_FLOW_ID);
+ cppi5_hdesc_set_pkttype(first_desc, AM65_CPSW_CPPI_TX_PKT_TYPE);
cppi5_desc_set_tags_ids(&first_desc->hdr, 0, port->port_id);
k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
swdata = cppi5_hdesc_get_swdata(first_desc);
- *(swdata) = skb;
+ swdata->ndev = ndev;
+ swdata->skb = skb;
psdata = cppi5_hdesc_get_psdata(first_desc);
/* HW csum offload if enabled */
@@ -1225,6 +1678,9 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
goto busy_free_descs;
}
+ am65_cpsw_nuss_set_buf_type(tx_chn, next_desc,
+ AM65_CPSW_TX_BUF_TYPE_SKB);
+
buf_dma = skb_frag_dma_map(tx_chn->dma_dev, frag, 0, frag_size,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(tx_chn->dma_dev, buf_dma))) {
@@ -1332,34 +1788,31 @@ static int am65_cpsw_nuss_ndo_slave_set_mac_address(struct net_device *ndev,
}
static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev,
- struct ifreq *ifr)
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
{
- struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
u32 ts_ctrl, seq_id, ts_ctrl_ltype2, ts_vlan_ltype;
- struct hwtstamp_config cfg;
- if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
+ if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS)) {
+ NL_SET_ERR_MSG(extack, "Time stamping is not supported");
return -EOPNOTSUPP;
-
- if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
- return -EFAULT;
+ }
/* TX HW timestamp */
- switch (cfg.tx_type) {
+ switch (cfg->tx_type) {
case HWTSTAMP_TX_OFF:
case HWTSTAMP_TX_ON:
break;
default:
+ NL_SET_ERR_MSG(extack, "TX mode is not supported");
return -ERANGE;
}
- switch (cfg.rx_filter) {
+ switch (cfg->rx_filter) {
case HWTSTAMP_FILTER_NONE:
port->rx_ts_enabled = false;
break;
- case HWTSTAMP_FILTER_ALL:
- case HWTSTAMP_FILTER_SOME:
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
@@ -1372,15 +1825,20 @@ static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev,
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- case HWTSTAMP_FILTER_NTP_ALL:
port->rx_ts_enabled = true;
- cfg.rx_filter = HWTSTAMP_FILTER_ALL;
+ cfg->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT | HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ NL_SET_ERR_MSG(extack, "RX filter is not supported");
+ return -EOPNOTSUPP;
default:
+ NL_SET_ERR_MSG(extack, "RX filter is not supported");
return -ERANGE;
}
- port->tx_ts_enabled = (cfg.tx_type == HWTSTAMP_TX_ON);
+ port->tx_ts_enabled = (cfg->tx_type == HWTSTAMP_TX_ON);
/* cfg TX timestamp */
seq_id = (AM65_CPSW_TS_SEQ_ID_OFFSET <<
@@ -1405,6 +1863,10 @@ static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev,
ts_ctrl |= AM65_CPSW_TS_TX_ANX_ALL_EN |
AM65_CPSW_PN_TS_CTL_TX_VLAN_LT1_EN;
+ if (port->rx_ts_enabled)
+ ts_ctrl |= AM65_CPSW_TS_RX_ANX_ALL_EN |
+ AM65_CPSW_PN_TS_CTL_RX_VLAN_LT1_EN;
+
writel(seq_id, port->port_base + AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG);
writel(ts_vlan_ltype, port->port_base +
AM65_CPSW_PORTN_REG_TS_VLAN_LTYPE_REG);
@@ -1412,28 +1874,24 @@ static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev,
AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2);
writel(ts_ctrl, port->port_base + AM65_CPSW_PORTN_REG_TS_CTL);
- /* en/dis RX timestamp */
- am65_cpts_rx_enable(common->cpts, port->rx_ts_enabled);
-
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+ return 0;
}
static int am65_cpsw_nuss_hwtstamp_get(struct net_device *ndev,
- struct ifreq *ifr)
+ struct kernel_hwtstamp_config *cfg)
{
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
- struct hwtstamp_config cfg;
if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
return -EOPNOTSUPP;
- cfg.flags = 0;
- cfg.tx_type = port->tx_ts_enabled ?
+ cfg->flags = 0;
+ cfg->tx_type = port->tx_ts_enabled ?
HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
- cfg.rx_filter = port->rx_ts_enabled ?
- HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
+ cfg->rx_filter = port->rx_ts_enabled ? HWTSTAMP_FILTER_PTP_V2_EVENT |
+ HWTSTAMP_FILTER_PTP_V1_L4_EVENT : HWTSTAMP_FILTER_NONE;
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+ return 0;
}
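
After this conversion the driver no longer copies a struct hwtstamp_config from userspace itself; the core does that and calls the ndo_hwtstamp_{get,set} hooks. The userspace side is unchanged, e.g. (plain kernel UAPI, nothing am65-specific assumed):

/* Minimal sketch: enable TX timestamps and PTP v2 event RX timestamps on an
 * interface; the core converts this into a kernel_hwtstamp_config and ends
 * up in am65_cpsw_nuss_hwtstamp_set(). "fd" is any socket, e.g.
 * socket(AF_INET, SOCK_DGRAM, 0).
 */
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

static int enable_hwts(int fd, const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	return ioctl(fd, SIOCSHWTSTAMP, &ifr);
}
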
static int am65_cpsw_nuss_ndo_slave_ioctl(struct net_device *ndev,
@@ -1444,48 +1902,71 @@ static int am65_cpsw_nuss_ndo_slave_ioctl(struct net_device *ndev,
if (!netif_running(ndev))
return -EINVAL;
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return am65_cpsw_nuss_hwtstamp_set(ndev, req);
- case SIOCGHWTSTAMP:
- return am65_cpsw_nuss_hwtstamp_get(ndev, req);
- }
-
return phylink_mii_ioctl(port->slave.phylink, req, cmd);
}
static void am65_cpsw_nuss_ndo_get_stats(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
- struct am65_cpsw_ndev_priv *ndev_priv = netdev_priv(dev);
- unsigned int start;
- int cpu;
+ dev_fetch_sw_netstats(stats, dev->tstats);
+
+ stats->rx_errors = dev->stats.rx_errors;
+ stats->rx_dropped = dev->stats.rx_dropped;
+ stats->tx_dropped = dev->stats.tx_dropped;
+}
+
+static int am65_cpsw_xdp_prog_setup(struct net_device *ndev,
+ struct bpf_prog *prog)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ bool running = netif_running(ndev);
+ struct bpf_prog *old_prog;
- for_each_possible_cpu(cpu) {
- struct am65_cpsw_ndev_stats *cpu_stats;
- u64 rx_packets;
- u64 rx_bytes;
- u64 tx_packets;
- u64 tx_bytes;
+ if (running)
+ am65_cpsw_nuss_ndo_slave_stop(ndev);
- cpu_stats = per_cpu_ptr(ndev_priv->stats, cpu);
- do {
- start = u64_stats_fetch_begin(&cpu_stats->syncp);
- rx_packets = cpu_stats->rx_packets;
- rx_bytes = cpu_stats->rx_bytes;
- tx_packets = cpu_stats->tx_packets;
- tx_bytes = cpu_stats->tx_bytes;
- } while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
+ old_prog = xchg(&port->xdp_prog, prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
- stats->rx_packets += rx_packets;
- stats->rx_bytes += rx_bytes;
- stats->tx_packets += tx_packets;
- stats->tx_bytes += tx_bytes;
+ if (running)
+ return am65_cpsw_nuss_ndo_slave_open(ndev);
+
+ return 0;
+}
+
+static int am65_cpsw_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
+{
+ switch (bpf->command) {
+ case XDP_SETUP_PROG:
+ return am65_cpsw_xdp_prog_setup(ndev, bpf->prog);
+ default:
+ return -EINVAL;
}
+}
- stats->rx_errors = dev->stats.rx_errors;
- stats->rx_dropped = dev->stats.rx_dropped;
- stats->tx_dropped = dev->stats.tx_dropped;
+static int am65_cpsw_ndo_xdp_xmit(struct net_device *ndev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_tx_chn *tx_chn;
+ struct netdev_queue *netif_txq;
+ int cpu = smp_processor_id();
+ int i, nxmit = 0;
+
+ tx_chn = &common->tx_chns[cpu % common->tx_ch_num];
+ netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
+
+ __netif_tx_lock(netif_txq, cpu);
+ for (i = 0; i < n; i++) {
+ if (am65_cpsw_xdp_tx_frame(ndev, tx_chn, frames[i],
+ AM65_CPSW_TX_BUF_TYPE_XDP_NDO))
+ break;
+ nxmit++;
+ }
+ __netif_tx_unlock(netif_txq);
+
+ return nxmit;
}
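
With .ndo_bpf and .ndo_xdp_xmit wired up below, the standard XDP tooling works against these ports. For context, the sort of minimal program such a setup would load (generic libbpf-style XDP, nothing driver-specific assumed):

/* Trivial XDP program sketch: let every frame through. Attached to an
 * am65-cpsw port it exercises the XDP_PASS path in am65_cpsw_run_xdp();
 * returning XDP_TX or using bpf_redirect() would instead go through
 * am65_cpsw_xdp_tx_frame() / xdp_do_redirect() as added in this patch.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_pass_all(struct xdp_md *ctx)
{
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";
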
static const struct net_device_ops am65_cpsw_nuss_netdev_ops = {
@@ -1502,6 +1983,10 @@ static const struct net_device_ops am65_cpsw_nuss_netdev_ops = {
.ndo_eth_ioctl = am65_cpsw_nuss_ndo_slave_ioctl,
.ndo_setup_tc = am65_cpsw_qos_ndo_setup_tc,
.ndo_set_tx_maxrate = am65_cpsw_qos_ndo_tx_p0_set_maxrate,
+ .ndo_bpf = am65_cpsw_ndo_bpf,
+ .ndo_xdp_xmit = am65_cpsw_ndo_xdp_xmit,
+ .ndo_hwtstamp_get = am65_cpsw_nuss_hwtstamp_get,
+ .ndo_hwtstamp_set = am65_cpsw_nuss_hwtstamp_set,
};
static void am65_cpsw_disable_phy(struct phy *phy)
@@ -1708,44 +2193,38 @@ static void am65_cpsw_nuss_free_tx_chns(void *data)
}
}
-void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common)
+static void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common)
{
struct device *dev = common->dev;
int i;
- devm_remove_action(dev, am65_cpsw_nuss_free_tx_chns, common);
-
common->tx_ch_rate_msk = 0;
for (i = 0; i < common->tx_ch_num; i++) {
struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
- if (tx_chn->irq)
+ if (tx_chn->irq > 0)
devm_free_irq(dev, tx_chn->irq, tx_chn);
netif_napi_del(&tx_chn->napi_tx);
-
- if (!IS_ERR_OR_NULL(tx_chn->desc_pool))
- k3_cppi_desc_pool_destroy(tx_chn->desc_pool);
-
- if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
- k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
-
- memset(tx_chn, 0, sizeof(*tx_chn));
}
+
+ am65_cpsw_nuss_free_tx_chns(common);
}
static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common)
{
struct device *dev = common->dev;
+ struct am65_cpsw_tx_chn *tx_chn;
int i, ret = 0;
for (i = 0; i < common->tx_ch_num; i++) {
- struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
+ tx_chn = &common->tx_chns[i];
+
+ hrtimer_setup(&tx_chn->tx_hrtimer, &am65_cpsw_nuss_tx_timer_callback,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
netif_napi_add_tx(common->dma_ndev, &tx_chn->napi_tx,
am65_cpsw_nuss_tx_poll);
- hrtimer_init(&tx_chn->tx_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
- tx_chn->tx_hrtimer.function = &am65_cpsw_nuss_tx_timer_callback;
ret = devm_request_irq(dev, tx_chn->irq,
am65_cpsw_nuss_tx_irq,
@@ -1758,7 +2237,16 @@ static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common)
}
}
+ return 0;
+
err:
+ netif_napi_del(&tx_chn->napi_tx);
+ for (--i; i >= 0; i--) {
+ tx_chn = &common->tx_chns[i];
+ devm_free_irq(dev, tx_chn->irq, tx_chn);
+ netif_napi_del(&tx_chn->napi_tx);
+ }
+
return ret;
}
@@ -1772,7 +2260,7 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
.mode = K3_RINGACC_RING_MODE_RING,
.flags = 0
};
- u32 hdesc_size;
+ u32 hdesc_size, hdesc_size_out;
int i, ret = 0;
hdesc_size = cppi5_hdesc_calc_size(true, AM65_CPSW_NAV_PS_DATA_SIZE,
@@ -1816,6 +2304,10 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
goto err;
}
+ hdesc_size_out = k3_cppi_desc_pool_desc_size(tx_chn->desc_pool);
+ tx_chn->dsize_log2 = __fls(hdesc_size_out);
+ WARN_ON(hdesc_size_out != (1 << tx_chn->dsize_log2));
+
tx_chn->irq = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
if (tx_chn->irq < 0) {
dev_err(dev, "Failed to get tx dma irq %d\n",
@@ -1835,12 +2327,10 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
goto err;
}
+ return 0;
+
err:
- i = devm_add_action(dev, am65_cpsw_nuss_free_tx_chns, common);
- if (i) {
- dev_err(dev, "Failed to add free_tx_chns action %d\n", i);
- return i;
- }
+ am65_cpsw_nuss_free_tx_chns(common);
return ret;
}
@@ -1859,25 +2349,23 @@ static void am65_cpsw_nuss_free_rx_chns(void *data)
k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
}
-static void am65_cpsw_nuss_remove_rx_chns(void *data)
+static void am65_cpsw_nuss_remove_rx_chns(struct am65_cpsw_common *common)
{
- struct am65_cpsw_common *common = data;
- struct am65_cpsw_rx_chn *rx_chn;
struct device *dev = common->dev;
+ struct am65_cpsw_rx_chn *rx_chn;
+ struct am65_cpsw_rx_flow *flows;
+ int i;
rx_chn = &common->rx_chns;
- devm_remove_action(dev, am65_cpsw_nuss_free_rx_chns, common);
-
- if (!(rx_chn->irq < 0))
- devm_free_irq(dev, rx_chn->irq, common);
-
- netif_napi_del(&common->napi_rx);
+ flows = rx_chn->flows;
- if (!IS_ERR_OR_NULL(rx_chn->desc_pool))
- k3_cppi_desc_pool_destroy(rx_chn->desc_pool);
+ for (i = 0; i < common->rx_ch_num_flows; i++) {
+ if (!(flows[i].irq < 0))
+ devm_free_irq(dev, flows[i].irq, &flows[i]);
+ netif_napi_del(&flows[i].napi_rx);
+ }
- if (!IS_ERR_OR_NULL(rx_chn->rx_chn))
- k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
+ am65_cpsw_nuss_free_rx_chns(common);
common->rx_flow_id_base = -1;
}
@@ -1888,7 +2376,8 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
struct k3_udma_glue_rx_channel_cfg rx_cfg = { 0 };
u32 max_desc_num = AM65_CPSW_MAX_RX_DESC;
struct device *dev = common->dev;
- u32 hdesc_size;
+ struct am65_cpsw_rx_flow *flow;
+ u32 hdesc_size, hdesc_size_out;
u32 fdqring_id;
int i, ret = 0;
@@ -1896,12 +2385,17 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
AM65_CPSW_NAV_SW_DATA_SIZE);
rx_cfg.swdata_size = AM65_CPSW_NAV_SW_DATA_SIZE;
- rx_cfg.flow_id_num = AM65_CPSW_MAX_RX_FLOWS;
+ rx_cfg.flow_id_num = common->rx_ch_num_flows;
rx_cfg.flow_id_base = common->rx_flow_id_base;
/* init all flows */
rx_chn->dev = dev;
- rx_chn->descs_num = max_desc_num;
+ rx_chn->descs_num = max_desc_num * rx_cfg.flow_id_num;
+
+ for (i = 0; i < common->rx_ch_num_flows; i++) {
+ flow = &rx_chn->flows[i];
+ flow->page_pool = NULL;
+ }
rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &rx_cfg);
if (IS_ERR(rx_chn->rx_chn)) {
@@ -1920,6 +2414,10 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
goto err;
}
+ hdesc_size_out = k3_cppi_desc_pool_desc_size(rx_chn->desc_pool);
+ rx_chn->dsize_log2 = __fls(hdesc_size_out);
+ WARN_ON(hdesc_size_out != (1 << rx_chn->dsize_log2));
+
common->rx_flow_id_base =
k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn);
dev_info(dev, "set new flow-id-base %u\n", common->rx_flow_id_base);
@@ -1943,52 +2441,74 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG,
};
+ flow = &rx_chn->flows[i];
+ flow->id = i;
+ flow->common = common;
+ flow->irq = -EINVAL;
+
rx_flow_cfg.ring_rxfdq0_id = fdqring_id;
rx_flow_cfg.rx_cfg.size = max_desc_num;
- rx_flow_cfg.rxfdq_cfg.size = max_desc_num;
+ /* share same FDQ for all flows */
+ rx_flow_cfg.rxfdq_cfg.size = max_desc_num * rx_cfg.flow_id_num;
rx_flow_cfg.rxfdq_cfg.mode = common->pdata.fdqring_mode;
ret = k3_udma_glue_rx_flow_init(rx_chn->rx_chn,
i, &rx_flow_cfg);
if (ret) {
dev_err(dev, "Failed to init rx flow%d %d\n", i, ret);
- goto err;
+ goto err_flow;
}
if (!i)
fdqring_id =
k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn,
i);
- rx_chn->irq = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
-
- if (rx_chn->irq <= 0) {
+ flow->irq = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
+ if (flow->irq <= 0) {
dev_err(dev, "Failed to get rx dma irq %d\n",
- rx_chn->irq);
- ret = -ENXIO;
- goto err;
+ flow->irq);
+ ret = flow->irq;
+ goto err_flow;
+ }
+
+ snprintf(flow->name,
+ sizeof(flow->name), "%s-rx%d",
+ dev_name(dev), i);
+ hrtimer_setup(&flow->rx_hrtimer, &am65_cpsw_nuss_rx_timer_callback, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
+
+ netif_napi_add(common->dma_ndev, &flow->napi_rx,
+ am65_cpsw_nuss_rx_poll);
+
+ ret = devm_request_irq(dev, flow->irq,
+ am65_cpsw_nuss_rx_irq,
+ IRQF_TRIGGER_HIGH,
+ flow->name, flow);
+ if (ret) {
+ dev_err(dev, "failure requesting rx %d irq %u, %d\n",
+ i, flow->irq, ret);
+ flow->irq = -EINVAL;
+ goto err_request_irq;
}
}
- netif_napi_add(common->dma_ndev, &common->napi_rx,
- am65_cpsw_nuss_rx_poll);
- hrtimer_init(&common->rx_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
- common->rx_hrtimer.function = &am65_cpsw_nuss_rx_timer_callback;
+ /* setup classifier to route priorities to flows */
+ cpsw_ale_classifier_setup_default(common->ale, common->rx_ch_num_flows);
- ret = devm_request_irq(dev, rx_chn->irq,
- am65_cpsw_nuss_rx_irq,
- IRQF_TRIGGER_HIGH, dev_name(dev), common);
- if (ret) {
- dev_err(dev, "failure requesting rx irq %u, %d\n",
- rx_chn->irq, ret);
- goto err;
+ return 0;
+
+err_request_irq:
+ netif_napi_del(&flow->napi_rx);
+
+err_flow:
+ for (--i; i >= 0; i--) {
+ flow = &rx_chn->flows[i];
+ devm_free_irq(dev, flow->irq, flow);
+ netif_napi_del(&flow->napi_rx);
}
err:
- i = devm_add_action(dev, am65_cpsw_nuss_free_rx_chns, common);
- if (i) {
- dev_err(dev, "Failed to add free_rx_chns action %d\n", i);
- return i;
- }
+ am65_cpsw_nuss_free_rx_chns(common);
return ret;
}
@@ -2009,20 +2529,15 @@ static int am65_cpsw_am654_get_efuse_macid(struct device_node *of_node,
{
u32 mac_lo, mac_hi, offset;
struct regmap *syscon;
- int ret;
- syscon = syscon_regmap_lookup_by_phandle(of_node, "ti,syscon-efuse");
+ syscon = syscon_regmap_lookup_by_phandle_args(of_node, "ti,syscon-efuse",
+ 1, &offset);
if (IS_ERR(syscon)) {
if (PTR_ERR(syscon) == -ENODEV)
return 0;
return PTR_ERR(syscon);
}
- ret = of_property_read_u32_index(of_node, "ti,syscon-efuse", 1,
- &offset);
- if (ret)
- return ret;
-
regmap_read(syscon, offset, &mac_lo);
regmap_read(syscon, offset + 4, &mac_hi);
@@ -2084,6 +2599,7 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
return -ENOENT;
for_each_child_of_node(node, port_np) {
+ phy_interface_t phy_if;
struct am65_cpsw_port *port;
u32 port_id;
@@ -2148,26 +2664,50 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
of_property_read_bool(port_np, "ti,mac-only");
/* get phy/link info */
- port->slave.phy_node = port_np;
- ret = of_get_phy_mode(port_np, &port->slave.phy_if);
+ port->slave.port_np = of_node_get(port_np);
+ ret = of_get_phy_mode(port_np, &phy_if);
if (ret) {
dev_err(dev, "%pOF read phy-mode err %d\n",
port_np, ret);
goto of_node_put;
}
- ret = phy_set_mode_ext(port->slave.ifphy, PHY_MODE_ETHERNET, port->slave.phy_if);
+ /* CPSW controllers supported by this driver have a fixed
+ * internal TX delay in RGMII mode. Fix up PHY mode to account
+ * for this and warn about Device Trees that claim to have a TX
+ * delay on the PCB.
+ */
+ switch (phy_if) {
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ phy_if = PHY_INTERFACE_MODE_RGMII_RXID;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ phy_if = PHY_INTERFACE_MODE_RGMII;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ dev_warn(dev,
+ "RGMII mode without internal TX delay unsupported; please fix your Device Tree\n");
+ break;
+ default:
+ break;
+ }
+
+ port->slave.phy_if = phy_if;
+ ret = phy_set_mode_ext(port->slave.ifphy, PHY_MODE_ETHERNET, phy_if);
if (ret)
goto of_node_put;
ret = of_get_mac_address(port_np, port->slave.mac_addr);
- if (ret) {
+ if (ret == -EPROBE_DEFER) {
+ goto of_node_put;
+ } else if (ret) {
am65_cpsw_am654_get_efuse_macid(port_np,
port->port_id,
port->slave.mac_addr);
if (!is_valid_ether_addr(port->slave.mac_addr)) {
eth_random_addr(port->slave.mac_addr);
- dev_err(dev, "Use random MAC address\n");
+ dev_info(dev, "Use random MAC address\n");
}
}
@@ -2190,22 +2730,26 @@ of_node_put:
return ret;
}
-static void am65_cpsw_pcpu_stats_free(void *data)
+static void am65_cpsw_nuss_phylink_cleanup(struct am65_cpsw_common *common)
{
- struct am65_cpsw_ndev_stats __percpu *stats = data;
+ struct am65_cpsw_port *port;
+ int i;
- free_percpu(stats);
+ for (i = 0; i < common->port_num; i++) {
+ port = &common->ports[i];
+ if (port->slave.phylink)
+ phylink_destroy(port->slave.phylink);
+ }
}
-static void am65_cpsw_nuss_phylink_cleanup(struct am65_cpsw_common *common)
+static void am65_cpsw_remove_dt(struct am65_cpsw_common *common)
{
struct am65_cpsw_port *port;
int i;
for (i = 0; i < common->port_num; i++) {
port = &common->ports[i];
- if (port->slave.phylink)
- phylink_destroy(port->slave.phylink);
+ of_node_put(port->slave.port_np);
}
}
@@ -2216,7 +2760,6 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
struct device *dev = common->dev;
struct am65_cpsw_port *port;
struct phylink *phylink;
- int ret;
port = &common->ports[port_idx];
@@ -2224,10 +2767,9 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
return 0;
/* alloc netdev */
- port->ndev = devm_alloc_etherdev_mqs(common->dev,
- sizeof(struct am65_cpsw_ndev_priv),
- AM65_CPSW_MAX_TX_QUEUES,
- AM65_CPSW_MAX_RX_QUEUES);
+ port->ndev = alloc_etherdev_mqs(sizeof(struct am65_cpsw_ndev_priv),
+ AM65_CPSW_MAX_QUEUES,
+ AM65_CPSW_MAX_QUEUES);
if (!port->ndev) {
dev_err(dev, "error allocating slave net_device %u\n",
port->port_id);
@@ -2240,6 +2782,7 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
mutex_init(&ndev_priv->mm_lock);
port->qos.link_speed = SPEED_UNKNOWN;
SET_NETDEV_DEV(port->ndev, dev);
+ device_set_node(&port->ndev->dev, of_fwnode_handle(port->slave.port_np));
eth_hw_addr_set(port->ndev, port->slave.mac_addr);
@@ -2252,6 +2795,9 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
NETIF_F_HW_TC;
port->ndev->features = port->ndev->hw_features |
NETIF_F_HW_VLAN_CTAG_FILTER;
+ port->ndev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
port->ndev->vlan_features |= NETIF_F_SG;
port->ndev->netdev_ops = &am65_cpsw_nuss_netdev_ops;
port->ndev->ethtool_ops = &am65_cpsw_ethtool_ops_slave;
@@ -2294,7 +2840,7 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
}
phylink = phylink_create(&port->slave.phylink_config,
- of_node_to_fwnode(port->slave.phy_node),
+ of_fwnode_handle(port->slave.port_np),
port->slave.phy_if,
&am65_cpsw_phylink_mac_ops);
if (IS_ERR(phylink))
@@ -2306,19 +2852,13 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
if (common->pdata.quirks & AM65_CPSW_QUIRK_I2027_NO_TX_CSUM)
port->ndev->features &= ~NETIF_F_HW_CSUM;
- ndev_priv->stats = netdev_alloc_pcpu_stats(struct am65_cpsw_ndev_stats);
- if (!ndev_priv->stats)
- return -ENOMEM;
-
- ret = devm_add_action_or_reset(dev, am65_cpsw_pcpu_stats_free,
- ndev_priv->stats);
- if (ret)
- dev_err(dev, "failed to add percpu stat free action %d\n", ret);
+ port->ndev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
+ port->xdp_prog = NULL;
if (!common->dma_ndev)
common->dma_ndev = port->ndev;
- return ret;
+ return 0;
}
static int am65_cpsw_nuss_init_ndevs(struct am65_cpsw_common *common)
@@ -2342,8 +2882,12 @@ static void am65_cpsw_nuss_cleanup_ndev(struct am65_cpsw_common *common)
for (i = 0; i < common->port_num; i++) {
port = &common->ports[i];
- if (port->ndev && port->ndev->reg_state == NETREG_REGISTERED)
+ if (!port->ndev)
+ continue;
+ if (port->ndev->reg_state == NETREG_REGISTERED)
unregister_netdev(port->ndev);
+ free_netdev(port->ndev);
+ port->ndev = NULL;
}
}
@@ -2524,7 +3068,8 @@ static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common)
}
static int am65_cpsw_dl_switch_mode_get(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct am65_cpsw_devlink *dl_priv = devlink_priv(dl);
struct am65_cpsw_common *common = dl_priv->common;
@@ -2588,7 +3133,8 @@ static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port)
}
static int am65_cpsw_dl_switch_mode_set(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct am65_cpsw_devlink *dl_priv = devlink_priv(dl);
struct am65_cpsw_common *cpsw = dl_priv->common;
@@ -2805,7 +3351,7 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
return ret;
ret = am65_cpsw_nuss_init_rx_chns(common);
if (ret)
- return ret;
+ goto err_remove_tx;
/* The DMA Channels are not guaranteed to be in a clean state.
* Reset and disable them to ensure that they are back to the
@@ -2817,15 +3363,16 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
k3_udma_glue_disable_tx_chn(tx_chan[i].tx_chn);
}
- for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++)
- k3_udma_glue_reset_rx_chn(rx_chan->rx_chn, i, rx_chan,
- am65_cpsw_nuss_rx_cleanup, !!i);
+ for (i = 0; i < common->rx_ch_num_flows; i++)
+ k3_udma_glue_reset_rx_chn(rx_chan->rx_chn, i,
+ rx_chan,
+ am65_cpsw_nuss_rx_cleanup);
k3_udma_glue_disable_rx_chn(rx_chan->rx_chn);
ret = am65_cpsw_nuss_register_devlink(common);
if (ret)
- return ret;
+ goto err_remove_rx;
for (i = 0; i < common->port_num; i++) {
port = &common->ports[i];
@@ -2856,16 +3403,31 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
err_cleanup_ndev:
am65_cpsw_nuss_cleanup_ndev(common);
am65_cpsw_unregister_devlink(common);
+err_remove_rx:
+ am65_cpsw_nuss_remove_rx_chns(common);
+err_remove_tx:
+ am65_cpsw_nuss_remove_tx_chns(common);
return ret;
}
-int am65_cpsw_nuss_update_tx_chns(struct am65_cpsw_common *common, int num_tx)
+int am65_cpsw_nuss_update_tx_rx_chns(struct am65_cpsw_common *common,
+ int num_tx, int num_rx)
{
int ret;
+ am65_cpsw_nuss_remove_tx_chns(common);
+ am65_cpsw_nuss_remove_rx_chns(common);
+
common->tx_ch_num = num_tx;
+ common->rx_ch_num_flows = num_rx;
ret = am65_cpsw_nuss_init_tx_chns(common);
+ if (ret)
+ return ret;
+
+ ret = am65_cpsw_nuss_init_rx_chns(common);
+ if (ret)
+ am65_cpsw_nuss_remove_tx_chns(common);
return ret;
}
@@ -2908,7 +3470,8 @@ static const struct am65_cpsw_pdata j7200_cpswxg_pdata = {
.quirks = 0,
.ale_dev_id = "am64-cpswxg",
.fdqring_mode = K3_RINGACC_RING_MODE_RING,
- .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_SGMII),
+ .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_SGMII) |
+ BIT(PHY_INTERFACE_MODE_USXGMII),
};
static const struct am65_cpsw_pdata j721e_cpswxg_pdata = {
@@ -2922,7 +3485,8 @@ static const struct am65_cpsw_pdata j784s4_cpswxg_pdata = {
.quirks = 0,
.ale_dev_id = "am64-cpswxg",
.fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
- .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_USXGMII),
+ .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_SGMII) |
+ BIT(PHY_INTERFACE_MODE_USXGMII),
};
static const struct of_device_id am65_cpsw_nuss_of_mtable[] = {
@@ -2958,10 +3522,14 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
struct device_node *node;
struct resource *res;
struct clk *clk;
- u64 id_temp;
- int ret, i;
int ale_entries;
+ __be64 id_temp;
+ int ret, i;
+ BUILD_BUG_ON_MSG(sizeof(struct am65_cpsw_tx_swdata) > AM65_CPSW_NAV_SW_DATA_SIZE,
+ "TX SW_DATA size exceeds AM65_CPSW_NAV_SW_DATA_SIZE");
+ BUILD_BUG_ON_MSG(sizeof(struct am65_cpsw_swdata) > AM65_CPSW_NAV_SW_DATA_SIZE,
+ "SW_DATA size exceeds AM65_CPSW_NAV_SW_DATA_SIZE");
common = devm_kzalloc(dev, sizeof(struct am65_cpsw_common), GFP_KERNEL);
if (!common)
return -ENOMEM;
@@ -2994,7 +3562,8 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
common->rx_flow_id_base = -1;
init_completion(&common->tdown_complete);
common->tx_ch_num = AM65_CPSW_DEFAULT_TX_CHNS;
- common->pf_p0_rx_ptype_rrobin = false;
+ common->rx_ch_num_flows = AM65_CPSW_DEFAULT_RX_CHN_FLOWS;
+ common->pf_p0_rx_ptype_rrobin = true;
common->default_vlan = 1;
common->ports = devm_kcalloc(dev, common->port_num,
@@ -3015,6 +3584,16 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
return ret;
}
+ am65_cpsw_nuss_get_ver(common);
+
+ ret = am65_cpsw_nuss_init_host_p(common);
+ if (ret)
+ goto err_pm_clear;
+
+ ret = am65_cpsw_nuss_init_slave_ports(common);
+ if (ret)
+ goto err_pm_clear;
+
node = of_get_child_by_name(dev->of_node, "mdio");
if (!node) {
dev_warn(dev, "MDIO node not found\n");
@@ -3031,16 +3610,6 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
}
of_node_put(node);
- am65_cpsw_nuss_get_ver(common);
-
- ret = am65_cpsw_nuss_init_host_p(common);
- if (ret)
- goto err_of_clear;
-
- ret = am65_cpsw_nuss_init_slave_ports(common);
- if (ret)
- goto err_of_clear;
-
/* init common data */
ale_params.dev = dev;
ale_params.ale_ageout = AM65_CPSW_ALE_AGEOUT_DEFAULT;
@@ -3074,18 +3643,20 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
ret = am65_cpsw_nuss_init_ndevs(common);
if (ret)
- goto err_free_phylink;
+ goto err_ndevs_clear;
ret = am65_cpsw_nuss_register_ndevs(common);
if (ret)
- goto err_free_phylink;
+ goto err_ndevs_clear;
pm_runtime_put(dev);
return 0;
-err_free_phylink:
+err_ndevs_clear:
+ am65_cpsw_nuss_cleanup_ndev(common);
am65_cpsw_nuss_phylink_cleanup(common);
am65_cpts_release(common->cpts);
+ am65_cpsw_remove_dt(common);
err_of_clear:
if (common->mdio_dev)
of_platform_device_destroy(common->mdio_dev, NULL);
@@ -3113,16 +3684,19 @@ static void am65_cpsw_nuss_remove(struct platform_device *pdev)
return;
}
- am65_cpsw_unregister_devlink(common);
am65_cpsw_unregister_notifiers(common);
/* must unregister ndevs here because DD release_driver routine calls
* dma_deconfigure(dev) before devres_release_all(dev)
*/
am65_cpsw_nuss_cleanup_ndev(common);
+ am65_cpsw_unregister_devlink(common);
+ am65_cpsw_nuss_remove_rx_chns(common);
+ am65_cpsw_nuss_remove_tx_chns(common);
am65_cpsw_nuss_phylink_cleanup(common);
am65_cpts_release(common->cpts);
am65_cpsw_disable_serdes_phy(common);
+ am65_cpsw_remove_dt(common);
if (common->mdio_dev)
of_platform_device_destroy(common->mdio_dev, NULL);
@@ -3172,21 +3746,25 @@ static int am65_cpsw_nuss_suspend(struct device *dev)
static int am65_cpsw_nuss_resume(struct device *dev)
{
struct am65_cpsw_common *common = dev_get_drvdata(dev);
+ struct am65_cpsw_host *host_p = am65_common_get_host(common);
struct am65_cpsw_port *port;
struct net_device *ndev;
int i, ret;
- struct am65_cpsw_host *host_p = am65_common_get_host(common);
ret = am65_cpsw_nuss_init_tx_chns(common);
if (ret)
return ret;
ret = am65_cpsw_nuss_init_rx_chns(common);
- if (ret)
+ if (ret) {
+ am65_cpsw_nuss_remove_tx_chns(common);
return ret;
+ }
/* If RX IRQ was disabled before suspend, keep it disabled */
- if (common->rx_irq_disabled)
- disable_irq(common->rx_chns.irq);
+ for (i = 0; i < common->rx_ch_num_flows; i++) {
+ if (common->rx_chns.flows[i].irq_disabled)
+ disable_irq(common->rx_chns.flows[i].irq);
+ }
am65_cpts_resume(common->cpts);
@@ -3228,7 +3806,7 @@ static struct platform_driver am65_cpsw_nuss_driver = {
.pm = &am65_cpsw_nuss_dev_pm_ops,
},
.probe = am65_cpsw_nuss_probe,
- .remove_new = am65_cpsw_nuss_remove,
+ .remove = am65_cpsw_nuss_remove,
};
module_platform_driver(am65_cpsw_nuss_driver);
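The reworked channel resize path above (am65_cpsw_nuss_update_tx_rx_chns()) first removes both the TX and RX channel sets, then re-creates them with the new counts and removes the fresh TX channels again if the RX setup fails, so a failed resize does not leave the DMA half-configured. A minimal sketch of how a channel-count change could drive it; the wrapper name and the surrounding checks are illustrative only, not the driver's actual ethtool hook:

/* Illustrative wrapper only: a real caller must also make sure the
 * interfaces are down before tearing the channels down.
 */
static int example_resize_queues(struct am65_cpsw_common *common,
				 int num_tx, int num_rx)
{
	if (num_tx < 1 || num_tx > AM65_CPSW_MAX_QUEUES ||
	    num_rx < 1 || num_rx > AM65_CPSW_MAX_QUEUES)
		return -EINVAL;

	/* Tears down the old TX/RX channels, re-creates them with the
	 * new counts, and rolls the TX side back if RX init fails.
	 */
	return am65_cpsw_nuss_update_tx_rx_chns(common, num_tx, num_rx);
}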
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
index 7da0492dc091..917c37e4e89b 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
@@ -14,22 +14,21 @@
#include <linux/platform_device.h>
#include <linux/soc/ti/k3-ringacc.h>
#include <net/devlink.h>
+#include <net/xdp.h>
#include "am65-cpsw-qos.h"
struct am65_cpts;
#define HOST_PORT_NUM 0
-#define AM65_CPSW_MAX_TX_QUEUES 8
-#define AM65_CPSW_MAX_RX_QUEUES 1
-#define AM65_CPSW_MAX_RX_FLOWS 1
+#define AM65_CPSW_MAX_QUEUES 8 /* both TX & RX */
#define AM65_CPSW_PORT_VLAN_REG_OFFSET 0x014
struct am65_cpsw_slave_data {
bool mac_only;
struct cpsw_sl *mac_sl;
- struct device_node *phy_node;
+ struct device_node *port_np;
phy_interface_t phy_if;
struct phy *ifphy;
struct phy *serdes_phy;
@@ -56,10 +55,18 @@ struct am65_cpsw_port {
bool rx_ts_enabled;
struct am65_cpsw_qos qos;
struct devlink_port devlink_port;
+ struct bpf_prog *xdp_prog;
+ struct xdp_rxq_info xdp_rxq[AM65_CPSW_MAX_QUEUES];
/* Only for suspend resume context */
u32 vid_context;
};
+enum am65_cpsw_tx_buf_type {
+ AM65_CPSW_TX_BUF_TYPE_SKB,
+ AM65_CPSW_TX_BUF_TYPE_XDP_TX,
+ AM65_CPSW_TX_BUF_TYPE_XDP_NDO,
+};
+
struct am65_cpsw_host {
struct am65_cpsw_common *common;
void __iomem *port_base;
@@ -80,17 +87,44 @@ struct am65_cpsw_tx_chn {
int irq;
u32 id;
u32 descs_num;
+ unsigned char dsize_log2;
char tx_chn_name[128];
u32 rate_mbps;
};
+struct am65_cpsw_rx_flow {
+ u32 id;
+ struct napi_struct napi_rx;
+ struct am65_cpsw_common *common;
+ int irq;
+ bool irq_disabled;
+ struct hrtimer rx_hrtimer;
+ unsigned long rx_pace_timeout;
+ struct page_pool *page_pool;
+ char name[32];
+};
+
+struct am65_cpsw_tx_swdata {
+ struct net_device *ndev;
+ union {
+ struct sk_buff *skb;
+ struct xdp_frame *xdpf;
+ };
+};
+
+struct am65_cpsw_swdata {
+ u32 flow_id;
+ struct page *page;
+};
+
struct am65_cpsw_rx_chn {
struct device *dev;
struct device *dma_dev;
struct k3_cppi_desc_pool *desc_pool;
struct k3_udma_glue_rx_channel *rx_chn;
u32 descs_num;
- int irq;
+ unsigned char dsize_log2;
+ struct am65_cpsw_rx_flow flows[AM65_CPSW_MAX_QUEUES];
};
#define AM65_CPSW_QUIRK_I2027_NO_TX_CSUM BIT(0)
@@ -132,16 +166,12 @@ struct am65_cpsw_common {
u32 tx_ch_rate_msk;
u32 rx_flow_id_base;
- struct am65_cpsw_tx_chn tx_chns[AM65_CPSW_MAX_TX_QUEUES];
+ struct am65_cpsw_tx_chn tx_chns[AM65_CPSW_MAX_QUEUES];
struct completion tdown_complete;
atomic_t tdown_cnt;
+ int rx_ch_num_flows;
struct am65_cpsw_rx_chn rx_chns;
- struct napi_struct napi_rx;
-
- bool rx_irq_disabled;
- struct hrtimer rx_hrtimer;
- unsigned long rx_pace_timeout;
u32 nuss_ver;
u32 cpsw_ver;
@@ -162,18 +192,9 @@ struct am65_cpsw_common {
u32 *ale_context;
};
-struct am65_cpsw_ndev_stats {
- u64 tx_packets;
- u64 tx_bytes;
- u64 rx_packets;
- u64 rx_bytes;
- struct u64_stats_sync syncp;
-};
-
struct am65_cpsw_ndev_priv {
u32 msg_enable;
struct am65_cpsw_port *port;
- struct am65_cpsw_ndev_stats __percpu *stats;
bool offload_fwd_mark;
/* Serialize access to MAC Merge state between ethtool requests
* and link state updates
@@ -190,8 +211,8 @@ struct am65_cpsw_ndev_priv {
#define am65_common_get_host(common) (&(common)->host)
#define am65_common_get_port(common, id) (&(common)->ports[(id) - 1])
-#define am65_cpsw_napi_to_common(pnapi) \
- container_of(pnapi, struct am65_cpsw_common, napi_rx)
+#define am65_cpsw_napi_to_rx_flow(pnapi) \
+ container_of(pnapi, struct am65_cpsw_rx_flow, napi_rx)
#define am65_cpsw_napi_to_tx_chn(pnapi) \
container_of(pnapi, struct am65_cpsw_tx_chn, napi_tx)
@@ -202,8 +223,8 @@ struct am65_cpsw_ndev_priv {
extern const struct ethtool_ops am65_cpsw_ethtool_ops_slave;
void am65_cpsw_nuss_set_p0_ptype(struct am65_cpsw_common *common);
-void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common);
-int am65_cpsw_nuss_update_tx_chns(struct am65_cpsw_common *common, int num_tx);
+int am65_cpsw_nuss_update_tx_rx_chns(struct am65_cpsw_common *common,
+ int num_tx, int num_rx);
bool am65_cpsw_port_dev_check(const struct net_device *dev);
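With the NAPI context moved from am65_cpsw_common into the new per-flow struct am65_cpsw_rx_flow, an RX poll handler recovers its flow through the updated container_of() helper and re-arms only that flow's interrupt. A minimal sketch of the pattern, assuming the usual NAPI calling convention; this is not the driver's actual poll loop:

static int example_rx_poll(struct napi_struct *napi_rx, int budget)
{
	struct am65_cpsw_rx_flow *flow = am65_cpsw_napi_to_rx_flow(napi_rx);
	int num_rx = 0;

	/* ...receive up to 'budget' packets for flow->id here... */

	if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) {
		/* Re-arm only this flow's interrupt line. */
		if (flow->irq_disabled) {
			flow->irq_disabled = false;
			enable_irq(flow->irq);
		}
	}
	return num_rx;
}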
diff --git a/drivers/net/ethernet/ti/am65-cpsw-qos.c b/drivers/net/ethernet/ti/am65-cpsw-qos.c
index 816e73a3d6e4..66e8b224827b 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-qos.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-qos.c
@@ -9,6 +9,7 @@
#include <linux/pm_runtime.h>
#include <linux/math.h>
+#include <linux/math64.h>
#include <linux/time.h>
#include <linux/units.h>
#include <net/pkt_cls.h>
@@ -275,9 +276,31 @@ static int am65_cpsw_iet_set_verify_timeout_count(struct am65_cpsw_port *port)
/* The number of wireside clocks contained in the verify
* timeout counter. The default is 0x1312d0
 * (10 ms at 125 MHz in 1G mode).
+ * The frequency of the clock depends on the link speed
+ * and the PHY interface.
*/
- val = 125 * HZ_PER_MHZ; /* assuming 125MHz wireside clock */
+ switch (port->slave.phy_if) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ if (port->qos.link_speed == SPEED_1000)
+ val = 125 * HZ_PER_MHZ; /* 125 MHz at 1000 Mbps */
+ else if (port->qos.link_speed == SPEED_100)
+ val = 25 * HZ_PER_MHZ; /* 25 MHz at 100 Mbps */
+ else
+ val = (25 * HZ_PER_MHZ) / 10; /* 2.5 MHz at 10 Mbps */
+ break;
+
+ case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_SGMII:
+ val = 125 * HZ_PER_MHZ; /* 125 MHz */
+ break;
+ default:
+ netdev_err(port->ndev, "selected mode does not support IET\n");
+ return -EOPNOTSUPP;
+ }
val /= MILLIHZ_PER_HZ; /* count per ms timeout */
val *= verify_time_ms; /* count for timeout ms */
@@ -294,20 +317,21 @@ static int am65_cpsw_iet_verify_wait(struct am65_cpsw_port *port)
u32 ctrl, status;
int try;
- try = 20;
- do {
- /* Reset the verify state machine by writing 1
- * to LINKFAIL
- */
- ctrl = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
- ctrl |= AM65_CPSW_PN_IET_MAC_LINKFAIL;
- writel(ctrl, port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
+ try = 3;
+
+ /* Reset the verify state machine by writing 1
+ * to LINKFAIL
+ */
+ ctrl = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
+ ctrl |= AM65_CPSW_PN_IET_MAC_LINKFAIL;
+ writel(ctrl, port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
- /* Clear MAC_LINKFAIL bit to start Verify. */
- ctrl = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
- ctrl &= ~AM65_CPSW_PN_IET_MAC_LINKFAIL;
- writel(ctrl, port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
+ /* Clear MAC_LINKFAIL bit to start Verify. */
+ ctrl = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
+ ctrl &= ~AM65_CPSW_PN_IET_MAC_LINKFAIL;
+ writel(ctrl, port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
+ do {
msleep(port->qos.iet.verify_time_ms);
status = readl(port->port_base + AM65_CPSW_PN_REG_IET_STATUS);
@@ -329,7 +353,7 @@ static int am65_cpsw_iet_verify_wait(struct am65_cpsw_port *port)
netdev_dbg(port->ndev, "MAC Merge verify error\n");
return -ENODEV;
}
- } while (try-- > 0);
+ } while (--try > 0);
netdev_dbg(port->ndev, "MAC Merge verify timeout\n");
return -ETIMEDOUT;
@@ -837,6 +861,7 @@ static int am65_cpsw_taprio_replace(struct net_device *ndev,
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
struct am65_cpts *cpts = common->cpts;
struct am65_cpsw_est *est_new;
+ u64 cur_time, n;
int ret, tact;
if (!netif_running(ndev)) {
@@ -888,13 +913,21 @@ static int am65_cpsw_taprio_replace(struct net_device *ndev,
if (tact == TACT_PROG)
am65_cpsw_timer_stop(ndev);
- if (!est_new->taprio.base_time)
- est_new->taprio.base_time = am65_cpts_ns_gettime(cpts);
-
am65_cpsw_port_est_get_buf_num(ndev, est_new);
am65_cpsw_est_set_sched_list(ndev, est_new);
am65_cpsw_port_est_assign_buf_num(ndev, est_new->buf);
+ /* If the base-time is in the past, start schedule from the time:
+ * base_time + (N*cycle_time)
+ * where N is the smallest possible integer such that the above
+ * time is in the future.
+ */
+ cur_time = am65_cpts_ns_gettime(cpts);
+ if (est_new->taprio.base_time < cur_time) {
+ n = div64_u64(cur_time - est_new->taprio.base_time, est_new->taprio.cycle_time);
+ est_new->taprio.base_time += (n + 1) * est_new->taprio.cycle_time;
+ }
+
am65_cpsw_est_set(ndev, 1);
if (tact == TACT_PROG) {
@@ -1008,6 +1041,9 @@ static int am65_cpsw_qos_clsflower_add_policer(struct am65_cpsw_port *port,
return -EOPNOTSUPP;
}
+ if (flow_rule_match_has_control_flags(rule, extack))
+ return -EOPNOTSUPP;
+
if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
NL_SET_ERR_MSG_MOD(extack, "Not matching on eth address");
return -EOPNOTSUPP;
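A quick worked example of the taprio base-time adjustment in the hunk above, with made-up numbers: if cur_time is 1,000,000 ns, taprio.base_time is 100,000 ns and taprio.cycle_time is 250,000 ns, then n = div64_u64(900,000, 250,000) = 3 and base_time is moved to 100,000 + 4 * 250,000 = 1,100,000 ns, i.e. the first cycle boundary after the current CPTS time, so the EST schedule is never programmed with a base time in the past.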
diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c
index c66618d91c28..8ffbfaa3ab18 100644
--- a/drivers/net/ethernet/ti/am65-cpts.c
+++ b/drivers/net/ethernet/ti/am65-cpts.c
@@ -163,7 +163,9 @@ struct am65_cpts {
struct device_node *clk_mux_np;
struct clk *refclk;
u32 refclk_freq;
- struct list_head events;
+ /* separate lists to handle TX and RX timestamp independently */
+ struct list_head events_tx;
+ struct list_head events_rx;
struct list_head pool;
struct am65_cpts_event pool_data[AM65_CPTS_MAX_EVENTS];
spinlock_t lock; /* protects events lists*/
@@ -227,6 +229,24 @@ static void am65_cpts_disable(struct am65_cpts *cpts)
am65_cpts_write32(cpts, 0, int_enable);
}
+static int am65_cpts_purge_event_list(struct am65_cpts *cpts,
+ struct list_head *events)
+{
+ struct list_head *this, *next;
+ struct am65_cpts_event *event;
+ int removed = 0;
+
+ list_for_each_safe(this, next, events) {
+ event = list_entry(this, struct am65_cpts_event, list);
+ if (time_after(jiffies, event->tmo)) {
+ list_del_init(&event->list);
+ list_add(&event->list, &cpts->pool);
+ ++removed;
+ }
+ }
+ return removed;
+}
+
static int am65_cpts_event_get_port(struct am65_cpts_event *event)
{
return (event->event1 & AM65_CPTS_EVENT_1_PORT_NUMBER_MASK) >>
@@ -239,20 +259,12 @@ static int am65_cpts_event_get_type(struct am65_cpts_event *event)
AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT;
}
-static int am65_cpts_cpts_purge_events(struct am65_cpts *cpts)
+static int am65_cpts_purge_events(struct am65_cpts *cpts)
{
- struct list_head *this, *next;
- struct am65_cpts_event *event;
int removed = 0;
- list_for_each_safe(this, next, &cpts->events) {
- event = list_entry(this, struct am65_cpts_event, list);
- if (time_after(jiffies, event->tmo)) {
- list_del_init(&event->list);
- list_add(&event->list, &cpts->pool);
- ++removed;
- }
- }
+ removed += am65_cpts_purge_event_list(cpts, &cpts->events_tx);
+ removed += am65_cpts_purge_event_list(cpts, &cpts->events_rx);
if (removed)
dev_dbg(cpts->dev, "event pool cleaned up %d\n", removed);
@@ -275,21 +287,19 @@ static bool am65_cpts_fifo_pop_event(struct am65_cpts *cpts,
return true;
}
-static int am65_cpts_fifo_read(struct am65_cpts *cpts)
+static int __am65_cpts_fifo_read(struct am65_cpts *cpts)
{
struct ptp_clock_event pevent;
struct am65_cpts_event *event;
bool schedule = false;
int i, type, ret = 0;
- unsigned long flags;
- spin_lock_irqsave(&cpts->lock, flags);
for (i = 0; i < AM65_CPTS_FIFO_DEPTH; i++) {
event = list_first_entry_or_null(&cpts->pool,
struct am65_cpts_event, list);
if (!event) {
- if (am65_cpts_cpts_purge_events(cpts)) {
+ if (am65_cpts_purge_events(cpts)) {
dev_err(cpts->dev, "cpts: event pool empty\n");
ret = -1;
goto out;
@@ -308,12 +318,21 @@ static int am65_cpts_fifo_read(struct am65_cpts *cpts)
cpts->timestamp);
break;
case AM65_CPTS_EV_RX:
+ event->tmo = jiffies +
+ msecs_to_jiffies(AM65_CPTS_EVENT_RX_TX_TIMEOUT);
+
+ list_move_tail(&event->list, &cpts->events_rx);
+
+ dev_dbg(cpts->dev,
+ "AM65_CPTS_EV_RX e1:%08x e2:%08x t:%lld\n",
+ event->event1, event->event2,
+ event->timestamp);
+ break;
case AM65_CPTS_EV_TX:
event->tmo = jiffies +
msecs_to_jiffies(AM65_CPTS_EVENT_RX_TX_TIMEOUT);
- list_del_init(&event->list);
- list_add_tail(&event->list, &cpts->events);
+ list_move_tail(&event->list, &cpts->events_tx);
dev_dbg(cpts->dev,
"AM65_CPTS_EV_TX e1:%08x e2:%08x t:%lld\n",
@@ -356,14 +375,24 @@ static int am65_cpts_fifo_read(struct am65_cpts *cpts)
}
out:
- spin_unlock_irqrestore(&cpts->lock, flags);
-
if (schedule)
ptp_schedule_worker(cpts->ptp_clock, 0);
return ret;
}
+static int am65_cpts_fifo_read(struct am65_cpts *cpts)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&cpts->lock, flags);
+ ret = __am65_cpts_fifo_read(cpts);
+ spin_unlock_irqrestore(&cpts->lock, flags);
+
+ return ret;
+}
+
static u64 am65_cpts_gettime(struct am65_cpts *cpts,
struct ptp_system_timestamp *sts)
{
@@ -784,6 +813,11 @@ static bool am65_cpts_match_tx_ts(struct am65_cpts *cpts,
struct am65_cpts_skb_cb_data *skb_cb =
(struct am65_cpts_skb_cb_data *)skb->cb;
+ if ((ptp_classify_raw(skb) & PTP_CLASS_V1) &&
+ ((mtype_seqid & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK) ==
+ (skb_cb->skb_mtype_seqid & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK)))
+ mtype_seqid = skb_cb->skb_mtype_seqid;
+
if (mtype_seqid == skb_cb->skb_mtype_seqid) {
u64 ns = event->timestamp;
@@ -816,7 +850,7 @@ static bool am65_cpts_match_tx_ts(struct am65_cpts *cpts,
return found;
}
-static void am65_cpts_find_ts(struct am65_cpts *cpts)
+static void am65_cpts_find_tx_ts(struct am65_cpts *cpts)
{
struct am65_cpts_event *event;
struct list_head *this, *next;
@@ -825,7 +859,7 @@ static void am65_cpts_find_ts(struct am65_cpts *cpts)
LIST_HEAD(events);
spin_lock_irqsave(&cpts->lock, flags);
- list_splice_init(&cpts->events, &events);
+ list_splice_init(&cpts->events_tx, &events);
spin_unlock_irqrestore(&cpts->lock, flags);
list_for_each_safe(this, next, &events) {
@@ -838,7 +872,7 @@ static void am65_cpts_find_ts(struct am65_cpts *cpts)
}
spin_lock_irqsave(&cpts->lock, flags);
- list_splice_tail(&events, &cpts->events);
+ list_splice_tail(&events, &cpts->events_tx);
list_splice_tail(&events_free, &cpts->pool);
spin_unlock_irqrestore(&cpts->lock, flags);
}
@@ -849,7 +883,7 @@ static long am65_cpts_ts_work(struct ptp_clock_info *ptp)
unsigned long flags;
long delay = -1;
- am65_cpts_find_ts(cpts);
+ am65_cpts_find_tx_ts(cpts);
spin_lock_irqsave(&cpts->txq.lock, flags);
if (!skb_queue_empty(&cpts->txq))
@@ -859,29 +893,6 @@ static long am65_cpts_ts_work(struct ptp_clock_info *ptp)
return delay;
}
-/**
- * am65_cpts_rx_enable - enable rx timestamping
- * @cpts: cpts handle
- * @en: enable
- *
- * This functions enables rx packets timestamping. The CPTS can timestamp all
- * rx packets.
- */
-void am65_cpts_rx_enable(struct am65_cpts *cpts, bool en)
-{
- u32 val;
-
- mutex_lock(&cpts->ptp_clk_lock);
- val = am65_cpts_read32(cpts, control);
- if (en)
- val |= AM65_CPTS_CONTROL_TSTAMP_EN;
- else
- val &= ~AM65_CPTS_CONTROL_TSTAMP_EN;
- am65_cpts_write32(cpts, val, control);
- mutex_unlock(&cpts->ptp_clk_lock);
-}
-EXPORT_SYMBOL_GPL(am65_cpts_rx_enable);
-
static int am65_skb_get_mtype_seqid(struct sk_buff *skb, u32 *mtype_seqid)
{
unsigned int ptp_class = ptp_classify_raw(skb);
@@ -906,6 +917,69 @@ static int am65_skb_get_mtype_seqid(struct sk_buff *skb, u32 *mtype_seqid)
return 1;
}
+static u64 am65_cpts_find_rx_ts(struct am65_cpts *cpts, u32 skb_mtype_seqid)
+{
+ struct list_head *this, *next;
+ struct am65_cpts_event *event;
+ unsigned long flags;
+ u32 mtype_seqid;
+ u64 ns = 0;
+
+ spin_lock_irqsave(&cpts->lock, flags);
+ __am65_cpts_fifo_read(cpts);
+ list_for_each_safe(this, next, &cpts->events_rx) {
+ event = list_entry(this, struct am65_cpts_event, list);
+ if (time_after(jiffies, event->tmo)) {
+ list_move(&event->list, &cpts->pool);
+ continue;
+ }
+
+ mtype_seqid = event->event1 &
+ (AM65_CPTS_EVENT_1_MESSAGE_TYPE_MASK |
+ AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK |
+ AM65_CPTS_EVENT_1_EVENT_TYPE_MASK);
+
+ if (mtype_seqid == skb_mtype_seqid) {
+ ns = event->timestamp;
+ list_move(&event->list, &cpts->pool);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&cpts->lock, flags);
+
+ return ns;
+}
+
+void am65_cpts_rx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb)
+{
+ struct am65_cpts_skb_cb_data *skb_cb = (struct am65_cpts_skb_cb_data *)skb->cb;
+ struct skb_shared_hwtstamps *ssh;
+ int ret;
+ u64 ns;
+
+ /* am65_cpts_rx_timestamp() is called before eth_type_trans(), so
+ * skb MAC Hdr properties are not configured yet. Hence need to
+ * reset skb MAC header here
+ */
+ skb_reset_mac_header(skb);
+ ret = am65_skb_get_mtype_seqid(skb, &skb_cb->skb_mtype_seqid);
+ if (!ret)
+ return; /* if not PTP class packet */
+
+ skb_cb->skb_mtype_seqid |= (AM65_CPTS_EV_RX << AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT);
+
+ dev_dbg(cpts->dev, "%s mtype seqid %08x\n", __func__, skb_cb->skb_mtype_seqid);
+
+ ns = am65_cpts_find_rx_ts(cpts, skb_cb->skb_mtype_seqid);
+ if (!ns)
+ return;
+
+ ssh = skb_hwtstamps(skb);
+ memset(ssh, 0, sizeof(*ssh));
+ ssh->hwtstamp = ns_to_ktime(ns);
+}
+EXPORT_SYMBOL_GPL(am65_cpts_rx_timestamp);
+
/**
* am65_cpts_tx_timestamp - save tx packet for timestamping
* @cpts: cpts handle
@@ -1103,7 +1177,8 @@ struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
return ERR_PTR(ret);
mutex_init(&cpts->ptp_clk_lock);
- INIT_LIST_HEAD(&cpts->events);
+ INIT_LIST_HEAD(&cpts->events_tx);
+ INIT_LIST_HEAD(&cpts->events_rx);
INIT_LIST_HEAD(&cpts->pool);
spin_lock_init(&cpts->lock);
skb_queue_head_init(&cpts->txq);
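With am65_cpts_rx_enable() removed, RX timestamps are now pulled on demand: the MAC driver hands each received skb to am65_cpts_rx_timestamp(), which classifies the frame, drains the event FIFO under cpts->lock and attaches the matching entry from the new events_rx list. A minimal sketch of the expected call site in a NIC receive path; the surrounding function and parameters are illustrative:

static void example_rx_deliver(struct am65_cpts *cpts,
			       struct napi_struct *napi,
			       struct net_device *ndev,
			       struct sk_buff *skb, bool rx_ts_enabled)
{
	/* Must run before eth_type_trans(): am65_cpts_rx_timestamp()
	 * resets the MAC header itself and parses the raw frame.
	 */
	if (rx_ts_enabled)
		am65_cpts_rx_timestamp(cpts, skb);

	skb->protocol = eth_type_trans(skb, ndev);
	napi_gro_receive(napi, skb);
}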
diff --git a/drivers/net/ethernet/ti/am65-cpts.h b/drivers/net/ethernet/ti/am65-cpts.h
index 6e14df0be113..6099d772799d 100644
--- a/drivers/net/ethernet/ti/am65-cpts.h
+++ b/drivers/net/ethernet/ti/am65-cpts.h
@@ -22,9 +22,9 @@ void am65_cpts_release(struct am65_cpts *cpts);
struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
struct device_node *node);
int am65_cpts_phc_index(struct am65_cpts *cpts);
+void am65_cpts_rx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb);
void am65_cpts_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb);
void am65_cpts_prep_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb);
-void am65_cpts_rx_enable(struct am65_cpts *cpts, bool en);
u64 am65_cpts_ns_gettime(struct am65_cpts *cpts);
int am65_cpts_estf_enable(struct am65_cpts *cpts, int idx,
struct am65_cpts_estf_cfg *cfg);
@@ -48,17 +48,18 @@ static inline int am65_cpts_phc_index(struct am65_cpts *cpts)
return -1;
}
-static inline void am65_cpts_tx_timestamp(struct am65_cpts *cpts,
+static inline void am65_cpts_rx_timestamp(struct am65_cpts *cpts,
struct sk_buff *skb)
{
}
-static inline void am65_cpts_prep_tx_timestamp(struct am65_cpts *cpts,
- struct sk_buff *skb)
+static inline void am65_cpts_tx_timestamp(struct am65_cpts *cpts,
+ struct sk_buff *skb)
{
}
-static inline void am65_cpts_rx_enable(struct am65_cpts *cpts, bool en)
+static inline void am65_cpts_prep_tx_timestamp(struct am65_cpts *cpts,
+ struct sk_buff *skb)
{
}
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index c0a5abd8d9a8..54c24cd3d3be 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -351,6 +351,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
int ret = 0, port, ch = xmeta->ch;
int headroom = CPSW_HEADROOM_NA;
struct net_device *ndev = xmeta->ndev;
+ u32 metasize = 0;
struct cpsw_priv *priv;
struct page_pool *pool;
struct sk_buff *skb;
@@ -400,7 +401,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
size -= CPSW_RX_VLAN_ENCAP_HDR_SIZE;
}
- xdp_prepare_buff(&xdp, pa, headroom, size, false);
+ xdp_prepare_buff(&xdp, pa, headroom, size, true);
port = priv->emac_port + cpsw->data.dual_emac;
ret = cpsw_run_xdp(priv, ch, &xdp, page, port, &len);
@@ -408,6 +409,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
goto requeue;
headroom = xdp.data - xdp.data_hard_start;
+ metasize = xdp.data - xdp.data_meta;
/* XDP prog can modify vlan tag, so can't use encap header */
status &= ~CPDMA_RX_VLAN_ENCAP;
@@ -423,6 +425,8 @@ static void cpsw_rx_handler(void *token, int len, int status)
skb_reserve(skb, headroom);
skb_put(skb, len);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
skb->dev = ndev;
if (status & CPDMA_RX_VLAN_ENCAP)
cpsw_rx_vlan_encap(skb);
@@ -635,6 +639,8 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
slave->phy = phy;
+ phy_disable_eee(slave->phy);
+
phy_attached_info(slave->phy);
phy_start(slave->phy);
@@ -684,7 +690,7 @@ static void cpsw_init_host_port(struct cpsw_priv *priv)
soft_reset("cpsw", &cpsw->regs->soft_reset);
cpsw_ale_start(cpsw->ale);
- /* switch to vlan unaware mode */
+ /* switch to vlan aware mode */
cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE,
CPSW_ALE_VLAN_AWARE);
control_reg = readl(&cpsw->regs->control);
@@ -1150,6 +1156,27 @@ static void cpsw_ndo_poll_controller(struct net_device *ndev)
}
#endif
+/* We need a custom implementation of phy_do_ioctl_running() because in switch
+ * mode, dev->phydev may be different than the phy of the active_slave. We need
+ * to operate on the locally saved phy instead.
+ */
+static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
+{
+ struct cpsw_priv *priv = netdev_priv(dev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int slave_no = cpsw_slave_index(cpsw, priv);
+ struct phy_device *phy;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ phy = cpsw->slaves[slave_no].phy;
+ if (phy)
+ return phy_mii_ioctl(phy, req, cmd);
+
+ return -EOPNOTSUPP;
+}
+
static const struct net_device_ops cpsw_netdev_ops = {
.ndo_open = cpsw_ndo_open,
.ndo_stop = cpsw_ndo_stop,
@@ -1168,6 +1195,8 @@ static const struct net_device_ops cpsw_netdev_ops = {
.ndo_setup_tc = cpsw_ndo_setup_tc,
.ndo_bpf = cpsw_ndo_bpf,
.ndo_xdp_xmit = cpsw_ndo_xdp_xmit,
+ .ndo_hwtstamp_get = cpsw_hwtstamp_get,
+ .ndo_hwtstamp_set = cpsw_hwtstamp_set,
};
static void cpsw_get_drvinfo(struct net_device *ndev,
@@ -1225,7 +1254,6 @@ static const struct ethtool_ops cpsw_ethtool_ops = {
.get_link_ksettings = cpsw_get_link_ksettings,
.set_link_ksettings = cpsw_set_link_ksettings,
.get_eee = cpsw_get_eee,
- .set_eee = cpsw_set_eee,
.nway_reset = cpsw_nway_reset,
.get_ringparam = cpsw_get_ringparam,
.set_ringparam = cpsw_set_ringparam,
@@ -1641,6 +1669,9 @@ static int cpsw_probe(struct platform_device *pdev)
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
NETDEV_XDP_ACT_NDO_XMIT;
+ /* Hijack PHY timestamping requests in order to block them */
+ if (!cpsw->data.dual_emac)
+ ndev->see_all_hwtstamp_requests = true;
ndev->netdev_ops = &cpsw_netdev_ops;
ndev->ethtool_ops = &cpsw_ethtool_ops;
@@ -1802,7 +1833,7 @@ static struct platform_driver cpsw_driver = {
.of_match_table = cpsw_of_mtable,
},
.probe = cpsw_probe,
- .remove_new = cpsw_remove,
+ .remove = cpsw_remove,
};
module_platform_driver(cpsw_driver);
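Because cpsw_rx_handler() now passes meta_valid = true to xdp_prepare_buff() and propagates the metadata length with skb_metadata_set(), metadata written by an XDP program in front of the packet survives into the skb built on XDP_PASS, where a TC/BPF program can read it back. A minimal sketch of such a program, assuming a standard libbpf build; the 32-bit mark value is arbitrary:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_set_mark(struct xdp_md *ctx)
{
	void *data, *data_meta;
	__u32 *val;

	/* Reserve 4 bytes of metadata in front of the packet data. */
	if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*val)))
		return XDP_PASS;

	data = (void *)(long)ctx->data;
	data_meta = (void *)(long)ctx->data_meta;
	val = data_meta;
	if ((void *)(val + 1) > data)	/* bounds check for the verifier */
		return XDP_PASS;

	*val = 0xcafe;			/* arbitrary example mark */
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";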
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 64bf22cd860c..fbe35af615a6 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/err.h>
@@ -45,6 +46,24 @@
#define ALE_UNKNOWNVLAN_FORCE_UNTAG_EGRESS 0x9C
#define ALE_VLAN_MASK_MUX(reg) (0xc0 + (0x4 * (reg)))
+#define ALE_POLICER_PORT_OUI 0x100
+#define ALE_POLICER_DA_SA 0x104
+#define ALE_POLICER_VLAN 0x108
+#define ALE_POLICER_ETHERTYPE_IPSA 0x10c
+#define ALE_POLICER_IPDA 0x110
+#define ALE_POLICER_PIR 0x118
+#define ALE_POLICER_CIR 0x11c
+#define ALE_POLICER_TBL_CTL 0x120
+#define ALE_POLICER_CTL 0x124
+#define ALE_POLICER_TEST_CTL 0x128
+#define ALE_POLICER_HIT_STATUS 0x12c
+#define ALE_THREAD_DEF 0x134
+#define ALE_THREAD_CTL 0x138
+#define ALE_THREAD_VAL 0x13c
+
+#define ALE_POLICER_TBL_WRITE_ENABLE BIT(31)
+#define ALE_POLICER_TBL_INDEX_MASK GENMASK(4, 0)
+
#define AM65_CPSW_ALE_THREAD_DEF_REG 0x134
/* ALE_AGING_TIMER */
@@ -76,7 +95,8 @@ enum {
* @dev_id: ALE version/SoC id
* @features: features supported by ALE
* @tbl_entries: number of ALE entries
- * @major_ver_mask: mask of ALE Major Version Value in ALE_IDVER reg.
+ * @reg_fields: pointer to array of register field configuration
+ * @num_fields: number of fields in the reg_fields array
* @nu_switch_ale: NU Switch ALE
* @vlan_entry_tbl: ALE vlan entry fields description tbl
*/
@@ -84,7 +104,8 @@ struct cpsw_ale_dev_id {
const char *dev_id;
u32 features;
u32 tbl_entries;
- u32 major_ver_mask;
+ const struct reg_field *reg_fields;
+ int num_fields;
bool nu_switch_ale;
const struct ale_entry_fld *vlan_entry_tbl;
};
@@ -102,19 +123,19 @@ struct cpsw_ale_dev_id {
#define ALE_UCAST_TOUCHED 3
#define ALE_TABLE_SIZE_MULTIPLIER 1024
-#define ALE_STATUS_SIZE_MASK 0x1f
+#define ALE_POLICER_SIZE_MULTIPLIER 8
static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
{
- int idx, idx2;
+ int idx, idx2, index;
u32 hi_val = 0;
idx = start / 32;
idx2 = (start + bits - 1) / 32;
/* Check if bits to be fetched exceed a word */
if (idx != idx2) {
- idx2 = 2 - idx2; /* flip */
- hi_val = ale_entry[idx2] << ((idx2 * 32) - start);
+ index = 2 - idx2; /* flip */
+ hi_val = ale_entry[index] << ((idx2 * 32) - start);
}
start -= idx * 32;
idx = 2 - idx; /* flip */
@@ -124,16 +145,16 @@ static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits,
u32 value)
{
- int idx, idx2;
+ int idx, idx2, index;
value &= BITMASK(bits);
idx = start / 32;
idx2 = (start + bits - 1) / 32;
/* Check if bits to be set exceed a word */
if (idx != idx2) {
- idx2 = 2 - idx2; /* flip */
- ale_entry[idx2] &= ~(BITMASK(bits + start - (idx2 * 32)));
- ale_entry[idx2] |= (value >> ((idx2 * 32) - start));
+ index = 2 - idx2; /* flip */
+ ale_entry[index] &= ~(BITMASK(bits + start - (idx2 * 32)));
+ ale_entry[index] |= (value >> ((idx2 * 32) - start));
}
start -= idx * 32;
idx = 2 - idx; /* flip */
@@ -141,27 +162,39 @@ static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits,
ale_entry[idx] |= (value << start);
}
-#define DEFINE_ALE_FIELD(name, start, bits) \
+#define DEFINE_ALE_FIELD_GET(name, start, bits) \
static inline int cpsw_ale_get_##name(u32 *ale_entry) \
{ \
return cpsw_ale_get_field(ale_entry, start, bits); \
-} \
+}
+
+#define DEFINE_ALE_FIELD_SET(name, start, bits) \
static inline void cpsw_ale_set_##name(u32 *ale_entry, u32 value) \
{ \
cpsw_ale_set_field(ale_entry, start, bits, value); \
}
-#define DEFINE_ALE_FIELD1(name, start) \
+#define DEFINE_ALE_FIELD(name, start, bits) \
+DEFINE_ALE_FIELD_GET(name, start, bits) \
+DEFINE_ALE_FIELD_SET(name, start, bits)
+
+#define DEFINE_ALE_FIELD1_GET(name, start) \
static inline int cpsw_ale_get_##name(u32 *ale_entry, u32 bits) \
{ \
return cpsw_ale_get_field(ale_entry, start, bits); \
-} \
+}
+
+#define DEFINE_ALE_FIELD1_SET(name, start) \
static inline void cpsw_ale_set_##name(u32 *ale_entry, u32 value, \
u32 bits) \
{ \
cpsw_ale_set_field(ale_entry, start, bits, value); \
}
+#define DEFINE_ALE_FIELD1(name, start) \
+DEFINE_ALE_FIELD1_GET(name, start) \
+DEFINE_ALE_FIELD1_SET(name, start)
+
enum {
ALE_ENT_VID_MEMBER_LIST = 0,
ALE_ENT_VID_UNREG_MCAST_MSK,
@@ -217,14 +250,14 @@ static const struct ale_entry_fld vlan_entry_k3_cpswxg[] = {
DEFINE_ALE_FIELD(entry_type, 60, 2)
DEFINE_ALE_FIELD(vlan_id, 48, 12)
-DEFINE_ALE_FIELD(mcast_state, 62, 2)
+DEFINE_ALE_FIELD_SET(mcast_state, 62, 2)
DEFINE_ALE_FIELD1(port_mask, 66)
DEFINE_ALE_FIELD(super, 65, 1)
DEFINE_ALE_FIELD(ucast_type, 62, 2)
-DEFINE_ALE_FIELD1(port_num, 66)
-DEFINE_ALE_FIELD(blocked, 65, 1)
-DEFINE_ALE_FIELD(secure, 64, 1)
-DEFINE_ALE_FIELD(mcast, 40, 1)
+DEFINE_ALE_FIELD1_SET(port_num, 66)
+DEFINE_ALE_FIELD_SET(blocked, 65, 1)
+DEFINE_ALE_FIELD_SET(secure, 64, 1)
+DEFINE_ALE_FIELD_GET(mcast, 40, 1)
#define NU_VLAN_UNREG_MCAST_IDX 1
@@ -1198,7 +1231,7 @@ int cpsw_ale_rx_ratelimit_bc(struct cpsw_ale *ale, int port, unsigned int rateli
static void cpsw_ale_timer(struct timer_list *t)
{
- struct cpsw_ale *ale = from_timer(ale, t, timer);
+ struct cpsw_ale *ale = timer_container_of(ale, t, timer);
cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
@@ -1254,7 +1287,7 @@ static void cpsw_ale_aging_stop(struct cpsw_ale *ale)
return;
}
- del_timer_sync(&ale->timer);
+ timer_delete_sync(&ale->timer);
}
void cpsw_ale_start(struct cpsw_ale *ale)
@@ -1292,25 +1325,111 @@ void cpsw_ale_stop(struct cpsw_ale *ale)
cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0);
}
+static const struct reg_field ale_fields_cpsw[] = {
+ /* CPSW_ALE_IDVER_REG */
+ [MINOR_VER] = REG_FIELD(ALE_IDVER, 0, 7),
+ [MAJOR_VER] = REG_FIELD(ALE_IDVER, 8, 15),
+};
+
+static const struct reg_field ale_fields_cpsw_nu[] = {
+ /* CPSW_ALE_IDVER_REG */
+ [MINOR_VER] = REG_FIELD(ALE_IDVER, 0, 7),
+ [MAJOR_VER] = REG_FIELD(ALE_IDVER, 8, 10),
+ /* CPSW_ALE_STATUS_REG */
+ [ALE_ENTRIES] = REG_FIELD(ALE_STATUS, 0, 7),
+ [ALE_POLICERS] = REG_FIELD(ALE_STATUS, 8, 15),
+ /* CPSW_ALE_POLICER_PORT_OUI_REG */
+ [POL_PORT_MEN] = REG_FIELD(ALE_POLICER_PORT_OUI, 31, 31),
+ [POL_TRUNK_ID] = REG_FIELD(ALE_POLICER_PORT_OUI, 30, 30),
+ [POL_PORT_NUM] = REG_FIELD(ALE_POLICER_PORT_OUI, 25, 25),
+ [POL_PRI_MEN] = REG_FIELD(ALE_POLICER_PORT_OUI, 19, 19),
+ [POL_PRI_VAL] = REG_FIELD(ALE_POLICER_PORT_OUI, 16, 18),
+ [POL_OUI_MEN] = REG_FIELD(ALE_POLICER_PORT_OUI, 15, 15),
+ [POL_OUI_INDEX] = REG_FIELD(ALE_POLICER_PORT_OUI, 0, 5),
+
+ /* CPSW_ALE_POLICER_DA_SA_REG */
+ [POL_DST_MEN] = REG_FIELD(ALE_POLICER_DA_SA, 31, 31),
+ [POL_DST_INDEX] = REG_FIELD(ALE_POLICER_DA_SA, 16, 21),
+ [POL_SRC_MEN] = REG_FIELD(ALE_POLICER_DA_SA, 15, 15),
+ [POL_SRC_INDEX] = REG_FIELD(ALE_POLICER_DA_SA, 0, 5),
+
+ /* CPSW_ALE_POLICER_VLAN_REG */
+ [POL_OVLAN_MEN] = REG_FIELD(ALE_POLICER_VLAN, 31, 31),
+ [POL_OVLAN_INDEX] = REG_FIELD(ALE_POLICER_VLAN, 16, 21),
+ [POL_IVLAN_MEN] = REG_FIELD(ALE_POLICER_VLAN, 15, 15),
+ [POL_IVLAN_INDEX] = REG_FIELD(ALE_POLICER_VLAN, 0, 5),
+
+ /* CPSW_ALE_POLICER_ETHERTYPE_IPSA_REG */
+ [POL_ETHERTYPE_MEN] = REG_FIELD(ALE_POLICER_ETHERTYPE_IPSA, 31, 31),
+ [POL_ETHERTYPE_INDEX] = REG_FIELD(ALE_POLICER_ETHERTYPE_IPSA, 16, 21),
+ [POL_IPSRC_MEN] = REG_FIELD(ALE_POLICER_ETHERTYPE_IPSA, 15, 15),
+ [POL_IPSRC_INDEX] = REG_FIELD(ALE_POLICER_ETHERTYPE_IPSA, 0, 5),
+
+ /* CPSW_ALE_POLICER_IPDA_REG */
+ [POL_IPDST_MEN] = REG_FIELD(ALE_POLICER_IPDA, 31, 31),
+ [POL_IPDST_INDEX] = REG_FIELD(ALE_POLICER_IPDA, 16, 21),
+
+ /* CPSW_ALE_POLICER_TBL_CTL_REG */
+ /**
+ * REG_FIELDS not defined for this as fields cannot be correctly
+ * used independently
+ */
+
+ /* CPSW_ALE_POLICER_CTL_REG */
+ [POL_EN] = REG_FIELD(ALE_POLICER_CTL, 31, 31),
+ [POL_RED_DROP_EN] = REG_FIELD(ALE_POLICER_CTL, 29, 29),
+ [POL_YELLOW_DROP_EN] = REG_FIELD(ALE_POLICER_CTL, 28, 28),
+ [POL_YELLOW_THRESH] = REG_FIELD(ALE_POLICER_CTL, 24, 26),
+ [POL_POL_MATCH_MODE] = REG_FIELD(ALE_POLICER_CTL, 22, 23),
+ [POL_PRIORITY_THREAD_EN] = REG_FIELD(ALE_POLICER_CTL, 21, 21),
+ [POL_MAC_ONLY_DEF_DIS] = REG_FIELD(ALE_POLICER_CTL, 20, 20),
+
+ /* CPSW_ALE_POLICER_TEST_CTL_REG */
+ [POL_TEST_CLR] = REG_FIELD(ALE_POLICER_TEST_CTL, 31, 31),
+ [POL_TEST_CLR_RED] = REG_FIELD(ALE_POLICER_TEST_CTL, 30, 30),
+ [POL_TEST_CLR_YELLOW] = REG_FIELD(ALE_POLICER_TEST_CTL, 29, 29),
+ [POL_TEST_CLR_SELECTED] = REG_FIELD(ALE_POLICER_TEST_CTL, 28, 28),
+ [POL_TEST_ENTRY] = REG_FIELD(ALE_POLICER_TEST_CTL, 0, 4),
+
+ /* CPSW_ALE_POLICER_HIT_STATUS_REG */
+ [POL_STATUS_HIT] = REG_FIELD(ALE_POLICER_HIT_STATUS, 31, 31),
+ [POL_STATUS_HIT_RED] = REG_FIELD(ALE_POLICER_HIT_STATUS, 30, 30),
+ [POL_STATUS_HIT_YELLOW] = REG_FIELD(ALE_POLICER_HIT_STATUS, 29, 29),
+
+ /* CPSW_ALE_THREAD_DEF_REG */
+ [ALE_DEFAULT_THREAD_EN] = REG_FIELD(ALE_THREAD_DEF, 15, 15),
+ [ALE_DEFAULT_THREAD_VAL] = REG_FIELD(ALE_THREAD_DEF, 0, 5),
+
+ /* CPSW_ALE_THREAD_CTL_REG */
+ [ALE_THREAD_CLASS_INDEX] = REG_FIELD(ALE_THREAD_CTL, 0, 4),
+
+ /* CPSW_ALE_THREAD_VAL_REG */
+ [ALE_THREAD_ENABLE] = REG_FIELD(ALE_THREAD_VAL, 15, 15),
+ [ALE_THREAD_VALUE] = REG_FIELD(ALE_THREAD_VAL, 0, 5),
+};
+
static const struct cpsw_ale_dev_id cpsw_ale_id_match[] = {
{
/* am3/4/5, dra7. dm814x, 66ak2hk-gbe */
.dev_id = "cpsw",
.tbl_entries = 1024,
- .major_ver_mask = 0xff,
+ .reg_fields = ale_fields_cpsw,
+ .num_fields = ARRAY_SIZE(ale_fields_cpsw),
.vlan_entry_tbl = vlan_entry_cpsw,
},
{
/* 66ak2h_xgbe */
.dev_id = "66ak2h-xgbe",
.tbl_entries = 2048,
- .major_ver_mask = 0xff,
+ .reg_fields = ale_fields_cpsw,
+ .num_fields = ARRAY_SIZE(ale_fields_cpsw),
.vlan_entry_tbl = vlan_entry_cpsw,
},
{
.dev_id = "66ak2el",
.features = CPSW_ALE_F_STATUS_REG,
- .major_ver_mask = 0x7,
+ .reg_fields = ale_fields_cpsw_nu,
+ .num_fields = ARRAY_SIZE(ale_fields_cpsw_nu),
.nu_switch_ale = true,
.vlan_entry_tbl = vlan_entry_nu,
},
@@ -1318,7 +1437,8 @@ static const struct cpsw_ale_dev_id cpsw_ale_id_match[] = {
.dev_id = "66ak2g",
.features = CPSW_ALE_F_STATUS_REG,
.tbl_entries = 64,
- .major_ver_mask = 0x7,
+ .reg_fields = ale_fields_cpsw_nu,
+ .num_fields = ARRAY_SIZE(ale_fields_cpsw_nu),
.nu_switch_ale = true,
.vlan_entry_tbl = vlan_entry_nu,
},
@@ -1326,20 +1446,23 @@ static const struct cpsw_ale_dev_id cpsw_ale_id_match[] = {
.dev_id = "am65x-cpsw2g",
.features = CPSW_ALE_F_STATUS_REG | CPSW_ALE_F_HW_AUTOAGING,
.tbl_entries = 64,
- .major_ver_mask = 0x7,
+ .reg_fields = ale_fields_cpsw_nu,
+ .num_fields = ARRAY_SIZE(ale_fields_cpsw_nu),
.nu_switch_ale = true,
.vlan_entry_tbl = vlan_entry_nu,
},
{
.dev_id = "j721e-cpswxg",
.features = CPSW_ALE_F_STATUS_REG | CPSW_ALE_F_HW_AUTOAGING,
- .major_ver_mask = 0x7,
+ .reg_fields = ale_fields_cpsw_nu,
+ .num_fields = ARRAY_SIZE(ale_fields_cpsw_nu),
.vlan_entry_tbl = vlan_entry_k3_cpswxg,
},
{
.dev_id = "am64-cpswxg",
.features = CPSW_ALE_F_STATUS_REG | CPSW_ALE_F_HW_AUTOAGING,
- .major_ver_mask = 0x7,
+ .reg_fields = ale_fields_cpsw_nu,
+ .num_fields = ARRAY_SIZE(ale_fields_cpsw_nu),
.vlan_entry_tbl = vlan_entry_k3_cpswxg,
.tbl_entries = 512,
},
@@ -1361,47 +1484,81 @@ cpsw_ale_dev_id *cpsw_ale_match_id(const struct cpsw_ale_dev_id *id,
return NULL;
}
+static const struct regmap_config ale_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .name = "cpsw-ale",
+};
+
+static int cpsw_ale_regfield_init(struct cpsw_ale *ale)
+{
+ const struct reg_field *reg_fields = ale->params.reg_fields;
+ struct device *dev = ale->params.dev;
+ struct regmap *regmap = ale->regmap;
+ int i;
+
+ for (i = 0; i < ale->params.num_fields; i++) {
+ ale->fields[i] = devm_regmap_field_alloc(dev, regmap,
+ reg_fields[i]);
+ if (IS_ERR(ale->fields[i])) {
+ dev_err(dev, "Unable to allocate regmap field %d\n", i);
+ return PTR_ERR(ale->fields[i]);
+ }
+ }
+
+ return 0;
+}
+
struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
{
+ u32 ale_entries, rev_major, rev_minor, policers;
const struct cpsw_ale_dev_id *ale_dev_id;
struct cpsw_ale *ale;
- u32 rev, ale_entries;
+ int ret;
ale_dev_id = cpsw_ale_match_id(cpsw_ale_id_match, params->dev_id);
if (!ale_dev_id)
return ERR_PTR(-EINVAL);
params->ale_entries = ale_dev_id->tbl_entries;
- params->major_ver_mask = ale_dev_id->major_ver_mask;
params->nu_switch_ale = ale_dev_id->nu_switch_ale;
+ params->reg_fields = ale_dev_id->reg_fields;
+ params->num_fields = ale_dev_id->num_fields;
ale = devm_kzalloc(params->dev, sizeof(*ale), GFP_KERNEL);
if (!ale)
return ERR_PTR(-ENOMEM);
+ ale->regmap = devm_regmap_init_mmio(params->dev, params->ale_regs,
+ &ale_regmap_cfg);
+ if (IS_ERR(ale->regmap)) {
+ dev_err(params->dev, "Couldn't create CPSW ALE regmap\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ale->params = *params;
+ ret = cpsw_ale_regfield_init(ale);
+ if (ret)
+ return ERR_PTR(ret);
ale->p0_untag_vid_mask = devm_bitmap_zalloc(params->dev, VLAN_N_VID,
GFP_KERNEL);
if (!ale->p0_untag_vid_mask)
return ERR_PTR(-ENOMEM);
- ale->params = *params;
ale->ageout = ale->params.ale_ageout * HZ;
ale->features = ale_dev_id->features;
ale->vlan_entry_tbl = ale_dev_id->vlan_entry_tbl;
- rev = readl_relaxed(ale->params.ale_regs + ALE_IDVER);
- ale->version =
- (ALE_VERSION_MAJOR(rev, ale->params.major_ver_mask) << 8) |
- ALE_VERSION_MINOR(rev);
+ regmap_field_read(ale->fields[MINOR_VER], &rev_minor);
+ regmap_field_read(ale->fields[MAJOR_VER], &rev_major);
+ ale->version = rev_major << 8 | rev_minor;
dev_info(ale->params.dev, "initialized cpsw ale version %d.%d\n",
- ALE_VERSION_MAJOR(rev, ale->params.major_ver_mask),
- ALE_VERSION_MINOR(rev));
+ rev_major, rev_minor);
if (ale->features & CPSW_ALE_F_STATUS_REG &&
!ale->params.ale_entries) {
- ale_entries =
- readl_relaxed(ale->params.ale_regs + ALE_STATUS) &
- ALE_STATUS_SIZE_MASK;
+ regmap_field_read(ale->fields[ALE_ENTRIES], &ale_entries);
/* ALE available on newer NetCP switches has introduced
* a register, ALE_STATUS, to indicate the size of ALE
* table which shows the size as a multiple of 1024 entries.
@@ -1415,8 +1572,20 @@ struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
ale_entries *= ALE_TABLE_SIZE_MULTIPLIER;
ale->params.ale_entries = ale_entries;
}
+
+ if (ale->features & CPSW_ALE_F_STATUS_REG &&
+ !ale->params.num_policers) {
+ regmap_field_read(ale->fields[ALE_POLICERS], &policers);
+ if (!policers)
+ return ERR_PTR(-EINVAL);
+
+ policers *= ALE_POLICER_SIZE_MULTIPLIER;
+ ale->params.num_policers = policers;
+ }
+
dev_info(ale->params.dev,
- "ALE Table size %ld\n", ale->params.ale_entries);
+ "ALE Table size %ld, Policers %ld\n", ale->params.ale_entries,
+ ale->params.num_policers);
/* set default bits for existing h/w */
ale->port_mask_bits = ale->params.ale_ports;
@@ -1480,3 +1649,105 @@ u32 cpsw_ale_get_num_entries(struct cpsw_ale *ale)
{
return ale ? ale->params.ale_entries : 0;
}
+
+/* Reads the specified policer index into ALE POLICER registers */
+static void cpsw_ale_policer_read_idx(struct cpsw_ale *ale, u32 idx)
+{
+ idx &= ALE_POLICER_TBL_INDEX_MASK;
+ writel_relaxed(idx, ale->params.ale_regs + ALE_POLICER_TBL_CTL);
+}
+
+/* Writes the ALE POLICER registers into the specified policer index */
+static void cpsw_ale_policer_write_idx(struct cpsw_ale *ale, u32 idx)
+{
+ idx &= ALE_POLICER_TBL_INDEX_MASK;
+ idx |= ALE_POLICER_TBL_WRITE_ENABLE;
+ writel_relaxed(idx, ale->params.ale_regs + ALE_POLICER_TBL_CTL);
+}
+
+/* enables/disables the custom thread value for the specified policer index */
+static void cpsw_ale_policer_thread_idx_enable(struct cpsw_ale *ale, u32 idx,
+ u32 thread_id, bool enable)
+{
+ regmap_field_write(ale->fields[ALE_THREAD_CLASS_INDEX], idx);
+ regmap_field_write(ale->fields[ALE_THREAD_VALUE], thread_id);
+ regmap_field_write(ale->fields[ALE_THREAD_ENABLE], enable ? 1 : 0);
+}
+
+/* Disable all policer entries and thread mappings */
+static void cpsw_ale_policer_reset(struct cpsw_ale *ale)
+{
+ int i;
+
+ for (i = 0; i < ale->params.num_policers ; i++) {
+ cpsw_ale_policer_read_idx(ale, i);
+ regmap_field_write(ale->fields[POL_PORT_MEN], 0);
+ regmap_field_write(ale->fields[POL_PRI_MEN], 0);
+ regmap_field_write(ale->fields[POL_OUI_MEN], 0);
+ regmap_field_write(ale->fields[POL_DST_MEN], 0);
+ regmap_field_write(ale->fields[POL_SRC_MEN], 0);
+ regmap_field_write(ale->fields[POL_OVLAN_MEN], 0);
+ regmap_field_write(ale->fields[POL_IVLAN_MEN], 0);
+ regmap_field_write(ale->fields[POL_ETHERTYPE_MEN], 0);
+ regmap_field_write(ale->fields[POL_IPSRC_MEN], 0);
+ regmap_field_write(ale->fields[POL_IPDST_MEN], 0);
+ regmap_field_write(ale->fields[POL_EN], 0);
+ regmap_field_write(ale->fields[POL_RED_DROP_EN], 0);
+ regmap_field_write(ale->fields[POL_YELLOW_DROP_EN], 0);
+ regmap_field_write(ale->fields[POL_PRIORITY_THREAD_EN], 0);
+
+ cpsw_ale_policer_thread_idx_enable(ale, i, 0, 0);
+ }
+}
+
+/* Default classifier is to map 8 user priorities to N receive channels */
+void cpsw_ale_classifier_setup_default(struct cpsw_ale *ale, int num_rx_ch)
+{
+ int pri, idx;
+
+ /* Reference:
+ * IEEE802.1Q-2014, Standard for Local and metropolitan area networks
+ * Table I-2 - Traffic type acronyms
+ * Table I-3 - Defining traffic types
+ * Section I.4 Traffic types and priority values, states:
+ * "0 is thus used both for default priority and for Best Effort, and
+ * Background is associated with a priority value of 1. This means
+ * that the value 1 effectively communicates a lower priority than 0."
+ *
+ * In the table below, Priority Code Point (PCP) 0 is assigned
+ * to a higher priority thread than PCP 1 wherever possible.
+ * The table maps which thread the PCP traffic needs to be
+ * sent to for a given number of threads (RX channels). Upper threads
+ * have higher priority.
+ * e.g. if number of threads is 8 then user priority 0 will map to
+ * pri_thread_map[8-1][0] i.e. thread 1
+ */
+
+ int pri_thread_map[8][8] = { /* BK,BE,EE,CA,VI,VO,IC,NC */
+ { 0, 0, 0, 0, 0, 0, 0, 0, },
+ { 0, 0, 0, 0, 1, 1, 1, 1, },
+ { 0, 0, 0, 0, 1, 1, 2, 2, },
+ { 0, 0, 1, 1, 2, 2, 3, 3, },
+ { 0, 0, 1, 1, 2, 2, 3, 4, },
+ { 1, 0, 2, 2, 3, 3, 4, 5, },
+ { 1, 0, 2, 3, 4, 4, 5, 6, },
+ { 1, 0, 2, 3, 4, 5, 6, 7 } };
+
+ cpsw_ale_policer_reset(ale);
+
+ /* use first 8 classifiers to map 8 (DSCP/PCP) priorities to channels */
+ for (pri = 0; pri < 8; pri++) {
+ idx = pri;
+
+ /* Classifier 'idx' match on priority 'pri' */
+ cpsw_ale_policer_read_idx(ale, idx);
+ regmap_field_write(ale->fields[POL_PRI_VAL], pri);
+ regmap_field_write(ale->fields[POL_PRI_MEN], 1);
+ cpsw_ale_policer_write_idx(ale, idx);
+
+ /* Map Classifier 'idx' to thread provided by the map */
+ cpsw_ale_policer_thread_idx_enable(ale, idx,
+ pri_thread_map[num_rx_ch - 1][pri],
+ 1);
+ }
+}
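To make the new default classifier mapping concrete: with num_rx_ch = 4 the loop uses row pri_thread_map[3] = { 0, 0, 1, 1, 2, 2, 3, 3 }, so PCP 0 and 1 are steered to flow 0, PCP 2/3 to flow 1, PCP 4/5 to flow 2 and PCP 6/7 to the highest-priority flow 3. With all eight channels, row { 1, 0, 2, 3, 4, 5, 6, 7 } places PCP 1 (Background) on thread 0, below PCP 0 (Best Effort/default) on thread 1, which is exactly the IEEE 802.1Q ordering quoted in the comment.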
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index 6779ee111d57..87b7d1b3a34a 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -8,11 +8,14 @@
#ifndef __TI_CPSW_ALE_H__
#define __TI_CPSW_ALE_H__
+struct reg_field;
+
struct cpsw_ale_params {
struct device *dev;
void __iomem *ale_regs;
unsigned long ale_ageout; /* in secs */
unsigned long ale_entries;
+ unsigned long num_policers;
unsigned long ale_ports;
/* NU Switch has specific handling as number of bits in ALE entries
* are different than other versions of ALE. Also there are specific
@@ -20,19 +23,70 @@ struct cpsw_ale_params {
* to identify this hardware.
*/
bool nu_switch_ale;
- /* mask bit used in NU Switch ALE is 3 bits instead of 8 bits. So
- * pass it from caller.
- */
- u32 major_ver_mask;
+ const struct reg_field *reg_fields;
+ int num_fields;
const char *dev_id;
unsigned long bus_freq;
};
struct ale_entry_fld;
+struct regmap;
+
+enum ale_fields {
+ MINOR_VER,
+ MAJOR_VER,
+ ALE_ENTRIES,
+ ALE_POLICERS,
+ POL_PORT_MEN,
+ POL_TRUNK_ID,
+ POL_PORT_NUM,
+ POL_PRI_MEN,
+ POL_PRI_VAL,
+ POL_OUI_MEN,
+ POL_OUI_INDEX,
+ POL_DST_MEN,
+ POL_DST_INDEX,
+ POL_SRC_MEN,
+ POL_SRC_INDEX,
+ POL_OVLAN_MEN,
+ POL_OVLAN_INDEX,
+ POL_IVLAN_MEN,
+ POL_IVLAN_INDEX,
+ POL_ETHERTYPE_MEN,
+ POL_ETHERTYPE_INDEX,
+ POL_IPSRC_MEN,
+ POL_IPSRC_INDEX,
+ POL_IPDST_MEN,
+ POL_IPDST_INDEX,
+ POL_EN,
+ POL_RED_DROP_EN,
+ POL_YELLOW_DROP_EN,
+ POL_YELLOW_THRESH,
+ POL_POL_MATCH_MODE,
+ POL_PRIORITY_THREAD_EN,
+ POL_MAC_ONLY_DEF_DIS,
+ POL_TEST_CLR,
+ POL_TEST_CLR_RED,
+ POL_TEST_CLR_YELLOW,
+ POL_TEST_CLR_SELECTED,
+ POL_TEST_ENTRY,
+ POL_STATUS_HIT,
+ POL_STATUS_HIT_RED,
+ POL_STATUS_HIT_YELLOW,
+ ALE_DEFAULT_THREAD_EN,
+ ALE_DEFAULT_THREAD_VAL,
+ ALE_THREAD_CLASS_INDEX,
+ ALE_THREAD_ENABLE,
+ ALE_THREAD_VALUE,
+ /* terminator */
+ ALE_FIELDS_MAX,
+};
struct cpsw_ale {
struct cpsw_ale_params params;
struct timer_list timer;
+ struct regmap *regmap;
+ struct regmap_field *fields[ALE_FIELDS_MAX];
unsigned long ageout;
u32 version;
u32 features;
@@ -140,5 +194,6 @@ int cpsw_ale_vlan_add_modify(struct cpsw_ale *ale, u16 vid, int port_mask,
int cpsw_ale_vlan_del_modify(struct cpsw_ale *ale, u16 vid, int port_mask);
void cpsw_ale_set_unreg_mcast(struct cpsw_ale *ale, int unreg_mcast_mask,
bool add);
+void cpsw_ale_classifier_setup_default(struct cpsw_ale *ale, int num_rx_ch);
#endif
diff --git a/drivers/net/ethernet/ti/cpsw_ethtool.c b/drivers/net/ethernet/ti/cpsw_ethtool.c
index f7b283353ba2..bdc4db0d169c 100644
--- a/drivers/net/ethernet/ti/cpsw_ethtool.c
+++ b/drivers/net/ethernet/ti/cpsw_ethtool.c
@@ -434,18 +434,6 @@ int cpsw_get_eee(struct net_device *ndev, struct ethtool_keee *edata)
return -EOPNOTSUPP;
}
-int cpsw_set_eee(struct net_device *ndev, struct ethtool_keee *edata)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_common *cpsw = priv->cpsw;
- int slave_no = cpsw_slave_index(cpsw, priv);
-
- if (cpsw->slaves[slave_no].phy)
- return phy_ethtool_set_eee(cpsw->slaves[slave_no].phy, edata);
- else
- return -EOPNOTSUPP;
-}
-
int cpsw_nway_reset(struct net_device *ndev)
{
struct cpsw_priv *priv = netdev_priv(ndev);
@@ -717,7 +705,7 @@ err:
}
#if IS_ENABLED(CONFIG_TI_CPTS)
-int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
+int cpsw_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts_info *info)
{
struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
@@ -725,8 +713,6 @@ int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
info->phc_index = cpsw->cpts->phc_index;
info->tx_types =
@@ -738,13 +724,10 @@ int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
return 0;
}
#else
-int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
+int cpsw_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts_info *info)
{
info->so_timestamping =
- SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
- info->phc_index = -1;
+ SOF_TIMESTAMPING_TX_SOFTWARE;
info->tx_types = 0;
info->rx_filters = 0;
return 0;
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 087dcb67505a..ab88d4c02cbd 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -293,6 +293,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
struct page_pool *pool;
struct sk_buff *skb;
struct xdp_buff xdp;
+ u32 metasize = 0;
int ret = 0;
dma_addr_t dma;
@@ -345,13 +346,14 @@ static void cpsw_rx_handler(void *token, int len, int status)
size -= CPSW_RX_VLAN_ENCAP_HDR_SIZE;
}
- xdp_prepare_buff(&xdp, pa, headroom, size, false);
+ xdp_prepare_buff(&xdp, pa, headroom, size, true);
ret = cpsw_run_xdp(priv, ch, &xdp, page, priv->emac_port, &len);
if (ret != CPSW_XDP_PASS)
goto requeue;
headroom = xdp.data - xdp.data_hard_start;
+ metasize = xdp.data - xdp.data_meta;
/* XDP prog can modify vlan tag, so can't use encap header */
status &= ~CPDMA_RX_VLAN_ENCAP;
@@ -368,6 +370,8 @@ static void cpsw_rx_handler(void *token, int len, int status)
skb->offload_fwd_mark = priv->offload_fwd_mark;
skb_reserve(skb, headroom);
skb_put(skb, len);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
skb->dev = ndev;
if (status & CPDMA_RX_VLAN_ENCAP)
cpsw_rx_vlan_encap(skb);
@@ -554,7 +558,7 @@ static void cpsw_init_host_port(struct cpsw_priv *priv)
soft_reset("cpsw", &cpsw->regs->soft_reset);
cpsw_ale_start(cpsw->ale);
- /* switch to vlan unaware mode */
+ /* switch to vlan aware mode */
cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE,
CPSW_ALE_VLAN_AWARE);
control_reg = readl(&cpsw->regs->control);
@@ -778,6 +782,8 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
slave->phy = phy;
+ phy_disable_eee(slave->phy);
+
phy_attached_info(slave->phy);
phy_start(slave->phy);
@@ -1126,7 +1132,7 @@ static const struct net_device_ops cpsw_netdev_ops = {
.ndo_stop = cpsw_ndo_stop,
.ndo_start_xmit = cpsw_ndo_start_xmit,
.ndo_set_mac_address = cpsw_ndo_set_mac_address,
- .ndo_eth_ioctl = cpsw_ndo_ioctl,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = cpsw_ndo_tx_timeout,
.ndo_set_rx_mode = cpsw_ndo_set_rx_mode,
@@ -1141,6 +1147,8 @@ static const struct net_device_ops cpsw_netdev_ops = {
.ndo_bpf = cpsw_ndo_bpf,
.ndo_xdp_xmit = cpsw_ndo_xdp_xmit,
.ndo_get_port_parent_id = cpsw_get_port_parent_id,
+ .ndo_hwtstamp_get = cpsw_hwtstamp_get,
+ .ndo_hwtstamp_set = cpsw_hwtstamp_set,
};
static void cpsw_get_drvinfo(struct net_device *ndev,
@@ -1209,7 +1217,6 @@ static const struct ethtool_ops cpsw_ethtool_ops = {
.get_link_ksettings = cpsw_get_link_ksettings,
.set_link_ksettings = cpsw_set_link_ksettings,
.get_eee = cpsw_get_eee,
- .set_eee = cpsw_set_eee,
.nway_reset = cpsw_nway_reset,
.get_ringparam = cpsw_get_ringparam,
.set_ringparam = cpsw_set_ringparam,
@@ -1407,7 +1414,8 @@ static int cpsw_create_ports(struct cpsw_common *cpsw)
cpsw->slaves[i].ndev = ndev;
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
- NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_NETNS_LOCAL | NETIF_F_HW_TC;
+ NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_TC;
+ ndev->netns_immutable = true;
ndev->xdp_features = NETDEV_XDP_ACT_BASIC |
NETDEV_XDP_ACT_REDIRECT |
@@ -1416,6 +1424,7 @@ static int cpsw_create_ports(struct cpsw_common *cpsw)
ndev->netdev_ops = &cpsw_netdev_ops;
ndev->ethtool_ops = &cpsw_ethtool_ops;
SET_NETDEV_DEV(ndev, dev);
+ ndev->dev.of_node = slave_data->slave_node;
if (!napi_ndev) {
/* CPSW Host port CPDMA interface is shared between
@@ -1609,7 +1618,8 @@ static const struct devlink_ops cpsw_devlink_ops = {
};
static int cpsw_dl_switch_mode_get(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct cpsw_devlink *dl_priv = devlink_priv(dl);
struct cpsw_common *cpsw = dl_priv->cpsw;
@@ -1625,7 +1635,8 @@ static int cpsw_dl_switch_mode_get(struct devlink *dl, u32 id,
}
static int cpsw_dl_switch_mode_set(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct cpsw_devlink *dl_priv = devlink_priv(dl);
struct cpsw_common *cpsw = dl_priv->cpsw;
@@ -1743,7 +1754,8 @@ exit:
}
static int cpsw_dl_ale_ctrl_get(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct cpsw_devlink *dl_priv = devlink_priv(dl);
struct cpsw_common *cpsw = dl_priv->cpsw;
@@ -1762,7 +1774,8 @@ static int cpsw_dl_ale_ctrl_get(struct devlink *dl, u32 id,
}
static int cpsw_dl_ale_ctrl_set(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct cpsw_devlink *dl_priv = devlink_priv(dl);
struct cpsw_common *cpsw = dl_priv->cpsw;
@@ -2124,7 +2137,7 @@ static struct platform_driver cpsw_driver = {
.of_match_table = cpsw_of_mtable,
},
.probe = cpsw_probe,
- .remove_new = cpsw_remove,
+ .remove = cpsw_remove,
};
module_platform_driver(cpsw_driver);
diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
index 764ed298b570..bc4fdf17a99e 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.c
+++ b/drivers/net/ethernet/ti/cpsw_priv.c
@@ -614,24 +614,29 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
writel_relaxed(ETH_P_8021Q, &cpsw->regs->vlan_ltype);
}
-static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+int cpsw_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
{
struct cpsw_priv *priv = netdev_priv(dev);
struct cpsw_common *cpsw = priv->cpsw;
- struct hwtstamp_config cfg;
+
+ /* This will only execute if dev->see_all_hwtstamp_requests is set */
+ if (cfg->source != HWTSTAMP_SOURCE_NETDEV) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Switch mode only supports MAC timestamping");
+ return -EOPNOTSUPP;
+ }
if (cpsw->version != CPSW_VERSION_1 &&
cpsw->version != CPSW_VERSION_2 &&
cpsw->version != CPSW_VERSION_3)
return -EOPNOTSUPP;
- if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
- return -EFAULT;
-
- if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
+ if (cfg->tx_type != HWTSTAMP_TX_OFF && cfg->tx_type != HWTSTAMP_TX_ON)
return -ERANGE;
- switch (cfg.rx_filter) {
+ switch (cfg->rx_filter) {
case HWTSTAMP_FILTER_NONE:
priv->rx_ts_enabled = 0;
break;
@@ -651,13 +656,13 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
- cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ cfg->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
break;
default:
return -ERANGE;
}
- priv->tx_ts_enabled = cfg.tx_type == HWTSTAMP_TX_ON;
+ priv->tx_ts_enabled = cfg->tx_type == HWTSTAMP_TX_ON;
switch (cpsw->version) {
case CPSW_VERSION_1:
@@ -671,65 +676,40 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
WARN_ON(1);
}
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+ return 0;
}
-static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+int cpsw_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg)
{
struct cpsw_common *cpsw = ndev_to_cpsw(dev);
struct cpsw_priv *priv = netdev_priv(dev);
- struct hwtstamp_config cfg;
if (cpsw->version != CPSW_VERSION_1 &&
cpsw->version != CPSW_VERSION_2 &&
cpsw->version != CPSW_VERSION_3)
return -EOPNOTSUPP;
- cfg.flags = 0;
- cfg.tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
- cfg.rx_filter = priv->rx_ts_enabled;
+ cfg->tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ cfg->rx_filter = priv->rx_ts_enabled;
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+ return 0;
}
#else
-static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+int cpsw_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg)
{
return -EOPNOTSUPP;
}
-static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+int cpsw_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
}
#endif /*CONFIG_TI_CPTS*/
-int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
-{
- struct cpsw_priv *priv = netdev_priv(dev);
- struct cpsw_common *cpsw = priv->cpsw;
- int slave_no = cpsw_slave_index(cpsw, priv);
- struct phy_device *phy;
-
- if (!netif_running(dev))
- return -EINVAL;
-
- phy = cpsw->slaves[slave_no].phy;
-
- if (!phy_has_hwtstamp(phy)) {
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return cpsw_hwtstamp_set(dev, req);
- case SIOCGHWTSTAMP:
- return cpsw_hwtstamp_get(dev, req);
- }
- }
-
- if (phy)
- return phy_mii_ioctl(phy, req, cmd);
-
- return -EOPNOTSUPP;
-}
-
int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate)
{
struct cpsw_priv *priv = netdev_priv(ndev);
@@ -1404,6 +1384,9 @@ static int cpsw_qos_clsflower_add_policer(struct cpsw_priv *priv,
return -EOPNOTSUPP;
}
+ if (flow_rule_match_has_control_flags(rule, extack))
+ return -EOPNOTSUPP;
+
if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
NL_SET_ERR_MSG_MOD(extack, "Not matching on eth address");
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h
index 7efa72502c86..91add8925e23 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.h
+++ b/drivers/net/ethernet/ti/cpsw_priv.h
@@ -461,7 +461,6 @@ void soft_reset(const char *module, void __iomem *reg);
void cpsw_set_slave_mac(struct cpsw_slave *slave, struct cpsw_priv *priv);
void cpsw_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue);
int cpsw_need_resplit(struct cpsw_common *cpsw);
-int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd);
int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate);
int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
void *type_data);
@@ -469,6 +468,11 @@ bool cpsw_shp_is_off(struct cpsw_priv *priv);
void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv);
void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv);
void cpsw_qos_clsflower_resume(struct cpsw_priv *priv);
+int cpsw_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg);
+int cpsw_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack);
/* ethtool */
u32 cpsw_get_msglevel(struct net_device *ndev);
@@ -497,7 +501,6 @@ int cpsw_get_link_ksettings(struct net_device *ndev,
int cpsw_set_link_ksettings(struct net_device *ndev,
const struct ethtool_link_ksettings *ecmd);
int cpsw_get_eee(struct net_device *ndev, struct ethtool_keee *edata);
-int cpsw_set_eee(struct net_device *ndev, struct ethtool_keee *edata);
int cpsw_nway_reset(struct net_device *ndev);
void cpsw_get_ringparam(struct net_device *ndev,
struct ethtool_ringparam *ering,
@@ -510,6 +513,6 @@ int cpsw_set_ringparam(struct net_device *ndev,
int cpsw_set_channels_common(struct net_device *ndev,
struct ethtool_channels *chs,
cpdma_handler_fn rx_handler);
-int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info);
+int cpsw_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts_info *info);
#endif /* DRIVERS_NET_ETHERNET_TI_CPSW_PRIV_H_ */
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index dbbea9146040..2ba4c8795d60 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -181,7 +181,7 @@ void cpts_misc_interrupt(struct cpts *cpts)
}
EXPORT_SYMBOL_GPL(cpts_misc_interrupt);
-static u64 cpts_systim_read(const struct cyclecounter *cc)
+static u64 cpts_systim_read(struct cyclecounter *cc)
{
struct cpts *cpts = container_of(cc, struct cpts, cc);
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index b0950a318c42..ed8116fb05e9 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -2070,7 +2070,7 @@ static struct platform_driver davinci_emac_driver = {
.of_match_table = davinci_emac_of_match,
},
.probe = davinci_emac_probe,
- .remove_new = davinci_emac_remove,
+ .remove = davinci_emac_remove,
};
/**
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 8e07d4a1b6ba..48f85a3649b2 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -234,7 +234,6 @@ static int davinci_mdiobb_read_c22(struct mii_bus *bus, int phy, int reg)
ret = mdiobb_read_c22(bus, phy, reg);
- pm_runtime_mark_last_busy(bus->parent);
pm_runtime_put_autosuspend(bus->parent);
return ret;
@@ -251,7 +250,6 @@ static int davinci_mdiobb_write_c22(struct mii_bus *bus, int phy, int reg,
ret = mdiobb_write_c22(bus, phy, reg, val);
- pm_runtime_mark_last_busy(bus->parent);
pm_runtime_put_autosuspend(bus->parent);
return ret;
@@ -268,7 +266,6 @@ static int davinci_mdiobb_read_c45(struct mii_bus *bus, int phy, int devad,
ret = mdiobb_read_c45(bus, phy, devad, reg);
- pm_runtime_mark_last_busy(bus->parent);
pm_runtime_put_autosuspend(bus->parent);
return ret;
@@ -285,7 +282,6 @@ static int davinci_mdiobb_write_c45(struct mii_bus *bus, int phy, int devad,
ret = mdiobb_write_c45(bus, phy, devad, reg, val);
- pm_runtime_mark_last_busy(bus->parent);
pm_runtime_put_autosuspend(bus->parent);
return ret;
@@ -332,7 +328,6 @@ static int davinci_mdio_common_reset(struct davinci_mdio_data *data)
data->bus->phy_mask = phy_mask;
done:
- pm_runtime_mark_last_busy(data->dev);
pm_runtime_put_autosuspend(data->dev);
return 0;
@@ -441,7 +436,6 @@ static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
break;
}
- pm_runtime_mark_last_busy(data->dev);
pm_runtime_put_autosuspend(data->dev);
return ret;
}
@@ -478,7 +472,6 @@ static int davinci_mdio_write(struct mii_bus *bus, int phy_id,
break;
}
- pm_runtime_mark_last_busy(data->dev);
pm_runtime_put_autosuspend(data->dev);
return ret;
@@ -548,8 +541,8 @@ static int davinci_mdio_probe(struct platform_device *pdev)
struct davinci_mdio_data *data;
struct resource *res;
struct phy_device *phy;
- int ret, addr;
int autosuspend_delay_ms = -1;
+ int ret;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
@@ -652,14 +645,10 @@ static int davinci_mdio_probe(struct platform_device *pdev)
goto bail_out;
/* scan and dump the bus */
- for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
- phy = mdiobus_get_phy(data->bus, addr);
- if (phy) {
- dev_info(dev, "phy[%d]: device %s, driver %s\n",
- phy->mdio.addr, phydev_name(phy),
- phy->drv ? phy->drv->name : "unknown");
- }
- }
+ mdiobus_for_each_phy(data->bus, phy)
+ dev_info(dev, "phy[%d]: device %s, driver %s\n",
+ phy->mdio.addr, phydev_name(phy),
+ phy->drv ? phy->drv->name : "unknown");
return 0;
@@ -760,7 +749,7 @@ static struct platform_driver davinci_mdio_driver = {
.of_match_table = of_match_ptr(davinci_mdio_of_mtable),
},
.probe = davinci_mdio_probe,
- .remove_new = davinci_mdio_remove,
+ .remove = davinci_mdio_remove,
};
static int __init davinci_mdio_init(void)
diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c
index 3025e9c18970..ec085897edf0 100644
--- a/drivers/net/ethernet/ti/icssg/icss_iep.c
+++ b/drivers/net/ethernet/ti/icssg/icss_iep.c
@@ -17,6 +17,7 @@
#include <linux/timekeeping.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
+#include <linux/workqueue.h>
#include "icss_iep.h"
@@ -52,78 +53,6 @@
#define IEP_CAP_CFG_CAPNR_1ST_EVENT_EN(n) BIT(LATCH_INDEX(n))
#define IEP_CAP_CFG_CAP_ASYNC_EN(n) BIT(LATCH_INDEX(n) + 10)
-enum {
- ICSS_IEP_GLOBAL_CFG_REG,
- ICSS_IEP_GLOBAL_STATUS_REG,
- ICSS_IEP_COMPEN_REG,
- ICSS_IEP_SLOW_COMPEN_REG,
- ICSS_IEP_COUNT_REG0,
- ICSS_IEP_COUNT_REG1,
- ICSS_IEP_CAPTURE_CFG_REG,
- ICSS_IEP_CAPTURE_STAT_REG,
-
- ICSS_IEP_CAP6_RISE_REG0,
- ICSS_IEP_CAP6_RISE_REG1,
-
- ICSS_IEP_CAP7_RISE_REG0,
- ICSS_IEP_CAP7_RISE_REG1,
-
- ICSS_IEP_CMP_CFG_REG,
- ICSS_IEP_CMP_STAT_REG,
- ICSS_IEP_CMP0_REG0,
- ICSS_IEP_CMP0_REG1,
- ICSS_IEP_CMP1_REG0,
- ICSS_IEP_CMP1_REG1,
-
- ICSS_IEP_CMP8_REG0,
- ICSS_IEP_CMP8_REG1,
- ICSS_IEP_SYNC_CTRL_REG,
- ICSS_IEP_SYNC0_STAT_REG,
- ICSS_IEP_SYNC1_STAT_REG,
- ICSS_IEP_SYNC_PWIDTH_REG,
- ICSS_IEP_SYNC0_PERIOD_REG,
- ICSS_IEP_SYNC1_DELAY_REG,
- ICSS_IEP_SYNC_START_REG,
- ICSS_IEP_MAX_REGS,
-};
-
-/**
- * struct icss_iep_plat_data - Plat data to handle SoC variants
- * @config: Regmap configuration data
- * @reg_offs: register offsets to capture offset differences across SoCs
- * @flags: Flags to represent IEP properties
- */
-struct icss_iep_plat_data {
- struct regmap_config *config;
- u32 reg_offs[ICSS_IEP_MAX_REGS];
- u32 flags;
-};
-
-struct icss_iep {
- struct device *dev;
- void __iomem *base;
- const struct icss_iep_plat_data *plat_data;
- struct regmap *map;
- struct device_node *client_np;
- unsigned long refclk_freq;
- int clk_tick_time; /* one refclk tick time in ns */
- struct ptp_clock_info ptp_info;
- struct ptp_clock *ptp_clock;
- struct mutex ptp_clk_mutex; /* PHC access serializer */
- spinlock_t irq_lock; /* CMP IRQ vs icss_iep_ptp_enable access */
- u32 def_inc;
- s16 slow_cmp_inc;
- u32 slow_cmp_count;
- const struct icss_iep_clockops *ops;
- void *clockops_data;
- u32 cycle_time_ns;
- u32 perout_enabled;
- bool pps_enabled;
- int cap_cmp_irq;
- u64 period;
- u32 latch_enable;
-};
-
/**
* icss_iep_get_count_hi() - Get the upper 32 bit IEP counter
* @iep: Pointer to structure representing IEP.
@@ -192,14 +121,11 @@ static void icss_iep_update_to_next_boundary(struct icss_iep *iep, u64 start_ns)
*/
static void icss_iep_settime(struct icss_iep *iep, u64 ns)
{
- unsigned long flags;
-
if (iep->ops && iep->ops->settime) {
iep->ops->settime(iep->clockops_data, ns);
return;
}
- spin_lock_irqsave(&iep->irq_lock, flags);
if (iep->pps_enabled || iep->perout_enabled)
writel(0, iep->base + iep->plat_data->reg_offs[ICSS_IEP_SYNC_CTRL_REG]);
@@ -210,7 +136,6 @@ static void icss_iep_settime(struct icss_iep *iep, u64 ns)
writel(IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN,
iep->base + iep->plat_data->reg_offs[ICSS_IEP_SYNC_CTRL_REG]);
}
- spin_unlock_irqrestore(&iep->irq_lock, flags);
}
/**
@@ -290,6 +215,9 @@ static void icss_iep_enable_shadow_mode(struct icss_iep *iep)
for (cmp = IEP_MIN_CMP; cmp < IEP_MAX_CMP; cmp++) {
regmap_update_bits(iep->map, ICSS_IEP_CMP_STAT_REG,
IEP_CMP_STATUS(cmp), IEP_CMP_STATUS(cmp));
+
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+ IEP_CMP_CFG_CMP_EN(cmp), 0);
}
/* enable reset counter on CMP0 event */
@@ -478,66 +406,79 @@ static void icss_iep_update_to_next_boundary(struct icss_iep *iep, u64 start_ns)
static int icss_iep_perout_enable_hw(struct icss_iep *iep,
struct ptp_perout_request *req, int on)
{
+ struct timespec64 ts;
+ u64 ns_start;
+ u64 ns_width;
int ret;
u64 cmp;
+ if (!on) {
+ /* Disable CMP 1 */
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+ IEP_CMP_CFG_CMP_EN(1), 0);
+
+ /* clear CMP regs */
+ regmap_write(iep->map, ICSS_IEP_CMP1_REG0, 0);
+ if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
+ regmap_write(iep->map, ICSS_IEP_CMP1_REG1, 0);
+
+ /* Disable sync */
+ regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0);
+
+ return 0;
+ }
+
+ /* Calculate width of the signal for PPS/PEROUT handling */
+ ts.tv_sec = req->on.sec;
+ ts.tv_nsec = req->on.nsec;
+ ns_width = timespec64_to_ns(&ts);
+
+ if (req->flags & PTP_PEROUT_PHASE) {
+ ts.tv_sec = req->phase.sec;
+ ts.tv_nsec = req->phase.nsec;
+ ns_start = timespec64_to_ns(&ts);
+ } else {
+ ns_start = 0;
+ }
+
if (iep->ops && iep->ops->perout_enable) {
ret = iep->ops->perout_enable(iep->clockops_data, req, on, &cmp);
if (ret)
return ret;
- if (on) {
- /* Configure CMP */
- regmap_write(iep->map, ICSS_IEP_CMP1_REG0, lower_32_bits(cmp));
- if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
- regmap_write(iep->map, ICSS_IEP_CMP1_REG1, upper_32_bits(cmp));
- /* Configure SYNC, 1ms pulse width */
- regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, 1000000);
- regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
- regmap_write(iep->map, ICSS_IEP_SYNC_START_REG, 0);
- regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0); /* one-shot mode */
- /* Enable CMP 1 */
- regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
- IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1));
- } else {
- /* Disable CMP 1 */
- regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
- IEP_CMP_CFG_CMP_EN(1), 0);
-
- /* clear regs */
- regmap_write(iep->map, ICSS_IEP_CMP1_REG0, 0);
- if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
- regmap_write(iep->map, ICSS_IEP_CMP1_REG1, 0);
- }
+ /* Configure CMP */
+ regmap_write(iep->map, ICSS_IEP_CMP1_REG0, lower_32_bits(cmp));
+ if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
+ regmap_write(iep->map, ICSS_IEP_CMP1_REG1, upper_32_bits(cmp));
+ /* Configure SYNC, based on req on width */
+ regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG,
+ div_u64(ns_width, iep->def_inc));
+ regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
+ regmap_write(iep->map, ICSS_IEP_SYNC_START_REG,
+ div_u64(ns_start, iep->def_inc));
+ regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0); /* one-shot mode */
+ /* Enable CMP 1 */
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+ IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1));
} else {
- if (on) {
- u64 start_ns;
-
- iep->period = ((u64)req->period.sec * NSEC_PER_SEC) +
- req->period.nsec;
- start_ns = ((u64)req->period.sec * NSEC_PER_SEC)
- + req->period.nsec;
- icss_iep_update_to_next_boundary(iep, start_ns);
-
- /* Enable Sync in single shot mode */
- regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG,
- IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN);
- /* Enable CMP 1 */
- regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
- IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1));
- } else {
- /* Disable CMP 1 */
- regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
- IEP_CMP_CFG_CMP_EN(1), 0);
-
- /* clear CMP regs */
- regmap_write(iep->map, ICSS_IEP_CMP1_REG0, 0);
- if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
- regmap_write(iep->map, ICSS_IEP_CMP1_REG1, 0);
-
- /* Disable sync */
- regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0);
- }
+ u64 start_ns;
+
+ iep->period = ((u64)req->period.sec * NSEC_PER_SEC) +
+ req->period.nsec;
+ start_ns = ((u64)req->period.sec * NSEC_PER_SEC)
+ + req->period.nsec;
+ icss_iep_update_to_next_boundary(iep, start_ns);
+
+ regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG,
+ div_u64(ns_width, iep->def_inc));
+ regmap_write(iep->map, ICSS_IEP_SYNC_START_REG,
+ div_u64(ns_start, iep->def_inc));
+ /* Enable Sync in single shot mode */
+ regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG,
+ IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN);
+ /* Enable CMP 1 */
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+ IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1));
}
return 0;
@@ -546,9 +487,23 @@ static int icss_iep_perout_enable_hw(struct icss_iep *iep,
static int icss_iep_perout_enable(struct icss_iep *iep,
struct ptp_perout_request *req, int on)
{
- unsigned long flags;
int ret = 0;
+ if (!on)
+ goto disable;
+
+ /* Reject requests with unsupported flags */
+ if (req->flags & ~(PTP_PEROUT_DUTY_CYCLE |
+ PTP_PEROUT_PHASE))
+ return -EOPNOTSUPP;
+
+ /* Set default "on" time (1ms) for the signal if not passed by the app */
+ if (!(req->flags & PTP_PEROUT_DUTY_CYCLE)) {
+ req->on.sec = 0;
+ req->on.nsec = NSEC_PER_MSEC;
+ }
+
+disable:
mutex_lock(&iep->ptp_clk_mutex);
if (iep->pps_enabled) {
@@ -559,11 +514,9 @@ static int icss_iep_perout_enable(struct icss_iep *iep,
if (iep->perout_enabled == !!on)
goto exit;
- spin_lock_irqsave(&iep->irq_lock, flags);
ret = icss_iep_perout_enable_hw(iep, req, on);
if (!ret)
iep->perout_enabled = !!on;
- spin_unlock_irqrestore(&iep->irq_lock, flags);
exit:
mutex_unlock(&iep->ptp_clk_mutex);
@@ -571,11 +524,61 @@ exit:
return ret;
}
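Compared with the old behaviour (fixed 1 ms SYNC pulse starting at the period boundary), the rework above lets a periodic-output request carry an explicit duty cycle (req->on) and phase offset (req->phase), falling back to a 1 ms pulse when PTP_PEROUT_DUTY_CYCLE is not given. A minimal userspace sketch of exercising this through the standard PTP character device; the device path and output index are assumptions for illustration:

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/ptp_clock.h>

	/* Request a 1 Hz output with a 250 ms high time, starting 100 ms
	 * after the second, on periodic output index 0 of the PHC that is
	 * assumed to be exposed as /dev/ptp1.
	 */
	static int request_iep_perout(void)
	{
		struct ptp_perout_request req;
		int fd, ret;

		fd = open("/dev/ptp1", O_RDWR);
		if (fd < 0)
			return -1;

		memset(&req, 0, sizeof(req));
		req.index = 0;
		req.flags = PTP_PEROUT_DUTY_CYCLE | PTP_PEROUT_PHASE;
		req.period.sec = 1;		/* 1 Hz */
		req.on.nsec = 250000000;	/* 250 ms duty cycle */
		req.phase.nsec = 100000000;	/* 100 ms phase offset */

		ret = ioctl(fd, PTP_PEROUT_REQUEST2, &req);
		close(fd);
		return ret;
	}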
+static void icss_iep_cap_cmp_work(struct work_struct *work)
+{
+ struct icss_iep *iep = container_of(work, struct icss_iep, work);
+ const u32 *reg_offs = iep->plat_data->reg_offs;
+ struct ptp_clock_event pevent;
+ unsigned int val;
+ u64 ns, ns_next;
+
+ mutex_lock(&iep->ptp_clk_mutex);
+
+ ns = readl(iep->base + reg_offs[ICSS_IEP_CMP1_REG0]);
+ if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT) {
+ val = readl(iep->base + reg_offs[ICSS_IEP_CMP1_REG1]);
+ ns |= (u64)val << 32;
+ }
+ /* set next event */
+ ns_next = ns + iep->period;
+ writel(lower_32_bits(ns_next),
+ iep->base + reg_offs[ICSS_IEP_CMP1_REG0]);
+ if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
+ writel(upper_32_bits(ns_next),
+ iep->base + reg_offs[ICSS_IEP_CMP1_REG1]);
+
+ pevent.pps_times.ts_real = ns_to_timespec64(ns);
+ pevent.type = PTP_CLOCK_PPSUSR;
+ pevent.index = 0;
+ ptp_clock_event(iep->ptp_clock, &pevent);
+ dev_dbg(iep->dev, "IEP:pps ts: %llu next:%llu:\n", ns, ns_next);
+
+ mutex_unlock(&iep->ptp_clk_mutex);
+}
+
+static irqreturn_t icss_iep_cap_cmp_irq(int irq, void *dev_id)
+{
+ struct icss_iep *iep = (struct icss_iep *)dev_id;
+ const u32 *reg_offs = iep->plat_data->reg_offs;
+ unsigned int val;
+
+ val = readl(iep->base + reg_offs[ICSS_IEP_CMP_STAT_REG]);
+ /* The driver only enables CMP1 */
+ if (val & BIT(1)) {
+ /* Clear the event */
+ writel(BIT(1), iep->base + reg_offs[ICSS_IEP_CMP_STAT_REG]);
+ if (iep->pps_enabled || iep->perout_enabled)
+ schedule_work(&iep->work);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
static int icss_iep_pps_enable(struct icss_iep *iep, int on)
{
struct ptp_clock_request rq;
struct timespec64 ts;
- unsigned long flags;
int ret = 0;
u64 ns;
@@ -589,26 +592,27 @@ static int icss_iep_pps_enable(struct icss_iep *iep, int on)
if (iep->pps_enabled == !!on)
goto exit;
- spin_lock_irqsave(&iep->irq_lock, flags);
-
rq.perout.index = 0;
if (on) {
ns = icss_iep_gettime(iep, NULL);
ts = ns_to_timespec64(ns);
+ rq.perout.flags = 0;
rq.perout.period.sec = 1;
rq.perout.period.nsec = 0;
rq.perout.start.sec = ts.tv_sec + 2;
rq.perout.start.nsec = 0;
+ rq.perout.on.sec = 0;
+ rq.perout.on.nsec = NSEC_PER_MSEC;
ret = icss_iep_perout_enable_hw(iep, &rq.perout, on);
} else {
ret = icss_iep_perout_enable_hw(iep, &rq.perout, on);
+ if (iep->cap_cmp_irq)
+ cancel_work_sync(&iep->work);
}
if (!ret)
iep->pps_enabled = !!on;
- spin_unlock_irqrestore(&iep->irq_lock, flags);
-
exit:
mutex_unlock(&iep->ptp_clk_mutex);
@@ -617,7 +621,8 @@ exit:
static int icss_iep_extts_enable(struct icss_iep *iep, u32 index, int on)
{
- u32 val, cap, ret = 0;
+ u32 val, cap;
+ int ret = 0;
mutex_lock(&iep->ptp_clk_mutex);
@@ -681,11 +686,17 @@ struct icss_iep *icss_iep_get_idx(struct device_node *np, int idx)
struct platform_device *pdev;
struct device_node *iep_np;
struct icss_iep *iep;
+ int ret;
iep_np = of_parse_phandle(np, "ti,iep", idx);
- if (!iep_np || !of_device_is_available(iep_np))
+ if (!iep_np)
return ERR_PTR(-ENODEV);
+ if (!of_device_is_available(iep_np)) {
+ of_node_put(iep_np);
+ return ERR_PTR(-ENODEV);
+ }
+
pdev = of_find_device_by_node(iep_np);
of_node_put(iep_np);
@@ -694,21 +705,28 @@ struct icss_iep *icss_iep_get_idx(struct device_node *np, int idx)
return ERR_PTR(-EPROBE_DEFER);
iep = platform_get_drvdata(pdev);
- if (!iep)
- return ERR_PTR(-EPROBE_DEFER);
+ if (!iep) {
+ ret = -EPROBE_DEFER;
+ goto err_put_pdev;
+ }
device_lock(iep->dev);
if (iep->client_np) {
device_unlock(iep->dev);
dev_err(iep->dev, "IEP is already acquired by %s",
iep->client_np->name);
- return ERR_PTR(-EBUSY);
+ ret = -EBUSY;
+ goto err_put_pdev;
}
iep->client_np = np;
device_unlock(iep->dev);
- get_device(iep->dev);
return iep;
+
+err_put_pdev:
+ put_device(&pdev->dev);
+
+ return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(icss_iep_get_idx);
@@ -777,6 +795,8 @@ int icss_iep_init(struct icss_iep *iep, const struct icss_iep_clockops *clkops,
if (iep->ops && iep->ops->perout_enable) {
iep->ptp_info.n_per_out = 1;
iep->ptp_info.pps = 1;
+ } else if (iep->cap_cmp_irq) {
+ iep->ptp_info.pps = 1;
}
if (iep->ops && iep->ops->extts_enable)
@@ -808,6 +828,11 @@ int icss_iep_exit(struct icss_iep *iep)
}
icss_iep_disable(iep);
+ if (iep->pps_enabled)
+ icss_iep_pps_enable(iep, false);
+ else if (iep->perout_enabled)
+ icss_iep_perout_enable(iep, NULL, false);
+
return 0;
}
EXPORT_SYMBOL_GPL(icss_iep_exit);
@@ -817,6 +842,7 @@ static int icss_iep_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct icss_iep *iep;
struct clk *iep_clk;
+ int ret, irq;
iep = devm_kzalloc(dev, sizeof(*iep), GFP_KERNEL);
if (!iep)
@@ -827,6 +853,22 @@ static int icss_iep_probe(struct platform_device *pdev)
if (IS_ERR(iep->base))
return -ENODEV;
+ irq = platform_get_irq_byname_optional(pdev, "iep_cap_cmp");
+ if (irq == -EPROBE_DEFER)
+ return irq;
+
+ if (irq > 0) {
+ ret = devm_request_irq(dev, irq, icss_iep_cap_cmp_irq,
+ IRQF_TRIGGER_HIGH, "iep_cap_cmp", iep);
+ if (ret) {
+ dev_info(iep->dev, "cap_cmp irq request failed: %x\n",
+ ret);
+ } else {
+ iep->cap_cmp_irq = irq;
+ INIT_WORK(&iep->work, icss_iep_cap_cmp_work);
+ }
+ }
+
iep_clk = devm_clk_get(dev, NULL);
if (IS_ERR(iep_clk))
return PTR_ERR(iep_clk);
@@ -853,7 +895,6 @@ static int icss_iep_probe(struct platform_device *pdev)
iep->ptp_info = icss_iep_ptp_info;
mutex_init(&iep->ptp_clk_mutex);
- spin_lock_init(&iep->irq_lock);
dev_set_drvdata(dev, iep);
icss_iep_disable(iep);
@@ -892,7 +933,7 @@ static int icss_iep_regmap_read(void *context, unsigned int reg,
return 0;
}
-static struct regmap_config am654_icss_iep_regmap_config = {
+static const struct regmap_config am654_icss_iep_regmap_config = {
.name = "icss iep",
.reg_stride = 1,
.reg_write = icss_iep_regmap_write,
@@ -941,11 +982,112 @@ static const struct icss_iep_plat_data am654_icss_iep_plat_data = {
.config = &am654_icss_iep_regmap_config,
};
+static const struct icss_iep_plat_data am57xx_icss_iep_plat_data = {
+ .flags = ICSS_IEP_64BIT_COUNTER_SUPPORT |
+ ICSS_IEP_SLOW_COMPEN_REG_SUPPORT,
+ .reg_offs = {
+ [ICSS_IEP_GLOBAL_CFG_REG] = 0x00,
+ [ICSS_IEP_COMPEN_REG] = 0x08,
+ [ICSS_IEP_SLOW_COMPEN_REG] = 0x0c,
+ [ICSS_IEP_COUNT_REG0] = 0x10,
+ [ICSS_IEP_COUNT_REG1] = 0x14,
+ [ICSS_IEP_CAPTURE_CFG_REG] = 0x18,
+ [ICSS_IEP_CAPTURE_STAT_REG] = 0x1c,
+
+ [ICSS_IEP_CAP6_RISE_REG0] = 0x50,
+ [ICSS_IEP_CAP6_RISE_REG1] = 0x54,
+
+ [ICSS_IEP_CAP7_RISE_REG0] = 0x60,
+ [ICSS_IEP_CAP7_RISE_REG1] = 0x64,
+
+ [ICSS_IEP_CMP_CFG_REG] = 0x70,
+ [ICSS_IEP_CMP_STAT_REG] = 0x74,
+ [ICSS_IEP_CMP0_REG0] = 0x78,
+ [ICSS_IEP_CMP0_REG1] = 0x7c,
+ [ICSS_IEP_CMP1_REG0] = 0x80,
+ [ICSS_IEP_CMP1_REG1] = 0x84,
+
+ [ICSS_IEP_CMP8_REG0] = 0xc0,
+ [ICSS_IEP_CMP8_REG1] = 0xc4,
+ [ICSS_IEP_SYNC_CTRL_REG] = 0x180,
+ [ICSS_IEP_SYNC0_STAT_REG] = 0x188,
+ [ICSS_IEP_SYNC1_STAT_REG] = 0x18c,
+ [ICSS_IEP_SYNC_PWIDTH_REG] = 0x190,
+ [ICSS_IEP_SYNC0_PERIOD_REG] = 0x194,
+ [ICSS_IEP_SYNC1_DELAY_REG] = 0x198,
+ [ICSS_IEP_SYNC_START_REG] = 0x19c,
+ },
+ .config = &am654_icss_iep_regmap_config,
+};
+
+static bool am335x_icss_iep_valid_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case ICSS_IEP_GLOBAL_CFG_REG ... ICSS_IEP_CAPTURE_STAT_REG:
+ case ICSS_IEP_CAP6_RISE_REG0:
+ case ICSS_IEP_CMP_CFG_REG ... ICSS_IEP_CMP0_REG0:
+ case ICSS_IEP_CMP8_REG0 ... ICSS_IEP_SYNC_START_REG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config am335x_icss_iep_regmap_config = {
+ .name = "icss iep",
+ .reg_stride = 1,
+ .reg_write = icss_iep_regmap_write,
+ .reg_read = icss_iep_regmap_read,
+ .writeable_reg = am335x_icss_iep_valid_reg,
+ .readable_reg = am335x_icss_iep_valid_reg,
+};
+
+static const struct icss_iep_plat_data am335x_icss_iep_plat_data = {
+ .flags = 0,
+ .reg_offs = {
+ [ICSS_IEP_GLOBAL_CFG_REG] = 0x00,
+ [ICSS_IEP_COMPEN_REG] = 0x08,
+ [ICSS_IEP_COUNT_REG0] = 0x0c,
+ [ICSS_IEP_CAPTURE_CFG_REG] = 0x10,
+ [ICSS_IEP_CAPTURE_STAT_REG] = 0x14,
+
+ [ICSS_IEP_CAP6_RISE_REG0] = 0x30,
+
+ [ICSS_IEP_CAP7_RISE_REG0] = 0x38,
+
+ [ICSS_IEP_CMP_CFG_REG] = 0x40,
+ [ICSS_IEP_CMP_STAT_REG] = 0x44,
+ [ICSS_IEP_CMP0_REG0] = 0x48,
+
+ [ICSS_IEP_CMP8_REG0] = 0x88,
+ [ICSS_IEP_SYNC_CTRL_REG] = 0x100,
+ [ICSS_IEP_SYNC0_STAT_REG] = 0x108,
+ [ICSS_IEP_SYNC1_STAT_REG] = 0x10c,
+ [ICSS_IEP_SYNC_PWIDTH_REG] = 0x110,
+ [ICSS_IEP_SYNC0_PERIOD_REG] = 0x114,
+ [ICSS_IEP_SYNC1_DELAY_REG] = 0x118,
+ [ICSS_IEP_SYNC_START_REG] = 0x11c,
+ },
+ .config = &am335x_icss_iep_regmap_config,
+};
+
static const struct of_device_id icss_iep_of_match[] = {
{
.compatible = "ti,am654-icss-iep",
.data = &am654_icss_iep_plat_data,
},
+ {
+ .compatible = "ti,am5728-icss-iep",
+ .data = &am57xx_icss_iep_plat_data,
+ },
+ {
+ .compatible = "ti,am4376-icss-iep",
+ .data = &am335x_icss_iep_plat_data,
+ },
+ {
+ .compatible = "ti,am3356-icss-iep",
+ .data = &am335x_icss_iep_plat_data,
+ },
{},
};
MODULE_DEVICE_TABLE(of, icss_iep_of_match);
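The table above adds AM57xx and AM335x/AM437x compatibles, each pointing at a per-SoC icss_iep_plat_data with the appropriate register offsets and regmap config. The probe path (not part of this hunk) would then pick the variant from the matched entry; a minimal sketch, assuming it uses device_get_match_data():

	iep->plat_data = device_get_match_data(dev);
	if (!iep->plat_data)
		return -EINVAL;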
diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.h b/drivers/net/ethernet/ti/icssg/icss_iep.h
index 803a4b714893..0bdca0155abd 100644
--- a/drivers/net/ethernet/ti/icssg/icss_iep.h
+++ b/drivers/net/ethernet/ti/icssg/icss_iep.h
@@ -12,7 +12,78 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/regmap.h>
-struct icss_iep;
+enum {
+ ICSS_IEP_GLOBAL_CFG_REG,
+ ICSS_IEP_GLOBAL_STATUS_REG,
+ ICSS_IEP_COMPEN_REG,
+ ICSS_IEP_SLOW_COMPEN_REG,
+ ICSS_IEP_COUNT_REG0,
+ ICSS_IEP_COUNT_REG1,
+ ICSS_IEP_CAPTURE_CFG_REG,
+ ICSS_IEP_CAPTURE_STAT_REG,
+
+ ICSS_IEP_CAP6_RISE_REG0,
+ ICSS_IEP_CAP6_RISE_REG1,
+
+ ICSS_IEP_CAP7_RISE_REG0,
+ ICSS_IEP_CAP7_RISE_REG1,
+
+ ICSS_IEP_CMP_CFG_REG,
+ ICSS_IEP_CMP_STAT_REG,
+ ICSS_IEP_CMP0_REG0,
+ ICSS_IEP_CMP0_REG1,
+ ICSS_IEP_CMP1_REG0,
+ ICSS_IEP_CMP1_REG1,
+
+ ICSS_IEP_CMP8_REG0,
+ ICSS_IEP_CMP8_REG1,
+ ICSS_IEP_SYNC_CTRL_REG,
+ ICSS_IEP_SYNC0_STAT_REG,
+ ICSS_IEP_SYNC1_STAT_REG,
+ ICSS_IEP_SYNC_PWIDTH_REG,
+ ICSS_IEP_SYNC0_PERIOD_REG,
+ ICSS_IEP_SYNC1_DELAY_REG,
+ ICSS_IEP_SYNC_START_REG,
+ ICSS_IEP_MAX_REGS,
+};
+
+/**
+ * struct icss_iep_plat_data - Plat data to handle SoC variants
+ * @config: Regmap configuration data
+ * @reg_offs: register offsets to capture offset differences across SoCs
+ * @flags: Flags to represent IEP properties
+ */
+struct icss_iep_plat_data {
+ const struct regmap_config *config;
+ u32 reg_offs[ICSS_IEP_MAX_REGS];
+ u32 flags;
+};
+
+struct icss_iep {
+ struct device *dev;
+ void __iomem *base;
+ const struct icss_iep_plat_data *plat_data;
+ struct regmap *map;
+ struct device_node *client_np;
+ unsigned long refclk_freq;
+ int clk_tick_time; /* one refclk tick time in ns */
+ struct ptp_clock_info ptp_info;
+ struct ptp_clock *ptp_clock;
+ struct mutex ptp_clk_mutex; /* PHC access serializer */
+ u32 def_inc;
+ s16 slow_cmp_inc;
+ u32 slow_cmp_count;
+ const struct icss_iep_clockops *ops;
+ void *clockops_data;
+ u32 cycle_time_ns;
+ u32 perout_enabled;
+ bool pps_enabled;
+ int cap_cmp_irq;
+ u64 period;
+ u32 latch_enable;
+ struct work_struct work;
+};
+
extern const struct icss_iep_clockops prueth_iep_clockops;
/* Firmware specific clock operations */
diff --git a/drivers/net/ethernet/ti/icssg/icssg_classifier.c b/drivers/net/ethernet/ti/icssg/icssg_classifier.c
index 6df53ab17fbc..833ca86d0b71 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_classifier.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_classifier.c
@@ -274,12 +274,23 @@ static void rx_class_set_or(struct regmap *miig_rt, int slice, int n,
regmap_write(miig_rt, offset, data);
}
+static u32 rx_class_get_or(struct regmap *miig_rt, int slice, int n)
+{
+ u32 offset, val;
+
+ offset = RX_CLASS_N_REG(slice, n, RX_CLASS_OR_EN);
+ regmap_read(miig_rt, offset, &val);
+
+ return val;
+}
+
void icssg_class_set_host_mac_addr(struct regmap *miig_rt, const u8 *mac)
{
regmap_write(miig_rt, MAC_INTERFACE_0, (u32)(mac[0] | mac[1] << 8 |
mac[2] << 16 | mac[3] << 24));
regmap_write(miig_rt, MAC_INTERFACE_1, (u32)(mac[4] | mac[5] << 8));
}
+EXPORT_SYMBOL_GPL(icssg_class_set_host_mac_addr);
void icssg_class_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac)
{
@@ -287,6 +298,27 @@ void icssg_class_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac)
mac[2] << 16 | mac[3] << 24));
regmap_write(miig_rt, offs[slice].mac1, (u32)(mac[4] | mac[5] << 8));
}
+EXPORT_SYMBOL_GPL(icssg_class_set_mac_addr);
+
+static void icssg_class_ft1_add_mcast(struct regmap *miig_rt, int slice,
+ int slot, const u8 *addr, const u8 *mask)
+{
+ u32 val;
+ int i;
+
+ WARN(slot >= FT1_NUM_SLOTS, "invalid slot: %d\n", slot);
+
+ rx_class_ft1_set_da(miig_rt, slice, slot, addr);
+ rx_class_ft1_set_da_mask(miig_rt, slice, slot, mask);
+ rx_class_ft1_cfg_set_type(miig_rt, slice, slot, FT1_CFG_TYPE_EQ);
+
+ /* Enable the FT1 slot in OR enable for all classifiers */
+ for (i = 0; i < ICSSG_NUM_CLASSIFIERS_IN_USE; i++) {
+ val = rx_class_get_or(miig_rt, slice, i);
+ val |= RX_CLASS_FT_FT1_MATCH(slot);
+ rx_class_set_or(miig_rt, slice, i, val);
+ }
+}
/* disable all RX traffic */
void icssg_class_disable(struct regmap *miig_rt, int slice)
@@ -330,38 +362,108 @@ void icssg_class_disable(struct regmap *miig_rt, int slice)
/* clear CFG2 */
regmap_write(miig_rt, offs[slice].rx_class_cfg2, 0);
}
+EXPORT_SYMBOL_GPL(icssg_class_disable);
-void icssg_class_default(struct regmap *miig_rt, int slice, bool allmulti)
+void icssg_class_default(struct regmap *miig_rt, int slice, bool allmulti,
+ bool is_sr1)
{
+ int num_classifiers = is_sr1 ? ICSSG_NUM_CLASSIFIERS_IN_USE : 1;
u32 data;
+ int n;
/* defaults */
icssg_class_disable(miig_rt, slice);
/* Setup Classifier */
- /* match on Broadcast or MAC_PRU address */
- data = RX_CLASS_FT_BC | RX_CLASS_FT_DA_P;
+ for (n = 0; n < num_classifiers; n++) {
+ /* match on Broadcast or MAC_PRU address */
+ data = RX_CLASS_FT_BC | RX_CLASS_FT_DA_P;
- /* multicast */
- if (allmulti)
- data |= RX_CLASS_FT_MC;
+ /* multicast */
+ if (allmulti)
+ data |= RX_CLASS_FT_MC;
- rx_class_set_or(miig_rt, slice, 0, data);
+ rx_class_set_or(miig_rt, slice, n, data);
- /* set CFG1 for OR_OR_AND for classifier */
- rx_class_sel_set_type(miig_rt, slice, 0, RX_CLASS_SEL_TYPE_OR_OR_AND);
+ /* set CFG1 for OR_OR_AND for classifier */
+ rx_class_sel_set_type(miig_rt, slice, n,
+ RX_CLASS_SEL_TYPE_OR_OR_AND);
+ }
/* clear CFG2 */
regmap_write(miig_rt, offs[slice].rx_class_cfg2, 0);
}
+EXPORT_SYMBOL_GPL(icssg_class_default);
+
+void icssg_class_promiscuous_sr1(struct regmap *miig_rt, int slice)
+{
+ u32 data, offset;
+ int n;
+
+ /* defaults */
+ icssg_class_disable(miig_rt, slice);
+
+ /* Setup Classifier */
+ for (n = 0; n < ICSSG_NUM_CLASSIFIERS_IN_USE; n++) {
+ /* set RAW_MASK to bypass filters */
+ offset = RX_CLASS_GATES_N_REG(slice, n);
+ regmap_read(miig_rt, offset, &data);
+ data |= RX_CLASS_GATES_RAW_MASK;
+ regmap_write(miig_rt, offset, data);
+ }
+}
+EXPORT_SYMBOL_GPL(icssg_class_promiscuous_sr1);
+
+void icssg_class_add_mcast_sr1(struct regmap *miig_rt, int slice,
+ struct net_device *ndev)
+{
+ u8 mask_addr[6] = { 0, 0, 0, 0, 0, 0xff };
+ struct netdev_hw_addr *ha;
+ int slot = 2;
+
+ rx_class_ft1_set_start_len(miig_rt, slice, 0, 6);
+ /* reserve first 2 slots for
+ * 1) 01-80-C2-00-00-XX Known Service Ethernet Multicast addresses
+ * 2) 01-00-5e-00-00-XX Local Network Control Block
+ * (224.0.0.0 - 224.0.0.255 (224.0.0/24))
+ */
+ icssg_class_ft1_add_mcast(miig_rt, slice, 0,
+ eth_reserved_addr_base, mask_addr);
+ icssg_class_ft1_add_mcast(miig_rt, slice, 1,
+ eth_ipv4_mcast_addr_base, mask_addr);
+ mask_addr[5] = 0;
+ netdev_for_each_mc_addr(ha, ndev) {
+ /* skip addresses matching reserved slots */
+ if (!memcmp(eth_reserved_addr_base, ha->addr, 5) ||
+ !memcmp(eth_ipv4_mcast_addr_base, ha->addr, 5)) {
+ netdev_dbg(ndev, "mcast skip %pM\n", ha->addr);
+ continue;
+ }
+
+ if (slot >= FT1_NUM_SLOTS) {
+ netdev_dbg(ndev,
+ "can't add more than %d MC addresses, enabling allmulti\n",
+ FT1_NUM_SLOTS);
+ icssg_class_default(miig_rt, slice, 1, true);
+ break;
+ }
+
+ netdev_dbg(ndev, "mcast add %pM\n", ha->addr);
+ icssg_class_ft1_add_mcast(miig_rt, slice, slot,
+ ha->addr, mask_addr);
+ slot++;
+ }
+}
+EXPORT_SYMBOL_GPL(icssg_class_add_mcast_sr1);
/* required for SAV check */
void icssg_ft1_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac_addr)
{
const u8 mask_addr[] = { 0, 0, 0, 0, 0, 0, };
- rx_class_ft1_set_start_len(miig_rt, slice, 0, 6);
+ rx_class_ft1_set_start_len(miig_rt, slice, ETH_ALEN, ETH_ALEN);
rx_class_ft1_set_da(miig_rt, slice, 0, mac_addr);
rx_class_ft1_set_da_mask(miig_rt, slice, 0, mask_addr);
rx_class_ft1_cfg_set_type(miig_rt, slice, 0, FT1_CFG_TYPE_EQ);
}
+EXPORT_SYMBOL_GPL(icssg_ft1_set_mac_addr);
diff --git a/drivers/net/ethernet/ti/icssg/icssg_common.c b/drivers/net/ethernet/ti/icssg/icssg_common.c
new file mode 100644
index 000000000000..090aa74d3ce7
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssg/icssg_common.c
@@ -0,0 +1,1853 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Texas Instruments ICSSG Ethernet Driver
+ *
+ * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
+ * Copyright (C) Siemens AG, 2024
+ *
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/dma/ti-cppi5.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/remoteproc/pruss.h>
+#include <linux/regmap.h>
+#include <linux/remoteproc.h>
+
+#include "icssg_prueth.h"
+#include "../k3-cppi-desc-pool.h"
+
+/* Netif debug messages possible */
+#define PRUETH_EMAC_DEBUG (NETIF_MSG_DRV | \
+ NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK | \
+ NETIF_MSG_TIMER | \
+ NETIF_MSG_IFDOWN | \
+ NETIF_MSG_IFUP | \
+ NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR | \
+ NETIF_MSG_TX_QUEUED | \
+ NETIF_MSG_INTR | \
+ NETIF_MSG_TX_DONE | \
+ NETIF_MSG_RX_STATUS | \
+ NETIF_MSG_PKTDATA | \
+ NETIF_MSG_HW | \
+ NETIF_MSG_WOL)
+
+#define prueth_napi_to_emac(napi) container_of(napi, struct prueth_emac, napi_rx)
+
+void prueth_cleanup_rx_chns(struct prueth_emac *emac,
+ struct prueth_rx_chn *rx_chn,
+ int max_rflows)
+{
+ if (rx_chn->pg_pool) {
+ page_pool_destroy(rx_chn->pg_pool);
+ rx_chn->pg_pool = NULL;
+ }
+
+ if (rx_chn->desc_pool)
+ k3_cppi_desc_pool_destroy(rx_chn->desc_pool);
+
+ if (rx_chn->rx_chn)
+ k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
+}
+EXPORT_SYMBOL_GPL(prueth_cleanup_rx_chns);
+
+void prueth_cleanup_tx_chns(struct prueth_emac *emac)
+{
+ int i;
+
+ for (i = 0; i < emac->tx_ch_num; i++) {
+ struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];
+
+ if (tx_chn->desc_pool)
+ k3_cppi_desc_pool_destroy(tx_chn->desc_pool);
+
+ if (tx_chn->tx_chn)
+ k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
+
+ /* Assume prueth_cleanup_tx_chns() is called at the
+ * end after all channel resources are freed
+ */
+ memset(tx_chn, 0, sizeof(*tx_chn));
+ }
+}
+EXPORT_SYMBOL_GPL(prueth_cleanup_tx_chns);
+
+void prueth_ndev_del_tx_napi(struct prueth_emac *emac, int num)
+{
+ int i;
+
+ for (i = 0; i < num; i++) {
+ struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];
+
+ if (tx_chn->irq)
+ free_irq(tx_chn->irq, tx_chn);
+ netif_napi_del(&tx_chn->napi_tx);
+ }
+}
+EXPORT_SYMBOL_GPL(prueth_ndev_del_tx_napi);
+
+static int emac_xsk_xmit_zc(struct prueth_emac *emac,
+ unsigned int q_idx)
+{
+ struct prueth_tx_chn *tx_chn = &emac->tx_chns[q_idx];
+ struct xsk_buff_pool *pool = tx_chn->xsk_pool;
+ struct net_device *ndev = emac->ndev;
+ struct cppi5_host_desc_t *host_desc;
+ dma_addr_t dma_desc, dma_buf;
+ struct prueth_swdata *swdata;
+ struct xdp_desc xdp_desc;
+ int num_tx = 0, pkt_len;
+ int descs_avail, ret;
+ u32 *epib;
+ int i;
+
+ descs_avail = k3_cppi_desc_pool_avail(tx_chn->desc_pool);
+ /* ensure that TX ring is not filled up by XDP, always MAX_SKB_FRAGS
+ * will be available for normal TX path and queue is stopped there if
+ * necessary
+ */
+ if (descs_avail <= MAX_SKB_FRAGS)
+ return 0;
+
+ descs_avail -= MAX_SKB_FRAGS;
+
+ for (i = 0; i < descs_avail; i++) {
+ if (!xsk_tx_peek_desc(pool, &xdp_desc))
+ break;
+
+ dma_buf = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
+ pkt_len = xdp_desc.len;
+ xsk_buff_raw_dma_sync_for_device(pool, dma_buf, pkt_len);
+
+ host_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
+ if (unlikely(!host_desc))
+ break;
+
+ cppi5_hdesc_init(host_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
+ PRUETH_NAV_PS_DATA_SIZE);
+ cppi5_hdesc_set_pkttype(host_desc, 0);
+ epib = host_desc->epib;
+ epib[0] = 0;
+ epib[1] = 0;
+ cppi5_hdesc_set_pktlen(host_desc, pkt_len);
+ cppi5_desc_set_tags_ids(&host_desc->hdr, 0,
+ (emac->port_id | (q_idx << 8)));
+
+ k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &dma_buf);
+ cppi5_hdesc_attach_buf(host_desc, dma_buf, pkt_len, dma_buf,
+ pkt_len);
+
+ swdata = cppi5_hdesc_get_swdata(host_desc);
+ swdata->type = PRUETH_SWDATA_XSK;
+
+ dma_desc = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool,
+ host_desc);
+ ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn,
+ host_desc, dma_desc);
+
+ if (ret) {
+ ndev->stats.tx_errors++;
+ k3_cppi_desc_pool_free(tx_chn->desc_pool, host_desc);
+ break;
+ }
+
+ num_tx++;
+ }
+
+ xsk_tx_release(tx_chn->xsk_pool);
+ return num_tx;
+}
+
+void prueth_xmit_free(struct prueth_tx_chn *tx_chn,
+ struct cppi5_host_desc_t *desc)
+{
+ struct cppi5_host_desc_t *first_desc, *next_desc;
+ dma_addr_t buf_dma, next_desc_dma;
+ struct prueth_swdata *swdata;
+ u32 buf_dma_len;
+
+ first_desc = desc;
+ next_desc = first_desc;
+ swdata = cppi5_hdesc_get_swdata(first_desc);
+ if (swdata->type == PRUETH_SWDATA_XSK)
+ goto free_pool;
+
+ cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
+
+ dma_unmap_single(tx_chn->dma_dev, buf_dma, buf_dma_len,
+ DMA_TO_DEVICE);
+
+ next_desc_dma = cppi5_hdesc_get_next_hbdesc(first_desc);
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
+ while (next_desc_dma) {
+ next_desc = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
+ next_desc_dma);
+ cppi5_hdesc_get_obuf(next_desc, &buf_dma, &buf_dma_len);
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
+
+ dma_unmap_page(tx_chn->dma_dev, buf_dma, buf_dma_len,
+ DMA_TO_DEVICE);
+
+ next_desc_dma = cppi5_hdesc_get_next_hbdesc(next_desc);
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
+
+ k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
+ }
+
+free_pool:
+ k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
+}
+EXPORT_SYMBOL_GPL(prueth_xmit_free);
+
+int emac_tx_complete_packets(struct prueth_emac *emac, int chn,
+ int budget, bool *tdown)
+{
+ struct net_device *ndev = emac->ndev;
+ struct cppi5_host_desc_t *desc_tx;
+ struct netdev_queue *netif_txq;
+ struct prueth_swdata *swdata;
+ struct prueth_tx_chn *tx_chn;
+ unsigned int total_bytes = 0;
+ int xsk_frames_done = 0;
+ struct xdp_frame *xdpf;
+ unsigned int pkt_len;
+ struct sk_buff *skb;
+ dma_addr_t desc_dma;
+ int res, num_tx = 0;
+
+ tx_chn = &emac->tx_chns[chn];
+
+ while (true) {
+ res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
+ if (res == -ENODATA)
+ break;
+
+ /* teardown completion */
+ if (cppi5_desc_is_tdcm(desc_dma)) {
+ if (atomic_dec_and_test(&emac->tdown_cnt))
+ complete(&emac->tdown_complete);
+ *tdown = true;
+ break;
+ }
+
+ desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
+ desc_dma);
+ swdata = cppi5_hdesc_get_swdata(desc_tx);
+
+ switch (swdata->type) {
+ case PRUETH_SWDATA_SKB:
+ skb = swdata->data.skb;
+ dev_sw_netstats_tx_add(skb->dev, 1, skb->len);
+ total_bytes += skb->len;
+ napi_consume_skb(skb, budget);
+ break;
+ case PRUETH_SWDATA_XDPF:
+ xdpf = swdata->data.xdpf;
+ dev_sw_netstats_tx_add(ndev, 1, xdpf->len);
+ total_bytes += xdpf->len;
+ xdp_return_frame(xdpf);
+ break;
+ case PRUETH_SWDATA_XSK:
+ pkt_len = cppi5_hdesc_get_pktlen(desc_tx);
+ dev_sw_netstats_tx_add(ndev, 1, pkt_len);
+ xsk_frames_done++;
+ break;
+ default:
+ prueth_xmit_free(tx_chn, desc_tx);
+ ndev->stats.tx_dropped++;
+ continue;
+ }
+
+ prueth_xmit_free(tx_chn, desc_tx);
+ num_tx++;
+ }
+
+ if (!num_tx)
+ return 0;
+
+ netif_txq = netdev_get_tx_queue(ndev, chn);
+ netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);
+
+ if (netif_tx_queue_stopped(netif_txq)) {
+ /* If the TX queue was stopped, wake it now
+ * if we have enough room.
+ */
+ __netif_tx_lock(netif_txq, smp_processor_id());
+ if (netif_running(ndev) &&
+ (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
+ MAX_SKB_FRAGS))
+ netif_tx_wake_queue(netif_txq);
+ __netif_tx_unlock(netif_txq);
+ }
+
+ if (tx_chn->xsk_pool) {
+ if (xsk_frames_done)
+ xsk_tx_completed(tx_chn->xsk_pool, xsk_frames_done);
+
+ if (xsk_uses_need_wakeup(tx_chn->xsk_pool))
+ xsk_set_tx_need_wakeup(tx_chn->xsk_pool);
+
+ netif_txq = netdev_get_tx_queue(ndev, chn);
+ txq_trans_cond_update(netif_txq);
+ emac_xsk_xmit_zc(emac, chn);
+ }
+
+ return num_tx;
+}
+
+static enum hrtimer_restart emac_tx_timer_callback(struct hrtimer *timer)
+{
+ struct prueth_tx_chn *tx_chns =
+ container_of(timer, struct prueth_tx_chn, tx_hrtimer);
+
+ if (tx_chns->irq_disabled) {
+ tx_chns->irq_disabled = false;
+ enable_irq(tx_chns->irq);
+ }
+ return HRTIMER_NORESTART;
+}
+
+static int emac_napi_tx_poll(struct napi_struct *napi_tx, int budget)
+{
+ struct prueth_tx_chn *tx_chn = prueth_napi_to_tx_chn(napi_tx);
+ struct prueth_emac *emac = tx_chn->emac;
+ bool tdown = false;
+ int num_tx_packets;
+
+ num_tx_packets = emac_tx_complete_packets(emac, tx_chn->id, budget,
+ &tdown);
+
+ if (num_tx_packets >= budget)
+ return budget;
+
+ if (napi_complete_done(napi_tx, num_tx_packets)) {
+ if (unlikely(tx_chn->tx_pace_timeout_ns && !tdown)) {
+ hrtimer_start(&tx_chn->tx_hrtimer,
+ ns_to_ktime(tx_chn->tx_pace_timeout_ns),
+ HRTIMER_MODE_REL_PINNED);
+ } else {
+ if (tx_chn->irq_disabled) {
+ tx_chn->irq_disabled = false;
+ enable_irq(tx_chn->irq);
+ }
+ }
+ }
+
+ return num_tx_packets;
+}
+
+static irqreturn_t prueth_tx_irq(int irq, void *dev_id)
+{
+ struct prueth_tx_chn *tx_chn = dev_id;
+
+ tx_chn->irq_disabled = true;
+ disable_irq_nosync(irq);
+ napi_schedule(&tx_chn->napi_tx);
+
+ return IRQ_HANDLED;
+}
+
+int prueth_ndev_add_tx_napi(struct prueth_emac *emac)
+{
+ struct prueth *prueth = emac->prueth;
+ int i, ret;
+
+ for (i = 0; i < emac->tx_ch_num; i++) {
+ struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];
+
+ netif_napi_add_tx(emac->ndev, &tx_chn->napi_tx, emac_napi_tx_poll);
+ hrtimer_setup(&tx_chn->tx_hrtimer, &emac_tx_timer_callback, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
+ ret = request_irq(tx_chn->irq, prueth_tx_irq,
+ IRQF_TRIGGER_HIGH, tx_chn->name,
+ tx_chn);
+ if (ret) {
+ netif_napi_del(&tx_chn->napi_tx);
+ dev_err(prueth->dev, "unable to request TX IRQ %d\n",
+ tx_chn->irq);
+ goto fail;
+ }
+ }
+
+ return 0;
+fail:
+ prueth_ndev_del_tx_napi(emac, i);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(prueth_ndev_add_tx_napi);
+
+int prueth_init_tx_chns(struct prueth_emac *emac)
+{
+ static const struct k3_ring_cfg ring_cfg = {
+ .elm_size = K3_RINGACC_RING_ELSIZE_8,
+ .mode = K3_RINGACC_RING_MODE_RING,
+ .flags = 0,
+ .size = PRUETH_MAX_TX_DESC,
+ };
+ struct k3_udma_glue_tx_channel_cfg tx_cfg;
+ struct device *dev = emac->prueth->dev;
+ struct net_device *ndev = emac->ndev;
+ int ret, slice, i;
+ u32 hdesc_size;
+
+ slice = prueth_emac_slice(emac);
+ if (slice < 0)
+ return slice;
+
+ init_completion(&emac->tdown_complete);
+
+ hdesc_size = cppi5_hdesc_calc_size(true, PRUETH_NAV_PS_DATA_SIZE,
+ PRUETH_NAV_SW_DATA_SIZE);
+ memset(&tx_cfg, 0, sizeof(tx_cfg));
+ tx_cfg.swdata_size = PRUETH_NAV_SW_DATA_SIZE;
+ tx_cfg.tx_cfg = ring_cfg;
+ tx_cfg.txcq_cfg = ring_cfg;
+
+ for (i = 0; i < emac->tx_ch_num; i++) {
+ struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];
+
+ /* To differentiate channels for SLICE0 vs SLICE1 */
+ snprintf(tx_chn->name, sizeof(tx_chn->name),
+ "tx%d-%d", slice, i);
+
+ tx_chn->emac = emac;
+ tx_chn->id = i;
+ tx_chn->descs_num = PRUETH_MAX_TX_DESC;
+
+ tx_chn->tx_chn =
+ k3_udma_glue_request_tx_chn(dev, tx_chn->name,
+ &tx_cfg);
+ if (IS_ERR(tx_chn->tx_chn)) {
+ ret = PTR_ERR(tx_chn->tx_chn);
+ tx_chn->tx_chn = NULL;
+ netdev_err(ndev,
+ "Failed to request tx dma ch: %d\n", ret);
+ goto fail;
+ }
+
+ tx_chn->dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn->tx_chn);
+ tx_chn->desc_pool =
+ k3_cppi_desc_pool_create_name(tx_chn->dma_dev,
+ tx_chn->descs_num,
+ hdesc_size,
+ tx_chn->name);
+ if (IS_ERR(tx_chn->desc_pool)) {
+ ret = PTR_ERR(tx_chn->desc_pool);
+ tx_chn->desc_pool = NULL;
+ netdev_err(ndev, "Failed to create tx pool: %d\n", ret);
+ goto fail;
+ }
+
+ ret = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
+ if (ret < 0) {
+ netdev_err(ndev, "failed to get tx irq\n");
+ goto fail;
+ }
+ tx_chn->irq = ret;
+
+ snprintf(tx_chn->name, sizeof(tx_chn->name), "%s-tx%d",
+ dev_name(dev), tx_chn->id);
+ }
+
+ return 0;
+
+fail:
+ prueth_cleanup_tx_chns(emac);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(prueth_init_tx_chns);
+
+static struct page_pool *prueth_create_page_pool(struct prueth_emac *emac,
+ struct device *dma_dev,
+ int size)
+{
+ struct page_pool_params pp_params = { 0 };
+ struct page_pool *pool;
+
+ pp_params.order = 0;
+ pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pp_params.pool_size = size;
+ pp_params.nid = dev_to_node(emac->prueth->dev);
+ pp_params.dma_dir = DMA_BIDIRECTIONAL;
+ pp_params.dev = dma_dev;
+ pp_params.napi = &emac->napi_rx;
+ pp_params.max_len = PAGE_SIZE;
+
+ pool = page_pool_create(&pp_params);
+ if (IS_ERR(pool))
+ netdev_err(emac->ndev, "cannot create rx page pool\n");
+
+ return pool;
+}
+
+int prueth_init_rx_chns(struct prueth_emac *emac,
+ struct prueth_rx_chn *rx_chn,
+ char *name, u32 max_rflows,
+ u32 max_desc_num)
+{
+ struct k3_udma_glue_rx_channel_cfg rx_cfg;
+ struct device *dev = emac->prueth->dev;
+ struct net_device *ndev = emac->ndev;
+ u32 fdqring_id, hdesc_size;
+ struct page_pool *pool;
+ int i, ret = 0, slice;
+ int flow_id_base;
+
+ slice = prueth_emac_slice(emac);
+ if (slice < 0)
+ return slice;
+
+ /* To differentiate channels for SLICE0 vs SLICE1 */
+ snprintf(rx_chn->name, sizeof(rx_chn->name), "%s%d", name, slice);
+
+ hdesc_size = cppi5_hdesc_calc_size(true, PRUETH_NAV_PS_DATA_SIZE,
+ PRUETH_NAV_SW_DATA_SIZE);
+ memset(&rx_cfg, 0, sizeof(rx_cfg));
+ rx_cfg.swdata_size = PRUETH_NAV_SW_DATA_SIZE;
+ rx_cfg.flow_id_num = max_rflows;
+ rx_cfg.flow_id_base = -1; /* udmax will auto select flow id base */
+
+ /* init all flows */
+ rx_chn->dev = dev;
+ rx_chn->descs_num = max_desc_num;
+
+ rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, rx_chn->name,
+ &rx_cfg);
+ if (IS_ERR(rx_chn->rx_chn)) {
+ ret = PTR_ERR(rx_chn->rx_chn);
+ rx_chn->rx_chn = NULL;
+ netdev_err(ndev, "Failed to request rx dma ch: %d\n", ret);
+ goto fail;
+ }
+
+ rx_chn->dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn->rx_chn);
+ rx_chn->desc_pool = k3_cppi_desc_pool_create_name(rx_chn->dma_dev,
+ rx_chn->descs_num,
+ hdesc_size,
+ rx_chn->name);
+ if (IS_ERR(rx_chn->desc_pool)) {
+ ret = PTR_ERR(rx_chn->desc_pool);
+ rx_chn->desc_pool = NULL;
+ netdev_err(ndev, "Failed to create rx pool: %d\n", ret);
+ goto fail;
+ }
+
+ pool = prueth_create_page_pool(emac, rx_chn->dma_dev, rx_chn->descs_num);
+ if (IS_ERR(pool)) {
+ ret = PTR_ERR(pool);
+ goto fail;
+ }
+
+ rx_chn->pg_pool = pool;
+
+ flow_id_base = k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn);
+ if (emac->is_sr1 && !strcmp(name, "rxmgm")) {
+ emac->rx_mgm_flow_id_base = flow_id_base;
+ netdev_dbg(ndev, "mgm flow id base = %d\n", flow_id_base);
+ } else {
+ emac->rx_flow_id_base = flow_id_base;
+ netdev_dbg(ndev, "flow id base = %d\n", flow_id_base);
+ }
+
+ fdqring_id = K3_RINGACC_RING_ID_ANY;
+ for (i = 0; i < rx_cfg.flow_id_num; i++) {
+ struct k3_ring_cfg rxring_cfg = {
+ .elm_size = K3_RINGACC_RING_ELSIZE_8,
+ .mode = K3_RINGACC_RING_MODE_RING,
+ .flags = 0,
+ };
+ struct k3_ring_cfg fdqring_cfg = {
+ .elm_size = K3_RINGACC_RING_ELSIZE_8,
+ .flags = K3_RINGACC_RING_SHARED,
+ };
+ struct k3_udma_glue_rx_flow_cfg rx_flow_cfg = {
+ .rx_cfg = rxring_cfg,
+ .rxfdq_cfg = fdqring_cfg,
+ .ring_rxq_id = K3_RINGACC_RING_ID_ANY,
+ .src_tag_lo_sel =
+ K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG,
+ };
+
+ rx_flow_cfg.ring_rxfdq0_id = fdqring_id;
+ rx_flow_cfg.rx_cfg.size = max_desc_num;
+ rx_flow_cfg.rxfdq_cfg.size = max_desc_num;
+ rx_flow_cfg.rxfdq_cfg.mode = emac->prueth->pdata.fdqring_mode;
+
+ ret = k3_udma_glue_rx_flow_init(rx_chn->rx_chn,
+ i, &rx_flow_cfg);
+ if (ret) {
+ netdev_err(ndev, "Failed to init rx flow%d %d\n",
+ i, ret);
+ goto fail;
+ }
+ if (!i)
+ fdqring_id = k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn,
+ i);
+ ret = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
+ if (ret < 0) {
+ netdev_err(ndev, "Failed to get rx dma irq");
+ goto fail;
+ }
+ rx_chn->irq[i] = ret;
+ }
+
+ return 0;
+
+fail:
+ prueth_cleanup_rx_chns(emac, rx_chn, max_rflows);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(prueth_init_rx_chns);
+
+int prueth_dma_rx_push_mapped(struct prueth_emac *emac,
+ struct prueth_rx_chn *rx_chn,
+ struct page *page, u32 buf_len)
+{
+ struct net_device *ndev = emac->ndev;
+ struct cppi5_host_desc_t *desc_rx;
+ struct prueth_swdata *swdata;
+ dma_addr_t desc_dma;
+ dma_addr_t buf_dma;
+
+ buf_dma = page_pool_get_dma_addr(page) + PRUETH_HEADROOM;
+ desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
+ if (!desc_rx) {
+ netdev_err(ndev, "rx push: failed to allocate descriptor\n");
+ return -ENOMEM;
+ }
+ desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);
+
+ cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
+ PRUETH_NAV_PS_DATA_SIZE);
+ k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma);
+ cppi5_hdesc_attach_buf(desc_rx, buf_dma, buf_len, buf_dma, buf_len);
+
+ swdata = cppi5_hdesc_get_swdata(desc_rx);
+ swdata->type = PRUETH_SWDATA_PAGE;
+ swdata->data.page = page;
+
+ return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, PRUETH_RX_FLOW_DATA,
+ desc_rx, desc_dma);
+}
+EXPORT_SYMBOL_GPL(prueth_dma_rx_push_mapped);
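prueth_dma_rx_push_mapped() above expects a page that already belongs to (and is DMA-mapped by) the page pool created in prueth_create_page_pool(), and it applies the PRUETH_HEADROOM offset itself. A refill loop in the RX path would therefore look roughly like the sketch below; PRUETH_MAX_PKT_SIZE stands in for whatever usable buffer length the driver actually derives from its page layout:

	for (i = 0; i < count; i++) {
		struct page *page = page_pool_dev_alloc_pages(rx_chn->pg_pool);

		if (!page)
			break;

		ret = prueth_dma_rx_push_mapped(emac, rx_chn, page,
						PRUETH_MAX_PKT_SIZE);
		if (ret) {
			page_pool_recycle_direct(rx_chn->pg_pool, page);
			break;
		}
	}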
+
+u64 icssg_ts_to_ns(u32 hi_sw, u32 hi, u32 lo, u32 cycle_time_ns)
+{
+ u32 iepcount_lo, iepcount_hi, hi_rollover_count;
+ u64 ns;
+
+ iepcount_lo = lo & GENMASK(19, 0);
+ iepcount_hi = (hi & GENMASK(11, 0)) << 12 | lo >> 20;
+ hi_rollover_count = hi >> 11;
+
+ ns = ((u64)hi_rollover_count) << 23 | (iepcount_hi + hi_sw);
+ ns = ns * cycle_time_ns + iepcount_lo;
+
+ return ns;
+}
+EXPORT_SYMBOL_GPL(icssg_ts_to_ns);
+
+void emac_rx_timestamp(struct prueth_emac *emac,
+ struct sk_buff *skb, u32 *psdata)
+{
+ struct skb_shared_hwtstamps *ssh;
+ u64 ns;
+
+ if (emac->is_sr1) {
+ ns = (u64)psdata[1] << 32 | psdata[0];
+ } else {
+ u32 hi_sw = readl(emac->prueth->shram.va +
+ TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET);
+ ns = icssg_ts_to_ns(hi_sw, psdata[1], psdata[0],
+ IEP_DEFAULT_CYCLE_TIME_NS);
+ }
+
+ ssh = skb_hwtstamps(skb);
+ memset(ssh, 0, sizeof(*ssh));
+ ssh->hwtstamp = ns_to_ktime(ns);
+}
+
+/**
+ * emac_xmit_xdp_frame - transmits an XDP frame
+ * @emac: emac device
+ * @xdpf: data to transmit
+ * @q_idx: queue id
+ * @buff_type: Type of buffer to be transmitted
+ *
+ * Return: XDP state
+ */
+u32 emac_xmit_xdp_frame(struct prueth_emac *emac,
+ struct xdp_frame *xdpf,
+ unsigned int q_idx,
+ enum prueth_tx_buff_type buff_type)
+{
+ struct cppi5_host_desc_t *first_desc;
+ struct net_device *ndev = emac->ndev;
+ struct netdev_queue *netif_txq;
+ struct prueth_tx_chn *tx_chn;
+ dma_addr_t desc_dma, buf_dma;
+ struct prueth_swdata *swdata;
+ struct page *page;
+ u32 *epib;
+ int ret;
+
+ if (q_idx >= PRUETH_MAX_TX_QUEUES) {
+ netdev_err(ndev, "xdp tx: invalid q_id %d\n", q_idx);
+ return ICSSG_XDP_CONSUMED; /* drop */
+ }
+
+ tx_chn = &emac->tx_chns[q_idx];
+
+ first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
+ if (!first_desc) {
+ netdev_dbg(ndev, "xdp tx: failed to allocate descriptor\n");
+ return ICSSG_XDP_CONSUMED; /* drop */
+ }
+
+ if (buff_type == PRUETH_TX_BUFF_TYPE_XDP_TX) { /* already DMA mapped by page_pool */
+ page = virt_to_head_page(xdpf->data);
+ if (unlikely(!page)) {
+ netdev_err(ndev, "xdp tx: failed to get page from xdpf\n");
+ goto drop_free_descs;
+ }
+ buf_dma = page_pool_get_dma_addr(page);
+ buf_dma += xdpf->headroom + sizeof(struct xdp_frame);
+ } else { /* Map the linear buffer */
+ buf_dma = dma_map_single(tx_chn->dma_dev, xdpf->data, xdpf->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
+ netdev_err(ndev, "xdp tx: failed to map data buffer\n");
+ goto drop_free_descs; /* drop */
+ }
+ }
+
+ cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
+ PRUETH_NAV_PS_DATA_SIZE);
+ cppi5_hdesc_set_pkttype(first_desc, 0);
+ epib = first_desc->epib;
+ epib[0] = 0;
+ epib[1] = 0;
+
+ /* set dst tag to indicate internal qid at the firmware which is at
+ * bit8..bit15. bit0..bit7 indicates port num for directed
+ * packets in case of switch mode operation
+ */
+ cppi5_desc_set_tags_ids(&first_desc->hdr, 0, (emac->port_id | (q_idx << 8)));
+ k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
+ cppi5_hdesc_attach_buf(first_desc, buf_dma, xdpf->len, buf_dma, xdpf->len);
+ swdata = cppi5_hdesc_get_swdata(first_desc);
+ swdata->type = PRUETH_SWDATA_XDPF;
+ swdata->data.xdpf = xdpf;
+
+ /* Report BQL before sending the packet */
+ netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
+ netdev_tx_sent_queue(netif_txq, xdpf->len);
+
+ cppi5_hdesc_set_pktlen(first_desc, xdpf->len);
+ desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
+
+ ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
+ if (ret) {
+ netdev_err(ndev, "xdp tx: push failed: %d\n", ret);
+ netdev_tx_completed_queue(netif_txq, 1, xdpf->len);
+ goto drop_free_descs;
+ }
+
+ return ICSSG_XDP_TX;
+
+drop_free_descs:
+ prueth_xmit_free(tx_chn, first_desc);
+ return ICSSG_XDP_CONSUMED;
+}
+EXPORT_SYMBOL_GPL(emac_xmit_xdp_frame);
+
+/**
+ * emac_run_xdp - run an XDP program
+ * @emac: emac device
+ * @xdp: XDP buffer containing the frame
+ * @len: Rx descriptor packet length
+ *
+ * Return: XDP state
+ */
+static u32 emac_run_xdp(struct prueth_emac *emac, struct xdp_buff *xdp, u32 *len)
+{
+ struct net_device *ndev = emac->ndev;
+ struct netdev_queue *netif_txq;
+ int cpu = smp_processor_id();
+ struct bpf_prog *xdp_prog;
+ struct xdp_frame *xdpf;
+ u32 pkt_len = *len;
+ u32 act, result;
+ int q_idx, err;
+
+ xdp_prog = READ_ONCE(emac->xdp_prog);
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+ return ICSSG_XDP_PASS;
+ case XDP_TX:
+ /* Send packet to TX ring for immediate transmission */
+ xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf)) {
+ ndev->stats.tx_dropped++;
+ goto drop;
+ }
+
+ q_idx = cpu % emac->tx_ch_num;
+ netif_txq = netdev_get_tx_queue(ndev, q_idx);
+ __netif_tx_lock(netif_txq, cpu);
+ result = emac_xmit_xdp_frame(emac, xdpf, q_idx,
+ PRUETH_TX_BUFF_TYPE_XDP_TX);
+ __netif_tx_unlock(netif_txq);
+ if (result == ICSSG_XDP_CONSUMED) {
+ ndev->stats.tx_dropped++;
+ goto drop;
+ }
+
+ dev_sw_netstats_rx_add(ndev, xdpf->len);
+ return result;
+ case XDP_REDIRECT:
+ err = xdp_do_redirect(emac->ndev, xdp, xdp_prog);
+ if (err)
+ goto drop;
+
+ dev_sw_netstats_rx_add(ndev, pkt_len);
+ return ICSSG_XDP_REDIR;
+ default:
+ bpf_warn_invalid_xdp_action(emac->ndev, xdp_prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+drop:
+ trace_xdp_exception(emac->ndev, xdp_prog, act);
+ fallthrough; /* handle aborts by dropping packet */
+ case XDP_DROP:
+ ndev->stats.rx_dropped++;
+ return ICSSG_XDP_CONSUMED;
+ }
+}
+
+static int prueth_dma_rx_push_mapped_zc(struct prueth_emac *emac,
+ struct prueth_rx_chn *rx_chn,
+ struct xdp_buff *xdp)
+{
+ struct net_device *ndev = emac->ndev;
+ struct cppi5_host_desc_t *desc_rx;
+ struct prueth_swdata *swdata;
+ dma_addr_t desc_dma;
+ dma_addr_t buf_dma;
+ int buf_len;
+
+ buf_dma = xsk_buff_xdp_get_dma(xdp);
+ desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
+ if (!desc_rx) {
+ netdev_err(ndev, "rx push: failed to allocate descriptor\n");
+ return -ENOMEM;
+ }
+ desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);
+
+ cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
+ PRUETH_NAV_PS_DATA_SIZE);
+ k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma);
+ buf_len = xsk_pool_get_rx_frame_size(rx_chn->xsk_pool);
+ cppi5_hdesc_attach_buf(desc_rx, buf_dma, buf_len, buf_dma, buf_len);
+ swdata = cppi5_hdesc_get_swdata(desc_rx);
+ swdata->type = PRUETH_SWDATA_XSK;
+ swdata->data.xdp = xdp;
+
+ return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, PRUETH_RX_FLOW_DATA,
+ desc_rx, desc_dma);
+}
+
+static int prueth_rx_alloc_zc(struct prueth_emac *emac, int budget)
+{
+ struct prueth_rx_chn *rx_chn = &emac->rx_chns;
+ struct xdp_buff *xdp;
+ int i, ret;
+
+ for (i = 0; i < budget; i++) {
+ xdp = xsk_buff_alloc(rx_chn->xsk_pool);
+ if (!xdp)
+ break;
+
+ ret = prueth_dma_rx_push_mapped_zc(emac, rx_chn, xdp);
+ if (ret) {
+ netdev_err(emac->ndev, "rx alloc: failed to map descriptors to xdp buff\n");
+ xsk_buff_free(xdp);
+ break;
+ }
+ }
+
+ return i;
+}
+
+static void emac_dispatch_skb_zc(struct prueth_emac *emac, struct xdp_buff *xdp, u32 *psdata)
+{
+ unsigned int headroom = xdp->data - xdp->data_hard_start;
+ unsigned int pkt_len = xdp->data_end - xdp->data;
+ struct net_device *ndev = emac->ndev;
+ struct sk_buff *skb;
+
+ skb = napi_alloc_skb(&emac->napi_rx, xdp->data_end - xdp->data_hard_start);
+ if (unlikely(!skb)) {
+ ndev->stats.rx_dropped++;
+ return;
+ }
+
+ skb_reserve(skb, headroom);
+ skb_put(skb, pkt_len);
+ skb->dev = ndev;
+
+ /* RX HW timestamp */
+ if (emac->rx_ts_enabled)
+ emac_rx_timestamp(emac, skb, psdata);
+
+ if (emac->prueth->is_switch_mode)
+ skb->offload_fwd_mark = emac->offload_fwd_mark;
+ skb->protocol = eth_type_trans(skb, ndev);
+
+ skb_mark_for_recycle(skb);
+ napi_gro_receive(&emac->napi_rx, skb);
+ ndev->stats.rx_bytes += pkt_len;
+ ndev->stats.rx_packets++;
+}
+
+static int emac_rx_packet_zc(struct prueth_emac *emac, u32 flow_id,
+ int budget)
+{
+ struct prueth_rx_chn *rx_chn = &emac->rx_chns;
+ u32 buf_dma_len, pkt_len, port_id = 0;
+ struct net_device *ndev = emac->ndev;
+ struct cppi5_host_desc_t *desc_rx;
+ struct prueth_swdata *swdata;
+ dma_addr_t desc_dma, buf_dma;
+ struct xdp_buff *xdp;
+ int xdp_status = 0;
+ int count = 0;
+ u32 *psdata;
+ int ret;
+
+ while (count < budget) {
+ ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma);
+ if (ret) {
+ if (ret != -ENODATA)
+ netdev_err(ndev, "rx pop: failed: %d\n", ret);
+ break;
+ }
+
+ if (cppi5_desc_is_tdcm(desc_dma)) {
+ complete(&emac->tdown_complete);
+ break;
+ }
+
+ desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
+ swdata = cppi5_hdesc_get_swdata(desc_rx);
+ if (swdata->type != PRUETH_SWDATA_XSK) {
+ netdev_err(ndev, "rx_pkt: invalid swdata->type %d\n", swdata->type);
+ k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
+ break;
+ }
+
+ xdp = swdata->data.xdp;
+ cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
+ k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
+ pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
+ /* firmware adds 4 CRC bytes, strip them */
+ pkt_len -= 4;
+ cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
+ psdata = cppi5_hdesc_get_psdata(desc_rx);
+ k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
+ count++;
+ xsk_buff_set_size(xdp, pkt_len);
+ xsk_buff_dma_sync_for_cpu(xdp);
+
+ if (prueth_xdp_is_enabled(emac)) {
+ ret = emac_run_xdp(emac, xdp, &pkt_len);
+ switch (ret) {
+ case ICSSG_XDP_PASS:
+ /* prepare skb and send to n/w stack */
+ emac_dispatch_skb_zc(emac, xdp, psdata);
+ xsk_buff_free(xdp);
+ break;
+ case ICSSG_XDP_CONSUMED:
+ xsk_buff_free(xdp);
+ break;
+ case ICSSG_XDP_TX:
+ case ICSSG_XDP_REDIR:
+ xdp_status |= ret;
+ break;
+ }
+ } else {
+ /* prepare skb and send to n/w stack */
+ emac_dispatch_skb_zc(emac, xdp, psdata);
+ xsk_buff_free(xdp);
+ }
+ }
+
+ if (xdp_status & ICSSG_XDP_REDIR)
+ xdp_do_flush();
+
+ /* Refill the RX ring with as many xsk buffers as the "count" of
+ * packets just processed, so that further packets can be received.
+ */
+ ret = prueth_rx_alloc_zc(emac, count);
+
+ if (xsk_uses_need_wakeup(rx_chn->xsk_pool)) {
+ /* If user space doesn't provide enough buffers, it must explicitly
+ * wake up the kernel once new buffers become available
+ */
+ if (ret < count)
+ xsk_set_rx_need_wakeup(rx_chn->xsk_pool);
+ else
+ xsk_clear_rx_need_wakeup(rx_chn->xsk_pool);
+ }
+
+ return count;
+}
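For context, here is an editorial sketch (not part of this patch) of the user-space side of the need_wakeup handshake used above. A consumer built on the libxdp/libbpf xsk helpers would typically kick the kernel like this; "xsk" and "fill_ring" are hypothetical application variables:

	#include <poll.h>
	#include <xdp/xsk.h>	/* xsk_ring_prod__needs_wakeup(), xsk_socket__fd() */

	static void rx_wakeup_if_needed(struct xsk_socket *xsk,
					struct xsk_ring_prod *fill_ring)
	{
		struct pollfd pfd = {
			.fd = xsk_socket__fd(xsk),
			.events = POLLIN,
		};

		/* The driver asks for a wakeup only when it could not refill
		 * its RX ring; after user space has put fresh buffers on the
		 * fill ring, a poll() on the xsk fd kicks the driver again.
		 */
		if (xsk_ring_prod__needs_wakeup(fill_ring))
			poll(&pfd, 1, 0);
	}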
+
+static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, u32 *xdp_state)
+{
+ struct prueth_rx_chn *rx_chn = &emac->rx_chns;
+ u32 buf_dma_len, pkt_len, port_id = 0;
+ struct net_device *ndev = emac->ndev;
+ struct cppi5_host_desc_t *desc_rx;
+ struct prueth_swdata *swdata;
+ dma_addr_t desc_dma, buf_dma;
+ struct page *page, *new_page;
+ struct page_pool *pool;
+ struct sk_buff *skb;
+ struct xdp_buff xdp;
+ int headroom, ret;
+ u32 *psdata;
+ void *pa;
+
+ *xdp_state = 0;
+ pool = rx_chn->pg_pool;
+ ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma);
+ if (ret) {
+ if (ret != -ENODATA)
+ netdev_err(ndev, "rx pop: failed: %d\n", ret);
+ return ret;
+ }
+
+ if (cppi5_desc_is_tdcm(desc_dma)) {
+ complete(&emac->tdown_complete);
+ return 0;
+ }
+
+ desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
+ swdata = cppi5_hdesc_get_swdata(desc_rx);
+ if (swdata->type != PRUETH_SWDATA_PAGE) {
+ netdev_err(ndev, "rx_pkt: invalid swdata->type %d\n", swdata->type);
+ k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
+ return 0;
+ }
+
+ page = swdata->data.page;
+ page_pool_dma_sync_for_cpu(pool, page, 0, PAGE_SIZE);
+ cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
+ k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
+ pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
+ /* firmware adds 4 CRC bytes, strip them */
+ pkt_len -= 4;
+ cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
+ k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
+
+ /* if allocation fails we drop the packet but push the
+ * descriptor back to the ring with old page to prevent a stall
+ */
+ new_page = page_pool_dev_alloc_pages(pool);
+ if (unlikely(!new_page)) {
+ new_page = page;
+ ndev->stats.rx_dropped++;
+ goto requeue;
+ }
+
+ pa = page_address(page);
+ if (prueth_xdp_is_enabled(emac)) {
+ xdp_init_buff(&xdp, PAGE_SIZE, &rx_chn->xdp_rxq);
+ xdp_prepare_buff(&xdp, pa, PRUETH_HEADROOM, pkt_len, false);
+
+ *xdp_state = emac_run_xdp(emac, &xdp, &pkt_len);
+ if (*xdp_state != ICSSG_XDP_PASS)
+ goto requeue;
+ headroom = xdp.data - xdp.data_hard_start;
+ pkt_len = xdp.data_end - xdp.data;
+ } else {
+ headroom = PRUETH_HEADROOM;
+ }
+
+ /* prepare skb and send to n/w stack */
+ skb = napi_build_skb(pa, PAGE_SIZE);
+ if (!skb) {
+ ndev->stats.rx_dropped++;
+ page_pool_recycle_direct(pool, page);
+ goto requeue;
+ }
+
+ skb_reserve(skb, headroom);
+ skb_put(skb, pkt_len);
+ skb->dev = ndev;
+
+ psdata = cppi5_hdesc_get_psdata(desc_rx);
+ /* RX HW timestamp */
+ if (emac->rx_ts_enabled)
+ emac_rx_timestamp(emac, skb, psdata);
+
+ if (emac->prueth->is_switch_mode)
+ skb->offload_fwd_mark = emac->offload_fwd_mark;
+ skb->protocol = eth_type_trans(skb, ndev);
+
+ skb_mark_for_recycle(skb);
+ napi_gro_receive(&emac->napi_rx, skb);
+ ndev->stats.rx_bytes += pkt_len;
+ ndev->stats.rx_packets++;
+
+requeue:
+ /* queue another RX DMA */
+ ret = prueth_dma_rx_push_mapped(emac, &emac->rx_chns, new_page,
+ PRUETH_MAX_PKT_SIZE);
+ if (WARN_ON(ret < 0)) {
+ page_pool_recycle_direct(pool, new_page);
+ ndev->stats.rx_errors++;
+ ndev->stats.rx_dropped++;
+ }
+
+ return ret;
+}
+
+void prueth_rx_cleanup(void *data, dma_addr_t desc_dma)
+{
+ struct prueth_rx_chn *rx_chn = data;
+ struct cppi5_host_desc_t *desc_rx;
+ struct prueth_swdata *swdata;
+ struct page_pool *pool;
+ struct xdp_buff *xdp;
+ struct page *page;
+
+ pool = rx_chn->pg_pool;
+ desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
+ swdata = cppi5_hdesc_get_swdata(desc_rx);
+ if (rx_chn->xsk_pool) {
+ xdp = swdata->data.xdp;
+ xsk_buff_free(xdp);
+ } else {
+ page = swdata->data.page;
+ page_pool_recycle_direct(pool, page);
+ }
+
+ k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
+}
+EXPORT_SYMBOL_GPL(prueth_rx_cleanup);
+
+static int prueth_tx_ts_cookie_get(struct prueth_emac *emac)
+{
+ int i;
+
+ /* search and get the next free slot */
+ for (i = 0; i < PRUETH_MAX_TX_TS_REQUESTS; i++) {
+ if (!emac->tx_ts_skb[i]) {
+ emac->tx_ts_skb[i] = ERR_PTR(-EBUSY); /* reserve slot */
+ return i;
+ }
+ }
+
+ return -EBUSY;
+}
+
+/**
+ * icssg_ndo_start_xmit - EMAC Transmit function
+ * @skb: SKB pointer
+ * @ndev: EMAC network adapter
+ *
+ * Called by the network stack to transmit a packet; the packet is queued
+ * in the EMAC hardware transmit queue.
+ * Doesn't wait for completion; TX completion is handled later in
+ * emac_tx_complete_packets().
+ *
+ * Return: enum netdev_tx
+ */
+enum netdev_tx icssg_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct cppi5_host_desc_t *first_desc, *next_desc, *cur_desc;
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+ struct netdev_queue *netif_txq;
+ struct prueth_swdata *swdata;
+ struct prueth_tx_chn *tx_chn;
+ dma_addr_t desc_dma, buf_dma;
+ u32 pkt_len, dst_tag_id;
+ int i, ret = 0, q_idx;
+ bool in_tx_ts = 0;
+ int tx_ts_cookie;
+ u32 *epib;
+
+ pkt_len = skb_headlen(skb);
+ q_idx = skb_get_queue_mapping(skb);
+
+ tx_chn = &emac->tx_chns[q_idx];
+ netif_txq = netdev_get_tx_queue(ndev, q_idx);
+
+ /* Map the linear buffer */
+ buf_dma = dma_map_single(tx_chn->dma_dev, skb->data, pkt_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
+ netdev_err(ndev, "tx: failed to map skb buffer\n");
+ ret = NETDEV_TX_OK;
+ goto drop_free_skb;
+ }
+
+ first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
+ if (!first_desc) {
+ netdev_dbg(ndev, "tx: failed to allocate descriptor\n");
+ dma_unmap_single(tx_chn->dma_dev, buf_dma, pkt_len, DMA_TO_DEVICE);
+ goto drop_stop_q_busy;
+ }
+
+ cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
+ PRUETH_NAV_PS_DATA_SIZE);
+ cppi5_hdesc_set_pkttype(first_desc, 0);
+ epib = first_desc->epib;
+ epib[0] = 0;
+ epib[1] = 0;
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ emac->tx_ts_enabled) {
+ tx_ts_cookie = prueth_tx_ts_cookie_get(emac);
+ if (tx_ts_cookie >= 0) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ /* Request TX timestamp */
+ epib[0] = (u32)tx_ts_cookie;
+ epib[1] = 0x80000000; /* TX TS request */
+ emac->tx_ts_skb[tx_ts_cookie] = skb_get(skb);
+ in_tx_ts = 1;
+ }
+ }
+
+ /* Set the dst tag: bit8..bit15 carry the internal queue id used by
+ * the firmware; bit0..bit7 carry the port number for directed
+ * packets in switch mode operation, or port number 0 for
+ * undirected packets in HSR offload mode
+ */
+ dst_tag_id = emac->port_id | (q_idx << 8);
+
+ if (prueth->is_hsr_offload_mode &&
+ (ndev->features & NETIF_F_HW_HSR_DUP))
+ dst_tag_id = PRUETH_UNDIRECTED_PKT_DST_TAG;
+
+ if (prueth->is_hsr_offload_mode &&
+ (ndev->features & NETIF_F_HW_HSR_TAG_INS))
+ epib[1] |= PRUETH_UNDIRECTED_PKT_TAG_INS;
+
+ cppi5_desc_set_tags_ids(&first_desc->hdr, 0, dst_tag_id);
+ k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
+ cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
+ swdata = cppi5_hdesc_get_swdata(first_desc);
+ swdata->type = PRUETH_SWDATA_SKB;
+ swdata->data.skb = skb;
+
+ /* Handle the case where skb is fragmented in pages */
+ cur_desc = first_desc;
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ u32 frag_size = skb_frag_size(frag);
+
+ next_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
+ if (!next_desc) {
+ netdev_err(ndev,
+ "tx: failed to allocate frag. descriptor\n");
+ goto free_desc_stop_q_busy_cleanup_tx_ts;
+ }
+
+ buf_dma = skb_frag_dma_map(tx_chn->dma_dev, frag, 0, frag_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
+ netdev_err(ndev, "tx: Failed to map skb page\n");
+ k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
+ ret = NETDEV_TX_OK;
+ goto cleanup_tx_ts;
+ }
+
+ cppi5_hdesc_reset_hbdesc(next_desc);
+ k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
+ cppi5_hdesc_attach_buf(next_desc,
+ buf_dma, frag_size, buf_dma, frag_size);
+
+ desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool,
+ next_desc);
+ k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &desc_dma);
+ cppi5_hdesc_link_hbdesc(cur_desc, desc_dma);
+
+ pkt_len += frag_size;
+ cur_desc = next_desc;
+ }
+ WARN_ON_ONCE(pkt_len != skb->len);
+
+ /* report bql before sending packet */
+ netdev_tx_sent_queue(netif_txq, pkt_len);
+
+ cppi5_hdesc_set_pktlen(first_desc, pkt_len);
+ desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
+ /* cppi5_desc_dump(first_desc, 64); */
+
+ skb_tx_timestamp(skb); /* SW timestamp if SKBTX_IN_PROGRESS not set */
+ ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
+ if (ret) {
+ netdev_err(ndev, "tx: push failed: %d\n", ret);
+ netdev_tx_completed_queue(netif_txq, 1, pkt_len);
+ goto drop_free_descs;
+ }
+
+ if (in_tx_ts)
+ atomic_inc(&emac->tx_ts_pending);
+
+ if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) < MAX_SKB_FRAGS) {
+ netif_tx_stop_queue(netif_txq);
+ /* Barrier, so that stop_queue is visible to other CPUs */
+ smp_mb__after_atomic();
+
+ if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
+ MAX_SKB_FRAGS)
+ netif_tx_wake_queue(netif_txq);
+ }
+
+ return NETDEV_TX_OK;
+
+cleanup_tx_ts:
+ if (in_tx_ts) {
+ dev_kfree_skb_any(emac->tx_ts_skb[tx_ts_cookie]);
+ emac->tx_ts_skb[tx_ts_cookie] = NULL;
+ }
+
+drop_free_descs:
+ prueth_xmit_free(tx_chn, first_desc);
+
+drop_free_skb:
+ dev_kfree_skb_any(skb);
+
+ /* error */
+ ndev->stats.tx_dropped++;
+ netdev_err(ndev, "tx: error: %d\n", ret);
+
+ return ret;
+
+free_desc_stop_q_busy_cleanup_tx_ts:
+ if (in_tx_ts) {
+ dev_kfree_skb_any(emac->tx_ts_skb[tx_ts_cookie]);
+ emac->tx_ts_skb[tx_ts_cookie] = NULL;
+ }
+ prueth_xmit_free(tx_chn, first_desc);
+
+drop_stop_q_busy:
+ netif_tx_stop_queue(netif_txq);
+ return NETDEV_TX_BUSY;
+}
+EXPORT_SYMBOL_GPL(icssg_ndo_start_xmit);
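A worked example of the dst tag layout described inside icssg_ndo_start_xmit (editorial illustration with hypothetical values): a directed packet sent from TX queue 2 on a port whose port_id is 1 gets dst_tag_id = 1 | (2 << 8) = 0x0201, i.e. the port number in bit0..bit7 and the firmware-internal queue id in bit8..bit15; an undirected packet in HSR offload mode uses PRUETH_UNDIRECTED_PKT_DST_TAG instead.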
+
+void prueth_tx_cleanup(void *data, dma_addr_t desc_dma)
+{
+ struct prueth_tx_chn *tx_chn = data;
+ struct cppi5_host_desc_t *desc_tx;
+ struct xsk_buff_pool *xsk_pool;
+ struct prueth_swdata *swdata;
+ struct xdp_frame *xdpf;
+ struct sk_buff *skb;
+
+ desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
+ swdata = cppi5_hdesc_get_swdata(desc_tx);
+
+ switch (swdata->type) {
+ case PRUETH_SWDATA_SKB:
+ skb = swdata->data.skb;
+ dev_kfree_skb_any(skb);
+ break;
+ case PRUETH_SWDATA_XDPF:
+ xdpf = swdata->data.xdpf;
+ xdp_return_frame(xdpf);
+ break;
+ case PRUETH_SWDATA_XSK:
+ xsk_pool = tx_chn->xsk_pool;
+ xsk_tx_completed(xsk_pool, 1);
+ break;
+ default:
+ break;
+ }
+
+ prueth_xmit_free(tx_chn, desc_tx);
+}
+EXPORT_SYMBOL_GPL(prueth_tx_cleanup);
+
+irqreturn_t prueth_rx_irq(int irq, void *dev_id)
+{
+ struct prueth_emac *emac = dev_id;
+
+ emac->rx_chns.irq_disabled = true;
+ disable_irq_nosync(irq);
+ napi_schedule(&emac->napi_rx);
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL_GPL(prueth_rx_irq);
+
+void prueth_cleanup_tx_ts(struct prueth_emac *emac)
+{
+ int i;
+
+ for (i = 0; i < PRUETH_MAX_TX_TS_REQUESTS; i++) {
+ if (emac->tx_ts_skb[i]) {
+ dev_kfree_skb_any(emac->tx_ts_skb[i]);
+ emac->tx_ts_skb[i] = NULL;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(prueth_cleanup_tx_ts);
+
+int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget)
+{
+ struct prueth_emac *emac = prueth_napi_to_emac(napi_rx);
+ int rx_flow = emac->is_sr1 ?
+ PRUETH_RX_FLOW_DATA_SR1 : PRUETH_RX_FLOW_DATA;
+ int flow = emac->is_sr1 ?
+ PRUETH_MAX_RX_FLOWS_SR1 : PRUETH_MAX_RX_FLOWS;
+ struct prueth_rx_chn *rx_chn = &emac->rx_chns;
+ int xdp_state_or = 0;
+ int num_rx = 0;
+ int cur_budget;
+ u32 xdp_state;
+ int ret;
+
+ while (flow--) {
+ if (rx_chn->xsk_pool) {
+ num_rx = emac_rx_packet_zc(emac, flow, budget);
+ } else {
+ cur_budget = budget - num_rx;
+
+ while (cur_budget--) {
+ ret = emac_rx_packet(emac, flow, &xdp_state);
+ xdp_state_or |= xdp_state;
+ if (ret)
+ break;
+ num_rx++;
+ }
+ }
+
+ if (num_rx >= budget)
+ break;
+ }
+
+ if (xdp_state_or & ICSSG_XDP_REDIR)
+ xdp_do_flush();
+
+ if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) {
+ if (unlikely(emac->rx_pace_timeout_ns)) {
+ hrtimer_start(&emac->rx_hrtimer,
+ ns_to_ktime(emac->rx_pace_timeout_ns),
+ HRTIMER_MODE_REL_PINNED);
+ } else {
+ if (emac->rx_chns.irq_disabled) {
+ /* re-enable the RX IRQ */
+ emac->rx_chns.irq_disabled = false;
+ enable_irq(emac->rx_chns.irq[rx_flow]);
+ }
+ }
+ }
+
+ return num_rx;
+}
+EXPORT_SYMBOL_GPL(icssg_napi_rx_poll);
+
+int prueth_prepare_rx_chan(struct prueth_emac *emac,
+ struct prueth_rx_chn *chn,
+ int buf_size)
+{
+ struct page *page;
+ int desc_avail;
+ int i, ret;
+
+ desc_avail = k3_cppi_desc_pool_avail(chn->desc_pool);
+ if (desc_avail < chn->descs_num)
+ netdev_warn(emac->ndev,
+ "not enough RX descriptors available %d < %d\n",
+ desc_avail, chn->descs_num);
+
+ if (chn->xsk_pool) {
+ /* get buffers from the xsk_pool and push as many
+ * as possible to the RX ring
+ */
+ ret = prueth_rx_alloc_zc(emac, desc_avail);
+ if (!ret)
+ goto recycle_alloc_pg;
+ } else {
+ for (i = 0; i < desc_avail; i++) {
+ /* NOTE: we're not using memory efficiently here.
+ * 1 full page (4KB?) used here instead of
+ * PRUETH_MAX_PKT_SIZE (~1.5KB?)
+ */
+ page = page_pool_dev_alloc_pages(chn->pg_pool);
+ if (!page) {
+ netdev_err(emac->ndev, "couldn't allocate rx page\n");
+ ret = -ENOMEM;
+ goto recycle_alloc_pg;
+ }
+
+ ret = prueth_dma_rx_push_mapped(emac, chn, page, buf_size);
+ if (ret < 0) {
+ netdev_err(emac->ndev,
+ "cannot submit page for rx chan %s ret %d\n",
+ chn->name, ret);
+ page_pool_recycle_direct(chn->pg_pool, page);
+ goto recycle_alloc_pg;
+ }
+ }
+ }
+
+ return 0;
+
+recycle_alloc_pg:
+ prueth_reset_rx_chan(&emac->rx_chns, PRUETH_MAX_RX_FLOWS, false);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(prueth_prepare_rx_chan);
+
+void prueth_reset_tx_chan(struct prueth_emac *emac, int ch_num,
+ bool free_skb)
+{
+ int i;
+
+ for (i = 0; i < ch_num; i++) {
+ if (free_skb)
+ k3_udma_glue_reset_tx_chn(emac->tx_chns[i].tx_chn,
+ &emac->tx_chns[i],
+ prueth_tx_cleanup);
+ k3_udma_glue_disable_tx_chn(emac->tx_chns[i].tx_chn);
+ }
+}
+EXPORT_SYMBOL_GPL(prueth_reset_tx_chan);
+
+void prueth_reset_rx_chan(struct prueth_rx_chn *chn,
+ int num_flows, bool disable)
+{
+ int i;
+
+ for (i = 0; i < num_flows; i++)
+ k3_udma_glue_reset_rx_chn(chn->rx_chn, i, chn,
+ prueth_rx_cleanup);
+ if (disable)
+ k3_udma_glue_disable_rx_chn(chn->rx_chn);
+}
+EXPORT_SYMBOL_GPL(prueth_reset_rx_chan);
+
+void icssg_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue)
+{
+ ndev->stats.tx_errors++;
+}
+EXPORT_SYMBOL_GPL(icssg_ndo_tx_timeout);
+
+int icssg_ndo_set_ts_config(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ emac->tx_ts_enabled = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ emac->tx_ts_enabled = 1;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ emac->rx_ts_enabled = 0;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ emac->rx_ts_enabled = 1;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(icssg_ndo_set_ts_config);
+
+int icssg_ndo_get_ts_config(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ config->flags = 0;
+ config->tx_type = emac->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ config->rx_filter = emac->rx_ts_enabled ? HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(icssg_ndo_get_ts_config);
+
+void icssg_ndo_get_stats64(struct net_device *ndev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ emac_update_hardware_stats(emac);
+
+ stats->rx_packets = emac_get_stat_by_name(emac, "rx_packets");
+ stats->rx_bytes = emac_get_stat_by_name(emac, "rx_bytes");
+ stats->tx_packets = emac_get_stat_by_name(emac, "tx_packets");
+ stats->tx_bytes = emac_get_stat_by_name(emac, "tx_bytes");
+ stats->rx_crc_errors = emac_get_stat_by_name(emac, "rx_crc_errors");
+ stats->rx_over_errors = emac_get_stat_by_name(emac, "rx_over_errors");
+ stats->multicast = emac_get_stat_by_name(emac, "rx_multicast_frames");
+
+ stats->rx_errors = ndev->stats.rx_errors +
+ emac_get_stat_by_name(emac, "FW_RX_ERROR") +
+ emac_get_stat_by_name(emac, "FW_RX_EOF_SHORT_FRMERR") +
+ emac_get_stat_by_name(emac, "FW_RX_B0_DROP_EARLY_EOF") +
+ emac_get_stat_by_name(emac, "FW_RX_EXP_FRAG_Q_DROP") +
+ emac_get_stat_by_name(emac, "FW_RX_FIFO_OVERRUN");
+ stats->rx_dropped = ndev->stats.rx_dropped +
+ emac_get_stat_by_name(emac, "FW_DROPPED_PKT") +
+ emac_get_stat_by_name(emac, "FW_INF_PORT_DISABLED") +
+ emac_get_stat_by_name(emac, "FW_INF_SAV") +
+ emac_get_stat_by_name(emac, "FW_INF_SA_DL") +
+ emac_get_stat_by_name(emac, "FW_INF_PORT_BLOCKED") +
+ emac_get_stat_by_name(emac, "FW_INF_DROP_TAGGED") +
+ emac_get_stat_by_name(emac, "FW_INF_DROP_PRIOTAGGED") +
+ emac_get_stat_by_name(emac, "FW_INF_DROP_NOTAG") +
+ emac_get_stat_by_name(emac, "FW_INF_DROP_NOTMEMBER");
+ stats->tx_errors = ndev->stats.tx_errors;
+ stats->tx_dropped = ndev->stats.tx_dropped +
+ emac_get_stat_by_name(emac, "FW_RTU_PKT_DROP") +
+ emac_get_stat_by_name(emac, "FW_TX_DROPPED_PACKET") +
+ emac_get_stat_by_name(emac, "FW_TX_TS_DROPPED_PACKET") +
+ emac_get_stat_by_name(emac, "FW_TX_JUMBO_FRM_CUTOFF");
+}
+EXPORT_SYMBOL_GPL(icssg_ndo_get_stats64);
+
+int icssg_ndo_get_phys_port_name(struct net_device *ndev, char *name,
+ size_t len)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ int ret;
+
+ ret = snprintf(name, len, "p%d", emac->port_id);
+ if (ret >= len)
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(icssg_ndo_get_phys_port_name);
+
+/* get emac_port corresponding to eth_node name */
+int prueth_node_port(struct device_node *eth_node)
+{
+ u32 port_id;
+ int ret;
+
+ ret = of_property_read_u32(eth_node, "reg", &port_id);
+ if (ret)
+ return ret;
+
+ if (port_id == 0)
+ return PRUETH_PORT_MII0;
+ else if (port_id == 1)
+ return PRUETH_PORT_MII1;
+ else
+ return PRUETH_PORT_INVALID;
+}
+EXPORT_SYMBOL_GPL(prueth_node_port);
+
+/* get MAC instance corresponding to eth_node name */
+int prueth_node_mac(struct device_node *eth_node)
+{
+ u32 port_id;
+ int ret;
+
+ ret = of_property_read_u32(eth_node, "reg", &port_id);
+ if (ret)
+ return ret;
+
+ if (port_id == 0)
+ return PRUETH_MAC0;
+ else if (port_id == 1)
+ return PRUETH_MAC1;
+ else
+ return PRUETH_MAC_INVALID;
+}
+EXPORT_SYMBOL_GPL(prueth_node_mac);
+
+void prueth_netdev_exit(struct prueth *prueth,
+ struct device_node *eth_node)
+{
+ struct prueth_emac *emac;
+ enum prueth_mac mac;
+
+ mac = prueth_node_mac(eth_node);
+ if (mac == PRUETH_MAC_INVALID)
+ return;
+
+ emac = prueth->emac[mac];
+ if (!emac)
+ return;
+
+ if (of_phy_is_fixed_link(emac->phy_node))
+ of_phy_deregister_fixed_link(emac->phy_node);
+
+ netif_napi_del(&emac->napi_rx);
+
+ pruss_release_mem_region(prueth->pruss, &emac->dram);
+ destroy_workqueue(emac->cmd_wq);
+ free_netdev(emac->ndev);
+ prueth->emac[mac] = NULL;
+}
+EXPORT_SYMBOL_GPL(prueth_netdev_exit);
+
+int prueth_get_cores(struct prueth *prueth, int slice, bool is_sr1)
+{
+ struct device *dev = prueth->dev;
+ enum pruss_pru_id pruss_id;
+ struct device_node *np;
+ int idx = -1, ret;
+
+ np = dev->of_node;
+
+ switch (slice) {
+ case ICSS_SLICE0:
+ idx = 0;
+ break;
+ case ICSS_SLICE1:
+ idx = is_sr1 ? 2 : 3;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ prueth->pru[slice] = pru_rproc_get(np, idx, &pruss_id);
+ if (IS_ERR(prueth->pru[slice])) {
+ ret = PTR_ERR(prueth->pru[slice]);
+ prueth->pru[slice] = NULL;
+ return dev_err_probe(dev, ret, "unable to get PRU%d\n", slice);
+ }
+ prueth->pru_id[slice] = pruss_id;
+
+ idx++;
+ prueth->rtu[slice] = pru_rproc_get(np, idx, NULL);
+ if (IS_ERR(prueth->rtu[slice])) {
+ ret = PTR_ERR(prueth->rtu[slice]);
+ prueth->rtu[slice] = NULL;
+ return dev_err_probe(dev, ret, "unable to get RTU%d\n", slice);
+ }
+
+ if (is_sr1)
+ return 0;
+
+ idx++;
+ prueth->txpru[slice] = pru_rproc_get(np, idx, NULL);
+ if (IS_ERR(prueth->txpru[slice])) {
+ ret = PTR_ERR(prueth->txpru[slice]);
+ prueth->txpru[slice] = NULL;
+ return dev_err_probe(dev, ret, "unable to get TX_PRU%d\n", slice);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(prueth_get_cores);
+
+void prueth_put_cores(struct prueth *prueth, int slice)
+{
+ if (prueth->txpru[slice])
+ pru_rproc_put(prueth->txpru[slice]);
+
+ if (prueth->rtu[slice])
+ pru_rproc_put(prueth->rtu[slice]);
+
+ if (prueth->pru[slice])
+ pru_rproc_put(prueth->pru[slice]);
+}
+EXPORT_SYMBOL_GPL(prueth_put_cores);
+
+#ifdef CONFIG_PM_SLEEP
+static int prueth_suspend(struct device *dev)
+{
+ struct prueth *prueth = dev_get_drvdata(dev);
+ struct net_device *ndev;
+ int i, ret;
+
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ ndev = prueth->registered_netdevs[i];
+
+ if (!ndev)
+ continue;
+
+ if (netif_running(ndev)) {
+ netif_device_detach(ndev);
+ ret = ndev->netdev_ops->ndo_stop(ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "failed to stop: %d", ret);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int prueth_resume(struct device *dev)
+{
+ struct prueth *prueth = dev_get_drvdata(dev);
+ struct net_device *ndev;
+ int i, ret;
+
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ ndev = prueth->registered_netdevs[i];
+
+ if (!ndev)
+ continue;
+
+ if (netif_running(ndev)) {
+ ret = ndev->netdev_ops->ndo_open(ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "failed to start: %d", ret);
+ return ret;
+ }
+ netif_device_attach(ndev);
+ }
+ }
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+const struct dev_pm_ops prueth_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(prueth_suspend, prueth_resume)
+};
+EXPORT_SYMBOL_GPL(prueth_dev_pm_ops);
+
+MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
+MODULE_AUTHOR("Md Danish Anwar <danishanwar@ti.com>");
+MODULE_DESCRIPTION("PRUSS ICSSG Ethernet Driver Common Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.c b/drivers/net/ethernet/ti/icssg/icssg_config.c
index 99de8a40ed60..3f8237c17d09 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_config.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_config.c
@@ -20,6 +20,8 @@
/* IPG is in core_clk cycles */
#define MII_RT_TX_IPG_100M 0x17
#define MII_RT_TX_IPG_1G 0xb
+#define MII_RT_TX_IPG_100M_SR1 0x166
+#define MII_RT_TX_IPG_1G_SR1 0x1a
#define ICSSG_QUEUES_MAX 64
#define ICSSG_QUEUE_OFFSET 0xd00
@@ -64,6 +66,9 @@
#define FDB_GEN_CFG1 0x60
#define SMEM_VLAN_OFFSET 8
#define SMEM_VLAN_OFFSET_MASK GENMASK(25, 8)
+#define FDB_HASH_SIZE_MASK GENMASK(6, 3)
+#define FDB_HASH_SIZE_SHIFT 3
+#define FDB_HASH_SIZE 3
#define FDB_GEN_CFG2 0x64
#define FDB_VLAN_EN BIT(6)
@@ -105,28 +110,49 @@ static const struct map hwq_map[2][ICSSG_NUM_OTHER_QUEUES] = {
},
};
+static void icssg_config_mii_init_fw_offload(struct prueth_emac *emac)
+{
+ struct prueth *prueth = emac->prueth;
+ int mii = prueth_emac_slice(emac);
+ u32 txcfg_reg, pcnt_reg, txcfg;
+ struct regmap *mii_rt;
+
+ mii_rt = prueth->mii_rt;
+
+ txcfg_reg = (mii == ICSS_MII0) ? PRUSS_MII_RT_TXCFG0 :
+ PRUSS_MII_RT_TXCFG1;
+ pcnt_reg = (mii == ICSS_MII0) ? PRUSS_MII_RT_RX_PCNT0 :
+ PRUSS_MII_RT_RX_PCNT1;
+
+ txcfg = PRUSS_MII_RT_TXCFG_TX_ENABLE |
+ PRUSS_MII_RT_TXCFG_TX_AUTO_PREAMBLE |
+ PRUSS_MII_RT_TXCFG_TX_IPG_WIRE_CLK_EN;
+
+ if (emac->phy_if == PHY_INTERFACE_MODE_MII && mii == ICSS_MII1)
+ txcfg |= PRUSS_MII_RT_TXCFG_TX_MUX_SEL;
+ else if (emac->phy_if != PHY_INTERFACE_MODE_MII && mii == ICSS_MII0)
+ txcfg |= PRUSS_MII_RT_TXCFG_TX_MUX_SEL;
+
+ regmap_write(mii_rt, txcfg_reg, txcfg);
+ regmap_write(mii_rt, pcnt_reg, 0x1);
+}
+
static void icssg_config_mii_init(struct prueth_emac *emac)
{
- u32 rxcfg, txcfg, rxcfg_reg, txcfg_reg, pcnt_reg;
struct prueth *prueth = emac->prueth;
int slice = prueth_emac_slice(emac);
+ u32 txcfg, txcfg_reg, pcnt_reg;
struct regmap *mii_rt;
mii_rt = prueth->mii_rt;
- rxcfg_reg = (slice == ICSS_MII0) ? PRUSS_MII_RT_RXCFG0 :
- PRUSS_MII_RT_RXCFG1;
txcfg_reg = (slice == ICSS_MII0) ? PRUSS_MII_RT_TXCFG0 :
PRUSS_MII_RT_TXCFG1;
pcnt_reg = (slice == ICSS_MII0) ? PRUSS_MII_RT_RX_PCNT0 :
PRUSS_MII_RT_RX_PCNT1;
- rxcfg = MII_RXCFG_DEFAULT;
txcfg = MII_TXCFG_DEFAULT;
- if (slice == ICSS_MII1)
- rxcfg |= PRUSS_MII_RT_RXCFG_RX_MUX_SEL;
-
/* In MII mode TX lines swapped inside ICSSG, so TX_MUX_SEL cfg need
* to be swapped also comparing to RGMII mode.
*/
@@ -135,7 +161,6 @@ static void icssg_config_mii_init(struct prueth_emac *emac)
else if (emac->phy_if != PHY_INTERFACE_MODE_MII && slice == ICSS_MII1)
txcfg |= PRUSS_MII_RT_TXCFG_TX_MUX_SEL;
- regmap_write(mii_rt, rxcfg_reg, rxcfg);
regmap_write(mii_rt, txcfg_reg, txcfg);
regmap_write(mii_rt, pcnt_reg, 0x1);
}
@@ -202,24 +227,31 @@ void icssg_config_ipg(struct prueth_emac *emac)
{
struct prueth *prueth = emac->prueth;
int slice = prueth_emac_slice(emac);
+ u32 ipg;
switch (emac->speed) {
case SPEED_1000:
- icssg_mii_update_ipg(prueth->mii_rt, slice, MII_RT_TX_IPG_1G);
+ ipg = emac->is_sr1 ? MII_RT_TX_IPG_1G_SR1 : MII_RT_TX_IPG_1G;
break;
case SPEED_100:
- icssg_mii_update_ipg(prueth->mii_rt, slice, MII_RT_TX_IPG_100M);
+ ipg = emac->is_sr1 ? MII_RT_TX_IPG_100M_SR1 : MII_RT_TX_IPG_100M;
break;
case SPEED_10:
+ /* Firmware hardcodes IPG for SR1.0 */
+ if (emac->is_sr1)
+ return;
/* IPG for 10M is same as 100M */
- icssg_mii_update_ipg(prueth->mii_rt, slice, MII_RT_TX_IPG_100M);
+ ipg = MII_RT_TX_IPG_100M;
break;
default:
/* Other links speeds not supported */
netdev_err(emac->ndev, "Unsupported link speed\n");
return;
}
+
+ icssg_mii_update_ipg(prueth->mii_rt, slice, ipg);
}
+EXPORT_SYMBOL_GPL(icssg_config_ipg);
static void emac_r30_cmd_init(struct prueth_emac *emac)
{
@@ -249,7 +281,7 @@ static int emac_r30_is_done(struct prueth_emac *emac)
return 1;
}
-static int prueth_emac_buffer_setup(struct prueth_emac *emac)
+static int prueth_fw_offload_buffer_setup(struct prueth_emac *emac)
{
struct icssg_buffer_pool_cfg __iomem *bpool_cfg;
struct icssg_rxq_ctx __iomem *rxq_ctx;
@@ -258,13 +290,13 @@ static int prueth_emac_buffer_setup(struct prueth_emac *emac)
u32 addr;
int i;
- /* Layout to have 64KB aligned buffer pool
- * |BPOOL0|BPOOL1|RX_CTX0|RX_CTX1|
- */
-
addr = lower_32_bits(prueth->msmcram.pa);
- if (slice)
- addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE;
+ if (slice) {
+ if (prueth->pdata.banked_ms_ram)
+ addr += MSMC_RAM_BANK_SIZE;
+ else
+ addr += PRUETH_SW_TOTAL_BUF_SIZE_PER_SLICE;
+ }
if (addr % SZ_64K) {
dev_warn(prueth->dev, "buffer pool needs to be 64KB aligned\n");
@@ -272,58 +304,207 @@ static int prueth_emac_buffer_setup(struct prueth_emac *emac)
}
bpool_cfg = emac->dram.va + BUFFER_POOL_0_ADDR_OFFSET;
- /* workaround for f/w bug. bpool 0 needs to be initilalized */
- writel(addr, &bpool_cfg[0].addr);
- writel(0, &bpool_cfg[0].len);
- for (i = PRUETH_EMAC_BUF_POOL_START;
- i < PRUETH_EMAC_BUF_POOL_START + PRUETH_NUM_BUF_POOLS;
- i++) {
+ /* Configure buffer pools for forwarding buffers
+ * - used by firmware to store packets to be forwarded to other port
+ * - 8 total pools per slice
+ */
+ for (i = 0; i < PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE; i++) {
writel(addr, &bpool_cfg[i].addr);
- writel(PRUETH_EMAC_BUF_POOL_SIZE, &bpool_cfg[i].len);
- addr += PRUETH_EMAC_BUF_POOL_SIZE;
+ writel(PRUETH_SW_FWD_BUF_POOL_SIZE, &bpool_cfg[i].len);
+ addr += PRUETH_SW_FWD_BUF_POOL_SIZE;
}
- if (!slice)
- addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE;
- else
- addr += PRUETH_EMAC_RX_CTX_BUF_SIZE * 2;
+ /* Configure buffer pools for Local Injection buffers
+ * - used by firmware to store packets received from host core
+ * - 16 total pools per slice
+ */
+ for (i = 0; i < PRUETH_NUM_LI_BUF_POOLS_PER_SLICE; i++) {
+ int cfg_idx = i + PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE;
+
+ /* The driver only uses first 4 queues per PRU,
+ * so only initialize buffer for them
+ */
+ if ((i % PRUETH_NUM_LI_BUF_POOLS_PER_PORT_PER_SLICE)
+ < PRUETH_SW_USED_LI_BUF_POOLS_PER_PORT_PER_SLICE) {
+ writel(addr, &bpool_cfg[cfg_idx].addr);
+ writel(PRUETH_SW_LI_BUF_POOL_SIZE,
+ &bpool_cfg[cfg_idx].len);
+ addr += PRUETH_SW_LI_BUF_POOL_SIZE;
+ } else {
+ writel(0, &bpool_cfg[cfg_idx].addr);
+ writel(0, &bpool_cfg[cfg_idx].len);
+ }
+ }
+
+ /* Express RX buffer queue
+ * - used by firmware to store express packets to be transmitted
+ * to the host core
+ */
+ rxq_ctx = emac->dram.va + HOST_RX_Q_EXP_CONTEXT_OFFSET;
+ for (i = 0; i < 3; i++)
+ writel(addr, &rxq_ctx->start[i]);
+
+ addr += PRUETH_SW_HOST_EXP_BUF_POOL_SIZE;
+ writel(addr, &rxq_ctx->end);
- /* Pre-emptible RX buffer queue */
+ /* Pre-emptible RX buffer queue
+ * - used by firmware to store preemptible packets to be transmitted
+ * to the host core
+ */
rxq_ctx = emac->dram.va + HOST_RX_Q_PRE_CONTEXT_OFFSET;
for (i = 0; i < 3; i++)
writel(addr, &rxq_ctx->start[i]);
- addr += PRUETH_EMAC_RX_CTX_BUF_SIZE;
+ addr += PRUETH_SW_HOST_PRE_BUF_POOL_SIZE;
writel(addr, &rxq_ctx->end);
- /* Express RX buffer queue */
+ /* Set pointer for default dropped packet write
+ * - used by firmware to temporarily store packet to be dropped
+ */
+ rxq_ctx = emac->dram.va + DEFAULT_MSMC_Q_OFFSET;
+ writel(addr, &rxq_ctx->start[0]);
+
+ return 0;
+}
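Editorial sketch (not part of the patch) of the per-slice buffer layout that prueth_fw_offload_buffer_setup() programs, using the sizes defined in icssg_config.h:

	base
	+ 8 forwarding pools, 8K each
	+ 16 LI pool slots, of which 8 (4 per port) are backed with 4K each
	+ express RX queue, 16K
	+ pre-emptible RX queue, 14K
	+ drop-packet buffer, 2K

The second slice's base is offset by PRUETH_SW_TOTAL_BUF_SIZE_PER_SLICE, or by MSMC_RAM_BANK_SIZE on devices with banked MSMC RAM.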
+
+static int prueth_emac_buffer_setup(struct prueth_emac *emac)
+{
+ struct icssg_buffer_pool_cfg __iomem *bpool_cfg;
+ struct icssg_rxq_ctx __iomem *rxq_ctx;
+ struct prueth *prueth = emac->prueth;
+ int slice = prueth_emac_slice(emac);
+ u32 addr;
+ int i;
+
+ addr = lower_32_bits(prueth->msmcram.pa);
+ if (slice) {
+ if (prueth->pdata.banked_ms_ram)
+ addr += MSMC_RAM_BANK_SIZE;
+ else
+ addr += PRUETH_EMAC_TOTAL_BUF_SIZE_PER_SLICE;
+ }
+
+ if (addr % SZ_64K) {
+ dev_warn(prueth->dev, "buffer pool needs to be 64KB aligned\n");
+ return -EINVAL;
+ }
+
+ bpool_cfg = emac->dram.va + BUFFER_POOL_0_ADDR_OFFSET;
+
+ /* Configure buffer pools for forwarding buffers
+ * - in mac mode - no forwarding so initialize all pools to 0
+ * - 8 total pools per slice
+ */
+ for (i = 0; i < PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE; i++) {
+ writel(0, &bpool_cfg[i].addr);
+ writel(0, &bpool_cfg[i].len);
+ }
+
+ /* Configure buffer pools for Local Injection buffers
+ * - used by firmware to store packets received from host core
+ * - 16 total pools per slice
+ */
+ bpool_cfg = emac->dram.va + BUFFER_POOL_0_ADDR_OFFSET;
+ for (i = 0; i < PRUETH_NUM_LI_BUF_POOLS_PER_SLICE; i++) {
+ int cfg_idx = i + PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE;
+
+ /* In EMAC mode, only the first 4 pools are used,
+ * as one slice handles only one port
+ */
+ if (i < PRUETH_EMAC_USED_LI_BUF_POOLS_PER_PORT_PER_SLICE) {
+ writel(addr, &bpool_cfg[cfg_idx].addr);
+ writel(PRUETH_EMAC_LI_BUF_POOL_SIZE,
+ &bpool_cfg[cfg_idx].len);
+ addr += PRUETH_EMAC_LI_BUF_POOL_SIZE;
+ } else {
+ writel(0, &bpool_cfg[cfg_idx].addr);
+ writel(0, &bpool_cfg[cfg_idx].len);
+ }
+ }
+
+ /* Express RX buffer queue
+ * - used by firmware to store express packets to be transmitted
+ * to host core
+ */
rxq_ctx = emac->dram.va + HOST_RX_Q_EXP_CONTEXT_OFFSET;
for (i = 0; i < 3; i++)
writel(addr, &rxq_ctx->start[i]);
- addr += PRUETH_EMAC_RX_CTX_BUF_SIZE;
+ addr += PRUETH_EMAC_HOST_EXP_BUF_POOL_SIZE;
+ writel(addr, &rxq_ctx->end);
+
+ /* Pre-emptible RX buffer queue
+ * - used by firmware to store preemptible packets to be transmitted
+ * to host core
+ */
+ rxq_ctx = emac->dram.va + HOST_RX_Q_PRE_CONTEXT_OFFSET;
+ for (i = 0; i < 3; i++)
+ writel(addr, &rxq_ctx->start[i]);
+
+ addr += PRUETH_EMAC_HOST_PRE_BUF_POOL_SIZE;
writel(addr, &rxq_ctx->end);
+ /* Set pointer for default dropped packet write
+ * - used by firmware to temporarily store packet to be dropped
+ */
+ rxq_ctx = emac->dram.va + DEFAULT_MSMC_Q_OFFSET;
+ writel(addr, &rxq_ctx->start[0]);
+
return 0;
}
-static void icssg_init_emac_mode(struct prueth *prueth)
+void icssg_init_emac_mode(struct prueth *prueth)
{
/* When the device is configured as a bridge and it is being brought
* back to the emac mode, the host mac address has to be set as 0.
*/
+ u32 addr = prueth->shram.pa + EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET;
+ int i;
u8 mac[ETH_ALEN] = { 0 };
- if (prueth->emacs_initialized)
- return;
-
- regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1,
- SMEM_VLAN_OFFSET_MASK, 0);
- regmap_write(prueth->miig_rt, FDB_GEN_CFG2, 0);
+ /* Set VLAN TABLE address base */
+ regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, SMEM_VLAN_OFFSET_MASK,
+ addr << SMEM_VLAN_OFFSET);
+ regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, FDB_HASH_SIZE_MASK,
+ FDB_HASH_SIZE << FDB_HASH_SIZE_SHIFT);
+ /* Enable VLAN-aware mode and FDBs for all PRUs */
+ regmap_write(prueth->miig_rt, FDB_GEN_CFG2, (FDB_PRU0_EN | FDB_PRU1_EN | FDB_HOST_EN));
+ prueth->vlan_tbl = (struct prueth_vlan_tbl __force *)(prueth->shram.va +
+ EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET);
+ for (i = 0; i < SZ_4K - 1; i++) {
+ prueth->vlan_tbl[i].fid = i;
+ prueth->vlan_tbl[i].fid_c1 = 0;
+ }
/* Clear host MAC address */
icssg_class_set_host_mac_addr(prueth->miig_rt, mac);
}
+EXPORT_SYMBOL_GPL(icssg_init_emac_mode);
+
+void icssg_init_fw_offload_mode(struct prueth *prueth)
+{
+ u32 addr = prueth->shram.pa + EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET;
+ int i;
+
+ /* Set VLAN TABLE address base */
+ regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, SMEM_VLAN_OFFSET_MASK,
+ addr << SMEM_VLAN_OFFSET);
+ regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, FDB_HASH_SIZE_MASK,
+ FDB_HASH_SIZE << FDB_HASH_SIZE_SHIFT);
+ /* Enable VLAN-aware mode and FDBs for all PRUs */
+ regmap_write(prueth->miig_rt, FDB_GEN_CFG2, FDB_EN_ALL);
+ prueth->vlan_tbl = (struct prueth_vlan_tbl __force *)(prueth->shram.va +
+ EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET);
+ for (i = 0; i < SZ_4K - 1; i++) {
+ prueth->vlan_tbl[i].fid = i;
+ prueth->vlan_tbl[i].fid_c1 = 0;
+ }
+
+ if (prueth->hw_bridge_dev)
+ icssg_class_set_host_mac_addr(prueth->miig_rt, prueth->hw_bridge_dev->dev_addr);
+ icssg_set_pvid(prueth, prueth->default_vlan, PRUETH_PORT_HOST);
+}
+EXPORT_SYMBOL_GPL(icssg_init_fw_offload_mode);
int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice)
{
@@ -331,8 +512,6 @@ int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice)
struct icssg_flow_cfg __iomem *flow_cfg;
int ret;
- icssg_init_emac_mode(prueth);
-
memset_io(config, 0, TAS_GATE_MASK_LIST0);
icssg_miig_queues_init(prueth, slice);
@@ -345,7 +524,10 @@ int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice)
regmap_update_bits(prueth->miig_rt, ICSSG_CFG_OFFSET,
ICSSG_CFG_DEFAULT, ICSSG_CFG_DEFAULT);
icssg_miig_set_interface_mode(prueth->miig_rt, slice, emac->phy_if);
- icssg_config_mii_init(emac);
+ if (prueth->is_switch_mode || prueth->is_hsr_offload_mode)
+ icssg_config_mii_init_fw_offload(emac);
+ else
+ icssg_config_mii_init(emac);
icssg_config_ipg(emac);
icssg_update_rgmii_cfg(prueth->miig_rt, emac);
@@ -368,7 +550,10 @@ int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice)
writeb(0, config + SPL_PKT_DEFAULT_PRIORITY);
writeb(0, config + QUEUE_NUM_UNTAGGED);
- ret = prueth_emac_buffer_setup(emac);
+ if (prueth->is_switch_mode || prueth->is_hsr_offload_mode)
+ ret = prueth_fw_offload_buffer_setup(emac);
+ else
+ ret = prueth_emac_buffer_setup(emac);
if (ret)
return ret;
@@ -376,6 +561,7 @@ int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice)
return 0;
}
+EXPORT_SYMBOL_GPL(icssg_config);
/* Bitmask for ICSSG r30 commands */
static const struct icssg_r30_cmd emac_r32_bitmask[] = {
@@ -397,11 +583,13 @@ static const struct icssg_r30_cmd emac_r32_bitmask[] = {
{{EMAC_NONE, 0xffff4000, EMAC_NONE, EMAC_NONE}}, /* Preemption on Tx ENABLE*/
{{EMAC_NONE, 0xbfff0000, EMAC_NONE, EMAC_NONE}}, /* Preemption on Tx DISABLE*/
{{0xffff0010, EMAC_NONE, 0xffff0010, EMAC_NONE}}, /* VLAN AWARE*/
- {{0xffef0000, EMAC_NONE, 0xffef0000, EMAC_NONE}} /* VLAN UNWARE*/
+ {{0xffef0000, EMAC_NONE, 0xffef0000, EMAC_NONE}}, /* VLAN UNAWARE */
+ {{0xffff2000, EMAC_NONE, EMAC_NONE, EMAC_NONE}}, /* HSR_RX_OFFLOAD_ENABLE */
+ {{0xdfff0000, EMAC_NONE, EMAC_NONE, EMAC_NONE}} /* HSR_RX_OFFLOAD_DISABLE */
};
-int emac_set_port_state(struct prueth_emac *emac,
- enum icssg_port_state_cmd cmd)
+int icssg_set_port_state(struct prueth_emac *emac,
+ enum icssg_port_state_cmd cmd)
{
struct icssg_r30_cmd __iomem *p;
int ret = -ETIMEDOUT;
@@ -432,6 +620,7 @@ int emac_set_port_state(struct prueth_emac *emac,
return ret;
}
+EXPORT_SYMBOL_GPL(icssg_set_port_state);
void icssg_config_half_duplex(struct prueth_emac *emac)
{
@@ -443,6 +632,7 @@ void icssg_config_half_duplex(struct prueth_emac *emac)
val = get_random_u32();
writel(val, emac->dram.va + HD_RAND_SEED_OFFSET);
}
+EXPORT_SYMBOL_GPL(icssg_config_half_duplex);
void icssg_config_set_speed(struct prueth_emac *emac)
{
@@ -469,3 +659,206 @@ void icssg_config_set_speed(struct prueth_emac *emac)
writeb(fw_speed, emac->dram.va + PORT_LINK_SPEED_OFFSET);
}
+EXPORT_SYMBOL_GPL(icssg_config_set_speed);
+
+int icssg_send_fdb_msg(struct prueth_emac *emac, struct mgmt_cmd *cmd,
+ struct mgmt_cmd_rsp *rsp)
+{
+ struct prueth *prueth = emac->prueth;
+ int slice = prueth_emac_slice(emac);
+ int addr, ret;
+
+ addr = icssg_queue_pop(prueth, slice == 0 ?
+ ICSSG_CMD_POP_SLICE0 : ICSSG_CMD_POP_SLICE1);
+ if (addr < 0)
+ return addr;
+
+ /* First 4 bytes have FW owned buffer linking info which should
+ * not be touched
+ */
+ memcpy_toio(prueth->shram.va + addr + 4, cmd, sizeof(*cmd));
+ icssg_queue_push(prueth, slice == 0 ?
+ ICSSG_CMD_PUSH_SLICE0 : ICSSG_CMD_PUSH_SLICE1, addr);
+ ret = read_poll_timeout(icssg_queue_pop, addr, addr >= 0,
+ 2000, 20000000, false, prueth, slice == 0 ?
+ ICSSG_RSP_POP_SLICE0 : ICSSG_RSP_POP_SLICE1);
+ if (ret) {
+ netdev_err(emac->ndev, "Timedout sending HWQ message\n");
+ return ret;
+ }
+
+ memcpy_fromio(rsp, prueth->shram.va + addr, sizeof(*rsp));
+ /* Return the buffer back to the pool */
+ icssg_queue_push(prueth, slice == 0 ?
+ ICSSG_RSP_PUSH_SLICE0 : ICSSG_RSP_PUSH_SLICE1, addr);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(icssg_send_fdb_msg);
+
+static void icssg_fdb_setup(struct prueth_emac *emac, struct mgmt_cmd *fdb_cmd,
+ const unsigned char *addr, u8 fid, int cmd)
+{
+ int slice = prueth_emac_slice(emac);
+ u8 mac_fid[ETH_ALEN + 2];
+ u16 fdb_slot;
+
+ ether_addr_copy(mac_fid, addr);
+
+ /* 1-1 VID-FID mapping is already set up */
+ mac_fid[ETH_ALEN] = fid;
+ mac_fid[ETH_ALEN + 1] = 0;
+
+ fdb_slot = bitrev32(crc32_le(0, mac_fid, 8)) & PRUETH_SWITCH_FDB_MASK;
+
+ fdb_cmd->header = ICSSG_FW_MGMT_CMD_HEADER;
+ fdb_cmd->type = ICSSG_FW_MGMT_FDB_CMD_TYPE;
+ fdb_cmd->seqnum = ++(emac->prueth->icssg_hwcmdseq);
+ fdb_cmd->param = cmd;
+ fdb_cmd->param |= (slice << 4);
+
+ memcpy(&fdb_cmd->cmd_args[0], addr, 4);
+ memcpy(&fdb_cmd->cmd_args[1], &addr[4], 2);
+ fdb_cmd->cmd_args[2] = fdb_slot;
+
+ netdev_dbg(emac->ndev, "MAC %pM slot %X FID %X\n", addr, fdb_slot, fid);
+}
+
+int icssg_fdb_add_del(struct prueth_emac *emac, const unsigned char *addr,
+ u8 vid, u8 fid_c2, bool add)
+{
+ struct mgmt_cmd_rsp fdb_cmd_rsp = { 0 };
+ struct mgmt_cmd fdb_cmd = { 0 };
+ u8 fid = vid;
+ int ret;
+
+ icssg_fdb_setup(emac, &fdb_cmd, addr, fid, add ? ICSS_CMD_ADD_FDB : ICSS_CMD_DEL_FDB);
+
+ fid_c2 |= ICSSG_FDB_ENTRY_VALID;
+ fdb_cmd.cmd_args[1] |= ((fid << 16) | (fid_c2 << 24));
+
+ ret = icssg_send_fdb_msg(emac, &fdb_cmd, &fdb_cmd_rsp);
+ if (ret)
+ return ret;
+
+ WARN_ON(fdb_cmd.seqnum != fdb_cmd_rsp.seqnum);
+ if (fdb_cmd_rsp.status == 1)
+ return 0;
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(icssg_fdb_add_del);
+
+int icssg_fdb_lookup(struct prueth_emac *emac, const unsigned char *addr,
+ u8 vid)
+{
+ struct mgmt_cmd_rsp fdb_cmd_rsp = { 0 };
+ struct mgmt_cmd fdb_cmd = { 0 };
+ struct prueth_fdb_slot *slot;
+ u8 fid = vid;
+ int ret, i;
+
+ icssg_fdb_setup(emac, &fdb_cmd, addr, fid, ICSS_CMD_GET_FDB_SLOT);
+
+ fdb_cmd.cmd_args[1] |= fid << 16;
+
+ ret = icssg_send_fdb_msg(emac, &fdb_cmd, &fdb_cmd_rsp);
+ if (ret)
+ return ret;
+
+ WARN_ON(fdb_cmd.seqnum != fdb_cmd_rsp.seqnum);
+
+ slot = (struct prueth_fdb_slot __force *)(emac->dram.va + FDB_CMD_BUFFER);
+ for (i = 0; i < 4; i++) {
+ if (ether_addr_equal(addr, slot->mac) && vid == slot->fid)
+ return (slot->fid_c2 & ~ICSSG_FDB_ENTRY_VALID);
+ slot++;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(icssg_fdb_lookup);
+
+void icssg_vtbl_modify(struct prueth_emac *emac, u8 vid, u8 port_mask,
+ u8 untag_mask, bool add)
+{
+ struct prueth *prueth = emac->prueth;
+ struct prueth_vlan_tbl *tbl;
+ u8 fid_c1;
+
+ tbl = prueth->vlan_tbl;
+ spin_lock(&prueth->vtbl_lock);
+ fid_c1 = tbl[vid].fid_c1;
+
+ /* FID_C1: bit0..2 port membership mask,
+ * bit3..5 tagging mask for each port
+ * bit6 Stream VID (not handled currently)
+ * bit7 MC flood (not handled currently)
+ */
+ if (add) {
+ fid_c1 |= (port_mask | port_mask << 3);
+ fid_c1 &= ~(untag_mask << 3);
+ } else {
+ fid_c1 &= ~(port_mask | port_mask << 3);
+ }
+
+ tbl[vid].fid_c1 = fid_c1;
+ spin_unlock(&prueth->vtbl_lock);
+}
+EXPORT_SYMBOL_GPL(icssg_vtbl_modify);
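As a worked example of the FID_C1 update in icssg_vtbl_modify() (editorial, hypothetical values): adding VID 5 with port_mask = BIT(0) | BIT(1) and untag_mask = BIT(0) on a zeroed entry first sets fid_c1 to 0x1b (membership and tagging bits for both ports) and then clears bit 3, leaving fid_c1 = 0x13: both ports are members, and the tagging bit remains set only for port 1.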
+
+u16 icssg_get_pvid(struct prueth_emac *emac)
+{
+ struct prueth *prueth = emac->prueth;
+ u32 pvid;
+
+ if (emac->port_id == PRUETH_PORT_MII0)
+ pvid = readl(prueth->shram.va + EMAC_ICSSG_SWITCH_PORT1_DEFAULT_VLAN_OFFSET);
+ else
+ pvid = readl(prueth->shram.va + EMAC_ICSSG_SWITCH_PORT2_DEFAULT_VLAN_OFFSET);
+
+ pvid = pvid >> 24;
+
+ return pvid;
+}
+EXPORT_SYMBOL_GPL(icssg_get_pvid);
+
+void icssg_set_pvid(struct prueth *prueth, u8 vid, u8 port)
+{
+ u32 pvid;
+
+ /* only 256 VLANs are supported */
+ pvid = (u32 __force)cpu_to_be32((ETH_P_8021Q << 16) | (vid & 0xff));
+
+ if (port == PRUETH_PORT_MII0)
+ writel(pvid, prueth->shram.va + EMAC_ICSSG_SWITCH_PORT1_DEFAULT_VLAN_OFFSET);
+ else if (port == PRUETH_PORT_MII1)
+ writel(pvid, prueth->shram.va + EMAC_ICSSG_SWITCH_PORT2_DEFAULT_VLAN_OFFSET);
+ else
+ writel(pvid, prueth->shram.va + EMAC_ICSSG_SWITCH_PORT0_DEFAULT_VLAN_OFFSET);
+}
+EXPORT_SYMBOL_GPL(icssg_set_pvid);
+
+int emac_fdb_flow_id_updated(struct prueth_emac *emac)
+{
+ struct mgmt_cmd_rsp fdb_cmd_rsp = { 0 };
+ int slice = prueth_emac_slice(emac);
+ struct mgmt_cmd fdb_cmd = { 0 };
+ int ret;
+
+ fdb_cmd.header = ICSSG_FW_MGMT_CMD_HEADER;
+ fdb_cmd.type = ICSSG_FW_MGMT_FDB_CMD_TYPE_RX_FLOW;
+ fdb_cmd.seqnum = ++(emac->prueth->icssg_hwcmdseq);
+ fdb_cmd.param = 0;
+
+ fdb_cmd.param |= (slice << 4);
+ fdb_cmd.cmd_args[0] = 0;
+
+ ret = icssg_send_fdb_msg(emac, &fdb_cmd, &fdb_cmd_rsp);
+ if (ret)
+ return ret;
+
+ WARN_ON(fdb_cmd.seqnum != fdb_cmd_rsp.seqnum);
+ return fdb_cmd_rsp.status == 1 ? 0 : -EINVAL;
+}
+EXPORT_SYMBOL_GPL(emac_fdb_flow_id_updated);
diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.h b/drivers/net/ethernet/ti/icssg/icssg_config.h
index 43eb0922172a..60d69744ffae 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_config.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_config.h
@@ -26,14 +26,73 @@ struct icssg_flow_cfg {
#define PRUETH_MAX_RX_FLOWS 1 /* excluding default flow */
#define PRUETH_RX_FLOW_DATA 0
-#define PRUETH_EMAC_BUF_POOL_SIZE SZ_8K
-#define PRUETH_EMAC_POOLS_PER_SLICE 24
-#define PRUETH_EMAC_BUF_POOL_START 8
-#define PRUETH_NUM_BUF_POOLS 8
-#define PRUETH_EMAC_RX_CTX_BUF_SIZE SZ_16K /* per slice */
-#define MSMC_RAM_SIZE \
- (2 * (PRUETH_EMAC_BUF_POOL_SIZE * PRUETH_NUM_BUF_POOLS + \
- PRUETH_EMAC_RX_CTX_BUF_SIZE * 2))
+/* Defines for forwarding path buffer pools:
+ * - used by firmware to store packets to be forwarded to other port
+ * - 8 total pools per slice
+ * - only used in switch mode (as no forwarding in mac mode)
+ */
+#define PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE 8
+#define PRUETH_SW_FWD_BUF_POOL_SIZE (SZ_8K)
+
+/* Defines for local injection path buffer pools:
+ * - used by firmware to store packets received from host core
+ * - 16 total pools per slice
+ * - 8 pools per port per slice and each slice handles both ports
+ * - only 4 out of 8 pools used per port (as only 4 real QoS levels in ICSSG)
+ * - switch mode: 8 total pools used
+ * - mac mode: 4 total pools used
+ */
+#define PRUETH_NUM_LI_BUF_POOLS_PER_SLICE 16
+#define PRUETH_NUM_LI_BUF_POOLS_PER_PORT_PER_SLICE 8
+#define PRUETH_SW_LI_BUF_POOL_SIZE SZ_4K
+#define PRUETH_SW_USED_LI_BUF_POOLS_PER_SLICE 8
+#define PRUETH_SW_USED_LI_BUF_POOLS_PER_PORT_PER_SLICE 4
+#define PRUETH_EMAC_LI_BUF_POOL_SIZE SZ_8K
+#define PRUETH_EMAC_USED_LI_BUF_POOLS_PER_SLICE 4
+#define PRUETH_EMAC_USED_LI_BUF_POOLS_PER_PORT_PER_SLICE 4
+
+/* Defines for host egress path - express and preemptible buffers
+ * - used by firmware to store express and preemptible packets
+ * to be transmitted to host core
+ * - used by both mac/switch modes
+ */
+#define PRUETH_SW_HOST_EXP_BUF_POOL_SIZE SZ_16K
+#define PRUETH_SW_HOST_PRE_BUF_POOL_SIZE (SZ_16K - SZ_2K)
+#define PRUETH_EMAC_HOST_EXP_BUF_POOL_SIZE PRUETH_SW_HOST_EXP_BUF_POOL_SIZE
+#define PRUETH_EMAC_HOST_PRE_BUF_POOL_SIZE PRUETH_SW_HOST_PRE_BUF_POOL_SIZE
+
+/* Buffer used by firmware to temporarily store packet to be dropped */
+#define PRUETH_SW_DROP_PKT_BUF_SIZE SZ_2K
+#define PRUETH_EMAC_DROP_PKT_BUF_SIZE PRUETH_SW_DROP_PKT_BUF_SIZE
+
+/* Total switch mode memory usage for buffers per slice */
+#define PRUETH_SW_TOTAL_BUF_SIZE_PER_SLICE \
+ (PRUETH_SW_FWD_BUF_POOL_SIZE * PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE + \
+ PRUETH_SW_LI_BUF_POOL_SIZE * PRUETH_SW_USED_LI_BUF_POOLS_PER_SLICE + \
+ PRUETH_SW_HOST_EXP_BUF_POOL_SIZE + \
+ PRUETH_SW_HOST_PRE_BUF_POOL_SIZE + \
+ PRUETH_SW_DROP_PKT_BUF_SIZE)
+
+/* Total switch mode memory usage for all buffers */
+#define PRUETH_SW_TOTAL_BUF_SIZE \
+ (2 * PRUETH_SW_TOTAL_BUF_SIZE_PER_SLICE)
+
+/* Total mac mode memory usage for buffers per slice */
+#define PRUETH_EMAC_TOTAL_BUF_SIZE_PER_SLICE \
+ (PRUETH_EMAC_LI_BUF_POOL_SIZE * \
+ PRUETH_EMAC_USED_LI_BUF_POOLS_PER_SLICE + \
+ PRUETH_EMAC_HOST_EXP_BUF_POOL_SIZE + \
+ PRUETH_EMAC_HOST_PRE_BUF_POOL_SIZE + \
+ PRUETH_EMAC_DROP_PKT_BUF_SIZE)
+
+/* Total mac mode memory usage for all buffers */
+#define PRUETH_EMAC_TOTAL_BUF_SIZE \
+ (2 * PRUETH_EMAC_TOTAL_BUF_SIZE_PER_SLICE)
+
+/* Size of 1 bank of MSMC/OC_SRAM memory */
+#define MSMC_RAM_BANK_SIZE SZ_256K
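Working out the totals above (editorial arithmetic, not part of the patch): switch mode uses 8 x 8K + 8 x 4K + 16K + 14K + 2K = 128K per slice, and MAC mode uses 4 x 8K + 16K + 14K + 2K = 64K per slice, so either mode fits within a single 256K MSMC bank per slice on devices that provide banked memory.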
+
+#define PRUETH_SWITCH_FDB_MASK ((SIZE_OF_FDB / NUMBER_OF_FDB_BUCKET_ENTRIES) - 1)
struct icssg_rxq_ctx {
__le32 start[3];
@@ -46,6 +105,7 @@ struct icssg_rxq_ctx {
#define ICSSG_FW_MGMT_FDB_CMD_TYPE 0x03
#define ICSSG_FW_MGMT_CMD_TYPE 0x04
#define ICSSG_FW_MGMT_PKT 0x80000000
+#define ICSSG_FW_MGMT_FDB_CMD_TYPE_RX_FLOW 0x05
struct icssg_r30_cmd {
u32 cmd[4];
@@ -71,6 +131,8 @@ enum icssg_port_state_cmd {
ICSSG_EMAC_PORT_PREMPT_TX_DISABLE,
ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE,
ICSSG_EMAC_PORT_VLAN_AWARE_DISABLE,
+ ICSSG_EMAC_HSR_RX_OFFLOAD_ENABLE,
+ ICSSG_EMAC_HSR_RX_OFFLOAD_DISABLE,
ICSSG_EMAC_PORT_MAX_COMMANDS
};
@@ -109,6 +171,62 @@ enum icssg_port_state_cmd {
#define ICSSG_FLAG_MASK 0xff00ffff
+/* SR1.0-specific bits */
+#define PRUETH_MAX_RX_FLOWS_SR1 4 /* excluding default flow */
+#define PRUETH_RX_FLOW_DATA_SR1 3 /* highest priority flow */
+#define PRUETH_MAX_RX_MGM_DESC_SR1 8
+#define PRUETH_MAX_RX_MGM_FLOWS_SR1 2 /* excluding default flow */
+#define PRUETH_RX_MGM_FLOW_RESPONSE_SR1 0
+#define PRUETH_RX_MGM_FLOW_TIMESTAMP_SR1 1
+
+#define PRUETH_NUM_BUF_POOLS_SR1 16
+#define PRUETH_EMAC_BUF_POOL_START_SR1 8
+#define PRUETH_EMAC_BUF_POOL_MIN_SIZE_SR1 128
+#define PRUETH_EMAC_BUF_SIZE_SR1 1536
+#define PRUETH_EMAC_NUM_BUF_SR1 4
+#define PRUETH_EMAC_BUF_POOL_SIZE_SR1 (PRUETH_EMAC_NUM_BUF_SR1 * \
+ PRUETH_EMAC_BUF_SIZE_SR1)
+#define MSMC_RAM_SIZE_SR1 (SZ_64K + SZ_32K + SZ_2K) /* 0x1880 x 8 x 2 */
+
+struct icssg_sr1_config {
+ __le32 status; /* Firmware status */
+ __le32 addr_lo; /* MSMC Buffer pool base address low. */
+ __le32 addr_hi; /* MSMC Buffer pool base address high. Must be 0 */
+ __le32 tx_buf_sz[16]; /* Array of buffer pool sizes */
+ __le32 num_tx_threads; /* Number of active egress threads, 1 to 4 */
+ __le32 tx_rate_lim_en; /* Bitmask: Egress rate limit en per thread */
+ __le32 rx_flow_id; /* RX flow id for first rx ring */
+ __le32 rx_mgr_flow_id; /* RX flow id for the first management ring */
+ __le32 flags; /* TBD */
+ __le32 n_burst; /* for debug */
+ __le32 rtu_status; /* RTU status */
+ __le32 info; /* reserved */
+ __le32 reserve;
+ __le32 rand_seed; /* Used for the random number generation at fw */
+} __packed;
+
+/* SR1.0 shutdown command to stop processing at firmware.
+ * Command format: 0x8101ss00, where
+ * - ss: sequence number. Currently not used by driver.
+ */
+#define ICSSG_SHUTDOWN_CMD_SR1 0x81010000
+
+/* SR1.0 pstate speed/duplex command to set speed and duplex settings
+ * in firmware.
+ * Command format: 0x8102ssPN, where
+ * - ss: sequence number. Currently not used by driver.
+ * - P: port number (for switch mode).
+ * - N: Speed/Duplex state:
+ * 0x0 - 10Mbps/Half duplex;
+ * 0x8 - 10Mbps/Full duplex;
+ * 0x2 - 100Mbps/Half duplex;
+ * 0xa - 100Mbps/Full duplex;
+ * 0xc - 1Gbps/Full duplex;
+ * NOTE: The above are the same value as bits [3..1](slice 0)
+ * or bits [7..5](slice 1) of RGMII CFG register.
+ */
+#define ICSSG_PSTATE_SPEED_DUPLEX_CMD_SR1 0x81020000
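A minimal sketch (editorial, not part of the patch) of how a command word could be composed under the 0x8102ssPN format documented above; the helper name and the zero sequence number are hypothetical:

	static inline u32 icssg_sr1_speed_cmd(u8 seq, u8 port, u8 state)
	{
		/* ss in bits 15..8, P in bits 7..4, N in bits 3..0 */
		return ICSSG_PSTATE_SPEED_DUPLEX_CMD_SR1 |
		       ((u32)seq << 8) | ((port & 0xf) << 4) | (state & 0xf);
	}

	/* e.g. 1Gbps/full duplex on port 0, sequence 0 -> 0x8102000c */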
+
struct icssg_setclock_desc {
u8 request;
u8 restore;
@@ -146,6 +264,23 @@ struct icssg_setclock_desc {
#define ICSSG_TS_PUSH_SLICE0 40
#define ICSSG_TS_PUSH_SLICE1 41
+struct mgmt_cmd {
+ u8 param;
+ u8 seqnum;
+ u8 type;
+ u8 header;
+ u32 cmd_args[3];
+};
+
+struct mgmt_cmd_rsp {
+ u32 reserved;
+ u8 status;
+ u8 seqnum;
+ u8 type;
+ u8 header;
+ u32 cmd_args[3];
+};
+
/* FDB FID_C2 flag definitions */
/* Indicates host port membership.*/
#define ICSSG_FDB_ENTRY_P0_MEMBERSHIP BIT(0)
diff --git a/drivers/net/ethernet/ti/icssg/icssg_ethtool.c b/drivers/net/ethernet/ti/icssg/icssg_ethtool.c
index 9a7dd7efcf69..b715af21d23a 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_ethtool.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_ethtool.c
@@ -68,9 +68,13 @@ static int emac_nway_reset(struct net_device *ndev)
static int emac_get_sset_count(struct net_device *ndev, int stringset)
{
+ struct prueth_emac *emac = netdev_priv(ndev);
switch (stringset) {
case ETH_SS_STATS:
- return ICSSG_NUM_ETHTOOL_STATS;
+ if (emac->prueth->pa_stats)
+ return ICSSG_NUM_ETHTOOL_STATS;
+ else
+ return ICSSG_NUM_ETHTOOL_STATS - ICSSG_NUM_PA_STATS;
default:
return -EOPNOTSUPP;
}
@@ -78,18 +82,18 @@ static int emac_get_sset_count(struct net_device *ndev, int stringset)
static void emac_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
+ struct prueth_emac *emac = netdev_priv(ndev);
u8 *p = data;
int i;
switch (stringset) {
case ETH_SS_STATS:
- for (i = 0; i < ARRAY_SIZE(icssg_all_stats); i++) {
- if (!icssg_all_stats[i].standard_stats) {
- memcpy(p, icssg_all_stats[i].name,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
- }
+ for (i = 0; i < ARRAY_SIZE(icssg_all_miig_stats); i++)
+ if (!icssg_all_miig_stats[i].standard_stats)
+ ethtool_puts(&p, icssg_all_miig_stats[i].name);
+ if (emac->prueth->pa_stats)
+ for (i = 0; i < ARRAY_SIZE(icssg_all_pa_stats); i++)
+ ethtool_puts(&p, icssg_all_pa_stats[i].name);
break;
default:
break;
@@ -104,13 +108,17 @@ static void emac_get_ethtool_stats(struct net_device *ndev,
emac_update_hardware_stats(emac);
- for (i = 0; i < ARRAY_SIZE(icssg_all_stats); i++)
- if (!icssg_all_stats[i].standard_stats)
+ for (i = 0; i < ARRAY_SIZE(icssg_all_miig_stats); i++)
+ if (!icssg_all_miig_stats[i].standard_stats)
*(data++) = emac->stats[i];
+
+ if (emac->prueth->pa_stats)
+ for (i = 0; i < ARRAY_SIZE(icssg_all_pa_stats); i++)
+ *(data++) = emac->pa_stats[i];
}
static int emac_get_ts_info(struct net_device *ndev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct prueth_emac *emac = netdev_priv(ndev);
@@ -118,8 +126,6 @@ static int emac_get_ts_info(struct net_device *ndev,
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
info->phc_index = icss_iep_get_ptp_clock_idx(emac->iep);
@@ -142,6 +148,9 @@ static int emac_set_channels(struct net_device *ndev,
emac->tx_ch_num = ch->tx_count;
+ if (emac->is_sr1)
+ emac->tx_ch_num++;
+
return 0;
}
@@ -152,8 +161,17 @@ static void emac_get_channels(struct net_device *ndev,
ch->max_rx = 1;
ch->max_tx = PRUETH_MAX_TX_QUEUES;
+
+ /* Disable multiple TX channels due to timeouts
+ * when using more than one queue
+ */
+ if (emac->is_sr1)
+ ch->max_tx = 1;
+
ch->rx_count = 1;
ch->tx_count = emac->tx_ch_num;
+
+ if (emac->is_sr1)
+ ch->tx_count--;
}
static const struct ethtool_rmon_hist_range emac_rmon_ranges[] = {
@@ -189,6 +207,93 @@ static void emac_get_rmon_stats(struct net_device *ndev,
rmon_stats->hist_tx[4] = emac_get_stat_by_name(emac, "tx_bucket5_frames");
}
+static int emac_get_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth_tx_chn *tx_chn;
+
+ tx_chn = &emac->tx_chns[0];
+
+ coal->rx_coalesce_usecs = emac->rx_pace_timeout_ns / 1000;
+ coal->tx_coalesce_usecs = tx_chn->tx_pace_timeout_ns / 1000;
+
+ return 0;
+}
+
+static int emac_get_per_queue_coalesce(struct net_device *ndev, u32 queue,
+ struct ethtool_coalesce *coal)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth_tx_chn *tx_chn;
+
+ if (queue >= PRUETH_MAX_TX_QUEUES)
+ return -EINVAL;
+
+ tx_chn = &emac->tx_chns[queue];
+
+ coal->tx_coalesce_usecs = tx_chn->tx_pace_timeout_ns / 1000;
+
+ return 0;
+}
+
+static int emac_set_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+ struct prueth_tx_chn *tx_chn;
+
+ tx_chn = &emac->tx_chns[0];
+
+ if (coal->rx_coalesce_usecs &&
+ coal->rx_coalesce_usecs < ICSSG_MIN_COALESCE_USECS) {
+ dev_info(prueth->dev, "defaulting to min value of %dus for rx-usecs\n",
+ ICSSG_MIN_COALESCE_USECS);
+ coal->rx_coalesce_usecs = ICSSG_MIN_COALESCE_USECS;
+ }
+
+ if (coal->tx_coalesce_usecs &&
+ coal->tx_coalesce_usecs < ICSSG_MIN_COALESCE_USECS) {
+ dev_info(prueth->dev, "defaulting to min value of %dus for tx-usecs\n",
+ ICSSG_MIN_COALESCE_USECS);
+ coal->tx_coalesce_usecs = ICSSG_MIN_COALESCE_USECS;
+ }
+
+ emac->rx_pace_timeout_ns = coal->rx_coalesce_usecs * 1000;
+ tx_chn->tx_pace_timeout_ns = coal->tx_coalesce_usecs * 1000;
+
+ return 0;
+}
+
+static int emac_set_per_queue_coalesce(struct net_device *ndev, u32 queue,
+ struct ethtool_coalesce *coal)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+ struct prueth_tx_chn *tx_chn;
+
+ if (queue >= PRUETH_MAX_TX_QUEUES)
+ return -EINVAL;
+
+ tx_chn = &emac->tx_chns[queue];
+
+ if (coal->tx_coalesce_usecs &&
+ coal->tx_coalesce_usecs < ICSSG_MIN_COALESCE_USECS) {
+ dev_info(prueth->dev, "defaulting to min value of %dus for tx-usecs for tx-%u\n",
+ ICSSG_MIN_COALESCE_USECS, queue);
+ coal->tx_coalesce_usecs = ICSSG_MIN_COALESCE_USECS;
+ }
+
+ tx_chn->tx_pace_timeout_ns = coal->tx_coalesce_usecs * 1000;
+
+ return 0;
+}
+
const struct ethtool_ops icssg_ethtool_ops = {
.get_drvinfo = emac_get_drvinfo,
.get_msglevel = emac_get_msglevel,
@@ -197,6 +302,12 @@ const struct ethtool_ops icssg_ethtool_ops = {
.get_ethtool_stats = emac_get_ethtool_stats,
.get_strings = emac_get_strings,
.get_ts_info = emac_get_ts_info,
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+ ETHTOOL_COALESCE_TX_USECS,
+ .get_coalesce = emac_get_coalesce,
+ .set_coalesce = emac_set_coalesce,
+ .get_per_queue_coalesce = emac_get_per_queue_coalesce,
+ .set_per_queue_coalesce = emac_set_per_queue_coalesce,
.get_channels = emac_get_channels,
.set_channels = emac_set_channels,
.get_link_ksettings = emac_get_link_ksettings,
@@ -207,3 +318,4 @@ const struct ethtool_ops icssg_ethtool_ops = {
.nway_reset = emac_nway_reset,
.get_rmon_stats = emac_get_rmon_stats,
};
+EXPORT_SYMBOL_GPL(icssg_ethtool_ops);
diff --git a/drivers/net/ethernet/ti/icssg/icssg_mii_cfg.c b/drivers/net/ethernet/ti/icssg/icssg_mii_cfg.c
index 92718ae40d7e..b64955438bb2 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_mii_cfg.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_mii_cfg.c
@@ -40,6 +40,7 @@ void icssg_mii_update_mtu(struct regmap *mii_rt, int mii, int mtu)
(mtu - 1) << PRUSS_MII_RT_RX_FRMS_MAX_FRM_SHIFT);
}
}
+EXPORT_SYMBOL_GPL(icssg_mii_update_mtu);
void icssg_update_rgmii_cfg(struct regmap *miig_rt, struct prueth_emac *emac)
{
@@ -66,6 +67,7 @@ void icssg_update_rgmii_cfg(struct regmap *miig_rt, struct prueth_emac *emac)
regmap_update_bits(miig_rt, RGMII_CFG_OFFSET, full_duplex_mask,
full_duplex_val);
}
+EXPORT_SYMBOL_GPL(icssg_update_rgmii_cfg);
void icssg_miig_set_interface_mode(struct regmap *miig_rt, int mii, phy_interface_t phy_if)
{
@@ -105,6 +107,7 @@ u32 icssg_rgmii_get_speed(struct regmap *miig_rt, int mii)
return icssg_rgmii_cfg_get_bitfield(miig_rt, mask, shift);
}
+EXPORT_SYMBOL_GPL(icssg_rgmii_get_speed);
u32 icssg_rgmii_get_fullduplex(struct regmap *miig_rt, int mii)
{
@@ -118,3 +121,4 @@ u32 icssg_rgmii_get_fullduplex(struct regmap *miig_rt, int mii)
return icssg_rgmii_cfg_get_bitfield(miig_rt, mask, shift);
}
+EXPORT_SYMBOL_GPL(icssg_rgmii_get_fullduplex);
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index cf7b73f8f450..f65041662173 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -13,8 +13,10 @@
#include <linux/dma/ti-cppi5.h>
#include <linux/etherdevice.h>
#include <linux/genalloc.h>
+#include <linux/if_hsr.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
@@ -27,574 +29,31 @@
#include <linux/remoteproc/pruss.h>
#include <linux/regmap.h>
#include <linux/remoteproc.h>
+#include <net/switchdev.h>
#include "icssg_prueth.h"
#include "icssg_mii_rt.h"
+#include "icssg_switchdev.h"
#include "../k3-cppi-desc-pool.h"
#define PRUETH_MODULE_DESCRIPTION "PRUSS ICSSG Ethernet driver"
-/* Netif debug messages possible */
-#define PRUETH_EMAC_DEBUG (NETIF_MSG_DRV | \
- NETIF_MSG_PROBE | \
- NETIF_MSG_LINK | \
- NETIF_MSG_TIMER | \
- NETIF_MSG_IFDOWN | \
- NETIF_MSG_IFUP | \
- NETIF_MSG_RX_ERR | \
- NETIF_MSG_TX_ERR | \
- NETIF_MSG_TX_QUEUED | \
- NETIF_MSG_INTR | \
- NETIF_MSG_TX_DONE | \
- NETIF_MSG_RX_STATUS | \
- NETIF_MSG_PKTDATA | \
- NETIF_MSG_HW | \
- NETIF_MSG_WOL)
-
-#define prueth_napi_to_emac(napi) container_of(napi, struct prueth_emac, napi_rx)
+#define DEFAULT_VID 1
+#define DEFAULT_PORT_MASK 1
+#define DEFAULT_UNTAG_MASK 1
-/* CTRLMMR_ICSSG_RGMII_CTRL register bits */
-#define ICSSG_CTRL_RGMII_ID_MODE BIT(24)
-
-#define IEP_DEFAULT_CYCLE_TIME_NS 1000000 /* 1 ms */
-
-static void prueth_cleanup_rx_chns(struct prueth_emac *emac,
- struct prueth_rx_chn *rx_chn,
- int max_rflows)
-{
- if (rx_chn->desc_pool)
- k3_cppi_desc_pool_destroy(rx_chn->desc_pool);
-
- if (rx_chn->rx_chn)
- k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
-}
-
-static void prueth_cleanup_tx_chns(struct prueth_emac *emac)
-{
- int i;
-
- for (i = 0; i < emac->tx_ch_num; i++) {
- struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];
-
- if (tx_chn->desc_pool)
- k3_cppi_desc_pool_destroy(tx_chn->desc_pool);
-
- if (tx_chn->tx_chn)
- k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
-
- /* Assume prueth_cleanup_tx_chns() is called at the
- * end after all channel resources are freed
- */
- memset(tx_chn, 0, sizeof(*tx_chn));
- }
-}
-
-static void prueth_ndev_del_tx_napi(struct prueth_emac *emac, int num)
-{
- int i;
-
- for (i = 0; i < num; i++) {
- struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];
-
- if (tx_chn->irq)
- free_irq(tx_chn->irq, tx_chn);
- netif_napi_del(&tx_chn->napi_tx);
- }
-}
-
-static void prueth_xmit_free(struct prueth_tx_chn *tx_chn,
- struct cppi5_host_desc_t *desc)
-{
- struct cppi5_host_desc_t *first_desc, *next_desc;
- dma_addr_t buf_dma, next_desc_dma;
- u32 buf_dma_len;
-
- first_desc = desc;
- next_desc = first_desc;
-
- cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
- k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
-
- dma_unmap_single(tx_chn->dma_dev, buf_dma, buf_dma_len,
- DMA_TO_DEVICE);
-
- next_desc_dma = cppi5_hdesc_get_next_hbdesc(first_desc);
- k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
- while (next_desc_dma) {
- next_desc = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
- next_desc_dma);
- cppi5_hdesc_get_obuf(next_desc, &buf_dma, &buf_dma_len);
- k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
-
- dma_unmap_page(tx_chn->dma_dev, buf_dma, buf_dma_len,
- DMA_TO_DEVICE);
-
- next_desc_dma = cppi5_hdesc_get_next_hbdesc(next_desc);
- k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
-
- k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
- }
-
- k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
-}
-
-static int emac_tx_complete_packets(struct prueth_emac *emac, int chn,
- int budget)
-{
- struct net_device *ndev = emac->ndev;
- struct cppi5_host_desc_t *desc_tx;
- struct netdev_queue *netif_txq;
- struct prueth_tx_chn *tx_chn;
- unsigned int total_bytes = 0;
- struct sk_buff *skb;
- dma_addr_t desc_dma;
- int res, num_tx = 0;
- void **swdata;
-
- tx_chn = &emac->tx_chns[chn];
-
- while (true) {
- res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
- if (res == -ENODATA)
- break;
-
- /* teardown completion */
- if (cppi5_desc_is_tdcm(desc_dma)) {
- if (atomic_dec_and_test(&emac->tdown_cnt))
- complete(&emac->tdown_complete);
- break;
- }
-
- desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
- desc_dma);
- swdata = cppi5_hdesc_get_swdata(desc_tx);
-
- skb = *(swdata);
- prueth_xmit_free(tx_chn, desc_tx);
-
- ndev = skb->dev;
- ndev->stats.tx_packets++;
- ndev->stats.tx_bytes += skb->len;
- total_bytes += skb->len;
- napi_consume_skb(skb, budget);
- num_tx++;
- }
-
- if (!num_tx)
- return 0;
-
- netif_txq = netdev_get_tx_queue(ndev, chn);
- netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);
-
- if (netif_tx_queue_stopped(netif_txq)) {
- /* If the TX queue was stopped, wake it now
- * if we have enough room.
- */
- __netif_tx_lock(netif_txq, smp_processor_id());
- if (netif_running(ndev) &&
- (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
- MAX_SKB_FRAGS))
- netif_tx_wake_queue(netif_txq);
- __netif_tx_unlock(netif_txq);
- }
-
- return num_tx;
-}
-
-static int emac_napi_tx_poll(struct napi_struct *napi_tx, int budget)
-{
- struct prueth_tx_chn *tx_chn = prueth_napi_to_tx_chn(napi_tx);
- struct prueth_emac *emac = tx_chn->emac;
- int num_tx_packets;
-
- num_tx_packets = emac_tx_complete_packets(emac, tx_chn->id, budget);
-
- if (num_tx_packets >= budget)
- return budget;
-
- if (napi_complete_done(napi_tx, num_tx_packets))
- enable_irq(tx_chn->irq);
-
- return num_tx_packets;
-}
-
-static irqreturn_t prueth_tx_irq(int irq, void *dev_id)
-{
- struct prueth_tx_chn *tx_chn = dev_id;
-
- disable_irq_nosync(irq);
- napi_schedule(&tx_chn->napi_tx);
-
- return IRQ_HANDLED;
-}
-
-static int prueth_ndev_add_tx_napi(struct prueth_emac *emac)
-{
- struct prueth *prueth = emac->prueth;
- int i, ret;
-
- for (i = 0; i < emac->tx_ch_num; i++) {
- struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];
-
- netif_napi_add_tx(emac->ndev, &tx_chn->napi_tx, emac_napi_tx_poll);
- ret = request_irq(tx_chn->irq, prueth_tx_irq,
- IRQF_TRIGGER_HIGH, tx_chn->name,
- tx_chn);
- if (ret) {
- netif_napi_del(&tx_chn->napi_tx);
- dev_err(prueth->dev, "unable to request TX IRQ %d\n",
- tx_chn->irq);
- goto fail;
- }
- }
-
- return 0;
-fail:
- prueth_ndev_del_tx_napi(emac, i);
- return ret;
-}
-
-static int prueth_init_tx_chns(struct prueth_emac *emac)
-{
- static const struct k3_ring_cfg ring_cfg = {
- .elm_size = K3_RINGACC_RING_ELSIZE_8,
- .mode = K3_RINGACC_RING_MODE_RING,
- .flags = 0,
- .size = PRUETH_MAX_TX_DESC,
- };
- struct k3_udma_glue_tx_channel_cfg tx_cfg;
- struct device *dev = emac->prueth->dev;
- struct net_device *ndev = emac->ndev;
- int ret, slice, i;
- u32 hdesc_size;
-
- slice = prueth_emac_slice(emac);
- if (slice < 0)
- return slice;
-
- init_completion(&emac->tdown_complete);
-
- hdesc_size = cppi5_hdesc_calc_size(true, PRUETH_NAV_PS_DATA_SIZE,
- PRUETH_NAV_SW_DATA_SIZE);
- memset(&tx_cfg, 0, sizeof(tx_cfg));
- tx_cfg.swdata_size = PRUETH_NAV_SW_DATA_SIZE;
- tx_cfg.tx_cfg = ring_cfg;
- tx_cfg.txcq_cfg = ring_cfg;
-
- for (i = 0; i < emac->tx_ch_num; i++) {
- struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];
-
- /* To differentiate channels for SLICE0 vs SLICE1 */
- snprintf(tx_chn->name, sizeof(tx_chn->name),
- "tx%d-%d", slice, i);
-
- tx_chn->emac = emac;
- tx_chn->id = i;
- tx_chn->descs_num = PRUETH_MAX_TX_DESC;
-
- tx_chn->tx_chn =
- k3_udma_glue_request_tx_chn(dev, tx_chn->name,
- &tx_cfg);
- if (IS_ERR(tx_chn->tx_chn)) {
- ret = PTR_ERR(tx_chn->tx_chn);
- tx_chn->tx_chn = NULL;
- netdev_err(ndev,
- "Failed to request tx dma ch: %d\n", ret);
- goto fail;
- }
-
- tx_chn->dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn->tx_chn);
- tx_chn->desc_pool =
- k3_cppi_desc_pool_create_name(tx_chn->dma_dev,
- tx_chn->descs_num,
- hdesc_size,
- tx_chn->name);
- if (IS_ERR(tx_chn->desc_pool)) {
- ret = PTR_ERR(tx_chn->desc_pool);
- tx_chn->desc_pool = NULL;
- netdev_err(ndev, "Failed to create tx pool: %d\n", ret);
- goto fail;
- }
-
- ret = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
- if (ret < 0) {
- netdev_err(ndev, "failed to get tx irq\n");
- goto fail;
- }
- tx_chn->irq = ret;
-
- snprintf(tx_chn->name, sizeof(tx_chn->name), "%s-tx%d",
- dev_name(dev), tx_chn->id);
- }
-
- return 0;
-
-fail:
- prueth_cleanup_tx_chns(emac);
- return ret;
-}
-
-static int prueth_init_rx_chns(struct prueth_emac *emac,
- struct prueth_rx_chn *rx_chn,
- char *name, u32 max_rflows,
- u32 max_desc_num)
-{
- struct k3_udma_glue_rx_channel_cfg rx_cfg;
- struct device *dev = emac->prueth->dev;
- struct net_device *ndev = emac->ndev;
- u32 fdqring_id, hdesc_size;
- int i, ret = 0, slice;
-
- slice = prueth_emac_slice(emac);
- if (slice < 0)
- return slice;
-
- /* To differentiate channels for SLICE0 vs SLICE1 */
- snprintf(rx_chn->name, sizeof(rx_chn->name), "%s%d", name, slice);
-
- hdesc_size = cppi5_hdesc_calc_size(true, PRUETH_NAV_PS_DATA_SIZE,
- PRUETH_NAV_SW_DATA_SIZE);
- memset(&rx_cfg, 0, sizeof(rx_cfg));
- rx_cfg.swdata_size = PRUETH_NAV_SW_DATA_SIZE;
- rx_cfg.flow_id_num = max_rflows;
- rx_cfg.flow_id_base = -1; /* udmax will auto select flow id base */
-
- /* init all flows */
- rx_chn->dev = dev;
- rx_chn->descs_num = max_desc_num;
-
- rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, rx_chn->name,
- &rx_cfg);
- if (IS_ERR(rx_chn->rx_chn)) {
- ret = PTR_ERR(rx_chn->rx_chn);
- rx_chn->rx_chn = NULL;
- netdev_err(ndev, "Failed to request rx dma ch: %d\n", ret);
- goto fail;
- }
-
- rx_chn->dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn->rx_chn);
- rx_chn->desc_pool = k3_cppi_desc_pool_create_name(rx_chn->dma_dev,
- rx_chn->descs_num,
- hdesc_size,
- rx_chn->name);
- if (IS_ERR(rx_chn->desc_pool)) {
- ret = PTR_ERR(rx_chn->desc_pool);
- rx_chn->desc_pool = NULL;
- netdev_err(ndev, "Failed to create rx pool: %d\n", ret);
- goto fail;
- }
-
- emac->rx_flow_id_base = k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn);
- netdev_dbg(ndev, "flow id base = %d\n", emac->rx_flow_id_base);
-
- fdqring_id = K3_RINGACC_RING_ID_ANY;
- for (i = 0; i < rx_cfg.flow_id_num; i++) {
- struct k3_ring_cfg rxring_cfg = {
- .elm_size = K3_RINGACC_RING_ELSIZE_8,
- .mode = K3_RINGACC_RING_MODE_RING,
- .flags = 0,
- };
- struct k3_ring_cfg fdqring_cfg = {
- .elm_size = K3_RINGACC_RING_ELSIZE_8,
- .flags = K3_RINGACC_RING_SHARED,
- };
- struct k3_udma_glue_rx_flow_cfg rx_flow_cfg = {
- .rx_cfg = rxring_cfg,
- .rxfdq_cfg = fdqring_cfg,
- .ring_rxq_id = K3_RINGACC_RING_ID_ANY,
- .src_tag_lo_sel =
- K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG,
- };
-
- rx_flow_cfg.ring_rxfdq0_id = fdqring_id;
- rx_flow_cfg.rx_cfg.size = max_desc_num;
- rx_flow_cfg.rxfdq_cfg.size = max_desc_num;
- rx_flow_cfg.rxfdq_cfg.mode = emac->prueth->pdata.fdqring_mode;
-
- ret = k3_udma_glue_rx_flow_init(rx_chn->rx_chn,
- i, &rx_flow_cfg);
- if (ret) {
- netdev_err(ndev, "Failed to init rx flow%d %d\n",
- i, ret);
- goto fail;
- }
- if (!i)
- fdqring_id = k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn,
- i);
- rx_chn->irq[i] = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
- if (rx_chn->irq[i] <= 0) {
- ret = rx_chn->irq[i];
- netdev_err(ndev, "Failed to get rx dma irq");
- goto fail;
- }
- }
-
- return 0;
-
-fail:
- prueth_cleanup_rx_chns(emac, rx_chn, max_rflows);
- return ret;
-}
-
-static int prueth_dma_rx_push(struct prueth_emac *emac,
- struct sk_buff *skb,
- struct prueth_rx_chn *rx_chn)
-{
- struct net_device *ndev = emac->ndev;
- struct cppi5_host_desc_t *desc_rx;
- u32 pkt_len = skb_tailroom(skb);
- dma_addr_t desc_dma;
- dma_addr_t buf_dma;
- void **swdata;
-
- desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
- if (!desc_rx) {
- netdev_err(ndev, "rx push: failed to allocate descriptor\n");
- return -ENOMEM;
- }
- desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);
-
- buf_dma = dma_map_single(rx_chn->dma_dev, skb->data, pkt_len, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(rx_chn->dma_dev, buf_dma))) {
- k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
- netdev_err(ndev, "rx push: failed to map rx pkt buffer\n");
- return -EINVAL;
- }
-
- cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
- PRUETH_NAV_PS_DATA_SIZE);
- k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma);
- cppi5_hdesc_attach_buf(desc_rx, buf_dma, skb_tailroom(skb), buf_dma, skb_tailroom(skb));
-
- swdata = cppi5_hdesc_get_swdata(desc_rx);
- *swdata = skb;
-
- return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, 0,
- desc_rx, desc_dma);
-}
-
-static u64 icssg_ts_to_ns(u32 hi_sw, u32 hi, u32 lo, u32 cycle_time_ns)
-{
- u32 iepcount_lo, iepcount_hi, hi_rollover_count;
- u64 ns;
-
- iepcount_lo = lo & GENMASK(19, 0);
- iepcount_hi = (hi & GENMASK(11, 0)) << 12 | lo >> 20;
- hi_rollover_count = hi >> 11;
-
- ns = ((u64)hi_rollover_count) << 23 | (iepcount_hi + hi_sw);
- ns = ns * cycle_time_ns + iepcount_lo;
-
- return ns;
-}
-
-static void emac_rx_timestamp(struct prueth_emac *emac,
- struct sk_buff *skb, u32 *psdata)
-{
- struct skb_shared_hwtstamps *ssh;
- u64 ns;
-
- u32 hi_sw = readl(emac->prueth->shram.va +
- TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET);
- ns = icssg_ts_to_ns(hi_sw, psdata[1], psdata[0],
- IEP_DEFAULT_CYCLE_TIME_NS);
-
- ssh = skb_hwtstamps(skb);
- memset(ssh, 0, sizeof(*ssh));
- ssh->hwtstamp = ns_to_ktime(ns);
-}
-
-static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id)
-{
- struct prueth_rx_chn *rx_chn = &emac->rx_chns;
- u32 buf_dma_len, pkt_len, port_id = 0;
- struct net_device *ndev = emac->ndev;
- struct cppi5_host_desc_t *desc_rx;
- struct sk_buff *skb, *new_skb;
- dma_addr_t desc_dma, buf_dma;
- void **swdata;
- u32 *psdata;
- int ret;
-
- ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma);
- if (ret) {
- if (ret != -ENODATA)
- netdev_err(ndev, "rx pop: failed: %d\n", ret);
- return ret;
- }
-
- if (cppi5_desc_is_tdcm(desc_dma)) /* Teardown ? */
- return 0;
-
- desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
-
- swdata = cppi5_hdesc_get_swdata(desc_rx);
- skb = *swdata;
-
- psdata = cppi5_hdesc_get_psdata(desc_rx);
- /* RX HW timestamp */
- if (emac->rx_ts_enabled)
- emac_rx_timestamp(emac, skb, psdata);
-
- cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
- k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
- pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
- /* firmware adds 4 CRC bytes, strip them */
- pkt_len -= 4;
- cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
-
- dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
- k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
-
- skb->dev = ndev;
- new_skb = netdev_alloc_skb_ip_align(ndev, PRUETH_MAX_PKT_SIZE);
- /* if allocation fails we drop the packet but push the
- * descriptor back to the ring with old skb to prevent a stall
- */
- if (!new_skb) {
- ndev->stats.rx_dropped++;
- new_skb = skb;
- } else {
- /* send the filled skb up the n/w stack */
- skb_put(skb, pkt_len);
- skb->protocol = eth_type_trans(skb, ndev);
- napi_gro_receive(&emac->napi_rx, skb);
- ndev->stats.rx_bytes += pkt_len;
- ndev->stats.rx_packets++;
- }
-
- /* queue another RX DMA */
- ret = prueth_dma_rx_push(emac, new_skb, &emac->rx_chns);
- if (WARN_ON(ret < 0)) {
- dev_kfree_skb_any(new_skb);
- ndev->stats.rx_errors++;
- ndev->stats.rx_dropped++;
- }
-
- return ret;
-}
-
-static void prueth_rx_cleanup(void *data, dma_addr_t desc_dma)
-{
- struct prueth_rx_chn *rx_chn = data;
- struct cppi5_host_desc_t *desc_rx;
- struct sk_buff *skb;
- dma_addr_t buf_dma;
- u32 buf_dma_len;
- void **swdata;
+#define NETIF_PRUETH_HSR_OFFLOAD_FEATURES (NETIF_F_HW_HSR_FWD | \
+ NETIF_F_HW_HSR_DUP | \
+ NETIF_F_HW_HSR_TAG_INS | \
+ NETIF_F_HW_HSR_TAG_RM)
- desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
- swdata = cppi5_hdesc_get_swdata(desc_rx);
- skb = *swdata;
- cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
- k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
+#define PRUETH_RX_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC |\
+ DMA_ATTR_WEAK_ORDERING)
- dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len,
- DMA_FROM_DEVICE);
- k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
+/* CTRLMMR_ICSSG_RGMII_CTRL register bits */
+#define ICSSG_CTRL_RGMII_ID_MODE BIT(24)
- dev_kfree_skb_any(skb);
-}
+static void emac_adjust_link(struct net_device *ndev);
static int emac_get_tx_ts(struct prueth_emac *emac,
struct emac_tx_ts_response *rsp)
@@ -661,331 +120,206 @@ static void tx_ts_work(struct prueth_emac *emac)
}
}
-static int prueth_tx_ts_cookie_get(struct prueth_emac *emac)
+static irqreturn_t prueth_tx_ts_irq(int irq, void *dev_id)
{
- int i;
+ struct prueth_emac *emac = dev_id;
- /* search and get the next free slot */
- for (i = 0; i < PRUETH_MAX_TX_TS_REQUESTS; i++) {
- if (!emac->tx_ts_skb[i]) {
- emac->tx_ts_skb[i] = ERR_PTR(-EBUSY); /* reserve slot */
- return i;
- }
- }
+ /* currently only TX timestamp is being returned */
+ tx_ts_work(emac);
- return -EBUSY;
+ return IRQ_HANDLED;
}
-/**
- * emac_ndo_start_xmit - EMAC Transmit function
- * @skb: SKB pointer
- * @ndev: EMAC network adapter
- *
- * Called by the system to transmit a packet - we queue the packet in
- * EMAC hardware transmit queue
- * Doesn't wait for completion we'll check for TX completion in
- * emac_tx_complete_packets().
- *
- * Return: enum netdev_tx
- */
-static enum netdev_tx emac_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static int prueth_start(struct rproc *rproc, const char *fw_name)
{
- struct cppi5_host_desc_t *first_desc, *next_desc, *cur_desc;
- struct prueth_emac *emac = netdev_priv(ndev);
- struct netdev_queue *netif_txq;
- struct prueth_tx_chn *tx_chn;
- dma_addr_t desc_dma, buf_dma;
- int i, ret = 0, q_idx;
- bool in_tx_ts = 0;
- int tx_ts_cookie;
- void **swdata;
- u32 pkt_len;
- u32 *epib;
-
- pkt_len = skb_headlen(skb);
- q_idx = skb_get_queue_mapping(skb);
-
- tx_chn = &emac->tx_chns[q_idx];
- netif_txq = netdev_get_tx_queue(ndev, q_idx);
-
- /* Map the linear buffer */
- buf_dma = dma_map_single(tx_chn->dma_dev, skb->data, pkt_len, DMA_TO_DEVICE);
- if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
- netdev_err(ndev, "tx: failed to map skb buffer\n");
- ret = NETDEV_TX_OK;
- goto drop_free_skb;
- }
-
- first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
- if (!first_desc) {
- netdev_dbg(ndev, "tx: failed to allocate descriptor\n");
- dma_unmap_single(tx_chn->dma_dev, buf_dma, pkt_len, DMA_TO_DEVICE);
- goto drop_stop_q_busy;
- }
-
- cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
- PRUETH_NAV_PS_DATA_SIZE);
- cppi5_hdesc_set_pkttype(first_desc, 0);
- epib = first_desc->epib;
- epib[0] = 0;
- epib[1] = 0;
- if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
- emac->tx_ts_enabled) {
- tx_ts_cookie = prueth_tx_ts_cookie_get(emac);
- if (tx_ts_cookie >= 0) {
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- /* Request TX timestamp */
- epib[0] = (u32)tx_ts_cookie;
- epib[1] = 0x80000000; /* TX TS request */
- emac->tx_ts_skb[tx_ts_cookie] = skb_get(skb);
- in_tx_ts = 1;
- }
- }
-
- /* set dst tag to indicate internal qid at the firmware which is at
- * bit8..bit15. bit0..bit7 indicates port num for directed
- * packets in case of switch mode operation
- */
- cppi5_desc_set_tags_ids(&first_desc->hdr, 0, (emac->port_id | (q_idx << 8)));
- k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
- cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
- swdata = cppi5_hdesc_get_swdata(first_desc);
- *swdata = skb;
-
- /* Handle the case where skb is fragmented in pages */
- cur_desc = first_desc;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- u32 frag_size = skb_frag_size(frag);
-
- next_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
- if (!next_desc) {
- netdev_err(ndev,
- "tx: failed to allocate frag. descriptor\n");
- goto free_desc_stop_q_busy_cleanup_tx_ts;
- }
-
- buf_dma = skb_frag_dma_map(tx_chn->dma_dev, frag, 0, frag_size,
- DMA_TO_DEVICE);
- if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
- netdev_err(ndev, "tx: Failed to map skb page\n");
- k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
- ret = NETDEV_TX_OK;
- goto cleanup_tx_ts;
- }
-
- cppi5_hdesc_reset_hbdesc(next_desc);
- k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
- cppi5_hdesc_attach_buf(next_desc,
- buf_dma, frag_size, buf_dma, frag_size);
-
- desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool,
- next_desc);
- k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &desc_dma);
- cppi5_hdesc_link_hbdesc(cur_desc, desc_dma);
-
- pkt_len += frag_size;
- cur_desc = next_desc;
- }
- WARN_ON_ONCE(pkt_len != skb->len);
+ int ret;
- /* report bql before sending packet */
- netdev_tx_sent_queue(netif_txq, pkt_len);
+ ret = rproc_set_firmware(rproc, fw_name);
+ if (ret)
+ return ret;
+ return rproc_boot(rproc);
+}
- cppi5_hdesc_set_pktlen(first_desc, pkt_len);
- desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
- /* cppi5_desc_dump(first_desc, 64); */
+static void prueth_shutdown(struct rproc *rproc)
+{
+ rproc_shutdown(rproc);
+}
- skb_tx_timestamp(skb); /* SW timestamp if SKBTX_IN_PROGRESS not set */
- ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
- if (ret) {
- netdev_err(ndev, "tx: push failed: %d\n", ret);
- goto drop_free_descs;
- }
+static int prueth_emac_start(struct prueth *prueth)
+{
+ struct icssg_firmwares *firmwares;
+ struct device *dev = prueth->dev;
+ int ret, slice;
+
+ if (prueth->is_switch_mode)
+ firmwares = prueth->icssg_switch_firmwares;
+ else if (prueth->is_hsr_offload_mode && HSR_V1 == prueth->hsr_prp_version)
+ firmwares = prueth->icssg_hsr_firmwares;
+ else if (prueth->is_hsr_offload_mode && PRP_V1 == prueth->hsr_prp_version)
+ firmwares = prueth->icssg_prp_firmwares;
+ else
+ firmwares = prueth->icssg_emac_firmwares;
- if (in_tx_ts)
- atomic_inc(&emac->tx_ts_pending);
+ for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
+ ret = prueth_start(prueth->pru[slice], firmwares[slice].pru);
+ if (ret) {
+ dev_err(dev, "failed to boot PRU%d: %d\n", slice, ret);
+ goto unwind_slices;
+ }
- if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) < MAX_SKB_FRAGS) {
- netif_tx_stop_queue(netif_txq);
- /* Barrier, so that stop_queue visible to other cpus */
- smp_mb__after_atomic();
+ ret = prueth_start(prueth->rtu[slice], firmwares[slice].rtu);
+ if (ret) {
+ dev_err(dev, "failed to boot RTU%d: %d\n", slice, ret);
+ rproc_shutdown(prueth->pru[slice]);
+ goto unwind_slices;
+ }
- if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
- MAX_SKB_FRAGS)
- netif_tx_wake_queue(netif_txq);
+ ret = prueth_start(prueth->txpru[slice], firmwares[slice].txpru);
+ if (ret) {
+ dev_err(dev, "failed to boot TX_PRU%d: %d\n", slice, ret);
+ rproc_shutdown(prueth->rtu[slice]);
+ rproc_shutdown(prueth->pru[slice]);
+ goto unwind_slices;
+ }
}
- return NETDEV_TX_OK;
+ return 0;
-cleanup_tx_ts:
- if (in_tx_ts) {
- dev_kfree_skb_any(emac->tx_ts_skb[tx_ts_cookie]);
- emac->tx_ts_skb[tx_ts_cookie] = NULL;
+unwind_slices:
+ while (--slice >= 0) {
+ prueth_shutdown(prueth->txpru[slice]);
+ prueth_shutdown(prueth->rtu[slice]);
+ prueth_shutdown(prueth->pru[slice]);
}
-drop_free_descs:
- prueth_xmit_free(tx_chn, first_desc);
-
-drop_free_skb:
- dev_kfree_skb_any(skb);
-
- /* error */
- ndev->stats.tx_dropped++;
- netdev_err(ndev, "tx: error: %d\n", ret);
-
return ret;
-
-free_desc_stop_q_busy_cleanup_tx_ts:
- if (in_tx_ts) {
- dev_kfree_skb_any(emac->tx_ts_skb[tx_ts_cookie]);
- emac->tx_ts_skb[tx_ts_cookie] = NULL;
- }
- prueth_xmit_free(tx_chn, first_desc);
-
-drop_stop_q_busy:
- netif_tx_stop_queue(netif_txq);
- return NETDEV_TX_BUSY;
}
-static void prueth_tx_cleanup(void *data, dma_addr_t desc_dma)
+static void prueth_emac_stop(struct prueth *prueth)
{
- struct prueth_tx_chn *tx_chn = data;
- struct cppi5_host_desc_t *desc_tx;
- struct sk_buff *skb;
- void **swdata;
-
- desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
- swdata = cppi5_hdesc_get_swdata(desc_tx);
- skb = *(swdata);
- prueth_xmit_free(tx_chn, desc_tx);
+ int slice;
- dev_kfree_skb_any(skb);
+ for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
+ prueth_shutdown(prueth->txpru[slice]);
+ prueth_shutdown(prueth->rtu[slice]);
+ prueth_shutdown(prueth->pru[slice]);
+ }
}
-static irqreturn_t prueth_tx_ts_irq(int irq, void *dev_id)
+static void icssg_enable_fw_offload(struct prueth *prueth)
{
- struct prueth_emac *emac = dev_id;
-
- /* currently only TX timestamp is being returned */
- tx_ts_work(emac);
+ struct prueth_emac *emac;
+ int mac;
+
+ for (mac = PRUETH_MAC0; mac < PRUETH_NUM_MACS; mac++) {
+ emac = prueth->emac[mac];
+ if (prueth->is_hsr_offload_mode) {
+ if (emac->ndev->features & NETIF_F_HW_HSR_TAG_RM)
+ icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_ENABLE);
+ else
+ icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_DISABLE);
+ }
- return IRQ_HANDLED;
+ if (prueth->is_switch_mode || prueth->is_hsr_offload_mode) {
+ if (netif_running(emac->ndev)) {
+ icssg_fdb_add_del(emac, eth_stp_addr, prueth->default_vlan,
+ ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
+ ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
+ ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
+ ICSSG_FDB_ENTRY_BLOCK,
+ true);
+ icssg_vtbl_modify(emac, emac->port_vlan | DEFAULT_VID,
+ BIT(emac->port_id) | DEFAULT_PORT_MASK,
+ BIT(emac->port_id) | DEFAULT_UNTAG_MASK,
+ true);
+ if (prueth->is_hsr_offload_mode)
+ icssg_vtbl_modify(emac, DEFAULT_VID,
+ DEFAULT_PORT_MASK,
+ DEFAULT_UNTAG_MASK, true);
+ icssg_set_pvid(prueth, emac->port_vlan, emac->port_id);
+ if (prueth->is_switch_mode)
+ icssg_set_port_state(emac, ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE);
+ }
+ }
+ }
}
-static irqreturn_t prueth_rx_irq(int irq, void *dev_id)
+static int prueth_emac_common_start(struct prueth *prueth)
{
- struct prueth_emac *emac = dev_id;
-
- disable_irq_nosync(irq);
- napi_schedule(&emac->napi_rx);
+ struct prueth_emac *emac;
+ int ret = 0;
+ int slice;
- return IRQ_HANDLED;
-}
+ if (!prueth->emac[ICSS_SLICE0] && !prueth->emac[ICSS_SLICE1])
+ return -EINVAL;
-struct icssg_firmwares {
- char *pru;
- char *rtu;
- char *txpru;
-};
+ /* clear SMEM and MSMC settings for all slices */
+ memset_io(prueth->msmcram.va, 0, prueth->msmcram.size);
+ memset_io(prueth->shram.va, 0, ICSSG_CONFIG_OFFSET_SLICE1 * PRUETH_NUM_MACS);
-static struct icssg_firmwares icssg_emac_firmwares[] = {
- {
- .pru = "ti-pruss/am65x-sr2-pru0-prueth-fw.elf",
- .rtu = "ti-pruss/am65x-sr2-rtu0-prueth-fw.elf",
- .txpru = "ti-pruss/am65x-sr2-txpru0-prueth-fw.elf",
- },
- {
- .pru = "ti-pruss/am65x-sr2-pru1-prueth-fw.elf",
- .rtu = "ti-pruss/am65x-sr2-rtu1-prueth-fw.elf",
- .txpru = "ti-pruss/am65x-sr2-txpru1-prueth-fw.elf",
- }
-};
+ icssg_class_default(prueth->miig_rt, ICSS_SLICE0, 0, false);
+ icssg_class_default(prueth->miig_rt, ICSS_SLICE1, 0, false);
-static int prueth_emac_start(struct prueth *prueth, struct prueth_emac *emac)
-{
- struct icssg_firmwares *firmwares;
- struct device *dev = prueth->dev;
- int slice, ret;
+ if (prueth->is_switch_mode || prueth->is_hsr_offload_mode)
+ icssg_init_fw_offload_mode(prueth);
+ else
+ icssg_init_emac_mode(prueth);
- firmwares = icssg_emac_firmwares;
+ for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
+ emac = prueth->emac[slice];
+ if (!emac)
+ continue;
+ ret = icssg_config(prueth, emac, slice);
+ if (ret)
+ goto disable_class;
- slice = prueth_emac_slice(emac);
- if (slice < 0) {
- netdev_err(emac->ndev, "invalid port\n");
- return -EINVAL;
+ mutex_lock(&emac->ndev->phydev->lock);
+ emac_adjust_link(emac->ndev);
+ mutex_unlock(&emac->ndev->phydev->lock);
}
- ret = icssg_config(prueth, emac, slice);
+ ret = prueth_emac_start(prueth);
if (ret)
- return ret;
+ goto disable_class;
- ret = rproc_set_firmware(prueth->pru[slice], firmwares[slice].pru);
- ret = rproc_boot(prueth->pru[slice]);
+ emac = prueth->emac[ICSS_SLICE0] ? prueth->emac[ICSS_SLICE0] :
+ prueth->emac[ICSS_SLICE1];
+ ret = icss_iep_init(emac->iep, &prueth_iep_clockops,
+ emac, IEP_DEFAULT_CYCLE_TIME_NS);
if (ret) {
- dev_err(dev, "failed to boot PRU%d: %d\n", slice, ret);
- return -EINVAL;
- }
-
- ret = rproc_set_firmware(prueth->rtu[slice], firmwares[slice].rtu);
- ret = rproc_boot(prueth->rtu[slice]);
- if (ret) {
- dev_err(dev, "failed to boot RTU%d: %d\n", slice, ret);
- goto halt_pru;
- }
-
- ret = rproc_set_firmware(prueth->txpru[slice], firmwares[slice].txpru);
- ret = rproc_boot(prueth->txpru[slice]);
- if (ret) {
- dev_err(dev, "failed to boot TX_PRU%d: %d\n", slice, ret);
- goto halt_rtu;
+ dev_err(prueth->dev, "Failed to initialize IEP module\n");
+ goto stop_pruss;
}
- emac->fw_running = 1;
return 0;
-halt_rtu:
- rproc_shutdown(prueth->rtu[slice]);
+stop_pruss:
+ prueth_emac_stop(prueth);
-halt_pru:
- rproc_shutdown(prueth->pru[slice]);
+disable_class:
+ icssg_class_disable(prueth->miig_rt, ICSS_SLICE0);
+ icssg_class_disable(prueth->miig_rt, ICSS_SLICE1);
return ret;
}
-static void prueth_emac_stop(struct prueth_emac *emac)
+static int prueth_emac_common_stop(struct prueth *prueth)
{
- struct prueth *prueth = emac->prueth;
- int slice;
+ struct prueth_emac *emac;
- switch (emac->port_id) {
- case PRUETH_PORT_MII0:
- slice = ICSS_SLICE0;
- break;
- case PRUETH_PORT_MII1:
- slice = ICSS_SLICE1;
- break;
- default:
- netdev_err(emac->ndev, "invalid port\n");
- return;
- }
+ if (!prueth->emac[ICSS_SLICE0] && !prueth->emac[ICSS_SLICE1])
+ return -EINVAL;
- emac->fw_running = 0;
- rproc_shutdown(prueth->txpru[slice]);
- rproc_shutdown(prueth->rtu[slice]);
- rproc_shutdown(prueth->pru[slice]);
-}
+ icssg_class_disable(prueth->miig_rt, ICSS_SLICE0);
+ icssg_class_disable(prueth->miig_rt, ICSS_SLICE1);
-static void prueth_cleanup_tx_ts(struct prueth_emac *emac)
-{
- int i;
+ prueth_emac_stop(prueth);
- for (i = 0; i < PRUETH_MAX_TX_TS_REQUESTS; i++) {
- if (emac->tx_ts_skb[i]) {
- dev_kfree_skb_any(emac->tx_ts_skb[i]);
- emac->tx_ts_skb[i] = NULL;
- }
- }
+ emac = prueth->emac[ICSS_SLICE0] ? prueth->emac[ICSS_SLICE0] :
+ prueth->emac[ICSS_SLICE1];
+ icss_iep_exit(emac->iep);
+
+ return 0;
}
/* called back by PHY layer if there is change in link state of hw port*/
@@ -1039,10 +373,10 @@ static void emac_adjust_link(struct net_device *ndev)
icssg_config_ipg(emac);
spin_unlock_irqrestore(&emac->lock, flags);
icssg_config_set_speed(emac);
- emac_set_port_state(emac, ICSSG_EMAC_PORT_FORWARD);
+ icssg_set_port_state(emac, ICSSG_EMAC_PORT_FORWARD);
} else {
- emac_set_port_state(emac, ICSSG_EMAC_PORT_DISABLE);
+ icssg_set_port_state(emac, ICSSG_EMAC_PORT_DISABLE);
}
}
@@ -1055,84 +389,18 @@ static void emac_adjust_link(struct net_device *ndev)
}
}
-static int emac_napi_rx_poll(struct napi_struct *napi_rx, int budget)
+static enum hrtimer_restart emac_rx_timer_callback(struct hrtimer *timer)
{
- struct prueth_emac *emac = prueth_napi_to_emac(napi_rx);
+ struct prueth_emac *emac =
+ container_of(timer, struct prueth_emac, rx_hrtimer);
int rx_flow = PRUETH_RX_FLOW_DATA;
- int flow = PRUETH_MAX_RX_FLOWS;
- int num_rx = 0;
- int cur_budget;
- int ret;
-
- while (flow--) {
- cur_budget = budget - num_rx;
-
- while (cur_budget--) {
- ret = emac_rx_packet(emac, flow);
- if (ret)
- break;
- num_rx++;
- }
-
- if (num_rx >= budget)
- break;
- }
- if (num_rx < budget && napi_complete_done(napi_rx, num_rx))
+ if (emac->rx_chns.irq_disabled) {
+ /* re-enable the RX IRQ */
+ emac->rx_chns.irq_disabled = false;
enable_irq(emac->rx_chns.irq[rx_flow]);
-
- return num_rx;
-}
-
-static int prueth_prepare_rx_chan(struct prueth_emac *emac,
- struct prueth_rx_chn *chn,
- int buf_size)
-{
- struct sk_buff *skb;
- int i, ret;
-
- for (i = 0; i < chn->descs_num; i++) {
- skb = __netdev_alloc_skb_ip_align(NULL, buf_size, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
-
- ret = prueth_dma_rx_push(emac, skb, chn);
- if (ret < 0) {
- netdev_err(emac->ndev,
- "cannot submit skb for rx chan %s ret %d\n",
- chn->name, ret);
- kfree_skb(skb);
- return ret;
- }
- }
-
- return 0;
-}
-
-static void prueth_reset_tx_chan(struct prueth_emac *emac, int ch_num,
- bool free_skb)
-{
- int i;
-
- for (i = 0; i < ch_num; i++) {
- if (free_skb)
- k3_udma_glue_reset_tx_chn(emac->tx_chns[i].tx_chn,
- &emac->tx_chns[i],
- prueth_tx_cleanup);
- k3_udma_glue_disable_tx_chn(emac->tx_chns[i].tx_chn);
}
-}
-
-static void prueth_reset_rx_chan(struct prueth_rx_chn *chn,
- int num_flows, bool disable)
-{
- int i;
-
- for (i = 0; i < num_flows; i++)
- k3_udma_glue_reset_rx_chn(chn->rx_chn, i, chn,
- prueth_rx_cleanup, !!i);
- if (disable)
- k3_udma_glue_disable_rx_chn(chn->rx_chn);
+ return HRTIMER_NORESTART;
}
static int emac_phy_connect(struct prueth_emac *emac)
@@ -1212,9 +480,6 @@ static void prueth_iep_settime(void *clockops_data, u64 ns)
u32 cycletime;
int timeout;
- if (!emac->fw_running)
- return;
-
sc_descp = emac->prueth->shram.va + TIMESYNC_FW_WC_SETCLOCK_DESC_OFFSET;
cycletime = IEP_DEFAULT_CYCLE_TIME_NS;
@@ -1225,7 +490,8 @@ static void prueth_iep_settime(void *clockops_data, u64 ns)
sc_desc.cyclecounter0_set = cyclecount & GENMASK(31, 0);
sc_desc.cyclecounter1_set = (cyclecount & GENMASK(63, 32)) >> 32;
sc_desc.iepcount_set = ns % cycletime;
- sc_desc.CMP0_current = cycletime - 4; //Count from 0 to (cycle time)-4
+ /* Count from 0 to (cycle time) - emac->iep->def_inc */
+ sc_desc.CMP0_current = cycletime - emac->iep->def_inc;
memcpy_toio(sc_descp, &sc_desc, sizeof(sc_desc));
@@ -1249,6 +515,8 @@ static int prueth_perout_enable(void *clockops_data,
struct prueth_emac *emac = clockops_data;
u32 reduction_factor = 0, offset = 0;
struct timespec64 ts;
+ u64 current_cycle;
+ u64 start_offset;
u64 ns_period;
if (!on)
@@ -1287,8 +555,14 @@ static int prueth_perout_enable(void *clockops_data,
writel(reduction_factor, emac->prueth->shram.va +
TIMESYNC_FW_WC_SYNCOUT_REDUCTION_FACTOR_OFFSET);
- writel(0, emac->prueth->shram.va +
- TIMESYNC_FW_WC_SYNCOUT_START_TIME_CYCLECOUNT_OFFSET);
+ current_cycle = icssg_read_time(emac->prueth->shram.va +
+ TIMESYNC_FW_WC_CYCLECOUNT_OFFSET);
+
+ /* Rounding of current_cycle count to next second */
+ start_offset = roundup(current_cycle, MSEC_PER_SEC);
+
+ hi_lo_writeq(start_offset, emac->prueth->shram.va +
+ TIMESYNC_FW_WC_SYNCOUT_START_TIME_CYCLECOUNT_OFFSET);
return 0;
}
@@ -1299,6 +573,307 @@ const struct icss_iep_clockops prueth_iep_clockops = {
.perout_enable = prueth_perout_enable,
};
+static void prueth_destroy_xdp_rxqs(struct prueth_emac *emac)
+{
+ struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq;
+
+ if (xdp_rxq_info_is_reg(rxq))
+ xdp_rxq_info_unreg(rxq);
+}
+
+static int prueth_create_xdp_rxqs(struct prueth_emac *emac)
+{
+ struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq;
+ struct page_pool *pool = emac->rx_chns.pg_pool;
+ struct prueth_rx_chn *rx_chn = &emac->rx_chns;
+ int ret;
+
+ ret = xdp_rxq_info_reg(rxq, emac->ndev, 0, emac->napi_rx.napi_id);
+ if (ret)
+ return ret;
+
+ if (rx_chn->xsk_pool) {
+ ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_XSK_BUFF_POOL, NULL);
+ if (ret)
+ goto xdp_unreg;
+ xsk_pool_set_rxq_info(rx_chn->xsk_pool, rxq);
+ } else {
+ ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
+ if (ret)
+ goto xdp_unreg;
+ }
+
+ return 0;
+
+xdp_unreg:
+ prueth_destroy_xdp_rxqs(emac);
+ return ret;
+}
+
+static int icssg_prueth_add_mcast(struct net_device *ndev, const u8 *addr)
+{
+ struct net_device *real_dev;
+ struct prueth_emac *emac;
+ int port_mask;
+ u8 vlan_id;
+
+ vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_MAC;
+ real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
+ emac = netdev_priv(real_dev);
+
+ port_mask = BIT(emac->port_id) | icssg_fdb_lookup(emac, addr, vlan_id);
+ icssg_fdb_add_del(emac, addr, vlan_id, port_mask, true);
+ icssg_vtbl_modify(emac, vlan_id, port_mask, port_mask, true);
+
+ return 0;
+}
+
+static int icssg_prueth_del_mcast(struct net_device *ndev, const u8 *addr)
+{
+ struct net_device *real_dev;
+ struct prueth_emac *emac;
+ int other_port_mask;
+ int port_mask;
+ u8 vlan_id;
+
+ vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_MAC;
+ real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
+ emac = netdev_priv(real_dev);
+
+ port_mask = BIT(emac->port_id);
+ other_port_mask = port_mask ^ icssg_fdb_lookup(emac, addr, vlan_id);
+
+ icssg_fdb_add_del(emac, addr, vlan_id, port_mask, false);
+ icssg_vtbl_modify(emac, vlan_id, port_mask, port_mask, false);
+
+ if (other_port_mask) {
+ icssg_fdb_add_del(emac, addr, vlan_id, other_port_mask, true);
+ icssg_vtbl_modify(emac, vlan_id, other_port_mask,
+ other_port_mask, true);
+ }
+
+ return 0;
+}
+
+static void icssg_prueth_hsr_fdb_add_del(struct prueth_emac *emac,
+ const u8 *addr, u8 vid, bool add)
+{
+ icssg_fdb_add_del(emac, addr, vid,
+ ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
+ ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
+ ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
+ ICSSG_FDB_ENTRY_BLOCK, add);
+
+ if (add)
+ icssg_vtbl_modify(emac, vid, BIT(emac->port_id),
+ BIT(emac->port_id), add);
+}
+
+static int icssg_prueth_hsr_add_mcast(struct net_device *ndev, const u8 *addr)
+{
+ struct net_device *real_dev, *port_dev;
+ struct prueth_emac *emac;
+ u8 vlan_id, i;
+
+ vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_HSR;
+ real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
+
+ if (is_hsr_master(real_dev)) {
+ for (i = HSR_PT_SLAVE_A; i < HSR_PT_INTERLINK; i++) {
+ port_dev = hsr_get_port_ndev(real_dev, i);
+ emac = netdev_priv(port_dev);
+ if (!emac) {
+ dev_put(port_dev);
+ return -EINVAL;
+ }
+ icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id,
+ true);
+ dev_put(port_dev);
+ }
+ } else {
+ emac = netdev_priv(real_dev);
+ icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id, true);
+ }
+
+ return 0;
+}
+
+static int icssg_prueth_hsr_del_mcast(struct net_device *ndev, const u8 *addr)
+{
+ struct net_device *real_dev, *port_dev;
+ struct prueth_emac *emac;
+ u8 vlan_id, i;
+
+ vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_HSR;
+ real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
+
+ if (is_hsr_master(real_dev)) {
+ for (i = HSR_PT_SLAVE_A; i < HSR_PT_INTERLINK; i++) {
+ port_dev = hsr_get_port_ndev(real_dev, i);
+ emac = netdev_priv(port_dev);
+ if (!emac) {
+ dev_put(port_dev);
+ return -EINVAL;
+ }
+ icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id,
+ false);
+ dev_put(port_dev);
+ }
+ } else {
+ emac = netdev_priv(real_dev);
+ icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id, false);
+ }
+
+ return 0;
+}
+
+static int icssg_update_vlan_mcast(struct net_device *vdev, int vid,
+ void *args)
+{
+ struct prueth_emac *emac = args;
+
+ if (!vdev || !vid)
+ return 0;
+
+ netif_addr_lock_bh(vdev);
+ __hw_addr_sync_multiple(&emac->vlan_mcast_list[vid], &vdev->mc,
+ vdev->addr_len);
+ netif_addr_unlock_bh(vdev);
+
+ if (emac->prueth->is_hsr_offload_mode)
+ __hw_addr_sync_dev(&emac->vlan_mcast_list[vid], vdev,
+ icssg_prueth_hsr_add_mcast,
+ icssg_prueth_hsr_del_mcast);
+ else
+ __hw_addr_sync_dev(&emac->vlan_mcast_list[vid], vdev,
+ icssg_prueth_add_mcast,
+ icssg_prueth_del_mcast);
+
+ return 0;
+}
+
+static void prueth_set_xsk_pool(struct prueth_emac *emac, u16 queue_id)
+{
+ struct prueth_tx_chn *tx_chn = &emac->tx_chns[queue_id];
+ struct prueth_rx_chn *rx_chn = &emac->rx_chns;
+
+ if (emac->xsk_qid != queue_id) {
+ rx_chn->xsk_pool = NULL;
+ tx_chn->xsk_pool = NULL;
+ } else {
+ rx_chn->xsk_pool = xsk_get_pool_from_qid(emac->ndev, queue_id);
+ tx_chn->xsk_pool = xsk_get_pool_from_qid(emac->ndev, queue_id);
+ }
+}
+
+static void prueth_destroy_txq(struct prueth_emac *emac)
+{
+ int ret, i;
+
+ atomic_set(&emac->tdown_cnt, emac->tx_ch_num);
+ /* ensure new tdown_cnt value is visible */
+ smp_mb__after_atomic();
+ /* tear down and disable UDMA channels */
+ reinit_completion(&emac->tdown_complete);
+ for (i = 0; i < emac->tx_ch_num; i++)
+ k3_udma_glue_tdown_tx_chn(emac->tx_chns[i].tx_chn, false);
+
+ ret = wait_for_completion_timeout(&emac->tdown_complete,
+ msecs_to_jiffies(1000));
+ if (!ret)
+ netdev_err(emac->ndev, "tx teardown timeout\n");
+
+ for (i = 0; i < emac->tx_ch_num; i++) {
+ napi_disable(&emac->tx_chns[i].napi_tx);
+ hrtimer_cancel(&emac->tx_chns[i].tx_hrtimer);
+ k3_udma_glue_reset_tx_chn(emac->tx_chns[i].tx_chn,
+ &emac->tx_chns[i],
+ prueth_tx_cleanup);
+ k3_udma_glue_disable_tx_chn(emac->tx_chns[i].tx_chn);
+ }
+}
+
+static void prueth_destroy_rxq(struct prueth_emac *emac)
+{
+ int i, ret;
+
+ /* tear down and disable UDMA channels */
+ reinit_completion(&emac->tdown_complete);
+ k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true);
+
+ /* When RX DMA Channel Teardown is initiated, it will result in an
+ * interrupt and a Teardown Completion Marker (TDCM) is queued into
+ * the RX Completion queue. Acknowledging the interrupt involves
+ * popping the TDCM descriptor from the RX Completion queue via the
+ * RX NAPI Handler. To avoid timing out when waiting for the TDCM to
+ * be popped, schedule the RX NAPI handler to run immediately.
+ */
+ if (!napi_if_scheduled_mark_missed(&emac->napi_rx)) {
+ if (napi_schedule_prep(&emac->napi_rx))
+ __napi_schedule(&emac->napi_rx);
+ }
+
+ ret = wait_for_completion_timeout(&emac->tdown_complete,
+ msecs_to_jiffies(1000));
+ if (!ret)
+ netdev_err(emac->ndev, "rx teardown timeout\n");
+
+ for (i = 0; i < PRUETH_MAX_RX_FLOWS; i++) {
+ napi_disable(&emac->napi_rx);
+ hrtimer_cancel(&emac->rx_hrtimer);
+ k3_udma_glue_reset_rx_chn(emac->rx_chns.rx_chn, i,
+ &emac->rx_chns,
+ prueth_rx_cleanup);
+ }
+
+ prueth_destroy_xdp_rxqs(emac);
+ k3_udma_glue_disable_rx_chn(emac->rx_chns.rx_chn);
+}
+
+static int prueth_create_txq(struct prueth_emac *emac)
+{
+ int ret, i;
+
+ for (i = 0; i < emac->tx_ch_num; i++) {
+ ret = k3_udma_glue_enable_tx_chn(emac->tx_chns[i].tx_chn);
+ if (ret)
+ goto reset_tx_chan;
+ napi_enable(&emac->tx_chns[i].napi_tx);
+ }
+ return 0;
+
+reset_tx_chan:
+	/* Since the interface is not yet up, there wouldn't be
+	 * any SKB for completion, so set free_skb to false.
+ */
+ prueth_reset_tx_chan(emac, i, false);
+ return ret;
+}
+
+static int prueth_create_rxq(struct prueth_emac *emac)
+{
+ int ret;
+
+ ret = prueth_prepare_rx_chan(emac, &emac->rx_chns, PRUETH_MAX_PKT_SIZE);
+ if (ret)
+ return ret;
+
+ ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn);
+ if (ret)
+ goto reset_rx_chn;
+
+ ret = prueth_create_xdp_rxqs(emac);
+ if (ret)
+ goto reset_rx_chn;
+
+ napi_enable(&emac->napi_rx);
+ return 0;
+
+reset_rx_chn:
+ prueth_reset_rx_chan(&emac->rx_chns, PRUETH_MAX_RX_FLOWS, false);
+ return ret;
+}
+
/**
* emac_ndo_open - EMAC device open
* @ndev: network adapter device
@@ -1310,27 +885,20 @@ const struct icss_iep_clockops prueth_iep_clockops = {
static int emac_ndo_open(struct net_device *ndev)
{
struct prueth_emac *emac = netdev_priv(ndev);
- int ret, i, num_data_chn = emac->tx_ch_num;
+ int ret, num_data_chn = emac->tx_ch_num;
+ struct icssg_flow_cfg __iomem *flow_cfg;
struct prueth *prueth = emac->prueth;
int slice = prueth_emac_slice(emac);
struct device *dev = prueth->dev;
int max_rx_flows;
int rx_flow;
- /* clear SMEM and MSMC settings for all slices */
- if (!prueth->emacs_initialized) {
- memset_io(prueth->msmcram.va, 0, prueth->msmcram.size);
- memset_io(prueth->shram.va, 0, ICSSG_CONFIG_OFFSET_SLICE1 * PRUETH_NUM_MACS);
- }
-
/* set h/w MAC as user might have re-configured */
ether_addr_copy(emac->mac_addr, ndev->dev_addr);
icssg_class_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
icssg_ft1_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
- icssg_class_default(prueth->miig_rt, slice, 0);
-
/* Notify the stack of the actual queue counts. */
ret = netif_set_real_num_tx_queues(ndev, num_data_chn);
if (ret) {
@@ -1338,6 +906,7 @@ static int emac_ndo_open(struct net_device *ndev)
return ret;
}
+ emac->xsk_qid = -EINVAL;
init_completion(&emac->cmd_complete);
ret = prueth_init_tx_chns(emac);
if (ret) {
@@ -1366,42 +935,37 @@ static int emac_ndo_open(struct net_device *ndev)
goto cleanup_napi;
}
- /* reset and start PRU firmware */
- ret = prueth_emac_start(prueth, emac);
- if (ret)
- goto free_rx_irq;
+ if (!prueth->emacs_initialized) {
+ ret = prueth_emac_common_start(prueth);
+ if (ret)
+ goto free_rx_irq;
+ icssg_enable_fw_offload(prueth);
+ }
- icssg_mii_update_mtu(prueth->mii_rt, slice, ndev->max_mtu);
+ flow_cfg = emac->dram.va + ICSSG_CONFIG_OFFSET + PSI_L_REGULAR_FLOW_ID_BASE_OFFSET;
+ writew(emac->rx_flow_id_base, &flow_cfg->rx_base_flow);
+ ret = emac_fdb_flow_id_updated(emac);
- if (!prueth->emacs_initialized) {
- ret = icss_iep_init(emac->iep, &prueth_iep_clockops,
- emac, IEP_DEFAULT_CYCLE_TIME_NS);
+ if (ret) {
+ netdev_err(ndev, "Failed to update Rx Flow ID %d", ret);
+ goto stop;
}
+ icssg_mii_update_mtu(prueth->mii_rt, slice, ndev->max_mtu);
+
ret = request_threaded_irq(emac->tx_ts_irq, NULL, prueth_tx_ts_irq,
IRQF_ONESHOT, dev_name(dev), emac);
if (ret)
goto stop;
/* Prepare RX */
- ret = prueth_prepare_rx_chan(emac, &emac->rx_chns, PRUETH_MAX_PKT_SIZE);
+ ret = prueth_create_rxq(emac);
if (ret)
goto free_tx_ts_irq;
- ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn);
+ ret = prueth_create_txq(emac);
if (ret)
- goto reset_rx_chn;
-
- for (i = 0; i < emac->tx_ch_num; i++) {
- ret = k3_udma_glue_enable_tx_chn(emac->tx_chns[i].tx_chn);
- if (ret)
- goto reset_tx_chan;
- }
-
- /* Enable NAPI in Tx and Rx direction */
- for (i = 0; i < emac->tx_ch_num; i++)
- napi_enable(&emac->tx_chns[i].napi_tx);
- napi_enable(&emac->napi_rx);
+ goto destroy_rxq;
/* start PHY */
phy_start(ndev->phydev);
@@ -1412,17 +976,13 @@ static int emac_ndo_open(struct net_device *ndev)
return 0;
-reset_tx_chan:
- /* Since interface is not yet up, there is wouldn't be
- * any SKB for completion. So set false to free_skb
- */
- prueth_reset_tx_chan(emac, i, false);
-reset_rx_chn:
- prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, false);
+destroy_rxq:
+ prueth_destroy_rxq(emac);
free_tx_ts_irq:
free_irq(emac->tx_ts_irq, emac);
stop:
- prueth_emac_stop(emac);
+ if (!prueth->emacs_initialized)
+ prueth_emac_common_stop(prueth);
free_rx_irq:
free_irq(emac->rx_chns.irq[rx_flow], emac);
cleanup_napi:
@@ -1447,9 +1007,6 @@ static int emac_ndo_stop(struct net_device *ndev)
{
struct prueth_emac *emac = netdev_priv(ndev);
struct prueth *prueth = emac->prueth;
- int rx_flow = PRUETH_RX_FLOW_DATA;
- int max_rx_flows;
- int ret, i;
/* inform the upper layers. */
netif_tx_stop_all_queues(ndev);
@@ -1458,49 +1015,29 @@ static int emac_ndo_stop(struct net_device *ndev)
if (ndev->phydev)
phy_stop(ndev->phydev);
- icssg_class_disable(prueth->miig_rt, prueth_emac_slice(emac));
-
- atomic_set(&emac->tdown_cnt, emac->tx_ch_num);
- /* ensure new tdown_cnt value is visible */
- smp_mb__after_atomic();
- /* tear down and disable UDMA channels */
- reinit_completion(&emac->tdown_complete);
- for (i = 0; i < emac->tx_ch_num; i++)
- k3_udma_glue_tdown_tx_chn(emac->tx_chns[i].tx_chn, false);
-
- ret = wait_for_completion_timeout(&emac->tdown_complete,
- msecs_to_jiffies(1000));
- if (!ret)
- netdev_err(ndev, "tx teardown timeout\n");
-
- prueth_reset_tx_chan(emac, emac->tx_ch_num, true);
- for (i = 0; i < emac->tx_ch_num; i++)
- napi_disable(&emac->tx_chns[i].napi_tx);
-
- max_rx_flows = PRUETH_MAX_RX_FLOWS;
- k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true);
-
- prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, true);
+ if (emac->prueth->is_hsr_offload_mode)
+ __dev_mc_unsync(ndev, icssg_prueth_hsr_del_mcast);
+ else
+ __dev_mc_unsync(ndev, icssg_prueth_del_mcast);
- napi_disable(&emac->napi_rx);
+ prueth_destroy_txq(emac);
+ prueth_destroy_rxq(emac);
cancel_work_sync(&emac->rx_mode_work);
/* Destroying the queued work in ndo_stop() */
cancel_delayed_work_sync(&emac->stats_work);
- if (prueth->emacs_initialized == 1)
- icss_iep_exit(emac->iep);
-
/* stop PRUs */
- prueth_emac_stop(emac);
+ if (prueth->emacs_initialized == 1)
+ prueth_emac_common_stop(prueth);
free_irq(emac->tx_ts_irq, emac);
- free_irq(emac->rx_chns.irq[rx_flow], emac);
+ free_irq(emac->rx_chns.irq[PRUETH_RX_FLOW_DATA], emac);
prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
- prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
+ prueth_cleanup_rx_chns(emac, &emac->rx_chns, PRUETH_MAX_RX_FLOWS);
prueth_cleanup_tx_chns(emac);
prueth->emacs_initialized--;
@@ -1508,11 +1045,6 @@ static int emac_ndo_stop(struct net_device *ndev)
return 0;
}
-static void emac_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue)
-{
- ndev->stats.tx_errors++;
-}
-
static void emac_ndo_set_rx_mode_work(struct work_struct *work)
{
struct prueth_emac *emac = container_of(work, struct prueth_emac, rx_mode_work);
@@ -1524,23 +1056,35 @@ static void emac_ndo_set_rx_mode_work(struct work_struct *work)
promisc = ndev->flags & IFF_PROMISC;
allmulti = ndev->flags & IFF_ALLMULTI;
- emac_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_DISABLE);
- emac_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_DISABLE);
+ icssg_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_DISABLE);
+ icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_DISABLE);
if (promisc) {
- emac_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_ENABLE);
- emac_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
+ icssg_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_ENABLE);
+ icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
return;
}
if (allmulti) {
- emac_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
+ icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
return;
}
- if (!netdev_mc_empty(ndev)) {
- emac_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
- return;
+ if (emac->prueth->is_hsr_offload_mode) {
+ __dev_mc_sync(ndev, icssg_prueth_hsr_add_mcast,